From 38a2b86184089f45863b4b1344036f6cf4645e1a Mon Sep 17 00:00:00 2001
From: James
Date: Wed, 26 Jul 2017 12:48:53 -0700
Subject: [PATCH] glide update

---
 cli/glide.lock | 8 +-
 .../github.com/aws/aws-sdk-go/CHANGELOG.md | 32 +
 .../aws/aws-sdk-go/aws/endpoints/doc.go | 4 +-
 .../aws/aws-sdk-go/aws/signer/v4/v4.go | 64 +-
 .../aws/aws-sdk-go/aws/signer/v4/v4_test.go | 44 +-
 .../github.com/aws/aws-sdk-go/aws/version.go | 2 +-
 .../apis/appstream/2016-12-01/api-2.json | 213 +-
 .../apis/appstream/2016-12-01/docs-2.json | 153 +-
 .../apis/appstream/2016-12-01/waiters-2.json | 12 +-
 .../apis/clouddirectory/2016-05-10/api-2.json | 311 +-
 .../clouddirectory/2016-05-10/docs-2.json | 276 +-
 .../apis/cloudformation/2010-05-15/api-2.json | 807 +-
 .../cloudformation/2010-05-15/docs-2.json | 527 +-
 .../models/apis/ec2/2016-11-15/api-2.json | 24 +
 .../models/apis/ec2/2016-11-15/docs-2.json | 26 +-
 .../elasticmapreduce/2009-03-31/api-2.json | 18 +-
 .../elasticmapreduce/2009-03-31/docs-2.json | 43 +-
 .../protocol/restxml/build_bench_test.go | 482 +-
 .../aws/aws-sdk-go/service/appstream/api.go | 991 +-
 .../appstream/appstreamiface/interface.go | 16 +
 .../aws-sdk-go/service/appstream/waiters.go | 12 +-
 .../aws-sdk-go/service/clouddirectory/api.go | 1919 +-
 .../aws-sdk-go/service/cloudformation/api.go | 3729 +-
 .../cloudformationiface/interface.go | 52 +
 .../aws-sdk-go/service/cloudformation/doc.go | 166 +
 .../service/cloudformation/errors.go | 74 +-
 .../aws/aws-sdk-go/service/ec2/api.go | 50 +
 .../aws/aws-sdk-go/service/emr/api.go | 188 +-
 .../github.com/funcy/functions_go/VERSION | 2 +-
 .../functions_go/client/call/call_client.go | 30 +-
 .../get_apps_app_calls_call_parameters.go | 156 +
 .../call/get_apps_app_calls_call_responses.go | 101 +
 .../call/get_apps_app_calls_parameters.go | 167 +
 .../call/get_apps_app_calls_responses.go | 101 +
 ...lete_apps_app_calls_call_log_parameters.go | 156 +
 ...elete_apps_app_calls_call_log_responses.go | 138 +
 .../get_apps_app_calls_call_log_parameters.go | 156 +
 .../get_apps_app_calls_call_log_responses.go | 101 +
 .../client/operations/operations_client.go | 28 +-
 glide.lock | 99 +-
 vendor/github.com/aws/aws-sdk-go/.gitignore | 11 +
 .../github.com/aws/aws-sdk-go/.godoc_config | 14 +
 vendor/github.com/aws/aws-sdk-go/.travis.yml | 23 +
 vendor/github.com/aws/aws-sdk-go/.yardopts | 7 +
 vendor/github.com/aws/aws-sdk-go/Gemfile | 6 +
 vendor/github.com/aws/aws-sdk-go/LICENSE.txt | 202 +
 vendor/github.com/aws/aws-sdk-go/Makefile | 152 +
 vendor/github.com/aws/aws-sdk-go/NOTICE.txt | 3 +
 vendor/github.com/aws/aws-sdk-go/README.md | 116 +
 .../aws/aws-sdk-go/aws/awserr/error.go | 145 +
 .../aws/aws-sdk-go/aws/awserr/types.go | 194 +
 .../aws/aws-sdk-go/aws/awsutil/copy.go | 100 +
 .../aws/aws-sdk-go/aws/awsutil/copy_test.go | 233 +
 .../aws/aws-sdk-go/aws/awsutil/equal.go | 27 +
 .../aws/aws-sdk-go/aws/awsutil/equal_test.go | 29 +
 .../aws/aws-sdk-go/aws/awsutil/path_value.go | 222 +
 .../aws-sdk-go/aws/awsutil/path_value_test.go | 142 +
 .../aws/aws-sdk-go/aws/awsutil/prettify.go | 107 +
 .../aws-sdk-go/aws/awsutil/string_value.go | 89 +
 .../aws/aws-sdk-go/aws/client/client.go | 120 +
 .../aws-sdk-go/aws/client/default_retryer.go | 90 +
 .../aws/client/metadata/client_info.go | 12 +
 .../github.com/aws/aws-sdk-go/aws/config.go | 363 +
 .../aws/aws-sdk-go/aws/config_test.go | 86 +
 .../aws/aws-sdk-go/aws/convert_types.go | 369 +
 .../aws/aws-sdk-go/aws/convert_types_test.go | 437 +
 .../aws-sdk-go/aws/corehandlers/handlers.go | 152 +
 .../aws/corehandlers/handlers_test.go | 192 +
 .../aws/corehandlers/param_validator.go | 17 +
 .../aws/corehandlers/param_validator_test.go | 254 +
 .../aws/credentials/chain_provider.go | 100 +
 .../aws/credentials/chain_provider_test.go | 154 +
 .../aws-sdk-go/aws/credentials/credentials.go | 223 +
 .../aws/credentials/credentials_test.go | 73 +
 .../ec2rolecreds/ec2_role_provider.go | 178 +
 .../ec2rolecreds/ec2_role_provider_test.go | 159 +
 .../aws/credentials/env_provider.go | 77 +
 .../aws/credentials/env_provider_test.go | 70 +
 .../aws-sdk-go/aws/credentials/example.ini | 12 +
 .../shared_credentials_provider.go | 151 +
 .../shared_credentials_provider_test.go | 116 +
 .../aws/credentials/static_provider.go | 48 +
 .../aws/credentials/static_provider_test.go | 34 +
 .../stscreds/assume_role_provider.go | 161 +
 .../stscreds/assume_role_provider_test.go | 56 +
 .../aws/aws-sdk-go/aws/defaults/defaults.go | 98 +
 .../aws/aws-sdk-go/aws/ec2metadata/api.go | 140 +
 .../aws-sdk-go/aws/ec2metadata/api_test.go | 195 +
 .../aws/aws-sdk-go/aws/ec2metadata/service.go | 124 +
 .../aws/ec2metadata/service_test.go | 79 +
 .../github.com/aws/aws-sdk-go/aws/errors.go | 17 +
 .../github.com/aws/aws-sdk-go/aws/logger.go | 112 +
 .../aws/aws-sdk-go/aws/request/handlers.go | 187 +
 .../aws-sdk-go/aws/request/handlers_test.go | 87 +
 .../aws-sdk-go/aws/request/http_request.go | 33 +
 .../aws/request/http_request_1_4.go | 31 +
 .../aws/request/http_request_copy_test.go | 34 +
 .../aws/request/http_request_retry_test.go | 37 +
 .../aws-sdk-go/aws/request/offset_reader.go | 49 +
 .../aws/request/offset_reader_test.go | 122 +
 .../aws/aws-sdk-go/aws/request/request.go | 330 +
 .../aws/request/request_1_6_test.go | 33 +
 .../aws/request/request_pagination.go | 104 +
 .../aws/request/request_pagination_test.go | 455 +
 .../aws-sdk-go/aws/request/request_test.go | 380 +
 .../aws/aws-sdk-go/aws/request/retryer.go | 101 +
 .../aws-sdk-go/aws/request/retryer_test.go | 16 +
 .../aws/aws-sdk-go/aws/request/validation.go | 234 +
 .../aws/aws-sdk-go/aws/session/session.go | 120 +
 .../aws-sdk-go/aws/session/session_test.go | 20 +
 .../aws/signer/v4/functional_test.go | 77 +
 .../aws-sdk-go/aws/signer/v4/header_rules.go | 82 +
 .../aws/signer/v4/header_rules_test.go | 57 +
 .../aws/aws-sdk-go/aws/signer/v4/v4.go | 644 +
 .../aws/aws-sdk-go/aws/signer/v4/v4_test.go | 345 +
 vendor/github.com/aws/aws-sdk-go/aws/types.go | 106 +
 .../aws/aws-sdk-go/aws/types_test.go | 75 +
 .../github.com/aws/aws-sdk-go/aws/version.go | 8 +
 .../awsmigrate-renamer/Godeps/Godeps.json | 19 +
 .../awsmigrate-renamer/Godeps/Readme | 5 +
 .../awsmigrate/awsmigrate-renamer/gen/gen.go | 200 +
 .../awsmigrate-renamer/rename/rename.go | 116 +
 .../awsmigrate-renamer/rename/renames.go | 2120 +
 .../awsmigrate/awsmigrate-renamer/renamer.go | 45 +
 .../aws/aws-sdk-go/awstesting/assert.go | 130 +
 .../aws/aws-sdk-go/awstesting/assert_test.go | 64 +
 .../aws/aws-sdk-go/awstesting/client.go | 42 +
 .../customizations/s3/integration_test.go | 124 +
 .../s3/s3manager/integration_test.go | 163 +
 .../customizations/s3/s3manager/stub.go | 1 +
 .../integration/customizations/s3/stub.go | 1 +
 .../awstesting/integration/integration.go | 44 +
 .../integration/smoke/acm/acm.feature | 14 +
 .../integration/smoke/acm/client.go | 16 +
 .../smoke/apigateway/apigateway.feature | 16 +
 .../integration/smoke/apigateway/client.go | 16 +
 .../applicationdiscoveryservice.feature | 8 +
 .../applicationdiscoveryservice/client.go | 17 +
 .../smoke/autoscaling/autoscaling.feature | 18 +
 .../integration/smoke/autoscaling/client.go | 16 +
 .../smoke/cloudformation/client.go | 16 +
 .../cloudformation/cloudformation.feature | 17 +
 .../integration/smoke/cloudfront/client.go | 16 +
 .../smoke/cloudfront/cloudfront.feature | 17 +
 .../integration/smoke/cloudhsm/client.go | 16 +
 .../smoke/cloudhsm/cloudhsm.feature | 16 +
 .../integration/smoke/cloudsearch/client.go | 16 +
 .../smoke/cloudsearch/cloudsearch.feature | 16 +
 .../integration/smoke/cloudtrail/client.go | 16 +
 .../smoke/cloudtrail/cloudtrail.feature | 16 +
 .../integration/smoke/cloudwatch/client.go | 16 +
 .../smoke/cloudwatch/cloudwatch.feature | 19 +
 .../smoke/cloudwatchlogs/client.go | 16 +
 .../cloudwatchlogs/cloudwatchlogs.feature | 17 +
 .../integration/smoke/codecommit/client.go | 16 +
 .../smoke/codecommit/codecommit.feature | 16 +
 .../integration/smoke/codedeploy/client.go | 16 +
 .../smoke/codedeploy/codedeploy.feature | 16 +
 .../integration/smoke/codepipeline/client.go | 16 +
 .../smoke/codepipeline/codepipeline.feature | 16 +
 .../smoke/cognitoidentity/client.go | 16 +
 .../cognitoidentity/cognitoidentity.feature | 19 +
 .../integration/smoke/cognitosync/client.go | 16 +
 .../smoke/cognitosync/cognitosync.feature | 16 +
 .../integration/smoke/configservice/client.go | 16 +
 .../smoke/configservice/configservice.feature | 17 +
 .../integration/smoke/datapipeline/client.go | 16 +
 .../smoke/datapipeline/datapipeline.feature | 16 +
 .../integration/smoke/devicefarm/client.go | 19 +
 .../smoke/devicefarm/devicefarm.feature | 16 +
 .../integration/smoke/directconnect/client.go | 16 +
 .../smoke/directconnect/directconnect.feature | 16 +
 .../smoke/directoryservice/client.go | 16 +
 .../directoryservice/directoryservice.feature | 17 +
 .../integration/smoke/dynamodb/client.go | 16 +
 .../smoke/dynamodb/dynamodb.feature | 19 +
 .../smoke/dynamodbstreams/client.go | 16 +
 .../dynamodbstreams/dynamodbstreams.feature | 16 +
 .../integration/smoke/ec2/client.go | 16 +
 .../integration/smoke/ec2/ec2.feature | 18 +
 .../integration/smoke/ecs/client.go | 19 +
 .../integration/smoke/ecs/ecs.feature | 14 +
 .../integration/smoke/efs/client.go | 19 +
 .../integration/smoke/efs/efs.feature | 14 +
 .../integration/smoke/elasticache/client.go | 16 +
 .../smoke/elasticache/elasticache.feature | 16 +
 .../smoke/elasticbeanstalk/client.go | 16 +
 .../elasticbeanstalk/elasticbeanstalk.feature | 16 +
 .../smoke/elasticloadbalancing/client.go | 16 +
 .../elasticloadbalancing.feature | 18 +
 .../smoke/elastictranscoder/client.go | 16 +
 .../elastictranscoder.feature | 16 +
 .../integration/smoke/emr/client.go | 16 +
 .../integration/smoke/emr/emr.feature | 16 +
 .../awstesting/integration/smoke/es/client.go | 16 +
 .../integration/smoke/es/es.feature | 16 +
 .../integration/smoke/glacier/client.go | 16 +
 .../integration/smoke/glacier/glacier.feature | 16 +
 .../integration/smoke/iam/client.go | 16 +
 .../integration/smoke/iam/iam.feature | 16 +
 .../integration/smoke/iotdataplane/client.go | 26 +
 .../smoke/iotdataplane/iotdataplane.feature | 12 +
 .../integration/smoke/kinesis/client.go | 16 +
 .../integration/smoke/kinesis/kinesis.feature | 16 +
 .../integration/smoke/kms/client.go | 16 +
 .../integration/smoke/kms/kms.feature | 13 +
 .../integration/smoke/lambda/client.go | 16 +
 .../integration/smoke/lambda/lambda.feature | 16 +
 .../smoke/machinelearning/client.go | 16 +
 .../machinelearning/machinelearning.feature | 18 +
 .../integration/smoke/opsworks/client.go | 16 +
 .../smoke/opsworks/opsworks.feature | 16 +
 .../integration/smoke/rds/client.go | 16 +
 .../integration/smoke/rds/rds.feature | 16 +
 .../integration/smoke/redshift/client.go | 16 +
 .../smoke/redshift/redshift.feature | 16 +
 .../integration/smoke/route53/client.go | 16 +
 .../integration/smoke/route53/route53.feature | 16 +
 .../smoke/route53domains/client.go | 16 +
 .../route53domains/route53domains.feature | 16 +
 .../integration/smoke/ses/client.go | 16 +
 .../integration/smoke/ses/ses.feature | 16 +
 .../awstesting/integration/smoke/shared.go | 230 +
 .../integration/smoke/simpledb/client.go | 16 +
 .../smoke/simpledb/simpledb.feature | 24 +
 .../integration/smoke/sns/client.go | 16 +
 .../integration/smoke/sns/sns.feature | 14 +
 .../integration/smoke/sqs/client.go | 16 +
 .../integration/smoke/sqs/sqs.feature | 16 +
 .../integration/smoke/ssm/client.go | 16 +
 .../integration/smoke/ssm/ssm.feature | 16 +
 .../smoke/storagegateway/client.go | 16 +
 .../storagegateway/storagegateway.feature | 16 +
 .../integration/smoke/sts/client.go | 16 +
 .../integration/smoke/sts/sts.feature | 17 +
 .../integration/smoke/support/client.go | 16 +
 .../integration/smoke/support/support.feature | 22 +
 .../integration/smoke/swf/client.go | 16 +
 .../integration/smoke/swf/swf.feature | 17 +
 .../integration/smoke/waf/client.go | 16 +
 .../integration/smoke/waf/waf.feature | 20 +
 .../integration/smoke/workspaces/client.go | 16 +
 .../smoke/workspaces/workspaces.feature | 18 +
 .../aws/aws-sdk-go/awstesting/mock/server.go | 20 +
 .../awstesting/performance/benchmarks.go | 122 +
 .../awstesting/performance/client.go | 13 +
 .../awstesting/performance/clients.feature | 17 +
 .../awstesting/performance/clients.go | 137 +
 .../aws-sdk-go/awstesting/performance/init.go | 93 +
 .../awstesting/performance/logging.go | 122 +
 .../awstesting/performance/streaming.feature | 26 +
 .../awstesting/sandbox/Dockerfile.golang-tip | 42 +
 .../awstesting/sandbox/Dockerfile.test.go1.4 | 7 +
 .../awstesting/sandbox/Dockerfile.test.go1.5 | 9 +
 .../sandbox/Dockerfile.test.go1.5-novendorexp | 7 +
 .../awstesting/sandbox/Dockerfile.test.go1.6 | 7 +
 .../awstesting/sandbox/Dockerfile.test.go1.7 | 7 +
 .../awstesting/sandbox/Dockerfile.test.gotip | 7 +
 .../aws/aws-sdk-go/awstesting/unit/unit.go | 13 +
 .../aws/aws-sdk-go/awstesting/util.go | 67 +
 .../aws/aws-sdk-go/awstesting/util_test.go | 49 +
 .../aws-godoc/templates/callgraph.html | 15 +
 .../doc-src/aws-godoc/templates/codewalk.html | 56 +
 .../aws-godoc/templates/codewalkdir.html | 16 +
 .../doc-src/aws-godoc/templates/dirlist.html | 31 +
 .../doc-src/aws-godoc/templates/error.html | 9 +
 .../doc-src/aws-godoc/templates/example.html | 30 +
 .../doc-src/aws-godoc/templates/godoc.html | 138 +
 .../doc-src/aws-godoc/templates/godocs.js | 571 +
 .../aws-godoc/templates/implements.html | 9 +
 .../doc-src/aws-godoc/templates/jquery.js | 2 +
 .../aws-godoc/templates/jquery.treeview.css | 76 +
 .../templates/jquery.treeview.edit.js | 39 +
 .../aws-godoc/templates/jquery.treeview.js | 256 +
 .../aws-godoc/templates/methodset.html | 9 +
 .../aws-godoc/templates/opensearch.xml | 11 +
 .../doc-src/aws-godoc/templates/package.txt | 116 +
 .../aws-godoc/templates/package_default.html | 245 +
 .../aws-godoc/templates/package_service.html | 262 +
 .../doc-src/aws-godoc/templates/pkglist.html | 24 +
 .../doc-src/aws-godoc/templates/search.html | 18 +
 .../doc-src/aws-godoc/templates/search.txt | 54 +
 .../aws-godoc/templates/searchcode.html | 64 +
 .../aws-godoc/templates/searchdoc.html | 24 +
 .../aws-godoc/templates/searchtxt.html | 42 +
 .../doc-src/aws-godoc/templates/style.css | 1033 +
 .../templates/user_guide_example.html | 82 +
 .../aws/aws-sdk-go/doc-src/plugin/plugin.rb | 187 +
 .../templates/default/layout/html/footer.erb | 31 +
 .../templates/default/module/html/client.erb | 4 +
 .../default/module/html/item_summary.erb | 28 +
 .../templates/default/module/html/setup.rb | 9 +
 .../templates/default/package/html/setup.rb | 8 +
 .../default/struct/html/paginators.erb | 4 +
 .../default/struct/html/request_methods.erb | 4 +
 .../templates/default/struct/html/setup.rb | 20 +
 .../service/cloudfront/signCookies/README.md | 12 +
 .../cloudfront/signCookies/signCookies.go | 77 +
 .../example/service/s3/listObjects/README.md | 27 +
 .../service/s3/listObjects/listObjects.go | 36 +
 .../s3/listObjectsConcurrently/README.md | 13 +
 .../listObjectsConcurrently.go | 228 +
 .../models/apis/acm/2015-12-08/api-2.json | 495 +
 .../models/apis/acm/2015-12-08/docs-2.json | 335 +
 .../apis/acm/2015-12-08/examples-1.json | 5 +
 .../apis/acm/2015-12-08/paginators-1.json | 10 +
 .../apis/apigateway/2015-07-09/api-2.json | 3347 ++
 .../apis/apigateway/2015-07-09/docs-2.json | 1315 +
 .../apigateway/2015-07-09/examples-1.json | 5 +
 .../apigateway/2015-07-09/paginators-1.json | 52 +
 .../2016-02-06/api-2.json | 502 +
 .../2016-02-06/docs-2.json | 362 +
 .../2016-02-06/examples-1.json | 5 +
 .../2016-02-06/paginators-1.json | 22 +
 .../apis/autoscaling/2011-01-01/api-2.json | 2038 +
 .../apis/autoscaling/2011-01-01/docs-2.json | 1410 +
 .../autoscaling/2011-01-01/examples-1.json | 5 +
 .../autoscaling/2011-01-01/paginators-1.json | 52 +
 .../autoscaling/2011-01-01/waiters-2.json | 62 +
 .../apis/cloudformation/2010-05-15/api-2.json | 1299 +
 .../cloudformation/2010-05-15/docs-2.json | 1049 +
 .../cloudformation/2010-05-15/examples-1.json | 5 +
 .../2010-05-15/paginators-1.json | 27 +
 .../cloudformation/2010-05-15/waiters-2.json | 235 +
 .../apis/cloudfront/2015-04-17/api-2.json | 2651 ++
 .../apis/cloudfront/2015-04-17/docs-2.json | 1141 +
 .../cloudfront/2015-04-17/paginators-1.json | 32 +
 .../apis/cloudfront/2015-04-17/waiters-2.json | 47 +
 .../apis/cloudfront/2015-07-27/api-2.json | 2721 ++
 .../apis/cloudfront/2015-07-27/docs-2.json | 1164 +
 .../cloudfront/2015-07-27/paginators-1.json | 32 +
 .../apis/cloudfront/2015-07-27/waiters-2.json | 47 +
 .../apis/cloudfront/2015-09-17/api-2.json | 2150 +
 .../apis/cloudfront/2015-09-17/docs-2.json | 1173 +
 .../cloudfront/2015-09-17/examples-1.json | 5 +
 .../cloudfront/2015-09-17/paginators-1.json | 32 +
 .../apis/cloudfront/2015-09-17/waiters-2.json | 47 +
 .../apis/cloudfront/2016-01-13/api-2.json | 2216 +
 .../apis/cloudfront/2016-01-13/docs-2.json | 1219 +
 .../cloudfront/2016-01-13/examples-1.json | 5 +
 .../cloudfront/2016-01-13/paginators-1.json | 32 +
 .../apis/cloudfront/2016-01-13/waiters-2.json | 47 +
 .../apis/cloudfront/2016-01-28/api-2.json | 2218 ++
 .../apis/cloudfront/2016-01-28/docs-2.json | 1220 +
 .../cloudfront/2016-01-28/examples-1.json | 5 +
 .../cloudfront/2016-01-28/paginators-1.json | 32 +
 .../apis/cloudfront/2016-01-28/waiters-2.json | 47 +
 .../apis/cloudhsm/2014-05-30/api-2.json | 877 +
 .../apis/cloudhsm/2014-05-30/docs-2.json | 543 +
 .../apis/cloudhsm/2014-05-30/examples-1.json | 5 +
 .../apis/cloudsearch/2013-01-01/api-2.json | 2001 +
 .../apis/cloudsearch/2013-01-01/docs-2.json | 865 +
 .../cloudsearch/2013-01-01/paginators-1.json | 20 +
 .../cloudsearchdomain/2013-01-01/api-2.json | 373 +
 .../cloudsearchdomain/2013-01-01/docs-2.json | 343 +
 .../2013-01-01/examples-1.json | 5 +
 .../apis/cloudtrail/2013-11-01/api-2.json | 801 +
 .../apis/cloudtrail/2013-11-01/docs-2.json | 548 +
 .../cloudtrail/2013-11-01/examples-1.json | 5 +
 .../cloudtrail/2013-11-01/paginators-1.json | 7 +
 .../apis/codecommit/2015-04-13/api-2.json | 916 +
 .../apis/codecommit/2015-04-13/docs-2.json | 632 +
 .../codecommit/2015-04-13/examples-1.json | 5 +
 .../codecommit/2015-04-13/paginators-1.json | 14 +
 .../apis/codedeploy/2014-10-06/api-2.json | 1954 +
 .../apis/codedeploy/2014-10-06/docs-2.json | 1390 +
 .../codedeploy/2014-10-06/examples-1.json | 5 +
 .../codedeploy/2014-10-06/paginators-1.json | 34 +
 .../apis/codepipeline/2015-07-09/api-2.json | 1633 +
 .../apis/codepipeline/2015-07-09/docs-2.json | 1194 +
 .../codepipeline/2015-07-09/examples-1.json | 902 +
 .../cognito-identity/2014-06-30/api-2.json | 859 +
 .../cognito-identity/2014-06-30/docs-2.json | 546 +
 .../2014-06-30/examples-1.json | 5 +
 .../apis/cognito-idp/2016-04-18/api-2.json | 1657 +
 .../apis/cognito-idp/2016-04-18/docs-2.json | 980 +
 .../cognito-idp/2016-04-18/examples-1.json | 5 +
 .../apis/cognito-sync/2014-06-30/api-2.json | 1874 +
 .../apis/cognito-sync/2014-06-30/docs-2.json | 588 +
 .../models/apis/config/2014-11-12/api-2.json | 1303 +
 .../models/apis/config/2014-11-12/docs-2.json | 1057 +
 .../apis/config/2014-11-12/examples-1.json | 5 +
 .../apis/config/2014-11-12/paginators-1.json | 10 +
 .../apis/datapipeline/2012-10-29/api-2.json | 1167 +
 .../apis/datapipeline/2012-10-29/docs-2.json | 607 +
 .../datapipeline/2012-10-29/paginators-1.json | 26 +
 .../apis/devicefarm/2015-06-23/api-2.json | 1819 +
 .../apis/devicefarm/2015-06-23/docs-2.json | 1194 +
 .../devicefarm/2015-06-23/examples-1.json | 5 +
 .../devicefarm/2015-06-23/paginators-1.json | 74 +
 .../apis/directconnect/2012-10-25/api-2.json | 793 +
 .../apis/directconnect/2012-10-25/docs-2.json | 554 +
 .../directconnect/2012-10-25/examples-1.json | 5 +
 .../2012-10-25/paginators-1.json | 22 +
 .../apis/discovery/2015-11-01/api-2.json | 556 +
 .../apis/discovery/2015-11-01/docs-2.json | 447 +
 .../apis/discovery/2015-11-01/examples-1.json | 5 +
 .../models/apis/dms/2016-01-01/api-2.json | 1325 +
 .../models/apis/dms/2016-01-01/docs-2.json | 894 +
 .../apis/dms/2016-01-01/examples-1.json | 5 +
 .../models/apis/ds/2015-04-16/api-2.json | 1674 +
 .../models/apis/ds/2015-04-16/docs-2.json | 1152 +
 .../models/apis/ds/2015-04-16/examples-1.json | 5 +
 .../apis/dynamodb/2011-12-05/api-2.json | 801 +
 .../apis/dynamodb/2011-12-05/docs-2.json | 606 +
 .../apis/dynamodb/2011-12-05/examples-1.json | 5 +
 .../dynamodb/2011-12-05/paginators-1.json | 26 +
 .../apis/dynamodb/2011-12-05/waiters-2.json | 35 +
 .../apis/dynamodb/2012-08-10/api-2.json | 1200 +
 .../apis/dynamodb/2012-08-10/docs-2.json | 1028 +
 .../apis/dynamodb/2012-08-10/examples-1.json | 5 +
 .../dynamodb/2012-08-10/paginators-1.json | 26 +
 .../apis/dynamodb/2012-08-10/waiters-2.json | 35 +
 .../models/apis/ec2/2015-04-15/api-2.json | 12049 ++++++
 .../models/apis/ec2/2015-04-15/docs-2.json | 5495 +++
 .../apis/ec2/2015-04-15/paginators-1.json | 125 +
 .../models/apis/ec2/2015-04-15/waiters-2.json | 494 +
 .../models/apis/ec2/2015-10-01/api-2.json | 13759 +++++++
 .../models/apis/ec2/2015-10-01/docs-2.json | 6382 +++
 .../apis/ec2/2015-10-01/examples-1.json | 5 +
 .../apis/ec2/2015-10-01/paginators-1.json | 138 +
 .../models/apis/ec2/2015-10-01/waiters-2.json | 593 +
 .../models/apis/ec2/2016-04-01/api-2.json | 13842 +++++++
 .../models/apis/ec2/2016-04-01/docs-2.json | 6410 +++
 .../apis/ec2/2016-04-01/examples-1.json | 5 +
 .../apis/ec2/2016-04-01/paginators-1.json | 138 +
 .../models/apis/ec2/2016-04-01/waiters-2.json | 593 +
 .../models/apis/ecr/2015-09-21/api-2.json | 849 +
 .../models/apis/ecr/2015-09-21/docs-2.json | 620 +
 .../apis/ecr/2015-09-21/examples-1.json | 5 +
 .../models/apis/ecs/2014-11-13/api-2.json | 1393 +
 .../models/apis/ecs/2014-11-13/docs-2.json | 1002 +
 .../apis/ecs/2014-11-13/examples-1.json | 5 +
 .../apis/ecs/2014-11-13/paginators-1.json | 40 +
 .../models/apis/ecs/2014-11-13/waiters-2.json | 93 +
 .../apis/elasticache/2015-02-02/api-2.json | 2426 ++
 .../apis/elasticache/2015-02-02/docs-2.json | 1421 +
 .../elasticache/2015-02-02/examples-1.json | 5 +
 .../elasticache/2015-02-02/paginators-1.json | 76 +
 .../elasticache/2015-02-02/waiters-2.json | 143 +
 .../elasticbeanstalk/2010-12-01/api-2.json | 1894 +
 .../elasticbeanstalk/2010-12-01/docs-2.json | 1528 +
 .../2010-12-01/examples-1.json | 1109 +
 .../2010-12-01/paginators-1.json | 25 +
 .../elasticfilesystem/2015-02-01/api-2.json | 713 +
 .../elasticfilesystem/2015-02-01/docs-2.json | 422 +
 .../2015-02-01/examples-1.json | 5 +
 .../2012-06-01/api-2.json | 2145 +
 .../2012-06-01/docs-2.json | 1078 +
 .../2012-06-01/paginators-1.json | 18 +
 .../2012-06-01/waiters-2.json | 49 +
 .../elasticmapreduce/2009-03-31/api-2.json | 1341 +
 .../elasticmapreduce/2009-03-31/docs-2.json | 1087 +
 .../2009-03-31/examples-1.json | 5 +
 .../2009-03-31/paginators-1.json | 32 +
 .../2009-03-31/waiters-2.json | 67 +
 .../elastictranscoder/2012-09-25/api-2.json | 1807 +
 .../elastictranscoder/2012-09-25/docs-2.json | 1152 +
 .../2012-09-25/paginators-1.json | 24 +
 .../2012-09-25/waiters-2.json | 30 +
 .../models/apis/email/2010-12-01/api-2.json | 1791 +
 .../models/apis/email/2010-12-01/docs-2.json | 1212 +
 .../apis/email/2010-12-01/examples-1.json | 5 +
 .../apis/email/2010-12-01/paginators-1.json | 13 +
 .../apis/email/2010-12-01/waiters-2.json | 18 +
 .../models/apis/es/2015-01-01/api-2.json | 764 +
 .../models/apis/es/2015-01-01/docs-2.json | 401 +
 .../models/apis/events/2014-02-03/api-2.json | 643 +
 .../models/apis/events/2014-02-03/docs-2.json | 411 +
 .../apis/events/2014-02-03/examples-1.json | 5 +
 .../models/apis/events/2015-10-07/api-2.json | 643 +
 .../models/apis/events/2015-10-07/docs-2.json | 411 +
 .../apis/events/2015-10-07/examples-1.json | 5 +
 .../apis/firehose/2015-08-04/api-2.json | 719 +
 .../apis/firehose/2015-08-04/docs-2.json | 576 +
 .../apis/gamelift/2015-10-01/api-2.json | 2307 ++
 .../apis/gamelift/2015-10-01/docs-2.json | 1084 +
 .../apis/gamelift/2015-10-01/examples-1.json | 5 +
 .../models/apis/glacier/2012-06-01/api-2.json | 2144 +
 .../apis/glacier/2012-06-01/docs-2.json | 685 +
 .../apis/glacier/2012-06-01/paginators-1.json | 28 +
 .../apis/glacier/2012-06-01/waiters-2.json | 39 +
 .../models/apis/iam/2010-05-08/api-2.json | 4514 +++
 .../models/apis/iam/2010-05-08/docs-2.json | 2523 ++
 .../apis/iam/2010-05-08/examples-1.json | 5 +
 .../apis/iam/2010-05-08/paginators-1.json | 198 +
 .../models/apis/iam/2010-05-08/waiters-2.json | 39 +
 .../apis/importexport/2010-06-01/api-2.json | 666 +
 .../apis/importexport/2010-06-01/docs-2.json | 482 +
 .../importexport/2010-06-01/paginators-1.json | 11 +
 .../apis/inspector/2015-08-18/api-2.json | 1426 +
 .../apis/inspector/2015-08-18/docs-2.json | 1016 +
 .../apis/inspector/2015-08-18/examples-1.json | 5 +
 .../apis/inspector/2016-02-16/api-2.json | 1964 +
 .../apis/inspector/2016-02-16/docs-2.json | 1199 +
 .../apis/inspector/2016-02-16/examples-1.json | 5 +
 .../apis/iot-data/2015-05-28/api-2.json | 263 +
 .../apis/iot-data/2015-05-28/docs-2.json | 152 +
 .../apis/iot-data/2015-05-28/examples-1.json | 5 +
 .../models/apis/iot/2015-05-28/api-2.json | 3800 ++
 .../models/apis/iot/2015-05-28/docs-2.json | 1422 +
 .../apis/iot/2015-05-28/examples-1.json | 5 +
 .../models/apis/kinesis/2013-12-02/api-2.json | 822 +
 .../apis/kinesis/2013-12-02/docs-2.json | 506 +
 .../apis/kinesis/2013-12-02/examples-1.json | 5 +
 .../apis/kinesis/2013-12-02/paginators-1.json | 18 +
 .../apis/kinesis/2013-12-02/waiters-2.json | 18 +
 .../models/apis/kms/2014-11-01/api-2.json | 1209 +
 .../models/apis/kms/2014-11-01/docs-2.json | 701 +
 .../apis/kms/2014-11-01/examples-1.json | 5 +
 .../apis/kms/2014-11-01/paginators-1.json | 32 +
 .../models/apis/lambda/2014-11-11/api-2.json | 667 +
 .../models/apis/lambda/2014-11-11/docs-2.json | 303 +
 .../apis/lambda/2014-11-11/paginators-1.json | 16 +
 .../models/apis/lambda/2015-03-31/api-2.json | 1342 +
 .../models/apis/lambda/2015-03-31/docs-2.json | 716 +
 .../apis/lambda/2015-03-31/examples-1.json | 5 +
 .../apis/lambda/2015-03-31/paginators-1.json | 16 +
 .../models/apis/logs/2014-03-28/api-2.json | 1138 +
 .../models/apis/logs/2014-03-28/docs-2.json | 789 +
 .../apis/logs/2014-03-28/examples-1.json | 5 +
 .../apis/logs/2014-03-28/paginators-1.json | 49 +
 .../machinelearning/2014-12-12/api-2.json | 1947 +
 .../machinelearning/2014-12-12/docs-2.json | 1128 +
 .../2014-12-12/paginators-1.json | 28 +
 .../machinelearning/2014-12-12/waiters-2.json | 81 +
 .../2015-07-01/api-2.json | 120 +
 .../2015-07-01/docs-2.json | 90 +
 .../2015-07-01/examples-1.json | 5 +
 .../meteringmarketplace/2016-01-14/api-2.json | 127 +
 .../2016-01-14/docs-2.json | 102 +
 .../2016-01-14/examples-1.json | 5 +
 .../mobileanalytics/2014-06-05/api-2.json | 119 +
 .../mobileanalytics/2014-06-05/docs-2.json | 98 +
 .../apis/monitoring/2010-08-01/api-2.json | 790 +
 .../apis/monitoring/2010-08-01/docs-2.json | 515 +
 .../monitoring/2010-08-01/examples-1.json | 5 +
 .../monitoring/2010-08-01/paginators-1.json | 24 +
 .../apis/monitoring/2010-08-01/waiters-2.json | 18 +
 .../apis/opsworks/2013-02-18/api-2.json | 2606 ++
 .../apis/opsworks/2013-02-18/docs-2.json | 1688 +
 .../apis/opsworks/2013-02-18/examples-1.json | 5 +
 .../opsworks/2013-02-18/paginators-1.json | 55 +
 .../apis/opsworks/2013-02-18/waiters-2.json | 295 +
 .../models/apis/rds/2013-01-10/api-2.json | 2901 ++
 .../models/apis/rds/2013-01-10/docs-2.json | 1681 +
 .../apis/rds/2013-01-10/examples-1.json | 5 +
 .../apis/rds/2013-01-10/paginators-1.json | 97 +
 .../models/apis/rds/2013-02-12/api-2.json | 3057 ++
 .../models/apis/rds/2013-02-12/docs-2.json | 1796 +
 .../apis/rds/2013-02-12/examples-1.json | 5 +
 .../apis/rds/2013-02-12/paginators-1.json | 110 +
 .../models/apis/rds/2013-09-09/api-2.json | 3158 ++
 .../models/apis/rds/2013-09-09/docs-2.json | 1876 +
 .../apis/rds/2013-09-09/examples-1.json | 5 +
 .../apis/rds/2013-09-09/paginators-1.json | 110 +
 .../models/apis/rds/2013-09-09/waiters-2.json | 97 +
 .../models/apis/rds/2014-09-01/api-2.json | 3271 ++
 .../models/apis/rds/2014-09-01/docs-2.json | 1932 +
 .../apis/rds/2014-09-01/examples-1.json | 5 +
 .../models/apis/rds/2014-10-31/api-2.json | 4720 +++
 .../models/apis/rds/2014-10-31/docs-2.json | 2755 ++
 .../apis/rds/2014-10-31/examples-1.json | 5 +
 .../apis/rds/2014-10-31/paginators-1.json | 110 +
 .../models/apis/rds/2014-10-31/waiters-2.json | 102 +
 .../apis/redshift/2012-12-01/api-2.json | 3768 ++
 .../apis/redshift/2012-12-01/docs-2.json | 2135 +
 .../apis/redshift/2012-12-01/examples-1.json | 5 +
 .../redshift/2012-12-01/paginators-1.json | 94 +
 .../apis/redshift/2012-12-01/waiters-2.json | 97 +
 .../models/apis/route53/2013-04-01/api-2.json | 3245 ++
 .../apis/route53/2013-04-01/docs-2.json | 1794 +
 .../apis/route53/2013-04-01/examples-1.json | 5 +
 .../apis/route53/2013-04-01/paginators-1.json | 33 +
 .../apis/route53/2013-04-01/waiters-2.json | 18 +
 .../apis/route53domains/2014-05-15/api-2.json | 1197 +
 .../route53domains/2014-05-15/docs-2.json | 654 +
 .../route53domains/2014-05-15/examples-1.json | 5 +
 .../2014-05-15/paginators-1.json | 17 +
 .../models/apis/s3/2006-03-01/api-2.json | 4517 +++
 .../models/apis/s3/2006-03-01/docs-2.json | 2445 ++
 .../models/apis/s3/2006-03-01/examples-1.json | 5 +
 .../apis/s3/2006-03-01/paginators-1.json | 66 +
 .../models/apis/s3/2006-03-01/waiters-2.json | 73 +
 .../models/apis/sdb/2009-04-15/api-2.json | 971 +
 .../models/apis/sdb/2009-04-15/docs-2.json | 353 +
 .../apis/sdb/2009-04-15/paginators-1.json | 15 +
 .../apis/servicecatalog/2015-12-10/api-2.json | 749 +
 .../servicecatalog/2015-12-10/docs-2.json | 789 +
 .../servicecatalog/2015-12-10/examples-1.json | 5 +
 .../models/apis/sns/2010-03-31/api-2.json | 1139 +
 .../models/apis/sns/2010-03-31/docs-2.json | 658 +
 .../apis/sns/2010-03-31/paginators-1.json | 29 +
 .../models/apis/sqs/2012-11-05/api-2.json | 950 +
 .../models/apis/sqs/2012-11-05/docs-2.json | 504 +
 .../apis/sqs/2012-11-05/examples-1.json | 44 +
 .../apis/sqs/2012-11-05/paginators-1.json | 7 +
 .../models/apis/ssm/2014-11-06/api-2.json | 1742 +
 .../models/apis/ssm/2014-11-06/docs-2.json | 1251 +
 .../apis/ssm/2014-11-06/examples-1.json | 5 +
 .../apis/ssm/2014-11-06/paginators-1.json | 34 +
 .../apis/storagegateway/2013-06-30/api-2.json | 2275 ++
 .../storagegateway/2013-06-30/docs-2.json | 1484 +
 .../storagegateway/2013-06-30/examples-1.json | 5 +
 .../2013-06-30/paginators-1.json | 52 +
 .../streams.dynamodb/2012-08-10/api-2.json | 397 +
 .../streams.dynamodb/2012-08-10/docs-2.json | 354 +
 .../2012-08-10/examples-1.json | 5 +
 .../models/apis/sts/2011-06-15/api-2.json | 521 +
 .../models/apis/sts/2011-06-15/docs-2.json | 391 +
 .../apis/sts/2011-06-15/examples-1.json | 5 +
 .../models/apis/support/2013-04-15/api-2.json | 869 +
 .../apis/support/2013-04-15/docs-2.json | 680 +
 .../apis/support/2013-04-15/paginators-1.json | 25 +
 .../models/apis/swf/2012-01-25/api-2.json | 2838 ++
 .../models/apis/swf/2012-01-25/docs-2.json | 1695 +
 .../apis/swf/2012-01-25/paginators-1.json | 46 +
 .../models/apis/waf/2015-08-24/api-2.json | 1959 +
 .../models/apis/waf/2015-08-24/docs-2.json | 1208 +
 .../apis/waf/2015-08-24/examples-1.json | 5 +
 .../apis/workspaces/2015-04-08/api-2.json | 632 +
 .../apis/workspaces/2015-04-08/docs-2.json | 550 +
 .../workspaces/2015-04-08/examples-1.json | 5 +
 .../workspaces/2015-04-08/paginators-1.json | 20 +
 .../models/protocol_tests/generate.go | 432 +
 .../models/protocol_tests/input/ec2.json | 422 +
 .../models/protocol_tests/input/json.json | 541 +
 .../models/protocol_tests/input/query.json | 842 +
 .../protocol_tests/input/rest-json.json | 1240 +
 .../models/protocol_tests/input/rest-xml.json | 1633 +
 .../models/protocol_tests/output/ec2.json | 454 +
 .../models/protocol_tests/output/json.json | 369 +
 .../models/protocol_tests/output/query.json | 775 +
 .../protocol_tests/output/rest-json.json | 608 +
 .../protocol_tests/output/rest-xml.json | 720 +
 .../aws/aws-sdk-go/private/README.md | 4 +
 .../aws-sdk-go/private/endpoints/endpoints.go | 65 +
 .../private/endpoints/endpoints.json | 75 +
 .../private/endpoints/endpoints_map.go | 88 +
 .../private/endpoints/endpoints_test.go | 51 +
 .../aws/aws-sdk-go/private/model/api/api.go | 485 +
 .../aws-sdk-go/private/model/api/api_test.go | 44 +
 .../private/model/api/customization_passes.go | 104 +
 .../aws-sdk-go/private/model/api/docstring.go | 156 +
 .../private/model/api/exportable_name.go | 12 +
 .../aws/aws-sdk-go/private/model/api/load.go | 71 +
 .../aws-sdk-go/private/model/api/load_test.go | 32 +
 .../aws-sdk-go/private/model/api/operation.go | 354 +
 .../private/model/api/pagination.go | 89 +
 .../private/model/api/param_filler.go | 131 +
 .../aws-sdk-go/private/model/api/passes.go | 255 +
 .../aws/aws-sdk-go/private/model/api/shape.go | 505 +
 .../private/model/api/shape_validation.go | 153 +
 .../private/model/api/shapetag_test.go | 25 +
 .../aws-sdk-go/private/model/api/waiters.go | 133 +
 .../private/model/cli/api-info/api-info.go | 27 +
 .../private/model/cli/gen-api/main.go | 254 +
 .../private/model/cli/gen-endpoints/main.go | 47 +
 .../aws/aws-sdk-go/private/model/endpoints.go | 57 +
 .../private/protocol/ec2query/build.go | 35 +
 .../protocol/ec2query/build_bench_test.go | 85 +
 .../private/protocol/ec2query/build_test.go | 1380 +
 .../private/protocol/ec2query/unmarshal.go | 63 +
 .../protocol/ec2query/unmarshal_test.go | 1252 +
 .../private/protocol/idempotency.go | 75 +
 .../private/protocol/idempotency_test.go | 106 +
 .../private/protocol/json/jsonutil/build.go | 254 +
 .../protocol/json/jsonutil/build_test.go | 100 +
 .../protocol/json/jsonutil/unmarshal.go | 213 +
 .../protocol/jsonrpc/build_bench_test.go | 71 +
 .../private/protocol/jsonrpc/build_test.go | 1639 +
 .../private/protocol/jsonrpc/jsonrpc.go | 111 +
 .../protocol/jsonrpc/unmarshal_test.go | 967 +
 .../private/protocol/protocol_test.go | 203 +
 .../private/protocol/query/build.go | 36 +
 .../private/protocol/query/build_test.go | 2682 ++
 .../protocol/query/queryutil/queryutil.go | 230 +
 .../private/protocol/query/unmarshal.go | 35 +
 .../private/protocol/query/unmarshal_error.go | 66 +
 .../private/protocol/query/unmarshal_test.go | 2068 +
 .../aws-sdk-go/private/protocol/rest/build.go | 256 +
 .../private/protocol/rest/payload.go | 45 +
 .../private/protocol/rest/unmarshal.go | 198 +
 .../protocol/restjson/build_bench_test.go | 356 +
 .../private/protocol/restjson/build_test.go | 3551 ++
 .../private/protocol/restjson/restjson.go | 92 +
 .../protocol/restjson/unmarshal_test.go | 1560 +
 .../protocol/restxml/build_bench_test.go | 246 +
 .../private/protocol/restxml/build_test.go | 4443 +++
 .../private/protocol/restxml/restxml.go | 69 +
 .../protocol/restxml/unmarshal_test.go | 1778 +
 .../aws-sdk-go/private/protocol/unmarshal.go | 21 +
 .../private/protocol/unmarshal_test.go | 40 +
 .../private/protocol/xml/xmlutil/build.go | 293 +
 .../private/protocol/xml/xmlutil/unmarshal.go | 260 +
 .../protocol/xml/xmlutil/xml_to_struct.go | 105 +
 .../aws/aws-sdk-go/private/signer/v2/v2.go | 180 +
 .../aws-sdk-go/private/signer/v2/v2_test.go | 195 +
 .../aws/aws-sdk-go/private/util/sort_keys.go | 14 +
 .../aws/aws-sdk-go/private/util/util.go | 109 +
 .../aws/aws-sdk-go/private/waiter/waiter.go | 134 +
 .../aws-sdk-go/private/waiter/waiter_test.go | 401 +
 vendor/github.com/aws/aws-sdk-go/sdk.go | 7 +
 .../service/acm/acmiface/interface.go | 52 +
 .../aws/aws-sdk-go/service/acm/api.go | 1452 +
 .../aws-sdk-go/service/acm/examples_test.go | 220 +
 .../aws/aws-sdk-go/service/acm/service.go | 95 +
 .../aws/aws-sdk-go/service/apigateway/api.go | 8608 ++++
 .../apigateway/apigatewayiface/interface.go | 338 +
 .../service/apigateway/customization.go | 14 +
 .../service/apigateway/examples_test.go | 1837 +
 .../aws-sdk-go/service/apigateway/service.go | 90 +
 .../service/applicationautoscaling/api.go | 1450 +
 .../applicationautoscalingiface/interface.go | 48 +
 .../applicationautoscaling/examples_test.go | 196 +
 .../service/applicationautoscaling/service.go | 112 +
 .../applicationdiscoveryservice/api.go | 1430 +
 .../interface.go | 54 +
 .../examples_test.go | 267 +
 .../applicationdiscoveryservice/service.go | 282 +
 .../aws/aws-sdk-go/service/autoscaling/api.go | 7179 ++++
 .../autoscaling/autoscalingiface/interface.go | 226 +
 .../service/autoscaling/examples_test.go | 1209 +
 .../aws-sdk-go/service/autoscaling/service.go | 88 +
 .../aws-sdk-go/service/autoscaling/waiters.go | 94 +
 .../aws-sdk-go/service/cloudformation/api.go | 4172 ++
 .../cloudformationiface/interface.go | 118 +
 .../service/cloudformation/examples_test.go | 609 +
 .../service/cloudformation/service.go | 103 +
 .../service/cloudformation/waiters.go | 279 +
 .../aws/aws-sdk-go/service/cloudfront/api.go | 4951 +++
 .../cloudfront/cloudfrontiface/interface.go | 110 +
 .../service/cloudfront/examples_test.go | 917 +
 .../aws-sdk-go/service/cloudfront/service.go | 86 +
 .../service/cloudfront/sign/policy.go | 226 +
 .../service/cloudfront/sign/policy_test.go | 139 +
 .../service/cloudfront/sign/privkey.go | 68 +
 .../service/cloudfront/sign/privkey_test.go | 90 +
 .../service/cloudfront/sign/randomreader.go | 30 +
 .../service/cloudfront/sign/sign_cookie.go | 241 +
 .../sign/sign_cookie_example_test.go | 163 +
 .../cloudfront/sign/sign_cookie_test.go | 83 +
 .../service/cloudfront/sign/sign_url.go | 205 +
 .../service/cloudfront/sign/sign_url_test.go | 149 +
 .../aws-sdk-go/service/cloudfront/waiters.go | 76 +
 .../aws/aws-sdk-go/service/cloudhsm/api.go | 2226 ++
 .../cloudhsm/cloudhsmiface/interface.go | 94 +
 .../service/cloudhsm/examples_test.go | 431 +
 .../aws-sdk-go/service/cloudhsm/service.go | 87 +
 .../aws/aws-sdk-go/service/cloudsearch/api.go | 4103 ++
 .../cloudsearch/cloudsearchiface/interface.go | 110 +
 .../service/cloudsearch/examples_test.go | 616 +
 .../aws-sdk-go/service/cloudsearch/service.go | 94 +
 .../service/cloudsearchdomain/api.go | 964 +
 .../cloudsearchdomainiface/interface.go | 26 +
 .../cloudsearchdomain/customizations_test.go | 50 +
 .../cloudsearchdomain/examples_test.go | 89 +
 .../service/cloudsearchdomain/service.go | 96 +
 .../aws/aws-sdk-go/service/cloudtrail/api.go | 1920 +
 .../cloudtrail/cloudtrailiface/interface.go | 62 +
 .../service/cloudtrail/examples_test.go | 296 +
 .../aws-sdk-go/service/cloudtrail/service.go | 107 +
 .../aws/aws-sdk-go/service/cloudwatch/api.go | 2123 +
 .../cloudwatch/cloudwatchiface/interface.go | 64 +
 .../service/cloudwatch/examples_test.go | 337 +
 .../aws-sdk-go/service/cloudwatch/service.go | 100 +
 .../aws-sdk-go/service/cloudwatch/waiters.go | 30 +
 .../service/cloudwatchevents/api.go | 1622 +
 .../cloudwatcheventsiface/interface.go | 62 +
 .../service/cloudwatchevents/examples_test.go | 281 +
 .../service/cloudwatchevents/service.go | 101 +
 .../aws-sdk-go/service/cloudwatchlogs/api.go | 3785 ++
 .../cloudwatchlogsiface/interface.go | 128 +
 .../service/cloudwatchlogs/examples_test.go | 566 +
 .../service/cloudwatchlogs/service.go | 116 +
 .../aws/aws-sdk-go/service/codecommit/api.go | 1909 +
 .../codecommit/codecommitiface/interface.go | 78 +
 .../service/codecommit/examples_test.go | 347 +
 .../aws-sdk-go/service/codecommit/service.go | 114 +
 .../aws/aws-sdk-go/service/codedeploy/api.go | 4927 +++
 .../codedeploy/codedeployiface/interface.go | 166 +
 .../service/codedeploy/examples_test.go | 891 +
 .../aws-sdk-go/service/codedeploy/service.go | 135 +
 .../aws-sdk-go/service/codepipeline/api.go | 4535 +++
 .../codepipelineiface/interface.go | 114 +
 .../service/codepipeline/examples_test.go | 707 +
 .../service/codepipeline/service.go | 192 +
 .../aws-sdk-go/service/cognitoidentity/api.go | 2281 ++
 .../cognitoidentityiface/interface.go | 86 +
 .../service/cognitoidentity/customizations.go | 12 +
 .../cognitoidentity/customizations_test.go | 42 +
 .../service/cognitoidentity/examples_test.go | 450 +
 .../service/cognitoidentity/service.go | 119 +
 .../service/cognitoidentityprovider/api.go | 4546 +++
 .../cognitoidentityprovideriface/interface.go | 150 +
 .../cognitoidentityprovider/examples_test.go | 829 +
 .../cognitoidentityprovider/service.go | 93 +
 .../aws/aws-sdk-go/service/cognitosync/api.go | 2436 ++
 .../cognitosync/cognitosynciface/interface.go | 82 +
 .../service/cognitosync/examples_test.go | 394 +
 .../aws-sdk-go/service/cognitosync/service.go | 103 +
 .../aws-sdk-go/service/configservice/api.go | 3845 ++
 .../configserviceiface/interface.go | 112 +
 .../service/configservice/examples_test.go | 590 +
 .../service/configservice/service.go | 111 +
 .../service/databasemigrationservice/api.go | 3957 ++
 .../interface.go | 130 +
 .../databasemigrationservice/examples_test.go | 739 +
 .../databasemigrationservice/service.go | 93 +
 .../aws-sdk-go/service/datapipeline/api.go | 2976 ++
 .../datapipelineiface/interface.go | 96 +
 .../service/datapipeline/examples_test.go | 530 +
 .../service/datapipeline/service.go | 109 +
 .../aws/aws-sdk-go/service/devicefarm/api.go | 6288 +++
 .../devicefarm/devicefarmiface/interface.go | 190 +
 .../service/devicefarm/examples_test.go | 790 +
 .../aws-sdk-go/service/devicefarm/service.go | 90 +
 .../aws-sdk-go/service/directconnect/api.go | 3122 ++
 .../directconnectiface/interface.go | 98 +
 .../service/directconnect/examples_test.go | 472 +
 .../service/directconnect/service.go | 99 +
 .../service/directoryservice/api.go | 4253 ++
 .../directoryserviceiface/interface.go | 142 +
 .../service/directoryservice/examples_test.go | 761 +
 .../service/directoryservice/service.go | 90 +
 .../aws/aws-sdk-go/service/dynamodb/api.go | 6686 ++++
 .../service/dynamodb/customizations.go | 98 +
 .../service/dynamodb/customizations_test.go | 106 +
 .../dynamodb/dynamodbattribute/converter.go | 443 +
 .../converter_examples_test.go | 80 +
 .../dynamodbattribute/converter_test.go | 498 +
 .../dynamodb/dynamodbattribute/decode.go | 661 +
 .../dynamodb/dynamodbattribute/decode_test.go | 394 +
 .../service/dynamodb/dynamodbattribute/doc.go | 60 +
 .../dynamodb/dynamodbattribute/encode.go | 557 +
 .../dynamodb/dynamodbattribute/encode_test.go | 126 +
 .../dynamodb/dynamodbattribute/field.go | 269 +
 .../dynamodb/dynamodbattribute/field_test.go | 110 +
 .../marshaler_examples_test.go | 104 +
 .../dynamodbattribute/marshaler_test.go | 526 +
 .../dynamodb/dynamodbattribute/shared_test.go | 389 +
 .../service/dynamodb/dynamodbattribute/tag.go | 65 +
 .../dynamodb/dynamodbattribute/tag_test.go | 45 +
 .../dynamodb/dynamodbiface/interface.go | 78 +
 .../service/dynamodb/examples_test.go | 1353 +
 .../aws-sdk-go/service/dynamodb/service.go | 202 +
 .../aws-sdk-go/service/dynamodb/waiters.go | 59 +
 .../aws-sdk-go/service/dynamodbstreams/api.go | 838 +
 .../dynamodbstreamsiface/interface.go | 30 +
 .../service/dynamodbstreams/examples_test.go | 100 +
 .../service/dynamodbstreams/service.go | 105 +
 .../aws/aws-sdk-go/service/ec2/api.go | 33259 ++++++++++++++++
 .../aws-sdk-go/service/ec2/customizations.go | 55 +
 .../service/ec2/customizations_test.go | 35 +
 .../service/ec2/ec2iface/interface.go | 858 +
 .../aws-sdk-go/service/ec2/examples_test.go | 5848 +++
 .../aws/aws-sdk-go/service/ec2/service.go | 89 +
 .../aws/aws-sdk-go/service/ec2/waiters.go | 907 +
 .../aws/aws-sdk-go/service/ecr/api.go | 2079 +
 .../service/ecr/ecriface/interface.go | 78 +
 .../aws-sdk-go/service/ecr/examples_test.go | 376 +
 .../aws/aws-sdk-go/service/ecr/service.go | 93 +
 .../aws/aws-sdk-go/service/ecs/api.go | 4742 +++
 .../service/ecs/ecsiface/interface.go | 134 +
 .../aws-sdk-go/service/ecs/examples_test.go | 792 +
 .../aws/aws-sdk-go/service/ecs/service.go | 99 +
 .../aws/aws-sdk-go/service/ecs/waiters.go | 135 +
 .../aws/aws-sdk-go/service/efs/api.go | 1575 +
 .../service/efs/efsiface/interface.go | 58 +
 .../aws-sdk-go/service/efs/examples_test.go | 255 +
 .../aws/aws-sdk-go/service/efs/service.go | 85 +
 .../aws/aws-sdk-go/service/elasticache/api.go | 6837 ++++
 .../elasticache/elasticacheiface/interface.go | 190 +
 .../service/elasticache/examples_test.go | 966 +
 .../aws-sdk-go/service/elasticache/service.go | 96 +
 .../aws-sdk-go/service/elasticache/waiters.go | 183 +
 .../service/elasticbeanstalk/api.go | 5781 +++
 .../elasticbeanstalkiface/interface.go | 160 +
 .../service/elasticbeanstalk/examples_test.go | 903 +
 .../service/elasticbeanstalk/service.go | 102 +
 .../service/elasticsearchservice/api.go | 1527 +
 .../elasticsearchserviceiface/interface.go | 54 +
 .../elasticsearchservice/examples_test.go | 262 +
 .../service/elasticsearchservice/service.go | 92 +
 .../service/elastictranscoder/api.go | 4944 +++
 .../elastictranscoderiface/interface.go | 90 +
 .../elastictranscoder/examples_test.go | 737 +
 .../service/elastictranscoder/service.go | 86 +
 .../service/elastictranscoder/waiters.go | 42 +
 .../aws/aws-sdk-go/service/elb/api.go | 4051 ++
 .../service/elb/elbiface/interface.go | 128 +
 .../aws-sdk-go/service/elb/examples_test.go | 722 +
 .../aws/aws-sdk-go/service/elb/service.go | 98 +
 .../aws/aws-sdk-go/service/elb/waiters.go | 82 +
 .../aws/aws-sdk-go/service/emr/api.go | 4103 ++
 .../service/emr/emriface/interface.go | 92 +
 .../aws-sdk-go/service/emr/examples_test.go | 635 +
 .../aws/aws-sdk-go/service/emr/service.go | 92 +
 .../aws/aws-sdk-go/service/emr/waiters.go | 89 +
 .../aws/aws-sdk-go/service/firehose/api.go | 2132 +
 .../service/firehose/examples_test.go | 366 +
 .../firehose/firehoseiface/interface.go | 42 +
 .../aws-sdk-go/service/firehose/service.go | 90 +
 .../aws/aws-sdk-go/service/gamelift/api.go | 5683 +++
 .../service/gamelift/examples_test.go | 874 +
 .../gamelift/gameliftiface/interface.go | 162 +
 .../aws-sdk-go/service/gamelift/service.go | 133 +
 .../aws/aws-sdk-go/service/generate.go | 5 +
 .../aws/aws-sdk-go/service/glacier/api.go | 4804 +++
 .../service/glacier/customizations.go | 54 +
 .../service/glacier/customizations_test.go | 90 +
 .../service/glacier/examples_test.go | 706 +
 .../service/glacier/glacieriface/interface.go | 146 +
 .../aws/aws-sdk-go/service/glacier/service.go | 116 +
 .../aws-sdk-go/service/glacier/treehash.go | 71 +
 .../service/glacier/treehash_test.go | 28 +
 .../aws/aws-sdk-go/service/glacier/waiters.go | 65 +
 .../aws/aws-sdk-go/service/iam/api.go | 17020 ++++++++
 .../aws-sdk-go/service/iam/examples_test.go | 2366 ++
 .../service/iam/iamiface/interface.go | 518 +
 .../aws/aws-sdk-go/service/iam/service.go | 139 +
 .../aws/aws-sdk-go/service/iam/waiters.go | 65 +
 .../aws/aws-sdk-go/service/inspector/api.go | 4836 +++
 .../service/inspector/examples_test.go | 807 +
 .../inspector/inspectoriface/interface.go | 142 +
 .../aws-sdk-go/service/inspector/service.go | 90 +
 .../aws/aws-sdk-go/service/iot/api.go | 6558 +++
 .../aws-sdk-go/service/iot/examples_test.go | 1186 +
 .../service/iot/iotiface/interface.go | 218 +
 .../aws/aws-sdk-go/service/iot/service.go | 94 +
 .../aws-sdk-go/service/iotdataplane/api.go | 430 +
 .../iotdataplane/customizations_test.go | 52 +
 .../service/iotdataplane/examples_test.go | 95 +
 .../iotdataplaneiface/interface.go | 30 +
 .../service/iotdataplane/service.go | 92 +
 .../aws/aws-sdk-go/service/kinesis/api.go | 2773 ++
 .../service/kinesis/examples_test.go | 384 +
 .../service/kinesis/kinesisiface/interface.go | 86 +
 .../aws/aws-sdk-go/service/kinesis/service.go | 89 +
 .../aws/aws-sdk-go/service/kinesis/waiters.go | 30 +
 .../aws/aws-sdk-go/service/kms/api.go | 4049 ++
 .../aws-sdk-go/service/kms/examples_test.go | 664 +
 .../service/kms/kmsiface/interface.go | 138 +
 .../aws/aws-sdk-go/service/kms/service.go | 158 +
 .../aws/aws-sdk-go/service/lambda/api.go | 3350 ++
 .../service/lambda/examples_test.go | 561 +
 .../service/lambda/lambdaiface/interface.go | 114 +
 .../aws/aws-sdk-go/service/lambda/service.go | 92 +
 .../aws-sdk-go/service/machinelearning/api.go | 5602 +++
 .../service/machinelearning/customizations.go | 33 +
 .../machinelearning/customizations_test.go | 37 +
 .../service/machinelearning/examples_test.go | 681 +
 .../machinelearningiface/interface.go | 134 +
 .../service/machinelearning/service.go | 88 +
 .../service/machinelearning/waiters.go | 123 +
 .../marketplacecommerceanalytics/api.go | 241 +
 .../examples_test.go | 44 +
 .../interface.go | 18 +
 .../marketplacecommerceanalytics/service.go | 89 +
 .../service/marketplacemetering/api.go | 142 +
 .../marketplacemetering/examples_test.go | 39 +
 .../marketplacemeteringiface/interface.go | 18 +
 .../service/marketplacemetering/service.go | 97 +
 .../aws-sdk-go/service/mobileanalytics/api.go | 240 +
 .../service/mobileanalytics/examples_test.go | 58 +
 .../mobileanalyticsiface/interface.go | 18 +
 .../service/mobileanalytics/service.go | 87 +
 .../aws/aws-sdk-go/service/opsworks/api.go | 10469 +++++
 .../service/opsworks/examples_test.go | 1895 +
 .../opsworks/opsworksiface/interface.go | 296 +
 .../aws-sdk-go/service/opsworks/service.go | 135 +
 .../aws-sdk-go/service/opsworks/waiters.go | 355 +
 .../aws/aws-sdk-go/service/rds/api.go | 15249 +++++++
 .../aws-sdk-go/service/rds/examples_test.go | 2420 ++
 .../service/rds/rdsiface/interface.go | 372 +
 .../aws/aws-sdk-go/service/rds/service.go | 127 +
 .../aws/aws-sdk-go/service/rds/waiters.go | 125 +
 .../aws/aws-sdk-go/service/redshift/api.go | 9829 +++++
 .../service/redshift/examples_test.go | 1584 +
 .../redshift/redshiftiface/interface.go | 292 +
 .../aws-sdk-go/service/redshift/service.go | 107 +
 .../aws-sdk-go/service/redshift/waiters.go | 141 +
 .../aws/aws-sdk-go/service/route53/api.go | 7830 ++++
 .../service/route53/customizations.go | 30 +
 .../service/route53/customizations_test.go | 22 +
 .../service/route53/examples_test.go | 1100 +
 .../service/route53/route53iface/interface.go | 212 +
 .../aws/aws-sdk-go/service/route53/service.go | 86 +
 .../service/route53/unmarshal_error.go | 77 +
 .../route53/unmarshal_error_leak_test.go | 37 +
 .../service/route53/unmarshal_error_test.go | 111 +
 .../aws/aws-sdk-go/service/route53/waiters.go | 30 +
 .../aws-sdk-go/service/route53domains/api.go | 4025 ++
 .../service/route53domains/examples_test.go | 645 +
 .../route53domainsiface/interface.go | 98 +
 .../service/route53domains/service.go | 88 +
 .../aws/aws-sdk-go/service/s3/api.go | 9527 +++++
 .../aws-sdk-go/service/s3/bucket_location.go | 43 +
 .../service/s3/bucket_location_test.go | 78 +
 .../aws/aws-sdk-go/service/s3/content_md5.go | 36 +
 .../aws-sdk-go/service/s3/customizations.go | 46 +
 .../service/s3/customizations_test.go | 105 +
 .../aws-sdk-go/service/s3/examples_test.go | 1675 +
 .../service/s3/host_style_bucket.go | 165 +
 .../service/s3/host_style_bucket_test.go | 103 +
 .../service/s3/platform_handlers.go | 8 +
 .../service/s3/platform_handlers_go1.6.go | 28 +
 .../s3/platform_handlers_go1.6_test.go | 68 +
 .../service/s3/s3iface/interface.go | 260 +
 .../aws-sdk-go/service/s3/s3manager/doc.go | 3 +
 .../service/s3/s3manager/download.go | 354 +
 .../service/s3/s3manager/download_test.go | 309 +
 .../s3/s3manager/s3manageriface/interface.go | 23 +
 .../service/s3/s3manager/shared_test.go | 4 +
 .../aws-sdk-go/service/s3/s3manager/upload.go | 664 +
 .../service/s3/s3manager/upload_test.go | 595 +
 .../aws/aws-sdk-go/service/s3/service.go | 86 +
 .../aws/aws-sdk-go/service/s3/sse.go | 44 +
 .../aws/aws-sdk-go/service/s3/sse_test.go | 79 +
 .../aws-sdk-go/service/s3/statusok_error.go | 36 +
 .../service/s3/statusok_error_test.go | 130 +
 .../aws-sdk-go/service/s3/unmarshal_error.go | 59 +
 .../service/s3/unmarshal_error_leak_test.go | 33 +
 .../service/s3/unmarshal_error_test.go | 166 +
 .../aws/aws-sdk-go/service/s3/waiters.go | 123 +
 .../aws-sdk-go/service/servicecatalog/api.go | 1930 +
 .../service/servicecatalog/examples_test.go | 296 +
 .../service/servicecatalog/service.go | 100 +
 .../servicecatalogiface/interface.go | 58 +
 .../aws/aws-sdk-go/service/ses/api.go | 6279 +++
 .../aws-sdk-go/service/ses/examples_test.go | 1031 +
 .../aws/aws-sdk-go/service/ses/service.go | 93 +
 .../service/ses/sesiface/interface.go | 184 +
 .../aws/aws-sdk-go/service/ses/waiters.go | 30 +
 .../aws/aws-sdk-go/service/simpledb/api.go | 1528 +
 .../service/simpledb/customizations.go | 11 +
 .../service/simpledb/examples_test.go | 269 +
 .../aws-sdk-go/service/simpledb/service.go | 102 +
 .../simpledb/simpledbiface/interface.go | 58 +
 .../simpledb/unmarshal_error_leak_test.go | 33 +
 .../service/simpledb/unmarshall_error.go | 53 +
 .../service/simpledb/unmarshall_error_test.go | 139 +
 .../aws/aws-sdk-go/service/sns/api.go | 3739 ++
 .../aws-sdk-go/service/sns/examples_test.go | 644 +
 .../aws/aws-sdk-go/service/sns/service.go | 98 +
 .../service/sns/snsiface/interface.go | 144 +
 .../aws/aws-sdk-go/service/sqs/api.go | 2627 ++
 .../aws/aws-sdk-go/service/sqs/api_test.go | 30 +
 .../aws/aws-sdk-go/service/sqs/checksums.go | 115 +
 .../aws-sdk-go/service/sqs/checksums_test.go | 208 +
 .../aws-sdk-go/service/sqs/customizations.go | 9 +
 .../aws-sdk-go/service/sqs/examples_test.go | 433 +
 .../aws/aws-sdk-go/service/sqs/service.go | 119 +
 .../service/sqs/sqsiface/interface.go | 82 +
 .../aws/aws-sdk-go/service/ssm/api.go | 3953 ++
 .../aws-sdk-go/service/ssm/examples_test.go | 650 +
 .../aws/aws-sdk-go/service/ssm/service.go | 204 +
 .../service/ssm/ssmiface/interface.go | 128 +
 .../aws-sdk-go/service/storagegateway/api.go | 7730 ++++
 .../service/storagegateway/examples_test.go | 1184 +
 .../service/storagegateway/service.go | 139 +
 .../storagegatewayiface/interface.go | 250 +
 .../aws/aws-sdk-go/service/sts/api.go | 1625 +
 .../aws-sdk-go/service/sts/customizations.go | 12 +
 .../service/sts/customizations_test.go | 39 +
 .../aws-sdk-go/service/sts/examples_test.go | 166 +
 .../aws/aws-sdk-go/service/sts/service.go | 130 +
 .../service/sts/stsiface/interface.go | 42 +
 .../aws/aws-sdk-go/service/support/api.go | 2128 +
 .../service/support/examples_test.go | 332 +
 .../aws/aws-sdk-go/service/support/service.go | 122 +
 .../service/support/supportiface/interface.go | 74 +
 .../aws/aws-sdk-go/service/swf/api.go | 9517 +++++
 .../aws-sdk-go/service/swf/examples_test.go | 900 +
 .../aws/aws-sdk-go/service/swf/service.go | 100 +
 .../service/swf/swfiface/interface.go | 152 +
 .../aws/aws-sdk-go/service/waf/api.go | 6579 +++
 .../aws-sdk-go/service/waf/examples_test.go | 868 +
 .../aws/aws-sdk-go/service/waf/service.go | 91 +
 .../service/waf/wafiface/interface.go | 166 +
 .../aws/aws-sdk-go/service/workspaces/api.go | 1818 +
 .../service/workspaces/examples_test.go | 266 +
 .../aws-sdk-go/service/workspaces/service.go | 90 +
 .../workspaces/workspacesiface/interface.go | 60 +
 .../github.com/coreos/go-semver/.travis.yml | 8 +
 vendor/github.com/coreos/go-semver/LICENSE | 202 +
 vendor/github.com/coreos/go-semver/README.md | 28 +
 vendor/github.com/coreos/go-semver/example.go | 20 +
 .../coreos/go-semver/semver/semver.go | 296 +
 .../coreos/go-semver/semver/semver_test.go | 373 +
 .../coreos/go-semver/semver/sort.go | 38 +
 vendor/github.com/funcy/functions_go/VERSION | 2 +-
 .../functions_go/client/call/call_client.go | 30 +-
 .../get_apps_app_calls_call_parameters.go | 156 +
 .../call/get_apps_app_calls_call_responses.go | 101 +
 .../call/get_apps_app_calls_parameters.go | 167 +
 .../call/get_apps_app_calls_responses.go | 101 +
 ...lete_apps_app_calls_call_log_parameters.go | 156 +
 ...elete_apps_app_calls_call_log_responses.go | 138 +
 .../get_apps_app_calls_call_log_parameters.go | 156 +
 .../get_apps_app_calls_call_log_responses.go | 101 +
 .../client/operations/operations_client.go | 28 +-
 .../patch_apps_app_routes_route_responses.go | 2 +-
 .../put_apps_app_routes_route_parameters.go | 30 +-
 .../put_apps_app_routes_route_responses.go | 63 +-
 .../client/routes/routes_client.go | 34 +-
 .../giantswarm/semver-bump/.gitignore | 2 +
 .../giantswarm/semver-bump/.travis.yml | 8 +
 .../github.com/giantswarm/semver-bump/LICENSE | 13 +
 .../giantswarm/semver-bump/Makefile | 36 +
 .../giantswarm/semver-bump/README.md | 40 +
 .../github.com/giantswarm/semver-bump/VERSION | 1 +
 .../giantswarm/semver-bump/bump/bump.go | 101 +
 .../giantswarm/semver-bump/bump/bump_test.go | 115 +
 .../semver-bump/commands/bump_major.go | 29 +
 .../semver-bump/commands/bump_minor.go | 29 +
 .../semver-bump/commands/bump_patch.go | 30 +
 .../giantswarm/semver-bump/commands/init.go | 34 +
 .../semver-bump/commands/semver_bump.go | 70 +
 .../giantswarm/semver-bump/commands/util.go | 17 +
 .../semver-bump/commands/version.go | 16 +
 .../github.com/giantswarm/semver-bump/main.go | 9 +
 .../semver-bump/storage/version_storage.go | 24 +
 .../storage/version_storage_file.go | 44 +
 .../storage/version_storage_file_test.go | 123 +
 .../storage/version_storage_local.go | 38 +
 .../storage/version_storage_local_test.go | 90 +
 .../storage/version_storage_util.go | 7 +
 vendor/github.com/go-ini/ini/.gitignore | 5 +
 vendor/github.com/go-ini/ini/.travis.yml | 14 +
 vendor/github.com/go-ini/ini/LICENSE | 191 +
 vendor/github.com/go-ini/ini/Makefile | 12 +
 vendor/github.com/go-ini/ini/README.md | 746 +
 vendor/github.com/go-ini/ini/README_ZH.md | 733 +
 vendor/github.com/go-ini/ini/error.go | 32 +
 vendor/github.com/go-ini/ini/ini.go | 561 +
 vendor/github.com/go-ini/ini/ini_test.go | 491 +
 vendor/github.com/go-ini/ini/key.go | 699 +
 vendor/github.com/go-ini/ini/key_test.go | 573 +
 vendor/github.com/go-ini/ini/parser.go | 361 +
 vendor/github.com/go-ini/ini/parser_test.go | 42 +
 vendor/github.com/go-ini/ini/section.go | 248 +
 vendor/github.com/go-ini/ini/section_test.go | 75 +
 vendor/github.com/go-ini/ini/struct.go | 500 +
 vendor/github.com/go-ini/ini/struct_test.go | 352 +
 .../go-ini/ini/testdata/UTF-16-BE-BOM.ini | Bin 0 -> 56 bytes
 .../go-ini/ini/testdata/UTF-16-LE-BOM.ini | Bin 0 -> 56 bytes
 .../go-ini/ini/testdata/UTF-8-BOM.ini | 2 +
 .../github.com/go-ini/ini/testdata/aicc.ini | 11 +
 .../github.com/go-ini/ini/testdata/conf.ini | 2 +
 vendor/github.com/go-resty/resty/.gitignore | 26 +
 vendor/github.com/go-resty/resty/.travis.yml | 31 +
 vendor/github.com/go-resty/resty/LICENSE | 21 +
 vendor/github.com/go-resty/resty/README.md | 648 +
 vendor/github.com/go-resty/resty/client.go | 926 +
 .../github.com/go-resty/resty/client_test.go | 365 +
 .../go-resty/resty/context17_test.go | 15 +
 .../go-resty/resty/context18_test.go | 22 +
 .../github.com/go-resty/resty/context_test.go | 199 +
 vendor/github.com/go-resty/resty/default.go | 283 +
 .../github.com/go-resty/resty/example_test.go | 217 +
 .../github.com/go-resty/resty/middleware.go | 414 +
 vendor/github.com/go-resty/resty/redirect.go | 99 +
 vendor/github.com/go-resty/resty/request.go | 496 +
 vendor/github.com/go-resty/resty/request16.go | 55 +
 vendor/github.com/go-resty/resty/request17.go | 72 +
 vendor/github.com/go-resty/resty/response.go | 125 +
 vendor/github.com/go-resty/resty/resty.go | 9 +
 .../github.com/go-resty/resty/resty_test.go | 1639 +
 vendor/github.com/go-resty/resty/retry.go | 114 +
 .../github.com/go-resty/resty/retry_test.go | 270 +
 .../go-resty/resty/test-data/test-img.png | Bin 0 -> 2579468 bytes
 .../go-resty/resty/test-data/text-file.txt | 3 +
 .../jmespath/go-jmespath/.gitignore | 4 +
 .../jmespath/go-jmespath/.travis.yml | 9 +
 .../github.com/jmespath/go-jmespath/LICENSE | 13 +
 .../github.com/jmespath/go-jmespath/Makefile | 44 +
 .../github.com/jmespath/go-jmespath/README.md | 7 +
 vendor/github.com/jmespath/go-jmespath/api.go | 49 +
 .../jmespath/go-jmespath/api_test.go | 32 +
 .../go-jmespath/astnodetype_string.go | 16 +
 .../jmespath/go-jmespath/cmd/jpgo/main.go | 96 +
 .../go-jmespath/compliance/basic.json | 96 +
 .../go-jmespath/compliance/boolean.json | 257 +
 .../go-jmespath/compliance/current.json | 25 +
 .../go-jmespath/compliance/escape.json | 46 +
 .../go-jmespath/compliance/filters.json | 468 +
 .../go-jmespath/compliance/functions.json | 825 +
 .../go-jmespath/compliance/identifiers.json | 1377 +
 .../go-jmespath/compliance/indices.json | 346 +
 .../go-jmespath/compliance/literal.json | 185 +
 .../go-jmespath/compliance/multiselect.json | 393 +
 .../go-jmespath/compliance/ormatch.json | 59 +
 .../jmespath/go-jmespath/compliance/pipe.json | 131 +
 .../go-jmespath/compliance/slice.json | 187 +
 .../go-jmespath/compliance/syntax.json | 616 +
 .../go-jmespath/compliance/unicode.json | 38 +
 .../go-jmespath/compliance/wildcard.json | 460 +
 .../jmespath/go-jmespath/compliance_test.go | 123 +
 .../jmespath/go-jmespath/functions.go | 842 +
 .../jmespath/go-jmespath/fuzz/jmespath.go | 13 +
 .../jmespath/go-jmespath/fuzz/testdata/expr-1 | 1 +
 .../go-jmespath/fuzz/testdata/expr-10 | 1 +
 .../go-jmespath/fuzz/testdata/expr-100 | 1 +
 .../go-jmespath/fuzz/testdata/expr-101 | 1 +
 .../go-jmespath/fuzz/testdata/expr-102 | 1 +
 .../go-jmespath/fuzz/testdata/expr-103 | 1 +
 .../go-jmespath/fuzz/testdata/expr-104 | 1 +
 .../go-jmespath/fuzz/testdata/expr-105 | 1 +
 .../go-jmespath/fuzz/testdata/expr-106 | 1 +
 .../go-jmespath/fuzz/testdata/expr-107 | 1 +
 .../go-jmespath/fuzz/testdata/expr-108 | 1 +
 .../go-jmespath/fuzz/testdata/expr-109 | 1 +
 .../go-jmespath/fuzz/testdata/expr-110 | 1 +
 .../go-jmespath/fuzz/testdata/expr-112 | 1 +
 .../go-jmespath/fuzz/testdata/expr-115 | 1 +
 .../go-jmespath/fuzz/testdata/expr-118 | 1 +
.../go-jmespath/fuzz/testdata/expr-119 | 1 +
.../go-jmespath/fuzz/testdata/expr-12 | 1 +
.../go-jmespath/fuzz/testdata/expr-120 | 1 +
.../go-jmespath/fuzz/testdata/expr-121 | 1 +
.../go-jmespath/fuzz/testdata/expr-122 | 1 +
.../go-jmespath/fuzz/testdata/expr-123 | 1 +
.../go-jmespath/fuzz/testdata/expr-126 | 1 +
.../go-jmespath/fuzz/testdata/expr-128 | 1 +
.../go-jmespath/fuzz/testdata/expr-129 | 1 +
.../go-jmespath/fuzz/testdata/expr-13 | 1 +
.../go-jmespath/fuzz/testdata/expr-130 | 1 +
.../go-jmespath/fuzz/testdata/expr-131 | 1 +
.../go-jmespath/fuzz/testdata/expr-132 | 1 +
.../go-jmespath/fuzz/testdata/expr-133 | 1 +
.../go-jmespath/fuzz/testdata/expr-134 | 1 +
.../go-jmespath/fuzz/testdata/expr-135 | 1 +
.../go-jmespath/fuzz/testdata/expr-136 | 1 +
.../go-jmespath/fuzz/testdata/expr-137 | 1 +
.../go-jmespath/fuzz/testdata/expr-138 | 1 +
.../go-jmespath/fuzz/testdata/expr-139 | 1 +
.../go-jmespath/fuzz/testdata/expr-14 | 1 +
.../go-jmespath/fuzz/testdata/expr-140 | 1 +
.../go-jmespath/fuzz/testdata/expr-141 | 1 +
.../go-jmespath/fuzz/testdata/expr-142 | 1 +
.../go-jmespath/fuzz/testdata/expr-143 | 1 +
.../go-jmespath/fuzz/testdata/expr-144 | 1 +
.../go-jmespath/fuzz/testdata/expr-145 | 1 +
.../go-jmespath/fuzz/testdata/expr-146 | 1 +
.../go-jmespath/fuzz/testdata/expr-147 | 1 +
.../go-jmespath/fuzz/testdata/expr-148 | 1 +
.../go-jmespath/fuzz/testdata/expr-149 | 1 +
.../go-jmespath/fuzz/testdata/expr-15 | 1 +
.../go-jmespath/fuzz/testdata/expr-150 | 1 +
.../go-jmespath/fuzz/testdata/expr-151 | 1 +
.../go-jmespath/fuzz/testdata/expr-152 | 1 +
.../go-jmespath/fuzz/testdata/expr-153 | 1 +
.../go-jmespath/fuzz/testdata/expr-155 | 1 +
.../go-jmespath/fuzz/testdata/expr-156 | 1 +
.../go-jmespath/fuzz/testdata/expr-157 | 1 +
.../go-jmespath/fuzz/testdata/expr-158 | 1 +
.../go-jmespath/fuzz/testdata/expr-159 | 1 +
.../go-jmespath/fuzz/testdata/expr-16 | 1 +
.../go-jmespath/fuzz/testdata/expr-160 | 1 +
.../go-jmespath/fuzz/testdata/expr-161 | 1 +
.../go-jmespath/fuzz/testdata/expr-162 | 1 +
.../go-jmespath/fuzz/testdata/expr-163 | 1 +
.../go-jmespath/fuzz/testdata/expr-164 | 1 +
.../go-jmespath/fuzz/testdata/expr-165 | 1 +
.../go-jmespath/fuzz/testdata/expr-166 | 1 +
.../go-jmespath/fuzz/testdata/expr-167 | 1 +
.../go-jmespath/fuzz/testdata/expr-168 | 1 +
.../go-jmespath/fuzz/testdata/expr-169 | 1 +
.../go-jmespath/fuzz/testdata/expr-17 | 1 +
.../go-jmespath/fuzz/testdata/expr-170 | 1 +
.../go-jmespath/fuzz/testdata/expr-171 | 1 +
.../go-jmespath/fuzz/testdata/expr-172 | 1 +
.../go-jmespath/fuzz/testdata/expr-173 | 1 +
.../go-jmespath/fuzz/testdata/expr-174 | 1 +
.../go-jmespath/fuzz/testdata/expr-175 | 1 +
.../go-jmespath/fuzz/testdata/expr-178 | 1 +
.../go-jmespath/fuzz/testdata/expr-179 | 1 +
.../go-jmespath/fuzz/testdata/expr-18 | 1 +
.../go-jmespath/fuzz/testdata/expr-180 | 1 +
.../go-jmespath/fuzz/testdata/expr-181 | 1 +
.../go-jmespath/fuzz/testdata/expr-182 | 1 +
.../go-jmespath/fuzz/testdata/expr-183 | 1 +
.../go-jmespath/fuzz/testdata/expr-184 | 1 +
.../go-jmespath/fuzz/testdata/expr-185 | 1 +
.../go-jmespath/fuzz/testdata/expr-186 | 1 +
.../go-jmespath/fuzz/testdata/expr-187 | 1 +
.../go-jmespath/fuzz/testdata/expr-188 | 1 +
.../go-jmespath/fuzz/testdata/expr-189 | 1 +
.../go-jmespath/fuzz/testdata/expr-19 | 1 +
.../go-jmespath/fuzz/testdata/expr-190 | 1 +
.../go-jmespath/fuzz/testdata/expr-191 | 1 +
.../go-jmespath/fuzz/testdata/expr-192 | 1 +
.../go-jmespath/fuzz/testdata/expr-193 | 1 +
.../go-jmespath/fuzz/testdata/expr-194 | 1 +
.../go-jmespath/fuzz/testdata/expr-195 | 1 +
.../go-jmespath/fuzz/testdata/expr-196 | 1 +
.../go-jmespath/fuzz/testdata/expr-198 | 1 +
.../go-jmespath/fuzz/testdata/expr-199 | 1 +
.../jmespath/go-jmespath/fuzz/testdata/expr-2 | 1 +
.../go-jmespath/fuzz/testdata/expr-20 | 1 +
.../go-jmespath/fuzz/testdata/expr-200 | 1 +
.../go-jmespath/fuzz/testdata/expr-201 | 1 +
.../go-jmespath/fuzz/testdata/expr-202 | 1 +
.../go-jmespath/fuzz/testdata/expr-203 | 1 +
.../go-jmespath/fuzz/testdata/expr-204 | 1 +
.../go-jmespath/fuzz/testdata/expr-205 | 1 +
.../go-jmespath/fuzz/testdata/expr-206 | 1 +
.../go-jmespath/fuzz/testdata/expr-207 | 1 +
.../go-jmespath/fuzz/testdata/expr-208 | 1 +
.../go-jmespath/fuzz/testdata/expr-209 | 1 +
.../go-jmespath/fuzz/testdata/expr-21 | 1 +
.../go-jmespath/fuzz/testdata/expr-210 | 1 +
.../go-jmespath/fuzz/testdata/expr-211 | 1 +
.../go-jmespath/fuzz/testdata/expr-212 | 1 +
.../go-jmespath/fuzz/testdata/expr-213 | 1 +
.../go-jmespath/fuzz/testdata/expr-214 | 1 +
.../go-jmespath/fuzz/testdata/expr-215 | 1 +
.../go-jmespath/fuzz/testdata/expr-216 | 1 +
.../go-jmespath/fuzz/testdata/expr-217 | 1 +
.../go-jmespath/fuzz/testdata/expr-218 | 1 +
.../go-jmespath/fuzz/testdata/expr-219 | 1 +
.../go-jmespath/fuzz/testdata/expr-22 | 1 +
.../go-jmespath/fuzz/testdata/expr-220 | 1 +
.../go-jmespath/fuzz/testdata/expr-221 | 1 +
.../go-jmespath/fuzz/testdata/expr-222 | 1 +
.../go-jmespath/fuzz/testdata/expr-223 | 1 +
.../go-jmespath/fuzz/testdata/expr-224 | 1 +
.../go-jmespath/fuzz/testdata/expr-225 | 1 +
.../go-jmespath/fuzz/testdata/expr-226 | 1 +
.../go-jmespath/fuzz/testdata/expr-227 | 1 +
.../go-jmespath/fuzz/testdata/expr-228 | 1 +
.../go-jmespath/fuzz/testdata/expr-229 | 1 +
.../go-jmespath/fuzz/testdata/expr-23 | 1 +
.../go-jmespath/fuzz/testdata/expr-230 | 1 +
.../go-jmespath/fuzz/testdata/expr-231 | 1 +
.../go-jmespath/fuzz/testdata/expr-232 | 1 +
.../go-jmespath/fuzz/testdata/expr-233 | 1 +
.../go-jmespath/fuzz/testdata/expr-234 | 1 +
.../go-jmespath/fuzz/testdata/expr-235 | 1 +
.../go-jmespath/fuzz/testdata/expr-236 | 1 +
.../go-jmespath/fuzz/testdata/expr-237 | 1 +
.../go-jmespath/fuzz/testdata/expr-238 | 1 +
.../go-jmespath/fuzz/testdata/expr-239 | 1 +
.../go-jmespath/fuzz/testdata/expr-24 | 1 +
.../go-jmespath/fuzz/testdata/expr-240 | 1 +
.../go-jmespath/fuzz/testdata/expr-241 | 1 +
.../go-jmespath/fuzz/testdata/expr-242 | 1 +
.../go-jmespath/fuzz/testdata/expr-243 | 1 +
.../go-jmespath/fuzz/testdata/expr-244 | 1 +
.../go-jmespath/fuzz/testdata/expr-245 | 1 +
.../go-jmespath/fuzz/testdata/expr-246 | 1 +
.../go-jmespath/fuzz/testdata/expr-247 | 1 +
.../go-jmespath/fuzz/testdata/expr-248 | 1 +
.../go-jmespath/fuzz/testdata/expr-249 | 1 +
.../go-jmespath/fuzz/testdata/expr-25 | 1 +
.../go-jmespath/fuzz/testdata/expr-250 | 1 +
.../go-jmespath/fuzz/testdata/expr-251 | 1 +
.../go-jmespath/fuzz/testdata/expr-252 | 1 +
.../go-jmespath/fuzz/testdata/expr-253 | 1 +
.../go-jmespath/fuzz/testdata/expr-254 | 1 +
.../go-jmespath/fuzz/testdata/expr-255 | 1 +
.../go-jmespath/fuzz/testdata/expr-256 | 1 +
.../go-jmespath/fuzz/testdata/expr-257 | 1 +
.../go-jmespath/fuzz/testdata/expr-258 | 1 +
.../go-jmespath/fuzz/testdata/expr-259 | 1 +
.../go-jmespath/fuzz/testdata/expr-26 | 1 +
.../go-jmespath/fuzz/testdata/expr-260 | 1 +
.../go-jmespath/fuzz/testdata/expr-261 | 1 +
.../go-jmespath/fuzz/testdata/expr-262 | 1 +
.../go-jmespath/fuzz/testdata/expr-263 | 1 +
.../go-jmespath/fuzz/testdata/expr-264 | 1 +
.../go-jmespath/fuzz/testdata/expr-265 | 1 +
.../go-jmespath/fuzz/testdata/expr-266 | 1 +
.../go-jmespath/fuzz/testdata/expr-267 | 1 +
.../go-jmespath/fuzz/testdata/expr-268 | 1 +
.../go-jmespath/fuzz/testdata/expr-269 | 1 +
.../go-jmespath/fuzz/testdata/expr-27 | 1 +
.../go-jmespath/fuzz/testdata/expr-270 | 1 +
.../go-jmespath/fuzz/testdata/expr-271 | 1 +
.../go-jmespath/fuzz/testdata/expr-272 | 1 +
.../go-jmespath/fuzz/testdata/expr-273 | 1 +
.../go-jmespath/fuzz/testdata/expr-274 | 1 +
.../go-jmespath/fuzz/testdata/expr-275 | 1 +
.../go-jmespath/fuzz/testdata/expr-276 | 1 +
.../go-jmespath/fuzz/testdata/expr-277 | 1 +
.../go-jmespath/fuzz/testdata/expr-278 | 1 +
.../go-jmespath/fuzz/testdata/expr-279 | 1 +
.../go-jmespath/fuzz/testdata/expr-28 | 1 +
.../go-jmespath/fuzz/testdata/expr-280 | 1 +
.../go-jmespath/fuzz/testdata/expr-281 | 1 +
.../go-jmespath/fuzz/testdata/expr-282 | 1 +
.../go-jmespath/fuzz/testdata/expr-283 | 1 +
.../go-jmespath/fuzz/testdata/expr-284 | 1 +
.../go-jmespath/fuzz/testdata/expr-285 | 1 +
.../go-jmespath/fuzz/testdata/expr-286 | 1 +
.../go-jmespath/fuzz/testdata/expr-287 | 1 +
.../go-jmespath/fuzz/testdata/expr-288 | 1 +
.../go-jmespath/fuzz/testdata/expr-289 | 1 +
.../go-jmespath/fuzz/testdata/expr-29 | 1 +
.../go-jmespath/fuzz/testdata/expr-290 | 1 +
.../go-jmespath/fuzz/testdata/expr-291 | 1 +
.../go-jmespath/fuzz/testdata/expr-292 | 1 +
.../go-jmespath/fuzz/testdata/expr-293 | 1 +
.../go-jmespath/fuzz/testdata/expr-294 | 1 +
.../go-jmespath/fuzz/testdata/expr-295 | 1 +
.../go-jmespath/fuzz/testdata/expr-296 | 1 +
.../go-jmespath/fuzz/testdata/expr-297 | 1 +
.../go-jmespath/fuzz/testdata/expr-298 | 1 +
.../go-jmespath/fuzz/testdata/expr-299 | 1 +
.../jmespath/go-jmespath/fuzz/testdata/expr-3 | 1 +
.../go-jmespath/fuzz/testdata/expr-30 | 1 +
.../go-jmespath/fuzz/testdata/expr-300 | 1 +
.../go-jmespath/fuzz/testdata/expr-301 | 1 +
.../go-jmespath/fuzz/testdata/expr-302 | 1 +
.../go-jmespath/fuzz/testdata/expr-303 | 1 +
.../go-jmespath/fuzz/testdata/expr-304 | 1 +
.../go-jmespath/fuzz/testdata/expr-305 | 1 +
.../go-jmespath/fuzz/testdata/expr-306 | 1 +
.../go-jmespath/fuzz/testdata/expr-307 | 1 +
.../go-jmespath/fuzz/testdata/expr-308 | 1 +
.../go-jmespath/fuzz/testdata/expr-309 | 1 +
.../go-jmespath/fuzz/testdata/expr-31 | 1 +
.../go-jmespath/fuzz/testdata/expr-310 | 1 +
.../go-jmespath/fuzz/testdata/expr-311 | 1 +
.../go-jmespath/fuzz/testdata/expr-312 | 1 +
.../go-jmespath/fuzz/testdata/expr-313 | 1 +
.../go-jmespath/fuzz/testdata/expr-314 | 1 +
.../go-jmespath/fuzz/testdata/expr-315 | 1 +
.../go-jmespath/fuzz/testdata/expr-316 | 1 +
.../go-jmespath/fuzz/testdata/expr-317 | 1 +
.../go-jmespath/fuzz/testdata/expr-318 | 1 +
.../go-jmespath/fuzz/testdata/expr-319 | 1 +
.../go-jmespath/fuzz/testdata/expr-32 | 1 +
.../go-jmespath/fuzz/testdata/expr-320 | 1 +
.../go-jmespath/fuzz/testdata/expr-321 | 1 +
.../go-jmespath/fuzz/testdata/expr-322 | 1 +
.../go-jmespath/fuzz/testdata/expr-323 | 1 +
.../go-jmespath/fuzz/testdata/expr-324 | 1 +
.../go-jmespath/fuzz/testdata/expr-325 | 1 +
.../go-jmespath/fuzz/testdata/expr-326 | 1 +
.../go-jmespath/fuzz/testdata/expr-327 | 1 +
.../go-jmespath/fuzz/testdata/expr-328 | 1 +
.../go-jmespath/fuzz/testdata/expr-329 | 1 +
.../go-jmespath/fuzz/testdata/expr-33 | 1 +
.../go-jmespath/fuzz/testdata/expr-330 | 1 +
.../go-jmespath/fuzz/testdata/expr-331 | 1 +
.../go-jmespath/fuzz/testdata/expr-332 | 1 +
.../go-jmespath/fuzz/testdata/expr-333 | 1 +
.../go-jmespath/fuzz/testdata/expr-334 | 1 +
.../go-jmespath/fuzz/testdata/expr-335 | 1 +
.../go-jmespath/fuzz/testdata/expr-336 | 1 +
.../go-jmespath/fuzz/testdata/expr-337 | 1 +
.../go-jmespath/fuzz/testdata/expr-338 | 1 +
.../go-jmespath/fuzz/testdata/expr-339 | 1 +
.../go-jmespath/fuzz/testdata/expr-34 | 1 +
.../go-jmespath/fuzz/testdata/expr-340 | 1 +
.../go-jmespath/fuzz/testdata/expr-341 | 1 +
.../go-jmespath/fuzz/testdata/expr-342 | 1 +
.../go-jmespath/fuzz/testdata/expr-343 | 1 +
.../go-jmespath/fuzz/testdata/expr-344 | 1 +
.../go-jmespath/fuzz/testdata/expr-345 | 1 +
.../go-jmespath/fuzz/testdata/expr-346 | 1 +
.../go-jmespath/fuzz/testdata/expr-347 | 1 +
.../go-jmespath/fuzz/testdata/expr-348 | 1 +
.../go-jmespath/fuzz/testdata/expr-349 | 1 +
.../go-jmespath/fuzz/testdata/expr-35 | 1 +
.../go-jmespath/fuzz/testdata/expr-350 | 1 +
.../go-jmespath/fuzz/testdata/expr-351 | 1 +
.../go-jmespath/fuzz/testdata/expr-352 | 1 +
.../go-jmespath/fuzz/testdata/expr-353 | 1 +
.../go-jmespath/fuzz/testdata/expr-354 | 1 +
.../go-jmespath/fuzz/testdata/expr-355 | 1 +
.../go-jmespath/fuzz/testdata/expr-356 | 1 +
.../go-jmespath/fuzz/testdata/expr-357 | 1 +
.../go-jmespath/fuzz/testdata/expr-358 | 1 +
.../go-jmespath/fuzz/testdata/expr-359 | 1 +
.../go-jmespath/fuzz/testdata/expr-36 | 1 +
.../go-jmespath/fuzz/testdata/expr-360 | 1 +
.../go-jmespath/fuzz/testdata/expr-361 | 1 +
.../go-jmespath/fuzz/testdata/expr-362 | 1 +
.../go-jmespath/fuzz/testdata/expr-363 | 1 +
.../go-jmespath/fuzz/testdata/expr-364 | 1 +
.../go-jmespath/fuzz/testdata/expr-365 | 1 +
.../go-jmespath/fuzz/testdata/expr-366 | 1 +
.../go-jmespath/fuzz/testdata/expr-367 | 1 +
.../go-jmespath/fuzz/testdata/expr-368 | 1 +
.../go-jmespath/fuzz/testdata/expr-369 | 1 +
.../go-jmespath/fuzz/testdata/expr-37 | 1 +
.../go-jmespath/fuzz/testdata/expr-370 | 1 +
.../go-jmespath/fuzz/testdata/expr-371 | 1 +
.../go-jmespath/fuzz/testdata/expr-372 | 1 +
.../go-jmespath/fuzz/testdata/expr-373 | 1 +
.../go-jmespath/fuzz/testdata/expr-374 | 1 +
.../go-jmespath/fuzz/testdata/expr-375 | 1 +
.../go-jmespath/fuzz/testdata/expr-376 | 1 +
.../go-jmespath/fuzz/testdata/expr-377 | 1 +
.../go-jmespath/fuzz/testdata/expr-378 | 1 +
.../go-jmespath/fuzz/testdata/expr-379 | 1 +
.../go-jmespath/fuzz/testdata/expr-38 | 1 +
.../go-jmespath/fuzz/testdata/expr-380 | 1 +
.../go-jmespath/fuzz/testdata/expr-381 | 1 +
.../go-jmespath/fuzz/testdata/expr-382 | 1 +
.../go-jmespath/fuzz/testdata/expr-383 | 1 +
.../go-jmespath/fuzz/testdata/expr-384 | 1 +
.../go-jmespath/fuzz/testdata/expr-385 | 1 +
.../go-jmespath/fuzz/testdata/expr-386 | 1 +
.../go-jmespath/fuzz/testdata/expr-387 | 1 +
.../go-jmespath/fuzz/testdata/expr-388 | 1 +
.../go-jmespath/fuzz/testdata/expr-389 | 1 +
.../go-jmespath/fuzz/testdata/expr-39 | 1 +
.../go-jmespath/fuzz/testdata/expr-390 | 1 +
.../go-jmespath/fuzz/testdata/expr-391 | 1 +
.../go-jmespath/fuzz/testdata/expr-392 | 1 +
.../go-jmespath/fuzz/testdata/expr-393 | 1 +
.../go-jmespath/fuzz/testdata/expr-394 | 1 +
.../go-jmespath/fuzz/testdata/expr-395 | 1 +
.../go-jmespath/fuzz/testdata/expr-396 | 1 +
.../go-jmespath/fuzz/testdata/expr-397 | 1 +
.../go-jmespath/fuzz/testdata/expr-398 | 1 +
.../go-jmespath/fuzz/testdata/expr-399 | 1 +
.../jmespath/go-jmespath/fuzz/testdata/expr-4 | 1 +
.../go-jmespath/fuzz/testdata/expr-40 | 1 +
.../go-jmespath/fuzz/testdata/expr-400 | 1 +
.../go-jmespath/fuzz/testdata/expr-401 | 1 +
.../go-jmespath/fuzz/testdata/expr-402 | 1 +
.../go-jmespath/fuzz/testdata/expr-403 | 1 +
.../go-jmespath/fuzz/testdata/expr-404 | 1 +
.../go-jmespath/fuzz/testdata/expr-405 | 1 +
.../go-jmespath/fuzz/testdata/expr-406 | 1 +
.../go-jmespath/fuzz/testdata/expr-407 | 1 +
.../go-jmespath/fuzz/testdata/expr-408 | 1 +
.../go-jmespath/fuzz/testdata/expr-409 | 1 +
.../go-jmespath/fuzz/testdata/expr-41 | 1 +
.../go-jmespath/fuzz/testdata/expr-410 | 1 +
.../go-jmespath/fuzz/testdata/expr-411 | 1 +
.../go-jmespath/fuzz/testdata/expr-412 | 2 +
.../go-jmespath/fuzz/testdata/expr-413 | 2 +
.../go-jmespath/fuzz/testdata/expr-414 | 1 +
.../go-jmespath/fuzz/testdata/expr-415 | 1 +
.../go-jmespath/fuzz/testdata/expr-416 | 1 +
.../go-jmespath/fuzz/testdata/expr-417 | 1 +
.../go-jmespath/fuzz/testdata/expr-418 | 1 +
.../go-jmespath/fuzz/testdata/expr-419 | 1 +
.../go-jmespath/fuzz/testdata/expr-42 | 1 +
.../go-jmespath/fuzz/testdata/expr-420 | 1 +
.../go-jmespath/fuzz/testdata/expr-421 | 1 +
.../go-jmespath/fuzz/testdata/expr-422 | 1 +
.../go-jmespath/fuzz/testdata/expr-423 | 1 +
.../go-jmespath/fuzz/testdata/expr-424 | 1 +
.../go-jmespath/fuzz/testdata/expr-425 | 1 +
.../go-jmespath/fuzz/testdata/expr-426 | 1 +
.../go-jmespath/fuzz/testdata/expr-427 | 1 +
.../go-jmespath/fuzz/testdata/expr-428 | 1 +
.../go-jmespath/fuzz/testdata/expr-429 | 1 +
.../go-jmespath/fuzz/testdata/expr-43 | 1 +
.../go-jmespath/fuzz/testdata/expr-430 | 1 +
.../go-jmespath/fuzz/testdata/expr-431 | 1 +
.../go-jmespath/fuzz/testdata/expr-432 | 1 +
.../go-jmespath/fuzz/testdata/expr-433 | 1 +
.../go-jmespath/fuzz/testdata/expr-434 | 1 +
.../go-jmespath/fuzz/testdata/expr-435 | 1 +
.../go-jmespath/fuzz/testdata/expr-436 | 1 +
.../go-jmespath/fuzz/testdata/expr-437 | 1 +
.../go-jmespath/fuzz/testdata/expr-438 | 1 +
.../go-jmespath/fuzz/testdata/expr-439 | 1 +
.../go-jmespath/fuzz/testdata/expr-44 | 1 +
.../go-jmespath/fuzz/testdata/expr-440 | 1 +
.../go-jmespath/fuzz/testdata/expr-441 | 1 +
.../go-jmespath/fuzz/testdata/expr-442 | 1 +
.../go-jmespath/fuzz/testdata/expr-443 | 1 +
.../go-jmespath/fuzz/testdata/expr-444 | 1 +
.../go-jmespath/fuzz/testdata/expr-445 | 1 +
.../go-jmespath/fuzz/testdata/expr-446 | 1 +
.../go-jmespath/fuzz/testdata/expr-447 | 1 +
.../go-jmespath/fuzz/testdata/expr-448 | 1 +
.../go-jmespath/fuzz/testdata/expr-449 | 1 +
.../go-jmespath/fuzz/testdata/expr-45 | 1 +
.../go-jmespath/fuzz/testdata/expr-450 | 1 +
.../go-jmespath/fuzz/testdata/expr-451 | 1 +
.../go-jmespath/fuzz/testdata/expr-452 | 1 +
.../go-jmespath/fuzz/testdata/expr-453 | 1 +
.../go-jmespath/fuzz/testdata/expr-454 | 1 +
.../go-jmespath/fuzz/testdata/expr-455 | 1 +
.../go-jmespath/fuzz/testdata/expr-456 | 1 +
.../go-jmespath/fuzz/testdata/expr-457 | 1 +
.../go-jmespath/fuzz/testdata/expr-458 | 1 +
.../go-jmespath/fuzz/testdata/expr-459 | 1 +
.../go-jmespath/fuzz/testdata/expr-46 | 1 +
.../go-jmespath/fuzz/testdata/expr-460 | 1 +
.../go-jmespath/fuzz/testdata/expr-461 | 1 +
.../go-jmespath/fuzz/testdata/expr-462 | 1 +
.../go-jmespath/fuzz/testdata/expr-463 | 1 +
.../go-jmespath/fuzz/testdata/expr-464 | 1 +
.../go-jmespath/fuzz/testdata/expr-465 | 1 +
.../go-jmespath/fuzz/testdata/expr-466 | 1 +
.../go-jmespath/fuzz/testdata/expr-467 | 1 +
.../go-jmespath/fuzz/testdata/expr-468 | 1 +
.../go-jmespath/fuzz/testdata/expr-469 | 1 +
.../go-jmespath/fuzz/testdata/expr-47 | 1 +
.../go-jmespath/fuzz/testdata/expr-470 | 1 +
.../go-jmespath/fuzz/testdata/expr-471 | 1 +
.../go-jmespath/fuzz/testdata/expr-472 | 1 +
.../go-jmespath/fuzz/testdata/expr-473 | 1 +
.../go-jmespath/fuzz/testdata/expr-474 | 1 +
.../go-jmespath/fuzz/testdata/expr-475 | 1 +
.../go-jmespath/fuzz/testdata/expr-476 | 1 +
.../go-jmespath/fuzz/testdata/expr-477 | 1 +
.../go-jmespath/fuzz/testdata/expr-478 | 1 +
.../go-jmespath/fuzz/testdata/expr-479 | 1 +
.../go-jmespath/fuzz/testdata/expr-48 | 1 +
.../go-jmespath/fuzz/testdata/expr-480 | 1 +
.../go-jmespath/fuzz/testdata/expr-481 | 1 +
.../go-jmespath/fuzz/testdata/expr-482 | 1 +
.../go-jmespath/fuzz/testdata/expr-483 | 1 +
.../go-jmespath/fuzz/testdata/expr-484 | 1 +
.../go-jmespath/fuzz/testdata/expr-485 | 1 +
.../go-jmespath/fuzz/testdata/expr-486 | 1 +
.../go-jmespath/fuzz/testdata/expr-487 | 1 +
.../go-jmespath/fuzz/testdata/expr-488 | 1 +
.../go-jmespath/fuzz/testdata/expr-489 | 1 +
.../go-jmespath/fuzz/testdata/expr-49 | 1 +
.../go-jmespath/fuzz/testdata/expr-490 | 1 +
.../go-jmespath/fuzz/testdata/expr-491 | 1 +
.../go-jmespath/fuzz/testdata/expr-492 | 1 +
.../go-jmespath/fuzz/testdata/expr-493 | 1 +
.../go-jmespath/fuzz/testdata/expr-494 | 1 +
.../go-jmespath/fuzz/testdata/expr-495 | 1 +
.../go-jmespath/fuzz/testdata/expr-496 | 1 +
.../go-jmespath/fuzz/testdata/expr-497 | 1 +
.../go-jmespath/fuzz/testdata/expr-498 | 1 +
.../go-jmespath/fuzz/testdata/expr-499 | 1 +
.../jmespath/go-jmespath/fuzz/testdata/expr-5 | 1 +
.../go-jmespath/fuzz/testdata/expr-50 | 1 +
.../go-jmespath/fuzz/testdata/expr-500 | 1 +
.../go-jmespath/fuzz/testdata/expr-501 | 1 +
.../go-jmespath/fuzz/testdata/expr-502 | 1 +
.../go-jmespath/fuzz/testdata/expr-503 | 1 +
.../go-jmespath/fuzz/testdata/expr-504 | 1 +
.../go-jmespath/fuzz/testdata/expr-505 | 1 +
.../go-jmespath/fuzz/testdata/expr-506 | 1 +
.../go-jmespath/fuzz/testdata/expr-507 | 1 +
.../go-jmespath/fuzz/testdata/expr-508 | 1 +
.../go-jmespath/fuzz/testdata/expr-509 | 1 +
.../go-jmespath/fuzz/testdata/expr-51 | 1 +
.../go-jmespath/fuzz/testdata/expr-510 | 1 +
.../go-jmespath/fuzz/testdata/expr-511 | 1 +
.../go-jmespath/fuzz/testdata/expr-512 | 1 +
.../go-jmespath/fuzz/testdata/expr-513 | 1 +
.../go-jmespath/fuzz/testdata/expr-514 | 1 +
.../go-jmespath/fuzz/testdata/expr-515 | 1 +
.../go-jmespath/fuzz/testdata/expr-516 | 1 +
.../go-jmespath/fuzz/testdata/expr-517 | 1 +
.../go-jmespath/fuzz/testdata/expr-518 | 1 +
.../go-jmespath/fuzz/testdata/expr-519 | 1 +
.../go-jmespath/fuzz/testdata/expr-52 | 1 +
.../go-jmespath/fuzz/testdata/expr-520 | 1 +
.../go-jmespath/fuzz/testdata/expr-521 | 1 +
.../go-jmespath/fuzz/testdata/expr-522 | 1 +
.../go-jmespath/fuzz/testdata/expr-523 | 1 +
.../go-jmespath/fuzz/testdata/expr-524 | 1 +
.../go-jmespath/fuzz/testdata/expr-525 | 1 +
.../go-jmespath/fuzz/testdata/expr-526 | 1 +
.../go-jmespath/fuzz/testdata/expr-527 | 1 +
.../go-jmespath/fuzz/testdata/expr-528 | 1 +
.../go-jmespath/fuzz/testdata/expr-529 | 1 +
.../go-jmespath/fuzz/testdata/expr-53 | 1 +
.../go-jmespath/fuzz/testdata/expr-530 | 1 +
.../go-jmespath/fuzz/testdata/expr-531 | 1 +
.../go-jmespath/fuzz/testdata/expr-532 | 1 +
.../go-jmespath/fuzz/testdata/expr-533 | 1 +
.../go-jmespath/fuzz/testdata/expr-534 | 1 +
.../go-jmespath/fuzz/testdata/expr-535 | 1 +
.../go-jmespath/fuzz/testdata/expr-536 | 1 +
.../go-jmespath/fuzz/testdata/expr-537 | 1 +
.../go-jmespath/fuzz/testdata/expr-538 | 1 +
.../go-jmespath/fuzz/testdata/expr-539 | 1 +
.../go-jmespath/fuzz/testdata/expr-54 | 1 +
.../go-jmespath/fuzz/testdata/expr-540 | 1 +
.../go-jmespath/fuzz/testdata/expr-541 | 1 +
.../go-jmespath/fuzz/testdata/expr-542 | 1 +
.../go-jmespath/fuzz/testdata/expr-543 | 1 +
.../go-jmespath/fuzz/testdata/expr-544 | 1 +
.../go-jmespath/fuzz/testdata/expr-545 | 1 +
.../go-jmespath/fuzz/testdata/expr-546 | 1 +
.../go-jmespath/fuzz/testdata/expr-547 | 1 +
.../go-jmespath/fuzz/testdata/expr-548 | 1 +
.../go-jmespath/fuzz/testdata/expr-549 | 1 +
.../go-jmespath/fuzz/testdata/expr-55 | 1 +
.../go-jmespath/fuzz/testdata/expr-550 | 1 +
.../go-jmespath/fuzz/testdata/expr-551 | 1 +
.../go-jmespath/fuzz/testdata/expr-552 | 1 +
.../go-jmespath/fuzz/testdata/expr-553 | 1 +
.../go-jmespath/fuzz/testdata/expr-554 | 1 +
.../go-jmespath/fuzz/testdata/expr-555 | 1 +
.../go-jmespath/fuzz/testdata/expr-556 | 1 +
.../go-jmespath/fuzz/testdata/expr-557 | 1 +
.../go-jmespath/fuzz/testdata/expr-558 | 1 +
.../go-jmespath/fuzz/testdata/expr-559 | 1 +
.../go-jmespath/fuzz/testdata/expr-56 | 1 +
.../go-jmespath/fuzz/testdata/expr-560 | 1 +
.../go-jmespath/fuzz/testdata/expr-561 | 1 +
.../go-jmespath/fuzz/testdata/expr-562 | 1 +
.../go-jmespath/fuzz/testdata/expr-563 | 1 +
.../go-jmespath/fuzz/testdata/expr-564 | 1 +
.../go-jmespath/fuzz/testdata/expr-565 | 1 +
.../go-jmespath/fuzz/testdata/expr-566 | 1 +
.../go-jmespath/fuzz/testdata/expr-567 | 1 +
.../go-jmespath/fuzz/testdata/expr-568 | 1 +
.../go-jmespath/fuzz/testdata/expr-569 | 1 +
.../go-jmespath/fuzz/testdata/expr-57 | 1 +
.../go-jmespath/fuzz/testdata/expr-570 | 1 +
.../go-jmespath/fuzz/testdata/expr-571 | 1 +
.../go-jmespath/fuzz/testdata/expr-572 | 1 +
.../go-jmespath/fuzz/testdata/expr-573 | 1 +
.../go-jmespath/fuzz/testdata/expr-574 | 1 +
.../go-jmespath/fuzz/testdata/expr-575 | 1 +
.../go-jmespath/fuzz/testdata/expr-576 | 1 +
.../go-jmespath/fuzz/testdata/expr-577 | 1 +
.../go-jmespath/fuzz/testdata/expr-578 | 1 +
.../go-jmespath/fuzz/testdata/expr-579 | 1 +
.../go-jmespath/fuzz/testdata/expr-58 | 1 +
.../go-jmespath/fuzz/testdata/expr-580 | 1 +
.../go-jmespath/fuzz/testdata/expr-581 | 1 +
.../go-jmespath/fuzz/testdata/expr-582 | 1 +
.../go-jmespath/fuzz/testdata/expr-583 | 1 +
.../go-jmespath/fuzz/testdata/expr-584 | 1 +
.../go-jmespath/fuzz/testdata/expr-585 | 1 +
.../go-jmespath/fuzz/testdata/expr-586 | 1 +
.../go-jmespath/fuzz/testdata/expr-587 | 1 +
.../go-jmespath/fuzz/testdata/expr-588 | 1 +
.../go-jmespath/fuzz/testdata/expr-589 | 1 +
.../go-jmespath/fuzz/testdata/expr-59 | 1 +
.../go-jmespath/fuzz/testdata/expr-590 | 1 +
.../go-jmespath/fuzz/testdata/expr-591 | 1 +
.../go-jmespath/fuzz/testdata/expr-592 | 1 +
.../go-jmespath/fuzz/testdata/expr-593 | 1 +
.../go-jmespath/fuzz/testdata/expr-594 | 1 +
.../go-jmespath/fuzz/testdata/expr-595 | 1 +
.../go-jmespath/fuzz/testdata/expr-596 | 1 +
.../go-jmespath/fuzz/testdata/expr-597 | 1 +
.../go-jmespath/fuzz/testdata/expr-598 | 1 +
.../go-jmespath/fuzz/testdata/expr-599 | 1 +
.../jmespath/go-jmespath/fuzz/testdata/expr-6 | 1 +
.../go-jmespath/fuzz/testdata/expr-60 | 1 +
.../go-jmespath/fuzz/testdata/expr-600 | 1 +
.../go-jmespath/fuzz/testdata/expr-601 | 1 +
.../go-jmespath/fuzz/testdata/expr-602 | 1 +
.../go-jmespath/fuzz/testdata/expr-603 | 1 +
.../go-jmespath/fuzz/testdata/expr-604 | 1 +
.../go-jmespath/fuzz/testdata/expr-605 | 1 +
.../go-jmespath/fuzz/testdata/expr-606 | 1 +
.../go-jmespath/fuzz/testdata/expr-607 | 1 +
.../go-jmespath/fuzz/testdata/expr-608 | 1 +
.../go-jmespath/fuzz/testdata/expr-609 | 1 +
.../go-jmespath/fuzz/testdata/expr-61 | 1 +
.../go-jmespath/fuzz/testdata/expr-610 | 1 +
.../go-jmespath/fuzz/testdata/expr-611 | 1 +
.../go-jmespath/fuzz/testdata/expr-612 | 1 +
.../go-jmespath/fuzz/testdata/expr-613 | 1 +
.../go-jmespath/fuzz/testdata/expr-614 | 1 +
.../go-jmespath/fuzz/testdata/expr-615 | 1 +
.../go-jmespath/fuzz/testdata/expr-616 | 1 +
.../go-jmespath/fuzz/testdata/expr-617 | 1 +
.../go-jmespath/fuzz/testdata/expr-618 | 1 +
.../go-jmespath/fuzz/testdata/expr-619 | 1 +
.../go-jmespath/fuzz/testdata/expr-62 | 1 +
.../go-jmespath/fuzz/testdata/expr-620 | 1 +
.../go-jmespath/fuzz/testdata/expr-621 | 1 +
.../go-jmespath/fuzz/testdata/expr-622 | 1 +
.../go-jmespath/fuzz/testdata/expr-623 | 1 +
.../go-jmespath/fuzz/testdata/expr-624 | 1 +
.../go-jmespath/fuzz/testdata/expr-625 | 1 +
.../go-jmespath/fuzz/testdata/expr-626 | 1 +
.../go-jmespath/fuzz/testdata/expr-627 | 1 +
.../go-jmespath/fuzz/testdata/expr-628 | 1 +
.../go-jmespath/fuzz/testdata/expr-629 | 1 +
.../go-jmespath/fuzz/testdata/expr-63 | 1 +
.../go-jmespath/fuzz/testdata/expr-630 | 1 +
.../go-jmespath/fuzz/testdata/expr-631 | 1 +
.../go-jmespath/fuzz/testdata/expr-632 | 1 +
.../go-jmespath/fuzz/testdata/expr-633 | 1 +
.../go-jmespath/fuzz/testdata/expr-634 | 1 +
.../go-jmespath/fuzz/testdata/expr-635 | 1 +
.../go-jmespath/fuzz/testdata/expr-636 | 1 +
.../go-jmespath/fuzz/testdata/expr-637 | 1 +
.../go-jmespath/fuzz/testdata/expr-638 | 1 +
.../go-jmespath/fuzz/testdata/expr-639 | 1 +
.../go-jmespath/fuzz/testdata/expr-64 | 1 +
.../go-jmespath/fuzz/testdata/expr-640 | 1 +
.../go-jmespath/fuzz/testdata/expr-641 | 1 +
.../go-jmespath/fuzz/testdata/expr-642 | 1 +
.../go-jmespath/fuzz/testdata/expr-643 | 1 +
.../go-jmespath/fuzz/testdata/expr-644 | 1 +
.../go-jmespath/fuzz/testdata/expr-645 | 1 +
.../go-jmespath/fuzz/testdata/expr-646 | 1 +
.../go-jmespath/fuzz/testdata/expr-647 | 1 +
.../go-jmespath/fuzz/testdata/expr-648 | 1 +
.../go-jmespath/fuzz/testdata/expr-649 | 1 +
.../go-jmespath/fuzz/testdata/expr-65 | 1 +
.../go-jmespath/fuzz/testdata/expr-650 | 1 +
.../go-jmespath/fuzz/testdata/expr-651 | 1 +
.../go-jmespath/fuzz/testdata/expr-652 | 1 +
.../go-jmespath/fuzz/testdata/expr-653 | 1 +
.../go-jmespath/fuzz/testdata/expr-654 | 1 +
.../go-jmespath/fuzz/testdata/expr-655 | 1 +
.../go-jmespath/fuzz/testdata/expr-656 | 1 +
.../go-jmespath/fuzz/testdata/expr-66 | 1 +
.../go-jmespath/fuzz/testdata/expr-67 | 1 +
.../go-jmespath/fuzz/testdata/expr-68 | 1 +
.../go-jmespath/fuzz/testdata/expr-69 | 1 +
.../jmespath/go-jmespath/fuzz/testdata/expr-7 | 1 +
.../go-jmespath/fuzz/testdata/expr-70 | 1 +
.../go-jmespath/fuzz/testdata/expr-71 | 1 +
.../go-jmespath/fuzz/testdata/expr-72 | 1 +
.../go-jmespath/fuzz/testdata/expr-73 | 1 +
.../go-jmespath/fuzz/testdata/expr-74 | 1 +
.../go-jmespath/fuzz/testdata/expr-75 | 1 +
.../go-jmespath/fuzz/testdata/expr-76 | 1 +
.../go-jmespath/fuzz/testdata/expr-77 | 1 +
.../go-jmespath/fuzz/testdata/expr-78 | 1 +
.../go-jmespath/fuzz/testdata/expr-79 | 1 +
.../jmespath/go-jmespath/fuzz/testdata/expr-8 | 1 +
.../go-jmespath/fuzz/testdata/expr-80 | 1 +
.../go-jmespath/fuzz/testdata/expr-81 | 1 +
.../go-jmespath/fuzz/testdata/expr-82 | 1 +
.../go-jmespath/fuzz/testdata/expr-83 | 1 +
.../go-jmespath/fuzz/testdata/expr-84 | 1 +
.../go-jmespath/fuzz/testdata/expr-85 | 1 +
.../go-jmespath/fuzz/testdata/expr-86 | 1 +
.../go-jmespath/fuzz/testdata/expr-87 | 1 +
.../go-jmespath/fuzz/testdata/expr-88 | 1 +
.../go-jmespath/fuzz/testdata/expr-89 | 1 +
.../jmespath/go-jmespath/fuzz/testdata/expr-9 | 1 +
.../go-jmespath/fuzz/testdata/expr-90 | 1 +
.../go-jmespath/fuzz/testdata/expr-91 | 1 +
.../go-jmespath/fuzz/testdata/expr-92 | 1 +
.../go-jmespath/fuzz/testdata/expr-93 | 1 +
.../go-jmespath/fuzz/testdata/expr-94 | 1 +
.../go-jmespath/fuzz/testdata/expr-95 | 1 +
.../go-jmespath/fuzz/testdata/expr-96 | 1 +
.../go-jmespath/fuzz/testdata/expr-97 | 1 +
.../go-jmespath/fuzz/testdata/expr-98 | 1 +
.../jmespath/go-jmespath/interpreter.go | 418 +
.../jmespath/go-jmespath/interpreter_test.go | 221 +
.../github.com/jmespath/go-jmespath/lexer.go | 420 +
.../jmespath/go-jmespath/lexer_test.go | 161 +
.../github.com/jmespath/go-jmespath/parser.go | 603 +
.../jmespath/go-jmespath/parser_test.go | 136 +
.../jmespath/go-jmespath/toktype_string.go | 16 +
.../github.com/jmespath/go-jmespath/util.go | 185 +
.../jmespath/go-jmespath/util_test.go | 73 +
vendor/github.com/juju/errgo/LICENSE | 185 +
vendor/github.com/juju/errgo/README.markdown | 281 +
vendor/github.com/juju/errgo/errors.go | 385 +
vendor/github.com/juju/errgo/errors/errors.go | 389 +
.../juju/errgo/errors/errors_test.go | 277 +
.../juju/errgo/errors/export_test.go | 6 +
vendor/github.com/juju/errgo/errors_test.go | 289 +
vendor/github.com/juju/errgo/export_test.go | 6 +
vendor/github.com/moby/moby/.dockerignore | 7 +
.../moby/moby/.github/ISSUE_TEMPLATE.md | 64 +
.../moby/.github/PULL_REQUEST_TEMPLATE.md | 30 +
vendor/github.com/moby/moby/.gitignore | 22 +
vendor/github.com/moby/moby/.mailmap | 386 +
vendor/github.com/moby/moby/AUTHORS | 1885 +
vendor/github.com/moby/moby/CHANGELOG.md | 3587 ++
vendor/github.com/moby/moby/CONTRIBUTING.md | 455 +
vendor/github.com/moby/moby/Dockerfile | 229 +
.../github.com/moby/moby/Dockerfile.aarch64 | 202 +
vendor/github.com/moby/moby/Dockerfile.armhf | 182 +
.../github.com/moby/moby/Dockerfile.ppc64le | 189 +
vendor/github.com/moby/moby/Dockerfile.s390x | 182 +
vendor/github.com/moby/moby/Dockerfile.simple | 73 +
.../github.com/moby/moby/Dockerfile.solaris | 19 +
.../github.com/moby/moby/Dockerfile.windows | 256 +
vendor/github.com/moby/moby/LICENSE | 191 +
vendor/github.com/moby/moby/MAINTAINERS | 462 +
vendor/github.com/moby/moby/Makefile | 202 +
vendor/github.com/moby/moby/NOTICE | 19 +
vendor/github.com/moby/moby/README.md | 90 +
vendor/github.com/moby/moby/ROADMAP.md | 118 +
vendor/github.com/moby/moby/VENDORING.md | 46 +
vendor/github.com/moby/moby/VERSION | 1 +
vendor/github.com/moby/moby/api/README.md | 42 +
vendor/github.com/moby/moby/api/common.go | 65 +
.../github.com/moby/moby/api/common_test.go | 77 +
.../github.com/moby/moby/api/common_unix.go | 6 +
.../moby/moby/api/common_windows.go | 8 +
.../github.com/moby/moby/api/errors/errors.go | 47 +
.../moby/moby/api/errors/errors_test.go | 64 +
.../github.com/moby/moby/api/fixtures/keyfile | 7 +
vendor/github.com/moby/moby/api/names.go | 9 +
.../moby/api/server/backend/build/backend.go | 90 +
.../moby/moby/api/server/backend/build/tag.go | 84 +
.../moby/moby/api/server/httputils/decoder.go | 16 +
.../moby/moby/api/server/httputils/errors.go | 145 +
.../moby/moby/api/server/httputils/form.go | 70 +
.../moby/api/server/httputils/form_test.go | 105 +
.../moby/api/server/httputils/httputils.go | 97 +
.../api/server/httputils/httputils_test.go | 18 +
.../server/httputils/httputils_write_json.go | 15 +
.../api/server/httputils/write_log_stream.go | 96 +
.../moby/moby/api/server/middleware.go | 24 +
.../moby/moby/api/server/middleware/cors.go | 37 +
.../moby/moby/api/server/middleware/debug.go | 94 +
.../moby/api/server/middleware/debug_test.go | 58 +
.../api/server/middleware/experimental.go | 29 +
.../moby/api/server/middleware/middleware.go | 13 +
.../moby/api/server/middleware/version.go | 51 +
.../api/server/middleware/version_test.go | 57 +
.../moby/api/server/router/build/backend.go | 21 +
.../moby/api/server/router/build/build.go | 29 +
.../api/server/router/build/build_routes.go | 253 +
.../api/server/router/checkpoint/backend.go | 10 +
.../server/router/checkpoint/checkpoint.go | 36 +
.../router/checkpoint/checkpoint_routes.go | 65 +
.../api/server/router/container/backend.go | 79 +
.../api/server/router/container/container.go | 77 +
.../router/container/container_routes.go | 608 +
.../moby/api/server/router/container/copy.go | 118 +
.../moby/api/server/router/container/exec.go | 140 +
.../api/server/router/container/inspect.go | 21 +
.../moby/api/server/router/debug/debug.go | 53 +
.../api/server/router/debug/debug_routes.go | 13 +
.../api/server/router/distribution/backend.go | 14 +
.../router/distribution/distribution.go | 31 +
.../distribution/distribution_routes.go | 138 +
.../moby/api/server/router/experimental.go | 67 +
.../moby/api/server/router/image/backend.go | 46 +
.../moby/api/server/router/image/image.go | 50 +
.../api/server/router/image/image_routes.go | 378 +
.../moby/moby/api/server/router/local.go | 104 +
.../moby/api/server/router/network/backend.go | 22 +
.../moby/api/server/router/network/filter.go | 87 +
.../api/server/router/network/filter_test.go | 149 +
.../moby/api/server/router/network/network.go | 44 +
.../server/router/network/network_routes.go | 472 +
.../moby/api/server/router/plugin/backend.go | 27 +
.../moby/api/server/router/plugin/plugin.go | 39 +
.../api/server/router/plugin/plugin_routes.go | 310 +
.../moby/moby/api/server/router/router.go | 19 +
.../moby/api/server/router/session/backend.go | 12 +
.../moby/api/server/router/session/session.go | 29 +
.../server/router/session/session_routes.go | 16 +
.../moby/api/server/router/swarm/backend.go | 47 +
.../moby/api/server/router/swarm/cluster.go | 63 +
.../api/server/router/swarm/cluster_routes.go | 492 +
.../moby/api/server/router/swarm/helpers.go | 65 +
.../moby/api/server/router/system/backend.go | 21 +
.../moby/api/server/router/system/system.go | 42 +
.../api/server/router/system/system_routes.go | 192 +
.../moby/api/server/router/volume/backend.go | 19 +
.../moby/api/server/router/volume/volume.go | 36 +
.../api/server/router/volume/volume_routes.go | 85 +
.../moby/moby/api/server/router_swapper.go | 30 +
.../github.com/moby/moby/api/server/server.go | 201 +
.../moby/moby/api/server/server_test.go | 46 +
.../github.com/moby/moby/api/swagger-gen.yaml | 12 +
vendor/github.com/moby/moby/api/swagger.yaml | 8963 +++++
.../api/templates/server/operation.gotmpl | 26 +
vendor/github.com/moby/moby/api/types/auth.go | 22 +
.../moby/moby/api/types/backend/backend.go | 106 +
.../moby/moby/api/types/backend/build.go | 44 +
.../moby/moby/api/types/blkiodev/blkio.go | 23 +
.../github.com/moby/moby/api/types/client.go | 389 +
.../github.com/moby/moby/api/types/configs.go | 70 +
.../moby/moby/api/types/container/config.go | 69 +
.../api/types/container/container_changes.go | 21 +
.../api/types/container/container_create.go | 21 +
.../moby/api/types/container/container_top.go | 21 +
.../api/types/container/container_update.go | 17 +
.../api/types/container/container_wait.go | 17 +
.../moby/api/types/container/host_config.go | 380 +
.../api/types/container/hostconfig_unix.go | 41 +
.../api/types/container/hostconfig_windows.go | 54 +
.../moby/api/types/container/waitcondition.go | 22 +
.../moby/moby/api/types/error_response.go | 13 +
.../moby/moby/api/types/events/events.go | 52 +
.../moby/moby/api/types/filters/parse.go | 310 +
.../moby/moby/api/types/filters/parse_test.go | 417 +
.../moby/moby/api/types/graph_driver_data.go | 17 +
.../moby/moby/api/types/id_response.go | 13 +
.../moby/api/types/image/image_history.go | 37 +
.../api/types/image_delete_response_item.go | 15 +
.../moby/moby/api/types/image_summary.go | 49 +
.../moby/moby/api/types/mount/mount.go | 128 +
.../moby/moby/api/types/network/network.go | 108 +
.../github.com/moby/moby/api/types/plugin.go | 200 +
.../moby/moby/api/types/plugin_device.go | 25 +
.../moby/moby/api/types/plugin_env.go | 25 +
.../moby/api/types/plugin_interface_type.go | 21 +
.../moby/moby/api/types/plugin_mount.go | 37 +
.../moby/moby/api/types/plugin_responses.go | 79 +
.../api/types/plugins/logdriver/entry.pb.go | 449 +
.../api/types/plugins/logdriver/entry.proto | 8 +
.../moby/api/types/plugins/logdriver/gen.go | 3 +
.../moby/api/types/plugins/logdriver/io.go | 87 +
vendor/github.com/moby/moby/api/types/port.go | 23 +
.../moby/api/types/registry/authenticate.go | 21 +
.../moby/moby/api/types/registry/registry.go | 119 +
.../github.com/moby/moby/api/types/seccomp.go | 93 +
.../moby/api/types/service_update_response.go | 12 +
.../github.com/moby/moby/api/types/stats.go | 181 +
.../moby/moby/api/types/strslice/strslice.go | 30 +
.../moby/api/types/strslice/strslice_test.go | 86 +
.../moby/moby/api/types/swarm/common.go | 40 +
.../moby/moby/api/types/swarm/config.go | 31 +
.../moby/moby/api/types/swarm/container.go | 72 +
.../moby/moby/api/types/swarm/network.go | 119 +
.../moby/moby/api/types/swarm/node.go | 115 +
.../moby/moby/api/types/swarm/runtime.go | 19 +
.../moby/moby/api/types/swarm/runtime/gen.go | 3 +
.../moby/api/types/swarm/runtime/plugin.pb.go | 712 +
.../moby/api/types/swarm/runtime/plugin.proto | 18 +
.../moby/moby/api/types/swarm/secret.go | 32 +
.../moby/moby/api/types/swarm/service.go | 124 +
.../moby/moby/api/types/swarm/swarm.go | 217 +
.../moby/moby/api/types/swarm/task.go | 184 +
.../moby/api/types/time/duration_convert.go | 12 +
.../api/types/time/duration_convert_test.go | 26 +
.../moby/moby/api/types/time/timestamp.go | 124 +
.../moby/api/types/time/timestamp_test.go | 93 +
.../github.com/moby/moby/api/types/types.go | 575 +
.../moby/moby/api/types/versions/README.md | 14 +
.../moby/moby/api/types/versions/compare.go | 62 +
.../moby/api/types/versions/compare_test.go | 26 +
.../moby/api/types/versions/v1p19/types.go | 35 +
.../moby/api/types/versions/v1p20/types.go | 40 +
.../github.com/moby/moby/api/types/volume.go | 69 +
.../moby/api/types/volume/volumes_create.go | 29 +
.../moby/api/types/volume/volumes_list.go | 23 +
.../github.com/moby/moby/builder/builder.go | 105 +
.../moby/moby/builder/dockerfile/bflag.go | 183 +
.../moby/builder/dockerfile/bflag_test.go | 187 +
.../moby/moby/builder/dockerfile/buildargs.go | 148 +
.../moby/builder/dockerfile/buildargs_test.go | 100 +
.../moby/moby/builder/dockerfile/builder.go | 420 +
.../moby/builder/dockerfile/builder_test.go | 34 +
.../moby/builder/dockerfile/builder_unix.go | 7 +
.../builder/dockerfile/builder_windows.go | 8 +
.../moby/builder/dockerfile/clientsession.go | 77 +
.../builder/dockerfile/command/command.go | 46 +
.../builder/dockerfile/containerbackend.go | 144 +
.../moby/moby/builder/dockerfile/copy.go | 444 +
.../moby/moby/builder/dockerfile/copy_test.go | 45 +
.../moby/moby/builder/dockerfile/copy_unix.go | 36 +
.../moby/builder/dockerfile/copy_windows.go | 8 +
.../moby/builder/dockerfile/dispatchers.go | 884 +
.../builder/dockerfile/dispatchers_test.go | 525 +
.../builder/dockerfile/dispatchers_unix.go | 34 +
.../dockerfile/dispatchers_unix_test.go | 33 +
.../builder/dockerfile/dispatchers_windows.go | 93 +
.../dockerfile/dispatchers_windows_test.go | 40 +
.../moby/moby/builder/dockerfile/envVarTest | 121 +
.../moby/moby/builder/dockerfile/evaluator.go | 327 +
.../moby/builder/dockerfile/evaluator_test.go | 210 +
.../moby/builder/dockerfile/evaluator_unix.go | 9 +
.../builder/dockerfile/evaluator_windows.go | 13 +
.../moby/builder/dockerfile/imagecontext.go | 211 +
.../moby/builder/dockerfile/imageprobe.go | 63 +
.../moby/moby/builder/dockerfile/internals.go | 300 +
.../moby/builder/dockerfile/internals_test.go | 131 +
.../moby/builder/dockerfile/internals_unix.go | 42 +
.../builder/dockerfile/internals_windows.go | 95 +
.../dockerfile/internals_windows_test.go | 53 +
.../moby/moby/builder/dockerfile/metrics.go | 44 +
.../builder/dockerfile/mockbackend_test.go | 130 +
.../builder/dockerfile/parser/dumper/main.go | 32 +
.../builder/dockerfile/parser/json_test.go | 59 +
.../builder/dockerfile/parser/line_parsers.go | 399 +
.../dockerfile/parser/line_parsers_test.go | 74 +
.../moby/builder/dockerfile/parser/parser.go | 355 +
.../builder/dockerfile/parser/parser_test.go | 154 +
.../dockerfile/parser/split_command.go | 118 +
.../parser/testfile-line/Dockerfile | 35 +
.../env_no_value/Dockerfile | 3 +
.../shykes-nested-json/Dockerfile | 1 +
.../testfiles/ADD-COPY-with-JSON/Dockerfile | 11 +
.../testfiles/ADD-COPY-with-JSON/result | 10 +
.../testfiles/brimstone-consuldock/Dockerfile | 26 +
.../testfiles/brimstone-consuldock/result | 5 +
.../brimstone-docker-consul/Dockerfile | 52 +
.../testfiles/brimstone-docker-consul/result | 9 +
.../testfiles/continue-at-eof/Dockerfile | 3 +
.../parser/testfiles/continue-at-eof/result | 2 +
.../testfiles/continueIndent/Dockerfile | 36 +
.../parser/testfiles/continueIndent/result | 10 +
.../testfiles/cpuguy83-nagios/Dockerfile | 54 +
.../parser/testfiles/cpuguy83-nagios/result | 40 +
.../parser/testfiles/docker/Dockerfile | 102 +
.../dockerfile/parser/testfiles/docker/result | 24 +
.../parser/testfiles/env/Dockerfile | 23 +
.../dockerfile/parser/testfiles/env/result | 16 +
.../testfiles/escape-after-comment/Dockerfile | 9 +
.../testfiles/escape-after-comment/result | 3 +
.../testfiles/escape-nonewline/Dockerfile | 7 +
.../parser/testfiles/escape-nonewline/result | 3 +
.../parser/testfiles/escape/Dockerfile | 6 +
.../dockerfile/parser/testfiles/escape/result | 3 +
.../parser/testfiles/escapes/Dockerfile | 14 +
.../parser/testfiles/escapes/result | 6 +
.../parser/testfiles/flags/Dockerfile | 10 +
.../dockerfile/parser/testfiles/flags/result | 10 +
.../parser/testfiles/health/Dockerfile | 10 +
.../dockerfile/parser/testfiles/health/result | 9 +
.../parser/testfiles/influxdb/Dockerfile | 15 +
.../parser/testfiles/influxdb/result | 11 +
.../Dockerfile | 1 +
.../result | 1 +
.../Dockerfile | 1 +
.../result | 1 +
.../Dockerfile | 1 +
.../jeztah-invalid-json-single-quotes/result | 1 +
.../Dockerfile | 1 +
.../result | 1 +
.../Dockerfile | 1 +
.../result | 1 +
.../parser/testfiles/json/Dockerfile | 8 +
.../dockerfile/parser/testfiles/json/result | 8 +
.../kartar-entrypoint-oddities/Dockerfile | 7 +
.../kartar-entrypoint-oddities/result | 7 +
.../lk4d4-the-edge-case-generator/Dockerfile | 48 +
.../lk4d4-the-edge-case-generator/result | 29 +
.../parser/testfiles/mail/Dockerfile | 16 +
.../dockerfile/parser/testfiles/mail/result | 14 +
.../testfiles/multiple-volumes/Dockerfile | 3 +
.../parser/testfiles/multiple-volumes/result | 2 +
.../parser/testfiles/mumble/Dockerfile | 7 +
.../dockerfile/parser/testfiles/mumble/result | 4 +
.../parser/testfiles/nginx/Dockerfile | 14 +
.../dockerfile/parser/testfiles/nginx/result | 11 +
.../parser/testfiles/tf2/Dockerfile | 23 +
.../dockerfile/parser/testfiles/tf2/result | 20 +
.../parser/testfiles/weechat/Dockerfile | 9 +
.../parser/testfiles/weechat/result | 6 +
.../parser/testfiles/znc/Dockerfile | 7 +
.../dockerfile/parser/testfiles/znc/result | 5 +
.../moby/builder/dockerfile/shell_parser.go | 344 +
.../builder/dockerfile/shell_parser_test.go | 151 +
.../moby/moby/builder/dockerfile/support.go | 19 +
.../moby/builder/dockerfile/support_test.go | 65 +
.../moby/builder/dockerfile/utils_test.go | 50 +
.../moby/moby/builder/dockerfile/wordsTest | 30 +
.../moby/builder/dockerignore/dockerignore.go | 64 +
.../builder/dockerignore/dockerignore_test.go | 69 +
.../moby/moby/builder/fscache/fscache.go | 609 +
.../moby/moby/builder/fscache/fscache_test.go | 131 +
.../moby/moby/builder/fscache/naivedriver.go | 28 +
.../moby/builder/remotecontext/archive.go | 128 +
.../moby/moby/builder/remotecontext/detect.go | 184 +
.../moby/builder/remotecontext/detect_test.go | 123 +
.../moby/builder/remotecontext/filehash.go | 45 +
.../moby/builder/remotecontext/generate.go | 3 +
.../moby/moby/builder/remotecontext/git.go | 29 +
.../builder/remotecontext/git/gitutils.go | 159 +
.../remotecontext/git/gitutils_test.go | 238 +
.../moby/builder/remotecontext/lazycontext.go | 101 +
.../moby/builder/remotecontext/mimetype.go | 27 +
.../builder/remotecontext/mimetype_test.go | 16 +
.../moby/moby/builder/remotecontext/remote.go | 134 +
.../moby/builder/remotecontext/remote_test.go | 263 +
.../moby/moby/builder/remotecontext/tarsum.go | 174 +
.../moby/builder/remotecontext/tarsum.pb.go | 525 +
.../moby/builder/remotecontext/tarsum.proto | 7 +
.../moby/builder/remotecontext/tarsum_test.go | 157 +
.../moby/builder/remotecontext/utils_test.go | 55 +
vendor/github.com/moby/moby/cli/cobra.go | 150 +
.../moby/moby/cli/config/configdir.go | 25 +
.../github.com/moby/moby/cli/debug/debug.go | 26 +
.../moby/moby/cli/debug/debug_test.go | 43 +
vendor/github.com/moby/moby/cli/error.go | 33 +
vendor/github.com/moby/moby/cli/required.go | 27 +
vendor/github.com/moby/moby/client/README.md | 35 +
.../moby/moby/client/build_prune.go | 30 +
.../moby/moby/client/checkpoint_create.go | 13 +
.../moby/client/checkpoint_create_test.go | 73 +
.../moby/moby/client/checkpoint_delete.go | 20 +
.../moby/client/checkpoint_delete_test.go | 54 +
.../moby/moby/client/checkpoint_list.go | 32 +
.../moby/moby/client/checkpoint_list_test.go | 68 +
vendor/github.com/moby/moby/client/client.go | 314 +
.../moby/moby/client/client_mock_test.go | 45 +
.../moby/moby/client/client_test.go | 344 +
.../moby/moby/client/client_unix.go | 6 +
.../moby/moby/client/client_windows.go | 4 +
.../moby/moby/client/config_create.go | 25 +
.../moby/moby/client/config_create_test.go | 69 +
.../moby/moby/client/config_inspect.go | 37 +
.../moby/moby/client/config_inspect_test.go | 78 +
.../moby/moby/client/config_list.go | 38 +
.../moby/moby/client/config_list_test.go | 106 +
.../moby/moby/client/config_remove.go | 13 +
.../moby/moby/client/config_remove_test.go | 59 +
.../moby/moby/client/config_update.go | 21 +
.../moby/moby/client/config_update_test.go | 60 +
.../moby/moby/client/container_attach.go | 57 +
.../moby/moby/client/container_commit.go | 55 +
.../moby/moby/client/container_commit_test.go | 96 +
.../moby/moby/client/container_copy.go | 102 +
.../moby/moby/client/container_copy_test.go | 244 +
.../moby/moby/client/container_create.go | 56 +
.../moby/moby/client/container_create_test.go | 118 +
.../moby/moby/client/container_diff.go | 23 +
.../moby/moby/client/container_diff_test.go | 61 +
.../moby/moby/client/container_exec.go | 54 +
.../moby/moby/client/container_exec_test.go | 157 +
.../moby/moby/client/container_export.go | 20 +
.../moby/moby/client/container_export_test.go | 50 +
.../moby/moby/client/container_inspect.go | 54 +
.../moby/client/container_inspect_test.go | 125 +
.../moby/moby/client/container_kill.go | 17 +
.../moby/moby/client/container_kill_test.go | 46 +
.../moby/moby/client/container_list.go | 56 +
.../moby/moby/client/container_list_test.go | 96 +
.../moby/moby/client/container_logs.go | 72 +
.../moby/moby/client/container_logs_test.go | 133 +
.../moby/moby/client/container_pause.go | 10 +
.../moby/moby/client/container_pause_test.go | 41 +
.../moby/moby/client/container_prune.go | 36 +
.../moby/moby/client/container_prune_test.go | 124 +
.../moby/moby/client/container_remove.go | 27 +
.../moby/moby/client/container_remove_test.go | 59 +
.../moby/moby/client/container_rename.go | 16 +
.../moby/moby/client/container_rename_test.go | 46 +
.../moby/moby/client/container_resize.go | 29 +
.../moby/moby/client/container_resize_test.go | 82 +
.../moby/moby/client/container_restart.go | 22 +
.../moby/client/container_restart_test.go | 48 +
.../moby/moby/client/container_start.go | 24 +
.../moby/moby/client/container_start_test.go | 58 +
.../moby/moby/client/container_stats.go | 26 +
.../moby/moby/client/container_stats_test.go | 70 +
.../moby/moby/client/container_stop.go | 21 +
.../moby/moby/client/container_stop_test.go | 48 +
.../moby/moby/client/container_top.go | 28 +
.../moby/moby/client/container_top_test.go | 74 +
.../moby/moby/client/container_unpause.go | 10 +
.../moby/client/container_unpause_test.go | 41 +
.../moby/moby/client/container_update.go | 22 +
.../moby/moby/client/container_update_test.go | 58 +
.../moby/moby/client/container_wait.go | 84 +
.../moby/moby/client/container_wait_test.go | 74 +
.../github.com/moby/moby/client/disk_usage.go | 26 +
.../moby/moby/client/disk_usage_test.go | 55 +
.../moby/moby/client/distribution_inspect.go | 35 +
.../moby/client/distribution_inspect_test.go | 18 +
vendor/github.com/moby/moby/client/errors.go | 300 +
vendor/github.com/moby/moby/client/events.go | 102 +
.../moby/moby/client/events_test.go | 165 +
vendor/github.com/moby/moby/client/hijack.go | 206 +
.../moby/moby/client/image_build.go | 128 +
.../moby/moby/client/image_build_test.go | 233 +
.../moby/moby/client/image_create.go | 34 +
.../moby/moby/client/image_create_test.go | 76 +
.../moby/moby/client/image_history.go | 22 +
.../moby/moby/client/image_history_test.go | 60 +
.../moby/moby/client/image_import.go | 37 +
.../moby/moby/client/image_import_test.go | 81 +
.../moby/moby/client/image_inspect.go | 33 +
.../moby/moby/client/image_inspect_test.go | 71 +
.../github.com/moby/moby/client/image_list.go | 45 +
.../moby/moby/client/image_list_test.go | 159 +
.../github.com/moby/moby/client/image_load.go | 30 +
.../moby/moby/client/image_load_test.go | 95 +
.../moby/moby/client/image_prune.go | 36 +
.../moby/moby/client/image_prune_test.go | 119 +
.../github.com/moby/moby/client/image_pull.go | 61 +
.../moby/moby/client/image_pull_test.go | 199 +
.../github.com/moby/moby/client/image_push.go | 56 +
.../moby/moby/client/image_push_test.go | 180 +
.../moby/moby/client/image_remove.go | 31 +
.../moby/moby/client/image_remove_test.go | 95 +
.../github.com/moby/moby/client/image_save.go | 22 +
.../moby/moby/client/image_save_test.go | 58 +
.../moby/moby/client/image_search.go | 51 +
.../moby/moby/client/image_search_test.go | 165 +
.../github.com/moby/moby/client/image_tag.go | 37 +
.../moby/moby/client/image_tag_test.go | 143 +
vendor/github.com/moby/moby/client/info.go | 26 +
.../github.com/moby/moby/client/info_test.go | 76 +
.../github.com/moby/moby/client/interface.go | 194 +
.../moby/client/interface_experimental.go | 17 +
.../moby/moby/client/interface_stable.go | 10 +
vendor/github.com/moby/moby/client/login.go | 29 +
.../moby/moby/client/network_connect.go | 18 +
.../moby/moby/client/network_connect_test.go | 111 +
.../moby/moby/client/network_create.go | 25 +
.../moby/moby/client/network_create_test.go | 72 +
.../moby/moby/client/network_disconnect.go | 14 +
.../moby/client/network_disconnect_test.go | 64 +
.../moby/moby/client/network_inspect.go | 50 +
.../moby/moby/client/network_inspect_test.go | 107 +
.../moby/moby/client/network_list.go | 31 +
.../moby/moby/client/network_list_test.go | 108 +
.../moby/moby/client/network_prune.go | 36 +
.../moby/moby/client/network_prune_test.go | 112 +
.../moby/moby/client/network_remove.go | 10 +
.../moby/moby/client/network_remove_test.go | 47 +
.../moby/moby/client/node_inspect.go | 33 +
.../moby/moby/client/node_inspect_test.go | 65 +
.../github.com/moby/moby/client/node_list.go | 36 +
.../moby/moby/client/node_list_test.go | 94 +
.../moby/moby/client/node_remove.go | 21 +
.../moby/moby/client/node_remove_test.go | 69 +
.../moby/moby/client/node_update.go | 18 +
.../moby/moby/client/node_update_test.go | 49 +
.../github.com/moby/moby/client/parse_logs.go | 41 +
.../moby/moby/client/parse_logs_test.go | 36 +
vendor/github.com/moby/moby/client/ping.go | 32 +
.../github.com/moby/moby/client/ping_test.go | 82 +
.../moby/moby/client/plugin_create.go | 26 +
.../moby/moby/client/plugin_disable.go | 19 +
.../moby/moby/client/plugin_disable_test.go | 48 +
.../moby/moby/client/plugin_enable.go | 19 +
.../moby/moby/client/plugin_enable_test.go | 48 +
.../moby/moby/client/plugin_inspect.go | 32 +
.../moby/moby/client/plugin_inspect_test.go | 54 +
.../moby/moby/client/plugin_install.go | 113 +
.../moby/moby/client/plugin_list.go | 32 +
.../moby/moby/client/plugin_list_test.go | 107 +
.../moby/moby/client/plugin_push.go | 17 +
.../moby/moby/client/plugin_push_test.go | 51 +
.../moby/moby/client/plugin_remove.go | 20 +
.../moby/moby/client/plugin_remove_test.go | 49 +
.../github.com/moby/moby/client/plugin_set.go | 12 +
.../moby/moby/client/plugin_set_test.go | 47 +
.../moby/moby/client/plugin_upgrade.go | 39 +
vendor/github.com/moby/moby/client/request.go | 262 +
.../moby/moby/client/request_test.go | 92 +
.../moby/moby/client/secret_create.go | 25 +
.../moby/moby/client/secret_create_test.go | 69 +
.../moby/moby/client/secret_inspect.go | 37 +
.../moby/moby/client/secret_inspect_test.go | 78 +
.../moby/moby/client/secret_list.go | 38 +
.../moby/moby/client/secret_list_test.go | 106 +
.../moby/moby/client/secret_remove.go | 13 +
.../moby/moby/client/secret_remove_test.go | 59 +
.../moby/moby/client/secret_update.go | 21 +
.../moby/moby/client/secret_update_test.go | 60 +
.../moby/moby/client/service_create.go | 156 +
.../moby/moby/client/service_create_test.go | 210 +
.../moby/moby/client/service_inspect.go | 38 +
.../moby/moby/client/service_inspect_test.go | 66 +
.../moby/moby/client/service_list.go | 35 +
.../moby/moby/client/service_list_test.go | 94 +
.../moby/moby/client/service_logs.go | 52 +
.../moby/moby/client/service_logs_test.go | 133 +
.../moby/moby/client/service_remove.go | 10 +
.../moby/moby/client/service_remove_test.go | 47 +
.../moby/moby/client/service_update.go | 92 +
.../moby/moby/client/service_update_test.go | 77 +
vendor/github.com/moby/moby/client/session.go | 19 +
.../moby/client/session/filesync/diffcopy.go | 31 +
.../moby/client/session/filesync/filesync.go | 183 +
.../client/session/filesync/filesync.pb.go | 575 +
.../client/session/filesync/filesync.proto | 15 +
.../client/session/filesync/filesync_test.go | 71 +
.../moby/client/session/filesync/generate.go | 3 +
.../moby/client/session/filesync/tarstream.go | 83 +
.../moby/moby/client/session/grpc.go | 62 +
.../moby/moby/client/session/manager.go | 202 +
.../moby/moby/client/session/session.go | 117 +
.../moby/client/session/testutil/testutil.go | 70 +
.../moby/moby/client/swarm_get_unlock_key.go | 21 +
.../moby/client/swarm_get_unlock_key_test.go | 60 +
.../github.com/moby/moby/client/swarm_init.go | 21 +
.../moby/moby/client/swarm_init_test.go | 54 +
.../moby/moby/client/swarm_inspect.go | 21 +
.../moby/moby/client/swarm_inspect_test.go | 56 +
.../github.com/moby/moby/client/swarm_join.go | 13 +
.../moby/moby/client/swarm_join_test.go | 51 +
.../moby/moby/client/swarm_leave.go | 18 +
.../moby/moby/client/swarm_leave_test.go | 66 +
.../moby/moby/client/swarm_unlock.go | 13 +
.../moby/moby/client/swarm_unlock_test.go | 49 +
.../moby/moby/client/swarm_update.go | 22 +
.../moby/moby/client/swarm_update_test.go | 49 +
.../moby/moby/client/task_inspect.go | 34 +
.../moby/moby/client/task_inspect_test.go | 54 +
.../github.com/moby/moby/client/task_list.go | 35 +
.../moby/moby/client/task_list_test.go | 94 +
.../github.com/moby/moby/client/task_logs.go | 52 +
.../github.com/moby/moby/client/transport.go | 25 +
vendor/github.com/moby/moby/client/utils.go | 34 +
vendor/github.com/moby/moby/client/version.go | 21 +
.../moby/moby/client/volume_create.go | 21 +
.../moby/moby/client/volume_create_test.go | 75 +
.../moby/moby/client/volume_inspect.go | 38 +
.../moby/moby/client/volume_inspect_test.go | 76 +
.../moby/moby/client/volume_list.go | 32 +
.../moby/moby/client/volume_list_test.go | 98 +
.../moby/moby/client/volume_prune.go | 36 +
.../moby/moby/client/volume_remove.go | 21 +
.../moby/moby/client/volume_remove_test.go | 47 +
.../moby/moby/cmd/dockerd/README.md | 3 +
.../moby/moby/cmd/dockerd/config.go | 76 +
.../moby/cmd/dockerd/config_common_unix.go | 34 +
.../moby/cmd/dockerd/config_experimental.go | 9 +
.../moby/moby/cmd/dockerd/config_solaris.go | 19 +
.../moby/moby/cmd/dockerd/config_unix.go | 52 +
.../moby/moby/cmd/dockerd/config_unix_test.go | 26 +
.../moby/moby/cmd/dockerd/config_windows.go | 25 +
.../moby/moby/cmd/dockerd/daemon.go | 570 +
.../moby/moby/cmd/dockerd/daemon_freebsd.go | 9 +
.../moby/moby/cmd/dockerd/daemon_linux.go | 15 +
.../moby/moby/cmd/dockerd/daemon_solaris.go | 89 +
.../moby/moby/cmd/dockerd/daemon_test.go | 145 +
.../moby/moby/cmd/dockerd/daemon_unix.go | 125 +
.../moby/moby/cmd/dockerd/daemon_unix_test.go | 114 +
.../moby/moby/cmd/dockerd/daemon_windows.go | 98 +
.../moby/moby/cmd/dockerd/docker.go | 109 +
.../moby/moby/cmd/dockerd/docker_windows.go | 18 +
.../dockerd/hack/malformed_host_override.go | 121 +
.../hack/malformed_host_override_test.go | 124 +
.../moby/moby/cmd/dockerd/metrics.go | 27 +
.../moby/moby/cmd/dockerd/options.go | 123 +
.../moby/moby/cmd/dockerd/options_test.go | 43 +
.../moby/cmd/dockerd/service_unsupported.go | 14 +
.../moby/moby/cmd/dockerd/service_windows.go | 430 +
.../github.com/moby/moby/container/archive.go | 76 +
.../moby/moby/container/container.go | 1058 +
.../moby/moby/container/container_linux.go | 9 +
.../moby/moby/container/container_notlinux.go | 23 +
.../moby/container/container_unit_test.go | 68 +
.../moby/moby/container/container_unix.go | 475 +
.../moby/moby/container/container_windows.go | 210 +
vendor/github.com/moby/moby/container/env.go | 43 +
.../moby/moby/container/env_test.go | 24 +
.../github.com/moby/moby/container/health.go | 50 +
.../github.com/moby/moby/container/history.go | 30 +
.../moby/moby/container/memory_store.go | 95 +
.../moby/moby/container/memory_store_test.go | 106 +
.../github.com/moby/moby/container/monitor.go | 46 +
.../moby/moby/container/mounts_unix.go | 12 +
.../moby/moby/container/mounts_windows.go | 8 +
.../github.com/moby/moby/container/state.go | 382 +
.../moby/moby/container/state_solaris.go | 7 +
.../moby/moby/container/state_test.go | 168 +
.../moby/moby/container/state_unix.go | 10 +
.../moby/moby/container/state_windows.go | 7 +
.../github.com/moby/moby/container/store.go | 28 +
.../moby/moby/container/stream/attach.go | 179 +
.../moby/moby/container/stream/streams.go | 146 +
vendor/github.com/moby/moby/container/view.go | 498 +
.../moby/moby/container/view_test.go | 153 +
vendor/github.com/moby/moby/contrib/README.md | 4 +
vendor/github.com/moby/moby/contrib/REVIEWERS | 1 +
.../moby/moby/contrib/apparmor/main.go | 56 +
.../moby/moby/contrib/apparmor/template.go | 268 +
.../moby/contrib/builder/deb/aarch64/build.sh | 10 +
.../deb/aarch64/debian-jessie/Dockerfile | 25 +
.../deb/aarch64/debian-stretch/Dockerfile | 22 +
.../contrib/builder/deb/aarch64/generate.sh | 135 +
.../deb/aarch64/ubuntu-trusty/Dockerfile | 24 +
.../deb/aarch64/ubuntu-xenial/Dockerfile | 22 +
.../moby/contrib/builder/deb/amd64/README.md | 5 +
.../moby/contrib/builder/deb/amd64/build.sh | 10 +
.../deb/amd64/debian-jessie/Dockerfile | 20 +
.../deb/amd64/debian-stretch/Dockerfile | 20 +
.../deb/amd64/debian-wheezy/Dockerfile | 22 +
.../contrib/builder/deb/amd64/generate.sh | 130 +
.../deb/amd64/ubuntu-trusty/Dockerfile | 16 +
.../deb/amd64/ubuntu-xenial/Dockerfile | 16 +
.../deb/amd64/ubuntu-yakkety/Dockerfile | 16 +
.../builder/deb/amd64/ubuntu-zesty/Dockerfile | 16 +
.../deb/armhf/debian-jessie/Dockerfile | 20 +
.../contrib/builder/deb/armhf/generate.sh | 139 +
.../deb/armhf/raspbian-jessie/Dockerfile | 22 +
.../deb/armhf/ubuntu-trusty/Dockerfile | 16 +
.../deb/armhf/ubuntu-xenial/Dockerfile | 16 +
.../deb/armhf/ubuntu-yakkety/Dockerfile | 16 +
.../moby/contrib/builder/deb/ppc64le/build.sh | 10 +
.../contrib/builder/deb/ppc64le/generate.sh | 101 +
.../deb/ppc64le/ubuntu-trusty/Dockerfile | 16 +
.../deb/ppc64le/ubuntu-xenial/Dockerfile | 16 +
.../deb/ppc64le/ubuntu-yakkety/Dockerfile | 16 +
.../moby/contrib/builder/deb/s390x/build.sh | 10 +
.../contrib/builder/deb/s390x/generate.sh | 94 +
.../deb/s390x/ubuntu-xenial/Dockerfile | 16 +
.../deb/s390x/ubuntu-yakkety/Dockerfile | 16 +
.../moby/contrib/builder/rpm/amd64/README.md | 5 +
.../rpm/amd64/amazonlinux-latest/Dockerfile | 18 +
.../moby/contrib/builder/rpm/amd64/build.sh | 10 +
.../builder/rpm/amd64/centos-7/Dockerfile | 19 +
.../builder/rpm/amd64/fedora-24/Dockerfile | 19 +
.../builder/rpm/amd64/fedora-25/Dockerfile | 19 +
.../contrib/builder/rpm/amd64/generate.sh | 187 +
.../rpm/amd64/opensuse-13.2/Dockerfile | 18 +
.../rpm/amd64/oraclelinux-6/Dockerfile | 28 +
.../rpm/amd64/oraclelinux-7/Dockerfile | 18 +
.../builder/rpm/amd64/photon-1.0/Dockerfile | 18 +
.../moby/contrib/builder/rpm/armhf/build.sh | 10 +
.../builder/rpm/armhf/centos-7/Dockerfile | 20 +
.../contrib/builder/rpm/armhf/generate.sh | 122 +
.../moby/contrib/builder/rpm/ppc64le/build.sh | 10 +
.../builder/rpm/ppc64le/centos-7/Dockerfile | 19 +
.../builder/rpm/ppc64le/fedora-24/Dockerfile | 19 +
.../contrib/builder/rpm/ppc64le/generate.sh | 139 +
.../rpm/ppc64le/opensuse-42.1/Dockerfile | 20 +
.../moby/contrib/builder/rpm/s390x/build.sh | 10 +
.../rpm/s390x/clefos-base-s390x-7/Dockerfile | 19 +
.../contrib/builder/rpm/s390x/generate.sh | 144 +
.../s390x/opensuse-tumbleweed-1/Dockerfile | 20 +
.../moby/moby/contrib/check-config.sh | 360 +
.../contrib/desktop-integration/README.md | 11 +
.../desktop-integration/chromium/Dockerfile | 36 +
.../desktop-integration/gparted/Dockerfile | 31 +
.../moby/contrib/docker-device-tool/README.md | 14 +
.../contrib/docker-device-tool/device_tool.go | 176 +
.../docker-device-tool/device_tool_windows.go | 4 +
.../contrib/docker-machine-install-bundle.sh | 111 +
.../moby/moby/contrib/dockerize-disk.sh | 118 +
.../moby/contrib/download-frozen-image-v1.sh | 108 +
.../moby/contrib/download-frozen-image-v2.sh | 307 +
.../github.com/moby/moby/contrib/editorconfig | 13 +
.../moby/moby/contrib/gitdm/aliases | 148 +
.../moby/moby/contrib/gitdm/domain-map | 47 +
.../moby/contrib/gitdm/generate_aliases.sh | 16 +
.../moby/moby/contrib/gitdm/gitdm.config | 17 +
.../moby/moby/contrib/httpserver/Dockerfile | 4 +
.../contrib/httpserver/Dockerfile.solaris | 4 +
.../moby/moby/contrib/httpserver/server.go | 12 +
.../moby/contrib/init/openrc/docker.confd | 23 +
.../moby/contrib/init/openrc/docker.initd | 24 +
.../moby/moby/contrib/init/systemd/REVIEWERS | 3 +
.../moby/contrib/init/systemd/docker.service | 34 +
.../contrib/init/systemd/docker.service.rpm | 33 +
.../moby/contrib/init/systemd/docker.socket | 12 +
.../moby/contrib/init/sysvinit-debian/docker | 156 +
.../init/sysvinit-debian/docker.default | 20 +
.../moby/contrib/init/sysvinit-redhat/docker | 153 +
.../init/sysvinit-redhat/docker.sysconfig | 7 +
.../moby/moby/contrib/init/upstart/REVIEWERS | 2 +
.../moby/contrib/init/upstart/docker.conf | 72 +
.../moby/moby/contrib/mac-install-bundle.sh | 45 +
.../moby/moby/contrib/mkimage-alpine.sh | 90 +
.../moby/contrib/mkimage-arch-pacman.conf | 92 +
.../moby/moby/contrib/mkimage-arch.sh | 126 +
.../moby/contrib/mkimage-archarm-pacman.conf | 98 +
.../moby/moby/contrib/mkimage-crux.sh | 75 +
.../moby/moby/contrib/mkimage-pld.sh | 73 +
.../moby/moby/contrib/mkimage-yum.sh | 136 +
.../github.com/moby/moby/contrib/mkimage.sh | 128 +
.../contrib/mkimage/.febootstrap-minimize | 28 +
.../moby/moby/contrib/mkimage/busybox-static | 34 +
.../moby/moby/contrib/mkimage/debootstrap | 251 +
.../moby/moby/contrib/mkimage/mageia-urpmi | 61 +
.../moby/moby/contrib/mkimage/rinse | 25 +
.../moby/moby/contrib/mkimage/solaris | 89 +
.../moby/moby/contrib/nnp-test/Dockerfile | 9 +
.../moby/moby/contrib/nnp-test/nnp-test.c | 10 +
.../moby/moby/contrib/nuke-graph-directory.sh | 64 +
.../moby/moby/contrib/project-stats.sh | 22 +
.../moby/moby/contrib/report-issue.sh | 105 +
.../moby/moby/contrib/reprepro/suites.sh | 12 +
.../docker-engine-selinux/LICENSE | 339 +
.../docker-engine-selinux/Makefile | 23 +
.../docker-engine-selinux/README.md | 1 +
.../docker-engine-selinux/docker.fc | 29 +
.../docker-engine-selinux/docker.if | 523 +
.../docker-engine-selinux/docker.te | 399 +
.../docker-engine-selinux/LICENSE | 340 +
.../docker-engine-selinux/Makefile | 16 +
.../docker-engine-selinux/README.md | 1 +
.../docker-engine-selinux/docker.fc | 18 +
.../docker-engine-selinux/docker.if | 461 +
.../docker-engine-selinux/docker.te | 407 +
.../docker-engine-selinux/docker_selinux.8.gz | Bin 0 -> 2847 bytes
.../contrib/syntax/nano/Dockerfile.nanorc | 26 +
.../moby/moby/contrib/syntax/nano/README.md | 32 +
.../Preferences/Dockerfile.tmPreferences | 24 +
.../Syntaxes/Dockerfile.tmLanguage | 160 +
.../textmate/Docker.tmbundle/info.plist | 16 +
.../moby/contrib/syntax/textmate/README.md | 17 +
.../moby/contrib/syntax/textmate/REVIEWERS | 1 +
.../moby/moby/contrib/syntax/vim/LICENSE | 22 +
.../moby/moby/contrib/syntax/vim/README.md | 26 +
.../contrib/syntax/vim/doc/dockerfile.txt | 18 +
.../syntax/vim/ftdetect/dockerfile.vim | 1 +
.../contrib/syntax/vim/syntax/dockerfile.vim | 31 +
.../moby/moby/contrib/syscall-test/Dockerfile | 15 +
.../moby/moby/contrib/syscall-test/acct.c | 16 +
.../moby/moby/contrib/syscall-test/exit32.s | 7 +
.../moby/moby/contrib/syscall-test/ns.c | 63 +
.../moby/moby/contrib/syscall-test/raw.c | 14 +
.../moby/moby/contrib/syscall-test/setgid.c | 11 +
.../moby/moby/contrib/syscall-test/setuid.c | 11 +
.../moby/moby/contrib/syscall-test/socket.c | 30 +
.../moby/moby/contrib/syscall-test/userns.c | 63 +
.../moby/moby/contrib/udev/80-docker.rules | 3 +
.../moby/contrib/vagrant-docker/README.md | 50 +
.../moby/moby/daemon/apparmor_default.go | 36 +
.../daemon/apparmor_default_unsupported.go | 7 +
vendor/github.com/moby/moby/daemon/archive.go | 362 +
.../moby/daemon/archive_tarcopyoptions.go | 15 +
.../daemon/archive_tarcopyoptions_unix.go | 25 +
.../daemon/archive_tarcopyoptions_windows.go | 12 +
.../moby/moby/daemon/archive_unix.go | 29 +
.../moby/moby/daemon/archive_windows.go | 39 +
vendor/github.com/moby/moby/daemon/attach.go | 186 +
vendor/github.com/moby/moby/daemon/auth.go | 13 +
.../moby/moby/daemon/bindmount_solaris.go | 5 +
.../moby/moby/daemon/bindmount_unix.go | 5 +
vendor/github.com/moby/moby/daemon/build.go | 196 +
vendor/github.com/moby/moby/daemon/cache.go | 27 +
.../moby/moby/daemon/caps/utils_unix.go | 131 +
vendor/github.com/moby/moby/daemon/changes.go | 31 +
.../github.com/moby/moby/daemon/checkpoint.go | 137 +
vendor/github.com/moby/moby/daemon/cluster.go | 26 +
.../moby/moby/daemon/cluster/cluster.go | 441 +
.../moby/moby/daemon/cluster/configs.go | 117 +
.../cluster/controllers/plugin/controller.go | 261 +
.../controllers/plugin/controller_test.go | 390 +
.../moby/daemon/cluster/convert/config.go | 61 +
.../moby/daemon/cluster/convert/container.go | 356 +
.../moby/daemon/cluster/convert/network.go | 239 +
.../moby/moby/daemon/cluster/convert/node.go | 94 +
.../moby/daemon/cluster/convert/secret.go | 63 +
.../moby/daemon/cluster/convert/service.go | 613 +
.../daemon/cluster/convert/service_test.go | 150 +
.../moby/moby/daemon/cluster/convert/swarm.go | 148 +
.../moby/moby/daemon/cluster/convert/task.go | 70 +
.../moby/daemon/cluster/executor/backend.go | 64 +
.../cluster/executor/container/adapter.go | 472 +
.../cluster/executor/container/attachment.go | 81 +
.../cluster/executor/container/container.go | 679 +
.../cluster/executor/container/controller.go | 690 +
.../cluster/executor/container/errors.go | 17 +
.../cluster/executor/container/executor.go | 245 +
.../cluster/executor/container/health_test.go | 100 +
.../cluster/executor/container/validate.go | 40 +
.../executor/container/validate_test.go | 141 +
.../executor/container/validate_unix_test.go | 8 +
.../container/validate_windows_test.go | 8 +
.../moby/moby/daemon/cluster/filters.go | 123 +
.../moby/moby/daemon/cluster/filters_test.go | 102 +
.../moby/moby/daemon/cluster/helpers.go | 245 +
.../moby/moby/daemon/cluster/listen_addr.go | 302 +
.../moby/daemon/cluster/listen_addr_linux.go | 91 +
.../moby/daemon/cluster/listen_addr_others.go | 9 +
.../daemon/cluster/listen_addr_solaris.go | 57 +
.../moby/moby/daemon/cluster/networks.go | 317 +
.../moby/moby/daemon/cluster/noderunner.go | 375 +
.../moby/moby/daemon/cluster/nodes.go | 104 +
.../moby/daemon/cluster/provider/network.go | 37 +
.../moby/moby/daemon/cluster/secrets.go | 117 +
.../moby/moby/daemon/cluster/services.go | 591 +
.../moby/moby/daemon/cluster/swarm.go | 549 +
.../moby/moby/daemon/cluster/tasks.go | 86 +
.../moby/moby/daemon/cluster/utils.go | 63 +
vendor/github.com/moby/moby/daemon/commit.go | 252 +
.../moby/moby/daemon/config/config.go | 530 +
.../moby/daemon/config/config_common_unix.go | 73 +
.../daemon/config/config_common_unix_test.go | 84 +
.../moby/moby/daemon/config/config_solaris.go | 29 +
.../moby/moby/daemon/config/config_test.go | 391 +
.../moby/moby/daemon/config/config_unix.go | 63 +
.../moby/daemon/config/config_unix_test.go | 139 +
.../moby/moby/daemon/config/config_windows.go | 52 +
.../moby/daemon/config/config_windows_test.go | 60 +
vendor/github.com/moby/moby/daemon/configs.go | 23 +
.../moby/moby/daemon/configs_linux.go | 7 +
.../moby/moby/daemon/configs_unsupported.go | 7 +
.../moby/moby/daemon/configs_windows.go | 7 +
.../github.com/moby/moby/daemon/container.go | 321 +
.../moby/moby/daemon/container_linux.go | 29 +
.../moby/moby/daemon/container_operations.go | 1090 +
.../daemon/container_operations_solaris.go | 47 +
.../moby/daemon/container_operations_unix.go | 357 +
.../daemon/container_operations_windows.go | 202 +
.../moby/moby/daemon/container_windows.go | 11 +
vendor/github.com/moby/moby/daemon/create.go | 326 +
.../moby/moby/daemon/create_unix.go | 81 +
.../moby/moby/daemon/create_windows.go | 80 +
vendor/github.com/moby/moby/daemon/daemon.go | 1233 +
.../moby/moby/daemon/daemon_experimental.go | 7 +
.../moby/moby/daemon/daemon_linux.go | 93 +
.../moby/moby/daemon/daemon_linux_test.go | 104 +
.../moby/moby/daemon/daemon_solaris.go | 533 +
.../moby/moby/daemon/daemon_test.go | 306 +
.../moby/moby/daemon/daemon_unix.go | 1325 +
.../moby/moby/daemon/daemon_unix_test.go | 318 +
.../moby/moby/daemon/daemon_unsupported.go | 5 +
.../moby/moby/daemon/daemon_windows.go | 657 +
.../moby/moby/daemon/debugtrap_unix.go | 27 +
.../moby/moby/daemon/debugtrap_unsupported.go | 7 +
.../moby/moby/daemon/debugtrap_windows.go | 46 +
vendor/github.com/moby/moby/daemon/delete.go | 171 +
.../moby/moby/daemon/delete_test.go | 96 +
.../github.com/moby/moby/daemon/dependency.go | 17 +
.../moby/moby/daemon/discovery/discovery.go | 202 +
.../moby/daemon/discovery/discovery_test.go | 111 +
.../github.com/moby/moby/daemon/disk_usage.go | 128 +
vendor/github.com/moby/moby/daemon/errors.go | 53 +
vendor/github.com/moby/moby/daemon/events.go | 332 +
.../moby/moby/daemon/events/events.go | 165 +
.../moby/moby/daemon/events/events_test.go | 275 +
.../moby/moby/daemon/events/filter.go | 134 +
.../moby/moby/daemon/events/metrics.go | 15 +
.../moby/daemon/events/testutils/testutils.go | 76 +
.../moby/moby/daemon/events_test.go | 90 +
vendor/github.com/moby/moby/daemon/exec.go | 299 +
.../github.com/moby/moby/daemon/exec/exec.go | 118 +
.../github.com/moby/moby/daemon/exec_linux.go | 50 +
.../moby/moby/daemon/exec_solaris.go | 11 +
.../moby/moby/daemon/exec_windows.go | 16 +
vendor/github.com/moby/moby/daemon/export.go | 59 +
.../moby/moby/daemon/getsize_unix.go | 43 +
.../moby/moby/daemon/graphdriver/aufs/aufs.go | 649 +
.../moby/daemon/graphdriver/aufs/aufs_test.go | 802 +
.../moby/moby/daemon/graphdriver/aufs/dirs.go | 64 +
.../moby/daemon/graphdriver/aufs/mount.go | 21 +
.../daemon/graphdriver/aufs/mount_linux.go | 7 +
.../graphdriver/aufs/mount_unsupported.go | 12 +
.../moby/daemon/graphdriver/btrfs/btrfs.go | 671 +
.../daemon/graphdriver/btrfs/btrfs_test.go | 63 +
.../graphdriver/btrfs/dummy_unsupported.go | 3 +
.../moby/daemon/graphdriver/btrfs/version.go | 26 +
.../daemon/graphdriver/btrfs/version_none.go | 14 +
.../daemon/graphdriver/btrfs/version_test.go | 13 +
.../moby/moby/daemon/graphdriver/counter.go | 59 +
.../daemon/graphdriver/devmapper/README.md | 98 +
.../graphdriver/devmapper/device_setup.go | 247 +
.../daemon/graphdriver/devmapper/deviceset.go | 2813 ++
.../graphdriver/devmapper/devmapper_doc.go | 106 +
.../graphdriver/devmapper/devmapper_test.go | 152 +
.../daemon/graphdriver/devmapper/driver.go | 241 +
.../daemon/graphdriver/devmapper/mount.go | 89 +
.../moby/moby/daemon/graphdriver/driver.go | 287 +
.../moby/daemon/graphdriver/driver_freebsd.go | 19 +
.../moby/daemon/graphdriver/driver_linux.go | 135 +
.../moby/daemon/graphdriver/driver_solaris.go | 97 +
.../daemon/graphdriver/driver_unsupported.go | 15 +
.../moby/daemon/graphdriver/driver_windows.go | 14 +
.../moby/moby/daemon/graphdriver/fsdiff.go | 169 +
.../graphdriver/graphtest/graphbench_unix.go | 259 +
.../graphdriver/graphtest/graphtest_unix.go | 336 +
.../graphtest/graphtest_windows.go | 1 +
.../daemon/graphdriver/graphtest/testutil.go | 342 +
.../graphdriver/graphtest/testutil_unix.go | 70 +
.../moby/moby/daemon/graphdriver/lcow/lcow.go | 929 +
.../moby/daemon/graphdriver/overlay/copy.go | 175 +
.../daemon/graphdriver/overlay/overlay.go | 469 +
.../graphdriver/overlay/overlay_test.go | 93 +
.../overlay/overlay_unsupported.go | 3 +
.../moby/daemon/graphdriver/overlay2/check.go | 79 +
.../moby/daemon/graphdriver/overlay2/mount.go | 88 +
.../daemon/graphdriver/overlay2/overlay.go | 724 +
.../graphdriver/overlay2/overlay_test.go | 121 +
.../overlay2/overlay_unsupported.go | 3 +
.../daemon/graphdriver/overlay2/randomid.go | 81 +
.../graphdriver/overlayutils/overlayutils.go | 18 +
.../moby/moby/daemon/graphdriver/plugin.go | 43 +
.../moby/moby/daemon/graphdriver/proxy.go | 263 +
.../daemon/graphdriver/quota/projectquota.go | 340 +
.../graphdriver/register/register_aufs.go | 8 +
.../graphdriver/register/register_btrfs.go | 8 +
.../register/register_devicemapper.go | 8 +
.../graphdriver/register/register_overlay.go | 9 +
.../graphdriver/register/register_vfs.go | 6 +
.../graphdriver/register/register_windows.go | 7 +
.../graphdriver/register/register_zfs.go | 8 +
.../moby/daemon/graphdriver/vfs/driver.go | 134 +
.../moby/daemon/graphdriver/vfs/vfs_test.go | 37 +
.../daemon/graphdriver/windows/windows.go | 960 +
.../moby/daemon/graphdriver/zfs/MAINTAINERS | 2 +
.../moby/moby/daemon/graphdriver/zfs/zfs.go | 420 +
.../daemon/graphdriver/zfs/zfs_freebsd.go | 38 +
.../moby/daemon/graphdriver/zfs/zfs_linux.go | 27 +
.../daemon/graphdriver/zfs/zfs_solaris.go | 59 +
.../moby/daemon/graphdriver/zfs/zfs_test.go | 35 +
.../daemon/graphdriver/zfs/zfs_unsupported.go | 11 +
vendor/github.com/moby/moby/daemon/health.go | 376 +
.../moby/moby/daemon/health_test.go | 154 +
vendor/github.com/moby/moby/daemon/image.go | 84 +
.../moby/moby/daemon/image_delete.go | 413 +
.../moby/moby/daemon/image_exporter.go | 37 +
.../moby/moby/daemon/image_history.go | 91 +
.../moby/moby/daemon/image_inspect.go | 96 +
.../github.com/moby/moby/daemon/image_pull.go | 126 +
.../github.com/moby/moby/daemon/image_push.go | 71 +
.../github.com/moby/moby/daemon/image_tag.go | 40 +
vendor/github.com/moby/moby/daemon/images.go | 359 +
vendor/github.com/moby/moby/daemon/import.go | 137 +
vendor/github.com/moby/moby/daemon/info.go | 190 +
.../github.com/moby/moby/daemon/info_unix.go | 93 +
.../moby/moby/daemon/info_unix_test.go | 52 +
.../moby/moby/daemon/info_windows.go | 10 +
.../moby/daemon/initlayer/setup_solaris.go | 13 +
.../moby/moby/daemon/initlayer/setup_unix.go | 69 +
.../moby/daemon/initlayer/setup_windows.go | 17 +
vendor/github.com/moby/moby/daemon/inspect.go | 274 +
.../moby/moby/daemon/inspect_solaris.go | 27 +
.../moby/moby/daemon/inspect_unix.go | 75 +
.../moby/moby/daemon/inspect_windows.go | 26 +
vendor/github.com/moby/moby/daemon/keys.go | 59 +
.../moby/moby/daemon/keys_unsupported.go | 8 +
vendor/github.com/moby/moby/daemon/kill.go | 177 +
vendor/github.com/moby/moby/daemon/links.go | 87 +
.../moby/moby/daemon/links/links.go | 141 +
.../moby/moby/daemon/links/links_test.go | 213 +
vendor/github.com/moby/moby/daemon/list.go | 670 +
.../github.com/moby/moby/daemon/list_unix.go | 11 +
.../moby/moby/daemon/list_windows.go | 20 +
.../moby/moby/daemon/logdrivers_linux.go | 15 +
.../moby/moby/daemon/logdrivers_windows.go | 13 +
.../moby/moby/daemon/logger/adapter.go | 137 +
.../moby/moby/daemon/logger/adapter_test.go | 180 +
.../daemon/logger/awslogs/cloudwatchlogs.go | 598 +
.../logger/awslogs/cloudwatchlogs_test.go | 1053 +
.../logger/awslogs/cwlogsiface_mock_test.go | 92 +
.../moby/moby/daemon/logger/copier.go | 135 +
.../moby/moby/daemon/logger/copier_test.go | 296 +
.../daemon/logger/etwlogs/etwlogs_windows.go | 168 +
.../moby/moby/daemon/logger/factory.go | 162 +
.../moby/daemon/logger/fluentd/fluentd.go | 250 +
.../moby/daemon/logger/gcplogs/gcplogging.go | 244 +
.../daemon/logger/gcplogs/gcplogging_linux.go | 31 +
.../logger/gcplogs/gcplogging_others.go | 7 +
.../moby/moby/daemon/logger/gelf/gelf.go | 209 +
.../daemon/logger/gelf/gelf_unsupported.go | 3 +
.../moby/daemon/logger/journald/journald.go | 126 +
.../daemon/logger/journald/journald_test.go | 23 +
.../logger/journald/journald_unsupported.go | 6 +
.../moby/moby/daemon/logger/journald/read.go | 423 +
.../daemon/logger/journald/read_native.go | 6 +
.../logger/journald/read_native_compat.go | 6 +
.../logger/journald/read_unsupported.go | 7 +
.../daemon/logger/jsonfilelog/jsonfilelog.go | 174 +
.../logger/jsonfilelog/jsonfilelog_test.go | 249 +
.../jsonfilelog/multireader/multireader.go | 228 +
.../multireader/multireader_test.go | 225 +
.../moby/daemon/logger/jsonfilelog/read.go | 349 +
.../daemon/logger/logentries/logentries.go | 97 +
.../moby/moby/daemon/logger/logger.go | 131 +
.../moby/moby/daemon/logger/logger_test.go | 21 +
.../moby/daemon/logger/loggerutils/log_tag.go | 31 +
.../daemon/logger/loggerutils/log_tag_test.go | 47 +
.../logger/loggerutils/rotatefilewriter.go | 141 +
.../moby/moby/daemon/logger/loginfo.go | 129 +
.../moby/moby/daemon/logger/plugin.go | 90 +
.../moby/moby/daemon/logger/plugin_unix.go | 20 +
.../moby/daemon/logger/plugin_unsupported.go | 12 +
.../moby/moby/daemon/logger/proxy.go | 107 +
.../moby/moby/daemon/logger/ring.go | 218 +
.../moby/moby/daemon/logger/ring_test.go | 299 +
.../moby/moby/daemon/logger/splunk/splunk.go | 626 +
.../moby/daemon/logger/splunk/splunk_test.go | 1306 +
.../logger/splunk/splunkhecmock_test.go | 157 +
.../moby/moby/daemon/logger/syslog/syslog.go | 266 +
.../moby/daemon/logger/syslog/syslog_test.go | 62 +
vendor/github.com/moby/moby/daemon/logs.go | 175 +
.../github.com/moby/moby/daemon/logs_test.go | 15 +
vendor/github.com/moby/moby/daemon/metrics.go | 174 +
.../moby/moby/daemon/metrics_unix.go | 86 +
.../moby/moby/daemon/metrics_unsupported.go | 12 +
vendor/github.com/moby/moby/daemon/monitor.go | 173 +
.../moby/moby/daemon/monitor_linux.go | 19 +
.../moby/moby/daemon/monitor_solaris.go | 18 +
.../moby/moby/daemon/monitor_windows.go | 46 +
vendor/github.com/moby/moby/daemon/mounts.go | 53 +
vendor/github.com/moby/moby/daemon/names.go | 111 +
vendor/github.com/moby/moby/daemon/network.go | 567 +
.../moby/moby/daemon/network/settings.go | 33 +
.../github.com/moby/moby/daemon/oci_linux.go | 838 +
.../moby/moby/daemon/oci_solaris.go | 187 +
.../moby/moby/daemon/oci_windows.go | 207 +
vendor/github.com/moby/moby/daemon/pause.go | 49 +
vendor/github.com/moby/moby/daemon/prune.go | 474 +
vendor/github.com/moby/moby/daemon/reload.go | 312 +
.../moby/moby/daemon/reload_test.go | 474 +
vendor/github.com/moby/moby/daemon/rename.go | 123 +
vendor/github.com/moby/moby/daemon/resize.go | 40 +
vendor/github.com/moby/moby/daemon/restart.go | 70 +
vendor/github.com/moby/moby/daemon/search.go | 94 +
.../moby/moby/daemon/search_test.go | 358 +
.../moby/moby/daemon/seccomp_disabled.go | 19 +
.../moby/moby/daemon/seccomp_linux.go | 55 +
.../moby/moby/daemon/seccomp_unsupported.go | 5 +
vendor/github.com/moby/moby/daemon/secrets.go | 23 +
.../moby/moby/daemon/secrets_linux.go | 7 +
.../moby/moby/daemon/secrets_unsupported.go | 7 +
.../moby/moby/daemon/secrets_windows.go | 7 +
.../moby/moby/daemon/selinux_linux.go | 17 +
.../moby/moby/daemon/selinux_unsupported.go | 13 +
vendor/github.com/moby/moby/daemon/start.go | 225 +
.../github.com/moby/moby/daemon/start_unix.go | 32 +
.../moby/moby/daemon/start_windows.go | 214 +
vendor/github.com/moby/moby/daemon/stats.go | 160 +
.../moby/moby/daemon/stats/collector.go | 122 +
.../moby/daemon/stats/collector_solaris.go | 29 +
.../moby/moby/daemon/stats/collector_unix.go | 83 +
.../moby/daemon/stats/collector_windows.go | 19 +
.../moby/moby/daemon/stats/types.go | 42 +
.../moby/moby/daemon/stats_collector.go | 26 +
.../github.com/moby/moby/daemon/stats_unix.go | 58 +
.../moby/moby/daemon/stats_windows.go | 11 +
vendor/github.com/moby/moby/daemon/stop.go | 91 +
.../github.com/moby/moby/daemon/top_unix.go | 155 +
.../moby/moby/daemon/top_unix_test.go | 79 +
.../moby/moby/daemon/top_windows.go | 53 +
vendor/github.com/moby/moby/daemon/unpause.go | 38 +
vendor/github.com/moby/moby/daemon/update.go | 86 +
.../moby/moby/daemon/update_linux.go | 32 +
.../moby/moby/daemon/update_solaris.go | 11 +
.../moby/moby/daemon/update_windows.go | 13 +
vendor/github.com/moby/moby/daemon/volumes.go | 395 +
.../moby/moby/daemon/volumes_unit_test.go | 39 +
.../moby/moby/daemon/volumes_unix.go | 232 +
.../moby/moby/daemon/volumes_unix_test.go | 256 +
.../moby/moby/daemon/volumes_windows.go | 48 +
vendor/github.com/moby/moby/daemon/wait.go | 22 +
vendor/github.com/moby/moby/daemon/workdir.go | 20 +
.../moby/moby/distribution/config.go | 252 +
.../moby/moby/distribution/errors.go | 159 +
.../fixtures/validate_manifest/bad_manifest | 38 +
.../validate_manifest/extra_data_manifest | 46 +
.../fixtures/validate_manifest/good_manifest | 38 +
.../moby/distribution/metadata/metadata.go | 77 +
.../distribution/metadata/v1_id_service.go | 51 +
.../metadata/v1_id_service_test.go | 84 +
.../metadata/v2_metadata_service.go | 241 +
.../metadata/v2_metadata_service_test.go | 116 +
.../github.com/moby/moby/distribution/pull.go | 198 +
.../moby/moby/distribution/pull_v1.go | 368 +
.../moby/moby/distribution/pull_v2.go | 920 +
.../moby/moby/distribution/pull_v2_test.go | 183 +
.../moby/moby/distribution/pull_v2_unix.go | 13 +
.../moby/moby/distribution/pull_v2_windows.go | 57 +
.../github.com/moby/moby/distribution/push.go | 186 +
.../moby/moby/distribution/push_v1.go | 457 +
.../moby/moby/distribution/push_v2.go | 691 +
.../moby/moby/distribution/push_v2_test.go | 583 +
.../moby/moby/distribution/registry.go | 156 +
.../moby/distribution/registry_unit_test.go | 172 +
.../moby/moby/distribution/utils/progress.go | 44 +
.../moby/moby/distribution/xfer/download.go | 469 +
.../moby/distribution/xfer/download_test.go | 367 +
.../moby/moby/distribution/xfer/transfer.go | 401 +
.../moby/distribution/xfer/transfer_test.go | 410 +
.../moby/moby/distribution/xfer/upload.go | 174 +
.../moby/distribution/xfer/upload_test.go | 134 +
.../moby/moby/dockerversion/useragent.go | 76 +
.../moby/moby/dockerversion/version_lib.go | 16 +
vendor/github.com/moby/moby/docs/api/v1.18.md | 2159 +
vendor/github.com/moby/moby/docs/api/v1.19.md | 2241 ++
vendor/github.com/moby/moby/docs/api/v1.20.md | 2394 ++
vendor/github.com/moby/moby/docs/api/v1.21.md | 2981 ++
vendor/github.com/moby/moby/docs/api/v1.22.md | 3319 ++
vendor/github.com/moby/moby/docs/api/v1.23.md | 3436 ++
vendor/github.com/moby/moby/docs/api/v1.24.md | 5348 +++
.../moby/moby/docs/api/version-history.md | 334 +
.../moby/docs/static_files/contributors.png | Bin 0 -> 23100 bytes
.../static_files/docker-logo-compressed.png | Bin 0 -> 4972 bytes
.../docs/static_files/moby-project-logo.png | Bin 0 -> 20458 bytes
.../moby/moby/hack/Jenkins/W2L/postbuild.sh | 35 +
.../moby/moby/hack/Jenkins/W2L/setup.sh | 309 +
.../moby/moby/hack/Jenkins/readme.md | 3 +
vendor/github.com/moby/moby/hack/README.md | 60 +
vendor/github.com/moby/moby/hack/dind | 33 +
.../moby/hack/dockerfile/binaries-commits | 14 +
.../moby/hack/dockerfile/install-binaries.sh | 125 +
.../moby/moby/hack/generate-authors.sh | 15 +
.../moby/moby/hack/generate-swagger-api.sh | 27 +
.../hack/integration-cli-on-swarm/README.md | 69 +
.../integration-cli-on-swarm/agent/Dockerfile | 6 +
.../agent/master/call.go | 132 +
.../agent/master/master.go | 65 +
.../agent/master/set.go | 28 +
.../agent/master/set_test.go | 63 +
.../agent/types/types.go | 18 +
.../agent/vendor.conf | 2 +
.../agent/worker/executor.go | 118 +
.../agent/worker/worker.go | 69 +
.../integration-cli-on-swarm/host/compose.go | 122 +
.../host/dockercmd.go | 64 +
.../host/enumerate.go | 55 +
.../host/enumerate_test.go | 84 +
.../integration-cli-on-swarm/host/host.go | 198 +
.../integration-cli-on-swarm/host/volume.go | 88 +
vendor/github.com/moby/moby/hack/make.ps1 | 472 +
vendor/github.com/moby/moby/hack/make.sh | 294 +
vendor/github.com/moby/moby/hack/make/.binary | 39 +
.../moby/moby/hack/make/.binary-setup | 9 +
.../moby/moby/hack/make/.build-deb/compat | 1 +
.../moby/moby/hack/make/.build-deb/control | 29 +
.../.build-deb/docker-engine.bash-completion | 1 +
.../.build-deb/docker-engine.docker.default | 1 +
.../make/.build-deb/docker-engine.docker.init | 1 +
.../.build-deb/docker-engine.docker.upstart | 1 +
.../make/.build-deb/docker-engine.install | 12 +
.../make/.build-deb/docker-engine.manpages | 1 +
.../make/.build-deb/docker-engine.postinst | 20 +
.../hack/make/.build-deb/docker-engine.udev | 1 +
.../moby/moby/hack/make/.build-deb/docs | 1 +
.../moby/moby/hack/make/.build-deb/rules | 53 +
.../.build-rpm/docker-engine-selinux.spec | 99 +
.../hack/make/.build-rpm/docker-engine.spec | 249 +
.../moby/moby/hack/make/.detect-daemon-osarch | 73 +
.../moby/moby/hack/make/.ensure-emptyfs | 23 +
.../moby/moby/hack/make/.go-autogen | 86 +
.../moby/moby/hack/make/.go-autogen.ps1 | 91 +
.../moby/hack/make/.integration-daemon-setup | 7 +
.../moby/hack/make/.integration-daemon-start | 130 +
.../moby/hack/make/.integration-daemon-stop | 27 +
.../moby/hack/make/.integration-test-helpers | 84 +
.../hack/make/.resources-windows/common.rc | 38 +
.../.resources-windows/docker.exe.manifest | 18 +
.../hack/make/.resources-windows/docker.ico | Bin 0 -> 370070 bytes
.../hack/make/.resources-windows/docker.png | Bin 0 -> 658195 bytes
.../hack/make/.resources-windows/docker.rc | 3 +
.../hack/make/.resources-windows/dockerd.rc | 4 +
.../make/.resources-windows/event_messages.mc | 39 +
.../hack/make/.resources-windows/resources.go | 18 +
.../github.com/moby/moby/hack/make/README.md | 17 +
vendor/github.com/moby/moby/hack/make/binary | 10 +
.../moby/moby/hack/make/binary-daemon | 9 +
.../github.com/moby/moby/hack/make/build-deb | 91 +
.../hack/make/build-integration-test-binary | 13 +
.../github.com/moby/moby/hack/make/build-rpm | 148 +
.../moby/moby/hack/make/clean-apt-repo | 43 +
.../moby/moby/hack/make/clean-yum-repo | 20 +
vendor/github.com/moby/moby/hack/make/cover | 15 +
vendor/github.com/moby/moby/hack/make/cross | 29 +
.../github.com/moby/moby/hack/make/dynbinary | 10 +
.../moby/moby/hack/make/dynbinary-daemon | 10 +
.../moby/hack/make/generate-index-listing | 74 +
.../moby/moby/hack/make/install-binary | 8 +
.../moby/moby/hack/make/install-binary-daemon | 16 +
.../moby/moby/hack/make/release-deb | 163 +
.../moby/moby/hack/make/release-rpm | 71 +
vendor/github.com/moby/moby/hack/make/run | 44 +
.../github.com/moby/moby/hack/make/sign-repos | 65 +
.../moby/moby/hack/make/test-docker-py | 20 +
.../moby/moby/hack/make/test-integration-cli | 29 +
.../moby/hack/make/test-integration-shell | 7 +
.../moby/moby/hack/make/test-old-apt-repo | 29 +
.../github.com/moby/moby/hack/make/test-unit | 58 +
vendor/github.com/moby/moby/hack/make/tgz | 2 +
vendor/github.com/moby/moby/hack/make/ubuntu | 190 +
.../moby/moby/hack/make/update-apt-repo | 70 +
vendor/github.com/moby/moby/hack/make/win | 20 +
vendor/github.com/moby/moby/hack/release.sh | 317 +
.../moby/moby/hack/validate/.swagger-yamllint | 4 +
.../moby/moby/hack/validate/.validate | 30 +
vendor/github.com/moby/moby/hack/validate/all | 8 +
.../hack/validate/changelog-date-descending | 12 +
.../moby/hack/validate/changelog-well-formed | 25 +
vendor/github.com/moby/moby/hack/validate/dco | 55 +
.../moby/moby/hack/validate/default | 18 +
.../moby/moby/hack/validate/default-seccomp | 28 +
.../github.com/moby/moby/hack/validate/gofmt | 33 +
.../github.com/moby/moby/hack/validate/lint | 31 +
.../moby/moby/hack/validate/pkg-imports | 33 +
.../moby/moby/hack/validate/swagger | 13 +
.../moby/moby/hack/validate/swagger-gen | 29 +
.../moby/moby/hack/validate/test-imports | 38 +
.../github.com/moby/moby/hack/validate/toml | 31 +
.../github.com/moby/moby/hack/validate/vendor | 51 +
vendor/github.com/moby/moby/hack/validate/vet | 32 +
vendor/github.com/moby/moby/hack/vendor.sh | 15 +
.../github.com/moby/moby/image/cache/cache.go | 253 +
.../moby/moby/image/cache/compare.go | 63 +
.../moby/moby/image/cache/compare_test.go | 126 +
vendor/github.com/moby/moby/image/fs.go | 178 +
vendor/github.com/moby/moby/image/fs_test.go | 275 +
vendor/github.com/moby/moby/image/image.go | 223 +
.../github.com/moby/moby/image/image_test.go | 90 +
vendor/github.com/moby/moby/image/rootfs.go | 52 +
.../github.com/moby/moby/image/spec/v1.1.md | 637 +
.../github.com/moby/moby/image/spec/v1.2.md | 696 +
vendor/github.com/moby/moby/image/spec/v1.md | 573 +
vendor/github.com/moby/moby/image/store.go | 324 +
.../github.com/moby/moby/image/store_test.go | 178 +
.../moby/moby/image/tarexport/load.go | 431 +
.../moby/moby/image/tarexport/save.go | 409 +
.../moby/moby/image/tarexport/tarexport.go | 47 +
.../github.com/moby/moby/image/v1/imagev1.go | 150 +
.../moby/moby/image/v1/imagev1_test.go | 55 +
.../moby/integration-cli/benchmark_test.go | 95 +
.../moby/moby/integration-cli/check_test.go | 496 +
.../moby/integration-cli/checker/checker.go | 46 +
.../moby/integration-cli/cli/build/build.go | 82 +
.../cli/build/fakecontext/context.go | 124 +
.../cli/build/fakegit/fakegit.go | 125 +
.../cli/build/fakestorage/fixtures.go | 67 +
.../cli/build/fakestorage/storage.go | 176 +
.../moby/moby/integration-cli/cli/cli.go | 231 +
.../moby/integration-cli/daemon/daemon.go | 815 +
.../integration-cli/daemon/daemon_swarm.go | 608 +
.../integration-cli/daemon/daemon_unix.go | 36 +
.../integration-cli/daemon/daemon_windows.go | 54 +
.../integration-cli/daemon_swarm_hack_test.go | 23 +
.../integration-cli/docker_api_attach_test.go | 210 +
.../integration-cli/docker_api_auth_test.go | 26 +
.../integration-cli/docker_api_build_test.go | 535 +
.../docker_api_containers_test.go | 1950 +
.../integration-cli/docker_api_create_test.go | 171 +
.../integration-cli/docker_api_events_test.go | 74 +
.../docker_api_exec_resize_test.go | 104 +
.../integration-cli/docker_api_exec_test.go | 249 +
.../integration-cli/docker_api_images_test.go | 192 +
.../integration-cli/docker_api_info_test.go | 76 +
.../docker_api_inspect_test.go | 184 +
.../docker_api_inspect_unix_test.go | 36 +
.../integration-cli/docker_api_logs_test.go | 84 +
.../docker_api_network_test.go | 357 +
.../integration-cli/docker_api_resize_test.go | 45 +
.../docker_api_session_test.go | 49 +
.../integration-cli/docker_api_stats_test.go | 310 +
.../docker_api_stats_unix_test.go | 42 +
.../docker_api_swarm_config_test.go | 118 +
.../docker_api_swarm_node_test.go | 128 +
.../docker_api_swarm_secret_test.go | 132 +
.../docker_api_swarm_service_test.go | 676 +
.../integration-cli/docker_api_swarm_test.go | 1045 +
.../moby/integration-cli/docker_api_test.go | 122 +
.../docker_api_update_unix_test.go | 36 +
.../docker_api_version_test.go | 24 +
.../docker_api_volumes_test.go | 103 +
.../integration-cli/docker_cli_attach_test.go | 176 +
.../docker_cli_attach_unix_test.go | 237 +
.../docker_cli_authz_plugin_v2_test.go | 168 +
.../docker_cli_authz_unix_test.go | 475 +
.../integration-cli/docker_cli_build_test.go | 6490 +++
.../docker_cli_build_unix_test.go | 204 +
.../docker_cli_by_digest_test.go | 690 +
.../integration-cli/docker_cli_commit_test.go | 157 +
.../docker_cli_config_create_test.go | 131 +
.../docker_cli_config_inspect_test.go | 68 +
.../docker_cli_config_ls_test.go | 125 +
.../integration-cli/docker_cli_config_test.go | 150 +
.../docker_cli_cp_from_container_test.go | 488 +
.../integration-cli/docker_cli_cp_test.go | 665 +
.../docker_cli_cp_to_container_test.go | 600 +
.../docker_cli_cp_to_container_unix_test.go | 81 +
.../docker_cli_cp_utils_test.go | 319 +
.../integration-cli/docker_cli_create_test.go | 443 +
.../docker_cli_create_unix_test.go | 43 +
.../docker_cli_daemon_plugins_test.go | 369 +
.../integration-cli/docker_cli_daemon_test.go | 3028 ++
.../integration-cli/docker_cli_diff_test.go | 98 +
.../integration-cli/docker_cli_events_test.go | 799 +
.../docker_cli_events_unix_test.go | 485 +
.../integration-cli/docker_cli_exec_test.go | 602 +
.../docker_cli_exec_unix_test.go | 93 +
.../docker_cli_experimental_test.go | 29 +
.../docker_cli_export_import_test.go | 51 +
...cker_cli_external_graphdriver_unix_test.go | 406 +
...er_cli_external_volume_driver_unix_test.go | 633 +
.../integration-cli/docker_cli_health_test.go | 164 +
.../integration-cli/docker_cli_help_test.go | 319 +
.../docker_cli_history_test.go | 119 +
.../integration-cli/docker_cli_images_test.go | 366 +
.../integration-cli/docker_cli_import_test.go | 143 +
.../integration-cli/docker_cli_info_test.go | 239 +
.../docker_cli_info_unix_test.go | 15 +
.../docker_cli_inspect_test.go | 468 +
.../integration-cli/docker_cli_kill_test.go | 138 +
.../integration-cli/docker_cli_links_test.go | 236 +
.../docker_cli_links_unix_test.go | 26 +
.../integration-cli/docker_cli_login_test.go | 30 +
.../integration-cli/docker_cli_logout_test.go | 108 +
.../docker_cli_logs_bench_test.go | 32 +
.../integration-cli/docker_cli_logs_test.go | 307 +
.../integration-cli/docker_cli_nat_test.go | 88 +
.../docker_cli_netmode_test.go | 94 +
.../docker_cli_network_unix_test.go | 1843 +
.../docker_cli_oom_killed_test.go | 30 +
.../integration-cli/docker_cli_pause_test.go | 78 +
.../docker_cli_plugins_logdriver_test.go | 49 +
.../docker_cli_plugins_test.go | 515 +
.../integration-cli/docker_cli_port_test.go | 319 +
.../integration-cli/docker_cli_proxy_test.go | 51 +
.../docker_cli_prune_unix_test.go | 292 +
.../integration-cli/docker_cli_ps_test.go | 967 +
.../docker_cli_pull_local_test.go | 470 +
.../integration-cli/docker_cli_pull_test.go | 284 +
.../docker_cli_pull_trusted_test.go | 222 +
.../integration-cli/docker_cli_push_test.go | 604 +
.../docker_cli_registry_user_agent_test.go | 103 +
.../integration-cli/docker_cli_rename_test.go | 138 +
.../docker_cli_restart_test.go | 309 +
.../integration-cli/docker_cli_rm_test.go | 87 +
.../integration-cli/docker_cli_rmi_test.go | 338 +
.../integration-cli/docker_cli_run_test.go | 4619 +++
.../docker_cli_run_unix_test.go | 1576 +
.../docker_cli_save_load_test.go | 385 +
.../docker_cli_save_load_unix_test.go | 107 +
.../integration-cli/docker_cli_search_test.go | 131 +
.../docker_cli_secret_create_test.go | 131 +
.../docker_cli_secret_inspect_test.go | 68 +
.../docker_cli_secret_ls_test.go | 125 +
.../docker_cli_service_create_test.go | 447 +
.../docker_cli_service_health_test.go | 134 +
.../docker_cli_service_logs_test.go | 387 +
.../docker_cli_service_scale_test.go | 57 +
.../docker_cli_service_update_test.go | 172 +
.../integration-cli/docker_cli_sni_test.go | 44 +
.../integration-cli/docker_cli_stack_test.go | 206 +
.../integration-cli/docker_cli_start_test.go | 199 +
.../integration-cli/docker_cli_stats_test.go | 179 +
.../integration-cli/docker_cli_stop_test.go | 17 +
.../integration-cli/docker_cli_swarm_test.go | 2230 ++
.../docker_cli_swarm_unix_test.go | 104 +
.../integration-cli/docker_cli_tag_test.go | 168 +
.../integration-cli/docker_cli_top_test.go | 73 +
.../integration-cli/docker_cli_update_test.go | 43 +
.../docker_cli_update_unix_test.go | 319 +
.../integration-cli/docker_cli_userns_test.go | 99 +
.../docker_cli_v2_only_test.go | 120 +
.../docker_cli_version_test.go | 58 +
.../integration-cli/docker_cli_volume_test.go | 643 +
.../integration-cli/docker_cli_wait_test.go | 98 +
.../docker_deprecated_api_v124_test.go | 229 +
.../docker_deprecated_api_v124_unix_test.go | 31 +
.../docker_experimental_network_test.go | 533 +
.../docker_hub_pull_suite_test.go | 91 +
.../moby/integration-cli/docker_utils_test.go | 502 +
.../moby/integration-cli/environment/clean.go | 216 +
.../environment/environment.go | 229 +
.../integration-cli/environment/protect.go | 12 +
.../moby/integration-cli/events_utils_test.go | 206 +
.../auth/docker-credential-shell-test | 55 +
.../fixtures/credentialspecs/valid.json | 25 +
.../fixtures/deploy/default.yaml | 9 +
.../fixtures/deploy/remove.yaml | 11 +
.../fixtures/deploy/secrets.yaml | 20 +
.../fixtures/load/emptyLayer.tar | Bin 0 -> 30720 bytes
.../integration-cli/fixtures/load/frozen.go | 182 +
.../fixtures/notary/delgkey1.crt | 21 +
.../fixtures/notary/delgkey1.key | 27 +
.../fixtures/notary/delgkey2.crt | 21 +
.../fixtures/notary/delgkey2.key | 27 +
.../fixtures/notary/delgkey3.crt | 21 +
.../fixtures/notary/delgkey3.key | 27 +
.../fixtures/notary/delgkey4.crt | 21 +
.../fixtures/notary/delgkey4.key | 27 +
.../integration-cli/fixtures/notary/gen.sh | 18 +
.../fixtures/notary/localhost.cert | 19 +
.../fixtures/notary/localhost.key | 27 +
.../fixtures/plugin/basic/basic.go | 34 +
.../integration-cli/fixtures/plugin/plugin.go | 34 +
.../fixtures/plugin/plugin_linux.go | 157 +
.../fixtures/plugin/plugin_unsuported.go | 19 +
.../integration-cli/fixtures/secrets/default | 1 +
.../fixtures_linux_daemon_test.go | 149 +
.../moby/integration-cli/registry/registry.go | 214 +
.../integration-cli/registry/registry_mock.go | 66 +
.../integration-cli/registry/requirement.go | 12 +
.../moby/integration-cli/request/npipe.go | 12 +
.../integration-cli/request/npipe_windows.go | 12 +
.../moby/integration-cli/request/request.go | 312 +
.../requirement/requirement.go | 33 +
.../moby/integration-cli/requirements_test.go | 203 +
.../integration-cli/requirements_unix_test.go | 115 +
.../integration-cli/test_vars_exec_test.go | 8 +
.../integration-cli/test_vars_noexec_test.go | 8 +
.../test_vars_noseccomp_test.go | 8 +
.../integration-cli/test_vars_seccomp_test.go | 8 +
.../moby/integration-cli/test_vars_test.go | 11 +
.../integration-cli/test_vars_unix_test.go | 14 +
.../integration-cli/test_vars_windows_test.go | 15 +
.../moby/integration-cli/trust_server_test.go | 336 +
.../moby/moby/integration-cli/utils_test.go | 32 +
vendor/github.com/moby/moby/layer/empty.go | 65 +
.../github.com/moby/moby/layer/empty_test.go | 46 +
.../github.com/moby/moby/layer/filestore.go | 355 +
.../moby/moby/layer/filestore_test.go | 104 +
.../moby/moby/layer/filestore_unix.go | 13 +
.../moby/moby/layer/filestore_windows.go | 35 +
vendor/github.com/moby/moby/layer/layer.go | 295 +
.../github.com/moby/moby/layer/layer_store.go | 753 +
.../moby/moby/layer/layer_store_windows.go | 11 +
.../github.com/moby/moby/layer/layer_test.go | 772 +
.../github.com/moby/moby/layer/layer_unix.go | 9 +
.../moby/moby/layer/layer_unix_test.go | 71 +
.../moby/moby/layer/layer_windows.go | 34 +
.../github.com/moby/moby/layer/migration.go | 256 +
.../moby/moby/layer/migration_test.go | 435 +
.../github.com/moby/moby/layer/mount_test.go | 239 +
.../moby/moby/layer/mounted_layer.go | 99 +
vendor/github.com/moby/moby/layer/ro_layer.go | 183 +
.../moby/moby/layer/ro_layer_unix.go | 7 +
.../moby/moby/layer/ro_layer_windows.go | 16 +
.../moby/moby/libcontainerd/client.go | 46 +
.../moby/moby/libcontainerd/client_linux.go | 619 +
.../moby/moby/libcontainerd/client_solaris.go | 101 +
.../moby/moby/libcontainerd/client_unix.go | 141 +
.../moby/moby/libcontainerd/client_windows.go | 754 +
.../moby/moby/libcontainerd/container.go | 13 +
.../moby/moby/libcontainerd/container_unix.go | 246 +
.../moby/libcontainerd/container_windows.go | 330 +
.../moby/moby/libcontainerd/oom_linux.go | 31 +
.../moby/moby/libcontainerd/oom_solaris.go | 5 +
.../moby/libcontainerd/pausemonitor_unix.go | 42 +
.../moby/moby/libcontainerd/process.go | 18 +
.../moby/moby/libcontainerd/process_unix.go | 107 +
.../moby/libcontainerd/process_windows.go | 48 +
.../moby/moby/libcontainerd/queue_unix.go | 37 +
.../moby/libcontainerd/queue_unix_test.go | 33 +
.../moby/moby/libcontainerd/remote.go | 20 +
.../moby/moby/libcontainerd/remote_unix.go | 565 +
.../moby/moby/libcontainerd/remote_windows.go | 36 +
.../moby/moby/libcontainerd/types.go | 75 +
.../moby/moby/libcontainerd/types_linux.go | 49 +
.../moby/moby/libcontainerd/types_solaris.go | 43 +
.../moby/moby/libcontainerd/types_windows.go | 79 +
.../moby/moby/libcontainerd/utils_linux.go | 63 +
.../moby/moby/libcontainerd/utils_solaris.go | 27 +
.../moby/moby/libcontainerd/utils_windows.go | 46 +
.../moby/libcontainerd/utils_windows_test.go | 13 +
.../moby/moby/migrate/v1/migratev1.go | 506 +
.../moby/moby/migrate/v1/migratev1_test.go | 442 +
vendor/github.com/moby/moby/oci/defaults.go | 221 +
.../github.com/moby/moby/oci/devices_linux.go | 86 +
.../moby/moby/oci/devices_unsupported.go | 20 +
vendor/github.com/moby/moby/oci/namespaces.go | 13 +
vendor/github.com/moby/moby/opts/env.go | 46 +
vendor/github.com/moby/moby/opts/env_test.go | 42 +
vendor/github.com/moby/moby/opts/hosts.go | 165 +
.../github.com/moby/moby/opts/hosts_test.go | 181 +
.../github.com/moby/moby/opts/hosts_unix.go | 8 +
.../moby/moby/opts/hosts_windows.go | 6 +
vendor/github.com/moby/moby/opts/ip.go | 47 +
vendor/github.com/moby/moby/opts/ip_test.go | 54 +
vendor/github.com/moby/moby/opts/opts.go | 346 +
vendor/github.com/moby/moby/opts/opts_test.go | 264 +
vendor/github.com/moby/moby/opts/opts_unix.go | 6 +
.../github.com/moby/moby/opts/opts_windows.go | 56 +
.../github.com/moby/moby/opts/quotedstring.go | 37 +
.../moby/moby/opts/quotedstring_test.go | 29 +
vendor/github.com/moby/moby/opts/runtime.go | 79 +
vendor/github.com/moby/moby/opts/ulimit.go | 81 +
.../github.com/moby/moby/opts/ulimit_test.go | 42 +
vendor/github.com/moby/moby/pkg/README.md | 11 +
.../moby/moby/pkg/aaparser/aaparser.go | 89 +
.../moby/moby/pkg/aaparser/aaparser_test.go | 73 +
.../moby/moby/pkg/archive/README.md | 1 +
.../moby/moby/pkg/archive/archive.go | 1219 +
.../moby/moby/pkg/archive/archive_linux.go | 92 +
.../moby/pkg/archive/archive_linux_test.go | 188 +
.../moby/moby/pkg/archive/archive_other.go | 7 +
.../moby/moby/pkg/archive/archive_test.go | 1279 +
.../moby/moby/pkg/archive/archive_unix.go | 122 +
.../moby/pkg/archive/archive_unix_test.go | 250 +
.../moby/moby/pkg/archive/archive_windows.go | 79 +
.../moby/pkg/archive/archive_windows_test.go | 93 +
.../moby/moby/pkg/archive/changes.go | 441 +
.../moby/moby/pkg/archive/changes_linux.go | 313 +
.../moby/moby/pkg/archive/changes_other.go | 97 +
.../moby/pkg/archive/changes_posix_test.go | 132 +
.../moby/moby/pkg/archive/changes_test.go | 572 +
.../moby/moby/pkg/archive/changes_unix.go | 37 +
.../moby/moby/pkg/archive/changes_windows.go | 30 +
.../github.com/moby/moby/pkg/archive/copy.go | 461 +
.../moby/moby/pkg/archive/copy_unix.go | 11 +
.../moby/moby/pkg/archive/copy_unix_test.go | 978 +
.../moby/moby/pkg/archive/copy_windows.go | 9 +
.../github.com/moby/moby/pkg/archive/diff.go | 256 +
.../moby/moby/pkg/archive/diff_test.go | 386 +
.../moby/moby/pkg/archive/example_changes.go | 97 +
.../moby/moby/pkg/archive/testdata/broken.tar | Bin 0 -> 13824 bytes
.../moby/moby/pkg/archive/time_linux.go | 16 +
.../moby/moby/pkg/archive/time_unsupported.go | 16 +
.../moby/moby/pkg/archive/utils_test.go | 166 +
.../moby/moby/pkg/archive/whiteouts.go | 23 +
.../github.com/moby/moby/pkg/archive/wrap.go | 59 +
.../moby/moby/pkg/archive/wrap_test.go | 98 +
.../moby/moby/pkg/authorization/api.go | 88 +
.../moby/moby/pkg/authorization/api_test.go | 75 +
.../moby/moby/pkg/authorization/authz.go | 186 +
.../moby/pkg/authorization/authz_unix_test.go | 282 +
.../moby/moby/pkg/authorization/middleware.go | 110 +
.../moby/pkg/authorization/middleware_test.go | 53 +
.../pkg/authorization/middleware_unix_test.go | 65 +
.../moby/moby/pkg/authorization/plugin.go | 118 +
.../moby/moby/pkg/authorization/response.go | 203 +
.../moby/moby/pkg/broadcaster/unbuffered.go | 49 +
.../moby/pkg/broadcaster/unbuffered_test.go | 162 +
.../moby/moby/pkg/chrootarchive/archive.go | 70 +
.../moby/pkg/chrootarchive/archive_test.go | 412 +
.../moby/pkg/chrootarchive/archive_unix.go | 86 +
.../moby/pkg/chrootarchive/archive_windows.go | 22 +
.../moby/pkg/chrootarchive/chroot_linux.go | 108 +
.../moby/pkg/chrootarchive/chroot_unix.go | 12 +
.../moby/moby/pkg/chrootarchive/diff.go | 23 +
.../moby/moby/pkg/chrootarchive/diff_unix.go | 130 +
.../moby/pkg/chrootarchive/diff_windows.go | 45 +
.../moby/moby/pkg/chrootarchive/init_unix.go | 28 +
.../moby/pkg/chrootarchive/init_windows.go | 4 +
.../moby/moby/pkg/devicemapper/devmapper.go | 819 +
.../moby/pkg/devicemapper/devmapper_log.go | 121 +
.../pkg/devicemapper/devmapper_wrapper.go | 253 +
.../devmapper_wrapper_deferred_remove.go | 34 +
.../devmapper_wrapper_no_deferred_remove.go | 15 +
.../moby/moby/pkg/devicemapper/ioctl.go | 28 +
.../moby/moby/pkg/devicemapper/log.go | 11 +
.../moby/moby/pkg/directory/directory.go | 26 +
.../moby/moby/pkg/directory/directory_test.go | 192 +
.../moby/moby/pkg/directory/directory_unix.go | 48 +
.../moby/pkg/directory/directory_windows.go | 37 +
.../moby/moby/pkg/discovery/README.md | 41 +
.../moby/moby/pkg/discovery/backends.go | 107 +
.../moby/moby/pkg/discovery/discovery.go | 35 +
.../moby/moby/pkg/discovery/discovery_test.go | 137 +
.../moby/moby/pkg/discovery/entry.go | 94 +
.../moby/moby/pkg/discovery/file/file.go | 107 +
.../moby/moby/pkg/discovery/file/file_test.go | 114 +
.../moby/moby/pkg/discovery/generator.go | 35 +
.../moby/moby/pkg/discovery/generator_test.go | 53 +
.../moby/moby/pkg/discovery/kv/kv.go | 192 +
.../moby/moby/pkg/discovery/kv/kv_test.go | 324 +
.../moby/moby/pkg/discovery/memory/memory.go | 93 +
.../moby/pkg/discovery/memory/memory_test.go | 48 +
.../moby/moby/pkg/discovery/nodes/nodes.go | 54 +
.../moby/pkg/discovery/nodes/nodes_test.go | 51 +
.../moby/moby/pkg/filenotify/filenotify.go | 40 +
.../moby/moby/pkg/filenotify/fsnotify.go | 18 +
.../moby/moby/pkg/filenotify/poller.go | 204 +
.../moby/moby/pkg/filenotify/poller_test.go | 119 +
.../moby/moby/pkg/fileutils/fileutils.go | 298 +
.../moby/pkg/fileutils/fileutils_darwin.go | 27 +
.../moby/pkg/fileutils/fileutils_solaris.go | 7 +
.../moby/moby/pkg/fileutils/fileutils_test.go | 591 +
.../moby/moby/pkg/fileutils/fileutils_unix.go | 22 +
.../moby/pkg/fileutils/fileutils_windows.go | 7 +
.../moby/moby/pkg/fsutils/fsutils_linux.go | 88 +
.../moby/pkg/fsutils/fsutils_linux_test.go | 92 +
.../moby/moby/pkg/homedir/homedir_linux.go | 23 +
.../moby/moby/pkg/homedir/homedir_others.go | 13 +
.../moby/moby/pkg/homedir/homedir_test.go | 24 +
.../moby/moby/pkg/homedir/homedir_unix.go | 34 +
.../moby/moby/pkg/homedir/homedir_windows.go | 24 +
.../moby/moby/pkg/idtools/idtools.go | 279 +
.../moby/moby/pkg/idtools/idtools_unix.go | 204 +
.../moby/pkg/idtools/idtools_unix_test.go | 253 +
.../moby/moby/pkg/idtools/idtools_windows.go | 25 +
.../moby/pkg/idtools/usergroupadd_linux.go | 164 +
.../pkg/idtools/usergroupadd_unsupported.go | 12 +
.../moby/moby/pkg/idtools/utils_unix.go | 32 +
.../moby/moby/pkg/ioutils/buffer.go | 51 +
.../moby/moby/pkg/ioutils/buffer_test.go | 153 +
.../moby/moby/pkg/ioutils/bytespipe.go | 186 +
.../moby/moby/pkg/ioutils/bytespipe_test.go | 159 +
.../moby/moby/pkg/ioutils/fswriters.go | 162 +
.../moby/moby/pkg/ioutils/fswriters_test.go | 132 +
.../moby/moby/pkg/ioutils/readers.go | 154 +
.../moby/moby/pkg/ioutils/readers_test.go | 94 +
.../moby/moby/pkg/ioutils/temp_unix.go | 10 +
.../moby/moby/pkg/ioutils/temp_windows.go | 18 +
.../moby/moby/pkg/ioutils/writeflusher.go | 92 +
.../moby/moby/pkg/ioutils/writers.go | 66 +
.../moby/moby/pkg/ioutils/writers_test.go | 65 +
.../moby/moby/pkg/jsonlog/jsonlog.go | 42 +
.../moby/pkg/jsonlog/jsonlog_marshalling.go | 178 +
.../pkg/jsonlog/jsonlog_marshalling_test.go | 34 +
.../moby/moby/pkg/jsonlog/jsonlogbytes.go | 122 +
.../moby/pkg/jsonlog/jsonlogbytes_test.go | 39 +
.../moby/moby/pkg/jsonlog/time_marshalling.go | 27 +
.../moby/pkg/jsonlog/time_marshalling_test.go | 47 +
.../moby/moby/pkg/jsonmessage/jsonmessage.go | 315 +
.../moby/pkg/jsonmessage/jsonmessage_test.go | 281 +
.../moby/moby/pkg/listeners/group_unix.go | 34 +
.../moby/pkg/listeners/listeners_solaris.go | 43 +
.../moby/moby/pkg/listeners/listeners_unix.go | 104 +
.../moby/pkg/listeners/listeners_windows.go | 54 +
.../github.com/moby/moby/pkg/locker/README.md | 65 +
.../github.com/moby/moby/pkg/locker/locker.go | 112 +
.../moby/moby/pkg/locker/locker_test.go | 124 +
.../moby/moby/pkg/longpath/longpath.go | 26 +
.../moby/moby/pkg/longpath/longpath_test.go | 22 +
.../moby/moby/pkg/loopback/attach_loopback.go | 137 +
.../moby/moby/pkg/loopback/ioctl.go | 54 +
.../moby/moby/pkg/loopback/loop_wrapper.go | 52 +
.../moby/moby/pkg/loopback/loopback.go | 63 +
.../github.com/moby/moby/pkg/mount/flags.go | 149 +
.../moby/moby/pkg/mount/flags_freebsd.go | 49 +
.../moby/moby/pkg/mount/flags_linux.go | 87 +
.../moby/moby/pkg/mount/flags_unsupported.go | 31 +
.../github.com/moby/moby/pkg/mount/mount.go | 86 +
.../moby/moby/pkg/mount/mount_unix_test.go | 162 +
.../moby/moby/pkg/mount/mounter_freebsd.go | 60 +
.../moby/moby/pkg/mount/mounter_linux.go | 57 +
.../moby/moby/pkg/mount/mounter_linux_test.go | 228 +
.../moby/moby/pkg/mount/mounter_solaris.go | 33 +
.../moby/pkg/mount/mounter_unsupported.go | 11 +
.../moby/moby/pkg/mount/mountinfo.go | 54 +
.../moby/moby/pkg/mount/mountinfo_freebsd.go | 41 +
.../moby/moby/pkg/mount/mountinfo_linux.go | 95 +
.../moby/pkg/mount/mountinfo_linux_test.go | 476 +
.../moby/moby/pkg/mount/mountinfo_solaris.go | 37 +
.../moby/pkg/mount/mountinfo_unsupported.go | 12 +
.../moby/moby/pkg/mount/mountinfo_windows.go | 6 +
.../moby/pkg/mount/sharedsubtree_linux.go | 69 +
.../pkg/mount/sharedsubtree_linux_test.go | 332 +
.../moby/pkg/mount/sharedsubtree_solaris.go | 58 +
.../cmd/names-generator/main.go | 11 +
.../pkg/namesgenerator/names-generator.go | 606 +
.../namesgenerator/names-generator_test.go | 27 +
.../moby/moby/pkg/parsers/kernel/kernel.go | 74 +
.../moby/pkg/parsers/kernel/kernel_darwin.go | 56 +
.../moby/pkg/parsers/kernel/kernel_unix.go | 45 +
.../pkg/parsers/kernel/kernel_unix_test.go | 96 +
.../moby/pkg/parsers/kernel/kernel_windows.go | 70 +
.../moby/pkg/parsers/kernel/uname_linux.go | 17 +
.../moby/pkg/parsers/kernel/uname_solaris.go | 14 +
.../pkg/parsers/kernel/uname_unsupported.go | 18 +
.../operatingsystem/operatingsystem_linux.go | 77 +
.../operatingsystem_solaris.go | 37 +
.../operatingsystem/operatingsystem_unix.go | 25 +
.../operatingsystem_unix_test.go | 247 +
.../operatingsystem_windows.go | 50 +
.../moby/moby/pkg/parsers/parsers.go | 69 +
.../moby/moby/pkg/parsers/parsers_test.go | 70 +
.../moby/moby/pkg/pidfile/pidfile.go | 53 +
.../moby/moby/pkg/pidfile/pidfile_darwin.go | 14 +
.../moby/moby/pkg/pidfile/pidfile_test.go | 38 +
.../moby/moby/pkg/pidfile/pidfile_unix.go | 16 +
.../moby/moby/pkg/pidfile/pidfile_windows.go | 25 +
.../moby/pkg/platform/architecture_linux.go | 16 +
.../moby/pkg/platform/architecture_unix.go | 20 +
.../moby/pkg/platform/architecture_windows.go | 60 +
.../moby/moby/pkg/platform/platform.go | 23 +
.../moby/moby/pkg/platform/utsname_int8.go | 18 +
.../moby/pkg/platform/utsname_int8_test.go | 16 +
.../moby/moby/pkg/platform/utsname_uint8.go | 18 +
.../moby/pkg/platform/utsname_uint8_test.go | 16 +
.../moby/moby/pkg/plugingetter/getter.go | 35 +
.../moby/moby/pkg/plugins/client.go | 205 +
.../moby/moby/pkg/plugins/client_test.go | 234 +
.../moby/moby/pkg/plugins/discovery.go | 131 +
.../moby/moby/pkg/plugins/discovery_test.go | 152 +
.../moby/moby/pkg/plugins/discovery_unix.go | 5 +
.../moby/pkg/plugins/discovery_unix_test.go | 100 +
.../moby/pkg/plugins/discovery_windows.go | 8 +
.../moby/moby/pkg/plugins/errors.go | 33 +
.../moby/moby/pkg/plugins/plugin_test.go | 156 +
.../moby/pkg/plugins/pluginrpc-gen/README.md | 58 +
.../pkg/plugins/pluginrpc-gen/fixtures/foo.go | 89 +
.../fixtures/otherfixture/spaceship.go | 4 +
.../moby/pkg/plugins/pluginrpc-gen/main.go | 91 +
.../moby/pkg/plugins/pluginrpc-gen/parser.go | 263 +
.../pkg/plugins/pluginrpc-gen/parser_test.go | 222 +
.../pkg/plugins/pluginrpc-gen/template.go | 118 +
.../moby/moby/pkg/plugins/plugins.go | 329 +
.../moby/moby/pkg/plugins/plugins_unix.go | 9 +
.../moby/moby/pkg/plugins/plugins_windows.go | 8 +
.../moby/moby/pkg/plugins/transport/http.go | 36 +
.../moby/pkg/plugins/transport/http_test.go | 20 +
.../moby/pkg/plugins/transport/transport.go | 36 +
.../github.com/moby/moby/pkg/pools/pools.go | 137 +
.../moby/moby/pkg/pools/pools_test.go | 166 +
.../moby/moby/pkg/progress/progress.go | 89 +
.../moby/moby/pkg/progress/progressreader.go | 66 +
.../moby/pkg/progress/progressreader_test.go | 75 +
.../moby/moby/pkg/promise/promise.go | 11 +
.../moby/moby/pkg/promise/promise_test.go | 25 +
.../moby/moby/pkg/pubsub/publisher.go | 121 +
.../moby/moby/pkg/pubsub/publisher_test.go | 142 +
.../github.com/moby/moby/pkg/reexec/README.md | 5 +
.../moby/moby/pkg/reexec/command_linux.go | 30 +
.../moby/moby/pkg/reexec/command_unix.go | 23 +
.../moby/pkg/reexec/command_unsupported.go | 12 +
.../moby/moby/pkg/reexec/command_windows.go | 23 +
.../github.com/moby/moby/pkg/reexec/reexec.go | 47 +
.../moby/moby/pkg/reexec/reexec_test.go | 53 +
.../github.com/moby/moby/pkg/signal/README.md | 1 +
.../github.com/moby/moby/pkg/signal/signal.go | 54 +
.../moby/moby/pkg/signal/signal_darwin.go | 41 +
.../moby/moby/pkg/signal/signal_freebsd.go | 43 +
.../moby/moby/pkg/signal/signal_linux.go | 82 +
.../moby/moby/pkg/signal/signal_linux_test.go | 58 +
.../moby/moby/pkg/signal/signal_solaris.go | 42 +
.../moby/moby/pkg/signal/signal_test.go | 33 +
.../moby/moby/pkg/signal/signal_unix.go | 21 +
.../moby/pkg/signal/signal_unsupported.go | 10 +
.../moby/moby/pkg/signal/signal_windows.go | 28 +
.../github.com/moby/moby/pkg/signal/trap.go | 103 +
.../moby/moby/pkg/stdcopy/stdcopy.go | 190 +
.../moby/moby/pkg/stdcopy/stdcopy_test.go | 289 +
.../pkg/streamformatter/streamformatter.go | 159 +
.../streamformatter/streamformatter_test.go | 109 +
.../moby/pkg/streamformatter/streamwriter.go | 47 +
.../pkg/streamformatter/streamwriter_test.go | 35 +
.../moby/moby/pkg/stringid/README.md | 1 +
.../moby/moby/pkg/stringid/stringid.go | 99 +
.../moby/moby/pkg/stringid/stringid_test.go | 72 +
.../moby/moby/pkg/stringutils/README.md | 1 +
.../moby/moby/pkg/stringutils/stringutils.go | 99 +
.../moby/pkg/stringutils/stringutils_test.go | 121 +
.../moby/moby/pkg/symlink/LICENSE.APACHE | 191 +
.../moby/moby/pkg/symlink/LICENSE.BSD | 27 +
.../moby/moby/pkg/symlink/README.md | 6 +
vendor/github.com/moby/moby/pkg/symlink/fs.go | 144 +
.../moby/moby/pkg/symlink/fs_unix.go | 15 +
.../moby/moby/pkg/symlink/fs_unix_test.go | 407 +
.../moby/moby/pkg/symlink/fs_windows.go | 169 +
.../moby/moby/pkg/sysinfo/README.md | 1 +
.../moby/moby/pkg/sysinfo/numcpu.go | 12 +
.../moby/moby/pkg/sysinfo/numcpu_linux.go | 44 +
.../moby/moby/pkg/sysinfo/numcpu_windows.go | 37 +
.../moby/moby/pkg/sysinfo/sysinfo.go | 144 +
.../moby/moby/pkg/sysinfo/sysinfo_linux.go | 254 +
.../moby/pkg/sysinfo/sysinfo_linux_test.go | 104 +
.../moby/moby/pkg/sysinfo/sysinfo_solaris.go | 121 +
.../moby/moby/pkg/sysinfo/sysinfo_test.go | 26 +
.../moby/moby/pkg/sysinfo/sysinfo_unix.go | 9 +
.../moby/moby/pkg/sysinfo/sysinfo_windows.go | 9 +
.../moby/moby/pkg/system/chtimes.go | 35 +
.../moby/moby/pkg/system/chtimes_test.go | 94 +
.../moby/moby/pkg/system/chtimes_unix.go | 14 +
.../moby/moby/pkg/system/chtimes_unix_test.go | 91 +
.../moby/moby/pkg/system/chtimes_windows.go | 28 +
.../moby/pkg/system/chtimes_windows_test.go | 86 +
.../github.com/moby/moby/pkg/system/errors.go | 10 +
.../moby/moby/pkg/system/events_windows.go | 85 +
.../moby/moby/pkg/system/exitcode.go | 33 +
.../moby/moby/pkg/system/filesys.go | 67 +
.../moby/moby/pkg/system/filesys_windows.go | 298 +
.../github.com/moby/moby/pkg/system/init.go | 22 +
.../moby/moby/pkg/system/init_windows.go | 17 +
.../moby/moby/pkg/system/lcow_unix.go | 8 +
.../moby/moby/pkg/system/lcow_windows.go | 6 +
.../moby/moby/pkg/system/lstat_unix.go | 19 +
.../moby/moby/pkg/system/lstat_unix_test.go | 30 +
.../moby/moby/pkg/system/lstat_windows.go | 14 +
.../moby/moby/pkg/system/meminfo.go | 17 +
.../moby/moby/pkg/system/meminfo_linux.go | 65 +
.../moby/moby/pkg/system/meminfo_solaris.go | 129 +
.../moby/moby/pkg/system/meminfo_unix_test.go | 40 +
.../moby/pkg/system/meminfo_unsupported.go | 8 +
.../moby/moby/pkg/system/meminfo_windows.go | 45 +
.../github.com/moby/moby/pkg/system/mknod.go | 22 +
.../moby/moby/pkg/system/mknod_windows.go | 13 +
.../github.com/moby/moby/pkg/system/path.go | 21 +
.../moby/moby/pkg/system/path_unix.go | 9 +
.../moby/moby/pkg/system/path_windows.go | 33 +
.../moby/moby/pkg/system/path_windows_test.go | 78 +
.../moby/moby/pkg/system/process_unix.go | 24 +
vendor/github.com/moby/moby/pkg/system/rm.go | 80 +
.../moby/moby/pkg/system/rm_test.go | 84 +
.../moby/moby/pkg/system/stat_darwin.go | 13 +
.../moby/moby/pkg/system/stat_freebsd.go | 13 +
.../moby/moby/pkg/system/stat_linux.go | 19 +
.../moby/moby/pkg/system/stat_openbsd.go | 13 +
.../moby/moby/pkg/system/stat_solaris.go | 13 +
.../moby/moby/pkg/system/stat_unix.go | 60 +
.../moby/moby/pkg/system/stat_unix_test.go | 39 +
.../moby/moby/pkg/system/stat_windows.go | 49 +
.../moby/moby/pkg/system/syscall_unix.go | 17 +
.../moby/moby/pkg/system/syscall_windows.go | 122 +
.../moby/pkg/system/syscall_windows_test.go | 9 +
.../github.com/moby/moby/pkg/system/umask.go | 13 +
.../moby/moby/pkg/system/umask_windows.go | 9 +
.../moby/moby/pkg/system/utimes_freebsd.go | 24 +
.../moby/moby/pkg/system/utimes_linux.go | 25 +
.../moby/moby/pkg/system/utimes_unix_test.go | 68 +
.../moby/pkg/system/utimes_unsupported.go | 10 +
.../moby/moby/pkg/system/xattrs_linux.go | 29 +
.../moby/pkg/system/xattrs_unsupported.go | 13 +
.../moby/moby/pkg/tailfile/tailfile.go | 66 +
.../moby/moby/pkg/tailfile/tailfile_test.go | 148 +
.../moby/moby/pkg/tarsum/builder_context.go | 21 +
.../moby/pkg/tarsum/builder_context_test.go | 67 +
.../moby/moby/pkg/tarsum/fileinfosums.go | 126 +
.../moby/moby/pkg/tarsum/fileinfosums_test.go | 62 +
.../github.com/moby/moby/pkg/tarsum/tarsum.go | 295 +
.../moby/moby/pkg/tarsum/tarsum_spec.md | 230 +
.../moby/moby/pkg/tarsum/tarsum_test.go | 664 +
.../json | 1 +
.../layer.tar | Bin 0 -> 9216 bytes
.../json | 1 +
.../layer.tar | Bin 0 -> 1536 bytes
.../tarsum/testdata/collision/collision-0.tar | Bin 0 -> 10240 bytes
.../tarsum/testdata/collision/collision-1.tar | Bin 0 -> 10240 bytes
.../tarsum/testdata/collision/collision-2.tar | Bin 0 -> 10240 bytes
.../tarsum/testdata/collision/collision-3.tar | Bin 0 -> 10240 bytes
.../moby/moby/pkg/tarsum/testdata/xattr/json | 1 +
.../moby/pkg/tarsum/testdata/xattr/layer.tar | Bin 0 -> 2560 bytes
.../moby/moby/pkg/tarsum/versioning.go | 158 +
.../moby/moby/pkg/tarsum/versioning_test.go | 98 +
.../moby/moby/pkg/tarsum/writercloser.go | 22 +
.../moby/moby/pkg/templates/templates.go | 78 +
.../moby/moby/pkg/templates/templates_test.go | 88 +
vendor/github.com/moby/moby/pkg/term/ascii.go | 66 +
.../moby/moby/pkg/term/ascii_test.go | 43 +
vendor/github.com/moby/moby/pkg/term/proxy.go | 74 +
.../moby/moby/pkg/term/proxy_test.go | 92 +
vendor/github.com/moby/moby/pkg/term/tc.go | 21 +
.../moby/moby/pkg/term/tc_solaris_cgo.go | 65 +
vendor/github.com/moby/moby/pkg/term/term.go | 124 +
.../moby/moby/pkg/term/term_linux_test.go | 120 +
.../moby/moby/pkg/term/term_windows.go | 237 +
.../moby/moby/pkg/term/termios_bsd.go | 42 +
.../moby/moby/pkg/term/termios_linux.go | 37 +
.../moby/moby/pkg/term/windows/ansi_reader.go | 263 +
.../moby/moby/pkg/term/windows/ansi_writer.go | 64 +
.../moby/moby/pkg/term/windows/console.go | 35 +
.../moby/moby/pkg/term/windows/windows.go | 33 +
.../moby/pkg/term/windows/windows_test.go | 3 +
.../github.com/moby/moby/pkg/term/winsize.go | 30 +
.../moby/moby/pkg/term/winsize_solaris_cgo.go | 42 +
.../moby/moby/pkg/testutil/cmd/command.go | 307 +
.../moby/pkg/testutil/cmd/command_test.go | 118 +
.../moby/moby/pkg/testutil/golden/golden.go | 28 +
.../moby/moby/pkg/testutil/helpers.go | 33 +
.../github.com/moby/moby/pkg/testutil/pkg.go | 1 +
.../moby/pkg/testutil/tempfile/tempfile.go | 56 +
.../moby/moby/pkg/testutil/utils.go | 218 +
.../moby/moby/pkg/testutil/utils_test.go | 341 +
.../moby/pkg/tlsconfig/tlsconfig_clone.go | 11 +
.../pkg/tlsconfig/tlsconfig_clone_go17.go | 33 +
.../moby/moby/pkg/truncindex/truncindex.go | 139 +
.../moby/pkg/truncindex/truncindex_test.go | 453 +
.../moby/moby/pkg/urlutil/urlutil.go | 44 +
.../moby/moby/pkg/urlutil/urlutil_test.go | 56 +
.../moby/moby/pkg/useragent/README.md | 1 +
.../moby/moby/pkg/useragent/useragent.go | 55 +
.../moby/moby/pkg/useragent/useragent_test.go | 31 + .../moby/moby/plugin/backend_linux.go | 853 + .../moby/moby/plugin/backend_unsupported.go | 72 + .../github.com/moby/moby/plugin/blobstore.go | 184 + vendor/github.com/moby/moby/plugin/defs.go | 37 + vendor/github.com/moby/moby/plugin/events.go | 111 + vendor/github.com/moby/moby/plugin/manager.go | 411 + .../moby/moby/plugin/manager_linux.go | 323 + .../moby/moby/plugin/manager_solaris.go | 28 + .../moby/moby/plugin/manager_test.go | 55 + .../moby/moby/plugin/manager_windows.go | 30 + vendor/github.com/moby/moby/plugin/store.go | 270 + .../github.com/moby/moby/plugin/store_test.go | 33 + .../github.com/moby/moby/plugin/v2/plugin.go | 246 + .../moby/moby/plugin/v2/plugin_linux.go | 132 + .../moby/moby/plugin/v2/plugin_unsupported.go | 14 + .../moby/moby/plugin/v2/settable.go | 102 + .../moby/moby/plugin/v2/settable_test.go | 91 + vendor/github.com/moby/moby/poule.yml | 131 + .../moby/moby/profiles/apparmor/apparmor.go | 114 + .../moby/moby/profiles/apparmor/template.go | 46 + .../moby/moby/profiles/seccomp/default.json | 750 + .../profiles/seccomp/fixtures/example.json | 27 + .../moby/moby/profiles/seccomp/generate.go | 32 + .../moby/moby/profiles/seccomp/seccomp.go | 150 + .../moby/profiles/seccomp/seccomp_default.go | 639 + .../moby/profiles/seccomp/seccomp_test.go | 32 + .../profiles/seccomp/seccomp_unsupported.go | 13 + vendor/github.com/moby/moby/project/ARM.md | 45 + .../moby/moby/project/BRANCHES-AND-TAGS.md | 35 + .../moby/moby/project/CONTRIBUTING.md | 1 + .../moby/moby/project/GOVERNANCE.md | 17 + .../moby/moby/project/IRC-ADMINISTRATION.md | 37 + .../moby/moby/project/ISSUE-TRIAGE.md | 132 + .../moby/project/PACKAGE-REPO-MAINTENANCE.md | 74 + .../github.com/moby/moby/project/PACKAGERS.md | 307 + .../moby/moby/project/PATCH-RELEASES.md | 68 + .../moby/moby/project/PRINCIPLES.md | 19 + vendor/github.com/moby/moby/project/README.md | 24 + .../moby/moby/project/RELEASE-CHECKLIST.md | 519 + .../moby/moby/project/RELEASE-PROCESS.md | 78 + .../github.com/moby/moby/project/REVIEWING.md | 246 + vendor/github.com/moby/moby/project/TOOLS.md | 63 + .../github.com/moby/moby/reference/store.go | 343 + .../moby/moby/reference/store_test.go | 358 + vendor/github.com/moby/moby/registry/auth.go | 303 + .../moby/moby/registry/auth_test.go | 124 + .../github.com/moby/moby/registry/config.go | 456 + .../moby/moby/registry/config_test.go | 260 + .../moby/moby/registry/config_unix.go | 25 + .../moby/moby/registry/config_windows.go | 25 + .../moby/moby/registry/endpoint_test.go | 78 + .../moby/moby/registry/endpoint_v1.go | 198 + .../github.com/moby/moby/registry/registry.go | 191 + .../moby/moby/registry/registry_mock_test.go | 478 + .../moby/moby/registry/registry_test.go | 917 + .../resumable/resumablerequestreader.go | 96 + .../resumable/resumablerequestreader_test.go | 256 + .../github.com/moby/moby/registry/service.go | 327 + .../moby/moby/registry/service_v1.go | 40 + .../moby/moby/registry/service_v1_test.go | 23 + .../moby/moby/registry/service_v2.go | 82 + .../github.com/moby/moby/registry/session.go | 778 + vendor/github.com/moby/moby/registry/types.go | 70 + .../moby/moby/reports/2017-05-01.md | 35 + .../moby/moby/reports/2017-05-08.md | 34 + .../moby/moby/reports/2017-05-15.md | 52 + .../moby/moby/reports/2017-06-05.md | 36 + .../moby/moby/reports/2017-06-12.md | 78 + .../moby/moby/reports/2017-06-26.md | 120 + .../moby/moby/reports/builder/2017-05-01.md | 47 + .../moby/moby/reports/builder/2017-05-08.md | 57 + 
.../moby/moby/reports/builder/2017-05-15.md | 64 + .../moby/moby/reports/builder/2017-05-22.md | 47 + .../moby/moby/reports/builder/2017-05-29.md | 52 + .../moby/moby/reports/builder/2017-06-05.md | 58 + .../moby/moby/reports/builder/2017-06-12.md | 58 + .../moby/moby/reports/builder/2017-06-26.md | 78 + .../moby/moby/reports/builder/2017-07-10.md | 65 + .../moby/restartmanager/restartmanager.go | 133 + .../restartmanager/restartmanager_test.go | 36 + .../github.com/moby/moby/runconfig/config.go | 108 + .../moby/moby/runconfig/config_test.go | 139 + .../moby/moby/runconfig/config_unix.go | 59 + .../moby/moby/runconfig/config_windows.go | 19 + .../github.com/moby/moby/runconfig/errors.go | 38 + .../fixtures/unix/container_config_1_14.json | 30 + .../fixtures/unix/container_config_1_17.json | 50 + .../fixtures/unix/container_config_1_19.json | 58 + .../unix/container_hostconfig_1_14.json | 18 + .../unix/container_hostconfig_1_19.json | 30 + .../windows/container_config_1_19.json | 58 + .../moby/moby/runconfig/hostconfig.go | 80 + .../moby/moby/runconfig/hostconfig_solaris.go | 46 + .../moby/moby/runconfig/hostconfig_test.go | 283 + .../moby/moby/runconfig/hostconfig_unix.go | 110 + .../moby/moby/runconfig/hostconfig_windows.go | 96 + .../moby/runconfig/hostconfig_windows_test.go | 17 + .../moby/moby/runconfig/opts/parse.go | 20 + vendor/github.com/moby/moby/vendor.conf | 146 + .../moby/moby/volume/drivers/adapter.go | 184 + .../moby/moby/volume/drivers/extpoint.go | 217 + .../moby/moby/volume/drivers/extpoint_test.go | 23 + .../moby/moby/volume/drivers/proxy.go | 242 + .../moby/moby/volume/drivers/proxy_test.go | 132 + .../moby/moby/volume/local/local.go | 387 + .../moby/moby/volume/local/local_test.go | 345 + .../moby/moby/volume/local/local_unix.go | 99 + .../moby/moby/volume/local/local_windows.go | 46 + .../github.com/moby/moby/volume/store/db.go | 88 + .../moby/moby/volume/store/errors.go | 76 + .../moby/moby/volume/store/restore.go | 83 + .../moby/moby/volume/store/store.go | 669 + .../moby/moby/volume/store/store_test.go | 234 + .../moby/moby/volume/store/store_unix.go | 9 + .../moby/moby/volume/store/store_windows.go | 12 + .../moby/moby/volume/testutils/testutils.go | 123 + .../github.com/moby/moby/volume/validate.go | 140 + .../moby/moby/volume/validate_test.go | 43 + .../moby/moby/volume/validate_test_unix.go | 8 + .../moby/moby/volume/validate_test_windows.go | 6 + vendor/github.com/moby/moby/volume/volume.go | 374 + .../moby/moby/volume/volume_copy.go | 23 + .../moby/moby/volume/volume_copy_unix.go | 8 + .../moby/moby/volume/volume_copy_windows.go | 6 + .../moby/moby/volume/volume_linux.go | 56 + .../moby/moby/volume/volume_linux_test.go | 51 + .../moby/volume/volume_propagation_linux.go | 47 + .../volume/volume_propagation_linux_test.go | 65 + .../volume/volume_propagation_unsupported.go | 24 + .../moby/moby/volume/volume_test.go | 269 + .../moby/moby/volume/volume_unix.go | 148 + .../moby/moby/volume/volume_unsupported.go | 16 + .../moby/moby/volume/volume_windows.go | 201 + vendor/github.com/onsi/gomega/.gitignore | 5 + vendor/github.com/onsi/gomega/.travis.yml | 12 + vendor/github.com/onsi/gomega/CHANGELOG.md | 74 + vendor/github.com/onsi/gomega/CONTRIBUTING.md | 11 + vendor/github.com/onsi/gomega/LICENSE | 20 + vendor/github.com/onsi/gomega/README.md | 21 + .../github.com/onsi/gomega/format/format.go | 379 + .../onsi/gomega/format/format_suite_test.go | 13 + .../onsi/gomega/format/format_test.go | 590 + .../github.com/onsi/gomega/gbytes/buffer.go | 245 + 
.../onsi/gomega/gbytes/buffer_test.go | 205 + .../onsi/gomega/gbytes/gbuffer_suite_test.go | 13 + .../onsi/gomega/gbytes/io_wrappers.go | 85 + .../onsi/gomega/gbytes/io_wrappers_test.go | 188 + .../onsi/gomega/gbytes/say_matcher.go | 105 + .../onsi/gomega/gbytes/say_matcher_test.go | 163 + .../gomega/gexec/_fixture/firefly/main.go | 36 + vendor/github.com/onsi/gomega/gexec/build.go | 99 + .../onsi/gomega/gexec/build_test.go | 59 + .../onsi/gomega/gexec/exit_matcher.go | 86 + .../onsi/gomega/gexec/exit_matcher_test.go | 113 + .../onsi/gomega/gexec/gexec_suite_test.go | 26 + .../onsi/gomega/gexec/prefixed_writer.go | 53 + .../onsi/gomega/gexec/prefixed_writer_test.go | 43 + .../github.com/onsi/gomega/gexec/session.go | 305 + .../onsi/gomega/gexec/session_test.go | 351 + .../github.com/onsi/gomega/ghttp/handlers.go | 313 + .../onsi/gomega/ghttp/protobuf/protobuf.go | 3 + .../ghttp/protobuf/simple_message.pb.go | 55 + .../ghttp/protobuf/simple_message.proto | 9 + .../onsi/gomega/ghttp/test_server.go | 381 + .../gomega/ghttp/test_server_suite_test.go | 13 + .../onsi/gomega/ghttp/test_server_test.go | 1089 + vendor/github.com/onsi/gomega/gomega_dsl.go | 335 + .../onsi/gomega/gstruct/elements.go | 145 + .../onsi/gomega/gstruct/elements_test.go | 144 + .../gomega/gstruct/errors/nested_types.go | 72 + .../github.com/onsi/gomega/gstruct/fields.go | 141 + .../onsi/gomega/gstruct/fields_test.go | 76 + .../gstruct/gstruct_tests_suite_test.go | 13 + .../github.com/onsi/gomega/gstruct/ignore.go | 37 + .../onsi/gomega/gstruct/ignore_test.go | 23 + .../github.com/onsi/gomega/gstruct/pointer.go | 56 + .../onsi/gomega/gstruct/pointer_test.go | 33 + .../github.com/onsi/gomega/gstruct/types.go | 15 + .../gomega/internal/assertion/assertion.go | 98 + .../assertion/assertion_suite_test.go | 13 + .../internal/assertion/assertion_test.go | 252 + .../asyncassertion/async_assertion.go | 189 + .../async_assertion_suite_test.go | 13 + .../asyncassertion/async_assertion_test.go | 345 + .../internal/fakematcher/fake_matcher.go | 23 + .../internal/oraclematcher/oracle_matcher.go | 25 + .../testingtsupport/testing_t_support.go | 40 + .../testingtsupport/testing_t_support_test.go | 12 + vendor/github.com/onsi/gomega/matchers.go | 427 + vendor/github.com/onsi/gomega/matchers/and.go | 63 + .../onsi/gomega/matchers/and_test.go | 103 + .../matchers/assignable_to_type_of_matcher.go | 31 + .../assignable_to_type_of_matcher_test.go | 30 + .../onsi/gomega/matchers/be_a_directory.go | 54 + .../gomega/matchers/be_a_directory_test.go | 40 + .../onsi/gomega/matchers/be_a_regular_file.go | 54 + .../gomega/matchers/be_a_regular_file_test.go | 40 + .../gomega/matchers/be_an_existing_file.go | 38 + .../matchers/be_an_existing_file_test.go | 40 + .../onsi/gomega/matchers/be_closed_matcher.go | 45 + .../gomega/matchers/be_closed_matcher_test.go | 70 + .../onsi/gomega/matchers/be_empty_matcher.go | 26 + .../gomega/matchers/be_empty_matcher_test.go | 52 + .../matchers/be_equivalent_to_matcher.go | 33 + .../matchers/be_equivalent_to_matcher_test.go | 50 + .../onsi/gomega/matchers/be_false_matcher.go | 25 + .../gomega/matchers/be_false_matcher_test.go | 20 + .../onsi/gomega/matchers/be_identical_to.go | 37 + .../gomega/matchers/be_identical_to_test.go | 61 + .../onsi/gomega/matchers/be_nil_matcher.go | 18 + .../gomega/matchers/be_nil_matcher_test.go | 28 + .../gomega/matchers/be_numerically_matcher.go | 120 + .../matchers/be_numerically_matcher_test.go | 148 + .../onsi/gomega/matchers/be_sent_matcher.go | 71 + 
.../gomega/matchers/be_sent_matcher_test.go | 106 + .../gomega/matchers/be_temporally_matcher.go | 65 + .../matchers/be_temporally_matcher_test.go | 98 + .../onsi/gomega/matchers/be_true_matcher.go | 25 + .../gomega/matchers/be_true_matcher_test.go | 20 + .../onsi/gomega/matchers/be_zero_matcher.go | 27 + .../gomega/matchers/be_zero_matcher_test.go | 30 + .../onsi/gomega/matchers/consist_of.go | 80 + .../onsi/gomega/matchers/consist_of_test.go | 75 + .../matchers/contain_element_matcher.go | 56 + .../matchers/contain_element_matcher_test.go | 76 + .../matchers/contain_substring_matcher.go | 37 + .../contain_substring_matcher_test.go | 36 + .../onsi/gomega/matchers/equal_matcher.go | 33 + .../gomega/matchers/equal_matcher_test.go | 78 + .../onsi/gomega/matchers/have_cap_matcher.go | 28 + .../gomega/matchers/have_cap_matcher_test.go | 50 + .../onsi/gomega/matchers/have_key_matcher.go | 53 + .../gomega/matchers/have_key_matcher_test.go | 73 + .../matchers/have_key_with_value_matcher.go | 73 + .../have_key_with_value_matcher_test.go | 82 + .../onsi/gomega/matchers/have_len_matcher.go | 27 + .../gomega/matchers/have_len_matcher_test.go | 53 + .../gomega/matchers/have_occurred_matcher.go | 33 + .../matchers/have_occurred_matcher_test.go | 58 + .../gomega/matchers/have_prefix_matcher.go | 35 + .../matchers/have_prefix_matcher_test.go | 36 + .../gomega/matchers/have_suffix_matcher.go | 35 + .../matchers/have_suffix_matcher_test.go | 36 + .../gomega/matchers/match_error_matcher.go | 50 + .../matchers/match_error_matcher_test.go | 93 + .../gomega/matchers/match_json_matcher.go | 135 + .../matchers/match_json_matcher_test.go | 97 + .../gomega/matchers/match_regexp_matcher.go | 42 + .../matchers/match_regexp_matcher_test.go | 44 + .../onsi/gomega/matchers/match_xml_matcher.go | 131 + .../gomega/matchers/match_xml_matcher_test.go | 90 + .../gomega/matchers/match_yaml_matcher.go | 74 + .../matchers/match_yaml_matcher_test.go | 94 + .../matchers/matcher_tests_suite_test.go | 50 + vendor/github.com/onsi/gomega/matchers/not.go | 30 + .../onsi/gomega/matchers/not_test.go | 57 + vendor/github.com/onsi/gomega/matchers/or.go | 67 + .../onsi/gomega/matchers/or_test.go | 85 + .../onsi/gomega/matchers/panic_matcher.go | 46 + .../gomega/matchers/panic_matcher_test.go | 45 + .../onsi/gomega/matchers/receive_matcher.go | 122 + .../gomega/matchers/receive_matcher_test.go | 280 + .../onsi/gomega/matchers/succeed_matcher.go | 33 + .../gomega/matchers/succeed_matcher_test.go | 62 + .../matchers/support/goraph/MIT.LICENSE | 20 + .../goraph/bipartitegraph/bipartitegraph.go | 41 + .../bipartitegraph/bipartitegraphmatching.go | 159 + .../matchers/support/goraph/edge/edge.go | 61 + .../matchers/support/goraph/node/node.go | 7 + .../matchers/support/goraph/util/util.go | 7 + .../matchers/test_data/xml/sample_01.xml | 6 + .../matchers/test_data/xml/sample_02.xml | 9 + .../matchers/test_data/xml/sample_03.xml | 1 + .../matchers/test_data/xml/sample_04.xml | 6 + .../matchers/test_data/xml/sample_05.xml | 211 + .../matchers/test_data/xml/sample_06.xml | 13 + .../matchers/test_data/xml/sample_07.xml | 13 + .../matchers/test_data/xml/sample_08.xml | 13 + .../matchers/test_data/xml/sample_09.xml | 4 + .../matchers/test_data/xml/sample_10.xml | 4 + .../matchers/test_data/xml/sample_11.xml | 7 + .../onsi/gomega/matchers/type_support.go | 173 + .../onsi/gomega/matchers/with_transform.go | 72 + .../gomega/matchers/with_transform_test.go | 102 + vendor/github.com/onsi/gomega/types/types.go | 17 + vendor/github.com/urfave/cli/.flake8 | 2 +
vendor/github.com/urfave/cli/.gitignore | 2 + vendor/github.com/urfave/cli/.travis.yml | 38 + vendor/github.com/urfave/cli/CHANGELOG.md | 392 + vendor/github.com/urfave/cli/LICENSE | 21 + vendor/github.com/urfave/cli/README.md | 1381 + vendor/github.com/urfave/cli/altsrc/altsrc.go | 3 + vendor/github.com/urfave/cli/altsrc/flag.go | 261 + .../urfave/cli/altsrc/flag_generated.go | 347 + .../github.com/urfave/cli/altsrc/flag_test.go | 336 + .../urfave/cli/altsrc/helpers_test.go | 18 + .../urfave/cli/altsrc/input_source_context.go | 21 + .../urfave/cli/altsrc/map_input_source.go | 262 + .../urfave/cli/altsrc/toml_command_test.go | 310 + .../urfave/cli/altsrc/toml_file_loader.go | 113 + .../urfave/cli/altsrc/yaml_command_test.go | 313 + .../urfave/cli/altsrc/yaml_file_loader.go | 92 + vendor/github.com/urfave/cli/app.go | 497 + vendor/github.com/urfave/cli/app_test.go | 1742 + vendor/github.com/urfave/cli/appveyor.yml | 24 + .../urfave/cli/autocomplete/bash_autocomplete | 16 + .../urfave/cli/autocomplete/zsh_autocomplete | 5 + vendor/github.com/urfave/cli/category.go | 44 + vendor/github.com/urfave/cli/cli.go | 22 + vendor/github.com/urfave/cli/command.go | 304 + vendor/github.com/urfave/cli/command_test.go | 240 + vendor/github.com/urfave/cli/context.go | 278 + vendor/github.com/urfave/cli/context_test.go | 403 + vendor/github.com/urfave/cli/errors.go | 115 + vendor/github.com/urfave/cli/errors_test.go | 122 + vendor/github.com/urfave/cli/flag-types.json | 93 + vendor/github.com/urfave/cli/flag.go | 799 + .../github.com/urfave/cli/flag_generated.go | 627 + vendor/github.com/urfave/cli/flag_test.go | 1215 + vendor/github.com/urfave/cli/funcs.go | 28 + .../github.com/urfave/cli/generate-flag-types | 255 + vendor/github.com/urfave/cli/help.go | 338 + vendor/github.com/urfave/cli/help_test.go | 452 + vendor/github.com/urfave/cli/helpers_test.go | 28 + .../urfave/cli/helpers_unix_test.go | 9 + .../urfave/cli/helpers_windows_test.go | 20 + vendor/github.com/urfave/cli/runtests | 122 +
4048 files changed, 1104059 insertions(+), 554 deletions(-)
create mode 100644 cli/vendor/github.com/funcy/functions_go/client/call/get_apps_app_calls_call_parameters.go create mode 100644 cli/vendor/github.com/funcy/functions_go/client/call/get_apps_app_calls_call_responses.go create mode 100644 cli/vendor/github.com/funcy/functions_go/client/call/get_apps_app_calls_parameters.go create mode 100644 cli/vendor/github.com/funcy/functions_go/client/call/get_apps_app_calls_responses.go create mode 100644 cli/vendor/github.com/funcy/functions_go/client/operations/delete_apps_app_calls_call_log_parameters.go create mode 100644 cli/vendor/github.com/funcy/functions_go/client/operations/delete_apps_app_calls_call_log_responses.go create mode 100644 cli/vendor/github.com/funcy/functions_go/client/operations/get_apps_app_calls_call_log_parameters.go create mode 100644 cli/vendor/github.com/funcy/functions_go/client/operations/get_apps_app_calls_call_log_responses.go create mode 100644 vendor/github.com/aws/aws-sdk-go/.gitignore create mode 100644 vendor/github.com/aws/aws-sdk-go/.godoc_config create mode 100644 vendor/github.com/aws/aws-sdk-go/.travis.yml create mode 100644 vendor/github.com/aws/aws-sdk-go/.yardopts create mode 100644 vendor/github.com/aws/aws-sdk-go/Gemfile create mode 100644 vendor/github.com/aws/aws-sdk-go/LICENSE.txt create mode 100644 vendor/github.com/aws/aws-sdk-go/Makefile create mode 100644 vendor/github.com/aws/aws-sdk-go/NOTICE.txt create mode 100644 vendor/github.com/aws/aws-sdk-go/README.md
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/config.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/config_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/convert_types.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/convert_types_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/errors.go create mode 100644 
vendor/github.com/aws/aws-sdk-go/aws/logger.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/handlers_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/http_request_copy_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/http_request_retry_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_1_6_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/retryer_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/validation.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/session.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/session_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/functional_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/types.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/types_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/version.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/Godeps/Godeps.json create mode 100644 vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/Godeps/Readme create mode 100644 vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/gen/gen.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/rename/rename.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/rename/renames.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/renamer.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/assert.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/assert_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/integration_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/s3manager/integration_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/s3manager/stub.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/stub.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/integration.go create mode 100644 
vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/acm/acm.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/acm/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/apigateway/apigateway.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/apigateway/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/applicationdiscoveryservice/applicationdiscoveryservice.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/applicationdiscoveryservice/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/autoscaling/autoscaling.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/autoscaling/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudformation/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudformation/cloudformation.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudfront/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudfront/cloudfront.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudhsm/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudhsm/cloudhsm.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudsearch/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudsearch/cloudsearch.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudtrail/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudtrail/cloudtrail.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatch/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatch/cloudwatch.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatchlogs/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatchlogs/cloudwatchlogs.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codecommit/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codecommit/codecommit.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codedeploy/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codedeploy/codedeploy.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codepipeline/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codepipeline/codepipeline.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitoidentity/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitoidentity/cognitoidentity.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitosync/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitosync/cognitosync.feature create mode 100644 
vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/configservice/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/configservice/configservice.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/datapipeline/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/datapipeline/datapipeline.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/devicefarm/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/devicefarm/devicefarm.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directconnect/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directconnect/directconnect.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directoryservice/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directoryservice/directoryservice.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodb/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodb/dynamodb.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodbstreams/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodbstreams/dynamodbstreams.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ec2/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ec2/ec2.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ecs/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ecs/ecs.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/efs/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/efs/efs.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticache/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticache/elasticache.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticbeanstalk/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticbeanstalk/elasticbeanstalk.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticloadbalancing/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticloadbalancing/elasticloadbalancing.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elastictranscoder/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elastictranscoder/elastictranscoder.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/emr/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/emr/emr.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/es/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/es/es.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/glacier/client.go create mode 100644 
vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/glacier/glacier.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iam/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iam/iam.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iotdataplane/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iotdataplane/iotdataplane.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kinesis/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kinesis/kinesis.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kms/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kms/kms.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/lambda/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/lambda/lambda.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/machinelearning/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/machinelearning/machinelearning.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/opsworks/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/opsworks/opsworks.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/rds/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/rds/rds.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/redshift/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/redshift/redshift.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53/route53.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53domains/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53domains/route53domains.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ses/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ses/ses.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/shared.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/simpledb/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/simpledb/simpledb.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sns/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sns/sns.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sqs/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sqs/sqs.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ssm/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ssm/ssm.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/storagegateway/client.go create mode 100644 
vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/storagegateway/storagegateway.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sts/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sts/sts.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/support/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/support/support.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/swf/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/swf/swf.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/waf/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/waf/waf.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/workspaces/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/workspaces/workspaces.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/mock/server.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/performance/benchmarks.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/performance/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/performance/clients.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/performance/clients.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/performance/init.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/performance/logging.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/performance/streaming.feature create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.golang-tip create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.test.go1.4 create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.test.go1.5 create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.test.go1.5-novendorexp create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.test.go1.6 create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.test.go1.7 create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.test.gotip create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/unit/unit.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/util.go create mode 100644 vendor/github.com/aws/aws-sdk-go/awstesting/util_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/callgraph.html create mode 100644 vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/codewalk.html create mode 100644 vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/codewalkdir.html create mode 100644 vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/dirlist.html create mode 100644 vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/error.html create mode 100644 vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/example.html create mode 100644 vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/godoc.html create mode 100644 vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/godocs.js create mode 100644 vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/implements.html create mode 100644 
vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/jquery.js create mode 100644 vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/jquery.treeview.css create mode 100644 vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/jquery.treeview.edit.js create mode 100644 vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/jquery.treeview.js create mode 100644 vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/methodset.html create mode 100644 vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/opensearch.xml create mode 100644 vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/package.txt create mode 100644 vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/package_default.html create mode 100644 vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/package_service.html create mode 100644 vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/pkglist.html create mode 100644 vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/search.html create mode 100644 vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/search.txt create mode 100644 vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/searchcode.html create mode 100644 vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/searchdoc.html create mode 100644 vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/searchtxt.html create mode 100644 vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/style.css create mode 100644 vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/user_guide_example.html create mode 100644 vendor/github.com/aws/aws-sdk-go/doc-src/plugin/plugin.rb create mode 100644 vendor/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/layout/html/footer.erb create mode 100644 vendor/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/module/html/client.erb create mode 100644 vendor/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/module/html/item_summary.erb create mode 100644 vendor/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/module/html/setup.rb create mode 100644 vendor/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/package/html/setup.rb create mode 100644 vendor/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/struct/html/paginators.erb create mode 100644 vendor/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/struct/html/request_methods.erb create mode 100644 vendor/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/struct/html/setup.rb create mode 100644 vendor/github.com/aws/aws-sdk-go/example/service/cloudfront/signCookies/README.md create mode 100644 vendor/github.com/aws/aws-sdk-go/example/service/cloudfront/signCookies/signCookies.go create mode 100644 vendor/github.com/aws/aws-sdk-go/example/service/s3/listObjects/README.md create mode 100644 vendor/github.com/aws/aws-sdk-go/example/service/s3/listObjects/listObjects.go create mode 100644 vendor/github.com/aws/aws-sdk-go/example/service/s3/listObjectsConcurrently/README.md create mode 100644 vendor/github.com/aws/aws-sdk-go/example/service/s3/listObjectsConcurrently/listObjectsConcurrently.go create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/acm/2015-12-08/api-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/acm/2015-12-08/docs-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/acm/2015-12-08/examples-1.json create mode 100644 
vendor/github.com/aws/aws-sdk-go/models/apis/acm/2015-12-08/paginators-1.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/apigateway/2015-07-09/api-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/apigateway/2015-07-09/docs-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/apigateway/2015-07-09/examples-1.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/apigateway/2015-07-09/paginators-1.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/application-autoscaling/2016-02-06/api-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/application-autoscaling/2016-02-06/docs-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/application-autoscaling/2016-02-06/examples-1.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/application-autoscaling/2016-02-06/paginators-1.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/autoscaling/2011-01-01/api-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/autoscaling/2011-01-01/docs-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/autoscaling/2011-01-01/examples-1.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/autoscaling/2011-01-01/paginators-1.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/autoscaling/2011-01-01/waiters-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/api-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/docs-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/examples-1.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/paginators-1.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/waiters-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-04-17/api-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-04-17/docs-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-04-17/paginators-1.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-04-17/waiters-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-07-27/api-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-07-27/docs-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-07-27/paginators-1.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-07-27/waiters-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-09-17/api-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-09-17/docs-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-09-17/examples-1.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-09-17/paginators-1.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-09-17/waiters-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-13/api-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-13/docs-2.json create mode 100644 
vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-13/examples-1.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-13/paginators-1.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-13/waiters-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-28/api-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-28/docs-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-28/examples-1.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-28/paginators-1.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-28/waiters-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cloudhsm/2014-05-30/api-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cloudhsm/2014-05-30/docs-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cloudhsm/2014-05-30/examples-1.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cloudsearch/2013-01-01/api-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cloudsearch/2013-01-01/docs-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cloudsearch/2013-01-01/paginators-1.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cloudsearchdomain/2013-01-01/api-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cloudsearchdomain/2013-01-01/docs-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cloudsearchdomain/2013-01-01/examples-1.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cloudtrail/2013-11-01/api-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cloudtrail/2013-11-01/docs-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cloudtrail/2013-11-01/examples-1.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cloudtrail/2013-11-01/paginators-1.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/codecommit/2015-04-13/api-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/codecommit/2015-04-13/docs-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/codecommit/2015-04-13/examples-1.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/codecommit/2015-04-13/paginators-1.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/codedeploy/2014-10-06/api-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/codedeploy/2014-10-06/docs-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/codedeploy/2014-10-06/examples-1.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/codedeploy/2014-10-06/paginators-1.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/codepipeline/2015-07-09/api-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/codepipeline/2015-07-09/docs-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/codepipeline/2015-07-09/examples-1.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cognito-identity/2014-06-30/api-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cognito-identity/2014-06-30/docs-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cognito-identity/2014-06-30/examples-1.json create mode 100644 
vendor/github.com/aws/aws-sdk-go/models/apis/cognito-idp/2016-04-18/api-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cognito-idp/2016-04-18/docs-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cognito-idp/2016-04-18/examples-1.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cognito-sync/2014-06-30/api-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/cognito-sync/2014-06-30/docs-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/api-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/docs-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/examples-1.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/paginators-1.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/datapipeline/2012-10-29/api-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/datapipeline/2012-10-29/docs-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/datapipeline/2012-10-29/paginators-1.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/devicefarm/2015-06-23/api-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/devicefarm/2015-06-23/docs-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/devicefarm/2015-06-23/examples-1.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/devicefarm/2015-06-23/paginators-1.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/directconnect/2012-10-25/api-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/directconnect/2012-10-25/docs-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/directconnect/2012-10-25/examples-1.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/directconnect/2012-10-25/paginators-1.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/discovery/2015-11-01/api-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/discovery/2015-11-01/docs-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/discovery/2015-11-01/examples-1.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/dms/2016-01-01/api-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/dms/2016-01-01/docs-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/dms/2016-01-01/examples-1.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/ds/2015-04-16/api-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/ds/2015-04-16/docs-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/ds/2015-04-16/examples-1.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/dynamodb/2011-12-05/api-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/dynamodb/2011-12-05/docs-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/dynamodb/2011-12-05/examples-1.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/dynamodb/2011-12-05/paginators-1.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/dynamodb/2011-12-05/waiters-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/dynamodb/2012-08-10/api-2.json create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/dynamodb/2012-08-10/docs-2.json create mode 100644 
vendor/github.com/aws/aws-sdk-go/models/apis/dynamodb/2012-08-10/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/dynamodb/2012-08-10/paginators-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/dynamodb/2012-08-10/waiters-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2015-04-15/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2015-04-15/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2015-04-15/paginators-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2015-04-15/waiters-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2015-10-01/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2015-10-01/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2015-10-01/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2015-10-01/paginators-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2015-10-01/waiters-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2016-04-01/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2016-04-01/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2016-04-01/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2016-04-01/paginators-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2016-04-01/waiters-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/ecr/2015-09-21/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/ecr/2015-09-21/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/ecr/2015-09-21/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/paginators-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/waiters-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/elasticache/2015-02-02/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/elasticache/2015-02-02/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/elasticache/2015-02-02/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/elasticache/2015-02-02/paginators-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/elasticache/2015-02-02/waiters-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/elasticbeanstalk/2010-12-01/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/elasticbeanstalk/2010-12-01/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/elasticbeanstalk/2010-12-01/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/elasticbeanstalk/2010-12-01/paginators-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/elasticfilesystem/2015-02-01/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/elasticfilesystem/2015-02-01/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/elasticfilesystem/2015-02-01/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/elasticloadbalancing/2012-06-01/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/elasticloadbalancing/2012-06-01/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/elasticloadbalancing/2012-06-01/paginators-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/elasticloadbalancing/2012-06-01/waiters-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/paginators-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/waiters-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/elastictranscoder/2012-09-25/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/elastictranscoder/2012-09-25/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/elastictranscoder/2012-09-25/paginators-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/elastictranscoder/2012-09-25/waiters-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/email/2010-12-01/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/email/2010-12-01/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/email/2010-12-01/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/email/2010-12-01/paginators-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/email/2010-12-01/waiters-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/es/2015-01-01/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/es/2015-01-01/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/events/2014-02-03/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/events/2014-02-03/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/events/2014-02-03/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/events/2015-10-07/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/events/2015-10-07/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/events/2015-10-07/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/firehose/2015-08-04/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/firehose/2015-08-04/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/gamelift/2015-10-01/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/gamelift/2015-10-01/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/gamelift/2015-10-01/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/glacier/2012-06-01/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/glacier/2012-06-01/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/glacier/2012-06-01/paginators-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/glacier/2012-06-01/waiters-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/iam/2010-05-08/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/iam/2010-05-08/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/iam/2010-05-08/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/iam/2010-05-08/paginators-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/iam/2010-05-08/waiters-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/importexport/2010-06-01/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/importexport/2010-06-01/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/importexport/2010-06-01/paginators-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/inspector/2015-08-18/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/inspector/2015-08-18/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/inspector/2015-08-18/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/inspector/2016-02-16/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/inspector/2016-02-16/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/inspector/2016-02-16/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/iot-data/2015-05-28/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/iot-data/2015-05-28/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/iot-data/2015-05-28/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/iot/2015-05-28/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/iot/2015-05-28/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/iot/2015-05-28/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/paginators-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/waiters-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/kms/2014-11-01/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/kms/2014-11-01/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/kms/2014-11-01/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/kms/2014-11-01/paginators-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/lambda/2014-11-11/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/lambda/2014-11-11/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/lambda/2014-11-11/paginators-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/lambda/2015-03-31/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/lambda/2015-03-31/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/lambda/2015-03-31/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/lambda/2015-03-31/paginators-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/logs/2014-03-28/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/logs/2014-03-28/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/logs/2014-03-28/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/logs/2014-03-28/paginators-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/machinelearning/2014-12-12/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/machinelearning/2014-12-12/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/machinelearning/2014-12-12/paginators-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/machinelearning/2014-12-12/waiters-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/marketplacecommerceanalytics/2015-07-01/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/marketplacecommerceanalytics/2015-07-01/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/marketplacecommerceanalytics/2015-07-01/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/meteringmarketplace/2016-01-14/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/meteringmarketplace/2016-01-14/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/meteringmarketplace/2016-01-14/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/mobileanalytics/2014-06-05/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/mobileanalytics/2014-06-05/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/monitoring/2010-08-01/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/monitoring/2010-08-01/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/monitoring/2010-08-01/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/monitoring/2010-08-01/paginators-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/monitoring/2010-08-01/waiters-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/opsworks/2013-02-18/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/opsworks/2013-02-18/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/opsworks/2013-02-18/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/opsworks/2013-02-18/paginators-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/opsworks/2013-02-18/waiters-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-01-10/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-01-10/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-01-10/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-01-10/paginators-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-02-12/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-02-12/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-02-12/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-02-12/paginators-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-09-09/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-09-09/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-09-09/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-09-09/paginators-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-09-09/waiters-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/rds/2014-09-01/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/rds/2014-09-01/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/rds/2014-09-01/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/paginators-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/waiters-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/redshift/2012-12-01/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/redshift/2012-12-01/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/redshift/2012-12-01/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/redshift/2012-12-01/paginators-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/redshift/2012-12-01/waiters-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/route53/2013-04-01/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/route53/2013-04-01/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/route53/2013-04-01/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/route53/2013-04-01/paginators-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/route53/2013-04-01/waiters-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/route53domains/2014-05-15/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/route53domains/2014-05-15/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/route53domains/2014-05-15/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/route53domains/2014-05-15/paginators-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/s3/2006-03-01/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/s3/2006-03-01/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/s3/2006-03-01/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/s3/2006-03-01/paginators-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/s3/2006-03-01/waiters-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/sdb/2009-04-15/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/sdb/2009-04-15/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/sdb/2009-04-15/paginators-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/servicecatalog/2015-12-10/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/servicecatalog/2015-12-10/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/servicecatalog/2015-12-10/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/sns/2010-03-31/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/sns/2010-03-31/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/sns/2010-03-31/paginators-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/sqs/2012-11-05/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/sqs/2012-11-05/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/sqs/2012-11-05/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/sqs/2012-11-05/paginators-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/paginators-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/storagegateway/2013-06-30/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/storagegateway/2013-06-30/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/storagegateway/2013-06-30/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/storagegateway/2013-06-30/paginators-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/streams.dynamodb/2012-08-10/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/streams.dynamodb/2012-08-10/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/streams.dynamodb/2012-08-10/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/sts/2011-06-15/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/sts/2011-06-15/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/sts/2011-06-15/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/support/2013-04-15/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/support/2013-04-15/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/support/2013-04-15/paginators-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/swf/2012-01-25/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/swf/2012-01-25/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/swf/2012-01-25/paginators-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/waf/2015-08-24/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/waf/2015-08-24/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/waf/2015-08-24/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/workspaces/2015-04-08/api-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/workspaces/2015-04-08/docs-2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/workspaces/2015-04-08/examples-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/apis/workspaces/2015-04-08/paginators-1.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/protocol_tests/generate.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/protocol_tests/input/ec2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/protocol_tests/input/json.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/protocol_tests/input/query.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/protocol_tests/input/rest-json.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/protocol_tests/input/rest-xml.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/protocol_tests/output/ec2.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/protocol_tests/output/json.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/protocol_tests/output/query.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/protocol_tests/output/rest-json.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/models/protocol_tests/output/rest-xml.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/README.md
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/model/api/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/model/api/api_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/model/api/customization_passes.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/model/api/docstring.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/model/api/exportable_name.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/model/api/load.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/model/api/load_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/model/api/operation.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/model/api/pagination.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/model/api/param_filler.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/model/api/passes.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/model/api/shape.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/model/api/shape_validation.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/model/api/shapetag_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/model/api/waiters.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/model/cli/api-info/api-info.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/model/cli/gen-api/main.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/model/cli/gen-endpoints/main.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/model/endpoints.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_bench_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/build_bench_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/build_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/protocol_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/build_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/build_bench_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/build_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/build_bench_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/build_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/unmarshal_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/signer/v2/v2.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/signer/v2/v2_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/util/sort_keys.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/util/util.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/waiter/waiter_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/sdk.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/acm/acmiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/acm/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/acm/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/acm/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/apigateway/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/apigateway/apigatewayiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/apigateway/customization.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/apigateway/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/apigateway/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/applicationautoscalingiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/applicationdiscoveryservice/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/applicationdiscoveryservice/applicationdiscoveryserviceiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/applicationdiscoveryservice/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/applicationdiscoveryservice/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/autoscaling/autoscalingiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/autoscaling/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/autoscaling/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/autoscaling/waiters.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudformation/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudformation/cloudformationiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudformation/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudformation/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudformation/waiters.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudfront/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudfront/cloudfrontiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudfront/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudfront/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/policy.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/policy_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/privkey.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/privkey_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/randomreader.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_cookie.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_cookie_example_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_cookie_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_url.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_url_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudfront/waiters.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudhsm/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudhsm/cloudhsmiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudhsm/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudhsm/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudsearch/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudsearch/cloudsearchiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudsearch/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudsearch/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudsearchdomain/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudsearchdomain/cloudsearchdomainiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudsearchdomain/customizations_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudsearchdomain/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudsearchdomain/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudtrail/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudtrail/cloudtrailiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudtrail/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudtrail/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudwatch/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudwatch/cloudwatchiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudwatch/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudwatch/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudwatch/waiters.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/cloudwatcheventsiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/cloudwatchlogsiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/codecommit/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/codecommit/codecommitiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/codecommit/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/codecommit/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/codedeploy/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/codedeploy/codedeployiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/codedeploy/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/codedeploy/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/codepipeline/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/codepipeline/codepipelineiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/codepipeline/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/codepipeline/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/cognitoidentityiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/customizations.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/customizations_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cognitoidentityprovider/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cognitoidentityprovider/cognitoidentityprovideriface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cognitoidentityprovider/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cognitoidentityprovider/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cognitosync/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cognitosync/cognitosynciface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cognitosync/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cognitosync/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/configservice/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/configservice/configserviceiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/configservice/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/configservice/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/databasemigrationserviceiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/datapipeline/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/datapipeline/datapipelineiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/datapipeline/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/datapipeline/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/devicefarm/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/devicefarm/devicefarmiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/devicefarm/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/devicefarm/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/directconnect/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/directconnect/directconnectiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/directconnect/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/directconnect/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/directoryservice/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/directoryservice/directoryserviceiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/directoryservice/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/directoryservice/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/customizations.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/customizations_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/converter.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/converter_examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/converter_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/decode.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/decode_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/doc.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/encode.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/encode_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/field.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/field_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/marshaler_examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/marshaler_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/shared_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/tag.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/tag_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/waiters.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodbstreams/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodbstreams/dynamodbstreamsiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodbstreams/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodbstreams/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ec2/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ec2/customizations_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ec2/ec2iface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ec2/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ec2/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ecr/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ecr/ecriface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ecr/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ecr/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ecs/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ecs/ecsiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ecs/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ecs/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ecs/waiters.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/efs/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/efs/efsiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/efs/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/efs/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/elasticache/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/elasticache/elasticacheiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/elasticache/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/elasticache/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/elasticache/waiters.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/elasticbeanstalkiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/elasticsearchserviceiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/elastictranscoderiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/waiters.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/elb/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/elb/elbiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/elb/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/elb/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/elb/waiters.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/emr/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/emr/emriface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/emr/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/emr/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/emr/waiters.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/firehose/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/firehose/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/firehose/firehoseiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/firehose/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/gamelift/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/gamelift/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/gamelift/gameliftiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/gamelift/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/generate.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/glacier/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/glacier/customizations.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/glacier/customizations_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/glacier/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/glacier/glacieriface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/glacier/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/glacier/treehash.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/glacier/treehash_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/glacier/waiters.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/iam/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/iam/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/iam/iamiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/iam/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/iam/waiters.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/inspector/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/inspector/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/inspector/inspectoriface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/inspector/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/iot/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/iot/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/iot/iotiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/iot/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/iotdataplane/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/iotdataplane/customizations_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/iotdataplane/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/iotdataplane/iotdataplaneiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/iotdataplane/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/kinesis/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/kinesis/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/kinesis/kinesisiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/kinesis/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/kinesis/waiters.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/kms/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/kms/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/kms/kmsiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/kms/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/lambda/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/lambda/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/lambda/lambdaiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/lambda/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/machinelearning/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/machinelearning/customizations.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/machinelearning/customizations_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/machinelearning/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/machinelearning/machinelearningiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/machinelearning/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/machinelearning/waiters.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/marketplacecommerceanalytics/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/marketplacecommerceanalytics/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/marketplacecommerceanalytics/marketplacecommerceanalyticsiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/marketplacecommerceanalytics/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/marketplacemetering/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/marketplacemetering/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/marketplacemetering/marketplacemeteringiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/marketplacemetering/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/mobileanalytics/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/mobileanalytics/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/mobileanalytics/mobileanalyticsiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/mobileanalytics/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/opsworks/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/opsworks/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/opsworks/opsworksiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/opsworks/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/opsworks/waiters.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/rds/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/rds/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/rds/rdsiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/rds/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/rds/waiters.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/redshift/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/redshift/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/redshift/redshiftiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/redshift/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/redshift/waiters.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/route53/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/route53/customizations.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/route53/customizations_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/route53/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/route53/route53iface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/route53/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/route53/unmarshal_error.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/route53/unmarshal_error_leak_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/route53/unmarshal_error_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/route53/waiters.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/route53domains/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/route53domains/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/route53domains/route53domainsiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/route53domains/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/customizations_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/shared_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/sse.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/sse_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error_leak_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/servicecatalog/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/servicecatalog/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/servicecatalog/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/servicecatalog/servicecatalogiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ses/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ses/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ses/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ses/sesiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ses/waiters.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/simpledb/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/simpledb/customizations.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/simpledb/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/simpledb/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/simpledb/simpledbiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/simpledb/unmarshal_error_leak_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/simpledb/unmarshall_error.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/simpledb/unmarshall_error_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sns/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sns/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sns/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sns/snsiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sqs/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sqs/api_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sqs/checksums.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sqs/checksums_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sqs/customizations.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sqs/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sqs/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sqs/sqsiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ssm/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ssm/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ssm/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ssm/ssmiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/storagegateway/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/storagegateway/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/storagegateway/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/storagegateway/storagegatewayiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/customizations_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/support/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/support/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/support/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/support/supportiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/swf/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/swf/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/swf/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/swf/swfiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/waf/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/waf/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/waf/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/waf/wafiface/interface.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/workspaces/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/workspaces/examples_test.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/workspaces/service.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/workspaces/workspacesiface/interface.go
 create mode 100644 vendor/github.com/coreos/go-semver/.travis.yml
 create mode 100644 vendor/github.com/coreos/go-semver/LICENSE
 create mode 100644 vendor/github.com/coreos/go-semver/README.md
 create mode 100644 vendor/github.com/coreos/go-semver/example.go
 create mode 100644 vendor/github.com/coreos/go-semver/semver/semver.go
 create mode 100644 vendor/github.com/coreos/go-semver/semver/semver_test.go
 create mode 100644 vendor/github.com/coreos/go-semver/semver/sort.go
 create mode 100644 vendor/github.com/funcy/functions_go/client/call/get_apps_app_calls_call_parameters.go
 create mode 100644 vendor/github.com/funcy/functions_go/client/call/get_apps_app_calls_call_responses.go
 create mode 100644 vendor/github.com/funcy/functions_go/client/call/get_apps_app_calls_parameters.go
 create mode 100644 vendor/github.com/funcy/functions_go/client/call/get_apps_app_calls_responses.go
 create mode 100644 vendor/github.com/funcy/functions_go/client/operations/delete_apps_app_calls_call_log_parameters.go
 create mode 100644 vendor/github.com/funcy/functions_go/client/operations/delete_apps_app_calls_call_log_responses.go
 create mode 100644 vendor/github.com/funcy/functions_go/client/operations/get_apps_app_calls_call_log_parameters.go
 create mode 100644 vendor/github.com/funcy/functions_go/client/operations/get_apps_app_calls_call_log_responses.go
 create mode 100644 vendor/github.com/giantswarm/semver-bump/.gitignore
 create mode 100644 vendor/github.com/giantswarm/semver-bump/.travis.yml
 create mode 100644 vendor/github.com/giantswarm/semver-bump/LICENSE
 create mode 100644 vendor/github.com/giantswarm/semver-bump/Makefile
 create mode 100644 vendor/github.com/giantswarm/semver-bump/README.md
 create mode 100644 vendor/github.com/giantswarm/semver-bump/VERSION
 create mode 100644 vendor/github.com/giantswarm/semver-bump/bump/bump.go
 create mode 100644 vendor/github.com/giantswarm/semver-bump/bump/bump_test.go
 create mode 100644 vendor/github.com/giantswarm/semver-bump/commands/bump_major.go
 create mode 100644 vendor/github.com/giantswarm/semver-bump/commands/bump_minor.go
 create mode 100644 vendor/github.com/giantswarm/semver-bump/commands/bump_patch.go
 create mode 100644 vendor/github.com/giantswarm/semver-bump/commands/init.go
 create mode 100644 vendor/github.com/giantswarm/semver-bump/commands/semver_bump.go
 create mode 100644 vendor/github.com/giantswarm/semver-bump/commands/util.go
 create mode 100644 vendor/github.com/giantswarm/semver-bump/commands/version.go
 create mode 100644 vendor/github.com/giantswarm/semver-bump/main.go
 create mode 100644 vendor/github.com/giantswarm/semver-bump/storage/version_storage.go
 create mode 100644 vendor/github.com/giantswarm/semver-bump/storage/version_storage_file.go
 create mode 100644 vendor/github.com/giantswarm/semver-bump/storage/version_storage_file_test.go
 create mode 100644 vendor/github.com/giantswarm/semver-bump/storage/version_storage_local.go
 create mode 100644 vendor/github.com/giantswarm/semver-bump/storage/version_storage_local_test.go
 create mode 100644 vendor/github.com/giantswarm/semver-bump/storage/version_storage_util.go
 create mode 100644 vendor/github.com/go-ini/ini/.gitignore
 create mode 100644 vendor/github.com/go-ini/ini/.travis.yml
 create mode 100644 vendor/github.com/go-ini/ini/LICENSE
 create mode 100644 vendor/github.com/go-ini/ini/Makefile
 create mode 100644 vendor/github.com/go-ini/ini/README.md
 create mode 100644 vendor/github.com/go-ini/ini/README_ZH.md
 create mode 100644 vendor/github.com/go-ini/ini/error.go
 create mode 100644 vendor/github.com/go-ini/ini/ini.go
 create mode 100644 vendor/github.com/go-ini/ini/ini_test.go
 create mode 100644 vendor/github.com/go-ini/ini/key.go
 create mode 100644 vendor/github.com/go-ini/ini/key_test.go
 create mode 100644 vendor/github.com/go-ini/ini/parser.go
 create mode 100644 vendor/github.com/go-ini/ini/parser_test.go
 create mode 100644 vendor/github.com/go-ini/ini/section.go
 create mode 100644 vendor/github.com/go-ini/ini/section_test.go
 create mode 100644 vendor/github.com/go-ini/ini/struct.go
 create mode 100644 vendor/github.com/go-ini/ini/struct_test.go
 create mode 100644 vendor/github.com/go-ini/ini/testdata/UTF-16-BE-BOM.ini
 create mode 100644 vendor/github.com/go-ini/ini/testdata/UTF-16-LE-BOM.ini
 create mode 100644 vendor/github.com/go-ini/ini/testdata/UTF-8-BOM.ini
 create mode 100644 vendor/github.com/go-ini/ini/testdata/aicc.ini
 create mode 100644 vendor/github.com/go-ini/ini/testdata/conf.ini
 create mode 100644 vendor/github.com/go-resty/resty/.gitignore
 create mode 100644 vendor/github.com/go-resty/resty/.travis.yml
 create mode 100644 vendor/github.com/go-resty/resty/LICENSE
 create mode 100644 vendor/github.com/go-resty/resty/README.md
 create mode 100644 vendor/github.com/go-resty/resty/client.go
 create mode 100644 vendor/github.com/go-resty/resty/client_test.go
 create mode 100644 vendor/github.com/go-resty/resty/context17_test.go
 create mode 100644 vendor/github.com/go-resty/resty/context18_test.go
 create mode 100644 vendor/github.com/go-resty/resty/context_test.go
 create mode 100644 vendor/github.com/go-resty/resty/default.go
 create mode 100644 vendor/github.com/go-resty/resty/example_test.go
 create mode 100644 vendor/github.com/go-resty/resty/middleware.go
 create mode 100644 vendor/github.com/go-resty/resty/redirect.go
 create mode 100644 vendor/github.com/go-resty/resty/request.go
 create mode 100644 vendor/github.com/go-resty/resty/request16.go
 create mode 100644 vendor/github.com/go-resty/resty/request17.go
 create mode 100644 vendor/github.com/go-resty/resty/response.go
 create mode 100644 vendor/github.com/go-resty/resty/resty.go
 create mode 100644 vendor/github.com/go-resty/resty/resty_test.go
 create mode 100644 vendor/github.com/go-resty/resty/retry.go
 create mode 100644 vendor/github.com/go-resty/resty/retry_test.go
 create mode 100644 vendor/github.com/go-resty/resty/test-data/test-img.png
 create mode 100644 vendor/github.com/go-resty/resty/test-data/text-file.txt
 create mode 100644 vendor/github.com/jmespath/go-jmespath/.gitignore
 create mode 100644 vendor/github.com/jmespath/go-jmespath/.travis.yml
 create mode 100644 vendor/github.com/jmespath/go-jmespath/LICENSE
 create mode 100644 vendor/github.com/jmespath/go-jmespath/Makefile
 create mode 100644 vendor/github.com/jmespath/go-jmespath/README.md
 create mode 100644 vendor/github.com/jmespath/go-jmespath/api.go
 create mode 100644 vendor/github.com/jmespath/go-jmespath/api_test.go
 create mode 100644 vendor/github.com/jmespath/go-jmespath/astnodetype_string.go
 create mode 100644 vendor/github.com/jmespath/go-jmespath/cmd/jpgo/main.go
 create mode 100644 vendor/github.com/jmespath/go-jmespath/compliance/basic.json
 create mode 100644 vendor/github.com/jmespath/go-jmespath/compliance/boolean.json
 create mode 100644 vendor/github.com/jmespath/go-jmespath/compliance/current.json
 create mode 100644 vendor/github.com/jmespath/go-jmespath/compliance/escape.json
 create mode 100644 vendor/github.com/jmespath/go-jmespath/compliance/filters.json
 create mode 100644 vendor/github.com/jmespath/go-jmespath/compliance/functions.json
 create mode 100644 vendor/github.com/jmespath/go-jmespath/compliance/identifiers.json
 create mode 100644 vendor/github.com/jmespath/go-jmespath/compliance/indices.json
 create mode 100644 vendor/github.com/jmespath/go-jmespath/compliance/literal.json
 create mode 100644 vendor/github.com/jmespath/go-jmespath/compliance/multiselect.json
 create mode 100644 vendor/github.com/jmespath/go-jmespath/compliance/ormatch.json
 create mode 100644 vendor/github.com/jmespath/go-jmespath/compliance/pipe.json
 create mode 100644 vendor/github.com/jmespath/go-jmespath/compliance/slice.json
 create mode 100644 vendor/github.com/jmespath/go-jmespath/compliance/syntax.json
 create mode 100644 vendor/github.com/jmespath/go-jmespath/compliance/unicode.json
 create mode 100644 vendor/github.com/jmespath/go-jmespath/compliance/wildcard.json
 create mode 100644 vendor/github.com/jmespath/go-jmespath/compliance_test.go
 create mode 100644 vendor/github.com/jmespath/go-jmespath/functions.go
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/jmespath.go
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-1
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-10
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-100
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-101
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-102
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-103
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-104
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-105
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-106
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-107
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-108
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-109
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-110
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-112
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-115
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-118
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-119
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-12
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-120
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-121
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-122
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-123
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-126
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-128
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-129
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-13
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-130
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-131
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-132
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-133
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-134
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-135
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-136
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-137
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-138
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-139
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-14
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-140
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-141
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-142
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-143
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-144
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-145
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-146
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-147
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-148
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-149
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-15
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-150
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-151
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-152
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-153
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-155
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-156
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-157
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-158
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-159
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-16
 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-160
 create mode
100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-161 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-162 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-163 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-164 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-165 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-166 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-167 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-168 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-169 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-17 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-170 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-171 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-172 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-173 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-174 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-175 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-178 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-179 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-18 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-180 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-181 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-182 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-183 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-184 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-185 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-186 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-187 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-188 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-189 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-19 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-190 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-191 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-192 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-193 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-194 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-195 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-196 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-198 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-199 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-2 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-20 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-200 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-201 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-202 create 
mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-203 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-204 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-205 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-206 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-207 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-208 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-209 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-21 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-210 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-211 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-212 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-213 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-214 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-215 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-216 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-217 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-218 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-219 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-22 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-220 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-221 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-222 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-223 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-224 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-225 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-226 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-227 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-228 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-229 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-23 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-230 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-231 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-232 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-233 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-234 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-235 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-236 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-237 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-238 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-239 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-24 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-240 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-241 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-242 
create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-243 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-244 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-245 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-246 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-247 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-248 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-249 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-25 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-250 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-251 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-252 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-253 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-254 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-255 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-256 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-257 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-258 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-259 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-26 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-260 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-261 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-262 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-263 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-264 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-265 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-266 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-267 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-268 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-269 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-27 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-270 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-271 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-272 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-273 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-274 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-275 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-276 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-277 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-278 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-279 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-28 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-280 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-281 create mode 100644 
vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-282 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-283 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-284 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-285 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-286 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-287 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-288 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-289 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-29 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-290 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-291 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-292 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-293 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-294 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-295 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-296 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-297 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-298 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-299 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-3 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-30 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-300 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-301 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-302 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-303 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-304 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-305 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-306 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-307 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-308 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-309 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-31 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-310 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-311 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-312 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-313 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-314 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-315 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-316 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-317 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-318 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-319 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-32 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-320 create mode 
100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-321 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-322 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-323 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-324 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-325 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-326 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-327 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-328 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-329 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-33 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-330 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-331 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-332 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-333 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-334 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-335 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-336 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-337 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-338 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-339 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-34 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-340 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-341 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-342 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-343 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-344 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-345 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-346 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-347 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-348 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-349 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-35 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-350 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-351 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-352 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-353 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-354 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-355 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-356 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-357 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-358 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-359 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-36 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-360 create 
mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-361 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-362 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-363 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-364 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-365 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-366 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-367 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-368 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-369 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-37 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-370 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-371 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-372 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-373 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-374 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-375 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-376 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-377 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-378 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-379 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-38 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-380 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-381 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-382 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-383 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-384 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-385 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-386 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-387 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-388 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-389 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-39 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-390 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-391 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-392 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-393 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-394 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-395 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-396 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-397 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-398 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-399 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-4 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-40 
create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-400 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-401 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-402 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-403 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-404 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-405 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-406 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-407 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-408 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-409 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-41 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-410 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-411 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-412 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-413 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-414 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-415 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-416 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-417 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-418 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-419 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-42 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-420 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-421 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-422 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-423 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-424 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-425 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-426 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-427 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-428 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-429 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-43 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-430 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-431 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-432 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-433 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-434 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-435 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-436 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-437 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-438 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-439 create mode 100644 
vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-44 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-440 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-441 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-442 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-443 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-444 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-445 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-446 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-447 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-448 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-449 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-45 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-450 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-451 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-452 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-453 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-454 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-455 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-456 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-457 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-458 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-459 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-46 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-460 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-461 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-462 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-463 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-464 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-465 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-466 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-467 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-468 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-469 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-47 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-470 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-471 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-472 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-473 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-474 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-475 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-476 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-477 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-478 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-479 create mode 
100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-48 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-480 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-481 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-482 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-483 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-484 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-485 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-486 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-487 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-488 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-489 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-49 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-490 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-491 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-492 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-493 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-494 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-495 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-496 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-497 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-498 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-499 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-5 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-50 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-500 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-501 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-502 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-503 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-504 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-505 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-506 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-507 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-508 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-509 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-51 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-510 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-511 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-512 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-513 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-514 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-515 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-516 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-517 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-518 create 
mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-519 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-52 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-520 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-521 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-522 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-523 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-524 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-525 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-526 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-527 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-528 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-529 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-53 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-530 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-531 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-532 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-533 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-534 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-535 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-536 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-537 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-538 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-539 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-54 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-540 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-541 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-542 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-543 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-544 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-545 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-546 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-547 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-548 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-549 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-55 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-550 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-551 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-552 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-553 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-554 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-555 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-556 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-557 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-558 
create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-559 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-56 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-560 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-561 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-562 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-563 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-564 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-565 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-566 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-567 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-568 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-569 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-57 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-570 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-571 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-572 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-573 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-574 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-575 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-576 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-577 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-578 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-579 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-58 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-580 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-581 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-582 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-583 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-584 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-585 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-586 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-587 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-588 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-589 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-59 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-590 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-591 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-592 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-593 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-594 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-595 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-596 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-597 create mode 100644 
vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-598 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-599 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-6 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-60 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-600 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-601 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-602 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-603 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-604 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-605 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-606 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-607 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-608 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-609 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-61 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-610 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-611 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-612 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-613 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-614 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-615 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-616 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-617 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-618 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-619 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-62 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-620 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-621 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-622 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-623 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-624 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-625 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-626 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-627 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-628 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-629 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-63 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-630 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-631 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-632 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-633 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-634 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-635 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-636 create mode 
100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-637 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-638 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-639 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-64 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-640 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-641 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-642 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-643 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-644 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-645 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-646 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-647 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-648 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-649 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-65 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-650 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-651 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-652 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-653 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-654 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-655 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-656 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-66 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-67 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-68 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-69 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-7 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-70 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-71 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-72 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-73 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-74 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-75 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-76 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-77 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-78 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-79 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-8 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-80 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-81 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-82 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-83 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-84 create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-85 create mode 100644 
vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-86
create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-87
create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-88
create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-89
create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-9
create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-90
create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-91
create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-92
create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-93
create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-94
create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-95
create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-96
create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-97
create mode 100644 vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-98
create mode 100644 vendor/github.com/jmespath/go-jmespath/interpreter.go
create mode 100644 vendor/github.com/jmespath/go-jmespath/interpreter_test.go
create mode 100644 vendor/github.com/jmespath/go-jmespath/lexer.go
create mode 100644 vendor/github.com/jmespath/go-jmespath/lexer_test.go
create mode 100644 vendor/github.com/jmespath/go-jmespath/parser.go
create mode 100644 vendor/github.com/jmespath/go-jmespath/parser_test.go
create mode 100644 vendor/github.com/jmespath/go-jmespath/toktype_string.go
create mode 100644 vendor/github.com/jmespath/go-jmespath/util.go
create mode 100644 vendor/github.com/jmespath/go-jmespath/util_test.go
create mode 100644 vendor/github.com/juju/errgo/LICENSE
create mode 100644 vendor/github.com/juju/errgo/README.markdown
create mode 100644 vendor/github.com/juju/errgo/errors.go
create mode 100644 vendor/github.com/juju/errgo/errors/errors.go
create mode 100644 vendor/github.com/juju/errgo/errors/errors_test.go
create mode 100644 vendor/github.com/juju/errgo/errors/export_test.go
create mode 100644 vendor/github.com/juju/errgo/errors_test.go
create mode 100644 vendor/github.com/juju/errgo/export_test.go
create mode 100644 vendor/github.com/moby/moby/.dockerignore
create mode 100644 vendor/github.com/moby/moby/.github/ISSUE_TEMPLATE.md
create mode 100644 vendor/github.com/moby/moby/.github/PULL_REQUEST_TEMPLATE.md
create mode 100644 vendor/github.com/moby/moby/.gitignore
create mode 100644 vendor/github.com/moby/moby/.mailmap
create mode 100644 vendor/github.com/moby/moby/AUTHORS
create mode 100644 vendor/github.com/moby/moby/CHANGELOG.md
create mode 100644 vendor/github.com/moby/moby/CONTRIBUTING.md
create mode 100644 vendor/github.com/moby/moby/Dockerfile
create mode 100644 vendor/github.com/moby/moby/Dockerfile.aarch64
create mode 100644 vendor/github.com/moby/moby/Dockerfile.armhf
create mode 100644 vendor/github.com/moby/moby/Dockerfile.ppc64le
create mode 100644 vendor/github.com/moby/moby/Dockerfile.s390x
create mode 100644 vendor/github.com/moby/moby/Dockerfile.simple
create mode 100644 vendor/github.com/moby/moby/Dockerfile.solaris
create mode 100644 vendor/github.com/moby/moby/Dockerfile.windows
create mode 100644 vendor/github.com/moby/moby/LICENSE
create mode 100644 vendor/github.com/moby/moby/MAINTAINERS
create mode 100644 vendor/github.com/moby/moby/Makefile
create mode 100644 vendor/github.com/moby/moby/NOTICE
create mode 100644 vendor/github.com/moby/moby/README.md
create mode 100644 vendor/github.com/moby/moby/ROADMAP.md
create mode 100644 vendor/github.com/moby/moby/VENDORING.md
create mode 100644 vendor/github.com/moby/moby/VERSION
create mode 100644 vendor/github.com/moby/moby/api/README.md
create mode 100644 vendor/github.com/moby/moby/api/common.go
create mode 100644 vendor/github.com/moby/moby/api/common_test.go
create mode 100644 vendor/github.com/moby/moby/api/common_unix.go
create mode 100644 vendor/github.com/moby/moby/api/common_windows.go
create mode 100644 vendor/github.com/moby/moby/api/errors/errors.go
create mode 100644 vendor/github.com/moby/moby/api/errors/errors_test.go
create mode 100644 vendor/github.com/moby/moby/api/fixtures/keyfile
create mode 100644 vendor/github.com/moby/moby/api/names.go
create mode 100644 vendor/github.com/moby/moby/api/server/backend/build/backend.go
create mode 100644 vendor/github.com/moby/moby/api/server/backend/build/tag.go
create mode 100644 vendor/github.com/moby/moby/api/server/httputils/decoder.go
create mode 100644 vendor/github.com/moby/moby/api/server/httputils/errors.go
create mode 100644 vendor/github.com/moby/moby/api/server/httputils/form.go
create mode 100644 vendor/github.com/moby/moby/api/server/httputils/form_test.go
create mode 100644 vendor/github.com/moby/moby/api/server/httputils/httputils.go
create mode 100644 vendor/github.com/moby/moby/api/server/httputils/httputils_test.go
create mode 100644 vendor/github.com/moby/moby/api/server/httputils/httputils_write_json.go
create mode 100644 vendor/github.com/moby/moby/api/server/httputils/write_log_stream.go
create mode 100644 vendor/github.com/moby/moby/api/server/middleware.go
create mode 100644 vendor/github.com/moby/moby/api/server/middleware/cors.go
create mode 100644 vendor/github.com/moby/moby/api/server/middleware/debug.go
create mode 100644 vendor/github.com/moby/moby/api/server/middleware/debug_test.go
create mode 100644 vendor/github.com/moby/moby/api/server/middleware/experimental.go
create mode 100644 vendor/github.com/moby/moby/api/server/middleware/middleware.go
create mode 100644 vendor/github.com/moby/moby/api/server/middleware/version.go
create mode 100644 vendor/github.com/moby/moby/api/server/middleware/version_test.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/build/backend.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/build/build.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/build/build_routes.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/checkpoint/backend.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/checkpoint/checkpoint.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/checkpoint/checkpoint_routes.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/container/backend.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/container/container.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/container/container_routes.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/container/copy.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/container/exec.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/container/inspect.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/debug/debug.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/debug/debug_routes.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/distribution/backend.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/distribution/distribution.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/distribution/distribution_routes.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/experimental.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/image/backend.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/image/image.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/image/image_routes.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/local.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/network/backend.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/network/filter.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/network/filter_test.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/network/network.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/network/network_routes.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/plugin/backend.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/plugin/plugin.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/plugin/plugin_routes.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/router.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/session/backend.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/session/session.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/session/session_routes.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/swarm/backend.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/swarm/cluster.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/swarm/cluster_routes.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/swarm/helpers.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/system/backend.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/system/system.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/system/system_routes.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/volume/backend.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/volume/volume.go
create mode 100644 vendor/github.com/moby/moby/api/server/router/volume/volume_routes.go
create mode 100644 vendor/github.com/moby/moby/api/server/router_swapper.go
create mode 100644 vendor/github.com/moby/moby/api/server/server.go
create mode 100644 vendor/github.com/moby/moby/api/server/server_test.go
create mode 100644 vendor/github.com/moby/moby/api/swagger-gen.yaml
create mode 100644 vendor/github.com/moby/moby/api/swagger.yaml
create mode 100644 vendor/github.com/moby/moby/api/templates/server/operation.gotmpl
create mode 100644 vendor/github.com/moby/moby/api/types/auth.go
create mode 100644 vendor/github.com/moby/moby/api/types/backend/backend.go
create mode 100644 vendor/github.com/moby/moby/api/types/backend/build.go
create mode 100644 vendor/github.com/moby/moby/api/types/blkiodev/blkio.go
create mode 100644 vendor/github.com/moby/moby/api/types/client.go
create mode 100644 vendor/github.com/moby/moby/api/types/configs.go
create mode 100644 vendor/github.com/moby/moby/api/types/container/config.go
create mode 100644 vendor/github.com/moby/moby/api/types/container/container_changes.go
create mode 100644 vendor/github.com/moby/moby/api/types/container/container_create.go
create mode 100644 vendor/github.com/moby/moby/api/types/container/container_top.go
create mode 100644 vendor/github.com/moby/moby/api/types/container/container_update.go
create mode 100644 vendor/github.com/moby/moby/api/types/container/container_wait.go
create mode 100644 vendor/github.com/moby/moby/api/types/container/host_config.go
create mode 100644 vendor/github.com/moby/moby/api/types/container/hostconfig_unix.go
create mode 100644 vendor/github.com/moby/moby/api/types/container/hostconfig_windows.go
create mode 100644 vendor/github.com/moby/moby/api/types/container/waitcondition.go
create mode 100644 vendor/github.com/moby/moby/api/types/error_response.go
create mode 100644 vendor/github.com/moby/moby/api/types/events/events.go
create mode 100644 vendor/github.com/moby/moby/api/types/filters/parse.go
create mode 100644 vendor/github.com/moby/moby/api/types/filters/parse_test.go
create mode 100644 vendor/github.com/moby/moby/api/types/graph_driver_data.go
create mode 100644 vendor/github.com/moby/moby/api/types/id_response.go
create mode 100644 vendor/github.com/moby/moby/api/types/image/image_history.go
create mode 100644 vendor/github.com/moby/moby/api/types/image_delete_response_item.go
create mode 100644 vendor/github.com/moby/moby/api/types/image_summary.go
create mode 100644 vendor/github.com/moby/moby/api/types/mount/mount.go
create mode 100644 vendor/github.com/moby/moby/api/types/network/network.go
create mode 100644 vendor/github.com/moby/moby/api/types/plugin.go
create mode 100644 vendor/github.com/moby/moby/api/types/plugin_device.go
create mode 100644 vendor/github.com/moby/moby/api/types/plugin_env.go
create mode 100644 vendor/github.com/moby/moby/api/types/plugin_interface_type.go
create mode 100644 vendor/github.com/moby/moby/api/types/plugin_mount.go
create mode 100644 vendor/github.com/moby/moby/api/types/plugin_responses.go
create mode 100644 vendor/github.com/moby/moby/api/types/plugins/logdriver/entry.pb.go
create mode 100644 vendor/github.com/moby/moby/api/types/plugins/logdriver/entry.proto
create mode 100644 vendor/github.com/moby/moby/api/types/plugins/logdriver/gen.go
create mode 100644 vendor/github.com/moby/moby/api/types/plugins/logdriver/io.go
create mode 100644 vendor/github.com/moby/moby/api/types/port.go
create mode 100644 vendor/github.com/moby/moby/api/types/registry/authenticate.go
create mode 100644 vendor/github.com/moby/moby/api/types/registry/registry.go
create mode 100644 vendor/github.com/moby/moby/api/types/seccomp.go
create mode 100644 vendor/github.com/moby/moby/api/types/service_update_response.go
create mode 100644 vendor/github.com/moby/moby/api/types/stats.go
create mode 100644 vendor/github.com/moby/moby/api/types/strslice/strslice.go
create mode 100644 vendor/github.com/moby/moby/api/types/strslice/strslice_test.go
create mode 100644 vendor/github.com/moby/moby/api/types/swarm/common.go
create mode 100644 vendor/github.com/moby/moby/api/types/swarm/config.go
create mode 100644 vendor/github.com/moby/moby/api/types/swarm/container.go
create mode 100644 vendor/github.com/moby/moby/api/types/swarm/network.go
create mode 100644 vendor/github.com/moby/moby/api/types/swarm/node.go
create mode 100644 vendor/github.com/moby/moby/api/types/swarm/runtime.go
create mode 100644 vendor/github.com/moby/moby/api/types/swarm/runtime/gen.go
create mode 100644 vendor/github.com/moby/moby/api/types/swarm/runtime/plugin.pb.go
create mode 100644 vendor/github.com/moby/moby/api/types/swarm/runtime/plugin.proto
create mode 100644 vendor/github.com/moby/moby/api/types/swarm/secret.go
create mode 100644 vendor/github.com/moby/moby/api/types/swarm/service.go
create mode 100644 vendor/github.com/moby/moby/api/types/swarm/swarm.go
create mode 100644 vendor/github.com/moby/moby/api/types/swarm/task.go
create mode 100644 vendor/github.com/moby/moby/api/types/time/duration_convert.go
create mode 100644 vendor/github.com/moby/moby/api/types/time/duration_convert_test.go
create mode 100644 vendor/github.com/moby/moby/api/types/time/timestamp.go
create mode 100644 vendor/github.com/moby/moby/api/types/time/timestamp_test.go
create mode 100644 vendor/github.com/moby/moby/api/types/types.go
create mode 100644 vendor/github.com/moby/moby/api/types/versions/README.md
create mode 100644 vendor/github.com/moby/moby/api/types/versions/compare.go
create mode 100644 vendor/github.com/moby/moby/api/types/versions/compare_test.go
create mode 100644 vendor/github.com/moby/moby/api/types/versions/v1p19/types.go
create mode 100644 vendor/github.com/moby/moby/api/types/versions/v1p20/types.go
create mode 100644 vendor/github.com/moby/moby/api/types/volume.go
create mode 100644 vendor/github.com/moby/moby/api/types/volume/volumes_create.go
create mode 100644 vendor/github.com/moby/moby/api/types/volume/volumes_list.go
create mode 100644 vendor/github.com/moby/moby/builder/builder.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/bflag.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/bflag_test.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/buildargs.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/buildargs_test.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/builder.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/builder_test.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/builder_unix.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/builder_windows.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/clientsession.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/command/command.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/containerbackend.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/copy.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/copy_test.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/copy_unix.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/copy_windows.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/dispatchers.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/dispatchers_test.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/dispatchers_unix.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/dispatchers_unix_test.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/dispatchers_windows.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/dispatchers_windows_test.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/envVarTest
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/evaluator.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/evaluator_test.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/evaluator_unix.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/evaluator_windows.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/imagecontext.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/imageprobe.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/internals.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/internals_test.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/internals_unix.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/internals_windows.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/internals_windows_test.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/metrics.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/mockbackend_test.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/dumper/main.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/json_test.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/line_parsers.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/line_parsers_test.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/parser.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/parser_test.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/split_command.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfile-line/Dockerfile
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/brimstone-consuldock/result
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/brimstone-docker-consul/result
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/continue-at-eof/Dockerfile
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/continue-at-eof/result
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/continueIndent/Dockerfile
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/continueIndent/result
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/cpuguy83-nagios/result
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/docker/Dockerfile
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/docker/result
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/env/Dockerfile
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/env/result
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape-after-comment/Dockerfile
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape-after-comment/result
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape-nonewline/Dockerfile
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape-nonewline/result
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape/Dockerfile
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape/result
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escapes/Dockerfile
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escapes/result
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/flags/Dockerfile
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/flags/result
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/health/Dockerfile
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/health/result
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/influxdb/Dockerfile
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/influxdb/result
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/json/Dockerfile
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/json/result
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/mail/Dockerfile
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/mail/result
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/multiple-volumes/Dockerfile
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/multiple-volumes/result
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/mumble/Dockerfile
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/mumble/result
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/nginx/Dockerfile
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/nginx/result
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/tf2/Dockerfile
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/tf2/result
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/weechat/Dockerfile
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/weechat/result
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/znc/Dockerfile
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/znc/result
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/shell_parser.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/shell_parser_test.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/support.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/support_test.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/utils_test.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerfile/wordsTest
create mode 100644 vendor/github.com/moby/moby/builder/dockerignore/dockerignore.go
create mode 100644 vendor/github.com/moby/moby/builder/dockerignore/dockerignore_test.go
create mode 100644 vendor/github.com/moby/moby/builder/fscache/fscache.go
create mode 100644 vendor/github.com/moby/moby/builder/fscache/fscache_test.go
create mode 100644 vendor/github.com/moby/moby/builder/fscache/naivedriver.go
create mode 100644 vendor/github.com/moby/moby/builder/remotecontext/archive.go
create mode 100644 vendor/github.com/moby/moby/builder/remotecontext/detect.go
create mode 100644 vendor/github.com/moby/moby/builder/remotecontext/detect_test.go
create mode 100644 vendor/github.com/moby/moby/builder/remotecontext/filehash.go
create mode 100644 vendor/github.com/moby/moby/builder/remotecontext/generate.go
create mode 100644 vendor/github.com/moby/moby/builder/remotecontext/git.go
create mode 100644 vendor/github.com/moby/moby/builder/remotecontext/git/gitutils.go
create mode 100644 vendor/github.com/moby/moby/builder/remotecontext/git/gitutils_test.go
create mode 100644 vendor/github.com/moby/moby/builder/remotecontext/lazycontext.go
create mode 100644 vendor/github.com/moby/moby/builder/remotecontext/mimetype.go
create mode 100644 vendor/github.com/moby/moby/builder/remotecontext/mimetype_test.go
create mode 100644 vendor/github.com/moby/moby/builder/remotecontext/remote.go
create mode 100644 vendor/github.com/moby/moby/builder/remotecontext/remote_test.go
create mode 100644 vendor/github.com/moby/moby/builder/remotecontext/tarsum.go
create mode 100644 vendor/github.com/moby/moby/builder/remotecontext/tarsum.pb.go
create mode 100644 vendor/github.com/moby/moby/builder/remotecontext/tarsum.proto
create mode 100644 vendor/github.com/moby/moby/builder/remotecontext/tarsum_test.go
create mode 100644 vendor/github.com/moby/moby/builder/remotecontext/utils_test.go
create mode 100644 vendor/github.com/moby/moby/cli/cobra.go
create mode 100644 vendor/github.com/moby/moby/cli/config/configdir.go
create mode 100644 vendor/github.com/moby/moby/cli/debug/debug.go
create mode 100644 vendor/github.com/moby/moby/cli/debug/debug_test.go
create mode 100644 vendor/github.com/moby/moby/cli/error.go
create mode 100644 vendor/github.com/moby/moby/cli/required.go
create mode 100644 vendor/github.com/moby/moby/client/README.md
create mode 100644 vendor/github.com/moby/moby/client/build_prune.go
create mode 100644 vendor/github.com/moby/moby/client/checkpoint_create.go
create mode 100644 vendor/github.com/moby/moby/client/checkpoint_create_test.go
create mode 100644 vendor/github.com/moby/moby/client/checkpoint_delete.go
create mode 100644 vendor/github.com/moby/moby/client/checkpoint_delete_test.go
create mode 100644 vendor/github.com/moby/moby/client/checkpoint_list.go
create mode 100644 vendor/github.com/moby/moby/client/checkpoint_list_test.go
create mode 100644 vendor/github.com/moby/moby/client/client.go
create mode 100644 vendor/github.com/moby/moby/client/client_mock_test.go
create mode 100644 vendor/github.com/moby/moby/client/client_test.go
create mode 100644 vendor/github.com/moby/moby/client/client_unix.go
create mode 100644 vendor/github.com/moby/moby/client/client_windows.go
create mode 100644 vendor/github.com/moby/moby/client/config_create.go
create mode 100644 vendor/github.com/moby/moby/client/config_create_test.go
create mode 100644 vendor/github.com/moby/moby/client/config_inspect.go
create mode 100644 vendor/github.com/moby/moby/client/config_inspect_test.go
create mode 100644 vendor/github.com/moby/moby/client/config_list.go
create mode 100644 vendor/github.com/moby/moby/client/config_list_test.go
create mode 100644 vendor/github.com/moby/moby/client/config_remove.go
create mode 100644 vendor/github.com/moby/moby/client/config_remove_test.go
create mode 100644 vendor/github.com/moby/moby/client/config_update.go
create mode 100644 vendor/github.com/moby/moby/client/config_update_test.go
create mode 100644 vendor/github.com/moby/moby/client/container_attach.go
create mode 100644 vendor/github.com/moby/moby/client/container_commit.go
create mode 100644 vendor/github.com/moby/moby/client/container_commit_test.go
create mode 100644 vendor/github.com/moby/moby/client/container_copy.go
create mode 100644 vendor/github.com/moby/moby/client/container_copy_test.go
create mode 100644 vendor/github.com/moby/moby/client/container_create.go
create mode 100644 vendor/github.com/moby/moby/client/container_create_test.go
create mode 100644 vendor/github.com/moby/moby/client/container_diff.go
create mode 100644 vendor/github.com/moby/moby/client/container_diff_test.go
create mode 100644 vendor/github.com/moby/moby/client/container_exec.go
create mode 100644 vendor/github.com/moby/moby/client/container_exec_test.go
create mode 100644 vendor/github.com/moby/moby/client/container_export.go
create mode 100644 vendor/github.com/moby/moby/client/container_export_test.go
create mode 100644 vendor/github.com/moby/moby/client/container_inspect.go
create mode 100644 vendor/github.com/moby/moby/client/container_inspect_test.go
create mode 100644 vendor/github.com/moby/moby/client/container_kill.go
create mode 100644 vendor/github.com/moby/moby/client/container_kill_test.go
create mode 100644 vendor/github.com/moby/moby/client/container_list.go
create mode 100644 vendor/github.com/moby/moby/client/container_list_test.go
create mode 100644 vendor/github.com/moby/moby/client/container_logs.go
create mode 100644 vendor/github.com/moby/moby/client/container_logs_test.go
create mode 100644 vendor/github.com/moby/moby/client/container_pause.go
create mode 100644 vendor/github.com/moby/moby/client/container_pause_test.go
create mode 100644 vendor/github.com/moby/moby/client/container_prune.go
create mode 100644 vendor/github.com/moby/moby/client/container_prune_test.go
create mode 100644 vendor/github.com/moby/moby/client/container_remove.go
create mode 100644 vendor/github.com/moby/moby/client/container_remove_test.go
create mode 100644 vendor/github.com/moby/moby/client/container_rename.go
create mode 100644 vendor/github.com/moby/moby/client/container_rename_test.go
create mode 100644 vendor/github.com/moby/moby/client/container_resize.go
create mode 100644 vendor/github.com/moby/moby/client/container_resize_test.go
create mode 100644 vendor/github.com/moby/moby/client/container_restart.go
create mode 100644 vendor/github.com/moby/moby/client/container_restart_test.go
create mode 100644 vendor/github.com/moby/moby/client/container_start.go
create mode 100644 vendor/github.com/moby/moby/client/container_start_test.go
create mode 100644 vendor/github.com/moby/moby/client/container_stats.go
create mode 100644 vendor/github.com/moby/moby/client/container_stats_test.go
create mode 100644 vendor/github.com/moby/moby/client/container_stop.go
create mode 100644 vendor/github.com/moby/moby/client/container_stop_test.go
create mode 100644 vendor/github.com/moby/moby/client/container_top.go
create mode 100644 vendor/github.com/moby/moby/client/container_top_test.go
create mode 100644 vendor/github.com/moby/moby/client/container_unpause.go
create mode 100644 vendor/github.com/moby/moby/client/container_unpause_test.go
create mode 100644 vendor/github.com/moby/moby/client/container_update.go
create mode 100644 vendor/github.com/moby/moby/client/container_update_test.go
create mode 100644 vendor/github.com/moby/moby/client/container_wait.go
create mode 100644 vendor/github.com/moby/moby/client/container_wait_test.go
create mode 100644 vendor/github.com/moby/moby/client/disk_usage.go
create mode 100644 vendor/github.com/moby/moby/client/disk_usage_test.go
create mode 100644 vendor/github.com/moby/moby/client/distribution_inspect.go
create mode 100644 vendor/github.com/moby/moby/client/distribution_inspect_test.go
create mode 100644 vendor/github.com/moby/moby/client/errors.go
create mode 100644 vendor/github.com/moby/moby/client/events.go
create mode 100644 vendor/github.com/moby/moby/client/events_test.go
create mode 100644 vendor/github.com/moby/moby/client/hijack.go
create mode 100644 vendor/github.com/moby/moby/client/image_build.go
create mode 100644 vendor/github.com/moby/moby/client/image_build_test.go
create mode 100644 vendor/github.com/moby/moby/client/image_create.go
create mode 100644 vendor/github.com/moby/moby/client/image_create_test.go
create mode 100644 vendor/github.com/moby/moby/client/image_history.go
create mode 100644 vendor/github.com/moby/moby/client/image_history_test.go
create mode 100644 vendor/github.com/moby/moby/client/image_import.go
create mode 100644 vendor/github.com/moby/moby/client/image_import_test.go
create mode 100644 vendor/github.com/moby/moby/client/image_inspect.go
create mode 100644 vendor/github.com/moby/moby/client/image_inspect_test.go
create mode 100644 vendor/github.com/moby/moby/client/image_list.go
create mode 100644 vendor/github.com/moby/moby/client/image_list_test.go
create mode 100644 vendor/github.com/moby/moby/client/image_load.go
create mode 100644 vendor/github.com/moby/moby/client/image_load_test.go
create mode 100644 vendor/github.com/moby/moby/client/image_prune.go
create mode 100644 vendor/github.com/moby/moby/client/image_prune_test.go
create mode 100644 vendor/github.com/moby/moby/client/image_pull.go
create mode 100644 vendor/github.com/moby/moby/client/image_pull_test.go
create mode 100644 vendor/github.com/moby/moby/client/image_push.go
create mode 100644 vendor/github.com/moby/moby/client/image_push_test.go
create mode 100644 vendor/github.com/moby/moby/client/image_remove.go
create mode 100644 vendor/github.com/moby/moby/client/image_remove_test.go
create mode 100644 vendor/github.com/moby/moby/client/image_save.go
create mode 100644 vendor/github.com/moby/moby/client/image_save_test.go
create mode 100644 vendor/github.com/moby/moby/client/image_search.go
create mode 100644 vendor/github.com/moby/moby/client/image_search_test.go
create mode 100644 vendor/github.com/moby/moby/client/image_tag.go
create mode 100644 vendor/github.com/moby/moby/client/image_tag_test.go
create mode 100644 vendor/github.com/moby/moby/client/info.go
create mode 100644 vendor/github.com/moby/moby/client/info_test.go
create mode 100644 vendor/github.com/moby/moby/client/interface.go
create mode 100644 vendor/github.com/moby/moby/client/interface_experimental.go
create mode 100644 vendor/github.com/moby/moby/client/interface_stable.go
create mode 100644 vendor/github.com/moby/moby/client/login.go
create mode 100644 vendor/github.com/moby/moby/client/network_connect.go
create mode 100644 vendor/github.com/moby/moby/client/network_connect_test.go
create mode 100644 vendor/github.com/moby/moby/client/network_create.go
create mode 100644 vendor/github.com/moby/moby/client/network_create_test.go
create mode 100644 vendor/github.com/moby/moby/client/network_disconnect.go
create mode 100644 vendor/github.com/moby/moby/client/network_disconnect_test.go
create mode 100644 vendor/github.com/moby/moby/client/network_inspect.go
create mode 100644 vendor/github.com/moby/moby/client/network_inspect_test.go
create mode 100644 vendor/github.com/moby/moby/client/network_list.go
create mode 100644 vendor/github.com/moby/moby/client/network_list_test.go
create mode 100644 vendor/github.com/moby/moby/client/network_prune.go
create mode 100644 vendor/github.com/moby/moby/client/network_prune_test.go
create mode 100644 vendor/github.com/moby/moby/client/network_remove.go
create mode 100644 vendor/github.com/moby/moby/client/network_remove_test.go
create mode 100644 vendor/github.com/moby/moby/client/node_inspect.go
create mode 100644 vendor/github.com/moby/moby/client/node_inspect_test.go
create mode 100644 vendor/github.com/moby/moby/client/node_list.go
create mode 100644 vendor/github.com/moby/moby/client/node_list_test.go
create mode 100644 vendor/github.com/moby/moby/client/node_remove.go
create mode 100644 vendor/github.com/moby/moby/client/node_remove_test.go
create mode 100644 vendor/github.com/moby/moby/client/node_update.go
create mode 100644 vendor/github.com/moby/moby/client/node_update_test.go
create mode 100644 vendor/github.com/moby/moby/client/parse_logs.go
create mode 100644 vendor/github.com/moby/moby/client/parse_logs_test.go
create mode 100644 vendor/github.com/moby/moby/client/ping.go
create mode 100644 vendor/github.com/moby/moby/client/ping_test.go
create mode 100644 vendor/github.com/moby/moby/client/plugin_create.go
create mode 100644 vendor/github.com/moby/moby/client/plugin_disable.go
create mode 100644 vendor/github.com/moby/moby/client/plugin_disable_test.go
create mode 100644 vendor/github.com/moby/moby/client/plugin_enable.go
create mode 100644 vendor/github.com/moby/moby/client/plugin_enable_test.go
create mode 100644 vendor/github.com/moby/moby/client/plugin_inspect.go
create mode 100644 vendor/github.com/moby/moby/client/plugin_inspect_test.go
create mode 100644 vendor/github.com/moby/moby/client/plugin_install.go
create mode 100644 vendor/github.com/moby/moby/client/plugin_list.go
create mode 100644 vendor/github.com/moby/moby/client/plugin_list_test.go
create mode 100644 vendor/github.com/moby/moby/client/plugin_push.go
create mode 100644 vendor/github.com/moby/moby/client/plugin_push_test.go
create mode 100644 vendor/github.com/moby/moby/client/plugin_remove.go
create mode 100644 vendor/github.com/moby/moby/client/plugin_remove_test.go
create mode 100644 vendor/github.com/moby/moby/client/plugin_set.go
create mode 100644 vendor/github.com/moby/moby/client/plugin_set_test.go
create mode 100644 vendor/github.com/moby/moby/client/plugin_upgrade.go
create mode 100644 vendor/github.com/moby/moby/client/request.go
create mode 100644 vendor/github.com/moby/moby/client/request_test.go
create mode 100644 vendor/github.com/moby/moby/client/secret_create.go
create mode 100644 vendor/github.com/moby/moby/client/secret_create_test.go
create mode 100644 vendor/github.com/moby/moby/client/secret_inspect.go
create mode 100644 vendor/github.com/moby/moby/client/secret_inspect_test.go
create mode 100644 vendor/github.com/moby/moby/client/secret_list.go
create mode 100644 vendor/github.com/moby/moby/client/secret_list_test.go
create mode 100644 vendor/github.com/moby/moby/client/secret_remove.go
create mode 100644 vendor/github.com/moby/moby/client/secret_remove_test.go
create mode 100644 vendor/github.com/moby/moby/client/secret_update.go
create mode 100644 vendor/github.com/moby/moby/client/secret_update_test.go
create mode 100644 vendor/github.com/moby/moby/client/service_create.go
create mode 100644 vendor/github.com/moby/moby/client/service_create_test.go
create mode 100644 vendor/github.com/moby/moby/client/service_inspect.go
create mode 100644 vendor/github.com/moby/moby/client/service_inspect_test.go
create mode 100644 vendor/github.com/moby/moby/client/service_list.go
create mode 100644 vendor/github.com/moby/moby/client/service_list_test.go
create mode 100644 vendor/github.com/moby/moby/client/service_logs.go
create mode 100644 vendor/github.com/moby/moby/client/service_logs_test.go
create mode 100644 vendor/github.com/moby/moby/client/service_remove.go
create mode 100644 vendor/github.com/moby/moby/client/service_remove_test.go
create mode 100644 vendor/github.com/moby/moby/client/service_update.go
create mode 100644 vendor/github.com/moby/moby/client/service_update_test.go
create mode 100644 vendor/github.com/moby/moby/client/session.go
create mode 100644 vendor/github.com/moby/moby/client/session/filesync/diffcopy.go
create mode 100644 vendor/github.com/moby/moby/client/session/filesync/filesync.go
create mode 100644 vendor/github.com/moby/moby/client/session/filesync/filesync.pb.go
create mode 100644 vendor/github.com/moby/moby/client/session/filesync/filesync.proto
create mode 100644 vendor/github.com/moby/moby/client/session/filesync/filesync_test.go
create mode 100644 vendor/github.com/moby/moby/client/session/filesync/generate.go
create mode 100644 vendor/github.com/moby/moby/client/session/filesync/tarstream.go
create mode 100644 vendor/github.com/moby/moby/client/session/grpc.go
create mode 100644 vendor/github.com/moby/moby/client/session/manager.go
create mode 100644 vendor/github.com/moby/moby/client/session/session.go
create mode 100644 vendor/github.com/moby/moby/client/session/testutil/testutil.go
create mode 100644 vendor/github.com/moby/moby/client/swarm_get_unlock_key.go
create mode 100644 vendor/github.com/moby/moby/client/swarm_get_unlock_key_test.go
create mode 100644 vendor/github.com/moby/moby/client/swarm_init.go
create mode 100644 vendor/github.com/moby/moby/client/swarm_init_test.go
create mode 100644 vendor/github.com/moby/moby/client/swarm_inspect.go
create mode 100644 vendor/github.com/moby/moby/client/swarm_inspect_test.go
create mode 100644 vendor/github.com/moby/moby/client/swarm_join.go
create mode 100644 vendor/github.com/moby/moby/client/swarm_join_test.go
create mode 100644 vendor/github.com/moby/moby/client/swarm_leave.go
create mode 100644 vendor/github.com/moby/moby/client/swarm_leave_test.go
create mode 100644 vendor/github.com/moby/moby/client/swarm_unlock.go
create mode 100644 vendor/github.com/moby/moby/client/swarm_unlock_test.go
create mode 100644 vendor/github.com/moby/moby/client/swarm_update.go
create mode 100644 vendor/github.com/moby/moby/client/swarm_update_test.go
create mode 100644 vendor/github.com/moby/moby/client/task_inspect.go
create mode 100644 vendor/github.com/moby/moby/client/task_inspect_test.go
create mode 100644 vendor/github.com/moby/moby/client/task_list.go
create mode 100644 vendor/github.com/moby/moby/client/task_list_test.go
create mode 100644 vendor/github.com/moby/moby/client/task_logs.go
create mode 100644 vendor/github.com/moby/moby/client/transport.go
create mode 100644 vendor/github.com/moby/moby/client/utils.go
create mode 100644 vendor/github.com/moby/moby/client/version.go
create mode 100644 vendor/github.com/moby/moby/client/volume_create.go
create mode 100644 vendor/github.com/moby/moby/client/volume_create_test.go
create mode 100644 vendor/github.com/moby/moby/client/volume_inspect.go
create mode 100644 vendor/github.com/moby/moby/client/volume_inspect_test.go
create mode 100644 vendor/github.com/moby/moby/client/volume_list.go
create mode 100644 vendor/github.com/moby/moby/client/volume_list_test.go
create mode 100644 vendor/github.com/moby/moby/client/volume_prune.go
create mode 100644 vendor/github.com/moby/moby/client/volume_remove.go
create mode 100644 vendor/github.com/moby/moby/client/volume_remove_test.go
create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/README.md
create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/config.go
create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/config_common_unix.go
create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/config_experimental.go
create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/config_solaris.go
create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/config_unix.go
create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/config_unix_test.go
create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/config_windows.go
create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/daemon.go
create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/daemon_freebsd.go
create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/daemon_linux.go
create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/daemon_solaris.go
create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/daemon_test.go
create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/daemon_unix.go
create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/daemon_unix_test.go
create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/daemon_windows.go
create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/docker.go
create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/docker_windows.go
create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/hack/malformed_host_override.go
create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/hack/malformed_host_override_test.go
create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/metrics.go
create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/options.go
create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/options_test.go
create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/service_unsupported.go
create mode 100644 vendor/github.com/moby/moby/cmd/dockerd/service_windows.go
create mode 100644 vendor/github.com/moby/moby/container/archive.go
create mode 100644 vendor/github.com/moby/moby/container/container.go
create mode 100644 vendor/github.com/moby/moby/container/container_linux.go
create mode 100644 vendor/github.com/moby/moby/container/container_notlinux.go
create mode 100644 vendor/github.com/moby/moby/container/container_unit_test.go
create mode 100644 vendor/github.com/moby/moby/container/container_unix.go
create mode 100644 vendor/github.com/moby/moby/container/container_windows.go
create mode 100644 vendor/github.com/moby/moby/container/env.go
create mode 100644 vendor/github.com/moby/moby/container/env_test.go
create mode 100644 vendor/github.com/moby/moby/container/health.go
create mode 100644 vendor/github.com/moby/moby/container/history.go
create mode 100644 vendor/github.com/moby/moby/container/memory_store.go
create mode 100644 vendor/github.com/moby/moby/container/memory_store_test.go
create mode 100644 vendor/github.com/moby/moby/container/monitor.go
create mode 100644 vendor/github.com/moby/moby/container/mounts_unix.go
create mode 100644 vendor/github.com/moby/moby/container/mounts_windows.go
create mode 100644 vendor/github.com/moby/moby/container/state.go
create mode 100644 vendor/github.com/moby/moby/container/state_solaris.go
create mode 100644 vendor/github.com/moby/moby/container/state_test.go
create mode 100644 vendor/github.com/moby/moby/container/state_unix.go
create mode 100644 vendor/github.com/moby/moby/container/state_windows.go
create mode 100644 vendor/github.com/moby/moby/container/store.go
create mode 100644 vendor/github.com/moby/moby/container/stream/attach.go
create mode 100644 vendor/github.com/moby/moby/container/stream/streams.go
create mode 100644 vendor/github.com/moby/moby/container/view.go
create mode 100644 vendor/github.com/moby/moby/container/view_test.go
create mode 100644 vendor/github.com/moby/moby/contrib/README.md
create mode 100644 vendor/github.com/moby/moby/contrib/REVIEWERS
create mode 100644 vendor/github.com/moby/moby/contrib/apparmor/main.go
create mode 100644 vendor/github.com/moby/moby/contrib/apparmor/template.go
create mode 100755 vendor/github.com/moby/moby/contrib/builder/deb/aarch64/build.sh
create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/aarch64/debian-jessie/Dockerfile
create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/aarch64/debian-stretch/Dockerfile
create mode 100755 vendor/github.com/moby/moby/contrib/builder/deb/aarch64/generate.sh
create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/aarch64/ubuntu-trusty/Dockerfile
create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/aarch64/ubuntu-xenial/Dockerfile
create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/amd64/README.md
create mode 100755 vendor/github.com/moby/moby/contrib/builder/deb/amd64/build.sh
create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/amd64/debian-jessie/Dockerfile
create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/amd64/debian-stretch/Dockerfile
create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/amd64/debian-wheezy/Dockerfile
create mode 100755 vendor/github.com/moby/moby/contrib/builder/deb/amd64/generate.sh
create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile
create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/amd64/ubuntu-xenial/Dockerfile
create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/amd64/ubuntu-yakkety/Dockerfile
create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/amd64/ubuntu-zesty/Dockerfile
create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/armhf/debian-jessie/Dockerfile
create mode 100755 vendor/github.com/moby/moby/contrib/builder/deb/armhf/generate.sh
create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/armhf/raspbian-jessie/Dockerfile
create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/armhf/ubuntu-trusty/Dockerfile
create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/armhf/ubuntu-xenial/Dockerfile
create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/armhf/ubuntu-yakkety/Dockerfile
create mode 100755 vendor/github.com/moby/moby/contrib/builder/deb/ppc64le/build.sh
create mode 100755 vendor/github.com/moby/moby/contrib/builder/deb/ppc64le/generate.sh
create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/ppc64le/ubuntu-trusty/Dockerfile
create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/ppc64le/ubuntu-xenial/Dockerfile
create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/ppc64le/ubuntu-yakkety/Dockerfile
create mode 100755 vendor/github.com/moby/moby/contrib/builder/deb/s390x/build.sh
create mode 100755 vendor/github.com/moby/moby/contrib/builder/deb/s390x/generate.sh
create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/s390x/ubuntu-xenial/Dockerfile
create mode 100644 vendor/github.com/moby/moby/contrib/builder/deb/s390x/ubuntu-yakkety/Dockerfile
create mode 100644 vendor/github.com/moby/moby/contrib/builder/rpm/amd64/README.md
create mode 100644 vendor/github.com/moby/moby/contrib/builder/rpm/amd64/amazonlinux-latest/Dockerfile
create mode 100755 vendor/github.com/moby/moby/contrib/builder/rpm/amd64/build.sh
create mode 100644 vendor/github.com/moby/moby/contrib/builder/rpm/amd64/centos-7/Dockerfile
create mode 100644 vendor/github.com/moby/moby/contrib/builder/rpm/amd64/fedora-24/Dockerfile
create mode 100644 vendor/github.com/moby/moby/contrib/builder/rpm/amd64/fedora-25/Dockerfile
create mode 100755 vendor/github.com/moby/moby/contrib/builder/rpm/amd64/generate.sh
create mode 100644 vendor/github.com/moby/moby/contrib/builder/rpm/amd64/opensuse-13.2/Dockerfile
create mode 100644 vendor/github.com/moby/moby/contrib/builder/rpm/amd64/oraclelinux-6/Dockerfile
create mode 100644 vendor/github.com/moby/moby/contrib/builder/rpm/amd64/oraclelinux-7/Dockerfile
create mode 100644 vendor/github.com/moby/moby/contrib/builder/rpm/amd64/photon-1.0/Dockerfile
create mode 100755 vendor/github.com/moby/moby/contrib/builder/rpm/armhf/build.sh
create mode 100644 vendor/github.com/moby/moby/contrib/builder/rpm/armhf/centos-7/Dockerfile
create mode 100755 vendor/github.com/moby/moby/contrib/builder/rpm/armhf/generate.sh
create mode 100755 vendor/github.com/moby/moby/contrib/builder/rpm/ppc64le/build.sh
create mode 100644 vendor/github.com/moby/moby/contrib/builder/rpm/ppc64le/centos-7/Dockerfile
create mode 100644 vendor/github.com/moby/moby/contrib/builder/rpm/ppc64le/fedora-24/Dockerfile
create mode 100755 vendor/github.com/moby/moby/contrib/builder/rpm/ppc64le/generate.sh
create mode 100644 vendor/github.com/moby/moby/contrib/builder/rpm/ppc64le/opensuse-42.1/Dockerfile
create mode 100755 vendor/github.com/moby/moby/contrib/builder/rpm/s390x/build.sh
create mode 100644 vendor/github.com/moby/moby/contrib/builder/rpm/s390x/clefos-base-s390x-7/Dockerfile
create mode 100755 vendor/github.com/moby/moby/contrib/builder/rpm/s390x/generate.sh
create mode 100644 vendor/github.com/moby/moby/contrib/builder/rpm/s390x/opensuse-tumbleweed-1/Dockerfile
create mode 100755 vendor/github.com/moby/moby/contrib/check-config.sh
create mode 100644 vendor/github.com/moby/moby/contrib/desktop-integration/README.md
create mode 100644 vendor/github.com/moby/moby/contrib/desktop-integration/chromium/Dockerfile
create mode 100644 vendor/github.com/moby/moby/contrib/desktop-integration/gparted/Dockerfile
create mode 100644 vendor/github.com/moby/moby/contrib/docker-device-tool/README.md
create mode 100644 vendor/github.com/moby/moby/contrib/docker-device-tool/device_tool.go
create mode 100644 vendor/github.com/moby/moby/contrib/docker-device-tool/device_tool_windows.go
create mode 100755 vendor/github.com/moby/moby/contrib/docker-machine-install-bundle.sh
create mode 100755 vendor/github.com/moby/moby/contrib/dockerize-disk.sh
create mode 100755 vendor/github.com/moby/moby/contrib/download-frozen-image-v1.sh
create mode 100755 vendor/github.com/moby/moby/contrib/download-frozen-image-v2.sh
create mode 100644 vendor/github.com/moby/moby/contrib/editorconfig
create mode 100644 vendor/github.com/moby/moby/contrib/gitdm/aliases
create mode 100644 vendor/github.com/moby/moby/contrib/gitdm/domain-map
create mode 100755 vendor/github.com/moby/moby/contrib/gitdm/generate_aliases.sh
create mode 100644 vendor/github.com/moby/moby/contrib/gitdm/gitdm.config
create mode 100644 vendor/github.com/moby/moby/contrib/httpserver/Dockerfile
create mode 100644 vendor/github.com/moby/moby/contrib/httpserver/Dockerfile.solaris
create mode 100644 vendor/github.com/moby/moby/contrib/httpserver/server.go
create mode 100644 vendor/github.com/moby/moby/contrib/init/openrc/docker.confd
create mode 100644 vendor/github.com/moby/moby/contrib/init/openrc/docker.initd
create mode 100644 vendor/github.com/moby/moby/contrib/init/systemd/REVIEWERS
create mode 100644 vendor/github.com/moby/moby/contrib/init/systemd/docker.service
create mode 100644 vendor/github.com/moby/moby/contrib/init/systemd/docker.service.rpm
create mode 100644 vendor/github.com/moby/moby/contrib/init/systemd/docker.socket
create mode 100755 vendor/github.com/moby/moby/contrib/init/sysvinit-debian/docker
create mode 100644 vendor/github.com/moby/moby/contrib/init/sysvinit-debian/docker.default
create mode 100755 vendor/github.com/moby/moby/contrib/init/sysvinit-redhat/docker
create mode 100644 vendor/github.com/moby/moby/contrib/init/sysvinit-redhat/docker.sysconfig
create mode 100644 vendor/github.com/moby/moby/contrib/init/upstart/REVIEWERS
create mode 100644 vendor/github.com/moby/moby/contrib/init/upstart/docker.conf
create mode 100755 vendor/github.com/moby/moby/contrib/mac-install-bundle.sh
create mode 100755 vendor/github.com/moby/moby/contrib/mkimage-alpine.sh
create mode 100644 vendor/github.com/moby/moby/contrib/mkimage-arch-pacman.conf
create mode 100755 vendor/github.com/moby/moby/contrib/mkimage-arch.sh
create mode 100644 vendor/github.com/moby/moby/contrib/mkimage-archarm-pacman.conf
create mode 100755 vendor/github.com/moby/moby/contrib/mkimage-crux.sh
create mode 100755 vendor/github.com/moby/moby/contrib/mkimage-pld.sh
create mode 100755 vendor/github.com/moby/moby/contrib/mkimage-yum.sh
create mode 100755 vendor/github.com/moby/moby/contrib/mkimage.sh
create mode 100755 vendor/github.com/moby/moby/contrib/mkimage/.febootstrap-minimize
create mode 100755 vendor/github.com/moby/moby/contrib/mkimage/busybox-static
create mode 100755 vendor/github.com/moby/moby/contrib/mkimage/debootstrap
create mode 100755 vendor/github.com/moby/moby/contrib/mkimage/mageia-urpmi
create mode 100755 vendor/github.com/moby/moby/contrib/mkimage/rinse
create mode 100755 vendor/github.com/moby/moby/contrib/mkimage/solaris
create mode 100644 vendor/github.com/moby/moby/contrib/nnp-test/Dockerfile
create mode 100644 vendor/github.com/moby/moby/contrib/nnp-test/nnp-test.c
create mode 100755 vendor/github.com/moby/moby/contrib/nuke-graph-directory.sh
create mode 100755 vendor/github.com/moby/moby/contrib/project-stats.sh
create mode 100755 vendor/github.com/moby/moby/contrib/report-issue.sh
create mode 100755 vendor/github.com/moby/moby/contrib/reprepro/suites.sh
create mode 100644 vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE
create mode 100644 vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/Makefile
create mode 100644 vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/README.md
create mode 100644 vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/docker.fc
create mode 100644 vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/docker.if
create mode 100644 vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/docker.te
create mode 100644 vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE
create mode 100644 vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/Makefile
create mode 100644 vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/README.md
create mode 100644 vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.fc
create mode 100644 vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.if
create mode 100644 vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.te
create mode 100644 vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker_selinux.8.gz
create mode 100644 vendor/github.com/moby/moby/contrib/syntax/nano/Dockerfile.nanorc
create mode 100644 vendor/github.com/moby/moby/contrib/syntax/nano/README.md
create mode 100644 vendor/github.com/moby/moby/contrib/syntax/textmate/Docker.tmbundle/Preferences/Dockerfile.tmPreferences
create mode 100644 vendor/github.com/moby/moby/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage
create mode 100644 vendor/github.com/moby/moby/contrib/syntax/textmate/Docker.tmbundle/info.plist
create mode 100644 vendor/github.com/moby/moby/contrib/syntax/textmate/README.md
create mode 100644 vendor/github.com/moby/moby/contrib/syntax/textmate/REVIEWERS
create mode 100644 vendor/github.com/moby/moby/contrib/syntax/vim/LICENSE
create mode 100644 vendor/github.com/moby/moby/contrib/syntax/vim/README.md
create mode 100644 vendor/github.com/moby/moby/contrib/syntax/vim/doc/dockerfile.txt
create mode 100644 vendor/github.com/moby/moby/contrib/syntax/vim/ftdetect/dockerfile.vim
create mode 100644 vendor/github.com/moby/moby/contrib/syntax/vim/syntax/dockerfile.vim
create mode 100644 vendor/github.com/moby/moby/contrib/syscall-test/Dockerfile
create mode 100644 vendor/github.com/moby/moby/contrib/syscall-test/acct.c
create mode 100644 vendor/github.com/moby/moby/contrib/syscall-test/exit32.s
create mode 100644 vendor/github.com/moby/moby/contrib/syscall-test/ns.c
create mode 100644 vendor/github.com/moby/moby/contrib/syscall-test/raw.c
create mode 100644 vendor/github.com/moby/moby/contrib/syscall-test/setgid.c
create mode 100644 vendor/github.com/moby/moby/contrib/syscall-test/setuid.c
create mode 100644 vendor/github.com/moby/moby/contrib/syscall-test/socket.c
create mode 100644 vendor/github.com/moby/moby/contrib/syscall-test/userns.c
create mode 100644 vendor/github.com/moby/moby/contrib/udev/80-docker.rules
create mode 100644 vendor/github.com/moby/moby/contrib/vagrant-docker/README.md
create mode 100644 vendor/github.com/moby/moby/daemon/apparmor_default.go
create mode 100644 vendor/github.com/moby/moby/daemon/apparmor_default_unsupported.go
create mode 100644 vendor/github.com/moby/moby/daemon/archive.go
create mode 100644 vendor/github.com/moby/moby/daemon/archive_tarcopyoptions.go
create mode 100644 vendor/github.com/moby/moby/daemon/archive_tarcopyoptions_unix.go
create mode 100644 vendor/github.com/moby/moby/daemon/archive_tarcopyoptions_windows.go
create mode 100644 vendor/github.com/moby/moby/daemon/archive_unix.go
create mode 100644 vendor/github.com/moby/moby/daemon/archive_windows.go
create mode 100644 vendor/github.com/moby/moby/daemon/attach.go
create mode 100644 vendor/github.com/moby/moby/daemon/auth.go
create mode 100644 vendor/github.com/moby/moby/daemon/bindmount_solaris.go
create mode 100644 vendor/github.com/moby/moby/daemon/bindmount_unix.go
create mode 100644 vendor/github.com/moby/moby/daemon/build.go
create mode 100644 vendor/github.com/moby/moby/daemon/cache.go
create mode 100644 vendor/github.com/moby/moby/daemon/caps/utils_unix.go
create mode 100644 vendor/github.com/moby/moby/daemon/changes.go
create mode 100644 vendor/github.com/moby/moby/daemon/checkpoint.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/cluster.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/configs.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/controllers/plugin/controller.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/controllers/plugin/controller_test.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/convert/config.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/convert/container.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/convert/network.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/convert/node.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/convert/secret.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/convert/service.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/convert/service_test.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/convert/swarm.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/convert/task.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/executor/backend.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/executor/container/adapter.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/executor/container/attachment.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/executor/container/container.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/executor/container/controller.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/executor/container/errors.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/executor/container/executor.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/executor/container/health_test.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/executor/container/validate.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/executor/container/validate_test.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/executor/container/validate_unix_test.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/executor/container/validate_windows_test.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/filters.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/filters_test.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/helpers.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/listen_addr.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/listen_addr_linux.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/listen_addr_others.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/listen_addr_solaris.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/networks.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/noderunner.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/nodes.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/provider/network.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/secrets.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/services.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/swarm.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/tasks.go
create mode 100644 vendor/github.com/moby/moby/daemon/cluster/utils.go
create mode 100644 vendor/github.com/moby/moby/daemon/commit.go
create mode 100644 vendor/github.com/moby/moby/daemon/config/config.go
create mode 100644 vendor/github.com/moby/moby/daemon/config/config_common_unix.go
create mode 100644 vendor/github.com/moby/moby/daemon/config/config_common_unix_test.go
create mode 100644 vendor/github.com/moby/moby/daemon/config/config_solaris.go
create mode 100644 vendor/github.com/moby/moby/daemon/config/config_test.go
create mode 100644 vendor/github.com/moby/moby/daemon/config/config_unix.go
create mode 100644 vendor/github.com/moby/moby/daemon/config/config_unix_test.go
create mode 100644 vendor/github.com/moby/moby/daemon/config/config_windows.go
create mode 100644 vendor/github.com/moby/moby/daemon/config/config_windows_test.go
create mode 100644 vendor/github.com/moby/moby/daemon/configs.go
create mode 100644 vendor/github.com/moby/moby/daemon/configs_linux.go
create mode 100644 vendor/github.com/moby/moby/daemon/configs_unsupported.go
create mode 100644 vendor/github.com/moby/moby/daemon/configs_windows.go
create mode 100644 vendor/github.com/moby/moby/daemon/container.go
create mode 100644 vendor/github.com/moby/moby/daemon/container_linux.go
create mode 100644 vendor/github.com/moby/moby/daemon/container_operations.go
create mode 100644 vendor/github.com/moby/moby/daemon/container_operations_solaris.go
create mode 100644 vendor/github.com/moby/moby/daemon/container_operations_unix.go
create mode 100644 vendor/github.com/moby/moby/daemon/container_operations_windows.go
create mode 100644 vendor/github.com/moby/moby/daemon/container_windows.go
create mode 100644 vendor/github.com/moby/moby/daemon/create.go
create mode 100644 vendor/github.com/moby/moby/daemon/create_unix.go
create mode 100644 vendor/github.com/moby/moby/daemon/create_windows.go
create mode 100644 vendor/github.com/moby/moby/daemon/daemon.go
create mode 100644 vendor/github.com/moby/moby/daemon/daemon_experimental.go
create mode 100644 vendor/github.com/moby/moby/daemon/daemon_linux.go
create mode 100644 vendor/github.com/moby/moby/daemon/daemon_linux_test.go
create mode 100644 vendor/github.com/moby/moby/daemon/daemon_solaris.go
create mode 100644 vendor/github.com/moby/moby/daemon/daemon_test.go
create mode 100644 vendor/github.com/moby/moby/daemon/daemon_unix.go
create mode 100644 vendor/github.com/moby/moby/daemon/daemon_unix_test.go
create mode 100644 vendor/github.com/moby/moby/daemon/daemon_unsupported.go
create mode 100644 vendor/github.com/moby/moby/daemon/daemon_windows.go
create mode 100644 vendor/github.com/moby/moby/daemon/debugtrap_unix.go
create mode 100644 vendor/github.com/moby/moby/daemon/debugtrap_unsupported.go
create mode 100644 vendor/github.com/moby/moby/daemon/debugtrap_windows.go
create mode 100644 vendor/github.com/moby/moby/daemon/delete.go
create mode 100644 vendor/github.com/moby/moby/daemon/delete_test.go
create mode 100644 vendor/github.com/moby/moby/daemon/dependency.go
create mode 100644 vendor/github.com/moby/moby/daemon/discovery/discovery.go
create mode 100644 vendor/github.com/moby/moby/daemon/discovery/discovery_test.go
create mode 100644 vendor/github.com/moby/moby/daemon/disk_usage.go
create mode 100644 vendor/github.com/moby/moby/daemon/errors.go
create mode 100644 vendor/github.com/moby/moby/daemon/events.go
create mode 100644 vendor/github.com/moby/moby/daemon/events/events.go
create mode 100644 vendor/github.com/moby/moby/daemon/events/events_test.go
create mode 100644 vendor/github.com/moby/moby/daemon/events/filter.go
create mode 100644 vendor/github.com/moby/moby/daemon/events/metrics.go
create mode 100644 vendor/github.com/moby/moby/daemon/events/testutils/testutils.go
create mode 100644 vendor/github.com/moby/moby/daemon/events_test.go
create mode 100644 vendor/github.com/moby/moby/daemon/exec.go
create mode 100644 vendor/github.com/moby/moby/daemon/exec/exec.go
create mode 100644 vendor/github.com/moby/moby/daemon/exec_linux.go
create mode 100644 vendor/github.com/moby/moby/daemon/exec_solaris.go
create mode 100644 vendor/github.com/moby/moby/daemon/exec_windows.go
create mode 100644 vendor/github.com/moby/moby/daemon/export.go
create mode 100644 vendor/github.com/moby/moby/daemon/getsize_unix.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/aufs/aufs.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/aufs/aufs_test.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/aufs/dirs.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/aufs/mount.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/aufs/mount_linux.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/aufs/mount_unsupported.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/btrfs/btrfs.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/btrfs/btrfs_test.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/btrfs/dummy_unsupported.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/btrfs/version.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/btrfs/version_none.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/btrfs/version_test.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/counter.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/devmapper/README.md
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/devmapper/device_setup.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/devmapper/deviceset.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/devmapper/devmapper_doc.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/devmapper/devmapper_test.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/devmapper/driver.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/devmapper/mount.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/driver.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/driver_freebsd.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/driver_linux.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/driver_solaris.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/driver_unsupported.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/driver_windows.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/fsdiff.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/graphtest/graphbench_unix.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/graphtest/graphtest_unix.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/graphtest/graphtest_windows.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/graphtest/testutil.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/graphtest/testutil_unix.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/lcow/lcow.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/overlay/copy.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/overlay/overlay.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/overlay/overlay_test.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/overlay/overlay_unsupported.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/overlay2/check.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/overlay2/mount.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/overlay2/overlay.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/overlay2/overlay_test.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/overlay2/overlay_unsupported.go
create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/overlay2/randomid.go
create mode
100644 vendor/github.com/moby/moby/daemon/graphdriver/overlayutils/overlayutils.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/plugin.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/proxy.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/quota/projectquota.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/register/register_aufs.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/register/register_btrfs.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/register/register_devicemapper.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/register/register_overlay.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/register/register_vfs.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/register/register_windows.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/register/register_zfs.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/vfs/driver.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/vfs/vfs_test.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/windows/windows.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/zfs/MAINTAINERS create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs_freebsd.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs_linux.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs_solaris.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs_test.go create mode 100644 vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs_unsupported.go create mode 100644 vendor/github.com/moby/moby/daemon/health.go create mode 100644 vendor/github.com/moby/moby/daemon/health_test.go create mode 100644 vendor/github.com/moby/moby/daemon/image.go create mode 100644 vendor/github.com/moby/moby/daemon/image_delete.go create mode 100644 vendor/github.com/moby/moby/daemon/image_exporter.go create mode 100644 vendor/github.com/moby/moby/daemon/image_history.go create mode 100644 vendor/github.com/moby/moby/daemon/image_inspect.go create mode 100644 vendor/github.com/moby/moby/daemon/image_pull.go create mode 100644 vendor/github.com/moby/moby/daemon/image_push.go create mode 100644 vendor/github.com/moby/moby/daemon/image_tag.go create mode 100644 vendor/github.com/moby/moby/daemon/images.go create mode 100644 vendor/github.com/moby/moby/daemon/import.go create mode 100644 vendor/github.com/moby/moby/daemon/info.go create mode 100644 vendor/github.com/moby/moby/daemon/info_unix.go create mode 100644 vendor/github.com/moby/moby/daemon/info_unix_test.go create mode 100644 vendor/github.com/moby/moby/daemon/info_windows.go create mode 100644 vendor/github.com/moby/moby/daemon/initlayer/setup_solaris.go create mode 100644 vendor/github.com/moby/moby/daemon/initlayer/setup_unix.go create mode 100644 vendor/github.com/moby/moby/daemon/initlayer/setup_windows.go create mode 100644 vendor/github.com/moby/moby/daemon/inspect.go create mode 100644 vendor/github.com/moby/moby/daemon/inspect_solaris.go create mode 100644 vendor/github.com/moby/moby/daemon/inspect_unix.go create mode 100644 vendor/github.com/moby/moby/daemon/inspect_windows.go create mode 100644 vendor/github.com/moby/moby/daemon/keys.go create mode 100644 vendor/github.com/moby/moby/daemon/keys_unsupported.go create 
mode 100644 vendor/github.com/moby/moby/daemon/kill.go create mode 100644 vendor/github.com/moby/moby/daemon/links.go create mode 100644 vendor/github.com/moby/moby/daemon/links/links.go create mode 100644 vendor/github.com/moby/moby/daemon/links/links_test.go create mode 100644 vendor/github.com/moby/moby/daemon/list.go create mode 100644 vendor/github.com/moby/moby/daemon/list_unix.go create mode 100644 vendor/github.com/moby/moby/daemon/list_windows.go create mode 100644 vendor/github.com/moby/moby/daemon/logdrivers_linux.go create mode 100644 vendor/github.com/moby/moby/daemon/logdrivers_windows.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/adapter.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/adapter_test.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/awslogs/cloudwatchlogs.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/awslogs/cloudwatchlogs_test.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/awslogs/cwlogsiface_mock_test.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/copier.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/copier_test.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/etwlogs/etwlogs_windows.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/factory.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/fluentd/fluentd.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/gcplogs/gcplogging.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/gcplogs/gcplogging_linux.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/gcplogs/gcplogging_others.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/gelf/gelf.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/gelf/gelf_unsupported.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/journald/journald.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/journald/journald_test.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/journald/journald_unsupported.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/journald/read.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/journald/read_native.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/journald/read_native_compat.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/journald/read_unsupported.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/jsonfilelog/jsonfilelog.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/jsonfilelog/jsonfilelog_test.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/jsonfilelog/multireader/multireader.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/jsonfilelog/multireader/multireader_test.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/jsonfilelog/read.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/logentries/logentries.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/logger.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/logger_test.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/loggerutils/log_tag.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/loggerutils/log_tag_test.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/loggerutils/rotatefilewriter.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/loginfo.go create mode 100644 
vendor/github.com/moby/moby/daemon/logger/plugin.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/plugin_unix.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/plugin_unsupported.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/proxy.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/ring.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/ring_test.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/splunk/splunk.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/splunk/splunk_test.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/splunk/splunkhecmock_test.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/syslog/syslog.go create mode 100644 vendor/github.com/moby/moby/daemon/logger/syslog/syslog_test.go create mode 100644 vendor/github.com/moby/moby/daemon/logs.go create mode 100644 vendor/github.com/moby/moby/daemon/logs_test.go create mode 100644 vendor/github.com/moby/moby/daemon/metrics.go create mode 100644 vendor/github.com/moby/moby/daemon/metrics_unix.go create mode 100644 vendor/github.com/moby/moby/daemon/metrics_unsupported.go create mode 100644 vendor/github.com/moby/moby/daemon/monitor.go create mode 100644 vendor/github.com/moby/moby/daemon/monitor_linux.go create mode 100644 vendor/github.com/moby/moby/daemon/monitor_solaris.go create mode 100644 vendor/github.com/moby/moby/daemon/monitor_windows.go create mode 100644 vendor/github.com/moby/moby/daemon/mounts.go create mode 100644 vendor/github.com/moby/moby/daemon/names.go create mode 100644 vendor/github.com/moby/moby/daemon/network.go create mode 100644 vendor/github.com/moby/moby/daemon/network/settings.go create mode 100644 vendor/github.com/moby/moby/daemon/oci_linux.go create mode 100644 vendor/github.com/moby/moby/daemon/oci_solaris.go create mode 100644 vendor/github.com/moby/moby/daemon/oci_windows.go create mode 100644 vendor/github.com/moby/moby/daemon/pause.go create mode 100644 vendor/github.com/moby/moby/daemon/prune.go create mode 100644 vendor/github.com/moby/moby/daemon/reload.go create mode 100644 vendor/github.com/moby/moby/daemon/reload_test.go create mode 100644 vendor/github.com/moby/moby/daemon/rename.go create mode 100644 vendor/github.com/moby/moby/daemon/resize.go create mode 100644 vendor/github.com/moby/moby/daemon/restart.go create mode 100644 vendor/github.com/moby/moby/daemon/search.go create mode 100644 vendor/github.com/moby/moby/daemon/search_test.go create mode 100644 vendor/github.com/moby/moby/daemon/seccomp_disabled.go create mode 100644 vendor/github.com/moby/moby/daemon/seccomp_linux.go create mode 100644 vendor/github.com/moby/moby/daemon/seccomp_unsupported.go create mode 100644 vendor/github.com/moby/moby/daemon/secrets.go create mode 100644 vendor/github.com/moby/moby/daemon/secrets_linux.go create mode 100644 vendor/github.com/moby/moby/daemon/secrets_unsupported.go create mode 100644 vendor/github.com/moby/moby/daemon/secrets_windows.go create mode 100644 vendor/github.com/moby/moby/daemon/selinux_linux.go create mode 100644 vendor/github.com/moby/moby/daemon/selinux_unsupported.go create mode 100644 vendor/github.com/moby/moby/daemon/start.go create mode 100644 vendor/github.com/moby/moby/daemon/start_unix.go create mode 100644 vendor/github.com/moby/moby/daemon/start_windows.go create mode 100644 vendor/github.com/moby/moby/daemon/stats.go create mode 100644 vendor/github.com/moby/moby/daemon/stats/collector.go create mode 100644 
vendor/github.com/moby/moby/daemon/stats/collector_solaris.go create mode 100644 vendor/github.com/moby/moby/daemon/stats/collector_unix.go create mode 100644 vendor/github.com/moby/moby/daemon/stats/collector_windows.go create mode 100644 vendor/github.com/moby/moby/daemon/stats/types.go create mode 100644 vendor/github.com/moby/moby/daemon/stats_collector.go create mode 100644 vendor/github.com/moby/moby/daemon/stats_unix.go create mode 100644 vendor/github.com/moby/moby/daemon/stats_windows.go create mode 100644 vendor/github.com/moby/moby/daemon/stop.go create mode 100644 vendor/github.com/moby/moby/daemon/top_unix.go create mode 100644 vendor/github.com/moby/moby/daemon/top_unix_test.go create mode 100644 vendor/github.com/moby/moby/daemon/top_windows.go create mode 100644 vendor/github.com/moby/moby/daemon/unpause.go create mode 100644 vendor/github.com/moby/moby/daemon/update.go create mode 100644 vendor/github.com/moby/moby/daemon/update_linux.go create mode 100644 vendor/github.com/moby/moby/daemon/update_solaris.go create mode 100644 vendor/github.com/moby/moby/daemon/update_windows.go create mode 100644 vendor/github.com/moby/moby/daemon/volumes.go create mode 100644 vendor/github.com/moby/moby/daemon/volumes_unit_test.go create mode 100644 vendor/github.com/moby/moby/daemon/volumes_unix.go create mode 100644 vendor/github.com/moby/moby/daemon/volumes_unix_test.go create mode 100644 vendor/github.com/moby/moby/daemon/volumes_windows.go create mode 100644 vendor/github.com/moby/moby/daemon/wait.go create mode 100644 vendor/github.com/moby/moby/daemon/workdir.go create mode 100644 vendor/github.com/moby/moby/distribution/config.go create mode 100644 vendor/github.com/moby/moby/distribution/errors.go create mode 100644 vendor/github.com/moby/moby/distribution/fixtures/validate_manifest/bad_manifest create mode 100644 vendor/github.com/moby/moby/distribution/fixtures/validate_manifest/extra_data_manifest create mode 100644 vendor/github.com/moby/moby/distribution/fixtures/validate_manifest/good_manifest create mode 100644 vendor/github.com/moby/moby/distribution/metadata/metadata.go create mode 100644 vendor/github.com/moby/moby/distribution/metadata/v1_id_service.go create mode 100644 vendor/github.com/moby/moby/distribution/metadata/v1_id_service_test.go create mode 100644 vendor/github.com/moby/moby/distribution/metadata/v2_metadata_service.go create mode 100644 vendor/github.com/moby/moby/distribution/metadata/v2_metadata_service_test.go create mode 100644 vendor/github.com/moby/moby/distribution/pull.go create mode 100644 vendor/github.com/moby/moby/distribution/pull_v1.go create mode 100644 vendor/github.com/moby/moby/distribution/pull_v2.go create mode 100644 vendor/github.com/moby/moby/distribution/pull_v2_test.go create mode 100644 vendor/github.com/moby/moby/distribution/pull_v2_unix.go create mode 100644 vendor/github.com/moby/moby/distribution/pull_v2_windows.go create mode 100644 vendor/github.com/moby/moby/distribution/push.go create mode 100644 vendor/github.com/moby/moby/distribution/push_v1.go create mode 100644 vendor/github.com/moby/moby/distribution/push_v2.go create mode 100644 vendor/github.com/moby/moby/distribution/push_v2_test.go create mode 100644 vendor/github.com/moby/moby/distribution/registry.go create mode 100644 vendor/github.com/moby/moby/distribution/registry_unit_test.go create mode 100644 vendor/github.com/moby/moby/distribution/utils/progress.go create mode 100644 vendor/github.com/moby/moby/distribution/xfer/download.go create mode 100644 
vendor/github.com/moby/moby/distribution/xfer/download_test.go create mode 100644 vendor/github.com/moby/moby/distribution/xfer/transfer.go create mode 100644 vendor/github.com/moby/moby/distribution/xfer/transfer_test.go create mode 100644 vendor/github.com/moby/moby/distribution/xfer/upload.go create mode 100644 vendor/github.com/moby/moby/distribution/xfer/upload_test.go create mode 100644 vendor/github.com/moby/moby/dockerversion/useragent.go create mode 100644 vendor/github.com/moby/moby/dockerversion/version_lib.go create mode 100644 vendor/github.com/moby/moby/docs/api/v1.18.md create mode 100644 vendor/github.com/moby/moby/docs/api/v1.19.md create mode 100644 vendor/github.com/moby/moby/docs/api/v1.20.md create mode 100644 vendor/github.com/moby/moby/docs/api/v1.21.md create mode 100644 vendor/github.com/moby/moby/docs/api/v1.22.md create mode 100644 vendor/github.com/moby/moby/docs/api/v1.23.md create mode 100644 vendor/github.com/moby/moby/docs/api/v1.24.md create mode 100644 vendor/github.com/moby/moby/docs/api/version-history.md create mode 100644 vendor/github.com/moby/moby/docs/static_files/contributors.png create mode 100644 vendor/github.com/moby/moby/docs/static_files/docker-logo-compressed.png create mode 100644 vendor/github.com/moby/moby/docs/static_files/moby-project-logo.png create mode 100644 vendor/github.com/moby/moby/hack/Jenkins/W2L/postbuild.sh create mode 100644 vendor/github.com/moby/moby/hack/Jenkins/W2L/setup.sh create mode 100644 vendor/github.com/moby/moby/hack/Jenkins/readme.md create mode 100644 vendor/github.com/moby/moby/hack/README.md create mode 100755 vendor/github.com/moby/moby/hack/dind create mode 100644 vendor/github.com/moby/moby/hack/dockerfile/binaries-commits create mode 100755 vendor/github.com/moby/moby/hack/dockerfile/install-binaries.sh create mode 100755 vendor/github.com/moby/moby/hack/generate-authors.sh create mode 100755 vendor/github.com/moby/moby/hack/generate-swagger-api.sh create mode 100644 vendor/github.com/moby/moby/hack/integration-cli-on-swarm/README.md create mode 100644 vendor/github.com/moby/moby/hack/integration-cli-on-swarm/agent/Dockerfile create mode 100644 vendor/github.com/moby/moby/hack/integration-cli-on-swarm/agent/master/call.go create mode 100644 vendor/github.com/moby/moby/hack/integration-cli-on-swarm/agent/master/master.go create mode 100644 vendor/github.com/moby/moby/hack/integration-cli-on-swarm/agent/master/set.go create mode 100644 vendor/github.com/moby/moby/hack/integration-cli-on-swarm/agent/master/set_test.go create mode 100644 vendor/github.com/moby/moby/hack/integration-cli-on-swarm/agent/types/types.go create mode 100644 vendor/github.com/moby/moby/hack/integration-cli-on-swarm/agent/vendor.conf create mode 100644 vendor/github.com/moby/moby/hack/integration-cli-on-swarm/agent/worker/executor.go create mode 100644 vendor/github.com/moby/moby/hack/integration-cli-on-swarm/agent/worker/worker.go create mode 100644 vendor/github.com/moby/moby/hack/integration-cli-on-swarm/host/compose.go create mode 100644 vendor/github.com/moby/moby/hack/integration-cli-on-swarm/host/dockercmd.go create mode 100644 vendor/github.com/moby/moby/hack/integration-cli-on-swarm/host/enumerate.go create mode 100644 vendor/github.com/moby/moby/hack/integration-cli-on-swarm/host/enumerate_test.go create mode 100644 vendor/github.com/moby/moby/hack/integration-cli-on-swarm/host/host.go create mode 100644 vendor/github.com/moby/moby/hack/integration-cli-on-swarm/host/volume.go create mode 100644 
vendor/github.com/moby/moby/hack/make.ps1 create mode 100755 vendor/github.com/moby/moby/hack/make.sh create mode 100644 vendor/github.com/moby/moby/hack/make/.binary create mode 100644 vendor/github.com/moby/moby/hack/make/.binary-setup create mode 100644 vendor/github.com/moby/moby/hack/make/.build-deb/compat create mode 100644 vendor/github.com/moby/moby/hack/make/.build-deb/control create mode 100644 vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.bash-completion create mode 120000 vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.docker.default create mode 120000 vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.docker.init create mode 120000 vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.docker.upstart create mode 100644 vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.install create mode 100644 vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.manpages create mode 100644 vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.postinst create mode 120000 vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.udev create mode 100644 vendor/github.com/moby/moby/hack/make/.build-deb/docs create mode 100755 vendor/github.com/moby/moby/hack/make/.build-deb/rules create mode 100644 vendor/github.com/moby/moby/hack/make/.build-rpm/docker-engine-selinux.spec create mode 100644 vendor/github.com/moby/moby/hack/make/.build-rpm/docker-engine.spec create mode 100644 vendor/github.com/moby/moby/hack/make/.detect-daemon-osarch create mode 100644 vendor/github.com/moby/moby/hack/make/.ensure-emptyfs create mode 100644 vendor/github.com/moby/moby/hack/make/.go-autogen create mode 100644 vendor/github.com/moby/moby/hack/make/.go-autogen.ps1 create mode 100644 vendor/github.com/moby/moby/hack/make/.integration-daemon-setup create mode 100644 vendor/github.com/moby/moby/hack/make/.integration-daemon-start create mode 100644 vendor/github.com/moby/moby/hack/make/.integration-daemon-stop create mode 100644 vendor/github.com/moby/moby/hack/make/.integration-test-helpers create mode 100644 vendor/github.com/moby/moby/hack/make/.resources-windows/common.rc create mode 100644 vendor/github.com/moby/moby/hack/make/.resources-windows/docker.exe.manifest create mode 100644 vendor/github.com/moby/moby/hack/make/.resources-windows/docker.ico create mode 100644 vendor/github.com/moby/moby/hack/make/.resources-windows/docker.png create mode 100644 vendor/github.com/moby/moby/hack/make/.resources-windows/docker.rc create mode 100644 vendor/github.com/moby/moby/hack/make/.resources-windows/dockerd.rc create mode 100644 vendor/github.com/moby/moby/hack/make/.resources-windows/event_messages.mc create mode 100644 vendor/github.com/moby/moby/hack/make/.resources-windows/resources.go create mode 100644 vendor/github.com/moby/moby/hack/make/README.md create mode 100644 vendor/github.com/moby/moby/hack/make/binary create mode 100644 vendor/github.com/moby/moby/hack/make/binary-daemon create mode 100644 vendor/github.com/moby/moby/hack/make/build-deb create mode 100644 vendor/github.com/moby/moby/hack/make/build-integration-test-binary create mode 100644 vendor/github.com/moby/moby/hack/make/build-rpm create mode 100755 vendor/github.com/moby/moby/hack/make/clean-apt-repo create mode 100755 vendor/github.com/moby/moby/hack/make/clean-yum-repo create mode 100644 vendor/github.com/moby/moby/hack/make/cover create mode 100644 vendor/github.com/moby/moby/hack/make/cross create mode 100644 
vendor/github.com/moby/moby/hack/make/dynbinary create mode 100644 vendor/github.com/moby/moby/hack/make/dynbinary-daemon create mode 100755 vendor/github.com/moby/moby/hack/make/generate-index-listing create mode 100755 vendor/github.com/moby/moby/hack/make/install-binary create mode 100644 vendor/github.com/moby/moby/hack/make/install-binary-daemon create mode 100755 vendor/github.com/moby/moby/hack/make/release-deb create mode 100755 vendor/github.com/moby/moby/hack/make/release-rpm create mode 100644 vendor/github.com/moby/moby/hack/make/run create mode 100755 vendor/github.com/moby/moby/hack/make/sign-repos create mode 100644 vendor/github.com/moby/moby/hack/make/test-docker-py create mode 100755 vendor/github.com/moby/moby/hack/make/test-integration-cli create mode 100644 vendor/github.com/moby/moby/hack/make/test-integration-shell create mode 100755 vendor/github.com/moby/moby/hack/make/test-old-apt-repo create mode 100644 vendor/github.com/moby/moby/hack/make/test-unit create mode 100644 vendor/github.com/moby/moby/hack/make/tgz create mode 100644 vendor/github.com/moby/moby/hack/make/ubuntu create mode 100755 vendor/github.com/moby/moby/hack/make/update-apt-repo create mode 100644 vendor/github.com/moby/moby/hack/make/win create mode 100755 vendor/github.com/moby/moby/hack/release.sh create mode 100644 vendor/github.com/moby/moby/hack/validate/.swagger-yamllint create mode 100644 vendor/github.com/moby/moby/hack/validate/.validate create mode 100755 vendor/github.com/moby/moby/hack/validate/all create mode 100755 vendor/github.com/moby/moby/hack/validate/changelog-date-descending create mode 100755 vendor/github.com/moby/moby/hack/validate/changelog-well-formed create mode 100755 vendor/github.com/moby/moby/hack/validate/dco create mode 100755 vendor/github.com/moby/moby/hack/validate/default create mode 100755 vendor/github.com/moby/moby/hack/validate/default-seccomp create mode 100755 vendor/github.com/moby/moby/hack/validate/gofmt create mode 100755 vendor/github.com/moby/moby/hack/validate/lint create mode 100755 vendor/github.com/moby/moby/hack/validate/pkg-imports create mode 100755 vendor/github.com/moby/moby/hack/validate/swagger create mode 100755 vendor/github.com/moby/moby/hack/validate/swagger-gen create mode 100755 vendor/github.com/moby/moby/hack/validate/test-imports create mode 100755 vendor/github.com/moby/moby/hack/validate/toml create mode 100755 vendor/github.com/moby/moby/hack/validate/vendor create mode 100755 vendor/github.com/moby/moby/hack/validate/vet create mode 100755 vendor/github.com/moby/moby/hack/vendor.sh create mode 100644 vendor/github.com/moby/moby/image/cache/cache.go create mode 100644 vendor/github.com/moby/moby/image/cache/compare.go create mode 100644 vendor/github.com/moby/moby/image/cache/compare_test.go create mode 100644 vendor/github.com/moby/moby/image/fs.go create mode 100644 vendor/github.com/moby/moby/image/fs_test.go create mode 100644 vendor/github.com/moby/moby/image/image.go create mode 100644 vendor/github.com/moby/moby/image/image_test.go create mode 100644 vendor/github.com/moby/moby/image/rootfs.go create mode 100644 vendor/github.com/moby/moby/image/spec/v1.1.md create mode 100644 vendor/github.com/moby/moby/image/spec/v1.2.md create mode 100644 vendor/github.com/moby/moby/image/spec/v1.md create mode 100644 vendor/github.com/moby/moby/image/store.go create mode 100644 vendor/github.com/moby/moby/image/store_test.go create mode 100644 vendor/github.com/moby/moby/image/tarexport/load.go create mode 100644 
vendor/github.com/moby/moby/image/tarexport/save.go create mode 100644 vendor/github.com/moby/moby/image/tarexport/tarexport.go create mode 100644 vendor/github.com/moby/moby/image/v1/imagev1.go create mode 100644 vendor/github.com/moby/moby/image/v1/imagev1_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/benchmark_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/check_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/checker/checker.go create mode 100644 vendor/github.com/moby/moby/integration-cli/cli/build/build.go create mode 100644 vendor/github.com/moby/moby/integration-cli/cli/build/fakecontext/context.go create mode 100644 vendor/github.com/moby/moby/integration-cli/cli/build/fakegit/fakegit.go create mode 100644 vendor/github.com/moby/moby/integration-cli/cli/build/fakestorage/fixtures.go create mode 100644 vendor/github.com/moby/moby/integration-cli/cli/build/fakestorage/storage.go create mode 100644 vendor/github.com/moby/moby/integration-cli/cli/cli.go create mode 100644 vendor/github.com/moby/moby/integration-cli/daemon/daemon.go create mode 100644 vendor/github.com/moby/moby/integration-cli/daemon/daemon_swarm.go create mode 100644 vendor/github.com/moby/moby/integration-cli/daemon/daemon_unix.go create mode 100644 vendor/github.com/moby/moby/integration-cli/daemon/daemon_windows.go create mode 100644 vendor/github.com/moby/moby/integration-cli/daemon_swarm_hack_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_attach_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_auth_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_build_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_containers_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_create_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_events_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_exec_resize_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_exec_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_images_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_info_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_inspect_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_inspect_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_logs_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_network_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_resize_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_session_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_stats_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_stats_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_swarm_config_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_swarm_node_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_swarm_secret_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_swarm_service_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_swarm_test.go 
create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_update_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_version_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_api_volumes_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_attach_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_attach_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_authz_plugin_v2_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_authz_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_build_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_build_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_by_digest_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_commit_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_config_create_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_config_inspect_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_config_ls_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_config_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_cp_from_container_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_cp_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_cp_to_container_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_cp_to_container_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_cp_utils_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_create_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_create_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_daemon_plugins_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_daemon_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_diff_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_events_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_events_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_exec_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_exec_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_experimental_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_export_import_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_external_graphdriver_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_external_volume_driver_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_health_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_help_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_history_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_images_test.go create mode 100644 
vendor/github.com/moby/moby/integration-cli/docker_cli_import_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_info_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_info_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_inspect_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_kill_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_links_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_links_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_login_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_logout_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_logs_bench_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_logs_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_nat_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_netmode_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_network_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_oom_killed_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_pause_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_plugins_logdriver_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_plugins_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_port_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_proxy_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_prune_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_ps_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_pull_local_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_pull_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_pull_trusted_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_push_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_registry_user_agent_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_rename_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_restart_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_rm_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_rmi_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_run_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_run_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_save_load_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_save_load_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_search_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_secret_create_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_secret_inspect_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_secret_ls_test.go create mode 100644 
vendor/github.com/moby/moby/integration-cli/docker_cli_service_create_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_service_health_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_service_logs_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_service_scale_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_service_update_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_sni_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_stack_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_start_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_stats_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_stop_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_swarm_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_swarm_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_tag_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_top_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_update_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_update_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_userns_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_v2_only_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_version_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_volume_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_cli_wait_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_deprecated_api_v124_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_deprecated_api_v124_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_experimental_network_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_hub_pull_suite_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/docker_utils_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/environment/clean.go create mode 100644 vendor/github.com/moby/moby/integration-cli/environment/environment.go create mode 100644 vendor/github.com/moby/moby/integration-cli/environment/protect.go create mode 100644 vendor/github.com/moby/moby/integration-cli/events_utils_test.go create mode 100755 vendor/github.com/moby/moby/integration-cli/fixtures/auth/docker-credential-shell-test create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/credentialspecs/valid.json create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/deploy/default.yaml create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/deploy/remove.yaml create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/deploy/secrets.yaml create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/load/emptyLayer.tar create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/load/frozen.go create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey1.crt create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey1.key create 
mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey2.crt create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey2.key create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey3.crt create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey3.key create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey4.crt create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey4.key create mode 100755 vendor/github.com/moby/moby/integration-cli/fixtures/notary/gen.sh create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/notary/localhost.cert create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/notary/localhost.key create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/plugin/basic/basic.go create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/plugin/plugin.go create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/plugin/plugin_linux.go create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/plugin/plugin_unsuported.go create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures/secrets/default create mode 100644 vendor/github.com/moby/moby/integration-cli/fixtures_linux_daemon_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/registry/registry.go create mode 100644 vendor/github.com/moby/moby/integration-cli/registry/registry_mock.go create mode 100644 vendor/github.com/moby/moby/integration-cli/registry/requirement.go create mode 100644 vendor/github.com/moby/moby/integration-cli/request/npipe.go create mode 100644 vendor/github.com/moby/moby/integration-cli/request/npipe_windows.go create mode 100644 vendor/github.com/moby/moby/integration-cli/request/request.go create mode 100644 vendor/github.com/moby/moby/integration-cli/requirement/requirement.go create mode 100644 vendor/github.com/moby/moby/integration-cli/requirements_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/requirements_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/test_vars_exec_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/test_vars_noexec_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/test_vars_noseccomp_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/test_vars_seccomp_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/test_vars_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/test_vars_unix_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/test_vars_windows_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/trust_server_test.go create mode 100644 vendor/github.com/moby/moby/integration-cli/utils_test.go create mode 100644 vendor/github.com/moby/moby/layer/empty.go create mode 100644 vendor/github.com/moby/moby/layer/empty_test.go create mode 100644 vendor/github.com/moby/moby/layer/filestore.go create mode 100644 vendor/github.com/moby/moby/layer/filestore_test.go create mode 100644 vendor/github.com/moby/moby/layer/filestore_unix.go create mode 100644 vendor/github.com/moby/moby/layer/filestore_windows.go create mode 100644 vendor/github.com/moby/moby/layer/layer.go create mode 100644 vendor/github.com/moby/moby/layer/layer_store.go create mode 100644 vendor/github.com/moby/moby/layer/layer_store_windows.go create mode 
100644 vendor/github.com/moby/moby/layer/layer_test.go create mode 100644 vendor/github.com/moby/moby/layer/layer_unix.go create mode 100644 vendor/github.com/moby/moby/layer/layer_unix_test.go create mode 100644 vendor/github.com/moby/moby/layer/layer_windows.go create mode 100644 vendor/github.com/moby/moby/layer/migration.go create mode 100644 vendor/github.com/moby/moby/layer/migration_test.go create mode 100644 vendor/github.com/moby/moby/layer/mount_test.go create mode 100644 vendor/github.com/moby/moby/layer/mounted_layer.go create mode 100644 vendor/github.com/moby/moby/layer/ro_layer.go create mode 100644 vendor/github.com/moby/moby/layer/ro_layer_unix.go create mode 100644 vendor/github.com/moby/moby/layer/ro_layer_windows.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/client.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/client_linux.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/client_solaris.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/client_unix.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/client_windows.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/container.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/container_unix.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/container_windows.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/oom_linux.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/oom_solaris.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/pausemonitor_unix.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/process.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/process_unix.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/process_windows.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/queue_unix.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/queue_unix_test.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/remote.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/remote_unix.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/remote_windows.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/types.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/types_linux.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/types_solaris.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/types_windows.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/utils_linux.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/utils_solaris.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/utils_windows.go create mode 100644 vendor/github.com/moby/moby/libcontainerd/utils_windows_test.go create mode 100644 vendor/github.com/moby/moby/migrate/v1/migratev1.go create mode 100644 vendor/github.com/moby/moby/migrate/v1/migratev1_test.go create mode 100644 vendor/github.com/moby/moby/oci/defaults.go create mode 100644 vendor/github.com/moby/moby/oci/devices_linux.go create mode 100644 vendor/github.com/moby/moby/oci/devices_unsupported.go create mode 100644 vendor/github.com/moby/moby/oci/namespaces.go create mode 100644 vendor/github.com/moby/moby/opts/env.go create mode 100644 vendor/github.com/moby/moby/opts/env_test.go create mode 100644 vendor/github.com/moby/moby/opts/hosts.go create mode 100644 vendor/github.com/moby/moby/opts/hosts_test.go create mode 100644 vendor/github.com/moby/moby/opts/hosts_unix.go 
create mode 100644 vendor/github.com/moby/moby/opts/hosts_windows.go create mode 100644 vendor/github.com/moby/moby/opts/ip.go create mode 100644 vendor/github.com/moby/moby/opts/ip_test.go create mode 100644 vendor/github.com/moby/moby/opts/opts.go create mode 100644 vendor/github.com/moby/moby/opts/opts_test.go create mode 100644 vendor/github.com/moby/moby/opts/opts_unix.go create mode 100644 vendor/github.com/moby/moby/opts/opts_windows.go create mode 100644 vendor/github.com/moby/moby/opts/quotedstring.go create mode 100644 vendor/github.com/moby/moby/opts/quotedstring_test.go create mode 100644 vendor/github.com/moby/moby/opts/runtime.go create mode 100644 vendor/github.com/moby/moby/opts/ulimit.go create mode 100644 vendor/github.com/moby/moby/opts/ulimit_test.go create mode 100644 vendor/github.com/moby/moby/pkg/README.md create mode 100644 vendor/github.com/moby/moby/pkg/aaparser/aaparser.go create mode 100644 vendor/github.com/moby/moby/pkg/aaparser/aaparser_test.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/README.md create mode 100644 vendor/github.com/moby/moby/pkg/archive/archive.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/archive_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/archive_linux_test.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/archive_other.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/archive_test.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/archive_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/archive_unix_test.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/archive_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/archive_windows_test.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/changes.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/changes_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/changes_other.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/changes_posix_test.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/changes_test.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/changes_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/changes_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/copy.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/copy_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/copy_unix_test.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/copy_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/diff.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/diff_test.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/example_changes.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/testdata/broken.tar create mode 100644 vendor/github.com/moby/moby/pkg/archive/time_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/time_unsupported.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/utils_test.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/whiteouts.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/wrap.go create mode 100644 vendor/github.com/moby/moby/pkg/archive/wrap_test.go create mode 100644 vendor/github.com/moby/moby/pkg/authorization/api.go create mode 100644 vendor/github.com/moby/moby/pkg/authorization/api_test.go create mode 100644 vendor/github.com/moby/moby/pkg/authorization/authz.go create mode 100644 
vendor/github.com/moby/moby/pkg/authorization/authz_unix_test.go create mode 100644 vendor/github.com/moby/moby/pkg/authorization/middleware.go create mode 100644 vendor/github.com/moby/moby/pkg/authorization/middleware_test.go create mode 100644 vendor/github.com/moby/moby/pkg/authorization/middleware_unix_test.go create mode 100644 vendor/github.com/moby/moby/pkg/authorization/plugin.go create mode 100644 vendor/github.com/moby/moby/pkg/authorization/response.go create mode 100644 vendor/github.com/moby/moby/pkg/broadcaster/unbuffered.go create mode 100644 vendor/github.com/moby/moby/pkg/broadcaster/unbuffered_test.go create mode 100644 vendor/github.com/moby/moby/pkg/chrootarchive/archive.go create mode 100644 vendor/github.com/moby/moby/pkg/chrootarchive/archive_test.go create mode 100644 vendor/github.com/moby/moby/pkg/chrootarchive/archive_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/chrootarchive/archive_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/chrootarchive/chroot_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/chrootarchive/chroot_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/chrootarchive/diff.go create mode 100644 vendor/github.com/moby/moby/pkg/chrootarchive/diff_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/chrootarchive/diff_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/chrootarchive/init_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/chrootarchive/init_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/devicemapper/devmapper.go create mode 100644 vendor/github.com/moby/moby/pkg/devicemapper/devmapper_log.go create mode 100644 vendor/github.com/moby/moby/pkg/devicemapper/devmapper_wrapper.go create mode 100644 vendor/github.com/moby/moby/pkg/devicemapper/devmapper_wrapper_deferred_remove.go create mode 100644 vendor/github.com/moby/moby/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go create mode 100644 vendor/github.com/moby/moby/pkg/devicemapper/ioctl.go create mode 100644 vendor/github.com/moby/moby/pkg/devicemapper/log.go create mode 100644 vendor/github.com/moby/moby/pkg/directory/directory.go create mode 100644 vendor/github.com/moby/moby/pkg/directory/directory_test.go create mode 100644 vendor/github.com/moby/moby/pkg/directory/directory_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/directory/directory_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/discovery/README.md create mode 100644 vendor/github.com/moby/moby/pkg/discovery/backends.go create mode 100644 vendor/github.com/moby/moby/pkg/discovery/discovery.go create mode 100644 vendor/github.com/moby/moby/pkg/discovery/discovery_test.go create mode 100644 vendor/github.com/moby/moby/pkg/discovery/entry.go create mode 100644 vendor/github.com/moby/moby/pkg/discovery/file/file.go create mode 100644 vendor/github.com/moby/moby/pkg/discovery/file/file_test.go create mode 100644 vendor/github.com/moby/moby/pkg/discovery/generator.go create mode 100644 vendor/github.com/moby/moby/pkg/discovery/generator_test.go create mode 100644 vendor/github.com/moby/moby/pkg/discovery/kv/kv.go create mode 100644 vendor/github.com/moby/moby/pkg/discovery/kv/kv_test.go create mode 100644 vendor/github.com/moby/moby/pkg/discovery/memory/memory.go create mode 100644 vendor/github.com/moby/moby/pkg/discovery/memory/memory_test.go create mode 100644 vendor/github.com/moby/moby/pkg/discovery/nodes/nodes.go create mode 100644 vendor/github.com/moby/moby/pkg/discovery/nodes/nodes_test.go create 
mode 100644 vendor/github.com/moby/moby/pkg/filenotify/filenotify.go create mode 100644 vendor/github.com/moby/moby/pkg/filenotify/fsnotify.go create mode 100644 vendor/github.com/moby/moby/pkg/filenotify/poller.go create mode 100644 vendor/github.com/moby/moby/pkg/filenotify/poller_test.go create mode 100644 vendor/github.com/moby/moby/pkg/fileutils/fileutils.go create mode 100644 vendor/github.com/moby/moby/pkg/fileutils/fileutils_darwin.go create mode 100644 vendor/github.com/moby/moby/pkg/fileutils/fileutils_solaris.go create mode 100644 vendor/github.com/moby/moby/pkg/fileutils/fileutils_test.go create mode 100644 vendor/github.com/moby/moby/pkg/fileutils/fileutils_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/fileutils/fileutils_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/fsutils/fsutils_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/fsutils/fsutils_linux_test.go create mode 100644 vendor/github.com/moby/moby/pkg/homedir/homedir_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/homedir/homedir_others.go create mode 100644 vendor/github.com/moby/moby/pkg/homedir/homedir_test.go create mode 100644 vendor/github.com/moby/moby/pkg/homedir/homedir_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/homedir/homedir_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/idtools/idtools.go create mode 100644 vendor/github.com/moby/moby/pkg/idtools/idtools_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/idtools/idtools_unix_test.go create mode 100644 vendor/github.com/moby/moby/pkg/idtools/idtools_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/idtools/usergroupadd_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/idtools/usergroupadd_unsupported.go create mode 100644 vendor/github.com/moby/moby/pkg/idtools/utils_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/ioutils/buffer.go create mode 100644 vendor/github.com/moby/moby/pkg/ioutils/buffer_test.go create mode 100644 vendor/github.com/moby/moby/pkg/ioutils/bytespipe.go create mode 100644 vendor/github.com/moby/moby/pkg/ioutils/bytespipe_test.go create mode 100644 vendor/github.com/moby/moby/pkg/ioutils/fswriters.go create mode 100644 vendor/github.com/moby/moby/pkg/ioutils/fswriters_test.go create mode 100644 vendor/github.com/moby/moby/pkg/ioutils/readers.go create mode 100644 vendor/github.com/moby/moby/pkg/ioutils/readers_test.go create mode 100644 vendor/github.com/moby/moby/pkg/ioutils/temp_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/ioutils/temp_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/ioutils/writeflusher.go create mode 100644 vendor/github.com/moby/moby/pkg/ioutils/writers.go create mode 100644 vendor/github.com/moby/moby/pkg/ioutils/writers_test.go create mode 100644 vendor/github.com/moby/moby/pkg/jsonlog/jsonlog.go create mode 100644 vendor/github.com/moby/moby/pkg/jsonlog/jsonlog_marshalling.go create mode 100644 vendor/github.com/moby/moby/pkg/jsonlog/jsonlog_marshalling_test.go create mode 100644 vendor/github.com/moby/moby/pkg/jsonlog/jsonlogbytes.go create mode 100644 vendor/github.com/moby/moby/pkg/jsonlog/jsonlogbytes_test.go create mode 100644 vendor/github.com/moby/moby/pkg/jsonlog/time_marshalling.go create mode 100644 vendor/github.com/moby/moby/pkg/jsonlog/time_marshalling_test.go create mode 100644 vendor/github.com/moby/moby/pkg/jsonmessage/jsonmessage.go create mode 100644 vendor/github.com/moby/moby/pkg/jsonmessage/jsonmessage_test.go create mode 100644 
vendor/github.com/moby/moby/pkg/listeners/group_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/listeners/listeners_solaris.go create mode 100644 vendor/github.com/moby/moby/pkg/listeners/listeners_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/listeners/listeners_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/locker/README.md create mode 100644 vendor/github.com/moby/moby/pkg/locker/locker.go create mode 100644 vendor/github.com/moby/moby/pkg/locker/locker_test.go create mode 100644 vendor/github.com/moby/moby/pkg/longpath/longpath.go create mode 100644 vendor/github.com/moby/moby/pkg/longpath/longpath_test.go create mode 100644 vendor/github.com/moby/moby/pkg/loopback/attach_loopback.go create mode 100644 vendor/github.com/moby/moby/pkg/loopback/ioctl.go create mode 100644 vendor/github.com/moby/moby/pkg/loopback/loop_wrapper.go create mode 100644 vendor/github.com/moby/moby/pkg/loopback/loopback.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/flags.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/flags_freebsd.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/flags_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/flags_unsupported.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/mount.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/mount_unix_test.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/mounter_freebsd.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/mounter_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/mounter_linux_test.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/mounter_solaris.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/mounter_unsupported.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/mountinfo.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/mountinfo_freebsd.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/mountinfo_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/mountinfo_linux_test.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/mountinfo_solaris.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/mountinfo_unsupported.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/mountinfo_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/sharedsubtree_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/sharedsubtree_linux_test.go create mode 100644 vendor/github.com/moby/moby/pkg/mount/sharedsubtree_solaris.go create mode 100644 vendor/github.com/moby/moby/pkg/namesgenerator/cmd/names-generator/main.go create mode 100644 vendor/github.com/moby/moby/pkg/namesgenerator/names-generator.go create mode 100644 vendor/github.com/moby/moby/pkg/namesgenerator/names-generator_test.go create mode 100644 vendor/github.com/moby/moby/pkg/parsers/kernel/kernel.go create mode 100644 vendor/github.com/moby/moby/pkg/parsers/kernel/kernel_darwin.go create mode 100644 vendor/github.com/moby/moby/pkg/parsers/kernel/kernel_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/parsers/kernel/kernel_unix_test.go create mode 100644 vendor/github.com/moby/moby/pkg/parsers/kernel/kernel_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/parsers/kernel/uname_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/parsers/kernel/uname_solaris.go create mode 100644 vendor/github.com/moby/moby/pkg/parsers/kernel/uname_unsupported.go create mode 100644 
vendor/github.com/moby/moby/pkg/parsers/operatingsystem/operatingsystem_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/parsers/operatingsystem/operatingsystem_solaris.go create mode 100644 vendor/github.com/moby/moby/pkg/parsers/operatingsystem/operatingsystem_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/parsers/operatingsystem/operatingsystem_unix_test.go create mode 100644 vendor/github.com/moby/moby/pkg/parsers/operatingsystem/operatingsystem_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/parsers/parsers.go create mode 100644 vendor/github.com/moby/moby/pkg/parsers/parsers_test.go create mode 100644 vendor/github.com/moby/moby/pkg/pidfile/pidfile.go create mode 100644 vendor/github.com/moby/moby/pkg/pidfile/pidfile_darwin.go create mode 100644 vendor/github.com/moby/moby/pkg/pidfile/pidfile_test.go create mode 100644 vendor/github.com/moby/moby/pkg/pidfile/pidfile_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/pidfile/pidfile_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/platform/architecture_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/platform/architecture_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/platform/architecture_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/platform/platform.go create mode 100644 vendor/github.com/moby/moby/pkg/platform/utsname_int8.go create mode 100644 vendor/github.com/moby/moby/pkg/platform/utsname_int8_test.go create mode 100644 vendor/github.com/moby/moby/pkg/platform/utsname_uint8.go create mode 100644 vendor/github.com/moby/moby/pkg/platform/utsname_uint8_test.go create mode 100644 vendor/github.com/moby/moby/pkg/plugingetter/getter.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/client.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/client_test.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/discovery.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/discovery_test.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/discovery_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/discovery_unix_test.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/discovery_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/errors.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/plugin_test.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/README.md create mode 100644 vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/fixtures/foo.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/fixtures/otherfixture/spaceship.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/main.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/parser.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/parser_test.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/template.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/plugins.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/plugins_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/plugins_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/transport/http.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/transport/http_test.go create mode 100644 vendor/github.com/moby/moby/pkg/plugins/transport/transport.go create mode 100644 vendor/github.com/moby/moby/pkg/pools/pools.go create mode 
100644 vendor/github.com/moby/moby/pkg/pools/pools_test.go create mode 100644 vendor/github.com/moby/moby/pkg/progress/progress.go create mode 100644 vendor/github.com/moby/moby/pkg/progress/progressreader.go create mode 100644 vendor/github.com/moby/moby/pkg/progress/progressreader_test.go create mode 100644 vendor/github.com/moby/moby/pkg/promise/promise.go create mode 100644 vendor/github.com/moby/moby/pkg/promise/promise_test.go create mode 100644 vendor/github.com/moby/moby/pkg/pubsub/publisher.go create mode 100644 vendor/github.com/moby/moby/pkg/pubsub/publisher_test.go create mode 100644 vendor/github.com/moby/moby/pkg/reexec/README.md create mode 100644 vendor/github.com/moby/moby/pkg/reexec/command_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/reexec/command_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/reexec/command_unsupported.go create mode 100644 vendor/github.com/moby/moby/pkg/reexec/command_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/reexec/reexec.go create mode 100644 vendor/github.com/moby/moby/pkg/reexec/reexec_test.go create mode 100644 vendor/github.com/moby/moby/pkg/signal/README.md create mode 100644 vendor/github.com/moby/moby/pkg/signal/signal.go create mode 100644 vendor/github.com/moby/moby/pkg/signal/signal_darwin.go create mode 100644 vendor/github.com/moby/moby/pkg/signal/signal_freebsd.go create mode 100644 vendor/github.com/moby/moby/pkg/signal/signal_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/signal/signal_linux_test.go create mode 100644 vendor/github.com/moby/moby/pkg/signal/signal_solaris.go create mode 100644 vendor/github.com/moby/moby/pkg/signal/signal_test.go create mode 100644 vendor/github.com/moby/moby/pkg/signal/signal_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/signal/signal_unsupported.go create mode 100644 vendor/github.com/moby/moby/pkg/signal/signal_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/signal/trap.go create mode 100644 vendor/github.com/moby/moby/pkg/stdcopy/stdcopy.go create mode 100644 vendor/github.com/moby/moby/pkg/stdcopy/stdcopy_test.go create mode 100644 vendor/github.com/moby/moby/pkg/streamformatter/streamformatter.go create mode 100644 vendor/github.com/moby/moby/pkg/streamformatter/streamformatter_test.go create mode 100644 vendor/github.com/moby/moby/pkg/streamformatter/streamwriter.go create mode 100644 vendor/github.com/moby/moby/pkg/streamformatter/streamwriter_test.go create mode 100644 vendor/github.com/moby/moby/pkg/stringid/README.md create mode 100644 vendor/github.com/moby/moby/pkg/stringid/stringid.go create mode 100644 vendor/github.com/moby/moby/pkg/stringid/stringid_test.go create mode 100644 vendor/github.com/moby/moby/pkg/stringutils/README.md create mode 100644 vendor/github.com/moby/moby/pkg/stringutils/stringutils.go create mode 100644 vendor/github.com/moby/moby/pkg/stringutils/stringutils_test.go create mode 100644 vendor/github.com/moby/moby/pkg/symlink/LICENSE.APACHE create mode 100644 vendor/github.com/moby/moby/pkg/symlink/LICENSE.BSD create mode 100644 vendor/github.com/moby/moby/pkg/symlink/README.md create mode 100644 vendor/github.com/moby/moby/pkg/symlink/fs.go create mode 100644 vendor/github.com/moby/moby/pkg/symlink/fs_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/symlink/fs_unix_test.go create mode 100644 vendor/github.com/moby/moby/pkg/symlink/fs_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/sysinfo/README.md create mode 100644 
vendor/github.com/moby/moby/pkg/sysinfo/numcpu.go create mode 100644 vendor/github.com/moby/moby/pkg/sysinfo/numcpu_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/sysinfo/numcpu_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/sysinfo/sysinfo.go create mode 100644 vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_linux_test.go create mode 100644 vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_solaris.go create mode 100644 vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_test.go create mode 100644 vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/system/chtimes.go create mode 100644 vendor/github.com/moby/moby/pkg/system/chtimes_test.go create mode 100644 vendor/github.com/moby/moby/pkg/system/chtimes_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/system/chtimes_unix_test.go create mode 100644 vendor/github.com/moby/moby/pkg/system/chtimes_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/system/chtimes_windows_test.go create mode 100644 vendor/github.com/moby/moby/pkg/system/errors.go create mode 100644 vendor/github.com/moby/moby/pkg/system/events_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/system/exitcode.go create mode 100644 vendor/github.com/moby/moby/pkg/system/filesys.go create mode 100644 vendor/github.com/moby/moby/pkg/system/filesys_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/system/init.go create mode 100644 vendor/github.com/moby/moby/pkg/system/init_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/system/lcow_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/system/lcow_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/system/lstat_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/system/lstat_unix_test.go create mode 100644 vendor/github.com/moby/moby/pkg/system/lstat_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/system/meminfo.go create mode 100644 vendor/github.com/moby/moby/pkg/system/meminfo_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/system/meminfo_solaris.go create mode 100644 vendor/github.com/moby/moby/pkg/system/meminfo_unix_test.go create mode 100644 vendor/github.com/moby/moby/pkg/system/meminfo_unsupported.go create mode 100644 vendor/github.com/moby/moby/pkg/system/meminfo_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/system/mknod.go create mode 100644 vendor/github.com/moby/moby/pkg/system/mknod_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/system/path.go create mode 100644 vendor/github.com/moby/moby/pkg/system/path_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/system/path_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/system/path_windows_test.go create mode 100644 vendor/github.com/moby/moby/pkg/system/process_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/system/rm.go create mode 100644 vendor/github.com/moby/moby/pkg/system/rm_test.go create mode 100644 vendor/github.com/moby/moby/pkg/system/stat_darwin.go create mode 100644 vendor/github.com/moby/moby/pkg/system/stat_freebsd.go create mode 100644 vendor/github.com/moby/moby/pkg/system/stat_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/system/stat_openbsd.go create mode 100644 vendor/github.com/moby/moby/pkg/system/stat_solaris.go create mode 100644 
vendor/github.com/moby/moby/pkg/system/stat_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/system/stat_unix_test.go create mode 100644 vendor/github.com/moby/moby/pkg/system/stat_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/system/syscall_unix.go create mode 100644 vendor/github.com/moby/moby/pkg/system/syscall_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/system/syscall_windows_test.go create mode 100644 vendor/github.com/moby/moby/pkg/system/umask.go create mode 100644 vendor/github.com/moby/moby/pkg/system/umask_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/system/utimes_freebsd.go create mode 100644 vendor/github.com/moby/moby/pkg/system/utimes_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/system/utimes_unix_test.go create mode 100644 vendor/github.com/moby/moby/pkg/system/utimes_unsupported.go create mode 100644 vendor/github.com/moby/moby/pkg/system/xattrs_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/system/xattrs_unsupported.go create mode 100644 vendor/github.com/moby/moby/pkg/tailfile/tailfile.go create mode 100644 vendor/github.com/moby/moby/pkg/tailfile/tailfile_test.go create mode 100644 vendor/github.com/moby/moby/pkg/tarsum/builder_context.go create mode 100644 vendor/github.com/moby/moby/pkg/tarsum/builder_context_test.go create mode 100644 vendor/github.com/moby/moby/pkg/tarsum/fileinfosums.go create mode 100644 vendor/github.com/moby/moby/pkg/tarsum/fileinfosums_test.go create mode 100644 vendor/github.com/moby/moby/pkg/tarsum/tarsum.go create mode 100644 vendor/github.com/moby/moby/pkg/tarsum/tarsum_spec.md create mode 100644 vendor/github.com/moby/moby/pkg/tarsum/tarsum_test.go create mode 100644 vendor/github.com/moby/moby/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json create mode 100644 vendor/github.com/moby/moby/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar create mode 100644 vendor/github.com/moby/moby/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json create mode 100644 vendor/github.com/moby/moby/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar create mode 100644 vendor/github.com/moby/moby/pkg/tarsum/testdata/collision/collision-0.tar create mode 100644 vendor/github.com/moby/moby/pkg/tarsum/testdata/collision/collision-1.tar create mode 100644 vendor/github.com/moby/moby/pkg/tarsum/testdata/collision/collision-2.tar create mode 100644 vendor/github.com/moby/moby/pkg/tarsum/testdata/collision/collision-3.tar create mode 100644 vendor/github.com/moby/moby/pkg/tarsum/testdata/xattr/json create mode 100644 vendor/github.com/moby/moby/pkg/tarsum/testdata/xattr/layer.tar create mode 100644 vendor/github.com/moby/moby/pkg/tarsum/versioning.go create mode 100644 vendor/github.com/moby/moby/pkg/tarsum/versioning_test.go create mode 100644 vendor/github.com/moby/moby/pkg/tarsum/writercloser.go create mode 100644 vendor/github.com/moby/moby/pkg/templates/templates.go create mode 100644 vendor/github.com/moby/moby/pkg/templates/templates_test.go create mode 100644 vendor/github.com/moby/moby/pkg/term/ascii.go create mode 100644 vendor/github.com/moby/moby/pkg/term/ascii_test.go create mode 100644 vendor/github.com/moby/moby/pkg/term/proxy.go create mode 100644 vendor/github.com/moby/moby/pkg/term/proxy_test.go create mode 100644 vendor/github.com/moby/moby/pkg/term/tc.go create mode 100644 
vendor/github.com/moby/moby/pkg/term/tc_solaris_cgo.go create mode 100644 vendor/github.com/moby/moby/pkg/term/term.go create mode 100644 vendor/github.com/moby/moby/pkg/term/term_linux_test.go create mode 100644 vendor/github.com/moby/moby/pkg/term/term_windows.go create mode 100644 vendor/github.com/moby/moby/pkg/term/termios_bsd.go create mode 100644 vendor/github.com/moby/moby/pkg/term/termios_linux.go create mode 100644 vendor/github.com/moby/moby/pkg/term/windows/ansi_reader.go create mode 100644 vendor/github.com/moby/moby/pkg/term/windows/ansi_writer.go create mode 100644 vendor/github.com/moby/moby/pkg/term/windows/console.go create mode 100644 vendor/github.com/moby/moby/pkg/term/windows/windows.go create mode 100644 vendor/github.com/moby/moby/pkg/term/windows/windows_test.go create mode 100644 vendor/github.com/moby/moby/pkg/term/winsize.go create mode 100644 vendor/github.com/moby/moby/pkg/term/winsize_solaris_cgo.go create mode 100644 vendor/github.com/moby/moby/pkg/testutil/cmd/command.go create mode 100644 vendor/github.com/moby/moby/pkg/testutil/cmd/command_test.go create mode 100644 vendor/github.com/moby/moby/pkg/testutil/golden/golden.go create mode 100644 vendor/github.com/moby/moby/pkg/testutil/helpers.go create mode 100644 vendor/github.com/moby/moby/pkg/testutil/pkg.go create mode 100644 vendor/github.com/moby/moby/pkg/testutil/tempfile/tempfile.go create mode 100644 vendor/github.com/moby/moby/pkg/testutil/utils.go create mode 100644 vendor/github.com/moby/moby/pkg/testutil/utils_test.go create mode 100644 vendor/github.com/moby/moby/pkg/tlsconfig/tlsconfig_clone.go create mode 100644 vendor/github.com/moby/moby/pkg/tlsconfig/tlsconfig_clone_go17.go create mode 100644 vendor/github.com/moby/moby/pkg/truncindex/truncindex.go create mode 100644 vendor/github.com/moby/moby/pkg/truncindex/truncindex_test.go create mode 100644 vendor/github.com/moby/moby/pkg/urlutil/urlutil.go create mode 100644 vendor/github.com/moby/moby/pkg/urlutil/urlutil_test.go create mode 100644 vendor/github.com/moby/moby/pkg/useragent/README.md create mode 100644 vendor/github.com/moby/moby/pkg/useragent/useragent.go create mode 100644 vendor/github.com/moby/moby/pkg/useragent/useragent_test.go create mode 100644 vendor/github.com/moby/moby/plugin/backend_linux.go create mode 100644 vendor/github.com/moby/moby/plugin/backend_unsupported.go create mode 100644 vendor/github.com/moby/moby/plugin/blobstore.go create mode 100644 vendor/github.com/moby/moby/plugin/defs.go create mode 100644 vendor/github.com/moby/moby/plugin/events.go create mode 100644 vendor/github.com/moby/moby/plugin/manager.go create mode 100644 vendor/github.com/moby/moby/plugin/manager_linux.go create mode 100644 vendor/github.com/moby/moby/plugin/manager_solaris.go create mode 100644 vendor/github.com/moby/moby/plugin/manager_test.go create mode 100644 vendor/github.com/moby/moby/plugin/manager_windows.go create mode 100644 vendor/github.com/moby/moby/plugin/store.go create mode 100644 vendor/github.com/moby/moby/plugin/store_test.go create mode 100644 vendor/github.com/moby/moby/plugin/v2/plugin.go create mode 100644 vendor/github.com/moby/moby/plugin/v2/plugin_linux.go create mode 100644 vendor/github.com/moby/moby/plugin/v2/plugin_unsupported.go create mode 100644 vendor/github.com/moby/moby/plugin/v2/settable.go create mode 100644 vendor/github.com/moby/moby/plugin/v2/settable_test.go create mode 100644 vendor/github.com/moby/moby/poule.yml create mode 100644 vendor/github.com/moby/moby/profiles/apparmor/apparmor.go 
create mode 100644 vendor/github.com/moby/moby/profiles/apparmor/template.go create mode 100755 vendor/github.com/moby/moby/profiles/seccomp/default.json create mode 100755 vendor/github.com/moby/moby/profiles/seccomp/fixtures/example.json create mode 100644 vendor/github.com/moby/moby/profiles/seccomp/generate.go create mode 100644 vendor/github.com/moby/moby/profiles/seccomp/seccomp.go create mode 100644 vendor/github.com/moby/moby/profiles/seccomp/seccomp_default.go create mode 100644 vendor/github.com/moby/moby/profiles/seccomp/seccomp_test.go create mode 100644 vendor/github.com/moby/moby/profiles/seccomp/seccomp_unsupported.go create mode 100644 vendor/github.com/moby/moby/project/ARM.md create mode 100644 vendor/github.com/moby/moby/project/BRANCHES-AND-TAGS.md create mode 120000 vendor/github.com/moby/moby/project/CONTRIBUTING.md create mode 100644 vendor/github.com/moby/moby/project/GOVERNANCE.md create mode 100644 vendor/github.com/moby/moby/project/IRC-ADMINISTRATION.md create mode 100644 vendor/github.com/moby/moby/project/ISSUE-TRIAGE.md create mode 100644 vendor/github.com/moby/moby/project/PACKAGE-REPO-MAINTENANCE.md create mode 100644 vendor/github.com/moby/moby/project/PACKAGERS.md create mode 100644 vendor/github.com/moby/moby/project/PATCH-RELEASES.md create mode 100644 vendor/github.com/moby/moby/project/PRINCIPLES.md create mode 100644 vendor/github.com/moby/moby/project/README.md create mode 100644 vendor/github.com/moby/moby/project/RELEASE-CHECKLIST.md create mode 100644 vendor/github.com/moby/moby/project/RELEASE-PROCESS.md create mode 100644 vendor/github.com/moby/moby/project/REVIEWING.md create mode 100644 vendor/github.com/moby/moby/project/TOOLS.md create mode 100644 vendor/github.com/moby/moby/reference/store.go create mode 100644 vendor/github.com/moby/moby/reference/store_test.go create mode 100644 vendor/github.com/moby/moby/registry/auth.go create mode 100644 vendor/github.com/moby/moby/registry/auth_test.go create mode 100644 vendor/github.com/moby/moby/registry/config.go create mode 100644 vendor/github.com/moby/moby/registry/config_test.go create mode 100644 vendor/github.com/moby/moby/registry/config_unix.go create mode 100644 vendor/github.com/moby/moby/registry/config_windows.go create mode 100644 vendor/github.com/moby/moby/registry/endpoint_test.go create mode 100644 vendor/github.com/moby/moby/registry/endpoint_v1.go create mode 100644 vendor/github.com/moby/moby/registry/registry.go create mode 100644 vendor/github.com/moby/moby/registry/registry_mock_test.go create mode 100644 vendor/github.com/moby/moby/registry/registry_test.go create mode 100644 vendor/github.com/moby/moby/registry/resumable/resumablerequestreader.go create mode 100644 vendor/github.com/moby/moby/registry/resumable/resumablerequestreader_test.go create mode 100644 vendor/github.com/moby/moby/registry/service.go create mode 100644 vendor/github.com/moby/moby/registry/service_v1.go create mode 100644 vendor/github.com/moby/moby/registry/service_v1_test.go create mode 100644 vendor/github.com/moby/moby/registry/service_v2.go create mode 100644 vendor/github.com/moby/moby/registry/session.go create mode 100644 vendor/github.com/moby/moby/registry/types.go create mode 100644 vendor/github.com/moby/moby/reports/2017-05-01.md create mode 100644 vendor/github.com/moby/moby/reports/2017-05-08.md create mode 100644 vendor/github.com/moby/moby/reports/2017-05-15.md create mode 100644 vendor/github.com/moby/moby/reports/2017-06-05.md create mode 100644 
vendor/github.com/moby/moby/reports/2017-06-12.md create mode 100644 vendor/github.com/moby/moby/reports/2017-06-26.md create mode 100644 vendor/github.com/moby/moby/reports/builder/2017-05-01.md create mode 100644 vendor/github.com/moby/moby/reports/builder/2017-05-08.md create mode 100644 vendor/github.com/moby/moby/reports/builder/2017-05-15.md create mode 100644 vendor/github.com/moby/moby/reports/builder/2017-05-22.md create mode 100644 vendor/github.com/moby/moby/reports/builder/2017-05-29.md create mode 100644 vendor/github.com/moby/moby/reports/builder/2017-06-05.md create mode 100644 vendor/github.com/moby/moby/reports/builder/2017-06-12.md create mode 100644 vendor/github.com/moby/moby/reports/builder/2017-06-26.md create mode 100644 vendor/github.com/moby/moby/reports/builder/2017-07-10.md create mode 100644 vendor/github.com/moby/moby/restartmanager/restartmanager.go create mode 100644 vendor/github.com/moby/moby/restartmanager/restartmanager_test.go create mode 100644 vendor/github.com/moby/moby/runconfig/config.go create mode 100644 vendor/github.com/moby/moby/runconfig/config_test.go create mode 100644 vendor/github.com/moby/moby/runconfig/config_unix.go create mode 100644 vendor/github.com/moby/moby/runconfig/config_windows.go create mode 100644 vendor/github.com/moby/moby/runconfig/errors.go create mode 100644 vendor/github.com/moby/moby/runconfig/fixtures/unix/container_config_1_14.json create mode 100644 vendor/github.com/moby/moby/runconfig/fixtures/unix/container_config_1_17.json create mode 100644 vendor/github.com/moby/moby/runconfig/fixtures/unix/container_config_1_19.json create mode 100644 vendor/github.com/moby/moby/runconfig/fixtures/unix/container_hostconfig_1_14.json create mode 100644 vendor/github.com/moby/moby/runconfig/fixtures/unix/container_hostconfig_1_19.json create mode 100644 vendor/github.com/moby/moby/runconfig/fixtures/windows/container_config_1_19.json create mode 100644 vendor/github.com/moby/moby/runconfig/hostconfig.go create mode 100644 vendor/github.com/moby/moby/runconfig/hostconfig_solaris.go create mode 100644 vendor/github.com/moby/moby/runconfig/hostconfig_test.go create mode 100644 vendor/github.com/moby/moby/runconfig/hostconfig_unix.go create mode 100644 vendor/github.com/moby/moby/runconfig/hostconfig_windows.go create mode 100644 vendor/github.com/moby/moby/runconfig/hostconfig_windows_test.go create mode 100644 vendor/github.com/moby/moby/runconfig/opts/parse.go create mode 100644 vendor/github.com/moby/moby/vendor.conf create mode 100644 vendor/github.com/moby/moby/volume/drivers/adapter.go create mode 100644 vendor/github.com/moby/moby/volume/drivers/extpoint.go create mode 100644 vendor/github.com/moby/moby/volume/drivers/extpoint_test.go create mode 100644 vendor/github.com/moby/moby/volume/drivers/proxy.go create mode 100644 vendor/github.com/moby/moby/volume/drivers/proxy_test.go create mode 100644 vendor/github.com/moby/moby/volume/local/local.go create mode 100644 vendor/github.com/moby/moby/volume/local/local_test.go create mode 100644 vendor/github.com/moby/moby/volume/local/local_unix.go create mode 100644 vendor/github.com/moby/moby/volume/local/local_windows.go create mode 100644 vendor/github.com/moby/moby/volume/store/db.go create mode 100644 vendor/github.com/moby/moby/volume/store/errors.go create mode 100644 vendor/github.com/moby/moby/volume/store/restore.go create mode 100644 vendor/github.com/moby/moby/volume/store/store.go create mode 100644 vendor/github.com/moby/moby/volume/store/store_test.go create mode 
100644 vendor/github.com/moby/moby/volume/store/store_unix.go create mode 100644 vendor/github.com/moby/moby/volume/store/store_windows.go create mode 100644 vendor/github.com/moby/moby/volume/testutils/testutils.go create mode 100644 vendor/github.com/moby/moby/volume/validate.go create mode 100644 vendor/github.com/moby/moby/volume/validate_test.go create mode 100644 vendor/github.com/moby/moby/volume/validate_test_unix.go create mode 100644 vendor/github.com/moby/moby/volume/validate_test_windows.go create mode 100644 vendor/github.com/moby/moby/volume/volume.go create mode 100644 vendor/github.com/moby/moby/volume/volume_copy.go create mode 100644 vendor/github.com/moby/moby/volume/volume_copy_unix.go create mode 100644 vendor/github.com/moby/moby/volume/volume_copy_windows.go create mode 100644 vendor/github.com/moby/moby/volume/volume_linux.go create mode 100644 vendor/github.com/moby/moby/volume/volume_linux_test.go create mode 100644 vendor/github.com/moby/moby/volume/volume_propagation_linux.go create mode 100644 vendor/github.com/moby/moby/volume/volume_propagation_linux_test.go create mode 100644 vendor/github.com/moby/moby/volume/volume_propagation_unsupported.go create mode 100644 vendor/github.com/moby/moby/volume/volume_test.go create mode 100644 vendor/github.com/moby/moby/volume/volume_unix.go create mode 100644 vendor/github.com/moby/moby/volume/volume_unsupported.go create mode 100644 vendor/github.com/moby/moby/volume/volume_windows.go create mode 100644 vendor/github.com/onsi/gomega/.gitignore create mode 100644 vendor/github.com/onsi/gomega/.travis.yml create mode 100644 vendor/github.com/onsi/gomega/CHANGELOG.md create mode 100644 vendor/github.com/onsi/gomega/CONTRIBUTING.md create mode 100644 vendor/github.com/onsi/gomega/LICENSE create mode 100644 vendor/github.com/onsi/gomega/README.md create mode 100644 vendor/github.com/onsi/gomega/format/format.go create mode 100644 vendor/github.com/onsi/gomega/format/format_suite_test.go create mode 100644 vendor/github.com/onsi/gomega/format/format_test.go create mode 100644 vendor/github.com/onsi/gomega/gbytes/buffer.go create mode 100644 vendor/github.com/onsi/gomega/gbytes/buffer_test.go create mode 100644 vendor/github.com/onsi/gomega/gbytes/gbuffer_suite_test.go create mode 100644 vendor/github.com/onsi/gomega/gbytes/io_wrappers.go create mode 100644 vendor/github.com/onsi/gomega/gbytes/io_wrappers_test.go create mode 100644 vendor/github.com/onsi/gomega/gbytes/say_matcher.go create mode 100644 vendor/github.com/onsi/gomega/gbytes/say_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/gexec/_fixture/firefly/main.go create mode 100644 vendor/github.com/onsi/gomega/gexec/build.go create mode 100644 vendor/github.com/onsi/gomega/gexec/build_test.go create mode 100644 vendor/github.com/onsi/gomega/gexec/exit_matcher.go create mode 100644 vendor/github.com/onsi/gomega/gexec/exit_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/gexec/gexec_suite_test.go create mode 100644 vendor/github.com/onsi/gomega/gexec/prefixed_writer.go create mode 100644 vendor/github.com/onsi/gomega/gexec/prefixed_writer_test.go create mode 100644 vendor/github.com/onsi/gomega/gexec/session.go create mode 100644 vendor/github.com/onsi/gomega/gexec/session_test.go create mode 100644 vendor/github.com/onsi/gomega/ghttp/handlers.go create mode 100644 vendor/github.com/onsi/gomega/ghttp/protobuf/protobuf.go create mode 100644 vendor/github.com/onsi/gomega/ghttp/protobuf/simple_message.pb.go create mode 100644 
vendor/github.com/onsi/gomega/ghttp/protobuf/simple_message.proto create mode 100644 vendor/github.com/onsi/gomega/ghttp/test_server.go create mode 100644 vendor/github.com/onsi/gomega/ghttp/test_server_suite_test.go create mode 100644 vendor/github.com/onsi/gomega/ghttp/test_server_test.go create mode 100644 vendor/github.com/onsi/gomega/gomega_dsl.go create mode 100644 vendor/github.com/onsi/gomega/gstruct/elements.go create mode 100644 vendor/github.com/onsi/gomega/gstruct/elements_test.go create mode 100644 vendor/github.com/onsi/gomega/gstruct/errors/nested_types.go create mode 100644 vendor/github.com/onsi/gomega/gstruct/fields.go create mode 100644 vendor/github.com/onsi/gomega/gstruct/fields_test.go create mode 100644 vendor/github.com/onsi/gomega/gstruct/gstruct_tests_suite_test.go create mode 100644 vendor/github.com/onsi/gomega/gstruct/ignore.go create mode 100644 vendor/github.com/onsi/gomega/gstruct/ignore_test.go create mode 100644 vendor/github.com/onsi/gomega/gstruct/pointer.go create mode 100644 vendor/github.com/onsi/gomega/gstruct/pointer_test.go create mode 100644 vendor/github.com/onsi/gomega/gstruct/types.go create mode 100644 vendor/github.com/onsi/gomega/internal/assertion/assertion.go create mode 100644 vendor/github.com/onsi/gomega/internal/assertion/assertion_suite_test.go create mode 100644 vendor/github.com/onsi/gomega/internal/assertion/assertion_test.go create mode 100644 vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go create mode 100644 vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion_suite_test.go create mode 100644 vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion_test.go create mode 100644 vendor/github.com/onsi/gomega/internal/fakematcher/fake_matcher.go create mode 100644 vendor/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go create mode 100644 vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go create mode 100644 vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers.go create mode 100644 vendor/github.com/onsi/gomega/matchers/and.go create mode 100644 vendor/github.com/onsi/gomega/matchers/and_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_a_directory.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_a_directory_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_a_regular_file_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_an_existing_file_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_closed_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_empty_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_false_matcher.go create mode 100644 
vendor/github.com/onsi/gomega/matchers/be_false_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_identical_to.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_identical_to_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_nil_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_numerically_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_sent_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_temporally_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_true_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_true_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_zero_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/consist_of.go create mode 100644 vendor/github.com/onsi/gomega/matchers/consist_of_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/contain_element_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/contain_substring_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/equal_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/equal_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/have_cap_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/have_key_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/have_key_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/have_len_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/have_len_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/have_occurred_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/have_prefix_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/have_suffix_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/match_error_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/match_error_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/match_json_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/match_json_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/match_regexp_matcher_test.go create mode 100644 
vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/match_xml_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/match_yaml_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/matcher_tests_suite_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/not.go create mode 100644 vendor/github.com/onsi/gomega/matchers/not_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/or.go create mode 100644 vendor/github.com/onsi/gomega/matchers/or_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/panic_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/panic_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/receive_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/receive_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/succeed_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/succeed_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/support/goraph/MIT.LICENSE create mode 100644 vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go create mode 100644 vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go create mode 100644 vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go create mode 100644 vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go create mode 100644 vendor/github.com/onsi/gomega/matchers/support/goraph/util/util.go create mode 100644 vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_01.xml create mode 100644 vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_02.xml create mode 100644 vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_03.xml create mode 100644 vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_04.xml create mode 100644 vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_05.xml create mode 100644 vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_06.xml create mode 100644 vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_07.xml create mode 100644 vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_08.xml create mode 100644 vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_09.xml create mode 100644 vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_10.xml create mode 100644 vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_11.xml create mode 100644 vendor/github.com/onsi/gomega/matchers/type_support.go create mode 100644 vendor/github.com/onsi/gomega/matchers/with_transform.go create mode 100644 vendor/github.com/onsi/gomega/matchers/with_transform_test.go create mode 100644 vendor/github.com/onsi/gomega/types/types.go create mode 100644 vendor/github.com/urfave/cli/.flake8 create mode 100644 vendor/github.com/urfave/cli/.gitignore create mode 100644 vendor/github.com/urfave/cli/.travis.yml create mode 100644 vendor/github.com/urfave/cli/CHANGELOG.md create mode 100644 vendor/github.com/urfave/cli/LICENSE create mode 100644 vendor/github.com/urfave/cli/README.md create mode 100644 vendor/github.com/urfave/cli/altsrc/altsrc.go create mode 100644 vendor/github.com/urfave/cli/altsrc/flag.go create mode 100644 vendor/github.com/urfave/cli/altsrc/flag_generated.go create mode 100644 
vendor/github.com/urfave/cli/altsrc/flag_test.go create mode 100644 vendor/github.com/urfave/cli/altsrc/helpers_test.go create mode 100644 vendor/github.com/urfave/cli/altsrc/input_source_context.go create mode 100644 vendor/github.com/urfave/cli/altsrc/map_input_source.go create mode 100644 vendor/github.com/urfave/cli/altsrc/toml_command_test.go create mode 100644 vendor/github.com/urfave/cli/altsrc/toml_file_loader.go create mode 100644 vendor/github.com/urfave/cli/altsrc/yaml_command_test.go create mode 100644 vendor/github.com/urfave/cli/altsrc/yaml_file_loader.go create mode 100644 vendor/github.com/urfave/cli/app.go create mode 100644 vendor/github.com/urfave/cli/app_test.go create mode 100644 vendor/github.com/urfave/cli/appveyor.yml create mode 100755 vendor/github.com/urfave/cli/autocomplete/bash_autocomplete create mode 100644 vendor/github.com/urfave/cli/autocomplete/zsh_autocomplete create mode 100644 vendor/github.com/urfave/cli/category.go create mode 100644 vendor/github.com/urfave/cli/cli.go create mode 100644 vendor/github.com/urfave/cli/command.go create mode 100644 vendor/github.com/urfave/cli/command_test.go create mode 100644 vendor/github.com/urfave/cli/context.go create mode 100644 vendor/github.com/urfave/cli/context_test.go create mode 100644 vendor/github.com/urfave/cli/errors.go create mode 100644 vendor/github.com/urfave/cli/errors_test.go create mode 100644 vendor/github.com/urfave/cli/flag-types.json create mode 100644 vendor/github.com/urfave/cli/flag.go create mode 100644 vendor/github.com/urfave/cli/flag_generated.go create mode 100644 vendor/github.com/urfave/cli/flag_test.go create mode 100644 vendor/github.com/urfave/cli/funcs.go create mode 100755 vendor/github.com/urfave/cli/generate-flag-types create mode 100644 vendor/github.com/urfave/cli/help.go create mode 100644 vendor/github.com/urfave/cli/help_test.go create mode 100644 vendor/github.com/urfave/cli/helpers_test.go create mode 100644 vendor/github.com/urfave/cli/helpers_unix_test.go create mode 100644 vendor/github.com/urfave/cli/helpers_windows_test.go create mode 100755 vendor/github.com/urfave/cli/runtests
diff --git a/cli/glide.lock b/cli/glide.lock
index 90ce02ead..ccc9f2c51 100644
--- a/cli/glide.lock
+++ b/cli/glide.lock
@@ -1,10 +1,10 @@
-hash: 4cce7a5074b5b856c40bc5a97c7fac81546f835c42fd16aeff728f66ca65dd00
-updated: 2017-07-19T15:40:45.33026105-07:00
+hash: ac1f86c693857c54f425217105213884d89e2e3f1818804faba74b7eb5839b6c
+updated: 2017-07-26T12:39:47.375642177-07:00
 imports:
 - name: github.com/asaskevich/govalidator
   version: aa5cce4a76edb1a5acecab1870c17abbffb5419e
 - name: github.com/aws/aws-sdk-go
-  version: 8da51c33f6001c4dda06a2561c2234be4cece1ed
+  version: 61b379ef486ea3baa55e10c358f3217f06e7b5ad
   subpackages:
   - aws
   - aws/awserr
@@ -50,7 +50,7 @@ imports:
 - name: github.com/docker/go-units
   version: 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
 - name: github.com/funcy/functions_go
-  version: 601696a734d8df755f42bff262e699bb2eeb69b3
+  version: c540b7a8e1af8dad992a3b520175db85f8e53636
   subpackages:
   - client
   - client/apps
diff --git a/cli/vendor/github.com/aws/aws-sdk-go/CHANGELOG.md b/cli/vendor/github.com/aws/aws-sdk-go/CHANGELOG.md
index bcca07858..134d37241 100644
--- a/cli/vendor/github.com/aws/aws-sdk-go/CHANGELOG.md
+++ b/cli/vendor/github.com/aws/aws-sdk-go/CHANGELOG.md
@@ -1,3 +1,35 @@
+Release v1.10.16 (2017-07-26)
+===
+
+### Service Client Updates
+* `service/clouddirectory`: Updates service API and documentation
+  * Cloud Directory adds support for additional batch operations.
+* `service/cloudformation`: Updates service API and documentation
+  * AWS CloudFormation StackSets enables you to manage stacks across multiple accounts and regions.
+
+### SDK Enhancements
+* `aws/signer/v4`: Optimize V4 signer's header duplicate space stripping. [#1417](https://github.com/aws/aws-sdk-go/pull/1417)
+
+Release v1.10.15 (2017-07-24)
+===
+
+### Service Client Updates
+* `service/appstream`: Updates service API, documentation, and waiters
+  * Amazon AppStream 2.0 image builders and fleets can now access applications and network resources that rely on Microsoft Active Directory (AD) for authentication and permissions. This new feature allows you to join your streaming instances to your AD, so you can use your existing AD user management tools.
+* `service/ec2`: Updates service API and documentation
+  * Spot Fleet tagging capability allows customers to automatically tag instances launched by Spot Fleet. You can use this feature to label or distinguish instances created by distinct Spot Fleets. Tagging your EC2 instances also enables you to see instance cost allocation by tag in your AWS bill.
+
+### SDK Bugs
+* `aws/signer/v4`: Fix out of bounds panic in stripExcessSpaces [#1412](https://github.com/aws/aws-sdk-go/pull/1412)
+  * Fixes the out of bounds panic in stripExcessSpaces caused by an incorrect calculation of the stripToIdx value. Also simplifies the code.
+  * Fixes [#1411](https://github.com/aws/aws-sdk-go/issues/1411)
+
+Release v1.10.14 (2017-07-20)
+===
+
+### Service Client Updates
+* `service/elasticmapreduce`: Updates service API and documentation
+  * Amazon EMR now includes the ability to use a custom Amazon Linux AMI and adjustable root volume size when launching a cluster.
+
 Release v1.10.13 (2017-07-19)
 ===
diff --git a/cli/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/cli/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
index a0e9bc454..84316b92c 100644
--- a/cli/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
+++ b/cli/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
@@ -21,12 +21,12 @@
 //     partitions := resolver.(endpoints.EnumPartitions).Partitions()
 //
 //     for _, p := range partitions {
-//         fmt.Println("Regions for", p.Name)
+//         fmt.Println("Regions for", p.ID())
 //         for id, _ := range p.Regions() {
 //             fmt.Println("*", id)
 //         }
 //
-//         fmt.Println("Services for", p.Name)
+//         fmt.Println("Services for", p.ID())
 //         for id, _ := range p.Services() {
 //             fmt.Println("*", id)
 //         }
diff --git a/cli/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/cli/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
index b7da95ab6..d68905acb 100644
--- a/cli/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
+++ b/cli/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
@@ -55,7 +55,6 @@
 package v4
 
 import (
-	"bytes"
 	"crypto/hmac"
 	"crypto/sha256"
 	"encoding/hex"
@@ -614,8 +613,8 @@ func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) {
 				strings.Join(ctx.SignedHeaderVals[k], ",")
 		}
 	}
-
-	ctx.canonicalHeaders = strings.Join(stripExcessSpaces(headerValues), "\n")
+	stripExcessSpaces(headerValues)
+	ctx.canonicalHeaders = strings.Join(headerValues, "\n")
 }
 
 func (ctx *signingCtx) buildCanonicalString() {
@@ -717,45 +716,46 @@ func makeSha256Reader(reader io.ReadSeeker) []byte {
 	return hash.Sum(nil)
 }
 
-const doubleSpaces = "  "
+const doubleSpace = "  "
 
-var doubleSpaceBytes = []byte(doubleSpaces)
-
+// stripExcessSpaces will rewrite the passed in slice's string values to not
+// contain multiple side-by-side spaces.
+func stripExcessSpaces(vals []string) {
+	var j, k, l, m, spaces int
+	for i, str := range vals {
+		// Trim trailing spaces
+		for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- {
+		}
 
-func stripExcessSpaces(headerVals []string) []string {
-	vals := make([]string, len(headerVals))
-	for i, str := range headerVals {
-		// Trim leading and trailing spaces
-		trimmed := strings.TrimSpace(str)
+		// Trim leading spaces
+		for k = 0; k < j && str[k] == ' '; k++ {
+		}
+		str = str[k : j+1]
 
-		idx := strings.Index(trimmed, doubleSpaces)
-		if idx < 0 {
-			vals[i] = trimmed
+		// Strip multiple spaces.
+		j = strings.Index(str, doubleSpace)
+		if j < 0 {
+			vals[i] = str
 			continue
 		}
 
-		buf := []byte(trimmed)
-		for idx > -1 {
-			stripToIdx := -1
-			for j := idx + 1; j < len(buf); j++ {
-				if buf[j] != ' ' {
-					buf = append(buf[:idx+1], buf[j:]...)
-					stripToIdx = j - idx - 1
-					break
-				}
-			}
-
-			if stripToIdx >= 0 {
-				// Find next double space
-				idx = bytes.Index(buf[stripToIdx:], doubleSpaceBytes)
-				if idx >= 0 {
-					idx += stripToIdx
+		buf := []byte(str)
+		for k, m, l = j, j, len(buf); k < l; k++ {
+			if buf[k] == ' ' {
+				if spaces == 0 {
+					// First space.
+					buf[m] = buf[k]
+					m++
 				}
+				spaces++
 			} else {
-				idx = -1
+				// End of multiple spaces.
+				spaces = 0
+				buf[m] = buf[k]
+				m++
 			}
 		}
 
-		vals[i] = string(buf)
+		vals[i] = string(buf[:m])
 	}
-	return vals
 }
diff --git a/cli/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4_test.go b/cli/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4_test.go
index 44a70de35..e94e63491 100644
--- a/cli/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4_test.go
+++ b/cli/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4_test.go
@@ -20,8 +20,10 @@ import (
 
 func TestStripExcessHeaders(t *testing.T) {
 	vals := []string{
+		"",
 		"123",
 		"1 2 3",
+		"1 2 3 ",
 		" 1 2 3",
 		"1  2 3",
 		"1 23",
@@ -30,24 +32,30 @@
 		" 1 2 ",
 		"12   3",
 		"12   3   1",
+		"12           3     1",
+		"12     3       1abc123",
 	}
 
 	expected := []string{
+		"",
 		"123",
 		"1 2 3",
 		"1 2 3",
 		"1 2 3",
+		"1 2 3",
 		"1 23",
 		"1 2 3",
 		"1 2",
 		"1 2",
 		"12 3",
 		"12 3 1",
+		"12 3 1",
+		"12 3 1abc123",
 	}
 
-	newVals := stripExcessSpaces(vals)
-	for i := 0; i < len(newVals); i++ {
-		assert.Equal(t, expected[i], newVals[i], "test: %d", i)
+	stripExcessSpaces(vals)
+	for i := 0; i < len(vals); i++ {
+		assert.Equal(t, expected[i], vals[i], "test: %d", i)
 	}
 }
 
@@ -507,15 +515,29 @@ func BenchmarkSignRequest(b *testing.B) {
 	}
 }
 
-func BenchmarkStripExcessSpaces(b *testing.B) {
-	vals := []string{
-		`AWS4-HMAC-SHA256 Credential=AKIDFAKEIDFAKEID/20160628/us-west-2/s3/aws4_request, SignedHeaders=host;x-amz-date, Signature=1234567890abcdef1234567890abcdef1234567890abcdef`,
-		`123   321   123   321`,
-		`   123   321   123   321   `,
-	}
+var stripExcessSpaceCases = []string{
+	`AWS4-HMAC-SHA256 Credential=AKIDFAKEIDFAKEID/20160628/us-west-2/s3/aws4_request, SignedHeaders=host;x-amz-date, Signature=1234567890abcdef1234567890abcdef1234567890abcdef`,
+	`123   321   123   321`,
+	`   123   321   123   321   `,
+	`   123       321       123       321   `,
+	"123",
+	"1 2 3",
+	" 1 2 3",
+	"1  2 3",
+	"1 23",
+	"1 2 3",
+	"1 2 ",
+	" 1 2 ",
+	"12   3",
+	"12   3   1",
+	"12           3     1",
+	"12     3       1abc123",
+}
 
-	b.ResetTimer()
+func BenchmarkStripExcessSpaces(b *testing.B) {
 	for i := 0; i < b.N; i++ {
-		stripExcessSpaces(vals)
+		// Make sure to start with a copy of the cases
+		cases := append([]string{}, stripExcessSpaceCases...)
+		stripExcessSpaces(cases)
 	}
 }
diff --git a/cli/vendor/github.com/aws/aws-sdk-go/aws/version.go b/cli/vendor/github.com/aws/aws-sdk-go/aws/version.go
index 775af6de1..447dbf914 100644
--- a/cli/vendor/github.com/aws/aws-sdk-go/aws/version.go
+++ b/cli/vendor/github.com/aws/aws-sdk-go/aws/version.go
@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"
 
 // SDKVersion is the version of this SDK
-const SDKVersion = "1.10.13"
+const SDKVersion = "1.10.16"
diff --git a/cli/vendor/github.com/aws/aws-sdk-go/models/apis/appstream/2016-12-01/api-2.json b/cli/vendor/github.com/aws/aws-sdk-go/models/apis/appstream/2016-12-01/api-2.json
index ece981dbb..e83461f87 100644
--- a/cli/vendor/github.com/aws/aws-sdk-go/models/apis/appstream/2016-12-01/api-2.json
+++ b/cli/vendor/github.com/aws/aws-sdk-go/models/apis/appstream/2016-12-01/api-2.json
@@ -24,7 +24,21 @@
         {"shape":"LimitExceededException"},
         {"shape":"ResourceNotFoundException"},
         {"shape":"ConcurrentModificationException"},
-        {"shape":"IncompatibleImageException"}
+        {"shape":"IncompatibleImageException"},
+        {"shape":"OperationNotPermittedException"}
+      ]
+    },
+    "CreateDirectoryConfig":{
+      "name":"CreateDirectoryConfig",
+      "http":{
+        "method":"POST",
+        "requestUri":"/"
+      },
+      "input":{"shape":"CreateDirectoryConfigRequest"},
+      "output":{"shape":"CreateDirectoryConfigResult"},
+      "errors":[
+        {"shape":"ResourceAlreadyExistsException"},
+        {"shape":"LimitExceededException"}
       ]
     },
     "CreateFleet":{
@@ -41,7 +55,9 @@
         {"shape":"ResourceNotFoundException"},
         {"shape":"LimitExceededException"},
         {"shape":"InvalidRoleException"},
-        {"shape":"ConcurrentModificationException"}
+        {"shape":"ConcurrentModificationException"},
+        {"shape":"InvalidParameterCombinationException"},
+        {"shape":"IncompatibleImageException"}
       ]
     },
     "CreateStack":{
@@ -76,6 +92,19 @@
         {"shape":"InvalidParameterCombinationException"}
       ]
     },
+    "DeleteDirectoryConfig":{
+      "name":"DeleteDirectoryConfig",
+      "http":{
+        "method":"POST",
+        "requestUri":"/"
+      },
+      "input":{"shape":"DeleteDirectoryConfigRequest"},
+      "output":{"shape":"DeleteDirectoryConfigResult"},
+      "errors":[
+        {"shape":"ResourceInUseException"},
+        {"shape":"ResourceNotFoundException"}
+      ]
+    },
     "DeleteFleet":{
       "name":"DeleteFleet",
       "http":{
@@ -104,6 +133,18 @@
         {"shape":"ConcurrentModificationException"}
       ]
     },
+    "DescribeDirectoryConfigs":{
+      "name":"DescribeDirectoryConfigs",
+      "http":{
+        "method":"POST",
+        "requestUri":"/"
+      },
+      "input":{"shape":"DescribeDirectoryConfigsRequest"},
+      "output":{"shape":"DescribeDirectoryConfigsResult"},
+      "errors":[
+        {"shape":"ResourceNotFoundException"}
+      ]
+    },
     "DescribeFleets":{
       "name":"DescribeFleets",
       "http":{
@@ -221,6 +262,20 @@
         {"shape":"ConcurrentModificationException"}
      ]
     },
+    "UpdateDirectoryConfig":{
+      "name":"UpdateDirectoryConfig",
+      "http":{
+        "method":"POST",
+        "requestUri":"/"
+      },
+      "input":{"shape":"UpdateDirectoryConfigRequest"},
+      "output":{"shape":"UpdateDirectoryConfigResult"},
+      "errors":[
+        {"shape":"ResourceInUseException"},
+        {"shape":"ResourceNotFoundException"},
+        {"shape":"ConcurrentModificationException"}
+      ]
+    },
     "UpdateFleet":{
       "name":"UpdateFleet",
       "http":{
@@ -237,7 +292,8 @@
         {"shape":"ResourceNotAvailableException"},
         {"shape":"InvalidParameterCombinationException"},
         {"shape":"ConcurrentModificationException"},
-        {"shape":"IncompatibleImageException"}
+        {"shape":"IncompatibleImageException"},
+        {"shape":"OperationNotPermittedException"}
       ]
     },
     "UpdateStack":{
@@ -259,6 +315,17 @@
     }
   },
   "shapes":{
+    "AccountName":{
+      "type":"string",
+      "min":1,
+      "sensitive":true
+    },
+
"AccountPassword":{ + "type":"string", + "max":127, + "min":1, + "sensitive":true + }, "Application":{ "type":"structure", "members":{ @@ -329,6 +396,25 @@ }, "exception":true }, + "CreateDirectoryConfigRequest":{ + "type":"structure", + "required":[ + "DirectoryName", + "OrganizationalUnitDistinguishedNames", + "ServiceAccountCredentials" + ], + "members":{ + "DirectoryName":{"shape":"DirectoryName"}, + "OrganizationalUnitDistinguishedNames":{"shape":"OrganizationalUnitDistinguishedNamesList"}, + "ServiceAccountCredentials":{"shape":"ServiceAccountCredentials"} + } + }, + "CreateDirectoryConfigResult":{ + "type":"structure", + "members":{ + "DirectoryConfig":{"shape":"DirectoryConfig"} + } + }, "CreateFleetRequest":{ "type":"structure", "required":[ @@ -347,7 +433,8 @@ "DisconnectTimeoutInSeconds":{"shape":"Integer"}, "Description":{"shape":"Description"}, "DisplayName":{"shape":"DisplayName"}, - "EnableDefaultInternetAccess":{"shape":"BooleanObject"} + "EnableDefaultInternetAccess":{"shape":"BooleanObject"}, + "DomainJoinInfo":{"shape":"DomainJoinInfo"} } }, "CreateFleetResult":{ @@ -382,7 +469,7 @@ "members":{ "StackName":{"shape":"String"}, "FleetName":{"shape":"String"}, - "UserId":{"shape":"UserId"}, + "UserId":{"shape":"StreamingUrlUserId"}, "ApplicationId":{"shape":"String"}, "Validity":{"shape":"Long"}, "SessionContext":{"shape":"String"} @@ -395,6 +482,18 @@ "Expires":{"shape":"Timestamp"} } }, + "DeleteDirectoryConfigRequest":{ + "type":"structure", + "required":["DirectoryName"], + "members":{ + "DirectoryName":{"shape":"DirectoryName"} + } + }, + "DeleteDirectoryConfigResult":{ + "type":"structure", + "members":{ + } + }, "DeleteFleetRequest":{ "type":"structure", "required":["Name"], @@ -419,6 +518,21 @@ "members":{ } }, + "DescribeDirectoryConfigsRequest":{ + "type":"structure", + "members":{ + "DirectoryNames":{"shape":"DirectoryNameList"}, + "MaxResults":{"shape":"Integer"}, + "NextToken":{"shape":"String"} + } + }, + "DescribeDirectoryConfigsResult":{ + "type":"structure", + "members":{ + "DirectoryConfigs":{"shape":"DirectoryConfigList"}, + "NextToken":{"shape":"String"} + } + }, "DescribeFleetsRequest":{ "type":"structure", "members":{ @@ -485,6 +599,25 @@ "type":"string", "max":256 }, + "DirectoryConfig":{ + "type":"structure", + "required":["DirectoryName"], + "members":{ + "DirectoryName":{"shape":"DirectoryName"}, + "OrganizationalUnitDistinguishedNames":{"shape":"OrganizationalUnitDistinguishedNamesList"}, + "ServiceAccountCredentials":{"shape":"ServiceAccountCredentials"}, + "CreatedTime":{"shape":"Timestamp"} + } + }, + "DirectoryConfigList":{ + "type":"list", + "member":{"shape":"DirectoryConfig"} + }, + "DirectoryName":{"type":"string"}, + "DirectoryNameList":{ + "type":"list", + "member":{"shape":"DirectoryName"} + }, "DisassociateFleetRequest":{ "type":"structure", "required":[ @@ -505,6 +638,13 @@ "type":"string", "max":100 }, + "DomainJoinInfo":{ + "type":"structure", + "members":{ + "DirectoryName":{"shape":"DirectoryName"}, + "OrganizationalUnitDistinguishedName":{"shape":"OrganizationalUnitDistinguishedName"} + } + }, "ErrorMessage":{"type":"string"}, "ExpireSessionRequest":{ "type":"structure", @@ -542,14 +682,16 @@ "VpcConfig":{"shape":"VpcConfig"}, "CreatedTime":{"shape":"Timestamp"}, "FleetErrors":{"shape":"FleetErrors"}, - "EnableDefaultInternetAccess":{"shape":"BooleanObject"} + "EnableDefaultInternetAccess":{"shape":"BooleanObject"}, + "DomainJoinInfo":{"shape":"DomainJoinInfo"} } }, "FleetAttribute":{ "type":"string", "enum":[ 
"VPC_CONFIGURATION", - "VPC_CONFIGURATION_SECURITY_GROUP_IDS" + "VPC_CONFIGURATION_SECURITY_GROUP_IDS", + "DOMAIN_JOIN_INFO" ] }, "FleetAttributes":{ @@ -576,7 +718,21 @@ "IAM_SERVICE_ROLE_MISSING_DESCRIBE_SUBNET_ACTION", "SUBNET_NOT_FOUND", "IMAGE_NOT_FOUND", - "INVALID_SUBNET_CONFIGURATION" + "INVALID_SUBNET_CONFIGURATION", + "SECURITY_GROUPS_NOT_FOUND", + "IAM_SERVICE_ROLE_MISSING_DESCRIBE_SECURITY_GROUPS_ACTION", + "DOMAIN_JOIN_ERROR_FILE_NOT_FOUND", + "DOMAIN_JOIN_ERROR_ACCESS_DENIED", + "DOMAIN_JOIN_ERROR_LOGON_FAILURE", + "DOMAIN_JOIN_ERROR_INVALID_PARAMETER", + "DOMAIN_JOIN_ERROR_MORE_DATA", + "DOMAIN_JOIN_ERROR_NO_SUCH_DOMAIN", + "DOMAIN_JOIN_ERROR_NOT_SUPPORTED", + "DOMAIN_JOIN_NERR_INVALID_WORKGROUP_NAME", + "DOMAIN_JOIN_NERR_WORKSTATION_NOT_STARTED", + "DOMAIN_JOIN_ERROR_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED", + "DOMAIN_JOIN_NERR_PASSWORD_EXPIRED", + "DOMAIN_JOIN_INTERNAL_SERVICE_ERROR" ] }, "FleetErrors":{ @@ -718,6 +874,14 @@ }, "exception":true }, + "OrganizationalUnitDistinguishedName":{ + "type":"string", + "max":2000 + }, + "OrganizationalUnitDistinguishedNamesList":{ + "type":"list", + "member":{"shape":"OrganizationalUnitDistinguishedName"} + }, "PlatformType":{ "type":"string", "enum":["WINDOWS"] @@ -759,6 +923,17 @@ "member":{"shape":"String"}, "max":5 }, + "ServiceAccountCredentials":{ + "type":"structure", + "required":[ + "AccountName", + "AccountPassword" + ], + "members":{ + "AccountName":{"shape":"AccountName"}, + "AccountPassword":{"shape":"AccountPassword"} + } + }, "Session":{ "type":"structure", "required":[ @@ -864,6 +1039,12 @@ "type":"string", "enum":["HOMEFOLDERS"] }, + "StreamingUrlUserId":{ + "type":"string", + "max":32, + "min":2, + "pattern":"[\\w+=,.@-]*" + }, "String":{ "type":"string", "min":1 @@ -877,6 +1058,21 @@ "member":{"shape":"String"} }, "Timestamp":{"type":"timestamp"}, + "UpdateDirectoryConfigRequest":{ + "type":"structure", + "required":["DirectoryName"], + "members":{ + "DirectoryName":{"shape":"DirectoryName"}, + "OrganizationalUnitDistinguishedNames":{"shape":"OrganizationalUnitDistinguishedNamesList"}, + "ServiceAccountCredentials":{"shape":"ServiceAccountCredentials"} + } + }, + "UpdateDirectoryConfigResult":{ + "type":"structure", + "members":{ + "DirectoryConfig":{"shape":"DirectoryConfig"} + } + }, "UpdateFleetRequest":{ "type":"structure", "required":["Name"], @@ -895,6 +1091,7 @@ "Description":{"shape":"Description"}, "DisplayName":{"shape":"DisplayName"}, "EnableDefaultInternetAccess":{"shape":"BooleanObject"}, + "DomainJoinInfo":{"shape":"DomainJoinInfo"}, "AttributesToDelete":{"shape":"FleetAttributes"} } }, diff --git a/cli/vendor/github.com/aws/aws-sdk-go/models/apis/appstream/2016-12-01/docs-2.json b/cli/vendor/github.com/aws/aws-sdk-go/models/apis/appstream/2016-12-01/docs-2.json index 915e4bbf0..5d975ce05 100644 --- a/cli/vendor/github.com/aws/aws-sdk-go/models/apis/appstream/2016-12-01/docs-2.json +++ b/cli/vendor/github.com/aws/aws-sdk-go/models/apis/appstream/2016-12-01/docs-2.json @@ -3,25 +3,41 @@ "service": "Amazon AppStream 2.0
diff --git a/cli/vendor/github.com/aws/aws-sdk-go/models/apis/appstream/2016-12-01/docs-2.json b/cli/vendor/github.com/aws/aws-sdk-go/models/apis/appstream/2016-12-01/docs-2.json
index 915e4bbf0..5d975ce05 100644
--- a/cli/vendor/github.com/aws/aws-sdk-go/models/apis/appstream/2016-12-01/docs-2.json
+++ b/cli/vendor/github.com/aws/aws-sdk-go/models/apis/appstream/2016-12-01/docs-2.json
@@ -3,25 +3,41 @@
   "service": "Amazon AppStream 2.0

API documentation for Amazon AppStream 2.0.

", "operations": { "AssociateFleet": "

Associate a fleet to a stack.

", + "CreateDirectoryConfig": "

Creates a directory configuration with the given parameters.

", "CreateFleet": "

Creates a new fleet.

", "CreateStack": "

Create a new stack.

", "CreateStreamingURL": "

Creates a URL to start an AppStream 2.0 streaming session for a user. By default, the URL is valid only for 1 minute from the time that it is generated.

", + "DeleteDirectoryConfig": "

Deletes the specified directory configuration.

", "DeleteFleet": "

Deletes a fleet.

", "DeleteStack": "

Deletes the stack. After this operation completes, the environment can no longer be activated, and any reservations made for the stack are released.

", + "DescribeDirectoryConfigs": "

Returns a list describing the specified directory configurations.

", "DescribeFleets": "

If fleet names are provided, this operation describes the specified fleets; otherwise, all the fleets in the account are described.

", "DescribeImages": "

Describes the images. If a list of names is not provided, all images in your account are returned. This operation does not return a paginated result.

", - "DescribeSessions": "

Describes the streaming sessions for a stack and a fleet. If a user ID is provided, this operation returns streaming sessions for only that user. Pass this value for the nextToken parameter in a subsequent call to this operation to retrieve the next set of items. If an authentication type is not provided, the operation defaults to users authenticated using a streaming URL.

", - "DescribeStacks": "

If stack names are not provided, this operation describes the specified stacks; otherwise, all stacks in the account are described. Pass the nextToken value in a subsequent call to this operation to retrieve the next set of items.

", + "DescribeSessions": "

Describes the streaming sessions for a stack and a fleet. If a user ID is provided, this operation returns streaming sessions for only that user. To retrieve the next set of items, pass this value for the nextToken parameter in a subsequent call to this operation. If an authentication type is not provided, the operation defaults to users authenticated using a streaming URL.

", + "DescribeStacks": "

If stack names are provided, this operation describes the specified stacks; otherwise, all stacks in the account are described. To retrieve the next set of items, pass the nextToken value in a subsequent call to this operation.

", "DisassociateFleet": "

Disassociates a fleet from a stack.

", "ExpireSession": "

This operation immediately stops a streaming session.

", "ListAssociatedFleets": "

Lists all fleets associated with the stack.

", "ListAssociatedStacks": "

Lists all stacks to which the specified fleet is associated.

", "StartFleet": "

Starts a fleet.

", "StopFleet": "

Stops a fleet.

", + "UpdateDirectoryConfig": "

Updates the directory configuration with the given parameters.

", "UpdateFleet": "

Updates an existing fleet. All the attributes except the fleet name can be updated in the STOPPED state. When a fleet is in the RUNNING state, only DisplayName and ComputeCapacity can be updated. A fleet cannot be updated in a status of STARTING or STOPPING.

", "UpdateStack": "

Updates the specified fields in the stack with the specified name.

" }, "shapes": { + "AccountName": { + "base": null, + "refs": { + "ServiceAccountCredentials$AccountName": "

The user name of an account in the directory that is used by AppStream 2.0 streaming instances to connect to the directory. This account must have the following privileges: create computer objects, join computers to the domain, and change/reset the password on descendant computer objects in the specified organizational units.

" + } + }, + "AccountPassword": { + "base": null, + "refs": { + "ServiceAccountCredentials$AccountPassword": "

The password for the user account for directory actions.

" + } + }, "Application": { "base": "

An entry for a single application in the application catalog.

", "refs": { @@ -63,7 +79,7 @@ "Boolean": { "base": null, "refs": { - "Application$Enabled": "

An application can be disabled after image creation if there is a problem.

", + "Application$Enabled": "

If there is a problem, an application can be disabled after image creation.

", "Image$ImageBuilderSupported": "

Whether an image builder can be launched from this image.

", "UpdateFleetRequest$DeleteVpcConfig": "

Delete the VPC association for the specified fleet.

", "UpdateStackRequest$DeleteStorageConnectors": "

Remove all the storage connectors currently enabled for the stack.

" @@ -72,9 +88,9 @@ "BooleanObject": { "base": null, "refs": { - "CreateFleetRequest$EnableDefaultInternetAccess": "

Enables or disables default Internet access for the fleet.

", - "Fleet$EnableDefaultInternetAccess": "

Whether default Internet access is enabled for the fleet.

", - "UpdateFleetRequest$EnableDefaultInternetAccess": "

Enables or disables default Internet access for the fleet.

" + "CreateFleetRequest$EnableDefaultInternetAccess": "

Enables or disables default internet access for the fleet.

", + "Fleet$EnableDefaultInternetAccess": "

Whether default internet access is enabled for the fleet.

", + "UpdateFleetRequest$EnableDefaultInternetAccess": "

Enables or disables default internet access for the fleet.

" } }, "ComputeCapacity": { @@ -95,6 +111,16 @@ "refs": { } }, + "CreateDirectoryConfigRequest": { + "base": null, + "refs": { + } + }, + "CreateDirectoryConfigResult": { + "base": null, + "refs": { + } + }, "CreateFleetRequest": { "base": "

Contains the parameters for the new fleet to create.

", "refs": { @@ -125,6 +151,16 @@ "refs": { } }, + "DeleteDirectoryConfigRequest": { + "base": null, + "refs": { + } + }, + "DeleteDirectoryConfigResult": { + "base": null, + "refs": { + } + }, "DeleteFleetRequest": { "base": null, "refs": { @@ -145,6 +181,16 @@ "refs": { } }, + "DescribeDirectoryConfigsRequest": { + "base": null, + "refs": { + } + }, + "DescribeDirectoryConfigsResult": { + "base": null, + "refs": { + } + }, "DescribeFleetsRequest": { "base": null, "refs": { @@ -194,6 +240,37 @@ "UpdateStackRequest$Description": "

The description displayed to end users on the AppStream 2.0 portal.

" } }, + "DirectoryConfig": { + "base": "

Full directory configuration details, which are used to join domains for the AppStream 2.0 streaming instances.

", + "refs": { + "CreateDirectoryConfigResult$DirectoryConfig": "

Directory configuration details.

", + "DirectoryConfigList$member": null, + "UpdateDirectoryConfigResult$DirectoryConfig": "

The updated directory configuration details.

" + } + }, + "DirectoryConfigList": { + "base": null, + "refs": { + "DescribeDirectoryConfigsResult$DirectoryConfigs": "

The list of directory configurations.

" + } + }, + "DirectoryName": { + "base": null, + "refs": { + "CreateDirectoryConfigRequest$DirectoryName": "

The fully qualified name of the directory, such as corp.example.com.

", + "DeleteDirectoryConfigRequest$DirectoryName": "

The name of the directory configuration to be deleted.

", + "DirectoryConfig$DirectoryName": "

The fully qualified name of the directory, such as corp.example.com.

", + "DirectoryNameList$member": null, + "DomainJoinInfo$DirectoryName": "

The fully qualified name of the directory, such as corp.example.com.

", + "UpdateDirectoryConfigRequest$DirectoryName": "

The name of the existing directory configuration to be updated.

" + } + }, + "DirectoryNameList": { + "base": null, + "refs": { + "DescribeDirectoryConfigsRequest$DirectoryNames": "

A specific list of directory names.

" + } + }, "DisassociateFleetRequest": { "base": null, "refs": { @@ -213,6 +290,14 @@ "UpdateStackRequest$DisplayName": "

The name displayed to end users on the AppStream 2.0 portal.

" } }, + "DomainJoinInfo": { + "base": "

The DirectoryName and OrganizationalUnitDistinguishedName values, which are used to join domains for the AppStream 2.0 streaming instances.

", + "refs": { + "CreateFleetRequest$DomainJoinInfo": "

The DirectoryName and OrganizationalUnitDistinguishedName values, which are used to join domains for the AppStream 2.0 streaming instances.

", + "Fleet$DomainJoinInfo": "

The DirectoryName and OrganizationalUnitDistinguishedName values, which are used to join domains for the AppStream 2.0 streaming instances.

", + "UpdateFleetRequest$DomainJoinInfo": "

The DirectoryName and OrganizationalUnitDistinguishedName values, which are used to join domains for the AppStream 2.0 streaming instances.

" + } + }, "ErrorMessage": { "base": "

The error message in the exception.

", "refs": { @@ -303,7 +388,7 @@ "ImageState": { "base": null, "refs": { - "Image$State": "

The image starts in the PENDING state, and then moves to AVAILABLE if image creation succeeds and FAILED if image creation has failed.

" + "Image$State": "

The image starts in the PENDING state. If image creation succeeds, it moves to AVAILABLE. If image creation fails, it moves to FAILED.

" } }, "ImageStateChangeReason": { @@ -333,6 +418,7 @@ "ComputeCapacityStatus$Available": "

The number of currently available instances that can be used to stream sessions.

", "CreateFleetRequest$MaxUserDurationInSeconds": "

The maximum time for which a streaming session can run. The input can be any numeric value in seconds between 600 and 57600.

", "CreateFleetRequest$DisconnectTimeoutInSeconds": "

The time after disconnection when a session is considered to have ended. If a user who got disconnected reconnects within this timeout interval, the user is connected back to their previous session. The input can be any numeric value in seconds between 60 and 57600.

", + "DescribeDirectoryConfigsRequest$MaxResults": "

The size of each page of results.

", "DescribeSessionsRequest$Limit": "

The size of each page of results. The default value is 20 and the maximum supported value is 50.

", "Fleet$MaxUserDurationInSeconds": "

The maximum time for which a streaming session can run. The value can be any numeric value in seconds between 600 and 57600.

", "Fleet$DisconnectTimeoutInSeconds": "

The time after disconnection when a session is considered to have ended. If a user who got disconnected reconnects within this timeout interval, the user is connected back to their previous session. The input can be any numeric value in seconds between 60 and 57600.

", @@ -398,6 +484,21 @@ "refs": { } }, + "OrganizationalUnitDistinguishedName": { + "base": null, + "refs": { + "DomainJoinInfo$OrganizationalUnitDistinguishedName": "

The distinguished name of the organizational unit in which to place the computer account.

", + "OrganizationalUnitDistinguishedNamesList$member": null + } + }, + "OrganizationalUnitDistinguishedNamesList": { + "base": null, + "refs": { + "CreateDirectoryConfigRequest$OrganizationalUnitDistinguishedNames": "

The list of the distinguished names of organizational units in which to place computer accounts.

", + "DirectoryConfig$OrganizationalUnitDistinguishedNames": "

The list of the distinguished names of organizational units in which to place computer accounts.

", + "UpdateDirectoryConfigRequest$OrganizationalUnitDistinguishedNames": "

The list of the distinguished names of organizational units in which to place computer accounts.

" + } + }, "PlatformType": { "base": null, "refs": { @@ -436,6 +537,14 @@ "VpcConfig$SecurityGroupIds": "

Security groups associated with the fleet.

" } }, + "ServiceAccountCredentials": { + "base": "

The AccountName and AccountPassword of the service account, to be used by the streaming instance to connect to the directory.

", + "refs": { + "CreateDirectoryConfigRequest$ServiceAccountCredentials": "

The AccountName and AccountPassword values for the service account, which are used by the streaming instance to connect to the directory.

", + "DirectoryConfig$ServiceAccountCredentials": "

The AccountName and AccountPassword of the service account, to be used by the streaming instance to connect to the directory.

", + "UpdateDirectoryConfigRequest$ServiceAccountCredentials": "

The AccountName and AccountPassword values for the service account, which are used by the streaming instance to connect to the directory.

" + } + }, "Session": { "base": "

Contains the parameters for a streaming session.

", "refs": { @@ -526,6 +635,12 @@ "StorageConnector$ConnectorType": "

The type of storage connector. The possible values include: HOMEFOLDERS.

" } }, + "StreamingUrlUserId": { + "base": null, + "refs": { + "CreateStreamingURLRequest$UserId": "

The unique ID of the user for whom the URL is generated.

" + } + }, "String": { "base": null, "refs": { @@ -537,7 +652,7 @@ "AssociateFleetRequest$FleetName": "

The name of the fleet to associate.

", "AssociateFleetRequest$StackName": "

The name of the stack to which the fleet is associated.

", "CreateFleetRequest$ImageName": "

Unique name of the image used by the fleet.

", - "CreateFleetRequest$InstanceType": "

The instance type of compute resources for the fleet. Fleet instances are launched from this instance type.

", + "CreateFleetRequest$InstanceType": "

The instance type of compute resources for the fleet. Fleet instances are launched from this instance type. Available instance types are:

", "CreateStackRequest$Name": "

The unique identifier for this stack.

", "CreateStreamingURLRequest$StackName": "

The stack for which the URL is generated.

", "CreateStreamingURLRequest$FleetName": "

The fleet for which the URL is generated.

", @@ -546,6 +661,8 @@ "CreateStreamingURLResult$StreamingURL": "

The URL to start the AppStream 2.0 streaming session.

", "DeleteFleetRequest$Name": "

The name of the fleet to be deleted.

", "DeleteStackRequest$Name": "

The name of the stack to delete.

", + "DescribeDirectoryConfigsRequest$NextToken": "

The DescribeDirectoryConfigsResult.NextToken from a previous call to DescribeDirectoryConfigs. If this is the first call, pass null.

", + "DescribeDirectoryConfigsResult$NextToken": "

If not null, more results are available. To retrieve the next set of items, pass this value for the NextToken parameter in a subsequent call to DescribeDirectoryConfigs.

", "DescribeFleetsRequest$NextToken": "

The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.

", "DescribeFleetsResult$NextToken": "

The pagination token to use to retrieve the next page of results for this operation. If there are no more pages, this value is null.

", "DescribeSessionsRequest$StackName": "

The name of the stack for which to list sessions.

", @@ -589,7 +706,7 @@ "SubnetIdList$member": null, "UpdateFleetRequest$ImageName": "

The image name from which a fleet is created.

", "UpdateFleetRequest$Name": "

The name of the fleet.

", - "UpdateFleetRequest$InstanceType": "

The instance type of compute resources for the fleet. Fleet instances are launched from this instance type.

", + "UpdateFleetRequest$InstanceType": "

The instance type of compute resources for the fleet. Fleet instances are launched from this instance type. Available instance types are:

", "UpdateStackRequest$Name": "

The name of the stack to update.

" } }, @@ -612,11 +729,22 @@ "Timestamp": { "base": null, "refs": { - "CreateStreamingURLResult$Expires": "

Elapsed seconds after the Unix epoch, at which time this URL expires.

", + "CreateStreamingURLResult$Expires": "

Elapsed seconds after the Unix epoch, when this URL expires.

", + "DirectoryConfig$CreatedTime": "

The time stamp when the directory configuration was created within AppStream 2.0.

", "Fleet$CreatedTime": "

The time at which the fleet was created.

", - "Image$CreatedTime": "

The timestamp when the image was created.

", + "Image$CreatedTime": "

The time stamp when the image was created.

", "Image$PublicBaseImageReleasedDate": "

The AWS release date of the public base image. For private images, this date is the release date of the base image from which the image was created.

", - "Stack$CreatedTime": "

The timestamp when the stack was created.

" + "Stack$CreatedTime": "

The time stamp when the stack was created.

" + } + }, + "UpdateDirectoryConfigRequest": { + "base": null, + "refs": { + } + }, + "UpdateDirectoryConfigResult": { + "base": null, + "refs": { } }, "UpdateFleetRequest": { @@ -642,7 +770,6 @@ "UserId": { "base": null, "refs": { - "CreateStreamingURLRequest$UserId": "

A unique user ID for whom the URL is generated.

", "DescribeSessionsRequest$UserId": "

The user for whom to list sessions. Use null to describe all the sessions for the stack and fleet.

", "Session$UserId": "

The identifier of the user for whom the session was created.

" } diff --git a/cli/vendor/github.com/aws/aws-sdk-go/models/apis/appstream/2016-12-01/waiters-2.json b/cli/vendor/github.com/aws/aws-sdk-go/models/apis/appstream/2016-12-01/waiters-2.json index 6672ceed3..f53f609cb 100644 --- a/cli/vendor/github.com/aws/aws-sdk-go/models/apis/appstream/2016-12-01/waiters-2.json +++ b/cli/vendor/github.com/aws/aws-sdk-go/models/apis/appstream/2016-12-01/waiters-2.json @@ -9,19 +9,19 @@ { "state": "success", "matcher": "pathAll", - "argument": "fleets[].state", + "argument": "Fleets[].State", "expected": "ACTIVE" }, { "state": "failure", "matcher": "pathAny", - "argument": "fleets[].state", + "argument": "Fleets[].State", "expected": "PENDING_DEACTIVATE" }, { "state": "failure", "matcher": "pathAny", - "argument": "fleets[].state", + "argument": "Fleets[].State", "expected": "INACTIVE" } ] @@ -34,19 +34,19 @@ { "state": "success", "matcher": "pathAll", - "argument": "fleets[].state", + "argument": "Fleets[].State", "expected": "INACTIVE" }, { "state": "failure", "matcher": "pathAny", - "argument": "fleets[].state", + "argument": "Fleets[].State", "expected": "PENDING_ACTIVATE" }, { "state": "failure", "matcher": "pathAny", - "argument": "fleets[].state", + "argument": "Fleets[].State", "expected": "ACTIVE" } ] diff --git a/cli/vendor/github.com/aws/aws-sdk-go/models/apis/clouddirectory/2016-05-10/api-2.json b/cli/vendor/github.com/aws/aws-sdk-go/models/apis/clouddirectory/2016-05-10/api-2.json index 4d96e1608..d8c9ae8fd 100644 --- a/cli/vendor/github.com/aws/aws-sdk-go/models/apis/clouddirectory/2016-05-10/api-2.json +++ b/cli/vendor/github.com/aws/aws-sdk-go/models/apis/clouddirectory/2016-05-10/api-2.json @@ -136,6 +136,7 @@ {"shape":"ValidationException"}, {"shape":"LimitExceededException"}, {"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InvalidAttachmentException"}, {"shape":"ValidationException"}, @@ -495,6 +496,7 @@ {"shape":"ValidationException"}, {"shape":"LimitExceededException"}, {"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"}, {"shape":"ResourceNotFoundException"}, {"shape":"FacetValidationException"} ] @@ -774,6 +776,7 @@ {"shape":"ValidationException"}, {"shape":"LimitExceededException"}, {"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InvalidNextTokenException"}, {"shape":"FacetValidationException"} @@ -927,6 +930,7 @@ {"shape":"ValidationException"}, {"shape":"LimitExceededException"}, {"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InvalidNextTokenException"}, {"shape":"FacetValidationException"} @@ -1494,6 +1498,80 @@ "attachedObjectIdentifier":{"shape":"ObjectIdentifier"} } }, + "BatchAttachPolicy":{ + "type":"structure", + "required":[ + "PolicyReference", + "ObjectReference" + ], + "members":{ + "PolicyReference":{"shape":"ObjectReference"}, + "ObjectReference":{"shape":"ObjectReference"} + } + }, + "BatchAttachPolicyResponse":{ + "type":"structure", + "members":{ + } + }, + "BatchAttachToIndex":{ + "type":"structure", + "required":[ + "IndexReference", + "TargetReference" + ], + "members":{ + "IndexReference":{"shape":"ObjectReference"}, + "TargetReference":{"shape":"ObjectReference"} + } + }, + "BatchAttachToIndexResponse":{ + "type":"structure", + "members":{ + "AttachedObjectIdentifier":{"shape":"ObjectIdentifier"} + } + }, + "BatchAttachTypedLink":{ + 
"type":"structure", + "required":[ + "SourceObjectReference", + "TargetObjectReference", + "TypedLinkFacet", + "Attributes" + ], + "members":{ + "SourceObjectReference":{"shape":"ObjectReference"}, + "TargetObjectReference":{"shape":"ObjectReference"}, + "TypedLinkFacet":{"shape":"TypedLinkSchemaAndFacetName"}, + "Attributes":{"shape":"AttributeNameAndValueList"} + } + }, + "BatchAttachTypedLinkResponse":{ + "type":"structure", + "members":{ + "TypedLinkSpecifier":{"shape":"TypedLinkSpecifier"} + } + }, + "BatchCreateIndex":{ + "type":"structure", + "required":[ + "OrderedIndexedAttributeList", + "IsUnique" + ], + "members":{ + "OrderedIndexedAttributeList":{"shape":"AttributeKeyList"}, + "IsUnique":{"shape":"Bool"}, + "ParentReference":{"shape":"ObjectReference"}, + "LinkName":{"shape":"LinkName"}, + "BatchReferenceName":{"shape":"BatchReferenceName"} + } + }, + "BatchCreateIndexResponse":{ + "type":"structure", + "members":{ + "ObjectIdentifier":{"shape":"ObjectIdentifier"} + } + }, "BatchCreateObject":{ "type":"structure", "required":[ @@ -1529,6 +1607,23 @@ "members":{ } }, + "BatchDetachFromIndex":{ + "type":"structure", + "required":[ + "IndexReference", + "TargetReference" + ], + "members":{ + "IndexReference":{"shape":"ObjectReference"}, + "TargetReference":{"shape":"ObjectReference"} + } + }, + "BatchDetachFromIndexResponse":{ + "type":"structure", + "members":{ + "DetachedObjectIdentifier":{"shape":"ObjectIdentifier"} + } + }, "BatchDetachObject":{ "type":"structure", "required":[ @@ -1548,6 +1643,83 @@ "detachedObjectIdentifier":{"shape":"ObjectIdentifier"} } }, + "BatchDetachTypedLink":{ + "type":"structure", + "required":["TypedLinkSpecifier"], + "members":{ + "TypedLinkSpecifier":{"shape":"TypedLinkSpecifier"} + } + }, + "BatchDetachTypedLinkResponse":{ + "type":"structure", + "members":{ + } + }, + "BatchGetObjectInformation":{ + "type":"structure", + "required":["ObjectReference"], + "members":{ + "ObjectReference":{"shape":"ObjectReference"} + } + }, + "BatchGetObjectInformationResponse":{ + "type":"structure", + "members":{ + "SchemaFacets":{"shape":"SchemaFacetList"}, + "ObjectIdentifier":{"shape":"ObjectIdentifier"} + } + }, + "BatchListAttachedIndices":{ + "type":"structure", + "required":["TargetReference"], + "members":{ + "TargetReference":{"shape":"ObjectReference"}, + "NextToken":{"shape":"NextToken"}, + "MaxResults":{"shape":"NumberResults"} + } + }, + "BatchListAttachedIndicesResponse":{ + "type":"structure", + "members":{ + "IndexAttachments":{"shape":"IndexAttachmentList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "BatchListIncomingTypedLinks":{ + "type":"structure", + "required":["ObjectReference"], + "members":{ + "ObjectReference":{"shape":"ObjectReference"}, + "FilterAttributeRanges":{"shape":"TypedLinkAttributeRangeList"}, + "FilterTypedLink":{"shape":"TypedLinkSchemaAndFacetName"}, + "NextToken":{"shape":"NextToken"}, + "MaxResults":{"shape":"NumberResults"} + } + }, + "BatchListIncomingTypedLinksResponse":{ + "type":"structure", + "members":{ + "LinkSpecifiers":{"shape":"TypedLinkSpecifierList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "BatchListIndex":{ + "type":"structure", + "required":["IndexReference"], + "members":{ + "RangesOnIndexedValues":{"shape":"ObjectAttributeRangeList"}, + "IndexReference":{"shape":"ObjectReference"}, + "MaxResults":{"shape":"NumberResults"}, + "NextToken":{"shape":"NextToken"} + } + }, + "BatchListIndexResponse":{ + "type":"structure", + "members":{ + "IndexAttachments":{"shape":"IndexAttachmentList"}, + 
"NextToken":{"shape":"NextToken"} + } + }, "BatchListObjectAttributes":{ "type":"structure", "required":["ObjectReference"], @@ -1581,6 +1753,88 @@ "NextToken":{"shape":"NextToken"} } }, + "BatchListObjectParentPaths":{ + "type":"structure", + "required":["ObjectReference"], + "members":{ + "ObjectReference":{"shape":"ObjectReference"}, + "NextToken":{"shape":"NextToken"}, + "MaxResults":{"shape":"NumberResults"} + } + }, + "BatchListObjectParentPathsResponse":{ + "type":"structure", + "members":{ + "PathToObjectIdentifiersList":{"shape":"PathToObjectIdentifiersList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "BatchListObjectPolicies":{ + "type":"structure", + "required":["ObjectReference"], + "members":{ + "ObjectReference":{"shape":"ObjectReference"}, + "NextToken":{"shape":"NextToken"}, + "MaxResults":{"shape":"NumberResults"} + } + }, + "BatchListObjectPoliciesResponse":{ + "type":"structure", + "members":{ + "AttachedPolicyIds":{"shape":"ObjectIdentifierList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "BatchListOutgoingTypedLinks":{ + "type":"structure", + "required":["ObjectReference"], + "members":{ + "ObjectReference":{"shape":"ObjectReference"}, + "FilterAttributeRanges":{"shape":"TypedLinkAttributeRangeList"}, + "FilterTypedLink":{"shape":"TypedLinkSchemaAndFacetName"}, + "NextToken":{"shape":"NextToken"}, + "MaxResults":{"shape":"NumberResults"} + } + }, + "BatchListOutgoingTypedLinksResponse":{ + "type":"structure", + "members":{ + "TypedLinkSpecifiers":{"shape":"TypedLinkSpecifierList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "BatchListPolicyAttachments":{ + "type":"structure", + "required":["PolicyReference"], + "members":{ + "PolicyReference":{"shape":"ObjectReference"}, + "NextToken":{"shape":"NextToken"}, + "MaxResults":{"shape":"NumberResults"} + } + }, + "BatchListPolicyAttachmentsResponse":{ + "type":"structure", + "members":{ + "ObjectIdentifiers":{"shape":"ObjectIdentifierList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "BatchLookupPolicy":{ + "type":"structure", + "required":["ObjectReference"], + "members":{ + "ObjectReference":{"shape":"ObjectReference"}, + "NextToken":{"shape":"NextToken"}, + "MaxResults":{"shape":"NumberResults"} + } + }, + "BatchLookupPolicyResponse":{ + "type":"structure", + "members":{ + "PolicyToPathList":{"shape":"PolicyToPathList"}, + "NextToken":{"shape":"NextToken"} + } + }, "BatchOperationIndex":{"type":"integer"}, "BatchReadException":{ "type":"structure", @@ -1597,14 +1851,30 @@ "ResourceNotFoundException", "InvalidNextTokenException", "AccessDeniedException", - "NotNodeException" + "NotNodeException", + "FacetValidationException", + "CannotListParentOfRootException", + "NotIndexException", + "NotPolicyException", + "DirectoryNotEnabledException", + "LimitExceededException", + "InternalServiceException" ] }, "BatchReadOperation":{ "type":"structure", "members":{ "ListObjectAttributes":{"shape":"BatchListObjectAttributes"}, - "ListObjectChildren":{"shape":"BatchListObjectChildren"} + "ListObjectChildren":{"shape":"BatchListObjectChildren"}, + "ListAttachedIndices":{"shape":"BatchListAttachedIndices"}, + "ListObjectParentPaths":{"shape":"BatchListObjectParentPaths"}, + "GetObjectInformation":{"shape":"BatchGetObjectInformation"}, + "ListObjectPolicies":{"shape":"BatchListObjectPolicies"}, + "ListPolicyAttachments":{"shape":"BatchListPolicyAttachments"}, + "LookupPolicy":{"shape":"BatchLookupPolicy"}, + "ListIndex":{"shape":"BatchListIndex"}, + "ListOutgoingTypedLinks":{"shape":"BatchListOutgoingTypedLinks"}, + 
"ListIncomingTypedLinks":{"shape":"BatchListIncomingTypedLinks"} } }, "BatchReadOperationList":{ @@ -1652,7 +1922,16 @@ "type":"structure", "members":{ "ListObjectAttributes":{"shape":"BatchListObjectAttributesResponse"}, - "ListObjectChildren":{"shape":"BatchListObjectChildrenResponse"} + "ListObjectChildren":{"shape":"BatchListObjectChildrenResponse"}, + "GetObjectInformation":{"shape":"BatchGetObjectInformationResponse"}, + "ListAttachedIndices":{"shape":"BatchListAttachedIndicesResponse"}, + "ListObjectParentPaths":{"shape":"BatchListObjectParentPathsResponse"}, + "ListObjectPolicies":{"shape":"BatchListObjectPoliciesResponse"}, + "ListPolicyAttachments":{"shape":"BatchListPolicyAttachmentsResponse"}, + "LookupPolicy":{"shape":"BatchLookupPolicyResponse"}, + "ListIndex":{"shape":"BatchListIndexResponse"}, + "ListOutgoingTypedLinks":{"shape":"BatchListOutgoingTypedLinksResponse"}, + "ListIncomingTypedLinks":{"shape":"BatchListIncomingTypedLinksResponse"} } }, "BatchReferenceName":{"type":"string"}, @@ -1709,7 +1988,15 @@ "FacetValidationException", "ObjectNotDetachedException", "ResourceNotFoundException", - "AccessDeniedException" + "AccessDeniedException", + "InvalidAttachmentException", + "NotIndexException", + "IndexedAttributeMissingException", + "ObjectAlreadyDetachedException", + "NotPolicyException", + "DirectoryNotEnabledException", + "LimitExceededException", + "UnsupportedIndexTypeException" ] }, "BatchWriteOperation":{ @@ -1721,7 +2008,13 @@ "UpdateObjectAttributes":{"shape":"BatchUpdateObjectAttributes"}, "DeleteObject":{"shape":"BatchDeleteObject"}, "AddFacetToObject":{"shape":"BatchAddFacetToObject"}, - "RemoveFacetFromObject":{"shape":"BatchRemoveFacetFromObject"} + "RemoveFacetFromObject":{"shape":"BatchRemoveFacetFromObject"}, + "AttachPolicy":{"shape":"BatchAttachPolicy"}, + "CreateIndex":{"shape":"BatchCreateIndex"}, + "AttachToIndex":{"shape":"BatchAttachToIndex"}, + "DetachFromIndex":{"shape":"BatchDetachFromIndex"}, + "AttachTypedLink":{"shape":"BatchAttachTypedLink"}, + "DetachTypedLink":{"shape":"BatchDetachTypedLink"} } }, "BatchWriteOperationList":{ @@ -1737,7 +2030,13 @@ "UpdateObjectAttributes":{"shape":"BatchUpdateObjectAttributesResponse"}, "DeleteObject":{"shape":"BatchDeleteObjectResponse"}, "AddFacetToObject":{"shape":"BatchAddFacetToObjectResponse"}, - "RemoveFacetFromObject":{"shape":"BatchRemoveFacetFromObjectResponse"} + "RemoveFacetFromObject":{"shape":"BatchRemoveFacetFromObjectResponse"}, + "AttachPolicy":{"shape":"BatchAttachPolicyResponse"}, + "CreateIndex":{"shape":"BatchCreateIndexResponse"}, + "AttachToIndex":{"shape":"BatchAttachToIndexResponse"}, + "DetachFromIndex":{"shape":"BatchDetachFromIndexResponse"}, + "AttachTypedLink":{"shape":"BatchAttachTypedLinkResponse"}, + "DetachTypedLink":{"shape":"BatchDetachTypedLinkResponse"} } }, "BatchWriteOperationResponseList":{ diff --git a/cli/vendor/github.com/aws/aws-sdk-go/models/apis/clouddirectory/2016-05-10/docs-2.json b/cli/vendor/github.com/aws/aws-sdk-go/models/apis/clouddirectory/2016-05-10/docs-2.json index 45e5b3087..89ccb3425 100644 --- a/cli/vendor/github.com/aws/aws-sdk-go/models/apis/clouddirectory/2016-05-10/docs-2.json +++ b/cli/vendor/github.com/aws/aws-sdk-go/models/apis/clouddirectory/2016-05-10/docs-2.json @@ -241,6 +241,7 @@ "AttributeKeyList": { "base": null, "refs": { + "BatchCreateIndex$OrderedIndexedAttributeList": "

Specifies the attributes that should be indexed on. Currently only a single attribute is supported.

", "CreateIndexRequest$OrderedIndexedAttributeList": "

Specifies the attributes that should be indexed on. Currently only a single attribute is supported.

" } }, @@ -266,6 +267,7 @@ "base": null, "refs": { "AttachTypedLinkRequest$Attributes": "

A set of attributes that are associated with the typed link.

", + "BatchAttachTypedLink$Attributes": "

A set of attributes that are associated with the typed link.

", "TypedLinkSpecifier$IdentityAttributeValues": "

Identifies the attribute value to update.

" } }, @@ -273,7 +275,7 @@ "base": null, "refs": { "GetTypedLinkFacetInformationResponse$IdentityAttributeOrder": "

The order of identity attributes for the facet, from most significant to least significant. The ability to filter typed links considers the order that the attributes are defined on the typed link facet. When providing ranges to typed link selection, any inexact ranges must be specified at the end. Any attributes that do not have a range specified are presumed to match the entire range. Filters are interpreted in the order of the attributes on the typed link facet, not the order in which they are supplied to any API calls. For more information about identity attributes, see Typed link.

", - "TypedLinkFacet$IdentityAttributeOrder": "

The set of attributes that distinguish links made from this facet from each other, in the order of significance. Listing typed links can filter on the values of these attributes. See ListOutgoingTypedLinks and ListIncomingTypeLinks for details.

", + "TypedLinkFacet$IdentityAttributeOrder": "

The set of attributes that distinguish links made from this facet from each other, in the order of significance. Listing typed links can filter on the values of these attributes. See ListOutgoingTypedLinks and ListIncomingTypedLinks for details.

", "UpdateTypedLinkFacetRequest$IdentityAttributeOrder": "

The order of identity attributes for the facet, from most significant to least significant. The ability to filter typed links considers the order that the attributes are defined on the typed link facet. When providing ranges to a typed link selection, any inexact ranges must be specified at the end. Any attributes that do not have a range specified are presumed to match the entire range. Filters are interpreted in the order of the attributes on the typed link facet, not the order in which they are supplied to any API calls. For more information about identity attributes, see Typed link.

" } }, @@ -290,77 +292,257 @@ } }, "BatchAttachObject": { - "base": "

Represents the output of an AttachObject operation.

", + "base": "

Represents the output of an AttachObject operation.

", "refs": { "BatchWriteOperation$AttachObject": "

Attaches an object to a Directory.

" } }, "BatchAttachObjectResponse": { - "base": "

Represents the output batch AttachObject response operation.

", + "base": "

Represents the output of a batch AttachObject response operation.

", "refs": { "BatchWriteOperationResponse$AttachObject": "

Attaches an object to a Directory.

" } }, + "BatchAttachPolicy": { + "base": "

Attaches a policy object to a regular object inside a BatchWrite operation. For more information, see AttachPolicy and BatchWriteRequest$Operations.

", + "refs": { + "BatchWriteOperation$AttachPolicy": "

Attaches a policy object to a regular object. An object can have a limited number of attached policies.

" + } + }, + "BatchAttachPolicyResponse": { + "base": "

Represents the output of an AttachPolicy response operation.

", + "refs": { + "BatchWriteOperationResponse$AttachPolicy": "

Attaches a policy object to a regular object. An object can have a limited number of attached policies.

" + } + }, + "BatchAttachToIndex": { + "base": "

Attaches the specified object to the specified index inside a BatchWrite operation. For more information, see AttachToIndex and BatchWriteRequest$Operations.

", + "refs": { + "BatchWriteOperation$AttachToIndex": "

Attaches the specified object to the specified index.

" + } + }, + "BatchAttachToIndexResponse": { + "base": "

Represents the output of an AttachToIndex response operation.

", + "refs": { + "BatchWriteOperationResponse$AttachToIndex": "

Attaches the specified object to the specified index.

" + } + }, + "BatchAttachTypedLink": { + "base": "

Attaches a typed link to a specified source and target object inside a BatchWrite operation. For more information, see AttachTypedLink and BatchWriteRequest$Operations.

", + "refs": { + "BatchWriteOperation$AttachTypedLink": "

Attaches a typed link to a specified source and target object. For more information, see Typed link.

" + } + }, + "BatchAttachTypedLinkResponse": { + "base": "

Represents the output of an AttachTypedLink response operation.

", + "refs": { + "BatchWriteOperationResponse$AttachTypedLink": "

Attaches a typed link to a specified source and target object. For more information, see Typed link.

" + } + }, + "BatchCreateIndex": { + "base": "

Creates an index object inside a BatchWrite operation. For more information, see CreateIndex and BatchWriteRequest$Operations.

", + "refs": { + "BatchWriteOperation$CreateIndex": "

Creates an index object. See Indexing for more information.

" + } + }, + "BatchCreateIndexResponse": { + "base": "

Represents the output of a CreateIndex response operation.

", + "refs": { + "BatchWriteOperationResponse$CreateIndex": "

Creates an index object. See Indexing for more information.

" + } + }, "BatchCreateObject": { - "base": "

Represents the output of a CreateObject operation.

", + "base": "

Represents the output of a CreateObject operation.

", "refs": { "BatchWriteOperation$CreateObject": "

Creates an object.

" } }, "BatchCreateObjectResponse": { - "base": "

Represents the output of a CreateObject response operation.

", + "base": "

Represents the output of a CreateObject response operation.

", "refs": { "BatchWriteOperationResponse$CreateObject": "

Creates an object in a Directory.

" } }, "BatchDeleteObject": { - "base": "

Represents the output of a DeleteObject operation.

", + "base": "

Represents the output of a DeleteObject operation.

", "refs": { "BatchWriteOperation$DeleteObject": "

Deletes an object in a Directory.

" } }, "BatchDeleteObjectResponse": { - "base": "

Represents the output of a DeleteObject response operation.

", + "base": "

Represents the output of a DeleteObject response operation.

", "refs": { "BatchWriteOperationResponse$DeleteObject": "

Deletes an object in a Directory.

" } }, + "BatchDetachFromIndex": { + "base": "

Detaches the specified object from the specified index inside a BatchWrite operation. For more information, see DetachFromIndex and BatchWriteRequest$Operations.

", + "refs": { + "BatchWriteOperation$DetachFromIndex": "

Detaches the specified object from the specified index.

" + } + }, + "BatchDetachFromIndexResponse": { + "base": "

Represents the output of a DetachFromIndex response operation.

", + "refs": { + "BatchWriteOperationResponse$DetachFromIndex": "

Detaches the specified object from the specified index.

" + } + }, "BatchDetachObject": { - "base": "

Represents the output of a DetachObject operation.

", + "base": "

Represents the output of a DetachObject operation.

", "refs": { "BatchWriteOperation$DetachObject": "

Detaches an object from a Directory.

" } }, "BatchDetachObjectResponse": { - "base": "

Represents the output of a DetachObject response operation.

", + "base": "

Represents the output of a DetachObject response operation.

", "refs": { "BatchWriteOperationResponse$DetachObject": "

Detaches an object from a Directory.

" } }, + "BatchDetachTypedLink": { + "base": "

Detaches a typed link from a specified source and target object inside a BatchWrite operation. For more information, see DetachTypedLink and BatchWriteRequest$Operations.

", + "refs": { + "BatchWriteOperation$DetachTypedLink": "

Detaches a typed link from a specified source and target object. For more information, see Typed link.

" + } + }, + "BatchDetachTypedLinkResponse": { + "base": "

Represents the output of a DetachTypedLink response operation.

", + "refs": { + "BatchWriteOperationResponse$DetachTypedLink": "

Detaches a typed link from a specified source and target object. For more information, see Typed link.

" + } + }, + "BatchGetObjectInformation": { + "base": "

Retrieves metadata about an object inside a BatchRead operation. For more information, see GetObjectInformation and BatchReadRequest$Operations.

", + "refs": { + "BatchReadOperation$GetObjectInformation": "

Retrieves metadata about an object.

" + } + }, + "BatchGetObjectInformationResponse": { + "base": "

Represents the output of a GetObjectInformation response operation.

", + "refs": { + "BatchReadSuccessfulResponse$GetObjectInformation": "

Retrieves metadata about an object.

" + } + }, + "BatchListAttachedIndices": { + "base": "

Lists indices attached to an object inside a BatchRead operation. For more information, see ListAttachedIndices and BatchReadRequest$Operations.

", + "refs": { + "BatchReadOperation$ListAttachedIndices": "

Lists indices attached to an object.

" + } + }, + "BatchListAttachedIndicesResponse": { + "base": "

Represents the output of a ListAttachedIndices response operation.

", + "refs": { + "BatchReadSuccessfulResponse$ListAttachedIndices": "

Lists indices attached to an object.

" + } + }, + "BatchListIncomingTypedLinks": { + "base": "

Returns a paginated list of all the incoming TypedLinkSpecifier information for an object inside a BatchRead operation. For more information, see ListIncomingTypedLinks and BatchReadRequest$Operations.

", + "refs": { + "BatchReadOperation$ListIncomingTypedLinks": "

Returns a paginated list of all the incoming TypedLinkSpecifier information for an object. It also supports filtering by typed link facet and identity attributes. For more information, see Typed link.

" + } + }, + "BatchListIncomingTypedLinksResponse": { + "base": "

Represents the output of a ListIncomingTypedLinks response operation.

", + "refs": { + "BatchReadSuccessfulResponse$ListIncomingTypedLinks": "

Returns a paginated list of all the incoming TypedLinkSpecifier information for an object. It also supports filtering by typed link facet and identity attributes. For more information, see Typed link.

" + } + }, + "BatchListIndex": { + "base": "

Lists objects attached to the specified index inside a BatchRead operation. For more information, see ListIndex and BatchReadRequest$Operations.

", + "refs": { + "BatchReadOperation$ListIndex": "

Lists objects attached to the specified index.

" + } + }, + "BatchListIndexResponse": { + "base": "

Represents the output of a ListIndex response operation.

", + "refs": { + "BatchReadSuccessfulResponse$ListIndex": "

Lists objects attached to the specified index.

" + } + }, "BatchListObjectAttributes": { - "base": "

Represents the output of a ListObjectAttributes operation.

", + "base": "

Represents the output of a ListObjectAttributes operation.

", "refs": { "BatchReadOperation$ListObjectAttributes": "

Lists all attributes that are associated with an object.

" } }, "BatchListObjectAttributesResponse": { - "base": "

Represents the output of a ListObjectAttributes response operation.

", + "base": "

Represents the output of a ListObjectAttributes response operation.

", "refs": { "BatchReadSuccessfulResponse$ListObjectAttributes": "

Lists all attributes that are associated with an object.

" } }, "BatchListObjectChildren": { - "base": "

Represents the output of a ListObjectChildren operation.

", + "base": "

Represents the output of a ListObjectChildren operation.

", "refs": { "BatchReadOperation$ListObjectChildren": "

Returns a paginated list of child objects that are associated with a given object.

" } }, "BatchListObjectChildrenResponse": { - "base": "

Represents the output of a ListObjectChildren response operation.

", + "base": "

Represents the output of a ListObjectChildren response operation.

", "refs": { "BatchReadSuccessfulResponse$ListObjectChildren": "

Returns a paginated list of child objects that are associated with a given object.

" } }, + "BatchListObjectParentPaths": { + "base": "

Retrieves all available parent paths for any object type such as node, leaf node, policy node, and index node objects inside a BatchRead operation. For more information, see ListObjectParentPaths and BatchReadRequest$Operations.

", + "refs": { + "BatchReadOperation$ListObjectParentPaths": "

Retrieves all available parent paths for any object type such as node, leaf node, policy node, and index node objects. For more information about objects, see Directory Structure.

" + } + }, + "BatchListObjectParentPathsResponse": { + "base": "

Represents the output of a ListObjectParentPaths response operation.

", + "refs": { + "BatchReadSuccessfulResponse$ListObjectParentPaths": "

Retrieves all available parent paths for any object type such as node, leaf node, policy node, and index node objects. For more information about objects, see Directory Structure.

" + } + }, + "BatchListObjectPolicies": { + "base": "

Returns policies attached to an object in a paginated fashion inside a BatchRead operation. For more information, see ListObjectPolicies and BatchReadRequest$Operations.

", + "refs": { + "BatchReadOperation$ListObjectPolicies": "

Returns policies attached to an object in a paginated fashion.

" + } + }, + "BatchListObjectPoliciesResponse": { + "base": "

Represents the output of a ListObjectPolicies response operation.

", + "refs": { + "BatchReadSuccessfulResponse$ListObjectPolicies": "

Returns policies attached to an object in a paginated fashion.

" + } + }, + "BatchListOutgoingTypedLinks": { + "base": "

Returns a paginated list of all the outgoing TypedLinkSpecifier information for an object inside a BatchRead operation. For more information, see ListOutgoingTypedLinks and BatchReadRequest$Operations.

", + "refs": { + "BatchReadOperation$ListOutgoingTypedLinks": "

Returns a paginated list of all the outgoing TypedLinkSpecifier information for an object. It also supports filtering by typed link facet and identity attributes. For more information, see Typed link.

" + } + }, + "BatchListOutgoingTypedLinksResponse": { + "base": "

Represents the output of a ListOutgoingTypedLinks response operation.

", + "refs": { + "BatchReadSuccessfulResponse$ListOutgoingTypedLinks": "

Returns a paginated list of all the outgoing TypedLinkSpecifier information for an object. It also supports filtering by typed link facet and identity attributes. For more information, see Typed link.

" + } + }, + "BatchListPolicyAttachments": { + "base": "

Returns all of the ObjectIdentifiers to which a given policy is attached inside a BatchRead operation. For more information, see ListPolicyAttachments and BatchReadRequest$Operations.

", + "refs": { + "BatchReadOperation$ListPolicyAttachments": "

Returns all of the ObjectIdentifiers to which a given policy is attached.

" + } + }, + "BatchListPolicyAttachmentsResponse": { + "base": "

Represents the output of a ListPolicyAttachments response operation.

", + "refs": { + "BatchReadSuccessfulResponse$ListPolicyAttachments": "

Returns all of the ObjectIdentifiers to which a given policy is attached.

" + } + }, + "BatchLookupPolicy": { + "base": "

Lists all policies from the root of the Directory to the object specified inside a BatchRead operation. For more information, see LookupPolicy and BatchReadRequest$Operations.

", + "refs": { + "BatchReadOperation$LookupPolicy": "

Lists all policies from the root of the Directory to the object specified. If there are no policies present, an empty list is returned. If policies are present, and if some objects don't have the policies attached, it returns the ObjectIdentifier for such objects. If policies are present, it returns ObjectIdentifier, policyId, and policyType. Paths that don't lead to the root from the target object are ignored. For more information, see Policies.

" + } + }, + "BatchLookupPolicyResponse": { + "base": "

Represents the output of a LookupPolicy response operation.

", + "refs": { + "BatchReadSuccessfulResponse$LookupPolicy": "

Lists all policies from the root of the Directory to the object specified. If there are no policies present, an empty list is returned. If policies are present, and if some objects don't have the policies attached, it returns the ObjectIdentifier for such objects. If policies are present, it returns ObjectIdentifier, policyId, and policyType. Paths that don't lead to the root from the target object are ignored. For more information, see Policies.

" + } + }, "BatchOperationIndex": { "base": null, "refs": { @@ -422,6 +604,7 @@ "BatchReferenceName": { "base": null, "refs": { + "BatchCreateIndex$BatchReferenceName": "

The batch reference name. See Batches for more information.

", "BatchCreateObject$BatchReferenceName": "

The batch reference name. See Batches for more information.

", "BatchDetachObject$BatchReferenceName": "

The batch reference name. See Batches for more information.

" } @@ -504,6 +687,7 @@ "Bool": { "base": null, "refs": { + "BatchCreateIndex$IsUnique": "

Indicates whether the attribute that is being indexed has unique values or not.

", "CreateIndexRequest$IsUnique": "

Indicates whether the attribute that is being indexed has unique values or not.

", "FacetAttributeDefinition$IsImmutable": "

Whether the attribute is mutable or not.

", "TypedLinkAttributeDefinition$IsImmutable": "

Whether the attribute is mutable or not.

" @@ -960,6 +1144,8 @@ "IndexAttachmentList": { "base": null, "refs": { + "BatchListAttachedIndicesResponse$IndexAttachments": "

The indices attached to the specified object.

", + "BatchListIndexResponse$IndexAttachments": "

The objects and indexed values attached to the index.

", "ListAttachedIndicesResponse$IndexAttachments": "

The indices attached to the specified object.

", "ListIndexResponse$IndexAttachments": "

The objects and indexed values attached to the index.

" } @@ -1019,6 +1205,7 @@ "refs": { "AttachObjectRequest$LinkName": "

The link name with which the child object is attached to the parent.

", "BatchAttachObject$LinkName": "

The name of the link.

", + "BatchCreateIndex$LinkName": "

The name of the link between the parent object and the index object.

", "BatchCreateObject$LinkName": "

The name of the link.

", "BatchDetachObject$LinkName": "

The name of the link.

", "CreateIndexRequest$LinkName": "

The name of the link between the parent object and the index object.

", @@ -1243,10 +1430,26 @@ "NextToken": { "base": null, "refs": { + "BatchListAttachedIndices$NextToken": "

The pagination token.

", + "BatchListAttachedIndicesResponse$NextToken": "

The pagination token.

", + "BatchListIncomingTypedLinks$NextToken": "

The pagination token.

", + "BatchListIncomingTypedLinksResponse$NextToken": "

The pagination token.

", + "BatchListIndex$NextToken": "

The pagination token.

", + "BatchListIndexResponse$NextToken": "

The pagination token.

", "BatchListObjectAttributes$NextToken": "

The pagination token.

", "BatchListObjectAttributesResponse$NextToken": "

The pagination token.

", "BatchListObjectChildren$NextToken": "

The pagination token.

", "BatchListObjectChildrenResponse$NextToken": "

The pagination token.

", + "BatchListObjectParentPaths$NextToken": "

The pagination token.

", + "BatchListObjectParentPathsResponse$NextToken": "

The pagination token.

", + "BatchListObjectPolicies$NextToken": "

The pagination token.

", + "BatchListObjectPoliciesResponse$NextToken": "

The pagination token.

", + "BatchListOutgoingTypedLinks$NextToken": "

The pagination token.

", + "BatchListOutgoingTypedLinksResponse$NextToken": "

The pagination token.

", + "BatchListPolicyAttachments$NextToken": "

The pagination token.

", + "BatchListPolicyAttachmentsResponse$NextToken": "

The pagination token.

", + "BatchLookupPolicy$NextToken": "

The pagination token.

", + "BatchLookupPolicyResponse$NextToken": "

The pagination token.

", "ListAppliedSchemaArnsRequest$NextToken": "

The pagination token.

", "ListAppliedSchemaArnsResponse$NextToken": "

The pagination token.

", "ListAttachedIndicesRequest$NextToken": "

The pagination token.
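(Reviewer note: every NextToken/MaxResults pair added above follows the same drain-the-pages convention. A sketch of that loop using the non-batch ListIndex call; the directory ARN and index selector are placeholders.)

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/clouddirectory"
)

// listAllIndexAttachments drains every page of a ListIndex call by feeding
// each response's NextToken back into the next request until it comes back nil.
func listAllIndexAttachments(svc *clouddirectory.CloudDirectory, directoryArn, indexSelector string) {
	input := &clouddirectory.ListIndexInput{
		DirectoryArn: aws.String(directoryArn),
		IndexReference: &clouddirectory.ObjectReference{
			Selector: aws.String(indexSelector),
		},
		MaxResults: aws.Int64(30), // page size; the service treats this as approximate
	}
	for {
		page, err := svc.ListIndex(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, att := range page.IndexAttachments {
			fmt.Println(aws.StringValue(att.ObjectIdentifier))
		}
		if page.NextToken == nil {
			break // final page
		}
		input.NextToken = page.NextToken
	}
}

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	// Placeholder ARN and index selector.
	listAllIndexAttachments(clouddirectory.New(sess), "arn:aws:clouddirectory:us-east-1:111122223333:directory/EXAMPLE", "#exampleIndex")
}
```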

", @@ -1313,8 +1516,16 @@ "NumberResults": { "base": null, "refs": { + "BatchListAttachedIndices$MaxResults": "

The maximum number of results to retrieve.

", + "BatchListIncomingTypedLinks$MaxResults": "

The maximum number of results to retrieve.

", + "BatchListIndex$MaxResults": "

The maximum number of results to retrieve.

", "BatchListObjectAttributes$MaxResults": "

The maximum number of items to be retrieved in a single call. This is an approximate number.

", "BatchListObjectChildren$MaxResults": "

Maximum number of items to be retrieved in a single call. This is an approximate number.

", + "BatchListObjectParentPaths$MaxResults": "

The maximum number of results to retrieve.

", + "BatchListObjectPolicies$MaxResults": "

The maximum number of results to retrieve.

", + "BatchListOutgoingTypedLinks$MaxResults": "

The maximum number of results to retrieve.

", + "BatchListPolicyAttachments$MaxResults": "

The maximum number of results to retrieve.

", + "BatchLookupPolicy$MaxResults": "

The maximum number of results to retrieve.

", "ListAppliedSchemaArnsRequest$MaxResults": "

The maximum number of results to retrieve.

", "ListAttachedIndicesRequest$MaxResults": "

The maximum number of results to retrieve.

", "ListDevelopmentSchemaArnsRequest$MaxResults": "

The maximum number of results to retrieve.

", @@ -1356,6 +1567,7 @@ "ObjectAttributeRangeList": { "base": null, "refs": { + "BatchListIndex$RangesOnIndexedValues": "

Specifies the ranges of indexed values that you want to query.

", "ListIndexRequest$RangesOnIndexedValues": "

Specifies the ranges of indexed values that you want to query.

" } }, @@ -1378,8 +1590,12 @@ "AttachObjectResponse$AttachedObjectIdentifier": "

The attached ObjectIdentifier, which is the child ObjectIdentifier.

", "AttachToIndexResponse$AttachedObjectIdentifier": "

The ObjectIdentifier of the object that was attached to the index.

", "BatchAttachObjectResponse$attachedObjectIdentifier": "

The ObjectIdentifier of the object that has been attached.

", + "BatchAttachToIndexResponse$AttachedObjectIdentifier": "

The ObjectIdentifier of the object that was attached to the index.

", + "BatchCreateIndexResponse$ObjectIdentifier": "

The ObjectIdentifier of the index created by this operation.

", "BatchCreateObjectResponse$ObjectIdentifier": "

The ID that is associated with the object.

", + "BatchDetachFromIndexResponse$DetachedObjectIdentifier": "

The ObjectIdentifier of the object that was detached from the index.

", "BatchDetachObjectResponse$detachedObjectIdentifier": "

The ObjectIdentifier of the detached object.

", + "BatchGetObjectInformationResponse$ObjectIdentifier": "

The ObjectIdentifier of the specified object.

", "BatchUpdateObjectAttributesResponse$ObjectIdentifier": "

ID that is associated with the object.

", "CreateDirectoryResponse$ObjectIdentifier": "

The root object node of the created directory.

", "CreateIndexResponse$ObjectIdentifier": "

The ObjectIdentifier of the index created by this operation.

", @@ -1399,6 +1615,8 @@ "ObjectIdentifierList": { "base": null, "refs": { + "BatchListObjectPoliciesResponse$AttachedPolicyIds": "

A list of policy ObjectIdentifiers that are attached to the object.

", + "BatchListPolicyAttachmentsResponse$ObjectIdentifiers": "

A list of ObjectIdentifiers to which the policy is attached.

", "ListObjectPoliciesResponse$AttachedPolicyIds": "

A list of policy ObjectIdentifiers that are attached to the object.

", "ListPolicyAttachmentsResponse$ObjectIdentifiers": "

A list of ObjectIdentifiers to which the policy is attached.

", "PathToObjectIdentifiers$ObjectIdentifiers": "

Lists ObjectIdentifiers starting from directory root to the object in the request.

" @@ -1430,11 +1648,29 @@ "BatchAddFacetToObject$ObjectReference": "

A reference to the object being mutated.

", "BatchAttachObject$ParentReference": "

The parent object reference.

", "BatchAttachObject$ChildReference": "

The child object reference that is to be attached to the object.

", + "BatchAttachPolicy$PolicyReference": "

The reference that is associated with the policy object.

", + "BatchAttachPolicy$ObjectReference": "

The reference that identifies the object to which the policy will be attached.

", + "BatchAttachToIndex$IndexReference": "

A reference to the index that you are attaching the object to.

", + "BatchAttachToIndex$TargetReference": "

A reference to the object that you are attaching to the index.

", + "BatchAttachTypedLink$SourceObjectReference": "

Identifies the source object that the typed link will attach to.

", + "BatchAttachTypedLink$TargetObjectReference": "

Identifies the target object that the typed link will attach to.

", + "BatchCreateIndex$ParentReference": "

A reference to the parent object that contains the index object.

", "BatchCreateObject$ParentReference": "

If specified, the parent reference to which this object will be attached.

", "BatchDeleteObject$ObjectReference": "

The reference that identifies the object.

", + "BatchDetachFromIndex$IndexReference": "

A reference to the index object.

", + "BatchDetachFromIndex$TargetReference": "

A reference to the object being detached from the index.

", "BatchDetachObject$ParentReference": "

Parent reference from which the object with the specified link name is detached.

", + "BatchGetObjectInformation$ObjectReference": "

A reference to the object.

", + "BatchListAttachedIndices$TargetReference": "

A reference to the object that has indices attached.

", + "BatchListIncomingTypedLinks$ObjectReference": "

The reference that identifies the object whose incoming typed links will be listed.

", + "BatchListIndex$IndexReference": "

The reference to the index to list.

", "BatchListObjectAttributes$ObjectReference": "

Reference of the object whose attributes need to be listed.

", "BatchListObjectChildren$ObjectReference": "

Reference of the object for which child objects are being listed.

", + "BatchListObjectParentPaths$ObjectReference": "

The reference that identifies the object whose parent paths will be listed.

", + "BatchListObjectPolicies$ObjectReference": "

The reference that identifies the object whose policies will be listed.

", + "BatchListOutgoingTypedLinks$ObjectReference": "

The reference that identifies the object whose outgoing typed links will be listed.

", + "BatchListPolicyAttachments$PolicyReference": "

The reference that identifies the policy object.

", + "BatchLookupPolicy$ObjectReference": "

Reference that identifies the object whose policies will be looked up.

", "BatchRemoveFacetFromObject$ObjectReference": "

A reference to the object whose facet will be removed.

", "BatchUpdateObjectAttributes$ObjectReference": "

Reference that identifies the object.

", "CreateIndexRequest$ParentReference": "

A reference to the parent object that contains the index object.

", @@ -1446,7 +1682,7 @@ "DetachPolicyRequest$PolicyReference": "

Reference that identifies the policy object.

", "DetachPolicyRequest$ObjectReference": "

Reference that identifies the object whose policy object will be detached.

", "GetObjectInformationRequest$ObjectReference": "

A reference to the object.

", - "ListAttachedIndicesRequest$TargetReference": "

A reference to the object to that has indices attached.

", + "ListAttachedIndicesRequest$TargetReference": "

A reference to the object that has indices attached.

", "ListIncomingTypedLinksRequest$ObjectReference": "

Reference that identifies the object whose incoming typed links will be listed.

", "ListIndexRequest$IndexReference": "

The reference to the index to list.

", "ListObjectAttributesRequest$ObjectReference": "

The reference that identifies the object whose attributes will be listed.

", @@ -1487,6 +1723,7 @@ "PathToObjectIdentifiersList": { "base": null, "refs": { + "BatchListObjectParentPathsResponse$PathToObjectIdentifiersList": "

Returns the path to the ObjectIdentifiers that are associated with the directory.

", "ListObjectParentPathsResponse$PathToObjectIdentifiersList": "

Returns the path to the ObjectIdentifiers that are associated with the directory.

" } }, @@ -1511,6 +1748,7 @@ "PolicyToPathList": { "base": null, "refs": { + "BatchLookupPolicyResponse$PolicyToPathList": "

Provides a list of paths to policies. Policies contain PolicyId, ObjectIdentifier, and PolicyType. For more information, see Policies.

", "LookupPolicyResponse$PolicyToPathList": "

Provides a list of paths to policies. Policies contain PolicyId, ObjectIdentifier, and PolicyType. For more information, see Policies.

" } }, @@ -1643,6 +1881,7 @@ "base": null, "refs": { "BatchCreateObject$SchemaFacet": "

A list of FacetArns that will be associated with the object. For more information, see arns.

", + "BatchGetObjectInformationResponse$SchemaFacets": "

The facets attached to the specified object.

", "CreateObjectRequest$SchemaFacets": "

A list of schema facets to be associated with the object that contains SchemaArn and facet name. For more information, see arns.

", "GetObjectInformationResponse$SchemaFacets": "

The facets attached to the specified object.

" } @@ -1770,6 +2009,8 @@ "TypedLinkAttributeRangeList": { "base": null, "refs": { + "BatchListIncomingTypedLinks$FilterAttributeRanges": "

Provides range filters for multiple attributes. When providing ranges to typed link selection, any inexact ranges must be specified at the end. Any attributes that do not have a range specified are presumed to match the entire range.

", + "BatchListOutgoingTypedLinks$FilterAttributeRanges": "

Provides range filters for multiple attributes. When providing ranges to typed link selection, any inexact ranges must be specified at the end. Any attributes that do not have a range specified are presumed to match the entire range.

", "ListIncomingTypedLinksRequest$FilterAttributeRanges": "

Provides range filters for multiple attributes. When providing ranges to typed link selection, any inexact ranges must be specified at the end. Any attributes that do not have a range specified are presumed to match the entire range.

", "ListOutgoingTypedLinksRequest$FilterAttributeRanges": "

Provides range filters for multiple attributes. When providing ranges to typed link selection, any inexact ranges must be specified at the end. Any attributes that do not have a range specified are presumed to match the entire range.

" } @@ -1814,6 +2055,9 @@ "base": "

Identifies the schema Amazon Resource Name (ARN) and facet name for the typed link.

", "refs": { "AttachTypedLinkRequest$TypedLinkFacet": "

Identifies the typed link facet that is associated with the typed link.

", + "BatchAttachTypedLink$TypedLinkFacet": "

Identifies the typed link facet that is associated with the typed link.

", + "BatchListIncomingTypedLinks$FilterTypedLink": "

Filters are interpreted in the order of the attributes on the typed link facet, not the order in which they are supplied to any API calls.

", + "BatchListOutgoingTypedLinks$FilterTypedLink": "

Filters are interpreted in the order of the attributes defined on the typed link facet, not the order they are supplied to any API calls.

", "ListIncomingTypedLinksRequest$FilterTypedLink": "

Filters are interpreted in the order of the attributes on the typed link facet, not the order in which they are supplied to any API calls.

", "ListOutgoingTypedLinksRequest$FilterTypedLink": "

Filters are interpreted in the order of the attributes defined on the typed link facet, not the order they are supplied to any API calls.

", "TypedLinkSpecifier$TypedLinkFacet": "

Identifies the typed link facet that is associated with the typed link.

" @@ -1823,6 +2067,8 @@ "base": "

Contains all the information that is used to uniquely identify a typed link. The parameters discussed in this topic are used to uniquely specify the typed link being operated on. The AttachTypedLink API returns a typed link specifier while the DetachTypedLink API accepts one as input. Similarly, the ListIncomingTypedLinks and ListOutgoingTypedLinks API operations provide typed link specifiers as output. You can also construct a typed link specifier from scratch.

", "refs": { "AttachTypedLinkResponse$TypedLinkSpecifier": "

Returns a typed link specifier as output.

", + "BatchAttachTypedLinkResponse$TypedLinkSpecifier": "

Returns a typed link specifier as output.

", + "BatchDetachTypedLink$TypedLinkSpecifier": "

Used to accept a typed link specifier as input.

", "DetachTypedLinkRequest$TypedLinkSpecifier": "

Used to accept a typed link specifier as input.

", "TypedLinkSpecifierList$member": null } @@ -1830,6 +2076,8 @@ "TypedLinkSpecifierList": { "base": null, "refs": { + "BatchListIncomingTypedLinksResponse$LinkSpecifiers": "

Returns one or more typed link specifiers as output.

", + "BatchListOutgoingTypedLinksResponse$TypedLinkSpecifiers": "

Returns one or more typed link specifiers as output.

", "ListIncomingTypedLinksResponse$LinkSpecifiers": "

Returns one or more typed link specifiers as output.

", "ListOutgoingTypedLinksResponse$TypedLinkSpecifiers": "

Returns one or more typed link specifiers as output.

" } diff --git a/cli/vendor/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/api-2.json b/cli/vendor/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/api-2.json index 517a91e09..e9e53a4f7 100644 --- a/cli/vendor/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/api-2.json +++ b/cli/vendor/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/api-2.json @@ -71,6 +71,43 @@ {"shape":"InsufficientCapabilitiesException"} ] }, + "CreateStackInstances":{ + "name":"CreateStackInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateStackInstancesInput"}, + "output":{ + "shape":"CreateStackInstancesOutput", + "resultWrapper":"CreateStackInstancesResult" + }, + "errors":[ + {"shape":"StackSetNotFoundException"}, + {"shape":"OperationInProgressException"}, + {"shape":"OperationIdAlreadyExistsException"}, + {"shape":"StaleRequestException"}, + {"shape":"InvalidOperationException"}, + {"shape":"LimitExceededException"} + ] + }, + "CreateStackSet":{ + "name":"CreateStackSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateStackSetInput"}, + "output":{ + "shape":"CreateStackSetOutput", + "resultWrapper":"CreateStackSetResult" + }, + "errors":[ + {"shape":"NameAlreadyExistsException"}, + {"shape":"CreatedButModifiedException"}, + {"shape":"LimitExceededException"} + ] + }, "DeleteChangeSet":{ "name":"DeleteChangeSet", "http":{ @@ -97,6 +134,41 @@ {"shape":"TokenAlreadyExistsException"} ] }, + "DeleteStackInstances":{ + "name":"DeleteStackInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteStackInstancesInput"}, + "output":{ + "shape":"DeleteStackInstancesOutput", + "resultWrapper":"DeleteStackInstancesResult" + }, + "errors":[ + {"shape":"StackSetNotFoundException"}, + {"shape":"OperationInProgressException"}, + {"shape":"OperationIdAlreadyExistsException"}, + {"shape":"StaleRequestException"}, + {"shape":"InvalidOperationException"} + ] + }, + "DeleteStackSet":{ + "name":"DeleteStackSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteStackSetInput"}, + "output":{ + "shape":"DeleteStackSetOutput", + "resultWrapper":"DeleteStackSetResult" + }, + "errors":[ + {"shape":"StackSetNotEmptyException"}, + {"shape":"OperationInProgressException"} + ] + }, "DescribeAccountLimits":{ "name":"DescribeAccountLimits", "http":{ @@ -136,6 +208,22 @@ "resultWrapper":"DescribeStackEventsResult" } }, + "DescribeStackInstance":{ + "name":"DescribeStackInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeStackInstanceInput"}, + "output":{ + "shape":"DescribeStackInstanceOutput", + "resultWrapper":"DescribeStackInstanceResult" + }, + "errors":[ + {"shape":"StackSetNotFoundException"}, + {"shape":"StackInstanceNotFoundException"} + ] + }, "DescribeStackResource":{ "name":"DescribeStackResource", "http":{ @@ -160,6 +248,37 @@ "resultWrapper":"DescribeStackResourcesResult" } }, + "DescribeStackSet":{ + "name":"DescribeStackSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeStackSetInput"}, + "output":{ + "shape":"DescribeStackSetOutput", + "resultWrapper":"DescribeStackSetResult" + }, + "errors":[ + {"shape":"StackSetNotFoundException"} + ] + }, + "DescribeStackSetOperation":{ + "name":"DescribeStackSetOperation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeStackSetOperationInput"}, + "output":{ + 
"shape":"DescribeStackSetOperationOutput", + "resultWrapper":"DescribeStackSetOperationResult" + }, + "errors":[ + {"shape":"StackSetNotFoundException"}, + {"shape":"OperationNotFoundException"} + ] + }, "DescribeStacks":{ "name":"DescribeStacks", "http":{ @@ -239,7 +358,10 @@ "output":{ "shape":"GetTemplateSummaryOutput", "resultWrapper":"GetTemplateSummaryResult" - } + }, + "errors":[ + {"shape":"StackSetNotFoundException"} + ] }, "ListChangeSets":{ "name":"ListChangeSets", @@ -277,6 +399,21 @@ "resultWrapper":"ListImportsResult" } }, + "ListStackInstances":{ + "name":"ListStackInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListStackInstancesInput"}, + "output":{ + "shape":"ListStackInstancesOutput", + "resultWrapper":"ListStackInstancesResult" + }, + "errors":[ + {"shape":"StackSetNotFoundException"} + ] + }, "ListStackResources":{ "name":"ListStackResources", "http":{ @@ -289,6 +426,49 @@ "resultWrapper":"ListStackResourcesResult" } }, + "ListStackSetOperationResults":{ + "name":"ListStackSetOperationResults", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListStackSetOperationResultsInput"}, + "output":{ + "shape":"ListStackSetOperationResultsOutput", + "resultWrapper":"ListStackSetOperationResultsResult" + }, + "errors":[ + {"shape":"StackSetNotFoundException"}, + {"shape":"OperationNotFoundException"} + ] + }, + "ListStackSetOperations":{ + "name":"ListStackSetOperations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListStackSetOperationsInput"}, + "output":{ + "shape":"ListStackSetOperationsOutput", + "resultWrapper":"ListStackSetOperationsResult" + }, + "errors":[ + {"shape":"StackSetNotFoundException"} + ] + }, + "ListStackSets":{ + "name":"ListStackSets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListStackSetsInput"}, + "output":{ + "shape":"ListStackSetsOutput", + "resultWrapper":"ListStackSetsResult" + } + }, "ListStacks":{ "name":"ListStacks", "http":{ @@ -317,6 +497,23 @@ }, "input":{"shape":"SignalResourceInput"} }, + "StopStackSetOperation":{ + "name":"StopStackSetOperation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopStackSetOperationInput"}, + "output":{ + "shape":"StopStackSetOperationOutput", + "resultWrapper":"StopStackSetOperationResult" + }, + "errors":[ + {"shape":"StackSetNotFoundException"}, + {"shape":"OperationNotFoundException"}, + {"shape":"InvalidOperationException"} + ] + }, "UpdateStack":{ "name":"UpdateStack", "http":{ @@ -333,6 +530,25 @@ {"shape":"TokenAlreadyExistsException"} ] }, + "UpdateStackSet":{ + "name":"UpdateStackSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateStackSetInput"}, + "output":{ + "shape":"UpdateStackSetOutput", + "resultWrapper":"UpdateStackSetResult" + }, + "errors":[ + {"shape":"StackSetNotFoundException"}, + {"shape":"OperationInProgressException"}, + {"shape":"OperationIdAlreadyExistsException"}, + {"shape":"StaleRequestException"}, + {"shape":"InvalidOperationException"} + ] + }, "ValidateTemplate":{ "name":"ValidateTemplate", "http":{ @@ -347,6 +563,26 @@ } }, "shapes":{ + "Account":{ + "type":"string", + "pattern":"[0-9]{12}" + }, + "AccountGateResult":{ + "type":"structure", + "members":{ + "Status":{"shape":"AccountGateStatus"}, + "StatusReason":{"shape":"AccountGateStatusReason"} + } + }, + "AccountGateStatus":{ + "type":"string", + "enum":[ + "SUCCEEDED", + "FAILED", + "SKIPPED" + ] + }, + 
"AccountGateStatusReason":{"type":"string"}, "AccountLimit":{ "type":"structure", "members":{ @@ -358,6 +594,10 @@ "type":"list", "member":{"shape":"AccountLimit"} }, + "AccountList":{ + "type":"list", + "member":{"shape":"Account"} + }, "AllowedValue":{"type":"string"}, "AllowedValues":{ "type":"list", @@ -496,7 +736,7 @@ "type":"string", "max":128, "min":1, - "pattern":"[a-zA-Z][-a-zA-Z0-9]*" + "pattern":"[a-zA-Z0-9][-a-zA-Z0-9]*" }, "ClientToken":{ "type":"string", @@ -569,12 +809,70 @@ "ClientRequestToken":{"shape":"ClientRequestToken"} } }, + "CreateStackInstancesInput":{ + "type":"structure", + "required":[ + "StackSetName", + "Accounts", + "Regions" + ], + "members":{ + "StackSetName":{"shape":"StackSetName"}, + "Accounts":{"shape":"AccountList"}, + "Regions":{"shape":"RegionList"}, + "OperationPreferences":{"shape":"StackSetOperationPreferences"}, + "OperationId":{ + "shape":"ClientRequestToken", + "idempotencyToken":true + } + } + }, + "CreateStackInstancesOutput":{ + "type":"structure", + "members":{ + "OperationId":{"shape":"ClientRequestToken"} + } + }, "CreateStackOutput":{ "type":"structure", "members":{ "StackId":{"shape":"StackId"} } }, + "CreateStackSetInput":{ + "type":"structure", + "required":["StackSetName"], + "members":{ + "StackSetName":{"shape":"StackSetName"}, + "Description":{"shape":"Description"}, + "TemplateBody":{"shape":"TemplateBody"}, + "TemplateURL":{"shape":"TemplateURL"}, + "Parameters":{"shape":"Parameters"}, + "Capabilities":{"shape":"Capabilities"}, + "Tags":{"shape":"Tags"}, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "idempotencyToken":true + } + } + }, + "CreateStackSetOutput":{ + "type":"structure", + "members":{ + "StackSetId":{"shape":"StackSetId"} + } + }, + "CreatedButModifiedException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"CreatedButModifiedException", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, "CreationTime":{"type":"timestamp"}, "DeleteChangeSetInput":{ "type":"structure", @@ -599,6 +897,44 @@ "ClientRequestToken":{"shape":"ClientRequestToken"} } }, + "DeleteStackInstancesInput":{ + "type":"structure", + "required":[ + "StackSetName", + "Accounts", + "Regions", + "RetainStacks" + ], + "members":{ + "StackSetName":{"shape":"StackSetName"}, + "Accounts":{"shape":"AccountList"}, + "Regions":{"shape":"RegionList"}, + "OperationPreferences":{"shape":"StackSetOperationPreferences"}, + "RetainStacks":{"shape":"RetainStacks"}, + "OperationId":{ + "shape":"ClientRequestToken", + "idempotencyToken":true + } + } + }, + "DeleteStackInstancesOutput":{ + "type":"structure", + "members":{ + "OperationId":{"shape":"ClientRequestToken"} + } + }, + "DeleteStackSetInput":{ + "type":"structure", + "required":["StackSetName"], + "members":{ + "StackSetName":{"shape":"StackSetName"} + } + }, + "DeleteStackSetOutput":{ + "type":"structure", + "members":{ + } + }, "DeletionTime":{"type":"timestamp"}, "DescribeAccountLimitsInput":{ "type":"structure", @@ -656,6 +992,25 @@ "NextToken":{"shape":"NextToken"} } }, + "DescribeStackInstanceInput":{ + "type":"structure", + "required":[ + "StackSetName", + "StackInstanceAccount", + "StackInstanceRegion" + ], + "members":{ + "StackSetName":{"shape":"StackSetName"}, + "StackInstanceAccount":{"shape":"Account"}, + "StackInstanceRegion":{"shape":"Region"} + } + }, + "DescribeStackInstanceOutput":{ + "type":"structure", + "members":{ + "StackInstance":{"shape":"StackInstance"} + } + }, "DescribeStackResourceInput":{ "type":"structure", "required":[ @@ 
-687,6 +1042,36 @@ "StackResources":{"shape":"StackResources"} } }, + "DescribeStackSetInput":{ + "type":"structure", + "required":["StackSetName"], + "members":{ + "StackSetName":{"shape":"StackSetName"} + } + }, + "DescribeStackSetOperationInput":{ + "type":"structure", + "required":[ + "StackSetName", + "OperationId" + ], + "members":{ + "StackSetName":{"shape":"StackSetName"}, + "OperationId":{"shape":"ClientRequestToken"} + } + }, + "DescribeStackSetOperationOutput":{ + "type":"structure", + "members":{ + "StackSetOperation":{"shape":"StackSetOperation"} + } + }, + "DescribeStackSetOutput":{ + "type":"structure", + "members":{ + "StackSet":{"shape":"StackSet"} + } + }, "DescribeStacksInput":{ "type":"structure", "members":{ @@ -768,6 +1153,15 @@ "type":"list", "member":{"shape":"Export"} }, + "FailureToleranceCount":{ + "type":"integer", + "min":0 + }, + "FailureTolerancePercentage":{ + "type":"integer", + "max":100, + "min":0 + }, "GetStackPolicyInput":{ "type":"structure", "required":["StackName"], @@ -801,7 +1195,8 @@ "members":{ "TemplateBody":{"shape":"TemplateBody"}, "TemplateURL":{"shape":"TemplateURL"}, - "StackName":{"shape":"StackNameOrId"} + "StackName":{"shape":"StackNameOrId"}, + "StackSetName":{"shape":"StackSetNameOrId"} } }, "GetTemplateSummaryOutput":{ @@ -843,6 +1238,17 @@ }, "exception":true }, + "InvalidOperationException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidOperationException", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "LastUpdatedTime":{"type":"timestamp"}, "LimitExceededException":{ "type":"structure", @@ -900,6 +1306,24 @@ "NextToken":{"shape":"NextToken"} } }, + "ListStackInstancesInput":{ + "type":"structure", + "required":["StackSetName"], + "members":{ + "StackSetName":{"shape":"StackSetName"}, + "NextToken":{"shape":"NextToken"}, + "MaxResults":{"shape":"MaxResults"}, + "StackInstanceAccount":{"shape":"Account"}, + "StackInstanceRegion":{"shape":"Region"} + } + }, + "ListStackInstancesOutput":{ + "type":"structure", + "members":{ + "Summaries":{"shape":"StackInstanceSummaries"}, + "NextToken":{"shape":"NextToken"} + } + }, "ListStackResourcesInput":{ "type":"structure", "required":["StackName"], @@ -915,6 +1339,57 @@ "NextToken":{"shape":"NextToken"} } }, + "ListStackSetOperationResultsInput":{ + "type":"structure", + "required":[ + "StackSetName", + "OperationId" + ], + "members":{ + "StackSetName":{"shape":"StackSetName"}, + "OperationId":{"shape":"ClientRequestToken"}, + "NextToken":{"shape":"NextToken"}, + "MaxResults":{"shape":"MaxResults"} + } + }, + "ListStackSetOperationResultsOutput":{ + "type":"structure", + "members":{ + "Summaries":{"shape":"StackSetOperationResultSummaries"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListStackSetOperationsInput":{ + "type":"structure", + "required":["StackSetName"], + "members":{ + "StackSetName":{"shape":"StackSetName"}, + "NextToken":{"shape":"NextToken"}, + "MaxResults":{"shape":"MaxResults"} + } + }, + "ListStackSetOperationsOutput":{ + "type":"structure", + "members":{ + "Summaries":{"shape":"StackSetOperationSummaries"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListStackSetsInput":{ + "type":"structure", + "members":{ + "NextToken":{"shape":"NextToken"}, + "MaxResults":{"shape":"MaxResults"}, + "Status":{"shape":"StackSetStatus"} + } + }, + "ListStackSetsOutput":{ + "type":"structure", + "members":{ + "Summaries":{"shape":"StackSetSummaries"}, + "NextToken":{"shape":"NextToken"} + } + }, "ListStacksInput":{ 
"type":"structure", "members":{ @@ -930,7 +1405,32 @@ } }, "LogicalResourceId":{"type":"string"}, + "MaxConcurrentCount":{ + "type":"integer", + "min":1 + }, + "MaxConcurrentPercentage":{ + "type":"integer", + "max":100, + "min":1 + }, + "MaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, "Metadata":{"type":"string"}, + "NameAlreadyExistsException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"NameAlreadyExistsException", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, "NextToken":{ "type":"string", "max":1024, @@ -951,12 +1451,46 @@ "DELETE" ] }, + "OperationIdAlreadyExistsException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"OperationIdAlreadyExistsException", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "OperationInProgressException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"OperationInProgressException", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "OperationNotFoundException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"OperationNotFoundException", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, "Output":{ "type":"structure", "members":{ "OutputKey":{"shape":"OutputKey"}, "OutputValue":{"shape":"OutputValue"}, - "Description":{"shape":"Description"} + "Description":{"shape":"Description"}, + "ExportName":{"shape":"ExportName"} } }, "OutputKey":{"type":"string"}, @@ -1003,6 +1537,12 @@ }, "PhysicalResourceId":{"type":"string"}, "PropertyName":{"type":"string"}, + "Reason":{"type":"string"}, + "Region":{"type":"string"}, + "RegionList":{ + "type":"list", + "member":{"shape":"Region"} + }, "Replacement":{ "type":"string", "enum":[ @@ -1113,6 +1653,8 @@ "type":"list", "member":{"shape":"LogicalResourceId"} }, + "RetainStacks":{"type":"boolean"}, + "RetainStacksNullable":{"type":"boolean"}, "RoleARN":{ "type":"string", "max":2048, @@ -1199,6 +1741,51 @@ "member":{"shape":"StackEvent"} }, "StackId":{"type":"string"}, + "StackInstance":{ + "type":"structure", + "members":{ + "StackSetId":{"shape":"StackSetId"}, + "Region":{"shape":"Region"}, + "Account":{"shape":"Account"}, + "StackId":{"shape":"StackId"}, + "Status":{"shape":"StackInstanceStatus"}, + "StatusReason":{"shape":"Reason"} + } + }, + "StackInstanceNotFoundException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"StackInstanceNotFoundException", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "StackInstanceStatus":{ + "type":"string", + "enum":[ + "CURRENT", + "OUTDATED", + "INOPERABLE" + ] + }, + "StackInstanceSummaries":{ + "type":"list", + "member":{"shape":"StackInstanceSummary"} + }, + "StackInstanceSummary":{ + "type":"structure", + "members":{ + "StackSetId":{"shape":"StackSetId"}, + "Region":{"shape":"Region"}, + "Account":{"shape":"Account"}, + "StackId":{"shape":"StackId"}, + "Status":{"shape":"StackInstanceStatus"}, + "StatusReason":{"shape":"Reason"} + } + }, "StackName":{"type":"string"}, "StackNameOrId":{ "type":"string", @@ -1291,6 +1878,147 @@ "type":"list", "member":{"shape":"StackResource"} }, + "StackSet":{ + "type":"structure", + "members":{ + "StackSetName":{"shape":"StackSetName"}, + "StackSetId":{"shape":"StackSetId"}, + "Description":{"shape":"Description"}, + "Status":{"shape":"StackSetStatus"}, + "TemplateBody":{"shape":"TemplateBody"}, + "Parameters":{"shape":"Parameters"}, + "Capabilities":{"shape":"Capabilities"}, + "Tags":{"shape":"Tags"} + } + }, + 
"StackSetId":{"type":"string"}, + "StackSetName":{"type":"string"}, + "StackSetNameOrId":{ + "type":"string", + "min":1, + "pattern":"[a-zA-Z][-a-zA-Z0-9]*" + }, + "StackSetNotEmptyException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"StackSetNotEmptyException", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "StackSetNotFoundException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"StackSetNotFoundException", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "StackSetOperation":{ + "type":"structure", + "members":{ + "OperationId":{"shape":"ClientRequestToken"}, + "StackSetId":{"shape":"StackSetId"}, + "Action":{"shape":"StackSetOperationAction"}, + "Status":{"shape":"StackSetOperationStatus"}, + "OperationPreferences":{"shape":"StackSetOperationPreferences"}, + "RetainStacks":{"shape":"RetainStacksNullable"}, + "CreationTimestamp":{"shape":"Timestamp"}, + "EndTimestamp":{"shape":"Timestamp"} + } + }, + "StackSetOperationAction":{ + "type":"string", + "enum":[ + "CREATE", + "UPDATE", + "DELETE" + ] + }, + "StackSetOperationPreferences":{ + "type":"structure", + "members":{ + "RegionOrder":{"shape":"RegionList"}, + "FailureToleranceCount":{"shape":"FailureToleranceCount"}, + "FailureTolerancePercentage":{"shape":"FailureTolerancePercentage"}, + "MaxConcurrentCount":{"shape":"MaxConcurrentCount"}, + "MaxConcurrentPercentage":{"shape":"MaxConcurrentPercentage"} + } + }, + "StackSetOperationResultStatus":{ + "type":"string", + "enum":[ + "PENDING", + "RUNNING", + "SUCCEEDED", + "FAILED", + "CANCELLED" + ] + }, + "StackSetOperationResultSummaries":{ + "type":"list", + "member":{"shape":"StackSetOperationResultSummary"} + }, + "StackSetOperationResultSummary":{ + "type":"structure", + "members":{ + "Account":{"shape":"Account"}, + "Region":{"shape":"Region"}, + "Status":{"shape":"StackSetOperationResultStatus"}, + "StatusReason":{"shape":"Reason"}, + "AccountGateResult":{"shape":"AccountGateResult"} + } + }, + "StackSetOperationStatus":{ + "type":"string", + "enum":[ + "RUNNING", + "SUCCEEDED", + "FAILED", + "STOPPING", + "STOPPED" + ] + }, + "StackSetOperationSummaries":{ + "type":"list", + "member":{"shape":"StackSetOperationSummary"} + }, + "StackSetOperationSummary":{ + "type":"structure", + "members":{ + "OperationId":{"shape":"ClientRequestToken"}, + "Action":{"shape":"StackSetOperationAction"}, + "Status":{"shape":"StackSetOperationStatus"}, + "CreationTimestamp":{"shape":"Timestamp"}, + "EndTimestamp":{"shape":"Timestamp"} + } + }, + "StackSetStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "DELETED" + ] + }, + "StackSetSummaries":{ + "type":"list", + "member":{"shape":"StackSetSummary"} + }, + "StackSetSummary":{ + "type":"structure", + "members":{ + "StackSetName":{"shape":"StackSetName"}, + "StackSetId":{"shape":"StackSetId"}, + "Description":{"shape":"Description"}, + "Status":{"shape":"StackSetStatus"} + } + }, "StackStatus":{ "type":"string", "enum":[ @@ -1348,18 +2076,58 @@ "type":"list", "member":{"shape":"TemplateStage"} }, + "StaleRequestException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"StaleRequestException", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "StopStackSetOperationInput":{ + "type":"structure", + "required":[ + "StackSetName", + "OperationId" + ], + "members":{ + "StackSetName":{"shape":"StackSetName"}, + "OperationId":{"shape":"ClientRequestToken"} + } + }, + "StopStackSetOperationOutput":{ + 
"type":"structure", + "members":{ + } + }, "Tag":{ "type":"structure", + "required":[ + "Key", + "Value" + ], "members":{ "Key":{"shape":"TagKey"}, "Value":{"shape":"TagValue"} } }, - "TagKey":{"type":"string"}, - "TagValue":{"type":"string"}, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagValue":{ + "type":"string", + "max":256, + "min":1 + }, "Tags":{ "type":"list", - "member":{"shape":"Tag"} + "member":{"shape":"Tag"}, + "max":50 }, "TemplateBody":{ "type":"string", @@ -1439,6 +2207,31 @@ "StackId":{"shape":"StackId"} } }, + "UpdateStackSetInput":{ + "type":"structure", + "required":["StackSetName"], + "members":{ + "StackSetName":{"shape":"StackSetName"}, + "Description":{"shape":"Description"}, + "TemplateBody":{"shape":"TemplateBody"}, + "TemplateURL":{"shape":"TemplateURL"}, + "UsePreviousTemplate":{"shape":"UsePreviousTemplate"}, + "Parameters":{"shape":"Parameters"}, + "Capabilities":{"shape":"Capabilities"}, + "Tags":{"shape":"Tags"}, + "OperationPreferences":{"shape":"StackSetOperationPreferences"}, + "OperationId":{ + "shape":"ClientRequestToken", + "idempotencyToken":true + } + } + }, + "UpdateStackSetOutput":{ + "type":"structure", + "members":{ + "OperationId":{"shape":"ClientRequestToken"} + } + }, "Url":{"type":"string"}, "UsePreviousTemplate":{"type":"boolean"}, "UsePreviousValue":{"type":"boolean"}, diff --git a/cli/vendor/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/docs-2.json b/cli/vendor/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/docs-2.json index 18072cbf7..d388e65cc 100644 --- a/cli/vendor/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/docs-2.json +++ b/cli/vendor/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/docs-2.json @@ -1,18 +1,25 @@ { "version": "2.0", - "service": "AWS CloudFormation

AWS CloudFormation allows you to create and manage AWS infrastructure deployments predictably and repeatedly. You can use AWS CloudFormation to leverage AWS products, such as Amazon Elastic Compute Cloud, Amazon Elastic Block Store, Amazon Simple Notification Service, Elastic Load Balancing, and Auto Scaling to build highly-reliable, highly scalable, cost-effective applications without creating or configuring the underlying AWS infrastructure.

With AWS CloudFormation, you declare all of your resources and dependencies in a template file. The template defines a collection of resources as a single unit called a stack. AWS CloudFormation creates and deletes all member resources of the stack together and manages all dependencies between the resources for you.

For more information about AWS CloudFormation, see the AWS CloudFormation Product Page.

Amazon CloudFormation makes use of other AWS products. If you need additional technical information about a specific AWS product, you can find the product's technical documentation at docs.aws.amazon.com.

", + "service": "AWS CloudFormation

AWS CloudFormation allows you to create and manage AWS infrastructure deployments predictably and repeatedly. You can use AWS CloudFormation to leverage AWS products, such as Amazon Elastic Compute Cloud, Amazon Elastic Block Store, Amazon Simple Notification Service, Elastic Load Balancing, and Auto Scaling to build highly-reliable, highly scalable, cost-effective applications without creating or configuring the underlying AWS infrastructure.

With AWS CloudFormation, you declare all of your resources and dependencies in a template file. The template defines a collection of resources as a single unit called a stack. AWS CloudFormation creates and deletes all member resources of the stack together and manages all dependencies between the resources for you.

For more information about AWS CloudFormation, see the AWS CloudFormation Product Page.

Amazon CloudFormation makes use of other AWS products. If you need additional technical information about a specific AWS product, you can find the product's technical documentation at docs.aws.amazon.com.

APIs for stacks

When you use AWS CloudFormation, you manage related resources as a single unit called a stack. You create, update, and delete a collection of resources by creating, updating, and deleting stacks. All the resources in a stack are defined by the stack's AWS CloudFormation template.

Actions

Data Types

APIs for change sets

If you need to make changes to the running resources in a stack, you update the stack. Before making changes to your resources, you can generate a change set, which is a summary of your proposed changes. Change sets allow you to see how your changes might impact your running resources, especially for critical resources, before implementing them.

Actions

Data Types

APIs for stack sets

AWS CloudFormation StackSets lets you create a collection, or stack set, of stacks that can automatically and safely provision a common set of AWS resources across multiple AWS accounts and multiple AWS regions from a single AWS CloudFormation template. When you create a stack set, AWS CloudFormation provisions a stack in each of the specified accounts and regions by using the supplied AWS CloudFormation template and parameters. Stack sets let you manage a common set of AWS resources in a selection of accounts and regions in a single operation.

Actions

Data Types
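(Reviewer note: a short sketch of the StackSets surface this update vendors in: create the set, then fan it out as stack instances. The stack set name, template body, and account ID are placeholder values.)

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudformation"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := cloudformation.New(sess)

	// Create the stack set itself; only StackSetName is required by the model.
	created, err := svc.CreateStackSet(&cloudformation.CreateStackSetInput{
		StackSetName: aws.String("example-stack-set"), // placeholder
		TemplateBody: aws.String(`{"Resources":{"Topic":{"Type":"AWS::SNS::Topic"}}}`),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("stack set id:", aws.StringValue(created.StackSetId))

	// Fan the template out as stack instances; at least one account and one
	// region are required, and the returned OperationId tracks the rollout.
	op, err := svc.CreateStackInstances(&cloudformation.CreateStackInstancesInput{
		StackSetName: aws.String("example-stack-set"),
		Accounts:     []*string{aws.String("111122223333")}, // placeholder account
		Regions:      []*string{aws.String("us-east-1"), aws.String("eu-west-1")},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("operation id:", aws.StringValue(op.OperationId))
}
```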

", "operations": { "CancelUpdateStack": "

Cancels an update on the specified stack. If the call completes successfully, the stack rolls back the update and reverts to the previous stack configuration.

You can cancel only stacks that are in the UPDATE_IN_PROGRESS state.

", "ContinueUpdateRollback": "

For a specified stack that is in the UPDATE_ROLLBACK_FAILED state, continues rolling it back to the UPDATE_ROLLBACK_COMPLETE state. Depending on the cause of the failure, you can manually fix the error and continue the rollback. By continuing the rollback, you can return your stack to a working state (the UPDATE_ROLLBACK_COMPLETE state), and then try to update the stack again.

A stack goes into the UPDATE_ROLLBACK_FAILED state when AWS CloudFormation cannot roll back all changes after a failed stack update. For example, you might have a stack that is rolling back to an old database instance that was deleted outside of AWS CloudFormation. Because AWS CloudFormation doesn't know the database was deleted, it assumes that the database instance still exists and attempts to roll back to it, causing the update rollback to fail.

", "CreateChangeSet": "

Creates a list of changes that will be applied to a stack so that you can review the changes before executing them. You can create a change set for a stack that doesn't exist or an existing stack. If you create a change set for a stack that doesn't exist, the change set shows all of the resources that AWS CloudFormation will create. If you create a change set for an existing stack, AWS CloudFormation compares the stack's information with the information that you submit in the change set and lists the differences. Use change sets to understand which resources AWS CloudFormation will create or change, and how it will change resources in an existing stack, before you create or update a stack.

To create a change set for a stack that doesn't exist, for the ChangeSetType parameter, specify CREATE. To create a change set for an existing stack, specify UPDATE for the ChangeSetType parameter. After the CreateChangeSet call successfully completes, AWS CloudFormation starts creating the change set. To check the status of the change set or to review it, use the DescribeChangeSet action.

When you are satisfied with the changes the change set will make, execute the change set by using the ExecuteChangeSet action. AWS CloudFormation doesn't make changes until you execute the change set.

", "CreateStack": "

Creates a stack as specified in the template. After the call completes successfully, the stack creation starts. You can check the status of the stack via the DescribeStacks API.

", + "CreateStackInstances": "

Creates stack instances for the specified accounts, within the specified regions. A stack instance refers to a stack in a specific account and region. Accounts and Regions are required parameters—you must specify at least one account and one region.

", + "CreateStackSet": "

Creates a stack set.

", "DeleteChangeSet": "

Deletes the specified change set. Deleting change sets ensures that no one executes the wrong change set.

If the call successfully completes, AWS CloudFormation successfully deleted the change set.

", "DeleteStack": "

Deletes a specified stack. Once the call completes successfully, stack deletion starts. Deleted stacks do not show up in the DescribeStacks API if the deletion has been completed successfully.

", + "DeleteStackInstances": "

Deletes stack instances for the specified accounts, in the specified regions.

", + "DeleteStackSet": "

Deletes a stack set. Before you can delete a stack set, all of its member stack instances must be deleted. For more information about how to do this, see DeleteStackInstances.

", "DescribeAccountLimits": "

Retrieves your account's AWS CloudFormation limits, such as the maximum number of stacks that you can create in your account.

", "DescribeChangeSet": "

Returns the inputs for the change set and a list of changes that AWS CloudFormation will make if you execute the change set. For more information, see Updating Stacks Using Change Sets in the AWS CloudFormation User Guide.

", "DescribeStackEvents": "

Returns all stack related events for a specified stack in reverse chronological order. For more information about a stack's event history, go to Stacks in the AWS CloudFormation User Guide.

You can list events for stacks that have failed to create or have been deleted by specifying the unique stack identifier (stack ID).

", + "DescribeStackInstance": "

Returns the stack instance that's associated with the specified stack set, AWS account, and region.

For a list of stack instances that are associated with a specific stack set, use ListStackInstances.

", "DescribeStackResource": "

Returns a description of the specified resource in the specified stack.

For deleted stacks, DescribeStackResource returns resource information for up to 90 days after the stack has been deleted.

", "DescribeStackResources": "

Returns AWS resource descriptions for running and deleted stacks. If StackName is specified, all the associated resources that are part of the stack are returned. If PhysicalResourceId is specified, the associated resources of the stack that the resource belongs to are returned.

Only the first 100 resources will be returned. If your stack has more resources than this, you should use ListStackResources instead.

For deleted stacks, DescribeStackResources returns resource information for up to 90 days after the stack has been deleted.

You must specify either StackName or PhysicalResourceId, but not both. In addition, you can specify LogicalResourceId to filter the returned result. For more information about resources, the LogicalResourceId and PhysicalResourceId, go to the AWS CloudFormation User Guide.

A ValidationError is returned if you specify both StackName and PhysicalResourceId in the same request.

", + "DescribeStackSet": "

Returns the description of the specified stack set.

", + "DescribeStackSetOperation": "

Returns the description of the specified stack set operation.

", "DescribeStacks": "

Returns the description for the specified stack; if no stack name was specified, then it returns the description for all the stacks created.

If the stack does not exist, an AmazonCloudFormationException is returned.

", "EstimateTemplateCost": "

Returns the estimated monthly cost of a template. The return value is an AWS Simple Monthly Calculator URL with a query string that describes the resources required to run the template.

", "ExecuteChangeSet": "

Updates a stack using the input information that was provided when the specified change set was created. After the call successfully completes, AWS CloudFormation starts updating the stack. Use the DescribeStacks action to view the status of the update.

When you execute a change set, AWS CloudFormation deletes all other change sets associated with the stack because they aren't valid for the updated stack.

If a stack policy is associated with the stack, AWS CloudFormation enforces the policy during the update. You can't specify a temporary stack policy that overrides the current policy.

", @@ -22,14 +29,49 @@ "ListChangeSets": "

Returns the ID and status of each active change set for a stack. For example, AWS CloudFormation lists change sets that are in the CREATE_IN_PROGRESS or CREATE_PENDING state.

", "ListExports": "

Lists all exported output values in the account and region in which you call this action. Use this action to see the exported output values that you can import into other stacks. To import values, use the Fn::ImportValue function.

For more information, see AWS CloudFormation Export Stack Output Values.

", "ListImports": "

Lists all stacks that are importing an exported output value. To modify or remove an exported output value, first use this action to see which stacks are using it. To see the exported output values in your account, see ListExports.

For more information about importing an exported output value, see the Fn::ImportValue function.

", + "ListStackInstances": "

Returns summary information about stack instances that are associated with the specified stack set. You can filter for stack instances that are associated with a specific AWS account name or region.

", "ListStackResources": "

Returns descriptions of all resources of the specified stack.

For deleted stacks, ListStackResources returns resource information for up to 90 days after the stack has been deleted.

", + "ListStackSetOperationResults": "

Returns summary information about the results of a stack set operation.

", + "ListStackSetOperations": "

Returns summary information about operations performed on a stack set.

", + "ListStackSets": "

Returns summary information about stack sets that are associated with the user.

", "ListStacks": "

Returns the summary information for stacks whose status matches the specified StackStatusFilter. Summary information for stacks that have been deleted is kept for 90 days after the stack is deleted. If no StackStatusFilter is specified, summary information for all stacks is returned (including existing stacks and stacks that have been deleted).

", "SetStackPolicy": "

Sets a stack policy for a specified stack.

", "SignalResource": "

Sends a signal to the specified resource with a success or failure status. You can use the SignalResource API in conjunction with a creation policy or update policy. AWS CloudFormation doesn't proceed with a stack creation or update until resources receive the required number of signals or the timeout period is exceeded. The SignalResource API is useful in cases where you want to send signals from anywhere other than an Amazon EC2 instance.

", + "StopStackSetOperation": "

Stops an in-progress operation on a stack set and its associated stack instances.

", "UpdateStack": "

Updates a stack as specified in the template. After the call completes successfully, the stack update starts. You can check the status of the stack via the DescribeStacks action.

To get a copy of the template for an existing stack, you can use the GetTemplate action.

For more information about creating an update template, updating a stack, and monitoring the progress of the update, see Updating a Stack.

", + "UpdateStackSet": "

Updates the stack set and all associated stack instances.

Even if the stack set operation created by updating the stack set fails (completely or partially, below or above a specified failure tolerance), the stack set is updated with your changes. Subsequent CreateStackInstances calls on the specified stack set use the updated stack set.
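(Reviewer note: UpdateStackSet, CreateStackInstances, and DeleteStackInstances each return an OperationId. A hedged sketch of polling it with DescribeStackSetOperation, using the StackSetOperationStatus enum this patch introduces; the sleep interval is an arbitrary choice.)

```go
package stacksetops

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudformation"
)

// waitForStackSetOperation polls DescribeStackSetOperation until the operation
// leaves its in-flight states (RUNNING, STOPPING), then reports the outcome.
func waitForStackSetOperation(svc *cloudformation.CloudFormation, stackSetName, operationID string) error {
	for {
		out, err := svc.DescribeStackSetOperation(&cloudformation.DescribeStackSetOperationInput{
			StackSetName: aws.String(stackSetName),
			OperationId:  aws.String(operationID),
		})
		if err != nil {
			return err
		}
		switch status := aws.StringValue(out.StackSetOperation.Status); status {
		case "RUNNING", "STOPPING":
			time.Sleep(10 * time.Second) // still in flight; poll again
		case "SUCCEEDED":
			return nil
		default: // FAILED or STOPPED
			return fmt.Errorf("stack set operation %s ended with status %s", operationID, status)
		}
	}
}
```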

", "ValidateTemplate": "

Validates a specified template. AWS CloudFormation first checks if the template is valid JSON. If it isn't, AWS CloudFormation checks if the template is valid YAML. If both these checks fail, AWS CloudFormation returns a template validation error.

" }, "shapes": { + "Account": { + "base": null, + "refs": { + "AccountList$member": null, + "DescribeStackInstanceInput$StackInstanceAccount": "

The ID of an AWS account that's associated with this stack instance.

", + "ListStackInstancesInput$StackInstanceAccount": "

The name of the AWS account that you want to list stack instances for.

", + "StackInstance$Account": "

The name of the AWS account that the stack instance is associated with.

", + "StackInstanceSummary$Account": "

The name of the AWS account that the stack instance is associated with.

", + "StackSetOperationResultSummary$Account": "

The name of the AWS account for this operation result.

" + } + }, + "AccountGateResult": { + "base": "

Structure that contains the results of the account gate function AWS CloudFormation StackSets invokes, if present, before proceeding with stack set operations in an account.

Account gating enables you to specify a Lambda function for an account that encapsulates any requirements that must be met before AWS CloudFormation StackSets proceeds with stack set operations in that account. CloudFormation invokes the function each time stack set operations are initiated for that account, and only proceeds if the function returns a success code.

", + "refs": { + "StackSetOperationResultSummary$AccountGateResult": "

The results of the account gate function AWS CloudFormation invokes, if present, before proceeding with stack set operations in an account.

" + } + }, + "AccountGateStatus": { + "base": null, + "refs": { + "AccountGateResult$Status": "

The status of the account gate function.

" + } + }, + "AccountGateStatusReason": { + "base": null, + "refs": { + "AccountGateResult$StatusReason": "

The reason for the account gate status assigned to this account.

" + } + }, "AccountLimit": { "base": "

The AccountLimit data type.

", "refs": { @@ -42,6 +84,13 @@ "DescribeAccountLimitsOutput$AccountLimits": "

An account limit structure that contains a list of AWS CloudFormation account limits and their values.

" } }, + "AccountList": { + "base": null, + "refs": { + "CreateStackInstancesInput$Accounts": "

The names of one or more AWS accounts in which you want to create stack instances, for the specified region(s).

", + "DeleteStackInstancesInput$Accounts": "

The names of the AWS accounts that you want to delete stack instances for.

" + } + }, "AllowedValue": { "base": null, "refs": { @@ -55,7 +104,7 @@ } }, "AlreadyExistsException": { - "base": "

Resource with the name requested already exists.

", + "base": "

The resource with the name requested already exists.

", "refs": { } }, @@ -69,10 +118,13 @@ "refs": { "CreateChangeSetInput$Capabilities": "

A list of values that you must specify before AWS CloudFormation can update certain stacks. Some stack templates might include resources that can affect permissions in your AWS account, for example, by creating new AWS Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge their capabilities by specifying this parameter.

The only valid values are CAPABILITY_IAM and CAPABILITY_NAMED_IAM. The following resources require you to specify this parameter: AWS::IAM::AccessKey, AWS::IAM::Group, AWS::IAM::InstanceProfile, AWS::IAM::Policy, AWS::IAM::Role, AWS::IAM::User, and AWS::IAM::UserToGroupAddition. If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.

If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. If you don't specify this parameter, this action returns an InsufficientCapabilities error.

For more information, see Acknowledging IAM Resources in AWS CloudFormation Templates.

", "CreateStackInput$Capabilities": "

A list of values that you must specify before AWS CloudFormation can create certain stacks. Some stack templates might include resources that can affect permissions in your AWS account, for example, by creating new AWS Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge their capabilities by specifying this parameter.

The only valid values are CAPABILITY_IAM and CAPABILITY_NAMED_IAM. The following resources require you to specify this parameter: AWS::IAM::AccessKey, AWS::IAM::Group, AWS::IAM::InstanceProfile, AWS::IAM::Policy, AWS::IAM::Role, AWS::IAM::User, and AWS::IAM::UserToGroupAddition. If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.

If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. If you don't specify this parameter, this action returns an InsufficientCapabilities error.

For more information, see Acknowledging IAM Resources in AWS CloudFormation Templates.

", + "CreateStackSetInput$Capabilities": "

A list of values that you must specify before AWS CloudFormation can create certain stack sets. Some stack set templates might include resources that can affect permissions in your AWS account—for example, by creating new AWS Identity and Access Management (IAM) users. For those stack sets, you must explicitly acknowledge their capabilities by specifying this parameter.

The only valid values are CAPABILITY_IAM and CAPABILITY_NAMED_IAM. The following resources require you to specify this parameter: AWS::IAM::AccessKey, AWS::IAM::Group, AWS::IAM::InstanceProfile, AWS::IAM::Policy, AWS::IAM::Role, AWS::IAM::User, and AWS::IAM::UserToGroupAddition.

If your stack template contains these resources, we recommend that you review all permissions that are associated with them and edit their permissions if necessary.

If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. If you don't specify this parameter, this action returns an InsufficientCapabilities error.

For more information, see Acknowledging IAM Resources in AWS CloudFormation Templates.
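To make the acknowledgement concrete, here is a minimal Go sketch against the CreateStackSet API this patch vendors; the stack set name and template URL are placeholders, and error handling is reduced to a bare log.Fatal:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudformation"
)

func main() {
	cfn := cloudformation.New(session.Must(session.NewSession()))

	// Explicitly acknowledge IAM capabilities; omitting Capabilities for a
	// template that creates named IAM resources returns an
	// InsufficientCapabilities error.
	out, err := cfn.CreateStackSet(&cloudformation.CreateStackSetInput{
		StackSetName: aws.String("my-stack-set"),                                      // placeholder
		TemplateURL:  aws.String("https://s3.amazonaws.com/my-bucket/iam-roles.yaml"), // placeholder
		Capabilities: []*string{aws.String("CAPABILITY_NAMED_IAM")},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created stack set:", aws.StringValue(out.StackSetId))
}
```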

", "DescribeChangeSetOutput$Capabilities": "

If you execute the change set, the list of capabilities that were explicitly acknowledged when the change set was created.

", "GetTemplateSummaryOutput$Capabilities": "

The capabilities found within the template. If your template contains IAM resources, you must specify the CAPABILITY_IAM or CAPABILITY_NAMED_IAM value for this parameter when you use the CreateStack or UpdateStack actions with your template; otherwise, those actions return an InsufficientCapabilities error.

For more information, see Acknowledging IAM Resources in AWS CloudFormation Templates.

", "Stack$Capabilities": "

The capabilities allowed in the stack.

", + "StackSet$Capabilities": "

The capabilities that are allowed in the stack set. Some stack set templates might include resources that can affect permissions in your AWS account—for example, by creating new AWS Identity and Access Management (IAM) users. For more information, see Acknowledging IAM Resources in AWS CloudFormation Templates.

", "UpdateStackInput$Capabilities": "

A list of values that you must specify before AWS CloudFormation can update certain stacks. Some stack templates might include resources that can affect permissions in your AWS account, for example, by creating new AWS Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge their capabilities by specifying this parameter.

The only valid values are CAPABILITY_IAM and CAPABILITY_NAMED_IAM. The following resources require you to specify this parameter: AWS::IAM::AccessKey, AWS::IAM::Group, AWS::IAM::InstanceProfile, AWS::IAM::Policy, AWS::IAM::Role, AWS::IAM::User, and AWS::IAM::UserToGroupAddition. If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.

If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. If you don't specify this parameter, this action returns an InsufficientCapabilities error.

For more information, see Acknowledging IAM Resources in AWS CloudFormation Templates.

", + "UpdateStackSetInput$Capabilities": "

A list of values that you must specify before AWS CloudFormation can create certain stack sets. Some stack set templates might include resources that can affect permissions in your AWS account—for example, by creating new AWS Identity and Access Management (IAM) users. For those stack sets, you must explicitly acknowledge their capabilities by specifying this parameter.

The only valid values are CAPABILITY_IAM and CAPABILITY_NAMED_IAM. The following resources require you to specify this parameter: AWS::IAM::AccessKey, AWS::IAM::Group, AWS::IAM::InstanceProfile, AWS::IAM::Policy, AWS::IAM::Role, AWS::IAM::User, and AWS::IAM::UserToGroupAddition.

If your stack template contains these resources, we recommend that you review all permissions that are associated with them and edit their permissions if necessary.

If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. If you don't specify this parameter, this action returns an InsufficientCapabilities error.

For more information, see Acknowledging IAM Resources in AWS CloudFormation Templates.

", "ValidateTemplateOutput$Capabilities": "

The capabilities found within the template. If your template contains IAM resources, you must specify the CAPABILITY_IAM or CAPABILITY_NAMED_IAM value for this parameter when you use the CreateStack or UpdateStack actions with your template; otherwise, those actions return an InsufficientCapabilities error.

For more information, see Acknowledging IAM Resources in AWS CloudFormation Templates.

" } }, @@ -193,11 +245,23 @@ "refs": { "CancelUpdateStackInput$ClientRequestToken": "

A unique identifier for this CancelUpdateStack request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to cancel an update on a stack with the same name. You might retry CancelUpdateStack requests to ensure that AWS CloudFormation successfully received them.

", "ContinueUpdateRollbackInput$ClientRequestToken": "

A unique identifier for this ContinueUpdateRollback request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to continue the rollback to a stack with the same name. You might retry ContinueUpdateRollback requests to ensure that AWS CloudFormation successfully received them.

", - "CreateStackInput$ClientRequestToken": "

A unique identifier for this CreateStack request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to create a stack with the same name. You might retry CreateStack requests to ensure that AWS CloudFormation successfully received them.

", - "DeleteStackInput$ClientRequestToken": "

A unique identifier for this DeleteStack request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to delete a stack with the same name. You might retry DeleteStack requests to ensure that AWS CloudFormation successfully received them.

", + "CreateStackInput$ClientRequestToken": "

A unique identifier for this CreateStack request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to create a stack with the same name. You might retry CreateStack requests to ensure that AWS CloudFormation successfully received them.

All events triggered by a given stack operation are assigned the same client request token, which you can use to track operations. For example, if you execute a CreateStack operation with the token token1, then all the StackEvents generated by that operation will have ClientRequestToken set as token1.

In the console, stack operations display the client request token on the Events tab. Stack operations that are initiated from the console use the token format Console-StackOperation-ID, which helps you easily identify the stack operation. For example, if you create a stack using the console, each stack event would be assigned the same token in the following format: Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002.
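As a rough sketch of the retry pattern this describes (imports and client setup as in the CreateStackSet example above; the stack name and token value are placeholders):

```go
// createStackIdempotent issues CreateStack with a fixed ClientRequestToken.
// Reusing the same token on retry tells CloudFormation the request is a
// repeat, and every StackEvent from the operation carries that token.
func createStackIdempotent(cfn *cloudformation.CloudFormation, templateBody string) error {
	_, err := cfn.CreateStack(&cloudformation.CreateStackInput{
		StackName:          aws.String("my-stack"), // placeholder
		TemplateBody:       aws.String(templateBody),
		ClientRequestToken: aws.String("create-my-stack-001"), // keep identical across retries
	})
	return err
}
```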

", + "CreateStackInstancesInput$OperationId": "

The unique identifier for this stack set operation.

The operation ID also functions as an idempotency token, to ensure that AWS CloudFormation performs the stack set operation only once, even if you retry the request multiple times. You might retry stack set operation requests to ensure that AWS CloudFormation successfully received them.

If you don't specify an operation ID, the SDK generates one automatically.

Repeating this stack set operation with a new operation ID retries all stack instances whose status is OUTDATED.
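A sketch of the same idempotency mechanism for stack instances, assuming the client setup shown earlier; account IDs, regions, and the operation ID are placeholders:

```go
// createInstancesOnce passes an explicit OperationId, so retrying the call
// with the same ID performs the operation only once; submitting a new ID
// instead re-runs it against any OUTDATED instances.
func createInstancesOnce(cfn *cloudformation.CloudFormation) (string, error) {
	out, err := cfn.CreateStackInstances(&cloudformation.CreateStackInstancesInput{
		StackSetName: aws.String("my-stack-set"),
		Accounts:     []*string{aws.String("111111111111"), aws.String("222222222222")},
		Regions:      []*string{aws.String("us-east-1"), aws.String("eu-west-1")},
		OperationId:  aws.String("create-instances-001"), // reuse verbatim on retry
	})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.OperationId), nil
}
```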

", + "CreateStackInstancesOutput$OperationId": "

The unique identifier for this stack set operation.

", + "CreateStackSetInput$ClientRequestToken": "

A unique identifier for this CreateStackSet request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to create another stack set with the same name. You might retry CreateStackSet requests to ensure that AWS CloudFormation successfully received them.

If you don't specify an operation ID, the SDK generates one automatically.

", + "DeleteStackInput$ClientRequestToken": "

A unique identifier for this DeleteStack request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to delete a stack with the same name. You might retry DeleteStack requests to ensure that AWS CloudFormation successfully received them.

All events triggered by a given stack operation are assigned the same client request token, which you can use to track operations. For example, if you execute a CreateStack operation with the token token1, then all the StackEvents generated by that operation will have ClientRequestToken set as token1.

In the console, stack operations display the client request token on the Events tab. Stack operations that are initiated from the console use the token format Console-StackOperation-ID, which helps you easily identify the stack operation. For example, if you create a stack using the console, each stack event would be assigned the same token in the following format: Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002.

", + "DeleteStackInstancesInput$OperationId": "

The unique identifier for this stack set operation.

If you don't specify an operation ID, the SDK generates one automatically.

The operation ID also functions as an idempotency token, to ensure that AWS CloudFormation performs the stack set operation only once, even if you retry the request multiple times. You can retry stack set operation requests to ensure that AWS CloudFormation successfully received them.

Repeating this stack set operation with a new operation ID retries all stack instances whose status is OUTDATED.

", + "DeleteStackInstancesOutput$OperationId": "

The unique identifier for this stack set operation.

", + "DescribeStackSetOperationInput$OperationId": "

The unique ID of the stack set operation.

", "ExecuteChangeSetInput$ClientRequestToken": "

A unique identifier for this ExecuteChangeSet request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to execute a change set to update a stack with the same name. You might retry ExecuteChangeSet requests to ensure that AWS CloudFormation successfully received them.

", - "StackEvent$ClientRequestToken": "

The token passed to the operation that generated this event.

For example, if you execute a CreateStack operation with the token token1, then all the StackEvents generated by that operation will have ClientRequestToken set as token1.

", - "UpdateStackInput$ClientRequestToken": "

A unique identifier for this UpdateStack request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to update a stack with the same name. You might retry UpdateStack requests to ensure that AWS CloudFormation successfully received them.

" + "ListStackSetOperationResultsInput$OperationId": "

The ID of the stack set operation.

", + "StackEvent$ClientRequestToken": "

The token passed to the operation that generated this event.

All events triggered by a given stack operation are assigned the same client request token, which you can use to track operations. For example, if you execute a CreateStack operation with the token token1, then all the StackEvents generated by that operation will have ClientRequestToken set as token1.

In the console, stack operations display the client request token on the Events tab. Stack operations that are initiated from the console use the token format Console-StackOperation-ID, which helps you easily identify the stack operation. For example, if you create a stack using the console, each stack event would be assigned the same token in the following format: Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002.

", + "StackSetOperation$OperationId": "

The unique ID of a stack set operation.

", + "StackSetOperationSummary$OperationId": "

The unique ID of the stack set operation.

", + "StopStackSetOperationInput$OperationId": "

The ID of the stack set operation.

", + "UpdateStackInput$ClientRequestToken": "

A unique identifier for this UpdateStack request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to update a stack with the same name. You might retry UpdateStack requests to ensure that AWS CloudFormation successfully received them.

All events triggered by a given stack operation are assigned the same client request token, which you can use to track operations. For example, if you execute a CreateStack operation with the token token1, then all the StackEvents generated by that operation will have ClientRequestToken set as token1.

In the console, stack operations display the client request token on the Events tab. Stack operations that are initiated from the console use the token format Console-StackOperation-ID, which helps you easily identify the stack operation. For example, if you create a stack using the console, each stack event would be assigned the same token in the following format: Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002.

", + "UpdateStackSetInput$OperationId": "

The unique ID for this stack set operation.

The operation ID also functions as an idempotency token, to ensure that AWS CloudFormation performs the stack set operation only once, even if you retry the request multiple times. You might retry stack set operation requests to ensure that AWS CloudFormation successfully received them.

If you don't specify an operation ID, AWS CloudFormation generates one automatically.

Repeating this stack set operation with a new operation ID retries all stack instances whose status is OUTDATED.

", + "UpdateStackSetOutput$OperationId": "

The unique ID for this stack set operation.

" } }, "ClientToken": { @@ -231,11 +295,36 @@ "refs": { } }, + "CreateStackInstancesInput": { + "base": null, + "refs": { + } + }, + "CreateStackInstancesOutput": { + "base": null, + "refs": { + } + }, "CreateStackOutput": { "base": "

The output for a CreateStack action.

", "refs": { } }, + "CreateStackSetInput": { + "base": null, + "refs": { + } + }, + "CreateStackSetOutput": { + "base": null, + "refs": { + } + }, + "CreatedButModifiedException": { + "base": "

The specified resource exists, but has been changed.

", + "refs": { + } + }, "CreationTime": { "base": null, "refs": { @@ -260,6 +349,26 @@ "refs": { } }, + "DeleteStackInstancesInput": { + "base": null, + "refs": { + } + }, + "DeleteStackInstancesOutput": { + "base": null, + "refs": { + } + }, + "DeleteStackSetInput": { + "base": null, + "refs": { + } + }, + "DeleteStackSetOutput": { + "base": null, + "refs": { + } + }, "DeletionTime": { "base": null, "refs": { @@ -296,6 +405,16 @@ "refs": { } }, + "DescribeStackInstanceInput": { + "base": null, + "refs": { + } + }, + "DescribeStackInstanceOutput": { + "base": null, + "refs": { + } + }, "DescribeStackResourceInput": { "base": "

The input for DescribeStackResource action.

", "refs": { @@ -316,6 +435,26 @@ "refs": { } }, + "DescribeStackSetInput": { + "base": null, + "refs": { + } + }, + "DescribeStackSetOperationInput": { + "base": null, + "refs": { + } + }, + "DescribeStackSetOperationOutput": { + "base": null, + "refs": { + } + }, + "DescribeStackSetOutput": { + "base": null, + "refs": { + } + }, "DescribeStacksInput": { "base": "

The input for DescribeStacks action.

", "refs": { @@ -331,6 +470,7 @@ "refs": { "ChangeSetSummary$Description": "

Descriptive information about the change set.

", "CreateChangeSetInput$Description": "

A description to help you identify this change set.

", + "CreateStackSetInput$Description": "

A description of the stack set. You can use the description to identify the stack set's purpose or other important information.

", "DescribeChangeSetOutput$Description": "

Information about the change set.

", "GetTemplateSummaryOutput$Description": "

The value that is defined in the Description property of the template.

", "Output$Description": "

User-defined description associated with the output.

", @@ -338,7 +478,10 @@ "Stack$Description": "

A user-defined description associated with the stack.

", "StackResource$Description": "

User-defined description associated with the resource.

", "StackResourceDetail$Description": "

User-defined description associated with the resource.

", + "StackSet$Description": "

A description of the stack set that you specify when the stack set is created or updated.

", + "StackSetSummary$Description": "

A description of the stack set that you specify when the stack set is created or updated.

", "TemplateParameter$Description": "

User-defined description associated with the parameter.

", + "UpdateStackSetInput$Description": "

A brief description of updates that you are making.

", "ValidateTemplateOutput$Description": "

The description found within the template.

" } }, @@ -398,7 +541,8 @@ "base": null, "refs": { "Export$Name": "

The name of the exported output value. Use this name and the Fn::ImportValue function to import the associated value into other stacks. The name is defined in the Export field in the associated stack's Outputs section.

", - "ListImportsInput$ExportName": "

The name of the exported output value. AWS CloudFormation returns the stack names that are importing this value.

" + "ListImportsInput$ExportName": "

The name of the exported output value. AWS CloudFormation returns the stack names that are importing this value.

", + "Output$ExportName": "

The name of the export associated with the output.

" } }, "ExportValue": { @@ -413,6 +557,18 @@ "ListExportsOutput$Exports": "

The output for the ListExports action.

" } }, + "FailureToleranceCount": { + "base": null, + "refs": { + "StackSetOperationPreferences$FailureToleranceCount": "

The number of accounts, per region, for which this operation can fail before AWS CloudFormation stops the operation in that region. If the operation is stopped in a region, AWS CloudFormation doesn't attempt the operation in any subsequent regions.

Conditional: You must specify either FailureToleranceCount or FailureTolerancePercentage (but not both).

" + } + }, + "FailureTolerancePercentage": { + "base": null, + "refs": { + "StackSetOperationPreferences$FailureTolerancePercentage": "

The percentage of accounts, per region, for which this stack operation can fail before AWS CloudFormation stops the operation in that region. If the operation is stopped in a region, AWS CloudFormation doesn't attempt the operation in any subsequent regions.

When calculating the number of accounts based on the specified percentage, AWS CloudFormation rounds down to the next whole number.

Conditional: You must specify either FailureToleranceCount or FailureTolerancePercentage, but not both.

" + } + }, "GetStackPolicyInput": { "base": "

The input for the GetStackPolicy action.

", "refs": { @@ -450,12 +606,17 @@ } }, "InsufficientCapabilitiesException": { - "base": "

The template contains resources with capabilities that were not specified in the Capabilities parameter.

", + "base": "

The template contains resources with capabilities that weren't specified in the Capabilities parameter.

", "refs": { } }, "InvalidChangeSetStatusException": { - "base": "

The specified change set cannot be used to update the stack. For example, the change set status might be CREATE_IN_PROGRESS or the stack status might be UPDATE_IN_PROGRESS.

", + "base": "

The specified change set can't be used to update the stack. For example, the change set status might be CREATE_IN_PROGRESS, or the stack status might be UPDATE_IN_PROGRESS.

", + "refs": { + } + }, + "InvalidOperationException": { + "base": "

The specified operation isn't valid.

", "refs": { } }, @@ -467,7 +628,7 @@ } }, "LimitExceededException": { - "base": "

Quota for the resource has already been reached.

", + "base": "

The quota for the resource has already been reached.

", "refs": { } }, @@ -513,6 +674,16 @@ "refs": { } }, + "ListStackInstancesInput": { + "base": null, + "refs": { + } + }, + "ListStackInstancesOutput": { + "base": null, + "refs": { + } + }, "ListStackResourcesInput": { "base": "

The input for the ListStackResource action.

", "refs": { @@ -523,6 +694,36 @@ "refs": { } }, + "ListStackSetOperationResultsInput": { + "base": null, + "refs": { + } + }, + "ListStackSetOperationResultsOutput": { + "base": null, + "refs": { + } + }, + "ListStackSetOperationsInput": { + "base": null, + "refs": { + } + }, + "ListStackSetOperationsOutput": { + "base": null, + "refs": { + } + }, + "ListStackSetsInput": { + "base": null, + "refs": { + } + }, + "ListStackSetsOutput": { + "base": null, + "refs": { + } + }, "ListStacksInput": { "base": "

The input for ListStacks action.

", "refs": { @@ -547,6 +748,27 @@ "StackResourceSummary$LogicalResourceId": "

The logical name of the resource specified in the template.

" } }, + "MaxConcurrentCount": { + "base": null, + "refs": { + "StackSetOperationPreferences$MaxConcurrentCount": "

The maximum number of accounts in which to perform this operation at one time. This is dependent on the value of FailureToleranceCount. MaxConcurrentCount is at most one more than the FailureToleranceCount.

Conditional: You must specify either MaxConcurrentCount or MaxConcurrentPercentage, but not both.

" + } + }, + "MaxConcurrentPercentage": { + "base": null, + "refs": { + "StackSetOperationPreferences$MaxConcurrentPercentage": "

The maximum percentage of accounts in which to perform this operation at one time.

When calculating the number of accounts based on the specified percentage, AWS CloudFormation rounds down to the next whole number. This is true except in cases where rounding down would result in zero. In this case, CloudFormation sets the number as one instead.

Conditional: You must specify either MaxConcurrentCount or MaxConcurrentPercentage, but not both.
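A sketch of how these preferences fit together on a stack set operation (client setup as in the earlier examples; the stack set name and regions are placeholders):

```go
// updateWithPreferences tolerates one failed account per region and updates
// at most two accounts at a time (MaxConcurrentCount is at most one more
// than FailureToleranceCount), walking the regions in the order given.
func updateWithPreferences(cfn *cloudformation.CloudFormation) error {
	_, err := cfn.UpdateStackSet(&cloudformation.UpdateStackSetInput{
		StackSetName:        aws.String("my-stack-set"), // placeholder
		UsePreviousTemplate: aws.Bool(true),
		OperationPreferences: &cloudformation.StackSetOperationPreferences{
			FailureToleranceCount: aws.Int64(1),
			MaxConcurrentCount:    aws.Int64(2),
			RegionOrder:           []*string{aws.String("us-east-1"), aws.String("us-west-2")},
		},
	})
	return err
}
```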

" + } + }, + "MaxResults": { + "base": null, + "refs": { + "ListStackInstancesInput$MaxResults": "

The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.

", + "ListStackSetOperationResultsInput$MaxResults": "

The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.

", + "ListStackSetOperationsInput$MaxResults": "

The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.

", + "ListStackSetsInput$MaxResults": "

The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.

" + } + }, "Metadata": { "base": null, "refs": { @@ -554,6 +776,11 @@ "StackResourceDetail$Metadata": "

The content of the Metadata attribute declared for the resource. For more information, see Metadata Attribute in the AWS CloudFormation User Guide.

" } }, + "NameAlreadyExistsException": { + "base": "

The specified name is already in use.

", + "refs": { + } + }, "NextToken": { "base": null, "refs": { @@ -571,8 +798,16 @@ "ListExportsOutput$NextToken": "

If the output exceeds 100 exported output values, a string that identifies the next page of exports. If there is no additional page, this value is null.

", "ListImportsInput$NextToken": "

A string (provided by the ListImports response output) that identifies the next page of stacks that are importing the specified exported output value.

", "ListImportsOutput$NextToken": "

A string that identifies the next page of exports. If there is no additional page, this value is null.

", + "ListStackInstancesInput$NextToken": "

If the previous request didn't return all of the remaining results, the response's NextToken parameter value is set to a token. To retrieve the next set of results, call ListStackInstances again and assign that token to the request object's NextToken parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null.

", + "ListStackInstancesOutput$NextToken": "

If the request doesn't return all of the remaining results, NextToken is set to a token. To retrieve the next set of results, call ListStackInstances again and assign that token to the request object's NextToken parameter. If the request returns all results, NextToken is set to null.

", "ListStackResourcesInput$NextToken": "

A string that identifies the next page of stack resources that you want to retrieve.

", "ListStackResourcesOutput$NextToken": "

If the output exceeds 1 MB, a string that identifies the next page of stack resources. If no additional page exists, this value is null.

", + "ListStackSetOperationResultsInput$NextToken": "

If the previous request didn't return all of the remaining results, the response object's NextToken parameter value is set to a token. To retrieve the next set of results, call ListStackSetOperationResults again and assign that token to the request object's NextToken parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null.

", + "ListStackSetOperationResultsOutput$NextToken": "

If the request doesn't return all results, NextToken is set to a token. To retrieve the next set of results, call ListStackSetOperationResults again and assign that token to the request object's NextToken parameter. If there are no remaining results, NextToken is set to null.

", + "ListStackSetOperationsInput$NextToken": "

If the previous paginated request didn't return all of the remaining results, the response object's NextToken parameter value is set to a token. To retrieve the next set of results, call ListStackSetOperations again and assign that token to the request object's NextToken parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null.

", + "ListStackSetOperationsOutput$NextToken": "

If the request doesn't return all results, NextToken is set to a token. To retrieve the next set of results, call ListStackSetOperations again and assign that token to the request object's NextToken parameter. If there are no remaining results, NextToken is set to null.

", + "ListStackSetsInput$NextToken": "

If the previous paginated request didn't return all of the remaining results, the response object's NextToken parameter value is set to a token. To retrieve the next set of results, call ListStackSets again and assign that token to the request object's NextToken parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null.

", + "ListStackSetsOutput$NextToken": "

If the request doesn't return all of the remaining results, NextToken is set to a token. To retrieve the next set of results, call ListStackSets again and assign that token to the request object's NextToken parameter. If the request returns all results, NextToken is set to null.
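The NextToken handshake these fields describe amounts to a simple loop; a sketch for ListStackSets (client setup and imports as in the first example):

```go
// listAllStackSets pages through ListStackSets until NextToken comes back
// nil, printing each summary as it goes.
func listAllStackSets(cfn *cloudformation.CloudFormation) error {
	var token *string
	for {
		page, err := cfn.ListStackSets(&cloudformation.ListStackSetsInput{
			MaxResults: aws.Int64(20),
			NextToken:  token, // nil on the first call
		})
		if err != nil {
			return err
		}
		for _, s := range page.Summaries {
			fmt.Printf("%s (%s)\n", aws.StringValue(s.StackSetName), aws.StringValue(s.Status))
		}
		if page.NextToken == nil {
			return nil // no more pages
		}
		token = page.NextToken
	}
}
```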

", "ListStacksInput$NextToken": "

A string that identifies the next page of stacks that you want to retrieve.

", "ListStacksOutput$NextToken": "

If the output exceeds 1 MB in size, a string that identifies the next page of stacks. If no additional page exists, this value is null.

" } @@ -606,6 +841,21 @@ "CreateStackInput$OnFailure": "

Determines what action will be taken if stack creation fails. This must be one of: DO_NOTHING, ROLLBACK, or DELETE. You can specify either OnFailure or DisableRollback, but not both.

Default: ROLLBACK

" } }, + "OperationIdAlreadyExistsException": { + "base": "

The specified operation ID already exists.

", + "refs": { + } + }, + "OperationInProgressException": { + "base": "

Another operation is currently in progress for this stack set. Only one operation can be performed for a stack set at a given time.

", + "refs": { + } + }, + "OperationNotFoundException": { + "base": "

The specified ID refers to an operation that doesn't exist.

", + "refs": { + } + }, "Output": { "base": "

The Output data type.

", "refs": { @@ -681,10 +931,13 @@ "refs": { "CreateChangeSetInput$Parameters": "

A list of Parameter structures that specify input parameters for the change set. For more information, see the Parameter data type.

", "CreateStackInput$Parameters": "

A list of Parameter structures that specify input parameters for the stack. For more information, see the Parameter data type.

", + "CreateStackSetInput$Parameters": "

The input parameters for the stack set template.

", "DescribeChangeSetOutput$Parameters": "

A list of Parameter structures that describes the input parameters and their values used to create the change set. For more information, see the Parameter data type.

", "EstimateTemplateCostInput$Parameters": "

A list of Parameter structures that specify input parameters.

", "Stack$Parameters": "

A list of Parameter structures.

", - "UpdateStackInput$Parameters": "

A list of Parameter structures that specify input parameters for the stack. For more information, see the Parameter data type.

" + "StackSet$Parameters": "

A list of input parameters for a stack set.

", + "UpdateStackInput$Parameters": "

A list of Parameter structures that specify input parameters for the stack. For more information, see the Parameter data type.

", + "UpdateStackSetInput$Parameters": "

A list of input parameters for the stack set template.

" } }, "PhysicalResourceId": { @@ -704,6 +957,33 @@ "ResourceTargetDefinition$Name": "

If the Attribute value is Properties, the name of the property. For all other attributes, the value is null.

" } }, + "Reason": { + "base": null, + "refs": { + "StackInstance$StatusReason": "

The explanation for the specific status code that is assigned to this stack instance.

", + "StackInstanceSummary$StatusReason": "

The explanation for the specific status code assigned to this stack instance.

", + "StackSetOperationResultSummary$StatusReason": "

The reason for the assigned result status.

" + } + }, + "Region": { + "base": null, + "refs": { + "DescribeStackInstanceInput$StackInstanceRegion": "

The name of a region that's associated with this stack instance.

", + "ListStackInstancesInput$StackInstanceRegion": "

The name of the region where you want to list stack instances.

", + "RegionList$member": null, + "StackInstance$Region": "

The name of the AWS region that the stack instance is associated with.

", + "StackInstanceSummary$Region": "

The name of the AWS region that the stack instance is associated with.

", + "StackSetOperationResultSummary$Region": "

The name of the AWS region for this operation result.

" + } + }, + "RegionList": { + "base": null, + "refs": { + "CreateStackInstancesInput$Regions": "

The names of one or more regions where you want to create stack instances using the specified AWS account(s).

", + "DeleteStackInstancesInput$Regions": "

The regions where you want to delete stack set instances.

", + "StackSetOperationPreferences$RegionOrder": "

The order of the regions where you want to perform the stack operation.

" + } + }, "Replacement": { "base": null, "refs": { @@ -812,7 +1092,7 @@ "ResourcesToSkip": { "base": null, "refs": { - "ContinueUpdateRollbackInput$ResourcesToSkip": "

A list of the logical IDs of the resources that AWS CloudFormation skips during the continue update rollback operation. You can specify only resources that are in the UPDATE_FAILED state because a rollback failed. You can't specify resources that are in the UPDATE_FAILED state for other reasons, for example, because an update was canceled. To check why a resource update failed, use the DescribeStackResources action, and view the resource status reason.

Specify this property to skip rolling back resources that AWS CloudFormation can't successfully roll back. We recommend that you troubleshoot resources before skipping them. AWS CloudFormation sets the status of the specified resources to UPDATE_COMPLETE and continues to roll back the stack. After the rollback is complete, the state of the skipped resources will be inconsistent with the state of the resources in the stack template. Before performing another stack update, you must update the stack or resources to be consistent with each other. If you don't, subsequent stack updates might fail, and the stack will become unrecoverable.

Specify the minimum number of resources required to successfully roll back your stack. For example, a failed resource update might cause dependent resources to fail. In this case, it might not be necessary to skip the dependent resources.

To specify resources in a nested stack, use the following format: NestedStackName.ResourceLogicalID. If the ResourceLogicalID is a stack resource (Type: AWS::CloudFormation::Stack), it must be in one of the following states: DELETE_IN_PROGRESS, DELETE_COMPLETE, or DELETE_FAILED.

" + "ContinueUpdateRollbackInput$ResourcesToSkip": "

A list of the logical IDs of the resources that AWS CloudFormation skips during the continue update rollback operation. You can specify only resources that are in the UPDATE_FAILED state because a rollback failed. You can't specify resources that are in the UPDATE_FAILED state for other reasons, for example, because an update was canceled. To check why a resource update failed, use the DescribeStackResources action, and view the resource status reason.

Specify this property to skip rolling back resources that AWS CloudFormation can't successfully roll back. We recommend that you troubleshoot resources before skipping them. AWS CloudFormation sets the status of the specified resources to UPDATE_COMPLETE and continues to roll back the stack. After the rollback is complete, the state of the skipped resources will be inconsistent with the state of the resources in the stack template. Before performing another stack update, you must update the stack or resources to be consistent with each other. If you don't, subsequent stack updates might fail, and the stack will become unrecoverable.

Specify the minimum number of resources required to successfully roll back your stack. For example, a failed resource update might cause dependent resources to fail. In this case, it might not be necessary to skip the dependent resources.

To skip resources that are part of nested stacks, use the following format: NestedStackName.ResourceLogicalID. If you want to specify the logical ID of a stack resource (Type: AWS::CloudFormation::Stack) in the ResourcesToSkip list, then its corresponding embedded stack must be in one of the following states: DELETE_IN_PROGRESS, DELETE_COMPLETE, or DELETE_FAILED.

Don't confuse a child stack's name with its corresponding logical ID defined in the parent stack. For an example of a continue update rollback operation with nested stacks, see Using ResourcesToSkip to recover a nested stacks hierarchy.
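A sketch of the NestedStackName.ResourceLogicalID form in practice (client setup as above; both logical IDs and the stack name are placeholders):

```go
// skipAndContinueRollback skips one top-level resource and one resource
// that lives inside a nested stack, then resumes the rollback.
func skipAndContinueRollback(cfn *cloudformation.CloudFormation) error {
	_, err := cfn.ContinueUpdateRollback(&cloudformation.ContinueUpdateRollbackInput{
		StackName: aws.String("my-parent-stack"),
		ResourcesToSkip: []*string{
			aws.String("BrokenTopLevelResource"),
			aws.String("ChildStack.BrokenNestedResource"), // NestedStackName.ResourceLogicalID
		},
	})
	return err
}
```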

" } }, "RetainResources": { @@ -821,6 +1101,18 @@ "DeleteStackInput$RetainResources": "

For stacks in the DELETE_FAILED state, a list of resource logical IDs that are associated with the resources you want to retain. During deletion, AWS CloudFormation deletes the stack but does not delete the retained resources.

Retaining resources is useful when you cannot delete a resource, such as a non-empty S3 bucket, but you want to delete the stack.

" } }, + "RetainStacks": { + "base": null, + "refs": { + "DeleteStackInstancesInput$RetainStacks": "

Removes the stack instances from the specified stack set, but doesn't delete the stacks. You can't reassociate a retained stack or add an existing, saved stack to a new stack set.
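A sketch of detaching instances while keeping their stacks (client setup as above; all identifiers are placeholders):

```go
// detachInstances removes the instances from the stack set but leaves the
// underlying stacks running; a retained stack can't rejoin a stack set.
func detachInstances(cfn *cloudformation.CloudFormation) error {
	_, err := cfn.DeleteStackInstances(&cloudformation.DeleteStackInstancesInput{
		StackSetName: aws.String("my-stack-set"),
		Accounts:     []*string{aws.String("111111111111")},
		Regions:      []*string{aws.String("us-east-1")},
		RetainStacks: aws.Bool(true),
	})
	return err
}
```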

" + } + }, + "RetainStacksNullable": { + "base": null, + "refs": { + "StackSetOperation$RetainStacks": "

For stack set operations of action type DELETE, specifies whether to remove the stack instances from the specified stack set while retaining the stacks themselves. You can't reassociate a retained stack, or add an existing, saved stack to a new stack set.

" + } + }, "RoleARN": { "base": null, "refs": { @@ -876,12 +1168,44 @@ "Export$ExportingStackId": "

The stack that contains the exported output name and value.

", "Stack$StackId": "

Unique identifier of the stack.

", "StackEvent$StackId": "

The unique ID name of the instance of the stack.

", + "StackInstance$StackId": "

The ID of the stack instance.

", + "StackInstanceSummary$StackId": "

The ID of the stack instance.

", "StackResource$StackId": "

Unique identifier of the stack.

", "StackResourceDetail$StackId": "

Unique identifier of the stack.

", "StackSummary$StackId": "

Unique stack identifier.

", "UpdateStackOutput$StackId": "

Unique identifier of the stack.

" } }, + "StackInstance": { + "base": "

An AWS CloudFormation stack, in a specific account and region, that's part of a stack set operation. A stack instance is a reference to an attempted or actual stack in a given account within a given region. A stack instance can exist without a stack—for example, if the stack couldn't be created for some reason. A stack instance is associated with only one stack set. Each stack instance contains the ID of its associated stack set, as well as the ID of the actual stack and the stack status.

", + "refs": { + "DescribeStackInstanceOutput$StackInstance": "

The stack instance that matches the specified request parameters.

" + } + }, + "StackInstanceNotFoundException": { + "base": "

The specified stack instance doesn't exist.

", + "refs": { + } + }, + "StackInstanceStatus": { + "base": null, + "refs": { + "StackInstance$Status": "

The status of the stack instance, in terms of its synchronization with its associated stack set.

", + "StackInstanceSummary$Status": "

The status of the stack instance, in terms of its synchronization with its associated stack set.

" + } + }, + "StackInstanceSummaries": { + "base": null, + "refs": { + "ListStackInstancesOutput$Summaries": "

A list of StackInstanceSummary structures that contain information about the specified stack instances.

" + } + }, + "StackInstanceSummary": { + "base": "

The structure that contains summary information about a stack instance.

", + "refs": { + "StackInstanceSummaries$member": null + } + }, "StackName": { "base": null, "refs": { @@ -979,6 +1303,137 @@ "DescribeStackResourcesOutput$StackResources": "

A list of StackResource structures.

" } }, + "StackSet": { + "base": "

A structure that contains information about a stack set. A stack set enables you to provision stacks into AWS accounts and across regions by using a single CloudFormation template. In the stack set, you specify the template to use, as well as any parameters and capabilities that the template requires.

", + "refs": { + "DescribeStackSetOutput$StackSet": "

The specified stack set.

" + } + }, + "StackSetId": { + "base": null, + "refs": { + "CreateStackSetOutput$StackSetId": "

The ID of the stack set that you're creating.

", + "StackInstance$StackSetId": "

The name or unique ID of the stack set that the stack instance is associated with.

", + "StackInstanceSummary$StackSetId": "

The name or unique ID of the stack set that the stack instance is associated with.

", + "StackSet$StackSetId": "

The ID of the stack set.

", + "StackSetOperation$StackSetId": "

The ID of the stack set.

", + "StackSetSummary$StackSetId": "

The ID of the stack set.

" + } + }, + "StackSetName": { + "base": null, + "refs": { + "CreateStackInstancesInput$StackSetName": "

The name or unique ID of the stack set that you want to create stack instances from.

", + "CreateStackSetInput$StackSetName": "

The name to associate with the stack set. The name must be unique in the region where you create your stack set.

A stack name can contain only alphanumeric characters (case-sensitive) and hyphens. It must start with an alphabetic character and can't be longer than 128 characters.

", + "DeleteStackInstancesInput$StackSetName": "

The name or unique ID of the stack set that you want to delete stack instances for.

", + "DeleteStackSetInput$StackSetName": "

The name or unique ID of the stack set that you're deleting. You can obtain this value by running ListStackSets.

", + "DescribeStackInstanceInput$StackSetName": "

The name or the unique stack ID of the stack set that you want to get stack instance information for.

", + "DescribeStackSetInput$StackSetName": "

The name or unique ID of the stack set whose description you want.

", + "DescribeStackSetOperationInput$StackSetName": "

The name or the unique stack ID of the stack set for the stack operation.

", + "ListStackInstancesInput$StackSetName": "

The name or unique ID of the stack set that you want to list stack instances for.

", + "ListStackSetOperationResultsInput$StackSetName": "

The name or unique ID of the stack set that you want to get operation results for.

", + "ListStackSetOperationsInput$StackSetName": "

The name or unique ID of the stack set that you want to get operation summaries for.

", + "StackSet$StackSetName": "

The name that's associated with the stack set.

", + "StackSetSummary$StackSetName": "

The name of the stack set.

", + "StopStackSetOperationInput$StackSetName": "

The name or unique ID of the stack set that you want to stop the operation for.

", + "UpdateStackSetInput$StackSetName": "

The name or unique ID of the stack set that you want to update.

" + } + }, + "StackSetNameOrId": { + "base": null, + "refs": { + "GetTemplateSummaryInput$StackSetName": "

The name or unique ID of the stack set from which the stack was created.

" + } + }, + "StackSetNotEmptyException": { + "base": "

You can't yet delete this stack set, because it still contains one or more stack instances. Delete all stack instances from the stack set before deleting the stack set.

", + "refs": { + } + }, + "StackSetNotFoundException": { + "base": "

The specified stack set doesn't exist.

", + "refs": { + } + }, + "StackSetOperation": { + "base": "

The structure that contains information about a stack set operation.

", + "refs": { + "DescribeStackSetOperationOutput$StackSetOperation": "

The specified stack set operation.

" + } + }, + "StackSetOperationAction": { + "base": null, + "refs": { + "StackSetOperation$Action": "

The type of stack set operation: CREATE, UPDATE, or DELETE. Create and delete operations affect only the specified stack set instances that are associated with the specified stack set. Update operations affect both the stack set itself and all associated stack set instances.

", + "StackSetOperationSummary$Action": "

The type of operation: CREATE, UPDATE, or DELETE. Create and delete operations affect only the specified stack instances that are associated with the specified stack set. Update operations affect both the stack set itself and all associated stack set instances.

" + } + }, + "StackSetOperationPreferences": { + "base": "

The user-specified preferences for how AWS CloudFormation performs a stack set operation.

", + "refs": { + "CreateStackInstancesInput$OperationPreferences": "

Preferences for how AWS CloudFormation performs this stack set operation.

", + "DeleteStackInstancesInput$OperationPreferences": "

Preferences for how AWS CloudFormation performs this stack set operation.

", + "StackSetOperation$OperationPreferences": "

The preferences for how AWS CloudFormation performs this stack set operation.

", + "UpdateStackSetInput$OperationPreferences": "

Preferences for how AWS CloudFormation performs this stack set operation.

" + } + }, + "StackSetOperationResultStatus": { + "base": null, + "refs": { + "StackSetOperationResultSummary$Status": "

The result status of the stack set operation for the given account in the given region.

" + } + }, + "StackSetOperationResultSummaries": { + "base": null, + "refs": { + "ListStackSetOperationResultsOutput$Summaries": "

A list of StackSetOperationResultSummary structures that contain information about the specified operation results, for accounts and regions that are included in the operation.

" + } + }, + "StackSetOperationResultSummary": { + "base": "

The structure that contains information about a specified operation's results for a given account in a given region.

", + "refs": { + "StackSetOperationResultSummaries$member": null + } + }, + "StackSetOperationStatus": { + "base": null, + "refs": { + "StackSetOperation$Status": "

The status of the operation.

", + "StackSetOperationSummary$Status": "

The overall status of the operation.

" + } + }, + "StackSetOperationSummaries": { + "base": null, + "refs": { + "ListStackSetOperationsOutput$Summaries": "

A list of StackSetOperationSummary structures that contain summary information about operations for the specified stack set.

" + } + }, + "StackSetOperationSummary": { + "base": "

The structure that contains summary information about the specified operation.

", + "refs": { + "StackSetOperationSummaries$member": null + } + }, + "StackSetStatus": { + "base": null, + "refs": { + "ListStackSetsInput$Status": "

The status of the stack sets that you want to get summary information about.

", + "StackSet$Status": "

The status of the stack set.

", + "StackSetSummary$Status": "

The status of the stack set.

" + } + }, + "StackSetSummaries": { + "base": null, + "refs": { + "ListStackSetsOutput$Summaries": "

A list of StackSetSummary structures that contain information about the user's stack sets.

" + } + }, + "StackSetSummary": { + "base": "

The structure that contains summary information about the specified stack set.

", + "refs": { + "StackSetSummaries$member": null + } + }, "StackStatus": { "base": null, "refs": { @@ -1024,6 +1479,21 @@ "GetTemplateOutput$StagesAvailable": "

The stage of the template that you can retrieve. For stacks, the Original and Processed templates are always available. For change sets, the Original template is always available. After AWS CloudFormation finishes creating the change set, the Processed template becomes available.

" } }, + "StaleRequestException": { + "base": "

Another operation has been performed on this stack set since the specified operation was performed.

", + "refs": { + } + }, + "StopStackSetOperationInput": { + "base": null, + "refs": { + } + }, + "StopStackSetOperationOutput": { + "base": null, + "refs": { + } + }, "Tag": { "base": "

The Tag type enables you to specify a key-value pair that can be used to store information about an AWS CloudFormation stack.

", "refs": { @@ -1045,11 +1515,14 @@ "Tags": { "base": null, "refs": { - "CreateChangeSetInput$Tags": "

Key-value pairs to associate with this stack. AWS CloudFormation also propagates these tags to resources in the stack. You can specify a maximum of 10 tags.

", - "CreateStackInput$Tags": "

Key-value pairs to associate with this stack. AWS CloudFormation also propagates these tags to the resources created in the stack. A maximum number of 10 tags can be specified.

", + "CreateChangeSetInput$Tags": "

Key-value pairs to associate with this stack. AWS CloudFormation also propagates these tags to resources in the stack. You can specify a maximum of 50 tags.

", + "CreateStackInput$Tags": "

Key-value pairs to associate with this stack. AWS CloudFormation also propagates these tags to the resources created in the stack. A maximum number of 50 tags can be specified.

", + "CreateStackSetInput$Tags": "

The key-value pairs to associate with this stack set and the stacks created from it. AWS CloudFormation also propagates these tags to supported resources that are created in the stacks. A maximum number of 50 tags can be specified.

If you specify tags as part of a CreateStackSet action, AWS CloudFormation checks to see if you have the required IAM permission to tag resources. If you don't, the entire CreateStackSet action fails with an access denied error, and the stack set is not created.

", "DescribeChangeSetOutput$Tags": "

If you execute the change set, the tags that will be associated with the stack.

", "Stack$Tags": "

A list of Tags that specify information about the stack.

", - "UpdateStackInput$Tags": "

Key-value pairs to associate with this stack. AWS CloudFormation also propagates these tags to supported resources in the stack. You can specify a maximum number of 10 tags.

If you don't specify this parameter, AWS CloudFormation doesn't modify the stack's tags. If you specify an empty value, AWS CloudFormation removes all associated tags.

" + "StackSet$Tags": "

A list of tags that specify information about the stack set. A maximum number of 50 tags can be specified.

", + "UpdateStackInput$Tags": "

Key-value pairs to associate with this stack. AWS CloudFormation also propagates these tags to supported resources in the stack. You can specify a maximum number of 50 tags.

If you don't specify this parameter, AWS CloudFormation doesn't modify the stack's tags. If you specify an empty value, AWS CloudFormation removes all associated tags.

", + "UpdateStackSetInput$Tags": "

The key-value pairs to associate with this stack set and the stacks created from it. AWS CloudFormation also propagates these tags to supported resources that are created in the stacks. You can specify a maximum number of 50 tags.

If you specify tags for this parameter, those tags replace any list of tags that are currently associated with this stack set. This means:

If you specify new tags as part of an UpdateStackSet action, AWS CloudFormation checks to see if you have the required IAM permission to tag resources. If you omit tags that are currently associated with the stack set from the list of tags you specify, AWS CloudFormation assumes that you want to remove those tags from the stack set, and checks to see if you have permission to untag resources. If you don't have the necessary permission(s), the entire UpdateStackSet action fails with an access denied error, and the stack set is not updated.

" } }, "TemplateBody": { @@ -1057,10 +1530,13 @@ "refs": { "CreateChangeSetInput$TemplateBody": "

A structure that contains the body of the revised template, with a minimum length of 1 byte and a maximum length of 51,200 bytes. AWS CloudFormation generates the change set by comparing this template with the template of the stack that you specified.

Conditional: You must specify only TemplateBody or TemplateURL.

", "CreateStackInput$TemplateBody": "

Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information, go to Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.

", + "CreateStackSetInput$TemplateBody": "

The structure that contains the template body, with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information, see Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.

", "EstimateTemplateCostInput$TemplateBody": "

Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. (For more information, go to Template Anatomy in the AWS CloudFormation User Guide.)

Conditional: You must pass TemplateBody or TemplateURL. If both are passed, only TemplateBody is used.

", "GetTemplateOutput$TemplateBody": "

Structure containing the template body. (For more information, go to Template Anatomy in the AWS CloudFormation User Guide.)

AWS CloudFormation returns the same template that was used when the stack was created.

", "GetTemplateSummaryInput$TemplateBody": "

Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information about templates, see Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must specify only one of the following parameters: StackName, TemplateBody, or TemplateURL.

", + "StackSet$TemplateBody": "

The structure that contains the body of the template that was used to create or update the stack set.

", "UpdateStackInput$TemplateBody": "

Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. (For more information, go to Template Anatomy in the AWS CloudFormation User Guide.)

Conditional: You must specify only one of the following parameters: TemplateBody, TemplateURL, or set the UsePreviousTemplate to true.

", + "UpdateStackSetInput$TemplateBody": "

The structure that contains the template body, with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information, see Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must specify only one of the following parameters: TemplateBody or TemplateURL—or set UsePreviousTemplate to true.

", "ValidateTemplateInput$TemplateBody": "

Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information, go to Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used.

" } }, @@ -1094,9 +1570,11 @@ "refs": { "CreateChangeSetInput$TemplateURL": "

The location of the file that contains the revised template. The URL must point to a template (max size: 460,800 bytes) that is located in an S3 bucket. AWS CloudFormation generates the change set by comparing this template with the stack that you specified.

Conditional: You must specify only TemplateBody or TemplateURL.

", "CreateStackInput$TemplateURL": "

Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that is located in an Amazon S3 bucket. For more information, go to the Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.

", + "CreateStackSetInput$TemplateURL": "

The location of the file that contains the template body. The URL must point to a template (maximum size: 460,800 bytes) that's located in an Amazon S3 bucket. For more information, see Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.

", "EstimateTemplateCostInput$TemplateURL": "

Location of file containing the template body. The URL must point to a template that is located in an Amazon S3 bucket. For more information, go to Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used.

", "GetTemplateSummaryInput$TemplateURL": "

Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that is located in an Amazon S3 bucket. For more information about templates, see Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must specify only one of the following parameters: StackName, TemplateBody, or TemplateURL.

", "UpdateStackInput$TemplateURL": "

Location of file containing the template body. The URL must point to a template that is located in an Amazon S3 bucket. For more information, go to Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must specify only one of the following parameters: TemplateBody, TemplateURL, or set the UsePreviousTemplate to true.

", + "UpdateStackSetInput$TemplateURL": "

The location of the file that contains the template body. The URL must point to a template (maximum size: 460,800 bytes) that is located in an Amazon S3 bucket. For more information, see Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must specify only one of the following parameters: TemplateBody or TemplateURL—or set UsePreviousTemplate to true.

", "ValidateTemplateInput$TemplateURL": "

Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that is located in an Amazon S3 bucket. For more information, go to Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used.

" } }, @@ -1113,7 +1591,11 @@ "StackEvent$Timestamp": "

Time the status was updated.

", "StackResource$Timestamp": "

Time the status was updated.

", "StackResourceDetail$LastUpdatedTimestamp": "

Time the status was updated.

", - "StackResourceSummary$LastUpdatedTimestamp": "

Time the status was updated.

" + "StackResourceSummary$LastUpdatedTimestamp": "

Time the status was updated.

", + "StackSetOperation$CreationTimestamp": "

The time at which the operation was initiated. Note that the creation time for the stack set operation might differ from the creation times of the individual stacks themselves. This is because AWS CloudFormation needs to perform preparatory work for the operation, such as dispatching the work to the requested regions, before actually creating the first stacks.

", + "StackSetOperation$EndTimestamp": "

The time at which the stack set operation ended, across all accounts and regions specified. Note that this doesn't necessarily mean that the stack set operation was successful, or even attempted, in each account or region.

", + "StackSetOperationSummary$CreationTimestamp": "

The time at which the operation was initiated. Note that the creation time for the stack set operation might differ from the creation times of the individual stacks themselves. This is because AWS CloudFormation needs to perform preparatory work for the operation, such as dispatching the work to the requested regions, before actually creating the first stacks.

", + "StackSetOperationSummary$EndTimestamp": "

The time at which the stack set operation ended, across all accounts and regions specified. Note that this doesn't necessarily mean that the stack set operation was successful, or even attempted, in each account or region.

" } }, "TokenAlreadyExistsException": { @@ -1144,6 +1626,16 @@ "refs": { } }, + "UpdateStackSetInput": { + "base": null, + "refs": { + } + }, + "UpdateStackSetOutput": { + "base": null, + "refs": { + } + }, "Url": { "base": null, "refs": { @@ -1154,7 +1646,8 @@ "base": null, "refs": { "CreateChangeSetInput$UsePreviousTemplate": "

Whether to reuse the template that is associated with the stack to create the change set.

", - "UpdateStackInput$UsePreviousTemplate": "

Reuse the existing template that is associated with the stack that you are updating.

Conditional: You must specify only one of the following parameters: TemplateBody, TemplateURL, or set the UsePreviousTemplate to true.

" + "UpdateStackInput$UsePreviousTemplate": "

Reuse the existing template that is associated with the stack that you are updating.

Conditional: You must specify only one of the following parameters: TemplateBody, TemplateURL, or set the UsePreviousTemplate to true.

", + "UpdateStackSetInput$UsePreviousTemplate": "

Use the existing template that's associated with the stack set that you're updating.

Conditional: You must specify only one of the following parameters: TemplateBody or TemplateURL—or set UsePreviousTemplate to true.
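A companion sketch for the UsePreviousTemplate path, keeping the stack set's current template while updating a single parameter (names and keys are placeholders):

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudformation"
)

func main() {
	svc := cloudformation.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2"))))

	// Neither TemplateBody nor TemplateURL is set; UsePreviousTemplate
	// keeps the stack set's current template while a parameter changes.
	_, err := svc.UpdateStackSet(&cloudformation.UpdateStackSetInput{
		StackSetName:        aws.String("example-stack-set"), // placeholder
		UsePreviousTemplate: aws.Bool(true),
		Parameters: []*cloudformation.Parameter{{
			ParameterKey:   aws.String("InstanceType"), // placeholder key
			ParameterValue: aws.String("t2.small"),
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
}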

" } }, "UsePreviousValue": { diff --git a/cli/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2016-11-15/api-2.json b/cli/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2016-11-15/api-2.json index c0bb8653b..bf5bddbfb 100755 --- a/cli/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2016-11-15/api-2.json +++ b/cli/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2016-11-15/api-2.json @@ -13894,6 +13894,10 @@ "WeightedCapacity":{ "shape":"Double", "locationName":"weightedCapacity" + }, + "TagSpecifications":{ + "shape":"SpotFleetTagSpecificationList", + "locationName":"tagSpecificationSet" } } }, @@ -14007,6 +14011,26 @@ "locationName":"item" } }, + "SpotFleetTagSpecification":{ + "type":"structure", + "members":{ + "ResourceType":{ + "shape":"ResourceType", + "locationName":"resourceType" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tag" + } + } + }, + "SpotFleetTagSpecificationList":{ + "type":"list", + "member":{ + "shape":"SpotFleetTagSpecification", + "locationName":"item" + } + }, "SpotInstanceRequest":{ "type":"structure", "members":{ diff --git a/cli/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2016-11-15/docs-2.json b/cli/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2016-11-15/docs-2.json index fa7d8c980..866be2a3d 100755 --- a/cli/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2016-11-15/docs-2.json +++ b/cli/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2016-11-15/docs-2.json @@ -1309,12 +1309,12 @@ } }, "CreateNetworkInterfacePermissionRequest": { - "base": null, + "base": "

Contains the parameters for CreateNetworkInterfacePermission.

", "refs": { } }, "CreateNetworkInterfacePermissionResult": { - "base": null, + "base": "

Contains the output of CreateNetworkInterfacePermission.

", "refs": { } }, @@ -1660,12 +1660,12 @@ } }, "DeleteNetworkInterfacePermissionRequest": { - "base": null, + "base": "

Contains the parameters for DeleteNetworkInterfacePermission.

", "refs": { } }, "DeleteNetworkInterfacePermissionResult": { - "base": null, + "base": "

Contains the output for DeleteNetworkInterfacePermission.

", "refs": { } }, @@ -2071,12 +2071,12 @@ } }, "DescribeNetworkInterfacePermissionsRequest": { - "base": null, + "base": "

Contains the parameters for DescribeNetworkInterfacePermissions.

", "refs": { } }, "DescribeNetworkInterfacePermissionsResult": { - "base": null, + "base": "

Contains the output for DescribeNetworkInterfacePermissions.

", "refs": { } }, @@ -5063,6 +5063,7 @@ "ResourceType": { "base": null, "refs": { + "SpotFleetTagSpecification$ResourceType": "

The type of resource. Currently, the only resource type that is supported is instance.

", "TagDescription$ResourceType": "

The resource type.

", "TagSpecification$ResourceType": "

The type of resource to tag. Currently, the resource types that support tagging on creation are instance and volume.

" } @@ -5473,6 +5474,18 @@ "DescribeSpotFleetRequestsResponse$SpotFleetRequestConfigs": "

Information about the configuration of your Spot fleet.

" } }, + "SpotFleetTagSpecification": { + "base": "

The tags for a Spot fleet resource.

", + "refs": { + "SpotFleetTagSpecificationList$member": null + } + }, + "SpotFleetTagSpecificationList": { + "base": null, + "refs": { + "SpotFleetLaunchSpecification$TagSpecifications": "

The tags to apply during creation.
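These new shapes let a Spot fleet request tag its instances at launch. A sketch of wiring them into a launch specification; the AMI ID and tag values are placeholders, and per the entry above only the instance resource type is accepted:

package ec2example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// taggedLaunchSpec builds a Spot fleet launch specification whose instances
// are tagged at creation via the new TagSpecifications field.
func taggedLaunchSpec() *ec2.SpotFleetLaunchSpecification {
	return &ec2.SpotFleetLaunchSpecification{
		ImageId:      aws.String("ami-12345678"), // placeholder AMI
		InstanceType: aws.String("m3.medium"),
		TagSpecifications: []*ec2.SpotFleetTagSpecification{{
			// "instance" is currently the only supported resource type.
			ResourceType: aws.String(ec2.ResourceTypeInstance),
			Tags: []*ec2.Tag{{
				Key:   aws.String("Name"),
				Value: aws.String("spot-worker"), // placeholder tag
			}},
		}},
	}
}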

" + } + }, "SpotInstanceRequest": { "base": "

Describes a Spot instance request.

", "refs": { @@ -6739,6 +6752,7 @@ "RouteTable$Tags": "

Any tags assigned to the route table.

", "SecurityGroup$Tags": "

Any tags assigned to the security group.

", "Snapshot$Tags": "

Any tags assigned to the snapshot.

", + "SpotFleetTagSpecification$Tags": "

The tags.

", "SpotInstanceRequest$Tags": "

Any tags assigned to the resource.

", "Subnet$Tags": "

Any tags assigned to the subnet.

", "TagSpecification$Tags": "

The tags to apply to the resource.

", diff --git a/cli/vendor/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/api-2.json b/cli/vendor/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/api-2.json index d8e03048f..6ef25d6d6 100644 --- a/cli/vendor/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/api-2.json +++ b/cli/vendor/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/api-2.json @@ -599,7 +599,10 @@ "Configurations":{"shape":"ConfigurationList"}, "SecurityConfiguration":{"shape":"XmlString"}, "AutoScalingRole":{"shape":"XmlString"}, - "ScaleDownBehavior":{"shape":"ScaleDownBehavior"} + "ScaleDownBehavior":{"shape":"ScaleDownBehavior"}, + "CustomAmiId":{"shape":"XmlStringMaxLen256"}, + "EbsRootVolumeSize":{"shape":"Integer"}, + "RepoUpgradeOnBoot":{"shape":"RepoUpgradeOnBoot"} } }, "ClusterId":{"type":"string"}, @@ -628,6 +631,7 @@ "INTERNAL_ERROR", "VALIDATION_ERROR", "INSTANCE_FAILURE", + "INSTANCE_FLEET_TIMEOUT", "BOOTSTRAP_FAILURE", "USER_REQUEST", "STEP_FAILURE", @@ -1639,6 +1643,13 @@ "members":{ } }, + "RepoUpgradeOnBoot":{ + "type":"string", + "enum":[ + "SECURITY", + "NONE" + ] + }, "ResourceId":{"type":"string"}, "RunJobFlowInput":{ "type":"structure", @@ -1665,7 +1676,10 @@ "Tags":{"shape":"TagList"}, "SecurityConfiguration":{"shape":"XmlString"}, "AutoScalingRole":{"shape":"XmlString"}, - "ScaleDownBehavior":{"shape":"ScaleDownBehavior"} + "ScaleDownBehavior":{"shape":"ScaleDownBehavior"}, + "CustomAmiId":{"shape":"XmlStringMaxLen256"}, + "EbsRootVolumeSize":{"shape":"Integer"}, + "RepoUpgradeOnBoot":{"shape":"RepoUpgradeOnBoot"} } }, "RunJobFlowOutput":{ diff --git a/cli/vendor/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/docs-2.json b/cli/vendor/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/docs-2.json index 7fbc4ebda..061c3fbee 100644 --- a/cli/vendor/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/docs-2.json +++ b/cli/vendor/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/docs-2.json @@ -17,7 +17,7 @@ "ListClusters": "

Provides the status of all clusters visible to this AWS account. Allows you to filter the list of clusters based on certain criteria; for example, filtering by cluster creation date and time or by status. This call returns a maximum of 50 clusters per call, but returns a marker to track the paging of the cluster list across multiple ListClusters calls.

", "ListInstanceFleets": "

Lists all available details about the instance fleets in a cluster.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.

", "ListInstanceGroups": "

Provides all available details about the instance groups in a cluster.

", - "ListInstances": "

Provides information about the cluster instances that Amazon EMR provisions on behalf of a user when it creates the cluster. For example, this operation indicates when the EC2 instances reach the Ready state, when instances become available to Amazon EMR to use for jobs, and the IP addresses for cluster instances, etc.

", + "ListInstances": "

Provides information for all active EC2 instances and EC2 instances terminated in the last 30 days, up to a maximum of 2,000. EC2 instances in any of the following states are considered active: AWAITING_FULFILLMENT, PROVISIONING, BOOTSTRAPPING, RUNNING.
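A short sketch of the call this entry describes, filtered to the four states the documentation counts as active (the cluster ID is a placeholder):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/emr"
)

func main() {
	svc := emr.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2"))))

	// Restrict the listing to the states the docs define as active.
	out, err := svc.ListInstances(&emr.ListInstancesInput{
		ClusterId: aws.String("j-EXAMPLE1234"), // placeholder cluster ID
		InstanceStates: []*string{
			aws.String(emr.InstanceStateAwaitingFulfillment),
			aws.String(emr.InstanceStateProvisioning),
			aws.String(emr.InstanceStateBootstrapping),
			aws.String(emr.InstanceStateRunning),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("active instances:", len(out.Instances))
}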

", "ListSecurityConfigurations": "

Lists all the security configurations visible to this account, providing their creation dates and times, and their names. This call returns a maximum of 50 clusters per call, but returns a marker to track the paging of the cluster list across multiple ListSecurityConfigurations calls.

", "ListSteps": "

Provides a list of steps for the cluster in reverse order unless you specify stepIds with the request.

", "ModifyInstanceFleet": "

Modifies the target On-Demand and target Spot capacities for the instance fleet with the specified InstanceFleetID within the cluster specified using ClusterID. The call either succeeds or fails atomically.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.

", @@ -86,7 +86,7 @@ } }, "Application": { - "base": "

An application is any Amazon or third-party software that you can add to the cluster. This structure contains a list of strings that indicates the software to use with the cluster and accepts a user argument list. Amazon EMR accepts and forwards the argument list to the corresponding installation script as bootstrap action argument. For more information, see Using the MapR Distribution for Hadoop. Currently supported values are:

In Amazon EMR releases 4.0 and greater, the only accepted parameter is the application name. To pass arguments to applications, you supply a configuration for each application.

", + "base": "

An application is any Amazon or third-party software that you can add to the cluster. This structure contains a list of strings that indicates the software to use with the cluster and accepts a user argument list. Amazon EMR accepts and forwards the argument list to the corresponding installation script as bootstrap action arguments. For more information, see Using the MapR Distribution for Hadoop. Currently supported values are:

In Amazon EMR releases 4.x and later, the only accepted parameter is the application name. To pass arguments to applications, you supply a configuration for each application.

", "refs": { "ApplicationList$member": null } @@ -95,7 +95,7 @@ "base": null, "refs": { "Cluster$Applications": "

The applications installed on this cluster.

", - "RunJobFlowInput$Applications": "

Amazon EMR releases 4.x or later.

A list of applications for the cluster. Valid values are: \"Hadoop\", \"Hive\", \"Mahout\", \"Pig\", and \"Spark.\" They are case insensitive.

" + "RunJobFlowInput$Applications": "

For Amazon EMR releases 4.0 and later. A list of applications for the cluster. Valid values are: \"Hadoop\", \"Hive\", \"Mahout\", \"Pig\", and \"Spark.\" They are case insensitive.

" } }, "AutoScalingPolicy": { @@ -321,13 +321,13 @@ "ConfigurationList": { "base": null, "refs": { - "Cluster$Configurations": "

Amazon EMR releases 4.x or later.

The list of Configurations supplied to the EMR cluster.

", + "Cluster$Configurations": "

Applies only to Amazon EMR releases 4.x and later. The list of Configurations supplied to the EMR cluster.

", "Configuration$Configurations": "

A list of additional configurations to apply within a configuration object.

", "InstanceGroup$Configurations": "

Amazon EMR releases 4.x or later.

The list of configurations supplied for an EMR cluster instance group. You can specify a separate configuration for each instance group (master, core, and task).

", "InstanceGroupConfig$Configurations": "

Amazon EMR releases 4.x or later.

The list of configurations supplied for an EMR cluster instance group. You can specify a separate configuration for each instance group (master, core, and task).

", "InstanceTypeConfig$Configurations": "

A configuration classification that applies when provisioning cluster instances, which can include configurations for applications and software that run on the cluster.

", "InstanceTypeSpecification$Configurations": "

A configuration classification that applies when provisioning cluster instances, which can include configurations for applications and software bundled with Amazon EMR.

", - "RunJobFlowInput$Configurations": "

Amazon EMR releases 4.x or later.

The list of configurations supplied for the EMR cluster you are creating.

" + "RunJobFlowInput$Configurations": "

For Amazon EMR releases 4.0 and later. The list of configurations supplied for the EMR cluster you are creating.

" } }, "CreateSecurityConfigurationInput": { @@ -840,6 +840,7 @@ "CloudWatchAlarmDefinition$EvaluationPeriods": "

The number of periods, expressed in seconds using Period, during which the alarm condition must exist before the alarm triggers automatic scaling activity. The default value is 1.

", "CloudWatchAlarmDefinition$Period": "

The period, in seconds, over which the statistic is applied. EMR CloudWatch metrics are emitted every five minutes (300 seconds), so if an EMR CloudWatch metric is specified, specify 300.

", "Cluster$NormalizedInstanceHours": "

An approximation of the cost of the cluster, represented in m1.small/hours. This value is incremented one time for every hour an m1.small instance runs. Larger instances are weighted more, so an EC2 instance that is roughly four times more expensive would result in the normalized instance hours being incremented by four. This result is only an approximation and does not reflect the actual billing rate.

", + "Cluster$EbsRootVolumeSize": "

The size, in GiB, of the EBS root device volume of the Linux AMI that is used for each EC2 instance. Available in Amazon EMR version 4.x and later.

", "ClusterSummary$NormalizedInstanceHours": "

An approximation of the cost of the cluster, represented in m1.small/hours. This value is incremented one time for every hour an m1.small instance runs. Larger instances are weighted more, so an EC2 instance that is roughly four times more expensive would result in the normalized instance hours being incremented by four. This result is only an approximation and does not reflect the actual billing rate.

", "EbsBlockDeviceConfig$VolumesPerInstance": "

Number of EBS volumes with a specific volume configuration that will be associated with every instance in the instance group

", "InstanceGroup$RequestedInstanceCount": "

The target number of instances for the instance group.

", @@ -852,6 +853,7 @@ "JobFlowInstancesConfig$InstanceCount": "

The number of EC2 instances in the cluster.

", "JobFlowInstancesDetail$InstanceCount": "

The number of Amazon EC2 instances in the cluster. If the value is 1, the same instance serves as both the master and slave node. If the value is greater than 1, one instance is the master node and all others are slave nodes.

", "JobFlowInstancesDetail$NormalizedInstanceHours": "

An approximation of the cost of the cluster, represented in m1.small/hours. This value is incremented one time for every hour that an m1.small runs. Larger instances are weighted more, so an Amazon EC2 instance that is roughly four times more expensive would result in the normalized instance hours being incremented by four. This result is only an approximation and does not reflect the actual billing rate.

", + "RunJobFlowInput$EbsRootVolumeSize": "

The size, in GiB, of the EBS root device volume of the Linux AMI that is used for each EC2 instance. Available in Amazon EMR version 4.x and later.

", "ScalingConstraints$MinCapacity": "

The lower boundary of EC2 instances in an instance group below which scaling activities are not allowed to shrink. Scale-in activities will not terminate instances below this boundary.

", "ScalingConstraints$MaxCapacity": "

The upper boundary of EC2 instances in an instance group beyond which scaling activities are not allowed to grow. Scale-out activities will not add instances beyond this boundary.

", "ShrinkPolicy$DecommissionTimeout": "

The desired timeout for decommissioning an instance. Overrides the default YARN decommissioning timeout.

", @@ -1055,14 +1057,14 @@ "NewSupportedProductsList": { "base": null, "refs": { - "RunJobFlowInput$NewSupportedProducts": "

For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and greater, use Applications.

A list of strings that indicates third-party software to use with the job flow that accepts a user argument list. EMR accepts and forwards the argument list to the corresponding installation script as bootstrap action arguments. For more information, see \"Launch a Job Flow on the MapR Distribution for Hadoop\" in the Amazon EMR Developer Guide. Supported values are:

" + "RunJobFlowInput$NewSupportedProducts": "

For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and later, use Applications.

A list of strings that indicates third-party software to use with the job flow that accepts a user argument list. EMR accepts and forwards the argument list to the corresponding installation script as bootstrap action arguments. For more information, see \"Launch a Job Flow on the MapR Distribution for Hadoop\" in the Amazon EMR Developer Guide. Supported values are:

" } }, "NonNegativeDouble": { "base": null, "refs": { "CloudWatchAlarmDefinition$Threshold": "

The value against which the specified statistic is compared.

", - "InstanceTypeConfig$BidPriceAsPercentageOfOnDemandPrice": "

The bid price, as a percentage of On-Demand price, for each EC2 Spot instance as defined by InstanceType. Expressed as a number between 0 and 1000 (for example, 20 specifies 20%). If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%.

", + "InstanceTypeConfig$BidPriceAsPercentageOfOnDemandPrice": "

The bid price, as a percentage of On-Demand price, for each EC2 Spot instance as defined by InstanceType. Expressed as a number (for example, 20 specifies 20%). If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%.

", "InstanceTypeSpecification$BidPriceAsPercentageOfOnDemandPrice": "

The bid price, as a percentage of On-Demand price, for each EC2 Spot instance as defined by InstanceType. Expressed as a number (for example, 20 specifies 20%).

" } }, @@ -1103,6 +1105,13 @@ "refs": { } }, + "RepoUpgradeOnBoot": { + "base": null, + "refs": { + "Cluster$RepoUpgradeOnBoot": "

Applies only when CustomAmiID is used. Specifies the type of updates that are applied from the Amazon Linux AMI package repositories when an instance boots using the AMI.

", + "RunJobFlowInput$RepoUpgradeOnBoot": "

Applies only when CustomAmiID is used. Specifies which updates from the Amazon Linux AMI package repositories to apply automatically when the instance boots using the AMI. If omitted, the default is SECURITY, which indicates that only security updates are applied. If NONE is specified, no updates are applied, and all updates must be applied manually.
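The new RunJobFlowInput fields in this model update (CustomAmiId, EbsRootVolumeSize, and RepoUpgradeOnBoot) are used together; a minimal sketch with placeholder names, AMI ID, and IAM roles:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/emr"
)

func main() {
	svc := emr.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2"))))

	_, err := svc.RunJobFlow(&emr.RunJobFlowInput{
		Name:         aws.String("custom-ami-cluster"), // placeholder
		ReleaseLabel: aws.String("emr-5.7.0"),          // custom AMIs need 5.7.0 or later
		CustomAmiId:  aws.String("ami-12345678"),       // placeholder EBS-backed Linux AMI
		// Size, in GiB, of the root device volume of each instance.
		EbsRootVolumeSize: aws.Int64(20),
		// SECURITY (the default) applies only security updates; NONE applies none.
		RepoUpgradeOnBoot: aws.String("SECURITY"),
		Instances: &emr.JobFlowInstancesConfig{
			MasterInstanceType: aws.String("m3.xlarge"),
			SlaveInstanceType:  aws.String("m3.xlarge"),
			InstanceCount:      aws.Int64(3),
		},
		JobFlowRole: aws.String("EMR_EC2_DefaultRole"),
		ServiceRole: aws.String("EMR_DefaultRole"),
	})
	if err != nil {
		log.Fatal(err)
	}
}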

" + } + }, "ResourceId": { "base": null, "refs": { @@ -1217,7 +1226,7 @@ "SpotProvisioningTimeoutAction": { "base": null, "refs": { - "SpotProvisioningSpecification$TimeoutAction": "

The action to take when TargetSpotCapacity has not been fulfilled when the TimeoutDurationMinutes has expired. Spot instances are not uprovisioned within the Spot provisioining timeout. Valid values are TERMINATE_CLUSTER and SWITCH_TO_ON_DEMAND to fulfill the remaining capacity.

" + "SpotProvisioningSpecification$TimeoutAction": "

The action to take when TargetSpotCapacity has not been fulfilled when the TimeoutDurationMinutes has expired; that is, when Spot instances could not be provisioned within the Spot provisioning timeout. Valid values are TERMINATE_CLUSTER and SWITCH_TO_ON_DEMAND. SWITCH_TO_ON_DEMAND specifies that if no Spot instances are available, On-Demand Instances should be provisioned to fulfill any remaining Spot capacity.
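A small sketch of the fallback behavior described above: give Spot provisioning ten minutes, then fill any shortfall with On-Demand capacity instead of terminating the cluster:

package emrexample

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/emr"
)

// spotWithFallback gives the fleet ten minutes to obtain Spot capacity and
// then provisions On-Demand instances for whatever remains unfulfilled.
func spotWithFallback() *emr.InstanceFleetProvisioningSpecifications {
	return &emr.InstanceFleetProvisioningSpecifications{
		SpotSpecification: &emr.SpotProvisioningSpecification{
			TimeoutDurationMinutes: aws.Int64(10), // valid range: 5 to 1440
			TimeoutAction:          aws.String("SWITCH_TO_ON_DEMAND"),
		},
	}
}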

" } }, "Statistic": { @@ -1349,7 +1358,7 @@ "Cluster$LogUri": "

The path to the Amazon S3 location where logs for this cluster are stored.

", "Cluster$RequestedAmiVersion": "

The AMI version requested for this cluster.

", "Cluster$RunningAmiVersion": "

The AMI version running on this cluster.

", - "Cluster$ReleaseLabel": "

The release label for the Amazon EMR release. For Amazon EMR 3.x and 2.x AMIs, use amiVersion instead instead of ReleaseLabel.

", + "Cluster$ReleaseLabel": "

The release label for the Amazon EMR release.

", "Cluster$ServiceRole": "

The IAM role that will be assumed by the Amazon EMR service to access AWS resources on your behalf.

", "Cluster$MasterPublicDnsName": "

The public DNS name of the master EC2 instance.

", "ClusterStateChangeReason$Message": "

The descriptive message for the state change reason.

", @@ -1428,7 +1437,7 @@ "base": null, "refs": { "JobFlowDetail$SupportedProducts": "

A list of strings set by third party software when the job flow is launched. If you are not using third party software to manage the job flow this value is empty.

", - "RunJobFlowInput$SupportedProducts": "

For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and greater, use Applications.

A list of strings that indicates third-party software to use. For more information, see Use Third Party Applications with Amazon EMR. Currently supported values are:

" + "RunJobFlowInput$SupportedProducts": "

For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and later, use Applications.

A list of strings that indicates third-party software to use. For more information, see Use Third Party Applications with Amazon EMR. Currently supported values are:

" } }, "Tag": { @@ -1474,7 +1483,7 @@ "InstanceFleetConfig$TargetSpotCapacity": "

The target capacity of Spot units for the instance fleet, which determines how many Spot instances to provision. When the instance fleet launches, Amazon EMR tries to provision Spot instances as specified by InstanceTypeConfig. Each instance configuration has a specified WeightedCapacity. When a Spot instance is provisioned, the WeightedCapacity units count toward the target capacity. Amazon EMR provisions instances until the target capacity is totally fulfilled, even if this results in an overage. For example, if there are 2 units remaining to fulfill capacity, and Amazon EMR can only provision an instance with a WeightedCapacity of 5 units, the instance is provisioned, and the target capacity is exceeded by 3 units.

If not specified or set to 0, only On-Demand instances are provisioned for the instance fleet. At least one of TargetSpotCapacity and TargetOnDemandCapacity should be greater than 0. For a master instance fleet, only one of TargetSpotCapacity and TargetOnDemandCapacity can be specified, and its value must be 1.
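The capacity arithmetic is easiest to see in a concrete configuration: with a Spot target of 8 units and instances weighted at 4 units each, Amazon EMR provisions two instances, and would overshoot the target if the division were not exact. A sketch with placeholder values:

package emrexample

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/emr"
)

// coreFleet targets 8 Spot units with instances weighted at 4 units each,
// so Amazon EMR provisions two m4.xlarge instances to meet the target.
func coreFleet() *emr.InstanceFleetConfig {
	return &emr.InstanceFleetConfig{
		Name:               aws.String("core-fleet"), // placeholder
		InstanceFleetType:  aws.String("CORE"),
		TargetSpotCapacity: aws.Int64(8),
		InstanceTypeConfigs: []*emr.InstanceTypeConfig{{
			InstanceType:     aws.String("m4.xlarge"),
			WeightedCapacity: aws.Int64(4),
			// Bid at 100% of the On-Demand price (also the default).
			BidPriceAsPercentageOfOnDemandPrice: aws.Float64(100),
		}},
	}
}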

", "InstanceFleetModifyConfig$TargetOnDemandCapacity": "

The target capacity of On-Demand units for the instance fleet. For more information see InstanceFleetConfig$TargetOnDemandCapacity.

", "InstanceFleetModifyConfig$TargetSpotCapacity": "

The target capacity of Spot units for the instance fleet. For more information, see InstanceFleetConfig$TargetSpotCapacity.

", - "InstanceTypeConfig$WeightedCapacity": "

The number of units that a provisioned instance of this type provides toward fulfilling the target capacities defined in InstanceFleetConfig. This value is 1 for a master instance fleet, and must be greater than 0 for core and task instance fleets.

", + "InstanceTypeConfig$WeightedCapacity": "

The number of units that a provisioned instance of this type provides toward fulfilling the target capacities defined in InstanceFleetConfig. This value is 1 for a master instance fleet, and must be 1 or greater for core and task instance fleets. Defaults to 1 if not specified.

", "InstanceTypeSpecification$WeightedCapacity": "

The number of units that a provisioned instance of this type provides toward fulfilling the target capacities defined in InstanceFleetConfig. Capacity values represent performance characteristics such as vCPUs, memory, or I/O. If not specified, the default value is 1.

", "SpotProvisioningSpecification$TimeoutDurationMinutes": "

The spot provisioning timeout period in minutes. If Spot instances are not provisioned within this time period, the TimeOutAction is taken. Minimum value is 5 and maximum value is 1440. The timeout applies only during initial provisioning, when the cluster is first created.

", "SpotProvisioningSpecification$BlockDurationMinutes": "

The defined duration for Spot instances (also known as Spot blocks) in minutes. When specified, the Spot instance does not terminate before the defined duration expires, and defined duration pricing for Spot instances applies. Valid values are 60, 120, 180, 240, 300, or 360. The duration period starts as soon as a Spot instance receives its instance ID. At the end of the duration, Amazon EC2 marks the Spot instance for termination and provides a Spot instance termination notice, which gives the instance a two-minute warning before it terminates.

" @@ -1538,6 +1547,7 @@ "AddJobFlowStepsInput$JobFlowId": "

A string that uniquely identifies the job flow. This identifier is returned by RunJobFlow and can also be obtained from ListClusters.

", "BootstrapActionConfig$Name": "

The name of the bootstrap action.

", "CancelStepsInput$ClusterId": "

The ClusterID for which specified steps will be canceled. Use RunJobFlow and ListClusters to get ClusterIDs.

", + "Cluster$CustomAmiId": "

Available only in Amazon EMR version 5.7.0 and later. The ID of a custom Amazon EBS-backed Linux AMI if the cluster uses a custom AMI.

", "InstanceFleet$Name": "

A friendly name for the instance fleet.

", "InstanceFleetConfig$Name": "

The friendly name of the instance fleet.

", "InstanceGroupConfig$Name": "

Friendly name given to the instance group.

", @@ -1551,7 +1561,7 @@ "InstanceTypeSpecification$BidPrice": "

The bid price for each EC2 Spot instance type as defined by InstanceType. Expressed in USD.

", "JobFlowDetail$JobFlowId": "

The job flow identifier.

", "JobFlowDetail$Name": "

The name of the job flow.

", - "JobFlowDetail$AmiVersion": "

The version of the AMI used to initialize Amazon EC2 instances in the job flow. For a list of AMI versions currently supported by Amazon EMR, see AMI Versions Supported in EMR in the Amazon EMR Developer Guide.

", + "JobFlowDetail$AmiVersion": "

Used only for version 2.x and 3.x of Amazon EMR. The version of the AMI used to initialize Amazon EC2 instances in the job flow. For a list of AMI versions supported by Amazon EMR, see AMI Versions Supported in EMR in the Amazon EMR Developer Guide.

", "JobFlowInstancesConfig$Ec2KeyName": "

The name of the EC2 key pair that can be used to ssh to the master node as the user called \"hadoop.\"

", "JobFlowInstancesConfig$HadoopVersion": "

The Hadoop version for the cluster. Valid inputs are \"0.18\" (deprecated), \"0.20\" (deprecated), \"0.20.205\" (deprecated), \"1.0.3\", \"2.2.0\", or \"2.4.0\". If you do not set this value, the default of 0.18 is used, unless the AmiVersion parameter is set in the RunJobFlow call, in which case the default version of Hadoop for that AMI version is used.

", "JobFlowInstancesConfig$Ec2SubnetId": "

Applies to clusters that use the uniform instance group configuration. To launch the cluster in Amazon Virtual Private Cloud (Amazon VPC), set this parameter to the identifier of the Amazon VPC subnet where you want the cluster to launch. If you do not specify this value, the cluster launches in the normal Amazon Web Services cloud, outside of an Amazon VPC, if the account launching the cluster supports EC2 Classic networks in the region where the cluster launches.

Amazon VPC currently does not support cluster compute quadruple extra large (cc1.4xlarge) instances. Thus you cannot specify the cc1.4xlarge instance type for clusters launched in an Amazon VPC.

", @@ -1562,8 +1572,9 @@ "JobFlowInstancesDetail$Ec2SubnetId": "

For clusters launched within Amazon Virtual Private Cloud, this is the identifier of the subnet where the cluster was launched.

", "JobFlowInstancesDetail$HadoopVersion": "

The Hadoop version for the cluster.

", "RunJobFlowInput$Name": "

The name of the job flow.

", - "RunJobFlowInput$AmiVersion": "

For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and greater, use ReleaseLabel.

The version of the Amazon Machine Image (AMI) to use when launching Amazon EC2 instances in the job flow. The following values are valid:

If the AMI supports multiple versions of Hadoop (for example, AMI 1.0 supports both Hadoop 0.18 and 0.20) you can use the JobFlowInstancesConfig HadoopVersion parameter to modify the version of Hadoop from the defaults shown above.

For details about the AMI versions currently supported by Amazon Elastic MapReduce, see AMI Versions Supported in Elastic MapReduce in the Amazon Elastic MapReduce Developer Guide.

Previously, the EMR AMI version API parameter options allowed you to use latest for the latest AMI version rather than specify a numerical value. Some regions no longer support this deprecated option as they only have a newer release label version of EMR, which requires you to specify an EMR release label release (EMR 4.x or later).

", - "RunJobFlowInput$ReleaseLabel": "

Amazon EMR releases 4.x or later.

The release label for the Amazon EMR release. For Amazon EMR 3.x and 2.x AMIs, use amiVersion instead instead of ReleaseLabel.

", + "RunJobFlowInput$AmiVersion": "

For Amazon EMR AMI versions 3.x and 2.x. For Amazon EMR releases 4.0 and later, the Linux AMI is determined by the ReleaseLabel specified or by CustomAmiID. The version of the Amazon Machine Image (AMI) to use when launching Amazon EC2 instances in the job flow. For details about the AMI versions currently supported in EMR version 3.x and 2.x, see AMI Versions Supported in EMR in the Amazon EMR Developer Guide.

If the AMI supports multiple versions of Hadoop (for example, AMI 1.0 supports both Hadoop 0.18 and 0.20), you can use the JobFlowInstancesConfig HadoopVersion parameter to modify the version of Hadoop from the defaults shown above.

Previously, the EMR AMI version API parameter options allowed you to use latest for the latest AMI version rather than specify a numerical value. Some regions no longer support this deprecated option as they only have a newer release label version of EMR, which requires you to specify an EMR release label (EMR 4.x or later).

", + "RunJobFlowInput$ReleaseLabel": "

The release label for the Amazon EMR release. For Amazon EMR 3.x and 2.x AMIs, use AmiVersion instead.

", + "RunJobFlowInput$CustomAmiId": "

Available only in Amazon EMR version 5.7.0 and later. The ID of a custom Amazon EBS-backed Linux AMI. If specified, Amazon EMR uses this AMI when it launches cluster EC2 instances. For more information about custom AMIs in Amazon EMR, see Using a Custom AMI in the Amazon EMR Management Guide. If omitted, the cluster uses the base Linux AMI for the ReleaseLabel specified. For Amazon EMR versions 2.x and 3.x, use AmiVersion instead.

For information about creating a custom AMI, see Creating an Amazon EBS-Backed Linux AMI in the Amazon Elastic Compute Cloud User Guide for Linux Instances. For information about finding an AMI ID, see Finding a Linux AMI.

", "RunJobFlowOutput$JobFlowId": "

A unique identifier for the job flow.

", "SecurityGroupsList$member": null, "StepConfig$Name": "

The name of the step.

", @@ -1576,8 +1587,8 @@ "XmlStringMaxLen256List": { "base": null, "refs": { - "Ec2InstanceAttributes$RequestedEc2SubnetIds": "

Applies to clusters configured with the instance fleets option. Specifies the unique identifier of one or more Amazon EC2 subnets in which to launch EC2 cluster instances. Amazon EMR chooses the EC2 subnet with the best performance and cost characteristics from among the list of RequestedEc2SubnetIds and launches all cluster instances within that subnet. If this value is not specified, and the account supports EC2-Classic networks, the cluster launches instances in the EC2-Classic network and uses Requested

", - "Ec2InstanceAttributes$RequestedEc2AvailabilityZones": "

Applies to clusters configured with the The list of availability zones to choose from. The service will choose the availability zone with the best mix of available capacity and lowest cost to launch the cluster. If you do not specify this value, the cluster is launched in any availability zone that the customer account has access to.

", + "Ec2InstanceAttributes$RequestedEc2SubnetIds": "

Applies to clusters configured with the instance fleets option. Specifies the unique identifier of one or more Amazon EC2 subnets in which to launch EC2 cluster instances. Subnets must exist within the same VPC. Amazon EMR chooses the EC2 subnet with the best fit from among the list of RequestedEc2SubnetIds, and then launches all cluster instances within that subnet. If this value is not specified, and the account and region support EC2-Classic networks, the cluster launches instances in the EC2-Classic network and uses RequestedEc2AvailabilityZones instead of this setting. If EC2-Classic is not supported, and no subnet is specified, Amazon EMR chooses the subnet for you. RequestedEc2SubnetIds and RequestedEc2AvailabilityZones cannot be specified together.

", + "Ec2InstanceAttributes$RequestedEc2AvailabilityZones": "

Applies to clusters configured with the instance fleets option. Specifies one or more Availability Zones in which to launch EC2 cluster instances when the EC2-Classic network configuration is supported. Amazon EMR chooses the Availability Zone with the best fit from among the list of RequestedEc2AvailabilityZones, and then launches all cluster instances within that Availability Zone. If you do not specify this value, Amazon EMR chooses the Availability Zone for you. RequestedEc2SubnetIds and RequestedEc2AvailabilityZones cannot be specified together.

", "JobFlowInstancesConfig$Ec2SubnetIds": "

Applies to clusters that use the instance fleet configuration. When multiple EC2 subnet IDs are specified, Amazon EMR evaluates them and launches instances in the optimal subnet.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.

", "PlacementType$AvailabilityZones": "

When multiple Availability Zones are specified, Amazon EMR evaluates them and launches instances in the optimal Availability Zone. AvailabilityZones is used for instance fleets, while AvailabilityZone (singular) is used for uniform instance groups.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.

" } diff --git a/cli/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/build_bench_test.go b/cli/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/build_bench_test.go index 081716739..2736446a7 100644 --- a/cli/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/build_bench_test.go +++ b/cli/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/build_bench_test.go @@ -3,47 +3,115 @@ package restxml_test import ( + "net/http" + "net/http/httptest" + "os" "testing" "bytes" "encoding/xml" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/private/protocol/restxml" "github.com/aws/aws-sdk-go/service/cloudfront" + "github.com/aws/aws-sdk-go/service/s3" ) -func BenchmarkRESTXMLBuild_Complex_cloudfrontCreateDistribution(b *testing.B) { - params := restxmlBuildCreateDistroParms +var ( + cloudfrontSvc *cloudfront.CloudFront + s3Svc *s3.S3 +) - op := &request.Operation{ - Name: "CreateDistribution", - HTTPMethod: "POST", - HTTPPath: "/2015-04-17/distribution/{DistributionId}/invalidation", - } +func TestMain(m *testing.M) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) - benchRESTXMLBuild(b, op, params) + sess := session.Must(session.NewSession(&aws.Config{ + Credentials: credentials.NewStaticCredentials("Key", "Secret", "Token"), + Endpoint: aws.String(server.URL), + S3ForcePathStyle: aws.Bool(true), + DisableSSL: aws.Bool(true), + Region: aws.String(endpoints.UsWest2RegionID), + })) + cloudfrontSvc = cloudfront.New(sess) + s3Svc = s3.New(sess) + + c := m.Run() + server.Close() + os.Exit(c) } -func BenchmarkRESTXMLBuild_Simple_cloudfrontDeleteStreamingDistribution(b *testing.B) { - params := &cloudfront.DeleteDistributionInput{ - Id: aws.String("string"), // Required - IfMatch: aws.String("string"), - } - op := &request.Operation{ - Name: "DeleteStreamingDistribution", - HTTPMethod: "DELETE", - HTTPPath: "/2015-04-17/streaming-distribution/{Id}", - } - benchRESTXMLBuild(b, op, params) +func BenchmarkRESTXMLBuild_Complex_CFCreateDistro(b *testing.B) { + params := cloudfrontCreateDistributionInput() + + benchRESTXMLBuild(b, func() *request.Request { + req, _ := cloudfrontSvc.CreateDistributionRequest(params) + return req + }) } -func BenchmarkEncodingXMLMarshal_Simple_cloudfrontDeleteStreamingDistribution(b *testing.B) { - params := &cloudfront.DeleteDistributionInput{ - Id: aws.String("string"), // Required - IfMatch: aws.String("string"), - } +func BenchmarkRESTXMLRequest_Complex_CFCreateDistro(b *testing.B) { + benchRESTXMLRequest(b, func() *request.Request { + req, _ := cloudfrontSvc.CreateDistributionRequest(cloudfrontCreateDistributionInput()) + return req + }) +} + +func BenchmarkRESTXMLBuild_Simple_CFDeleteDistro(b *testing.B) { + params := cloudfrontDeleteDistributionInput() + + benchRESTXMLBuild(b, func() *request.Request { + req, _ := cloudfrontSvc.DeleteDistributionRequest(params) + return req + }) +} + +func BenchmarkRESTXMLRequest_Simple_CFDeleteDistro(b *testing.B) { + benchRESTXMLRequest(b, func() *request.Request { + req, _ := cloudfrontSvc.DeleteDistributionRequest(cloudfrontDeleteDistributionInput()) + return req + }) +} + +func BenchmarkRESTXMLBuild_REST_S3HeadObject(b *testing.B) { + params := s3HeadObjectInput() + + benchRESTXMLBuild(b, func() 
*request.Request { + req, _ := s3Svc.HeadObjectRequest(params) + return req + }) +} + +func BenchmarkRESTXMLRequest_REST_S3HeadObject(b *testing.B) { + benchRESTXMLRequest(b, func() *request.Request { + req, _ := s3Svc.HeadObjectRequest(s3HeadObjectInput()) + return req + }) +} + +func BenchmarkRESTXMLBuild_XML_S3PutObjectAcl(b *testing.B) { + params := s3PutObjectAclInput() + + benchRESTXMLBuild(b, func() *request.Request { + req, _ := s3Svc.PutObjectAclRequest(params) + return req + }) +} + +func BenchmarkRESTXMLRequest_XML_S3PutObjectAcl(b *testing.B) { + benchRESTXMLRequest(b, func() *request.Request { + req, _ := s3Svc.PutObjectAclRequest(s3PutObjectAclInput()) + return req + }) +} + +func BenchmarkEncodingXML_Simple(b *testing.B) { + params := cloudfrontDeleteDistributionInput() for i := 0; i < b.N; i++ { buf := &bytes.Buffer{} @@ -54,118 +122,39 @@ func BenchmarkEncodingXMLMarshal_Simple_cloudfrontDeleteStreamingDistribution(b } } -func benchRESTXMLBuild(b *testing.B, op *request.Operation, params interface{}) { - svc := awstesting.NewClient() - svc.ServiceName = "cloudfront" - svc.APIVersion = "2015-04-17" +func benchRESTXMLBuild(b *testing.B, reqFn func() *request.Request) { + b.ResetTimer() for i := 0; i < b.N; i++ { - r := svc.NewRequest(op, params, nil) - restxml.Build(r) - if r.Error != nil { - b.Fatal("Unexpected error", r.Error) + req := reqFn() + restxml.Build(req) + if req.Error != nil { + b.Fatal("Unexpected error", req.Error) } } } -var restxmlBuildCreateDistroParms = &cloudfront.CreateDistributionInput{ - DistributionConfig: &cloudfront.DistributionConfig{ // Required - CallerReference: aws.String("string"), // Required - Comment: aws.String("string"), // Required - DefaultCacheBehavior: &cloudfront.DefaultCacheBehavior{ // Required - ForwardedValues: &cloudfront.ForwardedValues{ // Required - Cookies: &cloudfront.CookiePreference{ // Required - Forward: aws.String("ItemSelection"), // Required - WhitelistedNames: &cloudfront.CookieNames{ - Quantity: aws.Int64(1), // Required - Items: []*string{ - aws.String("string"), // Required - // More values... - }, - }, - }, - QueryString: aws.Bool(true), // Required - Headers: &cloudfront.Headers{ - Quantity: aws.Int64(1), // Required - Items: []*string{ - aws.String("string"), // Required - // More values... - }, - }, - }, - MinTTL: aws.Int64(1), // Required - TargetOriginId: aws.String("string"), // Required - TrustedSigners: &cloudfront.TrustedSigners{ // Required - Enabled: aws.Bool(true), // Required - Quantity: aws.Int64(1), // Required - Items: []*string{ - aws.String("string"), // Required - // More values... - }, - }, - ViewerProtocolPolicy: aws.String("ViewerProtocolPolicy"), // Required - AllowedMethods: &cloudfront.AllowedMethods{ - Items: []*string{ // Required - aws.String("Method"), // Required - // More values... - }, - Quantity: aws.Int64(1), // Required - CachedMethods: &cloudfront.CachedMethods{ - Items: []*string{ // Required - aws.String("Method"), // Required - // More values... 
- }, - Quantity: aws.Int64(1), // Required - }, - }, - DefaultTTL: aws.Int64(1), - MaxTTL: aws.Int64(1), - SmoothStreaming: aws.Bool(true), - }, - Enabled: aws.Bool(true), // Required - Origins: &cloudfront.Origins{ // Required - Quantity: aws.Int64(1), // Required - Items: []*cloudfront.Origin{ - { // Required - DomainName: aws.String("string"), // Required - Id: aws.String("string"), // Required - CustomOriginConfig: &cloudfront.CustomOriginConfig{ - HTTPPort: aws.Int64(1), // Required - HTTPSPort: aws.Int64(1), // Required - OriginProtocolPolicy: aws.String("OriginProtocolPolicy"), // Required - }, - OriginPath: aws.String("string"), - S3OriginConfig: &cloudfront.S3OriginConfig{ - OriginAccessIdentity: aws.String("string"), // Required - }, - }, - // More values... - }, - }, - Aliases: &cloudfront.Aliases{ - Quantity: aws.Int64(1), // Required - Items: []*string{ - aws.String("string"), // Required - // More values... - }, - }, - CacheBehaviors: &cloudfront.CacheBehaviors{ - Quantity: aws.Int64(1), // Required - Items: []*cloudfront.CacheBehavior{ - { // Required - ForwardedValues: &cloudfront.ForwardedValues{ // Required - Cookies: &cloudfront.CookiePreference{ // Required - Forward: aws.String("ItemSelection"), // Required - WhitelistedNames: &cloudfront.CookieNames{ - Quantity: aws.Int64(1), // Required - Items: []*string{ - aws.String("string"), // Required - // More values... - }, - }, - }, - QueryString: aws.Bool(true), // Required - Headers: &cloudfront.Headers{ +func benchRESTXMLRequest(b *testing.B, reqFn func() *request.Request) { + b.ResetTimer() + + for i := 0; i < b.N; i++ { + err := reqFn().Send() + if err != nil { + b.Fatal("Unexpected error", err) + } + } +} + +func cloudfrontCreateDistributionInput() *cloudfront.CreateDistributionInput { + return &cloudfront.CreateDistributionInput{ + DistributionConfig: &cloudfront.DistributionConfig{ // Required + CallerReference: aws.String("string"), // Required + Comment: aws.String("string"), // Required + DefaultCacheBehavior: &cloudfront.DefaultCacheBehavior{ // Required + ForwardedValues: &cloudfront.ForwardedValues{ // Required + Cookies: &cloudfront.CookiePreference{ // Required + Forward: aws.String("ItemSelection"), // Required + WhitelistedNames: &cloudfront.CookieNames{ Quantity: aws.Int64(1), // Required Items: []*string{ aws.String("string"), // Required @@ -173,74 +162,205 @@ var restxmlBuildCreateDistroParms = &cloudfront.CreateDistributionInput{ }, }, }, - MinTTL: aws.Int64(1), // Required - PathPattern: aws.String("string"), // Required - TargetOriginId: aws.String("string"), // Required - TrustedSigners: &cloudfront.TrustedSigners{ // Required - Enabled: aws.Bool(true), // Required - Quantity: aws.Int64(1), // Required + QueryString: aws.Bool(true), // Required + Headers: &cloudfront.Headers{ + Quantity: aws.Int64(1), // Required Items: []*string{ aws.String("string"), // Required // More values... }, }, - ViewerProtocolPolicy: aws.String("ViewerProtocolPolicy"), // Required - AllowedMethods: &cloudfront.AllowedMethods{ + }, + MinTTL: aws.Int64(1), // Required + TargetOriginId: aws.String("string"), // Required + TrustedSigners: &cloudfront.TrustedSigners{ // Required + Enabled: aws.Bool(true), // Required + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... 
+ }, + }, + ViewerProtocolPolicy: aws.String("ViewerProtocolPolicy"), // Required + AllowedMethods: &cloudfront.AllowedMethods{ + Items: []*string{ // Required + aws.String("Method"), // Required + // More values... + }, + Quantity: aws.Int64(1), // Required + CachedMethods: &cloudfront.CachedMethods{ Items: []*string{ // Required aws.String("Method"), // Required // More values... }, Quantity: aws.Int64(1), // Required - CachedMethods: &cloudfront.CachedMethods{ - Items: []*string{ // Required - aws.String("Method"), // Required - // More values... - }, - Quantity: aws.Int64(1), // Required + }, + }, + DefaultTTL: aws.Int64(1), + MaxTTL: aws.Int64(1), + SmoothStreaming: aws.Bool(true), + }, + Enabled: aws.Bool(true), // Required + Origins: &cloudfront.Origins{ // Required + Quantity: aws.Int64(1), // Required + Items: []*cloudfront.Origin{ + { // Required + DomainName: aws.String("string"), // Required + Id: aws.String("string"), // Required + CustomOriginConfig: &cloudfront.CustomOriginConfig{ + HTTPPort: aws.Int64(1), // Required + HTTPSPort: aws.Int64(1), // Required + OriginProtocolPolicy: aws.String("OriginProtocolPolicy"), // Required + }, + OriginPath: aws.String("string"), + S3OriginConfig: &cloudfront.S3OriginConfig{ + OriginAccessIdentity: aws.String("string"), // Required }, }, - DefaultTTL: aws.Int64(1), - MaxTTL: aws.Int64(1), - SmoothStreaming: aws.Bool(true), + // More values... }, - // More values... }, - }, - CustomErrorResponses: &cloudfront.CustomErrorResponses{ - Quantity: aws.Int64(1), // Required - Items: []*cloudfront.CustomErrorResponse{ - { // Required - ErrorCode: aws.Int64(1), // Required - ErrorCachingMinTTL: aws.Int64(1), - ResponseCode: aws.String("string"), - ResponsePagePath: aws.String("string"), - }, - // More values... - }, - }, - DefaultRootObject: aws.String("string"), - Logging: &cloudfront.LoggingConfig{ - Bucket: aws.String("string"), // Required - Enabled: aws.Bool(true), // Required - IncludeCookies: aws.Bool(true), // Required - Prefix: aws.String("string"), // Required - }, - PriceClass: aws.String("PriceClass"), - Restrictions: &cloudfront.Restrictions{ - GeoRestriction: &cloudfront.GeoRestriction{ // Required - Quantity: aws.Int64(1), // Required - RestrictionType: aws.String("GeoRestrictionType"), // Required + Aliases: &cloudfront.Aliases{ + Quantity: aws.Int64(1), // Required Items: []*string{ aws.String("string"), // Required // More values... }, }, + CacheBehaviors: &cloudfront.CacheBehaviors{ + Quantity: aws.Int64(1), // Required + Items: []*cloudfront.CacheBehavior{ + { // Required + ForwardedValues: &cloudfront.ForwardedValues{ // Required + Cookies: &cloudfront.CookiePreference{ // Required + Forward: aws.String("ItemSelection"), // Required + WhitelistedNames: &cloudfront.CookieNames{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + }, + QueryString: aws.Bool(true), // Required + Headers: &cloudfront.Headers{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + }, + MinTTL: aws.Int64(1), // Required + PathPattern: aws.String("string"), // Required + TargetOriginId: aws.String("string"), // Required + TrustedSigners: &cloudfront.TrustedSigners{ // Required + Enabled: aws.Bool(true), // Required + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... 
+ }, + }, + ViewerProtocolPolicy: aws.String("ViewerProtocolPolicy"), // Required + AllowedMethods: &cloudfront.AllowedMethods{ + Items: []*string{ // Required + aws.String("Method"), // Required + // More values... + }, + Quantity: aws.Int64(1), // Required + CachedMethods: &cloudfront.CachedMethods{ + Items: []*string{ // Required + aws.String("Method"), // Required + // More values... + }, + Quantity: aws.Int64(1), // Required + }, + }, + DefaultTTL: aws.Int64(1), + MaxTTL: aws.Int64(1), + SmoothStreaming: aws.Bool(true), + }, + // More values... + }, + }, + CustomErrorResponses: &cloudfront.CustomErrorResponses{ + Quantity: aws.Int64(1), // Required + Items: []*cloudfront.CustomErrorResponse{ + { // Required + ErrorCode: aws.Int64(1), // Required + ErrorCachingMinTTL: aws.Int64(1), + ResponseCode: aws.String("string"), + ResponsePagePath: aws.String("string"), + }, + // More values... + }, + }, + DefaultRootObject: aws.String("string"), + Logging: &cloudfront.LoggingConfig{ + Bucket: aws.String("string"), // Required + Enabled: aws.Bool(true), // Required + IncludeCookies: aws.Bool(true), // Required + Prefix: aws.String("string"), // Required + }, + PriceClass: aws.String("PriceClass"), + Restrictions: &cloudfront.Restrictions{ + GeoRestriction: &cloudfront.GeoRestriction{ // Required + Quantity: aws.Int64(1), // Required + RestrictionType: aws.String("GeoRestrictionType"), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + }, + ViewerCertificate: &cloudfront.ViewerCertificate{ + CloudFrontDefaultCertificate: aws.Bool(true), + IAMCertificateId: aws.String("string"), + MinimumProtocolVersion: aws.String("MinimumProtocolVersion"), + SSLSupportMethod: aws.String("SSLSupportMethod"), + }, }, - ViewerCertificate: &cloudfront.ViewerCertificate{ - CloudFrontDefaultCertificate: aws.Bool(true), - IAMCertificateId: aws.String("string"), - MinimumProtocolVersion: aws.String("MinimumProtocolVersion"), - SSLSupportMethod: aws.String("SSLSupportMethod"), - }, - }, + } +} + +func cloudfrontDeleteDistributionInput() *cloudfront.DeleteDistributionInput { + return &cloudfront.DeleteDistributionInput{ + Id: aws.String("string"), // Required + IfMatch: aws.String("string"), + } +} + +func s3HeadObjectInput() *s3.HeadObjectInput { + return &s3.HeadObjectInput{ + Bucket: aws.String("somebucketname"), + Key: aws.String("keyname"), + VersionId: aws.String("someVersion"), + IfMatch: aws.String("IfMatch"), + } +} + +func s3PutObjectAclInput() *s3.PutObjectAclInput { + return &s3.PutObjectAclInput{ + Bucket: aws.String("somebucketname"), + Key: aws.String("keyname"), + AccessControlPolicy: &s3.AccessControlPolicy{ + Grants: []*s3.Grant{ + { + Grantee: &s3.Grantee{ + DisplayName: aws.String("someName"), + EmailAddress: aws.String("someAddr"), + ID: aws.String("someID"), + Type: aws.String(s3.TypeCanonicalUser), + URI: aws.String("someURI"), + }, + Permission: aws.String(s3.PermissionWrite), + }, + }, + Owner: &s3.Owner{ + DisplayName: aws.String("howdy"), + ID: aws.String("someID"), + }, + }, + } } diff --git a/cli/vendor/github.com/aws/aws-sdk-go/service/appstream/api.go b/cli/vendor/github.com/aws/aws-sdk-go/service/appstream/api.go index 54679838c..177593a50 100644 --- a/cli/vendor/github.com/aws/aws-sdk-go/service/appstream/api.go +++ b/cli/vendor/github.com/aws/aws-sdk-go/service/appstream/api.go @@ -78,6 +78,9 @@ func (c *AppStream) AssociateFleetRequest(input *AssociateFleetInput) (req *requ // * ErrCodeIncompatibleImageException 
"IncompatibleImageException" // The image does not support storage connectors. // +// * ErrCodeOperationNotPermittedException "OperationNotPermittedException" +// The attempted operation is not permitted. +// // Please also see https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/AssociateFleet func (c *AppStream) AssociateFleet(input *AssociateFleetInput) (*AssociateFleetOutput, error) { req, out := c.AssociateFleetRequest(input) @@ -100,6 +103,89 @@ func (c *AppStream) AssociateFleetWithContext(ctx aws.Context, input *AssociateF return out, req.Send() } +const opCreateDirectoryConfig = "CreateDirectoryConfig" + +// CreateDirectoryConfigRequest generates a "aws/request.Request" representing the +// client's request for the CreateDirectoryConfig operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See CreateDirectoryConfig for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDirectoryConfig method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDirectoryConfigRequest method. +// req, resp := client.CreateDirectoryConfigRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/CreateDirectoryConfig +func (c *AppStream) CreateDirectoryConfigRequest(input *CreateDirectoryConfigInput) (req *request.Request, output *CreateDirectoryConfigOutput) { + op := &request.Operation{ + Name: opCreateDirectoryConfig, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDirectoryConfigInput{} + } + + output = &CreateDirectoryConfigOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateDirectoryConfig API operation for Amazon AppStream. +// +// Creates a directory configuration with the given parameters. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation CreateDirectoryConfig for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException" +// The specified resource already exists. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The requested limit exceeds the permitted limit for an account. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/CreateDirectoryConfig +func (c *AppStream) CreateDirectoryConfig(input *CreateDirectoryConfigInput) (*CreateDirectoryConfigOutput, error) { + req, out := c.CreateDirectoryConfigRequest(input) + return out, req.Send() +} + +// CreateDirectoryConfigWithContext is the same as CreateDirectoryConfig with the addition of +// the ability to pass a context and additional request options. +// +// See CreateDirectoryConfig for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) CreateDirectoryConfigWithContext(ctx aws.Context, input *CreateDirectoryConfigInput, opts ...request.Option) (*CreateDirectoryConfigOutput, error) { + req, out := c.CreateDirectoryConfigRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateFleet = "CreateFleet" // CreateFleetRequest generates a "aws/request.Request" representing the @@ -173,6 +259,12 @@ func (c *AppStream) CreateFleetRequest(input *CreateFleetInput) (req *request.Re // * ErrCodeConcurrentModificationException "ConcurrentModificationException" // An API error occurred. Wait a few minutes and try again. // +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombinationException" +// Indicates an incorrect combination of parameters, or a missing parameter. +// +// * ErrCodeIncompatibleImageException "IncompatibleImageException" +// The image does not support storage connectors. +// // Please also see https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/CreateFleet func (c *AppStream) CreateFleet(input *CreateFleetInput) (*CreateFleetOutput, error) { req, out := c.CreateFleetRequest(input) @@ -380,6 +472,89 @@ func (c *AppStream) CreateStreamingURLWithContext(ctx aws.Context, input *Create return out, req.Send() } +const opDeleteDirectoryConfig = "DeleteDirectoryConfig" + +// DeleteDirectoryConfigRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDirectoryConfig operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteDirectoryConfig for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDirectoryConfig method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteDirectoryConfigRequest method. +// req, resp := client.DeleteDirectoryConfigRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DeleteDirectoryConfig +func (c *AppStream) DeleteDirectoryConfigRequest(input *DeleteDirectoryConfigInput) (req *request.Request, output *DeleteDirectoryConfigOutput) { + op := &request.Operation{ + Name: opDeleteDirectoryConfig, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDirectoryConfigInput{} + } + + output = &DeleteDirectoryConfigOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteDirectoryConfig API operation for Amazon AppStream. +// +// Deletes the directory configuration with the given parameters. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
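+//
+// For example, a caller can switch on the returned error code (a minimal
+// sketch from the caller's side; it assumes an initialized AppStream client
+// named svc and a prepared input, both illustrative):
+//
+//    _, err := svc.DeleteDirectoryConfig(input)
+//    if aerr, ok := err.(awserr.Error); ok {
+//        switch aerr.Code() {
+//        case appstream.ErrCodeResourceInUseException:
+//            // The directory configuration is still in use; retry later.
+//        case appstream.ErrCodeResourceNotFoundException:
+//            // Nothing to delete; treat as success.
+//        default:
+//            fmt.Println(aerr.Code(), aerr.Message())
+//        }
+//    }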
+// +// See the AWS API reference guide for Amazon AppStream's +// API operation DeleteDirectoryConfig for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceInUseException "ResourceInUseException" +// The specified resource is in use. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DeleteDirectoryConfig +func (c *AppStream) DeleteDirectoryConfig(input *DeleteDirectoryConfigInput) (*DeleteDirectoryConfigOutput, error) { + req, out := c.DeleteDirectoryConfigRequest(input) + return out, req.Send() +} + +// DeleteDirectoryConfigWithContext is the same as DeleteDirectoryConfig with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteDirectoryConfig for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppStream) DeleteDirectoryConfigWithContext(ctx aws.Context, input *DeleteDirectoryConfigInput, opts ...request.Option) (*DeleteDirectoryConfigOutput, error) { + req, out := c.DeleteDirectoryConfigRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteFleet = "DeleteFleet" // DeleteFleetRequest generates a "aws/request.Request" representing the @@ -553,6 +728,86 @@ func (c *AppStream) DeleteStackWithContext(ctx aws.Context, input *DeleteStackIn return out, req.Send() } +const opDescribeDirectoryConfigs = "DescribeDirectoryConfigs" + +// DescribeDirectoryConfigsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDirectoryConfigs operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DescribeDirectoryConfigs for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDirectoryConfigs method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDirectoryConfigsRequest method. +// req, resp := client.DescribeDirectoryConfigsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DescribeDirectoryConfigs +func (c *AppStream) DescribeDirectoryConfigsRequest(input *DescribeDirectoryConfigsInput) (req *request.Request, output *DescribeDirectoryConfigsOutput) { + op := &request.Operation{ + Name: opDescribeDirectoryConfigs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDirectoryConfigsInput{} + } + + output = &DescribeDirectoryConfigsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeDirectoryConfigs API operation for Amazon AppStream. +// +// Returns a list describing the specified directory configurations. 
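+//
+// A minimal caller-side sketch (the client svc and the directory name are
+// illustrative):
+//
+//    out, err := svc.DescribeDirectoryConfigs(&appstream.DescribeDirectoryConfigsInput{
+//        DirectoryNames: []*string{aws.String("corp.example.com")},
+//    })
+//    if err == nil {
+//        for _, dc := range out.DirectoryConfigs {
+//            fmt.Println(aws.StringValue(dc.DirectoryName))
+//        }
+//    }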
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon AppStream's
+// API operation DescribeDirectoryConfigs for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeResourceNotFoundException "ResourceNotFoundException"
+//   The specified resource was not found.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DescribeDirectoryConfigs
+func (c *AppStream) DescribeDirectoryConfigs(input *DescribeDirectoryConfigsInput) (*DescribeDirectoryConfigsOutput, error) {
+	req, out := c.DescribeDirectoryConfigsRequest(input)
+	return out, req.Send()
+}
+
+// DescribeDirectoryConfigsWithContext is the same as DescribeDirectoryConfigs with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeDirectoryConfigs for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *AppStream) DescribeDirectoryConfigsWithContext(ctx aws.Context, input *DescribeDirectoryConfigsInput, opts ...request.Option) (*DescribeDirectoryConfigsOutput, error) {
+	req, out := c.DescribeDirectoryConfigsRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
 const opDescribeFleets = "DescribeFleets"
 
 // DescribeFleetsRequest generates a "aws/request.Request" representing the
@@ -761,10 +1016,11 @@ func (c *AppStream) DescribeSessionsRequest(input *DescribeSessionsInput) (req *
 // DescribeSessions API operation for Amazon AppStream.
 //
 // Describes the streaming sessions for a stack and a fleet. If a user ID is
-// provided, this operation returns streaming sessions for only that user. Pass
-// this value for the nextToken parameter in a subsequent call to this operation
-// to retrieve the next set of items. If an authentication type is not provided,
-// the operation defaults to users authenticated using a streaming URL.
+// provided, this operation returns streaming sessions for only that user. To
+// retrieve the next set of items, pass this value for the nextToken parameter
+// in a subsequent call to this operation. If an authentication type is not
+// provided, the operation defaults to users authenticated using a streaming
+// URL.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -845,8 +1101,8 @@ func (c *AppStream) DescribeStacksRequest(input *DescribeStacksInput) (req *requ
 // DescribeStacks API operation for Amazon AppStream.
 //
 // If stack names are provided, this operation describes the specified stacks;
-// otherwise, all stacks in the account are described. Pass the nextToken value
-// in a subsequent call to this operation to retrieve the next set of items.
+// otherwise, all stacks in the account are described. To retrieve the next
+// set of items, pass the nextToken value in a subsequent call to this operation.
 //
 // Returns awserr.Error for service API and SDK errors.
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1364,6 +1620,92 @@ func (c *AppStream) StopFleetWithContext(ctx aws.Context, input *StopFleetInput, return out, req.Send() } +const opUpdateDirectoryConfig = "UpdateDirectoryConfig" + +// UpdateDirectoryConfigRequest generates a "aws/request.Request" representing the +// client's request for the UpdateDirectoryConfig operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See UpdateDirectoryConfig for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateDirectoryConfig method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateDirectoryConfigRequest method. +// req, resp := client.UpdateDirectoryConfigRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/UpdateDirectoryConfig +func (c *AppStream) UpdateDirectoryConfigRequest(input *UpdateDirectoryConfigInput) (req *request.Request, output *UpdateDirectoryConfigOutput) { + op := &request.Operation{ + Name: opUpdateDirectoryConfig, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateDirectoryConfigInput{} + } + + output = &UpdateDirectoryConfigOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateDirectoryConfig API operation for Amazon AppStream. +// +// Updates the directory configuration with the given parameters. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation UpdateDirectoryConfig for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceInUseException "ResourceInUseException" +// The specified resource is in use. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. +// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// An API error occurred. Wait a few minutes and try again. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/UpdateDirectoryConfig +func (c *AppStream) UpdateDirectoryConfig(input *UpdateDirectoryConfigInput) (*UpdateDirectoryConfigOutput, error) { + req, out := c.UpdateDirectoryConfigRequest(input) + return out, req.Send() +} + +// UpdateDirectoryConfigWithContext is the same as UpdateDirectoryConfig with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateDirectoryConfig for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
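+//
+// For example, a deadline can be applied to the call (a sketch; svc and
+// input are assumed to be an initialized client and a prepared request
+// input):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    out, err := svc.UpdateDirectoryConfigWithContext(ctx, input)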
+func (c *AppStream) UpdateDirectoryConfigWithContext(ctx aws.Context, input *UpdateDirectoryConfigInput, opts ...request.Option) (*UpdateDirectoryConfigOutput, error) { + req, out := c.UpdateDirectoryConfigRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdateFleet = "UpdateFleet" // UpdateFleetRequest generates a "aws/request.Request" representing the @@ -1446,6 +1788,9 @@ func (c *AppStream) UpdateFleetRequest(input *UpdateFleetInput) (req *request.Re // * ErrCodeIncompatibleImageException "IncompatibleImageException" // The image does not support storage connectors. // +// * ErrCodeOperationNotPermittedException "OperationNotPermittedException" +// The attempted operation is not permitted. +// // Please also see https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/UpdateFleet func (c *AppStream) UpdateFleet(input *UpdateFleetInput) (*UpdateFleetOutput, error) { req, out := c.UpdateFleetRequest(input) @@ -1571,7 +1916,7 @@ type Application struct { // The name of the application shown to the end users. DisplayName *string `min:"1" type:"string"` - // An application can be disabled after image creation if there is a problem. + // If there is a problem, an application can be disabled after image creation. Enabled *bool `type:"boolean"` // The URL for the application icon. This URL may be time-limited. @@ -1810,6 +2155,104 @@ func (s *ComputeCapacityStatus) SetRunning(v int64) *ComputeCapacityStatus { return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/CreateDirectoryConfigRequest +type CreateDirectoryConfigInput struct { + _ struct{} `type:"structure"` + + // The fully qualified name of the directory, such as corp.example.com + // + // DirectoryName is a required field + DirectoryName *string `type:"string" required:"true"` + + // The list of the distinguished names of organizational units to place computer + // accounts in. + // + // OrganizationalUnitDistinguishedNames is a required field + OrganizationalUnitDistinguishedNames []*string `type:"list" required:"true"` + + // The AccountName and AccountPassword values for the service account, which + // are used by the streaming instance to connect to the directory. + // + // ServiceAccountCredentials is a required field + ServiceAccountCredentials *ServiceAccountCredentials `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateDirectoryConfigInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDirectoryConfigInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
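+//
+// A caller-side sketch of building this input and validating it before the
+// request is sent (all values are placeholders):
+//
+//    input := &appstream.CreateDirectoryConfigInput{
+//        DirectoryName: aws.String("corp.example.com"),
+//        OrganizationalUnitDistinguishedNames: []*string{
+//            aws.String("OU=Streaming,DC=corp,DC=example,DC=com"),
+//        },
+//        ServiceAccountCredentials: &appstream.ServiceAccountCredentials{
+//            AccountName:     aws.String(`corp\appstream-svc`),
+//            AccountPassword: aws.String(password), // obtained from a secret store
+//        },
+//    }
+//    if err := input.Validate(); err != nil {
+//        // Handle invalid parameters before making the API call.
+//    }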
+func (s *CreateDirectoryConfigInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDirectoryConfigInput"} + if s.DirectoryName == nil { + invalidParams.Add(request.NewErrParamRequired("DirectoryName")) + } + if s.OrganizationalUnitDistinguishedNames == nil { + invalidParams.Add(request.NewErrParamRequired("OrganizationalUnitDistinguishedNames")) + } + if s.ServiceAccountCredentials == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceAccountCredentials")) + } + if s.ServiceAccountCredentials != nil { + if err := s.ServiceAccountCredentials.Validate(); err != nil { + invalidParams.AddNested("ServiceAccountCredentials", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDirectoryName sets the DirectoryName field's value. +func (s *CreateDirectoryConfigInput) SetDirectoryName(v string) *CreateDirectoryConfigInput { + s.DirectoryName = &v + return s +} + +// SetOrganizationalUnitDistinguishedNames sets the OrganizationalUnitDistinguishedNames field's value. +func (s *CreateDirectoryConfigInput) SetOrganizationalUnitDistinguishedNames(v []*string) *CreateDirectoryConfigInput { + s.OrganizationalUnitDistinguishedNames = v + return s +} + +// SetServiceAccountCredentials sets the ServiceAccountCredentials field's value. +func (s *CreateDirectoryConfigInput) SetServiceAccountCredentials(v *ServiceAccountCredentials) *CreateDirectoryConfigInput { + s.ServiceAccountCredentials = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/CreateDirectoryConfigResult +type CreateDirectoryConfigOutput struct { + _ struct{} `type:"structure"` + + // Directory configuration details. + DirectoryConfig *DirectoryConfig `type:"structure"` +} + +// String returns the string representation +func (s CreateDirectoryConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDirectoryConfigOutput) GoString() string { + return s.String() +} + +// SetDirectoryConfig sets the DirectoryConfig field's value. +func (s *CreateDirectoryConfigOutput) SetDirectoryConfig(v *DirectoryConfig) *CreateDirectoryConfigOutput { + s.DirectoryConfig = v + return s +} + // Contains the parameters for the new fleet to create. // Please also see https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/CreateFleetRequest type CreateFleetInput struct { @@ -1832,7 +2275,11 @@ type CreateFleetInput struct { // The display name of the fleet. DisplayName *string `type:"string"` - // Enables or disables default Internet access for the fleet. + // The DirectoryName and OrganizationalUnitDistinguishedName values, which are + // used to join domains for the AppStream 2.0 streaming instances. + DomainJoinInfo *DomainJoinInfo `type:"structure"` + + // Enables or disables default internet access for the fleet. EnableDefaultInternetAccess *bool `type:"boolean"` // Unique name of the image used by the fleet. @@ -1841,7 +2288,31 @@ type CreateFleetInput struct { ImageName *string `min:"1" type:"string" required:"true"` // The instance type of compute resources for the fleet. Fleet instances are - // launched from this instance type. + // launched from this instance type. 
Available instance types are: + // + // * stream.standard.medium + // + // * stream.standard.large + // + // * stream.compute.large + // + // * stream.compute.xlarge + // + // * stream.compute.2xlarge + // + // * stream.compute.4xlarge + // + // * stream.compute.8xlarge + // + // * stream.memory.large + // + // * stream.memory.xlarge + // + // * stream.memory.2xlarge + // + // * stream.memory.4xlarge + // + // * stream.memory.8xlarge // // InstanceType is a required field InstanceType *string `min:"1" type:"string" required:"true"` @@ -1926,6 +2397,12 @@ func (s *CreateFleetInput) SetDisplayName(v string) *CreateFleetInput { return s } +// SetDomainJoinInfo sets the DomainJoinInfo field's value. +func (s *CreateFleetInput) SetDomainJoinInfo(v *DomainJoinInfo) *CreateFleetInput { + s.DomainJoinInfo = v + return s +} + // SetEnableDefaultInternetAccess sets the EnableDefaultInternetAccess field's value. func (s *CreateFleetInput) SetEnableDefaultInternetAccess(v bool) *CreateFleetInput { s.EnableDefaultInternetAccess = &v @@ -2203,7 +2680,7 @@ func (s *CreateStreamingURLInput) SetValidity(v int64) *CreateStreamingURLInput type CreateStreamingURLOutput struct { _ struct{} `type:"structure"` - // Elapsed seconds after the Unix epoch, at which time this URL expires. + // Elapsed seconds after the Unix epoch, when this URL expires. Expires *time.Time `type:"timestamp" timestampFormat:"unix"` // The URL to start the AppStream 2.0 streaming session. @@ -2232,6 +2709,60 @@ func (s *CreateStreamingURLOutput) SetStreamingURL(v string) *CreateStreamingURL return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DeleteDirectoryConfigRequest +type DeleteDirectoryConfigInput struct { + _ struct{} `type:"structure"` + + // The name of the directory configuration to be deleted. + // + // DirectoryName is a required field + DirectoryName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDirectoryConfigInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDirectoryConfigInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDirectoryConfigInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDirectoryConfigInput"} + if s.DirectoryName == nil { + invalidParams.Add(request.NewErrParamRequired("DirectoryName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDirectoryName sets the DirectoryName field's value. 
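+//
+// The generated setters return the receiver, so they can be chained when
+// building the input; for example (directory name illustrative, svc an
+// assumed client):
+//
+//    input := (&appstream.DeleteDirectoryConfigInput{}).
+//        SetDirectoryName("corp.example.com")
+//    _, err := svc.DeleteDirectoryConfig(input)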
+func (s *DeleteDirectoryConfigInput) SetDirectoryName(v string) *DeleteDirectoryConfigInput { + s.DirectoryName = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DeleteDirectoryConfigResult +type DeleteDirectoryConfigOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDirectoryConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDirectoryConfigOutput) GoString() string { + return s.String() +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DeleteFleetRequest type DeleteFleetInput struct { _ struct{} `type:"structure"` @@ -2346,6 +2877,96 @@ func (s DeleteStackOutput) GoString() string { return s.String() } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DescribeDirectoryConfigsRequest +type DescribeDirectoryConfigsInput struct { + _ struct{} `type:"structure"` + + // A specific list of directory names. + DirectoryNames []*string `type:"list"` + + // The size of each page of results. + MaxResults *int64 `type:"integer"` + + // The DescribeDirectoryConfigsResult.NextToken from a previous call to DescribeDirectoryConfigs. + // If this is the first call, pass null. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeDirectoryConfigsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDirectoryConfigsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDirectoryConfigsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDirectoryConfigsInput"} + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDirectoryNames sets the DirectoryNames field's value. +func (s *DescribeDirectoryConfigsInput) SetDirectoryNames(v []*string) *DescribeDirectoryConfigsInput { + s.DirectoryNames = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeDirectoryConfigsInput) SetMaxResults(v int64) *DescribeDirectoryConfigsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeDirectoryConfigsInput) SetNextToken(v string) *DescribeDirectoryConfigsInput { + s.NextToken = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DescribeDirectoryConfigsResult +type DescribeDirectoryConfigsOutput struct { + _ struct{} `type:"structure"` + + // The list of directory configurations. + DirectoryConfigs []*DirectoryConfig `type:"list"` + + // If not null, more results are available. To retrieve the next set of items, + // pass this value for the NextToken parameter in a subsequent call to DescribeDirectoryConfigs. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeDirectoryConfigsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDirectoryConfigsOutput) GoString() string { + return s.String() +} + +// SetDirectoryConfigs sets the DirectoryConfigs field's value. 
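+//
+// Because DescribeDirectoryConfigs results are paged, callers typically loop
+// until NextToken comes back empty; a sketch (svc is an assumed client):
+//
+//    input := &appstream.DescribeDirectoryConfigsInput{}
+//    for {
+//        page, err := svc.DescribeDirectoryConfigs(input)
+//        if err != nil {
+//            break // or handle the error
+//        }
+//        // ... consume page.DirectoryConfigs ...
+//        if aws.StringValue(page.NextToken) == "" {
+//            break
+//        }
+//        input.NextToken = page.NextToken
+//    }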
+func (s *DescribeDirectoryConfigsOutput) SetDirectoryConfigs(v []*DirectoryConfig) *DescribeDirectoryConfigsOutput { + s.DirectoryConfigs = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeDirectoryConfigsOutput) SetNextToken(v string) *DescribeDirectoryConfigsOutput { + s.NextToken = &v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DescribeFleetsRequest type DescribeFleetsInput struct { _ struct{} `type:"structure"` @@ -2699,6 +3320,64 @@ func (s *DescribeStacksOutput) SetStacks(v []*Stack) *DescribeStacksOutput { return s } +// Full directory configuration details, which are used to join domains for +// the AppStream 2.0 streaming instances. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DirectoryConfig +type DirectoryConfig struct { + _ struct{} `type:"structure"` + + // The time stamp when the directory configuration was created within AppStream + // 2.0. + CreatedTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The fully qualified name of the directory, such as corp.example.com + // + // DirectoryName is a required field + DirectoryName *string `type:"string" required:"true"` + + // The list of the distinguished names of organizational units in which to place + // computer accounts. + OrganizationalUnitDistinguishedNames []*string `type:"list"` + + // The AccountName and AccountPassword of the service account, to be used by + // the streaming instance to connect to the directory. + ServiceAccountCredentials *ServiceAccountCredentials `type:"structure"` +} + +// String returns the string representation +func (s DirectoryConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DirectoryConfig) GoString() string { + return s.String() +} + +// SetCreatedTime sets the CreatedTime field's value. +func (s *DirectoryConfig) SetCreatedTime(v time.Time) *DirectoryConfig { + s.CreatedTime = &v + return s +} + +// SetDirectoryName sets the DirectoryName field's value. +func (s *DirectoryConfig) SetDirectoryName(v string) *DirectoryConfig { + s.DirectoryName = &v + return s +} + +// SetOrganizationalUnitDistinguishedNames sets the OrganizationalUnitDistinguishedNames field's value. +func (s *DirectoryConfig) SetOrganizationalUnitDistinguishedNames(v []*string) *DirectoryConfig { + s.OrganizationalUnitDistinguishedNames = v + return s +} + +// SetServiceAccountCredentials sets the ServiceAccountCredentials field's value. +func (s *DirectoryConfig) SetServiceAccountCredentials(v *ServiceAccountCredentials) *DirectoryConfig { + s.ServiceAccountCredentials = v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DisassociateFleetRequest type DisassociateFleetInput struct { _ struct{} `type:"structure"` @@ -2773,6 +3452,42 @@ func (s DisassociateFleetOutput) GoString() string { return s.String() } +// The DirectoryName and OrganizationalUnitDistinguishedName values, which are +// used to join domains for the AppStream 2.0 streaming instances. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DomainJoinInfo +type DomainJoinInfo struct { + _ struct{} `type:"structure"` + + // The fully qualified name of the directory, such as corp.example.com + DirectoryName *string `type:"string"` + + // The distinguished name of the organizational unit to place the computer account + // in. 
+ OrganizationalUnitDistinguishedName *string `type:"string"` +} + +// String returns the string representation +func (s DomainJoinInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DomainJoinInfo) GoString() string { + return s.String() +} + +// SetDirectoryName sets the DirectoryName field's value. +func (s *DomainJoinInfo) SetDirectoryName(v string) *DomainJoinInfo { + s.DirectoryName = &v + return s +} + +// SetOrganizationalUnitDistinguishedName sets the OrganizationalUnitDistinguishedName field's value. +func (s *DomainJoinInfo) SetOrganizationalUnitDistinguishedName(v string) *DomainJoinInfo { + s.OrganizationalUnitDistinguishedName = &v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/ExpireSessionRequest type ExpireSessionInput struct { _ struct{} `type:"structure"` @@ -2860,7 +3575,11 @@ type Fleet struct { // The name displayed to end users on the AppStream 2.0 portal. DisplayName *string `min:"1" type:"string"` - // Whether default Internet access is enabled for the fleet. + // The DirectoryName and OrganizationalUnitDistinguishedName values, which are + // used to join domains for the AppStream 2.0 streaming instances. + DomainJoinInfo *DomainJoinInfo `type:"structure"` + + // Whether default internet access is enabled for the fleet. EnableDefaultInternetAccess *bool `type:"boolean"` // The list of fleet errors is appended to this list. @@ -2941,6 +3660,12 @@ func (s *Fleet) SetDisplayName(v string) *Fleet { return s } +// SetDomainJoinInfo sets the DomainJoinInfo field's value. +func (s *Fleet) SetDomainJoinInfo(v *DomainJoinInfo) *Fleet { + s.DomainJoinInfo = v + return s +} + // SetEnableDefaultInternetAccess sets the EnableDefaultInternetAccess field's value. func (s *Fleet) SetEnableDefaultInternetAccess(v bool) *Fleet { s.EnableDefaultInternetAccess = &v @@ -3038,7 +3763,7 @@ type Image struct { // The source image ARN from which this image was created. BaseImageArn *string `type:"string"` - // The timestamp when the image was created. + // The time stamp when the image was created. CreatedTime *time.Time `type:"timestamp" timestampFormat:"unix"` // A meaningful description for the image. @@ -3062,8 +3787,8 @@ type Image struct { // is the release date of the base image from which the image was created. PublicBaseImageReleasedDate *time.Time `type:"timestamp" timestampFormat:"unix"` - // The image starts in the PENDING state, and then moves to AVAILABLE if image - // creation succeeds and FAILED if image creation has failed. + // The image starts in the PENDING state. If image creation succeeds, it moves + // to AVAILABLE. If image creation fails, it moves to FAILED. State *string `type:"string" enum:"ImageState"` // The reason why the last state change occurred. @@ -3375,6 +4100,71 @@ func (s *ListAssociatedStacksOutput) SetNextToken(v string) *ListAssociatedStack return s } +// The AccountName and AccountPassword of the service account, to be used by +// the streaming instance to connect to the directory. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/ServiceAccountCredentials +type ServiceAccountCredentials struct { + _ struct{} `type:"structure"` + + // The user name of an account in the directory that is used by AppStream 2.0 + // streaming instances to connect to the directory. 
This account must have the + // following privileges: create computer objects, join computers to the domain, + // change/reset the password on descendant computer objects for the organizational + // units specified. + // + // AccountName is a required field + AccountName *string `min:"1" type:"string" required:"true"` + + // The password for the user account for directory actions. + // + // AccountPassword is a required field + AccountPassword *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ServiceAccountCredentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServiceAccountCredentials) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ServiceAccountCredentials) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ServiceAccountCredentials"} + if s.AccountName == nil { + invalidParams.Add(request.NewErrParamRequired("AccountName")) + } + if s.AccountName != nil && len(*s.AccountName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountName", 1)) + } + if s.AccountPassword == nil { + invalidParams.Add(request.NewErrParamRequired("AccountPassword")) + } + if s.AccountPassword != nil && len(*s.AccountPassword) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountPassword", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountName sets the AccountName field's value. +func (s *ServiceAccountCredentials) SetAccountName(v string) *ServiceAccountCredentials { + s.AccountName = &v + return s +} + +// SetAccountPassword sets the AccountPassword field's value. +func (s *ServiceAccountCredentials) SetAccountPassword(v string) *ServiceAccountCredentials { + s.AccountPassword = &v + return s +} + // Contains the parameters for a streaming session. // Please also see https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/Session type Session struct { @@ -3465,7 +4255,7 @@ type Stack struct { // The ARN of the stack. Arn *string `type:"string"` - // The timestamp when the stack was created. + // The time stamp when the stack was created. CreatedTime *time.Time `type:"timestamp" timestampFormat:"unix"` // A meaningful description for the stack. @@ -3738,6 +4528,94 @@ func (s *StorageConnector) SetResourceIdentifier(v string) *StorageConnector { return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/UpdateDirectoryConfigRequest +type UpdateDirectoryConfigInput struct { + _ struct{} `type:"structure"` + + // The name of the existing directory configuration to be updated. + // + // DirectoryName is a required field + DirectoryName *string `type:"string" required:"true"` + + // The list of the distinguished names of organizational units to place computer + // accounts in. 
+ OrganizationalUnitDistinguishedNames []*string `type:"list"` + + // The AccountName and AccountPassword values for the service account, which + // are used by the streaming instance to connect to the directory + ServiceAccountCredentials *ServiceAccountCredentials `type:"structure"` +} + +// String returns the string representation +func (s UpdateDirectoryConfigInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDirectoryConfigInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateDirectoryConfigInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateDirectoryConfigInput"} + if s.DirectoryName == nil { + invalidParams.Add(request.NewErrParamRequired("DirectoryName")) + } + if s.ServiceAccountCredentials != nil { + if err := s.ServiceAccountCredentials.Validate(); err != nil { + invalidParams.AddNested("ServiceAccountCredentials", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDirectoryName sets the DirectoryName field's value. +func (s *UpdateDirectoryConfigInput) SetDirectoryName(v string) *UpdateDirectoryConfigInput { + s.DirectoryName = &v + return s +} + +// SetOrganizationalUnitDistinguishedNames sets the OrganizationalUnitDistinguishedNames field's value. +func (s *UpdateDirectoryConfigInput) SetOrganizationalUnitDistinguishedNames(v []*string) *UpdateDirectoryConfigInput { + s.OrganizationalUnitDistinguishedNames = v + return s +} + +// SetServiceAccountCredentials sets the ServiceAccountCredentials field's value. +func (s *UpdateDirectoryConfigInput) SetServiceAccountCredentials(v *ServiceAccountCredentials) *UpdateDirectoryConfigInput { + s.ServiceAccountCredentials = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/UpdateDirectoryConfigResult +type UpdateDirectoryConfigOutput struct { + _ struct{} `type:"structure"` + + // The updated directory configuration details. + DirectoryConfig *DirectoryConfig `type:"structure"` +} + +// String returns the string representation +func (s UpdateDirectoryConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDirectoryConfigOutput) GoString() string { + return s.String() +} + +// SetDirectoryConfig sets the DirectoryConfig field's value. +func (s *UpdateDirectoryConfigOutput) SetDirectoryConfig(v *DirectoryConfig) *UpdateDirectoryConfigOutput { + s.DirectoryConfig = v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/UpdateFleetRequest type UpdateFleetInput struct { _ struct{} `type:"structure"` @@ -3763,14 +4641,42 @@ type UpdateFleetInput struct { // The name displayed to end users on the AppStream 2.0 portal. DisplayName *string `type:"string"` - // Enables or disables default Internet access for the fleet. + // The DirectoryName and OrganizationalUnitDistinguishedName values, which are + // used to join domains for the AppStream 2.0 streaming instances. + DomainJoinInfo *DomainJoinInfo `type:"structure"` + + // Enables or disables default internet access for the fleet. EnableDefaultInternetAccess *bool `type:"boolean"` // The image name from which a fleet is created. ImageName *string `min:"1" type:"string"` // The instance type of compute resources for the fleet. 
Fleet instances are - // launched from this instance type. + // launched from this instance type. Available instance types are: + // + // * stream.standard.medium + // + // * stream.standard.large + // + // * stream.compute.large + // + // * stream.compute.xlarge + // + // * stream.compute.2xlarge + // + // * stream.compute.4xlarge + // + // * stream.compute.8xlarge + // + // * stream.memory.large + // + // * stream.memory.xlarge + // + // * stream.memory.2xlarge + // + // * stream.memory.4xlarge + // + // * stream.memory.8xlarge InstanceType *string `min:"1" type:"string"` // The maximum time for which a streaming session can run. The input can be @@ -3859,6 +4765,12 @@ func (s *UpdateFleetInput) SetDisplayName(v string) *UpdateFleetInput { return s } +// SetDomainJoinInfo sets the DomainJoinInfo field's value. +func (s *UpdateFleetInput) SetDomainJoinInfo(v *DomainJoinInfo) *UpdateFleetInput { + s.DomainJoinInfo = v + return s +} + // SetEnableDefaultInternetAccess sets the EnableDefaultInternetAccess field's value. func (s *UpdateFleetInput) SetEnableDefaultInternetAccess(v bool) *UpdateFleetInput { s.EnableDefaultInternetAccess = &v @@ -4084,6 +4996,9 @@ const ( // FleetAttributeVpcConfigurationSecurityGroupIds is a FleetAttribute enum value FleetAttributeVpcConfigurationSecurityGroupIds = "VPC_CONFIGURATION_SECURITY_GROUP_IDS" + + // FleetAttributeDomainJoinInfo is a FleetAttribute enum value + FleetAttributeDomainJoinInfo = "DOMAIN_JOIN_INFO" ) const ( @@ -4119,6 +5034,48 @@ const ( // FleetErrorCodeInvalidSubnetConfiguration is a FleetErrorCode enum value FleetErrorCodeInvalidSubnetConfiguration = "INVALID_SUBNET_CONFIGURATION" + + // FleetErrorCodeSecurityGroupsNotFound is a FleetErrorCode enum value + FleetErrorCodeSecurityGroupsNotFound = "SECURITY_GROUPS_NOT_FOUND" + + // FleetErrorCodeIamServiceRoleMissingDescribeSecurityGroupsAction is a FleetErrorCode enum value + FleetErrorCodeIamServiceRoleMissingDescribeSecurityGroupsAction = "IAM_SERVICE_ROLE_MISSING_DESCRIBE_SECURITY_GROUPS_ACTION" + + // FleetErrorCodeDomainJoinErrorFileNotFound is a FleetErrorCode enum value + FleetErrorCodeDomainJoinErrorFileNotFound = "DOMAIN_JOIN_ERROR_FILE_NOT_FOUND" + + // FleetErrorCodeDomainJoinErrorAccessDenied is a FleetErrorCode enum value + FleetErrorCodeDomainJoinErrorAccessDenied = "DOMAIN_JOIN_ERROR_ACCESS_DENIED" + + // FleetErrorCodeDomainJoinErrorLogonFailure is a FleetErrorCode enum value + FleetErrorCodeDomainJoinErrorLogonFailure = "DOMAIN_JOIN_ERROR_LOGON_FAILURE" + + // FleetErrorCodeDomainJoinErrorInvalidParameter is a FleetErrorCode enum value + FleetErrorCodeDomainJoinErrorInvalidParameter = "DOMAIN_JOIN_ERROR_INVALID_PARAMETER" + + // FleetErrorCodeDomainJoinErrorMoreData is a FleetErrorCode enum value + FleetErrorCodeDomainJoinErrorMoreData = "DOMAIN_JOIN_ERROR_MORE_DATA" + + // FleetErrorCodeDomainJoinErrorNoSuchDomain is a FleetErrorCode enum value + FleetErrorCodeDomainJoinErrorNoSuchDomain = "DOMAIN_JOIN_ERROR_NO_SUCH_DOMAIN" + + // FleetErrorCodeDomainJoinErrorNotSupported is a FleetErrorCode enum value + FleetErrorCodeDomainJoinErrorNotSupported = "DOMAIN_JOIN_ERROR_NOT_SUPPORTED" + + // FleetErrorCodeDomainJoinNerrInvalidWorkgroupName is a FleetErrorCode enum value + FleetErrorCodeDomainJoinNerrInvalidWorkgroupName = "DOMAIN_JOIN_NERR_INVALID_WORKGROUP_NAME" + + // FleetErrorCodeDomainJoinNerrWorkstationNotStarted is a FleetErrorCode enum value + FleetErrorCodeDomainJoinNerrWorkstationNotStarted = "DOMAIN_JOIN_NERR_WORKSTATION_NOT_STARTED" + + // 
FleetErrorCodeDomainJoinErrorDsMachineAccountQuotaExceeded is a FleetErrorCode enum value + FleetErrorCodeDomainJoinErrorDsMachineAccountQuotaExceeded = "DOMAIN_JOIN_ERROR_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED" + + // FleetErrorCodeDomainJoinNerrPasswordExpired is a FleetErrorCode enum value + FleetErrorCodeDomainJoinNerrPasswordExpired = "DOMAIN_JOIN_NERR_PASSWORD_EXPIRED" + + // FleetErrorCodeDomainJoinInternalServiceError is a FleetErrorCode enum value + FleetErrorCodeDomainJoinInternalServiceError = "DOMAIN_JOIN_INTERNAL_SERVICE_ERROR" ) const ( diff --git a/cli/vendor/github.com/aws/aws-sdk-go/service/appstream/appstreamiface/interface.go b/cli/vendor/github.com/aws/aws-sdk-go/service/appstream/appstreamiface/interface.go index d3edeb4cc..8355bf4a3 100644 --- a/cli/vendor/github.com/aws/aws-sdk-go/service/appstream/appstreamiface/interface.go +++ b/cli/vendor/github.com/aws/aws-sdk-go/service/appstream/appstreamiface/interface.go @@ -64,6 +64,10 @@ type AppStreamAPI interface { AssociateFleetWithContext(aws.Context, *appstream.AssociateFleetInput, ...request.Option) (*appstream.AssociateFleetOutput, error) AssociateFleetRequest(*appstream.AssociateFleetInput) (*request.Request, *appstream.AssociateFleetOutput) + CreateDirectoryConfig(*appstream.CreateDirectoryConfigInput) (*appstream.CreateDirectoryConfigOutput, error) + CreateDirectoryConfigWithContext(aws.Context, *appstream.CreateDirectoryConfigInput, ...request.Option) (*appstream.CreateDirectoryConfigOutput, error) + CreateDirectoryConfigRequest(*appstream.CreateDirectoryConfigInput) (*request.Request, *appstream.CreateDirectoryConfigOutput) + CreateFleet(*appstream.CreateFleetInput) (*appstream.CreateFleetOutput, error) CreateFleetWithContext(aws.Context, *appstream.CreateFleetInput, ...request.Option) (*appstream.CreateFleetOutput, error) CreateFleetRequest(*appstream.CreateFleetInput) (*request.Request, *appstream.CreateFleetOutput) @@ -76,6 +80,10 @@ type AppStreamAPI interface { CreateStreamingURLWithContext(aws.Context, *appstream.CreateStreamingURLInput, ...request.Option) (*appstream.CreateStreamingURLOutput, error) CreateStreamingURLRequest(*appstream.CreateStreamingURLInput) (*request.Request, *appstream.CreateStreamingURLOutput) + DeleteDirectoryConfig(*appstream.DeleteDirectoryConfigInput) (*appstream.DeleteDirectoryConfigOutput, error) + DeleteDirectoryConfigWithContext(aws.Context, *appstream.DeleteDirectoryConfigInput, ...request.Option) (*appstream.DeleteDirectoryConfigOutput, error) + DeleteDirectoryConfigRequest(*appstream.DeleteDirectoryConfigInput) (*request.Request, *appstream.DeleteDirectoryConfigOutput) + DeleteFleet(*appstream.DeleteFleetInput) (*appstream.DeleteFleetOutput, error) DeleteFleetWithContext(aws.Context, *appstream.DeleteFleetInput, ...request.Option) (*appstream.DeleteFleetOutput, error) DeleteFleetRequest(*appstream.DeleteFleetInput) (*request.Request, *appstream.DeleteFleetOutput) @@ -84,6 +92,10 @@ type AppStreamAPI interface { DeleteStackWithContext(aws.Context, *appstream.DeleteStackInput, ...request.Option) (*appstream.DeleteStackOutput, error) DeleteStackRequest(*appstream.DeleteStackInput) (*request.Request, *appstream.DeleteStackOutput) + DescribeDirectoryConfigs(*appstream.DescribeDirectoryConfigsInput) (*appstream.DescribeDirectoryConfigsOutput, error) + DescribeDirectoryConfigsWithContext(aws.Context, *appstream.DescribeDirectoryConfigsInput, ...request.Option) (*appstream.DescribeDirectoryConfigsOutput, error) + 
DescribeDirectoryConfigsRequest(*appstream.DescribeDirectoryConfigsInput) (*request.Request, *appstream.DescribeDirectoryConfigsOutput) + DescribeFleets(*appstream.DescribeFleetsInput) (*appstream.DescribeFleetsOutput, error) DescribeFleetsWithContext(aws.Context, *appstream.DescribeFleetsInput, ...request.Option) (*appstream.DescribeFleetsOutput, error) DescribeFleetsRequest(*appstream.DescribeFleetsInput) (*request.Request, *appstream.DescribeFleetsOutput) @@ -124,6 +136,10 @@ type AppStreamAPI interface { StopFleetWithContext(aws.Context, *appstream.StopFleetInput, ...request.Option) (*appstream.StopFleetOutput, error) StopFleetRequest(*appstream.StopFleetInput) (*request.Request, *appstream.StopFleetOutput) + UpdateDirectoryConfig(*appstream.UpdateDirectoryConfigInput) (*appstream.UpdateDirectoryConfigOutput, error) + UpdateDirectoryConfigWithContext(aws.Context, *appstream.UpdateDirectoryConfigInput, ...request.Option) (*appstream.UpdateDirectoryConfigOutput, error) + UpdateDirectoryConfigRequest(*appstream.UpdateDirectoryConfigInput) (*request.Request, *appstream.UpdateDirectoryConfigOutput) + UpdateFleet(*appstream.UpdateFleetInput) (*appstream.UpdateFleetOutput, error) UpdateFleetWithContext(aws.Context, *appstream.UpdateFleetInput, ...request.Option) (*appstream.UpdateFleetOutput, error) UpdateFleetRequest(*appstream.UpdateFleetInput) (*request.Request, *appstream.UpdateFleetOutput) diff --git a/cli/vendor/github.com/aws/aws-sdk-go/service/appstream/waiters.go b/cli/vendor/github.com/aws/aws-sdk-go/service/appstream/waiters.go index d298eb571..f2e6c9a21 100644 --- a/cli/vendor/github.com/aws/aws-sdk-go/service/appstream/waiters.go +++ b/cli/vendor/github.com/aws/aws-sdk-go/service/appstream/waiters.go @@ -33,17 +33,17 @@ func (c *AppStream) WaitUntilFleetStartedWithContext(ctx aws.Context, input *Des Acceptors: []request.WaiterAcceptor{ { State: request.SuccessWaiterState, - Matcher: request.PathAllWaiterMatch, Argument: "fleets[].state", + Matcher: request.PathAllWaiterMatch, Argument: "Fleets[].State", Expected: "ACTIVE", }, { State: request.FailureWaiterState, - Matcher: request.PathAnyWaiterMatch, Argument: "fleets[].state", + Matcher: request.PathAnyWaiterMatch, Argument: "Fleets[].State", Expected: "PENDING_DEACTIVATE", }, { State: request.FailureWaiterState, - Matcher: request.PathAnyWaiterMatch, Argument: "fleets[].state", + Matcher: request.PathAnyWaiterMatch, Argument: "Fleets[].State", Expected: "INACTIVE", }, }, @@ -89,17 +89,17 @@ func (c *AppStream) WaitUntilFleetStoppedWithContext(ctx aws.Context, input *Des Acceptors: []request.WaiterAcceptor{ { State: request.SuccessWaiterState, - Matcher: request.PathAllWaiterMatch, Argument: "fleets[].state", + Matcher: request.PathAllWaiterMatch, Argument: "Fleets[].State", Expected: "INACTIVE", }, { State: request.FailureWaiterState, - Matcher: request.PathAnyWaiterMatch, Argument: "fleets[].state", + Matcher: request.PathAnyWaiterMatch, Argument: "Fleets[].State", Expected: "PENDING_ACTIVATE", }, { State: request.FailureWaiterState, - Matcher: request.PathAnyWaiterMatch, Argument: "fleets[].state", + Matcher: request.PathAnyWaiterMatch, Argument: "Fleets[].State", Expected: "ACTIVE", }, }, diff --git a/cli/vendor/github.com/aws/aws-sdk-go/service/clouddirectory/api.go b/cli/vendor/github.com/aws/aws-sdk-go/service/clouddirectory/api.go index 476cd1f0f..a42b03a96 100644 --- a/cli/vendor/github.com/aws/aws-sdk-go/service/clouddirectory/api.go +++ b/cli/vendor/github.com/aws/aws-sdk-go/service/clouddirectory/api.go @@ -699,6 
+699,9 @@ func (c *CloudDirectory) AttachTypedLinkRequest(input *AttachTypedLinkInput) (re
 //   * ErrCodeAccessDeniedException "AccessDeniedException"
 //   Access denied. Check your permissions.
 //
+//   * ErrCodeDirectoryNotEnabledException "DirectoryNotEnabledException"
+//   Operations are only permitted on enabled directories.
+//
 //   * ErrCodeResourceNotFoundException "ResourceNotFoundException"
 //   The specified resource could not be found.
 //
@@ -2689,6 +2692,9 @@ func (c *CloudDirectory) DetachTypedLinkRequest(input *DetachTypedLinkInput) (re
 //   * ErrCodeAccessDeniedException "AccessDeniedException"
 //   Access denied. Check your permissions.
 //
+//   * ErrCodeDirectoryNotEnabledException "DirectoryNotEnabledException"
+//   Operations are only permitted on enabled directories.
+//
 //   * ErrCodeResourceNotFoundException "ResourceNotFoundException"
 //   The specified resource could not be found.
 //
@@ -4589,6 +4595,9 @@ func (c *CloudDirectory) ListIncomingTypedLinksRequest(input *ListIncomingTypedL
 //   * ErrCodeAccessDeniedException "AccessDeniedException"
 //   Access denied. Check your permissions.
 //
+//   * ErrCodeDirectoryNotEnabledException "DirectoryNotEnabledException"
+//   Operations are only permitted on enabled directories.
+//
 //   * ErrCodeResourceNotFoundException "ResourceNotFoundException"
 //   The specified resource could not be found.
 //
@@ -5758,6 +5767,9 @@ func (c *CloudDirectory) ListOutgoingTypedLinksRequest(input *ListOutgoingTypedL
 //   * ErrCodeAccessDeniedException "AccessDeniedException"
 //   Access denied. Check your permissions.
 //
+//   * ErrCodeDirectoryNotEnabledException "DirectoryNotEnabledException"
+//   Operations are only permitted on enabled directories.
+//
 //   * ErrCodeResourceNotFoundException "ResourceNotFoundException"
 //   The specified resource could not be found.
 //
@@ -8856,6 +8868,403 @@ func (s *BatchAttachObjectResponse) SetAttachedObjectIdentifier(v string) *Batch
 	return s
 }
 
+// Attaches a policy object to a regular object inside a BatchWrite operation.
+// For more information, see AttachPolicy and BatchWriteRequest$Operations.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/clouddirectory-2016-05-10/BatchAttachPolicy
+type BatchAttachPolicy struct {
+	_ struct{} `type:"structure"`
+
+	// The reference that identifies the object to which the policy will be attached.
+	//
+	// ObjectReference is a required field
+	ObjectReference *ObjectReference `type:"structure" required:"true"`
+
+	// The reference that is associated with the policy object.
+	//
+	// PolicyReference is a required field
+	PolicyReference *ObjectReference `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s BatchAttachPolicy) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchAttachPolicy) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *BatchAttachPolicy) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "BatchAttachPolicy"}
+	if s.ObjectReference == nil {
+		invalidParams.Add(request.NewErrParamRequired("ObjectReference"))
+	}
+	if s.PolicyReference == nil {
+		invalidParams.Add(request.NewErrParamRequired("PolicyReference"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetObjectReference sets the ObjectReference field's value.
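+//
+// A caller-side sketch of building and validating this operation (the
+// selector strings are illustrative; ObjectReference selectors use the
+// usual Cloud Directory forms such as "$ObjectIdentifier" or a path):
+//
+//    op := &clouddirectory.BatchAttachPolicy{
+//        ObjectReference: &clouddirectory.ObjectReference{
+//            Selector: aws.String("/corp/users/jdoe"),
+//        },
+//        PolicyReference: &clouddirectory.ObjectReference{
+//            Selector: aws.String("/policies/access-policy"),
+//        },
+//    }
+//    if err := op.Validate(); err != nil {
+//        // Handle invalid parameters before adding op to the batch.
+//    }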
+func (s *BatchAttachPolicy) SetObjectReference(v *ObjectReference) *BatchAttachPolicy {
+	s.ObjectReference = v
+	return s
+}
+
+// SetPolicyReference sets the PolicyReference field's value.
+func (s *BatchAttachPolicy) SetPolicyReference(v *ObjectReference) *BatchAttachPolicy {
+	s.PolicyReference = v
+	return s
+}
+
+// Represents the output of an AttachPolicy response operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/clouddirectory-2016-05-10/BatchAttachPolicyResponse
+type BatchAttachPolicyResponse struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s BatchAttachPolicyResponse) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchAttachPolicyResponse) GoString() string {
+	return s.String()
+}
+
+// Attaches the specified object to the specified index inside a BatchWrite operation.
+// For more information, see AttachToIndex and BatchWriteRequest$Operations.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/clouddirectory-2016-05-10/BatchAttachToIndex
+type BatchAttachToIndex struct {
+	_ struct{} `type:"structure"`
+
+	// A reference to the index that you are attaching the object to.
+	//
+	// IndexReference is a required field
+	IndexReference *ObjectReference `type:"structure" required:"true"`
+
+	// A reference to the object that you are attaching to the index.
+	//
+	// TargetReference is a required field
+	TargetReference *ObjectReference `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s BatchAttachToIndex) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchAttachToIndex) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *BatchAttachToIndex) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "BatchAttachToIndex"}
+	if s.IndexReference == nil {
+		invalidParams.Add(request.NewErrParamRequired("IndexReference"))
+	}
+	if s.TargetReference == nil {
+		invalidParams.Add(request.NewErrParamRequired("TargetReference"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetIndexReference sets the IndexReference field's value.
+func (s *BatchAttachToIndex) SetIndexReference(v *ObjectReference) *BatchAttachToIndex {
+	s.IndexReference = v
+	return s
+}
+
+// SetTargetReference sets the TargetReference field's value.
+func (s *BatchAttachToIndex) SetTargetReference(v *ObjectReference) *BatchAttachToIndex {
+	s.TargetReference = v
+	return s
+}
+
+// Represents the output of an AttachToIndex response operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/clouddirectory-2016-05-10/BatchAttachToIndexResponse
+type BatchAttachToIndexResponse struct {
+	_ struct{} `type:"structure"`
+
+	// The ObjectIdentifier of the object that was attached to the index.
+	AttachedObjectIdentifier *string `type:"string"`
+}
+
+// String returns the string representation
+func (s BatchAttachToIndexResponse) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchAttachToIndexResponse) GoString() string {
+	return s.String()
+}
+
+// SetAttachedObjectIdentifier sets the AttachedObjectIdentifier field's value.
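+//
+// A sketch of building the corresponding request operation (selectors are
+// illustrative):
+//
+//    op := &clouddirectory.BatchAttachToIndex{
+//        IndexReference: &clouddirectory.ObjectReference{
+//            Selector: aws.String("/indices/users-by-email"),
+//        },
+//        TargetReference: &clouddirectory.ObjectReference{
+//            Selector: aws.String("/corp/users/jdoe"),
+//        },
+//    }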
+func (s *BatchAttachToIndexResponse) SetAttachedObjectIdentifier(v string) *BatchAttachToIndexResponse { + s.AttachedObjectIdentifier = &v + return s +} + +// Attaches a typed link to a specified source and target object inside a BatchWrite +// operation. For more information, see AttachTypedLink and BatchWriteRequest$Operations. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/clouddirectory-2016-05-10/BatchAttachTypedLink +type BatchAttachTypedLink struct { + _ struct{} `type:"structure"` + + // A set of attributes that are associated with the typed link. + // + // Attributes is a required field + Attributes []*AttributeNameAndValue `type:"list" required:"true"` + + // Identifies the source object that the typed link will attach to. + // + // SourceObjectReference is a required field + SourceObjectReference *ObjectReference `type:"structure" required:"true"` + + // Identifies the target object that the typed link will attach to. + // + // TargetObjectReference is a required field + TargetObjectReference *ObjectReference `type:"structure" required:"true"` + + // Identifies the typed link facet that is associated with the typed link. + // + // TypedLinkFacet is a required field + TypedLinkFacet *TypedLinkSchemaAndFacetName `type:"structure" required:"true"` +} + +// String returns the string representation +func (s BatchAttachTypedLink) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchAttachTypedLink) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchAttachTypedLink) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchAttachTypedLink"} + if s.Attributes == nil { + invalidParams.Add(request.NewErrParamRequired("Attributes")) + } + if s.SourceObjectReference == nil { + invalidParams.Add(request.NewErrParamRequired("SourceObjectReference")) + } + if s.TargetObjectReference == nil { + invalidParams.Add(request.NewErrParamRequired("TargetObjectReference")) + } + if s.TypedLinkFacet == nil { + invalidParams.Add(request.NewErrParamRequired("TypedLinkFacet")) + } + if s.Attributes != nil { + for i, v := range s.Attributes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Attributes", i), err.(request.ErrInvalidParams)) + } + } + } + if s.TypedLinkFacet != nil { + if err := s.TypedLinkFacet.Validate(); err != nil { + invalidParams.AddNested("TypedLinkFacet", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributes sets the Attributes field's value. +func (s *BatchAttachTypedLink) SetAttributes(v []*AttributeNameAndValue) *BatchAttachTypedLink { + s.Attributes = v + return s +} + +// SetSourceObjectReference sets the SourceObjectReference field's value. +func (s *BatchAttachTypedLink) SetSourceObjectReference(v *ObjectReference) *BatchAttachTypedLink { + s.SourceObjectReference = v + return s +} + +// SetTargetObjectReference sets the TargetObjectReference field's value. +func (s *BatchAttachTypedLink) SetTargetObjectReference(v *ObjectReference) *BatchAttachTypedLink { + s.TargetObjectReference = v + return s +} + +// SetTypedLinkFacet sets the TypedLinkFacet field's value.
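+
+// Usage sketch (svc, schemaArn, the facet name and the attribute name are all
+// assumed placeholders): attaching a typed link whose facet defines a single
+// String attribute.
+//
+//	link := &clouddirectory.BatchAttachTypedLink{
+//	    SourceObjectReference: &clouddirectory.ObjectReference{Selector: aws.String("/users/bob")},
+//	    TargetObjectReference: &clouddirectory.ObjectReference{Selector: aws.String("/groups/admins")},
+//	    TypedLinkFacet: &clouddirectory.TypedLinkSchemaAndFacetName{
+//	        SchemaArn:     aws.String(schemaArn),
+//	        TypedLinkName: aws.String("MemberOf"),
+//	    },
+//	    Attributes: []*clouddirectory.AttributeNameAndValue{{
+//	        AttributeName: aws.String("Role"),
+//	        Value:         &clouddirectory.TypedAttributeValue{StringValue: aws.String("owner")},
+//	    }},
+//	}
+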
+func (s *BatchAttachTypedLink) SetTypedLinkFacet(v *TypedLinkSchemaAndFacetName) *BatchAttachTypedLink { + s.TypedLinkFacet = v + return s +} + +// Represents the output of an AttachTypedLink response operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/clouddirectory-2016-05-10/BatchAttachTypedLinkResponse +type BatchAttachTypedLinkResponse struct { + _ struct{} `type:"structure"` + + // Returns a typed link specifier as output. + TypedLinkSpecifier *TypedLinkSpecifier `type:"structure"` +} + +// String returns the string representation +func (s BatchAttachTypedLinkResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchAttachTypedLinkResponse) GoString() string { + return s.String() +} + +// SetTypedLinkSpecifier sets the TypedLinkSpecifier field's value. +func (s *BatchAttachTypedLinkResponse) SetTypedLinkSpecifier(v *TypedLinkSpecifier) *BatchAttachTypedLinkResponse { + s.TypedLinkSpecifier = v + return s +} + +// Creates an index object inside a BatchWrite operation. For more information, +// see CreateIndex and BatchWriteRequest$Operations. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/clouddirectory-2016-05-10/BatchCreateIndex +type BatchCreateIndex struct { + _ struct{} `type:"structure"` + + // The batch reference name. See Batches (http://docs.aws.amazon.com/directoryservice/latest/admin-guide/cd_advanced.html#batches) + // for more information. + BatchReferenceName *string `type:"string"` + + // Indicates whether the attribute that is being indexed has unique values or + // not. + // + // IsUnique is a required field + IsUnique *bool `type:"boolean" required:"true"` + + // The name of the link between the parent object and the index object. + LinkName *string `min:"1" type:"string"` + + // Specifies the attributes that should be indexed on. Currently only a single + // attribute is supported. + // + // OrderedIndexedAttributeList is a required field + OrderedIndexedAttributeList []*AttributeKey `type:"list" required:"true"` + + // A reference to the parent object that contains the index object. + ParentReference *ObjectReference `type:"structure"` +} + +// String returns the string representation +func (s BatchCreateIndex) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchCreateIndex) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchCreateIndex) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchCreateIndex"} + if s.IsUnique == nil { + invalidParams.Add(request.NewErrParamRequired("IsUnique")) + } + if s.LinkName != nil && len(*s.LinkName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LinkName", 1)) + } + if s.OrderedIndexedAttributeList == nil { + invalidParams.Add(request.NewErrParamRequired("OrderedIndexedAttributeList")) + } + if s.OrderedIndexedAttributeList != nil { + for i, v := range s.OrderedIndexedAttributeList { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "OrderedIndexedAttributeList", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBatchReferenceName sets the BatchReferenceName field's value.
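+
+// Usage sketch (schemaArn, facet and attribute names are assumed placeholders):
+// creating a unique index over a single attribute and linking it under a parent
+// object. BatchReferenceName is optional; it lets later operations in the same
+// batch refer back to the newly created index.
+//
+//	idx := &clouddirectory.BatchCreateIndex{
+//	    IsUnique: aws.Bool(true),
+//	    OrderedIndexedAttributeList: []*clouddirectory.AttributeKey{{
+//	        SchemaArn: aws.String(schemaArn),
+//	        FacetName: aws.String("User"),
+//	        Name:      aws.String("email"),
+//	    }},
+//	    ParentReference:    &clouddirectory.ObjectReference{Selector: aws.String("/")},
+//	    LinkName:           aws.String("byEmail"),
+//	    BatchReferenceName: aws.String("newIndex"),
+//	}
+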
+func (s *BatchCreateIndex) SetBatchReferenceName(v string) *BatchCreateIndex { + s.BatchReferenceName = &v + return s +} + +// SetIsUnique sets the IsUnique field's value. +func (s *BatchCreateIndex) SetIsUnique(v bool) *BatchCreateIndex { + s.IsUnique = &v + return s +} + +// SetLinkName sets the LinkName field's value. +func (s *BatchCreateIndex) SetLinkName(v string) *BatchCreateIndex { + s.LinkName = &v + return s +} + +// SetOrderedIndexedAttributeList sets the OrderedIndexedAttributeList field's value. +func (s *BatchCreateIndex) SetOrderedIndexedAttributeList(v []*AttributeKey) *BatchCreateIndex { + s.OrderedIndexedAttributeList = v + return s +} + +// SetParentReference sets the ParentReference field's value. +func (s *BatchCreateIndex) SetParentReference(v *ObjectReference) *BatchCreateIndex { + s.ParentReference = v + return s +} + +// Represents the output of a CreateIndex response operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/clouddirectory-2016-05-10/BatchCreateIndexResponse +type BatchCreateIndexResponse struct { + _ struct{} `type:"structure"` + + // The ObjectIdentifier of the index created by this operation. + ObjectIdentifier *string `type:"string"` +} + +// String returns the string representation +func (s BatchCreateIndexResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchCreateIndexResponse) GoString() string { + return s.String() +} + +// SetObjectIdentifier sets the ObjectIdentifier field's value. +func (s *BatchCreateIndexResponse) SetObjectIdentifier(v string) *BatchCreateIndexResponse { + s.ObjectIdentifier = &v + return s +} + // Represents the output of a CreateObject operation. // Please also see https://docs.aws.amazon.com/goto/WebAPI/clouddirectory-2016-05-10/BatchCreateObject type BatchCreateObject struct { @@ -9059,6 +9468,86 @@ func (s BatchDeleteObjectResponse) GoString() string { return s.String() } +// Detaches the specified object from the specified index inside a BatchWrite +// operation. For more information, see DetachFromIndex and BatchWriteRequest$Operations. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/clouddirectory-2016-05-10/BatchDetachFromIndex +type BatchDetachFromIndex struct { + _ struct{} `type:"structure"` + + // A reference to the index object. + // + // IndexReference is a required field + IndexReference *ObjectReference `type:"structure" required:"true"` + + // A reference to the object being detached from the index. + // + // TargetReference is a required field + TargetReference *ObjectReference `type:"structure" required:"true"` +} + +// String returns the string representation +func (s BatchDetachFromIndex) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchDetachFromIndex) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchDetachFromIndex) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchDetachFromIndex"} + if s.IndexReference == nil { + invalidParams.Add(request.NewErrParamRequired("IndexReference")) + } + if s.TargetReference == nil { + invalidParams.Add(request.NewErrParamRequired("TargetReference")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIndexReference sets the IndexReference field's value.
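+
+// Usage sketch (placeholder selectors): the inverse of BatchAttachToIndex. The
+// "#newIndex" selector form for referencing an object created earlier in the
+// same batch via BatchReferenceName is an assumption here; plain path or
+// $ObjectIdentifier selectors work as well.
+//
+//	op := &clouddirectory.BatchWriteOperation{
+//	    DetachFromIndex: &clouddirectory.BatchDetachFromIndex{
+//	        IndexReference:  &clouddirectory.ObjectReference{Selector: aws.String("#newIndex")},
+//	        TargetReference: &clouddirectory.ObjectReference{Selector: aws.String("/users/bob")},
+//	    },
+//	}
+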
+func (s *BatchDetachFromIndex) SetIndexReference(v *ObjectReference) *BatchDetachFromIndex { + s.IndexReference = v + return s +} + +// SetTargetReference sets the TargetReference field's value. +func (s *BatchDetachFromIndex) SetTargetReference(v *ObjectReference) *BatchDetachFromIndex { + s.TargetReference = v + return s +} + +// Represents the output of a DetachFromIndex response operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/clouddirectory-2016-05-10/BatchDetachFromIndexResponse +type BatchDetachFromIndexResponse struct { + _ struct{} `type:"structure"` + + // The ObjectIdentifier of the object that was detached from the index. + DetachedObjectIdentifier *string `type:"string"` +} + +// String returns the string representation +func (s BatchDetachFromIndexResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchDetachFromIndexResponse) GoString() string { + return s.String() +} + +// SetDetachedObjectIdentifier sets the DetachedObjectIdentifier field's value. +func (s *BatchDetachFromIndexResponse) SetDetachedObjectIdentifier(v string) *BatchDetachFromIndexResponse { + s.DetachedObjectIdentifier = &v + return s +} + // Represents the output of a DetachObject operation. // Please also see https://docs.aws.amazon.com/goto/WebAPI/clouddirectory-2016-05-10/BatchDetachObject type BatchDetachObject struct { @@ -9156,6 +9645,488 @@ func (s *BatchDetachObjectResponse) SetDetachedObjectIdentifier(v string) *Batch return s } +// Detaches a typed link from a specified source and target object inside a +// BatchWrite operation. For more information, see DetachTypedLink and BatchWriteRequest$Operations. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/clouddirectory-2016-05-10/BatchDetachTypedLink +type BatchDetachTypedLink struct { + _ struct{} `type:"structure"` + + // Used to accept a typed link specifier as input. + // + // TypedLinkSpecifier is a required field + TypedLinkSpecifier *TypedLinkSpecifier `type:"structure" required:"true"` +} + +// String returns the string representation +func (s BatchDetachTypedLink) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchDetachTypedLink) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchDetachTypedLink) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchDetachTypedLink"} + if s.TypedLinkSpecifier == nil { + invalidParams.Add(request.NewErrParamRequired("TypedLinkSpecifier")) + } + if s.TypedLinkSpecifier != nil { + if err := s.TypedLinkSpecifier.Validate(); err != nil { + invalidParams.AddNested("TypedLinkSpecifier", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTypedLinkSpecifier sets the TypedLinkSpecifier field's value. +func (s *BatchDetachTypedLink) SetTypedLinkSpecifier(v *TypedLinkSpecifier) *BatchDetachTypedLink { + s.TypedLinkSpecifier = v + return s +} + +// Represents the output of a DetachTypedLink response operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/clouddirectory-2016-05-10/BatchDetachTypedLinkResponse +type BatchDetachTypedLinkResponse struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s BatchDetachTypedLinkResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchDetachTypedLinkResponse) GoString() string { + return s.String() +} + +// Retrieves metadata about an object inside a BatchRead operation. For more +// information, see GetObjectInformation and BatchReadRequest$Operations. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/clouddirectory-2016-05-10/BatchGetObjectInformation +type BatchGetObjectInformation struct { + _ struct{} `type:"structure"` + + // A reference to the object. + // + // ObjectReference is a required field + ObjectReference *ObjectReference `type:"structure" required:"true"` +} + +// String returns the string representation +func (s BatchGetObjectInformation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetObjectInformation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchGetObjectInformation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchGetObjectInformation"} + if s.ObjectReference == nil { + invalidParams.Add(request.NewErrParamRequired("ObjectReference")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetObjectReference sets the ObjectReference field's value. +func (s *BatchGetObjectInformation) SetObjectReference(v *ObjectReference) *BatchGetObjectInformation { + s.ObjectReference = v + return s +} + +// Represents the output of a GetObjectInformation response operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/clouddirectory-2016-05-10/BatchGetObjectInformationResponse +type BatchGetObjectInformationResponse struct { + _ struct{} `type:"structure"` + + // The ObjectIdentifier of the specified object. + ObjectIdentifier *string `type:"string"` + + // The facets attached to the specified object. + SchemaFacets []*SchemaFacet `type:"list"` +} + +// String returns the string representation +func (s BatchGetObjectInformationResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetObjectInformationResponse) GoString() string { + return s.String() +} + +// SetObjectIdentifier sets the ObjectIdentifier field's value. +func (s *BatchGetObjectInformationResponse) SetObjectIdentifier(v string) *BatchGetObjectInformationResponse { + s.ObjectIdentifier = &v + return s +} + +// SetSchemaFacets sets the SchemaFacets field's value. +func (s *BatchGetObjectInformationResponse) SetSchemaFacets(v []*SchemaFacet) *BatchGetObjectInformationResponse { + s.SchemaFacets = v + return s +} + +// Lists indices attached to an object inside a BatchRead operation. For more +// information, see ListAttachedIndices and BatchReadRequest$Operations. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/clouddirectory-2016-05-10/BatchListAttachedIndices +type BatchListAttachedIndices struct { + _ struct{} `type:"structure"` + + // The maximum number of results to retrieve. + MaxResults *int64 `min:"1" type:"integer"` + + // The pagination token. + NextToken *string `type:"string"` + + // A reference to the object that has indices attached. 
+ // + // TargetReference is a required field + TargetReference *ObjectReference `type:"structure" required:"true"` +} + +// String returns the string representation +func (s BatchListAttachedIndices) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchListAttachedIndices) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchListAttachedIndices) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchListAttachedIndices"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.TargetReference == nil { + invalidParams.Add(request.NewErrParamRequired("TargetReference")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *BatchListAttachedIndices) SetMaxResults(v int64) *BatchListAttachedIndices { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *BatchListAttachedIndices) SetNextToken(v string) *BatchListAttachedIndices { + s.NextToken = &v + return s +} + +// SetTargetReference sets the TargetReference field's value. +func (s *BatchListAttachedIndices) SetTargetReference(v *ObjectReference) *BatchListAttachedIndices { + s.TargetReference = v + return s +} + +// Represents the output of a ListAttachedIndices response operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/clouddirectory-2016-05-10/BatchListAttachedIndicesResponse +type BatchListAttachedIndicesResponse struct { + _ struct{} `type:"structure"` + + // The indices attached to the specified object. + IndexAttachments []*IndexAttachment `type:"list"` + + // The pagination token. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s BatchListAttachedIndicesResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchListAttachedIndicesResponse) GoString() string { + return s.String() +} + +// SetIndexAttachments sets the IndexAttachments field's value. +func (s *BatchListAttachedIndicesResponse) SetIndexAttachments(v []*IndexAttachment) *BatchListAttachedIndicesResponse { + s.IndexAttachments = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *BatchListAttachedIndicesResponse) SetNextToken(v string) *BatchListAttachedIndicesResponse { + s.NextToken = &v + return s +} + +// Returns a paginated list of all the incoming TypedLinkSpecifier information +// for an object inside a BatchRead operation. For more information, see ListIncomingTypedLinks +// and BatchReadRequest$Operations. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/clouddirectory-2016-05-10/BatchListIncomingTypedLinks +type BatchListIncomingTypedLinks struct { + _ struct{} `type:"structure"` + + // Provides range filters for multiple attributes. When providing ranges to + // typed link selection, any inexact ranges must be specified at the end. Any + // attributes that do not have a range specified are presumed to match the entire + // range. + FilterAttributeRanges []*TypedLinkAttributeRange `type:"list"` + + // Filters are interpreted in the order of the attributes on the typed link + // facet, not the order in which they are supplied to any API calls. 
+ FilterTypedLink *TypedLinkSchemaAndFacetName `type:"structure"` + + // The maximum number of results to retrieve. + MaxResults *int64 `min:"1" type:"integer"` + + // The pagination token. + NextToken *string `type:"string"` + + // The reference that identifies the object whose attributes will be listed. + // + // ObjectReference is a required field + ObjectReference *ObjectReference `type:"structure" required:"true"` +} + +// String returns the string representation +func (s BatchListIncomingTypedLinks) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchListIncomingTypedLinks) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchListIncomingTypedLinks) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchListIncomingTypedLinks"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.ObjectReference == nil { + invalidParams.Add(request.NewErrParamRequired("ObjectReference")) + } + if s.FilterAttributeRanges != nil { + for i, v := range s.FilterAttributeRanges { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "FilterAttributeRanges", i), err.(request.ErrInvalidParams)) + } + } + } + if s.FilterTypedLink != nil { + if err := s.FilterTypedLink.Validate(); err != nil { + invalidParams.AddNested("FilterTypedLink", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilterAttributeRanges sets the FilterAttributeRanges field's value. +func (s *BatchListIncomingTypedLinks) SetFilterAttributeRanges(v []*TypedLinkAttributeRange) *BatchListIncomingTypedLinks { + s.FilterAttributeRanges = v + return s +} + +// SetFilterTypedLink sets the FilterTypedLink field's value. +func (s *BatchListIncomingTypedLinks) SetFilterTypedLink(v *TypedLinkSchemaAndFacetName) *BatchListIncomingTypedLinks { + s.FilterTypedLink = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *BatchListIncomingTypedLinks) SetMaxResults(v int64) *BatchListIncomingTypedLinks { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *BatchListIncomingTypedLinks) SetNextToken(v string) *BatchListIncomingTypedLinks { + s.NextToken = &v + return s +} + +// SetObjectReference sets the ObjectReference field's value. +func (s *BatchListIncomingTypedLinks) SetObjectReference(v *ObjectReference) *BatchListIncomingTypedLinks { + s.ObjectReference = v + return s +} + +// Represents the output of a ListIncomingTypedLinks response operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/clouddirectory-2016-05-10/BatchListIncomingTypedLinksResponse +type BatchListIncomingTypedLinksResponse struct { + _ struct{} `type:"structure"` + + // Returns one or more typed link specifiers as output. + LinkSpecifiers []*TypedLinkSpecifier `type:"list"` + + // The pagination token. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s BatchListIncomingTypedLinksResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchListIncomingTypedLinksResponse) GoString() string { + return s.String() +} + +// SetLinkSpecifiers sets the LinkSpecifiers field's value. 
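+
+// Usage sketch (assumed client svc; error handling elided): reading incoming
+// typed links through BatchRead. A NextToken returned in the successful
+// response can be fed back into a follow-up batch request to continue the
+// pagination described above.
+//
+//	out, _ := svc.BatchRead(&clouddirectory.BatchReadInput{
+//	    DirectoryArn: aws.String(directoryArn),
+//	    Operations: []*clouddirectory.BatchReadOperation{{
+//	        ListIncomingTypedLinks: &clouddirectory.BatchListIncomingTypedLinks{
+//	            ObjectReference: &clouddirectory.ObjectReference{Selector: aws.String("/groups/admins")},
+//	            MaxResults:      aws.Int64(10),
+//	        },
+//	    }},
+//	})
+//	links := out.Responses[0].SuccessfulResponse.ListIncomingTypedLinks.LinkSpecifiers
+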
+func (s *BatchListIncomingTypedLinksResponse) SetLinkSpecifiers(v []*TypedLinkSpecifier) *BatchListIncomingTypedLinksResponse { + s.LinkSpecifiers = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *BatchListIncomingTypedLinksResponse) SetNextToken(v string) *BatchListIncomingTypedLinksResponse { + s.NextToken = &v + return s +} + +// Lists objects attached to the specified index inside a BatchRead operation. +// For more information, see ListIndex and BatchReadRequest$Operations. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/clouddirectory-2016-05-10/BatchListIndex +type BatchListIndex struct { + _ struct{} `type:"structure"` + + // The reference to the index to list. + // + // IndexReference is a required field + IndexReference *ObjectReference `type:"structure" required:"true"` + + // The maximum number of results to retrieve. + MaxResults *int64 `min:"1" type:"integer"` + + // The pagination token. + NextToken *string `type:"string"` + + // Specifies the ranges of indexed values that you want to query. + RangesOnIndexedValues []*ObjectAttributeRange `type:"list"` +} + +// String returns the string representation +func (s BatchListIndex) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchListIndex) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchListIndex) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchListIndex"} + if s.IndexReference == nil { + invalidParams.Add(request.NewErrParamRequired("IndexReference")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.RangesOnIndexedValues != nil { + for i, v := range s.RangesOnIndexedValues { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RangesOnIndexedValues", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIndexReference sets the IndexReference field's value. +func (s *BatchListIndex) SetIndexReference(v *ObjectReference) *BatchListIndex { + s.IndexReference = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *BatchListIndex) SetMaxResults(v int64) *BatchListIndex { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *BatchListIndex) SetNextToken(v string) *BatchListIndex { + s.NextToken = &v + return s +} + +// SetRangesOnIndexedValues sets the RangesOnIndexedValues field's value. +func (s *BatchListIndex) SetRangesOnIndexedValues(v []*ObjectAttributeRange) *BatchListIndex { + s.RangesOnIndexedValues = v + return s +} + +// Represents the output of a ListIndex response operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/clouddirectory-2016-05-10/BatchListIndexResponse +type BatchListIndexResponse struct { + _ struct{} `type:"structure"` + + // The objects and indexed values attached to the index. + IndexAttachments []*IndexAttachment `type:"list"` + + // The pagination token. 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s BatchListIndexResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchListIndexResponse) GoString() string { + return s.String() +} + +// SetIndexAttachments sets the IndexAttachments field's value. +func (s *BatchListIndexResponse) SetIndexAttachments(v []*IndexAttachment) *BatchListIndexResponse { + s.IndexAttachments = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *BatchListIndexResponse) SetNextToken(v string) *BatchListIndexResponse { + s.NextToken = &v + return s +} + // Represents the output of a ListObjectAttributes operation. // Please also see https://docs.aws.amazon.com/goto/WebAPI/clouddirectory-2016-05-10/BatchListObjectAttributes type BatchListObjectAttributes struct { @@ -9365,6 +10336,528 @@ func (s *BatchListObjectChildrenResponse) SetNextToken(v string) *BatchListObjec return s } +// Retrieves all available parent paths for any object type such as node, leaf +// node, policy node, and index node objects inside a BatchRead operation. For +// more information, see ListObjectParentPaths and BatchReadRequest$Operations. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/clouddirectory-2016-05-10/BatchListObjectParentPaths +type BatchListObjectParentPaths struct { + _ struct{} `type:"structure"` + + // The maximum number of results to retrieve. + MaxResults *int64 `min:"1" type:"integer"` + + // The pagination token. + NextToken *string `type:"string"` + + // The reference that identifies the object whose attributes will be listed. + // + // ObjectReference is a required field + ObjectReference *ObjectReference `type:"structure" required:"true"` +} + +// String returns the string representation +func (s BatchListObjectParentPaths) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchListObjectParentPaths) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchListObjectParentPaths) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchListObjectParentPaths"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.ObjectReference == nil { + invalidParams.Add(request.NewErrParamRequired("ObjectReference")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *BatchListObjectParentPaths) SetMaxResults(v int64) *BatchListObjectParentPaths { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *BatchListObjectParentPaths) SetNextToken(v string) *BatchListObjectParentPaths { + s.NextToken = &v + return s +} + +// SetObjectReference sets the ObjectReference field's value. +func (s *BatchListObjectParentPaths) SetObjectReference(v *ObjectReference) *BatchListObjectParentPaths { + s.ObjectReference = v + return s +} + +// Represents the output of a ListObjectParentPaths response operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/clouddirectory-2016-05-10/BatchListObjectParentPathsResponse +type BatchListObjectParentPathsResponse struct { + _ struct{} `type:"structure"` + + // The pagination token. 
+ NextToken *string `type:"string"` + + // Returns the path to the ObjectIdentifiers that are associated with the directory. + PathToObjectIdentifiersList []*PathToObjectIdentifiers `type:"list"` +} + +// String returns the string representation +func (s BatchListObjectParentPathsResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchListObjectParentPathsResponse) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *BatchListObjectParentPathsResponse) SetNextToken(v string) *BatchListObjectParentPathsResponse { + s.NextToken = &v + return s +} + +// SetPathToObjectIdentifiersList sets the PathToObjectIdentifiersList field's value. +func (s *BatchListObjectParentPathsResponse) SetPathToObjectIdentifiersList(v []*PathToObjectIdentifiers) *BatchListObjectParentPathsResponse { + s.PathToObjectIdentifiersList = v + return s +} + +// Returns policies attached to an object in pagination fashion inside a BatchRead +// operation. For more information, see ListObjectPolicies and BatchReadRequest$Operations. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/clouddirectory-2016-05-10/BatchListObjectPolicies +type BatchListObjectPolicies struct { + _ struct{} `type:"structure"` + + // The maximum number of results to retrieve. + MaxResults *int64 `min:"1" type:"integer"` + + // The pagination token. + NextToken *string `type:"string"` + + // The reference that identifies the object whose attributes will be listed. + // + // ObjectReference is a required field + ObjectReference *ObjectReference `type:"structure" required:"true"` +} + +// String returns the string representation +func (s BatchListObjectPolicies) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchListObjectPolicies) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchListObjectPolicies) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchListObjectPolicies"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.ObjectReference == nil { + invalidParams.Add(request.NewErrParamRequired("ObjectReference")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *BatchListObjectPolicies) SetMaxResults(v int64) *BatchListObjectPolicies { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *BatchListObjectPolicies) SetNextToken(v string) *BatchListObjectPolicies { + s.NextToken = &v + return s +} + +// SetObjectReference sets the ObjectReference field's value. +func (s *BatchListObjectPolicies) SetObjectReference(v *ObjectReference) *BatchListObjectPolicies { + s.ObjectReference = v + return s +} + +// Represents the output of a ListObjectPolicies response operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/clouddirectory-2016-05-10/BatchListObjectPoliciesResponse +type BatchListObjectPoliciesResponse struct { + _ struct{} `type:"structure"` + + // A list of policy ObjectIdentifiers that are attached to the object. + AttachedPolicyIds []*string `type:"list"` + + // The pagination token.
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s BatchListObjectPoliciesResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchListObjectPoliciesResponse) GoString() string { + return s.String() +} + +// SetAttachedPolicyIds sets the AttachedPolicyIds field's value. +func (s *BatchListObjectPoliciesResponse) SetAttachedPolicyIds(v []*string) *BatchListObjectPoliciesResponse { + s.AttachedPolicyIds = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *BatchListObjectPoliciesResponse) SetNextToken(v string) *BatchListObjectPoliciesResponse { + s.NextToken = &v + return s +} + +// Returns a paginated list of all the outgoing TypedLinkSpecifier information +// for an object inside a BatchRead operation. For more information, see ListOutgoingTypedLinks +// and BatchReadRequest$Operations. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/clouddirectory-2016-05-10/BatchListOutgoingTypedLinks +type BatchListOutgoingTypedLinks struct { + _ struct{} `type:"structure"` + + // Provides range filters for multiple attributes. When providing ranges to + // typed link selection, any inexact ranges must be specified at the end. Any + // attributes that do not have a range specified are presumed to match the entire + // range. + FilterAttributeRanges []*TypedLinkAttributeRange `type:"list"` + + // Filters are interpreted in the order of the attributes defined on the typed + // link facet, not the order they are supplied to any API calls. + FilterTypedLink *TypedLinkSchemaAndFacetName `type:"structure"` + + // The maximum number of results to retrieve. + MaxResults *int64 `min:"1" type:"integer"` + + // The pagination token. + NextToken *string `type:"string"` + + // The reference that identifies the object whose attributes will be listed. + // + // ObjectReference is a required field + ObjectReference *ObjectReference `type:"structure" required:"true"` +} + +// String returns the string representation +func (s BatchListOutgoingTypedLinks) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchListOutgoingTypedLinks) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchListOutgoingTypedLinks) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchListOutgoingTypedLinks"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.ObjectReference == nil { + invalidParams.Add(request.NewErrParamRequired("ObjectReference")) + } + if s.FilterAttributeRanges != nil { + for i, v := range s.FilterAttributeRanges { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "FilterAttributeRanges", i), err.(request.ErrInvalidParams)) + } + } + } + if s.FilterTypedLink != nil { + if err := s.FilterTypedLink.Validate(); err != nil { + invalidParams.AddNested("FilterTypedLink", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilterAttributeRanges sets the FilterAttributeRanges field's value. 
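+
+// Usage sketch (facet and attribute names assumed): restricting the listing to
+// one typed link facet. FilterAttributeRanges could further narrow the results
+// by attribute value, with any inexact ranges placed last as noted above.
+//
+//	op := &clouddirectory.BatchListOutgoingTypedLinks{
+//	    ObjectReference: &clouddirectory.ObjectReference{Selector: aws.String("/users/bob")},
+//	    FilterTypedLink: &clouddirectory.TypedLinkSchemaAndFacetName{
+//	        SchemaArn:     aws.String(schemaArn),
+//	        TypedLinkName: aws.String("MemberOf"),
+//	    },
+//	}
+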
+func (s *BatchListOutgoingTypedLinks) SetFilterAttributeRanges(v []*TypedLinkAttributeRange) *BatchListOutgoingTypedLinks { + s.FilterAttributeRanges = v + return s +} + +// SetFilterTypedLink sets the FilterTypedLink field's value. +func (s *BatchListOutgoingTypedLinks) SetFilterTypedLink(v *TypedLinkSchemaAndFacetName) *BatchListOutgoingTypedLinks { + s.FilterTypedLink = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *BatchListOutgoingTypedLinks) SetMaxResults(v int64) *BatchListOutgoingTypedLinks { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *BatchListOutgoingTypedLinks) SetNextToken(v string) *BatchListOutgoingTypedLinks { + s.NextToken = &v + return s +} + +// SetObjectReference sets the ObjectReference field's value. +func (s *BatchListOutgoingTypedLinks) SetObjectReference(v *ObjectReference) *BatchListOutgoingTypedLinks { + s.ObjectReference = v + return s +} + +// Represents the output of a ListOutgoingTypedLinks response operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/clouddirectory-2016-05-10/BatchListOutgoingTypedLinksResponse +type BatchListOutgoingTypedLinksResponse struct { + _ struct{} `type:"structure"` + + // The pagination token. + NextToken *string `type:"string"` + + // Returns a typed link specifier as output. + TypedLinkSpecifiers []*TypedLinkSpecifier `type:"list"` +} + +// String returns the string representation +func (s BatchListOutgoingTypedLinksResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchListOutgoingTypedLinksResponse) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *BatchListOutgoingTypedLinksResponse) SetNextToken(v string) *BatchListOutgoingTypedLinksResponse { + s.NextToken = &v + return s +} + +// SetTypedLinkSpecifiers sets the TypedLinkSpecifiers field's value. +func (s *BatchListOutgoingTypedLinksResponse) SetTypedLinkSpecifiers(v []*TypedLinkSpecifier) *BatchListOutgoingTypedLinksResponse { + s.TypedLinkSpecifiers = v + return s +} + +// Returns all of the ObjectIdentifiers to which a given policy is attached +// inside a BatchRead operation. For more information, see ListPolicyAttachments +// and BatchReadRequest$Operations. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/clouddirectory-2016-05-10/BatchListPolicyAttachments +type BatchListPolicyAttachments struct { + _ struct{} `type:"structure"` + + // The maximum number of results to retrieve. + MaxResults *int64 `min:"1" type:"integer"` + + // The pagination token. + NextToken *string `type:"string"` + + // The reference that identifies the policy object. + // + // PolicyReference is a required field + PolicyReference *ObjectReference `type:"structure" required:"true"` +} + +// String returns the string representation +func (s BatchListPolicyAttachments) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchListPolicyAttachments) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *BatchListPolicyAttachments) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchListPolicyAttachments"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.PolicyReference == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyReference")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *BatchListPolicyAttachments) SetMaxResults(v int64) *BatchListPolicyAttachments { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *BatchListPolicyAttachments) SetNextToken(v string) *BatchListPolicyAttachments { + s.NextToken = &v + return s +} + +// SetPolicyReference sets the PolicyReference field's value. +func (s *BatchListPolicyAttachments) SetPolicyReference(v *ObjectReference) *BatchListPolicyAttachments { + s.PolicyReference = v + return s +} + +// Represents the output of a ListPolicyAttachments response operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/clouddirectory-2016-05-10/BatchListPolicyAttachmentsResponse +type BatchListPolicyAttachmentsResponse struct { + _ struct{} `type:"structure"` + + // The pagination token. + NextToken *string `type:"string"` + + // A list of ObjectIdentifiers to which the policy is attached. + ObjectIdentifiers []*string `type:"list"` +} + +// String returns the string representation +func (s BatchListPolicyAttachmentsResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchListPolicyAttachmentsResponse) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *BatchListPolicyAttachmentsResponse) SetNextToken(v string) *BatchListPolicyAttachmentsResponse { + s.NextToken = &v + return s +} + +// SetObjectIdentifiers sets the ObjectIdentifiers field's value. +func (s *BatchListPolicyAttachmentsResponse) SetObjectIdentifiers(v []*string) *BatchListPolicyAttachmentsResponse { + s.ObjectIdentifiers = v + return s +} + +// Lists all policies from the root of the Directory to the object specified +// inside a BatchRead operation. For more information, see LookupPolicy and +// BatchReadRequest$Operations. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/clouddirectory-2016-05-10/BatchLookupPolicy +type BatchLookupPolicy struct { + _ struct{} `type:"structure"` + + // The maximum number of results to retrieve. + MaxResults *int64 `min:"1" type:"integer"` + + // The pagination token. + NextToken *string `type:"string"` + + // Reference that identifies the object whose policies will be looked up. + // + // ObjectReference is a required field + ObjectReference *ObjectReference `type:"structure" required:"true"` +} + +// String returns the string representation +func (s BatchLookupPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchLookupPolicy) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *BatchLookupPolicy) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchLookupPolicy"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.ObjectReference == nil { + invalidParams.Add(request.NewErrParamRequired("ObjectReference")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *BatchLookupPolicy) SetMaxResults(v int64) *BatchLookupPolicy { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *BatchLookupPolicy) SetNextToken(v string) *BatchLookupPolicy { + s.NextToken = &v + return s +} + +// SetObjectReference sets the ObjectReference field's value. +func (s *BatchLookupPolicy) SetObjectReference(v *ObjectReference) *BatchLookupPolicy { + s.ObjectReference = v + return s +} + +// Represents the output of a LookupPolicy response operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/clouddirectory-2016-05-10/BatchLookupPolicyResponse +type BatchLookupPolicyResponse struct { + _ struct{} `type:"structure"` + + // The pagination token. + NextToken *string `type:"string"` + + // Provides a list of paths to policies. Policies contain PolicyId, ObjectIdentifier, + // and PolicyType. For more information, see Policies (http://docs.aws.amazon.com/directoryservice/latest/admin-guide/cd_key_concepts.html#policies). + PolicyToPathList []*PolicyToPath `type:"list"` +} + +// String returns the string representation +func (s BatchLookupPolicyResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchLookupPolicyResponse) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *BatchLookupPolicyResponse) SetNextToken(v string) *BatchLookupPolicyResponse { + s.NextToken = &v + return s +} + +// SetPolicyToPathList sets the PolicyToPathList field's value. +func (s *BatchLookupPolicyResponse) SetPolicyToPathList(v []*PolicyToPath) *BatchLookupPolicyResponse { + s.PolicyToPathList = v + return s +} + // The batch read exception structure, which contains the exception type and // message. // Please also see https://docs.aws.amazon.com/goto/WebAPI/clouddirectory-2016-05-10/BatchReadException @@ -9479,12 +10972,51 @@ func (s *BatchReadInput) SetOperations(v []*BatchReadOperation) *BatchReadInput type BatchReadOperation struct { _ struct{} `type:"structure"` + // Retrieves metadata about an object. + GetObjectInformation *BatchGetObjectInformation `type:"structure"` + + // Lists indices attached to an object. + ListAttachedIndices *BatchListAttachedIndices `type:"structure"` + + // Returns a paginated list of all the incoming TypedLinkSpecifier information + // for an object. It also supports filtering by typed link facet and identity + // attributes. For more information, see Typed link (http://docs.aws.amazon.com/directoryservice/latest/admin-guide/objectsandlinks.html#typedlink). + ListIncomingTypedLinks *BatchListIncomingTypedLinks `type:"structure"` + + // Lists objects attached to the specified index. + ListIndex *BatchListIndex `type:"structure"` + // Lists all attributes that are associated with an object. ListObjectAttributes *BatchListObjectAttributes `type:"structure"` // Returns a paginated list of child objects that are associated with a given // object.
ListObjectChildren *BatchListObjectChildren `type:"structure"` + + // Retrieves all available parent paths for any object type such as node, leaf + // node, policy node, and index node objects. For more information about objects, + // see Directory Structure (http://docs.aws.amazon.com/directoryservice/latest/admin-guide/cd_key_concepts.html#dirstructure). + ListObjectParentPaths *BatchListObjectParentPaths `type:"structure"` + + // Returns policies attached to an object in pagination fashion. + ListObjectPolicies *BatchListObjectPolicies `type:"structure"` + + // Returns a paginated list of all the outgoing TypedLinkSpecifier information + // for an object. It also supports filtering by typed link facet and identity + // attributes. For more information, see Typed link (http://docs.aws.amazon.com/directoryservice/latest/admin-guide/objectsandlinks.html#typedlink). + ListOutgoingTypedLinks *BatchListOutgoingTypedLinks `type:"structure"` + + // Returns all of the ObjectIdentifiers to which a given policy is attached. + ListPolicyAttachments *BatchListPolicyAttachments `type:"structure"` + + // Lists all policies from the root of the Directory to the object specified. + // If there are no policies present, an empty list is returned. If policies + // are present, and if some objects don't have the policies attached, it returns + // the ObjectIdentifier for such objects. If policies are present, it returns + // ObjectIdentifier, policyId, and policyType. Paths that don't lead to the + // root from the target object are ignored. For more information, see Policies + // (http://docs.aws.amazon.com/directoryservice/latest/admin-guide/cd_key_concepts.html#policies). + LookupPolicy *BatchLookupPolicy `type:"structure"` } // String returns the string representation @@ -9500,6 +11032,26 @@ func (s BatchReadOperation) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
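+
+// Usage sketch (field names as defined elsewhere in this package; out is
+// assumed to be a *clouddirectory.BatchReadOutput from a BatchRead call whose
+// first operation was LookupPolicy): walking the policy paths described above.
+//
+//	resp := out.Responses[0].SuccessfulResponse.LookupPolicy
+//	for _, p := range resp.PolicyToPathList {
+//	    fmt.Println(aws.StringValue(p.Path))
+//	    for _, att := range p.Policies {
+//	        fmt.Println(aws.StringValue(att.PolicyId), aws.StringValue(att.PolicyType))
+//	    }
+//	}
+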
func (s *BatchReadOperation) Validate() error { invalidParams := request.ErrInvalidParams{Context: "BatchReadOperation"} + if s.GetObjectInformation != nil { + if err := s.GetObjectInformation.Validate(); err != nil { + invalidParams.AddNested("GetObjectInformation", err.(request.ErrInvalidParams)) + } + } + if s.ListAttachedIndices != nil { + if err := s.ListAttachedIndices.Validate(); err != nil { + invalidParams.AddNested("ListAttachedIndices", err.(request.ErrInvalidParams)) + } + } + if s.ListIncomingTypedLinks != nil { + if err := s.ListIncomingTypedLinks.Validate(); err != nil { + invalidParams.AddNested("ListIncomingTypedLinks", err.(request.ErrInvalidParams)) + } + } + if s.ListIndex != nil { + if err := s.ListIndex.Validate(); err != nil { + invalidParams.AddNested("ListIndex", err.(request.ErrInvalidParams)) + } + } if s.ListObjectAttributes != nil { if err := s.ListObjectAttributes.Validate(); err != nil { invalidParams.AddNested("ListObjectAttributes", err.(request.ErrInvalidParams)) @@ -9510,6 +11062,31 @@ func (s *BatchReadOperation) Validate() error { invalidParams.AddNested("ListObjectChildren", err.(request.ErrInvalidParams)) } } + if s.ListObjectParentPaths != nil { + if err := s.ListObjectParentPaths.Validate(); err != nil { + invalidParams.AddNested("ListObjectParentPaths", err.(request.ErrInvalidParams)) + } + } + if s.ListObjectPolicies != nil { + if err := s.ListObjectPolicies.Validate(); err != nil { + invalidParams.AddNested("ListObjectPolicies", err.(request.ErrInvalidParams)) + } + } + if s.ListOutgoingTypedLinks != nil { + if err := s.ListOutgoingTypedLinks.Validate(); err != nil { + invalidParams.AddNested("ListOutgoingTypedLinks", err.(request.ErrInvalidParams)) + } + } + if s.ListPolicyAttachments != nil { + if err := s.ListPolicyAttachments.Validate(); err != nil { + invalidParams.AddNested("ListPolicyAttachments", err.(request.ErrInvalidParams)) + } + } + if s.LookupPolicy != nil { + if err := s.LookupPolicy.Validate(); err != nil { + invalidParams.AddNested("LookupPolicy", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -9517,6 +11094,30 @@ func (s *BatchReadOperation) Validate() error { return nil } +// SetGetObjectInformation sets the GetObjectInformation field's value. +func (s *BatchReadOperation) SetGetObjectInformation(v *BatchGetObjectInformation) *BatchReadOperation { + s.GetObjectInformation = v + return s +} + +// SetListAttachedIndices sets the ListAttachedIndices field's value. +func (s *BatchReadOperation) SetListAttachedIndices(v *BatchListAttachedIndices) *BatchReadOperation { + s.ListAttachedIndices = v + return s +} + +// SetListIncomingTypedLinks sets the ListIncomingTypedLinks field's value. +func (s *BatchReadOperation) SetListIncomingTypedLinks(v *BatchListIncomingTypedLinks) *BatchReadOperation { + s.ListIncomingTypedLinks = v + return s +} + +// SetListIndex sets the ListIndex field's value. +func (s *BatchReadOperation) SetListIndex(v *BatchListIndex) *BatchReadOperation { + s.ListIndex = v + return s +} + // SetListObjectAttributes sets the ListObjectAttributes field's value. func (s *BatchReadOperation) SetListObjectAttributes(v *BatchListObjectAttributes) *BatchReadOperation { s.ListObjectAttributes = v @@ -9529,6 +11130,36 @@ func (s *BatchReadOperation) SetListObjectChildren(v *BatchListObjectChildren) * return s } +// SetListObjectParentPaths sets the ListObjectParentPaths field's value. 
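+
+// Usage sketch (client and ARN assumed; responses are expected to arrive in the
+// same order as the submitted operations): combining several read operations in
+// one round trip and checking each slot for success or exception.
+//
+//	out, err := svc.BatchRead(&clouddirectory.BatchReadInput{
+//	    DirectoryArn:     aws.String(directoryArn),
+//	    ConsistencyLevel: aws.String(clouddirectory.ConsistencyLevelEventual),
+//	    Operations: []*clouddirectory.BatchReadOperation{
+//	        {GetObjectInformation: &clouddirectory.BatchGetObjectInformation{
+//	            ObjectReference: &clouddirectory.ObjectReference{Selector: aws.String("/users/bob")}}},
+//	        {ListObjectPolicies: &clouddirectory.BatchListObjectPolicies{
+//	            ObjectReference: &clouddirectory.ObjectReference{Selector: aws.String("/users/bob")}}},
+//	    },
+//	})
+//	if err == nil {
+//	    for _, r := range out.Responses {
+//	        if r.ExceptionResponse != nil {
+//	            continue // inspect r.ExceptionResponse.Type and .Message
+//	        }
+//	        _ = r.SuccessfulResponse // use the per-operation result here
+//	    }
+//	}
+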
+func (s *BatchReadOperation) SetListObjectParentPaths(v *BatchListObjectParentPaths) *BatchReadOperation { + s.ListObjectParentPaths = v + return s +} + +// SetListObjectPolicies sets the ListObjectPolicies field's value. +func (s *BatchReadOperation) SetListObjectPolicies(v *BatchListObjectPolicies) *BatchReadOperation { + s.ListObjectPolicies = v + return s +} + +// SetListOutgoingTypedLinks sets the ListOutgoingTypedLinks field's value. +func (s *BatchReadOperation) SetListOutgoingTypedLinks(v *BatchListOutgoingTypedLinks) *BatchReadOperation { + s.ListOutgoingTypedLinks = v + return s +} + +// SetListPolicyAttachments sets the ListPolicyAttachments field's value. +func (s *BatchReadOperation) SetListPolicyAttachments(v *BatchListPolicyAttachments) *BatchReadOperation { + s.ListPolicyAttachments = v + return s +} + +// SetLookupPolicy sets the LookupPolicy field's value. +func (s *BatchReadOperation) SetLookupPolicy(v *BatchLookupPolicy) *BatchReadOperation { + s.LookupPolicy = v + return s +} + // Represents the output of a BatchRead response operation. // Please also see https://docs.aws.amazon.com/goto/WebAPI/clouddirectory-2016-05-10/BatchReadOperationResponse type BatchReadOperationResponse struct { @@ -9592,12 +11223,51 @@ func (s *BatchReadOutput) SetResponses(v []*BatchReadOperationResponse) *BatchRe type BatchReadSuccessfulResponse struct { _ struct{} `type:"structure"` + // Retrieves metadata about an object. + GetObjectInformation *BatchGetObjectInformationResponse `type:"structure"` + + // Lists indices attached to an object. + ListAttachedIndices *BatchListAttachedIndicesResponse `type:"structure"` + + // Returns a paginated list of all the incoming TypedLinkSpecifier information + // for an object. It also supports filtering by typed link facet and identity + // attributes. For more information, see Typed link (http://docs.aws.amazon.com/directoryservice/latest/admin-guide/objectsandlinks.html#typedlink). + ListIncomingTypedLinks *BatchListIncomingTypedLinksResponse `type:"structure"` + + // Lists objects attached to the specified index. + ListIndex *BatchListIndexResponse `type:"structure"` + // Lists all attributes that are associated with an object. ListObjectAttributes *BatchListObjectAttributesResponse `type:"structure"` // Returns a paginated list of child objects that are associated with a given // object. ListObjectChildren *BatchListObjectChildrenResponse `type:"structure"` + + // Retrieves all available parent paths for any object type such as node, leaf + // node, policy node, and index node objects. For more information about objects, + // see Directory Structure (http://docs.aws.amazon.com/directoryservice/latest/admin-guide/cd_key_concepts.html#dirstructure). + ListObjectParentPaths *BatchListObjectParentPathsResponse `type:"structure"` + + // Returns policies attached to an object in pagination fashion. + ListObjectPolicies *BatchListObjectPoliciesResponse `type:"structure"` + + // Returns a paginated list of all the outgoing TypedLinkSpecifier information + // for an object. It also supports filtering by typed link facet and identity + // attributes. For more information, see Typed link (http://docs.aws.amazon.com/directoryservice/latest/admin-guide/objectsandlinks.html#typedlink). + ListOutgoingTypedLinks *BatchListOutgoingTypedLinksResponse `type:"structure"` + + // Returns all of the ObjectIdentifiers to which a given policy is attached. 
+ ListPolicyAttachments *BatchListPolicyAttachmentsResponse `type:"structure"` + + // Lists all policies from the root of the Directory to the object specified. + // If there are no policies present, an empty list is returned. If policies + // are present, and if some objects don't have the policies attached, it returns + // the ObjectIdentifier for such objects. If policies are present, it returns + // ObjectIdentifier, policyId, and policyType. Paths that don't lead to the + // root from the target object are ignored. For more information, see Policies + // (http://docs.aws.amazon.com/directoryservice/latest/admin-guide/cd_key_concepts.html#policies). + LookupPolicy *BatchLookupPolicyResponse `type:"structure"` } // String returns the string representation @@ -9610,6 +11280,30 @@ func (s BatchReadSuccessfulResponse) GoString() string { return s.String() } +// SetGetObjectInformation sets the GetObjectInformation field's value. +func (s *BatchReadSuccessfulResponse) SetGetObjectInformation(v *BatchGetObjectInformationResponse) *BatchReadSuccessfulResponse { + s.GetObjectInformation = v + return s +} + +// SetListAttachedIndices sets the ListAttachedIndices field's value. +func (s *BatchReadSuccessfulResponse) SetListAttachedIndices(v *BatchListAttachedIndicesResponse) *BatchReadSuccessfulResponse { + s.ListAttachedIndices = v + return s +} + +// SetListIncomingTypedLinks sets the ListIncomingTypedLinks field's value. +func (s *BatchReadSuccessfulResponse) SetListIncomingTypedLinks(v *BatchListIncomingTypedLinksResponse) *BatchReadSuccessfulResponse { + s.ListIncomingTypedLinks = v + return s +} + +// SetListIndex sets the ListIndex field's value. +func (s *BatchReadSuccessfulResponse) SetListIndex(v *BatchListIndexResponse) *BatchReadSuccessfulResponse { + s.ListIndex = v + return s +} + // SetListObjectAttributes sets the ListObjectAttributes field's value. func (s *BatchReadSuccessfulResponse) SetListObjectAttributes(v *BatchListObjectAttributesResponse) *BatchReadSuccessfulResponse { s.ListObjectAttributes = v @@ -9622,6 +11316,36 @@ func (s *BatchReadSuccessfulResponse) SetListObjectChildren(v *BatchListObjectCh return s } +// SetListObjectParentPaths sets the ListObjectParentPaths field's value. +func (s *BatchReadSuccessfulResponse) SetListObjectParentPaths(v *BatchListObjectParentPathsResponse) *BatchReadSuccessfulResponse { + s.ListObjectParentPaths = v + return s +} + +// SetListObjectPolicies sets the ListObjectPolicies field's value. +func (s *BatchReadSuccessfulResponse) SetListObjectPolicies(v *BatchListObjectPoliciesResponse) *BatchReadSuccessfulResponse { + s.ListObjectPolicies = v + return s +} + +// SetListOutgoingTypedLinks sets the ListOutgoingTypedLinks field's value. +func (s *BatchReadSuccessfulResponse) SetListOutgoingTypedLinks(v *BatchListOutgoingTypedLinksResponse) *BatchReadSuccessfulResponse { + s.ListOutgoingTypedLinks = v + return s +} + +// SetListPolicyAttachments sets the ListPolicyAttachments field's value. +func (s *BatchReadSuccessfulResponse) SetListPolicyAttachments(v *BatchListPolicyAttachmentsResponse) *BatchReadSuccessfulResponse { + s.ListPolicyAttachments = v + return s +} + +// SetLookupPolicy sets the LookupPolicy field's value. +func (s *BatchReadSuccessfulResponse) SetLookupPolicy(v *BatchLookupPolicyResponse) *BatchReadSuccessfulResponse { + s.LookupPolicy = v + return s +} + // A batch operation to remove a facet from an object. 
// Please also see https://docs.aws.amazon.com/goto/WebAPI/clouddirectory-2016-05-10/BatchRemoveFacetFromObject type BatchRemoveFacetFromObject struct { @@ -9861,15 +11585,37 @@ type BatchWriteOperation struct { // Attaches an object to a Directory. AttachObject *BatchAttachObject `type:"structure"` + // Attaches a policy object to a regular object. An object can have a limited + // number of attached policies. + AttachPolicy *BatchAttachPolicy `type:"structure"` + + // Attaches the specified object to the specified index. + AttachToIndex *BatchAttachToIndex `type:"structure"` + + // Attaches a typed link to a specified source and target object. For more information, + // see Typed link (http://docs.aws.amazon.com/directoryservice/latest/admin-guide/objectsandlinks.html#typedlink). + AttachTypedLink *BatchAttachTypedLink `type:"structure"` + + // Creates an index object. See Indexing (http://docs.aws.amazon.com/directoryservice/latest/admin-guide/cd_indexing.html) + // for more information. + CreateIndex *BatchCreateIndex `type:"structure"` + // Creates an object. CreateObject *BatchCreateObject `type:"structure"` // Deletes an object in a Directory. DeleteObject *BatchDeleteObject `type:"structure"` + // Detaches the specified object from the specified index. + DetachFromIndex *BatchDetachFromIndex `type:"structure"` + // Detaches an object from a Directory. DetachObject *BatchDetachObject `type:"structure"` + // Detaches a typed link from a specified source and target object. For more + // information, see Typed link (http://docs.aws.amazon.com/directoryservice/latest/admin-guide/objectsandlinks.html#typedlink). + DetachTypedLink *BatchDetachTypedLink `type:"structure"` + // A batch operation that removes a facet from an object. RemoveFacetFromObject *BatchRemoveFacetFromObject `type:"structure"` @@ -9900,6 +11646,26 @@ func (s *BatchWriteOperation) Validate() error { invalidParams.AddNested("AttachObject", err.(request.ErrInvalidParams)) } } + if s.AttachPolicy != nil { + if err := s.AttachPolicy.Validate(); err != nil { + invalidParams.AddNested("AttachPolicy", err.(request.ErrInvalidParams)) + } + } + if s.AttachToIndex != nil { + if err := s.AttachToIndex.Validate(); err != nil { + invalidParams.AddNested("AttachToIndex", err.(request.ErrInvalidParams)) + } + } + if s.AttachTypedLink != nil { + if err := s.AttachTypedLink.Validate(); err != nil { + invalidParams.AddNested("AttachTypedLink", err.(request.ErrInvalidParams)) + } + } + if s.CreateIndex != nil { + if err := s.CreateIndex.Validate(); err != nil { + invalidParams.AddNested("CreateIndex", err.(request.ErrInvalidParams)) + } + } if s.CreateObject != nil { if err := s.CreateObject.Validate(); err != nil { invalidParams.AddNested("CreateObject", err.(request.ErrInvalidParams)) @@ -9910,11 +11676,21 @@ func (s *BatchWriteOperation) Validate() error { invalidParams.AddNested("DeleteObject", err.(request.ErrInvalidParams)) } } + if s.DetachFromIndex != nil { + if err := s.DetachFromIndex.Validate(); err != nil { + invalidParams.AddNested("DetachFromIndex", err.(request.ErrInvalidParams)) + } + } if s.DetachObject != nil { if err := s.DetachObject.Validate(); err != nil { invalidParams.AddNested("DetachObject", err.(request.ErrInvalidParams)) } } + if s.DetachTypedLink != nil { + if err := s.DetachTypedLink.Validate(); err != nil { + invalidParams.AddNested("DetachTypedLink", err.(request.ErrInvalidParams)) + } + } if s.RemoveFacetFromObject != nil { if err := s.RemoveFacetFromObject.Validate(); err != nil { 
invalidParams.AddNested("RemoveFacetFromObject", err.(request.ErrInvalidParams)) @@ -9944,6 +11720,30 @@ func (s *BatchWriteOperation) SetAttachObject(v *BatchAttachObject) *BatchWriteO return s } +// SetAttachPolicy sets the AttachPolicy field's value. +func (s *BatchWriteOperation) SetAttachPolicy(v *BatchAttachPolicy) *BatchWriteOperation { + s.AttachPolicy = v + return s +} + +// SetAttachToIndex sets the AttachToIndex field's value. +func (s *BatchWriteOperation) SetAttachToIndex(v *BatchAttachToIndex) *BatchWriteOperation { + s.AttachToIndex = v + return s +} + +// SetAttachTypedLink sets the AttachTypedLink field's value. +func (s *BatchWriteOperation) SetAttachTypedLink(v *BatchAttachTypedLink) *BatchWriteOperation { + s.AttachTypedLink = v + return s +} + +// SetCreateIndex sets the CreateIndex field's value. +func (s *BatchWriteOperation) SetCreateIndex(v *BatchCreateIndex) *BatchWriteOperation { + s.CreateIndex = v + return s +} + // SetCreateObject sets the CreateObject field's value. func (s *BatchWriteOperation) SetCreateObject(v *BatchCreateObject) *BatchWriteOperation { s.CreateObject = v @@ -9956,12 +11756,24 @@ func (s *BatchWriteOperation) SetDeleteObject(v *BatchDeleteObject) *BatchWriteO return s } +// SetDetachFromIndex sets the DetachFromIndex field's value. +func (s *BatchWriteOperation) SetDetachFromIndex(v *BatchDetachFromIndex) *BatchWriteOperation { + s.DetachFromIndex = v + return s +} + // SetDetachObject sets the DetachObject field's value. func (s *BatchWriteOperation) SetDetachObject(v *BatchDetachObject) *BatchWriteOperation { s.DetachObject = v return s } +// SetDetachTypedLink sets the DetachTypedLink field's value. +func (s *BatchWriteOperation) SetDetachTypedLink(v *BatchDetachTypedLink) *BatchWriteOperation { + s.DetachTypedLink = v + return s +} + // SetRemoveFacetFromObject sets the RemoveFacetFromObject field's value. func (s *BatchWriteOperation) SetRemoveFacetFromObject(v *BatchRemoveFacetFromObject) *BatchWriteOperation { s.RemoveFacetFromObject = v @@ -9985,15 +11797,37 @@ type BatchWriteOperationResponse struct { // Attaches an object to a Directory. AttachObject *BatchAttachObjectResponse `type:"structure"` + // Attaches a policy object to a regular object. An object can have a limited + // number of attached policies. + AttachPolicy *BatchAttachPolicyResponse `type:"structure"` + + // Attaches the specified object to the specified index. + AttachToIndex *BatchAttachToIndexResponse `type:"structure"` + + // Attaches a typed link to a specified source and target object. For more information, + // see Typed link (http://docs.aws.amazon.com/directoryservice/latest/admin-guide/objectsandlinks.html#typedlink). + AttachTypedLink *BatchAttachTypedLinkResponse `type:"structure"` + + // Creates an index object. See Indexing (http://docs.aws.amazon.com/directoryservice/latest/admin-guide/cd_indexing.html) + // for more information. + CreateIndex *BatchCreateIndexResponse `type:"structure"` + // Creates an object in a Directory. CreateObject *BatchCreateObjectResponse `type:"structure"` // Deletes an object in a Directory. DeleteObject *BatchDeleteObjectResponse `type:"structure"` + // Detaches the specified object from the specified index. + DetachFromIndex *BatchDetachFromIndexResponse `type:"structure"` + // Detaches an object from a Directory. DetachObject *BatchDetachObjectResponse `type:"structure"` + // Detaches a typed link from a specified source and target object. 
For more + // information, see Typed link (http://docs.aws.amazon.com/directoryservice/latest/admin-guide/objectsandlinks.html#typedlink). + DetachTypedLink *BatchDetachTypedLinkResponse `type:"structure"` + // The result of a batch remove facet from object operation. RemoveFacetFromObject *BatchRemoveFacetFromObjectResponse `type:"structure"` @@ -10023,6 +11857,30 @@ func (s *BatchWriteOperationResponse) SetAttachObject(v *BatchAttachObjectRespon return s } +// SetAttachPolicy sets the AttachPolicy field's value. +func (s *BatchWriteOperationResponse) SetAttachPolicy(v *BatchAttachPolicyResponse) *BatchWriteOperationResponse { + s.AttachPolicy = v + return s +} + +// SetAttachToIndex sets the AttachToIndex field's value. +func (s *BatchWriteOperationResponse) SetAttachToIndex(v *BatchAttachToIndexResponse) *BatchWriteOperationResponse { + s.AttachToIndex = v + return s +} + +// SetAttachTypedLink sets the AttachTypedLink field's value. +func (s *BatchWriteOperationResponse) SetAttachTypedLink(v *BatchAttachTypedLinkResponse) *BatchWriteOperationResponse { + s.AttachTypedLink = v + return s +} + +// SetCreateIndex sets the CreateIndex field's value. +func (s *BatchWriteOperationResponse) SetCreateIndex(v *BatchCreateIndexResponse) *BatchWriteOperationResponse { + s.CreateIndex = v + return s +} + // SetCreateObject sets the CreateObject field's value. func (s *BatchWriteOperationResponse) SetCreateObject(v *BatchCreateObjectResponse) *BatchWriteOperationResponse { s.CreateObject = v @@ -10035,12 +11893,24 @@ func (s *BatchWriteOperationResponse) SetDeleteObject(v *BatchDeleteObjectRespon return s } +// SetDetachFromIndex sets the DetachFromIndex field's value. +func (s *BatchWriteOperationResponse) SetDetachFromIndex(v *BatchDetachFromIndexResponse) *BatchWriteOperationResponse { + s.DetachFromIndex = v + return s +} + // SetDetachObject sets the DetachObject field's value. func (s *BatchWriteOperationResponse) SetDetachObject(v *BatchDetachObjectResponse) *BatchWriteOperationResponse { s.DetachObject = v return s } +// SetDetachTypedLink sets the DetachTypedLink field's value. +func (s *BatchWriteOperationResponse) SetDetachTypedLink(v *BatchDetachTypedLinkResponse) *BatchWriteOperationResponse { + s.DetachTypedLink = v + return s +} + // SetRemoveFacetFromObject sets the RemoveFacetFromObject field's value. func (s *BatchWriteOperationResponse) SetRemoveFacetFromObject(v *BatchRemoveFacetFromObjectResponse) *BatchWriteOperationResponse { s.RemoveFacetFromObject = v @@ -12419,7 +14289,7 @@ type ListAttachedIndicesInput struct { // The pagination token. NextToken *string `type:"string"` - // A reference to the object to that has indices attached. + // A reference to the object that has indices attached. // // TargetReference is a required field TargetReference *ObjectReference `type:"structure" required:"true"` @@ -15588,7 +17458,7 @@ type TypedLinkFacet struct { // The set of attributes that distinguish links made from this facet from each // other, in the order of significance. Listing typed links can filter on the - // values of these attributes. See ListOutgoingTypedLinks and ListIncomingTypeLinks + // values of these attributes. See ListOutgoingTypedLinks and ListIncomingTypedLinks // for details. 
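+//
+// For illustration only (hypothetical attribute names): an order of
+// []*string{aws.String("username"), aws.String("role")} means typed links
+// made from this facet are distinguished first by username and then by role.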
// // IdentityAttributeOrder is a required field @@ -16362,6 +18232,27 @@ const ( // BatchReadExceptionTypeNotNodeException is a BatchReadExceptionType enum value BatchReadExceptionTypeNotNodeException = "NotNodeException" + + // BatchReadExceptionTypeFacetValidationException is a BatchReadExceptionType enum value + BatchReadExceptionTypeFacetValidationException = "FacetValidationException" + + // BatchReadExceptionTypeCannotListParentOfRootException is a BatchReadExceptionType enum value + BatchReadExceptionTypeCannotListParentOfRootException = "CannotListParentOfRootException" + + // BatchReadExceptionTypeNotIndexException is a BatchReadExceptionType enum value + BatchReadExceptionTypeNotIndexException = "NotIndexException" + + // BatchReadExceptionTypeNotPolicyException is a BatchReadExceptionType enum value + BatchReadExceptionTypeNotPolicyException = "NotPolicyException" + + // BatchReadExceptionTypeDirectoryNotEnabledException is a BatchReadExceptionType enum value + BatchReadExceptionTypeDirectoryNotEnabledException = "DirectoryNotEnabledException" + + // BatchReadExceptionTypeLimitExceededException is a BatchReadExceptionType enum value + BatchReadExceptionTypeLimitExceededException = "LimitExceededException" + + // BatchReadExceptionTypeInternalServiceException is a BatchReadExceptionType enum value + BatchReadExceptionTypeInternalServiceException = "InternalServiceException" ) const ( @@ -16391,6 +18282,30 @@ const ( // BatchWriteExceptionTypeAccessDeniedException is a BatchWriteExceptionType enum value BatchWriteExceptionTypeAccessDeniedException = "AccessDeniedException" + + // BatchWriteExceptionTypeInvalidAttachmentException is a BatchWriteExceptionType enum value + BatchWriteExceptionTypeInvalidAttachmentException = "InvalidAttachmentException" + + // BatchWriteExceptionTypeNotIndexException is a BatchWriteExceptionType enum value + BatchWriteExceptionTypeNotIndexException = "NotIndexException" + + // BatchWriteExceptionTypeIndexedAttributeMissingException is a BatchWriteExceptionType enum value + BatchWriteExceptionTypeIndexedAttributeMissingException = "IndexedAttributeMissingException" + + // BatchWriteExceptionTypeObjectAlreadyDetachedException is a BatchWriteExceptionType enum value + BatchWriteExceptionTypeObjectAlreadyDetachedException = "ObjectAlreadyDetachedException" + + // BatchWriteExceptionTypeNotPolicyException is a BatchWriteExceptionType enum value + BatchWriteExceptionTypeNotPolicyException = "NotPolicyException" + + // BatchWriteExceptionTypeDirectoryNotEnabledException is a BatchWriteExceptionType enum value + BatchWriteExceptionTypeDirectoryNotEnabledException = "DirectoryNotEnabledException" + + // BatchWriteExceptionTypeLimitExceededException is a BatchWriteExceptionType enum value + BatchWriteExceptionTypeLimitExceededException = "LimitExceededException" + + // BatchWriteExceptionTypeUnsupportedIndexTypeException is a BatchWriteExceptionType enum value + BatchWriteExceptionTypeUnsupportedIndexTypeException = "UnsupportedIndexTypeException" ) const ( diff --git a/cli/vendor/github.com/aws/aws-sdk-go/service/cloudformation/api.go b/cli/vendor/github.com/aws/aws-sdk-go/service/cloudformation/api.go index 8b3470774..c62c63765 100644 --- a/cli/vendor/github.com/aws/aws-sdk-go/service/cloudformation/api.go +++ b/cli/vendor/github.com/aws/aws-sdk-go/service/cloudformation/api.go @@ -3,6 +3,7 @@ package cloudformation import ( + "fmt" "time" "github.com/aws/aws-sdk-go/aws" @@ -265,14 +266,14 @@ func (c *CloudFormation) CreateChangeSetRequest(input 
*CreateChangeSetInput) (re // // Returned Error Codes: // * ErrCodeAlreadyExistsException "AlreadyExistsException" -// Resource with the name requested already exists. +// The resource with the name requested already exists. // // * ErrCodeInsufficientCapabilitiesException "InsufficientCapabilitiesException" -// The template contains resources with capabilities that were not specified +// The template contains resources with capabilities that weren't specified // in the Capabilities parameter. // // * ErrCodeLimitExceededException "LimitExceededException" -// Quota for the resource has already been reached. +// The quota for the resource has already been reached. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/CreateChangeSet func (c *CloudFormation) CreateChangeSet(input *CreateChangeSetInput) (*CreateChangeSetOutput, error) { @@ -354,16 +355,16 @@ func (c *CloudFormation) CreateStackRequest(input *CreateStackInput) (req *reque // // Returned Error Codes: // * ErrCodeLimitExceededException "LimitExceededException" -// Quota for the resource has already been reached. +// The quota for the resource has already been reached. // // * ErrCodeAlreadyExistsException "AlreadyExistsException" -// Resource with the name requested already exists. +// The resource with the name requested already exists. // // * ErrCodeTokenAlreadyExistsException "TokenAlreadyExistsException" // A client request token already exists. // // * ErrCodeInsufficientCapabilitiesException "InsufficientCapabilitiesException" -// The template contains resources with capabilities that were not specified +// The template contains resources with capabilities that weren't specified // in the Capabilities parameter. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/CreateStack @@ -388,6 +389,192 @@ func (c *CloudFormation) CreateStackWithContext(ctx aws.Context, input *CreateSt return out, req.Send() } +const opCreateStackInstances = "CreateStackInstances" + +// CreateStackInstancesRequest generates a "aws/request.Request" representing the +// client's request for the CreateStackInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See CreateStackInstances for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateStackInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateStackInstancesRequest method. 
+// req, resp := client.CreateStackInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/CreateStackInstances +func (c *CloudFormation) CreateStackInstancesRequest(input *CreateStackInstancesInput) (req *request.Request, output *CreateStackInstancesOutput) { + op := &request.Operation{ + Name: opCreateStackInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateStackInstancesInput{} + } + + output = &CreateStackInstancesOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateStackInstances API operation for AWS CloudFormation. +// +// Creates stack instances for the specified accounts, within the specified +// regions. A stack instance refers to a stack in a specific account and region. +// Accounts and Regions are required parameters—you must specify at least one +// account and one region. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CloudFormation's +// API operation CreateStackInstances for usage and error information. +// +// Returned Error Codes: +// * ErrCodeStackSetNotFoundException "StackSetNotFoundException" +// The specified stack set doesn't exist. +// +// * ErrCodeOperationInProgressException "OperationInProgressException" +// Another operation is currently in progress for this stack set. Only one operation +// can be performed for a stack set at a given time. +// +// * ErrCodeOperationIdAlreadyExistsException "OperationIdAlreadyExistsException" +// The specified operation ID already exists. +// +// * ErrCodeStaleRequestException "StaleRequestException" +// Another operation has been performed on this stack set since the specified +// operation was performed. +// +// * ErrCodeInvalidOperationException "InvalidOperationException" +// The specified operation isn't valid. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The quota for the resource has already been reached. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/CreateStackInstances +func (c *CloudFormation) CreateStackInstances(input *CreateStackInstancesInput) (*CreateStackInstancesOutput, error) { + req, out := c.CreateStackInstancesRequest(input) + return out, req.Send() +} + +// CreateStackInstancesWithContext is the same as CreateStackInstances with the addition of +// the ability to pass a context and additional request options. +// +// See CreateStackInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFormation) CreateStackInstancesWithContext(ctx aws.Context, input *CreateStackInstancesInput, opts ...request.Option) (*CreateStackInstancesOutput, error) { + req, out := c.CreateStackInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateStackSet = "CreateStackSet" + +// CreateStackSetRequest generates a "aws/request.Request" representing the +// client's request for the CreateStackSet operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See CreateStackSet for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateStackSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateStackSetRequest method. +// req, resp := client.CreateStackSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/CreateStackSet +func (c *CloudFormation) CreateStackSetRequest(input *CreateStackSetInput) (req *request.Request, output *CreateStackSetOutput) { + op := &request.Operation{ + Name: opCreateStackSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateStackSetInput{} + } + + output = &CreateStackSetOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateStackSet API operation for AWS CloudFormation. +// +// Creates a stack set. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CloudFormation's +// API operation CreateStackSet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNameAlreadyExistsException "NameAlreadyExistsException" +// The specified name is already in use. +// +// * ErrCodeCreatedButModifiedException "CreatedButModifiedException" +// The specified resource exists, but has been changed. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The quota for the resource has already been reached. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/CreateStackSet +func (c *CloudFormation) CreateStackSet(input *CreateStackSetInput) (*CreateStackSetOutput, error) { + req, out := c.CreateStackSetRequest(input) + return out, req.Send() +} + +// CreateStackSetWithContext is the same as CreateStackSet with the addition of +// the ability to pass a context and additional request options. +// +// See CreateStackSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFormation) CreateStackSetWithContext(ctx aws.Context, input *CreateStackSetInput, opts ...request.Option) (*CreateStackSetOutput, error) { + req, out := c.CreateStackSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opDeleteChangeSet = "DeleteChangeSet" // DeleteChangeSetRequest generates a "aws/request.Request" representing the @@ -448,8 +635,8 @@ func (c *CloudFormation) DeleteChangeSetRequest(input *DeleteChangeSetInput) (re // // Returned Error Codes: // * ErrCodeInvalidChangeSetStatusException "InvalidChangeSetStatus" -// The specified change set cannot be used to update the stack. For example, -// the change set status might be CREATE_IN_PROGRESS or the stack status might +// The specified change set can't be used to update the stack. For example, +// the change set status might be CREATE_IN_PROGRESS, or the stack status might // be UPDATE_IN_PROGRESS. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DeleteChangeSet @@ -558,6 +745,188 @@ func (c *CloudFormation) DeleteStackWithContext(ctx aws.Context, input *DeleteSt return out, req.Send() } +const opDeleteStackInstances = "DeleteStackInstances" + +// DeleteStackInstancesRequest generates a "aws/request.Request" representing the +// client's request for the DeleteStackInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteStackInstances for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteStackInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteStackInstancesRequest method. +// req, resp := client.DeleteStackInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DeleteStackInstances +func (c *CloudFormation) DeleteStackInstancesRequest(input *DeleteStackInstancesInput) (req *request.Request, output *DeleteStackInstancesOutput) { + op := &request.Operation{ + Name: opDeleteStackInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteStackInstancesInput{} + } + + output = &DeleteStackInstancesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteStackInstances API operation for AWS CloudFormation. +// +// Deletes stack instances for the specified accounts, in the specified regions. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CloudFormation's +// API operation DeleteStackInstances for usage and error information. +// +// Returned Error Codes: +// * ErrCodeStackSetNotFoundException "StackSetNotFoundException" +// The specified stack set doesn't exist. +// +// * ErrCodeOperationInProgressException "OperationInProgressException" +// Another operation is currently in progress for this stack set. Only one operation +// can be performed for a stack set at a given time. +// +// * ErrCodeOperationIdAlreadyExistsException "OperationIdAlreadyExistsException" +// The specified operation ID already exists. 
+// +// * ErrCodeStaleRequestException "StaleRequestException" +// Another operation has been performed on this stack set since the specified +// operation was performed. +// +// * ErrCodeInvalidOperationException "InvalidOperationException" +// The specified operation isn't valid. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DeleteStackInstances +func (c *CloudFormation) DeleteStackInstances(input *DeleteStackInstancesInput) (*DeleteStackInstancesOutput, error) { + req, out := c.DeleteStackInstancesRequest(input) + return out, req.Send() +} + +// DeleteStackInstancesWithContext is the same as DeleteStackInstances with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteStackInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFormation) DeleteStackInstancesWithContext(ctx aws.Context, input *DeleteStackInstancesInput, opts ...request.Option) (*DeleteStackInstancesOutput, error) { + req, out := c.DeleteStackInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteStackSet = "DeleteStackSet" + +// DeleteStackSetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteStackSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteStackSet for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteStackSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteStackSetRequest method. +// req, resp := client.DeleteStackSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DeleteStackSet +func (c *CloudFormation) DeleteStackSetRequest(input *DeleteStackSetInput) (req *request.Request, output *DeleteStackSetOutput) { + op := &request.Operation{ + Name: opDeleteStackSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteStackSetInput{} + } + + output = &DeleteStackSetOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteStackSet API operation for AWS CloudFormation. +// +// Deletes a stack set. Before you can delete a stack set, all of its member +// stack instances must be deleted. For more information about how to do this, +// see DeleteStackInstances. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CloudFormation's +// API operation DeleteStackSet for usage and error information. 
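+//
+//    // Illustrative sketch only (not part of the generated documentation);
+//    // the stack set name is a hypothetical placeholder, and all member
+//    // stack instances must be deleted first (see DeleteStackInstances).
+//    svc := cloudformation.New(session.Must(session.NewSession()))
+//    _, err := svc.DeleteStackSet(&cloudformation.DeleteStackSetInput{
+//        StackSetName: aws.String("my-stack-set"),
+//    })
+//    if err != nil {
+//        fmt.Println(err)
+//    }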
+// +// Returned Error Codes: +// * ErrCodeStackSetNotEmptyException "StackSetNotEmptyException" +// You can't yet delete this stack set, because it still contains one or more +// stack instances. Delete all stack instances from the stack set before deleting +// the stack set. +// +// * ErrCodeOperationInProgressException "OperationInProgressException" +// Another operation is currently in progress for this stack set. Only one operation +// can be performed for a stack set at a given time. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DeleteStackSet +func (c *CloudFormation) DeleteStackSet(input *DeleteStackSetInput) (*DeleteStackSetOutput, error) { + req, out := c.DeleteStackSetRequest(input) + return out, req.Send() +} + +// DeleteStackSetWithContext is the same as DeleteStackSet with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteStackSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFormation) DeleteStackSetWithContext(ctx aws.Context, input *DeleteStackSetInput, opts ...request.Option) (*DeleteStackSetOutput, error) { + req, out := c.DeleteStackSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeAccountLimits = "DescribeAccountLimits" // DescribeAccountLimitsRequest generates a "aws/request.Request" representing the @@ -854,6 +1223,93 @@ func (c *CloudFormation) DescribeStackEventsPagesWithContext(ctx aws.Context, in return p.Err() } +const opDescribeStackInstance = "DescribeStackInstance" + +// DescribeStackInstanceRequest generates a "aws/request.Request" representing the +// client's request for the DescribeStackInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DescribeStackInstance for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeStackInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeStackInstanceRequest method. +// req, resp := client.DescribeStackInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DescribeStackInstance +func (c *CloudFormation) DescribeStackInstanceRequest(input *DescribeStackInstanceInput) (req *request.Request, output *DescribeStackInstanceOutput) { + op := &request.Operation{ + Name: opDescribeStackInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeStackInstanceInput{} + } + + output = &DescribeStackInstanceOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeStackInstance API operation for AWS CloudFormation. 
+// +// Returns the stack instance that's associated with the specified stack set, +// AWS account, and region. +// +// For a list of stack instances that are associated with a specific stack set, +// use ListStackInstances. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CloudFormation's +// API operation DescribeStackInstance for usage and error information. +// +// Returned Error Codes: +// * ErrCodeStackSetNotFoundException "StackSetNotFoundException" +// The specified stack set doesn't exist. +// +// * ErrCodeStackInstanceNotFoundException "StackInstanceNotFoundException" +// The specified stack instance doesn't exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DescribeStackInstance +func (c *CloudFormation) DescribeStackInstance(input *DescribeStackInstanceInput) (*DescribeStackInstanceOutput, error) { + req, out := c.DescribeStackInstanceRequest(input) + return out, req.Send() +} + +// DescribeStackInstanceWithContext is the same as DescribeStackInstance with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeStackInstance for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFormation) DescribeStackInstanceWithContext(ctx aws.Context, input *DescribeStackInstanceInput, opts ...request.Option) (*DescribeStackInstanceOutput, error) { + req, out := c.DescribeStackInstanceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeStackResource = "DescribeStackResource" // DescribeStackResourceRequest generates a "aws/request.Request" representing the @@ -1024,6 +1480,169 @@ func (c *CloudFormation) DescribeStackResourcesWithContext(ctx aws.Context, inpu return out, req.Send() } +const opDescribeStackSet = "DescribeStackSet" + +// DescribeStackSetRequest generates a "aws/request.Request" representing the +// client's request for the DescribeStackSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DescribeStackSet for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeStackSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeStackSetRequest method. 
+// req, resp := client.DescribeStackSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DescribeStackSet +func (c *CloudFormation) DescribeStackSetRequest(input *DescribeStackSetInput) (req *request.Request, output *DescribeStackSetOutput) { + op := &request.Operation{ + Name: opDescribeStackSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeStackSetInput{} + } + + output = &DescribeStackSetOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeStackSet API operation for AWS CloudFormation. +// +// Returns the description of the specified stack set. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CloudFormation's +// API operation DescribeStackSet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeStackSetNotFoundException "StackSetNotFoundException" +// The specified stack set doesn't exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DescribeStackSet +func (c *CloudFormation) DescribeStackSet(input *DescribeStackSetInput) (*DescribeStackSetOutput, error) { + req, out := c.DescribeStackSetRequest(input) + return out, req.Send() +} + +// DescribeStackSetWithContext is the same as DescribeStackSet with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeStackSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFormation) DescribeStackSetWithContext(ctx aws.Context, input *DescribeStackSetInput, opts ...request.Option) (*DescribeStackSetOutput, error) { + req, out := c.DescribeStackSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeStackSetOperation = "DescribeStackSetOperation" + +// DescribeStackSetOperationRequest generates a "aws/request.Request" representing the +// client's request for the DescribeStackSetOperation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DescribeStackSetOperation for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeStackSetOperation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeStackSetOperationRequest method. 
+// req, resp := client.DescribeStackSetOperationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DescribeStackSetOperation +func (c *CloudFormation) DescribeStackSetOperationRequest(input *DescribeStackSetOperationInput) (req *request.Request, output *DescribeStackSetOperationOutput) { + op := &request.Operation{ + Name: opDescribeStackSetOperation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeStackSetOperationInput{} + } + + output = &DescribeStackSetOperationOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeStackSetOperation API operation for AWS CloudFormation. +// +// Returns the description of the specified stack set operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CloudFormation's +// API operation DescribeStackSetOperation for usage and error information. +// +// Returned Error Codes: +// * ErrCodeStackSetNotFoundException "StackSetNotFoundException" +// The specified stack set doesn't exist. +// +// * ErrCodeOperationNotFoundException "OperationNotFoundException" +// The specified ID refers to an operation that doesn't exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DescribeStackSetOperation +func (c *CloudFormation) DescribeStackSetOperation(input *DescribeStackSetOperationInput) (*DescribeStackSetOperationOutput, error) { + req, out := c.DescribeStackSetOperationRequest(input) + return out, req.Send() +} + +// DescribeStackSetOperationWithContext is the same as DescribeStackSetOperation with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeStackSetOperation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFormation) DescribeStackSetOperationWithContext(ctx aws.Context, input *DescribeStackSetOperationInput, opts ...request.Option) (*DescribeStackSetOperationOutput, error) { + req, out := c.DescribeStackSetOperationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeStacks = "DescribeStacks" // DescribeStacksRequest generates a "aws/request.Request" representing the @@ -1302,8 +1921,8 @@ func (c *CloudFormation) ExecuteChangeSetRequest(input *ExecuteChangeSetInput) ( // // Returned Error Codes: // * ErrCodeInvalidChangeSetStatusException "InvalidChangeSetStatus" -// The specified change set cannot be used to update the stack. For example, -// the change set status might be CREATE_IN_PROGRESS or the stack status might +// The specified change set can't be used to update the stack. For example, +// the change set status might be CREATE_IN_PROGRESS, or the stack status might // be UPDATE_IN_PROGRESS. // // * ErrCodeChangeSetNotFoundException "ChangeSetNotFound" @@ -1311,7 +1930,7 @@ func (c *CloudFormation) ExecuteChangeSetRequest(input *ExecuteChangeSetInput) ( // for a stack, use the ListChangeSets action. 
// // * ErrCodeInsufficientCapabilitiesException "InsufficientCapabilitiesException" -// The template contains resources with capabilities that were not specified +// The template contains resources with capabilities that weren't specified // in the Capabilities parameter. // // * ErrCodeTokenAlreadyExistsException "TokenAlreadyExistsException" @@ -1564,6 +2183,11 @@ func (c *CloudFormation) GetTemplateSummaryRequest(input *GetTemplateSummaryInpu // // See the AWS API reference guide for AWS CloudFormation's // API operation GetTemplateSummary for usage and error information. +// +// Returned Error Codes: +// * ErrCodeStackSetNotFoundException "StackSetNotFoundException" +// The specified stack set doesn't exist. +// // Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/GetTemplateSummary func (c *CloudFormation) GetTemplateSummary(input *GetTemplateSummaryInput) (*GetTemplateSummaryOutput, error) { req, out := c.GetTemplateSummaryRequest(input) @@ -1937,6 +2561,88 @@ func (c *CloudFormation) ListImportsPagesWithContext(ctx aws.Context, input *Lis return p.Err() } +const opListStackInstances = "ListStackInstances" + +// ListStackInstancesRequest generates a "aws/request.Request" representing the +// client's request for the ListStackInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListStackInstances for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListStackInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListStackInstancesRequest method. +// req, resp := client.ListStackInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/ListStackInstances +func (c *CloudFormation) ListStackInstancesRequest(input *ListStackInstancesInput) (req *request.Request, output *ListStackInstancesOutput) { + op := &request.Operation{ + Name: opListStackInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListStackInstancesInput{} + } + + output = &ListStackInstancesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListStackInstances API operation for AWS CloudFormation. +// +// Returns summary information about stack instances that are associated with +// the specified stack set. You can filter for stack instances that are associated +// with a specific AWS account name or region. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CloudFormation's +// API operation ListStackInstances for usage and error information. +// +// Returned Error Codes: +// * ErrCodeStackSetNotFoundException "StackSetNotFoundException" +// The specified stack set doesn't exist. 
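+//
+//    // Illustrative sketch only (not part of the generated documentation);
+//    // the stack set name and account ID are hypothetical placeholders, and
+//    // svc is assumed to be a *cloudformation.CloudFormation client.
+//    out, err := svc.ListStackInstances(&cloudformation.ListStackInstancesInput{
+//        StackSetName:         aws.String("my-stack-set"),
+//        StackInstanceAccount: aws.String("123456789012"),
+//    })
+//    if err == nil {
+//        for _, inst := range out.Summaries {
+//            fmt.Println(aws.StringValue(inst.Region), aws.StringValue(inst.Status))
+//        }
+//    }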
+// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/ListStackInstances +func (c *CloudFormation) ListStackInstances(input *ListStackInstancesInput) (*ListStackInstancesOutput, error) { + req, out := c.ListStackInstancesRequest(input) + return out, req.Send() +} + +// ListStackInstancesWithContext is the same as ListStackInstances with the addition of +// the ability to pass a context and additional request options. +// +// See ListStackInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFormation) ListStackInstancesWithContext(ctx aws.Context, input *ListStackInstancesInput, opts ...request.Option) (*ListStackInstancesOutput, error) { + req, out := c.ListStackInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opListStackResources = "ListStackResources" // ListStackResourcesRequest generates a "aws/request.Request" representing the @@ -2071,6 +2777,245 @@ func (c *CloudFormation) ListStackResourcesPagesWithContext(ctx aws.Context, inp return p.Err() } +const opListStackSetOperationResults = "ListStackSetOperationResults" + +// ListStackSetOperationResultsRequest generates a "aws/request.Request" representing the +// client's request for the ListStackSetOperationResults operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListStackSetOperationResults for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListStackSetOperationResults method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListStackSetOperationResultsRequest method. +// req, resp := client.ListStackSetOperationResultsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/ListStackSetOperationResults +func (c *CloudFormation) ListStackSetOperationResultsRequest(input *ListStackSetOperationResultsInput) (req *request.Request, output *ListStackSetOperationResultsOutput) { + op := &request.Operation{ + Name: opListStackSetOperationResults, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListStackSetOperationResultsInput{} + } + + output = &ListStackSetOperationResultsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListStackSetOperationResults API operation for AWS CloudFormation. +// +// Returns summary information about the results of a stack set operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
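+//
+//    // Illustrative sketch only (not part of the generated documentation);
+//    // the stack set name and operation ID are hypothetical placeholders,
+//    // and svc is assumed to be a *cloudformation.CloudFormation client.
+//    out, err := svc.ListStackSetOperationResults(&cloudformation.ListStackSetOperationResultsInput{
+//        StackSetName: aws.String("my-stack-set"),
+//        OperationId:  aws.String("op-example-1234"),
+//    })
+//    if err == nil {
+//        for _, res := range out.Summaries {
+//            fmt.Println(aws.StringValue(res.Account), aws.StringValue(res.Status))
+//        }
+//    }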
+// +// See the AWS API reference guide for AWS CloudFormation's +// API operation ListStackSetOperationResults for usage and error information. +// +// Returned Error Codes: +// * ErrCodeStackSetNotFoundException "StackSetNotFoundException" +// The specified stack set doesn't exist. +// +// * ErrCodeOperationNotFoundException "OperationNotFoundException" +// The specified ID refers to an operation that doesn't exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/ListStackSetOperationResults +func (c *CloudFormation) ListStackSetOperationResults(input *ListStackSetOperationResultsInput) (*ListStackSetOperationResultsOutput, error) { + req, out := c.ListStackSetOperationResultsRequest(input) + return out, req.Send() +} + +// ListStackSetOperationResultsWithContext is the same as ListStackSetOperationResults with the addition of +// the ability to pass a context and additional request options. +// +// See ListStackSetOperationResults for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFormation) ListStackSetOperationResultsWithContext(ctx aws.Context, input *ListStackSetOperationResultsInput, opts ...request.Option) (*ListStackSetOperationResultsOutput, error) { + req, out := c.ListStackSetOperationResultsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListStackSetOperations = "ListStackSetOperations" + +// ListStackSetOperationsRequest generates a "aws/request.Request" representing the +// client's request for the ListStackSetOperations operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListStackSetOperations for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListStackSetOperations method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListStackSetOperationsRequest method. +// req, resp := client.ListStackSetOperationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/ListStackSetOperations +func (c *CloudFormation) ListStackSetOperationsRequest(input *ListStackSetOperationsInput) (req *request.Request, output *ListStackSetOperationsOutput) { + op := &request.Operation{ + Name: opListStackSetOperations, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListStackSetOperationsInput{} + } + + output = &ListStackSetOperationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListStackSetOperations API operation for AWS CloudFormation. +// +// Returns summary information about operations performed on a stack set. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CloudFormation's +// API operation ListStackSetOperations for usage and error information. +// +// Returned Error Codes: +// * ErrCodeStackSetNotFoundException "StackSetNotFoundException" +// The specified stack set doesn't exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/ListStackSetOperations +func (c *CloudFormation) ListStackSetOperations(input *ListStackSetOperationsInput) (*ListStackSetOperationsOutput, error) { + req, out := c.ListStackSetOperationsRequest(input) + return out, req.Send() +} + +// ListStackSetOperationsWithContext is the same as ListStackSetOperations with the addition of +// the ability to pass a context and additional request options. +// +// See ListStackSetOperations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFormation) ListStackSetOperationsWithContext(ctx aws.Context, input *ListStackSetOperationsInput, opts ...request.Option) (*ListStackSetOperationsOutput, error) { + req, out := c.ListStackSetOperationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListStackSets = "ListStackSets" + +// ListStackSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListStackSets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListStackSets for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListStackSets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListStackSetsRequest method. +// req, resp := client.ListStackSetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/ListStackSets +func (c *CloudFormation) ListStackSetsRequest(input *ListStackSetsInput) (req *request.Request, output *ListStackSetsOutput) { + op := &request.Operation{ + Name: opListStackSets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListStackSetsInput{} + } + + output = &ListStackSetsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListStackSets API operation for AWS CloudFormation. +// +// Returns summary information about stack sets that are associated with the +// user. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CloudFormation's +// API operation ListStackSets for usage and error information. 
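+//
+//    // Illustrative sketch only (not part of the generated documentation);
+//    // svc is assumed to be a *cloudformation.CloudFormation client, and
+//    // this lists the caller's active stack sets.
+//    out, err := svc.ListStackSets(&cloudformation.ListStackSetsInput{
+//        Status: aws.String(cloudformation.StackSetStatusActive),
+//    })
+//    if err == nil {
+//        for _, ss := range out.Summaries {
+//            fmt.Println(aws.StringValue(ss.StackSetName))
+//        }
+//    }
+//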
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/ListStackSets +func (c *CloudFormation) ListStackSets(input *ListStackSetsInput) (*ListStackSetsOutput, error) { + req, out := c.ListStackSetsRequest(input) + return out, req.Send() +} + +// ListStackSetsWithContext is the same as ListStackSets with the addition of +// the ability to pass a context and additional request options. +// +// See ListStackSets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFormation) ListStackSetsWithContext(ctx aws.Context, input *ListStackSetsInput, opts ...request.Option) (*ListStackSetsOutput, error) { + req, out := c.ListStackSetsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opListStacks = "ListStacks" // ListStacksRequest generates a "aws/request.Request" representing the @@ -2365,6 +3310,92 @@ func (c *CloudFormation) SignalResourceWithContext(ctx aws.Context, input *Signa return out, req.Send() } +const opStopStackSetOperation = "StopStackSetOperation" + +// StopStackSetOperationRequest generates a "aws/request.Request" representing the +// client's request for the StopStackSetOperation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See StopStackSetOperation for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the StopStackSetOperation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the StopStackSetOperationRequest method. +// req, resp := client.StopStackSetOperationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/StopStackSetOperation +func (c *CloudFormation) StopStackSetOperationRequest(input *StopStackSetOperationInput) (req *request.Request, output *StopStackSetOperationOutput) { + op := &request.Operation{ + Name: opStopStackSetOperation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopStackSetOperationInput{} + } + + output = &StopStackSetOperationOutput{} + req = c.newRequest(op, input, output) + return +} + +// StopStackSetOperation API operation for AWS CloudFormation. +// +// Stops an in-progress operation on a stack set and its associated stack instances. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CloudFormation's +// API operation StopStackSetOperation for usage and error information. +// +// Returned Error Codes: +// * ErrCodeStackSetNotFoundException "StackSetNotFoundException" +// The specified stack set doesn't exist. 
+// +// * ErrCodeOperationNotFoundException "OperationNotFoundException" +// The specified ID refers to an operation that doesn't exist. +// +// * ErrCodeInvalidOperationException "InvalidOperationException" +// The specified operation isn't valid. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/StopStackSetOperation +func (c *CloudFormation) StopStackSetOperation(input *StopStackSetOperationInput) (*StopStackSetOperationOutput, error) { + req, out := c.StopStackSetOperationRequest(input) + return out, req.Send() +} + +// StopStackSetOperationWithContext is the same as StopStackSetOperation with the addition of +// the ability to pass a context and additional request options. +// +// See StopStackSetOperation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFormation) StopStackSetOperationWithContext(ctx aws.Context, input *StopStackSetOperationInput, opts ...request.Option) (*StopStackSetOperationOutput, error) { + req, out := c.StopStackSetOperationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdateStack = "UpdateStack" // UpdateStackRequest generates a "aws/request.Request" representing the @@ -2429,7 +3460,7 @@ func (c *CloudFormation) UpdateStackRequest(input *UpdateStackInput) (req *reque // // Returned Error Codes: // * ErrCodeInsufficientCapabilitiesException "InsufficientCapabilitiesException" -// The template contains resources with capabilities that were not specified +// The template contains resources with capabilities that weren't specified // in the Capabilities parameter. // // * ErrCodeTokenAlreadyExistsException "TokenAlreadyExistsException" @@ -2457,6 +3488,105 @@ func (c *CloudFormation) UpdateStackWithContext(ctx aws.Context, input *UpdateSt return out, req.Send() } +const opUpdateStackSet = "UpdateStackSet" + +// UpdateStackSetRequest generates a "aws/request.Request" representing the +// client's request for the UpdateStackSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See UpdateStackSet for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateStackSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateStackSetRequest method. 
+// req, resp := client.UpdateStackSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/UpdateStackSet +func (c *CloudFormation) UpdateStackSetRequest(input *UpdateStackSetInput) (req *request.Request, output *UpdateStackSetOutput) { + op := &request.Operation{ + Name: opUpdateStackSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateStackSetInput{} + } + + output = &UpdateStackSetOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateStackSet API operation for AWS CloudFormation. +// +// Updates the stack set and all associated stack instances. +// +// Even if the stack set operation created by updating the stack set fails (completely +// or partially, below or above a specified failure tolerance), the stack set +// is updated with your changes. Subsequent CreateStackInstances calls on the +// specified stack set use the updated stack set. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CloudFormation's +// API operation UpdateStackSet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeStackSetNotFoundException "StackSetNotFoundException" +// The specified stack set doesn't exist. +// +// * ErrCodeOperationInProgressException "OperationInProgressException" +// Another operation is currently in progress for this stack set. Only one operation +// can be performed for a stack set at a given time. +// +// * ErrCodeOperationIdAlreadyExistsException "OperationIdAlreadyExistsException" +// The specified operation ID already exists. +// +// * ErrCodeStaleRequestException "StaleRequestException" +// Another operation has been performed on this stack set since the specified +// operation was performed. +// +// * ErrCodeInvalidOperationException "InvalidOperationException" +// The specified operation isn't valid. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/UpdateStackSet +func (c *CloudFormation) UpdateStackSet(input *UpdateStackSetInput) (*UpdateStackSetOutput, error) { + req, out := c.UpdateStackSetRequest(input) + return out, req.Send() +} + +// UpdateStackSetWithContext is the same as UpdateStackSet with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateStackSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFormation) UpdateStackSetWithContext(ctx aws.Context, input *UpdateStackSetInput, opts ...request.Option) (*UpdateStackSetOutput, error) { + req, out := c.UpdateStackSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+	return out, req.Send()
+}
+
 const opValidateTemplate = "ValidateTemplate"
 
 // ValidateTemplateRequest generates a "aws/request.Request" representing the
@@ -2535,6 +3665,62 @@ func (c *CloudFormation) ValidateTemplateWithContext(ctx aws.Context, input *Val
 	return out, req.Send()
 }
 
+// Structure that contains the results of the account gate function AWS CloudFormation
+// StackSets invokes, if present, before proceeding with stack set operations
+// in an account.
+//
+// Account gating enables you to specify a Lambda function for an account that
+// encapsulates any requirements that must be met before AWS CloudFormation
+// StackSets proceeds with stack set operations in that account. CloudFormation
+// invokes the function each time stack set operations are initiated for that
+// account, and only proceeds if the function returns a success code.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/AccountGateResult
+type AccountGateResult struct {
+	_ struct{} `type:"structure"`
+
+	// The status of the account gate function.
+	//
+	//    * SUCCEEDED: The account gate function has determined that the account
+	//    passes any requirements for stack set operations to occur. AWS CloudFormation
+	//    proceeds with stack operations in the account.
+	//
+	//    * FAILED: The account gate function has determined that the account does
+	//    not meet the requirements for stack set operations to occur. AWS CloudFormation
+	//    cancels the stack set operations in that account, and the stack set operation
+	//    status is set to FAILED.
+	//
+	//    * SKIPPED: An account gate function has not been specified for the account,
+	//    or the AWSCloudFormationStackSetExecutionRole of the stack set administration
+	//    account lacks permissions to invoke the function. AWS CloudFormation proceeds
+	//    with stack set operations in the account.
+	Status *string `type:"string" enum:"AccountGateStatus"`
+
+	// The reason for the account gate status assigned to this account.
+	StatusReason *string `type:"string"`
+}
+
+// String returns the string representation
+func (s AccountGateResult) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AccountGateResult) GoString() string {
+	return s.String()
+}
+
+// SetStatus sets the Status field's value.
+func (s *AccountGateResult) SetStatus(v string) *AccountGateResult {
+	s.Status = &v
+	return s
+}
+
+// SetStatusReason sets the StatusReason field's value.
+func (s *AccountGateResult) SetStatusReason(v string) *AccountGateResult {
+	s.StatusReason = &v
+	return s
+}
+
 // The AccountLimit data type.
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/AccountLimit
 type AccountLimit struct {
@@ -2797,7 +3983,7 @@ type ContinueUpdateRollbackInput struct {
 	// during the continue update rollback operation. You can specify only resources
 	// that are in the UPDATE_FAILED state because a rollback failed. You can't
 	// specify resources that are in the UPDATE_FAILED state for other reasons,
-	// for example, because an update was canceled. To check why a resource update
+	// for example, because an update was cancelled. To check why a resource update
 	// failed, use the DescribeStackResources action, and view the resource status
 	// reason.
 	//
@@ -2815,10 +4001,16 @@
 	// your stack. For example, a failed resource update might cause dependent resources
 	// to fail. In this case, it might not be necessary to skip the dependent resources.
 	//
-	// To specify resources in a nested stack, use the following format: NestedStackName.ResourceLogicalID.
-	// If the ResourceLogicalID is a stack resource (Type: AWS::CloudFormation::Stack),
-	// it must be in one of the following states: DELETE_IN_PROGRESS, DELETE_COMPLETE,
-	// or DELETE_FAILED.
+	// To skip resources that are part of nested stacks, use the following format:
+	// NestedStackName.ResourceLogicalID. If you want to specify the logical ID
+	// of a stack resource (Type: AWS::CloudFormation::Stack) in the ResourcesToSkip
+	// list, then its corresponding embedded stack must be in one of the following
+	// states: DELETE_IN_PROGRESS, DELETE_COMPLETE, or DELETE_FAILED.
+	//
+	// Don't confuse a child stack's name with its corresponding logical ID defined
+	// in the parent stack. For an example of a continue update rollback operation
+	// with nested stacks, see Using ResourcesToSkip to recover a nested stacks
+	// hierarchy (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-continueupdaterollback.html#nested-stacks).
 	ResourcesToSkip []*string `type:"list"`
 
 	// The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM)
@@ -3026,7 +4218,7 @@ type CreateChangeSetInput struct {
 	StackName *string `min:"1" type:"string" required:"true"`
 
 	// Key-value pairs to associate with this stack. AWS CloudFormation also propagates
-	// these tags to resources in the stack. You can specify a maximum of 10 tags.
+	// these tags to resources in the stack. You can specify a maximum of 50 tags.
 	Tags []*Tag `type:"list"`
 
 	// A structure that contains the body of the revised template, with a minimum
@@ -3090,6 +4282,16 @@ func (s *CreateChangeSetInput) Validate() error {
 	if s.TemplateURL != nil && len(*s.TemplateURL) < 1 {
 		invalidParams.Add(request.NewErrParamMinLen("TemplateURL", 1))
 	}
+	if s.Tags != nil {
+		for i, v := range s.Tags {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
 
 	if invalidParams.Len() > 0 {
 		return invalidParams
@@ -3250,6 +4452,17 @@ type CreateStackInput struct {
 	// plan to retry requests so that AWS CloudFormation knows that you're not attempting
 	// to create a stack with the same name. You might retry CreateStack requests
 	// to ensure that AWS CloudFormation successfully received them.
+	//
+	// All events triggered by a given stack operation are assigned the same client
+	// request token, which you can use to track operations. For example, if you
+	// execute a CreateStack operation with the token token1, then all the StackEvents
+	// generated by that operation will have ClientRequestToken set as token1.
+	//
+	// In the console, stack operations display the client request token on the
+	// Events tab. Stack operations that are initiated from the console use the
+	// token format Console-StackOperation-ID, which helps you easily identify the
+	// stack operation. For example, if you create a stack using the console, each
+	// stack event would be assigned the same token in the following format: Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002.
 	ClientRequestToken *string `min:"1" type:"string"`
 
 	// Set to true to disable rollback of the stack if stack creation failed. You
@@ -3327,7 +4540,7 @@ type CreateStackInput struct {
 	StackPolicyURL *string `min:"1" type:"string"`
 
 	// Key-value pairs to associate with this stack.
AWS CloudFormation also propagates - // these tags to the resources created in the stack. A maximum number of 10 + // these tags to the resources created in the stack. A maximum number of 50 // tags can be specified. Tags []*Tag `type:"list"` @@ -3392,6 +4605,16 @@ func (s *CreateStackInput) Validate() error { if s.TimeoutInMinutes != nil && *s.TimeoutInMinutes < 1 { invalidParams.Add(request.NewErrParamMinValue("TimeoutInMinutes", 1)) } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -3489,6 +4712,136 @@ func (s *CreateStackInput) SetTimeoutInMinutes(v int64) *CreateStackInput { return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/CreateStackInstancesInput +type CreateStackInstancesInput struct { + _ struct{} `type:"structure"` + + // The names of one or more AWS accounts that you want to create stack instances + // in the specified region(s) for. + // + // Accounts is a required field + Accounts []*string `type:"list" required:"true"` + + // The unique identifier for this stack set operation. + // + // The operation ID also functions as an idempotency token, to ensure that AWS + // CloudFormation performs the stack set operation only once, even if you retry + // the request multiple times. You might retry stack set operation requests + // to ensure that AWS CloudFormation successfully received them. + // + // If you don't specify an operation ID, the SDK generates one automatically. + // + // Repeating this stack set operation with a new operation ID retries all stack + // instances whose status is OUTDATED. + OperationId *string `min:"1" type:"string" idempotencyToken:"true"` + + // Preferences for how AWS CloudFormation performs this stack set operation. + OperationPreferences *StackSetOperationPreferences `type:"structure"` + + // The names of one or more regions where you want to create stack instances + // using the specified AWS account(s). + // + // Regions is a required field + Regions []*string `type:"list" required:"true"` + + // The name or unique ID of the stack set that you want to create stack instances + // from. + // + // StackSetName is a required field + StackSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateStackInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateStackInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
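+//
+// A minimal sketch (illustrative only, with placeholder values) of validating
+// parameters explicitly before sending; the SDK's request handlers also invoke
+// Validate automatically when the request is sent.
+//
+//    in := &cloudformation.CreateStackInstancesInput{
+//        StackSetName: aws.String("my-stack-set"),            // placeholder
+//        Accounts:     []*string{aws.String("123456789012")},  // placeholder
+//        Regions:      []*string{aws.String("us-east-1")},
+//    }
+//    if err := in.Validate(); err != nil {
+//        // handle request.ErrInvalidParams (e.g. a missing required field)
+//    }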
+func (s *CreateStackInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateStackInstancesInput"} + if s.Accounts == nil { + invalidParams.Add(request.NewErrParamRequired("Accounts")) + } + if s.OperationId != nil && len(*s.OperationId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("OperationId", 1)) + } + if s.Regions == nil { + invalidParams.Add(request.NewErrParamRequired("Regions")) + } + if s.StackSetName == nil { + invalidParams.Add(request.NewErrParamRequired("StackSetName")) + } + if s.OperationPreferences != nil { + if err := s.OperationPreferences.Validate(); err != nil { + invalidParams.AddNested("OperationPreferences", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccounts sets the Accounts field's value. +func (s *CreateStackInstancesInput) SetAccounts(v []*string) *CreateStackInstancesInput { + s.Accounts = v + return s +} + +// SetOperationId sets the OperationId field's value. +func (s *CreateStackInstancesInput) SetOperationId(v string) *CreateStackInstancesInput { + s.OperationId = &v + return s +} + +// SetOperationPreferences sets the OperationPreferences field's value. +func (s *CreateStackInstancesInput) SetOperationPreferences(v *StackSetOperationPreferences) *CreateStackInstancesInput { + s.OperationPreferences = v + return s +} + +// SetRegions sets the Regions field's value. +func (s *CreateStackInstancesInput) SetRegions(v []*string) *CreateStackInstancesInput { + s.Regions = v + return s +} + +// SetStackSetName sets the StackSetName field's value. +func (s *CreateStackInstancesInput) SetStackSetName(v string) *CreateStackInstancesInput { + s.StackSetName = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/CreateStackInstancesOutput +type CreateStackInstancesOutput struct { + _ struct{} `type:"structure"` + + // The unique identifier for this stack set operation. + OperationId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateStackInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateStackInstancesOutput) GoString() string { + return s.String() +} + +// SetOperationId sets the OperationId field's value. +func (s *CreateStackInstancesOutput) SetOperationId(v string) *CreateStackInstancesOutput { + s.OperationId = &v + return s +} + // The output for a CreateStack action. // Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/CreateStackOutput type CreateStackOutput struct { @@ -3514,6 +4867,218 @@ func (s *CreateStackOutput) SetStackId(v string) *CreateStackOutput { return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/CreateStackSetInput +type CreateStackSetInput struct { + _ struct{} `type:"structure"` + + // A list of values that you must specify before AWS CloudFormation can create + // certain stack sets. Some stack set templates might include resources that + // can affect permissions in your AWS account—for example, by creating new AWS + // Identity and Access Management (IAM) users. For those stack sets, you must + // explicitly acknowledge their capabilities by specifying this parameter. + // + // The only valid values are CAPABILITY_IAM and CAPABILITY_NAMED_IAM. 
The following + // resources require you to specify this parameter: + // + // * AWS::IAM::AccessKey + // + // * AWS::IAM::Group + // + // * AWS::IAM::InstanceProfile + // + // * AWS::IAM::Policy + // + // * AWS::IAM::Role + // + // * AWS::IAM::User + // + // * AWS::IAM::UserToGroupAddition + // + // If your stack template contains these resources, we recommend that you review + // all permissions that are associated with them and edit their permissions + // if necessary. + // + // If you have IAM resources, you can specify either capability. If you have + // IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. If + // you don't specify this parameter, this action returns an InsufficientCapabilities + // error. + // + // For more information, see Acknowledging IAM Resources in AWS CloudFormation + // Templates. (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-template.html#capabilities) + Capabilities []*string `type:"list"` + + // A unique identifier for this CreateStackSet request. Specify this token if + // you plan to retry requests so that AWS CloudFormation knows that you're not + // attempting to create another stack set with the same name. You might retry + // CreateStackSet requests to ensure that AWS CloudFormation successfully received + // them. + // + // If you don't specify an operation ID, the SDK generates one automatically. + ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` + + // A description of the stack set. You can use the description to identify the + // stack set's purpose or other important information. + Description *string `min:"1" type:"string"` + + // The input parameters for the stack set template. + Parameters []*Parameter `type:"list"` + + // The name to associate with the stack set. The name must be unique in the + // region where you create your stack set. + // + // A stack name can contain only alphanumeric characters (case-sensitive) and + // hyphens. It must start with an alphabetic character and can't be longer than + // 128 characters. + // + // StackSetName is a required field + StackSetName *string `type:"string" required:"true"` + + // The key-value pairs to associate with this stack set and the stacks created + // from it. AWS CloudFormation also propagates these tags to supported resources + // that are created in the stacks. A maximum number of 50 tags can be specified. + // + // If you specify tags as part of a CreateStackSet action, AWS CloudFormation + // checks to see if you have the required IAM permission to tag resources. If + // you don't, the entire CreateStackSet action fails with an access denied error, + // and the stack set is not created. + Tags []*Tag `type:"list"` + + // The structure that contains the template body, with a minimum length of 1 + // byte and a maximum length of 51,200 bytes. For more information, see Template + // Anatomy (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html) + // in the AWS CloudFormation User Guide. + // + // Conditional: You must specify either the TemplateBody or the TemplateURL + // parameter, but not both. + TemplateBody *string `min:"1" type:"string"` + + // The location of the file that contains the template body. The URL must point + // to a template (maximum size: 460,800 bytes) that's located in an Amazon S3 + // bucket. For more information, see Template Anatomy (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html) + // in the AWS CloudFormation User Guide. 
+ // + // Conditional: You must specify either the TemplateBody or the TemplateURL + // parameter, but not both. + TemplateURL *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateStackSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateStackSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateStackSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateStackSetInput"} + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1)) + } + if s.Description != nil && len(*s.Description) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Description", 1)) + } + if s.StackSetName == nil { + invalidParams.Add(request.NewErrParamRequired("StackSetName")) + } + if s.TemplateBody != nil && len(*s.TemplateBody) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TemplateBody", 1)) + } + if s.TemplateURL != nil && len(*s.TemplateURL) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TemplateURL", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCapabilities sets the Capabilities field's value. +func (s *CreateStackSetInput) SetCapabilities(v []*string) *CreateStackSetInput { + s.Capabilities = v + return s +} + +// SetClientRequestToken sets the ClientRequestToken field's value. +func (s *CreateStackSetInput) SetClientRequestToken(v string) *CreateStackSetInput { + s.ClientRequestToken = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *CreateStackSetInput) SetDescription(v string) *CreateStackSetInput { + s.Description = &v + return s +} + +// SetParameters sets the Parameters field's value. +func (s *CreateStackSetInput) SetParameters(v []*Parameter) *CreateStackSetInput { + s.Parameters = v + return s +} + +// SetStackSetName sets the StackSetName field's value. +func (s *CreateStackSetInput) SetStackSetName(v string) *CreateStackSetInput { + s.StackSetName = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateStackSetInput) SetTags(v []*Tag) *CreateStackSetInput { + s.Tags = v + return s +} + +// SetTemplateBody sets the TemplateBody field's value. +func (s *CreateStackSetInput) SetTemplateBody(v string) *CreateStackSetInput { + s.TemplateBody = &v + return s +} + +// SetTemplateURL sets the TemplateURL field's value. +func (s *CreateStackSetInput) SetTemplateURL(v string) *CreateStackSetInput { + s.TemplateURL = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/CreateStackSetOutput +type CreateStackSetOutput struct { + _ struct{} `type:"structure"` + + // The ID of the stack set that you're creating. + StackSetId *string `type:"string"` +} + +// String returns the string representation +func (s CreateStackSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateStackSetOutput) GoString() string { + return s.String() +} + +// SetStackSetId sets the StackSetId field's value. 
+func (s *CreateStackSetOutput) SetStackSetId(v string) *CreateStackSetOutput {
+	s.StackSetId = &v
+	return s
+}
+
 // The input for the DeleteChangeSet action.
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DeleteChangeSetInput
 type DeleteChangeSetInput struct {
@@ -3596,6 +5161,17 @@ type DeleteStackInput struct {
 	// plan to retry requests so that AWS CloudFormation knows that you're not attempting
 	// to delete a stack with the same name. You might retry DeleteStack requests
 	// to ensure that AWS CloudFormation successfully received them.
+	//
+	// All events triggered by a given stack operation are assigned the same client
+	// request token, which you can use to track operations. For example, if you
+	// execute a CreateStack operation with the token token1, then all the StackEvents
+	// generated by that operation will have ClientRequestToken set as token1.
+	//
+	// In the console, stack operations display the client request token on the
+	// Events tab. Stack operations that are initiated from the console use the
+	// token format Console-StackOperation-ID, which helps you easily identify the
+	// stack operation. For example, if you create a stack using the console, each
+	// stack event would be assigned the same token in the following format: Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002.
 	ClientRequestToken *string `min:"1" type:"string"`
 
 	// For stacks in the DELETE_FAILED state, a list of resource logical IDs that
@@ -3674,6 +5250,150 @@ func (s *DeleteStackInput) SetStackName(v string) *DeleteStackInput {
 	return s
 }
 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DeleteStackInstancesInput
+type DeleteStackInstancesInput struct {
+	_ struct{} `type:"structure"`
+
+	// The names of the AWS accounts that you want to delete stack instances for.
+	//
+	// Accounts is a required field
+	Accounts []*string `type:"list" required:"true"`
+
+	// The unique identifier for this stack set operation.
+	//
+	// If you don't specify an operation ID, the SDK generates one automatically.
+	//
+	// The operation ID also functions as an idempotency token, to ensure that AWS
+	// CloudFormation performs the stack set operation only once, even if you retry
+	// the request multiple times. You can retry stack set operation requests to
+	// ensure that AWS CloudFormation successfully received them.
+	//
+	// Repeating this stack set operation with a new operation ID retries all stack
+	// instances whose status is OUTDATED.
+	OperationId *string `min:"1" type:"string" idempotencyToken:"true"`
+
+	// Preferences for how AWS CloudFormation performs this stack set operation.
+	OperationPreferences *StackSetOperationPreferences `type:"structure"`
+
+	// The regions where you want to delete stack set instances.
+	//
+	// Regions is a required field
+	Regions []*string `type:"list" required:"true"`
+
+	// Removes the stack instances from the specified stack set, but doesn't delete
+	// the stacks. You can't reassociate a retained stack or add an existing, saved
+	// stack to a new stack set.
+	//
+	// RetainStacks is a required field
+	RetainStacks *bool `type:"boolean" required:"true"`
+
+	// The name or unique ID of the stack set that you want to delete stack instances
+	// for.
+ // + // StackSetName is a required field + StackSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteStackInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteStackInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteStackInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteStackInstancesInput"} + if s.Accounts == nil { + invalidParams.Add(request.NewErrParamRequired("Accounts")) + } + if s.OperationId != nil && len(*s.OperationId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("OperationId", 1)) + } + if s.Regions == nil { + invalidParams.Add(request.NewErrParamRequired("Regions")) + } + if s.RetainStacks == nil { + invalidParams.Add(request.NewErrParamRequired("RetainStacks")) + } + if s.StackSetName == nil { + invalidParams.Add(request.NewErrParamRequired("StackSetName")) + } + if s.OperationPreferences != nil { + if err := s.OperationPreferences.Validate(); err != nil { + invalidParams.AddNested("OperationPreferences", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccounts sets the Accounts field's value. +func (s *DeleteStackInstancesInput) SetAccounts(v []*string) *DeleteStackInstancesInput { + s.Accounts = v + return s +} + +// SetOperationId sets the OperationId field's value. +func (s *DeleteStackInstancesInput) SetOperationId(v string) *DeleteStackInstancesInput { + s.OperationId = &v + return s +} + +// SetOperationPreferences sets the OperationPreferences field's value. +func (s *DeleteStackInstancesInput) SetOperationPreferences(v *StackSetOperationPreferences) *DeleteStackInstancesInput { + s.OperationPreferences = v + return s +} + +// SetRegions sets the Regions field's value. +func (s *DeleteStackInstancesInput) SetRegions(v []*string) *DeleteStackInstancesInput { + s.Regions = v + return s +} + +// SetRetainStacks sets the RetainStacks field's value. +func (s *DeleteStackInstancesInput) SetRetainStacks(v bool) *DeleteStackInstancesInput { + s.RetainStacks = &v + return s +} + +// SetStackSetName sets the StackSetName field's value. +func (s *DeleteStackInstancesInput) SetStackSetName(v string) *DeleteStackInstancesInput { + s.StackSetName = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DeleteStackInstancesOutput +type DeleteStackInstancesOutput struct { + _ struct{} `type:"structure"` + + // The unique identifier for this stack set operation. + OperationId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeleteStackInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteStackInstancesOutput) GoString() string { + return s.String() +} + +// SetOperationId sets the OperationId field's value. 
+func (s *DeleteStackInstancesOutput) SetOperationId(v string) *DeleteStackInstancesOutput { + s.OperationId = &v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DeleteStackOutput type DeleteStackOutput struct { _ struct{} `type:"structure"` @@ -3689,6 +5409,61 @@ func (s DeleteStackOutput) GoString() string { return s.String() } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DeleteStackSetInput +type DeleteStackSetInput struct { + _ struct{} `type:"structure"` + + // The name or unique ID of the stack set that you're deleting. You can obtain + // this value by running ListStackSets. + // + // StackSetName is a required field + StackSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteStackSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteStackSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteStackSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteStackSetInput"} + if s.StackSetName == nil { + invalidParams.Add(request.NewErrParamRequired("StackSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStackSetName sets the StackSetName field's value. +func (s *DeleteStackSetInput) SetStackSetName(v string) *DeleteStackSetInput { + s.StackSetName = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DeleteStackSetOutput +type DeleteStackSetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteStackSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteStackSetOutput) GoString() string { + return s.String() +} + // The input for the DescribeAccountLimits action. // Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DescribeAccountLimitsInput type DescribeAccountLimitsInput struct { @@ -4088,6 +5863,98 @@ func (s *DescribeStackEventsOutput) SetStackEvents(v []*StackEvent) *DescribeSta return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DescribeStackInstanceInput +type DescribeStackInstanceInput struct { + _ struct{} `type:"structure"` + + // The ID of an AWS account that's associated with this stack instance. + // + // StackInstanceAccount is a required field + StackInstanceAccount *string `type:"string" required:"true"` + + // The name of a region that's associated with this stack instance. + // + // StackInstanceRegion is a required field + StackInstanceRegion *string `type:"string" required:"true"` + + // The name or the unique stack ID of the stack set that you want to get stack + // instance information for. + // + // StackSetName is a required field + StackSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeStackInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStackInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeStackInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeStackInstanceInput"} + if s.StackInstanceAccount == nil { + invalidParams.Add(request.NewErrParamRequired("StackInstanceAccount")) + } + if s.StackInstanceRegion == nil { + invalidParams.Add(request.NewErrParamRequired("StackInstanceRegion")) + } + if s.StackSetName == nil { + invalidParams.Add(request.NewErrParamRequired("StackSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStackInstanceAccount sets the StackInstanceAccount field's value. +func (s *DescribeStackInstanceInput) SetStackInstanceAccount(v string) *DescribeStackInstanceInput { + s.StackInstanceAccount = &v + return s +} + +// SetStackInstanceRegion sets the StackInstanceRegion field's value. +func (s *DescribeStackInstanceInput) SetStackInstanceRegion(v string) *DescribeStackInstanceInput { + s.StackInstanceRegion = &v + return s +} + +// SetStackSetName sets the StackSetName field's value. +func (s *DescribeStackInstanceInput) SetStackSetName(v string) *DescribeStackInstanceInput { + s.StackSetName = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DescribeStackInstanceOutput +type DescribeStackInstanceOutput struct { + _ struct{} `type:"structure"` + + // The stack instance that matches the specified request parameters. + StackInstance *StackInstance `type:"structure"` +} + +// String returns the string representation +func (s DescribeStackInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStackInstanceOutput) GoString() string { + return s.String() +} + +// SetStackInstance sets the StackInstance field's value. +func (s *DescribeStackInstanceOutput) SetStackInstance(v *StackInstance) *DescribeStackInstanceOutput { + s.StackInstance = v + return s +} + // The input for DescribeStackResource action. // Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DescribeStackResourceInput type DescribeStackResourceInput struct { @@ -4270,6 +6137,149 @@ func (s *DescribeStackResourcesOutput) SetStackResources(v []*StackResource) *De return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DescribeStackSetInput +type DescribeStackSetInput struct { + _ struct{} `type:"structure"` + + // The name or unique ID of the stack set whose description you want. + // + // StackSetName is a required field + StackSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeStackSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStackSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeStackSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeStackSetInput"} + if s.StackSetName == nil { + invalidParams.Add(request.NewErrParamRequired("StackSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStackSetName sets the StackSetName field's value. 
+func (s *DescribeStackSetInput) SetStackSetName(v string) *DescribeStackSetInput { + s.StackSetName = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DescribeStackSetOperationInput +type DescribeStackSetOperationInput struct { + _ struct{} `type:"structure"` + + // The unique ID of the stack set operation. + // + // OperationId is a required field + OperationId *string `min:"1" type:"string" required:"true"` + + // The name or the unique stack ID of the stack set for the stack operation. + // + // StackSetName is a required field + StackSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeStackSetOperationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStackSetOperationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeStackSetOperationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeStackSetOperationInput"} + if s.OperationId == nil { + invalidParams.Add(request.NewErrParamRequired("OperationId")) + } + if s.OperationId != nil && len(*s.OperationId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("OperationId", 1)) + } + if s.StackSetName == nil { + invalidParams.Add(request.NewErrParamRequired("StackSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOperationId sets the OperationId field's value. +func (s *DescribeStackSetOperationInput) SetOperationId(v string) *DescribeStackSetOperationInput { + s.OperationId = &v + return s +} + +// SetStackSetName sets the StackSetName field's value. +func (s *DescribeStackSetOperationInput) SetStackSetName(v string) *DescribeStackSetOperationInput { + s.StackSetName = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DescribeStackSetOperationOutput +type DescribeStackSetOperationOutput struct { + _ struct{} `type:"structure"` + + // The specified stack set operation. + StackSetOperation *StackSetOperation `type:"structure"` +} + +// String returns the string representation +func (s DescribeStackSetOperationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStackSetOperationOutput) GoString() string { + return s.String() +} + +// SetStackSetOperation sets the StackSetOperation field's value. +func (s *DescribeStackSetOperationOutput) SetStackSetOperation(v *StackSetOperation) *DescribeStackSetOperationOutput { + s.StackSetOperation = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DescribeStackSetOutput +type DescribeStackSetOutput struct { + _ struct{} `type:"structure"` + + // The specified stack set. + StackSet *StackSet `type:"structure"` +} + +// String returns the string representation +func (s DescribeStackSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStackSetOutput) GoString() string { + return s.String() +} + +// SetStackSet sets the StackSet field's value. +func (s *DescribeStackSetOutput) SetStackSet(v *StackSet) *DescribeStackSetOutput { + s.StackSet = v + return s +} + // The input for DescribeStacks action. 
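+//
+// A minimal usage sketch (illustrative only, not generated code), assuming svc
+// is a configured *cloudformation.CloudFormation client and "my-stack" is a
+// placeholder stack name:
+//
+//    out, err := svc.DescribeStacks(&cloudformation.DescribeStacksInput{
+//        StackName: aws.String("my-stack"),
+//    })
+//    if err == nil && len(out.Stacks) > 0 {
+//        fmt.Println(aws.StringValue(out.Stacks[0].StackStatus))
+//    }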
// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DescribeStacksInput type DescribeStacksInput struct { @@ -4788,6 +6798,9 @@ type GetTemplateSummaryInput struct { // TemplateBody, or TemplateURL. StackName *string `min:"1" type:"string"` + // The name or unique ID of the stack set from which the stack was created. + StackSetName *string `min:"1" type:"string"` + // Structure containing the template body with a minimum length of 1 byte and // a maximum length of 51,200 bytes. For more information about templates, see // Template Anatomy (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html) @@ -4823,6 +6836,9 @@ func (s *GetTemplateSummaryInput) Validate() error { if s.StackName != nil && len(*s.StackName) < 1 { invalidParams.Add(request.NewErrParamMinLen("StackName", 1)) } + if s.StackSetName != nil && len(*s.StackSetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StackSetName", 1)) + } if s.TemplateBody != nil && len(*s.TemplateBody) < 1 { invalidParams.Add(request.NewErrParamMinLen("TemplateBody", 1)) } @@ -4842,6 +6858,12 @@ func (s *GetTemplateSummaryInput) SetStackName(v string) *GetTemplateSummaryInpu return s } +// SetStackSetName sets the StackSetName field's value. +func (s *GetTemplateSummaryInput) SetStackSetName(v string) *GetTemplateSummaryInput { + s.StackSetName = &v + return s +} + // SetTemplateBody sets the TemplateBody field's value. func (s *GetTemplateSummaryInput) SetTemplateBody(v string) *GetTemplateSummaryInput { s.TemplateBody = &v @@ -5205,6 +7227,132 @@ func (s *ListImportsOutput) SetNextToken(v string) *ListImportsOutput { return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/ListStackInstancesInput +type ListStackInstancesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to be returned with a single call. If the number + // of available results exceeds this maximum, the response includes a NextToken + // value that you can assign to the NextToken request parameter to get the next + // set of results. + MaxResults *int64 `min:"1" type:"integer"` + + // If the previous request didn't return all of the remaining results, the response's + // NextToken parameter value is set to a token. To retrieve the next set of + // results, call ListStackInstances again and assign that token to the request + // object's NextToken parameter. If there are no remaining results, the previous + // response object's NextToken parameter is set to null. + NextToken *string `min:"1" type:"string"` + + // The name of the AWS account that you want to list stack instances for. + StackInstanceAccount *string `type:"string"` + + // The name of the region where you want to list stack instances. + StackInstanceRegion *string `type:"string"` + + // The name or unique ID of the stack set that you want to list stack instances + // for. + // + // StackSetName is a required field + StackSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ListStackInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListStackInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListStackInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListStackInstancesInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.StackSetName == nil { + invalidParams.Add(request.NewErrParamRequired("StackSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListStackInstancesInput) SetMaxResults(v int64) *ListStackInstancesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListStackInstancesInput) SetNextToken(v string) *ListStackInstancesInput { + s.NextToken = &v + return s +} + +// SetStackInstanceAccount sets the StackInstanceAccount field's value. +func (s *ListStackInstancesInput) SetStackInstanceAccount(v string) *ListStackInstancesInput { + s.StackInstanceAccount = &v + return s +} + +// SetStackInstanceRegion sets the StackInstanceRegion field's value. +func (s *ListStackInstancesInput) SetStackInstanceRegion(v string) *ListStackInstancesInput { + s.StackInstanceRegion = &v + return s +} + +// SetStackSetName sets the StackSetName field's value. +func (s *ListStackInstancesInput) SetStackSetName(v string) *ListStackInstancesInput { + s.StackSetName = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/ListStackInstancesOutput +type ListStackInstancesOutput struct { + _ struct{} `type:"structure"` + + // If the request doesn't return all of the remaining results, NextToken is + // set to a token. To retrieve the next set of results, call ListStackInstances + // again and assign that token to the request object's NextToken parameter. + // If the request returns all results, NextToken is set to null. + NextToken *string `min:"1" type:"string"` + + // A list of StackInstanceSummary structures that contain information about + // the specified stack instances. + Summaries []*StackInstanceSummary `type:"list"` +} + +// String returns the string representation +func (s ListStackInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListStackInstancesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListStackInstancesOutput) SetNextToken(v string) *ListStackInstancesOutput { + s.NextToken = &v + return s +} + +// SetSummaries sets the Summaries field's value. +func (s *ListStackInstancesOutput) SetSummaries(v []*StackInstanceSummary) *ListStackInstancesOutput { + s.Summaries = v + return s +} + // The input for the ListStackResource action. // Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/ListStackResourcesInput type ListStackResourcesInput struct { @@ -5301,6 +7449,342 @@ func (s *ListStackResourcesOutput) SetStackResourceSummaries(v []*StackResourceS return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/ListStackSetOperationResultsInput +type ListStackSetOperationResultsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to be returned with a single call. 
If the number
+	// of available results exceeds this maximum, the response includes a NextToken
+	// value that you can assign to the NextToken request parameter to get the next
+	// set of results.
+	MaxResults *int64 `min:"1" type:"integer"`
+
+	// If the previous request didn't return all of the remaining results, the response
+	// object's NextToken parameter value is set to a token. To retrieve the next
+	// set of results, call ListStackSetOperationResults again and assign that token
+	// to the request object's NextToken parameter. If there are no remaining results,
+	// the previous response object's NextToken parameter is set to null.
+	NextToken *string `min:"1" type:"string"`
+
+	// The ID of the stack set operation.
+	//
+	// OperationId is a required field
+	OperationId *string `min:"1" type:"string" required:"true"`
+
+	// The name or unique ID of the stack set that you want to get operation results
+	// for.
+	//
+	// StackSetName is a required field
+	StackSetName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListStackSetOperationResultsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListStackSetOperationResultsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListStackSetOperationResultsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListStackSetOperationResultsInput"}
+	if s.MaxResults != nil && *s.MaxResults < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+	}
+	if s.NextToken != nil && len(*s.NextToken) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
+	}
+	if s.OperationId == nil {
+		invalidParams.Add(request.NewErrParamRequired("OperationId"))
+	}
+	if s.OperationId != nil && len(*s.OperationId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("OperationId", 1))
+	}
+	if s.StackSetName == nil {
+		invalidParams.Add(request.NewErrParamRequired("StackSetName"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *ListStackSetOperationResultsInput) SetMaxResults(v int64) *ListStackSetOperationResultsInput {
+	s.MaxResults = &v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListStackSetOperationResultsInput) SetNextToken(v string) *ListStackSetOperationResultsInput {
+	s.NextToken = &v
+	return s
+}
+
+// SetOperationId sets the OperationId field's value.
+func (s *ListStackSetOperationResultsInput) SetOperationId(v string) *ListStackSetOperationResultsInput {
+	s.OperationId = &v
+	return s
+}
+
+// SetStackSetName sets the StackSetName field's value.
+func (s *ListStackSetOperationResultsInput) SetStackSetName(v string) *ListStackSetOperationResultsInput {
+	s.StackSetName = &v
+	return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/ListStackSetOperationResultsOutput
+type ListStackSetOperationResultsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// If the request doesn't return all results, NextToken is set to a token. To
+	// retrieve the next set of results, call ListStackSetOperationResults again
+	// and assign that token to the request object's NextToken parameter. If there
+	// are no remaining results, NextToken is set to null.
+ NextToken *string `min:"1" type:"string"` + + // A list of StackSetOperationResultSummary structures that contain information + // about the specified operation results, for accounts and regions that are + // included in the operation. + Summaries []*StackSetOperationResultSummary `type:"list"` +} + +// String returns the string representation +func (s ListStackSetOperationResultsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListStackSetOperationResultsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListStackSetOperationResultsOutput) SetNextToken(v string) *ListStackSetOperationResultsOutput { + s.NextToken = &v + return s +} + +// SetSummaries sets the Summaries field's value. +func (s *ListStackSetOperationResultsOutput) SetSummaries(v []*StackSetOperationResultSummary) *ListStackSetOperationResultsOutput { + s.Summaries = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/ListStackSetOperationsInput +type ListStackSetOperationsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to be returned with a single call. If the number + // of available results exceeds this maximum, the response includes a NextToken + // value that you can assign to the NextToken request parameter to get the next + // set of results. + MaxResults *int64 `min:"1" type:"integer"` + + // If the previous paginated request didn't return all of the remaining results, + // the response object's NextToken parameter value is set to a token. To retrieve + // the next set of results, call ListStackSetOperations again and assign that + // token to the request object's NextToken parameter. If there are no remaining + // results, the previous response object's NextToken parameter is set to null. + NextToken *string `min:"1" type:"string"` + + // The name or unique ID of the stack set that you want to get operation summaries + // for. + // + // StackSetName is a required field + StackSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ListStackSetOperationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListStackSetOperationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListStackSetOperationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListStackSetOperationsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.StackSetName == nil { + invalidParams.Add(request.NewErrParamRequired("StackSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListStackSetOperationsInput) SetMaxResults(v int64) *ListStackSetOperationsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListStackSetOperationsInput) SetNextToken(v string) *ListStackSetOperationsInput { + s.NextToken = &v + return s +} + +// SetStackSetName sets the StackSetName field's value. 
+func (s *ListStackSetOperationsInput) SetStackSetName(v string) *ListStackSetOperationsInput {
+ s.StackSetName = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/ListStackSetOperationsOutput
+type ListStackSetOperationsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If the request doesn't return all results, NextToken is set to a token. To
+ // retrieve the next set of results, call ListStackSetOperations again and assign
+ // that token to the request object's NextToken parameter. If there are no remaining
+ // results, NextToken is set to null.
+ NextToken *string `min:"1" type:"string"`
+
+ // A list of StackSetOperationSummary structures that contain summary information
+ // about operations for the specified stack set.
+ Summaries []*StackSetOperationSummary `type:"list"`
+}
+
+// String returns the string representation
+func (s ListStackSetOperationsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListStackSetOperationsOutput) GoString() string {
+ return s.String()
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListStackSetOperationsOutput) SetNextToken(v string) *ListStackSetOperationsOutput {
+ s.NextToken = &v
+ return s
+}
+
+// SetSummaries sets the Summaries field's value.
+func (s *ListStackSetOperationsOutput) SetSummaries(v []*StackSetOperationSummary) *ListStackSetOperationsOutput {
+ s.Summaries = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/ListStackSetsInput
+type ListStackSetsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The maximum number of results to be returned with a single call. If the number
+ // of available results exceeds this maximum, the response includes a NextToken
+ // value that you can assign to the NextToken request parameter to get the next
+ // set of results.
+ MaxResults *int64 `min:"1" type:"integer"`
+
+ // If the previous paginated request didn't return all of the remaining results,
+ // the response object's NextToken parameter value is set to a token. To retrieve
+ // the next set of results, call ListStackSets again and assign that token to
+ // the request object's NextToken parameter. If there are no remaining results,
+ // the previous response object's NextToken parameter is set to null.
+ NextToken *string `min:"1" type:"string"`
+
+ // The status of the stack sets that you want to get summary information about.
+ Status *string `type:"string" enum:"StackSetStatus"`
+}
+
+// String returns the string representation
+func (s ListStackSetsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListStackSetsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListStackSetsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListStackSetsInput"}
+ if s.MaxResults != nil && *s.MaxResults < 1 {
+  invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+ }
+ if s.NextToken != nil && len(*s.NextToken) < 1 {
+  invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+  return invalidParams
+ }
+ return nil
+}
+
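The ListStackSetsInput type above exposes the MaxResults/NextToken pagination pair. A minimal usage sketch, editor-supplied rather than part of the generated file, assuming credentials and a region come from the environment; it drains every page by feeding each response's NextToken back into the next request:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudformation"
)

func main() {
	// Illustrative client setup; not prescribed by the generated code.
	svc := cloudformation.New(session.Must(session.NewSession()))

	input := &cloudformation.ListStackSetsInput{
		MaxResults: aws.Int64(10),
		Status:     aws.String(cloudformation.StackSetStatusActive),
	}
	for {
		out, err := svc.ListStackSets(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, ss := range out.Summaries {
			fmt.Println(aws.StringValue(ss.StackSetName), aws.StringValue(ss.Status))
		}
		// Per the docs above, a nil NextToken means the last page was returned.
		if out.NextToken == nil {
			break
		}
		input.NextToken = out.NextToken
	}
}
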
+// SetMaxResults sets the MaxResults field's value.
+func (s *ListStackSetsInput) SetMaxResults(v int64) *ListStackSetsInput {
+ s.MaxResults = &v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListStackSetsInput) SetNextToken(v string) *ListStackSetsInput {
+ s.NextToken = &v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *ListStackSetsInput) SetStatus(v string) *ListStackSetsInput {
+ s.Status = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/ListStackSetsOutput
+type ListStackSetsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If the request doesn't return all of the remaining results, NextToken is
+ // set to a token. To retrieve the next set of results, call ListStackSets
+ // again and assign that token to the request object's NextToken parameter.
+ // If the request returns all results, NextToken is set to null.
+ NextToken *string `min:"1" type:"string"`
+
+ // A list of StackSetSummary structures that contain information about the user's
+ // stack sets.
+ Summaries []*StackSetSummary `type:"list"`
+}
+
+// String returns the string representation
+func (s ListStackSetsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListStackSetsOutput) GoString() string {
+ return s.String()
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListStackSetsOutput) SetNextToken(v string) *ListStackSetsOutput {
+ s.NextToken = &v
+ return s
+}
+
+// SetSummaries sets the Summaries field's value.
+func (s *ListStackSetsOutput) SetSummaries(v []*StackSetSummary) *ListStackSetsOutput {
+ s.Summaries = v
+ return s
+}
+
 // The input for ListStacks action.
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/ListStacksInput
 type ListStacksInput struct {
@@ -5394,6 +7878,9 @@ type Output struct {
 // User defined description associated with the output.
 Description *string `min:"1" type:"string"`

+ // The name of the export associated with the output.
+ ExportName *string `type:"string"`
+
 // The key associated with the output.
 OutputKey *string `type:"string"`

@@ -5417,6 +7904,12 @@ func (s *Output) SetDescription(v string) *Output {
 return s
 }

+// SetExportName sets the ExportName field's value.
+func (s *Output) SetExportName(v string) *Output {
+ s.ExportName = &v
+ return s
+}
+
 // SetOutputKey sets the OutputKey field's value.
 func (s *Output) SetOutputKey(v string) *Output {
 s.OutputKey = &v
@@ -6187,9 +8680,16 @@ type StackEvent struct {
 // The token passed to the operation that generated this event.
 //
- // For example, if you execute a CreateStack operation with the token token1,
- // then all the StackEvents generated by that operation will have ClientRequestToken
- // set as token1.
+ // All events triggered by a given stack operation are assigned the same client
+ // request token, which you can use to track operations. For example, if you
+ // execute a CreateStack operation with the token token1, then all the StackEvents
+ // generated by that operation will have ClientRequestToken set as token1.
+ //
+ // In the console, stack operations display the client request token on the
+ // Events tab. Stack operations that are initiated from the console use the
+ // token format Console-StackOperation-ID, which helps you easily identify the
+ // stack operation. 
For example, if you create a stack using the console, each + // stack event would be assigned the same token in the following format: Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002. ClientRequestToken *string `min:"1" type:"string"` // The unique ID of this event. @@ -6310,6 +8810,187 @@ func (s *StackEvent) SetTimestamp(v time.Time) *StackEvent { return s } +// An AWS CloudFormation stack, in a specific account and region, that's part +// of a stack set operation. A stack instance is a reference to an attempted +// or actual stack in a given account within a given region. A stack instance +// can exist without a stack—for example, if the stack couldn't be created for +// some reason. A stack instance is associated with only one stack set. Each +// stack instance contains the ID of its associated stack set, as well as the +// ID of the actual stack and the stack status. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/StackInstance +type StackInstance struct { + _ struct{} `type:"structure"` + + // The name of the AWS account that the stack instance is associated with. + Account *string `type:"string"` + + // The name of the AWS region that the stack instance is associated with. + Region *string `type:"string"` + + // The ID of the stack instance. + StackId *string `type:"string"` + + // The name or unique ID of the stack set that the stack instance is associated + // with. + StackSetId *string `type:"string"` + + // The status of the stack instance, in terms of its synchronization with its + // associated stack set. + // + // * INOPERABLE: A DeleteStackInstances operation has failed and left the + // stack in an unstable state. Stacks in this state are excluded from further + // UpdateStackSet and DeleteStackInstances operations. You might need to + // clean up the stack manually. + // + // * OUTDATED: The stack isn't currently up to date with the stack set because: + // + // The associated stack failed during a CreateStackSet or UpdateStackSet operation. + // + // + // The stack was part of a CreateStackSet or UpdateStackSet operation that failed + // or was stopped before the stack was created or updated. + // + // * CURRENT: The stack is currently up to date with the stack set. + Status *string `type:"string" enum:"StackInstanceStatus"` + + // The explanation for the specific status code that is assigned to this stack + // instance. + StatusReason *string `type:"string"` +} + +// String returns the string representation +func (s StackInstance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StackInstance) GoString() string { + return s.String() +} + +// SetAccount sets the Account field's value. +func (s *StackInstance) SetAccount(v string) *StackInstance { + s.Account = &v + return s +} + +// SetRegion sets the Region field's value. +func (s *StackInstance) SetRegion(v string) *StackInstance { + s.Region = &v + return s +} + +// SetStackId sets the StackId field's value. +func (s *StackInstance) SetStackId(v string) *StackInstance { + s.StackId = &v + return s +} + +// SetStackSetId sets the StackSetId field's value. +func (s *StackInstance) SetStackSetId(v string) *StackInstance { + s.StackSetId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *StackInstance) SetStatus(v string) *StackInstance { + s.Status = &v + return s +} + +// SetStatusReason sets the StatusReason field's value. 
+func (s *StackInstance) SetStatusReason(v string) *StackInstance { + s.StatusReason = &v + return s +} + +// The structure that contains summary information about a stack instance. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/StackInstanceSummary +type StackInstanceSummary struct { + _ struct{} `type:"structure"` + + // The name of the AWS account that the stack instance is associated with. + Account *string `type:"string"` + + // The name of the AWS region that the stack instance is associated with. + Region *string `type:"string"` + + // The ID of the stack instance. + StackId *string `type:"string"` + + // The name or unique ID of the stack set that the stack instance is associated + // with. + StackSetId *string `type:"string"` + + // The status of the stack instance, in terms of its synchronization with its + // associated stack set. + // + // * INOPERABLE: A DeleteStackInstances operation has failed and left the + // stack in an unstable state. Stacks in this state are excluded from further + // UpdateStackSet and DeleteStackInstances operations. You might need to + // clean up the stack manually. + // + // * OUTDATED: The stack isn't currently up to date with the stack set because: + // + // The associated stack failed during a CreateStackSet or UpdateStackSet operation. + // + // + // The stack was part of a CreateStackSet or UpdateStackSet operation that failed + // or was stopped before the stack was created or updated. + // + // * CURRENT: The stack is currently up to date with the stack set. + Status *string `type:"string" enum:"StackInstanceStatus"` + + // The explanation for the specific status code assigned to this stack instance. + StatusReason *string `type:"string"` +} + +// String returns the string representation +func (s StackInstanceSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StackInstanceSummary) GoString() string { + return s.String() +} + +// SetAccount sets the Account field's value. +func (s *StackInstanceSummary) SetAccount(v string) *StackInstanceSummary { + s.Account = &v + return s +} + +// SetRegion sets the Region field's value. +func (s *StackInstanceSummary) SetRegion(v string) *StackInstanceSummary { + s.Region = &v + return s +} + +// SetStackId sets the StackId field's value. +func (s *StackInstanceSummary) SetStackId(v string) *StackInstanceSummary { + s.StackId = &v + return s +} + +// SetStackSetId sets the StackSetId field's value. +func (s *StackInstanceSummary) SetStackSetId(v string) *StackInstanceSummary { + s.StackSetId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *StackInstanceSummary) SetStatus(v string) *StackInstanceSummary { + s.Status = &v + return s +} + +// SetStatusReason sets the StatusReason field's value. +func (s *StackInstanceSummary) SetStatusReason(v string) *StackInstanceSummary { + s.StatusReason = &v + return s +} + // The StackResource data type. // Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/StackResource type StackResource struct { @@ -6618,6 +9299,554 @@ func (s *StackResourceSummary) SetResourceType(v string) *StackResourceSummary { return s } +// A structure that contains information about a stack set. A stack set enables +// you to provision stacks into AWS accounts and across regions by using a single +// CloudFormation template. 
In the stack set, you specify the template to use, +// as well as any parameters and capabilities that the template requires. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/StackSet +type StackSet struct { + _ struct{} `type:"structure"` + + // The capabilities that are allowed in the stack set. Some stack set templates + // might include resources that can affect permissions in your AWS account—for + // example, by creating new AWS Identity and Access Management (IAM) users. + // For more information, see Acknowledging IAM Resources in AWS CloudFormation + // Templates. (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-template.html#capabilities) + Capabilities []*string `type:"list"` + + // A description of the stack set that you specify when the stack set is created + // or updated. + Description *string `min:"1" type:"string"` + + // A list of input parameters for a stack set. + Parameters []*Parameter `type:"list"` + + // The ID of the stack set. + StackSetId *string `type:"string"` + + // The name that's associated with the stack set. + StackSetName *string `type:"string"` + + // The status of the stack set. + Status *string `type:"string" enum:"StackSetStatus"` + + // A list of tags that specify information about the stack set. A maximum number + // of 50 tags can be specified. + Tags []*Tag `type:"list"` + + // The structure that contains the body of the template that was used to create + // or update the stack set. + TemplateBody *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s StackSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StackSet) GoString() string { + return s.String() +} + +// SetCapabilities sets the Capabilities field's value. +func (s *StackSet) SetCapabilities(v []*string) *StackSet { + s.Capabilities = v + return s +} + +// SetDescription sets the Description field's value. +func (s *StackSet) SetDescription(v string) *StackSet { + s.Description = &v + return s +} + +// SetParameters sets the Parameters field's value. +func (s *StackSet) SetParameters(v []*Parameter) *StackSet { + s.Parameters = v + return s +} + +// SetStackSetId sets the StackSetId field's value. +func (s *StackSet) SetStackSetId(v string) *StackSet { + s.StackSetId = &v + return s +} + +// SetStackSetName sets the StackSetName field's value. +func (s *StackSet) SetStackSetName(v string) *StackSet { + s.StackSetName = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *StackSet) SetStatus(v string) *StackSet { + s.Status = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *StackSet) SetTags(v []*Tag) *StackSet { + s.Tags = v + return s +} + +// SetTemplateBody sets the TemplateBody field's value. +func (s *StackSet) SetTemplateBody(v string) *StackSet { + s.TemplateBody = &v + return s +} + +// The structure that contains information about a stack set operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/StackSetOperation +type StackSetOperation struct { + _ struct{} `type:"structure"` + + // The type of stack set operation: CREATE, UPDATE, or DELETE. Create and delete + // operations affect only the specified stack set instances that are associated + // with the specified stack set. Update operations affect both the stack set + // itself, as well as all associated stack set instances. 
+ Action *string `type:"string" enum:"StackSetOperationAction"` + + // The time at which the operation was initiated. Note that the creation times + // for the stack set operation might differ from the creation time of the individual + // stacks themselves. This is because AWS CloudFormation needs to perform preparatory + // work for the operation, such as dispatching the work to the requested regions, + // before actually creating the first stacks. + CreationTimestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The time at which the stack set operation ended, across all accounts and + // regions specified. Note that this doesn't necessarily mean that the stack + // set operation was successful, or even attempted, in each account or region. + EndTimestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The unique ID of a stack set operation. + OperationId *string `min:"1" type:"string"` + + // The preferences for how AWS CloudFormation performs this stack set operation. + OperationPreferences *StackSetOperationPreferences `type:"structure"` + + // For stack set operations of action type DELETE, specifies whether to remove + // the stack instances from the specified stack set, but doesn't delete the + // stacks. You can't reassociate a retained stack, or add an existing, saved + // stack to a new stack set. + RetainStacks *bool `type:"boolean"` + + // The ID of the stack set. + StackSetId *string `type:"string"` + + // The status of the operation. + // + // * FAILED: The operation exceeded the specified failure tolerance. The + // failure tolerance value that you've set for an operation is applied for + // each region during stack create and update operations. If the number of + // failed stacks within a region exceeds the failure tolerance, the status + // of the operation in the region is set to FAILED. This in turn sets the + // status of the operation as a whole to FAILED, and AWS CloudFormation cancels + // the operation in any remaining regions. + // + // * RUNNING: The operation is currently being performed. + // + // * STOPPED: The user has cancelled the operation. + // + // * STOPPING: The operation is in the process of stopping, at user request. + // + // + // * SUCCEEDED: The operation completed creating or updating all the specified + // stacks without exceeding the failure tolerance for the operation. + Status *string `type:"string" enum:"StackSetOperationStatus"` +} + +// String returns the string representation +func (s StackSetOperation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StackSetOperation) GoString() string { + return s.String() +} + +// SetAction sets the Action field's value. +func (s *StackSetOperation) SetAction(v string) *StackSetOperation { + s.Action = &v + return s +} + +// SetCreationTimestamp sets the CreationTimestamp field's value. +func (s *StackSetOperation) SetCreationTimestamp(v time.Time) *StackSetOperation { + s.CreationTimestamp = &v + return s +} + +// SetEndTimestamp sets the EndTimestamp field's value. +func (s *StackSetOperation) SetEndTimestamp(v time.Time) *StackSetOperation { + s.EndTimestamp = &v + return s +} + +// SetOperationId sets the OperationId field's value. +func (s *StackSetOperation) SetOperationId(v string) *StackSetOperation { + s.OperationId = &v + return s +} + +// SetOperationPreferences sets the OperationPreferences field's value. 
+func (s *StackSetOperation) SetOperationPreferences(v *StackSetOperationPreferences) *StackSetOperation {
+ s.OperationPreferences = v
+ return s
+}
+
+// SetRetainStacks sets the RetainStacks field's value.
+func (s *StackSetOperation) SetRetainStacks(v bool) *StackSetOperation {
+ s.RetainStacks = &v
+ return s
+}
+
+// SetStackSetId sets the StackSetId field's value.
+func (s *StackSetOperation) SetStackSetId(v string) *StackSetOperation {
+ s.StackSetId = &v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *StackSetOperation) SetStatus(v string) *StackSetOperation {
+ s.Status = &v
+ return s
+}
+
+// The user-specified preferences for how AWS CloudFormation performs a stack
+// set operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/StackSetOperationPreferences
+type StackSetOperationPreferences struct {
+ _ struct{} `type:"structure"`
+
+ // The number of accounts, per region, for which this operation can fail before
+ // AWS CloudFormation stops the operation in that region. If the operation is
+ // stopped in a region, AWS CloudFormation doesn't attempt the operation in
+ // any subsequent regions.
+ //
+ // Conditional: You must specify either FailureToleranceCount or FailureTolerancePercentage
+ // (but not both).
+ FailureToleranceCount *int64 `type:"integer"`
+
+ // The percentage of accounts, per region, for which this stack operation can
+ // fail before AWS CloudFormation stops the operation in that region. If the
+ // operation is stopped in a region, AWS CloudFormation doesn't attempt the
+ // operation in any subsequent regions.
+ //
+ // When calculating the number of accounts based on the specified percentage,
+ // AWS CloudFormation rounds down to the next whole number.
+ //
+ // Conditional: You must specify either FailureToleranceCount or FailureTolerancePercentage,
+ // but not both.
+ FailureTolerancePercentage *int64 `type:"integer"`
+
+ // The maximum number of accounts in which to perform this operation at one
+ // time. This is dependent on the value of FailureToleranceCount—MaxConcurrentCount
+ // is at most one more than the FailureToleranceCount.
+ //
+ // Conditional: You must specify either MaxConcurrentCount or MaxConcurrentPercentage,
+ // but not both.
+ MaxConcurrentCount *int64 `min:"1" type:"integer"`
+
+ // The maximum percentage of accounts in which to perform this operation at
+ // one time.
+ //
+ // When calculating the number of accounts based on the specified percentage,
+ // AWS CloudFormation rounds down to the next whole number. This is true except
+ // in cases where rounding down would result in zero. In this case, CloudFormation
+ // sets the number to one instead.
+ //
+ // Conditional: You must specify either MaxConcurrentCount or MaxConcurrentPercentage,
+ // but not both.
+ MaxConcurrentPercentage *int64 `min:"1" type:"integer"`
+
+ // The order of the regions where you want to perform the stack operation.
+ RegionOrder []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s StackSetOperationPreferences) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StackSetOperationPreferences) GoString() string {
+ return s.String()
+}
+
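Because the count and percentage fields above come in mutually exclusive pairs, a short usage sketch may help. It is not part of the generated file and the literal values are arbitrary; it builds the preferences with the fluent setters and runs the client-side Validate defined next:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudformation"
)

func main() {
	// Percentage forms are chosen here; mixing them with the corresponding
	// *Count fields would violate the documented conditionals.
	prefs := (&cloudformation.StackSetOperationPreferences{}).
		SetFailureTolerancePercentage(10).
		SetMaxConcurrentPercentage(25).
		SetRegionOrder([]*string{aws.String("us-east-1"), aws.String("eu-west-1")})

	// Client-side validation only covers the min:"1" constraints; the
	// either/or conditionals are enforced by the service.
	if err := prefs.Validate(); err != nil {
		log.Fatal(err)
	}
	fmt.Println(prefs)
}
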
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *StackSetOperationPreferences) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "StackSetOperationPreferences"}
+ if s.MaxConcurrentCount != nil && *s.MaxConcurrentCount < 1 {
+  invalidParams.Add(request.NewErrParamMinValue("MaxConcurrentCount", 1))
+ }
+ if s.MaxConcurrentPercentage != nil && *s.MaxConcurrentPercentage < 1 {
+  invalidParams.Add(request.NewErrParamMinValue("MaxConcurrentPercentage", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+  return invalidParams
+ }
+ return nil
+}
+
+// SetFailureToleranceCount sets the FailureToleranceCount field's value.
+func (s *StackSetOperationPreferences) SetFailureToleranceCount(v int64) *StackSetOperationPreferences {
+ s.FailureToleranceCount = &v
+ return s
+}
+
+// SetFailureTolerancePercentage sets the FailureTolerancePercentage field's value.
+func (s *StackSetOperationPreferences) SetFailureTolerancePercentage(v int64) *StackSetOperationPreferences {
+ s.FailureTolerancePercentage = &v
+ return s
+}
+
+// SetMaxConcurrentCount sets the MaxConcurrentCount field's value.
+func (s *StackSetOperationPreferences) SetMaxConcurrentCount(v int64) *StackSetOperationPreferences {
+ s.MaxConcurrentCount = &v
+ return s
+}
+
+// SetMaxConcurrentPercentage sets the MaxConcurrentPercentage field's value.
+func (s *StackSetOperationPreferences) SetMaxConcurrentPercentage(v int64) *StackSetOperationPreferences {
+ s.MaxConcurrentPercentage = &v
+ return s
+}
+
+// SetRegionOrder sets the RegionOrder field's value.
+func (s *StackSetOperationPreferences) SetRegionOrder(v []*string) *StackSetOperationPreferences {
+ s.RegionOrder = v
+ return s
+}
+
+// The structure that contains information about a specified operation's results
+// for a given account in a given region.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/StackSetOperationResultSummary
+type StackSetOperationResultSummary struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the AWS account for this operation result.
+ Account *string `type:"string"`
+
+ // The results of the account gate function AWS CloudFormation invokes, if present,
+ // before proceeding with stack set operations in an account.
+ AccountGateResult *AccountGateResult `type:"structure"`
+
+ // The name of the AWS region for this operation result.
+ Region *string `type:"string"`
+
+ // The result status of the stack set operation for the given account in the
+ // given region.
+ //
+ // * CANCELLED: The operation in the specified account and region has been
+ // cancelled. This is either because a user has stopped the stack set operation,
+ // or because the failure tolerance of the stack set operation has been exceeded.
+ //
+ // * FAILED: The operation in the specified account and region failed.
+ //
+ // If the stack set operation fails in enough accounts within a region, the
+ // failure tolerance for the stack set operation as a whole might be exceeded.
+ //
+ //
+ // * RUNNING: The operation in the specified account and region is currently
+ // in progress.
+ //
+ // * PENDING: The operation in the specified account and region has yet to
+ // start.
+ //
+ // * SUCCEEDED: The operation in the specified account and region completed
+ // successfully.
+ Status *string `type:"string" enum:"StackSetOperationResultStatus"`
+
+ // The reason for the assigned result status.
+ StatusReason *string `type:"string"`
+}
+
+// String returns the string representation
+func (s StackSetOperationResultSummary) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StackSetOperationResultSummary) GoString() string {
+ return s.String()
+}
+
+// SetAccount sets the Account field's value.
+func (s *StackSetOperationResultSummary) SetAccount(v string) *StackSetOperationResultSummary {
+ s.Account = &v
+ return s
+}
+
+// SetAccountGateResult sets the AccountGateResult field's value.
+func (s *StackSetOperationResultSummary) SetAccountGateResult(v *AccountGateResult) *StackSetOperationResultSummary {
+ s.AccountGateResult = v
+ return s
+}
+
+// SetRegion sets the Region field's value.
+func (s *StackSetOperationResultSummary) SetRegion(v string) *StackSetOperationResultSummary {
+ s.Region = &v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *StackSetOperationResultSummary) SetStatus(v string) *StackSetOperationResultSummary {
+ s.Status = &v
+ return s
+}
+
+// SetStatusReason sets the StatusReason field's value.
+func (s *StackSetOperationResultSummary) SetStatusReason(v string) *StackSetOperationResultSummary {
+ s.StatusReason = &v
+ return s
+}
+
+// The structure that contains summary information about the specified operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/StackSetOperationSummary
+type StackSetOperationSummary struct {
+ _ struct{} `type:"structure"`
+
+ // The type of operation: CREATE, UPDATE, or DELETE. Create and delete operations
+ // affect only the specified stack instances that are associated with the specified
+ // stack set. Update operations affect both the stack set itself as well as
+ // all associated stack set instances.
+ Action *string `type:"string" enum:"StackSetOperationAction"`
+
+ // The time at which the operation was initiated. Note that the creation times
+ // for the stack set operation might differ from the creation time of the individual
+ // stacks themselves. This is because AWS CloudFormation needs to perform preparatory
+ // work for the operation, such as dispatching the work to the requested regions,
+ // before actually creating the first stacks.
+ CreationTimestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // The time at which the stack set operation ended, across all accounts and
+ // regions specified. Note that this doesn't necessarily mean that the stack
+ // set operation was successful, or even attempted, in each account or region.
+ EndTimestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // The unique ID of the stack set operation.
+ OperationId *string `min:"1" type:"string"`
+
+ // The overall status of the operation.
+ //
+ // * FAILED: The operation exceeded the specified failure tolerance. The
+ // failure tolerance value that you've set for an operation is applied for
+ // each region during stack create and update operations. If the number of
+ // failed stacks within a region exceeds the failure tolerance, the status
+ // of the operation in the region is set to FAILED. This in turn sets the
+ // status of the operation as a whole to FAILED, and AWS CloudFormation cancels
+ // the operation in any remaining regions.
+ //
+ // * RUNNING: The operation is currently being performed.
+ //
+ // * STOPPED: The user has cancelled the operation.
+ //
+ // * STOPPING: The operation is in the process of stopping, at user request.
+ //
+ //
+ // * SUCCEEDED: The operation completed creating or updating all the specified
+ // stacks without exceeding the failure tolerance for the operation.
+ Status *string `type:"string" enum:"StackSetOperationStatus"`
+}
+
+// String returns the string representation
+func (s StackSetOperationSummary) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StackSetOperationSummary) GoString() string {
+ return s.String()
+}
+
+// SetAction sets the Action field's value.
+func (s *StackSetOperationSummary) SetAction(v string) *StackSetOperationSummary {
+ s.Action = &v
+ return s
+}
+
+// SetCreationTimestamp sets the CreationTimestamp field's value.
+func (s *StackSetOperationSummary) SetCreationTimestamp(v time.Time) *StackSetOperationSummary {
+ s.CreationTimestamp = &v
+ return s
+}
+
+// SetEndTimestamp sets the EndTimestamp field's value.
+func (s *StackSetOperationSummary) SetEndTimestamp(v time.Time) *StackSetOperationSummary {
+ s.EndTimestamp = &v
+ return s
+}
+
+// SetOperationId sets the OperationId field's value.
+func (s *StackSetOperationSummary) SetOperationId(v string) *StackSetOperationSummary {
+ s.OperationId = &v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *StackSetOperationSummary) SetStatus(v string) *StackSetOperationSummary {
+ s.Status = &v
+ return s
+}
+
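The status values documented above are what callers typically poll for. A hedged sketch, editor-supplied rather than part of the generated file, that waits for a stack set operation to settle; the stack set name and operation ID are placeholders:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudformation"
)

func main() {
	svc := cloudformation.New(session.Must(session.NewSession()))

	in := &cloudformation.DescribeStackSetOperationInput{
		StackSetName: aws.String("my-stack-set"),                         // placeholder
		OperationId:  aws.String("7f59c3cf-00d2-40c7-b2ff-e75db0987002"), // placeholder
	}
	for {
		out, err := svc.DescribeStackSetOperation(in)
		if err != nil {
			log.Fatal(err)
		}
		status := aws.StringValue(out.StackSetOperation.Status)
		if status != cloudformation.StackSetOperationStatusRunning &&
			status != cloudformation.StackSetOperationStatusStopping {
			// FAILED, SUCCEEDED, or STOPPED: the operation has settled.
			fmt.Println("final status:", status)
			return
		}
		time.Sleep(10 * time.Second)
	}
}
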
+// The structure that contains summary information about the specified stack
+// set.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/StackSetSummary
+type StackSetSummary struct {
+ _ struct{} `type:"structure"`
+
+ // A description of the stack set that you specify when the stack set is created
+ // or updated.
+ Description *string `min:"1" type:"string"`
+
+ // The ID of the stack set.
+ StackSetId *string `type:"string"`
+
+ // The name of the stack set.
+ StackSetName *string `type:"string"`
+
+ // The status of the stack set.
+ Status *string `type:"string" enum:"StackSetStatus"`
+}
+
+// String returns the string representation
+func (s StackSetSummary) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StackSetSummary) GoString() string {
+ return s.String()
+}
+
+// SetDescription sets the Description field's value.
+func (s *StackSetSummary) SetDescription(v string) *StackSetSummary {
+ s.Description = &v
+ return s
+}
+
+// SetStackSetId sets the StackSetId field's value.
+func (s *StackSetSummary) SetStackSetId(v string) *StackSetSummary {
+ s.StackSetId = &v
+ return s
+}
+
+// SetStackSetName sets the StackSetName field's value.
+func (s *StackSetSummary) SetStackSetName(v string) *StackSetSummary {
+ s.StackSetName = &v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *StackSetSummary) SetStatus(v string) *StackSetSummary {
+ s.Status = &v
+ return s
+}
+
 // The StackSummary Data Type
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/StackSummary
 type StackSummary struct {
@@ -6713,6 +9942,78 @@ func (s *StackSummary) SetTemplateDescription(v string) *StackSummary {
 return s
 }

+// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/StopStackSetOperationInput
+type StopStackSetOperationInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the stack operation.
+ // + // OperationId is a required field + OperationId *string `min:"1" type:"string" required:"true"` + + // The name or unique ID of the stack set that you want to stop the operation + // for. + // + // StackSetName is a required field + StackSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s StopStackSetOperationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopStackSetOperationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StopStackSetOperationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StopStackSetOperationInput"} + if s.OperationId == nil { + invalidParams.Add(request.NewErrParamRequired("OperationId")) + } + if s.OperationId != nil && len(*s.OperationId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("OperationId", 1)) + } + if s.StackSetName == nil { + invalidParams.Add(request.NewErrParamRequired("StackSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOperationId sets the OperationId field's value. +func (s *StopStackSetOperationInput) SetOperationId(v string) *StopStackSetOperationInput { + s.OperationId = &v + return s +} + +// SetStackSetName sets the StackSetName field's value. +func (s *StopStackSetOperationInput) SetStackSetName(v string) *StopStackSetOperationInput { + s.StackSetName = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/StopStackSetOperationOutput +type StopStackSetOperationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s StopStackSetOperationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopStackSetOperationOutput) GoString() string { + return s.String() +} + // The Tag type enables you to specify a key-value pair that can be used to // store information about an AWS CloudFormation stack. // Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/Tag @@ -6722,11 +10023,15 @@ type Tag struct { // Required. A string used to identify this tag. You can specify a maximum of // 128 characters for a tag key. Tags owned by Amazon Web Services (AWS) have // the reserved prefix: aws:. - Key *string `type:"string"` + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` // Required. A string containing the value for this tag. You can specify a maximum // of 256 characters for a tag value. - Value *string `type:"string"` + // + // Value is a required field + Value *string `min:"1" type:"string" required:"true"` } // String returns the string representation @@ -6739,6 +10044,28 @@ func (s Tag) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *Tag) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Tag"}
+ if s.Key == nil {
+  invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+  invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.Value == nil {
+  invalidParams.Add(request.NewErrParamRequired("Value"))
+ }
+ if s.Value != nil && len(*s.Value) < 1 {
+  invalidParams.Add(request.NewErrParamMinLen("Value", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+  return invalidParams
+ }
+ return nil
+}
+
 // SetKey sets the Key field's value.
 func (s *Tag) SetKey(v string) *Tag {
 s.Key = &v
@@ -6839,6 +10166,17 @@ type UpdateStackInput struct {
 // plan to retry requests so that AWS CloudFormation knows that you're not attempting
 // to update a stack with the same name. You might retry UpdateStack requests
 // to ensure that AWS CloudFormation successfully received them.
+ //
+ // All events triggered by a given stack operation are assigned the same client
+ // request token, which you can use to track operations. For example, if you
+ // execute a CreateStack operation with the token token1, then all the StackEvents
+ // generated by that operation will have ClientRequestToken set as token1.
+ //
+ // In the console, stack operations display the client request token on the
+ // Events tab. Stack operations that are initiated from the console use the
+ // token format Console-StackOperation-ID, which helps you easily identify the
+ // stack operation. For example, if you create a stack using the console, each
+ // stack event would be assigned the same token in the following format: Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002.
 ClientRequestToken *string `min:"1" type:"string"`

 // Amazon Simple Notification Service topic Amazon Resource Names (ARNs) that
@@ -6919,7 +10257,7 @@ type UpdateStackInput struct {
 // Key-value pairs to associate with this stack. AWS CloudFormation also propagates
 // these tags to supported resources in the stack. You can specify a maximum
- // number of 10 tags.
+ // number of 50 tags.
 //
 // If you don't specify this parameter, AWS CloudFormation doesn't modify the
 // stack's tags. If you specify an empty value, AWS CloudFormation removes all
@@ -6992,6 +10330,16 @@ func (s *UpdateStackInput) Validate() error {
 if s.TemplateURL != nil && len(*s.TemplateURL) < 1 {
  invalidParams.Add(request.NewErrParamMinLen("TemplateURL", 1))
 }
+ if s.Tags != nil {
+  for i, v := range s.Tags {
+   if v == nil {
+    continue
+   }
+   if err := v.Validate(); err != nil {
+    invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
+   }
+  }
+ }

 if invalidParams.Len() > 0 {
  return invalidParams
@@ -7114,6 +10462,262 @@ func (s *UpdateStackOutput) SetStackId(v string) *UpdateStackOutput {
 return s
 }

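Tying these pieces together, a sketch of the UpdateStackSet request defined below; it is editor-supplied with placeholder names. Note that, per the documentation that follows, the Tags list replaces the stack set's entire tag set, and the returned OperationId can be polled with DescribeStackSetOperation:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudformation"
)

func main() {
	svc := cloudformation.New(session.Must(session.NewSession()))

	input := &cloudformation.UpdateStackSetInput{
		StackSetName:        aws.String("my-stack-set"), // placeholder
		UsePreviousTemplate: aws.Bool(true),
		// This list replaces all tags on the stack set, so it must include
		// every tag that should remain, not just the new ones.
		Tags: []*cloudformation.Tag{
			{Key: aws.String("team"), Value: aws.String("platform")},
		},
	}
	if err := input.Validate(); err != nil {
		log.Fatal(err)
	}
	out, err := svc.UpdateStackSet(input)
	if err != nil {
		log.Fatal(err)
	}
	// The operation ID doubles as an idempotency token for retries.
	fmt.Println("operation:", aws.StringValue(out.OperationId))
}
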
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/UpdateStackSetInput
+type UpdateStackSetInput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of values that you must specify before AWS CloudFormation can create
+ // certain stack sets. Some stack set templates might include resources that
+ // can affect permissions in your AWS account—for example, by creating new AWS
+ // Identity and Access Management (IAM) users. For those stack sets, you must
+ // explicitly acknowledge their capabilities by specifying this parameter.
+ //
+ // The only valid values are CAPABILITY_IAM and CAPABILITY_NAMED_IAM. The following
+ // resources require you to specify this parameter:
+ //
+ // * AWS::IAM::AccessKey
+ //
+ // * AWS::IAM::Group
+ //
+ // * AWS::IAM::InstanceProfile
+ //
+ // * AWS::IAM::Policy
+ //
+ // * AWS::IAM::Role
+ //
+ // * AWS::IAM::User
+ //
+ // * AWS::IAM::UserToGroupAddition
+ //
+ // If your stack template contains these resources, we recommend that you review
+ // all permissions that are associated with them and edit their permissions
+ // if necessary.
+ //
+ // If you have IAM resources, you can specify either capability. If you have
+ // IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. If
+ // you don't specify this parameter, this action returns an InsufficientCapabilities
+ // error.
+ //
+ // For more information, see Acknowledging IAM Resources in AWS CloudFormation
+ // Templates. (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-template.html#capabilities)
+ Capabilities []*string `type:"list"`
+
+ // A brief description of updates that you are making.
+ Description *string `min:"1" type:"string"`
+
+ // The unique ID for this stack set operation.
+ //
+ // The operation ID also functions as an idempotency token, to ensure that AWS
+ // CloudFormation performs the stack set operation only once, even if you retry
+ // the request multiple times. You might retry stack set operation requests
+ // to ensure that AWS CloudFormation successfully received them.
+ //
+ // If you don't specify an operation ID, AWS CloudFormation generates one automatically.
+ //
+ // Repeating this stack set operation with a new operation ID retries all stack
+ // instances whose status is OUTDATED.
+ OperationId *string `min:"1" type:"string" idempotencyToken:"true"`
+
+ // Preferences for how AWS CloudFormation performs this stack set operation.
+ OperationPreferences *StackSetOperationPreferences `type:"structure"`
+
+ // A list of input parameters for the stack set template.
+ Parameters []*Parameter `type:"list"`
+
+ // The name or unique ID of the stack set that you want to update.
+ //
+ // StackSetName is a required field
+ StackSetName *string `type:"string" required:"true"`
+
+ // The key-value pairs to associate with this stack set and the stacks created
+ // from it. AWS CloudFormation also propagates these tags to supported resources
+ // that are created in the stacks. You can specify a maximum number of 50 tags.
+ //
+ // If you specify tags for this parameter, those tags replace any list of tags
+ // that are currently associated with this stack set. This means:
+ //
+ // * If you don't specify this parameter, AWS CloudFormation doesn't modify
+ // the stack's tags.
+ //
+ // * If you specify any tags using this parameter, you must specify all the
+ // tags that you want associated with this stack set, even tags you've specified
+ // before (for example, when creating the stack set or during a previous
+ // update of the stack set). Any tags that you don't include in the updated
+ // list of tags are removed from the stack set, and therefore from the stacks
+ // and resources as well.
+ //
+ // * If you specify an empty value, AWS CloudFormation removes all currently
+ // associated tags.
+ //
+ // If you specify new tags as part of an UpdateStackSet action, AWS CloudFormation
+ // checks to see if you have the required IAM permission to tag resources. 
If + // you omit tags that are currently associated with the stack set from the list + // of tags you specify, AWS CloudFormation assumes that you want to remove those + // tags from the stack set, and checks to see if you have permission to untag + // resources. If you don't have the necessary permission(s), the entire UpdateStackSet + // action fails with an access denied error, and the stack set is not updated. + Tags []*Tag `type:"list"` + + // The structure that contains the template body, with a minimum length of 1 + // byte and a maximum length of 51,200 bytes. For more information, see Template + // Anatomy (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html) + // in the AWS CloudFormation User Guide. + // + // Conditional: You must specify only one of the following parameters: TemplateBody + // or TemplateURL—or set UsePreviousTemplate to true. + TemplateBody *string `min:"1" type:"string"` + + // The location of the file that contains the template body. The URL must point + // to a template (maximum size: 460,800 bytes) that is located in an Amazon + // S3 bucket. For more information, see Template Anatomy (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html) + // in the AWS CloudFormation User Guide. + // + // Conditional: You must specify only one of the following parameters: TemplateBody + // or TemplateURL—or set UsePreviousTemplate to true. + TemplateURL *string `min:"1" type:"string"` + + // Use the existing template that's associated with the stack set that you're + // updating. + // + // Conditional: You must specify only one of the following parameters: TemplateBody + // or TemplateURL—or set UsePreviousTemplate to true. + UsePreviousTemplate *bool `type:"boolean"` +} + +// String returns the string representation +func (s UpdateStackSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateStackSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateStackSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateStackSetInput"} + if s.Description != nil && len(*s.Description) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Description", 1)) + } + if s.OperationId != nil && len(*s.OperationId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("OperationId", 1)) + } + if s.StackSetName == nil { + invalidParams.Add(request.NewErrParamRequired("StackSetName")) + } + if s.TemplateBody != nil && len(*s.TemplateBody) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TemplateBody", 1)) + } + if s.TemplateURL != nil && len(*s.TemplateURL) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TemplateURL", 1)) + } + if s.OperationPreferences != nil { + if err := s.OperationPreferences.Validate(); err != nil { + invalidParams.AddNested("OperationPreferences", err.(request.ErrInvalidParams)) + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCapabilities sets the Capabilities field's value. +func (s *UpdateStackSetInput) SetCapabilities(v []*string) *UpdateStackSetInput { + s.Capabilities = v + return s +} + +// SetDescription sets the Description field's value. 
+func (s *UpdateStackSetInput) SetDescription(v string) *UpdateStackSetInput { + s.Description = &v + return s +} + +// SetOperationId sets the OperationId field's value. +func (s *UpdateStackSetInput) SetOperationId(v string) *UpdateStackSetInput { + s.OperationId = &v + return s +} + +// SetOperationPreferences sets the OperationPreferences field's value. +func (s *UpdateStackSetInput) SetOperationPreferences(v *StackSetOperationPreferences) *UpdateStackSetInput { + s.OperationPreferences = v + return s +} + +// SetParameters sets the Parameters field's value. +func (s *UpdateStackSetInput) SetParameters(v []*Parameter) *UpdateStackSetInput { + s.Parameters = v + return s +} + +// SetStackSetName sets the StackSetName field's value. +func (s *UpdateStackSetInput) SetStackSetName(v string) *UpdateStackSetInput { + s.StackSetName = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *UpdateStackSetInput) SetTags(v []*Tag) *UpdateStackSetInput { + s.Tags = v + return s +} + +// SetTemplateBody sets the TemplateBody field's value. +func (s *UpdateStackSetInput) SetTemplateBody(v string) *UpdateStackSetInput { + s.TemplateBody = &v + return s +} + +// SetTemplateURL sets the TemplateURL field's value. +func (s *UpdateStackSetInput) SetTemplateURL(v string) *UpdateStackSetInput { + s.TemplateURL = &v + return s +} + +// SetUsePreviousTemplate sets the UsePreviousTemplate field's value. +func (s *UpdateStackSetInput) SetUsePreviousTemplate(v bool) *UpdateStackSetInput { + s.UsePreviousTemplate = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/UpdateStackSetOutput +type UpdateStackSetOutput struct { + _ struct{} `type:"structure"` + + // The unique ID for this stack set operation. + OperationId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UpdateStackSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateStackSetOutput) GoString() string { + return s.String() +} + +// SetOperationId sets the OperationId field's value. +func (s *UpdateStackSetOutput) SetOperationId(v string) *UpdateStackSetOutput { + s.OperationId = &v + return s +} + // The input for ValidateTemplate action. 
// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/ValidateTemplateInput type ValidateTemplateInput struct { @@ -7245,6 +10849,17 @@ func (s *ValidateTemplateOutput) SetParameters(v []*TemplateParameter) *Validate return s } +const ( + // AccountGateStatusSucceeded is a AccountGateStatus enum value + AccountGateStatusSucceeded = "SUCCEEDED" + + // AccountGateStatusFailed is a AccountGateStatus enum value + AccountGateStatusFailed = "FAILED" + + // AccountGateStatusSkipped is a AccountGateStatus enum value + AccountGateStatusSkipped = "SKIPPED" +) + const ( // CapabilityCapabilityIam is a Capability enum value CapabilityCapabilityIam = "CAPABILITY_IAM" @@ -7432,6 +11047,70 @@ const ( ResourceStatusUpdateComplete = "UPDATE_COMPLETE" ) +const ( + // StackInstanceStatusCurrent is a StackInstanceStatus enum value + StackInstanceStatusCurrent = "CURRENT" + + // StackInstanceStatusOutdated is a StackInstanceStatus enum value + StackInstanceStatusOutdated = "OUTDATED" + + // StackInstanceStatusInoperable is a StackInstanceStatus enum value + StackInstanceStatusInoperable = "INOPERABLE" +) + +const ( + // StackSetOperationActionCreate is a StackSetOperationAction enum value + StackSetOperationActionCreate = "CREATE" + + // StackSetOperationActionUpdate is a StackSetOperationAction enum value + StackSetOperationActionUpdate = "UPDATE" + + // StackSetOperationActionDelete is a StackSetOperationAction enum value + StackSetOperationActionDelete = "DELETE" +) + +const ( + // StackSetOperationResultStatusPending is a StackSetOperationResultStatus enum value + StackSetOperationResultStatusPending = "PENDING" + + // StackSetOperationResultStatusRunning is a StackSetOperationResultStatus enum value + StackSetOperationResultStatusRunning = "RUNNING" + + // StackSetOperationResultStatusSucceeded is a StackSetOperationResultStatus enum value + StackSetOperationResultStatusSucceeded = "SUCCEEDED" + + // StackSetOperationResultStatusFailed is a StackSetOperationResultStatus enum value + StackSetOperationResultStatusFailed = "FAILED" + + // StackSetOperationResultStatusCancelled is a StackSetOperationResultStatus enum value + StackSetOperationResultStatusCancelled = "CANCELLED" +) + +const ( + // StackSetOperationStatusRunning is a StackSetOperationStatus enum value + StackSetOperationStatusRunning = "RUNNING" + + // StackSetOperationStatusSucceeded is a StackSetOperationStatus enum value + StackSetOperationStatusSucceeded = "SUCCEEDED" + + // StackSetOperationStatusFailed is a StackSetOperationStatus enum value + StackSetOperationStatusFailed = "FAILED" + + // StackSetOperationStatusStopping is a StackSetOperationStatus enum value + StackSetOperationStatusStopping = "STOPPING" + + // StackSetOperationStatusStopped is a StackSetOperationStatus enum value + StackSetOperationStatusStopped = "STOPPED" +) + +const ( + // StackSetStatusActive is a StackSetStatus enum value + StackSetStatusActive = "ACTIVE" + + // StackSetStatusDeleted is a StackSetStatus enum value + StackSetStatusDeleted = "DELETED" +) + const ( // StackStatusCreateInProgress is a StackStatus enum value StackStatusCreateInProgress = "CREATE_IN_PROGRESS" diff --git a/cli/vendor/github.com/aws/aws-sdk-go/service/cloudformation/cloudformationiface/interface.go b/cli/vendor/github.com/aws/aws-sdk-go/service/cloudformation/cloudformationiface/interface.go index 923e2c6b5..b968e4e8b 100644 --- a/cli/vendor/github.com/aws/aws-sdk-go/service/cloudformation/cloudformationiface/interface.go +++ 
b/cli/vendor/github.com/aws/aws-sdk-go/service/cloudformation/cloudformationiface/interface.go @@ -76,6 +76,14 @@ type CloudFormationAPI interface { CreateStackWithContext(aws.Context, *cloudformation.CreateStackInput, ...request.Option) (*cloudformation.CreateStackOutput, error) CreateStackRequest(*cloudformation.CreateStackInput) (*request.Request, *cloudformation.CreateStackOutput) + CreateStackInstances(*cloudformation.CreateStackInstancesInput) (*cloudformation.CreateStackInstancesOutput, error) + CreateStackInstancesWithContext(aws.Context, *cloudformation.CreateStackInstancesInput, ...request.Option) (*cloudformation.CreateStackInstancesOutput, error) + CreateStackInstancesRequest(*cloudformation.CreateStackInstancesInput) (*request.Request, *cloudformation.CreateStackInstancesOutput) + + CreateStackSet(*cloudformation.CreateStackSetInput) (*cloudformation.CreateStackSetOutput, error) + CreateStackSetWithContext(aws.Context, *cloudformation.CreateStackSetInput, ...request.Option) (*cloudformation.CreateStackSetOutput, error) + CreateStackSetRequest(*cloudformation.CreateStackSetInput) (*request.Request, *cloudformation.CreateStackSetOutput) + DeleteChangeSet(*cloudformation.DeleteChangeSetInput) (*cloudformation.DeleteChangeSetOutput, error) DeleteChangeSetWithContext(aws.Context, *cloudformation.DeleteChangeSetInput, ...request.Option) (*cloudformation.DeleteChangeSetOutput, error) DeleteChangeSetRequest(*cloudformation.DeleteChangeSetInput) (*request.Request, *cloudformation.DeleteChangeSetOutput) @@ -84,6 +92,14 @@ type CloudFormationAPI interface { DeleteStackWithContext(aws.Context, *cloudformation.DeleteStackInput, ...request.Option) (*cloudformation.DeleteStackOutput, error) DeleteStackRequest(*cloudformation.DeleteStackInput) (*request.Request, *cloudformation.DeleteStackOutput) + DeleteStackInstances(*cloudformation.DeleteStackInstancesInput) (*cloudformation.DeleteStackInstancesOutput, error) + DeleteStackInstancesWithContext(aws.Context, *cloudformation.DeleteStackInstancesInput, ...request.Option) (*cloudformation.DeleteStackInstancesOutput, error) + DeleteStackInstancesRequest(*cloudformation.DeleteStackInstancesInput) (*request.Request, *cloudformation.DeleteStackInstancesOutput) + + DeleteStackSet(*cloudformation.DeleteStackSetInput) (*cloudformation.DeleteStackSetOutput, error) + DeleteStackSetWithContext(aws.Context, *cloudformation.DeleteStackSetInput, ...request.Option) (*cloudformation.DeleteStackSetOutput, error) + DeleteStackSetRequest(*cloudformation.DeleteStackSetInput) (*request.Request, *cloudformation.DeleteStackSetOutput) + DescribeAccountLimits(*cloudformation.DescribeAccountLimitsInput) (*cloudformation.DescribeAccountLimitsOutput, error) DescribeAccountLimitsWithContext(aws.Context, *cloudformation.DescribeAccountLimitsInput, ...request.Option) (*cloudformation.DescribeAccountLimitsOutput, error) DescribeAccountLimitsRequest(*cloudformation.DescribeAccountLimitsInput) (*request.Request, *cloudformation.DescribeAccountLimitsOutput) @@ -99,6 +115,10 @@ type CloudFormationAPI interface { DescribeStackEventsPages(*cloudformation.DescribeStackEventsInput, func(*cloudformation.DescribeStackEventsOutput, bool) bool) error DescribeStackEventsPagesWithContext(aws.Context, *cloudformation.DescribeStackEventsInput, func(*cloudformation.DescribeStackEventsOutput, bool) bool, ...request.Option) error + DescribeStackInstance(*cloudformation.DescribeStackInstanceInput) (*cloudformation.DescribeStackInstanceOutput, error) + 
DescribeStackInstanceWithContext(aws.Context, *cloudformation.DescribeStackInstanceInput, ...request.Option) (*cloudformation.DescribeStackInstanceOutput, error) + DescribeStackInstanceRequest(*cloudformation.DescribeStackInstanceInput) (*request.Request, *cloudformation.DescribeStackInstanceOutput) + DescribeStackResource(*cloudformation.DescribeStackResourceInput) (*cloudformation.DescribeStackResourceOutput, error) DescribeStackResourceWithContext(aws.Context, *cloudformation.DescribeStackResourceInput, ...request.Option) (*cloudformation.DescribeStackResourceOutput, error) DescribeStackResourceRequest(*cloudformation.DescribeStackResourceInput) (*request.Request, *cloudformation.DescribeStackResourceOutput) @@ -107,6 +127,14 @@ type CloudFormationAPI interface { DescribeStackResourcesWithContext(aws.Context, *cloudformation.DescribeStackResourcesInput, ...request.Option) (*cloudformation.DescribeStackResourcesOutput, error) DescribeStackResourcesRequest(*cloudformation.DescribeStackResourcesInput) (*request.Request, *cloudformation.DescribeStackResourcesOutput) + DescribeStackSet(*cloudformation.DescribeStackSetInput) (*cloudformation.DescribeStackSetOutput, error) + DescribeStackSetWithContext(aws.Context, *cloudformation.DescribeStackSetInput, ...request.Option) (*cloudformation.DescribeStackSetOutput, error) + DescribeStackSetRequest(*cloudformation.DescribeStackSetInput) (*request.Request, *cloudformation.DescribeStackSetOutput) + + DescribeStackSetOperation(*cloudformation.DescribeStackSetOperationInput) (*cloudformation.DescribeStackSetOperationOutput, error) + DescribeStackSetOperationWithContext(aws.Context, *cloudformation.DescribeStackSetOperationInput, ...request.Option) (*cloudformation.DescribeStackSetOperationOutput, error) + DescribeStackSetOperationRequest(*cloudformation.DescribeStackSetOperationInput) (*request.Request, *cloudformation.DescribeStackSetOperationOutput) + DescribeStacks(*cloudformation.DescribeStacksInput) (*cloudformation.DescribeStacksOutput, error) DescribeStacksWithContext(aws.Context, *cloudformation.DescribeStacksInput, ...request.Option) (*cloudformation.DescribeStacksOutput, error) DescribeStacksRequest(*cloudformation.DescribeStacksInput) (*request.Request, *cloudformation.DescribeStacksOutput) @@ -152,6 +180,10 @@ type CloudFormationAPI interface { ListImportsPages(*cloudformation.ListImportsInput, func(*cloudformation.ListImportsOutput, bool) bool) error ListImportsPagesWithContext(aws.Context, *cloudformation.ListImportsInput, func(*cloudformation.ListImportsOutput, bool) bool, ...request.Option) error + ListStackInstances(*cloudformation.ListStackInstancesInput) (*cloudformation.ListStackInstancesOutput, error) + ListStackInstancesWithContext(aws.Context, *cloudformation.ListStackInstancesInput, ...request.Option) (*cloudformation.ListStackInstancesOutput, error) + ListStackInstancesRequest(*cloudformation.ListStackInstancesInput) (*request.Request, *cloudformation.ListStackInstancesOutput) + ListStackResources(*cloudformation.ListStackResourcesInput) (*cloudformation.ListStackResourcesOutput, error) ListStackResourcesWithContext(aws.Context, *cloudformation.ListStackResourcesInput, ...request.Option) (*cloudformation.ListStackResourcesOutput, error) ListStackResourcesRequest(*cloudformation.ListStackResourcesInput) (*request.Request, *cloudformation.ListStackResourcesOutput) @@ -159,6 +191,18 @@ type CloudFormationAPI interface { ListStackResourcesPages(*cloudformation.ListStackResourcesInput, func(*cloudformation.ListStackResourcesOutput, 
bool) bool) error ListStackResourcesPagesWithContext(aws.Context, *cloudformation.ListStackResourcesInput, func(*cloudformation.ListStackResourcesOutput, bool) bool, ...request.Option) error + ListStackSetOperationResults(*cloudformation.ListStackSetOperationResultsInput) (*cloudformation.ListStackSetOperationResultsOutput, error) + ListStackSetOperationResultsWithContext(aws.Context, *cloudformation.ListStackSetOperationResultsInput, ...request.Option) (*cloudformation.ListStackSetOperationResultsOutput, error) + ListStackSetOperationResultsRequest(*cloudformation.ListStackSetOperationResultsInput) (*request.Request, *cloudformation.ListStackSetOperationResultsOutput) + + ListStackSetOperations(*cloudformation.ListStackSetOperationsInput) (*cloudformation.ListStackSetOperationsOutput, error) + ListStackSetOperationsWithContext(aws.Context, *cloudformation.ListStackSetOperationsInput, ...request.Option) (*cloudformation.ListStackSetOperationsOutput, error) + ListStackSetOperationsRequest(*cloudformation.ListStackSetOperationsInput) (*request.Request, *cloudformation.ListStackSetOperationsOutput) + + ListStackSets(*cloudformation.ListStackSetsInput) (*cloudformation.ListStackSetsOutput, error) + ListStackSetsWithContext(aws.Context, *cloudformation.ListStackSetsInput, ...request.Option) (*cloudformation.ListStackSetsOutput, error) + ListStackSetsRequest(*cloudformation.ListStackSetsInput) (*request.Request, *cloudformation.ListStackSetsOutput) + ListStacks(*cloudformation.ListStacksInput) (*cloudformation.ListStacksOutput, error) ListStacksWithContext(aws.Context, *cloudformation.ListStacksInput, ...request.Option) (*cloudformation.ListStacksOutput, error) ListStacksRequest(*cloudformation.ListStacksInput) (*request.Request, *cloudformation.ListStacksOutput) @@ -174,10 +218,18 @@ type CloudFormationAPI interface { SignalResourceWithContext(aws.Context, *cloudformation.SignalResourceInput, ...request.Option) (*cloudformation.SignalResourceOutput, error) SignalResourceRequest(*cloudformation.SignalResourceInput) (*request.Request, *cloudformation.SignalResourceOutput) + StopStackSetOperation(*cloudformation.StopStackSetOperationInput) (*cloudformation.StopStackSetOperationOutput, error) + StopStackSetOperationWithContext(aws.Context, *cloudformation.StopStackSetOperationInput, ...request.Option) (*cloudformation.StopStackSetOperationOutput, error) + StopStackSetOperationRequest(*cloudformation.StopStackSetOperationInput) (*request.Request, *cloudformation.StopStackSetOperationOutput) + UpdateStack(*cloudformation.UpdateStackInput) (*cloudformation.UpdateStackOutput, error) UpdateStackWithContext(aws.Context, *cloudformation.UpdateStackInput, ...request.Option) (*cloudformation.UpdateStackOutput, error) UpdateStackRequest(*cloudformation.UpdateStackInput) (*request.Request, *cloudformation.UpdateStackOutput) + UpdateStackSet(*cloudformation.UpdateStackSetInput) (*cloudformation.UpdateStackSetOutput, error) + UpdateStackSetWithContext(aws.Context, *cloudformation.UpdateStackSetInput, ...request.Option) (*cloudformation.UpdateStackSetOutput, error) + UpdateStackSetRequest(*cloudformation.UpdateStackSetInput) (*request.Request, *cloudformation.UpdateStackSetOutput) + ValidateTemplate(*cloudformation.ValidateTemplateInput) (*cloudformation.ValidateTemplateOutput, error) ValidateTemplateWithContext(aws.Context, *cloudformation.ValidateTemplateInput, ...request.Option) (*cloudformation.ValidateTemplateOutput, error) ValidateTemplateRequest(*cloudformation.ValidateTemplateInput) (*request.Request, 
*cloudformation.ValidateTemplateOutput) diff --git a/cli/vendor/github.com/aws/aws-sdk-go/service/cloudformation/doc.go b/cli/vendor/github.com/aws/aws-sdk-go/service/cloudformation/doc.go index 83600619a..667c0d17c 100644 --- a/cli/vendor/github.com/aws/aws-sdk-go/service/cloudformation/doc.go +++ b/cli/vendor/github.com/aws/aws-sdk-go/service/cloudformation/doc.go @@ -23,6 +23,172 @@ // technical information about a specific AWS product, you can find the product's // technical documentation at docs.aws.amazon.com (http://docs.aws.amazon.com/). // +// APIs for stacks +// +// When you use AWS CloudFormation, you manage related resources as a single +// unit called a stack. You create, update, and delete a collection of resources +// by creating, updating, and deleting stacks. All the resources in a stack +// are defined by the stack's AWS CloudFormation template. +// +// Actions +// +// * CancelUpdateStack (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_CancelUpdateStack.html) +// +// * ContinueUpdateRollback (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_ContinueUpdateRollback.html) +// +// * CreateStack (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_CreateStack.html) +// +// * DeleteStack (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_DeleteStack.html) +// +// * DescribeStackEvents (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_DescribeStackEvents.html) +// +// * DescribeStackResource (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_DescribeStackResource.html) +// +// * DescribeStackResources (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_DescribeStackResources.html) +// +// * DescribeStacks (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_DescribeStacks.html) +// +// * EstimateTemplateCost (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_EstimateTemplateCost.html) +// +// * GetStackPolicy (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_GetStackPolicy.html) +// +// * GetTemplate (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_GetTemplate.html) +// +// * GetTemplateSummary (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_GetTemplateSummary.html) +// +// * ListExports (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_ListExports.html) +// +// * ListImports (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_ListImports.html) +// +// * ListStackResources (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_ListStackResources.html) +// +// * ListStacks (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_ListStacks.html) +// +// * SetStackPolicy (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_SetStackPolicy.html) +// +// * UpdateStack (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_UpdateStack.html) +// +// * ValidateTemplate (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_ValidateTemplate.html) +// +// Data Types +// +// * Export (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_Export.html) +// +// * Parameter (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_Parameter.html) +// +// * ParameterConstraints (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_ParameterConstraints.html) +// +// * ParameterDeclaration 
(http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_ParameterDeclaration.html) +// +// * Stack (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_Stack.html) +// +// * StackEvent (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_StackEvent.html) +// +// * StackResource (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_StackResource.html) +// +// * StackResourceDetail (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_StackResourceDetail.html) +// +// * StackResourceSummary (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_StackResourceSummary.html) +// +// * StackSummary (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_StackSummary.html) +// +// * Tag (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_Tag.html) +// +// * TemplateParameter (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_TemplateParameter.html) +// +// APIs for change sets +// +// If you need to make changes to the running resources in a stack, you update +// the stack. Before making changes to your resources, you can generate a change +// set, which is a summary of your proposed changes. Change sets allow you to +// see how your changes might impact your running resources, especially for +// critical resources, before implementing them. +// +// Actions +// +// * CreateChangeSet (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_CreateChangeSet.html) +// +// * DeleteChangeSet (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_DeleteChangeSet.html) +// +// * DescribeChangeSet (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_DescribeChangeSet.html) +// +// * ExecuteChangeSet (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_ExecuteChangeSet.html) +// +// * ListChangeSets (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_ListChangeSets.html) +// +// Data Types +// +// * Change (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_Change.html) +// +// * ChangeSetSummary (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_ChangeSetSummary.html) +// +// * ResourceChange (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_ResourceChange.html) +// +// * ResourceChangeDetail (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_ResourceChangeDetail.html) +// +// * ResourceTargetDefinition (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_ResourceTargetDefinition.html) +// +// APIs for stack sets +// +// AWS CloudFormation StackSets lets you create a collection, or stack set, +// of stacks that can automatically and safely provision a common set of AWS +// resources across multiple AWS accounts and multiple AWS regions from a single +// AWS CloudFormation template. When you create a stack set, AWS CloudFormation +// provisions a stack in each of the specified accounts and regions by using +// the supplied AWS CloudFormation template and parameters. Stack sets let you +// manage a common set of AWS resources in a selection of accounts and regions +// in a single operation.
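Taken together, the stack set actions listed below can be driven from the generated Go client in a few calls. The following is a minimal sketch, not part of the vendored patch, assuming default credentials, region us-east-1, and placeholder stack set name, template URL, account ID, and target regions:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudformation"
)

func main() {
	// Shared session; credentials come from the environment or config files.
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := cloudformation.New(sess)

	// Create the stack set itself from a template (placeholder values).
	if _, err := svc.CreateStackSet(&cloudformation.CreateStackSetInput{
		StackSetName: aws.String("my-stack-set"),
		TemplateURL:  aws.String("https://s3.amazonaws.com/my-bucket/template.yaml"),
	}); err != nil {
		log.Fatal(err)
	}

	// Provision stack instances into the target accounts and regions; the
	// returned OperationId can then be polled with DescribeStackSetOperation.
	out, err := svc.CreateStackInstances(&cloudformation.CreateStackInstancesInput{
		StackSetName: aws.String("my-stack-set"),
		Accounts:     aws.StringSlice([]string{"111111111111"}),
		Regions:      aws.StringSlice([]string{"us-east-1", "us-west-2"}),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("stack set operation:", aws.StringValue(out.OperationId))
}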
+// +// Actions +// +// * CreateStackInstances (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_CreateStackInstances.html) +// +// * CreateStackSet (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_CreateStackSet.html) +// +// * DeleteStackInstances (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_DeleteStackInstances.html) +// +// * DeleteStackSet (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_DeleteStackSet.html) +// +// * DescribeStackInstance (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_DescribeStackInstance.html) +// +// * DescribeStackSet (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_DescribeStackSet.html) +// +// * DescribeStackSetOperation (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_DescribeStackSetOperation.html) +// +// * ListStackInstances (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_ListStackInstances.html) +// +// * ListStackSetOperationResults (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_ListStackSetOperationResults.html) +// +// * ListStackSetOperations (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_ListStackSetOperations.html) +// +// * ListStackSets (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_ListStackSets.html) +// +// * StopStackSetOperation (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_StopStackSetOperation.html) +// +// * UpdateStackSet (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_UpdateStackSet.html) +// +// Data Types +// +// * Parameter (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_Parameter.html) +// +// * StackInstance (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_StackInstance.html) +// +// * StackInstanceSummary (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_StackInstanceSummary.html) +// +// * StackSet (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_StackSet.html) +// +// * StackSetOperation (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_StackSetOperation.html) +// +// * StackSetOperationPreferences (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_StackSetOperationPreferences.html) +// +// * StackSetOperationResultSummary (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_StackSetOperationResultSummary.html) +// +// * StackSetOperationSummary (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_StackSetOperationSummary.html) +// +// * StackSetSummary (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_StackSetSummary.html) +// +// * Tag (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_Tag.html) +// +// See https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15 for more information on this service. +// +// See cloudformation package documentation for more information. diff --git a/cli/vendor/github.com/aws/aws-sdk-go/service/cloudformation/errors.go b/cli/vendor/github.com/aws/aws-sdk-go/service/cloudformation/errors.go index 8bcf9482c..4ad8aa6e4 100644 --- a/cli/vendor/github.com/aws/aws-sdk-go/service/cloudformation/errors.go +++ b/cli/vendor/github.com/aws/aws-sdk-go/service/cloudformation/errors.go @@ -7,7 +7,7 @@ const ( // ErrCodeAlreadyExistsException for service response error code // "AlreadyExistsException".
// - // Resource with the name requested already exists. + // The resource with the name requested already exists. ErrCodeAlreadyExistsException = "AlreadyExistsException" // ErrCodeChangeSetNotFoundException for service response error code @@ -17,27 +17,91 @@ const ( // for a stack, use the ListChangeSets action. ErrCodeChangeSetNotFoundException = "ChangeSetNotFound" + // ErrCodeCreatedButModifiedException for service response error code + // "CreatedButModifiedException". + // + // The specified resource exists, but has been changed. + ErrCodeCreatedButModifiedException = "CreatedButModifiedException" + // ErrCodeInsufficientCapabilitiesException for service response error code // "InsufficientCapabilitiesException". // - // The template contains resources with capabilities that were not specified + // The template contains resources with capabilities that weren't specified // in the Capabilities parameter. ErrCodeInsufficientCapabilitiesException = "InsufficientCapabilitiesException" // ErrCodeInvalidChangeSetStatusException for service response error code // "InvalidChangeSetStatus". // - // The specified change set cannot be used to update the stack. For example, - // the change set status might be CREATE_IN_PROGRESS or the stack status might + // The specified change set can't be used to update the stack. For example, + // the change set status might be CREATE_IN_PROGRESS, or the stack status might // be UPDATE_IN_PROGRESS. ErrCodeInvalidChangeSetStatusException = "InvalidChangeSetStatus" + // ErrCodeInvalidOperationException for service response error code + // "InvalidOperationException". + // + // The specified operation isn't valid. + ErrCodeInvalidOperationException = "InvalidOperationException" + // ErrCodeLimitExceededException for service response error code // "LimitExceededException". // - // Quota for the resource has already been reached. + // The quota for the resource has already been reached. ErrCodeLimitExceededException = "LimitExceededException" + // ErrCodeNameAlreadyExistsException for service response error code + // "NameAlreadyExistsException". + // + // The specified name is already in use. + ErrCodeNameAlreadyExistsException = "NameAlreadyExistsException" + + // ErrCodeOperationIdAlreadyExistsException for service response error code + // "OperationIdAlreadyExistsException". + // + // The specified operation ID already exists. + ErrCodeOperationIdAlreadyExistsException = "OperationIdAlreadyExistsException" + + // ErrCodeOperationInProgressException for service response error code + // "OperationInProgressException". + // + // Another operation is currently in progress for this stack set. Only one operation + // can be performed for a stack set at a given time. + ErrCodeOperationInProgressException = "OperationInProgressException" + + // ErrCodeOperationNotFoundException for service response error code + // "OperationNotFoundException". + // + // The specified ID refers to an operation that doesn't exist. + ErrCodeOperationNotFoundException = "OperationNotFoundException" + + // ErrCodeStackInstanceNotFoundException for service response error code + // "StackInstanceNotFoundException". + // + // The specified stack instance doesn't exist. + ErrCodeStackInstanceNotFoundException = "StackInstanceNotFoundException" + + // ErrCodeStackSetNotEmptyException for service response error code + // "StackSetNotEmptyException". + // + // You can't yet delete this stack set, because it still contains one or more + // stack instances. 
Delete all stack instances from the stack set before deleting + // the stack set. + ErrCodeStackSetNotEmptyException = "StackSetNotEmptyException" + + // ErrCodeStackSetNotFoundException for service response error code + // "StackSetNotFoundException". + // + // The specified stack set doesn't exist. + ErrCodeStackSetNotFoundException = "StackSetNotFoundException" + + // ErrCodeStaleRequestException for service response error code + // "StaleRequestException". + // + // Another operation has been performed on this stack set since the specified + // operation was performed. + ErrCodeStaleRequestException = "StaleRequestException" + // ErrCodeTokenAlreadyExistsException for service response error code // "TokenAlreadyExistsException". // diff --git a/cli/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/cli/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go index bd13ab77e..a7d7c335c 100644 --- a/cli/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go +++ b/cli/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -25094,6 +25094,7 @@ func (s *CreateNetworkInterfaceOutput) SetNetworkInterface(v *NetworkInterface) return s } +// Contains the parameters for CreateNetworkInterfacePermission. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateNetworkInterfacePermissionRequest type CreateNetworkInterfacePermissionInput struct { _ struct{} `type:"structure"` @@ -25177,6 +25178,7 @@ func (s *CreateNetworkInterfacePermissionInput) SetPermission(v string) *CreateN return s } +// Contains the output of CreateNetworkInterfacePermission. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateNetworkInterfacePermissionResult type CreateNetworkInterfacePermissionOutput struct { _ struct{} `type:"structure"` @@ -27665,6 +27667,7 @@ func (s DeleteNetworkInterfaceOutput) GoString() string { return s.String() } +// Contains the parameters for DeleteNetworkInterfacePermission. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteNetworkInterfacePermissionRequest type DeleteNetworkInterfacePermissionInput struct { _ struct{} `type:"structure"` @@ -27726,6 +27729,7 @@ func (s *DeleteNetworkInterfacePermissionInput) SetNetworkInterfacePermissionId( return s } +// Contains the output for DeleteNetworkInterfacePermission. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteNetworkInterfacePermissionResult type DeleteNetworkInterfacePermissionOutput struct { _ struct{} `type:"structure"` @@ -32411,6 +32415,7 @@ func (s *DescribeNetworkInterfaceAttributeOutput) SetSourceDestCheck(v *Attribut return s } +// Contains the parameters for DescribeNetworkInterfacePermissions. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNetworkInterfacePermissionsRequest type DescribeNetworkInterfacePermissionsInput struct { _ struct{} `type:"structure"` @@ -32477,6 +32482,7 @@ func (s *DescribeNetworkInterfacePermissionsInput) SetNextToken(v string) *Descr return s } +// Contains the output for DescribeNetworkInterfacePermissions. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNetworkInterfacePermissionsResult type DescribeNetworkInterfacePermissionsOutput struct { _ struct{} `type:"structure"` @@ -54039,6 +54045,9 @@ type SpotFleetLaunchSpecification struct { // subnets, separate them using commas; for example, "subnet-a61dafcf, subnet-65ea5f08". SubnetId *string `locationName:"subnetId" type:"string"` + // The tags to apply during creation. 
+ TagSpecifications []*SpotFleetTagSpecification `locationName:"tagSpecificationSet" locationNameList:"item" type:"list"` + // The user data to make available to the instances. If you are using an AWS // SDK or command line tool, Base64-encoding is performed for you, and you can // load the text from a file. Otherwise, you must provide Base64-encoded text. @@ -54174,6 +54183,12 @@ func (s *SpotFleetLaunchSpecification) SetSubnetId(v string) *SpotFleetLaunchSpe return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *SpotFleetLaunchSpecification) SetTagSpecifications(v []*SpotFleetTagSpecification) *SpotFleetLaunchSpecification { + s.TagSpecifications = v + return s +} + // SetUserData sets the UserData field's value. func (s *SpotFleetLaunchSpecification) SetUserData(v string) *SpotFleetLaunchSpecification { s.UserData = &v @@ -54483,6 +54498,41 @@ func (s *SpotFleetRequestConfigData) SetValidUntil(v time.Time) *SpotFleetReques return s } +// The tags for a Spot fleet resource. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SpotFleetTagSpecification +type SpotFleetTagSpecification struct { + _ struct{} `type:"structure"` + + // The type of resource. Currently, the only resource type that is supported + // is instance. + ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` + + // The tags. + Tags []*Tag `locationName:"tag" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s SpotFleetTagSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotFleetTagSpecification) GoString() string { + return s.String() +} + +// SetResourceType sets the ResourceType field's value. +func (s *SpotFleetTagSpecification) SetResourceType(v string) *SpotFleetTagSpecification { + s.ResourceType = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *SpotFleetTagSpecification) SetTags(v []*Tag) *SpotFleetTagSpecification { + s.Tags = v + return s +} + // Describes a Spot instance request. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SpotInstanceRequest type SpotInstanceRequest struct { diff --git a/cli/vendor/github.com/aws/aws-sdk-go/service/emr/api.go b/cli/vendor/github.com/aws/aws-sdk-go/service/emr/api.go index d7eb0abe5..8abbf85e7 100644 --- a/cli/vendor/github.com/aws/aws-sdk-go/service/emr/api.go +++ b/cli/vendor/github.com/aws/aws-sdk-go/service/emr/api.go @@ -1592,11 +1592,10 @@ func (c *EMR) ListInstancesRequest(input *ListInstancesInput) (req *request.Requ // ListInstances API operation for Amazon Elastic MapReduce. // -// Provides information about the cluster instances that Amazon EMR provisions -// on behalf of a user when it creates the cluster. For example, this operation -// indicates when the EC2 instances reach the Ready state, when instances become -// available to Amazon EMR to use for jobs, and the IP addresses for cluster -// instances, etc. +// Provides information for all active EC2 instances and EC2 instances terminated +// in the last 30 days, up to a maximum of 2,000. EC2 instances in any of the +// following states are considered active: AWAITING_FULFILLMENT, PROVISIONING, +// BOOTSTRAPPING, RUNNING. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3082,7 +3081,7 @@ func (s AddTagsOutput) GoString() string { // * "mapr" with the user arguments specifying "--edition,m3" or "--edition,m5" // - launch the cluster using MapR M3 or M5 Edition, respectively. // -// In Amazon EMR releases 4.0 and greater, the only accepted parameter is the +// In Amazon EMR releases 4.x and later, the only accepted parameter is the // application name. To pass arguments to applications, you supply a configuration // for each application. // Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/Application @@ -3674,11 +3673,18 @@ type Cluster struct { // Specifies whether the cluster should terminate after completing all steps. AutoTerminate *bool `type:"boolean"` - // Amazon EMR releases 4.x or later. - // - // The list of Configurations supplied to the EMR cluster. + // Applies only to Amazon EMR releases 4.x and later. The list of Configurations + // supplied to the EMR cluster. Configurations []*Configuration `type:"list"` + // Available only in Amazon EMR version 5.7.0 and later. The ID of a custom + // Amazon EBS-backed Linux AMI if the cluster uses a custom AMI. + CustomAmiId *string `type:"string"` + + // The size, in GiB, of the EBS root device volume of the Linux AMI that is + // used for each EC2 instance. Available in Amazon EMR version 4.x and later. + EbsRootVolumeSize *int64 `type:"integer"` + // Provides information about the EC2 instances in a cluster grouped by category. // For example, key name, subnet ID, IAM instance profile, and so on. Ec2InstanceAttributes *Ec2InstanceAttributes `type:"structure"` @@ -3711,10 +3717,14 @@ type Cluster struct { // the actual billing rate. NormalizedInstanceHours *int64 `type:"integer"` - // The release label for the Amazon EMR release. For Amazon EMR 3.x and 2.x - // AMIs, use amiVersion instead instead of ReleaseLabel. + // The release label for the Amazon EMR release. ReleaseLabel *string `type:"string"` + // Applies only when CustomAmiID is used. Specifies the type of updates that + // are applied from the Amazon Linux AMI package repositories when an instance + // boots using the AMI. + RepoUpgradeOnBoot *string `type:"string" enum:"RepoUpgradeOnBoot"` + // The AMI version requested for this cluster. RequestedAmiVersion *string `type:"string"` @@ -3796,6 +3806,18 @@ func (s *Cluster) SetConfigurations(v []*Configuration) *Cluster { return s } +// SetCustomAmiId sets the CustomAmiId field's value. +func (s *Cluster) SetCustomAmiId(v string) *Cluster { + s.CustomAmiId = &v + return s +} + +// SetEbsRootVolumeSize sets the EbsRootVolumeSize field's value. +func (s *Cluster) SetEbsRootVolumeSize(v int64) *Cluster { + s.EbsRootVolumeSize = &v + return s +} + // SetEc2InstanceAttributes sets the Ec2InstanceAttributes field's value. func (s *Cluster) SetEc2InstanceAttributes(v *Ec2InstanceAttributes) *Cluster { s.Ec2InstanceAttributes = v @@ -3844,6 +3866,12 @@ func (s *Cluster) SetReleaseLabel(v string) *Cluster { return s } +// SetRepoUpgradeOnBoot sets the RepoUpgradeOnBoot field's value. +func (s *Cluster) SetRepoUpgradeOnBoot(v string) *Cluster { + s.RepoUpgradeOnBoot = &v + return s +} + // SetRequestedAmiVersion sets the RequestedAmiVersion field's value. func (s *Cluster) SetRequestedAmiVersion(v string) *Cluster { s.RequestedAmiVersion = &v @@ -4835,20 +4863,26 @@ type Ec2InstanceAttributes struct { // of the cluster assume this role. 
IamInstanceProfile *string `type:"string"` - // Applies to clusters configured with the The list of availability zones to - // choose from. The service will choose the availability zone with the best - // mix of available capacity and lowest cost to launch the cluster. If you do - // not specify this value, the cluster is launched in any availability zone - // that the customer account has access to. + // Applies to clusters configured with the instance fleets option. Specifies + // one or more Availability Zones in which to launch EC2 cluster instances when + // the EC2-Classic network configuration is supported. Amazon EMR chooses the + // Availability Zone with the best fit from among the list of RequestedEc2AvailabilityZones, + // and then launches all cluster instances within that Availability Zone. If + // you do not specify this value, Amazon EMR chooses the Availability Zone for + // you. RequestedEc2SubnetIDs and RequestedEc2AvailabilityZones cannot be specified + // together. RequestedEc2AvailabilityZones []*string `type:"list"` // Applies to clusters configured with the instance fleets option. Specifies // the unique identifier of one or more Amazon EC2 subnets in which to launch - // EC2 cluster instances. Amazon EMR chooses the EC2 subnet with the best performance - // and cost characteristics from among the list of RequestedEc2SubnetIds and - // launches all cluster instances within that subnet. If this value is not specified, - // and the account supports EC2-Classic networks, the cluster launches instances - // in the EC2-Classic network and uses Requested + // EC2 cluster instances. Subnets must exist within the same VPC. Amazon EMR + // chooses the EC2 subnet with the best fit from among the list of RequestedEc2SubnetIds, + // and then launches all cluster instances within that Subnet. If this value + // is not specified, and the account and region support EC2-Classic networks, + // the cluster launches instances in the EC2-Classic network and uses RequestedEc2AvailabilityZones + // instead of this setting. If EC2-Classic is not supported, and no Subnet is + // specified, Amazon EMR chooses the subnet for you. RequestedEc2SubnetIDs and + // RequestedEc2AvailabilityZones cannot be specified together. RequestedEc2SubnetIds []*string `type:"list"` // The identifier of the Amazon EC2 security group for the Amazon EMR service @@ -6590,9 +6624,9 @@ type InstanceTypeConfig struct { BidPrice *string `type:"string"` // The bid price, as a percentage of On-Demand price, for each EC2 Spot instance - // as defined by InstanceType. Expressed as a number between 0 and 1000 (for - // example, 20 specifies 20%). If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice - // is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%. + // as defined by InstanceType. Expressed as a number (for example, 20 specifies + // 20%). If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, + // BidPriceAsPercentageOfOnDemandPrice defaults to 100%. BidPriceAsPercentageOfOnDemandPrice *float64 `type:"double"` // A configuration classification that applies when provisioning cluster instances, @@ -6611,8 +6645,8 @@ type InstanceTypeConfig struct { // The number of units that a provisioned instance of this type provides toward // fulfilling the target capacities defined in InstanceFleetConfig. This value - // is 1 for a master instance fleet, and must be greater than 0 for core and - // task instance fleets. 
+ // is 1 for a master instance fleet, and must be 1 or greater for core and task + // instance fleets. Defaults to 1 if not specified. WeightedCapacity *int64 `type:"integer"` } @@ -6779,9 +6813,9 @@ func (s *InstanceTypeSpecification) SetWeightedCapacity(v int64) *InstanceTypeSp type JobFlowDetail struct { _ struct{} `type:"structure"` - // The version of the AMI used to initialize Amazon EC2 instances in the job - // flow. For a list of AMI versions currently supported by Amazon EMR, see AMI - // Versions Supported in EMR (http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/EnvironmentConfig_AMIVersion.html#ami-versions-supported) + // Used only for version 2.x and 3.x of Amazon EMR. The version of the AMI used + // to initialize Amazon EC2 instances in the job flow. For a list of AMI versions + // supported by Amazon EMR, see AMI Versions Supported in EMR (http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/EnvironmentConfig_AMIVersion.html#ami-versions-supported) // in the Amazon EMR Developer Guide. AmiVersion *string `type:"string"` @@ -8560,22 +8594,17 @@ type RunJobFlowInput struct { // A JSON string for selecting additional features. AdditionalInfo *string `type:"string"` - // For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and greater, - // use ReleaseLabel. - // + // For Amazon EMR AMI versions 3.x and 2.x. For Amazon EMR releases 4.0 and + // later, the Linux AMI is determined by the ReleaseLabel specified or by CustomAmiID. // The version of the Amazon Machine Image (AMI) to use when launching Amazon - // EC2 instances in the job flow. The following values are valid: - // - // * The version number of the AMI to use, for example, "2.0." + // EC2 instances in the job flow. For details about the AMI versions currently + // supported in EMR version 3.x and 2.x, see AMI Versions Supported in EMR (ElasticMapReduce/latest/DeveloperGuide/emr-dg.pdf#nameddest=ami-versions-supported) + // in the Amazon EMR Developer Guide. // // If the AMI supports multiple versions of Hadoop (for example, AMI 1.0 supports - // both Hadoop 0.18 and 0.20) you can use the JobFlowInstancesConfigHadoopVersion + // both Hadoop 0.18 and 0.20), you can use the JobFlowInstancesConfigHadoopVersion // parameter to modify the version of Hadoop from the defaults shown above. // - // For details about the AMI versions currently supported by Amazon Elastic - // MapReduce, see AMI Versions Supported in Elastic MapReduce (http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/EnvironmentConfig_AMIVersion.html#ami-versions-supported) - // in the Amazon Elastic MapReduce Developer Guide. - // // Previously, the EMR AMI version API parameter options allowed you to use // latest for the latest AMI version rather than specify a numerical value. // Some regions no longer support this deprecated option as they only have a @@ -8583,10 +8612,9 @@ type RunJobFlowInput struct { // release label release (EMR 4.x or later). AmiVersion *string `type:"string"` - // Amazon EMR releases 4.x or later. - // - // A list of applications for the cluster. Valid values are: "Hadoop", "Hive", - // "Mahout", "Pig", and "Spark." They are case insensitive. + // For Amazon EMR releases 4.0 and later. A list of applications for the cluster. + // Valid values are: "Hadoop", "Hive", "Mahout", "Pig", and "Spark." They are + // case insensitive. Applications []*Application `type:"list"` // An IAM role for automatic scaling policies. The default role is EMR_AutoScaling_DefaultRole. 
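The new CustomAmiId, EbsRootVolumeSize, and RepoUpgradeOnBoot fields shown on Cluster above also land on RunJobFlowInput in the hunk that follows. A minimal sketch of launching a release-label cluster that exercises them, assuming default credentials and placeholder cluster name, AMI ID, and IAM roles (not part of the vendored patch):

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/emr"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := emr.New(sess)

	out, err := svc.RunJobFlow(&emr.RunJobFlowInput{
		Name:         aws.String("example-cluster"),
		ReleaseLabel: aws.String("emr-5.7.0"),
		Applications: []*emr.Application{{Name: aws.String("Spark")}},
		Instances: &emr.JobFlowInstancesConfig{
			MasterInstanceType: aws.String("m3.xlarge"),
			InstanceCount:      aws.Int64(1),
		},
		// Fields added in this update: a custom AMI, its root volume size,
		// and the repo upgrade policy applied when instances boot from it.
		CustomAmiId:       aws.String("ami-0123456789abcdef0"),
		EbsRootVolumeSize: aws.Int64(32),
		RepoUpgradeOnBoot: aws.String(emr.RepoUpgradeOnBootSecurity),
		JobFlowRole:       aws.String("EMR_EC2_DefaultRole"),
		ServiceRole:       aws.String("EMR_DefaultRole"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("started job flow:", aws.StringValue(out.JobFlowId))
}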
@@ -8597,11 +8625,28 @@ type RunJobFlowInput struct { // A list of bootstrap actions to run before Hadoop starts on the cluster nodes. BootstrapActions []*BootstrapActionConfig `type:"list"` - // Amazon EMR releases 4.x or later. - // - // The list of configurations supplied for the EMR cluster you are creating. + // For Amazon EMR releases 4.0 and later. The list of configurations supplied + // for the EMR cluster you are creating. Configurations []*Configuration `type:"list"` + // Available only in Amazon EMR version 5.7.0 and later. The ID of a custom + // Amazon EBS-backed Linux AMI. If specified, Amazon EMR uses this AMI when + // it launches cluster EC2 instances. For more information about custom AMIs + // in Amazon EMR, see Using a Custom AMI (http://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-custom-ami.html) + // in the Amazon EMR Management Guide. If omitted, the cluster uses the base + // Linux AMI for the ReleaseLabel specified. For Amazon EMR versions 2.x and + // 3.x, use AmiVersion instead. + // + // For information about creating a custom AMI, see Creating an Amazon EBS-Backed + // Linux AMI (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/creating-an-ami-ebs.html) + // in the Amazon Elastic Compute Cloud User Guide for Linux Instances. For information + // about finding an AMI ID, see Finding a Linux AMI (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/finding-an-ami.html). + CustomAmiId *string `type:"string"` + + // The size, in GiB, of the EBS root device volume of the Linux AMI that is + // used for each EC2 instance. Available in Amazon EMR version 4.x and later. + EbsRootVolumeSize *int64 `type:"integer"` + // A specification of the number and type of Amazon EC2 instances. // // Instances is a required field @@ -8622,7 +8667,7 @@ type RunJobFlowInput struct { // Name is a required field Name *string `type:"string" required:"true"` - // For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and greater, + // For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and later, // use Applications. // // A list of strings that indicates third-party software to use with the job @@ -8650,12 +8695,17 @@ type RunJobFlowInput struct { // * "ganglia" - launch the cluster with the Ganglia Monitoring System installed. NewSupportedProducts []*SupportedProductConfig `type:"list"` - // Amazon EMR releases 4.x or later. - // // The release label for the Amazon EMR release. For Amazon EMR 3.x and 2.x - // AMIs, use amiVersion instead instead of ReleaseLabel. + // AMIs, use AmiVersion instead. ReleaseLabel *string `type:"string"` + // Applies only when CustomAmiID is used. Specifies which updates from the Amazon + // Linux AMI package repositories to apply automatically when the instance boots + // using the AMI. If omitted, the default is SECURITY, which indicates that + // only security updates are applied. If NONE is specified, no updates are applied, + // and all updates must be applied manually. + RepoUpgradeOnBoot *string `type:"string" enum:"RepoUpgradeOnBoot"` + // Specifies the way that individual Amazon EC2 instances terminate when an // automatic scale-in activity occurs or an instance group is resized. TERMINATE_AT_INSTANCE_HOUR // indicates that Amazon EMR terminates nodes at the instance-hour boundary, @@ -8680,7 +8730,7 @@ type RunJobFlowInput struct { // A list of steps to run. Steps []*StepConfig `type:"list"` - // For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and greater, + // For Amazon EMR releases 3.x and 2.x. 
For Amazon EMR releases 4.x and later, // use Applications. // // A list of strings that indicates third-party software to use. For more information, @@ -8790,6 +8840,18 @@ func (s *RunJobFlowInput) SetConfigurations(v []*Configuration) *RunJobFlowInput return s } +// SetCustomAmiId sets the CustomAmiId field's value. +func (s *RunJobFlowInput) SetCustomAmiId(v string) *RunJobFlowInput { + s.CustomAmiId = &v + return s +} + +// SetEbsRootVolumeSize sets the EbsRootVolumeSize field's value. +func (s *RunJobFlowInput) SetEbsRootVolumeSize(v int64) *RunJobFlowInput { + s.EbsRootVolumeSize = &v + return s +} + // SetInstances sets the Instances field's value. func (s *RunJobFlowInput) SetInstances(v *JobFlowInstancesConfig) *RunJobFlowInput { s.Instances = v @@ -8826,6 +8888,12 @@ func (s *RunJobFlowInput) SetReleaseLabel(v string) *RunJobFlowInput { return s } +// SetRepoUpgradeOnBoot sets the RepoUpgradeOnBoot field's value. +func (s *RunJobFlowInput) SetRepoUpgradeOnBoot(v string) *RunJobFlowInput { + s.RepoUpgradeOnBoot = &v + return s +} + // SetScaleDownBehavior sets the ScaleDownBehavior field's value. func (s *RunJobFlowInput) SetScaleDownBehavior(v string) *RunJobFlowInput { s.ScaleDownBehavior = &v @@ -9514,8 +9582,9 @@ type SpotProvisioningSpecification struct { // The action to take when TargetSpotCapacity has not been fulfilled when the // TimeoutDurationMinutes has expired. Spot instances are not provisioned within - the Spot provisioining timeout. Valid values are TERMINATE_CLUSTER and SWITCH_TO_ON_DEMAND - to fulfill the remaining capacity. + the Spot provisioning timeout. Valid values are TERMINATE_CLUSTER and SWITCH_TO_ON_DEMAND. + SWITCH_TO_ON_DEMAND specifies that if no Spot instances are available, On-Demand + Instances should be provisioned to fulfill any remaining Spot capacity.
// // TimeoutAction is a required field TimeoutAction *string `type:"string" required:"true" enum:"SpotProvisioningTimeoutAction"` @@ -10292,6 +10361,9 @@ const ( // ClusterStateChangeReasonCodeInstanceFailure is a ClusterStateChangeReasonCode enum value ClusterStateChangeReasonCodeInstanceFailure = "INSTANCE_FAILURE" + // ClusterStateChangeReasonCodeInstanceFleetTimeout is a ClusterStateChangeReasonCode enum value + ClusterStateChangeReasonCodeInstanceFleetTimeout = "INSTANCE_FLEET_TIMEOUT" + // ClusterStateChangeReasonCodeBootstrapFailure is a ClusterStateChangeReasonCode enum value ClusterStateChangeReasonCodeBootstrapFailure = "BOOTSTRAP_FAILURE" @@ -10512,6 +10584,14 @@ const ( MarketTypeSpot = "SPOT" ) +const ( + // RepoUpgradeOnBootSecurity is a RepoUpgradeOnBoot enum value + RepoUpgradeOnBootSecurity = "SECURITY" + + // RepoUpgradeOnBootNone is a RepoUpgradeOnBoot enum value + RepoUpgradeOnBootNone = "NONE" +) + const ( // ScaleDownBehaviorTerminateAtInstanceHour is a ScaleDownBehavior enum value ScaleDownBehaviorTerminateAtInstanceHour = "TERMINATE_AT_INSTANCE_HOUR" diff --git a/cli/vendor/github.com/funcy/functions_go/VERSION b/cli/vendor/github.com/funcy/functions_go/VERSION index 23e5f1854..9d77e730c 100644 --- a/cli/vendor/github.com/funcy/functions_go/VERSION +++ b/cli/vendor/github.com/funcy/functions_go/VERSION @@ -1 +1 @@ -0.1.34 \ No newline at end of file +0.1.35 \ No newline at end of file diff --git a/cli/vendor/github.com/funcy/functions_go/client/call/call_client.go b/cli/vendor/github.com/funcy/functions_go/client/call/call_client.go index 1055bfd81..541bcb004 100644 --- a/cli/vendor/github.com/funcy/functions_go/client/call/call_client.go +++ b/cli/vendor/github.com/funcy/functions_go/client/call/call_client.go @@ -23,62 +23,62 @@ type Client struct { } /* -GetAppsAppCallsRoute gets route bound calls +GetAppsAppCalls gets app bound calls -Get route-bound calls. +Get app-bound calls. Can filter to route-bound calls.
*/ -func (a *Client) GetAppsAppCallsRoute(params *GetAppsAppCallsRouteParams) (*GetAppsAppCallsRouteOK, error) { +func (a *Client) GetAppsAppCalls(params *GetAppsAppCallsParams) (*GetAppsAppCallsOK, error) { // TODO: Validate the params before sending if params == nil { - params = NewGetAppsAppCallsRouteParams() + params = NewGetAppsAppCallsParams() } result, err := a.transport.Submit(&runtime.ClientOperation{ - ID: "GetAppsAppCallsRoute", + ID: "GetAppsAppCalls", Method: "GET", - PathPattern: "/apps/{app}/calls/{route}", + PathPattern: "/apps/{app}/calls/", ProducesMediaTypes: []string{"application/json"}, ConsumesMediaTypes: []string{"application/json"}, Schemes: []string{"http", "https"}, Params: params, - Reader: &GetAppsAppCallsRouteReader{formats: a.formats}, + Reader: &GetAppsAppCallsReader{formats: a.formats}, Context: params.Context, Client: params.HTTPClient, }) if err != nil { return nil, err } - return result.(*GetAppsAppCallsRouteOK), nil + return result.(*GetAppsAppCallsOK), nil } /* -GetCallsCall gets call information +GetAppsAppCallsCall gets call information Get call information */ -func (a *Client) GetCallsCall(params *GetCallsCallParams) (*GetCallsCallOK, error) { +func (a *Client) GetAppsAppCallsCall(params *GetAppsAppCallsCallParams) (*GetAppsAppCallsCallOK, error) { // TODO: Validate the params before sending if params == nil { - params = NewGetCallsCallParams() + params = NewGetAppsAppCallsCallParams() } result, err := a.transport.Submit(&runtime.ClientOperation{ - ID: "GetCallsCall", + ID: "GetAppsAppCallsCall", Method: "GET", - PathPattern: "/calls/{call}", + PathPattern: "/apps/{app}/calls/{call}", ProducesMediaTypes: []string{"application/json"}, ConsumesMediaTypes: []string{"application/json"}, Schemes: []string{"http", "https"}, Params: params, - Reader: &GetCallsCallReader{formats: a.formats}, + Reader: &GetAppsAppCallsCallReader{formats: a.formats}, Context: params.Context, Client: params.HTTPClient, }) if err != nil { return nil, err } - return result.(*GetCallsCallOK), nil + return result.(*GetAppsAppCallsCallOK), nil } diff --git a/cli/vendor/github.com/funcy/functions_go/client/call/get_apps_app_calls_call_parameters.go b/cli/vendor/github.com/funcy/functions_go/client/call/get_apps_app_calls_call_parameters.go new file mode 100644 index 000000000..01e9989ec --- /dev/null +++ b/cli/vendor/github.com/funcy/functions_go/client/call/get_apps_app_calls_call_parameters.go @@ -0,0 +1,156 @@ +package call + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + "time" + + "golang.org/x/net/context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewGetAppsAppCallsCallParams creates a new GetAppsAppCallsCallParams object +// with the default values initialized. 
+func NewGetAppsAppCallsCallParams() *GetAppsAppCallsCallParams { + var () + return &GetAppsAppCallsCallParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewGetAppsAppCallsCallParamsWithTimeout creates a new GetAppsAppCallsCallParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewGetAppsAppCallsCallParamsWithTimeout(timeout time.Duration) *GetAppsAppCallsCallParams { + var () + return &GetAppsAppCallsCallParams{ + + timeout: timeout, + } +} + +// NewGetAppsAppCallsCallParamsWithContext creates a new GetAppsAppCallsCallParams object +// with the default values initialized, and the ability to set a context for a request +func NewGetAppsAppCallsCallParamsWithContext(ctx context.Context) *GetAppsAppCallsCallParams { + var () + return &GetAppsAppCallsCallParams{ + + Context: ctx, + } +} + +// NewGetAppsAppCallsCallParamsWithHTTPClient creates a new GetAppsAppCallsCallParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewGetAppsAppCallsCallParamsWithHTTPClient(client *http.Client) *GetAppsAppCallsCallParams { + var () + return &GetAppsAppCallsCallParams{ + HTTPClient: client, + } +} + +/*GetAppsAppCallsCallParams contains all the parameters to send to the API endpoint +for the get apps app calls call operation typically these are written to a http.Request +*/ +type GetAppsAppCallsCallParams struct { + + /*App + app name + + */ + App string + /*Call + Call ID. + + */ + Call string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the get apps app calls call params +func (o *GetAppsAppCallsCallParams) WithTimeout(timeout time.Duration) *GetAppsAppCallsCallParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get apps app calls call params +func (o *GetAppsAppCallsCallParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get apps app calls call params +func (o *GetAppsAppCallsCallParams) WithContext(ctx context.Context) *GetAppsAppCallsCallParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get apps app calls call params +func (o *GetAppsAppCallsCallParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get apps app calls call params +func (o *GetAppsAppCallsCallParams) WithHTTPClient(client *http.Client) *GetAppsAppCallsCallParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get apps app calls call params +func (o *GetAppsAppCallsCallParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithApp adds the app to the get apps app calls call params +func (o *GetAppsAppCallsCallParams) WithApp(app string) *GetAppsAppCallsCallParams { + o.SetApp(app) + return o +} + +// SetApp adds the app to the get apps app calls call params +func (o *GetAppsAppCallsCallParams) SetApp(app string) { + o.App = app +} + +// WithCall adds the call to the get apps app calls call params +func (o *GetAppsAppCallsCallParams) WithCall(call string) *GetAppsAppCallsCallParams { + o.SetCall(call) + return o +} + +// SetCall adds the call to the get apps app calls call params +func (o *GetAppsAppCallsCallParams) SetCall(call string) { + o.Call = call +} + +// WriteToRequest writes these params to a swagger request +func (o *GetAppsAppCallsCallParams) WriteToRequest(r runtime.ClientRequest, reg 
strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param app + if err := r.SetPathParam("app", o.App); err != nil { + return err + } + + // path param call + if err := r.SetPathParam("call", o.Call); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/cli/vendor/github.com/funcy/functions_go/client/call/get_apps_app_calls_call_responses.go b/cli/vendor/github.com/funcy/functions_go/client/call/get_apps_app_calls_call_responses.go new file mode 100644 index 000000000..ba614d0e8 --- /dev/null +++ b/cli/vendor/github.com/funcy/functions_go/client/call/get_apps_app_calls_call_responses.go @@ -0,0 +1,101 @@ +package call + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/funcy/functions_go/models" +) + +// GetAppsAppCallsCallReader is a Reader for the GetAppsAppCallsCall structure. +type GetAppsAppCallsCallReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *GetAppsAppCallsCallReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewGetAppsAppCallsCallOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 404: + result := NewGetAppsAppCallsCallNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewGetAppsAppCallsCallOK creates a GetAppsAppCallsCallOK with default headers values +func NewGetAppsAppCallsCallOK() *GetAppsAppCallsCallOK { + return &GetAppsAppCallsCallOK{} +} + +/*GetAppsAppCallsCallOK handles this case with default header values. + +Call found +*/ +type GetAppsAppCallsCallOK struct { + Payload *models.CallWrapper +} + +func (o *GetAppsAppCallsCallOK) Error() string { + return fmt.Sprintf("[GET /apps/{app}/calls/{call}][%d] getAppsAppCallsCallOK %+v", 200, o.Payload) +} + +func (o *GetAppsAppCallsCallOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.CallWrapper) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetAppsAppCallsCallNotFound creates a GetAppsAppCallsCallNotFound with default headers values +func NewGetAppsAppCallsCallNotFound() *GetAppsAppCallsCallNotFound { + return &GetAppsAppCallsCallNotFound{} +} + +/*GetAppsAppCallsCallNotFound handles this case with default header values. + +Call not found. 
+*/ +type GetAppsAppCallsCallNotFound struct { + Payload *models.Error +} + +func (o *GetAppsAppCallsCallNotFound) Error() string { + return fmt.Sprintf("[GET /apps/{app}/calls/{call}][%d] getAppsAppCallsCallNotFound %+v", 404, o.Payload) +} + +func (o *GetAppsAppCallsCallNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/cli/vendor/github.com/funcy/functions_go/client/call/get_apps_app_calls_parameters.go b/cli/vendor/github.com/funcy/functions_go/client/call/get_apps_app_calls_parameters.go new file mode 100644 index 000000000..54fe1d111 --- /dev/null +++ b/cli/vendor/github.com/funcy/functions_go/client/call/get_apps_app_calls_parameters.go @@ -0,0 +1,167 @@ +package call + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + "time" + + "golang.org/x/net/context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewGetAppsAppCallsParams creates a new GetAppsAppCallsParams object +// with the default values initialized. +func NewGetAppsAppCallsParams() *GetAppsAppCallsParams { + var () + return &GetAppsAppCallsParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewGetAppsAppCallsParamsWithTimeout creates a new GetAppsAppCallsParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewGetAppsAppCallsParamsWithTimeout(timeout time.Duration) *GetAppsAppCallsParams { + var () + return &GetAppsAppCallsParams{ + + timeout: timeout, + } +} + +// NewGetAppsAppCallsParamsWithContext creates a new GetAppsAppCallsParams object +// with the default values initialized, and the ability to set a context for a request +func NewGetAppsAppCallsParamsWithContext(ctx context.Context) *GetAppsAppCallsParams { + var () + return &GetAppsAppCallsParams{ + + Context: ctx, + } +} + +// NewGetAppsAppCallsParamsWithHTTPClient creates a new GetAppsAppCallsParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewGetAppsAppCallsParamsWithHTTPClient(client *http.Client) *GetAppsAppCallsParams { + var () + return &GetAppsAppCallsParams{ + HTTPClient: client, + } +} + +/*GetAppsAppCallsParams contains all the parameters to send to the API endpoint +for the get apps app calls operation typically these are written to a http.Request +*/ +type GetAppsAppCallsParams struct { + + /*App + App name. + + */ + App string + /*Route + App route. 
+ + */ + Route *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the get apps app calls params +func (o *GetAppsAppCallsParams) WithTimeout(timeout time.Duration) *GetAppsAppCallsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get apps app calls params +func (o *GetAppsAppCallsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get apps app calls params +func (o *GetAppsAppCallsParams) WithContext(ctx context.Context) *GetAppsAppCallsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get apps app calls params +func (o *GetAppsAppCallsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get apps app calls params +func (o *GetAppsAppCallsParams) WithHTTPClient(client *http.Client) *GetAppsAppCallsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get apps app calls params +func (o *GetAppsAppCallsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithApp adds the app to the get apps app calls params +func (o *GetAppsAppCallsParams) WithApp(app string) *GetAppsAppCallsParams { + o.SetApp(app) + return o +} + +// SetApp adds the app to the get apps app calls params +func (o *GetAppsAppCallsParams) SetApp(app string) { + o.App = app +} + +// WithRoute adds the route to the get apps app calls params +func (o *GetAppsAppCallsParams) WithRoute(route *string) *GetAppsAppCallsParams { + o.SetRoute(route) + return o +} + +// SetRoute adds the route to the get apps app calls params +func (o *GetAppsAppCallsParams) SetRoute(route *string) { + o.Route = route +} + +// WriteToRequest writes these params to a swagger request +func (o *GetAppsAppCallsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param app + if err := r.SetPathParam("app", o.App); err != nil { + return err + } + + if o.Route != nil { + + // query param route + var qrRoute string + if o.Route != nil { + qrRoute = *o.Route + } + qRoute := qrRoute + if qRoute != "" { + if err := r.SetQueryParam("route", qRoute); err != nil { + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/cli/vendor/github.com/funcy/functions_go/client/call/get_apps_app_calls_responses.go b/cli/vendor/github.com/funcy/functions_go/client/call/get_apps_app_calls_responses.go new file mode 100644 index 000000000..8e9a9ba52 --- /dev/null +++ b/cli/vendor/github.com/funcy/functions_go/client/call/get_apps_app_calls_responses.go @@ -0,0 +1,101 @@ +package call + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/funcy/functions_go/models" +) + +// GetAppsAppCallsReader is a Reader for the GetAppsAppCalls structure. +type GetAppsAppCallsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
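+// Editorial note (not part of the swagger output): per the switch below, a +// 200 decodes the body into *GetAppsAppCallsOK and is returned as the result, +// a 404 decodes into *GetAppsAppCallsNotFound and is returned as the error, +// and any other status code surfaces as a generic *runtime.APIError.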
+func (o *GetAppsAppCallsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewGetAppsAppCallsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 404: + result := NewGetAppsAppCallsNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewGetAppsAppCallsOK creates a GetAppsAppCallsOK with default headers values +func NewGetAppsAppCallsOK() *GetAppsAppCallsOK { + return &GetAppsAppCallsOK{} +} + +/*GetAppsAppCallsOK handles this case with default header values. + +Calls found +*/ +type GetAppsAppCallsOK struct { + Payload *models.CallsWrapper +} + +func (o *GetAppsAppCallsOK) Error() string { + return fmt.Sprintf("[GET /apps/{app}/calls/][%d] getAppsAppCallsOK %+v", 200, o.Payload) +} + +func (o *GetAppsAppCallsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.CallsWrapper) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetAppsAppCallsNotFound creates a GetAppsAppCallsNotFound with default headers values +func NewGetAppsAppCallsNotFound() *GetAppsAppCallsNotFound { + return &GetAppsAppCallsNotFound{} +} + +/*GetAppsAppCallsNotFound handles this case with default header values. + +Calls not found. +*/ +type GetAppsAppCallsNotFound struct { + Payload *models.Error +} + +func (o *GetAppsAppCallsNotFound) Error() string { + return fmt.Sprintf("[GET /apps/{app}/calls/][%d] getAppsAppCallsNotFound %+v", 404, o.Payload) +} + +func (o *GetAppsAppCallsNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/cli/vendor/github.com/funcy/functions_go/client/operations/delete_apps_app_calls_call_log_parameters.go b/cli/vendor/github.com/funcy/functions_go/client/operations/delete_apps_app_calls_call_log_parameters.go new file mode 100644 index 000000000..09f1033f3 --- /dev/null +++ b/cli/vendor/github.com/funcy/functions_go/client/operations/delete_apps_app_calls_call_log_parameters.go @@ -0,0 +1,156 @@ +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + "time" + + "golang.org/x/net/context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewDeleteAppsAppCallsCallLogParams creates a new DeleteAppsAppCallsCallLogParams object +// with the default values initialized. 
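+// +// A minimal editorial usage sketch (not generated output; "client" is assumed +// to be an operations.Client wired to a transport, and the app and call IDs +// are hypothetical): +// +//	params := NewDeleteAppsAppCallsCallLogParams(). +//		WithApp("myapp"). +//		WithCall("abc-123") +//	accepted, err := client.DeleteAppsAppCallsCallLog(params) +//	// a 404 or default error response comes back as err; on a 202 accepted is set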
+func NewDeleteAppsAppCallsCallLogParams() *DeleteAppsAppCallsCallLogParams { + var () + return &DeleteAppsAppCallsCallLogParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewDeleteAppsAppCallsCallLogParamsWithTimeout creates a new DeleteAppsAppCallsCallLogParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewDeleteAppsAppCallsCallLogParamsWithTimeout(timeout time.Duration) *DeleteAppsAppCallsCallLogParams { + var () + return &DeleteAppsAppCallsCallLogParams{ + + timeout: timeout, + } +} + +// NewDeleteAppsAppCallsCallLogParamsWithContext creates a new DeleteAppsAppCallsCallLogParams object +// with the default values initialized, and the ability to set a context for a request +func NewDeleteAppsAppCallsCallLogParamsWithContext(ctx context.Context) *DeleteAppsAppCallsCallLogParams { + var () + return &DeleteAppsAppCallsCallLogParams{ + + Context: ctx, + } +} + +// NewDeleteAppsAppCallsCallLogParamsWithHTTPClient creates a new DeleteAppsAppCallsCallLogParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewDeleteAppsAppCallsCallLogParamsWithHTTPClient(client *http.Client) *DeleteAppsAppCallsCallLogParams { + var () + return &DeleteAppsAppCallsCallLogParams{ + HTTPClient: client, + } +} + +/*DeleteAppsAppCallsCallLogParams contains all the parameters to send to the API endpoint +for the delete apps app calls call log operation typically these are written to a http.Request +*/ +type DeleteAppsAppCallsCallLogParams struct { + + /*App + App name. + + */ + App string + /*Call + Call ID. + + */ + Call string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the delete apps app calls call log params +func (o *DeleteAppsAppCallsCallLogParams) WithTimeout(timeout time.Duration) *DeleteAppsAppCallsCallLogParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the delete apps app calls call log params +func (o *DeleteAppsAppCallsCallLogParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the delete apps app calls call log params +func (o *DeleteAppsAppCallsCallLogParams) WithContext(ctx context.Context) *DeleteAppsAppCallsCallLogParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the delete apps app calls call log params +func (o *DeleteAppsAppCallsCallLogParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the delete apps app calls call log params +func (o *DeleteAppsAppCallsCallLogParams) WithHTTPClient(client *http.Client) *DeleteAppsAppCallsCallLogParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the delete apps app calls call log params +func (o *DeleteAppsAppCallsCallLogParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithApp adds the app to the delete apps app calls call log params +func (o *DeleteAppsAppCallsCallLogParams) WithApp(app string) *DeleteAppsAppCallsCallLogParams { + o.SetApp(app) + return o +} + +// SetApp adds the app to the delete apps app calls call log params +func (o *DeleteAppsAppCallsCallLogParams) SetApp(app string) { + o.App = app +} + +// WithCall adds the call to the delete apps app calls call log params +func (o *DeleteAppsAppCallsCallLogParams) WithCall(call string) *DeleteAppsAppCallsCallLogParams { + o.SetCall(call) + return o +} + +// SetCall 
adds the call to the delete apps app calls call log params +func (o *DeleteAppsAppCallsCallLogParams) SetCall(call string) { + o.Call = call +} + +// WriteToRequest writes these params to a swagger request +func (o *DeleteAppsAppCallsCallLogParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param app + if err := r.SetPathParam("app", o.App); err != nil { + return err + } + + // path param call + if err := r.SetPathParam("call", o.Call); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/cli/vendor/github.com/funcy/functions_go/client/operations/delete_apps_app_calls_call_log_responses.go b/cli/vendor/github.com/funcy/functions_go/client/operations/delete_apps_app_calls_call_log_responses.go new file mode 100644 index 000000000..62a5beaf6 --- /dev/null +++ b/cli/vendor/github.com/funcy/functions_go/client/operations/delete_apps_app_calls_call_log_responses.go @@ -0,0 +1,138 @@ +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/funcy/functions_go/models" +) + +// DeleteAppsAppCallsCallLogReader is a Reader for the DeleteAppsAppCallsCallLog structure. +type DeleteAppsAppCallsCallLogReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *DeleteAppsAppCallsCallLogReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 202: + result := NewDeleteAppsAppCallsCallLogAccepted() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 404: + result := NewDeleteAppsAppCallsCallLogNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + result := NewDeleteAppsAppCallsCallLogDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewDeleteAppsAppCallsCallLogAccepted creates a DeleteAppsAppCallsCallLogAccepted with default headers values +func NewDeleteAppsAppCallsCallLogAccepted() *DeleteAppsAppCallsCallLogAccepted { + return &DeleteAppsAppCallsCallLogAccepted{} +} + +/*DeleteAppsAppCallsCallLogAccepted handles this case with default header values. + +Log delete request accepted +*/ +type DeleteAppsAppCallsCallLogAccepted struct { +} + +func (o *DeleteAppsAppCallsCallLogAccepted) Error() string { + return fmt.Sprintf("[DELETE /apps/{app}/calls/{call}/log][%d] deleteAppsAppCallsCallLogAccepted ", 202) +} + +func (o *DeleteAppsAppCallsCallLogAccepted) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewDeleteAppsAppCallsCallLogNotFound creates a DeleteAppsAppCallsCallLogNotFound with default headers values +func NewDeleteAppsAppCallsCallLogNotFound() *DeleteAppsAppCallsCallLogNotFound { + return &DeleteAppsAppCallsCallLogNotFound{} +} + +/*DeleteAppsAppCallsCallLogNotFound handles this case with default header values. + +Does not exist. 
+*/ +type DeleteAppsAppCallsCallLogNotFound struct { + Payload *models.Error +} + +func (o *DeleteAppsAppCallsCallLogNotFound) Error() string { + return fmt.Sprintf("[DELETE /apps/{app}/calls/{call}/log][%d] deleteAppsAppCallsCallLogNotFound %+v", 404, o.Payload) +} + +func (o *DeleteAppsAppCallsCallLogNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewDeleteAppsAppCallsCallLogDefault creates a DeleteAppsAppCallsCallLogDefault with default headers values +func NewDeleteAppsAppCallsCallLogDefault(code int) *DeleteAppsAppCallsCallLogDefault { + return &DeleteAppsAppCallsCallLogDefault{ + _statusCode: code, + } +} + +/*DeleteAppsAppCallsCallLogDefault handles this case with default header values. + +Unexpected error +*/ +type DeleteAppsAppCallsCallLogDefault struct { + _statusCode int + + Payload *models.Error +} + +// Code gets the status code for the delete apps app calls call log default response +func (o *DeleteAppsAppCallsCallLogDefault) Code() int { + return o._statusCode +} + +func (o *DeleteAppsAppCallsCallLogDefault) Error() string { + return fmt.Sprintf("[DELETE /apps/{app}/calls/{call}/log][%d] DeleteAppsAppCallsCallLog default %+v", o._statusCode, o.Payload) +} + +func (o *DeleteAppsAppCallsCallLogDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/cli/vendor/github.com/funcy/functions_go/client/operations/get_apps_app_calls_call_log_parameters.go b/cli/vendor/github.com/funcy/functions_go/client/operations/get_apps_app_calls_call_log_parameters.go new file mode 100644 index 000000000..abe49b6aa --- /dev/null +++ b/cli/vendor/github.com/funcy/functions_go/client/operations/get_apps_app_calls_call_log_parameters.go @@ -0,0 +1,156 @@ +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + "time" + + "golang.org/x/net/context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewGetAppsAppCallsCallLogParams creates a new GetAppsAppCallsCallLogParams object +// with the default values initialized. 
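+// +// The With* setters below chain, so params can be built fluently. A minimal +// editorial sketch (not generated output; "client" is an operations.Client +// and the IDs are hypothetical): +// +//	logOK, err := client.GetAppsAppCallsCallLog( +//		NewGetAppsAppCallsCallLogParams().WithApp("myapp").WithCall("abc-123")) +//	if err == nil { +//		_ = logOK.Payload // *models.LogWrapper on a 200 +//	}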
+func NewGetAppsAppCallsCallLogParams() *GetAppsAppCallsCallLogParams { + var () + return &GetAppsAppCallsCallLogParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewGetAppsAppCallsCallLogParamsWithTimeout creates a new GetAppsAppCallsCallLogParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewGetAppsAppCallsCallLogParamsWithTimeout(timeout time.Duration) *GetAppsAppCallsCallLogParams { + var () + return &GetAppsAppCallsCallLogParams{ + + timeout: timeout, + } +} + +// NewGetAppsAppCallsCallLogParamsWithContext creates a new GetAppsAppCallsCallLogParams object +// with the default values initialized, and the ability to set a context for a request +func NewGetAppsAppCallsCallLogParamsWithContext(ctx context.Context) *GetAppsAppCallsCallLogParams { + var () + return &GetAppsAppCallsCallLogParams{ + + Context: ctx, + } +} + +// NewGetAppsAppCallsCallLogParamsWithHTTPClient creates a new GetAppsAppCallsCallLogParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewGetAppsAppCallsCallLogParamsWithHTTPClient(client *http.Client) *GetAppsAppCallsCallLogParams { + var () + return &GetAppsAppCallsCallLogParams{ + HTTPClient: client, + } +} + +/*GetAppsAppCallsCallLogParams contains all the parameters to send to the API endpoint +for the get apps app calls call log operation typically these are written to a http.Request +*/ +type GetAppsAppCallsCallLogParams struct { + + /*App + App Name + + */ + App string + /*Call + Call ID. + + */ + Call string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the get apps app calls call log params +func (o *GetAppsAppCallsCallLogParams) WithTimeout(timeout time.Duration) *GetAppsAppCallsCallLogParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get apps app calls call log params +func (o *GetAppsAppCallsCallLogParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get apps app calls call log params +func (o *GetAppsAppCallsCallLogParams) WithContext(ctx context.Context) *GetAppsAppCallsCallLogParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get apps app calls call log params +func (o *GetAppsAppCallsCallLogParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get apps app calls call log params +func (o *GetAppsAppCallsCallLogParams) WithHTTPClient(client *http.Client) *GetAppsAppCallsCallLogParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get apps app calls call log params +func (o *GetAppsAppCallsCallLogParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithApp adds the app to the get apps app calls call log params +func (o *GetAppsAppCallsCallLogParams) WithApp(app string) *GetAppsAppCallsCallLogParams { + o.SetApp(app) + return o +} + +// SetApp adds the app to the get apps app calls call log params +func (o *GetAppsAppCallsCallLogParams) SetApp(app string) { + o.App = app +} + +// WithCall adds the call to the get apps app calls call log params +func (o *GetAppsAppCallsCallLogParams) WithCall(call string) *GetAppsAppCallsCallLogParams { + o.SetCall(call) + return o +} + +// SetCall adds the call to the get apps app calls call log params +func (o *GetAppsAppCallsCallLogParams) SetCall(call string) { + o.Call = call 
+} + +// WriteToRequest writes these params to a swagger request +func (o *GetAppsAppCallsCallLogParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param app + if err := r.SetPathParam("app", o.App); err != nil { + return err + } + + // path param call + if err := r.SetPathParam("call", o.Call); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/cli/vendor/github.com/funcy/functions_go/client/operations/get_apps_app_calls_call_log_responses.go b/cli/vendor/github.com/funcy/functions_go/client/operations/get_apps_app_calls_call_log_responses.go new file mode 100644 index 000000000..4b2e6c4df --- /dev/null +++ b/cli/vendor/github.com/funcy/functions_go/client/operations/get_apps_app_calls_call_log_responses.go @@ -0,0 +1,101 @@ +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/funcy/functions_go/models" +) + +// GetAppsAppCallsCallLogReader is a Reader for the GetAppsAppCallsCallLog structure. +type GetAppsAppCallsCallLogReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *GetAppsAppCallsCallLogReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewGetAppsAppCallsCallLogOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 404: + result := NewGetAppsAppCallsCallLogNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewGetAppsAppCallsCallLogOK creates a GetAppsAppCallsCallLogOK with default headers values +func NewGetAppsAppCallsCallLogOK() *GetAppsAppCallsCallLogOK { + return &GetAppsAppCallsCallLogOK{} +} + +/*GetAppsAppCallsCallLogOK handles this case with default header values. + +Log found +*/ +type GetAppsAppCallsCallLogOK struct { + Payload *models.LogWrapper +} + +func (o *GetAppsAppCallsCallLogOK) Error() string { + return fmt.Sprintf("[GET /apps/{app}/calls/{call}/log][%d] getAppsAppCallsCallLogOK %+v", 200, o.Payload) +} + +func (o *GetAppsAppCallsCallLogOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.LogWrapper) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetAppsAppCallsCallLogNotFound creates a GetAppsAppCallsCallLogNotFound with default headers values +func NewGetAppsAppCallsCallLogNotFound() *GetAppsAppCallsCallLogNotFound { + return &GetAppsAppCallsCallLogNotFound{} +} + +/*GetAppsAppCallsCallLogNotFound handles this case with default header values. + +Log not found. 
+*/ +type GetAppsAppCallsCallLogNotFound struct { + Payload *models.Error +} + +func (o *GetAppsAppCallsCallLogNotFound) Error() string { + return fmt.Sprintf("[GET /apps/{app}/calls/{call}/log][%d] getAppsAppCallsCallLogNotFound %+v", 404, o.Payload) +} + +func (o *GetAppsAppCallsCallLogNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/cli/vendor/github.com/funcy/functions_go/client/operations/operations_client.go b/cli/vendor/github.com/funcy/functions_go/client/operations/operations_client.go index e2d306ba5..867f0ecd5 100644 --- a/cli/vendor/github.com/funcy/functions_go/client/operations/operations_client.go +++ b/cli/vendor/github.com/funcy/functions_go/client/operations/operations_client.go @@ -23,62 +23,62 @@ type Client struct { } /* -DeleteCallsCallLog deletes call log entry +DeleteAppsAppCallsCallLog deletes call log entry Delete call log entry */ -func (a *Client) DeleteCallsCallLog(params *DeleteCallsCallLogParams) (*DeleteCallsCallLogAccepted, error) { +func (a *Client) DeleteAppsAppCallsCallLog(params *DeleteAppsAppCallsCallLogParams) (*DeleteAppsAppCallsCallLogAccepted, error) { // TODO: Validate the params before sending if params == nil { - params = NewDeleteCallsCallLogParams() + params = NewDeleteAppsAppCallsCallLogParams() } result, err := a.transport.Submit(&runtime.ClientOperation{ - ID: "DeleteCallsCallLog", + ID: "DeleteAppsAppCallsCallLog", Method: "DELETE", - PathPattern: "/calls/{call}/log", + PathPattern: "/apps/{app}/calls/{call}/log", ProducesMediaTypes: []string{"application/json"}, ConsumesMediaTypes: []string{"application/json"}, Schemes: []string{"http", "https"}, Params: params, - Reader: &DeleteCallsCallLogReader{formats: a.formats}, + Reader: &DeleteAppsAppCallsCallLogReader{formats: a.formats}, Context: params.Context, Client: params.HTTPClient, }) if err != nil { return nil, err } - return result.(*DeleteCallsCallLogAccepted), nil + return result.(*DeleteAppsAppCallsCallLogAccepted), nil } /* -GetCallsCallLog gets call logs +GetAppsAppCallsCallLog gets call logs Get call logs */ -func (a *Client) GetCallsCallLog(params *GetCallsCallLogParams) (*GetCallsCallLogOK, error) { +func (a *Client) GetAppsAppCallsCallLog(params *GetAppsAppCallsCallLogParams) (*GetAppsAppCallsCallLogOK, error) { // TODO: Validate the params before sending if params == nil { - params = NewGetCallsCallLogParams() + params = NewGetAppsAppCallsCallLogParams() } result, err := a.transport.Submit(&runtime.ClientOperation{ - ID: "GetCallsCallLog", + ID: "GetAppsAppCallsCallLog", Method: "GET", - PathPattern: "/calls/{call}/log", + PathPattern: "/apps/{app}/calls/{call}/log", ProducesMediaTypes: []string{"application/json"}, ConsumesMediaTypes: []string{"application/json"}, Schemes: []string{"http", "https"}, Params: params, - Reader: &GetCallsCallLogReader{formats: a.formats}, + Reader: &GetAppsAppCallsCallLogReader{formats: a.formats}, Context: params.Context, Client: params.HTTPClient, }) if err != nil { return nil, err } - return result.(*GetCallsCallLogOK), nil + return result.(*GetAppsAppCallsCallLogOK), nil } diff --git a/glide.lock b/glide.lock index 3aaf0ed01..4de5091c1 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ -hash: 68fe5d3130a8346f3e38c0924b70369592ccdf0ffe503858056694a71a19acd2 -updated: 
2017-07-21T18:13:30.488739267-07:00 +hash: a333d90d0cb65a059f0459e426d4a1a11cc4b7385beca641a11ebf2e67b2a371 +updated: 2017-07-26T12:30:24.327734143-07:00 imports: - name: code.cloudfoundry.org/bytefmt version: f4415fafc5619dd75599a54a7c91fb3948ad58bd @@ -13,6 +13,38 @@ imports: - lib/go/thrift - name: github.com/asaskevich/govalidator version: aa5cce4a76edb1a5acecab1870c17abbffb5419e +- name: github.com/aws/aws-sdk-go + version: 90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6 + subpackages: + - aws + - aws/awserr + - aws/awsutil + - aws/client + - aws/client/metadata + - aws/corehandlers + - aws/credentials + - aws/credentials/ec2rolecreds + - aws/defaults + - aws/ec2metadata + - aws/request + - aws/session + - aws/signer/v4 + - private/endpoints + - private/protocol + - private/protocol/json/jsonutil + - private/protocol/jsonrpc + - private/protocol/query + - private/protocol/query/queryutil + - private/protocol/rest + - private/protocol/restjson + - private/protocol/restxml + - private/protocol/xml/xmlutil + - private/waiter + - service/cloudfront/sign + - service/lambda + - service/s3 + - vendor/github.com/go-ini/ini + - vendor/github.com/jmespath/go-jmespath - name: github.com/Azure/go-ansiterm version: fa152c58bc15761d0200cb75fe958b89a9d4888e subpackages: @@ -27,6 +59,10 @@ imports: version: 230eff6403e22b43f5fba7b28466dae4718934dd - name: github.com/cenkalti/backoff version: 5d150e7eec023ce7a124856b37c68e54b4050ac7 +- name: github.com/coreos/go-semver + version: 1817cd4bea52af76542157eeabd74b057d1a199e + subpackages: + - semver - name: github.com/davecgh/go-spew version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d subpackages: @@ -106,7 +142,7 @@ imports: - name: github.com/fsouza/go-dockerclient version: c933ed18bef34ec2955de03de8ef9a3bb996e3df - name: github.com/funcy/functions_go - version: 5d9948e8b1292c5421b5dd98bb6a9b5535d5e1ba + version: c540b7a8e1af8dad992a3b520175db85f8e53636 subpackages: - client - client/apps @@ -121,11 +157,18 @@ imports: subpackages: - internal - redis +- name: github.com/giantswarm/semver-bump + version: 7ec6ac8985c24dd50b4942f9a908d13cdfe70f23 + subpackages: + - bump + - storage - name: github.com/gin-gonic/gin version: d5b353c5d5a560322e6d96121c814115562501f7 subpackages: - binding - render +- name: github.com/go-ini/ini + version: 3d73f4b845efdf9989fffd4b4e562727744a34ba - name: github.com/go-logfmt/logfmt version: 390ab7935ee28ec6b286364bba9b4dd6410cb3d5 - name: github.com/go-openapi/analysis @@ -152,6 +195,8 @@ imports: version: f3f9494671f93fcff853e3c6e9e948b3eb71e590 - name: github.com/go-openapi/validate version: 035dcd74f1f61e83debe1c22950dc53556e7e4b2 +- name: github.com/go-resty/resty + version: 6d8c785a63e4b7505c88451cf9c5b452ccf2454c - name: github.com/go-sql-driver/mysql version: 56226343bd543f91a3930ed73ebdd03cfd633e85 - name: github.com/gogo/protobuf @@ -197,12 +242,18 @@ imports: - api - config - mq +- name: github.com/jmespath/go-jmespath + version: bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d - name: github.com/jmoiron/jsonq version: e874b168d07ecc7808bc950a17998a8aa3141d82 - name: github.com/jmoiron/sqlx version: d9bd385d68c068f1fabb5057e3dedcbcbb039d0f subpackages: - reflectx +- name: github.com/juju/errgo + version: 08cceb5d0b5331634b9826762a8fd53b29b86ad8 + subpackages: + - errors - name: github.com/kr/logfmt version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0 - name: github.com/lib/pq @@ -227,8 +278,26 @@ imports: version: f533f7a102197536779ea3a8cb881d639e21ec5a - name: github.com/mitchellh/mapstructure version: 
d0303fe809921458f417bcf828397a65db30a7e4 +- name: github.com/moby/moby + version: 72cda6a6c2f25854bea2d69168082684f2c9feca + subpackages: + - pkg/jsonmessage - name: github.com/Nvveen/Gotty version: cd527374f1e5bff4938207604a14f2e38a9cf512 +- name: github.com/onsi/gomega + version: c893efa28eb45626cdaa76c9f653b62488858837 + subpackages: + - format + - internal/assertion + - internal/asyncassertion + - internal/oraclematcher + - internal/testingtsupport + - matchers + - matchers/support/goraph/bipartitegraph + - matchers/support/goraph/edge + - matchers/support/goraph/node + - matchers/support/goraph/util + - types - name: github.com/opencontainers/runc version: ea35825a6350511ab93fe24e69c0723d6728616d subpackages: @@ -271,14 +340,14 @@ imports: version: 1f30fe9094a513ce4c700b9a54458bbb0c96996c - name: github.com/Shopify/sarama version: 2fd980e23bdcbb8edeb78fc704de0c39a6567ffc +- name: github.com/sirupsen/logrus + version: ba1b36c82c5e05c4f912a88eab0dcd91a171688f - name: github.com/Sirupsen/logrus version: ba1b36c82c5e05c4f912a88eab0dcd91a171688f repo: https://github.com/sirupsen/logrus.git vcs: git subpackages: - hooks/syslog -- name: github.com/sirupsen/logrus - version: ba1b36c82c5e05c4f912a88eab0dcd91a171688f - name: github.com/spf13/afero version: 9be650865eab0c12963d8753212f4f9c66cdcf12 subpackages: @@ -291,6 +360,8 @@ imports: version: 5644820622454e71517561946e3d94b9f9db6842 - name: github.com/spf13/viper version: 0967fc9aceab2ce9da34061253ac10fb99bba5b2 +- name: github.com/urfave/cli + version: 4b90d79a682b4bf685762c7452db20f2a676ecb2 - name: golang.org/x/crypto version: c10c31b5e94b6f7a0283272dc2bb27163dcea24b subpackages: @@ -301,8 +372,12 @@ imports: subpackages: - context - context/ctxhttp + - html + - html/atom + - html/charset - idna - proxy + - publicsuffix - name: golang.org/x/sys version: 0b25a408a50076fbbcae6b7ac0ea5fbb0b085e79 subpackages: @@ -311,6 +386,20 @@ imports: - name: golang.org/x/text version: 210eee5cf7323015d097341bcf7166130d001cd8 subpackages: + - encoding + - encoding/charmap + - encoding/htmlindex + - encoding/internal + - encoding/internal/identifier + - encoding/japanese + - encoding/korean + - encoding/simplifiedchinese + - encoding/traditionalchinese + - encoding/unicode + - internal/tag + - internal/utf8internal + - language + - runes - transform - unicode/norm - width diff --git a/vendor/github.com/aws/aws-sdk-go/.gitignore b/vendor/github.com/aws/aws-sdk-go/.gitignore new file mode 100644 index 000000000..fb11ceca0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/.gitignore @@ -0,0 +1,11 @@ +dist +/doc +/doc-staging +.yardoc +Gemfile.lock +awstesting/integration/smoke/**/importmarker__.go +awstesting/integration/smoke/_test/ +/vendor/bin/ +/vendor/pkg/ +/vendor/src/ +/private/model/cli/gen-api/gen-api diff --git a/vendor/github.com/aws/aws-sdk-go/.godoc_config b/vendor/github.com/aws/aws-sdk-go/.godoc_config new file mode 100644 index 000000000..395878d6c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/.godoc_config @@ -0,0 +1,14 @@ +{ + "PkgHandler": { + "Pattern": "/sdk-for-go/api/", + "StripPrefix": "/sdk-for-go/api", + "Include": ["/src/github.com/aws/aws-sdk-go/aws", "/src/github.com/aws/aws-sdk-go/service"], + "Exclude": ["/src/cmd", "/src/github.com/aws/aws-sdk-go/awstesting", "/src/github.com/aws/aws-sdk-go/awsmigrate"], + "IgnoredSuffixes": ["iface"] + }, + "Github": { + "Tag": "master", + "Repo": "/aws/aws-sdk-go", + "UseGithub": true + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/.travis.yml 
b/vendor/github.com/aws/aws-sdk-go/.travis.yml new file mode 100644 index 000000000..0196798a5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/.travis.yml @@ -0,0 +1,23 @@ +language: go + +sudo: false + +go: + - 1.4 + - 1.5 + - 1.6 + - tip + +# Use Go 1.5's vendoring experiment for 1.5 tests. 1.4 tests will use the tip of the dependencies repo. +env: + - GO15VENDOREXPERIMENT=1 + +install: + - make get-deps + +script: + - make unit-with-race-cover + +matrix: + allow_failures: + - go: tip diff --git a/vendor/github.com/aws/aws-sdk-go/.yardopts b/vendor/github.com/aws/aws-sdk-go/.yardopts new file mode 100644 index 000000000..07724e4bd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/.yardopts @@ -0,0 +1,7 @@ +--plugin go +-e doc-src/plugin/plugin.rb +-m markdown +-o doc/api +--title "AWS SDK for Go" +aws/**/*.go +service/**/*.go diff --git a/vendor/github.com/aws/aws-sdk-go/Gemfile b/vendor/github.com/aws/aws-sdk-go/Gemfile new file mode 100644 index 000000000..2fb295a1a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/Gemfile @@ -0,0 +1,6 @@ +source 'https://rubygems.org' + +gem 'yard', git: 'git://github.com/lsegal/yard', ref: '5025564a491e1b7c6192632cba2802202ca08449' +gem 'yard-go', git: 'git://github.com/jasdel/yard-go', ref: 'e78e1ef7cdf5e0f3266845b26bb4fd64f1dd6f85' +gem 'rdiscount' + diff --git a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go/Makefile b/vendor/github.com/aws/aws-sdk-go/Makefile new file mode 100644 index 000000000..b8b4e9464 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/Makefile @@ -0,0 +1,152 @@ +LINTIGNOREDOT='awstesting/integration.+should not use dot imports' +LINTIGNOREDOC='service/[^/]+/(api|service|waiters)\.go:.+(comment on exported|should have comment or be unexported)' +LINTIGNORECONST='service/[^/]+/(api|service|waiters)\.go:.+(type|struct field|const|func) ([^ ]+) should be ([^ ]+)' +LINTIGNORESTUTTER='service/[^/]+/(api|service)\.go:.+(and that stutters)' +LINTIGNOREINFLECT='service/[^/]+/(api|service)\.go:.+method .+ should be ' +LINTIGNOREINFLECTS3UPLOAD='service/s3/s3manager/upload\.go:.+struct field SSEKMSKeyId should be ' +LINTIGNOREDEPS='vendor/.+\.go' + +SDK_WITH_VENDOR_PKGS=$(shell go list ./... | grep -v "/vendor/src") +SDK_ONLY_PKGS=$(shell go list ./... 
| grep -v "/vendor/") +SDK_GO_1_4=$(shell go version | grep "go1.4") +SDK_GO_VERSION=$(shell go version | awk '''{print $$3}''' | tr -d '''\n''') + +all: get-deps generate unit + +help: + @echo "Please use \`make <target>' where <target> is one of" + @echo " api_info to print a list of services and versions" + @echo " docs to build SDK documentation" + @echo " build to go build the SDK" + @echo " unit to run unit tests" + @echo " integration to run integration tests" + @echo " performance to run performance tests" + @echo " verify to verify tests" + @echo " lint to lint the SDK" + @echo " vet to vet the SDK" + @echo " generate to go generate and make services" + @echo " gen-test to generate protocol tests" + @echo " gen-services to generate services" + @echo " get-deps to go get the SDK dependencies" + @echo " get-deps-tests to get the SDK's test dependencies" + @echo " get-deps-verify to get the SDK's verification dependencies" + +generate: gen-test gen-endpoints gen-services + +gen-test: gen-protocol-test + +gen-services: + go generate ./service + +gen-protocol-test: + go generate ./private/protocol/... + +gen-endpoints: + go generate ./private/endpoints + +build: + @echo "go build SDK and vendor packages" + @go build ${SDK_ONLY_PKGS} + +unit: get-deps-tests build verify + @echo "go test SDK and vendor packages" + @go test $(SDK_ONLY_PKGS) + +unit-with-race-cover: get-deps-tests build verify + @echo "go test SDK and vendor packages" + @go test -race -cpu=1,2,4 $(SDK_ONLY_PKGS) + +integration: get-deps-tests integ-custom smoke-tests performance + +integ-custom: + go test -tags "integration" ./awstesting/integration/customizations/... + +smoke-tests: get-deps-tests + gucumber -go-tags "integration" ./awstesting/integration/smoke + +performance: get-deps-tests + AWS_TESTING_LOG_RESULTS=${log-detailed} AWS_TESTING_REGION=$(region) AWS_TESTING_DB_TABLE=$(table) gucumber -go-tags "integration" ./awstesting/performance + +sandbox-tests: sandbox-test-go14 sandbox-test-go15 sandbox-test-go15-novendorexp sandbox-test-go16 sandbox-test-go17 sandbox-test-gotip + +sandbox-test-go14: + docker build -f ./awstesting/sandbox/Dockerfile.test.go1.4 -t "aws-sdk-go-1.4" . + docker run -t aws-sdk-go-1.4 + +sandbox-test-go15: + docker build -f ./awstesting/sandbox/Dockerfile.test.go1.5 -t "aws-sdk-go-1.5" . + docker run -t aws-sdk-go-1.5 + +sandbox-test-go15-novendorexp: + docker build -f ./awstesting/sandbox/Dockerfile.test.go1.5-novendorexp -t "aws-sdk-go-1.5-novendorexp" . + docker run -t aws-sdk-go-1.5-novendorexp + +sandbox-test-go16: + docker build -f ./awstesting/sandbox/Dockerfile.test.go1.6 -t "aws-sdk-go-1.6" . + docker run -t aws-sdk-go-1.6 + +sandbox-test-go17: + docker build -f ./awstesting/sandbox/Dockerfile.test.go1.7 -t "aws-sdk-go-1.7" . + docker run -t aws-sdk-go-1.7 + +sandbox-test-gotip: + @echo "Run make update-aws-golang-tip if this test fails because the aws-golang:tip container is missing" + docker build -f ./awstesting/sandbox/Dockerfile.test.gotip -t "aws-sdk-go-tip" . + docker run -t aws-sdk-go-tip + +update-aws-golang-tip: + docker build -f ./awstesting/sandbox/Dockerfile.golang-tip -t "aws-golang:tip" .
+ +verify: get-deps-verify lint vet + +lint: + @echo "go lint SDK and vendor packages" + @lint=`if [ -z "${SDK_GO_1_4}" ]; then golint ./...; else echo "skipping golint"; fi`; \ + lint=`echo "$$lint" | grep -E -v -e ${LINTIGNOREDOT} -e ${LINTIGNOREDOC} -e ${LINTIGNORECONST} -e ${LINTIGNORESTUTTER} -e ${LINTIGNOREINFLECT} -e ${LINTIGNOREDEPS} -e ${LINTIGNOREINFLECTS3UPLOAD}`; \ + echo "$$lint"; \ + if [ "$$lint" != "" ] && [ "$$lint" != "skipping golint" ]; then exit 1; fi + +SDK_BASE_FOLDERS=$(shell ls -d */ | grep -v vendor | grep -v awsmigrate) +ifneq (,$(findstring go1.5, ${SDK_GO_VERSION})) + GO_VET_CMD=go tool vet --all -shadow +else ifneq (,$(findstring go1.6, ${SDK_GO_VERSION})) + GO_VET_CMD=go tool vet --all -shadow -example=false +else ifneq (,$(findstring devel, ${SDK_GO_VERSION})) + GO_VET_CMD=go tool vet --all -shadow -tests=false +else + GO_VET_CMD=echo skipping go vet, ${SDK_GO_VERSION} +endif + +vet: + ${GO_VET_CMD} ${SDK_BASE_FOLDERS} + +get-deps: get-deps-tests get-deps-verify + @echo "go get SDK dependencies" + @go get -v $(SDK_ONLY_PKGS) + +get-deps-tests: + @echo "go get SDK testing dependencies" + go get github.com/lsegal/gucumber/cmd/gucumber + go get github.com/stretchr/testify + go get github.com/smartystreets/goconvey + +get-deps-verify: + @echo "go get SDK verification utilities" + @if [ -z "${SDK_GO_1_4}" ]; then go get github.com/golang/lint/golint; else echo "skipped getting golint"; fi + +bench: + @echo "go bench SDK packages" + @go test -run NONE -bench . -benchmem -tags 'bench' $(SDK_ONLY_PKGS) + +bench-protocol: + @echo "go bench SDK protocol marshallers" + @go test -run NONE -bench . -benchmem -tags 'bench' ./private/protocol/... + +docs: + @echo "generate SDK docs" + rm -rf doc && bundle install && bundle exec yard + @# This env variable, DOCS, is for internal use + @if [ -n "$(AWS_DOC_GEN_TOOL)" ]; then echo "For internal use. Subject to change."; $(AWS_DOC_GEN_TOOL) `pwd`; fi + +api_info: + @go run private/model/cli/api-info/api-info.go diff --git a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt new file mode 100644 index 000000000..5f14d1162 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt @@ -0,0 +1,3 @@ +AWS SDK for Go +Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2014-2015 Stripe, Inc. diff --git a/vendor/github.com/aws/aws-sdk-go/README.md b/vendor/github.com/aws/aws-sdk-go/README.md new file mode 100644 index 000000000..812ce716c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/README.md @@ -0,0 +1,116 @@ +# AWS SDK for Go + + +[![API Reference](http://img.shields.io/badge/api-reference-blue.svg)](http://docs.aws.amazon.com/sdk-for-go/api) +[![Join the chat at https://gitter.im/aws/aws-sdk-go](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/aws/aws-sdk-go?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![Build Status](https://img.shields.io/travis/aws/aws-sdk-go.svg)](https://travis-ci.org/aws/aws-sdk-go) +[![Apache V2 License](http://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt) + + +aws-sdk-go is the official AWS SDK for the Go programming language. + +Check out our [release notes](https://github.com/aws/aws-sdk-go/releases) for information about the latest bug fixes, updates, and features added to the SDK.
+ +## Installing + +If you are using Go 1.5 with the `GO15VENDOREXPERIMENT=1` vendoring flag, or Go 1.6 and higher, you can use the following command to retrieve the SDK. The SDK's non-testing dependencies will be included and are vendored in the `vendor` folder. + + go get -u github.com/aws/aws-sdk-go + +Otherwise, if your Go environment does not have vendoring support enabled, or you do not want to include the SDK's vendored dependencies, you can use the following commands to retrieve the SDK and its non-testing dependencies using `go get`. + + go get -u github.com/aws/aws-sdk-go/aws/... + go get -u github.com/aws/aws-sdk-go/service/... + +If you're looking to retrieve just the SDK without any dependencies, use the following command. + + go get -d github.com/aws/aws-sdk-go/ + +These two processes will still include the `vendor` folder, and it should be deleted if it's not going to be used by your environment. + + rm -rf $GOPATH/src/github.com/aws/aws-sdk-go/vendor + +## Reference Documentation +[`Getting Started Guide`](https://aws.amazon.com/sdk-for-go/) - This document is a general introduction to configuring the SDK and making requests with it. If this is your first time using the SDK, this documentation and the API documentation will help you get started. This document focuses on the syntax and behavior of the SDK. The [Service Developer Guide](https://aws.amazon.com/documentation/) will help you get started using specific AWS services. + +[`SDK API Reference Documentation`](https://docs.aws.amazon.com/sdk-for-go/api/) - Use this document to look up all API operation input and output parameters for AWS services supported by the SDK. The API reference also includes documentation of the SDK itself, examples of how to use the SDK and service client API operations, and the parameters each API operation requires. + +[`Service Developer Guide`](https://aws.amazon.com/documentation/) - Use this documentation to learn how to interface with an AWS service. These are great guides, whether you're getting started with a service or looking for more information on one. You should not need this document for coding, though in some cases services may supply helpful samples that you might want to look out for. + +[`SDK Examples`](https://github.com/aws/aws-sdk-go/tree/master/example) - Included in the SDK's repo are several hand-crafted examples using SDK features and AWS services. + +## Configuring Credentials + +Before using the SDK, ensure that you've configured credentials. The best +way to configure credentials on a development machine is to use the +`~/.aws/credentials` file, which might look like: + +``` +[default] +aws_access_key_id = AKID1234567890 +aws_secret_access_key = MY-SECRET-KEY +``` + +You can learn more about the credentials file from this +[blog post](http://blogs.aws.amazon.com/security/post/Tx3D6U6WSFGOK2H/A-New-and-Standardized-Way-to-Manage-Credentials-in-the-AWS-SDKs). + +Alternatively, you can set the following environment variables: + +``` +AWS_ACCESS_KEY_ID=AKID1234567890 +AWS_SECRET_ACCESS_KEY=MY-SECRET-KEY +``` + +### AWS CLI config file (`~/.aws/config`) +The AWS SDK for Go does not support the AWS CLI's config file. The SDK will not use any contents from this file. The SDK only supports the shared credentials file (`~/.aws/credentials`). #384 tracks this feature request discussion. + +## Using the Go SDK + +To use a service in the SDK, create a service variable by calling the `New()` +function.
+
+### AWS CLI config file (`~/.aws/config`)
+The AWS SDK for Go does not support the AWS CLI's config file. The SDK will not use any contents from this file. The SDK only supports the shared credentials file (`~/.aws/credentials`). #384 tracks this feature request discussion.
+
+## Using the Go SDK
+
+To use a service in the SDK, create a service variable by calling the `New()`
+function. Once you have a service client, you can call API operations which each
+return response data and a possible error.
+
+To list a set of instance IDs from EC2, you could run:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/ec2"
+)
+
+func main() {
+	// Create an EC2 service object in the "us-west-2" region
+	// Note that you can also configure your region globally by
+	// exporting the AWS_REGION environment variable
+	svc := ec2.New(session.New(), &aws.Config{Region: aws.String("us-west-2")})
+
+	// Call the DescribeInstances Operation
+	resp, err := svc.DescribeInstances(nil)
+	if err != nil {
+		panic(err)
+	}
+
+	// resp has all of the response data, pull out instance IDs:
+	fmt.Println("> Number of reservation sets: ", len(resp.Reservations))
+	for idx, res := range resp.Reservations {
+		fmt.Println("  > Number of instances: ", len(res.Instances))
+		for _, inst := range resp.Reservations[idx].Instances {
+			fmt.Println("    - Instance ID: ", *inst.InstanceId)
+		}
+	}
+}
+```
+
+You can find more information and operations in our
+[API documentation](http://docs.aws.amazon.com/sdk-for-go/api/).
+
+## License
+
+This SDK is distributed under the
+[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0),
+see LICENSE.txt and NOTICE.txt for more information.
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
new file mode 100644
index 000000000..e50771f80
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
@@ -0,0 +1,145 @@
+// Package awserr represents API error interface accessors for the SDK.
+package awserr
+
+// An Error wraps lower level errors with code, message and an original error.
+// The underlying concrete error type may also satisfy other interfaces which
+// can be used to obtain more specific information about the error.
+//
+// Calling Error() or String() will always include the full information about
+// an error based on its underlying type.
+//
+// Example:
+//
+//	output, err := s3manager.Upload(svc, input, opts)
+//	if err != nil {
+//		if awsErr, ok := err.(awserr.Error); ok {
+//			// Get error details
+//			log.Println("Error:", awsErr.Code(), awsErr.Message())
+//
+//			// Prints out full error message, including original error if there was one.
+//			log.Println("Error:", awsErr.Error())
+//
+//			// Get original error
+//			if origErr := awsErr.OrigErr(); origErr != nil {
+//				// operate on original error.
+//			}
+//		} else {
+//			fmt.Println(err.Error())
+//		}
+//	}
+//
+type Error interface {
+	// Satisfy the generic error interface.
+	error
+
+	// Returns the short phrase depicting the classification of the error.
+	Code() string
+
+	// Returns the error details message.
+	Message() string
+
+	// Returns the original error if one was set. Nil is returned if not set.
+	OrigErr() error
+}
+
+// BatchError is a batch of errors which also wraps lower level errors with
+// code, message, and original errors. Calling Error() will include all errors
+// that occurred in the batch.
+//
+// Deprecated: Replaced with BatchedErrors. Only defined for backwards
+// compatibility.
+type BatchError interface {
+	// Satisfy the generic error interface.
+	error
+
+	// Returns the short phrase depicting the classification of the error.
+	Code() string
+
+	// Returns the error details message.
+	Message() string
+
+	// Returns the original errors if any were set. Nil is returned if not set.
+	OrigErrs() []error
+}
+
+// BatchedErrors is a batch of errors which also wraps lower level errors with
+// code, message, and original errors. Calling Error() will include all errors
+// that occurred in the batch.
+//
+// Replaces BatchError
+type BatchedErrors interface {
+	// Satisfy the base Error interface.
+	Error
+
+	// Returns the original errors if any were set. Nil is returned if not set.
+	OrigErrs() []error
+}
+
+// New returns an Error object described by the code, message, and origErr.
+//
+// If origErr satisfies the Error interface it will not be wrapped within a new
+// Error object and will instead be returned.
+func New(code, message string, origErr error) Error {
+	var errs []error
+	if origErr != nil {
+		errs = append(errs, origErr)
+	}
+	return newBaseError(code, message, errs)
+}
+
+// NewBatchError returns a BatchedErrors with a collection of errors as an
+// array of errors.
+func NewBatchError(code, message string, errs []error) BatchedErrors {
+	return newBaseError(code, message, errs)
+}
+
+// A RequestFailure is an interface to extract request failure information from
+// an Error such as the request ID of the failed request returned by a service.
+// RequestFailures may not always have a requestID value if the request failed
+// prior to reaching the service, such as on a connection error.
+//
+// Example:
+//
+//	output, err := s3manager.Upload(svc, input, opts)
+//	if err != nil {
+//		if reqerr, ok := err.(RequestFailure); ok {
+//			log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID())
+//		} else {
+//			log.Println("Error:", err.Error())
+//		}
+//	}
+//
+// Combined with awserr.Error:
+//
+//	output, err := s3manager.Upload(svc, input, opts)
+//	if err != nil {
+//		if awsErr, ok := err.(awserr.Error); ok {
+//			// Generic AWS Error with Code, Message, and original error (if any)
+//			fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+//
+//			if reqErr, ok := err.(awserr.RequestFailure); ok {
+//				// A service error occurred
+//				fmt.Println(reqErr.StatusCode(), reqErr.RequestID())
+//			}
+//		} else {
+//			fmt.Println(err.Error())
+//		}
+//	}
+//
+type RequestFailure interface {
+	Error
+
+	// The status code of the HTTP response.
+	StatusCode() int
+
+	// The request ID returned by the service for a request failure. This will
+	// be empty if no request ID is available, such as when the request failed
+	// due to a connection error.
+	RequestID() string
+}
+
+// NewRequestFailure returns a new request error wrapper for the given Error
+// provided.
+func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure {
+	return newRequestError(err, statusCode, reqID)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
new file mode 100644
index 000000000..e2d333b84
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
@@ -0,0 +1,194 @@
+package awserr
+
+import "fmt"
+
+// SprintError returns a string of the formatted error code.
+//
+// Both extra and origErr are optional. If they are included their lines
+// will be added, but if they are not included their lines will be ignored.
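+//
+// An illustrative sketch of the output layout (not part of the original
+// documentation): with all arguments provided, the code and message share the
+// first line, the extra text is tab-indented on the next, and the original
+// error is appended last:
+//
+//	awserr.SprintError("ResourceNotFound", "resource missing", "bucket: b", err)
+//	// "ResourceNotFound: resource missing\n\tbucket: b\ncaused by: " + err.Error()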
+func SprintError(code, message, extra string, origErr error) string {
+	msg := fmt.Sprintf("%s: %s", code, message)
+	if extra != "" {
+		msg = fmt.Sprintf("%s\n\t%s", msg, extra)
+	}
+	if origErr != nil {
+		msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error())
+	}
+	return msg
+}
+
+// A baseError wraps the code and message which defines an error. It also
+// can be used to wrap an original error object.
+//
+// Should be used as the root for errors satisfying the awserr.Error. Also
+// for any error which does not fit into a specific error wrapper type.
+type baseError struct {
+	// Classification of error
+	code string
+
+	// Detailed information about error
+	message string
+
+	// Optional original error this error is based off of. Allows building
+	// chained errors.
+	errs []error
+}
+
+// newBaseError returns an error object for the code, message, and errors.
+//
+// code is a short, whitespace-free phrase depicting the classification of
+// the error that is being created.
+//
+// message is the free-form string containing detailed information about the
+// error.
+//
+// origErrs is the list of error objects which will be nested under the new
+// error to be returned.
+func newBaseError(code, message string, origErrs []error) *baseError {
+	b := &baseError{
+		code:    code,
+		message: message,
+		errs:    origErrs,
+	}
+
+	return b
+}
+
+// Error returns the string representation of the error.
+//
+// See ErrorWithExtra for formatting.
+//
+// Satisfies the error interface.
+func (b baseError) Error() string {
+	size := len(b.errs)
+	if size > 0 {
+		return SprintError(b.code, b.message, "", errorList(b.errs))
+	}
+
+	return SprintError(b.code, b.message, "", nil)
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (b baseError) String() string {
+	return b.Error()
+}
+
+// Code returns the short phrase depicting the classification of the error.
+func (b baseError) Code() string {
+	return b.code
+}
+
+// Message returns the error details message.
+func (b baseError) Message() string {
+	return b.message
+}
+
+// OrigErr returns the original error if one was set. Nil is returned if no
+// error was set. This only returns the first element in the list. If the full
+// list is needed, use BatchedErrors.
+func (b baseError) OrigErr() error {
+	switch len(b.errs) {
+	case 0:
+		return nil
+	case 1:
+		return b.errs[0]
+	default:
+		if err, ok := b.errs[0].(Error); ok {
+			return NewBatchError(err.Code(), err.Message(), b.errs[1:])
+		}
+		return NewBatchError("BatchedErrors",
+			"multiple errors occurred", b.errs)
+	}
+}
+
+// OrigErrs returns the original errors if at least one was set. An empty
+// slice is returned if no error was set.
+func (b baseError) OrigErrs() []error {
+	return b.errs
+}
+
+// So that the Error interface type can be included as an anonymous field
+// in the requestError struct and not conflict with the error.Error() method.
+type awsError Error
+
+// A requestError wraps a request or service error.
+//
+// Composed of baseError for code, message, and original error.
+type requestError struct {
+	awsError
+	statusCode int
+	requestID  string
+}
+
+// newRequestError returns a wrapped error with additional information for
+// request status code, and service requestID.
+//
+// Should be used to wrap all requests which involve service requests. Even if
+// the request failed without a service response, but had an HTTP status code
+// that may be meaningful.
+//
+// Also wraps original errors via the baseError.
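+//
+// An illustrative sketch of the resulting Error() output (not from the
+// original documentation); the status line is passed as SprintError's
+// "extra" argument:
+//
+//	// SomeServiceCode: the service error message
+//	// 	status code: 400, request id: 9e483c9c-example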
+func newRequestError(err Error, statusCode int, requestID string) *requestError {
+	return &requestError{
+		awsError:   err,
+		statusCode: statusCode,
+		requestID:  requestID,
+	}
+}
+
+// Error returns the string representation of the error.
+// Satisfies the error interface.
+func (r requestError) Error() string {
+	extra := fmt.Sprintf("status code: %d, request id: %s",
+		r.statusCode, r.requestID)
+	return SprintError(r.Code(), r.Message(), extra, r.OrigErr())
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (r requestError) String() string {
+	return r.Error()
+}
+
+// StatusCode returns the wrapped status code for the error
+func (r requestError) StatusCode() int {
+	return r.statusCode
+}
+
+// RequestID returns the wrapped requestID
+func (r requestError) RequestID() string {
+	return r.requestID
+}
+
+// OrigErrs returns the original errors if at least one was set. An empty
+// slice is returned if no error was set.
+func (r requestError) OrigErrs() []error {
+	if b, ok := r.awsError.(BatchedErrors); ok {
+		return b.OrigErrs()
+	}
+	return []error{r.OrigErr()}
+}
+
+// An error list that satisfies the Go error interface
+type errorList []error
+
+// Error returns the string representation of the error.
+//
+// Satisfies the error interface.
+func (e errorList) Error() string {
+	msg := ""
+	// An empty list renders as an empty string.
+	if size := len(e); size > 0 {
+		for i := 0; i < size; i++ {
+			msg += fmt.Sprintf("%s", e[i].Error())
+			// We check the next index to see if it is within the slice. If it
+			// is, we append a newline; appending one unconditionally would add
+			// a trailing '\n' and break unit tests.
+			if i+1 < size {
+				msg += "\n"
+			}
+		}
+	}
+	return msg
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
new file mode 100644
index 000000000..8429470b9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
@@ -0,0 +1,100 @@
+package awsutil
+
+import (
+	"io"
+	"reflect"
+)
+
+// Copy deeply copies a src structure to dst. Useful for copying request and
+// response structures.
+//
+// Can copy between structs of different type, but will only copy fields which
+// are assignable, and exist in both structs. Fields which are not assignable,
+// or do not exist in both structs, are ignored.
+func Copy(dst, src interface{}) {
+	dstval := reflect.ValueOf(dst)
+	if !dstval.IsValid() {
+		panic("Copy dst cannot be nil")
+	}
+
+	rcopy(dstval, reflect.ValueOf(src), true)
+}
+
+// CopyOf returns a copy of src while also allocating the memory for dst.
+// src must be a pointer type or this operation will fail.
+func CopyOf(src interface{}) (dst interface{}) {
+	dsti := reflect.New(reflect.TypeOf(src).Elem())
+	dst = dsti.Interface()
+	rcopy(dsti, reflect.ValueOf(src), true)
+	return
+}
+
+// rcopy performs a recursive copy of values from the source to destination.
+//
+// root is used to skip certain aspects of the copy which are not valid
+// for the root node of an object.
+func rcopy(dst, src reflect.Value, root bool) {
+	if !src.IsValid() {
+		return
+	}
+
+	switch src.Kind() {
+	case reflect.Ptr:
+		if _, ok := src.Interface().(io.Reader); ok {
+			if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() {
+				dst.Elem().Set(src)
+			} else if dst.CanSet() {
+				dst.Set(src)
+			}
+		} else {
+			e := src.Type().Elem()
+			if dst.CanSet() && !src.IsNil() {
+				dst.Set(reflect.New(e))
+			}
+			if src.Elem().IsValid() {
+				// Keep the current root state since the depth hasn't changed
+				rcopy(dst.Elem(), src.Elem(), root)
+			}
+		}
+	case reflect.Struct:
+		t := dst.Type()
+		for i := 0; i < t.NumField(); i++ {
+			name := t.Field(i).Name
+			srcVal := src.FieldByName(name)
+			dstVal := dst.FieldByName(name)
+			if srcVal.IsValid() && dstVal.CanSet() {
+				rcopy(dstVal, srcVal, false)
+			}
+		}
+	case reflect.Slice:
+		if src.IsNil() {
+			break
+		}
+
+		s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
+		dst.Set(s)
+		for i := 0; i < src.Len(); i++ {
+			rcopy(dst.Index(i), src.Index(i), false)
+		}
+	case reflect.Map:
+		if src.IsNil() {
+			break
+		}
+
+		s := reflect.MakeMap(src.Type())
+		dst.Set(s)
+		for _, k := range src.MapKeys() {
+			v := src.MapIndex(k)
+			v2 := reflect.New(v.Type()).Elem()
+			rcopy(v2, v, false)
+			dst.SetMapIndex(k, v2)
+		}
+	default:
+		// Assign the value if possible. If it's not assignable, the value
+		// would need to be converted, and the impact of that may be
+		// unexpected or incompatible with the dst type.
+		if src.Type().AssignableTo(dst.Type()) {
+			dst.Set(src)
+		}
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy_test.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy_test.go
new file mode 100644
index 000000000..84b7e3f34
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy_test.go
@@ -0,0 +1,233 @@
+package awsutil_test
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"testing"
+
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+	"github.com/stretchr/testify/assert"
+)
+
+func ExampleCopy() {
+	type Foo struct {
+		A int
+		B []*string
+	}
+
+	// Create the initial value
+	str1 := "hello"
+	str2 := "bye bye"
+	f1 := &Foo{A: 1, B: []*string{&str1, &str2}}
+
+	// Do the copy
+	var f2 Foo
+	awsutil.Copy(&f2, f1)
+
+	// Print the result
+	fmt.Println(awsutil.Prettify(f2))
+
+	// Output:
+	// {
+	//   A: 1,
+	//   B: ["hello","bye bye"]
+	// }
+}
+
+func TestCopy(t *testing.T) {
+	type Foo struct {
+		A int
+		B []*string
+		C map[string]*int
+	}
+
+	// Create the initial value
+	str1 := "hello"
+	str2 := "bye bye"
+	int1 := 1
+	int2 := 2
+	f1 := &Foo{
+		A: 1,
+		B: []*string{&str1, &str2},
+		C: map[string]*int{
+			"A": &int1,
+			"B": &int2,
+		},
+	}
+
+	// Do the copy
+	var f2 Foo
+	awsutil.Copy(&f2, f1)
+
+	// Values are equal
+	assert.Equal(t, f2.A, f1.A)
+	assert.Equal(t, f2.B, f1.B)
+	assert.Equal(t, f2.C, f1.C)
+
+	// But pointers are not!
+	str3 := "nothello"
+	int3 := 57
+	f2.A = 100
+	f2.B[0] = &str3
+	f2.C["B"] = &int3
+	assert.NotEqual(t, f2.A, f1.A)
+	assert.NotEqual(t, f2.B, f1.B)
+	assert.NotEqual(t, f2.C, f1.C)
+}
+
+func TestCopyNestedWithUnexported(t *testing.T) {
+	type Bar struct {
+		a int
+		B int
+	}
+	type Foo struct {
+		A string
+		B Bar
+	}
+
+	f1 := &Foo{A: "string", B: Bar{a: 1, B: 2}}
+
+	var f2 Foo
+	awsutil.Copy(&f2, f1)
+
+	// Values match
+	assert.Equal(t, f2.A, f1.A)
+	assert.NotEqual(t, f2.B, f1.B)
+	assert.NotEqual(t, f2.B.a, f1.B.a)
+	assert.Equal(t, f2.B.B, f1.B.B)
+}
+
+func TestCopyIgnoreNilMembers(t *testing.T) {
+	type Foo struct {
+		A *string
+		B []string
+		C map[string]string
+	}
+
+	f := &Foo{}
+	assert.Nil(t, f.A)
+	assert.Nil(t, f.B)
+	assert.Nil(t, f.C)
+
+	var f2 Foo
+	awsutil.Copy(&f2, f)
+	assert.Nil(t, f2.A)
+	assert.Nil(t, f2.B)
+	assert.Nil(t, f2.C)
+
+	fcopy := awsutil.CopyOf(f)
+	f3 := fcopy.(*Foo)
+	assert.Nil(t, f3.A)
+	assert.Nil(t, f3.B)
+	assert.Nil(t, f3.C)
+}
+
+func TestCopyPrimitive(t *testing.T) {
+	str := "hello"
+	var s string
+	awsutil.Copy(&s, &str)
+	assert.Equal(t, "hello", s)
+}
+
+func TestCopyNil(t *testing.T) {
+	var s string
+	awsutil.Copy(&s, nil)
+	assert.Equal(t, "", s)
+}
+
+func TestCopyReader(t *testing.T) {
+	var buf io.Reader = bytes.NewReader([]byte("hello world"))
+	var r io.Reader
+	awsutil.Copy(&r, buf)
+	b, err := ioutil.ReadAll(r)
+	assert.NoError(t, err)
+	assert.Equal(t, []byte("hello world"), b)
+
+	// empty bytes because this is not a deep copy
+	b, err = ioutil.ReadAll(buf)
+	assert.NoError(t, err)
+	assert.Equal(t, []byte(""), b)
+}
+
+func TestCopyDifferentStructs(t *testing.T) {
+	type SrcFoo struct {
+		A                int
+		B                []*string
+		C                map[string]*int
+		SrcUnique        string
+		SameNameDiffType int
+		unexportedPtr    *int
+		ExportedPtr      *int
+	}
+	type DstFoo struct {
+		A                int
+		B                []*string
+		C                map[string]*int
+		DstUnique        int
+		SameNameDiffType string
+		unexportedPtr    *int
+		ExportedPtr      *int
+	}
+
+	// Create the initial value
+	str1 := "hello"
+	str2 := "bye bye"
+	int1 := 1
+	int2 := 2
+	f1 := &SrcFoo{
+		A: 1,
+		B: []*string{&str1, &str2},
+		C: map[string]*int{
+			"A": &int1,
+			"B": &int2,
+		},
+		SrcUnique:        "unique",
+		SameNameDiffType: 1,
+		unexportedPtr:    &int1,
+		ExportedPtr:      &int2,
+	}
+
+	// Do the copy
+	var f2 DstFoo
+	awsutil.Copy(&f2, f1)
+
+	// Values are equal
+	assert.Equal(t, f2.A, f1.A)
+	assert.Equal(t, f2.B, f1.B)
+	assert.Equal(t, f2.C, f1.C)
+	assert.Equal(t, "unique", f1.SrcUnique)
+	assert.Equal(t, 1, f1.SameNameDiffType)
+	assert.Equal(t, 0, f2.DstUnique)
+	assert.Equal(t, "", f2.SameNameDiffType)
+	assert.Equal(t, int1, *f1.unexportedPtr)
+	assert.Nil(t, f2.unexportedPtr)
+	assert.Equal(t, int2, *f1.ExportedPtr)
+	assert.Equal(t, int2, *f2.ExportedPtr)
+}
+
+func ExampleCopyOf() {
+	type Foo struct {
+		A int
+		B []*string
+	}
+
+	// Create the initial value
+	str1 := "hello"
+	str2 := "bye bye"
+	f1 := &Foo{A: 1, B: []*string{&str1, &str2}}
+
+	// Do the copy
+	v := awsutil.CopyOf(f1)
+	var f2 *Foo = v.(*Foo)
+
+	// Print the result
+	fmt.Println(awsutil.Prettify(f2))
+
+	// Output:
+	// {
+	//   A: 1,
+	//   B: ["hello","bye bye"]
+	// }
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
new file mode 100644
index 000000000..59fa4a558
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
@@ -0,0 +1,27 @@
+package awsutil
+
+import (
+	"reflect"
+)
+
+// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual.
+// In addition to this, this method will also dereference the input values if
+// possible so the DeepEqual performed will not fail if one parameter is a
+// pointer and the other is not.
+//
+// DeepEqual will not perform indirection of nested values of the input parameters.
+func DeepEqual(a, b interface{}) bool {
+	ra := reflect.Indirect(reflect.ValueOf(a))
+	rb := reflect.Indirect(reflect.ValueOf(b))
+
+	if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid {
+		// If the elements are both nil, and of the same type, they are equal.
+		// If they are of different types they are not equal.
+		return reflect.TypeOf(a) == reflect.TypeOf(b)
+	} else if raValid != rbValid {
+		// Both values must be valid to be equal
+		return false
+	}
+
+	return reflect.DeepEqual(ra.Interface(), rb.Interface())
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal_test.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal_test.go
new file mode 100644
index 000000000..7a5db6e49
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal_test.go
@@ -0,0 +1,29 @@
+package awsutil_test
+
+import (
+	"testing"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestDeepEqual(t *testing.T) {
+	cases := []struct {
+		a, b  interface{}
+		equal bool
+	}{
+		{"a", "a", true},
+		{"a", "b", false},
+		{"a", aws.String(""), false},
+		{"a", nil, false},
+		{"a", aws.String("a"), true},
+		{(*bool)(nil), (*bool)(nil), true},
+		{(*bool)(nil), (*string)(nil), false},
+		{nil, nil, true},
+	}
+
+	for i, c := range cases {
+		assert.Equal(t, c.equal, awsutil.DeepEqual(c.a, c.b), "%d, a:%v b:%v, %t", i, c.a, c.b, c.equal)
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
new file mode 100644
index 000000000..4d2a01e8c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
@@ -0,0 +1,222 @@
+package awsutil
+
+import (
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/jmespath/go-jmespath"
+)
+
+var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`)
+
+// rValuesAtPath returns a slice of values found in value v. The values
+// in v are explored recursively so all nested values are collected.
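+//
+// An illustrative sketch of the path syntax handled below (not part of the
+// original documentation): components are separated by '.', support indexing
+// ("A[0]", "A[]" for every element, negative indices count from the end),
+// and "||" separates alternative paths where the first non-empty match wins:
+//
+//	rValuesAtPath(data, "A[0].C", false, false, false)
+//	rValuesAtPath(data, "A.D.X || C", false, false, false)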
+func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value { + pathparts := strings.Split(path, "||") + if len(pathparts) > 1 { + for _, pathpart := range pathparts { + vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm) + if len(vals) > 0 { + return vals + } + } + return nil + } + + values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} + components := strings.Split(path, ".") + for len(values) > 0 && len(components) > 0 { + var index *int64 + var indexStar bool + c := strings.TrimSpace(components[0]) + if c == "" { // no actual component, illegal syntax + return nil + } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { + // TODO normalize case for user + return nil // don't support unexported fields + } + + // parse this component + if m := indexRe.FindStringSubmatch(c); m != nil { + c = m[1] + if m[2] == "" { + index = nil + indexStar = true + } else { + i, _ := strconv.ParseInt(m[2], 10, 32) + index = &i + indexStar = false + } + } + + nextvals := []reflect.Value{} + for _, value := range values { + // pull component name out of struct member + if value.Kind() != reflect.Struct { + continue + } + + if c == "*" { // pull all members + for i := 0; i < value.NumField(); i++ { + if f := reflect.Indirect(value.Field(i)); f.IsValid() { + nextvals = append(nextvals, f) + } + } + continue + } + + value = value.FieldByNameFunc(func(name string) bool { + if c == name { + return true + } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) { + return true + } + return false + }) + + if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 { + if !value.IsNil() { + value.Set(reflect.Zero(value.Type())) + } + return []reflect.Value{value} + } + + if createPath && value.Kind() == reflect.Ptr && value.IsNil() { + // TODO if the value is the terminus it should not be created + // if the value to be set to its position is nil. + value.Set(reflect.New(value.Type().Elem())) + value = value.Elem() + } else { + value = reflect.Indirect(value) + } + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + + if indexStar || index != nil { + nextvals = []reflect.Value{} + for _, value := range values { + value := reflect.Indirect(value) + if value.Kind() != reflect.Slice { + continue + } + + if indexStar { // grab all indices + for i := 0; i < value.Len(); i++ { + idx := reflect.Indirect(value.Index(i)) + if idx.IsValid() { + nextvals = append(nextvals, idx) + } + } + continue + } + + // pull out index + i := int(*index) + if i >= value.Len() { // check out of bounds + if createPath { + // TODO resize slice + } else { + continue + } + } else if i < 0 { // support negative indexing + i = value.Len() + i + } + value = reflect.Indirect(value.Index(i)) + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + } + + components = components[1:] + } + return values +} + +// ValuesAtPath returns a list of values at the case insensitive lexical +// path inside of a structure. 
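+//
+// For example, an illustrative sketch using the `data` value defined in
+// path_value_test.go below:
+//
+//	vals, err := awsutil.ValuesAtPath(data, "A[0].C")
+//	// err == nil, vals == []interface{}{"value1"}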
+func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { + result, err := jmespath.Search(path, i) + if err != nil { + return nil, err + } + + v := reflect.ValueOf(result) + if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { + return nil, nil + } + if s, ok := result.([]interface{}); ok { + return s, err + } + if v.Kind() == reflect.Map && v.Len() == 0 { + return nil, nil + } + if v.Kind() == reflect.Slice { + out := make([]interface{}, v.Len()) + for i := 0; i < v.Len(); i++ { + out[i] = v.Index(i).Interface() + } + return out, nil + } + + return []interface{}{result}, nil +} + +// SetValueAtPath sets a value at the case insensitive lexical path inside +// of a structure. +func SetValueAtPath(i interface{}, path string, v interface{}) { + if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil { + for _, rval := range rvals { + if rval.Kind() == reflect.Ptr && rval.IsNil() { + continue + } + setValue(rval, v) + } + } +} + +func setValue(dstVal reflect.Value, src interface{}) { + if dstVal.Kind() == reflect.Ptr { + dstVal = reflect.Indirect(dstVal) + } + srcVal := reflect.ValueOf(src) + + if !srcVal.IsValid() { // src is literal nil + if dstVal.CanAddr() { + // Convert to pointer so that pointer's value can be nil'ed + // dstVal = dstVal.Addr() + } + dstVal.Set(reflect.Zero(dstVal.Type())) + + } else if srcVal.Kind() == reflect.Ptr { + if srcVal.IsNil() { + srcVal = reflect.Zero(dstVal.Type()) + } else { + srcVal = reflect.ValueOf(src).Elem() + } + dstVal.Set(srcVal) + } else { + dstVal.Set(srcVal) + } + +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value_test.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value_test.go new file mode 100644 index 000000000..b2225566f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value_test.go @@ -0,0 +1,142 @@ +package awsutil_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/stretchr/testify/assert" +) + +type Struct struct { + A []Struct + z []Struct + B *Struct + D *Struct + C string + E map[string]string +} + +var data = Struct{ + A: []Struct{{C: "value1"}, {C: "value2"}, {C: "value3"}}, + z: []Struct{{C: "value1"}, {C: "value2"}, {C: "value3"}}, + B: &Struct{B: &Struct{C: "terminal"}, D: &Struct{C: "terminal2"}}, + C: "initial", +} +var data2 = Struct{A: []Struct{ + {A: []Struct{{C: "1"}, {C: "1"}, {C: "1"}, {C: "1"}, {C: "1"}}}, + {A: []Struct{{C: "2"}, {C: "2"}, {C: "2"}, {C: "2"}, {C: "2"}}}, +}} + +func TestValueAtPathSuccess(t *testing.T) { + var testCases = []struct { + expect []interface{} + data interface{} + path string + }{ + {[]interface{}{"initial"}, data, "C"}, + {[]interface{}{"value1"}, data, "A[0].C"}, + {[]interface{}{"value2"}, data, "A[1].C"}, + {[]interface{}{"value3"}, data, "A[2].C"}, + {[]interface{}{"value3"}, data, "a[2].c"}, + {[]interface{}{"value3"}, data, "A[-1].C"}, + {[]interface{}{"value1", "value2", "value3"}, data, "A[].C"}, + {[]interface{}{"terminal"}, data, "B . B . 
C"}, + {[]interface{}{"initial"}, data, "A.D.X || C"}, + {[]interface{}{"initial"}, data, "A[0].B || C"}, + {[]interface{}{ + Struct{A: []Struct{{C: "1"}, {C: "1"}, {C: "1"}, {C: "1"}, {C: "1"}}}, + Struct{A: []Struct{{C: "2"}, {C: "2"}, {C: "2"}, {C: "2"}, {C: "2"}}}, + }, data2, "A"}, + } + for i, c := range testCases { + v, err := awsutil.ValuesAtPath(c.data, c.path) + assert.NoError(t, err, "case %d, expected no error, %s", i, c.path) + assert.Equal(t, c.expect, v, "case %d, %s", i, c.path) + } +} + +func TestValueAtPathFailure(t *testing.T) { + var testCases = []struct { + expect []interface{} + errContains string + data interface{} + path string + }{ + {nil, "", data, "C.x"}, + {nil, "SyntaxError: Invalid token: tDot", data, ".x"}, + {nil, "", data, "X.Y.Z"}, + {nil, "", data, "A[100].C"}, + {nil, "", data, "A[3].C"}, + {nil, "", data, "B.B.C.Z"}, + {nil, "", data, "z[-1].C"}, + {nil, "", nil, "A.B.C"}, + {[]interface{}{}, "", Struct{}, "A"}, + {nil, "", data, "A[0].B.C"}, + {nil, "", data, "D"}, + } + + for i, c := range testCases { + v, err := awsutil.ValuesAtPath(c.data, c.path) + if c.errContains != "" { + assert.Contains(t, err.Error(), c.errContains, "case %d, expected error, %s", i, c.path) + continue + } else { + assert.NoError(t, err, "case %d, expected no error, %s", i, c.path) + } + assert.Equal(t, c.expect, v, "case %d, %s", i, c.path) + } +} + +func TestSetValueAtPathSuccess(t *testing.T) { + var s Struct + awsutil.SetValueAtPath(&s, "C", "test1") + awsutil.SetValueAtPath(&s, "B.B.C", "test2") + awsutil.SetValueAtPath(&s, "B.D.C", "test3") + assert.Equal(t, "test1", s.C) + assert.Equal(t, "test2", s.B.B.C) + assert.Equal(t, "test3", s.B.D.C) + + awsutil.SetValueAtPath(&s, "B.*.C", "test0") + assert.Equal(t, "test0", s.B.B.C) + assert.Equal(t, "test0", s.B.D.C) + + var s2 Struct + awsutil.SetValueAtPath(&s2, "b.b.c", "test0") + assert.Equal(t, "test0", s2.B.B.C) + awsutil.SetValueAtPath(&s2, "A", []Struct{{}}) + assert.Equal(t, []Struct{{}}, s2.A) + + str := "foo" + + s3 := Struct{} + awsutil.SetValueAtPath(&s3, "b.b.c", str) + assert.Equal(t, "foo", s3.B.B.C) + + s3 = Struct{B: &Struct{B: &Struct{C: str}}} + awsutil.SetValueAtPath(&s3, "b.b.c", nil) + assert.Equal(t, "", s3.B.B.C) + + s3 = Struct{} + awsutil.SetValueAtPath(&s3, "b.b.c", nil) + assert.Equal(t, "", s3.B.B.C) + + s3 = Struct{} + awsutil.SetValueAtPath(&s3, "b.b.c", &str) + assert.Equal(t, "foo", s3.B.B.C) + + var s4 struct{ Name *string } + awsutil.SetValueAtPath(&s4, "Name", str) + assert.Equal(t, str, *s4.Name) + + s4 = struct{ Name *string }{} + awsutil.SetValueAtPath(&s4, "Name", nil) + assert.Equal(t, (*string)(nil), s4.Name) + + s4 = struct{ Name *string }{Name: &str} + awsutil.SetValueAtPath(&s4, "Name", nil) + assert.Equal(t, (*string)(nil), s4.Name) + + s4 = struct{ Name *string }{} + awsutil.SetValueAtPath(&s4, "Name", &str) + assert.Equal(t, str, *s4.Name) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go new file mode 100644 index 000000000..fc38172fe --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go @@ -0,0 +1,107 @@ +package awsutil + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" +) + +// Prettify returns the string representation of a value. +func Prettify(i interface{}) string { + var buf bytes.Buffer + prettify(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +// prettify will recursively walk value v to build a textual +// representation of the value. 
+func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
+	for v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+
+	switch v.Kind() {
+	case reflect.Struct:
+		strtype := v.Type().String()
+		if strtype == "time.Time" {
+			fmt.Fprintf(buf, "%s", v.Interface())
+			break
+		} else if strings.HasPrefix(strtype, "io.") {
+			buf.WriteString("<buffer>")
+			break
+		}
+
+		buf.WriteString("{\n")
+
+		names := []string{}
+		for i := 0; i < v.Type().NumField(); i++ {
+			name := v.Type().Field(i).Name
+			f := v.Field(i)
+			if name[0:1] == strings.ToLower(name[0:1]) {
+				continue // ignore unexported fields
+			}
+			if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
+				continue // ignore unset fields
+			}
+			names = append(names, name)
+		}
+
+		for i, n := range names {
+			val := v.FieldByName(n)
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(n + ": ")
+			prettify(val, indent+2, buf)
+
+			if i < len(names)-1 {
+				buf.WriteString(",\n")
+			}
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	case reflect.Slice:
+		nl, id, id2 := "", "", ""
+		if v.Len() > 3 {
+			nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+		}
+		buf.WriteString("[" + nl)
+		for i := 0; i < v.Len(); i++ {
+			buf.WriteString(id2)
+			prettify(v.Index(i), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString("," + nl)
+			}
+		}
+
+		buf.WriteString(nl + id + "]")
+	case reflect.Map:
+		buf.WriteString("{\n")
+
+		for i, k := range v.MapKeys() {
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(k.String() + ": ")
+			prettify(v.MapIndex(k), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString(",\n")
+			}
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	default:
+		if !v.IsValid() {
+			fmt.Fprint(buf, "<invalid value>")
+			return
+		}
+		format := "%v"
+		switch v.Interface().(type) {
+		case string:
+			format = "%q"
+		case io.ReadSeeker, io.Reader:
+			format = "buffer(%p)"
+		}
+		fmt.Fprintf(buf, format, v.Interface())
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
new file mode 100644
index 000000000..b6432f1a1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
@@ -0,0 +1,89 @@
+package awsutil
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+// StringValue returns the string representation of a value.
+func StringValue(i interface{}) string { + var buf bytes.Buffer + stringValue(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + buf.WriteString("{\n") + + names := []string{} + for i := 0; i < v.Type().NumField(); i++ { + name := v.Type().Field(i).Name + f := v.Field(i) + if name[0:1] == strings.ToLower(name[0:1]) { + continue // ignore unexported fields + } + if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() { + continue // ignore unset fields + } + names = append(names, name) + } + + for i, n := range names { + val := v.FieldByName(n) + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(n + ": ") + stringValue(val, indent+2, buf) + + if i < len(names)-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + case reflect.Slice: + nl, id, id2 := "", "", "" + if v.Len() > 3 { + nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) + } + buf.WriteString("[" + nl) + for i := 0; i < v.Len(); i++ { + buf.WriteString(id2) + stringValue(v.Index(i), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString("," + nl) + } + } + + buf.WriteString(nl + id + "]") + case reflect.Map: + buf.WriteString("{\n") + + for i, k := range v.MapKeys() { + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(k.String() + ": ") + stringValue(v.MapIndex(k), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + default: + format := "%v" + switch v.Interface().(type) { + case string: + format = "%q" + } + fmt.Fprintf(buf, format, v.Interface()) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go new file mode 100644 index 000000000..c8d0564d8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go @@ -0,0 +1,120 @@ +package client + +import ( + "fmt" + "io/ioutil" + "net/http/httputil" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" +) + +// A Config provides configuration to a service client instance. +type Config struct { + Config *aws.Config + Handlers request.Handlers + Endpoint, SigningRegion string +} + +// ConfigProvider provides a generic way for a service client to receive +// the ClientConfig without circular dependencies. +type ConfigProvider interface { + ClientConfig(serviceName string, cfgs ...*aws.Config) Config +} + +// A Client implements the base client request and response handling +// used by all service clients. +type Client struct { + request.Retryer + metadata.ClientInfo + + Config aws.Config + Handlers request.Handlers +} + +// New will return a pointer to a new initialized service client. 
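+//
+// An illustrative sketch of how a service package might construct its client
+// (the service name and variable names here are placeholders, not from the
+// original source):
+//
+//	c := p.ClientConfig("example") // p is a client.ConfigProvider, e.g. a session
+//	svc := client.New(*c.Config, metadata.ClientInfo{ServiceName: "example"}, c.Handlers)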
+func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client { + svc := &Client{ + Config: cfg, + ClientInfo: info, + Handlers: handlers, + } + + switch retryer, ok := cfg.Retryer.(request.Retryer); { + case ok: + svc.Retryer = retryer + case cfg.Retryer != nil && cfg.Logger != nil: + s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer) + cfg.Logger.Log(s) + fallthrough + default: + maxRetries := aws.IntValue(cfg.MaxRetries) + if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries { + maxRetries = 3 + } + svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries} + } + + svc.AddDebugHandlers() + + for _, option := range options { + option(svc) + } + + return svc +} + +// NewRequest returns a new Request pointer for the service API +// operation and parameters. +func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request { + return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data) +} + +// AddDebugHandlers injects debug logging handlers into the service to log request +// debug information. +func (c *Client) AddDebugHandlers() { + if !c.Config.LogLevel.AtLeast(aws.LogDebug) { + return + } + + c.Handlers.Send.PushFront(logRequest) + c.Handlers.Send.PushBack(logResponse) +} + +const logReqMsg = `DEBUG: Request %s/%s Details: +---[ REQUEST POST-SIGN ]----------------------------- +%s +-----------------------------------------------------` + +func logRequest(r *request.Request) { + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + dumpedBody, _ := httputil.DumpRequestOut(r.HTTPRequest, logBody) + + if logBody { + // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's + // Body as a NoOpCloser and will not be reset after read by the HTTP + // client reader. + r.Body.Seek(r.BodyStart, 0) + r.HTTPRequest.Body = ioutil.NopCloser(r.Body) + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody))) +} + +const logRespMsg = `DEBUG: Response %s/%s Details: +---[ RESPONSE ]-------------------------------------- +%s +-----------------------------------------------------` + +func logResponse(r *request.Request) { + var msg = "no response data" + if r.HTTPResponse != nil { + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + dumpedBody, _ := httputil.DumpResponse(r.HTTPResponse, logBody) + msg = string(dumpedBody) + } else if r.Error != nil { + msg = r.Error.Error() + } + r.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.ClientInfo.ServiceName, r.Operation.Name, msg)) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go new file mode 100644 index 000000000..43a3676b7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go @@ -0,0 +1,90 @@ +package client + +import ( + "math/rand" + "sync" + "time" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// DefaultRetryer implements basic retry logic using exponential backoff for +// most services. If you want to implement custom retry logic, implement the +// request.Retryer interface or create a structure type that composes this +// struct and override the specific methods. 
For example, to override only
+// the MaxRetries method:
+//
+//	type retryer struct {
+//		service.DefaultRetryer
+//	}
+//
+//	// This implementation always has 100 max retries
+//	func (d retryer) MaxRetries() int { return 100 }
+type DefaultRetryer struct {
+	NumMaxRetries int
+}
+
+// MaxRetries returns the maximum number of retries the service will use for
+// an individual API request.
+func (d DefaultRetryer) MaxRetries() int {
+	return d.NumMaxRetries
+}
+
+var seededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
+
+// RetryRules returns the delay duration before retrying this request again
+func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
+	// Set the upper limit of delay in retrying at ~five minutes
+	minTime := 30
+	throttle := d.shouldThrottle(r)
+	if throttle {
+		minTime = 500
+	}
+
+	retryCount := r.RetryCount
+	if retryCount > 13 {
+		retryCount = 13
+	} else if throttle && retryCount > 8 {
+		retryCount = 8
+	}
+
+	delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime)
+	return time.Duration(delay) * time.Millisecond
+}
+
+// ShouldRetry returns true if the request should be retried.
+func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
+	if r.HTTPResponse.StatusCode >= 500 {
+		return true
+	}
+	return r.IsErrorRetryable() || d.shouldThrottle(r)
+}
+
+// shouldThrottle returns true if the request should be throttled.
+func (d DefaultRetryer) shouldThrottle(r *request.Request) bool {
+	if r.HTTPResponse.StatusCode == 502 ||
+		r.HTTPResponse.StatusCode == 503 ||
+		r.HTTPResponse.StatusCode == 504 {
+		return true
+	}
+	return r.IsErrorThrottle()
+}
+
+// lockedSource is a thread-safe implementation of rand.Source
+type lockedSource struct {
+	lk  sync.Mutex
+	src rand.Source
+}
+
+func (r *lockedSource) Int63() (n int64) {
+	r.lk.Lock()
+	n = r.src.Int63()
+	r.lk.Unlock()
+	return
+}
+
+func (r *lockedSource) Seed(seed int64) {
+	r.lk.Lock()
+	r.src.Seed(seed)
+	r.lk.Unlock()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
new file mode 100644
index 000000000..4778056dd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
@@ -0,0 +1,12 @@
+package metadata
+
+// ClientInfo wraps immutable data from the client.Client structure.
+type ClientInfo struct {
+	ServiceName   string
+	APIVersion    string
+	Endpoint      string
+	SigningName   string
+	SigningRegion string
+	JSONVersion   string
+	TargetPrefix  string
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go
new file mode 100644
index 000000000..da72935be
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go
@@ -0,0 +1,363 @@
+package aws
+
+import (
+	"net/http"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/credentials"
+)
+
+// UseServiceDefaultRetries instructs the config to use the service's own default
+// number of retries. This will be the default action if Config.MaxRetries
+// is nil also.
+const UseServiceDefaultRetries = -1
+
+// RequestRetryer is an alias for a type that implements the request.Retryer interface.
+type RequestRetryer interface{}
+
+// A Config provides service configuration for service clients. By default,
+// all clients will use the {defaults.DefaultConfig} structure.
+type Config struct {
+	// Enables verbose error printing of all credential chain errors.
+	// Should be used when wanting to see all errors while attempting to
+	// retrieve credentials.
+	CredentialsChainVerboseErrors *bool
+
+	// The credentials object to use when signing requests. Defaults to
+	// a chain of credential providers to search for credentials in environment
+	// variables, shared credential file, and EC2 Instance Roles.
+	Credentials *credentials.Credentials
+
+	// An optional endpoint URL (hostname only or fully qualified URI)
+	// that overrides the default generated endpoint for a client. Set this
+	// to `""` to use the default generated endpoint.
+	//
+	// @note You must still provide a `Region` value when specifying an
+	// endpoint for a client.
+	Endpoint *string
+
+	// The region to send requests to. This parameter is required and must
+	// be configured globally or on a per-client basis unless otherwise
+	// noted. A full list of regions is found in the "Regions and Endpoints"
+	// document.
+	//
+	// @see http://docs.aws.amazon.com/general/latest/gr/rande.html
+	// AWS Regions and Endpoints
+	Region *string
+
+	// Set this to `true` to disable SSL when sending requests. Defaults
+	// to `false`.
+	DisableSSL *bool
+
+	// The HTTP client to use when sending requests. Defaults to
+	// `http.DefaultClient`.
+	HTTPClient *http.Client
+
+	// An integer value representing the logging level. The default log level
+	// is zero (LogOff), which represents no logging. To enable logging set
+	// to a LogLevel Value.
+	LogLevel *LogLevelType
+
+	// The logger writer interface to write logging messages to. Defaults to
+	// standard out.
+	Logger Logger
+
+	// The maximum number of times that a request will be retried for failures.
+	// Defaults to -1, which defers the max retry setting to the service specific
+	// configuration.
+	MaxRetries *int
+
+	// Retryer guides how HTTP requests should be retried in case of recoverable failures.
+	//
+	// When nil or the value does not implement the request.Retryer interface,
+	// the request.DefaultRetryer will be used.
+	//
+	// When both Retryer and MaxRetries are non-nil, the former is used and
+	// the latter ignored.
+	//
+	// To set the Retryer field in a type-safe manner and with chaining, use
+	// the request.WithRetryer helper function:
+	//
+	//	cfg := request.WithRetryer(aws.NewConfig(), myRetryer)
+	//
+	Retryer RequestRetryer
+
+	// Disables semantic parameter validation, which validates input for missing
+	// required fields and/or other semantic request input errors.
+	DisableParamValidation *bool
+
+	// Disables the computation of request and response checksums, e.g.,
+	// CRC32 checksums in Amazon DynamoDB.
+	DisableComputeChecksums *bool
+
+	// Set this to `true` to force the request to use path-style addressing,
+	// i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client will
+	// use virtual hosted bucket addressing when possible
+	// (`http://BUCKET.s3.amazonaws.com/KEY`).
+	//
+	// @note This configuration option is specific to the Amazon S3 service.
+	// @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
+	// Amazon S3: Virtual Hosting of Buckets
+	S3ForcePathStyle *bool
+
+	// Set this to `true` to disable the SDK adding the `Expect: 100-Continue`
+	// header to PUT requests over 2MB of content. 100-Continue instructs the
+	// HTTP client not to send the body until the service responds with a
+	// `continue` status. This is useful to prevent sending the request body
+	// until after the request is authenticated and validated.
+	//
+	// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
+	//
+	// 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s
+	// `ExpectContinueTimeout` for information on adjusting the continue wait timeout.
+	// https://golang.org/pkg/net/http/#Transport
+	//
+	// You should use this flag to disable 100-Continue if you experience issues
+	// with proxies or third party S3 compatible services.
+	S3Disable100Continue *bool
+
+	// Set this to `true` to enable the S3 Accelerate feature. All operations
+	// compatible with S3 Accelerate will use the accelerate endpoint for
+	// requests. Requests not compatible will fall back to normal S3 requests.
+	//
+	// The bucket must be enabled for accelerate in order to be used with an S3
+	// client that has accelerate enabled. If the bucket is not enabled for
+	// accelerate an error will be returned. The bucket name must be DNS
+	// compatible to also work with accelerate.
+	S3UseAccelerate *bool
+
+	// Set this to `true` to disable the EC2Metadata client from overriding the
+	// default http.Client's Timeout. This is helpful if you do not want the
+	// EC2Metadata client to create a new http.Client. This option is only
+	// meaningful if you're not already using a custom HTTP client with the SDK.
+	// Enabled by default.
+	//
+	// Must be set and provided to session.New() in order to disable the
+	// EC2Metadata client overriding the timeout for the default credentials
+	// chain.
+	//
+	// Example:
+	//	sess := session.New(aws.NewConfig().WithEC2MetadataDisableTimeoutOverride(true))
+	//	svc := s3.New(sess)
+	//
+	EC2MetadataDisableTimeoutOverride *bool
+
+	// SleepDelay is an override for the func the SDK will call when sleeping
+	// during the lifecycle of a request. Specifically this will be used for
+	// request delays. This value should only be used for testing. To adjust
+	// the delay of a request see the aws/client.DefaultRetryer and
+	// aws/request.Retryer.
+	SleepDelay func(time.Duration)
+}
+
+// NewConfig returns a new Config pointer that can be chained with builder
+// methods to set multiple configuration values inline without using pointers.
+//
+//	sess := session.New(aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(10))
+//
+func NewConfig() *Config {
+	return &Config{}
+}
+
+// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and
+// returns a Config pointer.
+func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config {
+	c.CredentialsChainVerboseErrors = &verboseErrs
+	return c
+}
+
+// WithCredentials sets a config Credentials value returning a Config pointer
+// for chaining.
+func (c *Config) WithCredentials(creds *credentials.Credentials) *Config {
+	c.Credentials = creds
+	return c
+}
+
+// WithEndpoint sets a config Endpoint value returning a Config pointer for
+// chaining.
+func (c *Config) WithEndpoint(endpoint string) *Config {
+	c.Endpoint = &endpoint
+	return c
+}
+
+// WithRegion sets a config Region value returning a Config pointer for
+// chaining.
+func (c *Config) WithRegion(region string) *Config {
+	c.Region = &region
+	return c
+}
+
+// WithDisableSSL sets a config DisableSSL value returning a Config pointer
+// for chaining.
+func (c *Config) WithDisableSSL(disable bool) *Config {
+	c.DisableSSL = &disable
+	return c
+}
+
+// WithHTTPClient sets a config HTTPClient value returning a Config pointer
+// for chaining.
+func (c *Config) WithHTTPClient(client *http.Client) *Config { + c.HTTPClient = client + return c +} + +// WithMaxRetries sets a config MaxRetries value returning a Config pointer +// for chaining. +func (c *Config) WithMaxRetries(max int) *Config { + c.MaxRetries = &max + return c +} + +// WithDisableParamValidation sets a config DisableParamValidation value +// returning a Config pointer for chaining. +func (c *Config) WithDisableParamValidation(disable bool) *Config { + c.DisableParamValidation = &disable + return c +} + +// WithDisableComputeChecksums sets a config DisableComputeChecksums value +// returning a Config pointer for chaining. +func (c *Config) WithDisableComputeChecksums(disable bool) *Config { + c.DisableComputeChecksums = &disable + return c +} + +// WithLogLevel sets a config LogLevel value returning a Config pointer for +// chaining. +func (c *Config) WithLogLevel(level LogLevelType) *Config { + c.LogLevel = &level + return c +} + +// WithLogger sets a config Logger value returning a Config pointer for +// chaining. +func (c *Config) WithLogger(logger Logger) *Config { + c.Logger = logger + return c +} + +// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config +// pointer for chaining. +func (c *Config) WithS3ForcePathStyle(force bool) *Config { + c.S3ForcePathStyle = &force + return c +} + +// WithS3Disable100Continue sets a config S3Disable100Continue value returning +// a Config pointer for chaining. +func (c *Config) WithS3Disable100Continue(disable bool) *Config { + c.S3Disable100Continue = &disable + return c +} + +// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config +// pointer for chaining. +func (c *Config) WithS3UseAccelerate(enable bool) *Config { + c.S3UseAccelerate = &enable + return c +} + +// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value +// returning a Config pointer for chaining. +func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { + c.EC2MetadataDisableTimeoutOverride = &enable + return c +} + +// WithSleepDelay overrides the function used to sleep while waiting for the +// next retry. Defaults to time.Sleep. +func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { + c.SleepDelay = fn + return c +} + +// MergeIn merges the passed in configs into the existing config object. 
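+//
+// For example (an illustrative sketch, not from the original documentation):
+//
+//	base := aws.NewConfig().WithRegion("us-west-2")
+//	base.MergeIn(aws.NewConfig().WithMaxRetries(10))
+//	// base now has both Region "us-west-2" and MaxRetries 10 set.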
+func (c *Config) MergeIn(cfgs ...*Config) { + for _, other := range cfgs { + mergeInConfig(c, other) + } +} + +func mergeInConfig(dst *Config, other *Config) { + if other == nil { + return + } + + if other.CredentialsChainVerboseErrors != nil { + dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors + } + + if other.Credentials != nil { + dst.Credentials = other.Credentials + } + + if other.Endpoint != nil { + dst.Endpoint = other.Endpoint + } + + if other.Region != nil { + dst.Region = other.Region + } + + if other.DisableSSL != nil { + dst.DisableSSL = other.DisableSSL + } + + if other.HTTPClient != nil { + dst.HTTPClient = other.HTTPClient + } + + if other.LogLevel != nil { + dst.LogLevel = other.LogLevel + } + + if other.Logger != nil { + dst.Logger = other.Logger + } + + if other.MaxRetries != nil { + dst.MaxRetries = other.MaxRetries + } + + if other.Retryer != nil { + dst.Retryer = other.Retryer + } + + if other.DisableParamValidation != nil { + dst.DisableParamValidation = other.DisableParamValidation + } + + if other.DisableComputeChecksums != nil { + dst.DisableComputeChecksums = other.DisableComputeChecksums + } + + if other.S3ForcePathStyle != nil { + dst.S3ForcePathStyle = other.S3ForcePathStyle + } + + if other.S3Disable100Continue != nil { + dst.S3Disable100Continue = other.S3Disable100Continue + } + + if other.S3UseAccelerate != nil { + dst.S3UseAccelerate = other.S3UseAccelerate + } + + if other.EC2MetadataDisableTimeoutOverride != nil { + dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride + } + + if other.SleepDelay != nil { + dst.SleepDelay = other.SleepDelay + } +} + +// Copy will return a shallow copy of the Config object. If any additional +// configurations are provided they will be merged into the new config returned. 
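+//
+// For example (an illustrative sketch, not from the original documentation):
+//
+//	cfg := aws.NewConfig().WithRegion("us-west-2")
+//	cp := cfg.Copy(aws.NewConfig().WithMaxRetries(5))
+//	// cp has Region "us-west-2" and MaxRetries 5; cfg is unchanged.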
+func (c *Config) Copy(cfgs ...*Config) *Config {
+	dst := &Config{}
+	dst.MergeIn(c)
+
+	for _, cfg := range cfgs {
+		dst.MergeIn(cfg)
+	}
+
+	return dst
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config_test.go b/vendor/github.com/aws/aws-sdk-go/aws/config_test.go
new file mode 100644
index 000000000..fe97a31fc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/config_test.go
@@ -0,0 +1,86 @@
+package aws
+
+import (
+	"net/http"
+	"reflect"
+	"testing"
+
+	"github.com/aws/aws-sdk-go/aws/credentials"
+)
+
+var testCredentials = credentials.NewStaticCredentials("AKID", "SECRET", "SESSION")
+
+var copyTestConfig = Config{
+	Credentials:             testCredentials,
+	Endpoint:                String("CopyTestEndpoint"),
+	Region:                  String("COPY_TEST_AWS_REGION"),
+	DisableSSL:              Bool(true),
+	HTTPClient:              http.DefaultClient,
+	LogLevel:                LogLevel(LogDebug),
+	Logger:                  NewDefaultLogger(),
+	MaxRetries:              Int(3),
+	DisableParamValidation:  Bool(true),
+	DisableComputeChecksums: Bool(true),
+	S3ForcePathStyle:        Bool(true),
+}
+
+func TestCopy(t *testing.T) {
+	want := copyTestConfig
+	got := copyTestConfig.Copy()
+	if !reflect.DeepEqual(*got, want) {
+		t.Errorf("Copy() = %+v", got)
+		t.Errorf("  want %+v", want)
+	}
+
+	got.Region = String("other")
+	if got.Region == want.Region {
+		t.Errorf("Expected setting values on the copy to not reflect in the source")
+	}
+}
+
+func TestCopyReturnsNewInstance(t *testing.T) {
+	want := copyTestConfig
+	got := copyTestConfig.Copy()
+	if got == &want {
+		t.Errorf("Copy() = %p; want different instance as source %p", got, &want)
+	}
+}
+
+var mergeTestZeroValueConfig = Config{}
+
+var mergeTestConfig = Config{
+	Credentials:             testCredentials,
+	Endpoint:                String("MergeTestEndpoint"),
+	Region:                  String("MERGE_TEST_AWS_REGION"),
+	DisableSSL:              Bool(true),
+	HTTPClient:              http.DefaultClient,
+	LogLevel:                LogLevel(LogDebug),
+	Logger:                  NewDefaultLogger(),
+	MaxRetries:              Int(10),
+	DisableParamValidation:  Bool(true),
+	DisableComputeChecksums: Bool(true),
+	S3ForcePathStyle:        Bool(true),
+}
+
+var mergeTests = []struct {
+	cfg  *Config
+	in   *Config
+	want *Config
+}{
+	{&Config{}, nil, &Config{}},
+	{&Config{}, &mergeTestZeroValueConfig, &Config{}},
+	{&Config{}, &mergeTestConfig, &mergeTestConfig},
+}
+
+func TestMerge(t *testing.T) {
+	for i, tt := range mergeTests {
+		got := tt.cfg.Copy()
+		got.MergeIn(tt.in)
+		if !reflect.DeepEqual(got, tt.want) {
+			t.Errorf("Config %d %+v", i, tt.cfg)
+			t.Errorf("  Merge(%+v)", tt.in)
+			t.Errorf("  got %+v", got)
+			t.Errorf("  want %+v", tt.want)
+		}
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
new file mode 100644
index 000000000..3b73a7da7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
@@ -0,0 +1,369 @@
+package aws
+
+import "time"
+
+// String returns a pointer to the string value passed in.
+func String(v string) *string {
+	return &v
+}
+
+// StringValue returns the value of the string pointer passed in or
+// "" if the pointer is nil.
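+//
+// For example (illustrative):
+//
+//	fmt.Println(aws.StringValue(aws.String("test"))) // "test"
+//	fmt.Println(aws.StringValue(nil))                // ""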
+func StringValue(v *string) string { + if v != nil { + return *v + } + return "" +} + +// StringSlice converts a slice of string values into a slice of +// string pointers +func StringSlice(src []string) []*string { + dst := make([]*string, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// StringValueSlice converts a slice of string pointers into a slice of +// string values +func StringValueSlice(src []*string) []string { + dst := make([]string, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// StringMap converts a string map of string values into a string +// map of string pointers +func StringMap(src map[string]string) map[string]*string { + dst := make(map[string]*string) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// StringValueMap converts a string map of string pointers into a string +// map of string values +func StringValueMap(src map[string]*string) map[string]string { + dst := make(map[string]string) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Bool returns a pointer to the bool value passed in. +func Bool(v bool) *bool { + return &v +} + +// BoolValue returns the value of the bool pointer passed in or +// false if the pointer is nil. +func BoolValue(v *bool) bool { + if v != nil { + return *v + } + return false +} + +// BoolSlice converts a slice of bool values into a slice of +// bool pointers +func BoolSlice(src []bool) []*bool { + dst := make([]*bool, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// BoolValueSlice converts a slice of bool pointers into a slice of +// bool values +func BoolValueSlice(src []*bool) []bool { + dst := make([]bool, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// BoolMap converts a string map of bool values into a string +// map of bool pointers +func BoolMap(src map[string]bool) map[string]*bool { + dst := make(map[string]*bool) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// BoolValueMap converts a string map of bool pointers into a string +// map of bool values +func BoolValueMap(src map[string]*bool) map[string]bool { + dst := make(map[string]bool) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int returns a pointer to the int value passed in. +func Int(v int) *int { + return &v +} + +// IntValue returns the value of the int pointer passed in or +// 0 if the pointer is nil. 
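Each scalar type in this file gets the same four nil-safe helpers (pointer, value, slice, map), so callers can dereference optional API fields without guarding against nil themselves. A short sketch of the round trip:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
    )

    func main() {
        // Value -> pointer -> value round trips losslessly.
        name := aws.String("bucket-a")
        fmt.Println(aws.StringValue(name)) // "bucket-a"

        // A nil pointer collapses to the zero value instead of panicking.
        var missing *string
        fmt.Println(aws.StringValue(missing) == "") // true

        // Slice helpers convert element-wise; nil entries come back as "".
        ptrs := aws.StringSlice([]string{"a", "b"})
        fmt.Println(aws.StringValueSlice(ptrs)) // [a b]
    }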
+func IntValue(v *int) int { + if v != nil { + return *v + } + return 0 +} + +// IntSlice converts a slice of int values into a slice of +// int pointers +func IntSlice(src []int) []*int { + dst := make([]*int, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// IntValueSlice converts a slice of int pointers into a slice of +// int values +func IntValueSlice(src []*int) []int { + dst := make([]int, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// IntMap converts a string map of int values into a string +// map of int pointers +func IntMap(src map[string]int) map[string]*int { + dst := make(map[string]*int) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// IntValueMap converts a string map of int pointers into a string +// map of int values +func IntValueMap(src map[string]*int) map[string]int { + dst := make(map[string]int) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int64 returns a pointer to the int64 value passed in. +func Int64(v int64) *int64 { + return &v +} + +// Int64Value returns the value of the int64 pointer passed in or +// 0 if the pointer is nil. +func Int64Value(v *int64) int64 { + if v != nil { + return *v + } + return 0 +} + +// Int64Slice converts a slice of int64 values into a slice of +// int64 pointers +func Int64Slice(src []int64) []*int64 { + dst := make([]*int64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int64ValueSlice converts a slice of int64 pointers into a slice of +// int64 values +func Int64ValueSlice(src []*int64) []int64 { + dst := make([]int64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int64Map converts a string map of int64 values into a string +// map of int64 pointers +func Int64Map(src map[string]int64) map[string]*int64 { + dst := make(map[string]*int64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int64ValueMap converts a string map of int64 pointers into a string +// map of int64 values +func Int64ValueMap(src map[string]*int64) map[string]int64 { + dst := make(map[string]int64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float64 returns a pointer to the float64 value passed in. +func Float64(v float64) *float64 { + return &v +} + +// Float64Value returns the value of the float64 pointer passed in or +// 0 if the pointer is nil. 
+func Float64Value(v *float64) float64 {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// Float64Slice converts a slice of float64 values into a slice of
+// float64 pointers
+func Float64Slice(src []float64) []*float64 {
+	dst := make([]*float64, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// Float64ValueSlice converts a slice of float64 pointers into a slice of
+// float64 values
+func Float64ValueSlice(src []*float64) []float64 {
+	dst := make([]float64, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// Float64Map converts a string map of float64 values into a string
+// map of float64 pointers
+func Float64Map(src map[string]float64) map[string]*float64 {
+	dst := make(map[string]*float64)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// Float64ValueMap converts a string map of float64 pointers into a string
+// map of float64 values
+func Float64ValueMap(src map[string]*float64) map[string]float64 {
+	dst := make(map[string]float64)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Time returns a pointer to the time.Time value passed in.
+func Time(v time.Time) *time.Time {
+	return &v
+}
+
+// TimeValue returns the value of the time.Time pointer passed in or
+// time.Time{} if the pointer is nil.
+func TimeValue(v *time.Time) time.Time {
+	if v != nil {
+		return *v
+	}
+	return time.Time{}
+}
+
+// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
+// The result is undefined if the Unix time cannot be represented by an int64,
+// which includes calling TimeUnixMilli on a zero Time.
+//
+// This utility is useful for service APIs such as CloudWatch Logs which require
+// their unix time values to be in milliseconds.
+//
+// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information.
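As a quick sanity check of the unit conversion: a time 1.5 seconds past the epoch should convert to 1500 milliseconds.

    package main

    import (
        "fmt"
        "time"

        "github.com/aws/aws-sdk-go/aws"
    )

    func main() {
        // 1 second + 500,000,000 ns after the Unix epoch is 1500 ms.
        t := time.Unix(1, 500000000)
        fmt.Println(aws.TimeUnixMilli(t)) // 1500
    }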
+func TimeUnixMilli(t time.Time) int64 { + return t.UnixNano() / int64(time.Millisecond/time.Nanosecond) +} + +// TimeSlice converts a slice of time.Time values into a slice of +// time.Time pointers +func TimeSlice(src []time.Time) []*time.Time { + dst := make([]*time.Time, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// TimeValueSlice converts a slice of time.Time pointers into a slice of +// time.Time values +func TimeValueSlice(src []*time.Time) []time.Time { + dst := make([]time.Time, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// TimeMap converts a string map of time.Time values into a string +// map of time.Time pointers +func TimeMap(src map[string]time.Time) map[string]*time.Time { + dst := make(map[string]*time.Time) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// TimeValueMap converts a string map of time.Time pointers into a string +// map of time.Time values +func TimeValueMap(src map[string]*time.Time) map[string]time.Time { + dst := make(map[string]time.Time) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types_test.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types_test.go new file mode 100644 index 000000000..df7a3e5d2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types_test.go @@ -0,0 +1,437 @@ +package aws + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +var testCasesStringSlice = [][]string{ + {"a", "b", "c", "d", "e"}, + {"a", "b", "", "", "e"}, +} + +func TestStringSlice(t *testing.T) { + for idx, in := range testCasesStringSlice { + if in == nil { + continue + } + out := StringSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := StringValueSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesStringValueSlice = [][]*string{ + {String("a"), String("b"), nil, String("c")}, +} + +func TestStringValueSlice(t *testing.T) { + for idx, in := range testCasesStringValueSlice { + if in == nil { + continue + } + out := StringValueSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + if in[i] == nil { + assert.Empty(t, out[i], "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + } + } + + out2 := StringSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + for i := range out2 { + if in[i] == nil { + assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + } + } + } +} + +var testCasesStringMap = []map[string]string{ + {"a": "1", "b": "2", "c": "3"}, +} + +func TestStringMap(t *testing.T) { + for idx, in := range testCasesStringMap { + if in == nil { + continue + } + out := StringMap(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := StringValueMap(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesBoolSlice = 
[][]bool{ + {true, true, false, false}, +} + +func TestBoolSlice(t *testing.T) { + for idx, in := range testCasesBoolSlice { + if in == nil { + continue + } + out := BoolSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := BoolValueSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesBoolValueSlice = [][]*bool{} + +func TestBoolValueSlice(t *testing.T) { + for idx, in := range testCasesBoolValueSlice { + if in == nil { + continue + } + out := BoolValueSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + if in[i] == nil { + assert.Empty(t, out[i], "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + } + } + + out2 := BoolSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + for i := range out2 { + if in[i] == nil { + assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + } + } + } +} + +var testCasesBoolMap = []map[string]bool{ + {"a": true, "b": false, "c": true}, +} + +func TestBoolMap(t *testing.T) { + for idx, in := range testCasesBoolMap { + if in == nil { + continue + } + out := BoolMap(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := BoolValueMap(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesIntSlice = [][]int{ + {1, 2, 3, 4}, +} + +func TestIntSlice(t *testing.T) { + for idx, in := range testCasesIntSlice { + if in == nil { + continue + } + out := IntSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := IntValueSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesIntValueSlice = [][]*int{} + +func TestIntValueSlice(t *testing.T) { + for idx, in := range testCasesIntValueSlice { + if in == nil { + continue + } + out := IntValueSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + if in[i] == nil { + assert.Empty(t, out[i], "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + } + } + + out2 := IntSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + for i := range out2 { + if in[i] == nil { + assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + } + } + } +} + +var testCasesIntMap = []map[string]int{ + {"a": 3, "b": 2, "c": 1}, +} + +func TestIntMap(t *testing.T) { + for idx, in := range testCasesIntMap { + if in == nil { + continue + } + out := IntMap(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := IntValueMap(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected 
value at idx %d", idx) + } +} + +var testCasesInt64Slice = [][]int64{ + {1, 2, 3, 4}, +} + +func TestInt64Slice(t *testing.T) { + for idx, in := range testCasesInt64Slice { + if in == nil { + continue + } + out := Int64Slice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := Int64ValueSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesInt64ValueSlice = [][]*int64{} + +func TestInt64ValueSlice(t *testing.T) { + for idx, in := range testCasesInt64ValueSlice { + if in == nil { + continue + } + out := Int64ValueSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + if in[i] == nil { + assert.Empty(t, out[i], "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + } + } + + out2 := Int64Slice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + for i := range out2 { + if in[i] == nil { + assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + } + } + } +} + +var testCasesInt64Map = []map[string]int64{ + {"a": 3, "b": 2, "c": 1}, +} + +func TestInt64Map(t *testing.T) { + for idx, in := range testCasesInt64Map { + if in == nil { + continue + } + out := Int64Map(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := Int64ValueMap(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesFloat64Slice = [][]float64{ + {1, 2, 3, 4}, +} + +func TestFloat64Slice(t *testing.T) { + for idx, in := range testCasesFloat64Slice { + if in == nil { + continue + } + out := Float64Slice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := Float64ValueSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesFloat64ValueSlice = [][]*float64{} + +func TestFloat64ValueSlice(t *testing.T) { + for idx, in := range testCasesFloat64ValueSlice { + if in == nil { + continue + } + out := Float64ValueSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + if in[i] == nil { + assert.Empty(t, out[i], "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + } + } + + out2 := Float64Slice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + for i := range out2 { + if in[i] == nil { + assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + } + } + } +} + +var testCasesFloat64Map = []map[string]float64{ + {"a": 3, "b": 2, "c": 1}, +} + +func TestFloat64Map(t *testing.T) { + for idx, in := range testCasesFloat64Map { + if in == nil { + continue + } + out := Float64Map(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := 
Float64ValueMap(out)
+		assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
+		assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
+	}
+}
+
+var testCasesTimeSlice = [][]time.Time{
+	{time.Now(), time.Now().AddDate(100, 0, 0)},
+}
+
+func TestTimeSlice(t *testing.T) {
+	for idx, in := range testCasesTimeSlice {
+		if in == nil {
+			continue
+		}
+		out := TimeSlice(in)
+		assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
+		for i := range out {
+			assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
+		}
+
+		out2 := TimeValueSlice(out)
+		assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
+		assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
+	}
+}
+
+var testCasesTimeValueSlice = [][]*time.Time{}
+
+func TestTimeValueSlice(t *testing.T) {
+	for idx, in := range testCasesTimeValueSlice {
+		if in == nil {
+			continue
+		}
+		out := TimeValueSlice(in)
+		assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
+		for i := range out {
+			if in[i] == nil {
+				assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
+			} else {
+				assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
+			}
+		}
+
+		out2 := TimeSlice(out)
+		assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
+		for i := range out2 {
+			if in[i] == nil {
+				assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
+			} else {
+				assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
+			}
+		}
+	}
+}
+
+var testCasesTimeMap = []map[string]time.Time{
+	{"a": time.Now().AddDate(-100, 0, 0), "b": time.Now()},
+}
+
+func TestTimeMap(t *testing.T) {
+	for idx, in := range testCasesTimeMap {
+		if in == nil {
+			continue
+		}
+		out := TimeMap(in)
+		assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
+		for i := range out {
+			assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
+		}
+
+		out2 := TimeValueMap(out)
+		assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
+		assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
new file mode 100644
index 000000000..8456e29b5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
@@ -0,0 +1,152 @@
+package corehandlers
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"regexp"
+	"runtime"
+	"strconv"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// Interface for matching types which also have a Len method.
+type lener interface {
+	Len() int
+}
+
+// BuildContentLengthHandler builds the content length of a request based on the body,
+// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable
+// to determine request body length and no "Content-Length" was specified it will panic.
+//
+// The Content-Length will only be added to the request if the length of the body
+// is greater than 0. If the body is empty or the current `Content-Length`
+// header is <= 0, the header will also be stripped.
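The handler's io.Seeker branch measures a body without consuming it: note the current offset, seek to the end to learn the size, then seek back so the send still starts from the right place. The same trick in isolation, written with the named io.Seek* constants (equivalent to the raw 0/1/2 whence values the handler passes):

    package main

    import (
        "bytes"
        "fmt"
        "io"
    )

    // bodyLength reports how many bytes remain from the current offset,
    // leaving the seeker positioned where it started.
    func bodyLength(body io.Seeker) (int64, error) {
        start, err := body.Seek(0, io.SeekCurrent)
        if err != nil {
            return 0, err
        }
        end, err := body.Seek(0, io.SeekEnd)
        if err != nil {
            return 0, err
        }
        // Seek back to the original position before the body is sent.
        if _, err := body.Seek(start, io.SeekStart); err != nil {
            return 0, err
        }
        return end - start, nil
    }

    func main() {
        n, _ := bodyLength(bytes.NewReader(make([]byte, 1024)))
        fmt.Println(n) // 1024
    }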
+var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) { + var length int64 + + if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { + length, _ = strconv.ParseInt(slength, 10, 64) + } else { + switch body := r.Body.(type) { + case nil: + length = 0 + case lener: + length = int64(body.Len()) + case io.Seeker: + r.BodyStart, _ = body.Seek(0, 1) + end, _ := body.Seek(0, 2) + body.Seek(r.BodyStart, 0) // make sure to seek back to original location + length = end - r.BodyStart + default: + panic("Cannot get length of body, must provide `ContentLength`") + } + } + + if length > 0 { + r.HTTPRequest.ContentLength = length + r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) + } else { + r.HTTPRequest.ContentLength = 0 + r.HTTPRequest.Header.Del("Content-Length") + } +}} + +// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent. +var SDKVersionUserAgentHandler = request.NamedHandler{ + Name: "core.SDKVersionUserAgentHandler", + Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, + runtime.Version(), runtime.GOOS, runtime.GOARCH), +} + +var reStatusCode = regexp.MustCompile(`^(\d{3})`) + +// SendHandler is a request handler to send service request using HTTP client. +var SendHandler = request.NamedHandler{Name: "core.SendHandler", Fn: func(r *request.Request) { + var err error + r.HTTPResponse, err = r.Config.HTTPClient.Do(r.HTTPRequest) + if err != nil { + // Prevent leaking if an HTTPResponse was returned. Clean up + // the body. + if r.HTTPResponse != nil { + r.HTTPResponse.Body.Close() + } + // Capture the case where url.Error is returned for error processing + // response. e.g. 301 without location header comes back as string + // error and r.HTTPResponse is nil. Other url redirect errors will + // comeback in a similar method. + if e, ok := err.(*url.Error); ok && e.Err != nil { + if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { + code, _ := strconv.ParseInt(s[1], 10, 64) + r.HTTPResponse = &http.Response{ + StatusCode: int(code), + Status: http.StatusText(int(code)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + return + } + } + if r.HTTPResponse == nil { + // Add a dummy request response object to ensure the HTTPResponse + // value is consistent. + r.HTTPResponse = &http.Response{ + StatusCode: int(0), + Status: http.StatusText(int(0)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + } + // Catch all other request errors. + r.Error = awserr.New("RequestError", "send request failed", err) + r.Retryable = aws.Bool(true) // network errors are retryable + } +}} + +// ValidateResponseHandler is a request handler to validate service response. +var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) { + if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 { + // this may be replaced by an UnmarshalError handler + r.Error = awserr.New("UnknownError", "unknown error", nil) + } +}} + +// AfterRetryHandler performs final checks to determine if the request should +// be retried and how long to delay. 
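Because the retry sleep below goes through Config.SleepDelay rather than time.Sleep directly, callers and tests can observe or suppress the back-off by swapping in their own function; a sketch:

    package main

    import (
        "fmt"
        "time"

        "github.com/aws/aws-sdk-go/aws"
    )

    func main() {
        cfg := aws.NewConfig().WithMaxRetries(3)

        // Record the delay instead of sleeping; handy in unit tests.
        cfg.SleepDelay = func(d time.Duration) {
            fmt.Println("would sleep", d)
        }
        _ = cfg // pass to a service client constructor as usual
    }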
+var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable == nil { + r.Retryable = aws.Bool(r.ShouldRetry(r)) + } + + if r.WillRetry() { + r.RetryDelay = r.RetryRules(r) + r.Config.SleepDelay(r.RetryDelay) + + // when the expired token exception occurs the credentials + // need to be expired locally so that the next request to + // get credentials will trigger a credentials refresh. + if r.IsErrorExpired() { + r.Config.Credentials.Expire() + } + + r.RetryCount++ + r.Error = nil + } +}} + +// ValidateEndpointHandler is a request handler to validate a request had the +// appropriate Region and Endpoint set. Will set r.Error if the endpoint or +// region is not valid. +var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) { + if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" { + r.Error = aws.ErrMissingRegion + } else if r.ClientInfo.Endpoint == "" { + r.Error = aws.ErrMissingEndpoint + } +}} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers_test.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers_test.go new file mode 100644 index 000000000..5b61a33b6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers_test.go @@ -0,0 +1,192 @@ +package corehandlers_test + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/s3" +) + +func TestValidateEndpointHandler(t *testing.T) { + os.Clearenv() + + svc := awstesting.NewClient(aws.NewConfig().WithRegion("us-west-2")) + svc.Handlers.Clear() + svc.Handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) + + req := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil) + err := req.Build() + + assert.NoError(t, err) +} + +func TestValidateEndpointHandlerErrorRegion(t *testing.T) { + os.Clearenv() + + svc := awstesting.NewClient() + svc.Handlers.Clear() + svc.Handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) + + req := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil) + err := req.Build() + + assert.Error(t, err) + assert.Equal(t, aws.ErrMissingRegion, err) +} + +type mockCredsProvider struct { + expired bool + retrieveCalled bool +} + +func (m *mockCredsProvider) Retrieve() (credentials.Value, error) { + m.retrieveCalled = true + return credentials.Value{ProviderName: "mockCredsProvider"}, nil +} + +func (m *mockCredsProvider) IsExpired() bool { + return m.expired +} + +func TestAfterRetryRefreshCreds(t *testing.T) { + os.Clearenv() + credProvider := &mockCredsProvider{} + + svc := awstesting.NewClient(&aws.Config{ + Credentials: credentials.NewCredentials(credProvider), + MaxRetries: aws.Int(1), + }) + + svc.Handlers.Clear() + svc.Handlers.ValidateResponse.PushBack(func(r *request.Request) { + r.Error = awserr.New("UnknownError", "", nil) + r.HTTPResponse = &http.Response{StatusCode: 400, Body: ioutil.NopCloser(bytes.NewBuffer([]byte{}))} + }) + 
svc.Handlers.UnmarshalError.PushBack(func(r *request.Request) { + r.Error = awserr.New("ExpiredTokenException", "", nil) + }) + svc.Handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler) + + assert.True(t, svc.Config.Credentials.IsExpired(), "Expect to start out expired") + assert.False(t, credProvider.retrieveCalled) + + req := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil) + req.Send() + + assert.True(t, svc.Config.Credentials.IsExpired()) + assert.False(t, credProvider.retrieveCalled) + + _, err := svc.Config.Credentials.Get() + assert.NoError(t, err) + assert.True(t, credProvider.retrieveCalled) +} + +type testSendHandlerTransport struct{} + +func (t *testSendHandlerTransport) RoundTrip(r *http.Request) (*http.Response, error) { + return nil, fmt.Errorf("mock error") +} + +func TestSendHandlerError(t *testing.T) { + svc := awstesting.NewClient(&aws.Config{ + HTTPClient: &http.Client{ + Transport: &testSendHandlerTransport{}, + }, + }) + svc.Handlers.Clear() + svc.Handlers.Send.PushBackNamed(corehandlers.SendHandler) + r := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil) + + r.Send() + + assert.Error(t, r.Error) + assert.NotNil(t, r.HTTPResponse) +} + +func setupContentLengthTestServer(t *testing.T, hasContentLength bool, contentLength int64) *httptest.Server { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, ok := r.Header["Content-Length"] + assert.Equal(t, hasContentLength, ok, "expect content length to be set, %t", hasContentLength) + assert.Equal(t, contentLength, r.ContentLength) + + b, err := ioutil.ReadAll(r.Body) + assert.NoError(t, err) + r.Body.Close() + + authHeader := r.Header.Get("Authorization") + if hasContentLength { + assert.Contains(t, authHeader, "content-length") + } else { + assert.NotContains(t, authHeader, "content-length") + } + + assert.Equal(t, contentLength, int64(len(b))) + })) + + return server +} + +func TestBuildContentLength_ZeroBody(t *testing.T) { + server := setupContentLengthTestServer(t, false, 0) + + svc := s3.New(unit.Session, &aws.Config{ + Endpoint: aws.String(server.URL), + S3ForcePathStyle: aws.Bool(true), + DisableSSL: aws.Bool(true), + }) + _, err := svc.GetObject(&s3.GetObjectInput{ + Bucket: aws.String("bucketname"), + Key: aws.String("keyname"), + }) + + assert.NoError(t, err) +} + +func TestBuildContentLength_NegativeBody(t *testing.T) { + server := setupContentLengthTestServer(t, false, 0) + + svc := s3.New(unit.Session, &aws.Config{ + Endpoint: aws.String(server.URL), + S3ForcePathStyle: aws.Bool(true), + DisableSSL: aws.Bool(true), + }) + req, _ := svc.GetObjectRequest(&s3.GetObjectInput{ + Bucket: aws.String("bucketname"), + Key: aws.String("keyname"), + }) + + req.HTTPRequest.Header.Set("Content-Length", "-1") + + assert.NoError(t, req.Send()) +} + +func TestBuildContentLength_WithBody(t *testing.T) { + server := setupContentLengthTestServer(t, true, 1024) + + svc := s3.New(unit.Session, &aws.Config{ + Endpoint: aws.String(server.URL), + S3ForcePathStyle: aws.Bool(true), + DisableSSL: aws.Bool(true), + }) + _, err := svc.PutObject(&s3.PutObjectInput{ + Bucket: aws.String("bucketname"), + Key: aws.String("keyname"), + Body: bytes.NewReader(make([]byte, 1024)), + }) + + assert.NoError(t, err) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go new file mode 100644 index 000000000..7d50b1557 --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go @@ -0,0 +1,17 @@ +package corehandlers + +import "github.com/aws/aws-sdk-go/aws/request" + +// ValidateParametersHandler is a request handler to validate the input parameters. +// Validating parameters only has meaning if done prior to the request being sent. +var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) { + if !r.ParamsFilled() { + return + } + + if v, ok := r.Params.(request.Validator); ok { + if err := v.Validate(); err != nil { + r.Error = err + } + } +}} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator_test.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator_test.go new file mode 100644 index 000000000..fb50a0bd8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator_test.go @@ -0,0 +1,254 @@ +package corehandlers_test + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/kinesis" + "github.com/stretchr/testify/require" +) + +var testSvc = func() *client.Client { + s := &client.Client{ + Config: aws.Config{}, + ClientInfo: metadata.ClientInfo{ + ServiceName: "mock-service", + APIVersion: "2015-01-01", + }, + } + return s +}() + +type StructShape struct { + _ struct{} `type:"structure"` + + RequiredList []*ConditionalStructShape `required:"true"` + RequiredMap map[string]*ConditionalStructShape `required:"true"` + RequiredBool *bool `required:"true"` + OptionalStruct *ConditionalStructShape + + hiddenParameter *string +} + +func (s *StructShape) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StructShape"} + if s.RequiredList == nil { + invalidParams.Add(request.NewErrParamRequired("RequiredList")) + } + if s.RequiredMap == nil { + invalidParams.Add(request.NewErrParamRequired("RequiredMap")) + } + if s.RequiredBool == nil { + invalidParams.Add(request.NewErrParamRequired("RequiredBool")) + } + if s.RequiredList != nil { + for i, v := range s.RequiredList { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RequiredList", i), err.(request.ErrInvalidParams)) + } + } + } + if s.RequiredMap != nil { + for i, v := range s.RequiredMap { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RequiredMap", i), err.(request.ErrInvalidParams)) + } + } + } + if s.OptionalStruct != nil { + if err := s.OptionalStruct.Validate(); err != nil { + invalidParams.AddNested("OptionalStruct", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ConditionalStructShape struct { + _ struct{} `type:"structure"` + + Name *string `required:"true"` +} + +func (s *ConditionalStructShape) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ConditionalStructShape"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +func TestNoErrors(t *testing.T) { + input := &StructShape{ + RequiredList: 
[]*ConditionalStructShape{}, + RequiredMap: map[string]*ConditionalStructShape{ + "key1": {Name: aws.String("Name")}, + "key2": {Name: aws.String("Name")}, + }, + RequiredBool: aws.Bool(true), + OptionalStruct: &ConditionalStructShape{Name: aws.String("Name")}, + } + + req := testSvc.NewRequest(&request.Operation{}, input, nil) + corehandlers.ValidateParametersHandler.Fn(req) + require.NoError(t, req.Error) +} + +func TestMissingRequiredParameters(t *testing.T) { + input := &StructShape{} + req := testSvc.NewRequest(&request.Operation{}, input, nil) + corehandlers.ValidateParametersHandler.Fn(req) + + require.Error(t, req.Error) + assert.Equal(t, "InvalidParameter", req.Error.(awserr.Error).Code()) + assert.Equal(t, "3 validation error(s) found.", req.Error.(awserr.Error).Message()) + + errs := req.Error.(awserr.BatchedErrors).OrigErrs() + assert.Len(t, errs, 3) + assert.Equal(t, "ParamRequiredError: missing required field, StructShape.RequiredList.", errs[0].Error()) + assert.Equal(t, "ParamRequiredError: missing required field, StructShape.RequiredMap.", errs[1].Error()) + assert.Equal(t, "ParamRequiredError: missing required field, StructShape.RequiredBool.", errs[2].Error()) + + assert.Equal(t, "InvalidParameter: 3 validation error(s) found.\n- missing required field, StructShape.RequiredList.\n- missing required field, StructShape.RequiredMap.\n- missing required field, StructShape.RequiredBool.\n", req.Error.Error()) +} + +func TestNestedMissingRequiredParameters(t *testing.T) { + input := &StructShape{ + RequiredList: []*ConditionalStructShape{{}}, + RequiredMap: map[string]*ConditionalStructShape{ + "key1": {Name: aws.String("Name")}, + "key2": {}, + }, + RequiredBool: aws.Bool(true), + OptionalStruct: &ConditionalStructShape{}, + } + + req := testSvc.NewRequest(&request.Operation{}, input, nil) + corehandlers.ValidateParametersHandler.Fn(req) + + require.Error(t, req.Error) + assert.Equal(t, "InvalidParameter", req.Error.(awserr.Error).Code()) + assert.Equal(t, "3 validation error(s) found.", req.Error.(awserr.Error).Message()) + + errs := req.Error.(awserr.BatchedErrors).OrigErrs() + assert.Len(t, errs, 3) + assert.Equal(t, "ParamRequiredError: missing required field, StructShape.RequiredList[0].Name.", errs[0].Error()) + assert.Equal(t, "ParamRequiredError: missing required field, StructShape.RequiredMap[key2].Name.", errs[1].Error()) + assert.Equal(t, "ParamRequiredError: missing required field, StructShape.OptionalStruct.Name.", errs[2].Error()) +} + +type testInput struct { + StringField *string `min:"5"` + ListField []string `min:"3"` + MapField map[string]string `min:"4"` +} + +func (s testInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "testInput"} + if s.StringField != nil && len(*s.StringField) < 5 { + invalidParams.Add(request.NewErrParamMinLen("StringField", 5)) + } + if s.ListField != nil && len(s.ListField) < 3 { + invalidParams.Add(request.NewErrParamMinLen("ListField", 3)) + } + if s.MapField != nil && len(s.MapField) < 4 { + invalidParams.Add(request.NewErrParamMinLen("MapField", 4)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +var testsFieldMin = []struct { + err awserr.Error + in testInput +}{ + { + err: func() awserr.Error { + invalidParams := request.ErrInvalidParams{Context: "testInput"} + invalidParams.Add(request.NewErrParamMinLen("StringField", 5)) + return invalidParams + }(), + in: testInput{StringField: aws.String("abcd")}, + }, + { + err: func() awserr.Error { + invalidParams := 
request.ErrInvalidParams{Context: "testInput"}
+			invalidParams.Add(request.NewErrParamMinLen("StringField", 5))
+			invalidParams.Add(request.NewErrParamMinLen("ListField", 3))
+			return invalidParams
+		}(),
+		in: testInput{StringField: aws.String("abcd"), ListField: []string{"a", "b"}},
+	},
+	{
+		err: func() awserr.Error {
+			invalidParams := request.ErrInvalidParams{Context: "testInput"}
+			invalidParams.Add(request.NewErrParamMinLen("StringField", 5))
+			invalidParams.Add(request.NewErrParamMinLen("ListField", 3))
+			invalidParams.Add(request.NewErrParamMinLen("MapField", 4))
+			return invalidParams
+		}(),
+		in: testInput{StringField: aws.String("abcd"), ListField: []string{"a", "b"}, MapField: map[string]string{"a": "a", "b": "b"}},
+	},
+	{
+		err: nil,
+		in: testInput{StringField: aws.String("abcde"),
+			ListField: []string{"a", "b", "c"}, MapField: map[string]string{"a": "a", "b": "b", "c": "c", "d": "d"}},
+	},
+}
+
+func TestValidateFieldMinParameter(t *testing.T) {
+	for i, c := range testsFieldMin {
+		req := testSvc.NewRequest(&request.Operation{}, &c.in, nil)
+		corehandlers.ValidateParametersHandler.Fn(req)
+
+		assert.Equal(t, c.err, req.Error, "%d case failed", i)
+	}
+}
+
+func BenchmarkValidateAny(b *testing.B) {
+	input := &kinesis.PutRecordsInput{
+		StreamName: aws.String("stream"),
+	}
+	for i := 0; i < 100; i++ {
+		record := &kinesis.PutRecordsRequestEntry{
+			Data:         make([]byte, 10000),
+			PartitionKey: aws.String("partition"),
+		}
+		input.Records = append(input.Records, record)
+	}
+
+	req, _ := kinesis.New(session.New()).PutRecordsRequest(input)
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		corehandlers.ValidateParametersHandler.Fn(req)
+		if err := req.Error; err != nil {
+			b.Fatalf("validation failed: %v", err)
+		}
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
new file mode 100644
index 000000000..857311f64
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
@@ -0,0 +1,100 @@
+package credentials
+
+import (
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+var (
+	// ErrNoValidProvidersFoundInChain is returned when there are no valid
+	// providers in the ChainProvider.
+	//
+	// This has been deprecated. For verbose error messaging set
+	// aws.Config.CredentialsChainVerboseErrors to true
+	//
+	// @readonly
+	ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders",
+		`no valid providers in chain. Deprecated.
+	For verbose messaging see aws.Config.CredentialsChainVerboseErrors`,
+		nil)
+)
+
+// A ChainProvider will search for a provider which returns credentials
+// and cache that provider until Retrieve is called again.
+//
+// The ChainProvider provides a way of chaining multiple providers together
+// which will pick the first available using priority order of the Providers
+// in the list.
+//
+// If none of the Providers retrieve valid credentials Value, ChainProvider's
+// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
+//
+// If a Provider is found which returns valid credentials Value ChainProvider
+// will cache that Provider for all calls to IsExpired(), until Retrieve is
+// called again.
+//
+// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider.
+// In this example EnvProvider will first check if any credentials are available
+// via the environment variables. If there are none ChainProvider will check
+// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider
+// does not return any credentials ChainProvider will return the error
+// ErrNoValidProvidersFoundInChain
+//
+//     creds := NewChainCredentials(
+//         []Provider{
+//             &EnvProvider{},
+//             &EC2RoleProvider{
+//                 Client: ec2metadata.New(sess),
+//             },
+//         })
+//
+//     // Usage of ChainCredentials with aws.Config
+//     svc := ec2.New(&aws.Config{Credentials: creds})
+//
+type ChainProvider struct {
+	Providers     []Provider
+	curr          Provider
+	VerboseErrors bool
+}
+
+// NewChainCredentials returns a pointer to a new Credentials object
+// wrapping a chain of providers.
+func NewChainCredentials(providers []Provider) *Credentials {
+	return NewCredentials(&ChainProvider{
+		Providers: append([]Provider{}, providers...),
+	})
+}
+
+// Retrieve returns the credentials value, or an error if no provider returned
+// without error.
+//
+// If a provider is found it will be cached and any calls to IsExpired()
+// will return the expired state of the cached provider.
+func (c *ChainProvider) Retrieve() (Value, error) {
+	var errs []error
+	for _, p := range c.Providers {
+		creds, err := p.Retrieve()
+		if err == nil {
+			c.curr = p
+			return creds, nil
+		}
+		errs = append(errs, err)
+	}
+	c.curr = nil
+
+	var err error
+	err = ErrNoValidProvidersFoundInChain
+	if c.VerboseErrors {
+		err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs)
+	}
+	return Value{}, err
+}
+
+// IsExpired will return the expired state of the currently cached provider
+// if there is one. If there is no current provider, true will be returned.
+func (c *ChainProvider) IsExpired() bool {
+	if c.curr != nil {
+		return c.curr.IsExpired()
+	}
+
+	return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider_test.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider_test.go
new file mode 100644
index 000000000..3b393a2ed
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider_test.go
@@ -0,0 +1,154 @@
+package credentials
+
+import (
+	"testing"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/stretchr/testify/assert"
+)
+
+type secondStubProvider struct {
+	creds   Value
+	expired bool
+	err     error
+}
+
+func (s *secondStubProvider) Retrieve() (Value, error) {
+	s.expired = false
+	s.creds.ProviderName = "secondStubProvider"
+	return s.creds, s.err
+}
+func (s *secondStubProvider) IsExpired() bool {
+	return s.expired
+}
+
+func TestChainProviderWithNames(t *testing.T) {
+	p := &ChainProvider{
+		Providers: []Provider{
+			&stubProvider{err: awserr.New("FirstError", "first provider error", nil)},
+			&stubProvider{err: awserr.New("SecondError", "second provider error", nil)},
+			&secondStubProvider{
+				creds: Value{
+					AccessKeyID:     "AKIF",
+					SecretAccessKey: "NOSECRET",
+					SessionToken:    "",
+				},
+			},
+			&stubProvider{
+				creds: Value{
+					AccessKeyID:     "AKID",
+					SecretAccessKey: "SECRET",
+					SessionToken:    "",
+				},
+			},
+		},
+	}
+
+	creds, err := p.Retrieve()
+	assert.Nil(t, err, "Expect no error")
+	assert.Equal(t, "secondStubProvider", creds.ProviderName, "Expect provider name to match")
+
+	// Also check credentials
+	assert.Equal(t, "AKIF", creds.AccessKeyID, "Expect access key ID to match")
+	assert.Equal(t, "NOSECRET", creds.SecretAccessKey, "Expect secret access key to match")
+	assert.Empty(t, creds.SessionToken, "Expect session token to be empty")
+}
+
+func TestChainProviderGet(t *testing.T) {
+	p := &ChainProvider{
+ Providers: []Provider{ + &stubProvider{err: awserr.New("FirstError", "first provider error", nil)}, + &stubProvider{err: awserr.New("SecondError", "second provider error", nil)}, + &stubProvider{ + creds: Value{ + AccessKeyID: "AKID", + SecretAccessKey: "SECRET", + SessionToken: "", + }, + }, + }, + } + + creds, err := p.Retrieve() + assert.Nil(t, err, "Expect no error") + assert.Equal(t, "AKID", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "SECRET", creds.SecretAccessKey, "Expect secret access key to match") + assert.Empty(t, creds.SessionToken, "Expect session token to be empty") +} + +func TestChainProviderIsExpired(t *testing.T) { + stubProvider := &stubProvider{expired: true} + p := &ChainProvider{ + Providers: []Provider{ + stubProvider, + }, + } + + assert.True(t, p.IsExpired(), "Expect expired to be true before any Retrieve") + _, err := p.Retrieve() + assert.Nil(t, err, "Expect no error") + assert.False(t, p.IsExpired(), "Expect not expired after retrieve") + + stubProvider.expired = true + assert.True(t, p.IsExpired(), "Expect return of expired provider") + + _, err = p.Retrieve() + assert.False(t, p.IsExpired(), "Expect not expired after retrieve") +} + +func TestChainProviderWithNoProvider(t *testing.T) { + p := &ChainProvider{ + Providers: []Provider{}, + } + + assert.True(t, p.IsExpired(), "Expect expired with no providers") + _, err := p.Retrieve() + assert.Equal(t, + ErrNoValidProvidersFoundInChain, + err, + "Expect no providers error returned") +} + +func TestChainProviderWithNoValidProvider(t *testing.T) { + errs := []error{ + awserr.New("FirstError", "first provider error", nil), + awserr.New("SecondError", "second provider error", nil), + } + p := &ChainProvider{ + Providers: []Provider{ + &stubProvider{err: errs[0]}, + &stubProvider{err: errs[1]}, + }, + } + + assert.True(t, p.IsExpired(), "Expect expired with no providers") + _, err := p.Retrieve() + + assert.Equal(t, + ErrNoValidProvidersFoundInChain, + err, + "Expect no providers error returned") +} + +func TestChainProviderWithNoValidProviderWithVerboseEnabled(t *testing.T) { + errs := []error{ + awserr.New("FirstError", "first provider error", nil), + awserr.New("SecondError", "second provider error", nil), + } + p := &ChainProvider{ + VerboseErrors: true, + Providers: []Provider{ + &stubProvider{err: errs[0]}, + &stubProvider{err: errs[1]}, + }, + } + + assert.True(t, p.IsExpired(), "Expect expired with no providers") + _, err := p.Retrieve() + + assert.Equal(t, + awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs), + err, + "Expect no providers error returned") +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go new file mode 100644 index 000000000..7b8ebf5f9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go @@ -0,0 +1,223 @@ +// Package credentials provides credential retrieval and management +// +// The Credentials is the primary method of getting access to and managing +// credentials Values. Using dependency injection retrieval of the credential +// values is handled by a object which satisfies the Provider interface. +// +// By default the Credentials.Get() will cache the successful result of a +// Provider's Retrieve() until Provider.IsExpired() returns true. At which +// point Credentials will call Provider's Retrieve() to get new credential Value. 
+// +// The Provider is responsible for determining when credentials Value have expired. +// It is also important to note that Credentials will always call Retrieve the +// first time Credentials.Get() is called. +// +// Example of using the environment variable credentials. +// +// creds := NewEnvCredentials() +// +// // Retrieve the credentials value +// credValue, err := creds.Get() +// if err != nil { +// // handle error +// } +// +// Example of forcing credentials to expire and be refreshed on the next Get(). +// This may be helpful to proactively expire credentials and refresh them sooner +// than they would naturally expire on their own. +// +// creds := NewCredentials(&EC2RoleProvider{}) +// creds.Expire() +// credsValue, err := creds.Get() +// // New credentials will be retrieved instead of from cache. +// +// +// Custom Provider +// +// Each Provider built into this package also provides a helper method to generate +// a Credentials pointer setup with the provider. To use a custom Provider just +// create a type which satisfies the Provider interface and pass it to the +// NewCredentials method. +// +// type MyProvider struct{} +// func (m *MyProvider) Retrieve() (Value, error) {...} +// func (m *MyProvider) IsExpired() bool {...} +// +// creds := NewCredentials(&MyProvider{}) +// credValue, err := creds.Get() +// +package credentials + +import ( + "sync" + "time" +) + +// AnonymousCredentials is an empty Credential object that can be used as +// dummy placeholder credentials for requests that do not need signed. +// +// This Credentials can be used to configure a service to not sign requests +// when making service API calls. For example, when accessing public +// s3 buckets. +// +// svc := s3.New(&aws.Config{Credentials: AnonymousCredentials}) +// // Access public S3 buckets. +// +// @readonly +var AnonymousCredentials = NewStaticCredentials("", "", "") + +// A Value is the AWS credentials value for individual credential fields. +type Value struct { + // AWS Access key ID + AccessKeyID string + + // AWS Secret Access Key + SecretAccessKey string + + // AWS Session Token + SessionToken string + + // Provider used to get credentials + ProviderName string +} + +// A Provider is the interface for any component which will provide credentials +// Value. A provider is required to manage its own Expired state, and what to +// be expired means. +// +// The Provider should not need to implement its own mutexes, because +// that will be managed by Credentials. +type Provider interface { + // Refresh returns nil if it successfully retrieved the value. + // Error is returned if the value were not obtainable, or empty. + Retrieve() (Value, error) + + // IsExpired returns if the credentials are no longer valid, and need + // to be retrieved. + IsExpired() bool +} + +// A Expiry provides shared expiration logic to be used by credentials +// providers to implement expiry functionality. +// +// The best method to use this struct is as an anonymous field within the +// provider's struct. +// +// Example: +// type EC2RoleProvider struct { +// Expiry +// ... +// } +type Expiry struct { + // The date/time when to expire on + expiration time.Time + + // If set will be used by IsExpired to determine the current time. + // Defaults to time.Now if CurrentTime is not set. Available for testing + // to be able to mock out the current time. + CurrentTime func() time.Time +} + +// SetExpiration sets the expiration IsExpired will check when called. 
+// +// If window is greater than 0 the expiration time will be reduced by the +// window value. +// +// Using a window is helpful to trigger credentials to expire sooner than +// the expiration time given to ensure no requests are made with expired +// tokens. +func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { + e.expiration = expiration + if window > 0 { + e.expiration = e.expiration.Add(-window) + } +} + +// IsExpired returns if the credentials are expired. +func (e *Expiry) IsExpired() bool { + if e.CurrentTime == nil { + e.CurrentTime = time.Now + } + return e.expiration.Before(e.CurrentTime()) +} + +// A Credentials provides synchronous safe retrieval of AWS credentials Value. +// Credentials will cache the credentials value until they expire. Once the value +// expires the next Get will attempt to retrieve valid credentials. +// +// Credentials is safe to use across multiple goroutines and will manage the +// synchronous state so the Providers do not need to implement their own +// synchronization. +// +// The first Credentials.Get() will always call Provider.Retrieve() to get the +// first instance of the credentials Value. All calls to Get() after that +// will return the cached credentials Value until IsExpired() returns true. +type Credentials struct { + creds Value + forceRefresh bool + m sync.Mutex + + provider Provider +} + +// NewCredentials returns a pointer to a new Credentials with the provider set. +func NewCredentials(provider Provider) *Credentials { + return &Credentials{ + provider: provider, + forceRefresh: true, + } +} + +// Get returns the credentials value, or error if the credentials Value failed +// to be retrieved. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +func (c *Credentials) Get() (Value, error) { + c.m.Lock() + defer c.m.Unlock() + + if c.isExpired() { + creds, err := c.provider.Retrieve() + if err != nil { + return Value{}, err + } + c.creds = creds + c.forceRefresh = false + } + + return c.creds, nil +} + +// Expire expires the credentials and forces them to be retrieved on the +// next call to Get(). +// +// This will override the Provider's expired state, and force Credentials +// to call the Provider's Retrieve(). +func (c *Credentials) Expire() { + c.m.Lock() + defer c.m.Unlock() + + c.forceRefresh = true +} + +// IsExpired returns if the credentials are no longer valid, and need +// to be retrieved. +// +// If the Credentials were forced to be expired with Expire() this will +// reflect that override. +func (c *Credentials) IsExpired() bool { + c.m.Lock() + defer c.m.Unlock() + + return c.isExpired() +} + +// isExpired helper method wrapping the definition of expired credentials. 
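Putting the pieces together: a custom Provider only has to implement Retrieve and IsExpired, and Credentials supplies the caching, locking, and forced refresh. A toy provider as a sketch (the fixedProvider name and keys are made up for illustration):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/credentials"
    )

    // fixedProvider returns hard-coded keys and never expires.
    // A real provider would fetch credentials and track their expiry.
    type fixedProvider struct{}

    func (p *fixedProvider) Retrieve() (credentials.Value, error) {
        return credentials.Value{
            AccessKeyID:     "AKID",
            SecretAccessKey: "SECRET",
            ProviderName:    "fixedProvider",
        }, nil
    }

    func (p *fixedProvider) IsExpired() bool { return false }

    func main() {
        creds := credentials.NewCredentials(&fixedProvider{})

        v, err := creds.Get() // the first Get always calls Retrieve
        fmt.Println(v.ProviderName, err)

        creds.Expire() // force the next Get to call Retrieve again
    }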
+func (c *Credentials) isExpired() bool { + return c.forceRefresh || c.provider.IsExpired() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials_test.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials_test.go new file mode 100644 index 000000000..7b79ba985 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials_test.go @@ -0,0 +1,73 @@ +package credentials + +import ( + "testing" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/stretchr/testify/assert" +) + +type stubProvider struct { + creds Value + expired bool + err error +} + +func (s *stubProvider) Retrieve() (Value, error) { + s.expired = false + s.creds.ProviderName = "stubProvider" + return s.creds, s.err +} +func (s *stubProvider) IsExpired() bool { + return s.expired +} + +func TestCredentialsGet(t *testing.T) { + c := NewCredentials(&stubProvider{ + creds: Value{ + AccessKeyID: "AKID", + SecretAccessKey: "SECRET", + SessionToken: "", + }, + expired: true, + }) + + creds, err := c.Get() + assert.Nil(t, err, "Expected no error") + assert.Equal(t, "AKID", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "SECRET", creds.SecretAccessKey, "Expect secret access key to match") + assert.Empty(t, creds.SessionToken, "Expect session token to be empty") +} + +func TestCredentialsGetWithError(t *testing.T) { + c := NewCredentials(&stubProvider{err: awserr.New("provider error", "", nil), expired: true}) + + _, err := c.Get() + assert.Equal(t, "provider error", err.(awserr.Error).Code(), "Expected provider error") +} + +func TestCredentialsExpire(t *testing.T) { + stub := &stubProvider{} + c := NewCredentials(stub) + + stub.expired = false + assert.True(t, c.IsExpired(), "Expected to start out expired") + c.Expire() + assert.True(t, c.IsExpired(), "Expected to be expired") + + c.forceRefresh = false + assert.False(t, c.IsExpired(), "Expected not to be expired") + + stub.expired = true + assert.True(t, c.IsExpired(), "Expected to be expired") +} + +func TestCredentialsGetWithProviderName(t *testing.T) { + stub := &stubProvider{} + + c := NewCredentials(stub) + + creds, err := c.Get() + assert.Nil(t, err, "Expected no error") + assert.Equal(t, creds.ProviderName, "stubProvider", "Expected provider name to match") +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go new file mode 100644 index 000000000..aa9d689a0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go @@ -0,0 +1,178 @@ +package ec2rolecreds + +import ( + "bufio" + "encoding/json" + "fmt" + "path" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/ec2metadata" +) + +// ProviderName provides a name of EC2Role provider +const ProviderName = "EC2RoleProvider" + +// A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if +// those credentials are expired. +// +// Example how to configure the EC2RoleProvider with custom http Client, Endpoint +// or ExpiryWindow +// +// p := &ec2rolecreds.EC2RoleProvider{ +// // Pass in a custom timeout to be used when requesting +// // IAM EC2 Role credentials. +// Client: ec2metadata.New(sess, aws.Config{ +// HTTPClient: &http.Client{Timeout: 10 * time.Second}, +// }), +// +// // Do not use early expiry of credentials. 
+//		// specified, the credentials will expire early.
+//		ExpiryWindow: 0,
+//	}
+type EC2RoleProvider struct {
+	credentials.Expiry
+
+	// Required EC2Metadata client to use when connecting to EC2 metadata service.
+	Client *ec2metadata.EC2Metadata
+
+	// ExpiryWindow will allow the credentials to trigger refreshing prior to
+	// the credentials actually expiring. This is beneficial so race conditions
+	// with expiring credentials do not cause requests to fail unexpectedly
+	// due to ExpiredTokenException exceptions.
+	//
+	// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+	// 10 seconds before the credentials are actually expired.
+	//
+	// If ExpiryWindow is 0 or less it will be ignored.
+	ExpiryWindow time.Duration
+}
+
+// NewCredentials returns a pointer to a new Credentials object wrapping
+// the EC2RoleProvider. Takes a ConfigProvider to create an EC2Metadata client.
+// The ConfigProvider is satisfied by the session.Session type.
+func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials {
+	p := &EC2RoleProvider{
+		Client: ec2metadata.New(c),
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return credentials.NewCredentials(p)
+}
+
+// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping
+// the EC2RoleProvider. Takes an EC2Metadata client to use when connecting to the
+// EC2 metadata service.
+func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials {
+	p := &EC2RoleProvider{
+		Client: client,
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return credentials.NewCredentials(p)
+}
+
+// Retrieve retrieves credentials from the EC2 service.
+// An error will be returned if the request fails, or if the desired
+// credentials cannot be extracted.
+func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
+	credsList, err := requestCredList(m.Client)
+	if err != nil {
+		return credentials.Value{ProviderName: ProviderName}, err
+	}
+
+	if len(credsList) == 0 {
+		return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil)
+	}
+	credsName := credsList[0]
+
+	roleCreds, err := requestCred(m.Client, credsName)
+	if err != nil {
+		return credentials.Value{ProviderName: ProviderName}, err
+	}
+
+	m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow)
+
+	return credentials.Value{
+		AccessKeyID:     roleCreds.AccessKeyID,
+		SecretAccessKey: roleCreds.SecretAccessKey,
+		SessionToken:    roleCreds.Token,
+		ProviderName:    ProviderName,
+	}, nil
+}
+
+// An ec2RoleCredRespBody provides the shape for unmarshalling credential
+// request responses.
+type ec2RoleCredRespBody struct {
+	// Success State
+	Expiration      time.Time
+	AccessKeyID     string
+	SecretAccessKey string
+	Token           string
+
+	// Error state
+	Code    string
+	Message string
+}
+
+const iamSecurityCredsPath = "/iam/security-credentials"
+
+// requestCredList requests a list of credentials from the EC2 service.
+// If there are no credentials, or there is an error making or receiving
+// the request, an error will be returned.
+func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
+	resp, err := client.GetMetadata(iamSecurityCredsPath)
+	if err != nil {
+		return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err)
+	}
+
+	credsList := []string{}
+	s := bufio.NewScanner(strings.NewReader(resp))
+	for s.Scan() {
+		credsList = append(credsList, s.Text())
+	}
+
+	if err := s.Err(); err != nil {
+		return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err)
+	}
+
+	return credsList, nil
+}
+
+// requestCred requests the credentials for a specific credentials name from
+// the EC2 service.
+//
+// If the credentials cannot be found, or there is an error reading the
+// response, an error will be returned.
+func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
+	resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName))
+	if err != nil {
+		return ec2RoleCredRespBody{},
+			awserr.New("EC2RoleRequestError",
+				fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName),
+				err)
+	}
+
+	respCreds := ec2RoleCredRespBody{}
+	if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil {
+		return ec2RoleCredRespBody{},
+			awserr.New("SerializationError",
+				fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName),
+				err)
+	}
+
+	if respCreds.Code != "Success" {
+		// If an error code was returned something failed requesting the role.
+		return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil)
+	}
+
+	return respCreds, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider_test.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider_test.go
new file mode 100644
index 000000000..da3d8ed3e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider_test.go
@@ -0,0 +1,159 @@
+package ec2rolecreds_test
+
+import (
+	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
+	"github.com/aws/aws-sdk-go/aws/ec2metadata"
+	"github.com/aws/aws-sdk-go/aws/session"
+)
+
+const credsRespTmpl = `{
+  "Code": "Success",
+  "Type": "AWS-HMAC",
+  "AccessKeyId" : "accessKey",
+  "SecretAccessKey" : "secret",
+  "Token" : "token",
+  "Expiration" : "%s",
+  "LastUpdated" : "2009-11-23T0:00:00Z"
+}`
+
+const credsFailRespTmpl = `{
+  "Code": "ErrorCode",
+  "Message": "ErrorMsg",
+  "LastUpdated": "2009-11-23T0:00:00Z"
+}`
+
+func initTestServer(expireOn string, failAssume bool) *httptest.Server {
+	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if r.URL.Path == "/latest/meta-data/iam/security-credentials" {
+			fmt.Fprintln(w, "RoleName")
+		} else if r.URL.Path == "/latest/meta-data/iam/security-credentials/RoleName" {
+			if failAssume {
+				fmt.Fprintf(w, credsFailRespTmpl)
+			} else {
+				fmt.Fprintf(w, credsRespTmpl, expireOn)
+			}
+		} else {
+			http.Error(w, "bad request", http.StatusBadRequest)
+		}
+	}))
+
+	return server
+}
+
+func TestEC2RoleProvider(t *testing.T) {
+	server := initTestServer("2014-12-16T01:51:37Z", false)
+	defer server.Close()
+
+	p := &ec2rolecreds.EC2RoleProvider{
+		Client: ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}),
+	}
+
+	creds, err := p.Retrieve()
+	assert.Nil(t, err, "Expect no error, %v", err)
+
+	assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match")
+	assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
+	assert.Equal(t, "token", creds.SessionToken, "Expect session token to match")
+}
+
+func TestEC2RoleProviderFailAssume(t *testing.T) {
+	server := initTestServer("2014-12-16T01:51:37Z", true)
+	defer server.Close()
+
+	p := &ec2rolecreds.EC2RoleProvider{
+		Client: ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}),
+	}
+
+	creds, err := p.Retrieve()
+	assert.Error(t, err, "Expect error")
+
+	e := err.(awserr.Error)
+	assert.Equal(t, "ErrorCode", e.Code())
+	assert.Equal(t, "ErrorMsg", e.Message())
+	assert.Nil(t, e.OrigErr())
+
+	assert.Equal(t, "", creds.AccessKeyID, "Expect access key ID to match")
+	assert.Equal(t, "", creds.SecretAccessKey, "Expect secret access key to match")
+	assert.Equal(t, "", creds.SessionToken, "Expect session token to match")
+}
+
+func TestEC2RoleProviderIsExpired(t *testing.T) {
+	server := initTestServer("2014-12-16T01:51:37Z", false)
+	defer server.Close()
+
+	p := &ec2rolecreds.EC2RoleProvider{
+		Client: ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}),
+	}
+	p.CurrentTime = func() time.Time {
+		return time.Date(2014, 12, 15, 21, 26, 0, 0, time.UTC)
+	}
+
+	assert.True(t, p.IsExpired(), "Expect creds to be expired before retrieve.")
+
+	_, err := p.Retrieve()
+	assert.Nil(t, err, "Expect no error, %v", err)
+
+	assert.False(t, p.IsExpired(), "Expect creds to not be expired after retrieve.")
+
+	p.CurrentTime = func() time.Time {
+		return time.Date(3014, 12, 15, 21, 26, 0, 0, time.UTC)
+	}
+
+	assert.True(t, p.IsExpired(), "Expect creds to be expired.")
+}
+
+func TestEC2RoleProviderExpiryWindowIsExpired(t *testing.T) {
+	server := initTestServer("2014-12-16T01:51:37Z", false)
+	defer server.Close()
+
+	p := &ec2rolecreds.EC2RoleProvider{
+		Client:       ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}),
+		ExpiryWindow: time.Hour * 1,
+	}
+	p.CurrentTime = func() time.Time {
+		return time.Date(2014, 12, 15, 0, 51, 37, 0, time.UTC)
+	}
+
+	assert.True(t, p.IsExpired(), "Expect creds to be expired before retrieve.")
+
+	_, err := p.Retrieve()
+	assert.Nil(t, err, "Expect no error, %v", err)
+
+	assert.False(t, p.IsExpired(), "Expect creds to not be expired after retrieve.")
+
+	p.CurrentTime = func() time.Time {
+		return time.Date(2014, 12, 16, 0, 55, 37, 0, time.UTC)
+	}
+
+	assert.True(t, p.IsExpired(), "Expect creds to be expired.")
+}
+
+func BenchmarkEC2RoleProvider(b *testing.B) {
+	server := initTestServer("2014-12-16T01:51:37Z", false)
+	defer server.Close()
+
+	p := &ec2rolecreds.EC2RoleProvider{
+		Client: ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}),
+	}
+	_, err := p.Retrieve()
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		if _, err := p.Retrieve(); err != nil {
+			b.Fatal(err)
+		}
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
new file mode 100644
index 000000000..96655bc46
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
@@ -0,0 +1,77 @@
+package credentials
+
+import (
+	"os"
+
"github.com/aws/aws-sdk-go/aws/awserr" +) + +// EnvProviderName provides a name of Env provider +const EnvProviderName = "EnvProvider" + +var ( + // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be + // found in the process's environment. + // + // @readonly + ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil) + + // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key + // can't be found in the process's environment. + // + // @readonly + ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil) +) + +// A EnvProvider retrieves credentials from the environment variables of the +// running process. Environment credentials never expire. +// +// Environment variables used: +// +// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY +// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY +type EnvProvider struct { + retrieved bool +} + +// NewEnvCredentials returns a pointer to a new Credentials object +// wrapping the environment variable provider. +func NewEnvCredentials() *Credentials { + return NewCredentials(&EnvProvider{}) +} + +// Retrieve retrieves the keys from the environment. +func (e *EnvProvider) Retrieve() (Value, error) { + e.retrieved = false + + id := os.Getenv("AWS_ACCESS_KEY_ID") + if id == "" { + id = os.Getenv("AWS_ACCESS_KEY") + } + + secret := os.Getenv("AWS_SECRET_ACCESS_KEY") + if secret == "" { + secret = os.Getenv("AWS_SECRET_KEY") + } + + if id == "" { + return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound + } + + if secret == "" { + return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound + } + + e.retrieved = true + return Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: os.Getenv("AWS_SESSION_TOKEN"), + ProviderName: EnvProviderName, + }, nil +} + +// IsExpired returns if the credentials have been retrieved. 
+func (e *EnvProvider) IsExpired() bool { + return !e.retrieved +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider_test.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider_test.go new file mode 100644 index 000000000..53f6ce256 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider_test.go @@ -0,0 +1,70 @@ +package credentials + +import ( + "github.com/stretchr/testify/assert" + "os" + "testing" +) + +func TestEnvProviderRetrieve(t *testing.T) { + os.Clearenv() + os.Setenv("AWS_ACCESS_KEY_ID", "access") + os.Setenv("AWS_SECRET_ACCESS_KEY", "secret") + os.Setenv("AWS_SESSION_TOKEN", "token") + + e := EnvProvider{} + creds, err := e.Retrieve() + assert.Nil(t, err, "Expect no error") + + assert.Equal(t, "access", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match") + assert.Equal(t, "token", creds.SessionToken, "Expect session token to match") +} + +func TestEnvProviderIsExpired(t *testing.T) { + os.Clearenv() + os.Setenv("AWS_ACCESS_KEY_ID", "access") + os.Setenv("AWS_SECRET_ACCESS_KEY", "secret") + os.Setenv("AWS_SESSION_TOKEN", "token") + + e := EnvProvider{} + + assert.True(t, e.IsExpired(), "Expect creds to be expired before retrieve.") + + _, err := e.Retrieve() + assert.Nil(t, err, "Expect no error") + + assert.False(t, e.IsExpired(), "Expect creds to not be expired after retrieve.") +} + +func TestEnvProviderNoAccessKeyID(t *testing.T) { + os.Clearenv() + os.Setenv("AWS_SECRET_ACCESS_KEY", "secret") + + e := EnvProvider{} + creds, err := e.Retrieve() + assert.Equal(t, ErrAccessKeyIDNotFound, err, "ErrAccessKeyIDNotFound expected, but was %#v error: %#v", creds, err) +} + +func TestEnvProviderNoSecretAccessKey(t *testing.T) { + os.Clearenv() + os.Setenv("AWS_ACCESS_KEY_ID", "access") + + e := EnvProvider{} + creds, err := e.Retrieve() + assert.Equal(t, ErrSecretAccessKeyNotFound, err, "ErrSecretAccessKeyNotFound expected, but was %#v error: %#v", creds, err) +} + +func TestEnvProviderAlternateNames(t *testing.T) { + os.Clearenv() + os.Setenv("AWS_ACCESS_KEY", "access") + os.Setenv("AWS_SECRET_KEY", "secret") + + e := EnvProvider{} + creds, err := e.Retrieve() + assert.Nil(t, err, "Expect no error") + + assert.Equal(t, "access", creds.AccessKeyID, "Expected access key ID") + assert.Equal(t, "secret", creds.SecretAccessKey, "Expected secret access key") + assert.Empty(t, creds.SessionToken, "Expected no token") +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini new file mode 100644 index 000000000..7fc91d9d2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini @@ -0,0 +1,12 @@ +[default] +aws_access_key_id = accessKey +aws_secret_access_key = secret +aws_session_token = token + +[no_token] +aws_access_key_id = accessKey +aws_secret_access_key = secret + +[with_colon] +aws_access_key_id: accessKey +aws_secret_access_key: secret diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go new file mode 100644 index 000000000..7fb7cbf0d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go @@ -0,0 +1,151 @@ +package credentials + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/go-ini/ini" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// 
SharedCredsProviderName provides the name of the SharedCreds provider
+const SharedCredsProviderName = "SharedCredentialsProvider"
+
+var (
+	// ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found.
+	//
+	// @readonly
+	ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil)
+)
+
+// A SharedCredentialsProvider retrieves credentials from the current user's home
+// directory, and keeps track if those credentials are expired.
+//
+// Profile ini file example: $HOME/.aws/credentials
+type SharedCredentialsProvider struct {
+	// Path to the shared credentials file.
+	//
+	// If empty, the provider will look for the "AWS_SHARED_CREDENTIALS_FILE"
+	// env variable. If the env value is empty, it will default to the current
+	// user's home directory.
+	// Linux/OSX: "$HOME/.aws/credentials"
+	// Windows:   "%USERPROFILE%\.aws\credentials"
+	Filename string
+
+	// AWS Profile to extract credentials from the shared credentials file. If
+	// empty, it will default to the environment variable "AWS_PROFILE", or to
+	// "default" if that environment variable is also not set.
+	Profile string
+
+	// retrieved states if the credentials have been successfully retrieved.
+	retrieved bool
+}
+
+// NewSharedCredentials returns a pointer to a new Credentials object
+// wrapping the Profile file provider.
+func NewSharedCredentials(filename, profile string) *Credentials {
+	return NewCredentials(&SharedCredentialsProvider{
+		Filename: filename,
+		Profile:  profile,
+	})
+}
+
+// Retrieve reads and extracts the shared credentials from the current
+// user's home directory.
+func (p *SharedCredentialsProvider) Retrieve() (Value, error) {
+	p.retrieved = false
+
+	filename, err := p.filename()
+	if err != nil {
+		return Value{ProviderName: SharedCredsProviderName}, err
+	}
+
+	creds, err := loadProfile(filename, p.profile())
+	if err != nil {
+		return Value{ProviderName: SharedCredsProviderName}, err
+	}
+
+	p.retrieved = true
+	return creds, nil
+}
+
+// IsExpired returns if the shared credentials have expired.
+func (p *SharedCredentialsProvider) IsExpired() bool {
+	return !p.retrieved
+}
+
+// loadProfile loads the given profile from the file pointed to by the shared
+// credentials filename. The credentials retrieved from the profile will be
+// returned, or an error if it fails to read from the file or the data is
+// invalid.
+func loadProfile(filename, profile string) (Value, error) { + config, err := ini.Load(filename) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err) + } + iniProfile, err := config.GetSection(profile) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", err) + } + + id, err := iniProfile.GetKey("aws_access_key_id") + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey", + fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename), + err) + } + + secret, err := iniProfile.GetKey("aws_secret_access_key") + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret", + fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename), + nil) + } + + // Default to empty string if not found + token := iniProfile.Key("aws_session_token") + + return Value{ + AccessKeyID: id.String(), + SecretAccessKey: secret.String(), + SessionToken: token.String(), + ProviderName: SharedCredsProviderName, + }, nil +} + +// filename returns the filename to use to read AWS shared credentials. +// +// Will return an error if the user's home directory path cannot be found. +func (p *SharedCredentialsProvider) filename() (string, error) { + if p.Filename == "" { + if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); p.Filename != "" { + return p.Filename, nil + } + + homeDir := os.Getenv("HOME") // *nix + if homeDir == "" { // Windows + homeDir = os.Getenv("USERPROFILE") + } + if homeDir == "" { + return "", ErrSharedCredentialsHomeNotFound + } + + p.Filename = filepath.Join(homeDir, ".aws", "credentials") + } + + return p.Filename, nil +} + +// profile returns the AWS shared credentials profile. If empty will read +// environment variable "AWS_PROFILE". If that is not set profile will +// return "default". 
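+//
+// A sketch of the resolution order described above:
+//
+//	// With AWS_PROFILE=staging exported and no explicit profile,
+//	// the "staging" profile is read from the default file location.
+//	creds := credentials.NewSharedCredentials("", "")
+//	// With neither Profile nor AWS_PROFILE set, "default" is used.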
+func (p *SharedCredentialsProvider) profile() string { + if p.Profile == "" { + p.Profile = os.Getenv("AWS_PROFILE") + } + if p.Profile == "" { + p.Profile = "default" + } + + return p.Profile +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider_test.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider_test.go new file mode 100644 index 000000000..6b4093a15 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider_test.go @@ -0,0 +1,116 @@ +package credentials + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSharedCredentialsProvider(t *testing.T) { + os.Clearenv() + + p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""} + creds, err := p.Retrieve() + assert.Nil(t, err, "Expect no error") + + assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match") + assert.Equal(t, "token", creds.SessionToken, "Expect session token to match") +} + +func TestSharedCredentialsProviderIsExpired(t *testing.T) { + os.Clearenv() + + p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""} + + assert.True(t, p.IsExpired(), "Expect creds to be expired before retrieve") + + _, err := p.Retrieve() + assert.Nil(t, err, "Expect no error") + + assert.False(t, p.IsExpired(), "Expect creds to not be expired after retrieve") +} + +func TestSharedCredentialsProviderWithAWS_SHARED_CREDENTIALS_FILE(t *testing.T) { + os.Clearenv() + os.Setenv("AWS_SHARED_CREDENTIALS_FILE", "example.ini") + p := SharedCredentialsProvider{} + creds, err := p.Retrieve() + + assert.Nil(t, err, "Expect no error") + + assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match") + assert.Equal(t, "token", creds.SessionToken, "Expect session token to match") +} + +func TestSharedCredentialsProviderWithAWS_SHARED_CREDENTIALS_FILEAbsPath(t *testing.T) { + os.Clearenv() + wd, err := os.Getwd() + assert.NoError(t, err) + os.Setenv("AWS_SHARED_CREDENTIALS_FILE", filepath.Join(wd, "example.ini")) + p := SharedCredentialsProvider{} + creds, err := p.Retrieve() + assert.Nil(t, err, "Expect no error") + + assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match") + assert.Equal(t, "token", creds.SessionToken, "Expect session token to match") +} + +func TestSharedCredentialsProviderWithAWS_PROFILE(t *testing.T) { + os.Clearenv() + os.Setenv("AWS_PROFILE", "no_token") + + p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""} + creds, err := p.Retrieve() + assert.Nil(t, err, "Expect no error") + + assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match") + assert.Empty(t, creds.SessionToken, "Expect no token") +} + +func TestSharedCredentialsProviderWithoutTokenFromProfile(t *testing.T) { + os.Clearenv() + + p := SharedCredentialsProvider{Filename: "example.ini", Profile: "no_token"} + creds, err := p.Retrieve() + assert.Nil(t, err, "Expect no error") + + assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match") + 
assert.Empty(t, creds.SessionToken, "Expect no token")
+}
+
+func TestSharedCredentialsProviderColonInCredFile(t *testing.T) {
+	os.Clearenv()
+
+	p := SharedCredentialsProvider{Filename: "example.ini", Profile: "with_colon"}
+	creds, err := p.Retrieve()
+	assert.Nil(t, err, "Expect no error")
+
+	assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match")
+	assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
+	assert.Empty(t, creds.SessionToken, "Expect no token")
+}
+
+func BenchmarkSharedCredentialsProvider(b *testing.B) {
+	os.Clearenv()
+
+	p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""}
+	_, err := p.Retrieve()
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		_, err := p.Retrieve()
+		if err != nil {
+			b.Fatal(err)
+		}
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
new file mode 100644
index 000000000..6f075604e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
@@ -0,0 +1,48 @@
+package credentials
+
+import (
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// StaticProviderName provides the name of the Static provider
+const StaticProviderName = "StaticProvider"
+
+var (
+	// ErrStaticCredentialsEmpty is emitted when static credentials are empty.
+	//
+	// @readonly
+	ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
+)
+
+// A StaticProvider is a set of credentials which are set programmatically,
+// and will never expire.
+type StaticProvider struct {
+	Value
+}
+
+// NewStaticCredentials returns a pointer to a new Credentials object
+// wrapping a static credentials value provider.
+func NewStaticCredentials(id, secret, token string) *Credentials {
+	return NewCredentials(&StaticProvider{Value: Value{
+		AccessKeyID:     id,
+		SecretAccessKey: secret,
+		SessionToken:    token,
+	}})
+}
+
+// Retrieve returns the credentials, or an error if the credentials are invalid.
+func (s *StaticProvider) Retrieve() (Value, error) {
+	if s.AccessKeyID == "" || s.SecretAccessKey == "" {
+		return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty
+	}
+
+	s.Value.ProviderName = StaticProviderName
+	return s.Value, nil
+}
+
+// IsExpired returns if the credentials are expired.
+//
+// For StaticProvider, the credentials never expire.
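+//
+// A minimal usage sketch (the key values are placeholders):
+//
+//	creds := credentials.NewStaticCredentials("AKID", "SECRET", "")
+//	v, err := creds.Get() // IsExpired() will always report false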
+func (s *StaticProvider) IsExpired() bool { + return false +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider_test.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider_test.go new file mode 100644 index 000000000..ea0123696 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider_test.go @@ -0,0 +1,34 @@ +package credentials + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestStaticProviderGet(t *testing.T) { + s := StaticProvider{ + Value: Value{ + AccessKeyID: "AKID", + SecretAccessKey: "SECRET", + SessionToken: "", + }, + } + + creds, err := s.Retrieve() + assert.Nil(t, err, "Expect no error") + assert.Equal(t, "AKID", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "SECRET", creds.SecretAccessKey, "Expect secret access key to match") + assert.Empty(t, creds.SessionToken, "Expect no session token") +} + +func TestStaticProviderIsExpired(t *testing.T) { + s := StaticProvider{ + Value: Value{ + AccessKeyID: "AKID", + SecretAccessKey: "SECRET", + SessionToken: "", + }, + } + + assert.False(t, s.IsExpired(), "Expect static credentials to never expire") +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go new file mode 100644 index 000000000..30c847ae2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go @@ -0,0 +1,161 @@ +// Package stscreds are credential Providers to retrieve STS AWS credentials. +// +// STS provides multiple ways to retrieve credentials which can be used when making +// future AWS service API operation calls. +package stscreds + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/service/sts" +) + +// ProviderName provides a name of AssumeRole provider +const ProviderName = "AssumeRoleProvider" + +// AssumeRoler represents the minimal subset of the STS client API used by this provider. +type AssumeRoler interface { + AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) +} + +// DefaultDuration is the default amount of time in minutes that the credentials +// will be valid for. +var DefaultDuration = time.Duration(15) * time.Minute + +// AssumeRoleProvider retrieves temporary credentials from the STS service, and +// keeps track of their expiration time. This provider must be used explicitly, +// as it is not included in the credentials chain. +type AssumeRoleProvider struct { + credentials.Expiry + + // STS client to make assume role request with. + Client AssumeRoler + + // Role to be assumed. + RoleARN string + + // Session name, if you wish to reuse the credentials elsewhere. + RoleSessionName string + + // Expiry duration of the STS credentials. Defaults to 15 minutes if not set. + Duration time.Duration + + // Optional ExternalID to pass along, defaults to nil if not set. + ExternalID *string + + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. 
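+	//
+	// For example, a caller could scope down the assumed role's permissions
+	// (a sketch; the role ARN is a placeholder and sess is assumed to be a
+	// *session.Session):
+	//
+	//	creds := stscreds.NewCredentials(sess, "arn:aws:iam::123456789012:role/demo",
+	//		func(p *stscreds.AssumeRoleProvider) {
+	//			p.Policy = aws.String(policyJSON) // policyJSON: an IAM policy document
+	//		})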
+	Policy *string
+
+	// The identification number of the MFA device that is associated with the user
+	// who is making the AssumeRole call. Specify this value if the trust policy
+	// of the role being assumed includes a condition that requires MFA authentication.
+	// The value is either the serial number for a hardware device (such as GAHT12345678)
+	// or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+	SerialNumber *string
+
+	// The value provided by the MFA device, if the trust policy of the role being
+	// assumed requires MFA (that is, if the policy includes a condition that tests
+	// for MFA). If the role being assumed requires MFA and if the TokenCode value
+	// is missing or expired, the AssumeRole call returns an "access denied" error.
+	TokenCode *string
+
+	// ExpiryWindow will allow the credentials to trigger refreshing prior to
+	// the credentials actually expiring. This is beneficial so race conditions
+	// with expiring credentials do not cause requests to fail unexpectedly
+	// due to ExpiredTokenException exceptions.
+	//
+	// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+	// 10 seconds before the credentials are actually expired.
+	//
+	// If ExpiryWindow is 0 or less it will be ignored.
+	ExpiryWindow time.Duration
+}
+
+// NewCredentials returns a pointer to a new Credentials object wrapping the
+// AssumeRoleProvider. The credentials will expire every 15 minutes and the
+// role session will be named after a nanosecond timestamp of this operation.
+//
+// Takes a Config provider to create the STS client. The ConfigProvider is
+// satisfied by the session.Session type.
+func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
+	p := &AssumeRoleProvider{
+		Client:   sts.New(c),
+		RoleARN:  roleARN,
+		Duration: DefaultDuration,
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return credentials.NewCredentials(p)
+}
+
+// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the
+// AssumeRoleProvider. The credentials will expire every 15 minutes and the
+// role session will be named after a nanosecond timestamp of this operation.
+//
+// Takes an AssumeRoler which can be satisfied by the STS client.
+func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
+	p := &AssumeRoleProvider{
+		Client:   svc,
+		RoleARN:  roleARN,
+		Duration: DefaultDuration,
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return credentials.NewCredentials(p)
+}
+
+// Retrieve generates a new set of temporary credentials using STS.
+func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
+
+	// Apply defaults where parameters are not set.
+	if p.RoleSessionName == "" {
+		// Try to work out a role name that will hopefully end up unique.
+		p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano())
+	}
+	if p.Duration == 0 {
+		// Expire as often as AWS permits.
+ p.Duration = DefaultDuration + } + input := &sts.AssumeRoleInput{ + DurationSeconds: aws.Int64(int64(p.Duration / time.Second)), + RoleArn: aws.String(p.RoleARN), + RoleSessionName: aws.String(p.RoleSessionName), + ExternalId: p.ExternalID, + } + if p.Policy != nil { + input.Policy = p.Policy + } + if p.SerialNumber != nil && p.TokenCode != nil { + input.SerialNumber = p.SerialNumber + input.TokenCode = p.TokenCode + } + roleOutput, err := p.Client.AssumeRole(input) + + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + // We will proactively generate new credentials before they expire. + p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow) + + return credentials.Value{ + AccessKeyID: *roleOutput.Credentials.AccessKeyId, + SecretAccessKey: *roleOutput.Credentials.SecretAccessKey, + SessionToken: *roleOutput.Credentials.SessionToken, + ProviderName: ProviderName, + }, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider_test.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider_test.go new file mode 100644 index 000000000..6bd6e9197 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider_test.go @@ -0,0 +1,56 @@ +package stscreds + +import ( + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/sts" + "github.com/stretchr/testify/assert" +) + +type stubSTS struct { +} + +func (s *stubSTS) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) { + expiry := time.Now().Add(60 * time.Minute) + return &sts.AssumeRoleOutput{ + Credentials: &sts.Credentials{ + // Just reflect the role arn to the provider. + AccessKeyId: input.RoleArn, + SecretAccessKey: aws.String("assumedSecretAccessKey"), + SessionToken: aws.String("assumedSessionToken"), + Expiration: &expiry, + }, + }, nil +} + +func TestAssumeRoleProvider(t *testing.T) { + stub := &stubSTS{} + p := &AssumeRoleProvider{ + Client: stub, + RoleARN: "roleARN", + } + + creds, err := p.Retrieve() + assert.Nil(t, err, "Expect no error") + + assert.Equal(t, "roleARN", creds.AccessKeyID, "Expect access key ID to be reflected role ARN") + assert.Equal(t, "assumedSecretAccessKey", creds.SecretAccessKey, "Expect secret access key to match") + assert.Equal(t, "assumedSessionToken", creds.SessionToken, "Expect session token to match") +} + +func BenchmarkAssumeRoleProvider(b *testing.B) { + stub := &stubSTS{} + p := &AssumeRoleProvider{ + Client: stub, + RoleARN: "roleARN", + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err := p.Retrieve(); err != nil { + b.Fatal(err) + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go new file mode 100644 index 000000000..12be1a5d7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go @@ -0,0 +1,98 @@ +// Package defaults is a collection of helpers to retrieve the SDK's default +// configuration and handlers. +// +// Generally this package shouldn't be used directly, but session.Session +// instead. This package is useful when you need to reset the defaults +// of a session or service client to the SDK defaults before setting +// additional parameters. 
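+//
+// For example, the default config and handlers can be retrieved together
+// with Get (a sketch of the helpers defined below):
+//
+//	d := defaults.Get()
+//	cfg, handlers := d.Config, d.Handlers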
+package defaults
+
+import (
+	"net/http"
+	"os"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/corehandlers"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
+	"github.com/aws/aws-sdk-go/aws/ec2metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/endpoints"
+)
+
+// A Defaults provides a collection of default values for SDK clients.
+type Defaults struct {
+	Config   *aws.Config
+	Handlers request.Handlers
+}
+
+// Get returns the SDK's default values with Config and handlers pre-configured.
+func Get() Defaults {
+	cfg := Config()
+	handlers := Handlers()
+	cfg.Credentials = CredChain(cfg, handlers)
+
+	return Defaults{
+		Config:   cfg,
+		Handlers: handlers,
+	}
+}
+
+// Config returns the default configuration without credentials.
+// To retrieve a config with credentials also included use
+// `defaults.Get().Config` instead.
+//
+// Generally you shouldn't need to use this method directly, but
+// it is available if you need to reset the configuration of an
+// existing service client or session.
+func Config() *aws.Config {
+	return aws.NewConfig().
+		WithCredentials(credentials.AnonymousCredentials).
+		WithRegion(os.Getenv("AWS_REGION")).
+		WithHTTPClient(http.DefaultClient).
+		WithMaxRetries(aws.UseServiceDefaultRetries).
+		WithLogger(aws.NewDefaultLogger()).
+		WithLogLevel(aws.LogOff).
+		WithSleepDelay(time.Sleep)
+}
+
+// Handlers returns the default request handlers.
+//
+// Generally you shouldn't need to use this method directly, but
+// it is available if you need to reset the request handlers of an
+// existing service client or session.
+func Handlers() request.Handlers {
+	var handlers request.Handlers
+
+	handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
+	handlers.Validate.AfterEachFn = request.HandlerListStopOnError
+	handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
+	handlers.Build.AfterEachFn = request.HandlerListStopOnError
+	handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
+	handlers.Send.PushBackNamed(corehandlers.SendHandler)
+	handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
+	handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)
+
+	return handlers
+}
+
+// CredChain returns the default credential chain.
+//
+// Generally you shouldn't need to use this method directly, but
+// it is available if you need to reset the credentials of an
+// existing service client or session's Config.
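+//
+// A sketch of rebuilding a config's credential chain from the defaults:
+//
+//	cfg := defaults.Config()
+//	handlers := defaults.Handlers()
+//	cfg.Credentials = defaults.CredChain(cfg, handlers)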
+func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
+	endpoint, signingRegion := endpoints.EndpointForRegion(ec2metadata.ServiceName, *cfg.Region, true)
+
+	return credentials.NewCredentials(&credentials.ChainProvider{
+		VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
+		Providers: []credentials.Provider{
+			&credentials.EnvProvider{},
+			&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
+			&ec2rolecreds.EC2RoleProvider{
+				Client:       ec2metadata.NewClient(*cfg, handlers, endpoint, signingRegion),
+				ExpiryWindow: 5 * time.Minute,
+			},
+		}})
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
new file mode 100644
index 000000000..669c813a0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
@@ -0,0 +1,140 @@
+package ec2metadata
+
+import (
+	"encoding/json"
+	"fmt"
+	"path"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// GetMetadata uses the path provided to request information from the EC2
+// instance metadata service. The content will be returned as a string, or
+// an error if the request failed.
+func (c *EC2Metadata) GetMetadata(p string) (string, error) {
+	op := &request.Operation{
+		Name:       "GetMetadata",
+		HTTPMethod: "GET",
+		HTTPPath:   path.Join("/", "meta-data", p),
+	}
+
+	output := &metadataOutput{}
+	req := c.NewRequest(op, nil, output)
+
+	return output.Content, req.Send()
+}
+
+// GetDynamicData uses the path provided to request information from the EC2
+// instance metadata service for dynamic data. The content will be returned
+// as a string, or an error if the request failed.
+func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
+	op := &request.Operation{
+		Name:       "GetDynamicData",
+		HTTPMethod: "GET",
+		HTTPPath:   path.Join("/", "dynamic", p),
+	}
+
+	output := &metadataOutput{}
+	req := c.NewRequest(op, nil, output)
+
+	return output.Content, req.Send()
+}
+
+// GetInstanceIdentityDocument retrieves an identity document describing an
+// instance. An error is returned if the request fails or the response cannot
+// be parsed.
+func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) {
+	resp, err := c.GetDynamicData("instance-identity/document")
+	if err != nil {
+		return EC2InstanceIdentityDocument{},
+			awserr.New("EC2MetadataRequestError",
+				"failed to get EC2 instance identity document", err)
+	}
+
+	doc := EC2InstanceIdentityDocument{}
+	if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil {
+		return EC2InstanceIdentityDocument{},
+			awserr.New("SerializationError",
+				"failed to decode EC2 instance identity document", err)
+	}
+
+	return doc, nil
+}
+
+// IAMInfo retrieves IAM info from the metadata API
+func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) {
+	resp, err := c.GetMetadata("iam/info")
+	if err != nil {
+		return EC2IAMInfo{},
+			awserr.New("EC2MetadataRequestError",
+				"failed to get EC2 IAM info", err)
+	}
+
+	info := EC2IAMInfo{}
+	if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil {
+		return EC2IAMInfo{},
+			awserr.New("SerializationError",
+				"failed to decode EC2 IAM info", err)
+	}
+
+	if info.Code != "Success" {
+		errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code)
+		return EC2IAMInfo{},
+			awserr.New("EC2MetadataError", errMsg, nil)
+	}
+
+	return info, nil
+}
+
+// Region returns the region the instance is running in.
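+//
+// The region is derived from the instance's availability zone, so an AZ of
+// "us-west-2a" yields "us-west-2". A usage sketch (on an EC2 instance, with
+// sess assumed to be a *session.Session):
+//
+//	meta := ec2metadata.New(sess)
+//	region, err := meta.Region()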
+func (c *EC2Metadata) Region() (string, error) { + resp, err := c.GetMetadata("placement/availability-zone") + if err != nil { + return "", err + } + + // returns region without the suffix. Eg: us-west-2a becomes us-west-2 + return resp[:len(resp)-1], nil +} + +// Available returns if the application has access to the EC2 Metadata service. +// Can be used to determine if application is running within an EC2 Instance and +// the metadata service is available. +func (c *EC2Metadata) Available() bool { + if _, err := c.GetMetadata("instance-id"); err != nil { + return false + } + + return true +} + +// An EC2IAMInfo provides the shape for unmarshalling +// an IAM info from the metadata API +type EC2IAMInfo struct { + Code string + LastUpdated time.Time + InstanceProfileArn string + InstanceProfileID string +} + +// An EC2InstanceIdentityDocument provides the shape for unmarshalling +// an instance identity document +type EC2InstanceIdentityDocument struct { + DevpayProductCodes []string `json:"devpayProductCodes"` + AvailabilityZone string `json:"availabilityZone"` + PrivateIP string `json:"privateIp"` + Version string `json:"version"` + Region string `json:"region"` + InstanceID string `json:"instanceId"` + BillingProducts []string `json:"billingProducts"` + InstanceType string `json:"instanceType"` + AccountID string `json:"accountId"` + PendingTime time.Time `json:"pendingTime"` + ImageID string `json:"imageId"` + KernelID string `json:"kernelId"` + RamdiskID string `json:"ramdiskId"` + Architecture string `json:"architecture"` +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api_test.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api_test.go new file mode 100644 index 000000000..37b74fb0c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api_test.go @@ -0,0 +1,195 @@ +package ec2metadata_test + +import ( + "bytes" + "io/ioutil" + "net/http" + "net/http/httptest" + "path" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" +) + +const instanceIdentityDocument = `{ + "devpayProductCodes" : null, + "availabilityZone" : "us-east-1d", + "privateIp" : "10.158.112.84", + "version" : "2010-08-31", + "region" : "us-east-1", + "instanceId" : "i-1234567890abcdef0", + "billingProducts" : null, + "instanceType" : "t1.micro", + "accountId" : "123456789012", + "pendingTime" : "2015-11-19T16:32:11Z", + "imageId" : "ami-5fb8c835", + "kernelId" : "aki-919dcaf8", + "ramdiskId" : null, + "architecture" : "x86_64" +}` + +const validIamInfo = `{ + "Code" : "Success", + "LastUpdated" : "2016-03-17T12:27:32Z", + "InstanceProfileArn" : "arn:aws:iam::123456789012:instance-profile/my-instance-profile", + "InstanceProfileId" : "AIPAABCDEFGHIJKLMN123" +}` + +const unsuccessfulIamInfo = `{ + "Code" : "Failed", + "LastUpdated" : "2016-03-17T12:27:32Z", + "InstanceProfileArn" : "arn:aws:iam::123456789012:instance-profile/my-instance-profile", + "InstanceProfileId" : "AIPAABCDEFGHIJKLMN123" +}` + +func initTestServer(path string, resp string) *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.RequestURI != path { + http.Error(w, "not found", http.StatusNotFound) + return + } + + w.Write([]byte(resp)) + })) +} + +func TestEndpoint(t *testing.T) { + c := ec2metadata.New(session.New()) + op := 
&request.Operation{
+		Name:       "GetMetadata",
+		HTTPMethod: "GET",
+		HTTPPath:   path.Join("/", "meta-data", "testpath"),
+	}
+
+	req := c.NewRequest(op, nil, nil)
+	assert.Equal(t, "http://169.254.169.254/latest", req.ClientInfo.Endpoint)
+	assert.Equal(t, "http://169.254.169.254/latest/meta-data/testpath", req.HTTPRequest.URL.String())
+}
+
+func TestGetMetadata(t *testing.T) {
+	server := initTestServer(
+		"/latest/meta-data/some/path",
+		"success", // real response includes suffix
+	)
+	defer server.Close()
+	c := ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")})
+
+	resp, err := c.GetMetadata("some/path")
+
+	assert.NoError(t, err)
+	assert.Equal(t, "success", resp)
+}
+
+func TestGetRegion(t *testing.T) {
+	server := initTestServer(
+		"/latest/meta-data/placement/availability-zone",
+		"us-west-2a", // real response includes suffix
+	)
+	defer server.Close()
+	c := ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")})
+
+	region, err := c.Region()
+
+	assert.NoError(t, err)
+	assert.Equal(t, "us-west-2", region)
+}
+
+func TestMetadataAvailable(t *testing.T) {
+	server := initTestServer(
+		"/latest/meta-data/instance-id",
+		"instance-id",
+	)
+	defer server.Close()
+	c := ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")})
+
+	available := c.Available()
+
+	assert.True(t, available)
+}
+
+func TestMetadataIAMInfo_success(t *testing.T) {
+	server := initTestServer(
+		"/latest/meta-data/iam/info",
+		validIamInfo,
+	)
+	defer server.Close()
+	c := ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")})
+
+	iamInfo, err := c.IAMInfo()
+	assert.NoError(t, err)
+	assert.Equal(t, "Success", iamInfo.Code)
+	assert.Equal(t, "arn:aws:iam::123456789012:instance-profile/my-instance-profile", iamInfo.InstanceProfileArn)
+	assert.Equal(t, "AIPAABCDEFGHIJKLMN123", iamInfo.InstanceProfileID)
+}
+
+func TestMetadataIAMInfo_failure(t *testing.T) {
+	server := initTestServer(
+		"/latest/meta-data/iam/info",
+		unsuccessfulIamInfo,
+	)
+	defer server.Close()
+	c := ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")})
+
+	iamInfo, err := c.IAMInfo()
+	assert.NotNil(t, err)
+	assert.Equal(t, "", iamInfo.Code)
+	assert.Equal(t, "", iamInfo.InstanceProfileArn)
+	assert.Equal(t, "", iamInfo.InstanceProfileID)
+}
+
+func TestMetadataNotAvailable(t *testing.T) {
+	c := ec2metadata.New(session.New())
+	c.Handlers.Send.Clear()
+	c.Handlers.Send.PushBack(func(r *request.Request) {
+		r.HTTPResponse = &http.Response{
+			StatusCode: int(0),
+			Status:     http.StatusText(int(0)),
+			Body:       ioutil.NopCloser(bytes.NewReader([]byte{})),
+		}
+		r.Error = awserr.New("RequestError", "send request failed", nil)
+		r.Retryable = aws.Bool(true) // network errors are retryable
+	})
+
+	available := c.Available()
+
+	assert.False(t, available)
+}
+
+func TestMetadataErrorResponse(t *testing.T) {
+	c := ec2metadata.New(session.New())
+	c.Handlers.Send.Clear()
+	c.Handlers.Send.PushBack(func(r *request.Request) {
+		r.HTTPResponse = &http.Response{
+			StatusCode: http.StatusBadRequest,
+			Status:     http.StatusText(http.StatusBadRequest),
+			Body:       ioutil.NopCloser(strings.NewReader("error message text")),
+		}
+		r.Retryable = aws.Bool(false) // error responses are not retryable
+	})
+
+	data, err := c.GetMetadata("uri/path")
+	assert.Empty(t, data)
+	assert.Contains(t, err.Error(), "error message text")
+}
+
+func TestEC2RoleProviderInstanceIdentity(t *testing.T) {
+	server := initTestServer(
+		"/latest/dynamic/instance-identity/document",
+		instanceIdentityDocument,
+	)
+	defer server.Close()
+	c := ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")})
+
+	doc, err := c.GetInstanceIdentityDocument()
+	assert.Nil(t, err, "Expect no error, %v", err)
+	assert.Equal(t, doc.AccountID, "123456789012")
+	assert.Equal(t, doc.AvailabilityZone, "us-east-1d")
+	assert.Equal(t, doc.Region, "us-east-1")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
new file mode 100644
index 000000000..5b4379dbd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
@@ -0,0 +1,124 @@
+// Package ec2metadata provides the client for making API calls to the
+// EC2 Metadata service.
+package ec2metadata
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"net/http"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// ServiceName is the name of the service.
+const ServiceName = "ec2metadata"
+
+// An EC2Metadata is an EC2 Metadata service Client.
+type EC2Metadata struct {
+	*client.Client
+}
+
+// New creates a new instance of the EC2Metadata client with a session.
+// This client is safe to use across multiple goroutines.
+//
+// Example:
+//	// Create an EC2Metadata client from just a session.
+//	svc := ec2metadata.New(mySession)
+//
+//	// Create an EC2Metadata client with additional configuration
+//	svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugWithHTTPBody))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// NewClient returns a new EC2Metadata client. Should be used to create
+// a client when not using a session. Generally using just New with a session
+// is preferred.
+//
+// If no client is provided, or an unmodified HTTP client from the stdlib
+// default is provided, the EC2Metadata client's HTTP timeout will be
+// shortened. To disable this override, set
+// Config.EC2MetadataDisableTimeoutOverride to true. The override is enabled
+// by default.
+func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata {
+	if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) {
+		// If the http client is unmodified and this feature is not disabled
+		// set custom timeouts for EC2Metadata requests.
+		cfg.HTTPClient = &http.Client{
+			// use a shorter timeout than default because the metadata
+			// service is local if it is running, and to fail faster
+			// if not running on an ec2 instance.
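+			// (The metadata endpoint is a link-local address, so off-EC2 a
+			// connection attempt typically hangs until the TCP timeout; a
+			// short client timeout fails fast instead.)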
+			Timeout: 5 * time.Second,
+		}
+	}
+
+	svc := &EC2Metadata{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName: ServiceName,
+				Endpoint:    endpoint,
+				APIVersion:  "latest",
+			},
+			handlers,
+		),
+	}
+
+	svc.Handlers.Unmarshal.PushBack(unmarshalHandler)
+	svc.Handlers.UnmarshalError.PushBack(unmarshalError)
+	svc.Handlers.Validate.Clear()
+	svc.Handlers.Validate.PushBack(validateEndpointHandler)
+
+	// Add additional options to the service config
+	for _, option := range opts {
+		option(svc.Client)
+	}
+
+	return svc
+}
+
+func httpClientZero(c *http.Client) bool {
+	return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0)
+}
+
+type metadataOutput struct {
+	Content string
+}
+
+func unmarshalHandler(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+	b := &bytes.Buffer{}
+	if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
+		r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata response", err)
+		return
+	}
+
+	if data, ok := r.Data.(*metadataOutput); ok {
+		data.Content = b.String()
+	}
+}
+
+func unmarshalError(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+	b := &bytes.Buffer{}
+	if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
+		r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error response", err)
+		return
+	}
+
+	// Response body format is not consistent between metadata endpoints.
+	// Grab the error message as a string and include that as the source error
+	r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String()))
+}
+
+func validateEndpointHandler(r *request.Request) {
+	if r.ClientInfo.Endpoint == "" {
+		r.Error = aws.ErrMissingEndpoint
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service_test.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service_test.go
new file mode 100644
index 000000000..d10ecb303
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service_test.go
@@ -0,0 +1,79 @@
+package ec2metadata_test
+
+import (
+	"net/http"
+	"net/http/httptest"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/ec2metadata"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestClientOverrideDefaultHTTPClientTimeout(t *testing.T) {
+	svc := ec2metadata.New(session.New())
+
+	assert.NotEqual(t, http.DefaultClient, svc.Config.HTTPClient)
+	assert.Equal(t, 5*time.Second, svc.Config.HTTPClient.Timeout)
+}
+
+func TestClientNotOverrideDefaultHTTPClientTimeout(t *testing.T) {
+	origClient := *http.DefaultClient
+	http.DefaultClient.Transport = &http.Transport{}
+	defer func() {
+		http.DefaultClient = &origClient
+	}()
+
+	svc := ec2metadata.New(session.New())
+
+	assert.Equal(t, http.DefaultClient, svc.Config.HTTPClient)
+
+	tr, ok := svc.Config.HTTPClient.Transport.(*http.Transport)
+	assert.True(t, ok)
+	assert.NotNil(t, tr)
+	assert.Nil(t, tr.Dial)
+}
+
+func TestClientDisableOverrideDefaultHTTPClientTimeout(t *testing.T) {
+	svc := ec2metadata.New(session.New(aws.NewConfig().WithEC2MetadataDisableTimeoutOverride(true)))
+
+	assert.Equal(t, http.DefaultClient, svc.Config.HTTPClient)
+}
+
+func TestClientOverrideDefaultHTTPClientTimeoutRace(t *testing.T) {
+	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.Write([]byte("us-east-1a"))
+	}))
+
+	cfg := aws.NewConfig().WithEndpoint(server.URL)
+	runEC2MetadataClients(t, cfg, 100)
+}
+
+func TestClientOverrideDefaultHTTPClientTimeoutRaceWithTransport(t *testing.T) {
+	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.Write([]byte("us-east-1a"))
+	}))
+
+	cfg := aws.NewConfig().WithEndpoint(server.URL).WithHTTPClient(&http.Client{
+		Transport: http.DefaultTransport,
+	})
+
+	runEC2MetadataClients(t, cfg, 100)
+}
+
+func runEC2MetadataClients(t *testing.T, cfg *aws.Config, atOnce int) {
+	var wg sync.WaitGroup
+	wg.Add(atOnce)
+	for i := 0; i < atOnce; i++ {
+		go func() {
+			svc := ec2metadata.New(session.New(), cfg)
+			_, err := svc.Region()
+			assert.NoError(t, err)
+			wg.Done()
+		}()
+	}
+	wg.Wait()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/github.com/aws/aws-sdk-go/aws/errors.go
new file mode 100644
index 000000000..576636168
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/errors.go
@@ -0,0 +1,17 @@
+package aws
+
+import "github.com/aws/aws-sdk-go/aws/awserr"
+
+var (
+	// ErrMissingRegion is an error that is returned if region configuration is
+	// not found.
+	//
+	// @readonly
+	ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil)
+
+	// ErrMissingEndpoint is an error that is returned if an endpoint cannot be
+	// resolved for a service.
+	//
+	// @readonly
+	ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go
new file mode 100644
index 000000000..db87188e2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/logger.go
@@ -0,0 +1,112 @@
+package aws
+
+import (
+	"log"
+	"os"
+)
+
+// A LogLevelType defines the level logging should be performed at. Used to instruct
+// the SDK which statements should be logged.
+type LogLevelType uint
+
+// LogLevel returns the pointer to a LogLevel. Should be used to workaround
+// not being able to take the address of a non-composite literal.
+func LogLevel(l LogLevelType) *LogLevelType {
+	return &l
+}
+
+// Value returns the LogLevel value or the default value LogOff if the LogLevel
+// is nil. Safe to use on nil value LogLevelTypes.
+func (l *LogLevelType) Value() LogLevelType {
+	if l != nil {
+		return *l
+	}
+	return LogOff
+}
+
+// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be
+// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If
+// LogLevel is nil, will default to LogOff comparison.
+func (l *LogLevelType) Matches(v LogLevelType) bool {
+	c := l.Value()
+	return c&v == v
+}
+
+// AtLeast returns true if this LogLevel is at least high enough to satisfy v.
+// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default
+// to LogOff comparison.
+func (l *LogLevelType) AtLeast(v LogLevelType) bool {
+	c := l.Value()
+	return c >= v
+}
+
+const (
+	// LogOff states that no logging should be performed by the SDK. This is the
+	// default state of the SDK, and should be used to disable all logging.
+	LogOff LogLevelType = iota * 0x1000
+
+	// LogDebug states that debug output should be logged by the SDK. This should
+	// be used to inspect requests made and responses received.
+	LogDebug
+)
+
+// Debug Logging Sub Levels
+const (
+	// LogDebugWithSigning states that the SDK should log request signing and
+	// presigning events. This should be used to log the signing details of
+	// requests for debugging. Will also enable LogDebug.
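+	//
+	// For example, signing diagnostics could be enabled on a config with
+	// (a sketch):
+	//
+	//	cfg := aws.NewConfig().WithLogLevel(aws.LogDebugWithSigning)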
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go
new file mode 100644
index 000000000..db87188e2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/logger.go
@@ -0,0 +1,112 @@
+package aws
+
+import (
+ "log"
+ "os"
+)
+
+// A LogLevelType defines the level logging should be performed at. Used to instruct
+// the SDK which statements should be logged.
+type LogLevelType uint
+
+// LogLevel returns the pointer to a LogLevel. Should be used as a workaround
+// for not being able to take the address of a non-composite literal.
+func LogLevel(l LogLevelType) *LogLevelType {
+ return &l
+}
+
+// Value returns the LogLevel value or the default value LogOff if the LogLevel
+// is nil. Safe to use on nil value LogLevelTypes.
+func (l *LogLevelType) Value() LogLevelType {
+ if l != nil {
+ return *l
+ }
+ return LogOff
+}
+
+// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be
+// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If
+// LogLevel is nil, will default to LogOff comparison.
+func (l *LogLevelType) Matches(v LogLevelType) bool {
+ c := l.Value()
+ return c&v == v
+}
+
+// AtLeast returns true if this LogLevel is at least high enough to satisfy v.
+// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default
+// to LogOff comparison.
+func (l *LogLevelType) AtLeast(v LogLevelType) bool {
+ c := l.Value()
+ return c >= v
+}
+
+const (
+ // LogOff states that no logging should be performed by the SDK. This is the
+ // default state of the SDK, and should be used to disable all logging.
+ LogOff LogLevelType = iota * 0x1000
+
+ // LogDebug states that debug output should be logged by the SDK. This should
+ // be used to inspect requests made and responses received.
+ LogDebug
+)
+
+// Debug Logging Sub Levels
+const (
+ // LogDebugWithSigning states that the SDK should log request signing and
+ // presigning events. This should be used to log the signing details of
+ // requests for debugging. Will also enable LogDebug.
+ LogDebugWithSigning LogLevelType = LogDebug | (1 << iota)
+
+ // LogDebugWithHTTPBody states the SDK should log HTTP request and response
+ // HTTP bodies in addition to the headers and path. This should be used to
+ // see the body content of requests and responses made while using the SDK.
+ // Will also enable LogDebug.
+ LogDebugWithHTTPBody
+
+ // LogDebugWithRequestRetries states the SDK should log when service requests will
+ // be retried. This should be used to log when service requests are being
+ // retried. Will also enable LogDebug.
+ LogDebugWithRequestRetries
+
+ // LogDebugWithRequestErrors states the SDK should log when service requests fail
+ // to build, send, validate, or unmarshal.
+ LogDebugWithRequestErrors
+)
+
+// A Logger is a minimalistic interface for the SDK to log messages to. Should
+// be used to provide custom logging writers for the SDK to use.
+type Logger interface {
+ Log(...interface{})
+}
+
+// A LoggerFunc is a convenience type to convert a function taking a variadic
+// list of arguments and wrap it so the Logger interface can be used.
+//
+// Example:
+// s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) {
+// fmt.Fprintln(os.Stdout, args...)
+// })})
+type LoggerFunc func(...interface{})
+
+// Log calls the wrapped function with the arguments provided
+func (f LoggerFunc) Log(args ...interface{}) {
+ f(args...)
+}
+
+// NewDefaultLogger returns a Logger which will write log messages to stdout, and
+// use the same formatting runes as the stdlib log.Logger
+func NewDefaultLogger() Logger {
+ return &defaultLogger{
+ logger: log.New(os.Stdout, "", log.LstdFlags),
+ }
+}
+
+// A defaultLogger provides a minimalistic logger satisfying the Logger interface.
+type defaultLogger struct {
+ logger *log.Logger
+}
+
+// Log logs the parameters to the stdlib logger. See log.Println.
+func (l defaultLogger) Log(args ...interface{}) {
+ l.logger.Println(args...)
+}
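A sketch of plugging a custom Logger and a debug sub-level into a client config (the choice of writer is illustrative):

cfg := aws.NewConfig().
	WithLogger(aws.LoggerFunc(func(args ...interface{}) {
		// route SDK debug output to stderr instead of stdout
		fmt.Fprintln(os.Stderr, args...)
	})).
	// LogDebugWithRequestRetries includes the LogDebug bit, so
	// Matches(aws.LogDebug) will also report true.
	WithLogLevel(aws.LogDebugWithRequestRetries)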
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
new file mode 100644
index 000000000..5279c19c0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
@@ -0,0 +1,187 @@
+package request
+
+import (
+ "fmt"
+ "strings"
+)
+
+// A Handlers provides a collection of request handlers for various
+// stages of handling requests.
+type Handlers struct {
+ Validate HandlerList
+ Build HandlerList
+ Sign HandlerList
+ Send HandlerList
+ ValidateResponse HandlerList
+ Unmarshal HandlerList
+ UnmarshalMeta HandlerList
+ UnmarshalError HandlerList
+ Retry HandlerList
+ AfterRetry HandlerList
+}
+
+// Copy returns a copy of this handler's lists.
+func (h *Handlers) Copy() Handlers {
+ return Handlers{
+ Validate: h.Validate.copy(),
+ Build: h.Build.copy(),
+ Sign: h.Sign.copy(),
+ Send: h.Send.copy(),
+ ValidateResponse: h.ValidateResponse.copy(),
+ Unmarshal: h.Unmarshal.copy(),
+ UnmarshalError: h.UnmarshalError.copy(),
+ UnmarshalMeta: h.UnmarshalMeta.copy(),
+ Retry: h.Retry.copy(),
+ AfterRetry: h.AfterRetry.copy(),
+ }
+}
+
+// Clear removes callback functions for all handlers
+func (h *Handlers) Clear() {
+ h.Validate.Clear()
+ h.Build.Clear()
+ h.Send.Clear()
+ h.Sign.Clear()
+ h.Unmarshal.Clear()
+ h.UnmarshalMeta.Clear()
+ h.UnmarshalError.Clear()
+ h.ValidateResponse.Clear()
+ h.Retry.Clear()
+ h.AfterRetry.Clear()
+}
+
+// A HandlerListRunItem represents an entry in the HandlerList which
+// is being run.
+type HandlerListRunItem struct {
+ Index int
+ Handler NamedHandler
+ Request *Request
+}
+
+// A HandlerList manages zero or more handlers in a list.
+type HandlerList struct {
+ list []NamedHandler
+
+ // Called after each request handler in the list is called. If set
+ // and the func returns true the HandlerList will continue to iterate
+ // over the request handlers. If false is returned the HandlerList
+ // will stop iterating.
+ //
+ // Should be used if extra logic needs to be performed between each handler
+ // in the list. This can be used to terminate a list's iteration
+ // based on a condition, such as stopping on the first error with
+ // HandlerListStopOnError, or for logging with HandlerListLogItem.
+ AfterEachFn func(item HandlerListRunItem) bool
+}
+
+// A NamedHandler is a struct that contains a name and function callback.
+type NamedHandler struct {
+ Name string
+ Fn func(*Request)
+}
+
+// copy creates a copy of the handler list.
+func (l *HandlerList) copy() HandlerList {
+ n := HandlerList{
+ AfterEachFn: l.AfterEachFn,
+ }
+ n.list = append([]NamedHandler{}, l.list...)
+ return n
+}
+
+// Clear clears the handler list.
+func (l *HandlerList) Clear() {
+ l.list = []NamedHandler{}
+}
+
+// Len returns the number of handlers in the list.
+func (l *HandlerList) Len() int {
+ return len(l.list)
+}
+
+// PushBack pushes handler f to the back of the handler list.
+func (l *HandlerList) PushBack(f func(*Request)) {
+ l.list = append(l.list, NamedHandler{"__anonymous", f})
+}
+
+// PushFront pushes handler f to the front of the handler list.
+func (l *HandlerList) PushFront(f func(*Request)) {
+ l.list = append([]NamedHandler{{"__anonymous", f}}, l.list...)
+}
+
+// PushBackNamed pushes named handler n to the back of the handler list.
+func (l *HandlerList) PushBackNamed(n NamedHandler) {
+ l.list = append(l.list, n)
+}
+
+// PushFrontNamed pushes named handler n to the front of the handler list.
+func (l *HandlerList) PushFrontNamed(n NamedHandler) {
+ l.list = append([]NamedHandler{n}, l.list...)
+}
+
+// Remove removes a NamedHandler n
+func (l *HandlerList) Remove(n NamedHandler) {
+ newlist := []NamedHandler{}
+ for _, m := range l.list {
+ if m.Name != n.Name {
+ newlist = append(newlist, m)
+ }
+ }
+ l.list = newlist
+}
+
+// Run executes all handlers in the list with a given request object.
+func (l *HandlerList) Run(r *Request) {
+ for i, h := range l.list {
+ h.Fn(r)
+ item := HandlerListRunItem{
+ Index: i, Handler: h, Request: r,
+ }
+ if l.AfterEachFn != nil && !l.AfterEachFn(item) {
+ return
+ }
+ }
+}
+
+// HandlerListLogItem logs the request handler and the state of the
+// request's Error value. Always returns true to continue iterating
+// request handlers in a HandlerList.
+func HandlerListLogItem(item HandlerListRunItem) bool {
+ if item.Request.Config.Logger == nil {
+ return true
+ }
+ item.Request.Config.Logger.Log("DEBUG: RequestHandler",
+ item.Index, item.Handler.Name, item.Request.Error)
+
+ return true
+}
+
+// HandlerListStopOnError returns false to stop the HandlerList iterating
+// over request handlers if Request.Error is not nil. True otherwise
+// to continue iterating.
+func HandlerListStopOnError(item HandlerListRunItem) bool {
+ return item.Request.Error == nil
+}
+
+// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request
+// header. If the extra parameters are provided they will be added as metadata to the
+// name/version pair resulting in the following format.
+// "name/version (extra0; extra1; ...)"
+// The user agent part will be concatenated with this current request's user agent string.
+func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) {
+ ua := fmt.Sprintf("%s/%s", name, version)
+ if len(extra) > 0 {
+ ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; "))
+ }
+ return func(r *Request) {
+ AddToUserAgent(r, ua)
+ }
+}
+
+// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header.
+// The input string will be concatenated with the current request's user agent string.
+func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) {
+ return func(r *Request) {
+ AddToUserAgent(r, s)
+ }
+}
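A sketch of the list API in use, the way clients elsewhere in this patch customize their pipelines (the handler name is illustrative; req is a previously built *request.Request):

timing := request.NamedHandler{Name: "custom.Timing", Fn: func(r *request.Request) {
	// inspect or mutate r here; handlers run in list order
}}

var l request.HandlerList
l.PushBackNamed(timing)
l.AfterEachFn = request.HandlerListStopOnError // stop on the first r.Error
l.Run(req)
l.Remove(timing)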
+// "name/version (extra0; extra1; ...)" +// The user agent part will be concatenated with this current request's user agent string. +func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) { + ua := fmt.Sprintf("%s/%s", name, version) + if len(extra) > 0 { + ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; ")) + } + return func(r *Request) { + AddToUserAgent(r, ua) + } +} + +// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header. +// The input string will be concatenated with the current request's user agent string. +func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { + return func(r *Request) { + AddToUserAgent(r, s) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers_test.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers_test.go new file mode 100644 index 000000000..f2062c3e0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers_test.go @@ -0,0 +1,87 @@ +package request_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +func TestHandlerList(t *testing.T) { + s := "" + r := &request.Request{} + l := request.HandlerList{} + l.PushBack(func(r *request.Request) { + s += "a" + r.Data = s + }) + l.Run(r) + assert.Equal(t, "a", s) + assert.Equal(t, "a", r.Data) +} + +func TestMultipleHandlers(t *testing.T) { + r := &request.Request{} + l := request.HandlerList{} + l.PushBack(func(r *request.Request) { r.Data = nil }) + l.PushFront(func(r *request.Request) { r.Data = aws.Bool(true) }) + l.Run(r) + if r.Data != nil { + t.Error("Expected handler to execute") + } +} + +func TestNamedHandlers(t *testing.T) { + l := request.HandlerList{} + named := request.NamedHandler{Name: "Name", Fn: func(r *request.Request) {}} + named2 := request.NamedHandler{Name: "NotName", Fn: func(r *request.Request) {}} + l.PushBackNamed(named) + l.PushBackNamed(named) + l.PushBackNamed(named2) + l.PushBack(func(r *request.Request) {}) + assert.Equal(t, 4, l.Len()) + l.Remove(named) + assert.Equal(t, 2, l.Len()) +} + +func TestLoggedHandlers(t *testing.T) { + expectedHandlers := []string{"name1", "name2"} + l := request.HandlerList{} + loggedHandlers := []string{} + l.AfterEachFn = request.HandlerListLogItem + cfg := aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) { + loggedHandlers = append(loggedHandlers, args[2].(string)) + })} + + named1 := request.NamedHandler{Name: "name1", Fn: func(r *request.Request) {}} + named2 := request.NamedHandler{Name: "name2", Fn: func(r *request.Request) {}} + l.PushBackNamed(named1) + l.PushBackNamed(named2) + l.Run(&request.Request{Config: cfg}) + + assert.Equal(t, expectedHandlers, loggedHandlers) +} + +func TestStopHandlers(t *testing.T) { + l := request.HandlerList{} + stopAt := 1 + l.AfterEachFn = func(item request.HandlerListRunItem) bool { + return item.Index != stopAt + } + + called := 0 + l.PushBackNamed(request.NamedHandler{Name: "name1", Fn: func(r *request.Request) { + called++ + }}) + l.PushBackNamed(request.NamedHandler{Name: "name2", Fn: func(r *request.Request) { + called++ + }}) + l.PushBackNamed(request.NamedHandler{Name: "name3", Fn: func(r *request.Request) { + assert.Fail(t, "thrid handler should not be called") + }}) + l.Run(&request.Request{}) + + assert.Equal(t, 2, called, "Expect only two handlers to be called") +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go 
b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go new file mode 100644 index 000000000..a4087f20e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go @@ -0,0 +1,33 @@ +// +build go1.5 + +package request + +import ( + "io" + "net/http" + "net/url" +) + +func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { + req := &http.Request{ + URL: &url.URL{}, + Header: http.Header{}, + Close: r.Close, + Body: body, + Host: r.Host, + Method: r.Method, + Proto: r.Proto, + ContentLength: r.ContentLength, + // Cancel will be deprecated in 1.7 and will be replaced with Context + Cancel: r.Cancel, + } + + *req.URL = *r.URL + for k, v := range r.Header { + for _, vv := range v { + req.Header.Add(k, vv) + } + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go new file mode 100644 index 000000000..75da021ef --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go @@ -0,0 +1,31 @@ +// +build !go1.5 + +package request + +import ( + "io" + "net/http" + "net/url" +) + +func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { + req := &http.Request{ + URL: &url.URL{}, + Header: http.Header{}, + Close: r.Close, + Body: body, + Host: r.Host, + Method: r.Method, + Proto: r.Proto, + ContentLength: r.ContentLength, + } + + *req.URL = *r.URL + for k, v := range r.Header { + for _, vv := range v { + req.Header.Add(k, vv) + } + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request_copy_test.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request_copy_test.go new file mode 100644 index 000000000..4a4f8550b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request_copy_test.go @@ -0,0 +1,34 @@ +package request + +import ( + "bytes" + "io/ioutil" + "net/http" + "net/url" + "sync" + "testing" +) + +func TestRequestCopyRace(t *testing.T) { + origReq := &http.Request{URL: &url.URL{}, Header: http.Header{}} + origReq.Header.Set("Header", "OrigValue") + + var wg sync.WaitGroup + for i := 0; i < 100; i++ { + wg.Add(1) + go func() { + req := copyHTTPRequest(origReq, ioutil.NopCloser(&bytes.Buffer{})) + req.Header.Set("Header", "Value") + go func() { + req2 := copyHTTPRequest(req, ioutil.NopCloser(&bytes.Buffer{})) + req2.Header.Add("Header", "Value2") + }() + _ = req.Header.Get("Header") + wg.Done() + }() + _ = origReq.Header.Get("Header") + } + origReq.Header.Get("Header") + + wg.Wait() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request_retry_test.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request_retry_test.go new file mode 100644 index 000000000..7e5e68c76 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request_retry_test.go @@ -0,0 +1,37 @@ +// +build go1.5 + +package request_test + +import ( + "errors" + "strings" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/stretchr/testify/assert" +) + +func TestRequestCancelRetry(t *testing.T) { + c := make(chan struct{}) + + reqNum := 0 + s := awstesting.NewMockClient(aws.NewConfig().WithMaxRetries(10)) + s.Handlers.Validate.Clear() + s.Handlers.Unmarshal.Clear() + s.Handlers.UnmarshalMeta.Clear() + s.Handlers.UnmarshalError.Clear() + s.Handlers.Send.PushFront(func(r *request.Request) { + reqNum++ + r.Error = errors.New("net/http: canceled") + }) + out := 
&testData{} + r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out) + r.HTTPRequest.Cancel = c + close(c) + + err := r.Send() + assert.True(t, strings.Contains(err.Error(), "canceled")) + assert.Equal(t, 1, reqNum) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go new file mode 100644 index 000000000..da6396d2d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go @@ -0,0 +1,49 @@ +package request + +import ( + "io" + "sync" +) + +// offsetReader is a thread-safe io.ReadCloser to prevent racing +// with retrying requests +type offsetReader struct { + buf io.ReadSeeker + lock sync.RWMutex + closed bool +} + +func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader { + reader := &offsetReader{} + buf.Seek(offset, 0) + + reader.buf = buf + return reader +} + +// Close is a thread-safe close. Uses the write lock. +func (o *offsetReader) Close() error { + o.lock.Lock() + defer o.lock.Unlock() + o.closed = true + return nil +} + +// Read is a thread-safe read using a read lock. +func (o *offsetReader) Read(p []byte) (int, error) { + o.lock.RLock() + defer o.lock.RUnlock() + + if o.closed { + return 0, io.EOF + } + + return o.buf.Read(p) +} + +// CloseAndCopy will return a new offsetReader with a copy of the old buffer +// and close the old buffer. +func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader { + o.Close() + return newOffsetReader(o.buf, offset) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader_test.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader_test.go new file mode 100644 index 000000000..8472258c2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader_test.go @@ -0,0 +1,122 @@ +package request + +import ( + "bytes" + "io" + "math/rand" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestOffsetReaderRead(t *testing.T) { + buf := []byte("testData") + reader := &offsetReader{buf: bytes.NewReader(buf)} + + tempBuf := make([]byte, len(buf)) + + n, err := reader.Read(tempBuf) + + assert.Equal(t, n, len(buf)) + assert.Nil(t, err) + assert.Equal(t, buf, tempBuf) +} + +func TestOffsetReaderClose(t *testing.T) { + buf := []byte("testData") + reader := &offsetReader{buf: bytes.NewReader(buf)} + + err := reader.Close() + assert.Nil(t, err) + + tempBuf := make([]byte, len(buf)) + n, err := reader.Read(tempBuf) + assert.Equal(t, n, 0) + assert.Equal(t, err, io.EOF) +} + +func TestOffsetReaderCloseAndCopy(t *testing.T) { + buf := []byte("testData") + tempBuf := make([]byte, len(buf)) + reader := &offsetReader{buf: bytes.NewReader(buf)} + + newReader := reader.CloseAndCopy(0) + + n, err := reader.Read(tempBuf) + assert.Equal(t, n, 0) + assert.Equal(t, err, io.EOF) + + n, err = newReader.Read(tempBuf) + assert.Equal(t, n, len(buf)) + assert.Nil(t, err) + assert.Equal(t, buf, tempBuf) +} + +func TestOffsetReaderCloseAndCopyOffset(t *testing.T) { + buf := []byte("testData") + tempBuf := make([]byte, len(buf)) + reader := &offsetReader{buf: bytes.NewReader(buf)} + + newReader := reader.CloseAndCopy(4) + n, err := newReader.Read(tempBuf) + assert.Equal(t, n, len(buf)-4) + assert.Nil(t, err) + + expected := []byte{'D', 'a', 't', 'a', 0, 0, 0, 0} + assert.Equal(t, expected, tempBuf) +} + +func TestOffsetReaderRace(t *testing.T) { + wg := sync.WaitGroup{} + + f := func(reader *offsetReader) { + defer wg.Done() + var err error + buf := make([]byte, 1) + _, 
err = reader.Read(buf) + for err != io.EOF { + _, err = reader.Read(buf) + } + + } + + closeFn := func(reader *offsetReader) { + defer wg.Done() + time.Sleep(time.Duration(rand.Intn(20)+1) * time.Millisecond) + reader.Close() + } + for i := 0; i < 50; i++ { + reader := &offsetReader{buf: bytes.NewReader(make([]byte, 1024*1024))} + wg.Add(1) + go f(reader) + wg.Add(1) + go closeFn(reader) + } + wg.Wait() +} + +func BenchmarkOffsetReader(b *testing.B) { + bufSize := 1024 * 1024 * 100 + buf := make([]byte, bufSize) + reader := &offsetReader{buf: bytes.NewReader(buf)} + + tempBuf := make([]byte, 1024) + + for i := 0; i < b.N; i++ { + reader.Read(tempBuf) + } +} + +func BenchmarkBytesReader(b *testing.B) { + bufSize := 1024 * 1024 * 100 + buf := make([]byte, bufSize) + reader := bytes.NewReader(buf) + + tempBuf := make([]byte, 1024) + + for i := 0; i < b.N; i++ { + reader.Read(tempBuf) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go new file mode 100644 index 000000000..d04e95bd1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -0,0 +1,330 @@ +package request + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client/metadata" +) + +// A Request is the service request to be made. +type Request struct { + Config aws.Config + ClientInfo metadata.ClientInfo + Handlers Handlers + + Retryer + Time time.Time + ExpireTime time.Duration + Operation *Operation + HTTPRequest *http.Request + HTTPResponse *http.Response + Body io.ReadSeeker + BodyStart int64 // offset from beginning of Body that the request body starts + Params interface{} + Error error + Data interface{} + RequestID string + RetryCount int + Retryable *bool + RetryDelay time.Duration + NotHoist bool + SignedHeaderVals http.Header + LastSignedAt time.Time + + built bool +} + +// An Operation is the service API operation to be made. +type Operation struct { + Name string + HTTPMethod string + HTTPPath string + *Paginator +} + +// Paginator keeps track of pagination configuration for an API operation. +type Paginator struct { + InputTokens []string + OutputTokens []string + LimitToken string + TruncationToken string +} + +// New returns a new Request pointer for the service API +// operation and parameters. +// +// Params is any value of input parameters to be the request payload. +// Data is pointer value to an object which the request's response +// payload will be deserialized to. 
+func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
+ retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request {
+
+ method := operation.HTTPMethod
+ if method == "" {
+ method = "POST"
+ }
+ p := operation.HTTPPath
+ if p == "" {
+ p = "/"
+ }
+
+ httpReq, _ := http.NewRequest(method, "", nil)
+
+ var err error
+ httpReq.URL, err = url.Parse(clientInfo.Endpoint + p)
+ if err != nil {
+ httpReq.URL = &url.URL{}
+ err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err)
+ }
+
+ r := &Request{
+ Config: cfg,
+ ClientInfo: clientInfo,
+ Handlers: handlers.Copy(),
+
+ Retryer: retryer,
+ Time: time.Now(),
+ ExpireTime: 0,
+ Operation: operation,
+ HTTPRequest: httpReq,
+ Body: nil,
+ Params: params,
+ Error: err,
+ Data: data,
+ }
+ r.SetBufferBody([]byte{})
+
+ return r
+}
+
+// WillRetry returns true if the request can be retried.
+func (r *Request) WillRetry() bool {
+ return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
+}
+
+// ParamsFilled returns true if the request's parameters have been populated
+// and the parameters are valid. False is returned if no parameters are
+// provided or they are invalid.
+func (r *Request) ParamsFilled() bool {
+ return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid()
+}
+
+// DataFilled returns true if the request's data for response deserialization
+// target has been set and is valid. False is returned if data is not
+// set, or is invalid.
+func (r *Request) DataFilled() bool {
+ return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid()
+}
+
+// SetBufferBody will set the request's body bytes that will be sent to
+// the service API.
+func (r *Request) SetBufferBody(buf []byte) {
+ r.SetReaderBody(bytes.NewReader(buf))
+}
+
+// SetStringBody sets the body of the request to be backed by a string.
+func (r *Request) SetStringBody(s string) {
+ r.SetReaderBody(strings.NewReader(s))
+}
+
+// SetReaderBody will set the request's body reader.
+func (r *Request) SetReaderBody(reader io.ReadSeeker) {
+ r.HTTPRequest.Body = newOffsetReader(reader, 0)
+ r.Body = reader
+}
+
+// Presign returns the request's signed URL. Error will be returned
+// if the signing fails.
+func (r *Request) Presign(expireTime time.Duration) (string, error) {
+ r.ExpireTime = expireTime
+ r.NotHoist = false
+ r.Sign()
+ if r.Error != nil {
+ return "", r.Error
+ }
+ return r.HTTPRequest.URL.String(), nil
+}
+
+// PresignRequest behaves just like Presign, but hoists all headers and signs them.
+// Also returns the signed headers back to the user.
+func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) {
+ r.ExpireTime = expireTime
+ r.NotHoist = true
+ r.Sign()
+ if r.Error != nil {
+ return "", nil, r.Error
+ }
+ return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil
+}
+
+func debugLogReqError(r *Request, stage string, retrying bool, err error) {
+ if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) {
+ return
+ }
+
+ retryStr := "not retrying"
+ if retrying {
+ retryStr = "will retry"
+ }
+
+ r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v",
+ stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err))
+}
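Presign and PresignRequest are surfaced through the generated service clients. A sketch with S3 (bucket, key, and expiry are illustrative):

req, _ := svc.GetObjectRequest(&s3.GetObjectInput{
	Bucket: aws.String("my-bucket"),
	Key:    aws.String("my-key"),
})
// Sign() runs underneath; signing failures surface as err.
urlStr, err := req.Presign(15 * time.Minute)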
+// Build will build the request's object so it can be signed and sent
+// to the service. Build will also validate all the request's parameters.
+// Any additional build Handlers set on this request will be run
+// in the order they were set.
+//
+// The request will only be built once. Multiple calls to build will have
+// no effect.
+//
+// If any Validate or Build errors occur the build will stop and the error
+// which occurred will be returned.
+func (r *Request) Build() error {
+ if !r.built {
+ r.Handlers.Validate.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Validate Request", false, r.Error)
+ return r.Error
+ }
+ r.Handlers.Build.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Build Request", false, r.Error)
+ return r.Error
+ }
+ r.built = true
+ }
+
+ return r.Error
+}
+
+// Sign will sign the request returning error if errors are encountered.
+//
+// Sign will build the request prior to signing. All Sign Handlers will
+// be executed in the order they were set.
+func (r *Request) Sign() error {
+ r.Build()
+ if r.Error != nil {
+ debugLogReqError(r, "Build Request", false, r.Error)
+ return r.Error
+ }
+
+ r.Handlers.Sign.Run(r)
+ return r.Error
+}
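Generated service clients funnel every operation through New, Build, Sign, and Send. A sketch of that wiring, assuming cfg, info, handlers, and retryer come from the owning client (the operation name is illustrative):

op := &request.Operation{Name: "GetWidget", HTTPMethod: "POST", HTTPPath: "/"}
req := request.New(cfg, info, handlers, retryer, op, input, output)
if err := req.Send(); err != nil {
	// Retry/AfterRetry handlers have already run; err is final.
	return err
}
// output now holds the unmarshaled response.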
+// Send will send the request returning error if errors are encountered.
+//
+// Send will sign the request prior to sending. All Send Handlers will
+// be executed in the order they were set.
+//
+// Canceling a request is non-deterministic. If a request has been canceled,
+// then the transport will choose, randomly, one of the state channels during
+// reads or getting the connection.
+//
+// readLoop() and getConn(req *Request, cm connectMethod)
+// https://github.com/golang/go/blob/master/src/net/http/transport.go
+func (r *Request) Send() error {
+ for {
+ if aws.BoolValue(r.Retryable) {
+ if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
+ r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
+ r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount))
+ }
+
+ var body io.ReadCloser
+ if reader, ok := r.HTTPRequest.Body.(*offsetReader); ok {
+ body = reader.CloseAndCopy(r.BodyStart)
+ } else {
+ if r.Config.Logger != nil {
+ r.Config.Logger.Log("Request body type has been overwritten. May cause race conditions")
+ }
+ r.Body.Seek(r.BodyStart, 0)
+ body = ioutil.NopCloser(r.Body)
+ }
+
+ r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, body)
+ if r.HTTPResponse != nil && r.HTTPResponse.Body != nil {
+ // Closing response body. Since we are setting a new request to send off, this
+ // response will get squashed and leaked.
+ r.HTTPResponse.Body.Close()
+ }
+ }
+
+ r.Sign()
+ if r.Error != nil {
+ return r.Error
+ }
+
+ r.Retryable = nil
+
+ r.Handlers.Send.Run(r)
+ if r.Error != nil {
+ if strings.Contains(r.Error.Error(), "net/http: request canceled") {
+ return r.Error
+ }
+
+ err := r.Error
+ r.Handlers.Retry.Run(r)
+ r.Handlers.AfterRetry.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Send Request", false, r.Error)
+ return r.Error
+ }
+ debugLogReqError(r, "Send Request", true, err)
+ continue
+ }
+
+ r.Handlers.UnmarshalMeta.Run(r)
+ r.Handlers.ValidateResponse.Run(r)
+ if r.Error != nil {
+ err := r.Error
+ r.Handlers.UnmarshalError.Run(r)
+ r.Handlers.Retry.Run(r)
+ r.Handlers.AfterRetry.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Validate Response", false, r.Error)
+ return r.Error
+ }
+ debugLogReqError(r, "Validate Response", true, err)
+ continue
+ }
+
+ r.Handlers.Unmarshal.Run(r)
+ if r.Error != nil {
+ err := r.Error
+ r.Handlers.Retry.Run(r)
+ r.Handlers.AfterRetry.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Unmarshal Response", false, r.Error)
+ return r.Error
+ }
+ debugLogReqError(r, "Unmarshal Response", true, err)
+ continue
+ }
+
+ break
+ }
+
+ return nil
+}
+
+// AddToUserAgent adds the string to the end of the request's current user agent.
+func AddToUserAgent(r *Request, s string) {
+ curUA := r.HTTPRequest.Header.Get("User-Agent")
+ if len(curUA) > 0 {
+ s = curUA + " " + s
+ }
+ r.HTTPRequest.Header.Set("User-Agent", s)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_6_test.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_6_test.go
new file mode 100644
index 000000000..f72f6d457
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_6_test.go
@@ -0,0 +1,33 @@
+// +build go1.6
+
+package request_test
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/defaults"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/endpoints"
+)
+
+// Go versions 1.4 and 1.5 do not return an error. Version 1.5 will URL-encode
+// the URI while 1.4 will not.
+func TestRequestInvalidEndpoint(t *testing.T) {
+ endpoint, _ := endpoints.NormalizeEndpoint("localhost:80 ", "test-service", "test-region", false)
+ r := request.New(
+ aws.Config{},
+ metadata.ClientInfo{Endpoint: endpoint},
+ defaults.Handlers(),
+ client.DefaultRetryer{},
+ &request.Operation{},
+ nil,
+ nil,
+ )
+
+ assert.Error(t, r.Error)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
new file mode 100644
index 000000000..2939ec473
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
@@ -0,0 +1,104 @@
+package request
+
+import (
+ "reflect"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+)
+
+//type Paginater interface {
+// HasNextPage() bool
+// NextPage() *Request
+// EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error
+//}
+
+// HasNextPage returns true if this request has more pages of data available.
+func (r *Request) HasNextPage() bool {
+ return len(r.nextPageTokens()) > 0
+}
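HasNextPage and the token plumbing below are what drive both the generated *Pages methods and EachPage. A sketch with DynamoDB, mirroring the pagination tests later in this patch:

req, _ := db.ListTablesRequest(&dynamodb.ListTablesInput{Limit: aws.Int64(2)})
err := req.EachPage(func(p interface{}, lastPage bool) bool {
	out := p.(*dynamodb.ListTablesOutput)
	// consume out.TableNames; return false to stop paginating early
	return true
})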
+// nextPageTokens returns the tokens to use when asking for the next page of
+// data.
+func (r *Request) nextPageTokens() []interface{} {
+ if r.Operation.Paginator == nil {
+ return nil
+ }
+
+ if r.Operation.TruncationToken != "" {
+ tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken)
+ if len(tr) == 0 {
+ return nil
+ }
+
+ switch v := tr[0].(type) {
+ case *bool:
+ if !aws.BoolValue(v) {
+ return nil
+ }
+ case bool:
+ if !v {
+ return nil
+ }
+ }
+ }
+
+ tokens := []interface{}{}
+ tokenAdded := false
+ for _, outToken := range r.Operation.OutputTokens {
+ v, _ := awsutil.ValuesAtPath(r.Data, outToken)
+ if len(v) > 0 {
+ tokens = append(tokens, v[0])
+ tokenAdded = true
+ } else {
+ tokens = append(tokens, nil)
+ }
+ }
+ if !tokenAdded {
+ return nil
+ }
+
+ return tokens
+}
+
+// NextPage returns a new Request that can be executed to return the next
+// page of result data. Call .Send() on this request to execute it.
+func (r *Request) NextPage() *Request {
+ tokens := r.nextPageTokens()
+ if len(tokens) == 0 {
+ return nil
+ }
+
+ data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface()
+ nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data)
+ for i, intok := range nr.Operation.InputTokens {
+ awsutil.SetValueAtPath(nr.Params, intok, tokens[i])
+ }
+ return nr
+}
+
+// EachPage iterates over each page of a paginated request object. The fn
+// parameter should be a function with the following sample signature:
+//
+// func(page *T, lastPage bool) bool {
+// return true // return false to stop iterating
+// }
+//
+// Where "T" is the structure type matching the output structure of the given
+// operation. For example, a request object generated by
+// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput
+// as the structure "T". The lastPage value represents whether the page is
+// the last page of data or not. This function should return true to keep
+// iterating or false to stop.
+func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error { + for page := r; page != nil; page = page.NextPage() { + if err := page.Send(); err != nil { + return err + } + if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage { + return page.Error + } + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination_test.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination_test.go new file mode 100644 index 000000000..725ea25cb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination_test.go @@ -0,0 +1,455 @@ +package request_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/aws/aws-sdk-go/service/route53" + "github.com/aws/aws-sdk-go/service/s3" +) + +// Use DynamoDB methods for simplicity +func TestPaginationQueryPage(t *testing.T) { + db := dynamodb.New(unit.Session) + tokens, pages, numPages, gotToEnd := []map[string]*dynamodb.AttributeValue{}, []map[string]*dynamodb.AttributeValue{}, 0, false + + reqNum := 0 + resps := []*dynamodb.QueryOutput{ + { + LastEvaluatedKey: map[string]*dynamodb.AttributeValue{"key": {S: aws.String("key1")}}, + Count: aws.Int64(1), + Items: []map[string]*dynamodb.AttributeValue{ + { + "key": {S: aws.String("key1")}, + }, + }, + }, + { + LastEvaluatedKey: map[string]*dynamodb.AttributeValue{"key": {S: aws.String("key2")}}, + Count: aws.Int64(1), + Items: []map[string]*dynamodb.AttributeValue{ + { + "key": {S: aws.String("key2")}, + }, + }, + }, + { + LastEvaluatedKey: map[string]*dynamodb.AttributeValue{}, + Count: aws.Int64(1), + Items: []map[string]*dynamodb.AttributeValue{ + { + "key": {S: aws.String("key3")}, + }, + }, + }, + } + + db.Handlers.Send.Clear() // mock sending + db.Handlers.Unmarshal.Clear() + db.Handlers.UnmarshalMeta.Clear() + db.Handlers.ValidateResponse.Clear() + db.Handlers.Build.PushBack(func(r *request.Request) { + in := r.Params.(*dynamodb.QueryInput) + if in == nil { + tokens = append(tokens, nil) + } else if len(in.ExclusiveStartKey) != 0 { + tokens = append(tokens, in.ExclusiveStartKey) + } + }) + db.Handlers.Unmarshal.PushBack(func(r *request.Request) { + r.Data = resps[reqNum] + reqNum++ + }) + + params := &dynamodb.QueryInput{ + Limit: aws.Int64(2), + TableName: aws.String("tablename"), + } + err := db.QueryPages(params, func(p *dynamodb.QueryOutput, last bool) bool { + numPages++ + for _, item := range p.Items { + pages = append(pages, item) + } + if last { + if gotToEnd { + assert.Fail(t, "last=true happened twice") + } + gotToEnd = true + } + return true + }) + assert.Nil(t, err) + + assert.Equal(t, + []map[string]*dynamodb.AttributeValue{ + {"key": {S: aws.String("key1")}}, + {"key": {S: aws.String("key2")}}, + }, tokens) + assert.Equal(t, + []map[string]*dynamodb.AttributeValue{ + {"key": {S: aws.String("key1")}}, + {"key": {S: aws.String("key2")}}, + {"key": {S: aws.String("key3")}}, + }, pages) + assert.Equal(t, 3, numPages) + assert.True(t, gotToEnd) + assert.Nil(t, params.ExclusiveStartKey) +} + +// Use DynamoDB methods for simplicity +func TestPagination(t *testing.T) { + db := dynamodb.New(unit.Session) + tokens, pages, numPages, gotToEnd := []string{}, []string{}, 0, false + + reqNum := 0 + resps := []*dynamodb.ListTablesOutput{ + {TableNames: []*string{aws.String("Table1"), 
aws.String("Table2")}, LastEvaluatedTableName: aws.String("Table2")}, + {TableNames: []*string{aws.String("Table3"), aws.String("Table4")}, LastEvaluatedTableName: aws.String("Table4")}, + {TableNames: []*string{aws.String("Table5")}}, + } + + db.Handlers.Send.Clear() // mock sending + db.Handlers.Unmarshal.Clear() + db.Handlers.UnmarshalMeta.Clear() + db.Handlers.ValidateResponse.Clear() + db.Handlers.Build.PushBack(func(r *request.Request) { + in := r.Params.(*dynamodb.ListTablesInput) + if in == nil { + tokens = append(tokens, "") + } else if in.ExclusiveStartTableName != nil { + tokens = append(tokens, *in.ExclusiveStartTableName) + } + }) + db.Handlers.Unmarshal.PushBack(func(r *request.Request) { + r.Data = resps[reqNum] + reqNum++ + }) + + params := &dynamodb.ListTablesInput{Limit: aws.Int64(2)} + err := db.ListTablesPages(params, func(p *dynamodb.ListTablesOutput, last bool) bool { + numPages++ + for _, t := range p.TableNames { + pages = append(pages, *t) + } + if last { + if gotToEnd { + assert.Fail(t, "last=true happened twice") + } + gotToEnd = true + } + return true + }) + + assert.Equal(t, []string{"Table2", "Table4"}, tokens) + assert.Equal(t, []string{"Table1", "Table2", "Table3", "Table4", "Table5"}, pages) + assert.Equal(t, 3, numPages) + assert.True(t, gotToEnd) + assert.Nil(t, err) + assert.Nil(t, params.ExclusiveStartTableName) +} + +// Use DynamoDB methods for simplicity +func TestPaginationEachPage(t *testing.T) { + db := dynamodb.New(unit.Session) + tokens, pages, numPages, gotToEnd := []string{}, []string{}, 0, false + + reqNum := 0 + resps := []*dynamodb.ListTablesOutput{ + {TableNames: []*string{aws.String("Table1"), aws.String("Table2")}, LastEvaluatedTableName: aws.String("Table2")}, + {TableNames: []*string{aws.String("Table3"), aws.String("Table4")}, LastEvaluatedTableName: aws.String("Table4")}, + {TableNames: []*string{aws.String("Table5")}}, + } + + db.Handlers.Send.Clear() // mock sending + db.Handlers.Unmarshal.Clear() + db.Handlers.UnmarshalMeta.Clear() + db.Handlers.ValidateResponse.Clear() + db.Handlers.Build.PushBack(func(r *request.Request) { + in := r.Params.(*dynamodb.ListTablesInput) + if in == nil { + tokens = append(tokens, "") + } else if in.ExclusiveStartTableName != nil { + tokens = append(tokens, *in.ExclusiveStartTableName) + } + }) + db.Handlers.Unmarshal.PushBack(func(r *request.Request) { + r.Data = resps[reqNum] + reqNum++ + }) + + params := &dynamodb.ListTablesInput{Limit: aws.Int64(2)} + req, _ := db.ListTablesRequest(params) + err := req.EachPage(func(p interface{}, last bool) bool { + numPages++ + for _, t := range p.(*dynamodb.ListTablesOutput).TableNames { + pages = append(pages, *t) + } + if last { + if gotToEnd { + assert.Fail(t, "last=true happened twice") + } + gotToEnd = true + } + + return true + }) + + assert.Equal(t, []string{"Table2", "Table4"}, tokens) + assert.Equal(t, []string{"Table1", "Table2", "Table3", "Table4", "Table5"}, pages) + assert.Equal(t, 3, numPages) + assert.True(t, gotToEnd) + assert.Nil(t, err) +} + +// Use DynamoDB methods for simplicity +func TestPaginationEarlyExit(t *testing.T) { + db := dynamodb.New(unit.Session) + numPages, gotToEnd := 0, false + + reqNum := 0 + resps := []*dynamodb.ListTablesOutput{ + {TableNames: []*string{aws.String("Table1"), aws.String("Table2")}, LastEvaluatedTableName: aws.String("Table2")}, + {TableNames: []*string{aws.String("Table3"), aws.String("Table4")}, LastEvaluatedTableName: aws.String("Table4")}, + {TableNames: []*string{aws.String("Table5")}}, + } + + 
db.Handlers.Send.Clear() // mock sending + db.Handlers.Unmarshal.Clear() + db.Handlers.UnmarshalMeta.Clear() + db.Handlers.ValidateResponse.Clear() + db.Handlers.Unmarshal.PushBack(func(r *request.Request) { + r.Data = resps[reqNum] + reqNum++ + }) + + params := &dynamodb.ListTablesInput{Limit: aws.Int64(2)} + err := db.ListTablesPages(params, func(p *dynamodb.ListTablesOutput, last bool) bool { + numPages++ + if numPages == 2 { + return false + } + if last { + if gotToEnd { + assert.Fail(t, "last=true happened twice") + } + gotToEnd = true + } + return true + }) + + assert.Equal(t, 2, numPages) + assert.False(t, gotToEnd) + assert.Nil(t, err) +} + +func TestSkipPagination(t *testing.T) { + client := s3.New(unit.Session) + client.Handlers.Send.Clear() // mock sending + client.Handlers.Unmarshal.Clear() + client.Handlers.UnmarshalMeta.Clear() + client.Handlers.ValidateResponse.Clear() + client.Handlers.Unmarshal.PushBack(func(r *request.Request) { + r.Data = &s3.HeadBucketOutput{} + }) + + req, _ := client.HeadBucketRequest(&s3.HeadBucketInput{Bucket: aws.String("bucket")}) + + numPages, gotToEnd := 0, false + req.EachPage(func(p interface{}, last bool) bool { + numPages++ + if last { + gotToEnd = true + } + return true + }) + assert.Equal(t, 1, numPages) + assert.True(t, gotToEnd) +} + +// Use S3 for simplicity +func TestPaginationTruncation(t *testing.T) { + client := s3.New(unit.Session) + + reqNum := 0 + resps := []*s3.ListObjectsOutput{ + {IsTruncated: aws.Bool(true), Contents: []*s3.Object{{Key: aws.String("Key1")}}}, + {IsTruncated: aws.Bool(true), Contents: []*s3.Object{{Key: aws.String("Key2")}}}, + {IsTruncated: aws.Bool(false), Contents: []*s3.Object{{Key: aws.String("Key3")}}}, + {IsTruncated: aws.Bool(true), Contents: []*s3.Object{{Key: aws.String("Key4")}}}, + } + + client.Handlers.Send.Clear() // mock sending + client.Handlers.Unmarshal.Clear() + client.Handlers.UnmarshalMeta.Clear() + client.Handlers.ValidateResponse.Clear() + client.Handlers.Unmarshal.PushBack(func(r *request.Request) { + r.Data = resps[reqNum] + reqNum++ + }) + + params := &s3.ListObjectsInput{Bucket: aws.String("bucket")} + + results := []string{} + err := client.ListObjectsPages(params, func(p *s3.ListObjectsOutput, last bool) bool { + results = append(results, *p.Contents[0].Key) + return true + }) + + assert.Equal(t, []string{"Key1", "Key2", "Key3"}, results) + assert.Nil(t, err) + + // Try again without truncation token at all + reqNum = 0 + resps[1].IsTruncated = nil + resps[2].IsTruncated = aws.Bool(true) + results = []string{} + err = client.ListObjectsPages(params, func(p *s3.ListObjectsOutput, last bool) bool { + results = append(results, *p.Contents[0].Key) + return true + }) + + assert.Equal(t, []string{"Key1", "Key2"}, results) + assert.Nil(t, err) +} + +func TestPaginationNilToken(t *testing.T) { + client := route53.New(unit.Session) + + reqNum := 0 + resps := []*route53.ListResourceRecordSetsOutput{ + { + ResourceRecordSets: []*route53.ResourceRecordSet{ + {Name: aws.String("first.example.com.")}, + }, + IsTruncated: aws.Bool(true), + NextRecordName: aws.String("second.example.com."), + NextRecordType: aws.String("MX"), + NextRecordIdentifier: aws.String("second"), + MaxItems: aws.String("1"), + }, + { + ResourceRecordSets: []*route53.ResourceRecordSet{ + {Name: aws.String("second.example.com.")}, + }, + IsTruncated: aws.Bool(true), + NextRecordName: aws.String("third.example.com."), + NextRecordType: aws.String("MX"), + MaxItems: aws.String("1"), + }, + { + ResourceRecordSets: 
[]*route53.ResourceRecordSet{ + {Name: aws.String("third.example.com.")}, + }, + IsTruncated: aws.Bool(false), + MaxItems: aws.String("1"), + }, + } + client.Handlers.Send.Clear() // mock sending + client.Handlers.Unmarshal.Clear() + client.Handlers.UnmarshalMeta.Clear() + client.Handlers.ValidateResponse.Clear() + + idents := []string{} + client.Handlers.Build.PushBack(func(r *request.Request) { + p := r.Params.(*route53.ListResourceRecordSetsInput) + idents = append(idents, aws.StringValue(p.StartRecordIdentifier)) + + }) + client.Handlers.Unmarshal.PushBack(func(r *request.Request) { + r.Data = resps[reqNum] + reqNum++ + }) + + params := &route53.ListResourceRecordSetsInput{ + HostedZoneId: aws.String("id-zone"), + } + + results := []string{} + err := client.ListResourceRecordSetsPages(params, func(p *route53.ListResourceRecordSetsOutput, last bool) bool { + results = append(results, *p.ResourceRecordSets[0].Name) + return true + }) + + assert.NoError(t, err) + assert.Equal(t, []string{"", "second", ""}, idents) + assert.Equal(t, []string{"first.example.com.", "second.example.com.", "third.example.com."}, results) +} + +// Benchmarks +var benchResps = []*dynamodb.ListTablesOutput{ + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE")}}, +} + +var benchDb = func() *dynamodb.DynamoDB { + db := dynamodb.New(unit.Session) + db.Handlers.Send.Clear() // mock sending + db.Handlers.Unmarshal.Clear() + db.Handlers.UnmarshalMeta.Clear() + db.Handlers.ValidateResponse.Clear() + return db +} + +func BenchmarkCodegenIterator(b *testing.B) { + reqNum := 0 + db := benchDb() + db.Handlers.Unmarshal.PushBack(func(r *request.Request) { + r.Data = benchResps[reqNum] + reqNum++ + }) + + input := &dynamodb.ListTablesInput{Limit: aws.Int64(2)} + iter := func(fn func(*dynamodb.ListTablesOutput, bool) bool) error { + page, _ := db.ListTablesRequest(input) + for ; page != nil; page = page.NextPage() { + page.Send() + out := page.Data.(*dynamodb.ListTablesOutput) + if result := fn(out, !page.HasNextPage()); page.Error != nil || !result { + return page.Error + } + } + return nil + } + + for i := 0; i < b.N; i++ { + 
reqNum = 0 + iter(func(p *dynamodb.ListTablesOutput, last bool) bool { + return true + }) + } +} + +func BenchmarkEachPageIterator(b *testing.B) { + reqNum := 0 + db := benchDb() + db.Handlers.Unmarshal.PushBack(func(r *request.Request) { + r.Data = benchResps[reqNum] + reqNum++ + }) + + input := &dynamodb.ListTablesInput{Limit: aws.Int64(2)} + for i := 0; i < b.N; i++ { + reqNum = 0 + req, _ := db.ListTablesRequest(input) + req.EachPage(func(p interface{}, last bool) bool { + return true + }) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_test.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_test.go new file mode 100644 index 000000000..16bdd6159 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_test.go @@ -0,0 +1,380 @@ +package request_test + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "runtime" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting" +) + +type testData struct { + Data string +} + +func body(str string) io.ReadCloser { + return ioutil.NopCloser(bytes.NewReader([]byte(str))) +} + +func unmarshal(req *request.Request) { + defer req.HTTPResponse.Body.Close() + if req.Data != nil { + json.NewDecoder(req.HTTPResponse.Body).Decode(req.Data) + } + return +} + +func unmarshalError(req *request.Request) { + bodyBytes, err := ioutil.ReadAll(req.HTTPResponse.Body) + if err != nil { + req.Error = awserr.New("UnmarshaleError", req.HTTPResponse.Status, err) + return + } + if len(bodyBytes) == 0 { + req.Error = awserr.NewRequestFailure( + awserr.New("UnmarshaleError", req.HTTPResponse.Status, fmt.Errorf("empty body")), + req.HTTPResponse.StatusCode, + "", + ) + return + } + var jsonErr jsonErrorResponse + if err := json.Unmarshal(bodyBytes, &jsonErr); err != nil { + req.Error = awserr.New("UnmarshaleError", "JSON unmarshal", err) + return + } + req.Error = awserr.NewRequestFailure( + awserr.New(jsonErr.Code, jsonErr.Message, nil), + req.HTTPResponse.StatusCode, + "", + ) +} + +type jsonErrorResponse struct { + Code string `json:"__type"` + Message string `json:"message"` +} + +// test that retries occur for 5xx status codes +func TestRequestRecoverRetry5xx(t *testing.T) { + reqNum := 0 + reqs := []http.Response{ + {StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)}, + {StatusCode: 501, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)}, + {StatusCode: 200, Body: body(`{"data":"valid"}`)}, + } + + s := awstesting.NewClient(aws.NewConfig().WithMaxRetries(10)) + s.Handlers.Validate.Clear() + s.Handlers.Unmarshal.PushBack(unmarshal) + s.Handlers.UnmarshalError.PushBack(unmarshalError) + s.Handlers.Send.Clear() // mock sending + s.Handlers.Send.PushBack(func(r *request.Request) { + r.HTTPResponse = &reqs[reqNum] + reqNum++ + }) + out := &testData{} + r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out) + err := r.Send() + assert.Nil(t, err) + assert.Equal(t, 2, int(r.RetryCount)) + assert.Equal(t, "valid", out.Data) +} + +// test that retries occur for 4xx status codes with a response type that can be retried - see `shouldRetry` +func TestRequestRecoverRetry4xxRetryable(t *testing.T) { + reqNum := 0 + reqs := []http.Response{ + {StatusCode: 400, Body: body(`{"__type":"Throttling","message":"Rate 
exceeded."}`)}, + {StatusCode: 429, Body: body(`{"__type":"ProvisionedThroughputExceededException","message":"Rate exceeded."}`)}, + {StatusCode: 200, Body: body(`{"data":"valid"}`)}, + } + + s := awstesting.NewClient(aws.NewConfig().WithMaxRetries(10)) + s.Handlers.Validate.Clear() + s.Handlers.Unmarshal.PushBack(unmarshal) + s.Handlers.UnmarshalError.PushBack(unmarshalError) + s.Handlers.Send.Clear() // mock sending + s.Handlers.Send.PushBack(func(r *request.Request) { + r.HTTPResponse = &reqs[reqNum] + reqNum++ + }) + out := &testData{} + r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out) + err := r.Send() + assert.Nil(t, err) + assert.Equal(t, 2, int(r.RetryCount)) + assert.Equal(t, "valid", out.Data) +} + +// test that retries don't occur for 4xx status codes with a response type that can't be retried +func TestRequest4xxUnretryable(t *testing.T) { + s := awstesting.NewClient(aws.NewConfig().WithMaxRetries(10)) + s.Handlers.Validate.Clear() + s.Handlers.Unmarshal.PushBack(unmarshal) + s.Handlers.UnmarshalError.PushBack(unmarshalError) + s.Handlers.Send.Clear() // mock sending + s.Handlers.Send.PushBack(func(r *request.Request) { + r.HTTPResponse = &http.Response{StatusCode: 401, Body: body(`{"__type":"SignatureDoesNotMatch","message":"Signature does not match."}`)} + }) + out := &testData{} + r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out) + err := r.Send() + assert.NotNil(t, err) + if e, ok := err.(awserr.RequestFailure); ok { + assert.Equal(t, 401, e.StatusCode()) + } else { + assert.Fail(t, "Expected error to be a service failure") + } + assert.Equal(t, "SignatureDoesNotMatch", err.(awserr.Error).Code()) + assert.Equal(t, "Signature does not match.", err.(awserr.Error).Message()) + assert.Equal(t, 0, int(r.RetryCount)) +} + +func TestRequestExhaustRetries(t *testing.T) { + delays := []time.Duration{} + sleepDelay := func(delay time.Duration) { + delays = append(delays, delay) + } + + reqNum := 0 + reqs := []http.Response{ + {StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)}, + {StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)}, + {StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)}, + {StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)}, + } + + s := awstesting.NewClient(aws.NewConfig().WithSleepDelay(sleepDelay)) + s.Handlers.Validate.Clear() + s.Handlers.Unmarshal.PushBack(unmarshal) + s.Handlers.UnmarshalError.PushBack(unmarshalError) + s.Handlers.Send.Clear() // mock sending + s.Handlers.Send.PushBack(func(r *request.Request) { + r.HTTPResponse = &reqs[reqNum] + reqNum++ + }) + r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, nil) + err := r.Send() + assert.NotNil(t, err) + if e, ok := err.(awserr.RequestFailure); ok { + assert.Equal(t, 500, e.StatusCode()) + } else { + assert.Fail(t, "Expected error to be a service failure") + } + assert.Equal(t, "UnknownError", err.(awserr.Error).Code()) + assert.Equal(t, "An error occurred.", err.(awserr.Error).Message()) + assert.Equal(t, 3, int(r.RetryCount)) + + expectDelays := []struct{ min, max time.Duration }{{30, 59}, {60, 118}, {120, 236}} + for i, v := range delays { + min := expectDelays[i].min * time.Millisecond + max := expectDelays[i].max * time.Millisecond + assert.True(t, min <= v && v <= max, + "Expect delay to be within range, i:%d, v:%s, min:%s, max:%s", i, v, min, max) + } +} + +// test that the request is retried after the 
credentials are expired. +func TestRequestRecoverExpiredCreds(t *testing.T) { + reqNum := 0 + reqs := []http.Response{ + {StatusCode: 400, Body: body(`{"__type":"ExpiredTokenException","message":"expired token"}`)}, + {StatusCode: 200, Body: body(`{"data":"valid"}`)}, + } + + s := awstesting.NewClient(&aws.Config{MaxRetries: aws.Int(10), Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "")}) + s.Handlers.Validate.Clear() + s.Handlers.Unmarshal.PushBack(unmarshal) + s.Handlers.UnmarshalError.PushBack(unmarshalError) + + credExpiredBeforeRetry := false + credExpiredAfterRetry := false + + s.Handlers.AfterRetry.PushBack(func(r *request.Request) { + credExpiredAfterRetry = r.Config.Credentials.IsExpired() + }) + + s.Handlers.Sign.Clear() + s.Handlers.Sign.PushBack(func(r *request.Request) { + r.Config.Credentials.Get() + }) + s.Handlers.Send.Clear() // mock sending + s.Handlers.Send.PushBack(func(r *request.Request) { + r.HTTPResponse = &reqs[reqNum] + reqNum++ + }) + out := &testData{} + r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out) + err := r.Send() + assert.Nil(t, err) + + assert.False(t, credExpiredBeforeRetry, "Expect valid creds before retry check") + assert.True(t, credExpiredAfterRetry, "Expect expired creds after retry check") + assert.False(t, s.Config.Credentials.IsExpired(), "Expect valid creds after cred expired recovery") + + assert.Equal(t, 1, int(r.RetryCount)) + assert.Equal(t, "valid", out.Data) +} + +func TestMakeAddtoUserAgentHandler(t *testing.T) { + fn := request.MakeAddToUserAgentHandler("name", "version", "extra1", "extra2") + r := &request.Request{HTTPRequest: &http.Request{Header: http.Header{}}} + r.HTTPRequest.Header.Set("User-Agent", "foo/bar") + fn(r) + + assert.Equal(t, "foo/bar name/version (extra1; extra2)", r.HTTPRequest.Header.Get("User-Agent")) +} + +func TestMakeAddtoUserAgentFreeFormHandler(t *testing.T) { + fn := request.MakeAddToUserAgentFreeFormHandler("name/version (extra1; extra2)") + r := &request.Request{HTTPRequest: &http.Request{Header: http.Header{}}} + r.HTTPRequest.Header.Set("User-Agent", "foo/bar") + fn(r) + + assert.Equal(t, "foo/bar name/version (extra1; extra2)", r.HTTPRequest.Header.Get("User-Agent")) +} + +func TestRequestUserAgent(t *testing.T) { + s := awstesting.NewClient(&aws.Config{Region: aws.String("us-east-1")}) + // s.Handlers.Validate.Clear() + + req := s.NewRequest(&request.Operation{Name: "Operation"}, nil, &testData{}) + req.HTTPRequest.Header.Set("User-Agent", "foo/bar") + assert.NoError(t, req.Build()) + + expectUA := fmt.Sprintf("foo/bar %s/%s (%s; %s; %s)", + aws.SDKName, aws.SDKVersion, runtime.Version(), runtime.GOOS, runtime.GOARCH) + assert.Equal(t, expectUA, req.HTTPRequest.Header.Get("User-Agent")) +} + +func TestRequestThrottleRetries(t *testing.T) { + delays := []time.Duration{} + sleepDelay := func(delay time.Duration) { + delays = append(delays, delay) + } + + reqNum := 0 + reqs := []http.Response{ + {StatusCode: 500, Body: body(`{"__type":"Throttling","message":"An error occurred."}`)}, + {StatusCode: 500, Body: body(`{"__type":"Throttling","message":"An error occurred."}`)}, + {StatusCode: 500, Body: body(`{"__type":"Throttling","message":"An error occurred."}`)}, + {StatusCode: 500, Body: body(`{"__type":"Throttling","message":"An error occurred."}`)}, + } + + s := awstesting.NewClient(aws.NewConfig().WithSleepDelay(sleepDelay)) + s.Handlers.Validate.Clear() + s.Handlers.Unmarshal.PushBack(unmarshal) + s.Handlers.UnmarshalError.PushBack(unmarshalError) + 
s.Handlers.Send.Clear() // mock sending + s.Handlers.Send.PushBack(func(r *request.Request) { + r.HTTPResponse = &reqs[reqNum] + reqNum++ + }) + r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, nil) + err := r.Send() + assert.NotNil(t, err) + if e, ok := err.(awserr.RequestFailure); ok { + assert.Equal(t, 500, e.StatusCode()) + } else { + assert.Fail(t, "Expected error to be a service failure") + } + assert.Equal(t, "Throttling", err.(awserr.Error).Code()) + assert.Equal(t, "An error occurred.", err.(awserr.Error).Message()) + assert.Equal(t, 3, int(r.RetryCount)) + + expectDelays := []struct{ min, max time.Duration }{{500, 999}, {1000, 1998}, {2000, 3996}} + for i, v := range delays { + min := expectDelays[i].min * time.Millisecond + max := expectDelays[i].max * time.Millisecond + assert.True(t, min <= v && v <= max, + "Expect delay to be within range, i:%d, v:%s, min:%s, max:%s", i, v, min, max) + } +} + +// test that retries occur for request timeouts when response.Body can be nil +func TestRequestRecoverTimeoutWithNilBody(t *testing.T) { + reqNum := 0 + reqs := []*http.Response{ + {StatusCode: 0, Body: nil}, // body can be nil when requests time out + {StatusCode: 200, Body: body(`{"data":"valid"}`)}, + } + errors := []error{ + errors.New("timeout"), nil, + } + + s := awstesting.NewClient(aws.NewConfig().WithMaxRetries(10)) + s.Handlers.Validate.Clear() + s.Handlers.Unmarshal.PushBack(unmarshal) + s.Handlers.UnmarshalError.PushBack(unmarshalError) + s.Handlers.AfterRetry.Clear() // force retry on all errors + s.Handlers.AfterRetry.PushBack(func(r *request.Request) { + if r.Error != nil { + r.Error = nil + r.Retryable = aws.Bool(true) + r.RetryCount++ + } + }) + s.Handlers.Send.Clear() // mock sending + s.Handlers.Send.PushBack(func(r *request.Request) { + r.HTTPResponse = reqs[reqNum] + r.Error = errors[reqNum] + reqNum++ + }) + out := &testData{} + r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out) + err := r.Send() + assert.Nil(t, err) + assert.Equal(t, 1, int(r.RetryCount)) + assert.Equal(t, "valid", out.Data) +} + +func TestRequestRecoverTimeoutWithNilResponse(t *testing.T) { + reqNum := 0 + reqs := []*http.Response{ + nil, + {StatusCode: 200, Body: body(`{"data":"valid"}`)}, + } + errors := []error{ + errors.New("timeout"), + nil, + } + + s := awstesting.NewClient(aws.NewConfig().WithMaxRetries(10)) + s.Handlers.Validate.Clear() + s.Handlers.Unmarshal.PushBack(unmarshal) + s.Handlers.UnmarshalError.PushBack(unmarshalError) + s.Handlers.AfterRetry.Clear() // force retry on all errors + s.Handlers.AfterRetry.PushBack(func(r *request.Request) { + if r.Error != nil { + r.Error = nil + r.Retryable = aws.Bool(true) + r.RetryCount++ + } + }) + s.Handlers.Send.Clear() // mock sending + s.Handlers.Send.PushBack(func(r *request.Request) { + r.HTTPResponse = reqs[reqNum] + r.Error = errors[reqNum] + reqNum++ + }) + out := &testData{} + r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out) + err := r.Send() + assert.Nil(t, err) + assert.Equal(t, 1, int(r.RetryCount)) + assert.Equal(t, "valid", out.Data) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go new file mode 100644 index 000000000..8cc8b015a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -0,0 +1,101 @@ +package request + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// Retryer is an interface to control retry logic for a given 
service. +// The default implementation used by most services is the service.DefaultRetryer +// structure, which contains basic retry logic using exponential backoff. +type Retryer interface { + RetryRules(*Request) time.Duration + ShouldRetry(*Request) bool + MaxRetries() int +} + +// WithRetryer sets a config Retryer value to the given Config returning it +// for chaining. +func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { + cfg.Retryer = retryer + return cfg +} + +// retryableCodes is a collection of service response codes which are retry-able +// without any further action. +var retryableCodes = map[string]struct{}{ + "RequestError": {}, + "RequestTimeout": {}, +} + +var throttleCodes = map[string]struct{}{ + "ProvisionedThroughputExceededException": {}, + "Throttling": {}, + "ThrottlingException": {}, + "RequestLimitExceeded": {}, + "RequestThrottled": {}, + "LimitExceededException": {}, // Deleting 10+ DynamoDb tables at once + "TooManyRequestsException": {}, // Lambda functions +} + +// credsExpiredCodes is a collection of error codes which signify the credentials +// need to be refreshed. Expired tokens require refreshing of credentials, and +// resigning before the request can be retried. +var credsExpiredCodes = map[string]struct{}{ + "ExpiredToken": {}, + "ExpiredTokenException": {}, + "RequestExpired": {}, // EC2 Only +} + +func isCodeThrottle(code string) bool { + _, ok := throttleCodes[code] + return ok +} + +func isCodeRetryable(code string) bool { + if _, ok := retryableCodes[code]; ok { + return true + } + + return isCodeExpiredCreds(code) +} + +func isCodeExpiredCreds(code string) bool { + _, ok := credsExpiredCodes[code] + return ok +} + +// IsErrorRetryable returns whether the error is retryable, based on its Code. +// Returns false if the request has no Error set. +func (r *Request) IsErrorRetryable() bool { + if r.Error != nil { + if err, ok := r.Error.(awserr.Error); ok { + return isCodeRetryable(err.Code()) + } + } + return false +} + +// IsErrorThrottle returns whether the error is to be throttled based on its code. +// Returns false if the request has no Error set +func (r *Request) IsErrorThrottle() bool { + if r.Error != nil { + if err, ok := r.Error.(awserr.Error); ok { + return isCodeThrottle(err.Code()) + } + } + return false +} + +// IsErrorExpired returns whether the error code is a credential expiry error. +// Returns false if the request has no Error set. 
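+//
+// As an illustrative sketch (not part of this change), a custom Retryer
+// could lean on these predicates; the myRetryer type is hypothetical:
+//
+//	type myRetryer struct{ client.DefaultRetryer }
+//
+//	func (d myRetryer) ShouldRetry(r *request.Request) bool {
+//		// Retry throttle and expired-credential errors, otherwise
+//		// defer to the default behavior.
+//		if r.IsErrorThrottle() || r.IsErrorExpired() {
+//			return true
+//		}
+//		return d.DefaultRetryer.ShouldRetry(r)
+//	}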
+func (r *Request) IsErrorExpired() bool { + if r.Error != nil { + if err, ok := r.Error.(awserr.Error); ok { + return isCodeExpiredCreds(err.Code()) + } + } + return false +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer_test.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer_test.go new file mode 100644 index 000000000..b1926e3d6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer_test.go @@ -0,0 +1,16 @@ +package request + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +func TestRequestThrottling(t *testing.T) { + req := Request{} + + req.Error = awserr.New("Throttling", "", nil) + assert.True(t, req.IsErrorThrottle()) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go new file mode 100644 index 000000000..2520286b7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go @@ -0,0 +1,234 @@ +package request + +import ( + "bytes" + "fmt" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +const ( + // InvalidParameterErrCode is the error code for invalid parameters errors + InvalidParameterErrCode = "InvalidParameter" + // ParamRequiredErrCode is the error code for required parameter errors + ParamRequiredErrCode = "ParamRequiredError" + // ParamMinValueErrCode is the error code for fields with too low of a + // number value. + ParamMinValueErrCode = "ParamMinValueError" + // ParamMinLenErrCode is the error code for fields without enough elements. + ParamMinLenErrCode = "ParamMinLenError" +) + +// Validator provides a way for types to perform validation logic on their +// input values that external code can use to determine if a type's values +// are valid. +type Validator interface { + Validate() error +} + +// An ErrInvalidParams provides wrapping of invalid parameter errors found when +// validating API operation input parameters. +type ErrInvalidParams struct { + // Context is the base context of the invalid parameter group. + Context string + errs []ErrInvalidParam +} + +// Add adds a new invalid parameter error to the collection of invalid +// parameters. The context of the invalid parameter will be updated to reflect +// this collection. +func (e *ErrInvalidParams) Add(err ErrInvalidParam) { + err.SetContext(e.Context) + e.errs = append(e.errs, err) +} + +// AddNested adds the invalid parameter errors from another ErrInvalidParams +// value into this collection. The nested errors will have their nested context +// updated and base context to reflect the merging. +// +// Use for nested validations errors. +func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) { + for _, err := range nested.errs { + err.SetContext(e.Context) + err.AddNestedContext(nestedCtx) + e.errs = append(e.errs, err) + } +} + +// Len returns the number of invalid parameter errors +func (e ErrInvalidParams) Len() int { + return len(e.errs) +} + +// Code returns the code of the error +func (e ErrInvalidParams) Code() string { + return InvalidParameterErrCode +} + +// Message returns the message of the error +func (e ErrInvalidParams) Message() string { + return fmt.Sprintf("%d validation error(s) found.", len(e.errs)) +} + +// Error returns the string formatted form of the invalid parameters. 
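+//
+// As a sketch (the field names are hypothetical), a collection holding a
+// required-field error and a min-length error would render as:
+//
+//	InvalidParameter: 2 validation error(s) found.
+//	- missing required field, PutObjectInput.Bucket.
+//	- minimum field size of 1, PutObjectInput.Key.
+//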
+func (e ErrInvalidParams) Error() string {
+	w := &bytes.Buffer{}
+	fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message())
+
+	for _, err := range e.errs {
+		fmt.Fprintf(w, "- %s\n", err.Message())
+	}
+
+	return w.String()
+}
+
+// OrigErr returns the invalid parameters as an awserr.BatchedErrors value
+func (e ErrInvalidParams) OrigErr() error {
+	return awserr.NewBatchError(
+		InvalidParameterErrCode, e.Message(), e.OrigErrs())
+}
+
+// OrigErrs returns a slice of the invalid parameters
+func (e ErrInvalidParams) OrigErrs() []error {
+	errs := make([]error, len(e.errs))
+	for i := 0; i < len(errs); i++ {
+		errs[i] = e.errs[i]
+	}
+
+	return errs
+}
+
+// An ErrInvalidParam represents an invalid parameter error type.
+type ErrInvalidParam interface {
+	awserr.Error
+
+	// Field name the error occurred on.
+	Field() string
+
+	// SetContext updates the context of the error.
+	SetContext(string)
+
+	// AddNestedContext updates the error's context to include a nested level.
+	AddNestedContext(string)
+}
+
+type errInvalidParam struct {
+	context       string
+	nestedContext string
+	field         string
+	code          string
+	msg           string
+}
+
+// Code returns the error code for the type of invalid parameter.
+func (e *errInvalidParam) Code() string {
+	return e.code
+}
+
+// Message returns the reason the parameter was invalid, and its context.
+func (e *errInvalidParam) Message() string {
+	return fmt.Sprintf("%s, %s.", e.msg, e.Field())
+}
+
+// Error returns the string version of the invalid parameter error.
+func (e *errInvalidParam) Error() string {
+	return fmt.Sprintf("%s: %s", e.code, e.Message())
+}
+
+// OrigErr returns nil. Implemented to satisfy the awserr.Error interface.
+func (e *errInvalidParam) OrigErr() error {
+	return nil
+}
+
+// Field returns the field and context in which the error occurred.
+func (e *errInvalidParam) Field() string {
+	field := e.context
+	if len(field) > 0 {
+		field += "."
+	}
+	if len(e.nestedContext) > 0 {
+		field += fmt.Sprintf("%s.", e.nestedContext)
+	}
+	field += e.field
+
+	return field
+}
+
+// SetContext updates the base context of the error.
+func (e *errInvalidParam) SetContext(ctx string) {
+	e.context = ctx
+}
+
+// AddNestedContext prepends a context to the field's path.
+func (e *errInvalidParam) AddNestedContext(ctx string) {
+	if len(e.nestedContext) == 0 {
+		e.nestedContext = ctx
+	} else {
+		e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext)
+	}
+}
+
+// An ErrParamRequired represents a required parameter error.
+type ErrParamRequired struct {
+	errInvalidParam
+}
+
+// NewErrParamRequired creates a new required parameter error.
+func NewErrParamRequired(field string) *ErrParamRequired {
+	return &ErrParamRequired{
+		errInvalidParam{
+			code:  ParamRequiredErrCode,
+			field: field,
+			msg:   "missing required field",
+		},
+	}
+}
+
+// An ErrParamMinValue represents a minimum value parameter error.
+type ErrParamMinValue struct {
+	errInvalidParam
+	min float64
+}
+
+// NewErrParamMinValue creates a new minimum value parameter error.
+func NewErrParamMinValue(field string, min float64) *ErrParamMinValue {
+	return &ErrParamMinValue{
+		errInvalidParam: errInvalidParam{
+			code:  ParamMinValueErrCode,
+			field: field,
+			msg:   fmt.Sprintf("minimum field value of %v", min),
+		},
+		min: min,
+	}
+}
+
+// MinValue returns the field's required minimum value.
+//
+// float64 is returned for both int and float min values.
+func (e *ErrParamMinValue) MinValue() float64 {
+	return e.min
+}
+
+// An ErrParamMinLen represents a minimum length parameter error.
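+//
+// A minimal sketch of how the errors above are typically assembled by a
+// generated Validate method (HypotheticalInput and its Name field are
+// made up for illustration):
+//
+//	func (s *HypotheticalInput) Validate() error {
+//		invalidParams := request.ErrInvalidParams{Context: "HypotheticalInput"}
+//		if s.Name == nil {
+//			invalidParams.Add(request.NewErrParamRequired("Name"))
+//		}
+//		if invalidParams.Len() > 0 {
+//			return invalidParams
+//		}
+//		return nil
+//	}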
+type ErrParamMinLen struct {
+	errInvalidParam
+	min int
+}
+
+// NewErrParamMinLen creates a new minimum length parameter error.
+func NewErrParamMinLen(field string, min int) *ErrParamMinLen {
+	return &ErrParamMinLen{
+		errInvalidParam: errInvalidParam{
+			code:  ParamMinLenErrCode,
+			field: field,
+			msg:   fmt.Sprintf("minimum field size of %v", min),
+		},
+		min: min,
+	}
+}
+
+// MinLen returns the field's required minimum length.
+func (e *ErrParamMinLen) MinLen() int {
+	return e.min
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
new file mode 100644
index 000000000..6bc8f1be9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
@@ -0,0 +1,120 @@
+// Package session provides a way to create service clients with shared configuration
+// and handlers.
+//
+// Generally this package should be used instead of the `defaults` package.
+//
+// A session should be used to share configurations and request handlers between multiple
+// service clients. When service clients need specific configuration, aws.Config can be
+// used to provide additional configuration directly to the service client.
+package session
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/corehandlers"
+	"github.com/aws/aws-sdk-go/aws/defaults"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/endpoints"
+)
+
+// A Session provides a central location to create service clients from and
+// store configurations and request handlers for those services.
+//
+// A Session is safe to use to create service clients concurrently, but it is
+// not safe to mutate the Session concurrently.
+type Session struct {
+	Config   *aws.Config
+	Handlers request.Handlers
+}
+
+// New creates a new instance of the handlers, merging in the provided Configs
+// on top of the SDK's default configurations. Once the Session is created it
+// can be mutated to modify its Config or Handlers. The Session is safe to be
+// read concurrently, but it should not be written to concurrently.
+//
+// Example:
+//    // Create a session with the default config and request handlers.
+//    sess := session.New()
+//
+//    // Create a session with a custom region
+//    sess := session.New(&aws.Config{Region: aws.String("us-east-1")})
+//
+//    // Create a session, and add additional handlers for all service
+//    // clients created with the session to inherit. Adds logging handler.
+//    sess := session.New()
+//    sess.Handlers.Send.PushFront(func(r *request.Request) {
+//        // Log every request made and its payload
+//        logger.Println("Request: %s/%s, Payload: %s", r.ClientInfo.ServiceName, r.Operation, r.Params)
+//    })
+//
+//    // Create an S3 client instance from a session
+//    sess := session.New()
+//    svc := s3.New(sess)
+func New(cfgs ...*aws.Config) *Session {
+	cfg := defaults.Config()
+	handlers := defaults.Handlers()
+
+	// Apply the passed in configs so the configuration can be applied to the
+	// default credential chain
+	cfg.MergeIn(cfgs...)
+	cfg.Credentials = defaults.CredChain(cfg, handlers)
+
+	// Reapply any passed in configs to override credentials if set
+	cfg.MergeIn(cfgs...)
+
+	s := &Session{
+		Config:   cfg,
+		Handlers: handlers,
+	}
+
+	initHandlers(s)
+
+	return s
+}
+
+func initHandlers(s *Session) {
+	// Add the Validate parameter handler if it is not disabled.
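+	// For example (a sketch), client-side validation can be opted out of
+	// with: session.New(&aws.Config{DisableParamValidation: aws.Bool(true)}).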
+ s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler) + if !aws.BoolValue(s.Config.DisableParamValidation) { + s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler) + } +} + +// Copy creates and returns a copy of the current session, coping the config +// and handlers. If any additional configs are provided they will be merged +// on top of the session's copied config. +// +// Example: +// // Create a copy of the current session, configured for the us-west-2 region. +// sess.Copy(&aws.Config{Region: aws.String("us-west-2")}) +func (s *Session) Copy(cfgs ...*aws.Config) *Session { + newSession := &Session{ + Config: s.Config.Copy(cfgs...), + Handlers: s.Handlers.Copy(), + } + + initHandlers(newSession) + + return newSession +} + +// ClientConfig satisfies the client.ConfigProvider interface and is used to +// configure the service client instances. Passing the Session to the service +// client's constructor (New) will use this method to configure the client. +// +// Example: +// sess := session.New() +// s3.New(sess) +func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config { + s = s.Copy(cfgs...) + endpoint, signingRegion := endpoints.NormalizeEndpoint( + aws.StringValue(s.Config.Endpoint), serviceName, + aws.StringValue(s.Config.Region), aws.BoolValue(s.Config.DisableSSL)) + + return client.Config{ + Config: s.Config, + Handlers: s.Handlers, + Endpoint: endpoint, + SigningRegion: signingRegion, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session_test.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session_test.go new file mode 100644 index 000000000..e56c02fc6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session_test.go @@ -0,0 +1,20 @@ +package session_test + +import ( + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" +) + +func TestNewDefaultSession(t *testing.T) { + s := session.New(&aws.Config{Region: aws.String("region")}) + + assert.Equal(t, "region", *s.Config.Region) + assert.Equal(t, http.DefaultClient, s.Config.HTTPClient) + assert.NotNil(t, s.Config.Logger) + assert.Equal(t, aws.LogOff, *s.Config.LogLevel) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/functional_test.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/functional_test.go new file mode 100644 index 000000000..e4329c624 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/functional_test.go @@ -0,0 +1,77 @@ +package v4_test + +import ( + "net/http" + "net/url" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/stretchr/testify/assert" +) + +func TestPresignHandler(t *testing.T) { + svc := s3.New(unit.Session) + req, _ := svc.PutObjectRequest(&s3.PutObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("key"), + ContentDisposition: aws.String("a+b c$d"), + ACL: aws.String("public-read"), + }) + req.Time = time.Unix(0, 0) + urlstr, err := req.Presign(5 * time.Minute) + + assert.NoError(t, err) + + expectedDate := "19700101T000000Z" + expectedHeaders := "content-disposition;host;x-amz-acl" + expectedSig := "b2754ba8ffeb74a40b94767017e24c4672107d6d5a894648d5d332ca61f5ffe4" + expectedCred := "AKID/19700101/mock-region/s3/aws4_request" + + u, _ := url.Parse(urlstr) + urlQ := u.Query() + assert.Equal(t, expectedSig, urlQ.Get("X-Amz-Signature")) + assert.Equal(t, expectedCred, 
urlQ.Get("X-Amz-Credential"))
+	assert.Equal(t, expectedHeaders, urlQ.Get("X-Amz-SignedHeaders"))
+	assert.Equal(t, expectedDate, urlQ.Get("X-Amz-Date"))
+	assert.Equal(t, "300", urlQ.Get("X-Amz-Expires"))
+
+	assert.NotContains(t, urlstr, "+") // + encoded as %20
+}
+
+func TestPresignRequest(t *testing.T) {
+	svc := s3.New(unit.Session)
+	req, _ := svc.PutObjectRequest(&s3.PutObjectInput{
+		Bucket:             aws.String("bucket"),
+		Key:                aws.String("key"),
+		ContentDisposition: aws.String("a+b c$d"),
+		ACL:                aws.String("public-read"),
+	})
+	req.Time = time.Unix(0, 0)
+	urlstr, headers, err := req.PresignRequest(5 * time.Minute)
+
+	assert.NoError(t, err)
+
+	expectedDate := "19700101T000000Z"
+	expectedHeaders := "content-disposition;host;x-amz-acl;x-amz-content-sha256"
+	expectedSig := "0d200ba61501d752acd06f39ef4dbe7d83ffd5ea15978dc3476dfc00b8eb574e"
+	expectedCred := "AKID/19700101/mock-region/s3/aws4_request"
+	expectedHeaderMap := http.Header{
+		"x-amz-acl":            []string{"public-read"},
+		"content-disposition":  []string{"a+b c$d"},
+		"x-amz-content-sha256": []string{"UNSIGNED-PAYLOAD"},
+	}
+
+	u, _ := url.Parse(urlstr)
+	urlQ := u.Query()
+	assert.Equal(t, expectedSig, urlQ.Get("X-Amz-Signature"))
+	assert.Equal(t, expectedCred, urlQ.Get("X-Amz-Credential"))
+	assert.Equal(t, expectedHeaders, urlQ.Get("X-Amz-SignedHeaders"))
+	assert.Equal(t, expectedDate, urlQ.Get("X-Amz-Date"))
+	assert.Equal(t, expectedHeaderMap, headers)
+	assert.Equal(t, "300", urlQ.Get("X-Amz-Expires"))
+
+	assert.NotContains(t, urlstr, "+") // + encoded as %20
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
new file mode 100644
index 000000000..244c86da0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
@@ -0,0 +1,82 @@
+package v4
+
+import (
+	"net/http"
+	"strings"
+)
+
+// rules houses a set of rules needed for validation of a
+// string value
+type rules []rule
+
+// rule interface allows for more flexible rules and simply
+// checks whether a value adheres to that rule
+type rule interface {
+	IsValid(value string) bool
+}
+
+// IsValid will iterate through all rules and see if any rules
+// apply to the value, and supports nested rules
+func (r rules) IsValid(value string) bool {
+	for _, rule := range r {
+		if rule.IsValid(value) {
+			return true
+		}
+	}
+	return false
+}
+
+// mapRule is a generic rule for maps
+type mapRule map[string]struct{}
+
+// IsValid for the map rule checks whether the value exists in the map
+func (m mapRule) IsValid(value string) bool {
+	_, ok := m[value]
+	return ok
+}
+
+// whitelist is a generic rule for whitelisting
+type whitelist struct {
+	rule
+}
+
+// IsValid for whitelist checks if the value is within the whitelist
+func (w whitelist) IsValid(value string) bool {
+	return w.rule.IsValid(value)
+}
+
+// blacklist is a generic rule for blacklisting
+type blacklist struct {
+	rule
+}
+
+// IsValid for blacklist checks if the value is not within the blacklist
+func (b blacklist) IsValid(value string) bool {
+	return !b.rule.IsValid(value)
+}
+
+type patterns []string
+
+// IsValid for patterns checks each pattern and returns if a match has
+// been found
+func (p patterns) IsValid(value string) bool {
+	for _, pattern := range p {
+		if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) {
+			return true
+		}
+	}
+	return false
+}
+
+// inclusiveRules allows rules to depend on one another
+type inclusiveRules []rule
+
+// IsValid will return 
true if all rules are true +func (r inclusiveRules) IsValid(value string) bool { + for _, rule := range r { + if !rule.IsValid(value) { + return false + } + } + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules_test.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules_test.go new file mode 100644 index 000000000..7dfddc87e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules_test.go @@ -0,0 +1,57 @@ +package v4 + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestRuleCheckWhitelist(t *testing.T) { + w := whitelist{ + mapRule{ + "Cache-Control": struct{}{}, + }, + } + + assert.True(t, w.IsValid("Cache-Control")) + assert.False(t, w.IsValid("Cache-")) +} + +func TestRuleCheckBlacklist(t *testing.T) { + b := blacklist{ + mapRule{ + "Cache-Control": struct{}{}, + }, + } + + assert.False(t, b.IsValid("Cache-Control")) + assert.True(t, b.IsValid("Cache-")) +} + +func TestRuleCheckPattern(t *testing.T) { + p := patterns{"X-Amz-Meta-"} + + assert.True(t, p.IsValid("X-Amz-Meta-")) + assert.True(t, p.IsValid("X-Amz-Meta-Star")) + assert.False(t, p.IsValid("Cache-")) +} + +func TestRuleComplexWhitelist(t *testing.T) { + w := rules{ + whitelist{ + mapRule{ + "Cache-Control": struct{}{}, + }, + }, + patterns{"X-Amz-Meta-"}, + } + + r := rules{ + inclusiveRules{patterns{"X-Amz-"}, blacklist{w}}, + } + + assert.True(t, r.IsValid("X-Amz-Blah")) + assert.False(t, r.IsValid("X-Amz-Meta-")) + assert.False(t, r.IsValid("X-Amz-Meta-Star")) + assert.False(t, r.IsValid("Cache-Control")) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go new file mode 100644 index 000000000..f040f9ce9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -0,0 +1,644 @@ +// Package v4 implements signing for AWS V4 signer +// +// Provides request signing for request that need to be signed with +// AWS V4 Signatures. +package v4 + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +const ( + authHeaderPrefix = "AWS4-HMAC-SHA256" + timeFormat = "20060102T150405Z" + shortTimeFormat = "20060102" + + // emptyStringSHA256 is a SHA256 of an empty string + emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` +) + +var ignoredHeaders = rules{ + blacklist{ + mapRule{ + "Authorization": struct{}{}, + "User-Agent": struct{}{}, + }, + }, +} + +// requiredSignedHeaders is a whitelist for build canonical headers. 
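+//
+// A sketch of how the rule types above compose (values are illustrative):
+//
+//	r := rules{
+//		whitelist{mapRule{"Content-Type": struct{}{}}},
+//		patterns{"X-Amz-Meta-"},
+//	}
+//	r.IsValid("X-Amz-Meta-Foo") // true, matches the pattern rule
+//	r.IsValid("Authorization")  // false, matched by no rule
+//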
+var requiredSignedHeaders = rules{ + whitelist{ + mapRule{ + "Cache-Control": struct{}{}, + "Content-Disposition": struct{}{}, + "Content-Encoding": struct{}{}, + "Content-Language": struct{}{}, + "Content-Md5": struct{}{}, + "Content-Type": struct{}{}, + "Expires": struct{}{}, + "If-Match": struct{}{}, + "If-Modified-Since": struct{}{}, + "If-None-Match": struct{}{}, + "If-Unmodified-Since": struct{}{}, + "Range": struct{}{}, + "X-Amz-Acl": struct{}{}, + "X-Amz-Copy-Source": struct{}{}, + "X-Amz-Copy-Source-If-Match": struct{}{}, + "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, + "X-Amz-Copy-Source-If-None-Match": struct{}{}, + "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, + "X-Amz-Copy-Source-Range": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Grant-Full-control": struct{}{}, + "X-Amz-Grant-Read": struct{}{}, + "X-Amz-Grant-Read-Acp": struct{}{}, + "X-Amz-Grant-Write": struct{}{}, + "X-Amz-Grant-Write-Acp": struct{}{}, + "X-Amz-Metadata-Directive": struct{}{}, + "X-Amz-Mfa": struct{}{}, + "X-Amz-Request-Payer": struct{}{}, + "X-Amz-Server-Side-Encryption": struct{}{}, + "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Storage-Class": struct{}{}, + "X-Amz-Website-Redirect-Location": struct{}{}, + }, + }, + patterns{"X-Amz-Meta-"}, +} + +// allowedHoisting is a whitelist for build query headers. The boolean value +// represents whether or not it is a pattern. +var allowedQueryHoisting = inclusiveRules{ + blacklist{requiredSignedHeaders}, + patterns{"X-Amz-"}, +} + +// Signer applies AWS v4 signing to given request. Use this to sign requests +// that need to be signed with AWS V4 Signatures. +type Signer struct { + // The authentication credentials the request will be signed against. + // This value must be set to sign requests. + Credentials *credentials.Credentials + + // Sets the log level the signer should use when reporting information to + // the logger. If the logger is nil nothing will be logged. See + // aws.LogLevelType for more information on available logging levels + // + // By default nothing will be logged. + Debug aws.LogLevelType + + // The logger loging information will be written to. If there the logger + // is nil, nothing will be logged. + Logger aws.Logger + + // Disables the Signer's moving HTTP header key/value pairs from the HTTP + // request header to the request's query string. This is most commonly used + // with pre-signed requests preventing headers from being added to the + // request's query string. + DisableHeaderHoisting bool + + // currentTimeFn returns the time value which represents the current time. + // This value should only be used for testing. If it is nil the default + // time.Now will be used. + currentTimeFn func() time.Time +} + +// NewSigner returns a Signer pointer configured with the credentials and optional +// option values provided. If not options are provided the Signer will use its +// default configuration. 
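+//
+// For example (a sketch; creds is assumed to be a *credentials.Credentials):
+//
+//	signer := NewSigner(creds, func(s *Signer) {
+//		s.DisableHeaderHoisting = true // keep headers out of the query string
+//	})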
+func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer {
+	v4 := &Signer{
+		Credentials: credentials,
+	}
+
+	for _, option := range options {
+		option(v4)
+	}
+
+	return v4
+}
+
+type signingCtx struct {
+	ServiceName      string
+	Region           string
+	Request          *http.Request
+	Body             io.ReadSeeker
+	Query            url.Values
+	Time             time.Time
+	ExpireTime       time.Duration
+	SignedHeaderVals http.Header
+
+	credValues         credentials.Value
+	isPresign          bool
+	formattedTime      string
+	formattedShortTime string
+
+	bodyDigest       string
+	signedHeaders    string
+	canonicalHeaders string
+	canonicalString  string
+	credentialString string
+	stringToSign     string
+	signature        string
+	authorization    string
+}
+
+// Sign signs AWS v4 requests with the provided body, service name, region the
+// request is made to, and time the request is signed at. The signTime allows
+// you to specify that a request is signed for the future, and cannot be
+// used until then.
+//
+// Returns a list of HTTP headers that were included in the signature or an
+// error if signing the request failed. Generally for signed requests this value
+// is not needed as the full request context will be captured by the http.Request
+// value. It is included for reference though.
+//
+// Sign differs from Presign in that it will sign the request using HTTP
+// header values. This type of signing is intended for http.Request values that
+// will not be shared, or are shared in a way the header values on the request
+// will not be lost.
+//
+// The request's body is an io.ReadSeeker so the SHA256 of the body can be
+// generated. To bypass the signer computing the hash you can set the
+// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
+// only compute the hash if the request header value is empty.
+func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) {
+	return v4.signWithBody(r, body, service, region, 0, signTime)
+}
+
+// Presign signs AWS v4 requests with the provided body, service name, region
+// the request is made to, and time the request is signed at. The signTime
+// allows you to specify that a request is signed for the future, and cannot
+// be used until then.
+//
+// Returns a list of HTTP headers that were included in the signature or an
+// error if signing the request failed. For presigned requests these headers
+// and their values must be included on the HTTP request when it is made. This
+// is helpful to know what header values need to be shared with the party the
+// presigned request will be distributed to.
+//
+// Presign differs from Sign in that it will sign the request using query string
+// values instead of header values. This allows you to share the Presigned Request's
+// URL with third parties, or distribute it throughout your system with minimal
+// dependencies.
+//
+// Presign also takes an exp value which is the duration the
+// signed request will be valid after the signing time. This allows you to
+// set when the request will expire.
+//
+// The request's body is an io.ReadSeeker so the SHA256 of the body can be
+// generated. To bypass the signer computing the hash you can set the
+// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
+// only compute the hash if the request header value is empty.
+//
+// Presigning an S3 request will not compute the body's SHA256 hash by default.
+// This is because the general use case for S3 presigned URLs is to share
+// PUT/GET capabilities.
+// If you would like to include the body's SHA256 in the
+// presigned request's signature you can set the "X-Amz-Content-Sha256"
+// HTTP header and that will be included in the request's signature.
+func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
+	return v4.signWithBody(r, body, service, region, exp, signTime)
+}
+
+func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
+	currentTimeFn := v4.currentTimeFn
+	if currentTimeFn == nil {
+		currentTimeFn = time.Now
+	}
+
+	ctx := &signingCtx{
+		Request:     r,
+		Body:        body,
+		Query:       r.URL.Query(),
+		Time:        signTime,
+		ExpireTime:  exp,
+		isPresign:   exp != 0,
+		ServiceName: service,
+		Region:      region,
+	}
+
+	if ctx.isRequestSigned() {
+		if !v4.Credentials.IsExpired() && currentTimeFn().Before(ctx.Time.Add(10*time.Minute)) {
+			// If the request is already signed, the credentials have not
+			// expired, and the request is not too old, ignore the signing request.
+			return ctx.SignedHeaderVals, nil
+		}
+		ctx.Time = currentTimeFn()
+		ctx.handlePresignRemoval()
+	}
+
+	var err error
+	ctx.credValues, err = v4.Credentials.Get()
+	if err != nil {
+		return http.Header{}, err
+	}
+
+	ctx.assignAmzQueryValues()
+	ctx.build(v4.DisableHeaderHoisting)
+
+	if v4.Debug.Matches(aws.LogDebugWithSigning) {
+		v4.logSigningInfo(ctx)
+	}
+
+	return ctx.SignedHeaderVals, nil
+}
+
+func (ctx *signingCtx) handlePresignRemoval() {
+	if !ctx.isPresign {
+		return
+	}
+
+	// The credentials have expired for this request. The current signature
+	// is invalid, and the request needs to be resigned or it will fail.
+	ctx.removePresign()
+
+	// Update the request's query string to ensure the values stay in
+	// sync in the case retrieving the new credentials fails.
+	ctx.Request.URL.RawQuery = ctx.Query.Encode()
+}
+
+func (ctx *signingCtx) assignAmzQueryValues() {
+	if ctx.isPresign {
+		ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
+		if ctx.credValues.SessionToken != "" {
+			ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
+		} else {
+			ctx.Query.Del("X-Amz-Security-Token")
+		}
+
+		return
+	}
+
+	if ctx.credValues.SessionToken != "" {
+		ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
+	}
+}
+
+// SignRequestHandler is a named request handler the SDK will use to sign
+// service client requests with the V4 signature.
+var SignRequestHandler = request.NamedHandler{
+	Name: "v4.SignRequestHandler", Fn: SignSDKRequest,
+}
+
+// SignSDKRequest signs an AWS request with the V4 signature. This
+// request handler is best used only with the SDK's built-in service clients'
+// API operation requests.
+//
+// This function should not be used on its own, but in conjunction with
+// an AWS service client's API operation call. To sign a standalone request
+// not created by a service client's API operation method use the "Sign" or
+// "Presign" functions of the "Signer" type.
+//
+// If the credentials of the request's config are set to
+// credentials.AnonymousCredentials the request will not be signed.
+func SignSDKRequest(req *request.Request) {
+	signSDKRequestWithCurrTime(req, time.Now)
+}
+func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time) {
+	// If the request does not need to be signed, i.e. the AnonymousCredentials
+	// object is used, skip signing.
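+	// For example (a sketch), a client built with
+	// aws.Config{Credentials: credentials.AnonymousCredentials} sends its
+	// requests unsigned, and this handler returns immediately.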
+ if req.Config.Credentials == credentials.AnonymousCredentials { + return + } + + region := req.ClientInfo.SigningRegion + if region == "" { + region = aws.StringValue(req.Config.Region) + } + + name := req.ClientInfo.SigningName + if name == "" { + name = req.ClientInfo.ServiceName + } + + v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) { + v4.Debug = req.Config.LogLevel.Value() + v4.Logger = req.Config.Logger + v4.DisableHeaderHoisting = req.NotHoist + v4.currentTimeFn = curTimeFn + }) + + signingTime := req.Time + if !req.LastSignedAt.IsZero() { + signingTime = req.LastSignedAt + } + + signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.Body, name, region, req.ExpireTime, signingTime) + if err != nil { + req.Error = err + req.SignedHeaderVals = nil + return + } + + req.SignedHeaderVals = signedHeaders + req.LastSignedAt = curTimeFn() +} + +const logSignInfoMsg = `DEBUG: Request Signiture: +---[ CANONICAL STRING ]----------------------------- +%s +---[ STRING TO SIGN ]-------------------------------- +%s%s +-----------------------------------------------------` +const logSignedURLMsg = ` +---[ SIGNED URL ]------------------------------------ +%s` + +func (v4 *Signer) logSigningInfo(ctx *signingCtx) { + signedURLMsg := "" + if ctx.isPresign { + signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String()) + } + msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg) + v4.Logger.Log(msg) +} + +func (ctx *signingCtx) build(disableHeaderHoisting bool) { + ctx.buildTime() // no depends + ctx.buildCredentialString() // no depends + + unsignedHeaders := ctx.Request.Header + if ctx.isPresign { + if !disableHeaderHoisting { + urlValues := url.Values{} + urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends + for k := range urlValues { + ctx.Query[k] = urlValues[k] + } + } + } + + ctx.buildBodyDigest() + ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) + ctx.buildCanonicalString() // depends on canon headers / signed headers + ctx.buildStringToSign() // depends on canon string + ctx.buildSignature() // depends on string to sign + + if ctx.isPresign { + ctx.Request.URL.RawQuery += "&X-Amz-Signature=" + ctx.signature + } else { + parts := []string{ + authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString, + "SignedHeaders=" + ctx.signedHeaders, + "Signature=" + ctx.signature, + } + ctx.Request.Header.Set("Authorization", strings.Join(parts, ", ")) + } +} + +func (ctx *signingCtx) buildTime() { + ctx.formattedTime = ctx.Time.UTC().Format(timeFormat) + ctx.formattedShortTime = ctx.Time.UTC().Format(shortTimeFormat) + + if ctx.isPresign { + duration := int64(ctx.ExpireTime / time.Second) + ctx.Query.Set("X-Amz-Date", ctx.formattedTime) + ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) + } else { + ctx.Request.Header.Set("X-Amz-Date", ctx.formattedTime) + } +} + +func (ctx *signingCtx) buildCredentialString() { + ctx.credentialString = strings.Join([]string{ + ctx.formattedShortTime, + ctx.Region, + ctx.ServiceName, + "aws4_request", + }, "/") + + if ctx.isPresign { + ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString) + } +} + +func buildQuery(r rule, header http.Header) (url.Values, http.Header) { + query := url.Values{} + unsignedHeaders := http.Header{} + for k, h := range header { + if r.IsValid(k) { + query[k] = h + } else { + unsignedHeaders[k] = h + } + } + + return query, unsignedHeaders +} +func (ctx 
*signingCtx) buildCanonicalHeaders(r rule, header http.Header) { + var headers []string + headers = append(headers, "host") + for k, v := range header { + canonicalKey := http.CanonicalHeaderKey(k) + if !r.IsValid(canonicalKey) { + continue // ignored header + } + if ctx.SignedHeaderVals == nil { + ctx.SignedHeaderVals = make(http.Header) + } + + lowerCaseKey := strings.ToLower(k) + if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok { + // include additional values + ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...) + continue + } + + headers = append(headers, lowerCaseKey) + ctx.SignedHeaderVals[lowerCaseKey] = v + } + sort.Strings(headers) + + ctx.signedHeaders = strings.Join(headers, ";") + + if ctx.isPresign { + ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders) + } + + headerValues := make([]string, len(headers)) + for i, k := range headers { + if k == "host" { + headerValues[i] = "host:" + ctx.Request.URL.Host + } else { + headerValues[i] = k + ":" + + strings.Join(ctx.SignedHeaderVals[k], ",") + } + } + + ctx.canonicalHeaders = strings.Join(stripExcessSpaces(headerValues), "\n") +} + +func (ctx *signingCtx) buildCanonicalString() { + ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1) + uri := ctx.Request.URL.Opaque + if uri != "" { + uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/") + } else { + uri = ctx.Request.URL.Path + } + if uri == "" { + uri = "/" + } + + if ctx.ServiceName != "s3" { + uri = rest.EscapePath(uri, false) + } + + ctx.canonicalString = strings.Join([]string{ + ctx.Request.Method, + uri, + ctx.Request.URL.RawQuery, + ctx.canonicalHeaders + "\n", + ctx.signedHeaders, + ctx.bodyDigest, + }, "\n") +} + +func (ctx *signingCtx) buildStringToSign() { + ctx.stringToSign = strings.Join([]string{ + authHeaderPrefix, + ctx.formattedTime, + ctx.credentialString, + hex.EncodeToString(makeSha256([]byte(ctx.canonicalString))), + }, "\n") +} + +func (ctx *signingCtx) buildSignature() { + secret := ctx.credValues.SecretAccessKey + date := makeHmac([]byte("AWS4"+secret), []byte(ctx.formattedShortTime)) + region := makeHmac(date, []byte(ctx.Region)) + service := makeHmac(region, []byte(ctx.ServiceName)) + credentials := makeHmac(service, []byte("aws4_request")) + signature := makeHmac(credentials, []byte(ctx.stringToSign)) + ctx.signature = hex.EncodeToString(signature) +} + +func (ctx *signingCtx) buildBodyDigest() { + hash := ctx.Request.Header.Get("X-Amz-Content-Sha256") + if hash == "" { + if ctx.isPresign && ctx.ServiceName == "s3" { + hash = "UNSIGNED-PAYLOAD" + } else if ctx.Body == nil { + hash = emptyStringSHA256 + } else { + hash = hex.EncodeToString(makeSha256Reader(ctx.Body)) + } + if ctx.ServiceName == "s3" { + ctx.Request.Header.Set("X-Amz-Content-Sha256", hash) + } + } + ctx.bodyDigest = hash +} + +// isRequestSigned returns if the request is currently signed or presigned +func (ctx *signingCtx) isRequestSigned() bool { + if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" { + return true + } + if ctx.Request.Header.Get("Authorization") != "" { + return true + } + + return false +} + +// unsign removes signing flags for both signed and presigned requests. 
+func (ctx *signingCtx) removePresign() { + ctx.Query.Del("X-Amz-Algorithm") + ctx.Query.Del("X-Amz-Signature") + ctx.Query.Del("X-Amz-Security-Token") + ctx.Query.Del("X-Amz-Date") + ctx.Query.Del("X-Amz-Expires") + ctx.Query.Del("X-Amz-Credential") + ctx.Query.Del("X-Amz-SignedHeaders") +} + +func makeHmac(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256Reader(reader io.ReadSeeker) []byte { + hash := sha256.New() + start, _ := reader.Seek(0, 1) + defer reader.Seek(start, 0) + + io.Copy(hash, reader) + return hash.Sum(nil) +} + +const doubleSpaces = " " + +var doubleSpaceBytes = []byte(doubleSpaces) + +func stripExcessSpaces(headerVals []string) []string { + vals := make([]string, len(headerVals)) + for i, str := range headerVals { + // Trim leading and trailing spaces + trimmed := strings.TrimSpace(str) + + idx := strings.Index(trimmed, doubleSpaces) + var buf []byte + for idx > -1 { + // Multiple adjacent spaces found + if buf == nil { + // first time create the buffer + buf = []byte(trimmed) + } + + stripToIdx := -1 + for j := idx + 1; j < len(buf); j++ { + if buf[j] != ' ' { + buf = append(buf[:idx+1], buf[j:]...) + stripToIdx = j + break + } + } + + if stripToIdx >= 0 { + idx = bytes.Index(buf[stripToIdx:], doubleSpaceBytes) + if idx >= 0 { + idx += stripToIdx + } + } else { + idx = -1 + } + } + + if buf != nil { + vals[i] = string(buf) + } else { + vals[i] = trimmed + } + } + return vals +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4_test.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4_test.go new file mode 100644 index 000000000..5600a9b3d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4_test.go @@ -0,0 +1,345 @@ +package v4 + +import ( + "io" + "net/http" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting" +) + +func TestStripExcessHeaders(t *testing.T) { + vals := []string{ + "123", + "1 2 3", + " 1 2 3", + "1 2 3", + "1 23", + "1 2 3", + "1 2 ", + " 1 2 ", + } + + expected := []string{ + "123", + "1 2 3", + "1 2 3", + "1 2 3", + "1 23", + "1 2 3", + "1 2", + "1 2", + } + + newVals := stripExcessSpaces(vals) + for i := 0; i < len(newVals); i++ { + assert.Equal(t, expected[i], newVals[i], "test: %d", i) + } +} + +func buildRequest(serviceName, region, body string) (*http.Request, io.ReadSeeker) { + endpoint := "https://" + serviceName + "." 
+ region + ".amazonaws.com" + reader := strings.NewReader(body) + req, _ := http.NewRequest("POST", endpoint, reader) + req.URL.Opaque = "//example.org/bucket/key-._~,!@#$%^&*()" + req.Header.Add("X-Amz-Target", "prefix.Operation") + req.Header.Add("Content-Type", "application/x-amz-json-1.0") + req.Header.Add("Content-Length", string(len(body))) + req.Header.Add("X-Amz-Meta-Other-Header", "some-value=!@#$%^&* (+)") + req.Header.Add("X-Amz-Meta-Other-Header_With_Underscore", "some-value=!@#$%^&* (+)") + req.Header.Add("X-amz-Meta-Other-Header_With_Underscore", "some-value=!@#$%^&* (+)") + return req, reader +} + +func buildSigner() Signer { + return Signer{ + Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "SESSION"), + } +} + +func removeWS(text string) string { + text = strings.Replace(text, " ", "", -1) + text = strings.Replace(text, "\n", "", -1) + text = strings.Replace(text, "\t", "", -1) + return text +} + +func assertEqual(t *testing.T, expected, given string) { + if removeWS(expected) != removeWS(given) { + t.Errorf("\nExpected: %s\nGiven: %s", expected, given) + } +} + +func TestPresignRequest(t *testing.T) { + req, body := buildRequest("dynamodb", "us-east-1", "{}") + + signer := buildSigner() + signer.Presign(req, body, "dynamodb", "us-east-1", 300*time.Second, time.Unix(0, 0)) + + expectedDate := "19700101T000000Z" + expectedHeaders := "content-length;content-type;host;x-amz-meta-other-header;x-amz-meta-other-header_with_underscore" + expectedSig := "ea7856749041f727690c580569738282e99c79355fe0d8f125d3b5535d2ece83" + expectedCred := "AKID/19700101/us-east-1/dynamodb/aws4_request" + expectedTarget := "prefix.Operation" + + q := req.URL.Query() + assert.Equal(t, expectedSig, q.Get("X-Amz-Signature")) + assert.Equal(t, expectedCred, q.Get("X-Amz-Credential")) + assert.Equal(t, expectedHeaders, q.Get("X-Amz-SignedHeaders")) + assert.Equal(t, expectedDate, q.Get("X-Amz-Date")) + assert.Empty(t, q.Get("X-Amz-Meta-Other-Header")) + assert.Equal(t, expectedTarget, q.Get("X-Amz-Target")) +} + +func TestSignRequest(t *testing.T) { + req, body := buildRequest("dynamodb", "us-east-1", "{}") + signer := buildSigner() + signer.Sign(req, body, "dynamodb", "us-east-1", time.Unix(0, 0)) + + expectedDate := "19700101T000000Z" + expectedSig := "AWS4-HMAC-SHA256 Credential=AKID/19700101/us-east-1/dynamodb/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date;x-amz-meta-other-header;x-amz-meta-other-header_with_underscore;x-amz-security-token;x-amz-target, Signature=ea766cabd2ec977d955a3c2bae1ae54f4515d70752f2207618396f20aa85bd21" + + q := req.Header + assert.Equal(t, expectedSig, q.Get("Authorization")) + assert.Equal(t, expectedDate, q.Get("X-Amz-Date")) +} + +func TestSignBody(t *testing.T) { + req, body := buildRequest("s3", "us-east-1", "hello") + signer := buildSigner() + signer.Sign(req, body, "s3", "us-east-1", time.Now()) + hash := req.Header.Get("X-Amz-Content-Sha256") + assert.Equal(t, "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824", hash) +} + +func TestPresignEmptyBodyS3(t *testing.T) { + req, body := buildRequest("s3", "us-east-1", "hello") + signer := buildSigner() + signer.Presign(req, body, "s3", "us-east-1", 5*time.Minute, time.Now()) + hash := req.Header.Get("X-Amz-Content-Sha256") + assert.Equal(t, "UNSIGNED-PAYLOAD", hash) +} + +func TestSignPrecomputedBodyChecksum(t *testing.T) { + req, body := buildRequest("dynamodb", "us-east-1", "hello") + req.Header.Set("X-Amz-Content-Sha256", "PRECOMPUTED") + signer := buildSigner() + 
signer.Sign(req, body, "dynamodb", "us-east-1", time.Now()) + hash := req.Header.Get("X-Amz-Content-Sha256") + assert.Equal(t, "PRECOMPUTED", hash) +} + +func TestAnonymousCredentials(t *testing.T) { + svc := awstesting.NewClient(&aws.Config{Credentials: credentials.AnonymousCredentials}) + r := svc.NewRequest( + &request.Operation{ + Name: "BatchGetItem", + HTTPMethod: "POST", + HTTPPath: "/", + }, + nil, + nil, + ) + SignSDKRequest(r) + + urlQ := r.HTTPRequest.URL.Query() + assert.Empty(t, urlQ.Get("X-Amz-Signature")) + assert.Empty(t, urlQ.Get("X-Amz-Credential")) + assert.Empty(t, urlQ.Get("X-Amz-SignedHeaders")) + assert.Empty(t, urlQ.Get("X-Amz-Date")) + + hQ := r.HTTPRequest.Header + assert.Empty(t, hQ.Get("Authorization")) + assert.Empty(t, hQ.Get("X-Amz-Date")) +} + +func TestIgnoreResignRequestWithValidCreds(t *testing.T) { + svc := awstesting.NewClient(&aws.Config{ + Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "SESSION"), + Region: aws.String("us-west-2"), + }) + r := svc.NewRequest( + &request.Operation{ + Name: "BatchGetItem", + HTTPMethod: "POST", + HTTPPath: "/", + }, + nil, + nil, + ) + + SignSDKRequest(r) + sig := r.HTTPRequest.Header.Get("Authorization") + + SignSDKRequest(r) + assert.Equal(t, sig, r.HTTPRequest.Header.Get("Authorization")) +} + +func TestIgnorePreResignRequestWithValidCreds(t *testing.T) { + svc := awstesting.NewClient(&aws.Config{ + Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "SESSION"), + Region: aws.String("us-west-2"), + }) + r := svc.NewRequest( + &request.Operation{ + Name: "BatchGetItem", + HTTPMethod: "POST", + HTTPPath: "/", + }, + nil, + nil, + ) + r.ExpireTime = time.Minute * 10 + + SignSDKRequest(r) + sig := r.HTTPRequest.Header.Get("X-Amz-Signature") + + SignSDKRequest(r) + assert.Equal(t, sig, r.HTTPRequest.Header.Get("X-Amz-Signature")) +} + +func TestResignRequestExpiredCreds(t *testing.T) { + creds := credentials.NewStaticCredentials("AKID", "SECRET", "SESSION") + svc := awstesting.NewClient(&aws.Config{Credentials: creds}) + r := svc.NewRequest( + &request.Operation{ + Name: "BatchGetItem", + HTTPMethod: "POST", + HTTPPath: "/", + }, + nil, + nil, + ) + SignSDKRequest(r) + querySig := r.HTTPRequest.Header.Get("Authorization") + var origSignedHeaders string + for _, p := range strings.Split(querySig, ", ") { + if strings.HasPrefix(p, "SignedHeaders=") { + origSignedHeaders = p[len("SignedHeaders="):] + break + } + } + assert.NotEmpty(t, origSignedHeaders) + assert.NotContains(t, origSignedHeaders, "authorization") + origSignedAt := r.LastSignedAt + + creds.Expire() + + signSDKRequestWithCurrTime(r, func() time.Time { + // Simulate one second has passed so that signature's date changes + // when it is resigned. 
+		return time.Now().Add(1 * time.Second)
+	})
+	updatedQuerySig := r.HTTPRequest.Header.Get("Authorization")
+	assert.NotEqual(t, querySig, updatedQuerySig)
+
+	var updatedSignedHeaders string
+	for _, p := range strings.Split(updatedQuerySig, ", ") {
+		if strings.HasPrefix(p, "SignedHeaders=") {
+			updatedSignedHeaders = p[len("SignedHeaders="):]
+			break
+		}
+	}
+	assert.NotEmpty(t, updatedSignedHeaders)
+	assert.NotContains(t, updatedQuerySig, "authorization")
+	assert.NotEqual(t, origSignedAt, r.LastSignedAt)
+}
+
+func TestPreResignRequestExpiredCreds(t *testing.T) {
+	provider := &credentials.StaticProvider{Value: credentials.Value{
+		AccessKeyID:     "AKID",
+		SecretAccessKey: "SECRET",
+		SessionToken:    "SESSION",
+	}}
+	creds := credentials.NewCredentials(provider)
+	svc := awstesting.NewClient(&aws.Config{Credentials: creds})
+	r := svc.NewRequest(
+		&request.Operation{
+			Name:       "BatchGetItem",
+			HTTPMethod: "POST",
+			HTTPPath:   "/",
+		},
+		nil,
+		nil,
+	)
+	r.ExpireTime = time.Minute * 10
+
+	SignSDKRequest(r)
+	querySig := r.HTTPRequest.URL.Query().Get("X-Amz-Signature")
+	signedHeaders := r.HTTPRequest.URL.Query().Get("X-Amz-SignedHeaders")
+	assert.NotEmpty(t, signedHeaders)
+	origSignedAt := r.LastSignedAt
+
+	creds.Expire()
+
+	signSDKRequestWithCurrTime(r, func() time.Time {
+		// The credentials are expired above, forcing the request to be
+		// presigned again with a fresh signature.
+		return time.Now().Add(-48 * time.Hour)
+	})
+	assert.NotEqual(t, querySig, r.HTTPRequest.URL.Query().Get("X-Amz-Signature"))
+	resignedHeaders := r.HTTPRequest.URL.Query().Get("X-Amz-SignedHeaders")
+	assert.Equal(t, signedHeaders, resignedHeaders)
+	assert.NotContains(t, signedHeaders, "x-amz-signedHeaders")
+	assert.NotEqual(t, origSignedAt, r.LastSignedAt)
+}
+
+func TestResignRequestExpiredRequest(t *testing.T) {
+	creds := credentials.NewStaticCredentials("AKID", "SECRET", "SESSION")
+	svc := awstesting.NewClient(&aws.Config{Credentials: creds})
+	r := svc.NewRequest(
+		&request.Operation{
+			Name:       "BatchGetItem",
+			HTTPMethod: "POST",
+			HTTPPath:   "/",
+		},
+		nil,
+		nil,
+	)
+
+	SignSDKRequest(r)
+	querySig := r.HTTPRequest.Header.Get("Authorization")
+	origSignedAt := r.LastSignedAt
+
+	signSDKRequestWithCurrTime(r, func() time.Time {
+		// Simulate the request occurred 15 minutes in the past
+		return time.Now().Add(15 * time.Minute)
+	})
+	assert.NotEqual(t, querySig, r.HTTPRequest.Header.Get("Authorization"))
+	assert.NotEqual(t, origSignedAt, r.LastSignedAt)
+}
+
+func BenchmarkPresignRequest(b *testing.B) {
+	signer := buildSigner()
+	req, body := buildRequest("dynamodb", "us-east-1", "{}")
+	for i := 0; i < b.N; i++ {
+		signer.Presign(req, body, "dynamodb", "us-east-1", 300*time.Second, time.Now())
+	}
+}
+
+func BenchmarkSignRequest(b *testing.B) {
+	signer := buildSigner()
+	req, body := buildRequest("dynamodb", "us-east-1", "{}")
+	for i := 0; i < b.N; i++ {
+		signer.Sign(req, body, "dynamodb", "us-east-1", time.Now())
+	}
+}
+
+func BenchmarkStripExcessSpaces(b *testing.B) {
+	vals := []string{
+		`AWS4-HMAC-SHA256 Credential=AKIDFAKEIDFAKEID/20160628/us-west-2/s3/aws4_request, SignedHeaders=host;x-amz-date, Signature=1234567890abcdef1234567890abcdef1234567890abcdef`,
+		`123 321 123 321`,
+		` 123 321 123 321 `,
+	}
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		stripExcessSpaces(vals)
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go
new file mode 100644
index 000000000..fa014b49e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go
@@ -0,0 +1,106 @@
+package aws
+
+import (
+	"io"
+	"sync"
+)
+
+// ReadSeekCloser wraps an io.Reader returning a ReaderSeekerCloser
+func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
+	return ReaderSeekerCloser{r}
+}
+
+// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and
+// io.Closer interfaces to the underlying object if they are available.
+type ReaderSeekerCloser struct {
+	r io.Reader
+}
+
+// Read reads from the reader up to size of p. The number of bytes read, and
+// error if it occurred will be returned.
+//
+// If the reader is not an io.Reader, zero bytes are read and a nil error is
+// returned.
+//
+// Performs the same functionality as io.Reader Read
+func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
+	switch t := r.r.(type) {
+	case io.Reader:
+		return t.Read(p)
+	}
+	return 0, nil
+}
+
+// Seek sets the offset for the next Read to offset, interpreted according to
+// whence: 0 means relative to the origin of the file, 1 means relative to the
+// current offset, and 2 means relative to the end. Seek returns the new offset
+// and an error, if any.
+//
+// If the ReaderSeekerCloser is not an io.Seeker nothing will be done.
+func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
+	switch t := r.r.(type) {
+	case io.Seeker:
+		return t.Seek(offset, whence)
+	}
+	return int64(0), nil
+}
+
+// Close closes the ReaderSeekerCloser.
+//
+// If the ReaderSeekerCloser is not an io.Closer nothing will be done.
+func (r ReaderSeekerCloser) Close() error {
+	switch t := r.r.(type) {
+	case io.Closer:
+		return t.Close()
+	}
+	return nil
+}
+
+// A WriteAtBuffer provides an in-memory buffer supporting the io.WriterAt
+// interface. Can be used with the s3manager.Downloader to download content to
+// a buffer in memory. Safe to use concurrently.
+type WriteAtBuffer struct {
+	buf []byte
+	m   sync.Mutex
+
+	// GrowthCoeff defines the growth rate of the internal buffer. By
+	// default, the growth rate is 1, where expanding the internal
+	// buffer will allocate only enough capacity to fit the new expected
+	// length.
+	GrowthCoeff float64
+}
+
+// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer
+// provided by buf.
+func NewWriteAtBuffer(buf []byte) *WriteAtBuffer {
+	return &WriteAtBuffer{buf: buf}
+}
+
+// WriteAt writes a slice of bytes to a buffer starting at the position
+// provided. The number of bytes written will be returned, or an error.
+// Previously written slices can be overwritten if the writes overlap.
+func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) {
+	pLen := len(p)
+	expLen := pos + int64(pLen)
+	b.m.Lock()
+	defer b.m.Unlock()
+	if int64(len(b.buf)) < expLen {
+		if int64(cap(b.buf)) < expLen {
+			if b.GrowthCoeff < 1 {
+				b.GrowthCoeff = 1
+			}
+			newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen)))
+			copy(newBuf, b.buf)
+			b.buf = newBuf
+		}
+		b.buf = b.buf[:expLen]
+	}
+	copy(b.buf[pos:], p)
+	return pLen, nil
+}
+
+// Bytes returns a slice of bytes written to the buffer.
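+//
+// A minimal usage sketch (byte values are illustrative):
+//
+//	buf := NewWriteAtBuffer(nil)
+//	buf.WriteAt([]byte{1, 2, 3}, 0) // buf.Bytes() is 1 2 3
+//	buf.WriteAt([]byte{9}, 5)       // grows and zero-fills: 1 2 3 0 0 9
+//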
+func (b *WriteAtBuffer) Bytes() []byte { + b.m.Lock() + defer b.m.Unlock() + return b.buf[:len(b.buf):len(b.buf)] +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types_test.go b/vendor/github.com/aws/aws-sdk-go/aws/types_test.go new file mode 100644 index 000000000..a7cd93b83 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/types_test.go @@ -0,0 +1,75 @@ +package aws + +import ( + "math/rand" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestWriteAtBuffer(t *testing.T) { + b := &WriteAtBuffer{} + + n, err := b.WriteAt([]byte{1}, 0) + assert.NoError(t, err) + assert.Equal(t, 1, n) + + n, err = b.WriteAt([]byte{1, 1, 1}, 5) + assert.NoError(t, err) + assert.Equal(t, 3, n) + + n, err = b.WriteAt([]byte{2}, 1) + assert.NoError(t, err) + assert.Equal(t, 1, n) + + n, err = b.WriteAt([]byte{3}, 2) + assert.NoError(t, err) + assert.Equal(t, 1, n) + + assert.Equal(t, []byte{1, 2, 3, 0, 0, 1, 1, 1}, b.Bytes()) +} + +func BenchmarkWriteAtBuffer(b *testing.B) { + buf := &WriteAtBuffer{} + r := rand.New(rand.NewSource(1)) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + to := r.Intn(10) * 4096 + bs := make([]byte, to) + buf.WriteAt(bs, r.Int63n(10)*4096) + } +} + +func BenchmarkWriteAtBufferOrderedWrites(b *testing.B) { + // test the performance of a WriteAtBuffer when written in an + // ordered fashion. This is similar to the behavior of the + // s3.Downloader, since downloads the first chunk of the file, then + // the second, and so on. + // + // This test simulates a 150MB file being written in 30 ordered 5MB chunks. + chunk := int64(5e6) + max := chunk * 30 + // we'll write the same 5MB chunk every time + tmp := make([]byte, chunk) + for i := 0; i < b.N; i++ { + buf := &WriteAtBuffer{} + for i := int64(0); i < max; i += chunk { + buf.WriteAt(tmp, i) + } + } +} + +func BenchmarkWriteAtBufferParallel(b *testing.B) { + buf := &WriteAtBuffer{} + r := rand.New(rand.NewSource(1)) + + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + to := r.Intn(10) * 4096 + bs := make([]byte, to) + buf.WriteAt(bs, r.Int63n(10)*4096) + } + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go new file mode 100644 index 000000000..e78052130 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -0,0 +1,8 @@ +// Package aws provides core functionality for making requests to AWS services. 
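+//
+// SDKName and SDKVersion below feed the SDK's default User-Agent handler, so
+// a request from this build would carry a User-Agent of roughly this shape
+// (runtime values are illustrative):
+//
+//	aws-sdk-go/1.2.4 (go1.8; linux; amd64)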
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go
new file mode 100644
index 000000000..e78052130
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go
@@ -0,0 +1,8 @@
+// Package aws provides core functionality for making requests to AWS services.
+package aws
+
+// SDKName is the name of this AWS SDK
+const SDKName = "aws-sdk-go"
+
+// SDKVersion is the version of this SDK
+const SDKVersion = "1.2.4"
diff --git a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/Godeps/Godeps.json b/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/Godeps/Godeps.json
new file mode 100644
index 000000000..65d753cac
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/Godeps/Godeps.json
@@ -0,0 +1,19 @@
+{
+	"ImportPath": "github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer",
+	"GoVersion": "go1.6",
+	"GodepVersion": "v60",
+	"Deps": [
+		{
+			"ImportPath": "golang.org/x/tools/go/ast/astutil",
+			"Rev": "b75b3f5cd5d50fbb1fb88ce784d2e7cca17bba8a"
+		},
+		{
+			"ImportPath": "golang.org/x/tools/go/buildutil",
+			"Rev": "b75b3f5cd5d50fbb1fb88ce784d2e7cca17bba8a"
+		},
+		{
+			"ImportPath": "golang.org/x/tools/go/loader",
+			"Rev": "b75b3f5cd5d50fbb1fb88ce784d2e7cca17bba8a"
+		}
+	]
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/Godeps/Readme b/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/Godeps/Readme
new file mode 100644
index 000000000..4cdaa53d5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/Godeps/Readme
@@ -0,0 +1,5 @@
+This directory tree is generated automatically by godep.
+
+Please do not edit.
+
+See https://github.com/tools/godep for more information.
diff --git a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/gen/gen.go b/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/gen/gen.go
new file mode 100644
index 000000000..0dc652942
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/gen/gen.go
@@ -0,0 +1,200 @@
+// +build go1.5
+
+package main
+
+import (
+	"bytes"
+	"go/format"
+	"io"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+	"text/template"
+
+	"github.com/aws/aws-sdk-go/private/model/api"
+)
+
+type pkg struct {
+	oldAPI     *api.API
+	newAPI     *api.API
+	shapes     map[string]*shapentry
+	operations map[string]*opentry
+}
+
+type shapentry struct {
+	oldShape *api.Shape
+	newShape *api.Shape
+}
+
+type opentry struct {
+	oldName string
+	newName string
+}
+
+type packageRenames struct {
+	Shapes     map[string]string
+	Operations map[string]string
+	Fields     map[string]string
+}
+
+var exportMap = map[string]*packageRenames{}
+
+func generateRenames(w io.Writer) error {
+	tmpl, err := template.New("renames").Parse(t)
+	if err != nil {
+		return err
+	}
+
+	out := bytes.NewBuffer(nil)
+	if err = tmpl.Execute(out, exportMap); err != nil {
+		return err
+	}
+
+	b, err := format.Source(bytes.TrimSpace(out.Bytes()))
+	if err != nil {
+		return err
+	}
+
+	_, err = io.Copy(w, bytes.NewReader(b))
+	return err
+}
+
+const t = `
+package rename
+
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+var renamedPackages = map[string]*packageRenames{
+	{{ range $key, $entry := . }}"{{ $key }}": &packageRenames{
+		operations: map[string]string{
+			{{ range $old, $new := $entry.Operations }}"{{ $old }}": "{{ $new }}",
+			{{ end }}
+		},
+		shapes: map[string]string{
+			{{ range $old, $new := $entry.Shapes }}"{{ $old }}": "{{ $new }}",
+			{{ end }}
+		},
+		fields: map[string]string{
+			{{ range $old, $new := $entry.Fields }}"{{ $old }}": "{{ $new }}",
+			{{ end }}
+		},
+	},
+	{{ end }}
+}
+`
+
+func (p *pkg) buildRenames() {
+	pkgName := "github.com/aws/aws-sdk-go/service/" + p.oldAPI.PackageName()
+	if exportMap[pkgName] == nil {
+		exportMap[pkgName] = &packageRenames{map[string]string{}, map[string]string{}, map[string]string{}}
+	}
+	ifacename := "github.com/aws/aws-sdk-go/service/" + p.oldAPI.PackageName() + "/" +
+		p.oldAPI.InterfacePackageName()
+	if exportMap[ifacename] == nil {
+		exportMap[ifacename] = &packageRenames{map[string]string{}, map[string]string{}, map[string]string{}}
+	}
+
+	for _, entry := range p.operations {
+		if entry.oldName != entry.newName {
+			pkgNames := []string{pkgName, ifacename}
+			for _, p := range pkgNames {
+				exportMap[p].Operations[entry.oldName] = entry.newName
+				exportMap[p].Operations[entry.oldName+"Request"] = entry.newName + "Request"
+				exportMap[p].Operations[entry.oldName+"Pages"] = entry.newName + "Pages"
+			}
+		}
+	}
+
+	for _, entry := range p.shapes {
+		if entry.oldShape.Type == "structure" {
+			if entry.oldShape.ShapeName != entry.newShape.ShapeName {
+				exportMap[pkgName].Shapes[entry.oldShape.ShapeName] = entry.newShape.ShapeName
+			}
+
+			for _, n := range entry.oldShape.MemberNames() {
+				for _, m := range entry.newShape.MemberNames() {
+					if n != m && strings.ToLower(n) == strings.ToLower(m) {
+						exportMap[pkgName].Fields[n] = m
+					}
+				}
+			}
+		}
+	}
+}
+
+func load(file string) *pkg {
+	p := &pkg{&api.API{}, &api.API{}, map[string]*shapentry{}, map[string]*opentry{}}
+
+	p.oldAPI.Attach(file)
+	p.oldAPI.Setup()
+
+	p.newAPI.Attach(file)
+	p.newAPI.Setup()
+
+	for _, name := range p.oldAPI.OperationNames() {
+		p.operations[strings.ToLower(name)] = &opentry{oldName: name}
+	}
+
+	for _, name := range p.newAPI.OperationNames() {
+		p.operations[strings.ToLower(name)].newName = name
+	}
+
+	for _, shape := range p.oldAPI.ShapeList() {
+		p.shapes[strings.ToLower(shape.ShapeName)] = &shapentry{oldShape: shape}
+	}
+
+	for _, shape := range p.newAPI.ShapeList() {
+		if _, ok := p.shapes[strings.ToLower(shape.ShapeName)]; !ok {
+			panic("missing shape " + shape.ShapeName)
+		}
+		p.shapes[strings.ToLower(shape.ShapeName)].newShape = shape
+	}
+
+	return p
+}
+
+var excludeServices = map[string]struct{}{
+	"simpledb":     {},
+	"importexport": {},
+}
+
+func main() {
+	files, _ := filepath.Glob("../../apis/*/*/api-2.json")
+
+	sort.Strings(files)
+
+	// Remove old API versions from the list; files are sorted, so walking
+	// backwards keeps only the newest version of each service.
+	m := map[string]bool{}
+	for i := range files {
+		idx := len(files) - 1 - i
+		parts := strings.Split(files[idx], string(filepath.Separator))
+		svc := parts[len(parts)-3] // service name is the third-from-last path component
+
+		if m[svc] {
+			files[idx] = "" // wipe this one out if we already saw the service
+		}
+		m[svc] = true
+	}
+
+	for i := range files {
+		file := files[i]
+		if file == "" { // skip wiped-out entries
+			continue
+		}
+
+		if g := load(file); g != nil {
+			if _, ok := excludeServices[g.oldAPI.PackageName()]; !ok {
+				g.buildRenames()
+			}
+		}
+	}
+
+	outfile, err := os.Create("rename/renames.go")
+	if err != nil {
+		panic(err)
+	}
+	if err := generateRenames(outfile); err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/rename/rename.go b/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/rename/rename.go
new file mode 100644
index 000000000..05d6f3626
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/rename/rename.go
@@ -0,0 +1,116 @@
+// +build go1.5
+
+package rename
+
+import (
+	"bytes"
+	"flag"
+	"fmt"
+	"go/format"
+	"go/parser"
+	"go/token"
+	"go/types"
+	"io/ioutil"
+
+	"golang.org/x/tools/go/loader"
+)
+
+var dryRun = flag.Bool("dryrun", false, "Dry run")
+var verbose = flag.Bool("verbose", false, "Verbose")
+
+type packageRenames struct {
+	operations map[string]string
+	shapes     map[string]string
+	fields     map[string]string
+}
+
+type renamer struct {
+	*loader.Program
+	files map[*token.File]bool
+}
+
+// ParsePathsFromArgs parses the import paths given on the command line and
+// renames identifiers used from those packages according to renamedPackages.
+func ParsePathsFromArgs() {
+	flag.Parse()
+	for _, dir := range flag.Args() {
+		var conf loader.Config
+		conf.ParserMode = parser.ParseComments
+		conf.ImportWithTests(dir)
+		prog, err := conf.Load()
+		if err != nil {
+			panic(err)
+		}
+
+		r := renamer{prog, map[*token.File]bool{}}
+		r.parse()
+		if !*dryRun {
+			r.write()
+		}
+	}
+}
+
+func (r *renamer) dryInfo() string {
+	if *dryRun {
+		return "[DRY-RUN]"
+	}
+	return "[!]"
+}
+
+func (r *renamer) printf(msg string, args ...interface{}) {
+	if *verbose {
+		fmt.Printf(msg, args...)
+	}
+}
+
+func (r *renamer) parse() {
+	for _, pkg := range r.InitialPackages() {
+		r.parseUses(pkg)
+	}
+}
+
+func (r *renamer) write() {
+	for _, pkg := range r.InitialPackages() {
+		for _, f := range pkg.Files {
+			tokenFile := r.Fset.File(f.Pos())
+			if r.files[tokenFile] {
+				var buf bytes.Buffer
+				format.Node(&buf, r.Fset, f)
+				if err := ioutil.WriteFile(tokenFile.Name(), buf.Bytes(), 0644); err != nil {
+					panic(err)
+				}
+			}
+		}
+	}
+}
+
+func (r *renamer) parseUses(pkg *loader.PackageInfo) {
+	for k, v := range pkg.Uses {
+		if v.Pkg() != nil {
+			pkgPath := v.Pkg().Path()
+			if renames, ok := renamedPackages[pkgPath]; ok {
+				name := k.Name
+				switch t := v.(type) {
+				case *types.Func:
+					if newName, ok := renames.operations[t.Name()]; ok && newName != name {
+						r.printf("%s Rename [OPERATION]: %q -> %q\n", r.dryInfo(), name, newName)
+						r.files[r.Fset.File(k.Pos())] = true
+						k.Name = newName
+					}
+				case *types.TypeName:
+					if newName, ok := renames.shapes[name]; ok && newName != name {
+						r.printf("%s Rename [SHAPE]: %q -> %q\n", r.dryInfo(), t.Name(), newName)
+						r.files[r.Fset.File(k.Pos())] = true
+						k.Name = newName
+					}
+				case *types.Var:
+					if newName, ok := renames.fields[name]; ok && newName != name {
+						r.printf("%s Rename [FIELD]: %q -> %q\n", r.dryInfo(), t.Name(), newName)
+						r.files[r.Fset.File(k.Pos())] = true
+						k.Name = newName
+					}
+				}
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/rename/renames.go b/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/rename/renames.go
new file mode 100644
index 000000000..0d22f39ec
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/rename/renames.go
@@ -0,0 +1,2120 @@
+// +build go1.5
+
+package rename
+
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
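(Editor's aside, not part of the generated file: the table below is the data that parseUses in rename.go consults. For instance, given the ec2 entry further down, the renamer would rewrite a hypothetical pre-1.0 caller in place. The svc and resp variable names here are illustrative only.)

	// Before running awsmigrate-renamer:
	resp, err := svc.CreateVPC(&ec2.CreateVPCInput{CIDRBlock: aws.String("10.0.0.0/16")})

	// After, as driven by the ec2 entry's operations ("CreateVPC" -> "CreateVpc"),
	// shapes ("CreateVPCInput" -> "CreateVpcInput"), and fields ("CIDRBlock" -> "CidrBlock"):
	resp, err := svc.CreateVpc(&ec2.CreateVpcInput{CidrBlock: aws.String("10.0.0.0/16")})

Passing -dryrun to a driver that calls ParsePathsFromArgs logs these renames without writing files; without it, the rewritten sources are formatted and written back in place (see rename.go above).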
+ +var renamedPackages = map[string]*packageRenames{ + "github.com/aws/aws-sdk-go/service/autoscaling": { + operations: map[string]string{}, + shapes: map[string]string{ + "EBS": "Ebs", + }, + fields: map[string]string{ + "ActivityID": "ActivityId", + "ActivityIDs": "ActivityIds", + "AssociatePublicIPAddress": "AssociatePublicIpAddress", + "ClassicLinkVPCID": "ClassicLinkVPCId", + "EBS": "Ebs", + "EBSOptimized": "EbsOptimized", + "IAMInstanceProfile": "IamInstanceProfile", + "IOPS": "Iops", + "ImageID": "ImageId", + "InstanceID": "InstanceId", + "InstanceIDs": "InstanceIds", + "KernelID": "KernelId", + "RAMDiskID": "RamdiskId", + "ResourceID": "ResourceId", + "SnapshotID": "SnapshotId", + }, + }, + "github.com/aws/aws-sdk-go/service/autoscaling/autoscalingiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/cloudformation": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "EventID": "EventId", + "LogicalResourceID": "LogicalResourceId", + "PhysicalResourceID": "PhysicalResourceId", + "StackID": "StackId", + "URL": "Url", + "UniqueID": "UniqueId", + }, + }, + "github.com/aws/aws-sdk-go/service/cloudformation/cloudformationiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/cloudfront": { + operations: map[string]string{}, + shapes: map[string]string{ + "KeyPairIDs": "KeyPairIds", + }, + fields: map[string]string{ + "AWSAccountNumber": "AwsAccountNumber", + "DistributionID": "DistributionId", + "IAMCertificateID": "IAMCertificateId", + "ID": "Id", + "KeyPairIDs": "KeyPairIds", + "S3CanonicalUserID": "S3CanonicalUserId", + "TargetOriginID": "TargetOriginId", + }, + }, + "github.com/aws/aws-sdk-go/service/cloudfront/cloudfrontiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/cloudhsm": { + operations: map[string]string{ + "CreateHAPG": "CreateHapg", + "CreateHAPGPages": "CreateHapgPages", + "CreateHAPGRequest": "CreateHapgRequest", + "CreateHSM": "CreateHsm", + "CreateHSMPages": "CreateHsmPages", + "CreateHSMRequest": "CreateHsmRequest", + "DeleteHAPG": "DeleteHapg", + "DeleteHAPGPages": "DeleteHapgPages", + "DeleteHAPGRequest": "DeleteHapgRequest", + "DeleteHSM": "DeleteHsm", + "DeleteHSMPages": "DeleteHsmPages", + "DeleteHSMRequest": "DeleteHsmRequest", + "DescribeHAPG": "DescribeHapg", + "DescribeHAPGPages": "DescribeHapgPages", + "DescribeHAPGRequest": "DescribeHapgRequest", + "DescribeHSM": "DescribeHsm", + "DescribeHSMPages": "DescribeHsmPages", + "DescribeHSMRequest": "DescribeHsmRequest", + "ListHSMs": "ListHsms", + "ListHSMsPages": "ListHsmsPages", + "ListHSMsRequest": "ListHsmsRequest", + "ModifyHAPG": "ModifyHapg", + "ModifyHAPGPages": "ModifyHapgPages", + "ModifyHAPGRequest": "ModifyHapgRequest", + "ModifyHSM": "ModifyHsm", + "ModifyHSMPages": "ModifyHsmPages", + "ModifyHSMRequest": "ModifyHsmRequest", + }, + shapes: map[string]string{ + "CreateHAPGInput": "CreateHapgInput", + "CreateHAPGOutput": "CreateHapgOutput", + "CreateHSMInput": "CreateHsmInput", + "CreateHSMOutput": "CreateHsmOutput", + "DeleteHAPGInput": "DeleteHapgInput", + "DeleteHAPGOutput": "DeleteHapgOutput", + "DeleteHSMInput": "DeleteHsmInput", + "DeleteHSMOutput": "DeleteHsmOutput", + "DescribeHAPGInput": "DescribeHapgInput", + "DescribeHAPGOutput": "DescribeHapgOutput", + 
"DescribeHSMInput": "DescribeHsmInput", + "DescribeHSMOutput": "DescribeHsmOutput", + "ListHSMsInput": "ListHsmsInput", + "ListHSMsOutput": "ListHsmsOutput", + "ModifyHAPGInput": "ModifyHapgInput", + "ModifyHAPGOutput": "ModifyHapgOutput", + "ModifyHSMInput": "ModifyHsmInput", + "ModifyHSMOutput": "ModifyHsmOutput", + }, + fields: map[string]string{ + "ClientARN": "ClientArn", + "ENIID": "EniId", + "ENIIP": "EniIp", + "ExternalID": "ExternalId", + "HAPGARN": "HapgArn", + "HAPGList": "HapgList", + "HAPGSerial": "HapgSerial", + "HSMARN": "HsmArn", + "HSMList": "HsmList", + "HSMSerialNumber": "HsmSerialNumber", + "HSMType": "HsmType", + "HSMsLastActionFailed": "HsmsLastActionFailed", + "HSMsPendingDeletion": "HsmsPendingDeletion", + "HSMsPendingRegistration": "HsmsPendingRegistration", + "IAMRoleARN": "IamRoleArn", + "SSHKey": "SshKey", + "SSHKeyLastUpdated": "SshKeyLastUpdated", + "SSHPublicKey": "SshPublicKey", + "ServerCertURI": "ServerCertUri", + "SubnetID": "SubnetId", + "SyslogIP": "SyslogIp", + "VPCID": "VpcId", + }, + }, + "github.com/aws/aws-sdk-go/service/cloudhsm/cloudhsmiface": { + operations: map[string]string{ + "CreateHAPG": "CreateHapg", + "CreateHAPGPages": "CreateHapgPages", + "CreateHAPGRequest": "CreateHapgRequest", + "CreateHSM": "CreateHsm", + "CreateHSMPages": "CreateHsmPages", + "CreateHSMRequest": "CreateHsmRequest", + "DeleteHAPG": "DeleteHapg", + "DeleteHAPGPages": "DeleteHapgPages", + "DeleteHAPGRequest": "DeleteHapgRequest", + "DeleteHSM": "DeleteHsm", + "DeleteHSMPages": "DeleteHsmPages", + "DeleteHSMRequest": "DeleteHsmRequest", + "DescribeHAPG": "DescribeHapg", + "DescribeHAPGPages": "DescribeHapgPages", + "DescribeHAPGRequest": "DescribeHapgRequest", + "DescribeHSM": "DescribeHsm", + "DescribeHSMPages": "DescribeHsmPages", + "DescribeHSMRequest": "DescribeHsmRequest", + "ListHSMs": "ListHsms", + "ListHSMsPages": "ListHsmsPages", + "ListHSMsRequest": "ListHsmsRequest", + "ModifyHAPG": "ModifyHapg", + "ModifyHAPGPages": "ModifyHapgPages", + "ModifyHAPGRequest": "ModifyHapgRequest", + "ModifyHSM": "ModifyHsm", + "ModifyHSMPages": "ModifyHsmPages", + "ModifyHSMRequest": "ModifyHsmRequest", + }, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/cloudsearch": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "DomainID": "DomainId", + }, + }, + "github.com/aws/aws-sdk-go/service/cloudsearch/cloudsearchiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/cloudsearchdomain": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "ID": "Id", + "RID": "Rid", + "TimeMS": "Timems", + }, + }, + "github.com/aws/aws-sdk-go/service/cloudsearchdomain/cloudsearchdomainiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/cloudtrail": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "CloudWatchLogsLogGroupARN": "CloudWatchLogsLogGroupArn", + "CloudWatchLogsRoleARN": "CloudWatchLogsRoleArn", + "EventID": "EventId", + "SNSTopicName": "SnsTopicName", + }, + }, + "github.com/aws/aws-sdk-go/service/cloudtrail/cloudtrailiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/cloudwatch": { + operations: 
map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "AlarmARN": "AlarmArn", + }, + }, + "github.com/aws/aws-sdk-go/service/cloudwatch/cloudwatchiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/cloudwatchlogs": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "ARN": "Arn", + "DestinationARN": "DestinationArn", + "EventID": "EventId", + "RoleARN": "RoleArn", + "TargetARN": "TargetArn", + }, + }, + "github.com/aws/aws-sdk-go/service/cloudwatchlogs/cloudwatchlogsiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/codecommit": { + operations: map[string]string{}, + shapes: map[string]string{ + "RepositoryNameIDPair": "RepositoryNameIdPair", + }, + fields: map[string]string{ + "ARN": "Arn", + "AccountID": "AccountId", + "CloneURLHTTP": "CloneUrlHttp", + "CloneURLSSH": "CloneUrlSsh", + "CommitID": "CommitId", + "RepositoryID": "RepositoryId", + }, + }, + "github.com/aws/aws-sdk-go/service/codecommit/codecommitiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/codedeploy": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "ApplicationID": "ApplicationId", + "CommitID": "CommitId", + "DeploymentConfigID": "DeploymentConfigId", + "DeploymentGroupID": "DeploymentGroupId", + "DeploymentID": "DeploymentId", + "DeploymentIDs": "DeploymentIds", + "EC2TagFilters": "Ec2TagFilters", + "IAMUserARN": "IamUserArn", + "InstanceARN": "InstanceArn", + "InstanceID": "InstanceId", + "ServiceRoleARN": "ServiceRoleArn", + }, + }, + "github.com/aws/aws-sdk-go/service/codedeploy/codedeployiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/codepipeline": { + operations: map[string]string{}, + shapes: map[string]string{ + "ActionTypeID": "ActionTypeId", + }, + fields: map[string]string{ + "AccessKeyID": "AccessKeyId", + "AccountID": "AccountId", + "ActionTypeID": "ActionTypeId", + "ClientID": "ClientId", + "EntityURL": "EntityUrl", + "EntityURLTemplate": "EntityUrlTemplate", + "ExecutionURLTemplate": "ExecutionUrlTemplate", + "ExternalExecutionID": "ExternalExecutionId", + "ExternalExecutionURL": "ExternalExecutionUrl", + "ID": "Id", + "JobID": "JobId", + "PipelineExecutionID": "PipelineExecutionId", + "RevisionChangeID": "RevisionChangeId", + "RevisionID": "RevisionId", + "RevisionURL": "RevisionUrl", + "RevisionURLTemplate": "RevisionUrlTemplate", + "RoleARN": "RoleArn", + "ThirdPartyConfigurationURL": "ThirdPartyConfigurationUrl", + }, + }, + "github.com/aws/aws-sdk-go/service/codepipeline/codepipelineiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/cognitoidentity": { + operations: map[string]string{ + "GetID": "GetId", + "GetIDPages": "GetIdPages", + "GetIDRequest": "GetIdRequest", + "GetOpenIDToken": "GetOpenIdToken", + "GetOpenIDTokenForDeveloperIdentity": "GetOpenIdTokenForDeveloperIdentity", + "GetOpenIDTokenForDeveloperIdentityPages": "GetOpenIdTokenForDeveloperIdentityPages", + "GetOpenIDTokenForDeveloperIdentityRequest": "GetOpenIdTokenForDeveloperIdentityRequest", + "GetOpenIDTokenPages": 
"GetOpenIdTokenPages", + "GetOpenIDTokenRequest": "GetOpenIdTokenRequest", + }, + shapes: map[string]string{ + "GetIDInput": "GetIdInput", + "GetIDOutput": "GetIdOutput", + "GetOpenIDTokenForDeveloperIdentityInput": "GetOpenIdTokenForDeveloperIdentityInput", + "GetOpenIDTokenForDeveloperIdentityOutput": "GetOpenIdTokenForDeveloperIdentityOutput", + "GetOpenIDTokenInput": "GetOpenIdTokenInput", + "GetOpenIDTokenOutput": "GetOpenIdTokenOutput", + "UnprocessedIdentityID": "UnprocessedIdentityId", + }, + fields: map[string]string{ + "AccessKeyID": "AccessKeyId", + "AccountID": "AccountId", + "IdentityID": "IdentityId", + "IdentityIDsToDelete": "IdentityIdsToDelete", + "IdentityPoolID": "IdentityPoolId", + "OpenIDConnectProviderARNs": "OpenIdConnectProviderARNs", + "UnprocessedIdentityIDs": "UnprocessedIdentityIds", + }, + }, + "github.com/aws/aws-sdk-go/service/cognitoidentity/cognitoidentityiface": { + operations: map[string]string{ + "GetID": "GetId", + "GetIDPages": "GetIdPages", + "GetIDRequest": "GetIdRequest", + "GetOpenIDToken": "GetOpenIdToken", + "GetOpenIDTokenForDeveloperIdentity": "GetOpenIdTokenForDeveloperIdentity", + "GetOpenIDTokenForDeveloperIdentityPages": "GetOpenIdTokenForDeveloperIdentityPages", + "GetOpenIDTokenForDeveloperIdentityRequest": "GetOpenIdTokenForDeveloperIdentityRequest", + "GetOpenIDTokenPages": "GetOpenIdTokenPages", + "GetOpenIDTokenRequest": "GetOpenIdTokenRequest", + }, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/cognitosync": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "ApplicationARNs": "ApplicationArns", + "DeviceID": "DeviceId", + "IdentityID": "IdentityId", + "IdentityPoolID": "IdentityPoolId", + "RoleARN": "RoleArn", + }, + }, + "github.com/aws/aws-sdk-go/service/cognitosync/cognitosynciface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/configservice": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "ARN": "Arn", + "AccountID": "AccountId", + "ConfigSnapshotID": "ConfigSnapshotId", + "ConfigurationStateID": "ConfigurationStateId", + "ResourceID": "ResourceId", + "SNSTopicARN": "SnsTopicARN", + }, + }, + "github.com/aws/aws-sdk-go/service/configservice/configserviceiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/datapipeline": { + operations: map[string]string{}, + shapes: map[string]string{ + "PipelineIDName": "PipelineIdName", + }, + fields: map[string]string{ + "AttemptID": "AttemptId", + "ErrorID": "ErrorId", + "ID": "Id", + "IDs": "Ids", + "ObjectID": "ObjectId", + "ObjectIDs": "ObjectIds", + "PipelineID": "PipelineId", + "PipelineIDList": "PipelineIdList", + "PipelineIDs": "PipelineIds", + "TaskID": "TaskId", + "TaskRunnerID": "TaskrunnerId", + "UniqueID": "UniqueId", + }, + }, + "github.com/aws/aws-sdk-go/service/datapipeline/datapipelineiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/devicefarm": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "ARN": "Arn", + "AWSAccountNumber": "AwsAccountNumber", + "AppARN": "AppArn", + "CPU": "Cpu", + "DevicePoolARN": "DevicePoolArn", + "ExtraDataPackageARN": "ExtraDataPackageArn", + 
"NetworkProfileARN": "NetworkProfileArn", + "ProjectARN": "ProjectArn", + "TestPackageARN": "TestPackageArn", + "URL": "Url", + }, + }, + "github.com/aws/aws-sdk-go/service/devicefarm/devicefarmiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/directconnect": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "ASN": "Asn", + "CIDR": "Cidr", + "ConnectionID": "ConnectionId", + "InterconnectID": "InterconnectId", + "VLAN": "Vlan", + "VirtualGatewayID": "VirtualGatewayId", + "VirtualInterfaceID": "VirtualInterfaceId", + }, + }, + "github.com/aws/aws-sdk-go/service/directconnect/directconnectiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/directoryservice": { + operations: map[string]string{ + "DisableSSO": "DisableSso", + "DisableSSOPages": "DisableSsoPages", + "DisableSSORequest": "DisableSsoRequest", + "EnableSSO": "EnableSso", + "EnableSSOPages": "EnableSsoPages", + "EnableSSORequest": "EnableSsoRequest", + }, + shapes: map[string]string{ + "DirectoryVPCSettings": "DirectoryVpcSettings", + "DirectoryVPCSettingsDescription": "DirectoryVpcSettingsDescription", + "DisableSSOInput": "DisableSsoInput", + "DisableSSOOutput": "DisableSsoOutput", + "EnableSSOInput": "EnableSsoInput", + "EnableSSOOutput": "EnableSsoOutput", + }, + fields: map[string]string{ + "AccessURL": "AccessUrl", + "ComputerID": "ComputerId", + "ConnectIPs": "ConnectIps", + "CustomerDNSIPs": "CustomerDnsIps", + "DNSIPAddrs": "DnsIpAddrs", + "DirectoryID": "DirectoryId", + "DirectoryIDs": "DirectoryIds", + "SSOEnabled": "SsoEnabled", + "SecurityGroupID": "SecurityGroupId", + "SnapshotID": "SnapshotId", + "SnapshotIDs": "SnapshotIds", + "SubnetIDs": "SubnetIds", + "VPCID": "VpcId", + "VPCSettings": "VpcSettings", + }, + }, + "github.com/aws/aws-sdk-go/service/directoryservice/directoryserviceiface": { + operations: map[string]string{ + "DisableSSO": "DisableSso", + "DisableSSOPages": "DisableSsoPages", + "DisableSSORequest": "DisableSsoRequest", + "EnableSSO": "EnableSso", + "EnableSSOPages": "EnableSsoPages", + "EnableSSORequest": "EnableSsoRequest", + }, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/dynamodb": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "IndexARN": "IndexArn", + "LatestStreamARN": "LatestStreamArn", + "TableARN": "TableArn", + }, + }, + "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/dynamodbstreams": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "AWSRegion": "AwsRegion", + "DynamoDB": "Dynamodb", + "ExclusiveStartShardID": "ExclusiveStartShardId", + "ExclusiveStartStreamARN": "ExclusiveStartStreamArn", + "LastEvaluatedShardID": "LastEvaluatedShardId", + "LastEvaluatedStreamARN": "LastEvaluatedStreamArn", + "ParentShardID": "ParentShardId", + "ShardID": "ShardId", + "StreamARN": "StreamArn", + }, + }, + "github.com/aws/aws-sdk-go/service/dynamodbstreams/dynamodbstreamsiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/ec2": { + operations: map[string]string{ 
+ "AcceptVPCPeeringConnection": "AcceptVpcPeeringConnection", + "AcceptVPCPeeringConnectionPages": "AcceptVpcPeeringConnectionPages", + "AcceptVPCPeeringConnectionRequest": "AcceptVpcPeeringConnectionRequest", + "AssignPrivateIPAddresses": "AssignPrivateIpAddresses", + "AssignPrivateIPAddressesPages": "AssignPrivateIpAddressesPages", + "AssignPrivateIPAddressesRequest": "AssignPrivateIpAddressesRequest", + "AssociateDHCPOptions": "AssociateDhcpOptions", + "AssociateDHCPOptionsPages": "AssociateDhcpOptionsPages", + "AssociateDHCPOptionsRequest": "AssociateDhcpOptionsRequest", + "AttachClassicLinkVPC": "AttachClassicLinkVpc", + "AttachClassicLinkVPCPages": "AttachClassicLinkVpcPages", + "AttachClassicLinkVPCRequest": "AttachClassicLinkVpcRequest", + "AttachVPNGateway": "AttachVpnGateway", + "AttachVPNGatewayPages": "AttachVpnGatewayPages", + "AttachVPNGatewayRequest": "AttachVpnGatewayRequest", + "CreateDHCPOptions": "CreateDhcpOptions", + "CreateDHCPOptionsPages": "CreateDhcpOptionsPages", + "CreateDHCPOptionsRequest": "CreateDhcpOptionsRequest", + "CreateNetworkACL": "CreateNetworkAcl", + "CreateNetworkACLEntry": "CreateNetworkAclEntry", + "CreateNetworkACLEntryPages": "CreateNetworkAclEntryPages", + "CreateNetworkACLEntryRequest": "CreateNetworkAclEntryRequest", + "CreateNetworkACLPages": "CreateNetworkAclPages", + "CreateNetworkACLRequest": "CreateNetworkAclRequest", + "CreateVPC": "CreateVpc", + "CreateVPCEndpoint": "CreateVpcEndpoint", + "CreateVPCEndpointPages": "CreateVpcEndpointPages", + "CreateVPCEndpointRequest": "CreateVpcEndpointRequest", + "CreateVPCPages": "CreateVpcPages", + "CreateVPCPeeringConnection": "CreateVpcPeeringConnection", + "CreateVPCPeeringConnectionPages": "CreateVpcPeeringConnectionPages", + "CreateVPCPeeringConnectionRequest": "CreateVpcPeeringConnectionRequest", + "CreateVPCRequest": "CreateVpcRequest", + "CreateVPNConnection": "CreateVpnConnection", + "CreateVPNConnectionPages": "CreateVpnConnectionPages", + "CreateVPNConnectionRequest": "CreateVpnConnectionRequest", + "CreateVPNConnectionRoute": "CreateVpnConnectionRoute", + "CreateVPNConnectionRoutePages": "CreateVpnConnectionRoutePages", + "CreateVPNConnectionRouteRequest": "CreateVpnConnectionRouteRequest", + "CreateVPNGateway": "CreateVpnGateway", + "CreateVPNGatewayPages": "CreateVpnGatewayPages", + "CreateVPNGatewayRequest": "CreateVpnGatewayRequest", + "DeleteDHCPOptions": "DeleteDhcpOptions", + "DeleteDHCPOptionsPages": "DeleteDhcpOptionsPages", + "DeleteDHCPOptionsRequest": "DeleteDhcpOptionsRequest", + "DeleteNetworkACL": "DeleteNetworkAcl", + "DeleteNetworkACLEntry": "DeleteNetworkAclEntry", + "DeleteNetworkACLEntryPages": "DeleteNetworkAclEntryPages", + "DeleteNetworkACLEntryRequest": "DeleteNetworkAclEntryRequest", + "DeleteNetworkACLPages": "DeleteNetworkAclPages", + "DeleteNetworkACLRequest": "DeleteNetworkAclRequest", + "DeleteVPC": "DeleteVpc", + "DeleteVPCEndpoints": "DeleteVpcEndpoints", + "DeleteVPCEndpointsPages": "DeleteVpcEndpointsPages", + "DeleteVPCEndpointsRequest": "DeleteVpcEndpointsRequest", + "DeleteVPCPages": "DeleteVpcPages", + "DeleteVPCPeeringConnection": "DeleteVpcPeeringConnection", + "DeleteVPCPeeringConnectionPages": "DeleteVpcPeeringConnectionPages", + "DeleteVPCPeeringConnectionRequest": "DeleteVpcPeeringConnectionRequest", + "DeleteVPCRequest": "DeleteVpcRequest", + "DeleteVPNConnection": "DeleteVpnConnection", + "DeleteVPNConnectionPages": "DeleteVpnConnectionPages", + "DeleteVPNConnectionRequest": "DeleteVpnConnectionRequest", + "DeleteVPNConnectionRoute": 
"DeleteVpnConnectionRoute", + "DeleteVPNConnectionRoutePages": "DeleteVpnConnectionRoutePages", + "DeleteVPNConnectionRouteRequest": "DeleteVpnConnectionRouteRequest", + "DeleteVPNGateway": "DeleteVpnGateway", + "DeleteVPNGatewayPages": "DeleteVpnGatewayPages", + "DeleteVPNGatewayRequest": "DeleteVpnGatewayRequest", + "DescribeDHCPOptions": "DescribeDhcpOptions", + "DescribeDHCPOptionsPages": "DescribeDhcpOptionsPages", + "DescribeDHCPOptionsRequest": "DescribeDhcpOptionsRequest", + "DescribeNetworkACLs": "DescribeNetworkAcls", + "DescribeNetworkACLsPages": "DescribeNetworkAclsPages", + "DescribeNetworkACLsRequest": "DescribeNetworkAclsRequest", + "DescribeVPCAttribute": "DescribeVpcAttribute", + "DescribeVPCAttributePages": "DescribeVpcAttributePages", + "DescribeVPCAttributeRequest": "DescribeVpcAttributeRequest", + "DescribeVPCClassicLink": "DescribeVpcClassicLink", + "DescribeVPCClassicLinkPages": "DescribeVpcClassicLinkPages", + "DescribeVPCClassicLinkRequest": "DescribeVpcClassicLinkRequest", + "DescribeVPCEndpointServices": "DescribeVpcEndpointServices", + "DescribeVPCEndpointServicesPages": "DescribeVpcEndpointServicesPages", + "DescribeVPCEndpointServicesRequest": "DescribeVpcEndpointServicesRequest", + "DescribeVPCEndpoints": "DescribeVpcEndpoints", + "DescribeVPCEndpointsPages": "DescribeVpcEndpointsPages", + "DescribeVPCEndpointsRequest": "DescribeVpcEndpointsRequest", + "DescribeVPCPeeringConnections": "DescribeVpcPeeringConnections", + "DescribeVPCPeeringConnectionsPages": "DescribeVpcPeeringConnectionsPages", + "DescribeVPCPeeringConnectionsRequest": "DescribeVpcPeeringConnectionsRequest", + "DescribeVPCs": "DescribeVpcs", + "DescribeVPCsPages": "DescribeVpcsPages", + "DescribeVPCsRequest": "DescribeVpcsRequest", + "DescribeVPNConnections": "DescribeVpnConnections", + "DescribeVPNConnectionsPages": "DescribeVpnConnectionsPages", + "DescribeVPNConnectionsRequest": "DescribeVpnConnectionsRequest", + "DescribeVPNGateways": "DescribeVpnGateways", + "DescribeVPNGatewaysPages": "DescribeVpnGatewaysPages", + "DescribeVPNGatewaysRequest": "DescribeVpnGatewaysRequest", + "DetachClassicLinkVPC": "DetachClassicLinkVpc", + "DetachClassicLinkVPCPages": "DetachClassicLinkVpcPages", + "DetachClassicLinkVPCRequest": "DetachClassicLinkVpcRequest", + "DetachVPNGateway": "DetachVpnGateway", + "DetachVPNGatewayPages": "DetachVpnGatewayPages", + "DetachVPNGatewayRequest": "DetachVpnGatewayRequest", + "DisableVGWRoutePropagation": "DisableVgwRoutePropagation", + "DisableVGWRoutePropagationPages": "DisableVgwRoutePropagationPages", + "DisableVGWRoutePropagationRequest": "DisableVgwRoutePropagationRequest", + "DisableVPCClassicLink": "DisableVpcClassicLink", + "DisableVPCClassicLinkPages": "DisableVpcClassicLinkPages", + "DisableVPCClassicLinkRequest": "DisableVpcClassicLinkRequest", + "EnableVGWRoutePropagation": "EnableVgwRoutePropagation", + "EnableVGWRoutePropagationPages": "EnableVgwRoutePropagationPages", + "EnableVGWRoutePropagationRequest": "EnableVgwRoutePropagationRequest", + "EnableVPCClassicLink": "EnableVpcClassicLink", + "EnableVPCClassicLinkPages": "EnableVpcClassicLinkPages", + "EnableVPCClassicLinkRequest": "EnableVpcClassicLinkRequest", + "ModifyVPCAttribute": "ModifyVpcAttribute", + "ModifyVPCAttributePages": "ModifyVpcAttributePages", + "ModifyVPCAttributeRequest": "ModifyVpcAttributeRequest", + "ModifyVPCEndpoint": "ModifyVpcEndpoint", + "ModifyVPCEndpointPages": "ModifyVpcEndpointPages", + "ModifyVPCEndpointRequest": "ModifyVpcEndpointRequest", + "MoveAddressToVPC": 
"MoveAddressToVpc", + "MoveAddressToVPCPages": "MoveAddressToVpcPages", + "MoveAddressToVPCRequest": "MoveAddressToVpcRequest", + "RejectVPCPeeringConnection": "RejectVpcPeeringConnection", + "RejectVPCPeeringConnectionPages": "RejectVpcPeeringConnectionPages", + "RejectVPCPeeringConnectionRequest": "RejectVpcPeeringConnectionRequest", + "ReplaceNetworkACLAssociation": "ReplaceNetworkAclAssociation", + "ReplaceNetworkACLAssociationPages": "ReplaceNetworkAclAssociationPages", + "ReplaceNetworkACLAssociationRequest": "ReplaceNetworkAclAssociationRequest", + "ReplaceNetworkACLEntry": "ReplaceNetworkAclEntry", + "ReplaceNetworkACLEntryPages": "ReplaceNetworkAclEntryPages", + "ReplaceNetworkACLEntryRequest": "ReplaceNetworkAclEntryRequest", + "UnassignPrivateIPAddresses": "UnassignPrivateIpAddresses", + "UnassignPrivateIPAddressesPages": "UnassignPrivateIpAddressesPages", + "UnassignPrivateIPAddressesRequest": "UnassignPrivateIpAddressesRequest", + }, + shapes: map[string]string{ + "AcceptVPCPeeringConnectionInput": "AcceptVpcPeeringConnectionInput", + "AcceptVPCPeeringConnectionOutput": "AcceptVpcPeeringConnectionOutput", + "AssignPrivateIPAddressesInput": "AssignPrivateIpAddressesInput", + "AssignPrivateIPAddressesOutput": "AssignPrivateIpAddressesOutput", + "AssociateDHCPOptionsInput": "AssociateDhcpOptionsInput", + "AssociateDHCPOptionsOutput": "AssociateDhcpOptionsOutput", + "AttachClassicLinkVPCInput": "AttachClassicLinkVpcInput", + "AttachClassicLinkVPCOutput": "AttachClassicLinkVpcOutput", + "AttachVPNGatewayInput": "AttachVpnGatewayInput", + "AttachVPNGatewayOutput": "AttachVpnGatewayOutput", + "CreateDHCPOptionsInput": "CreateDhcpOptionsInput", + "CreateDHCPOptionsOutput": "CreateDhcpOptionsOutput", + "CreateNetworkACLEntryInput": "CreateNetworkAclEntryInput", + "CreateNetworkACLEntryOutput": "CreateNetworkAclEntryOutput", + "CreateNetworkACLInput": "CreateNetworkAclInput", + "CreateNetworkACLOutput": "CreateNetworkAclOutput", + "CreateVPCEndpointInput": "CreateVpcEndpointInput", + "CreateVPCEndpointOutput": "CreateVpcEndpointOutput", + "CreateVPCInput": "CreateVpcInput", + "CreateVPCOutput": "CreateVpcOutput", + "CreateVPCPeeringConnectionInput": "CreateVpcPeeringConnectionInput", + "CreateVPCPeeringConnectionOutput": "CreateVpcPeeringConnectionOutput", + "CreateVPNConnectionInput": "CreateVpnConnectionInput", + "CreateVPNConnectionOutput": "CreateVpnConnectionOutput", + "CreateVPNConnectionRouteInput": "CreateVpnConnectionRouteInput", + "CreateVPNConnectionRouteOutput": "CreateVpnConnectionRouteOutput", + "CreateVPNGatewayInput": "CreateVpnGatewayInput", + "CreateVPNGatewayOutput": "CreateVpnGatewayOutput", + "DHCPConfiguration": "DhcpConfiguration", + "DHCPOptions": "DhcpOptions", + "DeleteDHCPOptionsInput": "DeleteDhcpOptionsInput", + "DeleteDHCPOptionsOutput": "DeleteDhcpOptionsOutput", + "DeleteNetworkACLEntryInput": "DeleteNetworkAclEntryInput", + "DeleteNetworkACLEntryOutput": "DeleteNetworkAclEntryOutput", + "DeleteNetworkACLInput": "DeleteNetworkAclInput", + "DeleteNetworkACLOutput": "DeleteNetworkAclOutput", + "DeleteVPCEndpointsInput": "DeleteVpcEndpointsInput", + "DeleteVPCEndpointsOutput": "DeleteVpcEndpointsOutput", + "DeleteVPCInput": "DeleteVpcInput", + "DeleteVPCOutput": "DeleteVpcOutput", + "DeleteVPCPeeringConnectionInput": "DeleteVpcPeeringConnectionInput", + "DeleteVPCPeeringConnectionOutput": "DeleteVpcPeeringConnectionOutput", + "DeleteVPNConnectionInput": "DeleteVpnConnectionInput", + "DeleteVPNConnectionOutput": "DeleteVpnConnectionOutput", + 
"DeleteVPNConnectionRouteInput": "DeleteVpnConnectionRouteInput", + "DeleteVPNConnectionRouteOutput": "DeleteVpnConnectionRouteOutput", + "DeleteVPNGatewayInput": "DeleteVpnGatewayInput", + "DeleteVPNGatewayOutput": "DeleteVpnGatewayOutput", + "DescribeDHCPOptionsInput": "DescribeDhcpOptionsInput", + "DescribeDHCPOptionsOutput": "DescribeDhcpOptionsOutput", + "DescribeNetworkACLsInput": "DescribeNetworkAclsInput", + "DescribeNetworkACLsOutput": "DescribeNetworkAclsOutput", + "DescribeVPCAttributeInput": "DescribeVpcAttributeInput", + "DescribeVPCAttributeOutput": "DescribeVpcAttributeOutput", + "DescribeVPCClassicLinkInput": "DescribeVpcClassicLinkInput", + "DescribeVPCClassicLinkOutput": "DescribeVpcClassicLinkOutput", + "DescribeVPCEndpointServicesInput": "DescribeVpcEndpointServicesInput", + "DescribeVPCEndpointServicesOutput": "DescribeVpcEndpointServicesOutput", + "DescribeVPCEndpointsInput": "DescribeVpcEndpointsInput", + "DescribeVPCEndpointsOutput": "DescribeVpcEndpointsOutput", + "DescribeVPCPeeringConnectionsInput": "DescribeVpcPeeringConnectionsInput", + "DescribeVPCPeeringConnectionsOutput": "DescribeVpcPeeringConnectionsOutput", + "DescribeVPCsInput": "DescribeVpcsInput", + "DescribeVPCsOutput": "DescribeVpcsOutput", + "DescribeVPNConnectionsInput": "DescribeVpnConnectionsInput", + "DescribeVPNConnectionsOutput": "DescribeVpnConnectionsOutput", + "DescribeVPNGatewaysInput": "DescribeVpnGatewaysInput", + "DescribeVPNGatewaysOutput": "DescribeVpnGatewaysOutput", + "DetachClassicLinkVPCInput": "DetachClassicLinkVpcInput", + "DetachClassicLinkVPCOutput": "DetachClassicLinkVpcOutput", + "DetachVPNGatewayInput": "DetachVpnGatewayInput", + "DetachVPNGatewayOutput": "DetachVpnGatewayOutput", + "DisableVGWRoutePropagationInput": "DisableVgwRoutePropagationInput", + "DisableVGWRoutePropagationOutput": "DisableVgwRoutePropagationOutput", + "DisableVPCClassicLinkInput": "DisableVpcClassicLinkInput", + "DisableVPCClassicLinkOutput": "DisableVpcClassicLinkOutput", + "EBSBlockDevice": "EbsBlockDevice", + "EBSInstanceBlockDevice": "EbsInstanceBlockDevice", + "EBSInstanceBlockDeviceSpecification": "EbsInstanceBlockDeviceSpecification", + "EnableVGWRoutePropagationInput": "EnableVgwRoutePropagationInput", + "EnableVGWRoutePropagationOutput": "EnableVgwRoutePropagationOutput", + "EnableVPCClassicLinkInput": "EnableVpcClassicLinkInput", + "EnableVPCClassicLinkOutput": "EnableVpcClassicLinkOutput", + "IAMInstanceProfile": "IamInstanceProfile", + "IAMInstanceProfileSpecification": "IamInstanceProfileSpecification", + "ICMPTypeCode": "IcmpTypeCode", + "IPPermission": "IpPermission", + "IPRange": "IpRange", + "InstancePrivateIPAddress": "InstancePrivateIpAddress", + "ModifyVPCAttributeInput": "ModifyVpcAttributeInput", + "ModifyVPCAttributeOutput": "ModifyVpcAttributeOutput", + "ModifyVPCEndpointInput": "ModifyVpcEndpointInput", + "ModifyVPCEndpointOutput": "ModifyVpcEndpointOutput", + "MoveAddressToVPCInput": "MoveAddressToVpcInput", + "MoveAddressToVPCOutput": "MoveAddressToVpcOutput", + "NetworkACL": "NetworkAcl", + "NetworkACLAssociation": "NetworkAclAssociation", + "NetworkACLEntry": "NetworkAclEntry", + "NetworkInterfacePrivateIPAddress": "NetworkInterfacePrivateIpAddress", + "NewDHCPConfiguration": "NewDhcpConfiguration", + "PrefixListID": "PrefixListId", + "PrivateIPAddressSpecification": "PrivateIpAddressSpecification", + "PropagatingVGW": "PropagatingVgw", + "RejectVPCPeeringConnectionInput": "RejectVpcPeeringConnectionInput", + "RejectVPCPeeringConnectionOutput": 
"RejectVpcPeeringConnectionOutput", + "ReplaceNetworkACLAssociationInput": "ReplaceNetworkAclAssociationInput", + "ReplaceNetworkACLAssociationOutput": "ReplaceNetworkAclAssociationOutput", + "ReplaceNetworkACLEntryInput": "ReplaceNetworkAclEntryInput", + "ReplaceNetworkACLEntryOutput": "ReplaceNetworkAclEntryOutput", + "ReservedInstancesID": "ReservedInstancesId", + "UnassignPrivateIPAddressesInput": "UnassignPrivateIpAddressesInput", + "UnassignPrivateIPAddressesOutput": "UnassignPrivateIpAddressesOutput", + "UserIDGroupPair": "UserIdGroupPair", + "VGWTelemetry": "VgwTelemetry", + "VPC": "Vpc", + "VPCAttachment": "VpcAttachment", + "VPCClassicLink": "VpcClassicLink", + "VPCEndpoint": "VpcEndpoint", + "VPCPeeringConnection": "VpcPeeringConnection", + "VPCPeeringConnectionStateReason": "VpcPeeringConnectionStateReason", + "VPCPeeringConnectionVPCInfo": "VpcPeeringConnectionVpcInfo", + "VPNConnection": "VpnConnection", + "VPNConnectionOptions": "VpnConnectionOptions", + "VPNConnectionOptionsSpecification": "VpnConnectionOptionsSpecification", + "VPNGateway": "VpnGateway", + "VPNStaticRoute": "VpnStaticRoute", + }, + fields: map[string]string{ + "AMILaunchIndex": "AmiLaunchIndex", + "ARN": "Arn", + "AWSAccessKeyID": "AWSAccessKeyId", + "AccepterVPCInfo": "AccepterVpcInfo", + "AddRouteTableIDs": "AddRouteTableIds", + "AllocationID": "AllocationId", + "AllocationIDs": "AllocationIds", + "AssociatePublicIPAddress": "AssociatePublicIpAddress", + "AssociationID": "AssociationId", + "AttachmentID": "AttachmentId", + "AvailableIPAddressCount": "AvailableIpAddressCount", + "BGPASN": "BgpAsn", + "BundleID": "BundleId", + "BundleIDs": "BundleIds", + "CIDRBlock": "CidrBlock", + "CIDRIP": "CidrIp", + "CIDRs": "Cidrs", + "ConversionTaskID": "ConversionTaskId", + "ConversionTaskIDs": "ConversionTaskIds", + "CustomerGatewayID": "CustomerGatewayId", + "CustomerGatewayIDs": "CustomerGatewayIds", + "DHCPConfigurations": "DhcpConfigurations", + "DHCPOptions": "DhcpOptions", + "DHCPOptionsID": "DhcpOptionsId", + "DHCPOptionsIDs": "DhcpOptionsIds", + "DefaultForAZ": "DefaultForAz", + "DeliverLogsPermissionARN": "DeliverLogsPermissionArn", + "DestinationCIDRBlock": "DestinationCidrBlock", + "DestinationPrefixListID": "DestinationPrefixListId", + "DisableAPITermination": "DisableApiTermination", + "EBS": "Ebs", + "EBSOptimized": "EbsOptimized", + "EnableDNSHostnames": "EnableDnsHostnames", + "EnableDNSSupport": "EnableDnsSupport", + "EventID": "EventId", + "ExportTaskID": "ExportTaskId", + "ExportTaskIDs": "ExportTaskIds", + "FlowLogID": "FlowLogId", + "FlowLogIDs": "FlowLogIds", + "GatewayID": "GatewayId", + "GroupID": "GroupId", + "GroupIDs": "GroupIds", + "IAMFleetRole": "IamFleetRole", + "IAMInstanceProfile": "IamInstanceProfile", + "ICMPTypeCode": "IcmpTypeCode", + "ID": "Id", + "IOPS": "Iops", + "IPAddress": "IpAddress", + "IPOwnerID": "IpOwnerId", + "IPPermissions": "IpPermissions", + "IPPermissionsEgress": "IpPermissionsEgress", + "IPProtocol": "IpProtocol", + "IPRanges": "IpRanges", + "ImageID": "ImageId", + "ImageIDs": "ImageIds", + "ImportManifestURL": "ImportManifestUrl", + "ImportTaskID": "ImportTaskId", + "ImportTaskIDs": "ImportTaskIds", + "InstanceID": "InstanceId", + "InstanceIDs": "InstanceIds", + "InstanceOwnerID": "InstanceOwnerId", + "InternetGatewayID": "InternetGatewayId", + "InternetGatewayIDs": "InternetGatewayIds", + "KMSKeyID": "KmsKeyId", + "KernelID": "KernelId", + "MACAddress": "MacAddress", + "MapPublicIPOnLaunch": "MapPublicIpOnLaunch", + "NetworkACL": "NetworkAcl", + 
"NetworkACLAssociationID": "NetworkAclAssociationId", + "NetworkACLID": "NetworkAclId", + "NetworkACLIDs": "NetworkAclIds", + "NetworkACLs": "NetworkAcls", + "NetworkInterfaceID": "NetworkInterfaceId", + "NetworkInterfaceIDs": "NetworkInterfaceIds", + "NetworkInterfaceOwnerID": "NetworkInterfaceOwnerId", + "NewAssociationID": "NewAssociationId", + "OutsideIPAddress": "OutsideIpAddress", + "OwnerID": "OwnerId", + "OwnerIDs": "OwnerIds", + "PeerOwnerID": "PeerOwnerId", + "PeerVPCID": "PeerVpcId", + "PrefixListID": "PrefixListId", + "PrefixListIDs": "PrefixListIds", + "PresignedURL": "PresignedUrl", + "PrivateDNSName": "PrivateDnsName", + "PrivateIPAddress": "PrivateIpAddress", + "PrivateIPAddresses": "PrivateIpAddresses", + "ProductCodeID": "ProductCodeId", + "PropagatingVGWs": "PropagatingVgws", + "PublicDNSName": "PublicDnsName", + "PublicIP": "PublicIp", + "PublicIPAddress": "PublicIpAddress", + "PublicIPs": "PublicIps", + "RAMDisk": "Ramdisk", + "RAMDiskID": "RamdiskId", + "RemoveRouteTableIDs": "RemoveRouteTableIds", + "RequesterID": "RequesterId", + "RequesterVPCInfo": "RequesterVpcInfo", + "ReservationID": "ReservationId", + "ReservedInstancesID": "ReservedInstancesId", + "ReservedInstancesIDs": "ReservedInstancesIds", + "ReservedInstancesListingID": "ReservedInstancesListingId", + "ReservedInstancesModificationID": "ReservedInstancesModificationId", + "ReservedInstancesModificationIDs": "ReservedInstancesModificationIds", + "ReservedInstancesOfferingID": "ReservedInstancesOfferingId", + "ReservedInstancesOfferingIDs": "ReservedInstancesOfferingIds", + "ResourceID": "ResourceId", + "ResourceIDs": "ResourceIds", + "RestorableByUserIDs": "RestorableByUserIds", + "RouteTableAssociationID": "RouteTableAssociationId", + "RouteTableID": "RouteTableId", + "RouteTableIDs": "RouteTableIds", + "SRIOVNetSupport": "SriovNetSupport", + "SecondaryPrivateIPAddressCount": "SecondaryPrivateIpAddressCount", + "SecurityGroupIDs": "SecurityGroupIds", + "SnapshotID": "SnapshotId", + "SnapshotIDs": "SnapshotIds", + "SourceImageID": "SourceImageId", + "SourceSecurityGroupOwnerID": "SourceSecurityGroupOwnerId", + "SourceSnapshotID": "SourceSnapshotId", + "SpotFleetRequestID": "SpotFleetRequestId", + "SpotFleetRequestIDs": "SpotFleetRequestIds", + "SpotInstanceRequestID": "SpotInstanceRequestId", + "SpotInstanceRequestIDs": "SpotInstanceRequestIds", + "SubnetID": "SubnetId", + "SubnetIDs": "SubnetIds", + "URL": "Url", + "UserID": "UserId", + "UserIDGroupPairs": "UserIdGroupPairs", + "UserIDs": "UserIds", + "VGWTelemetry": "VgwTelemetry", + "VPC": "Vpc", + "VPCAttachment": "VpcAttachment", + "VPCAttachments": "VpcAttachments", + "VPCEndpoint": "VpcEndpoint", + "VPCEndpointID": "VpcEndpointId", + "VPCEndpointIDs": "VpcEndpointIds", + "VPCEndpoints": "VpcEndpoints", + "VPCID": "VpcId", + "VPCIDs": "VpcIds", + "VPCPeeringConnection": "VpcPeeringConnection", + "VPCPeeringConnectionID": "VpcPeeringConnectionId", + "VPCPeeringConnectionIDs": "VpcPeeringConnectionIds", + "VPCPeeringConnections": "VpcPeeringConnections", + "VPCs": "Vpcs", + "VPNConnection": "VpnConnection", + "VPNConnectionID": "VpnConnectionId", + "VPNConnectionIDs": "VpnConnectionIds", + "VPNConnections": "VpnConnections", + "VPNGateway": "VpnGateway", + "VPNGatewayID": "VpnGatewayId", + "VPNGatewayIDs": "VpnGatewayIds", + "VPNGateways": "VpnGateways", + "VolumeID": "VolumeId", + "VolumeIDs": "VolumeIds", + }, + }, + "github.com/aws/aws-sdk-go/service/ec2/ec2iface": { + operations: map[string]string{ + "AcceptVPCPeeringConnection": 
"AcceptVpcPeeringConnection", + "AcceptVPCPeeringConnectionPages": "AcceptVpcPeeringConnectionPages", + "AcceptVPCPeeringConnectionRequest": "AcceptVpcPeeringConnectionRequest", + "AssignPrivateIPAddresses": "AssignPrivateIpAddresses", + "AssignPrivateIPAddressesPages": "AssignPrivateIpAddressesPages", + "AssignPrivateIPAddressesRequest": "AssignPrivateIpAddressesRequest", + "AssociateDHCPOptions": "AssociateDhcpOptions", + "AssociateDHCPOptionsPages": "AssociateDhcpOptionsPages", + "AssociateDHCPOptionsRequest": "AssociateDhcpOptionsRequest", + "AttachClassicLinkVPC": "AttachClassicLinkVpc", + "AttachClassicLinkVPCPages": "AttachClassicLinkVpcPages", + "AttachClassicLinkVPCRequest": "AttachClassicLinkVpcRequest", + "AttachVPNGateway": "AttachVpnGateway", + "AttachVPNGatewayPages": "AttachVpnGatewayPages", + "AttachVPNGatewayRequest": "AttachVpnGatewayRequest", + "CreateDHCPOptions": "CreateDhcpOptions", + "CreateDHCPOptionsPages": "CreateDhcpOptionsPages", + "CreateDHCPOptionsRequest": "CreateDhcpOptionsRequest", + "CreateNetworkACL": "CreateNetworkAcl", + "CreateNetworkACLEntry": "CreateNetworkAclEntry", + "CreateNetworkACLEntryPages": "CreateNetworkAclEntryPages", + "CreateNetworkACLEntryRequest": "CreateNetworkAclEntryRequest", + "CreateNetworkACLPages": "CreateNetworkAclPages", + "CreateNetworkACLRequest": "CreateNetworkAclRequest", + "CreateVPC": "CreateVpc", + "CreateVPCEndpoint": "CreateVpcEndpoint", + "CreateVPCEndpointPages": "CreateVpcEndpointPages", + "CreateVPCEndpointRequest": "CreateVpcEndpointRequest", + "CreateVPCPages": "CreateVpcPages", + "CreateVPCPeeringConnection": "CreateVpcPeeringConnection", + "CreateVPCPeeringConnectionPages": "CreateVpcPeeringConnectionPages", + "CreateVPCPeeringConnectionRequest": "CreateVpcPeeringConnectionRequest", + "CreateVPCRequest": "CreateVpcRequest", + "CreateVPNConnection": "CreateVpnConnection", + "CreateVPNConnectionPages": "CreateVpnConnectionPages", + "CreateVPNConnectionRequest": "CreateVpnConnectionRequest", + "CreateVPNConnectionRoute": "CreateVpnConnectionRoute", + "CreateVPNConnectionRoutePages": "CreateVpnConnectionRoutePages", + "CreateVPNConnectionRouteRequest": "CreateVpnConnectionRouteRequest", + "CreateVPNGateway": "CreateVpnGateway", + "CreateVPNGatewayPages": "CreateVpnGatewayPages", + "CreateVPNGatewayRequest": "CreateVpnGatewayRequest", + "DeleteDHCPOptions": "DeleteDhcpOptions", + "DeleteDHCPOptionsPages": "DeleteDhcpOptionsPages", + "DeleteDHCPOptionsRequest": "DeleteDhcpOptionsRequest", + "DeleteNetworkACL": "DeleteNetworkAcl", + "DeleteNetworkACLEntry": "DeleteNetworkAclEntry", + "DeleteNetworkACLEntryPages": "DeleteNetworkAclEntryPages", + "DeleteNetworkACLEntryRequest": "DeleteNetworkAclEntryRequest", + "DeleteNetworkACLPages": "DeleteNetworkAclPages", + "DeleteNetworkACLRequest": "DeleteNetworkAclRequest", + "DeleteVPC": "DeleteVpc", + "DeleteVPCEndpoints": "DeleteVpcEndpoints", + "DeleteVPCEndpointsPages": "DeleteVpcEndpointsPages", + "DeleteVPCEndpointsRequest": "DeleteVpcEndpointsRequest", + "DeleteVPCPages": "DeleteVpcPages", + "DeleteVPCPeeringConnection": "DeleteVpcPeeringConnection", + "DeleteVPCPeeringConnectionPages": "DeleteVpcPeeringConnectionPages", + "DeleteVPCPeeringConnectionRequest": "DeleteVpcPeeringConnectionRequest", + "DeleteVPCRequest": "DeleteVpcRequest", + "DeleteVPNConnection": "DeleteVpnConnection", + "DeleteVPNConnectionPages": "DeleteVpnConnectionPages", + "DeleteVPNConnectionRequest": "DeleteVpnConnectionRequest", + "DeleteVPNConnectionRoute": "DeleteVpnConnectionRoute", + 
"DeleteVPNConnectionRoutePages": "DeleteVpnConnectionRoutePages", + "DeleteVPNConnectionRouteRequest": "DeleteVpnConnectionRouteRequest", + "DeleteVPNGateway": "DeleteVpnGateway", + "DeleteVPNGatewayPages": "DeleteVpnGatewayPages", + "DeleteVPNGatewayRequest": "DeleteVpnGatewayRequest", + "DescribeDHCPOptions": "DescribeDhcpOptions", + "DescribeDHCPOptionsPages": "DescribeDhcpOptionsPages", + "DescribeDHCPOptionsRequest": "DescribeDhcpOptionsRequest", + "DescribeNetworkACLs": "DescribeNetworkAcls", + "DescribeNetworkACLsPages": "DescribeNetworkAclsPages", + "DescribeNetworkACLsRequest": "DescribeNetworkAclsRequest", + "DescribeVPCAttribute": "DescribeVpcAttribute", + "DescribeVPCAttributePages": "DescribeVpcAttributePages", + "DescribeVPCAttributeRequest": "DescribeVpcAttributeRequest", + "DescribeVPCClassicLink": "DescribeVpcClassicLink", + "DescribeVPCClassicLinkPages": "DescribeVpcClassicLinkPages", + "DescribeVPCClassicLinkRequest": "DescribeVpcClassicLinkRequest", + "DescribeVPCEndpointServices": "DescribeVpcEndpointServices", + "DescribeVPCEndpointServicesPages": "DescribeVpcEndpointServicesPages", + "DescribeVPCEndpointServicesRequest": "DescribeVpcEndpointServicesRequest", + "DescribeVPCEndpoints": "DescribeVpcEndpoints", + "DescribeVPCEndpointsPages": "DescribeVpcEndpointsPages", + "DescribeVPCEndpointsRequest": "DescribeVpcEndpointsRequest", + "DescribeVPCPeeringConnections": "DescribeVpcPeeringConnections", + "DescribeVPCPeeringConnectionsPages": "DescribeVpcPeeringConnectionsPages", + "DescribeVPCPeeringConnectionsRequest": "DescribeVpcPeeringConnectionsRequest", + "DescribeVPCs": "DescribeVpcs", + "DescribeVPCsPages": "DescribeVpcsPages", + "DescribeVPCsRequest": "DescribeVpcsRequest", + "DescribeVPNConnections": "DescribeVpnConnections", + "DescribeVPNConnectionsPages": "DescribeVpnConnectionsPages", + "DescribeVPNConnectionsRequest": "DescribeVpnConnectionsRequest", + "DescribeVPNGateways": "DescribeVpnGateways", + "DescribeVPNGatewaysPages": "DescribeVpnGatewaysPages", + "DescribeVPNGatewaysRequest": "DescribeVpnGatewaysRequest", + "DetachClassicLinkVPC": "DetachClassicLinkVpc", + "DetachClassicLinkVPCPages": "DetachClassicLinkVpcPages", + "DetachClassicLinkVPCRequest": "DetachClassicLinkVpcRequest", + "DetachVPNGateway": "DetachVpnGateway", + "DetachVPNGatewayPages": "DetachVpnGatewayPages", + "DetachVPNGatewayRequest": "DetachVpnGatewayRequest", + "DisableVGWRoutePropagation": "DisableVgwRoutePropagation", + "DisableVGWRoutePropagationPages": "DisableVgwRoutePropagationPages", + "DisableVGWRoutePropagationRequest": "DisableVgwRoutePropagationRequest", + "DisableVPCClassicLink": "DisableVpcClassicLink", + "DisableVPCClassicLinkPages": "DisableVpcClassicLinkPages", + "DisableVPCClassicLinkRequest": "DisableVpcClassicLinkRequest", + "EnableVGWRoutePropagation": "EnableVgwRoutePropagation", + "EnableVGWRoutePropagationPages": "EnableVgwRoutePropagationPages", + "EnableVGWRoutePropagationRequest": "EnableVgwRoutePropagationRequest", + "EnableVPCClassicLink": "EnableVpcClassicLink", + "EnableVPCClassicLinkPages": "EnableVpcClassicLinkPages", + "EnableVPCClassicLinkRequest": "EnableVpcClassicLinkRequest", + "ModifyVPCAttribute": "ModifyVpcAttribute", + "ModifyVPCAttributePages": "ModifyVpcAttributePages", + "ModifyVPCAttributeRequest": "ModifyVpcAttributeRequest", + "ModifyVPCEndpoint": "ModifyVpcEndpoint", + "ModifyVPCEndpointPages": "ModifyVpcEndpointPages", + "ModifyVPCEndpointRequest": "ModifyVpcEndpointRequest", + "MoveAddressToVPC": "MoveAddressToVpc", + 
"MoveAddressToVPCPages": "MoveAddressToVpcPages", + "MoveAddressToVPCRequest": "MoveAddressToVpcRequest", + "RejectVPCPeeringConnection": "RejectVpcPeeringConnection", + "RejectVPCPeeringConnectionPages": "RejectVpcPeeringConnectionPages", + "RejectVPCPeeringConnectionRequest": "RejectVpcPeeringConnectionRequest", + "ReplaceNetworkACLAssociation": "ReplaceNetworkAclAssociation", + "ReplaceNetworkACLAssociationPages": "ReplaceNetworkAclAssociationPages", + "ReplaceNetworkACLAssociationRequest": "ReplaceNetworkAclAssociationRequest", + "ReplaceNetworkACLEntry": "ReplaceNetworkAclEntry", + "ReplaceNetworkACLEntryPages": "ReplaceNetworkAclEntryPages", + "ReplaceNetworkACLEntryRequest": "ReplaceNetworkAclEntryRequest", + "UnassignPrivateIPAddresses": "UnassignPrivateIpAddresses", + "UnassignPrivateIPAddressesPages": "UnassignPrivateIpAddressesPages", + "UnassignPrivateIPAddressesRequest": "UnassignPrivateIpAddressesRequest", + }, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/ecs": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "ARN": "Arn", + "CPU": "Cpu", + "ClusterARN": "ClusterArn", + "ClusterARNs": "ClusterArns", + "ContainerARN": "ContainerArn", + "ContainerInstanceARN": "ContainerInstanceArn", + "ContainerInstanceARNs": "ContainerInstanceArns", + "EC2InstanceID": "Ec2InstanceId", + "ID": "Id", + "RoleARN": "RoleArn", + "ServiceARN": "ServiceArn", + "ServiceARNs": "ServiceArns", + "TaskARN": "TaskArn", + "TaskARNs": "TaskArns", + "TaskDefinitionARN": "TaskDefinitionArn", + "TaskDefinitionARNs": "TaskDefinitionArns", + }, + }, + "github.com/aws/aws-sdk-go/service/ecs/ecsiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/efs": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "FileSystemID": "FileSystemId", + "IPAddress": "IpAddress", + "MountTargetID": "MountTargetId", + "NetworkInterfaceID": "NetworkInterfaceId", + "OwnerID": "OwnerId", + "SubnetID": "SubnetId", + }, + }, + "github.com/aws/aws-sdk-go/service/efs/efsiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/elasticache": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "CacheClusterID": "CacheClusterId", + "CacheNodeID": "CacheNodeId", + "CacheNodeIDsToReboot": "CacheNodeIdsToReboot", + "CacheNodeIDsToRemove": "CacheNodeIdsToRemove", + "EC2SecurityGroupOwnerID": "EC2SecurityGroupOwnerId", + "NodeGroupID": "NodeGroupId", + "NotificationTopicARN": "NotificationTopicArn", + "OwnerID": "OwnerId", + "PrimaryClusterID": "PrimaryClusterId", + "ReplicationGroupID": "ReplicationGroupId", + "ReservedCacheNodeID": "ReservedCacheNodeId", + "ReservedCacheNodesOfferingID": "ReservedCacheNodesOfferingId", + "SecurityGroupID": "SecurityGroupId", + "SecurityGroupIDs": "SecurityGroupIds", + "SnapshotARNs": "SnapshotArns", + "SnapshottingClusterID": "SnapshottingClusterId", + "SourceCacheNodeID": "SourceCacheNodeId", + "SubnetIDs": "SubnetIds", + "TopicARN": "TopicArn", + "VPCID": "VpcId", + }, + }, + "github.com/aws/aws-sdk-go/service/elasticache/elasticacheiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/elasticbeanstalk": { + operations: 
map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "DestinationEnvironmentID": "DestinationEnvironmentId", + "EC2InstanceID": "Ec2InstanceId", + "EnvironmentID": "EnvironmentId", + "EnvironmentIDs": "EnvironmentIds", + "ID": "Id", + "OK": "Ok", + "RequestID": "RequestId", + "SourceEnvironmentID": "SourceEnvironmentId", + }, + }, + "github.com/aws/aws-sdk-go/service/elasticbeanstalk/elasticbeanstalkiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/elastictranscoder": { + operations: map[string]string{}, + shapes: map[string]string{ + "HLSContentProtection": "HlsContentProtection", + "PlayReadyDRM": "PlayReadyDrm", + }, + fields: map[string]string{ + "ARN": "Arn", + "AWSKMSKeyARN": "AwsKmsKeyArn", + "HLSContentProtection": "HlsContentProtection", + "ID": "Id", + "KeyID": "KeyId", + "KeyMD5": "KeyMd5", + "LicenseAcquisitionURL": "LicenseAcquisitionUrl", + "PipelineID": "PipelineId", + "PlayReadyDRM": "PlayReadyDrm", + "PresetID": "PresetId", + "PresetWatermarkID": "PresetWatermarkId", + }, + }, + "github.com/aws/aws-sdk-go/service/elastictranscoder/elastictranscoderiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/elb": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "InstanceID": "InstanceId", + "SSLCertificateID": "SSLCertificateId", + "VPCID": "VPCId", + }, + }, + "github.com/aws/aws-sdk-go/service/elb/elbiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/emr": { + operations: map[string]string{}, + shapes: map[string]string{ + "EC2InstanceAttributes": "Ec2InstanceAttributes", + "HadoopJARStepConfig": "HadoopJarStepConfig", + }, + fields: map[string]string{ + "AMIVersion": "AmiVersion", + "ClusterID": "ClusterId", + "EC2AvailabilityZone": "Ec2AvailabilityZone", + "EC2InstanceAttributes": "Ec2InstanceAttributes", + "EC2InstanceID": "Ec2InstanceId", + "EC2InstanceIDsToTerminate": "EC2InstanceIdsToTerminate", + "EC2KeyName": "Ec2KeyName", + "EC2SubnetID": "Ec2SubnetId", + "EMRManagedMasterSecurityGroup": "EmrManagedMasterSecurityGroup", + "EMRManagedSlaveSecurityGroup": "EmrManagedSlaveSecurityGroup", + "HadoopJARStep": "HadoopJarStep", + "IAMInstanceProfile": "IamInstanceProfile", + "ID": "Id", + "InstanceGroupID": "InstanceGroupId", + "InstanceGroupIDs": "InstanceGroupIds", + "JAR": "Jar", + "JobFlowID": "JobFlowId", + "JobFlowIDs": "JobFlowIds", + "LogURI": "LogUri", + "MasterInstanceID": "MasterInstanceId", + "MasterPublicDNSName": "MasterPublicDnsName", + "PrivateDNSName": "PrivateDnsName", + "PrivateIPAddress": "PrivateIpAddress", + "PublicDNSName": "PublicDnsName", + "PublicIPAddress": "PublicIpAddress", + "RequestedAMIVersion": "RequestedAmiVersion", + "ResourceID": "ResourceId", + "RunningAMIVersion": "RunningAmiVersion", + "StepID": "StepId", + "StepIDs": "StepIds", + }, + }, + "github.com/aws/aws-sdk-go/service/emr/emriface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/glacier": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "AccountID": "AccountId", + "ArchiveID": "ArchiveId", + "JobID": "JobId", + "LockID": "LockId", + "MultipartUploadID": "MultipartUploadId", + 
"UploadID": "UploadId", + }, + }, + "github.com/aws/aws-sdk-go/service/glacier/glacieriface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/iam": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "ARN": "Arn", + "AccessKeyID": "AccessKeyId", + "CertificateID": "CertificateId", + "DefaultVersionID": "DefaultVersionId", + "GroupID": "GroupId", + "InstanceProfileID": "InstanceProfileId", + "OpenIDConnectProviderARN": "OpenIDConnectProviderArn", + "PolicyARN": "PolicyArn", + "PolicyID": "PolicyId", + "RoleID": "RoleId", + "SAMLProviderARN": "SAMLProviderArn", + "SSHPublicKeyID": "SSHPublicKeyId", + "ServerCertificateID": "ServerCertificateId", + "URL": "Url", + "UserID": "UserId", + "VersionID": "VersionId", + }, + }, + "github.com/aws/aws-sdk-go/service/iam/iamiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/kinesis": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "AdjacentParentShardID": "AdjacentParentShardId", + "ExclusiveStartShardID": "ExclusiveStartShardId", + "ParentShardID": "ParentShardId", + "ShardID": "ShardId", + }, + }, + "github.com/aws/aws-sdk-go/service/kinesis/kinesisiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/kms": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "ARN": "Arn", + "AWSAccountID": "AWSAccountId", + "AliasARN": "AliasArn", + "DestinationKeyID": "DestinationKeyId", + "GrantID": "GrantId", + "KeyARN": "KeyArn", + "KeyID": "KeyId", + "SourceKeyID": "SourceKeyId", + "TargetKeyID": "TargetKeyId", + }, + }, + "github.com/aws/aws-sdk-go/service/kms/kmsiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/lambda": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "EventSourceARN": "EventSourceArn", + "FunctionARN": "FunctionArn", + "SourceARN": "SourceArn", + "StatementID": "StatementId", + }, + }, + "github.com/aws/aws-sdk-go/service/lambda/lambdaiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/machinelearning": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "BatchPredictionDataSourceID": "BatchPredictionDataSourceId", + "BatchPredictionID": "BatchPredictionId", + "CreatedByIAMUser": "CreatedByIamUser", + "DataPipelineID": "DataPipelineId", + "DataSchemaURI": "DataSchemaUri", + "DataSourceID": "DataSourceId", + "EndpointURL": "EndpointUrl", + "EvaluationDataSourceID": "EvaluationDataSourceId", + "EvaluationID": "EvaluationId", + "LogURI": "LogUri", + "MLModelID": "MLModelId", + "OutputURI": "OutputUri", + "RecipeURI": "RecipeUri", + "SecurityGroupIDs": "SecurityGroupIds", + "SelectSQLQuery": "SelectSqlQuery", + "SubnetID": "SubnetId", + "TrainingDataSourceID": "TrainingDataSourceId", + }, + }, + "github.com/aws/aws-sdk-go/service/machinelearning/machinelearningiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/mobileanalytics": { + 
operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "ID": "Id", + }, + }, + "github.com/aws/aws-sdk-go/service/mobileanalytics/mobileanalyticsiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/opsworks": { + operations: map[string]string{ + "AssociateElasticIP": "AssociateElasticIp", + "AssociateElasticIPPages": "AssociateElasticIpPages", + "AssociateElasticIPRequest": "AssociateElasticIpRequest", + "DeregisterElasticIP": "DeregisterElasticIp", + "DeregisterElasticIPPages": "DeregisterElasticIpPages", + "DeregisterElasticIPRequest": "DeregisterElasticIpRequest", + "DeregisterRDSDBInstance": "DeregisterRdsDbInstance", + "DeregisterRDSDBInstancePages": "DeregisterRdsDbInstancePages", + "DeregisterRDSDBInstanceRequest": "DeregisterRdsDbInstanceRequest", + "DescribeElasticIPs": "DescribeElasticIps", + "DescribeElasticIPsPages": "DescribeElasticIpsPages", + "DescribeElasticIPsRequest": "DescribeElasticIpsRequest", + "DescribeRAIDArrays": "DescribeRaidArrays", + "DescribeRAIDArraysPages": "DescribeRaidArraysPages", + "DescribeRAIDArraysRequest": "DescribeRaidArraysRequest", + "DescribeRDSDBInstances": "DescribeRdsDbInstances", + "DescribeRDSDBInstancesPages": "DescribeRdsDbInstancesPages", + "DescribeRDSDBInstancesRequest": "DescribeRdsDbInstancesRequest", + "DisassociateElasticIP": "DisassociateElasticIp", + "DisassociateElasticIPPages": "DisassociateElasticIpPages", + "DisassociateElasticIPRequest": "DisassociateElasticIpRequest", + "RegisterElasticIP": "RegisterElasticIp", + "RegisterElasticIPPages": "RegisterElasticIpPages", + "RegisterElasticIPRequest": "RegisterElasticIpRequest", + "RegisterRDSDBInstance": "RegisterRdsDbInstance", + "RegisterRDSDBInstancePages": "RegisterRdsDbInstancePages", + "RegisterRDSDBInstanceRequest": "RegisterRdsDbInstanceRequest", + "UpdateElasticIP": "UpdateElasticIp", + "UpdateElasticIPPages": "UpdateElasticIpPages", + "UpdateElasticIPRequest": "UpdateElasticIpRequest", + "UpdateRDSDBInstance": "UpdateRdsDbInstance", + "UpdateRDSDBInstancePages": "UpdateRdsDbInstancePages", + "UpdateRDSDBInstanceRequest": "UpdateRdsDbInstanceRequest", + }, + shapes: map[string]string{ + "AssociateElasticIPInput": "AssociateElasticIpInput", + "AssociateElasticIPOutput": "AssociateElasticIpOutput", + "DeregisterElasticIPInput": "DeregisterElasticIpInput", + "DeregisterElasticIPOutput": "DeregisterElasticIpOutput", + "DeregisterRDSDBInstanceInput": "DeregisterRdsDbInstanceInput", + "DeregisterRDSDBInstanceOutput": "DeregisterRdsDbInstanceOutput", + "DescribeElasticIPsInput": "DescribeElasticIpsInput", + "DescribeElasticIPsOutput": "DescribeElasticIpsOutput", + "DescribeRAIDArraysInput": "DescribeRaidArraysInput", + "DescribeRAIDArraysOutput": "DescribeRaidArraysOutput", + "DescribeRDSDBInstancesInput": "DescribeRdsDbInstancesInput", + "DescribeRDSDBInstancesOutput": "DescribeRdsDbInstancesOutput", + "DisassociateElasticIPInput": "DisassociateElasticIpInput", + "DisassociateElasticIPOutput": "DisassociateElasticIpOutput", + "EBSBlockDevice": "EbsBlockDevice", + "ElasticIP": "ElasticIp", + "RAIDArray": "RaidArray", + "RDSDBInstance": "RdsDbInstance", + "RegisterElasticIPInput": "RegisterElasticIpInput", + "RegisterElasticIPOutput": "RegisterElasticIpOutput", + "RegisterRDSDBInstanceInput": "RegisterRdsDbInstanceInput", + "RegisterRDSDBInstanceOutput": "RegisterRdsDbInstanceOutput", + "SSLConfiguration": "SslConfiguration", + 
"UpdateElasticIPInput": "UpdateElasticIpInput", + "UpdateElasticIPOutput": "UpdateElasticIpOutput", + "UpdateRDSDBInstanceInput": "UpdateRdsDbInstanceInput", + "UpdateRDSDBInstanceOutput": "UpdateRdsDbInstanceOutput", + }, + fields: map[string]string{ + "AMIID": "AmiId", + "ARN": "Arn", + "AgentInstallerURL": "AgentInstallerUrl", + "AllowSSH": "AllowSsh", + "AppID": "AppId", + "AppIDs": "AppIds", + "AutoAssignElasticIPs": "AutoAssignElasticIps", + "AutoAssignPublicIPs": "AutoAssignPublicIps", + "CPUThreshold": "CpuThreshold", + "CloneAppIDs": "CloneAppIds", + "CommandID": "CommandId", + "CommandIDs": "CommandIds", + "CustomInstanceProfileARN": "CustomInstanceProfileArn", + "CustomJSON": "CustomJson", + "CustomSecurityGroupIDs": "CustomSecurityGroupIds", + "DBInstanceIdentifier": "DbInstanceIdentifier", + "DBPassword": "DbPassword", + "DBUser": "DbUser", + "DNSName": "DnsName", + "DefaultInstanceProfileARN": "DefaultInstanceProfileArn", + "DefaultSSHKeyName": "DefaultSshKeyName", + "DefaultSubnetID": "DefaultSubnetId", + "DelayUntilELBConnectionsDrained": "DelayUntilElbConnectionsDrained", + "DeleteElasticIP": "DeleteElasticIp", + "DeploymentID": "DeploymentId", + "DeploymentIDs": "DeploymentIds", + "EBS": "Ebs", + "EBSOptimized": "EbsOptimized", + "EC2InstanceID": "Ec2InstanceId", + "EC2InstanceIDs": "Ec2InstanceIds", + "EC2VolumeID": "Ec2VolumeId", + "EcsClusterARN": "EcsClusterArn", + "EcsClusterARNs": "EcsClusterArns", + "EcsContainerInstanceARN": "EcsContainerInstanceArn", + "ElasticIP": "ElasticIp", + "ElasticIPs": "ElasticIps", + "EnableSSL": "EnableSsl", + "IAMUserARN": "IamUserArn", + "IAMUserARNs": "IamUserArns", + "IOPS": "Iops", + "IP": "Ip", + "IPs": "Ips", + "InstanceID": "InstanceId", + "InstanceIDs": "InstanceIds", + "InstanceProfileARN": "InstanceProfileArn", + "LastServiceErrorID": "LastServiceErrorId", + "LayerID": "LayerId", + "LayerIDs": "LayerIds", + "LogURL": "LogUrl", + "MissingOnRDS": "MissingOnRds", + "PrivateDNS": "PrivateDns", + "PrivateIP": "PrivateIp", + "PublicDNS": "PublicDns", + "PublicIP": "PublicIp", + "RAIDArrayID": "RaidArrayId", + "RAIDArrayIDs": "RaidArrayIds", + "RAIDArrays": "RaidArrays", + "RAIDLevel": "RaidLevel", + "RDSDBInstanceARN": "RdsDbInstanceArn", + "RDSDBInstanceARNs": "RdsDbInstanceArns", + "RDSDBInstances": "RdsDbInstances", + "RSAPublicKey": "RsaPublicKey", + "RSAPublicKeyFingerprint": "RsaPublicKeyFingerprint", + "RootDeviceVolumeID": "RootDeviceVolumeId", + "SSHHostDSAKeyFingerprint": "SshHostDsaKeyFingerprint", + "SSHHostRSAKeyFingerprint": "SshHostRsaKeyFingerprint", + "SSHKey": "SshKey", + "SSHKeyName": "SshKeyName", + "SSHPublicKey": "SshPublicKey", + "SSHUsername": "SshUsername", + "SSLConfiguration": "SslConfiguration", + "SecurityGroupIDs": "SecurityGroupIds", + "ServiceErrorID": "ServiceErrorId", + "ServiceErrorIDs": "ServiceErrorIds", + "ServiceRoleARN": "ServiceRoleArn", + "SnapshotID": "SnapshotId", + "SourceStackID": "SourceStackId", + "StackID": "StackId", + "StackIDs": "StackIds", + "SubnetID": "SubnetId", + "SubnetIDs": "SubnetIds", + "URL": "Url", + "UseEBSOptimizedInstances": "UseEbsOptimizedInstances", + "UseOpsWorksSecurityGroups": "UseOpsworksSecurityGroups", + "VPCID": "VpcId", + "VolumeID": "VolumeId", + "VolumeIDs": "VolumeIds", + }, + }, + "github.com/aws/aws-sdk-go/service/opsworks/opsworksiface": { + operations: map[string]string{ + "AssociateElasticIP": "AssociateElasticIp", + "AssociateElasticIPPages": "AssociateElasticIpPages", + "AssociateElasticIPRequest": "AssociateElasticIpRequest", + 
"DeregisterElasticIP": "DeregisterElasticIp", + "DeregisterElasticIPPages": "DeregisterElasticIpPages", + "DeregisterElasticIPRequest": "DeregisterElasticIpRequest", + "DeregisterRDSDBInstance": "DeregisterRdsDbInstance", + "DeregisterRDSDBInstancePages": "DeregisterRdsDbInstancePages", + "DeregisterRDSDBInstanceRequest": "DeregisterRdsDbInstanceRequest", + "DescribeElasticIPs": "DescribeElasticIps", + "DescribeElasticIPsPages": "DescribeElasticIpsPages", + "DescribeElasticIPsRequest": "DescribeElasticIpsRequest", + "DescribeRAIDArrays": "DescribeRaidArrays", + "DescribeRAIDArraysPages": "DescribeRaidArraysPages", + "DescribeRAIDArraysRequest": "DescribeRaidArraysRequest", + "DescribeRDSDBInstances": "DescribeRdsDbInstances", + "DescribeRDSDBInstancesPages": "DescribeRdsDbInstancesPages", + "DescribeRDSDBInstancesRequest": "DescribeRdsDbInstancesRequest", + "DisassociateElasticIP": "DisassociateElasticIp", + "DisassociateElasticIPPages": "DisassociateElasticIpPages", + "DisassociateElasticIPRequest": "DisassociateElasticIpRequest", + "RegisterElasticIP": "RegisterElasticIp", + "RegisterElasticIPPages": "RegisterElasticIpPages", + "RegisterElasticIPRequest": "RegisterElasticIpRequest", + "RegisterRDSDBInstance": "RegisterRdsDbInstance", + "RegisterRDSDBInstancePages": "RegisterRdsDbInstancePages", + "RegisterRDSDBInstanceRequest": "RegisterRdsDbInstanceRequest", + "UpdateElasticIP": "UpdateElasticIp", + "UpdateElasticIPPages": "UpdateElasticIpPages", + "UpdateElasticIPRequest": "UpdateElasticIpRequest", + "UpdateRDSDBInstance": "UpdateRdsDbInstance", + "UpdateRDSDBInstancePages": "UpdateRdsDbInstancePages", + "UpdateRDSDBInstanceRequest": "UpdateRdsDbInstanceRequest", + }, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/rds": { + operations: map[string]string{}, + shapes: map[string]string{ + "VPCSecurityGroupMembership": "VpcSecurityGroupMembership", + }, + fields: map[string]string{ + "AllowsVPCAndNonVPCInstanceMemberships": "AllowsVpcAndNonVpcInstanceMemberships", + "CustSubscriptionID": "CustSubscriptionId", + "CustomerAWSID": "CustomerAwsId", + "DBIResourceID": "DbiResourceId", + "DBInstancePort": "DbInstancePort", + "EC2SecurityGroupID": "EC2SecurityGroupId", + "EC2SecurityGroupOwnerID": "EC2SecurityGroupOwnerId", + "IOPS": "Iops", + "KMSKeyID": "KmsKeyId", + "OwnerID": "OwnerId", + "ReservedDBInstanceID": "ReservedDBInstanceId", + "ReservedDBInstancesOfferingID": "ReservedDBInstancesOfferingId", + "SNSTopicARN": "SnsTopicArn", + "SourceIDs": "SourceIds", + "SourceIDsList": "SourceIdsList", + "SubnetIDs": "SubnetIds", + "SupportsIOPS": "SupportsIops", + "TDECredentialARN": "TdeCredentialArn", + "TDECredentialPassword": "TdeCredentialPassword", + "VPC": "Vpc", + "VPCID": "VpcId", + "VPCSecurityGroupID": "VpcSecurityGroupId", + "VPCSecurityGroupIDs": "VpcSecurityGroupIds", + "VPCSecurityGroupMemberships": "VpcSecurityGroupMemberships", + "VPCSecurityGroups": "VpcSecurityGroups", + }, + }, + "github.com/aws/aws-sdk-go/service/rds/rdsiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/redshift": { + operations: map[string]string{ + "CreateHSMClientCertificate": "CreateHsmClientCertificate", + "CreateHSMClientCertificatePages": "CreateHsmClientCertificatePages", + "CreateHSMClientCertificateRequest": "CreateHsmClientCertificateRequest", + "CreateHSMConfiguration": "CreateHsmConfiguration", + "CreateHSMConfigurationPages": 
"CreateHsmConfigurationPages", + "CreateHSMConfigurationRequest": "CreateHsmConfigurationRequest", + "DeleteHSMClientCertificate": "DeleteHsmClientCertificate", + "DeleteHSMClientCertificatePages": "DeleteHsmClientCertificatePages", + "DeleteHSMClientCertificateRequest": "DeleteHsmClientCertificateRequest", + "DeleteHSMConfiguration": "DeleteHsmConfiguration", + "DeleteHSMConfigurationPages": "DeleteHsmConfigurationPages", + "DeleteHSMConfigurationRequest": "DeleteHsmConfigurationRequest", + "DescribeHSMClientCertificates": "DescribeHsmClientCertificates", + "DescribeHSMClientCertificatesPages": "DescribeHsmClientCertificatesPages", + "DescribeHSMClientCertificatesRequest": "DescribeHsmClientCertificatesRequest", + "DescribeHSMConfigurations": "DescribeHsmConfigurations", + "DescribeHSMConfigurationsPages": "DescribeHsmConfigurationsPages", + "DescribeHSMConfigurationsRequest": "DescribeHsmConfigurationsRequest", + }, + shapes: map[string]string{ + "CreateHSMClientCertificateInput": "CreateHsmClientCertificateInput", + "CreateHSMClientCertificateOutput": "CreateHsmClientCertificateOutput", + "CreateHSMConfigurationInput": "CreateHsmConfigurationInput", + "CreateHSMConfigurationOutput": "CreateHsmConfigurationOutput", + "DeleteHSMClientCertificateInput": "DeleteHsmClientCertificateInput", + "DeleteHSMClientCertificateOutput": "DeleteHsmClientCertificateOutput", + "DeleteHSMConfigurationInput": "DeleteHsmConfigurationInput", + "DeleteHSMConfigurationOutput": "DeleteHsmConfigurationOutput", + "DescribeHSMClientCertificatesInput": "DescribeHsmClientCertificatesInput", + "DescribeHSMClientCertificatesOutput": "DescribeHsmClientCertificatesOutput", + "DescribeHSMConfigurationsInput": "DescribeHsmConfigurationsInput", + "DescribeHSMConfigurationsOutput": "DescribeHsmConfigurationsOutput", + "ElasticIPStatus": "ElasticIpStatus", + "HSMClientCertificate": "HsmClientCertificate", + "HSMConfiguration": "HsmConfiguration", + "HSMStatus": "HsmStatus", + "VPCSecurityGroupMembership": "VpcSecurityGroupMembership", + }, + fields: map[string]string{ + "AccountID": "AccountId", + "CustSubscriptionID": "CustSubscriptionId", + "CustomerAWSID": "CustomerAwsId", + "EC2SecurityGroupOwnerID": "EC2SecurityGroupOwnerId", + "ElasticIP": "ElasticIp", + "ElasticIPStatus": "ElasticIpStatus", + "EventID": "EventId", + "HSMClientCertificate": "HsmClientCertificate", + "HSMClientCertificateIdentifier": "HsmClientCertificateIdentifier", + "HSMClientCertificatePublicKey": "HsmClientCertificatePublicKey", + "HSMClientCertificates": "HsmClientCertificates", + "HSMConfiguration": "HsmConfiguration", + "HSMConfigurationIdentifier": "HsmConfigurationIdentifier", + "HSMConfigurations": "HsmConfigurations", + "HSMIPAddress": "HsmIpAddress", + "HSMPartitionName": "HsmPartitionName", + "HSMPartitionPassword": "HsmPartitionPassword", + "HSMServerPublicCertificate": "HsmServerPublicCertificate", + "HSMStatus": "HsmStatus", + "KMSKeyID": "KmsKeyId", + "ReservedNodeID": "ReservedNodeId", + "ReservedNodeOfferingID": "ReservedNodeOfferingId", + "SNSTopicARN": "SnsTopicArn", + "SourceIDs": "SourceIds", + "SourceIDsList": "SourceIdsList", + "SubnetIDs": "SubnetIds", + "VPCID": "VpcId", + "VPCSecurityGroupID": "VpcSecurityGroupId", + "VPCSecurityGroupIDs": "VpcSecurityGroupIds", + "VPCSecurityGroups": "VpcSecurityGroups", + }, + }, + "github.com/aws/aws-sdk-go/service/redshift/redshiftiface": { + operations: map[string]string{ + "CreateHSMClientCertificate": "CreateHsmClientCertificate", + "CreateHSMClientCertificatePages": 
"CreateHsmClientCertificatePages", + "CreateHSMClientCertificateRequest": "CreateHsmClientCertificateRequest", + "CreateHSMConfiguration": "CreateHsmConfiguration", + "CreateHSMConfigurationPages": "CreateHsmConfigurationPages", + "CreateHSMConfigurationRequest": "CreateHsmConfigurationRequest", + "DeleteHSMClientCertificate": "DeleteHsmClientCertificate", + "DeleteHSMClientCertificatePages": "DeleteHsmClientCertificatePages", + "DeleteHSMClientCertificateRequest": "DeleteHsmClientCertificateRequest", + "DeleteHSMConfiguration": "DeleteHsmConfiguration", + "DeleteHSMConfigurationPages": "DeleteHsmConfigurationPages", + "DeleteHSMConfigurationRequest": "DeleteHsmConfigurationRequest", + "DescribeHSMClientCertificates": "DescribeHsmClientCertificates", + "DescribeHSMClientCertificatesPages": "DescribeHsmClientCertificatesPages", + "DescribeHSMClientCertificatesRequest": "DescribeHsmClientCertificatesRequest", + "DescribeHSMConfigurations": "DescribeHsmConfigurations", + "DescribeHSMConfigurationsPages": "DescribeHsmConfigurationsPages", + "DescribeHSMConfigurationsRequest": "DescribeHsmConfigurationsRequest", + }, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/route53": { + operations: map[string]string{ + "GetCheckerIPRanges": "GetCheckerIpRanges", + "GetCheckerIPRangesPages": "GetCheckerIpRangesPages", + "GetCheckerIPRangesRequest": "GetCheckerIpRangesRequest", + }, + shapes: map[string]string{ + "GetCheckerIPRangesInput": "GetCheckerIpRangesInput", + "GetCheckerIPRangesOutput": "GetCheckerIpRangesOutput", + }, + fields: map[string]string{ + "CheckerIPRanges": "CheckerIpRanges", + "DelegationSetID": "DelegationSetId", + "HealthCheckID": "HealthCheckId", + "HostedZoneID": "HostedZoneId", + "ID": "Id", + "NextHostedZoneID": "NextHostedZoneId", + "ResourceID": "ResourceId", + "ResourceIDs": "ResourceIds", + "VPCID": "VPCId", + }, + }, + "github.com/aws/aws-sdk-go/service/route53/route53iface": { + operations: map[string]string{ + "GetCheckerIPRanges": "GetCheckerIpRanges", + "GetCheckerIPRangesPages": "GetCheckerIpRangesPages", + "GetCheckerIPRangesRequest": "GetCheckerIpRangesRequest", + }, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/route53domains": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "DNSSec": "DnsSec", + "GlueIPs": "GlueIps", + "IDNLangCode": "IdnLangCode", + "OperationID": "OperationId", + "RegistrarURL": "RegistrarUrl", + "RegistryDomainID": "RegistryDomainId", + }, + }, + "github.com/aws/aws-sdk-go/service/route53domains/route53domainsiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/s3": { + operations: map[string]string{ + "DeleteBucketCORS": "DeleteBucketCors", + "DeleteBucketCORSPages": "DeleteBucketCorsPages", + "DeleteBucketCORSRequest": "DeleteBucketCorsRequest", + "GetBucketACL": "GetBucketAcl", + "GetBucketACLPages": "GetBucketAclPages", + "GetBucketACLRequest": "GetBucketAclRequest", + "GetBucketCORS": "GetBucketCors", + "GetBucketCORSPages": "GetBucketCorsPages", + "GetBucketCORSRequest": "GetBucketCorsRequest", + "GetObjectACL": "GetObjectAcl", + "GetObjectACLPages": "GetObjectAclPages", + "GetObjectACLRequest": "GetObjectAclRequest", + "PutBucketACL": "PutBucketAcl", + "PutBucketACLPages": "PutBucketAclPages", + "PutBucketACLRequest": "PutBucketAclRequest", + "PutBucketCORS": "PutBucketCors", + 
"PutBucketCORSPages": "PutBucketCorsPages", + "PutBucketCORSRequest": "PutBucketCorsRequest", + "PutObjectACL": "PutObjectAcl", + "PutObjectACLPages": "PutObjectAclPages", + "PutObjectACLRequest": "PutObjectAclRequest", + }, + shapes: map[string]string{ + "DeleteBucketCORSInput": "DeleteBucketCorsInput", + "DeleteBucketCORSOutput": "DeleteBucketCorsOutput", + "GetBucketACLInput": "GetBucketAclInput", + "GetBucketACLOutput": "GetBucketAclOutput", + "GetBucketCORSInput": "GetBucketCorsInput", + "GetBucketCORSOutput": "GetBucketCorsOutput", + "GetObjectACLInput": "GetObjectAclInput", + "GetObjectACLOutput": "GetObjectAclOutput", + "PutBucketACLInput": "PutBucketAclInput", + "PutBucketACLOutput": "PutBucketAclOutput", + "PutBucketCORSInput": "PutBucketCorsInput", + "PutBucketCORSOutput": "PutBucketCorsOutput", + "PutObjectACLInput": "PutObjectAclInput", + "PutObjectACLOutput": "PutObjectAclOutput", + }, + fields: map[string]string{ + "CopySourceVersionID": "CopySourceVersionId", + "DeleteMarkerVersionID": "DeleteMarkerVersionId", + "HTTPErrorCodeReturnedEquals": "HttpErrorCodeReturnedEquals", + "HTTPRedirectCode": "HttpRedirectCode", + "ID": "Id", + "LambdaFunctionARN": "LambdaFunctionArn", + "NextUploadIDMarker": "NextUploadIdMarker", + "NextVersionIDMarker": "NextVersionIdMarker", + "QueueARN": "QueueArn", + "SSEKMSKeyID": "SSEKMSKeyId", + "TopicARN": "TopicArn", + "UploadID": "UploadId", + "UploadIDMarker": "UploadIdMarker", + "VersionID": "VersionId", + "VersionIDMarker": "VersionIdMarker", + }, + }, + "github.com/aws/aws-sdk-go/service/s3/s3manager": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "UploadID": "UploadId", + }, + }, + "github.com/aws/aws-sdk-go/service/s3/s3iface": { + operations: map[string]string{ + "DeleteBucketCORS": "DeleteBucketCors", + "DeleteBucketCORSPages": "DeleteBucketCorsPages", + "DeleteBucketCORSRequest": "DeleteBucketCorsRequest", + "GetBucketACL": "GetBucketAcl", + "GetBucketACLPages": "GetBucketAclPages", + "GetBucketACLRequest": "GetBucketAclRequest", + "GetBucketCORS": "GetBucketCors", + "GetBucketCORSPages": "GetBucketCorsPages", + "GetBucketCORSRequest": "GetBucketCorsRequest", + "GetObjectACL": "GetObjectAcl", + "GetObjectACLPages": "GetObjectAclPages", + "GetObjectACLRequest": "GetObjectAclRequest", + "PutBucketACL": "PutBucketAcl", + "PutBucketACLPages": "PutBucketAclPages", + "PutBucketACLRequest": "PutBucketAclRequest", + "PutBucketCORS": "PutBucketCors", + "PutBucketCORSPages": "PutBucketCorsPages", + "PutBucketCORSRequest": "PutBucketCorsRequest", + "PutObjectACL": "PutObjectAcl", + "PutObjectACLPages": "PutObjectAclPages", + "PutObjectACLRequest": "PutObjectAclRequest", + }, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/ses": { + operations: map[string]string{ + "GetIdentityDKIMAttributes": "GetIdentityDkimAttributes", + "GetIdentityDKIMAttributesPages": "GetIdentityDkimAttributesPages", + "GetIdentityDKIMAttributesRequest": "GetIdentityDkimAttributesRequest", + "SetIdentityDKIMEnabled": "SetIdentityDkimEnabled", + "SetIdentityDKIMEnabledPages": "SetIdentityDkimEnabledPages", + "SetIdentityDKIMEnabledRequest": "SetIdentityDkimEnabledRequest", + "VerifyDomainDKIM": "VerifyDomainDkim", + "VerifyDomainDKIMPages": "VerifyDomainDkimPages", + "VerifyDomainDKIMRequest": "VerifyDomainDkimRequest", + }, + shapes: map[string]string{ + "GetIdentityDKIMAttributesInput": "GetIdentityDkimAttributesInput", + "GetIdentityDKIMAttributesOutput": 
"GetIdentityDkimAttributesOutput", + "IdentityDKIMAttributes": "IdentityDkimAttributes", + "SetIdentityDKIMEnabledInput": "SetIdentityDkimEnabledInput", + "SetIdentityDKIMEnabledOutput": "SetIdentityDkimEnabledOutput", + "VerifyDomainDKIMInput": "VerifyDomainDkimInput", + "VerifyDomainDKIMOutput": "VerifyDomainDkimOutput", + }, + fields: map[string]string{ + "BCCAddresses": "BccAddresses", + "CCAddresses": "CcAddresses", + "DKIMAttributes": "DkimAttributes", + "DKIMEnabled": "DkimEnabled", + "DKIMTokens": "DkimTokens", + "DKIMVerificationStatus": "DkimVerificationStatus", + "FromARN": "FromArn", + "HTML": "Html", + "MessageID": "MessageId", + "ReturnPathARN": "ReturnPathArn", + "SNSTopic": "SnsTopic", + "SourceARN": "SourceArn", + }, + }, + "github.com/aws/aws-sdk-go/service/ses/sesiface": { + operations: map[string]string{ + "GetIdentityDKIMAttributes": "GetIdentityDkimAttributes", + "GetIdentityDKIMAttributesPages": "GetIdentityDkimAttributesPages", + "GetIdentityDKIMAttributesRequest": "GetIdentityDkimAttributesRequest", + "SetIdentityDKIMEnabled": "SetIdentityDkimEnabled", + "SetIdentityDKIMEnabledPages": "SetIdentityDkimEnabledPages", + "SetIdentityDKIMEnabledRequest": "SetIdentityDkimEnabledRequest", + "VerifyDomainDKIM": "VerifyDomainDkim", + "VerifyDomainDKIMPages": "VerifyDomainDkimPages", + "VerifyDomainDKIMRequest": "VerifyDomainDkimRequest", + }, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/sns": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "AWSAccountID": "AWSAccountId", + "EndpointARN": "EndpointArn", + "MessageID": "MessageId", + "PlatformApplicationARN": "PlatformApplicationArn", + "SubscriptionARN": "SubscriptionArn", + "TargetARN": "TargetArn", + "TopicARN": "TopicArn", + }, + }, + "github.com/aws/aws-sdk-go/service/sns/snsiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/sqs": { + operations: map[string]string{ + "GetQueueURL": "GetQueueUrl", + "GetQueueURLPages": "GetQueueUrlPages", + "GetQueueURLRequest": "GetQueueUrlRequest", + }, + shapes: map[string]string{ + "GetQueueURLInput": "GetQueueUrlInput", + "GetQueueURLOutput": "GetQueueUrlOutput", + }, + fields: map[string]string{ + "AWSAccountIDs": "AWSAccountIds", + "ID": "Id", + "MessageID": "MessageId", + "QueueOwnerAWSAccountID": "QueueOwnerAWSAccountId", + "QueueURL": "QueueUrl", + "QueueURLs": "QueueUrls", + }, + }, + "github.com/aws/aws-sdk-go/service/sqs/sqsiface": { + operations: map[string]string{ + "GetQueueURL": "GetQueueUrl", + "GetQueueURLPages": "GetQueueUrlPages", + "GetQueueURLRequest": "GetQueueUrlRequest", + }, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/ssm": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "InstanceID": "InstanceId", + "SHA1": "Sha1", + }, + }, + "github.com/aws/aws-sdk-go/service/ssm/ssmiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/storagegateway": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "DiskID": "DiskId", + "DiskIDs": "DiskIds", + "GatewayID": "GatewayId", + "IPV4Address": "Ipv4Address", + "IPV6Address": "Ipv6Address", + "MACAddress": "MacAddress", + "NetworkInterfaceID": "NetworkInterfaceId", + 
"SnapshotID": "SnapshotId", + "SourceSnapshotID": "SourceSnapshotId", + "VolumeDiskID": "VolumeDiskId", + "VolumeID": "VolumeId", + }, + }, + "github.com/aws/aws-sdk-go/service/storagegateway/storagegatewayiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/sts": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "ARN": "Arn", + "AccessKeyID": "AccessKeyId", + "AssumedRoleID": "AssumedRoleId", + "ExternalID": "ExternalId", + "FederatedUserID": "FederatedUserId", + "PrincipalARN": "PrincipalArn", + "ProviderID": "ProviderId", + "RoleARN": "RoleArn", + }, + }, + "github.com/aws/aws-sdk-go/service/sts/stsiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/support": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "AttachmentID": "AttachmentId", + "AttachmentSetID": "AttachmentSetId", + "CCEmailAddresses": "CcEmailAddresses", + "CaseID": "CaseId", + "CaseIDList": "CaseIdList", + "CheckID": "CheckId", + "CheckIDs": "CheckIds", + "DisplayID": "DisplayId", + "ID": "Id", + "ResourceID": "ResourceId", + }, + }, + "github.com/aws/aws-sdk-go/service/support/supportiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/swf": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "ActivityID": "ActivityId", + "ContinuedExecutionRunID": "ContinuedExecutionRunId", + "DecisionTaskCompletedEventID": "DecisionTaskCompletedEventId", + "EventID": "EventId", + "ExternalInitiatedEventID": "ExternalInitiatedEventId", + "ID": "Id", + "InitiatedEventID": "InitiatedEventId", + "LatestCancelRequestedEventID": "LatestCancelRequestedEventId", + "NewExecutionRunID": "NewExecutionRunId", + "ParentInitiatedEventID": "ParentInitiatedEventId", + "PreviousStartedEventID": "PreviousStartedEventId", + "RunID": "RunId", + "ScheduledEventID": "ScheduledEventId", + "StartedEventID": "StartedEventId", + "TimerID": "TimerId", + "WorkflowID": "WorkflowId", + }, + }, + "github.com/aws/aws-sdk-go/service/swf/swfiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/workspaces": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "BundleID": "BundleId", + "BundleIDs": "BundleIds", + "CustomSecurityGroupID": "CustomSecurityGroupId", + "DNSIPAddresses": "DnsIpAddresses", + "DefaultOU": "DefaultOu", + "DirectoryID": "DirectoryId", + "DirectoryIDs": "DirectoryIds", + "IAMRoleID": "IamRoleId", + "IPAddress": "IpAddress", + "SubnetID": "SubnetId", + "SubnetIDs": "SubnetIds", + "WorkspaceID": "WorkspaceId", + "WorkspaceIDs": "WorkspaceIds", + "WorkspaceSecurityGroupID": "WorkspaceSecurityGroupId", + }, + }, + "github.com/aws/aws-sdk-go/service/workspaces/workspacesiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/renamer.go b/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/renamer.go new file mode 100644 index 000000000..ed60e88dd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/renamer.go @@ 
-0,0 +1,45 @@ +// +build go1.5 + +package main + +//go:generate go run gen/gen.go + +import ( + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/rename" +) + +var safeTag = "4e554f77f00d527b452c68a46f2e68595284121b" + +func main() { + gopath := os.Getenv("GOPATH") + if gopath == "" { + panic("GOPATH not set!") + } + gopath = strings.Split(gopath, ":")[0] + + // change directory to SDK + err := os.Chdir(filepath.Join(gopath, "src", "github.com", "aws", "aws-sdk-go")) + if err != nil { + panic("Cannot find SDK repository") + } + + // store orig HEAD + head, err := exec.Command("git", "rev-parse", "--abbrev-ref", "HEAD").Output() + if err != nil { + panic("Cannot find SDK repository") + } + origHEAD := strings.Trim(string(head), " \r\n") + + // checkout to safe tag and run conversion + exec.Command("git", "checkout", safeTag).Run() + defer func() { + exec.Command("git", "checkout", origHEAD).Run() + }() + + rename.ParsePathsFromArgs() +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/assert.go b/vendor/github.com/aws/aws-sdk-go/awstesting/assert.go new file mode 100644 index 000000000..f97b228fc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/assert.go @@ -0,0 +1,130 @@ +package awstesting + +import ( + "encoding/json" + "encoding/xml" + "fmt" + "net/url" + "regexp" + "sort" + "testing" + + "github.com/stretchr/testify/assert" +) + +// Match is a testing helper that asserts the expected string matches the +// given regular expression. +func Match(t *testing.T, regex, expected string) { + if !regexp.MustCompile(regex).Match([]byte(expected)) { + t.Errorf("%q\n\tdoes not match /%s/", expected, regex) + } +} + +// AssertURL verifies the expected URL matches the actual. +func AssertURL(t *testing.T, expect, actual string, msgAndArgs ...interface{}) bool { + expectURL, err := url.Parse(expect) + if err != nil { + t.Errorf(errMsg("unable to parse expected URL", err, msgAndArgs...)) + return false + } + actualURL, err := url.Parse(actual) + if err != nil { + t.Errorf(errMsg("unable to parse actual URL", err, msgAndArgs...)) + return false + } + + assert.Equal(t, expectURL.Host, actualURL.Host, msgAndArgs...) + assert.Equal(t, expectURL.Scheme, actualURL.Scheme, msgAndArgs...) + assert.Equal(t, expectURL.Path, actualURL.Path, msgAndArgs...) + + return AssertQuery(t, expectURL.Query().Encode(), actualURL.Query().Encode(), msgAndArgs...) +} + +// AssertQuery verifies the expected HTTP query string matches the actual. +func AssertQuery(t *testing.T, expect, actual string, msgAndArgs ...interface{}) bool { + expectQ, err := url.ParseQuery(expect) + if err != nil { + t.Errorf(errMsg("unable to parse expected Query", err, msgAndArgs...)) + return false + } + actualQ, err := url.ParseQuery(actual) + if err != nil { + t.Errorf(errMsg("unable to parse actual Query", err, msgAndArgs...)) + return false + } + + // Make sure the keys are the same + if !assert.Equal(t, queryValueKeys(expectQ), queryValueKeys(actualQ), msgAndArgs...) { + return false + } + + for k, expectQVals := range expectQ { + sort.Strings(expectQVals) + actualQVals := actualQ[k] + sort.Strings(actualQVals) + assert.Equal(t, expectQVals, actualQVals, msgAndArgs...) + } + + return true +} + +// AssertJSON verifies that the expected JSON string matches the actual.
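+// For example (an illustrative sketch): both documents below decode to the +// same map[string]interface{}, so the assertion passes even though the key +// order differs: +// +// awstesting.AssertJSON(t, `{"a":1,"b":2}`, `{"b":2,"a":1}`)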
+func AssertJSON(t *testing.T, expect, actual string, msgAndArgs ...interface{}) bool { + expectVal := map[string]interface{}{} + if err := json.Unmarshal([]byte(expect), &expectVal); err != nil { + t.Errorf(errMsg("unable to parse expected JSON", err, msgAndArgs...)) + return false + } + + actualVal := map[string]interface{}{} + if err := json.Unmarshal([]byte(actual), &actualVal); err != nil { + t.Errorf(errMsg("unable to parse actual JSON", err, msgAndArgs...)) + return false + } + + return assert.Equal(t, expectVal, actualVal, msgAndArgs...) +} + +// AssertXML verifies that the expected XML string matches the actual. +func AssertXML(t *testing.T, expect, actual string, container interface{}, msgAndArgs ...interface{}) bool { + expectVal := container + if err := xml.Unmarshal([]byte(expect), &expectVal); err != nil { + t.Errorf(errMsg("unable to parse expected XML", err, msgAndArgs...)) + } + + actualVal := container + if err := xml.Unmarshal([]byte(actual), &actualVal); err != nil { + t.Errorf(errMsg("unable to parse actual XML", err, msgAndArgs...)) + } + return assert.Equal(t, expectVal, actualVal, msgAndArgs...) +} + +func errMsg(baseMsg string, err error, msgAndArgs ...interface{}) string { + message := messageFromMsgAndArgs(msgAndArgs) + if message != "" { + message += ", " + } + return fmt.Sprintf("%s%s, %v", message, baseMsg, err) +} + +func messageFromMsgAndArgs(msgAndArgs []interface{}) string { + if len(msgAndArgs) == 0 || msgAndArgs == nil { + return "" + } + if len(msgAndArgs) == 1 { + return msgAndArgs[0].(string) + } + if len(msgAndArgs) > 1 { + return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...) + } + return "" +} + +func queryValueKeys(v url.Values) []string { + keys := make([]string, 0, len(v)) + for k := range v { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/assert_test.go b/vendor/github.com/aws/aws-sdk-go/awstesting/assert_test.go new file mode 100644 index 000000000..45903a5d3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/assert_test.go @@ -0,0 +1,64 @@ +package awstesting_test + +import ( + "encoding/xml" + "testing" + + "github.com/aws/aws-sdk-go/awstesting" +) + +func TestAssertJSON(t *testing.T) { + cases := []struct { + e, a string + asserts bool + }{ + { + e: `{"RecursiveStruct":{"RecursiveMap":{"foo":{"NoRecurse":"foo"},"bar":{"NoRecurse":"bar"}}}}`, + a: `{"RecursiveStruct":{"RecursiveMap":{"bar":{"NoRecurse":"bar"},"foo":{"NoRecurse":"foo"}}}}`, + asserts: true, + }, + } + + for i, c := range cases { + mockT := &testing.T{} + if awstesting.AssertJSON(mockT, c.e, c.a) != c.asserts { + t.Error("Assert JSON result was not expected.", i) + } + } +} + +func TestAssertXML(t *testing.T) { + cases := []struct { + e, a string + asserts bool + container struct { + XMLName xml.Name `xml:"OperationRequest"` + NS string `xml:"xmlns,attr"` + RecursiveStruct struct { + RecursiveMap struct { + Entries []struct { + XMLName xml.Name `xml:"entries"` + Key string `xml:"key"` + Value struct { + XMLName xml.Name `xml:"value"` + NoRecurse string + } + } + } + } + } + }{ + { + e: `<OperationRequest xmlns="https://foo/"><RecursiveStruct><RecursiveMap><entries><key>foo</key><value><NoRecurse>foo</NoRecurse></value></entries><entries><key>bar</key><value><NoRecurse>bar</NoRecurse></value></entries></RecursiveMap></RecursiveStruct></OperationRequest>`, + a: `<OperationRequest xmlns="https://foo/"><RecursiveStruct><RecursiveMap><entries><key>bar</key><value><NoRecurse>bar</NoRecurse></value></entries><entries><key>foo</key><value><NoRecurse>foo</NoRecurse></value></entries></RecursiveMap></RecursiveStruct></OperationRequest>`, + asserts: true, + }, + } + + for i, c := range cases { + mockT := &testing.T{} + if awstesting.AssertXML(mockT, c.e, c.a, c.container) != c.asserts { + t.Error("Assert XML result was not expected.", i) + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/client.go new file mode 100644 index
000000000..fd95259e0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/client.go @@ -0,0 +1,42 @@ +package awstesting + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/awstesting/mock" +) + +// NewClient creates and initializes a generic service client for testing. +func NewClient(cfgs ...*aws.Config) *client.Client { + info := metadata.ClientInfo{ + Endpoint: "http://endpoint", + SigningName: "", + } + def := defaults.Get() + def.Config.MergeIn(cfgs...) + + return client.New(*def.Config, info, def.Handlers) +} + +// NewMockClient creates and initializes a client that will connect to the +// mock server +func NewMockClient(cfgs ...*aws.Config) *client.Client { + c := mock.Session.ClientConfig("Mock", cfgs...) + + svc := client.New( + *c.Config, + metadata.ClientInfo{ + ServiceName: "Mock", + SigningRegion: c.SigningRegion, + Endpoint: c.Endpoint, + APIVersion: "2015-12-08", + JSONVersion: "1.1", + TargetPrefix: "MockServer", + }, + c.Handlers, + ) + + return svc +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/integration_test.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/integration_test.go new file mode 100644 index 000000000..93d5ff60f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/integration_test.go @@ -0,0 +1,124 @@ +// +build integration + +// Package s3_test runs integration tests for S3 +package s3_test + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "os" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/awstesting/integration" + "github.com/aws/aws-sdk-go/service/s3" +) + +var bucketName *string +var svc *s3.S3 + +func TestMain(m *testing.M) { + setup() + defer teardown() // only called if we panic + result := m.Run() + teardown() + os.Exit(result) +} + +// Create a bucket for testing +func setup() { + svc = s3.New(integration.Session) + bucketName = aws.String( + fmt.Sprintf("aws-sdk-go-integration-%d-%s", time.Now().Unix(), integration.UniqueID())) + + for i := 0; i < 10; i++ { + _, err := svc.CreateBucket(&s3.CreateBucketInput{Bucket: bucketName}) + if err == nil { + break + } + } + + for { + _, err := svc.HeadBucket(&s3.HeadBucketInput{Bucket: bucketName}) + if err == nil { + break + } + time.Sleep(1 * time.Second) + } +} + +// Delete the bucket +func teardown() { + resp, _ := svc.ListObjects(&s3.ListObjectsInput{Bucket: bucketName}) + for _, o := range resp.Contents { + svc.DeleteObject(&s3.DeleteObjectInput{Bucket: bucketName, Key: o.Key}) + } + svc.DeleteBucket(&s3.DeleteBucketInput{Bucket: bucketName}) +} + +func TestWriteToObject(t *testing.T) { + _, err := svc.PutObject(&s3.PutObjectInput{ + Bucket: bucketName, + Key: aws.String("key name"), + Body: bytes.NewReader([]byte("hello world")), + }) + assert.NoError(t, err) + + resp, err := svc.GetObject(&s3.GetObjectInput{ + Bucket: bucketName, + Key: aws.String("key name"), + }) + assert.NoError(t, err) + + b, _ := ioutil.ReadAll(resp.Body) + assert.Equal(t, []byte("hello world"), b) +} + +func TestPresignedGetPut(t *testing.T) { + putreq, _ := svc.PutObjectRequest(&s3.PutObjectInput{ + Bucket: bucketName, + Key: aws.String("presigned-key"), + }) + var err error + + // Presign a PUT request + var puturl string + puturl, err = putreq.Presign(300 * 
time.Second) + assert.NoError(t, err) + + // PUT to the presigned URL with a body + var puthttpreq *http.Request + buf := bytes.NewReader([]byte("hello world")) + puthttpreq, err = http.NewRequest("PUT", puturl, buf) + assert.NoError(t, err) + + var putresp *http.Response + putresp, err = http.DefaultClient.Do(puthttpreq) + assert.NoError(t, err) + assert.Equal(t, 200, putresp.StatusCode) + + // Presign a GET on the same URL + getreq, _ := svc.GetObjectRequest(&s3.GetObjectInput{ + Bucket: bucketName, + Key: aws.String("presigned-key"), + }) + + var geturl string + geturl, err = getreq.Presign(300 * time.Second) + assert.NoError(t, err) + + // Get the body + var getresp *http.Response + getresp, err = http.Get(geturl) + assert.NoError(t, err) + + var b []byte + defer getresp.Body.Close() + b, err = ioutil.ReadAll(getresp.Body) + assert.Equal(t, "hello world", string(b)) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/s3manager/integration_test.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/s3manager/integration_test.go new file mode 100644 index 000000000..eccd6b60e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/s3manager/integration_test.go @@ -0,0 +1,163 @@ +// +build integration + +// Package s3manager runs integration tests for the S3 upload and download managers. +package s3manager + +import ( + "bytes" + "crypto/md5" + "fmt" + "io" + "os" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/awstesting/integration" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3manager" +) + +var integBuf12MB = make([]byte, 1024*1024*12) +var integMD512MB = fmt.Sprintf("%x", md5.Sum(integBuf12MB)) +var bucketName *string + +func TestMain(m *testing.M) { + setup() + defer teardown() // only called if we panic + result := m.Run() + teardown() + os.Exit(result) +} + +func setup() { + // Create a bucket for testing + svc := s3.New(integration.Session) + bucketName = aws.String( + fmt.Sprintf("aws-sdk-go-integration-%d-%s", time.Now().Unix(), integration.UniqueID())) + + for i := 0; i < 10; i++ { + _, err := svc.CreateBucket(&s3.CreateBucketInput{Bucket: bucketName}) + if err == nil { + break + } + } + + for { + _, err := svc.HeadBucket(&s3.HeadBucketInput{Bucket: bucketName}) + if err == nil { + break + } + time.Sleep(1 * time.Second) + } +} + +// Delete the bucket +func teardown() { + svc := s3.New(session.New()) + + objs, _ := svc.ListObjects(&s3.ListObjectsInput{Bucket: bucketName}) + for _, o := range objs.Contents { + svc.DeleteObject(&s3.DeleteObjectInput{Bucket: bucketName, Key: o.Key}) + } + + uploads, _ := svc.ListMultipartUploads(&s3.ListMultipartUploadsInput{Bucket: bucketName}) + for _, u := range uploads.Uploads { + svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{ + Bucket: bucketName, + Key: u.Key, + UploadId: u.UploadId, + }) + } + + svc.DeleteBucket(&s3.DeleteBucketInput{Bucket: bucketName}) +} + +type dlwriter struct { + buf []byte +} + +func newDLWriter(size int) *dlwriter { + return &dlwriter{buf: make([]byte, size)} +} + +func (d dlwriter) WriteAt(p []byte, pos int64) (n int, err error) { + if pos > int64(len(d.buf)) { + return 0, io.EOF + } + + written := 0 + for i, b := range p { + if i >= len(d.buf) { + break + } + d.buf[pos+int64(i)] = b + written++ + } + return written, nil +} + +func validate(t *testing.T, key string,
md5value string) { + mgr := s3manager.NewDownloader(integration.Session) + params := &s3.GetObjectInput{Bucket: bucketName, Key: &key} + + w := newDLWriter(1024 * 1024 * 20) + n, err := mgr.Download(w, params) + assert.NoError(t, err) + assert.Equal(t, md5value, fmt.Sprintf("%x", md5.Sum(w.buf[0:n]))) +} + +func TestUploadConcurrently(t *testing.T) { + key := "12mb-1" + mgr := s3manager.NewUploader(integration.Session) + out, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: bucketName, + Key: &key, + Body: bytes.NewReader(integBuf12MB), + }) + + assert.NoError(t, err) + assert.NotEqual(t, "", out.UploadID) + assert.Regexp(t, `^https?://.+/`+key+`$`, out.Location) + + validate(t, key, integMD512MB) +} + +func TestUploadFailCleanup(t *testing.T) { + svc := s3.New(session.New()) + + // Break checksum on 2nd part so it fails + part := 0 + svc.Handlers.Build.PushBack(func(r *request.Request) { + if r.Operation.Name == "UploadPart" { + if part == 1 { + r.HTTPRequest.Header.Set("X-Amz-Content-Sha256", "000") + } + part++ + } + }) + + key := "12mb-leave" + mgr := s3manager.NewUploaderWithClient(svc, func(u *s3manager.Uploader) { + u.LeavePartsOnError = false + }) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: bucketName, + Key: &key, + Body: bytes.NewReader(integBuf12MB), + }) + assert.Error(t, err) + uploadID := "" + if merr, ok := err.(s3manager.MultiUploadFailure); ok { + uploadID = merr.UploadID() + } + assert.NotEmpty(t, uploadID) + + _, err = svc.ListParts(&s3.ListPartsInput{ + Bucket: bucketName, Key: &key, UploadId: &uploadID}) + assert.Error(t, err) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/s3manager/stub.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/s3manager/stub.go new file mode 100644 index 000000000..9434ae970 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/s3manager/stub.go @@ -0,0 +1 @@ +package s3manager diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/stub.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/stub.go new file mode 100644 index 000000000..3ed7f9723 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/stub.go @@ -0,0 +1 @@ +package s3 diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/integration.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/integration.go new file mode 100644 index 000000000..7d4989626 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/integration.go @@ -0,0 +1,44 @@ +// +build integration + +// Package integration performs initialization and validation for integration +// tests. +package integration + +import ( + "crypto/rand" + "fmt" + "io" + "os" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" +) + +// Session is a shared session for all integration tests to use. 
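+// The session is constructed once when the package loads; the init function +// below raises its log level when the DEBUG, DEBUG_SIGNING, or DEBUG_BODY +// environment variables are set, and panics unless AWS_REGION is configured.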
+var Session = session.New() + +func init() { + logLevel := Session.Config.LogLevel + if os.Getenv("DEBUG") != "" { + logLevel = aws.LogLevel(aws.LogDebug) + } + if os.Getenv("DEBUG_SIGNING") != "" { + logLevel = aws.LogLevel(aws.LogDebugWithSigning) + } + if os.Getenv("DEBUG_BODY") != "" { + logLevel = aws.LogLevel(aws.LogDebugWithSigning | aws.LogDebugWithHTTPBody) + } + Session.Config.LogLevel = logLevel + + if aws.StringValue(Session.Config.Region) == "" { + panic("AWS_REGION must be configured to run integration tests") + } +} + +// UniqueID returns a unique UUID-like identifier for use in generating +// resources for integration tests. +func UniqueID() string { + uuid := make([]byte, 16) + io.ReadFull(rand.Reader, uuid) + return fmt.Sprintf("%x", uuid) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/acm/acm.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/acm/acm.feature new file mode 100644 index 000000000..dc28b5533 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/acm/acm.feature @@ -0,0 +1,14 @@ +# language: en +@acm @client +Feature: AWS Certificate Manager + + Scenario: Making a request + When I call the "ListCertificates" API + Then the request should be successful + + Scenario: Handling errors + When I attempt to call the "GetCertificate" API with: + | CertificateArn | arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012 | + Then I expect the response error code to be "ResourceNotFoundException" + And I expect the response error message not be empty + diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/acm/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/acm/client.go new file mode 100644 index 000000000..471f48689 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/acm/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package acm provides gucumber integration tests support. +package acm + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/acm" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@acm", func() { + World["client"] = acm.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/apigateway/apigateway.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/apigateway/apigateway.feature new file mode 100644 index 000000000..4286b8130 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/apigateway/apigateway.feature @@ -0,0 +1,16 @@ +# language: en +@apigateway @client +Feature: Amazon API Gateway + + Scenario: Making a request + When I call the "GetAccountRequest" API + Then the request should be successful + + Scenario: Handling errors + When I attempt to call the "GetRestApi" API with: + | RestApiId | api123 | + Then I expect the response error code to be "NotFoundException" + And I expect the response error message to include: + """ + Invalid REST API identifier specified + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/apigateway/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/apigateway/client.go new file mode 100644 index 000000000..17e295b07 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/apigateway/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package apigateway provides gucumber integration tests support.
+package apigateway + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/apigateway" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@apigateway", func() { + World["client"] = apigateway.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/applicationdiscoveryservice/applicationdiscoveryservice.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/applicationdiscoveryservice/applicationdiscoveryservice.feature new file mode 100644 index 000000000..02ae2874e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/applicationdiscoveryservice/applicationdiscoveryservice.feature @@ -0,0 +1,8 @@ +# language: en +@applicationdiscoveryservice @client +Feature: AWS Application Discovery Service + + Scenario: Making a request + When I call the "DescribeAgents" API + Then the request should be successful + diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/applicationdiscoveryservice/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/applicationdiscoveryservice/client.go new file mode 100644 index 000000000..c29b647b6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/applicationdiscoveryservice/client.go @@ -0,0 +1,17 @@ +// +build integration + +//Package applicationdiscoveryservice provides gucumber integration tests support. +package applicationdiscoveryservice + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/applicationdiscoveryservice" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@applicationdiscoveryservice", func() { + World["client"] = applicationdiscoveryservice.New(smoke.Session, &aws.Config{Region: aws.String("us-west-2")}) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/autoscaling/autoscaling.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/autoscaling/autoscaling.feature new file mode 100644 index 000000000..7c2bdf6cf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/autoscaling/autoscaling.feature @@ -0,0 +1,18 @@ +# language: en +@autoscaling @client +Feature: Auto Scaling + + Scenario: Making a request + When I call the "DescribeScalingProcessTypes" API + Then the value at "Processes" should be a list + + Scenario: Handling errors + When I attempt to call the "CreateLaunchConfiguration" API with: + | LaunchConfigurationName | | + | ImageId | ami-12345678 | + | InstanceType | m1.small | + Then I expect the response error code to be "InvalidParameter" + And I expect the response error message to include: + """ + LaunchConfigurationName + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/autoscaling/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/autoscaling/client.go new file mode 100644 index 000000000..b05963b0e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/autoscaling/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package autoscaling provides gucumber integration tests support. +package autoscaling + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/autoscaling" + .
"github.com/lsegal/gucumber" +) + +func init() { + Before("@autoscaling", func() { + World["client"] = autoscaling.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudformation/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudformation/client.go new file mode 100644 index 000000000..835651ba2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudformation/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package cloudformation provides gucumber integration tests support. +package cloudformation + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/cloudformation" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@cloudformation", func() { + World["client"] = cloudformation.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudformation/cloudformation.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudformation/cloudformation.feature new file mode 100644 index 000000000..3eafaf608 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudformation/cloudformation.feature @@ -0,0 +1,17 @@ +# language: en +@cloudformation @client +Feature: AWS CloudFormation + + Scenario: Making a request + When I call the "ListStacks" API + Then the value at "StackSummaries" should be a list + + Scenario: Handling errors + When I attempt to call the "CreateStack" API with: + | StackName | fakestack | + | TemplateURL | http://s3.amazonaws.com/foo/bar | + Then I expect the response error code to be "ValidationError" + And I expect the response error message to include: + """ + TemplateURL must reference a valid S3 object to which you have access. + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudfront/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudfront/client.go new file mode 100644 index 000000000..0031473e9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudfront/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package cloudfront provides gucumber integration tests support. +package cloudfront + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/cloudfront" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@cloudfront", func() { + World["client"] = cloudfront.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudfront/cloudfront.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudfront/cloudfront.feature new file mode 100644 index 000000000..bbb2a8d2a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudfront/cloudfront.feature @@ -0,0 +1,17 @@ +# language: en +@cloudfront @client +Feature: Amazon CloudFront + + Scenario: Making a basic request + When I call the "ListDistributions" API with: + | MaxItems | 1 | + Then the value at "DistributionList.Items" should be a list + + Scenario: Error handling + When I attempt to call the "GetDistribution" API with: + | Id | fake-id | + Then I expect the response error code to be "NoSuchDistribution" + And I expect the response error message to include: + """ + The specified distribution does not exist. 
+ """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudhsm/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudhsm/client.go new file mode 100644 index 000000000..0a413364b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudhsm/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package cloudhsm provides gucumber integration tests support. +package cloudhsm + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/cloudhsm" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@cloudhsm", func() { + World["client"] = cloudhsm.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudhsm/cloudhsm.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudhsm/cloudhsm.feature new file mode 100644 index 000000000..545ca4efe --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudhsm/cloudhsm.feature @@ -0,0 +1,16 @@ +# language: en +@cloudhsm @client +Feature: Amazon CloudHSM + + Scenario: Making a request + When I call the "ListHapgs" API + Then the value at "HapgList" should be a list + + Scenario: Handling errors + When I attempt to call the "DescribeHapg" API with: + | HapgArn | bogus-arn | + Then I expect the response error code to be "ValidationException" + And I expect the response error message to include: + """ + Value 'bogus-arn' at 'hapgArn' failed to satisfy constraint + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudsearch/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudsearch/client.go new file mode 100644 index 000000000..721e225d6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudsearch/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package cloudsearch provides gucumber integration tests support. +package cloudsearch + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/cloudsearch" + . 
"github.com/lsegal/gucumber" +) + +func init() { + Before("@cloudsearch", func() { + World["client"] = cloudsearch.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudsearch/cloudsearch.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudsearch/cloudsearch.feature new file mode 100644 index 000000000..160e916d2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudsearch/cloudsearch.feature @@ -0,0 +1,16 @@ +# language: en +@cloudsearch @client +Feature: Amazon CloudSearch + + Scenario: Making a request + When I call the "DescribeDomains" API + Then the response should contain a "DomainStatusList" + + Scenario: Handling errors + When I attempt to call the "DescribeIndexFields" API with: + | DomainName | fakedomain | + Then I expect the response error code to be "ResourceNotFound" + And I expect the response error message to include: + """ + Domain not found: fakedomain + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudtrail/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudtrail/client.go new file mode 100644 index 000000000..1b1c77edd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudtrail/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package cloudtrail provides gucumber integration tests support. +package cloudtrail + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/cloudtrail" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@cloudtrail", func() { + World["client"] = cloudtrail.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudtrail/cloudtrail.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudtrail/cloudtrail.feature new file mode 100644 index 000000000..7b5166a76 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudtrail/cloudtrail.feature @@ -0,0 +1,16 @@ +# language: en +@cloudtrail @client +Feature: AWS CloudTrail + + Scenario: Making a request + When I call the "DescribeTrails" API + Then the response should contain a "trailList" + + Scenario: Handling errors + When I attempt to call the "DeleteTrail" API with: + | Name | faketrail | + Then I expect the response error code to be "TrailNotFoundException" + And I expect the response error message to include: + """ + Unknown trail + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatch/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatch/client.go new file mode 100644 index 000000000..7fff575fe --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatch/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package cloudwatch provides gucumber integration tests support. +package cloudwatch + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/cloudwatch" + . 
"github.com/lsegal/gucumber" +) + +func init() { + Before("@cloudwatch", func() { + World["client"] = cloudwatch.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatch/cloudwatch.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatch/cloudwatch.feature new file mode 100644 index 000000000..84307ef2a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatch/cloudwatch.feature @@ -0,0 +1,19 @@ +# language: en +@cloudwatch @monitoring @client +Feature: Amazon CloudWatch + + Scenario: Making a request + When I call the "ListMetrics" API with: + | Namespace | AWS/EC2 | + Then the value at "Metrics" should be a list + + Scenario: Handling errors + When I attempt to call the "SetAlarmState" API with: + | AlarmName | abc | + | StateValue | mno | + | StateReason | xyz | + Then I expect the response error code to be "ValidationError" + And I expect the response error message to include: + """ + failed to satisfy constraint + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatchlogs/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatchlogs/client.go new file mode 100644 index 000000000..b6ded13df --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatchlogs/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package cloudwatchlogs provides gucumber integration tests support. +package cloudwatchlogs + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@cloudwatchlogs", func() { + World["client"] = cloudwatchlogs.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatchlogs/cloudwatchlogs.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatchlogs/cloudwatchlogs.feature new file mode 100644 index 000000000..5711c4e85 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatchlogs/cloudwatchlogs.feature @@ -0,0 +1,17 @@ +# language: en +@cloudwatchlogs @logs +Feature: Amazon CloudWatch Logs + + Scenario: Making a request + When I call the "DescribeLogGroups" API + Then the value at "logGroups" should be a list + + Scenario: Handling errors + When I attempt to call the "GetLogEvents" API with: + | logGroupName | fakegroup | + | logStreamName | fakestream | + Then I expect the response error code to be "ResourceNotFoundException" + And I expect the response error message to include: + """ + The specified log group does not exist. + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codecommit/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codecommit/client.go new file mode 100644 index 000000000..f65caf53d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codecommit/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package codecommit provides gucumber integration tests support. +package codecommit + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/codecommit" + . 
"github.com/lsegal/gucumber" +) + +func init() { + Before("@codecommit", func() { + World["client"] = codecommit.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codecommit/codecommit.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codecommit/codecommit.feature new file mode 100644 index 000000000..c5c019055 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codecommit/codecommit.feature @@ -0,0 +1,16 @@ +# language: en +@codecommit @client +Feature: Amazon CodeCommit + + Scenario: Making a request + When I call the "ListRepositories" API + Then the value at "repositories" should be a list + + Scenario: Handling errors + When I attempt to call the "ListBranches" API with: + | repositoryName | fake-repo | + Then I expect the response error code to be "RepositoryDoesNotExistException" + And I expect the response error message to include: + """ + fake-repo does not exist + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codedeploy/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codedeploy/client.go new file mode 100644 index 000000000..bc9c76a76 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codedeploy/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package codedeploy provides gucumber integration tests support. +package codedeploy + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/codedeploy" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@codedeploy", func() { + World["client"] = codedeploy.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codedeploy/codedeploy.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codedeploy/codedeploy.feature new file mode 100644 index 000000000..45dfd2fa2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codedeploy/codedeploy.feature @@ -0,0 +1,16 @@ +# language: en +@codedeploy @client +Feature: Amazon CodeDeploy + + Scenario: Making a request + When I call the "ListApplications" API + Then the value at "applications" should be a list + + Scenario: Handling errors + When I attempt to call the "GetDeployment" API with: + | deploymentId | d-USUAELQEX | + Then I expect the response error code to be "DeploymentDoesNotExistException" + And I expect the response error message to include: + """ + The deployment d-USUAELQEX could not be found + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codepipeline/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codepipeline/client.go new file mode 100644 index 000000000..b3ef7cdb9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codepipeline/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package codepipeline provides gucumber integration tests support. +package codepipeline + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/codepipeline" + . 
"github.com/lsegal/gucumber" +) + +func init() { + Before("@codepipeline", func() { + World["client"] = codepipeline.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codepipeline/codepipeline.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codepipeline/codepipeline.feature new file mode 100644 index 000000000..cb962cc89 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codepipeline/codepipeline.feature @@ -0,0 +1,16 @@ +# language: en +@codepipeline @client +Feature: Amazon CodePipeline + + Scenario: Making a request + When I call the "ListPipelines" API + Then the value at "pipelines" should be a list + + Scenario: Handling errors + When I attempt to call the "GetPipeline" API with: + | name | fake-pipeline | + Then I expect the response error code to be "PipelineNotFoundException" + And I expect the response error message to include: + """ + does not have a pipeline with name 'fake-pipeline' + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitoidentity/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitoidentity/client.go new file mode 100644 index 000000000..adf486107 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitoidentity/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package cognitoidentity provides gucumber integration tests support. +package cognitoidentity + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/cognitoidentity" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@cognitoidentity", func() { + World["client"] = cognitoidentity.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitoidentity/cognitoidentity.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitoidentity/cognitoidentity.feature new file mode 100644 index 000000000..12abcc8b8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitoidentity/cognitoidentity.feature @@ -0,0 +1,19 @@ +# language: en +@cognitoidentity @client +Feature: Amazon Cognito Idenity + + Scenario: Making a request + When I call the "ListIdentityPools" API with JSON: + """ + {"MaxResults": 10} + """ + Then the value at "IdentityPools" should be a list + + Scenario: Handling errors + When I attempt to call the "DescribeIdentityPool" API with: + | IdentityPoolId | us-east-1:aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee | + Then I expect the response error code to be "ResourceNotFoundException" + And I expect the response error message to include: + """ + IdentityPool 'us-east-1:aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' not found + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitosync/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitosync/client.go new file mode 100644 index 000000000..911c131e2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitosync/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package cognitosync provides gucumber integration tests support. +package cognitosync + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/cognitosync" + . 
"github.com/lsegal/gucumber" +) + +func init() { + Before("@cognitosync", func() { + World["client"] = cognitosync.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitosync/cognitosync.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitosync/cognitosync.feature new file mode 100644 index 000000000..3cdf84ec1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitosync/cognitosync.feature @@ -0,0 +1,16 @@ +# language: en +@cognitosync @client +Feature: Amazon Cognito Sync + + Scenario: Making a request + When I call the "ListIdentityPoolUsage" API + Then the value at "IdentityPoolUsages" should be a list + + Scenario: Handling errors + When I attempt to call the "DescribeIdentityPoolUsage" API with: + | IdentityPoolId | us-east-1:aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee | + Then I expect the response error code to be "ResourceNotFoundException" + And I expect the response error message to include: + """ + IdentityPool 'us-east-1:aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' not found + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/configservice/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/configservice/client.go new file mode 100644 index 000000000..f2cb1c831 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/configservice/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package configservice provides gucumber integration tests support. +package configservice + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/configservice" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@configservice", func() { + World["client"] = configservice.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/configservice/configservice.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/configservice/configservice.feature new file mode 100644 index 000000000..ccc3af6f8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/configservice/configservice.feature @@ -0,0 +1,17 @@ +# language: en +@configservice @config @client +Feature: AWS Config + + Scenario: Making a request + When I call the "DescribeConfigurationRecorders" API + Then the value at "ConfigurationRecorders" should be a list + + Scenario: Handling errors + When I attempt to call the "GetResourceConfigHistory" API with: + | resourceType | fake-type | + | resourceId | fake-id | + Then I expect the response error code to be "ValidationException" + And I expect the response error message to include: + """ + failed to satisfy constraint + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/datapipeline/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/datapipeline/client.go new file mode 100644 index 000000000..665a62825 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/datapipeline/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package datapipeline provides gucumber integration tests support. +package datapipeline + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/datapipeline" + . 
"github.com/lsegal/gucumber" +) + +func init() { + Before("@datapipeline", func() { + World["client"] = datapipeline.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/datapipeline/datapipeline.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/datapipeline/datapipeline.feature new file mode 100644 index 000000000..db315184a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/datapipeline/datapipeline.feature @@ -0,0 +1,16 @@ +# language: en +@datapipeline @client +Feature: AWS Data Pipeline + + Scenario: Making a request + When I call the "ListPipelines" API + Then the response should contain a "pipelineIdList" + + Scenario: Handling errors + When I attempt to call the "GetPipelineDefinition" API with: + | pipelineId | fake-id | + Then I expect the response error code to be "PipelineNotFoundException" + And I expect the response error message to include: + """ + does not exist + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/devicefarm/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/devicefarm/client.go new file mode 100644 index 000000000..c9ff570e1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/devicefarm/client.go @@ -0,0 +1,19 @@ +// +build integration + +//Package devicefarm provides gucumber integration tests support. +package devicefarm + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/devicefarm" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@devicefarm", func() { + // FIXME remove custom region + World["client"] = devicefarm.New(smoke.Session, + aws.NewConfig().WithRegion("us-west-2")) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/devicefarm/devicefarm.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/devicefarm/devicefarm.feature new file mode 100644 index 000000000..1d200a9d7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/devicefarm/devicefarm.feature @@ -0,0 +1,16 @@ +# language: en +@devicefarm @client +Feature: AWS Device Farm + + Scenario: Making a request + When I call the "ListDevices" API + Then the value at "devices" should be a list + + Scenario: Handling errors + When I attempt to call the "GetDevice" API with: + | arn | arn:aws:devicefarm:us-west-2::device:000000000000000000000000fake-arn | + Then I expect the response error code to be "NotFoundException" + And I expect the response error message to include: + """ + No device was found for arn + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directconnect/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directconnect/client.go new file mode 100644 index 000000000..a11d93739 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directconnect/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package directconnect provides gucumber integration tests support. +package directconnect + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/directconnect" + . 
"github.com/lsegal/gucumber" +) + +func init() { + Before("@directconnect", func() { + World["client"] = directconnect.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directconnect/directconnect.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directconnect/directconnect.feature new file mode 100644 index 000000000..3efd9c7dc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directconnect/directconnect.feature @@ -0,0 +1,16 @@ +# language: en +@directconnect @client +Feature: AWS Direct Connect + + Scenario: Making a request + When I call the "DescribeConnections" API + Then the value at "connections" should be a list + + Scenario: Handling errors + When I attempt to call the "DescribeConnections" API with: + | connectionId | fake-connection | + Then I expect the response error code to be "DirectConnectClientException" + And I expect the response error message to include: + """ + Connection ID fake-connection has an invalid format + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directoryservice/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directoryservice/client.go new file mode 100644 index 000000000..aedcd281e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directoryservice/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package directoryservice provides gucumber integration tests support. +package directoryservice + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/directoryservice" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@directoryservice", func() { + World["client"] = directoryservice.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directoryservice/directoryservice.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directoryservice/directoryservice.feature new file mode 100644 index 000000000..315839b66 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directoryservice/directoryservice.feature @@ -0,0 +1,17 @@ +# language: en +@directoryservice @ds @client +Feature: AWS Directory Service + + I want to use AWS Directory Service + + Scenario: Making a request + When I call the "DescribeDirectories" API + Then the value at "DirectoryDescriptions" should be a list + + Scenario: Handling errors + When I attempt to call the "CreateDirectory" API with: + | Name | | + | Password | | + | Size | | + Then I expect the response error code to be "ValidationException" + diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodb/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodb/client.go new file mode 100644 index 000000000..735d2f5de --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodb/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package dynamodb provides gucumber integration tests support. +package dynamodb + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/dynamodb" + . 
"github.com/lsegal/gucumber" +) + +func init() { + Before("@dynamodb", func() { + World["client"] = dynamodb.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodb/dynamodb.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodb/dynamodb.feature new file mode 100644 index 000000000..1df6b3ccb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodb/dynamodb.feature @@ -0,0 +1,19 @@ +# language: en +@dynamodb @client +Feature: Amazon DynamoDB + + Scenario: Making a request + When I call the "ListTables" API with JSON: + """ + {"Limit": 1} + """ + Then the value at "TableNames" should be a list + + Scenario: Handling errors + When I attempt to call the "DescribeTable" API with: + | TableName | fake-table | + Then I expect the response error code to be "ResourceNotFoundException" + And I expect the response error message to include: + """ + Requested resource not found: Table: fake-table not found + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodbstreams/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodbstreams/client.go new file mode 100644 index 000000000..083a49456 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodbstreams/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package dynamodbstreams provides gucumber integration tests support. +package dynamodbstreams + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/dynamodbstreams" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@dynamodbstreams", func() { + World["client"] = dynamodbstreams.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodbstreams/dynamodbstreams.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodbstreams/dynamodbstreams.feature new file mode 100644 index 000000000..6e35e29eb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodbstreams/dynamodbstreams.feature @@ -0,0 +1,16 @@ +# language: en +@dynamodbstreams @client +Feature: Amazon DynamoDB Streams + + Scenario: Making a request + When I call the "ListStreams" API + Then the value at "Streams" should be a list + + Scenario: Handling errors + When I attempt to call the "DescribeStream" API with: + | StreamArn | fake-stream | + Then I expect the response error code to be "InvalidParameter" + And I expect the response error message to include: + """ + StreamArn + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ec2/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ec2/client.go new file mode 100644 index 000000000..d83038b5f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ec2/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package ec2 provides gucumber integration tests support. +package ec2 + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/ec2" + . 
"github.com/lsegal/gucumber" +) + +func init() { + Before("@ec2", func() { + World["client"] = ec2.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ec2/ec2.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ec2/ec2.feature new file mode 100644 index 000000000..e238c2cd6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ec2/ec2.feature @@ -0,0 +1,18 @@ +# language: en +@ec2 @client +Feature: Amazon Elastic Compute Cloud + + Scenario: Making a request + When I call the "DescribeRegions" API + Then the value at "Regions" should be a list + + Scenario: Handling errors + When I attempt to call the "DescribeInstances" API with JSON: + """ + {"InstanceIds": ["i-12345678"]} + """ + Then I expect the response error code to be "InvalidInstanceID.NotFound" + And I expect the response error message to include: + """ + The instance ID 'i-12345678' does not exist + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ecs/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ecs/client.go new file mode 100644 index 000000000..9251e9c75 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ecs/client.go @@ -0,0 +1,19 @@ +// +build integration + +//Package ecs provides gucumber integration tests support. +package ecs + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/ecs" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@ecs", func() { + // FIXME remove custom region + World["client"] = ecs.New(smoke.Session, + aws.NewConfig().WithRegion("us-west-2")) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ecs/ecs.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ecs/ecs.feature new file mode 100644 index 000000000..694213785 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ecs/ecs.feature @@ -0,0 +1,14 @@ +# language: en +@ecs @client +Feature: Amazon ECS + + I want to use Amazon ECS + + Scenario: Making a request + When I call the "ListClusters" API + Then the value at "clusterArns" should be a list + + Scenario: Handling errors + When I attempt to call the "StopTask" API with: + | task | xxxxxxxxxxx-xxxxxxxxxxxx-xxxxxxxxxxx | + Then the error code should be "ClusterNotFoundException" diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/efs/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/efs/client.go new file mode 100644 index 000000000..3d382528e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/efs/client.go @@ -0,0 +1,19 @@ +// +build integration + +//Package efs provides gucumber integration tests support. +package efs + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/efs" + . 
"github.com/lsegal/gucumber" +) + +func init() { + Before("@efs", func() { + // FIXME remove custom region + World["client"] = efs.New(smoke.Session, + aws.NewConfig().WithRegion("us-west-2")) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/efs/efs.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/efs/efs.feature new file mode 100644 index 000000000..113dd3501 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/efs/efs.feature @@ -0,0 +1,14 @@ +# language: en +@efs @elasticfilesystem @client +Feature: Amazon Elastic File System + + I want to use Amazon Elastic File System + + Scenario: Making a request + When I call the "DescribeFileSystems" API + Then the value at "FileSystems" should be a list + + Scenario: Handling errors + When I attempt to call the "DeleteFileSystem" API with: + | FileSystemId | fake-id | + Then the error code should be "BadRequest" diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticache/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticache/client.go new file mode 100644 index 000000000..a20580022 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticache/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package elasticache provides gucumber integration tests support. +package elasticache + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/elasticache" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@elasticache", func() { + World["client"] = elasticache.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticache/elasticache.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticache/elasticache.feature new file mode 100644 index 000000000..48828ca2d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticache/elasticache.feature @@ -0,0 +1,16 @@ +# language: en +@elasticache @client +Feature: ElastiCache + + Scenario: Making a request + When I call the "DescribeEvents" API + Then the value at "Events" should be a list + + Scenario: Handling errors + When I attempt to call the "DescribeCacheClusters" API with: + | CacheClusterId | fake_cluster | + Then I expect the response error code to be "InvalidParameterValue" + And I expect the response error message to include: + """ + The parameter CacheClusterIdentifier is not a valid identifier. + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticbeanstalk/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticbeanstalk/client.go new file mode 100644 index 000000000..ffe504ca6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticbeanstalk/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package elasticbeanstalk provides gucumber integration tests support. +package elasticbeanstalk + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/elasticbeanstalk" + . 
"github.com/lsegal/gucumber" +) + +func init() { + Before("@elasticbeanstalk", func() { + World["client"] = elasticbeanstalk.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticbeanstalk/elasticbeanstalk.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticbeanstalk/elasticbeanstalk.feature new file mode 100644 index 000000000..35b1ad884 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticbeanstalk/elasticbeanstalk.feature @@ -0,0 +1,16 @@ +# language: en +@elasticbeanstalk @client +Feature: AWS Elastic Beanstalk + + Scenario: Making a request + When I call the "ListAvailableSolutionStacks" API + Then the value at "SolutionStacks" should be a list + + Scenario: Handling errors + When I attempt to call the "DescribeEnvironmentResources" API with: + | EnvironmentId | fake_environment | + Then I expect the response error code to be "InvalidParameterValue" + And I expect the response error message to include: + """ + No Environment found for EnvironmentId = 'fake_environment'. + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticloadbalancing/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticloadbalancing/client.go new file mode 100644 index 000000000..1dc4a430e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticloadbalancing/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package elasticloadbalancing provides gucumber integration tests support. +package elasticloadbalancing + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/elb" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@elasticloadbalancing", func() { + World["client"] = elb.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticloadbalancing/elasticloadbalancing.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticloadbalancing/elasticloadbalancing.feature new file mode 100644 index 000000000..a8c720908 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticloadbalancing/elasticloadbalancing.feature @@ -0,0 +1,18 @@ +# language: en +@elasticloadbalancing @client +Feature: Elastic Load Balancing + + Scenario: Making a request + When I call the "DescribeLoadBalancers" API + Then the value at "LoadBalancerDescriptions" should be a list + + Scenario: Handling errors + When I attempt to call the "DescribeLoadBalancers" API with JSON: + """ + {"LoadBalancerNames": ["fake_load_balancer"]} + """ + Then I expect the response error code to be "ValidationError" + And I expect the response error message to include: + """ + LoadBalancer name cannot contain characters that are not letters, or digits or the dash. + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elastictranscoder/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elastictranscoder/client.go new file mode 100644 index 000000000..085966d52 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elastictranscoder/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package elastictranscoder provides gucumber integration tests support. +package elastictranscoder + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/elastictranscoder" + . 
"github.com/lsegal/gucumber" +) + +func init() { + Before("@elastictranscoder", func() { + World["client"] = elastictranscoder.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elastictranscoder/elastictranscoder.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elastictranscoder/elastictranscoder.feature new file mode 100644 index 000000000..77658e668 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elastictranscoder/elastictranscoder.feature @@ -0,0 +1,16 @@ +# language: en +@elastictranscoder @client +Feature: Amazon Elastic Transcoder + + Scenario: Making a request + When I call the "ListPresets" API + Then the value at "Presets" should be a list + + Scenario: Handling errors + When I attempt to call the "ReadJob" API with: + | Id | fake_job | + Then I expect the response error code to be "ValidationException" + And I expect the response error message to include: + """ + Value 'fake_job' at 'id' failed to satisfy constraint + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/emr/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/emr/client.go new file mode 100644 index 000000000..35b7f97d8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/emr/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package emr provides gucumber integration tests support. +package emr + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/emr" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@emr", func() { + World["client"] = emr.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/emr/emr.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/emr/emr.feature new file mode 100644 index 000000000..133c17412 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/emr/emr.feature @@ -0,0 +1,16 @@ +# language: en +@emr @client @elasticmapreduce +Feature: Amazon EMR + + Scenario: Making a request + When I call the "ListClusters" API + Then the value at "Clusters" should be a list + + Scenario: Handling errors + When I attempt to call the "DescribeCluster" API with: + | ClusterId | fake_cluster | + Then I expect the response error code to be "InvalidRequestException" + And I expect the response error message to include: + """ + Cluster id 'fake_cluster' is not valid. + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/es/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/es/client.go new file mode 100644 index 000000000..a4213a9d9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/es/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package es provides gucumber integration tests support. +package es + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/elasticsearchservice" + . 
"github.com/lsegal/gucumber" +) + +func init() { + Before("@es", func() { + World["client"] = elasticsearchservice.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/es/es.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/es/es.feature new file mode 100644 index 000000000..8bd1f1e49 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/es/es.feature @@ -0,0 +1,16 @@ +# language: en +@es @elasticsearchservice +Feature: Amazon ElasticsearchService + + Scenario: Making a request + When I call the "ListDomainNames" API + Then the value at "DomainNames" should be a list + + Scenario: Handling errors + When I attempt to call the "DescribeElasticsearchDomain" API with: + | DomainName | not-a-domain | + Then the error code should be "ResourceNotFoundException" + And I expect the response error message to include: + """ + Domain not found: not-a-domain + """ \ No newline at end of file diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/glacier/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/glacier/client.go new file mode 100644 index 000000000..a051f43c7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/glacier/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package glacier provides gucumber integration tests support. +package glacier + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/glacier" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@glacier", func() { + World["client"] = glacier.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/glacier/glacier.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/glacier/glacier.feature new file mode 100644 index 000000000..0e1a113a3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/glacier/glacier.feature @@ -0,0 +1,16 @@ +# language: en +@glacier @client +Feature: Amazon Glacier + + Scenario: Making a request + When I call the "ListVaults" API + Then the response should contain a "VaultList" + + Scenario: Handling errors + When I attempt to call the "ListVaults" API with: + | accountId | abcmnoxyz | + Then I expect the response error code to be "UnrecognizedClientException" + And I expect the response error message to include: + """ + No account found for the given parameters + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iam/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iam/client.go new file mode 100644 index 000000000..00dc855b0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iam/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package iam provides gucumber integration tests support. +package iam + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/iam" + . 
"github.com/lsegal/gucumber" +) + +func init() { + Before("@iam", func() { + World["client"] = iam.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iam/iam.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iam/iam.feature new file mode 100644 index 000000000..0da6463ae --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iam/iam.feature @@ -0,0 +1,16 @@ +# language: en +@iam @client +Feature: AWS Identity and Access Management + + Scenario: Making a request + When I call the "ListUsers" API + Then the value at "Users" should be a list + + Scenario: Handling errors + When I attempt to call the "GetUser" API with: + | UserName | fake_user | + Then I expect the response error code to be "NoSuchEntity" + And I expect the response error message to include: + """ + The user with name fake_user cannot be found. + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iotdataplane/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iotdataplane/client.go new file mode 100644 index 000000000..86a2e9900 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iotdataplane/client.go @@ -0,0 +1,26 @@ +// +build integration + +//Package iotdataplane provides gucumber integration tests support. +package iotdataplane + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/iot" + "github.com/aws/aws-sdk-go/service/iotdataplane" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@iotdataplane", func() { + svc := iot.New(smoke.Session) + result, err := svc.DescribeEndpoint(&iot.DescribeEndpointInput{}) + if err != nil { + World["error"] = err + return + } + + World["client"] = iotdataplane.New(smoke.Session, aws.NewConfig(). + WithEndpoint(*result.EndpointAddress)) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iotdataplane/iotdataplane.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iotdataplane/iotdataplane.feature new file mode 100644 index 000000000..a6ced14d7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iotdataplane/iotdataplane.feature @@ -0,0 +1,12 @@ +# language: en +@iotdataplane @client +Feature: AWS IoT Data Plane + + Scenario: Handling errors + When I attempt to call the "GetThingShadow" API with: + | ThingName | "fakeThing" | + Then I expect the response error code to be "InvalidRequestException" + And I expect the response error message to include: + """ + Invalid thing name + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kinesis/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kinesis/client.go new file mode 100644 index 000000000..7e0c684f4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kinesis/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package kinesis provides gucumber integration tests support. +package kinesis + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/kinesis" + . 
"github.com/lsegal/gucumber" +) + +func init() { + Before("@kinesis", func() { + World["client"] = kinesis.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kinesis/kinesis.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kinesis/kinesis.feature new file mode 100644 index 000000000..570505cd2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kinesis/kinesis.feature @@ -0,0 +1,16 @@ +# language: en +@kinesis @client +Feature: AWS Kinesis + + Scenario: Making a request + When I call the "ListStreams" API + Then the value at "StreamNames" should be a list + + Scenario: Handling errors + When I attempt to call the "DescribeStream" API with: + | StreamName | bogus-stream-name | + Then I expect the response error code to be "ResourceNotFoundException" + And I expect the response error message to include: + """ + Stream bogus-stream-name under account + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kms/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kms/client.go new file mode 100644 index 000000000..cf52a84ab --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kms/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package kms provides gucumber integration tests support. +package kms + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/kms" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@kms", func() { + World["client"] = kms.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kms/kms.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kms/kms.feature new file mode 100644 index 000000000..ee428abb8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kms/kms.feature @@ -0,0 +1,13 @@ +# language: en +@kms @client +Feature: Amazon Key Management Service + + Scenario: Making a request + When I call the "ListAliases" API + Then the value at "Aliases" should be a list + + Scenario: Handling errors + When I attempt to call the "GetKeyPolicy" API with: + | KeyId | fake-key | + | PolicyName | fakepolicy | + Then I expect the response error code to be "NotFoundException" diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/lambda/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/lambda/client.go new file mode 100644 index 000000000..7934b7ccf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/lambda/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package lambda provides gucumber integration tests support. +package lambda + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/lambda" + . 
"github.com/lsegal/gucumber" +) + +func init() { + Before("@lambda", func() { + World["client"] = lambda.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/lambda/lambda.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/lambda/lambda.feature new file mode 100644 index 000000000..6ff9cf4a3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/lambda/lambda.feature @@ -0,0 +1,16 @@ +# language: en +@lambda @client +Feature: Amazon Lambda + + Scenario: Making a request + When I call the "ListFunctions" API + Then the value at "Functions" should be a list + + Scenario: Handling errors + When I attempt to call the "Invoke" API with: + | FunctionName | bogus-function | + Then I expect the response error code to be "ResourceNotFoundException" + And I expect the response error message to include: + """ + Function not found + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/machinelearning/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/machinelearning/client.go new file mode 100644 index 000000000..fd3046400 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/machinelearning/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package machinelearning provides gucumber integration tests support. +package machinelearning + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/machinelearning" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@machinelearning", func() { + World["client"] = machinelearning.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/machinelearning/machinelearning.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/machinelearning/machinelearning.feature new file mode 100644 index 000000000..2d9b0649a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/machinelearning/machinelearning.feature @@ -0,0 +1,18 @@ +# language: en +@machinelearning @client +Feature: Amazon Machine Learning + + I want to use Amazon Machine Learning + + Scenario: Making a request + When I call the "DescribeMLModels" API + Then the value at "Results" should be a list + + Scenario: Error handling + When I attempt to call the "GetBatchPrediction" API with: + | BatchPredictionId | fake-id | + Then the error code should be "ResourceNotFoundException" + And the error message should contain: + """ + No BatchPrediction with id fake-id exists + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/opsworks/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/opsworks/client.go new file mode 100644 index 000000000..25279b04f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/opsworks/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package opsworks provides gucumber integration tests support. +package opsworks + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/opsworks" + . 
"github.com/lsegal/gucumber" +) + +func init() { + Before("@opsworks", func() { + World["client"] = opsworks.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/opsworks/opsworks.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/opsworks/opsworks.feature new file mode 100644 index 000000000..a9cfe52b5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/opsworks/opsworks.feature @@ -0,0 +1,16 @@ +# language: en +@opsworks @client +Feature: AWS OpsWorks + + Scenario: Making a request + When I call the "DescribeStacks" API + Then the value at "Stacks" should be a list + + Scenario: Handling errors + When I attempt to call the "DescribeLayers" API with: + | StackId | fake_stack | + Then I expect the response error code to be "ResourceNotFoundException" + And I expect the response error message to include: + """ + Unable to find stack with ID fake_stack + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/rds/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/rds/client.go new file mode 100644 index 000000000..e3b3ad1cc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/rds/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package rds provides gucumber integration tests support. +package rds + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/rds" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@rds", func() { + World["client"] = rds.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/rds/rds.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/rds/rds.feature new file mode 100644 index 000000000..547d76db8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/rds/rds.feature @@ -0,0 +1,16 @@ +# language: en +@rds @client +Feature: Amazon RDS + + Scenario: Making a request + When I call the "DescribeDBEngineVersions" API + Then the value at "DBEngineVersions" should be a list + + Scenario: Handling errors + When I attempt to call the "DescribeDBInstances" API with: + | DBInstanceIdentifier | fake-id | + Then I expect the response error code to be "DBInstanceNotFound" + And I expect the response error message to include: + """ + DBInstance fake-id not found. + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/redshift/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/redshift/client.go new file mode 100644 index 000000000..b8f971410 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/redshift/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package redshift provides gucumber integration tests support. +package redshift + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/redshift" + . 
"github.com/lsegal/gucumber" +) + +func init() { + Before("@redshift", func() { + World["client"] = redshift.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/redshift/redshift.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/redshift/redshift.feature new file mode 100644 index 000000000..8cb45b14c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/redshift/redshift.feature @@ -0,0 +1,16 @@ +# language: en +@redshift @client +Feature: Amazon Redshift + + Scenario: Making a request + When I call the "DescribeClusterVersions" API + Then the value at "ClusterVersions" should be a list + + Scenario: Handling errors + When I attempt to call the "DescribeClusters" API with: + | ClusterIdentifier | fake-cluster | + Then I expect the response error code to be "ClusterNotFound" + And I expect the response error message to include: + """ + Cluster fake-cluster not found. + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53/client.go new file mode 100644 index 000000000..3b9d153d6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package route53 provides gucumber integration tests support. +package route53 + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/route53" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@route53", func() { + World["client"] = route53.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53/route53.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53/route53.feature new file mode 100644 index 000000000..51463c524 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53/route53.feature @@ -0,0 +1,16 @@ +# language: en +@route53 @client +Feature: Amazon Route 53 + + Scenario: Making a request + When I call the "ListHostedZones" API + Then the value at "HostedZones" should be a list + + Scenario: Handling errors + When I attempt to call the "GetHostedZone" API with: + | Id | fake-zone | + Then I expect the response error code to be "NoSuchHostedZone" + And I expect the response error message to include: + """ + No hosted zone found with ID: fake-zone + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53domains/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53domains/client.go new file mode 100644 index 000000000..8f79136dc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53domains/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package route53domains provides gucumber integration tests support. +package route53domains + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/route53domains" + . 
"github.com/lsegal/gucumber" +) + +func init() { + Before("@route53domains", func() { + World["client"] = route53domains.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53domains/route53domains.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53domains/route53domains.feature new file mode 100644 index 000000000..f18dcc4e1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53domains/route53domains.feature @@ -0,0 +1,16 @@ +# language: en +@route53domains @client +Feature: Amazon Route53 Domains + + Scenario: Making a request + When I call the "ListDomains" API + Then the value at "Domains" should be a list + + Scenario: Handling errors + When I attempt to call the "GetDomainDetail" API with: + | DomainName | fake-domain-name | + Then I expect the response error code to be "InvalidInput" + And I expect the response error message to include: + """ + domain name must contain more than 1 label + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ses/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ses/client.go new file mode 100644 index 000000000..905c72518 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ses/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package ses provides gucumber integration tests support. +package ses + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/ses" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@ses", func() { + World["client"] = ses.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ses/ses.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ses/ses.feature new file mode 100644 index 000000000..6b67fa7f3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ses/ses.feature @@ -0,0 +1,16 @@ +# language: en +@ses @email @client +Feature: Amazon Simple Email Service + + Scenario: Making a request + When I call the "ListIdentities" API + Then the value at "Identities" should be a list + + Scenario: Handling errors + When I attempt to call the "VerifyEmailIdentity" API with: + | EmailAddress | fake_email | + Then I expect the response error code to be "InvalidParameterValue" + And I expect the response error message to include: + """ + Invalid email address. + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/shared.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/shared.go new file mode 100644 index 000000000..317780fed --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/shared.go @@ -0,0 +1,230 @@ +// +build integration + +// Package smoke contains shared step definitions that are used across integration tests +package smoke + +import ( + "encoding/json" + "fmt" + "os" + "reflect" + "regexp" + "strconv" + "strings" + + . "github.com/lsegal/gucumber" + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/session" +) + +// Session is a shared session for all integration smoke tests to use. 
+var Session = session.New() + +func init() { + logLevel := Session.Config.LogLevel + if os.Getenv("DEBUG") != "" { + logLevel = aws.LogLevel(aws.LogDebug) + } + if os.Getenv("DEBUG_SIGNING") != "" { + logLevel = aws.LogLevel(aws.LogDebugWithSigning) + } + if os.Getenv("DEBUG_BODY") != "" { + logLevel = aws.LogLevel(aws.LogDebugWithHTTPBody) + } + Session.Config.LogLevel = logLevel + + When(`^I call the "(.+?)" API$`, func(op string) { + call(op, nil, false) + }) + + When(`^I call the "(.+?)" API with:$`, func(op string, args [][]string) { + call(op, args, false) + }) + + Then(`^the value at "(.+?)" should be a list$`, func(member string) { + vals, _ := awsutil.ValuesAtPath(World["response"], member) + assert.NotNil(T, vals) + }) + + Then(`^the response should contain a "(.+?)"$`, func(member string) { + vals, _ := awsutil.ValuesAtPath(World["response"], member) + assert.NotEmpty(T, vals) + }) + + When(`^I attempt to call the "(.+?)" API with:$`, func(op string, args [][]string) { + call(op, args, true) + }) + + Then(`^I expect the response error code to be "(.+?)"$`, func(code string) { + err, ok := World["error"].(awserr.Error) + assert.True(T, ok, "no error returned") + if ok { + assert.Equal(T, code, err.Code(), "Error: %v", err) + } + }) + + And(`^I expect the response error message to include:$`, func(data string) { + err, ok := World["error"].(awserr.Error) + assert.True(T, ok, "no error returned") + if ok { + assert.Contains(T, err.Error(), data) + } + }) + + And(`^I expect the response error message to include one of:$`, func(table [][]string) { + err, ok := World["error"].(awserr.Error) + assert.True(T, ok, "no error returned") + if ok { + found := false + for _, row := range table { + if strings.Contains(err.Error(), row[0]) { + found = true + break + } + } + + assert.True(T, found, fmt.Sprintf("no error messages matched: \"%s\"", err.Error())) + } + }) + + And(`^I expect the response error message not be empty$`, func() { + err, ok := World["error"].(awserr.Error) + assert.True(T, ok, "no error returned") + assert.NotEmpty(T, err.Message()) + }) + + When(`^I call the "(.+?)" API with JSON:$`, func(s1 string, data string) { + callWithJSON(s1, data, false) + }) + + When(`^I attempt to call the "(.+?)" API with JSON:$`, func(s1 string, data string) { + callWithJSON(s1, data, true) + }) + + Then(`^the error code should be "(.+?)"$`, func(s1 string) { + err, ok := World["error"].(awserr.Error) + assert.True(T, ok, "no error returned") + assert.Equal(T, s1, err.Code()) + }) + + And(`^the error message should contain:$`, func(data string) { + err, ok := World["error"].(awserr.Error) + assert.True(T, ok, "no error returned") + assert.Contains(T, err.Error(), data) + }) + + Then(`^the request should fail$`, func() { + err, ok := World["error"].(awserr.Error) + assert.True(T, ok, "no error returned") + assert.Error(T, err) + }) + + Then(`^the request should be successful$`, func() { + err, ok := World["error"].(awserr.Error) + assert.False(T, ok, "error returned") + assert.NoError(T, err) + }) +} + +// findMethod finds the op operation on the v structure using a case-insensitive +// lookup. Returns nil if no method is found. 
+func findMethod(v reflect.Value, op string) *reflect.Value { + t := v.Type() + op = strings.ToLower(op) + for i := 0; i < t.NumMethod(); i++ { + name := t.Method(i).Name + if strings.ToLower(name) == op { + m := v.MethodByName(name) + return &m + } + } + return nil +} + +// call calls an operation on World["client"] by the name op using the args +// table of arguments to set. +func call(op string, args [][]string, allowError bool) { + v := reflect.ValueOf(World["client"]) + if m := findMethod(v, op); m != nil { + t := m.Type() + in := reflect.New(t.In(0).Elem()) + fillArgs(in, args) + + resps := m.Call([]reflect.Value{in}) + World["response"] = resps[0].Interface() + World["error"] = resps[1].Interface() + + if !allowError { + err, _ := World["error"].(error) + assert.NoError(T, err) + } + } else { + assert.Fail(T, "failed to find operation "+op) + } +} + +// reIsNum is a regular expression matching a numeric input (integer) +var reIsNum = regexp.MustCompile(`^\d+$`) + +// reIsArray is a regular expression matching a list +var reIsArray = regexp.MustCompile(`^\['.*?'\]$`) +var reArrayElem = regexp.MustCompile(`'(.+?)'`) + +// fillArgs fills arguments on the input structure using the args table of +// arguments. +func fillArgs(in reflect.Value, args [][]string) { + if args == nil { + return + } + + for _, row := range args { + path := row[0] + var val interface{} = row[1] + if reIsArray.MatchString(row[1]) { + quotedStrs := reArrayElem.FindAllString(row[1], -1) + strs := make([]*string, len(quotedStrs)) + for i, e := range quotedStrs { + str := e[1 : len(e)-1] + strs[i] = &str + } + val = strs + } else if reIsNum.MatchString(row[1]) { // handle integer values + num, err := strconv.ParseInt(row[1], 10, 64) + if err == nil { + val = num + } + } + awsutil.SetValueAtPath(in.Interface(), path, val) + } +} + +func callWithJSON(op, j string, allowError bool) { + v := reflect.ValueOf(World["client"]) + if m := findMethod(v, op); m != nil { + t := m.Type() + in := reflect.New(t.In(0).Elem()) + fillJSON(in, j) + + resps := m.Call([]reflect.Value{in}) + World["response"] = resps[0].Interface() + World["error"] = resps[1].Interface() + + if !allowError { + err, _ := World["error"].(error) + assert.NoError(T, err) + } + } else { + assert.Fail(T, "failed to find operation "+op) + } +} + +func fillJSON(in reflect.Value, j string) { + d := json.NewDecoder(strings.NewReader(j)) + if err := d.Decode(in.Interface()); err != nil { + panic(err) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/simpledb/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/simpledb/client.go new file mode 100644 index 000000000..7e3b5e0b3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/simpledb/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package simpledb provides gucumber integration tests support. +package simpledb + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/simpledb" + . 
"github.com/lsegal/gucumber" +) + +func init() { + Before("@simpledb", func() { + World["client"] = simpledb.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/simpledb/simpledb.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/simpledb/simpledb.feature new file mode 100644 index 000000000..ddc03d831 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/simpledb/simpledb.feature @@ -0,0 +1,24 @@ +# language: en +@simpledb @sdb +Feature: Amazon SimpleDB + + I want to use Amazon SimpleDB + + Scenario: Making a request + When I call the "CreateDomain" API with: + | DomainName | sample-domain | + Then the request should be successful + And I call the "ListDomains" API + Then the value at "DomainNames" should be a list + And I call the "DeleteDomain" API with: + | DomainName | sample-domain | + Then the request should be successful + + Scenario: Handling errors + When I attempt to call the "CreateDomain" API with: + | DomainName | | + Then I expect the response error code to be "InvalidParameterValue" + And I expect the response error message to include: + """ + DomainName is invalid + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sns/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sns/client.go new file mode 100644 index 000000000..70483cb83 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sns/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package sns provides gucumber integration tests support. +package sns + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/sns" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@sns", func() { + World["client"] = sns.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sns/sns.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sns/sns.feature new file mode 100644 index 000000000..76f6a16da --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sns/sns.feature @@ -0,0 +1,14 @@ +# language: en +@sns @client +Feature: Amazon Simple Notification Service + + Scenario: Making a request + When I call the "ListTopics" API + Then the value at "Topics" should be a list + + Scenario: Handling errors + When I attempt to call the "Publish" API with: + | Message | hello | + | TopicArn | fake_topic | + Then I expect the response error code to be "InvalidParameter" + And I expect the response error message not be empty diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sqs/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sqs/client.go new file mode 100644 index 000000000..c364ef690 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sqs/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package sqs provides gucumber integration tests support. +package sqs + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/sqs" + . 
"github.com/lsegal/gucumber" +) + +func init() { + Before("@sqs", func() { + World["client"] = sqs.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sqs/sqs.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sqs/sqs.feature new file mode 100644 index 000000000..1413820c0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sqs/sqs.feature @@ -0,0 +1,16 @@ +# language: en +@sqs @client +Feature: Amazon Simple Queue Service + + Scenario: Making a request + When I call the "ListQueues" API + Then the value at "QueueUrls" should be a list + + Scenario: Handling errors + When I attempt to call the "GetQueueUrl" API with: + | QueueName | fake_queue | + Then I expect the response error code to be "AWS.SimpleQueueService.NonExistentQueue" + And I expect the response error message to include: + """ + The specified queue does not exist for this wsdl version. + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ssm/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ssm/client.go new file mode 100644 index 000000000..20a8d4533 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ssm/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package ssm provides gucumber integration tests support. +package ssm + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/ssm" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@ssm", func() { + World["client"] = ssm.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ssm/ssm.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ssm/ssm.feature new file mode 100644 index 000000000..3e2230ed0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ssm/ssm.feature @@ -0,0 +1,16 @@ +# language: en +@ssm @client +Feature: Amazon SSM + + Scenario: Making a request + When I call the "ListDocuments" API + Then the value at "DocumentIdentifiers" should be a list + + Scenario: Handling errors + When I attempt to call the "GetDocument" API with: + | Name | 'fake-name' | + Then I expect the response error code to be "ValidationException" + And I expect the response error message to include: + """ + validation error detected + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/storagegateway/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/storagegateway/client.go new file mode 100644 index 000000000..2c5579eb0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/storagegateway/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package storagegateway provides gucumber integration tests support. +package storagegateway + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/storagegateway" + . 
"github.com/lsegal/gucumber" +) + +func init() { + Before("@storagegateway", func() { + World["client"] = storagegateway.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/storagegateway/storagegateway.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/storagegateway/storagegateway.feature new file mode 100644 index 000000000..ef96eed98 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/storagegateway/storagegateway.feature @@ -0,0 +1,16 @@ +# language: en +@storagegateway @client +Feature: AWS Storage Gateway + + Scenario: Making a request + When I call the "ListGateways" API + Then the value at "Gateways" should be a list + + Scenario: Handling errors + When I attempt to call the "ListVolumes" API with: + | GatewayARN | fake_gateway | + Then I expect the response error code to be "InvalidParameter" + And I expect the response error message to include: + """ + GatewayARN + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sts/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sts/client.go new file mode 100644 index 000000000..27584fb31 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sts/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package sts provides gucumber integration tests support. +package sts + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/sts" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@sts", func() { + World["client"] = sts.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sts/sts.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sts/sts.feature new file mode 100644 index 000000000..9caf1fa02 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sts/sts.feature @@ -0,0 +1,17 @@ +# language: en +@sts @client +Feature: AWS STS + + Scenario: Making a request + When I call the "GetSessionToken" API + Then the response should contain a "Credentials" + + Scenario: Handling errors + When I attempt to call the "GetFederationToken" API with: + | Name | temp | + | Policy | | + Then I expect the response error code to be "InvalidParameter" + And I expect the response error message to include: + """ + Policy + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/support/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/support/client.go new file mode 100644 index 000000000..a1e3e2295 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/support/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package support provides gucumber integration tests support. +package support + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/support" + . 
"github.com/lsegal/gucumber" +) + +func init() { + Before("@support", func() { + World["client"] = support.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/support/support.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/support/support.feature new file mode 100644 index 000000000..2f91ff896 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/support/support.feature @@ -0,0 +1,22 @@ +# language: en +@support @client +Feature: AWS Support + + I want to use AWS Support + + Scenario: Making a request + When I call the "DescribeServices" API + Then the value at "services" should be a list + + Scenario: Handling errors + When I attempt to call the "CreateCase" API with: + | subject | subject | + | communicationBody | communication | + | categoryCode | category | + | serviceCode | amazon-dynamodb | + | severityCode | low | + Then I expect the response error code to be "InvalidParameterValueException" + And the error message should contain: + """ + Invalid category code + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/swf/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/swf/client.go new file mode 100644 index 000000000..29e9dbcfd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/swf/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package swf provides gucumber integration tests support. +package swf + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/swf" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@swf", func() { + World["client"] = swf.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/swf/swf.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/swf/swf.feature new file mode 100644 index 000000000..1349c8133 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/swf/swf.feature @@ -0,0 +1,17 @@ +# language: en +@swf @client +Feature: Amazon Simple Workflow Service + + Scenario: Making a request + When I call the "ListDomains" API with: + | registrationStatus | REGISTERED | + Then the value at "domainInfos" should be a list + + Scenario: Handling errors + When I attempt to call the "DescribeDomain" API with: + | name | fake_domain | + Then I expect the response error code to be "UnknownResourceFault" + And I expect the response error message to include: + """ + Unknown domain: fake_domain + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/waf/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/waf/client.go new file mode 100644 index 000000000..3218b4915 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/waf/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package waf provides gucumber integration tests support. +package waf + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/waf" + . 
"github.com/lsegal/gucumber" +) + +func init() { + Before("@waf", func() { + World["client"] = waf.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/waf/waf.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/waf/waf.feature new file mode 100644 index 000000000..bf76fb661 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/waf/waf.feature @@ -0,0 +1,20 @@ +# language: en +@waf +Feature: AWS WAF + + Scenario: Making a request + When I call the "ListRules" API with JSON: + """ + {"Limit":20} + """ + Then the value at "Rules" should be a list + + Scenario: Handling errors + When I attempt to call the "CreateSqlInjectionMatchSet" API with: + | Name | fake_name | + | ChangeToken | fake_token | + Then I expect the response error code to be "WAFStaleDataException" + And I expect the response error message to include: + """ + The input token is no longer current + """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/workspaces/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/workspaces/client.go new file mode 100644 index 000000000..7e471feac --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/workspaces/client.go @@ -0,0 +1,16 @@ +// +build integration + +//Package workspaces provides gucumber integration tests support. +package workspaces + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/workspaces" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@workspaces", func() { + World["client"] = workspaces.New(smoke.Session) + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/workspaces/workspaces.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/workspaces/workspaces.feature new file mode 100644 index 000000000..09ca88491 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/workspaces/workspaces.feature @@ -0,0 +1,18 @@ +# language: en +@workspaces @client +Feature: Amazon WorkSpaces + + I want to use Amazon WorkSpaces + + Scenario: Making a request + When I call the "DescribeWorkspaces" API + Then the value at "Workspaces" should be a list + + Scenario: Handling errors + When I attempt to call the "DescribeWorkspaces" API with: + | DirectoryId | fake-id | + Then I expect the response error code to be "ValidationException" + And I expect the response error message to include: + """ + The Directory ID fake-id in the request is invalid. 
+ """ diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/mock/server.go b/vendor/github.com/aws/aws-sdk-go/awstesting/mock/server.go new file mode 100644 index 000000000..026f7beb2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/mock/server.go @@ -0,0 +1,20 @@ +package mock + +import ( + "net/http" + "net/http/httptest" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" +) + +// Session is a mock session which is used to hit the mock server +var Session = session.New(&aws.Config{ + DisableSSL: aws.Bool(true), + Endpoint: aws.String(server.URL[7:]), +}) + +// server is the mock server that simply writes a 200 status back to the client +var server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) +})) diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/performance/benchmarks.go b/vendor/github.com/aws/aws-sdk-go/awstesting/performance/benchmarks.go new file mode 100644 index 000000000..5d49202f4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/performance/benchmarks.go @@ -0,0 +1,122 @@ +// +build integration + +package performance + +import ( + "errors" + "fmt" + "os" + "reflect" + "runtime" + "strings" + "testing" + + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting/mock" + "github.com/lsegal/gucumber" +) + +// mapCreateClients allows for the creation of clients +func mapCreateClients() { + clientFns := []func(){} + for _, c := range clients { + clientFns = append(clientFns, func() { c.Call([]reflect.Value{reflect.ValueOf(mock.Session)}) }) + } + + gucumber.World["services"] = clientFns +} + +func buildAnArrayOfClients() { + methods := []reflect.Value{} + params := [][]reflect.Value{} + + for _, c := range clients { + method, param, err := findAndGetMethod(c.Call([]reflect.Value{reflect.ValueOf(mock.Session)})) + if err == nil { + methods = append(methods, method) + params = append(params, param) + } + } + + fns := []func(){} + for i := 0; i < len(methods); i++ { + m := methods[i] + p := params[i] + f := func() { + reqs := m.Call(p) + resp := reqs[0].Interface().(*request.Request).Send() + fmt.Println(resp) + } + fns = append(fns, f) + } + gucumber.World["clientFns"] = fns +} + +// findAndGetMethod will grab the method, params to be passed to the method, and an error. 
+// findAndGetMethod returns the first method on the client whose input struct
+// has no required fields, the params to call it with, and an error if no such
+// method exists.
+func findAndGetMethod(client interface{}) (reflect.Value, []reflect.Value, error) {
+	v := reflect.ValueOf(client).Type()
+	n := v.NumMethod()
+
+outer:
+	for i := 0; i < n; i++ {
+		method := v.Method(i)
+		if method.Type.NumIn() != 2 || strings.HasSuffix(method.Name, "Request") {
+			continue
+		}
+		param := reflect.New(method.Type.In(1).Elem())
+		for j := 0; j < param.Elem().NumField(); j++ {
+			field := param.Elem().Type().Field(j)
+			req := field.Tag.Get("required")
+
+			if req == "true" {
+				continue outer
+			}
+		}
+
+		params := []reflect.Value{reflect.ValueOf(client), param}
+		return method.Func, params, nil
+	}
+
+	return reflect.Value{}, nil, errors.New("No method found")
+}
+
+// benchmarkTask runs each of fns i1 times under testing.Benchmark, logging the
+// result's data under the unique key and recording memory stats before and after.
+func benchmarkTask(key string, fns []func(), i1 int) error {
+	gucumber.World["error"] = nil
+	memStatStart := &runtime.MemStats{}
+	runtime.ReadMemStats(memStatStart)
+
+	results := testing.Benchmark(func(b *testing.B) {
+		for _, f := range fns {
+			for i := 0; i < i1; i++ {
+				f()
+			}
+		}
+	})
+
+	results.N = i1
+	memStatEnd := &runtime.MemStats{}
+	runtime.ReadMemStats(memStatEnd)
+	l, err := newBenchmarkLogger("stdout")
+	if err != nil {
+		return err
+	}
+	l.log(key, results)
+
+	toDynamodb := os.Getenv("AWS_TESTING_LOG_RESULTS") == "true"
+	if toDynamodb {
+		l, err := newBenchmarkLogger("dynamodb")
+		if err != nil {
+			return err
+		}
+		l.log(key+"_start_benchmarks", memStatStart)
+		l.log(key+"_end_benchmarks", memStatEnd)
+	}
+
+	if memStatStart.Alloc < memStatEnd.Alloc {
+		return errors.New("Leaked memory")
+	}
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/performance/client.go b/vendor/github.com/aws/aws-sdk-go/awstesting/performance/client.go
new file mode 100644
index 000000000..e63af3816
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/awstesting/performance/client.go
@@ -0,0 +1,13 @@
+// +build integration
+
+//Package performance provides gucumber integration tests support.
+package performance + +import ( + "github.com/lsegal/gucumber" +) + +func init() { + gucumber.Before("@performance", func() { + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/performance/clients.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/performance/clients.feature new file mode 100644 index 000000000..c248329e0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/performance/clients.feature @@ -0,0 +1,17 @@ +# language: en +@performance @clients +Feature: Client Performance + Background: + Given I have loaded my SDK and its dependencies + And I have a list of services + And I take a snapshot of my resources + + Scenario: Creating and then cleaning up clients doesn't leak resources + When I create and discard 100 clients for each service + Then I should not have leaked any resources + + Scenario: Sending requests doesn't leak resources + When I create a client for each service + And I execute 100 command(s) on each client + And I destroy all the clients + Then I should not have leaked any resources diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/performance/clients.go b/vendor/github.com/aws/aws-sdk-go/awstesting/performance/clients.go new file mode 100644 index 000000000..6baa4444b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/performance/clients.go @@ -0,0 +1,137 @@ +// +build integration + +package performance + +import ( + "reflect" + + "github.com/aws/aws-sdk-go/service/acm" + "github.com/aws/aws-sdk-go/service/apigateway" + "github.com/aws/aws-sdk-go/service/autoscaling" + "github.com/aws/aws-sdk-go/service/cloudformation" + "github.com/aws/aws-sdk-go/service/cloudfront" + "github.com/aws/aws-sdk-go/service/cloudhsm" + "github.com/aws/aws-sdk-go/service/cloudsearch" + "github.com/aws/aws-sdk-go/service/cloudsearchdomain" + "github.com/aws/aws-sdk-go/service/cloudtrail" + "github.com/aws/aws-sdk-go/service/cloudwatch" + "github.com/aws/aws-sdk-go/service/cloudwatchevents" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + "github.com/aws/aws-sdk-go/service/codecommit" + "github.com/aws/aws-sdk-go/service/codedeploy" + "github.com/aws/aws-sdk-go/service/codepipeline" + "github.com/aws/aws-sdk-go/service/cognitoidentity" + "github.com/aws/aws-sdk-go/service/cognitosync" + "github.com/aws/aws-sdk-go/service/configservice" + "github.com/aws/aws-sdk-go/service/datapipeline" + "github.com/aws/aws-sdk-go/service/devicefarm" + "github.com/aws/aws-sdk-go/service/directconnect" + "github.com/aws/aws-sdk-go/service/directoryservice" + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/aws/aws-sdk-go/service/dynamodbstreams" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/ecr" + "github.com/aws/aws-sdk-go/service/ecs" + "github.com/aws/aws-sdk-go/service/efs" + "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/aws/aws-sdk-go/service/elasticbeanstalk" + "github.com/aws/aws-sdk-go/service/elasticsearchservice" + "github.com/aws/aws-sdk-go/service/elastictranscoder" + "github.com/aws/aws-sdk-go/service/elb" + "github.com/aws/aws-sdk-go/service/emr" + "github.com/aws/aws-sdk-go/service/firehose" + "github.com/aws/aws-sdk-go/service/glacier" + "github.com/aws/aws-sdk-go/service/iam" + "github.com/aws/aws-sdk-go/service/inspector" + "github.com/aws/aws-sdk-go/service/iot" + "github.com/aws/aws-sdk-go/service/iotdataplane" + "github.com/aws/aws-sdk-go/service/kinesis" + "github.com/aws/aws-sdk-go/service/kms" + "github.com/aws/aws-sdk-go/service/lambda" + 
"github.com/aws/aws-sdk-go/service/machinelearning" + "github.com/aws/aws-sdk-go/service/marketplacecommerceanalytics" + "github.com/aws/aws-sdk-go/service/mobileanalytics" + "github.com/aws/aws-sdk-go/service/opsworks" + "github.com/aws/aws-sdk-go/service/rds" + "github.com/aws/aws-sdk-go/service/redshift" + "github.com/aws/aws-sdk-go/service/route53" + "github.com/aws/aws-sdk-go/service/route53domains" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/ses" + "github.com/aws/aws-sdk-go/service/simpledb" + "github.com/aws/aws-sdk-go/service/sns" + "github.com/aws/aws-sdk-go/service/sqs" + "github.com/aws/aws-sdk-go/service/ssm" + "github.com/aws/aws-sdk-go/service/storagegateway" + "github.com/aws/aws-sdk-go/service/sts" + "github.com/aws/aws-sdk-go/service/support" + "github.com/aws/aws-sdk-go/service/swf" + "github.com/aws/aws-sdk-go/service/waf" + "github.com/aws/aws-sdk-go/service/workspaces" +) + +var clients = []reflect.Value{ + reflect.ValueOf(acm.New), + reflect.ValueOf(apigateway.New), + reflect.ValueOf(autoscaling.New), + reflect.ValueOf(cloudformation.New), + reflect.ValueOf(cloudfront.New), + reflect.ValueOf(cloudhsm.New), + reflect.ValueOf(cloudsearch.New), + reflect.ValueOf(cloudsearchdomain.New), + reflect.ValueOf(cloudtrail.New), + reflect.ValueOf(cloudwatch.New), + reflect.ValueOf(cloudwatchevents.New), + reflect.ValueOf(cloudwatchlogs.New), + reflect.ValueOf(codecommit.New), + reflect.ValueOf(codedeploy.New), + reflect.ValueOf(codepipeline.New), + reflect.ValueOf(cognitoidentity.New), + reflect.ValueOf(cognitosync.New), + reflect.ValueOf(configservice.New), + reflect.ValueOf(datapipeline.New), + reflect.ValueOf(devicefarm.New), + reflect.ValueOf(directconnect.New), + reflect.ValueOf(directoryservice.New), + reflect.ValueOf(dynamodb.New), + reflect.ValueOf(dynamodbstreams.New), + reflect.ValueOf(ec2.New), + reflect.ValueOf(ecr.New), + reflect.ValueOf(ecs.New), + reflect.ValueOf(efs.New), + reflect.ValueOf(elasticache.New), + reflect.ValueOf(elasticbeanstalk.New), + reflect.ValueOf(elasticsearchservice.New), + reflect.ValueOf(elastictranscoder.New), + reflect.ValueOf(elb.New), + reflect.ValueOf(emr.New), + reflect.ValueOf(firehose.New), + reflect.ValueOf(glacier.New), + reflect.ValueOf(iam.New), + reflect.ValueOf(inspector.New), + reflect.ValueOf(iot.New), + reflect.ValueOf(iotdataplane.New), + reflect.ValueOf(kinesis.New), + reflect.ValueOf(kms.New), + reflect.ValueOf(lambda.New), + reflect.ValueOf(machinelearning.New), + reflect.ValueOf(marketplacecommerceanalytics.New), + reflect.ValueOf(mobileanalytics.New), + reflect.ValueOf(opsworks.New), + reflect.ValueOf(rds.New), + reflect.ValueOf(redshift.New), + reflect.ValueOf(route53.New), + reflect.ValueOf(route53domains.New), + reflect.ValueOf(s3.New), + reflect.ValueOf(ses.New), + reflect.ValueOf(simpledb.New), + reflect.ValueOf(sns.New), + reflect.ValueOf(sqs.New), + reflect.ValueOf(ssm.New), + reflect.ValueOf(storagegateway.New), + reflect.ValueOf(sts.New), + reflect.ValueOf(support.New), + reflect.ValueOf(swf.New), + reflect.ValueOf(waf.New), + reflect.ValueOf(workspaces.New), +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/performance/init.go b/vendor/github.com/aws/aws-sdk-go/awstesting/performance/init.go new file mode 100644 index 000000000..6dde8b543 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/performance/init.go @@ -0,0 +1,93 @@ +// +build integration + +package performance + +import ( + "bytes" + "errors" + "fmt" + "runtime" + + 
"github.com/lsegal/gucumber" + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/awstesting/mock" + "github.com/aws/aws-sdk-go/service/s3" +) + +func init() { + // Go loads all of its dependecies on compile + gucumber.Given(`^I have loaded my SDK and its dependencies$`, func() { + }) + + // Performance + gucumber.When(`^I create and discard (\d+) clients for each service$`, func(i1 int) { + services := gucumber.World["services"].([]func()) + err := benchmarkTask(fmt.Sprintf("%d_create_and_discard_clients", i1), services, i1) + gucumber.World["error"] = err + }) + + gucumber.Then(`^I should not have leaked any resources$`, func() { + runtime.GC() + err, ok := gucumber.World["error"].(awserr.Error) + assert.False(gucumber.T, ok, "error returned") + assert.NoError(gucumber.T, err) + }) + + gucumber.And(`^I have a list of services$`, func() { + mapCreateClients() + }) + + gucumber.And(`^I take a snapshot of my resources$`, func() { + // Can't take a memory snapshot here, because gucumber does some + // allocation between each instruction leading to unreliable numbers + }) + + gucumber.When(`^I create a client for each service$`, func() { + buildAnArrayOfClients() + }) + + gucumber.And("^I execute (\\d+) command\\(s\\) on each client$", func(i1 int) { + clientFns := gucumber.World["clientFns"].([]func()) + err := benchmarkTask(fmt.Sprintf("%d_commands_on_clients", i1), clientFns, i1) + gucumber.World["error"] = err + }) + + gucumber.And(`^I destroy all the clients$`, func() { + delete(gucumber.World, "clientFns") + runtime.GC() + }) + + gucumber.Given(`^I have a (\d+) byte file$`, func(i1 int) { + gucumber.World["file"] = make([]byte, i1) + }) + + gucumber.When(`^I upload the file$`, func() { + svc := s3.New(mock.Session) + memStatStart := &runtime.MemStats{} + runtime.ReadMemStats(memStatStart) + gucumber.World["start"] = memStatStart + + svc.PutObjectRequest(&s3.PutObjectInput{ + Bucket: aws.String("bucketmesilly"), + Key: aws.String("testKey"), + Body: bytes.NewReader(gucumber.World["file"].([]byte)), + }) + }) + + gucumber.And(`then download the file$`, func() { + svc := s3.New(mock.Session) + svc.GetObjectRequest(&s3.GetObjectInput{ + Bucket: aws.String("bucketmesilly"), + Key: aws.String("testKey"), + }) + memStatEnd := &runtime.MemStats{} + runtime.ReadMemStats(memStatEnd) + memStatStart := gucumber.World["start"].(*runtime.MemStats) + if memStatStart.Alloc < memStatEnd.Alloc { + gucumber.World["error"] = errors.New("Leaked memory") + } + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/performance/logging.go b/vendor/github.com/aws/aws-sdk-go/awstesting/performance/logging.go new file mode 100644 index 000000000..c1a46950d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/performance/logging.go @@ -0,0 +1,122 @@ +// +build integration + +// Package performance contains shared step definitions that are used for performance testing +package performance + +import ( + "errors" + "fmt" + "os" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute" +) + +// benchmarkLogger handles all benchmark logging +type benchmarkLogger struct { + outputer +} + +// logger interface that handles any logging to an output +type logger interface { + log(key string, data map[string]interface{}) error +} + +// init intializes the logger and uses 
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/performance/logging.go b/vendor/github.com/aws/aws-sdk-go/awstesting/performance/logging.go
new file mode 100644
index 000000000..c1a46950d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/awstesting/performance/logging.go
@@ -0,0 +1,122 @@
+// +build integration
+
+// Package performance contains shared step definitions that are used for performance testing
+package performance
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/dynamodb"
+	"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
+)
+
+// benchmarkLogger handles all benchmark logging
+type benchmarkLogger struct {
+	outputer
+}
+
+// logger interface that handles any logging to an output
+type logger interface {
+	log(key string, data map[string]interface{}) error
+}
+
+// newBenchmarkLogger initializes the logger and uses dependency injection for
+// the outputer
+func newBenchmarkLogger(output string) (*benchmarkLogger, error) {
+	b := &benchmarkLogger{}
+	switch output {
+	case "dynamodb":
+		region := os.Getenv("AWS_TESTING_REGION")
+		if region == "" {
+			return b, errors.New("No region specified. Please export AWS_TESTING_REGION")
+		}
+
+		table := os.Getenv("AWS_TESTING_DB_TABLE")
+		if table == "" {
+			return b, errors.New("No table specified. Please export AWS_TESTING_DB_TABLE")
+		}
+		b.outputer = newDynamodbOut(table, region)
+	case "stdout":
+		b.outputer = stdout{}
+	default:
+		return b, errors.New("Unsupported outputer")
+	}
+	return b, nil
+}
+
+type record struct {
+	Key  string
+	Data interface{}
+}
+
+// log builds a record from the key and data and passes it to the output
+// formatter
+func (b benchmarkLogger) log(key, data interface{}) error {
+	formatData := record{
+		Key:  fmt.Sprintf("%d-%v", time.Now().Unix(), key.(string)),
+		Data: data,
+	}
+
+	return b.output(formatData)
+}
+
+// outputer is a simple interface that handles output to a target system such
+// as dynamodb or stdout
+type outputer interface {
+	output(record) error
+}
+
+// dynamodbOut handles simple writes to dynamodb
type dynamodbOut struct {
+	table  string // table to write to in dynamodb
+	region string
+	db     *dynamodb.DynamoDB // the dynamodb session
+}
+
+// newDynamodbOut initializes a dynamodbOut with a new session
+func newDynamodbOut(table, region string) *dynamodbOut {
+	out := dynamodbOut{
+		table:  table,
+		region: region,
+	}
+
+	out.db = dynamodb.New(
+		session.New(),
+		&aws.Config{Region: &out.region},
+	)
+	return &out
+}
+
+// output just writes to dynamodb
+func (out dynamodbOut) output(data record) error {
+	input := &dynamodb.PutItemInput{
+		TableName: aws.String(out.table),
+	}
+
+	item, err := dynamodbattribute.ConvertToMap(data)
+	if err != nil {
+		return err
+	}
+
+	input.Item = item
+	_, err = out.db.PutItem(input)
+	return err
+}
+
+// stdout handles writes to stdout
+type stdout struct{}
+
+// output expects key value data to print to stdout
+func (out stdout) output(data record) error {
+	item, err := dynamodbattribute.ConvertToMap(data.Data)
+	if err != nil {
+		return err
+	}
+	fmt.Println(item)
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/performance/streaming.feature b/vendor/github.com/aws/aws-sdk-go/awstesting/performance/streaming.feature
new file mode 100644
index 000000000..cd24cb7db
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/awstesting/performance/streaming.feature
@@ -0,0 +1,26 @@
+# language: en
+@performance @streaming
+Feature: Streaming transfers consume a fixed amount of memory
+
+  Scenario Outline: Streaming uploads are O(1) in memory usage
+    Given I have a <bytes> byte file
+    And I take a snapshot of my resources
+    When I upload the file
+    Then I should not have leaked any resources
+
+    Examples:
+      | bytes     |
+      | 2097152   |
+      | 209715200 |
+
+  Scenario Outline: Streaming downloads are O(1) in memory usage
+    Given I have a <bytes> byte file
+    And I take a snapshot of my resources
+    When I upload the file
+    And then download the file
+    Then I should not have leaked any resources
+
+    Examples:
+      | bytes     |
+      | 2097152   |
+      | 209715200 |
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.golang-tip b/vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.golang-tip
new file mode 100644
index 000000000..70148d532
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.golang-tip
@@ -0,0 +1,42 @@
+# Based on
docker-library's golang 1.6 alpine and wheezy docker files. +# https://github.com/docker-library/golang/blob/master/1.6/alpine/Dockerfile +# https://github.com/docker-library/golang/blob/master/1.6/wheezy/Dockerfile +FROM buildpack-deps:wheezy-scm + +ENV GOLANG_VERSION tip +ENV GOLANG_SRC_REPO_URL https://go.googlesource.com/go + +ENV GOLANG_BOOTSTRAP_URL https://storage.googleapis.com/golang/go1.4.3.linux-amd64.tar.gz +ENV GOLANG_BOOTSTRAP_SHA256 ce3140662f45356eb78bc16a88fc7cfb29fb00e18d7c632608245b789b2086d2 +ENV GOLANG_BOOTSTRAP_PATH /usr/local/bootstrap + +# gcc for cgo +RUN apt-get update && apt-get install -y --no-install-recommends \ + g++ \ + gcc \ + libc6-dev \ + make \ + git \ + && rm -rf /var/lib/apt/lists/* + +# Setup the Bootstrap +RUN mkdir -p "$GOLANG_BOOTSTRAP_PATH" \ + && curl -fsSL "$GOLANG_BOOTSTRAP_URL" -o golang.tar.gz \ + && echo "$GOLANG_BOOTSTRAP_SHA256 golang.tar.gz" | sha256sum -c - \ + && tar -C "$GOLANG_BOOTSTRAP_PATH" -xzf golang.tar.gz \ + && rm golang.tar.gz + +# Get and build Go tip +RUN export GOROOT_BOOTSTRAP=$GOLANG_BOOTSTRAP_PATH/go \ + && git clone "$GOLANG_SRC_REPO_URL" /usr/local/go \ + && cd /usr/local/go/src \ + && ./make.bash \ + && rm -rf "$GOLANG_BOOTSTRAP_PATH" /usr/local/go/pkg/bootstrap + +# Build Go workspace and environment +ENV GOPATH /go +ENV PATH $GOPATH/bin:/usr/local/go/bin:$PATH +RUN mkdir -p "$GOPATH/src" "$GOPATH/bin" \ + && chmod -R 777 "$GOPATH" + +WORKDIR $GOPATH diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.test.go1.4 b/vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.test.go1.4 new file mode 100644 index 000000000..e048ed567 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.test.go1.4 @@ -0,0 +1,7 @@ +FROM ubuntu:12.04 +FROM golang:1.4 + +ADD . /go/src/github.com/aws/aws-sdk-go + +WORKDIR /go/src/github.com/aws/aws-sdk-go +CMD ["make", "get-deps", "unit"] diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.test.go1.5 b/vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.test.go1.5 new file mode 100644 index 000000000..010381c79 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.test.go1.5 @@ -0,0 +1,9 @@ +FROM ubuntu:12.04 +FROM golang:1.5 + +ADD . /go/src/github.com/aws/aws-sdk-go + +ENV GO15VENDOREXPERIMENT="1" + +WORKDIR /go/src/github.com/aws/aws-sdk-go +CMD ["make", "unit"] diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.test.go1.5-novendorexp b/vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.test.go1.5-novendorexp new file mode 100644 index 000000000..9ec9f169d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.test.go1.5-novendorexp @@ -0,0 +1,7 @@ +FROM ubuntu:12.04 +FROM golang:1.5 + +ADD . /go/src/github.com/aws/aws-sdk-go + +WORKDIR /go/src/github.com/aws/aws-sdk-go +CMD ["make", "get-deps", "unit"] diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.test.go1.6 b/vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.test.go1.6 new file mode 100644 index 000000000..541a83735 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.test.go1.6 @@ -0,0 +1,7 @@ +FROM ubuntu:12.04 +FROM golang:1.6 + +ADD . 
/go/src/github.com/aws/aws-sdk-go
+
+WORKDIR /go/src/github.com/aws/aws-sdk-go
+CMD ["make", "unit"]
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.test.go1.7 b/vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.test.go1.7
new file mode 100644
index 000000000..aed4408a8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.test.go1.7
@@ -0,0 +1,7 @@
+FROM ubuntu:12.04
+FROM golang:1.7
+
+ADD . /go/src/github.com/aws/aws-sdk-go
+
+WORKDIR /go/src/github.com/aws/aws-sdk-go
+CMD ["make", "unit"]
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.test.gotip b/vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.test.gotip
new file mode 100644
index 000000000..9758279f9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/awstesting/sandbox/Dockerfile.test.gotip
@@ -0,0 +1,7 @@
+FROM ubuntu:12.04
+FROM aws-golang:tip
+
+ADD . /go/src/github.com/aws/aws-sdk-go
+
+WORKDIR /go/src/github.com/aws/aws-sdk-go
+CMD ["make", "unit"]
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/unit/unit.go b/vendor/github.com/aws/aws-sdk-go/awstesting/unit/unit.go
new file mode 100644
index 000000000..e3ec33a82
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/awstesting/unit/unit.go
@@ -0,0 +1,13 @@
+// Package unit performs initialization and validation for unit tests
+package unit
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/session"
+)
+
+// Session is a shared session for unit tests to use.
+var Session = session.New(aws.NewConfig().
+	WithCredentials(credentials.NewStaticCredentials("AKID", "SECRET", "SESSION")).
+	WithRegion("mock-region"))
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/util.go b/vendor/github.com/aws/aws-sdk-go/awstesting/util.go
new file mode 100644
index 000000000..77c296e99
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/awstesting/util.go
@@ -0,0 +1,67 @@
+package awstesting
+
+import (
+	"io"
+
+	"github.com/aws/aws-sdk-go/private/util"
+)
+
+// ZeroReader is an io.Reader which will always write zeros to the byte slice provided.
+type ZeroReader struct{}
+
+// Read fills the provided byte slice with zeros returning the number of bytes written.
+func (r *ZeroReader) Read(b []byte) (int, error) {
+	for i := 0; i < len(b); i++ {
+		b[i] = 0
+	}
+	return len(b), nil
+}
+
+// ReadCloser is an io.ReadCloser for unit testing.
+// Designed to test for leaks and whether a handle has
+// been closed.
+type ReadCloser struct {
+	Size     int
+	Closed   bool
+	set      bool
+	FillData func(bool, []byte, int, int)
+}
+
+// Read will call FillData and fill the slice with whatever data is needed.
+// Decrements the size until zero, then returns io.EOF.
+func (r *ReadCloser) Read(b []byte) (int, error) {
+	if r.Closed {
+		return 0, io.EOF
+	}
+
+	delta := len(b)
+	if delta > r.Size {
+		delta = r.Size
+	}
+	r.Size -= delta
+
+	for i := 0; i < delta; i++ {
+		b[i] = 'a'
+	}
+
+	if r.FillData != nil {
+		r.FillData(r.set, b, r.Size, delta)
+	}
+	r.set = true
+
+	if r.Size > 0 {
+		return delta, nil
+	}
+	return delta, io.EOF
+}
+
+// Close sets Closed to true and returns no error
+func (r *ReadCloser) Close() error {
+	r.Closed = true
+	return nil
+}
+
+// SortedKeys returns a sorted slice of keys of a map.
+func SortedKeys(m map[string]interface{}) []string {
+	return util.SortedKeys(m)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/awstesting/util_test.go b/vendor/github.com/aws/aws-sdk-go/awstesting/util_test.go
new file mode 100644
index 000000000..4b03db019
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/awstesting/util_test.go
@@ -0,0 +1,49 @@
+package awstesting_test
+
+import (
+	"io"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/aws/aws-sdk-go/awstesting"
+)
+
+func TestReadCloserClose(t *testing.T) {
+	rc := awstesting.ReadCloser{Size: 1}
+	err := rc.Close()
+
+	assert.Nil(t, err)
+	assert.True(t, rc.Closed)
+	assert.Equal(t, rc.Size, 1)
+}
+
+func TestReadCloserRead(t *testing.T) {
+	rc := awstesting.ReadCloser{Size: 5}
+	b := make([]byte, 2)
+
+	n, err := rc.Read(b)
+
+	assert.Nil(t, err)
+	assert.Equal(t, n, 2)
+	assert.False(t, rc.Closed)
+	assert.Equal(t, rc.Size, 3)
+
+	err = rc.Close()
+	assert.Nil(t, err)
+	n, err = rc.Read(b)
+	assert.Equal(t, err, io.EOF)
+	assert.Equal(t, n, 0)
+}
+
+func TestReadCloserReadAll(t *testing.T) {
+	rc := awstesting.ReadCloser{Size: 5}
+	b := make([]byte, 5)
+
+	n, err := rc.Read(b)
+
+	assert.Equal(t, err, io.EOF)
+	assert.Equal(t, n, 5)
+	assert.False(t, rc.Closed)
+	assert.Equal(t, rc.Size, 0)
+}
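For context, the ReadCloser type exercised above can stand in for an HTTP response body in unit tests: it serves Size bytes of 'a' and records whether Close was called. A small usage sketch (editor's illustration; only the vendored awstesting import is assumed):

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/aws/aws-sdk-go/awstesting"
)

func main() {
	rc := &awstesting.ReadCloser{Size: 8}
	b, err := ioutil.ReadAll(rc) // yields 8 bytes of 'a', then io.EOF
	fmt.Println(len(b), err)     // 8 <nil>
	rc.Close()
	fmt.Println(rc.Closed) // true: the handle was closed
}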
+
+
+
+
+ + Pop Out Code + + +
+
+ +
+
+
+ code on leftright + code width 70% + filepaths shownhidden +
+
+
+
+ {{range .Step}} +
+ +
{{html .Title}}
+
+ {{with .Err}} + ERROR LOADING FILE: {{html .}}

+ {{end}} + {{.XML}} +
+
{{html .}}
+
+ {{end}} +
+
+ previous step + • + next step +
+
+
diff --git a/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/codewalkdir.html b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/codewalkdir.html new file mode 100644 index 000000000..b7674c6ce --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/codewalkdir.html @@ -0,0 +1,16 @@ + + + +{{range .}} + + {{$name_html := html .Name}} + + + + +{{end}} +
{{$name_html}} {{html .Title}}
diff --git a/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/dirlist.html b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/dirlist.html new file mode 100644 index 000000000..a3e1a2fa8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/dirlist.html @@ -0,0 +1,31 @@ + + +

+ + + + + + + + + + + +{{range .}} + + {{$name_html := fileInfoName . | html}} + + + + + + +{{end}} + +
File Bytes Modified
..
{{$name_html}}{{html .Size}}{{fileInfoTime . | html}}
+
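The codewalkdir.html and dirlist.html templates above call helpers such as fileInfoName and fileInfoTime, which godoc registers on the template before parsing. The sketch below shows that mechanism with Go's text/template; the fileInfo type and the helper bodies are assumptions for illustration, not godoc's actual implementations:

package main

import (
	"os"
	"text/template"
	"time"
)

// fileInfo stands in for the values godoc feeds to dirlist.html.
type fileInfo struct {
	Name string
	Size int64
	Time time.Time
}

// Custom functions must be registered via Funcs before Parse; that is how
// names like fileInfoName become callable inside the template text.
const dirlist = "{{range .}}{{fileInfoName . | html}}\t{{.Size}}\t{{fileInfoTime . | html}}\n{{end}}"

func main() {
	tmpl := template.Must(template.New("dirlist").Funcs(template.FuncMap{
		"fileInfoName": func(fi fileInfo) string { return fi.Name },
		"fileInfoTime": func(fi fileInfo) string { return fi.Time.Format("2006-01-02 15:04:05") },
	}).Parse(dirlist))

	files := []fileInfo{
		{Name: "api.go", Size: 1024, Time: time.Now()},
		{Name: "doc.go", Size: 256, Time: time.Now()},
	}
	if err := tmpl.Execute(os.Stdout, files); err != nil {
		panic(err)
	}
}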

diff --git a/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/error.html b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/error.html new file mode 100644 index 000000000..7573aa236 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/error.html @@ -0,0 +1,9 @@ + + +

+{{html .}} +

diff --git a/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/example.html b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/example.html new file mode 100644 index 000000000..4f4e09e87 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/example.html @@ -0,0 +1,30 @@ +
+ +
+

Example{{example_suffix .Name}}

+ {{with .Doc}}

{{html .}}

{{end}} + {{$output := .Output}} + {{with .Play}} +
+
+
{{html $output}}
+
+ Run + Format + {{if $.Share}} + + {{end}} +
+
+ {{else}} +

Code:

+
{{.Code}}
+ {{with .Output}} +

Output:

+
{{html .}}
+ {{end}} + {{end}} +
+
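example.html above is the template godoc uses to render Go example functions. For reference, a function of the shape godoc feeds through it; the package placement is illustrative, but SortedKeys is the awstesting helper vendored earlier in this patch:

package awstesting_test

import (
	"fmt"

	"github.com/aws/aws-sdk-go/awstesting"
)

// godoc treats a function named ExampleXxx as documentation: the body is
// rendered under "Code:" and the trailing "// Output:" comment under
// "Output:", exactly the slots example.html fills in.
func ExampleSortedKeys() {
	keys := awstesting.SortedKeys(map[string]interface{}{"b": 1, "a": 2})
	fmt.Println(keys)
	// Output: [a b]
}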
diff --git a/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/godoc.html b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/godoc.html new file mode 100644 index 000000000..80a9cf952 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/godoc.html @@ -0,0 +1,138 @@ + + + + + + +{{with .Tabtitle}} + {{html .}} - Amazon Web Services - Go SDK +{{else}} + Amazon Web Services - Go SDK +{{end}} + + + + + + + + + + + + + + + + +
+... +
+ +
+ + + +
+
+ +{{/* The Table of Contents is automatically inserted in this
<div>. + Do not delete this <div>
. */}} +{{ if not .NoTOC }} + +{{ end }} +
+
+ +{{/* Body is HTML-escaped elsewhere */}} +{{printf "%s" .Body}} +
+
+
+ +
+
+ + + + + diff --git a/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/godocs.js b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/godocs.js new file mode 100644 index 000000000..ec9f37a9b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/godocs.js @@ -0,0 +1,571 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* A little code to ease navigation of these documents. + * + * On window load we: + * + Bind search box hint placeholder show/hide events (bindSearchEvents) + * + Generate a table of contents (generateTOC) + * + Bind foldable sections (bindToggles) + * + Bind links to foldable sections (bindToggleLinks) + */ + +(function() { +'use strict'; + +// Mobile-friendly topbar menu +$(function() { + var menu = $('#menu'); + var menuButton = $('#menu-button'); + var menuButtonArrow = $('#menu-button-arrow'); + menuButton.click(function(event) { + menu.toggleClass('menu-visible'); + menuButtonArrow.toggleClass('vertical-flip'); + event.preventDefault(); + return false; + }); +}); + +function bindSearchEvents() { + + var search = $('#search'); + if (search.length === 0) { + return; // no search box + } + + function clearInactive() { + if (search.is('.inactive')) { + search.val(''); + search.removeClass('inactive'); + } + } + + function restoreInactive() { + if (search.val() !== '') { + return; + } + search.val(search.attr('placeholder')); + search.addClass('inactive'); + } + + search.on('focus', clearInactive); + search.on('blur', restoreInactive); + + restoreInactive(); +} + +/* Generates a table of contents: looks for h2 and h3 elements and generates + * links. "Decorates" the element with id=="nav" with this table of contents. + */ +function generateTOC() { + if ($('#manual-nav').length > 0) { + return; + } + + var nav = $('#nav'); + if (nav.length === 0) { + return; + } + + var toc_items = []; + $(nav).nextAll('h2, h3').each(function() { + var node = this; + if (node.id == '') + node.id = 'tmp_' + toc_items.length; + var link = $('').attr('href', '#' + node.id).text($(node).text()); + var item; + if ($(node).is('h2')) { + item = $('
<dt/>');
+    } else { // h3
+      item = $('<dd class="indent"/>');
+    }
+    item.append(link);
+    toc_items.push(item);
+  });
+  if (toc_items.length <= 1) {
+    return;
+  }
+
+  var dl1 = $('<dl/>');
+  var dl2 = $('<dl/>');
+
+  var split_index = (toc_items.length / 2) + 1;
+  if (split_index < 8) {
+    split_index = toc_items.length;
+  }
+  for (var i = 0; i < split_index; i++) {
+    dl1.append(toc_items[i]);
+  }
+  for (/* keep using i */; i < toc_items.length; i++) {
+    dl2.append(toc_items[i]);
+  }
+
+  var tocTable = $('<table class="unruled"/>').appendTo(nav);
+  var tocBody = $('<tbody/>').appendTo(tocTable);
+  var tocRow = $('<tr/>').appendTo(tocBody);
+
+  // 1st column
+  $('<td class="first"/>
').appendTo(tocRow).append(dl1); + // 2nd column + $('').appendTo(tocRow).append(dl2); +} + +function bindToggle(el) { + $('.toggleButton', el).click(function() { + if ($(el).is('.toggle')) { + $(el).addClass('toggleVisible').removeClass('toggle'); + } else { + $(el).addClass('toggle').removeClass('toggleVisible'); + } + }); +} +function bindToggles(selector) { + $(selector).each(function(i, el) { + bindToggle(el); + }); +} + +function bindToggleLink(el, prefix) { + $(el).click(function() { + var href = $(el).attr('href'); + var i = href.indexOf('#'+prefix); + if (i < 0) { + return; + } + var id = '#' + prefix + href.slice(i+1+prefix.length); + if ($(id).is('.toggle')) { + $(id).find('.toggleButton').first().click(); + } + }); +} +function bindToggleLinks(selector, prefix) { + $(selector).each(function(i, el) { + bindToggleLink(el, prefix); + }); +} + +function setupDropdownPlayground() { + if (!$('#page').is('.wide')) { + return; // don't show on front page + } + var button = $('#playgroundButton'); + var div = $('#playground'); + var setup = false; + button.toggle(function() { + button.addClass('active'); + div.show(); + if (setup) { + return; + } + setup = true; + playground({ + 'codeEl': $('.code', div), + 'outputEl': $('.output', div), + 'runEl': $('.run', div), + 'fmtEl': $('.fmt', div), + 'shareEl': $('.share', div), + 'shareRedirect': '//play.golang.org/p/' + }); + }, + function() { + button.removeClass('active'); + div.hide(); + }); + button.show(); + $('#menu').css('min-width', '+=60'); +} + +function setupInlinePlayground() { + 'use strict'; + // Set up playground when each element is toggled. + $('div.play').each(function (i, el) { + // Set up playground for this example. + var setup = function() { + var code = $('.code', el); + playground({ + 'codeEl': code, + 'outputEl': $('.output', el), + 'runEl': $('.run', el), + 'fmtEl': $('.fmt', el), + 'shareEl': $('.share', el), + 'shareRedirect': '//play.golang.org/p/' + }); + + // Make the code textarea resize to fit content. + var resize = function() { + code.height(0); + var h = code[0].scrollHeight; + code.height(h+20); // minimize bouncing. + code.closest('.input').height(h); + }; + code.on('keydown', resize); + code.on('keyup', resize); + code.keyup(); // resize now. + }; + + // If example already visible, set up playground now. + if ($(el).is(':visible')) { + setup(); + return; + } + + // Otherwise, set up playground when example is expanded. + var built = false; + $(el).closest('.toggle').click(function() { + // Only set up once. + if (!built) { + setup(); + built = true; + } + }); + }); +} + +// fixFocus tries to put focus to div#page so that keyboard navigation works. +function fixFocus() { + var page = $('div#page'); + var topbar = $('div#topbar'); + page.css('outline', 0); // disable outline when focused + page.attr('tabindex', -1); // and set tabindex so that it is focusable + $(window).resize(function (evt) { + // only focus page when the topbar is at fixed position (that is, it's in + // front of page, and keyboard event will go to the former by default.) + // by focusing page, keyboard event will go to page so that up/down arrow, + // space, etc. will work as expected. 
+ if (topbar.css('position') == "fixed") + page.focus(); + }).resize(); +} + +function toggleHash() { + var hash = $(window.location.hash); + if (hash.is('.toggle')) { + hash.find('.toggleButton').first().click(); + } +} + +function personalizeInstallInstructions() { + var prefix = '?download='; + var s = window.location.search; + if (s.indexOf(prefix) != 0) { + // No 'download' query string; bail. + return; + } + + var filename = s.substr(prefix.length); + var filenameRE = /^go1\.\d+(\.\d+)?([a-z0-9]+)?\.([a-z0-9]+)(-[a-z0-9]+)?(-osx10\.[68])?\.([a-z.]+)$/; + $('.downloadFilename').text(filename); + $('.hideFromDownload').hide(); + var m = filenameRE.exec(filename); + if (!m) { + // Can't interpret file name; bail. + return; + } + + var os = m[3]; + var ext = m[6]; + if (ext != 'tar.gz') { + $('#tarballInstructions').hide(); + } + if (os != 'darwin' || ext != 'pkg') { + $('#darwinPackageInstructions').hide(); + } + if (os != 'windows') { + $('#windowsInstructions').hide(); + $('.testUnix').show(); + $('.testWindows').hide(); + } else { + if (ext != 'msi') { + $('#windowsInstallerInstructions').hide(); + } + if (ext != 'zip') { + $('#windowsZipInstructions').hide(); + } + $('.testUnix').hide(); + $('.testWindows').show(); + } + + var download = "https://storage.googleapis.com/golang/" + filename; + + var message = $('

<p class="downloading">'+
+    'Your download should begin shortly. '+
+    'If it does not, click <a>this link</a>.</p>');
+  message.find('a').attr('href', download);
+  message.insertAfter('#nav');
+
+  window.location = download;
+}
+
+$(document).ready(function() {
+  bindSearchEvents();
+  generateTOC();
+  bindToggles(".toggle");
+  bindToggles(".toggleVisible");
+  bindToggleLinks(".exampleLink", "example_");
+  bindToggleLinks(".overviewLink", "");
+  bindToggleLinks(".examplesLink", "");
+  bindToggleLinks(".indexLink", "");
+  setupDropdownPlayground();
+  setupInlinePlayground();
+  fixFocus();
+  setupTypeInfo();
+  setupCallgraphs();
+  toggleHash();
+  personalizeInstallInstructions();
+
+  // godoc.html defines window.initFuncs in the <head> tag, and root.html and
+  // codewalk.js push their on-page-ready functions to the list.
+  // We execute those functions here, to avoid loading jQuery until the page
+  // content is loaded.
+  for (var i = 0; i < window.initFuncs.length; i++) window.initFuncs[i]();
+});
+
+// -- analysis ---------------------------------------------------------
+
+// escapeHTML returns HTML for s, with metacharacters quoted.
+// It is safe for use in both elements and attributes
+// (unlike the "set innerText, read innerHTML" trick).
+function escapeHTML(s) {
+  return s.replace(/&/g, '&amp;').
+           replace(/\"/g, '&quot;').
+           replace(/\'/g, '&#39;').
+           replace(/</g, '&lt;').
+           replace(/>/g, '&gt;');
+}
+
+// makeAnchor returns HTML for an <a> element, given an anchorJSON object.
+function makeAnchor(json) {
+  var html = escapeHTML(json.Text);
+  if (json.Href != "") {
+    html = "<a href='" + json.Href + "'>" + html + "</a>";
+  }
+  return html;
+}
+
+function showLowFrame(html) {
+  var lowframe = document.getElementById('lowframe');
+  lowframe.style.height = "200px";
+  lowframe.innerHTML = "

" + html + "

\n" + + "
" +}; + +document.hideLowFrame = function() { + var lowframe = document.getElementById('lowframe'); + lowframe.style.height = "0px"; +} + +// onClickCallers is the onclick action for the 'func' tokens of a +// function declaration. +document.onClickCallers = function(index) { + var data = document.ANALYSIS_DATA[index] + if (data.Callers.length == 1 && data.Callers[0].Sites.length == 1) { + document.location = data.Callers[0].Sites[0].Href; // jump to sole caller + return; + } + + var html = "Callers of " + escapeHTML(data.Callee) + ":
\n"; + for (var i = 0; i < data.Callers.length; i++) { + var caller = data.Callers[i]; + html += "" + escapeHTML(caller.Func) + ""; + var sites = caller.Sites; + if (sites != null && sites.length > 0) { + html += " at line "; + for (var j = 0; j < sites.length; j++) { + if (j > 0) { + html += ", "; + } + html += "" + makeAnchor(sites[j]) + ""; + } + } + html += "
\n"; + } + showLowFrame(html); +}; + +// onClickCallees is the onclick action for the '(' token of a function call. +document.onClickCallees = function(index) { + var data = document.ANALYSIS_DATA[index] + if (data.Callees.length == 1) { + document.location = data.Callees[0].Href; // jump to sole callee + return; + } + + var html = "Callees of this " + escapeHTML(data.Descr) + ":
\n"; + for (var i = 0; i < data.Callees.length; i++) { + html += "" + makeAnchor(data.Callees[i]) + "
\n"; + } + showLowFrame(html); +}; + +// onClickTypeInfo is the onclick action for identifiers declaring a named type. +document.onClickTypeInfo = function(index) { + var data = document.ANALYSIS_DATA[index]; + var html = "Type " + data.Name + ": " + + "      (size=" + data.Size + ", align=" + data.Align + ")
\n"; + html += implementsHTML(data); + html += methodsetHTML(data); + showLowFrame(html); +}; + +// implementsHTML returns HTML for the implements relation of the +// specified TypeInfoJSON value. +function implementsHTML(info) { + var html = ""; + if (info.ImplGroups != null) { + for (var i = 0; i < info.ImplGroups.length; i++) { + var group = info.ImplGroups[i]; + var x = "" + escapeHTML(group.Descr) + " "; + for (var j = 0; j < group.Facts.length; j++) { + var fact = group.Facts[j]; + var y = "" + makeAnchor(fact.Other) + ""; + if (fact.ByKind != null) { + html += escapeHTML(fact.ByKind) + " type " + y + " implements " + x; + } else { + html += x + " implements " + y; + } + html += "
\n"; + } + } + } + return html; +} + + +// methodsetHTML returns HTML for the methodset of the specified +// TypeInfoJSON value. +function methodsetHTML(info) { + var html = ""; + if (info.Methods != null) { + for (var i = 0; i < info.Methods.length; i++) { + html += "" + makeAnchor(info.Methods[i]) + "
\n"; + } + } + return html; +} + +// onClickComm is the onclick action for channel "make" and "<-" +// send/receive tokens. +document.onClickComm = function(index) { + var ops = document.ANALYSIS_DATA[index].Ops + if (ops.length == 1) { + document.location = ops[0].Op.Href; // jump to sole element + return; + } + + var html = "Operations on this channel:
\n"; + for (var i = 0; i < ops.length; i++) { + html += makeAnchor(ops[i].Op) + " by " + escapeHTML(ops[i].Fn) + "
\n"; + } + if (ops.length == 0) { + html += "(none)
\n"; + } + showLowFrame(html); +}; + +$(window).load(function() { + // Scroll window so that first selection is visible. + // (This means we don't need to emit id='L%d' spans for each line.) + // TODO(adonovan): ideally, scroll it so that it's under the pointer, + // but I don't know how to get the pointer y coordinate. + var elts = document.getElementsByClassName("selection"); + if (elts.length > 0) { + elts[0].scrollIntoView() + } +}); + +// setupTypeInfo populates the "Implements" and "Method set" toggle for +// each type in the package doc. +function setupTypeInfo() { + for (var i in document.ANALYSIS_DATA) { + var data = document.ANALYSIS_DATA[i]; + + var el = document.getElementById("implements-" + i); + if (el != null) { + // el != null => data is TypeInfoJSON. + if (data.ImplGroups != null) { + el.innerHTML = implementsHTML(data); + el.parentNode.parentNode.style.display = "block"; + } + } + + var el = document.getElementById("methodset-" + i); + if (el != null) { + // el != null => data is TypeInfoJSON. + if (data.Methods != null) { + el.innerHTML = methodsetHTML(data); + el.parentNode.parentNode.style.display = "block"; + } + } + } +} + +function setupCallgraphs() { + if (document.CALLGRAPH == null) { + return + } + document.getElementById("pkg-callgraph").style.display = "block"; + + var treeviews = document.getElementsByClassName("treeview"); + for (var i = 0; i < treeviews.length; i++) { + var tree = treeviews[i]; + if (tree.id == null || tree.id.indexOf("callgraph-") != 0) { + continue; + } + var id = tree.id.substring("callgraph-".length); + $(tree).treeview({collapsed: true, animated: "fast"}); + document.cgAddChildren(tree, tree, [id]); + tree.parentNode.parentNode.style.display = "block"; + } +} + +document.cgAddChildren = function(tree, ul, indices) { + if (indices != null) { + for (var i = 0; i < indices.length; i++) { + var li = cgAddChild(tree, ul, document.CALLGRAPH[indices[i]]); + if (i == indices.length - 1) { + $(li).addClass("last"); + } + } + } + $(tree).treeview({animated: "fast", add: ul}); +} + +// cgAddChild adds an
<li> element for document.CALLGRAPH node cgn to
+// the parent <ul> element ul. tree is the tree's root <ul>
        element. +function cgAddChild(tree, ul, cgn) { + var li = document.createElement("li"); + ul.appendChild(li); + li.className = "closed"; + + var code = document.createElement("code"); + + if (cgn.Callees != null) { + $(li).addClass("expandable"); + + // Event handlers and innerHTML updates don't play nicely together, + // hence all this explicit DOM manipulation. + var hitarea = document.createElement("div"); + hitarea.className = "hitarea expandable-hitarea"; + li.appendChild(hitarea); + + li.appendChild(code); + + var childUL = document.createElement("ul"); + li.appendChild(childUL); + childUL.setAttribute('style', "display: none;"); + + var onClick = function() { + document.cgAddChildren(tree, childUL, cgn.Callees); + hitarea.removeEventListener('click', onClick) + }; + hitarea.addEventListener('click', onClick); + + } else { + li.appendChild(code); + } + code.innerHTML += " " + makeAnchor(cgn.Func); + return li +} + +})(); diff --git a/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/implements.html b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/implements.html new file mode 100644 index 000000000..5f65b861a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/implements.html @@ -0,0 +1,9 @@ + diff --git a/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/jquery.js b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/jquery.js new file mode 100644 index 000000000..bc3fbc81b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/jquery.js @@ -0,0 +1,2 @@ +/*! jQuery v1.8.2 jquery.com | jquery.org/license */ +(function(a,b){function G(a){var b=F[a]={};return p.each(a.split(s),function(a,c){b[c]=!0}),b}function J(a,c,d){if(d===b&&a.nodeType===1){var e="data-"+c.replace(I,"-$1").toLowerCase();d=a.getAttribute(e);if(typeof d=="string"){try{d=d==="true"?!0:d==="false"?!1:d==="null"?null:+d+""===d?+d:H.test(d)?p.parseJSON(d):d}catch(f){}p.data(a,c,d)}else d=b}return d}function K(a){var b;for(b in a){if(b==="data"&&p.isEmptyObject(a[b]))continue;if(b!=="toJSON")return!1}return!0}function ba(){return!1}function bb(){return!0}function bh(a){return!a||!a.parentNode||a.parentNode.nodeType===11}function bi(a,b){do a=a[b];while(a&&a.nodeType!==1);return a}function bj(a,b,c){b=b||0;if(p.isFunction(b))return p.grep(a,function(a,d){var e=!!b.call(a,d,a);return e===c});if(b.nodeType)return p.grep(a,function(a,d){return a===b===c});if(typeof b=="string"){var d=p.grep(a,function(a){return a.nodeType===1});if(be.test(b))return p.filter(b,d,!c);b=p.filter(b,d)}return p.grep(a,function(a,d){return p.inArray(a,b)>=0===c})}function bk(a){var b=bl.split("|"),c=a.createDocumentFragment();if(c.createElement)while(b.length)c.createElement(b.pop());return c}function bC(a,b){return a.getElementsByTagName(b)[0]||a.appendChild(a.ownerDocument.createElement(b))}function bD(a,b){if(b.nodeType!==1||!p.hasData(a))return;var c,d,e,f=p._data(a),g=p._data(b,f),h=f.events;if(h){delete g.handle,g.events={};for(c in h)for(d=0,e=h[c].length;d").appendTo(e.body),c=b.css("display");b.remove();if(c==="none"||c===""){bI=e.body.appendChild(bI||p.extend(e.createElement("iframe"),{frameBorder:0,width:0,height:0}));if(!bJ||!bI.createElement)bJ=(bI.contentWindow||bI.contentDocument).document,bJ.write(""),bJ.close();b=bJ.body.appendChild(bJ.createElement(a)),c=bH(b,"display"),e.body.removeChild(bI)}return bS[a]=c,c}function ci(a,b,c,d){var e;if(p.isArray(b))p.each(b,function(b,e){c||ce.test(a)?d(a,e):ci(a+"["+(typeof 
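For comparison with the escapeHTML helper in godocs.js above: Go's standard library performs the same five-character escaping, which is useful to know when mirroring this logic on the Go side of the doc tooling. A minimal sketch, not part of the patch:

package main

import (
	"fmt"
	"html"
)

// html.EscapeString escapes the same metacharacters that the godocs.js
// escapeHTML helper handles: <, >, &, ' and ".
func main() {
	s := `<a href="index.html">M&M's</a>`
	fmt.Println(html.EscapeString(s))
	// Prints: &lt;a href=&#34;index.html&#34;&gt;M&amp;M&#39;s&lt;/a&gt;
}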
e=="object"?b:"")+"]",e,c,d)});else if(!c&&p.type(b)==="object")for(e in b)ci(a+"["+e+"]",b[e],c,d);else d(a,b)}function cz(a){return function(b,c){typeof b!="string"&&(c=b,b="*");var d,e,f,g=b.toLowerCase().split(s),h=0,i=g.length;if(p.isFunction(c))for(;h)[^>]*$|#([\w\-]*)$)/,v=/^<(\w+)\s*\/?>(?:<\/\1>|)$/,w=/^[\],:{}\s]*$/,x=/(?:^|:|,)(?:\s*\[)+/g,y=/\\(?:["\\\/bfnrt]|u[\da-fA-F]{4})/g,z=/"[^"\\\r\n]*"|true|false|null|-?(?:\d\d*\.|)\d+(?:[eE][\-+]?\d+|)/g,A=/^-ms-/,B=/-([\da-z])/gi,C=function(a,b){return(b+"").toUpperCase()},D=function(){e.addEventListener?(e.removeEventListener("DOMContentLoaded",D,!1),p.ready()):e.readyState==="complete"&&(e.detachEvent("onreadystatechange",D),p.ready())},E={};p.fn=p.prototype={constructor:p,init:function(a,c,d){var f,g,h,i;if(!a)return this;if(a.nodeType)return this.context=this[0]=a,this.length=1,this;if(typeof a=="string"){a.charAt(0)==="<"&&a.charAt(a.length-1)===">"&&a.length>=3?f=[null,a,null]:f=u.exec(a);if(f&&(f[1]||!c)){if(f[1])return c=c instanceof p?c[0]:c,i=c&&c.nodeType?c.ownerDocument||c:e,a=p.parseHTML(f[1],i,!0),v.test(f[1])&&p.isPlainObject(c)&&this.attr.call(a,c,!0),p.merge(this,a);g=e.getElementById(f[2]);if(g&&g.parentNode){if(g.id!==f[2])return d.find(a);this.length=1,this[0]=g}return this.context=e,this.selector=a,this}return!c||c.jquery?(c||d).find(a):this.constructor(c).find(a)}return p.isFunction(a)?d.ready(a):(a.selector!==b&&(this.selector=a.selector,this.context=a.context),p.makeArray(a,this))},selector:"",jquery:"1.8.2",length:0,size:function(){return this.length},toArray:function(){return k.call(this)},get:function(a){return a==null?this.toArray():a<0?this[this.length+a]:this[a]},pushStack:function(a,b,c){var d=p.merge(this.constructor(),a);return d.prevObject=this,d.context=this.context,b==="find"?d.selector=this.selector+(this.selector?" 
":"")+c:b&&(d.selector=this.selector+"."+b+"("+c+")"),d},each:function(a,b){return p.each(this,a,b)},ready:function(a){return p.ready.promise().done(a),this},eq:function(a){return a=+a,a===-1?this.slice(a):this.slice(a,a+1)},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},slice:function(){return this.pushStack(k.apply(this,arguments),"slice",k.call(arguments).join(","))},map:function(a){return this.pushStack(p.map(this,function(b,c){return a.call(b,c,b)}))},end:function(){return this.prevObject||this.constructor(null)},push:j,sort:[].sort,splice:[].splice},p.fn.init.prototype=p.fn,p.extend=p.fn.extend=function(){var a,c,d,e,f,g,h=arguments[0]||{},i=1,j=arguments.length,k=!1;typeof h=="boolean"&&(k=h,h=arguments[1]||{},i=2),typeof h!="object"&&!p.isFunction(h)&&(h={}),j===i&&(h=this,--i);for(;i0)return;d.resolveWith(e,[p]),p.fn.trigger&&p(e).trigger("ready").off("ready")},isFunction:function(a){return p.type(a)==="function"},isArray:Array.isArray||function(a){return p.type(a)==="array"},isWindow:function(a){return a!=null&&a==a.window},isNumeric:function(a){return!isNaN(parseFloat(a))&&isFinite(a)},type:function(a){return a==null?String(a):E[m.call(a)]||"object"},isPlainObject:function(a){if(!a||p.type(a)!=="object"||a.nodeType||p.isWindow(a))return!1;try{if(a.constructor&&!n.call(a,"constructor")&&!n.call(a.constructor.prototype,"isPrototypeOf"))return!1}catch(c){return!1}var d;for(d in a);return d===b||n.call(a,d)},isEmptyObject:function(a){var b;for(b in a)return!1;return!0},error:function(a){throw new Error(a)},parseHTML:function(a,b,c){var d;return!a||typeof a!="string"?null:(typeof b=="boolean"&&(c=b,b=0),b=b||e,(d=v.exec(a))?[b.createElement(d[1])]:(d=p.buildFragment([a],b,c?null:[]),p.merge([],(d.cacheable?p.clone(d.fragment):d.fragment).childNodes)))},parseJSON:function(b){if(!b||typeof b!="string")return null;b=p.trim(b);if(a.JSON&&a.JSON.parse)return a.JSON.parse(b);if(w.test(b.replace(y,"@").replace(z,"]").replace(x,"")))return(new Function("return "+b))();p.error("Invalid JSON: "+b)},parseXML:function(c){var d,e;if(!c||typeof c!="string")return null;try{a.DOMParser?(e=new DOMParser,d=e.parseFromString(c,"text/xml")):(d=new ActiveXObject("Microsoft.XMLDOM"),d.async="false",d.loadXML(c))}catch(f){d=b}return(!d||!d.documentElement||d.getElementsByTagName("parsererror").length)&&p.error("Invalid XML: "+c),d},noop:function(){},globalEval:function(b){b&&r.test(b)&&(a.execScript||function(b){a.eval.call(a,b)})(b)},camelCase:function(a){return a.replace(A,"ms-").replace(B,C)},nodeName:function(a,b){return a.nodeName&&a.nodeName.toLowerCase()===b.toLowerCase()},each:function(a,c,d){var e,f=0,g=a.length,h=g===b||p.isFunction(a);if(d){if(h){for(e in a)if(c.apply(a[e],d)===!1)break}else for(;f0&&a[0]&&a[i-1]||i===0||p.isArray(a));if(j)for(;h-1)i.splice(c,1),e&&(c<=g&&g--,c<=h&&h--)}),this},has:function(a){return p.inArray(a,i)>-1},empty:function(){return i=[],this},disable:function(){return i=j=c=b,this},disabled:function(){return!i},lock:function(){return j=b,c||l.disable(),this},locked:function(){return!j},fireWith:function(a,b){return b=b||[],b=[a,b.slice?b.slice():b],i&&(!d||j)&&(e?j.push(b):k(b)),this},fire:function(){return l.fireWith(this,arguments),this},fired:function(){return!!d}};return l},p.extend({Deferred:function(a){var b=[["resolve","done",p.Callbacks("once memory"),"resolved"],["reject","fail",p.Callbacks("once memory"),"rejected"],["notify","progress",p.Callbacks("memory")]],c="pending",d={state:function(){return c},always:function(){return 
e.done(arguments).fail(arguments),this},then:function(){var a=arguments;return p.Deferred(function(c){p.each(b,function(b,d){var f=d[0],g=a[b];e[d[1]](p.isFunction(g)?function(){var a=g.apply(this,arguments);a&&p.isFunction(a.promise)?a.promise().done(c.resolve).fail(c.reject).progress(c.notify):c[f+"With"](this===e?c:this,[a])}:c[f])}),a=null}).promise()},promise:function(a){return a!=null?p.extend(a,d):d}},e={};return d.pipe=d.then,p.each(b,function(a,f){var g=f[2],h=f[3];d[f[1]]=g.add,h&&g.add(function(){c=h},b[a^1][2].disable,b[2][2].lock),e[f[0]]=g.fire,e[f[0]+"With"]=g.fireWith}),d.promise(e),a&&a.call(e,e),e},when:function(a){var b=0,c=k.call(arguments),d=c.length,e=d!==1||a&&p.isFunction(a.promise)?d:0,f=e===1?a:p.Deferred(),g=function(a,b,c){return function(d){b[a]=this,c[a]=arguments.length>1?k.call(arguments):d,c===h?f.notifyWith(b,c):--e||f.resolveWith(b,c)}},h,i,j;if(d>1){h=new Array(d),i=new Array(d),j=new Array(d);for(;b
        a",c=n.getElementsByTagName("*"),d=n.getElementsByTagName("a")[0],d.style.cssText="top:1px;float:left;opacity:.5";if(!c||!c.length)return{};f=e.createElement("select"),g=f.appendChild(e.createElement("option")),h=n.getElementsByTagName("input")[0],b={leadingWhitespace:n.firstChild.nodeType===3,tbody:!n.getElementsByTagName("tbody").length,htmlSerialize:!!n.getElementsByTagName("link").length,style:/top/.test(d.getAttribute("style")),hrefNormalized:d.getAttribute("href")==="/a",opacity:/^0.5/.test(d.style.opacity),cssFloat:!!d.style.cssFloat,checkOn:h.value==="on",optSelected:g.selected,getSetAttribute:n.className!=="t",enctype:!!e.createElement("form").enctype,html5Clone:e.createElement("nav").cloneNode(!0).outerHTML!=="<:nav>",boxModel:e.compatMode==="CSS1Compat",submitBubbles:!0,changeBubbles:!0,focusinBubbles:!1,deleteExpando:!0,noCloneEvent:!0,inlineBlockNeedsLayout:!1,shrinkWrapBlocks:!1,reliableMarginRight:!0,boxSizingReliable:!0,pixelPosition:!1},h.checked=!0,b.noCloneChecked=h.cloneNode(!0).checked,f.disabled=!0,b.optDisabled=!g.disabled;try{delete n.test}catch(o){b.deleteExpando=!1}!n.addEventListener&&n.attachEvent&&n.fireEvent&&(n.attachEvent("onclick",m=function(){b.noCloneEvent=!1}),n.cloneNode(!0).fireEvent("onclick"),n.detachEvent("onclick",m)),h=e.createElement("input"),h.value="t",h.setAttribute("type","radio"),b.radioValue=h.value==="t",h.setAttribute("checked","checked"),h.setAttribute("name","t"),n.appendChild(h),i=e.createDocumentFragment(),i.appendChild(n.lastChild),b.checkClone=i.cloneNode(!0).cloneNode(!0).lastChild.checked,b.appendChecked=h.checked,i.removeChild(h),i.appendChild(n);if(n.attachEvent)for(k in{submit:!0,change:!0,focusin:!0})j="on"+k,l=j in n,l||(n.setAttribute(j,"return;"),l=typeof n[j]=="function"),b[k+"Bubbles"]=l;return p(function(){var c,d,f,g,h="padding:0;margin:0;border:0;display:block;overflow:hidden;",i=e.getElementsByTagName("body")[0];if(!i)return;c=e.createElement("div"),c.style.cssText="visibility:hidden;border:0;width:0;height:0;position:static;top:0;margin-top:1px",i.insertBefore(c,i.firstChild),d=e.createElement("div"),c.appendChild(d),d.innerHTML="
        t
        ",f=d.getElementsByTagName("td"),f[0].style.cssText="padding:0;margin:0;border:0;display:none",l=f[0].offsetHeight===0,f[0].style.display="",f[1].style.display="none",b.reliableHiddenOffsets=l&&f[0].offsetHeight===0,d.innerHTML="",d.style.cssText="box-sizing:border-box;-moz-box-sizing:border-box;-webkit-box-sizing:border-box;padding:1px;border:1px;display:block;width:4px;margin-top:1%;position:absolute;top:1%;",b.boxSizing=d.offsetWidth===4,b.doesNotIncludeMarginInBodyOffset=i.offsetTop!==1,a.getComputedStyle&&(b.pixelPosition=(a.getComputedStyle(d,null)||{}).top!=="1%",b.boxSizingReliable=(a.getComputedStyle(d,null)||{width:"4px"}).width==="4px",g=e.createElement("div"),g.style.cssText=d.style.cssText=h,g.style.marginRight=g.style.width="0",d.style.width="1px",d.appendChild(g),b.reliableMarginRight=!parseFloat((a.getComputedStyle(g,null)||{}).marginRight)),typeof d.style.zoom!="undefined"&&(d.innerHTML="",d.style.cssText=h+"width:1px;padding:1px;display:inline;zoom:1",b.inlineBlockNeedsLayout=d.offsetWidth===3,d.style.display="block",d.style.overflow="visible",d.innerHTML="
        ",d.firstChild.style.width="5px",b.shrinkWrapBlocks=d.offsetWidth!==3,c.style.zoom=1),i.removeChild(c),c=d=f=g=null}),i.removeChild(n),c=d=f=g=h=i=n=null,b}();var H=/(?:\{[\s\S]*\}|\[[\s\S]*\])$/,I=/([A-Z])/g;p.extend({cache:{},deletedIds:[],uuid:0,expando:"jQuery"+(p.fn.jquery+Math.random()).replace(/\D/g,""),noData:{embed:!0,object:"clsid:D27CDB6E-AE6D-11cf-96B8-444553540000",applet:!0},hasData:function(a){return a=a.nodeType?p.cache[a[p.expando]]:a[p.expando],!!a&&!K(a)},data:function(a,c,d,e){if(!p.acceptData(a))return;var f,g,h=p.expando,i=typeof c=="string",j=a.nodeType,k=j?p.cache:a,l=j?a[h]:a[h]&&h;if((!l||!k[l]||!e&&!k[l].data)&&i&&d===b)return;l||(j?a[h]=l=p.deletedIds.pop()||p.guid++:l=h),k[l]||(k[l]={},j||(k[l].toJSON=p.noop));if(typeof c=="object"||typeof c=="function")e?k[l]=p.extend(k[l],c):k[l].data=p.extend(k[l].data,c);return f=k[l],e||(f.data||(f.data={}),f=f.data),d!==b&&(f[p.camelCase(c)]=d),i?(g=f[c],g==null&&(g=f[p.camelCase(c)])):g=f,g},removeData:function(a,b,c){if(!p.acceptData(a))return;var d,e,f,g=a.nodeType,h=g?p.cache:a,i=g?a[p.expando]:p.expando;if(!h[i])return;if(b){d=c?h[i]:h[i].data;if(d){p.isArray(b)||(b in d?b=[b]:(b=p.camelCase(b),b in d?b=[b]:b=b.split(" ")));for(e=0,f=b.length;e1,null,!1))},removeData:function(a){return this.each(function(){p.removeData(this,a)})}}),p.extend({queue:function(a,b,c){var d;if(a)return b=(b||"fx")+"queue",d=p._data(a,b),c&&(!d||p.isArray(c)?d=p._data(a,b,p.makeArray(c)):d.push(c)),d||[]},dequeue:function(a,b){b=b||"fx";var c=p.queue(a,b),d=c.length,e=c.shift(),f=p._queueHooks(a,b),g=function(){p.dequeue(a,b)};e==="inprogress"&&(e=c.shift(),d--),e&&(b==="fx"&&c.unshift("inprogress"),delete f.stop,e.call(a,g,f)),!d&&f&&f.empty.fire()},_queueHooks:function(a,b){var c=b+"queueHooks";return p._data(a,c)||p._data(a,c,{empty:p.Callbacks("once memory").add(function(){p.removeData(a,b+"queue",!0),p.removeData(a,c,!0)})})}}),p.fn.extend({queue:function(a,c){var d=2;return typeof a!="string"&&(c=a,a="fx",d--),arguments.length1)},removeAttr:function(a){return this.each(function(){p.removeAttr(this,a)})},prop:function(a,b){return p.access(this,p.prop,a,b,arguments.length>1)},removeProp:function(a){return a=p.propFix[a]||a,this.each(function(){try{this[a]=b,delete this[a]}catch(c){}})},addClass:function(a){var b,c,d,e,f,g,h;if(p.isFunction(a))return this.each(function(b){p(this).addClass(a.call(this,b,this.className))});if(a&&typeof a=="string"){b=a.split(s);for(c=0,d=this.length;c=0)d=d.replace(" "+c[f]+" "," ");e.className=a?p.trim(d):""}}}return this},toggleClass:function(a,b){var c=typeof a,d=typeof b=="boolean";return p.isFunction(a)?this.each(function(c){p(this).toggleClass(a.call(this,c,this.className,b),b)}):this.each(function(){if(c==="string"){var e,f=0,g=p(this),h=b,i=a.split(s);while(e=i[f++])h=d?h:!g.hasClass(e),g[h?"addClass":"removeClass"](e)}else if(c==="undefined"||c==="boolean")this.className&&p._data(this,"__className__",this.className),this.className=this.className||a===!1?"":p._data(this,"__className__")||""})},hasClass:function(a){var b=" "+a+" ",c=0,d=this.length;for(;c=0)return!0;return!1},val:function(a){var c,d,e,f=this[0];if(!arguments.length){if(f)return c=p.valHooks[f.type]||p.valHooks[f.nodeName.toLowerCase()],c&&"get"in c&&(d=c.get(f,"value"))!==b?d:(d=f.value,typeof d=="string"?d.replace(P,""):d==null?"":d);return}return e=p.isFunction(a),this.each(function(d){var f,g=p(this);if(this.nodeType!==1)return;e?f=a.call(this,d,g.val()):f=a,f==null?f="":typeof 
f=="number"?f+="":p.isArray(f)&&(f=p.map(f,function(a){return a==null?"":a+""})),c=p.valHooks[this.type]||p.valHooks[this.nodeName.toLowerCase()];if(!c||!("set"in c)||c.set(this,f,"value")===b)this.value=f})}}),p.extend({valHooks:{option:{get:function(a){var b=a.attributes.value;return!b||b.specified?a.value:a.text}},select:{get:function(a){var b,c,d,e,f=a.selectedIndex,g=[],h=a.options,i=a.type==="select-one";if(f<0)return null;c=i?f:0,d=i?f+1:h.length;for(;c=0}),c.length||(a.selectedIndex=-1),c}}},attrFn:{},attr:function(a,c,d,e){var f,g,h,i=a.nodeType;if(!a||i===3||i===8||i===2)return;if(e&&p.isFunction(p.fn[c]))return p(a)[c](d);if(typeof a.getAttribute=="undefined")return p.prop(a,c,d);h=i!==1||!p.isXMLDoc(a),h&&(c=c.toLowerCase(),g=p.attrHooks[c]||(T.test(c)?M:L));if(d!==b){if(d===null){p.removeAttr(a,c);return}return g&&"set"in g&&h&&(f=g.set(a,d,c))!==b?f:(a.setAttribute(c,d+""),d)}return g&&"get"in g&&h&&(f=g.get(a,c))!==null?f:(f=a.getAttribute(c),f===null?b:f)},removeAttr:function(a,b){var c,d,e,f,g=0;if(b&&a.nodeType===1){d=b.split(s);for(;g=0}})});var V=/^(?:textarea|input|select)$/i,W=/^([^\.]*|)(?:\.(.+)|)$/,X=/(?:^|\s)hover(\.\S+|)\b/,Y=/^key/,Z=/^(?:mouse|contextmenu)|click/,$=/^(?:focusinfocus|focusoutblur)$/,_=function(a){return p.event.special.hover?a:a.replace(X,"mouseenter$1 mouseleave$1")};p.event={add:function(a,c,d,e,f){var g,h,i,j,k,l,m,n,o,q,r;if(a.nodeType===3||a.nodeType===8||!c||!d||!(g=p._data(a)))return;d.handler&&(o=d,d=o.handler,f=o.selector),d.guid||(d.guid=p.guid++),i=g.events,i||(g.events=i={}),h=g.handle,h||(g.handle=h=function(a){return typeof p!="undefined"&&(!a||p.event.triggered!==a.type)?p.event.dispatch.apply(h.elem,arguments):b},h.elem=a),c=p.trim(_(c)).split(" ");for(j=0;j=0&&(s=s.slice(0,-1),i=!0),s.indexOf(".")>=0&&(t=s.split("."),s=t.shift(),t.sort());if((!f||p.event.customEvent[s])&&!p.event.global[s])return;c=typeof c=="object"?c[p.expando]?c:new p.Event(s,c):new p.Event(s),c.type=s,c.isTrigger=!0,c.exclusive=i,c.namespace=t.join("."),c.namespace_re=c.namespace?new RegExp("(^|\\.)"+t.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,m=s.indexOf(":")<0?"on"+s:"";if(!f){h=p.cache;for(j in h)h[j].events&&h[j].events[s]&&p.event.trigger(c,d,h[j].handle.elem,!0);return}c.result=b,c.target||(c.target=f),d=d!=null?p.makeArray(d):[],d.unshift(c),n=p.event.special[s]||{};if(n.trigger&&n.trigger.apply(f,d)===!1)return;q=[[f,n.bindType||s]];if(!g&&!n.noBubble&&!p.isWindow(f)){r=n.delegateType||s,k=$.test(r+s)?f:f.parentNode;for(l=f;k;k=k.parentNode)q.push([k,r]),l=k;l===(f.ownerDocument||e)&&q.push([l.defaultView||l.parentWindow||a,r])}for(j=0;j=0:p.find(m,this,null,[f]).length),h[m]&&j.push(l);j.length&&u.push({elem:f,matches:j})}o.length>q&&u.push({elem:this,matches:o.slice(q)});for(d=0;d0?this.on(b,null,a,c):this.trigger(b)},Y.test(b)&&(p.event.fixHooks[b]=p.event.keyHooks),Z.test(b)&&(p.event.fixHooks[b]=p.event.mouseHooks)}),function(a,b){function bc(a,b,c,d){c=c||[],b=b||r;var e,f,i,j,k=b.nodeType;if(!a||typeof a!="string")return c;if(k!==1&&k!==9)return[];i=g(b);if(!i&&!d)if(e=P.exec(a))if(j=e[1]){if(k===9){f=b.getElementById(j);if(!f||!f.parentNode)return c;if(f.id===j)return c.push(f),c}else if(b.ownerDocument&&(f=b.ownerDocument.getElementById(j))&&h(b,f)&&f.id===j)return c.push(f),c}else{if(e[2])return w.apply(c,x.call(b.getElementsByTagName(a),0)),c;if((j=e[3])&&_&&b.getElementsByClassName)return w.apply(c,x.call(b.getElementsByClassName(j),0)),c}return bp(a.replace(L,"$1"),b,c,d,i)}function bd(a){return function(b){var 
c=b.nodeName.toLowerCase();return c==="input"&&b.type===a}}function be(a){return function(b){var c=b.nodeName.toLowerCase();return(c==="input"||c==="button")&&b.type===a}}function bf(a){return z(function(b){return b=+b,z(function(c,d){var e,f=a([],c.length,b),g=f.length;while(g--)c[e=f[g]]&&(c[e]=!(d[e]=c[e]))})})}function bg(a,b,c){if(a===b)return c;var d=a.nextSibling;while(d){if(d===b)return-1;d=d.nextSibling}return 1}function bh(a,b){var c,d,f,g,h,i,j,k=C[o][a];if(k)return b?0:k.slice(0);h=a,i=[],j=e.preFilter;while(h){if(!c||(d=M.exec(h)))d&&(h=h.slice(d[0].length)),i.push(f=[]);c=!1;if(d=N.exec(h))f.push(c=new q(d.shift())),h=h.slice(c.length),c.type=d[0].replace(L," ");for(g in e.filter)(d=W[g].exec(h))&&(!j[g]||(d=j[g](d,r,!0)))&&(f.push(c=new q(d.shift())),h=h.slice(c.length),c.type=g,c.matches=d);if(!c)break}return b?h.length:h?bc.error(a):C(a,i).slice(0)}function bi(a,b,d){var e=b.dir,f=d&&b.dir==="parentNode",g=u++;return b.first?function(b,c,d){while(b=b[e])if(f||b.nodeType===1)return a(b,c,d)}:function(b,d,h){if(!h){var i,j=t+" "+g+" ",k=j+c;while(b=b[e])if(f||b.nodeType===1){if((i=b[o])===k)return b.sizset;if(typeof i=="string"&&i.indexOf(j)===0){if(b.sizset)return b}else{b[o]=k;if(a(b,d,h))return b.sizset=!0,b;b.sizset=!1}}}else while(b=b[e])if(f||b.nodeType===1)if(a(b,d,h))return b}}function bj(a){return a.length>1?function(b,c,d){var e=a.length;while(e--)if(!a[e](b,c,d))return!1;return!0}:a[0]}function bk(a,b,c,d,e){var f,g=[],h=0,i=a.length,j=b!=null;for(;h-1},h,!0),m=[function(a,c,d){return!g&&(d||c!==l)||((b=c).nodeType?j(a,c,d):k(a,c,d))}];for(;i1&&bj(m),i>1&&a.slice(0,i-1).join("").replace(L,"$1"),c,i0,f=a.length>0,g=function(h,i,j,k,m){var n,o,p,q=[],s=0,u="0",x=h&&[],y=m!=null,z=l,A=h||f&&e.find.TAG("*",m&&i.parentNode||i),B=t+=z==null?1:Math.E;y&&(l=i!==r&&i,c=g.el);for(;(n=A[u])!=null;u++){if(f&&n){for(o=0;p=a[o];o++)if(p(n,i,j)){k.push(n);break}y&&(t=B,c=++g.el)}d&&((n=!p&&n)&&s--,h&&x.push(n))}s+=u;if(d&&u!==s){for(o=0;p=b[o];o++)p(x,q,i,j);if(h){if(s>0)while(u--)!x[u]&&!q[u]&&(q[u]=v.call(k));q=bk(q)}w.apply(k,q),y&&!h&&q.length>0&&s+b.length>1&&bc.uniqueSort(k)}return y&&(t=B,l=z),x};return g.el=0,d?z(g):g}function bo(a,b,c,d){var e=0,f=b.length;for(;e2&&(j=h[0]).type==="ID"&&b.nodeType===9&&!f&&e.relative[h[1].type]){b=e.find.ID(j.matches[0].replace(V,""),b,f)[0];if(!b)return c;a=a.slice(h.shift().length)}for(g=W.POS.test(a)?-1:h.length-1;g>=0;g--){j=h[g];if(e.relative[k=j.type])break;if(l=e.find[k])if(d=l(j.matches[0].replace(V,""),R.test(h[0].type)&&b.parentNode||b,f)){h.splice(g,1),a=d.length&&h.join("");if(!a)return w.apply(c,x.call(d,0)),c;break}}}return i(a,m)(d,b,f,c,R.test(a)),c}function bq(){}var c,d,e,f,g,h,i,j,k,l,m=!0,n="undefined",o=("sizcache"+Math.random()).replace(".",""),q=String,r=a.document,s=r.documentElement,t=0,u=0,v=[].pop,w=[].push,x=[].slice,y=[].indexOf||function(a){var b=0,c=this.length;for(;be.cacheLength&&delete a[b.shift()],a[c]=d},a)},B=A(),C=A(),D=A(),E="[\\x20\\t\\r\\n\\f]",F="(?:\\\\.|[-\\w]|[^\\x00-\\xa0])+",G=F.replace("w","w#"),H="([*^$|!~]?=)",I="\\["+E+"*("+F+")"+E+"*(?:"+H+E+"*(?:(['\"])((?:\\\\.|[^\\\\])*?)\\3|("+G+")|)|)"+E+"*\\]",J=":("+F+")(?:\\((?:(['\"])((?:\\\\.|[^\\\\])*?)\\2|([^()[\\]]*|(?:(?:"+I+")|[^:]|\\\\.)*|.*))\\)|)",K=":(even|odd|eq|gt|lt|nth|first|last)(?:\\("+E+"*((?:-\\d)?\\d*)"+E+"*\\)|)(?=[^-]|$)",L=new RegExp("^"+E+"+|((?:^|[^\\\\])(?:\\\\.)*)"+E+"+$","g"),M=new RegExp("^"+E+"*,"+E+"*"),N=new RegExp("^"+E+"*([\\x20\\t\\r\\n\\f>+~])"+E+"*"),O=new 
RegExp(J),P=/^(?:#([\w\-]+)|(\w+)|\.([\w\-]+))$/,Q=/^:not/,R=/[\x20\t\r\n\f]*[+~]/,S=/:not\($/,T=/h\d/i,U=/input|select|textarea|button/i,V=/\\(?!\\)/g,W={ID:new RegExp("^#("+F+")"),CLASS:new RegExp("^\\.("+F+")"),NAME:new RegExp("^\\[name=['\"]?("+F+")['\"]?\\]"),TAG:new RegExp("^("+F.replace("w","w*")+")"),ATTR:new RegExp("^"+I),PSEUDO:new RegExp("^"+J),POS:new RegExp(K,"i"),CHILD:new RegExp("^:(only|nth|first|last)-child(?:\\("+E+"*(even|odd|(([+-]|)(\\d*)n|)"+E+"*(?:([+-]|)"+E+"*(\\d+)|))"+E+"*\\)|)","i"),needsContext:new RegExp("^"+E+"*[>+~]|"+K,"i")},X=function(a){var b=r.createElement("div");try{return a(b)}catch(c){return!1}finally{b=null}},Y=X(function(a){return a.appendChild(r.createComment("")),!a.getElementsByTagName("*").length}),Z=X(function(a){return a.innerHTML="",a.firstChild&&typeof a.firstChild.getAttribute!==n&&a.firstChild.getAttribute("href")==="#"}),$=X(function(a){a.innerHTML="";var b=typeof a.lastChild.getAttribute("multiple");return b!=="boolean"&&b!=="string"}),_=X(function(a){return a.innerHTML="",!a.getElementsByClassName||!a.getElementsByClassName("e").length?!1:(a.lastChild.className="e",a.getElementsByClassName("e").length===2)}),ba=X(function(a){a.id=o+0,a.innerHTML="
        ",s.insertBefore(a,s.firstChild);var b=r.getElementsByName&&r.getElementsByName(o).length===2+r.getElementsByName(o+0).length;return d=!r.getElementById(o),s.removeChild(a),b});try{x.call(s.childNodes,0)[0].nodeType}catch(bb){x=function(a){var b,c=[];for(;b=this[a];a++)c.push(b);return c}}bc.matches=function(a,b){return bc(a,null,null,b)},bc.matchesSelector=function(a,b){return bc(b,null,null,[a]).length>0},f=bc.getText=function(a){var b,c="",d=0,e=a.nodeType;if(e){if(e===1||e===9||e===11){if(typeof a.textContent=="string")return a.textContent;for(a=a.firstChild;a;a=a.nextSibling)c+=f(a)}else if(e===3||e===4)return a.nodeValue}else for(;b=a[d];d++)c+=f(b);return c},g=bc.isXML=function(a){var b=a&&(a.ownerDocument||a).documentElement;return b?b.nodeName!=="HTML":!1},h=bc.contains=s.contains?function(a,b){var c=a.nodeType===9?a.documentElement:a,d=b&&b.parentNode;return a===d||!!(d&&d.nodeType===1&&c.contains&&c.contains(d))}:s.compareDocumentPosition?function(a,b){return b&&!!(a.compareDocumentPosition(b)&16)}:function(a,b){while(b=b.parentNode)if(b===a)return!0;return!1},bc.attr=function(a,b){var c,d=g(a);return d||(b=b.toLowerCase()),(c=e.attrHandle[b])?c(a):d||$?a.getAttribute(b):(c=a.getAttributeNode(b),c?typeof a[b]=="boolean"?a[b]?b:null:c.specified?c.value:null:null)},e=bc.selectors={cacheLength:50,createPseudo:z,match:W,attrHandle:Z?{}:{href:function(a){return a.getAttribute("href",2)},type:function(a){return a.getAttribute("type")}},find:{ID:d?function(a,b,c){if(typeof b.getElementById!==n&&!c){var d=b.getElementById(a);return d&&d.parentNode?[d]:[]}}:function(a,c,d){if(typeof c.getElementById!==n&&!d){var e=c.getElementById(a);return e?e.id===a||typeof e.getAttributeNode!==n&&e.getAttributeNode("id").value===a?[e]:b:[]}},TAG:Y?function(a,b){if(typeof b.getElementsByTagName!==n)return b.getElementsByTagName(a)}:function(a,b){var c=b.getElementsByTagName(a);if(a==="*"){var d,e=[],f=0;for(;d=c[f];f++)d.nodeType===1&&e.push(d);return e}return c},NAME:ba&&function(a,b){if(typeof b.getElementsByName!==n)return b.getElementsByName(name)},CLASS:_&&function(a,b,c){if(typeof b.getElementsByClassName!==n&&!c)return b.getElementsByClassName(a)}},relative:{">":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(a){return a[1]=a[1].replace(V,""),a[3]=(a[4]||a[5]||"").replace(V,""),a[2]==="~="&&(a[3]=" "+a[3]+" "),a.slice(0,4)},CHILD:function(a){return a[1]=a[1].toLowerCase(),a[1]==="nth"?(a[2]||bc.error(a[0]),a[3]=+(a[3]?a[4]+(a[5]||1):2*(a[2]==="even"||a[2]==="odd")),a[4]=+(a[6]+a[7]||a[2]==="odd")):a[2]&&bc.error(a[0]),a},PSEUDO:function(a){var b,c;if(W.CHILD.test(a[0]))return null;if(a[3])a[2]=a[3];else if(b=a[4])O.test(b)&&(c=bh(b,!0))&&(c=b.indexOf(")",b.length-c)-b.length)&&(b=b.slice(0,c),a[0]=a[0].slice(0,c)),a[2]=b;return a.slice(0,3)}},filter:{ID:d?function(a){return a=a.replace(V,""),function(b){return b.getAttribute("id")===a}}:function(a){return a=a.replace(V,""),function(b){var c=typeof b.getAttributeNode!==n&&b.getAttributeNode("id");return c&&c.value===a}},TAG:function(a){return a==="*"?function(){return!0}:(a=a.replace(V,"").toLowerCase(),function(b){return b.nodeName&&b.nodeName.toLowerCase()===a})},CLASS:function(a){var b=B[o][a];return b||(b=B(a,new RegExp("(^|"+E+")"+a+"("+E+"|$)"))),function(a){return b.test(a.className||typeof a.getAttribute!==n&&a.getAttribute("class")||"")}},ATTR:function(a,b,c){return function(d,e){var f=bc.attr(d,a);return 
f==null?b==="!=":b?(f+="",b==="="?f===c:b==="!="?f!==c:b==="^="?c&&f.indexOf(c)===0:b==="*="?c&&f.indexOf(c)>-1:b==="$="?c&&f.substr(f.length-c.length)===c:b==="~="?(" "+f+" ").indexOf(c)>-1:b==="|="?f===c||f.substr(0,c.length+1)===c+"-":!1):!0}},CHILD:function(a,b,c,d){return a==="nth"?function(a){var b,e,f=a.parentNode;if(c===1&&d===0)return!0;if(f){e=0;for(b=f.firstChild;b;b=b.nextSibling)if(b.nodeType===1){e++;if(a===b)break}}return e-=d,e===c||e%c===0&&e/c>=0}:function(b){var c=b;switch(a){case"only":case"first":while(c=c.previousSibling)if(c.nodeType===1)return!1;if(a==="first")return!0;c=b;case"last":while(c=c.nextSibling)if(c.nodeType===1)return!1;return!0}}},PSEUDO:function(a,b){var c,d=e.pseudos[a]||e.setFilters[a.toLowerCase()]||bc.error("unsupported pseudo: "+a);return d[o]?d(b):d.length>1?(c=[a,a,"",b],e.setFilters.hasOwnProperty(a.toLowerCase())?z(function(a,c){var e,f=d(a,b),g=f.length;while(g--)e=y.call(a,f[g]),a[e]=!(c[e]=f[g])}):function(a){return d(a,0,c)}):d}},pseudos:{not:z(function(a){var b=[],c=[],d=i(a.replace(L,"$1"));return d[o]?z(function(a,b,c,e){var f,g=d(a,null,e,[]),h=a.length;while(h--)if(f=g[h])a[h]=!(b[h]=f)}):function(a,e,f){return b[0]=a,d(b,null,f,c),!c.pop()}}),has:z(function(a){return function(b){return bc(a,b).length>0}}),contains:z(function(a){return function(b){return(b.textContent||b.innerText||f(b)).indexOf(a)>-1}}),enabled:function(a){return a.disabled===!1},disabled:function(a){return a.disabled===!0},checked:function(a){var b=a.nodeName.toLowerCase();return b==="input"&&!!a.checked||b==="option"&&!!a.selected},selected:function(a){return a.parentNode&&a.parentNode.selectedIndex,a.selected===!0},parent:function(a){return!e.pseudos.empty(a)},empty:function(a){var b;a=a.firstChild;while(a){if(a.nodeName>"@"||(b=a.nodeType)===3||b===4)return!1;a=a.nextSibling}return!0},header:function(a){return T.test(a.nodeName)},text:function(a){var b,c;return a.nodeName.toLowerCase()==="input"&&(b=a.type)==="text"&&((c=a.getAttribute("type"))==null||c.toLowerCase()===b)},radio:bd("radio"),checkbox:bd("checkbox"),file:bd("file"),password:bd("password"),image:bd("image"),submit:be("submit"),reset:be("reset"),button:function(a){var b=a.nodeName.toLowerCase();return b==="input"&&a.type==="button"||b==="button"},input:function(a){return U.test(a.nodeName)},focus:function(a){var b=a.ownerDocument;return a===b.activeElement&&(!b.hasFocus||b.hasFocus())&&(!!a.type||!!a.href)},active:function(a){return a===a.ownerDocument.activeElement},first:bf(function(a,b,c){return[0]}),last:bf(function(a,b,c){return[b-1]}),eq:bf(function(a,b,c){return[c<0?c+b:c]}),even:bf(function(a,b,c){for(var d=0;d=0;)a.push(d);return a}),gt:bf(function(a,b,c){for(var d=c<0?c+b:c;++d",a.querySelectorAll("[selected]").length||e.push("\\["+E+"*(?:checked|disabled|ismap|multiple|readonly|selected|value)"),a.querySelectorAll(":checked").length||e.push(":checked")}),X(function(a){a.innerHTML="

        ",a.querySelectorAll("[test^='']").length&&e.push("[*^$]="+E+"*(?:\"\"|'')"),a.innerHTML="",a.querySelectorAll(":enabled").length||e.push(":enabled",":disabled")}),e=new RegExp(e.join("|")),bp=function(a,d,f,g,h){if(!g&&!h&&(!e||!e.test(a))){var i,j,k=!0,l=o,m=d,n=d.nodeType===9&&a;if(d.nodeType===1&&d.nodeName.toLowerCase()!=="object"){i=bh(a),(k=d.getAttribute("id"))?l=k.replace(c,"\\$&"):d.setAttribute("id",l),l="[id='"+l+"'] ",j=i.length;while(j--)i[j]=l+i[j].join("");m=R.test(a)&&d.parentNode||d,n=i.join(",")}if(n)try{return w.apply(f,x.call(m.querySelectorAll(n),0)),f}catch(p){}finally{k||d.removeAttribute("id")}}return b(a,d,f,g,h)},h&&(X(function(b){a=h.call(b,"div");try{h.call(b,"[test!='']:sizzle"),f.push("!=",J)}catch(c){}}),f=new RegExp(f.join("|")),bc.matchesSelector=function(b,c){c=c.replace(d,"='$1']");if(!g(b)&&!f.test(c)&&(!e||!e.test(c)))try{var i=h.call(b,c);if(i||a||b.document&&b.document.nodeType!==11)return i}catch(j){}return bc(c,null,null,[b]).length>0})}(),e.pseudos.nth=e.pseudos.eq,e.filters=bq.prototype=e.pseudos,e.setFilters=new bq,bc.attr=p.attr,p.find=bc,p.expr=bc.selectors,p.expr[":"]=p.expr.pseudos,p.unique=bc.uniqueSort,p.text=bc.getText,p.isXMLDoc=bc.isXML,p.contains=bc.contains}(a);var bc=/Until$/,bd=/^(?:parents|prev(?:Until|All))/,be=/^.[^:#\[\.,]*$/,bf=p.expr.match.needsContext,bg={children:!0,contents:!0,next:!0,prev:!0};p.fn.extend({find:function(a){var b,c,d,e,f,g,h=this;if(typeof a!="string")return p(a).filter(function(){for(b=0,c=h.length;b0)for(e=d;e=0:p.filter(a,this).length>0:this.filter(a).length>0)},closest:function(a,b){var c,d=0,e=this.length,f=[],g=bf.test(a)||typeof a!="string"?p(a,b||this.context):0;for(;d-1:p.find.matchesSelector(c,a)){f.push(c);break}c=c.parentNode}}return f=f.length>1?p.unique(f):f,this.pushStack(f,"closest",a)},index:function(a){return a?typeof a=="string"?p.inArray(this[0],p(a)):p.inArray(a.jquery?a[0]:a,this):this[0]&&this[0].parentNode?this.prevAll().length:-1},add:function(a,b){var c=typeof a=="string"?p(a,b):p.makeArray(a&&a.nodeType?[a]:a),d=p.merge(this.get(),c);return this.pushStack(bh(c[0])||bh(d[0])?d:p.unique(d))},addBack:function(a){return this.add(a==null?this.prevObject:this.prevObject.filter(a))}}),p.fn.andSelf=p.fn.addBack,p.each({parent:function(a){var b=a.parentNode;return b&&b.nodeType!==11?b:null},parents:function(a){return p.dir(a,"parentNode")},parentsUntil:function(a,b,c){return p.dir(a,"parentNode",c)},next:function(a){return bi(a,"nextSibling")},prev:function(a){return bi(a,"previousSibling")},nextAll:function(a){return p.dir(a,"nextSibling")},prevAll:function(a){return p.dir(a,"previousSibling")},nextUntil:function(a,b,c){return p.dir(a,"nextSibling",c)},prevUntil:function(a,b,c){return p.dir(a,"previousSibling",c)},siblings:function(a){return p.sibling((a.parentNode||{}).firstChild,a)},children:function(a){return p.sibling(a.firstChild)},contents:function(a){return p.nodeName(a,"iframe")?a.contentDocument||a.contentWindow.document:p.merge([],a.childNodes)}},function(a,b){p.fn[a]=function(c,d){var e=p.map(this,b,c);return bc.test(a)||(d=c),d&&typeof d=="string"&&(e=p.filter(d,e)),e=this.length>1&&!bg[a]?p.unique(e):e,this.length>1&&bd.test(a)&&(e=e.reverse()),this.pushStack(e,a,k.call(arguments).join(","))}}),p.extend({filter:function(a,b,c){return c&&(a=":not("+a+")"),b.length===1?p.find.matchesSelector(b[0],a)?[b[0]]:[]:p.find.matches(a,b)},dir:function(a,c,d){var e=[],f=a[c];while(f&&f.nodeType!==9&&(d===b||f.nodeType!==1||!p(f).is(d)))f.nodeType===1&&e.push(f),f=f[c];return 
e},sibling:function(a,b){var c=[];for(;a;a=a.nextSibling)a.nodeType===1&&a!==b&&c.push(a);return c}});var bl="abbr|article|aside|audio|bdi|canvas|data|datalist|details|figcaption|figure|footer|header|hgroup|mark|meter|nav|output|progress|section|summary|time|video",bm=/ jQuery\d+="(?:null|\d+)"/g,bn=/^\s+/,bo=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/gi,bp=/<([\w:]+)/,bq=/
  • ","
    "],tr:[2,"","
    "],td:[3,"","
    "],col:[2,"","
    "],area:[1,"",""],_default:[0,"",""]},bA=bk(e),bB=bA.appendChild(e.createElement("div"));bz.optgroup=bz.option,bz.tbody=bz.tfoot=bz.colgroup=bz.caption=bz.thead,bz.th=bz.td,p.support.htmlSerialize||(bz._default=[1,"X
    ","
    "]),p.fn.extend({text:function(a){return p.access(this,function(a){return a===b?p.text(this):this.empty().append((this[0]&&this[0].ownerDocument||e).createTextNode(a))},null,a,arguments.length)},wrapAll:function(a){if(p.isFunction(a))return this.each(function(b){p(this).wrapAll(a.call(this,b))});if(this[0]){var b=p(a,this[0].ownerDocument).eq(0).clone(!0);this[0].parentNode&&b.insertBefore(this[0]),b.map(function(){var a=this;while(a.firstChild&&a.firstChild.nodeType===1)a=a.firstChild;return a}).append(this)}return this},wrapInner:function(a){return p.isFunction(a)?this.each(function(b){p(this).wrapInner(a.call(this,b))}):this.each(function(){var b=p(this),c=b.contents();c.length?c.wrapAll(a):b.append(a)})},wrap:function(a){var b=p.isFunction(a);return this.each(function(c){p(this).wrapAll(b?a.call(this,c):a)})},unwrap:function(){return this.parent().each(function(){p.nodeName(this,"body")||p(this).replaceWith(this.childNodes)}).end()},append:function(){return this.domManip(arguments,!0,function(a){(this.nodeType===1||this.nodeType===11)&&this.appendChild(a)})},prepend:function(){return this.domManip(arguments,!0,function(a){(this.nodeType===1||this.nodeType===11)&&this.insertBefore(a,this.firstChild)})},before:function(){if(!bh(this[0]))return this.domManip(arguments,!1,function(a){this.parentNode.insertBefore(a,this)});if(arguments.length){var a=p.clean(arguments);return this.pushStack(p.merge(a,this),"before",this.selector)}},after:function(){if(!bh(this[0]))return this.domManip(arguments,!1,function(a){this.parentNode.insertBefore(a,this.nextSibling)});if(arguments.length){var a=p.clean(arguments);return this.pushStack(p.merge(this,a),"after",this.selector)}},remove:function(a,b){var c,d=0;for(;(c=this[d])!=null;d++)if(!a||p.filter(a,[c]).length)!b&&c.nodeType===1&&(p.cleanData(c.getElementsByTagName("*")),p.cleanData([c])),c.parentNode&&c.parentNode.removeChild(c);return this},empty:function(){var a,b=0;for(;(a=this[b])!=null;b++){a.nodeType===1&&p.cleanData(a.getElementsByTagName("*"));while(a.firstChild)a.removeChild(a.firstChild)}return this},clone:function(a,b){return a=a==null?!1:a,b=b==null?a:b,this.map(function(){return p.clone(this,a,b)})},html:function(a){return p.access(this,function(a){var c=this[0]||{},d=0,e=this.length;if(a===b)return c.nodeType===1?c.innerHTML.replace(bm,""):b;if(typeof a=="string"&&!bs.test(a)&&(p.support.htmlSerialize||!bu.test(a))&&(p.support.leadingWhitespace||!bn.test(a))&&!bz[(bp.exec(a)||["",""])[1].toLowerCase()]){a=a.replace(bo,"<$1>");try{for(;d1&&typeof j=="string"&&bw.test(j))return this.each(function(){p(this).domManip(a,c,d)});if(p.isFunction(j))return this.each(function(e){var f=p(this);a[0]=j.call(this,e,c?f.html():b),f.domManip(a,c,d)});if(this[0]){e=p.buildFragment(a,this,k),g=e.fragment,f=g.firstChild,g.childNodes.length===1&&(g=f);if(f){c=c&&p.nodeName(f,"tr");for(h=e.cacheable||l-1;i0?this.clone(!0):this).get(),p(g[e])[b](d),f=f.concat(d);return this.pushStack(f,a,g.selector)}}),p.extend({clone:function(a,b,c){var d,e,f,g;p.support.html5Clone||p.isXMLDoc(a)||!bu.test("<"+a.nodeName+">")?g=a.cloneNode(!0):(bB.innerHTML=a.outerHTML,bB.removeChild(g=bB.firstChild));if((!p.support.noCloneEvent||!p.support.noCloneChecked)&&(a.nodeType===1||a.nodeType===11)&&!p.isXMLDoc(a)){bE(a,g),d=bF(a),e=bF(g);for(f=0;d[f];++f)e[f]&&bE(d[f],e[f])}if(b){bD(a,g);if(c){d=bF(a),e=bF(g);for(f=0;d[f];++f)bD(d[f],e[f])}}return d=e=null,g},clean:function(a,b,c,d){var f,g,h,i,j,k,l,m,n,o,q,r,s=b===e&&bA,t=[];if(!b||typeof 
b.createDocumentFragment=="undefined")b=e;for(f=0;(h=a[f])!=null;f++){typeof h=="number"&&(h+="");if(!h)continue;if(typeof h=="string")if(!br.test(h))h=b.createTextNode(h);else{s=s||bk(b),l=b.createElement("div"),s.appendChild(l),h=h.replace(bo,"<$1>"),i=(bp.exec(h)||["",""])[1].toLowerCase(),j=bz[i]||bz._default,k=j[0],l.innerHTML=j[1]+h+j[2];while(k--)l=l.lastChild;if(!p.support.tbody){m=bq.test(h),n=i==="table"&&!m?l.firstChild&&l.firstChild.childNodes:j[1]===""&&!m?l.childNodes:[];for(g=n.length-1;g>=0;--g)p.nodeName(n[g],"tbody")&&!n[g].childNodes.length&&n[g].parentNode.removeChild(n[g])}!p.support.leadingWhitespace&&bn.test(h)&&l.insertBefore(b.createTextNode(bn.exec(h)[0]),l.firstChild),h=l.childNodes,l.parentNode.removeChild(l)}h.nodeType?t.push(h):p.merge(t,h)}l&&(h=l=s=null);if(!p.support.appendChecked)for(f=0;(h=t[f])!=null;f++)p.nodeName(h,"input")?bG(h):typeof h.getElementsByTagName!="undefined"&&p.grep(h.getElementsByTagName("input"),bG);if(c){q=function(a){if(!a.type||bx.test(a.type))return d?d.push(a.parentNode?a.parentNode.removeChild(a):a):c.appendChild(a)};for(f=0;(h=t[f])!=null;f++)if(!p.nodeName(h,"script")||!q(h))c.appendChild(h),typeof h.getElementsByTagName!="undefined"&&(r=p.grep(p.merge([],h.getElementsByTagName("script")),q),t.splice.apply(t,[f+1,0].concat(r)),f+=r.length)}return t},cleanData:function(a,b){var c,d,e,f,g=0,h=p.expando,i=p.cache,j=p.support.deleteExpando,k=p.event.special;for(;(e=a[g])!=null;g++)if(b||p.acceptData(e)){d=e[h],c=d&&i[d];if(c){if(c.events)for(f in c.events)k[f]?p.event.remove(e,f):p.removeEvent(e,f,c.handle);i[d]&&(delete i[d],j?delete e[h]:e.removeAttribute?e.removeAttribute(h):e[h]=null,p.deletedIds.push(d))}}}}),function(){var a,b;p.uaMatch=function(a){a=a.toLowerCase();var b=/(chrome)[ \/]([\w.]+)/.exec(a)||/(webkit)[ \/]([\w.]+)/.exec(a)||/(opera)(?:.*version|)[ \/]([\w.]+)/.exec(a)||/(msie) ([\w.]+)/.exec(a)||a.indexOf("compatible")<0&&/(mozilla)(?:.*? 
rv:([\w.]+)|)/.exec(a)||[];return{browser:b[1]||"",version:b[2]||"0"}},a=p.uaMatch(g.userAgent),b={},a.browser&&(b[a.browser]=!0,b.version=a.version),b.chrome?b.webkit=!0:b.webkit&&(b.safari=!0),p.browser=b,p.sub=function(){function a(b,c){return new a.fn.init(b,c)}p.extend(!0,a,this),a.superclass=this,a.fn=a.prototype=this(),a.fn.constructor=a,a.sub=this.sub,a.fn.init=function c(c,d){return d&&d instanceof p&&!(d instanceof a)&&(d=a(d)),p.fn.init.call(this,c,d,b)},a.fn.init.prototype=a.fn;var b=a(e);return a}}();var bH,bI,bJ,bK=/alpha\([^)]*\)/i,bL=/opacity=([^)]*)/,bM=/^(top|right|bottom|left)$/,bN=/^(none|table(?!-c[ea]).+)/,bO=/^margin/,bP=new RegExp("^("+q+")(.*)$","i"),bQ=new RegExp("^("+q+")(?!px)[a-z%]+$","i"),bR=new RegExp("^([-+])=("+q+")","i"),bS={},bT={position:"absolute",visibility:"hidden",display:"block"},bU={letterSpacing:0,fontWeight:400},bV=["Top","Right","Bottom","Left"],bW=["Webkit","O","Moz","ms"],bX=p.fn.toggle;p.fn.extend({css:function(a,c){return p.access(this,function(a,c,d){return d!==b?p.style(a,c,d):p.css(a,c)},a,c,arguments.length>1)},show:function(){return b$(this,!0)},hide:function(){return b$(this)},toggle:function(a,b){var c=typeof a=="boolean";return p.isFunction(a)&&p.isFunction(b)?bX.apply(this,arguments):this.each(function(){(c?a:bZ(this))?p(this).show():p(this).hide()})}}),p.extend({cssHooks:{opacity:{get:function(a,b){if(b){var c=bH(a,"opacity");return c===""?"1":c}}}},cssNumber:{fillOpacity:!0,fontWeight:!0,lineHeight:!0,opacity:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{"float":p.support.cssFloat?"cssFloat":"styleFloat"},style:function(a,c,d,e){if(!a||a.nodeType===3||a.nodeType===8||!a.style)return;var f,g,h,i=p.camelCase(c),j=a.style;c=p.cssProps[i]||(p.cssProps[i]=bY(j,i)),h=p.cssHooks[c]||p.cssHooks[i];if(d===b)return h&&"get"in h&&(f=h.get(a,!1,e))!==b?f:j[c];g=typeof d,g==="string"&&(f=bR.exec(d))&&(d=(f[1]+1)*f[2]+parseFloat(p.css(a,c)),g="number");if(d==null||g==="number"&&isNaN(d))return;g==="number"&&!p.cssNumber[i]&&(d+="px");if(!h||!("set"in h)||(d=h.set(a,d,e))!==b)try{j[c]=d}catch(k){}},css:function(a,c,d,e){var f,g,h,i=p.camelCase(c);return c=p.cssProps[i]||(p.cssProps[i]=bY(a.style,i)),h=p.cssHooks[c]||p.cssHooks[i],h&&"get"in h&&(f=h.get(a,!0,e)),f===b&&(f=bH(a,c)),f==="normal"&&c in bU&&(f=bU[c]),d||e!==b?(g=parseFloat(f),d||p.isNumeric(g)?g||0:f):f},swap:function(a,b,c){var d,e,f={};for(e in b)f[e]=a.style[e],a.style[e]=b[e];d=c.call(a);for(e in b)a.style[e]=f[e];return d}}),a.getComputedStyle?bH=function(b,c){var d,e,f,g,h=a.getComputedStyle(b,null),i=b.style;return h&&(d=h[c],d===""&&!p.contains(b.ownerDocument,b)&&(d=p.style(b,c)),bQ.test(d)&&bO.test(c)&&(e=i.width,f=i.minWidth,g=i.maxWidth,i.minWidth=i.maxWidth=i.width=d,d=h.width,i.width=e,i.minWidth=f,i.maxWidth=g)),d}:e.documentElement.currentStyle&&(bH=function(a,b){var c,d,e=a.currentStyle&&a.currentStyle[b],f=a.style;return e==null&&f&&f[b]&&(e=f[b]),bQ.test(e)&&!bM.test(b)&&(c=f.left,d=a.runtimeStyle&&a.runtimeStyle.left,d&&(a.runtimeStyle.left=a.currentStyle.left),f.left=b==="fontSize"?"1em":e,e=f.pixelLeft+"px",f.left=c,d&&(a.runtimeStyle.left=d)),e===""?"auto":e}),p.each(["height","width"],function(a,b){p.cssHooks[b]={get:function(a,c,d){if(c)return a.offsetWidth===0&&bN.test(bH(a,"display"))?p.swap(a,bT,function(){return cb(a,b,d)}):cb(a,b,d)},set:function(a,c,d){return b_(a,c,d?ca(a,b,d,p.support.boxSizing&&p.css(a,"boxSizing")==="border-box"):0)}}}),p.support.opacity||(p.cssHooks.opacity={get:function(a,b){return 
bL.test((b&&a.currentStyle?a.currentStyle.filter:a.style.filter)||"")?.01*parseFloat(RegExp.$1)+"":b?"1":""},set:function(a,b){var c=a.style,d=a.currentStyle,e=p.isNumeric(b)?"alpha(opacity="+b*100+")":"",f=d&&d.filter||c.filter||"";c.zoom=1;if(b>=1&&p.trim(f.replace(bK,""))===""&&c.removeAttribute){c.removeAttribute("filter");if(d&&!d.filter)return}c.filter=bK.test(f)?f.replace(bK,e):f+" "+e}}),p(function(){p.support.reliableMarginRight||(p.cssHooks.marginRight={get:function(a,b){return p.swap(a,{display:"inline-block"},function(){if(b)return bH(a,"marginRight")})}}),!p.support.pixelPosition&&p.fn.position&&p.each(["top","left"],function(a,b){p.cssHooks[b]={get:function(a,c){if(c){var d=bH(a,b);return bQ.test(d)?p(a).position()[b]+"px":d}}}})}),p.expr&&p.expr.filters&&(p.expr.filters.hidden=function(a){return a.offsetWidth===0&&a.offsetHeight===0||!p.support.reliableHiddenOffsets&&(a.style&&a.style.display||bH(a,"display"))==="none"},p.expr.filters.visible=function(a){return!p.expr.filters.hidden(a)}),p.each({margin:"",padding:"",border:"Width"},function(a,b){p.cssHooks[a+b]={expand:function(c){var d,e=typeof c=="string"?c.split(" "):[c],f={};for(d=0;d<4;d++)f[a+bV[d]+b]=e[d]||e[d-2]||e[0];return f}},bO.test(a)||(p.cssHooks[a+b].set=b_)});var cd=/%20/g,ce=/\[\]$/,cf=/\r?\n/g,cg=/^(?:color|date|datetime|datetime-local|email|hidden|month|number|password|range|search|tel|text|time|url|week)$/i,ch=/^(?:select|textarea)/i;p.fn.extend({serialize:function(){return p.param(this.serializeArray())},serializeArray:function(){return this.map(function(){return this.elements?p.makeArray(this.elements):this}).filter(function(){return this.name&&!this.disabled&&(this.checked||ch.test(this.nodeName)||cg.test(this.type))}).map(function(a,b){var c=p(this).val();return c==null?null:p.isArray(c)?p.map(c,function(a,c){return{name:b.name,value:a.replace(cf,"\r\n")}}):{name:b.name,value:c.replace(cf,"\r\n")}}).get()}}),p.param=function(a,c){var d,e=[],f=function(a,b){b=p.isFunction(b)?b():b==null?"":b,e[e.length]=encodeURIComponent(a)+"="+encodeURIComponent(b)};c===b&&(c=p.ajaxSettings&&p.ajaxSettings.traditional);if(p.isArray(a)||a.jquery&&!p.isPlainObject(a))p.each(a,function(){f(this.name,this.value)});else for(d in a)ci(d,a[d],c,f);return e.join("&").replace(cd,"+")};var cj,ck,cl=/#.*$/,cm=/^(.*?):[ \t]*([^\r\n]*)\r?$/mg,cn=/^(?:about|app|app\-storage|.+\-extension|file|res|widget):$/,co=/^(?:GET|HEAD)$/,cp=/^\/\//,cq=/\?/,cr=/)<[^<]*)*<\/script>/gi,cs=/([?&])_=[^&]*/,ct=/^([\w\+\.\-]+:)(?:\/\/([^\/?#:]*)(?::(\d+)|)|)/,cu=p.fn.load,cv={},cw={},cx=["*/"]+["*"];try{ck=f.href}catch(cy){ck=e.createElement("a"),ck.href="",ck=ck.href}cj=ct.exec(ck.toLowerCase())||[],p.fn.load=function(a,c,d){if(typeof a!="string"&&cu)return cu.apply(this,arguments);if(!this.length)return this;var e,f,g,h=this,i=a.indexOf(" ");return i>=0&&(e=a.slice(i,a.length),a=a.slice(0,i)),p.isFunction(c)?(d=c,c=b):c&&typeof c=="object"&&(f="POST"),p.ajax({url:a,type:f,dataType:"html",data:c,complete:function(a,b){d&&h.each(d,g||[a.responseText,b,a])}}).done(function(a){g=arguments,h.html(e?p("
    ").append(a.replace(cr,"")).find(e):a)}),this},p.each("ajaxStart ajaxStop ajaxComplete ajaxError ajaxSuccess ajaxSend".split(" "),function(a,b){p.fn[b]=function(a){return this.on(b,a)}}),p.each(["get","post"],function(a,c){p[c]=function(a,d,e,f){return p.isFunction(d)&&(f=f||e,e=d,d=b),p.ajax({type:c,url:a,data:d,success:e,dataType:f})}}),p.extend({getScript:function(a,c){return p.get(a,b,c,"script")},getJSON:function(a,b,c){return p.get(a,b,c,"json")},ajaxSetup:function(a,b){return b?cB(a,p.ajaxSettings):(b=a,a=p.ajaxSettings),cB(a,b),a},ajaxSettings:{url:ck,isLocal:cn.test(cj[1]),global:!0,type:"GET",contentType:"application/x-www-form-urlencoded; charset=UTF-8",processData:!0,async:!0,accepts:{xml:"application/xml, text/xml",html:"text/html",text:"text/plain",json:"application/json, text/javascript","*":cx},contents:{xml:/xml/,html:/html/,json:/json/},responseFields:{xml:"responseXML",text:"responseText"},converters:{"* text":a.String,"text html":!0,"text json":p.parseJSON,"text xml":p.parseXML},flatOptions:{context:!0,url:!0}},ajaxPrefilter:cz(cv),ajaxTransport:cz(cw),ajax:function(a,c){function y(a,c,f,i){var k,s,t,u,w,y=c;if(v===2)return;v=2,h&&clearTimeout(h),g=b,e=i||"",x.readyState=a>0?4:0,f&&(u=cC(l,x,f));if(a>=200&&a<300||a===304)l.ifModified&&(w=x.getResponseHeader("Last-Modified"),w&&(p.lastModified[d]=w),w=x.getResponseHeader("Etag"),w&&(p.etag[d]=w)),a===304?(y="notmodified",k=!0):(k=cD(l,u),y=k.state,s=k.data,t=k.error,k=!t);else{t=y;if(!y||a)y="error",a<0&&(a=0)}x.status=a,x.statusText=(c||y)+"",k?o.resolveWith(m,[s,y,x]):o.rejectWith(m,[x,y,t]),x.statusCode(r),r=b,j&&n.trigger("ajax"+(k?"Success":"Error"),[x,l,k?s:t]),q.fireWith(m,[x,y]),j&&(n.trigger("ajaxComplete",[x,l]),--p.active||p.event.trigger("ajaxStop"))}typeof a=="object"&&(c=a,a=b),c=c||{};var d,e,f,g,h,i,j,k,l=p.ajaxSetup({},c),m=l.context||l,n=m!==l&&(m.nodeType||m instanceof p)?p(m):p.event,o=p.Deferred(),q=p.Callbacks("once memory"),r=l.statusCode||{},t={},u={},v=0,w="canceled",x={readyState:0,setRequestHeader:function(a,b){if(!v){var c=a.toLowerCase();a=u[c]=u[c]||a,t[a]=b}return this},getAllResponseHeaders:function(){return v===2?e:null},getResponseHeader:function(a){var c;if(v===2){if(!f){f={};while(c=cm.exec(e))f[c[1].toLowerCase()]=c[2]}c=f[a.toLowerCase()]}return c===b?null:c},overrideMimeType:function(a){return v||(l.mimeType=a),this},abort:function(a){return a=a||w,g&&g.abort(a),y(0,a),this}};o.promise(x),x.success=x.done,x.error=x.fail,x.complete=q.add,x.statusCode=function(a){if(a){var b;if(v<2)for(b in a)r[b]=[r[b],a[b]];else b=a[x.status],x.always(b)}return this},l.url=((a||l.url)+"").replace(cl,"").replace(cp,cj[1]+"//"),l.dataTypes=p.trim(l.dataType||"*").toLowerCase().split(s),l.crossDomain==null&&(i=ct.exec(l.url.toLowerCase())||!1,l.crossDomain=i&&i.join(":")+(i[3]?"":i[1]==="http:"?80:443)!==cj.join(":")+(cj[3]?"":cj[1]==="http:"?80:443)),l.data&&l.processData&&typeof l.data!="string"&&(l.data=p.param(l.data,l.traditional)),cA(cv,l,c,x);if(v===2)return x;j=l.global,l.type=l.type.toUpperCase(),l.hasContent=!co.test(l.type),j&&p.active++===0&&p.event.trigger("ajaxStart");if(!l.hasContent){l.data&&(l.url+=(cq.test(l.url)?"&":"?")+l.data,delete l.data),d=l.url;if(l.cache===!1){var 
z=p.now(),A=l.url.replace(cs,"$1_="+z);l.url=A+(A===l.url?(cq.test(l.url)?"&":"?")+"_="+z:"")}}(l.data&&l.hasContent&&l.contentType!==!1||c.contentType)&&x.setRequestHeader("Content-Type",l.contentType),l.ifModified&&(d=d||l.url,p.lastModified[d]&&x.setRequestHeader("If-Modified-Since",p.lastModified[d]),p.etag[d]&&x.setRequestHeader("If-None-Match",p.etag[d])),x.setRequestHeader("Accept",l.dataTypes[0]&&l.accepts[l.dataTypes[0]]?l.accepts[l.dataTypes[0]]+(l.dataTypes[0]!=="*"?", "+cx+"; q=0.01":""):l.accepts["*"]);for(k in l.headers)x.setRequestHeader(k,l.headers[k]);if(!l.beforeSend||l.beforeSend.call(m,x,l)!==!1&&v!==2){w="abort";for(k in{success:1,error:1,complete:1})x[k](l[k]);g=cA(cw,l,c,x);if(!g)y(-1,"No Transport");else{x.readyState=1,j&&n.trigger("ajaxSend",[x,l]),l.async&&l.timeout>0&&(h=setTimeout(function(){x.abort("timeout")},l.timeout));try{v=1,g.send(t,y)}catch(B){if(v<2)y(-1,B);else throw B}}return x}return x.abort()},active:0,lastModified:{},etag:{}});var cE=[],cF=/\?/,cG=/(=)\?(?=&|$)|\?\?/,cH=p.now();p.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var a=cE.pop()||p.expando+"_"+cH++;return this[a]=!0,a}}),p.ajaxPrefilter("json jsonp",function(c,d,e){var f,g,h,i=c.data,j=c.url,k=c.jsonp!==!1,l=k&&cG.test(j),m=k&&!l&&typeof i=="string"&&!(c.contentType||"").indexOf("application/x-www-form-urlencoded")&&cG.test(i);if(c.dataTypes[0]==="jsonp"||l||m)return f=c.jsonpCallback=p.isFunction(c.jsonpCallback)?c.jsonpCallback():c.jsonpCallback,g=a[f],l?c.url=j.replace(cG,"$1"+f):m?c.data=i.replace(cG,"$1"+f):k&&(c.url+=(cF.test(j)?"&":"?")+c.jsonp+"="+f),c.converters["script json"]=function(){return h||p.error(f+" was not called"),h[0]},c.dataTypes[0]="json",a[f]=function(){h=arguments},e.always(function(){a[f]=g,c[f]&&(c.jsonpCallback=d.jsonpCallback,cE.push(f)),h&&p.isFunction(g)&&g(h[0]),h=g=b}),"script"}),p.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/javascript|ecmascript/},converters:{"text script":function(a){return p.globalEval(a),a}}}),p.ajaxPrefilter("script",function(a){a.cache===b&&(a.cache=!1),a.crossDomain&&(a.type="GET",a.global=!1)}),p.ajaxTransport("script",function(a){if(a.crossDomain){var c,d=e.head||e.getElementsByTagName("head")[0]||e.documentElement;return{send:function(f,g){c=e.createElement("script"),c.async="async",a.scriptCharset&&(c.charset=a.scriptCharset),c.src=a.url,c.onload=c.onreadystatechange=function(a,e){if(e||!c.readyState||/loaded|complete/.test(c.readyState))c.onload=c.onreadystatechange=null,d&&c.parentNode&&d.removeChild(c),c=b,e||g(200,"success")},d.insertBefore(c,d.firstChild)},abort:function(){c&&c.onload(0,1)}}}});var cI,cJ=a.ActiveXObject?function(){for(var a in cI)cI[a](0,1)}:!1,cK=0;p.ajaxSettings.xhr=a.ActiveXObject?function(){return!this.isLocal&&cL()||cM()}:cL,function(a){p.extend(p.support,{ajax:!!a,cors:!!a&&"withCredentials"in a})}(p.ajaxSettings.xhr()),p.support.ajax&&p.ajaxTransport(function(c){if(!c.crossDomain||p.support.cors){var d;return{send:function(e,f){var g,h,i=c.xhr();c.username?i.open(c.type,c.url,c.async,c.username,c.password):i.open(c.type,c.url,c.async);if(c.xhrFields)for(h in c.xhrFields)i[h]=c.xhrFields[h];c.mimeType&&i.overrideMimeType&&i.overrideMimeType(c.mimeType),!c.crossDomain&&!e["X-Requested-With"]&&(e["X-Requested-With"]="XMLHttpRequest");try{for(h in e)i.setRequestHeader(h,e[h])}catch(j){}i.send(c.hasContent&&c.data||null),d=function(a,e){var 
h,j,k,l,m;try{if(d&&(e||i.readyState===4)){d=b,g&&(i.onreadystatechange=p.noop,cJ&&delete cI[g]);if(e)i.readyState!==4&&i.abort();else{h=i.status,k=i.getAllResponseHeaders(),l={},m=i.responseXML,m&&m.documentElement&&(l.xml=m);try{l.text=i.responseText}catch(a){}try{j=i.statusText}catch(n){j=""}!h&&c.isLocal&&!c.crossDomain?h=l.text?200:404:h===1223&&(h=204)}}}catch(o){e||f(-1,o)}l&&f(h,j,l,k)},c.async?i.readyState===4?setTimeout(d,0):(g=++cK,cJ&&(cI||(cI={},p(a).unload(cJ)),cI[g]=d),i.onreadystatechange=d):d()},abort:function(){d&&d(0,1)}}}});var cN,cO,cP=/^(?:toggle|show|hide)$/,cQ=new RegExp("^(?:([-+])=|)("+q+")([a-z%]*)$","i"),cR=/queueHooks$/,cS=[cY],cT={"*":[function(a,b){var c,d,e=this.createTween(a,b),f=cQ.exec(b),g=e.cur(),h=+g||0,i=1,j=20;if(f){c=+f[2],d=f[3]||(p.cssNumber[a]?"":"px");if(d!=="px"&&h){h=p.css(e.elem,a,!0)||c||1;do i=i||".5",h=h/i,p.style(e.elem,a,h+d);while(i!==(i=e.cur()/g)&&i!==1&&--j)}e.unit=d,e.start=h,e.end=f[1]?h+(f[1]+1)*c:c}return e}]};p.Animation=p.extend(cW,{tweener:function(a,b){p.isFunction(a)?(b=a,a=["*"]):a=a.split(" ");var c,d=0,e=a.length;for(;d-1,j={},k={},l,m;i?(k=e.position(),l=k.top,m=k.left):(l=parseFloat(g)||0,m=parseFloat(h)||0),p.isFunction(b)&&(b=b.call(a,c,f)),b.top!=null&&(j.top=b.top-f.top+l),b.left!=null&&(j.left=b.left-f.left+m),"using"in b?b.using.call(a,j):e.css(j)}},p.fn.extend({position:function(){if(!this[0])return;var a=this[0],b=this.offsetParent(),c=this.offset(),d=c_.test(b[0].nodeName)?{top:0,left:0}:b.offset();return c.top-=parseFloat(p.css(a,"marginTop"))||0,c.left-=parseFloat(p.css(a,"marginLeft"))||0,d.top+=parseFloat(p.css(b[0],"borderTopWidth"))||0,d.left+=parseFloat(p.css(b[0],"borderLeftWidth"))||0,{top:c.top-d.top,left:c.left-d.left}},offsetParent:function(){return this.map(function(){var a=this.offsetParent||e.body;while(a&&!c_.test(a.nodeName)&&p.css(a,"position")==="static")a=a.offsetParent;return a||e.body})}}),p.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(a,c){var d=/Y/.test(c);p.fn[a]=function(e){return p.access(this,function(a,e,f){var g=da(a);if(f===b)return g?c in g?g[c]:g.document.documentElement[e]:a[e];g?g.scrollTo(d?p(g).scrollLeft():f,d?f:p(g).scrollTop()):a[e]=f},a,e,arguments.length,null)}}),p.each({Height:"height",Width:"width"},function(a,c){p.each({padding:"inner"+a,content:c,"":"outer"+a},function(d,e){p.fn[e]=function(e,f){var g=arguments.length&&(d||typeof e!="boolean"),h=d||(e===!0||f===!0?"margin":"border");return p.access(this,function(c,d,e){var f;return p.isWindow(c)?c.document.documentElement["client"+a]:c.nodeType===9?(f=c.documentElement,Math.max(c.body["scroll"+a],f["scroll"+a],c.body["offset"+a],f["offset"+a],f["client"+a])):e===b?p.css(c,d,e,h):p.style(c,d,e,h)},c,g?e:b,g,null)}})}),a.jQuery=a.$=p,typeof define=="function"&&define.amd&&define.amd.jQuery&&define("jquery",[],function(){return p})})(window); \ No newline at end of file diff --git a/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/jquery.treeview.css b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/jquery.treeview.css new file mode 100644 index 000000000..ac33361a6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/jquery.treeview.css @@ -0,0 +1,76 @@ +/* https://github.com/jzaefferer/jquery-treeview/blob/master/jquery.treeview.css */ +/* License: MIT. 
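   Styles the markup produced by jquery.treeview.js (vendored below): a
   ul.treeview whose li elements carry expandable/collapsable/last state
   classes and a leading div.hitarea click target. The background-position
   offsets below page through a single sprite image (treeview-default.gif and
   its red/black/gray/famfamfam variants) to pick the connector or expander
   graphic for each state.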
*/ +.treeview, .treeview ul { + padding: 0; + margin: 0; + list-style: none; +} + +.treeview ul { + background-color: white; + margin-top: 4px; +} + +.treeview .hitarea { + background: url(images/treeview-default.gif) -64px -25px no-repeat; + height: 16px; + width: 16px; + margin-left: -16px; + float: left; + cursor: pointer; +} +/* fix for IE6 */ +* html .hitarea { + display: inline; + float:none; +} + +.treeview li { + margin: 0; + padding: 3px 0pt 3px 16px; +} + +.treeview a.selected { + background-color: #eee; +} + +#treecontrol { margin: 1em 0; display: none; } + +.treeview .hover { color: red; cursor: pointer; } + +.treeview li { background: url(images/treeview-default-line.gif) 0 0 no-repeat; } +.treeview li.collapsable, .treeview li.expandable { background-position: 0 -176px; } + +.treeview .expandable-hitarea { background-position: -80px -3px; } + +.treeview li.last { background-position: 0 -1766px } +.treeview li.lastCollapsable, .treeview li.lastExpandable { background-image: url(images/treeview-default.gif); } +.treeview li.lastCollapsable { background-position: 0 -111px } +.treeview li.lastExpandable { background-position: -32px -67px } + +.treeview div.lastCollapsable-hitarea, .treeview div.lastExpandable-hitarea { background-position: 0; } + +.treeview-red li { background-image: url(images/treeview-red-line.gif); } +.treeview-red .hitarea, .treeview-red li.lastCollapsable, .treeview-red li.lastExpandable { background-image: url(images/treeview-red.gif); } + +.treeview-black li { background-image: url(images/treeview-black-line.gif); } +.treeview-black .hitarea, .treeview-black li.lastCollapsable, .treeview-black li.lastExpandable { background-image: url(images/treeview-black.gif); } + +.treeview-gray li { background-image: url(images/treeview-gray-line.gif); } +.treeview-gray .hitarea, .treeview-gray li.lastCollapsable, .treeview-gray li.lastExpandable { background-image: url(images/treeview-gray.gif); } + +.treeview-famfamfam li { background-image: url(images/treeview-famfamfam-line.gif); } +.treeview-famfamfam .hitarea, .treeview-famfamfam li.lastCollapsable, .treeview-famfamfam li.lastExpandable { background-image: url(images/treeview-famfamfam.gif); } + +.treeview .placeholder { + background: url(images/ajax-loader.gif) 0 0 no-repeat; + height: 16px; + width: 16px; + display: block; +} + +.filetree li { padding: 3px 0 2px 16px; } +.filetree span.folder, .filetree span.file { padding: 1px 0 1px 16px; display: block; } +.filetree span.folder { background: url(images/folder.gif) 0 0 no-repeat; } +.filetree li.expandable span.folder { background: url(images/folder-closed.gif) 0 0 no-repeat; } +.filetree span.file { background: url(images/file.gif) 0 0 no-repeat; } diff --git a/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/jquery.treeview.edit.js b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/jquery.treeview.edit.js new file mode 100644 index 000000000..9895b0263 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/jquery.treeview.edit.js @@ -0,0 +1,39 @@ +/* https://github.com/jzaefferer/jquery-treeview/blob/master/jquery.treeview.edit.js */ +/* License: MIT. 
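   Extends the base $.fn.treeview (vendored below) with "add" and "remove"
   triggers, so branches can be grafted onto, or pruned from, a tree that has
   already been initialized. A minimal usage sketch; the #tree selector and
   the branch markup are illustrative assumptions, not part of this patch:

     // initialize the base plugin first (options handled in jquery.treeview.js)
     $("#tree").treeview({ collapsed: true, animated: "fast" });

     // graft a branch; the "add" trigger re-applies state classes and hitareas
     var branch = $("<li><span>New item</span></li>").appendTo("#tree");
     $("#tree").treeview({ add: branch });

     // prune it again; sibling last/expandable classes are repaired
     $("#tree").treeview({ remove: branch });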
*/ +(function($) { + var CLASSES = $.treeview.classes; + var proxied = $.fn.treeview; + $.fn.treeview = function(settings) { + settings = $.extend({}, settings); + if (settings.add) { + return this.trigger("add", [settings.add]); + } + if (settings.remove) { + return this.trigger("remove", [settings.remove]); + } + return proxied.apply(this, arguments).bind("add", function(event, branches) { + $(branches).prev() + .removeClass(CLASSES.last) + .removeClass(CLASSES.lastCollapsable) + .removeClass(CLASSES.lastExpandable) + .find(">.hitarea") + .removeClass(CLASSES.lastCollapsableHitarea) + .removeClass(CLASSES.lastExpandableHitarea); + $(branches).find("li").andSelf().prepareBranches(settings).applyClasses(settings, $(this).data("toggler")); + }).bind("remove", function(event, branches) { + var prev = $(branches).prev(); + var parent = $(branches).parent(); + $(branches).remove(); + prev.filter(":last-child").addClass(CLASSES.last) + .filter("." + CLASSES.expandable).replaceClass(CLASSES.last, CLASSES.lastExpandable).end() + .find(">.hitarea").replaceClass(CLASSES.expandableHitarea, CLASSES.lastExpandableHitarea).end() + .filter("." + CLASSES.collapsable).replaceClass(CLASSES.last, CLASSES.lastCollapsable).end() + .find(">.hitarea").replaceClass(CLASSES.collapsableHitarea, CLASSES.lastCollapsableHitarea); + if (parent.is(":not(:has(>))") && parent[0] != this) { + parent.parent().removeClass(CLASSES.collapsable).removeClass(CLASSES.expandable) + parent.siblings(".hitarea").andSelf().remove(); + } + }); + }; + +})(jQuery); diff --git a/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/jquery.treeview.js b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/jquery.treeview.js new file mode 100644 index 000000000..356af2380 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/jquery.treeview.js @@ -0,0 +1,256 @@ +/* + * Treeview 1.4.1 - jQuery plugin to hide and show branches of a tree + * + * http://bassistance.de/jquery-plugins/jquery-plugin-treeview/ + * http://docs.jquery.com/Plugins/Treeview + * + * Copyright (c) 2007 Jörn Zaefferer + * + * Dual licensed under the MIT and GPL licenses: + * http://www.opensource.org/licenses/mit-license.php + * http://www.gnu.org/licenses/gpl.html + * + * Revision: $Id: jquery.treeview.js 5759 2008-07-01 07:50:28Z joern.zaefferer $ + * + */ + +;(function($) { + + // TODO rewrite as a widget, removing all the extra plugins + $.extend($.fn, { + swapClass: function(c1, c2) { + var c1Elements = this.filter('.' + c1); + this.filter('.' + c2).removeClass(c2).addClass(c1); + c1Elements.removeClass(c1).addClass(c2); + return this; + }, + replaceClass: function(c1, c2) { + return this.filter('.' + c1).removeClass(c1).addClass(c2).end(); + }, + hoverClass: function(className) { + className = className || "hover"; + return this.hover(function() { + $(this).addClass(className); + }, function() { + $(this).removeClass(className); + }); + }, + heightToggle: function(animated, callback) { + animated ? + this.animate({ height: "toggle" }, animated, callback) : + this.each(function(){ + jQuery(this)[ jQuery(this).is(":hidden") ? 
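/* no animation duration supplied: fall through to an instant toggle, choosing show() or hide() from the element's current :hidden state */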
"show" : "hide" ](); + if(callback) + callback.apply(this, arguments); + }); + }, + heightHide: function(animated, callback) { + if (animated) { + this.animate({ height: "hide" }, animated, callback); + } else { + this.hide(); + if (callback) + this.each(callback); + } + }, + prepareBranches: function(settings) { + if (!settings.prerendered) { + // mark last tree items + this.filter(":last-child:not(ul)").addClass(CLASSES.last); + // collapse whole tree, or only those marked as closed, anyway except those marked as open + this.filter((settings.collapsed ? "" : "." + CLASSES.closed) + ":not(." + CLASSES.open + ")").find(">ul").hide(); + } + // return all items with sublists + return this.filter(":has(>ul)"); + }, + applyClasses: function(settings, toggler) { + // TODO use event delegation + this.filter(":has(>ul):not(:has(>a))").find(">span").unbind("click.treeview").bind("click.treeview", function(event) { + // don't handle click events on children, eg. checkboxes + if ( this == event.target ) + toggler.apply($(this).next()); + }).add( $("a", this) ).hoverClass(); + + if (!settings.prerendered) { + // handle closed ones first + this.filter(":has(>ul:hidden)") + .addClass(CLASSES.expandable) + .replaceClass(CLASSES.last, CLASSES.lastExpandable); + + // handle open ones + this.not(":has(>ul:hidden)") + .addClass(CLASSES.collapsable) + .replaceClass(CLASSES.last, CLASSES.lastCollapsable); + + // create hitarea if not present + var hitarea = this.find("div." + CLASSES.hitarea); + if (!hitarea.length) + hitarea = this.prepend("
    ").find("div." + CLASSES.hitarea); + hitarea.removeClass().addClass(CLASSES.hitarea).each(function() { + var classes = ""; + $.each($(this).parent().attr("class").split(" "), function() { + classes += this + "-hitarea "; + }); + $(this).addClass( classes ); + }) + } + + // apply event to hitarea + this.find("div." + CLASSES.hitarea).click( toggler ); + }, + treeview: function(settings) { + + settings = $.extend({ + cookieId: "treeview" + }, settings); + + if ( settings.toggle ) { + var callback = settings.toggle; + settings.toggle = function() { + return callback.apply($(this).parent()[0], arguments); + }; + } + + // factory for treecontroller + function treeController(tree, control) { + // factory for click handlers + function handler(filter) { + return function() { + // reuse toggle event handler, applying the elements to toggle + // start searching for all hitareas + toggler.apply( $("div." + CLASSES.hitarea, tree).filter(function() { + // for plain toggle, no filter is provided, otherwise we need to check the parent element + return filter ? $(this).parent("." + filter).length : true; + }) ); + return false; + }; + } + // click on first element to collapse tree + $("a:eq(0)", control).click( handler(CLASSES.collapsable) ); + // click on second to expand tree + $("a:eq(1)", control).click( handler(CLASSES.expandable) ); + // click on third to toggle tree + $("a:eq(2)", control).click( handler() ); + } + + // handle toggle event + function toggler() { + $(this) + .parent() + // swap classes for hitarea + .find(">.hitarea") + .swapClass( CLASSES.collapsableHitarea, CLASSES.expandableHitarea ) + .swapClass( CLASSES.lastCollapsableHitarea, CLASSES.lastExpandableHitarea ) + .end() + // swap classes for parent li + .swapClass( CLASSES.collapsable, CLASSES.expandable ) + .swapClass( CLASSES.lastCollapsable, CLASSES.lastExpandable ) + // find child lists + .find( ">ul" ) + // toggle them + .heightToggle( settings.animated, settings.toggle ); + if ( settings.unique ) { + $(this).parent() + .siblings() + // swap classes for hitarea + .find(">.hitarea") + .replaceClass( CLASSES.collapsableHitarea, CLASSES.expandableHitarea ) + .replaceClass( CLASSES.lastCollapsableHitarea, CLASSES.lastExpandableHitarea ) + .end() + .replaceClass( CLASSES.collapsable, CLASSES.expandable ) + .replaceClass( CLASSES.lastCollapsable, CLASSES.lastExpandable ) + .find( ">ul" ) + .heightHide( settings.animated, settings.toggle ); + } + } + this.data("toggler", toggler); + + function serialize() { + function binary(arg) { + return arg ? 1 : 0; + } + var data = []; + branches.each(function(i, e) { + data[i] = $(e).is(":has(>ul:visible)") ? 1 : 0; + }); + $.cookie(settings.cookieId, data.join(""), settings.cookieOptions ); + } + + function deserialize() { + var stored = $.cookie(settings.cookieId); + if ( stored ) { + var data = stored.split(""); + branches.each(function(i, e) { + $(e).find(">ul")[ parseInt(data[i]) ? 
"show" : "hide" ](); + }); + } + } + + // add treeview class to activate styles + this.addClass("treeview"); + + // prepare branches and find all tree items with child lists + var branches = this.find("li").prepareBranches(settings); + + switch(settings.persist) { + case "cookie": + var toggleCallback = settings.toggle; + settings.toggle = function() { + serialize(); + if (toggleCallback) { + toggleCallback.apply(this, arguments); + } + }; + deserialize(); + break; + case "location": + var current = this.find("a").filter(function() { + return this.href.toLowerCase() == location.href.toLowerCase(); + }); + if ( current.length ) { + // TODO update the open/closed classes + var items = current.addClass("selected").parents("ul, li").add( current.next() ).show(); + if (settings.prerendered) { + // if prerendered is on, replicate the basic class swapping + items.filter("li") + .swapClass( CLASSES.collapsable, CLASSES.expandable ) + .swapClass( CLASSES.lastCollapsable, CLASSES.lastExpandable ) + .find(">.hitarea") + .swapClass( CLASSES.collapsableHitarea, CLASSES.expandableHitarea ) + .swapClass( CLASSES.lastCollapsableHitarea, CLASSES.lastExpandableHitarea ); + } + } + break; + } + + branches.applyClasses(settings, toggler); + + // if control option is set, create the treecontroller and show it + if ( settings.control ) { + treeController(this, settings.control); + $(settings.control).show(); + } + + return this; + } + }); + + // classes used by the plugin + // need to be styled via external stylesheet, see first example + $.treeview = {}; + var CLASSES = ($.treeview.classes = { + open: "open", + closed: "closed", + expandable: "expandable", + expandableHitarea: "expandable-hitarea", + lastExpandableHitarea: "lastExpandable-hitarea", + collapsable: "collapsable", + collapsableHitarea: "collapsable-hitarea", + lastCollapsableHitarea: "lastCollapsable-hitarea", + lastCollapsable: "lastCollapsable", + lastExpandable: "lastExpandable", + last: "last", + hitarea: "hitarea" + }); + +})(jQuery); diff --git a/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/methodset.html b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/methodset.html new file mode 100644 index 000000000..1b339e3c3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/methodset.html @@ -0,0 +1,9 @@ + diff --git a/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/opensearch.xml b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/opensearch.xml new file mode 100644 index 000000000..1b652db37 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/opensearch.xml @@ -0,0 +1,11 @@ + + + godoc + The Go Programming Language + go golang + + + /favicon.ico + UTF-8 + UTF-8 + diff --git a/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/package.txt b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/package.txt new file mode 100644 index 000000000..e53fa6ed3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/package.txt @@ -0,0 +1,116 @@ +{{$info := .}}{{$filtered := .IsFiltered}}{{/* + +--------------------------------------- + +*/}}{{if $filtered}}{{range .PAst}}{{range .Decls}}{{node $info .}} + +{{end}}{{end}}{{else}}{{with .PAst}}{{range $filename, $ast := .}}{{$filename}}: +{{node $ $ast}}{{end}}{{end}}{{end}}{{/* + +--------------------------------------- + +*/}}{{if and $filtered (not (or .PDoc .PAst))}}No match found. 
+{{end}}{{with .PDoc}}{{if $.IsMain}}COMMAND DOCUMENTATION + +{{comment_text .Doc " " "\t"}} +{{else}}{{if not $filtered}}PACKAGE DOCUMENTATION + +package {{.Name}} + import "{{.ImportPath}}" + +{{comment_text .Doc " " "\t"}} +{{example_text $ "" " "}}{{end}}{{/* + +--------------------------------------- + +*/}}{{with .Consts}}{{if not $filtered}}CONSTANTS + +{{end}}{{range .}}{{node $ .Decl}} +{{comment_text .Doc " " "\t"}} +{{end}}{{end}}{{/* + +--------------------------------------- + +*/}}{{with .Vars}}{{if not $filtered}}VARIABLES + +{{end}}{{range .}}{{node $ .Decl}} +{{comment_text .Doc " " "\t"}} +{{end}}{{end}}{{/* + +--------------------------------------- + +*/}}{{with .Funcs}}{{if not $filtered}}FUNCTIONS + +{{end}}{{range .}}{{node $ .Decl}} +{{comment_text .Doc " " "\t"}} +{{example_text $ .Name " "}}{{end}}{{end}}{{/* + +--------------------------------------- + +*/}}{{with .Types}}{{if not $filtered}}TYPES + +{{end}}{{range .}}{{$tname := .Name}}{{node $ .Decl}} +{{comment_text .Doc " " "\t"}} +{{/* + +--------------------------------------- + +*/}}{{if .Consts}}{{range .Consts}}{{node $ .Decl}} +{{comment_text .Doc " " "\t"}} +{{end}}{{end}}{{/* + +--------------------------------------- + +*/}}{{if .Vars}}{{range .Vars}}{{node $ .Decl}} +{{comment_text .Doc " " "\t"}} +{{range $name := .Names}}{{example_text $ $name " "}}{{end}}{{end}}{{end}}{{/* + +--------------------------------------- + +*/}}{{if .Funcs}}{{range .Funcs}}{{node $ .Decl}} +{{comment_text .Doc " " "\t"}} +{{example_text $ .Name " "}}{{end}}{{end}}{{/* + +--------------------------------------- + +*/}}{{if .Methods}}{{range .Methods}}{{node $ .Decl}} +{{comment_text .Doc " " "\t"}} +{{$name := printf "%s_%s" $tname .Name}}{{example_text $ $name " "}}{{end}}{{end}}{{/* + +--------------------------------------- + +*/}}{{end}}{{end}}{{/* + +--------------------------------------- + +*/}}{{if and $filtered (not (or .Consts (or .Vars (or .Funcs .Types))))}}No match found. +{{end}}{{/* + +--------------------------------------- + +*/}}{{end}}{{/* + +--------------------------------------- + +*/}}{{with $.Notes}} +{{range $marker, $content := .}} +{{$marker}}S + +{{range $content}}{{comment_text .Body " " "\t"}} +{{end}}{{end}}{{end}}{{end}}{{/* + +--------------------------------------- + +*/}}{{if not $filtered}}{{with .Dirs}}SUBDIRECTORIES +{{if $.DirFlat}}{{range .List}}{{if .HasPkg}} + {{.Path}}{{end}}{{end}} +{{else}}{{range .List}} + {{repeat `. ` .Depth}}{{.Name}}{{end}} +{{end}}{{end}}{{/* + +--------------------------------------- + +*/}}{{end}}{{/* +Make sure there is no newline at the end of this file. +perl -i -pe 'chomp if eof' package.txt +*/}} diff --git a/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/package_default.html b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/package_default.html new file mode 100644 index 000000000..0b3c01dd2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/package_default.html @@ -0,0 +1,245 @@ + + +{{with .PDoc}} + + + {{if $.IsMain}} + {{/* command documentation */}} + {{comment_html .Doc}} + {{else}} + {{/* package documentation */}} +
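{{/* package page skeleton: the import line, Overview / Index / Examples navigation, an index of constants, variables, functions and types, then full documentation for each declaration rendered via node_html, comment_html and example_html */}}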
    +
    +
    import "github.com/aws/aws-sdk-go/{{html .ImportPath}}"
    +
    +
    +
    Overview
    +
    Index
    + {{if $.Examples}} +
    Examples
    + {{end}} +
    +
    + +
    + +
    +

    Overview ▾

    + {{comment_html .Doc}} +
    +
    + {{example_html $ ""}} + +
    + +
    +

    Index ▾

    + + +
    +
    + {{if .Consts}} +
    Constants
    + {{end}} + {{if .Vars}} +
    Variables
    + {{end}} + {{range .Funcs}} + {{$name_html := html .Name}} +
    {{node_html $ .Decl false | sanitize}}
    + {{end}} + {{range .Types}} + {{$tname_html := html .Name}} +
    type {{$tname_html}}
    + {{range .Funcs}} + {{$name_html := html .Name}} +
        {{node_html $ .Decl false | sanitize}}
    + {{end}} + {{range .Methods}} + {{$name_html := html .Name}} +
        {{node_html $ .Decl false | sanitize}}
    + {{end}} + {{end}} + {{if $.Notes}} + {{range $marker, $item := $.Notes}} +
    {{noteTitle $marker | html}}s
    + {{end}} + {{end}} +
    +
    + + {{if $.Examples}} +
    +

    Examples

    +
    + {{range $.Examples}} +
    {{example_name .Name}}
    + {{end}} +
    +
    + {{end}} + + {{with .Filenames}} +

    Package files

    +

    + + {{range .}} + {{.|filename|html}} + {{end}} + +

    + {{end}} +
    +
    + + + + {{with .Consts}} +

    Constants

    + {{range .}} +
    {{node_html $ .Decl true}}
    + {{comment_html .Doc}} + {{end}} + {{end}} + {{with .Vars}} +

    Variables

    + {{range .}} +
    {{node_html $ .Decl true}}
    + {{comment_html .Doc}} + {{end}} + {{end}} + {{range .Funcs}} + {{/* Name is a string - no need for FSet */}} + {{$name_html := html .Name}} +

    func {{$name_html}}

    +
    {{node_html $ .Decl true}}
    + {{comment_html .Doc}} + {{example_html $ .Name}} + {{callgraph_html $ "" .Name}} + + {{end}} + {{range .Types}} + {{$tname := .Name}} + {{$tname_html := html .Name}} +

    type {{$tname_html}}

    +
    {{node_html $ .Decl true}}
    + {{comment_html .Doc}} + + {{range .Consts}} +
    {{node_html $ .Decl true}}
    + {{comment_html .Doc}} + {{end}} + + {{range .Vars}} +
    {{node_html $ .Decl true}}
    + {{comment_html .Doc}} + {{end}} + + {{example_html $ $tname}} + {{implements_html $ $tname}} + {{methodset_html $ $tname}} + + {{range .Funcs}} + {{$name_html := html .Name}} +

    func {{$name_html}}

    +
    {{node_html $ .Decl true}}
    + {{comment_html .Doc}} + {{example_html $ .Name}} + {{callgraph_html $ "" .Name}} + {{end}} + + {{range .Methods}} + {{$name_html := html .Name}} +

    func ({{html .Recv}}) {{$name_html}}

    +
    {{node_html $ .Decl true}}
    + {{comment_html .Doc}} + {{$name := printf "%s_%s" $tname .Name}} + {{example_html $ $name}} + {{callgraph_html $ .Recv .Name}} + {{end}} + {{end}} + {{end}} + + {{with $.Notes}} + {{range $marker, $content := .}} +

    {{noteTitle $marker | html}}s

    +
      + {{range .}} +
    • {{html .Body}}
    • + {{end}} +
    + {{end}} + {{end}} +{{end}} + +{{with .PAst}} + {{range $filename, $ast := .}} + {{$filename|filename|html}}:
    {{node_html $ $ast false}}
    + {{end}} +{{end}} + +{{with .Dirs}} + {{if eq $.Dirname "/src"}} + +

    Standard library

    + + {{end}} +{{end}} diff --git a/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/package_service.html b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/package_service.html new file mode 100644 index 000000000..91cb0bf9a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/package_service.html @@ -0,0 +1,262 @@ + + +{{with .PDoc}} + + {{if $.IsMain}} + {{/* command documentation */}} + {{comment_html .Doc}} + {{else}} + {{/* package documentation */}} +
    +
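{{/* service variant of package_default.html: navigation is split into Operations and Types panes, and the Operations index filters out the generated String, GoString and Validate methods so that only API operations are linked */}}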
    +
    import "github.com/aws/aws-sdk-go/{{html .ImportPath}}"
    +
    +
    +
    Overview
    + {{if .Consts}} +
    Constants
    + {{end}} + {{if $.Examples}} +
    Examples
    + {{end}} +
    +
    + +
    + +
    +

    Overview ▾

    + {{comment_html .Doc}} +
    +
    + {{example_html $ ""}} + +
    + +
    +

    Operations ▾

    + + +
    +
    + {{range .Funcs}} + {{$name_html := html .Name}} +
    {{node_html $ .Decl false | sanitize}}
    + {{end}} + {{range .Types}} + {{$tname_html := html .Name}} + {{range .Funcs}} + {{$name_html := html .Name}} +
    {{node_html $ .Decl false | sanitize}}
    + {{end}} + {{range .Methods}} + + {{if (and (ne .Name "String") (ne .Name "GoString")) }} + {{if (ne .Name "Validate") }} + {{$name_html := html .Name}} +
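{{/* guarded above so that only real API operations are linked here; the generated String/GoString/Validate helpers never reach this line */}}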
    {{node_html $ .Decl false | sanitize}}
    + {{end}} + {{end}} + {{end}} + {{end}} + {{if $.Notes}} + {{range $marker, $item := $.Notes}} +
    {{noteTitle $marker | html}}s
    + {{end}} + {{end}} +
    +
    + + + + +
    +
    +
    + +
    +

    Types ▾

    + {{if .Vars}} +
    Variables
    + {{end}} + {{range .Types}} + {{$tname_html := html .Name}} +
    type {{$tname_html}}
    + {{end}} +
    +
    + + {{if $.Examples}} +
    + +
    +

    Examples ▾

    +
    +
    + {{range $.Examples}} +
    {{example_name .Name}}
    + {{end}} +
    +
    +
    +
    + {{end}} + + + + {{with .Consts}} +

    Constants

    + {{range .}} +
    {{node_html $ .Decl true}}
    + {{comment_html .Doc}} + {{end}} + {{end}} + {{with .Vars}} +

    Variables

    + {{range .}} +
    {{node_html $ .Decl true}}
    + {{comment_html .Doc}} + {{end}} + {{end}} + {{range .Funcs}} + {{/* Name is a string - no need for FSet */}} + {{$name_html := html .Name}} +

    func {{$name_html}}

    +
    {{node_html $ .Decl true}}
    + {{comment_html .Doc}} + {{example_html $ .Name}} + {{callgraph_html $ "" .Name}} + + {{end}} + {{range .Types}} + {{$tname := .Name}} + {{$tname_html := html .Name}} +

    type {{$tname_html}}

    +
    {{node_html $ .Decl true}}
    + {{comment_html .Doc}} + + {{range .Consts}} +
    {{node_html $ .Decl true}}
    + {{comment_html .Doc}} + {{end}} + + {{range .Vars}} +
    {{node_html $ .Decl true}}
    + {{comment_html .Doc}} + {{end}} + + {{example_html $ $tname}} + {{implements_html $ $tname}} + {{methodset_html $ $tname}} + + {{range .Funcs}} + {{$name_html := html .Name}} +

    func {{$name_html}}

    +
    {{node_html $ .Decl true}}
    + {{comment_html .Doc}} + {{example_html $ .Name}} + {{callgraph_html $ "" .Name}} + {{end}} + + {{range .Methods}} + {{$name_html := html .Name}} +

    func ({{html .Recv}}) {{$name_html}}

    +
    {{node_html $ .Decl true}}
    + {{comment_html .Doc}} + {{$name := printf "%s_%s" $tname .Name}} + {{example_html $ $name}} + {{callgraph_html $ .Recv .Name}} + {{end}} + {{end}} + {{end}} + + {{with $.Notes}} + {{range $marker, $content := .}} +

    {{noteTitle $marker | html}}s

    +
      + {{range .}} +
    • {{html .Body}}
    • + {{end}} +
    + {{end}} + {{end}} +{{end}} + +{{with .PAst}} + {{range $filename, $ast := .}} + {{$filename|filename|html}}:
    {{node_html $ $ast false}}
    + {{end}} +{{end}} diff --git a/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/pkglist.html b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/pkglist.html new file mode 100644 index 000000000..75cdb746e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/pkglist.html @@ -0,0 +1,24 @@ + + + + + +
    +
    +
    + + +
    + {{range .List}} + + {{end}} +
    +
    + + diff --git a/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/search.html b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/search.html new file mode 100644 index 000000000..e0d13b9b5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/search.html @@ -0,0 +1,18 @@ + +{{with .Alert}} +

    + {{html .}} +

    +{{end}} +{{with .Alt}} +

    + Did you mean: + {{range .Alts}} + {{html .}} + {{end}} +

    +{{end}} diff --git a/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/search.txt b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/search.txt new file mode 100644 index 000000000..0ae0c080d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/search.txt @@ -0,0 +1,54 @@ +QUERY + {{.Query}} + +{{with .Alert}}{{.}} +{{end}}{{/* .Alert */}}{{/* + +--------------------------------------- + +*/}}{{with .Alt}}DID YOU MEAN + +{{range .Alts}} {{.}} +{{end}} +{{end}}{{/* .Alt */}}{{/* + +--------------------------------------- + +*/}}{{with .Pak}}PACKAGE {{$.Query}} + +{{range .}} {{pkgLink .Pak.Path}} +{{end}} +{{end}}{{/* .Pak */}}{{/* + +--------------------------------------- + +*/}}{{range $key, $val := .Idents}}{{if $val}}{{$key.Name}} +{{range $val}} {{.Path}}.{{.Name}} +{{end}} +{{end}}{{end}}{{/* .Idents */}}{{/* + +--------------------------------------- + +*/}}{{with .Hit}}{{with .Decls}}PACKAGE-LEVEL DECLARATIONS + +{{range .}}package {{.Pak.Name}} +{{range $file := .Files}}{{range .Groups}}{{range .}} {{srcLink $file.File.Path}}:{{infoLine .}}{{end}} +{{end}}{{end}}{{/* .Files */}} +{{end}}{{end}}{{/* .Decls */}}{{/* + +--------------------------------------- + +*/}}{{with .Others}}LOCAL DECLARATIONS AND USES + +{{range .}}package {{.Pak.Name}} +{{range $file := .Files}}{{range .Groups}}{{range .}} {{srcLink $file.File.Path}}:{{infoLine .}} +{{end}}{{end}}{{end}}{{/* .Files */}} +{{end}}{{end}}{{/* .Others */}}{{end}}{{/* .Hit */}}{{/* + +--------------------------------------- + +*/}}{{if .Textual}}{{if .Complete}}{{.Found}} TEXTUAL OCCURRENCES{{else}}MORE THAN {{.Found}} TEXTUAL OCCURRENCES{{end}} + +{{range .Textual}}{{len .Lines}} {{srcLink .Filename}} +{{end}}{{if not .Complete}}... ... +{{end}}{{end}} diff --git a/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/searchcode.html b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/searchcode.html new file mode 100644 index 000000000..a032e642c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/searchcode.html @@ -0,0 +1,64 @@ + +{{$query_url := urlquery .Query}} +{{if not .Idents}} + {{with .Pak}} +
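{{/* exact package-name matches render first; the .Hit blocks below then group package-level declarations and local uses per package, each entry linking to its source file and line */}}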

    Package {{html $.Query}}

    +

    +

    + {{range .}} + {{$pkg_html := pkgLink .Pak.Path | html}} + + {{end}} +
    {{$pkg_html}}
    +

    + {{end}} +{{end}} +{{with .Hit}} + {{with .Decls}} +

    Package-level declarations

    + {{range .}} + {{$pkg_html := pkgLink .Pak.Path | html}} +

    package {{html .Pak.Name}}

    + {{range .Files}} + {{$file := .File.Path}} + {{range .Groups}} + {{range .}} + {{$line := infoLine .}} + {{$file}}:{{$line}} + {{infoSnippet_html .}} + {{end}} + {{end}} + {{end}} + {{end}} + {{end}} + {{with .Others}} +

    Local declarations and uses

    + {{range .}} + {{$pkg_html := pkgLink .Pak.Path | html}} +

    package {{html .Pak.Name}}

    + {{range .Files}} + {{$file := .File.Path}} + {{$file}} + + {{range .Groups}} + + + + + + + {{end}} +
    {{index . 0 | infoKind_html}} + {{range .}} + {{$line := infoLine .}} + {{$line}} + {{end}} +
    + {{end}} + {{end}} + {{end}} +{{end}} diff --git a/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/searchdoc.html b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/searchdoc.html new file mode 100644 index 000000000..679c02cf3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/searchdoc.html @@ -0,0 +1,24 @@ + +{{range $key, $val := .Idents}} + {{if $val}} +

    {{$key.Name}}

    + {{range $val}} + {{$pkg_html := pkgLink .Path | html}} + {{if eq "Packages" $key.Name}} + {{html .Path}} + {{else}} + {{$doc_html := docLink .Path .Name| html}} + {{html .Package}}.{{.Name}} + {{end}} + {{if .Doc}} +

    {{comment_html .Doc}}

    + {{else}} +

    No documentation available

    + {{end}} + {{end}} + {{end}} +{{end}} diff --git a/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/searchtxt.html b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/searchtxt.html new file mode 100644 index 000000000..7e4a978c4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/searchtxt.html @@ -0,0 +1,42 @@ + +{{$query_url := urlquery .Query}} +{{with .Textual}} + {{if $.Complete}} +

    {{html $.Found}} textual occurrences

    + {{else}} +

    More than {{html $.Found}} textual occurrences

    +

    + Not all files or lines containing "{{html $.Query}}" are shown. +

    + {{end}} +

    + + {{range .}} + {{$file := .Filename}} + + + + + + + + {{end}} + {{if not $.Complete}} + + {{end}} +
    + {{$file}}: + {{len .Lines}} + {{range .Lines}} + {{html .}} + {{end}} + {{if not $.Complete}} + ... + {{end}} +
    ...
    +

    +{{end}} diff --git a/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/style.css b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/style.css new file mode 100644 index 000000000..ffc3ff360 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/style.css @@ -0,0 +1,1033 @@ +.container-body { + width: 100%; + overflow:hidden; +} + +a > img { + max-height:70%; + float:left; + padding-left:22px; + padding-top:8px; +} + +body { + margin: 0; + font-family: HelveticaNeueBold,Helvetica,Helvetica,Arial,sans-serif; + font-size: 16px; + background-color: #fff; + line-height: 1.3em; +} +pre, +code { + font-family: Menlo, monospace; + font-size: 14px; +} +pre { + line-height: 1.4em; + overflow-x: auto; +} +pre .comment { + color: #006600; +} +pre .highlight, +pre .highlight-comment, +pre .selection-highlight, +pre .selection-highlight-comment { + background: #FFFF00; +} +pre .selection, +pre .selection-comment { + background: #FF9632; +} +pre .ln { + color: #999; +} + +body { + color: #222; +} +a, +.exampleHeading .text { + color: #375EAB; + text-decoration: none; +} +a:hover, +.exampleHeading .text:hover { + text-decoration: underline; +} +p, li { + max-width: 800px; + word-wrap: break-word; +} +p, +pre, +ul, +ol { + display: inherit; + margin: 20px; +} +.packages { + margin: 0px; + text-align: left; + width: 100%; +} +pre { + background: #EFEFEF; + padding: 10px; + + -webkit-border-radius: 5px; + -moz-border-radius: 5px; + border-radius: 5px; +} +.title { + font-family: HelveticaNeueBold,Helvetica,Helvetica,Arial,sans-serif; + text-shadow: rgba(0,0,0,0.8) 0 -1px 0; + color: #fff; + -webkit-font-smoothing: antialiased; + margin:20px; + display:inherit; +} + +h1, +h2, +h3, +h4, +.rootHeading { + margin: 20px 0 20px; + padding: 0; + color: #375EAB; + font-weight: bold; +} +h1 { + font-size: 28px; + line-height: 1; +} +h2 { + font-size: 20px; + background: #E0EBF5; + padding: 8px; + line-height: 1.25; + font-weight: normal; +} +h2 a { + font-weight: bold; +} +h3 { + font-size: 20px; +} +h3, +h4 { + margin: 20px 5px; +} +h4 { + font-size: 16px; +} +.rootHeading { + font-size: 20px; + margin: 0; +} + +dl { + margin: 20px; +} +dd { + margin: 0 0 0 20px; +} +dl, +dd { + font-size: 14px; +} +div#nav table td { + vertical-align: top; +} + +div#mobile_container { + display:inline-block; + font-size: 15px; + margin-left:auto; + margin-right:auto; + line-height:100%; +} + +div#mobile-nav { + display:none; +} + +div#logo_container { + height:100%; + display:table-cell; + float:left; + vertical-align:middle; +} + +div#mobile_only { + display:none; +} + +div#fixed { + position: fixed; + width: 100%; + height: 100%; +} + +div .top_link { + float:right; + overflow:auto; + display:inline; + padding-top:2px; +} + +.pkg-dir { + width: 20%; + max-width:400px; + min-width:325px; + height: calc(100% - 64px); + position: relative; + background: #F6F6F6; + overflow:hidden; + float: left; + display: inline-block; + box-sizing: border-box; + border-top-style:hidden; + border-left-style:hidden; + border-bottom-style:hidden; +} + +.pkg-dir table { + border-collapse: collapse; + border-spacing: 0; +} +.pkg-name { + padding-right: 20px; +} +.alert { + color: #AA0000; +} + +.top-heading { + float: left; + padding: 21px 0; + font-size: 20px; + font-weight: normal; +} +.top-heading a { + color: #222; + text-decoration: none; +} + +div#topbar { + background: #444; + height: 64px; + overflow: hidden; + position: relative; +} + +div#page { + box-sizing: border-box; + height:calc(100% 
- 64px); + overflow:auto; +} +div#page > .container, +div#topbar > .container { + text-align: left; + padding: 0 20px; +} +div#topbar > .container, +div#page > .container { +} +div#page.wide > .container, +div#topbar.wide > .container { +} +div#plusone { + float: right; + clear: right; + margin-top: 5px; +} + +div#footer { + text-align: center; + color: #666; + font-size: 14px; + margin: 40px 0; +} + +div#menu > a, +div#menu > input, +div#learn .buttons a, +div.play .buttons a, +div#blog .read a, +#menu-button { + padding: 10px; + + text-decoration: none; + font-size: 16px; + + -webkit-border-radius: 5px; + -moz-border-radius: 5px; + border-radius: 5px; +} +div#playground .buttons a, +div#menu > a, +div#menu > input, +#menu-button { + border: 1px solid #375EAB; +} +div#playground .buttons a, +div#menu > a, +#menu-button { + color: white; + background: #375EAB; +} +#playgroundButton.active { + background: white; + color: #375EAB; +} +a#start, +div#learn .buttons a, +div.play .buttons a, +div#blog .read a { + color: #222; + border: 1px solid #375EAB; + background: #E0EBF5; +} +.download { + width: 150px; +} + +div#menu { + display: inline; + float: right; + padding: 10px; + white-space: nowrap; +} +div#menu.menu-visible { + max-height: 500px; +} +div#menu > a, +#menu-button { + margin: 10px 2px; + padding: 10px; +} +div#menu > input { + position: relative; + top: 1px; + width: 140px; + background: white; + color: #222; + box-sizing: border-box; +} +div#menu > input.inactive { + color: #999; +} + +#menu-button { + display: none; + position: absolute; + right: 5px; + top: 0; + margin-right: 5px; +} +#menu-button-arrow { + display: inline-block; +} +.vertical-flip { + transform: rotate(-180deg); +} + +div.left { + float: left; + clear: left; + margin-right: 2.5%; +} +div.right { + float: right; + clear: right; + margin-left: 2.5%; +} +div.left, +div.right { + width: 45%; +} + +div#learn, +div#about { + padding-top: 20px; +} +div#learn h2, +div#about { + margin: 0; +} +div#about { + font-size: 20px; + margin: 0 auto 30px; +} +div#gopher { + background: url(/doc/gopher/frontpage.png) no-repeat; + background-position: center top; + height: 155px; +} +a#start { + display: block; + padding: 10px; + + text-align: center; + text-decoration: none; + + -webkit-border-radius: 5px; + -moz-border-radius: 5px; + border-radius: 5px; +} +a#start .big { + display: block; + font-weight: bold; + font-size: 20px; +} +a#start .desc { + display: block; + font-size: 14px; + font-weight: normal; + margin-top: 5px; +} + +div#learn .popout { + float: right; + display: block; + cursor: pointer; + font-size: 12px; + background: url(/doc/share.png) no-repeat; + background-position: right top; + padding: 5px 27px; +} +div#learn pre, +div#learn textarea { + padding: 0; + margin: 0; + font-family: Menlo, monospace; + font-size: 14px; +} +div#learn .input { + padding: 10px; + margin-top: 10px; + height: 150px; + + -webkit-border-top-left-radius: 5px; + -webkit-border-top-right-radius: 5px; + -moz-border-radius-topleft: 5px; + -moz-border-radius-topright: 5px; + border-top-left-radius: 5px; + border-top-right-radius: 5px; +} +div#learn .input textarea { + width: 100%; + height: 100%; + border: none; + outline: none; + resize: none; +} +div#learn .output { + border-top: none !important; + + padding: 10px; + height: 59px; + overflow: auto; + + -webkit-border-bottom-right-radius: 5px; + -webkit-border-bottom-left-radius: 5px; + -moz-border-radius-bottomright: 5px; + -moz-border-radius-bottomleft: 5px; + 
border-bottom-right-radius: 5px; + border-bottom-left-radius: 5px; +} +div#learn .output pre { + padding: 0; + + -webkit-border-radius: 0; + -moz-border-radius: 0; + border-radius: 0; +} +div#learn .input, +div#learn .input textarea, +div#learn .output, +div#learn .output pre { + background: #FFFFD8; +} +div#learn .input, +div#learn .output { + border: 1px solid #375EAB; +} +div#learn .buttons { + float: right; + padding: 20px 0 10px 0; + text-align: right; +} +div#learn .buttons a { + height: 16px; + margin-left: 5px; + padding: 10px; +} +div#learn .toys { + margin-top: 8px; +} +div#learn .toys select { + border: 1px solid #375EAB; + margin: 0; +} +div#learn .output .exit { + display: none; +} + +div#video { + max-width: 100%; +} +div#blog, +div#video { + margin-top: 40px; +} +div#blog > a, +div#blog > div, +div#blog > h2, +div#video > a, +div#video > div, +div#video > h2 { + margin-bottom: 10px; +} +div#blog .title, +div#video .title { + display: block; + font-size: 20px; +} +div#blog .when { + color: #666; + font-size: 14px; +} +div#blog .read { + text-align: right; +} + +.toggleButton { cursor: pointer; } +.toggle .collapsed { display: block; } +.toggle .expanded { display: none; } +.toggleVisible .collapsed { display: none; } +.toggleVisible .expanded { display: block; } + +table.codetable { margin-left: auto; margin-right: auto; border-style: none; } +table.codetable td { padding-right: 10px; } +hr { border-style: none; border-top: 1px solid black; } +img.gopher { + float: right; + margin-left: 10px; + margin-bottom: 10px; + z-index: -1; +} +h2 { clear: right; } + +/* example and drop-down playground */ +div.play { + padding: 0 20px 40px 20px; +} +div.play pre, +div.play textarea, +div.play .lines { + padding: 0; + margin: 0; + font-family: Menlo, monospace; + font-size: 14px; +} +div.play .input { + padding: 10px; + margin-top: 10px; + + -webkit-border-top-left-radius: 5px; + -webkit-border-top-right-radius: 5px; + -moz-border-radius-topleft: 5px; + -moz-border-radius-topright: 5px; + border-top-left-radius: 5px; + border-top-right-radius: 5px; + + overflow: hidden; +} +div.play .input textarea { + width: 100%; + height: 100%; + border: none; + outline: none; + resize: none; + + overflow: hidden; +} +div#playground .input textarea { + overflow: auto; + resize: auto; +} +div.play .output { + border-top: none !important; + + padding: 10px; + max-height: 200px; + overflow: auto; + + -webkit-border-bottom-right-radius: 5px; + -webkit-border-bottom-left-radius: 5px; + -moz-border-radius-bottomright: 5px; + -moz-border-radius-bottomleft: 5px; + border-bottom-right-radius: 5px; + border-bottom-left-radius: 5px; +} +div.play .output pre { + padding: 0; + + -webkit-border-radius: 0; + -moz-border-radius: 0; + border-radius: 0; +} +div.play .input, +div.play .input textarea, +div.play .output, +div.play .output pre { + background: #FFFFD8; +} +div.play .input, +div.play .output { + border: 1px solid #375EAB; +} +div.play .buttons { + float: right; + padding: 20px 0 10px 0; + text-align: right; +} +div.play .buttons a { + height: 16px; + margin-left: 5px; + padding: 10px; + cursor: pointer; +} +.output .stderr { + color: #933; +} +.output .system { + color: #999; +} + +/* drop-down playground */ +#playgroundButton, +div#playground { + /* start hidden; revealed by javascript */ + display: none; +} +div#playground { + position: absolute; + top: 63px; + right: 20px; + padding: 0 10px 10px 10px; + z-index: 1; + text-align: left; + background: #E0EBF5; + + border: 1px solid #B0BBC5; + border-top: 
none; + + -webkit-border-bottom-left-radius: 5px; + -webkit-border-bottom-right-radius: 5px; + -moz-border-radius-bottomleft: 5px; + -moz-border-radius-bottomright: 5px; + border-bottom-left-radius: 5px; + border-bottom-right-radius: 5px; +} +div#playground .code { + width: 520px; + height: 200px; +} +div#playground .output { + height: 100px; +} + +/* Inline runnable snippets (play.js/initPlayground) */ +#content .code pre, #content .playground pre, #content .output pre { + margin: 0; + padding: 0; + background: none; + border: none; + outline: 0px solid transparent; + overflow: auto; +} +#content .playground .number, #content .code .number { + color: #999; +} +#content .code, #content .playground, #content .output { + width: auto; + margin: 20px; + padding: 10px; + -webkit-border-radius: 5px; + -moz-border-radius: 5px; + border-radius: 5px; +} +#content .code, #content .playground { + background: #e9e9e9; +} +#content .output { + background: #202020; +} +#content .output .stdout, #content .output pre { + color: #e6e6e6; +} +#content .output .stderr, #content .output .error { + color: rgb(244, 74, 63); +} +#content .output .system, #content .output .exit { + color: rgb(255, 209, 77) +} +#content .buttons { + position: relative; + float: right; + top: -50px; + right: 30px; +} +#content .output .buttons { + top: -60px; + right: 0; + height: 0; +} +#content .buttons .kill { + display: none; + visibility: hidden; +} +a.error { + font-weight: bold; + color: white; + background-color: darkred; + border-bottom-left-radius: 4px; + border-bottom-right-radius: 4px; + border-top-left-radius: 4px; + border-top-right-radius: 4px; + padding: 2px 4px 2px 4px; /* TRBL */ +} + + +#heading-narrow { + display: none; +} + +.downloading { + background: #F9F9BE; + padding: 10px; + text-align: center; + border-radius: 5px; +} + +@media (max-width: 930px) { + #heading-wide { + display: none; + } + #heading-narrow { + display: block; + } + + .pkg-dir { + display: none; + } + + .title { + margin:5px; + } + + div#page { + box-sizing: border-box; + width:100%; + height:calc(100% - 64px); + float:right; + display:inline-block; + overflow:auto + } + + div#menu { + display: none; + } + + div#topbar { + height: 30px; + padding: 10px; + } + + a > img { + max-height:100%; + float:left; + padding-left:10px; + padding-top:2px; + } + + div#mobile-nav { + display:inline-block; + float:right; + border-bottom: 13px double white; + border-top: 4px solid white; + content:""; + height: 5px; + width:30px; + } + + div#mobile_container { + display:inline-block; + width: 100%; + padding-left:20px; + padding-top:10px; + } +} + + +@media (max-width: 760px) { + .container .left, + .container .right { + width: auto; + float: none; + } + + div#about { + max-width: 500px; + text-align: center; + } + + .title { + margin:5px; + } + + .pkg-dir { + display: none; + } + + div#page { + box-sizing: border-box; + width:100%; + height:calc(100% - 64px); + float:right; + display:inline-block; + overflow:auto + } + + div#menu { + display: none; + } + + div#topbar { + height: 30px; + padding: 10px; + } + + div#mobile-nav { + display:inline-block; + float:right; + border-bottom: 13px double white; + border-top: 4px solid white; + content:""; + height: 5px; + width:30px; + } +} + +@media (min-width: 700px) and (max-width: 1000px) { + div#menu > a { + margin: 5px 0; + font-size: 14px; + } + + div#menu > input { + font-size: 14px; + } + + .title { + margin:5px; + } + + .pkg-dir { + display: none; + } + + div#page { + box-sizing: border-box; + width:100%; + 
height:calc(100% - 64px); + float:right; + display:inline-block; + overflow:auto + } + + div#menu { + display: none; + } + + div#topbar { + height: 30px; + padding: 10px; + } + + div#mobile-nav { + display:inline-block; + float:right; + border-bottom: 13px double white; + border-top: 4px solid white; + content:""; + height: 5px; + width:30px; + } +} + +@media (max-width: 700px) { + body { + font-size: 15px; + } + + pre, + code { + font-size: 13px; + } + + div#page > .container { + padding: 0 10px; + } + + div#topbar { + padding: 10px; + } + + div#topbar > .container { + padding: 0; + } + + #heading-wide { + display: block; + } + #heading-narrow { + display: none; + } + + .top-heading { + float: none; + display: inline-block; + padding: 12px; + } + + div#menu { + padding: 0; + min-width: 0; + text-align: left; + float: left; + } + + div#menu > a, + div#menu > input { + display: block; + margin-left: 0; + margin-right: 0; + } + + div#menu > input { + width: 100%; + } + + #menu-button { + display: inline-block; + } + + p, + pre, + ul, + ol { + margin: 10px; + } + + .pkg-synopsis { + display: none; + } + + img.gopher { + display: none; + } + + .pkg-dir { + display: none; + } + + .title { + margin:5px; + } + + div#page { + box-sizing: border-box; + width:100%; + height:calc(100% - 64px); + float:right; + display:inline-block; + overflow:auto + } + + div#menu { + display: none; + } + + div#topbar { + height: 30px; + padding: 10px; + } + + div#mobile-nav { + display:inline-block; + float:right; + border-bottom: 13px double white; + border-top: 4px solid white; + content:""; + height: 5px; + width:30px; + } +} + +@media (max-width: 480px) { + #heading-wide { + display: none; + } + #heading-narrow { + display: block; + } + + .pkg-dir { + display: none; + } + + .title { + margin:5px; + } + + div#page { + box-sizing: border-box; + width:100%; + height:calc(100% - 64px); + float:right; + display:inline-block; + overflow:auto + } + + div#menu { + display: none; + } + + div#topbar { + height: 30px; + padding: 10px; + } + + div#mobile-nav { + display:inline-block; + float:right; + border-bottom: 13px double white; + border-top: 4px solid white; + content:""; + height: 5px; + width:30px; + } +} + +@media print { + pre { + background: #FFF; + border: 1px solid #BBB; + white-space: pre-wrap; + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/user_guide_example.html b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/user_guide_example.html new file mode 100644 index 000000000..2e1f042fe --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/doc-src/aws-godoc/templates/user_guide_example.html @@ -0,0 +1,82 @@ + + + +

    AWS SDK for Go

    +

+ aws-sdk-go is the official AWS SDK for the Go programming language. + + Check out our release notes for information about the latest bug fixes, updates, and features added to the SDK. +

    +

    Installing

    +

+ If you are using Go 1.5 with the GO15VENDOREXPERIMENT=1 vendoring flag, you can use the following command to get the SDK, as the SDK's runtime dependencies are vendored in the vendor folder. +

    +
     $ go get -u github.com/aws/aws-sdk-go 
    +

+ Otherwise, you'll need to tell Go to get the SDK and all of its dependencies: +

    +
     $ go get -u github.com/aws/aws-sdk-go/...  
    +

    Configuring Credentials

    +

    + Before using the SDK, ensure that you've configured credentials. The best way to configure credentials on a development machine is to use the ~/.aws/credentials file, which might look like: +

    +
    +				[default]
    +				aws_access_key_id = AKID1234567890
    +				aws_secret_access_key = MY-SECRET-KEY
    +			
    +

    + You can learn more about the credentials file from this blog post. + + Alternatively, you can set the following environment variables: +

    +
    +				AWS_ACCESS_KEY_ID=AKID1234567890
    +				AWS_SECRET_ACCESS_KEY=MY-SECRET-KEY
    +			
    +

AWS CLI config file (~/.aws/config)

    +

+ The AWS SDK for Go does not support the AWS CLI's config file, and will not use any contents from this file. The SDK supports only the shared credentials file (~/.aws/credentials). #384 tracks this feature request discussion. +

    +
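+ Because the config file is not read, profiles must come from the shared credentials file. As a minimal sketch, assuming a profile named myprofile exists in ~/.aws/credentials (the profile name here is a placeholder), a specific profile can be selected explicitly with the credentials package:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// Read the named profile from the shared credentials file. An empty
	// filename falls back to the default ~/.aws/credentials location.
	// "myprofile" is a placeholder profile name.
	creds := credentials.NewSharedCredentials("", "myprofile")

	svc := ec2.New(session.New(), &aws.Config{
		Region:      aws.String("us-west-2"),
		Credentials: creds,
	})

	// The client is now configured to sign requests with the selected profile.
	fmt.Println("configured region:", aws.StringValue(svc.Config.Region))
}
```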

    Using the Go SDK

    +

+ To use a service in the SDK, create a service client by calling the New() function. Once you have a service client, you can call its API operations, each of which returns response data and a possible error. + + To list a set of instance IDs from EC2, you could run: +

    +
    +				package main
    +
    +				import (
    +						"fmt"
    +
    +						"github.com/aws/aws-sdk-go/aws"
    +						"github.com/aws/aws-sdk-go/aws/session"
    +						"github.com/aws/aws-sdk-go/service/ec2"
    +				)
    +
    +				func main() {
    +						// Create an EC2 service object in the "us-west-2" region
    +						// Note that you can also configure your region globally by
    +						// exporting the AWS_REGION environment variable
    +						svc := ec2.New(session.New(), &aws.Config{Region: aws.String("us-west-2")})
    +
    +						// Call the DescribeInstances Operation
    +						resp, err := svc.DescribeInstances(nil)
    +						if err != nil {
    +								panic(err)
    +						}
    +
    +						// resp has all of the response data, pull out instance IDs:
    +						fmt.Println("> Number of reservation sets: ", len(resp.Reservations))
    +						for idx, res := range resp.Reservations {
    +								fmt.Println("  > Number of instances: ", len(res.Instances))
    +								for _, inst := range resp.Reservations[idx].Instances {
    +										fmt.Println("    - Instance ID: ", *inst.InstanceId)
    +								}
    +						}
    +				}
    +			
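+ Paged operations follow the same pattern. As a rough sketch, assuming the generated DescribeInstancesPages helper is available on the client above, the reservations could instead be walked one page at a time:

```go
// The callback is invoked once per page of results; returning true requests
// the next page, while returning false stops the iteration early.
err := svc.DescribeInstancesPages(nil,
	func(page *ec2.DescribeInstancesOutput, lastPage bool) bool {
		fmt.Println("> Reservation sets in page:", len(page.Reservations))
		return !lastPage
	})
if err != nil {
	panic(err)
}
```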
    + + + diff --git a/vendor/github.com/aws/aws-sdk-go/doc-src/plugin/plugin.rb b/vendor/github.com/aws/aws-sdk-go/doc-src/plugin/plugin.rb new file mode 100644 index 000000000..988270747 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/doc-src/plugin/plugin.rb @@ -0,0 +1,187 @@ +require 'yard' +require 'yard-go' + +module GoLinksHelper + def signature(obj, link = true, show_extras = true, full_attr_name = true) + case obj + when YARDGo::CodeObjects::FuncObject + if link && obj.has_tag?(:service_operation) + ret = signature_types(obj, !link) + args = obj.parameters.map {|m| m[0].split(/\s+/).last }.join(", ") + line = "#{obj.name}(#{args}) #{ret}" + return link ? linkify(obj, line) : line + end + end + + super(obj, link, show_extras, full_attr_name) + end + + def html_syntax_highlight(source, type = nil) + src = super(source, type || :go) + object.has_tag?(:service_operation) ? link_types(src) : src + end +end + +YARD::Templates::Helpers::HtmlHelper.send(:prepend, GoLinksHelper) +YARD::Templates::Engine.register_template_path(File.dirname(__FILE__) + '/templates') + +YARD::Parser::SourceParser.after_parse_list do + YARD::Registry.all(:struct).each do |obj| + if obj.file =~ /\/?service\/(.+?)\/(service|api)\.go$/ + obj.add_tag YARD::Tags::Tag.new(:service, $1) + obj.groups = ["Constructor Functions", "Service Operations", "Request Methods", "Pagination Methods"] + end + end + + YARD::Registry.all(:method).each do |obj| + if obj.file =~ /service\/.+?\/api\.go$/ && obj.scope == :instance + if obj.name.to_s =~ /Pages$/ + obj.group = "Pagination Methods" + opname = obj.name.to_s.sub(/Pages$/, '') + obj.docstring = <<-eof +#{obj.name} iterates over the pages of a {#{opname} #{opname}()} operation, calling the `fn` +function callback with the response data in each page. To stop iterating, return `false` from +the function callback. + +@note This operation can generate multiple requests to a service. +@example Iterating over at most 3 pages of a #{opname} operation + pageNum := 0 + err := client.#{obj.name}(params, func(page *#{obj.parent.parent.name}.#{obj.parameters[1][0].split("*").last}, lastPage bool) bool { + pageNum++ + fmt.Println(page) + return pageNum <= 3 + }) +@see #{opname} +eof + obj.add_tag YARD::Tags::Tag.new(:paginator, '') + elsif obj.name.to_s =~ /Request$/ + obj.group = "Request Methods" + obj.signature = obj.name.to_s + obj.parameters = [] + opname = obj.name.to_s.sub(/Request$/, '') + obj.docstring = <<-eof +#{obj.name} generates a {aws/request.Request} object representing the client request for +the {#{opname} #{opname}()} operation. The `output` return value can be used to capture +response data after {aws/request.Request.Send Request.Send()} is called. + +Creating a request object using this method should be used when you want to inject +custom logic into the request lifecycle using a custom handler, or if you want to +access properties on the request object before or after sending the request. If +you just want the service response, call the {#{opname} service operation method} +directly instead. + +@note You must call the {aws/request.Request.Send Send()} method on the returned + request object in order to execute the request. 
+@example Sending a request using the #{obj.name}() method + req, resp := client.#{obj.name}(params) + err := req.Send() + + if err == nil { // resp is now filled + fmt.Println(resp) + } +eof + obj.add_tag YARD::Tags::Tag.new(:request_method, '') + else + obj.group = "Service Operations" + obj.add_tag YARD::Tags::Tag.new(:service_operation, '') + if ex = obj.tag(:example) + ex.name = "Calling the #{obj.name} operation" + end + end + end + end + + apply_docs +end + +def apply_docs + svc_pkg = YARD::Registry.at('service') + return if svc_pkg.nil? + + pkgs = svc_pkg.children.select {|t| t.type == :package } + pkgs.each do |pkg| + svc = pkg.children.find {|t| t.has_tag?(:service) } + ctor = P(svc, ".New") + svc_name = ctor.source[/ServiceName:\s*"(.+?)",/, 1] + api_ver = ctor.source[/APIVersion:\s*"(.+?)",/, 1] + log.progress "Parsing service documentation for #{svc_name} (#{api_ver})" + file = Dir.glob("models/apis/#{svc_name}/#{api_ver}/docs-2.json").sort.last + next if file.nil? + + next if svc.nil? + exmeth = svc.children.find {|s| s.has_tag?(:service_operation) } + pkg.docstring += <<-eof + +@example Sending a request using the {#{svc.name}} client + client := #{pkg.name}.New(nil) + params := &#{pkg.name}.#{exmeth.parameters.first[0].split("*").last}{...} + resp, err := client.#{exmeth.name}(params) +@see #{svc.name} +@version #{api_ver} +eof + + ctor.docstring += <<-eof + +@example Constructing a client using default configuration + client := #{pkg.name}.New(nil) + +@example Constructing a client with custom configuration + config := aws.NewConfig().WithRegion("us-west-2") + client := #{pkg.name}.New(config) +eof + + json = JSON.parse(File.read(file)) + if svc + apply_doc(svc, json["service"]) + end + + json["operations"].each do |op, doc| + if doc && obj = svc.children.find {|t| t.name.to_s.downcase == op.downcase } + apply_doc(obj, doc) + end + end + + json["shapes"].each do |shape, data| + shape = shape_name(shape) + if obj = pkg.children.find {|t| t.name.to_s.downcase == shape.downcase } + apply_doc(obj, data["base"]) + end + + data["refs"].each do |refname, doc| + refshape, member = *refname.split("$") + refshape = shape_name(refshape) + if refobj = pkg.children.find {|t| t.name.to_s.downcase == refshape.downcase } + if m = refobj.children.find {|t| t.name.to_s.downcase == member.downcase } + apply_doc(m, doc || data["base"]) + end + end + end if data["refs"] + end + end +end + +def apply_doc(obj, doc) + tags = obj.docstring.tags || [] + obj.docstring = clean_docstring(doc) + tags.each {|t| obj.docstring.add_tag(t) } +end + +def shape_name(shape) + shape.sub(/Request$/, "Input").sub(/Response$/, "Output") +end + +def clean_docstring(docs) + return nil unless docs + docs = docs.gsub(//m, '') + docs = docs.gsub(/.+?<\/fullname?>/m, '') + docs = docs.gsub(/.+?<\/examples?>/m, '') + docs = docs.gsub(/\s*<\/note>/m, '') + docs = docs.gsub(/(.+?)<\/a>/, '\1') + docs = docs.gsub(/(.+?)<\/note>/m) do + text = $1.gsub(/<\/?p>/, '') + "
    Note: #{text}
    " + end + docs = docs.gsub(/\{(.+?)\}/, '`{\1}`') + docs = docs.gsub(/\s+/, ' ').strip + docs == '' ? nil : docs +end diff --git a/vendor/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/layout/html/footer.erb b/vendor/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/layout/html/footer.erb new file mode 100644 index 000000000..d5839b799 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/layout/html/footer.erb @@ -0,0 +1,31 @@ +
    + + + + + + + + + + + diff --git a/vendor/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/module/html/client.erb b/vendor/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/module/html/client.erb new file mode 100644 index 000000000..aa6831c40 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/module/html/client.erb @@ -0,0 +1,4 @@ +

    Client Structure collapse

    +
      + <%= yieldall :item => @client %> +
    diff --git a/vendor/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/module/html/item_summary.erb b/vendor/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/module/html/item_summary.erb new file mode 100644 index 000000000..f9ad2eb92 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/module/html/item_summary.erb @@ -0,0 +1,28 @@ +<% if !@item.has_tag?(:paginator) %> +
  • + <%= signature(@item) %> + <% if object != @item.namespace %> + + <%= @item.namespace.type == :class ? 'inherited' : (@item.scope == :class ? 'extended' : 'included') %> + from <%= linkify @item, object.relative_path(@item.namespace) %> + + <% end %> + <% if @item.type == :enum %>enum<% end %> + <% if @item.type == :bare_struct || @item.type == :struct %>struct<% end %> + <% if @item.has_tag?(:service) %>client<% end %> + <% if @item.has_tag?(:service_operation) %>operation<% end %> + <% if @item.type == :interface %>interface<% end %> + <% if @item.has_tag?(:readonly) %>readonly<% end %> + <% if @item.has_tag?(:writeonly) %>writeonly<% end %> + <% if @item.visibility != :public %><%= @item.visibility %><% end %> + <% if @item.has_tag?(:abstract) %>interface<% end %> + <% if @item.has_tag?(:deprecated) %>deprecated<% end %> + <% if @item.has_tag?(:api) && @item.tag(:api).text == 'private' %>private<% end %> + + <% if @item.has_tag?(:deprecated) %> + Deprecated. <%= htmlify_line @item.tag(:deprecated).text %> + <% else %> + <%= htmlify_line docstring_summary(@item) %> + <% end %> +
  • +<% end %> diff --git a/vendor/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/module/html/setup.rb b/vendor/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/module/html/setup.rb new file mode 100644 index 000000000..8a8b49b98 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/module/html/setup.rb @@ -0,0 +1,9 @@ +def init + super + sections.place(:client, [:item_summary]).before(:constant_summary) +end + +def client + @client = object.children.find {|c| c.has_tag?(:service) } + erb(:client) if @client +end diff --git a/vendor/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/package/html/setup.rb b/vendor/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/package/html/setup.rb new file mode 100644 index 000000000..ff777d292 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/package/html/setup.rb @@ -0,0 +1,8 @@ +def type_summary + @items = object.children. + select {|c| c.type == :bare_struct || c.type == :struct || c.type == :enum }. + reject {|c| c.has_tag?(:service) }. + sort_by {|c| c.name.to_s } + @name = "Type" + erb :list_summary +end diff --git a/vendor/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/struct/html/paginators.erb b/vendor/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/struct/html/paginators.erb new file mode 100644 index 000000000..053f762ce --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/struct/html/paginators.erb @@ -0,0 +1,4 @@ +
    +

    Pagination Methods

    +

    <%= @items.map {|pkg| link_object(pkg, pkg.name) }.join(" ") %>

    +
    diff --git a/vendor/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/struct/html/request_methods.erb b/vendor/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/struct/html/request_methods.erb new file mode 100644 index 000000000..1edbb66f7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/struct/html/request_methods.erb @@ -0,0 +1,4 @@ +
    +

    Request Methods

    +

    <%= @items.map {|pkg| link_object(pkg, pkg.name) }.join(" ") %>

    +
diff --git a/vendor/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/struct/html/setup.rb b/vendor/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/struct/html/setup.rb
new file mode 100644
index 000000000..9038945a4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/struct/html/setup.rb
@@ -0,0 +1,20 @@
+def init
+  super
+  sections.place(:request_methods, :paginators).after(:method_summary)
+end
+
+def groups(list, type = "Method")
+  super(list.reject {|o| o.has_tag?(:paginator) || o.has_tag?(:request_method) }, type)
+end
+
+def paginators
+  @items = object.children.select {|o| o.has_tag?(:paginator) }
+  return if @items.size == 0
+  erb(:paginators)
+end
+
+def request_methods
+  @items = object.children.select {|o| o.has_tag?(:request_method) }
+  return if @items.size == 0
+  erb(:request_methods)
+end
diff --git a/vendor/github.com/aws/aws-sdk-go/example/service/cloudfront/signCookies/README.md b/vendor/github.com/aws/aws-sdk-go/example/service/cloudfront/signCookies/README.md
new file mode 100644
index 000000000..776f22a94
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/example/service/cloudfront/signCookies/README.md
@@ -0,0 +1,12 @@
+# Example
+
+This example shows how the CloudFront CookieSigner can be used to generate signed cookies that provide short-term access to restricted resources fronted by CloudFront.
+
+# Usage
+Makes a request for an object using CloudFront cookie signing, and outputs the contents of the object to stdout.
+
+```sh
+go run signCookies.go -file -id -r -g
+```
+
diff --git a/vendor/github.com/aws/aws-sdk-go/example/service/cloudfront/signCookies/signCookies.go b/vendor/github.com/aws/aws-sdk-go/example/service/cloudfront/signCookies/signCookies.go
new file mode 100644
index 000000000..0577c03e2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/example/service/cloudfront/signCookies/signCookies.go
@@ -0,0 +1,77 @@
+package main
+
+import (
+	"flag"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"time"
+
+	"github.com/aws/aws-sdk-go/service/cloudfront/sign"
+)
+
+// Makes a request for an object using CloudFront cookie signing, and outputs
+// the contents of the object to stdout.
+//
+// Usage example:
+// go run signCookies.go -file -id -r -g
+func main() {
+	var keyFile string  // Private key PEM file
+	var keyID string    // Key pair ID of CloudFront key pair
+	var resource string // CloudFront resource pattern
+	var object string   // S3 object fronted by CloudFront
+
+	flag.StringVar(&keyFile, "file", "", "private key file")
+	flag.StringVar(&keyID, "id", "", "key pair id")
+	flag.StringVar(&resource, "r", "", "resource to request")
+	flag.StringVar(&object, "g", "", "object to get")
+	flag.Parse()
+
+	// Load the PEM file into memory so it can be used by the signer
+	privKey, err := sign.LoadPEMPrivKeyFile(keyFile)
+	if err != nil {
+		fmt.Println("failed to load key,", err)
+		return
+	}
+
+	// Create the new CookieSigner to get signed cookies for CloudFront
+	// resource requests
+	signer := sign.NewCookieSigner(keyID, privKey)
+
+	// Get the cookies for the resource. These will be used
+	// to make the requests with
+	cookies, err := signer.Sign(resource, time.Now().Add(1*time.Hour))
+	if err != nil {
+		fmt.Println("failed to sign cookies", err)
+		return
+	}
+
+	// Use the cookies in an http.Client to show how they allow the client
+	// to request resources from CloudFront.
+	req, err := http.NewRequest("GET", object, nil)
+	if err != nil {
+		fmt.Println("failed to create request,", err)
+		return
+	}
+	fmt.Println("Cookies:")
+	for _, c := range cookies {
+		fmt.Printf("%s=%s;\n", c.Name, c.Value)
+		req.AddCookie(c)
+	}
+
+	// Send and handle the response. For a successful response the object's
+	// content will be written to stdout. The same process could be applied
+	// to an http service writing cookies to the response by using
+	// http.SetCookie(w, c) on the ResponseWriter.
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		fmt.Println("failed to send request", err)
+		return
+	}
+	defer resp.Body.Close()
+
+	b, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		fmt.Println("failed to read requested body", err)
+		return
+	}
+
+	fmt.Println("Response:", resp.Status)
+	fmt.Println(string(b))
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/example/service/s3/listObjects/README.md b/vendor/github.com/aws/aws-sdk-go/example/service/s3/listObjects/README.md
new file mode 100644
index 000000000..6a5f2c9be
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/example/service/s3/listObjects/README.md
@@ -0,0 +1,27 @@
+# Example
+
+listObjects is an example using the AWS SDK for Go to list object keys in an S3 bucket.
+
+
+# Usage
+
+The example uses the bucket name provided, and lists all object keys in the bucket.
+
+```sh
+go run listObjects.go
+```
+
+Output:
+```
+Page, 0
+Object: myKey
+Object: mykey.txt
+Object: resources/0001/item-01
+Object: resources/0001/item-02
+Object: resources/0001/item-03
+Object: resources/0002/item-01
+Object: resources/0002/item-02
+Object: resources/0002/item-03
+Object: resources/0002/item-04
+Object: resources/0002/item-05
+```
diff --git a/vendor/github.com/aws/aws-sdk-go/example/service/s3/listObjects/listObjects.go b/vendor/github.com/aws/aws-sdk-go/example/service/s3/listObjects/listObjects.go
new file mode 100644
index 000000000..4c114024f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/example/service/s3/listObjects/listObjects.go
@@ -0,0 +1,36 @@
+package main
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/s3"
+)
+
+// Lists all objects in a bucket using pagination
+//
+// Usage:
+// go run listObjects.go
+func main() {
+	sess := session.New()
+
+	svc := s3.New(sess)
+
+	i := 0
+	err := svc.ListObjectsPages(&s3.ListObjectsInput{
+		Bucket: &os.Args[1],
+	}, func(p *s3.ListObjectsOutput, last bool) (shouldContinue bool) {
+		fmt.Println("Page,", i)
+		i++
+
+		for _, obj := range p.Contents {
+			fmt.Println("Object:", *obj.Key)
+		}
+		return true
+	})
+	if err != nil {
+		fmt.Println("failed to list objects", err)
+		return
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/example/service/s3/listObjectsConcurrently/README.md b/vendor/github.com/aws/aws-sdk-go/example/service/s3/listObjectsConcurrently/README.md
new file mode 100644
index 000000000..5d99711df
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/example/service/s3/listObjectsConcurrently/README.md
@@ -0,0 +1,13 @@
+## Example
+
+listS3EncryptedObjects is an example that uses the AWS SDK for Go to concurrently list the encrypted objects in the S3 buckets owned by an account.
+
+## Usage
+
+The example's `accounts` string slice contains a list of the SharedCredentials profiles which will be used to look up the buckets owned by each profile. Each bucket's objects will be queried.
+
+```
+AWS_REGION=us-east-1 go run listObjectsConcurrently.go
+```
+
diff --git a/vendor/github.com/aws/aws-sdk-go/example/service/s3/listObjectsConcurrently/listObjectsConcurrently.go b/vendor/github.com/aws/aws-sdk-go/example/service/s3/listObjectsConcurrently/listObjectsConcurrently.go
new file mode 100644
index 000000000..33c855e06
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/example/service/s3/listObjectsConcurrently/listObjectsConcurrently.go
@@ -0,0 +1,228 @@
+package main
+
+import (
+	"fmt"
+	"os"
+	"sort"
+	"sync"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/s3"
+)
+
+func exit(msg ...interface{}) {
+	fmt.Fprintln(os.Stderr, msg...)
+	os.Exit(1)
+}
+
+// Lists all encrypted objects owned by an account. The `accounts` string
+// slice contains a list of profiles to use.
+//
+// Usage:
+// go run listObjectsConcurrently.go
+func main() {
+	accounts := []string{"default", "default2", "otherprofile"}
+
+	// Spin off a worker for each account to retrieve that account's buckets.
+	bucketCh := make(chan *Bucket, 5)
+	var wg sync.WaitGroup
+	for _, acc := range accounts {
+		wg.Add(1)
+		go func(acc string) {
+			sess := session.New(&aws.Config{Credentials: credentials.NewSharedCredentials("", acc)})
+			if err := getAccountBuckets(sess, bucketCh, acc); err != nil {
+				fmt.Fprintf(os.Stderr, "failed to get account %s's bucket info, %v\n", acc, err)
+			}
+			wg.Done()
+		}(acc)
+	}
+	// Spin off a goroutine which will wait until all account buckets have been collected and
+	// added to the bucketCh. Close the bucketCh so the for range below will exit once all
+	// bucket info is printed.
+	go func() {
+		wg.Wait()
+		close(bucketCh)
+	}()
+
+	// Receive from the bucket channel, printing the information for each bucket to the console
+	// once the bucketCh channel is drained.
+	buckets := []*Bucket{}
+	for b := range bucketCh {
+		buckets = append(buckets, b)
+	}
+
+	sortBuckets(buckets)
+	for _, b := range buckets {
+		if b.Error != nil {
+			fmt.Printf("Bucket %s, owned by: %s, failed: %v\n", b.Name, b.Owner, b.Error)
+			continue
+		}
+
+		encObjs := b.encryptedObjects()
+		fmt.Printf("Bucket: %s, owned by: %s, total objects: %d, failed objects: %d, encrypted objects: %d\n",
+			b.Name, b.Owner, len(b.Objects), len(b.ErrObjects), len(encObjs))
+		if len(encObjs) > 0 {
+			for _, encObj := range encObjs {
+				fmt.Printf("\t%s %s:%s/%s\n", encObj.EncryptionType, b.Region, b.Name, encObj.Key)
+			}
+		}
+	}
+}
+
+func sortBuckets(buckets []*Bucket) {
+	s := sortableBuckets(buckets)
+	sort.Sort(s)
+}
+
+type sortableBuckets []*Bucket
+
+func (s sortableBuckets) Len() int      { return len(s) }
+func (s sortableBuckets) Swap(a, b int) { s[a], s[b] = s[b], s[a] }
+func (s sortableBuckets) Less(a, b int) bool {
+	if s[a].Owner == s[b].Owner && s[a].Name < s[b].Name {
+		return true
+	}
+
+	if s[a].Owner < s[b].Owner {
+		return true
+	}
+
+	return false
+}
+
+func getAccountBuckets(sess *session.Session, bucketCh chan<- *Bucket, owner string) error {
+	svc := s3.New(sess)
+	buckets, err := listBuckets(svc)
+	if err != nil {
+		return fmt.Errorf("failed to list buckets, %v", err)
+	}
+	for _, bucket := range buckets {
+		bucket.Owner = owner
+		if bucket.Error != nil {
+			continue
+		}
+
+		bckSvc := s3.New(sess, &aws.Config{
+			Region:      aws.String(bucket.Region),
+			Credentials: svc.Config.Credentials,
+		})
+		bucketDetails(bckSvc, bucket)
+		bucketCh <- bucket
+	}
+
+	return nil
+}
+
+func bucketDetails(svc *s3.S3, bucket *Bucket) {
+	objs, errObjs, err := listBucketObjects(svc, bucket.Name)
+	if err != nil {
+		bucket.Error = err
+	} else {
+		bucket.Objects = objs
+		bucket.ErrObjects = errObjs
+	}
+}
+
+// An Object provides details of an S3 object
+type Object struct {
+	Bucket         string
+	Key            string
+	Encrypted      bool
+	EncryptionType string
+}
+
+// An ErrObject provides details of the error that occurred while retrieving
+// an object's status.
+type ErrObject struct { + Bucket string + Key string + Error error +} + +// A Bucket provides details about a bucket and its objects +type Bucket struct { + Owner string + Name string + CreationDate time.Time + Region string + Objects []Object + Error error + ErrObjects []ErrObject +} + +func (b *Bucket) encryptedObjects() []Object { + encObjs := []Object{} + for _, obj := range b.Objects { + if obj.Encrypted { + encObjs = append(encObjs, obj) + } + } + return encObjs +} + +func listBuckets(svc *s3.S3) ([]*Bucket, error) { + res, err := svc.ListBuckets(&s3.ListBucketsInput{}) + if err != nil { + return nil, err + } + + buckets := make([]*Bucket, len(res.Buckets)) + for i, b := range res.Buckets { + buckets[i] = &Bucket{ + Name: *b.Name, + CreationDate: *b.CreationDate, + } + + locRes, err := svc.GetBucketLocation(&s3.GetBucketLocationInput{ + Bucket: b.Name, + }) + if err != nil { + buckets[i].Error = err + continue + } + + if locRes.LocationConstraint == nil { + buckets[i].Region = "us-east-1" + } else { + buckets[i].Region = *locRes.LocationConstraint + } + } + + return buckets, nil +} + +func listBucketObjects(svc *s3.S3, bucket string) ([]Object, []ErrObject, error) { + listRes, err := svc.ListObjects(&s3.ListObjectsInput{ + Bucket: &bucket, + }) + if err != nil { + return nil, nil, err + } + + objs := make([]Object, 0, len(listRes.Contents)) + errObjs := []ErrObject{} + for _, listObj := range listRes.Contents { + objData, err := svc.HeadObject(&s3.HeadObjectInput{ + Bucket: &bucket, + Key: listObj.Key, + }) + + if err != nil { + errObjs = append(errObjs, ErrObject{Bucket: bucket, Key: *listObj.Key, Error: err}) + continue + } + + obj := Object{Bucket: bucket, Key: *listObj.Key} + if objData.ServerSideEncryption != nil { + obj.Encrypted = true + obj.EncryptionType = *objData.ServerSideEncryption + } + + objs = append(objs, obj) + } + + return objs, errObjs, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/acm/2015-12-08/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/acm/2015-12-08/api-2.json new file mode 100644 index 000000000..f503c4f6f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/acm/2015-12-08/api-2.json @@ -0,0 +1,495 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-12-08", + "endpointPrefix":"acm", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"ACM", + "serviceFullName":"AWS Certificate Manager", + "signatureVersion":"v4", + "targetPrefix":"CertificateManager" + }, + "operations":{ + "AddTagsToCertificate":{ + "name":"AddTagsToCertificate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddTagsToCertificateRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidArnException"}, + {"shape":"InvalidTagException"}, + {"shape":"TooManyTagsException"} + ] + }, + "DeleteCertificate":{ + "name":"DeleteCertificate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteCertificateRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"}, + {"shape":"InvalidArnException"} + ] + }, + "DescribeCertificate":{ + "name":"DescribeCertificate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeCertificateRequest"}, + "output":{"shape":"DescribeCertificateResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidArnException"} + ] + }, + "GetCertificate":{ + "name":"GetCertificate", + "http":{ + "method":"POST", + "requestUri":"/" + }, 
+ "input":{"shape":"GetCertificateRequest"}, + "output":{"shape":"GetCertificateResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"RequestInProgressException"}, + {"shape":"InvalidArnException"} + ] + }, + "ListCertificates":{ + "name":"ListCertificates", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListCertificatesRequest"}, + "output":{"shape":"ListCertificatesResponse"} + }, + "ListTagsForCertificate":{ + "name":"ListTagsForCertificate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForCertificateRequest"}, + "output":{"shape":"ListTagsForCertificateResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidArnException"} + ] + }, + "RemoveTagsFromCertificate":{ + "name":"RemoveTagsFromCertificate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveTagsFromCertificateRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidArnException"}, + {"shape":"InvalidTagException"} + ] + }, + "RequestCertificate":{ + "name":"RequestCertificate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RequestCertificateRequest"}, + "output":{"shape":"RequestCertificateResponse"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"InvalidDomainValidationOptionsException"} + ] + }, + "ResendValidationEmail":{ + "name":"ResendValidationEmail", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResendValidationEmailRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidStateException"}, + {"shape":"InvalidArnException"}, + {"shape":"InvalidDomainValidationOptionsException"} + ] + } + }, + "shapes":{ + "AddTagsToCertificateRequest":{ + "type":"structure", + "required":[ + "CertificateArn", + "Tags" + ], + "members":{ + "CertificateArn":{"shape":"Arn"}, + "Tags":{"shape":"TagList"} + } + }, + "Arn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:[\\w+=/,.@-]+:[\\w+=/,.@-]+:[\\w+=/,.@-]*:[0-9]+:[\\w+=,.@-]+(/[\\w+=/,.@-]+)*" + }, + "CertificateBody":{ + "type":"string", + "max":524288, + "min":1, + "pattern":"-{5}BEGIN CERTIFICATE-{5}\\u000D?\\u000A([A-Za-z0-9/+]{64}\\u000D?\\u000A)*[A-Za-z0-9/+]{1,64}={0,2}\\u000D?\\u000A-{5}END CERTIFICATE-{5}(\\u000D?\\u000A)?" + }, + "CertificateChain":{ + "type":"string", + "max":2097152, + "min":1, + "pattern":"(-{5}BEGIN CERTIFICATE-{5}\\u000D?\\u000A([A-Za-z0-9/+]{64}\\u000D?\\u000A)*[A-Za-z0-9/+]{1,64}={0,2}\\u000D?\\u000A-{5}END CERTIFICATE-{5}\\u000D?\\u000A)*-{5}BEGIN CERTIFICATE-{5}\\u000D?\\u000A([A-Za-z0-9/+]{64}\\u000D?\\u000A)*[A-Za-z0-9/+]{1,64}={0,2}\\u000D?\\u000A-{5}END CERTIFICATE-{5}(\\u000D?\\u000A)?" 
+ }, + "CertificateDetail":{ + "type":"structure", + "members":{ + "CertificateArn":{"shape":"Arn"}, + "DomainName":{"shape":"DomainNameString"}, + "SubjectAlternativeNames":{"shape":"DomainList"}, + "DomainValidationOptions":{"shape":"DomainValidationList"}, + "Serial":{"shape":"String"}, + "Subject":{"shape":"String"}, + "Issuer":{"shape":"String"}, + "CreatedAt":{"shape":"TStamp"}, + "IssuedAt":{"shape":"TStamp"}, + "Status":{"shape":"CertificateStatus"}, + "RevokedAt":{"shape":"TStamp"}, + "RevocationReason":{"shape":"RevocationReason"}, + "NotBefore":{"shape":"TStamp"}, + "NotAfter":{"shape":"TStamp"}, + "KeyAlgorithm":{"shape":"KeyAlgorithm"}, + "SignatureAlgorithm":{"shape":"String"}, + "InUseBy":{"shape":"InUseList"} + } + }, + "CertificateStatus":{ + "type":"string", + "enum":[ + "PENDING_VALIDATION", + "ISSUED", + "INACTIVE", + "EXPIRED", + "VALIDATION_TIMED_OUT", + "REVOKED", + "FAILED" + ] + }, + "CertificateStatuses":{ + "type":"list", + "member":{"shape":"CertificateStatus"} + }, + "CertificateSummary":{ + "type":"structure", + "members":{ + "CertificateArn":{"shape":"Arn"}, + "DomainName":{"shape":"DomainNameString"} + } + }, + "CertificateSummaryList":{ + "type":"list", + "member":{"shape":"CertificateSummary"} + }, + "DeleteCertificateRequest":{ + "type":"structure", + "required":["CertificateArn"], + "members":{ + "CertificateArn":{"shape":"Arn"} + } + }, + "DescribeCertificateRequest":{ + "type":"structure", + "required":["CertificateArn"], + "members":{ + "CertificateArn":{"shape":"Arn"} + } + }, + "DescribeCertificateResponse":{ + "type":"structure", + "members":{ + "Certificate":{"shape":"CertificateDetail"} + } + }, + "DomainList":{ + "type":"list", + "member":{"shape":"DomainNameString"}, + "max":1000, + "min":1 + }, + "DomainNameString":{ + "type":"string", + "max":253, + "min":1, + "pattern":"^(\\*\\.)?(((?!-)[A-Za-z0-9-]{0,62}[A-Za-z0-9])\\.)+((?!-)[A-Za-z0-9-]{1,62}[A-Za-z0-9])$" + }, + "DomainValidation":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainNameString"}, + "ValidationEmails":{"shape":"ValidationEmailList"}, + "ValidationDomain":{"shape":"DomainNameString"} + } + }, + "DomainValidationList":{ + "type":"list", + "member":{"shape":"DomainValidation"}, + "max":1000, + "min":1 + }, + "DomainValidationOption":{ + "type":"structure", + "required":[ + "DomainName", + "ValidationDomain" + ], + "members":{ + "DomainName":{"shape":"DomainNameString"}, + "ValidationDomain":{"shape":"DomainNameString"} + } + }, + "DomainValidationOptionList":{ + "type":"list", + "member":{"shape":"DomainValidationOption"}, + "max":1000, + "min":1 + }, + "GetCertificateRequest":{ + "type":"structure", + "required":["CertificateArn"], + "members":{ + "CertificateArn":{"shape":"Arn"} + } + }, + "GetCertificateResponse":{ + "type":"structure", + "members":{ + "Certificate":{"shape":"CertificateBody"}, + "CertificateChain":{"shape":"CertificateChain"} + } + }, + "IdempotencyToken":{ + "type":"string", + "max":32, + "min":1, + "pattern":"\\w+" + }, + "InUseList":{ + "type":"list", + "member":{"shape":"String"} + }, + "InvalidArnException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "InvalidDomainValidationOptionsException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "InvalidStateException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "InvalidTagException":{ + 
"type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "KeyAlgorithm":{ + "type":"string", + "enum":[ + "RSA_2048", + "EC_prime256v1" + ] + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "ListCertificatesRequest":{ + "type":"structure", + "members":{ + "CertificateStatuses":{"shape":"CertificateStatuses"}, + "NextToken":{"shape":"NextToken"}, + "MaxItems":{"shape":"MaxItems"} + } + }, + "ListCertificatesResponse":{ + "type":"structure", + "members":{ + "NextToken":{"shape":"NextToken"}, + "CertificateSummaryList":{"shape":"CertificateSummaryList"} + } + }, + "ListTagsForCertificateRequest":{ + "type":"structure", + "required":["CertificateArn"], + "members":{ + "CertificateArn":{"shape":"Arn"} + } + }, + "ListTagsForCertificateResponse":{ + "type":"structure", + "members":{ + "Tags":{"shape":"TagList"} + } + }, + "MaxItems":{ + "type":"integer", + "max":1000, + "min":1 + }, + "NextToken":{ + "type":"string", + "max":320, + "min":1, + "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u00FF]*" + }, + "RemoveTagsFromCertificateRequest":{ + "type":"structure", + "required":[ + "CertificateArn", + "Tags" + ], + "members":{ + "CertificateArn":{"shape":"Arn"}, + "Tags":{"shape":"TagList"} + } + }, + "RequestCertificateRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainNameString"}, + "SubjectAlternativeNames":{"shape":"DomainList"}, + "IdempotencyToken":{"shape":"IdempotencyToken"}, + "DomainValidationOptions":{"shape":"DomainValidationOptionList"} + } + }, + "RequestCertificateResponse":{ + "type":"structure", + "members":{ + "CertificateArn":{"shape":"Arn"} + } + }, + "RequestInProgressException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "ResendValidationEmailRequest":{ + "type":"structure", + "required":[ + "CertificateArn", + "Domain", + "ValidationDomain" + ], + "members":{ + "CertificateArn":{"shape":"Arn"}, + "Domain":{"shape":"DomainNameString"}, + "ValidationDomain":{"shape":"DomainNameString"} + } + }, + "ResourceInUseException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "RevocationReason":{ + "type":"string", + "enum":[ + "UNSPECIFIED", + "KEY_COMPROMISE", + "CA_COMPROMISE", + "AFFILIATION_CHANGED", + "SUPERCEDED", + "CESSATION_OF_OPERATION", + "CERTIFICATE_HOLD", + "REMOVE_FROM_CRL", + "PRIVILEGE_WITHDRAWN", + "A_A_COMPROMISE" + ] + }, + "String":{"type":"string"}, + "TStamp":{"type":"timestamp"}, + "Tag":{ + "type":"structure", + "required":["Key"], + "members":{ + "Key":{"shape":"TagKey"}, + "Value":{"shape":"TagValue"} + } + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@]*" + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":10, + "min":1 + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0, + "pattern":"[\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@]*" + }, + "TooManyTagsException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "ValidationEmailList":{ + "type":"list", + "member":{"shape":"String"} + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/acm/2015-12-08/docs-2.json 
b/vendor/github.com/aws/aws-sdk-go/models/apis/acm/2015-12-08/docs-2.json new file mode 100644 index 000000000..43820631a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/acm/2015-12-08/docs-2.json @@ -0,0 +1,335 @@ +{ + "version": "2.0", + "service": "AWS Certificate Manager

    Welcome to the AWS Certificate Manager (ACM) Command Reference. This guide provides descriptions, syntax, and usage examples for each ACM command. You can use AWS Certificate Manager to request ACM Certificates for your AWS-based websites and applications. For general information about using ACM and for more information about using the console, see the AWS Certificate Manager User Guide. For more information about using the ACM API, see the AWS Certificate Manager API Reference.

    ", + "operations": { + "AddTagsToCertificate": "

    Adds one or more tags to an ACM Certificate. Tags are labels that you can use to identify and organize your AWS resources. Each tag consists of a key and an optional value. You specify the certificate on input by its Amazon Resource Name (ARN). You specify the tag by using a key-value pair.

    You can apply a tag to just one certificate if you want to identify a specific characteristic of that certificate, or you can apply the same tag to multiple certificates if you want to filter for a common relationship among those certificates. Similarly, you can apply the same tag to multiple resources if you want to specify a relationship among those resources. For example, you can add the same tag to an ACM Certificate and an Elastic Load Balancing load balancer to indicate that they are both used by the same website. For more information, see Tagging ACM Certificates.

    To remove one or more tags, use the RemoveTagsFromCertificate action. To view all of the tags that have been applied to the certificate, use the ListTagsForCertificate action.

    ", + "DeleteCertificate": "

    Deletes an ACM Certificate and its associated private key. If this action succeeds, the certificate no longer appears in the list of ACM Certificates that can be displayed by calling the ListCertificates action or be retrieved by calling the GetCertificate action. The certificate will not be available for use by other AWS services.

    You cannot delete an ACM Certificate that is being used by another AWS service. To delete a certificate that is in use, the certificate association must first be removed.

    ", + "DescribeCertificate": "

    Returns a list of the fields contained in the specified ACM Certificate. For example, this action returns the certificate status, a flag that indicates whether the certificate is associated with any other AWS service, and the date at which the certificate request was created. You specify the ACM Certificate on input by its Amazon Resource Name (ARN).

    ", + "GetCertificate": "

    Retrieves an ACM Certificate and certificate chain for the certificate specified by an ARN. The chain is an ordered list of certificates that contains the root certificate, intermediate certificates of subordinate CAs, and the ACM Certificate. The certificate and certificate chain are base64 encoded. If you want to decode the certificate chain to see the individual certificate fields, you can use OpenSSL.

    Currently, ACM Certificates can be used only with Elastic Load Balancing and Amazon CloudFront.

    ", + "ListCertificates": "

    Retrieves a list of ACM Certificates and the domain name for each. You can optionally filter the list to return only the certificates that match the specified status.

    ", + "ListTagsForCertificate": "

    Lists the tags that have been applied to the ACM Certificate. Use the certificate ARN to specify the certificate. To add a tag to an ACM Certificate, use the AddTagsToCertificate action. To delete a tag, use the RemoveTagsFromCertificate action.

    ", + "RemoveTagsFromCertificate": "

    Remove one or more tags from an ACM Certificate. A tag consists of a key-value pair. If you do not specify the value portion of the tag when calling this function, the tag will be removed regardless of value. If you specify a value, the tag is removed only if it is associated with the specified value.

    To add tags to a certificate, use the AddTagsToCertificate action. To view all of the tags that have been applied to a specific ACM Certificate, use the ListTagsForCertificate action.

    ", + "RequestCertificate": "

    Requests an ACM Certificate for use with other AWS services. To request an ACM Certificate, you must specify the fully qualified domain name (FQDN) for your site. You can also specify additional FQDNs if users can reach your site by using other names. For each domain name you specify, email is sent to the domain owner to request approval to issue the certificate. After receiving approval from the domain owner, the ACM Certificate is issued. For more information, see the AWS Certificate Manager User Guide .

    ", + "ResendValidationEmail": "

    Resends the email that requests domain ownership validation. The domain owner or an authorized representative must approve the ACM Certificate before it can be issued. The certificate can be approved by clicking a link in the mail to navigate to the Amazon certificate approval website and then clicking I Approve. However, the validation email can be blocked by spam filters. Therefore, if you do not receive the original mail, you can request that the mail be resent within 72 hours of requesting the ACM Certificate. If more than 72 hours have elapsed since your original request or since your last attempt to resend validation mail, you must request a new certificate.

    " + }, + "shapes": { + "AddTagsToCertificateRequest": { + "base": null, + "refs": { + } + }, + "Arn": { + "base": null, + "refs": { + "AddTagsToCertificateRequest$CertificateArn": "

    String that contains the ARN of the ACM Certificate to which the tag is to be applied. This must be of the form:

    arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012

    For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

    ", + "CertificateDetail$CertificateArn": "

    The Amazon Resource Name (ARN) of the certificate. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

    ", + "CertificateSummary$CertificateArn": "

    Amazon Resource Name (ARN) of the certificate. This is of the form:

    arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012

    For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

    ", + "DeleteCertificateRequest$CertificateArn": "

    String that contains the ARN of the ACM Certificate to be deleted. This must be of the form:

    arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012

    For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

    ", + "DescribeCertificateRequest$CertificateArn": "

    String that contains an ACM Certificate ARN. The ARN must be of the form:

    arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012

    For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

    ", + "GetCertificateRequest$CertificateArn": "

    String that contains a certificate ARN in the following format:

    arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012

    For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

    ", + "ListTagsForCertificateRequest$CertificateArn": "

    String that contains the ARN of the ACM Certificate for which you want to list the tags. This must be of the form:

    arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012

    For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

    ", + "RemoveTagsFromCertificateRequest$CertificateArn": "

    String that contains the ARN of the ACM Certificate with one or more tags that you want to remove. This must be of the form:

    arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012

    For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

    ", + "RequestCertificateResponse$CertificateArn": "

    String that contains the ARN of the issued certificate. This must be of the form:

    arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012

    ", + "ResendValidationEmailRequest$CertificateArn": "

    String that contains the ARN of the requested certificate. The certificate ARN is generated and returned by the RequestCertificate action as soon as the request is made. By default, using this parameter causes email to be sent to all top-level domains you specified in the certificate request.

    The ARN must be of the form:

    arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012

    " + } + }, + "CertificateBody": { + "base": null, + "refs": { + "GetCertificateResponse$Certificate": "

    String that contains the ACM Certificate represented by the ARN specified at input.

    " + } + }, + "CertificateChain": { + "base": null, + "refs": { + "GetCertificateResponse$CertificateChain": "

    The certificate chain that contains the root certificate issued by the certificate authority (CA).

    " + } + }, + "CertificateDetail": { + "base": "

    Contains detailed metadata about an ACM Certificate. This structure is returned in the response to a DescribeCertificate request.

    ", + "refs": { + "DescribeCertificateResponse$Certificate": "

    Contains a CertificateDetail structure that lists the fields of an ACM Certificate.

    " + } + }, + "CertificateStatus": { + "base": null, + "refs": { + "CertificateDetail$Status": "

    The status of the certificate.

    ", + "CertificateStatuses$member": null + } + }, + "CertificateStatuses": { + "base": null, + "refs": { + "ListCertificatesRequest$CertificateStatuses": "

    The status or statuses on which to filter the list of ACM Certificates.

    " + } + }, + "CertificateSummary": { + "base": "

    This structure is returned in the response object of ListCertificates action.

    ", + "refs": { + "CertificateSummaryList$member": null + } + }, + "CertificateSummaryList": { + "base": null, + "refs": { + "ListCertificatesResponse$CertificateSummaryList": "

    A list of ACM Certificates.

    " + } + }, + "DeleteCertificateRequest": { + "base": null, + "refs": { + } + }, + "DescribeCertificateRequest": { + "base": null, + "refs": { + } + }, + "DescribeCertificateResponse": { + "base": null, + "refs": { + } + }, + "DomainList": { + "base": null, + "refs": { + "CertificateDetail$SubjectAlternativeNames": "

    One or more domain names (subject alternative names) included in the certificate request. After the certificate is issued, this list includes the domain names bound to the public key contained in the certificate. The subject alternative names include the canonical domain name (CN) of the certificate and additional domain names that can be used to connect to the website.

    ", + "RequestCertificateRequest$SubjectAlternativeNames": "

    Additional FQDNs to be included in the Subject Alternative Name extension of the ACM Certificate. For example, add the name www.example.net to a certificate for which the DomainName field is www.example.com if users can reach your site by using either name.

    " + } + }, + "DomainNameString": { + "base": null, + "refs": { + "CertificateDetail$DomainName": "

    The fully qualified domain name (FQDN) for the certificate, such as www.example.com or example.com.

    ", + "CertificateSummary$DomainName": "

    Fully qualified domain name (FQDN), such as www.example.com or example.com, for the certificate.

    ", + "DomainList$member": null, + "DomainValidation$DomainName": "

    Fully Qualified Domain Name (FQDN) of the form www.example.com or example.com.

    ", + "DomainValidation$ValidationDomain": "

    The base validation domain that acts as the suffix of the email addresses that are used to send the emails.

    ", + "DomainValidationOption$DomainName": "

    Fully Qualified Domain Name (FQDN) of the certificate being requested.

    ", + "DomainValidationOption$ValidationDomain": "

    The domain to which validation email is sent. This is the base validation domain that will act as the suffix of the email addresses. This must be the same as the DomainName value or a superdomain of the DomainName value. For example, if you requested a certificate for site.subdomain.example.com and specify a ValidationDomain of subdomain.example.com, ACM sends email to the domain registrant, technical contact, and administrative contact in WHOIS for the base domain and the following five addresses:

    • admin@subdomain.example.com

    • administrator@subdomain.example.com

    • hostmaster@subdomain.example.com

    • postmaster@subdomain.example.com

    • webmaster@subdomain.example.com

    ", + "RequestCertificateRequest$DomainName": "

    Fully qualified domain name (FQDN), such as www.example.com, of the site you want to secure with an ACM Certificate. Use an asterisk (*) to create a wildcard certificate that protects several sites in the same domain. For example, *.example.com protects www.example.com, site.example.com, and images.example.com.
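
    Not part of the vendored model files — a sketch of a wildcard request (reusing the svc client from the DescribeCertificate sketch above; all domain names are placeholders), with the zone apex added as a subject alternative name:

        // Request *.example.com and also cover the bare apex domain.
        out, err := svc.RequestCertificate(&acm.RequestCertificateInput{
            DomainName:              aws.String("*.example.com"),
            SubjectAlternativeNames: aws.StringSlice([]string{"example.com"}),
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("requested:", aws.StringValue(out.CertificateArn))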

    ", + "ResendValidationEmailRequest$Domain": "

    The Fully Qualified Domain Name (FQDN) of the certificate that needs to be validated.

    ", + "ResendValidationEmailRequest$ValidationDomain": "

    The base validation domain that will act as the suffix of the email addresses that are used to send the emails. This must be the same as the Domain value or a superdomain of the Domain value. For example, if you request a certificate for site.subdomain.example.com and specify a ValidationDomain of subdomain.example.com, ACM sends email to the domain registrant, technical contact, and administrative contact in WHOIS and to the following five addresses (a usage sketch follows this list):

    • admin@subdomain.example.com

    • administrator@subdomain.example.com

    • hostmaster@subdomain.example.com

    • postmaster@subdomain.example.com

    • webmaster@subdomain.example.com
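
    Not part of the vendored model files — a sketch of resending that validation email (placeholder ARN and domains; client as in the first sketch):

        // Direct the validation email at the base validation domain.
        _, err := svc.ResendValidationEmail(&acm.ResendValidationEmailInput{
            CertificateArn:   aws.String("arn:aws:acm:us-east-1:123456789012:certificate/example-id"),
            Domain:           aws.String("site.subdomain.example.com"),
            ValidationDomain: aws.String("subdomain.example.com"),
        })
        if err != nil {
            log.Fatal(err)
        }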

    " + } + }, + "DomainValidation": { + "base": "

    Structure that contains the domain name, the base validation domain to which validation email is sent, and the email addresses used to validate the domain identity.

    ", + "refs": { + "DomainValidationList$member": null + } + }, + "DomainValidationList": { + "base": null, + "refs": { + "CertificateDetail$DomainValidationOptions": "

    Contains information about the email address or addresses used for domain validation.

    " + } + }, + "DomainValidationOption": { + "base": "

    This structure is used in the request object of the RequestCertificate action.

    ", + "refs": { + "DomainValidationOptionList$member": null + } + }, + "DomainValidationOptionList": { + "base": null, + "refs": { + "RequestCertificateRequest$DomainValidationOptions": "

    The base validation domain that will act as the suffix of the email addresses that are used to send the emails. This must be the same as the DomainName value or a superdomain of the DomainName value. For example, if you request a certificate for test.example.com and specify DomainValidationOptions of example.com, ACM sends email to the domain registrant, technical contact, and administrative contact in WHOIS and to the following five addresses:

    • admin@example.com

    • administrator@example.com

    • hostmaster@example.com

    • postmaster@example.com

    • webmaster@example.com

    " + } + }, + "GetCertificateRequest": { + "base": null, + "refs": { + } + }, + "GetCertificateResponse": { + "base": null, + "refs": { + } + }, + "IdempotencyToken": { + "base": null, + "refs": { + "RequestCertificateRequest$IdempotencyToken": "

    A customer-chosen string that can be used to distinguish between calls to RequestCertificate. Idempotency tokens time out after one hour. Therefore, if you call RequestCertificate multiple times with the same idempotency token within one hour, ACM recognizes that you are requesting only one certificate and will issue only one. If you change the idempotency token for each call, ACM recognizes that you are requesting multiple certificates.
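
    Not part of the vendored model files — a sketch of the idempotent behavior (client, domain, and token are placeholder assumptions):

        // Two calls inside an hour sharing a token should yield one certificate.
        input := &acm.RequestCertificateInput{
            DomainName:       aws.String("www.example.com"),
            IdempotencyToken: aws.String("example-token-1"),
        }
        first, err := svc.RequestCertificate(input)
        if err != nil {
            log.Fatal(err)
        }
        second, err := svc.RequestCertificate(input)
        if err != nil {
            log.Fatal(err)
        }
        // Both ARNs refer to the same certificate.
        fmt.Println(aws.StringValue(first.CertificateArn) == aws.StringValue(second.CertificateArn))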

    " + } + }, + "InUseList": { + "base": null, + "refs": { + "CertificateDetail$InUseBy": "

    A list of ARNs for the resources that are using the certificate. An ACM Certificate can be used by multiple AWS resources.

    " + } + }, + "InvalidArnException": { + "base": "

    The requested Amazon Resource Name (ARN) does not refer to an existing resource.

    ", + "refs": { + } + }, + "InvalidDomainValidationOptionsException": { + "base": "

    One or more values in the DomainValidationOption structure are incorrect.

    ", + "refs": { + } + }, + "InvalidStateException": { + "base": "

    Processing has reached an invalid state. For example, this exception can occur if the specified domain is not using email validation, or the current certificate status does not permit the requested operation. See the exception message returned by ACM to determine which state is not valid.

    ", + "refs": { + } + }, + "InvalidTagException": { + "base": "

    One or both of the values that make up the key-value pair are not valid. For example, you cannot specify a tag value that begins with aws:.

    ", + "refs": { + } + }, + "KeyAlgorithm": { + "base": null, + "refs": { + "CertificateDetail$KeyAlgorithm": "

    The algorithm used to generate the key pair (the public and private key). Currently the only supported value is RSA_2048.

    " + } + }, + "LimitExceededException": { + "base": "

    An ACM limit has been exceeded. For example, you may have specified more domains than are allowed, or you may have requested too many certificates for your account. See the exception message returned by ACM to determine which limit you have violated. For more information about ACM limits, see the Limits topic.

    ", + "refs": { + } + }, + "ListCertificatesRequest": { + "base": null, + "refs": { + } + }, + "ListCertificatesResponse": { + "base": null, + "refs": { + } + }, + "ListTagsForCertificateRequest": { + "base": null, + "refs": { + } + }, + "ListTagsForCertificateResponse": { + "base": null, + "refs": { + } + }, + "MaxItems": { + "base": null, + "refs": { + "ListCertificatesRequest$MaxItems": "

    Use this parameter when paginating results to specify the maximum number of items to return in the response. If additional items exist beyond the number you specify, the NextToken element is sent in the response. Use this NextToken value in a subsequent request to retrieve additional items.
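
    Not part of the vendored model files — a sketch of the MaxItems/NextToken pagination loop (client as in the first sketch; the ISSUED filter also illustrates the CertificateStatuses parameter):

        input := &acm.ListCertificatesInput{
            MaxItems:            aws.Int64(10),
            CertificateStatuses: aws.StringSlice([]string{acm.CertificateStatusIssued}),
        }
        for {
            page, err := svc.ListCertificates(input)
            if err != nil {
                log.Fatal(err)
            }
            for _, c := range page.CertificateSummaryList {
                fmt.Println(aws.StringValue(c.DomainName))
            }
            if page.NextToken == nil {
                break // no more pages
            }
            input.NextToken = page.NextToken
        }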

    " + } + }, + "NextToken": { + "base": null, + "refs": { + "ListCertificatesRequest$NextToken": "

    Use this parameter only when paginating results and only in a subsequent request after you receive a response with truncated results. Set it to the value of NextToken from the response you just received.

    ", + "ListCertificatesResponse$NextToken": "

    When the list is truncated, this value is present and contains the value to use for the NextToken parameter in a subsequent pagination request.

    " + } + }, + "RemoveTagsFromCertificateRequest": { + "base": null, + "refs": { + } + }, + "RequestCertificateRequest": { + "base": null, + "refs": { + } + }, + "RequestCertificateResponse": { + "base": null, + "refs": { + } + }, + "RequestInProgressException": { + "base": "

    The certificate request is in progress, and the certificate in your account has not yet been issued.

    ", + "refs": { + } + }, + "ResendValidationEmailRequest": { + "base": null, + "refs": { + } + }, + "ResourceInUseException": { + "base": "

    The certificate is in use by another AWS service in the caller's account. Remove the association and try again.

    ", + "refs": { + } + }, + "ResourceNotFoundException": { + "base": "

    The specified certificate cannot be found in the caller's account, or the caller's account cannot be found.

    ", + "refs": { + } + }, + "RevocationReason": { + "base": null, + "refs": { + "CertificateDetail$RevocationReason": "

    The reason the certificate was revoked. This value exists only when the certificate status is REVOKED.

    " + } + }, + "String": { + "base": null, + "refs": { + "CertificateDetail$Serial": "

    The serial number of the certificate.

    ", + "CertificateDetail$Subject": "

    The X.500 distinguished name of the entity associated with the public key contained in the certificate.

    ", + "CertificateDetail$Issuer": "

    The X.500 distinguished name of the CA that issued and signed the certificate.

    ", + "CertificateDetail$SignatureAlgorithm": "

    The algorithm used to generate a signature. Currently the only supported value is SHA256WITHRSA.

    ", + "InUseList$member": null, + "InvalidArnException$message": null, + "InvalidDomainValidationOptionsException$message": null, + "InvalidStateException$message": null, + "InvalidTagException$message": null, + "LimitExceededException$message": null, + "RequestInProgressException$message": null, + "ResourceInUseException$message": null, + "ResourceNotFoundException$message": null, + "TooManyTagsException$message": null, + "ValidationEmailList$member": null + } + }, + "TStamp": { + "base": null, + "refs": { + "CertificateDetail$CreatedAt": "

    The time at which the certificate was requested.

    ", + "CertificateDetail$IssuedAt": "

    The time at which the certificate was issued.

    ", + "CertificateDetail$RevokedAt": "

    The time at which the certificate was revoked. This value exists only when the certificate status is REVOKED.

    ", + "CertificateDetail$NotBefore": "

    The time before which the certificate is not valid.

    ", + "CertificateDetail$NotAfter": "

    The time after which the certificate is not valid.

    " + } + }, + "Tag": { + "base": "

    A key-value pair that identifies or specifies metadata about an ACM resource.

    ", + "refs": { + "TagList$member": null + } + }, + "TagKey": { + "base": null, + "refs": { + "Tag$Key": "

    The key of the tag.

    " + } + }, + "TagList": { + "base": null, + "refs": { + "AddTagsToCertificateRequest$Tags": "

    The key-value pair that defines the tag. The tag value is optional.
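
    Not part of the vendored model files — a sketch of tagging a certificate (placeholder ARN; client as in the first sketch). The Value field is optional and could be omitted:

        _, err := svc.AddTagsToCertificate(&acm.AddTagsToCertificateInput{
            CertificateArn: aws.String("arn:aws:acm:us-east-1:123456789012:certificate/example-id"),
            Tags: []*acm.Tag{
                {Key: aws.String("Environment"), Value: aws.String("production")},
            },
        })
        if err != nil {
            log.Fatal(err)
        }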

    ", + "ListTagsForCertificateResponse$Tags": "

    The key-value pairs that define the applied tags.

    ", + "RemoveTagsFromCertificateRequest$Tags": "

    The key-value pair that defines the tag to remove.

    " + } + }, + "TagValue": { + "base": null, + "refs": { + "Tag$Value": "

    The value of the tag.

    " + } + }, + "TooManyTagsException": { + "base": "

    The request contains too many tags. Try the request again with fewer tags.

    ", + "refs": { + } + }, + "ValidationEmailList": { + "base": null, + "refs": { + "DomainValidation$ValidationEmails": "

    A list of contact addresses for the domain registrant.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/acm/2015-12-08/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/acm/2015-12-08/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/acm/2015-12-08/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/acm/2015-12-08/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/acm/2015-12-08/paginators-1.json new file mode 100644 index 000000000..036e35849 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/acm/2015-12-08/paginators-1.json @@ -0,0 +1,10 @@ +{ + "pagination": { + "ListCertificates": { + "limit_key": "MaxItems", + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "CertificateSummaryList" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/apigateway/2015-07-09/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/apigateway/2015-07-09/api-2.json new file mode 100644 index 000000000..06340cb5c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/apigateway/2015-07-09/api-2.json @@ -0,0 +1,3347 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-07-09", + "endpointPrefix":"apigateway", + "protocol":"rest-json", + "serviceFullName":"Amazon API Gateway", + "signatureVersion":"v4" + }, + "operations":{ + "CreateApiKey":{ + "name":"CreateApiKey", + "http":{ + "method":"POST", + "requestUri":"/apikeys", + "responseCode":201 + }, + "input":{"shape":"CreateApiKeyRequest"}, + "output":{"shape":"ApiKey"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"LimitExceededException"}, + {"shape":"BadRequestException"} + ] + }, + "CreateAuthorizer":{ + "name":"CreateAuthorizer", + "http":{ + "method":"POST", + "requestUri":"/restapis/{restapi_id}/authorizers", + "responseCode":201 + }, + "input":{"shape":"CreateAuthorizerRequest"}, + "output":{"shape":"Authorizer"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "CreateBasePathMapping":{ + "name":"CreateBasePathMapping", + "http":{ + "method":"POST", + "requestUri":"/domainnames/{domain_name}/basepathmappings", + "responseCode":201 + }, + "input":{"shape":"CreateBasePathMappingRequest"}, + "output":{"shape":"BasePathMapping"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"ConflictException"}, + {"shape":"BadRequestException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "CreateDeployment":{ + "name":"CreateDeployment", + "http":{ + "method":"POST", + "requestUri":"/restapis/{restapi_id}/deployments", + "responseCode":201 + }, + "input":{"shape":"CreateDeploymentRequest"}, + "output":{"shape":"Deployment"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"BadRequestException"}, + {"shape":"NotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"LimitExceededException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "CreateDomainName":{ + "name":"CreateDomainName", + "http":{ + "method":"POST", + "requestUri":"/domainnames", + "responseCode":201 + }, + "input":{"shape":"CreateDomainNameRequest"}, + "output":{"shape":"DomainName"}, + "errors":[ + 
{"shape":"UnauthorizedException"}, + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "CreateModel":{ + "name":"CreateModel", + "http":{ + "method":"POST", + "requestUri":"/restapis/{restapi_id}/models", + "responseCode":201 + }, + "input":{"shape":"CreateModelRequest"}, + "output":{"shape":"Model"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"LimitExceededException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "CreateResource":{ + "name":"CreateResource", + "http":{ + "method":"POST", + "requestUri":"/restapis/{restapi_id}/resources/{parent_id}", + "responseCode":201 + }, + "input":{"shape":"CreateResourceRequest"}, + "output":{"shape":"Resource"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"LimitExceededException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "CreateRestApi":{ + "name":"CreateRestApi", + "http":{ + "method":"POST", + "requestUri":"/restapis", + "responseCode":201 + }, + "input":{"shape":"CreateRestApiRequest"}, + "output":{"shape":"RestApi"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"LimitExceededException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "CreateStage":{ + "name":"CreateStage", + "http":{ + "method":"POST", + "requestUri":"/restapis/{restapi_id}/stages", + "responseCode":201 + }, + "input":{"shape":"CreateStageRequest"}, + "output":{"shape":"Stage"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"BadRequestException"}, + {"shape":"NotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"LimitExceededException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "DeleteApiKey":{ + "name":"DeleteApiKey", + "http":{ + "method":"DELETE", + "requestUri":"/apikeys/{api_Key}", + "responseCode":202 + }, + "input":{"shape":"DeleteApiKeyRequest"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "DeleteAuthorizer":{ + "name":"DeleteAuthorizer", + "http":{ + "method":"DELETE", + "requestUri":"/restapis/{restapi_id}/authorizers/{authorizer_id}", + "responseCode":202 + }, + "input":{"shape":"DeleteAuthorizerRequest"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"}, + {"shape":"ConflictException"} + ] + }, + "DeleteBasePathMapping":{ + "name":"DeleteBasePathMapping", + "http":{ + "method":"DELETE", + "requestUri":"/domainnames/{domain_name}/basepathmappings/{base_path}", + "responseCode":202 + }, + "input":{"shape":"DeleteBasePathMappingRequest"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "DeleteClientCertificate":{ + "name":"DeleteClientCertificate", + "http":{ + "method":"DELETE", + "requestUri":"/clientcertificates/{clientcertificate_id}", + "responseCode":202 + }, + "input":{"shape":"DeleteClientCertificateRequest"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"}, + {"shape":"NotFoundException"} + ] + }, + "DeleteDeployment":{ + "name":"DeleteDeployment", + "http":{ + "method":"DELETE", + 
"requestUri":"/restapis/{restapi_id}/deployments/{deployment_id}", + "responseCode":202 + }, + "input":{"shape":"DeleteDeploymentRequest"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "DeleteDomainName":{ + "name":"DeleteDomainName", + "http":{ + "method":"DELETE", + "requestUri":"/domainnames/{domain_name}", + "responseCode":202 + }, + "input":{"shape":"DeleteDomainNameRequest"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "DeleteIntegration":{ + "name":"DeleteIntegration", + "http":{ + "method":"DELETE", + "requestUri":"/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/integration", + "responseCode":204 + }, + "input":{"shape":"DeleteIntegrationRequest"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "DeleteIntegrationResponse":{ + "name":"DeleteIntegrationResponse", + "http":{ + "method":"DELETE", + "requestUri":"/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/integration/responses/{status_code}", + "responseCode":204 + }, + "input":{"shape":"DeleteIntegrationResponseRequest"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "DeleteMethod":{ + "name":"DeleteMethod", + "http":{ + "method":"DELETE", + "requestUri":"/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}", + "responseCode":204 + }, + "input":{"shape":"DeleteMethodRequest"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "DeleteMethodResponse":{ + "name":"DeleteMethodResponse", + "http":{ + "method":"DELETE", + "requestUri":"/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/responses/{status_code}", + "responseCode":204 + }, + "input":{"shape":"DeleteMethodResponseRequest"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "DeleteModel":{ + "name":"DeleteModel", + "http":{ + "method":"DELETE", + "requestUri":"/restapis/{restapi_id}/models/{model_name}", + "responseCode":202 + }, + "input":{"shape":"DeleteModelRequest"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"}, + {"shape":"ConflictException"} + ] + }, + "DeleteResource":{ + "name":"DeleteResource", + "http":{ + "method":"DELETE", + "requestUri":"/restapis/{restapi_id}/resources/{resource_id}", + "responseCode":202 + }, + "input":{"shape":"DeleteResourceRequest"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "DeleteRestApi":{ + "name":"DeleteRestApi", + "http":{ + "method":"DELETE", + "requestUri":"/restapis/{restapi_id}", + "responseCode":202 + }, + "input":{"shape":"DeleteRestApiRequest"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ] + }, + "DeleteStage":{ + "name":"DeleteStage", + "http":{ + "method":"DELETE", + "requestUri":"/restapis/{restapi_id}/stages/{stage_name}", + 
"responseCode":202 + }, + "input":{"shape":"DeleteStageRequest"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ] + }, + "FlushStageAuthorizersCache":{ + "name":"FlushStageAuthorizersCache", + "http":{ + "method":"DELETE", + "requestUri":"/restapis/{restapi_id}/stages/{stage_name}/cache/authorizers", + "responseCode":202 + }, + "input":{"shape":"FlushStageAuthorizersCacheRequest"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "FlushStageCache":{ + "name":"FlushStageCache", + "http":{ + "method":"DELETE", + "requestUri":"/restapis/{restapi_id}/stages/{stage_name}/cache/data", + "responseCode":202 + }, + "input":{"shape":"FlushStageCacheRequest"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "GenerateClientCertificate":{ + "name":"GenerateClientCertificate", + "http":{ + "method":"POST", + "requestUri":"/clientcertificates", + "responseCode":201 + }, + "input":{"shape":"GenerateClientCertificateRequest"}, + "output":{"shape":"ClientCertificate"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"LimitExceededException"} + ] + }, + "GetAccount":{ + "name":"GetAccount", + "http":{ + "method":"GET", + "requestUri":"/account" + }, + "input":{"shape":"GetAccountRequest"}, + "output":{"shape":"Account"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "GetApiKey":{ + "name":"GetApiKey", + "http":{ + "method":"GET", + "requestUri":"/apikeys/{api_Key}" + }, + "input":{"shape":"GetApiKeyRequest"}, + "output":{"shape":"ApiKey"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "GetApiKeys":{ + "name":"GetApiKeys", + "http":{ + "method":"GET", + "requestUri":"/apikeys" + }, + "input":{"shape":"GetApiKeysRequest"}, + "output":{"shape":"ApiKeys"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"UnauthorizedException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "GetAuthorizer":{ + "name":"GetAuthorizer", + "http":{ + "method":"GET", + "requestUri":"/restapis/{restapi_id}/authorizers/{authorizer_id}" + }, + "input":{"shape":"GetAuthorizerRequest"}, + "output":{"shape":"Authorizer"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "GetAuthorizers":{ + "name":"GetAuthorizers", + "http":{ + "method":"GET", + "requestUri":"/restapis/{restapi_id}/authorizers" + }, + "input":{"shape":"GetAuthorizersRequest"}, + "output":{"shape":"Authorizers"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "GetBasePathMapping":{ + "name":"GetBasePathMapping", + "http":{ + "method":"GET", + "requestUri":"/domainnames/{domain_name}/basepathmappings/{base_path}" + }, + "input":{"shape":"GetBasePathMappingRequest"}, + "output":{"shape":"BasePathMapping"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "GetBasePathMappings":{ + "name":"GetBasePathMappings", 
+ "http":{ + "method":"GET", + "requestUri":"/domainnames/{domain_name}/basepathmappings" + }, + "input":{"shape":"GetBasePathMappingsRequest"}, + "output":{"shape":"BasePathMappings"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "GetClientCertificate":{ + "name":"GetClientCertificate", + "http":{ + "method":"GET", + "requestUri":"/clientcertificates/{clientcertificate_id}" + }, + "input":{"shape":"GetClientCertificateRequest"}, + "output":{"shape":"ClientCertificate"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "GetClientCertificates":{ + "name":"GetClientCertificates", + "http":{ + "method":"GET", + "requestUri":"/clientcertificates" + }, + "input":{"shape":"GetClientCertificatesRequest"}, + "output":{"shape":"ClientCertificates"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"UnauthorizedException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "GetDeployment":{ + "name":"GetDeployment", + "http":{ + "method":"GET", + "requestUri":"/restapis/{restapi_id}/deployments/{deployment_id}" + }, + "input":{"shape":"GetDeploymentRequest"}, + "output":{"shape":"Deployment"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "GetDeployments":{ + "name":"GetDeployments", + "http":{ + "method":"GET", + "requestUri":"/restapis/{restapi_id}/deployments" + }, + "input":{"shape":"GetDeploymentsRequest"}, + "output":{"shape":"Deployments"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"UnauthorizedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "GetDomainName":{ + "name":"GetDomainName", + "http":{ + "method":"GET", + "requestUri":"/domainnames/{domain_name}" + }, + "input":{"shape":"GetDomainNameRequest"}, + "output":{"shape":"DomainName"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "GetDomainNames":{ + "name":"GetDomainNames", + "http":{ + "method":"GET", + "requestUri":"/domainnames" + }, + "input":{"shape":"GetDomainNamesRequest"}, + "output":{"shape":"DomainNames"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"UnauthorizedException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "GetExport":{ + "name":"GetExport", + "http":{ + "method":"GET", + "requestUri":"/restapis/{restapi_id}/stages/{stage_name}/exports/{export_type}", + "responseCode":200 + }, + "input":{"shape":"GetExportRequest"}, + "output":{"shape":"ExportResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "GetIntegration":{ + "name":"GetIntegration", + "http":{ + "method":"GET", + "requestUri":"/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/integration" + }, + "input":{"shape":"GetIntegrationRequest"}, + "output":{"shape":"Integration"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "GetIntegrationResponse":{ + "name":"GetIntegrationResponse", + "http":{ + "method":"GET", + 
"requestUri":"/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/integration/responses/{status_code}" + }, + "input":{"shape":"GetIntegrationResponseRequest"}, + "output":{"shape":"IntegrationResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "GetMethod":{ + "name":"GetMethod", + "http":{ + "method":"GET", + "requestUri":"/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}" + }, + "input":{"shape":"GetMethodRequest"}, + "output":{"shape":"Method"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "GetMethodResponse":{ + "name":"GetMethodResponse", + "http":{ + "method":"GET", + "requestUri":"/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/responses/{status_code}" + }, + "input":{"shape":"GetMethodResponseRequest"}, + "output":{"shape":"MethodResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "GetModel":{ + "name":"GetModel", + "http":{ + "method":"GET", + "requestUri":"/restapis/{restapi_id}/models/{model_name}" + }, + "input":{"shape":"GetModelRequest"}, + "output":{"shape":"Model"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "GetModelTemplate":{ + "name":"GetModelTemplate", + "http":{ + "method":"GET", + "requestUri":"/restapis/{restapi_id}/models/{model_name}/default_template" + }, + "input":{"shape":"GetModelTemplateRequest"}, + "output":{"shape":"Template"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "GetModels":{ + "name":"GetModels", + "http":{ + "method":"GET", + "requestUri":"/restapis/{restapi_id}/models" + }, + "input":{"shape":"GetModelsRequest"}, + "output":{"shape":"Models"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "GetResource":{ + "name":"GetResource", + "http":{ + "method":"GET", + "requestUri":"/restapis/{restapi_id}/resources/{resource_id}" + }, + "input":{"shape":"GetResourceRequest"}, + "output":{"shape":"Resource"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "GetResources":{ + "name":"GetResources", + "http":{ + "method":"GET", + "requestUri":"/restapis/{restapi_id}/resources" + }, + "input":{"shape":"GetResourcesRequest"}, + "output":{"shape":"Resources"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "GetRestApi":{ + "name":"GetRestApi", + "http":{ + "method":"GET", + "requestUri":"/restapis/{restapi_id}" + }, + "input":{"shape":"GetRestApiRequest"}, + "output":{"shape":"RestApi"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "GetRestApis":{ + "name":"GetRestApis", + "http":{ + "method":"GET", + "requestUri":"/restapis" + }, + "input":{"shape":"GetRestApisRequest"}, + "output":{"shape":"RestApis"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"UnauthorizedException"}, + 
{"shape":"TooManyRequestsException"} + ] + }, + "GetSdk":{ + "name":"GetSdk", + "http":{ + "method":"GET", + "requestUri":"/restapis/{restapi_id}/stages/{stage_name}/sdks/{sdk_type}", + "responseCode":200 + }, + "input":{"shape":"GetSdkRequest"}, + "output":{"shape":"SdkResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "GetStage":{ + "name":"GetStage", + "http":{ + "method":"GET", + "requestUri":"/restapis/{restapi_id}/stages/{stage_name}" + }, + "input":{"shape":"GetStageRequest"}, + "output":{"shape":"Stage"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "GetStages":{ + "name":"GetStages", + "http":{ + "method":"GET", + "requestUri":"/restapis/{restapi_id}/stages" + }, + "input":{"shape":"GetStagesRequest"}, + "output":{"shape":"Stages"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "ImportRestApi":{ + "name":"ImportRestApi", + "http":{ + "method":"POST", + "requestUri":"/restapis?mode=import", + "responseCode":201 + }, + "input":{"shape":"ImportRestApiRequest"}, + "output":{"shape":"RestApi"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"LimitExceededException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "PutIntegration":{ + "name":"PutIntegration", + "http":{ + "method":"PUT", + "requestUri":"/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/integration", + "responseCode":201 + }, + "input":{"shape":"PutIntegrationRequest"}, + "output":{"shape":"Integration"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "PutIntegrationResponse":{ + "name":"PutIntegrationResponse", + "http":{ + "method":"PUT", + "requestUri":"/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/integration/responses/{status_code}", + "responseCode":201 + }, + "input":{"shape":"PutIntegrationResponseRequest"}, + "output":{"shape":"IntegrationResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ConflictException"} + ] + }, + "PutMethod":{ + "name":"PutMethod", + "http":{ + "method":"PUT", + "requestUri":"/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}", + "responseCode":201 + }, + "input":{"shape":"PutMethodRequest"}, + "output":{"shape":"Method"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"LimitExceededException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "PutMethodResponse":{ + "name":"PutMethodResponse", + "http":{ + "method":"PUT", + "requestUri":"/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/responses/{status_code}", + "responseCode":201 + }, + "input":{"shape":"PutMethodResponseRequest"}, + "output":{"shape":"MethodResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"LimitExceededException"}, + {"shape":"BadRequestException"}, + 
{"shape":"TooManyRequestsException"} + ] + }, + "PutRestApi":{ + "name":"PutRestApi", + "http":{ + "method":"PUT", + "requestUri":"/restapis/{restapi_id}" + }, + "input":{"shape":"PutRestApiRequest"}, + "output":{"shape":"RestApi"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"LimitExceededException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "TestInvokeAuthorizer":{ + "name":"TestInvokeAuthorizer", + "http":{ + "method":"POST", + "requestUri":"/restapis/{restapi_id}/authorizers/{authorizer_id}" + }, + "input":{"shape":"TestInvokeAuthorizerRequest"}, + "output":{"shape":"TestInvokeAuthorizerResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "TestInvokeMethod":{ + "name":"TestInvokeMethod", + "http":{ + "method":"POST", + "requestUri":"/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}" + }, + "input":{"shape":"TestInvokeMethodRequest"}, + "output":{"shape":"TestInvokeMethodResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "UpdateAccount":{ + "name":"UpdateAccount", + "http":{ + "method":"PATCH", + "requestUri":"/account" + }, + "input":{"shape":"UpdateAccountRequest"}, + "output":{"shape":"Account"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"BadRequestException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "UpdateApiKey":{ + "name":"UpdateApiKey", + "http":{ + "method":"PATCH", + "requestUri":"/apikeys/{api_Key}" + }, + "input":{"shape":"UpdateApiKeyRequest"}, + "output":{"shape":"ApiKey"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "UpdateAuthorizer":{ + "name":"UpdateAuthorizer", + "http":{ + "method":"PATCH", + "requestUri":"/restapis/{restapi_id}/authorizers/{authorizer_id}" + }, + "input":{"shape":"UpdateAuthorizerRequest"}, + "output":{"shape":"Authorizer"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "UpdateBasePathMapping":{ + "name":"UpdateBasePathMapping", + "http":{ + "method":"PATCH", + "requestUri":"/domainnames/{domain_name}/basepathmappings/{base_path}" + }, + "input":{"shape":"UpdateBasePathMappingRequest"}, + "output":{"shape":"BasePathMapping"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "UpdateClientCertificate":{ + "name":"UpdateClientCertificate", + "http":{ + "method":"PATCH", + "requestUri":"/clientcertificates/{clientcertificate_id}" + }, + "input":{"shape":"UpdateClientCertificateRequest"}, + "output":{"shape":"ClientCertificate"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"}, + {"shape":"NotFoundException"} + ] + }, + "UpdateDeployment":{ + "name":"UpdateDeployment", + "http":{ + "method":"PATCH", + "requestUri":"/restapis/{restapi_id}/deployments/{deployment_id}" + }, + "input":{"shape":"UpdateDeploymentRequest"}, + "output":{"shape":"Deployment"}, + "errors":[ + 
{"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "UpdateDomainName":{ + "name":"UpdateDomainName", + "http":{ + "method":"PATCH", + "requestUri":"/domainnames/{domain_name}" + }, + "input":{"shape":"UpdateDomainNameRequest"}, + "output":{"shape":"DomainName"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "UpdateIntegration":{ + "name":"UpdateIntegration", + "http":{ + "method":"PATCH", + "requestUri":"/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/integration" + }, + "input":{"shape":"UpdateIntegrationRequest"}, + "output":{"shape":"Integration"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ConflictException"} + ] + }, + "UpdateIntegrationResponse":{ + "name":"UpdateIntegrationResponse", + "http":{ + "method":"PATCH", + "requestUri":"/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/integration/responses/{status_code}" + }, + "input":{"shape":"UpdateIntegrationResponseRequest"}, + "output":{"shape":"IntegrationResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "UpdateMethod":{ + "name":"UpdateMethod", + "http":{ + "method":"PATCH", + "requestUri":"/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}" + }, + "input":{"shape":"UpdateMethodRequest"}, + "output":{"shape":"Method"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "UpdateMethodResponse":{ + "name":"UpdateMethodResponse", + "http":{ + "method":"PATCH", + "requestUri":"/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/responses/{status_code}", + "responseCode":201 + }, + "input":{"shape":"UpdateMethodResponseRequest"}, + "output":{"shape":"MethodResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"LimitExceededException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "UpdateModel":{ + "name":"UpdateModel", + "http":{ + "method":"PATCH", + "requestUri":"/restapis/{restapi_id}/models/{model_name}" + }, + "input":{"shape":"UpdateModelRequest"}, + "output":{"shape":"Model"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "UpdateResource":{ + "name":"UpdateResource", + "http":{ + "method":"PATCH", + "requestUri":"/restapis/{restapi_id}/resources/{resource_id}" + }, + "input":{"shape":"UpdateResourceRequest"}, + "output":{"shape":"Resource"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "UpdateRestApi":{ + "name":"UpdateRestApi", + "http":{ + "method":"PATCH", + "requestUri":"/restapis/{restapi_id}" + }, 
+ "input":{"shape":"UpdateRestApiRequest"}, + "output":{"shape":"RestApi"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "UpdateStage":{ + "name":"UpdateStage", + "http":{ + "method":"PATCH", + "requestUri":"/restapis/{restapi_id}/stages/{stage_name}" + }, + "input":{"shape":"UpdateStageRequest"}, + "output":{"shape":"Stage"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"} + ] + } + }, + "shapes":{ + "Account":{ + "type":"structure", + "members":{ + "cloudwatchRoleArn":{"shape":"String"}, + "throttleSettings":{"shape":"ThrottleSettings"} + } + }, + "ApiKey":{ + "type":"structure", + "members":{ + "id":{"shape":"String"}, + "name":{"shape":"String"}, + "description":{"shape":"String"}, + "enabled":{"shape":"Boolean"}, + "stageKeys":{"shape":"ListOfString"}, + "createdDate":{"shape":"Timestamp"}, + "lastUpdatedDate":{"shape":"Timestamp"} + } + }, + "ApiKeys":{ + "type":"structure", + "members":{ + "position":{"shape":"String"}, + "items":{ + "shape":"ListOfApiKey", + "locationName":"item" + } + } + }, + "Authorizer":{ + "type":"structure", + "members":{ + "id":{"shape":"String"}, + "name":{"shape":"String"}, + "type":{"shape":"AuthorizerType"}, + "authType":{"shape":"String"}, + "authorizerUri":{"shape":"String"}, + "authorizerCredentials":{"shape":"String"}, + "identitySource":{"shape":"String"}, + "identityValidationExpression":{"shape":"String"}, + "authorizerResultTtlInSeconds":{"shape":"NullableInteger"} + } + }, + "AuthorizerType":{ + "type":"string", + "enum":["TOKEN"] + }, + "Authorizers":{ + "type":"structure", + "members":{ + "position":{"shape":"String"}, + "items":{ + "shape":"ListOfAuthorizer", + "locationName":"item" + } + } + }, + "BadRequestException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "BasePathMapping":{ + "type":"structure", + "members":{ + "basePath":{"shape":"String"}, + "restApiId":{"shape":"String"}, + "stage":{"shape":"String"} + } + }, + "BasePathMappings":{ + "type":"structure", + "members":{ + "position":{"shape":"String"}, + "items":{ + "shape":"ListOfBasePathMapping", + "locationName":"item" + } + } + }, + "Blob":{"type":"blob"}, + "Boolean":{"type":"boolean"}, + "CacheClusterSize":{ + "type":"string", + "enum":[ + "0.5", + "1.6", + "6.1", + "13.5", + "28.4", + "58.2", + "118", + "237" + ] + }, + "CacheClusterStatus":{ + "type":"string", + "enum":[ + "CREATE_IN_PROGRESS", + "AVAILABLE", + "DELETE_IN_PROGRESS", + "NOT_AVAILABLE", + "FLUSH_IN_PROGRESS" + ] + }, + "ClientCertificate":{ + "type":"structure", + "members":{ + "clientCertificateId":{"shape":"String"}, + "description":{"shape":"String"}, + "pemEncodedCertificate":{"shape":"String"}, + "createdDate":{"shape":"Timestamp"}, + "expirationDate":{"shape":"Timestamp"} + } + }, + "ClientCertificates":{ + "type":"structure", + "members":{ + "position":{"shape":"String"}, + "items":{ + "shape":"ListOfClientCertificate", + "locationName":"item" + } + } + }, + "ConflictException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "CreateApiKeyRequest":{ + "type":"structure", + "members":{ + "name":{"shape":"String"}, + 
"description":{"shape":"String"}, + "enabled":{"shape":"Boolean"}, + "stageKeys":{"shape":"ListOfStageKeys"} + } + }, + "CreateAuthorizerRequest":{ + "type":"structure", + "required":[ + "restApiId", + "name", + "type", + "authorizerUri", + "identitySource" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "name":{"shape":"String"}, + "type":{"shape":"AuthorizerType"}, + "authType":{"shape":"String"}, + "authorizerUri":{"shape":"String"}, + "authorizerCredentials":{"shape":"String"}, + "identitySource":{"shape":"String"}, + "identityValidationExpression":{"shape":"String"}, + "authorizerResultTtlInSeconds":{"shape":"NullableInteger"} + } + }, + "CreateBasePathMappingRequest":{ + "type":"structure", + "required":[ + "domainName", + "restApiId" + ], + "members":{ + "domainName":{ + "shape":"String", + "location":"uri", + "locationName":"domain_name" + }, + "basePath":{"shape":"String"}, + "restApiId":{"shape":"String"}, + "stage":{"shape":"String"} + } + }, + "CreateDeploymentRequest":{ + "type":"structure", + "required":[ + "restApiId", + "stageName" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "stageName":{"shape":"String"}, + "stageDescription":{"shape":"String"}, + "description":{"shape":"String"}, + "cacheClusterEnabled":{"shape":"NullableBoolean"}, + "cacheClusterSize":{"shape":"CacheClusterSize"}, + "variables":{"shape":"MapOfStringToString"} + } + }, + "CreateDomainNameRequest":{ + "type":"structure", + "required":[ + "domainName", + "certificateName", + "certificateBody", + "certificatePrivateKey", + "certificateChain" + ], + "members":{ + "domainName":{"shape":"String"}, + "certificateName":{"shape":"String"}, + "certificateBody":{"shape":"String"}, + "certificatePrivateKey":{"shape":"String"}, + "certificateChain":{"shape":"String"} + } + }, + "CreateModelRequest":{ + "type":"structure", + "required":[ + "restApiId", + "name", + "contentType" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "name":{"shape":"String"}, + "description":{"shape":"String"}, + "schema":{"shape":"String"}, + "contentType":{"shape":"String"} + } + }, + "CreateResourceRequest":{ + "type":"structure", + "required":[ + "restApiId", + "parentId", + "pathPart" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "parentId":{ + "shape":"String", + "location":"uri", + "locationName":"parent_id" + }, + "pathPart":{"shape":"String"} + } + }, + "CreateRestApiRequest":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{"shape":"String"}, + "description":{"shape":"String"}, + "cloneFrom":{"shape":"String"} + } + }, + "CreateStageRequest":{ + "type":"structure", + "required":[ + "restApiId", + "stageName", + "deploymentId" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "stageName":{"shape":"String"}, + "deploymentId":{"shape":"String"}, + "description":{"shape":"String"}, + "cacheClusterEnabled":{"shape":"Boolean"}, + "cacheClusterSize":{"shape":"CacheClusterSize"}, + "variables":{"shape":"MapOfStringToString"} + } + }, + "DeleteApiKeyRequest":{ + "type":"structure", + "required":["apiKey"], + "members":{ + "apiKey":{ + "shape":"String", + "location":"uri", + "locationName":"api_Key" + } + } + }, + "DeleteAuthorizerRequest":{ + "type":"structure", + "required":[ + "restApiId", + 
"authorizerId" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "authorizerId":{ + "shape":"String", + "location":"uri", + "locationName":"authorizer_id" + } + } + }, + "DeleteBasePathMappingRequest":{ + "type":"structure", + "required":[ + "domainName", + "basePath" + ], + "members":{ + "domainName":{ + "shape":"String", + "location":"uri", + "locationName":"domain_name" + }, + "basePath":{ + "shape":"String", + "location":"uri", + "locationName":"base_path" + } + } + }, + "DeleteClientCertificateRequest":{ + "type":"structure", + "required":["clientCertificateId"], + "members":{ + "clientCertificateId":{ + "shape":"String", + "location":"uri", + "locationName":"clientcertificate_id" + } + } + }, + "DeleteDeploymentRequest":{ + "type":"structure", + "required":[ + "restApiId", + "deploymentId" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "deploymentId":{ + "shape":"String", + "location":"uri", + "locationName":"deployment_id" + } + } + }, + "DeleteDomainNameRequest":{ + "type":"structure", + "required":["domainName"], + "members":{ + "domainName":{ + "shape":"String", + "location":"uri", + "locationName":"domain_name" + } + } + }, + "DeleteIntegrationRequest":{ + "type":"structure", + "required":[ + "restApiId", + "resourceId", + "httpMethod" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "resourceId":{ + "shape":"String", + "location":"uri", + "locationName":"resource_id" + }, + "httpMethod":{ + "shape":"String", + "location":"uri", + "locationName":"http_method" + } + } + }, + "DeleteIntegrationResponseRequest":{ + "type":"structure", + "required":[ + "restApiId", + "resourceId", + "httpMethod", + "statusCode" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "resourceId":{ + "shape":"String", + "location":"uri", + "locationName":"resource_id" + }, + "httpMethod":{ + "shape":"String", + "location":"uri", + "locationName":"http_method" + }, + "statusCode":{ + "shape":"StatusCode", + "location":"uri", + "locationName":"status_code" + } + } + }, + "DeleteMethodRequest":{ + "type":"structure", + "required":[ + "restApiId", + "resourceId", + "httpMethod" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "resourceId":{ + "shape":"String", + "location":"uri", + "locationName":"resource_id" + }, + "httpMethod":{ + "shape":"String", + "location":"uri", + "locationName":"http_method" + } + } + }, + "DeleteMethodResponseRequest":{ + "type":"structure", + "required":[ + "restApiId", + "resourceId", + "httpMethod", + "statusCode" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "resourceId":{ + "shape":"String", + "location":"uri", + "locationName":"resource_id" + }, + "httpMethod":{ + "shape":"String", + "location":"uri", + "locationName":"http_method" + }, + "statusCode":{ + "shape":"StatusCode", + "location":"uri", + "locationName":"status_code" + } + } + }, + "DeleteModelRequest":{ + "type":"structure", + "required":[ + "restApiId", + "modelName" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "modelName":{ + "shape":"String", + "location":"uri", + "locationName":"model_name" + } + } + }, + "DeleteResourceRequest":{ + "type":"structure", + "required":[ + 
"restApiId", + "resourceId" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "resourceId":{ + "shape":"String", + "location":"uri", + "locationName":"resource_id" + } + } + }, + "DeleteRestApiRequest":{ + "type":"structure", + "required":["restApiId"], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + } + } + }, + "DeleteStageRequest":{ + "type":"structure", + "required":[ + "restApiId", + "stageName" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "stageName":{ + "shape":"String", + "location":"uri", + "locationName":"stage_name" + } + } + }, + "Deployment":{ + "type":"structure", + "members":{ + "id":{"shape":"String"}, + "description":{"shape":"String"}, + "createdDate":{"shape":"Timestamp"}, + "apiSummary":{"shape":"PathToMapOfMethodSnapshot"} + } + }, + "Deployments":{ + "type":"structure", + "members":{ + "position":{"shape":"String"}, + "items":{ + "shape":"ListOfDeployment", + "locationName":"item" + } + } + }, + "DomainName":{ + "type":"structure", + "members":{ + "domainName":{"shape":"String"}, + "certificateName":{"shape":"String"}, + "certificateUploadDate":{"shape":"Timestamp"}, + "distributionDomainName":{"shape":"String"} + } + }, + "DomainNames":{ + "type":"structure", + "members":{ + "position":{"shape":"String"}, + "items":{ + "shape":"ListOfDomainName", + "locationName":"item" + } + } + }, + "Double":{"type":"double"}, + "ExportResponse":{ + "type":"structure", + "members":{ + "contentType":{ + "shape":"String", + "location":"header", + "locationName":"Content-Type" + }, + "contentDisposition":{ + "shape":"String", + "location":"header", + "locationName":"Content-Disposition" + }, + "body":{"shape":"Blob"} + }, + "payload":"body" + }, + "FlushStageAuthorizersCacheRequest":{ + "type":"structure", + "required":[ + "restApiId", + "stageName" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "stageName":{ + "shape":"String", + "location":"uri", + "locationName":"stage_name" + } + } + }, + "FlushStageCacheRequest":{ + "type":"structure", + "required":[ + "restApiId", + "stageName" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "stageName":{ + "shape":"String", + "location":"uri", + "locationName":"stage_name" + } + } + }, + "GenerateClientCertificateRequest":{ + "type":"structure", + "members":{ + "description":{"shape":"String"} + } + }, + "GetAccountRequest":{ + "type":"structure", + "members":{ + } + }, + "GetApiKeyRequest":{ + "type":"structure", + "required":["apiKey"], + "members":{ + "apiKey":{ + "shape":"String", + "location":"uri", + "locationName":"api_Key" + } + } + }, + "GetApiKeysRequest":{ + "type":"structure", + "members":{ + "position":{ + "shape":"String", + "location":"querystring", + "locationName":"position" + }, + "limit":{ + "shape":"NullableInteger", + "location":"querystring", + "locationName":"limit" + } + } + }, + "GetAuthorizerRequest":{ + "type":"structure", + "required":[ + "restApiId", + "authorizerId" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "authorizerId":{ + "shape":"String", + "location":"uri", + "locationName":"authorizer_id" + } + } + }, + "GetAuthorizersRequest":{ + "type":"structure", + "required":["restApiId"], + "members":{ + "restApiId":{ + "shape":"String", + 
"location":"uri", + "locationName":"restapi_id" + }, + "position":{ + "shape":"String", + "location":"querystring", + "locationName":"position" + }, + "limit":{ + "shape":"NullableInteger", + "location":"querystring", + "locationName":"limit" + } + } + }, + "GetBasePathMappingRequest":{ + "type":"structure", + "required":[ + "domainName", + "basePath" + ], + "members":{ + "domainName":{ + "shape":"String", + "location":"uri", + "locationName":"domain_name" + }, + "basePath":{ + "shape":"String", + "location":"uri", + "locationName":"base_path" + } + } + }, + "GetBasePathMappingsRequest":{ + "type":"structure", + "required":["domainName"], + "members":{ + "domainName":{ + "shape":"String", + "location":"uri", + "locationName":"domain_name" + }, + "position":{ + "shape":"String", + "location":"querystring", + "locationName":"position" + }, + "limit":{ + "shape":"NullableInteger", + "location":"querystring", + "locationName":"limit" + } + } + }, + "GetClientCertificateRequest":{ + "type":"structure", + "required":["clientCertificateId"], + "members":{ + "clientCertificateId":{ + "shape":"String", + "location":"uri", + "locationName":"clientcertificate_id" + } + } + }, + "GetClientCertificatesRequest":{ + "type":"structure", + "members":{ + "position":{ + "shape":"String", + "location":"querystring", + "locationName":"position" + }, + "limit":{ + "shape":"NullableInteger", + "location":"querystring", + "locationName":"limit" + } + } + }, + "GetDeploymentRequest":{ + "type":"structure", + "required":[ + "restApiId", + "deploymentId" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "deploymentId":{ + "shape":"String", + "location":"uri", + "locationName":"deployment_id" + } + } + }, + "GetDeploymentsRequest":{ + "type":"structure", + "required":["restApiId"], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "position":{ + "shape":"String", + "location":"querystring", + "locationName":"position" + }, + "limit":{ + "shape":"NullableInteger", + "location":"querystring", + "locationName":"limit" + } + } + }, + "GetDomainNameRequest":{ + "type":"structure", + "required":["domainName"], + "members":{ + "domainName":{ + "shape":"String", + "location":"uri", + "locationName":"domain_name" + } + } + }, + "GetDomainNamesRequest":{ + "type":"structure", + "members":{ + "position":{ + "shape":"String", + "location":"querystring", + "locationName":"position" + }, + "limit":{ + "shape":"NullableInteger", + "location":"querystring", + "locationName":"limit" + } + } + }, + "GetExportRequest":{ + "type":"structure", + "required":[ + "restApiId", + "stageName", + "exportType" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "stageName":{ + "shape":"String", + "location":"uri", + "locationName":"stage_name" + }, + "exportType":{ + "shape":"String", + "location":"uri", + "locationName":"export_type" + }, + "parameters":{ + "shape":"MapOfStringToString", + "location":"querystring" + }, + "accepts":{ + "shape":"String", + "location":"header", + "locationName":"Accept" + } + } + }, + "GetIntegrationRequest":{ + "type":"structure", + "required":[ + "restApiId", + "resourceId", + "httpMethod" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "resourceId":{ + "shape":"String", + "location":"uri", + "locationName":"resource_id" + }, + "httpMethod":{ + "shape":"String", + 
"location":"uri", + "locationName":"http_method" + } + } + }, + "GetIntegrationResponseRequest":{ + "type":"structure", + "required":[ + "restApiId", + "resourceId", + "httpMethod", + "statusCode" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "resourceId":{ + "shape":"String", + "location":"uri", + "locationName":"resource_id" + }, + "httpMethod":{ + "shape":"String", + "location":"uri", + "locationName":"http_method" + }, + "statusCode":{ + "shape":"StatusCode", + "location":"uri", + "locationName":"status_code" + } + } + }, + "GetMethodRequest":{ + "type":"structure", + "required":[ + "restApiId", + "resourceId", + "httpMethod" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "resourceId":{ + "shape":"String", + "location":"uri", + "locationName":"resource_id" + }, + "httpMethod":{ + "shape":"String", + "location":"uri", + "locationName":"http_method" + } + } + }, + "GetMethodResponseRequest":{ + "type":"structure", + "required":[ + "restApiId", + "resourceId", + "httpMethod", + "statusCode" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "resourceId":{ + "shape":"String", + "location":"uri", + "locationName":"resource_id" + }, + "httpMethod":{ + "shape":"String", + "location":"uri", + "locationName":"http_method" + }, + "statusCode":{ + "shape":"StatusCode", + "location":"uri", + "locationName":"status_code" + } + } + }, + "GetModelRequest":{ + "type":"structure", + "required":[ + "restApiId", + "modelName" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "modelName":{ + "shape":"String", + "location":"uri", + "locationName":"model_name" + }, + "flatten":{ + "shape":"Boolean", + "location":"querystring", + "locationName":"flatten" + } + } + }, + "GetModelTemplateRequest":{ + "type":"structure", + "required":[ + "restApiId", + "modelName" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "modelName":{ + "shape":"String", + "location":"uri", + "locationName":"model_name" + } + } + }, + "GetModelsRequest":{ + "type":"structure", + "required":["restApiId"], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "position":{ + "shape":"String", + "location":"querystring", + "locationName":"position" + }, + "limit":{ + "shape":"NullableInteger", + "location":"querystring", + "locationName":"limit" + } + } + }, + "GetResourceRequest":{ + "type":"structure", + "required":[ + "restApiId", + "resourceId" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "resourceId":{ + "shape":"String", + "location":"uri", + "locationName":"resource_id" + } + } + }, + "GetResourcesRequest":{ + "type":"structure", + "required":["restApiId"], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "position":{ + "shape":"String", + "location":"querystring", + "locationName":"position" + }, + "limit":{ + "shape":"NullableInteger", + "location":"querystring", + "locationName":"limit" + } + } + }, + "GetRestApiRequest":{ + "type":"structure", + "required":["restApiId"], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + } + } + }, + "GetRestApisRequest":{ + "type":"structure", + "members":{ + 
"position":{ + "shape":"String", + "location":"querystring", + "locationName":"position" + }, + "limit":{ + "shape":"NullableInteger", + "location":"querystring", + "locationName":"limit" + } + } + }, + "GetSdkRequest":{ + "type":"structure", + "required":[ + "restApiId", + "stageName", + "sdkType" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "stageName":{ + "shape":"String", + "location":"uri", + "locationName":"stage_name" + }, + "sdkType":{ + "shape":"String", + "location":"uri", + "locationName":"sdk_type" + }, + "parameters":{ + "shape":"MapOfStringToString", + "location":"querystring" + } + } + }, + "GetStageRequest":{ + "type":"structure", + "required":[ + "restApiId", + "stageName" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "stageName":{ + "shape":"String", + "location":"uri", + "locationName":"stage_name" + } + } + }, + "GetStagesRequest":{ + "type":"structure", + "required":["restApiId"], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "deploymentId":{ + "shape":"String", + "location":"querystring", + "locationName":"deploymentId" + } + } + }, + "ImportRestApiRequest":{ + "type":"structure", + "required":["body"], + "members":{ + "failOnWarnings":{ + "shape":"Boolean", + "location":"querystring", + "locationName":"failonwarnings" + }, + "parameters":{ + "shape":"MapOfStringToString", + "location":"querystring" + }, + "body":{"shape":"Blob"} + }, + "payload":"body" + }, + "Integer":{"type":"integer"}, + "Integration":{ + "type":"structure", + "members":{ + "type":{"shape":"IntegrationType"}, + "httpMethod":{"shape":"String"}, + "uri":{"shape":"String"}, + "credentials":{"shape":"String"}, + "requestParameters":{"shape":"MapOfStringToString"}, + "requestTemplates":{"shape":"MapOfStringToString"}, + "passthroughBehavior":{"shape":"String"}, + "cacheNamespace":{"shape":"String"}, + "cacheKeyParameters":{"shape":"ListOfString"}, + "integrationResponses":{"shape":"MapOfIntegrationResponse"} + } + }, + "IntegrationResponse":{ + "type":"structure", + "members":{ + "statusCode":{"shape":"StatusCode"}, + "selectionPattern":{"shape":"String"}, + "responseParameters":{"shape":"MapOfStringToString"}, + "responseTemplates":{"shape":"MapOfStringToString"} + } + }, + "IntegrationType":{ + "type":"string", + "enum":[ + "HTTP", + "AWS", + "MOCK" + ] + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "retryAfterSeconds":{ + "shape":"String", + "location":"header", + "locationName":"Retry-After" + }, + "message":{"shape":"String"} + }, + "error":{"httpStatusCode":429}, + "exception":true + }, + "ListOfApiKey":{ + "type":"list", + "member":{"shape":"ApiKey"} + }, + "ListOfAuthorizer":{ + "type":"list", + "member":{"shape":"Authorizer"} + }, + "ListOfBasePathMapping":{ + "type":"list", + "member":{"shape":"BasePathMapping"} + }, + "ListOfClientCertificate":{ + "type":"list", + "member":{"shape":"ClientCertificate"} + }, + "ListOfDeployment":{ + "type":"list", + "member":{"shape":"Deployment"} + }, + "ListOfDomainName":{ + "type":"list", + "member":{"shape":"DomainName"} + }, + "ListOfModel":{ + "type":"list", + "member":{"shape":"Model"} + }, + "ListOfPatchOperation":{ + "type":"list", + "member":{"shape":"PatchOperation"} + }, + "ListOfResource":{ + "type":"list", + "member":{"shape":"Resource"} + }, + "ListOfRestApi":{ + "type":"list", + "member":{"shape":"RestApi"} + }, + 
"ListOfStage":{ + "type":"list", + "member":{"shape":"Stage"} + }, + "ListOfStageKeys":{ + "type":"list", + "member":{"shape":"StageKey"} + }, + "ListOfString":{ + "type":"list", + "member":{"shape":"String"} + }, + "Long":{"type":"long"}, + "MapOfHeaderValues":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "MapOfIntegrationResponse":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"IntegrationResponse"} + }, + "MapOfMethod":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"Method"} + }, + "MapOfMethodResponse":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"MethodResponse"} + }, + "MapOfMethodSettings":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"MethodSetting"} + }, + "MapOfMethodSnapshot":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"MethodSnapshot"} + }, + "MapOfStringToBoolean":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"NullableBoolean"} + }, + "MapOfStringToList":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"ListOfString"} + }, + "MapOfStringToString":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "Method":{ + "type":"structure", + "members":{ + "httpMethod":{"shape":"String"}, + "authorizationType":{"shape":"String"}, + "authorizerId":{"shape":"String"}, + "apiKeyRequired":{"shape":"NullableBoolean"}, + "requestParameters":{"shape":"MapOfStringToBoolean"}, + "requestModels":{"shape":"MapOfStringToString"}, + "methodResponses":{"shape":"MapOfMethodResponse"}, + "methodIntegration":{"shape":"Integration"} + } + }, + "MethodResponse":{ + "type":"structure", + "members":{ + "statusCode":{"shape":"StatusCode"}, + "responseParameters":{"shape":"MapOfStringToBoolean"}, + "responseModels":{"shape":"MapOfStringToString"} + } + }, + "MethodSetting":{ + "type":"structure", + "members":{ + "metricsEnabled":{"shape":"Boolean"}, + "loggingLevel":{"shape":"String"}, + "dataTraceEnabled":{"shape":"Boolean"}, + "throttlingBurstLimit":{"shape":"Integer"}, + "throttlingRateLimit":{"shape":"Double"}, + "cachingEnabled":{"shape":"Boolean"}, + "cacheTtlInSeconds":{"shape":"Integer"}, + "cacheDataEncrypted":{"shape":"Boolean"}, + "requireAuthorizationForCacheControl":{"shape":"Boolean"}, + "unauthorizedCacheControlHeaderStrategy":{"shape":"UnauthorizedCacheControlHeaderStrategy"} + } + }, + "MethodSnapshot":{ + "type":"structure", + "members":{ + "authorizationType":{"shape":"String"}, + "apiKeyRequired":{"shape":"Boolean"} + } + }, + "Model":{ + "type":"structure", + "members":{ + "id":{"shape":"String"}, + "name":{"shape":"String"}, + "description":{"shape":"String"}, + "schema":{"shape":"String"}, + "contentType":{"shape":"String"} + } + }, + "Models":{ + "type":"structure", + "members":{ + "position":{"shape":"String"}, + "items":{ + "shape":"ListOfModel", + "locationName":"item" + } + } + }, + "NotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NullableBoolean":{"type":"boolean"}, + "NullableInteger":{"type":"integer"}, + "PatchOperation":{ + "type":"structure", + "members":{ + "op":{"shape":"op"}, + "path":{"shape":"String"}, + "value":{"shape":"String"}, + "from":{"shape":"String"} + } + }, + "PathToMapOfMethodSnapshot":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"MapOfMethodSnapshot"} + }, + "PutIntegrationRequest":{ + "type":"structure", + "required":[ + "restApiId", + 
"resourceId", + "httpMethod", + "type" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "resourceId":{ + "shape":"String", + "location":"uri", + "locationName":"resource_id" + }, + "httpMethod":{ + "shape":"String", + "location":"uri", + "locationName":"http_method" + }, + "type":{"shape":"IntegrationType"}, + "integrationHttpMethod":{ + "shape":"String", + "locationName":"httpMethod" + }, + "uri":{"shape":"String"}, + "credentials":{"shape":"String"}, + "requestParameters":{"shape":"MapOfStringToString"}, + "requestTemplates":{"shape":"MapOfStringToString"}, + "passthroughBehavior":{"shape":"String"}, + "cacheNamespace":{"shape":"String"}, + "cacheKeyParameters":{"shape":"ListOfString"} + } + }, + "PutIntegrationResponseRequest":{ + "type":"structure", + "required":[ + "restApiId", + "resourceId", + "httpMethod", + "statusCode" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "resourceId":{ + "shape":"String", + "location":"uri", + "locationName":"resource_id" + }, + "httpMethod":{ + "shape":"String", + "location":"uri", + "locationName":"http_method" + }, + "statusCode":{ + "shape":"StatusCode", + "location":"uri", + "locationName":"status_code" + }, + "selectionPattern":{"shape":"String"}, + "responseParameters":{"shape":"MapOfStringToString"}, + "responseTemplates":{"shape":"MapOfStringToString"} + } + }, + "PutMethodRequest":{ + "type":"structure", + "required":[ + "restApiId", + "resourceId", + "httpMethod", + "authorizationType" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "resourceId":{ + "shape":"String", + "location":"uri", + "locationName":"resource_id" + }, + "httpMethod":{ + "shape":"String", + "location":"uri", + "locationName":"http_method" + }, + "authorizationType":{"shape":"String"}, + "authorizerId":{"shape":"String"}, + "apiKeyRequired":{"shape":"Boolean"}, + "requestParameters":{"shape":"MapOfStringToBoolean"}, + "requestModels":{"shape":"MapOfStringToString"} + } + }, + "PutMethodResponseRequest":{ + "type":"structure", + "required":[ + "restApiId", + "resourceId", + "httpMethod", + "statusCode" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "resourceId":{ + "shape":"String", + "location":"uri", + "locationName":"resource_id" + }, + "httpMethod":{ + "shape":"String", + "location":"uri", + "locationName":"http_method" + }, + "statusCode":{ + "shape":"StatusCode", + "location":"uri", + "locationName":"status_code" + }, + "responseParameters":{"shape":"MapOfStringToBoolean"}, + "responseModels":{"shape":"MapOfStringToString"} + } + }, + "PutMode":{ + "type":"string", + "enum":[ + "merge", + "overwrite" + ] + }, + "PutRestApiRequest":{ + "type":"structure", + "required":[ + "restApiId", + "body" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "mode":{ + "shape":"PutMode", + "location":"querystring", + "locationName":"mode" + }, + "failOnWarnings":{ + "shape":"Boolean", + "location":"querystring", + "locationName":"failonwarnings" + }, + "parameters":{ + "shape":"MapOfStringToString", + "location":"querystring" + }, + "body":{"shape":"Blob"} + }, + "payload":"body" + }, + "Resource":{ + "type":"structure", + "members":{ + "id":{"shape":"String"}, + "parentId":{"shape":"String"}, + "pathPart":{"shape":"String"}, + "path":{"shape":"String"}, + 
"resourceMethods":{"shape":"MapOfMethod"} + } + }, + "Resources":{ + "type":"structure", + "members":{ + "position":{"shape":"String"}, + "items":{ + "shape":"ListOfResource", + "locationName":"item" + } + } + }, + "RestApi":{ + "type":"structure", + "members":{ + "id":{"shape":"String"}, + "name":{"shape":"String"}, + "description":{"shape":"String"}, + "createdDate":{"shape":"Timestamp"}, + "warnings":{"shape":"ListOfString"} + } + }, + "RestApis":{ + "type":"structure", + "members":{ + "position":{"shape":"String"}, + "items":{ + "shape":"ListOfRestApi", + "locationName":"item" + } + } + }, + "SdkResponse":{ + "type":"structure", + "members":{ + "contentType":{ + "shape":"String", + "location":"header", + "locationName":"Content-Type" + }, + "contentDisposition":{ + "shape":"String", + "location":"header", + "locationName":"Content-Disposition" + }, + "body":{"shape":"Blob"} + }, + "payload":"body" + }, + "ServiceUnavailableException":{ + "type":"structure", + "members":{ + "retryAfterSeconds":{ + "shape":"String", + "location":"header", + "locationName":"Retry-After" + }, + "message":{"shape":"String"} + }, + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + "Stage":{ + "type":"structure", + "members":{ + "deploymentId":{"shape":"String"}, + "clientCertificateId":{"shape":"String"}, + "stageName":{"shape":"String"}, + "description":{"shape":"String"}, + "cacheClusterEnabled":{"shape":"Boolean"}, + "cacheClusterSize":{"shape":"CacheClusterSize"}, + "cacheClusterStatus":{"shape":"CacheClusterStatus"}, + "methodSettings":{"shape":"MapOfMethodSettings"}, + "variables":{"shape":"MapOfStringToString"}, + "createdDate":{"shape":"Timestamp"}, + "lastUpdatedDate":{"shape":"Timestamp"} + } + }, + "StageKey":{ + "type":"structure", + "members":{ + "restApiId":{"shape":"String"}, + "stageName":{"shape":"String"} + } + }, + "Stages":{ + "type":"structure", + "members":{ + "item":{"shape":"ListOfStage"} + } + }, + "StatusCode":{ + "type":"string", + "pattern":"[1-5]\\d\\d" + }, + "String":{"type":"string"}, + "Template":{ + "type":"structure", + "members":{ + "value":{"shape":"String"} + } + }, + "TestInvokeAuthorizerRequest":{ + "type":"structure", + "required":[ + "restApiId", + "authorizerId" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "authorizerId":{ + "shape":"String", + "location":"uri", + "locationName":"authorizer_id" + }, + "headers":{"shape":"MapOfHeaderValues"}, + "pathWithQueryString":{"shape":"String"}, + "body":{"shape":"String"}, + "stageVariables":{"shape":"MapOfStringToString"}, + "additionalContext":{"shape":"MapOfStringToString"} + } + }, + "TestInvokeAuthorizerResponse":{ + "type":"structure", + "members":{ + "clientStatus":{"shape":"Integer"}, + "log":{"shape":"String"}, + "latency":{"shape":"Long"}, + "principalId":{"shape":"String"}, + "policy":{"shape":"String"}, + "authorization":{"shape":"MapOfStringToList"} + } + }, + "TestInvokeMethodRequest":{ + "type":"structure", + "required":[ + "restApiId", + "resourceId", + "httpMethod" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "resourceId":{ + "shape":"String", + "location":"uri", + "locationName":"resource_id" + }, + "httpMethod":{ + "shape":"String", + "location":"uri", + "locationName":"http_method" + }, + "pathWithQueryString":{"shape":"String"}, + "body":{"shape":"String"}, + "headers":{"shape":"MapOfHeaderValues"}, + "clientCertificateId":{"shape":"String"}, + 
"stageVariables":{"shape":"MapOfStringToString"} + } + }, + "TestInvokeMethodResponse":{ + "type":"structure", + "members":{ + "status":{"shape":"Integer"}, + "body":{"shape":"String"}, + "headers":{"shape":"MapOfHeaderValues"}, + "log":{"shape":"String"}, + "latency":{"shape":"Long"} + } + }, + "ThrottleSettings":{ + "type":"structure", + "members":{ + "burstLimit":{"shape":"Integer"}, + "rateLimit":{"shape":"Double"} + } + }, + "Timestamp":{"type":"timestamp"}, + "TooManyRequestsException":{ + "type":"structure", + "members":{ + "retryAfterSeconds":{ + "shape":"String", + "location":"header", + "locationName":"Retry-After" + }, + "message":{"shape":"String"} + }, + "error":{"httpStatusCode":429}, + "exception":true + }, + "UnauthorizedCacheControlHeaderStrategy":{ + "type":"string", + "enum":[ + "FAIL_WITH_403", + "SUCCEED_WITH_RESPONSE_HEADER", + "SUCCEED_WITHOUT_RESPONSE_HEADER" + ] + }, + "UnauthorizedException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "error":{"httpStatusCode":401}, + "exception":true + }, + "UpdateAccountRequest":{ + "type":"structure", + "members":{ + "patchOperations":{"shape":"ListOfPatchOperation"} + } + }, + "UpdateApiKeyRequest":{ + "type":"structure", + "required":["apiKey"], + "members":{ + "apiKey":{ + "shape":"String", + "location":"uri", + "locationName":"api_Key" + }, + "patchOperations":{"shape":"ListOfPatchOperation"} + } + }, + "UpdateAuthorizerRequest":{ + "type":"structure", + "required":[ + "restApiId", + "authorizerId" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "authorizerId":{ + "shape":"String", + "location":"uri", + "locationName":"authorizer_id" + }, + "patchOperations":{"shape":"ListOfPatchOperation"} + } + }, + "UpdateBasePathMappingRequest":{ + "type":"structure", + "required":[ + "domainName", + "basePath" + ], + "members":{ + "domainName":{ + "shape":"String", + "location":"uri", + "locationName":"domain_name" + }, + "basePath":{ + "shape":"String", + "location":"uri", + "locationName":"base_path" + }, + "patchOperations":{"shape":"ListOfPatchOperation"} + } + }, + "UpdateClientCertificateRequest":{ + "type":"structure", + "required":["clientCertificateId"], + "members":{ + "clientCertificateId":{ + "shape":"String", + "location":"uri", + "locationName":"clientcertificate_id" + }, + "patchOperations":{"shape":"ListOfPatchOperation"} + } + }, + "UpdateDeploymentRequest":{ + "type":"structure", + "required":[ + "restApiId", + "deploymentId" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "deploymentId":{ + "shape":"String", + "location":"uri", + "locationName":"deployment_id" + }, + "patchOperations":{"shape":"ListOfPatchOperation"} + } + }, + "UpdateDomainNameRequest":{ + "type":"structure", + "required":["domainName"], + "members":{ + "domainName":{ + "shape":"String", + "location":"uri", + "locationName":"domain_name" + }, + "patchOperations":{"shape":"ListOfPatchOperation"} + } + }, + "UpdateIntegrationRequest":{ + "type":"structure", + "required":[ + "restApiId", + "resourceId", + "httpMethod" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "resourceId":{ + "shape":"String", + "location":"uri", + "locationName":"resource_id" + }, + "httpMethod":{ + "shape":"String", + "location":"uri", + "locationName":"http_method" + }, + "patchOperations":{"shape":"ListOfPatchOperation"} + } + }, + 
"UpdateIntegrationResponseRequest":{ + "type":"structure", + "required":[ + "restApiId", + "resourceId", + "httpMethod", + "statusCode" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "resourceId":{ + "shape":"String", + "location":"uri", + "locationName":"resource_id" + }, + "httpMethod":{ + "shape":"String", + "location":"uri", + "locationName":"http_method" + }, + "statusCode":{ + "shape":"StatusCode", + "location":"uri", + "locationName":"status_code" + }, + "patchOperations":{"shape":"ListOfPatchOperation"} + } + }, + "UpdateMethodRequest":{ + "type":"structure", + "required":[ + "restApiId", + "resourceId", + "httpMethod" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "resourceId":{ + "shape":"String", + "location":"uri", + "locationName":"resource_id" + }, + "httpMethod":{ + "shape":"String", + "location":"uri", + "locationName":"http_method" + }, + "patchOperations":{"shape":"ListOfPatchOperation"} + } + }, + "UpdateMethodResponseRequest":{ + "type":"structure", + "required":[ + "restApiId", + "resourceId", + "httpMethod", + "statusCode" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "resourceId":{ + "shape":"String", + "location":"uri", + "locationName":"resource_id" + }, + "httpMethod":{ + "shape":"String", + "location":"uri", + "locationName":"http_method" + }, + "statusCode":{ + "shape":"StatusCode", + "location":"uri", + "locationName":"status_code" + }, + "patchOperations":{"shape":"ListOfPatchOperation"} + } + }, + "UpdateModelRequest":{ + "type":"structure", + "required":[ + "restApiId", + "modelName" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "modelName":{ + "shape":"String", + "location":"uri", + "locationName":"model_name" + }, + "patchOperations":{"shape":"ListOfPatchOperation"} + } + }, + "UpdateResourceRequest":{ + "type":"structure", + "required":[ + "restApiId", + "resourceId" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "resourceId":{ + "shape":"String", + "location":"uri", + "locationName":"resource_id" + }, + "patchOperations":{"shape":"ListOfPatchOperation"} + } + }, + "UpdateRestApiRequest":{ + "type":"structure", + "required":["restApiId"], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "patchOperations":{"shape":"ListOfPatchOperation"} + } + }, + "UpdateStageRequest":{ + "type":"structure", + "required":[ + "restApiId", + "stageName" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "stageName":{ + "shape":"String", + "location":"uri", + "locationName":"stage_name" + }, + "patchOperations":{"shape":"ListOfPatchOperation"} + } + }, + "op":{ + "type":"string", + "enum":[ + "add", + "remove", + "replace", + "move", + "copy", + "test" + ] + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/apigateway/2015-07-09/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/apigateway/2015-07-09/docs-2.json new file mode 100644 index 000000000..41e2dbd50 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/apigateway/2015-07-09/docs-2.json @@ -0,0 +1,1315 @@ +{ + "version": "2.0", + "service": "Amazon API Gateway
Amazon API Gateway helps developers deliver robust, secure and scalable mobile and web application backends. Amazon API Gateway allows developers to securely connect mobile and web applications to APIs that run on AWS Lambda, Amazon EC2, or other publicly addressable web services that are hosted outside of AWS.",
+ "operations": {
+ "CreateApiKey": "Create an ApiKey resource.",
+ "CreateAuthorizer": "Adds a new Authorizer resource to an existing RestApi resource.",
+ "CreateBasePathMapping": "Creates a new BasePathMapping resource.",
+ "CreateDeployment": "Creates a Deployment resource, which makes a specified RestApi callable over the internet.",
+ "CreateDomainName": "Creates a new domain name.",
+ "CreateModel": "Adds a new Model resource to an existing RestApi resource.",
+ "CreateResource": "Creates a Resource resource.",
+ "CreateRestApi": "Creates a new RestApi resource.",
+ "CreateStage": "Creates a new Stage resource that references a pre-existing Deployment for the API.",
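Taken together, CreateDeployment and CreateStage are the deploy path for an API: the deployment makes the RestApi callable, and a stage references it. A minimal aws-sdk-go sketch of that flow; the RestApi id "abc123", the stage names, and the region are placeholders, not values from this change:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/apigateway"
    )

    func main() {
        // Default credential chain; region is a placeholder.
        sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
        svc := apigateway.New(sess)

        // Deploy the API so it becomes callable ("abc123" is a hypothetical RestApi id).
        dep, err := svc.CreateDeployment(&apigateway.CreateDeploymentInput{
            RestApiId:   aws.String("abc123"),
            StageName:   aws.String("prod"),
            Description: aws.String("initial deployment"),
        })
        if err != nil {
            log.Fatal(err)
        }

        // Point a second stage at the same pre-existing deployment.
        _, err = svc.CreateStage(&apigateway.CreateStageInput{
            RestApiId:    aws.String("abc123"),
            StageName:    aws.String("beta"),
            DeploymentId: dep.Id,
            Variables:    map[string]*string{"env": aws.String("beta")},
        })
        if err != nil {
            log.Fatal(err)
        }
    }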
+ "DeleteApiKey": "Deletes the ApiKey resource.",
+ "DeleteAuthorizer": "Deletes an existing Authorizer resource.",
+ "DeleteBasePathMapping": "Deletes the BasePathMapping resource.",
+ "DeleteClientCertificate": "Deletes the ClientCertificate resource.",
+ "DeleteDeployment": "Deletes a Deployment resource. Deleting a deployment will only succeed if there are no Stage resources associated with it.",
+ "DeleteDomainName": "Deletes the DomainName resource.",
+ "DeleteIntegration": "Represents a delete integration.",
+ "DeleteIntegrationResponse": "Represents a delete integration response.",
+ "DeleteMethod": "Deletes an existing Method resource.",
+ "DeleteMethodResponse": "Deletes an existing MethodResponse resource.",
+ "DeleteModel": "Deletes a model.",
+ "DeleteResource": "Deletes a Resource resource.",
+ "DeleteRestApi": "Deletes the specified API.",
+ "DeleteStage": "Deletes a Stage resource.",
+ "FlushStageAuthorizersCache": "Flushes all authorizer cache entries on a stage.",
+ "FlushStageCache": "Flushes a stage's cache.",
+ "GenerateClientCertificate": "Generates a ClientCertificate resource.",
+ "GetAccount": "Gets information about the current Account resource.",
+ "GetApiKey": "Gets information about the current ApiKey resource.",
+ "GetApiKeys": "Gets information about the current ApiKeys resource.",
+ "GetAuthorizer": "Describe an existing Authorizer resource.",
+ "GetAuthorizers": "Describe an existing Authorizers resource.",
+ "GetBasePathMapping": "Describe a BasePathMapping resource.",
+ "GetBasePathMappings": "Represents a collection of BasePathMapping resources.",
+ "GetClientCertificate": "Gets information about the current ClientCertificate resource.",
+ "GetClientCertificates": "Gets a collection of ClientCertificate resources.",
+ "GetDeployment": "Gets information about a Deployment resource.",
+ "GetDeployments": "Gets information about a Deployments collection.",
+ "GetDomainName": "Represents a domain name that is contained in a simpler, more intuitive URL that can be called.",
+ "GetDomainNames": "Represents a collection of DomainName resources.",
+ "GetExport": "Exports a deployed version of a RestApi in a specified format.",
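A sketch of GetExport from Go, writing the payload member of ExportResponse to disk. The RestApi id is hypothetical, and the "extensions" query key is an assumption: the GetExportRequest$parameters entry later in this file names the 'integrations', 'authorizers', and 'postman' options without spelling out the key.

    package main

    import (
        "io/ioutil"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/apigateway"
    )

    func main() {
        sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
        svc := apigateway.New(sess)

        // Export the deployed "prod" stage of a hypothetical API as Swagger JSON.
        out, err := svc.GetExport(&apigateway.GetExportInput{
            RestApiId:  aws.String("abc123"),
            StageName:  aws.String("prod"),
            ExportType: aws.String("swagger"),
            Accepts:    aws.String("application/json"),
            // Assumed query key for the 'integrations' option documented below.
            Parameters: map[string]*string{"extensions": aws.String("integrations")},
        })
        if err != nil {
            log.Fatal(err)
        }

        // out.Body is the binary blob payload of the ExportResponse shape.
        if err := ioutil.WriteFile("export.json", out.Body, 0644); err != nil {
            log.Fatal(err)
        }
    }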
+ "GetIntegration": "Represents a get integration.",
+ "GetIntegrationResponse": "Represents a get integration response.",
+ "GetMethod": "Describe an existing Method resource.",
+ "GetMethodResponse": "Describes a MethodResponse resource.",
+ "GetModel": "Describes an existing model defined for a RestApi resource.",
+ "GetModelTemplate": "Generates a sample mapping template that can be used to transform a payload into the structure of a model.",
+ "GetModels": "Describes existing Models defined for a RestApi resource.",
+ "GetResource": "Lists information about a resource.",
+ "GetResources": "Lists information about a collection of Resource resources.",
+ "GetRestApi": "Lists the RestApi resource in the collection.",
+ "GetRestApis": "Lists the RestApis resources for your collection.",
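All of the collection getters in this model (GetApiKeys, GetDeployments, GetRestApis, and so on) page through results with the position and limit query string members defined on their request shapes below. A sketch of that loop, assuming an account with more than one page of APIs:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/apigateway"
    )

    func main() {
        sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
        svc := apigateway.New(sess)

        input := &apigateway.GetRestApisInput{Limit: aws.Int64(25)}
        for {
            page, err := svc.GetRestApis(input)
            if err != nil {
                log.Fatal(err)
            }
            for _, api := range page.Items {
                fmt.Println(aws.StringValue(api.Id), aws.StringValue(api.Name))
            }
            // The returned position token, when present, fetches the next page.
            if page.Position == nil {
                break
            }
            input.Position = page.Position
        }
    }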
+ "GetSdk": "Generates a client SDK for a RestApi and Stage.",
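A GetSdk sketch; per the GetSdkRequest$parameters entry later in this file, sdkType 'objectivec' requires a classPrefix parameter. The API id, stage, and prefix are placeholders:

    package main

    import (
        "io/ioutil"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/apigateway"
    )

    func main() {
        sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
        svc := apigateway.New(sess)

        sdk, err := svc.GetSdk(&apigateway.GetSdkInput{
            RestApiId: aws.String("abc123"),
            StageName: aws.String("prod"),
            SdkType:   aws.String("objectivec"),
            // Required for 'objectivec' per GetSdkRequest$parameters below.
            Parameters: map[string]*string{"classPrefix": aws.String("MYAPP")},
        })
        if err != nil {
            log.Fatal(err)
        }

        // sdk.Body is the binary blob payload of the SdkResponse shape.
        if err := ioutil.WriteFile("sdk.zip", sdk.Body, 0644); err != nil {
            log.Fatal(err)
        }
    }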
+ "GetStage": "Gets information about a Stage resource.",
+ "GetStages": "Gets information about one or more Stage resources.",
+ "ImportRestApi": "A feature of the Amazon API Gateway control service for creating a new API from an external API definition file.",
+ "PutIntegration": "Represents a put integration.",
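PutIntegration wires a method to its backend. Its requestParameters map follows the method.request.{location}.{name} to integration.request.{location}.{name} pattern documented in the shape descriptions below, and requestTemplates holds Velocity templates keyed by content type. A sketch with hypothetical RestApi and Resource ids:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/apigateway"
    )

    func main() {
        sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
        svc := apigateway.New(sess)

        // "abc123" and "res456" are hypothetical RestApi and Resource ids.
        _, err := svc.PutIntegration(&apigateway.PutIntegrationInput{
            RestApiId:             aws.String("abc123"),
            ResourceId:            aws.String("res456"),
            HttpMethod:            aws.String("GET"),
            Type:                  aws.String("HTTP"),
            IntegrationHttpMethod: aws.String("GET"),
            Uri:                   aws.String("https://backend.example.com/pets"),
            // Map a method request querystring into the backend request.
            RequestParameters: map[string]*string{
                "integration.request.querystring.page": aws.String("method.request.querystring.page"),
            },
            // Velocity template applied when the client sends application/json.
            RequestTemplates: map[string]*string{
                "application/json": aws.String("{\"page\": \"$input.params('page')\"}"),
            },
        })
        if err != nil {
            log.Fatal(err)
        }
    }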
+ "PutIntegrationResponse": "Represents a put integration response.",
+ "PutMethod": "Add a method to an existing Resource resource.",
+ "PutMethodResponse": "Adds a MethodResponse to an existing Method resource.",
+ "PutRestApi": "A feature of the Amazon API Gateway control service for updating an existing API with an input of external API definitions. The update can take the form of merging the supplied definition into the existing API or overwriting the existing API.",
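A PutRestApi sketch showing the merge-or-overwrite choice from the PutMode enum in the api-2.json above. The API id and the swagger.json path are placeholders:

    package main

    import (
        "io/ioutil"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/apigateway"
    )

    func main() {
        sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
        svc := apigateway.New(sess)

        // Placeholder path to a Swagger definition (the only supported format here).
        body, err := ioutil.ReadFile("swagger.json")
        if err != nil {
            log.Fatal(err)
        }

        _, err = svc.PutRestApi(&apigateway.PutRestApiInput{
            RestApiId:      aws.String("abc123"),
            Mode:           aws.String("merge"), // or "overwrite", per the PutMode enum
            FailOnWarnings: aws.Bool(true),
            Body:           body,
        })
        if err != nil {
            log.Fatal(err)
        }
    }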
+ "TestInvokeAuthorizer": "Simulate the execution of an Authorizer in your RestApi with headers, parameters, and an incoming request body.",
+ "TestInvokeMethod": "Simulate the execution of a Method in your RestApi with headers, parameters, and an incoming request body.",
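A TestInvokeMethod sketch; the response carries the simulated status, body, headers, log, and latency described by the TestInvokeMethodResponse shape below. Ids and the path are placeholders:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/apigateway"
    )

    func main() {
        sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
        svc := apigateway.New(sess)

        res, err := svc.TestInvokeMethod(&apigateway.TestInvokeMethodInput{
            RestApiId:           aws.String("abc123"), // hypothetical ids
            ResourceId:          aws.String("res456"),
            HttpMethod:          aws.String("GET"),
            PathWithQueryString: aws.String("/pets?page=1"),
            Headers:             map[string]*string{"Accept": aws.String("application/json")},
        })
        if err != nil {
            log.Fatal(err)
        }

        // Status and Body mirror the TestInvokeMethodResponse members.
        fmt.Println(aws.Int64Value(res.Status), aws.StringValue(res.Body))
    }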
+ "UpdateAccount": "Changes information about the current Account resource.",
+ "UpdateApiKey": "Changes information about an ApiKey resource.",
+ "UpdateAuthorizer": "Updates an existing Authorizer resource.",
+ "UpdateBasePathMapping": "Changes information about the BasePathMapping resource.",
+ "UpdateClientCertificate": "Changes information about a ClientCertificate resource.",
+ "UpdateDeployment": "Changes information about a Deployment resource.",
+ "UpdateDomainName": "Changes information about the DomainName resource.",
+ "UpdateIntegration": "Represents an update integration.",
+ "UpdateIntegrationResponse": "Represents an update integration response.",
+ "UpdateMethod": "Updates an existing Method resource.",
+ "UpdateMethodResponse": "Updates an existing MethodResponse resource.",
+ "UpdateModel": "Changes information about a model.",
+ "UpdateResource": "Changes information about a Resource resource.",
+ "UpdateRestApi": "Changes information about the specified API.",
+ "UpdateStage": "Changes information about a Stage resource."
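Every Update* operation above takes the same PatchOperation list (op, path, value, from; op is one of add, remove, replace, move, copy, test per the op enum in the api-2.json above). A sketch that flips stage-level method settings through UpdateStage, using the PATCH paths documented under MethodSetting and Stage$methodSettings below; the ids are placeholders:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/apigateway"
    )

    func main() {
        sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
        svc := apigateway.New(sess)

        _, err := svc.UpdateStage(&apigateway.UpdateStageInput{
            RestApiId: aws.String("abc123"),
            StageName: aws.String("prod"),
            PatchOperations: []*apigateway.PatchOperation{
                // */* targets all methods in the stage; see Stage$methodSettings.
                {Op: aws.String("replace"), Path: aws.String("/*/*/caching/enabled"), Value: aws.String("true")},
                {Op: aws.String("replace"), Path: aws.String("/description"), Value: aws.String("production stage")},
            },
        })
        if err != nil {
            log.Fatal(err)
        }
    }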

    " + }, + "shapes": { + "Account": { + "base": "

    Represents an AWS account that is associated with Amazon API Gateway.

    ", + "refs": { + } + }, + "ApiKey": { + "base": "

    A resource that can be distributed to callers for executing Method resources that require an API key. API keys can be mapped to any Stage on any RestApi, which indicates that the callers with the API key can make requests to that stage.

    ", + "refs": { + "ListOfApiKey$member": null + } + }, + "ApiKeys": { + "base": "

    Represents a collection of ApiKey resources.

    ", + "refs": { + } + }, + "Authorizer": { + "base": "

    Represents an authorization layer for methods. If enabled on a method, API Gateway will activate the authorizer when a client calls the method.

    ", + "refs": { + "ListOfAuthorizer$member": null + } + }, + "AuthorizerType": { + "base": "

    The authorizer type. Only current value is TOKEN.

    ", + "refs": { + "Authorizer$type": "

    [Required] The type of the authorizer. Currently, the only valid type is TOKEN.

    ", + "CreateAuthorizerRequest$type": "

    [Required] The type of the authorizer.

    " + } + }, + "Authorizers": { + "base": "

    Represents a collection of Authorizer resources.

    ", + "refs": { + } + }, + "BadRequestException": { + "base": null, + "refs": { + } + }, + "BasePathMapping": { + "base": "

    Represents the base path that callers of the API that must provide as part of the URL after the domain name.

    ", + "refs": { + "ListOfBasePathMapping$member": null + } + }, + "BasePathMappings": { + "base": "

    Represents a collection of BasePathMapping resources.

    ", + "refs": { + } + }, + "Blob": { + "base": null, + "refs": { + "ExportResponse$body": "

    The binary blob response to GetExport, which contains the export.

    ", + "ImportRestApiRequest$body": "

    The POST request body containing external API definitions. Currently, only Swagger definition JSON files are supported.

    ", + "PutRestApiRequest$body": "

    The PUT request body containing external API definitions. Currently, only Swagger definition JSON files are supported.

    ", + "SdkResponse$body": "

    The binary blob response to GetSdk, which contains the generated SDK.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "ApiKey$enabled": "

    Specifies whether the API Key can be used by callers.

    ", + "CreateApiKeyRequest$enabled": "

    Specifies whether the ApiKey can be used by callers.

    ", + "CreateStageRequest$cacheClusterEnabled": "

    Whether cache clustering is enabled for the stage.

    ", + "GetModelRequest$flatten": "

    Resolves all external model references and returns a flattened model schema.

    ", + "ImportRestApiRequest$failOnWarnings": "

    A query parameter to indicate whether to rollback the API creation (true) or not (false) when a warning is encountered. The default value is false.

    ", + "MethodSetting$metricsEnabled": "

    Specifies whether Amazon CloudWatch metrics are enabled for this method. The PATCH path for this setting is /{method_setting_key}/metrics/enabled, and the value is a Boolean.

    ", + "MethodSetting$dataTraceEnabled": "

    Specifies the whether data trace logging is enabled for this method, which effects the log entries pushed to Amazon CloudWatch Logs. The PATCH path for this setting is /{method_setting_key}/logging/dataTrace, and the value is a Boolean.

    ", + "MethodSetting$cachingEnabled": "

    Specifies whether responses should be cached and returned for requests. A cache cluster must be enabled on the stage for responses to be cached. The PATCH path for this setting is /{method_setting_key}/caching/enabled, and the value is a Boolean.

    ", + "MethodSetting$cacheDataEncrypted": "

    Specifies whether the cached responses are encrypted. The PATCH path for this setting is /{method_setting_key}/caching/dataEncrypted, and the value is a Boolean.

    ", + "MethodSetting$requireAuthorizationForCacheControl": "

    Specifies whether authorization is required for a cache invalidation request. The PATCH path for this setting is /{method_setting_key}/caching/requireAuthorizationForCacheControl, and the value is a Boolean.

    ", + "MethodSnapshot$apiKeyRequired": "

    Specifies whether the method requires a valid ApiKey.

    ", + "PutMethodRequest$apiKeyRequired": "

    Specifies whether the method required a valid ApiKey.

    ", + "PutRestApiRequest$failOnWarnings": "

    A query parameter to indicate whether to rollback the API update (true) or not (false) when a warning is encountered. The default value is false.

    ", + "Stage$cacheClusterEnabled": "

    Specifies whether a cache cluster is enabled for the stage.

    " + } + }, + "CacheClusterSize": { + "base": "

    Returns the size of the CacheCluster.

    ", + "refs": { + "CreateDeploymentRequest$cacheClusterSize": "

    Specifies the cache cluster size for the Stage resource specified in the input, if a cache cluster is enabled.

    ", + "CreateStageRequest$cacheClusterSize": "

    The stage's cache cluster size.

    ", + "Stage$cacheClusterSize": "

    The size of the cache cluster for the stage, if enabled.

    " + } + }, + "CacheClusterStatus": { + "base": "

    Returns the status of the CacheCluster.

    ", + "refs": { + "Stage$cacheClusterStatus": "

    The status of the cache cluster for the stage, if enabled.

    " + } + }, + "ClientCertificate": { + "base": "

    Represents a Client Certificate used to configure client-side SSL authentication while sending requests to the integration endpoint.

    ", + "refs": { + "ListOfClientCertificate$member": null + } + }, + "ClientCertificates": { + "base": "

    Represents a collection of ClientCertificate resources.

    ", + "refs": { + } + }, + "ConflictException": { + "base": null, + "refs": { + } + }, + "CreateApiKeyRequest": { + "base": "

    Request to create an ApiKey resource.

    ", + "refs": { + } + }, + "CreateAuthorizerRequest": { + "base": "

    Request to add a new Authorizer to an existing RestApi resource.

    ", + "refs": { + } + }, + "CreateBasePathMappingRequest": { + "base": "

    Requests Amazon API Gateway to create a new BasePathMapping resource.

    ", + "refs": { + } + }, + "CreateDeploymentRequest": { + "base": "

    Requests Amazon API Gateway to create a Deployment resource.

    ", + "refs": { + } + }, + "CreateDomainNameRequest": { + "base": "

    A request to create a new domain name.

    ", + "refs": { + } + }, + "CreateModelRequest": { + "base": "

    Request to add a new Model to an existing RestApi resource.

    ", + "refs": { + } + }, + "CreateResourceRequest": { + "base": "

    Requests Amazon API Gateway to create a Resource resource.

    ", + "refs": { + } + }, + "CreateRestApiRequest": { + "base": "

    The POST Request to add a new RestApi resource to your collection.

    ", + "refs": { + } + }, + "CreateStageRequest": { + "base": "

    Requests Amazon API Gateway to create a Stage resource.

    ", + "refs": { + } + }, + "DeleteApiKeyRequest": { + "base": "

    A request to delete the ApiKey resource.

    ", + "refs": { + } + }, + "DeleteAuthorizerRequest": { + "base": "

    Request to delete an existing Authorizer resource.

    ", + "refs": { + } + }, + "DeleteBasePathMappingRequest": { + "base": "

    A request to delete the BasePathMapping resource.

    ", + "refs": { + } + }, + "DeleteClientCertificateRequest": { + "base": "

    A request to delete the ClientCertificate resource.

    ", + "refs": { + } + }, + "DeleteDeploymentRequest": { + "base": "

    Requests Amazon API Gateway to delete a Deployment resource.

    ", + "refs": { + } + }, + "DeleteDomainNameRequest": { + "base": "

    A request to delete the DomainName resource.

    ", + "refs": { + } + }, + "DeleteIntegrationRequest": { + "base": "

    Represents a delete integration request.

    ", + "refs": { + } + }, + "DeleteIntegrationResponseRequest": { + "base": "

    Represents a delete integration response request.

    ", + "refs": { + } + }, + "DeleteMethodRequest": { + "base": "

    Request to delete an existing Method resource.

    ", + "refs": { + } + }, + "DeleteMethodResponseRequest": { + "base": "

    A request to delete an existing MethodResponse resource.

    ", + "refs": { + } + }, + "DeleteModelRequest": { + "base": "

    Request to delete an existing model in an existing RestApi resource.

    ", + "refs": { + } + }, + "DeleteResourceRequest": { + "base": "

    Request to delete a Resource.

    ", + "refs": { + } + }, + "DeleteRestApiRequest": { + "base": "

    Request to delete the specified API from your collection.

    ", + "refs": { + } + }, + "DeleteStageRequest": { + "base": "

    Requests Amazon API Gateway to delete a Stage resource.

    ", + "refs": { + } + }, + "Deployment": { + "base": "

    An immutable representation of a RestApi resource that can be called by users using Stages. A deployment must be associated with a Stage for it to be callable over the Internet.

    ", + "refs": { + "ListOfDeployment$member": null + } + }, + "Deployments": { + "base": "

    Represents a collection resource that contains zero or more references to your existing deployments, and links that guide you on ways to interact with your collection. The collection offers a paginated view of the contained deployments.

    ", + "refs": { + } + }, + "DomainName": { + "base": "

    Represents a domain name that is contained in a simpler, more intuitive URL that can be called.

    ", + "refs": { + "ListOfDomainName$member": null + } + }, + "DomainNames": { + "base": "

    Represents a collection of DomainName resources.

    ", + "refs": { + } + }, + "Double": { + "base": null, + "refs": { + "MethodSetting$throttlingRateLimit": "

    Specifies the throttling rate limit. The PATCH path for this setting is /{method_setting_key}/throttling/rateLimit, and the value is a double.

    ", + "ThrottleSettings$rateLimit": "

    Returns the rateLimit when ThrottleSettings is called.

    " + } + }, + "ExportResponse": { + "base": "

    The binary blob response to GetExport, which contains the generated SDK.

    ", + "refs": { + } + }, + "FlushStageAuthorizersCacheRequest": { + "base": "

    Request to flush authorizer cache entries on a specified stage.

    ", + "refs": { + } + }, + "FlushStageCacheRequest": { + "base": "

    Requests Amazon API Gateway to flush a stage's cache.

    ", + "refs": { + } + }, + "GenerateClientCertificateRequest": { + "base": "

    A request to generate a ClientCertificate resource.

    ", + "refs": { + } + }, + "GetAccountRequest": { + "base": "

    Requests Amazon API Gateway to get information about the current Account resource.

    ", + "refs": { + } + }, + "GetApiKeyRequest": { + "base": "

    A request to get information about the current ApiKey resource.

    ", + "refs": { + } + }, + "GetApiKeysRequest": { + "base": "

    A request to get information about the current ApiKeys resource.

    ", + "refs": { + } + }, + "GetAuthorizerRequest": { + "base": "

    Request to describe an existing Authorizer resource.

    ", + "refs": { + } + }, + "GetAuthorizersRequest": { + "base": "

    Request to describe an existing Authorizers resource.

    ", + "refs": { + } + }, + "GetBasePathMappingRequest": { + "base": "

    Request to describe a BasePathMapping resource.

    ", + "refs": { + } + }, + "GetBasePathMappingsRequest": { + "base": "

    A request to get information about a collection of BasePathMapping resources.

    ", + "refs": { + } + }, + "GetClientCertificateRequest": { + "base": "

    A request to get information about the current ClientCertificate resource.

    ", + "refs": { + } + }, + "GetClientCertificatesRequest": { + "base": "

    A request to get information about a collection of ClientCertificate resources.

    ", + "refs": { + } + }, + "GetDeploymentRequest": { + "base": "

    Requests Amazon API Gateway to get information about a Deployment resource.

    ", + "refs": { + } + }, + "GetDeploymentsRequest": { + "base": "

    Requests Amazon API Gateway to get information about a Deployments collection.

    ", + "refs": { + } + }, + "GetDomainNameRequest": { + "base": "

    Request to get the name of a DomainName resource.

    ", + "refs": { + } + }, + "GetDomainNamesRequest": { + "base": "

    Request to describe a collection of DomainName resources.

    ", + "refs": { + } + }, + "GetExportRequest": { + "base": "

    Request a new export of a RestApi for a particular Stage.

    ", + "refs": { + } + }, + "GetIntegrationRequest": { + "base": "

    Represents a get integration request.

    ", + "refs": { + } + }, + "GetIntegrationResponseRequest": { + "base": "

    Represents a get integration response request.

    ", + "refs": { + } + }, + "GetMethodRequest": { + "base": "

    Request to describe an existing Method resource.

    ", + "refs": { + } + }, + "GetMethodResponseRequest": { + "base": "

    Request to describe a MethodResponse resource.

    ", + "refs": { + } + }, + "GetModelRequest": { + "base": "

    Request to list information about a model in an existing RestApi resource.

    ", + "refs": { + } + }, + "GetModelTemplateRequest": { + "base": "

    Request to generate a sample mapping template used to transform the payload.

    ", + "refs": { + } + }, + "GetModelsRequest": { + "base": "

    Request to list existing Models defined for a RestApi resource.

    ", + "refs": { + } + }, + "GetResourceRequest": { + "base": "

    Request to list information about a resource.

    ", + "refs": { + } + }, + "GetResourcesRequest": { + "base": "

    Request to list information about a collection of resources.

    ", + "refs": { + } + }, + "GetRestApiRequest": { + "base": "

    The GET request to list an existing RestApi defined for your collection.

    ", + "refs": { + } + }, + "GetRestApisRequest": { + "base": "

    The GET request to list existing RestApis defined for your collection.

    ", + "refs": { + } + }, + "GetSdkRequest": { + "base": "

    Request a new generated client SDK for a RestApi and Stage.

    ", + "refs": { + } + }, + "GetStageRequest": { + "base": "

    Requests Amazon API Gateway to get information about a Stage resource.

    ", + "refs": { + } + }, + "GetStagesRequest": { + "base": "

    Requests Amazon API Gateway to get information about one or more Stage resources.

    ", + "refs": { + } + }, + "ImportRestApiRequest": { + "base": "

    A POST request to import an API to Amazon API Gateway using an input of an API definition file.

    ", + "refs": { + } + }, + "Integer": { + "base": null, + "refs": { + "MethodSetting$throttlingBurstLimit": "

    Specifies the throttling burst limit. The PATCH path for this setting is /{method_setting_key}/throttling/burstLimit, and the value is an integer.

    ", + "MethodSetting$cacheTtlInSeconds": "

    Specifies the time to live (TTL) in seconds, for cached responses. The higher a the TTL, the longer the response will be cached. The PATCH path for this setting is /{method_setting_key}/caching/ttlInSeconds, and the value is an integer.

    ", + "TestInvokeAuthorizerResponse$clientStatus": "

    The HTTP status code that the client would have received. Value is 0 if the authorizer succeeded.

    ", + "TestInvokeMethodResponse$status": "

    The HTTP status code.

    ", + "ThrottleSettings$burstLimit": "

    Returns the burstLimit when ThrottleSettings is called.

    " + } + }, + "Integration": { + "base": "

    Represents a HTTP, AWS, or Mock integration.

    ", + "refs": { + "Method$methodIntegration": "

    The method's integration.

    " + } + }, + "IntegrationResponse": { + "base": "

    Represents an integration response. The status code must map to an existing MethodResponse, and parameters and templates can be used to transform the backend response.

    ", + "refs": { + "MapOfIntegrationResponse$value": null + } + }, + "IntegrationType": { + "base": "

    The integration type. The valid value is HTTP, AWS, or MOCK.

    ", + "refs": { + "Integration$type": "

    Specifies the integration's type. The valid value is HTTP, AWS, or MOCK.

    ", + "PutIntegrationRequest$type": "

    Specifies a put integration input's type.

    " + } + }, + "LimitExceededException": { + "base": null, + "refs": { + } + }, + "ListOfApiKey": { + "base": null, + "refs": { + "ApiKeys$items": "

    The current page of any ApiKey resources in the collection of ApiKey resources.

    " + } + }, + "ListOfAuthorizer": { + "base": null, + "refs": { + "Authorizers$items": "

    Gets the current list of Authorizer resources in the collection.

    " + } + }, + "ListOfBasePathMapping": { + "base": null, + "refs": { + "BasePathMappings$items": "

    The current page of any BasePathMapping resources in the collection of base path mapping resources.

    " + } + }, + "ListOfClientCertificate": { + "base": null, + "refs": { + "ClientCertificates$items": "

    The current page of any ClientCertificate resources in the collection of ClientCertificate resources.

    " + } + }, + "ListOfDeployment": { + "base": null, + "refs": { + "Deployments$items": "

    The current page of any Deployment resources in the collection of deployment resources.

    " + } + }, + "ListOfDomainName": { + "base": null, + "refs": { + "DomainNames$items": "

    The current page of any DomainName resources in the collection of DomainName resources.

    " + } + }, + "ListOfModel": { + "base": null, + "refs": { + "Models$items": "

    Gets the current Model resource in the collection.

    " + } + }, + "ListOfPatchOperation": { + "base": "A list of operations describing the updates to apply to the specified resource. The patches are applied in the order specified in the list.", + "refs": { + "UpdateAccountRequest$patchOperations": "

    A list of operations describing the updates to apply to the specified resource. The patches are applied in the order specified in the list.

    ", + "UpdateApiKeyRequest$patchOperations": "

    A list of operations describing the updates to apply to the specified resource. The patches are applied in the order specified in the list.

    ", + "UpdateAuthorizerRequest$patchOperations": "

    A list of operations describing the updates to apply to the specified resource. The patches are applied in the order specified in the list.

    ", + "UpdateBasePathMappingRequest$patchOperations": "

    A list of operations describing the updates to apply to the specified resource. The patches are applied in the order specified in the list.

    ", + "UpdateClientCertificateRequest$patchOperations": "

    A list of operations describing the updates to apply to the specified resource. The patches are applied in the order specified in the list.

    ", + "UpdateDeploymentRequest$patchOperations": "

    A list of operations describing the updates to apply to the specified resource. The patches are applied in the order specified in the list.

    ", + "UpdateDomainNameRequest$patchOperations": "

    A list of operations describing the updates to apply to the specified resource. The patches are applied in the order specified in the list.

    ", + "UpdateIntegrationRequest$patchOperations": "

    A list of operations describing the updates to apply to the specified resource. The patches are applied in the order specified in the list.

    ", + "UpdateIntegrationResponseRequest$patchOperations": "

    A list of operations describing the updates to apply to the specified resource. The patches are applied in the order specified in the list.

    ", + "UpdateMethodRequest$patchOperations": "

    A list of operations describing the updates to apply to the specified resource. The patches are applied in the order specified in the list.

    ", + "UpdateMethodResponseRequest$patchOperations": "

    A list of operations describing the updates to apply to the specified resource. The patches are applied in the order specified in the list.

    ", + "UpdateModelRequest$patchOperations": "

    A list of operations describing the updates to apply to the specified resource. The patches are applied in the order specified in the list.

    ", + "UpdateResourceRequest$patchOperations": "

    A list of operations describing the updates to apply to the specified resource. The patches are applied in the order specified in the list.

    ", + "UpdateRestApiRequest$patchOperations": "

    A list of operations describing the updates to apply to the specified resource. The patches are applied in the order specified in the list.

    ", + "UpdateStageRequest$patchOperations": "

    A list of operations describing the updates to apply to the specified resource. The patches are applied in the order specified in the list.

    " + } + }, + "ListOfResource": { + "base": null, + "refs": { + "Resources$items": "

    Gets the current Resource resource in the collection.

    " + } + }, + "ListOfRestApi": { + "base": null, + "refs": { + "RestApis$items": "

    An array of links to the current page of RestApi resources.

    " + } + }, + "ListOfStage": { + "base": null, + "refs": { + "Stages$item": "

    An individual Stage resource.

    " + } + }, + "ListOfStageKeys": { + "base": null, + "refs": { + "CreateApiKeyRequest$stageKeys": "

    Specifies whether the ApiKey can be used by callers.

    " + } + }, + "ListOfString": { + "base": null, + "refs": { + "ApiKey$stageKeys": "

    A list of Stage resources that are associated with the ApiKey resource.

    ", + "Integration$cacheKeyParameters": "

    Specifies the integration's cache key parameters.

    ", + "MapOfStringToList$value": null, + "PutIntegrationRequest$cacheKeyParameters": "

    Specifies a put integration input's cache key parameters.

    ", + "RestApi$warnings": null + } + }, + "Long": { + "base": null, + "refs": { + "TestInvokeAuthorizerResponse$latency": "

    The execution latency of the test authorizer request

    ", + "TestInvokeMethodResponse$latency": "

    The execution latency of the test invoke request.

    " + } + }, + "MapOfHeaderValues": { + "base": null, + "refs": { + "TestInvokeAuthorizerRequest$headers": "

    [Required] A key-value map of headers to simulate an incoming invocation request. This is where the incoming authorization token, or identity source, should be specified.

    ", + "TestInvokeMethodRequest$headers": "

    A key-value map of headers to simulate an incoming invocation request.

    ", + "TestInvokeMethodResponse$headers": "

    The headers of HTTP response.

    " + } + }, + "MapOfIntegrationResponse": { + "base": null, + "refs": { + "Integration$integrationResponses": "

    Specifies the integration's responses.

    " + } + }, + "MapOfMethod": { + "base": null, + "refs": { + "Resource$resourceMethods": "

    Map of methods for this resource, which is included only if the request uses the embed query option.

    " + } + }, + "MapOfMethodResponse": { + "base": null, + "refs": { + "Method$methodResponses": "

    Represents available responses that can be sent to the caller. Method responses are represented as a key/value map, with an HTTP status code as the key and a MethodResponse as the value. The status codes are available for the Integration responses to map to.

    " + } + }, + "MapOfMethodSettings": { + "base": null, + "refs": { + "Stage$methodSettings": "

    A map that defines the method settings for a Stage resource. Keys are defined as {resource_path}/{http_method} for an individual method override, or \\*/\\* for the settings applied to all methods in the stage.

    " + } + }, + "MapOfMethodSnapshot": { + "base": null, + "refs": { + "PathToMapOfMethodSnapshot$value": null + } + }, + "MapOfStringToBoolean": { + "base": null, + "refs": { + "Method$requestParameters": "

    Represents request parameters that can be accepted by Amazon API Gateway. Request parameters are represented as a key/value map, with a source as the key and a Boolean flag as the value. The Boolean flag is used to specify whether the parameter is required. A source must match the pattern method.request.{location}.{name}, where location is either querystring, path, or header. name is a valid, unique parameter name. Sources specified here are available to the integration for mapping to integration request parameters or templates.

    ", + "MethodResponse$responseParameters": "

    Represents response parameters that can be sent back to the caller by Amazon API Gateway. Response parameters are represented as a key/value map, with a destination as the key and a boolean flag as the value, which is used to specify whether the parameter is required. A destination must match the pattern method.response.header.{name}, where name is a valid, unique header name. Destinations specified here are available to the integration for mapping from integration response parameters.

    ", + "PutMethodRequest$requestParameters": "

    Represents requests parameters that are sent with the backend request. Request parameters are represented as a key/value map, with a destination as the key and a source as the value. A source must match an existing method request parameter, or a static value. Static values must be enclosed with single quotes, and be pre-encoded based on their destination in the request. The destination must match the pattern integration.request.{location}.{name}, where location is either querystring, path, or header. name must be a valid, unique parameter name.

    ", + "PutMethodResponseRequest$responseParameters": "

    Represents response parameters that can be sent back to the caller by Amazon API Gateway. Response parameters are represented as a key/value map, with a destination as the key and a Boolean flag as the value. The Boolean flag is used to specify whether the parameter is required. A destination must match the pattern method.response.header.{name}, where name is a valid, unique header name. Destinations specified here are available to the integration for mapping from integration response parameters.

    " + } + }, + "MapOfStringToList": { + "base": null, + "refs": { + "TestInvokeAuthorizerResponse$authorization": null + } + }, + "MapOfStringToString": { + "base": null, + "refs": { + "CreateDeploymentRequest$variables": "

    A map that defines the stage variables for the Stage resource that is associated with the new deployment. Variable names can have alphanumeric characters, and the values must match [A-Za-z0-9-._~:/?#&=,]+.

    ", + "CreateStageRequest$variables": "

    A map that defines the stage variables for the new Stage resource. Variable names can have alphanumeric characters, and the values must match [A-Za-z0-9-._~:/?#&=,]+.

    ", + "GetExportRequest$parameters": "

    A key-value map of query string parameters that specify properties of the export, depending on the requested exportType. For exportType 'swagger', any combination of the following parameters are supported: 'integrations' will export x-amazon-apigateway-integration extensions 'authorizers' will export x-amazon-apigateway-authorizer extensions 'postman' will export with Postman extensions, allowing for import to the Postman tool

    ", + "GetSdkRequest$parameters": "

    A key-value map of query string parameters that specify properties of the SDK, depending on the requested sdkType. For sdkType 'objectivec', a parameter named \"classPrefix\" is required. For sdkType 'android', parameters named \"groupId\", \"artifactId\", \"artifactVersion\", and \"invokerPackage\" are required.
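
    As a hedged illustration of how these sdkType-specific parameters are passed from the Go client (the same map-of-strings shape also carries the GetExport parameters above), here is a sketch that is not part of this patch; the IDs and Maven coordinates are made-up placeholders.

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/apigateway"
)

// getAndroidSdk requests a generated SDK; per the description above, all four
// parameters are required when sdkType is 'android'.
func getAndroidSdk(restApiID, stageName string) (*apigateway.GetSdkOutput, error) {
	svc := apigateway.New(session.Must(session.NewSession()))
	return svc.GetSdk(&apigateway.GetSdkInput{
		RestApiId: aws.String(restApiID),
		StageName: aws.String(stageName),
		SdkType:   aws.String("android"),
		Parameters: map[string]*string{
			"groupId":         aws.String("com.example"),        // placeholder
			"artifactId":      aws.String("example-api"),        // placeholder
			"artifactVersion": aws.String("1.0.0"),              // placeholder
			"invokerPackage":  aws.String("com.example.client"), // placeholder
		},
	})
}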

    ", + "ImportRestApiRequest$parameters": "

    Custom header parameters as part of the request.

    ", + "Integration$requestParameters": "

    Represents requests parameters that are sent with the backend request. Request parameters are represented as a key/value map, with a destination as the key and a source as the value. A source must match an existing method request parameter, or a static value. Static values must be enclosed with single quotes, and be pre-encoded based on their destination in the request. The destination must match the pattern integration.request.{location}.{name}, where location is either querystring, path, or header. name must be a valid, unique parameter name.

    ", + "Integration$requestTemplates": "

    Represents a map of Velocity templates that are applied on the request payload based on the value of the Content-Type header sent by the client. The content type value is the key in this map, and the template (as a String) is the value.

    ", + "IntegrationResponse$responseParameters": "

    Represents response parameters that can be read from the backend response. Response parameters are represented as a key/value map, with a destination as the key and a source as the value. A destination must match an existing response parameter in the MethodResponse. The source can be a header from the backend response, or a static value. Static values are specified using enclosing single quotes, and backend response headers can be read using the pattern integration.response.header.{name}.

    ", + "IntegrationResponse$responseTemplates": "

    Specifies the templates used to transform the integration response body. Response templates are represented as a key/value map, with a content-type as the key and a template as the value.

    ", + "Method$requestModels": "

    Specifies the Model resources used for the request's content type. Request models are represented as a key/value map, with a content type as the key and a Model name as the value.

    ", + "MethodResponse$responseModels": "

    Specifies the Model resources used for the response's content-type. Response models are represented as a key/value map, with a content-type as the key and a Model name as the value.

    ", + "PutIntegrationRequest$requestParameters": "

    Represents request parameters that are sent with the backend request. Request parameters are represented as a key/value map, with a destination as the key and a source as the value. A source must match an existing method request parameter, or a static value. Static values must be enclosed with single quotes, and be pre-encoded based on their destination in the request. The destination must match the pattern integration.request.{location}.{name}, where location is either querystring, path, or header. name must be a valid, unique parameter name.

    ", + "PutIntegrationRequest$requestTemplates": "

    Represents a map of Velocity templates that are applied on the request payload based on the value of the Content-Type header sent by the client. The content type value is the key in this map, and the template (as a String) is the value.

    ", + "PutIntegrationResponseRequest$responseParameters": "

    Represents response parameters that can be read from the backend response. Response parameters are represented as a key/value map, with a destination as the key and a source as the value. A destination must match an existing response parameter in the Method. The source can be a header from the backend response, or a static value. Static values are specified using enclosing single quotes, and backend response headers can be read using the pattern integration.response.header.{name}.

    ", + "PutIntegrationResponseRequest$responseTemplates": "

    Specifies a put integration response's templates.

    ", + "PutMethodRequest$requestModels": "

    Specifies the Model resources used for the request's content type. Request models are represented as a key/value map, with a content type as the key and a Model name as the value.

    ", + "PutMethodResponseRequest$responseModels": "

    Specifies the Model resources used for the response's content type. Response models are represented as a key/value map, with a content type as the key and a Model name as the value.

    ", + "PutRestApiRequest$parameters": "

    Custom headers supplied as part of the request.

    ", + "Stage$variables": "

    A map that defines the stage variables for a Stage resource. Variable names can have alphanumeric characters, and the values must match [A-Za-z0-9-._~:/?#&=,]+.

    ", + "TestInvokeAuthorizerRequest$stageVariables": "

    A key-value map of stage variables to simulate an invocation on a deployed Stage.

    ", + "TestInvokeAuthorizerRequest$additionalContext": "

    [Optional] A key-value map of additional context variables.

    ", + "TestInvokeMethodRequest$stageVariables": "

    A key-value map of stage variables to simulate an invocation on a deployed Stage.
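
    A short sketch of a simulated invocation combining the header and stage-variable maps described above (illustrative only; the path and backend URL are placeholders, not values from this patch):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/apigateway"
)

// testInvoke simulates a GET against a method without deploying, supplying
// incoming headers and stage variables, then prints the status and latency.
func testInvoke(restApiID, resourceID string) error {
	svc := apigateway.New(session.Must(session.NewSession()))
	out, err := svc.TestInvokeMethod(&apigateway.TestInvokeMethodInput{
		RestApiId:           aws.String(restApiID),
		ResourceId:          aws.String(resourceID),
		HttpMethod:          aws.String("GET"),
		PathWithQueryString: aws.String("/pets/1?details=true"), // placeholder
		Headers: map[string]*string{
			"Accept": aws.String("application/json"),
		},
		StageVariables: map[string]*string{
			"backendUrl": aws.String("https://example.com"), // placeholder
		},
	})
	if err != nil {
		return err
	}
	fmt.Println(aws.Int64Value(out.Status), aws.Int64Value(out.Latency))
	return nil
}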

    " + } + }, + "Method": { + "base": "

    Represents a method.

    ", + "refs": { + "MapOfMethod$value": null + } + }, + "MethodResponse": { + "base": "

    Represents a method response. Amazon API Gateway sends back the status code to the caller as the HTTP status code. Parameters and models can be used to transform the response from the method's integration.

    ", + "refs": { + "MapOfMethodResponse$value": null + } + }, + "MethodSetting": { + "base": "

    Specifies the method setting properties.

    ", + "refs": { + "MapOfMethodSettings$value": null + } + }, + "MethodSnapshot": { + "base": "

    Represents a summary of a Method resource, given a particular date and time.

    ", + "refs": { + "MapOfMethodSnapshot$value": null + } + }, + "Model": { + "base": "

    Represents the structure of a request or response payload for a method.

    ", + "refs": { + "ListOfModel$member": null + } + }, + "Models": { + "base": "

    Represents a collection of Model resources.

    ", + "refs": { + } + }, + "NotFoundException": { + "base": null, + "refs": { + } + }, + "NullableBoolean": { + "base": null, + "refs": { + "CreateDeploymentRequest$cacheClusterEnabled": "

    Enables a cache cluster for the Stage resource specified in the input.

    ", + "MapOfStringToBoolean$value": null, + "Method$apiKeyRequired": "

    Specifies whether the method requires a valid ApiKey.

    " + } + }, + "NullableInteger": { + "base": null, + "refs": { + "Authorizer$authorizerResultTtlInSeconds": "

    The TTL in seconds of cached authorizer results. If greater than 0, API Gateway will cache authorizer responses. If this field is not set, the default value is 300. The maximum value is 3600, or 1 hour.

    ", + "CreateAuthorizerRequest$authorizerResultTtlInSeconds": "

    The TTL of cached authorizer results.

    ", + "GetApiKeysRequest$limit": "

    The maximum number of ApiKeys to get information about.

    ", + "GetAuthorizersRequest$limit": "

    Limit the number of Authorizer resources in the response.

    ", + "GetBasePathMappingsRequest$limit": "

    The maximum number of BasePathMapping resources in the collection to get information about. The default limit is 25. It should be an integer between 1 and 500.

    ", + "GetClientCertificatesRequest$limit": "

    The maximum number of ClientCertificate resources in the collection to get information about. The default limit is 25. It should be an integer between 1 and 500.

    ", + "GetDeploymentsRequest$limit": "

    The maximum number of Deployment resources in the collection to get information about. The default limit is 25. It should be an integer between 1 and 500.

    ", + "GetDomainNamesRequest$limit": "

    The maximum number of DomainName resources in the collection to get information about. The default limit is 25. It should be an integer between 1 and 500.

    ", + "GetModelsRequest$limit": "

    The maximum number of models in the collection to get information about. The default limit is 25. It should be an integer between 1 and 500.

    ", + "GetResourcesRequest$limit": "

    The maximum number of Resource resources in the collection to get information about. The default limit is 25. It should be an integer between 1 and 500.

    ", + "GetRestApisRequest$limit": "

    The maximum number of RestApi resources in the collection to get information about. The default limit is 25. It should be an integer between 1 and 500.
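
    The limit/position pairs above are what the generated paginators thread through (the paginator config for this service is added later in this patch). A minimal sketch, not part of the patch itself:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/apigateway"
)

// listAllRestApis walks every page of GetRestApis; the SDK passes the
// returned position back as the next request's position automatically.
func listAllRestApis() error {
	svc := apigateway.New(session.Must(session.NewSession()))
	return svc.GetRestApisPages(&apigateway.GetRestApisInput{
		Limit: aws.Int64(100), // per-page size; the service default is 25
	}, func(page *apigateway.GetRestApisOutput, lastPage bool) bool {
		for _, api := range page.Items {
			fmt.Println(aws.StringValue(api.Id), aws.StringValue(api.Name))
		}
		return true // keep paging until lastPage
	})
}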

    " + } + }, + "PatchOperation": { + "base": "A single patch operation to apply to the specified resource. Please refer to http://tools.ietf.org/html/rfc6902#section-4 for an explanation of how each operation is used.", + "refs": { + "ListOfPatchOperation$member": null + } + }, + "PathToMapOfMethodSnapshot": { + "base": null, + "refs": { + "Deployment$apiSummary": "

    Gets a summary of the RestApi at the date and time that the deployment resource was created.

    " + } + }, + "PutIntegrationRequest": { + "base": "

    Represents a put integration request.

    ", + "refs": { + } + }, + "PutIntegrationResponseRequest": { + "base": "

    Represents a put integration response request.

    ", + "refs": { + } + }, + "PutMethodRequest": { + "base": "

    Request to add a method to an existing Resource resource.

    ", + "refs": { + } + }, + "PutMethodResponseRequest": { + "base": "

    Request to add a MethodResponse to an existing Method resource.

    ", + "refs": { + } + }, + "PutMode": { + "base": null, + "refs": { + "PutRestApiRequest$mode": "

    The mode query parameter to specify the update mode. Valid values are \"merge\" and \"overwrite\". By default, the update mode is \"merge\".

    " + } + }, + "PutRestApiRequest": { + "base": "

    A PUT request to update an existing API, with external API definitions specified as the request body.

    ", + "refs": { + } + }, + "Resource": { + "base": "

    Represents a resource.

    ", + "refs": { + "ListOfResource$member": null + } + }, + "Resources": { + "base": "

    Represents a collection of Resource resources.

    ", + "refs": { + } + }, + "RestApi": { + "base": "

    Represents a REST API.

    ", + "refs": { + "ListOfRestApi$member": null + } + }, + "RestApis": { + "base": "

    Contains references to your APIs and links that guide you in ways to interact with your collection. A collection offers a paginated view of your APIs.

    ", + "refs": { + } + }, + "SdkResponse": { + "base": "

    The binary blob response to GetSdk, which contains the generated SDK.

    ", + "refs": { + } + }, + "ServiceUnavailableException": { + "base": null, + "refs": { + } + }, + "Stage": { + "base": "

    Represents a unique identifier for a version of a deployed RestApi that is callable by users.

    ", + "refs": { + "ListOfStage$member": null + } + }, + "StageKey": { + "base": "

    A reference to a unique stage identified in the format {restApiId}/{stage}.

    ", + "refs": { + "ListOfStageKeys$member": null + } + }, + "Stages": { + "base": "

    A list of Stage resources that are associated with the ApiKey resource.

    ", + "refs": { + } + }, + "StatusCode": { + "base": "

    The status code.

    ", + "refs": { + "DeleteIntegrationResponseRequest$statusCode": "

    Specifies a delete integration response request's status code.

    ", + "DeleteMethodResponseRequest$statusCode": "

    The status code identifier for the MethodResponse resource.

    ", + "GetIntegrationResponseRequest$statusCode": "

    Specifies a get integration response request's status code.

    ", + "GetMethodResponseRequest$statusCode": "

    The status code identifier for the MethodResponse resource.

    ", + "IntegrationResponse$statusCode": "

    Specifies the status code that is used to map the integration response to an existing MethodResponse.

    ", + "MethodResponse$statusCode": "

    The method response's status code.

    ", + "PutIntegrationResponseRequest$statusCode": "

    Specifies the status code that is used to map the integration response to an existing MethodResponse.

    ", + "PutMethodResponseRequest$statusCode": "

    The method response's status code.

    ", + "UpdateIntegrationResponseRequest$statusCode": "

    Specifies an update integration response request's status code.

    ", + "UpdateMethodResponseRequest$statusCode": "

    The status code identifier for the MethodResponse resource.

    " + } + }, + "String": { + "base": null, + "refs": { + "Account$cloudwatchRoleArn": "

    Specifies the Amazon resource name (ARN) of an Amazon CloudWatch role for the current Account resource.

    ", + "ApiKey$id": "

    The identifier of the API Key.

    ", + "ApiKey$name": "

    The name of the API Key.

    ", + "ApiKey$description": "

    The description of the API Key.

    ", + "ApiKeys$position": null, + "Authorizer$id": "

    The identifier for the authorizer resource.

    ", + "Authorizer$name": "

    [Required] The name of the authorizer.

    ", + "Authorizer$authType": "

    Optional customer-defined field, used in Swagger imports/exports. Has no functional impact.

    ", + "Authorizer$authorizerUri": "

    [Required] Specifies the authorizer's Uniform Resource Identifier (URI). For TOKEN authorizers, this must be a well-formed Lambda function URI. The URI should be of the form arn:aws:apigateway:{region}:lambda:path/{service_api}. Region is used to determine the right endpoint. In this case, path is used to indicate that the remaining substring in the URI should be treated as the path to the resource, including the initial /. For Lambda functions, this is usually of the form /2015-03-31/functions/[FunctionARN]/invocations

    ", + "Authorizer$authorizerCredentials": "

    Specifies the credentials required for the authorizer, if any. Two options are available. To specify an IAM Role for Amazon API Gateway to assume, use the role's Amazon Resource Name (ARN). To use resource-based permissions on the Lambda function, specify null.

    ", + "Authorizer$identitySource": "

    [Required] The source of the identity in an incoming request. For TOKEN authorizers, this value is a mapping expression with the same syntax as integration parameter mappings. The only valid source for tokens is 'header', so the expression should match 'method.request.header.[headerName]'. The value of the header '[headerName]' will be interpreted as the incoming token.

    ", + "Authorizer$identityValidationExpression": "

    A validation expression for the incoming identity. For TOKEN authorizers, this value should be a regular expression. The incoming token from the client is matched against this expression, and will proceed if the token matches. If the token doesn't match, the client receives a 401 Unauthorized response.
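
    A sketch pulling the TOKEN-authorizer fields above together (illustrative, not from this patch; the region, account, function name, and regex are placeholders):

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/apigateway"
)

// createTokenAuthorizer creates a TOKEN authorizer whose URI uses the
// arn:aws:apigateway:{region}:lambda:path/{service_api} form described above.
func createTokenAuthorizer(restApiID string) (*apigateway.Authorizer, error) {
	svc := apigateway.New(session.Must(session.NewSession()))
	return svc.CreateAuthorizer(&apigateway.CreateAuthorizerInput{
		RestApiId: aws.String(restApiID),
		Name:      aws.String("token-auth"),
		Type:      aws.String("TOKEN"),
		AuthorizerUri: aws.String("arn:aws:apigateway:us-east-1:lambda:path" +
			"/2015-03-31/functions/arn:aws:lambda:us-east-1:123456789012:function:auth/invocations"),
		// The only valid token source is a header.
		IdentitySource:               aws.String("method.request.header.Authorization"),
		IdentityValidationExpression: aws.String("^Bearer .+$"), // optional regex gate
		AuthorizerResultTtlInSeconds: aws.Int64(300),            // the documented default TTL
	})
}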

    ", + "Authorizers$position": null, + "BadRequestException$message": null, + "BasePathMapping$basePath": "

    The base path name that callers of the API must provide as part of the URL after the domain name.

    ", + "BasePathMapping$restApiId": "

    The name of the API.

    ", + "BasePathMapping$stage": "

    The name of the API's stage.

    ", + "BasePathMappings$position": null, + "ClientCertificate$clientCertificateId": "

    The identifier of the Client Certificate.

    ", + "ClientCertificate$description": "

    The description of the Client Certificate.

    ", + "ClientCertificate$pemEncodedCertificate": "

    The PEM-encoded public key of the Client Certificate, which can be used to configure certificate authentication in the integration endpoint.

    ", + "ClientCertificates$position": null, + "ConflictException$message": null, + "CreateApiKeyRequest$name": "

    The name of the ApiKey.

    ", + "CreateApiKeyRequest$description": "

    The description of the ApiKey.

    ", + "CreateAuthorizerRequest$restApiId": "

    The RestApi identifier under which the Authorizer will be created.

    ", + "CreateAuthorizerRequest$name": "

    [Required] The name of the authorizer.

    ", + "CreateAuthorizerRequest$authType": "

    Optional customer-defined field, used in Swagger imports/exports. Has no functional impact.

    ", + "CreateAuthorizerRequest$authorizerUri": "

    [Required] Specifies the authorizer's Uniform Resource Identifier (URI).

    ", + "CreateAuthorizerRequest$authorizerCredentials": "

    Specifies the credentials required for the authorizer, if any.

    ", + "CreateAuthorizerRequest$identitySource": "

    [Required] The source of the identity in an incoming request.

    ", + "CreateAuthorizerRequest$identityValidationExpression": "

    A validation expression for the incoming identity.

    ", + "CreateBasePathMappingRequest$domainName": "

    The domain name of the BasePathMapping resource to create.

    ", + "CreateBasePathMappingRequest$basePath": "

    The base path name that callers of the API must provide as part of the URL after the domain name. This value must be unique for all of the mappings across a single API. Leave this blank if you do not want callers to specify a base path name after the domain name.

    ", + "CreateBasePathMappingRequest$restApiId": "

    The name of the API that you want to apply this mapping to.

    ", + "CreateBasePathMappingRequest$stage": "

    The name of the API's stage that you want to use for this mapping. Leave this blank if you do not want callers to explicitly specify the stage name after any base path name.

    ", + "CreateDeploymentRequest$restApiId": "

    The RestApi resource identifier for the Deployment resource to create.

    ", + "CreateDeploymentRequest$stageName": "

    The name of the Stage resource for the Deployment resource to create.

    ", + "CreateDeploymentRequest$stageDescription": "

    The description of the Stage resource for the Deployment resource to create.

    ", + "CreateDeploymentRequest$description": "

    The description for the Deployment resource to create.

    ", + "CreateDomainNameRequest$domainName": "

    The name of the DomainName resource.

    ", + "CreateDomainNameRequest$certificateName": "

    The name of the certificate.

    ", + "CreateDomainNameRequest$certificateBody": "

    The body of the server certificate provided by your certificate authority.

    ", + "CreateDomainNameRequest$certificatePrivateKey": "

    Your certificate's private key.

    ", + "CreateDomainNameRequest$certificateChain": "

    The intermediate certificates and optionally the root certificate, one after the other without any blank lines. If you include the root certificate, your certificate chain must start with intermediate certificates and end with the root certificate. Use the intermediate certificates that were provided by your certificate authority. Do not include any intermediaries that are not in the chain of trust path.

    ", + "CreateModelRequest$restApiId": "

    The RestApi identifier under which the Model will be created.

    ", + "CreateModelRequest$name": "

    The name of the model.

    ", + "CreateModelRequest$description": "

    The description of the model.

    ", + "CreateModelRequest$schema": "

    The schema for the model. For application/json models, this should be JSON-schema draft v4 model.

    ", + "CreateModelRequest$contentType": "

    The content-type for the model.

    ", + "CreateResourceRequest$restApiId": "

    The identifier of the RestApi for the resource.

    ", + "CreateResourceRequest$parentId": "

    The parent resource's identifier.

    ", + "CreateResourceRequest$pathPart": "

    The last path segment for this resource.

    ", + "CreateRestApiRequest$name": "

    The name of the RestApi.

    ", + "CreateRestApiRequest$description": "

    The description of the RestApi.

    ", + "CreateRestApiRequest$cloneFrom": "

    The Id of the RestApi that you want to clone from.

    ", + "CreateStageRequest$restApiId": "

    The identifier of the RestApi resource for the Stage resource to create.

    ", + "CreateStageRequest$stageName": "

    The name for the Stage resource.

    ", + "CreateStageRequest$deploymentId": "

    The identifier of the Deployment resource for the Stage resource.

    ", + "CreateStageRequest$description": "

    The description of the Stage resource.

    ", + "DeleteApiKeyRequest$apiKey": "

    The identifier of the ApiKey resource to be deleted.

    ", + "DeleteAuthorizerRequest$restApiId": "

    The RestApi identifier for the Authorizer resource.

    ", + "DeleteAuthorizerRequest$authorizerId": "

    The identifier of the Authorizer resource.

    ", + "DeleteBasePathMappingRequest$domainName": "

    The domain name of the BasePathMapping resource to delete.

    ", + "DeleteBasePathMappingRequest$basePath": "

    The base path name of the BasePathMapping resource to delete.

    ", + "DeleteClientCertificateRequest$clientCertificateId": "

    The identifier of the ClientCertificate resource to be deleted.

    ", + "DeleteDeploymentRequest$restApiId": "

    The identifier of the RestApi resource for the Deployment resource to delete.

    ", + "DeleteDeploymentRequest$deploymentId": "

    The identifier of the Deployment resource to delete.

    ", + "DeleteDomainNameRequest$domainName": "

    The name of the DomainName resource to be deleted.

    ", + "DeleteIntegrationRequest$restApiId": "

    Specifies a delete integration request's API identifier.

    ", + "DeleteIntegrationRequest$resourceId": "

    Specifies a delete integration request's resource identifier.

    ", + "DeleteIntegrationRequest$httpMethod": "

    Specifies a delete integration request's HTTP method.

    ", + "DeleteIntegrationResponseRequest$restApiId": "

    Specifies a delete integration response request's API identifier.

    ", + "DeleteIntegrationResponseRequest$resourceId": "

    Specifies a delete integration response request's resource identifier.

    ", + "DeleteIntegrationResponseRequest$httpMethod": "

    Specifies a delete integration response request's HTTP method.

    ", + "DeleteMethodRequest$restApiId": "

    The RestApi identifier for the Method resource.

    ", + "DeleteMethodRequest$resourceId": "

    The Resource identifier for the Method resource.

    ", + "DeleteMethodRequest$httpMethod": "

    The HTTP verb that identifies the Method resource.

    ", + "DeleteMethodResponseRequest$restApiId": "

    The RestApi identifier for the MethodResponse resource.

    ", + "DeleteMethodResponseRequest$resourceId": "

    The Resource identifier for the MethodResponse resource.

    ", + "DeleteMethodResponseRequest$httpMethod": "

    The HTTP verb identifier for the parent Method resource.

    ", + "DeleteModelRequest$restApiId": "

    The RestApi under which the model will be deleted.

    ", + "DeleteModelRequest$modelName": "

    The name of the model to delete.

    ", + "DeleteResourceRequest$restApiId": "

    The RestApi identifier for the Resource resource.

    ", + "DeleteResourceRequest$resourceId": "

    The identifier of the Resource resource.

    ", + "DeleteRestApiRequest$restApiId": "

    The ID of the RestApi you want to delete.

    ", + "DeleteStageRequest$restApiId": "

    The identifier of the RestApi resource for the Stage resource to delete.

    ", + "DeleteStageRequest$stageName": "

    The name of the Stage resource to delete.

    ", + "Deployment$id": "

    The identifier for the deployment resource.

    ", + "Deployment$description": "

    The description for the deployment resource.

    ", + "Deployments$position": null, + "DomainName$domainName": "

    The name of the DomainName resource.

    ", + "DomainName$certificateName": "

    The name of the certificate.

    ", + "DomainName$distributionDomainName": "

    The domain name of the Amazon CloudFront distribution. For more information, see the Amazon CloudFront documentation.

    ", + "DomainNames$position": null, + "ExportResponse$contentType": "

    The content-type header value in the HTTP response. This will correspond to a valid 'accept' type in the request.

    ", + "ExportResponse$contentDisposition": "

    The content-disposition header value in the HTTP response.

    ", + "FlushStageAuthorizersCacheRequest$restApiId": "

    The API identifier of the stage to flush.

    ", + "FlushStageAuthorizersCacheRequest$stageName": "

    The name of the stage to flush.

    ", + "FlushStageCacheRequest$restApiId": "

    The API identifier of the stage to flush its cache.

    ", + "FlushStageCacheRequest$stageName": "

    The name of the stage to flush its cache.

    ", + "GenerateClientCertificateRequest$description": "

    The description of the ClientCertificate.

    ", + "GetApiKeyRequest$apiKey": "

    The identifier of the ApiKey resource.

    ", + "GetApiKeysRequest$position": "

    The position of the current ApiKeys resource to get information about.

    ", + "GetAuthorizerRequest$restApiId": "

    The RestApi identifier for the Authorizer resource.

    ", + "GetAuthorizerRequest$authorizerId": "

    The identifier of the Authorizer resource.

    ", + "GetAuthorizersRequest$restApiId": "

    The RestApi identifier for the Authorizers resource.

    ", + "GetAuthorizersRequest$position": "

    If not all Authorizer resources were present in the response, the position specifies where to start the next page of results.

    ", + "GetBasePathMappingRequest$domainName": "

    The domain name of the BasePathMapping resource to be described.

    ", + "GetBasePathMappingRequest$basePath": "

    The base path name that callers of the API must provide as part of the URL after the domain name. This value must be unique for all of the mappings across a single API. Leave this blank if you do not want callers to specify any base path name after the domain name.

    ", + "GetBasePathMappingsRequest$domainName": "

    The domain name of a BasePathMapping resource.

    ", + "GetBasePathMappingsRequest$position": "

    The position of the current BasePathMapping resource in the collection to get information about.

    ", + "GetClientCertificateRequest$clientCertificateId": "

    The identifier of the ClientCertificate resource to be described.

    ", + "GetClientCertificatesRequest$position": "

    The position of the current ClientCertificate resource in the collection to get information about.

    ", + "GetDeploymentRequest$restApiId": "

    The identifier of the RestApi resource for the Deployment resource to get information about.

    ", + "GetDeploymentRequest$deploymentId": "

    The identifier of the Deployment resource to get information about.

    ", + "GetDeploymentsRequest$restApiId": "

    The identifier of the RestApi resource for the collection of Deployment resources to get information about.

    ", + "GetDeploymentsRequest$position": "

    The position of the current Deployment resource in the collection to get information about.

    ", + "GetDomainNameRequest$domainName": "

    The name of the DomainName resource.

    ", + "GetDomainNamesRequest$position": "

    The position of the current domain names to get information about.

    ", + "GetExportRequest$restApiId": "

    The identifier of the RestApi to be exported.

    ", + "GetExportRequest$stageName": "

    The name of the Stage that will be exported.

    ", + "GetExportRequest$exportType": "

    The type of export. Currently only 'swagger' is supported.

    ", + "GetExportRequest$accepts": "

    The content-type of the export, for example 'application/json'. Currently 'application/json' and 'application/yaml' are supported for exportType 'swagger'. Should be specified in the 'Accept' header for direct API requests.

    ", + "GetIntegrationRequest$restApiId": "

    Specifies a get integration request's API identifier.

    ", + "GetIntegrationRequest$resourceId": "

    Specifies a get integration request's resource identifier.

    ", + "GetIntegrationRequest$httpMethod": "

    Specifies a get integration request's HTTP method.

    ", + "GetIntegrationResponseRequest$restApiId": "

    Specifies a get integration response request's API identifier.

    ", + "GetIntegrationResponseRequest$resourceId": "

    Specifies a get integration response request's resource identifier.

    ", + "GetIntegrationResponseRequest$httpMethod": "

    Specifies a get integration response request's HTTP method.

    ", + "GetMethodRequest$restApiId": "

    The RestApi identifier for the Method resource.

    ", + "GetMethodRequest$resourceId": "

    The Resource identifier for the Method resource.

    ", + "GetMethodRequest$httpMethod": "

    Specifies the put method request's HTTP method type.

    ", + "GetMethodResponseRequest$restApiId": "

    The RestApi identifier for the MethodResponse resource.

    ", + "GetMethodResponseRequest$resourceId": "

    The Resource identifier for the MethodResponse resource.

    ", + "GetMethodResponseRequest$httpMethod": "

    The HTTP verb identifier for the parent Method resource.

    ", + "GetModelRequest$restApiId": "

    The RestApi identifier under which the Model exists.

    ", + "GetModelRequest$modelName": "

    The name of the model as an identifier.

    ", + "GetModelTemplateRequest$restApiId": "

    The ID of the RestApi under which the model exists.

    ", + "GetModelTemplateRequest$modelName": "

    The name of the model for which to generate a template.

    ", + "GetModelsRequest$restApiId": "

    The RestApi identifier.

    ", + "GetModelsRequest$position": "

    The position of the next set of results in the Models resource to get information about.

    ", + "GetResourceRequest$restApiId": "

    The RestApi identifier for the resource.

    ", + "GetResourceRequest$resourceId": "

    The identifier for the Resource resource.

    ", + "GetResourcesRequest$restApiId": "

    The RestApi identifier for the Resource.

    ", + "GetResourcesRequest$position": "

    The position of the next set of results in the current Resources resource to get information about.

    ", + "GetRestApiRequest$restApiId": "

    The identifier of the RestApi resource.

    ", + "GetRestApisRequest$position": "

    The position of the current RestApis resource in the collection to get information about.

    ", + "GetSdkRequest$restApiId": "

    The identifier of the RestApi that the SDK will use.

    ", + "GetSdkRequest$stageName": "

    The name of the Stage that the SDK will use.

    ", + "GetSdkRequest$sdkType": "

    The language for the generated SDK. Currently javascript, android, and objectivec (for iOS) are supported.

    ", + "GetStageRequest$restApiId": "

    The identifier of the RestApi resource for the Stage resource to get information about.

    ", + "GetStageRequest$stageName": "

    The name of the Stage resource to get information about.

    ", + "GetStagesRequest$restApiId": "

    The stages' API identifiers.

    ", + "GetStagesRequest$deploymentId": "

    The stages' deployment identifiers.

    ", + "Integration$httpMethod": "

    Specifies the integration's HTTP method type.

    ", + "Integration$uri": "

    Specifies the integration's Uniform Resource Identifier (URI). For HTTP integrations, the URI must be a fully formed, encoded HTTP(S) URL according to the RFC-3986 specification. For AWS integrations, the URI should be of the form arn:aws:apigateway:{region}:{subdomain.service|service}:{path|action}/{service_api}. Region, subdomain and service are used to determine the right endpoint. For AWS services that use the Action= query string parameter, service_api should be a valid action for the desired service. For RESTful AWS service APIs, path is used to indicate that the remaining substring in the URI should be treated as the path to the resource, including the initial /.

    ", + "Integration$credentials": "

    Specifies the credentials required for the integration, if any. For AWS integrations, three options are available. To specify an IAM Role for Amazon API Gateway to assume, use the role's Amazon Resource Name (ARN). To require that the caller's identity be passed through from the request, specify the string arn:aws:iam::\\*:user/\\*. To use resource-based permissions on supported AWS services, specify null.

    ", + "Integration$passthroughBehavior": "

    Specifies the pass-through behavior for incoming requests based on the Content-Type header in the request, and the available requestTemplates defined on the Integration. There are three valid values: WHEN_NO_MATCH, WHEN_NO_TEMPLATES, and NEVER.

    WHEN_NO_MATCH passes the request body for unmapped content types through to the Integration backend without transformation.

    NEVER rejects unmapped content types with an HTTP 415 'Unsupported Media Type' response.

    WHEN_NO_TEMPLATES will allow pass-through when the Integration has NO content types mapped to templates. However if there is at least one content type defined, unmapped content types will be rejected with the same 415 response.

    ", + "Integration$cacheNamespace": "

    Specifies the integration's cache namespace.

    ", + "IntegrationResponse$selectionPattern": "

    Specifies the regular expression (regex) pattern used to choose an integration response based on the response from the backend. If the backend is an AWS Lambda function, the AWS Lambda function error header is matched. For all other HTTP and AWS backends, the HTTP status code is matched.

    ", + "LimitExceededException$retryAfterSeconds": null, + "LimitExceededException$message": null, + "ListOfString$member": null, + "MapOfHeaderValues$key": null, + "MapOfHeaderValues$value": null, + "MapOfIntegrationResponse$key": null, + "MapOfMethod$key": null, + "MapOfMethodResponse$key": null, + "MapOfMethodSettings$key": null, + "MapOfMethodSnapshot$key": null, + "MapOfStringToBoolean$key": null, + "MapOfStringToList$key": null, + "MapOfStringToString$key": null, + "MapOfStringToString$value": null, + "Method$httpMethod": "

    The HTTP method.

    ", + "Method$authorizationType": "

    The method's authorization type.

    ", + "Method$authorizerId": "

    Specifies the identifier of an Authorizer to use on this Method. The authorizationType must be CUSTOM.

    ", + "MethodSetting$loggingLevel": "

    Specifies the logging level for this method, which affects the log entries pushed to Amazon CloudWatch Logs. The PATCH path for this setting is /{method_setting_key}/logging/loglevel, and the available levels are OFF, ERROR, and INFO.

    ", + "MethodSnapshot$authorizationType": "

    Specifies the type of authorization used for the method.

    ", + "Model$id": "

    The identifier for the model resource.

    ", + "Model$name": "

    The name of the model.

    ", + "Model$description": "

    The description of the model.

    ", + "Model$schema": "

    The schema for the model. For application/json models, this should be JSON-schema draft v4 model.

    ", + "Model$contentType": "

    The content-type for the model.

    ", + "Models$position": null, + "NotFoundException$message": null, + "PatchOperation$path": "

    Operation objects MUST have exactly one \"path\" member. That member's value is a string containing a `JSON-Pointer` value that references a location within the target document (the \"target location\") where the operation is performed.

    ", + "PatchOperation$value": "

    The actual value content.

    ", + "PatchOperation$from": "

    The \"move\" and \"copy\" operation object MUST contain a \"from\" member, which is a string containing a JSON Pointer value that references the location in the target document to move the value from.

    ", + "PathToMapOfMethodSnapshot$key": null, + "PutIntegrationRequest$restApiId": "

    Specifies a put integration request's API identifier.

    ", + "PutIntegrationRequest$resourceId": "

    Specifies a put integration request's resource ID.

    ", + "PutIntegrationRequest$httpMethod": "

    Specifies a put integration request's HTTP method.

    ", + "PutIntegrationRequest$integrationHttpMethod": "

    Specifies a put integration HTTP method. When the integration type is HTTP or AWS, this field is required.

    ", + "PutIntegrationRequest$uri": "

    Specifies a put integration input's Uniform Resource Identifier (URI). When the integration type is HTTP or AWS, this field is required. For integration with Lambda as an AWS service proxy, this value is of the 'arn:aws:apigateway:<region>:lambda:path/2015-03-31/functions/<functionArn>/invocations' format.

    ", + "PutIntegrationRequest$credentials": "

    Specifies whether credentials are required for a put integration.

    ", + "PutIntegrationRequest$passthroughBehavior": "

    Specifies the pass-through behavior for incoming requests based on the Content-Type header in the request, and the available requestTemplates defined on the Integration. There are three valid values: WHEN_NO_MATCH, WHEN_NO_TEMPLATES, and NEVER.

    WHEN_NO_MATCH passes the request body for unmapped content types through to the Integration backend without transformation.

    NEVER rejects unmapped content types with an HTTP 415 'Unsupported Media Type' response.

    WHEN_NO_TEMPLATES will allow pass-through when the Integration has NO content types mapped to templates. However if there is at least one content type defined, unmapped content types will be rejected with the same 415 response.
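
    A sketch combining the parameter/template maps and the passthrough behavior described above, against the vendored client (illustrative only; the backend URL, template, and parameter names are placeholders):

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/apigateway"
)

// putHTTPIntegration wires a method to an HTTP backend. RequestParameters
// maps destination <- source (a method request parameter or a quoted static
// value); RequestTemplates is keyed by the incoming Content-Type.
func putHTTPIntegration(restApiID, resourceID string) error {
	svc := apigateway.New(session.Must(session.NewSession()))
	_, err := svc.PutIntegration(&apigateway.PutIntegrationInput{
		RestApiId:             aws.String(restApiID),
		ResourceId:            aws.String(resourceID),
		HttpMethod:            aws.String("GET"),
		Type:                  aws.String("HTTP"),
		IntegrationHttpMethod: aws.String("GET"),
		Uri:                   aws.String("https://example.com/pets"), // placeholder
		RequestParameters: map[string]*string{
			"integration.request.querystring.page": aws.String("method.request.querystring.page"),
			"integration.request.header.x-static":  aws.String("'fixed-value'"), // static values are single-quoted
		},
		RequestTemplates: map[string]*string{
			"application/json": aws.String(`{"page": "$input.params('page')"}`), // placeholder VTL
		},
		// Unmapped content types pass through to the backend untransformed.
		PassthroughBehavior: aws.String("WHEN_NO_MATCH"),
	})
	return err
}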

    ", + "PutIntegrationRequest$cacheNamespace": "

    Specifies a put integration input's cache namespace.

    ", + "PutIntegrationResponseRequest$restApiId": "

    Specifies a put integration response request's API identifier.

    ", + "PutIntegrationResponseRequest$resourceId": "

    Specifies a put integration response request's resource identifier.

    ", + "PutIntegrationResponseRequest$httpMethod": "

    Specifies a put integration response request's HTTP method.

    ", + "PutIntegrationResponseRequest$selectionPattern": "

    Specifies the selection pattern of a put integration response.

    ", + "PutMethodRequest$restApiId": "

    The RestApi identifier for the new Method resource.

    ", + "PutMethodRequest$resourceId": "

    The Resource identifier for the new Method resource.

    ", + "PutMethodRequest$httpMethod": "

    Specifies the put method request's HTTP method type.

    ", + "PutMethodRequest$authorizationType": "

    Specifies the type of authorization used for the method.

    ", + "PutMethodRequest$authorizerId": "

    Specifies the identifier of an Authorizer to use on this Method, if the type is CUSTOM.

    ", + "PutMethodResponseRequest$restApiId": "

    The RestApi identifier for the Method resource.

    ", + "PutMethodResponseRequest$resourceId": "

    The Resource identifier for the Method resource.

    ", + "PutMethodResponseRequest$httpMethod": "

    The HTTP verb that identifies the Method resource.

    ", + "PutRestApiRequest$restApiId": "

    The identifier of the RestApi to be updated.

    ", + "Resource$id": "

    The resource's identifier.

    ", + "Resource$parentId": "

    The parent resource's identifier.

    ", + "Resource$pathPart": "

    The last path segment for this resource.

    ", + "Resource$path": "

    The full path for this resource.

    ", + "Resources$position": null, + "RestApi$id": "

    The API's identifier. This identifier is unique across all of your APIs in Amazon API Gateway.

    ", + "RestApi$name": "

    The API's name.

    ", + "RestApi$description": "

    The API's description.

    ", + "RestApis$position": null, + "SdkResponse$contentType": "

    The content-type header value in the HTTP response.

    ", + "SdkResponse$contentDisposition": "

    The content-disposition header value in the HTTP response.

    ", + "ServiceUnavailableException$retryAfterSeconds": null, + "ServiceUnavailableException$message": null, + "Stage$deploymentId": "

    The identifier of the Deployment that the stage points to.

    ", + "Stage$clientCertificateId": null, + "Stage$stageName": "

    The name of the stage is the first path segment in the Uniform Resource Identifier (URI) of a call to Amazon API Gateway.

    ", + "Stage$description": "

    The stage's description.

    ", + "StageKey$restApiId": "

    The identifier of the RestApi resource that the stage key references.

    ", + "StageKey$stageName": "

    The stage name in the RestApi that the stage key references.

    ", + "Template$value": "

    The Apache Velocity Template Language (VTL) template content used for the template resource.

    ", + "TestInvokeAuthorizerRequest$restApiId": "

    Specifies a test invoke authorizer request's RestApi identifier.

    ", + "TestInvokeAuthorizerRequest$authorizerId": "

    Specifies a test invoke authorizer request's Authorizer ID.

    ", + "TestInvokeAuthorizerRequest$pathWithQueryString": "

    [Optional] The URI path, including query string, of the simulated invocation request. Use this to specify path parameters and query string parameters.

    ", + "TestInvokeAuthorizerRequest$body": "

    [Optional] The simulated request body of an incoming invocation request.

    ", + "TestInvokeAuthorizerResponse$log": "

    The Amazon API Gateway execution log for the test authorizer request.

    ", + "TestInvokeAuthorizerResponse$principalId": "

    The principal identity returned by the Authorizer.

    ", + "TestInvokeAuthorizerResponse$policy": "

    The policy JSON document returned by the Authorizer.

    ", + "TestInvokeMethodRequest$restApiId": "

    Specifies a test invoke method request's API identifier.

    ", + "TestInvokeMethodRequest$resourceId": "

    Specifies a test invoke method request's resource ID.

    ", + "TestInvokeMethodRequest$httpMethod": "

    Specifies a test invoke method request's HTTP method.

    ", + "TestInvokeMethodRequest$pathWithQueryString": "

    The URI path, including query string, of the simulated invocation request. Use this to specify path parameters and query string parameters.

    ", + "TestInvokeMethodRequest$body": "

    The simulated request body of an incoming invocation request.

    ", + "TestInvokeMethodRequest$clientCertificateId": "

    A ClientCertificate identifier to use in the test invocation. API Gateway will use the certificate when making the HTTPS request to the defined backend endpoint.

    ", + "TestInvokeMethodResponse$body": "

    The body of the HTTP response.

    ", + "TestInvokeMethodResponse$log": "

    The Amazon API Gateway execution log for the test invoke request.

    ", + "TooManyRequestsException$retryAfterSeconds": null, + "TooManyRequestsException$message": null, + "UnauthorizedException$message": null, + "UpdateApiKeyRequest$apiKey": "

    The identifier of the ApiKey resource to be updated.

    ", + "UpdateAuthorizerRequest$restApiId": "

    The RestApi identifier for the Authorizer resource.

    ", + "UpdateAuthorizerRequest$authorizerId": "

    The identifier of the Authorizer resource.

    ", + "UpdateBasePathMappingRequest$domainName": "

    The domain name of the BasePathMapping resource to change.

    ", + "UpdateBasePathMappingRequest$basePath": "

    The base path of the BasePathMapping resource to change.

    ", + "UpdateClientCertificateRequest$clientCertificateId": "

    The identifier of the ClientCertificate resource to be updated.

    ", + "UpdateDeploymentRequest$restApiId": "

    The replacement identifier of the RestApi resource for the Deployment resource to change information about.

    ", + "UpdateDeploymentRequest$deploymentId": "

    The replacement identifier for the Deployment resource to change information about.

    ", + "UpdateDomainNameRequest$domainName": "

    The name of the DomainName resource to be changed.

    ", + "UpdateIntegrationRequest$restApiId": "

    Represents an update integration request's API identifier.

    ", + "UpdateIntegrationRequest$resourceId": "

    Represents an update integration request's resource identifier.

    ", + "UpdateIntegrationRequest$httpMethod": "

    Represents an update integration request's HTTP method.

    ", + "UpdateIntegrationResponseRequest$restApiId": "

    Specifies an update integration response request's API identifier.

    ", + "UpdateIntegrationResponseRequest$resourceId": "

    Specifies an update integration response request's resource identifier.

    ", + "UpdateIntegrationResponseRequest$httpMethod": "

    Specifies an update integration response request's HTTP method.

    ", + "UpdateMethodRequest$restApiId": "

    The RestApi identifier for the Method resource.

    ", + "UpdateMethodRequest$resourceId": "

    The Resource identifier for the Method resource.

    ", + "UpdateMethodRequest$httpMethod": "

    The HTTP verb that identifies the Method resource.

    ", + "UpdateMethodResponseRequest$restApiId": "

    The RestApi identifier for the MethodResponse resource.

    ", + "UpdateMethodResponseRequest$resourceId": "

    The Resource identifier for the MethodResponse resource.

    ", + "UpdateMethodResponseRequest$httpMethod": "

    The HTTP verb identifier for the parent Method resource.

    ", + "UpdateModelRequest$restApiId": "

    The RestApi identifier under which the model exists.

    ", + "UpdateModelRequest$modelName": "

    The name of the model to update.

    ", + "UpdateResourceRequest$restApiId": "

    The RestApi identifier for the Resource resource.

    ", + "UpdateResourceRequest$resourceId": "

    The identifier of the Resource resource.

    ", + "UpdateRestApiRequest$restApiId": "

    The ID of the RestApi you want to update.

    ", + "UpdateStageRequest$restApiId": "

    The identifier of the RestApi resource for the Stage resource to change information about.

    ", + "UpdateStageRequest$stageName": "

    The name of the Stage resource to change information about.

    " + } + }, + "Template": { + "base": "

    Represents a mapping template used to transform a payload.

    ", + "refs": { + } + }, + "TestInvokeAuthorizerRequest": { + "base": "

    Make a request to simulate the execution of an Authorizer.

    ", + "refs": { + } + }, + "TestInvokeAuthorizerResponse": { + "base": "

    Represents the response of the test invoke request for a custom Authorizer.

    ", + "refs": { + } + }, + "TestInvokeMethodRequest": { + "base": "

    Make a request to simulate the execution of a Method.

    ", + "refs": { + } + }, + "TestInvokeMethodResponse": { + "base": "

    Represents the response of the test invoke request on an HTTP method.

    ", + "refs": { + } + }, + "ThrottleSettings": { + "base": "

    Returns the throttle settings.

    ", + "refs": { + "Account$throttleSettings": "

    Specifies the application programming interface (API) throttle settings for the current Account resource.

    " + } + }, + "Timestamp": { + "base": null, + "refs": { + "ApiKey$createdDate": "

    The date when the API Key was created, in ISO 8601 format.

    ", + "ApiKey$lastUpdatedDate": "

    When the API Key was last updated, in ISO 8601 format.

    ", + "ClientCertificate$createdDate": "

    The date when the Client Certificate was created, in ISO 8601 format.

    ", + "ClientCertificate$expirationDate": "

    The date when the Client Certificate will expire, in ISO 8601 format.

    ", + "Deployment$createdDate": "

    The date and time that the deployment resource was created.

    ", + "DomainName$certificateUploadDate": "

    The date when the certificate was uploaded, in ISO 8601 format.

    ", + "RestApi$createdDate": "

    The date when the API was created, in ISO 8601 format.

    ", + "Stage$createdDate": "

    The date and time that the stage was created, in ISO 8601 format.

    ", + "Stage$lastUpdatedDate": "

    The date and time that information about the stage was last updated, in ISO 8601 format.

    " + } + }, + "TooManyRequestsException": { + "base": null, + "refs": { + } + }, + "UnauthorizedCacheControlHeaderStrategy": { + "base": null, + "refs": { + "MethodSetting$unauthorizedCacheControlHeaderStrategy": "

    Specifies the strategy on how to handle the unauthorized requests for cache invalidation. The PATCH path for this setting is /{method_setting_key}/caching/unauthorizedCacheControlHeaderStrategy, and the available values are FAIL_WITH_403, SUCCEED_WITH_RESPONSE_HEADER, SUCCEED_WITHOUT_RESPONSE_HEADER.

    " + } + }, + "UnauthorizedException": { + "base": null, + "refs": { + } + }, + "UpdateAccountRequest": { + "base": "

    Requests Amazon API Gateway to change information about the current Account resource.

    ", + "refs": { + } + }, + "UpdateApiKeyRequest": { + "base": "

    A request to change information about an ApiKey resource.

    ", + "refs": { + } + }, + "UpdateAuthorizerRequest": { + "base": "

    Request to update an existing Authorizer resource.

    ", + "refs": { + } + }, + "UpdateBasePathMappingRequest": { + "base": "

    A request to change information about the BasePathMapping resource.

    ", + "refs": { + } + }, + "UpdateClientCertificateRequest": { + "base": "

    A request to change information about a ClientCertificate resource.

    ", + "refs": { + } + }, + "UpdateDeploymentRequest": { + "base": "

    Requests Amazon API Gateway to change information about a Deployment resource.

    ", + "refs": { + } + }, + "UpdateDomainNameRequest": { + "base": "

    A request to change information about the DomainName resource.

    ", + "refs": { + } + }, + "UpdateIntegrationRequest": { + "base": "

    Represents an update integration request.

    ", + "refs": { + } + }, + "UpdateIntegrationResponseRequest": { + "base": "

    Represents an update integration response request.

    ", + "refs": { + } + }, + "UpdateMethodRequest": { + "base": "

    Request to update an existing Method resource.

    ", + "refs": { + } + }, + "UpdateMethodResponseRequest": { + "base": "

    A request to update an existing MethodResponse resource.

    ", + "refs": { + } + }, + "UpdateModelRequest": { + "base": "

    Request to update an existing model in an existing RestApi resource.

    ", + "refs": { + } + }, + "UpdateResourceRequest": { + "base": "

    Request to change information about a Resource resource.

    ", + "refs": { + } + }, + "UpdateRestApiRequest": { + "base": "

    Request to update an existing RestApi resource in your collection.

    ", + "refs": { + } + }, + "UpdateStageRequest": { + "base": "

    Requests Amazon API Gateway to change information about a Stage resource.

    ", + "refs": { + } + }, + "op": { + "base": null, + "refs": { + "PatchOperation$op": "

    A patch operation whose value indicates the operation to perform. Its value MUST be one of \"add\", \"remove\", \"replace\", \"move\", \"copy\", or \"test\"; other values are errors.
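
    An RFC 6902-style patch as the op/path/value members above describe it, applied via UpdateStage (a sketch, not part of this patch; the wildcard path follows the /{method_setting_key}/logging/loglevel convention documented earlier, with \*/\* covering all methods in the stage):

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/apigateway"
)

// updateStageLogging sets the logging level for every method in a stage
// using a single "replace" patch operation.
func updateStageLogging(restApiID, stageName string) error {
	svc := apigateway.New(session.Must(session.NewSession()))
	_, err := svc.UpdateStage(&apigateway.UpdateStageInput{
		RestApiId: aws.String(restApiID),
		StageName: aws.String(stageName),
		PatchOperations: []*apigateway.PatchOperation{
			{
				Op:    aws.String("replace"),
				Path:  aws.String("/*/*/logging/loglevel"), // all methods in the stage
				Value: aws.String("INFO"),
			},
		},
	})
	return err
}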

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/apigateway/2015-07-09/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/apigateway/2015-07-09/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/apigateway/2015-07-09/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/apigateway/2015-07-09/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/apigateway/2015-07-09/paginators-1.json new file mode 100644 index 000000000..4a79cfbd6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/apigateway/2015-07-09/paginators-1.json @@ -0,0 +1,52 @@ +{ + "pagination": { + "GetApiKeys": { + "input_token": "position", + "output_token": "position", + "limit_key": "limit", + "result_key": "items" + }, + "GetBasePathMappings": { + "input_token": "position", + "output_token": "position", + "limit_key": "limit", + "result_key": "items" + }, + "GetClientCertificates": { + "input_token": "position", + "output_token": "position", + "limit_key": "limit", + "result_key": "items" + }, + "GetDeployments": { + "input_token": "position", + "output_token": "position", + "limit_key": "limit", + "result_key": "items" + }, + "GetDomainNames": { + "input_token": "position", + "output_token": "position", + "limit_key": "limit", + "result_key": "items" + }, + "GetModels": { + "input_token": "position", + "output_token": "position", + "limit_key": "limit", + "result_key": "items" + }, + "GetResources": { + "input_token": "position", + "output_token": "position", + "limit_key": "limit", + "result_key": "items" + }, + "GetRestApis": { + "input_token": "position", + "output_token": "position", + "limit_key": "limit", + "result_key": "items" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/application-autoscaling/2016-02-06/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/application-autoscaling/2016-02-06/api-2.json new file mode 100644 index 000000000..696da88ec --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/application-autoscaling/2016-02-06/api-2.json @@ -0,0 +1,502 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2016-02-06", + "endpointPrefix":"autoscaling", + "jsonVersion":"1.1", + "protocol":"json", + "serviceFullName":"Application Auto Scaling", + "signatureVersion":"v4", + "signingName":"application-autoscaling", + "targetPrefix":"AnyScaleFrontendService" + }, + "operations":{ + "DeleteScalingPolicy":{ + "name":"DeleteScalingPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteScalingPolicyRequest"}, + "output":{"shape":"DeleteScalingPolicyResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ObjectNotFoundException"}, + {"shape":"ConcurrentUpdateException"}, + {"shape":"InternalServiceException"} + ] + }, + "DeregisterScalableTarget":{ + "name":"DeregisterScalableTarget", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeregisterScalableTargetRequest"}, + "output":{"shape":"DeregisterScalableTargetResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ObjectNotFoundException"}, + {"shape":"ConcurrentUpdateException"}, + {"shape":"InternalServiceException"} + ] + }, + "DescribeScalableTargets":{ + "name":"DescribeScalableTargets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeScalableTargetsRequest"}, + 
"output":{"shape":"DescribeScalableTargetsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"ConcurrentUpdateException"}, + {"shape":"InternalServiceException"} + ] + }, + "DescribeScalingActivities":{ + "name":"DescribeScalingActivities", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeScalingActivitiesRequest"}, + "output":{"shape":"DescribeScalingActivitiesResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"ConcurrentUpdateException"}, + {"shape":"InternalServiceException"} + ] + }, + "DescribeScalingPolicies":{ + "name":"DescribeScalingPolicies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeScalingPoliciesRequest"}, + "output":{"shape":"DescribeScalingPoliciesResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"FailedResourceAccessException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"ConcurrentUpdateException"}, + {"shape":"InternalServiceException"} + ] + }, + "PutScalingPolicy":{ + "name":"PutScalingPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutScalingPolicyRequest"}, + "output":{"shape":"PutScalingPolicyResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"ObjectNotFoundException"}, + {"shape":"ConcurrentUpdateException"}, + {"shape":"InternalServiceException"} + ] + }, + "RegisterScalableTarget":{ + "name":"RegisterScalableTarget", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterScalableTargetRequest"}, + "output":{"shape":"RegisterScalableTargetResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"ConcurrentUpdateException"}, + {"shape":"InternalServiceException"} + ] + } + }, + "shapes":{ + "AdjustmentType":{ + "type":"string", + "enum":[ + "ChangeInCapacity", + "PercentChangeInCapacity", + "ExactCapacity" + ] + }, + "Alarm":{ + "type":"structure", + "required":[ + "AlarmName", + "AlarmARN" + ], + "members":{ + "AlarmName":{"shape":"ResourceId"}, + "AlarmARN":{"shape":"ResourceId"} + } + }, + "Alarms":{ + "type":"list", + "member":{"shape":"Alarm"} + }, + "ConcurrentUpdateException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "Cooldown":{"type":"integer"}, + "DeleteScalingPolicyRequest":{ + "type":"structure", + "required":[ + "PolicyName", + "ServiceNamespace", + "ResourceId", + "ScalableDimension" + ], + "members":{ + "PolicyName":{"shape":"ResourceIdMaxLen1600"}, + "ServiceNamespace":{"shape":"ServiceNamespace"}, + "ResourceId":{"shape":"ResourceIdMaxLen1600"}, + "ScalableDimension":{"shape":"ScalableDimension"} + } + }, + "DeleteScalingPolicyResponse":{ + "type":"structure", + "members":{ + } + }, + "DeregisterScalableTargetRequest":{ + "type":"structure", + "required":[ + "ServiceNamespace", + "ResourceId", + "ScalableDimension" + ], + "members":{ + "ServiceNamespace":{"shape":"ServiceNamespace"}, + "ResourceId":{"shape":"ResourceIdMaxLen1600"}, + "ScalableDimension":{"shape":"ScalableDimension"} + } + }, + "DeregisterScalableTargetResponse":{ + "type":"structure", + "members":{ + } + }, + "DescribeScalableTargetsRequest":{ + "type":"structure", + "required":["ServiceNamespace"], + "members":{ + "ServiceNamespace":{"shape":"ServiceNamespace"}, + "ResourceIds":{"shape":"ResourceIdsMaxLen1600"}, + 
"ScalableDimension":{"shape":"ScalableDimension"}, + "MaxResults":{"shape":"MaxResults"}, + "NextToken":{"shape":"XmlString"} + } + }, + "DescribeScalableTargetsResponse":{ + "type":"structure", + "members":{ + "ScalableTargets":{"shape":"ScalableTargets"}, + "NextToken":{"shape":"XmlString"} + } + }, + "DescribeScalingActivitiesRequest":{ + "type":"structure", + "required":["ServiceNamespace"], + "members":{ + "ServiceNamespace":{"shape":"ServiceNamespace"}, + "ResourceId":{"shape":"ResourceIdMaxLen1600"}, + "ScalableDimension":{"shape":"ScalableDimension"}, + "MaxResults":{"shape":"MaxResults"}, + "NextToken":{"shape":"XmlString"} + } + }, + "DescribeScalingActivitiesResponse":{ + "type":"structure", + "members":{ + "ScalingActivities":{"shape":"ScalingActivities"}, + "NextToken":{"shape":"XmlString"} + } + }, + "DescribeScalingPoliciesRequest":{ + "type":"structure", + "required":["ServiceNamespace"], + "members":{ + "PolicyNames":{"shape":"ResourceIdsMaxLen1600"}, + "ServiceNamespace":{"shape":"ServiceNamespace"}, + "ResourceId":{"shape":"ResourceIdMaxLen1600"}, + "ScalableDimension":{"shape":"ScalableDimension"}, + "MaxResults":{"shape":"MaxResults"}, + "NextToken":{"shape":"XmlString"} + } + }, + "DescribeScalingPoliciesResponse":{ + "type":"structure", + "members":{ + "ScalingPolicies":{"shape":"ScalingPolicies"}, + "NextToken":{"shape":"XmlString"} + } + }, + "ErrorMessage":{"type":"string"}, + "FailedResourceAccessException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "InternalServiceException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "InvalidNextTokenException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "MaxResults":{"type":"integer"}, + "MetricAggregationType":{ + "type":"string", + "enum":[ + "Average", + "Minimum", + "Maximum" + ] + }, + "MetricScale":{"type":"double"}, + "MinAdjustmentMagnitude":{"type":"integer"}, + "ObjectNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "PolicyName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"\\p{Print}+" + }, + "PolicyType":{ + "type":"string", + "enum":["StepScaling"] + }, + "PutScalingPolicyRequest":{ + "type":"structure", + "required":[ + "PolicyName", + "ServiceNamespace", + "ResourceId", + "ScalableDimension" + ], + "members":{ + "PolicyName":{"shape":"PolicyName"}, + "ServiceNamespace":{"shape":"ServiceNamespace"}, + "ResourceId":{"shape":"ResourceIdMaxLen1600"}, + "ScalableDimension":{"shape":"ScalableDimension"}, + "PolicyType":{"shape":"PolicyType"}, + "StepScalingPolicyConfiguration":{"shape":"StepScalingPolicyConfiguration"} + } + }, + "PutScalingPolicyResponse":{ + "type":"structure", + "required":["PolicyARN"], + "members":{ + "PolicyARN":{"shape":"ResourceIdMaxLen1600"} + } + }, + "RegisterScalableTargetRequest":{ + "type":"structure", + "required":[ + "ServiceNamespace", + "ResourceId", + "ScalableDimension" + ], + "members":{ + "ServiceNamespace":{"shape":"ServiceNamespace"}, + "ResourceId":{"shape":"ResourceIdMaxLen1600"}, + "ScalableDimension":{"shape":"ScalableDimension"}, + "MinCapacity":{"shape":"ResourceCapacity"}, + "MaxCapacity":{"shape":"ResourceCapacity"}, + "RoleARN":{"shape":"ResourceIdMaxLen1600"} + } + }, + 
"RegisterScalableTargetResponse":{ + "type":"structure", + "members":{ + } + }, + "ResourceCapacity":{"type":"integer"}, + "ResourceId":{ + "type":"string", + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "ResourceIdMaxLen1600":{ + "type":"string", + "max":1600, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "ResourceIdsMaxLen1600":{ + "type":"list", + "member":{"shape":"ResourceIdMaxLen1600"} + }, + "ScalableDimension":{ + "type":"string", + "enum":["ecs:service:DesiredCount"] + }, + "ScalableTarget":{ + "type":"structure", + "required":[ + "ServiceNamespace", + "ResourceId", + "ScalableDimension", + "MinCapacity", + "MaxCapacity", + "RoleARN", + "CreationTime" + ], + "members":{ + "ServiceNamespace":{"shape":"ServiceNamespace"}, + "ResourceId":{"shape":"ResourceIdMaxLen1600"}, + "ScalableDimension":{"shape":"ScalableDimension"}, + "MinCapacity":{"shape":"ResourceCapacity"}, + "MaxCapacity":{"shape":"ResourceCapacity"}, + "RoleARN":{"shape":"ResourceIdMaxLen1600"}, + "CreationTime":{"shape":"TimestampType"} + } + }, + "ScalableTargets":{ + "type":"list", + "member":{"shape":"ScalableTarget"} + }, + "ScalingActivities":{ + "type":"list", + "member":{"shape":"ScalingActivity"} + }, + "ScalingActivity":{ + "type":"structure", + "required":[ + "ActivityId", + "ServiceNamespace", + "ResourceId", + "ScalableDimension", + "Description", + "Cause", + "StartTime", + "StatusCode" + ], + "members":{ + "ActivityId":{"shape":"ResourceId"}, + "ServiceNamespace":{"shape":"ServiceNamespace"}, + "ResourceId":{"shape":"ResourceIdMaxLen1600"}, + "ScalableDimension":{"shape":"ScalableDimension"}, + "Description":{"shape":"XmlString"}, + "Cause":{"shape":"XmlString"}, + "StartTime":{"shape":"TimestampType"}, + "EndTime":{"shape":"TimestampType"}, + "StatusCode":{"shape":"ScalingActivityStatusCode"}, + "StatusMessage":{"shape":"XmlString"}, + "Details":{"shape":"XmlString"} + } + }, + "ScalingActivityStatusCode":{ + "type":"string", + "enum":[ + "Pending", + "InProgress", + "Successful", + "Overridden", + "Unfulfilled", + "Failed" + ] + }, + "ScalingAdjustment":{"type":"integer"}, + "ScalingPolicies":{ + "type":"list", + "member":{"shape":"ScalingPolicy"} + }, + "ScalingPolicy":{ + "type":"structure", + "required":[ + "PolicyARN", + "PolicyName", + "ServiceNamespace", + "ResourceId", + "ScalableDimension", + "PolicyType", + "CreationTime" + ], + "members":{ + "PolicyARN":{"shape":"ResourceIdMaxLen1600"}, + "PolicyName":{"shape":"PolicyName"}, + "ServiceNamespace":{"shape":"ServiceNamespace"}, + "ResourceId":{"shape":"ResourceIdMaxLen1600"}, + "ScalableDimension":{"shape":"ScalableDimension"}, + "PolicyType":{"shape":"PolicyType"}, + "StepScalingPolicyConfiguration":{"shape":"StepScalingPolicyConfiguration"}, + "Alarms":{"shape":"Alarms"}, + "CreationTime":{"shape":"TimestampType"} + } + }, + "ServiceNamespace":{ + "type":"string", + "enum":["ecs"] + }, + "StepAdjustment":{ + "type":"structure", + "required":["ScalingAdjustment"], + "members":{ + "MetricIntervalLowerBound":{"shape":"MetricScale"}, + "MetricIntervalUpperBound":{"shape":"MetricScale"}, + "ScalingAdjustment":{"shape":"ScalingAdjustment"} + } + }, + "StepAdjustments":{ + "type":"list", + "member":{"shape":"StepAdjustment"} + }, + "StepScalingPolicyConfiguration":{ + "type":"structure", + "members":{ + "AdjustmentType":{"shape":"AdjustmentType"}, + "StepAdjustments":{"shape":"StepAdjustments"}, + 
"MinAdjustmentMagnitude":{"shape":"MinAdjustmentMagnitude"}, + "Cooldown":{"shape":"Cooldown"}, + "MetricAggregationType":{"shape":"MetricAggregationType"} + } + }, + "TimestampType":{"type":"timestamp"}, + "ValidationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "XmlString":{ + "type":"string", + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/application-autoscaling/2016-02-06/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/application-autoscaling/2016-02-06/docs-2.json new file mode 100644 index 000000000..35a299912 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/application-autoscaling/2016-02-06/docs-2.json @@ -0,0 +1,362 @@ +{ + "version": "2.0", + "service": "

    Application Auto Scaling is a general-purpose Auto Scaling service for supported elastic AWS resources. With Application Auto Scaling, you can automatically scale your AWS resources, with an experience similar to that of Auto Scaling.

    At this time, Application Auto Scaling only supports scaling Amazon ECS services.

    For example, you can use Application Auto Scaling to accomplish the following tasks:

    • Define scaling policies for automatically adjusting your application’s resources

    • Scale your resources in response to CloudWatch alarms

    • View the history of your scaling events

    Application Auto Scaling is available in the following regions:

    • us-east-1

    • us-west-2

    • eu-west-1
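
    As a concrete starting point, here is a minimal Go sketch against the vendored applicationautoscaling package that registers an ECS service as a scalable target. The resource ID follows the service/default/sample-webapp example used throughout this documentation; the region, role ARN, and capacity values are hypothetical placeholders, not values taken from this changeset.

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/applicationautoscaling"
        )

        func main() {
            // Region and credential resolution are assumptions; adjust as needed.
            sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
            svc := applicationautoscaling.New(sess)

            // Register an ECS service as a scalable target.
            _, err := svc.RegisterScalableTarget(&applicationautoscaling.RegisterScalableTargetInput{
                ServiceNamespace:  aws.String("ecs"),
                ResourceId:        aws.String("service/default/sample-webapp"),
                ScalableDimension: aws.String("ecs:service:DesiredCount"),
                MinCapacity:       aws.Int64(1),
                MaxCapacity:       aws.Int64(10),
                RoleARN:           aws.String("arn:aws:iam::123456789012:role/application-autoscaling-ecs"),
            })
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println("scalable target registered")
        }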

    ", + "operations": { + "DeleteScalingPolicy": "

    Deletes an Application Auto Scaling scaling policy that was previously created. If you are no longer using a scaling policy, you can delete it with this operation.

    Deleting a policy deletes the underlying alarm action, but does not delete the CloudWatch alarm, even if it no longer has an associated action.

    To create a new scaling policy or update an existing one, see PutScalingPolicy.
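
    A minimal sketch of the corresponding SDK call, reusing the svc client from the sketch under the service overview; the policy name is a hypothetical placeholder:

        // Deleting a policy removes the underlying alarm action but leaves
        // the CloudWatch alarm itself in place, as noted above.
        _, err := svc.DeleteScalingPolicy(&applicationautoscaling.DeleteScalingPolicyInput{
            PolicyName:        aws.String("web-app-cpu-gt-75"),
            ServiceNamespace:  aws.String("ecs"),
            ResourceId:        aws.String("service/default/sample-webapp"),
            ScalableDimension: aws.String("ecs:service:DesiredCount"),
        })
        if err != nil {
            log.Fatal(err)
        }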

    ", + "DeregisterScalableTarget": "

    Deregisters a scalable target that was previously registered. If you are no longer using a scalable target, you can deregister it with this operation. When you deregister a scalable target, all of the scaling policies that are associated with that scalable target are deleted.

    To create a new scalable target or update an existing one, see RegisterScalableTarget.
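
    Assuming the same svc client as in the overview sketch, deregistration looks like the following; keep in mind that this also deletes the scaling policies attached to the target, per the paragraph above:

        _, err := svc.DeregisterScalableTarget(&applicationautoscaling.DeregisterScalableTargetInput{
            ServiceNamespace:  aws.String("ecs"),
            ResourceId:        aws.String("service/default/sample-webapp"),
            ScalableDimension: aws.String("ecs:service:DesiredCount"),
        })
        if err != nil {
            log.Fatal(err)
        }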

    ", + "DescribeScalableTargets": "

    Provides descriptive information about the scalable targets in a specified service namespace.

    You can filter the results in a service namespace with the ResourceIds and ScalableDimension parameters.

    To create a new scalable target or update an existing one, see RegisterScalableTarget. If you are no longer using a scalable target, you can deregister it with DeregisterScalableTarget.
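
    A short sketch of the filtered form, again assuming the svc client from the overview example; the resource ID is the documentation's own placeholder:

        out, err := svc.DescribeScalableTargets(&applicationautoscaling.DescribeScalableTargetsInput{
            ServiceNamespace: aws.String("ecs"),
            // Optional filter; omit ResourceIds to list every target in the namespace.
            ResourceIds: []*string{aws.String("service/default/sample-webapp")},
        })
        if err != nil {
            log.Fatal(err)
        }
        for _, t := range out.ScalableTargets {
            fmt.Printf("%s min=%d max=%d\n", aws.StringValue(t.ResourceId),
                aws.Int64Value(t.MinCapacity), aws.Int64Value(t.MaxCapacity))
        }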

    ", + "DescribeScalingActivities": "

    Provides descriptive information about the scaling activities in a specified service namespace.

    You can filter the results in a service namespace with the ResourceId and ScalableDimension parameters.

    Scaling activities are triggered by CloudWatch alarms that are associated with scaling policies. To view the existing scaling policies for a service namespace, see DescribeScalingPolicies. To create a new scaling policy or update an existing one, see PutScalingPolicy.
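
    For illustration, a sketch that lists recent activities for one hypothetical ECS service (same assumed svc client as above):

        out, err := svc.DescribeScalingActivities(&applicationautoscaling.DescribeScalingActivitiesInput{
            ServiceNamespace:  aws.String("ecs"),
            ResourceId:        aws.String("service/default/sample-webapp"),
            ScalableDimension: aws.String("ecs:service:DesiredCount"),
        })
        if err != nil {
            log.Fatal(err)
        }
        for _, a := range out.ScalingActivities {
            fmt.Printf("[%s] %s\n", aws.StringValue(a.StatusCode), aws.StringValue(a.Description))
        }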

    ", + "DescribeScalingPolicies": "

    Provides descriptive information about the scaling policies in a specified service namespace.

    You can filter the results in a service namespace with the ResourceId, ScalableDimension, and PolicyNames parameters.

    To create a new scaling policy or update an existing one, see PutScalingPolicy. If you are no longer using a scaling policy, you can delete it with DeleteScalingPolicy.
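
    A filtered query might look like the following sketch (same assumed svc client; the policy name is a placeholder):

        out, err := svc.DescribeScalingPolicies(&applicationautoscaling.DescribeScalingPoliciesInput{
            ServiceNamespace: aws.String("ecs"),
            PolicyNames:      []*string{aws.String("web-app-cpu-gt-75")},
        })
        if err != nil {
            log.Fatal(err)
        }
        for _, p := range out.ScalingPolicies {
            fmt.Println(aws.StringValue(p.PolicyARN))
        }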

    ", + "PutScalingPolicy": "

    Creates or updates a policy for an existing Application Auto Scaling scalable target. Each scalable target is identified by a service namespace, a resource ID, and a scalable dimension, and a scaling policy applies to the scalable target identified by those three attributes. You cannot create a scaling policy without first registering a scalable target with RegisterScalableTarget.

    To update an existing policy, use the existing policy name and set the parameters you want to change; any parameters that you do not set remain unchanged.

    You can view the existing scaling policies for a service namespace with DescribeScalingPolicies. If you are no longer using a scaling policy, you can delete it with DeleteScalingPolicy.
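
    Putting those pieces together, a hedged sketch that creates a simple step scaling policy for the hypothetical service used above; the adjustment, cooldown, and aggregation values are illustrative only:

        out, err := svc.PutScalingPolicy(&applicationautoscaling.PutScalingPolicyInput{
            PolicyName:        aws.String("web-app-cpu-gt-75"),
            ServiceNamespace:  aws.String("ecs"),
            ResourceId:        aws.String("service/default/sample-webapp"),
            ScalableDimension: aws.String("ecs:service:DesiredCount"),
            PolicyType:        aws.String("StepScaling"),
            StepScalingPolicyConfiguration: &applicationautoscaling.StepScalingPolicyConfiguration{
                AdjustmentType:        aws.String("ChangeInCapacity"),
                Cooldown:              aws.Int64(300),
                MetricAggregationType: aws.String("Average"),
                StepAdjustments: []*applicationautoscaling.StepAdjustment{{
                    MetricIntervalLowerBound: aws.Float64(0), // null upper bound = +infinity
                    ScalingAdjustment:        aws.Int64(2),   // add two tasks
                }},
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("policy ARN:", aws.StringValue(out.PolicyARN))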

    ", + "RegisterScalableTarget": "

    Registers or updates a scalable target. A scalable target is a resource that can be scaled up or down with Application Auto Scaling. After you have registered a scalable target, you can use this operation to update the minimum and maximum values for your scalable dimension.

    At this time, Application Auto Scaling only supports scaling Amazon ECS services.

    After you register a scalable target with Application Auto Scaling, you can create and apply scaling policies to it with PutScalingPolicy. You can view the existing scaling policies for a service namespace with DescribeScalingPolicies. If you are no longer using a scalable target, you can deregister it with DeregisterScalableTarget.
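
    Because RegisterScalableTarget is also the update path, a second call with the three identifying attributes plus the fields to change is enough; a sketch assuming the svc client from the overview example:

        // MinCapacity, MaxCapacity, and RoleARN are optional on update;
        // only MaxCapacity is changed here.
        _, err := svc.RegisterScalableTarget(&applicationautoscaling.RegisterScalableTargetInput{
            ServiceNamespace:  aws.String("ecs"),
            ResourceId:        aws.String("service/default/sample-webapp"),
            ScalableDimension: aws.String("ecs:service:DesiredCount"),
            MaxCapacity:       aws.Int64(20),
        })
        if err != nil {
            log.Fatal(err)
        }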

    " + }, + "shapes": { + "AdjustmentType": { + "base": null, + "refs": { + "StepScalingPolicyConfiguration$AdjustmentType": "

    The adjustment type, which specifies how the ScalingAdjustment parameter in a StepAdjustment is interpreted.

    " + } + }, + "Alarm": { + "base": "

    An object representing a CloudWatch alarm associated with a scaling policy.

    ", + "refs": { + "Alarms$member": null + } + }, + "Alarms": { + "base": null, + "refs": { + "ScalingPolicy$Alarms": "

    The CloudWatch alarms that are associated with the scaling policy.

    " + } + }, + "ConcurrentUpdateException": { + "base": "

    Concurrent updates caused an exception; for example, you might have requested an update to an Application Auto Scaling resource that already has a pending update.

    ", + "refs": { + } + }, + "Cooldown": { + "base": null, + "refs": { + "StepScalingPolicyConfiguration$Cooldown": "

    The amount of time, in seconds, after a scaling activity completes where previous trigger-related scaling activities can influence future scaling events.

    For scale-out policies, while Cooldown is in effect, the capacity that has been added by the previous scale-out event that initiated the Cooldown is calculated as part of the desired capacity for the next scale out. The intention is to scale out continuously, but not excessively. For example, suppose that an alarm triggers a step scaling policy to scale out an Amazon ECS service by 2 tasks, the scaling activity completes successfully, and a Cooldown period of 5 minutes starts. If the alarm triggers the same policy again during the Cooldown period, but at a more aggressive step adjustment that scales out the service by 3 tasks, the 2 tasks that were added in the previous scale-out event are considered part of that capacity, and only 1 additional task is added to the desired count.

    For scale-in policies, the Cooldown period blocks subsequent scale-in requests until it has expired. The intention is to scale in conservatively to protect your application's availability. However, if another alarm triggers a scale-out policy during the Cooldown period after a scale-in, Application Auto Scaling scales out your scalable target immediately.
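
    The scale-out arithmetic above can be made concrete with a short, purely illustrative Go calculation; no API calls are involved, and the numbers mirror the example in the preceding paragraph:

        current := 10             // desired count when the first alarm fires
        afterFirst := current + 2 // first scale-out adds 2 tasks; Cooldown starts
        // The more aggressive +3 step is applied to the pre-Cooldown capacity,
        // because the 2 tasks already added count toward the new target.
        target := current + 3
        fmt.Println(target - afterFirst) // prints 1: one additional task launched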

    " + } + }, + "DeleteScalingPolicyRequest": { + "base": null, + "refs": { + } + }, + "DeleteScalingPolicyResponse": { + "base": null, + "refs": { + } + }, + "DeregisterScalableTargetRequest": { + "base": null, + "refs": { + } + }, + "DeregisterScalableTargetResponse": { + "base": null, + "refs": { + } + }, + "DescribeScalableTargetsRequest": { + "base": null, + "refs": { + } + }, + "DescribeScalableTargetsResponse": { + "base": null, + "refs": { + } + }, + "DescribeScalingActivitiesRequest": { + "base": null, + "refs": { + } + }, + "DescribeScalingActivitiesResponse": { + "base": null, + "refs": { + } + }, + "DescribeScalingPoliciesRequest": { + "base": null, + "refs": { + } + }, + "DescribeScalingPoliciesResponse": { + "base": null, + "refs": { + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "ConcurrentUpdateException$Message": null, + "FailedResourceAccessException$Message": null, + "InternalServiceException$Message": null, + "InvalidNextTokenException$Message": null, + "LimitExceededException$Message": null, + "ObjectNotFoundException$Message": null, + "ValidationException$Message": null + } + }, + "FailedResourceAccessException": { + "base": "

    Failed access to resources caused an exception. This exception currently only applies to DescribeScalingPolicies. It is thrown when Application Auto Scaling is unable to retrieve the alarms associated with a scaling policy due to a client error, for example, if the role ARN specified for a scalable target does not have the proper permissions to call the CloudWatch DescribeAlarms API operation on behalf of your account.

    ", + "refs": { + } + }, + "InternalServiceException": { + "base": "

    The service encountered an internal error.

    ", + "refs": { + } + }, + "InvalidNextTokenException": { + "base": "

    The next token supplied was invalid.

    ", + "refs": { + } + }, + "LimitExceededException": { + "base": "

    Your account exceeded a limit. This exception is thrown when a per-account resource limit is exceeded. Application Auto Scaling has a limit of 40 scalable targets per account for Amazon ECS services, 50 scaling policies per scalable target, and 20 step adjustments per step scaling policy.

    ", + "refs": { + } + }, + "MaxResults": { + "base": null, + "refs": { + "DescribeScalableTargetsRequest$MaxResults": "

    The maximum number of scalable target results returned by DescribeScalableTargets in paginated output. When this parameter is used, DescribeScalableTargets returns up to MaxResults results in a single page along with a NextToken response element. The remaining results of the initial request can be seen by sending another DescribeScalableTargets request with the returned NextToken value. This value can be between 1 and 50. If this parameter is not used, then DescribeScalableTargets returns up to 50 results and a NextToken value, if applicable.

    ", + "DescribeScalingActivitiesRequest$MaxResults": "

    The maximum number of scaling activity results returned by DescribeScalingActivities in paginated output. When this parameter is used, DescribeScalingActivities returns up to MaxResults results in a single page along with a NextToken response element. The remaining results of the initial request can be seen by sending another DescribeScalingActivities request with the returned NextToken value. This value can be between 1 and 50. If this parameter is not used, then DescribeScalingActivities returns up to 50 results and a NextToken value, if applicable.

    ", + "DescribeScalingPoliciesRequest$MaxResults": "

    The maximum number of scaling policy results returned by DescribeScalingPolicies in paginated output. When this parameter is used, DescribeScalingPolicies returns up to MaxResults results in a single page along with a NextToken response element. The remaining results of the initial request can be seen by sending another DescribeScalingPolicies request with the returned NextToken value. This value can be between 1 and 50. If this parameter is not used, then DescribeScalingPolicies returns up to 50 results and a NextToken value, if applicable.
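
    Since all three Describe operations share the same MaxResults/NextToken contract, a manual pagination loop is straightforward; a sketch assuming the svc client from the overview example:

        input := &applicationautoscaling.DescribeScalableTargetsInput{
            ServiceNamespace: aws.String("ecs"),
            MaxResults:       aws.Int64(10), // page size; must be between 1 and 50
        }
        for {
            page, err := svc.DescribeScalableTargets(input)
            if err != nil {
                log.Fatal(err)
            }
            for _, t := range page.ScalableTargets {
                fmt.Println(aws.StringValue(t.ResourceId))
            }
            if page.NextToken == nil {
                break // no further pages
            }
            input.NextToken = page.NextToken
        }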

    " + } + }, + "MetricAggregationType": { + "base": null, + "refs": { + "StepScalingPolicyConfiguration$MetricAggregationType": "

    The aggregation type for the CloudWatch metrics. Valid values are Minimum, Maximum, and Average.

    " + } + }, + "MetricScale": { + "base": null, + "refs": { + "StepAdjustment$MetricIntervalLowerBound": "

    The lower bound for the difference between the alarm threshold and the CloudWatch metric. If the metric value is above the breach threshold, the lower bound is inclusive (the metric must be greater than or equal to the threshold plus the lower bound). Otherwise, it is exclusive (the metric must be greater than the threshold plus the lower bound). A null value indicates negative infinity.

    ", + "StepAdjustment$MetricIntervalUpperBound": "

    The upper bound for the difference between the alarm threshold and the CloudWatch metric. If the metric value is above the breach threshold, the upper bound is exclusive (the metric must be less than the threshold plus the upper bound). Otherwise, it is inclusive (the metric must be less than or equal to the threshold plus the upper bound). A null value indicates positive infinity.

    The upper bound must be greater than the lower bound.

    " + } + }, + "MinAdjustmentMagnitude": { + "base": null, + "refs": { + "StepScalingPolicyConfiguration$MinAdjustmentMagnitude": "

    The minimum magnitude by which to adjust your scalable dimension as a result of a scaling activity. If the adjustment type is PercentChangeInCapacity, the scaling policy changes the scalable dimension of the scalable target by at least this amount.

    " + } + }, + "ObjectNotFoundException": { + "base": "

    The specified object could not be found. For any Put or Register API operation, which depends on the existence of a scalable target, this exception is thrown if the scalable target with the specified service namespace, resource ID, and scalable dimension does not exist. For any Delete or Deregister API operation, this exception is thrown if the resource that is to be deleted or deregistered cannot be found.

    ", + "refs": { + } + }, + "PolicyName": { + "base": null, + "refs": { + "PutScalingPolicyRequest$PolicyName": "

    The name of the scaling policy.

    ", + "ScalingPolicy$PolicyName": "

    The name of the scaling policy.

    " + } + }, + "PolicyType": { + "base": null, + "refs": { + "PutScalingPolicyRequest$PolicyType": "

    The policy type. This parameter is required if you are creating a new policy.

    ", + "ScalingPolicy$PolicyType": "

    The scaling policy type.

    " + } + }, + "PutScalingPolicyRequest": { + "base": null, + "refs": { + } + }, + "PutScalingPolicyResponse": { + "base": null, + "refs": { + } + }, + "RegisterScalableTargetRequest": { + "base": null, + "refs": { + } + }, + "RegisterScalableTargetResponse": { + "base": null, + "refs": { + } + }, + "ResourceCapacity": { + "base": null, + "refs": { + "RegisterScalableTargetRequest$MinCapacity": "

    The minimum value to which the scalable target can be scaled in response to scale-in activities. This parameter is required if you are registering a new scalable target, and it is optional if you are updating an existing one.

    ", + "RegisterScalableTargetRequest$MaxCapacity": "

    The maximum value to which the scalable target can be scaled in response to scale-out activities. This parameter is required if you are registering a new scalable target, and it is optional if you are updating an existing one.

    ", + "ScalableTarget$MinCapacity": "

    The minimum value to which the scalable target can be scaled in response to scale-in activities.

    ", + "ScalableTarget$MaxCapacity": "

    The maximum value to which the scalable target can be scaled in response to scale-out activities.

    " + } + }, + "ResourceId": { + "base": null, + "refs": { + "Alarm$AlarmName": "

    The name of the alarm.

    ", + "Alarm$AlarmARN": "

    The Amazon Resource Name (ARN) of the alarm.

    ", + "ScalingActivity$ActivityId": "

    The unique identifier string for the scaling activity.

    " + } + }, + "ResourceIdMaxLen1600": { + "base": null, + "refs": { + "DeleteScalingPolicyRequest$PolicyName": "

    The name of the scaling policy to delete.

    ", + "DeleteScalingPolicyRequest$ResourceId": "

    The unique identifier string for the resource associated with the scaling policy. For Amazon ECS services, this value is the resource type, followed by the cluster name and service name, such as service/default/sample-webapp.

    ", + "DeregisterScalableTargetRequest$ResourceId": "

    The unique identifier string for the resource associated with the scalable target. For Amazon ECS services, this value is the resource type, followed by the cluster name and service name, such as service/default/sample-webapp.

    ", + "DescribeScalingActivitiesRequest$ResourceId": "

    The unique identifier string for the resource associated with the scaling activity. For Amazon ECS services, this value is the resource type, followed by the cluster name and service name, such as service/default/sample-webapp. If you specify a scalable dimension, you must also specify a resource ID.

    ", + "DescribeScalingPoliciesRequest$ResourceId": "

    The unique resource identifier string of the scalable target that the scaling policy is associated with. For Amazon ECS services, this value is the resource type, followed by the cluster name and service name, such as service/default/sample-webapp. If you specify a scalable dimension, you must also specify a resource ID.

    ", + "PutScalingPolicyRequest$ResourceId": "

    The unique resource identifier string for the scalable target that this scaling policy applies to. For Amazon ECS services, this value is the resource type, followed by the cluster name and service name, such as service/default/sample-webapp.

    ", + "PutScalingPolicyResponse$PolicyARN": "

    The Amazon Resource Name (ARN) of the resulting scaling policy.

    ", + "RegisterScalableTargetRequest$ResourceId": "

    The unique identifier string for the resource to associate with the scalable target. For Amazon ECS services, this value is the resource type, followed by the cluster name and service name, such as service/default/sample-webapp.

    ", + "RegisterScalableTargetRequest$RoleARN": "

    The ARN of the IAM role that allows Application Auto Scaling to modify your scalable target on your behalf. This parameter is required if you are registering a new scalable target, and it is optional if you are updating an existing one.

    ", + "ResourceIdsMaxLen1600$member": null, + "ScalableTarget$ResourceId": "

    The unique identifier string for the resource associated with the scalable target. For Amazon ECS services, this value is the resource type, followed by the cluster name and service name, such as service/default/sample-webapp.

    ", + "ScalableTarget$RoleARN": "

    The ARN of the IAM role that allows Application Auto Scaling to modify your scalable target on your behalf.

    ", + "ScalingActivity$ResourceId": "

    The unique identifier string for the resource associated with the scaling activity. For Amazon ECS services, this value is the resource type, followed by the cluster name and service name, such as service/default/sample-webapp.

    ", + "ScalingPolicy$PolicyARN": "

    The Amazon Resource Name (ARN) of the scaling policy.

    ", + "ScalingPolicy$ResourceId": "

    The unique identifier string for the resource associated with the scaling policy. For Amazon ECS services, this value is the resource type, followed by the cluster name and service name, such as service/default/sample-webapp.

    " + } + }, + "ResourceIdsMaxLen1600": { + "base": null, + "refs": { + "DescribeScalableTargetsRequest$ResourceIds": "

    The unique identifier string for the resource associated with the scalable target. For Amazon ECS services, this value is the resource type, followed by the cluster name and service name, such as service/default/sample-webapp. If you specify a scalable dimension, you must also specify a resource ID.

    ", + "DescribeScalingPoliciesRequest$PolicyNames": "

    The names of the scaling policies to describe.

    " + } + }, + "ScalableDimension": { + "base": null, + "refs": { + "DeleteScalingPolicyRequest$ScalableDimension": "

    The scalable dimension associated with the scaling policy. The scalable dimension contains the service namespace, resource type, and scaling property, such as ecs:service:DesiredCount for the desired task count of an Amazon ECS service.

    ", + "DeregisterScalableTargetRequest$ScalableDimension": "

    The scalable dimension associated with the scalable target. The scalable dimension contains the service namespace, resource type, and scaling property, such as ecs:service:DesiredCount for the desired task count of an Amazon ECS service.

    ", + "DescribeScalableTargetsRequest$ScalableDimension": "

    The scalable dimension associated with the scalable target. The scalable dimension contains the service namespace, resource type, and scaling property, such as ecs:service:DesiredCount for the desired task count of an Amazon ECS service. If you specify a scalable dimension, you must also specify a resource ID.

    ", + "DescribeScalingActivitiesRequest$ScalableDimension": "

    The scalable dimension associated with the scaling activity. The scalable dimension contains the service namespace, resource type, and scaling property, such as ecs:service:DesiredCount for the desired task count of an Amazon ECS service. If you specify a scalable dimension, you must also specify a resource ID.

    ", + "DescribeScalingPoliciesRequest$ScalableDimension": "

    The scalable dimension of the scalable target that the scaling policy is associated with. The scalable dimension contains the service namespace, resource type, and scaling property, such as ecs:service:DesiredCount for the desired task count of an Amazon ECS service. If you specify a scalable dimension, you must also specify a resource ID.

    ", + "PutScalingPolicyRequest$ScalableDimension": "

    The scalable dimension of the scalable target that this scaling policy applies to. The scalable dimension contains the service namespace, resource type, and scaling property, such as ecs:service:DesiredCount for the desired task count of an Amazon ECS service.

    ", + "RegisterScalableTargetRequest$ScalableDimension": "

    The scalable dimension associated with the scalable target. The scalable dimension contains the service namespace, resource type, and scaling property, such as ecs:service:DesiredCount for the desired task count of an Amazon ECS service.

    ", + "ScalableTarget$ScalableDimension": "

    The scalable dimension associated with the scalable target. The scalable dimension contains the service namespace, resource type, and scaling property, such as ecs:service:DesiredCount for the desired task count of an Amazon ECS service.

    ", + "ScalingActivity$ScalableDimension": "

    The scalable dimension associated with the scaling activity. The scalable dimension contains the service namespace, resource type, and scaling property, such as ecs:service:DesiredCount for the desired task count of an Amazon ECS service.

    ", + "ScalingPolicy$ScalableDimension": "

    The scalable dimension associated with the scaling policy. The scalable dimension contains the service namespace, resource type, and scaling property, such as ecs:service:DesiredCount for the desired task count of an Amazon ECS service.

    " + } + }, + "ScalableTarget": { + "base": "

    An object representing a scalable target.

    ", + "refs": { + "ScalableTargets$member": null + } + }, + "ScalableTargets": { + "base": null, + "refs": { + "DescribeScalableTargetsResponse$ScalableTargets": "

    The list of scalable targets that matches the request parameters.

    " + } + }, + "ScalingActivities": { + "base": null, + "refs": { + "DescribeScalingActivitiesResponse$ScalingActivities": "

    A list of scaling activity objects.

    " + } + }, + "ScalingActivity": { + "base": "

    An object representing a scaling activity.

    ", + "refs": { + "ScalingActivities$member": null + } + }, + "ScalingActivityStatusCode": { + "base": null, + "refs": { + "ScalingActivity$StatusCode": "

    Indicates the status of the scaling activity.

    " + } + }, + "ScalingAdjustment": { + "base": null, + "refs": { + "StepAdjustment$ScalingAdjustment": "

    The amount by which to scale, based on the specified adjustment type. A positive value adds to the current scalable dimension, while a negative value subtracts from it.

    " + } + }, + "ScalingPolicies": { + "base": null, + "refs": { + "DescribeScalingPoliciesResponse$ScalingPolicies": "

    A list of scaling policy objects.

    " + } + }, + "ScalingPolicy": { + "base": "

    An object representing a scaling policy.

    ", + "refs": { + "ScalingPolicies$member": null + } + }, + "ServiceNamespace": { + "base": null, + "refs": { + "DeleteScalingPolicyRequest$ServiceNamespace": "

    The namespace for the AWS service that the scaling policy is associated with. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

    ", + "DeregisterScalableTargetRequest$ServiceNamespace": "

    The namespace for the AWS service that the scalable target is associated with. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

    ", + "DescribeScalableTargetsRequest$ServiceNamespace": "

    The namespace for the AWS service that the scalable target is associated with. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

    ", + "DescribeScalingActivitiesRequest$ServiceNamespace": "

    The namespace for the AWS service that the scaling activity is associated with. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

    ", + "DescribeScalingPoliciesRequest$ServiceNamespace": "

    The AWS service namespace of the scalable target that the scaling policy is associated with. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

    ", + "PutScalingPolicyRequest$ServiceNamespace": "

    The AWS service namespace of the scalable target that this scaling policy applies to. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

    ", + "RegisterScalableTargetRequest$ServiceNamespace": "

    The namespace for the AWS service that the scalable target is associated with. For Amazon ECS services, the namespace value is ecs. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

    ", + "ScalableTarget$ServiceNamespace": "

    The namespace for the AWS service that the scalable target is associated with. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

    ", + "ScalingActivity$ServiceNamespace": "

    The namespace for the AWS service that the scaling activity is associated with. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

    ", + "ScalingPolicy$ServiceNamespace": "

    The namespace for the AWS service that the scaling policy is associated with. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

    " + } + }, + "StepAdjustment": { + "base": "

    An object representing a step adjustment for a StepScalingPolicyConfiguration. Describes an adjustment based on the difference between the value of the aggregated CloudWatch metric and the breach threshold that you've defined for the alarm.

    For the following examples, suppose that you have an alarm with a breach threshold of 50:

    • If you want the adjustment to be triggered when the metric is greater than or equal to 50 and less than 60, specify a lower bound of 0 and an upper bound of 10.

    • If you want the adjustment to be triggered when the metric is greater than 40 and less than or equal to 50, specify a lower bound of -10 and an upper bound of 0.

    There are a few rules for the step adjustments for your step policy:

    • The ranges of your step adjustments can't overlap or have a gap.

    • At most one step adjustment can have a null lower bound. If one step adjustment has a negative lower bound, then there must be a step adjustment with a null lower bound.

    • At most one step adjustment can have a null upper bound. If one step adjustment has a positive upper bound, then there must be a step adjustment with a null upper bound.

    • The upper and lower bound can't be null in the same step adjustment.
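
    Translating those rules into the request shapes defined in this model, a rule-compliant pair of scale-out step adjustments might look like the following sketch; the bounds are offsets from the alarm threshold, and all values are illustrative:

        steps := []*applicationautoscaling.StepAdjustment{
            {
                // metric in [threshold, threshold+10): add one task
                MetricIntervalLowerBound: aws.Float64(0),
                MetricIntervalUpperBound: aws.Float64(10),
                ScalingAdjustment:        aws.Int64(1),
            },
            {
                // metric >= threshold+10: add two tasks; the null upper bound
                // stands in for positive infinity, satisfying the rule that
                // one adjustment must have a null upper bound
                MetricIntervalLowerBound: aws.Float64(10),
                ScalingAdjustment:        aws.Int64(2),
            },
        }
        // steps would be passed as StepScalingPolicyConfiguration.StepAdjustments.
        _ = steps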

    ", + "refs": { + "StepAdjustments$member": null + } + }, + "StepAdjustments": { + "base": null, + "refs": { + "StepScalingPolicyConfiguration$StepAdjustments": "

    A set of adjustments that enable you to scale based on the size of the alarm breach.

    " + } + }, + "StepScalingPolicyConfiguration": { + "base": "

    An object representing a step scaling policy configuration.

    ", + "refs": { + "PutScalingPolicyRequest$StepScalingPolicyConfiguration": "

    The configuration for the step scaling policy. This parameter is required if you are creating a new policy. For more information, see StepScalingPolicyConfiguration and StepAdjustment.

    ", + "ScalingPolicy$StepScalingPolicyConfiguration": "

    The configuration for the step scaling policy.

    " + } + }, + "TimestampType": { + "base": null, + "refs": { + "ScalableTarget$CreationTime": "

    The Unix timestamp for when the scalable target was created.

    ", + "ScalingActivity$StartTime": "

    The Unix timestamp for when the scaling activity began.

    ", + "ScalingActivity$EndTime": "

    The Unix timestamp for when the scaling activity ended.

    ", + "ScalingPolicy$CreationTime": "

    The Unix timestamp for when the scaling policy was created.

    " + } + }, + "ValidationException": { + "base": "

    An exception was thrown for a validation issue. Review the available parameters for the API request.

    ", + "refs": { + } + }, + "XmlString": { + "base": null, + "refs": { + "DescribeScalableTargetsRequest$NextToken": "

    The NextToken value returned from a previous paginated DescribeScalableTargets request. Pagination continues from the end of the previous results that returned the NextToken value. This value is null when there are no more results to return.

    ", + "DescribeScalableTargetsResponse$NextToken": "

    The NextToken value to include in a future DescribeScalableTargets request. When the results of a DescribeScalableTargets request exceed MaxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeScalingActivitiesRequest$NextToken": "

    The NextToken value returned from a previous paginated DescribeScalingActivities request. Pagination continues from the end of the previous results that returned the NextToken value. This value is null when there are no more results to return.

    ", + "DescribeScalingActivitiesResponse$NextToken": "

    The NextToken value to include in a future DescribeScalingActivities request. When the results of a DescribeScalingActivities request exceed MaxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeScalingPoliciesRequest$NextToken": "

    The NextToken value returned from a previous paginated DescribeScalingPolicies request. Pagination continues from the end of the previous results that returned the NextToken value. This value is null when there are no more results to return.

    ", + "DescribeScalingPoliciesResponse$NextToken": "

    The NextToken value to include in a future DescribeScalingPolicies request. When the results of a DescribeScalingPolicies request exceed MaxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "ScalingActivity$Description": "

    A simple description of what action the scaling activity intends to accomplish.

    ", + "ScalingActivity$Cause": "

    A simple description of what caused the scaling activity to happen.

    ", + "ScalingActivity$StatusMessage": "

    A simple message about the current status of the scaling activity.

    ", + "ScalingActivity$Details": "

    The details about the scaling activity.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/application-autoscaling/2016-02-06/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/application-autoscaling/2016-02-06/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/application-autoscaling/2016-02-06/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/application-autoscaling/2016-02-06/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/application-autoscaling/2016-02-06/paginators-1.json new file mode 100644 index 000000000..97ae6002f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/application-autoscaling/2016-02-06/paginators-1.json @@ -0,0 +1,22 @@ +{ + "pagination": { + "DescribeScalableTargets": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "ScalableTargets" + }, + "DescribeScalingPolicies": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "ScalingPolicies" + }, + "DescribeScalingActivities": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "ScalingActivities" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/autoscaling/2011-01-01/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/autoscaling/2011-01-01/api-2.json new file mode 100644 index 000000000..378eca7c9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/autoscaling/2011-01-01/api-2.json @@ -0,0 +1,2038 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2011-01-01", + "endpointPrefix":"autoscaling", + "protocol":"query", + "serviceFullName":"Auto Scaling", + "signatureVersion":"v4", + "xmlNamespace":"http://autoscaling.amazonaws.com/doc/2011-01-01/" + }, + "operations":{ + "AttachInstances":{ + "name":"AttachInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachInstancesQuery"}, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "AttachLoadBalancers":{ + "name":"AttachLoadBalancers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachLoadBalancersType"}, + "output":{ + "shape":"AttachLoadBalancersResultType", + "resultWrapper":"AttachLoadBalancersResult" + }, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "CompleteLifecycleAction":{ + "name":"CompleteLifecycleAction", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CompleteLifecycleActionType"}, + "output":{ + "shape":"CompleteLifecycleActionAnswer", + "resultWrapper":"CompleteLifecycleActionResult" + }, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "CreateAutoScalingGroup":{ + "name":"CreateAutoScalingGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateAutoScalingGroupType"}, + "errors":[ + {"shape":"AlreadyExistsFault"}, + {"shape":"LimitExceededFault"}, + {"shape":"ResourceContentionFault"} + ] + }, + "CreateLaunchConfiguration":{ + "name":"CreateLaunchConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateLaunchConfigurationType"}, + "errors":[ + {"shape":"AlreadyExistsFault"}, + {"shape":"LimitExceededFault"}, + {"shape":"ResourceContentionFault"} + ] + }, + "CreateOrUpdateTags":{ + "name":"CreateOrUpdateTags", + "http":{ + "method":"POST", + "requestUri":"/" 
+ }, + "input":{"shape":"CreateOrUpdateTagsType"}, + "errors":[ + {"shape":"LimitExceededFault"}, + {"shape":"AlreadyExistsFault"}, + {"shape":"ResourceContentionFault"} + ] + }, + "DeleteAutoScalingGroup":{ + "name":"DeleteAutoScalingGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAutoScalingGroupType"}, + "errors":[ + {"shape":"ScalingActivityInProgressFault"}, + {"shape":"ResourceInUseFault"}, + {"shape":"ResourceContentionFault"} + ] + }, + "DeleteLaunchConfiguration":{ + "name":"DeleteLaunchConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"LaunchConfigurationNameType"}, + "errors":[ + {"shape":"ResourceInUseFault"}, + {"shape":"ResourceContentionFault"} + ] + }, + "DeleteLifecycleHook":{ + "name":"DeleteLifecycleHook", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteLifecycleHookType"}, + "output":{ + "shape":"DeleteLifecycleHookAnswer", + "resultWrapper":"DeleteLifecycleHookResult" + }, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "DeleteNotificationConfiguration":{ + "name":"DeleteNotificationConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteNotificationConfigurationType"}, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "DeletePolicy":{ + "name":"DeletePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeletePolicyType"}, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "DeleteScheduledAction":{ + "name":"DeleteScheduledAction", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteScheduledActionType"}, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "DeleteTags":{ + "name":"DeleteTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteTagsType"}, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "DescribeAccountLimits":{ + "name":"DescribeAccountLimits", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{ + "shape":"DescribeAccountLimitsAnswer", + "resultWrapper":"DescribeAccountLimitsResult" + }, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "DescribeAdjustmentTypes":{ + "name":"DescribeAdjustmentTypes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{ + "shape":"DescribeAdjustmentTypesAnswer", + "resultWrapper":"DescribeAdjustmentTypesResult" + }, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "DescribeAutoScalingGroups":{ + "name":"DescribeAutoScalingGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AutoScalingGroupNamesType"}, + "output":{ + "shape":"AutoScalingGroupsType", + "resultWrapper":"DescribeAutoScalingGroupsResult" + }, + "errors":[ + {"shape":"InvalidNextToken"}, + {"shape":"ResourceContentionFault"} + ] + }, + "DescribeAutoScalingInstances":{ + "name":"DescribeAutoScalingInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAutoScalingInstancesType"}, + "output":{ + "shape":"AutoScalingInstancesType", + "resultWrapper":"DescribeAutoScalingInstancesResult" + }, + "errors":[ + {"shape":"InvalidNextToken"}, + {"shape":"ResourceContentionFault"} + ] + }, + "DescribeAutoScalingNotificationTypes":{ + "name":"DescribeAutoScalingNotificationTypes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{ + "shape":"DescribeAutoScalingNotificationTypesAnswer", + 
"resultWrapper":"DescribeAutoScalingNotificationTypesResult" + }, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "DescribeLaunchConfigurations":{ + "name":"DescribeLaunchConfigurations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"LaunchConfigurationNamesType"}, + "output":{ + "shape":"LaunchConfigurationsType", + "resultWrapper":"DescribeLaunchConfigurationsResult" + }, + "errors":[ + {"shape":"InvalidNextToken"}, + {"shape":"ResourceContentionFault"} + ] + }, + "DescribeLifecycleHookTypes":{ + "name":"DescribeLifecycleHookTypes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{ + "shape":"DescribeLifecycleHookTypesAnswer", + "resultWrapper":"DescribeLifecycleHookTypesResult" + }, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "DescribeLifecycleHooks":{ + "name":"DescribeLifecycleHooks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeLifecycleHooksType"}, + "output":{ + "shape":"DescribeLifecycleHooksAnswer", + "resultWrapper":"DescribeLifecycleHooksResult" + }, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "DescribeLoadBalancers":{ + "name":"DescribeLoadBalancers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeLoadBalancersRequest"}, + "output":{ + "shape":"DescribeLoadBalancersResponse", + "resultWrapper":"DescribeLoadBalancersResult" + }, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "DescribeMetricCollectionTypes":{ + "name":"DescribeMetricCollectionTypes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{ + "shape":"DescribeMetricCollectionTypesAnswer", + "resultWrapper":"DescribeMetricCollectionTypesResult" + }, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "DescribeNotificationConfigurations":{ + "name":"DescribeNotificationConfigurations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeNotificationConfigurationsType"}, + "output":{ + "shape":"DescribeNotificationConfigurationsAnswer", + "resultWrapper":"DescribeNotificationConfigurationsResult" + }, + "errors":[ + {"shape":"InvalidNextToken"}, + {"shape":"ResourceContentionFault"} + ] + }, + "DescribePolicies":{ + "name":"DescribePolicies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribePoliciesType"}, + "output":{ + "shape":"PoliciesType", + "resultWrapper":"DescribePoliciesResult" + }, + "errors":[ + {"shape":"InvalidNextToken"}, + {"shape":"ResourceContentionFault"} + ] + }, + "DescribeScalingActivities":{ + "name":"DescribeScalingActivities", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeScalingActivitiesType"}, + "output":{ + "shape":"ActivitiesType", + "resultWrapper":"DescribeScalingActivitiesResult" + }, + "errors":[ + {"shape":"InvalidNextToken"}, + {"shape":"ResourceContentionFault"} + ] + }, + "DescribeScalingProcessTypes":{ + "name":"DescribeScalingProcessTypes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{ + "shape":"ProcessesType", + "resultWrapper":"DescribeScalingProcessTypesResult" + }, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "DescribeScheduledActions":{ + "name":"DescribeScheduledActions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeScheduledActionsType"}, + "output":{ + "shape":"ScheduledActionsType", + "resultWrapper":"DescribeScheduledActionsResult" + }, + "errors":[ + {"shape":"InvalidNextToken"}, + 
{"shape":"ResourceContentionFault"} + ] + }, + "DescribeTags":{ + "name":"DescribeTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTagsType"}, + "output":{ + "shape":"TagsType", + "resultWrapper":"DescribeTagsResult" + }, + "errors":[ + {"shape":"InvalidNextToken"}, + {"shape":"ResourceContentionFault"} + ] + }, + "DescribeTerminationPolicyTypes":{ + "name":"DescribeTerminationPolicyTypes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{ + "shape":"DescribeTerminationPolicyTypesAnswer", + "resultWrapper":"DescribeTerminationPolicyTypesResult" + }, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "DetachInstances":{ + "name":"DetachInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachInstancesQuery"}, + "output":{ + "shape":"DetachInstancesAnswer", + "resultWrapper":"DetachInstancesResult" + }, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "DetachLoadBalancers":{ + "name":"DetachLoadBalancers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachLoadBalancersType"}, + "output":{ + "shape":"DetachLoadBalancersResultType", + "resultWrapper":"DetachLoadBalancersResult" + }, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "DisableMetricsCollection":{ + "name":"DisableMetricsCollection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableMetricsCollectionQuery"}, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "EnableMetricsCollection":{ + "name":"EnableMetricsCollection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableMetricsCollectionQuery"}, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "EnterStandby":{ + "name":"EnterStandby", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnterStandbyQuery"}, + "output":{ + "shape":"EnterStandbyAnswer", + "resultWrapper":"EnterStandbyResult" + }, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "ExecutePolicy":{ + "name":"ExecutePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ExecutePolicyType"}, + "errors":[ + {"shape":"ScalingActivityInProgressFault"}, + {"shape":"ResourceContentionFault"} + ] + }, + "ExitStandby":{ + "name":"ExitStandby", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ExitStandbyQuery"}, + "output":{ + "shape":"ExitStandbyAnswer", + "resultWrapper":"ExitStandbyResult" + }, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "PutLifecycleHook":{ + "name":"PutLifecycleHook", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutLifecycleHookType"}, + "output":{ + "shape":"PutLifecycleHookAnswer", + "resultWrapper":"PutLifecycleHookResult" + }, + "errors":[ + {"shape":"LimitExceededFault"}, + {"shape":"ResourceContentionFault"} + ] + }, + "PutNotificationConfiguration":{ + "name":"PutNotificationConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutNotificationConfigurationType"}, + "errors":[ + {"shape":"LimitExceededFault"}, + {"shape":"ResourceContentionFault"} + ] + }, + "PutScalingPolicy":{ + "name":"PutScalingPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutScalingPolicyType"}, + "output":{ + "shape":"PolicyARNType", + "resultWrapper":"PutScalingPolicyResult" + }, + "errors":[ + {"shape":"LimitExceededFault"}, + {"shape":"ResourceContentionFault"} + ] + }, + 
"PutScheduledUpdateGroupAction":{ + "name":"PutScheduledUpdateGroupAction", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutScheduledUpdateGroupActionType"}, + "errors":[ + {"shape":"AlreadyExistsFault"}, + {"shape":"LimitExceededFault"}, + {"shape":"ResourceContentionFault"} + ] + }, + "RecordLifecycleActionHeartbeat":{ + "name":"RecordLifecycleActionHeartbeat", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RecordLifecycleActionHeartbeatType"}, + "output":{ + "shape":"RecordLifecycleActionHeartbeatAnswer", + "resultWrapper":"RecordLifecycleActionHeartbeatResult" + }, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "ResumeProcesses":{ + "name":"ResumeProcesses", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ScalingProcessQuery"}, + "errors":[ + {"shape":"ResourceInUseFault"}, + {"shape":"ResourceContentionFault"} + ] + }, + "SetDesiredCapacity":{ + "name":"SetDesiredCapacity", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetDesiredCapacityType"}, + "errors":[ + {"shape":"ScalingActivityInProgressFault"}, + {"shape":"ResourceContentionFault"} + ] + }, + "SetInstanceHealth":{ + "name":"SetInstanceHealth", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetInstanceHealthQuery"}, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "SetInstanceProtection":{ + "name":"SetInstanceProtection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetInstanceProtectionQuery"}, + "output":{ + "shape":"SetInstanceProtectionAnswer", + "resultWrapper":"SetInstanceProtectionResult" + }, + "errors":[ + {"shape":"LimitExceededFault"}, + {"shape":"ResourceContentionFault"} + ] + }, + "SuspendProcesses":{ + "name":"SuspendProcesses", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ScalingProcessQuery"}, + "errors":[ + {"shape":"ResourceInUseFault"}, + {"shape":"ResourceContentionFault"} + ] + }, + "TerminateInstanceInAutoScalingGroup":{ + "name":"TerminateInstanceInAutoScalingGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TerminateInstanceInAutoScalingGroupType"}, + "output":{ + "shape":"ActivityType", + "resultWrapper":"TerminateInstanceInAutoScalingGroupResult" + }, + "errors":[ + {"shape":"ScalingActivityInProgressFault"}, + {"shape":"ResourceContentionFault"} + ] + }, + "UpdateAutoScalingGroup":{ + "name":"UpdateAutoScalingGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateAutoScalingGroupType"}, + "errors":[ + {"shape":"ScalingActivityInProgressFault"}, + {"shape":"ResourceContentionFault"} + ] + } + }, + "shapes":{ + "Activities":{ + "type":"list", + "member":{"shape":"Activity"} + }, + "ActivitiesType":{ + "type":"structure", + "required":["Activities"], + "members":{ + "Activities":{"shape":"Activities"}, + "NextToken":{"shape":"XmlString"} + } + }, + "Activity":{ + "type":"structure", + "required":[ + "ActivityId", + "AutoScalingGroupName", + "Cause", + "StartTime", + "StatusCode" + ], + "members":{ + "ActivityId":{"shape":"XmlString"}, + "AutoScalingGroupName":{"shape":"XmlStringMaxLen255"}, + "Description":{"shape":"XmlString"}, + "Cause":{"shape":"XmlStringMaxLen1023"}, + "StartTime":{"shape":"TimestampType"}, + "EndTime":{"shape":"TimestampType"}, + "StatusCode":{"shape":"ScalingActivityStatusCode"}, + "StatusMessage":{"shape":"XmlStringMaxLen255"}, + "Progress":{"shape":"Progress"}, + "Details":{"shape":"XmlString"} 
+ } + }, + "ActivityIds":{ + "type":"list", + "member":{"shape":"XmlString"} + }, + "ActivityType":{ + "type":"structure", + "members":{ + "Activity":{"shape":"Activity"} + } + }, + "AdjustmentType":{ + "type":"structure", + "members":{ + "AdjustmentType":{"shape":"XmlStringMaxLen255"} + } + }, + "AdjustmentTypes":{ + "type":"list", + "member":{"shape":"AdjustmentType"} + }, + "Alarm":{ + "type":"structure", + "members":{ + "AlarmName":{"shape":"XmlStringMaxLen255"}, + "AlarmARN":{"shape":"ResourceName"} + } + }, + "Alarms":{ + "type":"list", + "member":{"shape":"Alarm"} + }, + "AlreadyExistsFault":{ + "type":"structure", + "members":{ + "message":{"shape":"XmlStringMaxLen255"} + }, + "error":{ + "code":"AlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "AsciiStringMaxLen255":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[A-Za-z0-9\\-_\\/]+" + }, + "AssociatePublicIpAddress":{"type":"boolean"}, + "AttachInstancesQuery":{ + "type":"structure", + "required":["AutoScalingGroupName"], + "members":{ + "InstanceIds":{"shape":"InstanceIds"}, + "AutoScalingGroupName":{"shape":"ResourceName"} + } + }, + "AttachLoadBalancersResultType":{ + "type":"structure", + "members":{ + } + }, + "AttachLoadBalancersType":{ + "type":"structure", + "members":{ + "AutoScalingGroupName":{"shape":"ResourceName"}, + "LoadBalancerNames":{"shape":"LoadBalancerNames"} + } + }, + "AutoScalingGroup":{ + "type":"structure", + "required":[ + "AutoScalingGroupName", + "MinSize", + "MaxSize", + "DesiredCapacity", + "DefaultCooldown", + "AvailabilityZones", + "HealthCheckType", + "CreatedTime" + ], + "members":{ + "AutoScalingGroupName":{"shape":"XmlStringMaxLen255"}, + "AutoScalingGroupARN":{"shape":"ResourceName"}, + "LaunchConfigurationName":{"shape":"XmlStringMaxLen255"}, + "MinSize":{"shape":"AutoScalingGroupMinSize"}, + "MaxSize":{"shape":"AutoScalingGroupMaxSize"}, + "DesiredCapacity":{"shape":"AutoScalingGroupDesiredCapacity"}, + "DefaultCooldown":{"shape":"Cooldown"}, + "AvailabilityZones":{"shape":"AvailabilityZones"}, + "LoadBalancerNames":{"shape":"LoadBalancerNames"}, + "HealthCheckType":{"shape":"XmlStringMaxLen32"}, + "HealthCheckGracePeriod":{"shape":"HealthCheckGracePeriod"}, + "Instances":{"shape":"Instances"}, + "CreatedTime":{"shape":"TimestampType"}, + "SuspendedProcesses":{"shape":"SuspendedProcesses"}, + "PlacementGroup":{"shape":"XmlStringMaxLen255"}, + "VPCZoneIdentifier":{"shape":"XmlStringMaxLen255"}, + "EnabledMetrics":{"shape":"EnabledMetrics"}, + "Status":{"shape":"XmlStringMaxLen255"}, + "Tags":{"shape":"TagDescriptionList"}, + "TerminationPolicies":{"shape":"TerminationPolicies"}, + "NewInstancesProtectedFromScaleIn":{"shape":"InstanceProtected"} + } + }, + "AutoScalingGroupDesiredCapacity":{"type":"integer"}, + "AutoScalingGroupMaxSize":{"type":"integer"}, + "AutoScalingGroupMinSize":{"type":"integer"}, + "AutoScalingGroupNames":{ + "type":"list", + "member":{"shape":"ResourceName"} + }, + "AutoScalingGroupNamesType":{ + "type":"structure", + "members":{ + "AutoScalingGroupNames":{"shape":"AutoScalingGroupNames"}, + "NextToken":{"shape":"XmlString"}, + "MaxRecords":{"shape":"MaxRecords"} + } + }, + "AutoScalingGroups":{ + "type":"list", + "member":{"shape":"AutoScalingGroup"} + }, + "AutoScalingGroupsType":{ + "type":"structure", + "required":["AutoScalingGroups"], + "members":{ + "AutoScalingGroups":{"shape":"AutoScalingGroups"}, + "NextToken":{"shape":"XmlString"} + } + }, + "AutoScalingInstanceDetails":{ + "type":"structure", + 
"required":[ + "InstanceId", + "AutoScalingGroupName", + "AvailabilityZone", + "LifecycleState", + "HealthStatus", + "LaunchConfigurationName", + "ProtectedFromScaleIn" + ], + "members":{ + "InstanceId":{"shape":"XmlStringMaxLen19"}, + "AutoScalingGroupName":{"shape":"XmlStringMaxLen255"}, + "AvailabilityZone":{"shape":"XmlStringMaxLen255"}, + "LifecycleState":{"shape":"XmlStringMaxLen32"}, + "HealthStatus":{"shape":"XmlStringMaxLen32"}, + "LaunchConfigurationName":{"shape":"XmlStringMaxLen255"}, + "ProtectedFromScaleIn":{"shape":"InstanceProtected"} + } + }, + "AutoScalingInstances":{ + "type":"list", + "member":{"shape":"AutoScalingInstanceDetails"} + }, + "AutoScalingInstancesType":{ + "type":"structure", + "members":{ + "AutoScalingInstances":{"shape":"AutoScalingInstances"}, + "NextToken":{"shape":"XmlString"} + } + }, + "AutoScalingNotificationTypes":{ + "type":"list", + "member":{"shape":"XmlStringMaxLen255"} + }, + "AvailabilityZones":{ + "type":"list", + "member":{"shape":"XmlStringMaxLen255"}, + "min":1 + }, + "BlockDeviceEbsDeleteOnTermination":{"type":"boolean"}, + "BlockDeviceEbsEncrypted":{"type":"boolean"}, + "BlockDeviceEbsIops":{ + "type":"integer", + "max":20000, + "min":100 + }, + "BlockDeviceEbsVolumeSize":{ + "type":"integer", + "max":16384, + "min":1 + }, + "BlockDeviceEbsVolumeType":{ + "type":"string", + "max":255, + "min":1 + }, + "BlockDeviceMapping":{ + "type":"structure", + "required":["DeviceName"], + "members":{ + "VirtualName":{"shape":"XmlStringMaxLen255"}, + "DeviceName":{"shape":"XmlStringMaxLen255"}, + "Ebs":{"shape":"Ebs"}, + "NoDevice":{"shape":"NoDevice"} + } + }, + "BlockDeviceMappings":{ + "type":"list", + "member":{"shape":"BlockDeviceMapping"} + }, + "ClassicLinkVPCSecurityGroups":{ + "type":"list", + "member":{"shape":"XmlStringMaxLen255"} + }, + "CompleteLifecycleActionAnswer":{ + "type":"structure", + "members":{ + } + }, + "CompleteLifecycleActionType":{ + "type":"structure", + "required":[ + "LifecycleHookName", + "AutoScalingGroupName", + "LifecycleActionResult" + ], + "members":{ + "LifecycleHookName":{"shape":"AsciiStringMaxLen255"}, + "AutoScalingGroupName":{"shape":"ResourceName"}, + "LifecycleActionToken":{"shape":"LifecycleActionToken"}, + "LifecycleActionResult":{"shape":"LifecycleActionResult"}, + "InstanceId":{"shape":"XmlStringMaxLen19"} + } + }, + "Cooldown":{"type":"integer"}, + "CreateAutoScalingGroupType":{ + "type":"structure", + "required":[ + "AutoScalingGroupName", + "MinSize", + "MaxSize" + ], + "members":{ + "AutoScalingGroupName":{"shape":"XmlStringMaxLen255"}, + "LaunchConfigurationName":{"shape":"ResourceName"}, + "InstanceId":{"shape":"XmlStringMaxLen19"}, + "MinSize":{"shape":"AutoScalingGroupMinSize"}, + "MaxSize":{"shape":"AutoScalingGroupMaxSize"}, + "DesiredCapacity":{"shape":"AutoScalingGroupDesiredCapacity"}, + "DefaultCooldown":{"shape":"Cooldown"}, + "AvailabilityZones":{"shape":"AvailabilityZones"}, + "LoadBalancerNames":{"shape":"LoadBalancerNames"}, + "HealthCheckType":{"shape":"XmlStringMaxLen32"}, + "HealthCheckGracePeriod":{"shape":"HealthCheckGracePeriod"}, + "PlacementGroup":{"shape":"XmlStringMaxLen255"}, + "VPCZoneIdentifier":{"shape":"XmlStringMaxLen255"}, + "TerminationPolicies":{"shape":"TerminationPolicies"}, + "NewInstancesProtectedFromScaleIn":{"shape":"InstanceProtected"}, + "Tags":{"shape":"Tags"} + } + }, + "CreateLaunchConfigurationType":{ + "type":"structure", + "required":["LaunchConfigurationName"], + "members":{ + "LaunchConfigurationName":{"shape":"XmlStringMaxLen255"}, + 
"ImageId":{"shape":"XmlStringMaxLen255"}, + "KeyName":{"shape":"XmlStringMaxLen255"}, + "SecurityGroups":{"shape":"SecurityGroups"}, + "ClassicLinkVPCId":{"shape":"XmlStringMaxLen255"}, + "ClassicLinkVPCSecurityGroups":{"shape":"ClassicLinkVPCSecurityGroups"}, + "UserData":{"shape":"XmlStringUserData"}, + "InstanceId":{"shape":"XmlStringMaxLen19"}, + "InstanceType":{"shape":"XmlStringMaxLen255"}, + "KernelId":{"shape":"XmlStringMaxLen255"}, + "RamdiskId":{"shape":"XmlStringMaxLen255"}, + "BlockDeviceMappings":{"shape":"BlockDeviceMappings"}, + "InstanceMonitoring":{"shape":"InstanceMonitoring"}, + "SpotPrice":{"shape":"SpotPrice"}, + "IamInstanceProfile":{"shape":"XmlStringMaxLen1600"}, + "EbsOptimized":{"shape":"EbsOptimized"}, + "AssociatePublicIpAddress":{"shape":"AssociatePublicIpAddress"}, + "PlacementTenancy":{"shape":"XmlStringMaxLen64"} + } + }, + "CreateOrUpdateTagsType":{ + "type":"structure", + "required":["Tags"], + "members":{ + "Tags":{"shape":"Tags"} + } + }, + "DeleteAutoScalingGroupType":{ + "type":"structure", + "required":["AutoScalingGroupName"], + "members":{ + "AutoScalingGroupName":{"shape":"ResourceName"}, + "ForceDelete":{"shape":"ForceDelete"} + } + }, + "DeleteLifecycleHookAnswer":{ + "type":"structure", + "members":{ + } + }, + "DeleteLifecycleHookType":{ + "type":"structure", + "required":[ + "LifecycleHookName", + "AutoScalingGroupName" + ], + "members":{ + "LifecycleHookName":{"shape":"AsciiStringMaxLen255"}, + "AutoScalingGroupName":{"shape":"ResourceName"} + } + }, + "DeleteNotificationConfigurationType":{ + "type":"structure", + "required":[ + "AutoScalingGroupName", + "TopicARN" + ], + "members":{ + "AutoScalingGroupName":{"shape":"ResourceName"}, + "TopicARN":{"shape":"ResourceName"} + } + }, + "DeletePolicyType":{ + "type":"structure", + "required":["PolicyName"], + "members":{ + "AutoScalingGroupName":{"shape":"ResourceName"}, + "PolicyName":{"shape":"ResourceName"} + } + }, + "DeleteScheduledActionType":{ + "type":"structure", + "required":["ScheduledActionName"], + "members":{ + "AutoScalingGroupName":{"shape":"ResourceName"}, + "ScheduledActionName":{"shape":"ResourceName"} + } + }, + "DeleteTagsType":{ + "type":"structure", + "required":["Tags"], + "members":{ + "Tags":{"shape":"Tags"} + } + }, + "DescribeAccountLimitsAnswer":{ + "type":"structure", + "members":{ + "MaxNumberOfAutoScalingGroups":{"shape":"MaxNumberOfAutoScalingGroups"}, + "MaxNumberOfLaunchConfigurations":{"shape":"MaxNumberOfLaunchConfigurations"}, + "NumberOfAutoScalingGroups":{"shape":"NumberOfAutoScalingGroups"}, + "NumberOfLaunchConfigurations":{"shape":"NumberOfLaunchConfigurations"} + } + }, + "DescribeAdjustmentTypesAnswer":{ + "type":"structure", + "members":{ + "AdjustmentTypes":{"shape":"AdjustmentTypes"} + } + }, + "DescribeAutoScalingInstancesType":{ + "type":"structure", + "members":{ + "InstanceIds":{"shape":"InstanceIds"}, + "MaxRecords":{"shape":"MaxRecords"}, + "NextToken":{"shape":"XmlString"} + } + }, + "DescribeAutoScalingNotificationTypesAnswer":{ + "type":"structure", + "members":{ + "AutoScalingNotificationTypes":{"shape":"AutoScalingNotificationTypes"} + } + }, + "DescribeLifecycleHookTypesAnswer":{ + "type":"structure", + "members":{ + "LifecycleHookTypes":{"shape":"AutoScalingNotificationTypes"} + } + }, + "DescribeLifecycleHooksAnswer":{ + "type":"structure", + "members":{ + "LifecycleHooks":{"shape":"LifecycleHooks"} + } + }, + "DescribeLifecycleHooksType":{ + "type":"structure", + "required":["AutoScalingGroupName"], + "members":{ + 
"AutoScalingGroupName":{"shape":"ResourceName"}, + "LifecycleHookNames":{"shape":"LifecycleHookNames"} + } + }, + "DescribeLoadBalancersRequest":{ + "type":"structure", + "required":["AutoScalingGroupName"], + "members":{ + "AutoScalingGroupName":{"shape":"ResourceName"}, + "NextToken":{"shape":"XmlString"}, + "MaxRecords":{"shape":"MaxRecords"} + } + }, + "DescribeLoadBalancersResponse":{ + "type":"structure", + "members":{ + "LoadBalancers":{"shape":"LoadBalancerStates"}, + "NextToken":{"shape":"XmlString"} + } + }, + "DescribeMetricCollectionTypesAnswer":{ + "type":"structure", + "members":{ + "Metrics":{"shape":"MetricCollectionTypes"}, + "Granularities":{"shape":"MetricGranularityTypes"} + } + }, + "DescribeNotificationConfigurationsAnswer":{ + "type":"structure", + "required":["NotificationConfigurations"], + "members":{ + "NotificationConfigurations":{"shape":"NotificationConfigurations"}, + "NextToken":{"shape":"XmlString"} + } + }, + "DescribeNotificationConfigurationsType":{ + "type":"structure", + "members":{ + "AutoScalingGroupNames":{"shape":"AutoScalingGroupNames"}, + "NextToken":{"shape":"XmlString"}, + "MaxRecords":{"shape":"MaxRecords"} + } + }, + "DescribePoliciesType":{ + "type":"structure", + "members":{ + "AutoScalingGroupName":{"shape":"ResourceName"}, + "PolicyNames":{"shape":"PolicyNames"}, + "PolicyTypes":{"shape":"PolicyTypes"}, + "NextToken":{"shape":"XmlString"}, + "MaxRecords":{"shape":"MaxRecords"} + } + }, + "DescribeScalingActivitiesType":{ + "type":"structure", + "members":{ + "ActivityIds":{"shape":"ActivityIds"}, + "AutoScalingGroupName":{"shape":"ResourceName"}, + "MaxRecords":{"shape":"MaxRecords"}, + "NextToken":{"shape":"XmlString"} + } + }, + "DescribeScheduledActionsType":{ + "type":"structure", + "members":{ + "AutoScalingGroupName":{"shape":"ResourceName"}, + "ScheduledActionNames":{"shape":"ScheduledActionNames"}, + "StartTime":{"shape":"TimestampType"}, + "EndTime":{"shape":"TimestampType"}, + "NextToken":{"shape":"XmlString"}, + "MaxRecords":{"shape":"MaxRecords"} + } + }, + "DescribeTagsType":{ + "type":"structure", + "members":{ + "Filters":{"shape":"Filters"}, + "NextToken":{"shape":"XmlString"}, + "MaxRecords":{"shape":"MaxRecords"} + } + }, + "DescribeTerminationPolicyTypesAnswer":{ + "type":"structure", + "members":{ + "TerminationPolicyTypes":{"shape":"TerminationPolicies"} + } + }, + "DetachInstancesAnswer":{ + "type":"structure", + "members":{ + "Activities":{"shape":"Activities"} + } + }, + "DetachInstancesQuery":{ + "type":"structure", + "required":[ + "AutoScalingGroupName", + "ShouldDecrementDesiredCapacity" + ], + "members":{ + "InstanceIds":{"shape":"InstanceIds"}, + "AutoScalingGroupName":{"shape":"ResourceName"}, + "ShouldDecrementDesiredCapacity":{"shape":"ShouldDecrementDesiredCapacity"} + } + }, + "DetachLoadBalancersResultType":{ + "type":"structure", + "members":{ + } + }, + "DetachLoadBalancersType":{ + "type":"structure", + "members":{ + "AutoScalingGroupName":{"shape":"ResourceName"}, + "LoadBalancerNames":{"shape":"LoadBalancerNames"} + } + }, + "DisableMetricsCollectionQuery":{ + "type":"structure", + "required":["AutoScalingGroupName"], + "members":{ + "AutoScalingGroupName":{"shape":"ResourceName"}, + "Metrics":{"shape":"Metrics"} + } + }, + "Ebs":{ + "type":"structure", + "members":{ + "SnapshotId":{"shape":"XmlStringMaxLen255"}, + "VolumeSize":{"shape":"BlockDeviceEbsVolumeSize"}, + "VolumeType":{"shape":"BlockDeviceEbsVolumeType"}, + "DeleteOnTermination":{"shape":"BlockDeviceEbsDeleteOnTermination"}, + 
"Iops":{"shape":"BlockDeviceEbsIops"}, + "Encrypted":{"shape":"BlockDeviceEbsEncrypted"} + } + }, + "EbsOptimized":{"type":"boolean"}, + "EnableMetricsCollectionQuery":{ + "type":"structure", + "required":[ + "AutoScalingGroupName", + "Granularity" + ], + "members":{ + "AutoScalingGroupName":{"shape":"ResourceName"}, + "Metrics":{"shape":"Metrics"}, + "Granularity":{"shape":"XmlStringMaxLen255"} + } + }, + "EnabledMetric":{ + "type":"structure", + "members":{ + "Metric":{"shape":"XmlStringMaxLen255"}, + "Granularity":{"shape":"XmlStringMaxLen255"} + } + }, + "EnabledMetrics":{ + "type":"list", + "member":{"shape":"EnabledMetric"} + }, + "EnterStandbyAnswer":{ + "type":"structure", + "members":{ + "Activities":{"shape":"Activities"} + } + }, + "EnterStandbyQuery":{ + "type":"structure", + "required":[ + "AutoScalingGroupName", + "ShouldDecrementDesiredCapacity" + ], + "members":{ + "InstanceIds":{"shape":"InstanceIds"}, + "AutoScalingGroupName":{"shape":"ResourceName"}, + "ShouldDecrementDesiredCapacity":{"shape":"ShouldDecrementDesiredCapacity"} + } + }, + "EstimatedInstanceWarmup":{"type":"integer"}, + "ExecutePolicyType":{ + "type":"structure", + "required":["PolicyName"], + "members":{ + "AutoScalingGroupName":{"shape":"ResourceName"}, + "PolicyName":{"shape":"ResourceName"}, + "HonorCooldown":{"shape":"HonorCooldown"}, + "MetricValue":{"shape":"MetricScale"}, + "BreachThreshold":{"shape":"MetricScale"} + } + }, + "ExitStandbyAnswer":{ + "type":"structure", + "members":{ + "Activities":{"shape":"Activities"} + } + }, + "ExitStandbyQuery":{ + "type":"structure", + "required":["AutoScalingGroupName"], + "members":{ + "InstanceIds":{"shape":"InstanceIds"}, + "AutoScalingGroupName":{"shape":"ResourceName"} + } + }, + "Filter":{ + "type":"structure", + "members":{ + "Name":{"shape":"XmlString"}, + "Values":{"shape":"Values"} + } + }, + "Filters":{ + "type":"list", + "member":{"shape":"Filter"} + }, + "ForceDelete":{"type":"boolean"}, + "GlobalTimeout":{"type":"integer"}, + "HealthCheckGracePeriod":{"type":"integer"}, + "HeartbeatTimeout":{"type":"integer"}, + "HonorCooldown":{"type":"boolean"}, + "Instance":{ + "type":"structure", + "required":[ + "InstanceId", + "AvailabilityZone", + "LifecycleState", + "HealthStatus", + "LaunchConfigurationName", + "ProtectedFromScaleIn" + ], + "members":{ + "InstanceId":{"shape":"XmlStringMaxLen19"}, + "AvailabilityZone":{"shape":"XmlStringMaxLen255"}, + "LifecycleState":{"shape":"LifecycleState"}, + "HealthStatus":{"shape":"XmlStringMaxLen32"}, + "LaunchConfigurationName":{"shape":"XmlStringMaxLen255"}, + "ProtectedFromScaleIn":{"shape":"InstanceProtected"} + } + }, + "InstanceIds":{ + "type":"list", + "member":{"shape":"XmlStringMaxLen19"} + }, + "InstanceMonitoring":{ + "type":"structure", + "members":{ + "Enabled":{"shape":"MonitoringEnabled"} + } + }, + "InstanceProtected":{"type":"boolean"}, + "Instances":{ + "type":"list", + "member":{"shape":"Instance"} + }, + "InvalidNextToken":{ + "type":"structure", + "members":{ + "message":{"shape":"XmlStringMaxLen255"} + }, + "error":{ + "code":"InvalidNextToken", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "LaunchConfiguration":{ + "type":"structure", + "required":[ + "LaunchConfigurationName", + "ImageId", + "InstanceType", + "CreatedTime" + ], + "members":{ + "LaunchConfigurationName":{"shape":"XmlStringMaxLen255"}, + "LaunchConfigurationARN":{"shape":"ResourceName"}, + "ImageId":{"shape":"XmlStringMaxLen255"}, + "KeyName":{"shape":"XmlStringMaxLen255"}, + 
"SecurityGroups":{"shape":"SecurityGroups"}, + "ClassicLinkVPCId":{"shape":"XmlStringMaxLen255"}, + "ClassicLinkVPCSecurityGroups":{"shape":"ClassicLinkVPCSecurityGroups"}, + "UserData":{"shape":"XmlStringUserData"}, + "InstanceType":{"shape":"XmlStringMaxLen255"}, + "KernelId":{"shape":"XmlStringMaxLen255"}, + "RamdiskId":{"shape":"XmlStringMaxLen255"}, + "BlockDeviceMappings":{"shape":"BlockDeviceMappings"}, + "InstanceMonitoring":{"shape":"InstanceMonitoring"}, + "SpotPrice":{"shape":"SpotPrice"}, + "IamInstanceProfile":{"shape":"XmlStringMaxLen1600"}, + "CreatedTime":{"shape":"TimestampType"}, + "EbsOptimized":{"shape":"EbsOptimized"}, + "AssociatePublicIpAddress":{"shape":"AssociatePublicIpAddress"}, + "PlacementTenancy":{"shape":"XmlStringMaxLen64"} + } + }, + "LaunchConfigurationNameType":{ + "type":"structure", + "required":["LaunchConfigurationName"], + "members":{ + "LaunchConfigurationName":{"shape":"ResourceName"} + } + }, + "LaunchConfigurationNames":{ + "type":"list", + "member":{"shape":"ResourceName"} + }, + "LaunchConfigurationNamesType":{ + "type":"structure", + "members":{ + "LaunchConfigurationNames":{"shape":"LaunchConfigurationNames"}, + "NextToken":{"shape":"XmlString"}, + "MaxRecords":{"shape":"MaxRecords"} + } + }, + "LaunchConfigurations":{ + "type":"list", + "member":{"shape":"LaunchConfiguration"} + }, + "LaunchConfigurationsType":{ + "type":"structure", + "required":["LaunchConfigurations"], + "members":{ + "LaunchConfigurations":{"shape":"LaunchConfigurations"}, + "NextToken":{"shape":"XmlString"} + } + }, + "LifecycleActionResult":{"type":"string"}, + "LifecycleActionToken":{ + "type":"string", + "max":36, + "min":36 + }, + "LifecycleHook":{ + "type":"structure", + "members":{ + "LifecycleHookName":{"shape":"AsciiStringMaxLen255"}, + "AutoScalingGroupName":{"shape":"ResourceName"}, + "LifecycleTransition":{"shape":"LifecycleTransition"}, + "NotificationTargetARN":{"shape":"ResourceName"}, + "RoleARN":{"shape":"ResourceName"}, + "NotificationMetadata":{"shape":"XmlStringMaxLen1023"}, + "HeartbeatTimeout":{"shape":"HeartbeatTimeout"}, + "GlobalTimeout":{"shape":"GlobalTimeout"}, + "DefaultResult":{"shape":"LifecycleActionResult"} + } + }, + "LifecycleHookNames":{ + "type":"list", + "member":{"shape":"AsciiStringMaxLen255"} + }, + "LifecycleHooks":{ + "type":"list", + "member":{"shape":"LifecycleHook"} + }, + "LifecycleState":{ + "type":"string", + "enum":[ + "Pending", + "Pending:Wait", + "Pending:Proceed", + "Quarantined", + "InService", + "Terminating", + "Terminating:Wait", + "Terminating:Proceed", + "Terminated", + "Detaching", + "Detached", + "EnteringStandby", + "Standby" + ] + }, + "LifecycleTransition":{"type":"string"}, + "LimitExceededFault":{ + "type":"structure", + "members":{ + "message":{"shape":"XmlStringMaxLen255"} + }, + "error":{ + "code":"LimitExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "LoadBalancerNames":{ + "type":"list", + "member":{"shape":"XmlStringMaxLen255"} + }, + "LoadBalancerState":{ + "type":"structure", + "members":{ + "LoadBalancerName":{"shape":"XmlStringMaxLen255"}, + "State":{"shape":"XmlStringMaxLen255"} + } + }, + "LoadBalancerStates":{ + "type":"list", + "member":{"shape":"LoadBalancerState"} + }, + "MaxNumberOfAutoScalingGroups":{"type":"integer"}, + "MaxNumberOfLaunchConfigurations":{"type":"integer"}, + "MaxRecords":{"type":"integer"}, + "MetricCollectionType":{ + "type":"structure", + "members":{ + "Metric":{"shape":"XmlStringMaxLen255"} + } + }, + "MetricCollectionTypes":{ + 
"type":"list", + "member":{"shape":"MetricCollectionType"} + }, + "MetricGranularityType":{ + "type":"structure", + "members":{ + "Granularity":{"shape":"XmlStringMaxLen255"} + } + }, + "MetricGranularityTypes":{ + "type":"list", + "member":{"shape":"MetricGranularityType"} + }, + "MetricScale":{"type":"double"}, + "Metrics":{ + "type":"list", + "member":{"shape":"XmlStringMaxLen255"} + }, + "MinAdjustmentMagnitude":{"type":"integer"}, + "MinAdjustmentStep":{ + "type":"integer", + "deprecated":true + }, + "MonitoringEnabled":{"type":"boolean"}, + "NoDevice":{"type":"boolean"}, + "NotificationConfiguration":{ + "type":"structure", + "members":{ + "AutoScalingGroupName":{"shape":"ResourceName"}, + "TopicARN":{"shape":"ResourceName"}, + "NotificationType":{"shape":"XmlStringMaxLen255"} + } + }, + "NotificationConfigurations":{ + "type":"list", + "member":{"shape":"NotificationConfiguration"} + }, + "NotificationTargetResourceName":{ + "type":"string", + "max":1600, + "min":0, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "NumberOfAutoScalingGroups":{"type":"integer"}, + "NumberOfLaunchConfigurations":{"type":"integer"}, + "PoliciesType":{ + "type":"structure", + "members":{ + "ScalingPolicies":{"shape":"ScalingPolicies"}, + "NextToken":{"shape":"XmlString"} + } + }, + "PolicyARNType":{ + "type":"structure", + "members":{ + "PolicyARN":{"shape":"ResourceName"} + } + }, + "PolicyIncrement":{"type":"integer"}, + "PolicyNames":{ + "type":"list", + "member":{"shape":"ResourceName"} + }, + "PolicyTypes":{ + "type":"list", + "member":{"shape":"XmlStringMaxLen64"} + }, + "ProcessNames":{ + "type":"list", + "member":{"shape":"XmlStringMaxLen255"} + }, + "ProcessType":{ + "type":"structure", + "required":["ProcessName"], + "members":{ + "ProcessName":{"shape":"XmlStringMaxLen255"} + } + }, + "Processes":{ + "type":"list", + "member":{"shape":"ProcessType"} + }, + "ProcessesType":{ + "type":"structure", + "members":{ + "Processes":{"shape":"Processes"} + } + }, + "Progress":{"type":"integer"}, + "PropagateAtLaunch":{"type":"boolean"}, + "ProtectedFromScaleIn":{"type":"boolean"}, + "PutLifecycleHookAnswer":{ + "type":"structure", + "members":{ + } + }, + "PutLifecycleHookType":{ + "type":"structure", + "required":[ + "LifecycleHookName", + "AutoScalingGroupName" + ], + "members":{ + "LifecycleHookName":{"shape":"AsciiStringMaxLen255"}, + "AutoScalingGroupName":{"shape":"ResourceName"}, + "LifecycleTransition":{"shape":"LifecycleTransition"}, + "RoleARN":{"shape":"ResourceName"}, + "NotificationTargetARN":{"shape":"NotificationTargetResourceName"}, + "NotificationMetadata":{"shape":"XmlStringMaxLen1023"}, + "HeartbeatTimeout":{"shape":"HeartbeatTimeout"}, + "DefaultResult":{"shape":"LifecycleActionResult"} + } + }, + "PutNotificationConfigurationType":{ + "type":"structure", + "required":[ + "AutoScalingGroupName", + "TopicARN", + "NotificationTypes" + ], + "members":{ + "AutoScalingGroupName":{"shape":"ResourceName"}, + "TopicARN":{"shape":"ResourceName"}, + "NotificationTypes":{"shape":"AutoScalingNotificationTypes"} + } + }, + "PutScalingPolicyType":{ + "type":"structure", + "required":[ + "AutoScalingGroupName", + "PolicyName", + "AdjustmentType" + ], + "members":{ + "AutoScalingGroupName":{"shape":"ResourceName"}, + "PolicyName":{"shape":"XmlStringMaxLen255"}, + "PolicyType":{"shape":"XmlStringMaxLen64"}, + "AdjustmentType":{"shape":"XmlStringMaxLen255"}, + "MinAdjustmentStep":{"shape":"MinAdjustmentStep"}, + 
"MinAdjustmentMagnitude":{"shape":"MinAdjustmentMagnitude"}, + "ScalingAdjustment":{"shape":"PolicyIncrement"}, + "Cooldown":{"shape":"Cooldown"}, + "MetricAggregationType":{"shape":"XmlStringMaxLen32"}, + "StepAdjustments":{"shape":"StepAdjustments"}, + "EstimatedInstanceWarmup":{"shape":"EstimatedInstanceWarmup"} + } + }, + "PutScheduledUpdateGroupActionType":{ + "type":"structure", + "required":[ + "AutoScalingGroupName", + "ScheduledActionName" + ], + "members":{ + "AutoScalingGroupName":{"shape":"ResourceName"}, + "ScheduledActionName":{"shape":"XmlStringMaxLen255"}, + "Time":{"shape":"TimestampType"}, + "StartTime":{"shape":"TimestampType"}, + "EndTime":{"shape":"TimestampType"}, + "Recurrence":{"shape":"XmlStringMaxLen255"}, + "MinSize":{"shape":"AutoScalingGroupMinSize"}, + "MaxSize":{"shape":"AutoScalingGroupMaxSize"}, + "DesiredCapacity":{"shape":"AutoScalingGroupDesiredCapacity"} + } + }, + "RecordLifecycleActionHeartbeatAnswer":{ + "type":"structure", + "members":{ + } + }, + "RecordLifecycleActionHeartbeatType":{ + "type":"structure", + "required":[ + "LifecycleHookName", + "AutoScalingGroupName" + ], + "members":{ + "LifecycleHookName":{"shape":"AsciiStringMaxLen255"}, + "AutoScalingGroupName":{"shape":"ResourceName"}, + "LifecycleActionToken":{"shape":"LifecycleActionToken"}, + "InstanceId":{"shape":"XmlStringMaxLen19"} + } + }, + "ResourceContentionFault":{ + "type":"structure", + "members":{ + "message":{"shape":"XmlStringMaxLen255"} + }, + "error":{ + "code":"ResourceContention", + "httpStatusCode":500, + "senderFault":true + }, + "exception":true + }, + "ResourceInUseFault":{ + "type":"structure", + "members":{ + "message":{"shape":"XmlStringMaxLen255"} + }, + "error":{ + "code":"ResourceInUse", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ResourceName":{ + "type":"string", + "max":1600, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "ScalingActivityInProgressFault":{ + "type":"structure", + "members":{ + "message":{"shape":"XmlStringMaxLen255"} + }, + "error":{ + "code":"ScalingActivityInProgress", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ScalingActivityStatusCode":{ + "type":"string", + "enum":[ + "PendingSpotBidPlacement", + "WaitingForSpotInstanceRequestId", + "WaitingForSpotInstanceId", + "WaitingForInstanceId", + "PreInService", + "InProgress", + "WaitingForELBConnectionDraining", + "MidLifecycleAction", + "WaitingForInstanceWarmup", + "Successful", + "Failed", + "Cancelled" + ] + }, + "ScalingPolicies":{ + "type":"list", + "member":{"shape":"ScalingPolicy"} + }, + "ScalingPolicy":{ + "type":"structure", + "members":{ + "AutoScalingGroupName":{"shape":"XmlStringMaxLen255"}, + "PolicyName":{"shape":"XmlStringMaxLen255"}, + "PolicyARN":{"shape":"ResourceName"}, + "PolicyType":{"shape":"XmlStringMaxLen64"}, + "AdjustmentType":{"shape":"XmlStringMaxLen255"}, + "MinAdjustmentStep":{"shape":"MinAdjustmentStep"}, + "MinAdjustmentMagnitude":{"shape":"MinAdjustmentMagnitude"}, + "ScalingAdjustment":{"shape":"PolicyIncrement"}, + "Cooldown":{"shape":"Cooldown"}, + "StepAdjustments":{"shape":"StepAdjustments"}, + "MetricAggregationType":{"shape":"XmlStringMaxLen32"}, + "EstimatedInstanceWarmup":{"shape":"EstimatedInstanceWarmup"}, + "Alarms":{"shape":"Alarms"} + } + }, + "ScalingProcessQuery":{ + "type":"structure", + "required":["AutoScalingGroupName"], + "members":{ + "AutoScalingGroupName":{"shape":"ResourceName"}, + 
"ScalingProcesses":{"shape":"ProcessNames"} + } + }, + "ScheduledActionNames":{ + "type":"list", + "member":{"shape":"ResourceName"} + }, + "ScheduledActionsType":{ + "type":"structure", + "members":{ + "ScheduledUpdateGroupActions":{"shape":"ScheduledUpdateGroupActions"}, + "NextToken":{"shape":"XmlString"} + } + }, + "ScheduledUpdateGroupAction":{ + "type":"structure", + "members":{ + "AutoScalingGroupName":{"shape":"XmlStringMaxLen255"}, + "ScheduledActionName":{"shape":"XmlStringMaxLen255"}, + "ScheduledActionARN":{"shape":"ResourceName"}, + "Time":{"shape":"TimestampType"}, + "StartTime":{"shape":"TimestampType"}, + "EndTime":{"shape":"TimestampType"}, + "Recurrence":{"shape":"XmlStringMaxLen255"}, + "MinSize":{"shape":"AutoScalingGroupMinSize"}, + "MaxSize":{"shape":"AutoScalingGroupMaxSize"}, + "DesiredCapacity":{"shape":"AutoScalingGroupDesiredCapacity"} + } + }, + "ScheduledUpdateGroupActions":{ + "type":"list", + "member":{"shape":"ScheduledUpdateGroupAction"} + }, + "SecurityGroups":{ + "type":"list", + "member":{"shape":"XmlString"} + }, + "SetDesiredCapacityType":{ + "type":"structure", + "required":[ + "AutoScalingGroupName", + "DesiredCapacity" + ], + "members":{ + "AutoScalingGroupName":{"shape":"ResourceName"}, + "DesiredCapacity":{"shape":"AutoScalingGroupDesiredCapacity"}, + "HonorCooldown":{"shape":"HonorCooldown"} + } + }, + "SetInstanceHealthQuery":{ + "type":"structure", + "required":[ + "InstanceId", + "HealthStatus" + ], + "members":{ + "InstanceId":{"shape":"XmlStringMaxLen19"}, + "HealthStatus":{"shape":"XmlStringMaxLen32"}, + "ShouldRespectGracePeriod":{"shape":"ShouldRespectGracePeriod"} + } + }, + "SetInstanceProtectionAnswer":{ + "type":"structure", + "members":{ + } + }, + "SetInstanceProtectionQuery":{ + "type":"structure", + "required":[ + "InstanceIds", + "AutoScalingGroupName", + "ProtectedFromScaleIn" + ], + "members":{ + "InstanceIds":{"shape":"InstanceIds"}, + "AutoScalingGroupName":{"shape":"ResourceName"}, + "ProtectedFromScaleIn":{"shape":"ProtectedFromScaleIn"} + } + }, + "ShouldDecrementDesiredCapacity":{"type":"boolean"}, + "ShouldRespectGracePeriod":{"type":"boolean"}, + "SpotPrice":{ + "type":"string", + "max":255, + "min":1 + }, + "StepAdjustment":{ + "type":"structure", + "required":["ScalingAdjustment"], + "members":{ + "MetricIntervalLowerBound":{"shape":"MetricScale"}, + "MetricIntervalUpperBound":{"shape":"MetricScale"}, + "ScalingAdjustment":{"shape":"PolicyIncrement"} + } + }, + "StepAdjustments":{ + "type":"list", + "member":{"shape":"StepAdjustment"} + }, + "SuspendedProcess":{ + "type":"structure", + "members":{ + "ProcessName":{"shape":"XmlStringMaxLen255"}, + "SuspensionReason":{"shape":"XmlStringMaxLen255"} + } + }, + "SuspendedProcesses":{ + "type":"list", + "member":{"shape":"SuspendedProcess"} + }, + "Tag":{ + "type":"structure", + "required":["Key"], + "members":{ + "ResourceId":{"shape":"XmlString"}, + "ResourceType":{"shape":"XmlString"}, + "Key":{"shape":"TagKey"}, + "Value":{"shape":"TagValue"}, + "PropagateAtLaunch":{"shape":"PropagateAtLaunch"} + } + }, + "TagDescription":{ + "type":"structure", + "members":{ + "ResourceId":{"shape":"XmlString"}, + "ResourceType":{"shape":"XmlString"}, + "Key":{"shape":"TagKey"}, + "Value":{"shape":"TagValue"}, + "PropagateAtLaunch":{"shape":"PropagateAtLaunch"} + } + }, + "TagDescriptionList":{ + "type":"list", + "member":{"shape":"TagDescription"} + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + 
"pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "Tags":{ + "type":"list", + "member":{"shape":"Tag"} + }, + "TagsType":{ + "type":"structure", + "members":{ + "Tags":{"shape":"TagDescriptionList"}, + "NextToken":{"shape":"XmlString"} + } + }, + "TerminateInstanceInAutoScalingGroupType":{ + "type":"structure", + "required":[ + "InstanceId", + "ShouldDecrementDesiredCapacity" + ], + "members":{ + "InstanceId":{"shape":"XmlStringMaxLen19"}, + "ShouldDecrementDesiredCapacity":{"shape":"ShouldDecrementDesiredCapacity"} + } + }, + "TerminationPolicies":{ + "type":"list", + "member":{"shape":"XmlStringMaxLen1600"} + }, + "TimestampType":{"type":"timestamp"}, + "UpdateAutoScalingGroupType":{ + "type":"structure", + "required":["AutoScalingGroupName"], + "members":{ + "AutoScalingGroupName":{"shape":"ResourceName"}, + "LaunchConfigurationName":{"shape":"ResourceName"}, + "MinSize":{"shape":"AutoScalingGroupMinSize"}, + "MaxSize":{"shape":"AutoScalingGroupMaxSize"}, + "DesiredCapacity":{"shape":"AutoScalingGroupDesiredCapacity"}, + "DefaultCooldown":{"shape":"Cooldown"}, + "AvailabilityZones":{"shape":"AvailabilityZones"}, + "HealthCheckType":{"shape":"XmlStringMaxLen32"}, + "HealthCheckGracePeriod":{"shape":"HealthCheckGracePeriod"}, + "PlacementGroup":{"shape":"XmlStringMaxLen255"}, + "VPCZoneIdentifier":{"shape":"XmlStringMaxLen255"}, + "TerminationPolicies":{"shape":"TerminationPolicies"}, + "NewInstancesProtectedFromScaleIn":{"shape":"InstanceProtected"} + } + }, + "Values":{ + "type":"list", + "member":{"shape":"XmlString"} + }, + "XmlString":{ + "type":"string", + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "XmlStringMaxLen1023":{ + "type":"string", + "max":1023, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "XmlStringMaxLen1600":{ + "type":"string", + "max":1600, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "XmlStringMaxLen19":{ + "type":"string", + "max":19, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "XmlStringMaxLen255":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "XmlStringMaxLen32":{ + "type":"string", + "max":32, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "XmlStringMaxLen64":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "XmlStringUserData":{ + "type":"string", + "max":21847, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/autoscaling/2011-01-01/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/autoscaling/2011-01-01/docs-2.json new file mode 100644 index 000000000..62f4de6bb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/autoscaling/2011-01-01/docs-2.json @@ -0,0 +1,1410 @@ +{ + "version": "2.0", + "service": "Auto Scaling

    Auto Scaling is designed to automatically launch or terminate EC2 instances based on user-defined policies, schedules, and health checks. Use this service in conjunction with the Amazon CloudWatch and Elastic Load Balancing services.

    ", + "operations": { + "AttachInstances": "

    Attaches one or more EC2 instances to the specified Auto Scaling group.

    When you attach instances, Auto Scaling increases the desired capacity of the group by the number of instances being attached. If the number of instances being attached plus the desired capacity of the group exceeds the maximum size of the group, the operation fails.

    For more information, see Attach EC2 Instances to Your Auto Scaling Group in the Auto Scaling Developer Guide.
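    For illustration, a minimal sketch calling this operation through the generated aws-sdk-go client; the region, group name, and instance ID below are placeholders, not values from this patch:

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/autoscaling"
        )

        func main() {
            sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
            svc := autoscaling.New(sess)

            // Attach a running EC2 instance; the group's desired capacity
            // increases by one, per the description above.
            _, err := svc.AttachInstances(&autoscaling.AttachInstancesInput{
                AutoScalingGroupName: aws.String("my-asg"),                         // placeholder
                InstanceIds:          []*string{aws.String("i-0123456789abcdef0")}, // placeholder
            })
            if err != nil {
                fmt.Println("AttachInstances failed:", err)
            }
        }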

    ", + "AttachLoadBalancers": "

    Attaches one or more load balancers to the specified Auto Scaling group.

    To describe the load balancers for an Auto Scaling group, use DescribeLoadBalancers. To detach the load balancer from the Auto Scaling group, use DetachLoadBalancers.

    For more information, see Attach a Load Balancer to Your Auto Scaling Group in the Auto Scaling Developer Guide.

    ", + "CompleteLifecycleAction": "

    Completes the lifecycle action for the specified token or instance with the specified result.

    This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:

    1. (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Auto Scaling launches or terminates instances.
    2. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Auto Scaling to publish lifecycle notifications to the target.
    3. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.
    4. If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state.
    5. If you finish before the timeout period ends, complete the lifecycle action.

    For more information, see Auto Scaling Lifecycle in the Auto Scaling Developer Guide.
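    A sketch of step 5, assuming a client built as in the AttachInstances example above; the group and hook names are placeholders:

        package asgexamples

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/autoscaling"
        )

        // completeLaunchHook tells Auto Scaling to CONTINUE launching the
        // instance once custom bootstrap work has finished.
        func completeLaunchHook(svc *autoscaling.AutoScaling, instanceID string) error {
            _, err := svc.CompleteLifecycleAction(&autoscaling.CompleteLifecycleActionInput{
                AutoScalingGroupName:  aws.String("my-asg"),         // placeholder
                LifecycleHookName:     aws.String("my-launch-hook"), // placeholder
                LifecycleActionResult: aws.String("CONTINUE"),       // or "ABANDON"
                InstanceId:            aws.String(instanceID),
            })
            return err
        }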

    ", + "CreateAutoScalingGroup": "

    Creates an Auto Scaling group with the specified name and attributes.

    If you exceed your maximum limit of Auto Scaling groups, which by default is 20 per region, the call fails. For information about viewing and updating this limit, see DescribeAccountLimits.

    For more information, see Auto Scaling Groups in the Auto Scaling Developer Guide.
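    A minimal sketch, again assuming a client built as in the AttachInstances example; the group and launch configuration names are placeholders:

        package asgexamples

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/autoscaling"
        )

        // createGroup creates a group of one to four instances spread over a
        // single Availability Zone, starting with two.
        func createGroup(svc *autoscaling.AutoScaling) error {
            _, err := svc.CreateAutoScalingGroup(&autoscaling.CreateAutoScalingGroupInput{
                AutoScalingGroupName:    aws.String("my-asg"),           // placeholder
                LaunchConfigurationName: aws.String("my-launch-config"), // placeholder
                MinSize:                 aws.Int64(1),
                MaxSize:                 aws.Int64(4),
                DesiredCapacity:         aws.Int64(2),
                AvailabilityZones:       []*string{aws.String("us-east-1a")},
            })
            return err
        }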

    ", + "CreateLaunchConfiguration": "

    Creates a launch configuration.

    If you exceed your maximum limit of launch configurations, which by default is 100 per region, the call fails. For information about viewing and updating this limit, see DescribeAccountLimits.

    For more information, see Launch Configurations in the Auto Scaling Developer Guide.
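    A matching sketch for the launch configuration referenced above; the AMI, security group, and name are placeholders:

        package asgexamples

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/autoscaling"
        )

        func createLaunchConfig(svc *autoscaling.AutoScaling) error {
            _, err := svc.CreateLaunchConfiguration(&autoscaling.CreateLaunchConfigurationInput{
                LaunchConfigurationName: aws.String("my-launch-config"),       // placeholder
                ImageId:                 aws.String("ami-12345678"),           // placeholder AMI
                InstanceType:            aws.String("t2.micro"),
                SecurityGroups:          []*string{aws.String("sg-12345678")}, // placeholder
            })
            return err
        }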

    ", + "CreateOrUpdateTags": "

    Creates or updates tags for the specified Auto Scaling group.

    When you specify a tag with a key that already exists, the operation overwrites the previous tag definition, and you do not get an error message.

    For more information, see Tagging Auto Scaling Groups and Instances in the Auto Scaling Developer Guide.

    ", + "DeleteAutoScalingGroup": "

    Deletes the specified Auto Scaling group.

    If the group has instances or scaling activities in progress, you must specify the option to force the deletion in order for it to succeed.

    If the group has policies, deleting the group deletes the policies, the underlying alarm actions, and any alarm that no longer has an associated action.

    To remove instances from the Auto Scaling group before deleting it, call DetachInstances with the list of instances and the option to decrement the desired capacity so that Auto Scaling does not launch replacement instances.

    To terminate all instances before deleting the Auto Scaling group, call UpdateAutoScalingGroup and set the minimum size and desired capacity of the Auto Scaling group to zero.
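    As a sketch, a forced deletion that skips the manual drain described above (group name is a placeholder):

        package asgexamples

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/autoscaling"
        )

        func deleteGroup(svc *autoscaling.AutoScaling) error {
            _, err := svc.DeleteAutoScalingGroup(&autoscaling.DeleteAutoScalingGroupInput{
                AutoScalingGroupName: aws.String("my-asg"), // placeholder
                // ForceDelete terminates any remaining instances instead of failing.
                ForceDelete: aws.Bool(true),
            })
            return err
        }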

    ", + "DeleteLaunchConfiguration": "

    Deletes the specified launch configuration.

    The launch configuration must not be attached to an Auto Scaling group. When this call completes, the launch configuration is no longer available for use.

    ", + "DeleteLifecycleHook": "

    Deletes the specified lifecycle hook.

    If there are any outstanding lifecycle actions, they are completed first (ABANDON for launching instances, CONTINUE for terminating instances).

    ", + "DeleteNotificationConfiguration": "

    Deletes the specified notification.

    ", + "DeletePolicy": "

    Deletes the specified Auto Scaling policy.

    Deleting a policy deletes the underlying alarm action, but does not delete the alarm, even if it no longer has an associated action.

    ", + "DeleteScheduledAction": "

    Deletes the specified scheduled action.

    ", + "DeleteTags": "

    Deletes the specified tags.

    ", + "DescribeAccountLimits": "

    Describes the current Auto Scaling resource limits for your AWS account.

    For information about requesting an increase in these limits, see AWS Service Limits in the Amazon Web Services General Reference.

    ", + "DescribeAdjustmentTypes": "

    Describes the policy adjustment types for use with PutScalingPolicy.

    ", + "DescribeAutoScalingGroups": "

    Describes one or more Auto Scaling groups. If a list of names is not provided, the call describes all Auto Scaling groups.
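    Because results are paginated via NextToken, the generated paginator is the simplest way to walk every group; a minimal sketch:

        package asgexamples

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/service/autoscaling"
        )

        // listGroups pages through all groups; the SDK follows NextToken
        // automatically until the callback returns false or pages run out.
        func listGroups(svc *autoscaling.AutoScaling) error {
            return svc.DescribeAutoScalingGroupsPages(&autoscaling.DescribeAutoScalingGroupsInput{},
                func(page *autoscaling.DescribeAutoScalingGroupsOutput, lastPage bool) bool {
                    for _, g := range page.AutoScalingGroups {
                        fmt.Printf("%s desired=%d\n", *g.AutoScalingGroupName, *g.DesiredCapacity)
                    }
                    return true // continue to the next page
                })
        }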

    ", + "DescribeAutoScalingInstances": "

    Describes one or more Auto Scaling instances. If a list is not provided, the call describes all instances.

    ", + "DescribeAutoScalingNotificationTypes": "

    Describes the notification types that are supported by Auto Scaling.

    ", + "DescribeLaunchConfigurations": "

    Describes one or more launch configurations. If you omit the list of names, then the call describes all launch configurations.

    ", + "DescribeLifecycleHookTypes": "

    Describes the available types of lifecycle hooks.

    ", + "DescribeLifecycleHooks": "

    Describes the lifecycle hooks for the specified Auto Scaling group.

    ", + "DescribeLoadBalancers": "

    Describes the load balancers for the specified Auto Scaling group.

    ", + "DescribeMetricCollectionTypes": "

    Describes the available CloudWatch metrics for Auto Scaling.

    Note that the GroupStandbyInstances metric is not returned by default. You must explicitly request this metric when calling EnableMetricsCollection.

    ", + "DescribeNotificationConfigurations": "

    Describes the notification actions associated with the specified Auto Scaling group.

    ", + "DescribePolicies": "

    Describes the policies for the specified Auto Scaling group.

    ", + "DescribeScalingActivities": "

    Describes one or more scaling activities for the specified Auto Scaling group. If you omit the ActivityIds, the call returns all activities from the past six weeks. Activities are sorted by the start time. Activities still in progress appear first on the list.

    ", + "DescribeScalingProcessTypes": "

    Describes the scaling process types for use with ResumeProcesses and SuspendProcesses.

    ", + "DescribeScheduledActions": "

    Describes the actions scheduled for your Auto Scaling group that haven't run. To describe the actions that have already run, use DescribeScalingActivities.

    ", + "DescribeTags": "

    Describes the specified tags.

    You can use filters to limit the results. For example, you can query for the tags for a specific Auto Scaling group. You can specify multiple values for a filter. A tag must match at least one of the specified values for it to be included in the results.

    You can also specify multiple filters. The result includes information for a particular tag only if it matches all the filters. If there's no match, no special message is returned.

    ", + "DescribeTerminationPolicyTypes": "

    Describes the termination policies supported by Auto Scaling.

    ", + "DetachInstances": "

    Removes one or more instances from the specified Auto Scaling group.

    After the instances are detached, you can manage them independently from the rest of the Auto Scaling group.

    If you do not specify the option to decrement the desired capacity, Auto Scaling launches instances to replace the ones that are detached.

    For more information, see Detach EC2 Instances from Your Auto Scaling Group in the Auto Scaling Developer Guide.
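    A sketch that detaches an instance without triggering a replacement (group name is a placeholder):

        package asgexamples

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/autoscaling"
        )

        func detachInstance(svc *autoscaling.AutoScaling, instanceID string) error {
            _, err := svc.DetachInstances(&autoscaling.DetachInstancesInput{
                AutoScalingGroupName: aws.String("my-asg"), // placeholder
                InstanceIds:          []*string{aws.String(instanceID)},
                // Decrement desired capacity so no replacement instance is launched.
                ShouldDecrementDesiredCapacity: aws.Bool(true),
            })
            return err
        }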

    ", + "DetachLoadBalancers": "

    Removes one or more load balancers from the specified Auto Scaling group.

    When you detach a load balancer, it enters the Removing state while it deregisters the instances in the group. When all instances are deregistered, you can no longer describe the load balancer using DescribeLoadBalancers. The instances themselves remain running.

    ", + "DisableMetricsCollection": "

    Disables monitoring of the specified metrics for the specified Auto Scaling group.

    ", + "EnableMetricsCollection": "

    Enables monitoring of the specified metrics for the specified Auto Scaling group.

    You can only enable metrics collection if InstanceMonitoring in the launch configuration for the group is set to True.

    ", + "EnterStandby": "

    Moves the specified instances into Standby mode.

    For more information, see Auto Scaling Lifecycle in the Auto Scaling Developer Guide.

    ", + "ExecutePolicy": "

    Executes the specified policy.

    ", + "ExitStandby": "

    Moves the specified instances out of Standby mode.

    For more information, see Auto Scaling Lifecycle in the Auto Scaling Developer Guide.

    ", + "PutLifecycleHook": "

    Creates or updates a lifecycle hook for the specified Auto Scaling group.

    A lifecycle hook tells Auto Scaling that you want to perform an action on an instance that is not actively in service; for example, either when the instance launches or before the instance terminates.

    This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:

    1. (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Auto Scaling launches or terminates instances.
    2. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Auto Scaling to publish lifecycle notifications to the target.
    3. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.
    4. If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state.
    5. If you finish before the timeout period ends, complete the lifecycle action.

    For more information, see Auto Scaling Lifecycle in the Auto Scaling Developer Guide.

    If you exceed your maximum limit of lifecycle hooks, which by default is 50 per region, the call fails. For information about updating this limit, see AWS Service Limits in the Amazon Web Services General Reference.
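    A sketch of step 3, creating a launch hook; the ARNs and names below are placeholders for resources created in steps 1 and 2:

        package asgexamples

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/autoscaling"
        )

        func putLaunchHook(svc *autoscaling.AutoScaling) error {
            _, err := svc.PutLifecycleHook(&autoscaling.PutLifecycleHookInput{
                AutoScalingGroupName:  aws.String("my-asg"),         // placeholder
                LifecycleHookName:     aws.String("my-launch-hook"), // placeholder
                LifecycleTransition:   aws.String("autoscaling:EC2_INSTANCE_LAUNCHING"),
                NotificationTargetARN: aws.String("arn:aws:sqs:us-east-1:123456789012:my-queue"),  // placeholder
                RoleARN:               aws.String("arn:aws:iam::123456789012:role/my-hook-role"),  // placeholder
                HeartbeatTimeout:      aws.Int64(300), // seconds before DefaultResult applies
                DefaultResult:         aws.String("ABANDON"),
            })
            return err
        }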

    ", + "PutNotificationConfiguration": "

    Configures an Auto Scaling group to send notifications when specified events take place. Subscribers to the specified SNS topic can have messages for these events delivered to an endpoint such as a web server or email address.

    For more information, see Getting Notifications When Your Auto Scaling Group Changes in the Auto Scaling Developer Guide.

    This configuration overwrites an existing configuration.

    ", + "PutScalingPolicy": "

    Creates or updates a policy for an Auto Scaling group. To update an existing policy, use the existing policy name and set only the parameters you want to change; any parameter you do not set keeps its current value.

    If you exceed your maximum limit of step adjustments, which by default is 20 per region, the call fails. For information about updating this limit, see AWS Service Limits in the Amazon Web Services General Reference.
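    A sketch of a simple scale-out policy; names and sizes are placeholders:

        package asgexamples

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/autoscaling"
        )

        func putScaleOutPolicy(svc *autoscaling.AutoScaling) (*string, error) {
            out, err := svc.PutScalingPolicy(&autoscaling.PutScalingPolicyInput{
                AutoScalingGroupName: aws.String("my-asg"), // placeholder
                PolicyName:           aws.String("scale-out"),
                PolicyType:           aws.String("SimpleScaling"),
                AdjustmentType:       aws.String("ChangeInCapacity"),
                ScalingAdjustment:    aws.Int64(2), // add two instances per execution
                Cooldown:             aws.Int64(300),
            })
            if err != nil {
                return nil, err
            }
            // The returned PolicyARN is typically wired to a CloudWatch alarm action.
            return out.PolicyARN, nil
        }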

    ", + "PutScheduledUpdateGroupAction": "

    Creates or updates a scheduled scaling action for an Auto Scaling group. When updating a scheduled scaling action, if you leave a parameter unspecified, the corresponding value remains unchanged in the affected Auto Scaling group.

    For more information, see Scheduled Scaling in the Auto Scaling Developer Guide.
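    A sketch of a recurring action that raises capacity on weekday mornings; the group name and schedule are placeholders:

        package asgexamples

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/autoscaling"
        )

        func scheduleWeekdayScaleUp(svc *autoscaling.AutoScaling) error {
            _, err := svc.PutScheduledUpdateGroupAction(&autoscaling.PutScheduledUpdateGroupActionInput{
                AutoScalingGroupName: aws.String("my-asg"), // placeholder
                ScheduledActionName:  aws.String("weekday-morning-scale-up"),
                Recurrence:           aws.String("0 8 * * 1-5"), // cron format, UTC
                MinSize:              aws.Int64(2),
                MaxSize:              aws.Int64(6),
                DesiredCapacity:      aws.Int64(4),
            })
            return err
        }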

    ", + "RecordLifecycleActionHeartbeat": "

    Records a heartbeat for the lifecycle action associated with the specified token or instance. This extends the timeout by the length of time defined using PutLifecycleHook.

    This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:

    1. (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Auto Scaling launches or terminates instances.
    2. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Auto Scaling to publish lifecycle notifications to the target.
    3. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.
    4. If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state.
    5. If you finish before the timeout period ends, complete the lifecycle action.

    For more information, see Auto Scaling Lifecycle in the Auto Scaling Developer Guide.
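    A sketch of step 4, extending the hook's timeout while work is still in progress (names are placeholders):

        package asgexamples

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/autoscaling"
        )

        // extendLaunchHook resets the hook's heartbeat clock for the instance,
        // buying another HeartbeatTimeout interval.
        func extendLaunchHook(svc *autoscaling.AutoScaling, instanceID string) error {
            _, err := svc.RecordLifecycleActionHeartbeat(&autoscaling.RecordLifecycleActionHeartbeatInput{
                AutoScalingGroupName: aws.String("my-asg"),         // placeholder
                LifecycleHookName:    aws.String("my-launch-hook"), // placeholder
                InstanceId:           aws.String(instanceID),
            })
            return err
        }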

    ", + "ResumeProcesses": "

    Resumes the specified suspended Auto Scaling processes, or all suspended processes, for the specified Auto Scaling group.

    For more information, see Suspending and Resuming Auto Scaling Processes in the Auto Scaling Developer Guide.

    ", + "SetDesiredCapacity": "

    Sets the size of the specified Auto Scaling group.

    For more information about desired capacity, see What Is Auto Scaling? in the Auto Scaling Developer Guide.
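    A minimal sketch (group name is a placeholder):

        package asgexamples

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/autoscaling"
        )

        func setCapacity(svc *autoscaling.AutoScaling, n int64) error {
            _, err := svc.SetDesiredCapacity(&autoscaling.SetDesiredCapacityInput{
                AutoScalingGroupName: aws.String("my-asg"), // placeholder
                DesiredCapacity:      aws.Int64(n),
                HonorCooldown:        aws.Bool(true), // reject the change during a cooldown period
            })
            return err
        }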

    ", + "SetInstanceHealth": "

    Sets the health status of the specified instance.

    For more information, see Health Checks in the Auto Scaling Developer Guide.

    ", + "SetInstanceProtection": "

    Updates the instance protection settings of the specified instances.

    For more information, see Instance Protection in the Auto Scaling Developer Guide.

    ", + "SuspendProcesses": "

    Suspends the specified Auto Scaling processes, or all processes, for the specified Auto Scaling group.

    Note that if you suspend either the Launch or Terminate process types, it can prevent other process types from functioning properly.

    To resume processes that have been suspended, use ResumeProcesses.

    For more information, see Suspending and Resuming Auto Scaling Processes in the Auto Scaling Developer Guide.
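    A sketch that suspends only alarm-driven scaling, leaving Launch and Terminate alone as cautioned above; it assumes the generated input type keeps the ScalingProcessQuery shape name from the model, as aws-sdk-go does, and the group name is a placeholder:

        package asgexamples

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/autoscaling"
        )

        func suspendAlarmScaling(svc *autoscaling.AutoScaling) error {
            _, err := svc.SuspendProcesses(&autoscaling.ScalingProcessQuery{
                AutoScalingGroupName: aws.String("my-asg"), // placeholder
                // Suspend only alarm-driven scaling; other processes keep running.
                ScalingProcesses: []*string{aws.String("AlarmNotification")},
            })
            return err
        }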

    ", + "TerminateInstanceInAutoScalingGroup": "

    Terminates the specified instance and optionally adjusts the desired group size.

    This call simply makes a termination request. The instance is not terminated immediately.
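    A sketch that requests termination while keeping the group size, so a replacement is launched:

        package asgexamples

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/autoscaling"
        )

        func terminateAndReplace(svc *autoscaling.AutoScaling, instanceID string) error {
            out, err := svc.TerminateInstanceInAutoScalingGroup(&autoscaling.TerminateInstanceInAutoScalingGroupInput{
                InstanceId: aws.String(instanceID),
                // Keep desired capacity unchanged so a replacement is launched.
                ShouldDecrementDesiredCapacity: aws.Bool(false),
            })
            if err != nil {
                return err
            }
            fmt.Println("termination requested:", *out.Activity.ActivityId) // asynchronous
            return nil
        }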

    ", + "UpdateAutoScalingGroup": "

    Updates the configuration for the specified Auto Scaling group.

    To update an Auto Scaling group with a launch configuration with InstanceMonitoring set to False, you must first disable the collection of group metrics. Otherwise, you will get an error. If you have previously enabled the collection of group metrics, you can disable it using DisableMetricsCollection.

    The new settings are registered upon the completion of this call. Any launch configuration settings take effect on any triggers after this call returns. Scaling activities that are currently in progress aren't affected.

    Note the following:

    • If you specify a new value for MinSize without specifying a value for DesiredCapacity, and the new MinSize is larger than the current size of the group, we implicitly call SetDesiredCapacity to set the size of the group to the new value of MinSize.

    • If you specify a new value for MaxSize without specifying a value for DesiredCapacity, and the new MaxSize is smaller than the current size of the group, we implicitly call SetDesiredCapacity to set the size of the group to the new value of MaxSize.

    • All other optional parameters are left unchanged if not specified.
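    A sketch exercising the first note above: raising MinSize above the current group size implicitly raises the desired capacity (group name is a placeholder):

        package asgexamples

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/autoscaling"
        )

        func raiseFloor(svc *autoscaling.AutoScaling) error {
            _, err := svc.UpdateAutoScalingGroup(&autoscaling.UpdateAutoScalingGroupInput{
                AutoScalingGroupName: aws.String("my-asg"), // placeholder
                // If the new MinSize exceeds the current group size, Auto Scaling
                // implicitly calls SetDesiredCapacity to match it.
                MinSize: aws.Int64(2),
                MaxSize: aws.Int64(8),
            })
            return err
        }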

    " + }, + "shapes": { + "Activities": { + "base": null, + "refs": { + "ActivitiesType$Activities": "

    The scaling activities.

    ", + "DetachInstancesAnswer$Activities": "

    The activities related to detaching the instances from the Auto Scaling group.

    ", + "EnterStandbyAnswer$Activities": "

    The activities related to moving instances into Standby mode.

    ", + "ExitStandbyAnswer$Activities": "

    The activities related to moving instances out of Standby mode.

    " + } + }, + "ActivitiesType": { + "base": null, + "refs": { + } + }, + "Activity": { + "base": "

    Describes scaling activity, which is a long-running process that represents a change to your Auto Scaling group, such as changing its size or replacing an instance.

    ", + "refs": { + "Activities$member": null, + "ActivityType$Activity": "

    A scaling activity.

    " + } + }, + "ActivityIds": { + "base": null, + "refs": { + "DescribeScalingActivitiesType$ActivityIds": "

    The activity IDs of the desired scaling activities. If this list is omitted, all activities are described. If you specify an Auto Scaling group, the results are limited to that group. The list of requested activities cannot contain more than 50 items. If unknown activities are requested, they are ignored with no error.

    " + } + }, + "ActivityType": { + "base": null, + "refs": { + } + }, + "AdjustmentType": { + "base": "

    Describes a policy adjustment type.

    For more information, see Dynamic Scaling in the Auto Scaling Developer Guide.

    ", + "refs": { + "AdjustmentTypes$member": null + } + }, + "AdjustmentTypes": { + "base": null, + "refs": { + "DescribeAdjustmentTypesAnswer$AdjustmentTypes": "

    The policy adjustment types.

    " + } + }, + "Alarm": { + "base": "

    Describes an alarm.

    ", + "refs": { + "Alarms$member": null + } + }, + "Alarms": { + "base": null, + "refs": { + "ScalingPolicy$Alarms": "

    The CloudWatch alarms related to the policy.

    " + } + }, + "AlreadyExistsFault": { + "base": "

    You already have an Auto Scaling group or launch configuration with this name.

    ", + "refs": { + } + }, + "AsciiStringMaxLen255": { + "base": null, + "refs": { + "CompleteLifecycleActionType$LifecycleHookName": "

    The name of the lifecycle hook.

    ", + "DeleteLifecycleHookType$LifecycleHookName": "

    The name of the lifecycle hook.

    ", + "LifecycleHook$LifecycleHookName": "

    The name of the lifecycle hook.

    ", + "LifecycleHookNames$member": null, + "PutLifecycleHookType$LifecycleHookName": "

    The name of the lifecycle hook.

    ", + "RecordLifecycleActionHeartbeatType$LifecycleHookName": "

    The name of the lifecycle hook.

    " + } + }, + "AssociatePublicIpAddress": { + "base": null, + "refs": { + "CreateLaunchConfigurationType$AssociatePublicIpAddress": "

    Used for groups that launch instances into a virtual private cloud (VPC). Specifies whether to assign a public IP address to each instance. For more information, see Launching Auto Scaling Instances in a VPC in the Auto Scaling Developer Guide.

    If you specify this parameter, be sure to specify at least one subnet when you create your group.

    Default: If the instance is launched into a default subnet, the default is true. If the instance is launched into a nondefault subnet, the default is false. For more information, see Supported Platforms in the Amazon Elastic Compute Cloud User Guide.

    ", + "LaunchConfiguration$AssociatePublicIpAddress": "

    [EC2-VPC] Indicates whether to assign a public IP address to each instance.

    " + } + }, + "AttachInstancesQuery": { + "base": null, + "refs": { + } + }, + "AttachLoadBalancersResultType": { + "base": null, + "refs": { + } + }, + "AttachLoadBalancersType": { + "base": null, + "refs": { + } + }, + "AutoScalingGroup": { + "base": "

    Describes an Auto Scaling group.

    ", + "refs": { + "AutoScalingGroups$member": null + } + }, + "AutoScalingGroupDesiredCapacity": { + "base": null, + "refs": { + "AutoScalingGroup$DesiredCapacity": "

    The desired size of the group.

    ", + "CreateAutoScalingGroupType$DesiredCapacity": "

    The number of EC2 instances that should be running in the group. This number must be greater than or equal to the minimum size of the group and less than or equal to the maximum size of the group.

    ", + "PutScheduledUpdateGroupActionType$DesiredCapacity": "

    The number of EC2 instances that should be running in the group.

    ", + "ScheduledUpdateGroupAction$DesiredCapacity": "

    The number of instances you prefer to maintain in the group.

    ", + "SetDesiredCapacityType$DesiredCapacity": "

    The number of EC2 instances that should be running in the Auto Scaling group.

    ", + "UpdateAutoScalingGroupType$DesiredCapacity": "

    The number of EC2 instances that should be running in the Auto Scaling group. This number must be greater than or equal to the minimum size of the group and less than or equal to the maximum size of the group.

    " + } + }, + "AutoScalingGroupMaxSize": { + "base": null, + "refs": { + "AutoScalingGroup$MaxSize": "

    The maximum size of the group.

    ", + "CreateAutoScalingGroupType$MaxSize": "

    The maximum size of the group.

    ", + "PutScheduledUpdateGroupActionType$MaxSize": "

    The maximum size for the Auto Scaling group.

    ", + "ScheduledUpdateGroupAction$MaxSize": "

    The maximum size of the group.

    ", + "UpdateAutoScalingGroupType$MaxSize": "

    The maximum size of the Auto Scaling group.

    " + } + }, + "AutoScalingGroupMinSize": { + "base": null, + "refs": { + "AutoScalingGroup$MinSize": "

    The minimum size of the group.

    ", + "CreateAutoScalingGroupType$MinSize": "

    The minimum size of the group.

    ", + "PutScheduledUpdateGroupActionType$MinSize": "

    The minimum size for the Auto Scaling group.

    ", + "ScheduledUpdateGroupAction$MinSize": "

    The minimum size of the group.

    ", + "UpdateAutoScalingGroupType$MinSize": "

    The minimum size of the Auto Scaling group.

    " + } + }, + "AutoScalingGroupNames": { + "base": null, + "refs": { + "AutoScalingGroupNamesType$AutoScalingGroupNames": "

    The group names.

    ", + "DescribeNotificationConfigurationsType$AutoScalingGroupNames": "

    The name of the group.

    " + } + }, + "AutoScalingGroupNamesType": { + "base": null, + "refs": { + } + }, + "AutoScalingGroups": { + "base": null, + "refs": { + "AutoScalingGroupsType$AutoScalingGroups": "

    The groups.

    " + } + }, + "AutoScalingGroupsType": { + "base": null, + "refs": { + } + }, + "AutoScalingInstanceDetails": { + "base": "

    Describes an EC2 instance associated with an Auto Scaling group.

    ", + "refs": { + "AutoScalingInstances$member": null + } + }, + "AutoScalingInstances": { + "base": null, + "refs": { + "AutoScalingInstancesType$AutoScalingInstances": "

    The instances.

    " + } + }, + "AutoScalingInstancesType": { + "base": null, + "refs": { + } + }, + "AutoScalingNotificationTypes": { + "base": null, + "refs": { + "DescribeAutoScalingNotificationTypesAnswer$AutoScalingNotificationTypes": "

    One or more of the following notification types:

    • autoscaling:EC2_INSTANCE_LAUNCH

    • autoscaling:EC2_INSTANCE_LAUNCH_ERROR

    • autoscaling:EC2_INSTANCE_TERMINATE

    • autoscaling:EC2_INSTANCE_TERMINATE_ERROR

    • autoscaling:TEST_NOTIFICATION

    ", + "DescribeLifecycleHookTypesAnswer$LifecycleHookTypes": "

    One or more of the following notification types:

    • autoscaling:EC2_INSTANCE_LAUNCHING

    • autoscaling:EC2_INSTANCE_TERMINATING

    ", + "PutNotificationConfigurationType$NotificationTypes": "

    The type of event that will cause the notification to be sent. For details about notification types supported by Auto Scaling, see DescribeAutoScalingNotificationTypes.

    " + } + }, + "AvailabilityZones": { + "base": null, + "refs": { + "AutoScalingGroup$AvailabilityZones": "

    One or more Availability Zones for the group.

    ", + "CreateAutoScalingGroupType$AvailabilityZones": "

    One or more Availability Zones for the group. This parameter is optional if you specify one or more subnets.

    ", + "UpdateAutoScalingGroupType$AvailabilityZones": "

    One or more Availability Zones for the group.

    " + } + }, + "BlockDeviceEbsDeleteOnTermination": { + "base": null, + "refs": { + "Ebs$DeleteOnTermination": "

    Indicates whether the volume is deleted on instance termination.

    Default: true

    " + } + }, + "BlockDeviceEbsEncrypted": { + "base": null, + "refs": { + "Ebs$Encrypted": "

    Indicates whether the volume should be encrypted. Encrypted EBS volumes must be attached to instances that support Amazon EBS encryption. Volumes that are created from encrypted snapshots are automatically encrypted. There is no way to create an encrypted volume from an unencrypted snapshot or an unencrypted volume from an encrypted snapshot. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

    " + } + }, + "BlockDeviceEbsIops": { + "base": null, + "refs": { + "Ebs$Iops": "

    The number of I/O operations per second (IOPS) to provision for the volume.

    Constraint: Required when the volume type is io1.

    " + } + }, + "BlockDeviceEbsVolumeSize": { + "base": null, + "refs": { + "Ebs$VolumeSize": "

    The volume size, in GiB. For standard volumes, specify a value from 1 to 1,024. For io1 volumes, specify a value from 4 to 16,384. For gp2 volumes, specify a value from 1 to 16,384. If you specify a snapshot, the volume size must be equal to or larger than the snapshot size.

    Default: If you create a volume from a snapshot and you don't specify a volume size, the default is the snapshot size.

    " + } + }, + "BlockDeviceEbsVolumeType": { + "base": null, + "refs": { + "Ebs$VolumeType": "

    The volume type. For more information, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.

    Valid values: standard | io1 | gp2

    Default: standard

    " + } + }, + "BlockDeviceMapping": { + "base": "

    Describes a block device mapping.

    ", + "refs": { + "BlockDeviceMappings$member": null + } + }, + "BlockDeviceMappings": { + "base": null, + "refs": { + "CreateLaunchConfigurationType$BlockDeviceMappings": "

    One or more mappings that specify how block devices are exposed to the instance. For more information, see Block Device Mapping in the Amazon Elastic Compute Cloud User Guide.

    ", + "LaunchConfiguration$BlockDeviceMappings": "

    A block device mapping, which specifies the block devices for the instance.

    " + } + }, + "ClassicLinkVPCSecurityGroups": { + "base": null, + "refs": { + "CreateLaunchConfigurationType$ClassicLinkVPCSecurityGroups": "

    The IDs of one or more security groups for the specified ClassicLink-enabled VPC. This parameter is required if you specify a ClassicLink-enabled VPC, and is not supported otherwise. For more information, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

    ", + "LaunchConfiguration$ClassicLinkVPCSecurityGroups": "

    The IDs of one or more security groups for the VPC specified in ClassicLinkVPCId. This parameter is required if you specify a ClassicLink-enabled VPC, and cannot be used otherwise. For more information, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

    " + } + }, + "CompleteLifecycleActionAnswer": { + "base": null, + "refs": { + } + }, + "CompleteLifecycleActionType": { + "base": null, + "refs": { + } + }, + "Cooldown": { + "base": null, + "refs": { + "AutoScalingGroup$DefaultCooldown": "

    The amount of time, in seconds, after a scaling activity completes before another scaling activity can start.

    ", + "CreateAutoScalingGroupType$DefaultCooldown": "

    The amount of time, in seconds, after a scaling activity completes before another scaling activity can start. The default is 300.

    For more information, see Auto Scaling Cooldowns in the Auto Scaling Developer Guide.

    ", + "PutScalingPolicyType$Cooldown": "

    The amount of time, in seconds, after a scaling activity completes and before the next scaling activity can start. If this parameter is not specified, the default cooldown period for the group applies.

    This parameter is not supported unless the policy type is SimpleScaling.

    For more information, see Auto Scaling Cooldowns in the Auto Scaling Developer Guide.

    ", + "ScalingPolicy$Cooldown": "

    The amount of time, in seconds, after a scaling activity completes before any further trigger-related scaling activities can start.

    ", + "UpdateAutoScalingGroupType$DefaultCooldown": "

    The amount of time, in seconds, after a scaling activity completes before another scaling activity can start. The default is 300.

    For more information, see Auto Scaling Cooldowns in the Auto Scaling Developer Guide.

    " + } + }, + "CreateAutoScalingGroupType": { + "base": null, + "refs": { + } + }, + "CreateLaunchConfigurationType": { + "base": null, + "refs": { + } + }, + "CreateOrUpdateTagsType": { + "base": null, + "refs": { + } + }, + "DeleteAutoScalingGroupType": { + "base": null, + "refs": { + } + }, + "DeleteLifecycleHookAnswer": { + "base": null, + "refs": { + } + }, + "DeleteLifecycleHookType": { + "base": null, + "refs": { + } + }, + "DeleteNotificationConfigurationType": { + "base": null, + "refs": { + } + }, + "DeletePolicyType": { + "base": "

    ", + "refs": { + } + }, + "DeleteScheduledActionType": { + "base": null, + "refs": { + } + }, + "DeleteTagsType": { + "base": null, + "refs": { + } + }, + "DescribeAccountLimitsAnswer": { + "base": null, + "refs": { + } + }, + "DescribeAdjustmentTypesAnswer": { + "base": null, + "refs": { + } + }, + "DescribeAutoScalingInstancesType": { + "base": null, + "refs": { + } + }, + "DescribeAutoScalingNotificationTypesAnswer": { + "base": null, + "refs": { + } + }, + "DescribeLifecycleHookTypesAnswer": { + "base": null, + "refs": { + } + }, + "DescribeLifecycleHooksAnswer": { + "base": null, + "refs": { + } + }, + "DescribeLifecycleHooksType": { + "base": null, + "refs": { + } + }, + "DescribeLoadBalancersRequest": { + "base": null, + "refs": { + } + }, + "DescribeLoadBalancersResponse": { + "base": null, + "refs": { + } + }, + "DescribeMetricCollectionTypesAnswer": { + "base": null, + "refs": { + } + }, + "DescribeNotificationConfigurationsAnswer": { + "base": null, + "refs": { + } + }, + "DescribeNotificationConfigurationsType": { + "base": null, + "refs": { + } + }, + "DescribePoliciesType": { + "base": null, + "refs": { + } + }, + "DescribeScalingActivitiesType": { + "base": null, + "refs": { + } + }, + "DescribeScheduledActionsType": { + "base": null, + "refs": { + } + }, + "DescribeTagsType": { + "base": null, + "refs": { + } + }, + "DescribeTerminationPolicyTypesAnswer": { + "base": null, + "refs": { + } + }, + "DetachInstancesAnswer": { + "base": null, + "refs": { + } + }, + "DetachInstancesQuery": { + "base": null, + "refs": { + } + }, + "DetachLoadBalancersResultType": { + "base": null, + "refs": { + } + }, + "DetachLoadBalancersType": { + "base": null, + "refs": { + } + }, + "DisableMetricsCollectionQuery": { + "base": null, + "refs": { + } + }, + "Ebs": { + "base": "

    Describes an Amazon EBS volume.

    ", + "refs": { + "BlockDeviceMapping$Ebs": "

    The information about the Amazon EBS volume.

    " + } + }, + "EbsOptimized": { + "base": null, + "refs": { + "CreateLaunchConfigurationType$EbsOptimized": "

    Indicates whether the instance is optimized for Amazon EBS I/O. By default, the instance is not optimized for EBS I/O. The optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal I/O performance. This optimization is not available with all instance types. Additional usage charges apply. For more information, see Amazon EBS-Optimized Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "LaunchConfiguration$EbsOptimized": "

    Controls whether the instance is optimized for EBS I/O (true) or not (false).

    " + } + }, + "EnableMetricsCollectionQuery": { + "base": null, + "refs": { + } + }, + "EnabledMetric": { + "base": "

    Describes an enabled metric.

    ", + "refs": { + "EnabledMetrics$member": null + } + }, + "EnabledMetrics": { + "base": null, + "refs": { + "AutoScalingGroup$EnabledMetrics": "

    The metrics enabled for the group.

    " + } + }, + "EnterStandbyAnswer": { + "base": null, + "refs": { + } + }, + "EnterStandbyQuery": { + "base": null, + "refs": { + } + }, + "EstimatedInstanceWarmup": { + "base": null, + "refs": { + "PutScalingPolicyType$EstimatedInstanceWarmup": "

    The estimated time, in seconds, until a newly launched instance can contribute to the CloudWatch metrics. The default is to use the value specified for the default cooldown period for the group.

    This parameter is not supported if the policy type is SimpleScaling.

    ", + "ScalingPolicy$EstimatedInstanceWarmup": "

    The estimated time, in seconds, until a newly launched instance can contribute to the CloudWatch metrics.

    " + } + }, + "ExecutePolicyType": { + "base": null, + "refs": { + } + }, + "ExitStandbyAnswer": { + "base": null, + "refs": { + } + }, + "ExitStandbyQuery": { + "base": null, + "refs": { + } + }, + "Filter": { + "base": "

    Describes a filter.

    ", + "refs": { + "Filters$member": null + } + }, + "Filters": { + "base": null, + "refs": { + "DescribeTagsType$Filters": "

    A filter used to scope the tags to return.

    " + } + }, + "ForceDelete": { + "base": null, + "refs": { + "DeleteAutoScalingGroupType$ForceDelete": "

    Specifies that the group will be deleted along with all instances associated with the group, without waiting for all instances to be terminated. This parameter also deletes any lifecycle actions associated with the group.

    " + } + }, + "GlobalTimeout": { + "base": null, + "refs": { + "LifecycleHook$GlobalTimeout": "

    The maximum time, in seconds, that an instance can remain in a Pending:Wait or Terminating:Wait state. The default is 172800 seconds (48 hours).

    " + } + }, + "HealthCheckGracePeriod": { + "base": null, + "refs": { + "AutoScalingGroup$HealthCheckGracePeriod": "

    The amount of time, in seconds, that Auto Scaling waits before checking the health status of an EC2 instance that has come into service.

    ", + "CreateAutoScalingGroupType$HealthCheckGracePeriod": "

    The amount of time, in seconds, that Auto Scaling waits before checking the health status of an EC2 instance that has come into service. During this time, any health check failures for the instance are ignored. The default is 300.

    This parameter is required if you are adding an ELB health check.

    For more information, see Health Checks in the Auto Scaling Developer Guide.

    ", + "UpdateAutoScalingGroupType$HealthCheckGracePeriod": "

    The amount of time, in seconds, that Auto Scaling waits before checking the health status of an EC2 instance that has come into service. The default is 300.

    For more information, see Health Checks in the Auto Scaling Developer Guide.

    " + } + }, + "HeartbeatTimeout": { + "base": null, + "refs": { + "LifecycleHook$HeartbeatTimeout": "

    The maximum time, in seconds, that can elapse before the lifecycle hook times out. The default is 3600 seconds (1 hour). When the lifecycle hook times out, Auto Scaling performs the default action. You can prevent the lifecycle hook from timing out by calling RecordLifecycleActionHeartbeat.

    ", + "PutLifecycleHookType$HeartbeatTimeout": "

    The amount of time, in seconds, that can elapse before the lifecycle hook times out. When the lifecycle hook times out, Auto Scaling performs the default action. You can prevent the lifecycle hook from timing out by calling RecordLifecycleActionHeartbeat. The default is 3600 seconds (1 hour).

    " + } + }, + "HonorCooldown": { + "base": null, + "refs": { + "ExecutePolicyType$HonorCooldown": "

    If this parameter is true, Auto Scaling waits for the cooldown period to complete before executing the policy. Otherwise, Auto Scaling executes the policy without waiting for the cooldown period to complete.

    This parameter is not supported if the policy type is StepScaling.

    For more information, see Auto Scaling Cooldowns in the Auto Scaling Developer Guide.

    ", + "SetDesiredCapacityType$HonorCooldown": "

    By default, SetDesiredCapacity overrides any cooldown period associated with the Auto Scaling group. Specify True to make Auto Scaling wait for the cooldown period associated with the Auto Scaling group to complete before initiating a scaling activity to set your Auto Scaling group to its new capacity.

    " + } + }, + "Instance": { + "base": "

    Describes an EC2 instance.

    ", + "refs": { + "Instances$member": null + } + }, + "InstanceIds": { + "base": null, + "refs": { + "AttachInstancesQuery$InstanceIds": "

    One or more instance IDs.

    ", + "DescribeAutoScalingInstancesType$InstanceIds": "

    The instances to describe; up to 50 instance IDs. If you omit this parameter, all Auto Scaling instances are described. If you specify an ID that does not exist, it is ignored with no error.

    ", + "DetachInstancesQuery$InstanceIds": "

    One or more instance IDs.

    ", + "EnterStandbyQuery$InstanceIds": "

    One or more instances to move into Standby mode. You must specify at least one instance ID.

    ", + "ExitStandbyQuery$InstanceIds": "

    One or more instance IDs. You must specify at least one instance ID.

    ", + "SetInstanceProtectionQuery$InstanceIds": "

    One or more instance IDs.

    " + } + }, + "InstanceMonitoring": { + "base": "

    Describes whether instance monitoring is enabled.

    ", + "refs": { + "CreateLaunchConfigurationType$InstanceMonitoring": "

    Enables detailed monitoring if it is disabled. Detailed monitoring is enabled by default.

    When detailed monitoring is enabled, Amazon CloudWatch generates metrics every minute and your account is charged a fee. If you disable detailed monitoring by specifying False, CloudWatch generates metrics every 5 minutes. For more information, see Monitoring Your Auto Scaling Instances and Groups in the Auto Scaling Developer Guide.

    ", + "LaunchConfiguration$InstanceMonitoring": "

    Controls whether instances in this group are launched with detailed monitoring.

    " + } + }, + "InstanceProtected": { + "base": null, + "refs": { + "AutoScalingGroup$NewInstancesProtectedFromScaleIn": "

    Indicates whether newly launched instances are protected from termination by Auto Scaling when scaling in.

    ", + "AutoScalingInstanceDetails$ProtectedFromScaleIn": "

    Indicates whether the instance is protected from termination by Auto Scaling when scaling in.

    ", + "CreateAutoScalingGroupType$NewInstancesProtectedFromScaleIn": "

    Indicates whether newly launched instances are protected from termination by Auto Scaling when scaling in.

    ", + "Instance$ProtectedFromScaleIn": "

    Indicates whether the instance is protected from termination by Auto Scaling when scaling in.

    ", + "UpdateAutoScalingGroupType$NewInstancesProtectedFromScaleIn": "

    Indicates whether newly launched instances are protected from termination by Auto Scaling when scaling in.

    " + } + }, + "Instances": { + "base": null, + "refs": { + "AutoScalingGroup$Instances": "

    The EC2 instances associated with the group.

    " + } + }, + "InvalidNextToken": { + "base": "

    The NextToken value is not valid.

    ", + "refs": { + } + }, + "LaunchConfiguration": { + "base": "

    Describes a launch configuration.

    ", + "refs": { + "LaunchConfigurations$member": null + } + }, + "LaunchConfigurationNameType": { + "base": null, + "refs": { + } + }, + "LaunchConfigurationNames": { + "base": null, + "refs": { + "LaunchConfigurationNamesType$LaunchConfigurationNames": "

    The launch configuration names.

    " + } + }, + "LaunchConfigurationNamesType": { + "base": null, + "refs": { + } + }, + "LaunchConfigurations": { + "base": null, + "refs": { + "LaunchConfigurationsType$LaunchConfigurations": "

    The launch configurations.

    " + } + }, + "LaunchConfigurationsType": { + "base": null, + "refs": { + } + }, + "LifecycleActionResult": { + "base": null, + "refs": { + "CompleteLifecycleActionType$LifecycleActionResult": "

    The action for the group to take. This parameter can be either CONTINUE or ABANDON.

    ", + "LifecycleHook$DefaultResult": "

    Defines the action the Auto Scaling group should take when the lifecycle hook timeout elapses or if an unexpected failure occurs. The valid values are CONTINUE and ABANDON. The default value is CONTINUE.

    ", + "PutLifecycleHookType$DefaultResult": "

    Defines the action the Auto Scaling group should take when the lifecycle hook timeout elapses or if an unexpected failure occurs. This parameter can be either CONTINUE or ABANDON. The default value is ABANDON.

    " + } + }, + "LifecycleActionToken": { + "base": null, + "refs": { + "CompleteLifecycleActionType$LifecycleActionToken": "

    A universally unique identifier (UUID) that identifies a specific lifecycle action associated with an instance. Auto Scaling sends this token to the notification target you specified when you created the lifecycle hook.

    ", + "RecordLifecycleActionHeartbeatType$LifecycleActionToken": "

    A token that uniquely identifies a specific lifecycle action associated with an instance. Auto Scaling sends this token to the notification target you specified when you created the lifecycle hook.

    " + } + }, + "LifecycleHook": { + "base": "

    Describes a lifecycle hook, which tells Auto Scaling that you want to perform an action when an instance launches or terminates. When you have a lifecycle hook in place, the Auto Scaling group will either:

    • Pause the instance after it launches, but before it is put into service
    • Pause the instance as it terminates, but before it is fully terminated

    For more information, see Auto Scaling Lifecycle in the Auto Scaling Developer Guide.

    ", + "refs": { + "LifecycleHooks$member": null + } + }, + "LifecycleHookNames": { + "base": null, + "refs": { + "DescribeLifecycleHooksType$LifecycleHookNames": "

    The names of one or more lifecycle hooks.

    " + } + }, + "LifecycleHooks": { + "base": null, + "refs": { + "DescribeLifecycleHooksAnswer$LifecycleHooks": "

    The lifecycle hooks for the specified group.

    " + } + }, + "LifecycleState": { + "base": null, + "refs": { + "Instance$LifecycleState": "

    A description of the current lifecycle state. Note that the Quarantined state is not used.

    " + } + }, + "LifecycleTransition": { + "base": null, + "refs": { + "LifecycleHook$LifecycleTransition": "

    The state of the EC2 instance to which you want to attach the lifecycle hook. For a list of lifecycle hook types, see DescribeLifecycleHookTypes.

    ", + "PutLifecycleHookType$LifecycleTransition": "

    The instance state to which you want to attach the lifecycle hook. For a list of lifecycle hook types, see DescribeLifecycleHookTypes.

    This parameter is required for new lifecycle hooks, but optional when updating existing hooks.

    " + } + }, + "LimitExceededFault": { + "base": "

    You have already reached a limit for your Auto Scaling resources (for example, groups, launch configurations, or lifecycle hooks). For more information, see DescribeAccountLimits.

    ", + "refs": { + } + }, + "LoadBalancerNames": { + "base": null, + "refs": { + "AttachLoadBalancersType$LoadBalancerNames": "

    One or more load balancer names.

    ", + "AutoScalingGroup$LoadBalancerNames": "

    One or more load balancers associated with the group.

    ", + "CreateAutoScalingGroupType$LoadBalancerNames": "

    One or more load balancers.

    For more information, see Using a Load Balancer With an Auto Scaling Group in the Auto Scaling Developer Guide.

    ", + "DetachLoadBalancersType$LoadBalancerNames": "

    One or more load balancer names.

    " + } + }, + "LoadBalancerState": { + "base": "

    Describes the state of a load balancer.

    ", + "refs": { + "LoadBalancerStates$member": null + } + }, + "LoadBalancerStates": { + "base": null, + "refs": { + "DescribeLoadBalancersResponse$LoadBalancers": "

    The load balancers.

    " + } + }, + "MaxNumberOfAutoScalingGroups": { + "base": null, + "refs": { + "DescribeAccountLimitsAnswer$MaxNumberOfAutoScalingGroups": "

    The maximum number of groups allowed for your AWS account. The default limit is 20 per region.

    " + } + }, + "MaxNumberOfLaunchConfigurations": { + "base": null, + "refs": { + "DescribeAccountLimitsAnswer$MaxNumberOfLaunchConfigurations": "

    The maximum number of launch configurations allowed for your AWS account. The default limit is 100 per region.

    " + } + }, + "MaxRecords": { + "base": null, + "refs": { + "AutoScalingGroupNamesType$MaxRecords": "

    The maximum number of items to return with this call.

    ", + "DescribeAutoScalingInstancesType$MaxRecords": "

    The maximum number of items to return with this call.

    ", + "DescribeLoadBalancersRequest$MaxRecords": "

    The maximum number of items to return with this call.

    ", + "DescribeNotificationConfigurationsType$MaxRecords": "

    The maximum number of items to return with this call.

    ", + "DescribePoliciesType$MaxRecords": "

    The maximum number of items to be returned with each call.

    ", + "DescribeScalingActivitiesType$MaxRecords": "

    The maximum number of items to return with this call.

    ", + "DescribeScheduledActionsType$MaxRecords": "

    The maximum number of items to return with this call.

    ", + "DescribeTagsType$MaxRecords": "

    The maximum number of items to return with this call.

    ", + "LaunchConfigurationNamesType$MaxRecords": "

    The maximum number of items to return with this call. The default is 100.

    " + } + }, + "MetricCollectionType": { + "base": "

    Describes a metric.

    ", + "refs": { + "MetricCollectionTypes$member": null + } + }, + "MetricCollectionTypes": { + "base": null, + "refs": { + "DescribeMetricCollectionTypesAnswer$Metrics": "

    One or more metrics.

    " + } + }, + "MetricGranularityType": { + "base": "

    Describes a granularity of a metric.

    ", + "refs": { + "MetricGranularityTypes$member": null + } + }, + "MetricGranularityTypes": { + "base": null, + "refs": { + "DescribeMetricCollectionTypesAnswer$Granularities": "

    The granularities for the metrics.

    " + } + }, + "MetricScale": { + "base": null, + "refs": { + "ExecutePolicyType$MetricValue": "

    The metric value to compare to BreachThreshold. This enables you to execute a policy of type StepScaling and determine which step adjustment to use. For example, if the breach threshold is 50 and you want to use a step adjustment with a lower bound of 0 and an upper bound of 10, you can set the metric value to 59.

    If you specify a metric value that doesn't correspond to a step adjustment for the policy, the call returns an error.

    This parameter is required if the policy type is StepScaling and not supported otherwise.

    ", + "ExecutePolicyType$BreachThreshold": "

    The breach threshold for the alarm.

    This parameter is required if the policy type is StepScaling and not supported otherwise.

    ", + "StepAdjustment$MetricIntervalLowerBound": "

    The lower bound for the difference between the alarm threshold and the CloudWatch metric. If the metric value is above the breach threshold, the lower bound is inclusive (the metric must be greater than or equal to the threshold plus the lower bound). Otherwise, it is exclusive (the metric must be greater than the threshold plus the lower bound). A null value indicates negative infinity.

    ", + "StepAdjustment$MetricIntervalUpperBound": "

    The upper bound for the difference between the alarm threshold and the CloudWatch metric. If the metric value is above the breach threshold, the upper bound is exclusive (the metric must be less than the threshold plus the upper bound). Otherwise, it is inclusive (the metric must be less than or equal to the threshold plus the upper bound). A null value indicates positive infinity.

    The upper bound must be greater than the lower bound.

    " + } + }, + "Metrics": { + "base": null, + "refs": { + "DisableMetricsCollectionQuery$Metrics": "

    One or more of the following metrics. If you omit this parameter, all metrics are disabled.

    • GroupMinSize

    • GroupMaxSize

    • GroupDesiredCapacity

    • GroupInServiceInstances

    • GroupPendingInstances

    • GroupStandbyInstances

    • GroupTerminatingInstances

    • GroupTotalInstances

    ", + "EnableMetricsCollectionQuery$Metrics": "

    One or more of the following metrics. If you omit this parameter, all metrics are enabled.

    • GroupMinSize

    • GroupMaxSize

    • GroupDesiredCapacity

    • GroupInServiceInstances

    • GroupPendingInstances

    • GroupStandbyInstances

    • GroupTerminatingInstances

    • GroupTotalInstances

    Note that the GroupStandbyInstances metric is not enabled by default. You must explicitly request this metric.

    " + } + }, + "MinAdjustmentMagnitude": { + "base": null, + "refs": { + "PutScalingPolicyType$MinAdjustmentMagnitude": "

    The minimum number of instances to scale. If the value of AdjustmentType is PercentChangeInCapacity, the scaling policy changes the DesiredCapacity of the Auto Scaling group by at least this many instances. Specifying this parameter with any other adjustment type returns a ValidationError.

    ", + "ScalingPolicy$MinAdjustmentMagnitude": "

    The minimum number of instances to scale. If the value of AdjustmentType is PercentChangeInCapacity, the scaling policy changes the DesiredCapacity of the Auto Scaling group by at least this many instances. Specifying this parameter with any other adjustment type returns a ValidationError.

    " + } + }, + "MinAdjustmentStep": { + "base": null, + "refs": { + "PutScalingPolicyType$MinAdjustmentStep": "

    Available for backward compatibility. Use MinAdjustmentMagnitude instead.

    ", + "ScalingPolicy$MinAdjustmentStep": "

    Available for backward compatibility. Use MinAdjustmentMagnitude instead.

    " + } + }, + "MonitoringEnabled": { + "base": null, + "refs": { + "InstanceMonitoring$Enabled": "

    If True, instance monitoring is enabled.

    " + } + }, + "NoDevice": { + "base": null, + "refs": { + "BlockDeviceMapping$NoDevice": "

    Suppresses a device mapping.

    If this parameter is true for the root device, the instance might fail the EC2 health check. Auto Scaling launches a replacement instance if the instance fails the health check.

    " + } + }, + "NotificationConfiguration": { + "base": "

    Describes a notification.

    ", + "refs": { + "NotificationConfigurations$member": null + } + }, + "NotificationConfigurations": { + "base": null, + "refs": { + "DescribeNotificationConfigurationsAnswer$NotificationConfigurations": "

    The notification configurations.

    " + } + }, + "NotificationTargetResourceName": { + "base": null, + "refs": { + "PutLifecycleHookType$NotificationTargetARN": "

    The ARN of the notification target that Auto Scaling will use to notify you when an instance is in the transition state for the lifecycle hook. This target can be either an SQS queue or an SNS topic. If you specify an empty string, this overrides the current ARN.

    The notification messages sent to the target include the following information:

    • AutoScalingGroupName. The name of the Auto Scaling group.
    • AccountId. The AWS account ID.
    • LifecycleTransition. The lifecycle hook type.
    • LifecycleActionToken. The lifecycle action token.
    • EC2InstanceId. The EC2 instance ID.
    • LifecycleHookName. The name of the lifecycle hook.
    • NotificationMetadata. User-defined information.

    This operation uses the JSON format when sending notifications to an Amazon SQS queue, and an email key/value pair format when sending notifications to an Amazon SNS topic.

    When you specify a notification target, Auto Scaling sends it a test message. The test message contains the following additional key/value pair: \"Event\": \"autoscaling:TEST_NOTIFICATION\".

    " + } + }, + "NumberOfAutoScalingGroups": { + "base": null, + "refs": { + "DescribeAccountLimitsAnswer$NumberOfAutoScalingGroups": "

    The current number of groups for your AWS account.

    " + } + }, + "NumberOfLaunchConfigurations": { + "base": null, + "refs": { + "DescribeAccountLimitsAnswer$NumberOfLaunchConfigurations": "

    The current number of launch configurations for your AWS account.

    " + } + }, + "PoliciesType": { + "base": null, + "refs": { + } + }, + "PolicyARNType": { + "base": null, + "refs": { + } + }, + "PolicyIncrement": { + "base": null, + "refs": { + "PutScalingPolicyType$ScalingAdjustment": "

    The amount by which to scale, based on the specified adjustment type. A positive value adds to the current capacity while a negative number removes from the current capacity.

    This parameter is required if the policy type is SimpleScaling and not supported otherwise.

    ", + "ScalingPolicy$ScalingAdjustment": "

    The amount by which to scale, based on the specified adjustment type. A positive value adds to the current capacity while a negative number removes from the current capacity.

    ", + "StepAdjustment$ScalingAdjustment": "

    The amount by which to scale, based on the specified adjustment type. A positive value adds to the current capacity while a negative number removes from the current capacity.

    " + } + }, + "PolicyNames": { + "base": null, + "refs": { + "DescribePoliciesType$PolicyNames": "

    One or more policy names or policy ARNs to be described. If you omit this list, all policy names are described. If a group name is provided, the results are limited to that group. This list is limited to 50 items. If you specify an unknown policy name, it is ignored with no error.

    " + } + }, + "PolicyTypes": { + "base": null, + "refs": { + "DescribePoliciesType$PolicyTypes": "

    One or more policy types. Valid values are SimpleScaling and StepScaling.

    " + } + }, + "ProcessNames": { + "base": null, + "refs": { + "ScalingProcessQuery$ScalingProcesses": "

    One or more of the following processes:

    • Launch

    • Terminate

    • HealthCheck

    • ReplaceUnhealthy

    • AZRebalance

    • AlarmNotification

    • ScheduledActions

    • AddToLoadBalancer

    " + } + }, + "ProcessType": { + "base": "

    Describes a process type.

    For more information, see Auto Scaling Processes in the Auto Scaling Developer Guide.

    ", + "refs": { + "Processes$member": null + } + }, + "Processes": { + "base": null, + "refs": { + "ProcessesType$Processes": "

    The names of the process types.

    " + } + }, + "ProcessesType": { + "base": null, + "refs": { + } + }, + "Progress": { + "base": null, + "refs": { + "Activity$Progress": "

    A value between 0 and 100 that indicates the progress of the activity.

    " + } + }, + "PropagateAtLaunch": { + "base": null, + "refs": { + "Tag$PropagateAtLaunch": "

    Determines whether the tag is added to new instances as they are launched in the group.

    ", + "TagDescription$PropagateAtLaunch": "

    Determines whether the tag is added to new instances as they are launched in the group.

    " + } + }, + "ProtectedFromScaleIn": { + "base": null, + "refs": { + "SetInstanceProtectionQuery$ProtectedFromScaleIn": "

    Indicates whether the instance is protected from termination by Auto Scaling when scaling in.

    " + } + }, + "PutLifecycleHookAnswer": { + "base": null, + "refs": { + } + }, + "PutLifecycleHookType": { + "base": null, + "refs": { + } + }, + "PutNotificationConfigurationType": { + "base": null, + "refs": { + } + }, + "PutScalingPolicyType": { + "base": null, + "refs": { + } + }, + "PutScheduledUpdateGroupActionType": { + "base": null, + "refs": { + } + }, + "RecordLifecycleActionHeartbeatAnswer": { + "base": null, + "refs": { + } + }, + "RecordLifecycleActionHeartbeatType": { + "base": null, + "refs": { + } + }, + "ResourceContentionFault": { + "base": "

    You already have a pending update to an Auto Scaling resource (for example, a group, instance, or load balancer).

    ", + "refs": { + } + }, + "ResourceInUseFault": { + "base": "

    The Auto Scaling group or launch configuration can't be deleted because it is in use.

    ", + "refs": { + } + }, + "ResourceName": { + "base": null, + "refs": { + "Alarm$AlarmARN": "

    The Amazon Resource Name (ARN) of the alarm.

    ", + "AttachInstancesQuery$AutoScalingGroupName": "

    The name of the group.

    ", + "AttachLoadBalancersType$AutoScalingGroupName": "

    The name of the group.

    ", + "AutoScalingGroup$AutoScalingGroupARN": "

    The Amazon Resource Name (ARN) of the group.

    ", + "AutoScalingGroupNames$member": null, + "CompleteLifecycleActionType$AutoScalingGroupName": "

    The name of the group for the lifecycle hook.

    ", + "CreateAutoScalingGroupType$LaunchConfigurationName": "

    The name of the launch configuration. Alternatively, specify an EC2 instance instead of a launch configuration.

    ", + "DeleteAutoScalingGroupType$AutoScalingGroupName": "

    The name of the group to delete.

    ", + "DeleteLifecycleHookType$AutoScalingGroupName": "

    The name of the Auto Scaling group for the lifecycle hook.

    ", + "DeleteNotificationConfigurationType$AutoScalingGroupName": "

    The name of the Auto Scaling group.

    ", + "DeleteNotificationConfigurationType$TopicARN": "

    The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic.

    ", + "DeletePolicyType$AutoScalingGroupName": "

    The name of the Auto Scaling group.

    ", + "DeletePolicyType$PolicyName": "

    The name or Amazon Resource Name (ARN) of the policy.

    ", + "DeleteScheduledActionType$AutoScalingGroupName": "

    The name of the Auto Scaling group.

    ", + "DeleteScheduledActionType$ScheduledActionName": "

    The name of the action to delete.

    ", + "DescribeLifecycleHooksType$AutoScalingGroupName": "

    The name of the group.

    ", + "DescribeLoadBalancersRequest$AutoScalingGroupName": "

    The name of the group.

    ", + "DescribePoliciesType$AutoScalingGroupName": "

    The name of the group.

    ", + "DescribeScalingActivitiesType$AutoScalingGroupName": "

    The name of the group.

    ", + "DescribeScheduledActionsType$AutoScalingGroupName": "

    The name of the group.

    ", + "DetachInstancesQuery$AutoScalingGroupName": "

    The name of the group.

    ", + "DetachLoadBalancersType$AutoScalingGroupName": "

    The name of the group.

    ", + "DisableMetricsCollectionQuery$AutoScalingGroupName": "

    The name or Amazon Resource Name (ARN) of the group.

    ", + "EnableMetricsCollectionQuery$AutoScalingGroupName": "

    The name or ARN of the Auto Scaling group.

    ", + "EnterStandbyQuery$AutoScalingGroupName": "

    The name of the Auto Scaling group.

    ", + "ExecutePolicyType$AutoScalingGroupName": "

    The name or Amazon Resource Name (ARN) of the Auto Scaling group.

    ", + "ExecutePolicyType$PolicyName": "

    The name or ARN of the policy.

    ", + "ExitStandbyQuery$AutoScalingGroupName": "

    The name of the Auto Scaling group.

    ", + "LaunchConfiguration$LaunchConfigurationARN": "

    The Amazon Resource Name (ARN) of the launch configuration.

    ", + "LaunchConfigurationNameType$LaunchConfigurationName": "

    The name of the launch configuration.

    ", + "LaunchConfigurationNames$member": null, + "LifecycleHook$AutoScalingGroupName": "

    The name of the Auto Scaling group for the lifecycle hook.

    ", + "LifecycleHook$NotificationTargetARN": "

    The ARN of the notification target that Auto Scaling uses to notify you when an instance is in the transition state for the lifecycle hook. This ARN target can be either an SQS queue or an SNS topic. The notification message sent to the target includes the following:

    • Lifecycle action token
    • User account ID
    • Name of the Auto Scaling group
    • Lifecycle hook name
    • EC2 instance ID
    • Lifecycle transition
    • Notification metadata
    ", + "LifecycleHook$RoleARN": "

    The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target.

    ", + "NotificationConfiguration$AutoScalingGroupName": "

    The name of the group.

    ", + "NotificationConfiguration$TopicARN": "

    The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic.

    ", + "PolicyARNType$PolicyARN": "

    The Amazon Resource Name (ARN) of the policy.

    ", + "PolicyNames$member": null, + "PutLifecycleHookType$AutoScalingGroupName": "

    The name of the Auto Scaling group to which you want to assign the lifecycle hook.

    ", + "PutLifecycleHookType$RoleARN": "

    The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target.

    This parameter is required for new lifecycle hooks, but optional when updating existing hooks.

    ", + "PutNotificationConfigurationType$AutoScalingGroupName": "

    The name of the Auto Scaling group.

    ", + "PutNotificationConfigurationType$TopicARN": "

    The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic.

    ", + "PutScalingPolicyType$AutoScalingGroupName": "

    The name or ARN of the group.

    ", + "PutScheduledUpdateGroupActionType$AutoScalingGroupName": "

    The name or Amazon Resource Name (ARN) of the Auto Scaling group.

    ", + "RecordLifecycleActionHeartbeatType$AutoScalingGroupName": "

    The name of the Auto Scaling group for the hook.

    ", + "ScalingPolicy$PolicyARN": "

    The Amazon Resource Name (ARN) of the policy.

    ", + "ScalingProcessQuery$AutoScalingGroupName": "

    The name or Amazon Resource Name (ARN) of the Auto Scaling group.

    ", + "ScheduledActionNames$member": null, + "ScheduledUpdateGroupAction$ScheduledActionARN": "

    The Amazon Resource Name (ARN) of the scheduled action.

    ", + "SetDesiredCapacityType$AutoScalingGroupName": "

    The name of the Auto Scaling group.

    ", + "SetInstanceProtectionQuery$AutoScalingGroupName": "

    The name of the group.

    ", + "UpdateAutoScalingGroupType$AutoScalingGroupName": "

    The name of the Auto Scaling group.

    ", + "UpdateAutoScalingGroupType$LaunchConfigurationName": "

    The name of the launch configuration.

    " + } + }, + "ScalingActivityInProgressFault": { + "base": "

    The Auto Scaling group can't be deleted because there are scaling activities in progress.

    ", + "refs": { + } + }, + "ScalingActivityStatusCode": { + "base": null, + "refs": { + "Activity$StatusCode": "

    The current status of the activity.

    " + } + }, + "ScalingPolicies": { + "base": null, + "refs": { + "PoliciesType$ScalingPolicies": "

    The scaling policies.

    " + } + }, + "ScalingPolicy": { + "base": "

    Describes a scaling policy.

    ", + "refs": { + "ScalingPolicies$member": null + } + }, + "ScalingProcessQuery": { + "base": null, + "refs": { + } + }, + "ScheduledActionNames": { + "base": null, + "refs": { + "DescribeScheduledActionsType$ScheduledActionNames": "

    Describes one or more scheduled actions. If you omit this list, the call describes all scheduled actions. If you specify an unknown scheduled action, it is ignored with no error.

    You can describe up to 50 scheduled actions with a single call. If there are more items to return, the call returns a token. To get the next set of items, repeat the call with the returned token.

    " + } + }, + "ScheduledActionsType": { + "base": null, + "refs": { + } + }, + "ScheduledUpdateGroupAction": { + "base": "

    Describes a scheduled update to an Auto Scaling group.

    ", + "refs": { + "ScheduledUpdateGroupActions$member": null + } + }, + "ScheduledUpdateGroupActions": { + "base": null, + "refs": { + "ScheduledActionsType$ScheduledUpdateGroupActions": "

    The scheduled actions.

    " + } + }, + "SecurityGroups": { + "base": null, + "refs": { + "CreateLaunchConfigurationType$SecurityGroups": "

    One or more security groups with which to associate the instances.

    If your instances are launched in EC2-Classic, you can specify either security group names or security group IDs. For more information about security groups for EC2-Classic, see Amazon EC2 Security Groups in the Amazon Elastic Compute Cloud User Guide.

    If your instances are launched into a VPC, specify security group IDs. For more information, see Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "LaunchConfiguration$SecurityGroups": "

    The security groups to associate with the instances.

    " + } + }, + "SetDesiredCapacityType": { + "base": null, + "refs": { + } + }, + "SetInstanceHealthQuery": { + "base": null, + "refs": { + } + }, + "SetInstanceProtectionAnswer": { + "base": null, + "refs": { + } + }, + "SetInstanceProtectionQuery": { + "base": null, + "refs": { + } + }, + "ShouldDecrementDesiredCapacity": { + "base": null, + "refs": { + "DetachInstancesQuery$ShouldDecrementDesiredCapacity": "

    If True, the Auto Scaling group decrements the desired capacity value by the number of instances detached.

    ", + "EnterStandbyQuery$ShouldDecrementDesiredCapacity": "

    Specifies whether the instances moved to Standby mode count as part of the Auto Scaling group's desired capacity. If set, the desired capacity for the Auto Scaling group decrements by the number of instances moved to Standby mode.

    ", + "TerminateInstanceInAutoScalingGroupType$ShouldDecrementDesiredCapacity": "

    If true, terminating the instance also decrements the size of the Auto Scaling group.

    " + } + }, + "ShouldRespectGracePeriod": { + "base": null, + "refs": { + "SetInstanceHealthQuery$ShouldRespectGracePeriod": "

    If the Auto Scaling group of the specified instance has a HealthCheckGracePeriod specified for the group, this call respects the grace period by default. Set this to False if you do not want the call to respect the grace period associated with the group.

    For more information, see the description of the health check grace period for CreateAutoScalingGroup.

    " + } + }, + "SpotPrice": { + "base": null, + "refs": { + "CreateLaunchConfigurationType$SpotPrice": "

    The maximum hourly price to be paid for any Spot Instance launched to fulfill the request. Spot Instances are launched when the price you specify exceeds the current Spot market price. For more information, see Launching Spot Instances in Your Auto Scaling Group in the Auto Scaling Developer Guide.

    ", + "LaunchConfiguration$SpotPrice": "

    The price to bid when launching Spot Instances.

    " + } + }, + "StepAdjustment": { + "base": "

    Describes an adjustment based on the difference between the value of the aggregated CloudWatch metric and the breach threshold that you've defined for the alarm.

    For the following examples, suppose that you have an alarm with a breach threshold of 50:

    • If you want the adjustment to be triggered when the metric is greater than or equal to 50 and less than 60, specify a lower bound of 0 and an upper bound of 10.

    • If you want the adjustment to be triggered when the metric is greater than 40 and less than or equal to 50, specify a lower bound of -10 and an upper bound of 0.

    There are a few rules for the step adjustments for your step policy:

    • The ranges of your step adjustments can't overlap or have a gap.

    • At most one step adjustment can have a null lower bound. If one step adjustment has a negative lower bound, then there must be a step adjustment with a null lower bound.

    • At most one step adjustment can have a null upper bound. If one step adjustment has a positive upper bound, then there must be a step adjustment with a null upper bound.

    • The upper and lower bound can't be null in the same step adjustment.

    ", + "refs": { + "StepAdjustments$member": null + } + }, + "StepAdjustments": { + "base": null, + "refs": { + "PutScalingPolicyType$StepAdjustments": "

    A set of adjustments that enable you to scale based on the size of the alarm breach.

    This parameter is required if the policy type is StepScaling and not supported otherwise.

    ", + "ScalingPolicy$StepAdjustments": "

    A set of adjustments that enable you to scale based on the size of the alarm breach.

    " + } + }, + "SuspendedProcess": { + "base": "

    Describes an Auto Scaling process that has been suspended. For more information, see ProcessType.

    ", + "refs": { + "SuspendedProcesses$member": null + } + }, + "SuspendedProcesses": { + "base": null, + "refs": { + "AutoScalingGroup$SuspendedProcesses": "

    The suspended processes associated with the group.

    " + } + }, + "Tag": { + "base": "

    Describes a tag for an Auto Scaling group.

    ", + "refs": { + "Tags$member": null + } + }, + "TagDescription": { + "base": "

    Describes a tag for an Auto Scaling group.

    ", + "refs": { + "TagDescriptionList$member": null + } + }, + "TagDescriptionList": { + "base": null, + "refs": { + "AutoScalingGroup$Tags": "

    The tags for the group.

    ", + "TagsType$Tags": "

    One or more tags.

    " + } + }, + "TagKey": { + "base": null, + "refs": { + "Tag$Key": "

    The tag key.

    ", + "TagDescription$Key": "

    The tag key.

    " + } + }, + "TagValue": { + "base": null, + "refs": { + "Tag$Value": "

    The tag value.

    ", + "TagDescription$Value": "

    The tag value.

    " + } + }, + "Tags": { + "base": null, + "refs": { + "CreateAutoScalingGroupType$Tags": "

    One or more tags.

    For more information, see Tagging Auto Scaling Groups and Instances in the Auto Scaling Developer Guide.

    ", + "CreateOrUpdateTagsType$Tags": "

    One or more tags.

    ", + "DeleteTagsType$Tags": "

    One or more tags.

    " + } + }, + "TagsType": { + "base": null, + "refs": { + } + }, + "TerminateInstanceInAutoScalingGroupType": { + "base": null, + "refs": { + } + }, + "TerminationPolicies": { + "base": null, + "refs": { + "AutoScalingGroup$TerminationPolicies": "

    The termination policies for the group.

    ", + "CreateAutoScalingGroupType$TerminationPolicies": "

    One or more termination policies used to select the instance to terminate. These policies are executed in the order that they are listed.

    For more information, see Controlling Which Instances Auto Scaling Terminates During Scale In in the Auto Scaling Developer Guide.

    ", + "DescribeTerminationPolicyTypesAnswer$TerminationPolicyTypes": "

    The termination policies supported by Auto Scaling (OldestInstance, OldestLaunchConfiguration, NewestInstance, ClosestToNextInstanceHour, and Default).

    ", + "UpdateAutoScalingGroupType$TerminationPolicies": "

    A standalone termination policy or a list of termination policies used to select the instance to terminate. The policies are executed in the order that they are listed.

    For more information, see Controlling Which Instances Auto Scaling Terminates During Scale In in the Auto Scaling Developer Guide.

    " + } + }, + "TimestampType": { + "base": null, + "refs": { + "Activity$StartTime": "

    The start time of the activity.

    ", + "Activity$EndTime": "

    The end time of the activity.

    ", + "AutoScalingGroup$CreatedTime": "

    The date and time the group was created.

    ", + "DescribeScheduledActionsType$StartTime": "

    The earliest scheduled start time to return. If scheduled action names are provided, this parameter is ignored.

    ", + "DescribeScheduledActionsType$EndTime": "

    The latest scheduled start time to return. If scheduled action names are provided, this parameter is ignored.

    ", + "LaunchConfiguration$CreatedTime": "

    The creation date and time for the launch configuration.

    ", + "PutScheduledUpdateGroupActionType$Time": "

    This parameter is deprecated.

    ", + "PutScheduledUpdateGroupActionType$StartTime": "

    The time for this action to start, in \"YYYY-MM-DDThh:mm:ssZ\" format in UTC/GMT only (for example, 2014-06-01T00:00:00Z).

    If you try to schedule your action in the past, Auto Scaling returns an error message.

    When StartTime and EndTime are specified with Recurrence, they form the boundaries of when the recurring action starts and stops.

    ", + "PutScheduledUpdateGroupActionType$EndTime": "

    The time for this action to end.

    ", + "ScheduledUpdateGroupAction$Time": "

    This parameter is deprecated.

    ", + "ScheduledUpdateGroupAction$StartTime": "

    The date and time that the action is scheduled to begin. This date and time can be up to one month in the future.

    When StartTime and EndTime are specified with Recurrence, they form the boundaries of when the recurring action will start and stop.

    ", + "ScheduledUpdateGroupAction$EndTime": "

    The date and time that the action is scheduled to end. This date and time can be up to one month in the future.

    " + } + }, + "UpdateAutoScalingGroupType": { + "base": null, + "refs": { + } + }, + "Values": { + "base": null, + "refs": { + "Filter$Values": "

    The value of the filter.

    " + } + }, + "XmlString": { + "base": null, + "refs": { + "ActivitiesType$NextToken": "

    The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

    ", + "Activity$ActivityId": "

    The ID of the activity.

    ", + "Activity$Description": "

    A friendly, more verbose description of the activity.

    ", + "Activity$Details": "

    The details about the activity.

    ", + "ActivityIds$member": null, + "AutoScalingGroupNamesType$NextToken": "

    The token for the next set of items to return. (You received this token from a previous call.)

    ", + "AutoScalingGroupsType$NextToken": "

    The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

    ", + "AutoScalingInstancesType$NextToken": "

    The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

    ", + "DescribeAutoScalingInstancesType$NextToken": "

    The token for the next set of items to return. (You received this token from a previous call.)

    ", + "DescribeLoadBalancersRequest$NextToken": "

    The token for the next set of items to return. (You received this token from a previous call.)

    ", + "DescribeLoadBalancersResponse$NextToken": "

    The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

    ", + "DescribeNotificationConfigurationsAnswer$NextToken": "

    The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

    ", + "DescribeNotificationConfigurationsType$NextToken": "

    The token for the next set of items to return. (You received this token from a previous call.)

    ", + "DescribePoliciesType$NextToken": "

    The token for the next set of items to return. (You received this token from a previous call.)

    ", + "DescribeScalingActivitiesType$NextToken": "

    The token for the next set of items to return. (You received this token from a previous call.)

    ", + "DescribeScheduledActionsType$NextToken": "

    The token for the next set of items to return. (You received this token from a previous call.)

    ", + "DescribeTagsType$NextToken": "

    The token for the next set of items to return. (You received this token from a previous call.)

    ", + "Filter$Name": "

    The name of the filter. The valid values are: \"auto-scaling-group\", \"key\", \"value\", and \"propagate-at-launch\".

    ", + "LaunchConfigurationNamesType$NextToken": "

    The token for the next set of items to return. (You received this token from a previous call.)

    ", + "LaunchConfigurationsType$NextToken": "

    The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

    ", + "PoliciesType$NextToken": "

    The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

    ", + "ScheduledActionsType$NextToken": "

    The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

    ", + "SecurityGroups$member": null, + "Tag$ResourceId": "

    The name of the group.

    ", + "Tag$ResourceType": "

    The type of resource. The only supported value is auto-scaling-group.

    ", + "TagDescription$ResourceId": "

    The name of the group.

    ", + "TagDescription$ResourceType": "

    The type of resource. The only supported value is auto-scaling-group.

    ", + "TagsType$NextToken": "

    The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

    ", + "Values$member": null + } + }, + "XmlStringMaxLen1023": { + "base": null, + "refs": { + "Activity$Cause": "

    The reason the activity began.

    ", + "LifecycleHook$NotificationMetadata": "

    Additional information that you want to include any time Auto Scaling sends a message to the notification target.

    ", + "PutLifecycleHookType$NotificationMetadata": "

    Contains additional information that you want to include any time Auto Scaling sends a message to the notification target.

    " + } + }, + "XmlStringMaxLen1600": { + "base": null, + "refs": { + "CreateLaunchConfigurationType$IamInstanceProfile": "

    The name or the Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instance.

    EC2 instances launched with an IAM role will automatically have AWS security credentials available. You can use IAM roles with Auto Scaling to automatically enable applications running on your EC2 instances to securely access other AWS resources. For more information, see Launch Auto Scaling Instances with an IAM Role in the Auto Scaling Developer Guide.

    ", + "LaunchConfiguration$IamInstanceProfile": "

    The name or Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instance.

    ", + "TerminationPolicies$member": null + } + }, + "XmlStringMaxLen19": { + "base": null, + "refs": { + "AutoScalingInstanceDetails$InstanceId": "

    The ID of the instance.

    ", + "CompleteLifecycleActionType$InstanceId": "

    The ID of the instance.

    ", + "CreateAutoScalingGroupType$InstanceId": "

    The ID of the instance used to create a launch configuration for the group. Alternatively, specify a launch configuration instead of an EC2 instance.

    When you specify an ID of an instance, Auto Scaling creates a new launch configuration and associates it with the group. This launch configuration derives its attributes from the specified instance, with the exception of the block device mapping.

    For more information, see Create an Auto Scaling Group Using an EC2 Instance in the Auto Scaling Developer Guide.
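
    A minimal sketch of the instance-based path described above, with placeholder names and IDs throughout; MinSize and MaxSize are still required even when the launch configuration is derived from the instance:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	svc := autoscaling.New(session.Must(session.NewSession(
		&aws.Config{Region: aws.String("us-east-1")}))) // placeholder region

	// Auto Scaling derives a new launch configuration from this instance.
	_, err := svc.CreateAutoScalingGroup(&autoscaling.CreateAutoScalingGroupInput{
		AutoScalingGroupName: aws.String("my-asg"),                // placeholder
		InstanceId:           aws.String("i-0123456789abcdef0"),   // placeholder instance
		MinSize:              aws.Int64(1),
		MaxSize:              aws.Int64(3),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```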

    ", + "CreateLaunchConfigurationType$InstanceId": "

    The ID of the instance to use to create the launch configuration.

    The new launch configuration derives attributes from the instance, with the exception of the block device mapping.

    To create a launch configuration with a block device mapping or override any other instance attributes, specify them as part of the same request.

    For more information, see Create a Launch Configuration Using an EC2 Instance in the Auto Scaling Developer Guide.

    ", + "Instance$InstanceId": "

    The ID of the instance.

    ", + "InstanceIds$member": null, + "RecordLifecycleActionHeartbeatType$InstanceId": "

    The ID of the instance.

    ", + "SetInstanceHealthQuery$InstanceId": "

    The ID of the instance.

    ", + "TerminateInstanceInAutoScalingGroupType$InstanceId": "

    The ID of the instance.

    " + } + }, + "XmlStringMaxLen255": { + "base": null, + "refs": { + "Activity$AutoScalingGroupName": "

    The name of the Auto Scaling group.

    ", + "Activity$StatusMessage": "

    A friendly, more verbose description of the activity status.

    ", + "AdjustmentType$AdjustmentType": "

    The policy adjustment type. The valid values are ChangeInCapacity, ExactCapacity, and PercentChangeInCapacity.

    ", + "Alarm$AlarmName": "

    The name of the alarm.

    ", + "AlreadyExistsFault$message": null, + "AutoScalingGroup$AutoScalingGroupName": "

    The name of the group.

    ", + "AutoScalingGroup$LaunchConfigurationName": "

    The name of the associated launch configuration.

    ", + "AutoScalingGroup$PlacementGroup": "

    The name of the placement group into which you'll launch your instances, if any. For more information, see Placement Groups in the Amazon Elastic Compute Cloud User Guide.

    ", + "AutoScalingGroup$VPCZoneIdentifier": "

    One or more subnet IDs, if applicable, separated by commas.

    If you specify VPCZoneIdentifier and AvailabilityZones, ensure that the Availability Zones of the subnets match the values for AvailabilityZones.

    ", + "AutoScalingGroup$Status": "

    The current state of the group when DeleteAutoScalingGroup is in progress.

    ", + "AutoScalingInstanceDetails$AutoScalingGroupName": "

    The name of the Auto Scaling group associated with the instance.

    ", + "AutoScalingInstanceDetails$AvailabilityZone": "

    The Availability Zone for the instance.

    ", + "AutoScalingInstanceDetails$LaunchConfigurationName": "

    The launch configuration associated with the instance.

    ", + "AutoScalingNotificationTypes$member": null, + "AvailabilityZones$member": null, + "BlockDeviceMapping$VirtualName": "

    The name of the virtual device (for example, ephemeral0).

    ", + "BlockDeviceMapping$DeviceName": "

    The device name exposed to the EC2 instance (for example, /dev/sdh or xvdh).

    ", + "ClassicLinkVPCSecurityGroups$member": null, + "CreateAutoScalingGroupType$AutoScalingGroupName": "

    The name of the group. This name must be unique within the scope of your AWS account.

    ", + "CreateAutoScalingGroupType$PlacementGroup": "

    The name of the placement group into which you'll launch your instances, if any. For more information, see Placement Groups in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateAutoScalingGroupType$VPCZoneIdentifier": "

    A comma-separated list of subnet identifiers for your virtual private cloud (VPC).

    If you specify subnets and Availability Zones with this call, ensure that the subnets' Availability Zones match the Availability Zones specified.

    For more information, see Launching Auto Scaling Instances in a VPC in the Auto Scaling Developer Guide.

    ", + "CreateLaunchConfigurationType$LaunchConfigurationName": "

    The name of the launch configuration. This name must be unique within the scope of your AWS account.

    ", + "CreateLaunchConfigurationType$ImageId": "

    The ID of the Amazon Machine Image (AMI) to use to launch your EC2 instances. For more information, see Finding an AMI in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateLaunchConfigurationType$KeyName": "

    The name of the key pair. For more information, see Amazon EC2 Key Pairs in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateLaunchConfigurationType$ClassicLinkVPCId": "

    The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to. This parameter is supported only if you are launching EC2-Classic instances. For more information, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateLaunchConfigurationType$InstanceType": "

    The instance type of the EC2 instance. For information about available instance types, see Available Instance Types in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateLaunchConfigurationType$KernelId": "

    The ID of the kernel associated with the AMI.

    ", + "CreateLaunchConfigurationType$RamdiskId": "

    The ID of the RAM disk associated with the AMI.

    ", + "Ebs$SnapshotId": "

    The ID of the snapshot.

    ", + "EnableMetricsCollectionQuery$Granularity": "

    The granularity to associate with the metrics to collect. The only valid value is 1Minute.

    ", + "EnabledMetric$Metric": "

    One of the following metrics:

    • GroupMinSize

    • GroupMaxSize

    • GroupDesiredCapacity

    • GroupInServiceInstances

    • GroupPendingInstances

    • GroupStandbyInstances

    • GroupTerminatingInstances

    • GroupTotalInstances

    ", + "EnabledMetric$Granularity": "

    The granularity of the metric. The only valid value is 1Minute.

    ", + "Instance$AvailabilityZone": "

    The Availability Zone in which the instance is running.

    ", + "Instance$LaunchConfigurationName": "

    The launch configuration associated with the instance.

    ", + "InvalidNextToken$message": null, + "LaunchConfiguration$LaunchConfigurationName": "

    The name of the launch configuration.

    ", + "LaunchConfiguration$ImageId": "

    The ID of the Amazon Machine Image (AMI).

    ", + "LaunchConfiguration$KeyName": "

    The name of the key pair.

    ", + "LaunchConfiguration$ClassicLinkVPCId": "

    The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to. This parameter can only be used if you are launching EC2-Classic instances. For more information, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

    ", + "LaunchConfiguration$InstanceType": "

    The instance type for the instances.

    ", + "LaunchConfiguration$KernelId": "

    The ID of the kernel associated with the AMI.

    ", + "LaunchConfiguration$RamdiskId": "

    The ID of the RAM disk associated with the AMI.

    ", + "LimitExceededFault$message": null, + "LoadBalancerNames$member": null, + "LoadBalancerState$LoadBalancerName": "

    The name of the load balancer.

    ", + "LoadBalancerState$State": "

    One of the following load balancer states:

    • Adding - The instances in the group are being registered with the load balancer.

    • Added - All instances in the group are registered with the load balancer.

    • InService - At least one instance in the group passed an ELB health check.

    • Removing - The instances are being deregistered from the load balancer. If connection draining is enabled, Elastic Load Balancing waits for in-flight requests to complete before deregistering the instances.

    ", + "MetricCollectionType$Metric": "

    One of the following metrics:

    • GroupMinSize

    • GroupMaxSize

    • GroupDesiredCapacity

    • GroupInServiceInstances

    • GroupPendingInstances

    • GroupStandbyInstances

    • GroupTerminatingInstances

    • GroupTotalInstances

    ", + "MetricGranularityType$Granularity": "

    The granularity. The only valid value is 1Minute.

    ", + "Metrics$member": null, + "NotificationConfiguration$NotificationType": "

    One of the following event notification types:

    • autoscaling:EC2_INSTANCE_LAUNCH

    • autoscaling:EC2_INSTANCE_LAUNCH_ERROR

    • autoscaling:EC2_INSTANCE_TERMINATE

    • autoscaling:EC2_INSTANCE_TERMINATE_ERROR

    • autoscaling:TEST_NOTIFICATION

    ", + "ProcessNames$member": null, + "ProcessType$ProcessName": "

    One of the following processes:

    • Launch

    • Terminate

    • AddToLoadBalancer

    • AlarmNotification

    • AZRebalance

    • HealthCheck

    • ReplaceUnhealthy

    • ScheduledActions

    ", + "PutScalingPolicyType$PolicyName": "

    The name of the policy.

    ", + "PutScalingPolicyType$AdjustmentType": "

    The adjustment type. Valid values are ChangeInCapacity, ExactCapacity, and PercentChangeInCapacity.

    For more information, see Dynamic Scaling in the Auto Scaling Developer Guide.
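
    A hedged sketch of a simple-scaling policy using the ChangeInCapacity adjustment type; the group name, policy name, and cooldown are placeholder assumptions:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	svc := autoscaling.New(session.Must(session.NewSession(
		&aws.Config{Region: aws.String("us-east-1")}))) // placeholder region

	out, err := svc.PutScalingPolicy(&autoscaling.PutScalingPolicyInput{
		AutoScalingGroupName: aws.String("my-asg"), // placeholder
		PolicyName:           aws.String("scale-out-by-two"),
		PolicyType:           aws.String("SimpleScaling"),
		AdjustmentType:       aws.String("ChangeInCapacity"),
		ScalingAdjustment:    aws.Int64(2),   // add two instances when triggered
		Cooldown:             aws.Int64(300), // placeholder cooldown, in seconds
	})
	if err != nil {
		log.Fatal(err)
	}
	// The returned ARN is what a CloudWatch alarm action would reference.
	fmt.Println(aws.StringValue(out.PolicyARN))
}
```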

    ", + "PutScheduledUpdateGroupActionType$ScheduledActionName": "

    The name of this scaling action.

    ", + "PutScheduledUpdateGroupActionType$Recurrence": "

    The time when recurring future actions will start. The start time is specified in Unix cron syntax. For more information, see Cron in Wikipedia.

    When StartTime and EndTime are specified with Recurrence, they form the boundaries of when the recurring action will start and stop.
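
    A minimal sketch of a recurring scheduled action using the cron syntax described above; the group name, action name, and schedule are placeholders:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	svc := autoscaling.New(session.Must(session.NewSession(
		&aws.Config{Region: aws.String("us-east-1")}))) // placeholder region

	_, err := svc.PutScheduledUpdateGroupAction(&autoscaling.PutScheduledUpdateGroupActionInput{
		AutoScalingGroupName: aws.String("my-asg"),           // placeholder
		ScheduledActionName:  aws.String("weekday-scale-up"), // placeholder
		// Unix cron, evaluated in UTC: 09:00 Monday through Friday.
		Recurrence:      aws.String("0 9 * * 1-5"),
		DesiredCapacity: aws.Int64(4),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```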

    ", + "ResourceContentionFault$message": null, + "ResourceInUseFault$message": null, + "ScalingActivityInProgressFault$message": null, + "ScalingPolicy$AutoScalingGroupName": "

    The name of the Auto Scaling group associated with this scaling policy.

    ", + "ScalingPolicy$PolicyName": "

    The name of the scaling policy.

    ", + "ScalingPolicy$AdjustmentType": "

    The adjustment type, which specifies how ScalingAdjustment is interpreted. Valid values are ChangeInCapacity, ExactCapacity, and PercentChangeInCapacity.

    ", + "ScheduledUpdateGroupAction$AutoScalingGroupName": "

    The name of the group.

    ", + "ScheduledUpdateGroupAction$ScheduledActionName": "

    The name of the scheduled action.

    ", + "ScheduledUpdateGroupAction$Recurrence": "

    The recurring schedule for the action.

    ", + "SuspendedProcess$ProcessName": "

    The name of the suspended process.

    ", + "SuspendedProcess$SuspensionReason": "

    The reason that the process was suspended.

    ", + "UpdateAutoScalingGroupType$PlacementGroup": "

    The name of the placement group into which you'll launch your instances, if any. For more information, see Placement Groups in the Amazon Elastic Compute Cloud User Guide.

    ", + "UpdateAutoScalingGroupType$VPCZoneIdentifier": "

    The ID of the subnet, if you are launching into a VPC. You can specify several subnets in a comma-separated list.

    When you specify VPCZoneIdentifier with AvailabilityZones, ensure that the subnets' Availability Zones match the values you specify for AvailabilityZones.

    For more information, see Launching Auto Scaling Instances in a VPC in the Auto Scaling Developer Guide.

    " + } + }, + "XmlStringMaxLen32": { + "base": null, + "refs": { + "AutoScalingGroup$HealthCheckType": "

    The service to use for the health checks. The valid values are EC2 and ELB.

    ", + "AutoScalingInstanceDetails$LifecycleState": "

    The lifecycle state for the instance. For more information, see Auto Scaling Lifecycle in the Auto Scaling Developer Guide.

    ", + "AutoScalingInstanceDetails$HealthStatus": "

    The health status of this instance. \"Healthy\" means that the instance is healthy and should remain in service. \"Unhealthy\" means that the instance is unhealthy and Auto Scaling should terminate and replace it.

    ", + "CreateAutoScalingGroupType$HealthCheckType": "

    The service to use for the health checks. The valid values are EC2 and ELB.

    By default, health checks use Amazon EC2 instance status checks to determine the health of an instance. For more information, see Health Checks in the Auto Scaling Developer Guide.

    ", + "Instance$HealthStatus": "

    The health status of the instance. \"Healthy\" means that the instance is healthy and should remain in service. \"Unhealthy\" means that the instance is unhealthy and Auto Scaling should terminate and replace it.

    ", + "PutScalingPolicyType$MetricAggregationType": "

    The aggregation type for the CloudWatch metrics. Valid values are Minimum, Maximum, and Average. If the aggregation type is null, the value is treated as Average.

    This parameter is not supported if the policy type is SimpleScaling.

    ", + "ScalingPolicy$MetricAggregationType": "

    The aggregation type for the CloudWatch metrics. Valid values are Minimum, Maximum, and Average.

    ", + "SetInstanceHealthQuery$HealthStatus": "

    The health status of the instance. Set to Healthy if you want the instance to remain in service. Set to Unhealthy if you want the instance to be out of service. Auto Scaling will terminate and replace the unhealthy instance.
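
    A short sketch of marking an instance Unhealthy so that Auto Scaling replaces it; the instance ID is a placeholder:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	svc := autoscaling.New(session.Must(session.NewSession(
		&aws.Config{Region: aws.String("us-east-1")}))) // placeholder region

	_, err := svc.SetInstanceHealth(&autoscaling.SetInstanceHealthInput{
		InstanceId:   aws.String("i-0123456789abcdef0"), // placeholder
		HealthStatus: aws.String("Unhealthy"),           // Auto Scaling terminates and replaces it
	})
	if err != nil {
		log.Fatal(err)
	}
}
```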

    ", + "UpdateAutoScalingGroupType$HealthCheckType": "

    The service to use for the health checks. The valid values are EC2 and ELB.

    " + } + }, + "XmlStringMaxLen64": { + "base": null, + "refs": { + "CreateLaunchConfigurationType$PlacementTenancy": "

    The tenancy of the instance. An instance with a tenancy of dedicated runs on single-tenant hardware and can only be launched into a VPC.

    You must set the value of this parameter to dedicated if you want to launch Dedicated Instances into a shared tenancy VPC (a VPC with the instance placement tenancy attribute set to default).

    If you specify this parameter, be sure to specify at least one subnet when you create your group.

    For more information, see Launching Auto Scaling Instances in a VPC in the Auto Scaling Developer Guide.

    Valid values: default | dedicated

    ", + "LaunchConfiguration$PlacementTenancy": "

    The tenancy of the instance, either default or dedicated. An instance with dedicated tenancy runs on isolated, single-tenant hardware and can only be launched into a VPC.

    ", + "PolicyTypes$member": null, + "PutScalingPolicyType$PolicyType": "

    The policy type. Valid values are SimpleScaling and StepScaling. If the policy type is null, the value is treated as SimpleScaling.

    ", + "ScalingPolicy$PolicyType": "

    The policy type. Valid values are SimpleScaling and StepScaling.

    " + } + }, + "XmlStringUserData": { + "base": null, + "refs": { + "CreateLaunchConfigurationType$UserData": "

    The user data to make available to the launched EC2 instances. For more information, see Instance Metadata and User Data in the Amazon Elastic Compute Cloud User Guide.

    ", + "LaunchConfiguration$UserData": "

    The user data available to the instances.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/autoscaling/2011-01-01/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/autoscaling/2011-01-01/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/autoscaling/2011-01-01/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/autoscaling/2011-01-01/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/autoscaling/2011-01-01/paginators-1.json new file mode 100644 index 000000000..31bc09445 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/autoscaling/2011-01-01/paginators-1.json @@ -0,0 +1,52 @@ +{ + "pagination": { + "DescribeAutoScalingGroups": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxRecords", + "result_key": "AutoScalingGroups" + }, + "DescribeAutoScalingInstances": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxRecords", + "result_key": "AutoScalingInstances" + }, + "DescribeLaunchConfigurations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxRecords", + "result_key": "LaunchConfigurations" + }, + "DescribeNotificationConfigurations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxRecords", + "result_key": "NotificationConfigurations" + }, + "DescribePolicies": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxRecords", + "result_key": "ScalingPolicies" + }, + "DescribeScalingActivities": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxRecords", + "result_key": "Activities" + }, + "DescribeScheduledActions": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxRecords", + "result_key": "ScheduledUpdateGroupActions" + }, + "DescribeTags": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxRecords", + "result_key": "Tags" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/autoscaling/2011-01-01/waiters-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/autoscaling/2011-01-01/waiters-2.json new file mode 100644 index 000000000..76ee9983d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/autoscaling/2011-01-01/waiters-2.json @@ -0,0 +1,62 @@ +{ + "version": 2, + "waiters": { + "GroupExists": { + "acceptors": [ + { + "argument": "length(AutoScalingGroups) > `0`", + "expected": true, + "matcher": "path", + "state": "success" + }, + { + "argument": "length(AutoScalingGroups) > `0`", + "expected": false, + "matcher": "path", + "state": "retry" + } + ], + "delay": 5, + "maxAttempts": 10, + "operation": "DescribeAutoScalingGroups" + }, + "GroupInService": { + "acceptors": [ + { + "argument": "contains(AutoScalingGroups[].[length(Instances[?LifecycleState=='InService']) >= MinSize][], `false`)", + "expected": false, + "matcher": "path", + "state": "success" + }, + { + "argument": "contains(AutoScalingGroups[].[length(Instances[?LifecycleState=='InService']) >= MinSize][], `false`)", + "expected": true, + "matcher": "path", + "state": "retry" + } + ], + "delay": 15, + "maxAttempts": 40, + "operation": "DescribeAutoScalingGroups" + }, + "GroupNotExists": { + "acceptors": [ + { + "argument": "length(AutoScalingGroups) > `0`", + "expected": false, + "matcher": "path", + "state": "success" + }, + { + "argument": 
"length(AutoScalingGroups) > `0`", + "expected": true, + "matcher": "path", + "state": "retry" + } + ], + "delay": 15, + "maxAttempts": 40, + "operation": "DescribeAutoScalingGroups" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/api-2.json new file mode 100644 index 000000000..c5d20ffda --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/api-2.json @@ -0,0 +1,1299 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2010-05-15", + "endpointPrefix":"cloudformation", + "protocol":"query", + "serviceFullName":"AWS CloudFormation", + "signatureVersion":"v4", + "xmlNamespace":"http://cloudformation.amazonaws.com/doc/2010-05-15/" + }, + "operations":{ + "CancelUpdateStack":{ + "name":"CancelUpdateStack", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelUpdateStackInput"} + }, + "ContinueUpdateRollback":{ + "name":"ContinueUpdateRollback", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ContinueUpdateRollbackInput"}, + "output":{ + "shape":"ContinueUpdateRollbackOutput", + "resultWrapper":"ContinueUpdateRollbackResult" + } + }, + "CreateChangeSet":{ + "name":"CreateChangeSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateChangeSetInput"}, + "output":{ + "shape":"CreateChangeSetOutput", + "resultWrapper":"CreateChangeSetResult" + }, + "errors":[ + {"shape":"AlreadyExistsException"}, + {"shape":"InsufficientCapabilitiesException"}, + {"shape":"LimitExceededException"} + ] + }, + "CreateStack":{ + "name":"CreateStack", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateStackInput"}, + "output":{ + "shape":"CreateStackOutput", + "resultWrapper":"CreateStackResult" + }, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"AlreadyExistsException"}, + {"shape":"InsufficientCapabilitiesException"} + ] + }, + "DeleteChangeSet":{ + "name":"DeleteChangeSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteChangeSetInput"}, + "output":{ + "shape":"DeleteChangeSetOutput", + "resultWrapper":"DeleteChangeSetResult" + }, + "errors":[ + {"shape":"InvalidChangeSetStatusException"} + ] + }, + "DeleteStack":{ + "name":"DeleteStack", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteStackInput"} + }, + "DescribeAccountLimits":{ + "name":"DescribeAccountLimits", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAccountLimitsInput"}, + "output":{ + "shape":"DescribeAccountLimitsOutput", + "resultWrapper":"DescribeAccountLimitsResult" + } + }, + "DescribeChangeSet":{ + "name":"DescribeChangeSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeChangeSetInput"}, + "output":{ + "shape":"DescribeChangeSetOutput", + "resultWrapper":"DescribeChangeSetResult" + }, + "errors":[ + {"shape":"ChangeSetNotFoundException"} + ] + }, + "DescribeStackEvents":{ + "name":"DescribeStackEvents", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeStackEventsInput"}, + "output":{ + "shape":"DescribeStackEventsOutput", + "resultWrapper":"DescribeStackEventsResult" + } + }, + "DescribeStackResource":{ + "name":"DescribeStackResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeStackResourceInput"}, + "output":{ + 
"shape":"DescribeStackResourceOutput", + "resultWrapper":"DescribeStackResourceResult" + } + }, + "DescribeStackResources":{ + "name":"DescribeStackResources", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeStackResourcesInput"}, + "output":{ + "shape":"DescribeStackResourcesOutput", + "resultWrapper":"DescribeStackResourcesResult" + } + }, + "DescribeStacks":{ + "name":"DescribeStacks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeStacksInput"}, + "output":{ + "shape":"DescribeStacksOutput", + "resultWrapper":"DescribeStacksResult" + } + }, + "EstimateTemplateCost":{ + "name":"EstimateTemplateCost", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EstimateTemplateCostInput"}, + "output":{ + "shape":"EstimateTemplateCostOutput", + "resultWrapper":"EstimateTemplateCostResult" + } + }, + "ExecuteChangeSet":{ + "name":"ExecuteChangeSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ExecuteChangeSetInput"}, + "output":{ + "shape":"ExecuteChangeSetOutput", + "resultWrapper":"ExecuteChangeSetResult" + }, + "errors":[ + {"shape":"InvalidChangeSetStatusException"}, + {"shape":"ChangeSetNotFoundException"} + ] + }, + "GetStackPolicy":{ + "name":"GetStackPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetStackPolicyInput"}, + "output":{ + "shape":"GetStackPolicyOutput", + "resultWrapper":"GetStackPolicyResult" + } + }, + "GetTemplate":{ + "name":"GetTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetTemplateInput"}, + "output":{ + "shape":"GetTemplateOutput", + "resultWrapper":"GetTemplateResult" + } + }, + "GetTemplateSummary":{ + "name":"GetTemplateSummary", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetTemplateSummaryInput"}, + "output":{ + "shape":"GetTemplateSummaryOutput", + "resultWrapper":"GetTemplateSummaryResult" + } + }, + "ListChangeSets":{ + "name":"ListChangeSets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListChangeSetsInput"}, + "output":{ + "shape":"ListChangeSetsOutput", + "resultWrapper":"ListChangeSetsResult" + } + }, + "ListStackResources":{ + "name":"ListStackResources", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListStackResourcesInput"}, + "output":{ + "shape":"ListStackResourcesOutput", + "resultWrapper":"ListStackResourcesResult" + } + }, + "ListStacks":{ + "name":"ListStacks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListStacksInput"}, + "output":{ + "shape":"ListStacksOutput", + "resultWrapper":"ListStacksResult" + } + }, + "SetStackPolicy":{ + "name":"SetStackPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetStackPolicyInput"} + }, + "SignalResource":{ + "name":"SignalResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SignalResourceInput"} + }, + "UpdateStack":{ + "name":"UpdateStack", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateStackInput"}, + "output":{ + "shape":"UpdateStackOutput", + "resultWrapper":"UpdateStackResult" + }, + "errors":[ + {"shape":"InsufficientCapabilitiesException"} + ] + }, + "ValidateTemplate":{ + "name":"ValidateTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ValidateTemplateInput"}, + "output":{ + "shape":"ValidateTemplateOutput", + "resultWrapper":"ValidateTemplateResult" + } + } + }, 
+ "shapes":{ + "AccountLimit":{ + "type":"structure", + "members":{ + "Name":{"shape":"LimitName"}, + "Value":{"shape":"LimitValue"} + } + }, + "AccountLimitList":{ + "type":"list", + "member":{"shape":"AccountLimit"} + }, + "AllowedValue":{"type":"string"}, + "AllowedValues":{ + "type":"list", + "member":{"shape":"AllowedValue"} + }, + "AlreadyExistsException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AlreadyExistsException", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "CancelUpdateStackInput":{ + "type":"structure", + "required":["StackName"], + "members":{ + "StackName":{"shape":"StackName"} + } + }, + "Capabilities":{ + "type":"list", + "member":{"shape":"Capability"} + }, + "CapabilitiesReason":{"type":"string"}, + "Capability":{ + "type":"string", + "enum":["CAPABILITY_IAM"] + }, + "CausingEntity":{"type":"string"}, + "Change":{ + "type":"structure", + "members":{ + "Type":{"shape":"ChangeType"}, + "ResourceChange":{"shape":"ResourceChange"} + } + }, + "ChangeAction":{ + "type":"string", + "enum":[ + "Add", + "Modify", + "Remove" + ] + }, + "ChangeSetId":{ + "type":"string", + "min":1, + "pattern":"arn:[-a-zA-Z0-9:/]*" + }, + "ChangeSetName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[a-zA-Z][-a-zA-Z0-9]*" + }, + "ChangeSetNameOrId":{ + "type":"string", + "max":1600, + "min":1, + "pattern":"[a-zA-Z][-a-zA-Z0-9]*|arn:[-a-zA-Z0-9:/]*" + }, + "ChangeSetNotFoundException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ChangeSetNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ChangeSetStatus":{ + "type":"string", + "enum":[ + "CREATE_PENDING", + "CREATE_IN_PROGRESS", + "CREATE_COMPLETE", + "DELETE_COMPLETE", + "FAILED" + ] + }, + "ChangeSetStatusReason":{"type":"string"}, + "ChangeSetSummaries":{ + "type":"list", + "member":{"shape":"ChangeSetSummary"} + }, + "ChangeSetSummary":{ + "type":"structure", + "members":{ + "StackId":{"shape":"StackId"}, + "StackName":{"shape":"StackName"}, + "ChangeSetId":{"shape":"ChangeSetId"}, + "ChangeSetName":{"shape":"ChangeSetName"}, + "ExecutionStatus":{"shape":"ExecutionStatus"}, + "Status":{"shape":"ChangeSetStatus"}, + "StatusReason":{"shape":"ChangeSetStatusReason"}, + "CreationTime":{"shape":"CreationTime"}, + "Description":{"shape":"Description"} + } + }, + "ChangeSource":{ + "type":"string", + "enum":[ + "ResourceReference", + "ParameterReference", + "ResourceAttribute", + "DirectModification", + "Automatic" + ] + }, + "ChangeType":{ + "type":"string", + "enum":["Resource"] + }, + "Changes":{ + "type":"list", + "member":{"shape":"Change"} + }, + "ClientToken":{ + "type":"string", + "max":128, + "min":1 + }, + "ContinueUpdateRollbackInput":{ + "type":"structure", + "required":["StackName"], + "members":{ + "StackName":{"shape":"StackNameOrId"} + } + }, + "ContinueUpdateRollbackOutput":{ + "type":"structure", + "members":{ + } + }, + "CreateChangeSetInput":{ + "type":"structure", + "required":[ + "StackName", + "ChangeSetName" + ], + "members":{ + "StackName":{"shape":"StackNameOrId"}, + "TemplateBody":{"shape":"TemplateBody"}, + "TemplateURL":{"shape":"TemplateURL"}, + "UsePreviousTemplate":{"shape":"UsePreviousTemplate"}, + "Parameters":{"shape":"Parameters"}, + "Capabilities":{"shape":"Capabilities"}, + "ResourceTypes":{"shape":"ResourceTypes"}, + "NotificationARNs":{"shape":"NotificationARNs"}, + "Tags":{"shape":"Tags"}, + "ChangeSetName":{"shape":"ChangeSetName"}, + "ClientToken":{"shape":"ClientToken"}, + 
"Description":{"shape":"Description"} + } + }, + "CreateChangeSetOutput":{ + "type":"structure", + "members":{ + "Id":{"shape":"ChangeSetId"} + } + }, + "CreateStackInput":{ + "type":"structure", + "required":["StackName"], + "members":{ + "StackName":{"shape":"StackName"}, + "TemplateBody":{"shape":"TemplateBody"}, + "TemplateURL":{"shape":"TemplateURL"}, + "Parameters":{"shape":"Parameters"}, + "DisableRollback":{"shape":"DisableRollback"}, + "TimeoutInMinutes":{"shape":"TimeoutMinutes"}, + "NotificationARNs":{"shape":"NotificationARNs"}, + "Capabilities":{"shape":"Capabilities"}, + "ResourceTypes":{"shape":"ResourceTypes"}, + "OnFailure":{"shape":"OnFailure"}, + "StackPolicyBody":{"shape":"StackPolicyBody"}, + "StackPolicyURL":{"shape":"StackPolicyURL"}, + "Tags":{"shape":"Tags"} + } + }, + "CreateStackOutput":{ + "type":"structure", + "members":{ + "StackId":{"shape":"StackId"} + } + }, + "CreationTime":{"type":"timestamp"}, + "DeleteChangeSetInput":{ + "type":"structure", + "required":["ChangeSetName"], + "members":{ + "ChangeSetName":{"shape":"ChangeSetNameOrId"}, + "StackName":{"shape":"StackNameOrId"} + } + }, + "DeleteChangeSetOutput":{ + "type":"structure", + "members":{ + } + }, + "DeleteStackInput":{ + "type":"structure", + "required":["StackName"], + "members":{ + "StackName":{"shape":"StackName"}, + "RetainResources":{"shape":"RetainResources"} + } + }, + "DeletionTime":{"type":"timestamp"}, + "DescribeAccountLimitsInput":{ + "type":"structure", + "members":{ + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeAccountLimitsOutput":{ + "type":"structure", + "members":{ + "AccountLimits":{"shape":"AccountLimitList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeChangeSetInput":{ + "type":"structure", + "required":["ChangeSetName"], + "members":{ + "ChangeSetName":{"shape":"ChangeSetNameOrId"}, + "StackName":{"shape":"StackNameOrId"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeChangeSetOutput":{ + "type":"structure", + "members":{ + "ChangeSetName":{"shape":"ChangeSetName"}, + "ChangeSetId":{"shape":"ChangeSetId"}, + "StackId":{"shape":"StackId"}, + "StackName":{"shape":"StackName"}, + "Description":{"shape":"Description"}, + "Parameters":{"shape":"Parameters"}, + "CreationTime":{"shape":"CreationTime"}, + "ExecutionStatus":{"shape":"ExecutionStatus"}, + "Status":{"shape":"ChangeSetStatus"}, + "StatusReason":{"shape":"ChangeSetStatusReason"}, + "NotificationARNs":{"shape":"NotificationARNs"}, + "Capabilities":{"shape":"Capabilities"}, + "Tags":{"shape":"Tags"}, + "Changes":{"shape":"Changes"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeStackEventsInput":{ + "type":"structure", + "members":{ + "StackName":{"shape":"StackName"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeStackEventsOutput":{ + "type":"structure", + "members":{ + "StackEvents":{"shape":"StackEvents"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeStackResourceInput":{ + "type":"structure", + "required":[ + "StackName", + "LogicalResourceId" + ], + "members":{ + "StackName":{"shape":"StackName"}, + "LogicalResourceId":{"shape":"LogicalResourceId"} + } + }, + "DescribeStackResourceOutput":{ + "type":"structure", + "members":{ + "StackResourceDetail":{"shape":"StackResourceDetail"} + } + }, + "DescribeStackResourcesInput":{ + "type":"structure", + "members":{ + "StackName":{"shape":"StackName"}, + "LogicalResourceId":{"shape":"LogicalResourceId"}, + "PhysicalResourceId":{"shape":"PhysicalResourceId"} + } + }, + "DescribeStackResourcesOutput":{ + 
"type":"structure", + "members":{ + "StackResources":{"shape":"StackResources"} + } + }, + "DescribeStacksInput":{ + "type":"structure", + "members":{ + "StackName":{"shape":"StackName"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeStacksOutput":{ + "type":"structure", + "members":{ + "Stacks":{"shape":"Stacks"}, + "NextToken":{"shape":"NextToken"} + } + }, + "Description":{ + "type":"string", + "max":1024, + "min":1 + }, + "DisableRollback":{"type":"boolean"}, + "EstimateTemplateCostInput":{ + "type":"structure", + "members":{ + "TemplateBody":{"shape":"TemplateBody"}, + "TemplateURL":{"shape":"TemplateURL"}, + "Parameters":{"shape":"Parameters"} + } + }, + "EstimateTemplateCostOutput":{ + "type":"structure", + "members":{ + "Url":{"shape":"Url"} + } + }, + "EvaluationType":{ + "type":"string", + "enum":[ + "Static", + "Dynamic" + ] + }, + "EventId":{"type":"string"}, + "ExecuteChangeSetInput":{ + "type":"structure", + "required":["ChangeSetName"], + "members":{ + "ChangeSetName":{"shape":"ChangeSetNameOrId"}, + "StackName":{"shape":"StackNameOrId"} + } + }, + "ExecuteChangeSetOutput":{ + "type":"structure", + "members":{ + } + }, + "ExecutionStatus":{ + "type":"string", + "enum":[ + "UNAVAILABLE", + "AVAILABLE", + "EXECUTE_IN_PROGRESS", + "EXECUTE_COMPLETE", + "EXECUTE_FAILED", + "OBSOLETE" + ] + }, + "GetStackPolicyInput":{ + "type":"structure", + "required":["StackName"], + "members":{ + "StackName":{"shape":"StackName"} + } + }, + "GetStackPolicyOutput":{ + "type":"structure", + "members":{ + "StackPolicyBody":{"shape":"StackPolicyBody"} + } + }, + "GetTemplateInput":{ + "type":"structure", + "required":["StackName"], + "members":{ + "StackName":{"shape":"StackName"} + } + }, + "GetTemplateOutput":{ + "type":"structure", + "members":{ + "TemplateBody":{"shape":"TemplateBody"} + } + }, + "GetTemplateSummaryInput":{ + "type":"structure", + "members":{ + "TemplateBody":{"shape":"TemplateBody"}, + "TemplateURL":{"shape":"TemplateURL"}, + "StackName":{"shape":"StackNameOrId"} + } + }, + "GetTemplateSummaryOutput":{ + "type":"structure", + "members":{ + "Parameters":{"shape":"ParameterDeclarations"}, + "Description":{"shape":"Description"}, + "Capabilities":{"shape":"Capabilities"}, + "CapabilitiesReason":{"shape":"CapabilitiesReason"}, + "ResourceTypes":{"shape":"ResourceTypes"}, + "Version":{"shape":"Version"}, + "Metadata":{"shape":"Metadata"} + } + }, + "InsufficientCapabilitiesException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InsufficientCapabilitiesException", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidChangeSetStatusException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidChangeSetStatus", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "LastUpdatedTime":{"type":"timestamp"}, + "LimitExceededException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"LimitExceededException", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "LimitName":{"type":"string"}, + "LimitValue":{"type":"integer"}, + "ListChangeSetsInput":{ + "type":"structure", + "required":["StackName"], + "members":{ + "StackName":{"shape":"StackNameOrId"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListChangeSetsOutput":{ + "type":"structure", + "members":{ + "Summaries":{"shape":"ChangeSetSummaries"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListStackResourcesInput":{ + "type":"structure", + "required":["StackName"], + "members":{ + 
"StackName":{"shape":"StackName"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListStackResourcesOutput":{ + "type":"structure", + "members":{ + "StackResourceSummaries":{"shape":"StackResourceSummaries"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListStacksInput":{ + "type":"structure", + "members":{ + "NextToken":{"shape":"NextToken"}, + "StackStatusFilter":{"shape":"StackStatusFilter"} + } + }, + "ListStacksOutput":{ + "type":"structure", + "members":{ + "StackSummaries":{"shape":"StackSummaries"}, + "NextToken":{"shape":"NextToken"} + } + }, + "LogicalResourceId":{"type":"string"}, + "Metadata":{"type":"string"}, + "NextToken":{ + "type":"string", + "max":1024, + "min":1 + }, + "NoEcho":{"type":"boolean"}, + "NotificationARN":{"type":"string"}, + "NotificationARNs":{ + "type":"list", + "member":{"shape":"NotificationARN"}, + "max":5 + }, + "OnFailure":{ + "type":"string", + "enum":[ + "DO_NOTHING", + "ROLLBACK", + "DELETE" + ] + }, + "Output":{ + "type":"structure", + "members":{ + "OutputKey":{"shape":"OutputKey"}, + "OutputValue":{"shape":"OutputValue"}, + "Description":{"shape":"Description"} + } + }, + "OutputKey":{"type":"string"}, + "OutputValue":{"type":"string"}, + "Outputs":{ + "type":"list", + "member":{"shape":"Output"} + }, + "Parameter":{ + "type":"structure", + "members":{ + "ParameterKey":{"shape":"ParameterKey"}, + "ParameterValue":{"shape":"ParameterValue"}, + "UsePreviousValue":{"shape":"UsePreviousValue"} + } + }, + "ParameterConstraints":{ + "type":"structure", + "members":{ + "AllowedValues":{"shape":"AllowedValues"} + } + }, + "ParameterDeclaration":{ + "type":"structure", + "members":{ + "ParameterKey":{"shape":"ParameterKey"}, + "DefaultValue":{"shape":"ParameterValue"}, + "ParameterType":{"shape":"ParameterType"}, + "NoEcho":{"shape":"NoEcho"}, + "Description":{"shape":"Description"}, + "ParameterConstraints":{"shape":"ParameterConstraints"} + } + }, + "ParameterDeclarations":{ + "type":"list", + "member":{"shape":"ParameterDeclaration"} + }, + "ParameterKey":{"type":"string"}, + "ParameterType":{"type":"string"}, + "ParameterValue":{"type":"string"}, + "Parameters":{ + "type":"list", + "member":{"shape":"Parameter"} + }, + "PhysicalResourceId":{"type":"string"}, + "PropertyName":{"type":"string"}, + "Replacement":{ + "type":"string", + "enum":[ + "True", + "False", + "Conditional" + ] + }, + "RequiresRecreation":{ + "type":"string", + "enum":[ + "Never", + "Conditionally", + "Always" + ] + }, + "ResourceAttribute":{ + "type":"string", + "enum":[ + "Properties", + "Metadata", + "CreationPolicy", + "UpdatePolicy", + "DeletionPolicy", + "Tags" + ] + }, + "ResourceChange":{ + "type":"structure", + "members":{ + "Action":{"shape":"ChangeAction"}, + "LogicalResourceId":{"shape":"LogicalResourceId"}, + "PhysicalResourceId":{"shape":"PhysicalResourceId"}, + "ResourceType":{"shape":"ResourceType"}, + "Replacement":{"shape":"Replacement"}, + "Scope":{"shape":"Scope"}, + "Details":{"shape":"ResourceChangeDetails"} + } + }, + "ResourceChangeDetail":{ + "type":"structure", + "members":{ + "Target":{"shape":"ResourceTargetDefinition"}, + "Evaluation":{"shape":"EvaluationType"}, + "ChangeSource":{"shape":"ChangeSource"}, + "CausingEntity":{"shape":"CausingEntity"} + } + }, + "ResourceChangeDetails":{ + "type":"list", + "member":{"shape":"ResourceChangeDetail"} + }, + "ResourceProperties":{"type":"string"}, + "ResourceSignalStatus":{ + "type":"string", + "enum":[ + "SUCCESS", + "FAILURE" + ] + }, + "ResourceSignalUniqueId":{ + "type":"string", + "max":64, + "min":1 
+ }, + "ResourceStatus":{ + "type":"string", + "enum":[ + "CREATE_IN_PROGRESS", + "CREATE_FAILED", + "CREATE_COMPLETE", + "DELETE_IN_PROGRESS", + "DELETE_FAILED", + "DELETE_COMPLETE", + "DELETE_SKIPPED", + "UPDATE_IN_PROGRESS", + "UPDATE_FAILED", + "UPDATE_COMPLETE" + ] + }, + "ResourceStatusReason":{"type":"string"}, + "ResourceTargetDefinition":{ + "type":"structure", + "members":{ + "Attribute":{"shape":"ResourceAttribute"}, + "Name":{"shape":"PropertyName"}, + "RequiresRecreation":{"shape":"RequiresRecreation"} + } + }, + "ResourceType":{ + "type":"string", + "max":256, + "min":1 + }, + "ResourceTypes":{ + "type":"list", + "member":{"shape":"ResourceType"} + }, + "RetainResources":{ + "type":"list", + "member":{"shape":"LogicalResourceId"} + }, + "Scope":{ + "type":"list", + "member":{"shape":"ResourceAttribute"} + }, + "SetStackPolicyInput":{ + "type":"structure", + "required":["StackName"], + "members":{ + "StackName":{"shape":"StackName"}, + "StackPolicyBody":{"shape":"StackPolicyBody"}, + "StackPolicyURL":{"shape":"StackPolicyURL"} + } + }, + "SignalResourceInput":{ + "type":"structure", + "required":[ + "StackName", + "LogicalResourceId", + "UniqueId", + "Status" + ], + "members":{ + "StackName":{"shape":"StackNameOrId"}, + "LogicalResourceId":{"shape":"LogicalResourceId"}, + "UniqueId":{"shape":"ResourceSignalUniqueId"}, + "Status":{"shape":"ResourceSignalStatus"} + } + }, + "Stack":{ + "type":"structure", + "required":[ + "StackName", + "CreationTime", + "StackStatus" + ], + "members":{ + "StackId":{"shape":"StackId"}, + "StackName":{"shape":"StackName"}, + "Description":{"shape":"Description"}, + "Parameters":{"shape":"Parameters"}, + "CreationTime":{"shape":"CreationTime"}, + "LastUpdatedTime":{"shape":"LastUpdatedTime"}, + "StackStatus":{"shape":"StackStatus"}, + "StackStatusReason":{"shape":"StackStatusReason"}, + "DisableRollback":{"shape":"DisableRollback"}, + "NotificationARNs":{"shape":"NotificationARNs"}, + "TimeoutInMinutes":{"shape":"TimeoutMinutes"}, + "Capabilities":{"shape":"Capabilities"}, + "Outputs":{"shape":"Outputs"}, + "Tags":{"shape":"Tags"} + } + }, + "StackEvent":{ + "type":"structure", + "required":[ + "StackId", + "EventId", + "StackName", + "Timestamp" + ], + "members":{ + "StackId":{"shape":"StackId"}, + "EventId":{"shape":"EventId"}, + "StackName":{"shape":"StackName"}, + "LogicalResourceId":{"shape":"LogicalResourceId"}, + "PhysicalResourceId":{"shape":"PhysicalResourceId"}, + "ResourceType":{"shape":"ResourceType"}, + "Timestamp":{"shape":"Timestamp"}, + "ResourceStatus":{"shape":"ResourceStatus"}, + "ResourceStatusReason":{"shape":"ResourceStatusReason"}, + "ResourceProperties":{"shape":"ResourceProperties"} + } + }, + "StackEvents":{ + "type":"list", + "member":{"shape":"StackEvent"} + }, + "StackId":{"type":"string"}, + "StackName":{"type":"string"}, + "StackNameOrId":{ + "type":"string", + "min":1, + "pattern":"([a-zA-Z][-a-zA-Z0-9]*)|(arn:\\b(aws|aws-us-gov|aws-cn)\\b:[-a-zA-Z0-9:/._+]*)" + }, + "StackPolicyBody":{ + "type":"string", + "max":16384, + "min":1 + }, + "StackPolicyDuringUpdateBody":{ + "type":"string", + "max":16384, + "min":1 + }, + "StackPolicyDuringUpdateURL":{ + "type":"string", + "max":1350, + "min":1 + }, + "StackPolicyURL":{ + "type":"string", + "max":1350, + "min":1 + }, + "StackResource":{ + "type":"structure", + "required":[ + "LogicalResourceId", + "ResourceType", + "Timestamp", + "ResourceStatus" + ], + "members":{ + "StackName":{"shape":"StackName"}, + "StackId":{"shape":"StackId"}, + 
"LogicalResourceId":{"shape":"LogicalResourceId"}, + "PhysicalResourceId":{"shape":"PhysicalResourceId"}, + "ResourceType":{"shape":"ResourceType"}, + "Timestamp":{"shape":"Timestamp"}, + "ResourceStatus":{"shape":"ResourceStatus"}, + "ResourceStatusReason":{"shape":"ResourceStatusReason"}, + "Description":{"shape":"Description"} + } + }, + "StackResourceDetail":{ + "type":"structure", + "required":[ + "LogicalResourceId", + "ResourceType", + "LastUpdatedTimestamp", + "ResourceStatus" + ], + "members":{ + "StackName":{"shape":"StackName"}, + "StackId":{"shape":"StackId"}, + "LogicalResourceId":{"shape":"LogicalResourceId"}, + "PhysicalResourceId":{"shape":"PhysicalResourceId"}, + "ResourceType":{"shape":"ResourceType"}, + "LastUpdatedTimestamp":{"shape":"Timestamp"}, + "ResourceStatus":{"shape":"ResourceStatus"}, + "ResourceStatusReason":{"shape":"ResourceStatusReason"}, + "Description":{"shape":"Description"}, + "Metadata":{"shape":"Metadata"} + } + }, + "StackResourceSummaries":{ + "type":"list", + "member":{"shape":"StackResourceSummary"} + }, + "StackResourceSummary":{ + "type":"structure", + "required":[ + "LogicalResourceId", + "ResourceType", + "LastUpdatedTimestamp", + "ResourceStatus" + ], + "members":{ + "LogicalResourceId":{"shape":"LogicalResourceId"}, + "PhysicalResourceId":{"shape":"PhysicalResourceId"}, + "ResourceType":{"shape":"ResourceType"}, + "LastUpdatedTimestamp":{"shape":"Timestamp"}, + "ResourceStatus":{"shape":"ResourceStatus"}, + "ResourceStatusReason":{"shape":"ResourceStatusReason"} + } + }, + "StackResources":{ + "type":"list", + "member":{"shape":"StackResource"} + }, + "StackStatus":{ + "type":"string", + "enum":[ + "CREATE_IN_PROGRESS", + "CREATE_FAILED", + "CREATE_COMPLETE", + "ROLLBACK_IN_PROGRESS", + "ROLLBACK_FAILED", + "ROLLBACK_COMPLETE", + "DELETE_IN_PROGRESS", + "DELETE_FAILED", + "DELETE_COMPLETE", + "UPDATE_IN_PROGRESS", + "UPDATE_COMPLETE_CLEANUP_IN_PROGRESS", + "UPDATE_COMPLETE", + "UPDATE_ROLLBACK_IN_PROGRESS", + "UPDATE_ROLLBACK_FAILED", + "UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS", + "UPDATE_ROLLBACK_COMPLETE" + ] + }, + "StackStatusFilter":{ + "type":"list", + "member":{"shape":"StackStatus"} + }, + "StackStatusReason":{"type":"string"}, + "StackSummaries":{ + "type":"list", + "member":{"shape":"StackSummary"} + }, + "StackSummary":{ + "type":"structure", + "required":[ + "StackName", + "CreationTime", + "StackStatus" + ], + "members":{ + "StackId":{"shape":"StackId"}, + "StackName":{"shape":"StackName"}, + "TemplateDescription":{"shape":"TemplateDescription"}, + "CreationTime":{"shape":"CreationTime"}, + "LastUpdatedTime":{"shape":"LastUpdatedTime"}, + "DeletionTime":{"shape":"DeletionTime"}, + "StackStatus":{"shape":"StackStatus"}, + "StackStatusReason":{"shape":"StackStatusReason"} + } + }, + "Stacks":{ + "type":"list", + "member":{"shape":"Stack"} + }, + "Tag":{ + "type":"structure", + "members":{ + "Key":{"shape":"TagKey"}, + "Value":{"shape":"TagValue"} + } + }, + "TagKey":{"type":"string"}, + "TagValue":{"type":"string"}, + "Tags":{ + "type":"list", + "member":{"shape":"Tag"} + }, + "TemplateBody":{ + "type":"string", + "min":1 + }, + "TemplateDescription":{"type":"string"}, + "TemplateParameter":{ + "type":"structure", + "members":{ + "ParameterKey":{"shape":"ParameterKey"}, + "DefaultValue":{"shape":"ParameterValue"}, + "NoEcho":{"shape":"NoEcho"}, + "Description":{"shape":"Description"} + } + }, + "TemplateParameters":{ + "type":"list", + "member":{"shape":"TemplateParameter"} + }, + "TemplateURL":{ + "type":"string", + 
"max":1024, + "min":1 + }, + "TimeoutMinutes":{ + "type":"integer", + "min":1 + }, + "Timestamp":{"type":"timestamp"}, + "UpdateStackInput":{ + "type":"structure", + "required":["StackName"], + "members":{ + "StackName":{"shape":"StackName"}, + "TemplateBody":{"shape":"TemplateBody"}, + "TemplateURL":{"shape":"TemplateURL"}, + "UsePreviousTemplate":{"shape":"UsePreviousTemplate"}, + "StackPolicyDuringUpdateBody":{"shape":"StackPolicyDuringUpdateBody"}, + "StackPolicyDuringUpdateURL":{"shape":"StackPolicyDuringUpdateURL"}, + "Parameters":{"shape":"Parameters"}, + "Capabilities":{"shape":"Capabilities"}, + "ResourceTypes":{"shape":"ResourceTypes"}, + "StackPolicyBody":{"shape":"StackPolicyBody"}, + "StackPolicyURL":{"shape":"StackPolicyURL"}, + "NotificationARNs":{"shape":"NotificationARNs"}, + "Tags":{"shape":"Tags"} + } + }, + "UpdateStackOutput":{ + "type":"structure", + "members":{ + "StackId":{"shape":"StackId"} + } + }, + "Url":{"type":"string"}, + "UsePreviousTemplate":{"type":"boolean"}, + "UsePreviousValue":{"type":"boolean"}, + "ValidateTemplateInput":{ + "type":"structure", + "members":{ + "TemplateBody":{"shape":"TemplateBody"}, + "TemplateURL":{"shape":"TemplateURL"} + } + }, + "ValidateTemplateOutput":{ + "type":"structure", + "members":{ + "Parameters":{"shape":"TemplateParameters"}, + "Description":{"shape":"Description"}, + "Capabilities":{"shape":"Capabilities"}, + "CapabilitiesReason":{"shape":"CapabilitiesReason"} + } + }, + "Version":{"type":"string"} + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/docs-2.json new file mode 100644 index 000000000..6deae3ea0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/docs-2.json @@ -0,0 +1,1049 @@ +{ + "version": "2.0", + "service": "AWS CloudFormation

    AWS CloudFormation enables you to create and manage AWS infrastructure deployments predictably and repeatedly. AWS CloudFormation helps you leverage AWS products such as Amazon EC2, EBS, Amazon SNS, ELB, and Auto Scaling to build highly reliable, highly scalable, cost-effective applications without worrying about creating and configuring the underlying AWS infrastructure.

    With AWS CloudFormation, you declare all of your resources and dependencies in a template file. The template defines a collection of resources as a single unit called a stack. AWS CloudFormation creates and deletes all member resources of the stack together and manages all dependencies between the resources for you.

    For more information about this product, go to the CloudFormation Product Page.

    AWS CloudFormation makes use of other AWS products. If you need additional technical information about a specific AWS product, you can find the product's technical documentation at http://docs.aws.amazon.com/.

    ", + "operations": { + "CancelUpdateStack": "

    Cancels an update on the specified stack. If the call completes successfully, the stack rolls back the update and reverts to the previous stack configuration.

    You can cancel only stacks that are in the UPDATE_IN_PROGRESS state.

    ", + "ContinueUpdateRollback": "

    For a specified stack that is in the UPDATE_ROLLBACK_FAILED state, continues rolling it back to the UPDATE_ROLLBACK_COMPLETE state. Depending on the cause of the failure, you can manually fix the error and continue the rollback. By continuing the rollback, you can return your stack to a working state (the UPDATE_ROLLBACK_COMPLETE state), and then try to update the stack again.

    A stack goes into the UPDATE_ROLLBACK_FAILED state when AWS CloudFormation cannot roll back all changes after a failed stack update. For example, you might have a stack that is rolling back to an old database instance that was deleted outside of AWS CloudFormation. Because AWS CloudFormation doesn't know the database was deleted, it assumes that the database instance still exists and attempts to roll back to it, causing the update rollback to fail.

    ", + "CreateChangeSet": "

    Creates a list of changes for a stack. AWS CloudFormation generates the change set by comparing the stack's information with the information that you submit. A change set can help you understand which resources AWS CloudFormation will change and how it will change them before you update your stack. Change sets allow you to check before you make a change so that you don't delete or replace critical resources.

    AWS CloudFormation doesn't make any changes to the stack when you create a change set. To make the specified changes, you must execute the change set by using the ExecuteChangeSet action.

    After the call successfully completes, AWS CloudFormation starts creating the change set. To check the status of the change set, use the DescribeChangeSet action.
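
    A hedged sketch of creating a change set against the current template; the stack name, change set name, and parameter key are placeholder assumptions. The returned ID is what you would pass to DescribeChangeSet while polling for CREATE_COMPLETE:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudformation"
)

func main() {
	svc := cloudformation.New(session.Must(session.NewSession(
		&aws.Config{Region: aws.String("us-east-1")}))) // placeholder region

	out, err := svc.CreateChangeSet(&cloudformation.CreateChangeSetInput{
		StackName:           aws.String("my-stack"),   // placeholder
		ChangeSetName:       aws.String("my-changes"), // placeholder
		UsePreviousTemplate: aws.Bool(true),           // diff only the parameter change
		Parameters: []*cloudformation.Parameter{{
			ParameterKey:   aws.String("InstanceType"), // placeholder parameter
			ParameterValue: aws.String("t2.small"),
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.Id)) // poll DescribeChangeSet with this ID
}
```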

    ", + "CreateStack": "

    Creates a stack as specified in the template. After the call completes successfully, the stack creation starts. You can check the status of the stack via the DescribeStacks API.
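
    A minimal sketch of the create-then-poll flow; the stack name and template URL are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudformation"
)

func main() {
	svc := cloudformation.New(session.Must(session.NewSession(
		&aws.Config{Region: aws.String("us-east-1")}))) // placeholder region

	out, err := svc.CreateStack(&cloudformation.CreateStackInput{
		StackName:        aws.String("my-stack"), // placeholder
		TemplateURL:      aws.String("https://s3.amazonaws.com/my-bucket/template.json"), // placeholder
		TimeoutInMinutes: aws.Int64(30),
		OnFailure:        aws.String("ROLLBACK"),
	})
	if err != nil {
		log.Fatal(err)
	}
	// Poll DescribeStacks with this ID until StackStatus is CREATE_COMPLETE.
	fmt.Println(aws.StringValue(out.StackId))
}
```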

    ", + "DeleteChangeSet": "

    Deletes the specified change set. Deleting change sets ensures that no one executes the wrong change set.

    If the call completes successfully, AWS CloudFormation has deleted the change set.

    ", + "DeleteStack": "

    Deletes a specified stack. Once the call completes successfully, stack deletion starts. Deleted stacks do not show up in the DescribeStacks API if the deletion has been completed successfully.

    ", + "DescribeAccountLimits": "

    Retrieves your account's AWS CloudFormation limits, such as the maximum number of stacks that you can create in your account.

    ", + "DescribeChangeSet": "

    Returns the inputs for the change set and a list of changes that AWS CloudFormation will make if you execute the change set. For more information, see Updating Stacks Using Change Sets in the AWS CloudFormation User Guide.

    ", + "DescribeStackEvents": "

    Returns all stack related events for a specified stack in reverse chronological order. For more information about a stack's event history, go to Stacks in the AWS CloudFormation User Guide.

    You can list events for stacks that have failed to create or have been deleted by specifying the unique stack identifier (stack ID).

    ", + "DescribeStackResource": "

    Returns a description of the specified resource in the specified stack.

    For deleted stacks, DescribeStackResource returns resource information for up to 90 days after the stack has been deleted.

    ", + "DescribeStackResources": "

    Returns AWS resource descriptions for running and deleted stacks. If StackName is specified, all the associated resources that are part of the stack are returned. If PhysicalResourceId is specified, the associated resources of the stack that the resource belongs to are returned.

    Only the first 100 resources will be returned. If your stack has more resources than this, you should use ListStackResources instead.

    For deleted stacks, DescribeStackResources returns resource information for up to 90 days after the stack has been deleted.

    You must specify either StackName or PhysicalResourceId, but not both. In addition, you can specify LogicalResourceId to filter the returned result. For more information about resources, the LogicalResourceId and PhysicalResourceId, go to the AWS CloudFormation User Guide.

    A ValidationError is returned if you specify both StackName and PhysicalResourceId in the same request.

    ", + "DescribeStacks": "

    Returns the description for the specified stack; if no stack name is specified, it returns the descriptions for all created stacks.

    ", + "EstimateTemplateCost": "

    Returns the estimated monthly cost of a template. The return value is an AWS Simple Monthly Calculator URL with a query string that describes the resources required to run the template.

    ", + "ExecuteChangeSet": "

    Updates a stack using the input information that was provided when the specified change set was created. After the call successfully completes, AWS CloudFormation starts updating the stack. Use the DescribeStacks action to view the status of the update.

    When you execute a change set, AWS CloudFormation deletes all other change sets associated with the stack because they aren't valid for the updated stack.

    If a stack policy is associated with the stack, AWS CloudFormation enforces the policy during the update. You can't specify a temporary stack policy that overrides the current policy.
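
    A short sketch of executing the change set created earlier; both names are placeholders, and afterwards you would poll DescribeStacks until the stack leaves UPDATE_IN_PROGRESS:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudformation"
)

func main() {
	svc := cloudformation.New(session.Must(session.NewSession(
		&aws.Config{Region: aws.String("us-east-1")}))) // placeholder region

	_, err := svc.ExecuteChangeSet(&cloudformation.ExecuteChangeSetInput{
		StackName:     aws.String("my-stack"),   // placeholder
		ChangeSetName: aws.String("my-changes"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	// Other change sets on the stack are deleted once execution starts.
}
```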

    ", + "GetStackPolicy": "

    Returns the stack policy for a specified stack. If a stack doesn't have a policy, a null value is returned.

    ", + "GetTemplate": "

    Returns the template body for a specified stack. You can get the template for running or deleted stacks.

    For deleted stacks, GetTemplate returns the template for up to 90 days after the stack has been deleted.

    If the template does not exist, a ValidationError is returned.

    ", + "GetTemplateSummary": "

    Returns information about a new or existing template. The GetTemplateSummary action is useful for viewing parameter information, such as default parameter values and parameter types, before you create or update a stack.

    You can use the GetTemplateSummary action when you submit a template, or you can get template information for a running or deleted stack.

    For deleted stacks, GetTemplateSummary returns the template information for up to 90 days after the stack has been deleted. If the template does not exist, a ValidationError is returned.
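
    A sketch of inspecting parameter declarations with the vendored Go SDK; "my-stack" is a placeholder:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/cloudformation"
    )

    func main() {
        svc := cloudformation.New(session.Must(session.NewSession()))

        // Exactly one of StackName, TemplateBody, or TemplateURL may be set.
        out, err := svc.GetTemplateSummary(&cloudformation.GetTemplateSummaryInput{
            StackName: aws.String("my-stack"),
        })
        if err != nil {
            log.Fatal(err)
        }
        for _, p := range out.Parameters {
            fmt.Printf("%s (%s) default=%s\n", aws.StringValue(p.ParameterKey),
                aws.StringValue(p.ParameterType), aws.StringValue(p.DefaultValue))
        }
    }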

    ", + "ListChangeSets": "

    Returns the ID and status of each active change set for a stack. For example, AWS CloudFormation lists change sets that are in the CREATE_IN_PROGRESS or CREATE_PENDING state.

    ", + "ListStackResources": "

    Returns descriptions of all resources of the specified stack.

    For deleted stacks, ListStackResources returns resource information for up to 90 days after the stack has been deleted.

    ", + "ListStacks": "

    Returns the summary information for stacks whose status matches the specified StackStatusFilter. Summary information for stacks that have been deleted is kept for 90 days after the stack is deleted. If no StackStatusFilter is specified, summary information for all stacks is returned (including existing stacks and stacks that have been deleted).
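
    A sketch of filtering by stack status with the vendored Go SDK; the chosen status codes are illustrative:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/cloudformation"
    )

    func main() {
        svc := cloudformation.New(session.Must(session.NewSession()))

        input := &cloudformation.ListStacksInput{
            // Only stacks in these states; omit the filter to list everything,
            // including stacks deleted within the last 90 days.
            StackStatusFilter: aws.StringSlice([]string{
                cloudformation.StackStatusCreateComplete,
                cloudformation.StackStatusUpdateComplete,
            }),
        }
        err := svc.ListStacksPages(input,
            func(page *cloudformation.ListStacksOutput, lastPage bool) bool {
                for _, s := range page.StackSummaries {
                    fmt.Println(aws.StringValue(s.StackName), aws.StringValue(s.StackStatus))
                }
                return true
            })
        if err != nil {
            log.Fatal(err)
        }
    }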

    ", + "SetStackPolicy": "

    Sets a stack policy for a specified stack.

    ", + "SignalResource": "

    Sends a signal to the specified resource with a success or failure status. You can use the SignalResource API in conjunction with a creation policy or update policy. AWS CloudFormation doesn't proceed with a stack creation or update until resources receive the required number of signals or the timeout period is exceeded. The SignalResource API is useful in cases where you want to send signals from anywhere other than an Amazon EC2 instance.
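
    A minimal sketch of sending a success signal with the vendored Go SDK; all identifiers are placeholders:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/cloudformation"
    )

    func main() {
        svc := cloudformation.New(session.Must(session.NewSession()))

        _, err := svc.SignalResource(&cloudformation.SignalResourceInput{
            StackName:         aws.String("my-stack"),      // placeholder
            LogicalResourceId: aws.String("WaitCondition"), // placeholder
            UniqueId:          aws.String("signal-1"),      // must differ per signal
            Status:            aws.String(cloudformation.ResourceSignalStatusSuccess),
        })
        if err != nil {
            log.Fatal(err)
        }
    }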

    ", + "UpdateStack": "

    Updates a stack as specified in the template. After the call completes successfully, the stack update starts. You can check the status of the stack via the DescribeStacks action.

    To get a copy of the template for an existing stack, you can use the GetTemplate action.

    For more information about creating an update template, updating a stack, and monitoring the progress of the update, see Updating a Stack.
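
    A sketch of updating one parameter while reusing the current template, then checking the update via the SDK's DescribeStacks-based waiter; the stack name and parameter are hypothetical:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/cloudformation"
    )

    func main() {
        svc := cloudformation.New(session.Must(session.NewSession()))

        // Change one parameter while keeping the stack's current template.
        out, err := svc.UpdateStack(&cloudformation.UpdateStackInput{
            StackName:           aws.String("my-stack"), // placeholder
            UsePreviousTemplate: aws.Bool(true),
            Parameters: []*cloudformation.Parameter{{
                ParameterKey:   aws.String("InstanceType"), // hypothetical parameter
                ParameterValue: aws.String("t2.small"),
            }},
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(aws.StringValue(out.StackId))
        // Check the status of the update via DescribeStacks (wrapped in a waiter).
        if err := svc.WaitUntilStackUpdateComplete(&cloudformation.DescribeStacksInput{
            StackName: aws.String("my-stack"),
        }); err != nil {
            log.Fatal(err)
        }
    }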

    ", + "ValidateTemplate": "

    Validates a specified template.
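
    A minimal sketch of validating an inlined template with the vendored Go SDK:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/cloudformation"
    )

    func main() {
        svc := cloudformation.New(session.Must(session.NewSession()))

        tpl := `{"Resources":{"Bucket":{"Type":"AWS::S3::Bucket"}}}` // inlined for the sketch
        out, err := svc.ValidateTemplate(&cloudformation.ValidateTemplateInput{
            TemplateBody: aws.String(tpl),
        })
        if err != nil {
            log.Fatal(err) // template problems surface here as a ValidationError
        }
        for _, p := range out.Parameters {
            fmt.Println(aws.StringValue(p.ParameterKey))
        }
    }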

    " + }, + "shapes": { + "AccountLimit": { + "base": "

    The AccountLimit data type.

    ", + "refs": { + "AccountLimitList$member": null + } + }, + "AccountLimitList": { + "base": null, + "refs": { + "DescribeAccountLimitsOutput$AccountLimits": "

    An account limit structure that contains a list of AWS CloudFormation account limits and their values.

    " + } + }, + "AllowedValue": { + "base": null, + "refs": { + "AllowedValues$member": null + } + }, + "AllowedValues": { + "base": null, + "refs": { + "ParameterConstraints$AllowedValues": "

    A list of values that are permitted for a parameter.

    " + } + }, + "AlreadyExistsException": { + "base": "

    Resource with the name requested already exists.

    ", + "refs": { + } + }, + "CancelUpdateStackInput": { + "base": "

    The input for the CancelUpdateStack action.

    ", + "refs": { + } + }, + "Capabilities": { + "base": null, + "refs": { + "CreateChangeSetInput$Capabilities": "

    A list of capabilities that you must specify before AWS CloudFormation can update certain stacks. Some stack templates might include resources that can affect permissions in your AWS account, for example, by creating new AWS Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge their capabilities by specifying this parameter.

    Currently, the only valid value is CAPABILITY_IAM, which is required for the following resources: AWS::IAM::AccessKey, AWS::IAM::Group, AWS::IAM::InstanceProfile, AWS::IAM::Policy, AWS::IAM::Role, AWS::IAM::User, and AWS::IAM::UserToGroupAddition. If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary. If your template contains any of the listed resources and you don't specify this parameter, this action returns an InsufficientCapabilities error.
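
    A sketch of acknowledging CAPABILITY_IAM when creating a change set with the vendored Go SDK; the stack name, change set name, and template URL are placeholders:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/cloudformation"
    )

    func main() {
        svc := cloudformation.New(session.Must(session.NewSession()))

        out, err := svc.CreateChangeSet(&cloudformation.CreateChangeSetInput{
            StackName:     aws.String("my-stack"),     // placeholder
            ChangeSetName: aws.String("add-iam-role"), // placeholder
            TemplateURL:   aws.String("https://s3.amazonaws.com/my-bucket/template.json"),
            // Acknowledge the IAM resources in the template; omitting this
            // makes the call fail with InsufficientCapabilities.
            Capabilities: []*string{aws.String(cloudformation.CapabilityCapabilityIam)},
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(aws.StringValue(out.Id)) // change set ARN
    }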

    ", + "CreateStackInput$Capabilities": "

    A list of capabilities that you must specify before AWS CloudFormation can create certain stacks. Some stack templates might include resources that can affect permissions in your AWS account, for example, by creating new AWS Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge their capabilities by specifying this parameter.

    Currently, the only valid value is CAPABILITY_IAM, which is required for the following resources: AWS::IAM::AccessKey, AWS::IAM::Group, AWS::IAM::InstanceProfile, AWS::IAM::Policy, AWS::IAM::Role, AWS::IAM::User, and AWS::IAM::UserToGroupAddition. If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary. If your template contains any of the listed resources and you don't specify this parameter, this action returns an InsufficientCapabilities error.

    ", + "DescribeChangeSetOutput$Capabilities": "

    If you execute the change set, the list of capabilities that were explicitly acknowledged when the change set was created.

    ", + "GetTemplateSummaryOutput$Capabilities": "

    The capabilities found within the template. Currently, AWS CloudFormation supports only the CAPABILITY_IAM capability. If your template contains IAM resources, you must specify the CAPABILITY_IAM value for this parameter when you use the CreateStack or UpdateStack actions with your template; otherwise, those actions return an InsufficientCapabilities error.

    ", + "Stack$Capabilities": "

    The capabilities allowed in the stack.

    ", + "UpdateStackInput$Capabilities": "

    A list of capabilities that you must specify before AWS CloudFormation can update certain stacks. Some stack templates might include resources that can affect permissions in your AWS account, for example, by creating new AWS Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge their capabilities by specifying this parameter.

    Currently, the only valid value is CAPABILITY_IAM, which is required for the following resources: AWS::IAM::AccessKey, AWS::IAM::Group, AWS::IAM::InstanceProfile, AWS::IAM::Policy, AWS::IAM::Role, AWS::IAM::User, and AWS::IAM::UserToGroupAddition. If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary. If your template contains any of the listed resources and you don't specify this parameter, this action returns an InsufficientCapabilities error.

    ", + "ValidateTemplateOutput$Capabilities": "

    The capabilities found within the template. Currently, AWS CloudFormation supports only the CAPABILITY_IAM capability. If your template contains IAM resources, you must specify the CAPABILITY_IAM value for this parameter when you use the CreateStack or UpdateStack actions with your template; otherwise, those actions return an InsufficientCapabilities error.

    " + } + }, + "CapabilitiesReason": { + "base": null, + "refs": { + "GetTemplateSummaryOutput$CapabilitiesReason": "

    The list of resources that generated the values in the Capabilities response element.

    ", + "ValidateTemplateOutput$CapabilitiesReason": "

    The list of resources that generated the values in the Capabilities response element.

    " + } + }, + "Capability": { + "base": null, + "refs": { + "Capabilities$member": null + } + }, + "CausingEntity": { + "base": null, + "refs": { + "ResourceChangeDetail$CausingEntity": "

    The identity of the entity that triggered this change. This entity is a member of the group that is specified by the ChangeSource field. For example, if you modified the value of the KeyPairName parameter, the CausingEntity is the name of the parameter (KeyPairName).

    If the ChangeSource value is DirectModification, no value is given for CausingEntity.

    " + } + }, + "Change": { + "base": "

    The Change structure describes the changes AWS CloudFormation will perform if you execute the change set.

    ", + "refs": { + "Changes$member": null + } + }, + "ChangeAction": { + "base": null, + "refs": { + "ResourceChange$Action": "

    The action that AWS CloudFormation takes on the resource, such as Add (adds a new resource), Modify (changes a resource), or Remove (deletes a resource).

    " + } + }, + "ChangeSetId": { + "base": null, + "refs": { + "ChangeSetSummary$ChangeSetId": "

    The ID of the change set.

    ", + "CreateChangeSetOutput$Id": "

    The Amazon Resource Name (ARN) of the change set.

    ", + "DescribeChangeSetOutput$ChangeSetId": "

    The ARN of the change set.

    " + } + }, + "ChangeSetName": { + "base": null, + "refs": { + "ChangeSetSummary$ChangeSetName": "

    The name of the change set.

    ", + "CreateChangeSetInput$ChangeSetName": "

    The name of the change set. The name must be unique among all change sets that are associated with the specified stack.

    A change set name can contain only alphanumeric characters (case sensitive) and hyphens. It must start with an alphabetic character and cannot exceed 128 characters.

    ", + "DescribeChangeSetOutput$ChangeSetName": "

    The name of the change set.

    " + } + }, + "ChangeSetNameOrId": { + "base": null, + "refs": { + "DeleteChangeSetInput$ChangeSetName": "

    The name or Amazon Resource Name (ARN) of the change set that you want to delete.

    ", + "DescribeChangeSetInput$ChangeSetName": "

    The name or Amazon Resource Name (ARN) of the change set that you want to describe.

    ", + "ExecuteChangeSetInput$ChangeSetName": "

    The name or ARN of the change set that you want use to update the specified stack.

    " + } + }, + "ChangeSetNotFoundException": { + "base": "

    The specified change set name or ID doesn't exist. To view valid change sets for a stack, use the ListChangeSets action.

    ", + "refs": { + } + }, + "ChangeSetStatus": { + "base": null, + "refs": { + "ChangeSetSummary$Status": "

    The state of the change set, such as CREATE_IN_PROGRESS, CREATE_COMPLETE, or FAILED.

    ", + "DescribeChangeSetOutput$Status": "

    The current status of the change set, such as CREATE_IN_PROGRESS, CREATE_COMPLETE, or FAILED.

    " + } + }, + "ChangeSetStatusReason": { + "base": null, + "refs": { + "ChangeSetSummary$StatusReason": "

    A description of the change set's status. For example, if your change set is in the FAILED state, AWS CloudFormation shows the error message.

    ", + "DescribeChangeSetOutput$StatusReason": "

    A description of the change set's status. For example, if your attempt to create a change set failed, AWS CloudFormation shows the error message.

    " + } + }, + "ChangeSetSummaries": { + "base": null, + "refs": { + "ListChangeSetsOutput$Summaries": "

    A list of ChangeSetSummary structures that provides the ID and status of each change set for the specified stack.

    " + } + }, + "ChangeSetSummary": { + "base": "

    The ChangeSetSummary structure describes a change set, its status, and the stack with which it's associated.

    ", + "refs": { + "ChangeSetSummaries$member": null + } + }, + "ChangeSource": { + "base": null, + "refs": { + "ResourceChangeDetail$ChangeSource": "

    The group to which the CausingEntity value belongs. There are five entity groups:

    • ResourceReference entities are Ref intrinsic functions that refer to resources in the template, such as { \"Ref\" : \"MyEC2InstanceResource\" }.

    • ParameterReference entities are Ref intrinsic functions that get template parameter values, such as { \"Ref\" : \"MyPasswordParameter\" }.

    • ResourceAttribute entities are Fn::GetAtt intrinsic functions that get resource attribute values, such as { \"Fn::GetAtt\" : [ \"MyEC2InstanceResource\", \"PublicDnsName\" ] }.

    • DirectModification entities are changes that are made directly to the template.

    • Automatic entities are AWS::CloudFormation::Stack resource types, which are also known as nested stacks. If you made no changes to the AWS::CloudFormation::Stack resource, AWS CloudFormation sets the ChangeSource to Automatic because the nested stack's template might have changed. Changes to a nested stack's template aren't visible to AWS CloudFormation until you run an update on the parent stack.

    " + } + }, + "ChangeType": { + "base": null, + "refs": { + "Change$Type": "

    The type of entity that AWS CloudFormation changes. Currently, the only entity type is Resource.

    " + } + }, + "Changes": { + "base": null, + "refs": { + "DescribeChangeSetOutput$Changes": "

    A list of Change structures that describes the resources AWS CloudFormation changes if you execute the change set.

    " + } + }, + "ClientToken": { + "base": null, + "refs": { + "CreateChangeSetInput$ClientToken": "

    A unique identifier for this CreateChangeSet request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to create another change set with the same name. You might retry CreateChangeSet requests to ensure that AWS CloudFormation successfully received them.

    " + } + }, + "ContinueUpdateRollbackInput": { + "base": "

    The input for the ContinueUpdateRollback action.

    ", + "refs": { + } + }, + "ContinueUpdateRollbackOutput": { + "base": "

    The output for a ContinueUpdateRollback action.

    ", + "refs": { + } + }, + "CreateChangeSetInput": { + "base": "

    The input for the CreateChangeSet action.

    ", + "refs": { + } + }, + "CreateChangeSetOutput": { + "base": "

    The output for the CreateChangeSet action.

    ", + "refs": { + } + }, + "CreateStackInput": { + "base": "

    The input for CreateStack action.

    ", + "refs": { + } + }, + "CreateStackOutput": { + "base": "

    The output for a CreateStack action.

    ", + "refs": { + } + }, + "CreationTime": { + "base": null, + "refs": { + "ChangeSetSummary$CreationTime": "

    The start time when the change set was created, in UTC.

    ", + "DescribeChangeSetOutput$CreationTime": "

    The start time when the change set was created, in UTC.

    ", + "Stack$CreationTime": "

    The time at which the stack was created.

    ", + "StackSummary$CreationTime": "

    The time the stack was created.

    " + } + }, + "DeleteChangeSetInput": { + "base": "

    The input for the DeleteChangeSet action.

    ", + "refs": { + } + }, + "DeleteChangeSetOutput": { + "base": "

    The output for the DeleteChangeSet action.

    ", + "refs": { + } + }, + "DeleteStackInput": { + "base": "

    The input for DeleteStack action.

    ", + "refs": { + } + }, + "DeletionTime": { + "base": null, + "refs": { + "StackSummary$DeletionTime": "

    The time the stack was deleted.

    " + } + }, + "DescribeAccountLimitsInput": { + "base": "

    The input for the DescribeAccountLimits action.

    ", + "refs": { + } + }, + "DescribeAccountLimitsOutput": { + "base": "

    The output for the DescribeAccountLimits action.

    ", + "refs": { + } + }, + "DescribeChangeSetInput": { + "base": "

    The input for the DescribeChangeSet action.

    ", + "refs": { + } + }, + "DescribeChangeSetOutput": { + "base": "

    The output for the DescribeChangeSet action.

    ", + "refs": { + } + }, + "DescribeStackEventsInput": { + "base": "

    The input for DescribeStackEvents action.

    ", + "refs": { + } + }, + "DescribeStackEventsOutput": { + "base": "

    The output for a DescribeStackEvents action.

    ", + "refs": { + } + }, + "DescribeStackResourceInput": { + "base": "

    The input for DescribeStackResource action.

    ", + "refs": { + } + }, + "DescribeStackResourceOutput": { + "base": "

    The output for a DescribeStackResource action.

    ", + "refs": { + } + }, + "DescribeStackResourcesInput": { + "base": "

    The input for DescribeStackResources action.

    ", + "refs": { + } + }, + "DescribeStackResourcesOutput": { + "base": "

    The output for a DescribeStackResources action.

    ", + "refs": { + } + }, + "DescribeStacksInput": { + "base": "

    The input for DescribeStacks action.

    ", + "refs": { + } + }, + "DescribeStacksOutput": { + "base": "

    The output for a DescribeStacks action.

    ", + "refs": { + } + }, + "Description": { + "base": null, + "refs": { + "ChangeSetSummary$Description": "

    Descriptive information about the change set.

    ", + "CreateChangeSetInput$Description": "

    A description to help you identify this change set.

    ", + "DescribeChangeSetOutput$Description": "

    Information about the change set.

    ", + "GetTemplateSummaryOutput$Description": "

    The value that is defined in the Description property of the template.

    ", + "Output$Description": "

    User defined description associated with the output.

    ", + "ParameterDeclaration$Description": "

    The description that is associated with the parameter.

    ", + "Stack$Description": "

    A user-defined description associated with the stack.

    ", + "StackResource$Description": "

    User defined description associated with the resource.

    ", + "StackResourceDetail$Description": "

    User defined description associated with the resource.

    ", + "TemplateParameter$Description": "

    User defined description associated with the parameter.

    ", + "ValidateTemplateOutput$Description": "

    The description found within the template.

    " + } + }, + "DisableRollback": { + "base": null, + "refs": { + "CreateStackInput$DisableRollback": "

    Set to true to disable rollback of the stack if stack creation failed. You can specify either DisableRollback or OnFailure, but not both.

    Default: false

    ", + "Stack$DisableRollback": "

    Boolean to enable or disable rollback on stack creation failures:

    • true: disable rollback

    • false: enable rollback

    " + } + }, + "EstimateTemplateCostInput": { + "base": "

    The input for an EstimateTemplateCost action.

    ", + "refs": { + } + }, + "EstimateTemplateCostOutput": { + "base": "

    The output for a EstimateTemplateCost action.

    ", + "refs": { + } + }, + "EvaluationType": { + "base": null, + "refs": { + "ResourceChangeDetail$Evaluation": "

    Indicates whether AWS CloudFormation can determine the target value, and whether the target value will change before you execute a change set.

    For Static evaluations, AWS CloudFormation can determine that the target value will change, and its value. For example, if you directly modify the InstanceType property of an EC2 instance, AWS CloudFormation knows that this property value will change, and its value, so this is a Static evaluation.

    For Dynamic evaluations, AWS CloudFormation cannot determine the target value because it depends on the result of an intrinsic function, such as a Ref or Fn::GetAtt intrinsic function, when the stack is updated. For example, if your template includes a reference to a resource that is conditionally recreated, the value of the reference (the physical ID of the resource) might change, depending on whether the resource is recreated. If the resource is recreated, it will have a new physical ID, so all references to that resource will also be updated.

    " + } + }, + "EventId": { + "base": null, + "refs": { + "StackEvent$EventId": "

    The unique ID of this event.

    " + } + }, + "ExecuteChangeSetInput": { + "base": "

    The input for the ExecuteChangeSet action.

    ", + "refs": { + } + }, + "ExecuteChangeSetOutput": { + "base": "

    The output for the ExecuteChangeSet action.

    ", + "refs": { + } + }, + "ExecutionStatus": { + "base": null, + "refs": { + "ChangeSetSummary$ExecutionStatus": "

    If the change set execution status is AVAILABLE, you can execute the change set. If you can’t execute the change set, the status indicates why. For example, a change set might be in an UNAVAILABLE state because AWS CloudFormation is still creating it or in an OBSOLETE state because the stack was already updated.

    ", + "DescribeChangeSetOutput$ExecutionStatus": "

    If the change set execution status is AVAILABLE, you can execute the change set. If you can’t execute the change set, the status indicates why. For example, a change set might be in an UNAVAILABLE state because AWS CloudFormation is still creating it or in an OBSOLETE state because the stack was already updated.

    " + } + }, + "GetStackPolicyInput": { + "base": "

    The input for the GetStackPolicy action.

    ", + "refs": { + } + }, + "GetStackPolicyOutput": { + "base": "

    The output for the GetStackPolicy action.

    ", + "refs": { + } + }, + "GetTemplateInput": { + "base": "

    The input for a GetTemplate action.

    ", + "refs": { + } + }, + "GetTemplateOutput": { + "base": "

    The output for GetTemplate action.

    ", + "refs": { + } + }, + "GetTemplateSummaryInput": { + "base": "

    The input for the GetTemplateSummary action.

    ", + "refs": { + } + }, + "GetTemplateSummaryOutput": { + "base": "

    The output for the GetTemplateSummary action.

    ", + "refs": { + } + }, + "InsufficientCapabilitiesException": { + "base": "

    The template contains resources with capabilities that were not specified in the Capabilities parameter.

    ", + "refs": { + } + }, + "InvalidChangeSetStatusException": { + "base": "

    The specified change set cannot be used to update the stack. For example, the change set status might be CREATE_IN_PROGRESS or the stack status might be UPDATE_IN_PROGRESS.

    ", + "refs": { + } + }, + "LastUpdatedTime": { + "base": null, + "refs": { + "Stack$LastUpdatedTime": "

    The time the stack was last updated. This field will only be returned if the stack has been updated at least once.

    ", + "StackSummary$LastUpdatedTime": "

    The time the stack was last updated. This field will only be returned if the stack has been updated at least once.

    " + } + }, + "LimitExceededException": { + "base": "

    Quota for the resource has already been reached.

    ", + "refs": { + } + }, + "LimitName": { + "base": null, + "refs": { + "AccountLimit$Name": "

    The name of the account limit. Currently, the only account limit is StackLimit.

    " + } + }, + "LimitValue": { + "base": null, + "refs": { + "AccountLimit$Value": "

    The value that is associated with the account limit name.

    " + } + }, + "ListChangeSetsInput": { + "base": "

    The input for the ListChangeSets action.

    ", + "refs": { + } + }, + "ListChangeSetsOutput": { + "base": "

    The output for the ListChangeSets action.

    ", + "refs": { + } + }, + "ListStackResourcesInput": { + "base": "

    The input for the ListStackResource action.

    ", + "refs": { + } + }, + "ListStackResourcesOutput": { + "base": "

    The output for a ListStackResources action.

    ", + "refs": { + } + }, + "ListStacksInput": { + "base": "

    The input for ListStacks action.

    ", + "refs": { + } + }, + "ListStacksOutput": { + "base": "

    The output for ListStacks action.

    ", + "refs": { + } + }, + "LogicalResourceId": { + "base": null, + "refs": { + "DescribeStackResourceInput$LogicalResourceId": "

    The logical name of the resource as specified in the template.

    Default: There is no default value.

    ", + "DescribeStackResourcesInput$LogicalResourceId": "

    The logical name of the resource as specified in the template.

    Default: There is no default value.

    ", + "ResourceChange$LogicalResourceId": "

    The resource's logical ID, which is defined in the stack's template.

    ", + "RetainResources$member": null, + "SignalResourceInput$LogicalResourceId": "

    The logical ID of the resource that you want to signal. The logical ID is the name of the resource as given in the template.

    ", + "StackEvent$LogicalResourceId": "

    The logical name of the resource specified in the template.

    ", + "StackResource$LogicalResourceId": "

    The logical name of the resource specified in the template.

    ", + "StackResourceDetail$LogicalResourceId": "

    The logical name of the resource specified in the template.

    ", + "StackResourceSummary$LogicalResourceId": "

    The logical name of the resource specified in the template.

    " + } + }, + "Metadata": { + "base": null, + "refs": { + "GetTemplateSummaryOutput$Metadata": "

    The value that is defined for the Metadata property of the template.

    ", + "StackResourceDetail$Metadata": "

    The JSON format content of the Metadata attribute declared for the resource. For more information, see Metadata Attribute in the AWS CloudFormation User Guide.

    " + } + }, + "NextToken": { + "base": null, + "refs": { + "DescribeAccountLimitsInput$NextToken": "

    A string that identifies the next page of limits that you want to retrieve.

    ", + "DescribeAccountLimitsOutput$NextToken": "

    If the output exceeds 1 MB in size, a string that identifies the next page of limits. If no additional page exists, this value is null.

    ", + "DescribeChangeSetInput$NextToken": "

    A string (provided by the DescribeChangeSet response output) that identifies the next page of information that you want to retrieve.

    ", + "DescribeChangeSetOutput$NextToken": "

    If the output exceeds 1 MB, a string that identifies the next page of changes. If there is no additional page, this value is null.

    ", + "DescribeStackEventsInput$NextToken": "

    A string that identifies the next page of events that you want to retrieve.

    ", + "DescribeStackEventsOutput$NextToken": "

    If the output exceeds 1 MB in size, a string that identifies the next page of events. If no additional page exists, this value is null.

    ", + "DescribeStacksInput$NextToken": "

    A string that identifies the next page of stacks that you want to retrieve.

    ", + "DescribeStacksOutput$NextToken": "

    If the output exceeds 1 MB in size, a string that identifies the next page of stacks. If no additional page exists, this value is null.

    ", + "ListChangeSetsInput$NextToken": "

    A string (provided by the ListChangeSets response output) that identifies the next page of change sets that you want to retrieve.

    ", + "ListChangeSetsOutput$NextToken": "

    If the output exceeds 1 MB, a string that identifies the next page of change sets. If there is no additional page, this value is null.

    ", + "ListStackResourcesInput$NextToken": "

    A string that identifies the next page of stack resources that you want to retrieve.

    ", + "ListStackResourcesOutput$NextToken": "

    If the output exceeds 1 MB, a string that identifies the next page of stack resources. If no additional page exists, this value is null.

    ", + "ListStacksInput$NextToken": "

    A string that identifies the next page of stacks that you want to retrieve.

    ", + "ListStacksOutput$NextToken": "

    If the output exceeds 1 MB in size, a string that identifies the next page of stacks. If no additional page exists, this value is null.
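
    A sketch of the manual NextToken loop this pattern implies, using DescribeAccountLimits from the vendored Go SDK (the SDK's *Pages helpers do the same internally):

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/cloudformation"
    )

    func main() {
        svc := cloudformation.New(session.Must(session.NewSession()))

        input := &cloudformation.DescribeAccountLimitsInput{}
        for {
            out, err := svc.DescribeAccountLimits(input)
            if err != nil {
                log.Fatal(err)
            }
            for _, l := range out.AccountLimits {
                fmt.Printf("%s = %d\n", aws.StringValue(l.Name), aws.Int64Value(l.Value))
            }
            if out.NextToken == nil { // no additional page
                break
            }
            input.NextToken = out.NextToken
        }
    }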

    " + } + }, + "NoEcho": { + "base": null, + "refs": { + "ParameterDeclaration$NoEcho": "

    Flag that indicates whether the parameter value is shown as plain text in logs and in the AWS Management Console.

    ", + "TemplateParameter$NoEcho": "

    Flag indicating whether the parameter should be displayed as plain text in logs and UIs.

    " + } + }, + "NotificationARN": { + "base": null, + "refs": { + "NotificationARNs$member": null + } + }, + "NotificationARNs": { + "base": null, + "refs": { + "CreateChangeSetInput$NotificationARNs": "

    The Amazon Resource Names (ARNs) of Amazon Simple Notification Service (Amazon SNS) topics that AWS CloudFormation associates with the stack. To remove all associated notification topics, specify an empty list.

    ", + "CreateStackInput$NotificationARNs": "

    The Simple Notification Service (SNS) topic ARNs to publish stack related events. You can find your SNS topic ARNs using the SNS console or your Command Line Interface (CLI).

    ", + "DescribeChangeSetOutput$NotificationARNs": "

    The ARNs of the Amazon Simple Notification Service (Amazon SNS) topics that will be associated with the stack if you execute the change set.

    ", + "Stack$NotificationARNs": "

    SNS topic ARNs to which stack related events are published.

    ", + "UpdateStackInput$NotificationARNs": "

    Amazon Simple Notification Service topic Amazon Resource Names (ARNs) that AWS CloudFormation associates with the stack. Specify an empty list to remove all notification topics.

    " + } + }, + "OnFailure": { + "base": null, + "refs": { + "CreateStackInput$OnFailure": "

    Determines what action will be taken if stack creation fails. This must be one of: DO_NOTHING, ROLLBACK, or DELETE. You can specify either OnFailure or DisableRollback, but not both.

    Default: ROLLBACK

    " + } + }, + "Output": { + "base": "

    The Output data type.

    ", + "refs": { + "Outputs$member": null + } + }, + "OutputKey": { + "base": null, + "refs": { + "Output$OutputKey": "

    The key associated with the output.

    " + } + }, + "OutputValue": { + "base": null, + "refs": { + "Output$OutputValue": "

    The value associated with the output.

    " + } + }, + "Outputs": { + "base": null, + "refs": { + "Stack$Outputs": "

    A list of output structures.

    " + } + }, + "Parameter": { + "base": "

    The Parameter data type.

    ", + "refs": { + "Parameters$member": null + } + }, + "ParameterConstraints": { + "base": "

    A set of criteria that AWS CloudFormation uses to validate parameter values. Although other constraints might be defined in the stack template, AWS CloudFormation returns only the AllowedValues property.

    ", + "refs": { + "ParameterDeclaration$ParameterConstraints": "

    The criteria that AWS CloudFormation uses to validate parameter values.

    " + } + }, + "ParameterDeclaration": { + "base": "

    The ParameterDeclaration data type.

    ", + "refs": { + "ParameterDeclarations$member": null + } + }, + "ParameterDeclarations": { + "base": null, + "refs": { + "GetTemplateSummaryOutput$Parameters": "

    A list of parameter declarations that describe various properties for each parameter.

    " + } + }, + "ParameterKey": { + "base": null, + "refs": { + "Parameter$ParameterKey": "

    The key associated with the parameter. If you don't specify a key and value for a particular parameter, AWS CloudFormation uses the default value that is specified in your template.

    ", + "ParameterDeclaration$ParameterKey": "

    The name that is associated with the parameter.

    ", + "TemplateParameter$ParameterKey": "

    The name associated with the parameter.

    " + } + }, + "ParameterType": { + "base": null, + "refs": { + "ParameterDeclaration$ParameterType": "

    The type of parameter.

    " + } + }, + "ParameterValue": { + "base": null, + "refs": { + "Parameter$ParameterValue": "

    The value associated with the parameter.

    ", + "ParameterDeclaration$DefaultValue": "

    The default value of the parameter.

    ", + "TemplateParameter$DefaultValue": "

    The default value associated with the parameter.

    " + } + }, + "Parameters": { + "base": null, + "refs": { + "CreateChangeSetInput$Parameters": "

    A list of Parameter structures that specify input parameters for the change set. For more information, see the Parameter data type.

    ", + "CreateStackInput$Parameters": "

    A list of Parameter structures that specify input parameters for the stack. For more information, see the Parameter data type.

    ", + "DescribeChangeSetOutput$Parameters": "

    A list of Parameter structures that describes the input parameters and their values used to create the change set. For more information, see the Parameter data type.

    ", + "EstimateTemplateCostInput$Parameters": "

    A list of Parameter structures that specify input parameters.

    ", + "Stack$Parameters": "

    A list of Parameter structures.

    ", + "UpdateStackInput$Parameters": "

    A list of Parameter structures that specify input parameters for the stack. For more information, see the Parameter data type.

    " + } + }, + "PhysicalResourceId": { + "base": null, + "refs": { + "DescribeStackResourcesInput$PhysicalResourceId": "

    The name or unique identifier that corresponds to a physical instance ID of a resource supported by AWS CloudFormation.

    For example, for an Amazon Elastic Compute Cloud (EC2) instance, PhysicalResourceId corresponds to the InstanceId. You can pass the EC2 InstanceId to DescribeStackResources to find which stack the instance belongs to and what other resources are part of the stack.

    Required: Conditional. If you do not specify PhysicalResourceId, you must specify StackName.

    Default: There is no default value.

    ", + "ResourceChange$PhysicalResourceId": "

    The resource's physical ID (resource name). Resources that you are adding don't have physical IDs because they haven't been created.

    ", + "StackEvent$PhysicalResourceId": "

    The name or unique identifier associated with the physical instance of the resource.

    ", + "StackResource$PhysicalResourceId": "

    The name or unique identifier that corresponds to a physical instance ID of a resource supported by AWS CloudFormation.

    ", + "StackResourceDetail$PhysicalResourceId": "

    The name or unique identifier that corresponds to a physical instance ID of a resource supported by AWS CloudFormation.

    ", + "StackResourceSummary$PhysicalResourceId": "

    The name or unique identifier that corresponds to a physical instance ID of the resource.

    " + } + }, + "PropertyName": { + "base": null, + "refs": { + "ResourceTargetDefinition$Name": "

    If the Attribute value is Properties, the name of the property. For all other attributes, the value is null.

    " + } + }, + "Replacement": { + "base": null, + "refs": { + "ResourceChange$Replacement": "

    For the Modify action, indicates whether AWS CloudFormation will replace the resource by creating a new one and deleting the old one. This value depends on the value of the RequiresRecreation property in the ResourceTargetDefinition structure. For example, if the RequiresRecreation field is Always and the Evaluation field is Static, Replacement is True. If the RequiresRecreation field is Always and the Evaluation field is Dynamic, Replacement is Conditionally.

    If you have multiple changes with different RequiresRecreation values, the Replacement value depends on the change with the most impact. A RequiresRecreation value of Always has the most impact, followed by Conditionally, and then Never.

    " + } + }, + "RequiresRecreation": { + "base": null, + "refs": { + "ResourceTargetDefinition$RequiresRecreation": "

    If the Attribute value is Properties, indicates whether a change to this property causes the resource to be recreated. The value can be Never, Always, or Conditionally. To determine the conditions for a Conditionally recreation, see the update behavior for that property in the AWS CloudFormation User Guide.

    " + } + }, + "ResourceAttribute": { + "base": null, + "refs": { + "ResourceTargetDefinition$Attribute": "

    Indicates which resource attribute is triggering this update, such as a change in the resource attribute's Metadata, Properties, or Tags.

    ", + "Scope$member": null + } + }, + "ResourceChange": { + "base": "

    The ResourceChange structure describes the resource and the action that AWS CloudFormation will perform on it if you execute this change set.

    ", + "refs": { + "Change$ResourceChange": "

    A ResourceChange structure that describes the resource and action that AWS CloudFormation will perform.

    " + } + }, + "ResourceChangeDetail": { + "base": "

    For a resource with Modify as the action, the ResourceChange structure describes the changes AWS CloudFormation will make to that resource.

    ", + "refs": { + "ResourceChangeDetails$member": null + } + }, + "ResourceChangeDetails": { + "base": null, + "refs": { + "ResourceChange$Details": "

    For the Modify action, a list of ResourceChangeDetail structures that describes the changes that AWS CloudFormation will make to the resource.

    " + } + }, + "ResourceProperties": { + "base": null, + "refs": { + "StackEvent$ResourceProperties": "

    BLOB of the properties used to create the resource.

    " + } + }, + "ResourceSignalStatus": { + "base": null, + "refs": { + "SignalResourceInput$Status": "

    The status of the signal, which is either success or failure. A failure signal causes AWS CloudFormation to immediately fail the stack creation or update.

    " + } + }, + "ResourceSignalUniqueId": { + "base": null, + "refs": { + "SignalResourceInput$UniqueId": "

    A unique ID of the signal. When you signal Amazon EC2 instances or Auto Scaling groups, specify the instance ID that you are signaling as the unique ID. If you send multiple signals to a single resource (such as signaling a wait condition), each signal requires a different unique ID.

    " + } + }, + "ResourceStatus": { + "base": null, + "refs": { + "StackEvent$ResourceStatus": "

    Current status of the resource.

    ", + "StackResource$ResourceStatus": "

    Current status of the resource.

    ", + "StackResourceDetail$ResourceStatus": "

    Current status of the resource.

    ", + "StackResourceSummary$ResourceStatus": "

    Current status of the resource.

    " + } + }, + "ResourceStatusReason": { + "base": null, + "refs": { + "StackEvent$ResourceStatusReason": "

    Success/failure message associated with the resource.

    ", + "StackResource$ResourceStatusReason": "

    Success/failure message associated with the resource.

    ", + "StackResourceDetail$ResourceStatusReason": "

    Success/failure message associated with the resource.

    ", + "StackResourceSummary$ResourceStatusReason": "

    Success/failure message associated with the resource.

    " + } + }, + "ResourceTargetDefinition": { + "base": "

    The field that AWS CloudFormation will change, such as the name of a resource's property, and whether the resource will be recreated.

    ", + "refs": { + "ResourceChangeDetail$Target": "

    A ResourceTargetDefinition structure that describes the field that AWS CloudFormation will change and whether the resource will be recreated.

    " + } + }, + "ResourceType": { + "base": null, + "refs": { + "ResourceChange$ResourceType": "

    The type of AWS CloudFormation resource, such as AWS::S3::Bucket.

    ", + "ResourceTypes$member": null, + "StackEvent$ResourceType": "

    Type of resource. (For more information, go to AWS Resource Types Reference in the AWS CloudFormation User Guide.)

    ", + "StackResource$ResourceType": "

    Type of resource. (For more information, go to AWS Resource Types Reference in the AWS CloudFormation User Guide.)

    ", + "StackResourceDetail$ResourceType": "

    Type of resource. (For more information, go to AWS Resource Types Reference in the AWS CloudFormation User Guide.)

    ", + "StackResourceSummary$ResourceType": "

    Type of resource. (For more information, go to AWS Resource Types Reference in the AWS CloudFormation User Guide.)

    " + } + }, + "ResourceTypes": { + "base": null, + "refs": { + "CreateChangeSetInput$ResourceTypes": "

    The template resource types that you have permissions to work with if you execute this change set, such as AWS::EC2::Instance, AWS::EC2::*, or Custom::MyCustomInstance.

    If the list of resource types doesn't include a resource type that you're updating, the stack update fails. By default, AWS CloudFormation grants permissions to all resource types. AWS Identity and Access Management (IAM) uses this parameter for condition keys in IAM policies for AWS CloudFormation. For more information, see Controlling Access with AWS Identity and Access Management in the AWS CloudFormation User Guide.

    ", + "CreateStackInput$ResourceTypes": "

    The template resource types that you have permissions to work with for this create stack action, such as AWS::EC2::Instance, AWS::EC2::*, or Custom::MyCustomInstance. Use the following syntax to describe template resource types: AWS::* (for all AWS resources), Custom::* (for all custom resources), Custom::logical_ID (for a specific custom resource), AWS::service_name::* (for all resources of a particular AWS service), and AWS::service_name::resource_logical_ID (for a specific AWS resource).

    If the list of resource types doesn't include a resource that you're creating, the stack creation fails. By default, AWS CloudFormation grants permissions to all resource types. AWS Identity and Access Management (IAM) uses this parameter for AWS CloudFormation-specific condition keys in IAM policies. For more information, see Controlling Access with AWS Identity and Access Management.

    ", + "GetTemplateSummaryOutput$ResourceTypes": "

    A list of all the template resource types that are defined in the template, such as AWS::EC2::Instance, AWS::Dynamo::Table, and Custom::MyCustomInstance.

    ", + "UpdateStackInput$ResourceTypes": "

    The template resource types that you have permissions to work with for this update stack action, such as AWS::EC2::Instance, AWS::EC2::*, or Custom::MyCustomInstance.

    If the list of resource types doesn't include a resource that you're updating, the stack update fails. By default, AWS CloudFormation grants permissions to all resource types. AWS Identity and Access Management (IAM) uses this parameter for AWS CloudFormation-specific condition keys in IAM policies. For more information, see Controlling Access with AWS Identity and Access Management.

    " + } + }, + "RetainResources": { + "base": null, + "refs": { + "DeleteStackInput$RetainResources": "

    For stacks in the DELETE_FAILED state, a list of resource logical IDs that are associated with the resources you want to retain. During deletion, AWS CloudFormation deletes the stack but does not delete the retained resources.

    Retaining resources is useful when you cannot delete a resource, such as a non-empty S3 bucket, but you want to delete the stack.
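
    A sketch of retaining a resource during a retried delete, using the vendored Go SDK; the stack name and logical ID are placeholders:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/cloudformation"
    )

    func main() {
        svc := cloudformation.New(session.Must(session.NewSession()))

        // Retry a failed delete while keeping a non-empty bucket.
        // Only valid for stacks already in DELETE_FAILED.
        _, err := svc.DeleteStack(&cloudformation.DeleteStackInput{
            StackName:       aws.String("my-stack"),
            RetainResources: aws.StringSlice([]string{"MyBucket"}),
        })
        if err != nil {
            log.Fatal(err)
        }
    }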

    " + } + }, + "Scope": { + "base": null, + "refs": { + "ResourceChange$Scope": "

    For the Modify action, indicates which resource attribute is triggering this update, such as a change in the resource attribute's Metadata, Properties, or Tags.

    " + } + }, + "SetStackPolicyInput": { + "base": "

    The input for the SetStackPolicy action.

    ", + "refs": { + } + }, + "SignalResourceInput": { + "base": "

    The input for the SignalResource action.

    ", + "refs": { + } + }, + "Stack": { + "base": "

    The Stack data type.

    ", + "refs": { + "Stacks$member": null + } + }, + "StackEvent": { + "base": "

    The StackEvent data type.

    ", + "refs": { + "StackEvents$member": null + } + }, + "StackEvents": { + "base": null, + "refs": { + "DescribeStackEventsOutput$StackEvents": "

    A list of StackEvents structures.

    " + } + }, + "StackId": { + "base": null, + "refs": { + "ChangeSetSummary$StackId": "

    The ID of the stack with which the change set is associated.

    ", + "CreateStackOutput$StackId": "

    Unique identifier of the stack.

    ", + "DescribeChangeSetOutput$StackId": "

    The ARN of the stack that is associated with the change set.

    ", + "Stack$StackId": "

    Unique identifier of the stack.

    ", + "StackEvent$StackId": "

    The unique ID of the instance of the stack.

    ", + "StackResource$StackId": "

    Unique identifier of the stack.

    ", + "StackResourceDetail$StackId": "

    Unique identifier of the stack.

    ", + "StackSummary$StackId": "

    Unique stack identifier.

    ", + "UpdateStackOutput$StackId": "

    Unique identifier of the stack.

    " + } + }, + "StackName": { + "base": null, + "refs": { + "CancelUpdateStackInput$StackName": "

    The name or the unique stack ID that is associated with the stack.

    ", + "ChangeSetSummary$StackName": "

    The name of the stack with which the change set is associated.

    ", + "CreateStackInput$StackName": "

    The name that is associated with the stack. The name must be unique in the region in which you are creating the stack.

    A stack name can contain only alphanumeric characters (case sensitive) and hyphens. It must start with an alphabetic character and cannot be longer than 128 characters.

    ", + "DeleteStackInput$StackName": "

    The name or the unique stack ID that is associated with the stack.

    ", + "DescribeChangeSetOutput$StackName": "

    The name of the stack that is associated with the change set.

    ", + "DescribeStackEventsInput$StackName": "

    The name or the unique stack ID that is associated with the stack, which are not always interchangeable:

    • Running stacks: You can specify either the stack's name or its unique stack ID.

    • Deleted stacks: You must specify the unique stack ID.

    Default: There is no default value.

    ", + "DescribeStackResourceInput$StackName": "

    The name or the unique stack ID that is associated with the stack, which are not always interchangeable:

    • Running stacks: You can specify either the stack's name or its unique stack ID.

    • Deleted stacks: You must specify the unique stack ID.

    Default: There is no default value.

    ", + "DescribeStackResourcesInput$StackName": "

    The name or the unique stack ID that is associated with the stack, which are not always interchangeable:

    • Running stacks: You can specify either the stack's name or its unique stack ID.

    • Deleted stacks: You must specify the unique stack ID.

    Default: There is no default value.

    Required: Conditional. If you do not specify StackName, you must specify PhysicalResourceId.

    ", + "DescribeStacksInput$StackName": "

    The name or the unique stack ID that is associated with the stack, which are not always interchangeable:

    • Running stacks: You can specify either the stack's name or its unique stack ID.

    • Deleted stacks: You must specify the unique stack ID.

    Default: There is no default value.

    ", + "GetStackPolicyInput$StackName": "

    The name or unique stack ID that is associated with the stack whose policy you want to get.

    ", + "GetTemplateInput$StackName": "

    The name or the unique stack ID that is associated with the stack, which are not always interchangeable:

    • Running stacks: You can specify either the stack's name or its unique stack ID.

    • Deleted stacks: You must specify the unique stack ID.

    Default: There is no default value.

    ", + "ListStackResourcesInput$StackName": "

    The name or the unique stack ID that is associated with the stack, which are not always interchangeable:

    • Running stacks: You can specify either the stack's name or its unique stack ID.

    • Deleted stacks: You must specify the unique stack ID.

    Default: There is no default value.

    ", + "SetStackPolicyInput$StackName": "

    The name or unique stack ID that you want to associate a policy with.

    ", + "Stack$StackName": "

    The name associated with the stack.

    ", + "StackEvent$StackName": "

    The name associated with a stack.

    ", + "StackResource$StackName": "

    The name associated with the stack.

    ", + "StackResourceDetail$StackName": "

    The name associated with the stack.

    ", + "StackSummary$StackName": "

    The name associated with the stack.

    ", + "UpdateStackInput$StackName": "

    The name or unique stack ID of the stack to update.

    " + } + }, + "StackNameOrId": { + "base": null, + "refs": { + "ContinueUpdateRollbackInput$StackName": "

    The name or the unique ID of the stack that you want to continue rolling back.

    ", + "CreateChangeSetInput$StackName": "

    The name or the unique ID of the stack for which you are creating a change set. AWS CloudFormation generates the change set by comparing this stack's information with the information that you submit, such as a modified template or different parameter input values.

    ", + "DeleteChangeSetInput$StackName": "

    If you specified the name of a change set to delete, specify the stack name or ID (ARN) that is associated with it.

    ", + "DescribeChangeSetInput$StackName": "

    If you specified the name of a change set, specify the stack name or ID (ARN) of the change set you want to describe.

    ", + "ExecuteChangeSetInput$StackName": "

    If you specified the name of a change set, specify the stack name or ID (ARN) that is associated with the change set you want to execute.

    ", + "GetTemplateSummaryInput$StackName": "

    The name or the stack ID that is associated with the stack, which are not always interchangeable. For running stacks, you can specify either the stack's name or its unique stack ID. For deleted stacks, you must specify the unique stack ID.

    Conditional: You must specify only one of the following parameters: StackName, TemplateBody, or TemplateURL.

    ", + "ListChangeSetsInput$StackName": "

    The name or the Amazon Resource Name (ARN) of the stack for which you want to list change sets.

    ", + "SignalResourceInput$StackName": "

    The stack name or unique stack ID that includes the resource that you want to signal.

    " + } + }, + "StackPolicyBody": { + "base": null, + "refs": { + "CreateStackInput$StackPolicyBody": "

    Structure containing the stack policy body. For more information, go to Prevent Updates to Stack Resources in the AWS CloudFormation User Guide. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

    ", + "GetStackPolicyOutput$StackPolicyBody": "

    Structure containing the stack policy body. (For more information, go to Prevent Updates to Stack Resources in the AWS CloudFormation User Guide.)

    ", + "SetStackPolicyInput$StackPolicyBody": "

    Structure containing the stack policy body. For more information, go to Prevent Updates to Stack Resources in the AWS CloudFormation User Guide. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

    ", + "UpdateStackInput$StackPolicyBody": "

    Structure containing a new stack policy body. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

    You might update the stack policy, for example, in order to protect a new resource that you created during a stack update. If you do not specify a stack policy, the current policy that is associated with the stack is unchanged.

    " + } + }, + "StackPolicyDuringUpdateBody": { + "base": null, + "refs": { + "UpdateStackInput$StackPolicyDuringUpdateBody": "

    Structure containing the temporary overriding stack policy body. You can specify either the StackPolicyDuringUpdateBody or the StackPolicyDuringUpdateURL parameter, but not both.

    If you want to update protected resources, specify a temporary overriding stack policy during this update. If you do not specify a stack policy, the current policy that is associated with the stack will be used.

    " + } + }, + "StackPolicyDuringUpdateURL": { + "base": null, + "refs": { + "UpdateStackInput$StackPolicyDuringUpdateURL": "

    Location of a file containing the temporary overriding stack policy. The URL must point to a policy (max size: 16KB) located in an S3 bucket in the same region as the stack. You can specify either the StackPolicyDuringUpdateBody or the StackPolicyDuringUpdateURL parameter, but not both.

    If you want to update protected resources, specify a temporary overriding stack policy during this update. If you do not specify a stack policy, the current policy that is associated with the stack will be used.

    " + } + }, + "StackPolicyURL": { + "base": null, + "refs": { + "CreateStackInput$StackPolicyURL": "

    Location of a file containing the stack policy. The URL must point to a policy (max size: 16KB) located in an S3 bucket in the same region as the stack. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

    ", + "SetStackPolicyInput$StackPolicyURL": "

    Location of a file containing the stack policy. The URL must point to a policy (maximum size: 16 KB) located in an S3 bucket in the same region as the stack. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

    ", + "UpdateStackInput$StackPolicyURL": "

    Location of a file containing the updated stack policy. The URL must point to a policy (max size: 16KB) located in an S3 bucket in the same region as the stack. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

    You might update the stack policy, for example, in order to protect a new resource that you created during a stack update. If you do not specify a stack policy, the current policy that is associated with the stack is unchanged.

    " + } + }, + "StackResource": { + "base": "

    The StackResource data type.

    ", + "refs": { + "StackResources$member": null + } + }, + "StackResourceDetail": { + "base": "

    Contains detailed information about the specified stack resource.

    ", + "refs": { + "DescribeStackResourceOutput$StackResourceDetail": "

    A StackResourceDetail structure containing the description of the specified resource in the specified stack.

    " + } + }, + "StackResourceSummaries": { + "base": null, + "refs": { + "ListStackResourcesOutput$StackResourceSummaries": "

    A list of StackResourceSummary structures.

    " + } + }, + "StackResourceSummary": { + "base": "

    Contains high-level information about the specified stack resource.

    ", + "refs": { + "StackResourceSummaries$member": null + } + }, + "StackResources": { + "base": null, + "refs": { + "DescribeStackResourcesOutput$StackResources": "

    A list of StackResource structures.

    " + } + }, + "StackStatus": { + "base": null, + "refs": { + "Stack$StackStatus": "

    Current status of the stack.

    ", + "StackStatusFilter$member": null, + "StackSummary$StackStatus": "

    The current status of the stack.

    " + } + }, + "StackStatusFilter": { + "base": null, + "refs": { + "ListStacksInput$StackStatusFilter": "

    Stack status to use as a filter. Specify one or more stack status codes to list only stacks with the specified status codes. For a complete list of stack status codes, see the StackStatus parameter of the Stack data type.

    " + } + }, + "StackStatusReason": { + "base": null, + "refs": { + "Stack$StackStatusReason": "

    Success/failure message associated with the stack status.

    ", + "StackSummary$StackStatusReason": "

    Success/Failure message associated with the stack status.

    " + } + }, + "StackSummaries": { + "base": null, + "refs": { + "ListStacksOutput$StackSummaries": "

    A list of StackSummary structures containing information about the specified stacks.

    " + } + }, + "StackSummary": { + "base": "

    The StackSummary data type.

    ", + "refs": { + "StackSummaries$member": null + } + }, + "Stacks": { + "base": null, + "refs": { + "DescribeStacksOutput$Stacks": "

    A list of stack structures.

    " + } + }, + "Tag": { + "base": "

    The Tag type enables you to specify a key-value pair that can be used to store information about an AWS CloudFormation stack.

    ", + "refs": { + "Tags$member": null + } + }, + "TagKey": { + "base": null, + "refs": { + "Tag$Key": "

    Required. A string used to identify this tag. You can specify a maximum of 128 characters for a tag key. Tags owned by Amazon Web Services (AWS) have the reserved prefix: aws:.

    " + } + }, + "TagValue": { + "base": null, + "refs": { + "Tag$Value": "

    Required. A string containing the value for this tag. You can specify a maximum of 256 characters for a tag value.

    " + } + }, + "Tags": { + "base": null, + "refs": { + "CreateChangeSetInput$Tags": "

    Key-value pairs to associate with this stack. AWS CloudFormation also propagates these tags to resources in the stack. You can specify a maximum of 10 tags.

    ", + "CreateStackInput$Tags": "

    Key-value pairs to associate with this stack. AWS CloudFormation also propagates these tags to the resources created in the stack. A maximum number of 10 tags can be specified.

    ", + "DescribeChangeSetOutput$Tags": "

    If you execute the change set, the tags that will be associated with the stack.

    ", + "Stack$Tags": "

    A list of Tags that specify information about the stack.

    ", + "UpdateStackInput$Tags": "

    Key-value pairs to associate with this stack. AWS CloudFormation also propagates these tags to supported resources in the stack. You can specify a maximum number of 10 tags.

    If you don't specify this parameter, AWS CloudFormation doesn't modify the stack's tags. If you specify an empty value, AWS CloudFormation removes all associated tags.

    " + } + }, + "TemplateBody": { + "base": null, + "refs": { + "CreateChangeSetInput$TemplateBody": "

    A structure that contains the body of the revised template, with a minimum length of 1 byte and a maximum length of 51,200 bytes. AWS CloudFormation generates the change set by comparing this template with the template of the stack that you specified.

    Conditional: You must specify only TemplateBody or TemplateURL.

    ", + "CreateStackInput$TemplateBody": "

    Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information, go to Template Anatomy in the AWS CloudFormation User Guide.

    Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.

    ", + "EstimateTemplateCostInput$TemplateBody": "

    Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. (For more information, go to Template Anatomy in the AWS CloudFormation User Guide.)

    Conditional: You must pass TemplateBody or TemplateURL. If both are passed, only TemplateBody is used.

    ", + "GetTemplateOutput$TemplateBody": "

    Structure containing the template body. (For more information, go to Template Anatomy in the AWS CloudFormation User Guide.)

    ", + "GetTemplateSummaryInput$TemplateBody": "

    Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information about templates, see Template Anatomy in the AWS CloudFormation User Guide.

    Conditional: You must specify only one of the following parameters: StackName, TemplateBody, or TemplateURL.

    ", + "UpdateStackInput$TemplateBody": "

    Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. (For more information, go to Template Anatomy in the AWS CloudFormation User Guide.)

    Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.

    ", + "ValidateTemplateInput$TemplateBody": "

    Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information, go to Template Anatomy in the AWS CloudFormation User Guide.

    Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used.

    " + } + }, + "TemplateDescription": { + "base": null, + "refs": { + "StackSummary$TemplateDescription": "

    The description of the template used to create the stack.

    " + } + }, + "TemplateParameter": { + "base": "

    The TemplateParameter data type.

    ", + "refs": { + "TemplateParameters$member": null + } + }, + "TemplateParameters": { + "base": null, + "refs": { + "ValidateTemplateOutput$Parameters": "

    A list of TemplateParameter structures.

    " + } + }, + "TemplateURL": { + "base": null, + "refs": { + "CreateChangeSetInput$TemplateURL": "

    The location of the file that contains the revised template. The URL must point to a template (max size: 460,800 bytes) that is located in an S3 bucket. AWS CloudFormation generates the change set by comparing this template with the stack that you specified.

    Conditional: You must specify only TemplateBody or TemplateURL.

    ", + "CreateStackInput$TemplateURL": "

    Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that is located in an Amazon S3 bucket. For more information, go to Template Anatomy in the AWS CloudFormation User Guide.

    Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.
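A minimal sketch of this call shape in Go; the stack name and S3 URL are hypothetical, and the timeout/rollback fields are included to show the related CreateStackInput options documented in this file:

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudformation"
)

// createFromS3 creates a stack from a template stored in S3. TemplateURL and
// TemplateBody are mutually exclusive; only one may be set on the input.
func createFromS3(svc *cloudformation.CloudFormation) error {
	_, err := svc.CreateStack(&cloudformation.CreateStackInput{
		StackName:        aws.String("my-stack"),                                         // hypothetical
		TemplateURL:      aws.String("https://s3.amazonaws.com/my-bucket/template.json"), // hypothetical
		TimeoutInMinutes: aws.Int64(30),   // stack goes CREATE_FAILED after 30 minutes
		DisableRollback:  aws.Bool(false), // roll back on failure (the default)
	})
	return err
}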

    ", + "EstimateTemplateCostInput$TemplateURL": "

    Location of file containing the template body. The URL must point to a template that is located in an Amazon S3 bucket. For more information, go to Template Anatomy in the AWS CloudFormation User Guide.

    Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used.

    ", + "GetTemplateSummaryInput$TemplateURL": "

    Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that is located in an Amazon S3 bucket. For more information about templates, see Template Anatomy in the AWS CloudFormation User Guide.

    Conditional: You must specify only one of the following parameters: StackName, TemplateBody, or TemplateURL.

    ", + "UpdateStackInput$TemplateURL": "

    Location of file containing the template body. The URL must point to a template that is located in an Amazon S3 bucket. For more information, go to Template Anatomy in the AWS CloudFormation User Guide.

    Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.

    ", + "ValidateTemplateInput$TemplateURL": "

    Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that is located in an Amazon S3 bucket. For more information, go to Template Anatomy in the AWS CloudFormation User Guide.

    Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used.

    " + } + }, + "TimeoutMinutes": { + "base": null, + "refs": { + "CreateStackInput$TimeoutInMinutes": "

    The amount of time that can pass before the stack status becomes CREATE_FAILED; if DisableRollback is not set or is set to false, the stack will be rolled back.

    ", + "Stack$TimeoutInMinutes": "

    The amount of time within which stack creation should complete.

    " + } + }, + "Timestamp": { + "base": null, + "refs": { + "StackEvent$Timestamp": "

    Time the status was updated.

    ", + "StackResource$Timestamp": "

    Time the status was updated.

    ", + "StackResourceDetail$LastUpdatedTimestamp": "

    Time the status was updated.

    ", + "StackResourceSummary$LastUpdatedTimestamp": "

    Time the status was updated.

    " + } + }, + "UpdateStackInput": { + "base": "

    The input for an UpdateStack action.

    ", + "refs": { + } + }, + "UpdateStackOutput": { + "base": "

    The output for an UpdateStack action.

    ", + "refs": { + } + }, + "Url": { + "base": null, + "refs": { + "EstimateTemplateCostOutput$Url": "

    An AWS Simple Monthly Calculator URL with a query string that describes the resources required to run the template.
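As a sketch of how this output surfaces in the Go client (templateBody is assumed to hold a valid template):

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudformation"
)

// estimateCost prints the Simple Monthly Calculator URL for a template.
// The API returns a pre-filled calculator link, not a dollar figure.
func estimateCost(svc *cloudformation.CloudFormation, templateBody string) error {
	out, err := svc.EstimateTemplateCost(&cloudformation.EstimateTemplateCostInput{
		TemplateBody: aws.String(templateBody),
	})
	if err != nil {
		return err
	}
	fmt.Println(aws.StringValue(out.Url))
	return nil
}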

    " + } + }, + "UsePreviousTemplate": { + "base": null, + "refs": { + "CreateChangeSetInput$UsePreviousTemplate": "

    Whether to reuse the template that is associated with the stack to create the change set.

    ", + "UpdateStackInput$UsePreviousTemplate": "

    Whether to reuse the existing template that is associated with the stack that you are updating.

    " + } + }, + "UsePreviousValue": { + "base": null, + "refs": { + "Parameter$UsePreviousValue": "

    During a stack update, use the existing parameter value that the stack is using for a given parameter key. If you specify true, do not specify a parameter value.
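A brief sketch of that rule in Go; the parameter keys are hypothetical. Where UsePreviousValue is true, no ParameterValue is supplied:

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudformation"
)

// updateParams keeps the existing value for one parameter and overrides
// another during a stack update.
func updateParams(svc *cloudformation.CloudFormation) error {
	_, err := svc.UpdateStack(&cloudformation.UpdateStackInput{
		StackName:           aws.String("my-stack"), // hypothetical
		UsePreviousTemplate: aws.Bool(true),
		Parameters: []*cloudformation.Parameter{
			{ParameterKey: aws.String("InstanceType"), UsePreviousValue: aws.Bool(true)},
			{ParameterKey: aws.String("DesiredCapacity"), ParameterValue: aws.String("4")},
		},
	})
	return err
}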

    " + } + }, + "ValidateTemplateInput": { + "base": "

    The input for the ValidateTemplate action.

    ", + "refs": { + } + }, + "ValidateTemplateOutput": { + "base": "

    The output for the ValidateTemplate action.

    ", + "refs": { + } + }, + "Version": { + "base": null, + "refs": { + "GetTemplateSummaryOutput$Version": "

    The AWS template format version, which identifies the capabilities of the template.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/paginators-1.json new file mode 100644 index 000000000..da6c17d42 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/paginators-1.json @@ -0,0 +1,27 @@ +{ + "pagination": { + "DescribeStackEvents": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "StackEvents" + }, + "DescribeStackResources": { + "result_key": "StackResources" + }, + "DescribeStacks": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "Stacks" + }, + "ListStackResources": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "StackResourceSummaries" + }, + "ListStacks": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "StackSummaries" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/waiters-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/waiters-2.json new file mode 100644 index 000000000..f3a5cde1c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/waiters-2.json @@ -0,0 +1,235 @@ +{ + "version": 2, + "waiters": { + "StackExists": { + "delay": 5, + "operation": "DescribeStacks", + "maxAttempts": 20, + "acceptors": [ + { + "matcher": "status", + "expected": 200, + "state": "success" + }, + { + "matcher": "error", + "expected": "ValidationError", + "state": "retry" + } + ] + }, + "StackCreateComplete": { + "delay": 30, + "operation": "DescribeStacks", + "maxAttempts": 120, + "description": "Wait until stack status is CREATE_COMPLETE.", + "acceptors": [ + { + "expected": "CREATE_COMPLETE", + "matcher": "pathAll", + "state": "success", + "argument": "Stacks[].StackStatus" + }, + { + "expected": "CREATE_FAILED", + "matcher": "pathAny", + "state": "failure", + "argument": "Stacks[].StackStatus" + }, + { + "expected": "DELETE_COMPLETE", + "matcher": "pathAny", + "argument": "Stacks[].StackStatus", + "state": "failure" + }, + { + "expected": "DELETE_IN_PROGRESS", + "matcher": "pathAny", + "argument": "Stacks[].StackStatus", + "state": "failure" + }, + { + "expected": "DELETE_FAILED", + "matcher": "pathAny", + "argument": "Stacks[].StackStatus", + "state": "failure" + }, + { + "expected": "ROLLBACK_COMPLETE", + "matcher": "pathAny", + "state": "failure", + "argument": "Stacks[].StackStatus" + }, + { + "expected": "ROLLBACK_FAILED", + "matcher": "pathAny", + "state": "failure", + "argument": "Stacks[].StackStatus" + }, + { + "expected": "ROLLBACK_IN_PROGRESS", + "matcher": "pathAny", + "argument": "Stacks[].StackStatus", + "state": "failure" + }, + { + "expected": "ValidationError", + "matcher": "error", + "state": "failure" + } + ] + }, + "StackDeleteComplete": { + "delay": 30, + "operation": "DescribeStacks", + "maxAttempts": 120, + "description": "Wait until stack status is DELETE_COMPLETE.", + "acceptors": [ + { + "expected": "DELETE_COMPLETE", + "matcher": "pathAll", + 
"state": "success", + "argument": "Stacks[].StackStatus" + }, + { + "expected": "ValidationError", + "matcher": "error", + "state": "success" + }, + { + "expected": "DELETE_FAILED", + "matcher": "pathAny", + "state": "failure", + "argument": "Stacks[].StackStatus" + }, + { + "argument": "Stacks[].StackStatus", + "expected": "CREATE_COMPLETE", + "matcher": "pathAny", + "state": "failure" + }, + { + "argument": "Stacks[].StackStatus", + "expected": "CREATE_FAILED", + "matcher": "pathAny", + "state": "failure" + }, + { + "argument": "Stacks[].StackStatus", + "expected": "CREATE_IN_PROGRESS", + "matcher": "pathAny", + "state": "failure" + }, + { + "argument": "Stacks[].StackStatus", + "expected": "ROLLBACK_COMPLETE", + "matcher": "pathAny", + "state": "failure" + }, + { + "argument": "Stacks[].StackStatus", + "expected": "ROLLBACK_FAILED", + "matcher": "pathAny", + "state": "failure" + }, + { + "argument": "Stacks[].StackStatus", + "expected": "ROLLBACK_IN_PROGRESS", + "matcher": "pathAny", + "state": "failure" + }, + { + "argument": "Stacks[].StackStatus", + "expected": "UPDATE_COMPLETE", + "matcher": "pathAny", + "state": "failure" + }, + { + "argument": "Stacks[].StackStatus", + "expected": "UPDATE_COMPLETE_CLEANUP_IN_PROGRESS", + "matcher": "pathAny", + "state": "failure" + }, + { + "argument": "Stacks[].StackStatus", + "expected": "UPDATE_IN_PROGRESS", + "matcher": "pathAny", + "state": "failure" + }, + { + "argument": "Stacks[].StackStatus", + "expected": "UPDATE_ROLLBACK_COMPLETE", + "matcher": "pathAny", + "state": "failure" + }, + { + "argument": "Stacks[].StackStatus", + "expected": "UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS", + "matcher": "pathAny", + "state": "failure" + }, + { + "argument": "Stacks[].StackStatus", + "expected": "UPDATE_ROLLBACK_FAILED", + "matcher": "pathAny", + "state": "failure" + }, + { + "argument": "Stacks[].StackStatus", + "expected": "UPDATE_ROLLBACK_IN_PROGRESS", + "matcher": "pathAny", + "state": "failure" + } + ] + }, + "StackUpdateComplete": { + "delay": 30, + "maxAttempts": 120, + "operation": "DescribeStacks", + "description": "Wait until stack status is UPDATE_COMPLETE.", + "acceptors": [ + { + "expected": "UPDATE_COMPLETE", + "matcher": "pathAll", + "state": "success", + "argument": "Stacks[].StackStatus" + }, + { + "expected": "UPDATE_FAILED", + "matcher": "pathAny", + "state": "failure", + "argument": "Stacks[].StackStatus" + }, + { + "expected": "UPDATE_ROLLBACK_COMPLETE", + "matcher": "pathAny", + "state": "failure", + "argument": "Stacks[].StackStatus" + }, + { + "expected": "UPDATE_ROLLBACK_FAILED", + "matcher": "pathAny", + "state": "failure", + "argument": "Stacks[].StackStatus" + }, + { + "argument": "Stacks[].StackStatus", + "expected": "UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS", + "matcher": "pathAny", + "state": "failure" + }, + { + "argument": "Stacks[].StackStatus", + "expected": "UPDATE_ROLLBACK_IN_PROGRESS", + "matcher": "pathAny", + "state": "failure" + }, + { + "expected": "ValidationError", + "matcher": "error", + "state": "failure" + } + ] + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-04-17/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-04-17/api-2.json new file mode 100644 index 000000000..421d0d98b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-04-17/api-2.json @@ -0,0 +1,2651 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-04-17", + "endpointPrefix":"cloudfront", + "globalEndpoint":"cloudfront.amazonaws.com", 
+ "serviceAbbreviation":"CloudFront", + "serviceFullName":"Amazon CloudFront", + "signatureVersion":"v4", + "protocol":"rest-xml" + }, + "operations":{ + "CreateCloudFrontOriginAccessIdentity":{ + "name":"CreateCloudFrontOriginAccessIdentity2015_04_17", + "http":{ + "method":"POST", + "requestUri":"/2015-04-17/origin-access-identity/cloudfront", + "responseCode":201 + }, + "input":{"shape":"CreateCloudFrontOriginAccessIdentityRequest"}, + "output":{"shape":"CreateCloudFrontOriginAccessIdentityResult"}, + "errors":[ + { + "shape":"CloudFrontOriginAccessIdentityAlreadyExists", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"MissingBody", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyCloudFrontOriginAccessIdentities", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InconsistentQuantities", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "CreateDistribution":{ + "name":"CreateDistribution2015_04_17", + "http":{ + "method":"POST", + "requestUri":"/2015-04-17/distribution", + "responseCode":201 + }, + "input":{"shape":"CreateDistributionRequest"}, + "output":{"shape":"CreateDistributionResult"}, + "errors":[ + { + "shape":"CNAMEAlreadyExists", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"DistributionAlreadyExists", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"InvalidOrigin", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidOriginAccessIdentity", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"TooManyTrustedSigners", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TrustedSignerDoesNotExist", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidViewerCertificate", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidMinimumProtocolVersion", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingBody", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyDistributionCNAMEs", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyDistributions", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidDefaultRootObject", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidRelativePath", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidErrorCode", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidResponseCode", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidRequiredProtocol", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NoSuchOrigin", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyOrigins", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyCacheBehaviors", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyCookieNamesInWhiteList", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidForwardCookies", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + 
"shape":"TooManyHeadersInForwardedValues", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidHeadersForS3Origin", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InconsistentQuantities", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyCertificates", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidLocationCode", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidGeoRestrictionParameter", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidProtocolSettings", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidTTLOrder", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "CreateInvalidation":{ + "name":"CreateInvalidation2015_04_17", + "http":{ + "method":"POST", + "requestUri":"/2015-04-17/distribution/{DistributionId}/invalidation", + "responseCode":201 + }, + "input":{"shape":"CreateInvalidationRequest"}, + "output":{"shape":"CreateInvalidationResult"}, + "errors":[ + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"MissingBody", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NoSuchDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"BatchTooLarge", + "error":{"httpStatusCode":413}, + "exception":true + }, + { + "shape":"TooManyInvalidationsInProgress", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InconsistentQuantities", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "CreateStreamingDistribution":{ + "name":"CreateStreamingDistribution2015_04_17", + "http":{ + "method":"POST", + "requestUri":"/2015-04-17/streaming-distribution", + "responseCode":201 + }, + "input":{"shape":"CreateStreamingDistributionRequest"}, + "output":{"shape":"CreateStreamingDistributionResult"}, + "errors":[ + { + "shape":"CNAMEAlreadyExists", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"StreamingDistributionAlreadyExists", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"InvalidOrigin", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidOriginAccessIdentity", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"TooManyTrustedSigners", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TrustedSignerDoesNotExist", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingBody", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyStreamingDistributionCNAMEs", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyStreamingDistributions", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InconsistentQuantities", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "DeleteCloudFrontOriginAccessIdentity":{ + "name":"DeleteCloudFrontOriginAccessIdentity2015_04_17", + "http":{ + "method":"DELETE", + "requestUri":"/2015-04-17/origin-access-identity/cloudfront/{Id}", + "responseCode":204 + }, + 
"input":{"shape":"DeleteCloudFrontOriginAccessIdentityRequest"}, + "errors":[ + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"InvalidIfMatchVersion", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NoSuchCloudFrontOriginAccessIdentity", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"PreconditionFailed", + "error":{"httpStatusCode":412}, + "exception":true + }, + { + "shape":"CloudFrontOriginAccessIdentityInUse", + "error":{"httpStatusCode":409}, + "exception":true + } + ] + }, + "DeleteDistribution":{ + "name":"DeleteDistribution2015_04_17", + "http":{ + "method":"DELETE", + "requestUri":"/2015-04-17/distribution/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeleteDistributionRequest"}, + "errors":[ + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"DistributionNotDisabled", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"InvalidIfMatchVersion", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NoSuchDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"PreconditionFailed", + "error":{"httpStatusCode":412}, + "exception":true + } + ] + }, + "DeleteStreamingDistribution":{ + "name":"DeleteStreamingDistribution2015_04_17", + "http":{ + "method":"DELETE", + "requestUri":"/2015-04-17/streaming-distribution/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeleteStreamingDistributionRequest"}, + "errors":[ + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"StreamingDistributionNotDisabled", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"InvalidIfMatchVersion", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NoSuchStreamingDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"PreconditionFailed", + "error":{"httpStatusCode":412}, + "exception":true + } + ] + }, + "GetCloudFrontOriginAccessIdentity":{ + "name":"GetCloudFrontOriginAccessIdentity2015_04_17", + "http":{ + "method":"GET", + "requestUri":"/2015-04-17/origin-access-identity/cloudfront/{Id}" + }, + "input":{"shape":"GetCloudFrontOriginAccessIdentityRequest"}, + "output":{"shape":"GetCloudFrontOriginAccessIdentityResult"}, + "errors":[ + { + "shape":"NoSuchCloudFrontOriginAccessIdentity", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + } + ] + }, + "GetCloudFrontOriginAccessIdentityConfig":{ + "name":"GetCloudFrontOriginAccessIdentityConfig2015_04_17", + "http":{ + "method":"GET", + "requestUri":"/2015-04-17/origin-access-identity/cloudfront/{Id}/config" + }, + "input":{"shape":"GetCloudFrontOriginAccessIdentityConfigRequest"}, + "output":{"shape":"GetCloudFrontOriginAccessIdentityConfigResult"}, + "errors":[ + { + "shape":"NoSuchCloudFrontOriginAccessIdentity", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + } + ] + }, + "GetDistribution":{ + "name":"GetDistribution2015_04_17", + "http":{ + "method":"GET", + "requestUri":"/2015-04-17/distribution/{Id}" + }, + "input":{"shape":"GetDistributionRequest"}, + "output":{"shape":"GetDistributionResult"}, + "errors":[ + { + "shape":"NoSuchDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + 
"shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + } + ] + }, + "GetDistributionConfig":{ + "name":"GetDistributionConfig2015_04_17", + "http":{ + "method":"GET", + "requestUri":"/2015-04-17/distribution/{Id}/config" + }, + "input":{"shape":"GetDistributionConfigRequest"}, + "output":{"shape":"GetDistributionConfigResult"}, + "errors":[ + { + "shape":"NoSuchDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + } + ] + }, + "GetInvalidation":{ + "name":"GetInvalidation2015_04_17", + "http":{ + "method":"GET", + "requestUri":"/2015-04-17/distribution/{DistributionId}/invalidation/{Id}" + }, + "input":{"shape":"GetInvalidationRequest"}, + "output":{"shape":"GetInvalidationResult"}, + "errors":[ + { + "shape":"NoSuchInvalidation", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"NoSuchDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + } + ] + }, + "GetStreamingDistribution":{ + "name":"GetStreamingDistribution2015_04_17", + "http":{ + "method":"GET", + "requestUri":"/2015-04-17/streaming-distribution/{Id}" + }, + "input":{"shape":"GetStreamingDistributionRequest"}, + "output":{"shape":"GetStreamingDistributionResult"}, + "errors":[ + { + "shape":"NoSuchStreamingDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + } + ] + }, + "GetStreamingDistributionConfig":{ + "name":"GetStreamingDistributionConfig2015_04_17", + "http":{ + "method":"GET", + "requestUri":"/2015-04-17/streaming-distribution/{Id}/config" + }, + "input":{"shape":"GetStreamingDistributionConfigRequest"}, + "output":{"shape":"GetStreamingDistributionConfigResult"}, + "errors":[ + { + "shape":"NoSuchStreamingDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + } + ] + }, + "ListCloudFrontOriginAccessIdentities":{ + "name":"ListCloudFrontOriginAccessIdentities2015_04_17", + "http":{ + "method":"GET", + "requestUri":"/2015-04-17/origin-access-identity/cloudfront" + }, + "input":{"shape":"ListCloudFrontOriginAccessIdentitiesRequest"}, + "output":{"shape":"ListCloudFrontOriginAccessIdentitiesResult"}, + "errors":[ + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "ListDistributions":{ + "name":"ListDistributions2015_04_17", + "http":{ + "method":"GET", + "requestUri":"/2015-04-17/distribution" + }, + "input":{"shape":"ListDistributionsRequest"}, + "output":{"shape":"ListDistributionsResult"}, + "errors":[ + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "ListInvalidations":{ + "name":"ListInvalidations2015_04_17", + "http":{ + "method":"GET", + "requestUri":"/2015-04-17/distribution/{DistributionId}/invalidation" + }, + "input":{"shape":"ListInvalidationsRequest"}, + "output":{"shape":"ListInvalidationsResult"}, + "errors":[ + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NoSuchDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + } + ] + }, + "ListStreamingDistributions":{ + 
"name":"ListStreamingDistributions2015_04_17", + "http":{ + "method":"GET", + "requestUri":"/2015-04-17/streaming-distribution" + }, + "input":{"shape":"ListStreamingDistributionsRequest"}, + "output":{"shape":"ListStreamingDistributionsResult"}, + "errors":[ + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "UpdateCloudFrontOriginAccessIdentity":{ + "name":"UpdateCloudFrontOriginAccessIdentity2015_04_17", + "http":{ + "method":"PUT", + "requestUri":"/2015-04-17/origin-access-identity/cloudfront/{Id}/config" + }, + "input":{"shape":"UpdateCloudFrontOriginAccessIdentityRequest"}, + "output":{"shape":"UpdateCloudFrontOriginAccessIdentityResult"}, + "errors":[ + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"IllegalUpdate", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidIfMatchVersion", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingBody", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NoSuchCloudFrontOriginAccessIdentity", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"PreconditionFailed", + "error":{"httpStatusCode":412}, + "exception":true + }, + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InconsistentQuantities", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "UpdateDistribution":{ + "name":"UpdateDistribution2015_04_17", + "http":{ + "method":"PUT", + "requestUri":"/2015-04-17/distribution/{Id}/config" + }, + "input":{"shape":"UpdateDistributionRequest"}, + "output":{"shape":"UpdateDistributionResult"}, + "errors":[ + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"CNAMEAlreadyExists", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"IllegalUpdate", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidIfMatchVersion", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingBody", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NoSuchDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"PreconditionFailed", + "error":{"httpStatusCode":412}, + "exception":true + }, + { + "shape":"TooManyDistributionCNAMEs", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidDefaultRootObject", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidRelativePath", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidErrorCode", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidResponseCode", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidOriginAccessIdentity", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyTrustedSigners", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TrustedSignerDoesNotExist", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidViewerCertificate", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidMinimumProtocolVersion", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidRequiredProtocol", + "error":{"httpStatusCode":400}, + 
"exception":true + }, + { + "shape":"NoSuchOrigin", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyOrigins", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyCacheBehaviors", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyCookieNamesInWhiteList", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidForwardCookies", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyHeadersInForwardedValues", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidHeadersForS3Origin", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InconsistentQuantities", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyCertificates", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidLocationCode", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidGeoRestrictionParameter", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidTTLOrder", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "UpdateStreamingDistribution":{ + "name":"UpdateStreamingDistribution2015_04_17", + "http":{ + "method":"PUT", + "requestUri":"/2015-04-17/streaming-distribution/{Id}/config" + }, + "input":{"shape":"UpdateStreamingDistributionRequest"}, + "output":{"shape":"UpdateStreamingDistributionResult"}, + "errors":[ + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"CNAMEAlreadyExists", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"IllegalUpdate", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidIfMatchVersion", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingBody", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NoSuchStreamingDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"PreconditionFailed", + "error":{"httpStatusCode":412}, + "exception":true + }, + { + "shape":"TooManyStreamingDistributionCNAMEs", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidOriginAccessIdentity", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyTrustedSigners", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TrustedSignerDoesNotExist", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InconsistentQuantities", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + } + }, + "shapes":{ + "AccessDenied":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":403}, + "exception":true + }, + "ActiveTrustedSigners":{ + "type":"structure", + "required":[ + "Enabled", + "Quantity" + ], + "members":{ + "Enabled":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"SignerList"} + } + }, + "AliasList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"CNAME" + } + }, + "Aliases":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"AliasList"} + } + }, + "AllowedMethods":{ + "type":"structure", + "required":[ + "Quantity", + "Items" + ], + "members":{ + "Quantity":{"shape":"integer"}, + 
"Items":{"shape":"MethodsList"}, + "CachedMethods":{"shape":"CachedMethods"} + } + }, + "AwsAccountNumberList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"AwsAccountNumber" + } + }, + "BatchTooLarge":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":413}, + "exception":true + }, + "CNAMEAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "CacheBehavior":{ + "type":"structure", + "required":[ + "PathPattern", + "TargetOriginId", + "ForwardedValues", + "TrustedSigners", + "ViewerProtocolPolicy", + "MinTTL" + ], + "members":{ + "PathPattern":{"shape":"string"}, + "TargetOriginId":{"shape":"string"}, + "ForwardedValues":{"shape":"ForwardedValues"}, + "TrustedSigners":{"shape":"TrustedSigners"}, + "ViewerProtocolPolicy":{"shape":"ViewerProtocolPolicy"}, + "MinTTL":{"shape":"long"}, + "AllowedMethods":{"shape":"AllowedMethods"}, + "SmoothStreaming":{"shape":"boolean"}, + "DefaultTTL":{"shape":"long"}, + "MaxTTL":{"shape":"long"} + } + }, + "CacheBehaviorList":{ + "type":"list", + "member":{ + "shape":"CacheBehavior", + "locationName":"CacheBehavior" + } + }, + "CacheBehaviors":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"CacheBehaviorList"} + } + }, + "CachedMethods":{ + "type":"structure", + "required":[ + "Quantity", + "Items" + ], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"MethodsList"} + } + }, + "CloudFrontOriginAccessIdentity":{ + "type":"structure", + "required":[ + "Id", + "S3CanonicalUserId" + ], + "members":{ + "Id":{"shape":"string"}, + "S3CanonicalUserId":{"shape":"string"}, + "CloudFrontOriginAccessIdentityConfig":{"shape":"CloudFrontOriginAccessIdentityConfig"} + } + }, + "CloudFrontOriginAccessIdentityAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "CloudFrontOriginAccessIdentityConfig":{ + "type":"structure", + "required":[ + "CallerReference", + "Comment" + ], + "members":{ + "CallerReference":{"shape":"string"}, + "Comment":{"shape":"string"} + } + }, + "CloudFrontOriginAccessIdentityInUse":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "CloudFrontOriginAccessIdentityList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{"shape":"string"}, + "NextMarker":{"shape":"string"}, + "MaxItems":{"shape":"integer"}, + "IsTruncated":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"CloudFrontOriginAccessIdentitySummaryList"} + } + }, + "CloudFrontOriginAccessIdentitySummary":{ + "type":"structure", + "required":[ + "Id", + "S3CanonicalUserId", + "Comment" + ], + "members":{ + "Id":{"shape":"string"}, + "S3CanonicalUserId":{"shape":"string"}, + "Comment":{"shape":"string"} + } + }, + "CloudFrontOriginAccessIdentitySummaryList":{ + "type":"list", + "member":{ + "shape":"CloudFrontOriginAccessIdentitySummary", + "locationName":"CloudFrontOriginAccessIdentitySummary" + } + }, + "CookieNameList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Name" + } + }, + "CookieNames":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"CookieNameList"} + } + }, + 
"CookiePreference":{ + "type":"structure", + "required":["Forward"], + "members":{ + "Forward":{"shape":"ItemSelection"}, + "WhitelistedNames":{"shape":"CookieNames"} + } + }, + "CreateCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "required":["CloudFrontOriginAccessIdentityConfig"], + "members":{ + "CloudFrontOriginAccessIdentityConfig":{ + "shape":"CloudFrontOriginAccessIdentityConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-04-17/"}, + "locationName":"CloudFrontOriginAccessIdentityConfig" + } + }, + "payload":"CloudFrontOriginAccessIdentityConfig" + }, + "CreateCloudFrontOriginAccessIdentityResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentity":{"shape":"CloudFrontOriginAccessIdentity"}, + "Location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"CloudFrontOriginAccessIdentity" + }, + "CreateDistributionRequest":{ + "type":"structure", + "required":["DistributionConfig"], + "members":{ + "DistributionConfig":{ + "shape":"DistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-04-17/"}, + "locationName":"DistributionConfig" + } + }, + "payload":"DistributionConfig" + }, + "CreateDistributionResult":{ + "type":"structure", + "members":{ + "Distribution":{"shape":"Distribution"}, + "Location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"Distribution" + }, + "CreateInvalidationRequest":{ + "type":"structure", + "required":[ + "DistributionId", + "InvalidationBatch" + ], + "members":{ + "DistributionId":{ + "shape":"string", + "location":"uri", + "locationName":"DistributionId" + }, + "InvalidationBatch":{ + "shape":"InvalidationBatch", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-04-17/"}, + "locationName":"InvalidationBatch" + } + }, + "payload":"InvalidationBatch" + }, + "CreateInvalidationResult":{ + "type":"structure", + "members":{ + "Location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "Invalidation":{"shape":"Invalidation"} + }, + "payload":"Invalidation" + }, + "CreateStreamingDistributionRequest":{ + "type":"structure", + "required":["StreamingDistributionConfig"], + "members":{ + "StreamingDistributionConfig":{ + "shape":"StreamingDistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-04-17/"}, + "locationName":"StreamingDistributionConfig" + } + }, + "payload":"StreamingDistributionConfig" + }, + "CreateStreamingDistributionResult":{ + "type":"structure", + "members":{ + "StreamingDistribution":{"shape":"StreamingDistribution"}, + "Location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"StreamingDistribution" + }, + "CustomErrorResponse":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"integer"}, + "ResponsePagePath":{"shape":"string"}, + "ResponseCode":{"shape":"string"}, + "ErrorCachingMinTTL":{"shape":"long"} + } + }, + "CustomErrorResponseList":{ + "type":"list", + "member":{ + "shape":"CustomErrorResponse", + "locationName":"CustomErrorResponse" + } + }, + "CustomErrorResponses":{ + "type":"structure", + "required":["Quantity"], + "members":{ + 
"Quantity":{"shape":"integer"}, + "Items":{"shape":"CustomErrorResponseList"} + } + }, + "CustomOriginConfig":{ + "type":"structure", + "required":[ + "HTTPPort", + "HTTPSPort", + "OriginProtocolPolicy" + ], + "members":{ + "HTTPPort":{"shape":"integer"}, + "HTTPSPort":{"shape":"integer"}, + "OriginProtocolPolicy":{"shape":"OriginProtocolPolicy"} + } + }, + "DefaultCacheBehavior":{ + "type":"structure", + "required":[ + "TargetOriginId", + "ForwardedValues", + "TrustedSigners", + "ViewerProtocolPolicy", + "MinTTL" + ], + "members":{ + "TargetOriginId":{"shape":"string"}, + "ForwardedValues":{"shape":"ForwardedValues"}, + "TrustedSigners":{"shape":"TrustedSigners"}, + "ViewerProtocolPolicy":{"shape":"ViewerProtocolPolicy"}, + "MinTTL":{"shape":"long"}, + "AllowedMethods":{"shape":"AllowedMethods"}, + "SmoothStreaming":{"shape":"boolean"}, + "DefaultTTL":{"shape":"long"}, + "MaxTTL":{"shape":"long"} + } + }, + "DeleteCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "required":["Id"] + }, + "DeleteDistributionRequest":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "required":["Id"] + }, + "DeleteStreamingDistributionRequest":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "required":["Id"] + }, + "Distribution":{ + "type":"structure", + "required":[ + "Id", + "Status", + "LastModifiedTime", + "InProgressInvalidationBatches", + "DomainName", + "ActiveTrustedSigners", + "DistributionConfig" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "LastModifiedTime":{"shape":"timestamp"}, + "InProgressInvalidationBatches":{"shape":"integer"}, + "DomainName":{"shape":"string"}, + "ActiveTrustedSigners":{"shape":"ActiveTrustedSigners"}, + "DistributionConfig":{"shape":"DistributionConfig"} + } + }, + "DistributionAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "DistributionConfig":{ + "type":"structure", + "required":[ + "CallerReference", + "Origins", + "DefaultCacheBehavior", + "Comment", + "Enabled" + ], + "members":{ + "CallerReference":{"shape":"string"}, + "Aliases":{"shape":"Aliases"}, + "DefaultRootObject":{"shape":"string"}, + "Origins":{"shape":"Origins"}, + "DefaultCacheBehavior":{"shape":"DefaultCacheBehavior"}, + "CacheBehaviors":{"shape":"CacheBehaviors"}, + "CustomErrorResponses":{"shape":"CustomErrorResponses"}, + "Comment":{"shape":"string"}, + "Logging":{"shape":"LoggingConfig"}, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{"shape":"boolean"}, + "ViewerCertificate":{"shape":"ViewerCertificate"}, + "Restrictions":{"shape":"Restrictions"} + } + }, + "DistributionList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{"shape":"string"}, + "NextMarker":{"shape":"string"}, + "MaxItems":{"shape":"integer"}, + "IsTruncated":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"DistributionSummaryList"} + } + }, + "DistributionNotDisabled":{ + 
"type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "DistributionSummary":{ + "type":"structure", + "required":[ + "Id", + "Status", + "LastModifiedTime", + "DomainName", + "Aliases", + "Origins", + "DefaultCacheBehavior", + "CacheBehaviors", + "CustomErrorResponses", + "Comment", + "PriceClass", + "Enabled", + "ViewerCertificate", + "Restrictions" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "LastModifiedTime":{"shape":"timestamp"}, + "DomainName":{"shape":"string"}, + "Aliases":{"shape":"Aliases"}, + "Origins":{"shape":"Origins"}, + "DefaultCacheBehavior":{"shape":"DefaultCacheBehavior"}, + "CacheBehaviors":{"shape":"CacheBehaviors"}, + "CustomErrorResponses":{"shape":"CustomErrorResponses"}, + "Comment":{"shape":"string"}, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{"shape":"boolean"}, + "ViewerCertificate":{"shape":"ViewerCertificate"}, + "Restrictions":{"shape":"Restrictions"} + } + }, + "DistributionSummaryList":{ + "type":"list", + "member":{ + "shape":"DistributionSummary", + "locationName":"DistributionSummary" + } + }, + "ForwardedValues":{ + "type":"structure", + "required":[ + "QueryString", + "Cookies" + ], + "members":{ + "QueryString":{"shape":"boolean"}, + "Cookies":{"shape":"CookiePreference"}, + "Headers":{"shape":"Headers"} + } + }, + "GeoRestriction":{ + "type":"structure", + "required":[ + "RestrictionType", + "Quantity" + ], + "members":{ + "RestrictionType":{"shape":"GeoRestrictionType"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"LocationList"} + } + }, + "GeoRestrictionType":{ + "type":"string", + "enum":[ + "blacklist", + "whitelist", + "none" + ] + }, + "GetCloudFrontOriginAccessIdentityConfigRequest":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + }, + "required":["Id"] + }, + "GetCloudFrontOriginAccessIdentityConfigResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentityConfig":{"shape":"CloudFrontOriginAccessIdentityConfig"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"CloudFrontOriginAccessIdentityConfig" + }, + "GetCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + }, + "required":["Id"] + }, + "GetCloudFrontOriginAccessIdentityResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentity":{"shape":"CloudFrontOriginAccessIdentity"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"CloudFrontOriginAccessIdentity" + }, + "GetDistributionConfigRequest":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + }, + "required":["Id"] + }, + "GetDistributionConfigResult":{ + "type":"structure", + "members":{ + "DistributionConfig":{"shape":"DistributionConfig"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"DistributionConfig" + }, + "GetDistributionRequest":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + }, + "required":["Id"] + }, + "GetDistributionResult":{ + "type":"structure", + "members":{ + "Distribution":{"shape":"Distribution"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"Distribution" 
+ }, + "GetInvalidationRequest":{ + "type":"structure", + "required":[ + "DistributionId", + "Id" + ], + "members":{ + "DistributionId":{ + "shape":"string", + "location":"uri", + "locationName":"DistributionId" + }, + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetInvalidationResult":{ + "type":"structure", + "members":{ + "Invalidation":{"shape":"Invalidation"} + }, + "payload":"Invalidation" + }, + "GetStreamingDistributionConfigRequest":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + }, + "required":["Id"] + }, + "GetStreamingDistributionConfigResult":{ + "type":"structure", + "members":{ + "StreamingDistributionConfig":{"shape":"StreamingDistributionConfig"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"StreamingDistributionConfig" + }, + "GetStreamingDistributionRequest":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + }, + "required":["Id"] + }, + "GetStreamingDistributionResult":{ + "type":"structure", + "members":{ + "StreamingDistribution":{"shape":"StreamingDistribution"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"StreamingDistribution" + }, + "HeaderList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Name" + } + }, + "Headers":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"HeaderList"} + } + }, + "IllegalUpdate":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InconsistentQuantities":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidArgument":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidDefaultRootObject":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidErrorCode":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidForwardCookies":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidGeoRestrictionParameter":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidHeadersForS3Origin":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidIfMatchVersion":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidLocationCode":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidMinimumProtocolVersion":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidOrigin":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidOriginAccessIdentity":{ + 
"type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidProtocolSettings":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidRelativePath":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidRequiredProtocol":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidResponseCode":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidTTLOrder":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidViewerCertificate":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "Invalidation":{ + "type":"structure", + "required":[ + "Id", + "Status", + "CreateTime", + "InvalidationBatch" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "CreateTime":{"shape":"timestamp"}, + "InvalidationBatch":{"shape":"InvalidationBatch"} + } + }, + "InvalidationBatch":{ + "type":"structure", + "required":[ + "Paths", + "CallerReference" + ], + "members":{ + "Paths":{"shape":"Paths"}, + "CallerReference":{"shape":"string"} + } + }, + "InvalidationList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{"shape":"string"}, + "NextMarker":{"shape":"string"}, + "MaxItems":{"shape":"integer"}, + "IsTruncated":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"InvalidationSummaryList"} + } + }, + "InvalidationSummary":{ + "type":"structure", + "required":[ + "Id", + "CreateTime", + "Status" + ], + "members":{ + "Id":{"shape":"string"}, + "CreateTime":{"shape":"timestamp"}, + "Status":{"shape":"string"} + } + }, + "InvalidationSummaryList":{ + "type":"list", + "member":{ + "shape":"InvalidationSummary", + "locationName":"InvalidationSummary" + } + }, + "ItemSelection":{ + "type":"string", + "enum":[ + "none", + "whitelist", + "all" + ] + }, + "KeyPairIdList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"KeyPairId" + } + }, + "KeyPairIds":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"KeyPairIdList"} + } + }, + "ListCloudFrontOriginAccessIdentitiesRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListCloudFrontOriginAccessIdentitiesResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentityList":{"shape":"CloudFrontOriginAccessIdentityList"} + }, + "payload":"CloudFrontOriginAccessIdentityList" + }, + "ListDistributionsRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListDistributionsResult":{ + "type":"structure", + "members":{ + "DistributionList":{"shape":"DistributionList"} + }, + 
"payload":"DistributionList" + }, + "ListInvalidationsRequest":{ + "type":"structure", + "required":["DistributionId"], + "members":{ + "DistributionId":{ + "shape":"string", + "location":"uri", + "locationName":"DistributionId" + }, + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListInvalidationsResult":{ + "type":"structure", + "members":{ + "InvalidationList":{"shape":"InvalidationList"} + }, + "payload":"InvalidationList" + }, + "ListStreamingDistributionsRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListStreamingDistributionsResult":{ + "type":"structure", + "members":{ + "StreamingDistributionList":{"shape":"StreamingDistributionList"} + }, + "payload":"StreamingDistributionList" + }, + "LocationList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Location" + } + }, + "LoggingConfig":{ + "type":"structure", + "required":[ + "Enabled", + "IncludeCookies", + "Bucket", + "Prefix" + ], + "members":{ + "Enabled":{"shape":"boolean"}, + "IncludeCookies":{"shape":"boolean"}, + "Bucket":{"shape":"string"}, + "Prefix":{"shape":"string"} + } + }, + "Method":{ + "type":"string", + "enum":[ + "GET", + "HEAD", + "POST", + "PUT", + "PATCH", + "OPTIONS", + "DELETE" + ] + }, + "MethodsList":{ + "type":"list", + "member":{ + "shape":"Method", + "locationName":"Method" + } + }, + "MinimumProtocolVersion":{ + "type":"string", + "enum":[ + "SSLv3", + "TLSv1" + ] + }, + "MissingBody":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "NoSuchCloudFrontOriginAccessIdentity":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchDistribution":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchInvalidation":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchOrigin":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchStreamingDistribution":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "Origin":{ + "type":"structure", + "required":[ + "Id", + "DomainName" + ], + "members":{ + "Id":{"shape":"string"}, + "DomainName":{"shape":"string"}, + "OriginPath":{"shape":"string"}, + "S3OriginConfig":{"shape":"S3OriginConfig"}, + "CustomOriginConfig":{"shape":"CustomOriginConfig"} + } + }, + "OriginList":{ + "type":"list", + "member":{ + "shape":"Origin", + "locationName":"Origin" + }, + "min":1 + }, + "OriginProtocolPolicy":{ + "type":"string", + "enum":[ + "http-only", + "match-viewer" + ] + }, + "Origins":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"OriginList"} + } + }, + "PathList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Path" + } + }, + "Paths":{ + "type":"structure", + "required":["Quantity"], + "members":{ + 
"Quantity":{"shape":"integer"}, + "Items":{"shape":"PathList"} + } + }, + "PreconditionFailed":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":412}, + "exception":true + }, + "PriceClass":{ + "type":"string", + "enum":[ + "PriceClass_100", + "PriceClass_200", + "PriceClass_All" + ] + }, + "Restrictions":{ + "type":"structure", + "required":["GeoRestriction"], + "members":{ + "GeoRestriction":{"shape":"GeoRestriction"} + } + }, + "S3Origin":{ + "type":"structure", + "required":[ + "DomainName", + "OriginAccessIdentity" + ], + "members":{ + "DomainName":{"shape":"string"}, + "OriginAccessIdentity":{"shape":"string"} + } + }, + "S3OriginConfig":{ + "type":"structure", + "required":["OriginAccessIdentity"], + "members":{ + "OriginAccessIdentity":{"shape":"string"} + } + }, + "SSLSupportMethod":{ + "type":"string", + "enum":[ + "sni-only", + "vip" + ] + }, + "Signer":{ + "type":"structure", + "members":{ + "AwsAccountNumber":{"shape":"string"}, + "KeyPairIds":{"shape":"KeyPairIds"} + } + }, + "SignerList":{ + "type":"list", + "member":{ + "shape":"Signer", + "locationName":"Signer" + } + }, + "StreamingDistribution":{ + "type":"structure", + "required":[ + "Id", + "Status", + "DomainName", + "ActiveTrustedSigners", + "StreamingDistributionConfig" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "LastModifiedTime":{"shape":"timestamp"}, + "DomainName":{"shape":"string"}, + "ActiveTrustedSigners":{"shape":"ActiveTrustedSigners"}, + "StreamingDistributionConfig":{"shape":"StreamingDistributionConfig"} + } + }, + "StreamingDistributionAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "StreamingDistributionConfig":{ + "type":"structure", + "required":[ + "CallerReference", + "S3Origin", + "Comment", + "TrustedSigners", + "Enabled" + ], + "members":{ + "CallerReference":{"shape":"string"}, + "S3Origin":{"shape":"S3Origin"}, + "Aliases":{"shape":"Aliases"}, + "Comment":{"shape":"string"}, + "Logging":{"shape":"StreamingLoggingConfig"}, + "TrustedSigners":{"shape":"TrustedSigners"}, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{"shape":"boolean"} + } + }, + "StreamingDistributionList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{"shape":"string"}, + "NextMarker":{"shape":"string"}, + "MaxItems":{"shape":"integer"}, + "IsTruncated":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"StreamingDistributionSummaryList"} + } + }, + "StreamingDistributionNotDisabled":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "StreamingDistributionSummary":{ + "type":"structure", + "required":[ + "Id", + "Status", + "LastModifiedTime", + "DomainName", + "S3Origin", + "Aliases", + "TrustedSigners", + "Comment", + "PriceClass", + "Enabled" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "LastModifiedTime":{"shape":"timestamp"}, + "DomainName":{"shape":"string"}, + "S3Origin":{"shape":"S3Origin"}, + "Aliases":{"shape":"Aliases"}, + "TrustedSigners":{"shape":"TrustedSigners"}, + "Comment":{"shape":"string"}, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{"shape":"boolean"} + } + }, + "StreamingDistributionSummaryList":{ + "type":"list", + "member":{ + "shape":"StreamingDistributionSummary", + 
"locationName":"StreamingDistributionSummary" + } + }, + "StreamingLoggingConfig":{ + "type":"structure", + "required":[ + "Enabled", + "Bucket", + "Prefix" + ], + "members":{ + "Enabled":{"shape":"boolean"}, + "Bucket":{"shape":"string"}, + "Prefix":{"shape":"string"} + } + }, + "TooManyCacheBehaviors":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyCertificates":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyCloudFrontOriginAccessIdentities":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyCookieNamesInWhiteList":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyDistributionCNAMEs":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyDistributions":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyHeadersInForwardedValues":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyInvalidationsInProgress":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyOrigins":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyStreamingDistributionCNAMEs":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyStreamingDistributions":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyTrustedSigners":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TrustedSignerDoesNotExist":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TrustedSigners":{ + "type":"structure", + "required":[ + "Enabled", + "Quantity" + ], + "members":{ + "Enabled":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"AwsAccountNumberList"} + } + }, + "UpdateCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "required":[ + "CloudFrontOriginAccessIdentityConfig", + "Id" + ], + "members":{ + "CloudFrontOriginAccessIdentityConfig":{ + "shape":"CloudFrontOriginAccessIdentityConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-04-17/"}, + "locationName":"CloudFrontOriginAccessIdentityConfig" + }, + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "payload":"CloudFrontOriginAccessIdentityConfig" + }, + "UpdateCloudFrontOriginAccessIdentityResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentity":{"shape":"CloudFrontOriginAccessIdentity"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"CloudFrontOriginAccessIdentity" + }, + 
"UpdateDistributionRequest":{ + "type":"structure", + "required":[ + "DistributionConfig", + "Id" + ], + "members":{ + "DistributionConfig":{ + "shape":"DistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-04-17/"}, + "locationName":"DistributionConfig" + }, + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "payload":"DistributionConfig" + }, + "UpdateDistributionResult":{ + "type":"structure", + "members":{ + "Distribution":{"shape":"Distribution"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"Distribution" + }, + "UpdateStreamingDistributionRequest":{ + "type":"structure", + "required":[ + "StreamingDistributionConfig", + "Id" + ], + "members":{ + "StreamingDistributionConfig":{ + "shape":"StreamingDistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-04-17/"}, + "locationName":"StreamingDistributionConfig" + }, + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "payload":"StreamingDistributionConfig" + }, + "UpdateStreamingDistributionResult":{ + "type":"structure", + "members":{ + "StreamingDistribution":{"shape":"StreamingDistribution"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"StreamingDistribution" + }, + "ViewerCertificate":{ + "type":"structure", + "members":{ + "IAMCertificateId":{"shape":"string"}, + "CloudFrontDefaultCertificate":{"shape":"boolean"}, + "SSLSupportMethod":{"shape":"SSLSupportMethod"}, + "MinimumProtocolVersion":{"shape":"MinimumProtocolVersion"} + } + }, + "ViewerProtocolPolicy":{ + "type":"string", + "enum":[ + "allow-all", + "https-only", + "redirect-to-https" + ] + }, + "boolean":{"type":"boolean"}, + "integer":{"type":"integer"}, + "long":{"type":"long"}, + "string":{"type":"string"}, + "timestamp":{"type":"timestamp"} + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-04-17/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-04-17/docs-2.json new file mode 100644 index 000000000..bd2eb39f1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-04-17/docs-2.json @@ -0,0 +1,1141 @@ +{ + "version": "2.0", + "operations": { + "CreateCloudFrontOriginAccessIdentity": "Create a new origin access identity.", + "CreateDistribution": "Create a new distribution.", + "CreateInvalidation": "Create a new invalidation.", + "CreateStreamingDistribution": "Create a new streaming distribution.", + "DeleteCloudFrontOriginAccessIdentity": "Delete an origin access identity.", + "DeleteDistribution": "Delete a distribution.", + "DeleteStreamingDistribution": "Delete a streaming distribution.", + "GetCloudFrontOriginAccessIdentity": "Get the information about an origin access identity.", + "GetCloudFrontOriginAccessIdentityConfig": "Get the configuration information about an origin access identity.", + "GetDistribution": "Get the information about a distribution.", + "GetDistributionConfig": "Get the configuration information about a distribution.", + "GetInvalidation": "Get the information about an invalidation.", + "GetStreamingDistribution": "Get the information about a streaming distribution.", + "GetStreamingDistributionConfig": "Get the configuration information about a streaming 
distribution.", + "ListCloudFrontOriginAccessIdentities": "List origin access identities.", + "ListDistributions": "List distributions.", + "ListInvalidations": "List invalidation batches.", + "ListStreamingDistributions": "List streaming distributions.", + "UpdateCloudFrontOriginAccessIdentity": "Update an origin access identity.", + "UpdateDistribution": "Update a distribution.", + "UpdateStreamingDistribution": "Update a streaming distribution." + }, + "service": null, + "shapes": { + "AccessDenied": { + "base": "Access denied.", + "refs": { + } + }, + "ActiveTrustedSigners": { + "base": "A complex type that lists the AWS accounts, if any, that you included in the TrustedSigners complex type for the default cache behavior or for any of the other cache behaviors for this distribution. These are accounts that you want to allow to create signed URLs for private content.", + "refs": { + "Distribution$ActiveTrustedSigners": "CloudFront automatically adds this element to the response only if you've set up the distribution to serve private content with signed URLs. The element lists the key pair IDs that CloudFront is aware of for each trusted signer. The Signer child element lists the AWS account number of the trusted signer (or an empty Self element if the signer is you). The Signer element also includes the IDs of any active key pairs associated with the trusted signer's AWS account. If no KeyPairId element appears for a Signer, that signer can't create working signed URLs.", + "StreamingDistribution$ActiveTrustedSigners": "CloudFront automatically adds this element to the response only if you've set up the distribution to serve private content with signed URLs. The element lists the key pair IDs that CloudFront is aware of for each trusted signer. The Signer child element lists the AWS account number of the trusted signer (or an empty Self element if the signer is you). The Signer element also includes the IDs of any active key pairs associated with the trusted signer's AWS account. If no KeyPairId element appears for a Signer, that signer can't create working signed URLs." + } + }, + "AliasList": { + "base": null, + "refs": { + "Aliases$Items": "Optional: A complex type that contains CNAME elements, if any, for this distribution. If Quantity is 0, you can omit Items." + } + }, + "Aliases": { + "base": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution.", + "refs": { + "DistributionConfig$Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution.", + "DistributionSummary$Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution.", + "StreamingDistributionConfig$Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this streaming distribution.", + "StreamingDistributionSummary$Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this streaming distribution." + } + }, + "AllowedMethods": { + "base": "A complex type that controls which HTTP methods CloudFront processes and forwards to your Amazon S3 bucket or your custom origin. There are three choices: - CloudFront forwards only GET and HEAD requests. - CloudFront forwards only GET, HEAD and OPTIONS requests. - CloudFront forwards GET, HEAD, OPTIONS, PUT, PATCH, POST, and DELETE requests. 
If you pick the third choice, you may need to restrict access to your Amazon S3 bucket or to your custom origin so users can't perform operations that you don't want them to. For example, you may not want users to have permission to delete objects from your origin.", + "refs": { + "CacheBehavior$AllowedMethods": null, + "DefaultCacheBehavior$AllowedMethods": null + } + }, + "AwsAccountNumberList": { + "base": null, + "refs": { + "TrustedSigners$Items": "Optional: A complex type that contains trusted signers for this cache behavior. If Quantity is 0, you can omit Items." + } + }, + "BatchTooLarge": { + "base": null, + "refs": { + } + }, + "CNAMEAlreadyExists": { + "base": null, + "refs": { + } + }, + "CacheBehavior": { + "base": "A complex type that describes how CloudFront processes requests. You can create up to 10 cache behaviors. You must create at least as many cache behaviors (including the default cache behavior) as you have origins if you want CloudFront to distribute objects from all of the origins. Each cache behavior specifies the one origin from which you want CloudFront to get objects. If you have two origins and only the default cache behavior, the default cache behavior will cause CloudFront to get objects from one of the origins, but the other origin will never be used. If you don't want to specify any cache behaviors, include only an empty CacheBehaviors element. Don't include an empty CacheBehavior element, or CloudFront returns a MalformedXML error. To delete all cache behaviors in an existing distribution, update the distribution configuration and include only an empty CacheBehaviors element. To add, change, or remove one or more cache behaviors, update the distribution configuration and specify all of the cache behaviors that you want to include in the updated distribution.", + "refs": { + "CacheBehaviorList$member": null + } + }, + "CacheBehaviorList": { + "base": null, + "refs": { + "CacheBehaviors$Items": "Optional: A complex type that contains cache behaviors for this distribution. If Quantity is 0, you can omit Items." + } + }, + "CacheBehaviors": { + "base": "A complex type that contains zero or more CacheBehavior elements.", + "refs": { + "DistributionConfig$CacheBehaviors": "A complex type that contains zero or more CacheBehavior elements.", + "DistributionSummary$CacheBehaviors": "A complex type that contains zero or more CacheBehavior elements." + } + }, + "CachedMethods": { + "base": "A complex type that controls whether CloudFront caches the response to requests using the specified HTTP methods. There are two choices: - CloudFront caches responses to GET and HEAD requests. - CloudFront caches responses to GET, HEAD, and OPTIONS requests. If you pick the second choice for your S3 Origin, you may need to forward Access-Control-Request-Method, Access-Control-Request-Headers and Origin headers for the responses to be cached correctly.", + "refs": { + "AllowedMethods$CachedMethods": null + } + }, + "CloudFrontOriginAccessIdentity": { + "base": "CloudFront origin access identity.", + "refs": { + "CreateCloudFrontOriginAccessIdentityResult$CloudFrontOriginAccessIdentity": "The origin access identity's information.", + "GetCloudFrontOriginAccessIdentityResult$CloudFrontOriginAccessIdentity": "The origin access identity's information.", + "UpdateCloudFrontOriginAccessIdentityResult$CloudFrontOriginAccessIdentity": "The origin access identity's information."
+ } + }, + "CloudFrontOriginAccessIdentityAlreadyExists": { + "base": "If the CallerReference is a value you already sent in a previous request to create an identity but the content of the CloudFrontOriginAccessIdentityConfig is different from the original request, CloudFront returns a CloudFrontOriginAccessIdentityAlreadyExists error.", + "refs": { + } + }, + "CloudFrontOriginAccessIdentityConfig": { + "base": "Origin access identity configuration.", + "refs": { + "CloudFrontOriginAccessIdentity$CloudFrontOriginAccessIdentityConfig": "The current configuration information for the identity.", + "CreateCloudFrontOriginAccessIdentityRequest$CloudFrontOriginAccessIdentityConfig": "The origin access identity's configuration information.", + "GetCloudFrontOriginAccessIdentityConfigResult$CloudFrontOriginAccessIdentityConfig": "The origin access identity's configuration information.", + "UpdateCloudFrontOriginAccessIdentityRequest$CloudFrontOriginAccessIdentityConfig": "The identity's configuration information." + } + }, + "CloudFrontOriginAccessIdentityInUse": { + "base": null, + "refs": { + } + }, + "CloudFrontOriginAccessIdentityList": { + "base": "The CloudFrontOriginAccessIdentityList type.", + "refs": { + "ListCloudFrontOriginAccessIdentitiesResult$CloudFrontOriginAccessIdentityList": "The CloudFrontOriginAccessIdentityList type." + } + }, + "CloudFrontOriginAccessIdentitySummary": { + "base": "Summary of the information about a CloudFront origin access identity.", + "refs": { + "CloudFrontOriginAccessIdentitySummaryList$member": null + } + }, + "CloudFrontOriginAccessIdentitySummaryList": { + "base": null, + "refs": { + "CloudFrontOriginAccessIdentityList$Items": "A complex type that contains one CloudFrontOriginAccessIdentitySummary element for each origin access identity that was created by the current AWS account." + } + }, + "CookieNameList": { + "base": null, + "refs": { + "CookieNames$Items": "Optional: A complex type that contains whitelisted cookies for this cache behavior. If Quantity is 0, you can omit Items." + } + }, + "CookieNames": { + "base": "A complex type that specifies the whitelisted cookies, if any, that you want CloudFront to forward to your origin that is associated with this cache behavior.", + "refs": { + "CookiePreference$WhitelistedNames": "A complex type that specifies the whitelisted cookies, if any, that you want CloudFront to forward to your origin that is associated with this cache behavior." + } + }, + "CookiePreference": { + "base": "A complex type that specifies the cookie preferences associated with this cache behavior.", + "refs": { + "ForwardedValues$Cookies": "A complex type that specifies how CloudFront handles cookies." 
+ } + }, + "CreateCloudFrontOriginAccessIdentityRequest": { + "base": "The request to create a new origin access identity.", + "refs": { + } + }, + "CreateCloudFrontOriginAccessIdentityResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "CreateDistributionRequest": { + "base": "The request to create a new distribution.", + "refs": { + } + }, + "CreateDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "CreateInvalidationRequest": { + "base": "The request to create an invalidation.", + "refs": { + } + }, + "CreateInvalidationResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "CreateStreamingDistributionRequest": { + "base": "The request to create a new streaming distribution.", + "refs": { + } + }, + "CreateStreamingDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "CustomErrorResponse": { + "base": "A complex type that describes how you'd prefer CloudFront to respond to requests that result in either a 4xx or 5xx response. You can control whether a custom error page should be displayed, what the desired response code should be for this error page and how long the error response should be cached by CloudFront. If you don't want to specify any custom error responses, include only an empty CustomErrorResponses element. To delete all custom error responses in an existing distribution, update the distribution configuration and include only an empty CustomErrorResponses element. To add, change, or remove one or more custom error responses, update the distribution configuration and specify all of the custom error responses that you want to include in the updated distribution.", + "refs": { + "CustomErrorResponseList$member": null + } + }, + "CustomErrorResponseList": { + "base": null, + "refs": { + "CustomErrorResponses$Items": "Optional: A complex type that contains custom error responses for this distribution. If Quantity is 0, you can omit Items." + } + }, + "CustomErrorResponses": { + "base": "A complex type that contains zero or more CustomErrorResponse elements.", + "refs": { + "DistributionConfig$CustomErrorResponses": "A complex type that contains zero or more CustomErrorResponse elements.", + "DistributionSummary$CustomErrorResponses": "A complex type that contains zero or more CustomErrorResponse elements." + } + }, + "CustomOriginConfig": { + "base": "A custom origin.", + "refs": { + "Origin$CustomOriginConfig": "A complex type that contains information about a custom origin. If the origin is an Amazon S3 bucket, use the S3OriginConfig element instead."
+ } + }, + "DefaultCacheBehavior": { + "base": "A complex type that describes the default cache behavior if you do not specify a CacheBehavior element or if files don't match any of the values of PathPattern in CacheBehavior elements. You must create exactly one default cache behavior.", + "refs": { + "DistributionConfig$DefaultCacheBehavior": "A complex type that describes the default cache behavior if you do not specify a CacheBehavior element or if files don't match any of the values of PathPattern in CacheBehavior elements. You must create exactly one default cache behavior.", + "DistributionSummary$DefaultCacheBehavior": "A complex type that describes the default cache behavior if you do not specify a CacheBehavior element or if files don't match any of the values of PathPattern in CacheBehavior elements. You must create exactly one default cache behavior." + } + }, + "DeleteCloudFrontOriginAccessIdentityRequest": { + "base": "The request to delete an origin access identity.", + "refs": { + } + }, + "DeleteDistributionRequest": { + "base": "The request to delete a distribution.", + "refs": { + } + }, + "DeleteStreamingDistributionRequest": { + "base": "The request to delete a streaming distribution.", + "refs": { + } + }, + "Distribution": { + "base": "A distribution.", + "refs": { + "CreateDistributionResult$Distribution": "The distribution's information.", + "GetDistributionResult$Distribution": "The distribution's information.", + "UpdateDistributionResult$Distribution": "The distribution's information." + } + }, + "DistributionAlreadyExists": { + "base": "The caller reference you attempted to create the distribution with is associated with another distribution.", + "refs": { + } + }, + "DistributionConfig": { + "base": "A distribution configuration.", + "refs": { + "CreateDistributionRequest$DistributionConfig": "The distribution's configuration information.", + "Distribution$DistributionConfig": "The current configuration information for the distribution.", + "GetDistributionConfigResult$DistributionConfig": "The distribution's configuration information.", + "UpdateDistributionRequest$DistributionConfig": "The distribution's configuration information." + } + }, + "DistributionList": { + "base": "A distribution list.", + "refs": { + "ListDistributionsResult$DistributionList": "The DistributionList type." + } + }, + "DistributionNotDisabled": { + "base": null, + "refs": { + } + }, + "DistributionSummary": { + "base": "A summary of the information for an Amazon CloudFront distribution.", + "refs": { + "DistributionSummaryList$member": null + } + }, + "DistributionSummaryList": { + "base": null, + "refs": { + "DistributionList$Items": "A complex type that contains one DistributionSummary element for each distribution that was created by the current AWS account." + } + }, + "ForwardedValues": { + "base": "A complex type that specifies how CloudFront handles query strings, cookies and headers.", + "refs": { + "CacheBehavior$ForwardedValues": "A complex type that specifies how CloudFront handles query strings, cookies and headers.", + "DefaultCacheBehavior$ForwardedValues": "A complex type that specifies how CloudFront handles query strings, cookies and headers." + } + }, + "GeoRestriction": { + "base": "A complex type that controls the countries in which your content is distributed. For more information about geo restriction, go to Customizing Error Responses in the Amazon CloudFront Developer Guide. CloudFront determines the location of your users using MaxMind GeoIP databases.
For information about the accuracy of these databases, see How accurate are your GeoIP databases? on the MaxMind website.", + "refs": { + "Restrictions$GeoRestriction": null + } + }, + "GeoRestrictionType": { + "base": null, + "refs": { + "GeoRestriction$RestrictionType": "The method that you want to use to restrict distribution of your content by country: - none: No geo restriction is enabled, meaning access to content is not restricted by client geo location. - blacklist: The Location elements specify the countries in which you do not want CloudFront to distribute your content. - whitelist: The Location elements specify the countries in which you want CloudFront to distribute your content." + } + }, + "GetCloudFrontOriginAccessIdentityConfigRequest": { + "base": "The request to get an origin access identity's configuration.", + "refs": { + } + }, + "GetCloudFrontOriginAccessIdentityConfigResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetCloudFrontOriginAccessIdentityRequest": { + "base": "The request to get an origin access identity's information.", + "refs": { + } + }, + "GetCloudFrontOriginAccessIdentityResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetDistributionConfigRequest": { + "base": "The request to get a distribution configuration.", + "refs": { + } + }, + "GetDistributionConfigResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetDistributionRequest": { + "base": "The request to get a distribution's information.", + "refs": { + } + }, + "GetDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetInvalidationRequest": { + "base": "The request to get an invalidation's information.", + "refs": { + } + }, + "GetInvalidationResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetStreamingDistributionConfigRequest": { + "base": "The request to get a streaming distribution configuration.", + "refs": { + } + }, + "GetStreamingDistributionConfigResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetStreamingDistributionRequest": { + "base": "The request to get a streaming distribution's information.", + "refs": { + } + }, + "GetStreamingDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "HeaderList": { + "base": null, + "refs": { + "Headers$Items": "Optional: A complex type that contains a Name element for each header that you want CloudFront to forward to the origin and to vary on for this cache behavior. If Quantity is 0, omit Items." + } + }, + "Headers": { + "base": "A complex type that specifies the headers that you want CloudFront to forward to the origin for this cache behavior. For the headers that you specify, CloudFront also caches separate versions of a given object based on the header values in viewer requests; this is known as varying on headers. For example, suppose viewer requests for logo.jpg contain a custom Product header that has a value of either Acme or Apex, and you configure CloudFront to vary on the Product header. CloudFront forwards the Product header to the origin and caches the response from the origin once for each header value.", + "refs": { + "ForwardedValues$Headers": "A complex type that specifies the Headers, if any, that you want CloudFront to vary upon for this cache behavior."
+ } + }, + "IllegalUpdate": { + "base": "Origin and CallerReference cannot be updated.", + "refs": { + } + }, + "InconsistentQuantities": { + "base": "The value of Quantity and the size of Items do not match.", + "refs": { + } + }, + "InvalidArgument": { + "base": "The argument is invalid.", + "refs": { + } + }, + "InvalidDefaultRootObject": { + "base": "The default root object file name is too big or contains an invalid character.", + "refs": { + } + }, + "InvalidErrorCode": { + "base": null, + "refs": { + } + }, + "InvalidForwardCookies": { + "base": "Your request contains a forward cookies option which doesn't match the expectation for the whitelisted list of cookie names. Either a list of cookie names has been specified when not allowed, or the list of cookie names is missing when expected.", + "refs": { + } + }, + "InvalidGeoRestrictionParameter": { + "base": null, + "refs": { + } + }, + "InvalidHeadersForS3Origin": { + "base": null, + "refs": { + } + }, + "InvalidIfMatchVersion": { + "base": "The If-Match version is missing or not valid for the distribution.", + "refs": { + } + }, + "InvalidLocationCode": { + "base": null, + "refs": { + } + }, + "InvalidMinimumProtocolVersion": { + "base": null, + "refs": { + } + }, + "InvalidOrigin": { + "base": "The Amazon S3 origin server specified does not refer to a valid Amazon S3 bucket.", + "refs": { + } + }, + "InvalidOriginAccessIdentity": { + "base": "The origin access identity is not valid or doesn't exist.", + "refs": { + } + }, + "InvalidProtocolSettings": { + "base": "You cannot specify SSLv3 as the minimum protocol version if you want to support only clients that support Server Name Indication (SNI).", + "refs": { + } + }, + "InvalidRelativePath": { + "base": "The relative path is too big, is not URL-encoded, or does not begin with a slash (/).", + "refs": { + } + }, + "InvalidRequiredProtocol": { + "base": "This operation requires the HTTPS protocol. Ensure that you specify the HTTPS protocol in your request, or omit the RequiredProtocols element from your distribution configuration.", + "refs": { + } + }, + "InvalidResponseCode": { + "base": null, + "refs": { + } + }, + "InvalidTTLOrder": { + "base": null, + "refs": { + } + }, + "InvalidViewerCertificate": { + "base": null, + "refs": { + } + }, + "Invalidation": { + "base": "An invalidation.", + "refs": { + "CreateInvalidationResult$Invalidation": "The invalidation's information.", + "GetInvalidationResult$Invalidation": "The invalidation's information." + } + }, + "InvalidationBatch": { + "base": "An invalidation batch.", + "refs": { + "CreateInvalidationRequest$InvalidationBatch": "The batch information for the invalidation.", + "Invalidation$InvalidationBatch": "The current invalidation information for the batch request." + } + }, + "InvalidationList": { + "base": "An invalidation list.", + "refs": { + "ListInvalidationsResult$InvalidationList": "Information about invalidation batches." + } + }, + "InvalidationSummary": { + "base": "Summary of an invalidation request.", + "refs": { + "InvalidationSummaryList$member": null + } + }, + "InvalidationSummaryList": { + "base": null, + "refs": { + "InvalidationList$Items": "A complex type that contains one InvalidationSummary element for each invalidation batch that was created by the current AWS account." + } + }, + "ItemSelection": { + "base": null, + "refs": { + "CookiePreference$Forward": "Use this element to specify whether you want CloudFront to forward cookies to the origin that is associated with this cache behavior.
You can specify all, none or whitelist. If you choose all, CloudFront forwards all cookies regardless of how many your application uses." + } + }, + "KeyPairIdList": { + "base": null, + "refs": { + "KeyPairIds$Items": "A complex type that lists the active CloudFront key pairs, if any, that are associated with AwsAccountNumber." + } + }, + "KeyPairIds": { + "base": "A complex type that lists the active CloudFront key pairs, if any, that are associated with AwsAccountNumber.", + "refs": { + "Signer$KeyPairIds": "A complex type that lists the active CloudFront key pairs, if any, that are associated with AwsAccountNumber." + } + }, + "ListCloudFrontOriginAccessIdentitiesRequest": { + "base": "The request to list origin access identities.", + "refs": { + } + }, + "ListCloudFrontOriginAccessIdentitiesResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "ListDistributionsRequest": { + "base": "The request to list your distributions.", + "refs": { + } + }, + "ListDistributionsResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "ListInvalidationsRequest": { + "base": "The request to list invalidations.", + "refs": { + } + }, + "ListInvalidationsResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "ListStreamingDistributionsRequest": { + "base": "The request to list your streaming distributions.", + "refs": { + } + }, + "ListStreamingDistributionsResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "LocationList": { + "base": null, + "refs": { + "GeoRestriction$Items": "A complex type that contains a Location element for each country in which you want CloudFront either to distribute your content (whitelist) or not distribute your content (blacklist). The Location element is a two-letter, uppercase country code for a country that you want to include in your blacklist or whitelist. Include one Location element for each country. CloudFront and MaxMind both use ISO 3166 country codes. For the current list of countries and the corresponding codes, see ISO 3166-1-alpha-2 code on the International Organization for Standardization website. You can also refer to the country list in the CloudFront console, which includes both country names and codes." + } + }, + "LoggingConfig": { + "base": "A complex type that controls whether access logs are written for the distribution.", + "refs": { + "DistributionConfig$Logging": "A complex type that controls whether access logs are written for the distribution." + } + }, + "Method": { + "base": null, + "refs": { + "MethodsList$member": null + } + }, + "MethodsList": { + "base": null, + "refs": { + "AllowedMethods$Items": "A complex type that contains the HTTP methods that you want CloudFront to process and forward to your origin.", + "CachedMethods$Items": "A complex type that contains the HTTP methods that you want CloudFront to cache responses to." + } + }, + "MinimumProtocolVersion": { + "base": null, + "refs": { + "ViewerCertificate$MinimumProtocolVersion": "Specify the minimum version of the SSL protocol that you want CloudFront to use, SSLv3 or TLSv1, for HTTPS connections. CloudFront will serve your objects only to browsers or devices that support at least the SSL version that you specify. The TLSv1 protocol is more secure, so we recommend that you specify SSLv3 only if your users are using browsers or devices that don't support TLSv1.
If you're using a custom certificate (if you specify a value for IAMCertificateId) and if you're using dedicated IP (if you specify vip for SSLSupportMethod), you can choose SSLv3 or TLSv1 as the MinimumProtocolVersion. If you're using a custom certificate (if you specify a value for IAMCertificateId) and if you're using SNI (if you specify sni-only for SSLSupportMethod), you must specify TLSv1 for MinimumProtocolVersion." + } + }, + "MissingBody": { + "base": "This operation requires a body. Ensure that the body is present and the Content-Type header is set.", + "refs": { + } + }, + "NoSuchCloudFrontOriginAccessIdentity": { + "base": "The specified origin access identity does not exist.", + "refs": { + } + }, + "NoSuchDistribution": { + "base": "The specified distribution does not exist.", + "refs": { + } + }, + "NoSuchInvalidation": { + "base": "The specified invalidation does not exist.", + "refs": { + } + }, + "NoSuchOrigin": { + "base": "No origin exists with the specified Origin Id.", + "refs": { + } + }, + "NoSuchStreamingDistribution": { + "base": "The specified streaming distribution does not exist.", + "refs": { + } + }, + "Origin": { + "base": "A complex type that describes the Amazon S3 bucket or the HTTP server (for example, a web server) from which CloudFront gets your files. You must create at least one origin.", + "refs": { + "OriginList$member": null + } + }, + "OriginList": { + "base": null, + "refs": { + "Origins$Items": "A complex type that contains origins for this distribution." + } + }, + "OriginProtocolPolicy": { + "base": null, + "refs": { + "CustomOriginConfig$OriginProtocolPolicy": "The origin protocol policy to apply to your origin." + } + }, + "Origins": { + "base": "A complex type that contains information about origins for this distribution.", + "refs": { + "DistributionConfig$Origins": "A complex type that contains information about origins for this distribution.", + "DistributionSummary$Origins": "A complex type that contains information about origins for this distribution." + } + }, + "PathList": { + "base": null, + "refs": { + "Paths$Items": "A complex type that contains a list of the objects that you want to invalidate." + } + }, + "Paths": { + "base": "A complex type that contains information about the objects that you want to invalidate.", + "refs": { + "InvalidationBatch$Paths": "The path of the object to invalidate. The path is relative to the distribution and must begin with a slash (/). You must enclose each invalidation object with the Path element tags. If the path includes non-ASCII characters or unsafe characters as defined in RFC 1738 (http://www.ietf.org/rfc/rfc1738.txt), URL encode those characters. Do not URL encode any other characters in the path, or CloudFront will not invalidate the old version of the updated object."
+ } + }, + "PreconditionFailed": { + "base": "The precondition given in one or more of the request-header fields evaluated to false.", + "refs": { + } + }, + "PriceClass": { + "base": null, + "refs": { + "DistributionConfig$PriceClass": "A complex type that contains information about price class for this distribution.", + "DistributionSummary$PriceClass": null, + "StreamingDistributionConfig$PriceClass": "A complex type that contains information about price class for this streaming distribution.", + "StreamingDistributionSummary$PriceClass": null + } + }, + "Restrictions": { + "base": "A complex type that identifies ways in which you want to restrict distribution of your content.", + "refs": { + "DistributionConfig$Restrictions": null, + "DistributionSummary$Restrictions": null + } + }, + "S3Origin": { + "base": "A complex type that contains information about the Amazon S3 bucket from which you want CloudFront to get your media files for distribution.", + "refs": { + "StreamingDistributionConfig$S3Origin": "A complex type that contains information about the Amazon S3 bucket from which you want CloudFront to get your media files for distribution.", + "StreamingDistributionSummary$S3Origin": "A complex type that contains information about the Amazon S3 bucket from which you want CloudFront to get your media files for distribution." + } + }, + "S3OriginConfig": { + "base": "A complex type that contains information about the Amazon S3 origin. If the origin is a custom origin, use the CustomOriginConfig element instead.", + "refs": { + "Origin$S3OriginConfig": "A complex type that contains information about the Amazon S3 origin. If the origin is a custom origin, use the CustomOriginConfig element instead." + } + }, + "SSLSupportMethod": { + "base": null, + "refs": { + "ViewerCertificate$SSLSupportMethod": "If you specify a value for IAMCertificateId, you must also specify how you want CloudFront to serve HTTPS requests. Valid values are vip and sni-only. If you specify vip, CloudFront uses dedicated IP addresses for your content and can respond to HTTPS requests from any viewer. However, you must request permission to use this feature, and you incur additional monthly charges. If you specify sni-only, CloudFront can only respond to HTTPS requests from viewers that support Server Name Indication (SNI). All modern browsers support SNI, but some browsers still in use don't support SNI. Do not specify a value for SSLSupportMethod if you specified true for CloudFrontDefaultCertificate." + } + }, + "Signer": { + "base": "A complex type that lists the AWS accounts that were included in the TrustedSigners complex type, as well as their active CloudFront key pair IDs, if any.", + "refs": { + "SignerList$member": null + } + }, + "SignerList": { + "base": null, + "refs": { + "ActiveTrustedSigners$Items": "A complex type that contains one Signer complex type for each unique trusted signer that is specified in the TrustedSigners complex type, including trusted signers in the default cache behavior and in all of the other cache behaviors." + } + }, + "StreamingDistribution": { + "base": "A streaming distribution.", + "refs": { + "CreateStreamingDistributionResult$StreamingDistribution": "The streaming distribution's information.", + "GetStreamingDistributionResult$StreamingDistribution": "The streaming distribution's information.", + "UpdateStreamingDistributionResult$StreamingDistribution": "The streaming distribution's information." 
+ } + }, + "StreamingDistributionAlreadyExists": { + "base": null, + "refs": { + } + }, + "StreamingDistributionConfig": { + "base": "The configuration for the streaming distribution.", + "refs": { + "CreateStreamingDistributionRequest$StreamingDistributionConfig": "The streaming distribution's configuration information.", + "GetStreamingDistributionConfigResult$StreamingDistributionConfig": "The streaming distribution's configuration information.", + "StreamingDistribution$StreamingDistributionConfig": "The current configuration information for the streaming distribution.", + "UpdateStreamingDistributionRequest$StreamingDistributionConfig": "The streaming distribution's configuration information." + } + }, + "StreamingDistributionList": { + "base": "A streaming distribution list.", + "refs": { + "ListStreamingDistributionsResult$StreamingDistributionList": "The StreamingDistributionList type." + } + }, + "StreamingDistributionNotDisabled": { + "base": null, + "refs": { + } + }, + "StreamingDistributionSummary": { + "base": "A summary of the information for an Amazon CloudFront streaming distribution.", + "refs": { + "StreamingDistributionSummaryList$member": null + } + }, + "StreamingDistributionSummaryList": { + "base": null, + "refs": { + "StreamingDistributionList$Items": "A complex type that contains one StreamingDistributionSummary element for each distribution that was created by the current AWS account." + } + }, + "StreamingLoggingConfig": { + "base": "A complex type that controls whether access logs are written for this streaming distribution.", + "refs": { + "StreamingDistributionConfig$Logging": "A complex type that controls whether access logs are written for the streaming distribution." + } + }, + "TooManyCacheBehaviors": { + "base": "You cannot create any more cache behaviors for the distribution.", + "refs": { + } + }, + "TooManyCertificates": { + "base": "You cannot create any more custom SSL certificates.", + "refs": { + } + }, + "TooManyCloudFrontOriginAccessIdentities": { + "base": "Processing your request would cause you to exceed the maximum number of origin access identities allowed.", + "refs": { + } + }, + "TooManyCookieNamesInWhiteList": { + "base": "Your request contains more cookie names in the whitelist than are allowed per cache behavior.", + "refs": { + } + }, + "TooManyDistributionCNAMEs": { + "base": "Your request contains more CNAMEs than are allowed per distribution.", + "refs": { + } + }, + "TooManyDistributions": { + "base": "Processing your request would cause you to exceed the maximum number of distributions allowed.", + "refs": { + } + }, + "TooManyHeadersInForwardedValues": { + "base": null, + "refs": { + } + }, + "TooManyInvalidationsInProgress": { + "base": "You have exceeded the maximum number of allowable InProgress invalidation batch requests, or invalidation objects.", + "refs": { + } + }, + "TooManyOrigins": { + "base": "You cannot create any more origins for the distribution.", + "refs": { + } + }, + "TooManyStreamingDistributionCNAMEs": { + "base": null, + "refs": { + } + }, + "TooManyStreamingDistributions": { + "base": "Processing your request would cause you to exceed the maximum number of streaming distributions allowed.", + "refs": { + } + }, + "TooManyTrustedSigners": { + "base": "Your request contains more trusted signers than are allowed per distribution.", + "refs": { + } + }, + "TrustedSignerDoesNotExist": { + "base": "One or more of your trusted signers do not exist.", + "refs": { + } + }, + "TrustedSigners": { + "base": "A complex
type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.", + "refs": { + "CacheBehavior$TrustedSigners": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.", + "DefaultCacheBehavior$TrustedSigners": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.", + "StreamingDistributionConfig$TrustedSigners": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. 
To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.", + "StreamingDistributionSummary$TrustedSigners": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution." + } + }, + "UpdateCloudFrontOriginAccessIdentityRequest": { + "base": "The request to update an origin access identity.", + "refs": { + } + }, + "UpdateCloudFrontOriginAccessIdentityResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "UpdateDistributionRequest": { + "base": "The request to update a distribution.", + "refs": { + } + }, + "UpdateDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "UpdateStreamingDistributionRequest": { + "base": "The request to update a streaming distribution.", + "refs": { + } + }, + "UpdateStreamingDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "ViewerCertificate": { + "base": "A complex type that contains information about viewer certificates for this distribution.", + "refs": { + "DistributionConfig$ViewerCertificate": null, + "DistributionSummary$ViewerCertificate": null + } + }, + "ViewerProtocolPolicy": { + "base": null, + "refs": { + "CacheBehavior$ViewerProtocolPolicy": "Use this element to specify the protocol that users can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern. If you want CloudFront to allow end users to use any available protocol, specify allow-all. If you want CloudFront to require HTTPS, specify https-only. If you want CloudFront to respond to an HTTP request with an HTTP status code of 301 (Moved Permanently) and the HTTPS URL, specify redirect-to-https. The viewer then resubmits the request using the HTTPS URL.", + "DefaultCacheBehavior$ViewerProtocolPolicy": "Use this element to specify the protocol that users can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern. If you want CloudFront to allow end users to use any available protocol, specify allow-all. If you want CloudFront to require HTTPS, specify https-only. If you want CloudFront to respond to an HTTP request with an HTTP status code of 301 (Moved Permanently) and the HTTPS URL, specify redirect-to-https. The viewer then resubmits the request using the HTTPS URL."
+ } + }, + "boolean": { + "base": null, + "refs": { + "ActiveTrustedSigners$Enabled": "Each active trusted signer.", + "CacheBehavior$SmoothStreaming": "Indicates whether you want to distribute media files in Microsoft Smooth Streaming format using the origin that is associated with this cache behavior. If so, specify true; if not, specify false.", + "CloudFrontOriginAccessIdentityList$IsTruncated": "A flag that indicates whether more origin access identities remain to be listed. If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more items in the list.", + "DefaultCacheBehavior$SmoothStreaming": "Indicates whether you want to distribute media files in Microsoft Smooth Streaming format using the origin that is associated with this cache behavior. If so, specify true; if not, specify false.", + "DistributionConfig$Enabled": "Whether the distribution is enabled to accept end user requests for content.", + "DistributionList$IsTruncated": "A flag that indicates whether more distributions remain to be listed. If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more distributions in the list.", + "DistributionSummary$Enabled": "Whether the distribution is enabled to accept end user requests for content.", + "ForwardedValues$QueryString": "Indicates whether you want CloudFront to forward query strings to the origin that is associated with this cache behavior. If so, specify true; if not, specify false.", + "InvalidationList$IsTruncated": "A flag that indicates whether more invalidation batch requests remain to be listed. If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more invalidation batches in the list.", + "LoggingConfig$Enabled": "Specifies whether you want CloudFront to save access logs to an Amazon S3 bucket. If you do not want to enable logging when you create a distribution or if you want to disable logging for an existing distribution, specify false for Enabled, and specify empty Bucket and Prefix elements. If you specify false for Enabled but you specify values for Bucket, prefix and IncludeCookies, the values are automatically deleted.", + "LoggingConfig$IncludeCookies": "Specifies whether you want CloudFront to include cookies in access logs, specify true for IncludeCookies. If you choose to include cookies in logs, CloudFront logs all cookies regardless of how you configure the cache behaviors for this distribution. If you do not want to include cookies when you create a distribution or if you want to disable include cookies for an existing distribution, specify false for IncludeCookies.", + "StreamingDistributionConfig$Enabled": "Whether the streaming distribution is enabled to accept end user requests for content.", + "StreamingDistributionList$IsTruncated": "A flag that indicates whether more streaming distributions remain to be listed. If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more distributions in the list.", + "StreamingDistributionSummary$Enabled": "Whether the distribution is enabled to accept end user requests for content.", + "StreamingLoggingConfig$Enabled": "Specifies whether you want CloudFront to save access logs to an Amazon S3 bucket. 
If you do not want to enable logging when you create a streaming distribution or if you want to disable logging for an existing streaming distribution, specify false for Enabled, and specify empty Bucket and Prefix elements. If you specify false for Enabled but you specify values for Bucket and Prefix, the values are automatically deleted.", + "TrustedSigners$Enabled": "Specifies whether you want to require end users to use signed URLs to access the files specified by PathPattern and TargetOriginId.", + "ViewerCertificate$CloudFrontDefaultCertificate": "If you want viewers to use HTTPS to request your objects and you're using the CloudFront domain name of your distribution in your object URLs (for example, https://d111111abcdef8.cloudfront.net/logo.jpg), set to true. Omit this value if you are setting an IAMCertificateId." + } + }, + "integer": { + "base": null, + "refs": { + "ActiveTrustedSigners$Quantity": "The number of unique trusted signers included in all cache behaviors. For example, if three cache behaviors all list the same three AWS accounts, the value of Quantity for ActiveTrustedSigners will be 3.", + "Aliases$Quantity": "The number of CNAMEs, if any, for this distribution.", + "AllowedMethods$Quantity": "The number of HTTP methods that you want CloudFront to forward to your origin. Valid values are 2 (for GET and HEAD requests), 3 (for GET, HEAD and OPTIONS requests) and 7 (for GET, HEAD, OPTIONS, PUT, PATCH, POST, and DELETE requests).", + "CacheBehaviors$Quantity": "The number of cache behaviors for this distribution.", + "CachedMethods$Quantity": "The number of HTTP methods for which you want CloudFront to cache responses. Valid values are 2 (for caching responses to GET and HEAD requests) and 3 (for caching responses to GET, HEAD, and OPTIONS requests).", + "CloudFrontOriginAccessIdentityList$MaxItems": "The value you provided for the MaxItems request parameter.", + "CloudFrontOriginAccessIdentityList$Quantity": "The number of CloudFront origin access identities that were created by the current AWS account.", + "CookieNames$Quantity": "The number of whitelisted cookies for this cache behavior.", + "CustomErrorResponse$ErrorCode": "The 4xx or 5xx HTTP status code that you want to customize. For a list of HTTP status codes that you can customize, see CloudFront documentation.", + "CustomErrorResponses$Quantity": "The number of custom error responses for this distribution.", + "CustomOriginConfig$HTTPPort": "The HTTP port the custom origin listens on.", + "CustomOriginConfig$HTTPSPort": "The HTTPS port the custom origin listens on.", + "Distribution$InProgressInvalidationBatches": "The number of invalidation batches currently in progress.", + "DistributionList$MaxItems": "The value you provided for the MaxItems request parameter.", + "DistributionList$Quantity": "The number of distributions that were created by the current AWS account.", + "GeoRestriction$Quantity": "When geo restriction is enabled, this is the number of countries in your whitelist or blacklist. Otherwise, when it is not enabled, Quantity is 0, and you can omit Items.", + "Headers$Quantity": "The number of different headers that you want CloudFront to forward to the origin and to vary on for this cache behavior. The maximum number of headers that you can specify by name is 10. If you want CloudFront to forward all headers to the origin and vary on all of them, specify 1 for Quantity and * for Name. 
If you don't want CloudFront to forward any additional headers to the origin or to vary on any headers, specify 0 for Quantity and omit Items.", + "InvalidationList$MaxItems": "The value you provided for the MaxItems request parameter.", + "InvalidationList$Quantity": "The number of invalidation batches that were created by the current AWS account.", + "KeyPairIds$Quantity": "The number of active CloudFront key pairs for AwsAccountNumber.", + "Origins$Quantity": "The number of origins for this distribution.", + "Paths$Quantity": "The number of objects that you want to invalidate.", + "StreamingDistributionList$MaxItems": "The value you provided for the MaxItems request parameter.", + "StreamingDistributionList$Quantity": "The number of streaming distributions that were created by the current AWS account.", + "TrustedSigners$Quantity": "The number of trusted signers for this cache behavior." + } + }, + "long": { + "base": null, + "refs": { + "CacheBehavior$MinTTL": "The minimum amount of time that you want objects to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated. You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "CacheBehavior$DefaultTTL": "If you don't configure your origin to add a Cache-Control max-age directive or an Expires header, DefaultTTL is the default amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin does not add HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "CacheBehavior$MaxTTL": "The maximum amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin adds HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "CustomErrorResponse$ErrorCachingMinTTL": "The minimum amount of time you want HTTP error codes to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated. You can specify a value from 0 to 31,536,000.", + "DefaultCacheBehavior$MinTTL": "The minimum amount of time that you want objects to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated. You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "DefaultCacheBehavior$DefaultTTL": "If you don't configure your origin to add a Cache-Control max-age directive or an Expires header, DefaultTTL is the default amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin does not add HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "DefaultCacheBehavior$MaxTTL": "The maximum amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated.
The value that you specify applies only when your origin adds HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. You can specify a value from 0 to 3,153,600,000 seconds (100 years)." + } + }, + "string": { + "base": null, + "refs": { + "AccessDenied$Message": null, + "AliasList$member": null, + "AwsAccountNumberList$member": null, + "BatchTooLarge$Message": null, + "CNAMEAlreadyExists$Message": null, + "CacheBehavior$PathPattern": "The pattern (for example, images/*.jpg) that specifies which requests you want this cache behavior to apply to. When CloudFront receives an end-user request, the requested path is compared with path patterns in the order in which cache behaviors are listed in the distribution. The path pattern for the default cache behavior is * and cannot be changed. If the request for an object does not match the path pattern for any cache behaviors, CloudFront applies the behavior in the default cache behavior.", + "CacheBehavior$TargetOriginId": "The value of ID for the origin that you want CloudFront to route requests to when a request matches the path pattern either for a cache behavior or for the default cache behavior.", + "CloudFrontOriginAccessIdentity$Id": "The ID for the origin access identity. For example: E74FTE3AJFJ256A.", + "CloudFrontOriginAccessIdentity$S3CanonicalUserId": "The Amazon S3 canonical user ID for the origin access identity, which you use when giving the origin access identity read permission to an object in Amazon S3.", + "CloudFrontOriginAccessIdentityAlreadyExists$Message": null, + "CloudFrontOriginAccessIdentityConfig$CallerReference": "A unique number that ensures the request can't be replayed. If the CallerReference is new (no matter the content of the CloudFrontOriginAccessIdentityConfig object), a new origin access identity is created. If the CallerReference is a value you already sent in a previous request to create an identity, and the content of the CloudFrontOriginAccessIdentityConfig is identical to the original request (ignoring white space), the response includes the same information returned to the original request. If the CallerReference is a value you already sent in a previous request to create an identity but the content of the CloudFrontOriginAccessIdentityConfig is different from the original request, CloudFront returns a CloudFrontOriginAccessIdentityAlreadyExists error.", + "CloudFrontOriginAccessIdentityConfig$Comment": "Any comments you want to include about the origin access identity.", + "CloudFrontOriginAccessIdentityInUse$Message": null, + "CloudFrontOriginAccessIdentityList$Marker": "The value you provided for the Marker request parameter.", + "CloudFrontOriginAccessIdentityList$NextMarker": "If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your origin access identities where they left off.", + "CloudFrontOriginAccessIdentitySummary$Id": "The ID for the origin access identity. 
For example: E74FTE3AJFJ256A.", + "CloudFrontOriginAccessIdentitySummary$S3CanonicalUserId": "The Amazon S3 canonical user ID for the origin access identity, which you use when giving the origin access identity read permission to an object in Amazon S3.", + "CloudFrontOriginAccessIdentitySummary$Comment": "The comment for this origin access identity, as originally specified when created.", + "CookieNameList$member": null, + "CreateCloudFrontOriginAccessIdentityResult$Location": "The fully qualified URI of the new origin access identity just created. For example: https://cloudfront.amazonaws.com/2010-11-01/origin-access-identity/cloudfront/E74FTE3AJFJ256A.", + "CreateCloudFrontOriginAccessIdentityResult$ETag": "The current version of the origin access identity created.", + "CreateDistributionResult$Location": "The fully qualified URI of the new distribution resource just created. For example: https://cloudfront.amazonaws.com/2010-11-01/distribution/EDFDVBD632BHDS5.", + "CreateDistributionResult$ETag": "The current version of the distribution created.", + "CreateInvalidationRequest$DistributionId": "The distribution's id.", + "CreateInvalidationResult$Location": "The fully qualified URI of the distribution and invalidation batch request, including the Invalidation ID.", + "CreateStreamingDistributionResult$Location": "The fully qualified URI of the new streaming distribution resource just created. For example: https://cloudfront.amazonaws.com/2010-11-01/streaming-distribution/EGTXBD79H29TRA8.", + "CreateStreamingDistributionResult$ETag": "The current version of the streaming distribution created.", + "CustomErrorResponse$ResponsePagePath": "The path of the custom error page (for example, /custom_404.html). The path is relative to the distribution and must begin with a slash (/). If the path includes any non-ASCII characters or unsafe characters as defined in RFC 1738 (http://www.ietf.org/rfc/rfc1738.txt), URL encode those characters. Do not URL encode any other characters in the path, or CloudFront will not return the custom error page to the viewer.", + "CustomErrorResponse$ResponseCode": "The HTTP status code that you want CloudFront to return with the custom error page to the viewer. For a list of HTTP status codes that you can replace, see CloudFront Documentation.", + "DefaultCacheBehavior$TargetOriginId": "The value of ID for the origin that you want CloudFront to route requests to when a request matches the path pattern either for a cache behavior or for the default cache behavior.", + "DeleteCloudFrontOriginAccessIdentityRequest$Id": "The origin access identity's id.", + "DeleteCloudFrontOriginAccessIdentityRequest$IfMatch": "The value of the ETag header you received from a previous GET or PUT request. For example: E2QWRUHAPOMQZL.", + "DeleteDistributionRequest$Id": "The distribution id.", + "DeleteDistributionRequest$IfMatch": "The value of the ETag header you received when you disabled the distribution. For example: E2QWRUHAPOMQZL.", + "DeleteStreamingDistributionRequest$Id": "The distribution id.", + "DeleteStreamingDistributionRequest$IfMatch": "The value of the ETag header you received when you disabled the streaming distribution. For example: E2QWRUHAPOMQZL.", + "Distribution$Id": "The identifier for the distribution. For example: EDFDVBD632BHDS5.", + "Distribution$Status": "This response element indicates the current status of the distribution.
When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.", + "Distribution$DomainName": "The domain name corresponding to the distribution. For example: d604721fxaaqy9.cloudfront.net.", + "DistributionAlreadyExists$Message": null, + "DistributionConfig$CallerReference": "A unique number that ensures the request can't be replayed. If the CallerReference is new (no matter the content of the DistributionConfig object), a new distribution is created. If the CallerReference is a value you already sent in a previous request to create a distribution, and the content of the DistributionConfig is identical to the original request (ignoring white space), the response includes the same information returned to the original request. If the CallerReference is a value you already sent in a previous request to create a distribution but the content of the DistributionConfig is different from the original request, CloudFront returns a DistributionAlreadyExists error.", + "DistributionConfig$DefaultRootObject": "The object that you want CloudFront to return (for example, index.html) when an end user requests the root URL for your distribution (http://www.example.com) instead of an object in your distribution (http://www.example.com/index.html). Specifying a default root object avoids exposing the contents of your distribution. If you don't want to specify a default root object when you create a distribution, include an empty DefaultRootObject element. To delete the default root object from an existing distribution, update the distribution configuration and include an empty DefaultRootObject element. To replace the default root object, update the distribution configuration and specify the new object.", + "DistributionConfig$Comment": "Any comments you want to include about the distribution.", + "DistributionList$Marker": "The value you provided for the Marker request parameter.", + "DistributionList$NextMarker": "If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your distributions where they left off.", + "DistributionNotDisabled$Message": null, + "DistributionSummary$Id": "The identifier for the distribution. For example: EDFDVBD632BHDS5.", + "DistributionSummary$Status": "This response element indicates the current status of the distribution. When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.", + "DistributionSummary$DomainName": "The domain name corresponding to the distribution. For example: d604721fxaaqy9.cloudfront.net.", + "DistributionSummary$Comment": "The comment originally specified when this distribution was created.", + "GetCloudFrontOriginAccessIdentityConfigRequest$Id": "The identity's id.", + "GetCloudFrontOriginAccessIdentityConfigResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "GetCloudFrontOriginAccessIdentityRequest$Id": "The identity's id.", + "GetCloudFrontOriginAccessIdentityResult$ETag": "The current version of the origin access identity's information. For example: E2QWRUHAPOMQZL.", + "GetDistributionConfigRequest$Id": "The distribution's id.", + "GetDistributionConfigResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "GetDistributionRequest$Id": "The distribution's id.", + "GetDistributionResult$ETag": "The current version of the distribution's information. 
For example: E2QWRUHAPOMQZL.", + "GetInvalidationRequest$DistributionId": "The distribution's id.", + "GetInvalidationRequest$Id": "The invalidation's id.", + "GetStreamingDistributionConfigRequest$Id": "The streaming distribution's id.", + "GetStreamingDistributionConfigResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "GetStreamingDistributionRequest$Id": "The streaming distribution's id.", + "GetStreamingDistributionResult$ETag": "The current version of the streaming distribution's information. For example: E2QWRUHAPOMQZL.", + "HeaderList$member": null, + "IllegalUpdate$Message": null, + "InconsistentQuantities$Message": null, + "InvalidArgument$Message": null, + "InvalidDefaultRootObject$Message": null, + "InvalidErrorCode$Message": null, + "InvalidForwardCookies$Message": null, + "InvalidGeoRestrictionParameter$Message": null, + "InvalidHeadersForS3Origin$Message": null, + "InvalidIfMatchVersion$Message": null, + "InvalidLocationCode$Message": null, + "InvalidMinimumProtocolVersion$Message": null, + "InvalidOrigin$Message": null, + "InvalidOriginAccessIdentity$Message": null, + "InvalidProtocolSettings$Message": null, + "InvalidRelativePath$Message": null, + "InvalidRequiredProtocol$Message": null, + "InvalidResponseCode$Message": null, + "InvalidTTLOrder$Message": null, + "InvalidViewerCertificate$Message": null, + "Invalidation$Id": "The identifier for the invalidation request. For example: IDFDVBD632BHDS5.", + "Invalidation$Status": "The status of the invalidation request. When the invalidation batch is finished, the status is Completed.", + "InvalidationBatch$CallerReference": "A unique name that ensures the request can't be replayed. If the CallerReference is new (no matter the content of the Path object), a new invalidation batch is created. If the CallerReference is a value you already sent in a previous request to create an invalidation batch, and the content of each Path element is identical to the original request, the response includes the same information returned to the original request. If the CallerReference is a value you already sent in a previous request to create an invalidation batch but the content of any Path is different from the original request, CloudFront returns an InvalidationBatchAlreadyExists error.", + "InvalidationList$Marker": "The value you provided for the Marker request parameter.", + "InvalidationList$NextMarker": "If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your invalidation batches where they left off.", + "InvalidationSummary$Id": "The unique ID for an invalidation request.", + "InvalidationSummary$Status": "The status of an invalidation request.", + "KeyPairIdList$member": null, + "ListCloudFrontOriginAccessIdentitiesRequest$Marker": "Use this when paginating results to indicate where to begin in your list of origin access identities. The results include identities in the list that occur after the marker. To get the next page of results, set the Marker to the value of the NextMarker from the current page's response (which is also the ID of the last identity on that page).", + "ListCloudFrontOriginAccessIdentitiesRequest$MaxItems": "The maximum number of origin access identities you want in the response body.", + "ListDistributionsRequest$Marker": "Use this when paginating results to indicate where to begin in your list of distributions. The results include distributions in the list that occur after the marker.
To get the next page of results, set the Marker to the value of the NextMarker from the current page's response (which is also the ID of the last distribution on that page).", + "ListDistributionsRequest$MaxItems": "The maximum number of distributions you want in the response body.", + "ListInvalidationsRequest$DistributionId": "The distribution's id.", + "ListInvalidationsRequest$Marker": "Use this parameter when paginating results to indicate where to begin in your list of invalidation batches. Because the results are returned in decreasing order from most recent to oldest, the most recent results are on the first page, the second page will contain earlier results, and so on. To get the next page of results, set the Marker to the value of the NextMarker from the current page's response. This value is the same as the ID of the last invalidation batch on that page.", + "ListInvalidationsRequest$MaxItems": "The maximum number of invalidation batches you want in the response body.", + "ListStreamingDistributionsRequest$Marker": "Use this when paginating results to indicate where to begin in your list of streaming distributions. The results include distributions in the list that occur after the marker. To get the next page of results, set the Marker to the value of the NextMarker from the current page's response (which is also the ID of the last distribution on that page).", + "ListStreamingDistributionsRequest$MaxItems": "The maximum number of streaming distributions you want in the response body.", + "LocationList$member": null, + "LoggingConfig$Bucket": "The Amazon S3 bucket to store the access logs in, for example, myawslogbucket.s3.amazonaws.com.", + "LoggingConfig$Prefix": "An optional string that you want CloudFront to prefix to the access log filenames for this distribution, for example, myprefix/. If you want to enable logging, but you do not want to specify a prefix, you still must include an empty Prefix element in the Logging element.", + "MissingBody$Message": null, + "NoSuchCloudFrontOriginAccessIdentity$Message": null, + "NoSuchDistribution$Message": null, + "NoSuchInvalidation$Message": null, + "NoSuchOrigin$Message": null, + "NoSuchStreamingDistribution$Message": null, + "Origin$Id": "A unique identifier for the origin. The value of Id must be unique within the distribution. You use the value of Id when you create a cache behavior. The Id identifies the origin that CloudFront routes a request to when the request matches the path pattern for that cache behavior.", + "Origin$DomainName": "Amazon S3 origins: The DNS name of the Amazon S3 bucket from which you want CloudFront to get objects for this origin, for example, myawsbucket.s3.amazonaws.com. Custom origins: The DNS domain name for the HTTP server from which you want CloudFront to get objects for this origin, for example, www.example.com.", + "Origin$OriginPath": "An optional element that causes CloudFront to request your content from a directory in your Amazon S3 bucket or your custom origin. When you include the OriginPath element, specify the directory name, beginning with a /. CloudFront appends the directory name to the value of DomainName.", + "PathList$member": null, + "PreconditionFailed$Message": null, + "S3Origin$DomainName": "The DNS name of the S3 origin.", + "S3Origin$OriginAccessIdentity": "Your S3 origin's origin access identity.", + "S3OriginConfig$OriginAccessIdentity": "The CloudFront origin access identity to associate with the origin. 
Use an origin access identity to configure the origin so that end users can only access objects in an Amazon S3 bucket through CloudFront. If you want end users to be able to access objects using either the CloudFront URL or the Amazon S3 URL, specify an empty OriginAccessIdentity element. To delete the origin access identity from an existing distribution, update the distribution configuration and include an empty OriginAccessIdentity element. To replace the origin access identity, update the distribution configuration and specify the new origin access identity. Use the format origin-access-identity/cloudfront/Id where Id is the value that CloudFront returned in the Id element when you created the origin access identity.", + "Signer$AwsAccountNumber": "Specifies an AWS account that can create signed URLs. Values: self, which indicates that the AWS account that was used to create the distribution can create signed URLs, or an AWS account number. Omit the dashes in the account number.", + "StreamingDistribution$Id": "The identifier for the streaming distribution. For example: EGTXBD79H29TRA8.", + "StreamingDistribution$Status": "The current status of the streaming distribution. When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.", + "StreamingDistribution$DomainName": "The domain name corresponding to the streaming distribution. For example: s5c39gqb8ow64r.cloudfront.net.", + "StreamingDistributionAlreadyExists$Message": null, + "StreamingDistributionConfig$CallerReference": "A unique number that ensures the request can't be replayed. If the CallerReference is new (no matter the content of the StreamingDistributionConfig object), a new streaming distribution is created. If the CallerReference is a value you already sent in a previous request to create a streaming distribution, and the content of the StreamingDistributionConfig is identical to the original request (ignoring white space), the response includes the same information returned to the original request. If the CallerReference is a value you already sent in a previous request to create a streaming distribution but the content of the StreamingDistributionConfig is different from the original request, CloudFront returns a StreamingDistributionAlreadyExists error.", + "StreamingDistributionConfig$Comment": "Any comments you want to include about the streaming distribution.", + "StreamingDistributionList$Marker": "The value you provided for the Marker request parameter.", + "StreamingDistributionList$NextMarker": "If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your streaming distributions where they left off.", + "StreamingDistributionNotDisabled$Message": null, + "StreamingDistributionSummary$Id": "The identifier for the distribution. For example: EDFDVBD632BHDS5.", + "StreamingDistributionSummary$Status": "Indicates the current status of the distribution. When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.", + "StreamingDistributionSummary$DomainName": "The domain name corresponding to the distribution.
For example: d604721fxaaqy9.cloudfront.net.", + "StreamingDistributionSummary$Comment": "The comment originally specified when this distribution was created.", + "StreamingLoggingConfig$Bucket": "The Amazon S3 bucket to store the access logs in, for example, myawslogbucket.s3.amazonaws.com.", + "StreamingLoggingConfig$Prefix": "An optional string that you want CloudFront to prefix to the access log filenames for this streaming distribution, for example, myprefix/. If you want to enable logging, but you do not want to specify a prefix, you still must include an empty Prefix element in the Logging element.", + "TooManyCacheBehaviors$Message": null, + "TooManyCertificates$Message": null, + "TooManyCloudFrontOriginAccessIdentities$Message": null, + "TooManyCookieNamesInWhiteList$Message": null, + "TooManyDistributionCNAMEs$Message": null, + "TooManyDistributions$Message": null, + "TooManyHeadersInForwardedValues$Message": null, + "TooManyInvalidationsInProgress$Message": null, + "TooManyOrigins$Message": null, + "TooManyStreamingDistributionCNAMEs$Message": null, + "TooManyStreamingDistributions$Message": null, + "TooManyTrustedSigners$Message": null, + "TrustedSignerDoesNotExist$Message": null, + "UpdateCloudFrontOriginAccessIdentityRequest$Id": "The identity's id.", + "UpdateCloudFrontOriginAccessIdentityRequest$IfMatch": "The value of the ETag header you received when retrieving the identity's configuration. For example: E2QWRUHAPOMQZL.", + "UpdateCloudFrontOriginAccessIdentityResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "UpdateDistributionRequest$Id": "The distribution's id.", + "UpdateDistributionRequest$IfMatch": "The value of the ETag header you received when retrieving the distribution's configuration. For example: E2QWRUHAPOMQZL.", + "UpdateDistributionResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "UpdateStreamingDistributionRequest$Id": "The streaming distribution's id.", + "UpdateStreamingDistributionRequest$IfMatch": "The value of the ETag header you received when retrieving the streaming distribution's configuration. For example: E2QWRUHAPOMQZL.", + "UpdateStreamingDistributionResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "ViewerCertificate$IAMCertificateId": "If you want viewers to use HTTPS to request your objects and you're using an alternate domain name in your object URLs (for example, https://example.com/logo.jpg), specify the IAM certificate identifier of the custom viewer certificate for this distribution. Specify either this value or CloudFrontDefaultCertificate." + } + }, + "timestamp": { + "base": null, + "refs": { + "Distribution$LastModifiedTime": "The date and time the distribution was last modified.", + "DistributionSummary$LastModifiedTime": "The date and time the distribution was last modified.", + "Invalidation$CreateTime": "The date and time the invalidation request was first made.", + "InvalidationSummary$CreateTime": null, + "StreamingDistribution$LastModifiedTime": "The date and time the distribution was last modified.", + "StreamingDistributionSummary$LastModifiedTime": "The date and time the distribution was last modified." 
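The ETag/IfMatch descriptions above imply a read-modify-write cycle for every update: fetch the current configuration, change it, and send it back with the returned ETag as IfMatch (a stale value fails with PreconditionFailed, HTTP 412). A minimal sketch against the vendored SDK, reusing the example distribution ID from the docs above:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/cloudfront"
    )

    func main() {
        svc := cloudfront.New(session.Must(session.NewSession()))
        id := aws.String("EDFDVBD632BHDS5") // example ID from the docs above

        // Fetch the current config; the returned ETag is the version token.
        cfg, err := svc.GetDistributionConfig(&cloudfront.GetDistributionConfigInput{Id: id})
        if err != nil {
            log.Fatal(err)
        }

        // Modify the config (here: disable the distribution) and write it
        // back, echoing the ETag as IfMatch.
        cfg.DistributionConfig.Enabled = aws.Bool(false)
        if _, err := svc.UpdateDistribution(&cloudfront.UpdateDistributionInput{
            Id:                 id,
            IfMatch:            cfg.ETag,
            DistributionConfig: cfg.DistributionConfig,
        }); err != nil {
            log.Fatal(err)
        }
    }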
+ } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-04-17/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-04-17/paginators-1.json new file mode 100644 index 000000000..51fbb907f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-04-17/paginators-1.json @@ -0,0 +1,32 @@ +{ + "pagination": { + "ListCloudFrontOriginAccessIdentities": { + "input_token": "Marker", + "output_token": "CloudFrontOriginAccessIdentityList.NextMarker", + "limit_key": "MaxItems", + "more_results": "CloudFrontOriginAccessIdentityList.IsTruncated", + "result_key": "CloudFrontOriginAccessIdentityList.Items" + }, + "ListDistributions": { + "input_token": "Marker", + "output_token": "DistributionList.NextMarker", + "limit_key": "MaxItems", + "more_results": "DistributionList.IsTruncated", + "result_key": "DistributionList.Items" + }, + "ListInvalidations": { + "input_token": "Marker", + "output_token": "InvalidationList.NextMarker", + "limit_key": "MaxItems", + "more_results": "InvalidationList.IsTruncated", + "result_key": "InvalidationList.Items" + }, + "ListStreamingDistributions": { + "input_token": "Marker", + "output_token": "StreamingDistributionList.NextMarker", + "limit_key": "MaxItems", + "more_results": "StreamingDistributionList.IsTruncated", + "result_key": "StreamingDistributionList.Items" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-04-17/waiters-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-04-17/waiters-2.json new file mode 100644 index 000000000..edd74b2a3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-04-17/waiters-2.json @@ -0,0 +1,47 @@ +{ + "version": 2, + "waiters": { + "DistributionDeployed": { + "delay": 60, + "operation": "GetDistribution", + "maxAttempts": 25, + "description": "Wait until a distribution is deployed.", + "acceptors": [ + { + "expected": "Deployed", + "matcher": "path", + "state": "success", + "argument": "Distribution.Status" + } + ] + }, + "InvalidationCompleted": { + "delay": 20, + "operation": "GetInvalidation", + "maxAttempts": 30, + "description": "Wait until an invalidation has completed.", + "acceptors": [ + { + "expected": "Completed", + "matcher": "path", + "state": "success", + "argument": "Invalidation.Status" + } + ] + }, + "StreamingDistributionDeployed": { + "delay": 60, + "operation": "GetStreamingDistribution", + "maxAttempts": 25, + "description": "Wait until a streaming distribution is deployed.", + "acceptors": [ + { + "expected": "Deployed", + "matcher": "path", + "state": "success", + "argument": "StreamingDistribution.Status" + } + ] + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-07-27/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-07-27/api-2.json new file mode 100644 index 000000000..5da9d56e9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-07-27/api-2.json @@ -0,0 +1,2721 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-07-27", + "endpointPrefix":"cloudfront", + "globalEndpoint":"cloudfront.amazonaws.com", + "serviceAbbreviation":"CloudFront", + "serviceFullName":"Amazon CloudFront", + "signatureVersion":"v4", + "protocol":"rest-xml" + }, + "operations":{ + "CreateCloudFrontOriginAccessIdentity":{ + "name":"CreateCloudFrontOriginAccessIdentity2015_07_27", + "http":{ + "method":"POST", + "requestUri":"/2015-07-27/origin-access-identity/cloudfront", + 
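The paginators-1.json and waiters-2.json definitions above are what the SDK generator turns into ListDistributionsPages, WaitUntilDistributionDeployed, and their siblings in the Go service package. A usage sketch under that assumption; the distribution ID is again the example from the docs:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/cloudfront"
    )

    func main() {
        svc := cloudfront.New(session.Must(session.NewSession()))

        // Generated from the Marker/NextMarker/IsTruncated/MaxItems pagination
        // definition: the SDK follows NextMarker until IsTruncated is false.
        err := svc.ListDistributionsPages(
            &cloudfront.ListDistributionsInput{MaxItems: aws.String("25")},
            func(page *cloudfront.ListDistributionsOutput, lastPage bool) bool {
                for _, d := range page.DistributionList.Items {
                    fmt.Println(aws.StringValue(d.Id), aws.StringValue(d.Status))
                }
                return true // continue to the next page
            })
        if err != nil {
            log.Fatal(err)
        }

        // Generated from the DistributionDeployed waiter: polls GetDistribution
        // every 60s, up to 25 attempts, until Distribution.Status == "Deployed".
        if err := svc.WaitUntilDistributionDeployed(&cloudfront.GetDistributionInput{
            Id: aws.String("EDFDVBD632BHDS5"),
        }); err != nil {
            log.Fatal(err)
        }
    }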
"responseCode":201 + }, + "input":{"shape":"CreateCloudFrontOriginAccessIdentityRequest"}, + "output":{"shape":"CreateCloudFrontOriginAccessIdentityResult"}, + "errors":[ + { + "shape":"CloudFrontOriginAccessIdentityAlreadyExists", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"MissingBody", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyCloudFrontOriginAccessIdentities", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InconsistentQuantities", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "CreateDistribution":{ + "name":"CreateDistribution2015_07_27", + "http":{ + "method":"POST", + "requestUri":"/2015-07-27/distribution", + "responseCode":201 + }, + "input":{"shape":"CreateDistributionRequest"}, + "output":{"shape":"CreateDistributionResult"}, + "errors":[ + { + "shape":"CNAMEAlreadyExists", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"DistributionAlreadyExists", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"InvalidOrigin", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidOriginAccessIdentity", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"TooManyTrustedSigners", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TrustedSignerDoesNotExist", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidViewerCertificate", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidMinimumProtocolVersion", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingBody", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyDistributionCNAMEs", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyDistributions", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidDefaultRootObject", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidRelativePath", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidErrorCode", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidResponseCode", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidRequiredProtocol", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NoSuchOrigin", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyOrigins", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyCacheBehaviors", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyCookieNamesInWhiteList", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidForwardCookies", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyHeadersInForwardedValues", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidHeadersForS3Origin", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InconsistentQuantities", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyCertificates", + "error":{"httpStatusCode":400}, + 
"exception":true + }, + { + "shape":"InvalidLocationCode", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidGeoRestrictionParameter", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidProtocolSettings", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidTTLOrder", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidWebACLId", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "CreateInvalidation":{ + "name":"CreateInvalidation2015_07_27", + "http":{ + "method":"POST", + "requestUri":"/2015-07-27/distribution/{DistributionId}/invalidation", + "responseCode":201 + }, + "input":{"shape":"CreateInvalidationRequest"}, + "output":{"shape":"CreateInvalidationResult"}, + "errors":[ + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"MissingBody", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NoSuchDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"BatchTooLarge", + "error":{"httpStatusCode":413}, + "exception":true + }, + { + "shape":"TooManyInvalidationsInProgress", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InconsistentQuantities", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "CreateStreamingDistribution":{ + "name":"CreateStreamingDistribution2015_07_27", + "http":{ + "method":"POST", + "requestUri":"/2015-07-27/streaming-distribution", + "responseCode":201 + }, + "input":{"shape":"CreateStreamingDistributionRequest"}, + "output":{"shape":"CreateStreamingDistributionResult"}, + "errors":[ + { + "shape":"CNAMEAlreadyExists", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"StreamingDistributionAlreadyExists", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"InvalidOrigin", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidOriginAccessIdentity", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"TooManyTrustedSigners", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TrustedSignerDoesNotExist", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingBody", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyStreamingDistributionCNAMEs", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyStreamingDistributions", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InconsistentQuantities", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "DeleteCloudFrontOriginAccessIdentity":{ + "name":"DeleteCloudFrontOriginAccessIdentity2015_07_27", + "http":{ + "method":"DELETE", + "requestUri":"/2015-07-27/origin-access-identity/cloudfront/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeleteCloudFrontOriginAccessIdentityRequest"}, + "errors":[ + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"InvalidIfMatchVersion", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NoSuchCloudFrontOriginAccessIdentity", + 
"error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"PreconditionFailed", + "error":{"httpStatusCode":412}, + "exception":true + }, + { + "shape":"CloudFrontOriginAccessIdentityInUse", + "error":{"httpStatusCode":409}, + "exception":true + } + ] + }, + "DeleteDistribution":{ + "name":"DeleteDistribution2015_07_27", + "http":{ + "method":"DELETE", + "requestUri":"/2015-07-27/distribution/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeleteDistributionRequest"}, + "errors":[ + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"DistributionNotDisabled", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"InvalidIfMatchVersion", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NoSuchDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"PreconditionFailed", + "error":{"httpStatusCode":412}, + "exception":true + } + ] + }, + "DeleteStreamingDistribution":{ + "name":"DeleteStreamingDistribution2015_07_27", + "http":{ + "method":"DELETE", + "requestUri":"/2015-07-27/streaming-distribution/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeleteStreamingDistributionRequest"}, + "errors":[ + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"StreamingDistributionNotDisabled", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"InvalidIfMatchVersion", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NoSuchStreamingDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"PreconditionFailed", + "error":{"httpStatusCode":412}, + "exception":true + } + ] + }, + "GetCloudFrontOriginAccessIdentity":{ + "name":"GetCloudFrontOriginAccessIdentity2015_07_27", + "http":{ + "method":"GET", + "requestUri":"/2015-07-27/origin-access-identity/cloudfront/{Id}" + }, + "input":{"shape":"GetCloudFrontOriginAccessIdentityRequest"}, + "output":{"shape":"GetCloudFrontOriginAccessIdentityResult"}, + "errors":[ + { + "shape":"NoSuchCloudFrontOriginAccessIdentity", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + } + ] + }, + "GetCloudFrontOriginAccessIdentityConfig":{ + "name":"GetCloudFrontOriginAccessIdentityConfig2015_07_27", + "http":{ + "method":"GET", + "requestUri":"/2015-07-27/origin-access-identity/cloudfront/{Id}/config" + }, + "input":{"shape":"GetCloudFrontOriginAccessIdentityConfigRequest"}, + "output":{"shape":"GetCloudFrontOriginAccessIdentityConfigResult"}, + "errors":[ + { + "shape":"NoSuchCloudFrontOriginAccessIdentity", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + } + ] + }, + "GetDistribution":{ + "name":"GetDistribution2015_07_27", + "http":{ + "method":"GET", + "requestUri":"/2015-07-27/distribution/{Id}" + }, + "input":{"shape":"GetDistributionRequest"}, + "output":{"shape":"GetDistributionResult"}, + "errors":[ + { + "shape":"NoSuchDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + } + ] + }, + "GetDistributionConfig":{ + "name":"GetDistributionConfig2015_07_27", + "http":{ + "method":"GET", + "requestUri":"/2015-07-27/distribution/{Id}/config" + }, + "input":{"shape":"GetDistributionConfigRequest"}, + 
"output":{"shape":"GetDistributionConfigResult"}, + "errors":[ + { + "shape":"NoSuchDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + } + ] + }, + "GetInvalidation":{ + "name":"GetInvalidation2015_07_27", + "http":{ + "method":"GET", + "requestUri":"/2015-07-27/distribution/{DistributionId}/invalidation/{Id}" + }, + "input":{"shape":"GetInvalidationRequest"}, + "output":{"shape":"GetInvalidationResult"}, + "errors":[ + { + "shape":"NoSuchInvalidation", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"NoSuchDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + } + ] + }, + "GetStreamingDistribution":{ + "name":"GetStreamingDistribution2015_07_27", + "http":{ + "method":"GET", + "requestUri":"/2015-07-27/streaming-distribution/{Id}" + }, + "input":{"shape":"GetStreamingDistributionRequest"}, + "output":{"shape":"GetStreamingDistributionResult"}, + "errors":[ + { + "shape":"NoSuchStreamingDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + } + ] + }, + "GetStreamingDistributionConfig":{ + "name":"GetStreamingDistributionConfig2015_07_27", + "http":{ + "method":"GET", + "requestUri":"/2015-07-27/streaming-distribution/{Id}/config" + }, + "input":{"shape":"GetStreamingDistributionConfigRequest"}, + "output":{"shape":"GetStreamingDistributionConfigResult"}, + "errors":[ + { + "shape":"NoSuchStreamingDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + } + ] + }, + "ListCloudFrontOriginAccessIdentities":{ + "name":"ListCloudFrontOriginAccessIdentities2015_07_27", + "http":{ + "method":"GET", + "requestUri":"/2015-07-27/origin-access-identity/cloudfront" + }, + "input":{"shape":"ListCloudFrontOriginAccessIdentitiesRequest"}, + "output":{"shape":"ListCloudFrontOriginAccessIdentitiesResult"}, + "errors":[ + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "ListDistributions":{ + "name":"ListDistributions2015_07_27", + "http":{ + "method":"GET", + "requestUri":"/2015-07-27/distribution" + }, + "input":{"shape":"ListDistributionsRequest"}, + "output":{"shape":"ListDistributionsResult"}, + "errors":[ + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "ListDistributionsByWebACLId":{ + "name":"ListDistributionsByWebACLId2015_07_27", + "http":{ + "method":"GET", + "requestUri":"/2015-07-27/distributionsByWebACLId/{WebACLId}" + }, + "input":{"shape":"ListDistributionsByWebACLIdRequest"}, + "output":{"shape":"ListDistributionsByWebACLIdResult"}, + "errors":[ + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidWebACLId", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "ListInvalidations":{ + "name":"ListInvalidations2015_07_27", + "http":{ + "method":"GET", + "requestUri":"/2015-07-27/distribution/{DistributionId}/invalidation" + }, + "input":{"shape":"ListInvalidationsRequest"}, + "output":{"shape":"ListInvalidationsResult"}, + "errors":[ + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NoSuchDistribution", + 
"error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + } + ] + }, + "ListStreamingDistributions":{ + "name":"ListStreamingDistributions2015_07_27", + "http":{ + "method":"GET", + "requestUri":"/2015-07-27/streaming-distribution" + }, + "input":{"shape":"ListStreamingDistributionsRequest"}, + "output":{"shape":"ListStreamingDistributionsResult"}, + "errors":[ + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "UpdateCloudFrontOriginAccessIdentity":{ + "name":"UpdateCloudFrontOriginAccessIdentity2015_07_27", + "http":{ + "method":"PUT", + "requestUri":"/2015-07-27/origin-access-identity/cloudfront/{Id}/config" + }, + "input":{"shape":"UpdateCloudFrontOriginAccessIdentityRequest"}, + "output":{"shape":"UpdateCloudFrontOriginAccessIdentityResult"}, + "errors":[ + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"IllegalUpdate", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidIfMatchVersion", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingBody", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NoSuchCloudFrontOriginAccessIdentity", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"PreconditionFailed", + "error":{"httpStatusCode":412}, + "exception":true + }, + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InconsistentQuantities", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "UpdateDistribution":{ + "name":"UpdateDistribution2015_07_27", + "http":{ + "method":"PUT", + "requestUri":"/2015-07-27/distribution/{Id}/config" + }, + "input":{"shape":"UpdateDistributionRequest"}, + "output":{"shape":"UpdateDistributionResult"}, + "errors":[ + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"CNAMEAlreadyExists", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"IllegalUpdate", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidIfMatchVersion", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingBody", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NoSuchDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"PreconditionFailed", + "error":{"httpStatusCode":412}, + "exception":true + }, + { + "shape":"TooManyDistributionCNAMEs", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidDefaultRootObject", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidRelativePath", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidErrorCode", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidResponseCode", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidOriginAccessIdentity", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyTrustedSigners", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TrustedSignerDoesNotExist", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidViewerCertificate", + "error":{"httpStatusCode":400}, + "exception":true + }, + { 
+ "shape":"InvalidMinimumProtocolVersion", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidRequiredProtocol", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NoSuchOrigin", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyOrigins", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyCacheBehaviors", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyCookieNamesInWhiteList", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidForwardCookies", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyHeadersInForwardedValues", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidHeadersForS3Origin", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InconsistentQuantities", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyCertificates", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidLocationCode", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidGeoRestrictionParameter", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidTTLOrder", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidWebACLId", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "UpdateStreamingDistribution":{ + "name":"UpdateStreamingDistribution2015_07_27", + "http":{ + "method":"PUT", + "requestUri":"/2015-07-27/streaming-distribution/{Id}/config" + }, + "input":{"shape":"UpdateStreamingDistributionRequest"}, + "output":{"shape":"UpdateStreamingDistributionResult"}, + "errors":[ + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"CNAMEAlreadyExists", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"IllegalUpdate", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidIfMatchVersion", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingBody", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NoSuchStreamingDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"PreconditionFailed", + "error":{"httpStatusCode":412}, + "exception":true + }, + { + "shape":"TooManyStreamingDistributionCNAMEs", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidOriginAccessIdentity", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyTrustedSigners", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TrustedSignerDoesNotExist", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InconsistentQuantities", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + } + }, + "shapes":{ + "AccessDenied":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":403}, + "exception":true + }, + "ActiveTrustedSigners":{ + "type":"structure", + "required":[ + "Enabled", + "Quantity" + ], + "members":{ + "Enabled":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"SignerList"} + } + }, + "AliasList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"CNAME" + } + }, + "Aliases":{ + 
"type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"AliasList"} + } + }, + "AllowedMethods":{ + "type":"structure", + "required":[ + "Quantity", + "Items" + ], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"MethodsList"}, + "CachedMethods":{"shape":"CachedMethods"} + } + }, + "AwsAccountNumberList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"AwsAccountNumber" + } + }, + "BatchTooLarge":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":413}, + "exception":true + }, + "CNAMEAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "CacheBehavior":{ + "type":"structure", + "required":[ + "PathPattern", + "TargetOriginId", + "ForwardedValues", + "TrustedSigners", + "ViewerProtocolPolicy", + "MinTTL" + ], + "members":{ + "PathPattern":{"shape":"string"}, + "TargetOriginId":{"shape":"string"}, + "ForwardedValues":{"shape":"ForwardedValues"}, + "TrustedSigners":{"shape":"TrustedSigners"}, + "ViewerProtocolPolicy":{"shape":"ViewerProtocolPolicy"}, + "MinTTL":{"shape":"long"}, + "AllowedMethods":{"shape":"AllowedMethods"}, + "SmoothStreaming":{"shape":"boolean"}, + "DefaultTTL":{"shape":"long"}, + "MaxTTL":{"shape":"long"} + } + }, + "CacheBehaviorList":{ + "type":"list", + "member":{ + "shape":"CacheBehavior", + "locationName":"CacheBehavior" + } + }, + "CacheBehaviors":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"CacheBehaviorList"} + } + }, + "CachedMethods":{ + "type":"structure", + "required":[ + "Quantity", + "Items" + ], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"MethodsList"} + } + }, + "CloudFrontOriginAccessIdentity":{ + "type":"structure", + "required":[ + "Id", + "S3CanonicalUserId" + ], + "members":{ + "Id":{"shape":"string"}, + "S3CanonicalUserId":{"shape":"string"}, + "CloudFrontOriginAccessIdentityConfig":{"shape":"CloudFrontOriginAccessIdentityConfig"} + } + }, + "CloudFrontOriginAccessIdentityAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "CloudFrontOriginAccessIdentityConfig":{ + "type":"structure", + "required":[ + "CallerReference", + "Comment" + ], + "members":{ + "CallerReference":{"shape":"string"}, + "Comment":{"shape":"string"} + } + }, + "CloudFrontOriginAccessIdentityInUse":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "CloudFrontOriginAccessIdentityList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{"shape":"string"}, + "NextMarker":{"shape":"string"}, + "MaxItems":{"shape":"integer"}, + "IsTruncated":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"CloudFrontOriginAccessIdentitySummaryList"} + } + }, + "CloudFrontOriginAccessIdentitySummary":{ + "type":"structure", + "required":[ + "Id", + "S3CanonicalUserId", + "Comment" + ], + "members":{ + "Id":{"shape":"string"}, + "S3CanonicalUserId":{"shape":"string"}, + "Comment":{"shape":"string"} + } + }, + "CloudFrontOriginAccessIdentitySummaryList":{ + "type":"list", + "member":{ + "shape":"CloudFrontOriginAccessIdentitySummary", + "locationName":"CloudFrontOriginAccessIdentitySummary" + } 
+ }, + "CookieNameList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Name" + } + }, + "CookieNames":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"CookieNameList"} + } + }, + "CookiePreference":{ + "type":"structure", + "required":["Forward"], + "members":{ + "Forward":{"shape":"ItemSelection"}, + "WhitelistedNames":{"shape":"CookieNames"} + } + }, + "CreateCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "required":["CloudFrontOriginAccessIdentityConfig"], + "members":{ + "CloudFrontOriginAccessIdentityConfig":{ + "shape":"CloudFrontOriginAccessIdentityConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-07-27/"}, + "locationName":"CloudFrontOriginAccessIdentityConfig" + } + }, + "payload":"CloudFrontOriginAccessIdentityConfig" + }, + "CreateCloudFrontOriginAccessIdentityResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentity":{"shape":"CloudFrontOriginAccessIdentity"}, + "Location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"CloudFrontOriginAccessIdentity" + }, + "CreateDistributionRequest":{ + "type":"structure", + "required":["DistributionConfig"], + "members":{ + "DistributionConfig":{ + "shape":"DistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-07-27/"}, + "locationName":"DistributionConfig" + } + }, + "payload":"DistributionConfig" + }, + "CreateDistributionResult":{ + "type":"structure", + "members":{ + "Distribution":{"shape":"Distribution"}, + "Location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"Distribution" + }, + "CreateInvalidationRequest":{ + "type":"structure", + "required":[ + "DistributionId", + "InvalidationBatch" + ], + "members":{ + "DistributionId":{ + "shape":"string", + "location":"uri", + "locationName":"DistributionId" + }, + "InvalidationBatch":{ + "shape":"InvalidationBatch", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-07-27/"}, + "locationName":"InvalidationBatch" + } + }, + "payload":"InvalidationBatch" + }, + "CreateInvalidationResult":{ + "type":"structure", + "members":{ + "Location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "Invalidation":{"shape":"Invalidation"} + }, + "payload":"Invalidation" + }, + "CreateStreamingDistributionRequest":{ + "type":"structure", + "required":["StreamingDistributionConfig"], + "members":{ + "StreamingDistributionConfig":{ + "shape":"StreamingDistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-07-27/"}, + "locationName":"StreamingDistributionConfig" + } + }, + "payload":"StreamingDistributionConfig" + }, + "CreateStreamingDistributionResult":{ + "type":"structure", + "members":{ + "StreamingDistribution":{"shape":"StreamingDistribution"}, + "Location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"StreamingDistribution" + }, + "CustomErrorResponse":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"integer"}, + "ResponsePagePath":{"shape":"string"}, + "ResponseCode":{"shape":"string"}, + 
"ErrorCachingMinTTL":{"shape":"long"} + } + }, + "CustomErrorResponseList":{ + "type":"list", + "member":{ + "shape":"CustomErrorResponse", + "locationName":"CustomErrorResponse" + } + }, + "CustomErrorResponses":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"CustomErrorResponseList"} + } + }, + "CustomOriginConfig":{ + "type":"structure", + "required":[ + "HTTPPort", + "HTTPSPort", + "OriginProtocolPolicy" + ], + "members":{ + "HTTPPort":{"shape":"integer"}, + "HTTPSPort":{"shape":"integer"}, + "OriginProtocolPolicy":{"shape":"OriginProtocolPolicy"} + } + }, + "DefaultCacheBehavior":{ + "type":"structure", + "required":[ + "TargetOriginId", + "ForwardedValues", + "TrustedSigners", + "ViewerProtocolPolicy", + "MinTTL" + ], + "members":{ + "TargetOriginId":{"shape":"string"}, + "ForwardedValues":{"shape":"ForwardedValues"}, + "TrustedSigners":{"shape":"TrustedSigners"}, + "ViewerProtocolPolicy":{"shape":"ViewerProtocolPolicy"}, + "MinTTL":{"shape":"long"}, + "AllowedMethods":{"shape":"AllowedMethods"}, + "SmoothStreaming":{"shape":"boolean"}, + "DefaultTTL":{"shape":"long"}, + "MaxTTL":{"shape":"long"} + } + }, + "DeleteCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "required":["Id"] + }, + "DeleteDistributionRequest":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "required":["Id"] + }, + "DeleteStreamingDistributionRequest":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "required":["Id"] + }, + "Distribution":{ + "type":"structure", + "required":[ + "Id", + "Status", + "LastModifiedTime", + "InProgressInvalidationBatches", + "DomainName", + "ActiveTrustedSigners", + "DistributionConfig" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "LastModifiedTime":{"shape":"timestamp"}, + "InProgressInvalidationBatches":{"shape":"integer"}, + "DomainName":{"shape":"string"}, + "ActiveTrustedSigners":{"shape":"ActiveTrustedSigners"}, + "DistributionConfig":{"shape":"DistributionConfig"} + } + }, + "DistributionAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "DistributionConfig":{ + "type":"structure", + "required":[ + "CallerReference", + "Origins", + "DefaultCacheBehavior", + "Comment", + "Enabled" + ], + "members":{ + "CallerReference":{"shape":"string"}, + "Aliases":{"shape":"Aliases"}, + "DefaultRootObject":{"shape":"string"}, + "Origins":{"shape":"Origins"}, + "DefaultCacheBehavior":{"shape":"DefaultCacheBehavior"}, + "CacheBehaviors":{"shape":"CacheBehaviors"}, + "CustomErrorResponses":{"shape":"CustomErrorResponses"}, + "Comment":{"shape":"string"}, + "Logging":{"shape":"LoggingConfig"}, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{"shape":"boolean"}, + "ViewerCertificate":{"shape":"ViewerCertificate"}, + "Restrictions":{"shape":"Restrictions"}, + "WebACLId":{"shape":"string"} + } + }, + "DistributionList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", 
+ "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{"shape":"string"}, + "NextMarker":{"shape":"string"}, + "MaxItems":{"shape":"integer"}, + "IsTruncated":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"DistributionSummaryList"} + } + }, + "DistributionNotDisabled":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "DistributionSummary":{ + "type":"structure", + "required":[ + "Id", + "Status", + "LastModifiedTime", + "DomainName", + "Aliases", + "Origins", + "DefaultCacheBehavior", + "CacheBehaviors", + "CustomErrorResponses", + "Comment", + "PriceClass", + "Enabled", + "ViewerCertificate", + "Restrictions", + "WebACLId" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "LastModifiedTime":{"shape":"timestamp"}, + "DomainName":{"shape":"string"}, + "Aliases":{"shape":"Aliases"}, + "Origins":{"shape":"Origins"}, + "DefaultCacheBehavior":{"shape":"DefaultCacheBehavior"}, + "CacheBehaviors":{"shape":"CacheBehaviors"}, + "CustomErrorResponses":{"shape":"CustomErrorResponses"}, + "Comment":{"shape":"string"}, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{"shape":"boolean"}, + "ViewerCertificate":{"shape":"ViewerCertificate"}, + "Restrictions":{"shape":"Restrictions"}, + "WebACLId":{"shape":"string"} + } + }, + "DistributionSummaryList":{ + "type":"list", + "member":{ + "shape":"DistributionSummary", + "locationName":"DistributionSummary" + } + }, + "ForwardedValues":{ + "type":"structure", + "required":[ + "QueryString", + "Cookies" + ], + "members":{ + "QueryString":{"shape":"boolean"}, + "Cookies":{"shape":"CookiePreference"}, + "Headers":{"shape":"Headers"} + } + }, + "GeoRestriction":{ + "type":"structure", + "required":[ + "RestrictionType", + "Quantity" + ], + "members":{ + "RestrictionType":{"shape":"GeoRestrictionType"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"LocationList"} + } + }, + "GeoRestrictionType":{ + "type":"string", + "enum":[ + "blacklist", + "whitelist", + "none" + ] + }, + "GetCloudFrontOriginAccessIdentityConfigRequest":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + }, + "required":["Id"] + }, + "GetCloudFrontOriginAccessIdentityConfigResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentityConfig":{"shape":"CloudFrontOriginAccessIdentityConfig"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"CloudFrontOriginAccessIdentityConfig" + }, + "GetCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + }, + "required":["Id"] + }, + "GetCloudFrontOriginAccessIdentityResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentity":{"shape":"CloudFrontOriginAccessIdentity"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"CloudFrontOriginAccessIdentity" + }, + "GetDistributionConfigRequest":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + }, + "required":["Id"] + }, + "GetDistributionConfigResult":{ + "type":"structure", + "members":{ + "DistributionConfig":{"shape":"DistributionConfig"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"DistributionConfig" + }, + "GetDistributionRequest":{ + 
"type":"structure", + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + }, + "required":["Id"] + }, + "GetDistributionResult":{ + "type":"structure", + "members":{ + "Distribution":{"shape":"Distribution"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"Distribution" + }, + "GetInvalidationRequest":{ + "type":"structure", + "required":[ + "DistributionId", + "Id" + ], + "members":{ + "DistributionId":{ + "shape":"string", + "location":"uri", + "locationName":"DistributionId" + }, + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetInvalidationResult":{ + "type":"structure", + "members":{ + "Invalidation":{"shape":"Invalidation"} + }, + "payload":"Invalidation" + }, + "GetStreamingDistributionConfigRequest":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + }, + "required":["Id"] + }, + "GetStreamingDistributionConfigResult":{ + "type":"structure", + "members":{ + "StreamingDistributionConfig":{"shape":"StreamingDistributionConfig"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"StreamingDistributionConfig" + }, + "GetStreamingDistributionRequest":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + }, + "required":["Id"] + }, + "GetStreamingDistributionResult":{ + "type":"structure", + "members":{ + "StreamingDistribution":{"shape":"StreamingDistribution"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"StreamingDistribution" + }, + "HeaderList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Name" + } + }, + "Headers":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"HeaderList"} + } + }, + "IllegalUpdate":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InconsistentQuantities":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidArgument":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidDefaultRootObject":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidErrorCode":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidForwardCookies":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidGeoRestrictionParameter":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidHeadersForS3Origin":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidIfMatchVersion":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidLocationCode":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + 
"InvalidMinimumProtocolVersion":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidOrigin":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidOriginAccessIdentity":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidProtocolSettings":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidRelativePath":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidRequiredProtocol":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidResponseCode":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidTTLOrder":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidViewerCertificate":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidWebACLId":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "Invalidation":{ + "type":"structure", + "required":[ + "Id", + "Status", + "CreateTime", + "InvalidationBatch" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "CreateTime":{"shape":"timestamp"}, + "InvalidationBatch":{"shape":"InvalidationBatch"} + } + }, + "InvalidationBatch":{ + "type":"structure", + "required":[ + "Paths", + "CallerReference" + ], + "members":{ + "Paths":{"shape":"Paths"}, + "CallerReference":{"shape":"string"} + } + }, + "InvalidationList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{"shape":"string"}, + "NextMarker":{"shape":"string"}, + "MaxItems":{"shape":"integer"}, + "IsTruncated":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"InvalidationSummaryList"} + } + }, + "InvalidationSummary":{ + "type":"structure", + "required":[ + "Id", + "CreateTime", + "Status" + ], + "members":{ + "Id":{"shape":"string"}, + "CreateTime":{"shape":"timestamp"}, + "Status":{"shape":"string"} + } + }, + "InvalidationSummaryList":{ + "type":"list", + "member":{ + "shape":"InvalidationSummary", + "locationName":"InvalidationSummary" + } + }, + "ItemSelection":{ + "type":"string", + "enum":[ + "none", + "whitelist", + "all" + ] + }, + "KeyPairIdList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"KeyPairId" + } + }, + "KeyPairIds":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"KeyPairIdList"} + } + }, + "ListCloudFrontOriginAccessIdentitiesRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListCloudFrontOriginAccessIdentitiesResult":{ + "type":"structure", + "members":{ + 
"CloudFrontOriginAccessIdentityList":{"shape":"CloudFrontOriginAccessIdentityList"} + }, + "payload":"CloudFrontOriginAccessIdentityList" + }, + "ListDistributionsByWebACLIdRequest":{ + "type":"structure", + "required":["WebACLId"], + "members":{ + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + }, + "WebACLId":{ + "shape":"string", + "location":"uri", + "locationName":"WebACLId" + } + } + }, + "ListDistributionsByWebACLIdResult":{ + "type":"structure", + "members":{ + "DistributionList":{"shape":"DistributionList"} + }, + "payload":"DistributionList" + }, + "ListDistributionsRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListDistributionsResult":{ + "type":"structure", + "members":{ + "DistributionList":{"shape":"DistributionList"} + }, + "payload":"DistributionList" + }, + "ListInvalidationsRequest":{ + "type":"structure", + "required":["DistributionId"], + "members":{ + "DistributionId":{ + "shape":"string", + "location":"uri", + "locationName":"DistributionId" + }, + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListInvalidationsResult":{ + "type":"structure", + "members":{ + "InvalidationList":{"shape":"InvalidationList"} + }, + "payload":"InvalidationList" + }, + "ListStreamingDistributionsRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListStreamingDistributionsResult":{ + "type":"structure", + "members":{ + "StreamingDistributionList":{"shape":"StreamingDistributionList"} + }, + "payload":"StreamingDistributionList" + }, + "LocationList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Location" + } + }, + "LoggingConfig":{ + "type":"structure", + "required":[ + "Enabled", + "IncludeCookies", + "Bucket", + "Prefix" + ], + "members":{ + "Enabled":{"shape":"boolean"}, + "IncludeCookies":{"shape":"boolean"}, + "Bucket":{"shape":"string"}, + "Prefix":{"shape":"string"} + } + }, + "Method":{ + "type":"string", + "enum":[ + "GET", + "HEAD", + "POST", + "PUT", + "PATCH", + "OPTIONS", + "DELETE" + ] + }, + "MethodsList":{ + "type":"list", + "member":{ + "shape":"Method", + "locationName":"Method" + } + }, + "MinimumProtocolVersion":{ + "type":"string", + "enum":[ + "SSLv3", + "TLSv1" + ] + }, + "MissingBody":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "NoSuchCloudFrontOriginAccessIdentity":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchDistribution":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchInvalidation":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchOrigin":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + 
}, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchStreamingDistribution":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "Origin":{ + "type":"structure", + "required":[ + "Id", + "DomainName" + ], + "members":{ + "Id":{"shape":"string"}, + "DomainName":{"shape":"string"}, + "OriginPath":{"shape":"string"}, + "S3OriginConfig":{"shape":"S3OriginConfig"}, + "CustomOriginConfig":{"shape":"CustomOriginConfig"} + } + }, + "OriginList":{ + "type":"list", + "member":{ + "shape":"Origin", + "locationName":"Origin" + }, + "min":1 + }, + "OriginProtocolPolicy":{ + "type":"string", + "enum":[ + "http-only", + "match-viewer" + ] + }, + "Origins":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"OriginList"} + } + }, + "PathList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Path" + } + }, + "Paths":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"PathList"} + } + }, + "PreconditionFailed":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":412}, + "exception":true + }, + "PriceClass":{ + "type":"string", + "enum":[ + "PriceClass_100", + "PriceClass_200", + "PriceClass_All" + ] + }, + "Restrictions":{ + "type":"structure", + "required":["GeoRestriction"], + "members":{ + "GeoRestriction":{"shape":"GeoRestriction"} + } + }, + "S3Origin":{ + "type":"structure", + "required":[ + "DomainName", + "OriginAccessIdentity" + ], + "members":{ + "DomainName":{"shape":"string"}, + "OriginAccessIdentity":{"shape":"string"} + } + }, + "S3OriginConfig":{ + "type":"structure", + "required":["OriginAccessIdentity"], + "members":{ + "OriginAccessIdentity":{"shape":"string"} + } + }, + "SSLSupportMethod":{ + "type":"string", + "enum":[ + "sni-only", + "vip" + ] + }, + "Signer":{ + "type":"structure", + "members":{ + "AwsAccountNumber":{"shape":"string"}, + "KeyPairIds":{"shape":"KeyPairIds"} + } + }, + "SignerList":{ + "type":"list", + "member":{ + "shape":"Signer", + "locationName":"Signer" + } + }, + "StreamingDistribution":{ + "type":"structure", + "required":[ + "Id", + "Status", + "DomainName", + "ActiveTrustedSigners", + "StreamingDistributionConfig" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "LastModifiedTime":{"shape":"timestamp"}, + "DomainName":{"shape":"string"}, + "ActiveTrustedSigners":{"shape":"ActiveTrustedSigners"}, + "StreamingDistributionConfig":{"shape":"StreamingDistributionConfig"} + } + }, + "StreamingDistributionAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "StreamingDistributionConfig":{ + "type":"structure", + "required":[ + "CallerReference", + "S3Origin", + "Comment", + "TrustedSigners", + "Enabled" + ], + "members":{ + "CallerReference":{"shape":"string"}, + "S3Origin":{"shape":"S3Origin"}, + "Aliases":{"shape":"Aliases"}, + "Comment":{"shape":"string"}, + "Logging":{"shape":"StreamingLoggingConfig"}, + "TrustedSigners":{"shape":"TrustedSigners"}, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{"shape":"boolean"} + } + }, + "StreamingDistributionList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{"shape":"string"}, + "NextMarker":{"shape":"string"}, + 
"MaxItems":{"shape":"integer"}, + "IsTruncated":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"StreamingDistributionSummaryList"} + } + }, + "StreamingDistributionNotDisabled":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "StreamingDistributionSummary":{ + "type":"structure", + "required":[ + "Id", + "Status", + "LastModifiedTime", + "DomainName", + "S3Origin", + "Aliases", + "TrustedSigners", + "Comment", + "PriceClass", + "Enabled" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "LastModifiedTime":{"shape":"timestamp"}, + "DomainName":{"shape":"string"}, + "S3Origin":{"shape":"S3Origin"}, + "Aliases":{"shape":"Aliases"}, + "TrustedSigners":{"shape":"TrustedSigners"}, + "Comment":{"shape":"string"}, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{"shape":"boolean"} + } + }, + "StreamingDistributionSummaryList":{ + "type":"list", + "member":{ + "shape":"StreamingDistributionSummary", + "locationName":"StreamingDistributionSummary" + } + }, + "StreamingLoggingConfig":{ + "type":"structure", + "required":[ + "Enabled", + "Bucket", + "Prefix" + ], + "members":{ + "Enabled":{"shape":"boolean"}, + "Bucket":{"shape":"string"}, + "Prefix":{"shape":"string"} + } + }, + "TooManyCacheBehaviors":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyCertificates":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyCloudFrontOriginAccessIdentities":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyCookieNamesInWhiteList":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyDistributionCNAMEs":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyDistributions":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyHeadersInForwardedValues":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyInvalidationsInProgress":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyOrigins":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyStreamingDistributionCNAMEs":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyStreamingDistributions":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyTrustedSigners":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TrustedSignerDoesNotExist":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TrustedSigners":{ + "type":"structure", + "required":[ + "Enabled", + "Quantity" + ], + "members":{ + 
"Enabled":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"AwsAccountNumberList"} + } + }, + "UpdateCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "required":[ + "CloudFrontOriginAccessIdentityConfig", + "Id" + ], + "members":{ + "CloudFrontOriginAccessIdentityConfig":{ + "shape":"CloudFrontOriginAccessIdentityConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-07-27/"}, + "locationName":"CloudFrontOriginAccessIdentityConfig" + }, + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "payload":"CloudFrontOriginAccessIdentityConfig" + }, + "UpdateCloudFrontOriginAccessIdentityResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentity":{"shape":"CloudFrontOriginAccessIdentity"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"CloudFrontOriginAccessIdentity" + }, + "UpdateDistributionRequest":{ + "type":"structure", + "required":[ + "DistributionConfig", + "Id" + ], + "members":{ + "DistributionConfig":{ + "shape":"DistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-07-27/"}, + "locationName":"DistributionConfig" + }, + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "payload":"DistributionConfig" + }, + "UpdateDistributionResult":{ + "type":"structure", + "members":{ + "Distribution":{"shape":"Distribution"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"Distribution" + }, + "UpdateStreamingDistributionRequest":{ + "type":"structure", + "required":[ + "StreamingDistributionConfig", + "Id" + ], + "members":{ + "StreamingDistributionConfig":{ + "shape":"StreamingDistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-07-27/"}, + "locationName":"StreamingDistributionConfig" + }, + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "payload":"StreamingDistributionConfig" + }, + "UpdateStreamingDistributionResult":{ + "type":"structure", + "members":{ + "StreamingDistribution":{"shape":"StreamingDistribution"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"StreamingDistribution" + }, + "ViewerCertificate":{ + "type":"structure", + "members":{ + "IAMCertificateId":{"shape":"string"}, + "CloudFrontDefaultCertificate":{"shape":"boolean"}, + "SSLSupportMethod":{"shape":"SSLSupportMethod"}, + "MinimumProtocolVersion":{"shape":"MinimumProtocolVersion"} + } + }, + "ViewerProtocolPolicy":{ + "type":"string", + "enum":[ + "allow-all", + "https-only", + "redirect-to-https" + ] + }, + "boolean":{"type":"boolean"}, + "integer":{"type":"integer"}, + "long":{"type":"long"}, + "string":{"type":"string"}, + "timestamp":{"type":"timestamp"} + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-07-27/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-07-27/docs-2.json new file mode 100644 index 000000000..07747194c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-07-27/docs-2.json @@ -0,0 +1,1164 @@ +{ + "version": "2.0", + "operations": { + 
"CreateCloudFrontOriginAccessIdentity": "Create a new origin access identity.", + "CreateDistribution": "Create a new distribution.", + "CreateInvalidation": "Create a new invalidation.", + "CreateStreamingDistribution": "Create a new streaming distribution.", + "DeleteCloudFrontOriginAccessIdentity": "Delete an origin access identity.", + "DeleteDistribution": "Delete a distribution.", + "DeleteStreamingDistribution": "Delete a streaming distribution.", + "GetCloudFrontOriginAccessIdentity": "Get the information about an origin access identity.", + "GetCloudFrontOriginAccessIdentityConfig": "Get the configuration information about an origin access identity.", + "GetDistribution": "Get the information about a distribution.", + "GetDistributionConfig": "Get the configuration information about a distribution.", + "GetInvalidation": "Get the information about an invalidation.", + "GetStreamingDistribution": "Get the information about a streaming distribution.", + "GetStreamingDistributionConfig": "Get the configuration information about a streaming distribution.", + "ListCloudFrontOriginAccessIdentities": "List origin access identities.", + "ListDistributions": "List distributions.", + "ListDistributionsByWebACLId": "List the distributions that are associated with a specified AWS WAF web ACL.", + "ListInvalidations": "List invalidation batches.", + "ListStreamingDistributions": "List streaming distributions.", + "UpdateCloudFrontOriginAccessIdentity": "Update an origin access identity.", + "UpdateDistribution": "Update a distribution.", + "UpdateStreamingDistribution": "Update a streaming distribution." + }, + "service": null, + "shapes": { + "AccessDenied": { + "base": "Access denied.", + "refs": { + } + }, + "ActiveTrustedSigners": { + "base": "A complex type that lists the AWS accounts, if any, that you included in the TrustedSigners complex type for the default cache behavior or for any of the other cache behaviors for this distribution. These are accounts that you want to allow to create signed URLs for private content.", + "refs": { + "Distribution$ActiveTrustedSigners": "CloudFront automatically adds this element to the response only if you've set up the distribution to serve private content with signed URLs. The element lists the key pair IDs that CloudFront is aware of for each trusted signer. The Signer child element lists the AWS account number of the trusted signer (or an empty Self element if the signer is you). The Signer element also includes the IDs of any active key pairs associated with the trusted signer's AWS account. If no KeyPairId element appears for a Signer, that signer can't create working signed URLs.", + "StreamingDistribution$ActiveTrustedSigners": "CloudFront automatically adds this element to the response only if you've set up the distribution to serve private content with signed URLs. The element lists the key pair IDs that CloudFront is aware of for each trusted signer. The Signer child element lists the AWS account number of the trusted signer (or an empty Self element if the signer is you). The Signer element also includes the IDs of any active key pairs associated with the trusted signer's AWS account. If no KeyPairId element appears for a Signer, that signer can't create working signed URLs." + } + }, + "AliasList": { + "base": null, + "refs": { + "Aliases$Items": "Optional: A complex type that contains CNAME elements, if any, for this distribution. If Quantity is 0, you can omit Items." 
+ } + }, + "Aliases": { + "base": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution.", + "refs": { + "DistributionConfig$Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution.", + "DistributionSummary$Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution.", + "StreamingDistributionConfig$Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this streaming distribution.", + "StreamingDistributionSummary$Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this streaming distribution." + } + }, + "AllowedMethods": { + "base": "A complex type that controls which HTTP methods CloudFront processes and forwards to your Amazon S3 bucket or your custom origin. There are three choices: - CloudFront forwards only GET and HEAD requests. - CloudFront forwards only GET, HEAD and OPTIONS requests. - CloudFront forwards GET, HEAD, OPTIONS, PUT, PATCH, POST, and DELETE requests. If you pick the third choice, you may need to restrict access to your Amazon S3 bucket or to your custom origin so users can't perform operations that you don't want them to. For example, you may not want users to have permission to delete objects from your origin.", + "refs": { + "CacheBehavior$AllowedMethods": null, + "DefaultCacheBehavior$AllowedMethods": null + } + }, + "AwsAccountNumberList": { + "base": null, + "refs": { + "TrustedSigners$Items": "Optional: A complex type that contains trusted signers for this cache behavior. If Quantity is 0, you can omit Items." + } + }, + "BatchTooLarge": { + "base": null, + "refs": { + } + }, + "CNAMEAlreadyExists": { + "base": null, + "refs": { + } + }, + "CacheBehavior": { + "base": "A complex type that describes how CloudFront processes requests. You can create up to 10 cache behaviors.You must create at least as many cache behaviors (including the default cache behavior) as you have origins if you want CloudFront to distribute objects from all of the origins. Each cache behavior specifies the one origin from which you want CloudFront to get objects. If you have two origins and only the default cache behavior, the default cache behavior will cause CloudFront to get objects from one of the origins, but the other origin will never be used. If you don't want to specify any cache behaviors, include only an empty CacheBehaviors element. Don't include an empty CacheBehavior element, or CloudFront returns a MalformedXML error. To delete all cache behaviors in an existing distribution, update the distribution configuration and include only an empty CacheBehaviors element. To add, change, or remove one or more cache behaviors, update the distribution configuration and specify all of the cache behaviors that you want to include in the updated distribution.", + "refs": { + "CacheBehaviorList$member": null + } + }, + "CacheBehaviorList": { + "base": null, + "refs": { + "CacheBehaviors$Items": "Optional: A complex type that contains cache behaviors for this distribution. If Quantity is 0, you can omit Items." 
+ } + }, + "CacheBehaviors": { + "base": "A complex type that contains zero or more CacheBehavior elements.", + "refs": { + "DistributionConfig$CacheBehaviors": "A complex type that contains zero or more CacheBehavior elements.", + "DistributionSummary$CacheBehaviors": "A complex type that contains zero or more CacheBehavior elements." + } + }, + "CachedMethods": { + "base": "A complex type that controls whether CloudFront caches the response to requests using the specified HTTP methods. There are two choices: - CloudFront caches responses to GET and HEAD requests. - CloudFront caches responses to GET, HEAD, and OPTIONS requests. If you pick the second choice for your S3 Origin, you may need to forward Access-Control-Request-Method, Access-Control-Request-Headers and Origin headers for the responses to be cached correctly.", + "refs": { + "AllowedMethods$CachedMethods": null + } + }, + "CloudFrontOriginAccessIdentity": { + "base": "CloudFront origin access identity.", + "refs": { + "CreateCloudFrontOriginAccessIdentityResult$CloudFrontOriginAccessIdentity": "The origin access identity's information.", + "GetCloudFrontOriginAccessIdentityResult$CloudFrontOriginAccessIdentity": "The origin access identity's information.", + "UpdateCloudFrontOriginAccessIdentityResult$CloudFrontOriginAccessIdentity": "The origin access identity's information." + } + }, + "CloudFrontOriginAccessIdentityAlreadyExists": { + "base": "If the CallerReference is a value you already sent in a previous request to create an identity but the content of the CloudFrontOriginAccessIdentityConfig is different from the original request, CloudFront returns a CloudFrontOriginAccessIdentityAlreadyExists error.", + "refs": { + } + }, + "CloudFrontOriginAccessIdentityConfig": { + "base": "Origin access identity configuration.", + "refs": { + "CloudFrontOriginAccessIdentity$CloudFrontOriginAccessIdentityConfig": "The current configuration information for the identity.", + "CreateCloudFrontOriginAccessIdentityRequest$CloudFrontOriginAccessIdentityConfig": "The origin access identity's configuration information.", + "GetCloudFrontOriginAccessIdentityConfigResult$CloudFrontOriginAccessIdentityConfig": "The origin access identity's configuration information.", + "UpdateCloudFrontOriginAccessIdentityRequest$CloudFrontOriginAccessIdentityConfig": "The identity's configuration information." + } + }, + "CloudFrontOriginAccessIdentityInUse": { + "base": null, + "refs": { + } + }, + "CloudFrontOriginAccessIdentityList": { + "base": "The CloudFrontOriginAccessIdentityList type.", + "refs": { + "ListCloudFrontOriginAccessIdentitiesResult$CloudFrontOriginAccessIdentityList": "The CloudFrontOriginAccessIdentityList type." + } + }, + "CloudFrontOriginAccessIdentitySummary": { + "base": "Summary of the information about a CloudFront origin access identity.", + "refs": { + "CloudFrontOriginAccessIdentitySummaryList$member": null + } + }, + "CloudFrontOriginAccessIdentitySummaryList": { + "base": null, + "refs": { + "CloudFrontOriginAccessIdentityList$Items": "A complex type that contains one CloudFrontOriginAccessIdentitySummary element for each origin access identity that was created by the current AWS account." + } + }, + "CookieNameList": { + "base": null, + "refs": { + "CookieNames$Items": "Optional: A complex type that contains whitelisted cookies for this cache behavior. If Quantity is 0, you can omit Items." 
+ } + }, + "CookieNames": { + "base": "A complex type that specifies the whitelisted cookies, if any, that you want CloudFront to forward to your origin that is associated with this cache behavior.", + "refs": { + "CookiePreference$WhitelistedNames": "A complex type that specifies the whitelisted cookies, if any, that you want CloudFront to forward to your origin that is associated with this cache behavior." + } + }, + "CookiePreference": { + "base": "A complex type that specifies the cookie preferences associated with this cache behavior.", + "refs": { + "ForwardedValues$Cookies": "A complex type that specifies how CloudFront handles cookies." + } + }, + "CreateCloudFrontOriginAccessIdentityRequest": { + "base": "The request to create a new origin access identity.", + "refs": { + } + }, + "CreateCloudFrontOriginAccessIdentityResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "CreateDistributionRequest": { + "base": "The request to create a new distribution.", + "refs": { + } + }, + "CreateDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "CreateInvalidationRequest": { + "base": "The request to create an invalidation.", + "refs": { + } + }, + "CreateInvalidationResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "CreateStreamingDistributionRequest": { + "base": "The request to create a new streaming distribution.", + "refs": { + } + }, + "CreateStreamingDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "CustomErrorResponse": { + "base": "A complex type that describes how you'd prefer CloudFront to respond to requests that result in either a 4xx or 5xx response. You can control whether a custom error page should be displayed, what the desired response code should be for this error page and how long should the error response be cached by CloudFront. If you don't want to specify any custom error responses, include only an empty CustomErrorResponses element. To delete all custom error responses in an existing distribution, update the distribution configuration and include only an empty CustomErrorResponses element. To add, change, or remove one or more custom error responses, update the distribution configuration and specify all of the custom error responses that you want to include in the updated distribution.", + "refs": { + "CustomErrorResponseList$member": null + } + }, + "CustomErrorResponseList": { + "base": null, + "refs": { + "CustomErrorResponses$Items": "Optional: A complex type that contains custom error responses for this distribution. If Quantity is 0, you can omit Items." + } + }, + "CustomErrorResponses": { + "base": "A complex type that contains zero or more CustomErrorResponse elements.", + "refs": { + "DistributionConfig$CustomErrorResponses": "A complex type that contains zero or more CustomErrorResponse elements.", + "DistributionSummary$CustomErrorResponses": "A complex type that contains zero or more CustomErrorResponses elements." + } + }, + "CustomOriginConfig": { + "base": "A customer origin.", + "refs": { + "Origin$CustomOriginConfig": "A complex type that contains information about a custom origin. If the origin is an Amazon S3 bucket, use the S3OriginConfig element instead." 
+ } + }, + "DefaultCacheBehavior": { + "base": "A complex type that describes the default cache behavior if you do not specify a CacheBehavior element or if files don't match any of the values of PathPattern in CacheBehavior elements.You must create exactly one default cache behavior.", + "refs": { + "DistributionConfig$DefaultCacheBehavior": "A complex type that describes the default cache behavior if you do not specify a CacheBehavior element or if files don't match any of the values of PathPattern in CacheBehavior elements.You must create exactly one default cache behavior.", + "DistributionSummary$DefaultCacheBehavior": "A complex type that describes the default cache behavior if you do not specify a CacheBehavior element or if files don't match any of the values of PathPattern in CacheBehavior elements.You must create exactly one default cache behavior." + } + }, + "DeleteCloudFrontOriginAccessIdentityRequest": { + "base": "The request to delete a origin access identity.", + "refs": { + } + }, + "DeleteDistributionRequest": { + "base": "The request to delete a distribution.", + "refs": { + } + }, + "DeleteStreamingDistributionRequest": { + "base": "The request to delete a streaming distribution.", + "refs": { + } + }, + "Distribution": { + "base": "A distribution.", + "refs": { + "CreateDistributionResult$Distribution": "The distribution's information.", + "GetDistributionResult$Distribution": "The distribution's information.", + "UpdateDistributionResult$Distribution": "The distribution's information." + } + }, + "DistributionAlreadyExists": { + "base": "The caller reference you attempted to create the distribution with is associated with another distribution.", + "refs": { + } + }, + "DistributionConfig": { + "base": "A distribution Configuration.", + "refs": { + "CreateDistributionRequest$DistributionConfig": "The distribution's configuration information.", + "Distribution$DistributionConfig": "The current configuration information for the distribution.", + "GetDistributionConfigResult$DistributionConfig": "The distribution's configuration information.", + "UpdateDistributionRequest$DistributionConfig": "The distribution's configuration information." + } + }, + "DistributionList": { + "base": "A distribution list.", + "refs": { + "ListDistributionsByWebACLIdResult$DistributionList": "The DistributionList type.", + "ListDistributionsResult$DistributionList": "The DistributionList type." + } + }, + "DistributionNotDisabled": { + "base": null, + "refs": { + } + }, + "DistributionSummary": { + "base": "A summary of the information for an Amazon CloudFront distribution.", + "refs": { + "DistributionSummaryList$member": null + } + }, + "DistributionSummaryList": { + "base": null, + "refs": { + "DistributionList$Items": "A complex type that contains one DistributionSummary element for each distribution that was created by the current AWS account." + } + }, + "ForwardedValues": { + "base": "A complex type that specifies how CloudFront handles query strings, cookies and headers.", + "refs": { + "CacheBehavior$ForwardedValues": "A complex type that specifies how CloudFront handles query strings, cookies and headers.", + "DefaultCacheBehavior$ForwardedValues": "A complex type that specifies how CloudFront handles query strings, cookies and headers." + } + }, + "GeoRestriction": { + "base": "A complex type that controls the countries in which your content is distributed. For more information about geo restriction, go to Customizing Error Responses in the Amazon CloudFront Developer Guide. 
CloudFront determines the location of your users using MaxMind GeoIP databases. For information about the accuracy of these databases, see How accurate are your GeoIP databases? on the MaxMind website.", + "refs": { + "Restrictions$GeoRestriction": null + } + }, + "GeoRestrictionType": { + "base": null, + "refs": { + "GeoRestriction$RestrictionType": "The method that you want to use to restrict distribution of your content by country: - none: No geo restriction is enabled, meaning access to content is not restricted by client geo location. - blacklist: The Location elements specify the countries in which you do not want CloudFront to distribute your content. - whitelist: The Location elements specify the countries in which you want CloudFront to distribute your content." + } + }, + "GetCloudFrontOriginAccessIdentityConfigRequest": { + "base": "The request to get an origin access identity's configuration.", + "refs": { + } + }, + "GetCloudFrontOriginAccessIdentityConfigResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetCloudFrontOriginAccessIdentityRequest": { + "base": "The request to get an origin access identity's information.", + "refs": { + } + }, + "GetCloudFrontOriginAccessIdentityResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetDistributionConfigRequest": { + "base": "The request to get a distribution configuration.", + "refs": { + } + }, + "GetDistributionConfigResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetDistributionRequest": { + "base": "The request to get a distribution's information.", + "refs": { + } + }, + "GetDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetInvalidationRequest": { + "base": "The request to get an invalidation's information.", + "refs": { + } + }, + "GetInvalidationResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetStreamingDistributionConfigRequest": { + "base": "To request to get a streaming distribution configuration.", + "refs": { + } + }, + "GetStreamingDistributionConfigResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetStreamingDistributionRequest": { + "base": "The request to get a streaming distribution's information.", + "refs": { + } + }, + "GetStreamingDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "HeaderList": { + "base": null, + "refs": { + "Headers$Items": "Optional: A complex type that contains a Name element for each header that you want CloudFront to forward to the origin and to vary on for this cache behavior. If Quantity is 0, omit Items." + } + }, + "Headers": { + "base": "A complex type that specifies the headers that you want CloudFront to forward to the origin for this cache behavior. For the headers that you specify, CloudFront also caches separate versions of a given object based on the header values in viewer requests; this is known as varying on headers. For example, suppose viewer requests for logo.jpg contain a custom Product header that has a value of either Acme or Apex, and you configure CloudFront to vary on the Product header. 
CloudFront forwards the Product header to the origin and caches the response from the origin once for each header value.", + "refs": { + "ForwardedValues$Headers": "A complex type that specifies the Headers, if any, that you want CloudFront to vary upon for this cache behavior." + } + }, + "IllegalUpdate": { + "base": "Origin and CallerReference cannot be updated.", + "refs": { + } + }, + "InconsistentQuantities": { + "base": "The value of Quantity and the size of Items do not match.", + "refs": { + } + }, + "InvalidArgument": { + "base": "The argument is invalid.", + "refs": { + } + }, + "InvalidDefaultRootObject": { + "base": "The default root object file name is too big or contains an invalid character.", + "refs": { + } + }, + "InvalidErrorCode": { + "base": null, + "refs": { + } + }, + "InvalidForwardCookies": { + "base": "Your request contains forward cookies option which doesn't match with the expectation for the whitelisted list of cookie names. Either list of cookie names has been specified when not allowed or list of cookie names is missing when expected.", + "refs": { + } + }, + "InvalidGeoRestrictionParameter": { + "base": null, + "refs": { + } + }, + "InvalidHeadersForS3Origin": { + "base": null, + "refs": { + } + }, + "InvalidIfMatchVersion": { + "base": "The If-Match version is missing or not valid for the distribution.", + "refs": { + } + }, + "InvalidLocationCode": { + "base": null, + "refs": { + } + }, + "InvalidMinimumProtocolVersion": { + "base": null, + "refs": { + } + }, + "InvalidOrigin": { + "base": "The Amazon S3 origin server specified does not refer to a valid Amazon S3 bucket.", + "refs": { + } + }, + "InvalidOriginAccessIdentity": { + "base": "The origin access identity is not valid or doesn't exist.", + "refs": { + } + }, + "InvalidProtocolSettings": { + "base": "You cannot specify SSLv3 as the minimum protocol version if you only want to support only clients that Support Server Name Indication (SNI).", + "refs": { + } + }, + "InvalidRelativePath": { + "base": "The relative path is too big, is not URL-encoded, or does not begin with a slash (/).", + "refs": { + } + }, + "InvalidRequiredProtocol": { + "base": "This operation requires the HTTPS protocol. Ensure that you specify the HTTPS protocol in your request, or omit the RequiredProtocols element from your distribution configuration.", + "refs": { + } + }, + "InvalidResponseCode": { + "base": null, + "refs": { + } + }, + "InvalidTTLOrder": { + "base": null, + "refs": { + } + }, + "InvalidViewerCertificate": { + "base": null, + "refs": { + } + }, + "InvalidWebACLId": { + "base": null, + "refs": { + } + }, + "Invalidation": { + "base": "An invalidation.", + "refs": { + "CreateInvalidationResult$Invalidation": "The invalidation's information.", + "GetInvalidationResult$Invalidation": "The invalidation's information." + } + }, + "InvalidationBatch": { + "base": "An invalidation batch.", + "refs": { + "CreateInvalidationRequest$InvalidationBatch": "The batch information for the invalidation.", + "Invalidation$InvalidationBatch": "The current invalidation information for the batch request." + } + }, + "InvalidationList": { + "base": "An invalidation list.", + "refs": { + "ListInvalidationsResult$InvalidationList": "Information about invalidation batches." 
+ } + }, + "InvalidationSummary": { + "base": "Summary of an invalidation request.", + "refs": { + "InvalidationSummaryList$member": null + } + }, + "InvalidationSummaryList": { + "base": null, + "refs": { + "InvalidationList$Items": "A complex type that contains one InvalidationSummary element for each invalidation batch that was created by the current AWS account." + } + }, + "ItemSelection": { + "base": null, + "refs": { + "CookiePreference$Forward": "Use this element to specify whether you want CloudFront to forward cookies to the origin that is associated with this cache behavior. You can specify all, none or whitelist. If you choose All, CloudFront forwards all cookies regardless of how many your application uses." + } + }, + "KeyPairIdList": { + "base": null, + "refs": { + "KeyPairIds$Items": "A complex type that lists the active CloudFront key pairs, if any, that are associated with AwsAccountNumber." + } + }, + "KeyPairIds": { + "base": "A complex type that lists the active CloudFront key pairs, if any, that are associated with AwsAccountNumber.", + "refs": { + "Signer$KeyPairIds": "A complex type that lists the active CloudFront key pairs, if any, that are associated with AwsAccountNumber." + } + }, + "ListCloudFrontOriginAccessIdentitiesRequest": { + "base": "The request to list origin access identities.", + "refs": { + } + }, + "ListCloudFrontOriginAccessIdentitiesResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "ListDistributionsByWebACLIdRequest": { + "base": "The request to list distributions that are associated with a specified AWS WAF web ACL.", + "refs": { + } + }, + "ListDistributionsByWebACLIdResult": { + "base": "The response to a request to list the distributions that are associated with a specified AWS WAF web ACL.", + "refs": { + } + }, + "ListDistributionsRequest": { + "base": "The request to list your distributions.", + "refs": { + } + }, + "ListDistributionsResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "ListInvalidationsRequest": { + "base": "The request to list invalidations.", + "refs": { + } + }, + "ListInvalidationsResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "ListStreamingDistributionsRequest": { + "base": "The request to list your streaming distributions.", + "refs": { + } + }, + "ListStreamingDistributionsResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "LocationList": { + "base": null, + "refs": { + "GeoRestriction$Items": "A complex type that contains a Location element for each country in which you want CloudFront either to distribute your content (whitelist) or not distribute your content (blacklist). The Location element is a two-letter, uppercase country code for a country that you want to include in your blacklist or whitelist. Include one Location element for each country. CloudFront and MaxMind both use ISO 3166 country codes. For the current list of countries and the corresponding codes, see ISO 3166-1-alpha-2 code on the International Organization for Standardization website. You can also refer to the country list in the CloudFront console, which includes both country names and codes." + } + }, + "LoggingConfig": { + "base": "A complex type that controls whether access logs are written for the distribution.", + "refs": { + "DistributionConfig$Logging": "A complex type that controls whether access logs are written for the distribution." 
+ } + }, + "Method": { + "base": null, + "refs": { + "MethodsList$member": null + } + }, + "MethodsList": { + "base": null, + "refs": { + "AllowedMethods$Items": "A complex type that contains the HTTP methods that you want CloudFront to process and forward to your origin.", + "CachedMethods$Items": "A complex type that contains the HTTP methods that you want CloudFront to cache responses to." + } + }, + "MinimumProtocolVersion": { + "base": null, + "refs": { + "ViewerCertificate$MinimumProtocolVersion": "Specify the minimum version of the SSL protocol that you want CloudFront to use, SSLv3 or TLSv1, for HTTPS connections. CloudFront will serve your objects only to browsers or devices that support at least the SSL version that you specify. The TLSv1 protocol is more secure, so we recommend that you specify SSLv3 only if your users are using browsers or devices that don't support TLSv1. If you're using a custom certificate (if you specify a value for IAMCertificateId) and if you're using dedicated IP (if you specify vip for SSLSupportMethod), you can choose SSLv3 or TLSv1 as the MinimumProtocolVersion. If you're using a custom certificate (if you specify a value for IAMCertificateId) and if you're using SNI (if you specify sni-only for SSLSupportMethod), you must specify TLSv1 for MinimumProtocolVersion." + } + }, + "MissingBody": { + "base": "This operation requires a body. Ensure that the body is present and the Content-Type header is set.", + "refs": { + } + }, + "NoSuchCloudFrontOriginAccessIdentity": { + "base": "The specified origin access identity does not exist.", + "refs": { + } + }, + "NoSuchDistribution": { + "base": "The specified distribution does not exist.", + "refs": { + } + }, + "NoSuchInvalidation": { + "base": "The specified invalidation does not exist.", + "refs": { + } + }, + "NoSuchOrigin": { + "base": "No origin exists with the specified Origin Id.", + "refs": { + } + }, + "NoSuchStreamingDistribution": { + "base": "The specified streaming distribution does not exist.", + "refs": { + } + }, + "Origin": { + "base": "A complex type that describes the Amazon S3 bucket or the HTTP server (for example, a web server) from which CloudFront gets your files.You must create at least one origin.", + "refs": { + "OriginList$member": null + } + }, + "OriginList": { + "base": null, + "refs": { + "Origins$Items": "A complex type that contains origins for this distribution." + } + }, + "OriginProtocolPolicy": { + "base": null, + "refs": { + "CustomOriginConfig$OriginProtocolPolicy": "The origin protocol policy to apply to your origin." + } + }, + "Origins": { + "base": "A complex type that contains information about origins for this distribution.", + "refs": { + "DistributionConfig$Origins": "A complex type that contains information about origins for this distribution.", + "DistributionSummary$Origins": "A complex type that contains information about origins for this distribution." + } + }, + "PathList": { + "base": null, + "refs": { + "Paths$Items": "A complex type that contains a list of the objects that you want to invalidate." + } + }, + "Paths": { + "base": "A complex type that contains information about the objects that you want to invalidate.", + "refs": { + "InvalidationBatch$Paths": "The path of the object to invalidate. The path is relative to the distribution and must begin with a slash (/). You must enclose each invalidation object with the Path element tags. 
If the path includes non-ASCII characters or unsafe characters as defined in RFC 1738 (http://www.ietf.org/rfc/rfc1738.txt), URL encode those characters. Do not URL encode any other characters in the path, or CloudFront will not invalidate the old version of the updated object." + } + }, + "PreconditionFailed": { + "base": "The precondition given in one or more of the request-header fields evaluated to false.", + "refs": { + } + }, + "PriceClass": { + "base": null, + "refs": { + "DistributionConfig$PriceClass": "A complex type that contains information about price class for this distribution.", + "DistributionSummary$PriceClass": null, + "StreamingDistributionConfig$PriceClass": "A complex type that contains information about price class for this streaming distribution.", + "StreamingDistributionSummary$PriceClass": null + } + }, + "Restrictions": { + "base": "A complex type that identifies ways in which you want to restrict distribution of your content.", + "refs": { + "DistributionConfig$Restrictions": null, + "DistributionSummary$Restrictions": null + } + }, + "S3Origin": { + "base": "A complex type that contains information about the Amazon S3 bucket from which you want CloudFront to get your media files for distribution.", + "refs": { + "StreamingDistributionConfig$S3Origin": "A complex type that contains information about the Amazon S3 bucket from which you want CloudFront to get your media files for distribution.", + "StreamingDistributionSummary$S3Origin": "A complex type that contains information about the Amazon S3 bucket from which you want CloudFront to get your media files for distribution." + } + }, + "S3OriginConfig": { + "base": "A complex type that contains information about the Amazon S3 origin. If the origin is a custom origin, use the CustomOriginConfig element instead.", + "refs": { + "Origin$S3OriginConfig": "A complex type that contains information about the Amazon S3 origin. If the origin is a custom origin, use the CustomOriginConfig element instead." + } + }, + "SSLSupportMethod": { + "base": null, + "refs": { + "ViewerCertificate$SSLSupportMethod": "If you specify a value for IAMCertificateId, you must also specify how you want CloudFront to serve HTTPS requests. Valid values are vip and sni-only. If you specify vip, CloudFront uses dedicated IP addresses for your content and can respond to HTTPS requests from any viewer. However, you must request permission to use this feature, and you incur additional monthly charges. If you specify sni-only, CloudFront can only respond to HTTPS requests from viewers that support Server Name Indication (SNI). All modern browsers support SNI, but some browsers still in use don't support SNI. Do not specify a value for SSLSupportMethod if you specified true for CloudFrontDefaultCertificate." + } + }, + "Signer": { + "base": "A complex type that lists the AWS accounts that were included in the TrustedSigners complex type, as well as their active CloudFront key pair IDs, if any.", + "refs": { + "SignerList$member": null + } + }, + "SignerList": { + "base": null, + "refs": { + "ActiveTrustedSigners$Items": "A complex type that contains one Signer complex type for each unique trusted signer that is specified in the TrustedSigners complex type, including trusted signers in the default cache behavior and in all of the other cache behaviors."
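The SSLSupportMethod and MinimumProtocolVersion rules above interlock: sni-only avoids the dedicated-IP surcharge but requires TLSv1. A hedged fragment showing a custom-certificate, SNI-only ViewerCertificate (the certificate ID is a placeholder; the struct would be embedded in a DistributionConfig):

vc := &cloudfront.ViewerCertificate{
	IAMCertificateId:       aws.String("ASCAJEXAMPLE"), // placeholder IAM server certificate ID
	SSLSupportMethod:       aws.String("sni-only"),     // vip would require permission and extra charges
	MinimumProtocolVersion: aws.String("TLSv1"),        // required when SSLSupportMethod is sni-only
}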
+ } + }, + "StreamingDistribution": { + "base": "A streaming distribution.", + "refs": { + "CreateStreamingDistributionResult$StreamingDistribution": "The streaming distribution's information.", + "GetStreamingDistributionResult$StreamingDistribution": "The streaming distribution's information.", + "UpdateStreamingDistributionResult$StreamingDistribution": "The streaming distribution's information." + } + }, + "StreamingDistributionAlreadyExists": { + "base": null, + "refs": { + } + }, + "StreamingDistributionConfig": { + "base": "The configuration for the streaming distribution.", + "refs": { + "CreateStreamingDistributionRequest$StreamingDistributionConfig": "The streaming distribution's configuration information.", + "GetStreamingDistributionConfigResult$StreamingDistributionConfig": "The streaming distribution's configuration information.", + "StreamingDistribution$StreamingDistributionConfig": "The current configuration information for the streaming distribution.", + "UpdateStreamingDistributionRequest$StreamingDistributionConfig": "The streaming distribution's configuration information." + } + }, + "StreamingDistributionList": { + "base": "A streaming distribution list.", + "refs": { + "ListStreamingDistributionsResult$StreamingDistributionList": "The StreamingDistributionList type." + } + }, + "StreamingDistributionNotDisabled": { + "base": null, + "refs": { + } + }, + "StreamingDistributionSummary": { + "base": "A summary of the information for an Amazon CloudFront streaming distribution.", + "refs": { + "StreamingDistributionSummaryList$member": null + } + }, + "StreamingDistributionSummaryList": { + "base": null, + "refs": { + "StreamingDistributionList$Items": "A complex type that contains one StreamingDistributionSummary element for each distribution that was created by the current AWS account." + } + }, + "StreamingLoggingConfig": { + "base": "A complex type that controls whether access logs are written for this streaming distribution.", + "refs": { + "StreamingDistributionConfig$Logging": "A complex type that controls whether access logs are written for the streaming distribution." 
+ } + }, + "TooManyCacheBehaviors": { + "base": "You cannot create anymore cache behaviors for the distribution.", + "refs": { + } + }, + "TooManyCertificates": { + "base": "You cannot create anymore custom ssl certificates.", + "refs": { + } + }, + "TooManyCloudFrontOriginAccessIdentities": { + "base": "Processing your request would cause you to exceed the maximum number of origin access identities allowed.", + "refs": { + } + }, + "TooManyCookieNamesInWhiteList": { + "base": "Your request contains more cookie names in the whitelist than are allowed per cache behavior.", + "refs": { + } + }, + "TooManyDistributionCNAMEs": { + "base": "Your request contains more CNAMEs than are allowed per distribution.", + "refs": { + } + }, + "TooManyDistributions": { + "base": "Processing your request would cause you to exceed the maximum number of distributions allowed.", + "refs": { + } + }, + "TooManyHeadersInForwardedValues": { + "base": null, + "refs": { + } + }, + "TooManyInvalidationsInProgress": { + "base": "You have exceeded the maximum number of allowable InProgress invalidation batch requests, or invalidation objects.", + "refs": { + } + }, + "TooManyOrigins": { + "base": "You cannot create anymore origins for the distribution.", + "refs": { + } + }, + "TooManyStreamingDistributionCNAMEs": { + "base": null, + "refs": { + } + }, + "TooManyStreamingDistributions": { + "base": "Processing your request would cause you to exceed the maximum number of streaming distributions allowed.", + "refs": { + } + }, + "TooManyTrustedSigners": { + "base": "Your request contains more trusted signers than are allowed per distribution.", + "refs": { + } + }, + "TrustedSignerDoesNotExist": { + "base": "One or more of your trusted signers do not exist.", + "refs": { + } + }, + "TrustedSigners": { + "base": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.", + "refs": { + "CacheBehavior$TrustedSigners": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. 
To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.", + "DefaultCacheBehavior$TrustedSigners": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.", + "StreamingDistributionConfig$TrustedSigners": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.", + "StreamingDistributionSummary$TrustedSigners": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution." 
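The TrustedSigners machinery above is the server side of private content; the client side, creating the signed URLs themselves, ships with this SDK under service/cloudfront/sign. A minimal sketch, assuming a placeholder key pair ID and a local PEM file holding the matching CloudFront private key:

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/service/cloudfront/sign"
)

func main() {
	privKey, err := sign.LoadPEMPrivKeyFile("cloudfront-private-key.pem") // placeholder path
	if err != nil {
		fmt.Println("load key failed:", err)
		return
	}
	signer := sign.NewURLSigner("APKAEXAMPLEKEYPAIR", privKey) // placeholder key pair ID

	// Grant access to one object for one hour.
	signedURL, err := signer.Sign("https://d111111abcdef8.cloudfront.net/private/report.pdf",
		time.Now().Add(1*time.Hour))
	if err != nil {
		fmt.Println("sign failed:", err)
		return
	}
	fmt.Println(signedURL)
}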
+ } + }, + "UpdateCloudFrontOriginAccessIdentityRequest": { + "base": "The request to update an origin access identity.", + "refs": { + } + }, + "UpdateCloudFrontOriginAccessIdentityResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "UpdateDistributionRequest": { + "base": "The request to update a distribution.", + "refs": { + } + }, + "UpdateDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "UpdateStreamingDistributionRequest": { + "base": "The request to update a streaming distribution.", + "refs": { + } + }, + "UpdateStreamingDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "ViewerCertificate": { + "base": "A complex type that contains information about viewer certificates for this distribution.", + "refs": { + "DistributionConfig$ViewerCertificate": null, + "DistributionSummary$ViewerCertificate": null + } + }, + "ViewerProtocolPolicy": { + "base": null, + "refs": { + "CacheBehavior$ViewerProtocolPolicy": "Use this element to specify the protocol that users can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern. If you want CloudFront to allow end users to use any available protocol, specify allow-all. If you want CloudFront to require HTTPS, specify https. If you want CloudFront to respond to an HTTP request with an HTTP status code of 301 (Moved Permanently) and the HTTPS URL, specify redirect-to-https. The viewer then resubmits the request using the HTTPS URL.", + "DefaultCacheBehavior$ViewerProtocolPolicy": "Use this element to specify the protocol that users can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern. If you want CloudFront to allow end users to use any available protocol, specify allow-all. If you want CloudFront to require HTTPS, specify https. If you want CloudFront to respond to an HTTP request with an HTTP status code of 301 (Moved Permanently) and the HTTPS URL, specify redirect-to-https. The viewer then resubmits the request using the HTTPS URL." + } + }, + "boolean": { + "base": null, + "refs": { + "ActiveTrustedSigners$Enabled": "Each active trusted signer.", + "CacheBehavior$SmoothStreaming": "Indicates whether you want to distribute media files in Microsoft Smooth Streaming format using the origin that is associated with this cache behavior. If so, specify true; if not, specify false.", + "CloudFrontOriginAccessIdentityList$IsTruncated": "A flag that indicates whether more origin access identities remain to be listed. If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more items in the list.", + "DefaultCacheBehavior$SmoothStreaming": "Indicates whether you want to distribute media files in Microsoft Smooth Streaming format using the origin that is associated with this cache behavior. If so, specify true; if not, specify false.", + "DistributionConfig$Enabled": "Whether the distribution is enabled to accept end user requests for content.", + "DistributionList$IsTruncated": "A flag that indicates whether more distributions remain to be listed. 
If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more distributions in the list.", + "DistributionSummary$Enabled": "Whether the distribution is enabled to accept end user requests for content.", + "ForwardedValues$QueryString": "Indicates whether you want CloudFront to forward query strings to the origin that is associated with this cache behavior. If so, specify true; if not, specify false.", + "InvalidationList$IsTruncated": "A flag that indicates whether more invalidation batch requests remain to be listed. If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more invalidation batches in the list.", + "LoggingConfig$Enabled": "Specifies whether you want CloudFront to save access logs to an Amazon S3 bucket. If you do not want to enable logging when you create a distribution or if you want to disable logging for an existing distribution, specify false for Enabled, and specify empty Bucket and Prefix elements. If you specify false for Enabled but you specify values for Bucket, Prefix, and IncludeCookies, the values are automatically deleted.", + "LoggingConfig$IncludeCookies": "Specifies whether you want CloudFront to include cookies in access logs; if so, specify true for IncludeCookies. If you choose to include cookies in logs, CloudFront logs all cookies regardless of how you configure the cache behaviors for this distribution. If you do not want to include cookies when you create a distribution or if you want to stop including cookies for an existing distribution, specify false for IncludeCookies.", + "StreamingDistributionConfig$Enabled": "Whether the streaming distribution is enabled to accept end user requests for content.", + "StreamingDistributionList$IsTruncated": "A flag that indicates whether more streaming distributions remain to be listed. If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more distributions in the list.", + "StreamingDistributionSummary$Enabled": "Whether the distribution is enabled to accept end user requests for content.", + "StreamingLoggingConfig$Enabled": "Specifies whether you want CloudFront to save access logs to an Amazon S3 bucket. If you do not want to enable logging when you create a streaming distribution or if you want to disable logging for an existing streaming distribution, specify false for Enabled, and specify empty Bucket and Prefix elements. If you specify false for Enabled but you specify values for Bucket and Prefix, the values are automatically deleted.", + "TrustedSigners$Enabled": "Specifies whether you want to require end users to use signed URLs to access the files specified by PathPattern and TargetOriginId.", + "ViewerCertificate$CloudFrontDefaultCertificate": "If you want viewers to use HTTPS to request your objects and you're using the CloudFront domain name of your distribution in your object URLs (for example, https://d111111abcdef8.cloudfront.net/logo.jpg), set to true. Omit this value if you are setting an IAMCertificateId." + } + }, + "integer": { + "base": null, + "refs": { + "ActiveTrustedSigners$Quantity": "The number of unique trusted signers included in all cache behaviors.
For example, if three cache behaviors all list the same three AWS accounts, the value of Quantity for ActiveTrustedSigners will be 3.", + "Aliases$Quantity": "The number of CNAMEs, if any, for this distribution.", + "AllowedMethods$Quantity": "The number of HTTP methods that you want CloudFront to forward to your origin. Valid values are 2 (for GET and HEAD requests), 3 (for GET, HEAD and OPTIONS requests) and 7 (for GET, HEAD, OPTIONS, PUT, PATCH, POST, and DELETE requests).", + "CacheBehaviors$Quantity": "The number of cache behaviors for this distribution.", + "CachedMethods$Quantity": "The number of HTTP methods for which you want CloudFront to cache responses. Valid values are 2 (for caching responses to GET and HEAD requests) and 3 (for caching responses to GET, HEAD, and OPTIONS requests).", + "CloudFrontOriginAccessIdentityList$MaxItems": "The value you provided for the MaxItems request parameter.", + "CloudFrontOriginAccessIdentityList$Quantity": "The number of CloudFront origin access identities that were created by the current AWS account.", + "CookieNames$Quantity": "The number of whitelisted cookies for this cache behavior.", + "CustomErrorResponse$ErrorCode": "The 4xx or 5xx HTTP status code that you want to customize. For a list of HTTP status codes that you can customize, see CloudFront documentation.", + "CustomErrorResponses$Quantity": "The number of custom error responses for this distribution.", + "CustomOriginConfig$HTTPPort": "The HTTP port the custom origin listens on.", + "CustomOriginConfig$HTTPSPort": "The HTTPS port the custom origin listens on.", + "Distribution$InProgressInvalidationBatches": "The number of invalidation batches currently in progress.", + "DistributionList$MaxItems": "The value you provided for the MaxItems request parameter.", + "DistributionList$Quantity": "The number of distributions that were created by the current AWS account.", + "GeoRestriction$Quantity": "When geo restriction is enabled, this is the number of countries in your whitelist or blacklist. Otherwise, when it is not enabled, Quantity is 0, and you can omit Items.", + "Headers$Quantity": "The number of different headers that you want CloudFront to forward to the origin and to vary on for this cache behavior. The maximum number of headers that you can specify by name is 10. If you want CloudFront to forward all headers to the origin and vary on all of them, specify 1 for Quantity and * for Name. If you don't want CloudFront to forward any additional headers to the origin or to vary on any headers, specify 0 for Quantity and omit Items.", + "InvalidationList$MaxItems": "The value you provided for the MaxItems request parameter.", + "InvalidationList$Quantity": "The number of invalidation batches that were created by the current AWS account.", + "KeyPairIds$Quantity": "The number of active CloudFront key pairs for AwsAccountNumber.", + "Origins$Quantity": "The number of origins for this distribution.", + "Paths$Quantity": "The number of objects that you want to invalidate.", + "StreamingDistributionList$MaxItems": "The value you provided for the MaxItems request parameter.", + "StreamingDistributionList$Quantity": "The number of streaming distributions that were created by the current AWS account.", + "TrustedSigners$Quantity": "The number of trusted signers for this cache behavior." 
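A recurring trap in this model: every list shape carries an explicit Quantity that must equal the length of Items, and mismatches fail with the InconsistentQuantities error documented earlier. An illustrative helper (not part of the SDK) that keeps the pair in sync when whitelisting forwarded cookie names:

// cookieNames builds a CookieNames whose Quantity always matches len(Items).
func cookieNames(names ...string) *cloudfront.CookieNames {
	items := make([]*string, 0, len(names))
	for _, n := range names {
		items = append(items, aws.String(n))
	}
	return &cloudfront.CookieNames{
		Quantity: aws.Int64(int64(len(items))),
		Items:    items,
	}
}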
+ } + }, + "long": { + "base": null, + "refs": { + "CacheBehavior$MinTTL": "The minimum amount of time that you want objects to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated.You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "CacheBehavior$DefaultTTL": "If you don't configure your origin to add a Cache-Control max-age directive or an Expires header, DefaultTTL is the default amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin does not add HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "CacheBehavior$MaxTTL": "The maximum amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin adds HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "CustomErrorResponse$ErrorCachingMinTTL": "The minimum amount of time you want HTTP error codes to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated. You can specify a value from 0 to 31,536,000.", + "DefaultCacheBehavior$MinTTL": "The minimum amount of time that you want objects to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated.You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "DefaultCacheBehavior$DefaultTTL": "If you don't configure your origin to add a Cache-Control max-age directive or an Expires header, DefaultTTL is the default amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin does not add HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "DefaultCacheBehavior$MaxTTL": "The maximum amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin adds HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. You can specify a value from 0 to 3,153,600,000 seconds (100 years)." + } + }, + "string": { + "base": null, + "refs": { + "AccessDenied$Message": null, + "AliasList$member": null, + "AwsAccountNumberList$member": null, + "BatchTooLarge$Message": null, + "CNAMEAlreadyExists$Message": null, + "CacheBehavior$PathPattern": "The pattern (for example, images/*.jpg) that specifies which requests you want this cache behavior to apply to. When CloudFront receives an end-user request, the requested path is compared with path patterns in the order in which cache behaviors are listed in the distribution. The path pattern for the default cache behavior is * and cannot be changed. 
If the request for an object does not match the path pattern for any cache behaviors, CloudFront applies the behavior in the default cache behavior.", + "CacheBehavior$TargetOriginId": "The value of ID for the origin that you want CloudFront to route requests to when a request matches the path pattern either for a cache behavior or for the default cache behavior.", + "CloudFrontOriginAccessIdentity$Id": "The ID for the origin access identity. For example: E74FTE3AJFJ256A.", + "CloudFrontOriginAccessIdentity$S3CanonicalUserId": "The Amazon S3 canonical user ID for the origin access identity, which you use when giving the origin access identity read permission to an object in Amazon S3.", + "CloudFrontOriginAccessIdentityAlreadyExists$Message": null, + "CloudFrontOriginAccessIdentityConfig$CallerReference": "A unique number that ensures the request can't be replayed. If the CallerReference is new (no matter the content of the CloudFrontOriginAccessIdentityConfig object), a new origin access identity is created. If the CallerReference is a value you already sent in a previous request to create an identity, and the content of the CloudFrontOriginAccessIdentityConfig is identical to the original request (ignoring white space), the response includes the same information returned to the original request. If the CallerReference is a value you already sent in a previous request to create an identity but the content of the CloudFrontOriginAccessIdentityConfig is different from the original request, CloudFront returns a CloudFrontOriginAccessIdentityAlreadyExists error.", + "CloudFrontOriginAccessIdentityConfig$Comment": "Any comments you want to include about the origin access identity.", + "CloudFrontOriginAccessIdentityInUse$Message": null, + "CloudFrontOriginAccessIdentityList$Marker": "The value you provided for the Marker request parameter.", + "CloudFrontOriginAccessIdentityList$NextMarker": "If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your origin access identities where they left off.", + "CloudFrontOriginAccessIdentitySummary$Id": "The ID for the origin access identity. For example: E74FTE3AJFJ256A.", + "CloudFrontOriginAccessIdentitySummary$S3CanonicalUserId": "The Amazon S3 canonical user ID for the origin access identity, which you use when giving the origin access identity read permission to an object in Amazon S3.", + "CloudFrontOriginAccessIdentitySummary$Comment": "The comment for this origin access identity, as originally specified when created.", + "CookieNameList$member": null, + "CreateCloudFrontOriginAccessIdentityResult$Location": "The fully qualified URI of the new origin access identity just created. For example: https://cloudfront.amazonaws.com/2010-11-01/origin-access-identity/cloudfront/E74FTE3AJFJ256A.", + "CreateCloudFrontOriginAccessIdentityResult$ETag": "The current version of the origin access identity created.", + "CreateDistributionResult$Location": "The fully qualified URI of the new distribution resource just created. 
For example: https://cloudfront.amazonaws.com/2010-11-01/distribution/EDFDVBD632BHDS5.", + "CreateDistributionResult$ETag": "The current version of the distribution created.", + "CreateInvalidationRequest$DistributionId": "The distribution's id.", + "CreateInvalidationResult$Location": "The fully qualified URI of the distribution and invalidation batch request, including the Invalidation ID.", + "CreateStreamingDistributionResult$Location": "The fully qualified URI of the new streaming distribution resource just created. For example: https://cloudfront.amazonaws.com/2010-11-01/streaming-distribution/EGTXBD79H29TRA8.", + "CreateStreamingDistributionResult$ETag": "The current version of the streaming distribution created.", + "CustomErrorResponse$ResponsePagePath": "The path of the custom error page (for example, /custom_404.html). The path is relative to the distribution and must begin with a slash (/). If the path includes any non-ASCII characters or unsafe characters as defined in RFC 1738 (http://www.ietf.org/rfc/rfc1738.txt), URL encode those characters. Do not URL encode any other characters in the path, or CloudFront will not return the custom error page to the viewer.", + "CustomErrorResponse$ResponseCode": "The HTTP status code that you want CloudFront to return with the custom error page to the viewer. For a list of HTTP status codes that you can replace, see CloudFront documentation.", + "DefaultCacheBehavior$TargetOriginId": "The value of ID for the origin that you want CloudFront to route requests to when a request matches the path pattern either for a cache behavior or for the default cache behavior.", + "DeleteCloudFrontOriginAccessIdentityRequest$Id": "The origin access identity's id.", + "DeleteCloudFrontOriginAccessIdentityRequest$IfMatch": "The value of the ETag header you received from a previous GET or PUT request. For example: E2QWRUHAPOMQZL.", + "DeleteDistributionRequest$Id": "The distribution id.", + "DeleteDistributionRequest$IfMatch": "The value of the ETag header you received when you disabled the distribution. For example: E2QWRUHAPOMQZL.", + "DeleteStreamingDistributionRequest$Id": "The streaming distribution id.", + "DeleteStreamingDistributionRequest$IfMatch": "The value of the ETag header you received when you disabled the streaming distribution. For example: E2QWRUHAPOMQZL.", + "Distribution$Id": "The identifier for the distribution. For example: EDFDVBD632BHDS5.", + "Distribution$Status": "This response element indicates the current status of the distribution. When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.", + "Distribution$DomainName": "The domain name corresponding to the distribution. For example: d604721fxaaqy9.cloudfront.net.", + "DistributionAlreadyExists$Message": null, + "DistributionConfig$CallerReference": "A unique number that ensures the request can't be replayed. If the CallerReference is new (no matter the content of the DistributionConfig object), a new distribution is created. If the CallerReference is a value you already sent in a previous request to create a distribution, and the content of the DistributionConfig is identical to the original request (ignoring white space), the response includes the same information returned to the original request.
If the CallerReference is a value you already sent in a previous request to create a distribution but the content of the DistributionConfig is different from the original request, CloudFront returns a DistributionAlreadyExists error.", + "DistributionConfig$DefaultRootObject": "The object that you want CloudFront to return (for example, index.html) when an end user requests the root URL for your distribution (http://www.example.com) instead of an object in your distribution (http://www.example.com/index.html). Specifying a default root object avoids exposing the contents of your distribution. If you don't want to specify a default root object when you create a distribution, include an empty DefaultRootObject element. To delete the default root object from an existing distribution, update the distribution configuration and include an empty DefaultRootObject element. To replace the default root object, update the distribution configuration and specify the new object.", + "DistributionConfig$Comment": "Any comments you want to include about the distribution.", + "DistributionConfig$WebACLId": "(Optional) If you're using AWS WAF to filter CloudFront requests, the Id of the AWS WAF web ACL that is associated with the distribution.", + "DistributionList$Marker": "The value you provided for the Marker request parameter.", + "DistributionList$NextMarker": "If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your distributions where they left off.", + "DistributionNotDisabled$Message": null, + "DistributionSummary$Id": "The identifier for the distribution. For example: EDFDVBD632BHDS5.", + "DistributionSummary$Status": "This response element indicates the current status of the distribution. When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.", + "DistributionSummary$DomainName": "The domain name corresponding to the distribution. For example: d604721fxaaqy9.cloudfront.net.", + "DistributionSummary$Comment": "The comment originally specified when this distribution was created.", + "DistributionSummary$WebACLId": "The Web ACL Id (if any) associated with the distribution.", + "GetCloudFrontOriginAccessIdentityConfigRequest$Id": "The identity's id.", + "GetCloudFrontOriginAccessIdentityConfigResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "GetCloudFrontOriginAccessIdentityRequest$Id": "The identity's id.", + "GetCloudFrontOriginAccessIdentityResult$ETag": "The current version of the origin access identity's information. For example: E2QWRUHAPOMQZL.", + "GetDistributionConfigRequest$Id": "The distribution's id.", + "GetDistributionConfigResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "GetDistributionRequest$Id": "The distribution's id.", + "GetDistributionResult$ETag": "The current version of the distribution's information. For example: E2QWRUHAPOMQZL.", + "GetInvalidationRequest$DistributionId": "The distribution's id.", + "GetInvalidationRequest$Id": "The invalidation's id.", + "GetStreamingDistributionConfigRequest$Id": "The streaming distribution's id.", + "GetStreamingDistributionConfigResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "GetStreamingDistributionRequest$Id": "The streaming distribution's id.", + "GetStreamingDistributionResult$ETag": "The current version of the streaming distribution's information. 
For example: E2QWRUHAPOMQZL.", + "HeaderList$member": null, + "IllegalUpdate$Message": null, + "InconsistentQuantities$Message": null, + "InvalidArgument$Message": null, + "InvalidDefaultRootObject$Message": null, + "InvalidErrorCode$Message": null, + "InvalidForwardCookies$Message": null, + "InvalidGeoRestrictionParameter$Message": null, + "InvalidHeadersForS3Origin$Message": null, + "InvalidIfMatchVersion$Message": null, + "InvalidLocationCode$Message": null, + "InvalidMinimumProtocolVersion$Message": null, + "InvalidOrigin$Message": null, + "InvalidOriginAccessIdentity$Message": null, + "InvalidProtocolSettings$Message": null, + "InvalidRelativePath$Message": null, + "InvalidRequiredProtocol$Message": null, + "InvalidResponseCode$Message": null, + "InvalidTTLOrder$Message": null, + "InvalidViewerCertificate$Message": null, + "InvalidWebACLId$Message": null, + "Invalidation$Id": "The identifier for the invalidation request. For example: IDFDVBD632BHDS5.", + "Invalidation$Status": "The status of the invalidation request. When the invalidation batch is finished, the status is Completed.", + "InvalidationBatch$CallerReference": "A unique name that ensures the request can't be replayed. If the CallerReference is new (no matter the content of the Path object), a new invalidation batch is created. If the CallerReference is a value you already sent in a previous request to create an invalidation batch, and the content of each Path element is identical to the original request, the response includes the same information returned to the original request. If the CallerReference is a value you already sent in a previous request to create an invalidation batch but the content of any Path is different from the original request, CloudFront returns an InvalidationBatchAlreadyExists error.", + "InvalidationList$Marker": "The value you provided for the Marker request parameter.", + "InvalidationList$NextMarker": "If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your invalidation batches where they left off.", + "InvalidationSummary$Id": "The unique ID for an invalidation request.", + "InvalidationSummary$Status": "The status of an invalidation request.", + "KeyPairIdList$member": null, + "ListCloudFrontOriginAccessIdentitiesRequest$Marker": "Use this when paginating results to indicate where to begin in your list of origin access identities. The results include identities in the list that occur after the marker. To get the next page of results, set the Marker to the value of the NextMarker from the current page's response (which is also the ID of the last identity on that page).", + "ListCloudFrontOriginAccessIdentitiesRequest$MaxItems": "The maximum number of origin access identities you want in the response body.", + "ListDistributionsByWebACLIdRequest$Marker": "Use Marker and MaxItems to control pagination of results. If you have more than MaxItems distributions that satisfy the request, the response includes a NextMarker element. To get the next page of results, submit another request. For the value of Marker, specify the value of NextMarker from the last response. (For the first request, omit Marker.)", + "ListDistributionsByWebACLIdRequest$MaxItems": "The maximum number of distributions that you want CloudFront to return in the response body. The maximum and default values are both 100.", + "ListDistributionsByWebACLIdRequest$WebACLId": "The Id of the AWS WAF web ACL for which you want to list the associated distributions.
If you specify \"null\" for the Id, the request returns a list of the distributions that aren't associated with a web ACL.", + "ListDistributionsRequest$Marker": "Use Marker and MaxItems to control pagination of results. If you have more than MaxItems distributions that satisfy the request, the response includes a NextMarker element. To get the next page of results, submit another request. For the value of Marker, specify the value of NextMarker from the last response. (For the first request, omit Marker.)", + "ListDistributionsRequest$MaxItems": "The maximum number of distributions that you want CloudFront to return in the response body. The maximum and default values are both 100.", + "ListInvalidationsRequest$DistributionId": "The distribution's id.", + "ListInvalidationsRequest$Marker": "Use this parameter when paginating results to indicate where to begin in your list of invalidation batches. Because the results are returned in decreasing order from most recent to oldest, the most recent results are on the first page, the second page will contain earlier results, and so on. To get the next page of results, set the Marker to the value of the NextMarker from the current page's response. This value is the same as the ID of the last invalidation batch on that page.", + "ListInvalidationsRequest$MaxItems": "The maximum number of invalidation batches you want in the response body.", + "ListStreamingDistributionsRequest$Marker": "Use this when paginating results to indicate where to begin in your list of streaming distributions. The results include distributions in the list that occur after the marker. To get the next page of results, set the Marker to the value of the NextMarker from the current page's response (which is also the ID of the last distribution on that page).", + "ListStreamingDistributionsRequest$MaxItems": "The maximum number of streaming distributions you want in the response body.", + "LocationList$member": null, + "LoggingConfig$Bucket": "The Amazon S3 bucket to store the access logs in, for example, myawslogbucket.s3.amazonaws.com.", + "LoggingConfig$Prefix": "An optional string that you want CloudFront to prefix to the access log filenames for this distribution, for example, myprefix/. If you want to enable logging, but you do not want to specify a prefix, you still must include an empty Prefix element in the Logging element.", + "MissingBody$Message": null, + "NoSuchCloudFrontOriginAccessIdentity$Message": null, + "NoSuchDistribution$Message": null, + "NoSuchInvalidation$Message": null, + "NoSuchOrigin$Message": null, + "NoSuchStreamingDistribution$Message": null, + "Origin$Id": "A unique identifier for the origin. The value of Id must be unique within the distribution. You use the value of Id when you create a cache behavior. The Id identifies the origin that CloudFront routes a request to when the request matches the path pattern for that cache behavior.", + "Origin$DomainName": "Amazon S3 origins: The DNS name of the Amazon S3 bucket from which you want CloudFront to get objects for this origin, for example, myawsbucket.s3.amazonaws.com. Custom origins: The DNS domain name for the HTTP server from which you want CloudFront to get objects for this origin, for example, www.example.com.", + "Origin$OriginPath": "An optional element that causes CloudFront to request your content from a directory in your Amazon S3 bucket or your custom origin. When you include the OriginPath element, specify the directory name, beginning with a /. 
CloudFront appends the directory name to the value of DomainName.", + "PathList$member": null, + "PreconditionFailed$Message": null, + "S3Origin$DomainName": "The DNS name of the S3 origin.", + "S3Origin$OriginAccessIdentity": "Your S3 origin's origin access identity.", + "S3OriginConfig$OriginAccessIdentity": "The CloudFront origin access identity to associate with the origin. Use an origin access identity to configure the origin so that end users can only access objects in an Amazon S3 bucket through CloudFront. If you want end users to be able to access objects using either the CloudFront URL or the Amazon S3 URL, specify an empty OriginAccessIdentity element. To delete the origin access identity from an existing distribution, update the distribution configuration and include an empty OriginAccessIdentity element. To replace the origin access identity, update the distribution configuration and specify the new origin access identity. Use the format origin-access-identity/cloudfront/Id where Id is the value that CloudFront returned in the Id element when you created the origin access identity.", + "Signer$AwsAccountNumber": "Specifies an AWS account that can create signed URLs. Values: self, which indicates that the AWS account that was used to create the distribution can create signed URLs, or an AWS account number. Omit the dashes in the account number.", + "StreamingDistribution$Id": "The identifier for the streaming distribution. For example: EGTXBD79H29TRA8.", + "StreamingDistribution$Status": "The current status of the streaming distribution. When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.", + "StreamingDistribution$DomainName": "The domain name corresponding to the streaming distribution. For example: s5c39gqb8ow64r.cloudfront.net.", + "StreamingDistributionAlreadyExists$Message": null, + "StreamingDistributionConfig$CallerReference": "A unique number that ensures the request can't be replayed. If the CallerReference is new (no matter the content of the StreamingDistributionConfig object), a new streaming distribution is created. If the CallerReference is a value you already sent in a previous request to create a streaming distribution, and the content of the StreamingDistributionConfig is identical to the original request (ignoring white space), the response includes the same information returned to the original request. If the CallerReference is a value you already sent in a previous request to create a streaming distribution but the content of the StreamingDistributionConfig is different from the original request, CloudFront returns a StreamingDistributionAlreadyExists error.", + "StreamingDistributionConfig$Comment": "Any comments you want to include about the streaming distribution.", + "StreamingDistributionList$Marker": "The value you provided for the Marker request parameter.", + "StreamingDistributionList$NextMarker": "If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your streaming distributions where they left off.", + "StreamingDistributionNotDisabled$Message": null, + "StreamingDistributionSummary$Id": "The identifier for the distribution. For example: EDFDVBD632BHDS5.", + "StreamingDistributionSummary$Status": "Indicates the current status of the distribution.
When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.", + "StreamingDistributionSummary$DomainName": "The domain name corresponding to the distribution. For example: d604721fxaaqy9.cloudfront.net.", + "StreamingDistributionSummary$Comment": "The comment originally specified when this distribution was created.", + "StreamingLoggingConfig$Bucket": "The Amazon S3 bucket to store the access logs in, for example, myawslogbucket.s3.amazonaws.com.", + "StreamingLoggingConfig$Prefix": "An optional string that you want CloudFront to prefix to the access log filenames for this streaming distribution, for example, myprefix/. If you want to enable logging, but you do not want to specify a prefix, you still must include an empty Prefix element in the Logging element.", + "TooManyCacheBehaviors$Message": null, + "TooManyCertificates$Message": null, + "TooManyCloudFrontOriginAccessIdentities$Message": null, + "TooManyCookieNamesInWhiteList$Message": null, + "TooManyDistributionCNAMEs$Message": null, + "TooManyDistributions$Message": null, + "TooManyHeadersInForwardedValues$Message": null, + "TooManyInvalidationsInProgress$Message": null, + "TooManyOrigins$Message": null, + "TooManyStreamingDistributionCNAMEs$Message": null, + "TooManyStreamingDistributions$Message": null, + "TooManyTrustedSigners$Message": null, + "TrustedSignerDoesNotExist$Message": null, + "UpdateCloudFrontOriginAccessIdentityRequest$Id": "The identity's id.", + "UpdateCloudFrontOriginAccessIdentityRequest$IfMatch": "The value of the ETag header you received when retrieving the identity's configuration. For example: E2QWRUHAPOMQZL.", + "UpdateCloudFrontOriginAccessIdentityResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "UpdateDistributionRequest$Id": "The distribution's id.", + "UpdateDistributionRequest$IfMatch": "The value of the ETag header you received when retrieving the distribution's configuration. For example: E2QWRUHAPOMQZL.", + "UpdateDistributionResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "UpdateStreamingDistributionRequest$Id": "The streaming distribution's id.", + "UpdateStreamingDistributionRequest$IfMatch": "The value of the ETag header you received when retrieving the streaming distribution's configuration. For example: E2QWRUHAPOMQZL.", + "UpdateStreamingDistributionResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "ViewerCertificate$IAMCertificateId": "If you want viewers to use HTTPS to request your objects and you're using an alternate domain name in your object URLs (for example, https://example.com/logo.jpg), specify the IAM certificate identifier of the custom viewer certificate for this distribution. Specify either this value or CloudFrontDefaultCertificate." + } + }, + "timestamp": { + "base": null, + "refs": { + "Distribution$LastModifiedTime": "The date and time the distribution was last modified.", + "DistributionSummary$LastModifiedTime": "The date and time the distribution was last modified.", + "Invalidation$CreateTime": "The date and time the invalidation request was first made.", + "InvalidationSummary$CreateTime": null, + "StreamingDistribution$LastModifiedTime": "The date and time the distribution was last modified.", + "StreamingDistributionSummary$LastModifiedTime": "The date and time the distribution was last modified." 
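The Deployed and Completed status strings documented above are exactly what the waiter definitions in the waiters-2.json file added below poll for. A sketch of blocking until a distribution finishes deploying, via the waiter the SDK generates from definitions like these (placeholder ID; imports and client construction as in the earlier sketches):

// Polls GetDistribution every 60s, up to 25 attempts, until Status is "Deployed".
err := svc.WaitUntilDistributionDeployed(&cloudfront.GetDistributionInput{
	Id: aws.String("EDFDVBD632BHDS5"), // placeholder ID
})
if err != nil {
	fmt.Println("distribution never reached Deployed:", err)
}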
+ } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-07-27/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-07-27/paginators-1.json new file mode 100644 index 000000000..51fbb907f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-07-27/paginators-1.json @@ -0,0 +1,32 @@ +{ + "pagination": { + "ListCloudFrontOriginAccessIdentities": { + "input_token": "Marker", + "output_token": "CloudFrontOriginAccessIdentityList.NextMarker", + "limit_key": "MaxItems", + "more_results": "CloudFrontOriginAccessIdentityList.IsTruncated", + "result_key": "CloudFrontOriginAccessIdentityList.Items" + }, + "ListDistributions": { + "input_token": "Marker", + "output_token": "DistributionList.NextMarker", + "limit_key": "MaxItems", + "more_results": "DistributionList.IsTruncated", + "result_key": "DistributionList.Items" + }, + "ListInvalidations": { + "input_token": "Marker", + "output_token": "InvalidationList.NextMarker", + "limit_key": "MaxItems", + "more_results": "InvalidationList.IsTruncated", + "result_key": "InvalidationList.Items" + }, + "ListStreamingDistributions": { + "input_token": "Marker", + "output_token": "StreamingDistributionList.NextMarker", + "limit_key": "MaxItems", + "more_results": "StreamingDistributionList.IsTruncated", + "result_key": "StreamingDistributionList.Items" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-07-27/waiters-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-07-27/waiters-2.json new file mode 100644 index 000000000..f6d3ba7bc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-07-27/waiters-2.json @@ -0,0 +1,47 @@ +{ + "version": 2, + "waiters": { + "DistributionDeployed": { + "delay": 60, + "operation": "GetDistribution", + "maxAttempts": 25, + "description": "Wait until a distribution is deployed.", + "acceptors": [ + { + "expected": "Deployed", + "matcher": "path", + "state": "success", + "argument": "Status" + } + ] + }, + "InvalidationCompleted": { + "delay": 20, + "operation": "GetInvalidation", + "maxAttempts": 30, + "description": "Wait until an invalidation has completed.", + "acceptors": [ + { + "expected": "Completed", + "matcher": "path", + "state": "success", + "argument": "Status" + } + ] + }, + "StreamingDistributionDeployed": { + "delay": 60, + "operation": "GetStreamingDistribution", + "maxAttempts": 25, + "description": "Wait until a streaming distribution is deployed.", + "acceptors": [ + { + "expected": "Deployed", + "matcher": "path", + "state": "success", + "argument": "Status" + } + ] + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-09-17/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-09-17/api-2.json new file mode 100644 index 000000000..374b7e5e9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-09-17/api-2.json @@ -0,0 +1,2150 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-09-17", + "endpointPrefix":"cloudfront", + "globalEndpoint":"cloudfront.amazonaws.com", + "protocol":"rest-xml", + "serviceAbbreviation":"CloudFront", + "serviceFullName":"Amazon CloudFront", + "signatureVersion":"v4" + }, + "operations":{ + "CreateCloudFrontOriginAccessIdentity":{ + "name":"CreateCloudFrontOriginAccessIdentity2015_09_17", + "http":{ + "method":"POST", + "requestUri":"/2015-09-17/origin-access-identity/cloudfront", + "responseCode":201 + }, + 
"input":{"shape":"CreateCloudFrontOriginAccessIdentityRequest"}, + "output":{"shape":"CreateCloudFrontOriginAccessIdentityResult"}, + "errors":[ + {"shape":"CloudFrontOriginAccessIdentityAlreadyExists"}, + {"shape":"MissingBody"}, + {"shape":"TooManyCloudFrontOriginAccessIdentities"}, + {"shape":"InvalidArgument"}, + {"shape":"InconsistentQuantities"} + ] + }, + "CreateDistribution":{ + "name":"CreateDistribution2015_09_17", + "http":{ + "method":"POST", + "requestUri":"/2015-09-17/distribution", + "responseCode":201 + }, + "input":{"shape":"CreateDistributionRequest"}, + "output":{"shape":"CreateDistributionResult"}, + "errors":[ + {"shape":"CNAMEAlreadyExists"}, + {"shape":"DistributionAlreadyExists"}, + {"shape":"InvalidOrigin"}, + {"shape":"InvalidOriginAccessIdentity"}, + {"shape":"AccessDenied"}, + {"shape":"TooManyTrustedSigners"}, + {"shape":"TrustedSignerDoesNotExist"}, + {"shape":"InvalidViewerCertificate"}, + {"shape":"InvalidMinimumProtocolVersion"}, + {"shape":"MissingBody"}, + {"shape":"TooManyDistributionCNAMEs"}, + {"shape":"TooManyDistributions"}, + {"shape":"InvalidDefaultRootObject"}, + {"shape":"InvalidRelativePath"}, + {"shape":"InvalidErrorCode"}, + {"shape":"InvalidResponseCode"}, + {"shape":"InvalidArgument"}, + {"shape":"InvalidRequiredProtocol"}, + {"shape":"NoSuchOrigin"}, + {"shape":"TooManyOrigins"}, + {"shape":"TooManyCacheBehaviors"}, + {"shape":"TooManyCookieNamesInWhiteList"}, + {"shape":"InvalidForwardCookies"}, + {"shape":"TooManyHeadersInForwardedValues"}, + {"shape":"InvalidHeadersForS3Origin"}, + {"shape":"InconsistentQuantities"}, + {"shape":"TooManyCertificates"}, + {"shape":"InvalidLocationCode"}, + {"shape":"InvalidGeoRestrictionParameter"}, + {"shape":"InvalidProtocolSettings"}, + {"shape":"InvalidTTLOrder"}, + {"shape":"InvalidWebACLId"} + ] + }, + "CreateInvalidation":{ + "name":"CreateInvalidation2015_09_17", + "http":{ + "method":"POST", + "requestUri":"/2015-09-17/distribution/{DistributionId}/invalidation", + "responseCode":201 + }, + "input":{"shape":"CreateInvalidationRequest"}, + "output":{"shape":"CreateInvalidationResult"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"MissingBody"}, + {"shape":"InvalidArgument"}, + {"shape":"NoSuchDistribution"}, + {"shape":"BatchTooLarge"}, + {"shape":"TooManyInvalidationsInProgress"}, + {"shape":"InconsistentQuantities"} + ] + }, + "CreateStreamingDistribution":{ + "name":"CreateStreamingDistribution2015_09_17", + "http":{ + "method":"POST", + "requestUri":"/2015-09-17/streaming-distribution", + "responseCode":201 + }, + "input":{"shape":"CreateStreamingDistributionRequest"}, + "output":{"shape":"CreateStreamingDistributionResult"}, + "errors":[ + {"shape":"CNAMEAlreadyExists"}, + {"shape":"StreamingDistributionAlreadyExists"}, + {"shape":"InvalidOrigin"}, + {"shape":"InvalidOriginAccessIdentity"}, + {"shape":"AccessDenied"}, + {"shape":"TooManyTrustedSigners"}, + {"shape":"TrustedSignerDoesNotExist"}, + {"shape":"MissingBody"}, + {"shape":"TooManyStreamingDistributionCNAMEs"}, + {"shape":"TooManyStreamingDistributions"}, + {"shape":"InvalidArgument"}, + {"shape":"InconsistentQuantities"} + ] + }, + "DeleteCloudFrontOriginAccessIdentity":{ + "name":"DeleteCloudFrontOriginAccessIdentity2015_09_17", + "http":{ + "method":"DELETE", + "requestUri":"/2015-09-17/origin-access-identity/cloudfront/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeleteCloudFrontOriginAccessIdentityRequest"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"InvalidIfMatchVersion"}, + 
{"shape":"NoSuchCloudFrontOriginAccessIdentity"}, + {"shape":"PreconditionFailed"}, + {"shape":"CloudFrontOriginAccessIdentityInUse"} + ] + }, + "DeleteDistribution":{ + "name":"DeleteDistribution2015_09_17", + "http":{ + "method":"DELETE", + "requestUri":"/2015-09-17/distribution/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeleteDistributionRequest"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"DistributionNotDisabled"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"NoSuchDistribution"}, + {"shape":"PreconditionFailed"} + ] + }, + "DeleteStreamingDistribution":{ + "name":"DeleteStreamingDistribution2015_09_17", + "http":{ + "method":"DELETE", + "requestUri":"/2015-09-17/streaming-distribution/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeleteStreamingDistributionRequest"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"StreamingDistributionNotDisabled"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"NoSuchStreamingDistribution"}, + {"shape":"PreconditionFailed"} + ] + }, + "GetCloudFrontOriginAccessIdentity":{ + "name":"GetCloudFrontOriginAccessIdentity2015_09_17", + "http":{ + "method":"GET", + "requestUri":"/2015-09-17/origin-access-identity/cloudfront/{Id}" + }, + "input":{"shape":"GetCloudFrontOriginAccessIdentityRequest"}, + "output":{"shape":"GetCloudFrontOriginAccessIdentityResult"}, + "errors":[ + {"shape":"NoSuchCloudFrontOriginAccessIdentity"}, + {"shape":"AccessDenied"} + ] + }, + "GetCloudFrontOriginAccessIdentityConfig":{ + "name":"GetCloudFrontOriginAccessIdentityConfig2015_09_17", + "http":{ + "method":"GET", + "requestUri":"/2015-09-17/origin-access-identity/cloudfront/{Id}/config" + }, + "input":{"shape":"GetCloudFrontOriginAccessIdentityConfigRequest"}, + "output":{"shape":"GetCloudFrontOriginAccessIdentityConfigResult"}, + "errors":[ + {"shape":"NoSuchCloudFrontOriginAccessIdentity"}, + {"shape":"AccessDenied"} + ] + }, + "GetDistribution":{ + "name":"GetDistribution2015_09_17", + "http":{ + "method":"GET", + "requestUri":"/2015-09-17/distribution/{Id}" + }, + "input":{"shape":"GetDistributionRequest"}, + "output":{"shape":"GetDistributionResult"}, + "errors":[ + {"shape":"NoSuchDistribution"}, + {"shape":"AccessDenied"} + ] + }, + "GetDistributionConfig":{ + "name":"GetDistributionConfig2015_09_17", + "http":{ + "method":"GET", + "requestUri":"/2015-09-17/distribution/{Id}/config" + }, + "input":{"shape":"GetDistributionConfigRequest"}, + "output":{"shape":"GetDistributionConfigResult"}, + "errors":[ + {"shape":"NoSuchDistribution"}, + {"shape":"AccessDenied"} + ] + }, + "GetInvalidation":{ + "name":"GetInvalidation2015_09_17", + "http":{ + "method":"GET", + "requestUri":"/2015-09-17/distribution/{DistributionId}/invalidation/{Id}" + }, + "input":{"shape":"GetInvalidationRequest"}, + "output":{"shape":"GetInvalidationResult"}, + "errors":[ + {"shape":"NoSuchInvalidation"}, + {"shape":"NoSuchDistribution"}, + {"shape":"AccessDenied"} + ] + }, + "GetStreamingDistribution":{ + "name":"GetStreamingDistribution2015_09_17", + "http":{ + "method":"GET", + "requestUri":"/2015-09-17/streaming-distribution/{Id}" + }, + "input":{"shape":"GetStreamingDistributionRequest"}, + "output":{"shape":"GetStreamingDistributionResult"}, + "errors":[ + {"shape":"NoSuchStreamingDistribution"}, + {"shape":"AccessDenied"} + ] + }, + "GetStreamingDistributionConfig":{ + "name":"GetStreamingDistributionConfig2015_09_17", + "http":{ + "method":"GET", + "requestUri":"/2015-09-17/streaming-distribution/{Id}/config" + }, + 
"input":{"shape":"GetStreamingDistributionConfigRequest"}, + "output":{"shape":"GetStreamingDistributionConfigResult"}, + "errors":[ + {"shape":"NoSuchStreamingDistribution"}, + {"shape":"AccessDenied"} + ] + }, + "ListCloudFrontOriginAccessIdentities":{ + "name":"ListCloudFrontOriginAccessIdentities2015_09_17", + "http":{ + "method":"GET", + "requestUri":"/2015-09-17/origin-access-identity/cloudfront" + }, + "input":{"shape":"ListCloudFrontOriginAccessIdentitiesRequest"}, + "output":{"shape":"ListCloudFrontOriginAccessIdentitiesResult"}, + "errors":[ + {"shape":"InvalidArgument"} + ] + }, + "ListDistributions":{ + "name":"ListDistributions2015_09_17", + "http":{ + "method":"GET", + "requestUri":"/2015-09-17/distribution" + }, + "input":{"shape":"ListDistributionsRequest"}, + "output":{"shape":"ListDistributionsResult"}, + "errors":[ + {"shape":"InvalidArgument"} + ] + }, + "ListDistributionsByWebACLId":{ + "name":"ListDistributionsByWebACLId2015_09_17", + "http":{ + "method":"GET", + "requestUri":"/2015-09-17/distributionsByWebACLId/{WebACLId}" + }, + "input":{"shape":"ListDistributionsByWebACLIdRequest"}, + "output":{"shape":"ListDistributionsByWebACLIdResult"}, + "errors":[ + {"shape":"InvalidArgument"}, + {"shape":"InvalidWebACLId"} + ] + }, + "ListInvalidations":{ + "name":"ListInvalidations2015_09_17", + "http":{ + "method":"GET", + "requestUri":"/2015-09-17/distribution/{DistributionId}/invalidation" + }, + "input":{"shape":"ListInvalidationsRequest"}, + "output":{"shape":"ListInvalidationsResult"}, + "errors":[ + {"shape":"InvalidArgument"}, + {"shape":"NoSuchDistribution"}, + {"shape":"AccessDenied"} + ] + }, + "ListStreamingDistributions":{ + "name":"ListStreamingDistributions2015_09_17", + "http":{ + "method":"GET", + "requestUri":"/2015-09-17/streaming-distribution" + }, + "input":{"shape":"ListStreamingDistributionsRequest"}, + "output":{"shape":"ListStreamingDistributionsResult"}, + "errors":[ + {"shape":"InvalidArgument"} + ] + }, + "UpdateCloudFrontOriginAccessIdentity":{ + "name":"UpdateCloudFrontOriginAccessIdentity2015_09_17", + "http":{ + "method":"PUT", + "requestUri":"/2015-09-17/origin-access-identity/cloudfront/{Id}/config" + }, + "input":{"shape":"UpdateCloudFrontOriginAccessIdentityRequest"}, + "output":{"shape":"UpdateCloudFrontOriginAccessIdentityResult"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"IllegalUpdate"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"MissingBody"}, + {"shape":"NoSuchCloudFrontOriginAccessIdentity"}, + {"shape":"PreconditionFailed"}, + {"shape":"InvalidArgument"}, + {"shape":"InconsistentQuantities"} + ] + }, + "UpdateDistribution":{ + "name":"UpdateDistribution2015_09_17", + "http":{ + "method":"PUT", + "requestUri":"/2015-09-17/distribution/{Id}/config" + }, + "input":{"shape":"UpdateDistributionRequest"}, + "output":{"shape":"UpdateDistributionResult"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"CNAMEAlreadyExists"}, + {"shape":"IllegalUpdate"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"MissingBody"}, + {"shape":"NoSuchDistribution"}, + {"shape":"PreconditionFailed"}, + {"shape":"TooManyDistributionCNAMEs"}, + {"shape":"InvalidDefaultRootObject"}, + {"shape":"InvalidRelativePath"}, + {"shape":"InvalidErrorCode"}, + {"shape":"InvalidResponseCode"}, + {"shape":"InvalidArgument"}, + {"shape":"InvalidOriginAccessIdentity"}, + {"shape":"TooManyTrustedSigners"}, + {"shape":"TrustedSignerDoesNotExist"}, + {"shape":"InvalidViewerCertificate"}, + {"shape":"InvalidMinimumProtocolVersion"}, + 
{"shape":"InvalidRequiredProtocol"}, + {"shape":"NoSuchOrigin"}, + {"shape":"TooManyOrigins"}, + {"shape":"TooManyCacheBehaviors"}, + {"shape":"TooManyCookieNamesInWhiteList"}, + {"shape":"InvalidForwardCookies"}, + {"shape":"TooManyHeadersInForwardedValues"}, + {"shape":"InvalidHeadersForS3Origin"}, + {"shape":"InconsistentQuantities"}, + {"shape":"TooManyCertificates"}, + {"shape":"InvalidLocationCode"}, + {"shape":"InvalidGeoRestrictionParameter"}, + {"shape":"InvalidTTLOrder"}, + {"shape":"InvalidWebACLId"} + ] + }, + "UpdateStreamingDistribution":{ + "name":"UpdateStreamingDistribution2015_09_17", + "http":{ + "method":"PUT", + "requestUri":"/2015-09-17/streaming-distribution/{Id}/config" + }, + "input":{"shape":"UpdateStreamingDistributionRequest"}, + "output":{"shape":"UpdateStreamingDistributionResult"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"CNAMEAlreadyExists"}, + {"shape":"IllegalUpdate"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"MissingBody"}, + {"shape":"NoSuchStreamingDistribution"}, + {"shape":"PreconditionFailed"}, + {"shape":"TooManyStreamingDistributionCNAMEs"}, + {"shape":"InvalidArgument"}, + {"shape":"InvalidOriginAccessIdentity"}, + {"shape":"TooManyTrustedSigners"}, + {"shape":"TrustedSignerDoesNotExist"}, + {"shape":"InconsistentQuantities"} + ] + } + }, + "shapes":{ + "AccessDenied":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":403}, + "exception":true + }, + "ActiveTrustedSigners":{ + "type":"structure", + "required":[ + "Enabled", + "Quantity" + ], + "members":{ + "Enabled":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"SignerList"} + } + }, + "AliasList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"CNAME" + } + }, + "Aliases":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"AliasList"} + } + }, + "AllowedMethods":{ + "type":"structure", + "required":[ + "Quantity", + "Items" + ], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"MethodsList"}, + "CachedMethods":{"shape":"CachedMethods"} + } + }, + "AwsAccountNumberList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"AwsAccountNumber" + } + }, + "BatchTooLarge":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":413}, + "exception":true + }, + "CNAMEAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "CacheBehavior":{ + "type":"structure", + "required":[ + "PathPattern", + "TargetOriginId", + "ForwardedValues", + "TrustedSigners", + "ViewerProtocolPolicy", + "MinTTL" + ], + "members":{ + "PathPattern":{"shape":"string"}, + "TargetOriginId":{"shape":"string"}, + "ForwardedValues":{"shape":"ForwardedValues"}, + "TrustedSigners":{"shape":"TrustedSigners"}, + "ViewerProtocolPolicy":{"shape":"ViewerProtocolPolicy"}, + "MinTTL":{"shape":"long"}, + "AllowedMethods":{"shape":"AllowedMethods"}, + "SmoothStreaming":{"shape":"boolean"}, + "DefaultTTL":{"shape":"long"}, + "MaxTTL":{"shape":"long"}, + "Compress":{"shape":"boolean"} + } + }, + "CacheBehaviorList":{ + "type":"list", + "member":{ + "shape":"CacheBehavior", + "locationName":"CacheBehavior" + } + }, + "CacheBehaviors":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"CacheBehaviorList"} + } + }, + "CachedMethods":{ 
+ "type":"structure", + "required":[ + "Quantity", + "Items" + ], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"MethodsList"} + } + }, + "CertificateSource":{ + "type":"string", + "enum":[ + "cloudfront", + "iam" + ] + }, + "CloudFrontOriginAccessIdentity":{ + "type":"structure", + "required":[ + "Id", + "S3CanonicalUserId" + ], + "members":{ + "Id":{"shape":"string"}, + "S3CanonicalUserId":{"shape":"string"}, + "CloudFrontOriginAccessIdentityConfig":{"shape":"CloudFrontOriginAccessIdentityConfig"} + } + }, + "CloudFrontOriginAccessIdentityAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "CloudFrontOriginAccessIdentityConfig":{ + "type":"structure", + "required":[ + "CallerReference", + "Comment" + ], + "members":{ + "CallerReference":{"shape":"string"}, + "Comment":{"shape":"string"} + } + }, + "CloudFrontOriginAccessIdentityInUse":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "CloudFrontOriginAccessIdentityList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{"shape":"string"}, + "NextMarker":{"shape":"string"}, + "MaxItems":{"shape":"integer"}, + "IsTruncated":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"CloudFrontOriginAccessIdentitySummaryList"} + } + }, + "CloudFrontOriginAccessIdentitySummary":{ + "type":"structure", + "required":[ + "Id", + "S3CanonicalUserId", + "Comment" + ], + "members":{ + "Id":{"shape":"string"}, + "S3CanonicalUserId":{"shape":"string"}, + "Comment":{"shape":"string"} + } + }, + "CloudFrontOriginAccessIdentitySummaryList":{ + "type":"list", + "member":{ + "shape":"CloudFrontOriginAccessIdentitySummary", + "locationName":"CloudFrontOriginAccessIdentitySummary" + } + }, + "CookieNameList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Name" + } + }, + "CookieNames":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"CookieNameList"} + } + }, + "CookiePreference":{ + "type":"structure", + "required":["Forward"], + "members":{ + "Forward":{"shape":"ItemSelection"}, + "WhitelistedNames":{"shape":"CookieNames"} + } + }, + "CreateCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "required":["CloudFrontOriginAccessIdentityConfig"], + "members":{ + "CloudFrontOriginAccessIdentityConfig":{ + "shape":"CloudFrontOriginAccessIdentityConfig", + "locationName":"CloudFrontOriginAccessIdentityConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-09-17/"} + } + }, + "payload":"CloudFrontOriginAccessIdentityConfig" + }, + "CreateCloudFrontOriginAccessIdentityResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentity":{"shape":"CloudFrontOriginAccessIdentity"}, + "Location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"CloudFrontOriginAccessIdentity" + }, + "CreateDistributionRequest":{ + "type":"structure", + "required":["DistributionConfig"], + "members":{ + "DistributionConfig":{ + "shape":"DistributionConfig", + "locationName":"DistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-09-17/"} + } + }, + "payload":"DistributionConfig" + }, + 
"CreateDistributionResult":{ + "type":"structure", + "members":{ + "Distribution":{"shape":"Distribution"}, + "Location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"Distribution" + }, + "CreateInvalidationRequest":{ + "type":"structure", + "required":[ + "DistributionId", + "InvalidationBatch" + ], + "members":{ + "DistributionId":{ + "shape":"string", + "location":"uri", + "locationName":"DistributionId" + }, + "InvalidationBatch":{ + "shape":"InvalidationBatch", + "locationName":"InvalidationBatch", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-09-17/"} + } + }, + "payload":"InvalidationBatch" + }, + "CreateInvalidationResult":{ + "type":"structure", + "members":{ + "Location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "Invalidation":{"shape":"Invalidation"} + }, + "payload":"Invalidation" + }, + "CreateStreamingDistributionRequest":{ + "type":"structure", + "required":["StreamingDistributionConfig"], + "members":{ + "StreamingDistributionConfig":{ + "shape":"StreamingDistributionConfig", + "locationName":"StreamingDistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-09-17/"} + } + }, + "payload":"StreamingDistributionConfig" + }, + "CreateStreamingDistributionResult":{ + "type":"structure", + "members":{ + "StreamingDistribution":{"shape":"StreamingDistribution"}, + "Location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"StreamingDistribution" + }, + "CustomErrorResponse":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"integer"}, + "ResponsePagePath":{"shape":"string"}, + "ResponseCode":{"shape":"string"}, + "ErrorCachingMinTTL":{"shape":"long"} + } + }, + "CustomErrorResponseList":{ + "type":"list", + "member":{ + "shape":"CustomErrorResponse", + "locationName":"CustomErrorResponse" + } + }, + "CustomErrorResponses":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"CustomErrorResponseList"} + } + }, + "CustomOriginConfig":{ + "type":"structure", + "required":[ + "HTTPPort", + "HTTPSPort", + "OriginProtocolPolicy" + ], + "members":{ + "HTTPPort":{"shape":"integer"}, + "HTTPSPort":{"shape":"integer"}, + "OriginProtocolPolicy":{"shape":"OriginProtocolPolicy"} + } + }, + "DefaultCacheBehavior":{ + "type":"structure", + "required":[ + "TargetOriginId", + "ForwardedValues", + "TrustedSigners", + "ViewerProtocolPolicy", + "MinTTL" + ], + "members":{ + "TargetOriginId":{"shape":"string"}, + "ForwardedValues":{"shape":"ForwardedValues"}, + "TrustedSigners":{"shape":"TrustedSigners"}, + "ViewerProtocolPolicy":{"shape":"ViewerProtocolPolicy"}, + "MinTTL":{"shape":"long"}, + "AllowedMethods":{"shape":"AllowedMethods"}, + "SmoothStreaming":{"shape":"boolean"}, + "DefaultTTL":{"shape":"long"}, + "MaxTTL":{"shape":"long"}, + "Compress":{"shape":"boolean"} + } + }, + "DeleteCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + } + }, + "DeleteDistributionRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + 
"shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + } + }, + "DeleteStreamingDistributionRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + } + }, + "Distribution":{ + "type":"structure", + "required":[ + "Id", + "Status", + "LastModifiedTime", + "InProgressInvalidationBatches", + "DomainName", + "ActiveTrustedSigners", + "DistributionConfig" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "LastModifiedTime":{"shape":"timestamp"}, + "InProgressInvalidationBatches":{"shape":"integer"}, + "DomainName":{"shape":"string"}, + "ActiveTrustedSigners":{"shape":"ActiveTrustedSigners"}, + "DistributionConfig":{"shape":"DistributionConfig"} + } + }, + "DistributionAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "DistributionConfig":{ + "type":"structure", + "required":[ + "CallerReference", + "Origins", + "DefaultCacheBehavior", + "Comment", + "Enabled" + ], + "members":{ + "CallerReference":{"shape":"string"}, + "Aliases":{"shape":"Aliases"}, + "DefaultRootObject":{"shape":"string"}, + "Origins":{"shape":"Origins"}, + "DefaultCacheBehavior":{"shape":"DefaultCacheBehavior"}, + "CacheBehaviors":{"shape":"CacheBehaviors"}, + "CustomErrorResponses":{"shape":"CustomErrorResponses"}, + "Comment":{"shape":"string"}, + "Logging":{"shape":"LoggingConfig"}, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{"shape":"boolean"}, + "ViewerCertificate":{"shape":"ViewerCertificate"}, + "Restrictions":{"shape":"Restrictions"}, + "WebACLId":{"shape":"string"} + } + }, + "DistributionList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{"shape":"string"}, + "NextMarker":{"shape":"string"}, + "MaxItems":{"shape":"integer"}, + "IsTruncated":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"DistributionSummaryList"} + } + }, + "DistributionNotDisabled":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "DistributionSummary":{ + "type":"structure", + "required":[ + "Id", + "Status", + "LastModifiedTime", + "DomainName", + "Aliases", + "Origins", + "DefaultCacheBehavior", + "CacheBehaviors", + "CustomErrorResponses", + "Comment", + "PriceClass", + "Enabled", + "ViewerCertificate", + "Restrictions", + "WebACLId" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "LastModifiedTime":{"shape":"timestamp"}, + "DomainName":{"shape":"string"}, + "Aliases":{"shape":"Aliases"}, + "Origins":{"shape":"Origins"}, + "DefaultCacheBehavior":{"shape":"DefaultCacheBehavior"}, + "CacheBehaviors":{"shape":"CacheBehaviors"}, + "CustomErrorResponses":{"shape":"CustomErrorResponses"}, + "Comment":{"shape":"string"}, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{"shape":"boolean"}, + "ViewerCertificate":{"shape":"ViewerCertificate"}, + "Restrictions":{"shape":"Restrictions"}, + "WebACLId":{"shape":"string"} + } + }, + "DistributionSummaryList":{ + "type":"list", + "member":{ + "shape":"DistributionSummary", + "locationName":"DistributionSummary" + } + }, + "ForwardedValues":{ + "type":"structure", + "required":[ + 
"QueryString", + "Cookies" + ], + "members":{ + "QueryString":{"shape":"boolean"}, + "Cookies":{"shape":"CookiePreference"}, + "Headers":{"shape":"Headers"} + } + }, + "GeoRestriction":{ + "type":"structure", + "required":[ + "RestrictionType", + "Quantity" + ], + "members":{ + "RestrictionType":{"shape":"GeoRestrictionType"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"LocationList"} + } + }, + "GeoRestrictionType":{ + "type":"string", + "enum":[ + "blacklist", + "whitelist", + "none" + ] + }, + "GetCloudFrontOriginAccessIdentityConfigRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetCloudFrontOriginAccessIdentityConfigResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentityConfig":{"shape":"CloudFrontOriginAccessIdentityConfig"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"CloudFrontOriginAccessIdentityConfig" + }, + "GetCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetCloudFrontOriginAccessIdentityResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentity":{"shape":"CloudFrontOriginAccessIdentity"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"CloudFrontOriginAccessIdentity" + }, + "GetDistributionConfigRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetDistributionConfigResult":{ + "type":"structure", + "members":{ + "DistributionConfig":{"shape":"DistributionConfig"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"DistributionConfig" + }, + "GetDistributionRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetDistributionResult":{ + "type":"structure", + "members":{ + "Distribution":{"shape":"Distribution"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"Distribution" + }, + "GetInvalidationRequest":{ + "type":"structure", + "required":[ + "DistributionId", + "Id" + ], + "members":{ + "DistributionId":{ + "shape":"string", + "location":"uri", + "locationName":"DistributionId" + }, + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetInvalidationResult":{ + "type":"structure", + "members":{ + "Invalidation":{"shape":"Invalidation"} + }, + "payload":"Invalidation" + }, + "GetStreamingDistributionConfigRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetStreamingDistributionConfigResult":{ + "type":"structure", + "members":{ + "StreamingDistributionConfig":{"shape":"StreamingDistributionConfig"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"StreamingDistributionConfig" + }, + "GetStreamingDistributionRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetStreamingDistributionResult":{ + "type":"structure", + "members":{ + 
"StreamingDistribution":{"shape":"StreamingDistribution"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"StreamingDistribution" + }, + "HeaderList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Name" + } + }, + "Headers":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"HeaderList"} + } + }, + "IllegalUpdate":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InconsistentQuantities":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidArgument":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidDefaultRootObject":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidErrorCode":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidForwardCookies":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidGeoRestrictionParameter":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidHeadersForS3Origin":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidIfMatchVersion":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidLocationCode":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidMinimumProtocolVersion":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidOrigin":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidOriginAccessIdentity":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidProtocolSettings":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidRelativePath":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidRequiredProtocol":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidResponseCode":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidTTLOrder":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidViewerCertificate":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidWebACLId":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + 
"error":{"httpStatusCode":400}, + "exception":true + }, + "Invalidation":{ + "type":"structure", + "required":[ + "Id", + "Status", + "CreateTime", + "InvalidationBatch" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "CreateTime":{"shape":"timestamp"}, + "InvalidationBatch":{"shape":"InvalidationBatch"} + } + }, + "InvalidationBatch":{ + "type":"structure", + "required":[ + "Paths", + "CallerReference" + ], + "members":{ + "Paths":{"shape":"Paths"}, + "CallerReference":{"shape":"string"} + } + }, + "InvalidationList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{"shape":"string"}, + "NextMarker":{"shape":"string"}, + "MaxItems":{"shape":"integer"}, + "IsTruncated":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"InvalidationSummaryList"} + } + }, + "InvalidationSummary":{ + "type":"structure", + "required":[ + "Id", + "CreateTime", + "Status" + ], + "members":{ + "Id":{"shape":"string"}, + "CreateTime":{"shape":"timestamp"}, + "Status":{"shape":"string"} + } + }, + "InvalidationSummaryList":{ + "type":"list", + "member":{ + "shape":"InvalidationSummary", + "locationName":"InvalidationSummary" + } + }, + "ItemSelection":{ + "type":"string", + "enum":[ + "none", + "whitelist", + "all" + ] + }, + "KeyPairIdList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"KeyPairId" + } + }, + "KeyPairIds":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"KeyPairIdList"} + } + }, + "ListCloudFrontOriginAccessIdentitiesRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListCloudFrontOriginAccessIdentitiesResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentityList":{"shape":"CloudFrontOriginAccessIdentityList"} + }, + "payload":"CloudFrontOriginAccessIdentityList" + }, + "ListDistributionsByWebACLIdRequest":{ + "type":"structure", + "required":["WebACLId"], + "members":{ + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + }, + "WebACLId":{ + "shape":"string", + "location":"uri", + "locationName":"WebACLId" + } + } + }, + "ListDistributionsByWebACLIdResult":{ + "type":"structure", + "members":{ + "DistributionList":{"shape":"DistributionList"} + }, + "payload":"DistributionList" + }, + "ListDistributionsRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListDistributionsResult":{ + "type":"structure", + "members":{ + "DistributionList":{"shape":"DistributionList"} + }, + "payload":"DistributionList" + }, + "ListInvalidationsRequest":{ + "type":"structure", + "required":["DistributionId"], + "members":{ + "DistributionId":{ + "shape":"string", + "location":"uri", + "locationName":"DistributionId" + }, + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListInvalidationsResult":{ + "type":"structure", + 
"members":{ + "InvalidationList":{"shape":"InvalidationList"} + }, + "payload":"InvalidationList" + }, + "ListStreamingDistributionsRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListStreamingDistributionsResult":{ + "type":"structure", + "members":{ + "StreamingDistributionList":{"shape":"StreamingDistributionList"} + }, + "payload":"StreamingDistributionList" + }, + "LocationList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Location" + } + }, + "LoggingConfig":{ + "type":"structure", + "required":[ + "Enabled", + "IncludeCookies", + "Bucket", + "Prefix" + ], + "members":{ + "Enabled":{"shape":"boolean"}, + "IncludeCookies":{"shape":"boolean"}, + "Bucket":{"shape":"string"}, + "Prefix":{"shape":"string"} + } + }, + "Method":{ + "type":"string", + "enum":[ + "GET", + "HEAD", + "POST", + "PUT", + "PATCH", + "OPTIONS", + "DELETE" + ] + }, + "MethodsList":{ + "type":"list", + "member":{ + "shape":"Method", + "locationName":"Method" + } + }, + "MinimumProtocolVersion":{ + "type":"string", + "enum":[ + "SSLv3", + "TLSv1" + ] + }, + "MissingBody":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "NoSuchCloudFrontOriginAccessIdentity":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchDistribution":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchInvalidation":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchOrigin":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchStreamingDistribution":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "Origin":{ + "type":"structure", + "required":[ + "Id", + "DomainName" + ], + "members":{ + "Id":{"shape":"string"}, + "DomainName":{"shape":"string"}, + "OriginPath":{"shape":"string"}, + "S3OriginConfig":{"shape":"S3OriginConfig"}, + "CustomOriginConfig":{"shape":"CustomOriginConfig"} + } + }, + "OriginList":{ + "type":"list", + "member":{ + "shape":"Origin", + "locationName":"Origin" + }, + "min":1 + }, + "OriginProtocolPolicy":{ + "type":"string", + "enum":[ + "http-only", + "match-viewer" + ] + }, + "Origins":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"OriginList"} + } + }, + "PathList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Path" + } + }, + "Paths":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"PathList"} + } + }, + "PreconditionFailed":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":412}, + "exception":true + }, + "PriceClass":{ + "type":"string", + "enum":[ + "PriceClass_100", + "PriceClass_200", + "PriceClass_All" + ] + }, + "Restrictions":{ + "type":"structure", + "required":["GeoRestriction"], + "members":{ + "GeoRestriction":{"shape":"GeoRestriction"} + } + }, + "S3Origin":{ + 
"type":"structure", + "required":[ + "DomainName", + "OriginAccessIdentity" + ], + "members":{ + "DomainName":{"shape":"string"}, + "OriginAccessIdentity":{"shape":"string"} + } + }, + "S3OriginConfig":{ + "type":"structure", + "required":["OriginAccessIdentity"], + "members":{ + "OriginAccessIdentity":{"shape":"string"} + } + }, + "SSLSupportMethod":{ + "type":"string", + "enum":[ + "sni-only", + "vip" + ] + }, + "Signer":{ + "type":"structure", + "members":{ + "AwsAccountNumber":{"shape":"string"}, + "KeyPairIds":{"shape":"KeyPairIds"} + } + }, + "SignerList":{ + "type":"list", + "member":{ + "shape":"Signer", + "locationName":"Signer" + } + }, + "StreamingDistribution":{ + "type":"structure", + "required":[ + "Id", + "Status", + "DomainName", + "ActiveTrustedSigners", + "StreamingDistributionConfig" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "LastModifiedTime":{"shape":"timestamp"}, + "DomainName":{"shape":"string"}, + "ActiveTrustedSigners":{"shape":"ActiveTrustedSigners"}, + "StreamingDistributionConfig":{"shape":"StreamingDistributionConfig"} + } + }, + "StreamingDistributionAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "StreamingDistributionConfig":{ + "type":"structure", + "required":[ + "CallerReference", + "S3Origin", + "Comment", + "TrustedSigners", + "Enabled" + ], + "members":{ + "CallerReference":{"shape":"string"}, + "S3Origin":{"shape":"S3Origin"}, + "Aliases":{"shape":"Aliases"}, + "Comment":{"shape":"string"}, + "Logging":{"shape":"StreamingLoggingConfig"}, + "TrustedSigners":{"shape":"TrustedSigners"}, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{"shape":"boolean"} + } + }, + "StreamingDistributionList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{"shape":"string"}, + "NextMarker":{"shape":"string"}, + "MaxItems":{"shape":"integer"}, + "IsTruncated":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"StreamingDistributionSummaryList"} + } + }, + "StreamingDistributionNotDisabled":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "StreamingDistributionSummary":{ + "type":"structure", + "required":[ + "Id", + "Status", + "LastModifiedTime", + "DomainName", + "S3Origin", + "Aliases", + "TrustedSigners", + "Comment", + "PriceClass", + "Enabled" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "LastModifiedTime":{"shape":"timestamp"}, + "DomainName":{"shape":"string"}, + "S3Origin":{"shape":"S3Origin"}, + "Aliases":{"shape":"Aliases"}, + "TrustedSigners":{"shape":"TrustedSigners"}, + "Comment":{"shape":"string"}, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{"shape":"boolean"} + } + }, + "StreamingDistributionSummaryList":{ + "type":"list", + "member":{ + "shape":"StreamingDistributionSummary", + "locationName":"StreamingDistributionSummary" + } + }, + "StreamingLoggingConfig":{ + "type":"structure", + "required":[ + "Enabled", + "Bucket", + "Prefix" + ], + "members":{ + "Enabled":{"shape":"boolean"}, + "Bucket":{"shape":"string"}, + "Prefix":{"shape":"string"} + } + }, + "TooManyCacheBehaviors":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyCertificates":{ + "type":"structure", + "members":{ + 
"Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyCloudFrontOriginAccessIdentities":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyCookieNamesInWhiteList":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyDistributionCNAMEs":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyDistributions":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyHeadersInForwardedValues":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyInvalidationsInProgress":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyOrigins":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyStreamingDistributionCNAMEs":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyStreamingDistributions":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyTrustedSigners":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TrustedSignerDoesNotExist":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TrustedSigners":{ + "type":"structure", + "required":[ + "Enabled", + "Quantity" + ], + "members":{ + "Enabled":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"AwsAccountNumberList"} + } + }, + "UpdateCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "required":[ + "CloudFrontOriginAccessIdentityConfig", + "Id" + ], + "members":{ + "CloudFrontOriginAccessIdentityConfig":{ + "shape":"CloudFrontOriginAccessIdentityConfig", + "locationName":"CloudFrontOriginAccessIdentityConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-09-17/"} + }, + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "payload":"CloudFrontOriginAccessIdentityConfig" + }, + "UpdateCloudFrontOriginAccessIdentityResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentity":{"shape":"CloudFrontOriginAccessIdentity"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"CloudFrontOriginAccessIdentity" + }, + "UpdateDistributionRequest":{ + "type":"structure", + "required":[ + "DistributionConfig", + "Id" + ], + "members":{ + "DistributionConfig":{ + "shape":"DistributionConfig", + "locationName":"DistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-09-17/"} + }, + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "payload":"DistributionConfig" + }, + 
"UpdateDistributionResult":{ + "type":"structure", + "members":{ + "Distribution":{"shape":"Distribution"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"Distribution" + }, + "UpdateStreamingDistributionRequest":{ + "type":"structure", + "required":[ + "StreamingDistributionConfig", + "Id" + ], + "members":{ + "StreamingDistributionConfig":{ + "shape":"StreamingDistributionConfig", + "locationName":"StreamingDistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-09-17/"} + }, + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "payload":"StreamingDistributionConfig" + }, + "UpdateStreamingDistributionResult":{ + "type":"structure", + "members":{ + "StreamingDistribution":{"shape":"StreamingDistribution"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"StreamingDistribution" + }, + "ViewerCertificate":{ + "type":"structure", + "members":{ + "Certificate":{"shape":"string"}, + "CertificateSource":{"shape":"CertificateSource"}, + "SSLSupportMethod":{"shape":"SSLSupportMethod"}, + "MinimumProtocolVersion":{"shape":"MinimumProtocolVersion"}, + "IAMCertificateId":{ + "shape":"string", + "deprecated":true + }, + "CloudFrontDefaultCertificate":{ + "shape":"boolean", + "deprecated":true + } + } + }, + "ViewerProtocolPolicy":{ + "type":"string", + "enum":[ + "allow-all", + "https-only", + "redirect-to-https" + ] + }, + "boolean":{"type":"boolean"}, + "integer":{"type":"integer"}, + "long":{"type":"long"}, + "string":{"type":"string"}, + "timestamp":{"type":"timestamp"} + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-09-17/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-09-17/docs-2.json new file mode 100644 index 000000000..3e23d960f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-09-17/docs-2.json @@ -0,0 +1,1173 @@ +{ + "version": "2.0", + "service": null, + "operations": { + "CreateCloudFrontOriginAccessIdentity": "Create a new origin access identity.", + "CreateDistribution": "Create a new distribution.", + "CreateInvalidation": "Create a new invalidation.", + "CreateStreamingDistribution": "Create a new streaming distribution.", + "DeleteCloudFrontOriginAccessIdentity": "Delete an origin access identity.", + "DeleteDistribution": "Delete a distribution.", + "DeleteStreamingDistribution": "Delete a streaming distribution.", + "GetCloudFrontOriginAccessIdentity": "Get the information about an origin access identity.", + "GetCloudFrontOriginAccessIdentityConfig": "Get the configuration information about an origin access identity.", + "GetDistribution": "Get the information about a distribution.", + "GetDistributionConfig": "Get the configuration information about a distribution.", + "GetInvalidation": "Get the information about an invalidation.", + "GetStreamingDistribution": "Get the information about a streaming distribution.", + "GetStreamingDistributionConfig": "Get the configuration information about a streaming distribution.", + "ListCloudFrontOriginAccessIdentities": "List origin access identities.", + "ListDistributions": "List distributions.", + "ListDistributionsByWebACLId": "List the distributions that are associated with a specified AWS WAF web ACL.", + "ListInvalidations": "List invalidation batches.", + "ListStreamingDistributions": "List 
streaming distributions.", + "UpdateCloudFrontOriginAccessIdentity": "Update an origin access identity.", + "UpdateDistribution": "Update a distribution.", + "UpdateStreamingDistribution": "Update a streaming distribution." + }, + "shapes": { + "AccessDenied": { + "base": "Access denied.", + "refs": { + } + }, + "ActiveTrustedSigners": { + "base": "A complex type that lists the AWS accounts, if any, that you included in the TrustedSigners complex type for the default cache behavior or for any of the other cache behaviors for this distribution. These are accounts that you want to allow to create signed URLs for private content.", + "refs": { + "Distribution$ActiveTrustedSigners": "CloudFront automatically adds this element to the response only if you've set up the distribution to serve private content with signed URLs. The element lists the key pair IDs that CloudFront is aware of for each trusted signer. The Signer child element lists the AWS account number of the trusted signer (or an empty Self element if the signer is you). The Signer element also includes the IDs of any active key pairs associated with the trusted signer's AWS account. If no KeyPairId element appears for a Signer, that signer can't create working signed URLs.", + "StreamingDistribution$ActiveTrustedSigners": "CloudFront automatically adds this element to the response only if you've set up the distribution to serve private content with signed URLs. The element lists the key pair IDs that CloudFront is aware of for each trusted signer. The Signer child element lists the AWS account number of the trusted signer (or an empty Self element if the signer is you). The Signer element also includes the IDs of any active key pairs associated with the trusted signer's AWS account. If no KeyPairId element appears for a Signer, that signer can't create working signed URLs." + } + }, + "AliasList": { + "base": null, + "refs": { + "Aliases$Items": "Optional: A complex type that contains CNAME elements, if any, for this distribution. If Quantity is 0, you can omit Items." + } + }, + "Aliases": { + "base": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution.", + "refs": { + "DistributionConfig$Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution.", + "DistributionSummary$Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution.", + "StreamingDistributionConfig$Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this streaming distribution.", + "StreamingDistributionSummary$Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this streaming distribution." + } + }, + "AllowedMethods": { + "base": "A complex type that controls which HTTP methods CloudFront processes and forwards to your Amazon S3 bucket or your custom origin. There are three choices: - CloudFront forwards only GET and HEAD requests. - CloudFront forwards only GET, HEAD and OPTIONS requests. - CloudFront forwards GET, HEAD, OPTIONS, PUT, PATCH, POST, and DELETE requests. If you pick the third choice, you may need to restrict access to your Amazon S3 bucket or to your custom origin so users can't perform operations that you don't want them to. 
For example, you may not want users to have permission to delete objects from your origin.", + "refs": { + "CacheBehavior$AllowedMethods": null, + "DefaultCacheBehavior$AllowedMethods": null + } + }, + "AwsAccountNumberList": { + "base": null, + "refs": { + "TrustedSigners$Items": "Optional: A complex type that contains trusted signers for this cache behavior. If Quantity is 0, you can omit Items." + } + }, + "BatchTooLarge": { + "base": null, + "refs": { + } + }, + "CNAMEAlreadyExists": { + "base": null, + "refs": { + } + }, + "CacheBehavior": { + "base": "A complex type that describes how CloudFront processes requests. You can create up to 10 cache behaviors. You must create at least as many cache behaviors (including the default cache behavior) as you have origins if you want CloudFront to distribute objects from all of the origins. Each cache behavior specifies the one origin from which you want CloudFront to get objects. If you have two origins and only the default cache behavior, the default cache behavior will cause CloudFront to get objects from one of the origins, but the other origin will never be used. If you don't want to specify any cache behaviors, include only an empty CacheBehaviors element. Don't include an empty CacheBehavior element, or CloudFront returns a MalformedXML error. To delete all cache behaviors in an existing distribution, update the distribution configuration and include only an empty CacheBehaviors element. To add, change, or remove one or more cache behaviors, update the distribution configuration and specify all of the cache behaviors that you want to include in the updated distribution.", + "refs": { + "CacheBehaviorList$member": null + } + }, + "CacheBehaviorList": { + "base": null, + "refs": { + "CacheBehaviors$Items": "Optional: A complex type that contains cache behaviors for this distribution. If Quantity is 0, you can omit Items." + } + }, + "CacheBehaviors": { + "base": "A complex type that contains zero or more CacheBehavior elements.", + "refs": { + "DistributionConfig$CacheBehaviors": "A complex type that contains zero or more CacheBehavior elements.", + "DistributionSummary$CacheBehaviors": "A complex type that contains zero or more CacheBehavior elements." + } + }, + "CachedMethods": { + "base": "A complex type that controls whether CloudFront caches the response to requests using the specified HTTP methods. There are two choices: - CloudFront caches responses to GET and HEAD requests. - CloudFront caches responses to GET, HEAD, and OPTIONS requests. If you pick the second choice for your S3 Origin, you may need to forward Access-Control-Request-Method, Access-Control-Request-Headers and Origin headers for the responses to be cached correctly.", + "refs": { + "AllowedMethods$CachedMethods": null + } + }, + "CertificateSource": { + "base": null, + "refs": { + "ViewerCertificate$CertificateSource": "If you want viewers to use HTTPS to request your objects and you're using the CloudFront domain name of your distribution in your object URLs (for example, https://d111111abcdef8.cloudfront.net/logo.jpg), set to \"cloudfront\". If you want viewers to use HTTPS to request your objects and you're using an alternate domain name in your object URLs (for example, https://example.com/logo.jpg), set to \"iam\", and update the Certificate field with the IAM certificate identifier of the custom viewer certificate for this distribution."
+ } + }, + "CloudFrontOriginAccessIdentity": { + "base": "CloudFront origin access identity.", + "refs": { + "CreateCloudFrontOriginAccessIdentityResult$CloudFrontOriginAccessIdentity": "The origin access identity's information.", + "GetCloudFrontOriginAccessIdentityResult$CloudFrontOriginAccessIdentity": "The origin access identity's information.", + "UpdateCloudFrontOriginAccessIdentityResult$CloudFrontOriginAccessIdentity": "The origin access identity's information." + } + }, + "CloudFrontOriginAccessIdentityAlreadyExists": { + "base": "If the CallerReference is a value you already sent in a previous request to create an identity but the content of the CloudFrontOriginAccessIdentityConfig is different from the original request, CloudFront returns a CloudFrontOriginAccessIdentityAlreadyExists error.", + "refs": { + } + }, + "CloudFrontOriginAccessIdentityConfig": { + "base": "Origin access identity configuration.", + "refs": { + "CloudFrontOriginAccessIdentity$CloudFrontOriginAccessIdentityConfig": "The current configuration information for the identity.", + "CreateCloudFrontOriginAccessIdentityRequest$CloudFrontOriginAccessIdentityConfig": "The origin access identity's configuration information.", + "GetCloudFrontOriginAccessIdentityConfigResult$CloudFrontOriginAccessIdentityConfig": "The origin access identity's configuration information.", + "UpdateCloudFrontOriginAccessIdentityRequest$CloudFrontOriginAccessIdentityConfig": "The identity's configuration information." + } + }, + "CloudFrontOriginAccessIdentityInUse": { + "base": null, + "refs": { + } + }, + "CloudFrontOriginAccessIdentityList": { + "base": "The CloudFrontOriginAccessIdentityList type.", + "refs": { + "ListCloudFrontOriginAccessIdentitiesResult$CloudFrontOriginAccessIdentityList": "The CloudFrontOriginAccessIdentityList type." + } + }, + "CloudFrontOriginAccessIdentitySummary": { + "base": "Summary of the information about a CloudFront origin access identity.", + "refs": { + "CloudFrontOriginAccessIdentitySummaryList$member": null + } + }, + "CloudFrontOriginAccessIdentitySummaryList": { + "base": null, + "refs": { + "CloudFrontOriginAccessIdentityList$Items": "A complex type that contains one CloudFrontOriginAccessIdentitySummary element for each origin access identity that was created by the current AWS account." + } + }, + "CookieNameList": { + "base": null, + "refs": { + "CookieNames$Items": "Optional: A complex type that contains whitelisted cookies for this cache behavior. If Quantity is 0, you can omit Items." + } + }, + "CookieNames": { + "base": "A complex type that specifies the whitelisted cookies, if any, that you want CloudFront to forward to your origin that is associated with this cache behavior.", + "refs": { + "CookiePreference$WhitelistedNames": "A complex type that specifies the whitelisted cookies, if any, that you want CloudFront to forward to your origin that is associated with this cache behavior." + } + }, + "CookiePreference": { + "base": "A complex type that specifies the cookie preferences associated with this cache behavior.", + "refs": { + "ForwardedValues$Cookies": "A complex type that specifies how CloudFront handles cookies." 
+ } + }, + "CreateCloudFrontOriginAccessIdentityRequest": { + "base": "The request to create a new origin access identity.", + "refs": { + } + }, + "CreateCloudFrontOriginAccessIdentityResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "CreateDistributionRequest": { + "base": "The request to create a new distribution.", + "refs": { + } + }, + "CreateDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "CreateInvalidationRequest": { + "base": "The request to create an invalidation.", + "refs": { + } + }, + "CreateInvalidationResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "CreateStreamingDistributionRequest": { + "base": "The request to create a new streaming distribution.", + "refs": { + } + }, + "CreateStreamingDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "CustomErrorResponse": { + "base": "A complex type that describes how you'd prefer CloudFront to respond to requests that result in either a 4xx or 5xx response. You can control whether a custom error page should be displayed, what the desired response code should be for this error page and how long should the error response be cached by CloudFront. If you don't want to specify any custom error responses, include only an empty CustomErrorResponses element. To delete all custom error responses in an existing distribution, update the distribution configuration and include only an empty CustomErrorResponses element. To add, change, or remove one or more custom error responses, update the distribution configuration and specify all of the custom error responses that you want to include in the updated distribution.", + "refs": { + "CustomErrorResponseList$member": null + } + }, + "CustomErrorResponseList": { + "base": null, + "refs": { + "CustomErrorResponses$Items": "Optional: A complex type that contains custom error responses for this distribution. If Quantity is 0, you can omit Items." + } + }, + "CustomErrorResponses": { + "base": "A complex type that contains zero or more CustomErrorResponse elements.", + "refs": { + "DistributionConfig$CustomErrorResponses": "A complex type that contains zero or more CustomErrorResponse elements.", + "DistributionSummary$CustomErrorResponses": "A complex type that contains zero or more CustomErrorResponses elements." + } + }, + "CustomOriginConfig": { + "base": "A customer origin.", + "refs": { + "Origin$CustomOriginConfig": "A complex type that contains information about a custom origin. If the origin is an Amazon S3 bucket, use the S3OriginConfig element instead." 
+ } + }, + "DefaultCacheBehavior": { + "base": "A complex type that describes the default cache behavior if you do not specify a CacheBehavior element or if files don't match any of the values of PathPattern in CacheBehavior elements.You must create exactly one default cache behavior.", + "refs": { + "DistributionConfig$DefaultCacheBehavior": "A complex type that describes the default cache behavior if you do not specify a CacheBehavior element or if files don't match any of the values of PathPattern in CacheBehavior elements.You must create exactly one default cache behavior.", + "DistributionSummary$DefaultCacheBehavior": "A complex type that describes the default cache behavior if you do not specify a CacheBehavior element or if files don't match any of the values of PathPattern in CacheBehavior elements.You must create exactly one default cache behavior." + } + }, + "DeleteCloudFrontOriginAccessIdentityRequest": { + "base": "The request to delete a origin access identity.", + "refs": { + } + }, + "DeleteDistributionRequest": { + "base": "The request to delete a distribution.", + "refs": { + } + }, + "DeleteStreamingDistributionRequest": { + "base": "The request to delete a streaming distribution.", + "refs": { + } + }, + "Distribution": { + "base": "A distribution.", + "refs": { + "CreateDistributionResult$Distribution": "The distribution's information.", + "GetDistributionResult$Distribution": "The distribution's information.", + "UpdateDistributionResult$Distribution": "The distribution's information." + } + }, + "DistributionAlreadyExists": { + "base": "The caller reference you attempted to create the distribution with is associated with another distribution.", + "refs": { + } + }, + "DistributionConfig": { + "base": "A distribution Configuration.", + "refs": { + "CreateDistributionRequest$DistributionConfig": "The distribution's configuration information.", + "Distribution$DistributionConfig": "The current configuration information for the distribution.", + "GetDistributionConfigResult$DistributionConfig": "The distribution's configuration information.", + "UpdateDistributionRequest$DistributionConfig": "The distribution's configuration information." + } + }, + "DistributionList": { + "base": "A distribution list.", + "refs": { + "ListDistributionsByWebACLIdResult$DistributionList": "The DistributionList type.", + "ListDistributionsResult$DistributionList": "The DistributionList type." + } + }, + "DistributionNotDisabled": { + "base": null, + "refs": { + } + }, + "DistributionSummary": { + "base": "A summary of the information for an Amazon CloudFront distribution.", + "refs": { + "DistributionSummaryList$member": null + } + }, + "DistributionSummaryList": { + "base": null, + "refs": { + "DistributionList$Items": "A complex type that contains one DistributionSummary element for each distribution that was created by the current AWS account." + } + }, + "ForwardedValues": { + "base": "A complex type that specifies how CloudFront handles query strings, cookies and headers.", + "refs": { + "CacheBehavior$ForwardedValues": "A complex type that specifies how CloudFront handles query strings, cookies and headers.", + "DefaultCacheBehavior$ForwardedValues": "A complex type that specifies how CloudFront handles query strings, cookies and headers." + } + }, + "GeoRestriction": { + "base": "A complex type that controls the countries in which your content is distributed. For more information about geo restriction, go to Customizing Error Responses in the Amazon CloudFront Developer Guide. 
CloudFront determines the location of your users using MaxMind GeoIP databases. For information about the accuracy of these databases, see How accurate are your GeoIP databases? on the MaxMind website.", + "refs": { + "Restrictions$GeoRestriction": null + } + }, + "GeoRestrictionType": { + "base": null, + "refs": { + "GeoRestriction$RestrictionType": "The method that you want to use to restrict distribution of your content by country: - none: No geo restriction is enabled, meaning access to content is not restricted by client geo location. - blacklist: The Location elements specify the countries in which you do not want CloudFront to distribute your content. - whitelist: The Location elements specify the countries in which you want CloudFront to distribute your content." + } + }, + "GetCloudFrontOriginAccessIdentityConfigRequest": { + "base": "The request to get an origin access identity's configuration.", + "refs": { + } + }, + "GetCloudFrontOriginAccessIdentityConfigResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetCloudFrontOriginAccessIdentityRequest": { + "base": "The request to get an origin access identity's information.", + "refs": { + } + }, + "GetCloudFrontOriginAccessIdentityResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetDistributionConfigRequest": { + "base": "The request to get a distribution configuration.", + "refs": { + } + }, + "GetDistributionConfigResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetDistributionRequest": { + "base": "The request to get a distribution's information.", + "refs": { + } + }, + "GetDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetInvalidationRequest": { + "base": "The request to get an invalidation's information.", + "refs": { + } + }, + "GetInvalidationResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetStreamingDistributionConfigRequest": { + "base": "The request to get a streaming distribution configuration.", + "refs": { + } + }, + "GetStreamingDistributionConfigResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetStreamingDistributionRequest": { + "base": "The request to get a streaming distribution's information.", + "refs": { + } + }, + "GetStreamingDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "HeaderList": { + "base": null, + "refs": { + "Headers$Items": "Optional: A complex type that contains a Name element for each header that you want CloudFront to forward to the origin and to vary on for this cache behavior. If Quantity is 0, omit Items." + } + }, + "Headers": { + "base": "A complex type that specifies the headers that you want CloudFront to forward to the origin for this cache behavior. For the headers that you specify, CloudFront also caches separate versions of a given object based on the header values in viewer requests; this is known as varying on headers. For example, suppose viewer requests for logo.jpg contain a custom Product header that has a value of either Acme or Apex, and you configure CloudFront to vary on the Product header.
CloudFront forwards the Product header to the origin and caches the response from the origin once for each header value.", + "refs": { + "ForwardedValues$Headers": "A complex type that specifies the Headers, if any, that you want CloudFront to vary upon for this cache behavior." + } + }, + "IllegalUpdate": { + "base": "Origin and CallerReference cannot be updated.", + "refs": { + } + }, + "InconsistentQuantities": { + "base": "The value of Quantity and the size of Items do not match.", + "refs": { + } + }, + "InvalidArgument": { + "base": "The argument is invalid.", + "refs": { + } + }, + "InvalidDefaultRootObject": { + "base": "The default root object file name is too big or contains an invalid character.", + "refs": { + } + }, + "InvalidErrorCode": { + "base": null, + "refs": { + } + }, + "InvalidForwardCookies": { + "base": "Your request contains a forward cookies option that doesn't match the expectation for the whitelisted list of cookie names. Either a list of cookie names has been specified when not allowed, or a list of cookie names is missing when expected.", + "refs": { + } + }, + "InvalidGeoRestrictionParameter": { + "base": null, + "refs": { + } + }, + "InvalidHeadersForS3Origin": { + "base": null, + "refs": { + } + }, + "InvalidIfMatchVersion": { + "base": "The If-Match version is missing or not valid for the distribution.", + "refs": { + } + }, + "InvalidLocationCode": { + "base": null, + "refs": { + } + }, + "InvalidMinimumProtocolVersion": { + "base": null, + "refs": { + } + }, + "InvalidOrigin": { + "base": "The Amazon S3 origin server specified does not refer to a valid Amazon S3 bucket.", + "refs": { + } + }, + "InvalidOriginAccessIdentity": { + "base": "The origin access identity is not valid or doesn't exist.", + "refs": { + } + }, + "InvalidProtocolSettings": { + "base": "You cannot specify SSLv3 as the minimum protocol version if you want to support only clients that support Server Name Indication (SNI).", + "refs": { + } + }, + "InvalidRelativePath": { + "base": "The relative path is too big, is not URL-encoded, or does not begin with a slash (/).", + "refs": { + } + }, + "InvalidRequiredProtocol": { + "base": "This operation requires the HTTPS protocol. Ensure that you specify the HTTPS protocol in your request, or omit the RequiredProtocols element from your distribution configuration.", + "refs": { + } + }, + "InvalidResponseCode": { + "base": null, + "refs": { + } + }, + "InvalidTTLOrder": { + "base": null, + "refs": { + } + }, + "InvalidViewerCertificate": { + "base": null, + "refs": { + } + }, + "InvalidWebACLId": { + "base": null, + "refs": { + } + }, + "Invalidation": { + "base": "An invalidation.", + "refs": { + "CreateInvalidationResult$Invalidation": "The invalidation's information.", + "GetInvalidationResult$Invalidation": "The invalidation's information." + } + }, + "InvalidationBatch": { + "base": "An invalidation batch.", + "refs": { + "CreateInvalidationRequest$InvalidationBatch": "The batch information for the invalidation.", + "Invalidation$InvalidationBatch": "The current invalidation information for the batch request." + } + }, + "InvalidationList": { + "base": "An invalidation list.", + "refs": { + "ListInvalidationsResult$InvalidationList": "Information about invalidation batches."
+ } + }, + "InvalidationSummary": { + "base": "Summary of an invalidation request.", + "refs": { + "InvalidationSummaryList$member": null + } + }, + "InvalidationSummaryList": { + "base": null, + "refs": { + "InvalidationList$Items": "A complex type that contains one InvalidationSummary element for each invalidation batch that was created by the current AWS account." + } + }, + "ItemSelection": { + "base": null, + "refs": { + "CookiePreference$Forward": "Use this element to specify whether you want CloudFront to forward cookies to the origin that is associated with this cache behavior. You can specify all, none or whitelist. If you choose All, CloudFront forwards all cookies regardless of how many your application uses." + } + }, + "KeyPairIdList": { + "base": null, + "refs": { + "KeyPairIds$Items": "A complex type that lists the active CloudFront key pairs, if any, that are associated with AwsAccountNumber." + } + }, + "KeyPairIds": { + "base": "A complex type that lists the active CloudFront key pairs, if any, that are associated with AwsAccountNumber.", + "refs": { + "Signer$KeyPairIds": "A complex type that lists the active CloudFront key pairs, if any, that are associated with AwsAccountNumber." + } + }, + "ListCloudFrontOriginAccessIdentitiesRequest": { + "base": "The request to list origin access identities.", + "refs": { + } + }, + "ListCloudFrontOriginAccessIdentitiesResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "ListDistributionsByWebACLIdRequest": { + "base": "The request to list distributions that are associated with a specified AWS WAF web ACL.", + "refs": { + } + }, + "ListDistributionsByWebACLIdResult": { + "base": "The response to a request to list the distributions that are associated with a specified AWS WAF web ACL.", + "refs": { + } + }, + "ListDistributionsRequest": { + "base": "The request to list your distributions.", + "refs": { + } + }, + "ListDistributionsResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "ListInvalidationsRequest": { + "base": "The request to list invalidations.", + "refs": { + } + }, + "ListInvalidationsResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "ListStreamingDistributionsRequest": { + "base": "The request to list your streaming distributions.", + "refs": { + } + }, + "ListStreamingDistributionsResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "LocationList": { + "base": null, + "refs": { + "GeoRestriction$Items": "A complex type that contains a Location element for each country in which you want CloudFront either to distribute your content (whitelist) or not distribute your content (blacklist). The Location element is a two-letter, uppercase country code for a country that you want to include in your blacklist or whitelist. Include one Location element for each country. CloudFront and MaxMind both use ISO 3166 country codes. For the current list of countries and the corresponding codes, see ISO 3166-1-alpha-2 code on the International Organization for Standardization website. You can also refer to the country list in the CloudFront console, which includes both country names and codes." + } + }, + "LoggingConfig": { + "base": "A complex type that controls whether access logs are written for the distribution.", + "refs": { + "DistributionConfig$Logging": "A complex type that controls whether access logs are written for the distribution." 
+ } + }, + "Method": { + "base": null, + "refs": { + "MethodsList$member": null + } + }, + "MethodsList": { + "base": null, + "refs": { + "AllowedMethods$Items": "A complex type that contains the HTTP methods that you want CloudFront to process and forward to your origin.", + "CachedMethods$Items": "A complex type that contains the HTTP methods that you want CloudFront to cache responses to." + } + }, + "MinimumProtocolVersion": { + "base": null, + "refs": { + "ViewerCertificate$MinimumProtocolVersion": "Specify the minimum version of the SSL protocol that you want CloudFront to use, SSLv3 or TLSv1, for HTTPS connections. CloudFront will serve your objects only to browsers or devices that support at least the SSL version that you specify. The TLSv1 protocol is more secure, so we recommend that you specify SSLv3 only if your users are using browsers or devices that don't support TLSv1. If you're using a custom certificate (if you specify a value for IAMCertificateId) and if you're using dedicated IP (if you specify vip for SSLSupportMethod), you can choose SSLv3 or TLSv1 as the MinimumProtocolVersion. If you're using a custom certificate (if you specify a value for IAMCertificateId) and if you're using SNI (if you specify sni-only for SSLSupportMethod), you must specify TLSv1 for MinimumProtocolVersion." + } + }, + "MissingBody": { + "base": "This operation requires a body. Ensure that the body is present and the Content-Type header is set.", + "refs": { + } + }, + "NoSuchCloudFrontOriginAccessIdentity": { + "base": "The specified origin access identity does not exist.", + "refs": { + } + }, + "NoSuchDistribution": { + "base": "The specified distribution does not exist.", + "refs": { + } + }, + "NoSuchInvalidation": { + "base": "The specified invalidation does not exist.", + "refs": { + } + }, + "NoSuchOrigin": { + "base": "No origin exists with the specified Origin Id.", + "refs": { + } + }, + "NoSuchStreamingDistribution": { + "base": "The specified streaming distribution does not exist.", + "refs": { + } + }, + "Origin": { + "base": "A complex type that describes the Amazon S3 bucket or the HTTP server (for example, a web server) from which CloudFront gets your files.You must create at least one origin.", + "refs": { + "OriginList$member": null + } + }, + "OriginList": { + "base": null, + "refs": { + "Origins$Items": "A complex type that contains origins for this distribution." + } + }, + "OriginProtocolPolicy": { + "base": null, + "refs": { + "CustomOriginConfig$OriginProtocolPolicy": "The origin protocol policy to apply to your origin." + } + }, + "Origins": { + "base": "A complex type that contains information about origins for this distribution.", + "refs": { + "DistributionConfig$Origins": "A complex type that contains information about origins for this distribution.", + "DistributionSummary$Origins": "A complex type that contains information about origins for this distribution." + } + }, + "PathList": { + "base": null, + "refs": { + "Paths$Items": "A complex type that contains a list of the objects that you want to invalidate." + } + }, + "Paths": { + "base": "A complex type that contains information about the objects that you want to invalidate.", + "refs": { + "InvalidationBatch$Paths": "The path of the object to invalidate. The path is relative to the distribution and must begin with a slash (/). You must enclose each invalidation object with the Path element tags. 
If the path includes non-ASCII characters or unsafe characters as defined in RFC 1738 (http://www.ietf.org/rfc/rfc1738.txt), URL encode those characters. Do not URL encode any other characters in the path, or CloudFront will not invalidate the old version of the updated object." + } + }, + "PreconditionFailed": { + "base": "The precondition given in one or more of the request-header fields evaluated to false.", + "refs": { + } + }, + "PriceClass": { + "base": null, + "refs": { + "DistributionConfig$PriceClass": "A complex type that contains information about price class for this distribution.", + "DistributionSummary$PriceClass": null, + "StreamingDistributionConfig$PriceClass": "A complex type that contains information about price class for this streaming distribution.", + "StreamingDistributionSummary$PriceClass": null + } + }, + "Restrictions": { + "base": "A complex type that identifies ways in which you want to restrict distribution of your content.", + "refs": { + "DistributionConfig$Restrictions": null, + "DistributionSummary$Restrictions": null + } + }, + "S3Origin": { + "base": "A complex type that contains information about the Amazon S3 bucket from which you want CloudFront to get your media files for distribution.", + "refs": { + "StreamingDistributionConfig$S3Origin": "A complex type that contains information about the Amazon S3 bucket from which you want CloudFront to get your media files for distribution.", + "StreamingDistributionSummary$S3Origin": "A complex type that contains information about the Amazon S3 bucket from which you want CloudFront to get your media files for distribution." + } + }, + "S3OriginConfig": { + "base": "A complex type that contains information about the Amazon S3 origin. If the origin is a custom origin, use the CustomOriginConfig element instead.", + "refs": { + "Origin$S3OriginConfig": "A complex type that contains information about the Amazon S3 origin. If the origin is a custom origin, use the CustomOriginConfig element instead." + } + }, + "SSLSupportMethod": { + "base": null, + "refs": { + "ViewerCertificate$SSLSupportMethod": "If you specify a value for IAMCertificateId, you must also specify how you want CloudFront to serve HTTPS requests. Valid values are vip and sni-only. If you specify vip, CloudFront uses dedicated IP addresses for your content and can respond to HTTPS requests from any viewer. However, you must request permission to use this feature, and you incur additional monthly charges. If you specify sni-only, CloudFront can only respond to HTTPS requests from viewers that support Server Name Indication (SNI). All modern browsers support SNI, but some browsers still in use don't support SNI. Do not specify a value for SSLSupportMethod if you specified true for CloudFrontDefaultCertificate." + } + }, + "Signer": { + "base": "A complex type that lists the AWS accounts that were included in the TrustedSigners complex type, as well as their active CloudFront key pair IDs, if any.", + "refs": { + "SignerList$member": null + } + }, + "SignerList": { + "base": null, + "refs": { + "ActiveTrustedSigners$Items": "A complex type that contains one Signer complex type for each unique trusted signer that is specified in the TrustedSigners complex type, including trusted signers in the default cache behavior and in all of the other cache behaviors."
+ } + }, + "StreamingDistribution": { + "base": "A streaming distribution.", + "refs": { + "CreateStreamingDistributionResult$StreamingDistribution": "The streaming distribution's information.", + "GetStreamingDistributionResult$StreamingDistribution": "The streaming distribution's information.", + "UpdateStreamingDistributionResult$StreamingDistribution": "The streaming distribution's information." + } + }, + "StreamingDistributionAlreadyExists": { + "base": null, + "refs": { + } + }, + "StreamingDistributionConfig": { + "base": "The configuration for the streaming distribution.", + "refs": { + "CreateStreamingDistributionRequest$StreamingDistributionConfig": "The streaming distribution's configuration information.", + "GetStreamingDistributionConfigResult$StreamingDistributionConfig": "The streaming distribution's configuration information.", + "StreamingDistribution$StreamingDistributionConfig": "The current configuration information for the streaming distribution.", + "UpdateStreamingDistributionRequest$StreamingDistributionConfig": "The streaming distribution's configuration information." + } + }, + "StreamingDistributionList": { + "base": "A streaming distribution list.", + "refs": { + "ListStreamingDistributionsResult$StreamingDistributionList": "The StreamingDistributionList type." + } + }, + "StreamingDistributionNotDisabled": { + "base": null, + "refs": { + } + }, + "StreamingDistributionSummary": { + "base": "A summary of the information for an Amazon CloudFront streaming distribution.", + "refs": { + "StreamingDistributionSummaryList$member": null + } + }, + "StreamingDistributionSummaryList": { + "base": null, + "refs": { + "StreamingDistributionList$Items": "A complex type that contains one StreamingDistributionSummary element for each distribution that was created by the current AWS account." + } + }, + "StreamingLoggingConfig": { + "base": "A complex type that controls whether access logs are written for this streaming distribution.", + "refs": { + "StreamingDistributionConfig$Logging": "A complex type that controls whether access logs are written for the streaming distribution." 
+ } + }, + "TooManyCacheBehaviors": { + "base": "You cannot create anymore cache behaviors for the distribution.", + "refs": { + } + }, + "TooManyCertificates": { + "base": "You cannot create anymore custom ssl certificates.", + "refs": { + } + }, + "TooManyCloudFrontOriginAccessIdentities": { + "base": "Processing your request would cause you to exceed the maximum number of origin access identities allowed.", + "refs": { + } + }, + "TooManyCookieNamesInWhiteList": { + "base": "Your request contains more cookie names in the whitelist than are allowed per cache behavior.", + "refs": { + } + }, + "TooManyDistributionCNAMEs": { + "base": "Your request contains more CNAMEs than are allowed per distribution.", + "refs": { + } + }, + "TooManyDistributions": { + "base": "Processing your request would cause you to exceed the maximum number of distributions allowed.", + "refs": { + } + }, + "TooManyHeadersInForwardedValues": { + "base": null, + "refs": { + } + }, + "TooManyInvalidationsInProgress": { + "base": "You have exceeded the maximum number of allowable InProgress invalidation batch requests, or invalidation objects.", + "refs": { + } + }, + "TooManyOrigins": { + "base": "You cannot create anymore origins for the distribution.", + "refs": { + } + }, + "TooManyStreamingDistributionCNAMEs": { + "base": null, + "refs": { + } + }, + "TooManyStreamingDistributions": { + "base": "Processing your request would cause you to exceed the maximum number of streaming distributions allowed.", + "refs": { + } + }, + "TooManyTrustedSigners": { + "base": "Your request contains more trusted signers than are allowed per distribution.", + "refs": { + } + }, + "TrustedSignerDoesNotExist": { + "base": "One or more of your trusted signers do not exist.", + "refs": { + } + }, + "TrustedSigners": { + "base": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.", + "refs": { + "CacheBehavior$TrustedSigners": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. 
To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.", + "DefaultCacheBehavior$TrustedSigners": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.", + "StreamingDistributionConfig$TrustedSigners": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.", + "StreamingDistributionSummary$TrustedSigners": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution." 
+ } + }, + "UpdateCloudFrontOriginAccessIdentityRequest": { + "base": "The request to update an origin access identity.", + "refs": { + } + }, + "UpdateCloudFrontOriginAccessIdentityResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "UpdateDistributionRequest": { + "base": "The request to update a distribution.", + "refs": { + } + }, + "UpdateDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "UpdateStreamingDistributionRequest": { + "base": "The request to update a streaming distribution.", + "refs": { + } + }, + "UpdateStreamingDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "ViewerCertificate": { + "base": "A complex type that contains information about viewer certificates for this distribution.", + "refs": { + "DistributionConfig$ViewerCertificate": null, + "DistributionSummary$ViewerCertificate": null + } + }, + "ViewerProtocolPolicy": { + "base": null, + "refs": { + "CacheBehavior$ViewerProtocolPolicy": "Use this element to specify the protocol that users can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern. If you want CloudFront to allow end users to use any available protocol, specify allow-all. If you want CloudFront to require HTTPS, specify https. If you want CloudFront to respond to an HTTP request with an HTTP status code of 301 (Moved Permanently) and the HTTPS URL, specify redirect-to-https. The viewer then resubmits the request using the HTTPS URL.", + "DefaultCacheBehavior$ViewerProtocolPolicy": "Use this element to specify the protocol that users can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern. If you want CloudFront to allow end users to use any available protocol, specify allow-all. If you want CloudFront to require HTTPS, specify https. If you want CloudFront to respond to an HTTP request with an HTTP status code of 301 (Moved Permanently) and the HTTPS URL, specify redirect-to-https. The viewer then resubmits the request using the HTTPS URL." + } + }, + "boolean": { + "base": null, + "refs": { + "ActiveTrustedSigners$Enabled": "Each active trusted signer.", + "CacheBehavior$SmoothStreaming": "Indicates whether you want to distribute media files in Microsoft Smooth Streaming format using the origin that is associated with this cache behavior. If so, specify true; if not, specify false.", + "CacheBehavior$Compress": "Whether you want CloudFront to automatically compress content for web requests that include Accept-Encoding: gzip in the request header. If so, specify true; if not, specify false. CloudFront compresses files larger than 1000 bytes and less than 1 megabyte for both Amazon S3 and custom origins. When a CloudFront edge location is unusually busy, some files might not be compressed. The value of the Content-Type header must be on the list of file types that CloudFront will compress. For the current list, see Serving Compressed Content in the Amazon CloudFront Developer Guide. If you configure CloudFront to compress content, CloudFront removes the ETag response header from the objects that it compresses. The ETag header indicates that the version in a CloudFront edge cache is identical to the version on the origin server, but after compression the two versions are no longer identical. 
As a result, for compressed objects, CloudFront can't use the ETag header to determine whether an expired object in the CloudFront edge cache is still the latest version.", + "CloudFrontOriginAccessIdentityList$IsTruncated": "A flag that indicates whether more origin access identities remain to be listed. If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more items in the list.", + "DefaultCacheBehavior$SmoothStreaming": "Indicates whether you want to distribute media files in Microsoft Smooth Streaming format using the origin that is associated with this cache behavior. If so, specify true; if not, specify false.", + "DefaultCacheBehavior$Compress": "Whether you want CloudFront to automatically compress content for web requests that include Accept-Encoding: gzip in the request header. If so, specify true; if not, specify false. CloudFront compresses files larger than 1000 bytes and less than 1 megabyte for both Amazon S3 and custom origins. When a CloudFront edge location is unusually busy, some files might not be compressed. The value of the Content-Type header must be on the list of file types that CloudFront will compress. For the current list, see Serving Compressed Content in the Amazon CloudFront Developer Guide. If you configure CloudFront to compress content, CloudFront removes the ETag response header from the objects that it compresses. The ETag header indicates that the version in a CloudFront edge cache is identical to the version on the origin server, but after compression the two versions are no longer identical. As a result, for compressed objects, CloudFront can't use the ETag header to determine whether an expired object in the CloudFront edge cache is still the latest version.", + "DistributionConfig$Enabled": "Whether the distribution is enabled to accept end user requests for content.", + "DistributionList$IsTruncated": "A flag that indicates whether more distributions remain to be listed. If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more distributions in the list.", + "DistributionSummary$Enabled": "Whether the distribution is enabled to accept end user requests for content.", + "ForwardedValues$QueryString": "Indicates whether you want CloudFront to forward query strings to the origin that is associated with this cache behavior. If so, specify true; if not, specify false.", + "InvalidationList$IsTruncated": "A flag that indicates whether more invalidation batch requests remain to be listed. If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more invalidation batches in the list.", + "LoggingConfig$Enabled": "Specifies whether you want CloudFront to save access logs to an Amazon S3 bucket. If you do not want to enable logging when you create a distribution or if you want to disable logging for an existing distribution, specify false for Enabled, and specify empty Bucket and Prefix elements. If you specify false for Enabled but you specify values for Bucket, Prefix and IncludeCookies, the values are automatically deleted.", + "LoggingConfig$IncludeCookies": "Specifies whether you want CloudFront to include cookies in access logs. If so, specify true for IncludeCookies. If you choose to include cookies in logs, CloudFront logs all cookies regardless of how you configure the cache behaviors for this distribution.
If you do not want to include cookies when you create a distribution or if you want to disable cookie logging for an existing distribution, specify false for IncludeCookies.", + "StreamingDistributionConfig$Enabled": "Whether the streaming distribution is enabled to accept end user requests for content.", + "StreamingDistributionList$IsTruncated": "A flag that indicates whether more streaming distributions remain to be listed. If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more distributions in the list.", + "StreamingDistributionSummary$Enabled": "Whether the distribution is enabled to accept end user requests for content.", + "StreamingLoggingConfig$Enabled": "Specifies whether you want CloudFront to save access logs to an Amazon S3 bucket. If you do not want to enable logging when you create a streaming distribution or if you want to disable logging for an existing streaming distribution, specify false for Enabled, and specify empty Bucket and Prefix elements. If you specify false for Enabled but you specify values for Bucket and Prefix, the values are automatically deleted.", + "TrustedSigners$Enabled": "Specifies whether you want to require end users to use signed URLs to access the files specified by PathPattern and TargetOriginId.", + "ViewerCertificate$CloudFrontDefaultCertificate": "Note: this field is deprecated. Please use \"cloudfront\" as CertificateSource and omit specifying a Certificate. If you want viewers to use HTTPS to request your objects and you're using the CloudFront domain name of your distribution in your object URLs (for example, https://d111111abcdef8.cloudfront.net/logo.jpg), set to true. Omit this value if you are setting an IAMCertificateId." + } + }, + "integer": { + "base": null, + "refs": { + "ActiveTrustedSigners$Quantity": "The number of unique trusted signers included in all cache behaviors. For example, if three cache behaviors all list the same three AWS accounts, the value of Quantity for ActiveTrustedSigners will be 3.", + "Aliases$Quantity": "The number of CNAMEs, if any, for this distribution.", + "AllowedMethods$Quantity": "The number of HTTP methods that you want CloudFront to forward to your origin. Valid values are 2 (for GET and HEAD requests), 3 (for GET, HEAD and OPTIONS requests) and 7 (for GET, HEAD, OPTIONS, PUT, PATCH, POST, and DELETE requests).", + "CacheBehaviors$Quantity": "The number of cache behaviors for this distribution.", + "CachedMethods$Quantity": "The number of HTTP methods for which you want CloudFront to cache responses. Valid values are 2 (for caching responses to GET and HEAD requests) and 3 (for caching responses to GET, HEAD, and OPTIONS requests).", + "CloudFrontOriginAccessIdentityList$MaxItems": "The value you provided for the MaxItems request parameter.", + "CloudFrontOriginAccessIdentityList$Quantity": "The number of CloudFront origin access identities that were created by the current AWS account.", + "CookieNames$Quantity": "The number of whitelisted cookies for this cache behavior.", + "CustomErrorResponse$ErrorCode": "The 4xx or 5xx HTTP status code that you want to customize.
For a list of HTTP status codes that you can customize, see the CloudFront documentation.", + "CustomErrorResponses$Quantity": "The number of custom error responses for this distribution.", + "CustomOriginConfig$HTTPPort": "The HTTP port the custom origin listens on.", + "CustomOriginConfig$HTTPSPort": "The HTTPS port the custom origin listens on.", + "Distribution$InProgressInvalidationBatches": "The number of invalidation batches currently in progress.", + "DistributionList$MaxItems": "The value you provided for the MaxItems request parameter.", + "DistributionList$Quantity": "The number of distributions that were created by the current AWS account.", + "GeoRestriction$Quantity": "When geo restriction is enabled, this is the number of countries in your whitelist or blacklist. Otherwise, when it is not enabled, Quantity is 0, and you can omit Items.", + "Headers$Quantity": "The number of different headers that you want CloudFront to forward to the origin and to vary on for this cache behavior. The maximum number of headers that you can specify by name is 10. If you want CloudFront to forward all headers to the origin and vary on all of them, specify 1 for Quantity and * for Name. If you don't want CloudFront to forward any additional headers to the origin or to vary on any headers, specify 0 for Quantity and omit Items.", + "InvalidationList$MaxItems": "The value you provided for the MaxItems request parameter.", + "InvalidationList$Quantity": "The number of invalidation batches that were created by the current AWS account.", + "KeyPairIds$Quantity": "The number of active CloudFront key pairs for AwsAccountNumber.", + "Origins$Quantity": "The number of origins for this distribution.", + "Paths$Quantity": "The number of objects that you want to invalidate.", + "StreamingDistributionList$MaxItems": "The value you provided for the MaxItems request parameter.", + "StreamingDistributionList$Quantity": "The number of streaming distributions that were created by the current AWS account.", + "TrustedSigners$Quantity": "The number of trusted signers for this cache behavior." + } + }, + "long": { + "base": null, + "refs": { + "CacheBehavior$MinTTL": "The minimum amount of time that you want objects to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated. You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "CacheBehavior$DefaultTTL": "If you don't configure your origin to add a Cache-Control max-age directive or an Expires header, DefaultTTL is the default amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin does not add HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "CacheBehavior$MaxTTL": "The maximum amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin adds HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects.
You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "CustomErrorResponse$ErrorCachingMinTTL": "The minimum amount of time you want HTTP error codes to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated. You can specify a value from 0 to 31,536,000.", + "DefaultCacheBehavior$MinTTL": "The minimum amount of time that you want objects to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated. You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "DefaultCacheBehavior$DefaultTTL": "If you don't configure your origin to add a Cache-Control max-age directive or an Expires header, DefaultTTL is the default amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin does not add HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "DefaultCacheBehavior$MaxTTL": "The maximum amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin adds HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. You can specify a value from 0 to 3,153,600,000 seconds (100 years)." + } + }, + "string": { + "base": null, + "refs": { + "AccessDenied$Message": null, + "AliasList$member": null, + "AwsAccountNumberList$member": null, + "BatchTooLarge$Message": null, + "CNAMEAlreadyExists$Message": null, + "CacheBehavior$PathPattern": "The pattern (for example, images/*.jpg) that specifies which requests you want this cache behavior to apply to. When CloudFront receives an end-user request, the requested path is compared with path patterns in the order in which cache behaviors are listed in the distribution. The path pattern for the default cache behavior is * and cannot be changed. If the request for an object does not match the path pattern for any cache behaviors, CloudFront applies the behavior in the default cache behavior.", + "CacheBehavior$TargetOriginId": "The value of ID for the origin that you want CloudFront to route requests to when a request matches the path pattern either for a cache behavior or for the default cache behavior.", + "CloudFrontOriginAccessIdentity$Id": "The ID for the origin access identity. For example: E74FTE3AJFJ256A.", + "CloudFrontOriginAccessIdentity$S3CanonicalUserId": "The Amazon S3 canonical user ID for the origin access identity, which you use when giving the origin access identity read permission to an object in Amazon S3.", + "CloudFrontOriginAccessIdentityAlreadyExists$Message": null, + "CloudFrontOriginAccessIdentityConfig$CallerReference": "A unique number that ensures the request can't be replayed. If the CallerReference is new (no matter the content of the CloudFrontOriginAccessIdentityConfig object), a new origin access identity is created. If the CallerReference is a value you already sent in a previous request to create an identity, and the content of the CloudFrontOriginAccessIdentityConfig is identical to the original request (ignoring white space), the response includes the same information returned to the original request.
If the CallerReference is a value you already sent in a previous request to create an identity but the content of the CloudFrontOriginAccessIdentityConfig is different from the original request, CloudFront returns a CloudFrontOriginAccessIdentityAlreadyExists error.", + "CloudFrontOriginAccessIdentityConfig$Comment": "Any comments you want to include about the origin access identity.", + "CloudFrontOriginAccessIdentityInUse$Message": null, + "CloudFrontOriginAccessIdentityList$Marker": "The value you provided for the Marker request parameter.", + "CloudFrontOriginAccessIdentityList$NextMarker": "If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your origin access identities where they left off.", + "CloudFrontOriginAccessIdentitySummary$Id": "The ID for the origin access identity. For example: E74FTE3AJFJ256A.", + "CloudFrontOriginAccessIdentitySummary$S3CanonicalUserId": "The Amazon S3 canonical user ID for the origin access identity, which you use when giving the origin access identity read permission to an object in Amazon S3.", + "CloudFrontOriginAccessIdentitySummary$Comment": "The comment for this origin access identity, as originally specified when created.", + "CookieNameList$member": null, + "CreateCloudFrontOriginAccessIdentityResult$Location": "The fully qualified URI of the new origin access identity just created. For example: https://cloudfront.amazonaws.com/2010-11-01/origin-access-identity/cloudfront/E74FTE3AJFJ256A.", + "CreateCloudFrontOriginAccessIdentityResult$ETag": "The current version of the origin access identity created.", + "CreateDistributionResult$Location": "The fully qualified URI of the new distribution resource just created. For example: https://cloudfront.amazonaws.com/2010-11-01/distribution/EDFDVBD632BHDS5.", + "CreateDistributionResult$ETag": "The current version of the distribution created.", + "CreateInvalidationRequest$DistributionId": "The distribution's id.", + "CreateInvalidationResult$Location": "The fully qualified URI of the distribution and invalidation batch request, including the Invalidation ID.", + "CreateStreamingDistributionResult$Location": "The fully qualified URI of the new streaming distribution resource just created. For example: https://cloudfront.amazonaws.com/2010-11-01/streaming-distribution/EGTXBD79H29TRA8.", + "CreateStreamingDistributionResult$ETag": "The current version of the streaming distribution created.", + "CustomErrorResponse$ResponsePagePath": "The path of the custom error page (for example, /custom_404.html). The path is relative to the distribution and must begin with a slash (/). If the path includes any non-ASCII characters or unsafe characters as defined in RFC 1738 (http://www.ietf.org/rfc/rfc1738.txt), URL encode those characters. Do not URL encode any other characters in the path, or CloudFront will not return the custom error page to the viewer.", + "CustomErrorResponse$ResponseCode": "The HTTP status code that you want CloudFront to return with the custom error page to the viewer.
For a list of HTTP status codes that you can replace, see the CloudFront documentation.", + "DefaultCacheBehavior$TargetOriginId": "The value of ID for the origin that you want CloudFront to route requests to when a request matches the path pattern either for a cache behavior or for the default cache behavior.", + "DeleteCloudFrontOriginAccessIdentityRequest$Id": "The origin access identity's id.", + "DeleteCloudFrontOriginAccessIdentityRequest$IfMatch": "The value of the ETag header you received from a previous GET or PUT request. For example: E2QWRUHAPOMQZL.", + "DeleteDistributionRequest$Id": "The distribution id.", + "DeleteDistributionRequest$IfMatch": "The value of the ETag header you received when you disabled the distribution. For example: E2QWRUHAPOMQZL.", + "DeleteStreamingDistributionRequest$Id": "The distribution id.", + "DeleteStreamingDistributionRequest$IfMatch": "The value of the ETag header you received when you disabled the streaming distribution. For example: E2QWRUHAPOMQZL.", + "Distribution$Id": "The identifier for the distribution. For example: EDFDVBD632BHDS5.", + "Distribution$Status": "This response element indicates the current status of the distribution. When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.", + "Distribution$DomainName": "The domain name corresponding to the distribution. For example: d604721fxaaqy9.cloudfront.net.", + "DistributionAlreadyExists$Message": null, + "DistributionConfig$CallerReference": "A unique number that ensures the request can't be replayed. If the CallerReference is new (no matter the content of the DistributionConfig object), a new distribution is created. If the CallerReference is a value you already sent in a previous request to create a distribution, and the content of the DistributionConfig is identical to the original request (ignoring white space), the response includes the same information returned to the original request. If the CallerReference is a value you already sent in a previous request to create a distribution but the content of the DistributionConfig is different from the original request, CloudFront returns a DistributionAlreadyExists error.", + "DistributionConfig$DefaultRootObject": "The object that you want CloudFront to return (for example, index.html) when an end user requests the root URL for your distribution (http://www.example.com) instead of an object in your distribution (http://www.example.com/index.html). Specifying a default root object avoids exposing the contents of your distribution. If you don't want to specify a default root object when you create a distribution, include an empty DefaultRootObject element. To delete the default root object from an existing distribution, update the distribution configuration and include an empty DefaultRootObject element.
To replace the default root object, update the distribution configuration and specify the new object.", + "DistributionConfig$Comment": "Any comments you want to include about the distribution.", + "DistributionConfig$WebACLId": "(Optional) If you're using AWS WAF to filter CloudFront requests, the Id of the AWS WAF web ACL that is associated with the distribution.", + "DistributionList$Marker": "The value you provided for the Marker request parameter.", + "DistributionList$NextMarker": "If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your distributions where they left off.", + "DistributionNotDisabled$Message": null, + "DistributionSummary$Id": "The identifier for the distribution. For example: EDFDVBD632BHDS5.", + "DistributionSummary$Status": "This response element indicates the current status of the distribution. When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.", + "DistributionSummary$DomainName": "The domain name corresponding to the distribution. For example: d604721fxaaqy9.cloudfront.net.", + "DistributionSummary$Comment": "The comment originally specified when this distribution was created.", + "DistributionSummary$WebACLId": "The Web ACL Id (if any) associated with the distribution.", + "GetCloudFrontOriginAccessIdentityConfigRequest$Id": "The identity's id.", + "GetCloudFrontOriginAccessIdentityConfigResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "GetCloudFrontOriginAccessIdentityRequest$Id": "The identity's id.", + "GetCloudFrontOriginAccessIdentityResult$ETag": "The current version of the origin access identity's information. For example: E2QWRUHAPOMQZL.", + "GetDistributionConfigRequest$Id": "The distribution's id.", + "GetDistributionConfigResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "GetDistributionRequest$Id": "The distribution's id.", + "GetDistributionResult$ETag": "The current version of the distribution's information. For example: E2QWRUHAPOMQZL.", + "GetInvalidationRequest$DistributionId": "The distribution's id.", + "GetInvalidationRequest$Id": "The invalidation's id.", + "GetStreamingDistributionConfigRequest$Id": "The streaming distribution's id.", + "GetStreamingDistributionConfigResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "GetStreamingDistributionRequest$Id": "The streaming distribution's id.", + "GetStreamingDistributionResult$ETag": "The current version of the streaming distribution's information. 
For example: E2QWRUHAPOMQZL.", + "HeaderList$member": null, + "IllegalUpdate$Message": null, + "InconsistentQuantities$Message": null, + "InvalidArgument$Message": null, + "InvalidDefaultRootObject$Message": null, + "InvalidErrorCode$Message": null, + "InvalidForwardCookies$Message": null, + "InvalidGeoRestrictionParameter$Message": null, + "InvalidHeadersForS3Origin$Message": null, + "InvalidIfMatchVersion$Message": null, + "InvalidLocationCode$Message": null, + "InvalidMinimumProtocolVersion$Message": null, + "InvalidOrigin$Message": null, + "InvalidOriginAccessIdentity$Message": null, + "InvalidProtocolSettings$Message": null, + "InvalidRelativePath$Message": null, + "InvalidRequiredProtocol$Message": null, + "InvalidResponseCode$Message": null, + "InvalidTTLOrder$Message": null, + "InvalidViewerCertificate$Message": null, + "InvalidWebACLId$Message": null, + "Invalidation$Id": "The identifier for the invalidation request. For example: IDFDVBD632BHDS5.", + "Invalidation$Status": "The status of the invalidation request. When the invalidation batch is finished, the status is Completed.", + "InvalidationBatch$CallerReference": "A unique name that ensures the request can't be replayed. If the CallerReference is new (no matter the content of the Path object), a new invalidation batch is created. If the CallerReference is a value you already sent in a previous request to create an invalidation batch, and the content of each Path element is identical to the original request, the response includes the same information returned to the original request. If the CallerReference is a value you already sent in a previous request to create an invalidation batch but the content of any Path is different from the original request, CloudFront returns an InvalidationBatchAlreadyExists error.", + "InvalidationList$Marker": "The value you provided for the Marker request parameter.", + "InvalidationList$NextMarker": "If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your invalidation batches where they left off.", + "InvalidationSummary$Id": "The unique ID for an invalidation request.", + "InvalidationSummary$Status": "The status of an invalidation request.", + "KeyPairIdList$member": null, + "ListCloudFrontOriginAccessIdentitiesRequest$Marker": "Use this when paginating results to indicate where to begin in your list of origin access identities. The results include identities in the list that occur after the marker. To get the next page of results, set the Marker to the value of the NextMarker from the current page's response (which is also the ID of the last identity on that page).", + "ListCloudFrontOriginAccessIdentitiesRequest$MaxItems": "The maximum number of origin access identities you want in the response body.", + "ListDistributionsByWebACLIdRequest$Marker": "Use Marker and MaxItems to control pagination of results. If you have more than MaxItems distributions that satisfy the request, the response includes a NextMarker element. To get the next page of results, submit another request. For the value of Marker, specify the value of NextMarker from the last response. (For the first request, omit Marker.)", + "ListDistributionsByWebACLIdRequest$MaxItems": "The maximum number of distributions that you want CloudFront to return in the response body. The maximum and default values are both 100.", + "ListDistributionsByWebACLIdRequest$WebACLId": "The Id of the AWS WAF web ACL for which you want to list the associated distributions. 
If you specify \"null\" for the Id, the request returns a list of the distributions that aren't associated with a web ACL.", + "ListDistributionsRequest$Marker": "Use Marker and MaxItems to control pagination of results. If you have more than MaxItems distributions that satisfy the request, the response includes a NextMarker element. To get the next page of results, submit another request. For the value of Marker, specify the value of NextMarker from the last response. (For the first request, omit Marker.)", + "ListDistributionsRequest$MaxItems": "The maximum number of distributions that you want CloudFront to return in the response body. The maximum and default values are both 100.", + "ListInvalidationsRequest$DistributionId": "The distribution's id.", + "ListInvalidationsRequest$Marker": "Use this parameter when paginating results to indicate where to begin in your list of invalidation batches. Because the results are returned in decreasing order from most recent to oldest, the most recent results are on the first page, the second page will contain earlier results, and so on. To get the next page of results, set the Marker to the value of the NextMarker from the current page's response. This value is the same as the ID of the last invalidation batch on that page.", + "ListInvalidationsRequest$MaxItems": "The maximum number of invalidation batches you want in the response body.", + "ListStreamingDistributionsRequest$Marker": "Use this when paginating results to indicate where to begin in your list of streaming distributions. The results include distributions in the list that occur after the marker. To get the next page of results, set the Marker to the value of the NextMarker from the current page's response (which is also the ID of the last distribution on that page).", + "ListStreamingDistributionsRequest$MaxItems": "The maximum number of streaming distributions you want in the response body.", + "LocationList$member": null, + "LoggingConfig$Bucket": "The Amazon S3 bucket to store the access logs in, for example, myawslogbucket.s3.amazonaws.com.", + "LoggingConfig$Prefix": "An optional string that you want CloudFront to prefix to the access log filenames for this distribution, for example, myprefix/. If you want to enable logging, but you do not want to specify a prefix, you still must include an empty Prefix element in the Logging element.", + "MissingBody$Message": null, + "NoSuchCloudFrontOriginAccessIdentity$Message": null, + "NoSuchDistribution$Message": null, + "NoSuchInvalidation$Message": null, + "NoSuchOrigin$Message": null, + "NoSuchStreamingDistribution$Message": null, + "Origin$Id": "A unique identifier for the origin. The value of Id must be unique within the distribution. You use the value of Id when you create a cache behavior. The Id identifies the origin that CloudFront routes a request to when the request matches the path pattern for that cache behavior.", + "Origin$DomainName": "Amazon S3 origins: The DNS name of the Amazon S3 bucket from which you want CloudFront to get objects for this origin, for example, myawsbucket.s3.amazonaws.com. Custom origins: The DNS domain name for the HTTP server from which you want CloudFront to get objects for this origin, for example, www.example.com.", + "Origin$OriginPath": "An optional element that causes CloudFront to request your content from a directory in your Amazon S3 bucket or your custom origin. When you include the OriginPath element, specify the directory name, beginning with a /. 
CloudFront appends the directory name to the value of DomainName.", + "PathList$member": null, + "PreconditionFailed$Message": null, + "S3Origin$DomainName": "The DNS name of the S3 origin.", + "S3Origin$OriginAccessIdentity": "Your S3 origin's origin access identity.", + "S3OriginConfig$OriginAccessIdentity": "The CloudFront origin access identity to associate with the origin. Use an origin access identity to configure the origin so that end users can only access objects in an Amazon S3 bucket through CloudFront. If you want end users to be able to access objects using either the CloudFront URL or the Amazon S3 URL, specify an empty OriginAccessIdentity element. To delete the origin access identity from an existing distribution, update the distribution configuration and include an empty OriginAccessIdentity element. To replace the origin access identity, update the distribution configuration and specify the new origin access identity. Use the format origin-access-identity/cloudfront/Id where Id is the value that CloudFront returned in the Id element when you created the origin access identity.", + "Signer$AwsAccountNumber": "Specifies an AWS account that can create signed URLs. Values: self, which indicates that the AWS account that was used to create the distribution can create signed URLs, or an AWS account number. Omit the dashes in the account number.", + "StreamingDistribution$Id": "The identifier for the streaming distribution. For example: EGTXBD79H29TRA8.", + "StreamingDistribution$Status": "The current status of the streaming distribution. When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.", + "StreamingDistribution$DomainName": "The domain name corresponding to the streaming distribution. For example: s5c39gqb8ow64r.cloudfront.net.", + "StreamingDistributionAlreadyExists$Message": null, + "StreamingDistributionConfig$CallerReference": "A unique number that ensures the request can't be replayed. If the CallerReference is new (no matter the content of the StreamingDistributionConfig object), a new streaming distribution is created. If the CallerReference is a value you already sent in a previous request to create a streaming distribution, and the content of the StreamingDistributionConfig is identical to the original request (ignoring white space), the response includes the same information returned to the original request. If the CallerReference is a value you already sent in a previous request to create a streaming distribution but the content of the StreamingDistributionConfig is different from the original request, CloudFront returns a StreamingDistributionAlreadyExists error.", + "StreamingDistributionConfig$Comment": "Any comments you want to include about the streaming distribution.", + "StreamingDistributionList$Marker": "The value you provided for the Marker request parameter.", + "StreamingDistributionList$NextMarker": "If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your streaming distributions where they left off.", + "StreamingDistributionNotDisabled$Message": null, + "StreamingDistributionSummary$Id": "The identifier for the distribution. For example: EDFDVBD632BHDS5.", + "StreamingDistributionSummary$Status": "Indicates the current status of the distribution. 
When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.", + "StreamingDistributionSummary$DomainName": "The domain name corresponding to the distribution. For example: d604721fxaaqy9.cloudfront.net.", + "StreamingDistributionSummary$Comment": "The comment originally specified when this distribution was created.", + "StreamingLoggingConfig$Bucket": "The Amazon S3 bucket to store the access logs in, for example, myawslogbucket.s3.amazonaws.com.", + "StreamingLoggingConfig$Prefix": "An optional string that you want CloudFront to prefix to the access log filenames for this streaming distribution, for example, myprefix/. If you want to enable logging, but you do not want to specify a prefix, you still must include an empty Prefix element in the Logging element.", + "TooManyCacheBehaviors$Message": null, + "TooManyCertificates$Message": null, + "TooManyCloudFrontOriginAccessIdentities$Message": null, + "TooManyCookieNamesInWhiteList$Message": null, + "TooManyDistributionCNAMEs$Message": null, + "TooManyDistributions$Message": null, + "TooManyHeadersInForwardedValues$Message": null, + "TooManyInvalidationsInProgress$Message": null, + "TooManyOrigins$Message": null, + "TooManyStreamingDistributionCNAMEs$Message": null, + "TooManyStreamingDistributions$Message": null, + "TooManyTrustedSigners$Message": null, + "TrustedSignerDoesNotExist$Message": null, + "UpdateCloudFrontOriginAccessIdentityRequest$Id": "The identity's id.", + "UpdateCloudFrontOriginAccessIdentityRequest$IfMatch": "The value of the ETag header you received when retrieving the identity's configuration. For example: E2QWRUHAPOMQZL.", + "UpdateCloudFrontOriginAccessIdentityResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "UpdateDistributionRequest$Id": "The distribution's id.", + "UpdateDistributionRequest$IfMatch": "The value of the ETag header you received when retrieving the distribution's configuration. For example: E2QWRUHAPOMQZL.", + "UpdateDistributionResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "UpdateStreamingDistributionRequest$Id": "The streaming distribution's id.", + "UpdateStreamingDistributionRequest$IfMatch": "The value of the ETag header you received when retrieving the streaming distribution's configuration. For example: E2QWRUHAPOMQZL.", + "UpdateStreamingDistributionResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "ViewerCertificate$Certificate": "If you want viewers to use HTTPS to request your objects and you're using an alternate domain name in your object URLs (for example, https://example.com/logo.jpg), set to the IAM certificate identifier of the custom viewer certificate for this distribution.", + "ViewerCertificate$IAMCertificateId": "Note: this field is deprecated. Please use \"iam\" as CertificateSource and specify the IAM certificate Id as the Certificate. If you want viewers to use HTTPS to request your objects and you're using an alternate domain name in your object URLs (for example, https://example.com/logo.jpg), specify the IAM certificate identifier of the custom viewer certificate for this distribution. Specify either this value or CloudFrontDefaultCertificate." 
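
The doc strings above describe the same optimistic-concurrency handshake in several places: every GET returns an ETag, and the matching update or delete must echo it back as If-Match or fail with PreconditionFailed. A minimal sketch of that flow against the cloudfront client generated from these models (the distribution ID is the placeholder EDFDVBD632BHDS5 from the example values above):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudfront"
)

func main() {
	svc := cloudfront.New(session.Must(session.NewSession()))

	// Placeholder distribution ID taken from the example values above.
	id := aws.String("EDFDVBD632BHDS5")

	// GET the current config; the response carries the ETag that the
	// doc strings above say must be echoed back on update or delete.
	cfg, err := svc.GetDistributionConfig(&cloudfront.GetDistributionConfigInput{Id: id})
	if err != nil {
		panic(err)
	}

	// Example mutation: disable the distribution (a prerequisite for
	// DeleteDistribution, per the DistributionNotDisabled error above).
	cfg.DistributionConfig.Enabled = aws.Bool(false)

	out, err := svc.UpdateDistribution(&cloudfront.UpdateDistributionInput{
		Id:                 id,
		IfMatch:            cfg.ETag, // a stale ETag fails with PreconditionFailed (HTTP 412)
		DistributionConfig: cfg.DistributionConfig,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("new ETag:", aws.StringValue(out.ETag))
}
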
+ } + }, + "timestamp": { + "base": null, + "refs": { + "Distribution$LastModifiedTime": "The date and time the distribution was last modified.", + "DistributionSummary$LastModifiedTime": "The date and time the distribution was last modified.", + "Invalidation$CreateTime": "The date and time the invalidation request was first made.", + "InvalidationSummary$CreateTime": null, + "StreamingDistribution$LastModifiedTime": "The date and time the distribution was last modified.", + "StreamingDistributionSummary$LastModifiedTime": "The date and time the distribution was last modified." + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-09-17/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-09-17/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-09-17/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-09-17/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-09-17/paginators-1.json new file mode 100644 index 000000000..51fbb907f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-09-17/paginators-1.json @@ -0,0 +1,32 @@ +{ + "pagination": { + "ListCloudFrontOriginAccessIdentities": { + "input_token": "Marker", + "output_token": "CloudFrontOriginAccessIdentityList.NextMarker", + "limit_key": "MaxItems", + "more_results": "CloudFrontOriginAccessIdentityList.IsTruncated", + "result_key": "CloudFrontOriginAccessIdentityList.Items" + }, + "ListDistributions": { + "input_token": "Marker", + "output_token": "DistributionList.NextMarker", + "limit_key": "MaxItems", + "more_results": "DistributionList.IsTruncated", + "result_key": "DistributionList.Items" + }, + "ListInvalidations": { + "input_token": "Marker", + "output_token": "InvalidationList.NextMarker", + "limit_key": "MaxItems", + "more_results": "InvalidationList.IsTruncated", + "result_key": "InvalidationList.Items" + }, + "ListStreamingDistributions": { + "input_token": "Marker", + "output_token": "StreamingDistributionList.NextMarker", + "limit_key": "MaxItems", + "more_results": "StreamingDistributionList.IsTruncated", + "result_key": "StreamingDistributionList.Items" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-09-17/waiters-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-09-17/waiters-2.json new file mode 100644 index 000000000..f6d3ba7bc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-09-17/waiters-2.json @@ -0,0 +1,47 @@ +{ + "version": 2, + "waiters": { + "DistributionDeployed": { + "delay": 60, + "operation": "GetDistribution", + "maxAttempts": 25, + "description": "Wait until a distribution is deployed.", + "acceptors": [ + { + "expected": "Deployed", + "matcher": "path", + "state": "success", + "argument": "Status" + } + ] + }, + "InvalidationCompleted": { + "delay": 20, + "operation": "GetInvalidation", + "maxAttempts": 30, + "description": "Wait until an invalidation has completed.", + "acceptors": [ + { + "expected": "Completed", + "matcher": "path", + "state": "success", + "argument": "Status" + } + ] + }, + "StreamingDistributionDeployed": { + "delay": 60, + "operation": "GetStreamingDistribution", + "maxAttempts": 25, + "description": "Wait until a streaming distribution is deployed.", + "acceptors": [ + { + "expected": 
"Deployed", + "matcher": "path", + "state": "success", + "argument": "Status" + } + ] + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-13/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-13/api-2.json new file mode 100644 index 000000000..8fbe7298b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-13/api-2.json @@ -0,0 +1,2216 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2016-01-13", + "endpointPrefix":"cloudfront", + "globalEndpoint":"cloudfront.amazonaws.com", + "protocol":"rest-xml", + "serviceAbbreviation":"CloudFront", + "serviceFullName":"Amazon CloudFront", + "signatureVersion":"v4" + }, + "operations":{ + "CreateCloudFrontOriginAccessIdentity":{ + "name":"CreateCloudFrontOriginAccessIdentity2016_01_13", + "http":{ + "method":"POST", + "requestUri":"/2016-01-13/origin-access-identity/cloudfront", + "responseCode":201 + }, + "input":{"shape":"CreateCloudFrontOriginAccessIdentityRequest"}, + "output":{"shape":"CreateCloudFrontOriginAccessIdentityResult"}, + "errors":[ + {"shape":"CloudFrontOriginAccessIdentityAlreadyExists"}, + {"shape":"MissingBody"}, + {"shape":"TooManyCloudFrontOriginAccessIdentities"}, + {"shape":"InvalidArgument"}, + {"shape":"InconsistentQuantities"} + ] + }, + "CreateDistribution":{ + "name":"CreateDistribution2016_01_13", + "http":{ + "method":"POST", + "requestUri":"/2016-01-13/distribution", + "responseCode":201 + }, + "input":{"shape":"CreateDistributionRequest"}, + "output":{"shape":"CreateDistributionResult"}, + "errors":[ + {"shape":"CNAMEAlreadyExists"}, + {"shape":"DistributionAlreadyExists"}, + {"shape":"InvalidOrigin"}, + {"shape":"InvalidOriginAccessIdentity"}, + {"shape":"AccessDenied"}, + {"shape":"TooManyTrustedSigners"}, + {"shape":"TrustedSignerDoesNotExist"}, + {"shape":"InvalidViewerCertificate"}, + {"shape":"InvalidMinimumProtocolVersion"}, + {"shape":"MissingBody"}, + {"shape":"TooManyDistributionCNAMEs"}, + {"shape":"TooManyDistributions"}, + {"shape":"InvalidDefaultRootObject"}, + {"shape":"InvalidRelativePath"}, + {"shape":"InvalidErrorCode"}, + {"shape":"InvalidResponseCode"}, + {"shape":"InvalidArgument"}, + {"shape":"InvalidRequiredProtocol"}, + {"shape":"NoSuchOrigin"}, + {"shape":"TooManyOrigins"}, + {"shape":"TooManyCacheBehaviors"}, + {"shape":"TooManyCookieNamesInWhiteList"}, + {"shape":"InvalidForwardCookies"}, + {"shape":"TooManyHeadersInForwardedValues"}, + {"shape":"InvalidHeadersForS3Origin"}, + {"shape":"InconsistentQuantities"}, + {"shape":"TooManyCertificates"}, + {"shape":"InvalidLocationCode"}, + {"shape":"InvalidGeoRestrictionParameter"}, + {"shape":"InvalidProtocolSettings"}, + {"shape":"InvalidTTLOrder"}, + {"shape":"InvalidWebACLId"}, + {"shape":"TooManyOriginCustomHeaders"} + ] + }, + "CreateInvalidation":{ + "name":"CreateInvalidation2016_01_13", + "http":{ + "method":"POST", + "requestUri":"/2016-01-13/distribution/{DistributionId}/invalidation", + "responseCode":201 + }, + "input":{"shape":"CreateInvalidationRequest"}, + "output":{"shape":"CreateInvalidationResult"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"MissingBody"}, + {"shape":"InvalidArgument"}, + {"shape":"NoSuchDistribution"}, + {"shape":"BatchTooLarge"}, + {"shape":"TooManyInvalidationsInProgress"}, + {"shape":"InconsistentQuantities"} + ] + }, + "CreateStreamingDistribution":{ + "name":"CreateStreamingDistribution2016_01_13", + "http":{ + "method":"POST", + "requestUri":"/2016-01-13/streaming-distribution", + "responseCode":201 + 
}, + "input":{"shape":"CreateStreamingDistributionRequest"}, + "output":{"shape":"CreateStreamingDistributionResult"}, + "errors":[ + {"shape":"CNAMEAlreadyExists"}, + {"shape":"StreamingDistributionAlreadyExists"}, + {"shape":"InvalidOrigin"}, + {"shape":"InvalidOriginAccessIdentity"}, + {"shape":"AccessDenied"}, + {"shape":"TooManyTrustedSigners"}, + {"shape":"TrustedSignerDoesNotExist"}, + {"shape":"MissingBody"}, + {"shape":"TooManyStreamingDistributionCNAMEs"}, + {"shape":"TooManyStreamingDistributions"}, + {"shape":"InvalidArgument"}, + {"shape":"InconsistentQuantities"} + ] + }, + "DeleteCloudFrontOriginAccessIdentity":{ + "name":"DeleteCloudFrontOriginAccessIdentity2016_01_13", + "http":{ + "method":"DELETE", + "requestUri":"/2016-01-13/origin-access-identity/cloudfront/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeleteCloudFrontOriginAccessIdentityRequest"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"NoSuchCloudFrontOriginAccessIdentity"}, + {"shape":"PreconditionFailed"}, + {"shape":"CloudFrontOriginAccessIdentityInUse"} + ] + }, + "DeleteDistribution":{ + "name":"DeleteDistribution2016_01_13", + "http":{ + "method":"DELETE", + "requestUri":"/2016-01-13/distribution/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeleteDistributionRequest"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"DistributionNotDisabled"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"NoSuchDistribution"}, + {"shape":"PreconditionFailed"} + ] + }, + "DeleteStreamingDistribution":{ + "name":"DeleteStreamingDistribution2016_01_13", + "http":{ + "method":"DELETE", + "requestUri":"/2016-01-13/streaming-distribution/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeleteStreamingDistributionRequest"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"StreamingDistributionNotDisabled"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"NoSuchStreamingDistribution"}, + {"shape":"PreconditionFailed"} + ] + }, + "GetCloudFrontOriginAccessIdentity":{ + "name":"GetCloudFrontOriginAccessIdentity2016_01_13", + "http":{ + "method":"GET", + "requestUri":"/2016-01-13/origin-access-identity/cloudfront/{Id}" + }, + "input":{"shape":"GetCloudFrontOriginAccessIdentityRequest"}, + "output":{"shape":"GetCloudFrontOriginAccessIdentityResult"}, + "errors":[ + {"shape":"NoSuchCloudFrontOriginAccessIdentity"}, + {"shape":"AccessDenied"} + ] + }, + "GetCloudFrontOriginAccessIdentityConfig":{ + "name":"GetCloudFrontOriginAccessIdentityConfig2016_01_13", + "http":{ + "method":"GET", + "requestUri":"/2016-01-13/origin-access-identity/cloudfront/{Id}/config" + }, + "input":{"shape":"GetCloudFrontOriginAccessIdentityConfigRequest"}, + "output":{"shape":"GetCloudFrontOriginAccessIdentityConfigResult"}, + "errors":[ + {"shape":"NoSuchCloudFrontOriginAccessIdentity"}, + {"shape":"AccessDenied"} + ] + }, + "GetDistribution":{ + "name":"GetDistribution2016_01_13", + "http":{ + "method":"GET", + "requestUri":"/2016-01-13/distribution/{Id}" + }, + "input":{"shape":"GetDistributionRequest"}, + "output":{"shape":"GetDistributionResult"}, + "errors":[ + {"shape":"NoSuchDistribution"}, + {"shape":"AccessDenied"} + ] + }, + "GetDistributionConfig":{ + "name":"GetDistributionConfig2016_01_13", + "http":{ + "method":"GET", + "requestUri":"/2016-01-13/distribution/{Id}/config" + }, + "input":{"shape":"GetDistributionConfigRequest"}, + "output":{"shape":"GetDistributionConfigResult"}, + "errors":[ + {"shape":"NoSuchDistribution"}, + {"shape":"AccessDenied"} + ] + }, + 
"GetInvalidation":{ + "name":"GetInvalidation2016_01_13", + "http":{ + "method":"GET", + "requestUri":"/2016-01-13/distribution/{DistributionId}/invalidation/{Id}" + }, + "input":{"shape":"GetInvalidationRequest"}, + "output":{"shape":"GetInvalidationResult"}, + "errors":[ + {"shape":"NoSuchInvalidation"}, + {"shape":"NoSuchDistribution"}, + {"shape":"AccessDenied"} + ] + }, + "GetStreamingDistribution":{ + "name":"GetStreamingDistribution2016_01_13", + "http":{ + "method":"GET", + "requestUri":"/2016-01-13/streaming-distribution/{Id}" + }, + "input":{"shape":"GetStreamingDistributionRequest"}, + "output":{"shape":"GetStreamingDistributionResult"}, + "errors":[ + {"shape":"NoSuchStreamingDistribution"}, + {"shape":"AccessDenied"} + ] + }, + "GetStreamingDistributionConfig":{ + "name":"GetStreamingDistributionConfig2016_01_13", + "http":{ + "method":"GET", + "requestUri":"/2016-01-13/streaming-distribution/{Id}/config" + }, + "input":{"shape":"GetStreamingDistributionConfigRequest"}, + "output":{"shape":"GetStreamingDistributionConfigResult"}, + "errors":[ + {"shape":"NoSuchStreamingDistribution"}, + {"shape":"AccessDenied"} + ] + }, + "ListCloudFrontOriginAccessIdentities":{ + "name":"ListCloudFrontOriginAccessIdentities2016_01_13", + "http":{ + "method":"GET", + "requestUri":"/2016-01-13/origin-access-identity/cloudfront" + }, + "input":{"shape":"ListCloudFrontOriginAccessIdentitiesRequest"}, + "output":{"shape":"ListCloudFrontOriginAccessIdentitiesResult"}, + "errors":[ + {"shape":"InvalidArgument"} + ] + }, + "ListDistributions":{ + "name":"ListDistributions2016_01_13", + "http":{ + "method":"GET", + "requestUri":"/2016-01-13/distribution" + }, + "input":{"shape":"ListDistributionsRequest"}, + "output":{"shape":"ListDistributionsResult"}, + "errors":[ + {"shape":"InvalidArgument"} + ] + }, + "ListDistributionsByWebACLId":{ + "name":"ListDistributionsByWebACLId2016_01_13", + "http":{ + "method":"GET", + "requestUri":"/2016-01-13/distributionsByWebACLId/{WebACLId}" + }, + "input":{"shape":"ListDistributionsByWebACLIdRequest"}, + "output":{"shape":"ListDistributionsByWebACLIdResult"}, + "errors":[ + {"shape":"InvalidArgument"}, + {"shape":"InvalidWebACLId"} + ] + }, + "ListInvalidations":{ + "name":"ListInvalidations2016_01_13", + "http":{ + "method":"GET", + "requestUri":"/2016-01-13/distribution/{DistributionId}/invalidation" + }, + "input":{"shape":"ListInvalidationsRequest"}, + "output":{"shape":"ListInvalidationsResult"}, + "errors":[ + {"shape":"InvalidArgument"}, + {"shape":"NoSuchDistribution"}, + {"shape":"AccessDenied"} + ] + }, + "ListStreamingDistributions":{ + "name":"ListStreamingDistributions2016_01_13", + "http":{ + "method":"GET", + "requestUri":"/2016-01-13/streaming-distribution" + }, + "input":{"shape":"ListStreamingDistributionsRequest"}, + "output":{"shape":"ListStreamingDistributionsResult"}, + "errors":[ + {"shape":"InvalidArgument"} + ] + }, + "UpdateCloudFrontOriginAccessIdentity":{ + "name":"UpdateCloudFrontOriginAccessIdentity2016_01_13", + "http":{ + "method":"PUT", + "requestUri":"/2016-01-13/origin-access-identity/cloudfront/{Id}/config" + }, + "input":{"shape":"UpdateCloudFrontOriginAccessIdentityRequest"}, + "output":{"shape":"UpdateCloudFrontOriginAccessIdentityResult"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"IllegalUpdate"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"MissingBody"}, + {"shape":"NoSuchCloudFrontOriginAccessIdentity"}, + {"shape":"PreconditionFailed"}, + {"shape":"InvalidArgument"}, + {"shape":"InconsistentQuantities"} + 
] + }, + "UpdateDistribution":{ + "name":"UpdateDistribution2016_01_13", + "http":{ + "method":"PUT", + "requestUri":"/2016-01-13/distribution/{Id}/config" + }, + "input":{"shape":"UpdateDistributionRequest"}, + "output":{"shape":"UpdateDistributionResult"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"CNAMEAlreadyExists"}, + {"shape":"IllegalUpdate"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"MissingBody"}, + {"shape":"NoSuchDistribution"}, + {"shape":"PreconditionFailed"}, + {"shape":"TooManyDistributionCNAMEs"}, + {"shape":"InvalidDefaultRootObject"}, + {"shape":"InvalidRelativePath"}, + {"shape":"InvalidErrorCode"}, + {"shape":"InvalidResponseCode"}, + {"shape":"InvalidArgument"}, + {"shape":"InvalidOriginAccessIdentity"}, + {"shape":"TooManyTrustedSigners"}, + {"shape":"TrustedSignerDoesNotExist"}, + {"shape":"InvalidViewerCertificate"}, + {"shape":"InvalidMinimumProtocolVersion"}, + {"shape":"InvalidRequiredProtocol"}, + {"shape":"NoSuchOrigin"}, + {"shape":"TooManyOrigins"}, + {"shape":"TooManyCacheBehaviors"}, + {"shape":"TooManyCookieNamesInWhiteList"}, + {"shape":"InvalidForwardCookies"}, + {"shape":"TooManyHeadersInForwardedValues"}, + {"shape":"InvalidHeadersForS3Origin"}, + {"shape":"InconsistentQuantities"}, + {"shape":"TooManyCertificates"}, + {"shape":"InvalidLocationCode"}, + {"shape":"InvalidGeoRestrictionParameter"}, + {"shape":"InvalidTTLOrder"}, + {"shape":"InvalidWebACLId"}, + {"shape":"TooManyOriginCustomHeaders"} + ] + }, + "UpdateStreamingDistribution":{ + "name":"UpdateStreamingDistribution2016_01_13", + "http":{ + "method":"PUT", + "requestUri":"/2016-01-13/streaming-distribution/{Id}/config" + }, + "input":{"shape":"UpdateStreamingDistributionRequest"}, + "output":{"shape":"UpdateStreamingDistributionResult"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"CNAMEAlreadyExists"}, + {"shape":"IllegalUpdate"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"MissingBody"}, + {"shape":"NoSuchStreamingDistribution"}, + {"shape":"PreconditionFailed"}, + {"shape":"TooManyStreamingDistributionCNAMEs"}, + {"shape":"InvalidArgument"}, + {"shape":"InvalidOriginAccessIdentity"}, + {"shape":"TooManyTrustedSigners"}, + {"shape":"TrustedSignerDoesNotExist"}, + {"shape":"InconsistentQuantities"} + ] + } + }, + "shapes":{ + "AccessDenied":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":403}, + "exception":true + }, + "ActiveTrustedSigners":{ + "type":"structure", + "required":[ + "Enabled", + "Quantity" + ], + "members":{ + "Enabled":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"SignerList"} + } + }, + "AliasList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"CNAME" + } + }, + "Aliases":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"AliasList"} + } + }, + "AllowedMethods":{ + "type":"structure", + "required":[ + "Quantity", + "Items" + ], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"MethodsList"}, + "CachedMethods":{"shape":"CachedMethods"} + } + }, + "AwsAccountNumberList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"AwsAccountNumber" + } + }, + "BatchTooLarge":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":413}, + "exception":true + }, + "CNAMEAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + 
"exception":true + }, + "CacheBehavior":{ + "type":"structure", + "required":[ + "PathPattern", + "TargetOriginId", + "ForwardedValues", + "TrustedSigners", + "ViewerProtocolPolicy", + "MinTTL" + ], + "members":{ + "PathPattern":{"shape":"string"}, + "TargetOriginId":{"shape":"string"}, + "ForwardedValues":{"shape":"ForwardedValues"}, + "TrustedSigners":{"shape":"TrustedSigners"}, + "ViewerProtocolPolicy":{"shape":"ViewerProtocolPolicy"}, + "MinTTL":{"shape":"long"}, + "AllowedMethods":{"shape":"AllowedMethods"}, + "SmoothStreaming":{"shape":"boolean"}, + "DefaultTTL":{"shape":"long"}, + "MaxTTL":{"shape":"long"}, + "Compress":{"shape":"boolean"} + } + }, + "CacheBehaviorList":{ + "type":"list", + "member":{ + "shape":"CacheBehavior", + "locationName":"CacheBehavior" + } + }, + "CacheBehaviors":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"CacheBehaviorList"} + } + }, + "CachedMethods":{ + "type":"structure", + "required":[ + "Quantity", + "Items" + ], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"MethodsList"} + } + }, + "CertificateSource":{ + "type":"string", + "enum":[ + "cloudfront", + "iam" + ] + }, + "CloudFrontOriginAccessIdentity":{ + "type":"structure", + "required":[ + "Id", + "S3CanonicalUserId" + ], + "members":{ + "Id":{"shape":"string"}, + "S3CanonicalUserId":{"shape":"string"}, + "CloudFrontOriginAccessIdentityConfig":{"shape":"CloudFrontOriginAccessIdentityConfig"} + } + }, + "CloudFrontOriginAccessIdentityAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "CloudFrontOriginAccessIdentityConfig":{ + "type":"structure", + "required":[ + "CallerReference", + "Comment" + ], + "members":{ + "CallerReference":{"shape":"string"}, + "Comment":{"shape":"string"} + } + }, + "CloudFrontOriginAccessIdentityInUse":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "CloudFrontOriginAccessIdentityList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{"shape":"string"}, + "NextMarker":{"shape":"string"}, + "MaxItems":{"shape":"integer"}, + "IsTruncated":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"CloudFrontOriginAccessIdentitySummaryList"} + } + }, + "CloudFrontOriginAccessIdentitySummary":{ + "type":"structure", + "required":[ + "Id", + "S3CanonicalUserId", + "Comment" + ], + "members":{ + "Id":{"shape":"string"}, + "S3CanonicalUserId":{"shape":"string"}, + "Comment":{"shape":"string"} + } + }, + "CloudFrontOriginAccessIdentitySummaryList":{ + "type":"list", + "member":{ + "shape":"CloudFrontOriginAccessIdentitySummary", + "locationName":"CloudFrontOriginAccessIdentitySummary" + } + }, + "CookieNameList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Name" + } + }, + "CookieNames":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"CookieNameList"} + } + }, + "CookiePreference":{ + "type":"structure", + "required":["Forward"], + "members":{ + "Forward":{"shape":"ItemSelection"}, + "WhitelistedNames":{"shape":"CookieNames"} + } + }, + "CreateCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "required":["CloudFrontOriginAccessIdentityConfig"], + "members":{ + "CloudFrontOriginAccessIdentityConfig":{ + 
"shape":"CloudFrontOriginAccessIdentityConfig", + "locationName":"CloudFrontOriginAccessIdentityConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2016-01-13/"} + } + }, + "payload":"CloudFrontOriginAccessIdentityConfig" + }, + "CreateCloudFrontOriginAccessIdentityResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentity":{"shape":"CloudFrontOriginAccessIdentity"}, + "Location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"CloudFrontOriginAccessIdentity" + }, + "CreateDistributionRequest":{ + "type":"structure", + "required":["DistributionConfig"], + "members":{ + "DistributionConfig":{ + "shape":"DistributionConfig", + "locationName":"DistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2016-01-13/"} + } + }, + "payload":"DistributionConfig" + }, + "CreateDistributionResult":{ + "type":"structure", + "members":{ + "Distribution":{"shape":"Distribution"}, + "Location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"Distribution" + }, + "CreateInvalidationRequest":{ + "type":"structure", + "required":[ + "DistributionId", + "InvalidationBatch" + ], + "members":{ + "DistributionId":{ + "shape":"string", + "location":"uri", + "locationName":"DistributionId" + }, + "InvalidationBatch":{ + "shape":"InvalidationBatch", + "locationName":"InvalidationBatch", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2016-01-13/"} + } + }, + "payload":"InvalidationBatch" + }, + "CreateInvalidationResult":{ + "type":"structure", + "members":{ + "Location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "Invalidation":{"shape":"Invalidation"} + }, + "payload":"Invalidation" + }, + "CreateStreamingDistributionRequest":{ + "type":"structure", + "required":["StreamingDistributionConfig"], + "members":{ + "StreamingDistributionConfig":{ + "shape":"StreamingDistributionConfig", + "locationName":"StreamingDistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2016-01-13/"} + } + }, + "payload":"StreamingDistributionConfig" + }, + "CreateStreamingDistributionResult":{ + "type":"structure", + "members":{ + "StreamingDistribution":{"shape":"StreamingDistribution"}, + "Location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"StreamingDistribution" + }, + "CustomErrorResponse":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"integer"}, + "ResponsePagePath":{"shape":"string"}, + "ResponseCode":{"shape":"string"}, + "ErrorCachingMinTTL":{"shape":"long"} + } + }, + "CustomErrorResponseList":{ + "type":"list", + "member":{ + "shape":"CustomErrorResponse", + "locationName":"CustomErrorResponse" + } + }, + "CustomErrorResponses":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"CustomErrorResponseList"} + } + }, + "CustomHeaders":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"OriginCustomHeadersList"} + } + }, + "CustomOriginConfig":{ + "type":"structure", + "required":[ + "HTTPPort", + "HTTPSPort", + "OriginProtocolPolicy" 
+ ], + "members":{ + "HTTPPort":{"shape":"integer"}, + "HTTPSPort":{"shape":"integer"}, + "OriginProtocolPolicy":{"shape":"OriginProtocolPolicy"}, + "OriginSslProtocols":{"shape":"OriginSslProtocols"} + } + }, + "DefaultCacheBehavior":{ + "type":"structure", + "required":[ + "TargetOriginId", + "ForwardedValues", + "TrustedSigners", + "ViewerProtocolPolicy", + "MinTTL" + ], + "members":{ + "TargetOriginId":{"shape":"string"}, + "ForwardedValues":{"shape":"ForwardedValues"}, + "TrustedSigners":{"shape":"TrustedSigners"}, + "ViewerProtocolPolicy":{"shape":"ViewerProtocolPolicy"}, + "MinTTL":{"shape":"long"}, + "AllowedMethods":{"shape":"AllowedMethods"}, + "SmoothStreaming":{"shape":"boolean"}, + "DefaultTTL":{"shape":"long"}, + "MaxTTL":{"shape":"long"}, + "Compress":{"shape":"boolean"} + } + }, + "DeleteCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + } + }, + "DeleteDistributionRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + } + }, + "DeleteStreamingDistributionRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + } + }, + "Distribution":{ + "type":"structure", + "required":[ + "Id", + "Status", + "LastModifiedTime", + "InProgressInvalidationBatches", + "DomainName", + "ActiveTrustedSigners", + "DistributionConfig" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "LastModifiedTime":{"shape":"timestamp"}, + "InProgressInvalidationBatches":{"shape":"integer"}, + "DomainName":{"shape":"string"}, + "ActiveTrustedSigners":{"shape":"ActiveTrustedSigners"}, + "DistributionConfig":{"shape":"DistributionConfig"} + } + }, + "DistributionAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "DistributionConfig":{ + "type":"structure", + "required":[ + "CallerReference", + "Origins", + "DefaultCacheBehavior", + "Comment", + "Enabled" + ], + "members":{ + "CallerReference":{"shape":"string"}, + "Aliases":{"shape":"Aliases"}, + "DefaultRootObject":{"shape":"string"}, + "Origins":{"shape":"Origins"}, + "DefaultCacheBehavior":{"shape":"DefaultCacheBehavior"}, + "CacheBehaviors":{"shape":"CacheBehaviors"}, + "CustomErrorResponses":{"shape":"CustomErrorResponses"}, + "Comment":{"shape":"string"}, + "Logging":{"shape":"LoggingConfig"}, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{"shape":"boolean"}, + "ViewerCertificate":{"shape":"ViewerCertificate"}, + "Restrictions":{"shape":"Restrictions"}, + "WebACLId":{"shape":"string"} + } + }, + "DistributionList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{"shape":"string"}, + "NextMarker":{"shape":"string"}, + "MaxItems":{"shape":"integer"}, + "IsTruncated":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"DistributionSummaryList"} + } + }, + "DistributionNotDisabled":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + 
"error":{"httpStatusCode":409}, + "exception":true + }, + "DistributionSummary":{ + "type":"structure", + "required":[ + "Id", + "Status", + "LastModifiedTime", + "DomainName", + "Aliases", + "Origins", + "DefaultCacheBehavior", + "CacheBehaviors", + "CustomErrorResponses", + "Comment", + "PriceClass", + "Enabled", + "ViewerCertificate", + "Restrictions", + "WebACLId" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "LastModifiedTime":{"shape":"timestamp"}, + "DomainName":{"shape":"string"}, + "Aliases":{"shape":"Aliases"}, + "Origins":{"shape":"Origins"}, + "DefaultCacheBehavior":{"shape":"DefaultCacheBehavior"}, + "CacheBehaviors":{"shape":"CacheBehaviors"}, + "CustomErrorResponses":{"shape":"CustomErrorResponses"}, + "Comment":{"shape":"string"}, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{"shape":"boolean"}, + "ViewerCertificate":{"shape":"ViewerCertificate"}, + "Restrictions":{"shape":"Restrictions"}, + "WebACLId":{"shape":"string"} + } + }, + "DistributionSummaryList":{ + "type":"list", + "member":{ + "shape":"DistributionSummary", + "locationName":"DistributionSummary" + } + }, + "ForwardedValues":{ + "type":"structure", + "required":[ + "QueryString", + "Cookies" + ], + "members":{ + "QueryString":{"shape":"boolean"}, + "Cookies":{"shape":"CookiePreference"}, + "Headers":{"shape":"Headers"} + } + }, + "GeoRestriction":{ + "type":"structure", + "required":[ + "RestrictionType", + "Quantity" + ], + "members":{ + "RestrictionType":{"shape":"GeoRestrictionType"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"LocationList"} + } + }, + "GeoRestrictionType":{ + "type":"string", + "enum":[ + "blacklist", + "whitelist", + "none" + ] + }, + "GetCloudFrontOriginAccessIdentityConfigRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetCloudFrontOriginAccessIdentityConfigResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentityConfig":{"shape":"CloudFrontOriginAccessIdentityConfig"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"CloudFrontOriginAccessIdentityConfig" + }, + "GetCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetCloudFrontOriginAccessIdentityResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentity":{"shape":"CloudFrontOriginAccessIdentity"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"CloudFrontOriginAccessIdentity" + }, + "GetDistributionConfigRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetDistributionConfigResult":{ + "type":"structure", + "members":{ + "DistributionConfig":{"shape":"DistributionConfig"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"DistributionConfig" + }, + "GetDistributionRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetDistributionResult":{ + "type":"structure", + "members":{ + "Distribution":{"shape":"Distribution"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"Distribution" + }, + 
"GetInvalidationRequest":{ + "type":"structure", + "required":[ + "DistributionId", + "Id" + ], + "members":{ + "DistributionId":{ + "shape":"string", + "location":"uri", + "locationName":"DistributionId" + }, + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetInvalidationResult":{ + "type":"structure", + "members":{ + "Invalidation":{"shape":"Invalidation"} + }, + "payload":"Invalidation" + }, + "GetStreamingDistributionConfigRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetStreamingDistributionConfigResult":{ + "type":"structure", + "members":{ + "StreamingDistributionConfig":{"shape":"StreamingDistributionConfig"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"StreamingDistributionConfig" + }, + "GetStreamingDistributionRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetStreamingDistributionResult":{ + "type":"structure", + "members":{ + "StreamingDistribution":{"shape":"StreamingDistribution"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"StreamingDistribution" + }, + "HeaderList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Name" + } + }, + "Headers":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"HeaderList"} + } + }, + "IllegalUpdate":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InconsistentQuantities":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidArgument":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidDefaultRootObject":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidErrorCode":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidForwardCookies":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidGeoRestrictionParameter":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidHeadersForS3Origin":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidIfMatchVersion":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidLocationCode":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidMinimumProtocolVersion":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidOrigin":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidOriginAccessIdentity":{ + "type":"structure", + 
"members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidProtocolSettings":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidRelativePath":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidRequiredProtocol":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidResponseCode":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidTTLOrder":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidViewerCertificate":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidWebACLId":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "Invalidation":{ + "type":"structure", + "required":[ + "Id", + "Status", + "CreateTime", + "InvalidationBatch" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "CreateTime":{"shape":"timestamp"}, + "InvalidationBatch":{"shape":"InvalidationBatch"} + } + }, + "InvalidationBatch":{ + "type":"structure", + "required":[ + "Paths", + "CallerReference" + ], + "members":{ + "Paths":{"shape":"Paths"}, + "CallerReference":{"shape":"string"} + } + }, + "InvalidationList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{"shape":"string"}, + "NextMarker":{"shape":"string"}, + "MaxItems":{"shape":"integer"}, + "IsTruncated":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"InvalidationSummaryList"} + } + }, + "InvalidationSummary":{ + "type":"structure", + "required":[ + "Id", + "CreateTime", + "Status" + ], + "members":{ + "Id":{"shape":"string"}, + "CreateTime":{"shape":"timestamp"}, + "Status":{"shape":"string"} + } + }, + "InvalidationSummaryList":{ + "type":"list", + "member":{ + "shape":"InvalidationSummary", + "locationName":"InvalidationSummary" + } + }, + "ItemSelection":{ + "type":"string", + "enum":[ + "none", + "whitelist", + "all" + ] + }, + "KeyPairIdList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"KeyPairId" + } + }, + "KeyPairIds":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"KeyPairIdList"} + } + }, + "ListCloudFrontOriginAccessIdentitiesRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListCloudFrontOriginAccessIdentitiesResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentityList":{"shape":"CloudFrontOriginAccessIdentityList"} + }, + "payload":"CloudFrontOriginAccessIdentityList" + }, + "ListDistributionsByWebACLIdRequest":{ + "type":"structure", + "required":["WebACLId"], + "members":{ + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + 
"locationName":"MaxItems" + }, + "WebACLId":{ + "shape":"string", + "location":"uri", + "locationName":"WebACLId" + } + } + }, + "ListDistributionsByWebACLIdResult":{ + "type":"structure", + "members":{ + "DistributionList":{"shape":"DistributionList"} + }, + "payload":"DistributionList" + }, + "ListDistributionsRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListDistributionsResult":{ + "type":"structure", + "members":{ + "DistributionList":{"shape":"DistributionList"} + }, + "payload":"DistributionList" + }, + "ListInvalidationsRequest":{ + "type":"structure", + "required":["DistributionId"], + "members":{ + "DistributionId":{ + "shape":"string", + "location":"uri", + "locationName":"DistributionId" + }, + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListInvalidationsResult":{ + "type":"structure", + "members":{ + "InvalidationList":{"shape":"InvalidationList"} + }, + "payload":"InvalidationList" + }, + "ListStreamingDistributionsRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListStreamingDistributionsResult":{ + "type":"structure", + "members":{ + "StreamingDistributionList":{"shape":"StreamingDistributionList"} + }, + "payload":"StreamingDistributionList" + }, + "LocationList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Location" + } + }, + "LoggingConfig":{ + "type":"structure", + "required":[ + "Enabled", + "IncludeCookies", + "Bucket", + "Prefix" + ], + "members":{ + "Enabled":{"shape":"boolean"}, + "IncludeCookies":{"shape":"boolean"}, + "Bucket":{"shape":"string"}, + "Prefix":{"shape":"string"} + } + }, + "Method":{ + "type":"string", + "enum":[ + "GET", + "HEAD", + "POST", + "PUT", + "PATCH", + "OPTIONS", + "DELETE" + ] + }, + "MethodsList":{ + "type":"list", + "member":{ + "shape":"Method", + "locationName":"Method" + } + }, + "MinimumProtocolVersion":{ + "type":"string", + "enum":[ + "SSLv3", + "TLSv1" + ] + }, + "MissingBody":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "NoSuchCloudFrontOriginAccessIdentity":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchDistribution":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchInvalidation":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchOrigin":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchStreamingDistribution":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "Origin":{ + "type":"structure", + "required":[ + "Id", + "DomainName" + ], + "members":{ + "Id":{"shape":"string"}, + "DomainName":{"shape":"string"}, + 
"OriginPath":{"shape":"string"}, + "CustomHeaders":{"shape":"CustomHeaders"}, + "S3OriginConfig":{"shape":"S3OriginConfig"}, + "CustomOriginConfig":{"shape":"CustomOriginConfig"} + } + }, + "OriginCustomHeader":{ + "type":"structure", + "required":[ + "HeaderName", + "HeaderValue" + ], + "members":{ + "HeaderName":{"shape":"string"}, + "HeaderValue":{"shape":"string"} + } + }, + "OriginCustomHeadersList":{ + "type":"list", + "member":{ + "shape":"OriginCustomHeader", + "locationName":"OriginCustomHeader" + } + }, + "OriginList":{ + "type":"list", + "member":{ + "shape":"Origin", + "locationName":"Origin" + }, + "min":1 + }, + "OriginProtocolPolicy":{ + "type":"string", + "enum":[ + "http-only", + "match-viewer", + "https-only" + ] + }, + "OriginSslProtocols":{ + "type":"structure", + "required":[ + "Quantity", + "Items" + ], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"SslProtocolsList"} + } + }, + "Origins":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"OriginList"} + } + }, + "PathList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Path" + } + }, + "Paths":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"PathList"} + } + }, + "PreconditionFailed":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":412}, + "exception":true + }, + "PriceClass":{ + "type":"string", + "enum":[ + "PriceClass_100", + "PriceClass_200", + "PriceClass_All" + ] + }, + "Restrictions":{ + "type":"structure", + "required":["GeoRestriction"], + "members":{ + "GeoRestriction":{"shape":"GeoRestriction"} + } + }, + "S3Origin":{ + "type":"structure", + "required":[ + "DomainName", + "OriginAccessIdentity" + ], + "members":{ + "DomainName":{"shape":"string"}, + "OriginAccessIdentity":{"shape":"string"} + } + }, + "S3OriginConfig":{ + "type":"structure", + "required":["OriginAccessIdentity"], + "members":{ + "OriginAccessIdentity":{"shape":"string"} + } + }, + "SSLSupportMethod":{ + "type":"string", + "enum":[ + "sni-only", + "vip" + ] + }, + "Signer":{ + "type":"structure", + "members":{ + "AwsAccountNumber":{"shape":"string"}, + "KeyPairIds":{"shape":"KeyPairIds"} + } + }, + "SignerList":{ + "type":"list", + "member":{ + "shape":"Signer", + "locationName":"Signer" + } + }, + "SslProtocol":{ + "type":"string", + "enum":[ + "SSLv3", + "TLSv1", + "TLSv1.1", + "TLSv1.2" + ] + }, + "SslProtocolsList":{ + "type":"list", + "member":{ + "shape":"SslProtocol", + "locationName":"SslProtocol" + } + }, + "StreamingDistribution":{ + "type":"structure", + "required":[ + "Id", + "Status", + "DomainName", + "ActiveTrustedSigners", + "StreamingDistributionConfig" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "LastModifiedTime":{"shape":"timestamp"}, + "DomainName":{"shape":"string"}, + "ActiveTrustedSigners":{"shape":"ActiveTrustedSigners"}, + "StreamingDistributionConfig":{"shape":"StreamingDistributionConfig"} + } + }, + "StreamingDistributionAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "StreamingDistributionConfig":{ + "type":"structure", + "required":[ + "CallerReference", + "S3Origin", + "Comment", + "TrustedSigners", + "Enabled" + ], + "members":{ + "CallerReference":{"shape":"string"}, + "S3Origin":{"shape":"S3Origin"}, + "Aliases":{"shape":"Aliases"}, + 
"Comment":{"shape":"string"}, + "Logging":{"shape":"StreamingLoggingConfig"}, + "TrustedSigners":{"shape":"TrustedSigners"}, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{"shape":"boolean"} + } + }, + "StreamingDistributionList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{"shape":"string"}, + "NextMarker":{"shape":"string"}, + "MaxItems":{"shape":"integer"}, + "IsTruncated":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"StreamingDistributionSummaryList"} + } + }, + "StreamingDistributionNotDisabled":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "StreamingDistributionSummary":{ + "type":"structure", + "required":[ + "Id", + "Status", + "LastModifiedTime", + "DomainName", + "S3Origin", + "Aliases", + "TrustedSigners", + "Comment", + "PriceClass", + "Enabled" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "LastModifiedTime":{"shape":"timestamp"}, + "DomainName":{"shape":"string"}, + "S3Origin":{"shape":"S3Origin"}, + "Aliases":{"shape":"Aliases"}, + "TrustedSigners":{"shape":"TrustedSigners"}, + "Comment":{"shape":"string"}, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{"shape":"boolean"} + } + }, + "StreamingDistributionSummaryList":{ + "type":"list", + "member":{ + "shape":"StreamingDistributionSummary", + "locationName":"StreamingDistributionSummary" + } + }, + "StreamingLoggingConfig":{ + "type":"structure", + "required":[ + "Enabled", + "Bucket", + "Prefix" + ], + "members":{ + "Enabled":{"shape":"boolean"}, + "Bucket":{"shape":"string"}, + "Prefix":{"shape":"string"} + } + }, + "TooManyCacheBehaviors":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyCertificates":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyCloudFrontOriginAccessIdentities":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyCookieNamesInWhiteList":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyDistributionCNAMEs":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyDistributions":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyHeadersInForwardedValues":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyInvalidationsInProgress":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyOriginCustomHeaders":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyOrigins":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyStreamingDistributionCNAMEs":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + 
"TooManyStreamingDistributions":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyTrustedSigners":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TrustedSignerDoesNotExist":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TrustedSigners":{ + "type":"structure", + "required":[ + "Enabled", + "Quantity" + ], + "members":{ + "Enabled":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"AwsAccountNumberList"} + } + }, + "UpdateCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "required":[ + "CloudFrontOriginAccessIdentityConfig", + "Id" + ], + "members":{ + "CloudFrontOriginAccessIdentityConfig":{ + "shape":"CloudFrontOriginAccessIdentityConfig", + "locationName":"CloudFrontOriginAccessIdentityConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2016-01-13/"} + }, + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "payload":"CloudFrontOriginAccessIdentityConfig" + }, + "UpdateCloudFrontOriginAccessIdentityResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentity":{"shape":"CloudFrontOriginAccessIdentity"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"CloudFrontOriginAccessIdentity" + }, + "UpdateDistributionRequest":{ + "type":"structure", + "required":[ + "DistributionConfig", + "Id" + ], + "members":{ + "DistributionConfig":{ + "shape":"DistributionConfig", + "locationName":"DistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2016-01-13/"} + }, + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "payload":"DistributionConfig" + }, + "UpdateDistributionResult":{ + "type":"structure", + "members":{ + "Distribution":{"shape":"Distribution"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"Distribution" + }, + "UpdateStreamingDistributionRequest":{ + "type":"structure", + "required":[ + "StreamingDistributionConfig", + "Id" + ], + "members":{ + "StreamingDistributionConfig":{ + "shape":"StreamingDistributionConfig", + "locationName":"StreamingDistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2016-01-13/"} + }, + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "payload":"StreamingDistributionConfig" + }, + "UpdateStreamingDistributionResult":{ + "type":"structure", + "members":{ + "StreamingDistribution":{"shape":"StreamingDistribution"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"StreamingDistribution" + }, + "ViewerCertificate":{ + "type":"structure", + "members":{ + "Certificate":{"shape":"string"}, + "CertificateSource":{"shape":"CertificateSource"}, + "SSLSupportMethod":{"shape":"SSLSupportMethod"}, + "MinimumProtocolVersion":{"shape":"MinimumProtocolVersion"}, + "IAMCertificateId":{ + "shape":"string", + "deprecated":true + }, + "CloudFrontDefaultCertificate":{ + 
"shape":"boolean", + "deprecated":true + } + } + }, + "ViewerProtocolPolicy":{ + "type":"string", + "enum":[ + "allow-all", + "https-only", + "redirect-to-https" + ] + }, + "boolean":{"type":"boolean"}, + "integer":{"type":"integer"}, + "long":{"type":"long"}, + "string":{"type":"string"}, + "timestamp":{"type":"timestamp"} + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-13/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-13/docs-2.json new file mode 100644 index 000000000..a9e293427 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-13/docs-2.json @@ -0,0 +1,1219 @@ +{ + "version": "2.0", + "service": null, + "operations": { + "CreateCloudFrontOriginAccessIdentity": "Create a new origin access identity.", + "CreateDistribution": "Create a new distribution.", + "CreateInvalidation": "Create a new invalidation.", + "CreateStreamingDistribution": "Create a new streaming distribution.", + "DeleteCloudFrontOriginAccessIdentity": "Delete an origin access identity.", + "DeleteDistribution": "Delete a distribution.", + "DeleteStreamingDistribution": "Delete a streaming distribution.", + "GetCloudFrontOriginAccessIdentity": "Get the information about an origin access identity.", + "GetCloudFrontOriginAccessIdentityConfig": "Get the configuration information about an origin access identity.", + "GetDistribution": "Get the information about a distribution.", + "GetDistributionConfig": "Get the configuration information about a distribution.", + "GetInvalidation": "Get the information about an invalidation.", + "GetStreamingDistribution": "Get the information about a streaming distribution.", + "GetStreamingDistributionConfig": "Get the configuration information about a streaming distribution.", + "ListCloudFrontOriginAccessIdentities": "List origin access identities.", + "ListDistributions": "List distributions.", + "ListDistributionsByWebACLId": "List the distributions that are associated with a specified AWS WAF web ACL.", + "ListInvalidations": "List invalidation batches.", + "ListStreamingDistributions": "List streaming distributions.", + "UpdateCloudFrontOriginAccessIdentity": "Update an origin access identity.", + "UpdateDistribution": "Update a distribution.", + "UpdateStreamingDistribution": "Update a streaming distribution." + }, + "shapes": { + "AccessDenied": { + "base": "Access denied.", + "refs": { + } + }, + "ActiveTrustedSigners": { + "base": "A complex type that lists the AWS accounts, if any, that you included in the TrustedSigners complex type for the default cache behavior or for any of the other cache behaviors for this distribution. These are accounts that you want to allow to create signed URLs for private content.", + "refs": { + "Distribution$ActiveTrustedSigners": "CloudFront automatically adds this element to the response only if you've set up the distribution to serve private content with signed URLs. The element lists the key pair IDs that CloudFront is aware of for each trusted signer. The Signer child element lists the AWS account number of the trusted signer (or an empty Self element if the signer is you). The Signer element also includes the IDs of any active key pairs associated with the trusted signer's AWS account. 
If no KeyPairId element appears for a Signer, that signer can't create working signed URLs.", + "StreamingDistribution$ActiveTrustedSigners": "CloudFront automatically adds this element to the response only if you've set up the distribution to serve private content with signed URLs. The element lists the key pair IDs that CloudFront is aware of for each trusted signer. The Signer child element lists the AWS account number of the trusted signer (or an empty Self element if the signer is you). The Signer element also includes the IDs of any active key pairs associated with the trusted signer's AWS account. If no KeyPairId element appears for a Signer, that signer can't create working signed URLs." + } + }, + "AliasList": { + "base": null, + "refs": { + "Aliases$Items": "Optional: A complex type that contains CNAME elements, if any, for this distribution. If Quantity is 0, you can omit Items." + } + }, + "Aliases": { + "base": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution.", + "refs": { + "DistributionConfig$Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution.", + "DistributionSummary$Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution.", + "StreamingDistributionConfig$Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this streaming distribution.", + "StreamingDistributionSummary$Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this streaming distribution." + } + }, + "AllowedMethods": { + "base": "A complex type that controls which HTTP methods CloudFront processes and forwards to your Amazon S3 bucket or your custom origin. There are three choices: - CloudFront forwards only GET and HEAD requests. - CloudFront forwards only GET, HEAD and OPTIONS requests. - CloudFront forwards GET, HEAD, OPTIONS, PUT, PATCH, POST, and DELETE requests. If you pick the third choice, you may need to restrict access to your Amazon S3 bucket or to your custom origin so users can't perform operations that you don't want them to. For example, you may not want users to have permission to delete objects from your origin.", + "refs": { + "CacheBehavior$AllowedMethods": null, + "DefaultCacheBehavior$AllowedMethods": null + } + }, + "AwsAccountNumberList": { + "base": null, + "refs": { + "TrustedSigners$Items": "Optional: A complex type that contains trusted signers for this cache behavior. If Quantity is 0, you can omit Items." + } + }, + "BatchTooLarge": { + "base": null, + "refs": { + } + }, + "CNAMEAlreadyExists": { + "base": null, + "refs": { + } + }, + "CacheBehavior": { + "base": "A complex type that describes how CloudFront processes requests. You can create up to 10 cache behaviors. You must create at least as many cache behaviors (including the default cache behavior) as you have origins if you want CloudFront to distribute objects from all of the origins. Each cache behavior specifies the one origin from which you want CloudFront to get objects. If you have two origins and only the default cache behavior, the default cache behavior will cause CloudFront to get objects from one of the origins, but the other origin will never be used. If you don't want to specify any cache behaviors, include only an empty CacheBehaviors element.
Don't include an empty CacheBehavior element, or CloudFront returns a MalformedXML error. To delete all cache behaviors in an existing distribution, update the distribution configuration and include only an empty CacheBehaviors element. To add, change, or remove one or more cache behaviors, update the distribution configuration and specify all of the cache behaviors that you want to include in the updated distribution.", + "refs": { + "CacheBehaviorList$member": null + } + }, + "CacheBehaviorList": { + "base": null, + "refs": { + "CacheBehaviors$Items": "Optional: A complex type that contains cache behaviors for this distribution. If Quantity is 0, you can omit Items." + } + }, + "CacheBehaviors": { + "base": "A complex type that contains zero or more CacheBehavior elements.", + "refs": { + "DistributionConfig$CacheBehaviors": "A complex type that contains zero or more CacheBehavior elements.", + "DistributionSummary$CacheBehaviors": "A complex type that contains zero or more CacheBehavior elements." + } + }, + "CachedMethods": { + "base": "A complex type that controls whether CloudFront caches the response to requests using the specified HTTP methods. There are two choices: - CloudFront caches responses to GET and HEAD requests. - CloudFront caches responses to GET, HEAD, and OPTIONS requests. If you pick the second choice for your S3 Origin, you may need to forward Access-Control-Request-Method, Access-Control-Request-Headers and Origin headers for the responses to be cached correctly.", + "refs": { + "AllowedMethods$CachedMethods": null + } + }, + "CertificateSource": { + "base": null, + "refs": { + "ViewerCertificate$CertificateSource": "If you want viewers to use HTTPS to request your objects and you're using the CloudFront domain name of your distribution in your object URLs (for example, https://d111111abcdef8.cloudfront.net/logo.jpg), set to \"cloudfront\". If you want viewers to use HTTPS to request your objects and you're using an alternate domain name in your object URLs (for example, https://example.com/logo.jpg), you can use your own IAM or ACM certificate. To use an ACM certificate, set to \"acm\" and update the Certificate to the ACM certificate ARN. To use an IAM certificate, set to \"iam\" and update the Certificate to the IAM certificate identifier." + } + }, + "CloudFrontOriginAccessIdentity": { + "base": "CloudFront origin access identity.", + "refs": { + "CreateCloudFrontOriginAccessIdentityResult$CloudFrontOriginAccessIdentity": "The origin access identity's information.", + "GetCloudFrontOriginAccessIdentityResult$CloudFrontOriginAccessIdentity": "The origin access identity's information.", + "UpdateCloudFrontOriginAccessIdentityResult$CloudFrontOriginAccessIdentity": "The origin access identity's information." 
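The Get*/Update* result shapes listed here return the resource together with an ETag header, and the corresponding update requests defined in the API model take that value back as If-Match. A hedged sketch of the resulting read-modify-write cycle for an origin access identity; svc is an assumed *cloudfront.CloudFront client and oaiID an assumed identity id:

    // Read the current config and its ETag, mutate, and write back with If-Match.
    got, err := svc.GetCloudFrontOriginAccessIdentityConfig(
        &cloudfront.GetCloudFrontOriginAccessIdentityConfigInput{
            Id: aws.String(oaiID),
        })
    if err != nil {
        return err
    }
    cfg := got.CloudFrontOriginAccessIdentityConfig
    cfg.Comment = aws.String("updated comment") // illustrative mutation
    _, err = svc.UpdateCloudFrontOriginAccessIdentity(
        &cloudfront.UpdateCloudFrontOriginAccessIdentityInput{
            Id:      aws.String(oaiID),
            IfMatch: got.ETag, // ETag from the Get becomes If-Match on the Update
            CloudFrontOriginAccessIdentityConfig: cfg,
        })
    return err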
+ } + }, + "CloudFrontOriginAccessIdentityAlreadyExists": { + "base": "If the CallerReference is a value you already sent in a previous request to create an identity but the content of the CloudFrontOriginAccessIdentityConfig is different from the original request, CloudFront returns a CloudFrontOriginAccessIdentityAlreadyExists error.", + "refs": { + } + }, + "CloudFrontOriginAccessIdentityConfig": { + "base": "Origin access identity configuration.", + "refs": { + "CloudFrontOriginAccessIdentity$CloudFrontOriginAccessIdentityConfig": "The current configuration information for the identity.", + "CreateCloudFrontOriginAccessIdentityRequest$CloudFrontOriginAccessIdentityConfig": "The origin access identity's configuration information.", + "GetCloudFrontOriginAccessIdentityConfigResult$CloudFrontOriginAccessIdentityConfig": "The origin access identity's configuration information.", + "UpdateCloudFrontOriginAccessIdentityRequest$CloudFrontOriginAccessIdentityConfig": "The identity's configuration information." + } + }, + "CloudFrontOriginAccessIdentityInUse": { + "base": null, + "refs": { + } + }, + "CloudFrontOriginAccessIdentityList": { + "base": "The CloudFrontOriginAccessIdentityList type.", + "refs": { + "ListCloudFrontOriginAccessIdentitiesResult$CloudFrontOriginAccessIdentityList": "The CloudFrontOriginAccessIdentityList type." + } + }, + "CloudFrontOriginAccessIdentitySummary": { + "base": "Summary of the information about a CloudFront origin access identity.", + "refs": { + "CloudFrontOriginAccessIdentitySummaryList$member": null + } + }, + "CloudFrontOriginAccessIdentitySummaryList": { + "base": null, + "refs": { + "CloudFrontOriginAccessIdentityList$Items": "A complex type that contains one CloudFrontOriginAccessIdentitySummary element for each origin access identity that was created by the current AWS account." + } + }, + "CookieNameList": { + "base": null, + "refs": { + "CookieNames$Items": "Optional: A complex type that contains whitelisted cookies for this cache behavior. If Quantity is 0, you can omit Items." + } + }, + "CookieNames": { + "base": "A complex type that specifies the whitelisted cookies, if any, that you want CloudFront to forward to your origin that is associated with this cache behavior.", + "refs": { + "CookiePreference$WhitelistedNames": "A complex type that specifies the whitelisted cookies, if any, that you want CloudFront to forward to your origin that is associated with this cache behavior." + } + }, + "CookiePreference": { + "base": "A complex type that specifies the cookie preferences associated with this cache behavior.", + "refs": { + "ForwardedValues$Cookies": "A complex type that specifies how CloudFront handles cookies." 
+ } + }, + "CreateCloudFrontOriginAccessIdentityRequest": { + "base": "The request to create a new origin access identity.", + "refs": { + } + }, + "CreateCloudFrontOriginAccessIdentityResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "CreateDistributionRequest": { + "base": "The request to create a new distribution.", + "refs": { + } + }, + "CreateDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "CreateInvalidationRequest": { + "base": "The request to create an invalidation.", + "refs": { + } + }, + "CreateInvalidationResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "CreateStreamingDistributionRequest": { + "base": "The request to create a new streaming distribution.", + "refs": { + } + }, + "CreateStreamingDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "CustomErrorResponse": { + "base": "A complex type that describes how you'd prefer CloudFront to respond to requests that result in either a 4xx or 5xx response. You can control whether a custom error page should be displayed, what the desired response code should be for this error page and how long should the error response be cached by CloudFront. If you don't want to specify any custom error responses, include only an empty CustomErrorResponses element. To delete all custom error responses in an existing distribution, update the distribution configuration and include only an empty CustomErrorResponses element. To add, change, or remove one or more custom error responses, update the distribution configuration and specify all of the custom error responses that you want to include in the updated distribution.", + "refs": { + "CustomErrorResponseList$member": null + } + }, + "CustomErrorResponseList": { + "base": null, + "refs": { + "CustomErrorResponses$Items": "Optional: A complex type that contains custom error responses for this distribution. If Quantity is 0, you can omit Items." + } + }, + "CustomErrorResponses": { + "base": "A complex type that contains zero or more CustomErrorResponse elements.", + "refs": { + "DistributionConfig$CustomErrorResponses": "A complex type that contains zero or more CustomErrorResponse elements.", + "DistributionSummary$CustomErrorResponses": "A complex type that contains zero or more CustomErrorResponses elements." + } + }, + "CustomHeaders": { + "base": "A complex type that contains the list of Custom Headers for each origin.", + "refs": { + "Origin$CustomHeaders": "A complex type that contains information about the custom headers associated with this Origin." + } + }, + "CustomOriginConfig": { + "base": "A customer origin.", + "refs": { + "Origin$CustomOriginConfig": "A complex type that contains information about a custom origin. If the origin is an Amazon S3 bucket, use the S3OriginConfig element instead." 
+ } + }, + "DefaultCacheBehavior": { + "base": "A complex type that describes the default cache behavior if you do not specify a CacheBehavior element or if files don't match any of the values of PathPattern in CacheBehavior elements.You must create exactly one default cache behavior.", + "refs": { + "DistributionConfig$DefaultCacheBehavior": "A complex type that describes the default cache behavior if you do not specify a CacheBehavior element or if files don't match any of the values of PathPattern in CacheBehavior elements.You must create exactly one default cache behavior.", + "DistributionSummary$DefaultCacheBehavior": "A complex type that describes the default cache behavior if you do not specify a CacheBehavior element or if files don't match any of the values of PathPattern in CacheBehavior elements.You must create exactly one default cache behavior." + } + }, + "DeleteCloudFrontOriginAccessIdentityRequest": { + "base": "The request to delete a origin access identity.", + "refs": { + } + }, + "DeleteDistributionRequest": { + "base": "The request to delete a distribution.", + "refs": { + } + }, + "DeleteStreamingDistributionRequest": { + "base": "The request to delete a streaming distribution.", + "refs": { + } + }, + "Distribution": { + "base": "A distribution.", + "refs": { + "CreateDistributionResult$Distribution": "The distribution's information.", + "GetDistributionResult$Distribution": "The distribution's information.", + "UpdateDistributionResult$Distribution": "The distribution's information." + } + }, + "DistributionAlreadyExists": { + "base": "The caller reference you attempted to create the distribution with is associated with another distribution.", + "refs": { + } + }, + "DistributionConfig": { + "base": "A distribution Configuration.", + "refs": { + "CreateDistributionRequest$DistributionConfig": "The distribution's configuration information.", + "Distribution$DistributionConfig": "The current configuration information for the distribution.", + "GetDistributionConfigResult$DistributionConfig": "The distribution's configuration information.", + "UpdateDistributionRequest$DistributionConfig": "The distribution's configuration information." + } + }, + "DistributionList": { + "base": "A distribution list.", + "refs": { + "ListDistributionsByWebACLIdResult$DistributionList": "The DistributionList type.", + "ListDistributionsResult$DistributionList": "The DistributionList type." + } + }, + "DistributionNotDisabled": { + "base": null, + "refs": { + } + }, + "DistributionSummary": { + "base": "A summary of the information for an Amazon CloudFront distribution.", + "refs": { + "DistributionSummaryList$member": null + } + }, + "DistributionSummaryList": { + "base": null, + "refs": { + "DistributionList$Items": "A complex type that contains one DistributionSummary element for each distribution that was created by the current AWS account." + } + }, + "ForwardedValues": { + "base": "A complex type that specifies how CloudFront handles query strings, cookies and headers.", + "refs": { + "CacheBehavior$ForwardedValues": "A complex type that specifies how CloudFront handles query strings, cookies and headers.", + "DefaultCacheBehavior$ForwardedValues": "A complex type that specifies how CloudFront handles query strings, cookies and headers." + } + }, + "GeoRestriction": { + "base": "A complex type that controls the countries in which your content is distributed. For more information about geo restriction, go to Customizing Error Responses in the Amazon CloudFront Developer Guide. 
CloudFront determines the location of your users using MaxMind GeoIP databases. For information about the accuracy of these databases, see How accurate are your GeoIP databases? on the MaxMind website.", + "refs": { + "Restrictions$GeoRestriction": null + } + }, + "GeoRestrictionType": { + "base": null, + "refs": { + "GeoRestriction$RestrictionType": "The method that you want to use to restrict distribution of your content by country: - none: No geo restriction is enabled, meaning access to content is not restricted by client geo location. - blacklist: The Location elements specify the countries in which you do not want CloudFront to distribute your content. - whitelist: The Location elements specify the countries in which you want CloudFront to distribute your content." + } + }, + "GetCloudFrontOriginAccessIdentityConfigRequest": { + "base": "The request to get an origin access identity's configuration.", + "refs": { + } + }, + "GetCloudFrontOriginAccessIdentityConfigResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetCloudFrontOriginAccessIdentityRequest": { + "base": "The request to get an origin access identity's information.", + "refs": { + } + }, + "GetCloudFrontOriginAccessIdentityResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetDistributionConfigRequest": { + "base": "The request to get a distribution configuration.", + "refs": { + } + }, + "GetDistributionConfigResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetDistributionRequest": { + "base": "The request to get a distribution's information.", + "refs": { + } + }, + "GetDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetInvalidationRequest": { + "base": "The request to get an invalidation's information.", + "refs": { + } + }, + "GetInvalidationResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetStreamingDistributionConfigRequest": { + "base": "The request to get a streaming distribution configuration.", + "refs": { + } + }, + "GetStreamingDistributionConfigResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetStreamingDistributionRequest": { + "base": "The request to get a streaming distribution's information.", + "refs": { + } + }, + "GetStreamingDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "HeaderList": { + "base": null, + "refs": { + "Headers$Items": "Optional: A complex type that contains a Name element for each header that you want CloudFront to forward to the origin and to vary on for this cache behavior. If Quantity is 0, omit Items." + } + }, + "Headers": { + "base": "A complex type that specifies the headers that you want CloudFront to forward to the origin for this cache behavior. For the headers that you specify, CloudFront also caches separate versions of a given object based on the header values in viewer requests; this is known as varying on headers. For example, suppose viewer requests for logo.jpg contain a custom Product header that has a value of either Acme or Apex, and you configure CloudFront to vary on the Product header.
CloudFront forwards the Product header to the origin and caches the response from the origin once for each header value.", + "refs": { + "ForwardedValues$Headers": "A complex type that specifies the Headers, if any, that you want CloudFront to vary upon for this cache behavior." + } + }, + "IllegalUpdate": { + "base": "Origin and CallerReference cannot be updated.", + "refs": { + } + }, + "InconsistentQuantities": { + "base": "The value of Quantity and the size of Items do not match.", + "refs": { + } + }, + "InvalidArgument": { + "base": "The argument is invalid.", + "refs": { + } + }, + "InvalidDefaultRootObject": { + "base": "The default root object file name is too big or contains an invalid character.", + "refs": { + } + }, + "InvalidErrorCode": { + "base": null, + "refs": { + } + }, + "InvalidForwardCookies": { + "base": "Your request's forward cookies option doesn't match the expectation for the whitelisted list of cookie names: either a list of cookie names was specified when not allowed, or the list of cookie names is missing when expected.", + "refs": { + } + }, + "InvalidGeoRestrictionParameter": { + "base": null, + "refs": { + } + }, + "InvalidHeadersForS3Origin": { + "base": null, + "refs": { + } + }, + "InvalidIfMatchVersion": { + "base": "The If-Match version is missing or not valid for the distribution.", + "refs": { + } + }, + "InvalidLocationCode": { + "base": null, + "refs": { + } + }, + "InvalidMinimumProtocolVersion": { + "base": null, + "refs": { + } + }, + "InvalidOrigin": { + "base": "The Amazon S3 origin server specified does not refer to a valid Amazon S3 bucket.", + "refs": { + } + }, + "InvalidOriginAccessIdentity": { + "base": "The origin access identity is not valid or doesn't exist.", + "refs": { + } + }, + "InvalidProtocolSettings": { + "base": "You cannot specify SSLv3 as the minimum protocol version if you want to support only clients that support Server Name Indication (SNI).", + "refs": { + } + }, + "InvalidRelativePath": { + "base": "The relative path is too big, is not URL-encoded, or does not begin with a slash (/).", + "refs": { + } + }, + "InvalidRequiredProtocol": { + "base": "This operation requires the HTTPS protocol. Ensure that you specify the HTTPS protocol in your request, or omit the RequiredProtocols element from your distribution configuration.", + "refs": { + } + }, + "InvalidResponseCode": { + "base": null, + "refs": { + } + }, + "InvalidTTLOrder": { + "base": null, + "refs": { + } + }, + "InvalidViewerCertificate": { + "base": null, + "refs": { + } + }, + "InvalidWebACLId": { + "base": null, + "refs": { + } + }, + "Invalidation": { + "base": "An invalidation.", + "refs": { + "CreateInvalidationResult$Invalidation": "The invalidation's information.", + "GetInvalidationResult$Invalidation": "The invalidation's information." + } + }, + "InvalidationBatch": { + "base": "An invalidation batch.", + "refs": { + "CreateInvalidationRequest$InvalidationBatch": "The batch information for the invalidation.", + "Invalidation$InvalidationBatch": "The current invalidation information for the batch request." + } + }, + "InvalidationList": { + "base": "An invalidation list.", + "refs": { + "ListInvalidationsResult$InvalidationList": "Information about invalidation batches."
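InvalidationBatch, as described above, wraps a Paths list plus a CallerReference that makes retries idempotent. A minimal hedged sketch of creating one; svc is an assumed client and the distribution id a placeholder:

    // Invalidate two objects; reusing the same CallerReference replays the
    // same batch instead of creating a new one.
    _, err := svc.CreateInvalidation(&cloudfront.CreateInvalidationInput{
        DistributionId: aws.String("EDFDVBD6EXAMPLE"), // placeholder
        InvalidationBatch: &cloudfront.InvalidationBatch{
            CallerReference: aws.String(fmt.Sprintf("inval-%d", time.Now().Unix())),
            Paths: &cloudfront.Paths{
                Quantity: aws.Int64(2),
                Items: []*string{
                    aws.String("/index.html"),
                    aws.String("/images/logo.jpg"),
                },
            },
        },
    })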
+ } + }, + "InvalidationSummary": { + "base": "Summary of an invalidation request.", + "refs": { + "InvalidationSummaryList$member": null + } + }, + "InvalidationSummaryList": { + "base": null, + "refs": { + "InvalidationList$Items": "A complex type that contains one InvalidationSummary element for each invalidation batch that was created by the current AWS account." + } + }, + "ItemSelection": { + "base": null, + "refs": { + "CookiePreference$Forward": "Use this element to specify whether you want CloudFront to forward cookies to the origin that is associated with this cache behavior. You can specify all, none or whitelist. If you choose All, CloudFront forwards all cookies regardless of how many your application uses." + } + }, + "KeyPairIdList": { + "base": null, + "refs": { + "KeyPairIds$Items": "A complex type that lists the active CloudFront key pairs, if any, that are associated with AwsAccountNumber." + } + }, + "KeyPairIds": { + "base": "A complex type that lists the active CloudFront key pairs, if any, that are associated with AwsAccountNumber.", + "refs": { + "Signer$KeyPairIds": "A complex type that lists the active CloudFront key pairs, if any, that are associated with AwsAccountNumber." + } + }, + "ListCloudFrontOriginAccessIdentitiesRequest": { + "base": "The request to list origin access identities.", + "refs": { + } + }, + "ListCloudFrontOriginAccessIdentitiesResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "ListDistributionsByWebACLIdRequest": { + "base": "The request to list distributions that are associated with a specified AWS WAF web ACL.", + "refs": { + } + }, + "ListDistributionsByWebACLIdResult": { + "base": "The response to a request to list the distributions that are associated with a specified AWS WAF web ACL.", + "refs": { + } + }, + "ListDistributionsRequest": { + "base": "The request to list your distributions.", + "refs": { + } + }, + "ListDistributionsResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "ListInvalidationsRequest": { + "base": "The request to list invalidations.", + "refs": { + } + }, + "ListInvalidationsResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "ListStreamingDistributionsRequest": { + "base": "The request to list your streaming distributions.", + "refs": { + } + }, + "ListStreamingDistributionsResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "LocationList": { + "base": null, + "refs": { + "GeoRestriction$Items": "A complex type that contains a Location element for each country in which you want CloudFront either to distribute your content (whitelist) or not distribute your content (blacklist). The Location element is a two-letter, uppercase country code for a country that you want to include in your blacklist or whitelist. Include one Location element for each country. CloudFront and MaxMind both use ISO 3166 country codes. For the current list of countries and the corresponding codes, see ISO 3166-1-alpha-2 code on the International Organization for Standardization website. You can also refer to the country list in the CloudFront console, which includes both country names and codes." + } + }, + "LoggingConfig": { + "base": "A complex type that controls whether access logs are written for the distribution.", + "refs": { + "DistributionConfig$Logging": "A complex type that controls whether access logs are written for the distribution." 
+ } + }, + "Method": { + "base": null, + "refs": { + "MethodsList$member": null + } + }, + "MethodsList": { + "base": null, + "refs": { + "AllowedMethods$Items": "A complex type that contains the HTTP methods that you want CloudFront to process and forward to your origin.", + "CachedMethods$Items": "A complex type that contains the HTTP methods that you want CloudFront to cache responses to." + } + }, + "MinimumProtocolVersion": { + "base": null, + "refs": { + "ViewerCertificate$MinimumProtocolVersion": "Specify the minimum version of the SSL protocol that you want CloudFront to use, SSLv3 or TLSv1, for HTTPS connections. CloudFront will serve your objects only to browsers or devices that support at least the SSL version that you specify. The TLSv1 protocol is more secure, so we recommend that you specify SSLv3 only if your users are using browsers or devices that don't support TLSv1. If you're using a custom certificate (if you specify a value for IAMCertificateId) and if you're using dedicated IP (if you specify vip for SSLSupportMethod), you can choose SSLv3 or TLSv1 as the MinimumProtocolVersion. If you're using a custom certificate (if you specify a value for IAMCertificateId) and if you're using SNI (if you specify sni-only for SSLSupportMethod), you must specify TLSv1 for MinimumProtocolVersion." + } + }, + "MissingBody": { + "base": "This operation requires a body. Ensure that the body is present and the Content-Type header is set.", + "refs": { + } + }, + "NoSuchCloudFrontOriginAccessIdentity": { + "base": "The specified origin access identity does not exist.", + "refs": { + } + }, + "NoSuchDistribution": { + "base": "The specified distribution does not exist.", + "refs": { + } + }, + "NoSuchInvalidation": { + "base": "The specified invalidation does not exist.", + "refs": { + } + }, + "NoSuchOrigin": { + "base": "No origin exists with the specified Origin Id.", + "refs": { + } + }, + "NoSuchStreamingDistribution": { + "base": "The specified streaming distribution does not exist.", + "refs": { + } + }, + "Origin": { + "base": "A complex type that describes the Amazon S3 bucket or the HTTP server (for example, a web server) from which CloudFront gets your files.You must create at least one origin.", + "refs": { + "OriginList$member": null + } + }, + "OriginCustomHeader": { + "base": "A complex type that contains information related to a Header", + "refs": { + "OriginCustomHeadersList$member": null + } + }, + "OriginCustomHeadersList": { + "base": null, + "refs": { + "CustomHeaders$Items": "A complex type that contains the custom headers for this Origin." + } + }, + "OriginList": { + "base": null, + "refs": { + "Origins$Items": "A complex type that contains origins for this distribution." + } + }, + "OriginProtocolPolicy": { + "base": null, + "refs": { + "CustomOriginConfig$OriginProtocolPolicy": "The origin protocol policy to apply to your origin." + } + }, + "OriginSslProtocols": { + "base": "A complex type that contains the list of SSL/TLS protocols that you want CloudFront to use when communicating with your origin over HTTPS.", + "refs": { + "CustomOriginConfig$OriginSslProtocols": "The SSL/TLS protocols that you want CloudFront to use when communicating with your origin over HTTPS." 
+ } + }, + "Origins": { + "base": "A complex type that contains information about origins for this distribution.", + "refs": { + "DistributionConfig$Origins": "A complex type that contains information about origins for this distribution.", + "DistributionSummary$Origins": "A complex type that contains information about origins for this distribution." + } + }, + "PathList": { + "base": null, + "refs": { + "Paths$Items": "A complex type that contains a list of the objects that you want to invalidate." + } + }, + "Paths": { + "base": "A complex type that contains information about the objects that you want to invalidate.", + "refs": { + "InvalidationBatch$Paths": "The path of the object to invalidate. The path is relative to the distribution and must begin with a slash (/). You must enclose each invalidation object with the Path element tags. If the path includes non-ASCII characters or unsafe characters as defined in RFC 1783 (http://www.ietf.org/rfc/rfc1738.txt), URL encode those characters. Do not URL encode any other characters in the path, or CloudFront will not invalidate the old version of the updated object." + } + }, + "PreconditionFailed": { + "base": "The precondition given in one or more of the request-header fields evaluated to false.", + "refs": { + } + }, + "PriceClass": { + "base": null, + "refs": { + "DistributionConfig$PriceClass": "A complex type that contains information about price class for this distribution.", + "DistributionSummary$PriceClass": null, + "StreamingDistributionConfig$PriceClass": "A complex type that contains information about price class for this streaming distribution.", + "StreamingDistributionSummary$PriceClass": null + } + }, + "Restrictions": { + "base": "A complex type that identifies ways in which you want to restrict distribution of your content.", + "refs": { + "DistributionConfig$Restrictions": null, + "DistributionSummary$Restrictions": null + } + }, + "S3Origin": { + "base": "A complex type that contains information about the Amazon S3 bucket from which you want CloudFront to get your media files for distribution.", + "refs": { + "StreamingDistributionConfig$S3Origin": "A complex type that contains information about the Amazon S3 bucket from which you want CloudFront to get your media files for distribution.", + "StreamingDistributionSummary$S3Origin": "A complex type that contains information about the Amazon S3 bucket from which you want CloudFront to get your media files for distribution." + } + }, + "S3OriginConfig": { + "base": "A complex type that contains information about the Amazon S3 origin. If the origin is a custom origin, use the CustomOriginConfig element instead.", + "refs": { + "Origin$S3OriginConfig": "A complex type that contains information about the Amazon S3 origin. If the origin is a custom origin, use the CustomOriginConfig element instead." + } + }, + "SSLSupportMethod": { + "base": null, + "refs": { + "ViewerCertificate$SSLSupportMethod": "If you specify a value for IAMCertificateId, you must also specify how you want CloudFront to serve HTTPS requests. Valid values are vip and sni-only. If you specify vip, CloudFront uses dedicated IP addresses for your content and can respond to HTTPS requests from any viewer. However, you must request permission to use this feature, and you incur additional monthly charges. If you specify sni-only, CloudFront can only respond to HTTPS requests from viewers that support Server Name Indication (SNI). All modern browsers support SNI, but some browsers still in use don't support SNI. 
Do not specify a value for SSLSupportMethod if you specified true for CloudFrontDefaultCertificate." + } + }, + "Signer": { + "base": "A complex type that lists the AWS accounts that were included in the TrustedSigners complex type, as well as their active CloudFront key pair IDs, if any.", + "refs": { + "SignerList$member": null + } + }, + "SignerList": { + "base": null, + "refs": { + "ActiveTrustedSigners$Items": "A complex type that contains one Signer complex type for each unique trusted signer that is specified in the TrustedSigners complex type, including trusted signers in the default cache behavior and in all of the other cache behaviors." + } + }, + "SslProtocol": { + "base": null, + "refs": { + "SslProtocolsList$member": null + } + }, + "SslProtocolsList": { + "base": null, + "refs": { + "OriginSslProtocols$Items": "A complex type that contains one SslProtocol element for each SSL/TLS protocol that you want to allow CloudFront to use when establishing an HTTPS connection with this origin." + } + }, + "StreamingDistribution": { + "base": "A streaming distribution.", + "refs": { + "CreateStreamingDistributionResult$StreamingDistribution": "The streaming distribution's information.", + "GetStreamingDistributionResult$StreamingDistribution": "The streaming distribution's information.", + "UpdateStreamingDistributionResult$StreamingDistribution": "The streaming distribution's information." + } + }, + "StreamingDistributionAlreadyExists": { + "base": null, + "refs": { + } + }, + "StreamingDistributionConfig": { + "base": "The configuration for the streaming distribution.", + "refs": { + "CreateStreamingDistributionRequest$StreamingDistributionConfig": "The streaming distribution's configuration information.", + "GetStreamingDistributionConfigResult$StreamingDistributionConfig": "The streaming distribution's configuration information.", + "StreamingDistribution$StreamingDistributionConfig": "The current configuration information for the streaming distribution.", + "UpdateStreamingDistributionRequest$StreamingDistributionConfig": "The streaming distribution's configuration information." + } + }, + "StreamingDistributionList": { + "base": "A streaming distribution list.", + "refs": { + "ListStreamingDistributionsResult$StreamingDistributionList": "The StreamingDistributionList type." + } + }, + "StreamingDistributionNotDisabled": { + "base": null, + "refs": { + } + }, + "StreamingDistributionSummary": { + "base": "A summary of the information for an Amazon CloudFront streaming distribution.", + "refs": { + "StreamingDistributionSummaryList$member": null + } + }, + "StreamingDistributionSummaryList": { + "base": null, + "refs": { + "StreamingDistributionList$Items": "A complex type that contains one StreamingDistributionSummary element for each distribution that was created by the current AWS account." + } + }, + "StreamingLoggingConfig": { + "base": "A complex type that controls whether access logs are written for this streaming distribution.", + "refs": { + "StreamingDistributionConfig$Logging": "A complex type that controls whether access logs are written for the streaming distribution." 
+ } + }, + "TooManyCacheBehaviors": { + "base": "You cannot create anymore cache behaviors for the distribution.", + "refs": { + } + }, + "TooManyCertificates": { + "base": "You cannot create anymore custom ssl certificates.", + "refs": { + } + }, + "TooManyCloudFrontOriginAccessIdentities": { + "base": "Processing your request would cause you to exceed the maximum number of origin access identities allowed.", + "refs": { + } + }, + "TooManyCookieNamesInWhiteList": { + "base": "Your request contains more cookie names in the whitelist than are allowed per cache behavior.", + "refs": { + } + }, + "TooManyDistributionCNAMEs": { + "base": "Your request contains more CNAMEs than are allowed per distribution.", + "refs": { + } + }, + "TooManyDistributions": { + "base": "Processing your request would cause you to exceed the maximum number of distributions allowed.", + "refs": { + } + }, + "TooManyHeadersInForwardedValues": { + "base": null, + "refs": { + } + }, + "TooManyInvalidationsInProgress": { + "base": "You have exceeded the maximum number of allowable InProgress invalidation batch requests, or invalidation objects.", + "refs": { + } + }, + "TooManyOriginCustomHeaders": { + "base": null, + "refs": { + } + }, + "TooManyOrigins": { + "base": "You cannot create anymore origins for the distribution.", + "refs": { + } + }, + "TooManyStreamingDistributionCNAMEs": { + "base": null, + "refs": { + } + }, + "TooManyStreamingDistributions": { + "base": "Processing your request would cause you to exceed the maximum number of streaming distributions allowed.", + "refs": { + } + }, + "TooManyTrustedSigners": { + "base": "Your request contains more trusted signers than are allowed per distribution.", + "refs": { + } + }, + "TrustedSignerDoesNotExist": { + "base": "One or more of your trusted signers do not exist.", + "refs": { + } + }, + "TrustedSigners": { + "base": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.", + "refs": { + "CacheBehavior$TrustedSigners": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. 
To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.", + "DefaultCacheBehavior$TrustedSigners": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.", + "StreamingDistributionConfig$TrustedSigners": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.", + "StreamingDistributionSummary$TrustedSigners": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution." 
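TrustedSigners, per the long description above, is either disabled (Enabled false, Quantity 0, Items omitted) or enabled with the AWS account numbers that may create signed URLs. Both forms as the generated Go struct; the account value is illustrative ("self" denotes the distribution owner's own account):

    // No signed URLs required for this cache behavior.
    open := &cloudfront.TrustedSigners{
        Enabled:  aws.Bool(false),
        Quantity: aws.Int64(0),
    }
    // Require signed URLs from one trusted signer.
    private := &cloudfront.TrustedSigners{
        Enabled:  aws.Bool(true),
        Quantity: aws.Int64(1),
        Items:    []*string{aws.String("self")},
    }
    _, _ = open, private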
+ } + }, + "UpdateCloudFrontOriginAccessIdentityRequest": { + "base": "The request to update an origin access identity.", + "refs": { + } + }, + "UpdateCloudFrontOriginAccessIdentityResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "UpdateDistributionRequest": { + "base": "The request to update a distribution.", + "refs": { + } + }, + "UpdateDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "UpdateStreamingDistributionRequest": { + "base": "The request to update a streaming distribution.", + "refs": { + } + }, + "UpdateStreamingDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "ViewerCertificate": { + "base": "A complex type that contains information about viewer certificates for this distribution.", + "refs": { + "DistributionConfig$ViewerCertificate": null, + "DistributionSummary$ViewerCertificate": null + } + }, + "ViewerProtocolPolicy": { + "base": null, + "refs": { + "CacheBehavior$ViewerProtocolPolicy": "Use this element to specify the protocol that users can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern. If you want CloudFront to allow end users to use any available protocol, specify allow-all. If you want CloudFront to require HTTPS, specify https. If you want CloudFront to respond to an HTTP request with an HTTP status code of 301 (Moved Permanently) and the HTTPS URL, specify redirect-to-https. The viewer then resubmits the request using the HTTPS URL.", + "DefaultCacheBehavior$ViewerProtocolPolicy": "Use this element to specify the protocol that users can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern. If you want CloudFront to allow end users to use any available protocol, specify allow-all. If you want CloudFront to require HTTPS, specify https. If you want CloudFront to respond to an HTTP request with an HTTP status code of 301 (Moved Permanently) and the HTTPS URL, specify redirect-to-https. The viewer then resubmits the request using the HTTPS URL." + } + }, + "boolean": { + "base": null, + "refs": { + "ActiveTrustedSigners$Enabled": "Each active trusted signer.", + "CacheBehavior$SmoothStreaming": "Indicates whether you want to distribute media files in Microsoft Smooth Streaming format using the origin that is associated with this cache behavior. If so, specify true; if not, specify false.", + "CacheBehavior$Compress": "Whether you want CloudFront to automatically compress content for web requests that include Accept-Encoding: gzip in the request header. If so, specify true; if not, specify false. CloudFront compresses files larger than 1000 bytes and less than 1 megabyte for both Amazon S3 and custom origins. When a CloudFront edge location is unusually busy, some files might not be compressed. The value of the Content-Type header must be on the list of file types that CloudFront will compress. For the current list, see Serving Compressed Content in the Amazon CloudFront Developer Guide. If you configure CloudFront to compress content, CloudFront removes the ETag response header from the objects that it compresses. The ETag header indicates that the version in a CloudFront edge cache is identical to the version on the origin server, but after compression the two versions are no longer identical. 
As a result, for compressed objects, CloudFront can't use the ETag header to determine whether an expired object in the CloudFront edge cache is still the latest version.", + "CloudFrontOriginAccessIdentityList$IsTruncated": "A flag that indicates whether more origin access identities remain to be listed. If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more items in the list.", + "DefaultCacheBehavior$SmoothStreaming": "Indicates whether you want to distribute media files in Microsoft Smooth Streaming format using the origin that is associated with this cache behavior. If so, specify true; if not, specify false.", + "DefaultCacheBehavior$Compress": "Whether you want CloudFront to automatically compress content for web requests that include Accept-Encoding: gzip in the request header. If so, specify true; if not, specify false. CloudFront compresses files larger than 1000 bytes and less than 1 megabyte for both Amazon S3 and custom origins. When a CloudFront edge location is unusually busy, some files might not be compressed. The value of the Content-Type header must be on the list of file types that CloudFront will compress. For the current list, see Serving Compressed Content in the Amazon CloudFront Developer Guide. If you configure CloudFront to compress content, CloudFront removes the ETag response header from the objects that it compresses. The ETag header indicates that the version in a CloudFront edge cache is identical to the version on the origin server, but after compression the two versions are no longer identical. As a result, for compressed objects, CloudFront can't use the ETag header to determine whether an expired object in the CloudFront edge cache is still the latest version.", + "DistributionConfig$Enabled": "Whether the distribution is enabled to accept end user requests for content.", + "DistributionList$IsTruncated": "A flag that indicates whether more distributions remain to be listed. If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more distributions in the list.", + "DistributionSummary$Enabled": "Whether the distribution is enabled to accept end user requests for content.", + "ForwardedValues$QueryString": "Indicates whether you want CloudFront to forward query strings to the origin that is associated with this cache behavior. If so, specify true; if not, specify false.", + "InvalidationList$IsTruncated": "A flag that indicates whether more invalidation batch requests remain to be listed. If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more invalidation batches in the list.", + "LoggingConfig$Enabled": "Specifies whether you want CloudFront to save access logs to an Amazon S3 bucket. If you do not want to enable logging when you create a distribution or if you want to disable logging for an existing distribution, specify false for Enabled, and specify empty Bucket and Prefix elements. If you specify false for Enabled but you specify values for Bucket, Prefix, and IncludeCookies, the values are automatically deleted.", + "LoggingConfig$IncludeCookies": "Specifies whether you want CloudFront to include cookies in access logs; if so, specify true for IncludeCookies. If you choose to include cookies in logs, CloudFront logs all cookies regardless of how you configure the cache behaviors for this distribution.
If you do not want to include cookies when you create a distribution or if you want to disable include cookies for an existing distribution, specify false for IncludeCookies.", + "StreamingDistributionConfig$Enabled": "Whether the streaming distribution is enabled to accept end user requests for content.", + "StreamingDistributionList$IsTruncated": "A flag that indicates whether more streaming distributions remain to be listed. If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more distributions in the list.", + "StreamingDistributionSummary$Enabled": "Whether the distribution is enabled to accept end user requests for content.", + "StreamingLoggingConfig$Enabled": "Specifies whether you want CloudFront to save access logs to an Amazon S3 bucket. If you do not want to enable logging when you create a streaming distribution or if you want to disable logging for an existing streaming distribution, specify false for Enabled, and specify empty Bucket and Prefix elements. If you specify false for Enabled but you specify values for Bucket and Prefix, the values are automatically deleted.", + "TrustedSigners$Enabled": "Specifies whether you want to require end users to use signed URLs to access the files specified by PathPattern and TargetOriginId.", + "ViewerCertificate$CloudFrontDefaultCertificate": "Note: this field is deprecated. Please use \"cloudfront\" as CertificateSource and omit specifying a Certificate. If you want viewers to use HTTPS to request your objects and you're using the CloudFront domain name of your distribution in your object URLs (for example, https://d111111abcdef8.cloudfront.net/logo.jpg), set to true. Omit this value if you are setting an IAMCertificateId." + } + }, + "integer": { + "base": null, + "refs": { + "ActiveTrustedSigners$Quantity": "The number of unique trusted signers included in all cache behaviors. For example, if three cache behaviors all list the same three AWS accounts, the value of Quantity for ActiveTrustedSigners will be 3.", + "Aliases$Quantity": "The number of CNAMEs, if any, for this distribution.", + "AllowedMethods$Quantity": "The number of HTTP methods that you want CloudFront to forward to your origin. Valid values are 2 (for GET and HEAD requests), 3 (for GET, HEAD and OPTIONS requests) and 7 (for GET, HEAD, OPTIONS, PUT, PATCH, POST, and DELETE requests).", + "CacheBehaviors$Quantity": "The number of cache behaviors for this distribution.", + "CachedMethods$Quantity": "The number of HTTP methods for which you want CloudFront to cache responses. Valid values are 2 (for caching responses to GET and HEAD requests) and 3 (for caching responses to GET, HEAD, and OPTIONS requests).", + "CloudFrontOriginAccessIdentityList$MaxItems": "The value you provided for the MaxItems request parameter.", + "CloudFrontOriginAccessIdentityList$Quantity": "The number of CloudFront origin access identities that were created by the current AWS account.", + "CookieNames$Quantity": "The number of whitelisted cookies for this cache behavior.", + "CustomErrorResponse$ErrorCode": "The 4xx or 5xx HTTP status code that you want to customize. 
For a list of HTTP status codes that you can customize, see CloudFront documentation.", + "CustomErrorResponses$Quantity": "The number of custom error responses for this distribution.", + "CustomHeaders$Quantity": "The number of custom headers for this origin.", + "CustomOriginConfig$HTTPPort": "The HTTP port the custom origin listens on.", + "CustomOriginConfig$HTTPSPort": "The HTTPS port the custom origin listens on.", + "Distribution$InProgressInvalidationBatches": "The number of invalidation batches currently in progress.", + "DistributionList$MaxItems": "The value you provided for the MaxItems request parameter.", + "DistributionList$Quantity": "The number of distributions that were created by the current AWS account.", + "GeoRestriction$Quantity": "When geo restriction is enabled, this is the number of countries in your whitelist or blacklist. Otherwise, when it is not enabled, Quantity is 0, and you can omit Items.", + "Headers$Quantity": "The number of different headers that you want CloudFront to forward to the origin and to vary on for this cache behavior. The maximum number of headers that you can specify by name is 10. If you want CloudFront to forward all headers to the origin and vary on all of them, specify 1 for Quantity and * for Name. If you don't want CloudFront to forward any additional headers to the origin or to vary on any headers, specify 0 for Quantity and omit Items.", + "InvalidationList$MaxItems": "The value you provided for the MaxItems request parameter.", + "InvalidationList$Quantity": "The number of invalidation batches that were created by the current AWS account.", + "KeyPairIds$Quantity": "The number of active CloudFront key pairs for AwsAccountNumber.", + "OriginSslProtocols$Quantity": "The number of SSL/TLS protocols that you want to allow CloudFront to use when establishing an HTTPS connection with this origin.", + "Origins$Quantity": "The number of origins for this distribution.", + "Paths$Quantity": "The number of objects that you want to invalidate.", + "StreamingDistributionList$MaxItems": "The value you provided for the MaxItems request parameter.", + "StreamingDistributionList$Quantity": "The number of streaming distributions that were created by the current AWS account.", + "TrustedSigners$Quantity": "The number of trusted signers for this cache behavior." + } + }, + "long": { + "base": null, + "refs": { + "CacheBehavior$MinTTL": "The minimum amount of time that you want objects to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated. You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "CacheBehavior$DefaultTTL": "If you don't configure your origin to add a Cache-Control max-age directive or an Expires header, DefaultTTL is the default amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin does not add HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "CacheBehavior$MaxTTL": "The maximum amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. 
The value that you specify applies only when your origin adds HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "CustomErrorResponse$ErrorCachingMinTTL": "The minimum amount of time you want HTTP error codes to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated. You can specify a value from 0 to 31,536,000 seconds (one year).", + "DefaultCacheBehavior$MinTTL": "The minimum amount of time that you want objects to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated. You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "DefaultCacheBehavior$DefaultTTL": "If you don't configure your origin to add a Cache-Control max-age directive or an Expires header, DefaultTTL is the default amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin does not add HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "DefaultCacheBehavior$MaxTTL": "The maximum amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin adds HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. You can specify a value from 0 to 3,153,600,000 seconds (100 years)." + } + }, + "string": { + "base": null, + "refs": { + "AccessDenied$Message": null, + "AliasList$member": null, + "AwsAccountNumberList$member": null, + "BatchTooLarge$Message": null, + "CNAMEAlreadyExists$Message": null, + "CacheBehavior$PathPattern": "The pattern (for example, images/*.jpg) that specifies which requests you want this cache behavior to apply to. When CloudFront receives an end-user request, the requested path is compared with path patterns in the order in which cache behaviors are listed in the distribution. The path pattern for the default cache behavior is * and cannot be changed. If the request for an object does not match the path pattern for any cache behaviors, CloudFront applies the behavior in the default cache behavior.", + "CacheBehavior$TargetOriginId": "The value of ID for the origin that you want CloudFront to route requests to when a request matches the path pattern either for a cache behavior or for the default cache behavior.", + "CloudFrontOriginAccessIdentity$Id": "The ID for the origin access identity. For example: E74FTE3AJFJ256A.", + "CloudFrontOriginAccessIdentity$S3CanonicalUserId": "The Amazon S3 canonical user ID for the origin access identity, which you use when giving the origin access identity read permission to an object in Amazon S3.", + "CloudFrontOriginAccessIdentityAlreadyExists$Message": null, + "CloudFrontOriginAccessIdentityConfig$CallerReference": "A unique number that ensures the request can't be replayed. If the CallerReference is new (no matter the content of the CloudFrontOriginAccessIdentityConfig object), a new origin access identity is created. 
If the CallerReference is a value you already sent in a previous request to create an identity, and the content of the CloudFrontOriginAccessIdentityConfig is identical to the original request (ignoring white space), the response includes the same information returned to the original request. If the CallerReference is a value you already sent in a previous request to create an identity but the content of the CloudFrontOriginAccessIdentityConfig is different from the original request, CloudFront returns a CloudFrontOriginAccessIdentityAlreadyExists error.", + "CloudFrontOriginAccessIdentityConfig$Comment": "Any comments you want to include about the origin access identity.", + "CloudFrontOriginAccessIdentityInUse$Message": null, + "CloudFrontOriginAccessIdentityList$Marker": "The value you provided for the Marker request parameter.", + "CloudFrontOriginAccessIdentityList$NextMarker": "If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your origin access identities where they left off.", + "CloudFrontOriginAccessIdentitySummary$Id": "The ID for the origin access identity. For example: E74FTE3AJFJ256A.", + "CloudFrontOriginAccessIdentitySummary$S3CanonicalUserId": "The Amazon S3 canonical user ID for the origin access identity, which you use when giving the origin access identity read permission to an object in Amazon S3.", + "CloudFrontOriginAccessIdentitySummary$Comment": "The comment for this origin access identity, as originally specified when created.", + "CookieNameList$member": null, + "CreateCloudFrontOriginAccessIdentityResult$Location": "The fully qualified URI of the new origin access identity just created. For example: https://cloudfront.amazonaws.com/2010-11-01/origin-access-identity/cloudfront/E74FTE3AJFJ256A.", + "CreateCloudFrontOriginAccessIdentityResult$ETag": "The current version of the origin access identity created.", + "CreateDistributionResult$Location": "The fully qualified URI of the new distribution resource just created. For example: https://cloudfront.amazonaws.com/2010-11-01/distribution/EDFDVBD632BHDS5.", + "CreateDistributionResult$ETag": "The current version of the distribution created.", + "CreateInvalidationRequest$DistributionId": "The distribution's id.", + "CreateInvalidationResult$Location": "The fully qualified URI of the distribution and invalidation batch request, including the Invalidation ID.", + "CreateStreamingDistributionResult$Location": "The fully qualified URI of the new streaming distribution resource just created. For example: https://cloudfront.amazonaws.com/2010-11-01/streaming-distribution/EGTXBD79H29TRA8.", + "CreateStreamingDistributionResult$ETag": "The current version of the streaming distribution created.", + "CustomErrorResponse$ResponsePagePath": "The path of the custom error page (for example, /custom_404.html). The path is relative to the distribution and must begin with a slash (/). If the path includes any non-ASCII characters or unsafe characters as defined in RFC 1738 (http://www.ietf.org/rfc/rfc1738.txt), URL encode those characters. Do not URL encode any other characters in the path, or CloudFront will not return the custom error page to the viewer.", + "CustomErrorResponse$ResponseCode": "The HTTP status code that you want CloudFront to return with the custom error page to the viewer. 
For a list of HTTP status codes that you can replace, see CloudFront Documentation.", + "DefaultCacheBehavior$TargetOriginId": "The value of ID for the origin that you want CloudFront to route requests to when a request matches the path pattern either for a cache behavior or for the default cache behavior.", + "DeleteCloudFrontOriginAccessIdentityRequest$Id": "The origin access identity's id.", + "DeleteCloudFrontOriginAccessIdentityRequest$IfMatch": "The value of the ETag header you received from a previous GET or PUT request. For example: E2QWRUHAPOMQZL.", + "DeleteDistributionRequest$Id": "The distribution id.", + "DeleteDistributionRequest$IfMatch": "The value of the ETag header you received when you disabled the distribution. For example: E2QWRUHAPOMQZL.", + "DeleteStreamingDistributionRequest$Id": "The distribution id.", + "DeleteStreamingDistributionRequest$IfMatch": "The value of the ETag header you received when you disabled the streaming distribution. For example: E2QWRUHAPOMQZL.", + "Distribution$Id": "The identifier for the distribution. For example: EDFDVBD632BHDS5.", + "Distribution$Status": "This response element indicates the current status of the distribution. When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.", + "Distribution$DomainName": "The domain name corresponding to the distribution. For example: d604721fxaaqy9.cloudfront.net.", + "DistributionAlreadyExists$Message": null, + "DistributionConfig$CallerReference": "A unique number that ensures the request can't be replayed. If the CallerReference is new (no matter the content of the DistributionConfig object), a new distribution is created. If the CallerReference is a value you already sent in a previous request to create a distribution, and the content of the DistributionConfig is identical to the original request (ignoring white space), the response includes the same information returned to the original request. If the CallerReference is a value you already sent in a previous request to create a distribution but the content of the DistributionConfig is different from the original request, CloudFront returns a DistributionAlreadyExists error.", + "DistributionConfig$DefaultRootObject": "The object that you want CloudFront to return (for example, index.html) when an end user requests the root URL for your distribution (http://www.example.com) instead of an object in your distribution (http://www.example.com/index.html). Specifying a default root object avoids exposing the contents of your distribution. If you don't want to specify a default root object when you create a distribution, include an empty DefaultRootObject element. To delete the default root object from an existing distribution, update the distribution configuration and include an empty DefaultRootObject element. 
To replace the default root object, update the distribution configuration and specify the new object.", + "DistributionConfig$Comment": "Any comments you want to include about the distribution.", + "DistributionConfig$WebACLId": "(Optional) If you're using AWS WAF to filter CloudFront requests, the Id of the AWS WAF web ACL that is associated with the distribution.", + "DistributionList$Marker": "The value you provided for the Marker request parameter.", + "DistributionList$NextMarker": "If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your distributions where they left off.", + "DistributionNotDisabled$Message": null, + "DistributionSummary$Id": "The identifier for the distribution. For example: EDFDVBD632BHDS5.", + "DistributionSummary$Status": "This response element indicates the current status of the distribution. When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.", + "DistributionSummary$DomainName": "The domain name corresponding to the distribution. For example: d604721fxaaqy9.cloudfront.net.", + "DistributionSummary$Comment": "The comment originally specified when this distribution was created.", + "DistributionSummary$WebACLId": "The Web ACL Id (if any) associated with the distribution.", + "GetCloudFrontOriginAccessIdentityConfigRequest$Id": "The identity's id.", + "GetCloudFrontOriginAccessIdentityConfigResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "GetCloudFrontOriginAccessIdentityRequest$Id": "The identity's id.", + "GetCloudFrontOriginAccessIdentityResult$ETag": "The current version of the origin access identity's information. For example: E2QWRUHAPOMQZL.", + "GetDistributionConfigRequest$Id": "The distribution's id.", + "GetDistributionConfigResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "GetDistributionRequest$Id": "The distribution's id.", + "GetDistributionResult$ETag": "The current version of the distribution's information. For example: E2QWRUHAPOMQZL.", + "GetInvalidationRequest$DistributionId": "The distribution's id.", + "GetInvalidationRequest$Id": "The invalidation's id.", + "GetStreamingDistributionConfigRequest$Id": "The streaming distribution's id.", + "GetStreamingDistributionConfigResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "GetStreamingDistributionRequest$Id": "The streaming distribution's id.", + "GetStreamingDistributionResult$ETag": "The current version of the streaming distribution's information. 
For example: E2QWRUHAPOMQZL.", + "HeaderList$member": null, + "IllegalUpdate$Message": null, + "InconsistentQuantities$Message": null, + "InvalidArgument$Message": null, + "InvalidDefaultRootObject$Message": null, + "InvalidErrorCode$Message": null, + "InvalidForwardCookies$Message": null, + "InvalidGeoRestrictionParameter$Message": null, + "InvalidHeadersForS3Origin$Message": null, + "InvalidIfMatchVersion$Message": null, + "InvalidLocationCode$Message": null, + "InvalidMinimumProtocolVersion$Message": null, + "InvalidOrigin$Message": null, + "InvalidOriginAccessIdentity$Message": null, + "InvalidProtocolSettings$Message": null, + "InvalidRelativePath$Message": null, + "InvalidRequiredProtocol$Message": null, + "InvalidResponseCode$Message": null, + "InvalidTTLOrder$Message": null, + "InvalidViewerCertificate$Message": null, + "InvalidWebACLId$Message": null, + "Invalidation$Id": "The identifier for the invalidation request. For example: IDFDVBD632BHDS5.", + "Invalidation$Status": "The status of the invalidation request. When the invalidation batch is finished, the status is Completed.", + "InvalidationBatch$CallerReference": "A unique name that ensures the request can't be replayed. If the CallerReference is new (no matter the content of the Path object), a new invalidation batch is created. If the CallerReference is a value you already sent in a previous request to create an invalidation batch, and the content of each Path element is identical to the original request, the response includes the same information returned to the original request. If the CallerReference is a value you already sent in a previous request to create an invalidation batch but the content of any Path is different from the original request, CloudFront returns an InvalidationBatchAlreadyExists error.", + "InvalidationList$Marker": "The value you provided for the Marker request parameter.", + "InvalidationList$NextMarker": "If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your invalidation batches where they left off.", + "InvalidationSummary$Id": "The unique ID for an invalidation request.", + "InvalidationSummary$Status": "The status of an invalidation request.", + "KeyPairIdList$member": null, + "ListCloudFrontOriginAccessIdentitiesRequest$Marker": "Use this when paginating results to indicate where to begin in your list of origin access identities. The results include identities in the list that occur after the marker. To get the next page of results, set the Marker to the value of the NextMarker from the current page's response (which is also the ID of the last identity on that page).", + "ListCloudFrontOriginAccessIdentitiesRequest$MaxItems": "The maximum number of origin access identities you want in the response body.", + "ListDistributionsByWebACLIdRequest$Marker": "Use Marker and MaxItems to control pagination of results. If you have more than MaxItems distributions that satisfy the request, the response includes a NextMarker element. To get the next page of results, submit another request. For the value of Marker, specify the value of NextMarker from the last response. (For the first request, omit Marker.)", + "ListDistributionsByWebACLIdRequest$MaxItems": "The maximum number of distributions that you want CloudFront to return in the response body. The maximum and default values are both 100.", + "ListDistributionsByWebACLIdRequest$WebACLId": "The Id of the AWS WAF web ACL for which you want to list the associated distributions. 
If you specify \"null\" for the Id, the request returns a list of the distributions that aren't associated with a web ACL.", + "ListDistributionsRequest$Marker": "Use Marker and MaxItems to control pagination of results. If you have more than MaxItems distributions that satisfy the request, the response includes a NextMarker element. To get the next page of results, submit another request. For the value of Marker, specify the value of NextMarker from the last response. (For the first request, omit Marker.)", + "ListDistributionsRequest$MaxItems": "The maximum number of distributions that you want CloudFront to return in the response body. The maximum and default values are both 100.", + "ListInvalidationsRequest$DistributionId": "The distribution's id.", + "ListInvalidationsRequest$Marker": "Use this parameter when paginating results to indicate where to begin in your list of invalidation batches. Because the results are returned in decreasing order from most recent to oldest, the most recent results are on the first page, the second page will contain earlier results, and so on. To get the next page of results, set the Marker to the value of the NextMarker from the current page's response. This value is the same as the ID of the last invalidation batch on that page.", + "ListInvalidationsRequest$MaxItems": "The maximum number of invalidation batches you want in the response body.", + "ListStreamingDistributionsRequest$Marker": "Use this when paginating results to indicate where to begin in your list of streaming distributions. The results include distributions in the list that occur after the marker. To get the next page of results, set the Marker to the value of the NextMarker from the current page's response (which is also the ID of the last distribution on that page).", + "ListStreamingDistributionsRequest$MaxItems": "The maximum number of streaming distributions you want in the response body.", + "LocationList$member": null, + "LoggingConfig$Bucket": "The Amazon S3 bucket to store the access logs in, for example, myawslogbucket.s3.amazonaws.com.", + "LoggingConfig$Prefix": "An optional string that you want CloudFront to prefix to the access log filenames for this distribution, for example, myprefix/. If you want to enable logging, but you do not want to specify a prefix, you still must include an empty Prefix element in the Logging element.", + "MissingBody$Message": null, + "NoSuchCloudFrontOriginAccessIdentity$Message": null, + "NoSuchDistribution$Message": null, + "NoSuchInvalidation$Message": null, + "NoSuchOrigin$Message": null, + "NoSuchStreamingDistribution$Message": null, + "Origin$Id": "A unique identifier for the origin. The value of Id must be unique within the distribution. You use the value of Id when you create a cache behavior. The Id identifies the origin that CloudFront routes a request to when the request matches the path pattern for that cache behavior.", + "Origin$DomainName": "Amazon S3 origins: The DNS name of the Amazon S3 bucket from which you want CloudFront to get objects for this origin, for example, myawsbucket.s3.amazonaws.com. Custom origins: The DNS domain name for the HTTP server from which you want CloudFront to get objects for this origin, for example, www.example.com.", + "Origin$OriginPath": "An optional element that causes CloudFront to request your content from a directory in your Amazon S3 bucket or your custom origin. When you include the OriginPath element, specify the directory name, beginning with a /. 
CloudFront appends the directory name to the value of DomainName.", + "OriginCustomHeader$HeaderName": "The header's name.", + "OriginCustomHeader$HeaderValue": "The header's value.", + "PathList$member": null, + "PreconditionFailed$Message": null, + "S3Origin$DomainName": "The DNS name of the S3 origin.", + "S3Origin$OriginAccessIdentity": "Your S3 origin's origin access identity.", + "S3OriginConfig$OriginAccessIdentity": "The CloudFront origin access identity to associate with the origin. Use an origin access identity to configure the origin so that end users can only access objects in an Amazon S3 bucket through CloudFront. If you want end users to be able to access objects using either the CloudFront URL or the Amazon S3 URL, specify an empty OriginAccessIdentity element. To delete the origin access identity from an existing distribution, update the distribution configuration and include an empty OriginAccessIdentity element. To replace the origin access identity, update the distribution configuration and specify the new origin access identity. Use the format origin-access-identity/cloudfront/Id where Id is the value that CloudFront returned in the Id element when you created the origin access identity.", + "Signer$AwsAccountNumber": "Specifies an AWS account that can create signed URLs. Values: self, which indicates that the AWS account that was used to create the distribution can create signed URLs, or an AWS account number. Omit the dashes in the account number.", + "StreamingDistribution$Id": "The identifier for the streaming distribution. For example: EGTXBD79H29TRA8.", + "StreamingDistribution$Status": "The current status of the streaming distribution. When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.", + "StreamingDistribution$DomainName": "The domain name corresponding to the streaming distribution. For example: s5c39gqb8ow64r.cloudfront.net.", + "StreamingDistributionAlreadyExists$Message": null, + "StreamingDistributionConfig$CallerReference": "A unique number that ensures the request can't be replayed. If the CallerReference is new (no matter the content of the StreamingDistributionConfig object), a new streaming distribution is created. If the CallerReference is a value you already sent in a previous request to create a streaming distribution, and the content of the StreamingDistributionConfig is identical to the original request (ignoring white space), the response includes the same information returned to the original request. If the CallerReference is a value you already sent in a previous request to create a streaming distribution but the content of the StreamingDistributionConfig is different from the original request, CloudFront returns a StreamingDistributionAlreadyExists error.", + "StreamingDistributionConfig$Comment": "Any comments you want to include about the streaming distribution.", + "StreamingDistributionList$Marker": "The value you provided for the Marker request parameter.", + "StreamingDistributionList$NextMarker": "If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your streaming distributions where they left off.", + "StreamingDistributionNotDisabled$Message": null, + "StreamingDistributionSummary$Id": "The identifier for the distribution. For example: EDFDVBD632BHDS5.", + "StreamingDistributionSummary$Status": "Indicates the current status of the distribution. 
When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.", + "StreamingDistributionSummary$DomainName": "The domain name corresponding to the distribution. For example: d604721fxaaqy9.cloudfront.net.", + "StreamingDistributionSummary$Comment": "The comment originally specified when this distribution was created.", + "StreamingLoggingConfig$Bucket": "The Amazon S3 bucket to store the access logs in, for example, myawslogbucket.s3.amazonaws.com.", + "StreamingLoggingConfig$Prefix": "An optional string that you want CloudFront to prefix to the access log filenames for this streaming distribution, for example, myprefix/. If you want to enable logging, but you do not want to specify a prefix, you still must include an empty Prefix element in the Logging element.", + "TooManyCacheBehaviors$Message": null, + "TooManyCertificates$Message": null, + "TooManyCloudFrontOriginAccessIdentities$Message": null, + "TooManyCookieNamesInWhiteList$Message": null, + "TooManyDistributionCNAMEs$Message": null, + "TooManyDistributions$Message": null, + "TooManyHeadersInForwardedValues$Message": null, + "TooManyInvalidationsInProgress$Message": null, + "TooManyOriginCustomHeaders$Message": null, + "TooManyOrigins$Message": null, + "TooManyStreamingDistributionCNAMEs$Message": null, + "TooManyStreamingDistributions$Message": null, + "TooManyTrustedSigners$Message": null, + "TrustedSignerDoesNotExist$Message": null, + "UpdateCloudFrontOriginAccessIdentityRequest$Id": "The identity's id.", + "UpdateCloudFrontOriginAccessIdentityRequest$IfMatch": "The value of the ETag header you received when retrieving the identity's configuration. For example: E2QWRUHAPOMQZL.", + "UpdateCloudFrontOriginAccessIdentityResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "UpdateDistributionRequest$Id": "The distribution's id.", + "UpdateDistributionRequest$IfMatch": "The value of the ETag header you received when retrieving the distribution's configuration. For example: E2QWRUHAPOMQZL.", + "UpdateDistributionResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "UpdateStreamingDistributionRequest$Id": "The streaming distribution's id.", + "UpdateStreamingDistributionRequest$IfMatch": "The value of the ETag header you received when retrieving the streaming distribution's configuration. For example: E2QWRUHAPOMQZL.", + "UpdateStreamingDistributionResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "ViewerCertificate$Certificate": "If you want viewers to use HTTPS to request your objects and you're using an alternate domain name in your object URLs (for example, https://example.com/logo.jpg), you can use your own IAM or ACM certificate. For ACM, set to the ACM certificate ARN. For IAM, set to the IAM certificate identifier.", + "ViewerCertificate$IAMCertificateId": "Note: this field is deprecated. Please use \"iam\" as CertificateSource and specify the IAM certificate Id as the Certificate. If you want viewers to use HTTPS to request your objects and you're using an alternate domain name in your object URLs (for example, https://example.com/logo.jpg), specify the IAM certificate identifier of the custom viewer certificate for this distribution. Specify either this value or CloudFrontDefaultCertificate." 
+ } + }, + "timestamp": { + "base": null, + "refs": { + "Distribution$LastModifiedTime": "The date and time the distribution was last modified.", + "DistributionSummary$LastModifiedTime": "The date and time the distribution was last modified.", + "Invalidation$CreateTime": "The date and time the invalidation request was first made.", + "InvalidationSummary$CreateTime": null, + "StreamingDistribution$LastModifiedTime": "The date and time the distribution was last modified.", + "StreamingDistributionSummary$LastModifiedTime": "The date and time the distribution was last modified." + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-13/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-13/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-13/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-13/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-13/paginators-1.json new file mode 100644 index 000000000..51fbb907f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-13/paginators-1.json @@ -0,0 +1,32 @@ +{ + "pagination": { + "ListCloudFrontOriginAccessIdentities": { + "input_token": "Marker", + "output_token": "CloudFrontOriginAccessIdentityList.NextMarker", + "limit_key": "MaxItems", + "more_results": "CloudFrontOriginAccessIdentityList.IsTruncated", + "result_key": "CloudFrontOriginAccessIdentityList.Items" + }, + "ListDistributions": { + "input_token": "Marker", + "output_token": "DistributionList.NextMarker", + "limit_key": "MaxItems", + "more_results": "DistributionList.IsTruncated", + "result_key": "DistributionList.Items" + }, + "ListInvalidations": { + "input_token": "Marker", + "output_token": "InvalidationList.NextMarker", + "limit_key": "MaxItems", + "more_results": "InvalidationList.IsTruncated", + "result_key": "InvalidationList.Items" + }, + "ListStreamingDistributions": { + "input_token": "Marker", + "output_token": "StreamingDistributionList.NextMarker", + "limit_key": "MaxItems", + "more_results": "StreamingDistributionList.IsTruncated", + "result_key": "StreamingDistributionList.Items" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-13/waiters-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-13/waiters-2.json new file mode 100644 index 000000000..f6d3ba7bc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-13/waiters-2.json @@ -0,0 +1,47 @@ +{ + "version": 2, + "waiters": { + "DistributionDeployed": { + "delay": 60, + "operation": "GetDistribution", + "maxAttempts": 25, + "description": "Wait until a distribution is deployed.", + "acceptors": [ + { + "expected": "Deployed", + "matcher": "path", + "state": "success", + "argument": "Status" + } + ] + }, + "InvalidationCompleted": { + "delay": 20, + "operation": "GetInvalidation", + "maxAttempts": 30, + "description": "Wait until an invalidation has completed.", + "acceptors": [ + { + "expected": "Completed", + "matcher": "path", + "state": "success", + "argument": "Status" + } + ] + }, + "StreamingDistributionDeployed": { + "delay": 60, + "operation": "GetStreamingDistribution", + "maxAttempts": 25, + "description": "Wait until a streaming distribution is deployed.", + "acceptors": [ + { + "expected": 
"Deployed", + "matcher": "path", + "state": "success", + "argument": "Status" + } + ] + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-28/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-28/api-2.json new file mode 100644 index 000000000..19f4fd4f3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-28/api-2.json @@ -0,0 +1,2218 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2016-01-28", + "endpointPrefix":"cloudfront", + "globalEndpoint":"cloudfront.amazonaws.com", + "protocol":"rest-xml", + "serviceAbbreviation":"CloudFront", + "serviceFullName":"Amazon CloudFront", + "signatureVersion":"v4" + }, + "operations":{ + "CreateCloudFrontOriginAccessIdentity":{ + "name":"CreateCloudFrontOriginAccessIdentity2016_01_28", + "http":{ + "method":"POST", + "requestUri":"/2016-01-28/origin-access-identity/cloudfront", + "responseCode":201 + }, + "input":{"shape":"CreateCloudFrontOriginAccessIdentityRequest"}, + "output":{"shape":"CreateCloudFrontOriginAccessIdentityResult"}, + "errors":[ + {"shape":"CloudFrontOriginAccessIdentityAlreadyExists"}, + {"shape":"MissingBody"}, + {"shape":"TooManyCloudFrontOriginAccessIdentities"}, + {"shape":"InvalidArgument"}, + {"shape":"InconsistentQuantities"} + ] + }, + "CreateDistribution":{ + "name":"CreateDistribution2016_01_28", + "http":{ + "method":"POST", + "requestUri":"/2016-01-28/distribution", + "responseCode":201 + }, + "input":{"shape":"CreateDistributionRequest"}, + "output":{"shape":"CreateDistributionResult"}, + "errors":[ + {"shape":"CNAMEAlreadyExists"}, + {"shape":"DistributionAlreadyExists"}, + {"shape":"InvalidOrigin"}, + {"shape":"InvalidOriginAccessIdentity"}, + {"shape":"AccessDenied"}, + {"shape":"TooManyTrustedSigners"}, + {"shape":"TrustedSignerDoesNotExist"}, + {"shape":"InvalidViewerCertificate"}, + {"shape":"InvalidMinimumProtocolVersion"}, + {"shape":"MissingBody"}, + {"shape":"TooManyDistributionCNAMEs"}, + {"shape":"TooManyDistributions"}, + {"shape":"InvalidDefaultRootObject"}, + {"shape":"InvalidRelativePath"}, + {"shape":"InvalidErrorCode"}, + {"shape":"InvalidResponseCode"}, + {"shape":"InvalidArgument"}, + {"shape":"InvalidRequiredProtocol"}, + {"shape":"NoSuchOrigin"}, + {"shape":"TooManyOrigins"}, + {"shape":"TooManyCacheBehaviors"}, + {"shape":"TooManyCookieNamesInWhiteList"}, + {"shape":"InvalidForwardCookies"}, + {"shape":"TooManyHeadersInForwardedValues"}, + {"shape":"InvalidHeadersForS3Origin"}, + {"shape":"InconsistentQuantities"}, + {"shape":"TooManyCertificates"}, + {"shape":"InvalidLocationCode"}, + {"shape":"InvalidGeoRestrictionParameter"}, + {"shape":"InvalidProtocolSettings"}, + {"shape":"InvalidTTLOrder"}, + {"shape":"InvalidWebACLId"}, + {"shape":"TooManyOriginCustomHeaders"} + ] + }, + "CreateInvalidation":{ + "name":"CreateInvalidation2016_01_28", + "http":{ + "method":"POST", + "requestUri":"/2016-01-28/distribution/{DistributionId}/invalidation", + "responseCode":201 + }, + "input":{"shape":"CreateInvalidationRequest"}, + "output":{"shape":"CreateInvalidationResult"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"MissingBody"}, + {"shape":"InvalidArgument"}, + {"shape":"NoSuchDistribution"}, + {"shape":"BatchTooLarge"}, + {"shape":"TooManyInvalidationsInProgress"}, + {"shape":"InconsistentQuantities"} + ] + }, + "CreateStreamingDistribution":{ + "name":"CreateStreamingDistribution2016_01_28", + "http":{ + "method":"POST", + "requestUri":"/2016-01-28/streaming-distribution", + "responseCode":201 + 
}, + "input":{"shape":"CreateStreamingDistributionRequest"}, + "output":{"shape":"CreateStreamingDistributionResult"}, + "errors":[ + {"shape":"CNAMEAlreadyExists"}, + {"shape":"StreamingDistributionAlreadyExists"}, + {"shape":"InvalidOrigin"}, + {"shape":"InvalidOriginAccessIdentity"}, + {"shape":"AccessDenied"}, + {"shape":"TooManyTrustedSigners"}, + {"shape":"TrustedSignerDoesNotExist"}, + {"shape":"MissingBody"}, + {"shape":"TooManyStreamingDistributionCNAMEs"}, + {"shape":"TooManyStreamingDistributions"}, + {"shape":"InvalidArgument"}, + {"shape":"InconsistentQuantities"} + ] + }, + "DeleteCloudFrontOriginAccessIdentity":{ + "name":"DeleteCloudFrontOriginAccessIdentity2016_01_28", + "http":{ + "method":"DELETE", + "requestUri":"/2016-01-28/origin-access-identity/cloudfront/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeleteCloudFrontOriginAccessIdentityRequest"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"NoSuchCloudFrontOriginAccessIdentity"}, + {"shape":"PreconditionFailed"}, + {"shape":"CloudFrontOriginAccessIdentityInUse"} + ] + }, + "DeleteDistribution":{ + "name":"DeleteDistribution2016_01_28", + "http":{ + "method":"DELETE", + "requestUri":"/2016-01-28/distribution/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeleteDistributionRequest"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"DistributionNotDisabled"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"NoSuchDistribution"}, + {"shape":"PreconditionFailed"} + ] + }, + "DeleteStreamingDistribution":{ + "name":"DeleteStreamingDistribution2016_01_28", + "http":{ + "method":"DELETE", + "requestUri":"/2016-01-28/streaming-distribution/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeleteStreamingDistributionRequest"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"StreamingDistributionNotDisabled"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"NoSuchStreamingDistribution"}, + {"shape":"PreconditionFailed"} + ] + }, + "GetCloudFrontOriginAccessIdentity":{ + "name":"GetCloudFrontOriginAccessIdentity2016_01_28", + "http":{ + "method":"GET", + "requestUri":"/2016-01-28/origin-access-identity/cloudfront/{Id}" + }, + "input":{"shape":"GetCloudFrontOriginAccessIdentityRequest"}, + "output":{"shape":"GetCloudFrontOriginAccessIdentityResult"}, + "errors":[ + {"shape":"NoSuchCloudFrontOriginAccessIdentity"}, + {"shape":"AccessDenied"} + ] + }, + "GetCloudFrontOriginAccessIdentityConfig":{ + "name":"GetCloudFrontOriginAccessIdentityConfig2016_01_28", + "http":{ + "method":"GET", + "requestUri":"/2016-01-28/origin-access-identity/cloudfront/{Id}/config" + }, + "input":{"shape":"GetCloudFrontOriginAccessIdentityConfigRequest"}, + "output":{"shape":"GetCloudFrontOriginAccessIdentityConfigResult"}, + "errors":[ + {"shape":"NoSuchCloudFrontOriginAccessIdentity"}, + {"shape":"AccessDenied"} + ] + }, + "GetDistribution":{ + "name":"GetDistribution2016_01_28", + "http":{ + "method":"GET", + "requestUri":"/2016-01-28/distribution/{Id}" + }, + "input":{"shape":"GetDistributionRequest"}, + "output":{"shape":"GetDistributionResult"}, + "errors":[ + {"shape":"NoSuchDistribution"}, + {"shape":"AccessDenied"} + ] + }, + "GetDistributionConfig":{ + "name":"GetDistributionConfig2016_01_28", + "http":{ + "method":"GET", + "requestUri":"/2016-01-28/distribution/{Id}/config" + }, + "input":{"shape":"GetDistributionConfigRequest"}, + "output":{"shape":"GetDistributionConfigResult"}, + "errors":[ + {"shape":"NoSuchDistribution"}, + {"shape":"AccessDenied"} + ] + }, + 
"GetInvalidation":{ + "name":"GetInvalidation2016_01_28", + "http":{ + "method":"GET", + "requestUri":"/2016-01-28/distribution/{DistributionId}/invalidation/{Id}" + }, + "input":{"shape":"GetInvalidationRequest"}, + "output":{"shape":"GetInvalidationResult"}, + "errors":[ + {"shape":"NoSuchInvalidation"}, + {"shape":"NoSuchDistribution"}, + {"shape":"AccessDenied"} + ] + }, + "GetStreamingDistribution":{ + "name":"GetStreamingDistribution2016_01_28", + "http":{ + "method":"GET", + "requestUri":"/2016-01-28/streaming-distribution/{Id}" + }, + "input":{"shape":"GetStreamingDistributionRequest"}, + "output":{"shape":"GetStreamingDistributionResult"}, + "errors":[ + {"shape":"NoSuchStreamingDistribution"}, + {"shape":"AccessDenied"} + ] + }, + "GetStreamingDistributionConfig":{ + "name":"GetStreamingDistributionConfig2016_01_28", + "http":{ + "method":"GET", + "requestUri":"/2016-01-28/streaming-distribution/{Id}/config" + }, + "input":{"shape":"GetStreamingDistributionConfigRequest"}, + "output":{"shape":"GetStreamingDistributionConfigResult"}, + "errors":[ + {"shape":"NoSuchStreamingDistribution"}, + {"shape":"AccessDenied"} + ] + }, + "ListCloudFrontOriginAccessIdentities":{ + "name":"ListCloudFrontOriginAccessIdentities2016_01_28", + "http":{ + "method":"GET", + "requestUri":"/2016-01-28/origin-access-identity/cloudfront" + }, + "input":{"shape":"ListCloudFrontOriginAccessIdentitiesRequest"}, + "output":{"shape":"ListCloudFrontOriginAccessIdentitiesResult"}, + "errors":[ + {"shape":"InvalidArgument"} + ] + }, + "ListDistributions":{ + "name":"ListDistributions2016_01_28", + "http":{ + "method":"GET", + "requestUri":"/2016-01-28/distribution" + }, + "input":{"shape":"ListDistributionsRequest"}, + "output":{"shape":"ListDistributionsResult"}, + "errors":[ + {"shape":"InvalidArgument"} + ] + }, + "ListDistributionsByWebACLId":{ + "name":"ListDistributionsByWebACLId2016_01_28", + "http":{ + "method":"GET", + "requestUri":"/2016-01-28/distributionsByWebACLId/{WebACLId}" + }, + "input":{"shape":"ListDistributionsByWebACLIdRequest"}, + "output":{"shape":"ListDistributionsByWebACLIdResult"}, + "errors":[ + {"shape":"InvalidArgument"}, + {"shape":"InvalidWebACLId"} + ] + }, + "ListInvalidations":{ + "name":"ListInvalidations2016_01_28", + "http":{ + "method":"GET", + "requestUri":"/2016-01-28/distribution/{DistributionId}/invalidation" + }, + "input":{"shape":"ListInvalidationsRequest"}, + "output":{"shape":"ListInvalidationsResult"}, + "errors":[ + {"shape":"InvalidArgument"}, + {"shape":"NoSuchDistribution"}, + {"shape":"AccessDenied"} + ] + }, + "ListStreamingDistributions":{ + "name":"ListStreamingDistributions2016_01_28", + "http":{ + "method":"GET", + "requestUri":"/2016-01-28/streaming-distribution" + }, + "input":{"shape":"ListStreamingDistributionsRequest"}, + "output":{"shape":"ListStreamingDistributionsResult"}, + "errors":[ + {"shape":"InvalidArgument"} + ] + }, + "UpdateCloudFrontOriginAccessIdentity":{ + "name":"UpdateCloudFrontOriginAccessIdentity2016_01_28", + "http":{ + "method":"PUT", + "requestUri":"/2016-01-28/origin-access-identity/cloudfront/{Id}/config" + }, + "input":{"shape":"UpdateCloudFrontOriginAccessIdentityRequest"}, + "output":{"shape":"UpdateCloudFrontOriginAccessIdentityResult"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"IllegalUpdate"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"MissingBody"}, + {"shape":"NoSuchCloudFrontOriginAccessIdentity"}, + {"shape":"PreconditionFailed"}, + {"shape":"InvalidArgument"}, + {"shape":"InconsistentQuantities"} + 
] + }, + "UpdateDistribution":{ + "name":"UpdateDistribution2016_01_28", + "http":{ + "method":"PUT", + "requestUri":"/2016-01-28/distribution/{Id}/config" + }, + "input":{"shape":"UpdateDistributionRequest"}, + "output":{"shape":"UpdateDistributionResult"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"CNAMEAlreadyExists"}, + {"shape":"IllegalUpdate"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"MissingBody"}, + {"shape":"NoSuchDistribution"}, + {"shape":"PreconditionFailed"}, + {"shape":"TooManyDistributionCNAMEs"}, + {"shape":"InvalidDefaultRootObject"}, + {"shape":"InvalidRelativePath"}, + {"shape":"InvalidErrorCode"}, + {"shape":"InvalidResponseCode"}, + {"shape":"InvalidArgument"}, + {"shape":"InvalidOriginAccessIdentity"}, + {"shape":"TooManyTrustedSigners"}, + {"shape":"TrustedSignerDoesNotExist"}, + {"shape":"InvalidViewerCertificate"}, + {"shape":"InvalidMinimumProtocolVersion"}, + {"shape":"InvalidRequiredProtocol"}, + {"shape":"NoSuchOrigin"}, + {"shape":"TooManyOrigins"}, + {"shape":"TooManyCacheBehaviors"}, + {"shape":"TooManyCookieNamesInWhiteList"}, + {"shape":"InvalidForwardCookies"}, + {"shape":"TooManyHeadersInForwardedValues"}, + {"shape":"InvalidHeadersForS3Origin"}, + {"shape":"InconsistentQuantities"}, + {"shape":"TooManyCertificates"}, + {"shape":"InvalidLocationCode"}, + {"shape":"InvalidGeoRestrictionParameter"}, + {"shape":"InvalidTTLOrder"}, + {"shape":"InvalidWebACLId"}, + {"shape":"TooManyOriginCustomHeaders"} + ] + }, + "UpdateStreamingDistribution":{ + "name":"UpdateStreamingDistribution2016_01_28", + "http":{ + "method":"PUT", + "requestUri":"/2016-01-28/streaming-distribution/{Id}/config" + }, + "input":{"shape":"UpdateStreamingDistributionRequest"}, + "output":{"shape":"UpdateStreamingDistributionResult"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"CNAMEAlreadyExists"}, + {"shape":"IllegalUpdate"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"MissingBody"}, + {"shape":"NoSuchStreamingDistribution"}, + {"shape":"PreconditionFailed"}, + {"shape":"TooManyStreamingDistributionCNAMEs"}, + {"shape":"InvalidArgument"}, + {"shape":"InvalidOriginAccessIdentity"}, + {"shape":"TooManyTrustedSigners"}, + {"shape":"TrustedSignerDoesNotExist"}, + {"shape":"InconsistentQuantities"} + ] + } + }, + "shapes":{ + "AccessDenied":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":403}, + "exception":true + }, + "ActiveTrustedSigners":{ + "type":"structure", + "required":[ + "Enabled", + "Quantity" + ], + "members":{ + "Enabled":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"SignerList"} + } + }, + "AliasList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"CNAME" + } + }, + "Aliases":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"AliasList"} + } + }, + "AllowedMethods":{ + "type":"structure", + "required":[ + "Quantity", + "Items" + ], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"MethodsList"}, + "CachedMethods":{"shape":"CachedMethods"} + } + }, + "AwsAccountNumberList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"AwsAccountNumber" + } + }, + "BatchTooLarge":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":413}, + "exception":true + }, + "CNAMEAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + 
"exception":true + }, + "CacheBehavior":{ + "type":"structure", + "required":[ + "PathPattern", + "TargetOriginId", + "ForwardedValues", + "TrustedSigners", + "ViewerProtocolPolicy", + "MinTTL" + ], + "members":{ + "PathPattern":{"shape":"string"}, + "TargetOriginId":{"shape":"string"}, + "ForwardedValues":{"shape":"ForwardedValues"}, + "TrustedSigners":{"shape":"TrustedSigners"}, + "ViewerProtocolPolicy":{"shape":"ViewerProtocolPolicy"}, + "MinTTL":{"shape":"long"}, + "AllowedMethods":{"shape":"AllowedMethods"}, + "SmoothStreaming":{"shape":"boolean"}, + "DefaultTTL":{"shape":"long"}, + "MaxTTL":{"shape":"long"}, + "Compress":{"shape":"boolean"} + } + }, + "CacheBehaviorList":{ + "type":"list", + "member":{ + "shape":"CacheBehavior", + "locationName":"CacheBehavior" + } + }, + "CacheBehaviors":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"CacheBehaviorList"} + } + }, + "CachedMethods":{ + "type":"structure", + "required":[ + "Quantity", + "Items" + ], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"MethodsList"} + } + }, + "CertificateSource":{ + "type":"string", + "enum":[ + "cloudfront", + "iam", + "acm" + ] + }, + "CloudFrontOriginAccessIdentity":{ + "type":"structure", + "required":[ + "Id", + "S3CanonicalUserId" + ], + "members":{ + "Id":{"shape":"string"}, + "S3CanonicalUserId":{"shape":"string"}, + "CloudFrontOriginAccessIdentityConfig":{"shape":"CloudFrontOriginAccessIdentityConfig"} + } + }, + "CloudFrontOriginAccessIdentityAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "CloudFrontOriginAccessIdentityConfig":{ + "type":"structure", + "required":[ + "CallerReference", + "Comment" + ], + "members":{ + "CallerReference":{"shape":"string"}, + "Comment":{"shape":"string"} + } + }, + "CloudFrontOriginAccessIdentityInUse":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "CloudFrontOriginAccessIdentityList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{"shape":"string"}, + "NextMarker":{"shape":"string"}, + "MaxItems":{"shape":"integer"}, + "IsTruncated":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"CloudFrontOriginAccessIdentitySummaryList"} + } + }, + "CloudFrontOriginAccessIdentitySummary":{ + "type":"structure", + "required":[ + "Id", + "S3CanonicalUserId", + "Comment" + ], + "members":{ + "Id":{"shape":"string"}, + "S3CanonicalUserId":{"shape":"string"}, + "Comment":{"shape":"string"} + } + }, + "CloudFrontOriginAccessIdentitySummaryList":{ + "type":"list", + "member":{ + "shape":"CloudFrontOriginAccessIdentitySummary", + "locationName":"CloudFrontOriginAccessIdentitySummary" + } + }, + "CookieNameList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Name" + } + }, + "CookieNames":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"CookieNameList"} + } + }, + "CookiePreference":{ + "type":"structure", + "required":["Forward"], + "members":{ + "Forward":{"shape":"ItemSelection"}, + "WhitelistedNames":{"shape":"CookieNames"} + } + }, + "CreateCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "required":["CloudFrontOriginAccessIdentityConfig"], + "members":{ + "CloudFrontOriginAccessIdentityConfig":{ + 
"shape":"CloudFrontOriginAccessIdentityConfig", + "locationName":"CloudFrontOriginAccessIdentityConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2016-01-28/"} + } + }, + "payload":"CloudFrontOriginAccessIdentityConfig" + }, + "CreateCloudFrontOriginAccessIdentityResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentity":{"shape":"CloudFrontOriginAccessIdentity"}, + "Location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"CloudFrontOriginAccessIdentity" + }, + "CreateDistributionRequest":{ + "type":"structure", + "required":["DistributionConfig"], + "members":{ + "DistributionConfig":{ + "shape":"DistributionConfig", + "locationName":"DistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2016-01-28/"} + } + }, + "payload":"DistributionConfig" + }, + "CreateDistributionResult":{ + "type":"structure", + "members":{ + "Distribution":{"shape":"Distribution"}, + "Location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"Distribution" + }, + "CreateInvalidationRequest":{ + "type":"structure", + "required":[ + "DistributionId", + "InvalidationBatch" + ], + "members":{ + "DistributionId":{ + "shape":"string", + "location":"uri", + "locationName":"DistributionId" + }, + "InvalidationBatch":{ + "shape":"InvalidationBatch", + "locationName":"InvalidationBatch", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2016-01-28/"} + } + }, + "payload":"InvalidationBatch" + }, + "CreateInvalidationResult":{ + "type":"structure", + "members":{ + "Location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "Invalidation":{"shape":"Invalidation"} + }, + "payload":"Invalidation" + }, + "CreateStreamingDistributionRequest":{ + "type":"structure", + "required":["StreamingDistributionConfig"], + "members":{ + "StreamingDistributionConfig":{ + "shape":"StreamingDistributionConfig", + "locationName":"StreamingDistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2016-01-28/"} + } + }, + "payload":"StreamingDistributionConfig" + }, + "CreateStreamingDistributionResult":{ + "type":"structure", + "members":{ + "StreamingDistribution":{"shape":"StreamingDistribution"}, + "Location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"StreamingDistribution" + }, + "CustomErrorResponse":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"integer"}, + "ResponsePagePath":{"shape":"string"}, + "ResponseCode":{"shape":"string"}, + "ErrorCachingMinTTL":{"shape":"long"} + } + }, + "CustomErrorResponseList":{ + "type":"list", + "member":{ + "shape":"CustomErrorResponse", + "locationName":"CustomErrorResponse" + } + }, + "CustomErrorResponses":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"CustomErrorResponseList"} + } + }, + "CustomHeaders":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"OriginCustomHeadersList"} + } + }, + "CustomOriginConfig":{ + "type":"structure", + "required":[ + "HTTPPort", + "HTTPSPort", + "OriginProtocolPolicy" 
+ ], + "members":{ + "HTTPPort":{"shape":"integer"}, + "HTTPSPort":{"shape":"integer"}, + "OriginProtocolPolicy":{"shape":"OriginProtocolPolicy"}, + "OriginSslProtocols":{"shape":"OriginSslProtocols"} + } + }, + "DefaultCacheBehavior":{ + "type":"structure", + "required":[ + "TargetOriginId", + "ForwardedValues", + "TrustedSigners", + "ViewerProtocolPolicy", + "MinTTL" + ], + "members":{ + "TargetOriginId":{"shape":"string"}, + "ForwardedValues":{"shape":"ForwardedValues"}, + "TrustedSigners":{"shape":"TrustedSigners"}, + "ViewerProtocolPolicy":{"shape":"ViewerProtocolPolicy"}, + "MinTTL":{"shape":"long"}, + "AllowedMethods":{"shape":"AllowedMethods"}, + "SmoothStreaming":{"shape":"boolean"}, + "DefaultTTL":{"shape":"long"}, + "MaxTTL":{"shape":"long"}, + "Compress":{"shape":"boolean"} + } + }, + "DeleteCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + } + }, + "DeleteDistributionRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + } + }, + "DeleteStreamingDistributionRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + } + }, + "Distribution":{ + "type":"structure", + "required":[ + "Id", + "Status", + "LastModifiedTime", + "InProgressInvalidationBatches", + "DomainName", + "ActiveTrustedSigners", + "DistributionConfig" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "LastModifiedTime":{"shape":"timestamp"}, + "InProgressInvalidationBatches":{"shape":"integer"}, + "DomainName":{"shape":"string"}, + "ActiveTrustedSigners":{"shape":"ActiveTrustedSigners"}, + "DistributionConfig":{"shape":"DistributionConfig"} + } + }, + "DistributionAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "DistributionConfig":{ + "type":"structure", + "required":[ + "CallerReference", + "Origins", + "DefaultCacheBehavior", + "Comment", + "Enabled" + ], + "members":{ + "CallerReference":{"shape":"string"}, + "Aliases":{"shape":"Aliases"}, + "DefaultRootObject":{"shape":"string"}, + "Origins":{"shape":"Origins"}, + "DefaultCacheBehavior":{"shape":"DefaultCacheBehavior"}, + "CacheBehaviors":{"shape":"CacheBehaviors"}, + "CustomErrorResponses":{"shape":"CustomErrorResponses"}, + "Comment":{"shape":"string"}, + "Logging":{"shape":"LoggingConfig"}, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{"shape":"boolean"}, + "ViewerCertificate":{"shape":"ViewerCertificate"}, + "Restrictions":{"shape":"Restrictions"}, + "WebACLId":{"shape":"string"} + } + }, + "DistributionList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{"shape":"string"}, + "NextMarker":{"shape":"string"}, + "MaxItems":{"shape":"integer"}, + "IsTruncated":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"DistributionSummaryList"} + } + }, + "DistributionNotDisabled":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + 
"error":{"httpStatusCode":409}, + "exception":true + }, + "DistributionSummary":{ + "type":"structure", + "required":[ + "Id", + "Status", + "LastModifiedTime", + "DomainName", + "Aliases", + "Origins", + "DefaultCacheBehavior", + "CacheBehaviors", + "CustomErrorResponses", + "Comment", + "PriceClass", + "Enabled", + "ViewerCertificate", + "Restrictions", + "WebACLId" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "LastModifiedTime":{"shape":"timestamp"}, + "DomainName":{"shape":"string"}, + "Aliases":{"shape":"Aliases"}, + "Origins":{"shape":"Origins"}, + "DefaultCacheBehavior":{"shape":"DefaultCacheBehavior"}, + "CacheBehaviors":{"shape":"CacheBehaviors"}, + "CustomErrorResponses":{"shape":"CustomErrorResponses"}, + "Comment":{"shape":"string"}, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{"shape":"boolean"}, + "ViewerCertificate":{"shape":"ViewerCertificate"}, + "Restrictions":{"shape":"Restrictions"}, + "WebACLId":{"shape":"string"} + } + }, + "DistributionSummaryList":{ + "type":"list", + "member":{ + "shape":"DistributionSummary", + "locationName":"DistributionSummary" + } + }, + "ForwardedValues":{ + "type":"structure", + "required":[ + "QueryString", + "Cookies" + ], + "members":{ + "QueryString":{"shape":"boolean"}, + "Cookies":{"shape":"CookiePreference"}, + "Headers":{"shape":"Headers"} + } + }, + "GeoRestriction":{ + "type":"structure", + "required":[ + "RestrictionType", + "Quantity" + ], + "members":{ + "RestrictionType":{"shape":"GeoRestrictionType"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"LocationList"} + } + }, + "GeoRestrictionType":{ + "type":"string", + "enum":[ + "blacklist", + "whitelist", + "none" + ] + }, + "GetCloudFrontOriginAccessIdentityConfigRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetCloudFrontOriginAccessIdentityConfigResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentityConfig":{"shape":"CloudFrontOriginAccessIdentityConfig"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"CloudFrontOriginAccessIdentityConfig" + }, + "GetCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetCloudFrontOriginAccessIdentityResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentity":{"shape":"CloudFrontOriginAccessIdentity"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"CloudFrontOriginAccessIdentity" + }, + "GetDistributionConfigRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetDistributionConfigResult":{ + "type":"structure", + "members":{ + "DistributionConfig":{"shape":"DistributionConfig"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"DistributionConfig" + }, + "GetDistributionRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetDistributionResult":{ + "type":"structure", + "members":{ + "Distribution":{"shape":"Distribution"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"Distribution" + }, + 
"GetInvalidationRequest":{ + "type":"structure", + "required":[ + "DistributionId", + "Id" + ], + "members":{ + "DistributionId":{ + "shape":"string", + "location":"uri", + "locationName":"DistributionId" + }, + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetInvalidationResult":{ + "type":"structure", + "members":{ + "Invalidation":{"shape":"Invalidation"} + }, + "payload":"Invalidation" + }, + "GetStreamingDistributionConfigRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetStreamingDistributionConfigResult":{ + "type":"structure", + "members":{ + "StreamingDistributionConfig":{"shape":"StreamingDistributionConfig"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"StreamingDistributionConfig" + }, + "GetStreamingDistributionRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetStreamingDistributionResult":{ + "type":"structure", + "members":{ + "StreamingDistribution":{"shape":"StreamingDistribution"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"StreamingDistribution" + }, + "HeaderList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Name" + } + }, + "Headers":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"HeaderList"} + } + }, + "IllegalUpdate":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InconsistentQuantities":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidArgument":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidDefaultRootObject":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidErrorCode":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidForwardCookies":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidGeoRestrictionParameter":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidHeadersForS3Origin":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidIfMatchVersion":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidLocationCode":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidMinimumProtocolVersion":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidOrigin":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidOriginAccessIdentity":{ + "type":"structure", + 
"members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidProtocolSettings":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidRelativePath":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidRequiredProtocol":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidResponseCode":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidTTLOrder":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidViewerCertificate":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidWebACLId":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "Invalidation":{ + "type":"structure", + "required":[ + "Id", + "Status", + "CreateTime", + "InvalidationBatch" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "CreateTime":{"shape":"timestamp"}, + "InvalidationBatch":{"shape":"InvalidationBatch"} + } + }, + "InvalidationBatch":{ + "type":"structure", + "required":[ + "Paths", + "CallerReference" + ], + "members":{ + "Paths":{"shape":"Paths"}, + "CallerReference":{"shape":"string"} + } + }, + "InvalidationList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{"shape":"string"}, + "NextMarker":{"shape":"string"}, + "MaxItems":{"shape":"integer"}, + "IsTruncated":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"InvalidationSummaryList"} + } + }, + "InvalidationSummary":{ + "type":"structure", + "required":[ + "Id", + "CreateTime", + "Status" + ], + "members":{ + "Id":{"shape":"string"}, + "CreateTime":{"shape":"timestamp"}, + "Status":{"shape":"string"} + } + }, + "InvalidationSummaryList":{ + "type":"list", + "member":{ + "shape":"InvalidationSummary", + "locationName":"InvalidationSummary" + } + }, + "ItemSelection":{ + "type":"string", + "enum":[ + "none", + "whitelist", + "all" + ] + }, + "KeyPairIdList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"KeyPairId" + } + }, + "KeyPairIds":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"KeyPairIdList"} + } + }, + "ListCloudFrontOriginAccessIdentitiesRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListCloudFrontOriginAccessIdentitiesResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentityList":{"shape":"CloudFrontOriginAccessIdentityList"} + }, + "payload":"CloudFrontOriginAccessIdentityList" + }, + "ListDistributionsByWebACLIdRequest":{ + "type":"structure", + "required":["WebACLId"], + "members":{ + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + 
"locationName":"MaxItems" + }, + "WebACLId":{ + "shape":"string", + "location":"uri", + "locationName":"WebACLId" + } + } + }, + "ListDistributionsByWebACLIdResult":{ + "type":"structure", + "members":{ + "DistributionList":{"shape":"DistributionList"} + }, + "payload":"DistributionList" + }, + "ListDistributionsRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListDistributionsResult":{ + "type":"structure", + "members":{ + "DistributionList":{"shape":"DistributionList"} + }, + "payload":"DistributionList" + }, + "ListInvalidationsRequest":{ + "type":"structure", + "required":["DistributionId"], + "members":{ + "DistributionId":{ + "shape":"string", + "location":"uri", + "locationName":"DistributionId" + }, + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListInvalidationsResult":{ + "type":"structure", + "members":{ + "InvalidationList":{"shape":"InvalidationList"} + }, + "payload":"InvalidationList" + }, + "ListStreamingDistributionsRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListStreamingDistributionsResult":{ + "type":"structure", + "members":{ + "StreamingDistributionList":{"shape":"StreamingDistributionList"} + }, + "payload":"StreamingDistributionList" + }, + "LocationList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Location" + } + }, + "LoggingConfig":{ + "type":"structure", + "required":[ + "Enabled", + "IncludeCookies", + "Bucket", + "Prefix" + ], + "members":{ + "Enabled":{"shape":"boolean"}, + "IncludeCookies":{"shape":"boolean"}, + "Bucket":{"shape":"string"}, + "Prefix":{"shape":"string"} + } + }, + "Method":{ + "type":"string", + "enum":[ + "GET", + "HEAD", + "POST", + "PUT", + "PATCH", + "OPTIONS", + "DELETE" + ] + }, + "MethodsList":{ + "type":"list", + "member":{ + "shape":"Method", + "locationName":"Method" + } + }, + "MinimumProtocolVersion":{ + "type":"string", + "enum":[ + "SSLv3", + "TLSv1" + ] + }, + "MissingBody":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "NoSuchCloudFrontOriginAccessIdentity":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchDistribution":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchInvalidation":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchOrigin":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchStreamingDistribution":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "Origin":{ + "type":"structure", + "required":[ + "Id", + "DomainName" + ], + "members":{ + "Id":{"shape":"string"}, + "DomainName":{"shape":"string"}, + 
"OriginPath":{"shape":"string"}, + "CustomHeaders":{"shape":"CustomHeaders"}, + "S3OriginConfig":{"shape":"S3OriginConfig"}, + "CustomOriginConfig":{"shape":"CustomOriginConfig"} + } + }, + "OriginCustomHeader":{ + "type":"structure", + "required":[ + "HeaderName", + "HeaderValue" + ], + "members":{ + "HeaderName":{"shape":"string"}, + "HeaderValue":{"shape":"string"} + } + }, + "OriginCustomHeadersList":{ + "type":"list", + "member":{ + "shape":"OriginCustomHeader", + "locationName":"OriginCustomHeader" + } + }, + "OriginList":{ + "type":"list", + "member":{ + "shape":"Origin", + "locationName":"Origin" + }, + "min":1 + }, + "OriginProtocolPolicy":{ + "type":"string", + "enum":[ + "http-only", + "match-viewer", + "https-only" + ] + }, + "OriginSslProtocols":{ + "type":"structure", + "required":[ + "Quantity", + "Items" + ], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"SslProtocolsList"} + } + }, + "Origins":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"OriginList"} + } + }, + "PathList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Path" + } + }, + "Paths":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"PathList"} + } + }, + "PreconditionFailed":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":412}, + "exception":true + }, + "PriceClass":{ + "type":"string", + "enum":[ + "PriceClass_100", + "PriceClass_200", + "PriceClass_All" + ] + }, + "Restrictions":{ + "type":"structure", + "required":["GeoRestriction"], + "members":{ + "GeoRestriction":{"shape":"GeoRestriction"} + } + }, + "S3Origin":{ + "type":"structure", + "required":[ + "DomainName", + "OriginAccessIdentity" + ], + "members":{ + "DomainName":{"shape":"string"}, + "OriginAccessIdentity":{"shape":"string"} + } + }, + "S3OriginConfig":{ + "type":"structure", + "required":["OriginAccessIdentity"], + "members":{ + "OriginAccessIdentity":{"shape":"string"} + } + }, + "SSLSupportMethod":{ + "type":"string", + "enum":[ + "sni-only", + "vip" + ] + }, + "Signer":{ + "type":"structure", + "members":{ + "AwsAccountNumber":{"shape":"string"}, + "KeyPairIds":{"shape":"KeyPairIds"} + } + }, + "SignerList":{ + "type":"list", + "member":{ + "shape":"Signer", + "locationName":"Signer" + } + }, + "SslProtocol":{ + "type":"string", + "enum":[ + "SSLv3", + "TLSv1", + "TLSv1.1", + "TLSv1.2" + ] + }, + "SslProtocolsList":{ + "type":"list", + "member":{ + "shape":"SslProtocol", + "locationName":"SslProtocol" + } + }, + "StreamingDistribution":{ + "type":"structure", + "required":[ + "Id", + "Status", + "DomainName", + "ActiveTrustedSigners", + "StreamingDistributionConfig" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "LastModifiedTime":{"shape":"timestamp"}, + "DomainName":{"shape":"string"}, + "ActiveTrustedSigners":{"shape":"ActiveTrustedSigners"}, + "StreamingDistributionConfig":{"shape":"StreamingDistributionConfig"} + } + }, + "StreamingDistributionAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "StreamingDistributionConfig":{ + "type":"structure", + "required":[ + "CallerReference", + "S3Origin", + "Comment", + "TrustedSigners", + "Enabled" + ], + "members":{ + "CallerReference":{"shape":"string"}, + "S3Origin":{"shape":"S3Origin"}, + "Aliases":{"shape":"Aliases"}, + 
"Comment":{"shape":"string"}, + "Logging":{"shape":"StreamingLoggingConfig"}, + "TrustedSigners":{"shape":"TrustedSigners"}, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{"shape":"boolean"} + } + }, + "StreamingDistributionList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{"shape":"string"}, + "NextMarker":{"shape":"string"}, + "MaxItems":{"shape":"integer"}, + "IsTruncated":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"StreamingDistributionSummaryList"} + } + }, + "StreamingDistributionNotDisabled":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "StreamingDistributionSummary":{ + "type":"structure", + "required":[ + "Id", + "Status", + "LastModifiedTime", + "DomainName", + "S3Origin", + "Aliases", + "TrustedSigners", + "Comment", + "PriceClass", + "Enabled" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "LastModifiedTime":{"shape":"timestamp"}, + "DomainName":{"shape":"string"}, + "S3Origin":{"shape":"S3Origin"}, + "Aliases":{"shape":"Aliases"}, + "TrustedSigners":{"shape":"TrustedSigners"}, + "Comment":{"shape":"string"}, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{"shape":"boolean"} + } + }, + "StreamingDistributionSummaryList":{ + "type":"list", + "member":{ + "shape":"StreamingDistributionSummary", + "locationName":"StreamingDistributionSummary" + } + }, + "StreamingLoggingConfig":{ + "type":"structure", + "required":[ + "Enabled", + "Bucket", + "Prefix" + ], + "members":{ + "Enabled":{"shape":"boolean"}, + "Bucket":{"shape":"string"}, + "Prefix":{"shape":"string"} + } + }, + "TooManyCacheBehaviors":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyCertificates":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyCloudFrontOriginAccessIdentities":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyCookieNamesInWhiteList":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyDistributionCNAMEs":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyDistributions":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyHeadersInForwardedValues":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyInvalidationsInProgress":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyOriginCustomHeaders":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyOrigins":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyStreamingDistributionCNAMEs":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + 
"TooManyStreamingDistributions":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyTrustedSigners":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TrustedSignerDoesNotExist":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TrustedSigners":{ + "type":"structure", + "required":[ + "Enabled", + "Quantity" + ], + "members":{ + "Enabled":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"AwsAccountNumberList"} + } + }, + "UpdateCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "required":[ + "CloudFrontOriginAccessIdentityConfig", + "Id" + ], + "members":{ + "CloudFrontOriginAccessIdentityConfig":{ + "shape":"CloudFrontOriginAccessIdentityConfig", + "locationName":"CloudFrontOriginAccessIdentityConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2016-01-28/"} + }, + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "payload":"CloudFrontOriginAccessIdentityConfig" + }, + "UpdateCloudFrontOriginAccessIdentityResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentity":{"shape":"CloudFrontOriginAccessIdentity"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"CloudFrontOriginAccessIdentity" + }, + "UpdateDistributionRequest":{ + "type":"structure", + "required":[ + "DistributionConfig", + "Id" + ], + "members":{ + "DistributionConfig":{ + "shape":"DistributionConfig", + "locationName":"DistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2016-01-28/"} + }, + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "payload":"DistributionConfig" + }, + "UpdateDistributionResult":{ + "type":"structure", + "members":{ + "Distribution":{"shape":"Distribution"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"Distribution" + }, + "UpdateStreamingDistributionRequest":{ + "type":"structure", + "required":[ + "StreamingDistributionConfig", + "Id" + ], + "members":{ + "StreamingDistributionConfig":{ + "shape":"StreamingDistributionConfig", + "locationName":"StreamingDistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2016-01-28/"} + }, + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "payload":"StreamingDistributionConfig" + }, + "UpdateStreamingDistributionResult":{ + "type":"structure", + "members":{ + "StreamingDistribution":{"shape":"StreamingDistribution"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"StreamingDistribution" + }, + "ViewerCertificate":{ + "type":"structure", + "members":{ + "CloudFrontDefaultCertificate":{"shape":"boolean"}, + "IAMCertificateId":{"shape":"string"}, + "ACMCertificateArn":{"shape":"string"}, + "SSLSupportMethod":{"shape":"SSLSupportMethod"}, + "MinimumProtocolVersion":{"shape":"MinimumProtocolVersion"}, + "Certificate":{ + "shape":"string", + "deprecated":true + }, + 
"CertificateSource":{ + "shape":"CertificateSource", + "deprecated":true + } + } + }, + "ViewerProtocolPolicy":{ + "type":"string", + "enum":[ + "allow-all", + "https-only", + "redirect-to-https" + ] + }, + "boolean":{"type":"boolean"}, + "integer":{"type":"integer"}, + "long":{"type":"long"}, + "string":{"type":"string"}, + "timestamp":{"type":"timestamp"} + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-28/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-28/docs-2.json new file mode 100644 index 000000000..8174625b4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-28/docs-2.json @@ -0,0 +1,1220 @@ +{ + "version": "2.0", + "service": null, + "operations": { + "CreateCloudFrontOriginAccessIdentity": "Create a new origin access identity.", + "CreateDistribution": "Create a new distribution.", + "CreateInvalidation": "Create a new invalidation.", + "CreateStreamingDistribution": "Create a new streaming distribution.", + "DeleteCloudFrontOriginAccessIdentity": "Delete an origin access identity.", + "DeleteDistribution": "Delete a distribution.", + "DeleteStreamingDistribution": "Delete a streaming distribution.", + "GetCloudFrontOriginAccessIdentity": "Get the information about an origin access identity.", + "GetCloudFrontOriginAccessIdentityConfig": "Get the configuration information about an origin access identity.", + "GetDistribution": "Get the information about a distribution.", + "GetDistributionConfig": "Get the configuration information about a distribution.", + "GetInvalidation": "Get the information about an invalidation.", + "GetStreamingDistribution": "Get the information about a streaming distribution.", + "GetStreamingDistributionConfig": "Get the configuration information about a streaming distribution.", + "ListCloudFrontOriginAccessIdentities": "List origin access identities.", + "ListDistributions": "List distributions.", + "ListDistributionsByWebACLId": "List the distributions that are associated with a specified AWS WAF web ACL.", + "ListInvalidations": "List invalidation batches.", + "ListStreamingDistributions": "List streaming distributions.", + "UpdateCloudFrontOriginAccessIdentity": "Update an origin access identity.", + "UpdateDistribution": "Update a distribution.", + "UpdateStreamingDistribution": "Update a streaming distribution." + }, + "shapes": { + "AccessDenied": { + "base": "Access denied.", + "refs": { + } + }, + "ActiveTrustedSigners": { + "base": "A complex type that lists the AWS accounts, if any, that you included in the TrustedSigners complex type for the default cache behavior or for any of the other cache behaviors for this distribution. These are accounts that you want to allow to create signed URLs for private content.", + "refs": { + "Distribution$ActiveTrustedSigners": "CloudFront automatically adds this element to the response only if you've set up the distribution to serve private content with signed URLs. The element lists the key pair IDs that CloudFront is aware of for each trusted signer. The Signer child element lists the AWS account number of the trusted signer (or an empty Self element if the signer is you). The Signer element also includes the IDs of any active key pairs associated with the trusted signer's AWS account. 
If no KeyPairId element appears for a Signer, that signer can't create working signed URLs.", + "StreamingDistribution$ActiveTrustedSigners": "CloudFront automatically adds this element to the response only if you've set up the distribution to serve private content with signed URLs. The element lists the key pair IDs that CloudFront is aware of for each trusted signer. The Signer child element lists the AWS account number of the trusted signer (or an empty Self element if the signer is you). The Signer element also includes the IDs of any active key pairs associated with the trusted signer's AWS account. If no KeyPairId element appears for a Signer, that signer can't create working signed URLs." + } + }, + "AliasList": { + "base": null, + "refs": { + "Aliases$Items": "Optional: A complex type that contains CNAME elements, if any, for this distribution. If Quantity is 0, you can omit Items." + } + }, + "Aliases": { + "base": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution.", + "refs": { + "DistributionConfig$Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution.", + "DistributionSummary$Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution.", + "StreamingDistributionConfig$Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this streaming distribution.", + "StreamingDistributionSummary$Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this streaming distribution." + } + }, + "AllowedMethods": { + "base": "A complex type that controls which HTTP methods CloudFront processes and forwards to your Amazon S3 bucket or your custom origin. There are three choices: - CloudFront forwards only GET and HEAD requests. - CloudFront forwards only GET, HEAD and OPTIONS requests. - CloudFront forwards GET, HEAD, OPTIONS, PUT, PATCH, POST, and DELETE requests. If you pick the third choice, you may need to restrict access to your Amazon S3 bucket or to your custom origin so users can't perform operations that you don't want them to. For example, you may not want users to have permission to delete objects from your origin.", + "refs": { + "CacheBehavior$AllowedMethods": null, + "DefaultCacheBehavior$AllowedMethods": null + } + }, + "AwsAccountNumberList": { + "base": null, + "refs": { + "TrustedSigners$Items": "Optional: A complex type that contains trusted signers for this cache behavior. If Quantity is 0, you can omit Items." + } + }, + "BatchTooLarge": { + "base": null, + "refs": { + } + }, + "CNAMEAlreadyExists": { + "base": null, + "refs": { + } + }, + "CacheBehavior": { + "base": "A complex type that describes how CloudFront processes requests. You can create up to 10 cache behaviors.You must create at least as many cache behaviors (including the default cache behavior) as you have origins if you want CloudFront to distribute objects from all of the origins. Each cache behavior specifies the one origin from which you want CloudFront to get objects. If you have two origins and only the default cache behavior, the default cache behavior will cause CloudFront to get objects from one of the origins, but the other origin will never be used. If you don't want to specify any cache behaviors, include only an empty CacheBehaviors element. 
Don't include an empty CacheBehavior element, or CloudFront returns a MalformedXML error. To delete all cache behaviors in an existing distribution, update the distribution configuration and include only an empty CacheBehaviors element. To add, change, or remove one or more cache behaviors, update the distribution configuration and specify all of the cache behaviors that you want to include in the updated distribution.", + "refs": { + "CacheBehaviorList$member": null + } + }, + "CacheBehaviorList": { + "base": null, + "refs": { + "CacheBehaviors$Items": "Optional: A complex type that contains cache behaviors for this distribution. If Quantity is 0, you can omit Items." + } + }, + "CacheBehaviors": { + "base": "A complex type that contains zero or more CacheBehavior elements.", + "refs": { + "DistributionConfig$CacheBehaviors": "A complex type that contains zero or more CacheBehavior elements.", + "DistributionSummary$CacheBehaviors": "A complex type that contains zero or more CacheBehavior elements." + } + }, + "CachedMethods": { + "base": "A complex type that controls whether CloudFront caches the response to requests using the specified HTTP methods. There are two choices: - CloudFront caches responses to GET and HEAD requests. - CloudFront caches responses to GET, HEAD, and OPTIONS requests. If you pick the second choice for your S3 Origin, you may need to forward Access-Control-Request-Method, Access-Control-Request-Headers and Origin headers for the responses to be cached correctly.", + "refs": { + "AllowedMethods$CachedMethods": null + } + }, + "CertificateSource": { + "base": null, + "refs": { + "ViewerCertificate$CertificateSource": "Note: this field is deprecated. Please use one of [ACMCertificateArn, IAMCertificateId, CloudFrontDefaultCertificate]." + } + }, + "CloudFrontOriginAccessIdentity": { + "base": "CloudFront origin access identity.", + "refs": { + "CreateCloudFrontOriginAccessIdentityResult$CloudFrontOriginAccessIdentity": "The origin access identity's information.", + "GetCloudFrontOriginAccessIdentityResult$CloudFrontOriginAccessIdentity": "The origin access identity's information.", + "UpdateCloudFrontOriginAccessIdentityResult$CloudFrontOriginAccessIdentity": "The origin access identity's information." + } + }, + "CloudFrontOriginAccessIdentityAlreadyExists": { + "base": "If the CallerReference is a value you already sent in a previous request to create an identity but the content of the CloudFrontOriginAccessIdentityConfig is different from the original request, CloudFront returns a CloudFrontOriginAccessIdentityAlreadyExists error.", + "refs": { + } + }, + "CloudFrontOriginAccessIdentityConfig": { + "base": "Origin access identity configuration.", + "refs": { + "CloudFrontOriginAccessIdentity$CloudFrontOriginAccessIdentityConfig": "The current configuration information for the identity.", + "CreateCloudFrontOriginAccessIdentityRequest$CloudFrontOriginAccessIdentityConfig": "The origin access identity's configuration information.", + "GetCloudFrontOriginAccessIdentityConfigResult$CloudFrontOriginAccessIdentityConfig": "The origin access identity's configuration information.", + "UpdateCloudFrontOriginAccessIdentityRequest$CloudFrontOriginAccessIdentityConfig": "The identity's configuration information." 
+ } + }, + "CloudFrontOriginAccessIdentityInUse": { + "base": null, + "refs": { + } + }, + "CloudFrontOriginAccessIdentityList": { + "base": "The CloudFrontOriginAccessIdentityList type.", + "refs": { + "ListCloudFrontOriginAccessIdentitiesResult$CloudFrontOriginAccessIdentityList": "The CloudFrontOriginAccessIdentityList type." + } + }, + "CloudFrontOriginAccessIdentitySummary": { + "base": "Summary of the information about a CloudFront origin access identity.", + "refs": { + "CloudFrontOriginAccessIdentitySummaryList$member": null + } + }, + "CloudFrontOriginAccessIdentitySummaryList": { + "base": null, + "refs": { + "CloudFrontOriginAccessIdentityList$Items": "A complex type that contains one CloudFrontOriginAccessIdentitySummary element for each origin access identity that was created by the current AWS account." + } + }, + "CookieNameList": { + "base": null, + "refs": { + "CookieNames$Items": "Optional: A complex type that contains whitelisted cookies for this cache behavior. If Quantity is 0, you can omit Items." + } + }, + "CookieNames": { + "base": "A complex type that specifies the whitelisted cookies, if any, that you want CloudFront to forward to your origin that is associated with this cache behavior.", + "refs": { + "CookiePreference$WhitelistedNames": "A complex type that specifies the whitelisted cookies, if any, that you want CloudFront to forward to your origin that is associated with this cache behavior." + } + }, + "CookiePreference": { + "base": "A complex type that specifies the cookie preferences associated with this cache behavior.", + "refs": { + "ForwardedValues$Cookies": "A complex type that specifies how CloudFront handles cookies." + } + }, + "CreateCloudFrontOriginAccessIdentityRequest": { + "base": "The request to create a new origin access identity.", + "refs": { + } + }, + "CreateCloudFrontOriginAccessIdentityResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "CreateDistributionRequest": { + "base": "The request to create a new distribution.", + "refs": { + } + }, + "CreateDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "CreateInvalidationRequest": { + "base": "The request to create an invalidation.", + "refs": { + } + }, + "CreateInvalidationResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "CreateStreamingDistributionRequest": { + "base": "The request to create a new streaming distribution.", + "refs": { + } + }, + "CreateStreamingDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "CustomErrorResponse": { + "base": "A complex type that describes how you'd prefer CloudFront to respond to requests that result in either a 4xx or 5xx response. You can control whether a custom error page should be displayed, what the desired response code should be for this error page and how long should the error response be cached by CloudFront. If you don't want to specify any custom error responses, include only an empty CustomErrorResponses element. To delete all custom error responses in an existing distribution, update the distribution configuration and include only an empty CustomErrorResponses element. 
To add, change, or remove one or more custom error responses, update the distribution configuration and specify all of the custom error responses that you want to include in the updated distribution.", + "refs": { + "CustomErrorResponseList$member": null + } + }, + "CustomErrorResponseList": { + "base": null, + "refs": { + "CustomErrorResponses$Items": "Optional: A complex type that contains custom error responses for this distribution. If Quantity is 0, you can omit Items." + } + }, + "CustomErrorResponses": { + "base": "A complex type that contains zero or more CustomErrorResponse elements.", + "refs": { + "DistributionConfig$CustomErrorResponses": "A complex type that contains zero or more CustomErrorResponse elements.", + "DistributionSummary$CustomErrorResponses": "A complex type that contains zero or more CustomErrorResponses elements." + } + }, + "CustomHeaders": { + "base": "A complex type that contains the list of Custom Headers for each origin.", + "refs": { + "Origin$CustomHeaders": "A complex type that contains information about the custom headers associated with this Origin." + } + }, + "CustomOriginConfig": { + "base": "A customer origin.", + "refs": { + "Origin$CustomOriginConfig": "A complex type that contains information about a custom origin. If the origin is an Amazon S3 bucket, use the S3OriginConfig element instead." + } + }, + "DefaultCacheBehavior": { + "base": "A complex type that describes the default cache behavior if you do not specify a CacheBehavior element or if files don't match any of the values of PathPattern in CacheBehavior elements. You must create exactly one default cache behavior.", + "refs": { + "DistributionConfig$DefaultCacheBehavior": "A complex type that describes the default cache behavior if you do not specify a CacheBehavior element or if files don't match any of the values of PathPattern in CacheBehavior elements. You must create exactly one default cache behavior.", + "DistributionSummary$DefaultCacheBehavior": "A complex type that describes the default cache behavior if you do not specify a CacheBehavior element or if files don't match any of the values of PathPattern in CacheBehavior elements. You must create exactly one default cache behavior." + } + }, + "DeleteCloudFrontOriginAccessIdentityRequest": { + "base": "The request to delete an origin access identity.", + "refs": { + } + }, + "DeleteDistributionRequest": { + "base": "The request to delete a distribution.", + "refs": { + } + }, + "DeleteStreamingDistributionRequest": { + "base": "The request to delete a streaming distribution.", + "refs": { + } + }, + "Distribution": { + "base": "A distribution.", + "refs": { + "CreateDistributionResult$Distribution": "The distribution's information.", + "GetDistributionResult$Distribution": "The distribution's information.", + "UpdateDistributionResult$Distribution": "The distribution's information."
+ } + }, + "DistributionAlreadyExists": { + "base": "The caller reference you attempted to create the distribution with is associated with another distribution.", + "refs": { + } + }, + "DistributionConfig": { + "base": "A distribution Configuration.", + "refs": { + "CreateDistributionRequest$DistributionConfig": "The distribution's configuration information.", + "Distribution$DistributionConfig": "The current configuration information for the distribution.", + "GetDistributionConfigResult$DistributionConfig": "The distribution's configuration information.", + "UpdateDistributionRequest$DistributionConfig": "The distribution's configuration information." + } + }, + "DistributionList": { + "base": "A distribution list.", + "refs": { + "ListDistributionsByWebACLIdResult$DistributionList": "The DistributionList type.", + "ListDistributionsResult$DistributionList": "The DistributionList type." + } + }, + "DistributionNotDisabled": { + "base": null, + "refs": { + } + }, + "DistributionSummary": { + "base": "A summary of the information for an Amazon CloudFront distribution.", + "refs": { + "DistributionSummaryList$member": null + } + }, + "DistributionSummaryList": { + "base": null, + "refs": { + "DistributionList$Items": "A complex type that contains one DistributionSummary element for each distribution that was created by the current AWS account." + } + }, + "ForwardedValues": { + "base": "A complex type that specifies how CloudFront handles query strings, cookies and headers.", + "refs": { + "CacheBehavior$ForwardedValues": "A complex type that specifies how CloudFront handles query strings, cookies and headers.", + "DefaultCacheBehavior$ForwardedValues": "A complex type that specifies how CloudFront handles query strings, cookies and headers." + } + }, + "GeoRestriction": { + "base": "A complex type that controls the countries in which your content is distributed. For more information about geo restriction, go to Customizing Error Responses in the Amazon CloudFront Developer Guide. CloudFront determines the location of your users using MaxMind GeoIP databases. For information about the accuracy of these databases, see How accurate are your GeoIP databases? on the MaxMind website.", + "refs": { + "Restrictions$GeoRestriction": null + } + }, + "GeoRestrictionType": { + "base": null, + "refs": { + "GeoRestriction$RestrictionType": "The method that you want to use to restrict distribution of your content by country: - none: No geo restriction is enabled, meaning access to content is not restricted by client geo location. - blacklist: The Location elements specify the countries in which you do not want CloudFront to distribute your content. - whitelist: The Location elements specify the countries in which you want CloudFront to distribute your content." 
+ } + }, + "GetCloudFrontOriginAccessIdentityConfigRequest": { + "base": "The request to get an origin access identity's configuration.", + "refs": { + } + }, + "GetCloudFrontOriginAccessIdentityConfigResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetCloudFrontOriginAccessIdentityRequest": { + "base": "The request to get an origin access identity's information.", + "refs": { + } + }, + "GetCloudFrontOriginAccessIdentityResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetDistributionConfigRequest": { + "base": "The request to get a distribution configuration.", + "refs": { + } + }, + "GetDistributionConfigResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetDistributionRequest": { + "base": "The request to get a distribution's information.", + "refs": { + } + }, + "GetDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetInvalidationRequest": { + "base": "The request to get an invalidation's information.", + "refs": { + } + }, + "GetInvalidationResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetStreamingDistributionConfigRequest": { + "base": "To request to get a streaming distribution configuration.", + "refs": { + } + }, + "GetStreamingDistributionConfigResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetStreamingDistributionRequest": { + "base": "The request to get a streaming distribution's information.", + "refs": { + } + }, + "GetStreamingDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "HeaderList": { + "base": null, + "refs": { + "Headers$Items": "Optional: A complex type that contains a Name element for each header that you want CloudFront to forward to the origin and to vary on for this cache behavior. If Quantity is 0, omit Items." + } + }, + "Headers": { + "base": "A complex type that specifies the headers that you want CloudFront to forward to the origin for this cache behavior. For the headers that you specify, CloudFront also caches separate versions of a given object based on the header values in viewer requests; this is known as varying on headers. For example, suppose viewer requests for logo.jpg contain a custom Product header that has a value of either Acme or Apex, and you configure CloudFront to vary on the Product header. CloudFront forwards the Product header to the origin and caches the response from the origin once for each header value.", + "refs": { + "ForwardedValues$Headers": "A complex type that specifies the Headers, if any, that you want CloudFront to vary upon for this cache behavior." + } + }, + "IllegalUpdate": { + "base": "Origin and CallerReference cannot be updated.", + "refs": { + } + }, + "InconsistentQuantities": { + "base": "The value of Quantity and the size of Items do not match.", + "refs": { + } + }, + "InvalidArgument": { + "base": "The argument is invalid.", + "refs": { + } + }, + "InvalidDefaultRootObject": { + "base": "The default root object file name is too big or contains an invalid character.", + "refs": { + } + }, + "InvalidErrorCode": { + "base": null, + "refs": { + } + }, + "InvalidForwardCookies": { + "base": "Your request contains forward cookies option which doesn't match with the expectation for the whitelisted list of cookie names. 
Either a list of cookie names has been specified when not allowed, or a list of cookie names is missing when expected.", + "refs": { + } + }, + "InvalidGeoRestrictionParameter": { + "base": null, + "refs": { + } + }, + "InvalidHeadersForS3Origin": { + "base": null, + "refs": { + } + }, + "InvalidIfMatchVersion": { + "base": "The If-Match version is missing or not valid for the distribution.", + "refs": { + } + }, + "InvalidLocationCode": { + "base": null, + "refs": { + } + }, + "InvalidMinimumProtocolVersion": { + "base": null, + "refs": { + } + }, + "InvalidOrigin": { + "base": "The Amazon S3 origin server specified does not refer to a valid Amazon S3 bucket.", + "refs": { + } + }, + "InvalidOriginAccessIdentity": { + "base": "The origin access identity is not valid or doesn't exist.", + "refs": { + } + }, + "InvalidProtocolSettings": { + "base": "You cannot specify SSLv3 as the minimum protocol version if you want to support only clients that support Server Name Indication (SNI).", + "refs": { + } + }, + "InvalidRelativePath": { + "base": "The relative path is too big, is not URL-encoded, or does not begin with a slash (/).", + "refs": { + } + }, + "InvalidRequiredProtocol": { + "base": "This operation requires the HTTPS protocol. Ensure that you specify the HTTPS protocol in your request, or omit the RequiredProtocols element from your distribution configuration.", + "refs": { + } + }, + "InvalidResponseCode": { + "base": null, + "refs": { + } + }, + "InvalidTTLOrder": { + "base": null, + "refs": { + } + }, + "InvalidViewerCertificate": { + "base": null, + "refs": { + } + }, + "InvalidWebACLId": { + "base": null, + "refs": { + } + }, + "Invalidation": { + "base": "An invalidation.", + "refs": { + "CreateInvalidationResult$Invalidation": "The invalidation's information.", + "GetInvalidationResult$Invalidation": "The invalidation's information." + } + }, + "InvalidationBatch": { + "base": "An invalidation batch.", + "refs": { + "CreateInvalidationRequest$InvalidationBatch": "The batch information for the invalidation.", + "Invalidation$InvalidationBatch": "The current invalidation information for the batch request." + } + }, + "InvalidationList": { + "base": "An invalidation list.", + "refs": { + "ListInvalidationsResult$InvalidationList": "Information about invalidation batches." + } + }, + "InvalidationSummary": { + "base": "Summary of an invalidation request.", + "refs": { + "InvalidationSummaryList$member": null + } + }, + "InvalidationSummaryList": { + "base": null, + "refs": { + "InvalidationList$Items": "A complex type that contains one InvalidationSummary element for each invalidation batch that was created by the current AWS account." + } + }, + "ItemSelection": { + "base": null, + "refs": { + "CookiePreference$Forward": "Use this element to specify whether you want CloudFront to forward cookies to the origin that is associated with this cache behavior. You can specify all, none or whitelist. If you choose All, CloudFront forwards all cookies regardless of how many your application uses." + } + }, + "KeyPairIdList": { + "base": null, + "refs": { + "KeyPairIds$Items": "A complex type that lists the active CloudFront key pairs, if any, that are associated with AwsAccountNumber." + } + }, + "KeyPairIds": { + "base": "A complex type that lists the active CloudFront key pairs, if any, that are associated with AwsAccountNumber.", + "refs": { + "Signer$KeyPairIds": "A complex type that lists the active CloudFront key pairs, if any, that are associated with AwsAccountNumber."
+ } + }, + "ListCloudFrontOriginAccessIdentitiesRequest": { + "base": "The request to list origin access identities.", + "refs": { + } + }, + "ListCloudFrontOriginAccessIdentitiesResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "ListDistributionsByWebACLIdRequest": { + "base": "The request to list distributions that are associated with a specified AWS WAF web ACL.", + "refs": { + } + }, + "ListDistributionsByWebACLIdResult": { + "base": "The response to a request to list the distributions that are associated with a specified AWS WAF web ACL.", + "refs": { + } + }, + "ListDistributionsRequest": { + "base": "The request to list your distributions.", + "refs": { + } + }, + "ListDistributionsResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "ListInvalidationsRequest": { + "base": "The request to list invalidations.", + "refs": { + } + }, + "ListInvalidationsResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "ListStreamingDistributionsRequest": { + "base": "The request to list your streaming distributions.", + "refs": { + } + }, + "ListStreamingDistributionsResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "LocationList": { + "base": null, + "refs": { + "GeoRestriction$Items": "A complex type that contains a Location element for each country in which you want CloudFront either to distribute your content (whitelist) or not distribute your content (blacklist). The Location element is a two-letter, uppercase country code for a country that you want to include in your blacklist or whitelist. Include one Location element for each country. CloudFront and MaxMind both use ISO 3166 country codes. For the current list of countries and the corresponding codes, see ISO 3166-1-alpha-2 code on the International Organization for Standardization website. You can also refer to the country list in the CloudFront console, which includes both country names and codes." + } + }, + "LoggingConfig": { + "base": "A complex type that controls whether access logs are written for the distribution.", + "refs": { + "DistributionConfig$Logging": "A complex type that controls whether access logs are written for the distribution." + } + }, + "Method": { + "base": null, + "refs": { + "MethodsList$member": null + } + }, + "MethodsList": { + "base": null, + "refs": { + "AllowedMethods$Items": "A complex type that contains the HTTP methods that you want CloudFront to process and forward to your origin.", + "CachedMethods$Items": "A complex type that contains the HTTP methods that you want CloudFront to cache responses to." + } + }, + "MinimumProtocolVersion": { + "base": null, + "refs": { + "ViewerCertificate$MinimumProtocolVersion": "Specify the minimum version of the SSL protocol that you want CloudFront to use, SSLv3 or TLSv1, for HTTPS connections. CloudFront will serve your objects only to browsers or devices that support at least the SSL version that you specify. The TLSv1 protocol is more secure, so we recommend that you specify SSLv3 only if your users are using browsers or devices that don't support TLSv1. If you're using a custom certificate (if you specify a value for IAMCertificateId) and if you're using dedicated IP (if you specify vip for SSLSupportMethod), you can choose SSLv3 or TLSv1 as the MinimumProtocolVersion. 
If you're using a custom certificate (if you specify a value for IAMCertificateId) and if you're using SNI (if you specify sni-only for SSLSupportMethod), you must specify TLSv1 for MinimumProtocolVersion." + } + }, + "MissingBody": { + "base": "This operation requires a body. Ensure that the body is present and the Content-Type header is set.", + "refs": { + } + }, + "NoSuchCloudFrontOriginAccessIdentity": { + "base": "The specified origin access identity does not exist.", + "refs": { + } + }, + "NoSuchDistribution": { + "base": "The specified distribution does not exist.", + "refs": { + } + }, + "NoSuchInvalidation": { + "base": "The specified invalidation does not exist.", + "refs": { + } + }, + "NoSuchOrigin": { + "base": "No origin exists with the specified Origin Id.", + "refs": { + } + }, + "NoSuchStreamingDistribution": { + "base": "The specified streaming distribution does not exist.", + "refs": { + } + }, + "Origin": { + "base": "A complex type that describes the Amazon S3 bucket or the HTTP server (for example, a web server) from which CloudFront gets your files. You must create at least one origin.", + "refs": { + "OriginList$member": null + } + }, + "OriginCustomHeader": { + "base": "A complex type that contains information related to a header.", + "refs": { + "OriginCustomHeadersList$member": null + } + }, + "OriginCustomHeadersList": { + "base": null, + "refs": { + "CustomHeaders$Items": "A complex type that contains the custom headers for this Origin." + } + }, + "OriginList": { + "base": null, + "refs": { + "Origins$Items": "A complex type that contains origins for this distribution." + } + }, + "OriginProtocolPolicy": { + "base": null, + "refs": { + "CustomOriginConfig$OriginProtocolPolicy": "The origin protocol policy to apply to your origin." + } + }, + "OriginSslProtocols": { + "base": "A complex type that contains the list of SSL/TLS protocols that you want CloudFront to use when communicating with your origin over HTTPS.", + "refs": { + "CustomOriginConfig$OriginSslProtocols": "The SSL/TLS protocols that you want CloudFront to use when communicating with your origin over HTTPS." + } + }, + "Origins": { + "base": "A complex type that contains information about origins for this distribution.", + "refs": { + "DistributionConfig$Origins": "A complex type that contains information about origins for this distribution.", + "DistributionSummary$Origins": "A complex type that contains information about origins for this distribution." + } + }, + "PathList": { + "base": null, + "refs": { + "Paths$Items": "A complex type that contains a list of the objects that you want to invalidate." + } + }, + "Paths": { + "base": "A complex type that contains information about the objects that you want to invalidate.", + "refs": { + "InvalidationBatch$Paths": "The path of the object to invalidate. The path is relative to the distribution and must begin with a slash (/). You must enclose each invalidation object with the Path element tags. If the path includes non-ASCII characters or unsafe characters as defined in RFC 1738 (http://www.ietf.org/rfc/rfc1738.txt), URL encode those characters. Do not URL encode any other characters in the path, or CloudFront will not invalidate the old version of the updated object."
+ } + }, + "PreconditionFailed": { + "base": "The precondition given in one or more of the request-header fields evaluated to false.", + "refs": { + } + }, + "PriceClass": { + "base": null, + "refs": { + "DistributionConfig$PriceClass": "A complex type that contains information about price class for this distribution.", + "DistributionSummary$PriceClass": null, + "StreamingDistributionConfig$PriceClass": "A complex type that contains information about price class for this streaming distribution.", + "StreamingDistributionSummary$PriceClass": null + } + }, + "Restrictions": { + "base": "A complex type that identifies ways in which you want to restrict distribution of your content.", + "refs": { + "DistributionConfig$Restrictions": null, + "DistributionSummary$Restrictions": null + } + }, + "S3Origin": { + "base": "A complex type that contains information about the Amazon S3 bucket from which you want CloudFront to get your media files for distribution.", + "refs": { + "StreamingDistributionConfig$S3Origin": "A complex type that contains information about the Amazon S3 bucket from which you want CloudFront to get your media files for distribution.", + "StreamingDistributionSummary$S3Origin": "A complex type that contains information about the Amazon S3 bucket from which you want CloudFront to get your media files for distribution." + } + }, + "S3OriginConfig": { + "base": "A complex type that contains information about the Amazon S3 origin. If the origin is a custom origin, use the CustomOriginConfig element instead.", + "refs": { + "Origin$S3OriginConfig": "A complex type that contains information about the Amazon S3 origin. If the origin is a custom origin, use the CustomOriginConfig element instead." + } + }, + "SSLSupportMethod": { + "base": null, + "refs": { + "ViewerCertificate$SSLSupportMethod": "If you specify a value for IAMCertificateId, you must also specify how you want CloudFront to serve HTTPS requests. Valid values are vip and sni-only. If you specify vip, CloudFront uses dedicated IP addresses for your content and can respond to HTTPS requests from any viewer. However, you must request permission to use this feature, and you incur additional monthly charges. If you specify sni-only, CloudFront can only respond to HTTPS requests from viewers that support Server Name Indication (SNI). All modern browsers support SNI, but some browsers still in use don't support SNI. Do not specify a value for SSLSupportMethod if you specified true for CloudFrontDefaultCertificate." + } + }, + "Signer": { + "base": "A complex type that lists the AWS accounts that were included in the TrustedSigners complex type, as well as their active CloudFront key pair IDs, if any.", + "refs": { + "SignerList$member": null + } + }, + "SignerList": { + "base": null, + "refs": { + "ActiveTrustedSigners$Items": "A complex type that contains one Signer complex type for each unique trusted signer that is specified in the TrustedSigners complex type, including trusted signers in the default cache behavior and in all of the other cache behaviors." + } + }, + "SslProtocol": { + "base": null, + "refs": { + "SslProtocolsList$member": null + } + }, + "SslProtocolsList": { + "base": null, + "refs": { + "OriginSslProtocols$Items": "A complex type that contains one SslProtocol element for each SSL/TLS protocol that you want to allow CloudFront to use when establishing an HTTPS connection with this origin." 
+ } + }, + "StreamingDistribution": { + "base": "A streaming distribution.", + "refs": { + "CreateStreamingDistributionResult$StreamingDistribution": "The streaming distribution's information.", + "GetStreamingDistributionResult$StreamingDistribution": "The streaming distribution's information.", + "UpdateStreamingDistributionResult$StreamingDistribution": "The streaming distribution's information." + } + }, + "StreamingDistributionAlreadyExists": { + "base": null, + "refs": { + } + }, + "StreamingDistributionConfig": { + "base": "The configuration for the streaming distribution.", + "refs": { + "CreateStreamingDistributionRequest$StreamingDistributionConfig": "The streaming distribution's configuration information.", + "GetStreamingDistributionConfigResult$StreamingDistributionConfig": "The streaming distribution's configuration information.", + "StreamingDistribution$StreamingDistributionConfig": "The current configuration information for the streaming distribution.", + "UpdateStreamingDistributionRequest$StreamingDistributionConfig": "The streaming distribution's configuration information." + } + }, + "StreamingDistributionList": { + "base": "A streaming distribution list.", + "refs": { + "ListStreamingDistributionsResult$StreamingDistributionList": "The StreamingDistributionList type." + } + }, + "StreamingDistributionNotDisabled": { + "base": null, + "refs": { + } + }, + "StreamingDistributionSummary": { + "base": "A summary of the information for an Amazon CloudFront streaming distribution.", + "refs": { + "StreamingDistributionSummaryList$member": null + } + }, + "StreamingDistributionSummaryList": { + "base": null, + "refs": { + "StreamingDistributionList$Items": "A complex type that contains one StreamingDistributionSummary element for each distribution that was created by the current AWS account." + } + }, + "StreamingLoggingConfig": { + "base": "A complex type that controls whether access logs are written for this streaming distribution.", + "refs": { + "StreamingDistributionConfig$Logging": "A complex type that controls whether access logs are written for the streaming distribution." 
+ } + }, + "TooManyCacheBehaviors": { + "base": "You cannot create anymore cache behaviors for the distribution.", + "refs": { + } + }, + "TooManyCertificates": { + "base": "You cannot create anymore custom ssl certificates.", + "refs": { + } + }, + "TooManyCloudFrontOriginAccessIdentities": { + "base": "Processing your request would cause you to exceed the maximum number of origin access identities allowed.", + "refs": { + } + }, + "TooManyCookieNamesInWhiteList": { + "base": "Your request contains more cookie names in the whitelist than are allowed per cache behavior.", + "refs": { + } + }, + "TooManyDistributionCNAMEs": { + "base": "Your request contains more CNAMEs than are allowed per distribution.", + "refs": { + } + }, + "TooManyDistributions": { + "base": "Processing your request would cause you to exceed the maximum number of distributions allowed.", + "refs": { + } + }, + "TooManyHeadersInForwardedValues": { + "base": null, + "refs": { + } + }, + "TooManyInvalidationsInProgress": { + "base": "You have exceeded the maximum number of allowable InProgress invalidation batch requests, or invalidation objects.", + "refs": { + } + }, + "TooManyOriginCustomHeaders": { + "base": null, + "refs": { + } + }, + "TooManyOrigins": { + "base": "You cannot create anymore origins for the distribution.", + "refs": { + } + }, + "TooManyStreamingDistributionCNAMEs": { + "base": null, + "refs": { + } + }, + "TooManyStreamingDistributions": { + "base": "Processing your request would cause you to exceed the maximum number of streaming distributions allowed.", + "refs": { + } + }, + "TooManyTrustedSigners": { + "base": "Your request contains more trusted signers than are allowed per distribution.", + "refs": { + } + }, + "TrustedSignerDoesNotExist": { + "base": "One or more of your trusted signers do not exist.", + "refs": { + } + }, + "TrustedSigners": { + "base": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.", + "refs": { + "CacheBehavior$TrustedSigners": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. 
To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.", + "DefaultCacheBehavior$TrustedSigners": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.", + "StreamingDistributionConfig$TrustedSigners": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.", + "StreamingDistributionSummary$TrustedSigners": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution." 
+ } + }, + "UpdateCloudFrontOriginAccessIdentityRequest": { + "base": "The request to update an origin access identity.", + "refs": { + } + }, + "UpdateCloudFrontOriginAccessIdentityResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "UpdateDistributionRequest": { + "base": "The request to update a distribution.", + "refs": { + } + }, + "UpdateDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "UpdateStreamingDistributionRequest": { + "base": "The request to update a streaming distribution.", + "refs": { + } + }, + "UpdateStreamingDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "ViewerCertificate": { + "base": "A complex type that contains information about viewer certificates for this distribution.", + "refs": { + "DistributionConfig$ViewerCertificate": null, + "DistributionSummary$ViewerCertificate": null + } + }, + "ViewerProtocolPolicy": { + "base": null, + "refs": { + "CacheBehavior$ViewerProtocolPolicy": "Use this element to specify the protocol that users can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern. If you want CloudFront to allow end users to use any available protocol, specify allow-all. If you want CloudFront to require HTTPS, specify https. If you want CloudFront to respond to an HTTP request with an HTTP status code of 301 (Moved Permanently) and the HTTPS URL, specify redirect-to-https. The viewer then resubmits the request using the HTTPS URL.", + "DefaultCacheBehavior$ViewerProtocolPolicy": "Use this element to specify the protocol that users can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern. If you want CloudFront to allow end users to use any available protocol, specify allow-all. If you want CloudFront to require HTTPS, specify https. If you want CloudFront to respond to an HTTP request with an HTTP status code of 301 (Moved Permanently) and the HTTPS URL, specify redirect-to-https. The viewer then resubmits the request using the HTTPS URL." + } + }, + "boolean": { + "base": null, + "refs": { + "ActiveTrustedSigners$Enabled": "Each active trusted signer.", + "CacheBehavior$SmoothStreaming": "Indicates whether you want to distribute media files in Microsoft Smooth Streaming format using the origin that is associated with this cache behavior. If so, specify true; if not, specify false.", + "CacheBehavior$Compress": "Whether you want CloudFront to automatically compress content for web requests that include Accept-Encoding: gzip in the request header. If so, specify true; if not, specify false. CloudFront compresses files larger than 1000 bytes and less than 1 megabyte for both Amazon S3 and custom origins. When a CloudFront edge location is unusually busy, some files might not be compressed. The value of the Content-Type header must be on the list of file types that CloudFront will compress. For the current list, see Serving Compressed Content in the Amazon CloudFront Developer Guide. If you configure CloudFront to compress content, CloudFront removes the ETag response header from the objects that it compresses. The ETag header indicates that the version in a CloudFront edge cache is identical to the version on the origin server, but after compression the two versions are no longer identical. 
As a result, for compressed objects, CloudFront can't use the ETag header to determine whether an expired object in the CloudFront edge cache is still the latest version.", + "CloudFrontOriginAccessIdentityList$IsTruncated": "A flag that indicates whether more origin access identities remain to be listed. If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more items in the list.", + "DefaultCacheBehavior$SmoothStreaming": "Indicates whether you want to distribute media files in Microsoft Smooth Streaming format using the origin that is associated with this cache behavior. If so, specify true; if not, specify false.", + "DefaultCacheBehavior$Compress": "Whether you want CloudFront to automatically compress content for web requests that include Accept-Encoding: gzip in the request header. If so, specify true; if not, specify false. CloudFront compresses files larger than 1000 bytes and less than 1 megabyte for both Amazon S3 and custom origins. When a CloudFront edge location is unusually busy, some files might not be compressed. The value of the Content-Type header must be on the list of file types that CloudFront will compress. For the current list, see Serving Compressed Content in the Amazon CloudFront Developer Guide. If you configure CloudFront to compress content, CloudFront removes the ETag response header from the objects that it compresses. The ETag header indicates that the version in a CloudFront edge cache is identical to the version on the origin server, but after compression the two versions are no longer identical. As a result, for compressed objects, CloudFront can't use the ETag header to determine whether an expired object in the CloudFront edge cache is still the latest version.", + "DistributionConfig$Enabled": "Whether the distribution is enabled to accept end user requests for content.", + "DistributionList$IsTruncated": "A flag that indicates whether more distributions remain to be listed. If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more distributions in the list.", + "DistributionSummary$Enabled": "Whether the distribution is enabled to accept end user requests for content.", + "ForwardedValues$QueryString": "Indicates whether you want CloudFront to forward query strings to the origin that is associated with this cache behavior. If so, specify true; if not, specify false.", + "InvalidationList$IsTruncated": "A flag that indicates whether more invalidation batch requests remain to be listed. If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more invalidation batches in the list.", + "LoggingConfig$Enabled": "Specifies whether you want CloudFront to save access logs to an Amazon S3 bucket. If you do not want to enable logging when you create a distribution or if you want to disable logging for an existing distribution, specify false for Enabled, and specify empty Bucket and Prefix elements. If you specify false for Enabled but you specify values for Bucket, Prefix, and IncludeCookies, the values are automatically deleted.", + "LoggingConfig$IncludeCookies": "Specifies whether you want CloudFront to include cookies in access logs. To include cookies, specify true for IncludeCookies. If you choose to include cookies in logs, CloudFront logs all cookies regardless of how you configure the cache behaviors for this distribution.
If you do not want to include cookies when you create a distribution or if you want to stop including cookies for an existing distribution, specify false for IncludeCookies.", + "StreamingDistributionConfig$Enabled": "Whether the streaming distribution is enabled to accept end user requests for content.", + "StreamingDistributionList$IsTruncated": "A flag that indicates whether more streaming distributions remain to be listed. If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more distributions in the list.", + "StreamingDistributionSummary$Enabled": "Whether the distribution is enabled to accept end user requests for content.", + "StreamingLoggingConfig$Enabled": "Specifies whether you want CloudFront to save access logs to an Amazon S3 bucket. If you do not want to enable logging when you create a streaming distribution or if you want to disable logging for an existing streaming distribution, specify false for Enabled, and specify empty Bucket and Prefix elements. If you specify false for Enabled but you specify values for Bucket and Prefix, the values are automatically deleted.", + "TrustedSigners$Enabled": "Specifies whether you want to require end users to use signed URLs to access the files specified by PathPattern and TargetOriginId.", + "ViewerCertificate$CloudFrontDefaultCertificate": "If you want viewers to use HTTPS to request your objects and you're using the CloudFront domain name of your distribution in your object URLs (for example, https://d111111abcdef8.cloudfront.net/logo.jpg), set to true. Omit this value if you are setting an ACMCertificateArn or IAMCertificateId." + } + }, + "integer": { + "base": null, + "refs": { + "ActiveTrustedSigners$Quantity": "The number of unique trusted signers included in all cache behaviors. For example, if three cache behaviors all list the same three AWS accounts, the value of Quantity for ActiveTrustedSigners will be 3.", + "Aliases$Quantity": "The number of CNAMEs, if any, for this distribution.", + "AllowedMethods$Quantity": "The number of HTTP methods that you want CloudFront to forward to your origin. Valid values are 2 (for GET and HEAD requests), 3 (for GET, HEAD, and OPTIONS requests), and 7 (for GET, HEAD, OPTIONS, PUT, PATCH, POST, and DELETE requests).", + "CacheBehaviors$Quantity": "The number of cache behaviors for this distribution.", + "CachedMethods$Quantity": "The number of HTTP methods for which you want CloudFront to cache responses. Valid values are 2 (for caching responses to GET and HEAD requests) and 3 (for caching responses to GET, HEAD, and OPTIONS requests).", + "CloudFrontOriginAccessIdentityList$MaxItems": "The value you provided for the MaxItems request parameter.", + "CloudFrontOriginAccessIdentityList$Quantity": "The number of CloudFront origin access identities that were created by the current AWS account.", + "CookieNames$Quantity": "The number of whitelisted cookies for this cache behavior.", + "CustomErrorResponse$ErrorCode": "The 4xx or 5xx HTTP status code that you want to customize.
For a list of HTTP status codes that you can customize, see CloudFront documentation.", + "CustomErrorResponses$Quantity": "The number of custom error responses for this distribution.", + "CustomHeaders$Quantity": "The number of custom headers for this origin.", + "CustomOriginConfig$HTTPPort": "The HTTP port the custom origin listens on.", + "CustomOriginConfig$HTTPSPort": "The HTTPS port the custom origin listens on.", + "Distribution$InProgressInvalidationBatches": "The number of invalidation batches currently in progress.", + "DistributionList$MaxItems": "The value you provided for the MaxItems request parameter.", + "DistributionList$Quantity": "The number of distributions that were created by the current AWS account.", + "GeoRestriction$Quantity": "When geo restriction is enabled, this is the number of countries in your whitelist or blacklist. Otherwise, when it is not enabled, Quantity is 0, and you can omit Items.", + "Headers$Quantity": "The number of different headers that you want CloudFront to forward to the origin and to vary on for this cache behavior. The maximum number of headers that you can specify by name is 10. If you want CloudFront to forward all headers to the origin and vary on all of them, specify 1 for Quantity and * for Name. If you don't want CloudFront to forward any additional headers to the origin or to vary on any headers, specify 0 for Quantity and omit Items.", + "InvalidationList$MaxItems": "The value you provided for the MaxItems request parameter.", + "InvalidationList$Quantity": "The number of invalidation batches that were created by the current AWS account.", + "KeyPairIds$Quantity": "The number of active CloudFront key pairs for AwsAccountNumber.", + "OriginSslProtocols$Quantity": "The number of SSL/TLS protocols that you want to allow CloudFront to use when establishing an HTTPS connection with this origin.", + "Origins$Quantity": "The number of origins for this distribution.", + "Paths$Quantity": "The number of objects that you want to invalidate.", + "StreamingDistributionList$MaxItems": "The value you provided for the MaxItems request parameter.", + "StreamingDistributionList$Quantity": "The number of streaming distributions that were created by the current AWS account.", + "TrustedSigners$Quantity": "The number of trusted signers for this cache behavior." + } + }, + "long": { + "base": null, + "refs": { + "CacheBehavior$MinTTL": "The minimum amount of time that you want objects to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated. You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "CacheBehavior$DefaultTTL": "If you don't configure your origin to add a Cache-Control max-age directive or an Expires header, DefaultTTL is the default amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin does not add HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "CacheBehavior$MaxTTL": "The maximum amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated.
The value that you specify applies only when your origin adds HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "CustomErrorResponse$ErrorCachingMinTTL": "The minimum amount of time you want HTTP error codes to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated. You can specify a value from 0 to 31,536,000 seconds (one year).", + "DefaultCacheBehavior$MinTTL": "The minimum amount of time that you want objects to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated. You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "DefaultCacheBehavior$DefaultTTL": "If you don't configure your origin to add a Cache-Control max-age directive or an Expires header, DefaultTTL is the default amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin does not add HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "DefaultCacheBehavior$MaxTTL": "The maximum amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin adds HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. You can specify a value from 0 to 3,153,600,000 seconds (100 years)." + } + }, + "string": { + "base": null, + "refs": { + "AccessDenied$Message": null, + "AliasList$member": null, + "AwsAccountNumberList$member": null, + "BatchTooLarge$Message": null, + "CNAMEAlreadyExists$Message": null, + "CacheBehavior$PathPattern": "The pattern (for example, images/*.jpg) that specifies which requests you want this cache behavior to apply to. When CloudFront receives an end-user request, the requested path is compared with path patterns in the order in which cache behaviors are listed in the distribution. The path pattern for the default cache behavior is * and cannot be changed. If the request for an object does not match the path pattern for any cache behaviors, CloudFront applies the behavior in the default cache behavior.", + "CacheBehavior$TargetOriginId": "The value of ID for the origin that you want CloudFront to route requests to when a request matches the path pattern either for a cache behavior or for the default cache behavior.", + "CloudFrontOriginAccessIdentity$Id": "The ID for the origin access identity. For example: E74FTE3AJFJ256A.", + "CloudFrontOriginAccessIdentity$S3CanonicalUserId": "The Amazon S3 canonical user ID for the origin access identity, which you use when giving the origin access identity read permission to an object in Amazon S3.", + "CloudFrontOriginAccessIdentityAlreadyExists$Message": null, + "CloudFrontOriginAccessIdentityConfig$CallerReference": "A unique number that ensures the request can't be replayed. If the CallerReference is new (no matter the content of the CloudFrontOriginAccessIdentityConfig object), a new origin access identity is created.
If the CallerReference is a value you already sent in a previous request to create an identity, and the content of the CloudFrontOriginAccessIdentityConfig is identical to the original request (ignoring white space), the response includes the same information returned to the original request. If the CallerReference is a value you already sent in a previous request to create an identity but the content of the CloudFrontOriginAccessIdentityConfig is different from the original request, CloudFront returns a CloudFrontOriginAccessIdentityAlreadyExists error.", + "CloudFrontOriginAccessIdentityConfig$Comment": "Any comments you want to include about the origin access identity.", + "CloudFrontOriginAccessIdentityInUse$Message": null, + "CloudFrontOriginAccessIdentityList$Marker": "The value you provided for the Marker request parameter.", + "CloudFrontOriginAccessIdentityList$NextMarker": "If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your origin access identities where they left off.", + "CloudFrontOriginAccessIdentitySummary$Id": "The ID for the origin access identity. For example: E74FTE3AJFJ256A.", + "CloudFrontOriginAccessIdentitySummary$S3CanonicalUserId": "The Amazon S3 canonical user ID for the origin access identity, which you use when giving the origin access identity read permission to an object in Amazon S3.", + "CloudFrontOriginAccessIdentitySummary$Comment": "The comment for this origin access identity, as originally specified when created.", + "CookieNameList$member": null, + "CreateCloudFrontOriginAccessIdentityResult$Location": "The fully qualified URI of the new origin access identity just created. For example: https://cloudfront.amazonaws.com/2010-11-01/origin-access-identity/cloudfront/E74FTE3AJFJ256A.", + "CreateCloudFrontOriginAccessIdentityResult$ETag": "The current version of the origin access identity created.", + "CreateDistributionResult$Location": "The fully qualified URI of the new distribution resource just created. For example: https://cloudfront.amazonaws.com/2010-11-01/distribution/EDFDVBD632BHDS5.", + "CreateDistributionResult$ETag": "The current version of the distribution created.", + "CreateInvalidationRequest$DistributionId": "The distribution's id.", + "CreateInvalidationResult$Location": "The fully qualified URI of the distribution and invalidation batch request, including the Invalidation ID.", + "CreateStreamingDistributionResult$Location": "The fully qualified URI of the new streaming distribution resource just created. For example: https://cloudfront.amazonaws.com/2010-11-01/streaming-distribution/EGTXBD79H29TRA8.", + "CreateStreamingDistributionResult$ETag": "The current version of the streaming distribution created.", + "CustomErrorResponse$ResponsePagePath": "The path of the custom error page (for example, /custom_404.html). The path is relative to the distribution and must begin with a slash (/). If the path includes any non-ASCII characters or unsafe characters as defined in RFC 1738 (http://www.ietf.org/rfc/rfc1738.txt), URL encode those characters. Do not URL encode any other characters in the path, or CloudFront will not return the custom error page to the viewer.", + "CustomErrorResponse$ResponseCode": "The HTTP status code that you want CloudFront to return with the custom error page to the viewer.
For a list of HTTP status codes that you can replace, see CloudFront documentation.", + "DefaultCacheBehavior$TargetOriginId": "The value of ID for the origin that you want CloudFront to route requests to when a request matches the path pattern either for a cache behavior or for the default cache behavior.", + "DeleteCloudFrontOriginAccessIdentityRequest$Id": "The origin access identity's id.", + "DeleteCloudFrontOriginAccessIdentityRequest$IfMatch": "The value of the ETag header you received from a previous GET or PUT request. For example: E2QWRUHAPOMQZL.", + "DeleteDistributionRequest$Id": "The distribution's id.", + "DeleteDistributionRequest$IfMatch": "The value of the ETag header you received when you disabled the distribution. For example: E2QWRUHAPOMQZL.", + "DeleteStreamingDistributionRequest$Id": "The streaming distribution's id.", + "DeleteStreamingDistributionRequest$IfMatch": "The value of the ETag header you received when you disabled the streaming distribution. For example: E2QWRUHAPOMQZL.", + "Distribution$Id": "The identifier for the distribution. For example: EDFDVBD632BHDS5.", + "Distribution$Status": "This response element indicates the current status of the distribution. When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.", + "Distribution$DomainName": "The domain name corresponding to the distribution. For example: d604721fxaaqy9.cloudfront.net.", + "DistributionAlreadyExists$Message": null, + "DistributionConfig$CallerReference": "A unique number that ensures the request can't be replayed. If the CallerReference is new (no matter the content of the DistributionConfig object), a new distribution is created. If the CallerReference is a value you already sent in a previous request to create a distribution, and the content of the DistributionConfig is identical to the original request (ignoring white space), the response includes the same information returned to the original request. If the CallerReference is a value you already sent in a previous request to create a distribution but the content of the DistributionConfig is different from the original request, CloudFront returns a DistributionAlreadyExists error.", + "DistributionConfig$DefaultRootObject": "The object that you want CloudFront to return (for example, index.html) when an end user requests the root URL for your distribution (http://www.example.com) instead of an object in your distribution (http://www.example.com/index.html). Specifying a default root object avoids exposing the contents of your distribution. If you don't want to specify a default root object when you create a distribution, include an empty DefaultRootObject element. To delete the default root object from an existing distribution, update the distribution configuration and include an empty DefaultRootObject element.
To replace the default root object, update the distribution configuration and specify the new object.", + "DistributionConfig$Comment": "Any comments you want to include about the distribution.", + "DistributionConfig$WebACLId": "(Optional) If you're using AWS WAF to filter CloudFront requests, the Id of the AWS WAF web ACL that is associated with the distribution.", + "DistributionList$Marker": "The value you provided for the Marker request parameter.", + "DistributionList$NextMarker": "If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your distributions where they left off.", + "DistributionNotDisabled$Message": null, + "DistributionSummary$Id": "The identifier for the distribution. For example: EDFDVBD632BHDS5.", + "DistributionSummary$Status": "This response element indicates the current status of the distribution. When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.", + "DistributionSummary$DomainName": "The domain name corresponding to the distribution. For example: d604721fxaaqy9.cloudfront.net.", + "DistributionSummary$Comment": "The comment originally specified when this distribution was created.", + "DistributionSummary$WebACLId": "The Web ACL Id (if any) associated with the distribution.", + "GetCloudFrontOriginAccessIdentityConfigRequest$Id": "The identity's id.", + "GetCloudFrontOriginAccessIdentityConfigResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "GetCloudFrontOriginAccessIdentityRequest$Id": "The identity's id.", + "GetCloudFrontOriginAccessIdentityResult$ETag": "The current version of the origin access identity's information. For example: E2QWRUHAPOMQZL.", + "GetDistributionConfigRequest$Id": "The distribution's id.", + "GetDistributionConfigResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "GetDistributionRequest$Id": "The distribution's id.", + "GetDistributionResult$ETag": "The current version of the distribution's information. For example: E2QWRUHAPOMQZL.", + "GetInvalidationRequest$DistributionId": "The distribution's id.", + "GetInvalidationRequest$Id": "The invalidation's id.", + "GetStreamingDistributionConfigRequest$Id": "The streaming distribution's id.", + "GetStreamingDistributionConfigResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "GetStreamingDistributionRequest$Id": "The streaming distribution's id.", + "GetStreamingDistributionResult$ETag": "The current version of the streaming distribution's information. 
For example: E2QWRUHAPOMQZL.", + "HeaderList$member": null, + "IllegalUpdate$Message": null, + "InconsistentQuantities$Message": null, + "InvalidArgument$Message": null, + "InvalidDefaultRootObject$Message": null, + "InvalidErrorCode$Message": null, + "InvalidForwardCookies$Message": null, + "InvalidGeoRestrictionParameter$Message": null, + "InvalidHeadersForS3Origin$Message": null, + "InvalidIfMatchVersion$Message": null, + "InvalidLocationCode$Message": null, + "InvalidMinimumProtocolVersion$Message": null, + "InvalidOrigin$Message": null, + "InvalidOriginAccessIdentity$Message": null, + "InvalidProtocolSettings$Message": null, + "InvalidRelativePath$Message": null, + "InvalidRequiredProtocol$Message": null, + "InvalidResponseCode$Message": null, + "InvalidTTLOrder$Message": null, + "InvalidViewerCertificate$Message": null, + "InvalidWebACLId$Message": null, + "Invalidation$Id": "The identifier for the invalidation request. For example: IDFDVBD632BHDS5.", + "Invalidation$Status": "The status of the invalidation request. When the invalidation batch is finished, the status is Completed.", + "InvalidationBatch$CallerReference": "A unique name that ensures the request can't be replayed. If the CallerReference is new (no matter the content of the Path object), a new invalidation batch is created. If the CallerReference is a value you already sent in a previous request to create an invalidation batch, and the content of each Path element is identical to the original request, the response includes the same information returned to the original request. If the CallerReference is a value you already sent in a previous request to create an invalidation batch but the content of any Path is different from the original request, CloudFront returns an InvalidationBatchAlreadyExists error.", + "InvalidationList$Marker": "The value you provided for the Marker request parameter.", + "InvalidationList$NextMarker": "If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your invalidation batches where they left off.", + "InvalidationSummary$Id": "The unique ID for an invalidation request.", + "InvalidationSummary$Status": "The status of an invalidation request.", + "KeyPairIdList$member": null, + "ListCloudFrontOriginAccessIdentitiesRequest$Marker": "Use this when paginating results to indicate where to begin in your list of origin access identities. The results include identities in the list that occur after the marker. To get the next page of results, set the Marker to the value of the NextMarker from the current page's response (which is also the ID of the last identity on that page).", + "ListCloudFrontOriginAccessIdentitiesRequest$MaxItems": "The maximum number of origin access identities you want in the response body.", + "ListDistributionsByWebACLIdRequest$Marker": "Use Marker and MaxItems to control pagination of results. If you have more than MaxItems distributions that satisfy the request, the response includes a NextMarker element. To get the next page of results, submit another request. For the value of Marker, specify the value of NextMarker from the last response. (For the first request, omit Marker.)", + "ListDistributionsByWebACLIdRequest$MaxItems": "The maximum number of distributions that you want CloudFront to return in the response body. The maximum and default values are both 100.", + "ListDistributionsByWebACLIdRequest$WebACLId": "The Id of the AWS WAF web ACL for which you want to list the associated distributions.
If you specify \"null\" for the Id, the request returns a list of the distributions that aren't associated with a web ACL.", + "ListDistributionsRequest$Marker": "Use Marker and MaxItems to control pagination of results. If you have more than MaxItems distributions that satisfy the request, the response includes a NextMarker element. To get the next page of results, submit another request. For the value of Marker, specify the value of NextMarker from the last response. (For the first request, omit Marker.)", + "ListDistributionsRequest$MaxItems": "The maximum number of distributions that you want CloudFront to return in the response body. The maximum and default values are both 100.", + "ListInvalidationsRequest$DistributionId": "The distribution's id.", + "ListInvalidationsRequest$Marker": "Use this parameter when paginating results to indicate where to begin in your list of invalidation batches. Because the results are returned in decreasing order from most recent to oldest, the most recent results are on the first page, the second page will contain earlier results, and so on. To get the next page of results, set the Marker to the value of the NextMarker from the current page's response. This value is the same as the ID of the last invalidation batch on that page.", + "ListInvalidationsRequest$MaxItems": "The maximum number of invalidation batches you want in the response body.", + "ListStreamingDistributionsRequest$Marker": "Use this when paginating results to indicate where to begin in your list of streaming distributions. The results include distributions in the list that occur after the marker. To get the next page of results, set the Marker to the value of the NextMarker from the current page's response (which is also the ID of the last distribution on that page).", + "ListStreamingDistributionsRequest$MaxItems": "The maximum number of streaming distributions you want in the response body.", + "LocationList$member": null, + "LoggingConfig$Bucket": "The Amazon S3 bucket to store the access logs in, for example, myawslogbucket.s3.amazonaws.com.", + "LoggingConfig$Prefix": "An optional string that you want CloudFront to prefix to the access log filenames for this distribution, for example, myprefix/. If you want to enable logging, but you do not want to specify a prefix, you still must include an empty Prefix element in the Logging element.", + "MissingBody$Message": null, + "NoSuchCloudFrontOriginAccessIdentity$Message": null, + "NoSuchDistribution$Message": null, + "NoSuchInvalidation$Message": null, + "NoSuchOrigin$Message": null, + "NoSuchStreamingDistribution$Message": null, + "Origin$Id": "A unique identifier for the origin. The value of Id must be unique within the distribution. You use the value of Id when you create a cache behavior. The Id identifies the origin that CloudFront routes a request to when the request matches the path pattern for that cache behavior.", + "Origin$DomainName": "Amazon S3 origins: The DNS name of the Amazon S3 bucket from which you want CloudFront to get objects for this origin, for example, myawsbucket.s3.amazonaws.com. Custom origins: The DNS domain name for the HTTP server from which you want CloudFront to get objects for this origin, for example, www.example.com.", + "Origin$OriginPath": "An optional element that causes CloudFront to request your content from a directory in your Amazon S3 bucket or your custom origin. When you include the OriginPath element, specify the directory name, beginning with a /. 
CloudFront appends the directory name to the value of DomainName.", + "OriginCustomHeader$HeaderName": "The header's name.", + "OriginCustomHeader$HeaderValue": "The header's value.", + "PathList$member": null, + "PreconditionFailed$Message": null, + "S3Origin$DomainName": "The DNS name of the S3 origin.", + "S3Origin$OriginAccessIdentity": "Your S3 origin's origin access identity.", + "S3OriginConfig$OriginAccessIdentity": "The CloudFront origin access identity to associate with the origin. Use an origin access identity to configure the origin so that end users can only access objects in an Amazon S3 bucket through CloudFront. If you want end users to be able to access objects using either the CloudFront URL or the Amazon S3 URL, specify an empty OriginAccessIdentity element. To delete the origin access identity from an existing distribution, update the distribution configuration and include an empty OriginAccessIdentity element. To replace the origin access identity, update the distribution configuration and specify the new origin access identity. Use the format origin-access-identity/cloudfront/Id where Id is the value that CloudFront returned in the Id element when you created the origin access identity.", + "Signer$AwsAccountNumber": "Specifies an AWS account that can create signed URLs. Values: self, which indicates that the AWS account that was used to create the distribution can create signed URLs, or an AWS account number. Omit the dashes in the account number.", + "StreamingDistribution$Id": "The identifier for the streaming distribution. For example: EGTXBD79H29TRA8.", + "StreamingDistribution$Status": "The current status of the streaming distribution. When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.", + "StreamingDistribution$DomainName": "The domain name corresponding to the streaming distribution. For example: s5c39gqb8ow64r.cloudfront.net.", + "StreamingDistributionAlreadyExists$Message": null, + "StreamingDistributionConfig$CallerReference": "A unique number that ensures the request can't be replayed. If the CallerReference is new (no matter the content of the StreamingDistributionConfig object), a new streaming distribution is created. If the CallerReference is a value you already sent in a previous request to create a streaming distribution, and the content of the StreamingDistributionConfig is identical to the original request (ignoring white space), the response includes the same information returned to the original request. If the CallerReference is a value you already sent in a previous request to create a streaming distribution but the content of the StreamingDistributionConfig is different from the original request, CloudFront returns a StreamingDistributionAlreadyExists error.", + "StreamingDistributionConfig$Comment": "Any comments you want to include about the streaming distribution.", + "StreamingDistributionList$Marker": "The value you provided for the Marker request parameter.", + "StreamingDistributionList$NextMarker": "If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your streaming distributions where they left off.", + "StreamingDistributionNotDisabled$Message": null, + "StreamingDistributionSummary$Id": "The identifier for the distribution. For example: EDFDVBD632BHDS5.", + "StreamingDistributionSummary$Status": "Indicates the current status of the distribution.
When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.", + "StreamingDistributionSummary$DomainName": "The domain name corresponding to the distribution. For example: d604721fxaaqy9.cloudfront.net.", + "StreamingDistributionSummary$Comment": "The comment originally specified when this distribution was created.", + "StreamingLoggingConfig$Bucket": "The Amazon S3 bucket to store the access logs in, for example, myawslogbucket.s3.amazonaws.com.", + "StreamingLoggingConfig$Prefix": "An optional string that you want CloudFront to prefix to the access log filenames for this streaming distribution, for example, myprefix/. If you want to enable logging, but you do not want to specify a prefix, you still must include an empty Prefix element in the Logging element.", + "TooManyCacheBehaviors$Message": null, + "TooManyCertificates$Message": null, + "TooManyCloudFrontOriginAccessIdentities$Message": null, + "TooManyCookieNamesInWhiteList$Message": null, + "TooManyDistributionCNAMEs$Message": null, + "TooManyDistributions$Message": null, + "TooManyHeadersInForwardedValues$Message": null, + "TooManyInvalidationsInProgress$Message": null, + "TooManyOriginCustomHeaders$Message": null, + "TooManyOrigins$Message": null, + "TooManyStreamingDistributionCNAMEs$Message": null, + "TooManyStreamingDistributions$Message": null, + "TooManyTrustedSigners$Message": null, + "TrustedSignerDoesNotExist$Message": null, + "UpdateCloudFrontOriginAccessIdentityRequest$Id": "The identity's id.", + "UpdateCloudFrontOriginAccessIdentityRequest$IfMatch": "The value of the ETag header you received when retrieving the identity's configuration. For example: E2QWRUHAPOMQZL.", + "UpdateCloudFrontOriginAccessIdentityResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "UpdateDistributionRequest$Id": "The distribution's id.", + "UpdateDistributionRequest$IfMatch": "The value of the ETag header you received when retrieving the distribution's configuration. For example: E2QWRUHAPOMQZL.", + "UpdateDistributionResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "UpdateStreamingDistributionRequest$Id": "The streaming distribution's id.", + "UpdateStreamingDistributionRequest$IfMatch": "The value of the ETag header you received when retrieving the streaming distribution's configuration. For example: E2QWRUHAPOMQZL.", + "UpdateStreamingDistributionResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "ViewerCertificate$IAMCertificateId": "If you want viewers to use HTTPS to request your objects and you're using an alternate domain name in your object URLs (for example, https://example.com/logo.jpg), specify the IAM certificate identifier of the custom viewer certificate for this distribution. Specify either this value, ACMCertificateArn, or CloudFrontDefaultCertificate.", + "ViewerCertificate$ACMCertificateArn": "If you want viewers to use HTTPS to request your objects and you're using an alternate domain name in your object URLs (for example, https://example.com/logo.jpg), specify the ACM certificate ARN of the custom viewer certificate for this distribution. Specify either this value, IAMCertificateId, or CloudFrontDefaultCertificate.", + "ViewerCertificate$Certificate": "Note: this field is deprecated. Please use one of [ACMCertificateArn, IAMCertificateId, CloudFrontDefaultCertificate]." 
+ } + }, + "timestamp": { + "base": null, + "refs": { + "Distribution$LastModifiedTime": "The date and time the distribution was last modified.", + "DistributionSummary$LastModifiedTime": "The date and time the distribution was last modified.", + "Invalidation$CreateTime": "The date and time the invalidation request was first made.", + "InvalidationSummary$CreateTime": null, + "StreamingDistribution$LastModifiedTime": "The date and time the distribution was last modified.", + "StreamingDistributionSummary$LastModifiedTime": "The date and time the distribution was last modified." + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-28/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-28/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-28/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-28/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-28/paginators-1.json new file mode 100644 index 000000000..51fbb907f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-28/paginators-1.json @@ -0,0 +1,32 @@ +{ + "pagination": { + "ListCloudFrontOriginAccessIdentities": { + "input_token": "Marker", + "output_token": "CloudFrontOriginAccessIdentityList.NextMarker", + "limit_key": "MaxItems", + "more_results": "CloudFrontOriginAccessIdentityList.IsTruncated", + "result_key": "CloudFrontOriginAccessIdentityList.Items" + }, + "ListDistributions": { + "input_token": "Marker", + "output_token": "DistributionList.NextMarker", + "limit_key": "MaxItems", + "more_results": "DistributionList.IsTruncated", + "result_key": "DistributionList.Items" + }, + "ListInvalidations": { + "input_token": "Marker", + "output_token": "InvalidationList.NextMarker", + "limit_key": "MaxItems", + "more_results": "InvalidationList.IsTruncated", + "result_key": "InvalidationList.Items" + }, + "ListStreamingDistributions": { + "input_token": "Marker", + "output_token": "StreamingDistributionList.NextMarker", + "limit_key": "MaxItems", + "more_results": "StreamingDistributionList.IsTruncated", + "result_key": "StreamingDistributionList.Items" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-28/waiters-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-28/waiters-2.json new file mode 100644 index 000000000..edd74b2a3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-28/waiters-2.json @@ -0,0 +1,47 @@ +{ + "version": 2, + "waiters": { + "DistributionDeployed": { + "delay": 60, + "operation": "GetDistribution", + "maxAttempts": 25, + "description": "Wait until a distribution is deployed.", + "acceptors": [ + { + "expected": "Deployed", + "matcher": "path", + "state": "success", + "argument": "Distribution.Status" + } + ] + }, + "InvalidationCompleted": { + "delay": 20, + "operation": "GetInvalidation", + "maxAttempts": 30, + "description": "Wait until an invalidation has completed.", + "acceptors": [ + { + "expected": "Completed", + "matcher": "path", + "state": "success", + "argument": "Invalidation.Status" + } + ] + }, + "StreamingDistributionDeployed": { + "delay": 60, + "operation": "GetStreamingDistribution", + "maxAttempts": 25, + "description": "Wait until a streaming distribution is deployed.", + 
"acceptors": [ + { + "expected": "Deployed", + "matcher": "path", + "state": "success", + "argument": "StreamingDistribution.Status" + } + ] + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudhsm/2014-05-30/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudhsm/2014-05-30/api-2.json new file mode 100644 index 000000000..d4cfcbaf6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudhsm/2014-05-30/api-2.json @@ -0,0 +1,877 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2014-05-30", + "endpointPrefix":"cloudhsm", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"CloudHSM", + "serviceFullName":"Amazon CloudHSM", + "signatureVersion":"v4", + "targetPrefix":"CloudHsmFrontendService" + }, + "operations":{ + "AddTagsToResource":{ + "name":"AddTagsToResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddTagsToResourceRequest"}, + "output":{"shape":"AddTagsToResourceResponse"}, + "errors":[ + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmInternalException"}, + {"shape":"InvalidRequestException"} + ] + }, + "CreateHapg":{ + "name":"CreateHapg", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateHapgRequest"}, + "output":{"shape":"CreateHapgResponse"}, + "errors":[ + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmInternalException"}, + {"shape":"InvalidRequestException"} + ] + }, + "CreateHsm":{ + "name":"CreateHsm", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateHsmRequest"}, + "output":{"shape":"CreateHsmResponse"}, + "errors":[ + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmInternalException"}, + {"shape":"InvalidRequestException"} + ] + }, + "CreateLunaClient":{ + "name":"CreateLunaClient", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateLunaClientRequest"}, + "output":{"shape":"CreateLunaClientResponse"}, + "errors":[ + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmInternalException"}, + {"shape":"InvalidRequestException"} + ] + }, + "DeleteHapg":{ + "name":"DeleteHapg", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteHapgRequest"}, + "output":{"shape":"DeleteHapgResponse"}, + "errors":[ + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmInternalException"}, + {"shape":"InvalidRequestException"} + ] + }, + "DeleteHsm":{ + "name":"DeleteHsm", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteHsmRequest"}, + "output":{"shape":"DeleteHsmResponse"}, + "errors":[ + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmInternalException"}, + {"shape":"InvalidRequestException"} + ] + }, + "DeleteLunaClient":{ + "name":"DeleteLunaClient", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteLunaClientRequest"}, + "output":{"shape":"DeleteLunaClientResponse"}, + "errors":[ + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmInternalException"}, + {"shape":"InvalidRequestException"} + ] + }, + "DescribeHapg":{ + "name":"DescribeHapg", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeHapgRequest"}, + "output":{"shape":"DescribeHapgResponse"}, + "errors":[ + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmInternalException"}, + {"shape":"InvalidRequestException"} + ] + }, + "DescribeHsm":{ + "name":"DescribeHsm", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeHsmRequest"}, 
+ "output":{"shape":"DescribeHsmResponse"}, + "errors":[ + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmInternalException"}, + {"shape":"InvalidRequestException"} + ] + }, + "DescribeLunaClient":{ + "name":"DescribeLunaClient", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeLunaClientRequest"}, + "output":{"shape":"DescribeLunaClientResponse"}, + "errors":[ + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmInternalException"}, + {"shape":"InvalidRequestException"} + ] + }, + "GetConfig":{ + "name":"GetConfig", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetConfigRequest"}, + "output":{"shape":"GetConfigResponse"}, + "errors":[ + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmInternalException"}, + {"shape":"InvalidRequestException"} + ] + }, + "ListAvailableZones":{ + "name":"ListAvailableZones", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAvailableZonesRequest"}, + "output":{"shape":"ListAvailableZonesResponse"}, + "errors":[ + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmInternalException"}, + {"shape":"InvalidRequestException"} + ] + }, + "ListHapgs":{ + "name":"ListHapgs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListHapgsRequest"}, + "output":{"shape":"ListHapgsResponse"}, + "errors":[ + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmInternalException"}, + {"shape":"InvalidRequestException"} + ] + }, + "ListHsms":{ + "name":"ListHsms", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListHsmsRequest"}, + "output":{"shape":"ListHsmsResponse"}, + "errors":[ + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmInternalException"}, + {"shape":"InvalidRequestException"} + ] + }, + "ListLunaClients":{ + "name":"ListLunaClients", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListLunaClientsRequest"}, + "output":{"shape":"ListLunaClientsResponse"}, + "errors":[ + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmInternalException"}, + {"shape":"InvalidRequestException"} + ] + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmInternalException"}, + {"shape":"InvalidRequestException"} + ] + }, + "ModifyHapg":{ + "name":"ModifyHapg", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyHapgRequest"}, + "output":{"shape":"ModifyHapgResponse"}, + "errors":[ + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmInternalException"}, + {"shape":"InvalidRequestException"} + ] + }, + "ModifyHsm":{ + "name":"ModifyHsm", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyHsmRequest"}, + "output":{"shape":"ModifyHsmResponse"}, + "errors":[ + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmInternalException"}, + {"shape":"InvalidRequestException"} + ] + }, + "ModifyLunaClient":{ + "name":"ModifyLunaClient", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyLunaClientRequest"}, + "output":{"shape":"ModifyLunaClientResponse"}, + "errors":[ + {"shape":"CloudHsmServiceException"} + ] + }, + "RemoveTagsFromResource":{ + "name":"RemoveTagsFromResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"RemoveTagsFromResourceRequest"}, + "output":{"shape":"RemoveTagsFromResourceResponse"}, + "errors":[ + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmInternalException"}, + {"shape":"InvalidRequestException"} + ] + } + }, + "shapes":{ + "AZ":{ + "type":"string", + "pattern":"[a-zA-Z0-9\\-]*" + }, + "AZList":{ + "type":"list", + "member":{"shape":"AZ"} + }, + "AddTagsToResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagList" + ], + "members":{ + "ResourceArn":{"shape":"String"}, + "TagList":{"shape":"TagList"} + } + }, + "AddTagsToResourceResponse":{ + "type":"structure", + "required":["Status"], + "members":{ + "Status":{"shape":"String"} + } + }, + "Boolean":{"type":"boolean"}, + "Certificate":{ + "type":"string", + "max":2400, + "min":600, + "pattern":"[\\w :+=./\\n-]*" + }, + "CertificateFingerprint":{ + "type":"string", + "pattern":"([0-9a-fA-F][0-9a-fA-F]:){15}[0-9a-fA-F][0-9a-fA-F]" + }, + "ClientArn":{ + "type":"string", + "pattern":"arn:aws(-iso)?:cloudhsm:[a-zA-Z0-9\\-]*:[0-9]{12}:client-[0-9a-f]{8}" + }, + "ClientLabel":{ + "type":"string", + "pattern":"[a-zA-Z0-9_.-]{2,64}" + }, + "ClientList":{ + "type":"list", + "member":{"shape":"ClientArn"} + }, + "ClientToken":{ + "type":"string", + "pattern":"[a-zA-Z0-9]{1,64}" + }, + "ClientVersion":{ + "type":"string", + "enum":[ + "5.1", + "5.3" + ] + }, + "CloudHsmInternalException":{ + "type":"structure", + "members":{ + }, + "exception":true, + "fault":true + }, + "CloudHsmObjectState":{ + "type":"string", + "enum":[ + "READY", + "UPDATING", + "DEGRADED" + ] + }, + "CloudHsmServiceException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"}, + "retryable":{"shape":"Boolean"} + }, + "exception":true + }, + "CreateHapgRequest":{ + "type":"structure", + "required":["Label"], + "members":{ + "Label":{"shape":"Label"} + } + }, + "CreateHapgResponse":{ + "type":"structure", + "members":{ + "HapgArn":{"shape":"HapgArn"} + } + }, + "CreateHsmRequest":{ + "type":"structure", + "required":[ + "SubnetId", + "SshKey", + "IamRoleArn", + "SubscriptionType" + ], + "members":{ + "SubnetId":{ + "shape":"SubnetId", + "locationName":"SubnetId" + }, + "SshKey":{ + "shape":"SshKey", + "locationName":"SshKey" + }, + "EniIp":{ + "shape":"IpAddress", + "locationName":"EniIp" + }, + "IamRoleArn":{ + "shape":"IamRoleArn", + "locationName":"IamRoleArn" + }, + "ExternalId":{ + "shape":"ExternalId", + "locationName":"ExternalId" + }, + "SubscriptionType":{ + "shape":"SubscriptionType", + "locationName":"SubscriptionType" + }, + "ClientToken":{ + "shape":"ClientToken", + "locationName":"ClientToken" + }, + "SyslogIp":{ + "shape":"IpAddress", + "locationName":"SyslogIp" + } + }, + "locationName":"CreateHsmRequest" + }, + "CreateHsmResponse":{ + "type":"structure", + "members":{ + "HsmArn":{"shape":"HsmArn"} + } + }, + "CreateLunaClientRequest":{ + "type":"structure", + "required":["Certificate"], + "members":{ + "Label":{"shape":"ClientLabel"}, + "Certificate":{"shape":"Certificate"} + } + }, + "CreateLunaClientResponse":{ + "type":"structure", + "members":{ + "ClientArn":{"shape":"ClientArn"} + } + }, + "DeleteHapgRequest":{ + "type":"structure", + "required":["HapgArn"], + "members":{ + "HapgArn":{"shape":"HapgArn"} + } + }, + "DeleteHapgResponse":{ + "type":"structure", + "required":["Status"], + "members":{ + "Status":{"shape":"String"} + } + }, + "DeleteHsmRequest":{ + "type":"structure", + "required":["HsmArn"], + "members":{ + "HsmArn":{ + "shape":"HsmArn", + "locationName":"HsmArn" 
+ } + }, + "locationName":"DeleteHsmRequest" + }, + "DeleteHsmResponse":{ + "type":"structure", + "required":["Status"], + "members":{ + "Status":{"shape":"String"} + } + }, + "DeleteLunaClientRequest":{ + "type":"structure", + "required":["ClientArn"], + "members":{ + "ClientArn":{"shape":"ClientArn"} + } + }, + "DeleteLunaClientResponse":{ + "type":"structure", + "required":["Status"], + "members":{ + "Status":{"shape":"String"} + } + }, + "DescribeHapgRequest":{ + "type":"structure", + "required":["HapgArn"], + "members":{ + "HapgArn":{"shape":"HapgArn"} + } + }, + "DescribeHapgResponse":{ + "type":"structure", + "members":{ + "HapgArn":{"shape":"HapgArn"}, + "HapgSerial":{"shape":"String"}, + "HsmsLastActionFailed":{"shape":"HsmList"}, + "HsmsPendingDeletion":{"shape":"HsmList"}, + "HsmsPendingRegistration":{"shape":"HsmList"}, + "Label":{"shape":"Label"}, + "LastModifiedTimestamp":{"shape":"Timestamp"}, + "PartitionSerialList":{"shape":"PartitionSerialList"}, + "State":{"shape":"CloudHsmObjectState"} + } + }, + "DescribeHsmRequest":{ + "type":"structure", + "members":{ + "HsmArn":{"shape":"HsmArn"}, + "HsmSerialNumber":{"shape":"HsmSerialNumber"} + } + }, + "DescribeHsmResponse":{ + "type":"structure", + "members":{ + "HsmArn":{"shape":"HsmArn"}, + "Status":{"shape":"HsmStatus"}, + "StatusDetails":{"shape":"String"}, + "AvailabilityZone":{"shape":"AZ"}, + "EniId":{"shape":"EniId"}, + "EniIp":{"shape":"IpAddress"}, + "SubscriptionType":{"shape":"SubscriptionType"}, + "SubscriptionStartDate":{"shape":"Timestamp"}, + "SubscriptionEndDate":{"shape":"Timestamp"}, + "VpcId":{"shape":"VpcId"}, + "SubnetId":{"shape":"SubnetId"}, + "IamRoleArn":{"shape":"IamRoleArn"}, + "SerialNumber":{"shape":"HsmSerialNumber"}, + "VendorName":{"shape":"String"}, + "HsmType":{"shape":"String"}, + "SoftwareVersion":{"shape":"String"}, + "SshPublicKey":{"shape":"SshKey"}, + "SshKeyLastUpdated":{"shape":"Timestamp"}, + "ServerCertUri":{"shape":"String"}, + "ServerCertLastUpdated":{"shape":"Timestamp"}, + "Partitions":{"shape":"PartitionList"} + } + }, + "DescribeLunaClientRequest":{ + "type":"structure", + "members":{ + "ClientArn":{"shape":"ClientArn"}, + "CertificateFingerprint":{"shape":"CertificateFingerprint"} + } + }, + "DescribeLunaClientResponse":{ + "type":"structure", + "members":{ + "ClientArn":{"shape":"ClientArn"}, + "Certificate":{"shape":"Certificate"}, + "CertificateFingerprint":{"shape":"CertificateFingerprint"}, + "LastModifiedTimestamp":{"shape":"Timestamp"}, + "Label":{"shape":"Label"} + } + }, + "EniId":{ + "type":"string", + "pattern":"eni-[0-9a-f]{8}" + }, + "ExternalId":{ + "type":"string", + "pattern":"[\\w :+=./-]*" + }, + "GetConfigRequest":{ + "type":"structure", + "required":[ + "ClientArn", + "ClientVersion", + "HapgList" + ], + "members":{ + "ClientArn":{"shape":"ClientArn"}, + "ClientVersion":{"shape":"ClientVersion"}, + "HapgList":{"shape":"HapgList"} + } + }, + "GetConfigResponse":{ + "type":"structure", + "members":{ + "ConfigType":{"shape":"String"}, + "ConfigFile":{"shape":"String"}, + "ConfigCred":{"shape":"String"} + } + }, + "HapgArn":{ + "type":"string", + "pattern":"arn:aws(-iso)?:cloudhsm:[a-zA-Z0-9\\-]*:[0-9]{12}:hapg-[0-9a-f]{8}" + }, + "HapgList":{ + "type":"list", + "member":{"shape":"HapgArn"} + }, + "HsmArn":{ + "type":"string", + "pattern":"arn:aws(-iso)?:cloudhsm:[a-zA-Z0-9\\-]*:[0-9]{12}:hsm-[0-9a-f]{8}" + }, + "HsmList":{ + "type":"list", + "member":{"shape":"HsmArn"} + }, + "HsmSerialNumber":{ + "type":"string", + "pattern":"\\d{1,16}" + }, + "HsmStatus":{ + 
"type":"string", + "enum":[ + "PENDING", + "RUNNING", + "UPDATING", + "SUSPENDED", + "TERMINATING", + "TERMINATED", + "DEGRADED" + ] + }, + "IamRoleArn":{ + "type":"string", + "pattern":"arn:aws(-iso)?:iam::[0-9]{12}:role/[a-zA-Z0-9_\\+=,\\.\\-@]{1,64}" + }, + "InvalidRequestException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "IpAddress":{ + "type":"string", + "pattern":"\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}" + }, + "Label":{ + "type":"string", + "pattern":"[a-zA-Z0-9_.-]{1,64}" + }, + "ListAvailableZonesRequest":{ + "type":"structure", + "members":{ + } + }, + "ListAvailableZonesResponse":{ + "type":"structure", + "members":{ + "AZList":{"shape":"AZList"} + } + }, + "ListHapgsRequest":{ + "type":"structure", + "members":{ + "NextToken":{"shape":"PaginationToken"} + } + }, + "ListHapgsResponse":{ + "type":"structure", + "required":["HapgList"], + "members":{ + "HapgList":{"shape":"HapgList"}, + "NextToken":{"shape":"PaginationToken"} + } + }, + "ListHsmsRequest":{ + "type":"structure", + "members":{ + "NextToken":{"shape":"PaginationToken"} + } + }, + "ListHsmsResponse":{ + "type":"structure", + "members":{ + "HsmList":{"shape":"HsmList"}, + "NextToken":{"shape":"PaginationToken"} + } + }, + "ListLunaClientsRequest":{ + "type":"structure", + "members":{ + "NextToken":{"shape":"PaginationToken"} + } + }, + "ListLunaClientsResponse":{ + "type":"structure", + "required":["ClientList"], + "members":{ + "ClientList":{"shape":"ClientList"}, + "NextToken":{"shape":"PaginationToken"} + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{"shape":"String"} + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "required":["TagList"], + "members":{ + "TagList":{"shape":"TagList"} + } + }, + "ModifyHapgRequest":{ + "type":"structure", + "required":["HapgArn"], + "members":{ + "HapgArn":{"shape":"HapgArn"}, + "Label":{"shape":"Label"}, + "PartitionSerialList":{"shape":"PartitionSerialList"} + } + }, + "ModifyHapgResponse":{ + "type":"structure", + "members":{ + "HapgArn":{"shape":"HapgArn"} + } + }, + "ModifyHsmRequest":{ + "type":"structure", + "required":["HsmArn"], + "members":{ + "HsmArn":{ + "shape":"HsmArn", + "locationName":"HsmArn" + }, + "SubnetId":{ + "shape":"SubnetId", + "locationName":"SubnetId" + }, + "EniIp":{ + "shape":"IpAddress", + "locationName":"EniIp" + }, + "IamRoleArn":{ + "shape":"IamRoleArn", + "locationName":"IamRoleArn" + }, + "ExternalId":{ + "shape":"ExternalId", + "locationName":"ExternalId" + }, + "SyslogIp":{ + "shape":"IpAddress", + "locationName":"SyslogIp" + } + }, + "locationName":"ModifyHsmRequest" + }, + "ModifyHsmResponse":{ + "type":"structure", + "members":{ + "HsmArn":{"shape":"HsmArn"} + } + }, + "ModifyLunaClientRequest":{ + "type":"structure", + "required":[ + "ClientArn", + "Certificate" + ], + "members":{ + "ClientArn":{"shape":"ClientArn"}, + "Certificate":{"shape":"Certificate"} + } + }, + "ModifyLunaClientResponse":{ + "type":"structure", + "members":{ + "ClientArn":{"shape":"ClientArn"} + } + }, + "PaginationToken":{ + "type":"string", + "pattern":"[a-zA-Z0-9+/]*" + }, + "PartitionArn":{ + "type":"string", + "pattern":"arn:aws(-iso)?:cloudhsm:[a-zA-Z0-9\\-]*:[0-9]{12}:hsm-[0-9a-f]{8}/partition-[0-9]{6,12}" + }, + "PartitionList":{ + "type":"list", + "member":{"shape":"PartitionArn"} + }, + "PartitionSerial":{ + "type":"string", + "pattern":"\\d{6,12}" + }, + "PartitionSerialList":{ + "type":"list", + "member":{"shape":"PartitionSerial"} + 
}, + "RemoveTagsFromResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeyList" + ], + "members":{ + "ResourceArn":{"shape":"String"}, + "TagKeyList":{"shape":"TagKeyList"} + } + }, + "RemoveTagsFromResourceResponse":{ + "type":"structure", + "required":["Status"], + "members":{ + "Status":{"shape":"String"} + } + }, + "SshKey":{ + "type":"string", + "pattern":"[a-zA-Z0-9+/= ._:\\\\@-]*" + }, + "String":{ + "type":"string", + "pattern":"[\\w :+=./\\\\-]*" + }, + "SubnetId":{ + "type":"string", + "pattern":"subnet-[0-9a-f]{8}" + }, + "SubscriptionType":{ + "type":"string", + "enum":["PRODUCTION"] + }, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{"shape":"TagKey"}, + "Value":{"shape":"TagValue"} + } + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"} + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"} + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, + "Timestamp":{ + "type":"string", + "pattern":"\\d*" + }, + "VpcId":{ + "type":"string", + "pattern":"vpc-[0-9a-f]{8}" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudhsm/2014-05-30/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudhsm/2014-05-30/docs-2.json new file mode 100644 index 000000000..2a4f88482 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudhsm/2014-05-30/docs-2.json @@ -0,0 +1,543 @@ +{ + "version": "2.0", + "service": "AWS CloudHSM Service", + "operations": { + "AddTagsToResource": "

Adds or overwrites one or more tags for the specified AWS CloudHSM resource. Each tag consists of a key and a value. Tag keys must be unique to each resource.", + "CreateHapg": "Creates a high-availability partition group. A high-availability partition group is a group of partitions that spans multiple physical HSMs.", + "CreateHsm": "Creates an uninitialized HSM instance. There is an upfront fee charged for each HSM instance that you create with the CreateHsm operation. If you accidentally provision an HSM and want to request a refund, delete the instance using the DeleteHsm operation, go to the AWS Support Center, create a new case, and select Account and Billing Support. It can take up to 20 minutes to create and provision an HSM. You can monitor the status of the HSM with the DescribeHsm operation. The HSM is ready to be initialized when the status changes to RUNNING.", + "CreateLunaClient": "Creates an HSM client.", + "DeleteHapg": "Deletes a high-availability partition group.", + "DeleteHsm": "Deletes an HSM. After completion, this operation cannot be undone and your key material cannot be recovered.", + "DeleteLunaClient": "Deletes a client.", + "DescribeHapg": "Retrieves information about a high-availability partition group.", + "DescribeHsm": "Retrieves information about an HSM. You can identify the HSM by its ARN or its serial number.", + "DescribeLunaClient": "Retrieves information about an HSM client.", + "GetConfig": "Gets the configuration files necessary to connect to all high availability partition groups the client is associated with.", + "ListAvailableZones": "Lists the Availability Zones that have available AWS CloudHSM capacity.", + "ListHapgs": "Lists the high-availability partition groups for the account. This operation supports pagination with the use of the NextToken member. If more results are available, the NextToken member of the response contains a token that you pass in the next call to ListHapgs to retrieve the next set of items.", + "ListHsms": "Retrieves the identifiers of all of the HSMs provisioned for the current customer. This operation supports pagination with the use of the NextToken member. If more results are available, the NextToken member of the response contains a token that you pass in the next call to ListHsms to retrieve the next set of items.", + "ListLunaClients": "Lists all of the clients. This operation supports pagination with the use of the NextToken member. If more results are available, the NextToken member of the response contains a token that you pass in the next call to ListLunaClients to retrieve the next set of items.", + "ListTagsForResource": "Returns a list of all tags for the specified AWS CloudHSM resource.", + "ModifyHapg": "Modifies an existing high-availability partition group.", + "ModifyHsm": "Modifies an HSM. This operation can result in the HSM being offline for up to 15 minutes while the AWS CloudHSM service is reconfigured. If you are modifying a production HSM, you should ensure that your AWS CloudHSM service is configured for high availability, and consider executing this operation during a maintenance window.", + "ModifyLunaClient": "Modifies the certificate used by the client. This action can potentially start a workflow to install the new certificate on the client's HSMs.", + "RemoveTagsFromResource": "Removes one or more tags from the specified AWS CloudHSM resource. To remove a tag, specify only the tag key to remove (not the value). To overwrite the value for an existing tag, use AddTagsToResource." + },
+ "shapes": { + "AZ": { + "base": null, + "refs": { + "AZList$member": null, + "DescribeHsmResponse$AvailabilityZone": "The Availability Zone that the HSM is in." + } + }, + "AZList": { + "base": null, + "refs": { + "ListAvailableZonesResponse$AZList": "The list of Availability Zones that have available AWS CloudHSM capacity." + } + }, + "AddTagsToResourceRequest": { + "base": null, + "refs": { + } + }, + "AddTagsToResourceResponse": { + "base": null, + "refs": { + } + }, + "Boolean": { + "base": null, + "refs": { + "CloudHsmServiceException$retryable": "Indicates if the action can be retried." + } + }, + "Certificate": { + "base": null, + "refs": { + "CreateLunaClientRequest$Certificate": "The contents of a Base64-Encoded X.509 v3 certificate to be installed on the HSMs used by this client.", + "DescribeLunaClientResponse$Certificate": "The certificate installed on the HSMs used by this client.", + "ModifyLunaClientRequest$Certificate": "The new certificate for the client." + } + }, + "CertificateFingerprint": { + "base": null, + "refs": { + "DescribeLunaClientRequest$CertificateFingerprint": "The certificate fingerprint.", + "DescribeLunaClientResponse$CertificateFingerprint": "The certificate fingerprint." + } + }, + "ClientArn": { + "base": null, + "refs": { + "ClientList$member": null, + "CreateLunaClientResponse$ClientArn": "The ARN of the client.", + "DeleteLunaClientRequest$ClientArn": "The ARN of the client to delete.", + "DescribeLunaClientRequest$ClientArn": "The ARN of the client.", + "DescribeLunaClientResponse$ClientArn": "The ARN of the client.", + "GetConfigRequest$ClientArn": "The ARN of the client.", + "ModifyLunaClientRequest$ClientArn": "The ARN of the client.", + "ModifyLunaClientResponse$ClientArn": "The ARN of the client." + } + }, + "ClientLabel": { + "base": null, + "refs": { + "CreateLunaClientRequest$Label": "The label for the client." + } + }, + "ClientList": { + "base": null, + "refs": { + "ListLunaClientsResponse$ClientList": "The list of clients." + } + }, + "ClientToken": { + "base": null, + "refs": { + "CreateHsmRequest$ClientToken": "A user-defined token to ensure idempotence. Subsequent calls to this operation with the same token will be ignored." + } + }, + "ClientVersion": { + "base": null, + "refs": { + "GetConfigRequest$ClientVersion": "The client version." + } + },
+ "CloudHsmInternalException": { + "base": "Indicates that an internal error occurred.", + "refs": { + } + }, + "CloudHsmObjectState": { + "base": null, + "refs": { + "DescribeHapgResponse$State": "The state of the high-availability partition group." + } + }, + "CloudHsmServiceException": { + "base": "Indicates that an exception occurred in the AWS CloudHSM service.", + "refs": { + } + }, + "CreateHapgRequest": { + "base": "Contains the inputs for the CreateHapgRequest action.", + "refs": { + } + }, + "CreateHapgResponse": { + "base": "Contains the output of the CreateHAPartitionGroup action.", + "refs": { + } + }, + "CreateHsmRequest": { + "base": "Contains the inputs for the CreateHsm operation.", + "refs": { + } + }, + "CreateHsmResponse": { + "base": "Contains the output of the CreateHsm operation.", + "refs": { + } + }, + "CreateLunaClientRequest": { + "base": "Contains the inputs for the CreateLunaClient action.", + "refs": { + } + }, + "CreateLunaClientResponse": { + "base": "Contains the output of the CreateLunaClient action.", + "refs": { + } + },
+ "DeleteHapgRequest": { + "base": "Contains the inputs for the DeleteHapg action.", + "refs": { + } + }, + "DeleteHapgResponse": { + "base": "Contains the output of the DeleteHapg action.", + "refs": { + } + }, + "DeleteHsmRequest": { + "base": "Contains the inputs for the DeleteHsm operation.", + "refs": { + } + }, + "DeleteHsmResponse": { + "base": "Contains the output of the DeleteHsm operation.", + "refs": { + } + }, + "DeleteLunaClientRequest": { + "base": null, + "refs": { + } + }, + "DeleteLunaClientResponse": { + "base": null, + "refs": { + } + }, + "DescribeHapgRequest": { + "base": "Contains the inputs for the DescribeHapg action.", + "refs": { + } + }, + "DescribeHapgResponse": { + "base": "Contains the output of the DescribeHapg action.", + "refs": { + } + }, + "DescribeHsmRequest": { + "base": "Contains the inputs for the DescribeHsm operation.", + "refs": { + } + }, + "DescribeHsmResponse": { + "base": "Contains the output of the DescribeHsm operation.", + "refs": { + } + }, + "DescribeLunaClientRequest": { + "base": null, + "refs": { + } + }, + "DescribeLunaClientResponse": { + "base": null, + "refs": { + } + }, + "EniId": { + "base": null, + "refs": { + "DescribeHsmResponse$EniId": "The identifier of the elastic network interface (ENI) attached to the HSM." + } + }, + "ExternalId": { + "base": null, + "refs": { + "CreateHsmRequest$ExternalId": "The external ID from IamRoleArn, if present.", + "ModifyHsmRequest$ExternalId": "The new external ID." + } + }, + "GetConfigRequest": { + "base": null, + "refs": { + } + }, + "GetConfigResponse": { + "base": null, + "refs": { + } + },
+ "HapgArn": { + "base": null, + "refs": { + "CreateHapgResponse$HapgArn": "The ARN of the high-availability partition group.", + "DeleteHapgRequest$HapgArn": "The ARN of the high-availability partition group to delete.", + "DescribeHapgRequest$HapgArn": "The ARN of the high-availability partition group to describe.", + "DescribeHapgResponse$HapgArn": "The ARN of the high-availability partition group.", + "HapgList$member": null, + "ModifyHapgRequest$HapgArn": "The ARN of the high-availability partition group to modify.", + "ModifyHapgResponse$HapgArn": "The ARN of the high-availability partition group." + } + }, + "HapgList": { + "base": null, + "refs": { + "GetConfigRequest$HapgList": "A list of ARNs that identify the high-availability partition groups that are associated with the client.", + "ListHapgsResponse$HapgList": "The list of high-availability partition groups." + } + }, + "HsmArn": { + "base": "An ARN that identifies an HSM.", + "refs": { + "CreateHsmResponse$HsmArn": "The ARN of the HSM.", + "DeleteHsmRequest$HsmArn": "The ARN of the HSM to delete.", + "DescribeHsmRequest$HsmArn": "The ARN of the HSM. Either the HsmArn or the SerialNumber parameter must be specified.", + "DescribeHsmResponse$HsmArn": "The ARN of the HSM.", + "HsmList$member": null, + "ModifyHsmRequest$HsmArn": "The ARN of the HSM to modify.", + "ModifyHsmResponse$HsmArn": "The ARN of the HSM." + } + }, + "HsmList": { + "base": "Contains a list of ARNs that identify the HSMs.", + "refs": { + "DescribeHapgResponse$HsmsLastActionFailed": null, + "DescribeHapgResponse$HsmsPendingDeletion": null, + "DescribeHapgResponse$HsmsPendingRegistration": null, + "ListHsmsResponse$HsmList": "The list of ARNs that identify the HSMs." + } + }, + "HsmSerialNumber": { + "base": null, + "refs": { + "DescribeHsmRequest$HsmSerialNumber": "The serial number of the HSM. Either the HsmArn or the HsmSerialNumber parameter must be specified.", + "DescribeHsmResponse$SerialNumber": "The serial number of the HSM." + } + }, + "HsmStatus": { + "base": null, + "refs": { + "DescribeHsmResponse$Status": "The status of the HSM." + } + },
+ "IamRoleArn": { + "base": null, + "refs": { + "CreateHsmRequest$IamRoleArn": "The ARN of an IAM role to enable the AWS CloudHSM service to allocate an ENI on your behalf.", + "DescribeHsmResponse$IamRoleArn": "The ARN of the IAM role assigned to the HSM.", + "ModifyHsmRequest$IamRoleArn": "The new IAM role ARN." + } + }, + "InvalidRequestException": { + "base": "Indicates that one or more of the request parameters are not valid.", + "refs": { + } + }, + "IpAddress": { + "base": null, + "refs": { + "CreateHsmRequest$EniIp": "The IP address to assign to the HSM's ENI. If an IP address is not specified, an IP address will be randomly chosen from the CIDR range of the subnet.", + "CreateHsmRequest$SyslogIp": "The IP address for the syslog monitoring server. The AWS CloudHSM service only supports one syslog monitoring server.", + "DescribeHsmResponse$EniIp": "The IP address assigned to the HSM's ENI.", + "ModifyHsmRequest$EniIp": "The new IP address for the elastic network interface (ENI) attached to the HSM. If the HSM is moved to a different subnet, and an IP address is not specified, an IP address will be randomly chosen from the CIDR range of the new subnet.", + "ModifyHsmRequest$SyslogIp": "The new IP address for the syslog monitoring server. The AWS CloudHSM service only supports one syslog monitoring server." + } + }, + "Label": { + "base": null, + "refs": { + "CreateHapgRequest$Label": "The label of the new high-availability partition group.", + "DescribeHapgResponse$Label": "The label for the high-availability partition group.", + "DescribeLunaClientResponse$Label": "The label of the client.", + "ModifyHapgRequest$Label": "The new label for the high-availability partition group." + } + },
+ "ListAvailableZonesRequest": { + "base": "Contains the inputs for the ListAvailableZones action.", + "refs": { + } + }, + "ListAvailableZonesResponse": { + "base": null, + "refs": { + } + }, + "ListHapgsRequest": { + "base": null, + "refs": { + } + }, + "ListHapgsResponse": { + "base": null, + "refs": { + } + }, + "ListHsmsRequest": { + "base": null, + "refs": { + } + }, + "ListHsmsResponse": { + "base": "Contains the output of the ListHsms operation.", + "refs": { + } + }, + "ListLunaClientsRequest": { + "base": null, + "refs": { + } + }, + "ListLunaClientsResponse": { + "base": null, + "refs": { + } + }, + "ListTagsForResourceRequest": { + "base": null, + "refs": { + } + }, + "ListTagsForResourceResponse": { + "base": null, + "refs": { + } + }, + "ModifyHapgRequest": { + "base": null, + "refs": { + } + }, + "ModifyHapgResponse": { + "base": null, + "refs": { + } + }, + "ModifyHsmRequest": { + "base": "Contains the inputs for the ModifyHsm operation.", + "refs": { + } + }, + "ModifyHsmResponse": { + "base": "Contains the output of the ModifyHsm operation.", + "refs": { + } + }, + "ModifyLunaClientRequest": { + "base": null, + "refs": { + } + }, + "ModifyLunaClientResponse": { + "base": null, + "refs": { + } + },
+ "PaginationToken": { + "base": null, + "refs": { + "ListHapgsRequest$NextToken": "The NextToken value from a previous call to ListHapgs. Pass null if this is the first call.", + "ListHapgsResponse$NextToken": "If not null, more results are available. Pass this value to ListHapgs to retrieve the next set of items.", + "ListHsmsRequest$NextToken": "The NextToken value from a previous call to ListHsms. Pass null if this is the first call.", + "ListHsmsResponse$NextToken": "If not null, more results are available. Pass this value to ListHsms to retrieve the next set of items.", + "ListLunaClientsRequest$NextToken": "The NextToken value from a previous call to ListLunaClients. Pass null if this is the first call.", + "ListLunaClientsResponse$NextToken": "If not null, more results are available. Pass this to ListLunaClients to retrieve the next set of items." + } + }, + "PartitionArn": { + "base": null, + "refs": { + "PartitionList$member": null + } + }, + "PartitionList": { + "base": null, + "refs": { + "DescribeHsmResponse$Partitions": "The list of partitions on the HSM." + } + }, + "PartitionSerial": { + "base": null, + "refs": { + "PartitionSerialList$member": null + } + }, + "PartitionSerialList": { + "base": null, + "refs": { + "DescribeHapgResponse$PartitionSerialList": "The list of partition serial numbers that belong to the high-availability partition group.", + "ModifyHapgRequest$PartitionSerialList": "The list of partition serial numbers to make members of the high-availability partition group." + } + }, + "RemoveTagsFromResourceRequest": { + "base": null, + "refs": { + } + }, + "RemoveTagsFromResourceResponse": { + "base": null, + "refs": { + } + }, + "SshKey": { + "base": null, + "refs": { + "CreateHsmRequest$SshKey": "The SSH public key to install on the HSM.", + "DescribeHsmResponse$SshPublicKey": "The public SSH key." + } + },
+ "String": { + "base": null, + "refs": { + "AddTagsToResourceRequest$ResourceArn": "The Amazon Resource Name (ARN) of the AWS CloudHSM resource to tag.", + "AddTagsToResourceResponse$Status": "The status of the operation.", + "CloudHsmServiceException$message": "Additional information about the error.", + "DeleteHapgResponse$Status": "The status of the action.", + "DeleteHsmResponse$Status": "The status of the operation.", + "DeleteLunaClientResponse$Status": "The status of the action.", + "DescribeHapgResponse$HapgSerial": "The serial number of the high-availability partition group.", + "DescribeHsmResponse$StatusDetails": "Contains additional information about the status of the HSM.", + "DescribeHsmResponse$VendorName": "The name of the HSM vendor.", + "DescribeHsmResponse$HsmType": "The HSM model type.", + "DescribeHsmResponse$SoftwareVersion": "The HSM software version.", + "DescribeHsmResponse$ServerCertUri": "The URI of the certificate server.", + "GetConfigResponse$ConfigType": "The type of credentials.", + "GetConfigResponse$ConfigFile": "The chrystoki.conf configuration file.", + "GetConfigResponse$ConfigCred": "The certificate file containing the server.pem files of the HSMs.", + "ListTagsForResourceRequest$ResourceArn": "The Amazon Resource Name (ARN) of the AWS CloudHSM resource.", + "RemoveTagsFromResourceRequest$ResourceArn": "The Amazon Resource Name (ARN) of the AWS CloudHSM resource.", + "RemoveTagsFromResourceResponse$Status": "The status of the operation." + } + },
+ "SubnetId": { + "base": null, + "refs": { + "CreateHsmRequest$SubnetId": "The identifier of the subnet in your VPC in which to place the HSM.", + "DescribeHsmResponse$SubnetId": "The identifier of the subnet that the HSM is in.", + "ModifyHsmRequest$SubnetId": "The new identifier of the subnet that the HSM is in. The new subnet must be in the same Availability Zone as the current subnet." + } + }, + "SubscriptionType": { + "base": "Specifies the type of subscription for the HSM. • PRODUCTION - The HSM is being used in a production environment. • TRIAL - The HSM is being used in a product trial.", + "refs": { + "CreateHsmRequest$SubscriptionType": null, + "DescribeHsmResponse$SubscriptionType": null + } + }, + "Tag": { + "base": "A key-value pair that identifies or specifies metadata about an AWS CloudHSM resource.", + "refs": { + "TagList$member": null + } + }, + "TagKey": { + "base": null, + "refs": { + "Tag$Key": "The key of the tag.", + "TagKeyList$member": null + } + }, + "TagKeyList": { + "base": null, + "refs": { + "RemoveTagsFromResourceRequest$TagKeyList": "The tag key or keys to remove. Specify only the tag key to remove (not the value). To overwrite the value for an existing tag, use AddTagsToResource." + } + }, + "TagList": { + "base": null, + "refs": { + "AddTagsToResourceRequest$TagList": "One or more tags.", + "ListTagsForResourceResponse$TagList": "One or more tags." + } + }, + "TagValue": { + "base": null, + "refs": { + "Tag$Value": "The value of the tag." + } + }, + "Timestamp": { + "base": null, + "refs": { + "DescribeHapgResponse$LastModifiedTimestamp": "The date and time the high-availability partition group was last modified.", + "DescribeHsmResponse$SubscriptionStartDate": "The subscription start date.", + "DescribeHsmResponse$SubscriptionEndDate": "The subscription end date.", + "DescribeHsmResponse$SshKeyLastUpdated": "The date and time that the SSH key was last updated.", + "DescribeHsmResponse$ServerCertLastUpdated": "The date and time that the server certificate was last updated.", + "DescribeLunaClientResponse$LastModifiedTimestamp": "The date and time the client was last modified." + } + }, + "VpcId": { + "base": null, + "refs": { + "DescribeHsmResponse$VpcId": "The identifier of the VPC that the HSM is in.
    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudhsm/2014-05-30/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudhsm/2014-05-30/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudhsm/2014-05-30/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudsearch/2013-01-01/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudsearch/2013-01-01/api-2.json new file mode 100644 index 000000000..eca143ce1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudsearch/2013-01-01/api-2.json @@ -0,0 +1,2001 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2013-01-01", + "endpointPrefix":"cloudsearch", + "serviceFullName":"Amazon CloudSearch", + "signatureVersion":"v4", + "xmlNamespace":"http://cloudsearch.amazonaws.com/doc/2013-01-01/", + "protocol":"query" + }, + "operations":{ + "BuildSuggesters":{ + "name":"BuildSuggesters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BuildSuggestersRequest"}, + "output":{ + "shape":"BuildSuggestersResponse", + "resultWrapper":"BuildSuggestersResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "CreateDomain":{ + "name":"CreateDomain", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDomainRequest"}, + "output":{ + "shape":"CreateDomainResponse", + "resultWrapper":"CreateDomainResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{ + "code":"LimitExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DefineAnalysisScheme":{ + "name":"DefineAnalysisScheme", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DefineAnalysisSchemeRequest"}, + "output":{ + "shape":"DefineAnalysisSchemeResponse", + "resultWrapper":"DefineAnalysisSchemeResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{ + "code":"LimitExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidTypeException", + "error":{ + "code":"InvalidType", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DefineExpression":{ + "name":"DefineExpression", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DefineExpressionRequest"}, + "output":{ + "shape":"DefineExpressionResponse", + "resultWrapper":"DefineExpressionResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + 
"code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{ + "code":"LimitExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidTypeException", + "error":{ + "code":"InvalidType", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DefineIndexField":{ + "name":"DefineIndexField", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DefineIndexFieldRequest"}, + "output":{ + "shape":"DefineIndexFieldResponse", + "resultWrapper":"DefineIndexFieldResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{ + "code":"LimitExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidTypeException", + "error":{ + "code":"InvalidType", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DefineSuggester":{ + "name":"DefineSuggester", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DefineSuggesterRequest"}, + "output":{ + "shape":"DefineSuggesterResponse", + "resultWrapper":"DefineSuggesterResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{ + "code":"LimitExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidTypeException", + "error":{ + "code":"InvalidType", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeleteAnalysisScheme":{ + "name":"DeleteAnalysisScheme", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAnalysisSchemeRequest"}, + "output":{ + "shape":"DeleteAnalysisSchemeResponse", + "resultWrapper":"DeleteAnalysisSchemeResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"InvalidTypeException", + "error":{ + "code":"InvalidType", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeleteDomain":{ + "name":"DeleteDomain", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDomainRequest"}, + "output":{ + "shape":"DeleteDomainResponse", + "resultWrapper":"DeleteDomainResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 
+ }, + "exception":true + } + ] + }, + "DeleteExpression":{ + "name":"DeleteExpression", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteExpressionRequest"}, + "output":{ + "shape":"DeleteExpressionResponse", + "resultWrapper":"DeleteExpressionResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"InvalidTypeException", + "error":{ + "code":"InvalidType", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeleteIndexField":{ + "name":"DeleteIndexField", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteIndexFieldRequest"}, + "output":{ + "shape":"DeleteIndexFieldResponse", + "resultWrapper":"DeleteIndexFieldResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"InvalidTypeException", + "error":{ + "code":"InvalidType", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeleteSuggester":{ + "name":"DeleteSuggester", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSuggesterRequest"}, + "output":{ + "shape":"DeleteSuggesterResponse", + "resultWrapper":"DeleteSuggesterResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"InvalidTypeException", + "error":{ + "code":"InvalidType", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeAnalysisSchemes":{ + "name":"DescribeAnalysisSchemes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAnalysisSchemesRequest"}, + "output":{ + "shape":"DescribeAnalysisSchemesResponse", + "resultWrapper":"DescribeAnalysisSchemesResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeAvailabilityOptions":{ + "name":"DescribeAvailabilityOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAvailabilityOptionsRequest"}, + "output":{ + "shape":"DescribeAvailabilityOptionsResponse", + "resultWrapper":"DescribeAvailabilityOptionsResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"InvalidTypeException", + "error":{ + "code":"InvalidType", + 
"httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{ + "code":"LimitExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"DisabledOperationException", + "error":{ + "code":"DisabledAction", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeDomains":{ + "name":"DescribeDomains", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDomainsRequest"}, + "output":{ + "shape":"DescribeDomainsResponse", + "resultWrapper":"DescribeDomainsResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + } + ] + }, + "DescribeExpressions":{ + "name":"DescribeExpressions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeExpressionsRequest"}, + "output":{ + "shape":"DescribeExpressionsResponse", + "resultWrapper":"DescribeExpressionsResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeIndexFields":{ + "name":"DescribeIndexFields", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeIndexFieldsRequest"}, + "output":{ + "shape":"DescribeIndexFieldsResponse", + "resultWrapper":"DescribeIndexFieldsResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeScalingParameters":{ + "name":"DescribeScalingParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeScalingParametersRequest"}, + "output":{ + "shape":"DescribeScalingParametersResponse", + "resultWrapper":"DescribeScalingParametersResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeServiceAccessPolicies":{ + "name":"DescribeServiceAccessPolicies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeServiceAccessPoliciesRequest"}, + "output":{ + "shape":"DescribeServiceAccessPoliciesResponse", + "resultWrapper":"DescribeServiceAccessPoliciesResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + 
"senderFault":true + }, + "exception":true + } + ] + }, + "DescribeSuggesters":{ + "name":"DescribeSuggesters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSuggestersRequest"}, + "output":{ + "shape":"DescribeSuggestersResponse", + "resultWrapper":"DescribeSuggestersResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "IndexDocuments":{ + "name":"IndexDocuments", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"IndexDocumentsRequest"}, + "output":{ + "shape":"IndexDocumentsResponse", + "resultWrapper":"IndexDocumentsResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "ListDomainNames":{ + "name":"ListDomainNames", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{ + "shape":"ListDomainNamesResponse", + "resultWrapper":"ListDomainNamesResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + } + ] + }, + "UpdateAvailabilityOptions":{ + "name":"UpdateAvailabilityOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateAvailabilityOptionsRequest"}, + "output":{ + "shape":"UpdateAvailabilityOptionsResponse", + "resultWrapper":"UpdateAvailabilityOptionsResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"InvalidTypeException", + "error":{ + "code":"InvalidType", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{ + "code":"LimitExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"DisabledOperationException", + "error":{ + "code":"DisabledAction", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "UpdateScalingParameters":{ + "name":"UpdateScalingParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateScalingParametersRequest"}, + "output":{ + "shape":"UpdateScalingParametersResponse", + "resultWrapper":"UpdateScalingParametersResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{ + "code":"LimitExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidTypeException", + "error":{ + "code":"InvalidType", + 
"httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "UpdateServiceAccessPolicies":{ + "name":"UpdateServiceAccessPolicies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateServiceAccessPoliciesRequest"}, + "output":{ + "shape":"UpdateServiceAccessPoliciesResponse", + "resultWrapper":"UpdateServiceAccessPoliciesResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{ + "code":"LimitExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidTypeException", + "error":{ + "code":"InvalidType", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + } + }, + "shapes":{ + "APIVersion":{"type":"string"}, + "ARN":{"type":"string"}, + "AccessPoliciesStatus":{ + "type":"structure", + "required":[ + "Options", + "Status" + ], + "members":{ + "Options":{"shape":"PolicyDocument"}, + "Status":{"shape":"OptionStatus"} + } + }, + "AlgorithmicStemming":{ + "type":"string", + "enum":[ + "none", + "minimal", + "light", + "full" + ] + }, + "AnalysisOptions":{ + "type":"structure", + "members":{ + "Synonyms":{"shape":"String"}, + "Stopwords":{"shape":"String"}, + "StemmingDictionary":{"shape":"String"}, + "JapaneseTokenizationDictionary":{"shape":"String"}, + "AlgorithmicStemming":{"shape":"AlgorithmicStemming"} + } + }, + "AnalysisScheme":{ + "type":"structure", + "required":[ + "AnalysisSchemeName", + "AnalysisSchemeLanguage" + ], + "members":{ + "AnalysisSchemeName":{"shape":"StandardName"}, + "AnalysisSchemeLanguage":{"shape":"AnalysisSchemeLanguage"}, + "AnalysisOptions":{"shape":"AnalysisOptions"} + } + }, + "AnalysisSchemeLanguage":{ + "type":"string", + "enum":[ + "ar", + "bg", + "ca", + "cs", + "da", + "de", + "el", + "en", + "es", + "eu", + "fa", + "fi", + "fr", + "ga", + "gl", + "he", + "hi", + "hu", + "hy", + "id", + "it", + "ja", + "ko", + "lv", + "mul", + "nl", + "no", + "pt", + "ro", + "ru", + "sv", + "th", + "tr", + "zh-Hans", + "zh-Hant" + ] + }, + "AnalysisSchemeStatus":{ + "type":"structure", + "required":[ + "Options", + "Status" + ], + "members":{ + "Options":{"shape":"AnalysisScheme"}, + "Status":{"shape":"OptionStatus"} + } + }, + "AnalysisSchemeStatusList":{ + "type":"list", + "member":{"shape":"AnalysisSchemeStatus"} + }, + "AvailabilityOptionsStatus":{ + "type":"structure", + "required":[ + "Options", + "Status" + ], + "members":{ + "Options":{"shape":"MultiAZ"}, + "Status":{"shape":"OptionStatus"} + } + }, + "BaseException":{ + "type":"structure", + "members":{ + "Code":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "Boolean":{"type":"boolean"}, + "BuildSuggestersRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"} + } + }, + "BuildSuggestersResponse":{ + "type":"structure", + "members":{ + "FieldNames":{"shape":"FieldNameList"} + } + }, + "CreateDomainRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"} + } + }, + "CreateDomainResponse":{ + "type":"structure", + "members":{ + "DomainStatus":{"shape":"DomainStatus"} + } + }, + 
"DateArrayOptions":{ + "type":"structure", + "members":{ + "DefaultValue":{"shape":"FieldValue"}, + "SourceFields":{"shape":"FieldNameCommaList"}, + "FacetEnabled":{"shape":"Boolean"}, + "SearchEnabled":{"shape":"Boolean"}, + "ReturnEnabled":{"shape":"Boolean"} + } + }, + "DateOptions":{ + "type":"structure", + "members":{ + "DefaultValue":{"shape":"FieldValue"}, + "SourceField":{"shape":"FieldName"}, + "FacetEnabled":{"shape":"Boolean"}, + "SearchEnabled":{"shape":"Boolean"}, + "ReturnEnabled":{"shape":"Boolean"}, + "SortEnabled":{"shape":"Boolean"} + } + }, + "DefineAnalysisSchemeRequest":{ + "type":"structure", + "required":[ + "DomainName", + "AnalysisScheme" + ], + "members":{ + "DomainName":{"shape":"DomainName"}, + "AnalysisScheme":{"shape":"AnalysisScheme"} + } + }, + "DefineAnalysisSchemeResponse":{ + "type":"structure", + "required":["AnalysisScheme"], + "members":{ + "AnalysisScheme":{"shape":"AnalysisSchemeStatus"} + } + }, + "DefineExpressionRequest":{ + "type":"structure", + "required":[ + "DomainName", + "Expression" + ], + "members":{ + "DomainName":{"shape":"DomainName"}, + "Expression":{"shape":"Expression"} + } + }, + "DefineExpressionResponse":{ + "type":"structure", + "required":["Expression"], + "members":{ + "Expression":{"shape":"ExpressionStatus"} + } + }, + "DefineIndexFieldRequest":{ + "type":"structure", + "required":[ + "DomainName", + "IndexField" + ], + "members":{ + "DomainName":{"shape":"DomainName"}, + "IndexField":{"shape":"IndexField"} + } + }, + "DefineIndexFieldResponse":{ + "type":"structure", + "required":["IndexField"], + "members":{ + "IndexField":{"shape":"IndexFieldStatus"} + } + }, + "DefineSuggesterRequest":{ + "type":"structure", + "required":[ + "DomainName", + "Suggester" + ], + "members":{ + "DomainName":{"shape":"DomainName"}, + "Suggester":{"shape":"Suggester"} + } + }, + "DefineSuggesterResponse":{ + "type":"structure", + "required":["Suggester"], + "members":{ + "Suggester":{"shape":"SuggesterStatus"} + } + }, + "DeleteAnalysisSchemeRequest":{ + "type":"structure", + "required":[ + "DomainName", + "AnalysisSchemeName" + ], + "members":{ + "DomainName":{"shape":"DomainName"}, + "AnalysisSchemeName":{"shape":"StandardName"} + } + }, + "DeleteAnalysisSchemeResponse":{ + "type":"structure", + "required":["AnalysisScheme"], + "members":{ + "AnalysisScheme":{"shape":"AnalysisSchemeStatus"} + } + }, + "DeleteDomainRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"} + } + }, + "DeleteDomainResponse":{ + "type":"structure", + "members":{ + "DomainStatus":{"shape":"DomainStatus"} + } + }, + "DeleteExpressionRequest":{ + "type":"structure", + "required":[ + "DomainName", + "ExpressionName" + ], + "members":{ + "DomainName":{"shape":"DomainName"}, + "ExpressionName":{"shape":"StandardName"} + } + }, + "DeleteExpressionResponse":{ + "type":"structure", + "required":["Expression"], + "members":{ + "Expression":{"shape":"ExpressionStatus"} + } + }, + "DeleteIndexFieldRequest":{ + "type":"structure", + "required":[ + "DomainName", + "IndexFieldName" + ], + "members":{ + "DomainName":{"shape":"DomainName"}, + "IndexFieldName":{"shape":"DynamicFieldName"} + } + }, + "DeleteIndexFieldResponse":{ + "type":"structure", + "required":["IndexField"], + "members":{ + "IndexField":{"shape":"IndexFieldStatus"} + } + }, + "DeleteSuggesterRequest":{ + "type":"structure", + "required":[ + "DomainName", + "SuggesterName" + ], + "members":{ + "DomainName":{"shape":"DomainName"}, + 
"SuggesterName":{"shape":"StandardName"} + } + }, + "DeleteSuggesterResponse":{ + "type":"structure", + "required":["Suggester"], + "members":{ + "Suggester":{"shape":"SuggesterStatus"} + } + }, + "DescribeAnalysisSchemesRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"}, + "AnalysisSchemeNames":{"shape":"StandardNameList"}, + "Deployed":{"shape":"Boolean"} + } + }, + "DescribeAnalysisSchemesResponse":{ + "type":"structure", + "required":["AnalysisSchemes"], + "members":{ + "AnalysisSchemes":{"shape":"AnalysisSchemeStatusList"} + } + }, + "DescribeAvailabilityOptionsRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"}, + "Deployed":{"shape":"Boolean"} + } + }, + "DescribeAvailabilityOptionsResponse":{ + "type":"structure", + "members":{ + "AvailabilityOptions":{"shape":"AvailabilityOptionsStatus"} + } + }, + "DescribeDomainsRequest":{ + "type":"structure", + "members":{ + "DomainNames":{"shape":"DomainNameList"} + } + }, + "DescribeDomainsResponse":{ + "type":"structure", + "required":["DomainStatusList"], + "members":{ + "DomainStatusList":{"shape":"DomainStatusList"} + } + }, + "DescribeExpressionsRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"}, + "ExpressionNames":{"shape":"StandardNameList"}, + "Deployed":{"shape":"Boolean"} + } + }, + "DescribeExpressionsResponse":{ + "type":"structure", + "required":["Expressions"], + "members":{ + "Expressions":{"shape":"ExpressionStatusList"} + } + }, + "DescribeIndexFieldsRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"}, + "FieldNames":{"shape":"DynamicFieldNameList"}, + "Deployed":{"shape":"Boolean"} + } + }, + "DescribeIndexFieldsResponse":{ + "type":"structure", + "required":["IndexFields"], + "members":{ + "IndexFields":{"shape":"IndexFieldStatusList"} + } + }, + "DescribeScalingParametersRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"} + } + }, + "DescribeScalingParametersResponse":{ + "type":"structure", + "required":["ScalingParameters"], + "members":{ + "ScalingParameters":{"shape":"ScalingParametersStatus"} + } + }, + "DescribeServiceAccessPoliciesRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"}, + "Deployed":{"shape":"Boolean"} + } + }, + "DescribeServiceAccessPoliciesResponse":{ + "type":"structure", + "required":["AccessPolicies"], + "members":{ + "AccessPolicies":{"shape":"AccessPoliciesStatus"} + } + }, + "DescribeSuggestersRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"}, + "SuggesterNames":{"shape":"StandardNameList"}, + "Deployed":{"shape":"Boolean"} + } + }, + "DescribeSuggestersResponse":{ + "type":"structure", + "required":["Suggesters"], + "members":{ + "Suggesters":{"shape":"SuggesterStatusList"} + } + }, + "DisabledOperationException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DisabledAction", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "DocumentSuggesterOptions":{ + "type":"structure", + "required":["SourceField"], + "members":{ + "SourceField":{"shape":"FieldName"}, + "FuzzyMatching":{"shape":"SuggesterFuzzyMatching"}, + "SortExpression":{"shape":"String"} + } + }, + "DomainId":{ + "type":"string", + "min":1, + "max":64 + }, + 
"DomainName":{ + "type":"string", + "min":3, + "max":28, + "pattern":"[a-z][a-z0-9\\-]+" + }, + "DomainNameList":{ + "type":"list", + "member":{"shape":"DomainName"} + }, + "DomainNameMap":{ + "type":"map", + "key":{"shape":"DomainName"}, + "value":{"shape":"APIVersion"} + }, + "DomainStatus":{ + "type":"structure", + "required":[ + "DomainId", + "DomainName", + "RequiresIndexDocuments" + ], + "members":{ + "DomainId":{"shape":"DomainId"}, + "DomainName":{"shape":"DomainName"}, + "ARN":{"shape":"ARN"}, + "Created":{"shape":"Boolean"}, + "Deleted":{"shape":"Boolean"}, + "DocService":{"shape":"ServiceEndpoint"}, + "SearchService":{"shape":"ServiceEndpoint"}, + "RequiresIndexDocuments":{"shape":"Boolean"}, + "Processing":{"shape":"Boolean"}, + "SearchInstanceType":{"shape":"SearchInstanceType"}, + "SearchPartitionCount":{"shape":"PartitionCount"}, + "SearchInstanceCount":{"shape":"InstanceCount"}, + "Limits":{"shape":"Limits"} + } + }, + "DomainStatusList":{ + "type":"list", + "member":{"shape":"DomainStatus"} + }, + "Double":{"type":"double"}, + "DoubleArrayOptions":{ + "type":"structure", + "members":{ + "DefaultValue":{"shape":"Double"}, + "SourceFields":{"shape":"FieldNameCommaList"}, + "FacetEnabled":{"shape":"Boolean"}, + "SearchEnabled":{"shape":"Boolean"}, + "ReturnEnabled":{"shape":"Boolean"} + } + }, + "DoubleOptions":{ + "type":"structure", + "members":{ + "DefaultValue":{"shape":"Double"}, + "SourceField":{"shape":"FieldName"}, + "FacetEnabled":{"shape":"Boolean"}, + "SearchEnabled":{"shape":"Boolean"}, + "ReturnEnabled":{"shape":"Boolean"}, + "SortEnabled":{"shape":"Boolean"} + } + }, + "DynamicFieldName":{ + "type":"string", + "min":1, + "max":64, + "pattern":"([a-z][a-z0-9_]*\\*?|\\*[a-z0-9_]*)" + }, + "DynamicFieldNameList":{ + "type":"list", + "member":{"shape":"DynamicFieldName"} + }, + "ErrorCode":{"type":"string"}, + "ErrorMessage":{"type":"string"}, + "Expression":{ + "type":"structure", + "required":[ + "ExpressionName", + "ExpressionValue" + ], + "members":{ + "ExpressionName":{"shape":"StandardName"}, + "ExpressionValue":{"shape":"ExpressionValue"} + } + }, + "ExpressionStatus":{ + "type":"structure", + "required":[ + "Options", + "Status" + ], + "members":{ + "Options":{"shape":"Expression"}, + "Status":{"shape":"OptionStatus"} + } + }, + "ExpressionStatusList":{ + "type":"list", + "member":{"shape":"ExpressionStatus"} + }, + "ExpressionValue":{ + "type":"string", + "min":1, + "max":10240 + }, + "FieldName":{ + "type":"string", + "min":1, + "max":64, + "pattern":"[a-z][a-z0-9_]*" + }, + "FieldNameCommaList":{ + "type":"string", + "pattern":"\\s*[a-z*][a-z0-9_]*\\*?\\s*(,\\s*[a-z*][a-z0-9_]*\\*?\\s*)*" + }, + "FieldNameList":{ + "type":"list", + "member":{"shape":"FieldName"} + }, + "FieldValue":{ + "type":"string", + "min":0, + "max":1024 + }, + "IndexDocumentsRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"} + } + }, + "IndexDocumentsResponse":{ + "type":"structure", + "members":{ + "FieldNames":{"shape":"FieldNameList"} + } + }, + "IndexField":{ + "type":"structure", + "required":[ + "IndexFieldName", + "IndexFieldType" + ], + "members":{ + "IndexFieldName":{"shape":"DynamicFieldName"}, + "IndexFieldType":{"shape":"IndexFieldType"}, + "IntOptions":{"shape":"IntOptions"}, + "DoubleOptions":{"shape":"DoubleOptions"}, + "LiteralOptions":{"shape":"LiteralOptions"}, + "TextOptions":{"shape":"TextOptions"}, + "DateOptions":{"shape":"DateOptions"}, + "LatLonOptions":{"shape":"LatLonOptions"}, + 
"IntArrayOptions":{"shape":"IntArrayOptions"}, + "DoubleArrayOptions":{"shape":"DoubleArrayOptions"}, + "LiteralArrayOptions":{"shape":"LiteralArrayOptions"}, + "TextArrayOptions":{"shape":"TextArrayOptions"}, + "DateArrayOptions":{"shape":"DateArrayOptions"} + } + }, + "IndexFieldStatus":{ + "type":"structure", + "required":[ + "Options", + "Status" + ], + "members":{ + "Options":{"shape":"IndexField"}, + "Status":{"shape":"OptionStatus"} + } + }, + "IndexFieldStatusList":{ + "type":"list", + "member":{"shape":"IndexFieldStatus"} + }, + "IndexFieldType":{ + "type":"string", + "enum":[ + "int", + "double", + "literal", + "text", + "date", + "latlon", + "int-array", + "double-array", + "literal-array", + "text-array", + "date-array" + ] + }, + "InstanceCount":{ + "type":"integer", + "min":1 + }, + "IntArrayOptions":{ + "type":"structure", + "members":{ + "DefaultValue":{"shape":"Long"}, + "SourceFields":{"shape":"FieldNameCommaList"}, + "FacetEnabled":{"shape":"Boolean"}, + "SearchEnabled":{"shape":"Boolean"}, + "ReturnEnabled":{"shape":"Boolean"} + } + }, + "IntOptions":{ + "type":"structure", + "members":{ + "DefaultValue":{"shape":"Long"}, + "SourceField":{"shape":"FieldName"}, + "FacetEnabled":{"shape":"Boolean"}, + "SearchEnabled":{"shape":"Boolean"}, + "ReturnEnabled":{"shape":"Boolean"}, + "SortEnabled":{"shape":"Boolean"} + } + }, + "InternalException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + "InvalidTypeException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidType", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "LatLonOptions":{ + "type":"structure", + "members":{ + "DefaultValue":{"shape":"FieldValue"}, + "SourceField":{"shape":"FieldName"}, + "FacetEnabled":{"shape":"Boolean"}, + "SearchEnabled":{"shape":"Boolean"}, + "ReturnEnabled":{"shape":"Boolean"}, + "SortEnabled":{"shape":"Boolean"} + } + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"LimitExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "Limits":{ + "type":"structure", + "required":[ + "MaximumReplicationCount", + "MaximumPartitionCount" + ], + "members":{ + "MaximumReplicationCount":{"shape":"MaximumReplicationCount"}, + "MaximumPartitionCount":{"shape":"MaximumPartitionCount"} + } + }, + "ListDomainNamesResponse":{ + "type":"structure", + "members":{ + "DomainNames":{"shape":"DomainNameMap"} + } + }, + "LiteralArrayOptions":{ + "type":"structure", + "members":{ + "DefaultValue":{"shape":"FieldValue"}, + "SourceFields":{"shape":"FieldNameCommaList"}, + "FacetEnabled":{"shape":"Boolean"}, + "SearchEnabled":{"shape":"Boolean"}, + "ReturnEnabled":{"shape":"Boolean"} + } + }, + "LiteralOptions":{ + "type":"structure", + "members":{ + "DefaultValue":{"shape":"FieldValue"}, + "SourceField":{"shape":"FieldName"}, + "FacetEnabled":{"shape":"Boolean"}, + "SearchEnabled":{"shape":"Boolean"}, + "ReturnEnabled":{"shape":"Boolean"}, + "SortEnabled":{"shape":"Boolean"} + } + }, + "Long":{"type":"long"}, + "MaximumPartitionCount":{ + "type":"integer", + "min":1 + }, + "MaximumReplicationCount":{ + "type":"integer", + "min":1 + }, + "MultiAZ":{"type":"boolean"}, + "OptionState":{ + "type":"string", + "enum":[ + "RequiresIndexDocuments", + "Processing", + "Active", + "FailedToValidate" + ] + }, + "OptionStatus":{ + "type":"structure", + "required":[ + "CreationDate", + "UpdateDate", + "State" + ], + 
"members":{ + "CreationDate":{"shape":"UpdateTimestamp"}, + "UpdateDate":{"shape":"UpdateTimestamp"}, + "UpdateVersion":{"shape":"UIntValue"}, + "State":{"shape":"OptionState"}, + "PendingDeletion":{"shape":"Boolean"} + } + }, + "PartitionCount":{ + "type":"integer", + "min":1 + }, + "PartitionInstanceType":{ + "type":"string", + "enum":[ + "search.m1.small", + "search.m1.large", + "search.m2.xlarge", + "search.m2.2xlarge", + "search.m3.medium", + "search.m3.large", + "search.m3.xlarge", + "search.m3.2xlarge" + ] + }, + "PolicyDocument":{"type":"string"}, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "ScalingParameters":{ + "type":"structure", + "members":{ + "DesiredInstanceType":{"shape":"PartitionInstanceType"}, + "DesiredReplicationCount":{"shape":"UIntValue"}, + "DesiredPartitionCount":{"shape":"UIntValue"} + } + }, + "ScalingParametersStatus":{ + "type":"structure", + "required":[ + "Options", + "Status" + ], + "members":{ + "Options":{"shape":"ScalingParameters"}, + "Status":{"shape":"OptionStatus"} + } + }, + "SearchInstanceType":{"type":"string"}, + "ServiceEndpoint":{ + "type":"structure", + "members":{ + "Endpoint":{"shape":"ServiceUrl"} + } + }, + "ServiceUrl":{"type":"string"}, + "StandardName":{ + "type":"string", + "min":1, + "max":64, + "pattern":"[a-z][a-z0-9_]*" + }, + "StandardNameList":{ + "type":"list", + "member":{"shape":"StandardName"} + }, + "String":{"type":"string"}, + "Suggester":{ + "type":"structure", + "required":[ + "SuggesterName", + "DocumentSuggesterOptions" + ], + "members":{ + "SuggesterName":{"shape":"StandardName"}, + "DocumentSuggesterOptions":{"shape":"DocumentSuggesterOptions"} + } + }, + "SuggesterFuzzyMatching":{ + "type":"string", + "enum":[ + "none", + "low", + "high" + ] + }, + "SuggesterStatus":{ + "type":"structure", + "required":[ + "Options", + "Status" + ], + "members":{ + "Options":{"shape":"Suggester"}, + "Status":{"shape":"OptionStatus"} + } + }, + "SuggesterStatusList":{ + "type":"list", + "member":{"shape":"SuggesterStatus"} + }, + "TextArrayOptions":{ + "type":"structure", + "members":{ + "DefaultValue":{"shape":"FieldValue"}, + "SourceFields":{"shape":"FieldNameCommaList"}, + "ReturnEnabled":{"shape":"Boolean"}, + "HighlightEnabled":{"shape":"Boolean"}, + "AnalysisScheme":{"shape":"Word"} + } + }, + "TextOptions":{ + "type":"structure", + "members":{ + "DefaultValue":{"shape":"FieldValue"}, + "SourceField":{"shape":"FieldName"}, + "ReturnEnabled":{"shape":"Boolean"}, + "SortEnabled":{"shape":"Boolean"}, + "HighlightEnabled":{"shape":"Boolean"}, + "AnalysisScheme":{"shape":"Word"} + } + }, + "UIntValue":{ + "type":"integer", + "min":0 + }, + "UpdateAvailabilityOptionsRequest":{ + "type":"structure", + "required":[ + "DomainName", + "MultiAZ" + ], + "members":{ + "DomainName":{"shape":"DomainName"}, + "MultiAZ":{"shape":"Boolean"} + } + }, + "UpdateAvailabilityOptionsResponse":{ + "type":"structure", + "members":{ + "AvailabilityOptions":{"shape":"AvailabilityOptionsStatus"} + } + }, + "UpdateScalingParametersRequest":{ + "type":"structure", + "required":[ + "DomainName", + "ScalingParameters" + ], + "members":{ + "DomainName":{"shape":"DomainName"}, + "ScalingParameters":{"shape":"ScalingParameters"} + } + }, + "UpdateScalingParametersResponse":{ + "type":"structure", + "required":["ScalingParameters"], + "members":{ + "ScalingParameters":{"shape":"ScalingParametersStatus"} + } + }, + 
"UpdateServiceAccessPoliciesRequest":{ + "type":"structure", + "required":[ + "DomainName", + "AccessPolicies" + ], + "members":{ + "DomainName":{"shape":"DomainName"}, + "AccessPolicies":{"shape":"PolicyDocument"} + } + }, + "UpdateServiceAccessPoliciesResponse":{ + "type":"structure", + "required":["AccessPolicies"], + "members":{ + "AccessPolicies":{"shape":"AccessPoliciesStatus"} + } + }, + "UpdateTimestamp":{"type":"timestamp"}, + "Word":{ + "type":"string", + "pattern":"[\\S]+" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudsearch/2013-01-01/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudsearch/2013-01-01/docs-2.json new file mode 100644 index 000000000..a4b126a7c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudsearch/2013-01-01/docs-2.json @@ -0,0 +1,865 @@ +{ + "version": "2.0", + "operations": { + "BuildSuggesters": "
Indexes the search suggestions. For more information, see Configuring Suggesters in the Amazon CloudSearch Developer Guide.", + "CreateDomain": "Creates a new search domain. For more information, see Creating a Search Domain in the Amazon CloudSearch Developer Guide.", + "DefineAnalysisScheme": "Configures an analysis scheme that can be applied to a text or text-array field to define language-specific text processing options. For more information, see Configuring Analysis Schemes in the Amazon CloudSearch Developer Guide.", + "DefineExpression": "Configures an Expression for the search domain. Used to create new expressions and modify existing ones. If the expression exists, the new configuration replaces the old one. For more information, see Configuring Expressions in the Amazon CloudSearch Developer Guide.", + "DefineIndexField": "Configures an IndexField for the search domain. Used to create new fields and modify existing ones. You must specify the name of the domain you are configuring and an index field configuration. The index field configuration specifies a unique name, the index field type, and the options you want to configure for the field. The options you can specify depend on the IndexFieldType. If the field exists, the new configuration replaces the old one. For more information, see Configuring Index Fields in the Amazon CloudSearch Developer Guide.", + "DefineSuggester": "Configures a suggester for a domain. A suggester enables you to display possible matches before users finish typing their queries. When you configure a suggester, you must specify the name of the text field you want to search for possible matches and a unique name for the suggester. For more information, see Getting Search Suggestions in the Amazon CloudSearch Developer Guide.", + "DeleteAnalysisScheme": "Deletes an analysis scheme. For more information, see Configuring Analysis Schemes in the Amazon CloudSearch Developer Guide.", + "DeleteDomain": "Permanently deletes a search domain and all of its data. Once a domain has been deleted, it cannot be recovered. For more information, see Deleting a Search Domain in the Amazon CloudSearch Developer Guide.", + "DeleteExpression": "Removes an Expression from the search domain. For more information, see Configuring Expressions in the Amazon CloudSearch Developer Guide.", + "DeleteIndexField": "Removes an IndexField from the search domain. For more information, see Configuring Index Fields in the Amazon CloudSearch Developer Guide.", + "DeleteSuggester": "Deletes a suggester. For more information, see Getting Search Suggestions in the Amazon CloudSearch Developer Guide.", + "DescribeAnalysisSchemes": "Gets the analysis schemes configured for a domain. An analysis scheme defines language-specific text processing options for a text field. Can be limited to specific analysis schemes by name. By default, shows all analysis schemes and includes any pending changes to the configuration. Set the Deployed option to true to show the active configuration and exclude pending changes. For more information, see Configuring Analysis Schemes in the Amazon CloudSearch Developer Guide.", + "DescribeAvailabilityOptions": "Gets the availability options configured for a domain. By default, shows the configuration with any pending changes. Set the Deployed option to true to show the active configuration and exclude pending changes. For more information, see Configuring Availability Options in the Amazon CloudSearch Developer Guide.", + "DescribeDomains": "Gets information about the search domains owned by this account. Can be limited to specific domains. Shows all domains by default. To get the number of searchable documents in a domain, use the console or submit a matchall request to your domain's search endpoint: q=matchall&amp;q.parser=structured&amp;size=0. For more information, see Getting Information about a Search Domain in the Amazon CloudSearch Developer Guide.", + "DescribeExpressions": "Gets the expressions configured for the search domain. Can be limited to specific expressions by name. By default, shows all expressions and includes any pending changes to the configuration. Set the Deployed option to true to show the active configuration and exclude pending changes. For more information, see Configuring Expressions in the Amazon CloudSearch Developer Guide.", + "DescribeIndexFields": "Gets information about the index fields configured for the search domain. Can be limited to specific fields by name. By default, shows all fields and includes any pending changes to the configuration. Set the Deployed option to true to show the active configuration and exclude pending changes. For more information, see Getting Domain Information in the Amazon CloudSearch Developer Guide.", + "DescribeScalingParameters": "Gets the scaling parameters configured for a domain. A domain's scaling parameters specify the desired search instance type and replication count. For more information, see Configuring Scaling Options in the Amazon CloudSearch Developer Guide.", + "DescribeServiceAccessPolicies": "Gets information about the access policies that control access to the domain's document and search endpoints. By default, shows the configuration with any pending changes. Set the Deployed option to true to show the active configuration and exclude pending changes. For more information, see Configuring Access for a Search Domain in the Amazon CloudSearch Developer Guide.", + "DescribeSuggesters": "Gets the suggesters configured for a domain. A suggester enables you to display possible matches before users finish typing their queries. Can be limited to specific suggesters by name. By default, shows all suggesters and includes any pending changes to the configuration. Set the Deployed option to true to show the active configuration and exclude pending changes. For more information, see Getting Search Suggestions in the Amazon CloudSearch Developer Guide.", + "IndexDocuments": "Tells the search domain to start indexing its documents using the latest indexing options. This operation must be invoked to activate options whose OptionStatus is RequiresIndexDocuments.", + "ListDomainNames": "Lists all search domains owned by an account.", + "UpdateAvailabilityOptions": "Configures the availability options for a domain. Enabling the Multi-AZ option expands an Amazon CloudSearch domain to an additional Availability Zone in the same Region to increase fault tolerance in the event of a service disruption. Changes to the Multi-AZ option can take about half an hour to become active. For more information, see Configuring Availability Options in the Amazon CloudSearch Developer Guide.", + "UpdateScalingParameters": "Configures scaling parameters for a domain. A domain's scaling parameters specify the desired search instance type and replication count. Amazon CloudSearch will still automatically scale your domain based on the volume of data and traffic, but not below the desired instance type and replication count. If the Multi-AZ option is enabled, these values control the resources used per Availability Zone. For more information, see Configuring Scaling Options in the Amazon CloudSearch Developer Guide.", + "UpdateServiceAccessPolicies": "Configures the access rules that control access to the domain's document and search endpoints. For more information, see Configuring Access for an Amazon CloudSearch Domain.", + }, + "service": "Amazon CloudSearch Configuration Service. You use the Amazon CloudSearch configuration service to create, configure, and manage search domains. Configuration service requests are submitted using the AWS Query protocol. AWS Query requests are HTTP or HTTPS requests submitted via HTTP GET or POST with a query parameter named Action. The endpoint for configuration service requests is region-specific: cloudsearch.region.amazonaws.com. For example, cloudsearch.us-east-1.amazonaws.com. For a current list of supported regions and endpoints, see Regions and Endpoints.", + "shapes": { + "APIVersion": { + "base": "
The Amazon CloudSearch API version for a domain: 2011-02-01 or 2013-01-01.", + "refs": { + "DomainNameMap$value": null + } + }, + "ARN": { + "base": "The Amazon Resource Name (ARN) of the search domain. See Identifiers for IAM Entities in Using AWS Identity and Access Management for more information.", + "refs": { + "DomainStatus$ARN": null + } + }, + "AccessPoliciesStatus": { + "base": "The configured access rules for the domain's document and search endpoints, and the current status of those rules.", + "refs": { + "DescribeServiceAccessPoliciesResponse$AccessPolicies": "The access rules configured for the domain specified in the request.", + "UpdateServiceAccessPoliciesResponse$AccessPolicies": "The access rules configured for the domain." + } + }, + "AlgorithmicStemming": { + "base": null, + "refs": { + "AnalysisOptions$AlgorithmicStemming": "The level of algorithmic stemming to perform: none, minimal, light, or full. The available levels vary depending on the language. For more information, see Language Specific Text Processing Settings in the Amazon CloudSearch Developer Guide" + } + }, + "AnalysisOptions": { + "base": "Synonyms, stopwords, and stemming options for an analysis scheme. Includes tokenization dictionary for Japanese.", + "refs": { + "AnalysisScheme$AnalysisOptions": null + } + }, + "AnalysisScheme": { + "base": "Configuration information for an analysis scheme. Each analysis scheme has a unique name and specifies the language of the text to be processed. The following options can be configured for an analysis scheme: Synonyms, Stopwords, StemmingDictionary, JapaneseTokenizationDictionary and AlgorithmicStemming.", + "refs": { + "AnalysisSchemeStatus$Options": null, + "DefineAnalysisSchemeRequest$AnalysisScheme": null + } + }, + "AnalysisSchemeLanguage": { + "base": "An IETF RFC 4646 language code or mul for multiple languages.", + "refs": { + "AnalysisScheme$AnalysisSchemeLanguage": null + } + }, + "AnalysisSchemeStatus": { + "base": "The status and configuration of an AnalysisScheme.", + "refs": { + "AnalysisSchemeStatusList$member": null, + "DefineAnalysisSchemeResponse$AnalysisScheme": null, + "DeleteAnalysisSchemeResponse$AnalysisScheme": "The status of the analysis scheme being deleted." + } + }, + "AnalysisSchemeStatusList": { + "base": "A list of the analysis schemes configured for a domain.", + "refs": { + "DescribeAnalysisSchemesResponse$AnalysisSchemes": "The analysis scheme descriptions." + } + }, + "AvailabilityOptionsStatus": { + "base": "The status and configuration of the domain's availability options.", + "refs": { + "DescribeAvailabilityOptionsResponse$AvailabilityOptions": "The availability options configured for the domain. Indicates whether Multi-AZ is enabled for the domain.", + "UpdateAvailabilityOptionsResponse$AvailabilityOptions": "The newly-configured availability options. Indicates whether Multi-AZ is enabled for the domain." + } + }, + "BaseException": { + "base": "An error occurred while processing the request.", + "refs": { + } + }, + "Boolean": { + "base": null, + "refs": { + "DateArrayOptions$FacetEnabled": "
Whether facet information can be returned for the field.", + "DateArrayOptions$SearchEnabled": "Whether the contents of the field are searchable.", + "DateArrayOptions$ReturnEnabled": "Whether the contents of the field can be returned in the search results.", + "DateOptions$FacetEnabled": "Whether facet information can be returned for the field.", + "DateOptions$SearchEnabled": "Whether the contents of the field are searchable.", + "DateOptions$ReturnEnabled": "Whether the contents of the field can be returned in the search results.", + "DateOptions$SortEnabled": "Whether the field can be used to sort the search results.", + "DescribeAnalysisSchemesRequest$Deployed": "Whether to display the deployed configuration (true) or include any pending changes (false). Defaults to false.", + "DescribeAvailabilityOptionsRequest$Deployed": "Whether to display the deployed configuration (true) or include any pending changes (false). Defaults to false.", + "DescribeExpressionsRequest$Deployed": "Whether to display the deployed configuration (true) or include any pending changes (false). Defaults to false.", + "DescribeIndexFieldsRequest$Deployed": "Whether to display the deployed configuration (true) or include any pending changes (false). Defaults to false.", + "DescribeServiceAccessPoliciesRequest$Deployed": "Whether to display the deployed configuration (true) or include any pending changes (false). Defaults to false.", + "DescribeSuggestersRequest$Deployed": "Whether to display the deployed configuration (true) or include any pending changes (false). Defaults to false.", + "DomainStatus$Created": "True if the search domain is created. It can take several minutes to initialize a domain when CreateDomain is called. Newly created search domains are returned from DescribeDomains with a false value for Created until domain creation is complete.", + "DomainStatus$Deleted": "True if the search domain has been deleted. The system must clean up resources dedicated to the search domain when DeleteDomain is called. Newly deleted search domains are returned from DescribeDomains with a true value for IsDeleted for several minutes until resource cleanup is complete.", + "DomainStatus$RequiresIndexDocuments": "True if IndexDocuments needs to be called to activate the current domain configuration.", + "DomainStatus$Processing": "True if processing is being done to activate the current domain configuration.", + "DoubleArrayOptions$FacetEnabled": "Whether facet information can be returned for the field.", + "DoubleArrayOptions$SearchEnabled": "Whether the contents of the field are searchable.", + "DoubleArrayOptions$ReturnEnabled": "Whether the contents of the field can be returned in the search results.", + "DoubleOptions$FacetEnabled": "Whether facet information can be returned for the field.", + "DoubleOptions$SearchEnabled": "Whether the contents of the field are searchable.", + "DoubleOptions$ReturnEnabled": "Whether the contents of the field can be returned in the search results.", + "DoubleOptions$SortEnabled": "Whether the field can be used to sort the search results.", + "IntArrayOptions$FacetEnabled": "Whether facet information can be returned for the field.", + "IntArrayOptions$SearchEnabled": "Whether the contents of the field are searchable.", + "IntArrayOptions$ReturnEnabled": "Whether the contents of the field can be returned in the search results.", + "IntOptions$FacetEnabled": "Whether facet information can be returned for the field.", + "IntOptions$SearchEnabled": "Whether the contents of the field are searchable.", + "IntOptions$ReturnEnabled": "Whether the contents of the field can be returned in the search results.", + "IntOptions$SortEnabled": "Whether the field can be used to sort the search results.", + "LatLonOptions$FacetEnabled": "Whether facet information can be returned for the field.", + "LatLonOptions$SearchEnabled": "Whether the contents of the field are searchable.", + "LatLonOptions$ReturnEnabled": "Whether the contents of the field can be returned in the search results.", + "LatLonOptions$SortEnabled": "Whether the field can be used to sort the search results.", + "LiteralArrayOptions$FacetEnabled": "Whether facet information can be returned for the field.", + "LiteralArrayOptions$SearchEnabled": "Whether the contents of the field are searchable.", + "LiteralArrayOptions$ReturnEnabled": "Whether the contents of the field can be returned in the search results.", + "LiteralOptions$FacetEnabled": "Whether facet information can be returned for the field.", + "LiteralOptions$SearchEnabled": "Whether the contents of the field are searchable.", + "LiteralOptions$ReturnEnabled": "Whether the contents of the field can be returned in the search results.", + "LiteralOptions$SortEnabled": "Whether the field can be used to sort the search results.", + "OptionStatus$PendingDeletion": "Indicates that the option will be deleted once processing is complete.", + "TextArrayOptions$ReturnEnabled": "Whether the contents of the field can be returned in the search results.", + "TextArrayOptions$HighlightEnabled": "Whether highlights can be returned for the field.", + "TextOptions$ReturnEnabled": "Whether the contents of the field can be returned in the search results.", + "TextOptions$SortEnabled": "Whether the field can be used to sort the search results.", + "TextOptions$HighlightEnabled": "Whether highlights can be returned for the field.", + "UpdateAvailabilityOptionsRequest$MultiAZ": "You expand an existing search domain to a second Availability Zone by setting the Multi-AZ option to true. Similarly, you can turn off the Multi-AZ option to downgrade the domain to a single Availability Zone by setting the Multi-AZ option to false." + } + }, + "BuildSuggestersRequest": { + "base": "
Container for the parameters to the BuildSuggester operation. Specifies the name of the domain you want to update.", + "refs": { + } + }, + "BuildSuggestersResponse": { + "base": "The result of a BuildSuggester request. Contains a list of the fields used for suggestions.", + "refs": { + } + }, + "CreateDomainRequest": { + "base": "Container for the parameters to the CreateDomain operation. Specifies a name for the new search domain.", + "refs": { + } + }, + "CreateDomainResponse": { + "base": "The result of a CreateDomainRequest. Contains the status of a newly created domain.", + "refs": { + } + }, + "DateArrayOptions": { + "base": "Options for a field that contains an array of dates. Present if IndexFieldType specifies the field is of type date-array. All options are enabled by default.", + "refs": { + "IndexField$DateArrayOptions": null + } + }, + "DateOptions": { + "base": "Options for a date field. Dates and times are specified in UTC (Coordinated Universal Time) according to IETF RFC3339: yyyy-mm-ddT00:00:00Z. Present if IndexFieldType specifies the field is of type date. All options are enabled by default.", + "refs": { + "IndexField$DateOptions": null + } + }, + "DefineAnalysisSchemeRequest": { + "base": "Container for the parameters to the DefineAnalysisScheme operation. Specifies the name of the domain you want to update and the analysis scheme configuration.", + "refs": { + } + }, + "DefineAnalysisSchemeResponse": { + "base": "The result of a DefineAnalysisScheme request. Contains the status of the newly-configured analysis scheme.", + "refs": { + } + }, + "DefineExpressionRequest": { + "base": "Container for the parameters to the DefineExpression operation. Specifies the name of the domain you want to update and the expression you want to configure.", + "refs": { + } + }, + "DefineExpressionResponse": { + "base": "The result of a DefineExpression request. Contains the status of the newly-configured expression.", + "refs": { + } + }, + "DefineIndexFieldRequest": { + "base": "Container for the parameters to the DefineIndexField operation. Specifies the name of the domain you want to update and the index field configuration.", + "refs": { + } + }, + "DefineIndexFieldResponse": { + "base": "The result of a DefineIndexField request. Contains the status of the newly-configured index field.", + "refs": { + } + }, + "DefineSuggesterRequest": { + "base": "Container for the parameters to the DefineSuggester operation. Specifies the name of the domain you want to update and the suggester configuration.", + "refs": { + } + }, + "DefineSuggesterResponse": { + "base": "The result of a DefineSuggester request. Contains the status of the newly-configured suggester.", + "refs": { + } + }, + "DeleteAnalysisSchemeRequest": { + "base": "Container for the parameters to the DeleteAnalysisScheme operation. Specifies the name of the domain you want to update and the analysis scheme you want to delete.", + "refs": { + } + }, + "DeleteAnalysisSchemeResponse": { + "base": "The result of a DeleteAnalysisScheme request. Contains the status of the deleted analysis scheme.", + "refs": { + } + }, + "DeleteDomainRequest": { + "base": "Container for the parameters to the DeleteDomain operation. Specifies the name of the domain you want to delete.", + "refs": { + } + }, + "DeleteDomainResponse": { + "base": "The result of a DeleteDomain request. Contains the status of a newly deleted domain, or no status if the domain has already been completely deleted.", + "refs": { + } + }, + "DeleteExpressionRequest": { + "base": "Container for the parameters to the DeleteExpression operation. Specifies the name of the domain you want to update and the name of the expression you want to delete.", + "refs": { + } + }, + "DeleteExpressionResponse": { + "base": "The result of a DeleteExpression request. Specifies the expression being deleted.", + "refs": { + } + }, + "DeleteIndexFieldRequest": { + "base": "Container for the parameters to the DeleteIndexField operation. Specifies the name of the domain you want to update and the name of the index field you want to delete.", + "refs": { + } + }, + "DeleteIndexFieldResponse": { + "base": "The result of a DeleteIndexField request.", + "refs": { + } + }, + "DeleteSuggesterRequest": { + "base": "Container for the parameters to the DeleteSuggester operation. Specifies the name of the domain you want to update and name of the suggester you want to delete.", + "refs": { + } + }, + "DeleteSuggesterResponse": { + "base": "The result of a DeleteSuggester request. Contains the status of the deleted suggester.", + "refs": { + } + }, + "DescribeAnalysisSchemesRequest": { + "base": "Container for the parameters to the DescribeAnalysisSchemes operation. Specifies the name of the domain you want to describe. To limit the response to particular analysis schemes, specify the names of the analysis schemes you want to describe. To show the active configuration and exclude any pending changes, set the Deployed option to true.", + "refs": { + } + }, + "DescribeAnalysisSchemesResponse": { + "base": "The result of a DescribeAnalysisSchemes request. Contains the analysis schemes configured for the domain specified in the request.", + "refs": { + } + }, + "DescribeAvailabilityOptionsRequest": { + "base": "Container for the parameters to the DescribeAvailabilityOptions operation. Specifies the name of the domain you want to describe. To show the active configuration and exclude any pending changes, set the Deployed option to true.", + "refs": { + } + }, + "DescribeAvailabilityOptionsResponse": { + "base": "The result of a DescribeAvailabilityOptions request. Indicates whether or not the Multi-AZ option is enabled for the domain specified in the request.", + "refs": { + } + }, + "DescribeDomainsRequest": { + "base": "Container for the parameters to the DescribeDomains operation. By default shows the status of all domains. To restrict the response to particular domains, specify the names of the domains you want to describe.", + "refs": { + } + }, + "DescribeDomainsResponse": { + "base": "The result of a DescribeDomains request. Contains the status of the domains specified in the request or all domains owned by the account.", + "refs": { + } + }, + "DescribeExpressionsRequest": { + "base": "Container for the parameters to the DescribeDomains operation. Specifies the name of the domain you want to describe. To restrict the response to particular expressions, specify the names of the expressions you want to describe. To show the active configuration and exclude any pending changes, set the Deployed option to true.", + "refs": { + } + }, + "DescribeExpressionsResponse": { + "base": "The result of a DescribeExpressions request. Contains the expressions configured for the domain specified in the request.", + "refs": { + } + }, + "DescribeIndexFieldsRequest": { + "base": "Container for the parameters to the DescribeIndexFields operation. Specifies the name of the domain you want to describe. To restrict the response to particular index fields, specify the names of the index fields you want to describe. To show the active configuration and exclude any pending changes, set the Deployed option to true.", + "refs": { + } + }, + "DescribeIndexFieldsResponse": { + "base": "The result of a DescribeIndexFields request. Contains the index fields configured for the domain specified in the request.", + "refs": { + } + }, + "DescribeScalingParametersRequest": { + "base": "Container for the parameters to the DescribeScalingParameters operation. Specifies the name of the domain you want to describe.", + "refs": { + } + }, + "DescribeScalingParametersResponse": { + "base": "The result of a DescribeScalingParameters request. Contains the scaling parameters configured for the domain specified in the request.", + "refs": { + } + }, + "DescribeServiceAccessPoliciesRequest": { + "base": "Container for the parameters to the DescribeServiceAccessPolicies operation. Specifies the name of the domain you want to describe. To show the active configuration and exclude any pending changes, set the Deployed option to true.", + "refs": { + } + }, + "DescribeServiceAccessPoliciesResponse": { + "base": "The result of a DescribeServiceAccessPolicies request.", + "refs": { + } + }, + "DescribeSuggestersRequest": { + "base": "Container for the parameters to the DescribeSuggester operation. Specifies the name of the domain you want to describe. To restrict the response to particular suggesters, specify the names of the suggesters you want to describe. To show the active configuration and exclude any pending changes, set the Deployed option to true.", + "refs": { + } + }, + "DescribeSuggestersResponse": { + "base": "The result of a DescribeSuggesters request.", + "refs": { + } + }, + "DisabledOperationException": { + "base": "The request was rejected because it attempted an operation which is not enabled.", + "refs": { + } + }, + "DocumentSuggesterOptions": { + "base": "
Options for a search suggester.", + "refs": { + "Suggester$DocumentSuggesterOptions": null + } + }, + "DomainId": { + "base": "An internally generated unique identifier for a domain.", + "refs": { + "DomainStatus$DomainId": null + } + }, + "DomainName": { + "base": "A string that represents the name of a domain. Domain names are unique across the domains owned by an account within an AWS region. Domain names start with a letter or number and can contain the following characters: a-z (lowercase), 0-9, and - (hyphen).", + "refs": { + "BuildSuggestersRequest$DomainName": null, + "CreateDomainRequest$DomainName": "A name for the domain you are creating. Allowed characters are a-z (lower-case letters), 0-9, and hyphen (-). Domain names must start with a letter or number and be at least 3 and no more than 28 characters long.", + "DefineAnalysisSchemeRequest$DomainName": null, + "DefineExpressionRequest$DomainName": null, + "DefineIndexFieldRequest$DomainName": null, + "DefineSuggesterRequest$DomainName": null, + "DeleteAnalysisSchemeRequest$DomainName": null, + "DeleteDomainRequest$DomainName": "The name of the domain you want to permanently delete.", + "DeleteExpressionRequest$DomainName": null, + "DeleteIndexFieldRequest$DomainName": null, + "DeleteSuggesterRequest$DomainName": null, + "DescribeAnalysisSchemesRequest$DomainName": "The name of the domain you want to describe.", + "DescribeAvailabilityOptionsRequest$DomainName": "The name of the domain you want to describe.", + "DescribeExpressionsRequest$DomainName": "The name of the domain you want to describe.", + "DescribeIndexFieldsRequest$DomainName": "The name of the domain you want to describe.", + "DescribeScalingParametersRequest$DomainName": null, + "DescribeServiceAccessPoliciesRequest$DomainName": "The name of the domain you want to describe.", + "DescribeSuggestersRequest$DomainName": "The name of the domain you want to describe.", + "DomainNameList$member": null, + "DomainNameMap$key": null, + "DomainStatus$DomainName": null, + "IndexDocumentsRequest$DomainName": null, + "UpdateAvailabilityOptionsRequest$DomainName": null, + "UpdateScalingParametersRequest$DomainName": null, + "UpdateServiceAccessPoliciesRequest$DomainName": null + } + }, + "DomainNameList": { + "base": "A list of domain names.", + "refs": { + "DescribeDomainsRequest$DomainNames": "The names of the domains you want to include in the response." + } + }, + "DomainNameMap": { + "base": "A collection of domain names.", + "refs": { + "ListDomainNamesResponse$DomainNames": "The names of the search domains owned by an account." + } + }, + "DomainStatus": { + "base": "The current status of the search domain.", + "refs": { + "CreateDomainResponse$DomainStatus": null, + "DeleteDomainResponse$DomainStatus": null, + "DomainStatusList$member": null + } + }, + "DomainStatusList": { + "base": "A list that contains the status of each requested domain.", + "refs": { + "DescribeDomainsResponse$DomainStatusList": null + } + }, + "Double": { + "base": null, + "refs": { + "DoubleArrayOptions$DefaultValue": "A value to use for the field if the field isn't specified for a document.", + "DoubleOptions$DefaultValue": "A value to use for the field if the field isn't specified for a document. This can be important if you are using the field in an expression and that field is not present in every document." + } + }, + "DoubleArrayOptions": { + "base": "Options for a field that contains an array of double-precision 64-bit floating point values. Present if IndexFieldType specifies the field is of type double-array. All options are enabled by default.", + "refs": { + "IndexField$DoubleArrayOptions": null + } + }, + "DoubleOptions": { + "base": "Options for a double-precision 64-bit floating point field. Present if IndexFieldType specifies the field is of type double. All options are enabled by default.", + "refs": { + "IndexField$DoubleOptions": null + } + }, + "DynamicFieldName": { + "base": null, + "refs": { + "DeleteIndexFieldRequest$IndexFieldName": "The name of the index field your want to remove from the domain's indexing options.", + "DynamicFieldNameList$member": null, + "IndexField$IndexFieldName": "A string that represents the name of an index field. CloudSearch supports regular index fields as well as dynamic fields. A dynamic field's name defines a pattern that begins or ends with a wildcard. Any document fields that don't map to a regular index field but do match a dynamic field's pattern are configured with the dynamic field's indexing options. Regular field names begin with a letter and can contain the following characters: a-z (lowercase), 0-9, and _ (underscore). Dynamic field names must begin or end with a wildcard (*). The wildcard can also be the only character in a dynamic field name. Multiple wildcards, and wildcards embedded within a string are not supported. The name score is reserved and cannot be used as a field name. To reference a document's ID, you can use the name _id." + } + }, + "DynamicFieldNameList": { + "base": null, + "refs": { + "DescribeIndexFieldsRequest$FieldNames": "A list of the index fields you want to describe. If not specified, information is returned for all configured index fields." + } + }, + "ErrorCode": { + "base": "A machine-parsable string error or warning code.", + "refs": { + "BaseException$Code": null + } + }, + "ErrorMessage": { + "base": "A human-readable string error or warning message.", + "refs": { + "BaseException$Message": null + } + }, + "Expression": { + "base": "A named expression that can be evaluated at search time. Can be used to sort the search results, define other expressions, or return computed information in the search results.", + "refs": { + "DefineExpressionRequest$Expression": null, + "ExpressionStatus$Options": "The expression that is evaluated for sorting while processing a search request." + } + }, + "ExpressionStatus": { + "base": "The value of an Expression and its current status.", + "refs": { + "DefineExpressionResponse$Expression": null, + "DeleteExpressionResponse$Expression": "The status of the expression being deleted.", + "ExpressionStatusList$member": null + } + }, + "ExpressionStatusList": { + "base": "Contains the status of multiple expressions.", + "refs": { + "DescribeExpressionsResponse$Expressions": "The expressions configured for the domain." + } + }, + "ExpressionValue": { + "base": "The expression to evaluate for sorting while processing a search request. The Expression syntax is based on JavaScript expressions. For more information, see Configuring Expressions in the Amazon CloudSearch Developer Guide.", + "refs": { + "Expression$ExpressionValue": null + } + }, + "FieldName": { + "base": "A string that represents the name of an index field. CloudSearch supports regular index fields as well as dynamic fields. A dynamic field's name defines a pattern that begins or ends with a wildcard. Any document fields that don't map to a regular index field but do match a dynamic field's pattern are configured with the dynamic field's indexing options. Regular field names begin with a letter and can contain the following characters: a-z (lowercase), 0-9, and _ (underscore). Dynamic field names must begin or end with a wildcard (*). The wildcard can also be the only character in a dynamic field name. Multiple wildcards, and wildcards embedded within a string are not supported. The name score is reserved and cannot be used as a field name. To reference a document's ID, you can use the name _id.", + "refs": { + "DateOptions$SourceField": null, + "DocumentSuggesterOptions$SourceField": "
    The name of the index field you want to use for suggestions.

    ", + "DoubleOptions$SourceField": "

    The name of the source field to map to the field.

    ", + "FieldNameList$member": null, + "IntOptions$SourceField": "

    The name of the source field to map to the field.

    ", + "LatLonOptions$SourceField": null, + "LiteralOptions$SourceField": null, + "TextOptions$SourceField": null + } + }, + "FieldNameCommaList": { + "base": null, + "refs": { + "DateArrayOptions$SourceFields": "

    A list of source fields to map to the field.

    ", + "DoubleArrayOptions$SourceFields": "

    A list of source fields to map to the field.

    ", + "IntArrayOptions$SourceFields": "

    A list of source fields to map to the field.

    ", + "LiteralArrayOptions$SourceFields": "

    A list of source fields to map to the field.

    ", + "TextArrayOptions$SourceFields": "

    A list of source fields to map to the field.

    " + } + }, + "FieldNameList": { + "base": "

    A list of field names.

    ", + "refs": { + "BuildSuggestersResponse$FieldNames": null, + "IndexDocumentsResponse$FieldNames": "

    The names of the fields that are currently being indexed.

    " + } + }, + "FieldValue": { + "base": "

    The value of a field attribute.

    ", + "refs": { + "DateArrayOptions$DefaultValue": "A value to use for the field if the field isn't specified for a document.", + "DateOptions$DefaultValue": "A value to use for the field if the field isn't specified for a document.", + "LatLonOptions$DefaultValue": "A value to use for the field if the field isn't specified for a document.", + "LiteralArrayOptions$DefaultValue": "A value to use for the field if the field isn't specified for a document.", + "LiteralOptions$DefaultValue": "A value to use for the field if the field isn't specified for a document.", + "TextArrayOptions$DefaultValue": "A value to use for the field if the field isn't specified for a document.", + "TextOptions$DefaultValue": "A value to use for the field if the field isn't specified for a document." + } + }, + "IndexDocumentsRequest": { + "base": "

    Container for the parameters to the IndexDocuments operation. Specifies the name of the domain you want to re-index.

    ", + "refs": { + } + }, + "IndexDocumentsResponse": { + "base": "

    The result of an IndexDocuments request. Contains the status of the indexing operation, including the fields being indexed.

    ", + "refs": { + } + }, + "IndexField": { + "base": "

    Configuration information for a field in the index, including its name, type, and options. The supported options depend on the IndexFieldType.

    ", + "refs": { + "DefineIndexFieldRequest$IndexField": "

    The index field and field options you want to configure.

    ", + "IndexFieldStatus$Options": null + } + }, + "IndexFieldStatus": { + "base": "

    The value of an IndexField and its current status.

    ", + "refs": { + "DefineIndexFieldResponse$IndexField": null, + "DeleteIndexFieldResponse$IndexField": "

    The status of the index field being deleted.

    ", + "IndexFieldStatusList$member": null + } + }, + "IndexFieldStatusList": { + "base": "

    Contains the status of multiple index fields.

    ", + "refs": { + "DescribeIndexFieldsResponse$IndexFields": "

    The index fields configured for the domain.

    " + } + }, + "IndexFieldType": { + "base": "

    The type of field. The valid options for a field depend on the field type. For more information about the supported field types, see Configuring Index Fields in the Amazon CloudSearch Developer Guide.

    ", + "refs": { + "IndexField$IndexFieldType": null + } + }, + "InstanceCount": { + "base": null, + "refs": { + "DomainStatus$SearchInstanceCount": "

    The number of search instances that are available to process search requests.

    " + } + }, + "IntArrayOptions": { + "base": "

    Options for a field that contains an array of 64-bit signed integers. Present if IndexFieldType specifies the field is of type int-array. All options are enabled by default.

    ", + "refs": { + "IndexField$IntArrayOptions": null + } + }, + "IntOptions": { + "base": "

    Options for a 64-bit signed integer field. Present if IndexFieldType specifies the field is of type int. All options are enabled by default.

    ", + "refs": { + "IndexField$IntOptions": null + } + }, + "InternalException": { + "base": "

    An internal error occurred while processing the request. If this problem persists, report an issue from the Service Health Dashboard.

    ", + "refs": { + } + }, + "InvalidTypeException": { + "base": "

    The request was rejected because it specified an invalid type definition.

    ", + "refs": { + } + }, + "LatLonOptions": { + "base": "

    Options for a latlon field. A latlon field contains a location stored as a latitude and longitude value pair. Present if IndexFieldType specifies the field is of type latlon. All options are enabled by default.

    ", + "refs": { + "IndexField$LatLonOptions": null + } + }, + "LimitExceededException": { + "base": "

    The request was rejected because a resource limit has already been met.

    ", + "refs": { + } + }, + "Limits": { + "base": null, + "refs": { + "DomainStatus$Limits": null + } + }, + "ListDomainNamesResponse": { + "base": "

    The result of a ListDomainNames request. Contains a list of the domains owned by an account.

    ", + "refs": { + } + }, + "LiteralArrayOptions": { + "base": "

    Options for a field that contains an array of literal strings. Present if IndexFieldType specifies the field is of type literal-array. All options are enabled by default.

    ", + "refs": { + "IndexField$LiteralArrayOptions": null + } + }, + "LiteralOptions": { + "base": "

    Options for a literal field. Present if IndexFieldType specifies the field is of type literal. All options are enabled by default.

    ", + "refs": { + "IndexField$LiteralOptions": null + } + }, + "Long": { + "base": null, + "refs": { + "IntArrayOptions$DefaultValue": "A value to use for the field if the field isn't specified for a document.", + "IntOptions$DefaultValue": "A value to use for the field if the field isn't specified for a document. This can be important if you are using the field in an expression and that field is not present in every document." + } + }, + "MaximumPartitionCount": { + "base": null, + "refs": { + "Limits$MaximumPartitionCount": null + } + }, + "MaximumReplicationCount": { + "base": null, + "refs": { + "Limits$MaximumReplicationCount": null + } + }, + "MultiAZ": { + "base": null, + "refs": { + "AvailabilityOptionsStatus$Options": "

    The availability options configured for the domain.

    " + } + }, + "OptionState": { + "base": "

    The state of processing a change to an option. One of:

    • RequiresIndexDocuments: The option's latest value will not be deployed until IndexDocuments has been called and indexing is complete.
    • Processing: The option's latest value is in the process of being activated.
    • Active: The option's latest value is fully deployed.
    • FailedToValidate: The option value is not compatible with the domain's data and cannot be used to index the data. You must either modify the option value or update or remove the incompatible documents.
    ", + "refs": { + "OptionStatus$State": "

    The state of processing a change to an option. Possible values:

    • RequiresIndexDocuments: the option's latest value will not be deployed until IndexDocuments has been called and indexing is complete.
    • Processing: the option's latest value is in the process of being activated.
    • Active: the option's latest value is completely deployed.
    • FailedToValidate: the option value is not compatible with the domain's data and cannot be used to index the data. You must either modify the option value or update or remove the incompatible documents.
    " + } + }, + "OptionStatus": { + "base": "

    The status of a domain configuration option.

    ", + "refs": { + "AccessPoliciesStatus$Status": null, + "AnalysisSchemeStatus$Status": null, + "AvailabilityOptionsStatus$Status": null, + "ExpressionStatus$Status": null, + "IndexFieldStatus$Status": null, + "ScalingParametersStatus$Status": null, + "SuggesterStatus$Status": null + } + }, + "PartitionCount": { + "base": "

    The number of partitions used to hold the domain's index.

    ", + "refs": { + "DomainStatus$SearchPartitionCount": "

    The number of partitions across which the search index is spread.

    " + } + }, + "PartitionInstanceType": { + "base": "

    The instance type (such as search.m1.small) on which an index partition is hosted.

    ", + "refs": { + "ScalingParameters$DesiredInstanceType": "

    The instance type that you want to preconfigure for your domain. For example, search.m1.small.

    " + } + }, + "PolicyDocument": { + "base": "

    Access rules for a domain's document or search service endpoints. For more information, see Configuring Access for a Search Domain in the Amazon CloudSearch Developer Guide. The maximum size of a policy document is 100 KB.

    ", + "refs": { + "AccessPoliciesStatus$Options": null, + "UpdateServiceAccessPoliciesRequest$AccessPolicies": "

    The access rules you want to configure. These rules replace any existing rules.

    " + } + }, + "ResourceNotFoundException": { + "base": "

    The request was rejected because it attempted to reference a resource that does not exist.

    ", + "refs": { + } + }, + "ScalingParameters": { + "base": "

    The desired instance type and desired number of replicas of each index partition.

    ", + "refs": { + "ScalingParametersStatus$Options": null, + "UpdateScalingParametersRequest$ScalingParameters": null + } + }, + "ScalingParametersStatus": { + "base": "

    The status and configuration of a search domain's scaling parameters.

    ", + "refs": { + "DescribeScalingParametersResponse$ScalingParameters": null, + "UpdateScalingParametersResponse$ScalingParameters": null + } + }, + "SearchInstanceType": { + "base": "

    The instance type (such as search.m1.small) that is being used to process search requests.

    ", + "refs": { + "DomainStatus$SearchInstanceType": "

    The instance type that is being used to process search requests.

    " + } + }, + "ServiceEndpoint": { + "base": "

    The endpoint to which service requests can be submitted.

    ", + "refs": { + "DomainStatus$DocService": "

    The service endpoint for updating documents in a search domain.

    ", + "DomainStatus$SearchService": "

    The service endpoint for requesting search results from a search domain.

    " + } + }, + "ServiceUrl": { + "base": "

    The endpoint to which service requests can be submitted. For example, search-imdb-movies-oopcnjfn6ugofer3zx5iadxxca.eu-west-1.cloudsearch.amazonaws.com or doc-imdb-movies-oopcnjfn6ugofer3zx5iadxxca.eu-west-1.cloudsearch.amazonaws.com.

    ", + "refs": { + "ServiceEndpoint$Endpoint": null + } + }, + "StandardName": { + "base": "

    Names must begin with a letter and can contain the following characters: a-z (lowercase), 0-9, and _ (underscore).

    ", + "refs": { + "AnalysisScheme$AnalysisSchemeName": null, + "DeleteAnalysisSchemeRequest$AnalysisSchemeName": "

    The name of the analysis scheme you want to delete.

    ", + "DeleteExpressionRequest$ExpressionName": "

    The name of the Expression to delete.

    ", + "DeleteSuggesterRequest$SuggesterName": "

    Specifies the name of the suggester you want to delete.

    ", + "Expression$ExpressionName": null, + "StandardNameList$member": null, + "Suggester$SuggesterName": null + } + }, + "StandardNameList": { + "base": null, + "refs": { + "DescribeAnalysisSchemesRequest$AnalysisSchemeNames": "

    The analysis schemes you want to describe.

    ", + "DescribeExpressionsRequest$ExpressionNames": "

    Limits the DescribeExpressions response to the specified expressions. If not specified, all expressions are shown.

    ", + "DescribeSuggestersRequest$SuggesterNames": "

    The suggesters you want to describe.

    " + } + }, + "String": { + "base": null, + "refs": { + "AnalysisOptions$Synonyms": "

    A JSON object that defines synonym groups and aliases. A synonym group is an array of arrays, where each sub-array is a group of terms where each term in the group is considered a synonym of every other term in the group. The aliases value is an object that contains a collection of string:value pairs where the string specifies a term and the array of values specifies each of the aliases for that term. An alias is considered a synonym of the specified term, but the term is not considered a synonym of the alias. For more information about specifying synonyms, see Synonyms in the Amazon CloudSearch Developer Guide.

    ", + "AnalysisOptions$Stopwords": "

    A JSON array of terms to ignore during indexing and searching. For example, [\"a\", \"an\", \"the\", \"of\"]. The stopwords dictionary must explicitly list each word you want to ignore. Wildcards and regular expressions are not supported.

    ", + "AnalysisOptions$StemmingDictionary": "

    A JSON object that contains a collection of string:value pairs that each map a term to its stem. For example, {\"term1\": \"stem1\", \"term2\": \"stem2\", \"term3\": \"stem3\"}. The stemming dictionary is applied in addition to any algorithmic stemming. This enables you to override the results of the algorithmic stemming to correct specific cases of overstemming or understemming. The maximum size of a stemming dictionary is 500 KB.

    ", + "AnalysisOptions$JapaneseTokenizationDictionary": "

    A JSON array that contains a collection of terms, tokens, readings, and parts of speech for Japanese tokenization. The Japanese tokenization dictionary enables you to override the default tokenization for selected terms. This is only valid for Japanese language fields.

    ", + "DocumentSuggesterOptions$SortExpression": "

    An expression that computes a score for each suggestion to control how they are sorted. The scores are rounded to the nearest integer, with a floor of 0 and a ceiling of 2^31-1. A document's relevance score is not computed for suggestions, so sort expressions cannot reference the _score value. To sort suggestions using a numeric field or existing expression, simply specify the name of the field or expression. If no expression is configured for the suggester, the suggestions are sorted with the closest matches listed first.

    " + } + }, + "Suggester": { + "base": "

    Configuration information for a search suggester. Each suggester has a unique name and specifies the text field you want to use for suggestions. The following options can be configured for a suggester: FuzzyMatching, SortExpression.

    ", + "refs": { + "DefineSuggesterRequest$Suggester": null, + "SuggesterStatus$Options": null + } + }, + "SuggesterFuzzyMatching": { + "base": null, + "refs": { + "DocumentSuggesterOptions$FuzzyMatching": "

    The level of fuzziness allowed when suggesting matches for a string: none, low, or high. With none, the specified string is treated as an exact prefix. With low, suggestions must differ from the specified string by no more than one character. With high, suggestions can differ by up to two characters. The default is none.

    " + } + }, + "SuggesterStatus": { + "base": "

    The value of a Suggester and its current status.

    ", + "refs": { + "DefineSuggesterResponse$Suggester": null, + "DeleteSuggesterResponse$Suggester": "

    The status of the suggester being deleted.

    ", + "SuggesterStatusList$member": null + } + }, + "SuggesterStatusList": { + "base": "

    Contains the status of multiple suggesters.

    ", + "refs": { + "DescribeSuggestersResponse$Suggesters": "

    The suggesters configured for the domain specified in the request.

    " + } + }, + "TextArrayOptions": { + "base": "

    Options for a field that contains an array of text strings. Present if IndexFieldType specifies the field is of type text-array. A text-array field is always searchable. All options are enabled by default.

    ", + "refs": { + "IndexField$TextArrayOptions": null + } + }, + "TextOptions": { + "base": "

    Options for a text field. Present if IndexFieldType specifies the field is of type text. A text field is always searchable. All options are enabled by default.

    ", + "refs": { + "IndexField$TextOptions": null + } + }, + "UIntValue": { + "base": null, + "refs": { + "OptionStatus$UpdateVersion": "

    A unique integer that indicates when this option was last updated.

    ", + "ScalingParameters$DesiredReplicationCount": "

    The number of replicas you want to preconfigure for each index partition.

    ", + "ScalingParameters$DesiredPartitionCount": "

    The number of partitions you want to preconfigure for your domain. Only valid when you select m2.2xlarge as the desired instance type.
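    As a rough sketch, preconfiguring scaling parameters with the cloudsearch configuration client from the expression example above might look like this (the domain name and instance type are placeholders; error handling trimmed):

        _, err := svc.UpdateScalingParameters(&cloudsearch.UpdateScalingParametersInput{
            DomainName: aws.String("movies"), // placeholder domain name
            ScalingParameters: &cloudsearch.ScalingParameters{
                DesiredInstanceType:     aws.String("search.m3.large"), // example instance type
                DesiredReplicationCount: aws.Int64(2),                  // replicas per partition
            },
        })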

    " + } + }, + "UpdateAvailabilityOptionsRequest": { + "base": "

    Container for the parameters to the UpdateAvailabilityOptions operation. Specifies the name of the domain you want to update and the Multi-AZ availability option.

    ", + "refs": { + } + }, + "UpdateAvailabilityOptionsResponse": { + "base": "

    The result of an UpdateAvailabilityOptions request. Contains the status of the domain's availability options.

    ", + "refs": { + } + }, + "UpdateScalingParametersRequest": { + "base": "

    Container for the parameters to the UpdateScalingParameters operation. Specifies the name of the domain you want to update and the scaling parameters you want to configure.

    ", + "refs": { + } + }, + "UpdateScalingParametersResponse": { + "base": "

    The result of an UpdateScalingParameters request. Contains the status of the newly configured scaling parameters.

    ", + "refs": { + } + }, + "UpdateServiceAccessPoliciesRequest": { + "base": "

    Container for the parameters to the UpdateServiceAccessPolicies operation. Specifies the name of the domain you want to update and the access rules you want to configure.

    ", + "refs": { + } + }, + "UpdateServiceAccessPoliciesResponse": { + "base": "

    The result of an UpdateServiceAccessPolicies request. Contains the new access policies.

    ", + "refs": { + } + }, + "UpdateTimestamp": { + "base": null, + "refs": { + "OptionStatus$CreationDate": "

    A timestamp for when this option was created.

    ", + "OptionStatus$UpdateDate": "

    A timestamp for when this option was last updated.

    " + } + }, + "Word": { + "base": null, + "refs": { + "TextArrayOptions$AnalysisScheme": "

    The name of an analysis scheme for a text-array field.

    ", + "TextOptions$AnalysisScheme": "

    The name of an analysis scheme for a text field.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudsearch/2013-01-01/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudsearch/2013-01-01/paginators-1.json new file mode 100644 index 000000000..82fa804ab --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudsearch/2013-01-01/paginators-1.json @@ -0,0 +1,20 @@ +{ + "pagination": { + "DescribeAnalysisSchemes": { + "result_key": "AnalysisSchemes" + }, + "DescribeDomains": { + "result_key": "DomainStatusList" + }, + "DescribeExpressions": { + "result_key": "Expressions" + }, + "DescribeIndexFields": { + "result_key": "IndexFields" + }, + "DescribeSuggesters": { + "result_key": "Suggesters" + } + } +} + diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudsearchdomain/2013-01-01/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudsearchdomain/2013-01-01/api-2.json new file mode 100644 index 000000000..3d36fc2b3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudsearchdomain/2013-01-01/api-2.json @@ -0,0 +1,373 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2013-01-01", + "endpointPrefix":"cloudsearchdomain", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"Amazon CloudSearch Domain", + "signatureVersion":"v4", + "signingName":"cloudsearch" + }, + "operations":{ + "Search":{ + "name":"Search", + "http":{ + "method":"GET", + "requestUri":"/2013-01-01/search?format=sdk&pretty=true" + }, + "input":{"shape":"SearchRequest"}, + "output":{"shape":"SearchResponse"}, + "errors":[ + {"shape":"SearchException"} + ] + }, + "Suggest":{ + "name":"Suggest", + "http":{ + "method":"GET", + "requestUri":"/2013-01-01/suggest?format=sdk&pretty=true" + }, + "input":{"shape":"SuggestRequest"}, + "output":{"shape":"SuggestResponse"}, + "errors":[ + {"shape":"SearchException"} + ] + }, + "UploadDocuments":{ + "name":"UploadDocuments", + "http":{ + "method":"POST", + "requestUri":"/2013-01-01/documents/batch?format=sdk" + }, + "input":{"shape":"UploadDocumentsRequest"}, + "output":{"shape":"UploadDocumentsResponse"}, + "errors":[ + {"shape":"DocumentServiceException"} + ] + } + }, + "shapes":{ + "Adds":{"type":"long"}, + "Blob":{ + "type":"blob", + "streaming":true + }, + "Bucket":{ + "type":"structure", + "members":{ + "value":{"shape":"String"}, + "count":{"shape":"Long"} + } + }, + "BucketInfo":{ + "type":"structure", + "members":{ + "buckets":{"shape":"BucketList"} + } + }, + "BucketList":{ + "type":"list", + "member":{"shape":"Bucket"} + }, + "ContentType":{ + "type":"string", + "enum":[ + "application/json", + "application/xml" + ] + }, + "Cursor":{"type":"string"}, + "Deletes":{"type":"long"}, + "DocumentServiceException":{ + "type":"structure", + "members":{ + "status":{"shape":"String"}, + "message":{"shape":"String"} + }, + "exception":true + }, + "DocumentServiceWarning":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + } + }, + "DocumentServiceWarnings":{ + "type":"list", + "member":{"shape":"DocumentServiceWarning"} + }, + "Double":{"type":"double"}, + "Expr":{"type":"string"}, + "Exprs":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "Facet":{"type":"string"}, + "Facets":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"BucketInfo"} + }, + "FieldStats":{ + "type":"structure", + "members":{ + "min":{"shape":"String"}, + "max":{"shape":"String"}, + "count":{"shape":"Long"}, + "missing":{"shape":"Long"}, + "sum":{"shape":"Double"}, + 
"sumOfSquares":{"shape":"Double"}, + "mean":{"shape":"String"}, + "stddev":{"shape":"Double"} + } + }, + "FieldValue":{ + "type":"list", + "member":{"shape":"String"} + }, + "Fields":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"FieldValue"} + }, + "FilterQuery":{"type":"string"}, + "Highlight":{"type":"string"}, + "Highlights":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "Hit":{ + "type":"structure", + "members":{ + "id":{"shape":"String"}, + "fields":{"shape":"Fields"}, + "exprs":{"shape":"Exprs"}, + "highlights":{"shape":"Highlights"} + } + }, + "HitList":{ + "type":"list", + "member":{"shape":"Hit"} + }, + "Hits":{ + "type":"structure", + "members":{ + "found":{"shape":"Long"}, + "start":{"shape":"Long"}, + "cursor":{"shape":"String"}, + "hit":{"shape":"HitList"} + } + }, + "Long":{"type":"long"}, + "Partial":{"type":"boolean"}, + "Query":{"type":"string"}, + "QueryOptions":{"type":"string"}, + "QueryParser":{ + "type":"string", + "enum":[ + "simple", + "structured", + "lucene", + "dismax" + ] + }, + "Return":{"type":"string"}, + "SearchException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "SearchRequest":{ + "type":"structure", + "required":["query"], + "members":{ + "cursor":{ + "shape":"Cursor", + "location":"querystring", + "locationName":"cursor" + }, + "expr":{ + "shape":"Expr", + "location":"querystring", + "locationName":"expr" + }, + "facet":{ + "shape":"Facet", + "location":"querystring", + "locationName":"facet" + }, + "filterQuery":{ + "shape":"FilterQuery", + "location":"querystring", + "locationName":"fq" + }, + "highlight":{ + "shape":"Highlight", + "location":"querystring", + "locationName":"highlight" + }, + "partial":{ + "shape":"Partial", + "location":"querystring", + "locationName":"partial" + }, + "query":{ + "shape":"Query", + "location":"querystring", + "locationName":"q" + }, + "queryOptions":{ + "shape":"QueryOptions", + "location":"querystring", + "locationName":"q.options" + }, + "queryParser":{ + "shape":"QueryParser", + "location":"querystring", + "locationName":"q.parser" + }, + "return":{ + "shape":"Return", + "location":"querystring", + "locationName":"return" + }, + "size":{ + "shape":"Size", + "location":"querystring", + "locationName":"size" + }, + "sort":{ + "shape":"Sort", + "location":"querystring", + "locationName":"sort" + }, + "start":{ + "shape":"Start", + "location":"querystring", + "locationName":"start" + }, + "stats":{ + "shape":"Stat", + "location":"querystring", + "locationName":"stats" + } + } + }, + "SearchResponse":{ + "type":"structure", + "members":{ + "status":{"shape":"SearchStatus"}, + "hits":{"shape":"Hits"}, + "facets":{"shape":"Facets"}, + "stats":{"shape":"Stats"} + } + }, + "SearchStatus":{ + "type":"structure", + "members":{ + "timems":{"shape":"Long"}, + "rid":{"shape":"String"} + } + }, + "Size":{"type":"long"}, + "Sort":{"type":"string"}, + "Start":{"type":"long"}, + "Stat":{"type":"string"}, + "Stats":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"FieldStats"} + }, + "String":{"type":"string"}, + "SuggestModel":{ + "type":"structure", + "members":{ + "query":{"shape":"String"}, + "found":{"shape":"Long"}, + "suggestions":{"shape":"Suggestions"} + } + }, + "SuggestRequest":{ + "type":"structure", + "required":[ + "query", + "suggester" + ], + "members":{ + "query":{ + "shape":"Query", + "location":"querystring", + "locationName":"q" + }, + "suggester":{ + "shape":"Suggester", + 
"location":"querystring", + "locationName":"suggester" + }, + "size":{ + "shape":"SuggestionsSize", + "location":"querystring", + "locationName":"size" + } + } + }, + "SuggestResponse":{ + "type":"structure", + "members":{ + "status":{"shape":"SuggestStatus"}, + "suggest":{"shape":"SuggestModel"} + } + }, + "SuggestStatus":{ + "type":"structure", + "members":{ + "timems":{"shape":"Long"}, + "rid":{"shape":"String"} + } + }, + "Suggester":{"type":"string"}, + "SuggestionMatch":{ + "type":"structure", + "members":{ + "suggestion":{"shape":"String"}, + "score":{"shape":"Long"}, + "id":{"shape":"String"} + } + }, + "Suggestions":{ + "type":"list", + "member":{"shape":"SuggestionMatch"} + }, + "SuggestionsSize":{"type":"long"}, + "UploadDocumentsRequest":{ + "type":"structure", + "required":[ + "documents", + "contentType" + ], + "members":{ + "documents":{"shape":"Blob"}, + "contentType":{ + "shape":"ContentType", + "location":"header", + "locationName":"Content-Type" + } + }, + "payload":"documents" + }, + "UploadDocumentsResponse":{ + "type":"structure", + "members":{ + "status":{"shape":"String"}, + "adds":{"shape":"Adds"}, + "deletes":{"shape":"Deletes"}, + "warnings":{"shape":"DocumentServiceWarnings"} + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudsearchdomain/2013-01-01/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudsearchdomain/2013-01-01/docs-2.json new file mode 100644 index 000000000..4c30ea45c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudsearchdomain/2013-01-01/docs-2.json @@ -0,0 +1,343 @@ +{ + "version": "2.0", + "service": "

    You use the AmazonCloudSearch2013 API to upload documents to a search domain and search those documents.

    The endpoints for submitting UploadDocuments, Search, and Suggest requests are domain-specific. To get the endpoints for your domain, use the Amazon CloudSearch configuration service DescribeDomains action. The domain endpoints are also displayed on the domain dashboard in the Amazon CloudSearch console. You submit suggest requests to the search endpoint.

    For more information, see the Amazon CloudSearch Developer Guide.

    ", + "operations": { + "Search": "

    Retrieves a list of documents that match the specified search criteria. How you specify the search criteria depends on which query parser you use. Amazon CloudSearch supports four query parsers:

    • simple: search all text and text-array fields for the specified string. Search for phrases, individual terms, and prefixes.
    • structured: search specific fields, construct compound queries using Boolean operators, and use advanced features such as term boosting and proximity searching.
    • lucene: specify search criteria using the Apache Lucene query parser syntax.
    • dismax: specify search criteria using the simplified subset of the Apache Lucene query parser syntax defined by the DisMax query parser.

    For more information, see Searching Your Data in the Amazon CloudSearch Developer Guide.

    The endpoint for submitting Search requests is domain-specific. You submit search requests to a domain's search endpoint. To get the search endpoint for your domain, use the Amazon CloudSearch configuration service DescribeDomains action. A domain's endpoints are also displayed on the domain dashboard in the Amazon CloudSearch console.
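    By way of illustration, a minimal search sketch against the vendored aws-sdk-go cloudsearchdomain client might look as follows; the region, endpoint, and query are placeholders, and a real endpoint must come from DescribeDomains or the console as described above:

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/cloudsearchdomain"
        )

        func main() {
            // The cloudsearchdomain client is endpoint-specific: point it at the
            // domain's search endpoint (placeholder shown here).
            svc := cloudsearchdomain.New(session.New(&aws.Config{
                Region:   aws.String("eu-west-1"),
                Endpoint: aws.String("search-movies-xxxxxxxxxxxx.eu-west-1.cloudsearch.amazonaws.com"),
            }))

            out, err := svc.Search(&cloudsearchdomain.SearchInput{
                Query:       aws.String("star wars"), // placeholder query string
                QueryParser: aws.String("simple"),
                Size:        aws.Int64(10),
            })
            if err != nil {
                log.Fatal(err)
            }
            fmt.Printf("found %d documents\n", *out.Hits.Found)
            for _, hit := range out.Hits.Hit {
                fmt.Println(*hit.Id)
            }
        }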

    ", + "Suggest": "

    Retrieves autocomplete suggestions for a partial query string. You can use suggestions to display likely matches before users finish typing. In Amazon CloudSearch, suggestions are based on the contents of a particular text field. When you request suggestions, Amazon CloudSearch finds all of the documents whose values in the suggester field start with the specified query string. The beginning of the field must match the query string to be considered a match.

    For more information about configuring suggesters and retrieving suggestions, see Getting Suggestions in the Amazon CloudSearch Developer Guide.

    The endpoint for submitting Suggest requests is domain-specific. You submit suggest requests to a domain's search endpoint. To get the search endpoint for your domain, use the Amazon CloudSearch configuration service DescribeDomains action. A domain's endpoints are also displayed on the domain dashboard in the Amazon CloudSearch console.
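    Reusing the search-endpoint client and imports from the sketch above (the suggester name is a placeholder), a suggest call might look like:

        out, err := svc.Suggest(&cloudsearchdomain.SuggestInput{
            Query:     aws.String("sta"),             // partial string typed so far
            Suggester: aws.String("title_suggester"), // placeholder suggester name
            Size:      aws.Int64(5),
        })
        if err != nil {
            log.Fatal(err)
        }
        for _, s := range out.Suggest.Suggestions {
            fmt.Println(*s.Suggestion, *s.Id)
        }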

    ", + "UploadDocuments": "

    Posts a batch of documents to a search domain for indexing. A document batch is a collection of add and delete operations that represent the documents you want to add, update, or delete from your domain. Batches can be described in either JSON or XML. Each item that you want Amazon CloudSearch to return as a search result (such as a product) is represented as a document. Every document has a unique ID and one or more fields that contain the data that you want to search and return in results. Individual documents cannot contain more than 1 MB of data. The entire batch cannot exceed 5 MB. To get the best possible upload performance, group add and delete operations in batches that are close to the 5 MB limit. Submitting a large volume of single-document batches can overload a domain's document service.

    The endpoint for submitting UploadDocuments requests is domain-specific. To get the document endpoint for your domain, use the Amazon CloudSearch configuration service DescribeDomains action. A domain's endpoints are also displayed on the domain dashboard in the Amazon CloudSearch console.

    For more information about formatting your data for Amazon CloudSearch, see Preparing Your Data in the Amazon CloudSearch Developer Guide. For more information about uploading data for indexing, see Uploading Data in the Amazon CloudSearch Developer Guide.
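    A rough sketch of an upload (reusing the imports from the search sketch above plus the standard library bytes package; the document endpoint and batch contents are placeholders, and note that uploads go to the doc endpoint, not the search endpoint):

        docSvc := cloudsearchdomain.New(session.New(&aws.Config{
            Region:   aws.String("eu-west-1"),
            Endpoint: aws.String("doc-movies-xxxxxxxxxxxx.eu-west-1.cloudsearch.amazonaws.com"), // placeholder
        }))

        // A one-document add batch in the JSON batch format.
        batch := []byte(`[{"type":"add","id":"tt0000001","fields":{"title":"Example Movie"}}]`)

        out, err := docSvc.UploadDocuments(&cloudsearchdomain.UploadDocumentsInput{
            ContentType: aws.String("application/json"),
            Documents:   bytes.NewReader(batch), // streaming payload takes an io.ReadSeeker
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("status=%s adds=%d deletes=%d\n", *out.Status, *out.Adds, *out.Deletes)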

    " + }, + "shapes": { + "Adds": { + "base": null, + "refs": { + "UploadDocumentsResponse$adds": "

    The number of documents that were added to the search domain.

    " + } + }, + "Blob": { + "base": null, + "refs": { + "UploadDocumentsRequest$documents": "

    A batch of documents formatted in JSON or XML.

    " + } + }, + "Bucket": { + "base": "

    A container for facet information.

    ", + "refs": { + "BucketList$member": null + } + }, + "BucketInfo": { + "base": "

    A container for the calculated facet values and counts.

    ", + "refs": { + "Facets$value": null + } + }, + "BucketList": { + "base": null, + "refs": { + "BucketInfo$buckets": "

    A list of the calculated facet values and counts.

    " + } + }, + "ContentType": { + "base": null, + "refs": { + "UploadDocumentsRequest$contentType": "

    The format of the batch you are uploading. Amazon CloudSearch supports two document batch formats:

    • application/json
    • application/xml
    " + } + }, + "Cursor": { + "base": null, + "refs": { + "SearchRequest$cursor": "

    Retrieves a cursor value you can use to page through large result sets. Use the size parameter to control the number of hits to include in each response. You can specify either the cursor or start parameter in a request; they are mutually exclusive. To get the first cursor, set the cursor value to initial. In subsequent requests, specify the cursor value returned in the hits section of the response.

    For more information, see Paginating Results in the Amazon CloudSearch Developer Guide.
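    A sketch of paging with a cursor, reusing the search client from the earlier example (the matchall query assumes the structured parser):

        cursor := aws.String("initial")
        for {
            page, err := svc.Search(&cloudsearchdomain.SearchInput{
                Query:       aws.String("matchall"),
                QueryParser: aws.String("structured"),
                Cursor:      cursor,
                Size:        aws.Int64(100),
            })
            if err != nil {
                log.Fatal(err)
            }
            if len(page.Hits.Hit) == 0 {
                break // no more matching documents
            }
            // ... process page.Hits.Hit ...
            cursor = page.Hits.Cursor // cursor for the next page comes from the response
        }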

    " + } + }, + "Deletes": { + "base": null, + "refs": { + "UploadDocumentsResponse$deletes": "

    The number of documents that were deleted from the search domain.

    " + } + }, + "DocumentServiceException": { + "base": "

    Information about any problems encountered while processing an upload request.

    ", + "refs": { + } + }, + "DocumentServiceWarning": { + "base": "

    A warning returned by the document service when an issue is discovered while processing an upload request.

    ", + "refs": { + "DocumentServiceWarnings$member": null + } + }, + "DocumentServiceWarnings": { + "base": null, + "refs": { + "UploadDocumentsResponse$warnings": "

    Any warnings returned by the document service about the documents being uploaded.

    " + } + }, + "Double": { + "base": null, + "refs": { + "FieldStats$sum": "

    The sum of the field values across the documents in the result set. null for date fields.

    ", + "FieldStats$sumOfSquares": "

    The sum of the squares of the field values across the documents in the result set.

    ", + "FieldStats$stddev": "

    The standard deviation of the values in the specified field in the result set.

    " + } + }, + "Expr": { + "base": null, + "refs": { + "SearchRequest$expr": "

    Defines one or more numeric expressions that can be used to sort results or specify search or filter criteria. You can also specify expressions as return fields.

    You specify the expressions in JSON using the form {\"EXPRESSIONNAME\":\"EXPRESSION\"}. You can define and use multiple expressions in a search request. For example:

    {\"expression1\":\"_score*rating\", \"expression2\":\"(1/rank)*year\"}

    For information about the variables, operators, and functions you can use in expressions, see Writing Expressions in the Amazon CloudSearch Developer Guide.
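    For instance, reusing the search client from the earlier example (the expression assumes a numeric rating field exists in the domain):

        out, err := svc.Search(&cloudsearchdomain.SearchInput{
            Query:  aws.String("star"),
            Expr:   aws.String(`{"popularity":"_score*rating"}`), // define the expression inline
            Sort:   aws.String("popularity desc"),               // sort by it
            Return: aws.String("title,popularity"),              // and return it with each hit
        })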

    " + } + }, + "Exprs": { + "base": null, + "refs": { + "Hit$exprs": "

    The expressions returned from a document that matches the search request.

    " + } + }, + "Facet": { + "base": null, + "refs": { + "SearchRequest$facet": "

    Specifies one or more fields for which to get facet information, and options that control how the facet information is returned. Each specified field must be facet-enabled in the domain configuration. The fields and options are specified in JSON using the form {\"FIELD\":{\"OPTION\":VALUE,\"OPTION:\"STRING\"},\"FIELD\":{\"OPTION\":VALUE,\"OPTION\":\"STRING\"}}.

    You can specify the following faceting options:

    • buckets specifies an array of the facet values or ranges to count. Ranges are specified using the same syntax that you use to search for a range of values. For more information, see Searching for a Range of Values in the Amazon CloudSearch Developer Guide. Buckets are returned in the order they are specified in the request. The sort and size options are not valid if you specify buckets.

    • size specifies the maximum number of facets to include in the results. By default, Amazon CloudSearch returns counts for the top 10. The size parameter is only valid when you specify the sort option; it cannot be used in conjunction with buckets.

    • sort specifies how you want to sort the facets in the results: bucket or count. Specify bucket to sort alphabetically or numerically by facet value (in ascending order). Specify count to sort by the facet counts computed for each facet value (in descending order). To retrieve facet counts for particular values or ranges of values, use the buckets option instead of sort.

    If no facet options are specified, facet counts are computed for all field values, the facets are sorted by facet count, and the top 10 facets are returned in the results.

    To count particular buckets of values, use the buckets option. For example, the following request uses the buckets option to calculate and return facet counts by decade.

    {\"year\":{\"buckets\":[\"[1970,1979]\",\"[1980,1989]\",\"[1990,1999]\",\"[2000,2009]\",\"[2010,}\"]}}

    To sort facets by facet count, use the count option. For example, the following request sets the sort option to count to sort the facet values by facet count, with the facet values that have the most matching documents listed first. Setting the size option to 3 returns only the top three facet values.

    {\"year\":{\"sort\":\"count\",\"size\":3}}

    To sort the facets by value, use the bucket option. For example, the following request sets the sort option to bucket to sort the facet values numerically by year, with earliest year listed first.

    {\"year\":{\"sort\":\"bucket\"}}

    For more information, see Getting and Using Facet Information in the Amazon CloudSearch Developer Guide.
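    Reusing the search client from the earlier example, requesting and reading facet counts might be sketched as follows (the year field is a placeholder and must be facet-enabled in the domain):

        out, err := svc.Search(&cloudsearchdomain.SearchInput{
            Query: aws.String("star"),
            Facet: aws.String(`{"year":{"sort":"count","size":3}}`),
        })
        if err != nil {
            log.Fatal(err)
        }
        for field, info := range out.Facets {
            for _, b := range info.Buckets {
                fmt.Printf("%s=%s: %d\n", field, *b.Value, *b.Count)
            }
        }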

    " + } + }, + "Facets": { + "base": null, + "refs": { + "SearchResponse$facets": "

    The requested facet information.

    " + } + }, + "FieldStats": { + "base": "

    The statistics for a field calculated in the request.

    ", + "refs": { + "Stats$value": null + } + }, + "FieldValue": { + "base": null, + "refs": { + "Fields$value": null + } + }, + "Fields": { + "base": null, + "refs": { + "Hit$fields": "

    The fields returned from a document that matches the search request.

    " + } + }, + "FilterQuery": { + "base": null, + "refs": { + "SearchRequest$filterQuery": "

    Specifies a structured query that filters the results of a search without affecting how the results are scored and sorted. You use filterQuery in conjunction with the query parameter to filter the documents that match the constraints specified in the query parameter. Specifying a filter controls only which matching documents are included in the results; it has no effect on how they are scored and sorted. The filterQuery parameter supports the full structured query syntax.

    For more information about using filters, see Filtering Matching Documents in the Amazon CloudSearch Developer Guide.

    " + } + }, + "Highlight": { + "base": null, + "refs": { + "SearchRequest$highlight": "

    Retrieves highlights for matches in the specified text or text-array fields. Each specified field must be highlight enabled in the domain configuration. The fields and options are specified in JSON using the form {\"FIELD\":{\"OPTION\":VALUE,\"OPTION:\"STRING\"},\"FIELD\":{\"OPTION\":VALUE,\"OPTION\":\"STRING\"}}.

    You can specify the following highlight options:

    • format: specifies the format of the data in the text field: text or html. When data is returned as HTML, all non-alphanumeric characters are encoded. The default is html.
    • max_phrases: specifies the maximum number of occurrences of the search term(s) you want to highlight. By default, the first occurrence is highlighted.
    • pre_tag: specifies the string to prepend to an occurrence of a search term. The default for HTML highlights is &lt;em&gt;. The default for text highlights is *.
    • post_tag: specifies the string to append to an occurrence of a search term. The default for HTML highlights is &lt;/em&gt;. The default for text highlights is *.

    If no highlight options are specified for a field, the returned field text is treated as HTML and the first match is highlighted with emphasis tags: &lt;em&gt;search-term&lt;/em&gt;.

    For example, the following request retrieves highlights for the actors and title fields.

    { \"actors\": {}, \"title\": {\"format\": \"text\",\"max_phrases\": 2,\"pre_tag\": \"\",\"post_tag\": \"\"} }

    " + } + }, + "Highlights": { + "base": null, + "refs": { + "Hit$highlights": "

    The highlights returned from a document that matches the search request.

    " + } + }, + "Hit": { + "base": "

    Information about a document that matches the search request.

    ", + "refs": { + "HitList$member": null + } + }, + "HitList": { + "base": null, + "refs": { + "Hits$hit": "

    A document that matches the search request.

    " + } + }, + "Hits": { + "base": "

    The collection of documents that match the search request.

    ", + "refs": { + "SearchResponse$hits": "

    The documents that match the search criteria.

    " + } + }, + "Long": { + "base": null, + "refs": { + "Bucket$count": "

    The number of hits that contain the facet value in the specified facet field.

    ", + "FieldStats$count": "

    The number of documents that contain a value in the specified field in the result set.

    ", + "FieldStats$missing": "

    The number of documents that do not contain a value in the specified field in the result set.

    ", + "Hits$found": "

    The total number of documents that match the search request.

    ", + "Hits$start": "

    The index of the first matching document.

    ", + "SearchStatus$timems": "

    How long it took to process the request, in milliseconds.

    ", + "SuggestModel$found": "

    The number of documents that were found to match the query string.

    ", + "SuggestStatus$timems": "

    How long it took to process the request, in milliseconds.

    ", + "SuggestionMatch$score": "

    The relevance score of a suggested match.

    " + } + }, + "Partial": { + "base": null, + "refs": { + "SearchRequest$partial": "

    Enables partial results to be returned if one or more index partitions are unavailable. When your search index is partitioned across multiple search instances, by default Amazon CloudSearch only returns results if every partition can be queried. This means that the failure of a single search instance can result in 5xx (internal server) errors. When you enable partial results, Amazon CloudSearch returns whatever results are available and includes the percentage of documents searched in the search results (percent-searched). This enables you to more gracefully degrade your users' search experience. For example, rather than displaying no results, you could display the partial results and a message indicating that the results might be incomplete due to a temporary system outage.

    " + } + }, + "Query": { + "base": null, + "refs": { + "SearchRequest$query": "

    Specifies the search criteria for the request. How you specify the search criteria depends on the query parser used for the request and the parser options specified in the queryOptions parameter. By default, the simple query parser is used to process requests. To use the structured, lucene, or dismax query parser, you must also specify the queryParser parameter.

    For more information about specifying search criteria, see Searching Your Data in the Amazon CloudSearch Developer Guide.

    ", + "SuggestRequest$query": "

    Specifies the string for which you want to get suggestions.

    " + } + }, + "QueryOptions": { + "base": null, + "refs": { + "SearchRequest$queryOptions": "

    Configures options for the query parser specified in the queryParser parameter; a Go sketch follows the list below. You specify the options in JSON using the following form {\"OPTION1\":\"VALUE1\",\"OPTION2\":\"VALUE2\",...,\"OPTIONN\":\"VALUEN\"}.

    The options you can configure vary according to which parser you use:

    • defaultOperator: The default operator used to combine individual terms in the search string. For example: defaultOperator: 'or'. For the dismax parser, you specify a percentage that represents the percentage of terms in the search string (rounded down) that must match, rather than a default operator. A value of 0% is equivalent to OR, and a value of 100% is equivalent to AND. The percentage must be specified as a value in the range 0-100 followed by the percent (%) symbol. For example, defaultOperator: 50%. Valid values: and, or, a percentage in the range 0%-100% (dismax). Default: and (simple, structured, lucene) or 100 (dismax). Valid for: simple, structured, lucene, and dismax.
    • fields: An array of the fields to search when no fields are specified in a search. If no fields are specified in a search and this option is not specified, all text and text-array fields are searched. You can specify a weight for each field to control the relative importance of each field when Amazon CloudSearch calculates relevance scores. To specify a field weight, append a caret (^) symbol and the weight to the field name. For example, to boost the importance of the title field over the description field you could specify: \"fields\":[\"title^5\",\"description\"]. Valid values: The name of any configured field and an optional numeric value greater than zero. Default: All text and text-array fields. Valid for: simple, structured, lucene, and dismax.
    • operators: An array of the operators or special characters you want to disable for the simple query parser. If you disable the and, or, or not operators, the corresponding operators (+, |, -) have no special meaning and are dropped from the search string. Similarly, disabling prefix disables the wildcard operator (*) and disabling phrase disables the ability to search for phrases by enclosing phrases in double quotes. Disabling precedence disables the ability to control order of precedence using parentheses. Disabling near disables the ability to use the ~ operator to perform a sloppy phrase search. Disabling the fuzzy operator disables the ability to use the ~ operator to perform a fuzzy search. Disabling escape disables the ability to use a backslash (\\) to escape special characters within the search string. Disabling whitespace is an advanced option that prevents the parser from tokenizing on whitespace, which can be useful for Vietnamese. (It prevents Vietnamese words from being split incorrectly.) For example, you could disable all operators other than the phrase operator to support just simple term and phrase queries: \"operators\":[\"and\",\"not\",\"or\", \"prefix\"]. Valid values: and, escape, fuzzy, near, not, or, phrase, precedence, prefix, whitespace. Default: All operators and special characters are enabled. Valid for: simple.
    • phraseFields: An array of the text or text-array fields you want to use for phrase searches. When the terms in the search string appear in close proximity within a field, the field scores higher. You can specify a weight for each field to boost that score. The phraseSlop option controls how much the matches can deviate from the search string and still be boosted. To specify a field weight, append a caret (^) symbol and the weight to the field name. For example, to boost phrase matches in the title field over the abstract field, you could specify: \"phraseFields\":[\"title^3\",\"abstract\"]. Valid values: The name of any text or text-array field and an optional numeric value greater than zero. Default: No fields. If you don't specify any fields with phraseFields, proximity scoring is disabled even if phraseSlop is specified. Valid for: dismax.
    • phraseSlop: An integer value that specifies how much matches can deviate from the search phrase and still be boosted according to the weights specified in the phraseFields option; for example, phraseSlop: 2. You must also specify phraseFields to enable proximity scoring. Valid values: positive integers. Default: 0. Valid for: dismax.
    • explicitPhraseSlop: An integer value that specifies how much a match can deviate from the search phrase when the phrase is enclosed in double quotes in the search string. (Phrases that exceed this proximity distance are not considered a match.) For example, to specify a slop of three for dismax phrase queries, you would specify \"explicitPhraseSlop\":3. Valid values: positive integers. Default: 0. Valid for: dismax.
    • tieBreaker: When a term in the search string is found in a document's field, a score is calculated for that field based on how common the word is in that field compared to other documents. If the term occurs in multiple fields within a document, by default only the highest scoring field contributes to the document's overall score. You can specify a tieBreaker value to enable the matches in lower-scoring fields to contribute to the document's score. That way, if two documents have the same max field score for a particular term, the score for the document that has matches in more fields will be higher. The formula for calculating the score with a tieBreaker is (max field score) + (tieBreaker) * (sum of the scores for the rest of the matching fields). Set tieBreaker to 0 to disregard all but the highest scoring field (pure max): \"tieBreaker\":0. Set to 1 to sum the scores from all fields (pure sum): \"tieBreaker\":1. Valid values: 0.0 to 1.0. Default: 0.0. Valid for: dismax.
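    As promised above, a minimal sketch of passing query options, reusing the search client from the earlier example (the field names and weights are placeholders):

        out, err := svc.Search(&cloudsearchdomain.SearchInput{
            Query:        aws.String("star wars"),
            QueryParser:  aws.String("simple"),
            QueryOptions: aws.String(`{"fields":["title^5","description"],"defaultOperator":"or"}`),
        })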
    " + } + }, + "QueryParser": { + "base": null, + "refs": { + "SearchRequest$queryParser": "

    Specifies which query parser to use to process the request. If queryParser is not specified, Amazon CloudSearch uses the simple query parser.

    Amazon CloudSearch supports four query parsers:

    • simple: perform simple searches of text and text-array fields. By default, the simple query parser searches all text and text-array fields. You can specify which fields to search with the queryOptions parameter. If you prefix a search term with a plus sign (+), documents must contain the term to be considered a match. (This is the default, unless you configure the default operator with the queryOptions parameter.) You can use the - (NOT), | (OR), and * (wildcard) operators to exclude particular terms, find results that match any of the specified terms, or search for a prefix. To search for a phrase rather than individual terms, enclose the phrase in double quotes. For more information, see Searching for Text in the Amazon CloudSearch Developer Guide.
    • structured: perform advanced searches by combining multiple expressions to define the search criteria. You can also search within particular fields, search for values and ranges of values, and use advanced options such as term boosting, matchall, and near. For more information, see Constructing Compound Queries in the Amazon CloudSearch Developer Guide.
    • lucene: search using the Apache Lucene query parser syntax. For more information, see Apache Lucene Query Parser Syntax.
    • dismax: search using the simplified subset of the Apache Lucene query parser syntax defined by the DisMax query parser. For more information, see DisMax Query Parser Syntax.
    " + } + }, + "Return": { + "base": null, + "refs": { + "SearchRequest$return": "

    Specifies the field and expression values to include in the response. Multiple fields or expressions are specified as a comma-separated list. By default, a search response includes all return enabled fields (_all_fields). To return only the document IDs for the matching documents, specify _no_fields. To retrieve the relevance score calculated for each document, specify _score.

    " + } + }, + "SearchException": { + "base": "

    Information about any problems encountered while processing a search request.

    ", + "refs": { + } + }, + "SearchRequest": { + "base": "

    Container for the parameters to the Search request.

    ", + "refs": { + } + }, + "SearchResponse": { + "base": "

    The result of a Search request. Contains the documents that match the specified search criteria and any requested fields, highlights, and facet information.

    ", + "refs": { + } + }, + "SearchStatus": { + "base": "

    Contains the resource id (rid) and the time it took to process the request (timems).

    ", + "refs": { + "SearchResponse$status": "

    The status information returned for the search request.

    " + } + }, + "Size": { + "base": null, + "refs": { + "SearchRequest$size": "

    Specifies the maximum number of search hits to include in the response.

    " + } + }, + "Sort": { + "base": null, + "refs": { + "SearchRequest$sort": "

    Specifies the fields or custom expressions to use to sort the search results. Multiple fields or expressions are specified as a comma-separated list. You must specify the sort direction (asc or desc) for each field; for example, year desc,title asc. To use a field to sort results, the field must be sort-enabled in the domain configuration. Array type fields cannot be used for sorting. If no sort parameter is specified, results are sorted by their default relevance scores in descending order: _score desc. You can also sort by document ID (_id asc) and version (_version desc).

    For more information, see Sorting Results in the Amazon CloudSearch Developer Guide.

    " + } + }, + "Start": { + "base": null, + "refs": { + "SearchRequest$start": "

    Specifies the offset of the first search hit you want to return. Note that the result set is zero-based; the first result is at index 0. You can specify either the start or cursor parameter in a request; they are mutually exclusive.

    For more information, see Paginating Results in the Amazon CloudSearch Developer Guide.

    " + } + }, + "Stat": { + "base": null, + "refs": { + "SearchRequest$stats": "

    Specifies one or more fields for which to get statistics information. Each specified field must be facet-enabled in the domain configuration. The fields are specified in JSON using the form:

    {\"FIELD-A\":{},\"FIELD-B\":{}}

    There are currently no options supported for statistics.
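    Reusing the search client from the earlier example, a stats request and a look at the returned field statistics might be sketched as follows (the year field is a placeholder and must be facet-enabled):

        out, err := svc.Search(&cloudsearchdomain.SearchInput{
            Query: aws.String("star"),
            Stats: aws.String(`{"year":{}}`),
        })
        if err != nil {
            log.Fatal(err)
        }
        if fs, ok := out.Stats["year"]; ok {
            fmt.Printf("min=%s max=%s count=%d\n", *fs.Min, *fs.Max, *fs.Count)
        }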

    " + } + }, + "Stats": { + "base": "

    The statistics calculated in the request.

    ", + "refs": { + "SearchResponse$stats": "

    The requested field statistics information.

    " + } + }, + "String": { + "base": null, + "refs": { + "Bucket$value": "

    The facet value being counted.

    ", + "DocumentServiceException$status": "

    The return status of a document upload request: error or success.

    ", + "DocumentServiceException$message": "

    The description of the errors returned by the document service.

    ", + "DocumentServiceWarning$message": "

    The description for a warning returned by the document service.

    ", + "Exprs$key": null, + "Exprs$value": null, + "Facets$key": null, + "FieldStats$min": "

    The minimum value found in the specified field in the result set.

    If the field is numeric (int, int-array, double, or double-array), min is the string representation of a double-precision 64-bit floating point value. If the field is date or date-array, min is the string representation of a date with the format specified in IETF RFC3339: yyyy-mm-ddTHH:mm:ss.SSSZ.

    ", + "FieldStats$max": "

    The maximum value found in the specified field in the result set.

    If the field is numeric (int, int-array, double, or double-array), max is the string representation of a double-precision 64-bit floating point value. If the field is date or date-array, max is the string representation of a date with the format specified in IETF RFC3339: yyyy-mm-ddTHH:mm:ss.SSSZ.

    ", + "FieldStats$mean": "

    The average of the values found in the specified field in the result set.

    If the field is numeric (int, int-array, double, or double-array), mean is the string representation of a double-precision 64-bit floating point value. If the field is date or date-array, mean is the string representation of a date with the format specified in IETF RFC3339: yyyy-mm-ddTHH:mm:ss.SSSZ.

    ", + "FieldValue$member": null, + "Fields$key": null, + "Highlights$key": null, + "Highlights$value": null, + "Hit$id": "

    The document ID of a document that matches the search request.

    ", + "Hits$cursor": "

    A cursor that can be used to retrieve the next set of matching documents when you want to page through a large result set.

    ", + "SearchException$message": "

    A description of the error returned by the search service.

    ", + "SearchStatus$rid": "

    The encrypted resource ID for the request.

    ", + "Stats$key": null, + "SuggestModel$query": "

    The query string specified in the suggest request.

    ", + "SuggestStatus$rid": "

    The encrypted resource ID for the request.

    ", + "SuggestionMatch$suggestion": "

    The string that matches the query string specified in the SuggestRequest.

    ", + "SuggestionMatch$id": "

    The document ID of the suggested document.

    ", + "UploadDocumentsResponse$status": "

    The status of an UploadDocumentsRequest.

    " + } + }, + "SuggestModel": { + "base": "

    Container for the suggestion information returned in a SuggestResponse.

    ", + "refs": { + "SuggestResponse$suggest": "

    Container for the matching search suggestion information.

    " + } + }, + "SuggestRequest": { + "base": "

    Container for the parameters to the Suggest request.

    ", + "refs": { + } + }, + "SuggestResponse": { + "base": "

    Contains the response to a Suggest request.

    ", + "refs": { + } + }, + "SuggestStatus": { + "base": "

    Contains the resource ID (rid) and the time it took to process the request (timems).

    ", + "refs": { + "SuggestResponse$status": "

    The status of a SuggestRequest. Contains the resource ID (rid) and how long it took to process the request (timems).

    " + } + }, + "Suggester": { + "base": null, + "refs": { + "SuggestRequest$suggester": "

    Specifies the name of the suggester to use to find suggested matches.

    " + } + }, + "SuggestionMatch": { + "base": "

    An autocomplete suggestion that matches the query string specified in a SuggestRequest.

    ", + "refs": { + "Suggestions$member": null + } + }, + "Suggestions": { + "base": null, + "refs": { + "SuggestModel$suggestions": "

    The documents that match the query string.

    " + } + }, + "SuggestionsSize": { + "base": null, + "refs": { + "SuggestRequest$size": "

    Specifies the maximum number of suggestions to return.

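    A minimal sketch of a Suggest call tying the suggester and size parameters together; the endpoint, suggester name, and query prefix are hypothetical.

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/cloudsearchdomain"
    )

    func main() {
        sess := session.Must(session.NewSession(&aws.Config{
            Region:   aws.String("us-east-1"),
            Endpoint: aws.String("search-mydomain-abc123.us-east-1.cloudsearch.amazonaws.com"), // hypothetical
        }))
        svc := cloudsearchdomain.New(sess)

        out, err := svc.Suggest(&cloudsearchdomain.SuggestInput{
            Query:     aws.String("sta"),             // partial query string to complete
            Suggester: aws.String("title_suggester"), // hypothetical suggester name
            Size:      aws.Int64(5),                  // maximum number of suggestions
        })
        if err != nil {
            log.Fatal(err)
        }
        for _, m := range out.Suggest.Suggestions {
            fmt.Println(aws.StringValue(m.Suggestion))
        }
    }
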
    " + } + }, + "UploadDocumentsRequest": { + "base": "

    Container for the parameters to the UploadDocuments request.

    ", + "refs": { + } + }, + "UploadDocumentsResponse": { + "base": "

    Contains the response to an UploadDocuments request.

    ", + "refs": { + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudsearchdomain/2013-01-01/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudsearchdomain/2013-01-01/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudsearchdomain/2013-01-01/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudtrail/2013-11-01/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudtrail/2013-11-01/api-2.json new file mode 100644 index 000000000..8dbc6db4a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudtrail/2013-11-01/api-2.json @@ -0,0 +1,801 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2013-11-01", + "endpointPrefix":"cloudtrail", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"CloudTrail", + "serviceFullName":"AWS CloudTrail", + "signatureVersion":"v4", + "targetPrefix":"com.amazonaws.cloudtrail.v20131101.CloudTrail_20131101" + }, + "operations":{ + "AddTags":{ + "name":"AddTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddTagsRequest"}, + "output":{"shape":"AddTagsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"CloudTrailARNInvalidException"}, + {"shape":"ResourceTypeNotSupportedException"}, + {"shape":"TagsLimitExceededException"}, + {"shape":"InvalidTrailNameException"}, + {"shape":"InvalidTagParameterException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"OperationNotPermittedException"} + ], + "idempotent":true + }, + "CreateTrail":{ + "name":"CreateTrail", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateTrailRequest"}, + "output":{"shape":"CreateTrailResponse"}, + "errors":[ + {"shape":"MaximumNumberOfTrailsExceededException"}, + {"shape":"TrailAlreadyExistsException"}, + {"shape":"S3BucketDoesNotExistException"}, + {"shape":"InsufficientS3BucketPolicyException"}, + {"shape":"InsufficientSnsTopicPolicyException"}, + {"shape":"InsufficientEncryptionPolicyException"}, + {"shape":"InvalidS3BucketNameException"}, + {"shape":"InvalidS3PrefixException"}, + {"shape":"InvalidSnsTopicNameException"}, + {"shape":"InvalidKmsKeyIdException"}, + {"shape":"InvalidTrailNameException"}, + {"shape":"TrailNotProvidedException"}, + {"shape":"InvalidParameterCombinationException"}, + {"shape":"KmsKeyNotFoundException"}, + {"shape":"KmsKeyDisabledException"}, + {"shape":"KmsException"}, + {"shape":"InvalidCloudWatchLogsLogGroupArnException"}, + {"shape":"InvalidCloudWatchLogsRoleArnException"}, + {"shape":"CloudWatchLogsDeliveryUnavailableException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"OperationNotPermittedException"} + ], + "idempotent":true + }, + "DeleteTrail":{ + "name":"DeleteTrail", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteTrailRequest"}, + "output":{"shape":"DeleteTrailResponse"}, + "errors":[ + {"shape":"TrailNotFoundException"}, + {"shape":"InvalidTrailNameException"}, + {"shape":"InvalidHomeRegionException"} + ], + "idempotent":true + }, + "DescribeTrails":{ + "name":"DescribeTrails", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTrailsRequest"}, + "output":{"shape":"DescribeTrailsResponse"}, + "errors":[ + {"shape":"UnsupportedOperationException"}, + {"shape":"OperationNotPermittedException"} + ], + "idempotent":true + }, + 
"GetTrailStatus":{ + "name":"GetTrailStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetTrailStatusRequest"}, + "output":{"shape":"GetTrailStatusResponse"}, + "errors":[ + {"shape":"TrailNotFoundException"}, + {"shape":"InvalidTrailNameException"} + ], + "idempotent":true + }, + "ListPublicKeys":{ + "name":"ListPublicKeys", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListPublicKeysRequest"}, + "output":{"shape":"ListPublicKeysResponse"}, + "errors":[ + {"shape":"InvalidTimeRangeException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"OperationNotPermittedException"}, + {"shape":"InvalidTokenException"} + ], + "idempotent":true + }, + "ListTags":{ + "name":"ListTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsRequest"}, + "output":{"shape":"ListTagsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"CloudTrailARNInvalidException"}, + {"shape":"ResourceTypeNotSupportedException"}, + {"shape":"InvalidTrailNameException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"OperationNotPermittedException"}, + {"shape":"InvalidTokenException"} + ], + "idempotent":true + }, + "LookupEvents":{ + "name":"LookupEvents", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"LookupEventsRequest"}, + "output":{"shape":"LookupEventsResponse"}, + "errors":[ + {"shape":"InvalidLookupAttributesException"}, + {"shape":"InvalidTimeRangeException"}, + {"shape":"InvalidMaxResultsException"}, + {"shape":"InvalidNextTokenException"} + ], + "idempotent":true + }, + "RemoveTags":{ + "name":"RemoveTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveTagsRequest"}, + "output":{"shape":"RemoveTagsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"CloudTrailARNInvalidException"}, + {"shape":"ResourceTypeNotSupportedException"}, + {"shape":"InvalidTrailNameException"}, + {"shape":"InvalidTagParameterException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"OperationNotPermittedException"} + ], + "idempotent":true + }, + "StartLogging":{ + "name":"StartLogging", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartLoggingRequest"}, + "output":{"shape":"StartLoggingResponse"}, + "errors":[ + {"shape":"TrailNotFoundException"}, + {"shape":"InvalidTrailNameException"}, + {"shape":"InvalidHomeRegionException"} + ], + "idempotent":true + }, + "StopLogging":{ + "name":"StopLogging", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopLoggingRequest"}, + "output":{"shape":"StopLoggingResponse"}, + "errors":[ + {"shape":"TrailNotFoundException"}, + {"shape":"InvalidTrailNameException"}, + {"shape":"InvalidHomeRegionException"} + ], + "idempotent":true + }, + "UpdateTrail":{ + "name":"UpdateTrail", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateTrailRequest"}, + "output":{"shape":"UpdateTrailResponse"}, + "errors":[ + {"shape":"S3BucketDoesNotExistException"}, + {"shape":"InsufficientS3BucketPolicyException"}, + {"shape":"InsufficientSnsTopicPolicyException"}, + {"shape":"InsufficientEncryptionPolicyException"}, + {"shape":"TrailNotFoundException"}, + {"shape":"InvalidS3BucketNameException"}, + {"shape":"InvalidS3PrefixException"}, + {"shape":"InvalidSnsTopicNameException"}, + {"shape":"InvalidKmsKeyIdException"}, + {"shape":"InvalidTrailNameException"}, + {"shape":"TrailNotProvidedException"}, + 
{"shape":"InvalidParameterCombinationException"}, + {"shape":"InvalidHomeRegionException"}, + {"shape":"KmsKeyNotFoundException"}, + {"shape":"KmsKeyDisabledException"}, + {"shape":"KmsException"}, + {"shape":"InvalidCloudWatchLogsLogGroupArnException"}, + {"shape":"InvalidCloudWatchLogsRoleArnException"}, + {"shape":"CloudWatchLogsDeliveryUnavailableException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"OperationNotPermittedException"} + ], + "idempotent":true + } + }, + "shapes":{ + "AddTagsRequest":{ + "type":"structure", + "required":["ResourceId"], + "members":{ + "ResourceId":{"shape":"String"}, + "TagsList":{"shape":"TagsList"} + } + }, + "AddTagsResponse":{ + "type":"structure", + "members":{ + } + }, + "Boolean":{"type":"boolean"}, + "ByteBuffer":{"type":"blob"}, + "CloudTrailARNInvalidException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "CloudWatchLogsDeliveryUnavailableException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "CreateTrailRequest":{ + "type":"structure", + "required":[ + "Name", + "S3BucketName" + ], + "members":{ + "Name":{"shape":"String"}, + "S3BucketName":{"shape":"String"}, + "S3KeyPrefix":{"shape":"String"}, + "SnsTopicName":{"shape":"String"}, + "IncludeGlobalServiceEvents":{"shape":"Boolean"}, + "IsMultiRegionTrail":{"shape":"Boolean"}, + "EnableLogFileValidation":{"shape":"Boolean"}, + "CloudWatchLogsLogGroupArn":{"shape":"String"}, + "CloudWatchLogsRoleArn":{"shape":"String"}, + "KmsKeyId":{"shape":"String"} + } + }, + "CreateTrailResponse":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "S3BucketName":{"shape":"String"}, + "S3KeyPrefix":{"shape":"String"}, + "SnsTopicName":{ + "shape":"String", + "deprecated":true + }, + "SnsTopicARN":{"shape":"String"}, + "IncludeGlobalServiceEvents":{"shape":"Boolean"}, + "IsMultiRegionTrail":{"shape":"Boolean"}, + "TrailARN":{"shape":"String"}, + "LogFileValidationEnabled":{"shape":"Boolean"}, + "CloudWatchLogsLogGroupArn":{"shape":"String"}, + "CloudWatchLogsRoleArn":{"shape":"String"}, + "KmsKeyId":{"shape":"String"} + } + }, + "Date":{"type":"timestamp"}, + "DeleteTrailRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"String"} + } + }, + "DeleteTrailResponse":{ + "type":"structure", + "members":{ + } + }, + "DescribeTrailsRequest":{ + "type":"structure", + "members":{ + "trailNameList":{"shape":"TrailNameList"}, + "includeShadowTrails":{"shape":"Boolean"} + } + }, + "DescribeTrailsResponse":{ + "type":"structure", + "members":{ + "trailList":{"shape":"TrailList"} + } + }, + "Event":{ + "type":"structure", + "members":{ + "EventId":{"shape":"String"}, + "EventName":{"shape":"String"}, + "EventTime":{"shape":"Date"}, + "Username":{"shape":"String"}, + "Resources":{"shape":"ResourceList"}, + "CloudTrailEvent":{"shape":"String"} + } + }, + "EventsList":{ + "type":"list", + "member":{"shape":"Event"} + }, + "GetTrailStatusRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"String"} + } + }, + "GetTrailStatusResponse":{ + "type":"structure", + "members":{ + "IsLogging":{"shape":"Boolean"}, + "LatestDeliveryError":{"shape":"String"}, + "LatestNotificationError":{"shape":"String"}, + "LatestDeliveryTime":{"shape":"Date"}, + "LatestNotificationTime":{"shape":"Date"}, + "StartLoggingTime":{"shape":"Date"}, + "StopLoggingTime":{"shape":"Date"}, + "LatestCloudWatchLogsDeliveryError":{"shape":"String"}, + "LatestCloudWatchLogsDeliveryTime":{"shape":"Date"}, + 
"LatestDigestDeliveryTime":{"shape":"Date"}, + "LatestDigestDeliveryError":{"shape":"String"}, + "LatestDeliveryAttemptTime":{"shape":"String"}, + "LatestNotificationAttemptTime":{"shape":"String"}, + "LatestNotificationAttemptSucceeded":{"shape":"String"}, + "LatestDeliveryAttemptSucceeded":{"shape":"String"}, + "TimeLoggingStarted":{"shape":"String"}, + "TimeLoggingStopped":{"shape":"String"} + } + }, + "InsufficientEncryptionPolicyException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InsufficientS3BucketPolicyException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InsufficientSnsTopicPolicyException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidCloudWatchLogsLogGroupArnException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidCloudWatchLogsRoleArnException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidHomeRegionException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidKmsKeyIdException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidLookupAttributesException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidMaxResultsException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidNextTokenException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidParameterCombinationException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidS3BucketNameException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidS3PrefixException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidSnsTopicNameException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidTagParameterException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidTimeRangeException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidTokenException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidTrailNameException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "KmsException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "KmsKeyDisabledException":{ + "type":"structure", + "members":{ + }, + "deprecated":true, + "exception":true + }, + "KmsKeyNotFoundException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ListPublicKeysRequest":{ + "type":"structure", + "members":{ + "StartTime":{"shape":"Date"}, + "EndTime":{"shape":"Date"}, + "NextToken":{"shape":"String"} + } + }, + "ListPublicKeysResponse":{ + "type":"structure", + "members":{ + "PublicKeyList":{"shape":"PublicKeyList"}, + "NextToken":{"shape":"String"} + } + }, + "ListTagsRequest":{ + "type":"structure", + "required":["ResourceIdList"], + "members":{ + "ResourceIdList":{"shape":"ResourceIdList"}, + "NextToken":{"shape":"String"} + } + }, + "ListTagsResponse":{ + "type":"structure", + "members":{ + "ResourceTagList":{"shape":"ResourceTagList"}, + "NextToken":{"shape":"String"} + } + }, + "LookupAttribute":{ + "type":"structure", + "required":[ + "AttributeKey", + "AttributeValue" + ], + "members":{ + "AttributeKey":{"shape":"LookupAttributeKey"}, + "AttributeValue":{"shape":"String"} + } + }, + "LookupAttributeKey":{ + "type":"string", + "enum":[ + "EventId", + "EventName", + "Username", + "ResourceType", + "ResourceName" + ] + }, + 
"LookupAttributesList":{ + "type":"list", + "member":{"shape":"LookupAttribute"} + }, + "LookupEventsRequest":{ + "type":"structure", + "members":{ + "LookupAttributes":{"shape":"LookupAttributesList"}, + "StartTime":{"shape":"Date"}, + "EndTime":{"shape":"Date"}, + "MaxResults":{"shape":"MaxResults"}, + "NextToken":{"shape":"NextToken"} + } + }, + "LookupEventsResponse":{ + "type":"structure", + "members":{ + "Events":{"shape":"EventsList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "MaxResults":{ + "type":"integer", + "max":50, + "min":1 + }, + "MaximumNumberOfTrailsExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "NextToken":{"type":"string"}, + "OperationNotPermittedException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "PublicKey":{ + "type":"structure", + "members":{ + "Value":{"shape":"ByteBuffer"}, + "ValidityStartTime":{"shape":"Date"}, + "ValidityEndTime":{"shape":"Date"}, + "Fingerprint":{"shape":"String"} + } + }, + "PublicKeyList":{ + "type":"list", + "member":{"shape":"PublicKey"} + }, + "RemoveTagsRequest":{ + "type":"structure", + "required":["ResourceId"], + "members":{ + "ResourceId":{"shape":"String"}, + "TagsList":{"shape":"TagsList"} + } + }, + "RemoveTagsResponse":{ + "type":"structure", + "members":{ + } + }, + "Resource":{ + "type":"structure", + "members":{ + "ResourceType":{"shape":"String"}, + "ResourceName":{"shape":"String"} + } + }, + "ResourceIdList":{ + "type":"list", + "member":{"shape":"String"} + }, + "ResourceList":{ + "type":"list", + "member":{"shape":"Resource"} + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ResourceTag":{ + "type":"structure", + "members":{ + "ResourceId":{"shape":"String"}, + "TagsList":{"shape":"TagsList"} + } + }, + "ResourceTagList":{ + "type":"list", + "member":{"shape":"ResourceTag"} + }, + "ResourceTypeNotSupportedException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "S3BucketDoesNotExistException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "StartLoggingRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"String"} + } + }, + "StartLoggingResponse":{ + "type":"structure", + "members":{ + } + }, + "StopLoggingRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"String"} + } + }, + "StopLoggingResponse":{ + "type":"structure", + "members":{ + } + }, + "String":{"type":"string"}, + "Tag":{ + "type":"structure", + "required":["Key"], + "members":{ + "Key":{"shape":"String"}, + "Value":{"shape":"String"} + } + }, + "TagsLimitExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "TagsList":{ + "type":"list", + "member":{"shape":"Tag"} + }, + "Trail":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "S3BucketName":{"shape":"String"}, + "S3KeyPrefix":{"shape":"String"}, + "SnsTopicName":{ + "shape":"String", + "deprecated":true + }, + "SnsTopicARN":{"shape":"String"}, + "IncludeGlobalServiceEvents":{"shape":"Boolean"}, + "IsMultiRegionTrail":{"shape":"Boolean"}, + "HomeRegion":{"shape":"String"}, + "TrailARN":{"shape":"String"}, + "LogFileValidationEnabled":{"shape":"Boolean"}, + "CloudWatchLogsLogGroupArn":{"shape":"String"}, + "CloudWatchLogsRoleArn":{"shape":"String"}, + "KmsKeyId":{"shape":"String"} + } + }, + "TrailAlreadyExistsException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "TrailList":{ + 
"type":"list", + "member":{"shape":"Trail"} + }, + "TrailNameList":{ + "type":"list", + "member":{"shape":"String"} + }, + "TrailNotFoundException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "TrailNotProvidedException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "UnsupportedOperationException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "UpdateTrailRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"String"}, + "S3BucketName":{"shape":"String"}, + "S3KeyPrefix":{"shape":"String"}, + "SnsTopicName":{"shape":"String"}, + "IncludeGlobalServiceEvents":{"shape":"Boolean"}, + "IsMultiRegionTrail":{"shape":"Boolean"}, + "EnableLogFileValidation":{"shape":"Boolean"}, + "CloudWatchLogsLogGroupArn":{"shape":"String"}, + "CloudWatchLogsRoleArn":{"shape":"String"}, + "KmsKeyId":{"shape":"String"} + } + }, + "UpdateTrailResponse":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "S3BucketName":{"shape":"String"}, + "S3KeyPrefix":{"shape":"String"}, + "SnsTopicName":{ + "shape":"String", + "deprecated":true + }, + "SnsTopicARN":{"shape":"String"}, + "IncludeGlobalServiceEvents":{"shape":"Boolean"}, + "IsMultiRegionTrail":{"shape":"Boolean"}, + "TrailARN":{"shape":"String"}, + "LogFileValidationEnabled":{"shape":"Boolean"}, + "CloudWatchLogsLogGroupArn":{"shape":"String"}, + "CloudWatchLogsRoleArn":{"shape":"String"}, + "KmsKeyId":{"shape":"String"} + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudtrail/2013-11-01/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudtrail/2013-11-01/docs-2.json new file mode 100644 index 000000000..a947f46b8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudtrail/2013-11-01/docs-2.json @@ -0,0 +1,548 @@ +{ + "version": "2.0", + "service": "AWS CloudTrail

    This is the CloudTrail API Reference. It provides descriptions of actions, data types, common parameters, and common errors for CloudTrail.

    CloudTrail is a web service that records AWS API calls for your AWS account and delivers log files to an Amazon S3 bucket. The recorded information includes the identity of the user, the start time of the AWS API call, the source IP address, the request parameters, and the response elements returned by the service.

    As an alternative to the API, you can use one of the AWS SDKs, which consist of libraries and sample code for various programming languages and platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient way to create programmatic access to AWS CloudTrail. For example, the SDKs take care of cryptographically signing requests, managing errors, and retrying requests automatically. For information about the AWS SDKs, including how to download and install them, see the Tools for Amazon Web Services page.

    See the CloudTrail User Guide for information about the data that is included with each AWS API call listed in the log files.

    ", + "operations": { + "AddTags": "

    Adds one or more tags to a trail, up to a limit of 10. Tags must be unique per trail. Overwrites an existing tag's value when a new value is specified for an existing tag key. If you specify a key without a value, the tag will be created with the specified key and a value of null. You can tag a trail that applies to all regions only from the region in which the trail was created (that is, from its home region).

    ", + "CreateTrail": "

    Creates a trail that specifies the settings for delivery of log data to an Amazon S3 bucket. A maximum of five trails can exist in a region, irrespective of the region in which they were created.

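    For orientation, a minimal sketch of creating and starting a trail with the generated cloudtrail client. The trail and bucket names are hypothetical, and the bucket must already exist with a policy that allows CloudTrail to write to it (otherwise the call fails with S3BucketDoesNotExistException or InsufficientS3BucketPolicyException, per the error list in the API model above).

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/cloudtrail"
    )

    func main() {
        svc := cloudtrail.New(session.Must(session.NewSession(
            &aws.Config{Region: aws.String("us-east-1")})))

        out, err := svc.CreateTrail(&cloudtrail.CreateTrailInput{
            Name:                    aws.String("my-trail"),        // hypothetical
            S3BucketName:            aws.String("my-trail-bucket"), // must exist with a CloudTrail bucket policy
            IsMultiRegionTrail:      aws.Bool(true),
            EnableLogFileValidation: aws.Bool(true),
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("created:", aws.StringValue(out.TrailARN))

        // Creating a trail does not begin recording; StartLogging is a separate call.
        if _, err := svc.StartLogging(&cloudtrail.StartLoggingInput{
            Name: out.TrailARN,
        }); err != nil {
            log.Fatal(err)
        }
    }
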
    ", + "DeleteTrail": "

    Deletes a trail. This operation must be called from the region in which the trail was created. DeleteTrail cannot be called on the shadow trails (replicated trails in other regions) of a trail that is enabled in all regions.

    ", + "DescribeTrails": "

    Retrieves settings for the trail associated with the current region for your account.

    ", + "GetTrailStatus": "

    Returns a JSON-formatted list of information about the specified trail. Fields include information on delivery errors, Amazon SNS and Amazon S3 errors, and start and stop logging times for each trail. This operation returns trail status from a single region. To return trail status from all regions, you must call the operation on each region.

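    A minimal sketch of reading trail status with the generated client; the trail name is hypothetical, and a trail ARN works in its place.

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/cloudtrail"
    )

    func main() {
        svc := cloudtrail.New(session.Must(session.NewSession(
            &aws.Config{Region: aws.String("us-east-1")})))

        status, err := svc.GetTrailStatus(&cloudtrail.GetTrailStatusInput{
            Name: aws.String("my-trail"), // hypothetical
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("logging:", aws.BoolValue(status.IsLogging))
        if status.LatestDeliveryError != nil {
            fmt.Println("last S3 delivery error:", aws.StringValue(status.LatestDeliveryError))
        }
    }
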
    ", + "ListPublicKeys": "

    Returns all public keys whose private keys were used to sign the digest files within the specified time range. The public key is needed to validate digest files that were signed with its corresponding private key.

    CloudTrail uses different private/public key pairs per region. Each digest file is signed with a private key unique to its region. Therefore, when you validate a digest file from a particular region, you must look in the same region for its corresponding public key.

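    A minimal sketch of fetching the public keys for a recent time range; per the note above, the client should be configured for the same region the digest files were signed in.

    package main

    import (
        "fmt"
        "log"
        "time"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/cloudtrail"
    )

    func main() {
        // Use the region the digest files to be validated came from.
        svc := cloudtrail.New(session.Must(session.NewSession(
            &aws.Config{Region: aws.String("us-east-1")})))

        out, err := svc.ListPublicKeys(&cloudtrail.ListPublicKeysInput{
            StartTime: aws.Time(time.Now().Add(-7 * 24 * time.Hour)),
            EndTime:   aws.Time(time.Now()),
        })
        if err != nil {
            log.Fatal(err)
        }
        for _, k := range out.PublicKeyList {
            fmt.Println(aws.StringValue(k.Fingerprint),
                aws.TimeValue(k.ValidityStartTime), aws.TimeValue(k.ValidityEndTime))
        }
    }
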
    ", + "ListTags": "

    Lists the tags for the trail in the current region.

    ", + "LookupEvents": "

    Looks up API activity events captured by CloudTrail that create, update, or delete resources in your account. Events for a region can be looked up for the times in which you had CloudTrail turned on in that region during the last seven days. Lookup supports five different attributes: time range (defined by a start time and end time), user name, event name, resource type, and resource name. All attributes are optional. At most the time range and one other attribute can be specified in any one lookup request. The default number of results returned is 10, with a maximum of 50 possible. The response includes a token that you can use to get the next page of results.

    The rate of lookup requests is limited to one per second per account. If this limit is exceeded, a throttling error occurs.

    Events that occurred during the selected time range will not be available for lookup if CloudTrail logging was not enabled when the events occurred.

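    A minimal sketch of a paginated lookup, assuming a hypothetical filter on the Username attribute; note the NextToken loop and the one-request-per-second pacing mentioned above.

    package main

    import (
        "fmt"
        "log"
        "time"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/cloudtrail"
    )

    func main() {
        svc := cloudtrail.New(session.Must(session.NewSession(
            &aws.Config{Region: aws.String("us-east-1")})))

        input := &cloudtrail.LookupEventsInput{
            // At most the time range plus one other attribute may be specified.
            LookupAttributes: []*cloudtrail.LookupAttribute{{
                AttributeKey:   aws.String("Username"),
                AttributeValue: aws.String("root"),
            }},
            StartTime:  aws.Time(time.Now().Add(-24 * time.Hour)),
            EndTime:    aws.Time(time.Now()),
            MaxResults: aws.Int64(50),
        }
        for {
            out, err := svc.LookupEvents(input)
            if err != nil {
                log.Fatal(err)
            }
            for _, e := range out.Events {
                fmt.Println(aws.TimeValue(e.EventTime), aws.StringValue(e.EventName))
            }
            if out.NextToken == nil {
                break // no more pages
            }
            input.NextToken = out.NextToken
            time.Sleep(time.Second) // stay under the one-request-per-second lookup limit
        }
    }
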
    ", + "RemoveTags": "

    Removes the specified tags from a trail.

    ", + "StartLogging": "

    Starts the recording of AWS API calls and log file delivery for a trail. For a trail that is enabled in all regions, this operation must be called from the region in which the trail was created. This operation cannot be called on the shadow trails (replicated trails in other regions) of a trail that is enabled in all regions.

    ", + "StopLogging": "

    Suspends the recording of AWS API calls and log file delivery for the specified trail. Under most circumstances, there is no need to use this action. You can update a trail without stopping it first. This action is the only way to stop recording. For a trail enabled in all regions, this operation must be called from the region in which the trail was created, or an InvalidHomeRegionException will occur. This operation cannot be called on the shadow trails (replicated trails in other regions) of a trail enabled in all regions.

    ", + "UpdateTrail": "

    Updates the settings that specify delivery of log files. Changes to a trail do not require stopping the CloudTrail service. Use this action to designate an existing bucket for log delivery. If the existing bucket has previously been a target for CloudTrail log files, an IAM policy exists for the bucket. UpdateTrail must be called from the region in which the trail was created; otherwise, an InvalidHomeRegionException is thrown.

    " + }, + "shapes": { + "AddTagsRequest": { + "base": "

    Specifies the tags to add to a trail.

    ", + "refs": { + } + }, + "AddTagsResponse": { + "base": "

    Returns the objects or data listed below if successful. Otherwise, returns an error.

    ", + "refs": { + } + }, + "Boolean": { + "base": null, + "refs": { + "CreateTrailRequest$IncludeGlobalServiceEvents": "

    Specifies whether the trail is publishing events from global services such as IAM to the log files.

    ", + "CreateTrailRequest$IsMultiRegionTrail": "

    Specifies whether the trail is created in the current region or in all regions. The default is false.

    ", + "CreateTrailRequest$EnableLogFileValidation": "

    Specifies whether log file integrity validation is enabled. The default is false.

    When you disable log file integrity validation, the chain of digest files is broken after one hour. CloudTrail will not create digest files for log files that were delivered during a period in which log file integrity validation was disabled. For example, if you enable log file integrity validation at noon on January 1, disable it at noon on January 2, and re-enable it at noon on January 10, digest files will not be created for the log files delivered from noon on January 2 to noon on January 10. The same applies whenever you stop CloudTrail logging or delete a trail.

    ", + "CreateTrailResponse$IncludeGlobalServiceEvents": "

    Specifies whether the trail is publishing events from global services such as IAM to the log files.

    ", + "CreateTrailResponse$IsMultiRegionTrail": "

    Specifies whether the trail exists in one region or in all regions.

    ", + "CreateTrailResponse$LogFileValidationEnabled": "

    Specifies whether log file integrity validation is enabled.

    ", + "DescribeTrailsRequest$includeShadowTrails": "

    Specifies whether to include shadow trails in the response. A shadow trail is the replication in a region of a trail that was created in a different region. The default is true.

    ", + "GetTrailStatusResponse$IsLogging": "

    Whether the trail is currently logging AWS API calls.

    ", + "Trail$IncludeGlobalServiceEvents": "

    Set to True to include AWS API calls from AWS global services such as IAM. Otherwise, False.

    ", + "Trail$IsMultiRegionTrail": "

    Specifies whether the trail belongs only to one region or exists in all regions.

    ", + "Trail$LogFileValidationEnabled": "

    Specifies whether log file validation is enabled.

    ", + "UpdateTrailRequest$IncludeGlobalServiceEvents": "

    Specifies whether the trail is publishing events from global services such as IAM to the log files.

    ", + "UpdateTrailRequest$IsMultiRegionTrail": "

    Specifies whether the trail applies only to the current region or to all regions. The default is false. If the trail exists only in the current region and this value is set to true, shadow trails (replications of the trail) will be created in the other regions. If the trail exists in all regions and this value is set to false, the trail will remain in the region where it was created, and its shadow trails in other regions will be deleted.

    ", + "UpdateTrailRequest$EnableLogFileValidation": "

    Specifies whether log file validation is enabled. The default is false.

    When you disable log file integrity validation, the chain of digest files is broken after one hour. CloudTrail will not create digest files for log files that were delivered during a period in which log file integrity validation was disabled. For example, if you enable log file integrity validation at noon on January 1, disable it at noon on January 2, and re-enable it at noon on January 10, digest files will not be created for the log files delivered from noon on January 2 to noon on January 10. The same applies whenever you stop CloudTrail logging or delete a trail.

    ", + "UpdateTrailResponse$IncludeGlobalServiceEvents": "

    Specifies whether the trail is publishing events from global services such as IAM to the log files.

    ", + "UpdateTrailResponse$IsMultiRegionTrail": "

    Specifies whether the trail exists in one region or in all regions.

    ", + "UpdateTrailResponse$LogFileValidationEnabled": "

    Specifies whether log file integrity validation is enabled.

    " + } + }, + "ByteBuffer": { + "base": null, + "refs": { + "PublicKey$Value": "

    The DER encoded public key value in PKCS#1 format.

    " + } + }, + "CloudTrailARNInvalidException": { + "base": "

    This exception is thrown when an operation is called with an invalid trail ARN. The format of a trail ARN is:

    arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail

    ", + "refs": { + } + }, + "CloudWatchLogsDeliveryUnavailableException": { + "base": "

    Cannot set a CloudWatch Logs delivery for this region.

    ", + "refs": { + } + }, + "CreateTrailRequest": { + "base": "

    Specifies the settings for each trail.

    ", + "refs": { + } + }, + "CreateTrailResponse": { + "base": "

    Returns the objects or data listed below if successful. Otherwise, returns an error.

    ", + "refs": { + } + }, + "Date": { + "base": null, + "refs": { + "Event$EventTime": "

    The date and time of the event returned.

    ", + "GetTrailStatusResponse$LatestDeliveryTime": "

    Specifies the date and time that CloudTrail last delivered log files to an account's Amazon S3 bucket.

    ", + "GetTrailStatusResponse$LatestNotificationTime": "

    Specifies the date and time of the most recent Amazon SNS notification that CloudTrail has written a new log file to an account's Amazon S3 bucket.

    ", + "GetTrailStatusResponse$StartLoggingTime": "

    Specifies the most recent date and time when CloudTrail started recording API calls for an AWS account.

    ", + "GetTrailStatusResponse$StopLoggingTime": "

    Specifies the most recent date and time when CloudTrail stopped recording API calls for an AWS account.

    ", + "GetTrailStatusResponse$LatestCloudWatchLogsDeliveryTime": "

    Displays the most recent date and time when CloudTrail delivered logs to CloudWatch Logs.

    ", + "GetTrailStatusResponse$LatestDigestDeliveryTime": "

    Specifies the date and time that CloudTrail last delivered a digest file to an account's Amazon S3 bucket.

    ", + "ListPublicKeysRequest$StartTime": "

    Optionally specifies, in UTC, the start of the time range to look up public keys for CloudTrail digest files. If not specified, the current time is used, and the current public key is returned.

    ", + "ListPublicKeysRequest$EndTime": "

    Optionally specifies, in UTC, the end of the time range to look up public keys for CloudTrail digest files. If not specified, the current time is used.

    ", + "LookupEventsRequest$StartTime": "

    Specifies that only events that occur after or at the specified time are returned. If the specified start time is after the specified end time, an error is returned.

    ", + "LookupEventsRequest$EndTime": "

    Specifies that only events that occur before or at the specified time are returned. If the specified end time is before the specified start time, an error is returned.

    ", + "PublicKey$ValidityStartTime": "

    The starting time of validity of the public key.

    ", + "PublicKey$ValidityEndTime": "

    The ending time of validity of the public key.

    " + } + }, + "DeleteTrailRequest": { + "base": "

    The request that specifies the name of a trail to delete.

    ", + "refs": { + } + }, + "DeleteTrailResponse": { + "base": "

    Returns the objects or data listed below if successful. Otherwise, returns an error.

    ", + "refs": { + } + }, + "DescribeTrailsRequest": { + "base": "

    Returns information about the trail.

    ", + "refs": { + } + }, + "DescribeTrailsResponse": { + "base": "

    Returns the objects or data listed below if successful. Otherwise, returns an error.

    ", + "refs": { + } + }, + "Event": { + "base": "

    Contains information about an event that was returned by a lookup request. The result includes a representation of a CloudTrail event.

    ", + "refs": { + "EventsList$member": null + } + }, + "EventsList": { + "base": null, + "refs": { + "LookupEventsResponse$Events": "

    A list of events returned based on the lookup attributes specified and the CloudTrail event. The events list is sorted by time. The most recent event is listed first.

    " + } + }, + "GetTrailStatusRequest": { + "base": "

    The name of a trail about which you want the current status.

    ", + "refs": { + } + }, + "GetTrailStatusResponse": { + "base": "

    Returns the objects or data listed below if successful. Otherwise, returns an error.

    ", + "refs": { + } + }, + "InsufficientEncryptionPolicyException": { + "base": "

    This exception is thrown when the policy on the S3 bucket or KMS key is not sufficient.

    ", + "refs": { + } + }, + "InsufficientS3BucketPolicyException": { + "base": "

    This exception is thrown when the policy on the S3 bucket is not sufficient.

    ", + "refs": { + } + }, + "InsufficientSnsTopicPolicyException": { + "base": "

    This exception is thrown when the policy on the SNS topic is not sufficient.

    ", + "refs": { + } + }, + "InvalidCloudWatchLogsLogGroupArnException": { + "base": "

    This exception is thrown when the provided CloudWatch log group is not valid.

    ", + "refs": { + } + }, + "InvalidCloudWatchLogsRoleArnException": { + "base": "

    This exception is thrown when the provided role is not valid.

    ", + "refs": { + } + }, + "InvalidHomeRegionException": { + "base": "

    This exception is thrown when an operation is called on a trail from a region other than the region in which the trail was created.

    ", + "refs": { + } + }, + "InvalidKmsKeyIdException": { + "base": "

    This exception is thrown when the KMS key ARN is invalid.

    ", + "refs": { + } + }, + "InvalidLookupAttributesException": { + "base": "

    Occurs when an invalid lookup attribute is specified.

    ", + "refs": { + } + }, + "InvalidMaxResultsException": { + "base": "

    This exception is thrown if the limit specified is invalid.

    ", + "refs": { + } + }, + "InvalidNextTokenException": { + "base": "

    Invalid token or token that was previously used in a request with different parameters. This exception is thrown if the token is invalid.

    ", + "refs": { + } + }, + "InvalidParameterCombinationException": { + "base": "

    This exception is thrown when the combination of parameters provided is not valid.

    ", + "refs": { + } + }, + "InvalidS3BucketNameException": { + "base": "

    This exception is thrown when the provided S3 bucket name is not valid.

    ", + "refs": { + } + }, + "InvalidS3PrefixException": { + "base": "

    This exception is thrown when the provided S3 prefix is not valid.

    ", + "refs": { + } + }, + "InvalidSnsTopicNameException": { + "base": "

    This exception is thrown when the provided SNS topic name is not valid.

    ", + "refs": { + } + }, + "InvalidTagParameterException": { + "base": "

    This exception is thrown when the key or value specified for the tag does not match the regular expression ^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$.

    ", + "refs": { + } + }, + "InvalidTimeRangeException": { + "base": "

    Occurs if the timestamp values are invalid. Either the start time occurs after the end time or the time range is outside the range of possible values.

    ", + "refs": { + } + }, + "InvalidTokenException": { + "base": "

    Reserved for future use.

    ", + "refs": { + } + }, + "InvalidTrailNameException": { + "base": "

    This exception is thrown when the provided trail name is not valid. Trail names must meet the following requirements (a sketch of a client-side check follows this list):

    • Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-)

    • Start with a letter or number, and end with a letter or number

    • Be between 3 and 128 characters

    • Have no adjacent periods, underscores or dashes. Names like my-_namespace and my--namespace are invalid.

    • Not be in IP address format (for example, 192.168.5.4)

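    The requirements above lend themselves to a simple client-side pre-check before calling the API. This is only a sketch of the stated rules, not the service's actual validation logic.

    package main

    import (
        "fmt"
        "net"
        "regexp"
    )

    // Matches names built from ASCII letters, digits, '.', '_', and '-' that start
    // and end with a letter or digit and never place two separators side by side.
    var trailNamePattern = regexp.MustCompile(`^[a-zA-Z0-9](?:[._-]?[a-zA-Z0-9])*$`)

    func isValidTrailName(name string) bool {
        if len(name) < 3 || len(name) > 128 {
            return false
        }
        if !trailNamePattern.MatchString(name) {
            return false
        }
        // Reject names in IP address format, e.g. 192.168.5.4.
        return net.ParseIP(name) == nil
    }

    func main() {
        for _, n := range []string{"my-trail", "my-_namespace", "192.168.5.4"} {
            fmt.Println(n, isValidTrailName(n))
        }
    }
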
    ", + "refs": { + } + }, + "KmsException": { + "base": "

    This exception is thrown when there is an issue with the specified KMS key and the trail can’t be updated.

    ", + "refs": { + } + }, + "KmsKeyDisabledException": { + "base": "

    This exception is deprecated.

    ", + "refs": { + } + }, + "KmsKeyNotFoundException": { + "base": "

    This exception is thrown when the KMS key does not exist, or when the S3 bucket and the KMS key are not in the same region.

    ", + "refs": { + } + }, + "ListPublicKeysRequest": { + "base": "

    Requests the public keys for a specified time range.

    ", + "refs": { + } + }, + "ListPublicKeysResponse": { + "base": "

    Returns the objects or data listed below if successful. Otherwise, returns an error.

    ", + "refs": { + } + }, + "ListTagsRequest": { + "base": "

    Specifies a list of trail tags to return.

    ", + "refs": { + } + }, + "ListTagsResponse": { + "base": "

    Returns the objects or data listed below if successful. Otherwise, returns an error.

    ", + "refs": { + } + }, + "LookupAttribute": { + "base": "

    Specifies an attribute and value that filter the events returned.

    ", + "refs": { + "LookupAttributesList$member": null + } + }, + "LookupAttributeKey": { + "base": null, + "refs": { + "LookupAttribute$AttributeKey": "

    Specifies an attribute on which to filter the events returned.

    " + } + }, + "LookupAttributesList": { + "base": null, + "refs": { + "LookupEventsRequest$LookupAttributes": "

    Contains a list of lookup attributes. Currently the list can contain only one item.

    " + } + }, + "LookupEventsRequest": { + "base": "

    Contains a request for LookupEvents.

    ", + "refs": { + } + }, + "LookupEventsResponse": { + "base": "

    Contains a response to a LookupEvents action.

    ", + "refs": { + } + }, + "MaxResults": { + "base": null, + "refs": { + "LookupEventsRequest$MaxResults": "

    The number of events to return. Possible values are 1 through 50. The default is 10.

    " + } + }, + "MaximumNumberOfTrailsExceededException": { + "base": "

    This exception is thrown when the maximum number of trails is reached.

    ", + "refs": { + } + }, + "NextToken": { + "base": null, + "refs": { + "LookupEventsRequest$NextToken": "

    The token to use to get the next page of results after a previous API call. This token must be passed in with the same parameters that were specified in the original call. For example, if the original call specified an AttributeKey of 'Username' with a value of 'root', the call with NextToken should include those same parameters.

    ", + "LookupEventsResponse$NextToken": "

    The token to use to get the next page of results after a previous API call. If the token does not appear, there are no more results to return. The token must be passed in with the same parameters as the previous call. For example, if the original call specified an AttributeKey of 'Username' with a value of 'root', the call with NextToken should include those same parameters.

    " + } + }, + "OperationNotPermittedException": { + "base": "

    This exception is thrown when the requested operation is not permitted.

    ", + "refs": { + } + }, + "PublicKey": { + "base": "

    Contains information about a returned public key.

    ", + "refs": { + "PublicKeyList$member": null + } + }, + "PublicKeyList": { + "base": null, + "refs": { + "ListPublicKeysResponse$PublicKeyList": "

    Contains an array of PublicKey objects.

    The returned public keys may have validity time ranges that overlap.

    " + } + }, + "RemoveTagsRequest": { + "base": "

    Specifies the tags to remove from a trail.

    ", + "refs": { + } + }, + "RemoveTagsResponse": { + "base": "

    Returns the objects or data listed below if successful. Otherwise, returns an error.

    ", + "refs": { + } + }, + "Resource": { + "base": "

    Specifies the type and name of a resource referenced by an event.

    ", + "refs": { + "ResourceList$member": null + } + }, + "ResourceIdList": { + "base": null, + "refs": { + "ListTagsRequest$ResourceIdList": "

    Specifies a list of trail ARNs whose tags will be listed. The list has a limit of 20 ARNs. The format of a trail ARN is:

    arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail

    " + } + }, + "ResourceList": { + "base": "

    A list of resources referenced by the event returned.

    ", + "refs": { + "Event$Resources": "

    A list of resources referenced by the event returned.

    " + } + }, + "ResourceNotFoundException": { + "base": "

    This exception is thrown when the specified resource is not found.

    ", + "refs": { + } + }, + "ResourceTag": { + "base": "

    A resource tag.

    ", + "refs": { + "ResourceTagList$member": null + } + }, + "ResourceTagList": { + "base": "

    A list of resource tags.

    ", + "refs": { + "ListTagsResponse$ResourceTagList": null + } + }, + "ResourceTypeNotSupportedException": { + "base": "

    This exception is thrown when the specified resource type is not supported by CloudTrail.

    ", + "refs": { + } + }, + "S3BucketDoesNotExistException": { + "base": "

    This exception is thrown when the specified S3 bucket does not exist.

    ", + "refs": { + } + }, + "StartLoggingRequest": { + "base": "

    The request to CloudTrail to start logging AWS API calls for an account.

    ", + "refs": { + } + }, + "StartLoggingResponse": { + "base": "

    Returns the objects or data listed below if successful. Otherwise, returns an error.

    ", + "refs": { + } + }, + "StopLoggingRequest": { + "base": "

    Passes the request to CloudTrail to stop logging AWS API calls for the specified account.

    ", + "refs": { + } + }, + "StopLoggingResponse": { + "base": "

    Returns the objects or data listed below if successful. Otherwise, returns an error.

    ", + "refs": { + } + }, + "String": { + "base": null, + "refs": { + "AddTagsRequest$ResourceId": "

    Specifies the ARN of the trail to which one or more tags will be added. The format of a trail ARN is:

    arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail

    ", + "CreateTrailRequest$Name": "

    Specifies the name of the trail. The name must meet the following requirements:

    • Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-)

    • Start with a letter or number, and end with a letter or number

    • Be between 3 and 128 characters

    • Have no adjacent periods, underscores or dashes. Names like my-_namespace and my--namespace are invalid.

    • Not be in IP address format (for example, 192.168.5.4)

    ", + "CreateTrailRequest$S3BucketName": "

    Specifies the name of the Amazon S3 bucket designated for publishing log files. See Amazon S3 Bucket Naming Requirements.

    ", + "CreateTrailRequest$S3KeyPrefix": "

    Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files. The maximum length is 200 characters.

    ", + "CreateTrailRequest$SnsTopicName": "

    Specifies the name of the Amazon SNS topic defined for notification of log file delivery. The maximum length is 256 characters.

    ", + "CreateTrailRequest$CloudWatchLogsLogGroupArn": "

    Specifies a log group name using an Amazon Resource Name (ARN), a unique identifier that represents the log group to which CloudTrail logs will be delivered. Not required unless you specify CloudWatchLogsRoleArn.

    ", + "CreateTrailRequest$CloudWatchLogsRoleArn": "

    Specifies the role for the CloudWatch Logs endpoint to assume to write to a user's log group.

    ", + "CreateTrailRequest$KmsKeyId": "

    Specifies the KMS key ID to use to encrypt the logs delivered by CloudTrail. The value can be an alias name prefixed by \"alias/\", a fully specified ARN to an alias, a fully specified ARN to a key, or a globally unique identifier.

    Examples:

    • alias/MyAliasName

    • arn:aws:kms:us-east-1:123456789012:alias/MyAliasName

    • arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012

    • 12345678-1234-1234-1234-123456789012

    ", + "CreateTrailResponse$Name": "

    Specifies the name of the trail.

    ", + "CreateTrailResponse$S3BucketName": "

    Specifies the name of the Amazon S3 bucket designated for publishing log files.

    ", + "CreateTrailResponse$S3KeyPrefix": "

    Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files.

    ", + "CreateTrailResponse$SnsTopicName": "

    This field is deprecated. Use SnsTopicARN.

    ", + "CreateTrailResponse$SnsTopicARN": "

    Specifies the ARN of the Amazon SNS topic that CloudTrail uses to send notifications when log files are delivered. The format of a topic ARN is:

    arn:aws:sns:us-east-1:123456789012:MyTopic

    ", + "CreateTrailResponse$TrailARN": "

    Specifies the ARN of the trail that was created. The format of a trail ARN is:

    arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail

    ", + "CreateTrailResponse$CloudWatchLogsLogGroupArn": "

    Specifies the Amazon Resource Name (ARN) of the log group to which CloudTrail logs will be delivered.

    ", + "CreateTrailResponse$CloudWatchLogsRoleArn": "

    Specifies the role for the CloudWatch Logs endpoint to assume to write to a user's log group.

    ", + "CreateTrailResponse$KmsKeyId": "

    Specifies the KMS key ID that encrypts the logs delivered by CloudTrail. The value is a fully specified ARN to a KMS key in the format:

    arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012

    ", + "DeleteTrailRequest$Name": "

    Specifies the name or the CloudTrail ARN of the trail to be deleted. The format of a trail ARN is:

    arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail

    ", + "Event$EventId": "

    The CloudTrail ID of the event returned.

    ", + "Event$EventName": "

    The name of the event returned.

    ", + "Event$Username": "

    A user name or role name of the requester that called the API in the event returned.

    ", + "Event$CloudTrailEvent": "

    A JSON string that contains a representation of the event returned.

    ", + "GetTrailStatusRequest$Name": "

    Specifies the name or the CloudTrail ARN of the trail for which you are requesting status. To get the status of a shadow trail (a replication of the trail in another region), you must specify its ARN. The format of a trail ARN is:

    arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail

    ", + "GetTrailStatusResponse$LatestDeliveryError": "

    Displays any Amazon S3 error that CloudTrail encountered when attempting to deliver log files to the designated bucket. For more information see the topic Error Responses in the Amazon S3 API Reference.

    This error occurs only when there is a problem with the destination S3 bucket and will not occur for timeouts. To resolve the issue, create a new bucket and call UpdateTrail to specify the new bucket, or fix the existing objects so that CloudTrail can again write to the bucket.

    ", + "GetTrailStatusResponse$LatestNotificationError": "

    Displays any Amazon SNS error that CloudTrail encountered when attempting to send a notification. For more information about Amazon SNS errors, see the Amazon SNS Developer Guide.

    ", + "GetTrailStatusResponse$LatestCloudWatchLogsDeliveryError": "

    Displays any CloudWatch Logs error that CloudTrail encountered when attempting to deliver logs to CloudWatch Logs.

    ", + "GetTrailStatusResponse$LatestDigestDeliveryError": "

    Displays any Amazon S3 error that CloudTrail encountered when attempting to deliver a digest file to the designated bucket. For more information see the topic Error Responses in the Amazon S3 API Reference.

    This error occurs only when there is a problem with the destination S3 bucket and will not occur for timeouts. To resolve the issue, create a new bucket and call UpdateTrail to specify the new bucket, or fix the existing objects so that CloudTrail can again write to the bucket.

    ", + "GetTrailStatusResponse$LatestDeliveryAttemptTime": "

    This field is deprecated.

    ", + "GetTrailStatusResponse$LatestNotificationAttemptTime": "

    This field is deprecated.

    ", + "GetTrailStatusResponse$LatestNotificationAttemptSucceeded": "

    This field is deprecated.

    ", + "GetTrailStatusResponse$LatestDeliveryAttemptSucceeded": "

    This field is deprecated.

    ", + "GetTrailStatusResponse$TimeLoggingStarted": "

    This field is deprecated.

    ", + "GetTrailStatusResponse$TimeLoggingStopped": "

    This field is deprecated.

    ", + "ListPublicKeysRequest$NextToken": "

    Reserved for future use.

    ", + "ListPublicKeysResponse$NextToken": "

    Reserved for future use.

    ", + "ListTagsRequest$NextToken": "

    Reserved for future use.

    ", + "ListTagsResponse$NextToken": "

    Reserved for future use.

    ", + "LookupAttribute$AttributeValue": "

    Specifies a value for the specified AttributeKey.

    ", + "PublicKey$Fingerprint": "

    The fingerprint of the public key.

    ", + "RemoveTagsRequest$ResourceId": "

    Specifies the ARN of the trail from which tags should be removed. The format of a trail ARN is:

    arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail

    ", + "Resource$ResourceType": "

    The type of a resource referenced by the event returned. When the resource type cannot be determined, null is returned. Some examples of resource types are: Instance for EC2, Trail for CloudTrail, DBInstance for RDS, and AccessKey for IAM. For a list of resource types supported for event lookup, see Resource Types Supported for Event Lookup.

    ", + "Resource$ResourceName": "

    The name of the resource referenced by the event returned. These are user-created names whose values will depend on the environment. For example, the resource name might be \"auto-scaling-test-group\" for an Auto Scaling Group or \"i-1234567\" for an EC2 Instance.

    ", + "ResourceIdList$member": null, + "ResourceTag$ResourceId": "

    Specifies the ARN of the resource.

    ", + "StartLoggingRequest$Name": "

    Specifies the name or the CloudTrail ARN of the trail for which CloudTrail logs AWS API calls. The format of a trail ARN is:

    arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail

    ", + "StopLoggingRequest$Name": "

    Specifies the name or the CloudTrail ARN of the trail for which CloudTrail will stop logging AWS API calls. The format of a trail ARN is:

    arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail

    ", + "Tag$Key": "

    The key in a key-value pair. The key must be no longer than 128 Unicode characters. The key must be unique for the resource to which it applies.

    ", + "Tag$Value": "

    The value in a key-value pair of a tag. The value must be no longer than 256 Unicode characters.

    ", + "Trail$Name": "

    Name of the trail set by calling CreateTrail. The maximum length is 128 characters.

    ", + "Trail$S3BucketName": "

    Name of the Amazon S3 bucket into which CloudTrail delivers your trail files. See Amazon S3 Bucket Naming Requirements.

    ", + "Trail$S3KeyPrefix": "

    Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files. The maximum length is 200 characters.

    ", + "Trail$SnsTopicName": "

    This field is deprecated. Use SnsTopicARN.

    ", + "Trail$SnsTopicARN": "

    Specifies the ARN of the Amazon SNS topic that CloudTrail uses to send notifications when log files are delivered. The format of a topic ARN is:

    arn:aws:sns:us-east-1:123456789012:MyTopic

    ", + "Trail$HomeRegion": "

    The region in which the trail was created.

    ", + "Trail$TrailARN": "

    Specifies the ARN of the trail. The format of a trail ARN is:

    arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail

    ", + "Trail$CloudWatchLogsLogGroupArn": "

    Specifies an Amazon Resource Name (ARN), a unique identifier that represents the log group to which CloudTrail logs will be delivered.

    ", + "Trail$CloudWatchLogsRoleArn": "

    Specifies the role for the CloudWatch Logs endpoint to assume to write to a user's log group.

    ", + "Trail$KmsKeyId": "

    Specifies the KMS key ID that encrypts the logs delivered by CloudTrail. The value is a fully specified ARN to a KMS key in the format:

    arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012

    ", + "TrailNameList$member": null, + "UpdateTrailRequest$Name": "

    Specifies the name of the trail or trail ARN. If Name is a trail name, the string must meet the following requirements:

    • Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-)

    • Start with a letter or number, and end with a letter or number

    • Be between 3 and 128 characters

    • Have no adjacent periods, underscores or dashes. Names like my-_namespace and my--namespace are invalid.

    • Not be in IP address format (for example, 192.168.5.4)

    If Name is a trail ARN, it must be in the format:

    arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail
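    These naming rules lend themselves to a client-side pre-check. Below is a minimal Go sketch; the helper name isValidTrailName is ours, not part of aws-sdk-go, and the service remains the authority on what it accepts.

        package trailcheck

        import (
            "net"
            "regexp"
        )

        var (
            // 3-128 chars, allowed characters only, letter or number at both ends.
            trailNameRE = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9._-]{1,126}[A-Za-z0-9]$`)
            // No adjacent periods, underscores, or dashes (my-_namespace, my--namespace).
            adjacentSep = regexp.MustCompile(`[._-]{2}`)
        )

        // isValidTrailName mirrors the documented rules for trail names.
        func isValidTrailName(name string) bool {
            return trailNameRE.MatchString(name) &&
                !adjacentSep.MatchString(name) &&
                net.ParseIP(name) == nil // not in IP address format
        }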

    ", + "UpdateTrailRequest$S3BucketName": "

    Specifies the name of the Amazon S3 bucket designated for publishing log files. See Amazon S3 Bucket Naming Requirements.

    ", + "UpdateTrailRequest$S3KeyPrefix": "

    Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files. The maximum length is 200 characters.

    ", + "UpdateTrailRequest$SnsTopicName": "

    Specifies the name of the Amazon SNS topic defined for notification of log file delivery. The maximum length is 256 characters.

    ", + "UpdateTrailRequest$CloudWatchLogsLogGroupArn": "

    Specifies a log group name using an Amazon Resource Name (ARN), a unique identifier that represents the log group to which CloudTrail logs will be delivered. Not required unless you specify CloudWatchLogsRoleArn.

    ", + "UpdateTrailRequest$CloudWatchLogsRoleArn": "

    Specifies the role for the CloudWatch Logs endpoint to assume to write to a user's log group.

    ", + "UpdateTrailRequest$KmsKeyId": "

    Specifies the KMS key ID to use to encrypt the logs delivered by CloudTrail. The value can be an alias name prefixed by \"alias/\", a fully specified ARN to an alias, a fully specified ARN to a key, or a globally unique identifier.

    Examples:

    • alias/MyAliasName

    • arn:aws:kms:us-east-1:123456789012:alias/MyAliasName

    • arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012

    • 12345678-1234-1234-1234-123456789012
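    For illustration, a hypothetical Go helper that distinguishes the four accepted forms; the name and classification logic are ours, not part of aws-sdk-go:

        package kmsform

        import "strings"

        // kmsKeyIDForm reports which of the four accepted KmsKeyId forms a
        // value takes; purely illustrative, with no validation of the parts.
        func kmsKeyIDForm(v string) string {
            switch {
            case strings.HasPrefix(v, "alias/"):
                return "alias name"
            case strings.HasPrefix(v, "arn:") && strings.Contains(v, ":alias/"):
                return "alias ARN"
            case strings.HasPrefix(v, "arn:") && strings.Contains(v, ":key/"):
                return "key ARN"
            default:
                return "globally unique key ID"
            }
        }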

    ", + "UpdateTrailResponse$Name": "

    Specifies the name of the trail.

    ", + "UpdateTrailResponse$S3BucketName": "

    Specifies the name of the Amazon S3 bucket designated for publishing log files.

    ", + "UpdateTrailResponse$S3KeyPrefix": "

    Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files.

    ", + "UpdateTrailResponse$SnsTopicName": "

    This field is deprecated. Use SnsTopicARN.

    ", + "UpdateTrailResponse$SnsTopicARN": "

    Specifies the ARN of the Amazon SNS topic that CloudTrail uses to send notifications when log files are delivered. The format of a topic ARN is:

    arn:aws:sns:us-east-1:123456789012:MyTopic

    ", + "UpdateTrailResponse$TrailARN": "

    Specifies the ARN of the trail that was updated. The format of a trail ARN is:

    arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail

    ", + "UpdateTrailResponse$CloudWatchLogsLogGroupArn": "

    Specifies the Amazon Resource Name (ARN) of the log group to which CloudTrail logs will be delivered.

    ", + "UpdateTrailResponse$CloudWatchLogsRoleArn": "

    Specifies the role for the CloudWatch Logs endpoint to assume to write to a user's log group.

    ", + "UpdateTrailResponse$KmsKeyId": "

    Specifies the KMS key ID that encrypts the logs delivered by CloudTrail. The value is a fully specified ARN to a KMS key in the format:

    arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012

    " + } + }, + "Tag": { + "base": "

    A custom key-value pair associated with a resource such as a CloudTrail trail.

    ", + "refs": { + "TagsList$member": null + } + }, + "TagsLimitExceededException": { + "base": "

    The number of tags per trail has exceeded the permitted amount. Currently, the limit is 10.

    ", + "refs": { + } + }, + "TagsList": { + "base": "

    A list of tags.

    ", + "refs": { + "AddTagsRequest$TagsList": "

    Contains a list of CloudTrail tags, up to a limit of 10.

    ", + "RemoveTagsRequest$TagsList": "

    Specifies a list of tags to be removed.

    ", + "ResourceTag$TagsList": null + } + }, + "Trail": { + "base": "

    The settings for a trail.

    ", + "refs": { + "TrailList$member": null + } + }, + "TrailAlreadyExistsException": { + "base": "

    This exception is thrown when the specified trail already exists.

    ", + "refs": { + } + }, + "TrailList": { + "base": null, + "refs": { + "DescribeTrailsResponse$trailList": "

    The list of trail objects.

    " + } + }, + "TrailNameList": { + "base": null, + "refs": { + "DescribeTrailsRequest$trailNameList": "

    Specifies a list of trail names, trail ARNs, or both, of the trails to describe. The format of a trail ARN is:

    arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail

    If an empty list is specified, information for the trail in the current region is returned.

    • If an empty list is specified and IncludeShadowTrails is false, then information for all trails in the current region is returned.

    • If an empty list is specified and IncludeShadowTrails is null or true, then information for all trails in the current region and any associated shadow trails in other regions is returned.

    If one or more trail names are specified, information is returned only for trails in the current region whose names match. To return information about a trail in another region, you must specify its trail ARN.
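    As a usage sketch with the vendored Go SDK (region is a placeholder, error handling is minimal), an empty trail name list with IncludeShadowTrails disabled describes only current-region trails:

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/cloudtrail"
        )

        func main() {
            sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
            svc := cloudtrail.New(sess)

            // An empty TrailNameList with IncludeShadowTrails=false returns only
            // trails that live in the current region.
            out, err := svc.DescribeTrails(&cloudtrail.DescribeTrailsInput{
                IncludeShadowTrails: aws.Bool(false),
            })
            if err != nil {
                log.Fatal(err)
            }
            for _, t := range out.TrailList {
                fmt.Println(aws.StringValue(t.Name), aws.StringValue(t.HomeRegion))
            }
        }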

    " + } + }, + "TrailNotFoundException": { + "base": "

    This exception is thrown when the trail with the given name is not found.

    ", + "refs": { + } + }, + "TrailNotProvidedException": { + "base": "

    This exception is deprecated.

    ", + "refs": { + } + }, + "UnsupportedOperationException": { + "base": "

    This exception is thrown when the requested operation is not supported.

    ", + "refs": { + } + }, + "UpdateTrailRequest": { + "base": "

    Specifies settings to update for the trail.

    ", + "refs": { + } + }, + "UpdateTrailResponse": { + "base": "

    Returns the objects or data listed below if successful. Otherwise, returns an error.

    ", + "refs": { + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudtrail/2013-11-01/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudtrail/2013-11-01/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudtrail/2013-11-01/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cloudtrail/2013-11-01/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudtrail/2013-11-01/paginators-1.json new file mode 100644 index 000000000..a11f43616 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cloudtrail/2013-11-01/paginators-1.json @@ -0,0 +1,7 @@ +{ + "pagination": { + "DescribeTrails": { + "result_key": "trailList" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/codecommit/2015-04-13/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/codecommit/2015-04-13/api-2.json new file mode 100644 index 000000000..0dde5d30f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/codecommit/2015-04-13/api-2.json @@ -0,0 +1,916 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-04-13", + "endpointPrefix":"codecommit", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"CodeCommit", + "serviceFullName":"AWS CodeCommit", + "signatureVersion":"v4", + "targetPrefix":"CodeCommit_20150413" + }, + "operations":{ + "BatchGetRepositories":{ + "name":"BatchGetRepositories", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchGetRepositoriesInput"}, + "output":{"shape":"BatchGetRepositoriesOutput"}, + "errors":[ + {"shape":"RepositoryNamesRequiredException"}, + {"shape":"MaximumRepositoryNamesExceededException"}, + {"shape":"InvalidRepositoryNameException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ] + }, + "CreateBranch":{ + "name":"CreateBranch", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateBranchInput"}, + "errors":[ + {"shape":"RepositoryNameRequiredException"}, + {"shape":"InvalidRepositoryNameException"}, + {"shape":"RepositoryDoesNotExistException"}, + {"shape":"BranchNameRequiredException"}, + {"shape":"BranchNameExistsException"}, + {"shape":"InvalidBranchNameException"}, + {"shape":"CommitIdRequiredException"}, + {"shape":"CommitDoesNotExistException"}, + {"shape":"InvalidCommitIdException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ] + }, + "CreateRepository":{ + "name":"CreateRepository", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateRepositoryInput"}, + "output":{"shape":"CreateRepositoryOutput"}, + "errors":[ + {"shape":"RepositoryNameExistsException"}, + {"shape":"RepositoryNameRequiredException"}, + {"shape":"InvalidRepositoryNameException"}, + {"shape":"InvalidRepositoryDescriptionException"}, + {"shape":"RepositoryLimitExceededException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + 
{"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ] + }, + "DeleteRepository":{ + "name":"DeleteRepository", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRepositoryInput"}, + "output":{"shape":"DeleteRepositoryOutput"}, + "errors":[ + {"shape":"RepositoryNameRequiredException"}, + {"shape":"InvalidRepositoryNameException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ] + }, + "GetBranch":{ + "name":"GetBranch", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetBranchInput"}, + "output":{"shape":"GetBranchOutput"}, + "errors":[ + {"shape":"RepositoryNameRequiredException"}, + {"shape":"RepositoryDoesNotExistException"}, + {"shape":"InvalidRepositoryNameException"}, + {"shape":"BranchNameRequiredException"}, + {"shape":"InvalidBranchNameException"}, + {"shape":"BranchDoesNotExistException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ] + }, + "GetCommit":{ + "name":"GetCommit", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetCommitInput"}, + "output":{"shape":"GetCommitOutput"}, + "errors":[ + {"shape":"RepositoryNameRequiredException"}, + {"shape":"InvalidRepositoryNameException"}, + {"shape":"RepositoryDoesNotExistException"}, + {"shape":"CommitIdRequiredException"}, + {"shape":"InvalidCommitIdException"}, + {"shape":"CommitIdDoesNotExistException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ] + }, + "GetRepository":{ + "name":"GetRepository", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetRepositoryInput"}, + "output":{"shape":"GetRepositoryOutput"}, + "errors":[ + {"shape":"RepositoryNameRequiredException"}, + {"shape":"RepositoryDoesNotExistException"}, + {"shape":"InvalidRepositoryNameException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ] + }, + "GetRepositoryTriggers":{ + "name":"GetRepositoryTriggers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetRepositoryTriggersInput"}, + "output":{"shape":"GetRepositoryTriggersOutput"}, + "errors":[ + {"shape":"RepositoryNameRequiredException"}, + {"shape":"InvalidRepositoryNameException"}, + {"shape":"RepositoryDoesNotExistException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ] + }, + "ListBranches":{ + "name":"ListBranches", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListBranchesInput"}, + "output":{"shape":"ListBranchesOutput"}, + "errors":[ + {"shape":"RepositoryNameRequiredException"}, + 
{"shape":"RepositoryDoesNotExistException"}, + {"shape":"InvalidRepositoryNameException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"}, + {"shape":"InvalidContinuationTokenException"} + ] + }, + "ListRepositories":{ + "name":"ListRepositories", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListRepositoriesInput"}, + "output":{"shape":"ListRepositoriesOutput"}, + "errors":[ + {"shape":"InvalidSortByException"}, + {"shape":"InvalidOrderException"}, + {"shape":"InvalidContinuationTokenException"} + ] + }, + "PutRepositoryTriggers":{ + "name":"PutRepositoryTriggers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutRepositoryTriggersInput"}, + "output":{"shape":"PutRepositoryTriggersOutput"}, + "errors":[ + {"shape":"RepositoryDoesNotExistException"}, + {"shape":"RepositoryNameRequiredException"}, + {"shape":"InvalidRepositoryNameException"}, + {"shape":"RepositoryTriggersListRequiredException"}, + {"shape":"MaximumRepositoryTriggersExceededException"}, + {"shape":"InvalidRepositoryTriggerNameException"}, + {"shape":"InvalidRepositoryTriggerDestinationArnException"}, + {"shape":"InvalidRepositoryTriggerRegionException"}, + {"shape":"InvalidRepositoryTriggerCustomDataException"}, + {"shape":"MaximumBranchesExceededException"}, + {"shape":"InvalidRepositoryTriggerBranchNameException"}, + {"shape":"InvalidRepositoryTriggerEventsException"}, + {"shape":"RepositoryTriggerNameRequiredException"}, + {"shape":"RepositoryTriggerDestinationArnRequiredException"}, + {"shape":"RepositoryTriggerBranchNameListRequiredException"}, + {"shape":"RepositoryTriggerEventsListRequiredException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ] + }, + "TestRepositoryTriggers":{ + "name":"TestRepositoryTriggers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TestRepositoryTriggersInput"}, + "output":{"shape":"TestRepositoryTriggersOutput"}, + "errors":[ + {"shape":"RepositoryDoesNotExistException"}, + {"shape":"RepositoryNameRequiredException"}, + {"shape":"InvalidRepositoryNameException"}, + {"shape":"RepositoryTriggersListRequiredException"}, + {"shape":"MaximumRepositoryTriggersExceededException"}, + {"shape":"InvalidRepositoryTriggerNameException"}, + {"shape":"InvalidRepositoryTriggerDestinationArnException"}, + {"shape":"InvalidRepositoryTriggerRegionException"}, + {"shape":"InvalidRepositoryTriggerCustomDataException"}, + {"shape":"MaximumBranchesExceededException"}, + {"shape":"InvalidRepositoryTriggerBranchNameException"}, + {"shape":"InvalidRepositoryTriggerEventsException"}, + {"shape":"RepositoryTriggerNameRequiredException"}, + {"shape":"RepositoryTriggerDestinationArnRequiredException"}, + {"shape":"RepositoryTriggerBranchNameListRequiredException"}, + {"shape":"RepositoryTriggerEventsListRequiredException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ] + }, + "UpdateDefaultBranch":{ + "name":"UpdateDefaultBranch", + 
"http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateDefaultBranchInput"}, + "errors":[ + {"shape":"RepositoryNameRequiredException"}, + {"shape":"RepositoryDoesNotExistException"}, + {"shape":"InvalidRepositoryNameException"}, + {"shape":"BranchNameRequiredException"}, + {"shape":"InvalidBranchNameException"}, + {"shape":"BranchDoesNotExistException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ] + }, + "UpdateRepositoryDescription":{ + "name":"UpdateRepositoryDescription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateRepositoryDescriptionInput"}, + "errors":[ + {"shape":"RepositoryNameRequiredException"}, + {"shape":"RepositoryDoesNotExistException"}, + {"shape":"InvalidRepositoryNameException"}, + {"shape":"InvalidRepositoryDescriptionException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ] + }, + "UpdateRepositoryName":{ + "name":"UpdateRepositoryName", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateRepositoryNameInput"}, + "errors":[ + {"shape":"RepositoryDoesNotExistException"}, + {"shape":"RepositoryNameExistsException"}, + {"shape":"RepositoryNameRequiredException"}, + {"shape":"InvalidRepositoryNameException"} + ] + } + }, + "shapes":{ + "AccountId":{"type":"string"}, + "AdditionalData":{"type":"string"}, + "Arn":{"type":"string"}, + "BatchGetRepositoriesInput":{ + "type":"structure", + "required":["repositoryNames"], + "members":{ + "repositoryNames":{"shape":"RepositoryNameList"} + } + }, + "BatchGetRepositoriesOutput":{ + "type":"structure", + "members":{ + "repositories":{"shape":"RepositoryMetadataList"}, + "repositoriesNotFound":{"shape":"RepositoryNotFoundList"} + } + }, + "BranchDoesNotExistException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "BranchInfo":{ + "type":"structure", + "members":{ + "branchName":{"shape":"BranchName"}, + "commitId":{"shape":"CommitId"} + } + }, + "BranchName":{ + "type":"string", + "max":100, + "min":1 + }, + "BranchNameExistsException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "BranchNameList":{ + "type":"list", + "member":{"shape":"BranchName"} + }, + "BranchNameRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "CloneUrlHttp":{"type":"string"}, + "CloneUrlSsh":{"type":"string"}, + "Commit":{ + "type":"structure", + "members":{ + "treeId":{"shape":"ObjectId"}, + "parents":{"shape":"ParentList"}, + "message":{"shape":"Message"}, + "author":{"shape":"UserInfo"}, + "committer":{"shape":"UserInfo"}, + "additionalData":{"shape":"AdditionalData"} + } + }, + "CommitDoesNotExistException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "CommitId":{"type":"string"}, + "CommitIdDoesNotExistException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "CommitIdRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "CreateBranchInput":{ + "type":"structure", + "required":[ + "repositoryName", + "branchName", + "commitId" + ], + "members":{ + "repositoryName":{"shape":"RepositoryName"}, + 
"branchName":{"shape":"BranchName"}, + "commitId":{"shape":"CommitId"} + } + }, + "CreateRepositoryInput":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "repositoryName":{"shape":"RepositoryName"}, + "repositoryDescription":{"shape":"RepositoryDescription"} + } + }, + "CreateRepositoryOutput":{ + "type":"structure", + "members":{ + "repositoryMetadata":{"shape":"RepositoryMetadata"} + } + }, + "CreationDate":{"type":"timestamp"}, + "Date":{"type":"string"}, + "DeleteRepositoryInput":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "repositoryName":{"shape":"RepositoryName"} + } + }, + "DeleteRepositoryOutput":{ + "type":"structure", + "members":{ + "repositoryId":{"shape":"RepositoryId"} + } + }, + "Email":{"type":"string"}, + "EncryptionIntegrityChecksFailedException":{ + "type":"structure", + "members":{ + }, + "exception":true, + "fault":true + }, + "EncryptionKeyAccessDeniedException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "EncryptionKeyDisabledException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "EncryptionKeyNotFoundException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "EncryptionKeyUnavailableException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "GetBranchInput":{ + "type":"structure", + "members":{ + "repositoryName":{"shape":"RepositoryName"}, + "branchName":{"shape":"BranchName"} + } + }, + "GetBranchOutput":{ + "type":"structure", + "members":{ + "branch":{"shape":"BranchInfo"} + } + }, + "GetCommitInput":{ + "type":"structure", + "required":[ + "repositoryName", + "commitId" + ], + "members":{ + "repositoryName":{"shape":"RepositoryName"}, + "commitId":{"shape":"ObjectId"} + } + }, + "GetCommitOutput":{ + "type":"structure", + "required":["commit"], + "members":{ + "commit":{"shape":"Commit"} + } + }, + "GetRepositoryInput":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "repositoryName":{"shape":"RepositoryName"} + } + }, + "GetRepositoryOutput":{ + "type":"structure", + "members":{ + "repositoryMetadata":{"shape":"RepositoryMetadata"} + } + }, + "GetRepositoryTriggersInput":{ + "type":"structure", + "members":{ + "repositoryName":{"shape":"RepositoryName"} + } + }, + "GetRepositoryTriggersOutput":{ + "type":"structure", + "members":{ + "configurationId":{"shape":"RepositoryTriggersConfigurationId"}, + "triggers":{"shape":"RepositoryTriggersList"} + } + }, + "InvalidBranchNameException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidCommitIdException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidContinuationTokenException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidOrderException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidRepositoryDescriptionException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidRepositoryNameException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidRepositoryTriggerBranchNameException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidRepositoryTriggerCustomDataException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidRepositoryTriggerDestinationArnException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidRepositoryTriggerEventsException":{ + "type":"structure", + "members":{ + }, + "exception":true + 
}, + "InvalidRepositoryTriggerNameException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidRepositoryTriggerRegionException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidSortByException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "LastModifiedDate":{"type":"timestamp"}, + "ListBranchesInput":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "repositoryName":{"shape":"RepositoryName"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListBranchesOutput":{ + "type":"structure", + "members":{ + "branches":{"shape":"BranchNameList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListRepositoriesInput":{ + "type":"structure", + "members":{ + "nextToken":{"shape":"NextToken"}, + "sortBy":{"shape":"SortByEnum"}, + "order":{"shape":"OrderEnum"} + } + }, + "ListRepositoriesOutput":{ + "type":"structure", + "members":{ + "repositories":{"shape":"RepositoryNameIdPairList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "MaximumBranchesExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "MaximumRepositoryNamesExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "MaximumRepositoryTriggersExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "Message":{"type":"string"}, + "Name":{"type":"string"}, + "NextToken":{"type":"string"}, + "ObjectId":{"type":"string"}, + "OrderEnum":{ + "type":"string", + "enum":[ + "ascending", + "descending" + ] + }, + "ParentList":{ + "type":"list", + "member":{"shape":"ObjectId"} + }, + "PutRepositoryTriggersInput":{ + "type":"structure", + "members":{ + "repositoryName":{"shape":"RepositoryName"}, + "triggers":{"shape":"RepositoryTriggersList"} + } + }, + "PutRepositoryTriggersOutput":{ + "type":"structure", + "members":{ + "configurationId":{"shape":"RepositoryTriggersConfigurationId"} + } + }, + "RepositoryDescription":{ + "type":"string", + "max":1000 + }, + "RepositoryDoesNotExistException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "RepositoryId":{"type":"string"}, + "RepositoryLimitExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "RepositoryMetadata":{ + "type":"structure", + "members":{ + "accountId":{"shape":"AccountId"}, + "repositoryId":{"shape":"RepositoryId"}, + "repositoryName":{"shape":"RepositoryName"}, + "repositoryDescription":{"shape":"RepositoryDescription"}, + "defaultBranch":{"shape":"BranchName"}, + "lastModifiedDate":{"shape":"LastModifiedDate"}, + "creationDate":{"shape":"CreationDate"}, + "cloneUrlHttp":{"shape":"CloneUrlHttp"}, + "cloneUrlSsh":{"shape":"CloneUrlSsh"}, + "Arn":{"shape":"Arn"} + } + }, + "RepositoryMetadataList":{ + "type":"list", + "member":{"shape":"RepositoryMetadata"} + }, + "RepositoryName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[\\\\w\\\\.-]+" + }, + "RepositoryNameExistsException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "RepositoryNameIdPair":{ + "type":"structure", + "members":{ + "repositoryName":{"shape":"RepositoryName"}, + "repositoryId":{"shape":"RepositoryId"} + } + }, + "RepositoryNameIdPairList":{ + "type":"list", + "member":{"shape":"RepositoryNameIdPair"} + }, + "RepositoryNameList":{ + "type":"list", + "member":{"shape":"RepositoryName"} + }, + "RepositoryNameRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "RepositoryNamesRequiredException":{ + 
"type":"structure", + "members":{ + }, + "exception":true + }, + "RepositoryNotFoundList":{ + "type":"list", + "member":{"shape":"RepositoryName"} + }, + "RepositoryTrigger":{ + "type":"structure", + "members":{ + "name":{"shape":"RepositoryTriggerName"}, + "destinationArn":{"shape":"Arn"}, + "customData":{"shape":"RepositoryTriggerCustomData"}, + "branches":{"shape":"BranchNameList"}, + "events":{"shape":"RepositoryTriggerEventList"} + } + }, + "RepositoryTriggerBranchNameListRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "RepositoryTriggerCustomData":{"type":"string"}, + "RepositoryTriggerDestinationArnRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "RepositoryTriggerEventEnum":{ + "type":"string", + "enum":[ + "all", + "updateReference", + "createReference", + "deleteReference" + ] + }, + "RepositoryTriggerEventList":{ + "type":"list", + "member":{"shape":"RepositoryTriggerEventEnum"} + }, + "RepositoryTriggerEventsListRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "RepositoryTriggerExecutionFailure":{ + "type":"structure", + "members":{ + "trigger":{"shape":"RepositoryTriggerName"}, + "failureMessage":{"shape":"RepositoryTriggerExecutionFailureMessage"} + } + }, + "RepositoryTriggerExecutionFailureList":{ + "type":"list", + "member":{"shape":"RepositoryTriggerExecutionFailure"} + }, + "RepositoryTriggerExecutionFailureMessage":{"type":"string"}, + "RepositoryTriggerName":{"type":"string"}, + "RepositoryTriggerNameList":{ + "type":"list", + "member":{"shape":"RepositoryTriggerName"} + }, + "RepositoryTriggerNameRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "RepositoryTriggersConfigurationId":{"type":"string"}, + "RepositoryTriggersList":{ + "type":"list", + "member":{"shape":"RepositoryTrigger"} + }, + "RepositoryTriggersListRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "SortByEnum":{ + "type":"string", + "enum":[ + "repositoryName", + "lastModifiedDate" + ] + }, + "TestRepositoryTriggersInput":{ + "type":"structure", + "members":{ + "repositoryName":{"shape":"RepositoryName"}, + "triggers":{"shape":"RepositoryTriggersList"} + } + }, + "TestRepositoryTriggersOutput":{ + "type":"structure", + "members":{ + "successfulExecutions":{"shape":"RepositoryTriggerNameList"}, + "failedExecutions":{"shape":"RepositoryTriggerExecutionFailureList"} + } + }, + "UpdateDefaultBranchInput":{ + "type":"structure", + "required":[ + "repositoryName", + "defaultBranchName" + ], + "members":{ + "repositoryName":{"shape":"RepositoryName"}, + "defaultBranchName":{"shape":"BranchName"} + } + }, + "UpdateRepositoryDescriptionInput":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "repositoryName":{"shape":"RepositoryName"}, + "repositoryDescription":{"shape":"RepositoryDescription"} + } + }, + "UpdateRepositoryNameInput":{ + "type":"structure", + "required":[ + "oldName", + "newName" + ], + "members":{ + "oldName":{"shape":"RepositoryName"}, + "newName":{"shape":"RepositoryName"} + } + }, + "UserInfo":{ + "type":"structure", + "members":{ + "name":{"shape":"Name"}, + "email":{"shape":"Email"}, + "date":{"shape":"Date"} + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/codecommit/2015-04-13/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/codecommit/2015-04-13/docs-2.json new file mode 100644 index 000000000..317a852a2 --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go/models/apis/codecommit/2015-04-13/docs-2.json @@ -0,0 +1,632 @@ +{ + "version": "2.0", + "service": "AWS CodeCommit

    This is the AWS CodeCommit API Reference. This reference provides descriptions of the operations and data types for the AWS CodeCommit API.

    You can use the AWS CodeCommit API to work with the following objects:

    • Repositories, by calling the following:
      • BatchGetRepositories, which returns information about one or more repositories associated with your AWS account
      • CreateRepository, which creates an AWS CodeCommit repository
      • DeleteRepository, which deletes an AWS CodeCommit repository
      • GetRepository, which returns information about a specified repository
      • ListRepositories, which lists all AWS CodeCommit repositories associated with your AWS account
      • UpdateRepositoryDescription, which sets or updates the description of the repository
      • UpdateRepositoryName, which changes the name of the repository. If you change the name of a repository, no other users of that repository will be able to access it until you send them the new HTTPS or SSH URL to use.
    • Branches, by calling the following:
      • CreateBranch, which creates a new branch in a specified repository
      • GetBranch, which returns information about a specified branch
      • ListBranches, which lists all branches for a specified repository
      • UpdateDefaultBranch, which changes the default branch for a repository
    • Information about committed code in a repository, by calling the following:
      • GetCommit, which returns information about a commit, including commit messages and committer information.
    • Triggers, by calling the following:
      • GetRepositoryTriggers, which returns information about triggers configured for a repository
      • PutRepositoryTriggers, which replaces all triggers for a repository and can be used to create or delete triggers
      • TestRepositoryTriggers, which tests the functionality of a repository trigger by sending data to the trigger target

    For information about how to use AWS CodeCommit, see the AWS CodeCommit User Guide.
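    A minimal Go sketch of the corresponding client usage; the repository name and region are placeholders, and the generated client is assumed to live under service/codecommit in this vendored SDK:

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/codecommit"
        )

        func main() {
            sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
            svc := codecommit.New(sess)

            out, err := svc.GetRepository(&codecommit.GetRepositoryInput{
                RepositoryName: aws.String("MyDemoRepo"), // placeholder name
            })
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println(aws.StringValue(out.RepositoryMetadata.CloneUrlHttp))
        }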

    ", + "operations": { + "BatchGetRepositories": "

    Returns information about one or more repositories.

    The description field for a repository accepts all HTML characters and all valid Unicode characters. Applications that do not HTML-encode the description and display it in a web page could expose users to potentially malicious code. Make sure that you HTML-encode the description field in any application that uses this API to display the repository description on a web page.
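    In Go, for example, escaping before rendering is one standard-library call. A sketch, assuming a *codecommit.RepositoryMetadata obtained from one of the calls above:

        package render

        import (
            "fmt"
            "html"
            "io"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/codecommit"
        )

        // writeDescription HTML-encodes the user-controlled description before
        // writing it into a page, per the warning above.
        func writeDescription(w io.Writer, meta *codecommit.RepositoryMetadata) {
            desc := aws.StringValue(meta.RepositoryDescription)
            fmt.Fprintf(w, "<p>%s</p>", html.EscapeString(desc))
        }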

    ", + "CreateBranch": "

    Creates a new branch in a repository and points the branch to a commit.

    Calling the create branch operation does not set a repository's default branch. To do this, call the update default branch operation.
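    A sketch of the resulting two-step flow; identifiers are placeholders, and svc is assumed to be a *codecommit.CodeCommit client built as in the earlier example:

        // Step 1: create the branch at a known commit.
        _, err := svc.CreateBranch(&codecommit.CreateBranchInput{
            RepositoryName: aws.String("MyDemoRepo"),
            BranchName:     aws.String("release"),
            CommitId:       aws.String("EXAMPLE_COMMIT_ID"), // replace with a real commit ID
        })
        if err != nil {
            log.Fatal(err)
        }

        // Step 2: creating a branch does not make it the default; do that explicitly.
        _, err = svc.UpdateDefaultBranch(&codecommit.UpdateDefaultBranchInput{
            RepositoryName:    aws.String("MyDemoRepo"),
            DefaultBranchName: aws.String("release"),
        })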

    ", + "CreateRepository": "

    Creates a new, empty repository.

    ", + "DeleteRepository": "

    Deletes a repository. If a specified repository was already deleted, a null repository ID will be returned.

    Deleting a repository also deletes all associated objects and metadata. After a repository is deleted, all future push calls to the deleted repository will fail.", + "GetBranch": "

    Returns information about a repository branch, including its name and the last commit ID.

    ", + "GetCommit": "

    Returns information about a commit, including commit message and committer information.

    ", + "GetRepository": "

    Returns information about a repository.

    The description field for a repository accepts all HTML characters and all valid Unicode characters. Applications that do not HTML-encode the description and display it in a web page could expose users to potentially malicious code. Make sure that you HTML-encode the description field in any application that uses this API to display the repository description on a web page.

    ", + "GetRepositoryTriggers": "

    Gets information about triggers configured for a repository.

    ", + "ListBranches": "

    Gets information about one or more branches in a repository.

    ", + "ListRepositories": "

    Gets information about one or more repositories.

    ", + "PutRepositoryTriggers": "

    Replaces all triggers for a repository. This can be used to create or delete triggers.

    ", + "TestRepositoryTriggers": "

    Tests the functionality of repository triggers by sending information to the trigger target. If real data is available in the repository, the test will send data from the last commit. If no data is available, sample data will be generated.

    ", + "UpdateDefaultBranch": "

    Sets or changes the default branch name for the specified repository.

    If you use this operation to change the default branch name to the current default branch name, a success message is returned even though the default branch did not change.

    ", + "UpdateRepositoryDescription": "

    Sets or changes the comment or description for a repository.

    The description field for a repository accepts all HTML characters and all valid Unicode characters. Applications that do not HTML-encode the description and display it in a web page could expose users to potentially malicious code. Make sure that you HTML-encode the description field in any application that uses this API to display the repository description on a web page.

    ", + "UpdateRepositoryName": "

    Renames a repository. The repository name must be unique across the calling AWS account. In addition, repository names are limited to 100 alphanumeric, dash, and underscore characters, and cannot include certain characters; the suffix \".git\" is prohibited. For a full description of the limits on repository names, see Limits in the AWS CodeCommit User Guide.
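    The same constraints can be pre-checked client-side. A hypothetical Go helper mirroring the RepositoryName shape in api-2.json ([\\w\\.-]+, 1-100 characters) plus the documented \".git\" suffix rule; the service remains authoritative:

        package namecheck

        import (
            "regexp"
            "strings"
        )

        var repoNameRE = regexp.MustCompile(`^[\w.-]{1,100}$`)

        // isValidRepositoryName is an illustrative convenience check only.
        func isValidRepositoryName(name string) bool {
            return repoNameRE.MatchString(name) && !strings.HasSuffix(name, ".git")
        }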

    " + }, + "shapes": { + "AccountId": { + "base": null, + "refs": { + "RepositoryMetadata$accountId": "

    The ID of the AWS account associated with the repository.

    " + } + }, + "AdditionalData": { + "base": null, + "refs": { + "Commit$additionalData": "

    Any additional data associated with the specified commit.

    " + } + }, + "Arn": { + "base": null, + "refs": { + "RepositoryMetadata$Arn": "

    The Amazon Resource Name (ARN) of the repository.

    ", + "RepositoryTrigger$destinationArn": "

    The ARN of the resource that is the target for a trigger. For example, the ARN of a topic in Amazon Simple Notification Service (SNS).

    " + } + }, + "BatchGetRepositoriesInput": { + "base": "

    Represents the input of a batch get repositories operation.

    ", + "refs": { + } + }, + "BatchGetRepositoriesOutput": { + "base": "

    Represents the output of a batch get repositories operation.

    ", + "refs": { + } + }, + "BranchDoesNotExistException": { + "base": "

    The specified branch does not exist.

    ", + "refs": { + } + }, + "BranchInfo": { + "base": "

    Returns information about a branch.

    ", + "refs": { + "GetBranchOutput$branch": "

    Information about the branch, including its name and the ID of its last commit.

    " + } + }, + "BranchName": { + "base": null, + "refs": { + "BranchInfo$branchName": "

    The name of the branch.

    ", + "BranchNameList$member": null, + "CreateBranchInput$branchName": "

    The name of the new branch to create.

    ", + "GetBranchInput$branchName": "

    The name of the branch for which you want to retrieve information.

    ", + "RepositoryMetadata$defaultBranch": "

    The repository's default branch name.

    ", + "UpdateDefaultBranchInput$defaultBranchName": "

    The name of the branch to set as the default.

    " + } + }, + "BranchNameExistsException": { + "base": "

    The specified branch name already exists.

    ", + "refs": { + } + }, + "BranchNameList": { + "base": null, + "refs": { + "ListBranchesOutput$branches": "

    The list of branch names.

    ", + "RepositoryTrigger$branches": "

    The branches that will be included in the trigger configuration. If no branches are specified, the trigger will apply to all branches.

    " + } + }, + "BranchNameRequiredException": { + "base": "

    A branch name is required but was not specified.

    ", + "refs": { + } + }, + "CloneUrlHttp": { + "base": null, + "refs": { + "RepositoryMetadata$cloneUrlHttp": "

    The URL to use for cloning the repository over HTTPS.

    " + } + }, + "CloneUrlSsh": { + "base": null, + "refs": { + "RepositoryMetadata$cloneUrlSsh": "

    The URL to use for cloning the repository over SSH.

    " + } + }, + "Commit": { + "base": "

    Returns information about a specific commit.

    ", + "refs": { + "GetCommitOutput$commit": "

    Information about the specified commit.

    " + } + }, + "CommitDoesNotExistException": { + "base": "

    The specified commit does not exist or no commit was specified, and the specified repository has no default branch.

    ", + "refs": { + } + }, + "CommitId": { + "base": null, + "refs": { + "BranchInfo$commitId": "

    The ID of the last commit made to the branch.

    ", + "CreateBranchInput$commitId": "

    The ID of the commit to point the new branch to.

    " + } + }, + "CommitIdDoesNotExistException": { + "base": "

    The specified commit ID does not exist.

    ", + "refs": { + } + }, + "CommitIdRequiredException": { + "base": "

    A commit ID was not specified.

    ", + "refs": { + } + }, + "CreateBranchInput": { + "base": "

    Represents the input of a create branch operation.

    ", + "refs": { + } + }, + "CreateRepositoryInput": { + "base": "

    Represents the input of a create repository operation.

    ", + "refs": { + } + }, + "CreateRepositoryOutput": { + "base": "

    Represents the output of a create repository operation.

    ", + "refs": { + } + }, + "CreationDate": { + "base": null, + "refs": { + "RepositoryMetadata$creationDate": "

    The date and time the repository was created, in timestamp format.

    " + } + }, + "Date": { + "base": null, + "refs": { + "UserInfo$date": "

    The date when the specified commit was pushed to the repository.

    " + } + }, + "DeleteRepositoryInput": { + "base": "

    Represents the input of a delete repository operation.

    ", + "refs": { + } + }, + "DeleteRepositoryOutput": { + "base": "

    Represents the output of a delete repository operation.

    ", + "refs": { + } + }, + "Email": { + "base": null, + "refs": { + "UserInfo$email": "

    The email address associated with the user who made the commit, if any.

    " + } + }, + "EncryptionIntegrityChecksFailedException": { + "base": "

    An encryption integrity check failed.

    ", + "refs": { + } + }, + "EncryptionKeyAccessDeniedException": { + "base": "

    An encryption key could not be accessed.

    ", + "refs": { + } + }, + "EncryptionKeyDisabledException": { + "base": "

    The encryption key is disabled.

    ", + "refs": { + } + }, + "EncryptionKeyNotFoundException": { + "base": "

    No encryption key was found.

    ", + "refs": { + } + }, + "EncryptionKeyUnavailableException": { + "base": "

    The encryption key is not available.

    ", + "refs": { + } + }, + "GetBranchInput": { + "base": "

    Represents the input of a get branch operation.

    ", + "refs": { + } + }, + "GetBranchOutput": { + "base": "

    Represents the output of a get branch operation.

    ", + "refs": { + } + }, + "GetCommitInput": { + "base": "

    Represents the input of a get commit operation.

    ", + "refs": { + } + }, + "GetCommitOutput": { + "base": "

    Represents the output of a get commit operation.

    ", + "refs": { + } + }, + "GetRepositoryInput": { + "base": "

    Represents the input of a get repository operation.

    ", + "refs": { + } + }, + "GetRepositoryOutput": { + "base": "

    Represents the output of a get repository operation.

    ", + "refs": { + } + }, + "GetRepositoryTriggersInput": { + "base": "

    Represents the input of a get repository triggers operation.

    ", + "refs": { + } + }, + "GetRepositoryTriggersOutput": { + "base": "

    Represents the output of a get repository triggers operation.

    ", + "refs": { + } + }, + "InvalidBranchNameException": { + "base": "

    The specified branch name is not valid.

    ", + "refs": { + } + }, + "InvalidCommitIdException": { + "base": "

    The specified commit ID is not valid.

    ", + "refs": { + } + }, + "InvalidContinuationTokenException": { + "base": "

    The specified continuation token is not valid.

    ", + "refs": { + } + }, + "InvalidOrderException": { + "base": "

    The specified sort order is not valid.

    ", + "refs": { + } + }, + "InvalidRepositoryDescriptionException": { + "base": "

    The specified repository description is not valid.

    ", + "refs": { + } + }, + "InvalidRepositoryNameException": { + "base": "

    At least one specified repository name is not valid.

    This exception only occurs when a specified repository name is not valid. Other exceptions occur when a required repository parameter is missing, or when a specified repository does not exist.

    ", + "refs": { + } + }, + "InvalidRepositoryTriggerBranchNameException": { + "base": "

    One or more of the branch names specified for the trigger are not valid.

    ", + "refs": { + } + }, + "InvalidRepositoryTriggerCustomDataException": { + "base": "

    The custom data provided for the trigger is not valid.

    ", + "refs": { + } + }, + "InvalidRepositoryTriggerDestinationArnException": { + "base": "

    The Amazon Resource Name (ARN) for the trigger is not valid for the specified destination. The most common reason for this error is that the ARN does not meet the requirements for the service type.

    ", + "refs": { + } + }, + "InvalidRepositoryTriggerEventsException": { + "base": "

    One or more of the events specified for the trigger are not valid. Check to make sure that all events specified match the requirements for allowed events.

    ", + "refs": { + } + }, + "InvalidRepositoryTriggerNameException": { + "base": "

    The name of the trigger is not valid.

    ", + "refs": { + } + }, + "InvalidRepositoryTriggerRegionException": { + "base": "

    The region for the trigger target does not match the region for the repository. Triggers must be created in the same region as the target for the trigger.

    ", + "refs": { + } + }, + "InvalidSortByException": { + "base": "

    The specified sort by value is not valid.

    ", + "refs": { + } + }, + "LastModifiedDate": { + "base": null, + "refs": { + "RepositoryMetadata$lastModifiedDate": "

    The date and time the repository was last modified, in timestamp format.

    " + } + }, + "ListBranchesInput": { + "base": "

    Represents the input of a list branches operation.

    ", + "refs": { + } + }, + "ListBranchesOutput": { + "base": "

    Represents the output of a list branches operation.

    ", + "refs": { + } + }, + "ListRepositoriesInput": { + "base": "

    Represents the input of a list repositories operation.

    ", + "refs": { + } + }, + "ListRepositoriesOutput": { + "base": "

    Represents the output of a list repositories operation.

    ", + "refs": { + } + }, + "MaximumBranchesExceededException": { + "base": "

    The number of branches for the trigger was exceeded.

    ", + "refs": { + } + }, + "MaximumRepositoryNamesExceededException": { + "base": "

    The maximum number of allowed repository names was exceeded. Currently, this number is 25.

    ", + "refs": { + } + }, + "MaximumRepositoryTriggersExceededException": { + "base": "

    The number of triggers allowed for the repository was exceeded.

    ", + "refs": { + } + }, + "Message": { + "base": null, + "refs": { + "Commit$message": "

    The message associated with the specified commit.

    " + } + }, + "Name": { + "base": null, + "refs": { + "UserInfo$name": "

    The name of the user who made the specified commit.

    " + } + }, + "NextToken": { + "base": null, + "refs": { + "ListBranchesInput$nextToken": "

    An enumeration token that allows the operation to batch the results.

    ", + "ListBranchesOutput$nextToken": "

    An enumeration token that can be used in a subsequent request to return the next batch of results.

    ", + "ListRepositoriesInput$nextToken": "

    An enumeration token that allows the operation to batch the results of the operation. Batch sizes are 1,000 for list repository operations. When the client sends the token back to AWS CodeCommit, another page of 1,000 records is retrieved.

    ", + "ListRepositoriesOutput$nextToken": "

    An enumeration token that allows the operation to batch the results of the operation. Batch sizes are 1,000 for list repository operations. When the client sends the token back to AWS CodeCommit, another page of 1,000 records is retrieved.
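    Because paginators-1.json defines pagination for ListRepositories, the generated client exposes a Pages helper that manages the nextToken round trips. A sketch, with svc as in the earlier example:

        // Iterate every repository; the paginator resends nextToken until the
        // final page (up to 1,000 records per page) has been consumed.
        err := svc.ListRepositoriesPages(&codecommit.ListRepositoriesInput{},
            func(page *codecommit.ListRepositoriesOutput, lastPage bool) bool {
                for _, r := range page.Repositories {
                    fmt.Println(aws.StringValue(r.RepositoryName))
                }
                return true // continue paging
            })
        if err != nil {
            log.Fatal(err)
        }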

    " + } + }, + "ObjectId": { + "base": null, + "refs": { + "Commit$treeId": "

    Tree information for the specified commit.

    ", + "GetCommitInput$commitId": "

    The commit ID.

    ", + "ParentList$member": null + } + }, + "OrderEnum": { + "base": null, + "refs": { + "ListRepositoriesInput$order": "

    The order in which to sort the results of a list repositories operation.

    " + } + }, + "ParentList": { + "base": null, + "refs": { + "Commit$parents": "

    The parent list for the specified commit.

    " + } + }, + "PutRepositoryTriggersInput": { + "base": "

    Represents the input of a put repository triggers operation.

    ", + "refs": { + } + }, + "PutRepositoryTriggersOutput": { + "base": "

    Represents the output of a put repository triggers operation.

    ", + "refs": { + } + }, + "RepositoryDescription": { + "base": null, + "refs": { + "CreateRepositoryInput$repositoryDescription": "

    A comment or description about the new repository.

    The description field for a repository accepts all HTML characters and all valid Unicode characters. Applications that do not HTML-encode the description and display it in a web page could expose users to potentially malicious code. Make sure that you HTML-encode the description field in any application that uses this API to display the repository description on a web page.

    ", + "RepositoryMetadata$repositoryDescription": "

    A comment or description about the repository.

    ", + "UpdateRepositoryDescriptionInput$repositoryDescription": "

    The new comment or description for the specified repository. Repository descriptions are limited to 1,000 characters.

    " + } + }, + "RepositoryDoesNotExistException": { + "base": "

    The specified repository does not exist.

    ", + "refs": { + } + }, + "RepositoryId": { + "base": null, + "refs": { + "DeleteRepositoryOutput$repositoryId": "

    The ID of the repository that was deleted.

    ", + "RepositoryMetadata$repositoryId": "

    The ID of the repository.

    ", + "RepositoryNameIdPair$repositoryId": "

    The ID associated with the repository.

    " + } + }, + "RepositoryLimitExceededException": { + "base": "

    A repository resource limit was exceeded.

    ", + "refs": { + } + }, + "RepositoryMetadata": { + "base": "

    Information about a repository.

    ", + "refs": { + "CreateRepositoryOutput$repositoryMetadata": "

    Information about the newly created repository.

    ", + "GetRepositoryOutput$repositoryMetadata": "

    Information about the repository.

    ", + "RepositoryMetadataList$member": null + } + }, + "RepositoryMetadataList": { + "base": null, + "refs": { + "BatchGetRepositoriesOutput$repositories": "

    A list of repositories returned by the batch get repositories operation.

    " + } + }, + "RepositoryName": { + "base": null, + "refs": { + "CreateBranchInput$repositoryName": "

    The name of the repository in which you want to create the new branch.

    ", + "CreateRepositoryInput$repositoryName": "

    The name of the new repository to be created.

    The repository name must be unique across the calling AWS account. In addition, repository names are limited to 100 alphanumeric, dash, and underscore characters, and cannot include certain characters. For a full description of the limits on repository names, see Limits in the AWS CodeCommit User Guide. The suffix \".git\" is prohibited.

    ", + "DeleteRepositoryInput$repositoryName": "

    The name of the repository to delete.

    ", + "GetBranchInput$repositoryName": "

    The name of the repository that contains the branch for which you want to retrieve information.

    ", + "GetCommitInput$repositoryName": "

    The name of the repository to which the commit was made.

    ", + "GetRepositoryInput$repositoryName": "

    The name of the repository to get information about.

    ", + "GetRepositoryTriggersInput$repositoryName": "

    The name of the repository for which the trigger is configured.

    ", + "ListBranchesInput$repositoryName": "

    The name of the repository that contains the branches.

    ", + "PutRepositoryTriggersInput$repositoryName": "

    The name of the repository where you want to create or update the trigger.

    ", + "RepositoryMetadata$repositoryName": "

    The repository's name.

    ", + "RepositoryNameIdPair$repositoryName": "

    The name associated with the repository.

    ", + "RepositoryNameList$member": null, + "RepositoryNotFoundList$member": null, + "TestRepositoryTriggersInput$repositoryName": "

    The name of the repository in which to test the triggers.

    ", + "UpdateDefaultBranchInput$repositoryName": "

    The name of the repository to set or change the default branch for.

    ", + "UpdateRepositoryDescriptionInput$repositoryName": "

    The name of the repository to set or change the comment or description for.

    ", + "UpdateRepositoryNameInput$oldName": "

    The existing name of the repository.

    ", + "UpdateRepositoryNameInput$newName": "

    The new name for the repository.

    " + } + }, + "RepositoryNameExistsException": { + "base": "

    The specified repository name already exists.

    ", + "refs": { + } + }, + "RepositoryNameIdPair": { + "base": "

    Information about a repository name and ID.

    ", + "refs": { + "RepositoryNameIdPairList$member": null + } + }, + "RepositoryNameIdPairList": { + "base": null, + "refs": { + "ListRepositoriesOutput$repositories": "

    The list of repositories returned by the list repositories operation.

    " + } + }, + "RepositoryNameList": { + "base": null, + "refs": { + "BatchGetRepositoriesInput$repositoryNames": "

    The names of the repositories to get information about.

    " + } + }, + "RepositoryNameRequiredException": { + "base": "

    A repository name is required but was not specified.

    ", + "refs": { + } + }, + "RepositoryNamesRequiredException": { + "base": "

    A repository names object is required but was not specified.

    ", + "refs": { + } + }, + "RepositoryNotFoundList": { + "base": null, + "refs": { + "BatchGetRepositoriesOutput$repositoriesNotFound": "

    Returns a list of repository names for which information could not be found.

    " + } + }, + "RepositoryTrigger": { + "base": "

    Information about a trigger for a repository.

    ", + "refs": { + "RepositoryTriggersList$member": null + } + }, + "RepositoryTriggerBranchNameListRequiredException": { + "base": "

    At least one branch name is required but was not specified in the trigger configuration.

    ", + "refs": { + } + }, + "RepositoryTriggerCustomData": { + "base": null, + "refs": { + "RepositoryTrigger$customData": "

    Any custom data associated with the trigger that will be included in the information sent to the target of the trigger.

    " + } + }, + "RepositoryTriggerDestinationArnRequiredException": { + "base": "

    A destination ARN for the target service for the trigger is required but was not specified.

    ", + "refs": { + } + }, + "RepositoryTriggerEventEnum": { + "base": null, + "refs": { + "RepositoryTriggerEventList$member": null + } + }, + "RepositoryTriggerEventList": { + "base": null, + "refs": { + "RepositoryTrigger$events": "

    The repository events that will cause the trigger to run actions in another service, such as sending a notification through Amazon Simple Notification Service (SNS). If no events are specified, the trigger will run for all repository events.
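    Putting this together, a sketch that installs a single all-events trigger targeting an SNS topic; the ARN and names are placeholders, svc is as in the earlier example, and note that PutRepositoryTriggers replaces the repository's entire trigger set:

        _, err := svc.PutRepositoryTriggers(&codecommit.PutRepositoryTriggersInput{
            RepositoryName: aws.String("MyDemoRepo"),
            Triggers: []*codecommit.RepositoryTrigger{{
                Name:           aws.String("all-events-to-sns"),
                DestinationArn: aws.String("arn:aws:sns:us-east-1:123456789012:MyTopic"),
                Branches:       []*string{}, // empty list: trigger applies to all branches
                Events:         []*string{aws.String("all")},
            }},
        })
        if err != nil {
            log.Fatal(err)
        }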

    " + } + }, + "RepositoryTriggerEventsListRequiredException": { + "base": "

    At least one event for the trigger is required but was not specified.

    ", + "refs": { + } + }, + "RepositoryTriggerExecutionFailure": { + "base": "

    A trigger failed to run.

    ", + "refs": { + "RepositoryTriggerExecutionFailureList$member": null + } + }, + "RepositoryTriggerExecutionFailureList": { + "base": null, + "refs": { + "TestRepositoryTriggersOutput$failedExecutions": "

    The list of triggers that were not able to be tested. This list provides the names of the triggers that could not be tested, separated by commas.

    " + } + }, + "RepositoryTriggerExecutionFailureMessage": { + "base": null, + "refs": { + "RepositoryTriggerExecutionFailure$failureMessage": "

    Additional message information about the trigger that did not run.

    " + } + }, + "RepositoryTriggerName": { + "base": null, + "refs": { + "RepositoryTrigger$name": "

    The name of the trigger.

    ", + "RepositoryTriggerExecutionFailure$trigger": "

    The name of the trigger that did not run.

    ", + "RepositoryTriggerNameList$member": null + } + }, + "RepositoryTriggerNameList": { + "base": null, + "refs": { + "TestRepositoryTriggersOutput$successfulExecutions": "

    The list of triggers that were successfully tested. This list provides the names of the triggers that were successfully tested, separated by commas.

    " + } + }, + "RepositoryTriggerNameRequiredException": { + "base": "

    A name for the trigger is required but was not specified.

    ", + "refs": { + } + }, + "RepositoryTriggersConfigurationId": { + "base": null, + "refs": { + "GetRepositoryTriggersOutput$configurationId": "

    The system-generated unique ID for the trigger.

    ", + "PutRepositoryTriggersOutput$configurationId": "

    The system-generated unique ID for the create or update operation.

    " + } + }, + "RepositoryTriggersList": { + "base": null, + "refs": { + "GetRepositoryTriggersOutput$triggers": "

    The JSON block of configuration information for each trigger.

    ", + "PutRepositoryTriggersInput$triggers": "

    The JSON block of configuration information for each trigger.

    ", + "TestRepositoryTriggersInput$triggers": "

    The list of triggers to test.

    " + } + }, + "RepositoryTriggersListRequiredException": { + "base": "

    The list of triggers for the repository is required but was not specified.

    ", + "refs": { + } + }, + "SortByEnum": { + "base": null, + "refs": { + "ListRepositoriesInput$sortBy": "

    The criteria used to sort the results of a list repositories operation.

    " + } + }, + "TestRepositoryTriggersInput": { + "base": "

    Represents the input of a test repository triggers operation.

    ", + "refs": { + } + }, + "TestRepositoryTriggersOutput": { + "base": "

    Represents the output of a test repository triggers operation.

    ", + "refs": { + } + }, + "UpdateDefaultBranchInput": { + "base": "

    Represents the input of an update default branch operation.

    ", + "refs": { + } + }, + "UpdateRepositoryDescriptionInput": { + "base": "

    Represents the input of an update repository description operation.

    ", + "refs": { + } + }, + "UpdateRepositoryNameInput": { + "base": "

    Represents the input of an update repository name operation.

    ", + "refs": { + } + }, + "UserInfo": { + "base": "

    Information about the user who made a specified commit.

    ", + "refs": { + "Commit$author": "

    Information about the author of the specified commit.

    ", + "Commit$committer": "

    Information about the person who committed the specified commit, also known as the committer. For more information about the difference between an author and a committer in Git, see Viewing the Commit History in Pro Git by Scott Chacon and Ben Straub.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/codecommit/2015-04-13/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/codecommit/2015-04-13/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/codecommit/2015-04-13/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/codecommit/2015-04-13/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/codecommit/2015-04-13/paginators-1.json new file mode 100644 index 000000000..e4c9a9038 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/codecommit/2015-04-13/paginators-1.json @@ -0,0 +1,14 @@ +{ + "pagination": { + "ListBranches": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "branches" + }, + "ListRepositories": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "repositories" + } + } +} \ No newline at end of file diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/codedeploy/2014-10-06/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/codedeploy/2014-10-06/api-2.json new file mode 100644 index 000000000..fa8e09d0a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/codedeploy/2014-10-06/api-2.json @@ -0,0 +1,1954 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2014-10-06", + "endpointPrefix":"codedeploy", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"CodeDeploy", + "serviceFullName":"AWS CodeDeploy", + "signatureVersion":"v4", + "targetPrefix":"CodeDeploy_20141006", + "timestampFormat":"unixTimestamp" + }, + "operations":{ + "AddTagsToOnPremisesInstances":{ + "name":"AddTagsToOnPremisesInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddTagsToOnPremisesInstancesInput"}, + "errors":[ + {"shape":"InstanceNameRequiredException"}, + {"shape":"TagRequiredException"}, + {"shape":"InvalidTagException"}, + {"shape":"TagLimitExceededException"}, + {"shape":"InstanceLimitExceededException"}, + {"shape":"InstanceNotRegisteredException"} + ] + }, + "BatchGetApplicationRevisions":{ + "name":"BatchGetApplicationRevisions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchGetApplicationRevisionsInput"}, + "output":{"shape":"BatchGetApplicationRevisionsOutput"}, + "errors":[ + {"shape":"ApplicationDoesNotExistException"}, + {"shape":"ApplicationNameRequiredException"}, + {"shape":"InvalidApplicationNameException"}, + {"shape":"RevisionRequiredException"}, + {"shape":"InvalidRevisionException"}, + {"shape":"BatchLimitExceededException"} + ] + }, + "BatchGetApplications":{ + "name":"BatchGetApplications", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchGetApplicationsInput"}, + "output":{"shape":"BatchGetApplicationsOutput"}, + "errors":[ + {"shape":"ApplicationNameRequiredException"}, + {"shape":"InvalidApplicationNameException"}, + {"shape":"ApplicationDoesNotExistException"}, + {"shape":"BatchLimitExceededException"} + ] + }, + "BatchGetDeploymentGroups":{ + "name":"BatchGetDeploymentGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchGetDeploymentGroupsInput"}, + "output":{"shape":"BatchGetDeploymentGroupsOutput"}, + "errors":[ + {"shape":"ApplicationNameRequiredException"}, + {"shape":"InvalidApplicationNameException"}, + {"shape":"ApplicationDoesNotExistException"}, + 
{"shape":"DeploymentGroupNameRequiredException"}, + {"shape":"InvalidDeploymentGroupNameException"}, + {"shape":"BatchLimitExceededException"} + ] + }, + "BatchGetDeploymentInstances":{ + "name":"BatchGetDeploymentInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchGetDeploymentInstancesInput"}, + "output":{"shape":"BatchGetDeploymentInstancesOutput"}, + "errors":[ + {"shape":"DeploymentIdRequiredException"}, + {"shape":"DeploymentDoesNotExistException"}, + {"shape":"InstanceIdRequiredException"}, + {"shape":"InvalidDeploymentIdException"}, + {"shape":"InvalidInstanceNameException"}, + {"shape":"BatchLimitExceededException"} + ] + }, + "BatchGetDeployments":{ + "name":"BatchGetDeployments", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchGetDeploymentsInput"}, + "output":{"shape":"BatchGetDeploymentsOutput"}, + "errors":[ + {"shape":"DeploymentIdRequiredException"}, + {"shape":"InvalidDeploymentIdException"}, + {"shape":"BatchLimitExceededException"} + ] + }, + "BatchGetOnPremisesInstances":{ + "name":"BatchGetOnPremisesInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchGetOnPremisesInstancesInput"}, + "output":{"shape":"BatchGetOnPremisesInstancesOutput"}, + "errors":[ + {"shape":"InstanceNameRequiredException"}, + {"shape":"InvalidInstanceNameException"}, + {"shape":"BatchLimitExceededException"} + ] + }, + "CreateApplication":{ + "name":"CreateApplication", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateApplicationInput"}, + "output":{"shape":"CreateApplicationOutput"}, + "errors":[ + {"shape":"ApplicationNameRequiredException"}, + {"shape":"InvalidApplicationNameException"}, + {"shape":"ApplicationAlreadyExistsException"}, + {"shape":"ApplicationLimitExceededException"} + ] + }, + "CreateDeployment":{ + "name":"CreateDeployment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDeploymentInput"}, + "output":{"shape":"CreateDeploymentOutput"}, + "errors":[ + {"shape":"ApplicationNameRequiredException"}, + {"shape":"InvalidApplicationNameException"}, + {"shape":"ApplicationDoesNotExistException"}, + {"shape":"DeploymentGroupNameRequiredException"}, + {"shape":"InvalidDeploymentGroupNameException"}, + {"shape":"DeploymentGroupDoesNotExistException"}, + {"shape":"RevisionRequiredException"}, + {"shape":"InvalidRevisionException"}, + {"shape":"InvalidDeploymentConfigNameException"}, + {"shape":"DeploymentConfigDoesNotExistException"}, + {"shape":"DescriptionTooLongException"}, + {"shape":"DeploymentLimitExceededException"} + ] + }, + "CreateDeploymentConfig":{ + "name":"CreateDeploymentConfig", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDeploymentConfigInput"}, + "output":{"shape":"CreateDeploymentConfigOutput"}, + "errors":[ + {"shape":"InvalidDeploymentConfigNameException"}, + {"shape":"DeploymentConfigNameRequiredException"}, + {"shape":"DeploymentConfigAlreadyExistsException"}, + {"shape":"InvalidMinimumHealthyHostValueException"}, + {"shape":"DeploymentConfigLimitExceededException"} + ] + }, + "CreateDeploymentGroup":{ + "name":"CreateDeploymentGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDeploymentGroupInput"}, + "output":{"shape":"CreateDeploymentGroupOutput"}, + "errors":[ + {"shape":"ApplicationNameRequiredException"}, + {"shape":"InvalidApplicationNameException"}, + {"shape":"ApplicationDoesNotExistException"}, + 
{"shape":"DeploymentGroupNameRequiredException"}, + {"shape":"InvalidDeploymentGroupNameException"}, + {"shape":"DeploymentGroupAlreadyExistsException"}, + {"shape":"InvalidEC2TagException"}, + {"shape":"InvalidTagException"}, + {"shape":"InvalidAutoScalingGroupException"}, + {"shape":"InvalidDeploymentConfigNameException"}, + {"shape":"DeploymentConfigDoesNotExistException"}, + {"shape":"RoleRequiredException"}, + {"shape":"InvalidRoleException"}, + {"shape":"DeploymentGroupLimitExceededException"}, + {"shape":"LifecycleHookLimitExceededException"}, + {"shape":"InvalidTriggerConfigException"}, + {"shape":"TriggerTargetsLimitExceededException"} + ] + }, + "DeleteApplication":{ + "name":"DeleteApplication", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteApplicationInput"}, + "errors":[ + {"shape":"ApplicationNameRequiredException"}, + {"shape":"InvalidApplicationNameException"} + ] + }, + "DeleteDeploymentConfig":{ + "name":"DeleteDeploymentConfig", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDeploymentConfigInput"}, + "errors":[ + {"shape":"InvalidDeploymentConfigNameException"}, + {"shape":"DeploymentConfigNameRequiredException"}, + {"shape":"DeploymentConfigInUseException"}, + {"shape":"InvalidOperationException"} + ] + }, + "DeleteDeploymentGroup":{ + "name":"DeleteDeploymentGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDeploymentGroupInput"}, + "output":{"shape":"DeleteDeploymentGroupOutput"}, + "errors":[ + {"shape":"ApplicationNameRequiredException"}, + {"shape":"InvalidApplicationNameException"}, + {"shape":"DeploymentGroupNameRequiredException"}, + {"shape":"InvalidDeploymentGroupNameException"}, + {"shape":"InvalidRoleException"} + ] + }, + "DeregisterOnPremisesInstance":{ + "name":"DeregisterOnPremisesInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeregisterOnPremisesInstanceInput"}, + "errors":[ + {"shape":"InstanceNameRequiredException"}, + {"shape":"InvalidInstanceNameException"} + ] + }, + "GetApplication":{ + "name":"GetApplication", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetApplicationInput"}, + "output":{"shape":"GetApplicationOutput"}, + "errors":[ + {"shape":"ApplicationNameRequiredException"}, + {"shape":"InvalidApplicationNameException"}, + {"shape":"ApplicationDoesNotExistException"} + ] + }, + "GetApplicationRevision":{ + "name":"GetApplicationRevision", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetApplicationRevisionInput"}, + "output":{"shape":"GetApplicationRevisionOutput"}, + "errors":[ + {"shape":"ApplicationDoesNotExistException"}, + {"shape":"ApplicationNameRequiredException"}, + {"shape":"InvalidApplicationNameException"}, + {"shape":"RevisionDoesNotExistException"}, + {"shape":"RevisionRequiredException"}, + {"shape":"InvalidRevisionException"} + ] + }, + "GetDeployment":{ + "name":"GetDeployment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDeploymentInput"}, + "output":{"shape":"GetDeploymentOutput"}, + "errors":[ + {"shape":"DeploymentIdRequiredException"}, + {"shape":"InvalidDeploymentIdException"}, + {"shape":"DeploymentDoesNotExistException"} + ] + }, + "GetDeploymentConfig":{ + "name":"GetDeploymentConfig", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDeploymentConfigInput"}, + "output":{"shape":"GetDeploymentConfigOutput"}, + "errors":[ + 
{"shape":"InvalidDeploymentConfigNameException"}, + {"shape":"DeploymentConfigNameRequiredException"}, + {"shape":"DeploymentConfigDoesNotExistException"} + ] + }, + "GetDeploymentGroup":{ + "name":"GetDeploymentGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDeploymentGroupInput"}, + "output":{"shape":"GetDeploymentGroupOutput"}, + "errors":[ + {"shape":"ApplicationNameRequiredException"}, + {"shape":"InvalidApplicationNameException"}, + {"shape":"ApplicationDoesNotExistException"}, + {"shape":"DeploymentGroupNameRequiredException"}, + {"shape":"InvalidDeploymentGroupNameException"}, + {"shape":"DeploymentGroupDoesNotExistException"} + ] + }, + "GetDeploymentInstance":{ + "name":"GetDeploymentInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDeploymentInstanceInput"}, + "output":{"shape":"GetDeploymentInstanceOutput"}, + "errors":[ + {"shape":"DeploymentIdRequiredException"}, + {"shape":"DeploymentDoesNotExistException"}, + {"shape":"InstanceIdRequiredException"}, + {"shape":"InvalidDeploymentIdException"}, + {"shape":"InstanceDoesNotExistException"}, + {"shape":"InvalidInstanceNameException"} + ] + }, + "GetOnPremisesInstance":{ + "name":"GetOnPremisesInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetOnPremisesInstanceInput"}, + "output":{"shape":"GetOnPremisesInstanceOutput"}, + "errors":[ + {"shape":"InstanceNameRequiredException"}, + {"shape":"InstanceNotRegisteredException"}, + {"shape":"InvalidInstanceNameException"} + ] + }, + "ListApplicationRevisions":{ + "name":"ListApplicationRevisions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListApplicationRevisionsInput"}, + "output":{"shape":"ListApplicationRevisionsOutput"}, + "errors":[ + {"shape":"ApplicationDoesNotExistException"}, + {"shape":"ApplicationNameRequiredException"}, + {"shape":"InvalidApplicationNameException"}, + {"shape":"InvalidSortByException"}, + {"shape":"InvalidSortOrderException"}, + {"shape":"InvalidBucketNameFilterException"}, + {"shape":"InvalidKeyPrefixFilterException"}, + {"shape":"BucketNameFilterRequiredException"}, + {"shape":"InvalidDeployedStateFilterException"}, + {"shape":"InvalidNextTokenException"} + ] + }, + "ListApplications":{ + "name":"ListApplications", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListApplicationsInput"}, + "output":{"shape":"ListApplicationsOutput"}, + "errors":[ + {"shape":"InvalidNextTokenException"} + ] + }, + "ListDeploymentConfigs":{ + "name":"ListDeploymentConfigs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDeploymentConfigsInput"}, + "output":{"shape":"ListDeploymentConfigsOutput"}, + "errors":[ + {"shape":"InvalidNextTokenException"} + ] + }, + "ListDeploymentGroups":{ + "name":"ListDeploymentGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDeploymentGroupsInput"}, + "output":{"shape":"ListDeploymentGroupsOutput"}, + "errors":[ + {"shape":"ApplicationNameRequiredException"}, + {"shape":"InvalidApplicationNameException"}, + {"shape":"ApplicationDoesNotExistException"}, + {"shape":"InvalidNextTokenException"} + ] + }, + "ListDeploymentInstances":{ + "name":"ListDeploymentInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDeploymentInstancesInput"}, + "output":{"shape":"ListDeploymentInstancesOutput"}, + "errors":[ + {"shape":"DeploymentIdRequiredException"}, + 
{"shape":"DeploymentDoesNotExistException"}, + {"shape":"DeploymentNotStartedException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"InvalidDeploymentIdException"}, + {"shape":"InvalidInstanceStatusException"} + ] + }, + "ListDeployments":{ + "name":"ListDeployments", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDeploymentsInput"}, + "output":{"shape":"ListDeploymentsOutput"}, + "errors":[ + {"shape":"ApplicationNameRequiredException"}, + {"shape":"InvalidApplicationNameException"}, + {"shape":"ApplicationDoesNotExistException"}, + {"shape":"InvalidDeploymentGroupNameException"}, + {"shape":"DeploymentGroupDoesNotExistException"}, + {"shape":"DeploymentGroupNameRequiredException"}, + {"shape":"InvalidTimeRangeException"}, + {"shape":"InvalidDeploymentStatusException"}, + {"shape":"InvalidNextTokenException"} + ] + }, + "ListOnPremisesInstances":{ + "name":"ListOnPremisesInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListOnPremisesInstancesInput"}, + "output":{"shape":"ListOnPremisesInstancesOutput"}, + "errors":[ + {"shape":"InvalidRegistrationStatusException"}, + {"shape":"InvalidTagFilterException"}, + {"shape":"InvalidNextTokenException"} + ] + }, + "RegisterApplicationRevision":{ + "name":"RegisterApplicationRevision", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterApplicationRevisionInput"}, + "errors":[ + {"shape":"ApplicationDoesNotExistException"}, + {"shape":"ApplicationNameRequiredException"}, + {"shape":"InvalidApplicationNameException"}, + {"shape":"DescriptionTooLongException"}, + {"shape":"RevisionRequiredException"}, + {"shape":"InvalidRevisionException"} + ] + }, + "RegisterOnPremisesInstance":{ + "name":"RegisterOnPremisesInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterOnPremisesInstanceInput"}, + "errors":[ + {"shape":"InstanceNameAlreadyRegisteredException"}, + {"shape":"IamUserArnAlreadyRegisteredException"}, + {"shape":"InstanceNameRequiredException"}, + {"shape":"IamUserArnRequiredException"}, + {"shape":"InvalidInstanceNameException"}, + {"shape":"InvalidIamUserArnException"} + ] + }, + "RemoveTagsFromOnPremisesInstances":{ + "name":"RemoveTagsFromOnPremisesInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveTagsFromOnPremisesInstancesInput"}, + "errors":[ + {"shape":"InstanceNameRequiredException"}, + {"shape":"TagRequiredException"}, + {"shape":"InvalidTagException"}, + {"shape":"TagLimitExceededException"}, + {"shape":"InstanceLimitExceededException"}, + {"shape":"InstanceNotRegisteredException"} + ] + }, + "StopDeployment":{ + "name":"StopDeployment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopDeploymentInput"}, + "output":{"shape":"StopDeploymentOutput"}, + "errors":[ + {"shape":"DeploymentIdRequiredException"}, + {"shape":"DeploymentDoesNotExistException"}, + {"shape":"DeploymentAlreadyCompletedException"}, + {"shape":"InvalidDeploymentIdException"} + ] + }, + "UpdateApplication":{ + "name":"UpdateApplication", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateApplicationInput"}, + "errors":[ + {"shape":"ApplicationNameRequiredException"}, + {"shape":"InvalidApplicationNameException"}, + {"shape":"ApplicationAlreadyExistsException"}, + {"shape":"ApplicationDoesNotExistException"} + ] + }, + "UpdateDeploymentGroup":{ + "name":"UpdateDeploymentGroup", + "http":{ + "method":"POST", + "requestUri":"/" + 
}, + "input":{"shape":"UpdateDeploymentGroupInput"}, + "output":{"shape":"UpdateDeploymentGroupOutput"}, + "errors":[ + {"shape":"ApplicationNameRequiredException"}, + {"shape":"InvalidApplicationNameException"}, + {"shape":"ApplicationDoesNotExistException"}, + {"shape":"InvalidDeploymentGroupNameException"}, + {"shape":"DeploymentGroupAlreadyExistsException"}, + {"shape":"DeploymentGroupNameRequiredException"}, + {"shape":"DeploymentGroupDoesNotExistException"}, + {"shape":"InvalidEC2TagException"}, + {"shape":"InvalidTagException"}, + {"shape":"InvalidAutoScalingGroupException"}, + {"shape":"InvalidDeploymentConfigNameException"}, + {"shape":"DeploymentConfigDoesNotExistException"}, + {"shape":"InvalidRoleException"}, + {"shape":"LifecycleHookLimitExceededException"}, + {"shape":"InvalidTriggerConfigException"}, + {"shape":"TriggerTargetsLimitExceededException"} + ] + } + }, + "shapes":{ + "AddTagsToOnPremisesInstancesInput":{ + "type":"structure", + "required":[ + "tags", + "instanceNames" + ], + "members":{ + "tags":{"shape":"TagList"}, + "instanceNames":{"shape":"InstanceNameList"} + } + }, + "ApplicationAlreadyExistsException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ApplicationDoesNotExistException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ApplicationId":{"type":"string"}, + "ApplicationInfo":{ + "type":"structure", + "members":{ + "applicationId":{"shape":"ApplicationId"}, + "applicationName":{"shape":"ApplicationName"}, + "createTime":{"shape":"Timestamp"}, + "linkedToGitHub":{"shape":"Boolean"} + } + }, + "ApplicationLimitExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ApplicationName":{ + "type":"string", + "max":100, + "min":1 + }, + "ApplicationNameRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ApplicationRevisionSortBy":{ + "type":"string", + "enum":[ + "registerTime", + "firstUsedTime", + "lastUsedTime" + ] + }, + "ApplicationsInfoList":{ + "type":"list", + "member":{"shape":"ApplicationInfo"} + }, + "ApplicationsList":{ + "type":"list", + "member":{"shape":"ApplicationName"} + }, + "AutoScalingGroup":{ + "type":"structure", + "members":{ + "name":{"shape":"AutoScalingGroupName"}, + "hook":{"shape":"AutoScalingGroupHook"} + } + }, + "AutoScalingGroupHook":{"type":"string"}, + "AutoScalingGroupList":{ + "type":"list", + "member":{"shape":"AutoScalingGroup"} + }, + "AutoScalingGroupName":{"type":"string"}, + "AutoScalingGroupNameList":{ + "type":"list", + "member":{"shape":"AutoScalingGroupName"} + }, + "BatchGetApplicationRevisionsInput":{ + "type":"structure", + "required":[ + "applicationName", + "revisions" + ], + "members":{ + "applicationName":{"shape":"ApplicationName"}, + "revisions":{"shape":"RevisionLocationList"} + } + }, + "BatchGetApplicationRevisionsOutput":{ + "type":"structure", + "members":{ + "applicationName":{"shape":"ApplicationName"}, + "errorMessage":{"shape":"ErrorMessage"}, + "revisions":{"shape":"RevisionInfoList"} + } + }, + "BatchGetApplicationsInput":{ + "type":"structure", + "members":{ + "applicationNames":{"shape":"ApplicationsList"} + } + }, + "BatchGetApplicationsOutput":{ + "type":"structure", + "members":{ + "applicationsInfo":{"shape":"ApplicationsInfoList"} + } + }, + "BatchGetDeploymentGroupsInput":{ + "type":"structure", + "required":[ + "applicationName", + "deploymentGroupNames" + ], + "members":{ + "applicationName":{"shape":"ApplicationName"}, + 
"deploymentGroupNames":{"shape":"DeploymentGroupsList"} + } + }, + "BatchGetDeploymentGroupsOutput":{ + "type":"structure", + "members":{ + "deploymentGroupsInfo":{"shape":"DeploymentGroupInfoList"}, + "errorMessage":{"shape":"ErrorMessage"} + } + }, + "BatchGetDeploymentInstancesInput":{ + "type":"structure", + "required":[ + "deploymentId", + "instanceIds" + ], + "members":{ + "deploymentId":{"shape":"DeploymentId"}, + "instanceIds":{"shape":"InstancesList"} + } + }, + "BatchGetDeploymentInstancesOutput":{ + "type":"structure", + "members":{ + "instancesSummary":{"shape":"InstanceSummaryList"}, + "errorMessage":{"shape":"ErrorMessage"} + } + }, + "BatchGetDeploymentsInput":{ + "type":"structure", + "members":{ + "deploymentIds":{"shape":"DeploymentsList"} + } + }, + "BatchGetDeploymentsOutput":{ + "type":"structure", + "members":{ + "deploymentsInfo":{"shape":"DeploymentsInfoList"} + } + }, + "BatchGetOnPremisesInstancesInput":{ + "type":"structure", + "members":{ + "instanceNames":{"shape":"InstanceNameList"} + } + }, + "BatchGetOnPremisesInstancesOutput":{ + "type":"structure", + "members":{ + "instanceInfos":{"shape":"InstanceInfoList"} + } + }, + "BatchLimitExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "Boolean":{"type":"boolean"}, + "BucketNameFilterRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "BundleType":{ + "type":"string", + "enum":[ + "tar", + "tgz", + "zip" + ] + }, + "CommitId":{"type":"string"}, + "CreateApplicationInput":{ + "type":"structure", + "required":["applicationName"], + "members":{ + "applicationName":{"shape":"ApplicationName"} + } + }, + "CreateApplicationOutput":{ + "type":"structure", + "members":{ + "applicationId":{"shape":"ApplicationId"} + } + }, + "CreateDeploymentConfigInput":{ + "type":"structure", + "required":["deploymentConfigName"], + "members":{ + "deploymentConfigName":{"shape":"DeploymentConfigName"}, + "minimumHealthyHosts":{"shape":"MinimumHealthyHosts"} + } + }, + "CreateDeploymentConfigOutput":{ + "type":"structure", + "members":{ + "deploymentConfigId":{"shape":"DeploymentConfigId"} + } + }, + "CreateDeploymentGroupInput":{ + "type":"structure", + "required":[ + "applicationName", + "deploymentGroupName", + "serviceRoleArn" + ], + "members":{ + "applicationName":{"shape":"ApplicationName"}, + "deploymentGroupName":{"shape":"DeploymentGroupName"}, + "deploymentConfigName":{"shape":"DeploymentConfigName"}, + "ec2TagFilters":{"shape":"EC2TagFilterList"}, + "onPremisesInstanceTagFilters":{"shape":"TagFilterList"}, + "autoScalingGroups":{"shape":"AutoScalingGroupNameList"}, + "serviceRoleArn":{"shape":"Role"}, + "triggerConfigurations":{"shape":"TriggerConfigList"} + } + }, + "CreateDeploymentGroupOutput":{ + "type":"structure", + "members":{ + "deploymentGroupId":{"shape":"DeploymentGroupId"} + } + }, + "CreateDeploymentInput":{ + "type":"structure", + "required":["applicationName"], + "members":{ + "applicationName":{"shape":"ApplicationName"}, + "deploymentGroupName":{"shape":"DeploymentGroupName"}, + "revision":{"shape":"RevisionLocation"}, + "deploymentConfigName":{"shape":"DeploymentConfigName"}, + "description":{"shape":"Description"}, + "ignoreApplicationStopFailures":{"shape":"Boolean"} + } + }, + "CreateDeploymentOutput":{ + "type":"structure", + "members":{ + "deploymentId":{"shape":"DeploymentId"} + } + }, + "DeleteApplicationInput":{ + "type":"structure", + "required":["applicationName"], + "members":{ + "applicationName":{"shape":"ApplicationName"} 
+ } + }, + "DeleteDeploymentConfigInput":{ + "type":"structure", + "required":["deploymentConfigName"], + "members":{ + "deploymentConfigName":{"shape":"DeploymentConfigName"} + } + }, + "DeleteDeploymentGroupInput":{ + "type":"structure", + "required":[ + "applicationName", + "deploymentGroupName" + ], + "members":{ + "applicationName":{"shape":"ApplicationName"}, + "deploymentGroupName":{"shape":"DeploymentGroupName"} + } + }, + "DeleteDeploymentGroupOutput":{ + "type":"structure", + "members":{ + "hooksNotCleanedUp":{"shape":"AutoScalingGroupList"} + } + }, + "DeploymentAlreadyCompletedException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "DeploymentConfigAlreadyExistsException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "DeploymentConfigDoesNotExistException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "DeploymentConfigId":{"type":"string"}, + "DeploymentConfigInUseException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "DeploymentConfigInfo":{ + "type":"structure", + "members":{ + "deploymentConfigId":{"shape":"DeploymentConfigId"}, + "deploymentConfigName":{"shape":"DeploymentConfigName"}, + "minimumHealthyHosts":{"shape":"MinimumHealthyHosts"}, + "createTime":{"shape":"Timestamp"} + } + }, + "DeploymentConfigLimitExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "DeploymentConfigName":{ + "type":"string", + "max":100, + "min":1 + }, + "DeploymentConfigNameRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "DeploymentConfigsList":{ + "type":"list", + "member":{"shape":"DeploymentConfigName"} + }, + "DeploymentCreator":{ + "type":"string", + "enum":[ + "user", + "autoscaling" + ] + }, + "DeploymentDoesNotExistException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "DeploymentGroupAlreadyExistsException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "DeploymentGroupDoesNotExistException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "DeploymentGroupId":{"type":"string"}, + "DeploymentGroupInfo":{ + "type":"structure", + "members":{ + "applicationName":{"shape":"ApplicationName"}, + "deploymentGroupId":{"shape":"DeploymentGroupId"}, + "deploymentGroupName":{"shape":"DeploymentGroupName"}, + "deploymentConfigName":{"shape":"DeploymentConfigName"}, + "ec2TagFilters":{"shape":"EC2TagFilterList"}, + "onPremisesInstanceTagFilters":{"shape":"TagFilterList"}, + "autoScalingGroups":{"shape":"AutoScalingGroupList"}, + "serviceRoleArn":{"shape":"Role"}, + "targetRevision":{"shape":"RevisionLocation"}, + "triggerConfigurations":{"shape":"TriggerConfigList"} + } + }, + "DeploymentGroupInfoList":{ + "type":"list", + "member":{"shape":"DeploymentGroupInfo"} + }, + "DeploymentGroupLimitExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "DeploymentGroupName":{ + "type":"string", + "max":100, + "min":1 + }, + "DeploymentGroupNameRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "DeploymentGroupsList":{ + "type":"list", + "member":{"shape":"DeploymentGroupName"} + }, + "DeploymentId":{"type":"string"}, + "DeploymentIdRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "DeploymentInfo":{ + "type":"structure", + "members":{ + "applicationName":{"shape":"ApplicationName"}, + "deploymentGroupName":{"shape":"DeploymentGroupName"}, + 
"deploymentConfigName":{"shape":"DeploymentConfigName"}, + "deploymentId":{"shape":"DeploymentId"}, + "revision":{"shape":"RevisionLocation"}, + "status":{"shape":"DeploymentStatus"}, + "errorInformation":{"shape":"ErrorInformation"}, + "createTime":{"shape":"Timestamp"}, + "startTime":{"shape":"Timestamp"}, + "completeTime":{"shape":"Timestamp"}, + "deploymentOverview":{"shape":"DeploymentOverview"}, + "description":{"shape":"Description"}, + "creator":{"shape":"DeploymentCreator"}, + "ignoreApplicationStopFailures":{"shape":"Boolean"} + } + }, + "DeploymentLimitExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "DeploymentNotStartedException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "DeploymentOverview":{ + "type":"structure", + "members":{ + "Pending":{"shape":"InstanceCount"}, + "InProgress":{"shape":"InstanceCount"}, + "Succeeded":{"shape":"InstanceCount"}, + "Failed":{"shape":"InstanceCount"}, + "Skipped":{"shape":"InstanceCount"} + } + }, + "DeploymentStatus":{ + "type":"string", + "enum":[ + "Created", + "Queued", + "InProgress", + "Succeeded", + "Failed", + "Stopped" + ] + }, + "DeploymentStatusList":{ + "type":"list", + "member":{"shape":"DeploymentStatus"} + }, + "DeploymentsInfoList":{ + "type":"list", + "member":{"shape":"DeploymentInfo"} + }, + "DeploymentsList":{ + "type":"list", + "member":{"shape":"DeploymentId"} + }, + "DeregisterOnPremisesInstanceInput":{ + "type":"structure", + "required":["instanceName"], + "members":{ + "instanceName":{"shape":"InstanceName"} + } + }, + "Description":{"type":"string"}, + "DescriptionTooLongException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "Diagnostics":{ + "type":"structure", + "members":{ + "errorCode":{"shape":"LifecycleErrorCode"}, + "scriptName":{"shape":"ScriptName"}, + "message":{"shape":"LifecycleMessage"}, + "logTail":{"shape":"LogTail"} + } + }, + "EC2TagFilter":{ + "type":"structure", + "members":{ + "Key":{"shape":"Key"}, + "Value":{"shape":"Value"}, + "Type":{"shape":"EC2TagFilterType"} + } + }, + "EC2TagFilterList":{ + "type":"list", + "member":{"shape":"EC2TagFilter"} + }, + "EC2TagFilterType":{ + "type":"string", + "enum":[ + "KEY_ONLY", + "VALUE_ONLY", + "KEY_AND_VALUE" + ] + }, + "ETag":{"type":"string"}, + "ErrorCode":{ + "type":"string", + "enum":[ + "DEPLOYMENT_GROUP_MISSING", + "APPLICATION_MISSING", + "REVISION_MISSING", + "IAM_ROLE_MISSING", + "IAM_ROLE_PERMISSIONS", + "NO_EC2_SUBSCRIPTION", + "OVER_MAX_INSTANCES", + "NO_INSTANCES", + "TIMEOUT", + "HEALTH_CONSTRAINTS_INVALID", + "HEALTH_CONSTRAINTS", + "INTERNAL_ERROR", + "THROTTLED" + ] + }, + "ErrorInformation":{ + "type":"structure", + "members":{ + "code":{"shape":"ErrorCode"}, + "message":{"shape":"ErrorMessage"} + } + }, + "ErrorMessage":{"type":"string"}, + "GenericRevisionInfo":{ + "type":"structure", + "members":{ + "description":{"shape":"Description"}, + "deploymentGroups":{"shape":"DeploymentGroupsList"}, + "firstUsedTime":{"shape":"Timestamp"}, + "lastUsedTime":{"shape":"Timestamp"}, + "registerTime":{"shape":"Timestamp"} + } + }, + "GetApplicationInput":{ + "type":"structure", + "required":["applicationName"], + "members":{ + "applicationName":{"shape":"ApplicationName"} + } + }, + "GetApplicationOutput":{ + "type":"structure", + "members":{ + "application":{"shape":"ApplicationInfo"} + } + }, + "GetApplicationRevisionInput":{ + "type":"structure", + "required":[ + "applicationName", + "revision" + ], + "members":{ + 
"applicationName":{"shape":"ApplicationName"}, + "revision":{"shape":"RevisionLocation"} + } + }, + "GetApplicationRevisionOutput":{ + "type":"structure", + "members":{ + "applicationName":{"shape":"ApplicationName"}, + "revision":{"shape":"RevisionLocation"}, + "revisionInfo":{"shape":"GenericRevisionInfo"} + } + }, + "GetDeploymentConfigInput":{ + "type":"structure", + "required":["deploymentConfigName"], + "members":{ + "deploymentConfigName":{"shape":"DeploymentConfigName"} + } + }, + "GetDeploymentConfigOutput":{ + "type":"structure", + "members":{ + "deploymentConfigInfo":{"shape":"DeploymentConfigInfo"} + } + }, + "GetDeploymentGroupInput":{ + "type":"structure", + "required":[ + "applicationName", + "deploymentGroupName" + ], + "members":{ + "applicationName":{"shape":"ApplicationName"}, + "deploymentGroupName":{"shape":"DeploymentGroupName"} + } + }, + "GetDeploymentGroupOutput":{ + "type":"structure", + "members":{ + "deploymentGroupInfo":{"shape":"DeploymentGroupInfo"} + } + }, + "GetDeploymentInput":{ + "type":"structure", + "required":["deploymentId"], + "members":{ + "deploymentId":{"shape":"DeploymentId"} + } + }, + "GetDeploymentInstanceInput":{ + "type":"structure", + "required":[ + "deploymentId", + "instanceId" + ], + "members":{ + "deploymentId":{"shape":"DeploymentId"}, + "instanceId":{"shape":"InstanceId"} + } + }, + "GetDeploymentInstanceOutput":{ + "type":"structure", + "members":{ + "instanceSummary":{"shape":"InstanceSummary"} + } + }, + "GetDeploymentOutput":{ + "type":"structure", + "members":{ + "deploymentInfo":{"shape":"DeploymentInfo"} + } + }, + "GetOnPremisesInstanceInput":{ + "type":"structure", + "required":["instanceName"], + "members":{ + "instanceName":{"shape":"InstanceName"} + } + }, + "GetOnPremisesInstanceOutput":{ + "type":"structure", + "members":{ + "instanceInfo":{"shape":"InstanceInfo"} + } + }, + "GitHubLocation":{ + "type":"structure", + "members":{ + "repository":{"shape":"Repository"}, + "commitId":{"shape":"CommitId"} + } + }, + "IamUserArn":{"type":"string"}, + "IamUserArnAlreadyRegisteredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "IamUserArnRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InstanceArn":{"type":"string"}, + "InstanceCount":{"type":"long"}, + "InstanceDoesNotExistException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InstanceId":{"type":"string"}, + "InstanceIdRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InstanceInfo":{ + "type":"structure", + "members":{ + "instanceName":{"shape":"InstanceName"}, + "iamUserArn":{"shape":"IamUserArn"}, + "instanceArn":{"shape":"InstanceArn"}, + "registerTime":{"shape":"Timestamp"}, + "deregisterTime":{"shape":"Timestamp"}, + "tags":{"shape":"TagList"} + } + }, + "InstanceInfoList":{ + "type":"list", + "member":{"shape":"InstanceInfo"} + }, + "InstanceLimitExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InstanceName":{"type":"string"}, + "InstanceNameAlreadyRegisteredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InstanceNameList":{ + "type":"list", + "member":{"shape":"InstanceName"} + }, + "InstanceNameRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InstanceNotRegisteredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InstanceStatus":{ + "type":"string", + "enum":[ + "Pending", + "InProgress", + 
"Succeeded", + "Failed", + "Skipped", + "Unknown" + ] + }, + "InstanceStatusList":{ + "type":"list", + "member":{"shape":"InstanceStatus"} + }, + "InstanceSummary":{ + "type":"structure", + "members":{ + "deploymentId":{"shape":"DeploymentId"}, + "instanceId":{"shape":"InstanceId"}, + "status":{"shape":"InstanceStatus"}, + "lastUpdatedAt":{"shape":"Timestamp"}, + "lifecycleEvents":{"shape":"LifecycleEventList"} + } + }, + "InstanceSummaryList":{ + "type":"list", + "member":{"shape":"InstanceSummary"} + }, + "InstancesList":{ + "type":"list", + "member":{"shape":"InstanceId"} + }, + "InvalidApplicationNameException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidAutoScalingGroupException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidBucketNameFilterException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidDeployedStateFilterException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidDeploymentConfigNameException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidDeploymentGroupNameException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidDeploymentIdException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidDeploymentStatusException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidEC2TagException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidIamUserArnException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidInstanceNameException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidInstanceStatusException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidKeyPrefixFilterException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidMinimumHealthyHostValueException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidNextTokenException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidOperationException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidRegistrationStatusException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidRevisionException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidRoleException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidSortByException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidSortOrderException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidTagException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidTagFilterException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidTimeRangeException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidTriggerConfigException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "Key":{"type":"string"}, + "LifecycleErrorCode":{ + "type":"string", + "enum":[ + "Success", + "ScriptMissing", + "ScriptNotExecutable", + "ScriptTimedOut", + "ScriptFailed", + "UnknownError" + ] + }, + "LifecycleEvent":{ + "type":"structure", + "members":{ + "lifecycleEventName":{"shape":"LifecycleEventName"}, + "diagnostics":{"shape":"Diagnostics"}, + "startTime":{"shape":"Timestamp"}, + "endTime":{"shape":"Timestamp"}, + 
"status":{"shape":"LifecycleEventStatus"} + } + }, + "LifecycleEventList":{ + "type":"list", + "member":{"shape":"LifecycleEvent"} + }, + "LifecycleEventName":{"type":"string"}, + "LifecycleEventStatus":{ + "type":"string", + "enum":[ + "Pending", + "InProgress", + "Succeeded", + "Failed", + "Skipped", + "Unknown" + ] + }, + "LifecycleHookLimitExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "LifecycleMessage":{"type":"string"}, + "ListApplicationRevisionsInput":{ + "type":"structure", + "required":["applicationName"], + "members":{ + "applicationName":{"shape":"ApplicationName"}, + "sortBy":{"shape":"ApplicationRevisionSortBy"}, + "sortOrder":{"shape":"SortOrder"}, + "s3Bucket":{"shape":"S3Bucket"}, + "s3KeyPrefix":{"shape":"S3Key"}, + "deployed":{"shape":"ListStateFilterAction"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListApplicationRevisionsOutput":{ + "type":"structure", + "members":{ + "revisions":{"shape":"RevisionLocationList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListApplicationsInput":{ + "type":"structure", + "members":{ + "nextToken":{"shape":"NextToken"} + } + }, + "ListApplicationsOutput":{ + "type":"structure", + "members":{ + "applications":{"shape":"ApplicationsList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListDeploymentConfigsInput":{ + "type":"structure", + "members":{ + "nextToken":{"shape":"NextToken"} + } + }, + "ListDeploymentConfigsOutput":{ + "type":"structure", + "members":{ + "deploymentConfigsList":{"shape":"DeploymentConfigsList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListDeploymentGroupsInput":{ + "type":"structure", + "required":["applicationName"], + "members":{ + "applicationName":{"shape":"ApplicationName"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListDeploymentGroupsOutput":{ + "type":"structure", + "members":{ + "applicationName":{"shape":"ApplicationName"}, + "deploymentGroups":{"shape":"DeploymentGroupsList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListDeploymentInstancesInput":{ + "type":"structure", + "required":["deploymentId"], + "members":{ + "deploymentId":{"shape":"DeploymentId"}, + "nextToken":{"shape":"NextToken"}, + "instanceStatusFilter":{"shape":"InstanceStatusList"} + } + }, + "ListDeploymentInstancesOutput":{ + "type":"structure", + "members":{ + "instancesList":{"shape":"InstancesList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListDeploymentsInput":{ + "type":"structure", + "members":{ + "applicationName":{"shape":"ApplicationName"}, + "deploymentGroupName":{"shape":"DeploymentGroupName"}, + "includeOnlyStatuses":{"shape":"DeploymentStatusList"}, + "createTimeRange":{"shape":"TimeRange"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListDeploymentsOutput":{ + "type":"structure", + "members":{ + "deployments":{"shape":"DeploymentsList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListOnPremisesInstancesInput":{ + "type":"structure", + "members":{ + "registrationStatus":{"shape":"RegistrationStatus"}, + "tagFilters":{"shape":"TagFilterList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListOnPremisesInstancesOutput":{ + "type":"structure", + "members":{ + "instanceNames":{"shape":"InstanceNameList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListStateFilterAction":{ + "type":"string", + "enum":[ + "include", + "exclude", + "ignore" + ] + }, + "LogTail":{"type":"string"}, + "Message":{"type":"string"}, + "MinimumHealthyHosts":{ + "type":"structure", + "members":{ + "value":{"shape":"MinimumHealthyHostsValue"}, + 
"type":{"shape":"MinimumHealthyHostsType"} + } + }, + "MinimumHealthyHostsType":{ + "type":"string", + "enum":[ + "HOST_COUNT", + "FLEET_PERCENT" + ] + }, + "MinimumHealthyHostsValue":{"type":"integer"}, + "NextToken":{"type":"string"}, + "RegisterApplicationRevisionInput":{ + "type":"structure", + "required":[ + "applicationName", + "revision" + ], + "members":{ + "applicationName":{"shape":"ApplicationName"}, + "description":{"shape":"Description"}, + "revision":{"shape":"RevisionLocation"} + } + }, + "RegisterOnPremisesInstanceInput":{ + "type":"structure", + "required":[ + "instanceName", + "iamUserArn" + ], + "members":{ + "instanceName":{"shape":"InstanceName"}, + "iamUserArn":{"shape":"IamUserArn"} + } + }, + "RegistrationStatus":{ + "type":"string", + "enum":[ + "Registered", + "Deregistered" + ] + }, + "RemoveTagsFromOnPremisesInstancesInput":{ + "type":"structure", + "required":[ + "tags", + "instanceNames" + ], + "members":{ + "tags":{"shape":"TagList"}, + "instanceNames":{"shape":"InstanceNameList"} + } + }, + "Repository":{"type":"string"}, + "RevisionDoesNotExistException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "RevisionInfo":{ + "type":"structure", + "members":{ + "revisionLocation":{"shape":"RevisionLocation"}, + "genericRevisionInfo":{"shape":"GenericRevisionInfo"} + } + }, + "RevisionInfoList":{ + "type":"list", + "member":{"shape":"RevisionInfo"} + }, + "RevisionLocation":{ + "type":"structure", + "members":{ + "revisionType":{"shape":"RevisionLocationType"}, + "s3Location":{"shape":"S3Location"}, + "gitHubLocation":{"shape":"GitHubLocation"} + } + }, + "RevisionLocationList":{ + "type":"list", + "member":{"shape":"RevisionLocation"} + }, + "RevisionLocationType":{ + "type":"string", + "enum":[ + "S3", + "GitHub" + ] + }, + "RevisionRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "Role":{"type":"string"}, + "RoleRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "S3Bucket":{"type":"string"}, + "S3Key":{"type":"string"}, + "S3Location":{ + "type":"structure", + "members":{ + "bucket":{"shape":"S3Bucket"}, + "key":{"shape":"S3Key"}, + "bundleType":{"shape":"BundleType"}, + "version":{"shape":"VersionId"}, + "eTag":{"shape":"ETag"} + } + }, + "ScriptName":{"type":"string"}, + "SortOrder":{ + "type":"string", + "enum":[ + "ascending", + "descending" + ] + }, + "StopDeploymentInput":{ + "type":"structure", + "required":["deploymentId"], + "members":{ + "deploymentId":{"shape":"DeploymentId"} + } + }, + "StopDeploymentOutput":{ + "type":"structure", + "members":{ + "status":{"shape":"StopStatus"}, + "statusMessage":{"shape":"Message"} + } + }, + "StopStatus":{ + "type":"string", + "enum":[ + "Pending", + "Succeeded" + ] + }, + "Tag":{ + "type":"structure", + "members":{ + "Key":{"shape":"Key"}, + "Value":{"shape":"Value"} + } + }, + "TagFilter":{ + "type":"structure", + "members":{ + "Key":{"shape":"Key"}, + "Value":{"shape":"Value"}, + "Type":{"shape":"TagFilterType"} + } + }, + "TagFilterList":{ + "type":"list", + "member":{"shape":"TagFilter"} + }, + "TagFilterType":{ + "type":"string", + "enum":[ + "KEY_ONLY", + "VALUE_ONLY", + "KEY_AND_VALUE" + ] + }, + "TagLimitExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"} + }, + "TagRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "TimeRange":{ + "type":"structure", + "members":{ + 
"start":{"shape":"Timestamp"}, + "end":{"shape":"Timestamp"} + } + }, + "Timestamp":{"type":"timestamp"}, + "TriggerConfig":{ + "type":"structure", + "members":{ + "triggerName":{"shape":"TriggerName"}, + "triggerTargetArn":{"shape":"TriggerTargetArn"}, + "triggerEvents":{"shape":"TriggerEventTypeList"} + } + }, + "TriggerConfigList":{ + "type":"list", + "member":{"shape":"TriggerConfig"} + }, + "TriggerEventType":{ + "type":"string", + "enum":[ + "DeploymentStart", + "DeploymentSuccess", + "DeploymentFailure", + "DeploymentStop", + "InstanceStart", + "InstanceSuccess", + "InstanceFailure" + ] + }, + "TriggerEventTypeList":{ + "type":"list", + "member":{"shape":"TriggerEventType"} + }, + "TriggerName":{"type":"string"}, + "TriggerTargetArn":{"type":"string"}, + "TriggerTargetsLimitExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "UpdateApplicationInput":{ + "type":"structure", + "members":{ + "applicationName":{"shape":"ApplicationName"}, + "newApplicationName":{"shape":"ApplicationName"} + } + }, + "UpdateDeploymentGroupInput":{ + "type":"structure", + "required":[ + "applicationName", + "currentDeploymentGroupName" + ], + "members":{ + "applicationName":{"shape":"ApplicationName"}, + "currentDeploymentGroupName":{"shape":"DeploymentGroupName"}, + "newDeploymentGroupName":{"shape":"DeploymentGroupName"}, + "deploymentConfigName":{"shape":"DeploymentConfigName"}, + "ec2TagFilters":{"shape":"EC2TagFilterList"}, + "onPremisesInstanceTagFilters":{"shape":"TagFilterList"}, + "autoScalingGroups":{"shape":"AutoScalingGroupNameList"}, + "serviceRoleArn":{"shape":"Role"}, + "triggerConfigurations":{"shape":"TriggerConfigList"} + } + }, + "UpdateDeploymentGroupOutput":{ + "type":"structure", + "members":{ + "hooksNotCleanedUp":{"shape":"AutoScalingGroupList"} + } + }, + "Value":{"type":"string"}, + "VersionId":{"type":"string"} + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/codedeploy/2014-10-06/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/codedeploy/2014-10-06/docs-2.json new file mode 100644 index 000000000..578da08bd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/codedeploy/2014-10-06/docs-2.json @@ -0,0 +1,1390 @@ +{ + "version": "2.0", + "service": "AWS CodeDeploy Overview

    This reference guide provides descriptions of the AWS CodeDeploy APIs. For more information about AWS CodeDeploy, see the AWS CodeDeploy User Guide.

    Using the APIs

    You can use the AWS CodeDeploy APIs to work with the following:

    • Applications are unique identifiers used by AWS CodeDeploy to ensure the correct combinations of revisions, deployment configurations, and deployment groups are being referenced during deployments.

      You can use the AWS CodeDeploy APIs to create, delete, get, list, and update applications.

    • Deployment configurations are sets of deployment rules and success and failure conditions used by AWS CodeDeploy during deployments.

      You can use the AWS CodeDeploy APIs to create, delete, get, and list deployment configurations.

    • Deployment groups are groups of instances to which application revisions can be deployed.

      You can use the AWS CodeDeploy APIs to create, delete, get, list, and update deployment groups.

    • Instances represent Amazon EC2 instances to which application revisions are deployed. Instances are identified by their Amazon EC2 tags or Auto Scaling group names. Instances belong to deployment groups.

      You can use the AWS CodeDeploy APIs to get and list instances.

    • Deployments represent the process of deploying revisions to instances.

      You can use the AWS CodeDeploy APIs to create, get, list, and stop deployments.

    • Application revisions are archive files stored in Amazon S3 buckets or GitHub repositories. These revisions contain source content (such as source code, web pages, executable files, and deployment scripts) along with an application specification (AppSpec) file. (The AppSpec file is unique to AWS CodeDeploy; it defines the deployment actions you want AWS CodeDeploy to execute.) For application revisions stored in Amazon S3 buckets, an application revision is uniquely identified by its Amazon S3 object key and its ETag, version, or both. For application revisions stored in GitHub repositories, an application revision is uniquely identified by its repository name and commit ID. Application revisions are deployed through deployment groups.

      You can use the AWS CodeDeploy APIs to get, list, and register application revisions.
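    To make these moving parts concrete, here is a minimal, hypothetical sketch (not part of this patch) that deploys an S3-hosted revision through the vendored Go SDK; the region, application, deployment group, bucket, and key below are placeholder values, not anything defined by this change.

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/codedeploy"
        )

        func main() {
            sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
            svc := codedeploy.New(sess)

            // Deploy the revision app.zip from S3 to an existing deployment group.
            out, err := svc.CreateDeployment(&codedeploy.CreateDeploymentInput{
                ApplicationName:     aws.String("my-app"),
                DeploymentGroupName: aws.String("my-deployment-group"),
                Revision: &codedeploy.RevisionLocation{
                    RevisionType: aws.String("S3"),
                    S3Location: &codedeploy.S3Location{
                        Bucket:     aws.String("my-bucket"),
                        Key:        aws.String("app.zip"),
                        BundleType: aws.String("zip"),
                    },
                },
            })
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println("started deployment:", aws.StringValue(out.DeploymentId))
        }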

    ", + "operations": { + "AddTagsToOnPremisesInstances": "

    Adds tags to on-premises instances.

    ", + "BatchGetApplicationRevisions": "

    Gets information about one or more application revisions.

    ", + "BatchGetApplications": "

    Gets information about one or more applications.

    ", + "BatchGetDeploymentGroups": "

    Gets information about one or more deployment groups.

    ", + "BatchGetDeploymentInstances": "

    Gets information about one or more instances that are part of a deployment group.

    ", + "BatchGetDeployments": "

    Gets information about one or more deployments.

    ", + "BatchGetOnPremisesInstances": "

    Gets information about one or more on-premises instances.

    ", + "CreateApplication": "

    Creates an application.

    ", + "CreateDeployment": "

    Deploys an application revision through the specified deployment group.

    ", + "CreateDeploymentConfig": "

    Creates a deployment configuration.

    ", + "CreateDeploymentGroup": "

    Creates a deployment group to which application revisions will be deployed.

    ", + "DeleteApplication": "

    Deletes an application.

    ", + "DeleteDeploymentConfig": "

    Deletes a deployment configuration.

    A deployment configuration cannot be deleted if it is currently in use. Predefined configurations cannot be deleted.", + "DeleteDeploymentGroup": "

    Deletes a deployment group.

    ", + "DeregisterOnPremisesInstance": "

    Deregisters an on-premises instance.

    ", + "GetApplication": "

    Gets information about an application.

    ", + "GetApplicationRevision": "

    Gets information about an application revision.

    ", + "GetDeployment": "

    Gets information about a deployment.

    ", + "GetDeploymentConfig": "

    Gets information about a deployment configuration.

    ", + "GetDeploymentGroup": "

    Gets information about a deployment group.

    ", + "GetDeploymentInstance": "

    Gets information about an instance as part of a deployment.

    ", + "GetOnPremisesInstance": "

    Gets information about an on-premises instance.

    ", + "ListApplicationRevisions": "

    Lists information about revisions for an application.

    ", + "ListApplications": "

    Lists the applications registered with the applicable IAM user or AWS account.

    ", + "ListDeploymentConfigs": "

    Lists the deployment configurations associated with the applicable IAM user or AWS account.

    ", + "ListDeploymentGroups": "

    Lists the deployment groups for an application registered with the applicable IAM user or AWS account.

    ", + "ListDeploymentInstances": "

    Lists the instances for a deployment associated with the applicable IAM user or AWS account.

    ", + "ListDeployments": "

    Lists the deployments in a deployment group for an application registered with the applicable IAM user or AWS account.
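    The list operations above page their results through nextToken. A hedged sketch of draining ListDeployments by hand, reusing the *codedeploy.CodeDeploy client from the earlier sketch (application and group names remain hypothetical):

        // Follow nextToken until the service stops returning one.
        input := &codedeploy.ListDeploymentsInput{
            ApplicationName:     aws.String("my-app"),
            DeploymentGroupName: aws.String("my-deployment-group"),
        }
        var ids []string
        for {
            page, err := svc.ListDeployments(input)
            if err != nil {
                log.Fatal(err)
            }
            for _, id := range page.Deployments {
                ids = append(ids, aws.StringValue(id))
            }
            if page.NextToken == nil {
                break
            }
            input.NextToken = page.NextToken
        }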

    ", + "ListOnPremisesInstances": "

    Gets a list of names for one or more on-premises instances.

    Unless otherwise specified, both registered and deregistered on-premises instance names will be listed. To list only registered or deregistered on-premises instance names, use the registration status parameter.
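    For example, restricting the listing to instances still registered with the service (a hedged sketch reusing the client above):

        out, err := svc.ListOnPremisesInstances(&codedeploy.ListOnPremisesInstancesInput{
            RegistrationStatus: aws.String("Registered"),
        })
        if err != nil {
            log.Fatal(err)
        }
        for _, name := range out.InstanceNames {
            fmt.Println(aws.StringValue(name))
        }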

    ", + "RegisterApplicationRevision": "

    Registers a revision for the specified application with AWS CodeDeploy.

    ", + "RegisterOnPremisesInstance": "

    Registers an on-premises instance.

    ", + "RemoveTagsFromOnPremisesInstances": "

    Removes one or more tags from one or more on-premises instances.

    ", + "StopDeployment": "

    Attempts to stop an ongoing deployment.

    ", + "UpdateApplication": "

    Changes the name of an application.

    ", + "UpdateDeploymentGroup": "

    Changes information about a deployment group.

    " + }, + "shapes": { + "AddTagsToOnPremisesInstancesInput": { + "base": "

    Represents the input of an add tags to on-premises instances operation.

    ", + "refs": { + } + }, + "ApplicationAlreadyExistsException": { + "base": "

    An application with the specified name already exists with the applicable IAM user or AWS account.

    ", + "refs": { + } + }, + "ApplicationDoesNotExistException": { + "base": "

    The application does not exist with the applicable IAM user or AWS account.

    ", + "refs": { + } + }, + "ApplicationId": { + "base": null, + "refs": { + "ApplicationInfo$applicationId": "

    The application ID.

    ", + "CreateApplicationOutput$applicationId": "

    A unique application ID.

    " + } + }, + "ApplicationInfo": { + "base": "

    Information about an application.

    ", + "refs": { + "ApplicationsInfoList$member": null, + "GetApplicationOutput$application": "

    Information about the application.

    " + } + }, + "ApplicationLimitExceededException": { + "base": "

    An attempt was made to create more applications than are allowed.

    ", + "refs": { + } + }, + "ApplicationName": { + "base": null, + "refs": { + "ApplicationInfo$applicationName": "

    The application name.

    ", + "ApplicationsList$member": null, + "BatchGetApplicationRevisionsInput$applicationName": "

    The name of an AWS CodeDeploy application about which to get revision information.

    ", + "BatchGetApplicationRevisionsOutput$applicationName": "

    The name of the application that corresponds to the revisions.

    ", + "BatchGetDeploymentGroupsInput$applicationName": "

    The name of an AWS CodeDeploy application associated with the applicable IAM user or AWS account.

    ", + "CreateApplicationInput$applicationName": "

    The name of the application. This name must be unique with the applicable IAM user or AWS account.

    ", + "CreateDeploymentGroupInput$applicationName": "

    The name of an AWS CodeDeploy application associated with the applicable IAM user or AWS account.

    ", + "CreateDeploymentInput$applicationName": "

    The name of an AWS CodeDeploy application associated with the applicable IAM user or AWS account.

    ", + "DeleteApplicationInput$applicationName": "

    The name of an AWS CodeDeploy application associated with the applicable IAM user or AWS account.

    ", + "DeleteDeploymentGroupInput$applicationName": "

    The name of an AWS CodeDeploy application associated with the applicable IAM user or AWS account.

    ", + "DeploymentGroupInfo$applicationName": "

    The application name.

    ", + "DeploymentInfo$applicationName": "

    The application name.

    ", + "GetApplicationInput$applicationName": "

    The name of an AWS CodeDeploy application associated with the applicable IAM user or AWS account.

    ", + "GetApplicationRevisionInput$applicationName": "

    The name of the application that corresponds to the revision.

    ", + "GetApplicationRevisionOutput$applicationName": "

    The name of the application that corresponds to the revision.

    ", + "GetDeploymentGroupInput$applicationName": "

    The name of an AWS CodeDeploy application associated with the applicable IAM user or AWS account.

    ", + "ListApplicationRevisionsInput$applicationName": "

    The name of an AWS CodeDeploy application associated with the applicable IAM user or AWS account.

    ", + "ListDeploymentGroupsInput$applicationName": "

    The name of an AWS CodeDeploy application associated with the applicable IAM user or AWS account.

    ", + "ListDeploymentGroupsOutput$applicationName": "

    The application name.

    ", + "ListDeploymentsInput$applicationName": "

    The name of an AWS CodeDeploy application associated with the applicable IAM user or AWS account.

    ", + "RegisterApplicationRevisionInput$applicationName": "

    The name of an AWS CodeDeploy application associated with the applicable IAM user or AWS account.

    ", + "UpdateApplicationInput$applicationName": "

    The current name of the application you want to change.

    ", + "UpdateApplicationInput$newApplicationName": "

    The new name to give the application.

    ", + "UpdateDeploymentGroupInput$applicationName": "

    The application name corresponding to the deployment group to update.

    " + } + }, + "ApplicationNameRequiredException": { + "base": "

    The minimum number of required application names was not specified.

    ", + "refs": { + } + }, + "ApplicationRevisionSortBy": { + "base": null, + "refs": { + "ListApplicationRevisionsInput$sortBy": "

    The column name to use to sort the list results:

    • registerTime: Sort by the time the revisions were registered with AWS CodeDeploy.
    • firstUsedTime: Sort by the time the revisions were first used in a deployment.
    • lastUsedTime: Sort by the time the revisions were last used in a deployment.

    If not specified or set to null, the results will be returned in an arbitrary order.
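
    Illustrative only, not part of the vendored file: a minimal aws-sdk-go sketch of these sort options, assuming the standard session setup; the application name is hypothetical.

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/codedeploy"
        )

        func listRecentRevisions() (*codedeploy.ListApplicationRevisionsOutput, error) {
            svc := codedeploy.New(session.Must(session.NewSession()))
            // Sort by the time each revision was last used in a deployment, newest first.
            return svc.ListApplicationRevisions(&codedeploy.ListApplicationRevisionsInput{
                ApplicationName: aws.String("MyApp"), // hypothetical application name
                SortBy:          aws.String("lastUsedTime"),
                SortOrder:       aws.String("descending"),
            })
        }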

    " + } + }, + "ApplicationsInfoList": { + "base": null, + "refs": { + "BatchGetApplicationsOutput$applicationsInfo": "

    Information about the applications.

    " + } + }, + "ApplicationsList": { + "base": null, + "refs": { + "BatchGetApplicationsInput$applicationNames": "

    A list of application names separated by spaces.

    ", + "ListApplicationsOutput$applications": "

    A list of application names.

    " + } + }, + "AutoScalingGroup": { + "base": "

    Information about an Auto Scaling group.

    ", + "refs": { + "AutoScalingGroupList$member": null + } + }, + "AutoScalingGroupHook": { + "base": null, + "refs": { + "AutoScalingGroup$hook": "

    An Auto Scaling lifecycle event hook name.

    " + } + }, + "AutoScalingGroupList": { + "base": null, + "refs": { + "DeleteDeploymentGroupOutput$hooksNotCleanedUp": "

    If the output contains no data, and the corresponding deployment group contained at least one Auto Scaling group, AWS CodeDeploy successfully removed all corresponding Auto Scaling lifecycle event hooks from the Amazon EC2 instances in the Auto Scaling group. If the output contains data, AWS CodeDeploy could not remove some Auto Scaling lifecycle event hooks from the Amazon EC2 instances in the Auto Scaling group.

    ", + "DeploymentGroupInfo$autoScalingGroups": "

    A list of associated Auto Scaling groups.

    ", + "UpdateDeploymentGroupOutput$hooksNotCleanedUp": "

    If the output contains no data, and the corresponding deployment group contained at least one Auto Scaling group, AWS CodeDeploy successfully removed all corresponding Auto Scaling lifecycle event hooks from the AWS account. If the output contains data, AWS CodeDeploy could not remove some Auto Scaling lifecycle event hooks from the AWS account.

    " + } + }, + "AutoScalingGroupName": { + "base": null, + "refs": { + "AutoScalingGroup$name": "

    The Auto Scaling group name.

    ", + "AutoScalingGroupNameList$member": null + } + }, + "AutoScalingGroupNameList": { + "base": null, + "refs": { + "CreateDeploymentGroupInput$autoScalingGroups": "

    A list of associated Auto Scaling groups.

    ", + "UpdateDeploymentGroupInput$autoScalingGroups": "

    The replacement list of Auto Scaling groups to be included in the deployment group, if you want to change them. To keep the Auto Scaling groups, enter their names. To remove Auto Scaling groups, do not enter any Auto Scaling group names.

    " + } + }, + "BatchGetApplicationRevisionsInput": { + "base": "

    Represents the input of a batch get application revisions operation.

    ", + "refs": { + } + }, + "BatchGetApplicationRevisionsOutput": { + "base": "

    Represents the output of a batch get application revisions operation.

    ", + "refs": { + } + }, + "BatchGetApplicationsInput": { + "base": "

    Represents the input of a batch get applications operation.

    ", + "refs": { + } + }, + "BatchGetApplicationsOutput": { + "base": "

    Represents the output of a batch get applications operation.

    ", + "refs": { + } + }, + "BatchGetDeploymentGroupsInput": { + "base": "

    Represents the input of a batch get deployment groups operation.

    ", + "refs": { + } + }, + "BatchGetDeploymentGroupsOutput": { + "base": "

    Represents the output of a batch get deployment groups operation.

    ", + "refs": { + } + }, + "BatchGetDeploymentInstancesInput": { + "base": "

    Represents the input of a batch get deployment instances operation.

    ", + "refs": { + } + }, + "BatchGetDeploymentInstancesOutput": { + "base": "

    Represents the output of a batch get deployment instance operation.

    ", + "refs": { + } + }, + "BatchGetDeploymentsInput": { + "base": "

    Represents the input of a batch get deployments operation.

    ", + "refs": { + } + }, + "BatchGetDeploymentsOutput": { + "base": "

    Represents the output of a batch get deployments operation.

    ", + "refs": { + } + }, + "BatchGetOnPremisesInstancesInput": { + "base": "

    Represents the input of a batch get on-premises instances operation.

    ", + "refs": { + } + }, + "BatchGetOnPremisesInstancesOutput": { + "base": "

    Represents the output of a batch get on-premises instances operation.

    ", + "refs": { + } + }, + "BatchLimitExceededException": { + "base": "

    The maximum number of names or IDs allowed for this request (100) was exceeded.

    ", + "refs": { + } + }, + "Boolean": { + "base": null, + "refs": { + "ApplicationInfo$linkedToGitHub": "

    True if the user has authenticated with GitHub for the specified application; otherwise, false.

    ", + "CreateDeploymentInput$ignoreApplicationStopFailures": "

    If set to true and the deployment causes the ApplicationStop deployment lifecycle event on an instance to fail, the deployment to that instance will not be considered to have failed at that point and will continue on to the BeforeInstall deployment lifecycle event.

    If set to false or not specified, and the deployment causes the ApplicationStop deployment lifecycle event on an instance to fail, the deployment to that instance will stop, and the deployment to that instance will be considered to have failed.
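
    As a hedged sketch only (application and deployment group names are hypothetical), this flag is set on the generated CreateDeploymentInput:

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/codedeploy"
        )

        func deployIgnoringStopFailures() error {
            svc := codedeploy.New(session.Must(session.NewSession()))
            // Continue on to BeforeInstall even if ApplicationStop fails on an instance.
            _, err := svc.CreateDeployment(&codedeploy.CreateDeploymentInput{
                ApplicationName:               aws.String("MyApp"), // hypothetical
                DeploymentGroupName:           aws.String("Prod"),  // hypothetical
                IgnoreApplicationStopFailures: aws.Bool(true),
            })
            return err
        }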

    ", + "DeploymentInfo$ignoreApplicationStopFailures": "

    If true and the deployment causes the ApplicationStop deployment lifecycle event on an instance to fail, the deployment to that instance will not be considered to have failed at that point and will continue on to the BeforeInstall deployment lifecycle event.

    If false or not specified, and the deployment causes the ApplicationStop deployment lifecycle event on an instance to fail, the deployment to that instance will stop, and the deployment to that instance will be considered to have failed.

    " + } + }, + "BucketNameFilterRequiredException": { + "base": "

    A bucket name is required, but was not provided.

    ", + "refs": { + } + }, + "BundleType": { + "base": null, + "refs": { + "S3Location$bundleType": "

    The file type of the application revision. Must be one of the following:

    • tar: A tar archive file.
    • tgz: A compressed tar archive file.
    • zip: A zip archive file.
    " + } + }, + "CommitId": { + "base": null, + "refs": { + "GitHubLocation$commitId": "

    The SHA1 commit ID of the GitHub commit that represents the bundled artifacts for the application revision.

    " + } + }, + "CreateApplicationInput": { + "base": "

    Represents the input of a create application operation.

    ", + "refs": { + } + }, + "CreateApplicationOutput": { + "base": "

    Represents the output of a create application operation.

    ", + "refs": { + } + }, + "CreateDeploymentConfigInput": { + "base": "

    Represents the input of a create deployment configuration operation.

    ", + "refs": { + } + }, + "CreateDeploymentConfigOutput": { + "base": "

    Represents the output of a create deployment configuration operation.

    ", + "refs": { + } + }, + "CreateDeploymentGroupInput": { + "base": "

    Represents the input of a create deployment group operation.

    ", + "refs": { + } + }, + "CreateDeploymentGroupOutput": { + "base": "

    Represents the output of a create deployment group operation.

    ", + "refs": { + } + }, + "CreateDeploymentInput": { + "base": "

    Represents the input of a create deployment operation.

    ", + "refs": { + } + }, + "CreateDeploymentOutput": { + "base": "

    Represents the output of a create deployment operation.

    ", + "refs": { + } + }, + "DeleteApplicationInput": { + "base": "

    Represents the input of a delete application operation.

    ", + "refs": { + } + }, + "DeleteDeploymentConfigInput": { + "base": "

    Represents the input of a delete deployment configuration operation.

    ", + "refs": { + } + }, + "DeleteDeploymentGroupInput": { + "base": "

    Represents the input of a delete deployment group operation.

    ", + "refs": { + } + }, + "DeleteDeploymentGroupOutput": { + "base": "

    Represents the output of a delete deployment group operation.

    ", + "refs": { + } + }, + "DeploymentAlreadyCompletedException": { + "base": "

    The deployment is already complete.

    ", + "refs": { + } + }, + "DeploymentConfigAlreadyExistsException": { + "base": "

    A deployment configuration with the specified name already exists with the applicable IAM user or AWS account.

    ", + "refs": { + } + }, + "DeploymentConfigDoesNotExistException": { + "base": "

    The deployment configuration does not exist with the applicable IAM user or AWS account.

    ", + "refs": { + } + }, + "DeploymentConfigId": { + "base": null, + "refs": { + "CreateDeploymentConfigOutput$deploymentConfigId": "

    A unique deployment configuration ID.

    ", + "DeploymentConfigInfo$deploymentConfigId": "

    The deployment configuration ID.

    " + } + }, + "DeploymentConfigInUseException": { + "base": "

    The deployment configuration is still in use.

    ", + "refs": { + } + }, + "DeploymentConfigInfo": { + "base": "

    Information about a deployment configuration.

    ", + "refs": { + "GetDeploymentConfigOutput$deploymentConfigInfo": "

    Information about the deployment configuration.

    " + } + }, + "DeploymentConfigLimitExceededException": { + "base": "

    The deployment configurations limit was exceeded.

    ", + "refs": { + } + }, + "DeploymentConfigName": { + "base": null, + "refs": { + "CreateDeploymentConfigInput$deploymentConfigName": "

    The name of the deployment configuration to create.

    ", + "CreateDeploymentGroupInput$deploymentConfigName": "

    If specified, the deployment configuration name can be either one of the predefined configurations provided with AWS CodeDeploy or a custom deployment configuration that you create by calling the create deployment configuration operation.

    CodeDeployDefault.OneAtATime is the default deployment configuration. It is used if a configuration isn't specified for the deployment or the deployment group.

    The predefined deployment configurations include the following:

    • CodeDeployDefault.AllAtOnce attempts to deploy an application revision to as many instances as possible at once. The status of the overall deployment will be displayed as Succeeded if the application revision is deployed to one or more of the instances. The status of the overall deployment will be displayed as Failed if the application revision is not deployed to any of the instances. Using an example of nine instances, CodeDeployDefault.AllAtOnce will attempt to deploy to all nine instances at once. The overall deployment will succeed if deployment to even a single instance is successful; it will fail only if deployments to all nine instances fail.

    • CodeDeployDefault.HalfAtATime deploys to up to half of the instances at a time (with fractions rounded down). The overall deployment succeeds if the application revision is deployed to at least half of the instances (with fractions rounded up); otherwise, the deployment fails. In the example of nine instances, it will deploy to up to four instances at a time. The overall deployment succeeds if deployment to five or more instances succeeds; otherwise, the deployment fails. The revision may be successfully deployed to some instances even if the overall deployment fails.

    • CodeDeployDefault.OneAtATime deploys the application revision to only one instance at a time.

      For deployment groups that contain more than one instance:

      • The overall deployment succeeds if the application revision is deployed to all of the instances. The exception to this rule is if deployment to the last instance fails, the overall deployment still succeeds. This is because AWS CodeDeploy allows only one instance at a time to be taken offline with the CodeDeployDefault.OneAtATime configuration.

      • The overall deployment fails as soon as the application revision fails to be deployed to any but the last instance. The revision may be successfully deployed to some instances even if the overall deployment fails.

      • In an example using nine instances, it will deploy to one instance at a time. The overall deployment succeeds if deployment to the first eight instances is successful; the overall deployment fails if deployment to any of the first eight instances fails.

      For deployment groups that contain only one instance, the overall deployment is successful only if deployment to the single instance is successful.
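
    For illustration only (the application, group, and role ARN are hypothetical), a predefined configuration is selected by name when creating a deployment group:

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/codedeploy"
        )

        func createGroupHalfAtATime() error {
            svc := codedeploy.New(session.Must(session.NewSession()))
            _, err := svc.CreateDeploymentGroup(&codedeploy.CreateDeploymentGroupInput{
                ApplicationName:      aws.String("MyApp"), // hypothetical
                DeploymentGroupName:  aws.String("Prod"),  // hypothetical
                DeploymentConfigName: aws.String("CodeDeployDefault.HalfAtATime"),
                ServiceRoleArn:       aws.String("arn:aws:iam::123456789012:role/CodeDeployRole"), // hypothetical
            })
            return err
        }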

    ", + "CreateDeploymentInput$deploymentConfigName": "

    The name of a deployment configuration associated with the applicable IAM user or AWS account.

    If not specified, the value configured in the deployment group will be used as the default. If the deployment group does not have a deployment configuration associated with it, then CodeDeployDefault.OneAtATime will be used by default.

    ", + "DeleteDeploymentConfigInput$deploymentConfigName": "

    The name of a deployment configuration associated with the applicable IAM user or AWS account.

    ", + "DeploymentConfigInfo$deploymentConfigName": "

    The deployment configuration name.

    ", + "DeploymentConfigsList$member": null, + "DeploymentGroupInfo$deploymentConfigName": "

    The deployment configuration name.

    ", + "DeploymentInfo$deploymentConfigName": "

    The deployment configuration name.

    ", + "GetDeploymentConfigInput$deploymentConfigName": "

    The name of a deployment configuration associated with the applicable IAM user or AWS account.

    ", + "UpdateDeploymentGroupInput$deploymentConfigName": "

    The replacement deployment configuration name to use, if you want to change it.

    " + } + }, + "DeploymentConfigNameRequiredException": { + "base": "

    The deployment configuration name was not specified.

    ", + "refs": { + } + }, + "DeploymentConfigsList": { + "base": null, + "refs": { + "ListDeploymentConfigsOutput$deploymentConfigsList": "

    A list of deployment configurations, including built-in configurations such as CodeDeployDefault.OneAtATime.

    " + } + }, + "DeploymentCreator": { + "base": null, + "refs": { + "DeploymentInfo$creator": "

    The means by which the deployment was created:

    • user: A user created the deployment.
    • autoscaling: Auto Scaling created the deployment.
    " + } + }, + "DeploymentDoesNotExistException": { + "base": "

    The deployment does not exist with the applicable IAM user or AWS account.

    ", + "refs": { + } + }, + "DeploymentGroupAlreadyExistsException": { + "base": "

    A deployment group with the specified name already exists with the applicable IAM user or AWS account.

    ", + "refs": { + } + }, + "DeploymentGroupDoesNotExistException": { + "base": "

    The named deployment group does not exist with the applicable IAM user or AWS account.

    ", + "refs": { + } + }, + "DeploymentGroupId": { + "base": null, + "refs": { + "CreateDeploymentGroupOutput$deploymentGroupId": "

    A unique deployment group ID.

    ", + "DeploymentGroupInfo$deploymentGroupId": "

    The deployment group ID.

    " + } + }, + "DeploymentGroupInfo": { + "base": "

    Information about a deployment group.

    ", + "refs": { + "DeploymentGroupInfoList$member": null, + "GetDeploymentGroupOutput$deploymentGroupInfo": "

    Information about the deployment group.

    " + } + }, + "DeploymentGroupInfoList": { + "base": null, + "refs": { + "BatchGetDeploymentGroupsOutput$deploymentGroupsInfo": "

    Information about the deployment groups.

    " + } + }, + "DeploymentGroupLimitExceededException": { + "base": "

    The deployment groups limit was exceeded.

    ", + "refs": { + } + }, + "DeploymentGroupName": { + "base": null, + "refs": { + "CreateDeploymentGroupInput$deploymentGroupName": "

    The name of a new deployment group for the specified application.

    ", + "CreateDeploymentInput$deploymentGroupName": "

    The name of the deployment group.

    ", + "DeleteDeploymentGroupInput$deploymentGroupName": "

    The name of an existing deployment group for the specified application.

    ", + "DeploymentGroupInfo$deploymentGroupName": "

    The deployment group name.

    ", + "DeploymentGroupsList$member": null, + "DeploymentInfo$deploymentGroupName": "

    The deployment group name.

    ", + "GetDeploymentGroupInput$deploymentGroupName": "

    The name of an existing deployment group for the specified application.

    ", + "ListDeploymentsInput$deploymentGroupName": "

    The name of an existing deployment group for the specified application.

    ", + "UpdateDeploymentGroupInput$currentDeploymentGroupName": "

    The current name of the deployment group.

    ", + "UpdateDeploymentGroupInput$newDeploymentGroupName": "

    The new name of the deployment group, if you want to change it.

    " + } + }, + "DeploymentGroupNameRequiredException": { + "base": "

    The deployment group name was not specified.

    ", + "refs": { + } + }, + "DeploymentGroupsList": { + "base": null, + "refs": { + "BatchGetDeploymentGroupsInput$deploymentGroupNames": "

    The deployment groups' names.

    ", + "GenericRevisionInfo$deploymentGroups": "

    The deployment groups for which this is the current target revision.

    ", + "ListDeploymentGroupsOutput$deploymentGroups": "

    A list of corresponding deployment group names.

    " + } + }, + "DeploymentId": { + "base": null, + "refs": { + "BatchGetDeploymentInstancesInput$deploymentId": "

    The unique ID of a deployment.

    ", + "CreateDeploymentOutput$deploymentId": "

    A unique deployment ID.

    ", + "DeploymentInfo$deploymentId": "

    The deployment ID.

    ", + "DeploymentsList$member": null, + "GetDeploymentInput$deploymentId": "

    A deployment ID associated with the applicable IAM user or AWS account.

    ", + "GetDeploymentInstanceInput$deploymentId": "

    The unique ID of a deployment.

    ", + "InstanceSummary$deploymentId": "

    The deployment ID.

    ", + "ListDeploymentInstancesInput$deploymentId": "

    The unique ID of a deployment.

    ", + "StopDeploymentInput$deploymentId": "

    The unique ID of a deployment.

    " + } + }, + "DeploymentIdRequiredException": { + "base": "

    At least one deployment ID must be specified.

    ", + "refs": { + } + }, + "DeploymentInfo": { + "base": "

    Information about a deployment.

    ", + "refs": { + "DeploymentsInfoList$member": null, + "GetDeploymentOutput$deploymentInfo": "

    Information about the deployment.

    " + } + }, + "DeploymentLimitExceededException": { + "base": "

    The number of allowed deployments was exceeded.

    ", + "refs": { + } + }, + "DeploymentNotStartedException": { + "base": "

    The specified deployment has not started.

    ", + "refs": { + } + }, + "DeploymentOverview": { + "base": "

    Information about the deployment status of the instances in the deployment.

    ", + "refs": { + "DeploymentInfo$deploymentOverview": "

    A summary of the deployment status of the instances in the deployment.

    " + } + }, + "DeploymentStatus": { + "base": null, + "refs": { + "DeploymentInfo$status": "

    The current state of the deployment as a whole.

    ", + "DeploymentStatusList$member": null + } + }, + "DeploymentStatusList": { + "base": null, + "refs": { + "ListDeploymentsInput$includeOnlyStatuses": "

    A subset of deployments to list by status:

    • Created: Include created deployments in the resulting list.
    • Queued: Include queued deployments in the resulting list.
    • In Progress: Include in-progress deployments in the resulting list.
    • Succeeded: Include successful deployments in the resulting list.
    • Failed: Include failed deployments in the resulting list.
    • Stopped: Include stopped deployments in the resulting list.
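
    A minimal sketch of this status filter in aws-sdk-go (assumed names, marked hypothetical):

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/codedeploy"
        )

        func listUnhealthyDeployments() (*codedeploy.ListDeploymentsOutput, error) {
            svc := codedeploy.New(session.Must(session.NewSession()))
            // Restrict the listing to failed and stopped deployments.
            return svc.ListDeployments(&codedeploy.ListDeploymentsInput{
                ApplicationName:     aws.String("MyApp"), // hypothetical
                DeploymentGroupName: aws.String("Prod"),  // hypothetical
                IncludeOnlyStatuses: aws.StringSlice([]string{"Failed", "Stopped"}),
            })
        }
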
    " + } + }, + "DeploymentsInfoList": { + "base": null, + "refs": { + "BatchGetDeploymentsOutput$deploymentsInfo": "

    Information about the deployments.

    " + } + }, + "DeploymentsList": { + "base": null, + "refs": { + "BatchGetDeploymentsInput$deploymentIds": "

    A list of deployment IDs, separated by spaces.

    ", + "ListDeploymentsOutput$deployments": "

    A list of deployment IDs.

    " + } + }, + "DeregisterOnPremisesInstanceInput": { + "base": "

    Represents the input of a deregister on-premises instance operation.

    ", + "refs": { + } + }, + "Description": { + "base": null, + "refs": { + "CreateDeploymentInput$description": "

    A comment about the deployment.

    ", + "DeploymentInfo$description": "

    A comment about the deployment.

    ", + "GenericRevisionInfo$description": "

    A comment about the revision.

    ", + "RegisterApplicationRevisionInput$description": "

    A comment about the revision.

    " + } + }, + "DescriptionTooLongException": { + "base": "

    The description is too long.

    ", + "refs": { + } + }, + "Diagnostics": { + "base": "

    Diagnostic information about executable scripts that are part of a deployment.

    ", + "refs": { + "LifecycleEvent$diagnostics": "

    Diagnostic information about the deployment lifecycle event.

    " + } + }, + "EC2TagFilter": { + "base": "

    Information about a tag filter.

    ", + "refs": { + "EC2TagFilterList$member": null + } + }, + "EC2TagFilterList": { + "base": null, + "refs": { + "CreateDeploymentGroupInput$ec2TagFilters": "

    The Amazon EC2 tags on which to filter.

    ", + "DeploymentGroupInfo$ec2TagFilters": "

    The Amazon EC2 tags on which to filter.

    ", + "UpdateDeploymentGroupInput$ec2TagFilters": "

    The replacement set of Amazon EC2 tags on which to filter, if you want to change them. To keep the existing tags, enter their names. To remove tags, do not enter any tag names.

    " + } + }, + "EC2TagFilterType": { + "base": null, + "refs": { + "EC2TagFilter$Type": "

    The tag filter type:

    • KEY_ONLY: Key only.
    • VALUE_ONLY: Value only.
    • KEY_AND_VALUE: Key and value.
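
    As a sketch, one of these filter types expressed with the generated types (tag key and value are hypothetical):

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/codedeploy"
        )

        // Match Amazon EC2 instances whose "Name" tag has the value "web".
        var webFilter = &codedeploy.EC2TagFilter{
            Type:  aws.String("KEY_AND_VALUE"),
            Key:   aws.String("Name"), // hypothetical tag key
            Value: aws.String("web"),  // hypothetical tag value
        }
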
    " + } + }, + "ETag": { + "base": null, + "refs": { + "S3Location$eTag": "

    The ETag of the Amazon S3 object that represents the bundled artifacts for the application revision.

    If the ETag is not specified as an input parameter, ETag validation of the object will be skipped.

    " + } + }, + "ErrorCode": { + "base": null, + "refs": { + "ErrorInformation$code": "

    The error code:

    • APPLICATION_MISSING: The application was missing. This error code will most likely be raised if the application is deleted after the deployment is created but before it is started.
    • DEPLOYMENT_GROUP_MISSING: The deployment group was missing. This error code will most likely be raised if the deployment group is deleted after the deployment is created but before it is started.
    • HEALTH_CONSTRAINTS: The deployment failed on too many instances to be successfully deployed within the instance health constraints specified.
    • HEALTH_CONSTRAINTS_INVALID: The revision cannot be successfully deployed within the instance health constraints specified.
    • IAM_ROLE_MISSING: The service role cannot be accessed.
    • IAM_ROLE_PERMISSIONS: The service role does not have the correct permissions.
    • INTERNAL_ERROR: There was an internal error.
    • NO_EC2_SUBSCRIPTION: The calling account is not subscribed to the Amazon EC2 service.
    • NO_INSTANCES: No instances were specified, or no instances can be found.
    • OVER_MAX_INSTANCES: The maximum number of instances was exceeded.
    • THROTTLED: The operation was throttled because the calling account exceeded the throttling limits of one or more AWS services.
    • TIMEOUT: The deployment has timed out.
    • REVISION_MISSING: The revision ID was missing. This error code will most likely be raised if the revision is deleted after the deployment is created but before it is started.
    " + } + }, + "ErrorInformation": { + "base": "

    Information about a deployment error.

    ", + "refs": { + "DeploymentInfo$errorInformation": "

    Information about any error associated with this deployment.

    " + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "BatchGetApplicationRevisionsOutput$errorMessage": "

    Information about errors that may have occurred during the API call.

    ", + "BatchGetDeploymentGroupsOutput$errorMessage": "

    Information about errors that may have occurred during the API call.

    ", + "BatchGetDeploymentInstancesOutput$errorMessage": "

    Information about errors that may have occurred during the API call.

    ", + "ErrorInformation$message": "

    An accompanying error message.

    " + } + }, + "GenericRevisionInfo": { + "base": "

    Information about an application revision.

    ", + "refs": { + "GetApplicationRevisionOutput$revisionInfo": "

    General information about the revision.

    ", + "RevisionInfo$genericRevisionInfo": null + } + }, + "GetApplicationInput": { + "base": "

    Represents the input of a get application operation.

    ", + "refs": { + } + }, + "GetApplicationOutput": { + "base": "

    Represents the output of a get application operation.

    ", + "refs": { + } + }, + "GetApplicationRevisionInput": { + "base": "

    Represents the input of a get application revision operation.

    ", + "refs": { + } + }, + "GetApplicationRevisionOutput": { + "base": "

    Represents the output of a get application revision operation.

    ", + "refs": { + } + }, + "GetDeploymentConfigInput": { + "base": "

    Represents the input of a get deployment configuration operation.

    ", + "refs": { + } + }, + "GetDeploymentConfigOutput": { + "base": "

    Represents the output of a get deployment configuration operation.

    ", + "refs": { + } + }, + "GetDeploymentGroupInput": { + "base": "

    Represents the input of a get deployment group operation.

    ", + "refs": { + } + }, + "GetDeploymentGroupOutput": { + "base": "

    Represents the output of a get deployment group operation.

    ", + "refs": { + } + }, + "GetDeploymentInput": { + "base": "

    Represents the input of a get deployment operation.

    ", + "refs": { + } + }, + "GetDeploymentInstanceInput": { + "base": "

    Represents the input of a get deployment instance operation.

    ", + "refs": { + } + }, + "GetDeploymentInstanceOutput": { + "base": "

    Represents the output of a get deployment instance operation.

    ", + "refs": { + } + }, + "GetDeploymentOutput": { + "base": "

    Represents the output of a get deployment operation.

    ", + "refs": { + } + }, + "GetOnPremisesInstanceInput": { + "base": "

    Represents the input of a get on-premises instance operation.

    ", + "refs": { + } + }, + "GetOnPremisesInstanceOutput": { + "base": "

    Represents the output of a get on-premises instance operation.

    ", + "refs": { + } + }, + "GitHubLocation": { + "base": "

    Information about the location of application artifacts stored in GitHub.

    ", + "refs": { + "RevisionLocation$gitHubLocation": null + } + }, + "IamUserArn": { + "base": null, + "refs": { + "InstanceInfo$iamUserArn": "

    The IAM user ARN associated with the on-premises instance.

    ", + "RegisterOnPremisesInstanceInput$iamUserArn": "

    The ARN of the IAM user to associate with the on-premises instance.

    " + } + }, + "IamUserArnAlreadyRegisteredException": { + "base": "

    The specified IAM user ARN is already registered with an on-premises instance.

    ", + "refs": { + } + }, + "IamUserArnRequiredException": { + "base": "

    An IAM user ARN was not specified.

    ", + "refs": { + } + }, + "InstanceArn": { + "base": null, + "refs": { + "InstanceInfo$instanceArn": "

    The ARN of the on-premises instance.

    " + } + }, + "InstanceCount": { + "base": null, + "refs": { + "DeploymentOverview$Pending": "

    The number of instances in the deployment in a pending state.

    ", + "DeploymentOverview$InProgress": "

    The number of instances in which the deployment is in progress.

    ", + "DeploymentOverview$Succeeded": "

    The number of instances in the deployment to which revisions have been successfully deployed.

    ", + "DeploymentOverview$Failed": "

    The number of instances in the deployment in a failed state.

    ", + "DeploymentOverview$Skipped": "

    The number of instances in the deployment in a skipped state.

    " + } + }, + "InstanceDoesNotExistException": { + "base": "

    The specified instance does not exist in the deployment group.

    ", + "refs": { + } + }, + "InstanceId": { + "base": null, + "refs": { + "GetDeploymentInstanceInput$instanceId": "

    The unique ID of an instance in the deployment group.

    ", + "InstanceSummary$instanceId": "

    The instance ID.

    ", + "InstancesList$member": null + } + }, + "InstanceIdRequiredException": { + "base": "

    The instance ID was not specified.

    ", + "refs": { + } + }, + "InstanceInfo": { + "base": "

    Information about an on-premises instance.

    ", + "refs": { + "GetOnPremisesInstanceOutput$instanceInfo": "

    Information about the on-premises instance.

    ", + "InstanceInfoList$member": null + } + }, + "InstanceInfoList": { + "base": null, + "refs": { + "BatchGetOnPremisesInstancesOutput$instanceInfos": "

    Information about the on-premises instances.

    " + } + }, + "InstanceLimitExceededException": { + "base": "

    The maximum number of allowed on-premises instances in a single call was exceeded.

    ", + "refs": { + } + }, + "InstanceName": { + "base": null, + "refs": { + "DeregisterOnPremisesInstanceInput$instanceName": "

    The name of the on-premises instance to deregister.

    ", + "GetOnPremisesInstanceInput$instanceName": "

    The name of the on-premises instance about which to get information.

    ", + "InstanceInfo$instanceName": "

    The name of the on-premises instance.

    ", + "InstanceNameList$member": null, + "RegisterOnPremisesInstanceInput$instanceName": "

    The name of the on-premises instance to register.

    " + } + }, + "InstanceNameAlreadyRegisteredException": { + "base": "

    The specified on-premises instance name is already registered.

    ", + "refs": { + } + }, + "InstanceNameList": { + "base": null, + "refs": { + "AddTagsToOnPremisesInstancesInput$instanceNames": "

    The names of the on-premises instances to which to add tags.

    ", + "BatchGetOnPremisesInstancesInput$instanceNames": "

    The names of the on-premises instances about which to get information.

    ", + "ListOnPremisesInstancesOutput$instanceNames": "

    The list of matching on-premises instance names.

    ", + "RemoveTagsFromOnPremisesInstancesInput$instanceNames": "

    The names of the on-premises instances from which to remove tags.

    " + } + }, + "InstanceNameRequiredException": { + "base": "

    An on-premises instance name was not specified.

    ", + "refs": { + } + }, + "InstanceNotRegisteredException": { + "base": "

    The specified on-premises instance is not registered.

    ", + "refs": { + } + }, + "InstanceStatus": { + "base": null, + "refs": { + "InstanceStatusList$member": null, + "InstanceSummary$status": "

    The deployment status for this instance:

    • Pending: The deployment is pending for this instance.
    • In Progress: The deployment is in progress for this instance.
    • Succeeded: The deployment has succeeded for this instance.
    • Failed: The deployment has failed for this instance.
    • Skipped: The deployment has been skipped for this instance.
    • Unknown: The deployment status is unknown for this instance.
    " + } + }, + "InstanceStatusList": { + "base": null, + "refs": { + "ListDeploymentInstancesInput$instanceStatusFilter": "

    A subset of instances to list by status:

    • Pending: Include those instances with pending deployments.
    • InProgress: Include those instances where deployments are still in progress.
    • Succeeded: Include those instances with successful deployments.
    • Failed: Include those instances with failed deployments.
    • Skipped: Include those instances with skipped deployments.
    • Unknown: Include those instances with deployments in an unknown state.
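
    Sketch only (the deployment ID is hypothetical); the same status names are passed as the instance status filter:

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/codedeploy"
        )

        func listProblemInstances() (*codedeploy.ListDeploymentInstancesOutput, error) {
            svc := codedeploy.New(session.Must(session.NewSession()))
            // Only instances whose deployment failed or was skipped.
            return svc.ListDeploymentInstances(&codedeploy.ListDeploymentInstancesInput{
                DeploymentId:         aws.String("d-EXAMPLE"), // hypothetical deployment ID
                InstanceStatusFilter: aws.StringSlice([]string{"Failed", "Skipped"}),
            })
        }
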
    " + } + }, + "InstanceSummary": { + "base": "

    Information about an instance in a deployment.

    ", + "refs": { + "GetDeploymentInstanceOutput$instanceSummary": "

    Information about the instance.

    ", + "InstanceSummaryList$member": null + } + }, + "InstanceSummaryList": { + "base": null, + "refs": { + "BatchGetDeploymentInstancesOutput$instancesSummary": "

    Information about the instances.

    " + } + }, + "InstancesList": { + "base": null, + "refs": { + "BatchGetDeploymentInstancesInput$instanceIds": "

    The unique IDs of instances in the deployment group.

    ", + "ListDeploymentInstancesOutput$instancesList": "

    A list of instance IDs.

    " + } + }, + "InvalidApplicationNameException": { + "base": "

    The application name was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidAutoScalingGroupException": { + "base": "

    The Auto Scaling group was specified in an invalid format or does not exist.

    ", + "refs": { + } + }, + "InvalidBucketNameFilterException": { + "base": "

    The bucket name either doesn't exist or was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidDeployedStateFilterException": { + "base": "

    The deployed state filter was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidDeploymentConfigNameException": { + "base": "

    The deployment configuration name was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidDeploymentGroupNameException": { + "base": "

    The deployment group name was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidDeploymentIdException": { + "base": "

    At least one of the deployment IDs was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidDeploymentStatusException": { + "base": "

    The specified deployment status doesn't exist or cannot be determined.

    ", + "refs": { + } + }, + "InvalidEC2TagException": { + "base": "

    The tag was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidIamUserArnException": { + "base": "

    The IAM user ARN was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidInstanceNameException": { + "base": "

    The specified on-premises instance name was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidInstanceStatusException": { + "base": "

    The specified instance status does not exist.

    ", + "refs": { + } + }, + "InvalidKeyPrefixFilterException": { + "base": "

    The specified key prefix filter was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidMinimumHealthyHostValueException": { + "base": "

    The minimum healthy instance value was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidNextTokenException": { + "base": "

    The next token was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidOperationException": { + "base": "

    An invalid operation was detected.

    ", + "refs": { + } + }, + "InvalidRegistrationStatusException": { + "base": "

    The registration status was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidRevisionException": { + "base": "

    The revision was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidRoleException": { + "base": "

    The service role ARN was specified in an invalid format, or, if an Auto Scaling group was specified, the specified service role does not grant the appropriate permissions to Auto Scaling.

    ", + "refs": { + } + }, + "InvalidSortByException": { + "base": "

    The column name to sort by is either not present or was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidSortOrderException": { + "base": "

    The sort order was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidTagException": { + "base": "

    The specified tag was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidTagFilterException": { + "base": "

    The specified tag filter was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidTimeRangeException": { + "base": "

    The specified time range was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidTriggerConfigException": { + "base": "

    The trigger was specified in an invalid format.

    ", + "refs": { + } + }, + "Key": { + "base": null, + "refs": { + "EC2TagFilter$Key": "

    The tag filter key.

    ", + "Tag$Key": "

    The tag's key.

    ", + "TagFilter$Key": "

    The on-premises instance tag filter key.

    " + } + }, + "LifecycleErrorCode": { + "base": null, + "refs": { + "Diagnostics$errorCode": "

    The associated error code:

    • Success: The specified script ran.
    • ScriptMissing: The specified script was not found in the specified location.
    • ScriptNotExecutable: The specified script is not a recognized executable file type.
    • ScriptTimedOut: The specified script did not finish running in the specified time period.
    • ScriptFailed: The specified script failed to run as expected.
    • UnknownError: The specified script did not run for an unknown reason.
    " + } + }, + "LifecycleEvent": { + "base": "

    Information about a deployment lifecycle event.

    ", + "refs": { + "LifecycleEventList$member": null + } + }, + "LifecycleEventList": { + "base": null, + "refs": { + "InstanceSummary$lifecycleEvents": "

    A list of lifecycle events for this instance.

    " + } + }, + "LifecycleEventName": { + "base": null, + "refs": { + "LifecycleEvent$lifecycleEventName": "

    The deployment lifecycle event name, such as ApplicationStop, BeforeInstall, AfterInstall, ApplicationStart, or ValidateService.

    " + } + }, + "LifecycleEventStatus": { + "base": null, + "refs": { + "LifecycleEvent$status": "

    The deployment lifecycle event status:

    • Pending: The deployment lifecycle event is pending.
    • InProgress: The deployment lifecycle event is in progress.
    • Succeeded: The deployment lifecycle event ran successfully.
    • Failed: The deployment lifecycle event has failed.
    • Skipped: The deployment lifecycle event has been skipped.
    • Unknown: The deployment lifecycle event is unknown.
    " + } + }, + "LifecycleHookLimitExceededException": { + "base": "

    The limit for lifecycle hooks was exceeded.

    ", + "refs": { + } + }, + "LifecycleMessage": { + "base": null, + "refs": { + "Diagnostics$message": "

    The message associated with the error.

    " + } + }, + "ListApplicationRevisionsInput": { + "base": "

    Represents the input of a list application revisions operation.

    ", + "refs": { + } + }, + "ListApplicationRevisionsOutput": { + "base": "

    Represents the output of a list application revisions operation.

    ", + "refs": { + } + }, + "ListApplicationsInput": { + "base": "

    Represents the input of a list applications operation.

    ", + "refs": { + } + }, + "ListApplicationsOutput": { + "base": "

    Represents the output of a list applications operation.

    ", + "refs": { + } + }, + "ListDeploymentConfigsInput": { + "base": "

    Represents the input of a list deployment configurations operation.

    ", + "refs": { + } + }, + "ListDeploymentConfigsOutput": { + "base": "

    Represents the output of a list deployment configurations operation.

    ", + "refs": { + } + }, + "ListDeploymentGroupsInput": { + "base": "

    Represents the input of a list deployment groups operation.

    ", + "refs": { + } + }, + "ListDeploymentGroupsOutput": { + "base": "

    Represents the output of a list deployment groups operation.

    ", + "refs": { + } + }, + "ListDeploymentInstancesInput": { + "base": "

    Represents the input of a list deployment instances operation.

    ", + "refs": { + } + }, + "ListDeploymentInstancesOutput": { + "base": "

    Represents the output of a list deployment instances operation.

    ", + "refs": { + } + }, + "ListDeploymentsInput": { + "base": "

    Represents the input of a list deployments operation.

    ", + "refs": { + } + }, + "ListDeploymentsOutput": { + "base": "

    Represents the output of a list deployments operation.

    ", + "refs": { + } + }, + "ListOnPremisesInstancesInput": { + "base": "

    Represents the input of a list on-premises instances operation.

    .", + "refs": { + } + }, + "ListOnPremisesInstancesOutput": { + "base": "

    Represents the output of list on-premises instances operation.

    ", + "refs": { + } + }, + "ListStateFilterAction": { + "base": null, + "refs": { + "ListApplicationRevisionsInput$deployed": "

    Whether to list revisions based on whether the revision is the target revision of a deployment group:

    • include: List revisions that are target revisions of a deployment group.
    • exclude: Do not list revisions that are target revisions of a deployment group.
    • ignore: List all revisions.
    " + } + }, + "LogTail": { + "base": null, + "refs": { + "Diagnostics$logTail": "

    The last portion of the diagnostic log.

    If available, AWS CodeDeploy returns up to the last 4 KB of the diagnostic log.

    " + } + }, + "Message": { + "base": null, + "refs": { + "StopDeploymentOutput$statusMessage": "

    An accompanying status message.

    " + } + }, + "MinimumHealthyHosts": { + "base": "

    Information about the minimum number of healthy instances.

    ", + "refs": { + "CreateDeploymentConfigInput$minimumHealthyHosts": "

    The minimum number of healthy instances that should be available at any time during the deployment. There are two parameters expected in the input: type and value.

    The type parameter takes either of the following values:

    • HOST_COUNT: The value parameter represents the minimum number of healthy instances as an absolute value.
    • FLEET_PERCENT: The value parameter represents the minimum number of healthy instances as a percentage of the total number of instances in the deployment. If you specify FLEET_PERCENT, at the start of the deployment, AWS CodeDeploy converts the percentage to the equivalent number of instances and rounds up fractional instances.

    The value parameter takes an integer.

    For example, to set a minimum of 95% healthy instances, specify a type of FLEET_PERCENT and a value of 95.
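
    That worked example translates directly into the generated types; a sketch (the configuration name is hypothetical):

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/codedeploy"
        )

        func create95PercentConfig() error {
            svc := codedeploy.New(session.Must(session.NewSession()))
            _, err := svc.CreateDeploymentConfig(&codedeploy.CreateDeploymentConfigInput{
                DeploymentConfigName: aws.String("Custom.95PercentHealthy"), // hypothetical
                MinimumHealthyHosts: &codedeploy.MinimumHealthyHosts{
                    Type:  aws.String("FLEET_PERCENT"),
                    Value: aws.Int64(95), // 95% of the fleet must stay healthy
                },
            })
            return err
        }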

    ", + "DeploymentConfigInfo$minimumHealthyHosts": "

    Information about the number or percentage of minimum healthy instances.

    " + } + }, + "MinimumHealthyHostsType": { + "base": null, + "refs": { + "MinimumHealthyHosts$type": "

    The minimum healthy instance type:

    • HOST_COUNT: The minimum number of healthy instances as an absolute value.
    • FLEET_PERCENT: The minimum number of healthy instances as a percentage of the total number of instances in the deployment.

    In an example of nine instances, if a HOST_COUNT of six is specified, deploy to up to three instances at a time. The deployment will be successful if six or more instances are deployed to successfully; otherwise, the deployment fails. If a FLEET_PERCENT of 40 is specified, deploy to up to five instances at a time. The deployment will be successful if four or more instances are deployed to successfully; otherwise, the deployment fails.

    In a call to the get deployment configuration operation, CodeDeployDefault.OneAtATime will return a minimum healthy instance type of MOST_CONCURRENCY and a value of 1. This means a deployment to only one instance at a time. (You cannot set the type to MOST_CONCURRENCY, only to HOST_COUNT or FLEET_PERCENT.) In addition, with CodeDeployDefault.OneAtATime, AWS CodeDeploy will try to ensure that all instances but one are kept in a healthy state during the deployment. Although this allows one instance at a time to be taken offline for a new deployment, it also means that if the deployment to the last instance fails, the overall deployment still succeeds." + } + }, + "MinimumHealthyHostsValue": { + "base": null, + "refs": { + "MinimumHealthyHosts$value": "

    The minimum healthy instance value.

    " + } + }, + "NextToken": { + "base": null, + "refs": { + "ListApplicationRevisionsInput$nextToken": "

    An identifier returned from the previous list application revisions call. It can be used to return the next set of application revisions in the list.

    ", + "ListApplicationRevisionsOutput$nextToken": "

    If a large amount of information is returned, an identifier will also be returned. It can be used in a subsequent list application revisions call to return the next set of application revisions in the list.

    ", + "ListApplicationsInput$nextToken": "

    An identifier returned from the previous list applications call. It can be used to return the next set of applications in the list.

    ", + "ListApplicationsOutput$nextToken": "

    If a large amount of information is returned, an identifier is also returned. It can be used in a subsequent list applications call to return the next set of applications in the list.

    ", + "ListDeploymentConfigsInput$nextToken": "

    An identifier returned from the previous list deployment configurations call. It can be used to return the next set of deployment configurations in the list.

    ", + "ListDeploymentConfigsOutput$nextToken": "

    If a large amount of information is returned, an identifier is also returned. It can be used in a subsequent list deployment configurations call to return the next set of deployment configurations in the list.

    ", + "ListDeploymentGroupsInput$nextToken": "

    An identifier returned from the previous list deployment groups call. It can be used to return the next set of deployment groups in the list.

    ", + "ListDeploymentGroupsOutput$nextToken": "

    If a large amount of information is returned, an identifier is also returned. It can be used in a subsequent list deployment groups call to return the next set of deployment groups in the list.

    ", + "ListDeploymentInstancesInput$nextToken": "

    An identifier returned from the previous list deployment instances call. It can be used to return the next set of deployment instances in the list.

    ", + "ListDeploymentInstancesOutput$nextToken": "

    If a large amount of information is returned, an identifier is also returned. It can be used in a subsequent list deployment instances call to return the next set of deployment instances in the list.

    ", + "ListDeploymentsInput$nextToken": "

    An identifier returned from the previous list deployments call. It can be used to return the next set of deployments in the list.

    ", + "ListDeploymentsOutput$nextToken": "

    If a large amount of information is returned, an identifier is also returned. It can be used in a subsequent list deployments call to return the next set of deployments in the list.

    ", + "ListOnPremisesInstancesInput$nextToken": "

    An identifier returned from the previous list on-premises instances call. It can be used to return the next set of on-premises instances in the list.

    ", + "ListOnPremisesInstancesOutput$nextToken": "

    If a large amount of information is returned, an identifier is also returned. It can be used in a subsequent list on-premises instances call to return the next set of on-premises instances in the list.
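
    All of these nextToken pairs follow the same pagination pattern; a sketch for the list applications call (illustrative, not part of the vendored file):

        import (
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/codedeploy"
        )

        func allApplicationNames() ([]*string, error) {
            svc := codedeploy.New(session.Must(session.NewSession()))
            var names []*string
            input := &codedeploy.ListApplicationsInput{}
            for {
                out, err := svc.ListApplications(input)
                if err != nil {
                    return nil, err
                }
                names = append(names, out.Applications...)
                if out.NextToken == nil {
                    return names, nil // no identifier returned: last page
                }
                input.NextToken = out.NextToken // request the next set
            }
        }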

    " + } + }, + "RegisterApplicationRevisionInput": { + "base": "

    Represents the input of a register application revision operation.

    ", + "refs": { + } + }, + "RegisterOnPremisesInstanceInput": { + "base": "

    Represents the input of the register on-premises instance operation.

    ", + "refs": { + } + }, + "RegistrationStatus": { + "base": null, + "refs": { + "ListOnPremisesInstancesInput$registrationStatus": "

    The registration status of the on-premises instances:

    • Deregistered: Include deregistered on-premises instances in the resulting list.
    • Registered: Include registered on-premises instances in the resulting list.
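
    A sketch of this filter in the generated client:

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/codedeploy"
        )

        func listRegisteredInstances() (*codedeploy.ListOnPremisesInstancesOutput, error) {
            svc := codedeploy.New(session.Must(session.NewSession()))
            // Only names of currently registered on-premises instances.
            return svc.ListOnPremisesInstances(&codedeploy.ListOnPremisesInstancesInput{
                RegistrationStatus: aws.String("Registered"),
            })
        }
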
    " + } + }, + "RemoveTagsFromOnPremisesInstancesInput": { + "base": "

    Represents the input of a remove tags from on-premises instances operation.

    ", + "refs": { + } + }, + "Repository": { + "base": null, + "refs": { + "GitHubLocation$repository": "

    The GitHub account and repository pair that stores a reference to the commit that represents the bundled artifacts for the application revision.

    Specified as account/repository.
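
    A sketch of the account/repository format in the generated GitHubLocation type (both values are hypothetical):

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/codedeploy"
        )

        var gitHubRevision = &codedeploy.RevisionLocation{
            RevisionType: aws.String("GitHub"),
            GitHubLocation: &codedeploy.GitHubLocation{
                Repository: aws.String("my-account/my-repo"),                       // hypothetical account/repository pair
                CommitId:   aws.String("0123456789abcdef0123456789abcdef01234567"), // hypothetical SHA1 commit ID
            },
        }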

    " + } + }, + "RevisionDoesNotExistException": { + "base": "

    The named revision does not exist with the applicable IAM user or AWS account.

    ", + "refs": { + } + }, + "RevisionInfo": { + "base": "

    Information about an application revision.

    ", + "refs": { + "RevisionInfoList$member": null + } + }, + "RevisionInfoList": { + "base": null, + "refs": { + "BatchGetApplicationRevisionsOutput$revisions": "

    Additional information about the revisions, including the type and location.

    " + } + }, + "RevisionLocation": { + "base": "

    Information about the location of an application revision.

    ", + "refs": { + "CreateDeploymentInput$revision": "

    The type and location of the revision to deploy.

    ", + "DeploymentGroupInfo$targetRevision": "

    Information about the deployment group's target revision, including type and location.

    ", + "DeploymentInfo$revision": "

    Information about the location of stored application artifacts and the service from which to retrieve them.

    ", + "GetApplicationRevisionInput$revision": "

    Information about the application revision to get, including type and location.

    ", + "GetApplicationRevisionOutput$revision": "

    Additional information about the revision, including type and location.

    ", + "RegisterApplicationRevisionInput$revision": "

    Information about the application revision to register, including type and location.

    ", + "RevisionInfo$revisionLocation": null, + "RevisionLocationList$member": null + } + }, + "RevisionLocationList": { + "base": null, + "refs": { + "BatchGetApplicationRevisionsInput$revisions": "

    Information to get about the application revisions, including type and location.

    ", + "ListApplicationRevisionsOutput$revisions": "

    A list of locations that contain the matching revisions.

    " + } + }, + "RevisionLocationType": { + "base": null, + "refs": { + "RevisionLocation$revisionType": "

    The type of application revision:

    • S3: An application revision stored in Amazon S3.
    • GitHub: An application revision stored in GitHub.
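
    The S3 variant as a sketch (bucket and key are hypothetical; bundleType is one of tar, tgz, or zip as documented above):

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/codedeploy"
        )

        var s3Revision = &codedeploy.RevisionLocation{
            RevisionType: aws.String("S3"),
            S3Location: &codedeploy.S3Location{
                Bucket:     aws.String("my-bucket"), // hypothetical
                Key:        aws.String("app.zip"),   // hypothetical
                BundleType: aws.String("zip"),
            },
        }
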
    " + } + }, + "RevisionRequiredException": { + "base": "

    The revision ID was not specified.

    ", + "refs": { + } + }, + "Role": { + "base": null, + "refs": { + "CreateDeploymentGroupInput$serviceRoleArn": "

    A service role ARN that allows AWS CodeDeploy to act on the user's behalf when interacting with AWS services.

    ", + "DeploymentGroupInfo$serviceRoleArn": "

    A service role ARN.

    ", + "UpdateDeploymentGroupInput$serviceRoleArn": "

    A replacement ARN for the service role, if you want to change it.

    " + } + }, + "RoleRequiredException": { + "base": "

    The role ID was not specified.

    ", + "refs": { + } + }, + "S3Bucket": { + "base": null, + "refs": { + "ListApplicationRevisionsInput$s3Bucket": "

    An Amazon S3 bucket name to limit the search for revisions.

    If set to null, all of the user's buckets will be searched.

    ", + "S3Location$bucket": "

    The name of the Amazon S3 bucket where the application revision is stored.

    " + } + }, + "S3Key": { + "base": null, + "refs": { + "ListApplicationRevisionsInput$s3KeyPrefix": "

    A key prefix for the set of Amazon S3 objects to limit the search for revisions.

    ", + "S3Location$key": "

    The name of the Amazon S3 object that represents the bundled artifacts for the application revision.

    " + } + }, + "S3Location": { + "base": "

    Information about the location of application artifacts stored in Amazon S3.

    ", + "refs": { + "RevisionLocation$s3Location": null + } + }, + "ScriptName": { + "base": null, + "refs": { + "Diagnostics$scriptName": "

    The name of the script.

    " + } + }, + "SortOrder": { + "base": null, + "refs": { + "ListApplicationRevisionsInput$sortOrder": "

    The order in which to sort the list results:

    • ascending: ascending order.
    • descending: descending order.

    If not specified, the results will be sorted in ascending order.

    If set to null, the results will be sorted in an arbitrary order.

    " + } + }, + "StopDeploymentInput": { + "base": "

    Represents the input of a stop deployment operation.

    ", + "refs": { + } + }, + "StopDeploymentOutput": { + "base": "

    Represents the output of a stop deployment operation.

    ", + "refs": { + } + }, + "StopStatus": { + "base": null, + "refs": { + "StopDeploymentOutput$status": "

    The status of the stop deployment operation:

    • Pending: The stop operation is pending.
    • Succeeded: The stop operation was successful.
    " + } + }, + "Tag": { + "base": "

    Information about a tag.

    ", + "refs": { + "TagList$member": null + } + }, + "TagFilter": { + "base": "

    Information about an on-premises instance tag filter.

    ", + "refs": { + "TagFilterList$member": null + } + }, + "TagFilterList": { + "base": null, + "refs": { + "CreateDeploymentGroupInput$onPremisesInstanceTagFilters": "

    The on-premises instance tags on which to filter.

    ", + "DeploymentGroupInfo$onPremisesInstanceTagFilters": "

    The on-premises instance tags on which to filter.

    ", + "ListOnPremisesInstancesInput$tagFilters": "

    The on-premises instance tags used to restrict the set of on-premises instance names returned.

    ", + "UpdateDeploymentGroupInput$onPremisesInstanceTagFilters": "

    The replacement set of on-premises instance tags on which to filter, if you want to change them. To keep the existing tags, enter their names. To remove tags, do not enter any tag names.

    " + } + }, + "TagFilterType": { + "base": null, + "refs": { + "TagFilter$Type": "

    The on-premises instance tag filter type:

    • KEY_ONLY: Key only.
    • VALUE_ONLY: Value only.
    • KEY_AND_VALUE: Key and value.
    " + } + }, + "TagLimitExceededException": { + "base": "

    The maximum allowed number of tags was exceeded.

    ", + "refs": { + } + }, + "TagList": { + "base": null, + "refs": { + "AddTagsToOnPremisesInstancesInput$tags": "

    The tag key-value pairs to add to the on-premises instances.

    Keys and values are both required. Keys cannot be null or empty strings. Value-only tags are not allowed.
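
    A minimal sketch of tagging on-premises instances with the codedeploy client from the earlier sketch; instance and tag names are placeholders:

    _, err := svc.AddTagsToOnPremisesInstances(&codedeploy.AddTagsToOnPremisesInstancesInput{
        InstanceNames: []*string{aws.String("onprem-01")}, // placeholder
        Tags: []*codedeploy.Tag{
            {Key: aws.String("Environment"), Value: aws.String("staging")}, // placeholder pair
        },
    })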

    ", + "InstanceInfo$tags": "

    The tags currently associated with the on-premises instance.

    ", + "RemoveTagsFromOnPremisesInstancesInput$tags": "

    The tag key-value pairs to remove from the on-premises instances.

    " + } + }, + "TagRequiredException": { + "base": "

    A tag was not specified.

    ", + "refs": { + } + }, + "TimeRange": { + "base": "

    Information about a time range.

    ", + "refs": { + "ListDeploymentsInput$createTimeRange": "

    A time range (start and end) for returning a subset of the list of deployments.
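
    For example, a hedged sketch restricting ListDeployments to the last 24 hours (the "time" package must be imported; names are placeholders):

    out, err := svc.ListDeployments(&codedeploy.ListDeploymentsInput{
        ApplicationName:     aws.String("my-app"), // placeholder
        DeploymentGroupName: aws.String("my-dg"),  // placeholder
        CreateTimeRange: &codedeploy.TimeRange{
            Start: aws.Time(time.Now().Add(-24 * time.Hour)),
            End:   nil, // a nil end leaves the range open-ended
        },
    })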

    " + } + }, + "Timestamp": { + "base": null, + "refs": { + "ApplicationInfo$createTime": "

    The time at which the application was created.

    ", + "DeploymentConfigInfo$createTime": "

    The time at which the deployment configuration was created.

    ", + "DeploymentInfo$createTime": "

    A timestamp indicating when the deployment was created.

    ", + "DeploymentInfo$startTime": "

    A timestamp indicating when the deployment was deployed to the deployment group.

    In some cases, the reported value of the start time may be later than the complete time. This is due to differences in the clock settings of back-end servers that participate in the deployment process.

    ", + "DeploymentInfo$completeTime": "

    A timestamp indicating when the deployment was complete.

    ", + "GenericRevisionInfo$firstUsedTime": "

    When the revision was first used by AWS CodeDeploy.

    ", + "GenericRevisionInfo$lastUsedTime": "

    When the revision was last used by AWS CodeDeploy.

    ", + "GenericRevisionInfo$registerTime": "

    When the revision was registered with AWS CodeDeploy.

    ", + "InstanceInfo$registerTime": "

    The time at which the on-premises instance was registered.

    ", + "InstanceInfo$deregisterTime": "

    The time at which the on-premises instance was deregistered, if it has been deregistered.

    ", + "InstanceSummary$lastUpdatedAt": "

    A timestamp indicating when the instance information was last updated.

    ", + "LifecycleEvent$startTime": "

    A timestamp indicating when the deployment lifecycle event started.

    ", + "LifecycleEvent$endTime": "

    A timestamp indicating when the deployment lifecycle event ended.

    ", + "TimeRange$start": "

    The start time of the time range.

    Specify null to leave the start time open-ended.", + "TimeRange$end": "

    The end time of the time range.

    Specify null to leave the end time open-ended." + } + }, + "TriggerConfig": { + "base": "

    Information about notification triggers for the deployment group.

    ", + "refs": { + "TriggerConfigList$member": null + } + }, + "TriggerConfigList": { + "base": null, + "refs": { + "CreateDeploymentGroupInput$triggerConfigurations": "

    Information about triggers to create when the deployment group is created.

    ", + "DeploymentGroupInfo$triggerConfigurations": "

    A list of associated triggers.

    ", + "UpdateDeploymentGroupInput$triggerConfigurations": "

    Information about triggers to change when the deployment group is updated.

    " + } + }, + "TriggerEventType": { + "base": null, + "refs": { + "TriggerEventTypeList$member": null + } + }, + "TriggerEventTypeList": { + "base": null, + "refs": { + "TriggerConfig$triggerEvents": "

    The event type or types for which notifications are triggered.

    The following event type values are supported:

    • DEPLOYMENT_START
    • DEPLOYMENT_SUCCESS
    • DEPLOYMENT_FAILURE
    • DEPLOYMENT_STOP
    • INSTANCE_START
    • INSTANCE_SUCCESS
    • INSTANCE_FAILURE
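
    A sketch of a trigger configuration wired to an SNS topic, under the assumption that it is passed as TriggerConfigurations on a create or update deployment group call; the trigger name and topic ARN are placeholders:

    trigger := &codedeploy.TriggerConfig{
        TriggerName:      aws.String("deploy-events"), // placeholder
        TriggerTargetArn: aws.String("arn:aws:sns:us-east-1:111122223333:deployments"), // placeholder
        TriggerEvents: []*string{
            aws.String("DEPLOYMENT_START"),
            aws.String("DEPLOYMENT_SUCCESS"),
            aws.String("DEPLOYMENT_FAILURE"),
        },
    }
    // e.g. CreateDeploymentGroupInput{..., TriggerConfigurations: []*codedeploy.TriggerConfig{trigger}}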
    " + } + }, + "TriggerName": { + "base": null, + "refs": { + "TriggerConfig$triggerName": "

    The name of the notification trigger.

    " + } + }, + "TriggerTargetArn": { + "base": null, + "refs": { + "TriggerConfig$triggerTargetArn": "

    The ARN of the Amazon Simple Notification Service topic through which notifications about deployment or instance events are sent.

    " + } + }, + "TriggerTargetsLimitExceededException": { + "base": "

    The maximum allowed number of triggers was exceeded.

    ", + "refs": { + } + }, + "UpdateApplicationInput": { + "base": "

    Represents the input of an update application operation.

    ", + "refs": { + } + }, + "UpdateDeploymentGroupInput": { + "base": "

    Represents the input of an update deployment group operation.

    ", + "refs": { + } + }, + "UpdateDeploymentGroupOutput": { + "base": "

    Represents the output of an update deployment group operation.

    ", + "refs": { + } + }, + "Value": { + "base": null, + "refs": { + "EC2TagFilter$Value": "

    The tag filter value.

    ", + "Tag$Value": "

    The tag's value.

    ", + "TagFilter$Value": "

    The on-premises instance tag filter value.

    " + } + }, + "VersionId": { + "base": null, + "refs": { + "S3Location$version": "

    A specific version of the Amazon S3 object that represents the bundled artifacts for the application revision.

    If the version is not specified, the system will use the most recent version by default.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/codedeploy/2014-10-06/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/codedeploy/2014-10-06/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/codedeploy/2014-10-06/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/codedeploy/2014-10-06/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/codedeploy/2014-10-06/paginators-1.json new file mode 100644 index 000000000..cea07e68b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/codedeploy/2014-10-06/paginators-1.json @@ -0,0 +1,34 @@ +{ + "pagination": { + "ListApplicationRevisions": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "revisions" + }, + "ListApplications": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "applications" + }, + "ListDeploymentConfigs": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "deploymentConfigsList" + }, + "ListDeploymentGroups": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "deploymentGroups" + }, + "ListDeploymentInstances": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "instancesList" + }, + "ListDeployments": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "deployments" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/codepipeline/2015-07-09/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/codepipeline/2015-07-09/api-2.json new file mode 100644 index 000000000..2f8ac81cb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/codepipeline/2015-07-09/api-2.json @@ -0,0 +1,1633 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-07-09", + "endpointPrefix":"codepipeline", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"CodePipeline", + "serviceFullName":"AWS CodePipeline", + "signatureVersion":"v4", + "targetPrefix":"CodePipeline_20150709" + }, + "operations":{ + "AcknowledgeJob":{ + "name":"AcknowledgeJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AcknowledgeJobInput"}, + "output":{"shape":"AcknowledgeJobOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InvalidNonceException"}, + {"shape":"JobNotFoundException"} + ] + }, + "AcknowledgeThirdPartyJob":{ + "name":"AcknowledgeThirdPartyJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AcknowledgeThirdPartyJobInput"}, + "output":{"shape":"AcknowledgeThirdPartyJobOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InvalidNonceException"}, + {"shape":"JobNotFoundException"}, + {"shape":"InvalidClientTokenException"} + ] + }, + "CreateCustomActionType":{ + "name":"CreateCustomActionType", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateCustomActionTypeInput"}, + "output":{"shape":"CreateCustomActionTypeOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"} + ] + }, + "CreatePipeline":{ + "name":"CreatePipeline", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePipelineInput"}, + "output":{"shape":"CreatePipelineOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"PipelineNameInUseException"}, + 
{"shape":"InvalidStageDeclarationException"}, + {"shape":"InvalidActionDeclarationException"}, + {"shape":"InvalidBlockerDeclarationException"}, + {"shape":"InvalidStructureException"}, + {"shape":"LimitExceededException"} + ] + }, + "DeleteCustomActionType":{ + "name":"DeleteCustomActionType", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteCustomActionTypeInput"}, + "errors":[ + {"shape":"ValidationException"} + ] + }, + "DeletePipeline":{ + "name":"DeletePipeline", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeletePipelineInput"}, + "errors":[ + {"shape":"ValidationException"} + ] + }, + "DisableStageTransition":{ + "name":"DisableStageTransition", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableStageTransitionInput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"PipelineNotFoundException"}, + {"shape":"StageNotFoundException"} + ] + }, + "EnableStageTransition":{ + "name":"EnableStageTransition", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableStageTransitionInput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"PipelineNotFoundException"}, + {"shape":"StageNotFoundException"} + ] + }, + "GetJobDetails":{ + "name":"GetJobDetails", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetJobDetailsInput"}, + "output":{"shape":"GetJobDetailsOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"JobNotFoundException"} + ] + }, + "GetPipeline":{ + "name":"GetPipeline", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetPipelineInput"}, + "output":{"shape":"GetPipelineOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"PipelineNotFoundException"}, + {"shape":"PipelineVersionNotFoundException"} + ] + }, + "GetPipelineState":{ + "name":"GetPipelineState", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetPipelineStateInput"}, + "output":{"shape":"GetPipelineStateOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"PipelineNotFoundException"} + ] + }, + "GetThirdPartyJobDetails":{ + "name":"GetThirdPartyJobDetails", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetThirdPartyJobDetailsInput"}, + "output":{"shape":"GetThirdPartyJobDetailsOutput"}, + "errors":[ + {"shape":"JobNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InvalidClientTokenException"}, + {"shape":"InvalidJobException"} + ] + }, + "ListActionTypes":{ + "name":"ListActionTypes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListActionTypesInput"}, + "output":{"shape":"ListActionTypesOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InvalidNextTokenException"} + ] + }, + "ListPipelines":{ + "name":"ListPipelines", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListPipelinesInput"}, + "output":{"shape":"ListPipelinesOutput"}, + "errors":[ + {"shape":"InvalidNextTokenException"} + ] + }, + "PollForJobs":{ + "name":"PollForJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PollForJobsInput"}, + "output":{"shape":"PollForJobsOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ActionTypeNotFoundException"} + ] + }, + "PollForThirdPartyJobs":{ + "name":"PollForThirdPartyJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PollForThirdPartyJobsInput"}, + 
"output":{"shape":"PollForThirdPartyJobsOutput"}, + "errors":[ + {"shape":"ActionTypeNotFoundException"}, + {"shape":"ValidationException"} + ] + }, + "PutActionRevision":{ + "name":"PutActionRevision", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutActionRevisionInput"}, + "output":{"shape":"PutActionRevisionOutput"}, + "errors":[ + {"shape":"PipelineNotFoundException"}, + {"shape":"StageNotFoundException"}, + {"shape":"ActionNotFoundException"}, + {"shape":"ValidationException"} + ] + }, + "PutApprovalResult":{ + "name":"PutApprovalResult", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutApprovalResultInput"}, + "output":{"shape":"PutApprovalResultOutput"}, + "errors":[ + {"shape":"InvalidApprovalTokenException"}, + {"shape":"ApprovalAlreadyCompletedException"}, + {"shape":"PipelineNotFoundException"}, + {"shape":"StageNotFoundException"}, + {"shape":"ActionNotFoundException"}, + {"shape":"ValidationException"} + ] + }, + "PutJobFailureResult":{ + "name":"PutJobFailureResult", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutJobFailureResultInput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"JobNotFoundException"}, + {"shape":"InvalidJobStateException"} + ] + }, + "PutJobSuccessResult":{ + "name":"PutJobSuccessResult", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutJobSuccessResultInput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"JobNotFoundException"}, + {"shape":"InvalidJobStateException"} + ] + }, + "PutThirdPartyJobFailureResult":{ + "name":"PutThirdPartyJobFailureResult", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutThirdPartyJobFailureResultInput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"JobNotFoundException"}, + {"shape":"InvalidJobStateException"}, + {"shape":"InvalidClientTokenException"} + ] + }, + "PutThirdPartyJobSuccessResult":{ + "name":"PutThirdPartyJobSuccessResult", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutThirdPartyJobSuccessResultInput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"JobNotFoundException"}, + {"shape":"InvalidJobStateException"}, + {"shape":"InvalidClientTokenException"} + ] + }, + "RetryStageExecution":{ + "name":"RetryStageExecution", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RetryStageExecutionInput"}, + "output":{"shape":"RetryStageExecutionOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"PipelineNotFoundException"}, + {"shape":"StageNotFoundException"}, + {"shape":"StageNotRetryableException"}, + {"shape":"NotLatestPipelineExecutionException"} + ] + }, + "StartPipelineExecution":{ + "name":"StartPipelineExecution", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartPipelineExecutionInput"}, + "output":{"shape":"StartPipelineExecutionOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"PipelineNotFoundException"} + ] + }, + "UpdatePipeline":{ + "name":"UpdatePipeline", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdatePipelineInput"}, + "output":{"shape":"UpdatePipelineOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InvalidStageDeclarationException"}, + {"shape":"InvalidActionDeclarationException"}, + {"shape":"InvalidBlockerDeclarationException"}, + {"shape":"InvalidStructureException"} + ] + } + }, + "shapes":{ + "AWSSessionCredentials":{ + 
"type":"structure", + "required":[ + "accessKeyId", + "secretAccessKey", + "sessionToken" + ], + "members":{ + "accessKeyId":{"shape":"AccessKeyId"}, + "secretAccessKey":{"shape":"SecretAccessKey"}, + "sessionToken":{"shape":"SessionToken"} + }, + "sensitive":true + }, + "AccessKeyId":{"type":"string"}, + "AccountId":{ + "type":"string", + "pattern":"[0-9]{12}" + }, + "AcknowledgeJobInput":{ + "type":"structure", + "required":[ + "jobId", + "nonce" + ], + "members":{ + "jobId":{"shape":"JobId"}, + "nonce":{"shape":"Nonce"} + } + }, + "AcknowledgeJobOutput":{ + "type":"structure", + "members":{ + "status":{"shape":"JobStatus"} + } + }, + "AcknowledgeThirdPartyJobInput":{ + "type":"structure", + "required":[ + "jobId", + "nonce", + "clientToken" + ], + "members":{ + "jobId":{"shape":"ThirdPartyJobId"}, + "nonce":{"shape":"Nonce"}, + "clientToken":{"shape":"ClientToken"} + } + }, + "AcknowledgeThirdPartyJobOutput":{ + "type":"structure", + "members":{ + "status":{"shape":"JobStatus"} + } + }, + "ActionCategory":{ + "type":"string", + "enum":[ + "Source", + "Build", + "Deploy", + "Test", + "Invoke", + "Approval" + ] + }, + "ActionConfiguration":{ + "type":"structure", + "members":{ + "configuration":{"shape":"ActionConfigurationMap"} + } + }, + "ActionConfigurationKey":{ + "type":"string", + "max":50, + "min":1 + }, + "ActionConfigurationMap":{ + "type":"map", + "key":{"shape":"ActionConfigurationKey"}, + "value":{"shape":"ActionConfigurationValue"} + }, + "ActionConfigurationProperty":{ + "type":"structure", + "required":[ + "name", + "required", + "key", + "secret" + ], + "members":{ + "name":{"shape":"ActionConfigurationKey"}, + "required":{"shape":"Boolean"}, + "key":{"shape":"Boolean"}, + "secret":{"shape":"Boolean"}, + "queryable":{"shape":"Boolean"}, + "description":{"shape":"Description"}, + "type":{"shape":"ActionConfigurationPropertyType"} + } + }, + "ActionConfigurationPropertyList":{ + "type":"list", + "member":{"shape":"ActionConfigurationProperty"}, + "max":10 + }, + "ActionConfigurationPropertyType":{ + "type":"string", + "enum":[ + "String", + "Number", + "Boolean" + ] + }, + "ActionConfigurationQueryableValue":{ + "type":"string", + "max":20, + "min":1, + "pattern":"[a-zA-Z0-9_-]+" + }, + "ActionConfigurationValue":{ + "type":"string", + "max":500, + "min":1 + }, + "ActionContext":{ + "type":"structure", + "members":{ + "name":{"shape":"ActionName"} + } + }, + "ActionDeclaration":{ + "type":"structure", + "required":[ + "name", + "actionTypeId" + ], + "members":{ + "name":{"shape":"ActionName"}, + "actionTypeId":{"shape":"ActionTypeId"}, + "runOrder":{"shape":"ActionRunOrder"}, + "configuration":{"shape":"ActionConfigurationMap"}, + "outputArtifacts":{"shape":"OutputArtifactList"}, + "inputArtifacts":{"shape":"InputArtifactList"}, + "roleArn":{"shape":"RoleArn"} + } + }, + "ActionExecution":{ + "type":"structure", + "members":{ + "status":{"shape":"ActionExecutionStatus"}, + "summary":{"shape":"ExecutionSummary"}, + "lastStatusChange":{"shape":"Timestamp"}, + "token":{"shape":"ActionExecutionToken"}, + "lastUpdatedBy":{"shape":"LastUpdatedBy"}, + "externalExecutionId":{"shape":"ExecutionId"}, + "externalExecutionUrl":{"shape":"Url"}, + "percentComplete":{"shape":"Percentage"}, + "errorDetails":{"shape":"ErrorDetails"} + } + }, + "ActionExecutionStatus":{ + "type":"string", + "enum":[ + "InProgress", + "Succeeded", + "Failed" + ] + }, + "ActionExecutionToken":{"type":"string"}, + "ActionName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[A-Za-z0-9.@\\-_]+" + }, + 
"ActionNotFoundException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ActionOwner":{ + "type":"string", + "enum":[ + "AWS", + "ThirdParty", + "Custom" + ] + }, + "ActionProvider":{ + "type":"string", + "max":25, + "min":1, + "pattern":"[0-9A-Za-z_-]+" + }, + "ActionRevision":{ + "type":"structure", + "required":[ + "revisionId", + "revisionChangeId", + "created" + ], + "members":{ + "revisionId":{"shape":"Revision"}, + "revisionChangeId":{"shape":"RevisionChangeIdentifier"}, + "created":{"shape":"Timestamp"} + } + }, + "ActionRunOrder":{ + "type":"integer", + "max":999, + "min":1 + }, + "ActionState":{ + "type":"structure", + "members":{ + "actionName":{"shape":"ActionName"}, + "currentRevision":{"shape":"ActionRevision"}, + "latestExecution":{"shape":"ActionExecution"}, + "entityUrl":{"shape":"Url"}, + "revisionUrl":{"shape":"Url"} + } + }, + "ActionStateList":{ + "type":"list", + "member":{"shape":"ActionState"} + }, + "ActionType":{ + "type":"structure", + "required":[ + "id", + "inputArtifactDetails", + "outputArtifactDetails" + ], + "members":{ + "id":{"shape":"ActionTypeId"}, + "settings":{"shape":"ActionTypeSettings"}, + "actionConfigurationProperties":{"shape":"ActionConfigurationPropertyList"}, + "inputArtifactDetails":{"shape":"ArtifactDetails"}, + "outputArtifactDetails":{"shape":"ArtifactDetails"} + } + }, + "ActionTypeId":{ + "type":"structure", + "required":[ + "category", + "owner", + "provider", + "version" + ], + "members":{ + "category":{"shape":"ActionCategory"}, + "owner":{"shape":"ActionOwner"}, + "provider":{"shape":"ActionProvider"}, + "version":{"shape":"Version"} + } + }, + "ActionTypeList":{ + "type":"list", + "member":{"shape":"ActionType"} + }, + "ActionTypeNotFoundException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ActionTypeSettings":{ + "type":"structure", + "members":{ + "thirdPartyConfigurationUrl":{"shape":"Url"}, + "entityUrlTemplate":{"shape":"UrlTemplate"}, + "executionUrlTemplate":{"shape":"UrlTemplate"}, + "revisionUrlTemplate":{"shape":"UrlTemplate"} + } + }, + "ApprovalAlreadyCompletedException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ApprovalResult":{ + "type":"structure", + "required":[ + "summary", + "status" + ], + "members":{ + "summary":{"shape":"ApprovalSummary"}, + "status":{"shape":"ApprovalStatus"} + } + }, + "ApprovalStatus":{ + "type":"string", + "enum":[ + "Approved", + "Rejected" + ] + }, + "ApprovalSummary":{ + "type":"string", + "max":512, + "min":0 + }, + "ApprovalToken":{ + "type":"string", + "pattern":"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}" + }, + "Artifact":{ + "type":"structure", + "members":{ + "name":{"shape":"ArtifactName"}, + "revision":{"shape":"Revision"}, + "location":{"shape":"ArtifactLocation"} + } + }, + "ArtifactDetails":{ + "type":"structure", + "required":[ + "minimumCount", + "maximumCount" + ], + "members":{ + "minimumCount":{"shape":"MinimumArtifactCount"}, + "maximumCount":{"shape":"MaximumArtifactCount"} + } + }, + "ArtifactList":{ + "type":"list", + "member":{"shape":"Artifact"} + }, + "ArtifactLocation":{ + "type":"structure", + "members":{ + "type":{"shape":"ArtifactLocationType"}, + "s3Location":{"shape":"S3ArtifactLocation"} + } + }, + "ArtifactLocationType":{ + "type":"string", + "enum":["S3"] + }, + "ArtifactName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[a-zA-Z0-9_\\-]+" + }, + "ArtifactStore":{ + "type":"structure", + "required":[ + "type", + "location" + ], + "members":{ + 
"type":{"shape":"ArtifactStoreType"}, + "location":{"shape":"ArtifactStoreLocation"}, + "encryptionKey":{"shape":"EncryptionKey"} + } + }, + "ArtifactStoreLocation":{ + "type":"string", + "max":63, + "min":3, + "pattern":"[a-zA-Z0-9\\-\\.]+" + }, + "ArtifactStoreType":{ + "type":"string", + "enum":["S3"] + }, + "BlockerDeclaration":{ + "type":"structure", + "required":[ + "name", + "type" + ], + "members":{ + "name":{"shape":"BlockerName"}, + "type":{"shape":"BlockerType"} + } + }, + "BlockerName":{ + "type":"string", + "max":100, + "min":1 + }, + "BlockerType":{ + "type":"string", + "enum":["Schedule"] + }, + "Boolean":{"type":"boolean"}, + "ClientId":{ + "type":"string", + "pattern":"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}" + }, + "ClientToken":{"type":"string"}, + "Code":{"type":"string"}, + "ContinuationToken":{"type":"string"}, + "CreateCustomActionTypeInput":{ + "type":"structure", + "required":[ + "category", + "provider", + "version", + "inputArtifactDetails", + "outputArtifactDetails" + ], + "members":{ + "category":{"shape":"ActionCategory"}, + "provider":{"shape":"ActionProvider"}, + "version":{"shape":"Version"}, + "settings":{"shape":"ActionTypeSettings"}, + "configurationProperties":{"shape":"ActionConfigurationPropertyList"}, + "inputArtifactDetails":{"shape":"ArtifactDetails"}, + "outputArtifactDetails":{"shape":"ArtifactDetails"} + } + }, + "CreateCustomActionTypeOutput":{ + "type":"structure", + "required":["actionType"], + "members":{ + "actionType":{"shape":"ActionType"} + } + }, + "CreatePipelineInput":{ + "type":"structure", + "required":["pipeline"], + "members":{ + "pipeline":{"shape":"PipelineDeclaration"} + } + }, + "CreatePipelineOutput":{ + "type":"structure", + "members":{ + "pipeline":{"shape":"PipelineDeclaration"} + } + }, + "CurrentRevision":{ + "type":"structure", + "required":[ + "revision", + "changeIdentifier" + ], + "members":{ + "revision":{"shape":"Revision"}, + "changeIdentifier":{"shape":"RevisionChangeIdentifier"} + } + }, + "DeleteCustomActionTypeInput":{ + "type":"structure", + "required":[ + "category", + "provider", + "version" + ], + "members":{ + "category":{"shape":"ActionCategory"}, + "provider":{"shape":"ActionProvider"}, + "version":{"shape":"Version"} + } + }, + "DeletePipelineInput":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{"shape":"PipelineName"} + } + }, + "Description":{ + "type":"string", + "max":2048, + "min":1 + }, + "DisableStageTransitionInput":{ + "type":"structure", + "required":[ + "pipelineName", + "stageName", + "transitionType", + "reason" + ], + "members":{ + "pipelineName":{"shape":"PipelineName"}, + "stageName":{"shape":"StageName"}, + "transitionType":{"shape":"StageTransitionType"}, + "reason":{"shape":"DisabledReason"} + } + }, + "DisabledReason":{ + "type":"string", + "max":300, + "min":1, + "pattern":"[a-zA-Z0-9!@ \\(\\)\\.\\*\\?\\-]+" + }, + "EnableStageTransitionInput":{ + "type":"structure", + "required":[ + "pipelineName", + "stageName", + "transitionType" + ], + "members":{ + "pipelineName":{"shape":"PipelineName"}, + "stageName":{"shape":"StageName"}, + "transitionType":{"shape":"StageTransitionType"} + } + }, + "Enabled":{"type":"boolean"}, + "EncryptionKey":{ + "type":"structure", + "required":[ + "id", + "type" + ], + "members":{ + "id":{"shape":"EncryptionKeyId"}, + "type":{"shape":"EncryptionKeyType"} + } + }, + "EncryptionKeyId":{ + "type":"string", + "max":100, + "min":1 + }, + "EncryptionKeyType":{ + "type":"string", + "enum":["KMS"] + }, + 
"ErrorDetails":{ + "type":"structure", + "members":{ + "code":{"shape":"Code"}, + "message":{"shape":"Message"} + } + }, + "ExecutionDetails":{ + "type":"structure", + "members":{ + "summary":{"shape":"ExecutionSummary"}, + "externalExecutionId":{"shape":"ExecutionId"}, + "percentComplete":{"shape":"Percentage"} + } + }, + "ExecutionId":{ + "type":"string", + "max":1500, + "min":1 + }, + "ExecutionSummary":{"type":"string"}, + "FailureDetails":{ + "type":"structure", + "required":[ + "type", + "message" + ], + "members":{ + "type":{"shape":"FailureType"}, + "message":{"shape":"Message"}, + "externalExecutionId":{"shape":"ExecutionId"} + } + }, + "FailureType":{ + "type":"string", + "enum":[ + "JobFailed", + "ConfigurationError", + "PermissionError", + "RevisionOutOfSync", + "RevisionUnavailable", + "SystemUnavailable" + ] + }, + "GetJobDetailsInput":{ + "type":"structure", + "required":["jobId"], + "members":{ + "jobId":{"shape":"JobId"} + } + }, + "GetJobDetailsOutput":{ + "type":"structure", + "members":{ + "jobDetails":{"shape":"JobDetails"} + } + }, + "GetPipelineInput":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{"shape":"PipelineName"}, + "version":{"shape":"PipelineVersion"} + } + }, + "GetPipelineOutput":{ + "type":"structure", + "members":{ + "pipeline":{"shape":"PipelineDeclaration"} + } + }, + "GetPipelineStateInput":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{"shape":"PipelineName"} + } + }, + "GetPipelineStateOutput":{ + "type":"structure", + "members":{ + "pipelineName":{"shape":"PipelineName"}, + "pipelineVersion":{"shape":"PipelineVersion"}, + "stageStates":{"shape":"StageStateList"}, + "created":{"shape":"Timestamp"}, + "updated":{"shape":"Timestamp"} + } + }, + "GetThirdPartyJobDetailsInput":{ + "type":"structure", + "required":[ + "jobId", + "clientToken" + ], + "members":{ + "jobId":{"shape":"ThirdPartyJobId"}, + "clientToken":{"shape":"ClientToken"} + } + }, + "GetThirdPartyJobDetailsOutput":{ + "type":"structure", + "members":{ + "jobDetails":{"shape":"ThirdPartyJobDetails"} + } + }, + "InputArtifact":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{"shape":"ArtifactName"} + } + }, + "InputArtifactList":{ + "type":"list", + "member":{"shape":"InputArtifact"} + }, + "InvalidActionDeclarationException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidApprovalTokenException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidBlockerDeclarationException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidClientTokenException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidJobException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidJobStateException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidNextTokenException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidNonceException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidStageDeclarationException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidStructureException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "Job":{ + "type":"structure", + "members":{ + "id":{"shape":"JobId"}, + "data":{"shape":"JobData"}, + "nonce":{"shape":"Nonce"}, + "accountId":{"shape":"AccountId"} + } + }, + "JobData":{ + "type":"structure", + "members":{ + 
"actionTypeId":{"shape":"ActionTypeId"}, + "actionConfiguration":{"shape":"ActionConfiguration"}, + "pipelineContext":{"shape":"PipelineContext"}, + "inputArtifacts":{"shape":"ArtifactList"}, + "outputArtifacts":{"shape":"ArtifactList"}, + "artifactCredentials":{"shape":"AWSSessionCredentials"}, + "continuationToken":{"shape":"ContinuationToken"}, + "encryptionKey":{"shape":"EncryptionKey"} + } + }, + "JobDetails":{ + "type":"structure", + "members":{ + "id":{"shape":"JobId"}, + "data":{"shape":"JobData"}, + "accountId":{"shape":"AccountId"} + } + }, + "JobId":{ + "type":"string", + "pattern":"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}" + }, + "JobList":{ + "type":"list", + "member":{"shape":"Job"} + }, + "JobNotFoundException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "JobStatus":{ + "type":"string", + "enum":[ + "Created", + "Queued", + "Dispatched", + "InProgress", + "TimedOut", + "Succeeded", + "Failed" + ] + }, + "LastChangedAt":{"type":"timestamp"}, + "LastChangedBy":{"type":"string"}, + "LastUpdatedBy":{"type":"string"}, + "LimitExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ListActionTypesInput":{ + "type":"structure", + "members":{ + "actionOwnerFilter":{"shape":"ActionOwner"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListActionTypesOutput":{ + "type":"structure", + "required":["actionTypes"], + "members":{ + "actionTypes":{"shape":"ActionTypeList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListPipelinesInput":{ + "type":"structure", + "members":{ + "nextToken":{"shape":"NextToken"} + } + }, + "ListPipelinesOutput":{ + "type":"structure", + "members":{ + "pipelines":{"shape":"PipelineList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "MaxBatchSize":{ + "type":"integer", + "min":1 + }, + "MaximumArtifactCount":{ + "type":"integer", + "max":5, + "min":0 + }, + "Message":{"type":"string"}, + "MinimumArtifactCount":{ + "type":"integer", + "max":5, + "min":0 + }, + "NextToken":{"type":"string"}, + "Nonce":{"type":"string"}, + "NotLatestPipelineExecutionException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "OutputArtifact":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{"shape":"ArtifactName"} + } + }, + "OutputArtifactList":{ + "type":"list", + "member":{"shape":"OutputArtifact"} + }, + "Percentage":{ + "type":"integer", + "max":100, + "min":0 + }, + "PipelineContext":{ + "type":"structure", + "members":{ + "pipelineName":{"shape":"PipelineName"}, + "stage":{"shape":"StageContext"}, + "action":{"shape":"ActionContext"} + } + }, + "PipelineDeclaration":{ + "type":"structure", + "required":[ + "name", + "roleArn", + "artifactStore", + "stages" + ], + "members":{ + "name":{"shape":"PipelineName"}, + "roleArn":{"shape":"RoleArn"}, + "artifactStore":{"shape":"ArtifactStore"}, + "stages":{"shape":"PipelineStageDeclarationList"}, + "version":{"shape":"PipelineVersion"} + } + }, + "PipelineExecutionId":{ + "type":"string", + "pattern":"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}" + }, + "PipelineList":{ + "type":"list", + "member":{"shape":"PipelineSummary"} + }, + "PipelineName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[A-Za-z0-9.@\\-_]+" + }, + "PipelineNameInUseException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "PipelineNotFoundException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "PipelineStageDeclarationList":{ + "type":"list", + 
"member":{"shape":"StageDeclaration"} + }, + "PipelineSummary":{ + "type":"structure", + "members":{ + "name":{"shape":"PipelineName"}, + "version":{"shape":"PipelineVersion"}, + "created":{"shape":"Timestamp"}, + "updated":{"shape":"Timestamp"} + } + }, + "PipelineVersion":{ + "type":"integer", + "min":1 + }, + "PipelineVersionNotFoundException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "PollForJobsInput":{ + "type":"structure", + "required":["actionTypeId"], + "members":{ + "actionTypeId":{"shape":"ActionTypeId"}, + "maxBatchSize":{"shape":"MaxBatchSize"}, + "queryParam":{"shape":"QueryParamMap"} + } + }, + "PollForJobsOutput":{ + "type":"structure", + "members":{ + "jobs":{"shape":"JobList"} + } + }, + "PollForThirdPartyJobsInput":{ + "type":"structure", + "required":["actionTypeId"], + "members":{ + "actionTypeId":{"shape":"ActionTypeId"}, + "maxBatchSize":{"shape":"MaxBatchSize"} + } + }, + "PollForThirdPartyJobsOutput":{ + "type":"structure", + "members":{ + "jobs":{"shape":"ThirdPartyJobList"} + } + }, + "PutActionRevisionInput":{ + "type":"structure", + "required":[ + "pipelineName", + "stageName", + "actionName", + "actionRevision" + ], + "members":{ + "pipelineName":{"shape":"PipelineName"}, + "stageName":{"shape":"StageName"}, + "actionName":{"shape":"ActionName"}, + "actionRevision":{"shape":"ActionRevision"} + } + }, + "PutActionRevisionOutput":{ + "type":"structure", + "members":{ + "newRevision":{"shape":"Boolean"}, + "pipelineExecutionId":{"shape":"PipelineExecutionId"} + } + }, + "PutApprovalResultInput":{ + "type":"structure", + "required":[ + "pipelineName", + "stageName", + "actionName", + "result" + ], + "members":{ + "pipelineName":{"shape":"PipelineName"}, + "stageName":{"shape":"StageName"}, + "actionName":{"shape":"ActionName"}, + "result":{"shape":"ApprovalResult"}, + "token":{"shape":"ApprovalToken"} + } + }, + "PutApprovalResultOutput":{ + "type":"structure", + "members":{ + "approvedAt":{"shape":"Timestamp"} + } + }, + "PutJobFailureResultInput":{ + "type":"structure", + "required":[ + "jobId", + "failureDetails" + ], + "members":{ + "jobId":{"shape":"JobId"}, + "failureDetails":{"shape":"FailureDetails"} + } + }, + "PutJobSuccessResultInput":{ + "type":"structure", + "required":["jobId"], + "members":{ + "jobId":{"shape":"JobId"}, + "currentRevision":{"shape":"CurrentRevision"}, + "continuationToken":{"shape":"ContinuationToken"}, + "executionDetails":{"shape":"ExecutionDetails"} + } + }, + "PutThirdPartyJobFailureResultInput":{ + "type":"structure", + "required":[ + "jobId", + "clientToken", + "failureDetails" + ], + "members":{ + "jobId":{"shape":"ThirdPartyJobId"}, + "clientToken":{"shape":"ClientToken"}, + "failureDetails":{"shape":"FailureDetails"} + } + }, + "PutThirdPartyJobSuccessResultInput":{ + "type":"structure", + "required":[ + "jobId", + "clientToken" + ], + "members":{ + "jobId":{"shape":"ThirdPartyJobId"}, + "clientToken":{"shape":"ClientToken"}, + "currentRevision":{"shape":"CurrentRevision"}, + "continuationToken":{"shape":"ContinuationToken"}, + "executionDetails":{"shape":"ExecutionDetails"} + } + }, + "QueryParamMap":{ + "type":"map", + "key":{"shape":"ActionConfigurationKey"}, + "value":{"shape":"ActionConfigurationQueryableValue"}, + "max":1, + "min":0 + }, + "RetryStageExecutionInput":{ + "type":"structure", + "required":[ + "pipelineName", + "stageName", + "pipelineExecutionId", + "retryMode" + ], + "members":{ + "pipelineName":{"shape":"PipelineName"}, + "stageName":{"shape":"StageName"}, + 
"pipelineExecutionId":{"shape":"PipelineExecutionId"}, + "retryMode":{"shape":"StageRetryMode"} + } + }, + "RetryStageExecutionOutput":{ + "type":"structure", + "members":{ + "pipelineExecutionId":{"shape":"PipelineExecutionId"} + } + }, + "Revision":{ + "type":"string", + "max":1500, + "min":1 + }, + "RevisionChangeIdentifier":{ + "type":"string", + "max":100, + "min":1 + }, + "RoleArn":{ + "type":"string", + "max":1024, + "pattern":"arn:aws(-[\\w]+)*:iam::[0-9]{12}:role/.*" + }, + "S3ArtifactLocation":{ + "type":"structure", + "required":[ + "bucketName", + "objectKey" + ], + "members":{ + "bucketName":{"shape":"S3BucketName"}, + "objectKey":{"shape":"S3ObjectKey"} + } + }, + "S3BucketName":{"type":"string"}, + "S3ObjectKey":{"type":"string"}, + "SecretAccessKey":{"type":"string"}, + "SessionToken":{"type":"string"}, + "StageActionDeclarationList":{ + "type":"list", + "member":{"shape":"ActionDeclaration"} + }, + "StageBlockerDeclarationList":{ + "type":"list", + "member":{"shape":"BlockerDeclaration"} + }, + "StageContext":{ + "type":"structure", + "members":{ + "name":{"shape":"StageName"} + } + }, + "StageDeclaration":{ + "type":"structure", + "required":[ + "name", + "actions" + ], + "members":{ + "name":{"shape":"StageName"}, + "blockers":{"shape":"StageBlockerDeclarationList"}, + "actions":{"shape":"StageActionDeclarationList"} + } + }, + "StageExecution":{ + "type":"structure", + "required":[ + "pipelineExecutionId", + "status" + ], + "members":{ + "pipelineExecutionId":{"shape":"PipelineExecutionId"}, + "status":{"shape":"StageExecutionStatus"} + } + }, + "StageExecutionStatus":{ + "type":"string", + "enum":[ + "InProgress", + "Failed", + "Succeeded" + ] + }, + "StageName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[A-Za-z0-9.@\\-_]+" + }, + "StageNotFoundException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "StageNotRetryableException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "StageRetryMode":{ + "type":"string", + "enum":["FAILED_ACTIONS"] + }, + "StageState":{ + "type":"structure", + "members":{ + "stageName":{"shape":"StageName"}, + "inboundTransitionState":{"shape":"TransitionState"}, + "actionStates":{"shape":"ActionStateList"}, + "latestExecution":{"shape":"StageExecution"} + } + }, + "StageStateList":{ + "type":"list", + "member":{"shape":"StageState"} + }, + "StageTransitionType":{ + "type":"string", + "enum":[ + "Inbound", + "Outbound" + ] + }, + "StartPipelineExecutionInput":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{"shape":"PipelineName"} + } + }, + "StartPipelineExecutionOutput":{ + "type":"structure", + "members":{ + "pipelineExecutionId":{"shape":"PipelineExecutionId"} + } + }, + "ThirdPartyJob":{ + "type":"structure", + "members":{ + "clientId":{"shape":"ClientId"}, + "jobId":{"shape":"JobId"} + } + }, + "ThirdPartyJobData":{ + "type":"structure", + "members":{ + "actionTypeId":{"shape":"ActionTypeId"}, + "actionConfiguration":{"shape":"ActionConfiguration"}, + "pipelineContext":{"shape":"PipelineContext"}, + "inputArtifacts":{"shape":"ArtifactList"}, + "outputArtifacts":{"shape":"ArtifactList"}, + "artifactCredentials":{"shape":"AWSSessionCredentials"}, + "continuationToken":{"shape":"ContinuationToken"}, + "encryptionKey":{"shape":"EncryptionKey"} + } + }, + "ThirdPartyJobDetails":{ + "type":"structure", + "members":{ + "id":{"shape":"ThirdPartyJobId"}, + "data":{"shape":"ThirdPartyJobData"}, + "nonce":{"shape":"Nonce"} + } + }, + "ThirdPartyJobId":{ + 
"type":"string", + "max":512, + "min":1 + }, + "ThirdPartyJobList":{ + "type":"list", + "member":{"shape":"ThirdPartyJob"} + }, + "Timestamp":{"type":"timestamp"}, + "TransitionState":{ + "type":"structure", + "members":{ + "enabled":{"shape":"Enabled"}, + "lastChangedBy":{"shape":"LastChangedBy"}, + "lastChangedAt":{"shape":"LastChangedAt"}, + "disabledReason":{"shape":"DisabledReason"} + } + }, + "UpdatePipelineInput":{ + "type":"structure", + "required":["pipeline"], + "members":{ + "pipeline":{"shape":"PipelineDeclaration"} + } + }, + "UpdatePipelineOutput":{ + "type":"structure", + "members":{ + "pipeline":{"shape":"PipelineDeclaration"} + } + }, + "Url":{ + "type":"string", + "max":2048, + "min":1 + }, + "UrlTemplate":{ + "type":"string", + "max":2048, + "min":1 + }, + "ValidationException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "Version":{ + "type":"string", + "max":9, + "min":1, + "pattern":"[0-9A-Za-z_-]+" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/codepipeline/2015-07-09/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/codepipeline/2015-07-09/docs-2.json new file mode 100644 index 000000000..1760d6818 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/codepipeline/2015-07-09/docs-2.json @@ -0,0 +1,1194 @@ +{ + "version": "2.0", + "service": "AWS CodePipeline

    Overview

    This is the AWS CodePipeline API Reference. This guide provides descriptions of the actions and data types for AWS CodePipeline. Some functionality for your pipeline is only configurable through the API. For additional information, see the AWS CodePipeline User Guide.

    You can use the AWS CodePipeline API to work with pipelines, stages, actions, gates, and transitions, as described below.

    Pipelines are models of automated release processes. Each pipeline is uniquely named, and consists of actions, gates, and stages.

    You can work with pipelines by calling:

    • CreatePipeline, which creates a uniquely-named pipeline.

    • DeletePipeline, which deletes the specified pipeline.

    • GetPipeline, which returns information about a pipeline structure.

    • GetPipelineState, which returns information about the current state of the stages and actions of a pipeline.

    • ListPipelines, which gets a summary of all of the pipelines associated with your account.

    • StartPipelineExecution, which runs the most recent revision of an artifact through the pipeline.

    • UpdatePipeline, which updates a pipeline with edits or changes to the structure of the pipeline.

    Pipelines include stages, which are logical groupings of gates and actions. Each stage contains one or more actions that must complete before the next stage begins. A stage results in success or failure. If a stage fails, the pipeline stops at that stage and remains stopped until either a new version of an artifact appears in the source location or a user takes action to re-run the most recent artifact through the pipeline. You can call GetPipelineState, which displays the status of a pipeline, including the status of stages in the pipeline, or GetPipeline, which returns the entire structure of the pipeline, including the stages of that pipeline. For more information about the structure of stages and actions, also refer to the AWS CodePipeline Pipeline Structure Reference.

    Pipeline stages include actions, which are grouped into categories such as the source or build actions performed within a stage of a pipeline. For example, you can use a source action to import artifacts into a pipeline from a source such as Amazon S3. Like stages, you do not work with actions directly in most cases, but you do define and interact with actions when working with pipeline operations such as CreatePipeline and GetPipelineState.

    Pipelines also include transitions, which allow the transition of artifacts from one stage to the next in a pipeline after the actions in one stage complete.

    You can work with transitions by calling:

    • DisableStageTransition, which prevents artifacts from transitioning to the next stage in a pipeline.

    • EnableStageTransition, which enables artifacts to transition to the next stage in a pipeline.

    Using the API to integrate with AWS CodePipeline

    For third-party integrators or developers who want to create their own integrations with AWS CodePipeline, the expected sequence of calls differs from that of a standard API user. To integrate with AWS CodePipeline, developers need to work with the following items:

    Jobs, which are instances of an action. For example, a job for a source action might import a revision of an artifact from a source.

    You can work with jobs by calling:

    • AcknowledgeJob, which confirms whether a job worker has received the specified job.

    • GetJobDetails, which returns the details of a job.

    • PollForJobs, which determines whether there are any jobs to act on.

    • PutJobFailureResult, which provides details of a job failure.

    • PutJobSuccessResult, which provides details of a job success.

    Third party jobs, which are instances of an action created by a partner action and integrated into AWS CodePipeline. Partner actions are created by members of the AWS Partner Network.

    You can work with third party jobs by calling:

    • AcknowledgeThirdPartyJob, which confirms whether a job worker has received the specified job.

    • GetThirdPartyJobDetails, which requests the details of a job for a partner action.

    • PollForThirdPartyJobs, which determines whether there are any jobs to act on.

    • PutThirdPartyJobFailureResult, which provides details of a job failure.

    • PutThirdPartyJobSuccessResult, which provides details of a job success.
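
    To make the pipeline-level calls above concrete, here is a minimal, hypothetical Go sketch against this SDK's codepipeline package; the pipeline name is a placeholder and error handling is abbreviated:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/codepipeline"
    )

    func main() {
        cp := codepipeline.New(session.Must(session.NewSession()))

        // ListPipelines returns a summary (name, version, timestamps) per pipeline.
        list, err := cp.ListPipelines(&codepipeline.ListPipelinesInput{})
        if err != nil {
            fmt.Println(err)
            return
        }
        for _, p := range list.Pipelines {
            fmt.Println(aws.StringValue(p.Name))
        }

        // GetPipelineState reports per-stage and per-action status.
        state, err := cp.GetPipelineState(&codepipeline.GetPipelineStateInput{
            Name: aws.String("my-pipeline"), // placeholder
        })
        if err != nil {
            fmt.Println(err)
            return
        }
        for _, s := range state.StageStates {
            fmt.Println(aws.StringValue(s.StageName))
        }
    }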

    ", + "operations": { + "AcknowledgeJob": "

    Returns information about a specified job and whether that job has been received by the job worker. Only used for custom actions.

    ", + "AcknowledgeThirdPartyJob": "

    Confirms a job worker has received the specified job. Only used for partner actions.

    ", + "CreateCustomActionType": "

    Creates a new custom action that can be used in all pipelines associated with the AWS account. Only used for custom actions.

    ", + "CreatePipeline": "

    Creates a pipeline.

    ", + "DeleteCustomActionType": "

    Marks a custom action as deleted. PollForJobs for the custom action will fail after the action is marked for deletion. Only used for custom actions.

    You cannot recreate a custom action after it has been deleted unless you increase the version number of the action.

    ", + "DeletePipeline": "

    Deletes the specified pipeline.

    ", + "DisableStageTransition": "

    Prevents artifacts in a pipeline from transitioning to the next stage in the pipeline.

    ", + "EnableStageTransition": "

    Enables artifacts in a pipeline to transition to a stage in a pipeline.
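
    A hedged sketch of the disable/enable pair, using the codepipeline client from the earlier sketch; names and the reason are placeholders:

    _, err := cp.DisableStageTransition(&codepipeline.DisableStageTransitionInput{
        PipelineName:   aws.String("my-pipeline"), // placeholder
        StageName:      aws.String("Prod"),        // placeholder
        TransitionType: aws.String("Inbound"),     // or "Outbound"
        Reason:         aws.String("Holding for release window"), // placeholder
    })
    // EnableStageTransitionInput takes the same fields except Reason.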

    ", + "GetJobDetails": "

    Returns information about a job. Only used for custom actions.

    When this API is called, AWS CodePipeline returns temporary credentials for the Amazon S3 bucket used to store artifacts for the pipeline, if the action requires access to that Amazon S3 bucket for input or output artifacts. Additionally, this API returns any secret values defined for the action.

    ", + "GetPipeline": "

    Returns the metadata, structure, stages, and actions of a pipeline. Can be used to return the entire structure of a pipeline in JSON format, which can then be modified and used to update the pipeline structure with UpdatePipeline.
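
    A sketch of that read-modify-write cycle (the pipeline name is a placeholder; the modification step is elided):

    got, err := cp.GetPipeline(&codepipeline.GetPipelineInput{
        Name: aws.String("my-pipeline"), // placeholder
    })
    if err != nil {
        // handle err
    }
    // ... edit got.Pipeline (a PipelineDeclaration) in place ...
    _, err = cp.UpdatePipeline(&codepipeline.UpdatePipelineInput{
        Pipeline: got.Pipeline,
    })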

    ", + "GetPipelineState": "

    Returns information about the state of a pipeline, including the stages and actions.

    ", + "GetThirdPartyJobDetails": "

    Requests the details of a job for a third party action. Only used for partner actions.

    When this API is called, AWS CodePipeline returns temporary credentials for the Amazon S3 bucket used to store artifacts for the pipeline, if the action requires access to that Amazon S3 bucket for input or output artifacts. Additionally, this API returns any secret values defined for the action.

    ", + "ListActionTypes": "

    Gets a summary of all AWS CodePipeline action types associated with your account.

    ", + "ListPipelines": "

    Gets a summary of all of the pipelines associated with your account.

    ", + "PollForJobs": "

    Returns information about any jobs for AWS CodePipeline to act upon.

    When this API is called, AWS CodePipeline returns temporary credentials for the Amazon S3 bucket used to store artifacts for the pipeline, if the action requires access to that Amazon S3 bucket for input or output artifacts. Additionally, this API returns any secret values defined for the action.
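
    A hedged sketch of the custom-action worker loop this implies (poll, acknowledge, do the work, report back); the action type values name a hypothetical registered custom action:

    polled, err := cp.PollForJobs(&codepipeline.PollForJobsInput{
        ActionTypeId: &codepipeline.ActionTypeId{
            Category: aws.String("Build"),     // placeholder
            Owner:    aws.String("Custom"),
            Provider: aws.String("MyBuilder"), // placeholder
            Version:  aws.String("1"),
        },
        MaxBatchSize: aws.Int64(1),
    })
    if err != nil {
        // handle err
    }
    for _, job := range polled.Jobs {
        // The nonce proves this worker owns the job.
        _, _ = cp.AcknowledgeJob(&codepipeline.AcknowledgeJobInput{
            JobId: job.Id,
            Nonce: job.Nonce,
        })
        // ... perform the action using job.Data (artifacts, credentials) ...
        _, _ = cp.PutJobSuccessResult(&codepipeline.PutJobSuccessResultInput{
            JobId: job.Id,
        })
    }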

    ", + "PollForThirdPartyJobs": "

    Determines whether there are any third party jobs for a job worker to act on. Only used for partner actions.

    When this API is called, AWS CodePipeline returns temporary credentials for the Amazon S3 bucket used to store artifacts for the pipeline, if the action requires access to that Amazon S3 bucket for input or output artifacts.

    ", + "PutActionRevision": "

    Provides information to AWS CodePipeline about new revisions to a source.

    ", + "PutApprovalResult": "

    Provides the response to a manual approval request to AWS CodePipeline. Valid responses include Approved and Rejected.
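
    A sketch, assuming an approval token previously fetched via GetPipelineState; all names are placeholders:

    _, err := cp.PutApprovalResult(&codepipeline.PutApprovalResultInput{
        PipelineName: aws.String("my-pipeline"),    // placeholder
        StageName:    aws.String("Approve"),        // placeholder
        ActionName:   aws.String("ManualApproval"), // placeholder
        Token:        approvalToken,                // *string from GetPipelineState
        Result: &codepipeline.ApprovalResult{
            Status:  aws.String("Approved"), // or "Rejected"
            Summary: aws.String("Reviewed and approved."),
        },
    })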

    ", + "PutJobFailureResult": "

    Represents the failure of a job as returned to the pipeline by a job worker. Only used for custom actions.

    ", + "PutJobSuccessResult": "

    Represents the success of a job as returned to the pipeline by a job worker. Only used for custom actions.

    ", + "PutThirdPartyJobFailureResult": "

    Represents the failure of a third party job as returned to the pipeline by a job worker. Only used for partner actions.

    ", + "PutThirdPartyJobSuccessResult": "

    Represents the success of a third party job as returned to the pipeline by a job worker. Only used for partner actions.

    ", + "RetryStageExecution": "

    Resumes the pipeline execution by retrying the last failed actions in a stage.
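
    A sketch, assuming a pipeline execution ID obtained from GetPipelineState; names are placeholders:

    _, err := cp.RetryStageExecution(&codepipeline.RetryStageExecutionInput{
        PipelineName:        aws.String("my-pipeline"), // placeholder
        StageName:           aws.String("Deploy"),      // placeholder
        PipelineExecutionId: executionID,               // *string from GetPipelineState
        RetryMode:           aws.String("FAILED_ACTIONS"),
    })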

    ", + "StartPipelineExecution": "

    Starts the specified pipeline. Specifically, it begins processing the latest commit to the source location specified as part of the pipeline.
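
    The call itself is minimal (the pipeline name is a placeholder):

    _, err := cp.StartPipelineExecution(&codepipeline.StartPipelineExecutionInput{
        Name: aws.String("my-pipeline"), // placeholder
    })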

    ", + "UpdatePipeline": "

    Updates a specified pipeline with edits or changes to its structure. Use a JSON file with the pipeline structure in conjunction with UpdatePipeline to provide the full structure of the pipeline. Updating the pipeline increases the version number of the pipeline by 1.

    " + }, + "shapes": { + "AWSSessionCredentials": { + "base": "

    Represents an AWS session credentials object. These credentials are temporary credentials that are issued by the AWS Security Token Service (AWS STS). They can be used to access input and output artifacts in the Amazon S3 bucket used to store artifacts for the pipeline in AWS CodePipeline.

    ", + "refs": { + "JobData$artifactCredentials": null, + "ThirdPartyJobData$artifactCredentials": null + } + }, + "AccessKeyId": { + "base": null, + "refs": { + "AWSSessionCredentials$accessKeyId": "

    The access key for the session.

    " + } + }, + "AccountId": { + "base": null, + "refs": { + "Job$accountId": "

    The ID of the AWS account to use when performing the job.

    ", + "JobDetails$accountId": "

    The AWS account ID associated with the job.

    " + } + }, + "AcknowledgeJobInput": { + "base": "

    Represents the input of an acknowledge job action.

    ", + "refs": { + } + }, + "AcknowledgeJobOutput": { + "base": "

    Represents the output of an acknowledge job action.

    ", + "refs": { + } + }, + "AcknowledgeThirdPartyJobInput": { + "base": "

    Represents the input of an acknowledge third party job action.

    ", + "refs": { + } + }, + "AcknowledgeThirdPartyJobOutput": { + "base": "

    Represents the output of an acknowledge third party job action.

    ", + "refs": { + } + }, + "ActionCategory": { + "base": null, + "refs": { + "ActionTypeId$category": "

    A category defines what kind of action can be taken in the stage, and constrains the provider type for the action. Valid categories are limited to the following values: Source, Build, Deploy, Test, Invoke, and Approval.

    ", + "CreateCustomActionTypeInput$category": "

    The category of the custom action, such as a source action or a build action.

    Although Source is listed as a valid value, it is not currently functional. This value is reserved for future use.

    ", + "DeleteCustomActionTypeInput$category": "

    The category of the custom action that you want to delete, such as source or deploy.

    " + } + }, + "ActionConfiguration": { + "base": "

    Represents information about an action configuration.

    ", + "refs": { + "JobData$actionConfiguration": null, + "ThirdPartyJobData$actionConfiguration": null + } + }, + "ActionConfigurationKey": { + "base": null, + "refs": { + "ActionConfigurationMap$key": null, + "ActionConfigurationProperty$name": "

    The name of the action configuration property.

    ", + "QueryParamMap$key": null + } + }, + "ActionConfigurationMap": { + "base": null, + "refs": { + "ActionConfiguration$configuration": "

    The configuration data for the action.

    ", + "ActionDeclaration$configuration": "

    The action declaration's configuration.

    " + } + }, + "ActionConfigurationProperty": { + "base": "

    Represents information about an action configuration property.

    ", + "refs": { + "ActionConfigurationPropertyList$member": null + } + }, + "ActionConfigurationPropertyList": { + "base": null, + "refs": { + "ActionType$actionConfigurationProperties": "

    The configuration properties for the action type.

    ", + "CreateCustomActionTypeInput$configurationProperties": "

    The configuration properties for the custom action.

    You can refer to a name in the configuration properties of the custom action within the URL templates by following the format of {Config:name}, as long as the configuration property is both required and not secret. For more information, see Create a Custom Action for a Pipeline.

    " + } + }, + "ActionConfigurationPropertyType": { + "base": null, + "refs": { + "ActionConfigurationProperty$type": "

    The type of the configuration property.

    " + } + }, + "ActionConfigurationQueryableValue": { + "base": null, + "refs": { + "QueryParamMap$value": null + } + }, + "ActionConfigurationValue": { + "base": null, + "refs": { + "ActionConfigurationMap$value": null + } + }, + "ActionContext": { + "base": "

    Represents the context of an action within the stage of a pipeline to a job worker.

    ", + "refs": { + "PipelineContext$action": null + } + }, + "ActionDeclaration": { + "base": "

    Represents information about an action declaration.

    ", + "refs": { + "StageActionDeclarationList$member": null + } + }, + "ActionExecution": { + "base": "

    Represents information about the run of an action.

    ", + "refs": { + "ActionState$latestExecution": null + } + }, + "ActionExecutionStatus": { + "base": null, + "refs": { + "ActionExecution$status": "

    The status of the action, or for a completed action, the last status of the action.

    " + } + }, + "ActionExecutionToken": { + "base": null, + "refs": { + "ActionExecution$token": "

    The system-generated token used to identify a unique approval request. The token for each open approval request can be obtained using the GetPipelineState command and is used to validate that the approval request corresponding to this token is still valid.

    " + } + }, + "ActionName": { + "base": null, + "refs": { + "ActionContext$name": "

    The name of the action within the context of a job.

    ", + "ActionDeclaration$name": "

    The action declaration's name.

    ", + "ActionState$actionName": "

    The name of the action.

    ", + "PutActionRevisionInput$actionName": "

    The name of the action that will process the revision.

    ", + "PutApprovalResultInput$actionName": "

    The name of the action for which approval is requested.

    " + } + }, + "ActionNotFoundException": { + "base": "

    The specified action cannot be found.

    ", + "refs": { + } + }, + "ActionOwner": { + "base": null, + "refs": { + "ActionTypeId$owner": "

    The creator of the action being called.

    ", + "ListActionTypesInput$actionOwnerFilter": "

    Filters the list of action types to those created by a specified entity.

    " + } + }, + "ActionProvider": { + "base": null, + "refs": { + "ActionTypeId$provider": "

    The provider of the service being called by the action. Valid providers are determined by the action category. For example, an action in the Deploy category type might have a provider of AWS CodeDeploy, which would be specified as CodeDeploy.

    ", + "CreateCustomActionTypeInput$provider": "

    The provider of the service used in the custom action, such as AWS CodeDeploy.

    ", + "DeleteCustomActionTypeInput$provider": "

    The provider of the service used in the custom action, such as AWS CodeDeploy.

    " + } + }, + "ActionRevision": { + "base": "

    Represents information about the version (or revision) of an action.

    ", + "refs": { + "ActionState$currentRevision": null, + "PutActionRevisionInput$actionRevision": null + } + }, + "ActionRunOrder": { + "base": null, + "refs": { + "ActionDeclaration$runOrder": "

    The order in which actions are run.

    " + } + }, + "ActionState": { + "base": "

    Represents information about the state of an action.

    ", + "refs": { + "ActionStateList$member": null + } + }, + "ActionStateList": { + "base": null, + "refs": { + "StageState$actionStates": "

    The states of the actions in the stage.

    " + } + }, + "ActionType": { + "base": "

    Returns information about the details of an action type.

    ", + "refs": { + "ActionTypeList$member": null, + "CreateCustomActionTypeOutput$actionType": null + } + }, + "ActionTypeId": { + "base": "

    Represents information about an action type.

    ", + "refs": { + "ActionDeclaration$actionTypeId": "

    The configuration information for the action type.

    ", + "ActionType$id": null, + "JobData$actionTypeId": null, + "PollForJobsInput$actionTypeId": null, + "PollForThirdPartyJobsInput$actionTypeId": null, + "ThirdPartyJobData$actionTypeId": null + } + }, + "ActionTypeList": { + "base": null, + "refs": { + "ListActionTypesOutput$actionTypes": "

    Provides details of the action types.

    " + } + }, + "ActionTypeNotFoundException": { + "base": "

    The specified action type cannot be found.

    ", + "refs": { + } + }, + "ActionTypeSettings": { + "base": "

    Returns information about the settings for an action type.

    ", + "refs": { + "ActionType$settings": "

    The settings for the action type.

    ", + "CreateCustomActionTypeInput$settings": null + } + }, + "ApprovalAlreadyCompletedException": { + "base": "

    The approval action has already been approved or rejected.

    ", + "refs": { + } + }, + "ApprovalResult": { + "base": "

    Represents information about the result of an approval request.

    ", + "refs": { + "PutApprovalResultInput$result": "

    Represents information about the result of the approval request.

    " + } + }, + "ApprovalStatus": { + "base": null, + "refs": { + "ApprovalResult$status": "

    The response submitted by a reviewer assigned to an approval action request.

    " + } + }, + "ApprovalSummary": { + "base": null, + "refs": { + "ApprovalResult$summary": "

    The summary of the current status of the approval request.

    " + } + }, + "ApprovalToken": { + "base": null, + "refs": { + "PutApprovalResultInput$token": "

    The system-generated token used to identify a unique approval request. The token for each open approval request can be obtained using the GetPipelineState action and is used to validate that the approval request corresponding to this token is still valid.
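A minimal sketch of that round trip with the vendored Go client; the pipeline, stage, and action names are placeholders:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/codepipeline"
)

func main() {
	svc := codepipeline.New(session.Must(session.NewSession()))

	// Read the token for the open approval request off the pipeline state.
	state, err := svc.GetPipelineState(&codepipeline.GetPipelineStateInput{
		Name: aws.String("MyFirstPipeline"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	var token *string
	for _, stage := range state.StageStates {
		if aws.StringValue(stage.StageName) != "Beta" { // placeholder stage
			continue
		}
		for _, action := range stage.ActionStates {
			if aws.StringValue(action.ActionName) == "Approval" && action.LatestExecution != nil {
				token = action.LatestExecution.Token
			}
		}
	}

	// Submit the reviewer's response; the token proves the request is current.
	_, err = svc.PutApprovalResult(&codepipeline.PutApprovalResultInput{
		PipelineName: aws.String("MyFirstPipeline"),
		StageName:    aws.String("Beta"),
		ActionName:   aws.String("Approval"), // placeholder action
		Token:        token,
		Result: &codepipeline.ApprovalResult{
			Status:  aws.String(codepipeline.ApprovalStatusApproved),
			Summary: aws.String("Reviewed and approved."),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```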

    " + } + }, + "Artifact": { + "base": "

    Represents information about an artifact that will be worked upon by actions in the pipeline.

    ", + "refs": { + "ArtifactList$member": null + } + }, + "ArtifactDetails": { + "base": "

    Returns information about the details of an artifact.

    ", + "refs": { + "ActionType$inputArtifactDetails": "

    The details of the input artifact for the action, such as its commit ID.

    ", + "ActionType$outputArtifactDetails": "

    The details of the output artifact of the action, such as its commit ID.

    ", + "CreateCustomActionTypeInput$inputArtifactDetails": null, + "CreateCustomActionTypeInput$outputArtifactDetails": null + } + }, + "ArtifactList": { + "base": null, + "refs": { + "JobData$inputArtifacts": "

    The artifact supplied to the job.

    ", + "JobData$outputArtifacts": "

    The output of the job.

    ", + "ThirdPartyJobData$inputArtifacts": "

    The name of the artifact that will be worked upon by the action, if any. This name might be system-generated, such as \"MyApp\", or might be defined by the user when the action is created. The input artifact name must match the name of an output artifact generated by an action in an earlier action or stage of the pipeline.

    ", + "ThirdPartyJobData$outputArtifacts": "

    The name of the artifact that will be the result of the action, if any. This name might be system-generated, such as \"MyBuiltApp\", or might be defined by the user when the action is created.

    " + } + }, + "ArtifactLocation": { + "base": "

    Represents information about the location of an artifact.

    ", + "refs": { + "Artifact$location": "

    The location of an artifact.

    " + } + }, + "ArtifactLocationType": { + "base": null, + "refs": { + "ArtifactLocation$type": "

    The type of artifact in the location.

    " + } + }, + "ArtifactName": { + "base": null, + "refs": { + "Artifact$name": "

    The artifact's name.

    ", + "InputArtifact$name": "

    The name of the artifact to be worked on, for example, \"My App\".

    The input artifact of an action must exactly match the output artifact declared in a preceding action, but the input artifact does not have to be the next action in strict sequence from the action that provided the output artifact. Actions in parallel can declare different output artifacts, which are in turn consumed by different following actions.

    ", + "OutputArtifact$name": "

    The name of the output of an artifact, such as \"My App\".

    The input artifact of an action must exactly match the output artifact declared in a preceding action, but the input artifact does not have to be the next action in strict sequence from the action that provided the output artifact. Actions in parallel can declare different output artifacts, which are in turn consumed by different following actions.

    Output artifact names must be unique within a pipeline.

    " + } + }, + "ArtifactStore": { + "base": "

    The Amazon S3 location where artifacts are stored for the pipeline. If this Amazon S3 bucket is created manually, it must meet the requirements for AWS CodePipeline. For more information, see the Concepts.

    ", + "refs": { + "PipelineDeclaration$artifactStore": null + } + }, + "ArtifactStoreLocation": { + "base": null, + "refs": { + "ArtifactStore$location": "

    The location for storing the artifacts for a pipeline, such as an S3 bucket or folder.

    " + } + }, + "ArtifactStoreType": { + "base": null, + "refs": { + "ArtifactStore$type": "

    The type of the artifact store, such as S3.

    " + } + }, + "BlockerDeclaration": { + "base": "

    Reserved for future use.

    ", + "refs": { + "StageBlockerDeclarationList$member": null + } + }, + "BlockerName": { + "base": null, + "refs": { + "BlockerDeclaration$name": "

    Reserved for future use.

    " + } + }, + "BlockerType": { + "base": null, + "refs": { + "BlockerDeclaration$type": "

    Reserved for future use.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "ActionConfigurationProperty$required": "

    Whether the configuration property is a required value.

    ", + "ActionConfigurationProperty$key": "

    Whether the configuration property is a key.

    ", + "ActionConfigurationProperty$secret": "

    Whether the configuration property is secret. Secrets are hidden from all calls except for GetJobDetails, GetThirdPartyJobDetails, PollForJobs, and PollForThirdPartyJobs.

    When updating a pipeline, passing * * * * * without changing any other values of the action will preserve the prior value of the secret.

    ", + "ActionConfigurationProperty$queryable": "

    Indicates that the property will be used in conjunction with PollForJobs. When creating a custom action, an action can have up to one queryable property. If it has one, that property must be both required and not secret.

    If you create a pipeline with a custom action type, and that custom action contains a queryable property, the value for that configuration property is subject to additional restrictions. The value must be less than or equal to twenty (20) characters. The value can contain only alphanumeric characters, underscores, and hyphens.

    ", + "PutActionRevisionOutput$newRevision": "

    The new revision number or ID for the revision after the action completes.

    " + } + }, + "ClientId": { + "base": null, + "refs": { + "ThirdPartyJob$clientId": "

    The clientId portion of the clientId and clientToken pair used to verify that the calling entity is allowed access to the job and its details.

    " + } + }, + "ClientToken": { + "base": null, + "refs": { + "AcknowledgeThirdPartyJobInput$clientToken": "

    The clientToken portion of the clientId and clientToken pair used to verify that the calling entity is allowed access to the job and its details.

    ", + "GetThirdPartyJobDetailsInput$clientToken": "

    The clientToken portion of the clientId and clientToken pair used to verify that the calling entity is allowed access to the job and its details.

    ", + "PutThirdPartyJobFailureResultInput$clientToken": "

    The clientToken portion of the clientId and clientToken pair used to verify that the calling entity is allowed access to the job and its details.

    ", + "PutThirdPartyJobSuccessResultInput$clientToken": "

    The clientToken portion of the clientId and clientToken pair used to verify that the calling entity is allowed access to the job and its details.

    " + } + }, + "Code": { + "base": null, + "refs": { + "ErrorDetails$code": "

    The system ID or error number code of the error.

    " + } + }, + "ContinuationToken": { + "base": null, + "refs": { + "JobData$continuationToken": "

    A system-generated token, such as an AWS CodeDeploy deployment ID, that a job requires in order to continue the job asynchronously.

    ", + "PutJobSuccessResultInput$continuationToken": "

    A token generated by a job worker, such as an AWS CodeDeploy deployment ID, that a successful job provides to identify a custom action in progress. Future jobs will use this token in order to identify the running instance of the action. It can be reused to return additional information about the progress of the custom action. When the action is complete, no continuation token should be supplied.
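Sketched with the vendored Go client, a custom-action worker's happy path looks roughly like this; the action type values and the continuation token are placeholders:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/codepipeline"
)

func main() {
	svc := codepipeline.New(session.Must(session.NewSession()))

	out, err := svc.PollForJobs(&codepipeline.PollForJobsInput{
		ActionTypeId: &codepipeline.ActionTypeId{
			Category: aws.String(codepipeline.ActionCategoryBuild),
			Owner:    aws.String(codepipeline.ActionOwnerCustom),
			Provider: aws.String("MyBuild-ProviderName"), // placeholder
			Version:  aws.String("1"),
		},
		MaxBatchSize: aws.Int64(1),
	})
	if err != nil {
		log.Fatal(err)
	}

	for _, job := range out.Jobs {
		// Echo the nonce back so only this worker owns the job.
		if _, err := svc.AcknowledgeJob(&codepipeline.AcknowledgeJobInput{
			JobId: job.Id,
			Nonce: job.Nonce,
		}); err != nil {
			log.Fatal(err)
		}

		// ...do the work, then report success. The continuation token is
		// omitted once the action has fully completed.
		if _, err := svc.PutJobSuccessResult(&codepipeline.PutJobSuccessResultInput{
			JobId:             job.Id,
			ContinuationToken: aws.String("worker-state-token"), // placeholder
		}); err != nil {
			log.Fatal(err)
		}
	}
}
```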

    ", + "PutThirdPartyJobSuccessResultInput$continuationToken": "

    A token generated by a job worker, such as an AWS CodeDeploy deployment ID, that a successful job provides to identify a partner action in progress. Future jobs will use this token in order to identify the running instance of the action. It can be reused to return additional information about the progress of the partner action. When the action is complete, no continuation token should be supplied.

    ", + "ThirdPartyJobData$continuationToken": "

    A system-generated token, such as an AWS CodeDeploy deployment ID, that a job requires in order to continue the job asynchronously.

    " + } + }, + "CreateCustomActionTypeInput": { + "base": "

    Represents the input of a create custom action operation.

    ", + "refs": { + } + }, + "CreateCustomActionTypeOutput": { + "base": "

    Represents the output of a create custom action operation.

    ", + "refs": { + } + }, + "CreatePipelineInput": { + "base": "

    Represents the input of a create pipeline action.

    ", + "refs": { + } + }, + "CreatePipelineOutput": { + "base": "

    Represents the output of a create pipeline action.

    ", + "refs": { + } + }, + "CurrentRevision": { + "base": "

    Represents information about a current revision.

    ", + "refs": { + "PutJobSuccessResultInput$currentRevision": "

    The ID of the current revision of the artifact successfully worked upon by the job.

    ", + "PutThirdPartyJobSuccessResultInput$currentRevision": null + } + }, + "DeleteCustomActionTypeInput": { + "base": "

    Represents the input of a delete custom action operation. The custom action will be marked as deleted.

    ", + "refs": { + } + }, + "DeletePipelineInput": { + "base": "

    Represents the input of a delete pipeline action.

    ", + "refs": { + } + }, + "Description": { + "base": null, + "refs": { + "ActionConfigurationProperty$description": "

    The description of the action configuration property that will be displayed to users.

    " + } + }, + "DisableStageTransitionInput": { + "base": "

    Represents the input of a disable stage transition action.

    ", + "refs": { + } + }, + "DisabledReason": { + "base": null, + "refs": { + "DisableStageTransitionInput$reason": "

    The reason given to the user why a stage is disabled, such as waiting for manual approval or manual tests. This message is displayed in the pipeline console UI.

    ", + "TransitionState$disabledReason": "

    The user-specified reason why the transition between two stages of a pipeline was disabled.

    " + } + }, + "EnableStageTransitionInput": { + "base": "

    Represents the input of an enable stage transition action.

    ", + "refs": { + } + }, + "Enabled": { + "base": null, + "refs": { + "TransitionState$enabled": "

    Whether the transition between stages is enabled (true) or disabled (false).

    " + } + }, + "EncryptionKey": { + "base": "

    Represents information about the key used to encrypt data in the artifact store, such as an AWS Key Management Service (AWS KMS) key.

    ", + "refs": { + "ArtifactStore$encryptionKey": "

    The encryption key used to encrypt the data in the artifact store, such as an AWS Key Management Service (AWS KMS) key. If this is undefined, the default key for Amazon S3 is used.

    ", + "JobData$encryptionKey": null, + "ThirdPartyJobData$encryptionKey": "

    The encryption key used to encrypt and decrypt data in the artifact store for the pipeline, such as an AWS Key Management Service (AWS KMS) key. This is optional and might not be present.

    " + } + }, + "EncryptionKeyId": { + "base": null, + "refs": { + "EncryptionKey$id": "

    The ID used to identify the key. For an AWS KMS key, this is the key ID or key ARN.

    " + } + }, + "EncryptionKeyType": { + "base": null, + "refs": { + "EncryptionKey$type": "

    The type of encryption key, such as an AWS Key Management Service (AWS KMS) key. When creating or updating a pipeline, the value must be set to 'KMS'.
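For instance, with the vendored Go client an artifact store carrying a customer-managed key is declared like this; the bucket name and key ARN are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/codepipeline"
)

func main() {
	// Artifact store for a PipelineDeclaration; leaving EncryptionKey nil
	// would fall back to the default Amazon S3 key.
	store := &codepipeline.ArtifactStore{
		Type:     aws.String(codepipeline.ArtifactStoreTypeS3),
		Location: aws.String("codepipeline-us-east-1-11EXAMPLE11"), // placeholder bucket
		EncryptionKey: &codepipeline.EncryptionKey{
			Type: aws.String(codepipeline.EncryptionKeyTypeKms), // only "KMS" is accepted
			Id:   aws.String("arn:aws:kms:us-east-1:111111111111:key/EXAMPLE"),
		},
	}
	fmt.Println(store)
}
```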

    " + } + }, + "ErrorDetails": { + "base": "

    Represents information about an error in AWS CodePipeline.

    ", + "refs": { + "ActionExecution$errorDetails": "

    The details of an error returned by a URL external to AWS.

    " + } + }, + "ExecutionDetails": { + "base": "

    The details of the actions taken and results produced on an artifact as it passes through stages in the pipeline.

    ", + "refs": { + "PutJobSuccessResultInput$executionDetails": "

    The execution details of the successful job, such as the actions taken by the job worker.

    ", + "PutThirdPartyJobSuccessResultInput$executionDetails": null + } + }, + "ExecutionId": { + "base": null, + "refs": { + "ActionExecution$externalExecutionId": "

    The external ID of the run of the action.

    ", + "ExecutionDetails$externalExecutionId": "

    The system-generated unique ID of this action used to identify this job worker in any external systems, such as AWS CodeDeploy.

    ", + "FailureDetails$externalExecutionId": "

    The external ID of the run of the action that failed.

    " + } + }, + "ExecutionSummary": { + "base": null, + "refs": { + "ActionExecution$summary": "

    A summary of the run of the action.

    ", + "ExecutionDetails$summary": "

    The summary of the current status of the actions.

    " + } + }, + "FailureDetails": { + "base": "

    Represents information about failure details.

    ", + "refs": { + "PutJobFailureResultInput$failureDetails": "

    The details about the failure of a job.

    ", + "PutThirdPartyJobFailureResultInput$failureDetails": null + } + }, + "FailureType": { + "base": null, + "refs": { + "FailureDetails$type": "

    The type of the failure.
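A worker reports such a failure with the vendored Go client roughly as follows; the job ID and message are placeholders:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/codepipeline"
)

func main() {
	svc := codepipeline.New(session.Must(session.NewSession()))

	// Report a failed custom-action job back to AWS CodePipeline. The job
	// ID is the one returned by PollForJobs (placeholder here).
	_, err := svc.PutJobFailureResult(&codepipeline.PutJobFailureResultInput{
		JobId: aws.String("11111111-abcd-1111-abcd-111111abcdef"),
		FailureDetails: &codepipeline.FailureDetails{
			Type:    aws.String(codepipeline.FailureTypeJobFailed),
			Message: aws.String("build exited with status 1"), // placeholder
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```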

    " + } + }, + "GetJobDetailsInput": { + "base": "

    Represents the input of a get job details action.

    ", + "refs": { + } + }, + "GetJobDetailsOutput": { + "base": "

    Represents the output of a get job details action.

    ", + "refs": { + } + }, + "GetPipelineInput": { + "base": "

    Represents the input of a get pipeline action.

    ", + "refs": { + } + }, + "GetPipelineOutput": { + "base": "

    Represents the output of a get pipeline action.

    ", + "refs": { + } + }, + "GetPipelineStateInput": { + "base": "

    Represents the input of a get pipeline state action.

    ", + "refs": { + } + }, + "GetPipelineStateOutput": { + "base": "

    Represents the output of a get pipeline state action.

    ", + "refs": { + } + }, + "GetThirdPartyJobDetailsInput": { + "base": "

    Represents the input of a get third party job details action.

    ", + "refs": { + } + }, + "GetThirdPartyJobDetailsOutput": { + "base": "

    Represents the output of a get third party job details action.

    ", + "refs": { + } + }, + "InputArtifact": { + "base": "

    Represents information about an artifact to be worked on, such as a test or build artifact.

    ", + "refs": { + "InputArtifactList$member": null + } + }, + "InputArtifactList": { + "base": null, + "refs": { + "ActionDeclaration$inputArtifacts": "

    The name or ID of the artifact consumed by the action, such as a test or build artifact.

    " + } + }, + "InvalidActionDeclarationException": { + "base": "

    The specified action declaration was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidApprovalTokenException": { + "base": "

    The approval request already received a response or has expired.

    ", + "refs": { + } + }, + "InvalidBlockerDeclarationException": { + "base": "

    Reserved for future use.

    ", + "refs": { + } + }, + "InvalidClientTokenException": { + "base": "

    The client token was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidJobException": { + "base": "

    The specified job was specified in an invalid format or cannot be found.

    ", + "refs": { + } + }, + "InvalidJobStateException": { + "base": "

    The specified job state was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidNextTokenException": { + "base": "

    The next token was specified in an invalid format. Make sure that the next token you provided is the token returned by a previous call.

    ", + "refs": { + } + }, + "InvalidNonceException": { + "base": "

    The specified nonce was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidStageDeclarationException": { + "base": "

    The specified stage declaration was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidStructureException": { + "base": "

    The specified structure was specified in an invalid format.

    ", + "refs": { + } + }, + "Job": { + "base": "

    Represents information about a job.

    ", + "refs": { + "JobList$member": null + } + }, + "JobData": { + "base": "

    Represents additional information about a job required for a job worker to complete the job.

    ", + "refs": { + "Job$data": "

    Additional data about a job.

    ", + "JobDetails$data": null + } + }, + "JobDetails": { + "base": "

    Represents information about the details of a job.

    ", + "refs": { + "GetJobDetailsOutput$jobDetails": "

    The details of the job.

    If AWSSessionCredentials is used, a long-running job can call GetJobDetails again to obtain new credentials.

    " + } + }, + "JobId": { + "base": null, + "refs": { + "AcknowledgeJobInput$jobId": "

    The unique system-generated ID of the job for which you want to confirm receipt.

    ", + "GetJobDetailsInput$jobId": "

    The unique system-generated ID for the job.

    ", + "Job$id": "

    The unique system-generated ID of the job.

    ", + "JobDetails$id": "

    The unique system-generated ID of the job.

    ", + "PutJobFailureResultInput$jobId": "

    The unique system-generated ID of the job that failed. This is the same ID returned from PollForJobs.

    ", + "PutJobSuccessResultInput$jobId": "

    The unique system-generated ID of the job that succeeded. This is the same ID returned from PollForJobs.

    ", + "ThirdPartyJob$jobId": "

    The identifier used to identify the job in AWS CodePipeline.

    " + } + }, + "JobList": { + "base": null, + "refs": { + "PollForJobsOutput$jobs": "

    Information about the jobs to take action on.

    " + } + }, + "JobNotFoundException": { + "base": "

    The specified job was specified in an invalid format or cannot be found.

    ", + "refs": { + } + }, + "JobStatus": { + "base": null, + "refs": { + "AcknowledgeJobOutput$status": "

    Whether the job worker has received the specified job.

    ", + "AcknowledgeThirdPartyJobOutput$status": "

    The status information for the third party job, if any.

    " + } + }, + "LastChangedAt": { + "base": null, + "refs": { + "TransitionState$lastChangedAt": "

    The timestamp when the transition state was last changed.

    " + } + }, + "LastChangedBy": { + "base": null, + "refs": { + "TransitionState$lastChangedBy": "

    The ID of the user who last changed the transition state.

    " + } + }, + "LastUpdatedBy": { + "base": null, + "refs": { + "ActionExecution$lastUpdatedBy": "

    The ARN of the user who last changed the pipeline.

    " + } + }, + "LimitExceededException": { + "base": "

    The number of pipelines associated with the AWS account has exceeded the limit allowed for the account.

    ", + "refs": { + } + }, + "ListActionTypesInput": { + "base": "

    Represents the input of a list action types action.

    ", + "refs": { + } + }, + "ListActionTypesOutput": { + "base": "

    Represents the output of a list action types action.

    ", + "refs": { + } + }, + "ListPipelinesInput": { + "base": "

    Represents the input of a list pipelines action.

    ", + "refs": { + } + }, + "ListPipelinesOutput": { + "base": "

    Represents the output of a list pipelines action.

    ", + "refs": { + } + }, + "MaxBatchSize": { + "base": null, + "refs": { + "PollForJobsInput$maxBatchSize": "

    The maximum number of jobs to return in a poll for jobs call.

    ", + "PollForThirdPartyJobsInput$maxBatchSize": "

    The maximum number of jobs to return in a poll for third party jobs call.

    " + } + }, + "MaximumArtifactCount": { + "base": null, + "refs": { + "ArtifactDetails$maximumCount": "

    The maximum number of artifacts allowed for the action type.

    " + } + }, + "Message": { + "base": null, + "refs": { + "ErrorDetails$message": "

    The text of the error message.

    ", + "FailureDetails$message": "

    The message about the failure.

    " + } + }, + "MinimumArtifactCount": { + "base": null, + "refs": { + "ArtifactDetails$minimumCount": "

    The minimum number of artifacts allowed for the action type.

    " + } + }, + "NextToken": { + "base": null, + "refs": { + "ListActionTypesInput$nextToken": "

    An identifier that was returned from the previous list action types call, which can be used to return the next set of action types in the list.

    ", + "ListActionTypesOutput$nextToken": "

    If the amount of returned information is significantly large, an identifier is also returned which can be used in a subsequent list action types call to return the next set of action types in the list.

    ", + "ListPipelinesInput$nextToken": "

    An identifier that was returned from the previous list pipelines call, which can be used to return the next set of pipelines in the list.

    ", + "ListPipelinesOutput$nextToken": "

    If the amount of returned information is significantly large, an identifier is also returned which can be used in a subsequent list pipelines call to return the next set of pipelines in the list.
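With the vendored Go client the token is threaded through a loop; a minimal sketch:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/codepipeline"
)

func main() {
	svc := codepipeline.New(session.Must(session.NewSession()))

	// Walk the paginated pipeline list, feeding each returned nextToken
	// back into the following call until none is returned.
	input := &codepipeline.ListPipelinesInput{}
	for {
		out, err := svc.ListPipelines(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, p := range out.Pipelines {
			fmt.Println(aws.StringValue(p.Name), aws.Int64Value(p.Version))
		}
		if out.NextToken == nil || *out.NextToken == "" {
			break
		}
		input.NextToken = out.NextToken
	}
}
```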

    " + } + }, + "Nonce": { + "base": null, + "refs": { + "AcknowledgeJobInput$nonce": "

    A system-generated random number that AWS CodePipeline uses to ensure that the job is being worked on by only one job worker. This number must be returned in the response.

    ", + "AcknowledgeThirdPartyJobInput$nonce": "

    A system-generated random number that AWS CodePipeline uses to ensure that the job is being worked on by only one job worker. This number must be returned in the response.

    ", + "Job$nonce": "

    A system-generated random number that AWS CodePipeline uses to ensure that the job is being worked on by only one job worker. This number must be returned in the response.

    ", + "ThirdPartyJobDetails$nonce": "

    A system-generated random number that AWS CodePipeline uses to ensure that the job is being worked on by only one job worker. This number must be returned in the response.

    " + } + }, + "NotLatestPipelineExecutionException": { + "base": "

    The stage has failed in a later run of the pipeline and the pipelineExecutionId associated with the request is out of date.

    ", + "refs": { + } + }, + "OutputArtifact": { + "base": "

    Represents information about the output of an action.

    ", + "refs": { + "OutputArtifactList$member": null + } + }, + "OutputArtifactList": { + "base": null, + "refs": { + "ActionDeclaration$outputArtifacts": "

    The name or ID of the result of the action declaration, such as a test or build artifact.

    " + } + }, + "Percentage": { + "base": null, + "refs": { + "ActionExecution$percentComplete": "

    A percentage of completeness of the action as it runs.

    ", + "ExecutionDetails$percentComplete": "

    The percentage of work completed on the action, represented on a scale of zero to one hundred percent.

    " + } + }, + "PipelineContext": { + "base": "

    Represents information about a pipeline to a job worker.

    ", + "refs": { + "JobData$pipelineContext": null, + "ThirdPartyJobData$pipelineContext": null + } + }, + "PipelineDeclaration": { + "base": "

    Represents the structure of actions and stages to be performed in the pipeline.

    ", + "refs": { + "CreatePipelineInput$pipeline": null, + "CreatePipelineOutput$pipeline": null, + "GetPipelineOutput$pipeline": null, + "UpdatePipelineInput$pipeline": "

    The structure of the pipeline to be updated.

    ", + "UpdatePipelineOutput$pipeline": "

    The structure of the updated pipeline.

    " + } + }, + "PipelineExecutionId": { + "base": null, + "refs": { + "PutActionRevisionOutput$pipelineExecutionId": "

    The ID of the current workflow state of the pipeline.

    ", + "RetryStageExecutionInput$pipelineExecutionId": "

    The ID of the pipeline execution in the failed stage to be retried. Use the GetPipelineState action to retrieve the current pipelineExecutionId of the failed stage.

    ", + "RetryStageExecutionOutput$pipelineExecutionId": "

    The ID of the current workflow execution in the failed stage.

    ", + "StageExecution$pipelineExecutionId": "

    The ID of the pipeline execution associated with the stage.

    ", + "StartPipelineExecutionOutput$pipelineExecutionId": "

    The unique system-generated ID of the pipeline that was started.

    " + } + }, + "PipelineList": { + "base": null, + "refs": { + "ListPipelinesOutput$pipelines": "

    The list of pipelines.

    " + } + }, + "PipelineName": { + "base": null, + "refs": { + "DeletePipelineInput$name": "

    The name of the pipeline to be deleted.

    ", + "DisableStageTransitionInput$pipelineName": "

    The name of the pipeline in which you want to disable the flow of artifacts from one stage to another.

    ", + "EnableStageTransitionInput$pipelineName": "

    The name of the pipeline in which you want to enable the flow of artifacts from one stage to another.

    ", + "GetPipelineInput$name": "

    The name of the pipeline for which you want to get information. Pipeline names must be unique under an Amazon Web Services (AWS) user account.

    ", + "GetPipelineStateInput$name": "

    The name of the pipeline about which you want to get information.

    ", + "GetPipelineStateOutput$pipelineName": "

    The name of the pipeline for which you want to get the state.

    ", + "PipelineContext$pipelineName": "

    The name of the pipeline. This is a user-specified value. Pipeline names must be unique across all pipeline names under an Amazon Web Services account.

    ", + "PipelineDeclaration$name": "

    The name of the pipeline.

    ", + "PipelineSummary$name": "

    The name of the pipeline.

    ", + "PutActionRevisionInput$pipelineName": "

    The name of the pipeline that will start processing the revision to the source.

    ", + "PutApprovalResultInput$pipelineName": "

    The name of the pipeline that contains the action.

    ", + "RetryStageExecutionInput$pipelineName": "

    The name of the pipeline that contains the failed stage.

    ", + "StartPipelineExecutionInput$name": "

    The name of the pipeline to start.
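A minimal sketch with the vendored Go client; the pipeline name is a placeholder:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/codepipeline"
)

func main() {
	svc := codepipeline.New(session.Must(session.NewSession()))

	out, err := svc.StartPipelineExecution(&codepipeline.StartPipelineExecutionInput{
		Name: aws.String("MyFirstPipeline"), // placeholder pipeline name
	})
	if err != nil {
		log.Fatal(err)
	}
	// The returned ID identifies this run in later GetPipelineState calls.
	fmt.Println(aws.StringValue(out.PipelineExecutionId))
}
```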

    " + } + }, + "PipelineNameInUseException": { + "base": "

    The specified pipeline name is already in use.

    ", + "refs": { + } + }, + "PipelineNotFoundException": { + "base": "

    The specified pipeline was specified in an invalid format or cannot be found.

    ", + "refs": { + } + }, + "PipelineStageDeclarationList": { + "base": null, + "refs": { + "PipelineDeclaration$stages": "

    The stages that make up the pipeline.

    " + } + }, + "PipelineSummary": { + "base": "

    Returns a summary of a pipeline.

    ", + "refs": { + "PipelineList$member": null + } + }, + "PipelineVersion": { + "base": null, + "refs": { + "GetPipelineInput$version": "

    The version number of the pipeline. If you do not specify a version, defaults to the most current version.

    ", + "GetPipelineStateOutput$pipelineVersion": "

    The version number of the pipeline.

    A newly-created pipeline is always assigned a version number of 1.

    ", + "PipelineDeclaration$version": "

    The version number of the pipeline. A new pipeline always has a version number of 1. This number is automatically incremented when a pipeline is updated.

    ", + "PipelineSummary$version": "

    The version number of the pipeline.

    " + } + }, + "PipelineVersionNotFoundException": { + "base": "

    The specified pipeline version was specified in an invalid format or cannot be found.

    ", + "refs": { + } + }, + "PollForJobsInput": { + "base": "

    Represents the input of a poll for jobs action.

    ", + "refs": { + } + }, + "PollForJobsOutput": { + "base": "

    Represents the output of a poll for jobs action.

    ", + "refs": { + } + }, + "PollForThirdPartyJobsInput": { + "base": "

    Represents the input of a poll for third party jobs action.

    ", + "refs": { + } + }, + "PollForThirdPartyJobsOutput": { + "base": "

    Represents the output of a poll for third party jobs action.

    ", + "refs": { + } + }, + "PutActionRevisionInput": { + "base": "

    Represents the input of a put action revision action.

    ", + "refs": { + } + }, + "PutActionRevisionOutput": { + "base": "

    Represents the output of a put action revision action.

    ", + "refs": { + } + }, + "PutApprovalResultInput": { + "base": "

    Represents the input of a put approval result action.

    ", + "refs": { + } + }, + "PutApprovalResultOutput": { + "base": "

    Represents the output of a put approval result action.

    ", + "refs": { + } + }, + "PutJobFailureResultInput": { + "base": "

    Represents the input of a put job failure result action.

    ", + "refs": { + } + }, + "PutJobSuccessResultInput": { + "base": "

    Represents the input of a put job success result action.

    ", + "refs": { + } + }, + "PutThirdPartyJobFailureResultInput": { + "base": "

    Represents the input of a third party job failure result action.

    ", + "refs": { + } + }, + "PutThirdPartyJobSuccessResultInput": { + "base": "

    Represents the input of a put third party job success result action.

    ", + "refs": { + } + }, + "QueryParamMap": { + "base": null, + "refs": { + "PollForJobsInput$queryParam": "

    A map of property names and values. For an action type with no queryable properties, this value must be null or an empty map. For an action type with a queryable property, you must supply that property as a key in the map. Only jobs whose action configuration matches the mapped value will be returned.
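Sketched with the vendored Go client; the action type values, property name, and value are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/codepipeline"
)

func main() {
	svc := codepipeline.New(session.Must(session.NewSession()))

	// Only jobs whose ProjectName configuration equals the supplied value
	// are returned; the key must be the action type's queryable property.
	out, err := svc.PollForJobs(&codepipeline.PollForJobsInput{
		ActionTypeId: &codepipeline.ActionTypeId{
			Category: aws.String(codepipeline.ActionCategoryBuild),
			Owner:    aws.String(codepipeline.ActionOwnerCustom),
			Provider: aws.String("MyBuild-ProviderName"), // placeholder
			Version:  aws.String("1"),
		},
		MaxBatchSize: aws.Int64(5),
		QueryParam: map[string]*string{
			"ProjectName": aws.String("MyJenkinsExampleBuildProject"), // placeholders
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(out.Jobs), "jobs matched")
}
```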

    " + } + }, + "RetryStageExecutionInput": { + "base": "

    Represents the input of a retry stage execution action.

    ", + "refs": { + } + }, + "RetryStageExecutionOutput": { + "base": "

    Represents the output of a retry stage execution action.

    ", + "refs": { + } + }, + "Revision": { + "base": null, + "refs": { + "ActionRevision$revisionId": "

    The system-generated unique ID that identifies the revision number of the action.

    ", + "Artifact$revision": "

    The artifact's revision ID. Depending on the type of object, this could be a commit ID (GitHub) or a revision ID (Amazon S3).

    ", + "CurrentRevision$revision": "

    The revision ID of the current version of an artifact.

    " + } + }, + "RevisionChangeIdentifier": { + "base": null, + "refs": { + "ActionRevision$revisionChangeId": "

    The unique identifier of the change that set the state to this revision, for example a deployment ID or timestamp.

    ", + "CurrentRevision$changeIdentifier": "

    The change identifier for the current revision.

    " + } + }, + "RoleArn": { + "base": null, + "refs": { + "ActionDeclaration$roleArn": "

    The ARN of the IAM service role that will perform the declared action. This is assumed through the roleArn for the pipeline.

    ", + "PipelineDeclaration$roleArn": "

    The Amazon Resource Name (ARN) for AWS CodePipeline to use to either perform actions with no actionRoleArn, or to use to assume roles for actions with an actionRoleArn.

    " + } + }, + "S3ArtifactLocation": { + "base": "

    The location of the Amazon S3 bucket that contains a revision.

    ", + "refs": { + "ArtifactLocation$s3Location": "

    The Amazon S3 bucket that contains the artifact.

    " + } + }, + "S3BucketName": { + "base": null, + "refs": { + "S3ArtifactLocation$bucketName": "

    The name of the Amazon S3 bucket.

    " + } + }, + "S3ObjectKey": { + "base": null, + "refs": { + "S3ArtifactLocation$objectKey": "

    The key of the object in the Amazon S3 bucket, which uniquely identifies the object in the bucket.

    " + } + }, + "SecretAccessKey": { + "base": null, + "refs": { + "AWSSessionCredentials$secretAccessKey": "

    The secret access key for the session.

    " + } + }, + "SessionToken": { + "base": null, + "refs": { + "AWSSessionCredentials$sessionToken": "

    The token for the session.

    " + } + }, + "StageActionDeclarationList": { + "base": null, + "refs": { + "StageDeclaration$actions": "

    The actions included in a stage.

    " + } + }, + "StageBlockerDeclarationList": { + "base": null, + "refs": { + "StageDeclaration$blockers": "

    Reserved for future use.

    " + } + }, + "StageContext": { + "base": "

    Represents information about a stage to a job worker.

    ", + "refs": { + "PipelineContext$stage": "

    The stage of the pipeline.

    " + } + }, + "StageDeclaration": { + "base": "

    Represents information about a stage and its definition.

    ", + "refs": { + "PipelineStageDeclarationList$member": null + } + }, + "StageExecution": { + "base": "

    Represents information about the run of a stage.

    ", + "refs": { + "StageState$latestExecution": "

    Information about the latest execution in the stage, including its ID and status.

    " + } + }, + "StageExecutionStatus": { + "base": null, + "refs": { + "StageExecution$status": "

    The status of the stage, or for a completed stage, the last status of the stage.

    " + } + }, + "StageName": { + "base": null, + "refs": { + "DisableStageTransitionInput$stageName": "

    The name of the stage where you want to disable the inbound or outbound transition of artifacts.

    ", + "EnableStageTransitionInput$stageName": "

    The name of the stage where you want to enable the transition of artifacts, either into the stage (inbound) or from that stage to the next stage (outbound).

    ", + "PutActionRevisionInput$stageName": "

    The name of the stage that contains the action that will act upon the revision.

    ", + "PutApprovalResultInput$stageName": "

    The name of the stage that contains the action.

    ", + "RetryStageExecutionInput$stageName": "

    The name of the failed stage to be retried.

    ", + "StageContext$name": "

    The name of the stage.

    ", + "StageDeclaration$name": "

    The name of the stage.

    ", + "StageState$stageName": "

    The name of the stage.

    " + } + }, + "StageNotFoundException": { + "base": "

    The specified stage was specified in an invalid format or cannot be found.

    ", + "refs": { + } + }, + "StageNotRetryableException": { + "base": "

    The specified stage can't be retried because the pipeline structure or stage state changed after the stage was not completed; the stage contains no failed actions; one or more actions are still in progress; or another retry attempt is already in progress.

    ", + "refs": { + } + }, + "StageRetryMode": { + "base": null, + "refs": { + "RetryStageExecutionInput$retryMode": "

    The scope of the retry attempt. Currently, the only supported value is FAILED_ACTIONS.
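With the vendored Go client, retrying the failed actions in a stage looks roughly like this; the names and execution ID are placeholders:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/codepipeline"
)

func main() {
	svc := codepipeline.New(session.Must(session.NewSession()))

	// The execution ID must be the current one for the failed stage,
	// as reported by GetPipelineState.
	_, err := svc.RetryStageExecution(&codepipeline.RetryStageExecutionInput{
		PipelineName:        aws.String("MyFirstPipeline"), // placeholders
		StageName:           aws.String("Beta"),
		PipelineExecutionId: aws.String("11111111-abcd-1111-abcd-111111abcdef"),
		RetryMode:           aws.String(codepipeline.StageRetryModeFailedActions),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```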

    " + } + }, + "StageState": { + "base": "

    Represents information about the state of the stage.

    ", + "refs": { + "StageStateList$member": null + } + }, + "StageStateList": { + "base": null, + "refs": { + "GetPipelineStateOutput$stageStates": "

    A list of the pipeline stage output information, including stage name, state, most recent run details, whether the stage is disabled, and other data.

    " + } + }, + "StageTransitionType": { + "base": null, + "refs": { + "DisableStageTransitionInput$transitionType": "

    Specifies whether artifacts will be prevented from transitioning into the stage and being processed by the actions in that stage (inbound), or prevented from transitioning from the stage after they have been processed by the actions in that stage (outbound).

    ", + "EnableStageTransitionInput$transitionType": "

    Specifies whether artifacts will be allowed to enter the stage and be processed by the actions in that stage (inbound) or whether already-processed artifacts will be allowed to transition to the next stage (outbound).
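Both directions are driven the same way from the vendored Go client; a minimal sketch with placeholder names:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/codepipeline"
)

func main() {
	svc := codepipeline.New(session.Must(session.NewSession()))

	// Hold artifacts out of the Beta stage; the reason shows up in the console.
	_, err := svc.DisableStageTransition(&codepipeline.DisableStageTransitionInput{
		PipelineName:   aws.String("MyFirstPipeline"), // placeholders
		StageName:      aws.String("Beta"),
		TransitionType: aws.String(codepipeline.StageTransitionTypeInbound),
		Reason:         aws.String("Holding for manual tests"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Re-open the same transition once testing is done.
	_, err = svc.EnableStageTransition(&codepipeline.EnableStageTransitionInput{
		PipelineName:   aws.String("MyFirstPipeline"),
		StageName:      aws.String("Beta"),
		TransitionType: aws.String(codepipeline.StageTransitionTypeInbound),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```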

    " + } + }, + "StartPipelineExecutionInput": { + "base": "

    Represents the input of a start pipeline execution action.

    ", + "refs": { + } + }, + "StartPipelineExecutionOutput": { + "base": "

    Represents the output of a start pipeline execution action.

    ", + "refs": { + } + }, + "ThirdPartyJob": { + "base": "

    A response to a PollForThirdPartyJobs request returned by AWS CodePipeline when there is a job to be worked upon by a partner action.

    ", + "refs": { + "ThirdPartyJobList$member": null + } + }, + "ThirdPartyJobData": { + "base": "

    Represents information about the job data for a partner action.

    ", + "refs": { + "ThirdPartyJobDetails$data": "

    The data to be returned by the third party job worker.

    " + } + }, + "ThirdPartyJobDetails": { + "base": "

    The details of a job sent in response to a GetThirdPartyJobDetails request.

    ", + "refs": { + "GetThirdPartyJobDetailsOutput$jobDetails": "

    The details of the job, including any protected values defined for the job.

    " + } + }, + "ThirdPartyJobId": { + "base": null, + "refs": { + "AcknowledgeThirdPartyJobInput$jobId": "

    The unique system-generated ID of the job.

    ", + "GetThirdPartyJobDetailsInput$jobId": "

    The unique system-generated ID used for identifying the job.

    ", + "PutThirdPartyJobFailureResultInput$jobId": "

    The ID of the job that failed. This is the same ID returned from PollForThirdPartyJobs.

    ", + "PutThirdPartyJobSuccessResultInput$jobId": "

    The ID of the job that successfully completed. This is the same ID returned from PollForThirdPartyJobs.

    ", + "ThirdPartyJobDetails$id": "

    The identifier used to identify the job details in AWS CodePipeline.

    " + } + }, + "ThirdPartyJobList": { + "base": null, + "refs": { + "PollForThirdPartyJobsOutput$jobs": "

    Information about the jobs to take action on.

    " + } + }, + "Timestamp": { + "base": null, + "refs": { + "ActionExecution$lastStatusChange": "

    The last status change of the action.

    ", + "ActionRevision$created": "

    The date and time when the most recent version of the action was created, in timestamp format.

    ", + "GetPipelineStateOutput$created": "

    The date and time the pipeline was created, in timestamp format.

    ", + "GetPipelineStateOutput$updated": "

    The date and time the pipeline was last updated, in timestamp format.

    ", + "PipelineSummary$created": "

    The date and time the pipeline was created, in timestamp format.

    ", + "PipelineSummary$updated": "

    The date and time of the last update to the pipeline, in timestamp format.

    ", + "PutApprovalResultOutput$approvedAt": "

    The timestamp showing when the approval or rejection was submitted.

    " + } + }, + "TransitionState": { + "base": "

    Represents information about the state of transitions between one stage and another stage.

    ", + "refs": { + "StageState$inboundTransitionState": "

    The state of the inbound transition, which is either enabled or disabled.

    " + } + }, + "UpdatePipelineInput": { + "base": "

    Represents the input of an update pipeline action.

    ", + "refs": { + } + }, + "UpdatePipelineOutput": { + "base": "

    Represents the output of an update pipeline action.

    ", + "refs": { + } + }, + "Url": { + "base": null, + "refs": { + "ActionExecution$externalExecutionUrl": "

    The URL of a resource external to AWS that will be used when running the action, for example an external repository URL.

    ", + "ActionState$entityUrl": "

    A URL link for more information about the state of the action, such as a deployment group details page.

    ", + "ActionState$revisionUrl": "

    A URL link for more information about the revision, such as a commit details page.

    ", + "ActionTypeSettings$thirdPartyConfigurationUrl": "

    The URL of a sign-up page where users can sign up for an external service and perform initial configuration of the action provided by that service.

    " + } + }, + "UrlTemplate": { + "base": null, + "refs": { + "ActionTypeSettings$entityUrlTemplate": "

    The URL returned to the AWS CodePipeline console that provides a deep link to the resources of the external system, such as the configuration page for an AWS CodeDeploy deployment group. This link is provided as part of the action display within the pipeline.

    ", + "ActionTypeSettings$executionUrlTemplate": "

    The URL returned to the AWS CodePipeline console that contains a link to the top-level landing page for the external system, such as the console page for AWS CodeDeploy. This link is shown on the pipeline view page in the AWS CodePipeline console and provides a link to the execution entity of the external action.

    ", + "ActionTypeSettings$revisionUrlTemplate": "

    The URL returned to the AWS CodePipeline console that contains a link to the page where customers can update or change the configuration of the external action.

    " + } + }, + "ValidationException": { + "base": "

    The validation was specified in an invalid format.

    ", + "refs": { + } + }, + "Version": { + "base": null, + "refs": { + "ActionTypeId$version": "

    A string that identifies the action type.

    ", + "CreateCustomActionTypeInput$version": "

    The version number of the custom action.

    ", + "DeleteCustomActionTypeInput$version": "

    The version of the custom action to delete.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/codepipeline/2015-07-09/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/codepipeline/2015-07-09/examples-1.json new file mode 100644 index 000000000..5732bc805 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/codepipeline/2015-07-09/examples-1.json @@ -0,0 +1,902 @@ +{ + "version": "1.0", + "examples": { + "AcknowledgeJob": [ + { + "input": { + "jobId": "11111111-abcd-1111-abcd-111111abcdef", + "nonce": "3" + }, + "output": { + "status": "InProgress" + }, + "comments": { + "input": { + "jobId": "Use the PollforJobs API to determine the ID of the job.", + "nonce": "Use the PollforJobs API to determine the nonce for the job." + }, + "output": { + "status": "Valid values include Created, Queued, Dispatched, InProgress, TimedOut, Suceeded, and Failed. Completed jobs are removed from the system after a short period of time." + } + }, + "description": "This example returns information about a specified job, including the status of that job if it exists. This is only used for job workers and custom actions in AWS CodePipeline. To determine the value of nonce and the job ID, use PollForJobs.", + "id": "acknowledge-a-job-for-a-custom-action-1449100979484", + "title": "Acknowledge a job for a custom action" + } + ], + "CreateCustomActionType": [ + { + "input": { + "version": "1", + "category": "Build", + "configurationProperties": [ + { + "name": "MyJenkinsExampleBuildProject", + "type": "String", + "required": true, + "key": true, + "description": "The name of the build project must be provided when this action is added to the pipeline.", + "queryable": false, + "secret": false + } + ], + "inputArtifactDetails": { + "maximumCount": 1, + "minimumCount": 0 + }, + "outputArtifactDetails": { + "maximumCount": 1, + "minimumCount": 0 + }, + "provider": "MyBuild-ProviderName", + "settings": { + "entityUrlTemplate": "https://192.0.2.4/job/{Config:ProjectName}/", + "executionUrlTemplate": "https://192.0.2.4/job/{Config:ProjectName}/lastSuccessfulBuild/{ExternalExecutionId}/", + "revisionUrlTemplate": "none" + } + }, + "output": { + "actionType": { + "actionConfigurationProperties": [ + { + "name": "MyJenkinsExampleBuildProject", + "required": true, + "key": true, + "description": "The name of the build project must be provided when this action is added to the pipeline.", + "queryable": false, + "secret": false + } + ], + "id": { + "version": "1", + "category": "Build", + "owner": "Custom", + "provider": "MyBuild-ProviderName" + }, + "inputArtifactDetails": { + "maximumCount": 1, + "minimumCount": 0 + }, + "outputArtifactDetails": { + "maximumCount": 1, + "minimumCount": 0 + }, + "settings": { + "entityUrlTemplate": "https://192.0.2.4/job/{Config:ProjectName}/", + "executionUrlTemplate": "https://192.0.2.4/job/{Config:ProjectName}/lastSuccessfulBuild/{ExternalExecutionId}/", + "revisionUrlTemplate": "none" + } + } + }, + "comments": { + "input": { + "version": "A new custom action always has a version of 1. This is required.", + "configurationProperties": "The text in description will be displayed to your users, and can contain a maximum of 2048 characters. The value for name in configurationProperties is the name of the project, if any. In this example, this is the name of the build project on the Jenkins server", + "inputArtifactDetails": "This is the minimum and maximum number of artifacts allowed as inputs for the action. 
For more information about input and output artifacts, see Pipeline Structure Reference in the AWS CodePipeline User Guide.", + "outputArtifactDetails": "This is the minimum and maximum number of artifacts allowed as outputs for the action. For more information about input and output artifacts, see Pipeline Structure Reference in the AWS CodePipeline User Guide.", + "provider": "In this example, this is the name given to the provider field when configuring the AWS CodePipeline Plugin for Jenkins. For more information, see the Four-Stage Pipeline Tutorial in the AWS CodePipeline User Guide.", + "settings": "entityUrlTemplate is the static link that provides information about the service provider for the action. In the example, the build system includes a static link to the Jenkins build project at the specific server address. Similarly, executionUrlTemplate is the dynamic link that will be updated with information about the current or most recent run of the action." + }, + "output": { + } + }, + "description": "This example creates a build custom action for AWS CodePipeline for a Jenkins build project. For more information about the requirements for creating a custom action, including the structure of the JSON file commonly used to help create custom actions, see Create a Custom Action in the AWS CodePipeline User Guide. For a walkthrough of creating a custom action in a pipeline, follow the Four-Stage Pipeline Tutorial.", + "id": "create-a-custom-action-1449103500903", + "title": "Create a custom action" + } + ], + "CreatePipeline": [ + { + "input": { + "pipeline": { + "version": 1, + "name": "MySecondPipeline", + "artifactStore": { + "type": "S3", + "location": "codepipeline-us-east-1-11EXAMPLE11" + }, + "roleArn": "arn:aws:iam::111111111111:role/AWS-CodePipeline-Service", + "stages": [ + { + "name": "Source", + "actions": [ + { + "name": "Source", + "actionTypeId": { + "version": "1", + "category": "Source", + "owner": "AWS", + "provider": "S3" + }, + "configuration": { + "S3Bucket": "awscodepipeline-demo-bucket", + "S3ObjectKey": "aws-codepipeline-s3-aws-codedeploy_linux.zip" + }, + "inputArtifacts": [ + + ], + "outputArtifacts": [ + { + "name": "MyApp" + } + ], + "runOrder": 1 + } + ] + }, + { + "name": "Beta", + "actions": [ + { + "name": "CodePipelineDemoFleet", + "actionTypeId": { + "version": "1", + "category": "Deploy", + "owner": "AWS", + "provider": "CodeDeploy" + }, + "configuration": { + "ApplicationName": "CodePipelineDemoApplication", + "DeploymentGroupName": "CodePipelineDemoFleet" + }, + "inputArtifacts": [ + { + "name": "MyApp" + } + ], + "outputArtifacts": [ + + ], + "runOrder": 1 + } + ] + } + ] + } + }, + "output": { + "pipeline": { + "version": 1, + "name": "MySecondPipeline", + "artifactStore": { + "type": "S3", + "location": "codepipeline-us-east-1-11EXAMPLE11" + }, + "roleArn": "arn:aws:iam::111111111111:role/AWS-CodePipeline-Service", + "stages": [ + { + "name": "Source", + "actions": [ + { + "name": "Source", + "actionTypeId": { + "version": "1", + "category": "Source", + "owner": "AWS", + "provider": "S3" + }, + "configuration": { + "S3Bucket": "awscodepipeline-demo-bucket", + "S3ObjectKey": "aws-codepipeline-s3-aws-codedeploy_linux.zip" + }, + "inputArtifacts": [ + + ], + "outputArtifacts": [ + { + "name": "MyApp" + } + ], + "runOrder": 1 + } + ] + }, + { + "name": "Beta", + "actions": [ + { + "name": "CodePipelineDemoFleet", + "actionTypeId": { + "version": "1", + "category": "Deploy", + "owner": "AWS", + "provider": "CodeDeploy" + }, + "configuration": { + 
"ApplicationName": "CodePipelineDemoApplication", + "DeploymentGroupName": "CodePipelineDemoFleet" + }, + "inputArtifacts": [ + { + "name": "MyApp" + } + ], + "outputArtifacts": [ + + ], + "runOrder": 1 + } + ] + } + ] + } + }, + "comments": { + "input": { + "version": "The version number of the pipeline. All new pipelines have a version number of 1. This number is incremented automatically every time a pipeline is updated.", + "name": "Pipeline names must be unique within a user's AWS account.", + "artifactStore": "This Amazon S3 bucket is where artifacts for the pipeline will be stored as the pipeline runs. For more information about the Amazon S3 bucket used as the artifact store, see Concepts in the AWS CodePipeline User Guide.", + "roleArn": "This is the ARN for the service role created for AWS CodePipeline.", + "stages": "Each stage block defines a different stage in the pipeline. Pipelines can have up to ten stages, with up to twenty actions per stage." + }, + "output": { + } + }, + "description": "This example creates a simple two-stage pipeline in AWS CodePipeline that uses an Amazon S3 bucket for its source stage and deploys code using AWS CodeDeploy. For more information about the requirements for creating a pipeline, including the structure of the JSON file commonly used to create a pipeline, see \"Create a Pipeline\" in the AWS CodePipeline User Guide.", + "id": "create-a-pipeline-1449162214392", + "title": "Create a pipeline" + } + ], + "DeleteCustomActionType": [ + { + "input": { + "version": "1", + "category": "Build", + "provider": "MyJenkinsProviderName" + }, + "comments": { + "input": { + "version": "This is the current version number of the custom action.", + "category": "This is the type of action that the custom action is, for example build or test.", + "provider": "This is the provider of the service used in the custom action. In this example, the custom action is for a Jenkins build, and the name of the provider is the one configured in the AWS CodePipeline Plugin for Jenkins" + }, + "output": { + } + }, + "description": "This example deletes a custom action in AWS CodePipeline by specifiying the action type, provider name, and version number of the action to be deleted. Only used for custom actions. Use the list-action-types command to view the correct values for category, version, and provider. After a custom action is deleted, PollForJobs for the custom action will fail. Warning: You cannot recreate a custom action after it has been deleted unless you increase the version number of the action.", + "id": "delete-a-custom-action-1449163239567", + "title": "Delete a custom action" + } + ], + "DeletePipeline": [ + { + "input": { + "name": "MySecondPipeline" + }, + "comments": { + "input": { + "name": "The name of the pipeline to delete." + }, + "output": { + } + }, + "description": "This example deletes a pipeline named MySecondPipeline from AWS CodePipeline. 
Use ListPipelines to view a list of pipelines associated with your AWS account.", + "id": "delete-a-pipeline-1449163893541", + "title": "Delete a pipeline" + } + ], + "DisableStageTransition": [ + { + "input": { + "pipelineName": "MyFirstPipeline", + "reason": "An example reason", + "stageName": "Beta", + "transitionType": "Inbound" + }, + "comments": { + "input": { + "transitionType": "Valid values are Inbound, which prevents artifacts from transitioning into the stage and being processed by the actions in that stage, or Outbound, which prevents artifacts from transitioning out of the stage after they have been processed by the actions in that stage." + }, + "output": { + } + }, + "description": "This example disables transitions into the Beta stage of the MyFirstPipeline pipeline in AWS CodePipeline.", + "id": "disable-transitions-into-or-out-of-a-stage-1449164517291", + "title": "Disable transitions into or out of a stage" + } + ], + "EnableStageTransition": [ + { + "input": { + "pipelineName": "MyFirstPipeline", + "stageName": "Beta", + "transitionType": "Inbound" + }, + "comments": { + "input": { + "transitionType": "Valid values are Inbound, which allows artifacts to transition into the stage and be processed by the actions in that stage, or Outbound, which allows artifacts to transition out of the stage after they have been processed by the actions in that stage." + }, + "output": { + } + }, + "description": "This example enables transitions into the Beta stage of the MyFirstPipeline pipeline in AWS CodePipeline.", + "id": "enable-transitions-into-or-out-of-a-stage-1449164924423", + "title": "Enable transitions into or out of a stage" + } + ], + "GetJobDetails": [ + { + "input": { + "jobId": "11111111-abcd-1111-abcd-111111abcdef" + }, + "output": { + "jobDetails": { + "accountId": "111111111111", + "data": { + "actionConfiguration": { + "configuration": { + "ProjectName": "MyJenkinsExampleTestProject" + } + }, + "actionTypeId": { + "version": "1", + "category": "Test", + "owner": "Custom", + "provider": "MyJenkinsProviderName" + }, + "artifactCredentials": { + "accessKeyId": "AKIAIOSFODNN7EXAMPLE", + "secretAccessKey": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + "sessionToken": "fICCQD6m7oRw0uXOjANBgkqhkiG9w0BAQUFADCBiDELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAldBMRAwDgYDVQQHEwdTZWF0dGxlMQ8wDQYDVQQKEwZBbWF6b24xFDASBgNVBAsTC0lBTSBDb25zb2xlMRIwEAYDVQQDEwlUZXN0Q2lsYWMxHzAdBgkqhkiG9w0BCQEWEG5vb25lQGFtYXpvbi5jb20wHhcNMTEwNDI1MjA0NTIxWhcNMTIwNDI0MjA0NTIxWjCBiDELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAldBMRAwDgYDVQQHEwdTZWF0dGxlMQ8wDQYDVQQKEwZBbWF6b24xFDASBgNVBAsTC0lBTSBDb25zb2xlMRIwEAYDVQQDEwlUZXN0Q2lsYWMxHzAdBgkqhkiG9w0BCQEWEG5vb25lQGFtYXpvbi5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMaK0dn+a4GmWIWJ21uUSfwfEvySWtC2XADZ4nB+BLYgVIk60CpiwsZ3G93vUEIO3IyNoH/f0wYK8m9TrDHudUZg3qX4waLG5M43q7Wgc/MbQITxOUSQv7c7ugFFDzQGBzZswY6786m86gpEIbb3OhjZnzcvQAaRHhdlQWIMm2nrAgMBAAEwDQYJKoZIhvcNAQEFBQADgYEAtCu4nUhVVxYUntneD9+h8Mg9q6q+auNKyExzyLwaxlAoo7TJHidbtS4J5iNmZgXL0FkbFFBjvSfpJIlJ00zbhNYS5f6GuoEDmFJl0ZxBHjJnyp378OD8uTs7fLvjx79LjSTbNYiytVbZPQUQ5Yaxu2jXnimvw3rrszlaEXAMPLE=" + }, + "inputArtifacts": [ + { + "name": "MyAppBuild", + "location": { + "type": "S3", + "s3Location": { + "bucketName": "codepipeline-us-east-1-11EXAMPLE11", + "objectKey": "MySecondPipeline/MyAppBuild/EXAMPLE" + } + } + } + ], + "outputArtifacts": [ + + ], + "pipelineContext": { + "action": { + "name": "MyJenkinsTest-Action" + }, + "pipelineName": "MySecondPipeline", + "stage": { + "name": "Testing" + } + } + }, + "id": 
"11111111-abcd-1111-abcd-111111abcdef" + } + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "This example returns details about a job whose ID is represented by f4f4ff82-2d11-EXAMPLE. This command is only used for custom actions. When this command is called, AWS CodePipeline returns temporary credentials for the Amazon S3 bucket used to store artifacts for the pipeline, if required for the custom action. This command will also return any secret values defined for the action, if any are defined.", + "id": "get-the-details-of-a-job-1449183680273", + "title": "Get the details of a job" + } + ], + "GetPipeline": [ + { + "input": { + "version": 123, + "name": "MyFirstPipeline" + }, + "output": { + "pipeline": { + "version": 1, + "name": "MyFirstPipeline", + "artifactStore": { + "type": "S3", + "location": "codepipeline-us-east-1-11EXAMPLE11" + }, + "roleArn": "arn:aws:iam::111111111111:role/AWS-CodePipeline-Service", + "stages": [ + { + "name": "Source", + "actions": [ + { + "name": "Source", + "actionTypeId": { + "version": "1", + "category": "Source", + "owner": "AWS", + "provider": "S3" + }, + "configuration": { + "S3Bucket": "awscodepipeline-demo-bucket", + "S3ObjectKey": "aws-codepipeline-s3-aws-codedeploy_linux.zip" + }, + "inputArtifacts": [ + + ], + "outputArtifacts": [ + { + "name": "MyApp" + } + ], + "runOrder": 1 + } + ] + }, + { + "name": "Beta", + "actions": [ + { + "name": "CodePipelineDemoFleet", + "actionTypeId": { + "version": "1", + "category": "Deploy", + "owner": "AWS", + "provider": "CodeDeploy" + }, + "configuration": { + "ApplicationName": "CodePipelineDemoApplication", + "DeploymentGroupName": "CodePipelineDemoFleet" + }, + "inputArtifacts": [ + { + "name": "MyApp" + } + ], + "outputArtifacts": [ + + ], + "runOrder": 1 + } + ] + } + ] + } + }, + "comments": { + "input": { + "version": "This is an optional parameter. If you do not specify a version, the most current version of the pipeline structure is returned." + }, + "output": { + } + }, + "description": "This example returns the structure of a pipeline named MyFirstPipeline.", + "id": "view-the-structure-of-a-pipeline-1449184156329", + "title": "View the structure of a pipeline" + } + ], + "GetPipelineState": [ + { + "input": { + "name": "MyFirstPipeline" + }, + "output": { + "created": "1446137312.204", + "pipelineName": "MyFirstPipeline", + "pipelineVersion": 1, + "stageStates": [ + { + "actionStates": [ + { + "actionName": "Source", + "entityUrl": "https://console.aws.amazon.com/s3/home?#", + "latestExecution": { + "lastStatusChange": "1446137358.328", + "status": "Succeeded" + } + } + ], + "stageName": "Source" + }, + { + "actionStates": [ + { + "actionName": "CodePipelineDemoFleet", + "entityUrl": "https://console.aws.amazon.com/codedeploy/home?#/applications/CodePipelineDemoApplication/deployment-groups/CodePipelineDemoFleet", + "latestExecution": { + "externalExecutionId": "d-EXAMPLE", + "externalExecutionUrl": "https://console.aws.amazon.com/codedeploy/home?#/deployments/d-EXAMPLE", + "lastStatusChange": "1446137493.131", + "status": "Succeeded", + "summary": "Deployment Succeeded" + } + } + ], + "inboundTransitionState": { + "enabled": true + }, + "stageName": "Beta" + } + ], + "updated": "1446137312.204" + }, + "comments": { + "input": { + }, + "output": { + "created": "The value for created and all other time- and date-related information such as lastStatusChange, is returned in timestamp format." 
+ } + }, + "description": "This example returns the most recent state of a pipeline named MyFirstPipeline.", + "id": "view-information-about-the-state-of-a-pipeline-1449184486550", + "title": "View information about the state of a pipeline" + } + ], + "ListActionTypes": [ + { + "input": { + "actionOwnerFilter": "Custom", + "nextToken": "" + }, + "output": { + "actionTypes": [ + { + "actionConfigurationProperties": [ + { + "name": "MyJenkinsExampleBuildProject", + "required": true, + "key": true, + "queryable": true, + "secret": false + } + ], + "id": { + "version": "1", + "category": "Build", + "owner": "Custom", + "provider": "MyJenkinsProviderName" + }, + "inputArtifactDetails": { + "maximumCount": 5, + "minimumCount": 0 + }, + "outputArtifactDetails": { + "maximumCount": 5, + "minimumCount": 0 + }, + "settings": { + "entityUrlTemplate": "http://54.174.131.118/job/{Config:ProjectName}", + "executionUrlTemplate": "http://54.174.131.118/job/{Config:ProjectName}/{ExternalExecutionId}" + } + }, + { + "actionConfigurationProperties": [ + { + "name": "MyJenkinsExampleTestProject", + "required": true, + "key": true, + "queryable": true, + "secret": false + } + ], + "id": { + "version": "1", + "category": "Build", + "owner": "Custom", + "provider": "MyJenkinsProviderName" + }, + "inputArtifactDetails": { + "maximumCount": 5, + "minimumCount": 0 + }, + "outputArtifactDetails": { + "maximumCount": 5, + "minimumCount": 0 + }, + "settings": { + "entityUrlTemplate": "http://54.210.140.46/job/{Config:ProjectName}", + "executionUrlTemplate": "http://54.210.140.46/job/{Config:ProjectName}/{ExternalExecutionId}" + } + } + ], + "nextToken": "" + }, + "comments": { + "input": { + }, + "output": { + "actionOwnerFilter": "actionOwnerFilter is optional. It is used to filter the response to actions created by a specific entity. Valid values include AWS, ThirdParty, and Custom.", + "nextToken": "nextToken is optional. Its operation is reserved for future use." + } + }, + "description": "Used by itself, ListActionTypes returns the structure of all AWS CodePipeline actions available to your AWS account. This example uses the actionOwnerFilter option to limit the response to include only the structure of all custom actions defined for the account.", + "id": "view-a-summary-of-all-action-types-associated-with-your-account-1455218918202", + "title": "View a summary of all action types associated with your account" + } + ], + "ListPipelines": [ + { + "input": { + }, + "output": { + "nextToken": "", + "pipelines": [ + { + "version": 1, + "name": "MyFirstPipeline", + "created": "1444681408.094", + "updated": "1444681408.094" + }, + { + "version": 3, + "name": "MySecondPipeline", + "created": "1443046290.003", + "updated": "1443048299.639" + } + ] + }, + "comments": { + "input": { + "nextToken": "nextToken is optional. Its operation is reserved for future use." + }, + "output": { + "pipelines": "Date and time information returned in the pipeline blocks, such as the values for created or updated, are in timestamp format." 
+ } + }, + "description": "This example lists all AWS CodePipeline pipelines associated with the user's AWS account.", + "id": "view-a-summary-of-all-pipelines-associated-with-your-account-1449185747807", + "title": "View a summary of all pipelines associated with your account" + } + ], + "PollForJobs": [ + { + "input": { + "actionTypeId": { + "version": "1", + "category": "Test", + "owner": "Custom", + "provider": "MyJenkinsProviderName" + }, + "maxBatchSize": 5, + "queryParam": { + "ProjectName": "MyJenkinsTestProj" + } + }, + "output": { + "jobs": [ + { + "accountId": "111111111111", + "data": { + "actionConfiguration": { + "configuration": { + "ProjectName": "MyJenkinsTestProj" + } + }, + "actionTypeId": { + "version": "1", + "category": "Test", + "owner": "Custom", + "provider": "MyJenkinsProviderName" + }, + "artifactCredentials": { + "accessKeyId": "AKIAIOSFODNN7EXAMPLE", + "secretAccessKey": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + "sessionToken": "fICCQD6m7oRw0uXOjANBgkqhkiG9w0BAQUFADCBiDELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAldBMRAwDgYDVQQHEwdTZWF0dGxlMQ8wDQYDVQQKEwZBbWF6b24xFDASBgNVBAsTC0lBTSBDb25zb2xlMRIwEAYDVQQDEwlUZXN0Q2lsYWMxHzAdBgkqhkiG9w0BCQEWEG5vb25lQGFtYXpvbi5jb20wHhcNMTEwNDI1MjA0NTIxWhcNMTIwNDI0MjA0NTIxWjCBiDELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAldBMRAwDgYDVQQHEwdTZWF0dGxlMQ8wDQYDVQQKEwZBbWF6b24xFDASBgNVBAsTC0lBTSBDb25zb2xlMRIwEAYDVQQDEwlUZXN0Q2lsYWMxHzAdBgkqhkiG9w0BCQEWEG5vb25lQGFtYXpvbi5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMaK0dn+a4GmWIWJ21uUSfwfEvySWtC2XADZ4nB+BLYgVIk60CpiwsZ3G93vUEIO3IyNoH/f0wYK8m9TrDHudUZg3qX4waLG5M43q7Wgc/MbQITxOUSQv7c7ugFFDzQGBzZswY6786m86gpEIbb3OhjZnzcvQAaRHhdlQWIMm2nrAgMBAAEwDQYJKoZIhvcNAQEFBQADgYEAtCu4nUhVVxYUntneD9+h8Mg9q6q+auNKyExzyLwaxlAoo7TJHidbtS4J5iNmZgXL0FkbFFBjvSfpJIlJ00zbhNYS5f6GuoEDmFJl0ZxBHjJnyp378OD8uTs7fLvjx79LjSTbNYiytVbZPQUQ5Yaxu2jXnimvw3rrszlaEXAMPLE=" + }, + "inputArtifacts": [ + { + "name": "MyAppBuild", + "location": { + "type": "S3", + "s3Location": { + "bucketName": "codepipeline-us-east-1-11EXAMPLE11", + "objectKey": "MySecondPipeline/MyAppBuild/EXAMPLE" + } + } + } + ], + "outputArtifacts": [ + + ], + "pipelineContext": { + "action": { + "name": "MyJenkinsTest-Action" + }, + "pipelineName": "MySecondPipeline", + "stage": { + "name": "Testing" + } + } + }, + "id": "11111111-abcd-1111-abcd-111111abcdef", + "nonce": "3" + } + ] + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "This example returns information about any jobs for a job worker to act upon. This command is only used for custom actions. When this command is called, AWS CodePipeline returns temporary credentials for the Amazon S3 bucket used to store artifacts for the pipeline. 
This command will also return any secret values defined for the action, if any are defined.", + "id": "view-any-available-jobs-1449186054484", + "title": "View any available jobs" + } + ], + "StartPipelineExecution": [ + { + "input": { + "name": "MyFirstPipeline" + }, + "output": { + "pipelineExecutionId": "11111111-abcd-1111-abcd-111111abcdef" + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "This example runs the latest revision present in the source stage of a pipeline through the pipeline named \"MyFirstPipeline\".", + "id": "run-the-latest-revision-through-a-pipeline-1449186732433", + "title": "Run the latest revision through a pipeline" + } + ], + "UpdatePipeline": [ + { + "input": { + "pipeline": { + "version": 2, + "name": "MyFirstPipeline", + "artifactStore": { + "type": "S3", + "location": "codepipeline-us-east-1-11EXAMPLE11" + }, + "roleArn": "arn:aws:iam::111111111111:role/AWS-CodePipeline-Service", + "stages": [ + { + "name": "Source", + "actions": [ + { + "name": "Source", + "actionTypeId": { + "version": "1", + "category": "Source", + "owner": "AWS", + "provider": "S3" + }, + "configuration": { + "S3Bucket": "awscodepipeline-demo-bucket2", + "S3ObjectKey": "aws-codepipeline-s3-aws-codedeploy_linux.zip" + }, + "inputArtifacts": [ + + ], + "outputArtifacts": [ + { + "name": "MyApp" + } + ], + "runOrder": 1 + } + ] + }, + { + "name": "Beta", + "actions": [ + { + "name": "CodePipelineDemoFleet", + "actionTypeId": { + "version": "1", + "category": "Deploy", + "owner": "AWS", + "provider": "CodeDeploy" + }, + "configuration": { + "ApplicationName": "CodePipelineDemoApplication", + "DeploymentGroupName": "CodePipelineDemoFleet" + }, + "inputArtifacts": [ + { + "name": "MyApp" + } + ], + "outputArtifacts": [ + + ], + "runOrder": 1 + } + ] + } + ] + } + }, + "output": { + "pipeline": { + "version": 3, + "name": "MyFirstPipeline", + "artifactStore": { + "type": "S3", + "location": "codepipeline-us-east-1-11EXAMPLE11" + }, + "roleArn": "arn:aws:iam::111111111111:role/AWS-CodePipeline-Service", + "stages": [ + { + "name": "Source", + "actions": [ + { + "name": "Source", + "actionTypeId": { + "version": "1", + "category": "Source", + "owner": "AWS", + "provider": "S3" + }, + "configuration": { + "S3Bucket": "awscodepipeline-demo-bucket2", + "S3ObjectKey": "aws-codepipeline-s3-aws-codedeploy_linux.zip" + }, + "inputArtifacts": [ + + ], + "outputArtifacts": [ + { + "name": "MyApp" + } + ], + "runOrder": 1 + } + ] + }, + { + "name": "Beta", + "actions": [ + { + "name": "CodePipelineDemoFleet", + "actionTypeId": { + "version": "1", + "category": "Deploy", + "owner": "AWS", + "provider": "CodeDeploy" + }, + "configuration": { + "ApplicationName": "CodePipelineDemoApplication", + "DeploymentGroupName": "CodePipelineDemoFleet" + }, + "inputArtifacts": [ + { + "name": "MyApp" + } + ], + "outputArtifacts": [ + + ], + "runOrder": 1 + } + ] + } + ] + } + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "This example updates the structure of a pipeline. 
The entire structure of the pipeline must be supplied, either by passing all of the parameters, or by using a pre-defined JSON file.", + "id": "update-the-structure-of-a-pipeline-1449186881322", + "title": "Update the structure of a pipeline" + } + ] + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cognito-identity/2014-06-30/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cognito-identity/2014-06-30/api-2.json new file mode 100644 index 000000000..095f84448 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cognito-identity/2014-06-30/api-2.json @@ -0,0 +1,859 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2014-06-30", + "endpointPrefix":"cognito-identity", + "jsonVersion":"1.1", + "protocol":"json", + "serviceFullName":"Amazon Cognito Identity", + "signatureVersion":"v4", + "targetPrefix":"AWSCognitoIdentityService" + }, + "operations":{ + "CreateIdentityPool":{ + "name":"CreateIdentityPool", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateIdentityPoolInput"}, + "output":{"shape":"IdentityPool"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"ResourceConflictException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"}, + {"shape":"LimitExceededException"} + ] + }, + "DeleteIdentities":{ + "name":"DeleteIdentities", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteIdentitiesInput"}, + "output":{"shape":"DeleteIdentitiesResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"} + ] + }, + "DeleteIdentityPool":{ + "name":"DeleteIdentityPool", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteIdentityPoolInput"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"} + ] + }, + "DescribeIdentity":{ + "name":"DescribeIdentity", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeIdentityInput"}, + "output":{"shape":"IdentityDescription"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"} + ] + }, + "DescribeIdentityPool":{ + "name":"DescribeIdentityPool", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeIdentityPoolInput"}, + "output":{"shape":"IdentityPool"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"} + ] + }, + "GetCredentialsForIdentity":{ + "name":"GetCredentialsForIdentity", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetCredentialsForIdentityInput"}, + "output":{"shape":"GetCredentialsForIdentityResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"ResourceConflictException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InvalidIdentityPoolConfigurationException"}, + {"shape":"InternalErrorException"}, + {"shape":"ExternalServiceException"} + ] + }, + "GetId":{ + "name":"GetId", + "http":{ + "method":"POST", 
+ "requestUri":"/" + }, + "input":{"shape":"GetIdInput"}, + "output":{"shape":"GetIdResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"ResourceConflictException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"}, + {"shape":"LimitExceededException"}, + {"shape":"ExternalServiceException"} + ] + }, + "GetIdentityPoolRoles":{ + "name":"GetIdentityPoolRoles", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetIdentityPoolRolesInput"}, + "output":{"shape":"GetIdentityPoolRolesResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"ResourceConflictException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"} + ] + }, + "GetOpenIdToken":{ + "name":"GetOpenIdToken", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetOpenIdTokenInput"}, + "output":{"shape":"GetOpenIdTokenResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"ResourceConflictException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"}, + {"shape":"ExternalServiceException"} + ] + }, + "GetOpenIdTokenForDeveloperIdentity":{ + "name":"GetOpenIdTokenForDeveloperIdentity", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetOpenIdTokenForDeveloperIdentityInput"}, + "output":{"shape":"GetOpenIdTokenForDeveloperIdentityResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"ResourceConflictException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"}, + {"shape":"DeveloperUserAlreadyRegisteredException"} + ] + }, + "ListIdentities":{ + "name":"ListIdentities", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListIdentitiesInput"}, + "output":{"shape":"ListIdentitiesResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"} + ] + }, + "ListIdentityPools":{ + "name":"ListIdentityPools", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListIdentityPoolsInput"}, + "output":{"shape":"ListIdentityPoolsResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"} + ] + }, + "LookupDeveloperIdentity":{ + "name":"LookupDeveloperIdentity", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"LookupDeveloperIdentityInput"}, + "output":{"shape":"LookupDeveloperIdentityResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"ResourceConflictException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"} + ] + }, + "MergeDeveloperIdentities":{ + "name":"MergeDeveloperIdentities", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"MergeDeveloperIdentitiesInput"}, + "output":{"shape":"MergeDeveloperIdentitiesResponse"}, + "errors":[ + 
{"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"ResourceConflictException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"} + ] + }, + "SetIdentityPoolRoles":{ + "name":"SetIdentityPoolRoles", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetIdentityPoolRolesInput"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"ResourceConflictException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"}, + {"shape":"ConcurrentModificationException"} + ] + }, + "UnlinkDeveloperIdentity":{ + "name":"UnlinkDeveloperIdentity", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UnlinkDeveloperIdentityInput"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"ResourceConflictException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"} + ] + }, + "UnlinkIdentity":{ + "name":"UnlinkIdentity", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UnlinkIdentityInput"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"ResourceConflictException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"}, + {"shape":"ExternalServiceException"} + ] + }, + "UpdateIdentityPool":{ + "name":"UpdateIdentityPool", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"IdentityPool"}, + "output":{"shape":"IdentityPool"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"ResourceConflictException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"LimitExceededException"} + ] + } + }, + "shapes":{ + "ARNString":{ + "type":"string", + "max":2048, + "min":20 + }, + "AccessKeyString":{"type":"string"}, + "AccountId":{ + "type":"string", + "max":15, + "min":1, + "pattern":"\\d+" + }, + "CognitoIdentityProvider":{ + "type":"structure", + "members":{ + "ProviderName":{"shape":"CognitoIdentityProviderName"}, + "ClientId":{"shape":"CognitoIdentityProviderClientId"} + } + }, + "CognitoIdentityProviderClientId":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[\\w_]+" + }, + "CognitoIdentityProviderList":{ + "type":"list", + "member":{"shape":"CognitoIdentityProvider"} + }, + "CognitoIdentityProviderName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[\\w._:/-]+" + }, + "ConcurrentModificationException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "CreateIdentityPoolInput":{ + "type":"structure", + "required":[ + "IdentityPoolName", + "AllowUnauthenticatedIdentities" + ], + "members":{ + "IdentityPoolName":{"shape":"IdentityPoolName"}, + "AllowUnauthenticatedIdentities":{"shape":"IdentityPoolUnauthenticated"}, + "SupportedLoginProviders":{"shape":"IdentityProviders"}, + "DeveloperProviderName":{"shape":"DeveloperProviderName"}, + "OpenIdConnectProviderARNs":{"shape":"OIDCProviderList"}, + "CognitoIdentityProviders":{"shape":"CognitoIdentityProviderList"}, + "SamlProviderARNs":{"shape":"SAMLProviderList"} + } + }, + 
"Credentials":{ + "type":"structure", + "members":{ + "AccessKeyId":{"shape":"AccessKeyString"}, + "SecretKey":{"shape":"SecretKeyString"}, + "SessionToken":{"shape":"SessionTokenString"}, + "Expiration":{"shape":"DateType"} + } + }, + "DateType":{"type":"timestamp"}, + "DeleteIdentitiesInput":{ + "type":"structure", + "required":["IdentityIdsToDelete"], + "members":{ + "IdentityIdsToDelete":{"shape":"IdentityIdList"} + } + }, + "DeleteIdentitiesResponse":{ + "type":"structure", + "members":{ + "UnprocessedIdentityIds":{"shape":"UnprocessedIdentityIdList"} + } + }, + "DeleteIdentityPoolInput":{ + "type":"structure", + "required":["IdentityPoolId"], + "members":{ + "IdentityPoolId":{"shape":"IdentityPoolId"} + } + }, + "DescribeIdentityInput":{ + "type":"structure", + "required":["IdentityId"], + "members":{ + "IdentityId":{"shape":"IdentityId"} + } + }, + "DescribeIdentityPoolInput":{ + "type":"structure", + "required":["IdentityPoolId"], + "members":{ + "IdentityPoolId":{"shape":"IdentityPoolId"} + } + }, + "DeveloperProviderName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[\\w._-]+" + }, + "DeveloperUserAlreadyRegisteredException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "DeveloperUserIdentifier":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"[\\w.@_-]+" + }, + "DeveloperUserIdentifierList":{ + "type":"list", + "member":{"shape":"DeveloperUserIdentifier"} + }, + "ErrorCode":{ + "type":"string", + "enum":[ + "AccessDenied", + "InternalServerError" + ] + }, + "ExternalServiceException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "GetCredentialsForIdentityInput":{ + "type":"structure", + "required":["IdentityId"], + "members":{ + "IdentityId":{"shape":"IdentityId"}, + "Logins":{"shape":"LoginsMap"}, + "CustomRoleArn":{"shape":"ARNString"} + } + }, + "GetCredentialsForIdentityResponse":{ + "type":"structure", + "members":{ + "IdentityId":{"shape":"IdentityId"}, + "Credentials":{"shape":"Credentials"} + } + }, + "GetIdInput":{ + "type":"structure", + "required":["IdentityPoolId"], + "members":{ + "AccountId":{"shape":"AccountId"}, + "IdentityPoolId":{"shape":"IdentityPoolId"}, + "Logins":{"shape":"LoginsMap"} + } + }, + "GetIdResponse":{ + "type":"structure", + "members":{ + "IdentityId":{"shape":"IdentityId"} + } + }, + "GetIdentityPoolRolesInput":{ + "type":"structure", + "required":["IdentityPoolId"], + "members":{ + "IdentityPoolId":{"shape":"IdentityPoolId"} + } + }, + "GetIdentityPoolRolesResponse":{ + "type":"structure", + "members":{ + "IdentityPoolId":{"shape":"IdentityPoolId"}, + "Roles":{"shape":"RolesMap"} + } + }, + "GetOpenIdTokenForDeveloperIdentityInput":{ + "type":"structure", + "required":[ + "IdentityPoolId", + "Logins" + ], + "members":{ + "IdentityPoolId":{"shape":"IdentityPoolId"}, + "IdentityId":{"shape":"IdentityId"}, + "Logins":{"shape":"LoginsMap"}, + "TokenDuration":{"shape":"TokenDuration"} + } + }, + "GetOpenIdTokenForDeveloperIdentityResponse":{ + "type":"structure", + "members":{ + "IdentityId":{"shape":"IdentityId"}, + "Token":{"shape":"OIDCToken"} + } + }, + "GetOpenIdTokenInput":{ + "type":"structure", + "required":["IdentityId"], + "members":{ + "IdentityId":{"shape":"IdentityId"}, + "Logins":{"shape":"LoginsMap"} + } + }, + "GetOpenIdTokenResponse":{ + "type":"structure", + "members":{ + "IdentityId":{"shape":"IdentityId"}, + "Token":{"shape":"OIDCToken"} + } + }, + "HideDisabled":{"type":"boolean"}, + 
"IdentitiesList":{ + "type":"list", + "member":{"shape":"IdentityDescription"} + }, + "IdentityDescription":{ + "type":"structure", + "members":{ + "IdentityId":{"shape":"IdentityId"}, + "Logins":{"shape":"LoginsList"}, + "CreationDate":{"shape":"DateType"}, + "LastModifiedDate":{"shape":"DateType"} + } + }, + "IdentityId":{ + "type":"string", + "max":55, + "min":1, + "pattern":"[\\w-]+:[0-9a-f-]+" + }, + "IdentityIdList":{ + "type":"list", + "member":{"shape":"IdentityId"}, + "max":60, + "min":1 + }, + "IdentityPool":{ + "type":"structure", + "required":[ + "IdentityPoolId", + "IdentityPoolName", + "AllowUnauthenticatedIdentities" + ], + "members":{ + "IdentityPoolId":{"shape":"IdentityPoolId"}, + "IdentityPoolName":{"shape":"IdentityPoolName"}, + "AllowUnauthenticatedIdentities":{"shape":"IdentityPoolUnauthenticated"}, + "SupportedLoginProviders":{"shape":"IdentityProviders"}, + "DeveloperProviderName":{"shape":"DeveloperProviderName"}, + "OpenIdConnectProviderARNs":{"shape":"OIDCProviderList"}, + "CognitoIdentityProviders":{"shape":"CognitoIdentityProviderList"}, + "SamlProviderARNs":{"shape":"SAMLProviderList"} + } + }, + "IdentityPoolId":{ + "type":"string", + "max":55, + "min":1, + "pattern":"[\\w-]+:[0-9a-f-]+" + }, + "IdentityPoolName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[\\w ]+" + }, + "IdentityPoolShortDescription":{ + "type":"structure", + "members":{ + "IdentityPoolId":{"shape":"IdentityPoolId"}, + "IdentityPoolName":{"shape":"IdentityPoolName"} + } + }, + "IdentityPoolUnauthenticated":{"type":"boolean"}, + "IdentityPoolsList":{ + "type":"list", + "member":{"shape":"IdentityPoolShortDescription"} + }, + "IdentityProviderId":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[\\w.;_/-]+" + }, + "IdentityProviderName":{ + "type":"string", + "max":128, + "min":1 + }, + "IdentityProviderToken":{ + "type":"string", + "max":50000, + "min":1 + }, + "IdentityProviders":{ + "type":"map", + "key":{"shape":"IdentityProviderName"}, + "value":{"shape":"IdentityProviderId"}, + "max":10 + }, + "InternalErrorException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true, + "fault":true + }, + "InvalidIdentityPoolConfigurationException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "InvalidParameterException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "ListIdentitiesInput":{ + "type":"structure", + "required":[ + "IdentityPoolId", + "MaxResults" + ], + "members":{ + "IdentityPoolId":{"shape":"IdentityPoolId"}, + "MaxResults":{"shape":"QueryLimit"}, + "NextToken":{"shape":"PaginationKey"}, + "HideDisabled":{"shape":"HideDisabled"} + } + }, + "ListIdentitiesResponse":{ + "type":"structure", + "members":{ + "IdentityPoolId":{"shape":"IdentityPoolId"}, + "Identities":{"shape":"IdentitiesList"}, + "NextToken":{"shape":"PaginationKey"} + } + }, + "ListIdentityPoolsInput":{ + "type":"structure", + "required":["MaxResults"], + "members":{ + "MaxResults":{"shape":"QueryLimit"}, + "NextToken":{"shape":"PaginationKey"} + } + }, + "ListIdentityPoolsResponse":{ + "type":"structure", + "members":{ + "IdentityPools":{"shape":"IdentityPoolsList"}, + "NextToken":{"shape":"PaginationKey"} + } + }, + "LoginsList":{ + "type":"list", + "member":{"shape":"IdentityProviderName"} + }, + "LoginsMap":{ + "type":"map", + 
"key":{"shape":"IdentityProviderName"}, + "value":{"shape":"IdentityProviderToken"}, + "max":10 + }, + "LookupDeveloperIdentityInput":{ + "type":"structure", + "required":["IdentityPoolId"], + "members":{ + "IdentityPoolId":{"shape":"IdentityPoolId"}, + "IdentityId":{"shape":"IdentityId"}, + "DeveloperUserIdentifier":{"shape":"DeveloperUserIdentifier"}, + "MaxResults":{"shape":"QueryLimit"}, + "NextToken":{"shape":"PaginationKey"} + } + }, + "LookupDeveloperIdentityResponse":{ + "type":"structure", + "members":{ + "IdentityId":{"shape":"IdentityId"}, + "DeveloperUserIdentifierList":{"shape":"DeveloperUserIdentifierList"}, + "NextToken":{"shape":"PaginationKey"} + } + }, + "MergeDeveloperIdentitiesInput":{ + "type":"structure", + "required":[ + "SourceUserIdentifier", + "DestinationUserIdentifier", + "DeveloperProviderName", + "IdentityPoolId" + ], + "members":{ + "SourceUserIdentifier":{"shape":"DeveloperUserIdentifier"}, + "DestinationUserIdentifier":{"shape":"DeveloperUserIdentifier"}, + "DeveloperProviderName":{"shape":"DeveloperProviderName"}, + "IdentityPoolId":{"shape":"IdentityPoolId"} + } + }, + "MergeDeveloperIdentitiesResponse":{ + "type":"structure", + "members":{ + "IdentityId":{"shape":"IdentityId"} + } + }, + "NotAuthorizedException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "OIDCProviderList":{ + "type":"list", + "member":{"shape":"ARNString"} + }, + "OIDCToken":{"type":"string"}, + "PaginationKey":{ + "type":"string", + "min":1, + "pattern":"[\\S]+" + }, + "QueryLimit":{ + "type":"integer", + "max":60, + "min":1 + }, + "ResourceConflictException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "RoleType":{ + "type":"string", + "pattern":"(un)?authenticated" + }, + "RolesMap":{ + "type":"map", + "key":{"shape":"RoleType"}, + "value":{"shape":"ARNString"}, + "max":2 + }, + "SAMLProviderList":{ + "type":"list", + "member":{"shape":"ARNString"} + }, + "SecretKeyString":{"type":"string"}, + "SessionTokenString":{"type":"string"}, + "SetIdentityPoolRolesInput":{ + "type":"structure", + "required":[ + "IdentityPoolId", + "Roles" + ], + "members":{ + "IdentityPoolId":{"shape":"IdentityPoolId"}, + "Roles":{"shape":"RolesMap"} + } + }, + "String":{"type":"string"}, + "TokenDuration":{ + "type":"long", + "max":86400, + "min":1 + }, + "TooManyRequestsException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "UnlinkDeveloperIdentityInput":{ + "type":"structure", + "required":[ + "IdentityId", + "IdentityPoolId", + "DeveloperProviderName", + "DeveloperUserIdentifier" + ], + "members":{ + "IdentityId":{"shape":"IdentityId"}, + "IdentityPoolId":{"shape":"IdentityPoolId"}, + "DeveloperProviderName":{"shape":"DeveloperProviderName"}, + "DeveloperUserIdentifier":{"shape":"DeveloperUserIdentifier"} + } + }, + "UnlinkIdentityInput":{ + "type":"structure", + "required":[ + "IdentityId", + "Logins", + "LoginsToRemove" + ], + "members":{ + "IdentityId":{"shape":"IdentityId"}, + "Logins":{"shape":"LoginsMap"}, + "LoginsToRemove":{"shape":"LoginsList"} + } + }, + "UnprocessedIdentityId":{ + "type":"structure", + "members":{ + "IdentityId":{"shape":"IdentityId"}, + "ErrorCode":{"shape":"ErrorCode"} + } + }, + "UnprocessedIdentityIdList":{ + "type":"list", + "member":{"shape":"UnprocessedIdentityId"}, + "max":60 + } 
+ } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cognito-identity/2014-06-30/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cognito-identity/2014-06-30/docs-2.json new file mode 100644 index 000000000..78414f91e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cognito-identity/2014-06-30/docs-2.json @@ -0,0 +1,546 @@ +{ + "version": "2.0", + "service": "Amazon Cognito

    Amazon Cognito is a web service that delivers scoped temporary credentials to mobile devices and other untrusted environments. Amazon Cognito uniquely identifies a device and supplies the user with a consistent identity over the lifetime of an application.

    Using Amazon Cognito, you can enable authentication with one or more third-party identity providers (Facebook, Google, or Login with Amazon), and you can also choose to support unauthenticated access from your app. Cognito delivers a unique identifier for each user and acts as an OpenID token provider trusted by AWS Security Token Service (STS) to access temporary, limited-privilege AWS credentials.

    To provide end-user credentials, first make an unsigned call to GetId. If the end user is authenticated with one of the supported identity providers, set the Logins map with the identity provider token. GetId returns a unique identifier for the user.

    Next, make an unsigned call to GetCredentialsForIdentity. This call expects the same Logins map as the GetId call, as well as the IdentityID originally returned by GetId. Assuming your identity pool has been configured via the SetIdentityPoolRoles operation, GetCredentialsForIdentity will return AWS credentials for your use. If your pool has not been configured with SetIdentityPoolRoles, or if you want to follow legacy flow, make an unsigned call to GetOpenIdToken, which returns the OpenID token necessary to call STS and retrieve AWS credentials. This call expects the same Logins map as the GetId call, as well as the IdentityID originally returned by GetId. The token returned by GetOpenIdToken can be passed to the STS operation AssumeRoleWithWebIdentity to retrieve AWS credentials.

    If you want to use Amazon Cognito in an Android, iOS, or Unity application, you will probably want to make API calls via the AWS Mobile SDK. To learn more, see the AWS Mobile SDK Developer Guide.

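    Editor's note: the two-step "enhanced" flow described above (unsigned GetId, then unsigned GetCredentialsForIdentity) maps directly onto the Go client this patch vendors. A minimal sketch under stated assumptions: the pool ID and Facebook access token are placeholders, and a 2017-era aws-sdk-go is on the GOPATH.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cognitoidentity"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := cognitoidentity.New(sess)

	// Both calls are public (unsigned); the Logins map carries the provider token.
	logins := map[string]*string{"graph.facebook.com": aws.String("FACEBOOK-ACCESS-TOKEN")} // placeholder token

	id, err := svc.GetId(&cognitoidentity.GetIdInput{
		IdentityPoolId: aws.String("us-east-1:11111111-2222-3333-4444-555555555555"), // placeholder pool ID
		Logins:         logins,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Succeeds once the pool's roles have been configured via SetIdentityPoolRoles.
	creds, err := svc.GetCredentialsForIdentity(&cognitoidentity.GetCredentialsForIdentityInput{
		IdentityId: id.IdentityId,
		Logins:     logins,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("scoped access key:", aws.StringValue(creds.Credentials.AccessKeyId))
}
```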
    ", + "operations": { + "CreateIdentityPool": "

    Creates a new identity pool. The identity pool is a store of user identity information that is specific to your AWS account. The limit on identity pools is 60 per account. The keys for SupportedLoginProviders are as follows:

    • Facebook: graph.facebook.com
    • Google: accounts.google.com
    • Amazon: www.amazon.com
    • Twitter: api.twitter.com
    • Digits: www.digits.com
    You must use AWS Developer credentials to call this API.

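    Editor's note: as a sketch of the provider-key mapping listed above, a pool that trusts Facebook logins could be created like this with the vendored Go client. The pool name and Facebook app ID are placeholders; CreateIdentityPool returns the IdentityPool shape directly.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cognitoidentity"
)

func main() {
	svc := cognitoidentity.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	pool, err := svc.CreateIdentityPool(&cognitoidentity.CreateIdentityPoolInput{
		IdentityPoolName:               aws.String("MyAppIdentities"), // placeholder; must match [\w ]+
		AllowUnauthenticatedIdentities: aws.Bool(false),
		SupportedLoginProviders: map[string]*string{
			// Key is the provider domain listed above; value is your app/client ID.
			"graph.facebook.com": aws.String("123456789012345"), // placeholder Facebook app ID
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created pool:", aws.StringValue(pool.IdentityPoolId))
}
```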
    ", + "DeleteIdentities": "

    Deletes identities from an identity pool. You can specify a list of 1-60 identities that you want to delete.

    You must use AWS Developer credentials to call this API.

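    Editor's note: a sketch of a batch delete with the Go client, checking the per-identity error codes the service returns for anything it could not process. The identity IDs are placeholders.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cognitoidentity"
)

func main() {
	svc := cognitoidentity.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	out, err := svc.DeleteIdentities(&cognitoidentity.DeleteIdentitiesInput{
		IdentityIdsToDelete: []*string{ // 1-60 IDs per request
			aws.String("us-east-1:11111111-2222-3333-4444-555555555555"), // placeholders
			aws.String("us-east-1:66666666-7777-8888-9999-000000000000"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	// Identities that could not be deleted come back with an ErrorCode
	// (AccessDenied or InternalServerError, per the ErrorCode enum in this model).
	for _, u := range out.UnprocessedIdentityIds {
		log.Printf("not deleted: %s (%s)", aws.StringValue(u.IdentityId), aws.StringValue(u.ErrorCode))
	}
}
```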
    ", + "DeleteIdentityPool": "

    Deletes an identity pool. Once a pool is deleted, users will not be able to authenticate with the pool.

    You must use AWS Developer credentials to call this API.

    ", + "DescribeIdentity": "

    Returns metadata related to the given identity, including when the identity was created and any associated linked logins.

    You must use AWS Developer credentials to call this API.

    ", + "DescribeIdentityPool": "

    Gets details about a particular identity pool, including the pool name, ID, description, creation date, and current number of users.

    You must use AWS Developer credentials to call this API.

    ", + "GetCredentialsForIdentity": "

    Returns credentials for the provided identity ID. Any provided logins will be validated against supported login providers. If the token is for cognito-identity.amazonaws.com, it will be passed through to AWS Security Token Service with the appropriate role for the token.

    This is a public API. You do not need any credentials to call this API.

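    Editor's note: the Credentials structure returned here (AccessKeyId, SecretKey, SessionToken, Expiration per the shapes in this patch) can seed a static credentials provider for subsequent signed calls. A sketch with a placeholder identity ID; for an authenticated identity the Logins map would also be supplied.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cognitoidentity"
)

func main() {
	svc := cognitoidentity.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	out, err := svc.GetCredentialsForIdentity(&cognitoidentity.GetCredentialsForIdentityInput{
		IdentityId: aws.String("us-east-1:11111111-2222-3333-4444-555555555555"), // placeholder identity ID
	})
	if err != nil {
		log.Fatal(err)
	}

	// Note the field is SecretKey, not SecretAccessKey, on this response shape.
	c := out.Credentials
	static := credentials.NewStaticCredentials(
		aws.StringValue(c.AccessKeyId),
		aws.StringValue(c.SecretKey),
		aws.StringValue(c.SessionToken),
	)
	_ = session.Must(session.NewSession(&aws.Config{
		Region:      aws.String("us-east-1"),
		Credentials: static, // sign later calls with the scoped, expiring credentials
	}))
}
```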
    ", + "GetId": "

    Generates (or retrieves) a Cognito ID. Supplying multiple logins will create an implicit linked account.

    This is a public API. You do not need any credentials to call this API.

    ", + "GetIdentityPoolRoles": "

    Gets the roles for an identity pool.

    You must use AWS Developer credentials to call this API.

    ", + "GetOpenIdToken": "

    Gets an OpenID token, using a known Cognito ID. This known Cognito ID is returned by GetId. You can optionally add additional logins for the identity. Supplying multiple logins creates an implicit link.

    The OpenID token is valid for 15 minutes.

    This is a public API. You do not need any credentials to call this API.

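    Editor's note: this is the legacy flow mentioned in the service overview. A sketch of exchanging the 15-minute OpenID token for AWS credentials through STS; the identity ID and role ARN are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cognitoidentity"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))

	tok, err := cognitoidentity.New(sess).GetOpenIdToken(&cognitoidentity.GetOpenIdTokenInput{
		IdentityId: aws.String("us-east-1:11111111-2222-3333-4444-555555555555"), // placeholder, from GetId
	})
	if err != nil {
		log.Fatal(err)
	}

	// Pass the OpenID token to STS, as the description above prescribes.
	res, err := sts.New(sess).AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
		RoleArn:          aws.String("arn:aws:iam::111111111111:role/CognitoAuthRole"), // placeholder role
		RoleSessionName:  aws.String("cognito-legacy-flow"),
		WebIdentityToken: tok.Token,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("temporary key:", aws.StringValue(res.Credentials.AccessKeyId))
}
```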
    ", + "GetOpenIdTokenForDeveloperIdentity": "

    Registers (or retrieves) a Cognito IdentityId and an OpenID Connect token for a user authenticated by your backend authentication process. Supplying multiple logins will create an implicit linked account. You can only specify one developer provider as part of the Logins map, which is linked to the identity pool. The developer provider is the \"domain\" by which Cognito will refer to your users.

    You can use GetOpenIdTokenForDeveloperIdentity to create a new identity and to link new logins (that is, user credentials issued by a public provider or developer provider) to an existing identity. When you want to create a new identity, the IdentityId should be null. When you want to associate a new login with an existing authenticated/unauthenticated identity, you can do so by providing the existing IdentityId. This API will create the identity in the specified IdentityPoolId.

    You must use AWS Developer credentials to call this API.

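    Editor's note: a sketch of the developer-provider flow described above. The pool ID, developer provider name (the pool's "domain"), and backend user ID are placeholders; leaving IdentityId unset creates a new identity.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cognitoidentity"
)

func main() {
	svc := cognitoidentity.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	out, err := svc.GetOpenIdTokenForDeveloperIdentity(&cognitoidentity.GetOpenIdTokenForDeveloperIdentityInput{
		IdentityPoolId: aws.String("us-east-1:11111111-2222-3333-4444-555555555555"), // placeholder
		Logins: map[string]*string{
			// Developer provider name -> your backend's unique user identifier.
			"login.mycompany.myapp": aws.String("user-8675309"), // placeholders
		},
		TokenDuration: aws.Int64(3600), // seconds; the model allows 1 to 86400
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.IdentityId), aws.StringValue(out.Token))
}
```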
    ", + "ListIdentities": "

    Lists the identities in a pool.

    You must use AWS Developer credentials to call this API.

    ", + "ListIdentityPools": "

    Lists all of the Cognito identity pools registered for your account.

    You must use AWS Developer credentials to call this API.

    ", + "LookupDeveloperIdentity": "

    Retrieves the IdentityID associated with a DeveloperUserIdentifier or the list of DeveloperUserIdentifiers associated with an IdentityId for an existing identity. Either IdentityID or DeveloperUserIdentifier must not be null. If you supply only one of these values, the other value will be searched in the database and returned as a part of the response. If you supply both, DeveloperUserIdentifier will be matched against IdentityID. If the values are verified against the database, the response returns both values and is the same as the request. Otherwise a ResourceConflictException is thrown.

    You must use AWS Developer credentials to call this API.

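    Editor's note: a sketch of the lookup-by-either-key behavior described above, using the vendored Go client with placeholder pool and user identifiers.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cognitoidentity"
)

func main() {
	svc := cognitoidentity.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	// Supply either IdentityId or DeveloperUserIdentifier; the service
	// resolves and returns the other side of the mapping.
	out, err := svc.LookupDeveloperIdentity(&cognitoidentity.LookupDeveloperIdentityInput{
		IdentityPoolId:          aws.String("us-east-1:11111111-2222-3333-4444-555555555555"), // placeholder
		DeveloperUserIdentifier: aws.String("user-8675309"),                                   // placeholder
		MaxResults:              aws.Int64(10),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("identity:", aws.StringValue(out.IdentityId))
}
```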
    ", + "MergeDeveloperIdentities": "

    Merges two users having different IdentityIds, existing in the same identity pool, and identified by the same developer provider. You can use this action to request that discrete users be merged and identified as a single user in the Cognito environment. Cognito associates the given source user (SourceUserIdentifier) with the IdentityId of the DestinationUserIdentifier. Only developer-authenticated users can be merged. If the users to be merged are associated with the same public provider, but as two different users, an exception will be thrown.

    You must use AWS Developer credentials to call this API.

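    Editor's note: a sketch of merging two developer-authenticated users as described above. All identifiers are placeholders; after the merge, the source user's logins are associated with the destination user's IdentityId.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cognitoidentity"
)

func main() {
	svc := cognitoidentity.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	out, err := svc.MergeDeveloperIdentities(&cognitoidentity.MergeDeveloperIdentitiesInput{
		IdentityPoolId:            aws.String("us-east-1:11111111-2222-3333-4444-555555555555"), // placeholders
		DeveloperProviderName:     aws.String("login.mycompany.myapp"),
		SourceUserIdentifier:      aws.String("user-old-device"),
		DestinationUserIdentifier: aws.String("user-new-device"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("merged into:", aws.StringValue(out.IdentityId))
}
```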
    ", + "SetIdentityPoolRoles": "

    Sets the roles for an identity pool. These roles are used when making calls to the GetCredentialsForIdentity action.

    You must use AWS Developer credentials to call this API.

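    Editor's note: a sketch of configuring the pool roles that GetCredentialsForIdentity relies on. The pool ID and role ARNs are placeholders; the RolesMap shape in this model allows at most two entries, keyed per the RoleType pattern "(un)?authenticated".

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cognitoidentity"
)

func main() {
	svc := cognitoidentity.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	_, err := svc.SetIdentityPoolRoles(&cognitoidentity.SetIdentityPoolRolesInput{
		IdentityPoolId: aws.String("us-east-1:11111111-2222-3333-4444-555555555555"), // placeholder
		Roles: map[string]*string{
			"authenticated":   aws.String("arn:aws:iam::111111111111:role/CognitoAuthRole"), // placeholder ARNs
			"unauthenticated": aws.String("arn:aws:iam::111111111111:role/CognitoUnauthRole"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```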
    ", + "UnlinkDeveloperIdentity": "

    Unlinks a DeveloperUserIdentifier from an existing identity. Unlinked developer users will be considered new identities next time they are seen. If, for a given Cognito identity, you remove all federated identities as well as the developer user identifier, the Cognito identity becomes inaccessible.

    You must use AWS Developer credentials to call this API.

    ", + "UnlinkIdentity": "

    Unlinks a federated identity from an existing account. Unlinked logins will be considered new identities next time they are seen. Removing the last linked login will make this identity inaccessible.

    This is a public API. You do not need any credentials to call this API.

    ", + "UpdateIdentityPool": "

    Updates an identity pool.

    You must use AWS Developer credentials to call this API.

    " + }, + "shapes": { + "ARNString": { + "base": null, + "refs": { + "GetCredentialsForIdentityInput$CustomRoleArn": "

    The Amazon Resource Name (ARN) of the role to be assumed when multiple roles were received in the token from the identity provider (for example, a SAML-based identity provider). This parameter is optional for identity providers that do not support role customization.

    ", + "OIDCProviderList$member": null, + "RolesMap$value": null, + "SAMLProviderList$member": null + } + }, + "AccessKeyString": { + "base": null, + "refs": { + "Credentials$AccessKeyId": "

    The Access Key portion of the credentials.

    " + } + }, + "AccountId": { + "base": null, + "refs": { + "GetIdInput$AccountId": "A standard AWS account ID (9+ digits)." + } + }, + "CognitoIdentityProvider": { + "base": "

    A provider representing an Amazon Cognito Identity User Pool and its client ID.

    ", + "refs": { + "CognitoIdentityProviderList$member": null + } + }, + "CognitoIdentityProviderClientId": { + "base": null, + "refs": { + "CognitoIdentityProvider$ClientId": "

    The client ID for the Amazon Cognito Identity User Pool.

    " + } + }, + "CognitoIdentityProviderList": { + "base": null, + "refs": { + "CreateIdentityPoolInput$CognitoIdentityProviders": "

    An array of Amazon Cognito Identity user pools.

    ", + "IdentityPool$CognitoIdentityProviders": "

    A list representing an Amazon Cognito Identity User Pool and its client ID.

    " + } + }, + "CognitoIdentityProviderName": { + "base": null, + "refs": { + "CognitoIdentityProvider$ProviderName": "

    The provider name for an Amazon Cognito Identity User Pool. For example, cognito-idp.us-east-1.amazonaws.com/us-east-1_123456789.

    " + } + }, + "ConcurrentModificationException": { + "base": "

    Thrown if there are parallel requests to modify a resource.

    ", + "refs": { + } + }, + "CreateIdentityPoolInput": { + "base": "

    Input to the CreateIdentityPool action.

    ", + "refs": { + } + }, + "Credentials": { + "base": "

    Credentials for the provided identity ID.

    ", + "refs": { + "GetCredentialsForIdentityResponse$Credentials": "

    Credentials for the provided identity ID.

    " + } + }, + "DateType": { + "base": null, + "refs": { + "Credentials$Expiration": "

    The date at which these credentials will expire.

    ", + "IdentityDescription$CreationDate": "

    Date on which the identity was created.

    ", + "IdentityDescription$LastModifiedDate": "

    Date on which the identity was last modified.

    " + } + }, + "DeleteIdentitiesInput": { + "base": "

    Input to the DeleteIdentities action.

    ", + "refs": { + } + }, + "DeleteIdentitiesResponse": { + "base": "

    Returned in response to a successful DeleteIdentities operation.

    ", + "refs": { + } + }, + "DeleteIdentityPoolInput": { + "base": "

    Input to the DeleteIdentityPool action.

    ", + "refs": { + } + }, + "DescribeIdentityInput": { + "base": "

    Input to the DescribeIdentity action.

    ", + "refs": { + } + }, + "DescribeIdentityPoolInput": { + "base": "Input to the DescribeIdentityPool action.", + "refs": { + } + }, + "DeveloperProviderName": { + "base": null, + "refs": { + "CreateIdentityPoolInput$DeveloperProviderName": "

    The \"domain\" by which Cognito will refer to your users. This name acts as a placeholder that allows your backend and the Cognito service to communicate about the developer provider. For the DeveloperProviderName, you can use letters as well as period (.), underscore (_), and dash (-).

    Once you have set a developer provider name, you cannot change it. Please take care in setting this parameter.

    ", + "IdentityPool$DeveloperProviderName": "

    The \"domain\" by which Cognito will refer to your users.

    ", + "MergeDeveloperIdentitiesInput$DeveloperProviderName": "

    The \"domain\" by which Cognito will refer to your users. This is a (pseudo) domain name that you provide while creating an identity pool. This name acts as a placeholder that allows your backend and the Cognito service to communicate about the developer provider. For the DeveloperProviderName, you can use letters as well as period (.), underscore (_), and dash (-).

    ", + "UnlinkDeveloperIdentityInput$DeveloperProviderName": "

    The \"domain\" by which Cognito will refer to your users.

    " + } + }, + "DeveloperUserAlreadyRegisteredException": { + "base": "

    The provided developer user identifier is already registered with Cognito under a different identity ID.

    ", + "refs": { + } + }, + "DeveloperUserIdentifier": { + "base": null, + "refs": { + "DeveloperUserIdentifierList$member": null, + "LookupDeveloperIdentityInput$DeveloperUserIdentifier": "

    A unique ID used by your backend authentication process to identify a user. Typically, a developer identity provider would issue many developer user identifiers, in keeping with the number of users.

    ", + "MergeDeveloperIdentitiesInput$SourceUserIdentifier": "

    User identifier for the source user. The value should be a DeveloperUserIdentifier.

    ", + "MergeDeveloperIdentitiesInput$DestinationUserIdentifier": "

    User identifier for the destination user. The value should be a DeveloperUserIdentifier.

    ", + "UnlinkDeveloperIdentityInput$DeveloperUserIdentifier": "A unique ID used by your backend authentication process to identify a user." + } + }, + "DeveloperUserIdentifierList": { + "base": null, + "refs": { + "LookupDeveloperIdentityResponse$DeveloperUserIdentifierList": "

    This is the list of developer user identifiers associated with an identity ID. Cognito supports the association of multiple developer user identifiers with an identity ID.

    " + } + }, + "ErrorCode": { + "base": null, + "refs": { + "UnprocessedIdentityId$ErrorCode": "

    The error code indicating the type of error that occurred.

    " + } + }, + "ExternalServiceException": { + "base": "

    An exception thrown when a dependent service such as Facebook or Twitter is not responding.

    ", + "refs": { + } + }, + "GetCredentialsForIdentityInput": { + "base": "

    Input to the GetCredentialsForIdentity action.

    ", + "refs": { + } + }, + "GetCredentialsForIdentityResponse": { + "base": "

    Returned in response to a successful GetCredentialsForIdentity operation.

    ", + "refs": { + } + }, + "GetIdInput": { + "base": "Input to the GetId action.", + "refs": { + } + }, + "GetIdResponse": { + "base": "Returned in response to a GetId request.", + "refs": { + } + }, + "GetIdentityPoolRolesInput": { + "base": "

    Input to the GetIdentityPoolRoles action.

    ", + "refs": { + } + }, + "GetIdentityPoolRolesResponse": { + "base": "

    Returned in response to a successful GetIdentityPoolRoles operation.

    ", + "refs": { + } + }, + "GetOpenIdTokenForDeveloperIdentityInput": { + "base": "

    Input to the GetOpenIdTokenForDeveloperIdentity action.

    ", + "refs": { + } + }, + "GetOpenIdTokenForDeveloperIdentityResponse": { + "base": "

    Returned in response to a successful GetOpenIdTokenForDeveloperIdentity request.

    ", + "refs": { + } + }, + "GetOpenIdTokenInput": { + "base": "Input to the GetOpenIdToken action.", + "refs": { + } + }, + "GetOpenIdTokenResponse": { + "base": "Returned in response to a successful GetOpenIdToken request.", + "refs": { + } + }, + "HideDisabled": { + "base": null, + "refs": { + "ListIdentitiesInput$HideDisabled": "

    An optional boolean parameter that allows you to hide disabled identities. If omitted, the ListIdentities API will include disabled identities in the response.

    " + } + }, + "IdentitiesList": { + "base": null, + "refs": { + "ListIdentitiesResponse$Identities": "An object containing a set of identities and associated mappings." + } + }, + "IdentityDescription": { + "base": "A description of the identity.", + "refs": { + "IdentitiesList$member": null + } + }, + "IdentityId": { + "base": null, + "refs": { + "DescribeIdentityInput$IdentityId": "

    A unique identifier in the format REGION:GUID.

    ", + "GetCredentialsForIdentityInput$IdentityId": "

    A unique identifier in the format REGION:GUID.

    ", + "GetCredentialsForIdentityResponse$IdentityId": "

    A unique identifier in the format REGION:GUID.

    ", + "GetIdResponse$IdentityId": "A unique identifier in the format REGION:GUID.", + "GetOpenIdTokenForDeveloperIdentityInput$IdentityId": "

    A unique identifier in the format REGION:GUID.

    ", + "GetOpenIdTokenForDeveloperIdentityResponse$IdentityId": "

    A unique identifier in the format REGION:GUID.

    ", + "GetOpenIdTokenInput$IdentityId": "A unique identifier in the format REGION:GUID.", + "GetOpenIdTokenResponse$IdentityId": "A unique identifier in the format REGION:GUID. Note that the IdentityId returned may not match the one passed on input.", + "IdentityDescription$IdentityId": "A unique identifier in the format REGION:GUID.", + "IdentityIdList$member": null, + "LookupDeveloperIdentityInput$IdentityId": "

    A unique identifier in the format REGION:GUID.

    ", + "LookupDeveloperIdentityResponse$IdentityId": "

    A unique identifier in the format REGION:GUID.

    ", + "MergeDeveloperIdentitiesResponse$IdentityId": "

    A unique identifier in the format REGION:GUID.

    ", + "UnlinkDeveloperIdentityInput$IdentityId": "

    A unique identifier in the format REGION:GUID.

    ", + "UnlinkIdentityInput$IdentityId": "A unique identifier in the format REGION:GUID.", + "UnprocessedIdentityId$IdentityId": "

    A unique identifier in the format REGION:GUID.

    " + } + }, + "IdentityIdList": { + "base": null, + "refs": { + "DeleteIdentitiesInput$IdentityIdsToDelete": "

    A list of 1-60 identities that you want to delete.

    " + } + }, + "IdentityPool": { + "base": "An object representing a Cognito identity pool.", + "refs": { + } + }, + "IdentityPoolId": { + "base": null, + "refs": { + "DeleteIdentityPoolInput$IdentityPoolId": "An identity pool ID in the format REGION:GUID.", + "DescribeIdentityPoolInput$IdentityPoolId": "An identity pool ID in the format REGION:GUID.", + "GetIdInput$IdentityPoolId": "An identity pool ID in the format REGION:GUID.", + "GetIdentityPoolRolesInput$IdentityPoolId": "

    An identity pool ID in the format REGION:GUID.

    ", + "GetIdentityPoolRolesResponse$IdentityPoolId": "

    An identity pool ID in the format REGION:GUID.

    ", + "GetOpenIdTokenForDeveloperIdentityInput$IdentityPoolId": "

    An identity pool ID in the format REGION:GUID.

    ", + "IdentityPool$IdentityPoolId": "An identity pool ID in the format REGION:GUID.", + "IdentityPoolShortDescription$IdentityPoolId": "An identity pool ID in the format REGION:GUID.", + "ListIdentitiesInput$IdentityPoolId": "An identity pool ID in the format REGION:GUID.", + "ListIdentitiesResponse$IdentityPoolId": "An identity pool ID in the format REGION:GUID.", + "LookupDeveloperIdentityInput$IdentityPoolId": "

    An identity pool ID in the format REGION:GUID.

    ", + "MergeDeveloperIdentitiesInput$IdentityPoolId": "

    An identity pool ID in the format REGION:GUID.

    ", + "SetIdentityPoolRolesInput$IdentityPoolId": "

    An identity pool ID in the format REGION:GUID.

    ", + "UnlinkDeveloperIdentityInput$IdentityPoolId": "

    An identity pool ID in the format REGION:GUID.

    " + } + }, + "IdentityPoolName": { + "base": null, + "refs": { + "CreateIdentityPoolInput$IdentityPoolName": "

    A string that you provide.

    ", + "IdentityPool$IdentityPoolName": "

    A string that you provide.

    ", + "IdentityPoolShortDescription$IdentityPoolName": "A string that you provide." + } + }, + "IdentityPoolShortDescription": { + "base": "A description of the identity pool.", + "refs": { + "IdentityPoolsList$member": null + } + }, + "IdentityPoolUnauthenticated": { + "base": null, + "refs": { + "CreateIdentityPoolInput$AllowUnauthenticatedIdentities": "

    TRUE if the identity pool supports unauthenticated logins.

    ", + "IdentityPool$AllowUnauthenticatedIdentities": "TRUE if the identity pool supports unauthenticated logins." + } + }, + "IdentityPoolsList": { + "base": null, + "refs": { + "ListIdentityPoolsResponse$IdentityPools": "The identity pools returned by the ListIdentityPools action." + } + }, + "IdentityProviderId": { + "base": null, + "refs": { + "IdentityProviders$value": null + } + }, + "IdentityProviderName": { + "base": null, + "refs": { + "IdentityProviders$key": null, + "LoginsList$member": null, + "LoginsMap$key": null + } + }, + "IdentityProviderToken": { + "base": null, + "refs": { + "LoginsMap$value": null + } + }, + "IdentityProviders": { + "base": null, + "refs": { + "CreateIdentityPoolInput$SupportedLoginProviders": "

    Optional key:value pairs mapping provider names to provider app IDs.

    ", + "IdentityPool$SupportedLoginProviders": "

    Optional key:value pairs mapping provider names to provider app IDs.

    " + } + }, + "InternalErrorException": { + "base": "Thrown when the service encounters an error during processing the request.", + "refs": { + } + }, + "InvalidIdentityPoolConfigurationException": { + "base": "

    Thrown if the identity pool has no role associated for the given auth type (auth/unauth) or if the AssumeRole fails.

    ", + "refs": { + } + }, + "InvalidParameterException": { + "base": "Thrown for missing or bad input parameter(s).", + "refs": { + } + }, + "LimitExceededException": { + "base": "Thrown when the total number of user pools has exceeded a preset limit.", + "refs": { + } + }, + "ListIdentitiesInput": { + "base": "Input to the ListIdentities action.", + "refs": { + } + }, + "ListIdentitiesResponse": { + "base": "The response to a ListIdentities request.", + "refs": { + } + }, + "ListIdentityPoolsInput": { + "base": "Input to the ListIdentityPools action.", + "refs": { + } + }, + "ListIdentityPoolsResponse": { + "base": "The result of a successful ListIdentityPools action.", + "refs": { + } + }, + "LoginsList": { + "base": null, + "refs": { + "IdentityDescription$Logins": "A set of optional name-value pairs that map provider names to provider tokens.", + "UnlinkIdentityInput$LoginsToRemove": "Provider names to unlink from this identity." + } + }, + "LoginsMap": { + "base": null, + "refs": { + "GetCredentialsForIdentityInput$Logins": "

    A set of optional name-value pairs that map provider names to provider tokens.

    ", + "GetIdInput$Logins": "

    A set of optional name-value pairs that map provider names to provider tokens.

    The available provider names for Logins are as follows:

    • Facebook: graph.facebook.com
    • Google: accounts.google.com
    • Amazon: www.amazon.com
    • Twitter: api.twitter.com
    • Digits: www.digits.com

    ", + "GetOpenIdTokenForDeveloperIdentityInput$Logins": "

    A set of optional name-value pairs that map provider names to provider tokens. Each name-value pair represents a user from a public provider or developer provider. If the user is from a developer provider, the name-value pair will follow the syntax \"developer_provider_name\": \"developer_user_identifier\". The developer provider is the \"domain\" by which Cognito will refer to your users; you provided this domain while creating/updating the identity pool. The developer user identifier is an identifier from your backend that uniquely identifies a user. When you create an identity pool, you can specify the supported logins.

    ", + "GetOpenIdTokenInput$Logins": "A set of optional name-value pairs that map provider names to provider tokens. When using graph.facebook.com and www.amazon.com, supply the access_token returned from the provider's authflow. For accounts.google.com or any other OpenId Connect provider, always include the id_token.", + "UnlinkIdentityInput$Logins": "A set of optional name-value pairs that map provider names to provider tokens." + } + }, + "LookupDeveloperIdentityInput": { + "base": "

    Input to the LookupDeveloperIdentityInput action.

    ", + "refs": { + } + }, + "LookupDeveloperIdentityResponse": { + "base": "

    Returned in response to a successful LookupDeveloperIdentity action.

    ", + "refs": { + } + }, + "MergeDeveloperIdentitiesInput": { + "base": "

    Input to the MergeDeveloperIdentities action.

    ", + "refs": { + } + }, + "MergeDeveloperIdentitiesResponse": { + "base": "

    Returned in response to a successful MergeDeveloperIdentities action.

    ", + "refs": { + } + }, + "NotAuthorizedException": { + "base": "Thrown when a user is not authorized to access the requested resource.", + "refs": { + } + }, + "OIDCProviderList": { + "base": null, + "refs": { + "CreateIdentityPoolInput$OpenIdConnectProviderARNs": "

    A list of OpendID Connect provider ARNs.

    ", + "IdentityPool$OpenIdConnectProviderARNs": "

    A list of OpendID Connect provider ARNs.

    " + } + }, + "OIDCToken": { + "base": null, + "refs": { + "GetOpenIdTokenForDeveloperIdentityResponse$Token": "

    An OpenID token.

    ", + "GetOpenIdTokenResponse$Token": "An OpenID token, valid for 15 minutes." + } + }, + "PaginationKey": { + "base": null, + "refs": { + "ListIdentitiesInput$NextToken": "A pagination token.", + "ListIdentitiesResponse$NextToken": "A pagination token.", + "ListIdentityPoolsInput$NextToken": "A pagination token.", + "ListIdentityPoolsResponse$NextToken": "A pagination token.", + "LookupDeveloperIdentityInput$NextToken": "

    A pagination token. The first call you make will have NextToken set to null. After that the service will return NextToken values as needed. For example, let's say you make a request with MaxResults set to 10, and there are 20 matches in the database. The service will return a pagination token as a part of the response. This token can be used to call the API again and get results starting from the 11th match.

    ", + "LookupDeveloperIdentityResponse$NextToken": "

    A pagination token. The first call you make will have NextToken set to null. After that the service will return NextToken values as needed. For example, let's say you make a request with MaxResults set to 10, and there are 20 matches in the database. The service will return a pagination token as a part of the response. This token can be used to call the API again and get results starting from the 11th match.

    " + } + }, + "QueryLimit": { + "base": null, + "refs": { + "ListIdentitiesInput$MaxResults": "The maximum number of identities to return.", + "ListIdentityPoolsInput$MaxResults": "The maximum number of identities to return.", + "LookupDeveloperIdentityInput$MaxResults": "

    The maximum number of identities to return.

    " + } + }, + "ResourceConflictException": { + "base": "Thrown when a user tries to use a login which is already linked to another account.", + "refs": { + } + }, + "ResourceNotFoundException": { + "base": "Thrown when the requested resource (for example, a dataset or record) does not exist.", + "refs": { + } + }, + "RoleType": { + "base": null, + "refs": { + "RolesMap$key": null + } + }, + "RolesMap": { + "base": null, + "refs": { + "GetIdentityPoolRolesResponse$Roles": "

    The map of roles associated with this pool. Currently only authenticated and unauthenticated roles are supported.

    ", + "SetIdentityPoolRolesInput$Roles": "

    The map of roles associated with this pool. For a given role, the key will be either \"authenticated\" or \"unauthenticated\" and the value will be the Role ARN.

    " + } + }, + "SAMLProviderList": { + "base": null, + "refs": { + "CreateIdentityPoolInput$SamlProviderARNs": "

    An array of Amazon Resource Names (ARNs) of the SAML provider for your identity pool.

    ", + "IdentityPool$SamlProviderARNs": "

    An array of Amazon Resource Names (ARNs) of the SAML provider for your identity pool.

    " + } + }, + "SecretKeyString": { + "base": null, + "refs": { + "Credentials$SecretKey": "

    The Secret Access Key portion of the credentials

    " + } + }, + "SessionTokenString": { + "base": null, + "refs": { + "Credentials$SessionToken": "

    The Session Token portion of the credentials

    " + } + }, + "SetIdentityPoolRolesInput": { + "base": "

    Input to the SetIdentityPoolRoles action.

    ", + "refs": { + } + }, + "String": { + "base": null, + "refs": { + "ConcurrentModificationException$message": "

    The message returned by a ConcurrentModificationException.

    ", + "DeveloperUserAlreadyRegisteredException$message": "

    This developer user identifier is already registered with Cognito.

    ", + "ExternalServiceException$message": "

    The message returned by an ExternalServiceException

    ", + "InternalErrorException$message": "The message returned by an InternalErrorException.", + "InvalidIdentityPoolConfigurationException$message": "

    The message returned for an InvalidIdentityPoolConfigurationException

    ", + "InvalidParameterException$message": "The message returned by an InvalidParameterException.", + "LimitExceededException$message": "The message returned by a LimitExceededException.", + "NotAuthorizedException$message": "The message returned by a NotAuthorizedException", + "ResourceConflictException$message": "The message returned by a ResourceConflictException.", + "ResourceNotFoundException$message": "The message returned by a ResourceNotFoundException.", + "TooManyRequestsException$message": "Message returned by a TooManyRequestsException" + } + }, + "TokenDuration": { + "base": null, + "refs": { + "GetOpenIdTokenForDeveloperIdentityInput$TokenDuration": "

    The expiration time of the token, in seconds. You can specify a custom expiration time for the token so that you can cache it. If you don't provide an expiration time, the token is valid for 15 minutes. You can exchange the token with Amazon STS for temporary AWS credentials, which are valid for a maximum of one hour. The maximum token duration you can set is 24 hours. You should take care in setting the expiration time for a token, as there are significant security implications: an attacker could use a leaked token to access your AWS resources for the token's duration.

    " + } + }, + "TooManyRequestsException": { + "base": "Thrown when a request is throttled.", + "refs": { + } + }, + "UnlinkDeveloperIdentityInput": { + "base": "

    Input to the UnlinkDeveloperIdentity action.

    ", + "refs": { + } + }, + "UnlinkIdentityInput": { + "base": "Input to the UnlinkIdentity action.", + "refs": { + } + }, + "UnprocessedIdentityId": { + "base": "

    An array of UnprocessedIdentityId objects, each of which contains an ErrorCode and IdentityId.

    ", + "refs": { + "UnprocessedIdentityIdList$member": null + } + }, + "UnprocessedIdentityIdList": { + "base": null, + "refs": { + "DeleteIdentitiesResponse$UnprocessedIdentityIds": "

    An array of UnprocessedIdentityId objects, each of which contains an ErrorCode and IdentityId.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cognito-identity/2014-06-30/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cognito-identity/2014-06-30/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cognito-identity/2014-06-30/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cognito-idp/2016-04-18/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cognito-idp/2016-04-18/api-2.json new file mode 100644 index 000000000..554bcbd5f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cognito-idp/2016-04-18/api-2.json @@ -0,0 +1,1657 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2016-04-18", + "endpointPrefix":"cognito-idp", + "jsonVersion":"1.1", + "protocol":"json", + "serviceFullName":"Amazon Cognito Identity Provider", + "signatureVersion":"v4", + "targetPrefix":"AWSCognitoIdentityProviderService" + }, + "operations":{ + "AddCustomAttributes":{ + "name":"AddCustomAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddCustomAttributesRequest"}, + "output":{"shape":"AddCustomAttributesResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"} + ] + }, + "AdminConfirmSignUp":{ + "name":"AdminConfirmSignUp", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AdminConfirmSignUpRequest"}, + "output":{"shape":"AdminConfirmSignUpResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"UnexpectedLambdaException"}, + {"shape":"UserLambdaValidationException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"TooManyFailedAttemptsException"}, + {"shape":"InvalidLambdaResponseException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalErrorException"} + ] + }, + "AdminDeleteUser":{ + "name":"AdminDeleteUser", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AdminDeleteUserRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"} + ] + }, + "AdminDeleteUserAttributes":{ + "name":"AdminDeleteUserAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AdminDeleteUserAttributesRequest"}, + "output":{"shape":"AdminDeleteUserAttributesResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"} + ] + }, + "AdminDisableUser":{ + "name":"AdminDisableUser", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AdminDisableUserRequest"}, + "output":{"shape":"AdminDisableUserResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"} + ] + }, + "AdminEnableUser":{ + "name":"AdminEnableUser", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AdminEnableUserRequest"}, + "output":{"shape":"AdminEnableUserResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + 
{"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"} + ] + }, + "AdminGetUser":{ + "name":"AdminGetUser", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AdminGetUserRequest"}, + "output":{"shape":"AdminGetUserResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"} + ] + }, + "AdminResetUserPassword":{ + "name":"AdminResetUserPassword", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AdminResetUserPasswordRequest"}, + "output":{"shape":"AdminResetUserPasswordResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"UnexpectedLambdaException"}, + {"shape":"UserLambdaValidationException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"InvalidLambdaResponseException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalErrorException"} + ] + }, + "AdminSetUserSettings":{ + "name":"AdminSetUserSettings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AdminSetUserSettingsRequest"}, + "output":{"shape":"AdminSetUserSettingsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"NotAuthorizedException"} + ] + }, + "AdminUpdateUserAttributes":{ + "name":"AdminUpdateUserAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AdminUpdateUserAttributesRequest"}, + "output":{"shape":"AdminUpdateUserAttributesResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"UnexpectedLambdaException"}, + {"shape":"UserLambdaValidationException"}, + {"shape":"InvalidLambdaResponseException"}, + {"shape":"AliasExistsException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"} + ] + }, + "ChangePassword":{ + "name":"ChangePassword", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ChangePasswordRequest"}, + "output":{"shape":"ChangePasswordResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidPasswordException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"} + ], + "authtype":"none" + }, + "ConfirmForgotPassword":{ + "name":"ConfirmForgotPassword", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ConfirmForgotPasswordRequest"}, + "output":{"shape":"ConfirmForgotPasswordResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"UnexpectedLambdaException"}, + {"shape":"UserLambdaValidationException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidPasswordException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"CodeMismatchException"}, + {"shape":"ExpiredCodeException"}, + {"shape":"TooManyFailedAttemptsException"}, + {"shape":"InvalidLambdaResponseException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalErrorException"} + ], + "authtype":"none" + }, + "ConfirmSignUp":{ + "name":"ConfirmSignUp", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ConfirmSignUpRequest"}, + "output":{"shape":"ConfirmSignUpResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + 
{"shape":"InvalidParameterException"}, + {"shape":"UnexpectedLambdaException"}, + {"shape":"UserLambdaValidationException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"TooManyFailedAttemptsException"}, + {"shape":"CodeMismatchException"}, + {"shape":"ExpiredCodeException"}, + {"shape":"InvalidLambdaResponseException"}, + {"shape":"AliasExistsException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalErrorException"} + ], + "authtype":"none" + }, + "CreateUserPool":{ + "name":"CreateUserPool", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateUserPoolRequest"}, + "output":{"shape":"CreateUserPoolResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"}, + {"shape":"LimitExceededException"} + ] + }, + "CreateUserPoolClient":{ + "name":"CreateUserPoolClient", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateUserPoolClientRequest"}, + "output":{"shape":"CreateUserPoolClientResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalErrorException"} + ] + }, + "DeleteUser":{ + "name":"DeleteUser", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteUserRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"} + ], + "authtype":"none" + }, + "DeleteUserAttributes":{ + "name":"DeleteUserAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteUserAttributesRequest"}, + "output":{"shape":"DeleteUserAttributesResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"} + ], + "authtype":"none" + }, + "DeleteUserPool":{ + "name":"DeleteUserPool", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteUserPoolRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"} + ] + }, + "DeleteUserPoolClient":{ + "name":"DeleteUserPoolClient", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteUserPoolClientRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"} + ] + }, + "DescribeUserPool":{ + "name":"DescribeUserPool", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeUserPoolRequest"}, + "output":{"shape":"DescribeUserPoolResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"} + ] + }, + "DescribeUserPoolClient":{ + "name":"DescribeUserPoolClient", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeUserPoolClientRequest"}, + "output":{"shape":"DescribeUserPoolClientResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + 
{"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"} + ] + }, + "ForgotPassword":{ + "name":"ForgotPassword", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ForgotPasswordRequest"}, + "output":{"shape":"ForgotPasswordResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"UnexpectedLambdaException"}, + {"shape":"UserLambdaValidationException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"InvalidLambdaResponseException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalErrorException"} + ], + "authtype":"none" + }, + "GetUser":{ + "name":"GetUser", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetUserRequest"}, + "output":{"shape":"GetUserResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"} + ], + "authtype":"none" + }, + "GetUserAttributeVerificationCode":{ + "name":"GetUserAttributeVerificationCode", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetUserAttributeVerificationCodeRequest"}, + "output":{"shape":"GetUserAttributeVerificationCodeResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"UnexpectedLambdaException"}, + {"shape":"UserLambdaValidationException"}, + {"shape":"InvalidLambdaResponseException"}, + {"shape":"InternalErrorException"} + ], + "authtype":"none" + }, + "ListUserPoolClients":{ + "name":"ListUserPoolClients", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListUserPoolClientsRequest"}, + "output":{"shape":"ListUserPoolClientsResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"} + ] + }, + "ListUserPools":{ + "name":"ListUserPools", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListUserPoolsRequest"}, + "output":{"shape":"ListUserPoolsResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"} + ] + }, + "ListUsers":{ + "name":"ListUsers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListUsersRequest"}, + "output":{"shape":"ListUsersResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"} + ] + }, + "ResendConfirmationCode":{ + "name":"ResendConfirmationCode", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResendConfirmationCodeRequest"}, + "output":{"shape":"ResendConfirmationCodeResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"UnexpectedLambdaException"}, + {"shape":"UserLambdaValidationException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"InvalidLambdaResponseException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalErrorException"} + ], + "authtype":"none" + }, + "SetUserSettings":{ + "name":"SetUserSettings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"SetUserSettingsRequest"}, + "output":{"shape":"SetUserSettingsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"NotAuthorizedException"} + ], + "authtype":"none" + }, + "SignUp":{ + "name":"SignUp", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SignUpRequest"}, + "output":{"shape":"SignUpResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"UnexpectedLambdaException"}, + {"shape":"UserLambdaValidationException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"InvalidPasswordException"}, + {"shape":"InvalidLambdaResponseException"}, + {"shape":"UsernameExistsException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"} + ], + "authtype":"none" + }, + "UpdateUserAttributes":{ + "name":"UpdateUserAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateUserAttributesRequest"}, + "output":{"shape":"UpdateUserAttributesResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"CodeMismatchException"}, + {"shape":"ExpiredCodeException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"UnexpectedLambdaException"}, + {"shape":"UserLambdaValidationException"}, + {"shape":"InvalidLambdaResponseException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"AliasExistsException"}, + {"shape":"InternalErrorException"} + ], + "authtype":"none" + }, + "UpdateUserPool":{ + "name":"UpdateUserPool", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateUserPoolRequest"}, + "output":{"shape":"UpdateUserPoolResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"} + ] + }, + "UpdateUserPoolClient":{ + "name":"UpdateUserPoolClient", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateUserPoolClientRequest"}, + "output":{"shape":"UpdateUserPoolClientResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"} + ] + }, + "VerifyUserAttribute":{ + "name":"VerifyUserAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"VerifyUserAttributeRequest"}, + "output":{"shape":"VerifyUserAttributeResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"CodeMismatchException"}, + {"shape":"ExpiredCodeException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"} + ], + "authtype":"none" + } + }, + "shapes":{ + "AddCustomAttributesRequest":{ + "type":"structure", + "required":[ + "UserPoolId", + "CustomAttributes" + ], + "members":{ + "UserPoolId":{"shape":"UserPoolIdType"}, + "CustomAttributes":{"shape":"CustomAttributesListType"} + } + }, + "AddCustomAttributesResponse":{ + "type":"structure", + "members":{ + } + }, + "AdminConfirmSignUpRequest":{ + "type":"structure", + "required":[ + "UserPoolId", + "Username" + ], + "members":{ + "UserPoolId":{"shape":"UserPoolIdType"}, + "Username":{"shape":"UsernameType"} + } + }, + "AdminConfirmSignUpResponse":{ + "type":"structure", + "members":{ + } + }, + 
"AdminDeleteUserAttributesRequest":{ + "type":"structure", + "required":[ + "UserPoolId", + "Username", + "UserAttributeNames" + ], + "members":{ + "UserPoolId":{"shape":"UserPoolIdType"}, + "Username":{"shape":"UsernameType"}, + "UserAttributeNames":{"shape":"AttributeNameListType"} + } + }, + "AdminDeleteUserAttributesResponse":{ + "type":"structure", + "members":{ + } + }, + "AdminDeleteUserRequest":{ + "type":"structure", + "required":[ + "UserPoolId", + "Username" + ], + "members":{ + "UserPoolId":{"shape":"UserPoolIdType"}, + "Username":{"shape":"UsernameType"} + } + }, + "AdminDisableUserRequest":{ + "type":"structure", + "required":[ + "UserPoolId", + "Username" + ], + "members":{ + "UserPoolId":{"shape":"UserPoolIdType"}, + "Username":{"shape":"UsernameType"} + } + }, + "AdminDisableUserResponse":{ + "type":"structure", + "members":{ + } + }, + "AdminEnableUserRequest":{ + "type":"structure", + "required":[ + "UserPoolId", + "Username" + ], + "members":{ + "UserPoolId":{"shape":"UserPoolIdType"}, + "Username":{"shape":"UsernameType"} + } + }, + "AdminEnableUserResponse":{ + "type":"structure", + "members":{ + } + }, + "AdminGetUserRequest":{ + "type":"structure", + "required":[ + "UserPoolId", + "Username" + ], + "members":{ + "UserPoolId":{"shape":"UserPoolIdType"}, + "Username":{"shape":"UsernameType"} + } + }, + "AdminGetUserResponse":{ + "type":"structure", + "required":["Username"], + "members":{ + "Username":{"shape":"UsernameType"}, + "UserAttributes":{"shape":"AttributeListType"}, + "UserCreateDate":{"shape":"DateType"}, + "UserLastModifiedDate":{"shape":"DateType"}, + "Enabled":{"shape":"BooleanType"}, + "UserStatus":{"shape":"UserStatusType"}, + "MFAOptions":{"shape":"MFAOptionListType"} + } + }, + "AdminResetUserPasswordRequest":{ + "type":"structure", + "required":[ + "UserPoolId", + "Username" + ], + "members":{ + "UserPoolId":{"shape":"UserPoolIdType"}, + "Username":{"shape":"UsernameType"} + } + }, + "AdminResetUserPasswordResponse":{ + "type":"structure", + "members":{ + } + }, + "AdminSetUserSettingsRequest":{ + "type":"structure", + "required":[ + "UserPoolId", + "Username", + "MFAOptions" + ], + "members":{ + "UserPoolId":{"shape":"UserPoolIdType"}, + "Username":{"shape":"UsernameType"}, + "MFAOptions":{"shape":"MFAOptionListType"} + } + }, + "AdminSetUserSettingsResponse":{ + "type":"structure", + "members":{ + } + }, + "AdminUpdateUserAttributesRequest":{ + "type":"structure", + "required":[ + "UserPoolId", + "Username", + "UserAttributes" + ], + "members":{ + "UserPoolId":{"shape":"UserPoolIdType"}, + "Username":{"shape":"UsernameType"}, + "UserAttributes":{"shape":"AttributeListType"} + } + }, + "AdminUpdateUserAttributesResponse":{ + "type":"structure", + "members":{ + } + }, + "AliasAttributeType":{ + "type":"string", + "enum":[ + "phone_number", + "email", + "preferred_username" + ] + }, + "AliasAttributesListType":{ + "type":"list", + "member":{"shape":"AliasAttributeType"} + }, + "AliasExistsException":{ + "type":"structure", + "members":{ + "message":{"shape":"MessageType"} + }, + "exception":true + }, + "ArnType":{ + "type":"string", + "max":2048, + "min":20 + }, + "AttributeDataType":{ + "type":"string", + "enum":[ + "String", + "Number", + "DateTime", + "Boolean" + ] + }, + "AttributeListType":{ + "type":"list", + "member":{"shape":"AttributeType"} + }, + "AttributeNameListType":{ + "type":"list", + "member":{"shape":"AttributeNameType"} + }, + "AttributeNameType":{ + "type":"string", + "max":32, + "min":1, + 
"pattern":"[\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}]+" + }, + "AttributeType":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"AttributeNameType"}, + "Value":{"shape":"AttributeValueType"} + } + }, + "AttributeValueType":{ + "type":"string", + "max":256, + "sensitive":true + }, + "BooleanType":{"type":"boolean"}, + "ChangePasswordRequest":{ + "type":"structure", + "required":[ + "PreviousPassword", + "ProposedPassword" + ], + "members":{ + "PreviousPassword":{"shape":"PasswordType"}, + "ProposedPassword":{"shape":"PasswordType"}, + "AccessToken":{"shape":"TokenModelType"} + } + }, + "ChangePasswordResponse":{ + "type":"structure", + "members":{ + } + }, + "ClientIdType":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[\\w+]+", + "sensitive":true + }, + "ClientNameType":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[\\w\\s+=,.@-]+" + }, + "ClientSecretType":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[\\w+]+", + "sensitive":true + }, + "CodeDeliveryDetailsListType":{ + "type":"list", + "member":{"shape":"CodeDeliveryDetailsType"} + }, + "CodeDeliveryDetailsType":{ + "type":"structure", + "members":{ + "Destination":{"shape":"StringType"}, + "DeliveryMedium":{"shape":"DeliveryMediumType"}, + "AttributeName":{"shape":"AttributeNameType"} + } + }, + "CodeMismatchException":{ + "type":"structure", + "members":{ + "message":{"shape":"MessageType"} + }, + "exception":true + }, + "ConcurrentModificationException":{ + "type":"structure", + "members":{ + "message":{"shape":"MessageType"} + }, + "exception":true + }, + "ConfirmForgotPasswordRequest":{ + "type":"structure", + "required":[ + "ClientId", + "Username", + "ConfirmationCode", + "Password" + ], + "members":{ + "ClientId":{"shape":"ClientIdType"}, + "SecretHash":{"shape":"SecretHashType"}, + "Username":{"shape":"UsernameType"}, + "ConfirmationCode":{"shape":"ConfirmationCodeType"}, + "Password":{"shape":"PasswordType"} + } + }, + "ConfirmForgotPasswordResponse":{ + "type":"structure", + "members":{ + } + }, + "ConfirmSignUpRequest":{ + "type":"structure", + "required":[ + "ClientId", + "Username", + "ConfirmationCode" + ], + "members":{ + "ClientId":{"shape":"ClientIdType"}, + "SecretHash":{"shape":"SecretHashType"}, + "Username":{"shape":"UsernameType"}, + "ConfirmationCode":{"shape":"ConfirmationCodeType"}, + "ForceAliasCreation":{"shape":"ForceAliasCreation"} + } + }, + "ConfirmSignUpResponse":{ + "type":"structure", + "members":{ + } + }, + "ConfirmationCodeType":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"[\\S]+" + }, + "CreateUserPoolClientRequest":{ + "type":"structure", + "required":[ + "UserPoolId", + "ClientName" + ], + "members":{ + "UserPoolId":{"shape":"UserPoolIdType"}, + "ClientName":{"shape":"ClientNameType"}, + "GenerateSecret":{"shape":"GenerateSecret"} + } + }, + "CreateUserPoolClientResponse":{ + "type":"structure", + "members":{ + "UserPoolClient":{"shape":"UserPoolClientType"} + } + }, + "CreateUserPoolRequest":{ + "type":"structure", + "required":["PoolName"], + "members":{ + "PoolName":{"shape":"UserPoolNameType"}, + "Policies":{"shape":"UserPoolPolicyType"}, + "LambdaConfig":{"shape":"LambdaConfigType"}, + "AutoVerifiedAttributes":{"shape":"VerifiedAttributesListType"}, + "AliasAttributes":{"shape":"AliasAttributesListType"}, + "SmsVerificationMessage":{"shape":"SmsVerificationMessageType"}, + "EmailVerificationMessage":{"shape":"EmailVerificationMessageType"}, + "EmailVerificationSubject":{"shape":"EmailVerificationSubjectType"}, + 
"SmsAuthenticationMessage":{"shape":"SmsVerificationMessageType"}, + "MfaConfiguration":{"shape":"UserPoolMfaType"} + } + }, + "CreateUserPoolResponse":{ + "type":"structure", + "members":{ + "UserPool":{"shape":"UserPoolType"} + } + }, + "CustomAttributeNameType":{ + "type":"string", + "max":20, + "min":1, + "pattern":"[\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}]+" + }, + "CustomAttributesListType":{ + "type":"list", + "member":{"shape":"SchemaAttributeType"}, + "max":25, + "min":1 + }, + "DateType":{"type":"timestamp"}, + "DeleteUserAttributesRequest":{ + "type":"structure", + "required":["UserAttributeNames"], + "members":{ + "UserAttributeNames":{"shape":"AttributeNameListType"}, + "AccessToken":{"shape":"TokenModelType"} + } + }, + "DeleteUserAttributesResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteUserPoolClientRequest":{ + "type":"structure", + "required":[ + "UserPoolId", + "ClientId" + ], + "members":{ + "UserPoolId":{"shape":"UserPoolIdType"}, + "ClientId":{"shape":"ClientIdType"} + } + }, + "DeleteUserPoolRequest":{ + "type":"structure", + "required":["UserPoolId"], + "members":{ + "UserPoolId":{"shape":"UserPoolIdType"} + } + }, + "DeleteUserRequest":{ + "type":"structure", + "members":{ + "AccessToken":{"shape":"TokenModelType"} + } + }, + "DeliveryMediumType":{ + "type":"string", + "enum":[ + "SMS", + "EMAIL" + ] + }, + "DescribeUserPoolClientRequest":{ + "type":"structure", + "required":[ + "UserPoolId", + "ClientId" + ], + "members":{ + "UserPoolId":{"shape":"UserPoolIdType"}, + "ClientId":{"shape":"ClientIdType"} + } + }, + "DescribeUserPoolClientResponse":{ + "type":"structure", + "members":{ + "UserPoolClient":{"shape":"UserPoolClientType"} + } + }, + "DescribeUserPoolRequest":{ + "type":"structure", + "required":["UserPoolId"], + "members":{ + "UserPoolId":{"shape":"UserPoolIdType"} + } + }, + "DescribeUserPoolResponse":{ + "type":"structure", + "members":{ + "UserPool":{"shape":"UserPoolType"} + } + }, + "EmailVerificationMessageType":{ + "type":"string", + "max":2048, + "min":6, + "pattern":"[\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}\\s*]*\\{####\\}[\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}\\s*]*" + }, + "EmailVerificationSubjectType":{ + "type":"string", + "max":140, + "min":1, + "pattern":"[\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}\\s]+" + }, + "ExpiredCodeException":{ + "type":"structure", + "members":{ + "message":{"shape":"MessageType"} + }, + "exception":true + }, + "ForceAliasCreation":{"type":"boolean"}, + "ForgotPasswordRequest":{ + "type":"structure", + "required":[ + "ClientId", + "Username" + ], + "members":{ + "ClientId":{"shape":"ClientIdType"}, + "SecretHash":{"shape":"SecretHashType"}, + "Username":{"shape":"UsernameType"} + } + }, + "ForgotPasswordResponse":{ + "type":"structure", + "members":{ + "CodeDeliveryDetails":{"shape":"CodeDeliveryDetailsType"} + } + }, + "GenerateSecret":{"type":"boolean"}, + "GetUserAttributeVerificationCodeRequest":{ + "type":"structure", + "required":["AttributeName"], + "members":{ + "AccessToken":{"shape":"TokenModelType"}, + "AttributeName":{"shape":"AttributeNameType"} + } + }, + "GetUserAttributeVerificationCodeResponse":{ + "type":"structure", + "members":{ + "CodeDeliveryDetails":{"shape":"CodeDeliveryDetailsType"} + } + }, + "GetUserRequest":{ + "type":"structure", + "members":{ + "AccessToken":{"shape":"TokenModelType"} + } + }, + "GetUserResponse":{ + "type":"structure", + "required":[ + "Username", + "UserAttributes" + ], + "members":{ + "Username":{"shape":"UsernameType"}, + "UserAttributes":{"shape":"AttributeListType"}, + 
"MFAOptions":{"shape":"MFAOptionListType"} + } + }, + "IntegerType":{"type":"integer"}, + "InternalErrorException":{ + "type":"structure", + "members":{ + "message":{"shape":"MessageType"} + }, + "exception":true, + "fault":true + }, + "InvalidLambdaResponseException":{ + "type":"structure", + "members":{ + "message":{"shape":"MessageType"} + }, + "exception":true + }, + "InvalidParameterException":{ + "type":"structure", + "members":{ + "message":{"shape":"MessageType"} + }, + "exception":true + }, + "InvalidPasswordException":{ + "type":"structure", + "members":{ + "message":{"shape":"MessageType"} + }, + "exception":true + }, + "LambdaConfigType":{ + "type":"structure", + "members":{ + "PreSignUp":{"shape":"ArnType"}, + "CustomMessage":{"shape":"ArnType"}, + "PostConfirmation":{"shape":"ArnType"}, + "PreAuthentication":{"shape":"ArnType"}, + "PostAuthentication":{"shape":"ArnType"} + } + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"MessageType"} + }, + "exception":true + }, + "ListUserPoolClientsRequest":{ + "type":"structure", + "required":["UserPoolId"], + "members":{ + "UserPoolId":{"shape":"UserPoolIdType"}, + "MaxResults":{"shape":"QueryLimit"}, + "NextToken":{"shape":"PaginationKey"} + } + }, + "ListUserPoolClientsResponse":{ + "type":"structure", + "members":{ + "UserPoolClients":{"shape":"UserPoolClientListType"}, + "NextToken":{"shape":"PaginationKey"} + } + }, + "ListUserPoolsRequest":{ + "type":"structure", + "required":["MaxResults"], + "members":{ + "NextToken":{"shape":"PaginationKeyType"}, + "MaxResults":{"shape":"QueryLimitType"} + } + }, + "ListUserPoolsResponse":{ + "type":"structure", + "members":{ + "UserPools":{"shape":"UserPoolListType"}, + "NextToken":{"shape":"PaginationKeyType"} + } + }, + "ListUsersRequest":{ + "type":"structure", + "required":["UserPoolId"], + "members":{ + "UserPoolId":{"shape":"UserPoolIdType"}, + "AttributesToGet":{"shape":"SearchedAttributeNamesListType"}, + "Limit":{"shape":"QueryLimitType"}, + "PaginationToken":{"shape":"SearchPaginationTokenType"}, + "UserStatus":{"shape":"UserStatusType"} + } + }, + "ListUsersResponse":{ + "type":"structure", + "members":{ + "Users":{"shape":"UsersListType"}, + "PaginationToken":{"shape":"SearchPaginationTokenType"} + } + }, + "MFAOptionListType":{ + "type":"list", + "member":{"shape":"MFAOptionType"} + }, + "MFAOptionType":{ + "type":"structure", + "members":{ + "DeliveryMedium":{"shape":"DeliveryMediumType"}, + "AttributeName":{"shape":"AttributeNameType"} + } + }, + "MessageType":{"type":"string"}, + "NotAuthorizedException":{ + "type":"structure", + "members":{ + "message":{"shape":"MessageType"} + }, + "exception":true + }, + "NumberAttributeConstraintsType":{ + "type":"structure", + "members":{ + "MinValue":{"shape":"StringType"}, + "MaxValue":{"shape":"StringType"} + } + }, + "PaginationKey":{ + "type":"string", + "min":1 + }, + "PaginationKeyType":{ + "type":"string", + "min":1, + "pattern":"[\\S]+" + }, + "PasswordPolicyMinLengthType":{ + "type":"integer", + "max":99, + "min":6 + }, + "PasswordPolicyType":{ + "type":"structure", + "members":{ + "MinimumLength":{"shape":"PasswordPolicyMinLengthType"}, + "RequireUppercase":{"shape":"BooleanType"}, + "RequireLowercase":{"shape":"BooleanType"}, + "RequireNumbers":{"shape":"BooleanType"}, + "RequireSymbols":{"shape":"BooleanType"} + } + }, + "PasswordType":{ + "type":"string", + "max":256, + "min":6, + "pattern":"[\\S]+", + "sensitive":true + }, + "QueryLimit":{ + "type":"integer", + "max":60, + "min":1 
+ }, + "QueryLimitType":{ + "type":"integer", + "max":60, + "min":1 + }, + "ResendConfirmationCodeRequest":{ + "type":"structure", + "required":[ + "ClientId", + "Username" + ], + "members":{ + "ClientId":{"shape":"ClientIdType"}, + "SecretHash":{"shape":"SecretHashType"}, + "Username":{"shape":"UsernameType"} + } + }, + "ResendConfirmationCodeResponse":{ + "type":"structure", + "members":{ + "CodeDeliveryDetails":{"shape":"CodeDeliveryDetailsType"} + } + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"MessageType"} + }, + "exception":true + }, + "SchemaAttributeType":{ + "type":"structure", + "members":{ + "Name":{"shape":"CustomAttributeNameType"}, + "AttributeDataType":{"shape":"AttributeDataType"}, + "DeveloperOnlyAttribute":{"shape":"BooleanType"}, + "Mutable":{"shape":"BooleanType"}, + "Required":{"shape":"BooleanType"}, + "NumberAttributeConstraints":{"shape":"NumberAttributeConstraintsType"}, + "StringAttributeConstraints":{"shape":"StringAttributeConstraintsType"} + } + }, + "SchemaAttributesListType":{ + "type":"list", + "member":{"shape":"SchemaAttributeType"}, + "max":50, + "min":1 + }, + "SearchPaginationTokenType":{ + "type":"string", + "min":1 + }, + "SearchedAttributeNamesListType":{ + "type":"list", + "member":{"shape":"AttributeNameType"} + }, + "SecretHashType":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[\\w+=/]+", + "sensitive":true + }, + "SetUserSettingsRequest":{ + "type":"structure", + "required":[ + "AccessToken", + "MFAOptions" + ], + "members":{ + "AccessToken":{"shape":"TokenModelType"}, + "MFAOptions":{"shape":"MFAOptionListType"} + } + }, + "SetUserSettingsResponse":{ + "type":"structure", + "members":{ + } + }, + "SignUpRequest":{ + "type":"structure", + "required":[ + "ClientId", + "Username", + "Password" + ], + "members":{ + "ClientId":{"shape":"ClientIdType"}, + "SecretHash":{"shape":"SecretHashType"}, + "Username":{"shape":"UsernameType"}, + "Password":{"shape":"PasswordType"}, + "UserAttributes":{"shape":"AttributeListType"}, + "ValidationData":{"shape":"AttributeListType"} + } + }, + "SignUpResponse":{ + "type":"structure", + "members":{ + "UserConfirmed":{"shape":"BooleanType"}, + "CodeDeliveryDetails":{"shape":"CodeDeliveryDetailsType"} + } + }, + "SmsVerificationMessageType":{ + "type":"string", + "max":140, + "min":6, + "pattern":".*\\{####\\}.*" + }, + "StatusType":{ + "type":"string", + "enum":[ + "Enabled", + "Disabled" + ] + }, + "StringAttributeConstraintsType":{ + "type":"structure", + "members":{ + "MinLength":{"shape":"StringType"}, + "MaxLength":{"shape":"StringType"} + } + }, + "StringType":{"type":"string"}, + "TokenModelType":{ + "type":"string", + "sensitive":true + }, + "TooManyFailedAttemptsException":{ + "type":"structure", + "members":{ + "message":{"shape":"MessageType"} + }, + "exception":true + }, + "TooManyRequestsException":{ + "type":"structure", + "members":{ + "message":{"shape":"MessageType"} + }, + "exception":true + }, + "UnexpectedLambdaException":{ + "type":"structure", + "members":{ + "message":{"shape":"MessageType"} + }, + "exception":true + }, + "UpdateUserAttributesRequest":{ + "type":"structure", + "required":["UserAttributes"], + "members":{ + "UserAttributes":{"shape":"AttributeListType"}, + "AccessToken":{"shape":"TokenModelType"} + } + }, + "UpdateUserAttributesResponse":{ + "type":"structure", + "members":{ + "CodeDeliveryDetailsList":{"shape":"CodeDeliveryDetailsListType"} + } + }, + "UpdateUserPoolClientRequest":{ + "type":"structure", + 
"required":[ + "UserPoolId", + "ClientId" + ], + "members":{ + "UserPoolId":{"shape":"UserPoolIdType"}, + "ClientId":{"shape":"ClientIdType"}, + "ClientName":{"shape":"ClientNameType"} + } + }, + "UpdateUserPoolClientResponse":{ + "type":"structure", + "members":{ + "UserPoolClient":{"shape":"UserPoolClientType"} + } + }, + "UpdateUserPoolRequest":{ + "type":"structure", + "required":["UserPoolId"], + "members":{ + "UserPoolId":{"shape":"UserPoolIdType"}, + "Policies":{"shape":"UserPoolPolicyType"}, + "LambdaConfig":{"shape":"LambdaConfigType"}, + "AutoVerifiedAttributes":{"shape":"VerifiedAttributesListType"}, + "SmsVerificationMessage":{"shape":"SmsVerificationMessageType"}, + "EmailVerificationMessage":{"shape":"EmailVerificationMessageType"}, + "EmailVerificationSubject":{"shape":"EmailVerificationSubjectType"}, + "SmsAuthenticationMessage":{"shape":"SmsVerificationMessageType"}, + "MfaConfiguration":{"shape":"UserPoolMfaType"} + } + }, + "UpdateUserPoolResponse":{ + "type":"structure", + "members":{ + } + }, + "UserLambdaValidationException":{ + "type":"structure", + "members":{ + "message":{"shape":"MessageType"} + }, + "exception":true + }, + "UserPoolClientDescription":{ + "type":"structure", + "members":{ + "ClientId":{"shape":"ClientIdType"}, + "UserPoolId":{"shape":"UserPoolIdType"}, + "ClientName":{"shape":"ClientNameType"} + } + }, + "UserPoolClientListType":{ + "type":"list", + "member":{"shape":"UserPoolClientDescription"} + }, + "UserPoolClientType":{ + "type":"structure", + "members":{ + "UserPoolId":{"shape":"UserPoolIdType"}, + "ClientName":{"shape":"ClientNameType"}, + "ClientId":{"shape":"ClientIdType"}, + "ClientSecret":{"shape":"ClientSecretType"}, + "LastModifiedDate":{"shape":"DateType"}, + "CreationDate":{"shape":"DateType"} + } + }, + "UserPoolDescriptionType":{ + "type":"structure", + "members":{ + "Id":{"shape":"UserPoolIdType"}, + "Name":{"shape":"UserPoolNameType"}, + "LambdaConfig":{"shape":"LambdaConfigType"}, + "Status":{"shape":"StatusType"}, + "LastModifiedDate":{"shape":"DateType"}, + "CreationDate":{"shape":"DateType"} + } + }, + "UserPoolIdType":{ + "type":"string", + "max":55, + "min":1, + "pattern":"[\\w-]+.[0-9a-zA-Z-]+" + }, + "UserPoolListType":{ + "type":"list", + "member":{"shape":"UserPoolDescriptionType"} + }, + "UserPoolMfaType":{ + "type":"string", + "enum":[ + "OFF", + "ON", + "OPTIONAL" + ] + }, + "UserPoolNameType":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[\\w\\s+=,.@-]+" + }, + "UserPoolPolicyType":{ + "type":"structure", + "members":{ + "PasswordPolicy":{"shape":"PasswordPolicyType"} + } + }, + "UserPoolType":{ + "type":"structure", + "members":{ + "Id":{"shape":"UserPoolIdType"}, + "Name":{"shape":"UserPoolNameType"}, + "Policies":{"shape":"UserPoolPolicyType"}, + "LambdaConfig":{"shape":"LambdaConfigType"}, + "Status":{"shape":"StatusType"}, + "LastModifiedDate":{"shape":"DateType"}, + "CreationDate":{"shape":"DateType"}, + "SchemaAttributes":{"shape":"SchemaAttributesListType"}, + "AutoVerifiedAttributes":{"shape":"VerifiedAttributesListType"}, + "AliasAttributes":{"shape":"AliasAttributesListType"}, + "SmsVerificationMessage":{"shape":"SmsVerificationMessageType"}, + "EmailVerificationMessage":{"shape":"EmailVerificationMessageType"}, + "EmailVerificationSubject":{"shape":"EmailVerificationSubjectType"}, + "SmsAuthenticationMessage":{"shape":"SmsVerificationMessageType"}, + "MfaConfiguration":{"shape":"UserPoolMfaType"}, + "EstimatedNumberOfUsers":{"shape":"IntegerType"} + } + }, + "UserStatusType":{ + 
"type":"string", + "enum":[ + "UNCONFIRMED", + "CONFIRMED", + "ARCHIVED", + "COMPROMISED", + "UNKNOWN" + ] + }, + "UserType":{ + "type":"structure", + "members":{ + "Username":{"shape":"UsernameType"}, + "Attributes":{"shape":"AttributeListType"}, + "UserCreateDate":{"shape":"DateType"}, + "UserLastModifiedDate":{"shape":"DateType"}, + "Enabled":{"shape":"BooleanType"}, + "UserStatus":{"shape":"UserStatusType"} + } + }, + "UsernameExistsException":{ + "type":"structure", + "members":{ + "message":{"shape":"MessageType"} + }, + "exception":true + }, + "UsernameType":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}]+", + "sensitive":true + }, + "UsersListType":{ + "type":"list", + "member":{"shape":"UserType"} + }, + "VerifiedAttributeType":{ + "type":"string", + "enum":[ + "phone_number", + "email" + ] + }, + "VerifiedAttributesListType":{ + "type":"list", + "member":{"shape":"VerifiedAttributeType"} + }, + "VerifyUserAttributeRequest":{ + "type":"structure", + "required":[ + "AttributeName", + "Code" + ], + "members":{ + "AccessToken":{"shape":"TokenModelType"}, + "AttributeName":{"shape":"AttributeNameType"}, + "Code":{"shape":"ConfirmationCodeType"} + } + }, + "VerifyUserAttributeResponse":{ + "type":"structure", + "members":{ + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cognito-idp/2016-04-18/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cognito-idp/2016-04-18/docs-2.json new file mode 100644 index 000000000..bd800b6e4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cognito-idp/2016-04-18/docs-2.json @@ -0,0 +1,980 @@ +{ + "version": "2.0", + "service": "
You can create a user pool in Amazon Cognito Identity to manage directories and users. You can authenticate a user to obtain tokens related to user identity and access policies. This API reference provides information about user pools in Amazon Cognito Identity, which is a new capability that is available as a beta.",
+ "operations": { + "AddCustomAttributes": "Adds additional user attributes to the user pool schema.", + "AdminConfirmSignUp": "Confirms user registration as an admin without using a confirmation code. Works on any user.", + "AdminDeleteUser": "Deletes a user as an administrator. Works on any user.", + "AdminDeleteUserAttributes": "Deletes the user attributes in a user pool as an administrator. Works on any user.", + "AdminDisableUser": "Disables the specified user as an administrator. Works on any user.", + "AdminEnableUser": "Enables the specified user as an administrator. Works on any user.", + "AdminGetUser": "Gets the specified user by user name in a user pool as an administrator. Works on any user.", + "AdminResetUserPassword": "Resets the specified user's password in a user pool as an administrator. Works on any user.", + "AdminSetUserSettings": "Sets all the user settings for a specified user name. Works on any user.", + "AdminUpdateUserAttributes": "Updates the specified user's attributes, including developer attributes, as an administrator. Works on any user.",
+ "ChangePassword": "Changes the password for a specified user in a user pool.", + "ConfirmForgotPassword": "Allows a user to enter a code provided when they reset their password to update their password.", + "ConfirmSignUp": "Confirms registration of a user and handles the existing alias from a previous user.", + "CreateUserPool": "Creates a new Amazon Cognito user pool and sets the password policy for the pool.", + "CreateUserPoolClient": "Creates the user pool client.", + "DeleteUser": "Allows a user to delete their own account.", + "DeleteUserAttributes": "Deletes the attributes for a user.", + "DeleteUserPool": "Deletes the specified Amazon Cognito user pool.", + "DeleteUserPoolClient": "Allows the developer to delete the user pool client.", + "DescribeUserPool": "Returns the configuration information and metadata of the specified user pool.", + "DescribeUserPoolClient": "Client method for returning the configuration information and metadata of the specified user pool client.",
+ "ForgotPassword": "Retrieves the password for the specified client ID or username.", + "GetUser": "Gets the user attributes and metadata for a user.", + "GetUserAttributeVerificationCode": "Gets the user attribute verification code for the specified attribute name.", + "ListUserPoolClients": "Lists the clients that have been created for the specified user pool.", + "ListUserPools": "Lists the user pools associated with an AWS account.", + "ListUsers": "Lists the users in the Amazon Cognito user pool.", + "ResendConfirmationCode": "Resends the confirmation (for confirmation of registration) to a specific user in the user pool.", + "SetUserSettings": "Sets the user settings like multi-factor authentication (MFA). If MFA is to be removed for a particular attribute, pass the attribute with code delivery as null. If a null list is passed, all MFA options are removed.", + "SignUp": "Registers the user in the specified user pool and creates a user name, password, and user attributes.", + "UpdateUserAttributes": "Allows a user to update a specific attribute (one at a time).", + "UpdateUserPool": "Updates the specified user pool with the specified attributes.", + "UpdateUserPoolClient": "Allows the developer to update the specified user pool client and password policy.", + "VerifyUserAttribute": "Verifies the specified user attributes in the user pool." + },
+ "shapes": { + "AddCustomAttributesRequest": { + "base": "
Represents the request to add custom attributes.", + "refs": { + } + }, + "AddCustomAttributesResponse": { + "base": "Represents the response from the server for the request to add custom attributes.", + "refs": { + } + }, + "AdminConfirmSignUpRequest": { + "base": "Represents the request to confirm user registration.", + "refs": { + } + }, + "AdminConfirmSignUpResponse": { + "base": "Represents the response from the server for the request to confirm registration.", + "refs": { + } + },
+ "AdminDeleteUserAttributesRequest": { + "base": "Represents the request to delete user attributes as an administrator.", + "refs": { + } + }, + "AdminDeleteUserAttributesResponse": { + "base": "Represents the response received from the server for a request to delete user attributes.", + "refs": { + } + }, + "AdminDeleteUserRequest": { + "base": "Represents the request to delete a user as an administrator.", + "refs": { + } + }, + "AdminDisableUserRequest": { + "base": "Represents the request to disable any user as an administrator.", + "refs": { + } + }, + "AdminDisableUserResponse": { + "base": "Represents the response received from the server to disable the user as an administrator.", + "refs": { + } + }, + "AdminEnableUserRequest": { + "base": "Represents the request that enables the user as an administrator.", + "refs": { + } + }, + "AdminEnableUserResponse": { + "base": "Represents the response from the server for the request to enable a user as an administrator.", + "refs": { + } + },
+ "AdminGetUserRequest": { + "base": "Represents the request to get the specified user as an administrator.", + "refs": { + } + }, + "AdminGetUserResponse": { + "base": "Represents the response from the server for the request to get the specified user as an administrator.", + "refs": { + } + }, + "AdminResetUserPasswordRequest": { + "base": "Represents the request to reset a user's password as an administrator.", + "refs": { + } + }, + "AdminResetUserPasswordResponse": { + "base": "Represents the response from the server to reset a user password as an administrator.", + "refs": { + } + }, + "AdminSetUserSettingsRequest": { + "base": "Represents the request to set user settings as an administrator.", + "refs": { + } + }, + "AdminSetUserSettingsResponse": { + "base": "Represents the response from the server to set user settings as an administrator.", + "refs": { + } + }, + "AdminUpdateUserAttributesRequest": { + "base": "Represents the request to update the user's attributes as an administrator.", + "refs": { + } + }, + "AdminUpdateUserAttributesResponse": { + "base": "Represents the response from the server for the request to update user attributes as an administrator.", + "refs": { + } + },
+ "AliasAttributeType": { + "base": null, + "refs": { + "AliasAttributesListType$member": null + } + }, + "AliasAttributesListType": { + "base": null, + "refs": { + "CreateUserPoolRequest$AliasAttributes": "Attributes supported as an alias for this user pool. Possible values: phone_number, email, or preferred_username.", + "UserPoolType$AliasAttributes": "Specifies the attributes that are aliased in a user pool." + } + }, + "AliasExistsException": { + "base": "
    This exception is thrown when a user tries to confirm the account with an email or phone number that has already been supplied as an alias from a different account. This exception tells user that an account with this email or phone already exists.

    ", + "refs": { + } + }, + "ArnType": { + "base": null, + "refs": { + "LambdaConfigType$PreSignUp": "

    A pre-registration AWS Lambda trigger.

    ", + "LambdaConfigType$CustomMessage": "

    A custom Message AWS Lambda trigger.

    ", + "LambdaConfigType$PostConfirmation": "

    A post-confirmation AWS Lambda trigger.

    ", + "LambdaConfigType$PreAuthentication": "

    A pre-authentication AWS Lambda trigger.

    ", + "LambdaConfigType$PostAuthentication": "

    A post-authentication AWS Lambda trigger.

    " + } + }, + "AttributeDataType": { + "base": null, + "refs": { + "SchemaAttributeType$AttributeDataType": "

    The attribute data type.

    " + } + }, + "AttributeListType": { + "base": null, + "refs": { + "AdminGetUserResponse$UserAttributes": "

    An array of name-value pairs representing user attributes.

    ", + "AdminUpdateUserAttributesRequest$UserAttributes": "

    An array of name-value pairs representing user attributes.

    ", + "GetUserResponse$UserAttributes": "

    An array of name-value pairs representing user attributes.

    ", + "SignUpRequest$UserAttributes": "

    An array of name-value pairs representing user attributes.

    ", + "SignUpRequest$ValidationData": "

    The validation data in the request to register a user.

    ", + "UpdateUserAttributesRequest$UserAttributes": "

    An array of name-value pairs representing user attributes.

    ", + "UserType$Attributes": "

    A container with information about the user type attributes.

    " + } + }, + "AttributeNameListType": { + "base": null, + "refs": { + "AdminDeleteUserAttributesRequest$UserAttributeNames": "

    An array of strings representing the user attribute names you wish to delete.

    ", + "DeleteUserAttributesRequest$UserAttributeNames": "

    An array of strings representing the user attribute names you wish to delete.

    " + } + }, + "AttributeNameType": { + "base": null, + "refs": { + "AttributeNameListType$member": null, + "AttributeType$Name": "

    The name of the attribute.

    ", + "CodeDeliveryDetailsType$AttributeName": "

    The name of the attribute in the code delivery details type.

    ", + "GetUserAttributeVerificationCodeRequest$AttributeName": "

    The attribute name in the request to get the user attribute verification code.

    ", + "MFAOptionType$AttributeName": "

    The attribute name of the MFA option type.

    ", + "SearchedAttributeNamesListType$member": null, + "VerifyUserAttributeRequest$AttributeName": "

    The attribute name in the request to verify user attributes.

    " + } + }, + "AttributeType": { + "base": "

    Specifies whether the attribute is standard or custom.

    ", + "refs": { + "AttributeListType$member": null + } + }, + "AttributeValueType": { + "base": null, + "refs": { + "AttributeType$Value": "

    The value of the attribute.

    " + } + }, + "BooleanType": { + "base": null, + "refs": { + "AdminGetUserResponse$Enabled": "

    Indicates that the status is enabled.

    ", + "PasswordPolicyType$RequireUppercase": "

    In the password policy that you have set, refers to whether you have required users to use at least one uppercase letter in their password.

    ", + "PasswordPolicyType$RequireLowercase": "

    In the password policy that you have set, refers to whether you have required users to use at least one lowercase letter in their password.

    ", + "PasswordPolicyType$RequireNumbers": "

    In the password policy that you have set, refers to whether you have required users to use at least one number in their password.

    ", + "PasswordPolicyType$RequireSymbols": "

    In the password policy that you have set, refers to whether you have required users to use at least one symbol in their password.

    ", + "SchemaAttributeType$DeveloperOnlyAttribute": "

    Specifies whether the attribute type is developer only.

    ", + "SchemaAttributeType$Mutable": "

    Specifies whether the attribute can be changed once it has been created.

    ", + "SchemaAttributeType$Required": "

    Specifies whether a user pool attribute is required. If the attribute is required and the user does not provide a value, registration or sign-in will fail.

    ", + "SignUpResponse$UserConfirmed": "

    A response from the server indicating that a user registration has been confirmed.

    ", + "UserType$Enabled": "

    Specifies whether the user is enabled.

    " + } + }, + "ChangePasswordRequest": { + "base": "

    Represents the request to change a user password.

    ", + "refs": { + } + }, + "ChangePasswordResponse": { + "base": "

    The response from the server to the change password request.

    ", + "refs": { + } + }, + "ClientIdType": { + "base": null, + "refs": { + "ConfirmForgotPasswordRequest$ClientId": "

    The ID of the client associated with the user pool.

    ", + "ConfirmSignUpRequest$ClientId": "

    The ID of the client associated with the user pool.

    ", + "DeleteUserPoolClientRequest$ClientId": "

    The ID of the client associated with the user pool.

    ", + "DescribeUserPoolClientRequest$ClientId": "

    The ID of the client associated with the user pool.

    ", + "ForgotPasswordRequest$ClientId": "

    The ID of the client associated with the user pool.

    ", + "ResendConfirmationCodeRequest$ClientId": "

    The ID of the client associated with the user pool.

    ", + "SignUpRequest$ClientId": "

    The ID of the client associated with the user pool.

    ", + "UpdateUserPoolClientRequest$ClientId": "

    The ID of the client associated with the user pool.

    ", + "UserPoolClientDescription$ClientId": "

    The ID of the client associated with the user pool.

    ", + "UserPoolClientType$ClientId": "

    The ID of the client associated with the user pool.

    " + } + }, + "ClientNameType": { + "base": null, + "refs": { + "CreateUserPoolClientRequest$ClientName": "

    The client name for the user pool client you would like to create.

    ", + "UpdateUserPoolClientRequest$ClientName": "

    The client name from the update user pool client request.

    ", + "UserPoolClientDescription$ClientName": "

    The client name from the user pool client description.

    ", + "UserPoolClientType$ClientName": "

    The client name from the user pool request of the client type.

    " + } + }, + "ClientSecretType": { + "base": null, + "refs": { + "UserPoolClientType$ClientSecret": "

    The client secret from the user pool request of the client type.

    " + } + }, + "CodeDeliveryDetailsListType": { + "base": null, + "refs": { + "UpdateUserAttributesResponse$CodeDeliveryDetailsList": "

    The code delivery details list from the server for the request to update user attributes.

    " + } + }, + "CodeDeliveryDetailsType": { + "base": "

    The type of code delivery details being returned from the server.

    ", + "refs": { + "CodeDeliveryDetailsListType$member": null, + "ForgotPasswordResponse$CodeDeliveryDetails": null, + "GetUserAttributeVerificationCodeResponse$CodeDeliveryDetails": "

    The code delivery details returned by the server in response to the request to get the user attribute verification code.

    ", + "ResendConfirmationCodeResponse$CodeDeliveryDetails": null, + "SignUpResponse$CodeDeliveryDetails": null + } + }, + "CodeMismatchException": { + "base": "

    This exception is thrown if the provided code does not match what the server was expecting.

    ", + "refs": { + } + }, + "ConcurrentModificationException": { + "base": "

    This exception is thrown if two or more modifications are happening concurrently.

    ", + "refs": { + } + }, + "ConfirmForgotPasswordRequest": { + "base": "

    The request representing the confirmation for a password reset.

    ", + "refs": { + } + }, + "ConfirmForgotPasswordResponse": { + "base": "

    The response from the server that results from a user's request to retrieve a forgotten password.

    ", + "refs": { + } + }, + "ConfirmSignUpRequest": { + "base": "

    Represents the request to confirm registration of a user.

    ", + "refs": { + } + }, + "ConfirmSignUpResponse": { + "base": "

    Represents the response from the server for the registration confirmation.

    ", + "refs": { + } + }, + "ConfirmationCodeType": { + "base": null, + "refs": { + "ConfirmForgotPasswordRequest$ConfirmationCode": "

    The confirmation code sent by a user's request to retrieve a forgotten password.

    ", + "ConfirmSignUpRequest$ConfirmationCode": "

    The confirmation code sent by a user's request to confirm registration.

    ", + "VerifyUserAttributeRequest$Code": "

    The verification code in the request to verify user attributes.

    " + } + }, + "CreateUserPoolClientRequest": { + "base": "

    Represents the request to create a user pool client.

    ", + "refs": { + } + }, + "CreateUserPoolClientResponse": { + "base": "

    Represents the response from the server to create a user pool client.

    ", + "refs": { + } + }, + "CreateUserPoolRequest": { + "base": "

    Represents the request to create a user pool.

    ", + "refs": { + } + }, + "CreateUserPoolResponse": { + "base": "

    Represents the response from the server for the request to create a user pool.

    ", + "refs": { + } + }, + "CustomAttributeNameType": { + "base": null, + "refs": { + "SchemaAttributeType$Name": "

    A schema attribute of the name type.

    " + } + }, + "CustomAttributesListType": { + "base": null, + "refs": { + "AddCustomAttributesRequest$CustomAttributes": "

    An array of custom attributes, such as Mutable and Name.
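
    As a hedged illustration, an AddCustomAttributes call built from these shapes might look like the Go sketch below (the pool ID, region, and attribute values are hypothetical; cognitoidentityprovider is assumed to be the client aws-sdk-go generates from this model):

        package main

        import (
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/cognitoidentityprovider"
        )

        func main() {
            // Hypothetical region; any configured session works.
            sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
            svc := cognitoidentityprovider.New(sess)

            // Add a mutable custom string attribute to a hypothetical pool.
            _, err := svc.AddCustomAttributes(&cognitoidentityprovider.AddCustomAttributesInput{
                UserPoolId: aws.String("us-east-1_EXAMPLE"), // hypothetical pool ID
                CustomAttributes: []*cognitoidentityprovider.SchemaAttributeType{{
                    Name:              aws.String("department"), // hypothetical attribute name
                    AttributeDataType: aws.String("String"),
                    Mutable:           aws.Bool(true),
                    StringAttributeConstraints: &cognitoidentityprovider.StringAttributeConstraintsType{
                        MinLength: aws.String("1"),
                        MaxLength: aws.String("64"),
                    },
                }},
            })
            if err != nil {
                log.Fatal(err)
            }
        }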

    " + } + }, + "DateType": { + "base": null, + "refs": { + "AdminGetUserResponse$UserCreateDate": "

    The date the user was created.

    ", + "AdminGetUserResponse$UserLastModifiedDate": "

    The date the user was last modified.

    ", + "UserPoolClientType$LastModifiedDate": "

    The last modified date from the user pool request of the client type.

    ", + "UserPoolClientType$CreationDate": "

    The creation date from the user pool request of the client type.

    ", + "UserPoolDescriptionType$LastModifiedDate": "

    The last modified date in a user pool description.

    ", + "UserPoolDescriptionType$CreationDate": "

    The creation date in a user pool description.

    ", + "UserPoolType$LastModifiedDate": "

    The last modified date of a user pool.

    ", + "UserPoolType$CreationDate": "

    The creation date of a user pool.

    ", + "UserType$UserCreateDate": "

    The creation date of the user.

    ", + "UserType$UserLastModifiedDate": "

    The last modified date of the user.

    " + } + }, + "DeleteUserAttributesRequest": { + "base": "

    Represents the request to delete user attributes.

    ", + "refs": { + } + }, + "DeleteUserAttributesResponse": { + "base": "

    Represents the response from the server to delete user attributes.

    ", + "refs": { + } + }, + "DeleteUserPoolClientRequest": { + "base": "

    Represents the request to delete a user pool client.

    ", + "refs": { + } + }, + "DeleteUserPoolRequest": { + "base": "

    Represents the request to delete a user pool.

    ", + "refs": { + } + }, + "DeleteUserRequest": { + "base": "

    Represents the request to delete a user.

    ", + "refs": { + } + }, + "DeliveryMediumType": { + "base": null, + "refs": { + "CodeDeliveryDetailsType$DeliveryMedium": "

    The delivery medium (email message or phone number).

    ", + "MFAOptionType$DeliveryMedium": "

    The delivery medium (email message or SMS message) to send the MFA code.

    " + } + }, + "DescribeUserPoolClientRequest": { + "base": "

    Represents the request to describe a user pool client.

    ", + "refs": { + } + }, + "DescribeUserPoolClientResponse": { + "base": "

    Represents the response from the server from a request to describe the user pool client.

    ", + "refs": { + } + }, + "DescribeUserPoolRequest": { + "base": "

    Represents the request to describe the user pool.

    ", + "refs": { + } + }, + "DescribeUserPoolResponse": { + "base": "

    Represents the response to describe the user pool.

    ", + "refs": { + } + }, + "EmailVerificationMessageType": { + "base": null, + "refs": { + "CreateUserPoolRequest$EmailVerificationMessage": "

    A string representing the email verification message.

    ", + "UpdateUserPoolRequest$EmailVerificationMessage": "

    The contents of the email verification message.

    ", + "UserPoolType$EmailVerificationMessage": "

    The contents of the email verification message.

    " + } + }, + "EmailVerificationSubjectType": { + "base": null, + "refs": { + "CreateUserPoolRequest$EmailVerificationSubject": "

    A string representing the email verification subject.

    ", + "UpdateUserPoolRequest$EmailVerificationSubject": "

    The subject of the email verification message.

    ", + "UserPoolType$EmailVerificationSubject": "

    The subject of the email verification message.

    " + } + }, + "ExpiredCodeException": { + "base": "

    This exception is thrown if a code has expired.

    ", + "refs": { + } + }, + "ForceAliasCreation": { + "base": null, + "refs": { + "ConfirmSignUpRequest$ForceAliasCreation": "

    A Boolean that forces user confirmation irrespective of existing aliases. Defaults to False. If set to True and the phone number or email address used for sign-up confirmation already exists as an alias for a different user, the API call migrates the alias from the previous user to the newly created user being confirmed. If set to False, the API throws an AliasExistsException error.
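
    A sketch of a confirmation call that opts into this alias migration, assuming the generated cognitoidentityprovider client and a hypothetical client ID, username, and code:

        package main

        import (
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/cognitoidentityprovider"
        )

        func main() {
            sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
            svc := cognitoidentityprovider.New(sess)

            _, err := svc.ConfirmSignUp(&cognitoidentityprovider.ConfirmSignUpInput{
                ClientId:           aws.String("example-client-id"), // hypothetical
                Username:           aws.String("jdoe"),              // hypothetical
                ConfirmationCode:   aws.String("123456"),            // code delivered to the user
                ForceAliasCreation: aws.Bool(true),                  // migrate a conflicting alias rather than fail
            })
            if err != nil {
                // With ForceAliasCreation set to false, an alias conflict
                // would instead surface as AliasExistsException.
                log.Fatal(err)
            }
        }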

    " + } + }, + "ForgotPasswordRequest": { + "base": "

    Represents the request to reset a user's password.

    ", + "refs": { + } + }, + "ForgotPasswordResponse": { + "base": "

    Represents the response from the server regarding the request to reset a password.

    ", + "refs": { + } + }, + "GenerateSecret": { + "base": null, + "refs": { + "CreateUserPoolClientRequest$GenerateSecret": "

    Boolean to specify whether you want to generate a secret for the user pool client being created.

    " + } + }, + "GetUserAttributeVerificationCodeRequest": { + "base": "

    Represents the request to get user attribute verification.

    ", + "refs": { + } + }, + "GetUserAttributeVerificationCodeResponse": { + "base": "

    The verification code response returned by the server to the request to get the user attribute verification code.

    ", + "refs": { + } + }, + "GetUserRequest": { + "base": "

    Represents the request to get information about the user.

    ", + "refs": { + } + }, + "GetUserResponse": { + "base": "

    Represents the response from the server from the request to get information about the user.

    ", + "refs": { + } + }, + "IntegerType": { + "base": null, + "refs": { + "UserPoolType$EstimatedNumberOfUsers": "

    A number estimating the size of the user pool.

    " + } + }, + "InternalErrorException": { + "base": "

    This exception is thrown when Amazon Cognito encounters an internal error.

    ", + "refs": { + } + }, + "InvalidLambdaResponseException": { + "base": "

    This exception is thrown when the Amazon Cognito service encounters an invalid AWS Lambda response.

    ", + "refs": { + } + }, + "InvalidParameterException": { + "base": "

    This exception is thrown when the Amazon Cognito service encounters an invalid parameter.

    ", + "refs": { + } + }, + "InvalidPasswordException": { + "base": "

    This exception is thrown when the Amazon Cognito service encounters an invalid password.

    ", + "refs": { + } + }, + "LambdaConfigType": { + "base": "

    Specifies the type of configuration for AWS Lambda triggers.

    ", + "refs": { + "CreateUserPoolRequest$LambdaConfig": "

    The Lambda trigger configuration information for the new user pool.

    ", + "UpdateUserPoolRequest$LambdaConfig": "

    The AWS Lambda configuration information from the request to update the user pool.

    ", + "UserPoolDescriptionType$LambdaConfig": "

    The AWS Lambda configuration information in a user pool description.

    ", + "UserPoolType$LambdaConfig": "

    A container describing the AWS Lambda triggers associated with a user pool.

    " + } + }, + "LimitExceededException": { + "base": "

    This exception is thrown when a user exceeds the limit for a requested AWS resource.

    ", + "refs": { + } + }, + "ListUserPoolClientsRequest": { + "base": "

    Represents the request to list the user pool clients.

    ", + "refs": { + } + }, + "ListUserPoolClientsResponse": { + "base": "

    Represents the response from the server that lists user pool clients.

    ", + "refs": { + } + }, + "ListUserPoolsRequest": { + "base": "

    Represents the request to list user pools.

    ", + "refs": { + } + }, + "ListUserPoolsResponse": { + "base": "

    Represents the response to list user pools.

    ", + "refs": { + } + }, + "ListUsersRequest": { + "base": "

    Represents the request to list users.

    ", + "refs": { + } + }, + "ListUsersResponse": { + "base": "

    The response from the request to list users.

    ", + "refs": { + } + }, + "MFAOptionListType": { + "base": null, + "refs": { + "AdminGetUserResponse$MFAOptions": "

    Specifies the options for MFA (e.g., email or phone number).

    ", + "AdminSetUserSettingsRequest$MFAOptions": "

    Specifies the options for MFA (e.g., email or phone number).

    ", + "GetUserResponse$MFAOptions": "

    Specifies the options for MFA (e.g., email or phone number).

    ", + "SetUserSettingsRequest$MFAOptions": "

    Specifies the options for MFA (e.g., email or phone number).

    " + } + }, + "MFAOptionType": { + "base": "

    Specifies the different settings for multi-factor authentication (MFA).

    ", + "refs": { + "MFAOptionListType$member": null + } + }, + "MessageType": { + "base": null, + "refs": { + "AliasExistsException$message": "

    The message sent to the user when an alias exists.

    ", + "CodeMismatchException$message": "

    The message provided when the code mismatch exception is thrown.

    ", + "ConcurrentModificationException$message": "

    The message provided when the concurrent modification exception is thrown.

    ", + "ExpiredCodeException$message": "

    The message returned when the expired code exception is thrown.

    ", + "InternalErrorException$message": "

    The message returned when Amazon Cognito throws an internal error exception.

    ", + "InvalidLambdaResponseException$message": "

    The message returned when the Amazon Cognito service throws an invalid AWS Lambda response exception.

    ", + "InvalidParameterException$message": "

    The message returned when the Amazon Cognito service throws an invalid parameter exception.

    ", + "InvalidPasswordException$message": "

    The message returned when the Amazon Cognito service throws an invalid user password exception.

    ", + "LimitExceededException$message": "

    The message returned when Amazon Cognito throws a limit exceeded exception.

    ", + "NotAuthorizedException$message": "

    The message returned when the Amazon Cognito service returns a not authorized exception.

    ", + "ResourceNotFoundException$message": "

    The message returned when the Amazon Cognito service returns a resource not found exception.

    ", + "TooManyFailedAttemptsException$message": "

    The message returned when the Amazon Cognito service returns a too many failed attempts exception.

    ", + "TooManyRequestsException$message": "

    The message returned when the Amazon Cognito service returns a too many requests exception.

    ", + "UnexpectedLambdaException$message": "

    The message returned when the Amazon Cognito service returns an unexpected AWS Lambda exception.

    ", + "UserLambdaValidationException$message": "

    The message returned when the Amazon Cognito service returns a user validation exception with the AWS Lambda service.

    ", + "UsernameExistsException$message": "

    The message returned when Amazon Cognito throws a user name exists exception.

    " + } + }, + "NotAuthorizedException": { + "base": "

    This exception gets thrown when a user is not authorized.

    ", + "refs": { + } + }, + "NumberAttributeConstraintsType": { + "base": "

    The minimum and maximum value of an attribute that is of the number data type.

    ", + "refs": { + "SchemaAttributeType$NumberAttributeConstraints": "

    Specifies the constraints for an attribute of the number type.

    " + } + }, + "PaginationKey": { + "base": null, + "refs": { + "ListUserPoolClientsRequest$NextToken": "

    An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

    ", + "ListUserPoolClientsResponse$NextToken": "

    An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

    " + } + }, + "PaginationKeyType": { + "base": null, + "refs": { + "ListUserPoolsRequest$NextToken": "

    An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

    ", + "ListUserPoolsResponse$NextToken": "

    An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.
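
    This token contract supports the usual fetch-until-nil pagination loop; a minimal sketch, assuming the generated client and a hypothetical region:

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/cognitoidentityprovider"
        )

        func main() {
            sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
            svc := cognitoidentityprovider.New(sess)

            input := &cognitoidentityprovider.ListUserPoolsInput{MaxResults: aws.Int64(10)}
            for {
                page, err := svc.ListUserPools(input)
                if err != nil {
                    log.Fatal(err)
                }
                for _, p := range page.UserPools {
                    fmt.Println(aws.StringValue(p.Name))
                }
                if page.NextToken == nil {
                    break // no further pages
                }
                input.NextToken = page.NextToken // feed the token back in
            }
        }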

    " + } + }, + "PasswordPolicyMinLengthType": { + "base": null, + "refs": { + "PasswordPolicyType$MinimumLength": "

    The minimum length of the password policy that you have set. Cannot be less than 6.

    " + } + }, + "PasswordPolicyType": { + "base": "

    The password policy type.

    ", + "refs": { + "UserPoolPolicyType$PasswordPolicy": "

    A container with information about the user pool password policy.
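
    A minimal sketch of wiring a password policy into CreateUserPool, assuming the generated client (the pool name and region are hypothetical; field names follow the shapes defined above):

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/cognitoidentityprovider"
        )

        func main() {
            sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
            svc := cognitoidentityprovider.New(sess)

            out, err := svc.CreateUserPool(&cognitoidentityprovider.CreateUserPoolInput{
                PoolName: aws.String("example-pool"), // hypothetical
                Policies: &cognitoidentityprovider.UserPoolPolicyType{
                    PasswordPolicy: &cognitoidentityprovider.PasswordPolicyType{
                        MinimumLength:    aws.Int64(8), // cannot be less than 6
                        RequireUppercase: aws.Bool(true),
                        RequireLowercase: aws.Bool(true),
                        RequireNumbers:   aws.Bool(true),
                        RequireSymbols:   aws.Bool(false),
                    },
                },
            })
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println(aws.StringValue(out.UserPool.Id))
        }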

    " + } + }, + "PasswordType": { + "base": null, + "refs": { + "ChangePasswordRequest$PreviousPassword": "

    The old password in the change password request.

    ", + "ChangePasswordRequest$ProposedPassword": "

    The new password in the change password request.

    ", + "ConfirmForgotPasswordRequest$Password": "

    The password sent by a user's request to retrieve a forgotten password.

    ", + "SignUpRequest$Password": "

    The password of the user you wish to register.

    " + } + }, + "QueryLimit": { + "base": null, + "refs": { + "ListUserPoolClientsRequest$MaxResults": "

    The maximum number of results you want the request to return when listing the user pool clients.

    " + } + }, + "QueryLimitType": { + "base": null, + "refs": { + "ListUserPoolsRequest$MaxResults": "

    The maximum number of results you want the request to return when listing the user pools.

    ", + "ListUsersRequest$Limit": "

    The limit of the request to list users.

    " + } + }, + "ResendConfirmationCodeRequest": { + "base": "

    Represents the request to resend the confirmation code.

    ", + "refs": { + } + }, + "ResendConfirmationCodeResponse": { + "base": "

    The response from the server when the Amazon Cognito service makes the request to resend a confirmation code.

    ", + "refs": { + } + }, + "ResourceNotFoundException": { + "base": "

    This exception is thrown when the Amazon Cognito service cannot find the requested resource.

    ", + "refs": { + } + }, + "SchemaAttributeType": { + "base": "

    Contains information about the schema attribute.

    ", + "refs": { + "CustomAttributesListType$member": null, + "SchemaAttributesListType$member": null + } + }, + "SchemaAttributesListType": { + "base": null, + "refs": { + "UserPoolType$SchemaAttributes": "

    A container with the schema attributes of a user pool.

    " + } + }, + "SearchPaginationTokenType": { + "base": null, + "refs": { + "ListUsersRequest$PaginationToken": "

    An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

    ", + "ListUsersResponse$PaginationToken": "

    An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

    " + } + }, + "SearchedAttributeNamesListType": { + "base": null, + "refs": { + "ListUsersRequest$AttributesToGet": "

    The attributes to get from the request to list users.

    " + } + }, + "SecretHashType": { + "base": null, + "refs": { + "ConfirmForgotPasswordRequest$SecretHash": "

    A keyed-hash message authentication code (HMAC) calculated using the secret key of a user pool client and username plus the client ID in the message.

    ", + "ConfirmSignUpRequest$SecretHash": "

    A keyed-hash message authentication code (HMAC) calculated using the secret key of a user pool client and username plus the client ID in the message.

    ", + "ForgotPasswordRequest$SecretHash": "

    A keyed-hash message authentication code (HMAC) calculated using the secret key of a user pool client and username plus the client ID in the message.

    ", + "ResendConfirmationCodeRequest$SecretHash": "

    A keyed-hash message authentication code (HMAC) calculated using the secret key of a user pool client and username plus the client ID in the message.

    ", + "SignUpRequest$SecretHash": "

    A keyed-hash message authentication code (HMAC) calculated using the secret key of a user pool client and username plus the client ID in the message.
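
    The HMAC in these SecretHash fields is keyed with the client secret over the username concatenated with the client ID; user pools expect an HMAC-SHA256 digest, Base64-encoded. A self-contained sketch with hypothetical values:

        package main

        import (
            "crypto/hmac"
            "crypto/sha256"
            "encoding/base64"
            "fmt"
        )

        // secretHash returns Base64(HMAC-SHA256(clientSecret, username+clientID)),
        // the value expected in the SecretHash request fields above.
        func secretHash(username, clientID, clientSecret string) string {
            mac := hmac.New(sha256.New, []byte(clientSecret))
            mac.Write([]byte(username + clientID)) // message: username plus client ID
            return base64.StdEncoding.EncodeToString(mac.Sum(nil))
        }

        func main() {
            // Hypothetical values, for illustration only.
            fmt.Println(secretHash("jdoe", "example-client-id", "example-client-secret"))
        }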

    " + } + }, + "SetUserSettingsRequest": { + "base": "

    Represents the request to set user settings.

    ", + "refs": { + } + }, + "SetUserSettingsResponse": { + "base": "

    The response from the server for a set user settings request.

    ", + "refs": { + } + }, + "SignUpRequest": { + "base": "

    Represents the request to register a user.

    ", + "refs": { + } + }, + "SignUpResponse": { + "base": "

    The response from the server for a registration request.

    ", + "refs": { + } + }, + "SmsVerificationMessageType": { + "base": null, + "refs": { + "CreateUserPoolRequest$SmsVerificationMessage": "

    A string representing the SMS verification message.

    ", + "CreateUserPoolRequest$SmsAuthenticationMessage": "

    A string representing the SMS authentication message.

    ", + "UpdateUserPoolRequest$SmsVerificationMessage": "

    A container with information about the SMS verification message.

    ", + "UpdateUserPoolRequest$SmsAuthenticationMessage": "

    The contents of the SMS authentication message.

    ", + "UserPoolType$SmsVerificationMessage": "

    The contents of the SMS verification message.

    ", + "UserPoolType$SmsAuthenticationMessage": "

    The contents of the SMS authentication message.

    " + } + }, + "StatusType": { + "base": null, + "refs": { + "UserPoolDescriptionType$Status": "

    The user pool status in a user pool description.

    ", + "UserPoolType$Status": "

    The status of a user pool.

    " + } + }, + "StringAttributeConstraintsType": { + "base": "

    The type of constraints associated with an attribute of the string type.

    ", + "refs": { + "SchemaAttributeType$StringAttributeConstraints": "

    Specifies the constraints for an attribute of the string type.

    " + } + }, + "StringType": { + "base": null, + "refs": { + "CodeDeliveryDetailsType$Destination": "

    The destination for the code delivery details.

    ", + "NumberAttributeConstraintsType$MinValue": "

    The minimum value of an attribute that is of the number data type.

    ", + "NumberAttributeConstraintsType$MaxValue": "

    The maximum value of an attribute that is of the number data type.

    ", + "StringAttributeConstraintsType$MinLength": "

    The minimum length of an attribute value of the string type.

    ", + "StringAttributeConstraintsType$MaxLength": "

    The maximum length of an attribute value of the string type.

    " + } + }, + "TokenModelType": { + "base": null, + "refs": { + "ChangePasswordRequest$AccessToken": "

    The access token in the change password request.

    ", + "DeleteUserAttributesRequest$AccessToken": "

    The access token used in the request to delete user attributes.

    ", + "DeleteUserRequest$AccessToken": "

    The access token from a request to delete a user.

    ", + "GetUserAttributeVerificationCodeRequest$AccessToken": "

    The access token in the request to get the user attribute verification code.

    ", + "GetUserRequest$AccessToken": "

    The access token in the request to get information about the user.

    ", + "SetUserSettingsRequest$AccessToken": "

    The access token for the set user settings request.

    ", + "UpdateUserAttributesRequest$AccessToken": "

    The access token for the request to update user attributes.

    ", + "VerifyUserAttributeRequest$AccessToken": "

    Represents the access token of the request to verify user attributes.

    " + } + }, + "TooManyFailedAttemptsException": { + "base": "

    This exception gets thrown when the user has made too many failed attempts for a given action (e.g., sign in).

    ", + "refs": { + } + }, + "TooManyRequestsException": { + "base": "

    This exception gets thrown when the user has made too many requests for a given operation.

    ", + "refs": { + } + }, + "UnexpectedLambdaException": { + "base": "

    This exception gets thrown when the Amazon Cognito service encounters an unexpected exception with the AWS Lambda service.

    ", + "refs": { + } + }, + "UpdateUserAttributesRequest": { + "base": "

    Represents the request to update user attributes.

    ", + "refs": { + } + }, + "UpdateUserAttributesResponse": { + "base": "

    Represents the response from the server for the request to update user attributes.

    ", + "refs": { + } + }, + "UpdateUserPoolClientRequest": { + "base": "

    Represents the request to update the user pool client.

    ", + "refs": { + } + }, + "UpdateUserPoolClientResponse": { + "base": "

    Represents the response from the server to the request to update the user pool client.

    ", + "refs": { + } + }, + "UpdateUserPoolRequest": { + "base": "

    Represents the request to update the user pool.

    ", + "refs": { + } + }, + "UpdateUserPoolResponse": { + "base": "

    Represents the response from the server when you make a request to update the user pool.

    ", + "refs": { + } + }, + "UserLambdaValidationException": { + "base": "

    This exception gets thrown when the Amazon Cognito service encounters a user validation exception with the AWS Lambda service.

    ", + "refs": { + } + }, + "UserPoolClientDescription": { + "base": "

    The description of the user pool client.

    ", + "refs": { + "UserPoolClientListType$member": null + } + }, + "UserPoolClientListType": { + "base": null, + "refs": { + "ListUserPoolClientsResponse$UserPoolClients": "

    The user pool clients in the response that lists user pool clients.

    " + } + }, + "UserPoolClientType": { + "base": "

    A user pool of the client type.

    ", + "refs": { + "CreateUserPoolClientResponse$UserPoolClient": "

    The user pool client that was just created.

    ", + "DescribeUserPoolClientResponse$UserPoolClient": "

    The user pool client from a server response to describe the user pool client.

    ", + "UpdateUserPoolClientResponse$UserPoolClient": "

    The user pool client value from the response from the server when an update user pool client request is made.

    " + } + }, + "UserPoolDescriptionType": { + "base": "

    A user pool description.

    ", + "refs": { + "UserPoolListType$member": null + } + }, + "UserPoolIdType": { + "base": null, + "refs": { + "AddCustomAttributesRequest$UserPoolId": "

    The user pool ID for the user pool where you want to add custom attributes.

    ", + "AdminConfirmSignUpRequest$UserPoolId": "

    The user pool ID for which you want to confirm user registration.

    ", + "AdminDeleteUserAttributesRequest$UserPoolId": "

    The user pool ID for the user pool where you want to delete user attributes.

    ", + "AdminDeleteUserRequest$UserPoolId": "

    The user pool ID for the user pool where you want to delete the user.

    ", + "AdminDisableUserRequest$UserPoolId": "

    The user pool ID for the user pool where you want to disable the user.

    ", + "AdminEnableUserRequest$UserPoolId": "

    The user pool ID for the user pool where you want to enable the user.

    ", + "AdminGetUserRequest$UserPoolId": "

    The user pool ID for the user pool where you want to get information about the user.

    ", + "AdminResetUserPasswordRequest$UserPoolId": "

    The user pool ID for the user pool where you want to reset the user's password.

    ", + "AdminSetUserSettingsRequest$UserPoolId": "

    The user pool ID for the user pool where you want to set the user's settings, such as MFA options.

    ", + "AdminUpdateUserAttributesRequest$UserPoolId": "

    The user pool ID for the user pool where you want to update user attributes.

    ", + "CreateUserPoolClientRequest$UserPoolId": "

    The user pool ID for the user pool where you want to create a user pool client.

    ", + "DeleteUserPoolClientRequest$UserPoolId": "

    The user pool ID for the user pool where you want to delete the client.

    ", + "DeleteUserPoolRequest$UserPoolId": "

    The user pool ID for the user pool you want to delete.

    ", + "DescribeUserPoolClientRequest$UserPoolId": "

    The user pool ID for the user pool you want to describe.

    ", + "DescribeUserPoolRequest$UserPoolId": "

    The user pool ID for the user pool you want to describe.

    ", + "ListUserPoolClientsRequest$UserPoolId": "

    The user pool ID for the user pool where you want to list user pool clients.

    ", + "ListUsersRequest$UserPoolId": "

    The user pool ID for which you want to list users.

    ", + "UpdateUserPoolClientRequest$UserPoolId": "

    The user pool ID for the user pool where you want to update the user pool client.

    ", + "UpdateUserPoolRequest$UserPoolId": "

    The user pool ID for the user pool you want to update.

    ", + "UserPoolClientDescription$UserPoolId": "

    The user pool ID for the user pool where you want to describe the user pool client.

    ", + "UserPoolClientType$UserPoolId": "

    The user pool ID for the user pool client.

    ", + "UserPoolDescriptionType$Id": "

    The ID in a user pool description.

    ", + "UserPoolType$Id": "

    The ID of the user pool.

    " + } + }, + "UserPoolListType": { + "base": null, + "refs": { + "ListUserPoolsResponse$UserPools": "

    The user pools from the response to list user pools.

    " + } + }, + "UserPoolMfaType": { + "base": null, + "refs": { + "CreateUserPoolRequest$MfaConfiguration": "

    Specifies MFA configuration details.

    ", + "UpdateUserPoolRequest$MfaConfiguration": "

    Can be one of the following values:

    • OFF - MFA tokens are not required and cannot be specified during user registration.
    • ON - MFA tokens are required for all user registrations. You can only specify required when you are initially creating a user pool.
    • OPTIONAL - Users have the option when registering to create an MFA token.
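
    Because ON is only accepted when a pool is first created, an update is effectively limited to OFF or OPTIONAL; a sketch with a hypothetical pool ID and region, assuming the generated client:

        package main

        import (
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/cognitoidentityprovider"
        )

        func main() {
            sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
            svc := cognitoidentityprovider.New(sess)

            _, err := svc.UpdateUserPool(&cognitoidentityprovider.UpdateUserPoolInput{
                UserPoolId:       aws.String("us-east-1_EXAMPLE"), // hypothetical
                MfaConfiguration: aws.String("OPTIONAL"),          // ON is only valid at creation time
            })
            if err != nil {
                log.Fatal(err)
            }
        }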
    ", + "UserPoolType$MfaConfiguration": "

    Can be one of the following values:

    • OFF - MFA tokens are not required and cannot be specified during user registration.
    • ON - MFA tokens are required for all user registrations. You can only specify required when you are initially creating a user pool.
    • OPTIONAL - Users have the option when registering to create an MFA token.
    " + } + }, + "UserPoolNameType": { + "base": null, + "refs": { + "CreateUserPoolRequest$PoolName": "

    A string used to name the user pool.

    ", + "UserPoolDescriptionType$Name": "

    The name in a user pool description.

    ", + "UserPoolType$Name": "

    The name of the user pool.

    " + } + }, + "UserPoolPolicyType": { + "base": "

    The type of policy in a user pool.

    ", + "refs": { + "CreateUserPoolRequest$Policies": "

    The policies associated with the new user pool.

    ", + "UpdateUserPoolRequest$Policies": "

    A container with the policies you wish to update in a user pool.

    ", + "UserPoolType$Policies": "

    A container describing the policies associated with a user pool.

    " + } + }, + "UserPoolType": { + "base": "

    A container with information about the user pool type.

    ", + "refs": { + "CreateUserPoolResponse$UserPool": "

    A container for the user pool details.

    ", + "DescribeUserPoolResponse$UserPool": "

    The container of metadata returned by the server to describe the pool.

    " + } + }, + "UserStatusType": { + "base": null, + "refs": { + "AdminGetUserResponse$UserStatus": "

    The user status. Can be one of the following:

    • UNCONFIRMED - User has been created but not confirmed.
    • CONFIRMED - User has been confirmed.
    • ARCHIVED - User is no longer active.
    • COMPROMISED - User is disabled due to a potential security threat.
    • UNKNOWN - User status is not known.
    ", + "ListUsersRequest$UserStatus": "

    The user status. Can be one of the following:

    • UNCONFIRMED - User has been created but not confirmed.
    • CONFIRMED - User has been confirmed.
    • ARCHIVED - User is no longer active.
    • COMPROMISED - User is disabled due to a potential security threat.
    • UNKNOWN - User status is not known.
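
    In this vendored model, ListUsersRequest carries the status value directly, so filtering by status is a single call; a sketch with a hypothetical pool ID and region (the UserStatus filter is as defined by this model):

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/cognitoidentityprovider"
        )

        func main() {
            sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
            svc := cognitoidentityprovider.New(sess)

            out, err := svc.ListUsers(&cognitoidentityprovider.ListUsersInput{
                UserPoolId: aws.String("us-east-1_EXAMPLE"), // hypothetical
                UserStatus: aws.String("UNCONFIRMED"),       // one of the values listed above
                Limit:      aws.Int64(10),
            })
            if err != nil {
                log.Fatal(err)
            }
            for _, u := range out.Users {
                fmt.Println(aws.StringValue(u.Username))
            }
        }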
    ", + "UserType$UserStatus": "

    The user status. Can be one of the following:

    • UNCONFIRMED - User has been created but not confirmed.
    • CONFIRMED - User has been confirmed.
    • ARCHIVED - User is no longer active.
    • COMPROMISED - User is disabled due to a potential security threat.
    • UNKNOWN - User status is not known.
    " + } + }, + "UserType": { + "base": "

    The user type.

    ", + "refs": { + "UsersListType$member": null + } + }, + "UsernameExistsException": { + "base": "

    This exception is thrown when Amazon Cognito encounters a user name that already exists in the user pool.

    ", + "refs": { + } + }, + "UsernameType": { + "base": null, + "refs": { + "AdminConfirmSignUpRequest$Username": "

    The user name for which you want to confirm user registration.

    ", + "AdminDeleteUserAttributesRequest$Username": "

    The user name of the user from whom you would like to delete attributes.

    ", + "AdminDeleteUserRequest$Username": "

    The user name of the user you wish to delete.

    ", + "AdminDisableUserRequest$Username": "

    The user name of the user you wish to disable.

    ", + "AdminEnableUserRequest$Username": "

    The user name of the user you wish to enable.

    ", + "AdminGetUserRequest$Username": "

    The user name of the user you wish to retrieve.

    ", + "AdminGetUserResponse$Username": "

    The user name of the user about whom you are receiving information.

    ", + "AdminResetUserPasswordRequest$Username": "

    The user name of the user whose password you wish to reset.

    ", + "AdminSetUserSettingsRequest$Username": "

    The user name of the user for whom you wish to set user settings.

    ", + "AdminUpdateUserAttributesRequest$Username": "

    The user name of the user for whom you want to update user attributes.

    ", + "ConfirmForgotPasswordRequest$Username": "

    The user name of the user for whom you want to enter a code to retrieve a forgotten password.

    ", + "ConfirmSignUpRequest$Username": "

    The user name of the user whose registration you wish to confirm.

    ", + "ForgotPasswordRequest$Username": "

    The user name of the user for whom you want to enter a code to retrieve a forgotten password.

    ", + "GetUserResponse$Username": "

    The user name of the user you wish to retrieve from the get user request.

    ", + "ResendConfirmationCodeRequest$Username": "

    The user name of the user to whom you wish to resend a confirmation code.

    ", + "SignUpRequest$Username": "

    The user name of the user you wish to register.

    ", + "UserType$Username": "

    The user name of the user you wish to describe.

    " + } + }, + "UsersListType": { + "base": null, + "refs": { + "ListUsersResponse$Users": "

    The users returned in the request to list users.

    " + } + }, + "VerifiedAttributeType": { + "base": null, + "refs": { + "VerifiedAttributesListType$member": null + } + }, + "VerifiedAttributesListType": { + "base": null, + "refs": { + "CreateUserPoolRequest$AutoVerifiedAttributes": "

    The attributes to be auto-verified. Possible values: email, phone_number.

    ", + "UpdateUserPoolRequest$AutoVerifiedAttributes": "

    The attributes that are automatically verified when the Amazon Cognito service makes a request to update user pools.

    ", + "UserPoolType$AutoVerifiedAttributes": "

    Specifies the attributes that are auto-verified in a user pool.

    " + } + }, + "VerifyUserAttributeRequest": { + "base": "

    Represents the request to verify user attributes.

    ", + "refs": { + } + }, + "VerifyUserAttributeResponse": { + "base": "

    A container representing the response from the server from the request to verify user attributes.

    ", + "refs": { + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cognito-idp/2016-04-18/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cognito-idp/2016-04-18/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cognito-idp/2016-04-18/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cognito-sync/2014-06-30/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cognito-sync/2014-06-30/api-2.json new file mode 100644 index 000000000..3f1b7122d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cognito-sync/2014-06-30/api-2.json @@ -0,0 +1,1874 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2014-06-30", + "endpointPrefix":"cognito-sync", + "jsonVersion":"1.1", + "serviceFullName":"Amazon Cognito Sync", + "signatureVersion":"v4", + "protocol":"rest-json" + }, + "operations":{ + "BulkPublish":{ + "name":"BulkPublish", + "http":{ + "method":"POST", + "requestUri":"/identitypools/{IdentityPoolId}/bulkpublish", + "responseCode":200 + }, + "input":{"shape":"BulkPublishRequest"}, + "output":{"shape":"BulkPublishResponse"}, + "errors":[ + { + "shape":"NotAuthorizedException", + "error":{ + "code":"NotAuthorizedError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"DuplicateRequestException", + "error":{ + "code":"DuplicateRequest", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"AlreadyStreamedException", + "error":{ + "code":"AlreadyStreamed", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeleteDataset":{ + "name":"DeleteDataset", + "http":{ + "method":"DELETE", + "requestUri":"/identitypools/{IdentityPoolId}/identities/{IdentityId}/datasets/{DatasetName}", + "responseCode":200 + }, + "input":{"shape":"DeleteDatasetRequest"}, + "output":{"shape":"DeleteDatasetResponse"}, + "errors":[ + { + "shape":"NotAuthorizedException", + "error":{ + "code":"NotAuthorizedError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"TooManyRequestsException", + "error":{ + "code":"TooManyRequests", + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceConflictException", + "error":{ + "code":"ResourceConflict", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeDataset":{ + "name":"DescribeDataset", + "http":{ + "method":"GET", + 
"requestUri":"/identitypools/{IdentityPoolId}/identities/{IdentityId}/datasets/{DatasetName}", + "responseCode":200 + }, + "input":{"shape":"DescribeDatasetRequest"}, + "output":{"shape":"DescribeDatasetResponse"}, + "errors":[ + { + "shape":"NotAuthorizedException", + "error":{ + "code":"NotAuthorizedError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"TooManyRequestsException", + "error":{ + "code":"TooManyRequests", + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeIdentityPoolUsage":{ + "name":"DescribeIdentityPoolUsage", + "http":{ + "method":"GET", + "requestUri":"/identitypools/{IdentityPoolId}", + "responseCode":200 + }, + "input":{"shape":"DescribeIdentityPoolUsageRequest"}, + "output":{"shape":"DescribeIdentityPoolUsageResponse"}, + "errors":[ + { + "shape":"NotAuthorizedException", + "error":{ + "code":"NotAuthorizedError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"TooManyRequestsException", + "error":{ + "code":"TooManyRequests", + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeIdentityUsage":{ + "name":"DescribeIdentityUsage", + "http":{ + "method":"GET", + "requestUri":"/identitypools/{IdentityPoolId}/identities/{IdentityId}", + "responseCode":200 + }, + "input":{"shape":"DescribeIdentityUsageRequest"}, + "output":{"shape":"DescribeIdentityUsageResponse"}, + "errors":[ + { + "shape":"NotAuthorizedException", + "error":{ + "code":"NotAuthorizedError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"TooManyRequestsException", + "error":{ + "code":"TooManyRequests", + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + } + ] + }, + "GetBulkPublishDetails":{ + "name":"GetBulkPublishDetails", + "http":{ + "method":"POST", + "requestUri":"/identitypools/{IdentityPoolId}/getBulkPublishDetails", + "responseCode":200 + }, + "input":{"shape":"GetBulkPublishDetailsRequest"}, + "output":{"shape":"GetBulkPublishDetailsResponse"}, + "errors":[ + { + "shape":"NotAuthorizedException", + "error":{ + 
"code":"NotAuthorizedError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + } + ] + }, + "GetCognitoEvents":{ + "name":"GetCognitoEvents", + "http":{ + "method":"GET", + "requestUri":"/identitypools/{IdentityPoolId}/events", + "responseCode":200 + }, + "input":{"shape":"GetCognitoEventsRequest"}, + "output":{"shape":"GetCognitoEventsResponse"}, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NotAuthorizedException", + "error":{ + "code":"NotAuthorizedError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"TooManyRequestsException", + "error":{ + "code":"TooManyRequests", + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + } + ] + }, + "GetIdentityPoolConfiguration":{ + "name":"GetIdentityPoolConfiguration", + "http":{ + "method":"GET", + "requestUri":"/identitypools/{IdentityPoolId}/configuration", + "responseCode":200 + }, + "input":{"shape":"GetIdentityPoolConfigurationRequest"}, + "output":{"shape":"GetIdentityPoolConfigurationResponse"}, + "errors":[ + { + "shape":"NotAuthorizedException", + "error":{ + "code":"NotAuthorizedError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"TooManyRequestsException", + "error":{ + "code":"TooManyRequests", + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + } + ] + }, + "ListDatasets":{ + "name":"ListDatasets", + "http":{ + "method":"GET", + "requestUri":"/identitypools/{IdentityPoolId}/identities/{IdentityId}/datasets", + "responseCode":200 + }, + "input":{"shape":"ListDatasetsRequest"}, + "output":{"shape":"ListDatasetsResponse"}, + "errors":[ + { + "shape":"NotAuthorizedException", + "error":{ + "code":"NotAuthorizedError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"TooManyRequestsException", + "error":{ + 
"code":"TooManyRequests", + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + } + ] + }, + "ListIdentityPoolUsage":{ + "name":"ListIdentityPoolUsage", + "http":{ + "method":"GET", + "requestUri":"/identitypools", + "responseCode":200 + }, + "input":{"shape":"ListIdentityPoolUsageRequest"}, + "output":{"shape":"ListIdentityPoolUsageResponse"}, + "errors":[ + { + "shape":"NotAuthorizedException", + "error":{ + "code":"NotAuthorizedError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"TooManyRequestsException", + "error":{ + "code":"TooManyRequests", + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + } + ] + }, + "ListRecords":{ + "name":"ListRecords", + "http":{ + "method":"GET", + "requestUri":"/identitypools/{IdentityPoolId}/identities/{IdentityId}/datasets/{DatasetName}/records", + "responseCode":200 + }, + "input":{"shape":"ListRecordsRequest"}, + "output":{"shape":"ListRecordsResponse"}, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NotAuthorizedException", + "error":{ + "code":"NotAuthorizedError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{ + "code":"TooManyRequests", + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + } + ] + }, + "RegisterDevice":{ + "name":"RegisterDevice", + "http":{ + "method":"POST", + "requestUri":"/identitypools/{IdentityPoolId}/identity/{IdentityId}/device", + "responseCode":200 + }, + "input":{"shape":"RegisterDeviceRequest"}, + "output":{"shape":"RegisterDeviceResponse"}, + "errors":[ + { + "shape":"NotAuthorizedException", + "error":{ + "code":"NotAuthorizedError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"InvalidConfigurationException", + "error":{ + "code":"InvalidConfiguration", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{ + "code":"TooManyRequests", + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + } + ] + }, + "SetCognitoEvents":{ + "name":"SetCognitoEvents", + "http":{ + "method":"POST", + "requestUri":"/identitypools/{IdentityPoolId}/events", + "responseCode":200 + }, + "input":{"shape":"SetCognitoEventsRequest"}, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + 
"shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NotAuthorizedException", + "error":{ + "code":"NotAuthorizedError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"TooManyRequestsException", + "error":{ + "code":"TooManyRequests", + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + } + ] + }, + "SetIdentityPoolConfiguration":{ + "name":"SetIdentityPoolConfiguration", + "http":{ + "method":"POST", + "requestUri":"/identitypools/{IdentityPoolId}/configuration", + "responseCode":200 + }, + "input":{"shape":"SetIdentityPoolConfigurationRequest"}, + "output":{"shape":"SetIdentityPoolConfigurationResponse"}, + "errors":[ + { + "shape":"NotAuthorizedException", + "error":{ + "code":"NotAuthorizedError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"TooManyRequestsException", + "error":{ + "code":"TooManyRequests", + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ConcurrentModificationException", + "error":{ + "code":"ConcurrentModification", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "SubscribeToDataset":{ + "name":"SubscribeToDataset", + "http":{ + "method":"POST", + "requestUri":"/identitypools/{IdentityPoolId}/identities/{IdentityId}/datasets/{DatasetName}/subscriptions/{DeviceId}", + "responseCode":200 + }, + "input":{"shape":"SubscribeToDatasetRequest"}, + "output":{"shape":"SubscribeToDatasetResponse"}, + "errors":[ + { + "shape":"NotAuthorizedException", + "error":{ + "code":"NotAuthorizedError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"InvalidConfigurationException", + "error":{ + "code":"InvalidConfiguration", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{ + "code":"TooManyRequests", + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + } + ] + }, + "UnsubscribeFromDataset":{ + "name":"UnsubscribeFromDataset", + "http":{ + "method":"DELETE", + "requestUri":"/identitypools/{IdentityPoolId}/identities/{IdentityId}/datasets/{DatasetName}/subscriptions/{DeviceId}", + "responseCode":200 + }, + "input":{"shape":"UnsubscribeFromDatasetRequest"}, + "output":{"shape":"UnsubscribeFromDatasetResponse"}, + 
"errors":[ + { + "shape":"NotAuthorizedException", + "error":{ + "code":"NotAuthorizedError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"InvalidConfigurationException", + "error":{ + "code":"InvalidConfiguration", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{ + "code":"TooManyRequests", + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + } + ] + }, + "UpdateRecords":{ + "name":"UpdateRecords", + "http":{ + "method":"POST", + "requestUri":"/identitypools/{IdentityPoolId}/identities/{IdentityId}/datasets/{DatasetName}", + "responseCode":200 + }, + "input":{"shape":"UpdateRecordsRequest"}, + "output":{"shape":"UpdateRecordsResponse"}, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{ + "code":"LimitExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NotAuthorizedException", + "error":{ + "code":"NotAuthorizedError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceConflictException", + "error":{ + "code":"ResourceConflict", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidLambdaFunctionOutputException", + "error":{ + "code":"InvalidLambdaFunctionOutput", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"LambdaThrottledException", + "error":{ + "code":"LambdaThrottled", + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{ + "code":"TooManyRequests", + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + } + ] + } + }, + "shapes":{ + "AlreadyStreamedException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "error":{ + "code":"AlreadyStreamed", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ApplicationArn":{ + "type":"string", + "pattern":"arn:aws:sns:[-0-9a-z]+:\\d+:app/[A-Z_]+/[a-zA-Z0-9_.-]+" + }, + "ApplicationArnList":{ + "type":"list", + "member":{"shape":"ApplicationArn"} + }, + "AssumeRoleArn":{ + "type":"string", + "min":20, + "max":2048, + "pattern":"arn:aws:iam::\\d+:role/.*" + }, + "Boolean":{"type":"boolean"}, + "BulkPublishRequest":{ + "type":"structure", + "required":["IdentityPoolId"], + "members":{ + "IdentityPoolId":{ + "shape":"IdentityPoolId", + "location":"uri", + "locationName":"IdentityPoolId" + } + } + }, + "BulkPublishResponse":{ + 
"type":"structure", + "members":{ + "IdentityPoolId":{"shape":"IdentityPoolId"} + } + }, + "BulkPublishStatus":{ + "type":"string", + "enum":[ + "NOT_STARTED", + "IN_PROGRESS", + "FAILED", + "SUCCEEDED" + ] + }, + "ClientContext":{"type":"string"}, + "CognitoEventType":{"type":"string"}, + "CognitoStreams":{ + "type":"structure", + "members":{ + "StreamName":{"shape":"StreamName"}, + "RoleArn":{"shape":"AssumeRoleArn"}, + "StreamingStatus":{"shape":"StreamingStatus"} + } + }, + "ConcurrentModificationException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "error":{ + "code":"ConcurrentModification", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Dataset":{ + "type":"structure", + "members":{ + "IdentityId":{"shape":"IdentityId"}, + "DatasetName":{"shape":"DatasetName"}, + "CreationDate":{"shape":"Date"}, + "LastModifiedDate":{"shape":"Date"}, + "LastModifiedBy":{"shape":"String"}, + "DataStorage":{"shape":"Long"}, + "NumRecords":{"shape":"Long"} + } + }, + "DatasetList":{ + "type":"list", + "member":{"shape":"Dataset"} + }, + "DatasetName":{ + "type":"string", + "min":1, + "max":128, + "pattern":"[a-zA-Z0-9_.:-]+" + }, + "Date":{"type":"timestamp"}, + "DeleteDatasetRequest":{ + "type":"structure", + "required":[ + "IdentityPoolId", + "IdentityId", + "DatasetName" + ], + "members":{ + "IdentityPoolId":{ + "shape":"IdentityPoolId", + "location":"uri", + "locationName":"IdentityPoolId" + }, + "IdentityId":{ + "shape":"IdentityId", + "location":"uri", + "locationName":"IdentityId" + }, + "DatasetName":{ + "shape":"DatasetName", + "location":"uri", + "locationName":"DatasetName" + } + } + }, + "DeleteDatasetResponse":{ + "type":"structure", + "members":{ + "Dataset":{"shape":"Dataset"} + } + }, + "DescribeDatasetRequest":{ + "type":"structure", + "required":[ + "IdentityPoolId", + "IdentityId", + "DatasetName" + ], + "members":{ + "IdentityPoolId":{ + "shape":"IdentityPoolId", + "location":"uri", + "locationName":"IdentityPoolId" + }, + "IdentityId":{ + "shape":"IdentityId", + "location":"uri", + "locationName":"IdentityId" + }, + "DatasetName":{ + "shape":"DatasetName", + "location":"uri", + "locationName":"DatasetName" + } + } + }, + "DescribeDatasetResponse":{ + "type":"structure", + "members":{ + "Dataset":{"shape":"Dataset"} + } + }, + "DescribeIdentityPoolUsageRequest":{ + "type":"structure", + "required":["IdentityPoolId"], + "members":{ + "IdentityPoolId":{ + "shape":"IdentityPoolId", + "location":"uri", + "locationName":"IdentityPoolId" + } + } + }, + "DescribeIdentityPoolUsageResponse":{ + "type":"structure", + "members":{ + "IdentityPoolUsage":{"shape":"IdentityPoolUsage"} + } + }, + "DescribeIdentityUsageRequest":{ + "type":"structure", + "required":[ + "IdentityPoolId", + "IdentityId" + ], + "members":{ + "IdentityPoolId":{ + "shape":"IdentityPoolId", + "location":"uri", + "locationName":"IdentityPoolId" + }, + "IdentityId":{ + "shape":"IdentityId", + "location":"uri", + "locationName":"IdentityId" + } + } + }, + "DescribeIdentityUsageResponse":{ + "type":"structure", + "members":{ + "IdentityUsage":{"shape":"IdentityUsage"} + } + }, + "DeviceId":{ + "type":"string", + "min":1, + "max":256 + }, + "DuplicateRequestException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "error":{ + "code":"DuplicateRequest", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Events":{ + "type":"map", + 
"key":{"shape":"CognitoEventType"}, + "value":{"shape":"LambdaFunctionArn"}, + "max":1 + }, + "ExceptionMessage":{"type":"string"}, + "GetBulkPublishDetailsRequest":{ + "type":"structure", + "required":["IdentityPoolId"], + "members":{ + "IdentityPoolId":{ + "shape":"IdentityPoolId", + "location":"uri", + "locationName":"IdentityPoolId" + } + } + }, + "GetBulkPublishDetailsResponse":{ + "type":"structure", + "members":{ + "IdentityPoolId":{"shape":"IdentityPoolId"}, + "BulkPublishStartTime":{"shape":"Date"}, + "BulkPublishCompleteTime":{"shape":"Date"}, + "BulkPublishStatus":{"shape":"BulkPublishStatus"}, + "FailureMessage":{"shape":"String"} + } + }, + "GetCognitoEventsRequest":{ + "type":"structure", + "required":["IdentityPoolId"], + "members":{ + "IdentityPoolId":{ + "shape":"IdentityPoolId", + "location":"uri", + "locationName":"IdentityPoolId" + } + } + }, + "GetCognitoEventsResponse":{ + "type":"structure", + "members":{ + "Events":{"shape":"Events"} + } + }, + "GetIdentityPoolConfigurationRequest":{ + "type":"structure", + "required":["IdentityPoolId"], + "members":{ + "IdentityPoolId":{ + "shape":"IdentityPoolId", + "location":"uri", + "locationName":"IdentityPoolId" + } + } + }, + "GetIdentityPoolConfigurationResponse":{ + "type":"structure", + "members":{ + "IdentityPoolId":{"shape":"IdentityPoolId"}, + "PushSync":{"shape":"PushSync"}, + "CognitoStreams":{"shape":"CognitoStreams"} + } + }, + "IdentityId":{ + "type":"string", + "min":1, + "max":55, + "pattern":"[\\w-]+:[0-9a-f-]+" + }, + "IdentityPoolId":{ + "type":"string", + "min":1, + "max":55, + "pattern":"[\\w-]+:[0-9a-f-]+" + }, + "IdentityPoolUsage":{ + "type":"structure", + "members":{ + "IdentityPoolId":{"shape":"IdentityPoolId"}, + "SyncSessionsCount":{"shape":"Long"}, + "DataStorage":{"shape":"Long"}, + "LastModifiedDate":{"shape":"Date"} + } + }, + "IdentityPoolUsageList":{ + "type":"list", + "member":{"shape":"IdentityPoolUsage"} + }, + "IdentityUsage":{ + "type":"structure", + "members":{ + "IdentityId":{"shape":"IdentityId"}, + "IdentityPoolId":{"shape":"IdentityPoolId"}, + "LastModifiedDate":{"shape":"Date"}, + "DatasetCount":{"shape":"Integer"}, + "DataStorage":{"shape":"Long"} + } + }, + "Integer":{"type":"integer"}, + "IntegerString":{"type":"integer"}, + "InternalErrorException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + "InvalidConfigurationException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "error":{ + "code":"InvalidConfiguration", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidLambdaFunctionOutputException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "error":{ + "code":"InvalidLambdaFunctionOutput", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidParameterException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "LambdaFunctionArn":{"type":"string"}, + "LambdaThrottledException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "error":{ + "code":"LambdaThrottled", + "httpStatusCode":429, 
+ "senderFault":true + }, + "exception":true + }, + "LimitExceededException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "error":{ + "code":"LimitExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ListDatasetsRequest":{ + "type":"structure", + "required":[ + "IdentityId", + "IdentityPoolId" + ], + "members":{ + "IdentityPoolId":{ + "shape":"IdentityPoolId", + "location":"uri", + "locationName":"IdentityPoolId" + }, + "IdentityId":{ + "shape":"IdentityId", + "location":"uri", + "locationName":"IdentityId" + }, + "NextToken":{ + "shape":"String", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"IntegerString", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListDatasetsResponse":{ + "type":"structure", + "members":{ + "Datasets":{"shape":"DatasetList"}, + "Count":{"shape":"Integer"}, + "NextToken":{"shape":"String"} + } + }, + "ListIdentityPoolUsageRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"String", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"IntegerString", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListIdentityPoolUsageResponse":{ + "type":"structure", + "members":{ + "IdentityPoolUsages":{"shape":"IdentityPoolUsageList"}, + "MaxResults":{"shape":"Integer"}, + "Count":{"shape":"Integer"}, + "NextToken":{"shape":"String"} + } + }, + "ListRecordsRequest":{ + "type":"structure", + "required":[ + "IdentityPoolId", + "IdentityId", + "DatasetName" + ], + "members":{ + "IdentityPoolId":{ + "shape":"IdentityPoolId", + "location":"uri", + "locationName":"IdentityPoolId" + }, + "IdentityId":{ + "shape":"IdentityId", + "location":"uri", + "locationName":"IdentityId" + }, + "DatasetName":{ + "shape":"DatasetName", + "location":"uri", + "locationName":"DatasetName" + }, + "LastSyncCount":{ + "shape":"Long", + "location":"querystring", + "locationName":"lastSyncCount" + }, + "NextToken":{ + "shape":"String", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"IntegerString", + "location":"querystring", + "locationName":"maxResults" + }, + "SyncSessionToken":{ + "shape":"SyncSessionToken", + "location":"querystring", + "locationName":"syncSessionToken" + } + } + }, + "ListRecordsResponse":{ + "type":"structure", + "members":{ + "Records":{"shape":"RecordList"}, + "NextToken":{"shape":"String"}, + "Count":{"shape":"Integer"}, + "DatasetSyncCount":{"shape":"Long"}, + "LastModifiedBy":{"shape":"String"}, + "MergedDatasetNames":{"shape":"MergedDatasetNameList"}, + "DatasetExists":{"shape":"Boolean"}, + "DatasetDeletedAfterRequestedSyncCount":{"shape":"Boolean"}, + "SyncSessionToken":{"shape":"String"} + } + }, + "Long":{"type":"long"}, + "MergedDatasetNameList":{ + "type":"list", + "member":{"shape":"String"} + }, + "NotAuthorizedException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "error":{ + "code":"NotAuthorizedError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "Operation":{ + "type":"string", + "enum":[ + "replace", + "remove" + ] + }, + "Platform":{ + "type":"string", + "enum":[ + "APNS", + "APNS_SANDBOX", + "GCM", + "ADM" + ] + }, + "PushSync":{ + "type":"structure", + "members":{ + "ApplicationArns":{"shape":"ApplicationArnList"}, + "RoleArn":{"shape":"AssumeRoleArn"} + } + }, + 
"PushToken":{"type":"string"}, + "Record":{ + "type":"structure", + "members":{ + "Key":{"shape":"RecordKey"}, + "Value":{"shape":"RecordValue"}, + "SyncCount":{"shape":"Long"}, + "LastModifiedDate":{"shape":"Date"}, + "LastModifiedBy":{"shape":"String"}, + "DeviceLastModifiedDate":{"shape":"Date"} + } + }, + "RecordKey":{ + "type":"string", + "min":1, + "max":1024 + }, + "RecordList":{ + "type":"list", + "member":{"shape":"Record"} + }, + "RecordPatch":{ + "type":"structure", + "required":[ + "Op", + "Key", + "SyncCount" + ], + "members":{ + "Op":{"shape":"Operation"}, + "Key":{"shape":"RecordKey"}, + "Value":{"shape":"RecordValue"}, + "SyncCount":{"shape":"Long"}, + "DeviceLastModifiedDate":{"shape":"Date"} + } + }, + "RecordPatchList":{ + "type":"list", + "member":{"shape":"RecordPatch"} + }, + "RecordValue":{ + "type":"string", + "max":1048575 + }, + "RegisterDeviceRequest":{ + "type":"structure", + "required":[ + "IdentityPoolId", + "IdentityId", + "Platform", + "Token" + ], + "members":{ + "IdentityPoolId":{ + "shape":"IdentityPoolId", + "location":"uri", + "locationName":"IdentityPoolId" + }, + "IdentityId":{ + "shape":"IdentityId", + "location":"uri", + "locationName":"IdentityId" + }, + "Platform":{"shape":"Platform"}, + "Token":{"shape":"PushToken"} + } + }, + "RegisterDeviceResponse":{ + "type":"structure", + "members":{ + "DeviceId":{"shape":"DeviceId"} + } + }, + "ResourceConflictException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "error":{ + "code":"ResourceConflict", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SetCognitoEventsRequest":{ + "type":"structure", + "required":[ + "IdentityPoolId", + "Events" + ], + "members":{ + "IdentityPoolId":{ + "shape":"IdentityPoolId", + "location":"uri", + "locationName":"IdentityPoolId" + }, + "Events":{"shape":"Events"} + } + }, + "SetIdentityPoolConfigurationRequest":{ + "type":"structure", + "required":["IdentityPoolId"], + "members":{ + "IdentityPoolId":{ + "shape":"IdentityPoolId", + "location":"uri", + "locationName":"IdentityPoolId" + }, + "PushSync":{"shape":"PushSync"}, + "CognitoStreams":{"shape":"CognitoStreams"} + } + }, + "SetIdentityPoolConfigurationResponse":{ + "type":"structure", + "members":{ + "IdentityPoolId":{"shape":"IdentityPoolId"}, + "PushSync":{"shape":"PushSync"}, + "CognitoStreams":{"shape":"CognitoStreams"} + } + }, + "StreamName":{ + "type":"string", + "min":1, + "max":128 + }, + "StreamingStatus":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "String":{"type":"string"}, + "SubscribeToDatasetRequest":{ + "type":"structure", + "required":[ + "IdentityPoolId", + "IdentityId", + "DatasetName", + "DeviceId" + ], + "members":{ + "IdentityPoolId":{ + "shape":"IdentityPoolId", + "location":"uri", + "locationName":"IdentityPoolId" + }, + "IdentityId":{ + "shape":"IdentityId", + "location":"uri", + "locationName":"IdentityId" + }, + "DatasetName":{ + "shape":"DatasetName", + "location":"uri", + "locationName":"DatasetName" + }, + "DeviceId":{ + "shape":"DeviceId", + "location":"uri", + "locationName":"DeviceId" + } + } + }, + "SubscribeToDatasetResponse":{ + "type":"structure", + "members":{ + } + }, + "SyncSessionToken":{"type":"string"}, + 
"TooManyRequestsException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "error":{ + "code":"TooManyRequests", + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + }, + "UnsubscribeFromDatasetRequest":{ + "type":"structure", + "required":[ + "IdentityPoolId", + "IdentityId", + "DatasetName", + "DeviceId" + ], + "members":{ + "IdentityPoolId":{ + "shape":"IdentityPoolId", + "location":"uri", + "locationName":"IdentityPoolId" + }, + "IdentityId":{ + "shape":"IdentityId", + "location":"uri", + "locationName":"IdentityId" + }, + "DatasetName":{ + "shape":"DatasetName", + "location":"uri", + "locationName":"DatasetName" + }, + "DeviceId":{ + "shape":"DeviceId", + "location":"uri", + "locationName":"DeviceId" + } + } + }, + "UnsubscribeFromDatasetResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateRecordsRequest":{ + "type":"structure", + "required":[ + "IdentityPoolId", + "IdentityId", + "DatasetName", + "SyncSessionToken" + ], + "members":{ + "IdentityPoolId":{ + "shape":"IdentityPoolId", + "location":"uri", + "locationName":"IdentityPoolId" + }, + "IdentityId":{ + "shape":"IdentityId", + "location":"uri", + "locationName":"IdentityId" + }, + "DatasetName":{ + "shape":"DatasetName", + "location":"uri", + "locationName":"DatasetName" + }, + "DeviceId":{"shape":"DeviceId"}, + "RecordPatches":{"shape":"RecordPatchList"}, + "SyncSessionToken":{"shape":"SyncSessionToken"}, + "ClientContext":{ + "shape":"ClientContext", + "location":"header", + "locationName":"x-amz-Client-Context" + } + } + }, + "UpdateRecordsResponse":{ + "type":"structure", + "members":{ + "Records":{"shape":"RecordList"} + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/cognito-sync/2014-06-30/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/cognito-sync/2014-06-30/docs-2.json new file mode 100644 index 000000000..243b7973d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/cognito-sync/2014-06-30/docs-2.json @@ -0,0 +1,588 @@ +{ + "version": "2.0", + "operations": { + "BulkPublish": "

    Initiates a bulk publish of all existing datasets for an Identity Pool to the configured stream. Customers are limited to one successful bulk publish per 24 hours. Bulk publish is an asynchronous request; customers can see the status of the request via the GetBulkPublishDetails operation.

    This API can only be called with developer credentials. You cannot call this API with the temporary user credentials provided by Cognito Identity.

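    A minimal sketch, not part of the vendored model: starting a bulk publish with the generated cognitosync client from this SDK and polling GetBulkPublishDetails until the asynchronous request settles. The pool ID is a placeholder.

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cognitosync"
)

func main() {
	svc := cognitosync.New(session.Must(session.NewSession()))
	pool := aws.String("us-east-1:00000000-0000-0000-0000-000000000000") // placeholder pool ID

	// One successful bulk publish is allowed per 24 hours; a publish that is
	// already IN_PROGRESS surfaces as a DuplicateRequestException here.
	if _, err := svc.BulkPublish(&cognitosync.BulkPublishInput{IdentityPoolId: pool}); err != nil {
		panic(err)
	}

	for {
		out, err := svc.GetBulkPublishDetails(&cognitosync.GetBulkPublishDetailsInput{IdentityPoolId: pool})
		if err != nil {
			panic(err)
		}
		switch aws.StringValue(out.BulkPublishStatus) {
		case cognitosync.BulkPublishStatusSucceeded:
			fmt.Println("bulk publish complete")
			return
		case cognitosync.BulkPublishStatusFailed:
			panic(aws.StringValue(out.FailureMessage)) // cause of the failure
		}
		time.Sleep(10 * time.Second) // NOT_STARTED or IN_PROGRESS: keep polling
	}
}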
    ", + "DeleteDataset": "

    Deletes the specific dataset. The dataset will be deleted permanently, and the action can't be undone. Datasets that this dataset was merged with will no longer report the merge. Any subsequent operation on this dataset will result in a ResourceNotFoundException.

    This API can be called with temporary user credentials provided by Cognito Identity or with developer credentials.

    ", + "DescribeDataset": "

    Gets metadata about a dataset by identity and dataset name. With Amazon Cognito Sync, each identity has access only to its own data. Thus, the credentials used to make this API call need to have access to the identity data.

    This API can be called with temporary user credentials provided by Cognito Identity or with developer credentials. You should use Cognito Identity credentials to make this API call.

    ", + "DescribeIdentityPoolUsage": "

    Gets usage details (for example, data storage) about a particular identity pool.

    This API can only be called with developer credentials. You cannot call this API with the temporary user credentials provided by Cognito Identity.

    ", + "DescribeIdentityUsage": "

    Gets usage information for an identity, including number of datasets and data usage.

    This API can be called with temporary user credentials provided by Cognito Identity or with developer credentials.

    ", + "GetBulkPublishDetails": "

    Get the status of the last BulkPublish operation for an identity pool.

    This API can only be called with developer credentials. You cannot call this API with the temporary user credentials provided by Cognito Identity.

    ", + "GetCognitoEvents": "

    Gets the events and the corresponding Lambda functions associated with an identity pool.

    This API can only be called with developer credentials. You cannot call this API with the temporary user credentials provided by Cognito Identity.

    ", + "GetIdentityPoolConfiguration": "

    Gets the configuration settings of an identity pool.

    This API can only be called with developer credentials. You cannot call this API with the temporary user credentials provided by Cognito Identity.

    ", + "ListDatasets": "

    Lists datasets for an identity. With Amazon Cognito Sync, each identity has access only to its own data. Thus, the credentials used to make this API call need to have access to the identity data.

    ListDatasets can be called with temporary user credentials provided by Cognito Identity or with developer credentials. You should use the Cognito Identity credentials to make this API call.

    ", + "ListIdentityPoolUsage": "

    Gets a list of identity pools registered with Cognito.

    ListIdentityPoolUsage can only be called with developer credentials. You cannot make this API call with the temporary user credentials provided by Cognito Identity.

    ", + "ListRecords": "

    Gets paginated records, optionally changed after a particular sync count for a dataset and identity. With Amazon Cognito Sync, each identity has access only to its own data. Thus, the credentials used to make this API call need to have access to the identity data.

    ListRecords can be called with temporary user credentials provided by Cognito Identity or with developer credentials. You should use Cognito Identity credentials to make this API call.

    ", + "RegisterDevice": "

    Registers a device to receive push sync notifications.

    This API can only be called with temporary credentials provided by Cognito Identity. You cannot call this API with developer credentials.

    ", + "SetCognitoEvents": "

    Sets the AWS Lambda function for a given event type for an identity pool. This request only updates the key/value pair specified. Other key/value pairs are not updated. To remove a key/value pair, pass an empty value for the particular key.

    This API can only be called with developer credentials. You cannot call this API with the temporary user credentials provided by Cognito Identity.

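    A minimal sketch, not part of the vendored model: pointing the sync trigger event at a Lambda function via the generated cognitosync client from this SDK. The pool ID and function ARN are placeholders, and "SyncTrigger" is assumed as the event key.

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cognitosync"
)

func main() {
	svc := cognitosync.New(session.Must(session.NewSession()))
	_, err := svc.SetCognitoEvents(&cognitosync.SetCognitoEventsInput{
		IdentityPoolId: aws.String("us-east-1:00000000-0000-0000-0000-000000000000"), // placeholder
		Events: map[string]*string{
			// Only the pairs given here are updated; per the Events shape above
			// the map holds at most one entry, and an empty value removes a key.
			"SyncTrigger": aws.String("arn:aws:lambda:us-east-1:123456789012:function:MySyncTrigger"), // placeholder ARN
		},
	})
	if err != nil {
		panic(err)
	}
}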
    ", + "SetIdentityPoolConfiguration": "

    Sets the necessary configuration for push sync.

    This API can only be called with developer credentials. You cannot call this API with the temporary user credentials provided by Cognito Identity.

    ", + "SubscribeToDataset": "

    Subscribes to receive notifications when a dataset is modified by another device.

    This API can only be called with temporary credentials provided by Cognito Identity. You cannot call this API with developer credentials.

    ", + "UnsubscribeFromDataset": "

    Unsubscribes from receiving notifications when a dataset is modified by another device.

    This API can only be called with temporary credentials provided by Cognito Identity. You cannot call this API with developer credentials.

    ", + "UpdateRecords": "

    Posts updates to records and adds and deletes records for a dataset and user.

    The sync count in the record patch is your last known sync count for that record. The server will reject an UpdateRecords request with a ResourceConflictException if you try to patch a record with a new value but a stale sync count.

    For example, if the sync count on the server is 5 for a key called highScore and you try to submit a new highScore with a sync count of 4, the request will be rejected. To obtain the current sync count for a record, call ListRecords. On a successful update of the record, the response returns the new sync count for that record. You should present that sync count the next time you try to update that same record. When the record does not exist, specify the sync count as 0.

    This API can be called with temporary user credentials provided by Cognito Identity or with developer credentials.

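    A minimal sketch, not part of the vendored model, of the sync-count handshake described above (ListRecords, then UpdateRecords with the returned session token), using the generated cognitosync client from this SDK; pool and identity IDs are placeholders.

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cognitosync"
)

func main() {
	svc := cognitosync.New(session.Must(session.NewSession()))

	pool := aws.String("us-east-1:00000000-0000-0000-0000-000000000000")     // placeholder
	identity := aws.String("us-east-1:11111111-1111-1111-1111-111111111111") // placeholder
	dataset := aws.String("gameState")

	// ListRecords returns the current server sync count per record plus a
	// SyncSessionToken that must accompany the subsequent UpdateRecords call.
	list, err := svc.ListRecords(&cognitosync.ListRecordsInput{
		IdentityPoolId: pool,
		IdentityId:     identity,
		DatasetName:    dataset,
	})
	if err != nil {
		panic(err)
	}

	var lastSyncCount int64 // stays 0 when the record does not exist yet
	for _, r := range list.Records {
		if aws.StringValue(r.Key) == "highScore" {
			lastSyncCount = aws.Int64Value(r.SyncCount)
		}
	}

	// Patch the record with the last known sync count; a stale count is
	// rejected with a ResourceConflictException.
	_, err = svc.UpdateRecords(&cognitosync.UpdateRecordsInput{
		IdentityPoolId:   pool,
		IdentityId:       identity,
		DatasetName:      dataset,
		SyncSessionToken: list.SyncSessionToken,
		RecordPatches: []*cognitosync.RecordPatch{{
			Op:        aws.String(cognitosync.OperationReplace),
			Key:       aws.String("highScore"),
			Value:     aws.String("6"),
			SyncCount: aws.Int64(lastSyncCount),
		}},
	})
	if err != nil {
		panic(err)
	}
}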
    " + }, + "service": "Amazon Cognito Sync

    Amazon Cognito Sync provides an AWS service and client library that enable cross-device syncing of application-related user data. High-level client libraries are available for both iOS and Android. You can use these libraries to persist data locally so that it's available even if the device is offline. Developer credentials don't need to be stored on the mobile device to access the service. You can use Amazon Cognito to obtain a normalized user ID and credentials. User data is persisted in a dataset that can store up to 1 MB of key-value pairs, and you can have up to 20 datasets per user identity.

    With Amazon Cognito Sync, the data stored for each identity is accessible only to credentials assigned to that identity. To use the Cognito Sync service, you need to make API calls using credentials retrieved with the Amazon Cognito Identity service.

    If you want to use Cognito Sync in an Android or iOS application, you will probably want to make API calls via the AWS Mobile SDK. To learn more, see the Developer Guide for Android and the Developer Guide for iOS.

    ", + "shapes": { + "AlreadyStreamedException": { + "base": "An exception thrown when a bulk publish operation is requested less than 24 hours after a previous bulk publish operation completed successfully.", + "refs": { + } + }, + "ApplicationArn": { + "base": null, + "refs": { + "ApplicationArnList$member": null + } + }, + "ApplicationArnList": { + "base": null, + "refs": { + "PushSync$ApplicationArns": "

    List of SNS platform application ARNs that could be used by clients.

    " + } + }, + "AssumeRoleArn": { + "base": null, + "refs": { + "CognitoStreams$RoleArn": "The ARN of the role Amazon Cognito can assume in order to publish to the stream. This role must grant access to Amazon Cognito (cognito-sync) to invoke PutRecord on your Cognito stream.", + "PushSync$RoleArn": "

    A role configured to allow Cognito to call SNS on behalf of the developer.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "ListRecordsResponse$DatasetExists": "Indicates whether the dataset exists.", + "ListRecordsResponse$DatasetDeletedAfterRequestedSyncCount": "A boolean value specifying whether to delete the dataset locally." + } + }, + "BulkPublishRequest": { + "base": "The input for the BulkPublish operation.", + "refs": { + } + }, + "BulkPublishResponse": { + "base": "The output for the BulkPublish operation.", + "refs": { + } + }, + "BulkPublishStatus": { + "base": null, + "refs": { + "GetBulkPublishDetailsResponse$BulkPublishStatus": "Status of the last bulk publish operation, valid values are:

    NOT_STARTED - No bulk publish has been requested for this identity pool

    IN_PROGRESS - Data is being published to the configured stream

    SUCCEEDED - All data for the identity pool has been published to the configured stream

    FAILED - Some portion of the data has failed to publish; check FailureMessage for the cause.

    " + } + }, + "ClientContext": { + "base": null, + "refs": { + "UpdateRecordsRequest$ClientContext": "Intended to supply a device ID that will populate the lastModifiedBy field referenced in other methods. The ClientContext field is not yet implemented." + } + }, + "CognitoEventType": { + "base": null, + "refs": { + "Events$key": null + } + }, + "CognitoStreams": { + "base": "Configuration options for configure Cognito streams.", + "refs": { + "GetIdentityPoolConfigurationResponse$CognitoStreams": "Options to apply to this identity pool for Amazon Cognito streams.", + "SetIdentityPoolConfigurationRequest$CognitoStreams": "Options to apply to this identity pool for Amazon Cognito streams.", + "SetIdentityPoolConfigurationResponse$CognitoStreams": "Options to apply to this identity pool for Amazon Cognito streams." + } + }, + "ConcurrentModificationException": { + "base": "

    Thrown if there are parallel requests to modify a resource.

    ", + "refs": { + } + }, + "Dataset": { + "base": "A collection of data for an identity pool. An identity pool can have multiple datasets. A dataset is per identity and can be general or associated with a particular entity in an application (like a saved game). Datasets are automatically created if they don't exist. Data is synced by dataset, and a dataset can hold up to 1MB of key-value pairs.", + "refs": { + "DatasetList$member": null, + "DeleteDatasetResponse$Dataset": "A collection of data for an identity pool. An identity pool can have multiple datasets. A dataset is per identity and can be general or associated with a particular entity in an application (like a saved game). Datasets are automatically created if they don't exist. Data is synced by dataset, and a dataset can hold up to 1MB of key-value pairs.", + "DescribeDatasetResponse$Dataset": "Meta data for a collection of data for an identity. An identity can have multiple datasets. A dataset can be general or associated with a particular entity in an application (like a saved game). Datasets are automatically created if they don't exist. Data is synced by dataset, and a dataset can hold up to 1MB of key-value pairs." + } + }, + "DatasetList": { + "base": null, + "refs": { + "ListDatasetsResponse$Datasets": "A set of datasets." + } + }, + "DatasetName": { + "base": null, + "refs": { + "Dataset$DatasetName": "A string of up to 128 characters. Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-' (dash), and '.' (dot).", + "DeleteDatasetRequest$DatasetName": "A string of up to 128 characters. Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-' (dash), and '.' (dot).", + "DescribeDatasetRequest$DatasetName": "A string of up to 128 characters. Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-' (dash), and '.' (dot).", + "ListRecordsRequest$DatasetName": "A string of up to 128 characters. Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-' (dash), and '.' (dot).", + "SubscribeToDatasetRequest$DatasetName": "

    The name of the dataset to subscribe to.

    ", + "UnsubscribeFromDatasetRequest$DatasetName": "

    The name of the dataset from which to unsubscribe.

    ", + "UpdateRecordsRequest$DatasetName": "A string of up to 128 characters. Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-' (dash), and '.' (dot)." + } + }, + "Date": { + "base": null, + "refs": { + "Dataset$CreationDate": "Date on which the dataset was created.", + "Dataset$LastModifiedDate": "Date when the dataset was last modified.", + "GetBulkPublishDetailsResponse$BulkPublishStartTime": "The date/time at which the last bulk publish was initiated.", + "GetBulkPublishDetailsResponse$BulkPublishCompleteTime": "If BulkPublishStatus is SUCCEEDED, the time the last bulk publish operation completed.", + "IdentityPoolUsage$LastModifiedDate": "Date on which the identity pool was last modified.", + "IdentityUsage$LastModifiedDate": "Date on which the identity was last modified.", + "Record$LastModifiedDate": "The date on which the record was last modified.", + "Record$DeviceLastModifiedDate": "The last modified date of the client device.", + "RecordPatch$DeviceLastModifiedDate": "The last modified date of the client device." + } + }, + "DeleteDatasetRequest": { + "base": "A request to delete the specific dataset.", + "refs": { + } + }, + "DeleteDatasetResponse": { + "base": "Response to a successful DeleteDataset request.", + "refs": { + } + }, + "DescribeDatasetRequest": { + "base": "A request for meta data about a dataset (creation date, number of records, size) by owner and dataset name.", + "refs": { + } + }, + "DescribeDatasetResponse": { + "base": "Response to a successful DescribeDataset request.", + "refs": { + } + }, + "DescribeIdentityPoolUsageRequest": { + "base": "A request for usage information about the identity pool.", + "refs": { + } + }, + "DescribeIdentityPoolUsageResponse": { + "base": "Response to a successful DescribeIdentityPoolUsage request.", + "refs": { + } + }, + "DescribeIdentityUsageRequest": { + "base": "A request for information about the usage of an identity pool.", + "refs": { + } + }, + "DescribeIdentityUsageResponse": { + "base": "The response to a successful DescribeIdentityUsage request.", + "refs": { + } + }, + "DeviceId": { + "base": null, + "refs": { + "RegisterDeviceResponse$DeviceId": "

    The unique ID generated for this device by Cognito.

    ", + "SubscribeToDatasetRequest$DeviceId": "

    The unique ID generated for this device by Cognito.

    ", + "UnsubscribeFromDatasetRequest$DeviceId": "

    The unique ID generated for this device by Cognito.

    ", + "UpdateRecordsRequest$DeviceId": "

    The unique ID generated for this device by Cognito.

    " + } + }, + "DuplicateRequestException": { + "base": "An exception thrown when there is an IN_PROGRESS bulk publish operation for the given identity pool.", + "refs": { + } + }, + "Events": { + "base": null, + "refs": { + "GetCognitoEventsResponse$Events": "

    The Cognito Events returned from the GetCognitoEvents request.

    ", + "SetCognitoEventsRequest$Events": "

    The events to configure.

    " + } + }, + "ExceptionMessage": { + "base": null, + "refs": { + "AlreadyStreamedException$message": "The message associated with the AlreadyStreamedException exception.", + "DuplicateRequestException$message": "The message associated with the DuplicateRequestException exception.", + "InternalErrorException$message": "Message returned by InternalErrorException.", + "InvalidConfigurationException$message": "Message returned by InvalidConfigurationException.", + "InvalidLambdaFunctionOutputException$message": "

    A message returned when an InvalidLambdaFunctionOutputException occurs.

    ", + "InvalidParameterException$message": "Message returned by InvalidParameterException.", + "LambdaThrottledException$message": "

    A message returned when a LambdaThrottledException is thrown.

    ", + "LimitExceededException$message": "Message returned by LimitExceededException.", + "NotAuthorizedException$message": "The message returned by a NotAuthorizedException.", + "ResourceConflictException$message": "The message returned by a ResourceConflictException.", + "ResourceNotFoundException$message": "Message returned by a ResourceNotFoundException.", + "TooManyRequestsException$message": "Message returned by a TooManyRequestsException." + } + }, + "GetBulkPublishDetailsRequest": { + "base": "The input for the GetBulkPublishDetails operation.", + "refs": { + } + }, + "GetBulkPublishDetailsResponse": { + "base": "The output for the GetBulkPublishDetails operation.", + "refs": { + } + }, + "GetCognitoEventsRequest": { + "base": "

    A request for a list of the configured Cognito Events.

    ", + "refs": { + } + }, + "GetCognitoEventsResponse": { + "base": "

    The response from the GetCognitoEvents request.

    ", + "refs": { + } + }, + "GetIdentityPoolConfigurationRequest": { + "base": "

    The input for the GetIdentityPoolConfiguration operation.

    ", + "refs": { + } + }, + "GetIdentityPoolConfigurationResponse": { + "base": "

    The output for the GetIdentityPoolConfiguration operation.

    ", + "refs": { + } + }, + "IdentityId": { + "base": null, + "refs": { + "Dataset$IdentityId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.", + "DeleteDatasetRequest$IdentityId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.", + "DescribeDatasetRequest$IdentityId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.", + "DescribeIdentityUsageRequest$IdentityId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.", + "IdentityUsage$IdentityId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.", + "ListDatasetsRequest$IdentityId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.", + "ListRecordsRequest$IdentityId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.", + "RegisterDeviceRequest$IdentityId": "

    The unique ID for this identity.

    ", + "SubscribeToDatasetRequest$IdentityId": "

    Unique ID for this identity.

    ", + "UnsubscribeFromDatasetRequest$IdentityId": "

    Unique ID for this identity.

    ", + "UpdateRecordsRequest$IdentityId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region." + } + }, + "IdentityPoolId": { + "base": null, + "refs": { + "BulkPublishRequest$IdentityPoolId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.", + "BulkPublishResponse$IdentityPoolId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.", + "DeleteDatasetRequest$IdentityPoolId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.", + "DescribeDatasetRequest$IdentityPoolId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.", + "DescribeIdentityPoolUsageRequest$IdentityPoolId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.", + "DescribeIdentityUsageRequest$IdentityPoolId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.", + "GetBulkPublishDetailsRequest$IdentityPoolId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.", + "GetBulkPublishDetailsResponse$IdentityPoolId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.", + "GetCognitoEventsRequest$IdentityPoolId": "

    The Cognito Identity Pool ID for the request.

    ", + "GetIdentityPoolConfigurationRequest$IdentityPoolId": "

    A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. This is the ID of the pool for which to return a configuration.

    ", + "GetIdentityPoolConfigurationResponse$IdentityPoolId": "

    A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito.

    ", + "IdentityPoolUsage$IdentityPoolId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.", + "IdentityUsage$IdentityPoolId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.", + "ListDatasetsRequest$IdentityPoolId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.", + "ListRecordsRequest$IdentityPoolId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.", + "RegisterDeviceRequest$IdentityPoolId": "

    A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. This is the ID of the pool to which the identity belongs.

    ", + "SetCognitoEventsRequest$IdentityPoolId": "

    The Cognito Identity Pool to use when configuring Cognito Events.

    ", + "SetIdentityPoolConfigurationRequest$IdentityPoolId": "

    A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. This is the ID of the pool to modify.

    ", + "SetIdentityPoolConfigurationResponse$IdentityPoolId": "

    A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito.

    ", + "SubscribeToDatasetRequest$IdentityPoolId": "

    A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. The ID of the pool to which the identity belongs.

    ", + "UnsubscribeFromDatasetRequest$IdentityPoolId": "

    A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. The ID of the pool to which this identity belongs.

    ", + "UpdateRecordsRequest$IdentityPoolId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region." + } + }, + "IdentityPoolUsage": { + "base": "Usage information for the identity pool.", + "refs": { + "DescribeIdentityPoolUsageResponse$IdentityPoolUsage": "Information about the usage of the identity pool.", + "IdentityPoolUsageList$member": null + } + }, + "IdentityPoolUsageList": { + "base": null, + "refs": { + "ListIdentityPoolUsageResponse$IdentityPoolUsages": "Usage information for the identity pools." + } + }, + "IdentityUsage": { + "base": "Usage information for the identity.", + "refs": { + "DescribeIdentityUsageResponse$IdentityUsage": "Usage information for the identity." + } + }, + "Integer": { + "base": null, + "refs": { + "IdentityUsage$DatasetCount": "Number of datasets for the identity.", + "ListDatasetsResponse$Count": "Number of datasets returned.", + "ListIdentityPoolUsageResponse$MaxResults": "The maximum number of results to be returned.", + "ListIdentityPoolUsageResponse$Count": "Total number of identities for the identity pool.", + "ListRecordsResponse$Count": "Total number of records." + } + }, + "IntegerString": { + "base": null, + "refs": { + "ListDatasetsRequest$MaxResults": "The maximum number of results to be returned.", + "ListIdentityPoolUsageRequest$MaxResults": "The maximum number of results to be returned.", + "ListRecordsRequest$MaxResults": "The maximum number of results to be returned." + } + }, + "InternalErrorException": { + "base": "Indicates an internal service error.", + "refs": { + } + }, + "InvalidConfigurationException": { + "base": null, + "refs": { + } + }, + "InvalidLambdaFunctionOutputException": { + "base": "

    The AWS Lambda function returned invalid output or an exception.

    ", + "refs": { + } + }, + "InvalidParameterException": { + "base": "Thrown when a request parameter does not comply with the associated constraints.", + "refs": { + } + }, + "LambdaFunctionArn": { + "base": null, + "refs": { + "Events$value": null + } + }, + "LambdaThrottledException": { + "base": "

    AWS Lambda throttled your account; please contact AWS Support.

    ", + "refs": { + } + }, + "LimitExceededException": { + "base": "Thrown when the limit on the number of objects or operations has been exceeded.", + "refs": { + } + }, + "ListDatasetsRequest": { + "base": "Request for a list of datasets for an identity.", + "refs": { + } + }, + "ListDatasetsResponse": { + "base": "Returned for a successful ListDatasets request.", + "refs": { + } + }, + "ListIdentityPoolUsageRequest": { + "base": "A request for usage information on an identity pool.", + "refs": { + } + }, + "ListIdentityPoolUsageResponse": { + "base": "Returned for a successful ListIdentityPoolUsage request.", + "refs": { + } + }, + "ListRecordsRequest": { + "base": "A request for a list of records.", + "refs": { + } + }, + "ListRecordsResponse": { + "base": "Returned for a successful ListRecordsRequest.", + "refs": { + } + }, + "Long": { + "base": null, + "refs": { + "Dataset$DataStorage": "Total size in bytes of the records in this dataset.", + "Dataset$NumRecords": "Number of records in this dataset.", + "IdentityPoolUsage$SyncSessionsCount": "Number of sync sessions for the identity pool.", + "IdentityPoolUsage$DataStorage": "Data storage information for the identity pool.", + "IdentityUsage$DataStorage": "Total data storage for this identity.", + "ListRecordsRequest$LastSyncCount": "The last server sync count for this record.", + "ListRecordsResponse$DatasetSyncCount": "Server sync count for this dataset.", + "Record$SyncCount": "The server sync count for this record.", + "RecordPatch$SyncCount": "Last known server sync count for this record. Set to 0 if unknown." + } + }, + "MergedDatasetNameList": { + "base": null, + "refs": { + "ListRecordsResponse$MergedDatasetNames": "Names of merged datasets." + } + }, + "NotAuthorizedException": { + "base": "Thrown when a user is not authorized to access the requested resource.", + "refs": { + } + }, + "Operation": { + "base": null, + "refs": { + "RecordPatch$Op": "An operation, either replace or remove." + } + }, + "Platform": { + "base": null, + "refs": { + "RegisterDeviceRequest$Platform": "

    The SNS platform type (e.g., GCM, ADM, APNS, APNS_SANDBOX).

    " + } + }, + "PushSync": { + "base": "

    Configuration options to be applied to the identity pool.

    ", + "refs": { + "GetIdentityPoolConfigurationResponse$PushSync": "

    Options to apply to this identity pool for push synchronization.

    ", + "SetIdentityPoolConfigurationRequest$PushSync": "

    Options to apply to this identity pool for push synchronization.

    ", + "SetIdentityPoolConfigurationResponse$PushSync": "

    Options to apply to this identity pool for push synchronization.

    " + } + }, + "PushToken": { + "base": null, + "refs": { + "RegisterDeviceRequest$Token": "

    The push token.

    " + } + }, + "Record": { + "base": "The basic data structure of a dataset.", + "refs": { + "RecordList$member": null + } + }, + "RecordKey": { + "base": null, + "refs": { + "Record$Key": "The key for the record.", + "RecordPatch$Key": "The key associated with the record patch." + } + }, + "RecordList": { + "base": null, + "refs": { + "ListRecordsResponse$Records": "A list of all records.", + "UpdateRecordsResponse$Records": "A list of records that have been updated." + } + }, + "RecordPatch": { + "base": "An update operation for a record.", + "refs": { + "RecordPatchList$member": null + } + }, + "RecordPatchList": { + "base": null, + "refs": { + "UpdateRecordsRequest$RecordPatches": "A list of patch operations." + } + }, + "RecordValue": { + "base": null, + "refs": { + "Record$Value": "The value for the record.", + "RecordPatch$Value": "The value associated with the record patch." + } + }, + "RegisterDeviceRequest": { + "base": "

    A request to RegisterDevice.

    ", + "refs": { + } + }, + "RegisterDeviceResponse": { + "base": "

    Response to a RegisterDevice request.

    ", + "refs": { + } + }, + "ResourceConflictException": { + "base": "Thrown if an update can't be applied because the resource was changed by another call and this would result in a conflict.", + "refs": { + } + }, + "ResourceNotFoundException": { + "base": "Thrown if the resource doesn't exist.", + "refs": { + } + }, + "SetCognitoEventsRequest": { + "base": "

    A request to configure Cognito Events.

    ", + "refs": { + } + }, + "SetIdentityPoolConfigurationRequest": { + "base": "

    The input for the SetIdentityPoolConfiguration operation.

    ", + "refs": { + } + }, + "SetIdentityPoolConfigurationResponse": { + "base": "

    The output for the SetIdentityPoolConfiguration operation.

    ", + "refs": { + } + }, + "StreamName": { + "base": null, + "refs": { + "CognitoStreams$StreamName": "The name of the Cognito stream to receive updates. This stream must be in the developers account and in the same region as the identity pool." + } + }, + "StreamingStatus": { + "base": null, + "refs": { + "CognitoStreams$StreamingStatus": "Status of the Cognito streams. Valid values are:

    ENABLED - Streaming of updates to the identity pool is enabled.

    DISABLED - Streaming of updates to the identity pool is disabled. Bulk publish will also fail if StreamingStatus is DISABLED.

    " + } + }, + "String": { + "base": null, + "refs": { + "ConcurrentModificationException$message": "

    The message returned by a ConcurrentModificationException.

    ", + "Dataset$LastModifiedBy": "The device that made the last change to this dataset.", + "GetBulkPublishDetailsResponse$FailureMessage": "If BulkPublishStatus is FAILED this field will contain the error message that caused the bulk publish to fail.", + "ListDatasetsRequest$NextToken": "A pagination token for obtaining the next page of results.", + "ListDatasetsResponse$NextToken": "A pagination token for obtaining the next page of results.", + "ListIdentityPoolUsageRequest$NextToken": "A pagination token for obtaining the next page of results.", + "ListIdentityPoolUsageResponse$NextToken": "A pagination token for obtaining the next page of results.", + "ListRecordsRequest$NextToken": "A pagination token for obtaining the next page of results.", + "ListRecordsResponse$NextToken": "A pagination token for obtaining the next page of results.", + "ListRecordsResponse$LastModifiedBy": "The user/device that made the last change to this record.", + "ListRecordsResponse$SyncSessionToken": "A token containing a session ID, identity ID, and expiration.", + "MergedDatasetNameList$member": null, + "Record$LastModifiedBy": "The user/device that made the last change to this record." + } + }, + "SubscribeToDatasetRequest": { + "base": "

    A request to SubscribeToDataset.

    ", + "refs": { + } + }, + "SubscribeToDatasetResponse": { + "base": "

    Response to a SubscribeToDataset request.

    ", + "refs": { + } + }, + "SyncSessionToken": { + "base": null, + "refs": { + "ListRecordsRequest$SyncSessionToken": "A token containing a session ID, identity ID, and expiration.", + "UpdateRecordsRequest$SyncSessionToken": "The SyncSessionToken returned by a previous call to ListRecords for this dataset and identity." + } + }, + "TooManyRequestsException": { + "base": "Thrown if the request is throttled.", + "refs": { + } + }, + "UnsubscribeFromDatasetRequest": { + "base": "

    A request to UnsubscribeFromDataset.

    ", + "refs": { + } + }, + "UnsubscribeFromDatasetResponse": { + "base": "

    Response to an UnsubscribeFromDataset request.

    ", + "refs": { + } + }, + "UpdateRecordsRequest": { + "base": "A request to post updates to records or add and delete records for a dataset and user.", + "refs": { + } + }, + "UpdateRecordsResponse": { + "base": "Returned for a successful UpdateRecordsRequest.", + "refs": { + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/api-2.json new file mode 100644 index 000000000..cae3a4796 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/api-2.json @@ -0,0 +1,1303 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2014-11-12", + "endpointPrefix":"config", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"Config Service", + "serviceFullName":"AWS Config", + "signatureVersion":"v4", + "targetPrefix":"StarlingDoveService" + }, + "operations":{ + "DeleteConfigRule":{ + "name":"DeleteConfigRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteConfigRuleRequest"}, + "errors":[ + {"shape":"NoSuchConfigRuleException"}, + {"shape":"ResourceInUseException"} + ] + }, + "DeleteConfigurationRecorder":{ + "name":"DeleteConfigurationRecorder", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteConfigurationRecorderRequest"}, + "errors":[ + {"shape":"NoSuchConfigurationRecorderException"} + ] + }, + "DeleteDeliveryChannel":{ + "name":"DeleteDeliveryChannel", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDeliveryChannelRequest"}, + "errors":[ + {"shape":"NoSuchDeliveryChannelException"}, + {"shape":"LastDeliveryChannelDeleteFailedException"} + ] + }, + "DeliverConfigSnapshot":{ + "name":"DeliverConfigSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeliverConfigSnapshotRequest"}, + "output":{"shape":"DeliverConfigSnapshotResponse"}, + "errors":[ + {"shape":"NoSuchDeliveryChannelException"}, + {"shape":"NoAvailableConfigurationRecorderException"}, + {"shape":"NoRunningConfigurationRecorderException"} + ] + }, + "DescribeComplianceByConfigRule":{ + "name":"DescribeComplianceByConfigRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeComplianceByConfigRuleRequest"}, + "output":{"shape":"DescribeComplianceByConfigRuleResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"NoSuchConfigRuleException"} + ] + }, + "DescribeComplianceByResource":{ + "name":"DescribeComplianceByResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeComplianceByResourceRequest"}, + "output":{"shape":"DescribeComplianceByResourceResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidNextTokenException"} + ] + }, + "DescribeConfigRuleEvaluationStatus":{ + "name":"DescribeConfigRuleEvaluationStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeConfigRuleEvaluationStatusRequest"}, + "output":{"shape":"DescribeConfigRuleEvaluationStatusResponse"}, + "errors":[ + {"shape":"NoSuchConfigRuleException"} + ] + }, + "DescribeConfigRules":{ + "name":"DescribeConfigRules", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeConfigRulesRequest"}, + "output":{"shape":"DescribeConfigRulesResponse"}, + "errors":[ + {"shape":"NoSuchConfigRuleException"} + ] + }, + "DescribeConfigurationRecorderStatus":{ + "name":"DescribeConfigurationRecorderStatus", + 
"http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeConfigurationRecorderStatusRequest"}, + "output":{"shape":"DescribeConfigurationRecorderStatusResponse"}, + "errors":[ + {"shape":"NoSuchConfigurationRecorderException"} + ] + }, + "DescribeConfigurationRecorders":{ + "name":"DescribeConfigurationRecorders", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeConfigurationRecordersRequest"}, + "output":{"shape":"DescribeConfigurationRecordersResponse"}, + "errors":[ + {"shape":"NoSuchConfigurationRecorderException"} + ] + }, + "DescribeDeliveryChannelStatus":{ + "name":"DescribeDeliveryChannelStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDeliveryChannelStatusRequest"}, + "output":{"shape":"DescribeDeliveryChannelStatusResponse"}, + "errors":[ + {"shape":"NoSuchDeliveryChannelException"} + ] + }, + "DescribeDeliveryChannels":{ + "name":"DescribeDeliveryChannels", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDeliveryChannelsRequest"}, + "output":{"shape":"DescribeDeliveryChannelsResponse"}, + "errors":[ + {"shape":"NoSuchDeliveryChannelException"} + ] + }, + "GetComplianceDetailsByConfigRule":{ + "name":"GetComplianceDetailsByConfigRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetComplianceDetailsByConfigRuleRequest"}, + "output":{"shape":"GetComplianceDetailsByConfigRuleResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"NoSuchConfigRuleException"} + ] + }, + "GetComplianceDetailsByResource":{ + "name":"GetComplianceDetailsByResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetComplianceDetailsByResourceRequest"}, + "output":{"shape":"GetComplianceDetailsByResourceResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"} + ] + }, + "GetComplianceSummaryByConfigRule":{ + "name":"GetComplianceSummaryByConfigRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{"shape":"GetComplianceSummaryByConfigRuleResponse"} + }, + "GetComplianceSummaryByResourceType":{ + "name":"GetComplianceSummaryByResourceType", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetComplianceSummaryByResourceTypeRequest"}, + "output":{"shape":"GetComplianceSummaryByResourceTypeResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"} + ] + }, + "GetResourceConfigHistory":{ + "name":"GetResourceConfigHistory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetResourceConfigHistoryRequest"}, + "output":{"shape":"GetResourceConfigHistoryResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InvalidTimeRangeException"}, + {"shape":"InvalidLimitException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"NoAvailableConfigurationRecorderException"}, + {"shape":"ResourceNotDiscoveredException"} + ] + }, + "ListDiscoveredResources":{ + "name":"ListDiscoveredResources", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDiscoveredResourcesRequest"}, + "output":{"shape":"ListDiscoveredResourcesResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InvalidLimitException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"NoAvailableConfigurationRecorderException"} + ] + }, + "PutConfigRule":{ + "name":"PutConfigRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"PutConfigRuleRequest"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"MaxNumberOfConfigRulesExceededException"}, + {"shape":"ResourceInUseException"}, + {"shape":"InsufficientPermissionsException"}, + {"shape":"NoAvailableConfigurationRecorderException"} + ] + }, + "PutConfigurationRecorder":{ + "name":"PutConfigurationRecorder", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutConfigurationRecorderRequest"}, + "errors":[ + {"shape":"MaxNumberOfConfigurationRecordersExceededException"}, + {"shape":"InvalidConfigurationRecorderNameException"}, + {"shape":"InvalidRoleException"}, + {"shape":"InvalidRecordingGroupException"} + ] + }, + "PutDeliveryChannel":{ + "name":"PutDeliveryChannel", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutDeliveryChannelRequest"}, + "errors":[ + {"shape":"MaxNumberOfDeliveryChannelsExceededException"}, + {"shape":"NoAvailableConfigurationRecorderException"}, + {"shape":"InvalidDeliveryChannelNameException"}, + {"shape":"NoSuchBucketException"}, + {"shape":"InvalidS3KeyPrefixException"}, + {"shape":"InvalidSNSTopicARNException"}, + {"shape":"InsufficientDeliveryPolicyException"} + ] + }, + "PutEvaluations":{ + "name":"PutEvaluations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutEvaluationsRequest"}, + "output":{"shape":"PutEvaluationsResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidResultTokenException"}, + {"shape":"NoSuchConfigRuleException"} + ] + }, + "StartConfigurationRecorder":{ + "name":"StartConfigurationRecorder", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartConfigurationRecorderRequest"}, + "errors":[ + {"shape":"NoSuchConfigurationRecorderException"}, + {"shape":"NoAvailableDeliveryChannelException"} + ] + }, + "StopConfigurationRecorder":{ + "name":"StopConfigurationRecorder", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopConfigurationRecorderRequest"}, + "errors":[ + {"shape":"NoSuchConfigurationRecorderException"} + ] + } + }, + "shapes":{ + "ARN":{"type":"string"}, + "AccountId":{"type":"string"}, + "AllSupported":{"type":"boolean"}, + "AvailabilityZone":{"type":"string"}, + "AwsRegion":{"type":"string"}, + "Boolean":{"type":"boolean"}, + "ChannelName":{ + "type":"string", + "max":256, + "min":1 + }, + "ChronologicalOrder":{ + "type":"string", + "enum":[ + "Reverse", + "Forward" + ] + }, + "Compliance":{ + "type":"structure", + "members":{ + "ComplianceType":{"shape":"ComplianceType"}, + "ComplianceContributorCount":{"shape":"ComplianceContributorCount"} + } + }, + "ComplianceByConfigRule":{ + "type":"structure", + "members":{ + "ConfigRuleName":{"shape":"StringWithCharLimit64"}, + "Compliance":{"shape":"Compliance"} + } + }, + "ComplianceByConfigRules":{ + "type":"list", + "member":{"shape":"ComplianceByConfigRule"} + }, + "ComplianceByResource":{ + "type":"structure", + "members":{ + "ResourceType":{"shape":"StringWithCharLimit256"}, + "ResourceId":{"shape":"StringWithCharLimit256"}, + "Compliance":{"shape":"Compliance"} + } + }, + "ComplianceByResources":{ + "type":"list", + "member":{"shape":"ComplianceByResource"} + }, + "ComplianceContributorCount":{ + "type":"structure", + "members":{ + "CappedCount":{"shape":"Integer"}, + "CapExceeded":{"shape":"Boolean"} + } + }, + "ComplianceResourceTypes":{ + "type":"list", + "member":{"shape":"StringWithCharLimit256"}, + "max":100, + "min":0 + }, + 
"ComplianceSummariesByResourceType":{ + "type":"list", + "member":{"shape":"ComplianceSummaryByResourceType"} + }, + "ComplianceSummary":{ + "type":"structure", + "members":{ + "CompliantResourceCount":{"shape":"ComplianceContributorCount"}, + "NonCompliantResourceCount":{"shape":"ComplianceContributorCount"}, + "ComplianceSummaryTimestamp":{"shape":"Date"} + } + }, + "ComplianceSummaryByResourceType":{ + "type":"structure", + "members":{ + "ResourceType":{"shape":"StringWithCharLimit256"}, + "ComplianceSummary":{"shape":"ComplianceSummary"} + } + }, + "ComplianceType":{ + "type":"string", + "enum":[ + "COMPLIANT", + "NON_COMPLIANT", + "NOT_APPLICABLE", + "INSUFFICIENT_DATA" + ] + }, + "ComplianceTypes":{ + "type":"list", + "member":{"shape":"ComplianceType"}, + "max":3, + "min":0 + }, + "ConfigExportDeliveryInfo":{ + "type":"structure", + "members":{ + "lastStatus":{"shape":"DeliveryStatus"}, + "lastErrorCode":{"shape":"String"}, + "lastErrorMessage":{"shape":"String"}, + "lastAttemptTime":{"shape":"Date"}, + "lastSuccessfulTime":{"shape":"Date"}, + "nextDeliveryTime":{"shape":"Date"} + } + }, + "ConfigRule":{ + "type":"structure", + "required":["Source"], + "members":{ + "ConfigRuleName":{"shape":"StringWithCharLimit64"}, + "ConfigRuleArn":{"shape":"String"}, + "ConfigRuleId":{"shape":"String"}, + "Description":{"shape":"EmptiableStringWithCharLimit256"}, + "Scope":{"shape":"Scope"}, + "Source":{"shape":"Source"}, + "InputParameters":{"shape":"StringWithCharLimit256"}, + "MaximumExecutionFrequency":{"shape":"MaximumExecutionFrequency"}, + "ConfigRuleState":{"shape":"ConfigRuleState"} + } + }, + "ConfigRuleEvaluationStatus":{ + "type":"structure", + "members":{ + "ConfigRuleName":{"shape":"StringWithCharLimit64"}, + "ConfigRuleArn":{"shape":"String"}, + "ConfigRuleId":{"shape":"String"}, + "LastSuccessfulInvocationTime":{"shape":"Date"}, + "LastFailedInvocationTime":{"shape":"Date"}, + "LastSuccessfulEvaluationTime":{"shape":"Date"}, + "LastFailedEvaluationTime":{"shape":"Date"}, + "FirstActivatedTime":{"shape":"Date"}, + "LastErrorCode":{"shape":"String"}, + "LastErrorMessage":{"shape":"String"}, + "FirstEvaluationStarted":{"shape":"Boolean"} + } + }, + "ConfigRuleEvaluationStatusList":{ + "type":"list", + "member":{"shape":"ConfigRuleEvaluationStatus"} + }, + "ConfigRuleNames":{ + "type":"list", + "member":{"shape":"StringWithCharLimit64"}, + "max":25, + "min":0 + }, + "ConfigRuleState":{ + "type":"string", + "enum":[ + "ACTIVE", + "DELETING" + ] + }, + "ConfigRules":{ + "type":"list", + "member":{"shape":"ConfigRule"} + }, + "ConfigSnapshotDeliveryProperties":{ + "type":"structure", + "members":{ + "deliveryFrequency":{"shape":"MaximumExecutionFrequency"} + } + }, + "ConfigStreamDeliveryInfo":{ + "type":"structure", + "members":{ + "lastStatus":{"shape":"DeliveryStatus"}, + "lastErrorCode":{"shape":"String"}, + "lastErrorMessage":{"shape":"String"}, + "lastStatusChangeTime":{"shape":"Date"} + } + }, + "Configuration":{"type":"string"}, + "ConfigurationItem":{ + "type":"structure", + "members":{ + "version":{"shape":"Version"}, + "accountId":{"shape":"AccountId"}, + "configurationItemCaptureTime":{"shape":"ConfigurationItemCaptureTime"}, + "configurationItemStatus":{"shape":"ConfigurationItemStatus"}, + "configurationStateId":{"shape":"ConfigurationStateId"}, + "configurationItemMD5Hash":{"shape":"ConfigurationItemMD5Hash"}, + "arn":{"shape":"ARN"}, + "resourceType":{"shape":"ResourceType"}, + "resourceId":{"shape":"ResourceId"}, + "resourceName":{"shape":"ResourceName"}, + 
"awsRegion":{"shape":"AwsRegion"}, + "availabilityZone":{"shape":"AvailabilityZone"}, + "resourceCreationTime":{"shape":"ResourceCreationTime"}, + "tags":{"shape":"Tags"}, + "relatedEvents":{"shape":"RelatedEventList"}, + "relationships":{"shape":"RelationshipList"}, + "configuration":{"shape":"Configuration"} + } + }, + "ConfigurationItemCaptureTime":{"type":"timestamp"}, + "ConfigurationItemList":{ + "type":"list", + "member":{"shape":"ConfigurationItem"} + }, + "ConfigurationItemMD5Hash":{"type":"string"}, + "ConfigurationItemStatus":{ + "type":"string", + "enum":[ + "Ok", + "Failed", + "Discovered", + "Deleted" + ] + }, + "ConfigurationRecorder":{ + "type":"structure", + "members":{ + "name":{"shape":"RecorderName"}, + "roleARN":{"shape":"String"}, + "recordingGroup":{"shape":"RecordingGroup"} + } + }, + "ConfigurationRecorderList":{ + "type":"list", + "member":{"shape":"ConfigurationRecorder"} + }, + "ConfigurationRecorderNameList":{ + "type":"list", + "member":{"shape":"RecorderName"} + }, + "ConfigurationRecorderStatus":{ + "type":"structure", + "members":{ + "name":{"shape":"String"}, + "lastStartTime":{"shape":"Date"}, + "lastStopTime":{"shape":"Date"}, + "recording":{"shape":"Boolean"}, + "lastStatus":{"shape":"RecorderStatus"}, + "lastErrorCode":{"shape":"String"}, + "lastErrorMessage":{"shape":"String"}, + "lastStatusChangeTime":{"shape":"Date"} + } + }, + "ConfigurationRecorderStatusList":{ + "type":"list", + "member":{"shape":"ConfigurationRecorderStatus"} + }, + "ConfigurationStateId":{"type":"string"}, + "Date":{"type":"timestamp"}, + "DeleteConfigRuleRequest":{ + "type":"structure", + "required":["ConfigRuleName"], + "members":{ + "ConfigRuleName":{"shape":"StringWithCharLimit64"} + } + }, + "DeleteConfigurationRecorderRequest":{ + "type":"structure", + "required":["ConfigurationRecorderName"], + "members":{ + "ConfigurationRecorderName":{"shape":"RecorderName"} + } + }, + "DeleteDeliveryChannelRequest":{ + "type":"structure", + "required":["DeliveryChannelName"], + "members":{ + "DeliveryChannelName":{"shape":"ChannelName"} + } + }, + "DeliverConfigSnapshotRequest":{ + "type":"structure", + "required":["deliveryChannelName"], + "members":{ + "deliveryChannelName":{"shape":"ChannelName"} + } + }, + "DeliverConfigSnapshotResponse":{ + "type":"structure", + "members":{ + "configSnapshotId":{"shape":"String"} + } + }, + "DeliveryChannel":{ + "type":"structure", + "members":{ + "name":{"shape":"ChannelName"}, + "s3BucketName":{"shape":"String"}, + "s3KeyPrefix":{"shape":"String"}, + "snsTopicARN":{"shape":"String"}, + "configSnapshotDeliveryProperties":{"shape":"ConfigSnapshotDeliveryProperties"} + } + }, + "DeliveryChannelList":{ + "type":"list", + "member":{"shape":"DeliveryChannel"} + }, + "DeliveryChannelNameList":{ + "type":"list", + "member":{"shape":"ChannelName"} + }, + "DeliveryChannelStatus":{ + "type":"structure", + "members":{ + "name":{"shape":"String"}, + "configSnapshotDeliveryInfo":{"shape":"ConfigExportDeliveryInfo"}, + "configHistoryDeliveryInfo":{"shape":"ConfigExportDeliveryInfo"}, + "configStreamDeliveryInfo":{"shape":"ConfigStreamDeliveryInfo"} + } + }, + "DeliveryChannelStatusList":{ + "type":"list", + "member":{"shape":"DeliveryChannelStatus"} + }, + "DeliveryStatus":{ + "type":"string", + "enum":[ + "Success", + "Failure", + "Not_Applicable" + ] + }, + "DescribeComplianceByConfigRuleRequest":{ + "type":"structure", + "members":{ + "ConfigRuleNames":{"shape":"ConfigRuleNames"}, + "ComplianceTypes":{"shape":"ComplianceTypes"}, + 
"NextToken":{"shape":"String"} + } + }, + "DescribeComplianceByConfigRuleResponse":{ + "type":"structure", + "members":{ + "ComplianceByConfigRules":{"shape":"ComplianceByConfigRules"}, + "NextToken":{"shape":"String"} + } + }, + "DescribeComplianceByResourceRequest":{ + "type":"structure", + "members":{ + "ResourceType":{"shape":"StringWithCharLimit256"}, + "ResourceId":{"shape":"StringWithCharLimit256"}, + "ComplianceTypes":{"shape":"ComplianceTypes"}, + "Limit":{"shape":"Limit"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeComplianceByResourceResponse":{ + "type":"structure", + "members":{ + "ComplianceByResources":{"shape":"ComplianceByResources"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeConfigRuleEvaluationStatusRequest":{ + "type":"structure", + "members":{ + "ConfigRuleNames":{"shape":"ConfigRuleNames"} + } + }, + "DescribeConfigRuleEvaluationStatusResponse":{ + "type":"structure", + "members":{ + "ConfigRulesEvaluationStatus":{"shape":"ConfigRuleEvaluationStatusList"} + } + }, + "DescribeConfigRulesRequest":{ + "type":"structure", + "members":{ + "ConfigRuleNames":{"shape":"ConfigRuleNames"}, + "NextToken":{"shape":"String"} + } + }, + "DescribeConfigRulesResponse":{ + "type":"structure", + "members":{ + "ConfigRules":{"shape":"ConfigRules"}, + "NextToken":{"shape":"String"} + } + }, + "DescribeConfigurationRecorderStatusRequest":{ + "type":"structure", + "members":{ + "ConfigurationRecorderNames":{"shape":"ConfigurationRecorderNameList"} + } + }, + "DescribeConfigurationRecorderStatusResponse":{ + "type":"structure", + "members":{ + "ConfigurationRecordersStatus":{"shape":"ConfigurationRecorderStatusList"} + } + }, + "DescribeConfigurationRecordersRequest":{ + "type":"structure", + "members":{ + "ConfigurationRecorderNames":{"shape":"ConfigurationRecorderNameList"} + } + }, + "DescribeConfigurationRecordersResponse":{ + "type":"structure", + "members":{ + "ConfigurationRecorders":{"shape":"ConfigurationRecorderList"} + } + }, + "DescribeDeliveryChannelStatusRequest":{ + "type":"structure", + "members":{ + "DeliveryChannelNames":{"shape":"DeliveryChannelNameList"} + } + }, + "DescribeDeliveryChannelStatusResponse":{ + "type":"structure", + "members":{ + "DeliveryChannelsStatus":{"shape":"DeliveryChannelStatusList"} + } + }, + "DescribeDeliveryChannelsRequest":{ + "type":"structure", + "members":{ + "DeliveryChannelNames":{"shape":"DeliveryChannelNameList"} + } + }, + "DescribeDeliveryChannelsResponse":{ + "type":"structure", + "members":{ + "DeliveryChannels":{"shape":"DeliveryChannelList"} + } + }, + "EarlierTime":{"type":"timestamp"}, + "EmptiableStringWithCharLimit256":{ + "type":"string", + "max":256, + "min":0 + }, + "Evaluation":{ + "type":"structure", + "required":[ + "ComplianceResourceType", + "ComplianceResourceId", + "ComplianceType", + "OrderingTimestamp" + ], + "members":{ + "ComplianceResourceType":{"shape":"StringWithCharLimit256"}, + "ComplianceResourceId":{"shape":"StringWithCharLimit256"}, + "ComplianceType":{"shape":"ComplianceType"}, + "Annotation":{"shape":"StringWithCharLimit256"}, + "OrderingTimestamp":{"shape":"OrderingTimestamp"} + } + }, + "EvaluationResult":{ + "type":"structure", + "members":{ + "EvaluationResultIdentifier":{"shape":"EvaluationResultIdentifier"}, + "ComplianceType":{"shape":"ComplianceType"}, + "ResultRecordedTime":{"shape":"Date"}, + "ConfigRuleInvokedTime":{"shape":"Date"}, + "Annotation":{"shape":"StringWithCharLimit256"}, + "ResultToken":{"shape":"String"} + } + }, + "EvaluationResultIdentifier":{ + 
"type":"structure", + "members":{ + "EvaluationResultQualifier":{"shape":"EvaluationResultQualifier"}, + "OrderingTimestamp":{"shape":"Date"} + } + }, + "EvaluationResultQualifier":{ + "type":"structure", + "members":{ + "ConfigRuleName":{"shape":"StringWithCharLimit64"}, + "ResourceType":{"shape":"StringWithCharLimit256"}, + "ResourceId":{"shape":"StringWithCharLimit256"} + } + }, + "EvaluationResults":{ + "type":"list", + "member":{"shape":"EvaluationResult"} + }, + "Evaluations":{ + "type":"list", + "member":{"shape":"Evaluation"}, + "max":100, + "min":0 + }, + "EventSource":{ + "type":"string", + "enum":["aws.config"] + }, + "GetComplianceDetailsByConfigRuleRequest":{ + "type":"structure", + "required":["ConfigRuleName"], + "members":{ + "ConfigRuleName":{"shape":"StringWithCharLimit64"}, + "ComplianceTypes":{"shape":"ComplianceTypes"}, + "Limit":{"shape":"Limit"}, + "NextToken":{"shape":"NextToken"} + } + }, + "GetComplianceDetailsByConfigRuleResponse":{ + "type":"structure", + "members":{ + "EvaluationResults":{"shape":"EvaluationResults"}, + "NextToken":{"shape":"NextToken"} + } + }, + "GetComplianceDetailsByResourceRequest":{ + "type":"structure", + "required":[ + "ResourceType", + "ResourceId" + ], + "members":{ + "ResourceType":{"shape":"StringWithCharLimit256"}, + "ResourceId":{"shape":"StringWithCharLimit256"}, + "ComplianceTypes":{"shape":"ComplianceTypes"}, + "NextToken":{"shape":"String"} + } + }, + "GetComplianceDetailsByResourceResponse":{ + "type":"structure", + "members":{ + "EvaluationResults":{"shape":"EvaluationResults"}, + "NextToken":{"shape":"String"} + } + }, + "GetComplianceSummaryByConfigRuleResponse":{ + "type":"structure", + "members":{ + "ComplianceSummary":{"shape":"ComplianceSummary"} + } + }, + "GetComplianceSummaryByResourceTypeRequest":{ + "type":"structure", + "members":{ + "ResourceTypes":{"shape":"ResourceTypes"} + } + }, + "GetComplianceSummaryByResourceTypeResponse":{ + "type":"structure", + "members":{ + "ComplianceSummariesByResourceType":{"shape":"ComplianceSummariesByResourceType"} + } + }, + "GetResourceConfigHistoryRequest":{ + "type":"structure", + "required":[ + "resourceType", + "resourceId" + ], + "members":{ + "resourceType":{"shape":"ResourceType"}, + "resourceId":{"shape":"ResourceId"}, + "laterTime":{"shape":"LaterTime"}, + "earlierTime":{"shape":"EarlierTime"}, + "chronologicalOrder":{"shape":"ChronologicalOrder"}, + "limit":{"shape":"Limit"}, + "nextToken":{"shape":"NextToken"} + } + }, + "GetResourceConfigHistoryResponse":{ + "type":"structure", + "members":{ + "configurationItems":{"shape":"ConfigurationItemList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "IncludeGlobalResourceTypes":{"type":"boolean"}, + "InsufficientDeliveryPolicyException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InsufficientPermissionsException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "Integer":{"type":"integer"}, + "InvalidConfigurationRecorderNameException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidDeliveryChannelNameException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidLimitException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidNextTokenException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidParameterValueException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidRecordingGroupException":{ + "type":"structure", + "members":{ + }, + 
"exception":true + }, + "InvalidResultTokenException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidRoleException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidS3KeyPrefixException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidSNSTopicARNException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidTimeRangeException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "LastDeliveryChannelDeleteFailedException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "LaterTime":{"type":"timestamp"}, + "Limit":{ + "type":"integer", + "max":100, + "min":0 + }, + "ListDiscoveredResourcesRequest":{ + "type":"structure", + "required":["resourceType"], + "members":{ + "resourceType":{"shape":"ResourceType"}, + "resourceIds":{"shape":"ResourceIdList"}, + "resourceName":{"shape":"ResourceName"}, + "limit":{"shape":"Limit"}, + "includeDeletedResources":{"shape":"Boolean"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListDiscoveredResourcesResponse":{ + "type":"structure", + "members":{ + "resourceIdentifiers":{"shape":"ResourceIdentifierList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "MaxNumberOfConfigRulesExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "MaxNumberOfConfigurationRecordersExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "MaxNumberOfDeliveryChannelsExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "MaximumExecutionFrequency":{ + "type":"string", + "enum":[ + "One_Hour", + "Three_Hours", + "Six_Hours", + "Twelve_Hours", + "TwentyFour_Hours" + ] + }, + "MessageType":{ + "type":"string", + "enum":[ + "ConfigurationItemChangeNotification", + "ConfigurationSnapshotDeliveryCompleted" + ] + }, + "Name":{"type":"string"}, + "NextToken":{"type":"string"}, + "NoAvailableConfigurationRecorderException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "NoAvailableDeliveryChannelException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "NoRunningConfigurationRecorderException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "NoSuchBucketException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "NoSuchConfigRuleException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "NoSuchConfigurationRecorderException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "NoSuchDeliveryChannelException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "OrderingTimestamp":{"type":"timestamp"}, + "Owner":{ + "type":"string", + "enum":[ + "CUSTOM_LAMBDA", + "AWS" + ] + }, + "PutConfigRuleRequest":{ + "type":"structure", + "required":["ConfigRule"], + "members":{ + "ConfigRule":{"shape":"ConfigRule"} + } + }, + "PutConfigurationRecorderRequest":{ + "type":"structure", + "required":["ConfigurationRecorder"], + "members":{ + "ConfigurationRecorder":{"shape":"ConfigurationRecorder"} + } + }, + "PutDeliveryChannelRequest":{ + "type":"structure", + "required":["DeliveryChannel"], + "members":{ + "DeliveryChannel":{"shape":"DeliveryChannel"} + } + }, + "PutEvaluationsRequest":{ + "type":"structure", + "required":["ResultToken"], + "members":{ + "Evaluations":{"shape":"Evaluations"}, + "ResultToken":{"shape":"String"} + } + }, + "PutEvaluationsResponse":{ + "type":"structure", + "members":{ + 
"FailedEvaluations":{"shape":"Evaluations"} + } + }, + "RecorderName":{ + "type":"string", + "max":256, + "min":1 + }, + "RecorderStatus":{ + "type":"string", + "enum":[ + "Pending", + "Success", + "Failure" + ] + }, + "RecordingGroup":{ + "type":"structure", + "members":{ + "allSupported":{"shape":"AllSupported"}, + "includeGlobalResourceTypes":{"shape":"IncludeGlobalResourceTypes"}, + "resourceTypes":{"shape":"ResourceTypeList"} + } + }, + "RelatedEvent":{"type":"string"}, + "RelatedEventList":{ + "type":"list", + "member":{"shape":"RelatedEvent"} + }, + "Relationship":{ + "type":"structure", + "members":{ + "resourceType":{"shape":"ResourceType"}, + "resourceId":{"shape":"ResourceId"}, + "resourceName":{"shape":"ResourceName"}, + "relationshipName":{"shape":"RelationshipName"} + } + }, + "RelationshipList":{ + "type":"list", + "member":{"shape":"Relationship"} + }, + "RelationshipName":{"type":"string"}, + "ResourceCreationTime":{"type":"timestamp"}, + "ResourceDeletionTime":{"type":"timestamp"}, + "ResourceId":{"type":"string"}, + "ResourceIdList":{ + "type":"list", + "member":{"shape":"ResourceId"} + }, + "ResourceIdentifier":{ + "type":"structure", + "members":{ + "resourceType":{"shape":"ResourceType"}, + "resourceId":{"shape":"ResourceId"}, + "resourceName":{"shape":"ResourceName"}, + "resourceDeletionTime":{"shape":"ResourceDeletionTime"} + } + }, + "ResourceIdentifierList":{ + "type":"list", + "member":{"shape":"ResourceIdentifier"} + }, + "ResourceInUseException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ResourceName":{"type":"string"}, + "ResourceNotDiscoveredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ResourceType":{ + "type":"string", + "enum":[ + "AWS::EC2::CustomerGateway", + "AWS::EC2::EIP", + "AWS::EC2::Host", + "AWS::EC2::Instance", + "AWS::EC2::InternetGateway", + "AWS::EC2::NetworkAcl", + "AWS::EC2::NetworkInterface", + "AWS::EC2::RouteTable", + "AWS::EC2::SecurityGroup", + "AWS::EC2::Subnet", + "AWS::CloudTrail::Trail", + "AWS::EC2::Volume", + "AWS::EC2::VPC", + "AWS::EC2::VPNConnection", + "AWS::EC2::VPNGateway", + "AWS::IAM::Group", + "AWS::IAM::Policy", + "AWS::IAM::Role", + "AWS::IAM::User" + ] + }, + "ResourceTypeList":{ + "type":"list", + "member":{"shape":"ResourceType"} + }, + "ResourceTypes":{ + "type":"list", + "member":{"shape":"StringWithCharLimit256"}, + "max":20, + "min":0 + }, + "Scope":{ + "type":"structure", + "members":{ + "ComplianceResourceTypes":{"shape":"ComplianceResourceTypes"}, + "TagKey":{"shape":"StringWithCharLimit128"}, + "TagValue":{"shape":"StringWithCharLimit256"}, + "ComplianceResourceId":{"shape":"StringWithCharLimit256"} + } + }, + "Source":{ + "type":"structure", + "members":{ + "Owner":{"shape":"Owner"}, + "SourceIdentifier":{"shape":"StringWithCharLimit256"}, + "SourceDetails":{"shape":"SourceDetails"} + } + }, + "SourceDetail":{ + "type":"structure", + "members":{ + "EventSource":{"shape":"EventSource"}, + "MessageType":{"shape":"MessageType"} + } + }, + "SourceDetails":{ + "type":"list", + "member":{"shape":"SourceDetail"}, + "max":25, + "min":0 + }, + "StartConfigurationRecorderRequest":{ + "type":"structure", + "required":["ConfigurationRecorderName"], + "members":{ + "ConfigurationRecorderName":{"shape":"RecorderName"} + } + }, + "StopConfigurationRecorderRequest":{ + "type":"structure", + "required":["ConfigurationRecorderName"], + "members":{ + "ConfigurationRecorderName":{"shape":"RecorderName"} + } + }, + "String":{"type":"string"}, + 
"StringWithCharLimit128":{ + "type":"string", + "max":128, + "min":1 + }, + "StringWithCharLimit256":{ + "type":"string", + "max":256, + "min":1 + }, + "StringWithCharLimit64":{ + "type":"string", + "max":64, + "min":1 + }, + "Tags":{ + "type":"map", + "key":{"shape":"Name"}, + "value":{"shape":"Value"} + }, + "ValidationException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "Value":{"type":"string"}, + "Version":{"type":"string"} + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/docs-2.json new file mode 100644 index 000000000..e10f9e40d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/docs-2.json @@ -0,0 +1,1057 @@ +{ + "version": "2.0", + "service": "AWS Config

    AWS Config provides a way to keep track of the configurations of all the AWS resources associated with your AWS account. You can use AWS Config to get the current and historical configurations of each AWS resource and also to get information about the relationship between the resources. An AWS resource can be an Amazon Elastic Compute Cloud (Amazon EC2) instance, an Amazon Elastic Block Store (Amazon EBS) volume, an elastic network interface (ENI), or a security group. For a complete list of resources currently supported by AWS Config, see Supported AWS Resources.

    You can access and manage AWS Config through the AWS Management Console, the AWS Command Line Interface (AWS CLI), the AWS Config API, or the AWS SDKs for AWS Config.

    This reference guide contains documentation for the AWS Config API and the AWS CLI commands that you can use to manage AWS Config.

    The AWS Config API uses the Signature Version 4 protocol for signing requests. For more information about how to sign a request with this protocol, see Signature Version 4 Signing Process.

    For detailed information about AWS Config features and their associated actions or commands, as well as how to work with the AWS Management Console, see What Is AWS Config? in the AWS Config Developer Guide.
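
    This model is what the configservice package in this SDK is generated from. As a point of reference, here is a minimal sketch of constructing the Go client; the region and the smoke-test call are assumptions, not part of the vendored model:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/configservice"
)

func main() {
	// session.Must panics on configuration errors, which is fine for a sketch.
	sess := session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-east-1"), // assumption: use your own region
	}))
	svc := configservice.New(sess)

	// Cheap smoke test: list the configuration recorders in the account.
	out, err := svc.DescribeConfigurationRecorders(
		&configservice.DescribeConfigurationRecordersInput{})
	if err != nil {
		fmt.Println("describe failed:", err)
		return
	}
	fmt.Println(out)
}
```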

    ", + "operations": { + "DeleteConfigRule": "

    Deletes the specified AWS Config rule and all of its evaluation results.

    AWS Config sets the state of a rule to DELETING until the deletion is complete. You cannot update a rule while it is in this state. If you make a PutConfigRule or DeleteConfigRule request for the rule, you will receive a ResourceInUseException.

    You can check the state of a rule by using the DescribeConfigRules request.
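
    A minimal sketch of that deletion flow in Go, assuming a client built as in the earlier sketch; the rule name is a placeholder, and the error-code string matches the ResourceInUseException shape named above:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/configservice"
)

func main() {
	svc := configservice.New(session.Must(session.NewSession()))

	_, err := svc.DeleteConfigRule(&configservice.DeleteConfigRuleInput{
		ConfigRuleName: aws.String("my-rule"), // placeholder rule name
	})
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "ResourceInUseException" {
		// Deletion is already in progress (state DELETING); poll
		// DescribeConfigRules until the rule disappears instead of failing.
		fmt.Println("rule is being deleted, retry later")
		return
	}
	if err != nil {
		fmt.Println("delete failed:", err)
	}
}
```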

    ", + "DeleteConfigurationRecorder": "

    Deletes the configuration recorder.

    After the configuration recorder is deleted, AWS Config will not record resource configuration changes until you create a new configuration recorder.

    This action does not delete the configuration information that was previously recorded. You will be able to access the previously recorded information by using the GetResourceConfigHistory action, but you will not be able to access this information in the AWS Config console until you create a new configuration recorder.

    ", + "DeleteDeliveryChannel": "

    Deletes the delivery channel.

    Before you can delete the delivery channel, you must stop the configuration recorder by using the StopConfigurationRecorder action.

    ", + "DeliverConfigSnapshot": "

    Schedules delivery of a configuration snapshot to the Amazon S3 bucket in the specified delivery channel. After the delivery has started, AWS Config sends the following notifications using an Amazon SNS topic that you have specified (a sketch of the call follows the list below).

    • Notification of starting the delivery.
    • Notification of delivery completed, if the delivery was successfully completed.
    • Notification of delivery failure, if the delivery failed to complete.
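
    A hedged sketch of scheduling a delivery; the channel name "default" is an assumption that matches the default name described later in this file:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/configservice"
)

// deliverSnapshot schedules a snapshot delivery and returns the snapshot ID.
func deliverSnapshot(svc *configservice.ConfigService) (string, error) {
	out, err := svc.DeliverConfigSnapshot(&configservice.DeliverConfigSnapshotInput{
		DeliveryChannelName: aws.String("default"), // assumed channel name
	})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.ConfigSnapshotId), nil
}
```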
    ", + "DescribeComplianceByConfigRule": "

    Indicates whether the specified AWS Config rules are compliant. If a rule is noncompliant, this action returns the number of AWS resources that do not comply with the rule.

    A rule is compliant if all of the evaluated resources comply with it, and it is noncompliant if any of these resources do not comply.

    If AWS Config has no current evaluation results for the rule, it returns INSUFFICIENT_DATA. This result might indicate one of the following conditions:

    • AWS Config has never invoked an evaluation for the rule. To check whether it has, use the DescribeConfigRuleEvaluationStatus action to get the LastSuccessfulInvocationTime and LastFailedInvocationTime.
    • The rule's AWS Lambda function is failing to send evaluation results to AWS Config. Verify that the role that you assigned to your configuration recorder includes the config:PutEvaluations permission. If the rule is a customer managed rule, verify that the AWS Lambda execution role includes the config:PutEvaluations permission.
    • The rule's AWS Lambda function has returned NOT_APPLICABLE for all evaluation results. This can occur if the resources were deleted or removed from the rule's scope.
    ", + "DescribeComplianceByResource": "

    Indicates whether the specified AWS resources are compliant. If a resource is noncompliant, this action returns the number of AWS Config rules that the resource does not comply with.

    A resource is compliant if it complies with all the AWS Config rules that evaluate it. It is noncompliant if it does not comply with one or more of these rules.

    If AWS Config has no current evaluation results for the resource, it returns INSUFFICIENT_DATA. This result might indicate one of the following conditions about the rules that evaluate the resource:

    • AWS Config has never invoked an evaluation for the rule. To check whether it has, use the DescribeConfigRuleEvaluationStatus action to get the LastSuccessfulInvocationTime and LastFailedInvocationTime.
    • The rule's AWS Lambda function is failing to send evaluation results to AWS Config. Verify that the role that you assigned to your configuration recorder includes the config:PutEvaluations permission. If the rule is a customer managed rule, verify that the AWS Lambda execution role includes the config:PutEvaluations permission.
    • The rule's AWS Lambda function has returned NOT_APPLICABLE for all evaluation results. This can occur if the resources were deleted or removed from the rule's scope.
    ", + "DescribeConfigRuleEvaluationStatus": "

    Returns status information for each of your AWS managed Config rules. The status includes information such as the last time AWS Config invoked the rule, the last time AWS Config failed to invoke the rule, and the related error for the last failure.

    ", + "DescribeConfigRules": "

    Returns details about your AWS Config rules.

    ", + "DescribeConfigurationRecorderStatus": "

    Returns the current status of the specified configuration recorder. If a configuration recorder is not specified, this action returns the status of all configuration recorders associated with the account.

    Currently, you can specify only one configuration recorder per account.

    ", + "DescribeConfigurationRecorders": "

    Returns the details of one or more specified configuration recorders. If the recorder name is not specified, this action returns the details of all the configuration recorders associated with the account.

    Currently, you can specify only one configuration recorder per account.

    ", + "DescribeDeliveryChannelStatus": "

    Returns the current status of the specified delivery channel. If a delivery channel is not specified, this action returns the current status of all delivery channels associated with the account.

    Currently, you can specify only one delivery channel per account.

    ", + "DescribeDeliveryChannels": "

    Returns details about the specified delivery channel. If a delivery channel is not specified, this action returns the details of all delivery channels associated with the account.

    Currently, you can specify only one delivery channel per account.

    ", + "GetComplianceDetailsByConfigRule": "

    Returns the evaluation results for the specified AWS Config rule. The results indicate which AWS resources were evaluated by the rule, when each resource was last evaluated, and whether each resource complies with the rule.

    ", + "GetComplianceDetailsByResource": "

    Returns the evaluation results for the specified AWS resource. The results indicate which AWS Config rules were used to evaluate the resource, when each rule was last used, and whether the resource complies with each rule.

    ", + "GetComplianceSummaryByConfigRule": "

    Returns the number of AWS Config rules that are compliant and noncompliant, up to a maximum of 25 for each.

    ", + "GetComplianceSummaryByResourceType": "

    Returns the number of resources that are compliant and the number that are noncompliant. You can specify one or more resource types to get these numbers for each resource type. The maximum number returned is 100.

    ", + "GetResourceConfigHistory": "

    Returns a list of configuration items for the specified resource. The list contains details about each state of the resource during the specified time interval.

    The response is paginated, and by default, AWS Config returns a limit of 10 configuration items per page. You can customize this number with the limit parameter. The response includes a nextToken string, and to get the next page of results, run the request again and enter this string for the nextToken parameter.

    Each call to the API is limited to a span of seven days. It is likely that the number of records returned is smaller than the specified limit. In such cases, you can make another call, using the nextToken.
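
    A sketch of that pagination loop, assuming a client built as in the first sketch; the resource type and ID parameters are placeholders:

```go
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/configservice"
)

// printHistory walks every page of GetResourceConfigHistory by feeding the
// returned nextToken back into the request, as described above.
func printHistory(svc *configservice.ConfigService, resType, resID string) error {
	input := &configservice.GetResourceConfigHistoryInput{
		ResourceType: aws.String(resType), // e.g. "AWS::EC2::Instance"
		ResourceId:   aws.String(resID),
		Limit:        aws.Int64(10), // the documented default page size
	}
	for {
		out, err := svc.GetResourceConfigHistory(input)
		if err != nil {
			return err
		}
		for _, item := range out.ConfigurationItems {
			fmt.Println(aws.StringValue(item.ConfigurationStateId),
				aws.TimeValue(item.ConfigurationItemCaptureTime))
		}
		if aws.StringValue(out.NextToken) == "" {
			return nil // no more pages
		}
		input.NextToken = out.NextToken
	}
}
```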

    ", + "ListDiscoveredResources": "

    Accepts a resource type and returns a list of resource identifiers for the resources of that type. A resource identifier includes the resource type, ID, and (if available) the custom resource name. The results consist of resources that AWS Config has discovered, including those that AWS Config is not currently recording. You can narrow the results to include only resources that have specific resource IDs or a resource name.

    You can specify either resource IDs or a resource name but not both in the same request.

    The response is paginated, and by default AWS Config lists 100 resource identifiers on each page. You can customize this number with the limit parameter. The response includes a nextToken string, and to get the next page of results, run the request again and enter this string for the nextToken parameter.

    ", + "PutConfigRule": "

    Adds or updates an AWS Config rule for evaluating whether your AWS resources comply with your desired configurations.

    You can use this action for customer managed Config rules and AWS managed Config rules. A customer managed Config rule is a custom rule that you develop and maintain. An AWS managed Config rule is a customizable, predefined rule that is provided by AWS Config.

    If you are adding a new customer managed Config rule, you must first create the AWS Lambda function that the rule invokes to evaluate your resources. When you use the PutConfigRule action to add the rule to AWS Config, you must specify the Amazon Resource Name (ARN) that AWS Lambda assigns to the function. Specify the ARN for the SourceIdentifier key. This key is part of the Source object, which is part of the ConfigRule object.

    If you are adding a new AWS managed Config rule, specify the rule's identifier for the SourceIdentifier key. To reference AWS managed Config rule identifiers, see Using AWS Managed Config Rules.

    For any new rule that you add, specify the ConfigRuleName in the ConfigRule object. Do not specify the ConfigRuleArn or the ConfigRuleId. These values are generated by AWS Config for new rules.

    If you are updating a rule that you have added previously, specify the rule's ConfigRuleName, ConfigRuleId, or ConfigRuleArn in the ConfigRule data type that you use in this request.

    The maximum number of rules that AWS Config supports is 25.

    For more information about developing and using AWS Config rules, see Evaluating AWS Resource Configurations with AWS Config in the AWS Config Developer Guide.
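
    A hedged sketch of adding an AWS managed rule; the rule name, the managed-rule identifier, and the scope are illustrative values, not taken from this model:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/configservice"
)

// addManagedRule adds an AWS managed rule. Only ConfigRuleName and Source are
// set; ConfigRuleArn and ConfigRuleId are generated by AWS Config, as noted
// above.
func addManagedRule(svc *configservice.ConfigService) error {
	_, err := svc.PutConfigRule(&configservice.PutConfigRuleInput{
		ConfigRule: &configservice.ConfigRule{
			ConfigRuleName: aws.String("ec2-volume-inuse"), // placeholder
			Source: &configservice.Source{
				Owner: aws.String("AWS"),
				// Illustrative identifier; take real values from the
				// Using AWS Managed Config Rules list referenced above.
				SourceIdentifier: aws.String("EC2_VOLUME_INUSE_CHECK"),
			},
			Scope: &configservice.Scope{
				ComplianceResourceTypes: []*string{aws.String("AWS::EC2::Volume")},
			},
		},
	})
	return err
}
```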

    ", + "PutConfigurationRecorder": "

    Creates a new configuration recorder to record the selected resource configurations.

    You can use this action to change the role (roleARN) or the recording group (recordingGroup) of an existing recorder. To change the role, call the action on the existing configuration recorder and specify a role.

    Currently, you can specify only one configuration recorder per account.

    If ConfigurationRecorder does not have the recordingGroup parameter specified, the default is to record all supported resource types.
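
    A minimal sketch, assuming a pre-created IAM role (the ARN is a placeholder); it records all supported types, so resourceTypes is left unset as described above:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/configservice"
)

// putRecorder creates (or updates) the account's single recorder, recording
// all supported resource types, including global ones such as IAM resources.
func putRecorder(svc *configservice.ConfigService) error {
	_, err := svc.PutConfigurationRecorder(&configservice.PutConfigurationRecorderInput{
		ConfigurationRecorder: &configservice.ConfigurationRecorder{
			Name:    aws.String("default"),
			RoleARN: aws.String("arn:aws:iam::123456789012:role/config-role"), // placeholder
			RecordingGroup: &configservice.RecordingGroup{
				AllSupported:               aws.Bool(true),
				IncludeGlobalResourceTypes: aws.Bool(true),
			},
		},
	})
	return err
}
```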

    ", + "PutDeliveryChannel": "

    Creates a delivery channel object to deliver configuration information to an Amazon S3 bucket and Amazon SNS topic.

    Before you can create a delivery channel, you must create a configuration recorder.

    You can use this action to change the Amazon S3 bucket or the Amazon SNS topic of the existing delivery channel. To do so, call this action and specify the changed values for the S3 bucket and the SNS topic. If you specify a different value for either the S3 bucket or the SNS topic, this action keeps the existing value for the parameter that is not changed.

    You can have only one delivery channel per AWS account.
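
    A hedged sketch of creating the channel; the bucket name, the topic ARN, and the delivery frequency are placeholder values (the frequency strings come from the MaximumExecutionFrequency enum in the model above), and the SNS topic is optional:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/configservice"
)

// putChannel creates the account's single delivery channel.
func putChannel(svc *configservice.ConfigService) error {
	_, err := svc.PutDeliveryChannel(&configservice.PutDeliveryChannelInput{
		DeliveryChannel: &configservice.DeliveryChannel{
			Name:         aws.String("default"),
			S3BucketName: aws.String("my-config-bucket"), // placeholder
			SnsTopicARN: aws.String(
				"arn:aws:sns:us-east-1:123456789012:config"), // optional, placeholder
			ConfigSnapshotDeliveryProperties: &configservice.ConfigSnapshotDeliveryProperties{
				DeliveryFrequency: aws.String("TwentyFour_Hours"),
			},
		},
	})
	return err
}
```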

    ", + "PutEvaluations": "

    Used by an AWS Lambda function to deliver evaluation results to AWS Config. This action is required in every AWS Lambda function that is invoked by an AWS Config rule.
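
    A sketch of the shape of that call from inside a custom-rule handler; the result token is supplied by the invoking event, and the resource identifiers are placeholders:

```go
package example

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/configservice"
)

// reportCompliant sends one COMPLIANT evaluation back to AWS Config using the
// result token from the event that invoked the Lambda function.
func reportCompliant(svc *configservice.ConfigService, resultToken string) error {
	_, err := svc.PutEvaluations(&configservice.PutEvaluationsInput{
		ResultToken: aws.String(resultToken),
		Evaluations: []*configservice.Evaluation{{
			ComplianceResourceType: aws.String("AWS::EC2::Instance"),
			ComplianceResourceId:   aws.String("i-0123456789abcdef0"),
			ComplianceType:         aws.String("COMPLIANT"),
			OrderingTimestamp:      aws.Time(time.Now()),
		}},
	})
	return err
}
```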

    ", + "StartConfigurationRecorder": "

    Starts recording configurations of the AWS resources you have selected to record in your AWS account.

    You must have created at least one delivery channel to successfully start the configuration recorder.

    ", + "StopConfigurationRecorder": "

    Stops recording configurations of the AWS resources you have selected to record in your AWS account.

    " + }, + "shapes": { + "ARN": { + "base": null, + "refs": { + "ConfigurationItem$arn": "

    The Amazon Resource Name (ARN) of the resource.

    " + } + }, + "AccountId": { + "base": null, + "refs": { + "ConfigurationItem$accountId": "

    The 12-digit AWS account ID associated with the resource.

    " + } + }, + "AllSupported": { + "base": null, + "refs": { + "RecordingGroup$allSupported": "

    Specifies whether AWS Config records configuration changes for every supported type of regional resource.

    If you set this option to true, when AWS Config adds support for a new type of regional resource, it automatically starts recording resources of that type.

    If you set this option to true, you cannot enumerate a list of resourceTypes.

    " + } + }, + "AvailabilityZone": { + "base": null, + "refs": { + "ConfigurationItem$availabilityZone": "

    The Availability Zone associated with the resource.

    " + } + }, + "AwsRegion": { + "base": null, + "refs": { + "ConfigurationItem$awsRegion": "

    The region where the resource resides.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "ComplianceContributorCount$CapExceeded": "

    Indicates whether the maximum count is reached.

    ", + "ConfigRuleEvaluationStatus$FirstEvaluationStarted": "

    Indicates whether AWS Config has evaluated your resources against the rule at least once.

    • true - AWS Config has evaluated your AWS resources against the rule at least once.
    • false - AWS Config has not finished evaluating your AWS resources against the rule at least once.
    ", + "ConfigurationRecorderStatus$recording": "

    Specifies whether the recorder is currently recording or not.

    ", + "ListDiscoveredResourcesRequest$includeDeletedResources": "

    Specifies whether AWS Config includes deleted resources in the results. By default, deleted resources are not included.

    " + } + }, + "ChannelName": { + "base": null, + "refs": { + "DeleteDeliveryChannelRequest$DeliveryChannelName": "

    The name of the delivery channel to delete.

    ", + "DeliverConfigSnapshotRequest$deliveryChannelName": "

    The name of the delivery channel through which the snapshot is delivered.

    ", + "DeliveryChannel$name": "

    The name of the delivery channel. By default, AWS Config assigns the name "default" when creating the delivery channel. To change the delivery channel name, you must use the DeleteDeliveryChannel action to delete your current delivery channel, and then you must use the PutDeliveryChannel command to create a delivery channel that has the desired name.

    ", + "DeliveryChannelNameList$member": null + } + }, + "ChronologicalOrder": { + "base": null, + "refs": { + "GetResourceConfigHistoryRequest$chronologicalOrder": "

    The chronological order for configuration items listed. By default, the results are listed in reverse chronological order.

    " + } + }, + "Compliance": { + "base": "

    Indicates whether an AWS resource or AWS Config rule is compliant and provides the number of contributors that affect the compliance.

    ", + "refs": { + "ComplianceByConfigRule$Compliance": "

    Indicates whether the AWS Config rule is compliant.

    ", + "ComplianceByResource$Compliance": "

    Indicates whether the AWS resource complies with all of the AWS Config rules that evaluated it.

    " + } + }, + "ComplianceByConfigRule": { + "base": "

    Indicates whether an AWS Config rule is compliant. A rule is compliant if all of the resources that the rule evaluated comply with it, and it is noncompliant if any of these resources do not comply.

    ", + "refs": { + "ComplianceByConfigRules$member": null + } + }, + "ComplianceByConfigRules": { + "base": null, + "refs": { + "DescribeComplianceByConfigRuleResponse$ComplianceByConfigRules": "

    Indicates whether each of the specified AWS Config rules is compliant.

    " + } + }, + "ComplianceByResource": { + "base": "

    Indicates whether an AWS resource that is evaluated according to one or more AWS Config rules is compliant. A resource is compliant if it complies with all of the rules that evaluate it, and it is noncompliant if it does not comply with one or more of these rules.

    ", + "refs": { + "ComplianceByResources$member": null + } + }, + "ComplianceByResources": { + "base": null, + "refs": { + "DescribeComplianceByResourceResponse$ComplianceByResources": "

    Indicates whether the specified AWS resource complies with all of the AWS Config rules that evaluate it.

    " + } + }, + "ComplianceContributorCount": { + "base": "

    The number of AWS resources or AWS Config rules responsible for the current compliance of the item, up to a maximum number.

    ", + "refs": { + "Compliance$ComplianceContributorCount": "

    The number of AWS resources or AWS Config rules that cause a result of NON_COMPLIANT, up to a maximum number.

    ", + "ComplianceSummary$CompliantResourceCount": "

    The number of AWS Config rules or AWS resources that are compliant, up to a maximum of 25 for rules and 100 for resources.

    ", + "ComplianceSummary$NonCompliantResourceCount": "

    The number of AWS Config rules or AWS resources that are noncompliant, up to a maximum of 25 for rules and 100 for resources.

    " + } + }, + "ComplianceResourceTypes": { + "base": null, + "refs": { + "Scope$ComplianceResourceTypes": "

    The resource types of only those AWS resources that you want to trigger an evaluation for the rule. You can specify only one type if you also specify a resource ID for ComplianceResourceId.

    " + } + }, + "ComplianceSummariesByResourceType": { + "base": null, + "refs": { + "GetComplianceSummaryByResourceTypeResponse$ComplianceSummariesByResourceType": "

    The number of resources that are compliant and the number that are noncompliant. If one or more resource types were provided with the request, the numbers are returned for each resource type. The maximum number returned is 100.

    " + } + }, + "ComplianceSummary": { + "base": "

    The number of AWS Config rules or AWS resources that are compliant and noncompliant, up to a maximum.

    ", + "refs": { + "ComplianceSummaryByResourceType$ComplianceSummary": "

    The number of AWS resources that are compliant or noncompliant, up to a maximum of 100 for each compliance.

    ", + "GetComplianceSummaryByConfigRuleResponse$ComplianceSummary": "

    The number of AWS Config rules that are compliant and the number that are noncompliant, up to a maximum of 25 for each.

    " + } + }, + "ComplianceSummaryByResourceType": { + "base": "

    The number of AWS resources of a specific type that are compliant or noncompliant, up to a maximum of 100 for each compliance.

    ", + "refs": { + "ComplianceSummariesByResourceType$member": null + } + }, + "ComplianceType": { + "base": null, + "refs": { + "Compliance$ComplianceType": "

    Indicates whether an AWS resource or AWS Config rule is compliant.

    A resource is compliant if it complies with all of the AWS Config rules that evaluate it, and it is noncompliant if it does not comply with one or more of these rules.

    A rule is compliant if all of the resources that the rule evaluates comply with it, and it is noncompliant if any of these resources do not comply.

    AWS Config returns the INSUFFICIENT_DATA value when no evaluation results are available for the AWS resource or Config rule.

    For the Compliance data type, AWS Config supports only COMPLIANT, NON_COMPLIANT, and INSUFFICIENT_DATA values. AWS Config does not support the NOT_APPLICABLE value for the Compliance data type.

    ", + "ComplianceTypes$member": null, + "Evaluation$ComplianceType": "

    Indicates whether the AWS resource complies with the AWS Config rule that it was evaluated against.

    For the Evaluation data type, AWS Config supports only the COMPLIANT, NON_COMPLIANT, and NOT_APPLICABLE values. AWS Config does not support the INSUFFICIENT_DATA value for this data type.

    Similarly, AWS Config does not accept INSUFFICIENT_DATA as the value for ComplianceType from a PutEvaluations request. For example, an AWS Lambda function for a custom Config rule cannot pass an INSUFFICIENT_DATA value to AWS Config.

    ", + "EvaluationResult$ComplianceType": "

    Indicates whether the AWS resource complies with the AWS Config rule that evaluated it.

    For the EvaluationResult data type, AWS Config supports only the COMPLIANT, NON_COMPLIANT, and NOT_APPLICABLE values. AWS Config does not support the INSUFFICIENT_DATA value for the EvaluationResult data type.

    " + } + }, + "ComplianceTypes": { + "base": null, + "refs": { + "DescribeComplianceByConfigRuleRequest$ComplianceTypes": "

    Filters the results by compliance.

    The allowed values are COMPLIANT, NON_COMPLIANT, and INSUFFICIENT_DATA.

    ", + "DescribeComplianceByResourceRequest$ComplianceTypes": "

    Filters the results by compliance.

    The allowed values are COMPLIANT, NON_COMPLIANT, and INSUFFICIENT_DATA.

    ", + "GetComplianceDetailsByConfigRuleRequest$ComplianceTypes": "

    Filters the results by compliance.

    The allowed values are COMPLIANT, NON_COMPLIANT, and NOT_APPLICABLE.

    ", + "GetComplianceDetailsByResourceRequest$ComplianceTypes": "

    Filters the results by compliance.

    The allowed values are COMPLIANT, NON_COMPLIANT, and NOT_APPLICABLE.

    " + } + }, + "ConfigExportDeliveryInfo": { + "base": "

    A list that contains the status of the delivery of either the snapshot or the configuration history to the specified Amazon S3 bucket.

    ", + "refs": { + "DeliveryChannelStatus$configSnapshotDeliveryInfo": "

    A list containing the status of the delivery of the snapshot to the specified Amazon S3 bucket.

    ", + "DeliveryChannelStatus$configHistoryDeliveryInfo": "

    A list that contains the status of the delivery of the configuration history to the specified Amazon S3 bucket.

    " + } + }, + "ConfigRule": { + "base": "

    An AWS Lambda function that evaluates configuration items to assess whether your AWS resources comply with your desired configurations. This function can run when AWS Config detects a configuration change to an AWS resource, or when it delivers a configuration snapshot of the resources in the account.

    For more information about developing and using AWS Config rules, see Evaluating AWS Resource Configurations with AWS Config in the AWS Config Developer Guide.

    ", + "refs": { + "ConfigRules$member": null, + "PutConfigRuleRequest$ConfigRule": null + } + }, + "ConfigRuleEvaluationStatus": { + "base": "

    Status information for your AWS managed Config rules. The status includes information such as the last time the rule ran, the last time it failed, and the related error for the last failure.

    This action does not return status information about customer managed Config rules.

    ", + "refs": { + "ConfigRuleEvaluationStatusList$member": null + } + }, + "ConfigRuleEvaluationStatusList": { + "base": null, + "refs": { + "DescribeConfigRuleEvaluationStatusResponse$ConfigRulesEvaluationStatus": "

    Status information about your AWS managed Config rules.

    " + } + }, + "ConfigRuleNames": { + "base": null, + "refs": { + "DescribeComplianceByConfigRuleRequest$ConfigRuleNames": "

    Specify one or more AWS Config rule names to filter the results by rule.

    ", + "DescribeConfigRuleEvaluationStatusRequest$ConfigRuleNames": "

    The name of the AWS managed Config rules for which you want status information. If you do not specify any names, AWS Config returns status information for all AWS managed Config rules that you use.

    ", + "DescribeConfigRulesRequest$ConfigRuleNames": "

    The names of the AWS Config rules for which you want details. If you do not specify any names, AWS Config returns details for all your rules.

    " + } + }, + "ConfigRuleState": { + "base": null, + "refs": { + "ConfigRule$ConfigRuleState": "

    Indicates whether the AWS Config rule is active or currently being deleted by AWS Config.

    AWS Config sets the state of a rule to DELETING temporarily after you use the DeleteConfigRule request to delete the rule. After AWS Config finishes deleting a rule, the rule and all of its evaluations are erased and no longer available.

    You cannot add a rule to AWS Config that has the state set to DELETING. If you want to delete a rule, you must use the DeleteConfigRule request.

    " + } + }, + "ConfigRules": { + "base": null, + "refs": { + "DescribeConfigRulesResponse$ConfigRules": "

    The details about your AWS Config rules.

    " + } + }, + "ConfigSnapshotDeliveryProperties": { + "base": "

    Options for how AWS Config delivers configuration snapshots to the Amazon S3 bucket in your delivery channel.

    ", + "refs": { + "DeliveryChannel$configSnapshotDeliveryProperties": null + } + }, + "ConfigStreamDeliveryInfo": { + "base": "

    A list that contains the status of the delivery of the configuration stream notification to the Amazon SNS topic.

    ", + "refs": { + "DeliveryChannelStatus$configStreamDeliveryInfo": "

    A list containing the status of the delivery of the configuration stream notification to the specified Amazon SNS topic.

    " + } + }, + "Configuration": { + "base": null, + "refs": { + "ConfigurationItem$configuration": "

    The description of the resource configuration.

    " + } + }, + "ConfigurationItem": { + "base": "

    A list that contains detailed configurations of a specified resource.

    Currently, the list does not contain information about non-AWS components (for example, applications on your Amazon EC2 instances).

    ", + "refs": { + "ConfigurationItemList$member": null + } + }, + "ConfigurationItemCaptureTime": { + "base": null, + "refs": { + "ConfigurationItem$configurationItemCaptureTime": "

    The time when the configuration recording was initiated.

    " + } + }, + "ConfigurationItemList": { + "base": null, + "refs": { + "GetResourceConfigHistoryResponse$configurationItems": "

    A list that contains the configuration history of one or more resources.

    " + } + }, + "ConfigurationItemMD5Hash": { + "base": null, + "refs": { + "ConfigurationItem$configurationItemMD5Hash": "

    Unique MD5 hash that represents the configuration item's state.

    You can use the MD5 hash to compare the states of two or more configuration items that are associated with the same resource.

    " + } + }, + "ConfigurationItemStatus": { + "base": null, + "refs": { + "ConfigurationItem$configurationItemStatus": "

    The configuration item status.

    " + } + }, + "ConfigurationRecorder": { + "base": "

    An object that represents the recording of configuration changes of an AWS resource.

    ", + "refs": { + "ConfigurationRecorderList$member": null, + "PutConfigurationRecorderRequest$ConfigurationRecorder": "

    The configuration recorder object that records each configuration change made to the resources.

    " + } + }, + "ConfigurationRecorderList": { + "base": null, + "refs": { + "DescribeConfigurationRecordersResponse$ConfigurationRecorders": "

    A list that contains the descriptions of the specified configuration recorders.

    " + } + }, + "ConfigurationRecorderNameList": { + "base": null, + "refs": { + "DescribeConfigurationRecorderStatusRequest$ConfigurationRecorderNames": "

    The name(s) of the configuration recorder. If the name is not specified, the action returns the current status of all the configuration recorders associated with the account.

    ", + "DescribeConfigurationRecordersRequest$ConfigurationRecorderNames": "

    A list of configuration recorder names.

    " + } + }, + "ConfigurationRecorderStatus": { + "base": "

    The current status of the configuration recorder.

    ", + "refs": { + "ConfigurationRecorderStatusList$member": null + } + }, + "ConfigurationRecorderStatusList": { + "base": null, + "refs": { + "DescribeConfigurationRecorderStatusResponse$ConfigurationRecordersStatus": "

    A list that contains status of the specified recorders.

    " + } + }, + "ConfigurationStateId": { + "base": null, + "refs": { + "ConfigurationItem$configurationStateId": "

    An identifier that indicates the ordering of the configuration items of a resource.

    " + } + }, + "Date": { + "base": null, + "refs": { + "ComplianceSummary$ComplianceSummaryTimestamp": "

    The time that AWS Config created the compliance summary.

    ", + "ConfigExportDeliveryInfo$lastAttemptTime": "

    The time of the last attempted delivery.

    ", + "ConfigExportDeliveryInfo$lastSuccessfulTime": "

    The time of the last successful delivery.

    ", + "ConfigExportDeliveryInfo$nextDeliveryTime": "

    The time that the next delivery occurs.

    ", + "ConfigRuleEvaluationStatus$LastSuccessfulInvocationTime": "

    The time that AWS Config last successfully invoked the AWS Config rule to evaluate your AWS resources.

    ", + "ConfigRuleEvaluationStatus$LastFailedInvocationTime": "

    The time that AWS Config last failed to invoke the AWS Config rule to evaluate your AWS resources.

    ", + "ConfigRuleEvaluationStatus$LastSuccessfulEvaluationTime": "

    The time that AWS Config last successfully evaluated your AWS resources against the rule.

    ", + "ConfigRuleEvaluationStatus$LastFailedEvaluationTime": "

    The time that AWS Config last failed to evaluate your AWS resources against the rule.

    ", + "ConfigRuleEvaluationStatus$FirstActivatedTime": "

    The time that you first activated the AWS Config rule.

    ", + "ConfigStreamDeliveryInfo$lastStatusChangeTime": "

    The time of the last status change.

    ", + "ConfigurationRecorderStatus$lastStartTime": "

    The time the recorder was last started.

    ", + "ConfigurationRecorderStatus$lastStopTime": "

    The time the recorder was last stopped.

    ", + "ConfigurationRecorderStatus$lastStatusChangeTime": "

    The time when the status was last changed.

    ", + "EvaluationResult$ResultRecordedTime": "

    The time when AWS Config recorded the evaluation result.

    ", + "EvaluationResult$ConfigRuleInvokedTime": "

    The time when the AWS Config rule evaluated the AWS resource.

    ", + "EvaluationResultIdentifier$OrderingTimestamp": "

    The time of the event that triggered the evaluation of your AWS resources. The time can indicate when AWS Config delivered a configuration item change notification, or it can indicate when AWS Config delivered the configuration snapshot, depending on which event triggered the evaluation.

    " + } + }, + "DeleteConfigRuleRequest": { + "base": null, + "refs": { + } + }, + "DeleteConfigurationRecorderRequest": { + "base": "

    The request object for the DeleteConfigurationRecorder action.

    ", + "refs": { + } + }, + "DeleteDeliveryChannelRequest": { + "base": "

    The input for the DeleteDeliveryChannel action. The action accepts the following data in JSON format.

    ", + "refs": { + } + }, + "DeliverConfigSnapshotRequest": { + "base": "

    The input for the DeliverConfigSnapshot action.

    ", + "refs": { + } + }, + "DeliverConfigSnapshotResponse": { + "base": "

    The output for the DeliverConfigSnapshot action in JSON format.

    ", + "refs": { + } + }, + "DeliveryChannel": { + "base": "

    The channel through which AWS Config delivers notifications and updated configuration states.

    ", + "refs": { + "DeliveryChannelList$member": null, + "PutDeliveryChannelRequest$DeliveryChannel": "

    The configuration delivery channel object that delivers the configuration information to an Amazon S3 bucket, and to an Amazon SNS topic.

    " + } + }, + "DeliveryChannelList": { + "base": null, + "refs": { + "DescribeDeliveryChannelsResponse$DeliveryChannels": "

    A list that contains the descriptions of the specified delivery channel.

    " + } + }, + "DeliveryChannelNameList": { + "base": null, + "refs": { + "DescribeDeliveryChannelStatusRequest$DeliveryChannelNames": "

    A list of delivery channel names.

    ", + "DescribeDeliveryChannelsRequest$DeliveryChannelNames": "

    A list of delivery channel names.

    " + } + }, + "DeliveryChannelStatus": { + "base": "

    The status of a specified delivery channel.

    Valid values: Success | Failure

    ", + "refs": { + "DeliveryChannelStatusList$member": null + } + }, + "DeliveryChannelStatusList": { + "base": null, + "refs": { + "DescribeDeliveryChannelStatusResponse$DeliveryChannelsStatus": "

    A list that contains the status of a specified delivery channel.

    " + } + }, + "DeliveryStatus": { + "base": null, + "refs": { + "ConfigExportDeliveryInfo$lastStatus": "

    Status of the last attempted delivery.

    ", + "ConfigStreamDeliveryInfo$lastStatus": "

    Status of the last attempted delivery.

    Note: Providing an SNS topic on a DeliveryChannel for AWS Config is optional. If the SNS delivery is turned off, the last status will be Not_Applicable.

    " + } + }, + "DescribeComplianceByConfigRuleRequest": { + "base": null, + "refs": { + } + }, + "DescribeComplianceByConfigRuleResponse": { + "base": null, + "refs": { + } + }, + "DescribeComplianceByResourceRequest": { + "base": null, + "refs": { + } + }, + "DescribeComplianceByResourceResponse": { + "base": null, + "refs": { + } + }, + "DescribeConfigRuleEvaluationStatusRequest": { + "base": null, + "refs": { + } + }, + "DescribeConfigRuleEvaluationStatusResponse": { + "base": null, + "refs": { + } + }, + "DescribeConfigRulesRequest": { + "base": null, + "refs": { + } + }, + "DescribeConfigRulesResponse": { + "base": null, + "refs": { + } + }, + "DescribeConfigurationRecorderStatusRequest": { + "base": "

    The input for the DescribeConfigurationRecorderStatus action.

    ", + "refs": { + } + }, + "DescribeConfigurationRecorderStatusResponse": { + "base": "

    The output for the DescribeConfigurationRecorderStatus action in JSON format.

    ", + "refs": { + } + }, + "DescribeConfigurationRecordersRequest": { + "base": "

    The input for the DescribeConfigurationRecorders action.

    ", + "refs": { + } + }, + "DescribeConfigurationRecordersResponse": { + "base": "

    The output for the DescribeConfigurationRecorders action.

    ", + "refs": { + } + }, + "DescribeDeliveryChannelStatusRequest": { + "base": "

    The input for the DescribeDeliveryChannelStatus action.

    ", + "refs": { + } + }, + "DescribeDeliveryChannelStatusResponse": { + "base": "

    The output for the DescribeDeliveryChannelStatus action.

    ", + "refs": { + } + }, + "DescribeDeliveryChannelsRequest": { + "base": "

    The input for the DescribeDeliveryChannels action.

    ", + "refs": { + } + }, + "DescribeDeliveryChannelsResponse": { + "base": "

    The output for the DescribeDeliveryChannels action.

    ", + "refs": { + } + }, + "EarlierTime": { + "base": null, + "refs": { + "GetResourceConfigHistoryRequest$earlierTime": "

    The time stamp that indicates an earlier time. If not specified, the action returns paginated results that contain configuration items that start from when the first configuration item was recorded.

    " + } + }, + "EmptiableStringWithCharLimit256": { + "base": null, + "refs": { + "ConfigRule$Description": "

    The description that you provide for the AWS Config rule.

    " + } + }, + "Evaluation": { + "base": "

    Identifies an AWS resource and indicates whether it complies with the AWS Config rule that it was evaluated against.

    ", + "refs": { + "Evaluations$member": null + } + }, + "EvaluationResult": { + "base": "

    The details of an AWS Config evaluation. Provides the AWS resource that was evaluated, the compliance of the resource, related timestamps, and supplementary information.

    ", + "refs": { + "EvaluationResults$member": null + } + }, + "EvaluationResultIdentifier": { + "base": "

    Uniquely identifies an evaluation result.

    ", + "refs": { + "EvaluationResult$EvaluationResultIdentifier": "

    Uniquely identifies the evaluation result.

    " + } + }, + "EvaluationResultQualifier": { + "base": "

    Identifies an AWS Config rule that evaluated an AWS resource, and provides the type and ID of the resource that the rule evaluated.

    ", + "refs": { + "EvaluationResultIdentifier$EvaluationResultQualifier": "

    Identifies an AWS Config rule used to evaluate an AWS resource, and provides the type and ID of the evaluated resource.

    " + } + }, + "EvaluationResults": { + "base": null, + "refs": { + "GetComplianceDetailsByConfigRuleResponse$EvaluationResults": "

    Indicates whether the AWS resource complies with the specified AWS Config rule.

    ", + "GetComplianceDetailsByResourceResponse$EvaluationResults": "

    Indicates whether the specified AWS resource complies with each AWS Config rule.

    " + } + }, + "Evaluations": { + "base": null, + "refs": { + "PutEvaluationsRequest$Evaluations": "

    The assessments that the AWS Lambda function performs. Each evaluation identifies an AWS resource and indicates whether it complies with the AWS Config rule that invokes the AWS Lambda function.

    ", + "PutEvaluationsResponse$FailedEvaluations": "

    Requests that failed because of a client or server error.

    " + } + }, + "EventSource": { + "base": null, + "refs": { + "SourceDetail$EventSource": "

    The source of the event, such as an AWS service, that triggers AWS Config to evaluate your AWS resources.

    " + } + }, + "GetComplianceDetailsByConfigRuleRequest": { + "base": null, + "refs": { + } + }, + "GetComplianceDetailsByConfigRuleResponse": { + "base": null, + "refs": { + } + }, + "GetComplianceDetailsByResourceRequest": { + "base": null, + "refs": { + } + }, + "GetComplianceDetailsByResourceResponse": { + "base": null, + "refs": { + } + }, + "GetComplianceSummaryByConfigRuleResponse": { + "base": null, + "refs": { + } + }, + "GetComplianceSummaryByResourceTypeRequest": { + "base": null, + "refs": { + } + }, + "GetComplianceSummaryByResourceTypeResponse": { + "base": null, + "refs": { + } + }, + "GetResourceConfigHistoryRequest": { + "base": "

    The input for the GetResourceConfigHistory action.

    ", + "refs": { + } + }, + "GetResourceConfigHistoryResponse": { + "base": "

    The output for the GetResourceConfigHistory action.

    ", + "refs": { + } + }, + "IncludeGlobalResourceTypes": { + "base": null, + "refs": { + "RecordingGroup$includeGlobalResourceTypes": "

    Specifies whether AWS Config includes all supported types of global resources (for example, IAM resources) with the resources that it records.

    Before you can set this option to true, you must set the allSupported option to true.

    If you set this option to true, when AWS Config adds support for a new type of global resource, it automatically starts recording resources of that type.

    The configuration details for any global resource are the same in all regions. To prevent duplicate configuration items, you should consider customizing AWS Config in only one region to record global resources.

    " + } + }, + "InsufficientDeliveryPolicyException": { + "base": "

    Your Amazon S3 bucket policy does not permit AWS Config to write to it.

    ", + "refs": { + } + }, + "InsufficientPermissionsException": { + "base": "

    Indicates one of the following errors:

    • The rule cannot be created because the IAM role assigned to AWS Config lacks permissions to perform the config:Put* action.
    • The AWS Lambda function cannot be invoked. Check the function ARN, and check the function's permissions.
    ", + "refs": { + } + }, + "Integer": { + "base": null, + "refs": { + "ComplianceContributorCount$CappedCount": "

    The number of AWS resources or AWS Config rules responsible for the current compliance of the item.

    " + } + }, + "InvalidConfigurationRecorderNameException": { + "base": "

    You have provided a configuration recorder name that is not valid.

    ", + "refs": { + } + }, + "InvalidDeliveryChannelNameException": { + "base": "

    The specified delivery channel name is not valid.

    ", + "refs": { + } + }, + "InvalidLimitException": { + "base": "

    The specified limit is outside the allowable range.

    ", + "refs": { + } + }, + "InvalidNextTokenException": { + "base": "

    The specified next token is invalid. Specify the nextToken string that was returned in the previous response to get the next page of results.

    ", + "refs": { + } + }, + "InvalidParameterValueException": { + "base": "

    One or more of the specified parameters are invalid. Verify that your parameters are valid and try again.

    ", + "refs": { + } + }, + "InvalidRecordingGroupException": { + "base": "

    AWS Config throws an exception if the recording group does not contain a valid list of resource types, or if one or more of the values are incorrectly formatted.

    ", + "refs": { + } + }, + "InvalidResultTokenException": { + "base": "

    The result token is invalid.

    ", + "refs": { + } + }, + "InvalidRoleException": { + "base": "

    You have provided a null or empty role ARN.

    ", + "refs": { + } + }, + "InvalidS3KeyPrefixException": { + "base": "

    The specified Amazon S3 key prefix is not valid.

    ", + "refs": { + } + }, + "InvalidSNSTopicARNException": { + "base": "

    The specified Amazon SNS topic does not exist.

    ", + "refs": { + } + }, + "InvalidTimeRangeException": { + "base": "

    The specified time range is not valid. The earlier time is not chronologically before the later time.

    ", + "refs": { + } + }, + "LastDeliveryChannelDeleteFailedException": { + "base": "

    You cannot delete the delivery channel you specified because the configuration recorder is running.

    ", + "refs": { + } + }, + "LaterTime": { + "base": null, + "refs": { + "GetResourceConfigHistoryRequest$laterTime": "

    The time stamp that indicates a later time. If not specified, the current time is used.

    " + } + }, + "Limit": { + "base": null, + "refs": { + "DescribeComplianceByResourceRequest$Limit": "

    The maximum number of evaluation results returned on each page. The default is 10. You cannot specify a limit greater than 100. If you specify 0, AWS Config uses the default.

    ", + "GetComplianceDetailsByConfigRuleRequest$Limit": "

    The maximum number of evaluation results returned on each page. The default is 10. You cannot specify a limit greater than 100. If you specify 0, AWS Config uses the default.

    ", + "GetResourceConfigHistoryRequest$limit": "

    The maximum number of configuration items returned on each page. The default is 10. You cannot specify a limit greater than 100. If you specify 0, AWS Config uses the default.

    ", + "ListDiscoveredResourcesRequest$limit": "

    The maximum number of resource identifiers returned on each page. The default is 100. You cannot specify a limit greater than 100. If you specify 0, AWS Config uses the default.

    " + } + }, + "ListDiscoveredResourcesRequest": { + "base": null, + "refs": { + } + }, + "ListDiscoveredResourcesResponse": { + "base": null, + "refs": { + } + }, + "MaxNumberOfConfigRulesExceededException": { + "base": "

    Failed to add the AWS Config rule because the account already contains the maximum number of 25 rules. Consider deleting any deactivated rules before adding new rules.

    ", + "refs": { + } + }, + "MaxNumberOfConfigurationRecordersExceededException": { + "base": "

    You have reached the limit on the number of recorders you can create.

    ", + "refs": { + } + }, + "MaxNumberOfDeliveryChannelsExceededException": { + "base": "

    You have reached the limit on the number of delivery channels you can create.

    ", + "refs": { + } + }, + "MaximumExecutionFrequency": { + "base": null, + "refs": { + "ConfigRule$MaximumExecutionFrequency": "

    The maximum frequency at which the AWS Config rule runs evaluations.

    If your rule is periodic, meaning it runs an evaluation when AWS Config delivers a configuration snapshot, then it cannot run evaluations more frequently than AWS Config delivers the snapshots. For periodic rules, set the value of the MaximumExecutionFrequency key to be equal to or greater than the value of the deliveryFrequency key, which is part of ConfigSnapshotDeliveryProperties. To update the frequency with which AWS Config delivers your snapshots, use the PutDeliveryChannel action.

    ", + "ConfigSnapshotDeliveryProperties$deliveryFrequency": "

    The frequency with which AWS Config recurringly delivers configuration snapshots.

    " + } + }, + "MessageType": { + "base": null, + "refs": { + "SourceDetail$MessageType": "

    The type of SNS message that triggers AWS Config to run an evaluation. For evaluations that are initiated when AWS Config delivers a configuration item change notification, you must use ConfigurationItemChangeNotification. For evaluations that are initiated when AWS Config delivers a configuration snapshot, you must use ConfigurationSnapshotDeliveryCompleted.

    " + } + }, + "Name": { + "base": null, + "refs": { + "Tags$key": null + } + }, + "NextToken": { + "base": null, + "refs": { + "DescribeComplianceByResourceRequest$NextToken": "

    The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

    ", + "DescribeComplianceByResourceResponse$NextToken": "

    The string that you use in a subsequent request to get the next page of results in a paginated response.

    ", + "GetComplianceDetailsByConfigRuleRequest$NextToken": "

    The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

    ", + "GetComplianceDetailsByConfigRuleResponse$NextToken": "

    The string that you use in a subsequent request to get the next page of results in a paginated response.

    ", + "GetResourceConfigHistoryRequest$nextToken": "

    The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

    ", + "GetResourceConfigHistoryResponse$nextToken": "

    The string that you use in a subsequent request to get the next page of results in a paginated response.

    ", + "ListDiscoveredResourcesRequest$nextToken": "

    The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

    ", + "ListDiscoveredResourcesResponse$nextToken": "

    The string that you use in a subsequent request to get the next page of results in a paginated response.

    " + } + }, + "NoAvailableConfigurationRecorderException": { + "base": "

    There are no configuration recorders available to provide the role needed to describe your resources. Create a configuration recorder.

    ", + "refs": { + } + }, + "NoAvailableDeliveryChannelException": { + "base": "

    There is no delivery channel available to record configurations.

    ", + "refs": { + } + }, + "NoRunningConfigurationRecorderException": { + "base": "

    There is no configuration recorder running.

    ", + "refs": { + } + }, + "NoSuchBucketException": { + "base": "

    The specified Amazon S3 bucket does not exist.

    ", + "refs": { + } + }, + "NoSuchConfigRuleException": { + "base": "

    One or more AWS Config rules in the request are invalid. Verify that the rule names are correct and try again.

    ", + "refs": { + } + }, + "NoSuchConfigurationRecorderException": { + "base": "

    You have specified a configuration recorder that does not exist.

    ", + "refs": { + } + }, + "NoSuchDeliveryChannelException": { + "base": "

    You have specified a delivery channel that does not exist.

    ", + "refs": { + } + }, + "OrderingTimestamp": { + "base": null, + "refs": { + "Evaluation$OrderingTimestamp": "

    The time of the event in AWS Config that triggered the evaluation. For event-based evaluations, the time indicates when AWS Config created the configuration item that triggered the evaluation. For periodic evaluations, the time indicates when AWS Config delivered the configuration snapshot that triggered the evaluation.

    " + } + }, + "Owner": { + "base": null, + "refs": { + "Source$Owner": "

    Indicates whether AWS or the customer owns and manages the AWS Config rule.

    " + } + }, + "PutConfigRuleRequest": { + "base": null, + "refs": { + } + }, + "PutConfigurationRecorderRequest": { + "base": "

    The input for the PutConfigurationRecorder action.

    ", + "refs": { + } + }, + "PutDeliveryChannelRequest": { + "base": "

    The input for the PutDeliveryChannel action.

    ", + "refs": { + } + }, + "PutEvaluationsRequest": { + "base": null, + "refs": { + } + }, + "PutEvaluationsResponse": { + "base": null, + "refs": { + } + }, + "RecorderName": { + "base": null, + "refs": { + "ConfigurationRecorder$name": "

    The name of the recorder. By default, AWS Config automatically assigns the name "default" when creating the configuration recorder. You cannot change the assigned name.

    ", + "ConfigurationRecorderNameList$member": null, + "DeleteConfigurationRecorderRequest$ConfigurationRecorderName": "

    The name of the configuration recorder to be deleted. You can retrieve the name of your configuration recorder by using the DescribeConfigurationRecorders action.

    ", + "StartConfigurationRecorderRequest$ConfigurationRecorderName": "

    The name of the recorder object that records each configuration change made to the resources.

    ", + "StopConfigurationRecorderRequest$ConfigurationRecorderName": "

    The name of the recorder object that records each configuration change made to the resources.

    " + } + }, + "RecorderStatus": { + "base": null, + "refs": { + "ConfigurationRecorderStatus$lastStatus": "

    The last (previous) status of the recorder.

    " + } + }, + "RecordingGroup": { + "base": "

    Specifies the types of AWS resource for which AWS Config records configuration changes.

    In the recording group, you specify whether all supported types or specific types of resources are recorded.

    By default, AWS Config records configuration changes for all supported types of regional resources that AWS Config discovers in the region in which it is running. Regional resources are tied to a region and can be used only in that region. Examples of regional resources are EC2 instances and EBS volumes.

    You can also have AWS Config record configuration changes for supported types of global resources (for example, IAM resources). Global resources are not tied to an individual region and can be used in all regions.

    The configuration details for any global resource are the same in all regions. If you customize AWS Config in multiple regions to record global resources, it will create multiple configuration items each time a global resource changes: one configuration item for each region. These configuration items will contain identical data. To prevent duplicate configuration items, you should consider customizing AWS Config in only one region to record global resources, unless you want the configuration items to be available in multiple regions.

    If you don't want AWS Config to record all resources, you can specify which types of resources it will record with the resourceTypes parameter.

    For a list of supported resource types, see Supported resource types.

    For more information, see Selecting Which Resources AWS Config Records.

    ", + "refs": { + "ConfigurationRecorder$recordingGroup": "

    Specifies the types of AWS resource for which AWS Config records configuration changes.

    " + } + }, + "RelatedEvent": { + "base": null, + "refs": { + "RelatedEventList$member": null + } + }, + "RelatedEventList": { + "base": null, + "refs": { + "ConfigurationItem$relatedEvents": "

    A list of CloudTrail event IDs.

    A populated field indicates that the current configuration was initiated by the events recorded in the CloudTrail log. For more information about CloudTrail, see What is AWS CloudTrail?.

    An empty field indicates that the current configuration was not initiated by any event.

    " + } + }, + "Relationship": { + "base": "

    The relationship of the related resource to the main resource.

    ", + "refs": { + "RelationshipList$member": null + } + }, + "RelationshipList": { + "base": null, + "refs": { + "ConfigurationItem$relationships": "

    A list of related AWS resources.

    " + } + }, + "RelationshipName": { + "base": null, + "refs": { + "Relationship$relationshipName": "

    The type of relationship with the related resource.

    " + } + }, + "ResourceCreationTime": { + "base": null, + "refs": { + "ConfigurationItem$resourceCreationTime": "

    The time stamp when the resource was created.

    " + } + }, + "ResourceDeletionTime": { + "base": null, + "refs": { + "ResourceIdentifier$resourceDeletionTime": "

    The time that the resource was deleted.

    " + } + }, + "ResourceId": { + "base": null, + "refs": { + "ConfigurationItem$resourceId": "

    The ID of the resource (for example, sg-xxxxxx).

    ", + "GetResourceConfigHistoryRequest$resourceId": "

    The ID of the resource (for example, sg-xxxxxx).

    ", + "Relationship$resourceId": "

    The ID of the related resource (for example, sg-xxxxxx).

    ", + "ResourceIdList$member": null, + "ResourceIdentifier$resourceId": "

    The ID of the resource (for example, sg-xxxxxx).

    " + } + }, + "ResourceIdList": { + "base": null, + "refs": { + "ListDiscoveredResourcesRequest$resourceIds": "

    The IDs of only those resources that you want AWS Config to list in the response. If you do not specify this parameter, AWS Config lists all resources of the specified type that it has discovered.

    " + } + }, + "ResourceIdentifier": { + "base": "

    The details that identify a resource that is discovered by AWS Config, including the resource type, ID, and (if available) the custom resource name.

    ", + "refs": { + "ResourceIdentifierList$member": null + } + }, + "ResourceIdentifierList": { + "base": null, + "refs": { + "ListDiscoveredResourcesResponse$resourceIdentifiers": "

    The details that identify a resource that is discovered by AWS Config, including the resource type, ID, and (if available) the custom resource name.

    " + } + }, + "ResourceInUseException": { + "base": "

    The rule is currently being deleted. Wait briefly, and then try again.

    ", + "refs": { + } + }, + "ResourceName": { + "base": null, + "refs": { + "ConfigurationItem$resourceName": "

    The custom name of the resource, if available.

    ", + "ListDiscoveredResourcesRequest$resourceName": "

    The custom name of only those resources that you want AWS Config to list in the response. If you do not specify this parameter, AWS Config lists all resources of the specified type that it has discovered.

    ", + "Relationship$resourceName": "

    The custom name of the related resource, if available.

    ", + "ResourceIdentifier$resourceName": "

    The custom name of the resource (if available).

    " + } + }, + "ResourceNotDiscoveredException": { + "base": "

    You have specified a resource that is either unknown or has not been discovered.

    ", + "refs": { + } + }, + "ResourceType": { + "base": null, + "refs": { + "ConfigurationItem$resourceType": "

    The type of AWS resource.

    ", + "GetResourceConfigHistoryRequest$resourceType": "

    The resource type.

    ", + "ListDiscoveredResourcesRequest$resourceType": "

    The type of resources that you want AWS Config to list in the response.

    ", + "Relationship$resourceType": "

    The resource type of the related resource.

    ", + "ResourceIdentifier$resourceType": "

    The type of resource.

    ", + "ResourceTypeList$member": null + } + }, + "ResourceTypeList": { + "base": null, + "refs": { + "RecordingGroup$resourceTypes": "

    A comma-separated list that specifies the types of AWS resources for which AWS Config records configuration changes (for example, AWS::EC2::Instance or AWS::CloudTrail::Trail).

    Before you can set this option, you must set the allSupported option to false.

    If you set this option, then whenever AWS Config adds support for a new type of resource, it will not record resources of that type unless you manually add that type to your recording group.

    For a list of valid resourceTypes values, see the resourceType Value column in Supported AWS Resource Types.

    " + } + }, + "ResourceTypes": { + "base": null, + "refs": { + "GetComplianceSummaryByResourceTypeRequest$ResourceTypes": "

    Specify one or more resource types to get the number of resources that are compliant and the number that are noncompliant for each resource type.

    For this request, you can specify an AWS resource type such as AWS::EC2::Instance, and you can specify that the resource type is an AWS account by specifying AWS::::Account.

    " + } + }, + "Scope": { + "base": "

    Defines which resources trigger an evaluation for an AWS Config rule. The scope can include one or more resource types, a combination of a tag key and value, or a combination of one resource type and one resource ID. Specify a scope to constrain which resources trigger an evaluation for a rule. Otherwise, evaluations for the rule are triggered when any resource in your recording group changes in configuration.

    ", + "refs": { + "ConfigRule$Scope": "

    Defines which resources can trigger an evaluation for the rule. The scope can include one or more resource types, a combination of one resource type and one resource ID, or a combination of a tag key and value. Specify a scope to constrain the resources that can trigger an evaluation for the rule. If you do not specify a scope, evaluations are triggered when any resource in the recording group changes.

    " + } + }, + "Source": { + "base": "

    Provides the AWS Config rule owner (AWS or customer), the rule identifier, and the events that trigger the evaluation of your AWS resources.

    ", + "refs": { + "ConfigRule$Source": "

    Provides the rule owner (AWS or customer), the rule identifier, and the events that cause the function to evaluate your AWS resources.

    " + } + }, + "SourceDetail": { + "base": "

    Provides the source and type of the event that triggers AWS Config to evaluate your AWS resources against a rule.

    ", + "refs": { + "SourceDetails$member": null + } + }, + "SourceDetails": { + "base": null, + "refs": { + "Source$SourceDetails": "

    Provides the source and type of the event that causes AWS Config to evaluate your AWS resources.

    " + } + }, + "StartConfigurationRecorderRequest": { + "base": "

    The input for the StartConfigurationRecorder action.

    ", + "refs": { + } + }, + "StopConfigurationRecorderRequest": { + "base": "

    The input for the StopConfigurationRecorder action.

    ", + "refs": { + } + }, + "String": { + "base": null, + "refs": { + "ConfigExportDeliveryInfo$lastErrorCode": "

    The error code from the last attempted delivery.

    ", + "ConfigExportDeliveryInfo$lastErrorMessage": "

    The error message from the last attempted delivery.

    ", + "ConfigRule$ConfigRuleArn": "

    The Amazon Resource Name (ARN) of the AWS Config rule.

    ", + "ConfigRule$ConfigRuleId": "

    The ID of the AWS Config rule.

    ", + "ConfigRuleEvaluationStatus$ConfigRuleArn": "

    The Amazon Resource Name (ARN) of the AWS Config rule.

    ", + "ConfigRuleEvaluationStatus$ConfigRuleId": "

    The ID of the AWS Config rule.

    ", + "ConfigRuleEvaluationStatus$LastErrorCode": "

    The error code that AWS Config returned when the rule last failed.

    ", + "ConfigRuleEvaluationStatus$LastErrorMessage": "

    The error message that AWS Config returned when the rule last failed.

    ", + "ConfigStreamDeliveryInfo$lastErrorCode": "

    The error code from the last attempted delivery.

    ", + "ConfigStreamDeliveryInfo$lastErrorMessage": "

    The error message from the last attempted delivery.

    ", + "ConfigurationRecorder$roleARN": "

    Amazon Resource Name (ARN) of the IAM role used to describe the AWS resources associated with the account.

    ", + "ConfigurationRecorderStatus$name": "

    The name of the configuration recorder.

    ", + "ConfigurationRecorderStatus$lastErrorCode": "

    The error code indicating that the recording failed.

    ", + "ConfigurationRecorderStatus$lastErrorMessage": "

    The message indicating that the recording failed due to an error.

    ", + "DeliverConfigSnapshotResponse$configSnapshotId": "

    The ID of the snapshot that is being created.

    ", + "DeliveryChannel$s3BucketName": "

    The name of the Amazon S3 bucket to which AWS Config delivers configuration snapshots and configuration history files.

    If you specify a bucket that belongs to another AWS account, that bucket must have policies that grant access permissions to AWS Config. For more information, see Permissions for the Amazon S3 Bucket in the AWS Config Developer Guide.

    ", + "DeliveryChannel$s3KeyPrefix": "

    The prefix for the specified Amazon S3 bucket.

    ", + "DeliveryChannel$snsTopicARN": "

    The Amazon Resource Name (ARN) of the Amazon SNS topic to which AWS Config sends notifications about configuration changes.

    If you choose a topic from another account, the topic must have policies that grant access permissions to AWS Config. For more information, see Permissions for the Amazon SNS Topic in the AWS Config Developer Guide.

    ", + "DeliveryChannelStatus$name": "

    The name of the delivery channel.

    ", + "DescribeComplianceByConfigRuleRequest$NextToken": "

    The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

    ", + "DescribeComplianceByConfigRuleResponse$NextToken": "

    The string that you use in a subsequent request to get the next page of results in a paginated response.

    ", + "DescribeConfigRulesRequest$NextToken": "

    The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

    ", + "DescribeConfigRulesResponse$NextToken": "

    The string that you use in a subsequent request to get the next page of results in a paginated response.

    ", + "EvaluationResult$ResultToken": "

    An encrypted token that associates an evaluation with an AWS Config rule. The token identifies the rule, the AWS resource being evaluated, and the event that triggered the evaluation.

    ", + "GetComplianceDetailsByResourceRequest$NextToken": "

    The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

    ", + "GetComplianceDetailsByResourceResponse$NextToken": "

    The string that you use in a subsequent request to get the next page of results in a paginated response.

    ", + "PutEvaluationsRequest$ResultToken": "

    An encrypted token that associates an evaluation with an AWS Config rule. Identifies the rule and the event that triggered the evaluation.

    " + } + }, + "StringWithCharLimit128": { + "base": null, + "refs": { + "Scope$TagKey": "

    The tag key that is applied to only those AWS resources that you want to trigger an evaluation for the rule.

    " + } + }, + "StringWithCharLimit256": { + "base": null, + "refs": { + "ComplianceByResource$ResourceType": "

    The type of the AWS resource that was evaluated.

    ", + "ComplianceByResource$ResourceId": "

    The ID of the AWS resource that was evaluated.

    ", + "ComplianceResourceTypes$member": null, + "ComplianceSummaryByResourceType$ResourceType": "

    The type of AWS resource.

    ", + "ConfigRule$InputParameters": "

    A string in JSON format that is passed to the AWS Config rule Lambda function.

    ", + "DescribeComplianceByResourceRequest$ResourceType": "

    The types of AWS resources for which you want compliance information; for example, AWS::EC2::Instance. For this action, you can specify that the resource type is an AWS account by specifying AWS::::Account.

    ", + "DescribeComplianceByResourceRequest$ResourceId": "

    The ID of the AWS resource for which you want compliance information. You can specify only one resource ID. If you specify a resource ID, you must also specify a type for ResourceType.

    ", + "Evaluation$ComplianceResourceType": "

    The type of AWS resource that was evaluated.

    ", + "Evaluation$ComplianceResourceId": "

    The ID of the AWS resource that was evaluated.

    ", + "Evaluation$Annotation": "

    Supplementary information about how the evaluation determined the compliance.

    ", + "EvaluationResult$Annotation": "

    Supplementary information about how the evaluation determined the compliance.

    ", + "EvaluationResultQualifier$ResourceType": "

    The type of AWS resource that was evaluated.

    ", + "EvaluationResultQualifier$ResourceId": "

    The ID of the evaluated AWS resource.

    ", + "GetComplianceDetailsByResourceRequest$ResourceType": "

    The type of the AWS resource for which you want compliance information.

    ", + "GetComplianceDetailsByResourceRequest$ResourceId": "

    The ID of the AWS resource for which you want compliance information.

    ", + "ResourceTypes$member": null, + "Scope$TagValue": "

    The tag value applied to only those AWS resources that you want to trigger an evaluation for the rule. If you specify a value for TagValue, you must also specify a value for TagKey.

    ", + "Scope$ComplianceResourceId": "

    The ID of the only AWS resource that you want to trigger an evaluation for the rule. If you specify a resource ID, you must specify one resource type for ComplianceResourceTypes.

    ", + "Source$SourceIdentifier": "

    For AWS managed Config rules, a predefined identifier from a list. To reference the list, see Using AWS Managed Config Rules.

    For customer managed Config rules, the identifier is the Amazon Resource Name (ARN) of the rule's AWS Lambda function.

    " + } + }, + "StringWithCharLimit64": { + "base": null, + "refs": { + "ComplianceByConfigRule$ConfigRuleName": "

    The name of the AWS Config rule.

    ", + "ConfigRule$ConfigRuleName": "

    The name that you assign to the AWS Config rule. The name is required if you are adding a new rule.

    ", + "ConfigRuleEvaluationStatus$ConfigRuleName": "

    The name of the AWS Config rule.

    ", + "ConfigRuleNames$member": null, + "DeleteConfigRuleRequest$ConfigRuleName": "

    The name of the AWS Config rule that you want to delete.

    ", + "EvaluationResultQualifier$ConfigRuleName": "

    The name of the AWS Config rule that was used in the evaluation.

    ", + "GetComplianceDetailsByConfigRuleRequest$ConfigRuleName": "

    The name of the AWS Config rule for which you want compliance information.

    " + } + }, + "Tags": { + "base": null, + "refs": { + "ConfigurationItem$tags": "

    A mapping of key-value tags associated with the resource.

    " + } + }, + "ValidationException": { + "base": "

    The requested action is not valid.

    ", + "refs": { + } + }, + "Value": { + "base": null, + "refs": { + "Tags$value": null + } + }, + "Version": { + "base": null, + "refs": { + "ConfigurationItem$version": "

    The version number of the resource configuration.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/paginators-1.json new file mode 100644 index 000000000..45c365ec3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/paginators-1.json @@ -0,0 +1,10 @@ +{ + "pagination": { + "GetResourceConfigHistory": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "limit", + "result_key": "configurationItems" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/datapipeline/2012-10-29/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/datapipeline/2012-10-29/api-2.json new file mode 100644 index 000000000..da8ae4ecf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/datapipeline/2012-10-29/api-2.json @@ -0,0 +1,1167 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2012-10-29", + "endpointPrefix":"datapipeline", + "jsonVersion":"1.1", + "serviceFullName":"AWS Data Pipeline", + "signatureVersion":"v4", + "targetPrefix":"DataPipeline", + "protocol":"json" + }, + "operations":{ + "ActivatePipeline":{ + "name":"ActivatePipeline", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ActivatePipelineInput"}, + "output":{"shape":"ActivatePipelineOutput"}, + "errors":[ + { + "shape":"PipelineNotFoundException", + "exception":true + }, + { + "shape":"PipelineDeletedException", + "exception":true + }, + { + "shape":"InternalServiceError", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + } + ] + }, + "AddTags":{ + "name":"AddTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddTagsInput"}, + "output":{"shape":"AddTagsOutput"}, + "errors":[ + { + "shape":"InternalServiceError", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"PipelineNotFoundException", + "exception":true + }, + { + "shape":"PipelineDeletedException", + "exception":true + } + ] + }, + "CreatePipeline":{ + "name":"CreatePipeline", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePipelineInput"}, + "output":{"shape":"CreatePipelineOutput"}, + "errors":[ + { + "shape":"InternalServiceError", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + } + ] + }, + "DeactivatePipeline":{ + "name":"DeactivatePipeline", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeactivatePipelineInput"}, + "output":{"shape":"DeactivatePipelineOutput"}, + "errors":[ + { + "shape":"PipelineNotFoundException", + "exception":true + }, + { + "shape":"PipelineDeletedException", + "exception":true + }, + { + "shape":"InternalServiceError", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + } + ] + }, + "DeletePipeline":{ + "name":"DeletePipeline", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeletePipelineInput"}, + "errors":[ + { + "shape":"PipelineNotFoundException", + 
"exception":true + }, + { + "shape":"InternalServiceError", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + } + ] + }, + "DescribeObjects":{ + "name":"DescribeObjects", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeObjectsInput"}, + "output":{"shape":"DescribeObjectsOutput"}, + "errors":[ + { + "shape":"InternalServiceError", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"PipelineNotFoundException", + "exception":true + }, + { + "shape":"PipelineDeletedException", + "exception":true + } + ] + }, + "DescribePipelines":{ + "name":"DescribePipelines", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribePipelinesInput"}, + "output":{"shape":"DescribePipelinesOutput"}, + "errors":[ + { + "shape":"PipelineNotFoundException", + "exception":true + }, + { + "shape":"PipelineDeletedException", + "exception":true + }, + { + "shape":"InternalServiceError", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + } + ] + }, + "EvaluateExpression":{ + "name":"EvaluateExpression", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EvaluateExpressionInput"}, + "output":{"shape":"EvaluateExpressionOutput"}, + "errors":[ + { + "shape":"InternalServiceError", + "exception":true, + "fault":true + }, + { + "shape":"TaskNotFoundException", + "exception":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"PipelineNotFoundException", + "exception":true + }, + { + "shape":"PipelineDeletedException", + "exception":true + } + ] + }, + "GetPipelineDefinition":{ + "name":"GetPipelineDefinition", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetPipelineDefinitionInput"}, + "output":{"shape":"GetPipelineDefinitionOutput"}, + "errors":[ + { + "shape":"InternalServiceError", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"PipelineNotFoundException", + "exception":true + }, + { + "shape":"PipelineDeletedException", + "exception":true + } + ] + }, + "ListPipelines":{ + "name":"ListPipelines", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListPipelinesInput"}, + "output":{"shape":"ListPipelinesOutput"}, + "errors":[ + { + "shape":"InternalServiceError", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + } + ] + }, + "PollForTask":{ + "name":"PollForTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PollForTaskInput"}, + "output":{"shape":"PollForTaskOutput"}, + "errors":[ + { + "shape":"InternalServiceError", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"TaskNotFoundException", + "exception":true + } + ] + }, + "PutPipelineDefinition":{ + "name":"PutPipelineDefinition", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutPipelineDefinitionInput"}, + "output":{"shape":"PutPipelineDefinitionOutput"}, + "errors":[ + { + "shape":"InternalServiceError", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"PipelineNotFoundException", + "exception":true + }, + { + "shape":"PipelineDeletedException", + "exception":true + } + ] + }, + "QueryObjects":{ + 
"name":"QueryObjects", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"QueryObjectsInput"}, + "output":{"shape":"QueryObjectsOutput"}, + "errors":[ + { + "shape":"PipelineNotFoundException", + "exception":true + }, + { + "shape":"PipelineDeletedException", + "exception":true + }, + { + "shape":"InternalServiceError", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + } + ] + }, + "RemoveTags":{ + "name":"RemoveTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveTagsInput"}, + "output":{"shape":"RemoveTagsOutput"}, + "errors":[ + { + "shape":"InternalServiceError", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"PipelineNotFoundException", + "exception":true + }, + { + "shape":"PipelineDeletedException", + "exception":true + } + ] + }, + "ReportTaskProgress":{ + "name":"ReportTaskProgress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReportTaskProgressInput"}, + "output":{"shape":"ReportTaskProgressOutput"}, + "errors":[ + { + "shape":"InternalServiceError", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"TaskNotFoundException", + "exception":true + }, + { + "shape":"PipelineNotFoundException", + "exception":true + }, + { + "shape":"PipelineDeletedException", + "exception":true + } + ] + }, + "ReportTaskRunnerHeartbeat":{ + "name":"ReportTaskRunnerHeartbeat", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReportTaskRunnerHeartbeatInput"}, + "output":{"shape":"ReportTaskRunnerHeartbeatOutput"}, + "errors":[ + { + "shape":"InternalServiceError", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + } + ] + }, + "SetStatus":{ + "name":"SetStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetStatusInput"}, + "errors":[ + { + "shape":"PipelineNotFoundException", + "exception":true + }, + { + "shape":"PipelineDeletedException", + "exception":true + }, + { + "shape":"InternalServiceError", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + } + ] + }, + "SetTaskStatus":{ + "name":"SetTaskStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetTaskStatusInput"}, + "output":{"shape":"SetTaskStatusOutput"}, + "errors":[ + { + "shape":"InternalServiceError", + "exception":true, + "fault":true + }, + { + "shape":"TaskNotFoundException", + "exception":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"PipelineNotFoundException", + "exception":true + }, + { + "shape":"PipelineDeletedException", + "exception":true + } + ] + }, + "ValidatePipelineDefinition":{ + "name":"ValidatePipelineDefinition", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ValidatePipelineDefinitionInput"}, + "output":{"shape":"ValidatePipelineDefinitionOutput"}, + "errors":[ + { + "shape":"InternalServiceError", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"PipelineNotFoundException", + "exception":true + }, + { + "shape":"PipelineDeletedException", + "exception":true + } + ] + } + }, + "shapes":{ + "ActivatePipelineInput":{ + "type":"structure", + "required":["pipelineId"], + "members":{ + "pipelineId":{"shape":"id"}, + 
"parameterValues":{"shape":"ParameterValueList"}, + "startTimestamp":{"shape":"timestamp"} + } + }, + "ActivatePipelineOutput":{ + "type":"structure", + "members":{ + } + }, + "AddTagsInput":{ + "type":"structure", + "required":[ + "pipelineId", + "tags" + ], + "members":{ + "pipelineId":{"shape":"id"}, + "tags":{"shape":"tagList"} + } + }, + "AddTagsOutput":{ + "type":"structure", + "members":{ + } + }, + "CreatePipelineInput":{ + "type":"structure", + "required":[ + "name", + "uniqueId" + ], + "members":{ + "name":{"shape":"id"}, + "uniqueId":{"shape":"id"}, + "description":{"shape":"string"}, + "tags":{"shape":"tagList"} + } + }, + "CreatePipelineOutput":{ + "type":"structure", + "required":["pipelineId"], + "members":{ + "pipelineId":{"shape":"id"} + } + }, + "DeactivatePipelineInput":{ + "type":"structure", + "required":["pipelineId"], + "members":{ + "pipelineId":{"shape":"id"}, + "cancelActive":{"shape":"cancelActive"} + } + }, + "DeactivatePipelineOutput":{ + "type":"structure", + "members":{ + } + }, + "DeletePipelineInput":{ + "type":"structure", + "required":["pipelineId"], + "members":{ + "pipelineId":{"shape":"id"} + } + }, + "DescribeObjectsInput":{ + "type":"structure", + "required":[ + "pipelineId", + "objectIds" + ], + "members":{ + "pipelineId":{"shape":"id"}, + "objectIds":{"shape":"idList"}, + "evaluateExpressions":{"shape":"boolean"}, + "marker":{"shape":"string"} + } + }, + "DescribeObjectsOutput":{ + "type":"structure", + "required":["pipelineObjects"], + "members":{ + "pipelineObjects":{"shape":"PipelineObjectList"}, + "marker":{"shape":"string"}, + "hasMoreResults":{"shape":"boolean"} + } + }, + "DescribePipelinesInput":{ + "type":"structure", + "required":["pipelineIds"], + "members":{ + "pipelineIds":{"shape":"idList"} + } + }, + "DescribePipelinesOutput":{ + "type":"structure", + "required":["pipelineDescriptionList"], + "members":{ + "pipelineDescriptionList":{"shape":"PipelineDescriptionList"} + } + }, + "EvaluateExpressionInput":{ + "type":"structure", + "required":[ + "pipelineId", + "objectId", + "expression" + ], + "members":{ + "pipelineId":{"shape":"id"}, + "objectId":{"shape":"id"}, + "expression":{"shape":"longString"} + } + }, + "EvaluateExpressionOutput":{ + "type":"structure", + "required":["evaluatedExpression"], + "members":{ + "evaluatedExpression":{"shape":"longString"} + } + }, + "Field":{ + "type":"structure", + "required":["key"], + "members":{ + "key":{"shape":"fieldNameString"}, + "stringValue":{"shape":"fieldStringValue"}, + "refValue":{"shape":"fieldNameString"} + } + }, + "GetPipelineDefinitionInput":{ + "type":"structure", + "required":["pipelineId"], + "members":{ + "pipelineId":{"shape":"id"}, + "version":{"shape":"string"} + } + }, + "GetPipelineDefinitionOutput":{ + "type":"structure", + "members":{ + "pipelineObjects":{"shape":"PipelineObjectList"}, + "parameterObjects":{"shape":"ParameterObjectList"}, + "parameterValues":{"shape":"ParameterValueList"} + } + }, + "InstanceIdentity":{ + "type":"structure", + "members":{ + "document":{"shape":"string"}, + "signature":{"shape":"string"} + } + }, + "InternalServiceError":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "exception":true, + "fault":true + }, + "InvalidRequestException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "exception":true + }, + "ListPipelinesInput":{ + "type":"structure", + "members":{ + "marker":{"shape":"string"} + } + }, + "ListPipelinesOutput":{ + "type":"structure", + 
"required":["pipelineIdList"], + "members":{ + "pipelineIdList":{"shape":"pipelineList"}, + "marker":{"shape":"string"}, + "hasMoreResults":{"shape":"boolean"} + } + }, + "Operator":{ + "type":"structure", + "members":{ + "type":{"shape":"OperatorType"}, + "values":{"shape":"stringList"} + } + }, + "OperatorType":{ + "type":"string", + "enum":[ + "EQ", + "REF_EQ", + "LE", + "GE", + "BETWEEN" + ] + }, + "ParameterAttribute":{ + "type":"structure", + "required":[ + "key", + "stringValue" + ], + "members":{ + "key":{"shape":"attributeNameString"}, + "stringValue":{"shape":"attributeValueString"} + } + }, + "ParameterAttributeList":{ + "type":"list", + "member":{"shape":"ParameterAttribute"} + }, + "ParameterObject":{ + "type":"structure", + "required":[ + "id", + "attributes" + ], + "members":{ + "id":{"shape":"fieldNameString"}, + "attributes":{"shape":"ParameterAttributeList"} + } + }, + "ParameterObjectList":{ + "type":"list", + "member":{"shape":"ParameterObject"} + }, + "ParameterValue":{ + "type":"structure", + "required":[ + "id", + "stringValue" + ], + "members":{ + "id":{"shape":"fieldNameString"}, + "stringValue":{"shape":"fieldStringValue"} + } + }, + "ParameterValueList":{ + "type":"list", + "member":{"shape":"ParameterValue"} + }, + "PipelineDeletedException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "exception":true + }, + "PipelineDescription":{ + "type":"structure", + "required":[ + "pipelineId", + "name", + "fields" + ], + "members":{ + "pipelineId":{"shape":"id"}, + "name":{"shape":"id"}, + "fields":{"shape":"fieldList"}, + "description":{"shape":"string"}, + "tags":{"shape":"tagList"} + } + }, + "PipelineDescriptionList":{ + "type":"list", + "member":{"shape":"PipelineDescription"} + }, + "PipelineIdName":{ + "type":"structure", + "members":{ + "id":{"shape":"id"}, + "name":{"shape":"id"} + } + }, + "PipelineNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "exception":true + }, + "PipelineObject":{ + "type":"structure", + "required":[ + "id", + "name", + "fields" + ], + "members":{ + "id":{"shape":"id"}, + "name":{"shape":"id"}, + "fields":{"shape":"fieldList"} + } + }, + "PipelineObjectList":{ + "type":"list", + "member":{"shape":"PipelineObject"} + }, + "PipelineObjectMap":{ + "type":"map", + "key":{"shape":"id"}, + "value":{"shape":"PipelineObject"} + }, + "PollForTaskInput":{ + "type":"structure", + "required":["workerGroup"], + "members":{ + "workerGroup":{"shape":"string"}, + "hostname":{"shape":"id"}, + "instanceIdentity":{"shape":"InstanceIdentity"} + } + }, + "PollForTaskOutput":{ + "type":"structure", + "members":{ + "taskObject":{"shape":"TaskObject"} + } + }, + "PutPipelineDefinitionInput":{ + "type":"structure", + "required":[ + "pipelineId", + "pipelineObjects" + ], + "members":{ + "pipelineId":{"shape":"id"}, + "pipelineObjects":{"shape":"PipelineObjectList"}, + "parameterObjects":{"shape":"ParameterObjectList"}, + "parameterValues":{"shape":"ParameterValueList"} + } + }, + "PutPipelineDefinitionOutput":{ + "type":"structure", + "required":["errored"], + "members":{ + "validationErrors":{"shape":"ValidationErrors"}, + "validationWarnings":{"shape":"ValidationWarnings"}, + "errored":{"shape":"boolean"} + } + }, + "Query":{ + "type":"structure", + "members":{ + "selectors":{"shape":"SelectorList"} + } + }, + "QueryObjectsInput":{ + "type":"structure", + "required":[ + "pipelineId", + "sphere" + ], + "members":{ + "pipelineId":{"shape":"id"}, + "query":{"shape":"Query"}, 
+ "sphere":{"shape":"string"}, + "marker":{"shape":"string"}, + "limit":{"shape":"int"} + } + }, + "QueryObjectsOutput":{ + "type":"structure", + "members":{ + "ids":{"shape":"idList"}, + "marker":{"shape":"string"}, + "hasMoreResults":{"shape":"boolean"} + } + }, + "RemoveTagsInput":{ + "type":"structure", + "required":[ + "pipelineId", + "tagKeys" + ], + "members":{ + "pipelineId":{"shape":"id"}, + "tagKeys":{"shape":"stringList"} + } + }, + "RemoveTagsOutput":{ + "type":"structure", + "members":{ + } + }, + "ReportTaskProgressInput":{ + "type":"structure", + "required":["taskId"], + "members":{ + "taskId":{"shape":"taskId"}, + "fields":{"shape":"fieldList"} + } + }, + "ReportTaskProgressOutput":{ + "type":"structure", + "required":["canceled"], + "members":{ + "canceled":{"shape":"boolean"} + } + }, + "ReportTaskRunnerHeartbeatInput":{ + "type":"structure", + "required":["taskrunnerId"], + "members":{ + "taskrunnerId":{"shape":"id"}, + "workerGroup":{"shape":"string"}, + "hostname":{"shape":"id"} + } + }, + "ReportTaskRunnerHeartbeatOutput":{ + "type":"structure", + "required":["terminate"], + "members":{ + "terminate":{"shape":"boolean"} + } + }, + "Selector":{ + "type":"structure", + "members":{ + "fieldName":{"shape":"string"}, + "operator":{"shape":"Operator"} + } + }, + "SelectorList":{ + "type":"list", + "member":{"shape":"Selector"} + }, + "SetStatusInput":{ + "type":"structure", + "required":[ + "pipelineId", + "objectIds", + "status" + ], + "members":{ + "pipelineId":{"shape":"id"}, + "objectIds":{"shape":"idList"}, + "status":{"shape":"string"} + } + }, + "SetTaskStatusInput":{ + "type":"structure", + "required":[ + "taskId", + "taskStatus" + ], + "members":{ + "taskId":{"shape":"taskId"}, + "taskStatus":{"shape":"TaskStatus"}, + "errorId":{"shape":"string"}, + "errorMessage":{"shape":"errorMessage"}, + "errorStackTrace":{"shape":"string"} + } + }, + "SetTaskStatusOutput":{ + "type":"structure", + "members":{ + } + }, + "Tag":{ + "type":"structure", + "required":[ + "key", + "value" + ], + "members":{ + "key":{"shape":"tagKey"}, + "value":{"shape":"tagValue"} + } + }, + "TaskNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "exception":true + }, + "TaskObject":{ + "type":"structure", + "members":{ + "taskId":{"shape":"taskId"}, + "pipelineId":{"shape":"id"}, + "attemptId":{"shape":"id"}, + "objects":{"shape":"PipelineObjectMap"} + } + }, + "TaskStatus":{ + "type":"string", + "enum":[ + "FINISHED", + "FAILED", + "FALSE" + ] + }, + "ValidatePipelineDefinitionInput":{ + "type":"structure", + "required":[ + "pipelineId", + "pipelineObjects" + ], + "members":{ + "pipelineId":{"shape":"id"}, + "pipelineObjects":{"shape":"PipelineObjectList"}, + "parameterObjects":{"shape":"ParameterObjectList"}, + "parameterValues":{"shape":"ParameterValueList"} + } + }, + "ValidatePipelineDefinitionOutput":{ + "type":"structure", + "required":["errored"], + "members":{ + "validationErrors":{"shape":"ValidationErrors"}, + "validationWarnings":{"shape":"ValidationWarnings"}, + "errored":{"shape":"boolean"} + } + }, + "ValidationError":{ + "type":"structure", + "members":{ + "id":{"shape":"id"}, + "errors":{"shape":"validationMessages"} + } + }, + "ValidationErrors":{ + "type":"list", + "member":{"shape":"ValidationError"} + }, + "ValidationWarning":{ + "type":"structure", + "members":{ + "id":{"shape":"id"}, + "warnings":{"shape":"validationMessages"} + } + }, + "ValidationWarnings":{ + "type":"list", + "member":{"shape":"ValidationWarning"} + }, + 
"attributeNameString":{ + "type":"string", + "min":1, + "max":256, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "attributeValueString":{ + "type":"string", + "min":0, + "max":10240, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "boolean":{"type":"boolean"}, + "cancelActive":{"type":"boolean"}, + "errorMessage":{"type":"string"}, + "fieldList":{ + "type":"list", + "member":{"shape":"Field"} + }, + "fieldNameString":{ + "type":"string", + "min":1, + "max":256, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "fieldStringValue":{ + "type":"string", + "min":0, + "max":10240, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "id":{ + "type":"string", + "min":1, + "max":1024, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "idList":{ + "type":"list", + "member":{"shape":"id"} + }, + "int":{"type":"integer"}, + "longString":{ + "type":"string", + "min":0, + "max":20971520, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "pipelineList":{ + "type":"list", + "member":{"shape":"PipelineIdName"} + }, + "string":{ + "type":"string", + "min":0, + "max":1024, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "stringList":{ + "type":"list", + "member":{"shape":"string"} + }, + "tagKey":{ + "type":"string", + "min":1, + "max":128 + }, + "tagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "min":0, + "max":10 + }, + "tagValue":{ + "type":"string", + "min":0, + "max":256 + }, + "taskId":{ + "type":"string", + "min":1, + "max":2048, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "timestamp":{"type":"timestamp"}, + "validationMessage":{ + "type":"string", + "min":0, + "max":10000, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "validationMessages":{ + "type":"list", + "member":{"shape":"validationMessage"} + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/datapipeline/2012-10-29/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/datapipeline/2012-10-29/docs-2.json new file mode 100644 index 000000000..7675f1057 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/datapipeline/2012-10-29/docs-2.json @@ -0,0 +1,607 @@ +{ + "version": "2.0", + "operations": { + "ActivatePipeline": "

    Validates the specified pipeline and starts processing pipeline tasks. If the pipeline does not pass validation, activation fails.

    If you need to pause the pipeline to investigate an issue with a component, such as a data source or script, call DeactivatePipeline.

    To activate a finished pipeline, modify the end date for the pipeline and then activate it.

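    A minimal sketch of activating a pipeline with the vendored Go SDK client (github.com/aws/aws-sdk-go/service/datapipeline); the region and pipeline ID are illustrative placeholders:

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/datapipeline"
        )

        func main() {
            svc := datapipeline.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

            // Activate a pipeline whose definition was saved earlier with
            // PutPipelineDefinition; activation fails if validation fails.
            _, err := svc.ActivatePipeline(&datapipeline.ActivatePipelineInput{
                PipelineId: aws.String("df-06372391ZG65EXAMPLE"),
            })
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println("activation requested")
        }
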
    ", + "AddTags": "

    Adds or modifies tags for the specified pipeline.

    ", + "CreatePipeline": "

    Creates a new, empty pipeline. Use PutPipelineDefinition to populate the pipeline.

    ", + "DeactivatePipeline": "

    Deactivates the specified running pipeline. The pipeline is set to the DEACTIVATING state until the deactivation process completes.

    To resume a deactivated pipeline, use ActivatePipeline. By default, the pipeline resumes from the last completed execution. Optionally, you can specify the date and time to resume the pipeline.

    ", + "DeletePipeline": "

    Deletes a pipeline, its pipeline definition, and its run history. AWS Data Pipeline attempts to cancel instances associated with the pipeline that are currently being processed by task runners.

    Deleting a pipeline cannot be undone. You cannot query or restore a deleted pipeline. To temporarily pause a pipeline instead of deleting it, call SetStatus with the status set to PAUSE on individual components. Components that are paused by SetStatus can be resumed.

    ", + "DescribeObjects": "

    Gets the object definitions for a set of objects associated with the pipeline. Object definitions are composed of a set of fields that define the properties of the object.

    ", + "DescribePipelines": "

    Retrieves metadata about one or more pipelines. The information retrieved includes the name of the pipeline, the pipeline identifier, its current state, and the user account that owns the pipeline. Using account credentials, you can retrieve metadata about pipelines that you or your IAM users have created. If you are using an IAM user account, you can retrieve metadata about only those pipelines for which you have read permissions.

    To retrieve the full pipeline definition instead of metadata about the pipeline, call GetPipelineDefinition.

    ", + "EvaluateExpression": "

    Task runners call EvaluateExpression to evaluate a string in the context of the specified object. For example, a task runner can evaluate SQL queries stored in Amazon S3.

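    A sketch of evaluating an expression against a pipeline object with the same vendored Go client; the IDs and the expression string are placeholders:

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/datapipeline"
        )

        func main() {
            svc := datapipeline.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

            // Evaluate a string in the context of one object in the pipeline.
            out, err := svc.EvaluateExpression(&datapipeline.EvaluateExpressionInput{
                PipelineId: aws.String("df-06372391ZG65EXAMPLE"),
                ObjectId:   aws.String("Schedule"),
                Expression: aws.String("Started at #{startDateTime}"),
            })
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println(aws.StringValue(out.EvaluatedExpression))
        }
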
    ", + "GetPipelineDefinition": "

    Gets the definition of the specified pipeline. You can call GetPipelineDefinition to retrieve the pipeline definition that you provided using PutPipelineDefinition.

    ", + "ListPipelines": "

    Lists the pipeline identifiers for all active pipelines that you have permission to access.

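    Because ListPipelines is paginated by marker/hasMoreResults (see the paginators-1.json entry added later in this patch), the generated ListPipelinesPages helper can walk every page; a sketch with the vendored Go client:

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/datapipeline"
        )

        func main() {
            svc := datapipeline.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

            // The paginator resubmits the returned marker until
            // hasMoreResults is false.
            err := svc.ListPipelinesPages(&datapipeline.ListPipelinesInput{},
                func(page *datapipeline.ListPipelinesOutput, lastPage bool) bool {
                    for _, p := range page.PipelineIdList {
                        fmt.Printf("%s\t%s\n", aws.StringValue(p.Id), aws.StringValue(p.Name))
                    }
                    return true // keep paging
                })
            if err != nil {
                log.Fatal(err)
            }
        }
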
    ", + "PollForTask": "

    Task runners call PollForTask to receive a task to perform from AWS Data Pipeline. The task runner specifies which tasks it can perform by setting a value for the workerGroup parameter. The task returned can come from any of the pipelines that match the workerGroup value passed in by the task runner and that were launched using the IAM user credentials specified by the task runner.
    Task runners call PollForTask to receive a task to perform from AWS Data Pipeline. The task runner specifies which tasks it can perform by setting a value for the workerGroup parameter. The task returned can come from any of the pipelines that match the workerGroup value passed in by the task runner and that were launched using the IAM user credentials specified by the task runner.

    If tasks are ready in the work queue, PollForTask returns a response immediately. If no tasks are available in the queue, PollForTask uses long-polling and holds on to a poll connection for up to 90 seconds, during which time the first newly scheduled task is handed to the task runner. To accommodate this, set the socket timeout in your task runner to 90 seconds. The task runner should not call PollForTask again on the same workerGroup until it receives a response, and this can take up to 90 seconds.

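    A sketch of a poll honoring the 90-second long-poll window, assuming the vendored Go client; the worker group name is a placeholder, and setting the HTTP client timeout above 90 seconds is one way to follow the socket-timeout advice above:

        package main

        import (
            "fmt"
            "log"
            "net/http"
            "time"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/datapipeline"
        )

        func main() {
            // Long polling can hold the connection for up to 90 seconds, so
            // give the HTTP client a timeout comfortably above that.
            svc := datapipeline.New(session.New(&aws.Config{
                Region:     aws.String("us-east-1"),
                HTTPClient: &http.Client{Timeout: 100 * time.Second},
            }))

            out, err := svc.PollForTask(&datapipeline.PollForTaskInput{
                WorkerGroup: aws.String("MyWorkerGroup"), // exact, case-sensitive match
            })
            if err != nil {
                log.Fatal(err)
            }
            if out.TaskObject == nil {
                fmt.Println("no task available")
                return
            }
            fmt.Println("got task", aws.StringValue(out.TaskObject.TaskId))
        }
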
    ", + "PutPipelineDefinition": "

    Adds tasks, schedules, and preconditions to the specified pipeline. You can use PutPipelineDefinition to populate a new pipeline.

    PutPipelineDefinition also validates the configuration as it adds it to the pipeline. Changes to the pipeline are saved unless one of the following validation errors exists in the pipeline.

    1. An object is missing a name or identifier field.
    2. A string or reference field is empty.
    3. The number of objects in the pipeline exceeds the maximum allowed objects.
    4. The pipeline is in a FINISHED state.

    Pipeline object definitions are passed to the PutPipelineDefinition action and returned by the GetPipelineDefinition action.

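    A sketch of putting a one-object definition and inspecting the validation result, assuming the vendored Go client; the IDs, names, and field values are placeholders:

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/datapipeline"
        )

        func main() {
            svc := datapipeline.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

            // Every pipeline object needs an id, a name, and fields.
            out, err := svc.PutPipelineDefinition(&datapipeline.PutPipelineDefinitionInput{
                PipelineId: aws.String("df-06372391ZG65EXAMPLE"),
                PipelineObjects: []*datapipeline.PipelineObject{{
                    Id:   aws.String("Default"),
                    Name: aws.String("Default"),
                    Fields: []*datapipeline.Field{{
                        Key:         aws.String("workerGroup"),
                        StringValue: aws.String("MyWorkerGroup"),
                    }},
                }},
            })
            if err != nil {
                log.Fatal(err)
            }
            // If Errored is true, the definition was saved but cannot be
            // activated until the reported problems are fixed and re-put.
            if aws.BoolValue(out.Errored) {
                for _, ve := range out.ValidationErrors {
                    fmt.Println(aws.StringValue(ve.Id), aws.StringValueSlice(ve.Errors))
                }
            }
        }
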
    ", + "QueryObjects": "

    Queries the specified pipeline for the names of objects that match the specified set of conditions.

    ", + "RemoveTags": "

    Removes existing tags from the specified pipeline.

    ", + "ReportTaskProgress": "

    Task runners call ReportTaskProgress when assigned a task to acknowledge that they have the task. If the web service does not receive this acknowledgement within 2 minutes, it assigns the task in a subsequent PollForTask call. After this initial acknowledgement, the task runner only needs to report progress every 15 minutes to maintain its ownership of the task. You can change this reporting time from 15 minutes by specifying a reportProgressTimeout field in your pipeline.

    If a task runner does not report its status after 5 minutes, AWS Data Pipeline assumes that the task runner is unable to process the task and reassigns the task in a subsequent response to PollForTask. Task runners should call ReportTaskProgress every 60 seconds.

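    A sketch of the acknowledgement/renewal loop, assuming the vendored Go client and a task ID obtained from PollForTask (the placeholder string stands in for it):

        package main

        import (
            "log"
            "time"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/datapipeline"
        )

        func main() {
            svc := datapipeline.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))
            taskId := aws.String("task-id-from-PollForTask") // placeholder

            for {
                // The first call acknowledges the task; later calls renew ownership.
                out, err := svc.ReportTaskProgress(&datapipeline.ReportTaskProgressInput{TaskId: taskId})
                if err != nil {
                    log.Fatal(err)
                }
                if aws.BoolValue(out.Canceled) {
                    log.Println("task canceled; no SetTaskStatus needed")
                    return
                }
                time.Sleep(60 * time.Second) // the 60-second cadence suggested above
            }
        }
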
    ", + "ReportTaskRunnerHeartbeat": "

    Task runners call ReportTaskRunnerHeartbeat every 15 minutes to indicate that they are operational. If the AWS Data Pipeline Task Runner is launched on a resource managed by AWS Data Pipeline, the web service can use this call to detect when the task runner application has failed and launch a new instance.

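    A sketch of the heartbeat loop, assuming the vendored Go client; the task runner ID and worker group are caller-chosen placeholders:

        package main

        import (
            "log"
            "os"
            "time"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/datapipeline"
        )

        func main() {
            svc := datapipeline.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))
            host, _ := os.Hostname()

            for {
                out, err := svc.ReportTaskRunnerHeartbeat(&datapipeline.ReportTaskRunnerHeartbeatInput{
                    TaskrunnerId: aws.String("my-task-runner-0001"), // must be unique per runner
                    WorkerGroup:  aws.String("MyWorkerGroup"),
                    Hostname:     aws.String(host),
                })
                if err != nil {
                    log.Fatal(err)
                }
                if aws.BoolValue(out.Terminate) {
                    log.Println("service requested termination")
                    return
                }
                time.Sleep(15 * time.Minute)
            }
        }
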
    ", + "SetStatus": "

    Requests that the status of the specified physical or logical pipeline objects be updated in the specified pipeline. This update might not occur immediately, but is eventually consistent. The status that can be set depends on the type of object (for example, DataNode or Activity). You cannot perform this operation on FINISHED pipelines and attempting to do so returns InvalidRequestException.

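    A sketch of pausing a component with SetStatus, assuming the vendored Go client; the pipeline and object IDs are placeholders:

        package main

        import (
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/datapipeline"
        )

        func main() {
            svc := datapipeline.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

            // Pause a component; use RESUME later to continue it. The call
            // returns no output body, so only the error matters.
            _, err := svc.SetStatus(&datapipeline.SetStatusInput{
                PipelineId: aws.String("df-06372391ZG65EXAMPLE"),
                ObjectIds:  []*string{aws.String("MyCopyActivity")},
                Status:     aws.String("PAUSE"),
            })
            if err != nil {
                log.Fatal(err) // e.g. InvalidRequestException on a FINISHED pipeline
            }
        }
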
    ", + "SetTaskStatus": "

    Task runners call SetTaskStatus to notify AWS Data Pipeline that a task is completed and provide information about the final status. A task runner makes this call regardless of whether the task was successful. A task runner does not need to call SetTaskStatus for tasks that are canceled by the web service during a call to ReportTaskProgress.

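    A sketch of reporting a final FAILED status, assuming the vendored Go client; the task ID and error details are placeholders:

        package main

        import (
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/datapipeline"
        )

        func main() {
            svc := datapipeline.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))
            taskId := aws.String("task-id-from-PollForTask") // placeholder

            // Error fields are recorded on the physical attempt object.
            _, err := svc.SetTaskStatus(&datapipeline.SetTaskStatusInput{
                TaskId:       taskId,
                TaskStatus:   aws.String("FAILED"), // FINISHED | FAILED | FALSE
                ErrorId:      aws.String("MyCopyError"), // must not start with "Service_"
                ErrorMessage: aws.String("copy step exited non-zero"),
            })
            if err != nil {
                log.Fatal(err)
            }
        }
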
    ", + "ValidatePipelineDefinition": "

    Validates the specified pipeline definition to ensure that it is well formed and can be run without error.

    " + }, + "service": "

    AWS Data Pipeline configures and manages a data-driven workflow called a pipeline. AWS Data Pipeline handles the details of scheduling and ensuring that data dependencies are met so that your application can focus on processing the data.

    AWS Data Pipeline provides a JAR implementation of a task runner called AWS Data Pipeline Task Runner. AWS Data Pipeline Task Runner provides logic for common data management scenarios, such as performing database queries and running data analysis using Amazon Elastic MapReduce (Amazon EMR). You can use AWS Data Pipeline Task Runner as your task runner, or you can write your own task runner to provide custom data management.

    AWS Data Pipeline implements two main sets of functionality. Use the first set to create a pipeline and define data sources, schedules, dependencies, and the transforms to be performed on the data. Use the second set in your task runner application to receive the next task ready for processing. The logic for performing the task, such as querying the data, running data analysis, or converting the data from one format to another, is contained within the task runner. The task runner performs the task assigned to it by the web service, reporting progress to the web service as it does so. When the task is done, the task runner reports the final success or failure of the task to the web service.

    ", + "shapes": { + "ActivatePipelineInput": { + "base": "

    Contains the parameters for ActivatePipeline.

    ", + "refs": { + } + }, + "ActivatePipelineOutput": { + "base": "

    Contains the output of ActivatePipeline.

    ", + "refs": { + } + }, + "AddTagsInput": { + "base": "

    Contains the parameters for AddTags.

    ", + "refs": { + } + }, + "AddTagsOutput": { + "base": "

    Contains the output of AddTags.

    ", + "refs": { + } + }, + "CreatePipelineInput": { + "base": "

    Contains the parameters for CreatePipeline.

    ", + "refs": { + } + }, + "CreatePipelineOutput": { + "base": "

    Contains the output of CreatePipeline.

    ", + "refs": { + } + }, + "DeactivatePipelineInput": { + "base": "

    Contains the parameters for DeactivatePipeline.

    ", + "refs": { + } + }, + "DeactivatePipelineOutput": { + "base": "

    Contains the output of DeactivatePipeline.

    ", + "refs": { + } + }, + "DeletePipelineInput": { + "base": "

    Contains the parameters for DeletePipeline.

    ", + "refs": { + } + }, + "DescribeObjectsInput": { + "base": "

    Contains the parameters for DescribeObjects.

    ", + "refs": { + } + }, + "DescribeObjectsOutput": { + "base": "

    Contains the output of DescribeObjects.

    ", + "refs": { + } + }, + "DescribePipelinesInput": { + "base": "

    Contains the parameters for DescribePipelines.

    ", + "refs": { + } + }, + "DescribePipelinesOutput": { + "base": "

    Contains the output of DescribePipelines.

    ", + "refs": { + } + }, + "EvaluateExpressionInput": { + "base": "

    Contains the parameters for EvaluateExpression.

    ", + "refs": { + } + }, + "EvaluateExpressionOutput": { + "base": "

    Contains the output of EvaluateExpression.

    ", + "refs": { + } + }, + "Field": { + "base": "

    A key-value pair that describes a property of a pipeline object. The value is specified as either a string value (StringValue) or a reference to another object (RefValue) but not as both.

    ", + "refs": { + "fieldList$member": null + } + }, + "GetPipelineDefinitionInput": { + "base": "

    Contains the parameters for GetPipelineDefinition.

    ", + "refs": { + } + }, + "GetPipelineDefinitionOutput": { + "base": "

    Contains the output of GetPipelineDefinition.

    ", + "refs": { + } + }, + "InstanceIdentity": { + "base": "

    Identity information for the EC2 instance that is hosting the task runner. You can get this value by calling a metadata URI from the EC2 instance. For more information, see Instance Metadata in the Amazon Elastic Compute Cloud User Guide. Passing in this value proves that your task runner is running on an EC2 instance, and ensures the proper AWS Data Pipeline service charges are applied to your pipeline.

    ", + "refs": { + "PollForTaskInput$instanceIdentity": "

    Identity information for the EC2 instance that is hosting the task runner. You can get this value from the instance using http://169.254.169.254/latest/meta-data/instance-id. For more information, see Instance Metadata in the Amazon Elastic Compute Cloud User Guide. Passing in this value proves that your task runner is running on an EC2 instance, and ensures the proper AWS Data Pipeline service charges are applied to your pipeline.

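    A sketch of building an InstanceIdentity from the metadata service, assuming the standard EC2 dynamic-data paths for the signed identity document (an assumption here, not stated in these docs); this only works when run on an EC2 instance:

        package main

        import (
            "io/ioutil"
            "log"
            "net/http"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/datapipeline"
        )

        // fetch reads one instance-metadata URL; illustrative helper.
        func fetch(url string) string {
            resp, err := http.Get(url)
            if err != nil {
                log.Fatal(err)
            }
            defer resp.Body.Close()
            b, err := ioutil.ReadAll(resp.Body)
            if err != nil {
                log.Fatal(err)
            }
            return string(b)
        }

        func main() {
            // Assumed dynamic-data paths for the document and its signature.
            doc := fetch("http://169.254.169.254/latest/dynamic/instance-identity/document")
            sig := fetch("http://169.254.169.254/latest/dynamic/instance-identity/signature")

            identity := &datapipeline.InstanceIdentity{
                Document:  aws.String(doc),
                Signature: aws.String(sig),
            }
            _ = identity // pass as PollForTaskInput.InstanceIdentity
        }
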
    " + } + }, + "InternalServiceError": { + "base": "

    An internal service error occurred.

    ", + "refs": { + } + }, + "InvalidRequestException": { + "base": "

    The request was not valid. Verify that your request was properly formatted, that the signature was generated with the correct credentials, and that you haven't exceeded any of the service limits for your account.

    ", + "refs": { + } + }, + "ListPipelinesInput": { + "base": "

    Contains the parameters for ListPipelines.

    ", + "refs": { + } + }, + "ListPipelinesOutput": { + "base": "

    Contains the output of ListPipelines.

    ", + "refs": { + } + }, + "Operator": { + "base": "

    Contains a logical operation for comparing the value of a field with a specified value.

    ", + "refs": { + "Selector$operator": null + } + }, + "OperatorType": { + "base": null, + "refs": { + "Operator$type": "

    The logical operation to be performed: equal (EQ), equal reference (REF_EQ), less than or equal (LE), greater than or equal (GE), or between (BETWEEN). Equal reference (REF_EQ) can be used only with reference fields. The other comparison types can be used only with String fields. The comparison types you can use apply only to certain object fields, as detailed below.

    The comparison operators EQ and REF_EQ act on the following fields:

    • name
    • @sphere
    • parent
    • @componentParent
    • @instanceParent
    • @status
    • @scheduledStartTime
    • @scheduledEndTime
    • @actualStartTime
    • @actualEndTime

    The comparison operators GE, LE, and BETWEEN act on the following fields:

    • @scheduledStartTime
    • @scheduledEndTime
    • @actualStartTime
    • @actualEndTime

    Note that fields beginning with the at sign (@) are read-only and set by the web service. When you name fields, you should choose names containing only alphanumeric characters, as symbols may be reserved by AWS Data Pipeline. User-defined fields that you add to a pipeline should have names prefixed with the string \"my\".

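    A sketch of a QueryObjects call that selects instance objects by the read-only @status field, assuming the vendored Go client; the pipeline ID is a placeholder:

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/datapipeline"
        )

        func main() {
            svc := datapipeline.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

            // EQ on @status is one of the operator/field pairings listed above.
            out, err := svc.QueryObjects(&datapipeline.QueryObjectsInput{
                PipelineId: aws.String("df-06372391ZG65EXAMPLE"),
                Sphere:     aws.String("INSTANCE"),
                Query: &datapipeline.Query{
                    Selectors: []*datapipeline.Selector{{
                        FieldName: aws.String("@status"),
                        Operator: &datapipeline.Operator{
                            Type:   aws.String("EQ"),
                            Values: []*string{aws.String("FINISHED")},
                        },
                    }},
                },
            })
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println(aws.StringValueSlice(out.Ids))
        }
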
    " + } + }, + "ParameterAttribute": { + "base": "

    The attributes allowed or specified with a parameter object.

    ", + "refs": { + "ParameterAttributeList$member": null + } + }, + "ParameterAttributeList": { + "base": null, + "refs": { + "ParameterObject$attributes": "

    The attributes of the parameter object.

    " + } + }, + "ParameterObject": { + "base": "

    Contains information about a parameter object.

    ", + "refs": { + "ParameterObjectList$member": null + } + }, + "ParameterObjectList": { + "base": null, + "refs": { + "GetPipelineDefinitionOutput$parameterObjects": "

    The parameter objects used in the pipeline definition.

    ", + "PutPipelineDefinitionInput$parameterObjects": "

    The parameter objects used with the pipeline.

    ", + "ValidatePipelineDefinitionInput$parameterObjects": "

    The parameter objects used with the pipeline.

    " + } + }, + "ParameterValue": { + "base": "

    A value or list of parameter values.

    ", + "refs": { + "ParameterValueList$member": null + } + }, + "ParameterValueList": { + "base": null, + "refs": { + "ActivatePipelineInput$parameterValues": "

    A list of parameter values to pass to the pipeline at activation.

    ", + "GetPipelineDefinitionOutput$parameterValues": "

    The parameter values used in the pipeline definition.

    ", + "PutPipelineDefinitionInput$parameterValues": "

    The parameter values used with the pipeline.

    ", + "ValidatePipelineDefinitionInput$parameterValues": "

    The parameter values used with the pipeline.

    " + } + }, + "PipelineDeletedException": { + "base": "

    The specified pipeline has been deleted.

    ", + "refs": { + } + }, + "PipelineDescription": { + "base": "

    Contains pipeline metadata.

    ", + "refs": { + "PipelineDescriptionList$member": null + } + }, + "PipelineDescriptionList": { + "base": null, + "refs": { + "DescribePipelinesOutput$pipelineDescriptionList": "

    An array of descriptions for the specified pipelines.

    " + } + }, + "PipelineIdName": { + "base": "

    Contains the name and identifier of a pipeline.

    ", + "refs": { + "pipelineList$member": null + } + }, + "PipelineNotFoundException": { + "base": "

    The specified pipeline was not found. Verify that you used the correct user and account identifiers.

    ", + "refs": { + } + }, + "PipelineObject": { + "base": "

    Contains information about a pipeline object. This can be a logical, physical, or physical attempt pipeline object. The complete set of components of a pipeline defines the pipeline.

    ", + "refs": { + "PipelineObjectList$member": null, + "PipelineObjectMap$value": null + } + }, + "PipelineObjectList": { + "base": null, + "refs": { + "DescribeObjectsOutput$pipelineObjects": "

    An array of object definitions.

    ", + "GetPipelineDefinitionOutput$pipelineObjects": "

    The objects defined in the pipeline.

    ", + "PutPipelineDefinitionInput$pipelineObjects": "

    The objects that define the pipeline. These objects overwrite the existing pipeline definition.

    ", + "ValidatePipelineDefinitionInput$pipelineObjects": "

    The objects that define the pipeline changes to validate against the pipeline.

    " + } + }, + "PipelineObjectMap": { + "base": null, + "refs": { + "TaskObject$objects": "

    Connection information for the location where the task runner will publish the output of the task.

    " + } + }, + "PollForTaskInput": { + "base": "

    Contains the parameters for PollForTask.

    ", + "refs": { + } + }, + "PollForTaskOutput": { + "base": "

    Contains the output of PollForTask.

    ", + "refs": { + } + }, + "PutPipelineDefinitionInput": { + "base": "

    Contains the parameters for PutPipelineDefinition.

    ", + "refs": { + } + }, + "PutPipelineDefinitionOutput": { + "base": "

    Contains the output of PutPipelineDefinition.

    ", + "refs": { + } + }, + "Query": { + "base": "

    Defines the query to run against an object.

    ", + "refs": { + "QueryObjectsInput$query": "

    The query that defines the objects to be returned. The Query object can contain a maximum of ten selectors. The conditions in the query are limited to top-level String fields in the object. These filters can be applied to components, instances, and attempts.

    " + } + }, + "QueryObjectsInput": { + "base": "

    Contains the parameters for QueryObjects.

    ", + "refs": { + } + }, + "QueryObjectsOutput": { + "base": "

    Contains the output of QueryObjects.

    ", + "refs": { + } + }, + "RemoveTagsInput": { + "base": "

    Contains the parameters for RemoveTags.

    ", + "refs": { + } + }, + "RemoveTagsOutput": { + "base": "

    Contains the output of RemoveTags.

    ", + "refs": { + } + }, + "ReportTaskProgressInput": { + "base": "

    Contains the parameters for ReportTaskProgress.

    ", + "refs": { + } + }, + "ReportTaskProgressOutput": { + "base": "

    Contains the output of ReportTaskProgress.

    ", + "refs": { + } + }, + "ReportTaskRunnerHeartbeatInput": { + "base": "

    Contains the parameters for ReportTaskRunnerHeartbeat.

    ", + "refs": { + } + }, + "ReportTaskRunnerHeartbeatOutput": { + "base": "

    Contains the output of ReportTaskRunnerHeartbeat.

    ", + "refs": { + } + }, + "Selector": { + "base": "

    A comparison that is used to determine whether a query should return this object.

    ", + "refs": { + "SelectorList$member": null + } + }, + "SelectorList": { + "base": "

    The list of Selectors that define queries on individual fields.

    ", + "refs": { + "Query$selectors": "

    List of selectors that define the query. An object must satisfy all of the selectors to match the query.

    " + } + }, + "SetStatusInput": { + "base": "

    Contains the parameters for SetStatus.

    ", + "refs": { + } + }, + "SetTaskStatusInput": { + "base": "

    Contains the parameters for SetTaskStatus.

    ", + "refs": { + } + }, + "SetTaskStatusOutput": { + "base": "

    Contains the output of SetTaskStatus.

    ", + "refs": { + } + }, + "Tag": { + "base": "

    Tags are key/value pairs defined by a user and associated with a pipeline to control access. AWS Data Pipeline allows you to associate ten tags per pipeline. For more information, see Controlling User Access to Pipelines in the AWS Data Pipeline Developer Guide.

    ", + "refs": { + "tagList$member": null + } + }, + "TaskNotFoundException": { + "base": "

    The specified task was not found.

    ", + "refs": { + } + }, + "TaskObject": { + "base": "

    Contains information about a pipeline task that is assigned to a task runner.

    ", + "refs": { + "PollForTaskOutput$taskObject": "

    The information needed to complete the task that is being assigned to the task runner. One of the fields returned in this object is taskId, which contains an identifier for the task being assigned. The calling task runner uses taskId in subsequent calls to ReportTaskProgress and SetTaskStatus.

    " + } + }, + "TaskStatus": { + "base": null, + "refs": { + "SetTaskStatusInput$taskStatus": "

    If FINISHED, the task successfully completed. If FAILED, the task ended unsuccessfully. Preconditions use FALSE.

    " + } + }, + "ValidatePipelineDefinitionInput": { + "base": "

    Contains the parameters for ValidatePipelineDefinition.

    ", + "refs": { + } + }, + "ValidatePipelineDefinitionOutput": { + "base": "

    Contains the output of ValidatePipelineDefinition.

    ", + "refs": { + } + }, + "ValidationError": { + "base": "

    Defines a validation error. Validation errors prevent pipeline activation. The set of validation errors that can be returned are defined by AWS Data Pipeline.

    ", + "refs": { + "ValidationErrors$member": null + } + }, + "ValidationErrors": { + "base": null, + "refs": { + "PutPipelineDefinitionOutput$validationErrors": "

    The validation errors that are associated with the objects defined in pipelineObjects.

    ", + "ValidatePipelineDefinitionOutput$validationErrors": "

    Any validation errors that were found.

    " + } + }, + "ValidationWarning": { + "base": "

    Defines a validation warning. Validation warnings do not prevent pipeline activation. The set of validation warnings that can be returned are defined by AWS Data Pipeline.

    ", + "refs": { + "ValidationWarnings$member": null + } + }, + "ValidationWarnings": { + "base": null, + "refs": { + "PutPipelineDefinitionOutput$validationWarnings": "

    The validation warnings that are associated with the objects defined in pipelineObjects.

    ", + "ValidatePipelineDefinitionOutput$validationWarnings": "

    Any validation warnings that were found.

    " + } + }, + "attributeNameString": { + "base": null, + "refs": { + "ParameterAttribute$key": "

    The field identifier.

    " + } + }, + "attributeValueString": { + "base": null, + "refs": { + "ParameterAttribute$stringValue": "

    The field value, expressed as a String.

    " + } + }, + "boolean": { + "base": null, + "refs": { + "DescribeObjectsInput$evaluateExpressions": "

    Indicates whether any expressions in the object should be evaluated when the object descriptions are returned.

    ", + "DescribeObjectsOutput$hasMoreResults": "

    Indicates whether there are more results to return.

    ", + "ListPipelinesOutput$hasMoreResults": "

    Indicates whether there are more results that can be obtained by a subsequent call.

    ", + "PutPipelineDefinitionOutput$errored": "

    Indicates whether there were validation errors. If there were, the pipeline definition is stored but cannot be activated until you correct the pipeline and call PutPipelineDefinition to commit the corrected pipeline.

    ", + "QueryObjectsOutput$hasMoreResults": "

    Indicates whether there are more results that can be obtained by a subsequent call.

    ", + "ReportTaskProgressOutput$canceled": "

    If true, the calling task runner should cancel processing of the task. The task runner does not need to call SetTaskStatus for canceled tasks.

    ", + "ReportTaskRunnerHeartbeatOutput$terminate": "

    Indicates whether the calling task runner should terminate.

    ", + "ValidatePipelineDefinitionOutput$errored": "

    Indicates whether there were validation errors.

    " + } + }, + "cancelActive": { + "base": null, + "refs": { + "DeactivatePipelineInput$cancelActive": "

    Indicates whether to cancel any running objects. The default is true, which sets the state of any running objects to CANCELED. If this value is false, the pipeline is deactivated after all running objects finish.

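    A sketch of a graceful deactivation that lets running objects finish, assuming the vendored Go client; the pipeline ID is a placeholder:

        package main

        import (
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/datapipeline"
        )

        func main() {
            svc := datapipeline.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

            // With cancelActive false, deactivation waits for running objects
            // to finish instead of setting them to CANCELED (the default).
            _, err := svc.DeactivatePipeline(&datapipeline.DeactivatePipelineInput{
                PipelineId:   aws.String("df-06372391ZG65EXAMPLE"),
                CancelActive: aws.Bool(false),
            })
            if err != nil {
                log.Fatal(err)
            }
        }
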
    " + } + }, + "errorMessage": { + "base": null, + "refs": { + "InternalServiceError$message": "

    Description of the error message.

    ", + "InvalidRequestException$message": "

    Description of the error message.

    ", + "PipelineDeletedException$message": "

    Description of the error message.

    ", + "PipelineNotFoundException$message": "

    Description of the error message.

    ", + "SetTaskStatusInput$errorMessage": "

    If an error occurred during the task, this value specifies a text description of the error. This value is set on the physical attempt object. It is used to display error information to the user. The web service does not parse this value.

    ", + "TaskNotFoundException$message": "

    Description of the error message.

    " + } + }, + "fieldList": { + "base": null, + "refs": { + "PipelineDescription$fields": "

    A list of read-only fields that contain metadata about the pipeline: @userId, @accountId, and @pipelineState.

    ", + "PipelineObject$fields": "

    Key-value pairs that define the properties of the object.

    ", + "ReportTaskProgressInput$fields": "

    Key-value pairs that define the properties of the ReportTaskProgressInput object.

    " + } + }, + "fieldNameString": { + "base": null, + "refs": { + "Field$key": "

    The field identifier.

    ", + "Field$refValue": "

    The field value, expressed as the identifier of another object.

    ", + "ParameterObject$id": "

    The ID of the parameter object.

    ", + "ParameterValue$id": "

    The ID of the parameter value.

    " + } + }, + "fieldStringValue": { + "base": null, + "refs": { + "Field$stringValue": "

    The field value, expressed as a String.

    ", + "ParameterValue$stringValue": "

    The field value, expressed as a String.

    " + } + }, + "id": { + "base": null, + "refs": { + "ActivatePipelineInput$pipelineId": "

    The ID of the pipeline.

    ", + "AddTagsInput$pipelineId": "

    The ID of the pipeline.

    ", + "CreatePipelineInput$name": "

    The name for the pipeline. You can use the same name for multiple pipelines associated with your AWS account, because AWS Data Pipeline assigns each pipeline a unique pipeline identifier.

    ", + "CreatePipelineInput$uniqueId": "

    A unique identifier. This identifier is not the same as the pipeline identifier assigned by AWS Data Pipeline. You are responsible for defining the format and ensuring the uniqueness of this identifier. You use this parameter to ensure idempotency during repeated calls to CreatePipeline. For example, if the first call to CreatePipeline does not succeed, you can pass in the same unique identifier and pipeline name combination on a subsequent call to CreatePipeline. CreatePipeline ensures that if a pipeline already exists with the same name and unique identifier, a new pipeline is not created. Instead, you'll receive the pipeline identifier from the previous attempt. The uniqueness of the name and unique identifier combination is scoped to the AWS account or IAM user credentials.

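    A sketch of an idempotent create using a caller-chosen uniqueId, assuming the vendored Go client; the name and token are placeholders:

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/datapipeline"
        )

        func main() {
            svc := datapipeline.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

            // Retrying with the same name + uniqueId returns the pipeline ID
            // from the first successful attempt instead of creating a duplicate.
            out, err := svc.CreatePipeline(&datapipeline.CreatePipelineInput{
                Name:     aws.String("clone-finder"),
                UniqueId: aws.String("clone-finder-2017-07-26"), // idempotency token
            })
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println("pipeline:", aws.StringValue(out.PipelineId))
        }
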
    ", + "CreatePipelineOutput$pipelineId": "

    The ID that AWS Data Pipeline assigns the newly created pipeline. For example, df-06372391ZG65EXAMPLE.

    ", + "DeactivatePipelineInput$pipelineId": "

    The ID of the pipeline.

    ", + "DeletePipelineInput$pipelineId": "

    The ID of the pipeline.

    ", + "DescribeObjectsInput$pipelineId": "

    The ID of the pipeline that contains the object definitions.

    ", + "EvaluateExpressionInput$pipelineId": "

    The ID of the pipeline.

    ", + "EvaluateExpressionInput$objectId": "

    The ID of the object.

    ", + "GetPipelineDefinitionInput$pipelineId": "

    The ID of the pipeline.

    ", + "PipelineDescription$pipelineId": "

    The pipeline identifier that was assigned by AWS Data Pipeline. This is a string of the form df-297EG78HU43EEXAMPLE.

    ", + "PipelineDescription$name": "

    The name of the pipeline.

    ", + "PipelineIdName$id": "

    The ID of the pipeline that was assigned by AWS Data Pipeline. This is a string of the form df-297EG78HU43EEXAMPLE.

    ", + "PipelineIdName$name": "

    The name of the pipeline.

    ", + "PipelineObject$id": "

    The ID of the object.

    ", + "PipelineObject$name": "

    The name of the object.

    ", + "PipelineObjectMap$key": null, + "PollForTaskInput$hostname": "

    The public DNS name of the calling task runner.

    ", + "PutPipelineDefinitionInput$pipelineId": "

    The ID of the pipeline.

    ", + "QueryObjectsInput$pipelineId": "

    The ID of the pipeline.

    ", + "RemoveTagsInput$pipelineId": "

    The ID of the pipeline.

    ", + "ReportTaskRunnerHeartbeatInput$taskrunnerId": "

    The ID of the task runner. This value should be unique across your AWS account. In the case of AWS Data Pipeline Task Runner launched on a resource managed by AWS Data Pipeline, the web service provides a unique identifier when it launches the application. If you have written a custom task runner, you should assign a unique identifier for the task runner.

    ", + "ReportTaskRunnerHeartbeatInput$hostname": "

    The public DNS name of the task runner.

    ", + "SetStatusInput$pipelineId": "

    The ID of the pipeline that contains the objects.

    ", + "TaskObject$pipelineId": "

    The ID of the pipeline that provided the task.

    ", + "TaskObject$attemptId": "

    The ID of the pipeline task attempt object. AWS Data Pipeline uses this value to track how many times a task is attempted.

    ", + "ValidatePipelineDefinitionInput$pipelineId": "

    The ID of the pipeline.

    ", + "ValidationError$id": "

    The identifier of the object that contains the validation error.

    ", + "ValidationWarning$id": "

    The identifier of the object that contains the validation warning.

    ", + "idList$member": null + } + }, + "idList": { + "base": null, + "refs": { + "DescribeObjectsInput$objectIds": "

    The IDs of the pipeline objects that contain the definitions to be described. You can pass as many as 25 identifiers in a single call to DescribeObjects.

    ", + "DescribePipelinesInput$pipelineIds": "

    The IDs of the pipelines to describe. You can pass as many as 25 identifiers in a single call. To obtain pipeline IDs, call ListPipelines.

    ", + "QueryObjectsOutput$ids": "

    The identifiers that match the query selectors.

    ", + "SetStatusInput$objectIds": "

    The IDs of the objects. The corresponding objects can be either physical objects or components, but not a mix of both types.

    " + } + }, + "int": { + "base": null, + "refs": { + "QueryObjectsInput$limit": "

    The maximum number of object names that QueryObjects will return in a single call. The default value is 100.

    " + } + }, + "longString": { + "base": null, + "refs": { + "EvaluateExpressionInput$expression": "

    The expression to evaluate.

    ", + "EvaluateExpressionOutput$evaluatedExpression": "

    The evaluated expression.

    " + } + }, + "pipelineList": { + "base": null, + "refs": { + "ListPipelinesOutput$pipelineIdList": "

    The pipeline identifiers. If you require additional information about the pipelines, you can use these identifiers to call DescribePipelines and GetPipelineDefinition.

    " + } + }, + "string": { + "base": null, + "refs": { + "CreatePipelineInput$description": "

    The description for the pipeline.

    ", + "DescribeObjectsInput$marker": "

    The starting point for the results to be returned. For the first call, this value should be empty. As long as there are more results, continue to call DescribeObjects with the marker value from the previous call to retrieve the next set of results.

    ", + "DescribeObjectsOutput$marker": "

    The starting point for the next page of results. To view the next page of results, call DescribeObjects again with this marker value. If the value is null, there are no more results.

    ", + "GetPipelineDefinitionInput$version": "

    The version of the pipeline definition to retrieve. Set this parameter to latest (default) to use the last definition saved to the pipeline or active to use the last definition that was activated.

    ", + "InstanceIdentity$document": "

    A description of an EC2 instance that is generated when the instance is launched and exposed to the instance via the instance metadata service in the form of a JSON representation of an object.

    ", + "InstanceIdentity$signature": "

    A signature which can be used to verify the accuracy and authenticity of the information provided in the instance identity document.

    ", + "ListPipelinesInput$marker": "

    The starting point for the results to be returned. For the first call, this value should be empty. As long as there are more results, continue to call ListPipelines with the marker value from the previous call to retrieve the next set of results.

    ", + "ListPipelinesOutput$marker": "

    The starting point for the next page of results. To view the next page of results, call ListPipelines again with this marker value. If the value is null, there are no more results.

    ", + "PipelineDescription$description": "

    Description of the pipeline.

    ", + "PollForTaskInput$workerGroup": "

    The type of task the task runner is configured to accept and process. The worker group is set as a field on objects in the pipeline when they are created. You can only specify a single value for workerGroup in the call to PollForTask. There are no wildcard values permitted in workerGroup; the string must be an exact, case-sensitive, match.

    ", + "QueryObjectsInput$sphere": "

    Indicates whether the query applies to components or instances. The possible values are: COMPONENT, INSTANCE, and ATTEMPT.

    ", + "QueryObjectsInput$marker": "

    The starting point for the results to be returned. For the first call, this value should be empty. As long as there are more results, continue to call QueryObjects with the marker value from the previous call to retrieve the next set of results.

    ", + "QueryObjectsOutput$marker": "

    The starting point for the next page of results. To view the next page of results, call QueryObjects again with this marker value. If the value is null, there are no more results.

    ", + "ReportTaskRunnerHeartbeatInput$workerGroup": "

    The type of task the task runner is configured to accept and process. The worker group is set as a field on objects in the pipeline when they are created. You can only specify a single value for workerGroup. There are no wildcard values permitted in workerGroup; the string must be an exact, case-sensitive, match.

    ", + "Selector$fieldName": "

    The name of the field that the operator will be applied to. The field name is the \"key\" portion of the field definition in the pipeline definition syntax that is used by the AWS Data Pipeline API. If the field is not set on the object, the condition fails.

    ", + "SetStatusInput$status": "

    The status to be set on all the objects specified in objectIds. For components, use PAUSE or RESUME. For instances, use TRY_CANCEL, RERUN, or MARK_FINISHED.

    ", + "SetTaskStatusInput$errorId": "

    If an error occurred during the task, this value specifies the error code. This value is set on the physical attempt object. It is used to display error information to the user. It should not start with the string \"Service_\", which is reserved by the system.

    ", + "SetTaskStatusInput$errorStackTrace": "

    If an error occurred during the task, this value specifies the stack trace associated with the error. This value is set on the physical attempt object. It is used to display error information to the user. The web service does not parse this value.

    ", + "stringList$member": null + } + }, + "stringList": { + "base": null, + "refs": { + "Operator$values": "

    The value that the actual field value will be compared with.

    ", + "RemoveTagsInput$tagKeys": "

    The keys of the tags to remove.

    " + } + }, + "tagKey": { + "base": null, + "refs": { + "Tag$key": "

    The key name of a tag defined by a user. For more information, see Controlling User Access to Pipelines in the AWS Data Pipeline Developer Guide.

    " + } + }, + "tagList": { + "base": null, + "refs": { + "AddTagsInput$tags": "

    The tags to add, as key/value pairs.

    ", + "CreatePipelineInput$tags": "

    A list of tags to associate with the pipeline at creation. Tags let you control access to pipelines. For more information, see Controlling User Access to Pipelines in the AWS Data Pipeline Developer Guide.

    ", + "PipelineDescription$tags": "

    A list of tags associated with a pipeline. Tags let you control access to pipelines. For more information, see Controlling User Access to Pipelines in the AWS Data Pipeline Developer Guide.

    " + } + }, + "tagValue": { + "base": null, + "refs": { + "Tag$value": "

    The optional value portion of a tag defined by a user. For more information, see Controlling User Access to Pipelines in the AWS Data Pipeline Developer Guide.

    " + } + }, + "taskId": { + "base": null, + "refs": { + "ReportTaskProgressInput$taskId": "

    The ID of the task assigned to the task runner. This value is provided in the response for PollForTask.

    ", + "SetTaskStatusInput$taskId": "

    The ID of the task assigned to the task runner. This value is provided in the response for PollForTask.

    ", + "TaskObject$taskId": "

    An internal identifier for the task. This ID is passed to the SetTaskStatus and ReportTaskProgress actions.

    " + } + }, + "timestamp": { + "base": null, + "refs": { + "ActivatePipelineInput$startTimestamp": "

    The date and time to resume the pipeline. By default, the pipeline resumes from the last completed execution.

    " + } + }, + "validationMessage": { + "base": null, + "refs": { + "validationMessages$member": null + } + }, + "validationMessages": { + "base": null, + "refs": { + "ValidationError$errors": "

    A description of the validation error.

    ", + "ValidationWarning$warnings": "

    A description of the validation warning.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/datapipeline/2012-10-29/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/datapipeline/2012-10-29/paginators-1.json new file mode 100644 index 000000000..db941936b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/datapipeline/2012-10-29/paginators-1.json @@ -0,0 +1,26 @@ +{ + "pagination": { + "ListPipelines": { + "input_token": "marker", + "output_token": "marker", + "more_results": "hasMoreResults", + "result_key": "pipelineIdList" + }, + "DescribeObjects": { + "input_token": "marker", + "output_token": "marker", + "more_results": "hasMoreResults", + "result_key": "pipelineObjects" + }, + "DescribePipelines": { + "result_key": "pipelineDescriptionList" + }, + "QueryObjects": { + "input_token": "marker", + "output_token": "marker", + "more_results": "hasMoreResults", + "limit_key": "limit", + "result_key": "ids" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/devicefarm/2015-06-23/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/devicefarm/2015-06-23/api-2.json new file mode 100644 index 000000000..0c3eeedca --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/devicefarm/2015-06-23/api-2.json @@ -0,0 +1,1819 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-06-23", + "endpointPrefix":"devicefarm", + "jsonVersion":"1.1", + "protocol":"json", + "serviceFullName":"AWS Device Farm", + "signatureVersion":"v4", + "targetPrefix":"DeviceFarm_20150623" + }, + "operations":{ + "CreateDevicePool":{ + "name":"CreateDevicePool", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDevicePoolRequest"}, + "output":{"shape":"CreateDevicePoolResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "CreateProject":{ + "name":"CreateProject", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateProjectRequest"}, + "output":{"shape":"CreateProjectResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "CreateUpload":{ + "name":"CreateUpload", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateUploadRequest"}, + "output":{"shape":"CreateUploadResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "DeleteDevicePool":{ + "name":"DeleteDevicePool", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDevicePoolRequest"}, + "output":{"shape":"DeleteDevicePoolResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "DeleteProject":{ + "name":"DeleteProject", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteProjectRequest"}, + "output":{"shape":"DeleteProjectResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "DeleteRun":{ + "name":"DeleteRun", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRunRequest"}, + "output":{"shape":"DeleteRunResult"}, + "errors":[ + {"shape":"ArgumentException"}, + 
{"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "DeleteUpload":{ + "name":"DeleteUpload", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteUploadRequest"}, + "output":{"shape":"DeleteUploadResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "GetAccountSettings":{ + "name":"GetAccountSettings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetAccountSettingsRequest"}, + "output":{"shape":"GetAccountSettingsResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "GetDevice":{ + "name":"GetDevice", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDeviceRequest"}, + "output":{"shape":"GetDeviceResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "GetDevicePool":{ + "name":"GetDevicePool", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDevicePoolRequest"}, + "output":{"shape":"GetDevicePoolResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "GetDevicePoolCompatibility":{ + "name":"GetDevicePoolCompatibility", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDevicePoolCompatibilityRequest"}, + "output":{"shape":"GetDevicePoolCompatibilityResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "GetJob":{ + "name":"GetJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetJobRequest"}, + "output":{"shape":"GetJobResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "GetOfferingStatus":{ + "name":"GetOfferingStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetOfferingStatusRequest"}, + "output":{"shape":"GetOfferingStatusResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"NotEligibleException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "GetProject":{ + "name":"GetProject", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetProjectRequest"}, + "output":{"shape":"GetProjectResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "GetRun":{ + "name":"GetRun", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetRunRequest"}, + "output":{"shape":"GetRunResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "GetSuite":{ + "name":"GetSuite", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetSuiteRequest"}, + "output":{"shape":"GetSuiteResult"}, + "errors":[ + {"shape":"ArgumentException"}, + 
{"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "GetTest":{ + "name":"GetTest", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetTestRequest"}, + "output":{"shape":"GetTestResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "GetUpload":{ + "name":"GetUpload", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetUploadRequest"}, + "output":{"shape":"GetUploadResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "ListArtifacts":{ + "name":"ListArtifacts", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListArtifactsRequest"}, + "output":{"shape":"ListArtifactsResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "ListDevicePools":{ + "name":"ListDevicePools", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDevicePoolsRequest"}, + "output":{"shape":"ListDevicePoolsResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "ListDevices":{ + "name":"ListDevices", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDevicesRequest"}, + "output":{"shape":"ListDevicesResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "ListJobs":{ + "name":"ListJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListJobsRequest"}, + "output":{"shape":"ListJobsResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "ListOfferingTransactions":{ + "name":"ListOfferingTransactions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListOfferingTransactionsRequest"}, + "output":{"shape":"ListOfferingTransactionsResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"NotEligibleException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "ListOfferings":{ + "name":"ListOfferings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListOfferingsRequest"}, + "output":{"shape":"ListOfferingsResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"NotEligibleException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "ListProjects":{ + "name":"ListProjects", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListProjectsRequest"}, + "output":{"shape":"ListProjectsResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "ListRuns":{ + "name":"ListRuns", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListRunsRequest"}, + "output":{"shape":"ListRunsResult"}, + "errors":[ + {"shape":"ArgumentException"}, + 
{"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "ListSamples":{ + "name":"ListSamples", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListSamplesRequest"}, + "output":{"shape":"ListSamplesResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "ListSuites":{ + "name":"ListSuites", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListSuitesRequest"}, + "output":{"shape":"ListSuitesResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "ListTests":{ + "name":"ListTests", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTestsRequest"}, + "output":{"shape":"ListTestsResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "ListUniqueProblems":{ + "name":"ListUniqueProblems", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListUniqueProblemsRequest"}, + "output":{"shape":"ListUniqueProblemsResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "ListUploads":{ + "name":"ListUploads", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListUploadsRequest"}, + "output":{"shape":"ListUploadsResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "PurchaseOffering":{ + "name":"PurchaseOffering", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PurchaseOfferingRequest"}, + "output":{"shape":"PurchaseOfferingResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"NotEligibleException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "RenewOffering":{ + "name":"RenewOffering", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RenewOfferingRequest"}, + "output":{"shape":"RenewOfferingResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"NotEligibleException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "ScheduleRun":{ + "name":"ScheduleRun", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ScheduleRunRequest"}, + "output":{"shape":"ScheduleRunResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"IdempotencyException"}, + {"shape":"ServiceAccountException"} + ] + }, + "StopRun":{ + "name":"StopRun", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopRunRequest"}, + "output":{"shape":"StopRunResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "UpdateDevicePool":{ + "name":"UpdateDevicePool", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateDevicePoolRequest"}, + "output":{"shape":"UpdateDevicePoolResult"}, + 
"errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "UpdateProject":{ + "name":"UpdateProject", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateProjectRequest"}, + "output":{"shape":"UpdateProjectResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + } + }, + "shapes":{ + "AWSAccountNumber":{ + "type":"string", + "max":16, + "min":2 + }, + "AccountSettings":{ + "type":"structure", + "members":{ + "awsAccountNumber":{"shape":"AWSAccountNumber"}, + "unmeteredDevices":{"shape":"PurchasedDevicesMap"}, + "unmeteredRemoteAccessDevices":{"shape":"PurchasedDevicesMap"} + } + }, + "AmazonResourceName":{ + "type":"string", + "min":32 + }, + "AmazonResourceNames":{ + "type":"list", + "member":{"shape":"AmazonResourceName"} + }, + "ArgumentException":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + }, + "exception":true + }, + "Artifact":{ + "type":"structure", + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "name":{"shape":"Name"}, + "type":{"shape":"ArtifactType"}, + "extension":{"shape":"String"}, + "url":{"shape":"URL"} + } + }, + "ArtifactCategory":{ + "type":"string", + "enum":[ + "SCREENSHOT", + "FILE", + "LOG" + ] + }, + "ArtifactType":{ + "type":"string", + "enum":[ + "UNKNOWN", + "SCREENSHOT", + "DEVICE_LOG", + "MESSAGE_LOG", + "RESULT_LOG", + "SERVICE_LOG", + "WEBKIT_LOG", + "INSTRUMENTATION_OUTPUT", + "EXERCISER_MONKEY_OUTPUT", + "CALABASH_JSON_OUTPUT", + "CALABASH_PRETTY_OUTPUT", + "CALABASH_STANDARD_OUTPUT", + "CALABASH_JAVA_XML_OUTPUT", + "AUTOMATION_OUTPUT", + "APPIUM_SERVER_OUTPUT", + "APPIUM_JAVA_OUTPUT", + "APPIUM_JAVA_XML_OUTPUT", + "APPIUM_PYTHON_OUTPUT", + "APPIUM_PYTHON_XML_OUTPUT", + "EXPLORER_EVENT_LOG", + "EXPLORER_SUMMARY_LOG", + "APPLICATION_CRASH_REPORT", + "XCTEST_LOG", + "VIDEO" + ] + }, + "Artifacts":{ + "type":"list", + "member":{"shape":"Artifact"} + }, + "BillingMethod":{ + "type":"string", + "enum":[ + "METERED", + "UNMETERED" + ] + }, + "Boolean":{"type":"boolean"}, + "CPU":{ + "type":"structure", + "members":{ + "frequency":{"shape":"String"}, + "architecture":{"shape":"String"}, + "clock":{"shape":"Double"} + } + }, + "ContentType":{ + "type":"string", + "max":64, + "min":0 + }, + "Counters":{ + "type":"structure", + "members":{ + "total":{"shape":"Integer"}, + "passed":{"shape":"Integer"}, + "failed":{"shape":"Integer"}, + "warned":{"shape":"Integer"}, + "errored":{"shape":"Integer"}, + "stopped":{"shape":"Integer"}, + "skipped":{"shape":"Integer"} + } + }, + "CreateDevicePoolRequest":{ + "type":"structure", + "required":[ + "projectArn", + "name", + "rules" + ], + "members":{ + "projectArn":{"shape":"AmazonResourceName"}, + "name":{"shape":"Name"}, + "description":{"shape":"Message"}, + "rules":{"shape":"Rules"} + } + }, + "CreateDevicePoolResult":{ + "type":"structure", + "members":{ + "devicePool":{"shape":"DevicePool"} + } + }, + "CreateProjectRequest":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{"shape":"Name"} + } + }, + "CreateProjectResult":{ + "type":"structure", + "members":{ + "project":{"shape":"Project"} + } + }, + "CreateUploadRequest":{ + "type":"structure", + "required":[ + "projectArn", + "name", + "type" + ], + "members":{ + "projectArn":{"shape":"AmazonResourceName"}, + "name":{"shape":"Name"}, + "type":{"shape":"UploadType"}, + 
"contentType":{"shape":"ContentType"} + } + }, + "CreateUploadResult":{ + "type":"structure", + "members":{ + "upload":{"shape":"Upload"} + } + }, + "CurrencyCode":{ + "type":"string", + "enum":["USD"] + }, + "DateTime":{"type":"timestamp"}, + "DeleteDevicePoolRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"} + } + }, + "DeleteDevicePoolResult":{ + "type":"structure", + "members":{ + } + }, + "DeleteProjectRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"} + } + }, + "DeleteProjectResult":{ + "type":"structure", + "members":{ + } + }, + "DeleteRunRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"} + } + }, + "DeleteRunResult":{ + "type":"structure", + "members":{ + } + }, + "DeleteUploadRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"} + } + }, + "DeleteUploadResult":{ + "type":"structure", + "members":{ + } + }, + "Device":{ + "type":"structure", + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "name":{"shape":"Name"}, + "manufacturer":{"shape":"String"}, + "model":{"shape":"String"}, + "formFactor":{"shape":"DeviceFormFactor"}, + "platform":{"shape":"DevicePlatform"}, + "os":{"shape":"String"}, + "cpu":{"shape":"CPU"}, + "resolution":{"shape":"Resolution"}, + "heapSize":{"shape":"Long"}, + "memory":{"shape":"Long"}, + "image":{"shape":"String"}, + "carrier":{"shape":"String"}, + "radio":{"shape":"String"} + } + }, + "DeviceAttribute":{ + "type":"string", + "enum":[ + "ARN", + "PLATFORM", + "FORM_FACTOR", + "MANUFACTURER" + ] + }, + "DeviceFormFactor":{ + "type":"string", + "enum":[ + "PHONE", + "TABLET" + ] + }, + "DeviceMinutes":{ + "type":"structure", + "members":{ + "total":{"shape":"Double"}, + "metered":{"shape":"Double"}, + "unmetered":{"shape":"Double"} + } + }, + "DevicePlatform":{ + "type":"string", + "enum":[ + "ANDROID", + "IOS" + ] + }, + "DevicePool":{ + "type":"structure", + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "name":{"shape":"Name"}, + "description":{"shape":"Message"}, + "type":{"shape":"DevicePoolType"}, + "rules":{"shape":"Rules"} + } + }, + "DevicePoolCompatibilityResult":{ + "type":"structure", + "members":{ + "device":{"shape":"Device"}, + "compatible":{"shape":"Boolean"}, + "incompatibilityMessages":{"shape":"IncompatibilityMessages"} + } + }, + "DevicePoolCompatibilityResults":{ + "type":"list", + "member":{"shape":"DevicePoolCompatibilityResult"} + }, + "DevicePoolType":{ + "type":"string", + "enum":[ + "CURATED", + "PRIVATE" + ] + }, + "DevicePools":{ + "type":"list", + "member":{"shape":"DevicePool"} + }, + "Devices":{ + "type":"list", + "member":{"shape":"Device"} + }, + "Double":{"type":"double"}, + "ExecutionResult":{ + "type":"string", + "enum":[ + "PENDING", + "PASSED", + "WARNED", + "FAILED", + "SKIPPED", + "ERRORED", + "STOPPED" + ] + }, + "ExecutionStatus":{ + "type":"string", + "enum":[ + "PENDING", + "PENDING_CONCURRENCY", + "PENDING_DEVICE", + "PROCESSING", + "SCHEDULING", + "PREPARING", + "RUNNING", + "COMPLETED", + "STOPPING" + ] + }, + "Filter":{ + "type":"string", + "max":8192, + "min":0 + }, + "GetAccountSettingsRequest":{ + "type":"structure", + "members":{ + } + }, + "GetAccountSettingsResult":{ + "type":"structure", + "members":{ + "accountSettings":{"shape":"AccountSettings"} + } + }, + "GetDevicePoolCompatibilityRequest":{ + "type":"structure", + "required":["devicePoolArn"], + "members":{ + 
"devicePoolArn":{"shape":"AmazonResourceName"}, + "appArn":{"shape":"AmazonResourceName"}, + "testType":{"shape":"TestType"} + } + }, + "GetDevicePoolCompatibilityResult":{ + "type":"structure", + "members":{ + "compatibleDevices":{"shape":"DevicePoolCompatibilityResults"}, + "incompatibleDevices":{"shape":"DevicePoolCompatibilityResults"} + } + }, + "GetDevicePoolRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"} + } + }, + "GetDevicePoolResult":{ + "type":"structure", + "members":{ + "devicePool":{"shape":"DevicePool"} + } + }, + "GetDeviceRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"} + } + }, + "GetDeviceResult":{ + "type":"structure", + "members":{ + "device":{"shape":"Device"} + } + }, + "GetJobRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"} + } + }, + "GetJobResult":{ + "type":"structure", + "members":{ + "job":{"shape":"Job"} + } + }, + "GetOfferingStatusRequest":{ + "type":"structure", + "members":{ + "nextToken":{"shape":"PaginationToken"} + } + }, + "GetOfferingStatusResult":{ + "type":"structure", + "members":{ + "current":{"shape":"OfferingStatusMap"}, + "nextPeriod":{"shape":"OfferingStatusMap"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "GetProjectRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"} + } + }, + "GetProjectResult":{ + "type":"structure", + "members":{ + "project":{"shape":"Project"} + } + }, + "GetRunRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"} + } + }, + "GetRunResult":{ + "type":"structure", + "members":{ + "run":{"shape":"Run"} + } + }, + "GetSuiteRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"} + } + }, + "GetSuiteResult":{ + "type":"structure", + "members":{ + "suite":{"shape":"Suite"} + } + }, + "GetTestRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"} + } + }, + "GetTestResult":{ + "type":"structure", + "members":{ + "test":{"shape":"Test"} + } + }, + "GetUploadRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"} + } + }, + "GetUploadResult":{ + "type":"structure", + "members":{ + "upload":{"shape":"Upload"} + } + }, + "IdempotencyException":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + }, + "exception":true + }, + "IncompatibilityMessage":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"}, + "type":{"shape":"DeviceAttribute"} + } + }, + "IncompatibilityMessages":{ + "type":"list", + "member":{"shape":"IncompatibilityMessage"} + }, + "Integer":{"type":"integer"}, + "Job":{ + "type":"structure", + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "name":{"shape":"Name"}, + "type":{"shape":"TestType"}, + "created":{"shape":"DateTime"}, + "status":{"shape":"ExecutionStatus"}, + "result":{"shape":"ExecutionResult"}, + "started":{"shape":"DateTime"}, + "stopped":{"shape":"DateTime"}, + "counters":{"shape":"Counters"}, + "message":{"shape":"Message"}, + "device":{"shape":"Device"}, + "deviceMinutes":{"shape":"DeviceMinutes"} + } + }, + "Jobs":{ + "type":"list", + "member":{"shape":"Job"} + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + }, + "exception":true + }, + 
"ListArtifactsRequest":{ + "type":"structure", + "required":[ + "arn", + "type" + ], + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "type":{"shape":"ArtifactCategory"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListArtifactsResult":{ + "type":"structure", + "members":{ + "artifacts":{"shape":"Artifacts"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListDevicePoolsRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "type":{"shape":"DevicePoolType"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListDevicePoolsResult":{ + "type":"structure", + "members":{ + "devicePools":{"shape":"DevicePools"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListDevicesRequest":{ + "type":"structure", + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListDevicesResult":{ + "type":"structure", + "members":{ + "devices":{"shape":"Devices"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListJobsRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListJobsResult":{ + "type":"structure", + "members":{ + "jobs":{"shape":"Jobs"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListOfferingTransactionsRequest":{ + "type":"structure", + "members":{ + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListOfferingTransactionsResult":{ + "type":"structure", + "members":{ + "offeringTransactions":{"shape":"OfferingTransactions"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListOfferingsRequest":{ + "type":"structure", + "members":{ + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListOfferingsResult":{ + "type":"structure", + "members":{ + "offerings":{"shape":"Offerings"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListProjectsRequest":{ + "type":"structure", + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListProjectsResult":{ + "type":"structure", + "members":{ + "projects":{"shape":"Projects"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListRunsRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListRunsResult":{ + "type":"structure", + "members":{ + "runs":{"shape":"Runs"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListSamplesRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListSamplesResult":{ + "type":"structure", + "members":{ + "samples":{"shape":"Samples"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListSuitesRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListSuitesResult":{ + "type":"structure", + "members":{ + "suites":{"shape":"Suites"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListTestsRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListTestsResult":{ + "type":"structure", + "members":{ + "tests":{"shape":"Tests"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListUniqueProblemsRequest":{ + "type":"structure", + "required":["arn"], + 
"members":{ + "arn":{"shape":"AmazonResourceName"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListUniqueProblemsResult":{ + "type":"structure", + "members":{ + "uniqueProblems":{"shape":"UniqueProblemsByExecutionResultMap"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListUploadsRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListUploadsResult":{ + "type":"structure", + "members":{ + "uploads":{"shape":"Uploads"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "Location":{ + "type":"structure", + "required":[ + "latitude", + "longitude" + ], + "members":{ + "latitude":{"shape":"Double"}, + "longitude":{"shape":"Double"} + } + }, + "Long":{"type":"long"}, + "Message":{ + "type":"string", + "max":16384, + "min":0 + }, + "Metadata":{ + "type":"string", + "max":8192, + "min":0 + }, + "MonetaryAmount":{ + "type":"structure", + "members":{ + "amount":{"shape":"Double"}, + "currencyCode":{"shape":"CurrencyCode"} + } + }, + "Name":{ + "type":"string", + "max":256, + "min":0 + }, + "NotEligibleException":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + }, + "exception":true + }, + "NotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + }, + "exception":true + }, + "Offering":{ + "type":"structure", + "members":{ + "id":{"shape":"OfferingIdentifier"}, + "description":{"shape":"Message"}, + "type":{"shape":"OfferingType"}, + "platform":{"shape":"DevicePlatform"}, + "recurringCharges":{"shape":"RecurringCharges"} + } + }, + "OfferingIdentifier":{ + "type":"string", + "min":32 + }, + "OfferingStatus":{ + "type":"structure", + "members":{ + "type":{"shape":"OfferingTransactionType"}, + "offering":{"shape":"Offering"}, + "quantity":{"shape":"Integer"}, + "effectiveOn":{"shape":"DateTime"} + } + }, + "OfferingStatusMap":{ + "type":"map", + "key":{"shape":"OfferingIdentifier"}, + "value":{"shape":"OfferingStatus"} + }, + "OfferingTransaction":{ + "type":"structure", + "members":{ + "offeringStatus":{"shape":"OfferingStatus"}, + "transactionId":{"shape":"TransactionIdentifier"}, + "createdOn":{"shape":"DateTime"}, + "cost":{"shape":"MonetaryAmount"} + } + }, + "OfferingTransactionType":{ + "type":"string", + "enum":[ + "PURCHASE", + "RENEW", + "SYSTEM" + ] + }, + "OfferingTransactions":{ + "type":"list", + "member":{"shape":"OfferingTransaction"} + }, + "OfferingType":{ + "type":"string", + "enum":["RECURRING"] + }, + "Offerings":{ + "type":"list", + "member":{"shape":"Offering"} + }, + "PaginationToken":{ + "type":"string", + "max":1024, + "min":4 + }, + "Problem":{ + "type":"structure", + "members":{ + "run":{"shape":"ProblemDetail"}, + "job":{"shape":"ProblemDetail"}, + "suite":{"shape":"ProblemDetail"}, + "test":{"shape":"ProblemDetail"}, + "device":{"shape":"Device"}, + "result":{"shape":"ExecutionResult"}, + "message":{"shape":"Message"} + } + }, + "ProblemDetail":{ + "type":"structure", + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "name":{"shape":"Name"} + } + }, + "Problems":{ + "type":"list", + "member":{"shape":"Problem"} + }, + "Project":{ + "type":"structure", + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "name":{"shape":"Name"}, + "created":{"shape":"DateTime"} + } + }, + "Projects":{ + "type":"list", + "member":{"shape":"Project"} + }, + "PurchaseOfferingRequest":{ + "type":"structure", + "members":{ + "offeringId":{"shape":"OfferingIdentifier"}, + 
"quantity":{"shape":"Integer"} + } + }, + "PurchaseOfferingResult":{ + "type":"structure", + "members":{ + "offeringTransaction":{"shape":"OfferingTransaction"} + } + }, + "PurchasedDevicesMap":{ + "type":"map", + "key":{"shape":"DevicePlatform"}, + "value":{"shape":"Integer"} + }, + "Radios":{ + "type":"structure", + "members":{ + "wifi":{"shape":"Boolean"}, + "bluetooth":{"shape":"Boolean"}, + "nfc":{"shape":"Boolean"}, + "gps":{"shape":"Boolean"} + } + }, + "RecurringCharge":{ + "type":"structure", + "members":{ + "cost":{"shape":"MonetaryAmount"}, + "frequency":{"shape":"RecurringChargeFrequency"} + } + }, + "RecurringChargeFrequency":{ + "type":"string", + "enum":["MONTHLY"] + }, + "RecurringCharges":{ + "type":"list", + "member":{"shape":"RecurringCharge"} + }, + "RenewOfferingRequest":{ + "type":"structure", + "members":{ + "offeringId":{"shape":"OfferingIdentifier"}, + "quantity":{"shape":"Integer"} + } + }, + "RenewOfferingResult":{ + "type":"structure", + "members":{ + "offeringTransaction":{"shape":"OfferingTransaction"} + } + }, + "Resolution":{ + "type":"structure", + "members":{ + "width":{"shape":"Integer"}, + "height":{"shape":"Integer"} + } + }, + "Rule":{ + "type":"structure", + "members":{ + "attribute":{"shape":"DeviceAttribute"}, + "operator":{"shape":"RuleOperator"}, + "value":{"shape":"String"} + } + }, + "RuleOperator":{ + "type":"string", + "enum":[ + "EQUALS", + "LESS_THAN", + "GREATER_THAN", + "IN", + "NOT_IN" + ] + }, + "Rules":{ + "type":"list", + "member":{"shape":"Rule"} + }, + "Run":{ + "type":"structure", + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "name":{"shape":"Name"}, + "type":{"shape":"TestType"}, + "platform":{"shape":"DevicePlatform"}, + "created":{"shape":"DateTime"}, + "status":{"shape":"ExecutionStatus"}, + "result":{"shape":"ExecutionResult"}, + "started":{"shape":"DateTime"}, + "stopped":{"shape":"DateTime"}, + "counters":{"shape":"Counters"}, + "message":{"shape":"Message"}, + "totalJobs":{"shape":"Integer"}, + "completedJobs":{"shape":"Integer"}, + "billingMethod":{"shape":"BillingMethod"}, + "deviceMinutes":{"shape":"DeviceMinutes"} + } + }, + "Runs":{ + "type":"list", + "member":{"shape":"Run"} + }, + "Sample":{ + "type":"structure", + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "type":{"shape":"SampleType"}, + "url":{"shape":"URL"} + } + }, + "SampleType":{ + "type":"string", + "enum":[ + "CPU", + "MEMORY", + "THREADS", + "RX_RATE", + "TX_RATE", + "RX", + "TX", + "NATIVE_FRAMES", + "NATIVE_FPS", + "NATIVE_MIN_DRAWTIME", + "NATIVE_AVG_DRAWTIME", + "NATIVE_MAX_DRAWTIME", + "OPENGL_FRAMES", + "OPENGL_FPS", + "OPENGL_MIN_DRAWTIME", + "OPENGL_AVG_DRAWTIME", + "OPENGL_MAX_DRAWTIME" + ] + }, + "Samples":{ + "type":"list", + "member":{"shape":"Sample"} + }, + "ScheduleRunConfiguration":{ + "type":"structure", + "members":{ + "extraDataPackageArn":{"shape":"AmazonResourceName"}, + "networkProfileArn":{"shape":"AmazonResourceName"}, + "locale":{"shape":"String"}, + "location":{"shape":"Location"}, + "radios":{"shape":"Radios"}, + "auxiliaryApps":{"shape":"AmazonResourceNames"}, + "billingMethod":{"shape":"BillingMethod"} + } + }, + "ScheduleRunRequest":{ + "type":"structure", + "required":[ + "projectArn", + "devicePoolArn", + "test" + ], + "members":{ + "projectArn":{"shape":"AmazonResourceName"}, + "appArn":{"shape":"AmazonResourceName"}, + "devicePoolArn":{"shape":"AmazonResourceName"}, + "name":{"shape":"Name"}, + "test":{"shape":"ScheduleRunTest"}, + "configuration":{"shape":"ScheduleRunConfiguration"} + } + }, + 
"ScheduleRunResult":{ + "type":"structure", + "members":{ + "run":{"shape":"Run"} + } + }, + "ScheduleRunTest":{ + "type":"structure", + "required":["type"], + "members":{ + "type":{"shape":"TestType"}, + "testPackageArn":{"shape":"AmazonResourceName"}, + "filter":{"shape":"Filter"}, + "parameters":{"shape":"TestParameters"} + } + }, + "ServiceAccountException":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + }, + "exception":true + }, + "StopRunRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"} + } + }, + "StopRunResult":{ + "type":"structure", + "members":{ + "run":{"shape":"Run"} + } + }, + "String":{"type":"string"}, + "Suite":{ + "type":"structure", + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "name":{"shape":"Name"}, + "type":{"shape":"TestType"}, + "created":{"shape":"DateTime"}, + "status":{"shape":"ExecutionStatus"}, + "result":{"shape":"ExecutionResult"}, + "started":{"shape":"DateTime"}, + "stopped":{"shape":"DateTime"}, + "counters":{"shape":"Counters"}, + "message":{"shape":"Message"}, + "deviceMinutes":{"shape":"DeviceMinutes"} + } + }, + "Suites":{ + "type":"list", + "member":{"shape":"Suite"} + }, + "Test":{ + "type":"structure", + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "name":{"shape":"Name"}, + "type":{"shape":"TestType"}, + "created":{"shape":"DateTime"}, + "status":{"shape":"ExecutionStatus"}, + "result":{"shape":"ExecutionResult"}, + "started":{"shape":"DateTime"}, + "stopped":{"shape":"DateTime"}, + "counters":{"shape":"Counters"}, + "message":{"shape":"Message"}, + "deviceMinutes":{"shape":"DeviceMinutes"} + } + }, + "TestParameters":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "TestType":{ + "type":"string", + "enum":[ + "BUILTIN_FUZZ", + "BUILTIN_EXPLORER", + "APPIUM_JAVA_JUNIT", + "APPIUM_JAVA_TESTNG", + "APPIUM_PYTHON", + "APPIUM_WEB_JAVA_JUNIT", + "APPIUM_WEB_JAVA_TESTNG", + "APPIUM_WEB_PYTHON", + "CALABASH", + "INSTRUMENTATION", + "UIAUTOMATION", + "UIAUTOMATOR", + "XCTEST", + "XCTEST_UI" + ] + }, + "Tests":{ + "type":"list", + "member":{"shape":"Test"} + }, + "TransactionIdentifier":{ + "type":"string", + "min":32 + }, + "URL":{ + "type":"string", + "max":2048, + "min":0 + }, + "UniqueProblem":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"}, + "problems":{"shape":"Problems"} + } + }, + "UniqueProblems":{ + "type":"list", + "member":{"shape":"UniqueProblem"} + }, + "UniqueProblemsByExecutionResultMap":{ + "type":"map", + "key":{"shape":"ExecutionResult"}, + "value":{"shape":"UniqueProblems"} + }, + "UpdateDevicePoolRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "name":{"shape":"Name"}, + "description":{"shape":"Message"}, + "rules":{"shape":"Rules"} + } + }, + "UpdateDevicePoolResult":{ + "type":"structure", + "members":{ + "devicePool":{"shape":"DevicePool"} + } + }, + "UpdateProjectRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "name":{"shape":"Name"} + } + }, + "UpdateProjectResult":{ + "type":"structure", + "members":{ + "project":{"shape":"Project"} + } + }, + "Upload":{ + "type":"structure", + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "name":{"shape":"Name"}, + "created":{"shape":"DateTime"}, + "type":{"shape":"UploadType"}, + "status":{"shape":"UploadStatus"}, + "url":{"shape":"URL"}, + "metadata":{"shape":"Metadata"}, + 
"contentType":{"shape":"ContentType"}, + "message":{"shape":"Message"} + } + }, + "UploadStatus":{ + "type":"string", + "enum":[ + "INITIALIZED", + "PROCESSING", + "SUCCEEDED", + "FAILED" + ] + }, + "UploadType":{ + "type":"string", + "enum":[ + "ANDROID_APP", + "IOS_APP", + "WEB_APP", + "EXTERNAL_DATA", + "APPIUM_JAVA_JUNIT_TEST_PACKAGE", + "APPIUM_JAVA_TESTNG_TEST_PACKAGE", + "APPIUM_PYTHON_TEST_PACKAGE", + "APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE", + "APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE", + "APPIUM_WEB_PYTHON_TEST_PACKAGE", + "CALABASH_TEST_PACKAGE", + "INSTRUMENTATION_TEST_PACKAGE", + "UIAUTOMATION_TEST_PACKAGE", + "UIAUTOMATOR_TEST_PACKAGE", + "XCTEST_TEST_PACKAGE", + "XCTEST_UI_TEST_PACKAGE" + ] + }, + "Uploads":{ + "type":"list", + "member":{"shape":"Upload"} + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/devicefarm/2015-06-23/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/devicefarm/2015-06-23/docs-2.json new file mode 100644 index 000000000..9abff6e6b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/devicefarm/2015-06-23/docs-2.json @@ -0,0 +1,1194 @@ +{ + "version": "2.0", + "service": "

    AWS Device Farm is a service that enables mobile app developers to test Android, iOS, and Fire OS apps on physical phones, tablets, and other devices in the cloud.

    ", + "operations": { + "CreateDevicePool": "

    Creates a device pool.

    ", + "CreateProject": "

    Creates a new project.

    ", + "CreateUpload": "

    Uploads an app or test scripts.

    ", + "DeleteDevicePool": "

    Deletes a device pool given the pool ARN. Does not allow deletion of curated pools owned by the system.

    ", + "DeleteProject": "

    Deletes an AWS Device Farm project, given the project ARN.

    Note Deleting this resource does not stop an in-progress run.

    ", + "DeleteRun": "

    Deletes the run, given the run ARN.

    Note Deleting this resource does not stop an in-progress run.

    ", + "DeleteUpload": "

    Deletes an upload given the upload ARN.

    ", + "GetAccountSettings": "

    Returns the number of unmetered iOS and/or unmetered Android devices that have been purchased by the account.

    ", + "GetDevice": "

    Gets information about a unique device type.

    ", + "GetDevicePool": "

    Gets information about a device pool.

    ", + "GetDevicePoolCompatibility": "

    Gets information about compatibility with a device pool.

    ", + "GetJob": "

    Gets information about a job.

    ", + "GetOfferingStatus": "

    Gets the current status and future status of all offerings purchased by an AWS account. The response indicates how many offerings are currently available and the offerings that will be available in the next period. The API returns a NotEligible error if the user is not permitted to invoke the operation. Please contact aws-devicefarm-support@amazon.com if you believe that you should be able to invoke this operation.
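
    For illustration, a minimal Go sketch of reading both offering maps, assuming the generated aws-sdk-go client in service/devicefarm follows the SDK's usual codegen (the region, field names, and helper names here are assumptions, not something this model file confirms):

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/devicefarm"
        )

        func printOfferingStatus() error {
            // Device Farm is typically addressed through us-west-2.
            sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
            svc := devicefarm.New(sess)

            out, err := svc.GetOfferingStatus(&devicefarm.GetOfferingStatusInput{})
            if err != nil {
                return err // includes the NotEligible case described above
            }
            // "current" is what is in effect now; "nextPeriod" is what
            // takes effect at the next renewal.
            for id, s := range out.Current {
                fmt.Printf("now:  %s x%d\n", id, aws.Int64Value(s.Quantity))
            }
            for id, s := range out.NextPeriod {
                fmt.Printf("next: %s x%d\n", id, aws.Int64Value(s.Quantity))
            }
            return nil
        }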

    ", + "GetProject": "

    Gets information about a project.

    ", + "GetRun": "

    Gets information about a run.

    ", + "GetSuite": "

    Gets information about a suite.

    ", + "GetTest": "

    Gets information about a test.

    ", + "GetUpload": "

    Gets information about an upload.

    ", + "ListArtifacts": "

    Gets information about artifacts.

    ", + "ListDevicePools": "

    Gets information about device pools.

    ", + "ListDevices": "

    Gets information about unique device types.

    ", + "ListJobs": "

    Gets information about jobs.

    ", + "ListOfferingTransactions": "

    Returns a list of all historical purchases, renewals, and system renewal transactions for an AWS account. The list is paginated and ordered by a descending timestamp (most recent transactions are first). The API returns a NotEligible error if the user is not permitted to invoke the operation. Please contact aws-devicefarm-support@amazon.com if you believe that you should be able to invoke this operation.
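
    As a sketch of the pagination contract (nextToken in, nextToken out until it comes back empty), under the same assumptions and imports as the sketch above:

        // listAllTransactions drains every page of the transaction history.
        func listAllTransactions(svc *devicefarm.DeviceFarm) ([]*devicefarm.OfferingTransaction, error) {
            var all []*devicefarm.OfferingTransaction
            input := &devicefarm.ListOfferingTransactionsInput{}
            for {
                out, err := svc.ListOfferingTransactions(input)
                if err != nil {
                    return nil, err
                }
                all = append(all, out.OfferingTransactions...)
                if out.NextToken == nil {
                    return all, nil // no more pages
                }
                input.NextToken = out.NextToken
            }
        }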

    ", + "ListOfferings": "

    Returns a list of products or offerings that the user can manage through the API. Each offering record indicates the recurring price per unit and the frequency for that offering. The API returns a NotEligible error if the user is not permitted to invoke the operation. Please contact aws-devicefarm-support@amazon.com if you believe that you should be able to invoke this operation.

    ", + "ListProjects": "

    Gets information about projects.

    ", + "ListRuns": "

    Gets information about runs.

    ", + "ListSamples": "

    Gets information about samples.

    ", + "ListSuites": "

    Gets information about suites.

    ", + "ListTests": "

    Gets information about tests.

    ", + "ListUniqueProblems": "

    Gets information about unique problems.

    ", + "ListUploads": "

    Gets information about uploads.

    ", + "PurchaseOffering": "

    Immediately purchases offerings for an AWS account. Offerings renew with the latest total purchased quantity for an offering, unless the renewal was overridden. The API returns a NotEligible error if the user is not permitted to invoke the operation. Please contact aws-devicefarm-support@amazon.com if you believe that you should be able to invoke this operation.

    ", + "RenewOffering": "

    Explicitly sets the quantity of devices to renew for an offering, starting from the effectiveDate of the next period. The API returns a NotEligible error if the user is not permitted to invoke the operation. Please contact aws-devicefarm-support@amazon.com if you believe that you should be able to invoke this operation.

    ", + "ScheduleRun": "

    Schedules a run.

    ", + "StopRun": "

    Initiates a stop request for the current test run. AWS Device Farm will immediately stop the run on devices where tests have not started executing, and you will not be billed for these devices. On devices where tests have started executing, Setup Suite and Teardown Suite tests will run to completion before stopping execution on those devices. You will be billed for Setup, Teardown, and any tests that were in progress or already completed.
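
    A hedged one-call sketch (same assumed client as above; the run ARN is a placeholder):

        // stopRun asks Device Farm to stop a run; per the description above,
        // setup/teardown suites and in-flight tests may still bill.
        func stopRun(svc *devicefarm.DeviceFarm, runArn string) error {
            out, err := svc.StopRun(&devicefarm.StopRunInput{
                Arn: aws.String(runArn),
            })
            if err != nil {
                return err
            }
            // Stopping is asynchronous; the run passes through STOPPING
            // while work on the devices drains.
            fmt.Println("status:", aws.StringValue(out.Run.Status))
            return nil
        }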

    ", + "UpdateDevicePool": "

    Modifies the name, description, and rules in a device pool given the attributes and the pool ARN. Rule updates are all-or-nothing, meaning they can only be updated as a whole (or not at all).
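
    Because rule updates are all-or-nothing, a client must resend the complete rule set rather than a delta. A sketch under the same assumptions as above (the pool ARN is a placeholder; the enum constant names assume the SDK's usual codegen):

        // replaceRules overwrites a pool's rule set; the slice must be the
        // complete desired set, since omitted rules are dropped.
        func replaceRules(svc *devicefarm.DeviceFarm, poolArn string) error {
            _, err := svc.UpdateDevicePool(&devicefarm.UpdateDevicePoolInput{
                Arn: aws.String(poolArn),
                Rules: []*devicefarm.Rule{
                    {
                        Attribute: aws.String(devicefarm.DeviceAttributePlatform),
                        Operator:  aws.String(devicefarm.RuleOperatorEquals),
                        Value:     aws.String("\"ANDROID\""), // rule values are stringified, per Rule below
                    },
                    {
                        Attribute: aws.String(devicefarm.DeviceAttributeFormFactor),
                        Operator:  aws.String(devicefarm.RuleOperatorEquals),
                        Value:     aws.String("\"PHONE\""),
                    },
                },
            })
            return err
        }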

    ", + "UpdateProject": "

    Modifies the specified project name, given the project ARN and a new name.

    " + }, + "shapes": { + "AWSAccountNumber": { + "base": null, + "refs": { + "AccountSettings$awsAccountNumber": "

    The AWS account number specified in the AccountSettings container.

    " + } + }, + "AccountSettings": { + "base": "

    A container for account-level settings within AWS Device Farm.

    ", + "refs": { + "GetAccountSettingsResult$accountSettings": null + } + }, + "AmazonResourceName": { + "base": null, + "refs": { + "AmazonResourceNames$member": null, + "Artifact$arn": "

    The artifact's ARN.

    ", + "CreateDevicePoolRequest$projectArn": "

    The ARN of the project for the device pool.

    ", + "CreateUploadRequest$projectArn": "

    The ARN of the project for the upload.

    ", + "DeleteDevicePoolRequest$arn": "

    Represents the Amazon Resource Name (ARN) of the Device Farm device pool you wish to delete.

    ", + "DeleteProjectRequest$arn": "

    Represents the Amazon Resource Name (ARN) of the Device Farm project you wish to delete.

    ", + "DeleteRunRequest$arn": "

    The Amazon Resource Name (ARN) for the run you wish to delete.

    ", + "DeleteUploadRequest$arn": "

    Represents the Amazon Resource Name (ARN) of the Device Farm upload you wish to delete.

    ", + "Device$arn": "

    The device's ARN.

    ", + "DevicePool$arn": "

    The device pool's ARN.

    ", + "GetDevicePoolCompatibilityRequest$devicePoolArn": "

    The device pool's ARN.

    ", + "GetDevicePoolCompatibilityRequest$appArn": "

    The ARN of the app that is associated with the specified device pool.

    ", + "GetDevicePoolRequest$arn": "

    The device pool's ARN.

    ", + "GetDeviceRequest$arn": "

    The device type's ARN.

    ", + "GetJobRequest$arn": "

    The job's ARN.

    ", + "GetProjectRequest$arn": "

    The project's ARN.

    ", + "GetRunRequest$arn": "

    The run's ARN.

    ", + "GetSuiteRequest$arn": "

    The suite's ARN.

    ", + "GetTestRequest$arn": "

    The test's ARN.

    ", + "GetUploadRequest$arn": "

    The upload's ARN.

    ", + "Job$arn": "

    The job's ARN.

    ", + "ListArtifactsRequest$arn": "

    The Run, Job, Suite, or Test ARN.

    ", + "ListDevicePoolsRequest$arn": "

    The project ARN.

    ", + "ListDevicesRequest$arn": "

    The device types' ARNs.

    ", + "ListJobsRequest$arn": "

    The jobs' ARNs.

    ", + "ListProjectsRequest$arn": "

    The projects' ARNs.

    ", + "ListRunsRequest$arn": "

    The runs' ARNs.

    ", + "ListSamplesRequest$arn": "

    The samples' ARNs.

    ", + "ListSuitesRequest$arn": "

    The suites' ARNs.

    ", + "ListTestsRequest$arn": "

    The tests' ARNs.

    ", + "ListUniqueProblemsRequest$arn": "

    The unique problems' ARNs.

    ", + "ListUploadsRequest$arn": "

    The uploads' ARNs.

    ", + "ProblemDetail$arn": "

    The problem detail's ARN.

    ", + "Project$arn": "

    The project's ARN.

    ", + "Run$arn": "

    The run's ARN.

    ", + "Sample$arn": "

    The sample's ARN.

    ", + "ScheduleRunConfiguration$extraDataPackageArn": "

    The ARN of the extra data for the run. The extra data is a .zip file that AWS Device Farm will extract to external data for Android or the app's sandbox for iOS.

    ", + "ScheduleRunConfiguration$networkProfileArn": "

    Reserved for internal use.

    ", + "ScheduleRunRequest$projectArn": "

    The ARN of the project for the run to be scheduled.

    ", + "ScheduleRunRequest$appArn": "

    The ARN of the app to schedule a run.

    ", + "ScheduleRunRequest$devicePoolArn": "

    The ARN of the device pool for the run to be scheduled.
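
    Taken together, these ARNs are the wiring for a run; a hedged sketch of the minimal call (projectArn, devicePoolArn, and test are the required members; all ARNs are placeholders, and the client is assumed as above):

        // scheduleRun submits a minimal instrumentation run.
        func scheduleRun(svc *devicefarm.DeviceFarm, projectArn, appArn, poolArn, pkgArn string) (string, error) {
            out, err := svc.ScheduleRun(&devicefarm.ScheduleRunInput{
                ProjectArn:    aws.String(projectArn),
                AppArn:        aws.String(appArn),
                DevicePoolArn: aws.String(poolArn),
                Name:          aws.String("nightly-smoke"),
                Test: &devicefarm.ScheduleRunTest{
                    Type:           aws.String(devicefarm.TestTypeInstrumentation),
                    TestPackageArn: aws.String(pkgArn),
                },
            })
            if err != nil {
                return "", err
            }
            return aws.StringValue(out.Run.Arn), nil
        }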

    ", + "ScheduleRunTest$testPackageArn": "

    The ARN of the uploaded test that will be run.

    ", + "StopRunRequest$arn": "

    Represents the Amazon Resource Name (ARN) of the Device Farm run you wish to stop.

    ", + "Suite$arn": "

    The suite's ARN.

    ", + "Test$arn": "

    The test's ARN.

    ", + "UpdateDevicePoolRequest$arn": "

    The Amazon Resource Name (ARN) of the Device Farm device pool you wish to update.

    ", + "UpdateProjectRequest$arn": "

    The Amazon Resource Name (ARN) of the project whose name you wish to update.

    ", + "Upload$arn": "

    The upload's ARN.

    " + } + }, + "AmazonResourceNames": { + "base": null, + "refs": { + "ScheduleRunConfiguration$auxiliaryApps": "

    A list of auxiliary apps for the run.

    " + } + }, + "ArgumentException": { + "base": "

    An invalid argument was specified.

    ", + "refs": { + } + }, + "Artifact": { + "base": "

    Represents the output of a test. Examples of artifacts include logs and screenshots.

    ", + "refs": { + "Artifacts$member": null + } + }, + "ArtifactCategory": { + "base": null, + "refs": { + "ListArtifactsRequest$type": "

    The artifacts' type.

    Allowed values include:

    • FILE: The artifacts are files.
    • LOG: The artifacts are logs.
    • SCREENSHOT: The artifacts are screenshots.
    " + } + }, + "ArtifactType": { + "base": null, + "refs": { + "Artifact$type": "

    The artifact's type.

    Allowed values include the following:

    • UNKNOWN: An unknown type.

    • SCREENSHOT: The screenshot type.

    • DEVICE_LOG: The device log type.

    • MESSAGE_LOG: The message log type.

    • RESULT_LOG: The result log type.

    • SERVICE_LOG: The service log type.

    • WEBKIT_LOG: The WebKit log type.

    • INSTRUMENTATION_OUTPUT: The instrumentation type.

    • EXERCISER_MONKEY_OUTPUT: For Android, the artifact (log) generated by an Android fuzz test.

    • CALABASH_JSON_OUTPUT: The Calabash JSON output type.

    • CALABASH_PRETTY_OUTPUT: The Calabash pretty output type.

    • CALABASH_STANDARD_OUTPUT: The Calabash standard output type.

    • CALABASH_JAVA_XML_OUTPUT: The Calabash Java XML output type.

    • AUTOMATION_OUTPUT: The automation output type.

    • APPIUM_SERVER_OUTPUT: The Appium server output type.

    • APPIUM_JAVA_OUTPUT: The Appium Java output type.

    • APPIUM_JAVA_XML_OUTPUT: The Appium Java XML output type.

    • APPIUM_PYTHON_OUTPUT: The Appium Python output type.

    • APPIUM_PYTHON_XML_OUTPUT: The Appium Python XML output type.

    • EXPLORER_EVENT_LOG: The Explorer event log output type.

    • EXPLORER_SUMMARY_LOG: The Explorer summary log output type.

    • APPLICATION_CRASH_REPORT: The application crash report output type.

    • XCTEST_LOG: The Xcode test output type.

    • VIDEO: The video type.

    " + } + }, + "Artifacts": { + "base": null, + "refs": { + "ListArtifactsResult$artifacts": "

    Information about the artifacts.

    " + } + }, + "BillingMethod": { + "base": null, + "refs": { + "Run$billingMethod": "

    Specifies the billing method for a test run: metered or unmetered. If the parameter is not specified, the default value is metered.

    ", + "ScheduleRunConfiguration$billingMethod": "

    Specifies the billing method for a test run: metered or unmetered. If the parameter is not specified, the default value is metered.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "DevicePoolCompatibilityResult$compatible": "

    Whether the result was compatible with the device pool.

    ", + "Radios$wifi": "

    True if Wi-Fi is enabled at the beginning of the test; otherwise, false.

    ", + "Radios$bluetooth": "

    True if Bluetooth is enabled at the beginning of the test; otherwise, false.

    ", + "Radios$nfc": "

    True if NFC is enabled at the beginning of the test; otherwise, false.

    ", + "Radios$gps": "

    True if GPS is enabled at the beginning of the test; otherwise, false.

    " + } + }, + "CPU": { + "base": "

    Represents the amount of CPU that an app is using on a physical device.

    Note that this does not represent system-wide CPU usage.

    ", + "refs": { + "Device$cpu": "

    Information about the device's CPU.

    " + } + }, + "ContentType": { + "base": null, + "refs": { + "CreateUploadRequest$contentType": "

    The upload's content type (for example, \"application/octet-stream\").

    ", + "Upload$contentType": "

    The upload's content type (for example, \"application/octet-stream\").
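
    In practice the content type given at creation is echoed when the binary itself is pushed. A hedged sketch of that flow, assuming (this model file does not state it) that the upload's url member is a pre-signed PUT target, with net/http, os, and path/filepath added to the earlier imports:

        // uploadApp registers an upload and PUTs the app binary to it.
        func uploadApp(svc *devicefarm.DeviceFarm, projectArn, path string) error {
            up, err := svc.CreateUpload(&devicefarm.CreateUploadInput{
                ProjectArn:  aws.String(projectArn),
                Name:        aws.String(filepath.Base(path)),
                Type:        aws.String(devicefarm.UploadTypeAndroidApp),
                ContentType: aws.String("application/octet-stream"),
            })
            if err != nil {
                return err
            }
            f, err := os.Open(path)
            if err != nil {
                return err
            }
            defer f.Close()
            fi, err := f.Stat()
            if err != nil {
                return err
            }
            req, err := http.NewRequest(http.MethodPut, aws.StringValue(up.Upload.Url), f)
            if err != nil {
                return err
            }
            req.ContentLength = fi.Size() // pre-signed PUTs reject chunked bodies
            req.Header.Set("Content-Type", "application/octet-stream")
            resp, err := http.DefaultClient.Do(req)
            if err != nil {
                return err
            }
            return resp.Body.Close()
        }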

    " + } + }, + "Counters": { + "base": "

    Represents entity counters.

    ", + "refs": { + "Job$counters": "

    The job's result counters.

    ", + "Run$counters": "

    The run's result counters.

    ", + "Suite$counters": "

    The suite's result counters.

    ", + "Test$counters": "

    The test's result counters.

    " + } + }, + "CreateDevicePoolRequest": { + "base": "

    Represents a request to the create device pool operation.

    ", + "refs": { + } + }, + "CreateDevicePoolResult": { + "base": "

    Represents the result of a create device pool request.

    ", + "refs": { + } + }, + "CreateProjectRequest": { + "base": "

    Represents a request to the create project operation.

    ", + "refs": { + } + }, + "CreateProjectResult": { + "base": "

    Represents the result of a create project request.

    ", + "refs": { + } + }, + "CreateUploadRequest": { + "base": "

    Represents a request to the create upload operation.

    ", + "refs": { + } + }, + "CreateUploadResult": { + "base": "

    Represents the result of a create upload request.

    ", + "refs": { + } + }, + "CurrencyCode": { + "base": null, + "refs": { + "MonetaryAmount$currencyCode": "

    The currency code of a monetary amount. For example, USD means \"U.S. dollars.\"

    " + } + }, + "DateTime": { + "base": null, + "refs": { + "Job$created": "

    When the job was created.

    ", + "Job$started": "

    The job's start time.

    ", + "Job$stopped": "

    The job's stop time.

    ", + "OfferingStatus$effectiveOn": "

    The date on which the offering is effective.

    ", + "OfferingTransaction$createdOn": "

    The date on which an offering transaction was created.

    ", + "Project$created": "

    When the project was created.

    ", + "Run$created": "

    When the run was created.

    ", + "Run$started": "

    The run's start time.

    ", + "Run$stopped": "

    The run's stop time.

    ", + "Suite$created": "

    When the suite was created.

    ", + "Suite$started": "

    The suite's start time.

    ", + "Suite$stopped": "

    The suite's stop time.

    ", + "Test$created": "

    When the test was created.

    ", + "Test$started": "

    The test's start time.

    ", + "Test$stopped": "

    The test's stop time.

    ", + "Upload$created": "

    When the upload was created.

    " + } + }, + "DeleteDevicePoolRequest": { + "base": "

    Represents a request to the delete device pool operation.

    ", + "refs": { + } + }, + "DeleteDevicePoolResult": { + "base": "

    Represents the result of a delete device pool request.

    ", + "refs": { + } + }, + "DeleteProjectRequest": { + "base": "

    Represents a request to the delete project operation.

    ", + "refs": { + } + }, + "DeleteProjectResult": { + "base": "

    Represents the result of a delete project request.

    ", + "refs": { + } + }, + "DeleteRunRequest": { + "base": "

    Represents a request to the delete run operation.

    ", + "refs": { + } + }, + "DeleteRunResult": { + "base": "

    Represents the result of a delete run request.

    ", + "refs": { + } + }, + "DeleteUploadRequest": { + "base": "

    Represents a request to the delete upload operation.

    ", + "refs": { + } + }, + "DeleteUploadResult": { + "base": "

    Represents the result of a delete upload request.

    ", + "refs": { + } + }, + "Device": { + "base": "

    Represents a device type that an app is tested against.

    ", + "refs": { + "DevicePoolCompatibilityResult$device": null, + "Devices$member": null, + "GetDeviceResult$device": null, + "Job$device": null, + "Problem$device": "

    Information about the associated device.

    " + } + }, + "DeviceAttribute": { + "base": null, + "refs": { + "IncompatibilityMessage$type": "

    The type of incompatibility.

    Allowed values include:

    • ARN: The ARN.

    • FORM_FACTOR: The form factor (for example, phone or tablet).

    • MANUFACTURER: The manufacturer.

    • PLATFORM: The platform (for example, Android or iOS).

    ", + "Rule$attribute": "

    The rule's stringified attribute. For example, specify the value as \"\\\"abc\\\"\".

    Allowed values include:

    • ARN: The ARN.

    • FORM_FACTOR: The form factor (for example, phone or tablet).

    • MANUFACTURER: The manufacturer.

    • PLATFORM: The platform (for example, Android or iOS).

    " + } + }, + "DeviceFormFactor": { + "base": null, + "refs": { + "Device$formFactor": "

    The device's form factor.

    Allowed values include:

    • PHONE: The phone form factor.

    • TABLET: The tablet form factor.

    " + } + }, + "DeviceMinutes": { + "base": "

    Represents the total (metered or unmetered) minutes used by the resource to run tests. Contains the sum of minutes consumed by all children.

    ", + "refs": { + "Job$deviceMinutes": "

    Represents the total (metered or unmetered) minutes used by the job.

    ", + "Run$deviceMinutes": "

    Represents the total (metered or unmetered) minutes used by the test run.

    ", + "Suite$deviceMinutes": "

    Represents the total (metered or unmetered) minutes used by the test suite.

    ", + "Test$deviceMinutes": "

    Represents the total (metered or unmetered) minutes used by the test.

    " + } + }, + "DevicePlatform": { + "base": null, + "refs": { + "Device$platform": "

    The device's platform.

    Allowed values include:

    • ANDROID: The Android platform.

    • IOS: The iOS platform.

    ", + "Offering$platform": "

    The platform of the device (e.g., ANDROID or IOS).

    ", + "PurchasedDevicesMap$key": null, + "Run$platform": "

    The run's platform.

    Allowed values include:

    • ANDROID: The Android platform.

    • IOS: The iOS platform.

    " + } + }, + "DevicePool": { + "base": "

    Represents a collection of device types.

    ", + "refs": { + "CreateDevicePoolResult$devicePool": "

    The newly created device pool.

    ", + "DevicePools$member": null, + "GetDevicePoolResult$devicePool": null, + "UpdateDevicePoolResult$devicePool": null + } + }, + "DevicePoolCompatibilityResult": { + "base": "

    Represents a device pool compatibility result.

    ", + "refs": { + "DevicePoolCompatibilityResults$member": null + } + }, + "DevicePoolCompatibilityResults": { + "base": null, + "refs": { + "GetDevicePoolCompatibilityResult$compatibleDevices": "

    Information about compatible devices.

    ", + "GetDevicePoolCompatibilityResult$incompatibleDevices": "

    Information about incompatible devices.

    " + } + }, + "DevicePoolType": { + "base": null, + "refs": { + "DevicePool$type": "

    The device pool's type.

    Allowed values include:

    • CURATED: A device pool that is created and managed by AWS Device Farm.

    • PRIVATE: A device pool that is created and managed by the device pool developer.

    ", + "ListDevicePoolsRequest$type": "

    The device pools' type.

    Allowed values include:

    • CURATED: A device pool that is created and managed by AWS Device Farm.

    • PRIVATE: A device pool that is created and managed by the device pool developer.

    " + } + }, + "DevicePools": { + "base": null, + "refs": { + "ListDevicePoolsResult$devicePools": "

    Information about the device pools.

    " + } + }, + "Devices": { + "base": null, + "refs": { + "ListDevicesResult$devices": "

    Information about the devices.

    " + } + }, + "Double": { + "base": null, + "refs": { + "CPU$clock": "

    The clock speed of the device's CPU, expressed in hertz (Hz). For example, a 1.2 GHz CPU is expressed as 1200000000.

    ", + "DeviceMinutes$total": "

    When specified, represents the total minutes used by the resource to run tests.

    ", + "DeviceMinutes$metered": "

    When specified, represents only the sum of metered minutes used by the resource to run tests.

    ", + "DeviceMinutes$unmetered": "

    When specified, represents only the sum of unmetered minutes used by the resource to run tests.

    ", + "Location$latitude": "

    The latitude.

    ", + "Location$longitude": "

    The longitude.

    ", + "MonetaryAmount$amount": "

    The numerical amount of an offering or transaction.

    " + } + }, + "ExecutionResult": { + "base": null, + "refs": { + "Job$result": "

    The job's result.

    Allowed values include:

    • PENDING: A pending condition.

    • PASSED: A passing condition.

    • WARNED: A warning condition.

    • FAILED: A failed condition.

    • SKIPPED: A skipped condition.

    • ERRORED: An error condition.

    • STOPPED: A stopped condition.

    ", + "Problem$result": "

    The problem's result.

    Allowed values include:

    • PENDING: A pending condition.

    • PASSED: A passing condition.

    • WARNED: A warning condition.

    • FAILED: A failed condition.

    • SKIPPED: A skipped condition.

    • ERRORED: An error condition.

    • STOPPED: A stopped condition.

    ", + "Run$result": "

    The run's result.

    Allowed values include:

    • PENDING: A pending condition.

    • PASSED: A passing condition.

    • WARNED: A warning condition.

    • FAILED: A failed condition.

    • SKIPPED: A skipped condition.

    • ERRORED: An error condition.

    • STOPPED: A stopped condition.

    ", + "Suite$result": "

    The suite's result.

    Allowed values include:

    • PENDING: A pending condition.

    • PASSED: A passing condition.

    • WARNED: A warning condition.

    • FAILED: A failed condition.

    • SKIPPED: A skipped condition.

    • ERRORED: An error condition.

    • STOPPED: A stopped condition.

    ", + "Test$result": "

    The test's result.

    Allowed values include:

    • PENDING: A pending condition.

    • PASSED: A passing condition.

    • WARNED: A warning condition.

    • FAILED: A failed condition.

    • SKIPPED: A skipped condition.

    • ERRORED: An error condition.

    • STOPPED: A stopped condition.

    ", + "UniqueProblemsByExecutionResultMap$key": null + } + }, + "ExecutionStatus": { + "base": null, + "refs": { + "Job$status": "

    The job's status.

    Allowed values include:

    • PENDING: A pending status.

    • PENDING_CONCURRENCY: A pending concurrency status.

    • PENDING_DEVICE: A pending device status.

    • PROCESSING: A processing status.

    • SCHEDULING: A scheduling status.

    • PREPARING: A preparing status.

    • RUNNING: A running status.

    • COMPLETED: A completed status.

    • STOPPING: A stopping status.

    ", + "Run$status": "

    The run's status.

    Allowed values include:

    • PENDING: A pending status.

    • PENDING_CONCURRENCY: A pending concurrency status.

    • PENDING_DEVICE: A pending device status.

    • PROCESSING: A processing status.

    • SCHEDULING: A scheduling status.

    • PREPARING: A preparing status.

    • RUNNING: A running status.

    • COMPLETED: A completed status.

    • STOPPING: A stopping status.

    ", + "Suite$status": "

    The suite's status.

    Allowed values include:

    • PENDING: A pending status.

    • PENDING_CONCURRENCY: A pending concurrency status.

    • PENDING_DEVICE: A pending device status.

    • PROCESSING: A processing status.

    • SCHEDULING: A scheduling status.

    • PREPARING: A preparing status.

    • RUNNING: A running status.

    • COMPLETED: A completed status.

    • STOPPING: A stopping status.

    ", + "Test$status": "

    The test's status.

    Allowed values include:

    • PENDING: A pending status.

    • PENDING_CONCURRENCY: A pending concurrency status.

    • PENDING_DEVICE: A pending device status.

    • PROCESSING: A processing status.

    • SCHEDULING: A scheduling status.

    • PREPARING: A preparing status.

    • RUNNING: A running status.

    • COMPLETED: A completed status.

    • STOPPING: A stopping status.

    " + } + }, + "Filter": { + "base": null, + "refs": { + "ScheduleRunTest$filter": "

    The test's filter.

    " + } + }, + "GetAccountSettingsRequest": { + "base": "

    Represents the request sent to retrieve the account settings.

    ", + "refs": { + } + }, + "GetAccountSettingsResult": { + "base": "

    Represents the account settings return values from the GetAccountSettings request.

    ", + "refs": { + } + }, + "GetDevicePoolCompatibilityRequest": { + "base": "

    Represents a request to the get device pool compatibility operation.

    ", + "refs": { + } + }, + "GetDevicePoolCompatibilityResult": { + "base": "

    Represents the result of a describe device pool compatibility request.

    ", + "refs": { + } + }, + "GetDevicePoolRequest": { + "base": "

    Represents a request to the get device pool operation.

    ", + "refs": { + } + }, + "GetDevicePoolResult": { + "base": "

    Represents the result of a get device pool request.

    ", + "refs": { + } + }, + "GetDeviceRequest": { + "base": "

    Represents a request to the get device operation.

    ", + "refs": { + } + }, + "GetDeviceResult": { + "base": "

    Represents the result of a get device request.

    ", + "refs": { + } + }, + "GetJobRequest": { + "base": "

    Represents a request to the get job operation.

    ", + "refs": { + } + }, + "GetJobResult": { + "base": "

    Represents the result of a get job request.

    ", + "refs": { + } + }, + "GetOfferingStatusRequest": { + "base": "

    Represents the request to retrieve the offering status for the specified customer or account.

    ", + "refs": { + } + }, + "GetOfferingStatusResult": { + "base": "

    Returns the status result for a device offering.

    ", + "refs": { + } + }, + "GetProjectRequest": { + "base": "

    Represents a request to the get project operation.

    ", + "refs": { + } + }, + "GetProjectResult": { + "base": "

    Represents the result of a get project request.

    ", + "refs": { + } + }, + "GetRunRequest": { + "base": "

    Represents a request to the get run operation.

    ", + "refs": { + } + }, + "GetRunResult": { + "base": "

    Represents the result of a get run request.

    ", + "refs": { + } + }, + "GetSuiteRequest": { + "base": "

    Represents a request to the get suite operation.

    ", + "refs": { + } + }, + "GetSuiteResult": { + "base": "

    Represents the result of a get suite request.

    ", + "refs": { + } + }, + "GetTestRequest": { + "base": "

    Represents a request to the get test operation.

    ", + "refs": { + } + }, + "GetTestResult": { + "base": "

    Represents the result of a get test request.

    ", + "refs": { + } + }, + "GetUploadRequest": { + "base": "

    Represents a request to the get upload operation.

    ", + "refs": { + } + }, + "GetUploadResult": { + "base": "

    Represents the result of a get upload request.

    ", + "refs": { + } + }, + "IdempotencyException": { + "base": "

    An entity with the same name already exists.

    ", + "refs": { + } + }, + "IncompatibilityMessage": { + "base": "

    Represents information about incompatibility.

    ", + "refs": { + "IncompatibilityMessages$member": null + } + }, + "IncompatibilityMessages": { + "base": null, + "refs": { + "DevicePoolCompatibilityResult$incompatibilityMessages": "

    Information about the incompatibility.

    " + } + }, + "Integer": { + "base": null, + "refs": { + "Counters$total": "

    The total number of entities.

    ", + "Counters$passed": "

    The number of passed entities.

    ", + "Counters$failed": "

    The number of failed entities.

    ", + "Counters$warned": "

    The number of warned entities.

    ", + "Counters$errored": "

    The number of errored entities.

    ", + "Counters$stopped": "

    The number of stopped entities.

    ", + "Counters$skipped": "

    The number of skipped entities.

    ", + "OfferingStatus$quantity": "

    The number of available devices in the offering.

    ", + "PurchaseOfferingRequest$quantity": "

    The number of device slots you wish to purchase in an offering request.

    ", + "PurchasedDevicesMap$value": null, + "RenewOfferingRequest$quantity": "

    The quantity requested in an offering renewal.

    ", + "Resolution$width": "

    The screen resolution's width, expressed in pixels.

    ", + "Resolution$height": "

    The screen resolution's height, expressed in pixels.

    ", + "Run$totalJobs": "

    The total number of jobs for the run.

    ", + "Run$completedJobs": "

    The total number of completed jobs.

    " + } + }, + "Job": { + "base": "

    Represents a job.

    ", + "refs": { + "GetJobResult$job": null, + "Jobs$member": null + } + }, + "Jobs": { + "base": null, + "refs": { + "ListJobsResult$jobs": "

    Information about the jobs.

    " + } + }, + "LimitExceededException": { + "base": "

    A limit was exceeded.

    ", + "refs": { + } + }, + "ListArtifactsRequest": { + "base": "

    Represents a request to the list artifacts operation.

    ", + "refs": { + } + }, + "ListArtifactsResult": { + "base": "

    Represents the result of a list artifacts operation.

    ", + "refs": { + } + }, + "ListDevicePoolsRequest": { + "base": "

    Represents a request to the list device pools operation.

    ", + "refs": { + } + }, + "ListDevicePoolsResult": { + "base": "

    Represents the result of a list device pools request.

    ", + "refs": { + } + }, + "ListDevicesRequest": { + "base": "

    Represents a request to the list devices operation.

    ", + "refs": { + } + }, + "ListDevicesResult": { + "base": "

    Represents the result of a list devices operation.

    ", + "refs": { + } + }, + "ListJobsRequest": { + "base": "

    Represents a request to the list jobs operation.

    ", + "refs": { + } + }, + "ListJobsResult": { + "base": "

    Represents the result of a list jobs request.

    ", + "refs": { + } + }, + "ListOfferingTransactionsRequest": { + "base": "

    Represents the request to list the offering transaction history.

    ", + "refs": { + } + }, + "ListOfferingTransactionsResult": { + "base": "

    Returns the transaction log of the specified offerings.

    ", + "refs": { + } + }, + "ListOfferingsRequest": { + "base": "

    Represents the request to list all offerings.

    ", + "refs": { + } + }, + "ListOfferingsResult": { + "base": "

    Represents the return values of the list of offerings.

    ", + "refs": { + } + }, + "ListProjectsRequest": { + "base": "

    Represents a request to the list projects operation.

    ", + "refs": { + } + }, + "ListProjectsResult": { + "base": "

    Represents the result of a list projects request.

    ", + "refs": { + } + }, + "ListRunsRequest": { + "base": "

    Represents a request to the list runs operation.

    ", + "refs": { + } + }, + "ListRunsResult": { + "base": "

    Represents the result of a list runs request.

    ", + "refs": { + } + }, + "ListSamplesRequest": { + "base": "

    Represents a request to the list samples operation.

    ", + "refs": { + } + }, + "ListSamplesResult": { + "base": "

    Represents the result of a list samples request.

    ", + "refs": { + } + }, + "ListSuitesRequest": { + "base": "

    Represents a request to the list suites operation.

    ", + "refs": { + } + }, + "ListSuitesResult": { + "base": "

    Represents the result of a list suites request.

    ", + "refs": { + } + }, + "ListTestsRequest": { + "base": "

    Represents a request to the list tests operation.

    ", + "refs": { + } + }, + "ListTestsResult": { + "base": "

    Represents the result of a list tests request.

    ", + "refs": { + } + }, + "ListUniqueProblemsRequest": { + "base": "

    Represents a request to the list unique problems operation.

    ", + "refs": { + } + }, + "ListUniqueProblemsResult": { + "base": "

    Represents the result of a list unique problems request.

    ", + "refs": { + } + }, + "ListUploadsRequest": { + "base": "

    Represents a request to the list uploads operation.

    ", + "refs": { + } + }, + "ListUploadsResult": { + "base": "

    Represents the result of a list uploads request.

    ", + "refs": { + } + }, + "Location": { + "base": "

    Represents a latitude and longitude pair, expressed in geographic coordinate system degrees (for example 47.6204, -122.3491).

    Elevation is currently not supported.

    ", + "refs": { + "ScheduleRunConfiguration$location": "

    Information about the location that is used for the run.

    " + } + }, + "Long": { + "base": null, + "refs": { + "Device$heapSize": "

    The device's heap size, expressed in bytes.

    ", + "Device$memory": "

    The device's total memory size, expressed in bytes.

    " + } + }, + "Message": { + "base": null, + "refs": { + "ArgumentException$message": "

    Any additional information about the exception.

    ", + "CreateDevicePoolRequest$description": "

    The device pool's description.

    ", + "DevicePool$description": "

    The device pool's description.

    ", + "IdempotencyException$message": "

    Any additional information about the exception.

    ", + "IncompatibilityMessage$message": "

    A message about the incompatibility.

    ", + "Job$message": "

    A message about the job's result.

    ", + "LimitExceededException$message": "

    Any additional information about the exception.

    ", + "NotEligibleException$message": "

    The HTTP response code of a Not Eligible exception.

    ", + "NotFoundException$message": "

    Any additional information about the exception.

    ", + "Offering$description": "

    A string describing the offering.

    ", + "Problem$message": "

    A message about the problem's result.

    ", + "Run$message": "

    A message about the run's result.

    ", + "ServiceAccountException$message": "

    Any additional information about the exception.

    ", + "Suite$message": "

    A message about the suite's result.

    ", + "Test$message": "

    A message about the test's result.

    ", + "UniqueProblem$message": "

    A message about the unique problems' result.

    ", + "UpdateDevicePoolRequest$description": "

    A description of the device pool you wish to update.

    ", + "Upload$message": "

    A message about the upload's result.

    " + } + }, + "Metadata": { + "base": null, + "refs": { + "Upload$metadata": "

    The upload's metadata. For example, for Android, this contains information that is parsed from the manifest and is displayed in the AWS Device Farm console after the associated app is uploaded.

    " + } + }, + "MonetaryAmount": { + "base": "

    A number representing the monetary amount for an offering or transaction.

    ", + "refs": { + "OfferingTransaction$cost": "

    The cost of an offering transaction.

    ", + "RecurringCharge$cost": "

    The cost of the recurring charge.

    " + } + }, + "Name": { + "base": null, + "refs": { + "Artifact$name": "

    The artifact's name.

    ", + "CreateDevicePoolRequest$name": "

    The device pool's name.

    ", + "CreateProjectRequest$name": "

    The project's name.

    ", + "CreateUploadRequest$name": "

    The upload's file name.

    ", + "Device$name": "

    The device's display name.

    ", + "DevicePool$name": "

    The device pool's name.

    ", + "Job$name": "

    The job's name.

    ", + "ProblemDetail$name": "

    The problem detail's name.

    ", + "Project$name": "

    The project's name.

    ", + "Run$name": "

    The run's name.

    ", + "ScheduleRunRequest$name": "

    The name for the run to be scheduled.

    ", + "Suite$name": "

    The suite's name.

    ", + "Test$name": "

    The test's name.

    ", + "UpdateDevicePoolRequest$name": "

    A string representing the name of the device pool you wish to update.

    ", + "UpdateProjectRequest$name": "

    A string representing the new name of the project that you are updating.

    ", + "Upload$name": "

    The upload's file name.

    " + } + }, + "NotEligibleException": { + "base": "

    This exception is thrown when a user is not eligible to perform the specified transaction.

    ", + "refs": { + } + }, + "NotFoundException": { + "base": "

    The specified entity was not found.

    ", + "refs": { + } + }, + "Offering": { + "base": "

    Represents the metadata of a device offering.

    ", + "refs": { + "OfferingStatus$offering": "

    Represents the metadata of an offering status.

    ", + "Offerings$member": null + } + }, + "OfferingIdentifier": { + "base": null, + "refs": { + "Offering$id": "

    The ID that corresponds to a device offering.

    ", + "OfferingStatusMap$key": null, + "PurchaseOfferingRequest$offeringId": "

    The ID of the offering.

    ", + "RenewOfferingRequest$offeringId": "

    The ID of a request to renew an offering.

    " + } + }, + "OfferingStatus": { + "base": "

    The status of the offering.

    ", + "refs": { + "OfferingStatusMap$value": null, + "OfferingTransaction$offeringStatus": "

    The status of an offering transaction.

    " + } + }, + "OfferingStatusMap": { + "base": null, + "refs": { + "GetOfferingStatusResult$current": "

    When specified, gets the offering status for the current period.

    ", + "GetOfferingStatusResult$nextPeriod": "

    When specified, gets the offering status for the next period.

    " + } + }, + "OfferingTransaction": { + "base": "

    Represents the metadata of an offering transaction.

    ", + "refs": { + "OfferingTransactions$member": null, + "PurchaseOfferingResult$offeringTransaction": "

    Represents the offering transaction for the purchase result.

    ", + "RenewOfferingResult$offeringTransaction": "

    Represents the status of the offering transaction for the renewal.

    " + } + }, + "OfferingTransactionType": { + "base": null, + "refs": { + "OfferingStatus$type": "

    The type specified for the offering status.

    " + } + }, + "OfferingTransactions": { + "base": null, + "refs": { + "ListOfferingTransactionsResult$offeringTransactions": "

    The audit log of subscriptions you have purchased and modified through AWS Device Farm.

    " + } + }, + "OfferingType": { + "base": null, + "refs": { + "Offering$type": "

    The type of offering (e.g., \"RECURRING\") for a device.

    " + } + }, + "Offerings": { + "base": null, + "refs": { + "ListOfferingsResult$offerings": "

    The offerings returned by the list offerings request.

    " + } + }, + "PaginationToken": { + "base": null, + "refs": { + "GetOfferingStatusRequest$nextToken": "

    An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

    ", + "GetOfferingStatusResult$nextToken": "

    An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

    ", + "ListArtifactsRequest$nextToken": "

    An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

    ", + "ListArtifactsResult$nextToken": "

    If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

    ", + "ListDevicePoolsRequest$nextToken": "

    An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

    ", + "ListDevicePoolsResult$nextToken": "

    If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

    ", + "ListDevicesRequest$nextToken": "

    An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

    ", + "ListDevicesResult$nextToken": "

    If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

    ", + "ListJobsRequest$nextToken": "

    An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

    ", + "ListJobsResult$nextToken": "

    If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

    ", + "ListOfferingTransactionsRequest$nextToken": "

    An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

    ", + "ListOfferingTransactionsResult$nextToken": "

    An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

    ", + "ListOfferingsRequest$nextToken": "

    An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

    ", + "ListOfferingsResult$nextToken": "

    An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

    ", + "ListProjectsRequest$nextToken": "

    An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

    ", + "ListProjectsResult$nextToken": "

    If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

    ", + "ListRunsRequest$nextToken": "

    An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

    ", + "ListRunsResult$nextToken": "

    If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

    ", + "ListSamplesRequest$nextToken": "

    An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

    ", + "ListSamplesResult$nextToken": "

    If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

    ", + "ListSuitesRequest$nextToken": "

    An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

    ", + "ListSuitesResult$nextToken": "

    If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

    ", + "ListTestsRequest$nextToken": "

    An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

    ", + "ListTestsResult$nextToken": "

    If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

    ", + "ListUniqueProblemsRequest$nextToken": "

    An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

    ", + "ListUniqueProblemsResult$nextToken": "

    If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

    ", + "ListUploadsRequest$nextToken": "

    An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

    ", + "ListUploadsResult$nextToken": "

    If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

    " + } + }, + "Problem": { + "base": "

    Represents a specific warning or failure.

    ", + "refs": { + "Problems$member": null + } + }, + "ProblemDetail": { + "base": "

    Information about a problem detail.

    ", + "refs": { + "Problem$run": "

    Information about the associated run.

    ", + "Problem$job": "

    Information about the associated job.

    ", + "Problem$suite": "

    Information about the associated suite.

    ", + "Problem$test": "

    Information about the associated test.

    " + } + }, + "Problems": { + "base": null, + "refs": { + "UniqueProblem$problems": "

    Information about the problems.

    " + } + }, + "Project": { + "base": "

    Represents an operating-system neutral workspace for running and managing tests.

    ", + "refs": { + "CreateProjectResult$project": "

    The newly created project.

    ", + "GetProjectResult$project": null, + "Projects$member": null, + "UpdateProjectResult$project": null + } + }, + "Projects": { + "base": null, + "refs": { + "ListProjectsResult$projects": "

    Information about the projects.

    " + } + }, + "PurchaseOfferingRequest": { + "base": "

    Represents a request for a purchase offering.

    ", + "refs": { + } + }, + "PurchaseOfferingResult": { + "base": "

    The result of the purchase offering (e.g., success or failure).

    ", + "refs": { + } + }, + "PurchasedDevicesMap": { + "base": null, + "refs": { + "AccountSettings$unmeteredDevices": "

    Returns the unmetered devices you have purchased or want to purchase.

    ", + "AccountSettings$unmeteredRemoteAccessDevices": "

    Returns the unmetered remote access devices you have purchased or want to purchase.

    " + } + }, + "Radios": { + "base": "

    Represents the set of radios and their states on a device. Examples of radios include Wi-Fi, GPS, Bluetooth, and NFC.

    ", + "refs": { + "ScheduleRunConfiguration$radios": "

    Information about the radio states for the run.

    " + } + }, + "RecurringCharge": { + "base": "

    Specifies whether charges for devices will be recurring.

    ", + "refs": { + "RecurringCharges$member": null + } + }, + "RecurringChargeFrequency": { + "base": null, + "refs": { + "RecurringCharge$frequency": "

    The frequency in which charges will recur.

    " + } + }, + "RecurringCharges": { + "base": null, + "refs": { + "Offering$recurringCharges": "

    Specifies whether there are recurring charges for the offering.

    " + } + }, + "RenewOfferingRequest": { + "base": "

    A request representing an offering renewal.

    ", + "refs": { + } + }, + "RenewOfferingResult": { + "base": "

    The result of a renewal offering.

    ", + "refs": { + } + }, + "Resolution": { + "base": "

    Represents the screen resolution of a device in height and width, expressed in pixels.

    ", + "refs": { + "Device$resolution": null + } + }, + "Rule": { + "base": "

    Represents a condition for a device pool.

    ", + "refs": { + "Rules$member": null + } + }, + "RuleOperator": { + "base": null, + "refs": { + "Rule$operator": "

    The rule's operator.

    • EQUALS: The equals operator.

    • GREATER_THAN: The greater-than operator.

    • IN: The in operator.

    • LESS_THAN: The less-than operator.

    • NOT_IN: The not-in operator.

    " + } + }, + "Rules": { + "base": null, + "refs": { + "CreateDevicePoolRequest$rules": "

    The device pool's rules.

    ", + "DevicePool$rules": "

    Information about the device pool's rules.

    ", + "UpdateDevicePoolRequest$rules": "

    Represents the rules you wish to modify for the device pool. Updating rules is optional; however, if you choose to update rules for your request, the update will replace the existing rules.

    " + } + }, + "Run": { + "base": "

    Represents an app on a set of devices with a specific test and configuration.

    ", + "refs": { + "GetRunResult$run": null, + "Runs$member": null, + "ScheduleRunResult$run": "

    Information about the scheduled run.

    ", + "StopRunResult$run": null + } + }, + "Runs": { + "base": null, + "refs": { + "ListRunsResult$runs": "

    Information about the runs.

    " + } + }, + "Sample": { + "base": "

    Represents a sample of performance data.

    ", + "refs": { + "Samples$member": null + } + }, + "SampleType": { + "base": null, + "refs": { + "Sample$type": "

    The sample's type.

    Must be one of the following values:

    • CPU: A CPU sample type. This is expressed as the app's CPU processing time (including child processes), as reported per process, as a percentage.

    • MEMORY: A memory usage sample type. This is expressed as the total proportional set size of an app process, in kilobytes.

    • NATIVE_AVG_DRAWTIME

    • NATIVE_FPS

    • NATIVE_FRAMES

    • NATIVE_MAX_DRAWTIME

    • NATIVE_MIN_DRAWTIME

    • OPENGL_AVG_DRAWTIME

    • OPENGL_FPS

    • OPENGL_FRAMES

    • OPENGL_MAX_DRAWTIME

    • OPENGL_MIN_DRAWTIME

    • RX

    • RX_RATE: The total number of bytes per second (TCP and UDP) that are received, by app process.

    • THREADS: A threads sample type. This is expressed as the total number of threads per app process.

    • TX

    • TX_RATE: The total number of bytes per second (TCP and UDP) that are sent, by app process.

    " + } + }, + "Samples": { + "base": null, + "refs": { + "ListSamplesResult$samples": "

    Information about the samples.

    " + } + }, + "ScheduleRunConfiguration": { + "base": "

    Represents the settings for a run. Includes things like location, radio states, auxiliary apps, and network profiles.

    ", + "refs": { + "ScheduleRunRequest$configuration": "

    Information about the settings for the run to be scheduled.

    " + } + }, + "ScheduleRunRequest": { + "base": "

    Represents a request to the schedule run operation.

    ", + "refs": { + } + }, + "ScheduleRunResult": { + "base": "

    Represents the result of a schedule run request.

    ", + "refs": { + } + }, + "ScheduleRunTest": { + "base": "

    Represents additional test settings.

    ", + "refs": { + "ScheduleRunRequest$test": "

    Information about the test for the run to be scheduled.

    " + } + }, + "ServiceAccountException": { + "base": "

    There was a problem with the service account.

    ", + "refs": { + } + }, + "StopRunRequest": { + "base": "

    Represents the request to stop a specific run.

    ", + "refs": { + } + }, + "StopRunResult": { + "base": "

    Represents the results of your stop run attempt.

    ", + "refs": { + } + }, + "String": { + "base": null, + "refs": { + "Artifact$extension": "

    The artifact's file extension.

    ", + "CPU$frequency": "

    The CPU's frequency.

    ", + "CPU$architecture": "

    The CPU's architecture, for example x86 or ARM.

    ", + "Device$manufacturer": "

    The device's manufacturer name.

    ", + "Device$model": "

    The device's model name.

    ", + "Device$os": "

    The device's operating system type.

    ", + "Device$image": "

    The device's image name.

    ", + "Device$carrier": "

    The device's carrier.

    ", + "Device$radio": "

    The device's radio.

    ", + "Rule$value": "

    The rule's value.

    ", + "ScheduleRunConfiguration$locale": "

    Information about the locale that is used for the run.

    ", + "TestParameters$key": null, + "TestParameters$value": null + } + }, + "Suite": { + "base": "

    Represents a collection of one or more tests.

    ", + "refs": { + "GetSuiteResult$suite": null, + "Suites$member": null + } + }, + "Suites": { + "base": null, + "refs": { + "ListSuitesResult$suites": "

    Information about the suites.

    " + } + }, + "Test": { + "base": "

    Represents a condition that is evaluated.

    ", + "refs": { + "GetTestResult$test": null, + "Tests$member": null + } + }, + "TestParameters": { + "base": null, + "refs": { + "ScheduleRunTest$parameters": "

    The test's parameters, such as test framework parameters and fixture settings.

    " + } + }, + "TestType": { + "base": null, + "refs": { + "GetDevicePoolCompatibilityRequest$testType": "

    The test type for the specified device pool.

    Allowed values include the following:

    • BUILTIN_FUZZ: The built-in fuzz type.

    • BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.

    • APPIUM_JAVA_JUNIT: The Appium Java JUnit type.

    • APPIUM_JAVA_TESTNG: The Appium Java TestNG type.

    • APPIUM_PYTHON: The Appium Python type.

    • APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for Web apps.

    • APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for Web apps.

    • APPIUM_WEB_PYTHON: The Appium Python type for Web apps.

    • CALABASH: The Calabash type.

    • INSTRUMENTATION: The Instrumentation type.

    • UIAUTOMATION: The uiautomation type.

    • UIAUTOMATOR: The uiautomator type.

    • XCTEST: The Xcode test type.

    • XCTEST_UI: The Xcode UI test type.

    ", + "Job$type": "

    The job's type.

    Allowed values include the following:

    • BUILTIN_FUZZ: The built-in fuzz type.

    • BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.

    • APPIUM_JAVA_JUNIT: The Appium Java JUnit type.

    • APPIUM_JAVA_TESTNG: The Appium Java TestNG type.

    • APPIUM_PYTHON: The Appium Python type.

    • APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for Web apps.

    • APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for Web apps.

    • APPIUM_WEB_PYTHON: The Appium Python type for Web apps.

    • CALABASH: The Calabash type.

    • INSTRUMENTATION: The Instrumentation type.

    • UIAUTOMATION: The uiautomation type.

    • UIAUTOMATOR: The uiautomator type.

    • XCTEST: The Xcode test type.

    • XCTEST_UI: The Xcode UI test type.

    ", + "Run$type": "

    The run's type.

    Must be one of the following values:

    • BUILTIN_FUZZ: The built-in fuzz type.

    • BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.

    • APPIUM_JAVA_JUNIT: The Appium Java JUnit type.

    • APPIUM_JAVA_TESTNG: The Appium Java TestNG type.

    • APPIUM_PYTHON: The Appium Python type.

    • APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for Web apps.

    • APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for Web apps.

    • APPIUM_WEB_PYTHON: The Appium Python type for Web apps.

    • CALABASH: The Calabash type.

    • INSTRUMENTATION: The Instrumentation type.

    • UIAUTOMATION: The uiautomation type.

    • UIAUTOMATOR: The uiautomator type.

    • XCTEST: The Xcode test type.

    • XCTEST_UI: The Xcode UI test type.

    ", + "ScheduleRunTest$type": "

    The test's type.

    Must be one of the following values:

    • BUILTIN_FUZZ: The built-in fuzz type.

    • BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.

    • APPIUM_JAVA_JUNIT: The Appium Java JUnit type.

    • APPIUM_JAVA_TESTNG: The Appium Java TestNG type.

    • APPIUM_PYTHON: The Appium Python type.

    • APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for Web apps.

    • APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for Web apps.

    • APPIUM_WEB_PYTHON: The Appium Python type for Web apps.

    • CALABASH: The Calabash type.

    • INSTRUMENTATION: The Instrumentation type.

    • UIAUTOMATION: The uiautomation type.

    • UIAUTOMATOR: The uiautomator type.

    • XCTEST: The Xcode test type.

    • XCTEST_UI: The Xcode UI test type.

    ", + "Suite$type": "

    The suite's type.

    Must be one of the following values:

    • BUILTIN_FUZZ: The built-in fuzz type.

    • BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.

    • APPIUM_JAVA_JUNIT: The Appium Java JUnit type.

    • APPIUM_JAVA_TESTNG: The Appium Java TestNG type.

    • APPIUM_PYTHON: The Appium Python type.

    • APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for Web apps.

    • APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for Web apps.

    • APPIUM_WEB_PYTHON: The Appium Python type for Web apps.

    • CALABASH: The Calabash type.

    • INSTRUMENTATION: The Instrumentation type.

    • UIAUTOMATION: The uiautomation type.

    • UIAUTOMATOR: The uiautomator type.

    • XCTEST: The Xcode test type.

    • XCTEST_UI: The Xcode UI test type.

    ", + "Test$type": "

    The test's type.

    Must be one of the following values:

    • BUILTIN_FUZZ: The built-in fuzz type.

    • BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.

    • APPIUM_JAVA_JUNIT: The Appium Java JUnit type.

    • APPIUM_JAVA_TESTNG: The Appium Java TestNG type.

    • APPIUM_PYTHON: The Appium Python type.

    • APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for Web apps.

    • APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for Web apps.

    • APPIUM_WEB_PYTHON: The Appium Python type for Web apps.

    • CALABASH: The Calabash type.

    • INSTRUMENTATION: The Instrumentation type.

    • UIAUTOMATION: The uiautomation type.

    • UIAUTOMATOR: The uiautomator type.

    • XCTEST: The Xcode test type.

    • XCTEST_UI: The Xcode UI test type.

    " + } + }, + "Tests": { + "base": null, + "refs": { + "ListTestsResult$tests": "

    Information about the tests.

    " + } + }, + "TransactionIdentifier": { + "base": null, + "refs": { + "OfferingTransaction$transactionId": "

    The transaction ID of the offering transaction.

    " + } + }, + "URL": { + "base": null, + "refs": { + "Artifact$url": "

    The pre-signed Amazon S3 URL that can be used with a corresponding GET request to download the artifact's file.

    ", + "Sample$url": "

    The pre-signed Amazon S3 URL that can be used with a corresponding GET request to download the sample's file.

    ", + "Upload$url": "

    The pre-signed Amazon S3 URL that was used to store a file through a corresponding PUT request.

    " + } + }, + "UniqueProblem": { + "base": "

    A collection of one or more problems, grouped by their result.

    ", + "refs": { + "UniqueProblems$member": null + } + }, + "UniqueProblems": { + "base": null, + "refs": { + "UniqueProblemsByExecutionResultMap$value": null + } + }, + "UniqueProblemsByExecutionResultMap": { + "base": null, + "refs": { + "ListUniqueProblemsResult$uniqueProblems": "

    Information about the unique problems.

    Allowed values include:

    • PENDING: A pending condition.

    • PASSED: A passing condition.

    • WARNED: A warning condition.

    • FAILED: A failed condition.

    • SKIPPED: A skipped condition.

    • ERRORED: An error condition.

    • STOPPED: A stopped condition.

    " + } + }, + "UpdateDevicePoolRequest": { + "base": "

    Represents a request to the update device pool operation.

    ", + "refs": { + } + }, + "UpdateDevicePoolResult": { + "base": "

    Represents the result of an update device pool request.

    ", + "refs": { + } + }, + "UpdateProjectRequest": { + "base": "

    Represents a request to the update project operation.

    ", + "refs": { + } + }, + "UpdateProjectResult": { + "base": "

    Represents the result of an update project request.

    ", + "refs": { + } + }, + "Upload": { + "base": "

    An app, or a set of one or more tests, that is being uploaded or has already been uploaded.

    ", + "refs": { + "CreateUploadResult$upload": "

    The newly created upload.

    ", + "GetUploadResult$upload": null, + "Uploads$member": null + } + }, + "UploadStatus": { + "base": null, + "refs": { + "Upload$status": "

    The upload's status.

    Must be one of the following values:

    • FAILED: A failed status.

    • INITIALIZED: An initialized status.

    • PROCESSING: A processing status.

    • SUCCEEDED: A succeeded status.

    " + } + }, + "UploadType": { + "base": null, + "refs": { + "CreateUploadRequest$type": "

    The upload's upload type.

    Must be one of the following values:

    • ANDROID_APP: An Android upload.

    • IOS_APP: An iOS upload.

    • WEB_APP: A web application upload.

    • EXTERNAL_DATA: An external data upload.

    • APPIUM_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload.

    • APPIUM_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload.

    • APPIUM_PYTHON_TEST_PACKAGE: An Appium Python test package upload.

    • APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload.

    • APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload.

    • APPIUM_WEB_PYTHON_TEST_PACKAGE: An Appium Python test package upload.

    • CALABASH_TEST_PACKAGE: A Calabash test package upload.

    • INSTRUMENTATION_TEST_PACKAGE: An instrumentation upload.

    • UIAUTOMATION_TEST_PACKAGE: A uiautomation test package upload.

    • UIAUTOMATOR_TEST_PACKAGE: A uiautomator test package upload.

    • XCTEST_TEST_PACKAGE: An Xcode test package upload.

    • XCTEST_UI_TEST_PACKAGE: An Xcode UI test package upload.

    Note: If you call CreateUpload with WEB_APP specified, AWS Device Farm throws an ArgumentException error.

    ", + "Upload$type": "

    The upload's type.

    Must be one of the following values:

    • ANDROID_APP: An Android upload.

    • IOS_APP: An iOS upload.

    • WEB_APP: A web application upload.

    • EXTERNAL_DATA: An external data upload.

    • APPIUM_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload.

    • APPIUM_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload.

    • APPIUM_PYTHON_TEST_PACKAGE: An Appium Python test package upload.

    • APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload.

    • APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload.

    • APPIUM_WEB_PYTHON_TEST_PACKAGE: An Appium Python test package upload.

    • CALABASH_TEST_PACKAGE: A Calabash test package upload.

    • INSTRUMENTATION_TEST_PACKAGE: An instrumentation upload.

    • UIAUTOMATION_TEST_PACKAGE: A uiautomation test package upload.

    • UIAUTOMATOR_TEST_PACKAGE: A uiautomator test package upload.

    • XCTEST_TEST_PACKAGE: An Xcode test package upload.

    • XCTEST_UI_TEST_PACKAGE: An Xcode UI test package upload.

    " + } + }, + "Uploads": { + "base": null, + "refs": { + "ListUploadsResult$uploads": "

    Information about the uploads.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/devicefarm/2015-06-23/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/devicefarm/2015-06-23/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/devicefarm/2015-06-23/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/devicefarm/2015-06-23/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/devicefarm/2015-06-23/paginators-1.json new file mode 100644 index 000000000..ce5f8d790 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/devicefarm/2015-06-23/paginators-1.json @@ -0,0 +1,74 @@ +{ + "pagination": { + "GetOfferingStatus": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": ["current","nextPeriod"] + }, + "ListArtifacts": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "artifacts" + }, + "ListDevicePools": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "devicePools" + }, + "ListDevices": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "devices" + }, + "ListJobs": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "jobs" + }, + "ListOfferingTransactions": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "offeringTransactions" + }, + "ListOfferings": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "offerings" + }, + "ListProjects": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "projects" + }, + "ListRuns": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "runs" + }, + "ListSamples": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "samples" + }, + "ListSuites": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "suites" + }, + "ListTests": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "tests" + }, + "ListUniqueProblems": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "uniqueProblems" + }, + "ListUploads": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "uploads" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/directconnect/2012-10-25/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/directconnect/2012-10-25/api-2.json new file mode 100644 index 000000000..1a2870bac --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/directconnect/2012-10-25/api-2.json @@ -0,0 +1,793 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2012-10-25", + "endpointPrefix":"directconnect", + "jsonVersion":"1.1", + "protocol":"json", + "serviceFullName":"AWS Direct Connect", + "signatureVersion":"v4", + "targetPrefix":"OvertureService" + }, + "operations":{ + "AllocateConnectionOnInterconnect":{ + "name":"AllocateConnectionOnInterconnect", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AllocateConnectionOnInterconnectRequest"}, + "output":{"shape":"Connection"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ] + }, + "AllocatePrivateVirtualInterface":{ + "name":"AllocatePrivateVirtualInterface", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"AllocatePrivateVirtualInterfaceRequest"}, + "output":{"shape":"VirtualInterface"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ] + }, + "AllocatePublicVirtualInterface":{ + "name":"AllocatePublicVirtualInterface", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AllocatePublicVirtualInterfaceRequest"}, + "output":{"shape":"VirtualInterface"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ] + }, + "ConfirmConnection":{ + "name":"ConfirmConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ConfirmConnectionRequest"}, + "output":{"shape":"ConfirmConnectionResponse"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ] + }, + "ConfirmPrivateVirtualInterface":{ + "name":"ConfirmPrivateVirtualInterface", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ConfirmPrivateVirtualInterfaceRequest"}, + "output":{"shape":"ConfirmPrivateVirtualInterfaceResponse"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ] + }, + "ConfirmPublicVirtualInterface":{ + "name":"ConfirmPublicVirtualInterface", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ConfirmPublicVirtualInterfaceRequest"}, + "output":{"shape":"ConfirmPublicVirtualInterfaceResponse"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ] + }, + "CreateConnection":{ + "name":"CreateConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateConnectionRequest"}, + "output":{"shape":"Connection"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ] + }, + "CreateInterconnect":{ + "name":"CreateInterconnect", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateInterconnectRequest"}, + "output":{"shape":"Interconnect"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ] + }, + "CreatePrivateVirtualInterface":{ + "name":"CreatePrivateVirtualInterface", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePrivateVirtualInterfaceRequest"}, + "output":{"shape":"VirtualInterface"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ] + }, + "CreatePublicVirtualInterface":{ + "name":"CreatePublicVirtualInterface", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePublicVirtualInterfaceRequest"}, + "output":{"shape":"VirtualInterface"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ] + }, + "DeleteConnection":{ + "name":"DeleteConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteConnectionRequest"}, + "output":{"shape":"Connection"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ] + }, + "DeleteInterconnect":{ + "name":"DeleteInterconnect", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteInterconnectRequest"}, + "output":{"shape":"DeleteInterconnectResponse"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ] + }, + "DeleteVirtualInterface":{ + 
"name":"DeleteVirtualInterface", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVirtualInterfaceRequest"}, + "output":{"shape":"DeleteVirtualInterfaceResponse"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ] + }, + "DescribeConnectionLoa":{ + "name":"DescribeConnectionLoa", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeConnectionLoaRequest"}, + "output":{"shape":"DescribeConnectionLoaResponse"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ] + }, + "DescribeConnections":{ + "name":"DescribeConnections", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeConnectionsRequest"}, + "output":{"shape":"Connections"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ] + }, + "DescribeConnectionsOnInterconnect":{ + "name":"DescribeConnectionsOnInterconnect", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeConnectionsOnInterconnectRequest"}, + "output":{"shape":"Connections"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ] + }, + "DescribeInterconnectLoa":{ + "name":"DescribeInterconnectLoa", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeInterconnectLoaRequest"}, + "output":{"shape":"DescribeInterconnectLoaResponse"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ] + }, + "DescribeInterconnects":{ + "name":"DescribeInterconnects", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeInterconnectsRequest"}, + "output":{"shape":"Interconnects"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ] + }, + "DescribeLocations":{ + "name":"DescribeLocations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{"shape":"Locations"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ] + }, + "DescribeVirtualGateways":{ + "name":"DescribeVirtualGateways", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{"shape":"VirtualGateways"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ] + }, + "DescribeVirtualInterfaces":{ + "name":"DescribeVirtualInterfaces", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVirtualInterfacesRequest"}, + "output":{"shape":"VirtualInterfaces"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ] + } + }, + "shapes":{ + "ASN":{"type":"integer"}, + "AllocateConnectionOnInterconnectRequest":{ + "type":"structure", + "required":[ + "bandwidth", + "connectionName", + "ownerAccount", + "interconnectId", + "vlan" + ], + "members":{ + "bandwidth":{"shape":"Bandwidth"}, + "connectionName":{"shape":"ConnectionName"}, + "ownerAccount":{"shape":"OwnerAccount"}, + "interconnectId":{"shape":"InterconnectId"}, + "vlan":{"shape":"VLAN"} + } + }, + "AllocatePrivateVirtualInterfaceRequest":{ + "type":"structure", + "required":[ + "connectionId", + "ownerAccount", + "newPrivateVirtualInterfaceAllocation" + ], + "members":{ + "connectionId":{"shape":"ConnectionId"}, + "ownerAccount":{"shape":"OwnerAccount"}, + 
"newPrivateVirtualInterfaceAllocation":{"shape":"NewPrivateVirtualInterfaceAllocation"} + } + }, + "AllocatePublicVirtualInterfaceRequest":{ + "type":"structure", + "required":[ + "connectionId", + "ownerAccount", + "newPublicVirtualInterfaceAllocation" + ], + "members":{ + "connectionId":{"shape":"ConnectionId"}, + "ownerAccount":{"shape":"OwnerAccount"}, + "newPublicVirtualInterfaceAllocation":{"shape":"NewPublicVirtualInterfaceAllocation"} + } + }, + "AmazonAddress":{"type":"string"}, + "BGPAuthKey":{"type":"string"}, + "Bandwidth":{"type":"string"}, + "CIDR":{"type":"string"}, + "ConfirmConnectionRequest":{ + "type":"structure", + "required":["connectionId"], + "members":{ + "connectionId":{"shape":"ConnectionId"} + } + }, + "ConfirmConnectionResponse":{ + "type":"structure", + "members":{ + "connectionState":{"shape":"ConnectionState"} + } + }, + "ConfirmPrivateVirtualInterfaceRequest":{ + "type":"structure", + "required":[ + "virtualInterfaceId", + "virtualGatewayId" + ], + "members":{ + "virtualInterfaceId":{"shape":"VirtualInterfaceId"}, + "virtualGatewayId":{"shape":"VirtualGatewayId"} + } + }, + "ConfirmPrivateVirtualInterfaceResponse":{ + "type":"structure", + "members":{ + "virtualInterfaceState":{"shape":"VirtualInterfaceState"} + } + }, + "ConfirmPublicVirtualInterfaceRequest":{ + "type":"structure", + "required":["virtualInterfaceId"], + "members":{ + "virtualInterfaceId":{"shape":"VirtualInterfaceId"} + } + }, + "ConfirmPublicVirtualInterfaceResponse":{ + "type":"structure", + "members":{ + "virtualInterfaceState":{"shape":"VirtualInterfaceState"} + } + }, + "Connection":{ + "type":"structure", + "members":{ + "ownerAccount":{"shape":"OwnerAccount"}, + "connectionId":{"shape":"ConnectionId"}, + "connectionName":{"shape":"ConnectionName"}, + "connectionState":{"shape":"ConnectionState"}, + "region":{"shape":"Region"}, + "location":{"shape":"LocationCode"}, + "bandwidth":{"shape":"Bandwidth"}, + "vlan":{"shape":"VLAN"}, + "partnerName":{"shape":"PartnerName"}, + "loaIssueTime":{"shape":"LoaIssueTime"} + } + }, + "ConnectionId":{"type":"string"}, + "ConnectionList":{ + "type":"list", + "member":{"shape":"Connection"} + }, + "ConnectionName":{"type":"string"}, + "ConnectionState":{ + "type":"string", + "enum":[ + "ordering", + "requested", + "pending", + "available", + "down", + "deleting", + "deleted", + "rejected" + ] + }, + "Connections":{ + "type":"structure", + "members":{ + "connections":{"shape":"ConnectionList"} + } + }, + "CreateConnectionRequest":{ + "type":"structure", + "required":[ + "location", + "bandwidth", + "connectionName" + ], + "members":{ + "location":{"shape":"LocationCode"}, + "bandwidth":{"shape":"Bandwidth"}, + "connectionName":{"shape":"ConnectionName"} + } + }, + "CreateInterconnectRequest":{ + "type":"structure", + "required":[ + "interconnectName", + "bandwidth", + "location" + ], + "members":{ + "interconnectName":{"shape":"InterconnectName"}, + "bandwidth":{"shape":"Bandwidth"}, + "location":{"shape":"LocationCode"} + } + }, + "CreatePrivateVirtualInterfaceRequest":{ + "type":"structure", + "required":[ + "connectionId", + "newPrivateVirtualInterface" + ], + "members":{ + "connectionId":{"shape":"ConnectionId"}, + "newPrivateVirtualInterface":{"shape":"NewPrivateVirtualInterface"} + } + }, + "CreatePublicVirtualInterfaceRequest":{ + "type":"structure", + "required":[ + "connectionId", + "newPublicVirtualInterface" + ], + "members":{ + "connectionId":{"shape":"ConnectionId"}, + "newPublicVirtualInterface":{"shape":"NewPublicVirtualInterface"} + } 
+ }, + "CustomerAddress":{"type":"string"}, + "DeleteConnectionRequest":{ + "type":"structure", + "required":["connectionId"], + "members":{ + "connectionId":{"shape":"ConnectionId"} + } + }, + "DeleteInterconnectRequest":{ + "type":"structure", + "required":["interconnectId"], + "members":{ + "interconnectId":{"shape":"InterconnectId"} + } + }, + "DeleteInterconnectResponse":{ + "type":"structure", + "members":{ + "interconnectState":{"shape":"InterconnectState"} + } + }, + "DeleteVirtualInterfaceRequest":{ + "type":"structure", + "required":["virtualInterfaceId"], + "members":{ + "virtualInterfaceId":{"shape":"VirtualInterfaceId"} + } + }, + "DeleteVirtualInterfaceResponse":{ + "type":"structure", + "members":{ + "virtualInterfaceState":{"shape":"VirtualInterfaceState"} + } + }, + "DescribeConnectionLoaRequest":{ + "type":"structure", + "required":["connectionId"], + "members":{ + "connectionId":{"shape":"ConnectionId"}, + "providerName":{"shape":"ProviderName"}, + "loaContentType":{"shape":"LoaContentType"} + } + }, + "DescribeConnectionLoaResponse":{ + "type":"structure", + "members":{ + "loa":{"shape":"Loa"} + } + }, + "DescribeConnectionsOnInterconnectRequest":{ + "type":"structure", + "required":["interconnectId"], + "members":{ + "interconnectId":{"shape":"InterconnectId"} + } + }, + "DescribeConnectionsRequest":{ + "type":"structure", + "members":{ + "connectionId":{"shape":"ConnectionId"} + } + }, + "DescribeInterconnectLoaRequest":{ + "type":"structure", + "required":["interconnectId"], + "members":{ + "interconnectId":{"shape":"InterconnectId"}, + "providerName":{"shape":"ProviderName"}, + "loaContentType":{"shape":"LoaContentType"} + } + }, + "DescribeInterconnectLoaResponse":{ + "type":"structure", + "members":{ + "loa":{"shape":"Loa"} + } + }, + "DescribeInterconnectsRequest":{ + "type":"structure", + "members":{ + "interconnectId":{"shape":"InterconnectId"} + } + }, + "DescribeVirtualInterfacesRequest":{ + "type":"structure", + "members":{ + "connectionId":{"shape":"ConnectionId"}, + "virtualInterfaceId":{"shape":"VirtualInterfaceId"} + } + }, + "DirectConnectClientException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "DirectConnectServerException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "ErrorMessage":{"type":"string"}, + "Interconnect":{ + "type":"structure", + "members":{ + "interconnectId":{"shape":"InterconnectId"}, + "interconnectName":{"shape":"InterconnectName"}, + "interconnectState":{"shape":"InterconnectState"}, + "region":{"shape":"Region"}, + "location":{"shape":"LocationCode"}, + "bandwidth":{"shape":"Bandwidth"}, + "loaIssueTime":{"shape":"LoaIssueTime"} + } + }, + "InterconnectId":{"type":"string"}, + "InterconnectList":{ + "type":"list", + "member":{"shape":"Interconnect"} + }, + "InterconnectName":{"type":"string"}, + "InterconnectState":{ + "type":"string", + "enum":[ + "requested", + "pending", + "available", + "down", + "deleting", + "deleted" + ] + }, + "Interconnects":{ + "type":"structure", + "members":{ + "interconnects":{"shape":"InterconnectList"} + } + }, + "Loa":{ + "type":"structure", + "members":{ + "loaContent":{"shape":"LoaContent"}, + "loaContentType":{"shape":"LoaContentType"} + } + }, + "LoaContent":{"type":"blob"}, + "LoaContentType":{ + "type":"string", + "enum":["application/pdf"] + }, + "LoaIssueTime":{"type":"timestamp"}, + "Location":{ + "type":"structure", + "members":{ + 
"locationCode":{"shape":"LocationCode"}, + "locationName":{"shape":"LocationName"} + } + }, + "LocationCode":{"type":"string"}, + "LocationList":{ + "type":"list", + "member":{"shape":"Location"} + }, + "LocationName":{"type":"string"}, + "Locations":{ + "type":"structure", + "members":{ + "locations":{"shape":"LocationList"} + } + }, + "NewPrivateVirtualInterface":{ + "type":"structure", + "required":[ + "virtualInterfaceName", + "vlan", + "asn", + "virtualGatewayId" + ], + "members":{ + "virtualInterfaceName":{"shape":"VirtualInterfaceName"}, + "vlan":{"shape":"VLAN"}, + "asn":{"shape":"ASN"}, + "authKey":{"shape":"BGPAuthKey"}, + "amazonAddress":{"shape":"AmazonAddress"}, + "customerAddress":{"shape":"CustomerAddress"}, + "virtualGatewayId":{"shape":"VirtualGatewayId"} + } + }, + "NewPrivateVirtualInterfaceAllocation":{ + "type":"structure", + "required":[ + "virtualInterfaceName", + "vlan", + "asn" + ], + "members":{ + "virtualInterfaceName":{"shape":"VirtualInterfaceName"}, + "vlan":{"shape":"VLAN"}, + "asn":{"shape":"ASN"}, + "authKey":{"shape":"BGPAuthKey"}, + "amazonAddress":{"shape":"AmazonAddress"}, + "customerAddress":{"shape":"CustomerAddress"} + } + }, + "NewPublicVirtualInterface":{ + "type":"structure", + "required":[ + "virtualInterfaceName", + "vlan", + "asn", + "amazonAddress", + "customerAddress", + "routeFilterPrefixes" + ], + "members":{ + "virtualInterfaceName":{"shape":"VirtualInterfaceName"}, + "vlan":{"shape":"VLAN"}, + "asn":{"shape":"ASN"}, + "authKey":{"shape":"BGPAuthKey"}, + "amazonAddress":{"shape":"AmazonAddress"}, + "customerAddress":{"shape":"CustomerAddress"}, + "routeFilterPrefixes":{"shape":"RouteFilterPrefixList"} + } + }, + "NewPublicVirtualInterfaceAllocation":{ + "type":"structure", + "required":[ + "virtualInterfaceName", + "vlan", + "asn", + "amazonAddress", + "customerAddress", + "routeFilterPrefixes" + ], + "members":{ + "virtualInterfaceName":{"shape":"VirtualInterfaceName"}, + "vlan":{"shape":"VLAN"}, + "asn":{"shape":"ASN"}, + "authKey":{"shape":"BGPAuthKey"}, + "amazonAddress":{"shape":"AmazonAddress"}, + "customerAddress":{"shape":"CustomerAddress"}, + "routeFilterPrefixes":{"shape":"RouteFilterPrefixList"} + } + }, + "OwnerAccount":{"type":"string"}, + "PartnerName":{"type":"string"}, + "ProviderName":{"type":"string"}, + "Region":{"type":"string"}, + "RouteFilterPrefix":{ + "type":"structure", + "members":{ + "cidr":{"shape":"CIDR"} + } + }, + "RouteFilterPrefixList":{ + "type":"list", + "member":{"shape":"RouteFilterPrefix"} + }, + "RouterConfig":{"type":"string"}, + "VLAN":{"type":"integer"}, + "VirtualGateway":{ + "type":"structure", + "members":{ + "virtualGatewayId":{"shape":"VirtualGatewayId"}, + "virtualGatewayState":{"shape":"VirtualGatewayState"} + } + }, + "VirtualGatewayId":{"type":"string"}, + "VirtualGatewayList":{ + "type":"list", + "member":{"shape":"VirtualGateway"} + }, + "VirtualGatewayState":{"type":"string"}, + "VirtualGateways":{ + "type":"structure", + "members":{ + "virtualGateways":{"shape":"VirtualGatewayList"} + } + }, + "VirtualInterface":{ + "type":"structure", + "members":{ + "ownerAccount":{"shape":"OwnerAccount"}, + "virtualInterfaceId":{"shape":"VirtualInterfaceId"}, + "location":{"shape":"LocationCode"}, + "connectionId":{"shape":"ConnectionId"}, + "virtualInterfaceType":{"shape":"VirtualInterfaceType"}, + "virtualInterfaceName":{"shape":"VirtualInterfaceName"}, + "vlan":{"shape":"VLAN"}, + "asn":{"shape":"ASN"}, + "authKey":{"shape":"BGPAuthKey"}, + "amazonAddress":{"shape":"AmazonAddress"}, + 
"customerAddress":{"shape":"CustomerAddress"}, + "virtualInterfaceState":{"shape":"VirtualInterfaceState"}, + "customerRouterConfig":{"shape":"RouterConfig"}, + "virtualGatewayId":{"shape":"VirtualGatewayId"}, + "routeFilterPrefixes":{"shape":"RouteFilterPrefixList"} + } + }, + "VirtualInterfaceId":{"type":"string"}, + "VirtualInterfaceList":{ + "type":"list", + "member":{"shape":"VirtualInterface"} + }, + "VirtualInterfaceName":{"type":"string"}, + "VirtualInterfaceState":{ + "type":"string", + "enum":[ + "confirming", + "verifying", + "pending", + "available", + "down", + "deleting", + "deleted", + "rejected" + ] + }, + "VirtualInterfaceType":{"type":"string"}, + "VirtualInterfaces":{ + "type":"structure", + "members":{ + "virtualInterfaces":{"shape":"VirtualInterfaceList"} + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/directconnect/2012-10-25/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/directconnect/2012-10-25/docs-2.json new file mode 100644 index 000000000..08b826db4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/directconnect/2012-10-25/docs-2.json @@ -0,0 +1,554 @@ +{ + "version": "2.0", + "service": "

    AWS Direct Connect links your internal network to an AWS Direct Connect location over a standard 1 gigabit or 10 gigabit Ethernet fiber-optic cable. One end of the cable is connected to your router, the other to an AWS Direct Connect router. With this connection in place, you can create virtual interfaces directly to the AWS cloud (for example, to Amazon Elastic Compute Cloud (Amazon EC2) and Amazon Simple Storage Service (Amazon S3)) and to Amazon Virtual Private Cloud (Amazon VPC), bypassing Internet service providers in your network path. An AWS Direct Connect location provides access to AWS in the region it is associated with, as well as access to other US regions. For example, you can provision a single connection to any AWS Direct Connect location in the US and use it to access public AWS services in all US Regions and AWS GovCloud (US).

    ", + "operations": { + "AllocateConnectionOnInterconnect": "

    Creates a hosted connection on an interconnect.

    Allocates a VLAN number and a specified amount of bandwidth for use by a hosted connection on the given interconnect.

    This is intended for use by AWS Direct Connect partners only.

    ", + "AllocatePrivateVirtualInterface": "

    Provisions a private virtual interface to be owned by a different customer.

    The owner of a connection calls this function to provision a private virtual interface which will be owned by another AWS customer.

    Virtual interfaces created using this function must be confirmed by the virtual interface owner by calling ConfirmPrivateVirtualInterface. Until this step has been completed, the virtual interface will be in 'Confirming' state, and will not be available for handling traffic.

    ", + "AllocatePublicVirtualInterface": "

    Provisions a public virtual interface to be owned by a different customer.

    The owner of a connection calls this function to provision a public virtual interface which will be owned by another AWS customer.

    Virtual interfaces created using this function must be confirmed by the virtual interface owner by calling ConfirmPublicVirtualInterface. Until this step has been completed, the virtual interface will be in 'Confirming' state, and will not be available for handling traffic.

    ", + "ConfirmConnection": "

    Confirm the creation of a hosted connection on an interconnect.

    Upon creation, the hosted connection is initially in the 'Ordering' state, and will remain in this state until the owner calls ConfirmConnection to confirm creation of the hosted connection.

    ", + "ConfirmPrivateVirtualInterface": "

    Accept ownership of a private virtual interface created by another customer.

    After the virtual interface owner calls this function, the virtual interface will be created and attached to the given virtual private gateway, and will be available for handling traffic.

    ", + "ConfirmPublicVirtualInterface": "

    Accept ownership of a public virtual interface created by another customer.

    After the virtual interface owner calls this function, the specified virtual interface will be created and made available for handling traffic.

    ", + "CreateConnection": "

    Creates a new connection between the customer network and a specific AWS Direct Connect location.

    A connection links your internal network to an AWS Direct Connect location over a standard 1 gigabit or 10 gigabit Ethernet fiber-optic cable. One end of the cable is connected to your router, the other to an AWS Direct Connect router. An AWS Direct Connect location provides access to Amazon Web Services in the region it is associated with. You can establish connections with AWS Direct Connect locations in multiple regions, but a connection in one region does not provide connectivity to other regions.

    ", + "CreateInterconnect": "

    Creates a new interconnect between an AWS Direct Connect partner's network and a specific AWS Direct Connect location.

    An interconnect is a connection which is capable of hosting other connections. The AWS Direct Connect partner can use an interconnect to provide sub-1Gbps AWS Direct Connect service to tier 2 customers who do not have their own connections. Like a standard connection, an interconnect links the AWS Direct Connect partner's network to an AWS Direct Connect location over a standard 1 Gbps or 10 Gbps Ethernet fiber-optic cable. One end is connected to the partner's router, the other to an AWS Direct Connect router.

    For each end customer, the AWS Direct Connect partner provisions a connection on their interconnect by calling AllocateConnectionOnInterconnect. The end customer can then connect to AWS resources by creating a virtual interface on their connection, using the VLAN assigned to them by the AWS Direct Connect partner.

    This is intended for use by AWS Direct Connect partners only.
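
    A sketch of the partner-side provisioning call described above, again with the placeholder values used in this file; the returned Connection starts out in the 'Ordering' state until the end customer calls ConfirmConnection:

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/directconnect"
        )

        func main() {
            svc := directconnect.New(session.Must(session.NewSession()))

            // Provision a hosted connection for an end customer on our interconnect.
            conn, err := svc.AllocateConnectionOnInterconnect(&directconnect.AllocateConnectionOnInterconnectInput{
                Bandwidth:      aws.String("500Mbps"),
                ConnectionName: aws.String("500M Connection to AWS"),
                InterconnectId: aws.String("dxcon-456abc78"), // placeholder ID
                OwnerAccount:   aws.String("123443215678"),   // placeholder account
                Vlan:           aws.Int64(101),
            })
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println(aws.StringValue(conn.ConnectionId), aws.StringValue(conn.ConnectionState))
        }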

    ", + "CreatePrivateVirtualInterface": "

    Creates a new private virtual interface. A virtual interface is the VLAN that transports AWS Direct Connect traffic. A private virtual interface supports sending traffic to a single virtual private cloud (VPC).

    ", + "CreatePublicVirtualInterface": "

    Creates a new public virtual interface. A virtual interface is the VLAN that transports AWS Direct Connect traffic. A public virtual interface supports sending traffic to public services of AWS such as Amazon Simple Storage Service (Amazon S3).

    ", + "DeleteConnection": "

    Deletes the connection.

    Deleting a connection only stops the AWS Direct Connect port hour and data transfer charges. You need to cancel separately with the providers any services or charges for cross-connects or network circuits that connect you to the AWS Direct Connect location.

    ", + "DeleteInterconnect": "

    Deletes the specified interconnect.

    This is intended for use by AWS Direct Connect partners only.

    ", + "DeleteVirtualInterface": "

    Deletes a virtual interface.

    ", + "DescribeConnectionLoa": "

    Returns the LOA-CFA for a Connection.

    The Letter of Authorization - Connecting Facility Assignment (LOA-CFA) is a document that your APN partner or service provider uses when establishing your cross connect to AWS at the colocation facility. For more information, see Requesting Cross Connects at AWS Direct Connect Locations in the AWS Direct Connect user guide.
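
    Since the LOA-CFA comes back as binary content (application/pdf is currently the only media type), a sketch that saves it to disk, assuming a placeholder connection ID:

        package main

        import (
            "io/ioutil"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/directconnect"
        )

        func main() {
            svc := directconnect.New(session.Must(session.NewSession()))

            out, err := svc.DescribeConnectionLoa(&directconnect.DescribeConnectionLoaInput{
                ConnectionId:   aws.String("dxcon-fg5678gh"), // placeholder ID
                LoaContentType: aws.String("application/pdf"),
            })
            if err != nil {
                log.Fatal(err)
            }
            if out.Loa == nil {
                log.Fatal("no LOA-CFA returned")
            }
            // Loa.LoaContent holds the raw PDF bytes.
            if err := ioutil.WriteFile("loa-cfa.pdf", out.Loa.LoaContent, 0644); err != nil {
                log.Fatal(err)
            }
        }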

    ", + "DescribeConnections": "

    Displays all connections in this region.

    If a connection ID is provided, the call returns only that particular connection.

    ", + "DescribeConnectionsOnInterconnect": "

    Returns a list of connections that have been provisioned on the given interconnect.

    This is intended for use by AWS Direct Connect partners only.

    ", + "DescribeInterconnectLoa": "

    Returns the LOA-CFA for an Interconnect.

    The Letter of Authorization - Connecting Facility Assignment (LOA-CFA) is a document that is used when establishing your cross connect to AWS at the colocation facility. For more information, see Requesting Cross Connects at AWS Direct Connect Locations in the AWS Direct Connect user guide.

    ", + "DescribeInterconnects": "

    Returns a list of interconnects owned by the AWS account.

    If an interconnect ID is provided, the call returns only that particular interconnect.

    ", + "DescribeLocations": "

    Returns the list of AWS Direct Connect locations in the current AWS region. These are the locations that may be selected when calling CreateConnection or CreateInterconnect.

    ", + "DescribeVirtualGateways": "

    Returns a list of virtual private gateways owned by the AWS account.

    You can create one or more AWS Direct Connect private virtual interfaces linking to a virtual private gateway. A virtual private gateway can be managed via the Amazon Virtual Private Cloud (VPC) console or the EC2 CreateVpnGateway action.

    ", + "DescribeVirtualInterfaces": "

    Displays all virtual interfaces for an AWS account. Virtual interfaces deleted fewer than 15 minutes before DescribeVirtualInterfaces is called are also returned. If a connection ID is included then only virtual interfaces associated with this connection will be returned. If a virtual interface ID is included then only a single virtual interface will be returned.

    A virtual interface (VLAN) transmits the traffic between the AWS Direct Connect location and the customer.
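
    A short sketch listing virtual interfaces, optionally scoped to a single connection (drop the ConnectionId field to list everything); the ID is a placeholder:

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/directconnect"
        )

        func main() {
            svc := directconnect.New(session.Must(session.NewSession()))

            out, err := svc.DescribeVirtualInterfaces(&directconnect.DescribeVirtualInterfacesInput{
                ConnectionId: aws.String("dxcon-fg5678gh"), // placeholder; optional
            })
            if err != nil {
                log.Fatal(err)
            }
            for _, vif := range out.VirtualInterfaces {
                fmt.Printf("%s\t%s\t%s\n",
                    aws.StringValue(vif.VirtualInterfaceId),
                    aws.StringValue(vif.VirtualInterfaceType),
                    aws.StringValue(vif.VirtualInterfaceState))
            }
        }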

    " + }, + "shapes": { + "ASN": { + "base": "

    Autonomous system (AS) number for Border Gateway Protocol (BGP) configuration.

    Example: 65000

    ", + "refs": { + "NewPrivateVirtualInterface$asn": null, + "NewPrivateVirtualInterfaceAllocation$asn": null, + "NewPublicVirtualInterface$asn": null, + "NewPublicVirtualInterfaceAllocation$asn": null, + "VirtualInterface$asn": null + } + }, + "AllocateConnectionOnInterconnectRequest": { + "base": "

    Container for the parameters to the AllocateConnectionOnInterconnect operation.

    ", + "refs": { + } + }, + "AllocatePrivateVirtualInterfaceRequest": { + "base": "

    Container for the parameters to the AllocatePrivateVirtualInterface operation.

    ", + "refs": { + } + }, + "AllocatePublicVirtualInterfaceRequest": { + "base": "

    Container for the parameters to the AllocatePublicVirtualInterface operation.

    ", + "refs": { + } + }, + "AmazonAddress": { + "base": "

    IP address assigned to the Amazon interface.

    Example: 192.168.1.1/30

    ", + "refs": { + "NewPrivateVirtualInterface$amazonAddress": null, + "NewPrivateVirtualInterfaceAllocation$amazonAddress": null, + "NewPublicVirtualInterface$amazonAddress": null, + "NewPublicVirtualInterfaceAllocation$amazonAddress": null, + "VirtualInterface$amazonAddress": null + } + }, + "BGPAuthKey": { + "base": "

    Authentication key for BGP configuration.

    Example: asdf34example

    ", + "refs": { + "NewPrivateVirtualInterface$authKey": null, + "NewPrivateVirtualInterfaceAllocation$authKey": null, + "NewPublicVirtualInterface$authKey": null, + "NewPublicVirtualInterfaceAllocation$authKey": null, + "VirtualInterface$authKey": null + } + }, + "Bandwidth": { + "base": "

    Bandwidth of the connection.

    Example: 1Gbps

    Default: None

    ", + "refs": { + "AllocateConnectionOnInterconnectRequest$bandwidth": "

    Bandwidth of the connection.

    Example: \"500Mbps\"

    Default: None

    Values: 50M, 100M, 200M, 300M, 400M, or 500M

    ", + "Connection$bandwidth": "

    Bandwidth of the connection.

    Example: 1Gbps (for regular connections) or 500Mbps (for hosted connections)

    Default: None

    ", + "CreateConnectionRequest$bandwidth": null, + "CreateInterconnectRequest$bandwidth": "

    The port bandwidth.

    Example: 1Gbps

    Default: None

    Available values: 1Gbps, 10Gbps

    ", + "Interconnect$bandwidth": null + } + }, + "CIDR": { + "base": null, + "refs": { + "RouteFilterPrefix$cidr": "

    CIDR notation for the advertised route. Multiple routes are separated by commas.

    Example: 10.10.10.0/24,10.10.11.0/24

    " + } + }, + "ConfirmConnectionRequest": { + "base": "

    Container for the parameters to the ConfirmConnection operation.

    ", + "refs": { + } + }, + "ConfirmConnectionResponse": { + "base": "

    The response received when ConfirmConnection is called.

    ", + "refs": { + } + }, + "ConfirmPrivateVirtualInterfaceRequest": { + "base": "

    Container for the parameters to the ConfirmPrivateVirtualInterface operation.

    ", + "refs": { + } + }, + "ConfirmPrivateVirtualInterfaceResponse": { + "base": "

    The response received when ConfirmPrivateVirtualInterface is called.

    ", + "refs": { + } + }, + "ConfirmPublicVirtualInterfaceRequest": { + "base": "

    Container for the parameters to the ConfirmPublicVirtualInterface operation.

    ", + "refs": { + } + }, + "ConfirmPublicVirtualInterfaceResponse": { + "base": "

    The response received when ConfirmPublicVirtualInterface is called.

    ", + "refs": { + } + }, + "Connection": { + "base": "

    A connection represents the physical network connection between the AWS Direct Connect location and the customer.

    ", + "refs": { + "ConnectionList$member": null + } + }, + "ConnectionId": { + "base": "

    ID of the connection.

    Example: dxcon-fg5678gh

    Default: None

    ", + "refs": { + "AllocatePrivateVirtualInterfaceRequest$connectionId": "

    The connection ID on which the private virtual interface is provisioned.

    Default: None

    ", + "AllocatePublicVirtualInterfaceRequest$connectionId": "

    The connection ID on which the public virtual interface is provisioned.

    Default: None

    ", + "ConfirmConnectionRequest$connectionId": null, + "Connection$connectionId": null, + "CreatePrivateVirtualInterfaceRequest$connectionId": null, + "CreatePublicVirtualInterfaceRequest$connectionId": null, + "DeleteConnectionRequest$connectionId": null, + "DescribeConnectionLoaRequest$connectionId": null, + "DescribeConnectionsRequest$connectionId": null, + "DescribeVirtualInterfacesRequest$connectionId": null, + "VirtualInterface$connectionId": null + } + }, + "ConnectionList": { + "base": "

    A list of connections.

    ", + "refs": { + "Connections$connections": "

    A list of connections.

    " + } + }, + "ConnectionName": { + "base": "

    The name of the connection.

    Example: \"My Connection to AWS\"

    Default: None

    ", + "refs": { + "AllocateConnectionOnInterconnectRequest$connectionName": "

    Name of the provisioned connection.

    Example: \"500M Connection to AWS\"

    Default: None

    ", + "Connection$connectionName": null, + "CreateConnectionRequest$connectionName": null + } + }, + "ConnectionState": { + "base": "

    State of the connection.

    • Ordering: The initial state of a hosted connection provisioned on an interconnect. The connection stays in the ordering state until the owner of the hosted connection confirms or declines the connection order.

    • Requested: The initial state of a standard connection. The connection stays in the requested state until the Letter of Authorization (LOA) is sent to the customer.

    • Pending: The connection has been approved, and is being initialized.

    • Available: The network link is up, and the connection is ready for use.

    • Down: The network link is down.

    • Deleting: The connection is in the process of being deleted.

    • Deleted: The connection has been deleted.

    • Rejected: A hosted connection in the 'Ordering' state will enter the 'Rejected' state if it is deleted by the end customer.

    ", + "refs": { + "ConfirmConnectionResponse$connectionState": null, + "Connection$connectionState": null + } + }, + "Connections": { + "base": "

    A structure containing a list of connections.

    ", + "refs": { + } + }, + "CreateConnectionRequest": { + "base": "

    Container for the parameters to the CreateConnection operation.

    ", + "refs": { + } + }, + "CreateInterconnectRequest": { + "base": "

    Container for the parameters to the CreateInterconnect operation.

    ", + "refs": { + } + }, + "CreatePrivateVirtualInterfaceRequest": { + "base": "

    Container for the parameters to the CreatePrivateVirtualInterface operation.

    ", + "refs": { + } + }, + "CreatePublicVirtualInterfaceRequest": { + "base": "

    Container for the parameters to the CreatePublicVirtualInterface operation.

    ", + "refs": { + } + }, + "CustomerAddress": { + "base": "

    IP address assigned to the customer interface.

    Example: 192.168.1.2/30

    ", + "refs": { + "NewPrivateVirtualInterface$customerAddress": null, + "NewPrivateVirtualInterfaceAllocation$customerAddress": null, + "NewPublicVirtualInterface$customerAddress": null, + "NewPublicVirtualInterfaceAllocation$customerAddress": null, + "VirtualInterface$customerAddress": null + } + }, + "DeleteConnectionRequest": { + "base": "

    Container for the parameters to the DeleteConnection operation.

    ", + "refs": { + } + }, + "DeleteInterconnectRequest": { + "base": "

    Container for the parameters to the DeleteInterconnect operation.

    ", + "refs": { + } + }, + "DeleteInterconnectResponse": { + "base": "

    The response received when DeleteInterconnect is called.

    ", + "refs": { + } + }, + "DeleteVirtualInterfaceRequest": { + "base": "

    Container for the parameters to the DeleteVirtualInterface operation.

    ", + "refs": { + } + }, + "DeleteVirtualInterfaceResponse": { + "base": "

    The response received when DeleteVirtualInterface is called.

    ", + "refs": { + } + }, + "DescribeConnectionLoaRequest": { + "base": "

    Container for the parameters to the DescribeConnectionLoa operation.

    ", + "refs": { + } + }, + "DescribeConnectionLoaResponse": { + "base": "

    The response received when DescribeConnectionLoa is called.

    ", + "refs": { + } + }, + "DescribeConnectionsOnInterconnectRequest": { + "base": "

    Container for the parameters to the DescribeConnectionsOnInterconnect operation.

    ", + "refs": { + } + }, + "DescribeConnectionsRequest": { + "base": "

    Container for the parameters to the DescribeConnections operation.

    ", + "refs": { + } + }, + "DescribeInterconnectLoaRequest": { + "base": "

    Container for the parameters to the DescribeInterconnectLoa operation.

    ", + "refs": { + } + }, + "DescribeInterconnectLoaResponse": { + "base": "

    The response received when DescribeInterconnectLoa is called.

    ", + "refs": { + } + }, + "DescribeInterconnectsRequest": { + "base": "

    Container for the parameters to the DescribeInterconnects operation.

    ", + "refs": { + } + }, + "DescribeVirtualInterfacesRequest": { + "base": "

    Container for the parameters to the DescribeVirtualInterfaces operation.

    ", + "refs": { + } + }, + "DirectConnectClientException": { + "base": "

    The API was called with invalid parameters. The error message will contain additional details about the cause.

    ", + "refs": { + } + }, + "DirectConnectServerException": { + "base": "

    A server-side error occurred during the API call. The error message will contain additional details about the cause.

    ", + "refs": { + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "DirectConnectClientException$message": "

    This is an exception thrown when there is an issue with the input provided by the API call. For example, the name provided for a connection contains a pound sign (#). This can also occur when a valid value is provided, but is otherwise constrained. For example, the valid VLAN tag range is 1-4096 but each can only be used once per connection.

    ", + "DirectConnectServerException$message": "

    This is an exception thrown when there is a backend issue on the server side.

    " + } + }, + "Interconnect": { + "base": "

    An interconnect is a connection that can host other connections.

    Like a standard AWS Direct Connect connection, an interconnect represents the physical connection between an AWS Direct Connect partner's network and a specific Direct Connect location. An AWS Direct Connect partner who owns an interconnect can provision hosted connections on the interconnect for their end customers, thereby providing the end customers with connectivity to AWS services.

    The resources of the interconnect, including bandwidth and VLAN numbers, are shared by all of the hosted connections on the interconnect, and the owner of the interconnect determines how these resources are assigned.

    ", + "refs": { + "InterconnectList$member": null + } + }, + "InterconnectId": { + "base": "

    The ID of the interconnect.

    Example: dxcon-abc123

    ", + "refs": { + "AllocateConnectionOnInterconnectRequest$interconnectId": "

    ID of the interconnect on which the connection will be provisioned.

    Example: dxcon-456abc78

    Default: None

    ", + "DeleteInterconnectRequest$interconnectId": null, + "DescribeConnectionsOnInterconnectRequest$interconnectId": "

    ID of the interconnect on which the connections are provisioned.

    Example: dxcon-abc123

    Default: None

    ", + "DescribeInterconnectLoaRequest$interconnectId": null, + "DescribeInterconnectsRequest$interconnectId": null, + "Interconnect$interconnectId": null + } + }, + "InterconnectList": { + "base": "

    A list of interconnects.

    ", + "refs": { + "Interconnects$interconnects": "

    A list of interconnects.

    " + } + }, + "InterconnectName": { + "base": "

    The name of the interconnect.

    Example: \"1G Interconnect to AWS\"

    ", + "refs": { + "CreateInterconnectRequest$interconnectName": "

    The name of the interconnect.

    Example: \"1G Interconnect to AWS\"

    Default: None

    ", + "Interconnect$interconnectName": null + } + }, + "InterconnectState": { + "base": "

    State of the interconnect.

    • Requested: The initial state of an interconnect. The interconnect stays in the requested state until the Letter of Authorization (LOA) is sent to the customer.

    • Pending: The interconnect has been approved, and is being initialized.

    • Available: The network link is up, and the interconnect is ready for use.

    • Down: The network link is down.

    • Deleting: The interconnect is in the process of being deleted.

    • Deleted: The interconnect has been deleted.

    ", + "refs": { + "DeleteInterconnectResponse$interconnectState": null, + "Interconnect$interconnectState": null + } + }, + "Interconnects": { + "base": "

    A structure containing a list of interconnects.

    ", + "refs": { + } + }, + "Loa": { + "base": "

    A structure containing the Letter of Authorization - Connecting Facility Assignment (LOA-CFA) for a connection.

    ", + "refs": { + "DescribeConnectionLoaResponse$loa": null, + "DescribeInterconnectLoaResponse$loa": null + } + }, + "LoaContent": { + "base": "

    The binary contents of the LOA-CFA document.

    ", + "refs": { + "Loa$loaContent": null + } + }, + "LoaContentType": { + "base": "

    A standard media type indicating the content type of the LOA-CFA document. Currently, the only supported value is \"application/pdf\".

    Default: application/pdf

    ", + "refs": { + "DescribeConnectionLoaRequest$loaContentType": null, + "DescribeInterconnectLoaRequest$loaContentType": null, + "Loa$loaContentType": null + } + }, + "LoaIssueTime": { + "base": null, + "refs": { + "Connection$loaIssueTime": "

    The time of the most recent call to DescribeConnectionLoa for this Connection.

    ", + "Interconnect$loaIssueTime": "

    The time of the most recent call to DescribeInterconnectLoa for this Interconnect.

    " + } + }, + "Location": { + "base": "

    An AWS Direct Connect location where connections and interconnects can be requested.

    ", + "refs": { + "LocationList$member": null + } + }, + "LocationCode": { + "base": "

    Where the connection is located.

    Example: EqSV5

    Default: None

    ", + "refs": { + "Connection$location": null, + "CreateConnectionRequest$location": null, + "CreateInterconnectRequest$location": "

    Where the interconnect is located.

    Example: EqSV5

    Default: None

    ", + "Interconnect$location": null, + "Location$locationCode": "

    The code used to indicate the AWS Direct Connect location.

    ", + "VirtualInterface$location": null + } + }, + "LocationList": { + "base": null, + "refs": { + "Locations$locations": "

    A list of colocation hubs where network providers have equipment. Most regions have multiple locations available.

    " + } + }, + "LocationName": { + "base": null, + "refs": { + "Location$locationName": "

    The name of the AWS Direct Connect location. The name includes the colocation partner name and the physical site of the lit building.

    " + } + }, + "Locations": { + "base": "

    A location is a network facility where AWS Direct Connect routers are available to be connected. Generally, these are colocation hubs where many network providers have equipment, and where cross connects can be delivered. Locations include a name and facility code, and must be provided when creating a connection.

    ", + "refs": { + } + }, + "NewPrivateVirtualInterface": { + "base": "

    A structure containing information about a new private virtual interface.

    ", + "refs": { + "CreatePrivateVirtualInterfaceRequest$newPrivateVirtualInterface": "

    Detailed information for the private virtual interface to be created.

    Default: None

    " + } + }, + "NewPrivateVirtualInterfaceAllocation": { + "base": "

    A structure containing information about a private virtual interface that will be provisioned on a connection.

    ", + "refs": { + "AllocatePrivateVirtualInterfaceRequest$newPrivateVirtualInterfaceAllocation": "

    Detailed information for the private virtual interface to be provisioned.

    Default: None

    " + } + }, + "NewPublicVirtualInterface": { + "base": "

    A structure containing information about a new public virtual interface.

    ", + "refs": { + "CreatePublicVirtualInterfaceRequest$newPublicVirtualInterface": "

    Detailed information for the public virtual interface to be created.

    Default: None

    " + } + }, + "NewPublicVirtualInterfaceAllocation": { + "base": "

    A structure containing information about a public virtual interface that will be provisioned on a connection.

    ", + "refs": { + "AllocatePublicVirtualInterfaceRequest$newPublicVirtualInterfaceAllocation": "

    Detailed information for the public virtual interface to be provisioned.

    Default: None

    " + } + }, + "OwnerAccount": { + "base": null, + "refs": { + "AllocateConnectionOnInterconnectRequest$ownerAccount": "

    Numeric account ID of the customer for whom the connection will be provisioned.

    Example: 123443215678

    Default: None

    ", + "AllocatePrivateVirtualInterfaceRequest$ownerAccount": "

    The AWS account that will own the new private virtual interface.

    Default: None

    ", + "AllocatePublicVirtualInterfaceRequest$ownerAccount": "

    The AWS account that will own the new public virtual interface.

    Default: None

    ", + "Connection$ownerAccount": "

    The AWS account that will own the new connection.

    ", + "VirtualInterface$ownerAccount": "

    The AWS account that will own the new virtual interface.

    " + } + }, + "PartnerName": { + "base": null, + "refs": { + "Connection$partnerName": "

    The name of the AWS Direct Connect service provider associated with the connection.

    " + } + }, + "ProviderName": { + "base": null, + "refs": { + "DescribeConnectionLoaRequest$providerName": "

    The name of the APN partner or service provider who establishes connectivity on your behalf. If you supply this parameter, the LOA-CFA lists the provider name alongside your company name as the requester of the cross connect.

    Default: None

    ", + "DescribeInterconnectLoaRequest$providerName": "

    The name of the service provider who establishes connectivity on your behalf. If you supply this parameter, the LOA-CFA lists the provider name alongside your company name as the requester of the cross connect.

    Default: None

    " + } + }, + "Region": { + "base": "

    The AWS region where the connection is located.

    Example: us-east-1

    Default: None

    ", + "refs": { + "Connection$region": null, + "Interconnect$region": null + } + }, + "RouteFilterPrefix": { + "base": "

    A route filter prefix that the customer can advertise through Border Gateway Protocol (BGP) over a public virtual interface.

    ", + "refs": { + "RouteFilterPrefixList$member": null + } + }, + "RouteFilterPrefixList": { + "base": "

    A list of routes to be advertised to the AWS network in this region (public virtual interface).

    ", + "refs": { + "NewPublicVirtualInterface$routeFilterPrefixes": null, + "NewPublicVirtualInterfaceAllocation$routeFilterPrefixes": null, + "VirtualInterface$routeFilterPrefixes": null + } + }, + "RouterConfig": { + "base": null, + "refs": { + "VirtualInterface$customerRouterConfig": "

    Information for generating the customer router configuration.

    " + } + }, + "VLAN": { + "base": "

    The VLAN ID.

    Example: 101

    ", + "refs": { + "AllocateConnectionOnInterconnectRequest$vlan": "

    The dedicated VLAN provisioned to the connection.

    Example: 101

    Default: None

    ", + "Connection$vlan": null, + "NewPrivateVirtualInterface$vlan": null, + "NewPrivateVirtualInterfaceAllocation$vlan": null, + "NewPublicVirtualInterface$vlan": null, + "NewPublicVirtualInterfaceAllocation$vlan": null, + "VirtualInterface$vlan": null + } + }, + "VirtualGateway": { + "base": "

    You can create one or more AWS Direct Connect private virtual interfaces linking to your virtual private gateway.

    Virtual private gateways can be managed using the Amazon Virtual Private Cloud (Amazon VPC) console or the Amazon EC2 CreateVpnGateway action.

    ", + "refs": { + "VirtualGatewayList$member": null + } + }, + "VirtualGatewayId": { + "base": "

    The ID of the virtual private gateway to a VPC. This only applies to private virtual interfaces.

    Example: vgw-123er56

    ", + "refs": { + "ConfirmPrivateVirtualInterfaceRequest$virtualGatewayId": "

    ID of the virtual private gateway that will be attached to the virtual interface.

    A virtual private gateway can be managed via the Amazon Virtual Private Cloud (VPC) console or the EC2 CreateVpnGateway action.

    Default: None

    ", + "NewPrivateVirtualInterface$virtualGatewayId": null, + "VirtualGateway$virtualGatewayId": null, + "VirtualInterface$virtualGatewayId": null + } + }, + "VirtualGatewayList": { + "base": "

    A list of virtual private gateways.

    ", + "refs": { + "VirtualGateways$virtualGateways": "

    A list of virtual private gateways.

    " + } + }, + "VirtualGatewayState": { + "base": "

    State of the virtual private gateway.

    • Pending: This is the initial state after calling CreateVpnGateway.

    • Available: Ready for use by a private virtual interface.

    • Deleting: This is the initial state after calling DeleteVpnGateway.

    • Deleted: In this state, a private virtual interface is unable to send traffic over this gateway.

    ", + "refs": { + "VirtualGateway$virtualGatewayState": null + } + }, + "VirtualGateways": { + "base": "

    A structure containing a list of virtual private gateways.

    ", + "refs": { + } + }, + "VirtualInterface": { + "base": "

    A virtual interface (VLAN) transmits the traffic between the AWS Direct Connect location and the customer.

    ", + "refs": { + "VirtualInterfaceList$member": null + } + }, + "VirtualInterfaceId": { + "base": "

    ID of the virtual interface.

    Example: dxvif-123dfg56

    Default: None

    ", + "refs": { + "ConfirmPrivateVirtualInterfaceRequest$virtualInterfaceId": null, + "ConfirmPublicVirtualInterfaceRequest$virtualInterfaceId": null, + "DeleteVirtualInterfaceRequest$virtualInterfaceId": null, + "DescribeVirtualInterfacesRequest$virtualInterfaceId": null, + "VirtualInterface$virtualInterfaceId": null + } + }, + "VirtualInterfaceList": { + "base": "

    A list of virtual interfaces.

    ", + "refs": { + "VirtualInterfaces$virtualInterfaces": "

    A list of virtual interfaces.

    " + } + }, + "VirtualInterfaceName": { + "base": "

    The name of the virtual interface assigned by the customer.

    Example: \"My VPC\"

    ", + "refs": { + "NewPrivateVirtualInterface$virtualInterfaceName": null, + "NewPrivateVirtualInterfaceAllocation$virtualInterfaceName": null, + "NewPublicVirtualInterface$virtualInterfaceName": null, + "NewPublicVirtualInterfaceAllocation$virtualInterfaceName": null, + "VirtualInterface$virtualInterfaceName": null + } + }, + "VirtualInterfaceState": { + "base": "

    State of the virtual interface.

    • Confirming: The creation of the virtual interface is pending confirmation from the virtual interface owner. If the owner of the virtual interface is different from the owner of the connection on which it is provisioned, then the virtual interface will remain in this state until it is confirmed by the virtual interface owner.

    • Verifying: This state only applies to public virtual interfaces. Each public virtual interface needs validation before the virtual interface can be created.

    • Pending: A virtual interface is in this state from the time that it is created until the virtual interface is ready to forward traffic.

    • Available: A virtual interface that is able to forward traffic.

    • Down: A virtual interface that is BGP down.

    • Deleting: A virtual interface is in this state immediately after calling DeleteVirtualInterface until it can no longer forward traffic.

    • Deleted: A virtual interface that cannot forward traffic.

    • Rejected: The virtual interface owner has declined creation of the virtual interface. If a virtual interface in the 'Confirming' state is deleted by the virtual interface owner, the virtual interface will enter the 'Rejected' state.

    ", + "refs": { + "ConfirmPrivateVirtualInterfaceResponse$virtualInterfaceState": null, + "ConfirmPublicVirtualInterfaceResponse$virtualInterfaceState": null, + "DeleteVirtualInterfaceResponse$virtualInterfaceState": null, + "VirtualInterface$virtualInterfaceState": null + } + }, + "VirtualInterfaceType": { + "base": "

    The type of virtual interface.

    Example: private (Amazon VPC) or public (Amazon S3, Amazon DynamoDB, and so on).

    ", + "refs": { + "VirtualInterface$virtualInterfaceType": null + } + }, + "VirtualInterfaces": { + "base": "

    A structure containing a list of virtual interfaces.

    ", + "refs": { + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/directconnect/2012-10-25/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/directconnect/2012-10-25/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/directconnect/2012-10-25/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/directconnect/2012-10-25/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/directconnect/2012-10-25/paginators-1.json new file mode 100644 index 000000000..cc93bedd4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/directconnect/2012-10-25/paginators-1.json @@ -0,0 +1,22 @@ +{ + "pagination": { + "DescribeConnections": { + "result_key": "connections" + }, + "DescribeConnectionsOnInterconnect": { + "result_key": "connections" + }, + "DescribeInterconnects": { + "result_key": "interconnects" + }, + "DescribeLocations": { + "result_key": "locations" + }, + "DescribeVirtualGateways": { + "result_key": "virtualGateways" + }, + "DescribeVirtualInterfaces": { + "result_key": "virtualInterfaces" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/discovery/2015-11-01/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/discovery/2015-11-01/api-2.json new file mode 100644 index 000000000..ce590fd67 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/discovery/2015-11-01/api-2.json @@ -0,0 +1,556 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-11-01", + "endpointPrefix":"discovery", + "jsonVersion":"1.1", + "protocol":"json", + "serviceFullName":"AWS Application Discovery Service", + "signatureVersion":"v4", + "targetPrefix":"AWSPoseidonService_V2015_11_01" + }, + "operations":{ + "CreateTags":{ + "name":"CreateTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateTagsRequest"}, + "output":{"shape":"CreateTagsResponse"}, + "errors":[ + {"shape":"AuthorizationErrorException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ServerInternalErrorException"} + ] + }, + "DeleteTags":{ + "name":"DeleteTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteTagsRequest"}, + "output":{"shape":"DeleteTagsResponse"}, + "errors":[ + {"shape":"AuthorizationErrorException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ServerInternalErrorException"} + ] + }, + "DescribeAgents":{ + "name":"DescribeAgents", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAgentsRequest"}, + "output":{"shape":"DescribeAgentsResponse"}, + "errors":[ + {"shape":"AuthorizationErrorException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ServerInternalErrorException"} + ] + }, + "DescribeConfigurations":{ + "name":"DescribeConfigurations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeConfigurationsRequest"}, + "output":{"shape":"DescribeConfigurationsResponse"}, + "errors":[ + {"shape":"AuthorizationErrorException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ServerInternalErrorException"} + ] + }, + "DescribeExportConfigurations":{ + 
"name":"DescribeExportConfigurations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeExportConfigurationsRequest"}, + "output":{"shape":"DescribeExportConfigurationsResponse"}, + "errors":[ + {"shape":"AuthorizationErrorException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ServerInternalErrorException"} + ] + }, + "DescribeTags":{ + "name":"DescribeTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTagsRequest"}, + "output":{"shape":"DescribeTagsResponse"}, + "errors":[ + {"shape":"AuthorizationErrorException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ServerInternalErrorException"} + ] + }, + "ExportConfigurations":{ + "name":"ExportConfigurations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{"shape":"ExportConfigurationsResponse"}, + "errors":[ + {"shape":"AuthorizationErrorException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ServerInternalErrorException"}, + {"shape":"OperationNotPermittedException"} + ] + }, + "ListConfigurations":{ + "name":"ListConfigurations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListConfigurationsRequest"}, + "output":{"shape":"ListConfigurationsResponse"}, + "errors":[ + {"shape":"AuthorizationErrorException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ServerInternalErrorException"} + ] + }, + "StartDataCollectionByAgentIds":{ + "name":"StartDataCollectionByAgentIds", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartDataCollectionByAgentIdsRequest"}, + "output":{"shape":"StartDataCollectionByAgentIdsResponse"}, + "errors":[ + {"shape":"AuthorizationErrorException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ServerInternalErrorException"} + ] + }, + "StopDataCollectionByAgentIds":{ + "name":"StopDataCollectionByAgentIds", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopDataCollectionByAgentIdsRequest"}, + "output":{"shape":"StopDataCollectionByAgentIdsResponse"}, + "errors":[ + {"shape":"AuthorizationErrorException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ServerInternalErrorException"} + ] + } + }, + "shapes":{ + "AgentConfigurationStatus":{ + "type":"structure", + "members":{ + "agentId":{"shape":"String"}, + "operationSucceeded":{"shape":"Boolean"}, + "description":{"shape":"String"} + } + }, + "AgentConfigurationStatusList":{ + "type":"list", + "member":{"shape":"AgentConfigurationStatus"} + }, + "AgentId":{"type":"string"}, + "AgentIds":{ + "type":"list", + "member":{"shape":"AgentId"} + }, + "AgentInfo":{ + "type":"structure", + "members":{ + "agentId":{"shape":"AgentId"}, + "hostName":{"shape":"String"}, + "agentNetworkInfoList":{"shape":"AgentNetworkInfoList"}, + "connectorId":{"shape":"String"}, + "version":{"shape":"String"}, + "health":{"shape":"AgentStatus"} + } + }, + "AgentNetworkInfo":{ + "type":"structure", + "members":{ + "ipAddress":{"shape":"String"}, + "macAddress":{"shape":"String"} + } + }, + "AgentNetworkInfoList":{ + "type":"list", + "member":{"shape":"AgentNetworkInfo"} + }, + 
"AgentStatus":{ + "type":"string", + "enum":[ + "HEALTHY", + "UNHEALTHY", + "RUNNING", + "UNKNOWN", + "BLACKLISTED", + "SHUTDOWN" + ] + }, + "AgentsInfo":{ + "type":"list", + "member":{"shape":"AgentInfo"} + }, + "AuthorizationErrorException":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + }, + "exception":true + }, + "Boolean":{"type":"boolean"}, + "Condition":{"type":"string"}, + "Configuration":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "ConfigurationId":{"type":"string"}, + "ConfigurationIdList":{ + "type":"list", + "member":{"shape":"ConfigurationId"} + }, + "ConfigurationItemType":{ + "type":"string", + "enum":[ + "SERVER", + "PROCESS", + "CONNECTION" + ] + }, + "ConfigurationTag":{ + "type":"structure", + "members":{ + "configurationType":{"shape":"ConfigurationItemType"}, + "configurationId":{"shape":"ConfigurationId"}, + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "timeOfCreation":{"shape":"TimeStamp"} + } + }, + "ConfigurationTagSet":{ + "type":"list", + "member":{ + "shape":"ConfigurationTag", + "locationName":"item" + } + }, + "Configurations":{ + "type":"list", + "member":{"shape":"Configuration"} + }, + "ConfigurationsDownloadUrl":{"type":"string"}, + "ConfigurationsExportId":{"type":"string"}, + "CreateTagsRequest":{ + "type":"structure", + "required":[ + "configurationIds", + "tags" + ], + "members":{ + "configurationIds":{"shape":"ConfigurationIdList"}, + "tags":{"shape":"TagSet"} + } + }, + "CreateTagsResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteTagsRequest":{ + "type":"structure", + "required":["configurationIds"], + "members":{ + "configurationIds":{"shape":"ConfigurationIdList"}, + "tags":{"shape":"TagSet"} + } + }, + "DeleteTagsResponse":{ + "type":"structure", + "members":{ + } + }, + "DescribeAgentsRequest":{ + "type":"structure", + "members":{ + "agentIds":{"shape":"AgentIds"}, + "maxResults":{"shape":"Integer"}, + "nextToken":{"shape":"NextToken"} + } + }, + "DescribeAgentsResponse":{ + "type":"structure", + "members":{ + "agentsInfo":{"shape":"AgentsInfo"}, + "nextToken":{"shape":"NextToken"} + } + }, + "DescribeConfigurationsAttribute":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "DescribeConfigurationsAttributes":{ + "type":"list", + "member":{"shape":"DescribeConfigurationsAttribute"} + }, + "DescribeConfigurationsRequest":{ + "type":"structure", + "required":["configurationIds"], + "members":{ + "configurationIds":{"shape":"ConfigurationIdList"} + } + }, + "DescribeConfigurationsResponse":{ + "type":"structure", + "members":{ + "configurations":{"shape":"DescribeConfigurationsAttributes"} + } + }, + "DescribeExportConfigurationsRequest":{ + "type":"structure", + "members":{ + "exportIds":{"shape":"ExportIds"}, + "maxResults":{"shape":"Integer"}, + "nextToken":{"shape":"NextToken"} + } + }, + "DescribeExportConfigurationsResponse":{ + "type":"structure", + "members":{ + "exportsInfo":{"shape":"ExportsInfo"}, + "nextToken":{"shape":"NextToken"} + } + }, + "DescribeTagsRequest":{ + "type":"structure", + "members":{ + "filters":{"shape":"TagFilters"}, + "maxResults":{"shape":"Integer"}, + "nextToken":{"shape":"NextToken"} + } + }, + "DescribeTagsResponse":{ + "type":"structure", + "members":{ + "tags":{"shape":"ConfigurationTagSet"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ExportConfigurationsResponse":{ + "type":"structure", + "members":{ + "exportId":{"shape":"ConfigurationsExportId"} + } + }, + "ExportIds":{ + 
"type":"list", + "member":{"shape":"ConfigurationsExportId"} + }, + "ExportInfo":{ + "type":"structure", + "required":[ + "exportId", + "exportStatus", + "statusMessage", + "exportRequestTime" + ], + "members":{ + "exportId":{"shape":"ConfigurationsExportId"}, + "exportStatus":{"shape":"ExportStatus"}, + "statusMessage":{"shape":"ExportStatusMessage"}, + "configurationsDownloadUrl":{"shape":"ConfigurationsDownloadUrl"}, + "exportRequestTime":{"shape":"ExportRequestTime"} + } + }, + "ExportRequestTime":{"type":"timestamp"}, + "ExportStatus":{ + "type":"string", + "enum":[ + "FAILED", + "SUCCEEDED", + "IN_PROGRESS" + ] + }, + "ExportStatusMessage":{"type":"string"}, + "ExportsInfo":{ + "type":"list", + "member":{"shape":"ExportInfo"} + }, + "Filter":{ + "type":"structure", + "required":[ + "name", + "values", + "condition" + ], + "members":{ + "name":{"shape":"String"}, + "values":{"shape":"FilterValues"}, + "condition":{"shape":"Condition"} + } + }, + "FilterName":{"type":"string"}, + "FilterValue":{"type":"string"}, + "FilterValues":{ + "type":"list", + "member":{ + "shape":"FilterValue", + "locationName":"item" + } + }, + "Filters":{ + "type":"list", + "member":{"shape":"Filter"} + }, + "Integer":{"type":"integer"}, + "InvalidParameterException":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + }, + "exception":true + }, + "InvalidParameterValueException":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + }, + "exception":true + }, + "ListConfigurationsRequest":{ + "type":"structure", + "required":["configurationType"], + "members":{ + "configurationType":{"shape":"ConfigurationItemType"}, + "filters":{"shape":"Filters"}, + "maxResults":{"shape":"Integer"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListConfigurationsResponse":{ + "type":"structure", + "members":{ + "configurations":{"shape":"Configurations"}, + "nextToken":{"shape":"NextToken"} + } + }, + "Message":{"type":"string"}, + "NextToken":{"type":"string"}, + "OperationNotPermittedException":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + }, + "exception":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + }, + "exception":true + }, + "ServerInternalErrorException":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + }, + "exception":true, + "fault":true + }, + "StartDataCollectionByAgentIdsRequest":{ + "type":"structure", + "required":["agentIds"], + "members":{ + "agentIds":{"shape":"AgentIds"} + } + }, + "StartDataCollectionByAgentIdsResponse":{ + "type":"structure", + "members":{ + "agentsConfigurationStatus":{"shape":"AgentConfigurationStatusList"} + } + }, + "StopDataCollectionByAgentIdsRequest":{ + "type":"structure", + "required":["agentIds"], + "members":{ + "agentIds":{"shape":"AgentIds"} + } + }, + "StopDataCollectionByAgentIdsResponse":{ + "type":"structure", + "members":{ + "agentsConfigurationStatus":{"shape":"AgentConfigurationStatusList"} + } + }, + "String":{"type":"string"}, + "Tag":{ + "type":"structure", + "required":[ + "key", + "value" + ], + "members":{ + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"} + } + }, + "TagFilter":{ + "type":"structure", + "required":[ + "name", + "values" + ], + "members":{ + "name":{"shape":"FilterName"}, + "values":{"shape":"FilterValues"} + } + }, + "TagFilters":{ + "type":"list", + "member":{"shape":"TagFilter"} + }, + "TagKey":{"type":"string"}, + "TagSet":{ + "type":"list", + "member":{ + "shape":"Tag", + 
"locationName":"item" + } + }, + "TagValue":{"type":"string"}, + "TimeStamp":{"type":"timestamp"} + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/discovery/2015-11-01/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/discovery/2015-11-01/docs-2.json new file mode 100644 index 000000000..cbb9bf5a9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/discovery/2015-11-01/docs-2.json @@ -0,0 +1,447 @@ +{ + "version": "2.0", + "service": "

    The AWS Application Discovery Service helps Systems Integrators quickly and reliably plan application migration projects by automatically identifying applications running in on-premises data centers, their associated dependencies, and their performance profile.

    Planning data center migrations can involve thousands of workloads that are often deeply interdependent. Application discovery and dependency mapping are important first steps in the migration process, but they are difficult to perform at scale due to the lack of automated tools.

    The AWS Application Discovery Service automatically collects configuration and usage data from servers to develop a list of applications, how they perform, and how they are interdependent. This information is securely retained in an AWS Application Discovery Service database which you can export as a CSV file into your preferred visualization tool or cloud migration solution to help reduce the complexity and time in planning your cloud migration.

    The Application Discovery Service is currently available for preview. Only customers who are engaged with AWS Professional Services or a certified AWS partner can use the service. To see the list of certified partners and request access to the Application Discovery Service, complete the following preview form.

    This API reference provides descriptions, syntax, and usage examples for each of the actions and data types for the Discovery Service. The topic for each action shows the API request parameters and the response. Alternatively, you can use one of the AWS SDKs to access an API that is tailored to the programming language or platform that you're using. For more information, see AWS SDKs.

    This guide is intended for use with the AWS Discovery Service User Guide.

    The following are short descriptions of each API action, organized by function.

    Managing AWS Agents Using the Application Discovery Service

    An AWS agent is software that you install on on-premises servers and virtual machines that are targeted for discovery and migration. Agents run on Linux and Windows Server and collect server configuration and activity information about your applications and infrastructure. Specifically, agents collect the following information and send it to the Application Discovery Service using Secure Sockets Layer (SSL) encryption:

    • User information (user name, home directory)

    • Group information (name)

    • List of installed packages

    • List of kernel modules

    • All create and stop process events

    • DNS queries

    • NIC information

    • TCP/UDP process listening ports

    • TCPV4/V6 connections

    • Operating system information

    • System performance

    • Process performance

    The Application Discovery Service API includes the following actions to manage AWS agents (a usage sketch follows this list):

    • StartDataCollectionByAgentIds: Instructs the specified agents to start collecting data. The Application Discovery Service takes several minutes to receive and process data after you initiate data collection.

    • StopDataCollectionByAgentIds: Instructs the specified agents to stop collecting data.

    • DescribeAgents: Lists AWS agents by ID or lists all agents associated with your user account if you did not specify an agent ID. The output includes agent IDs, IP addresses, media access control (MAC) addresses, agent health, host name where the agent resides, and the version number of each agent.
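
    A usage sketch of these agent actions with the applicationdiscoveryservice client from this SDK; as noted above, per-agent permission failures are reported in the Description field rather than raised as errors:

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            discovery "github.com/aws/aws-sdk-go/service/applicationdiscoveryservice"
        )

        func main() {
            svc := discovery.New(session.Must(session.NewSession()))

            // List every agent registered to this account.
            agents, err := svc.DescribeAgents(&discovery.DescribeAgentsInput{})
            if err != nil {
                log.Fatal(err)
            }
            var ids []*string
            for _, a := range agents.AgentsInfo {
                fmt.Printf("%s on %s: %s\n",
                    aws.StringValue(a.AgentId), aws.StringValue(a.HostName), aws.StringValue(a.Health))
                ids = append(ids, a.AgentId)
            }

            // Ask all of them to start collecting data.
            out, err := svc.StartDataCollectionByAgentIds(&discovery.StartDataCollectionByAgentIdsInput{AgentIds: ids})
            if err != nil {
                log.Fatal(err)
            }
            for _, s := range out.AgentsConfigurationStatus {
                fmt.Printf("%s succeeded=%v: %s\n",
                    aws.StringValue(s.AgentId), aws.BoolValue(s.OperationSucceeded), aws.StringValue(s.Description))
            }
        }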

    Querying Configuration Items

    A configuration item is an IT asset that was discovered in your data center by an AWS agent. When you use the Application Discovery Service, you can specify filters and query specific configuration items. The service supports Server, Process, and Connection configuration items. This means you can specify a value for the following keys and query your IT assets:

    Server

    • server.HostName

    • server.osName

    • server.osVersion

    • server.configurationId

    • server.agentId

    Process

    • process.name

    • process.CommandLine

    • process.configurationId

    • server.hostName

    • server.osName

    • server.osVersion

    • server.configurationId

    • server.agentId

    Connection

    • connection.sourceIp

    • connection.sourcePort

    • connection.destinationIp

    • connection.destinationPort

    • sourceProcess.configurationId

    • sourceProcess.commandLine

    • sourceProcess.name

    • destinationProcessId.configurationId

    • destinationProcess.commandLine

    • destinationProcess.name

    • sourceServer.configurationId

    • sourceServer.hostName

    • sourceServer.osName

    • sourceServer.osVersion

    • destinationServer.configurationId

    • destinationServer.hostName

    • destinationServer.osName

    • destinationServer.osVersion

    The Application Discovery Service includes the following actions for querying configuration items.

    • DescribeConfigurations: Retrieves a list of attributes for a specific configuration ID. For example, the output for a server configuration item includes a list of attributes about the server, including host name, operating system, number of network cards, etc.

    • ListConfigurations: Retrieves a list of configuration items according to the criteria you specify in a filter. The filter criteria identify relationship requirements. For example, you can specify filter criteria of process.name with values of nginx and apache (see the sketch after this list).
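
    A sketch of such a filtered query; the server.osName filter and the value "linux" are only illustrative:

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            discovery "github.com/aws/aws-sdk-go/service/applicationdiscoveryservice"
        )

        func main() {
            svc := discovery.New(session.Must(session.NewSession()))

            out, err := svc.ListConfigurations(&discovery.ListConfigurationsInput{
                ConfigurationType: aws.String("SERVER"),
                Filters: []*discovery.Filter{{
                    Name:      aws.String("server.osName"),
                    Values:    []*string{aws.String("linux")}, // illustrative value
                    Condition: aws.String("CONTAINS"),
                }},
                MaxResults: aws.Int64(10),
            })
            if err != nil {
                log.Fatal(err)
            }
            // Each configuration item is a map of attribute names to values.
            for _, item := range out.Configurations {
                fmt.Println(aws.StringValue(item["server.hostName"]), aws.StringValue(item["server.osName"]))
            }
        }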

    Tagging Discovered Configuration Items

    You can tag discovered configuration items. Tags are metadata that help you categorize IT assets in your data center. Tags use a key-value format. For example, {\"key\": \"serverType\", \"value\": \"webServer\"}.

    • CreateTags: Creates one or more tags for configuration items (see the sketch after this list).

    • DescribeTags: Retrieves a list of configuration items that are tagged with a specific tag. Or, retrieves a list of all tags assigned to a specific configuration item.

    • DeleteTags: Deletes the association between a configuration item and one or more tags.
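
    A minimal tagging sketch reusing the serverType/webServer example above; the configuration ID is a hypothetical placeholder you would normally obtain from ListConfigurations:

        package main

        import (
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            discovery "github.com/aws/aws-sdk-go/service/applicationdiscoveryservice"
        )

        func main() {
            svc := discovery.New(session.Must(session.NewSession()))

            // configurationID is a placeholder, not a real asset ID.
            configurationID := "d-server-0123456789abcdef"

            _, err := svc.CreateTags(&discovery.CreateTagsInput{
                ConfigurationIds: []*string{aws.String(configurationID)},
                Tags: []*discovery.Tag{{
                    Key:   aws.String("serverType"),
                    Value: aws.String("webServer"),
                }},
            })
            if err != nil {
                log.Fatal(err)
            }
        }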

    Exporting Data

    You can export data as a CSV file to an Amazon S3 bucket or into your preferred visualization tool or cloud migration solution to help reduce the complexity and time in planning your cloud migration.

    • ExportConfigurations: Exports all discovered configuration data to an Amazon S3 bucket. Data includes tags and tag associations, processes, connections, servers, and system performance. This API returns an export ID, which you can query using the GetExportStatus API (see the sketch after this list).

    • DescribeExportConfigurations: Gets the status of the data export. When the export is complete, the service returns an Amazon S3 URL where you can download CSV files that include the data.
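
    A sketch of the export flow: start an export, then poll DescribeExportConfigurations until the status leaves IN_PROGRESS (the 30-second interval is an arbitrary choice):

        package main

        import (
            "fmt"
            "log"
            "time"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            discovery "github.com/aws/aws-sdk-go/service/applicationdiscoveryservice"
        )

        func main() {
            svc := discovery.New(session.Must(session.NewSession()))

            exp, err := svc.ExportConfigurations(&discovery.ExportConfigurationsInput{})
            if err != nil {
                log.Fatal(err)
            }

            for {
                out, err := svc.DescribeExportConfigurations(&discovery.DescribeExportConfigurationsInput{
                    ExportIds: []*string{exp.ExportId},
                })
                if err != nil {
                    log.Fatal(err)
                }
                if len(out.ExportsInfo) == 0 {
                    log.Fatal("no export info returned")
                }
                info := out.ExportsInfo[0]
                if status := aws.StringValue(info.ExportStatus); status != "IN_PROGRESS" {
                    // On SUCCEEDED, ConfigurationsDownloadUrl points at the CSV data in S3.
                    fmt.Println(status, aws.StringValue(info.ConfigurationsDownloadUrl))
                    return
                }
                time.Sleep(30 * time.Second)
            }
        }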

    ", + "operations": { + "CreateTags": "

    Creates one or more tags for configuration items. Tags are metadata that help you categorize IT assets. This API accepts a list of multiple configuration items.

    ", + "DeleteTags": "

    Deletes the association between configuration items and one or more tags. This API accepts a list of multiple configuration items.

    ", + "DescribeAgents": "

    Lists AWS agents by ID or lists all agents associated with your user account if you did not specify an agent ID.

    ", + "DescribeConfigurations": "

    Retrieves a list of attributes for a specific configuration ID. For example, the output for a server configuration item includes a list of attributes about the server, including host name, operating system, number of network cards, etc.

    ", + "DescribeExportConfigurations": "

    Retrieves the status of a given export process. You can retrieve status from a maximum of 100 processes.

    ", + "DescribeTags": "

    Retrieves a list of configuration items that are tagged with a specific tag. Or retrieves a list of all tags assigned to a specific configuration item.

    ", + "ExportConfigurations": "

    Exports all discovered configuration data to an Amazon S3 bucket or an application that enables you to view and evaluate the data. Data includes tags and tag associations, processes, connections, servers, and system performance. This API returns an export ID which you can query using the GetExportStatus API. The system imposes a limit of two configuration exports in six hours.

    ", + "ListConfigurations": "

    Retrieves a list of configuration items according to the criteria you specify in a filter. The filter criteria identify relationship requirements.

    ", + "StartDataCollectionByAgentIds": "

    Instructs the specified agents to start collecting data. Agents can reside on host servers or virtual machines in your data center.

    ", + "StopDataCollectionByAgentIds": "

    Instructs the specified agents to stop collecting data.

    " + }, + "shapes": { + "AgentConfigurationStatus": { + "base": "

    Information about agents that were instructed to start collecting data. Information includes the agent ID, a description of the operation, and whether or not the agent configuration was updated.

    ", + "refs": { + "AgentConfigurationStatusList$member": null + } + }, + "AgentConfigurationStatusList": { + "base": null, + "refs": { + "StartDataCollectionByAgentIdsResponse$agentsConfigurationStatus": "

    Information about agents that were instructed to start collecting data. Information includes the agent ID, a description of the operation performed, and whether or not the agent configuration was updated.

    ", + "StopDataCollectionByAgentIdsResponse$agentsConfigurationStatus": "

    Information about agents that were instructed to stop collecting data. Information includes the agent ID, a description of the operation performed, and whether or not the agent configuration was updated.

    " + } + }, + "AgentId": { + "base": null, + "refs": { + "AgentIds$member": null, + "AgentInfo$agentId": "

    The agent ID.

    " + } + }, + "AgentIds": { + "base": null, + "refs": { + "DescribeAgentsRequest$agentIds": "

    The agent IDs for which you want information. If you specify no IDs, the system returns information about all agents associated with your AWS user account.

    ", + "StartDataCollectionByAgentIdsRequest$agentIds": "

    The IDs of the agents that you want to start collecting data. If you send a request to an agent ID that your AWS account does not have permission to contact, the service does not throw an exception; instead, it returns the error in the Description field. If you send a request to multiple agents and you do not have permission to contact some of those agents, the system does not throw an exception; instead, the system shows Failed in the Description field.

    ", + "StopDataCollectionByAgentIdsRequest$agentIds": "

    The IDs of the agents that you want to stop collecting data.

    " + } + }, + "AgentInfo": { + "base": "

    Information about agents associated with the user’s AWS account. Information includes agent IDs, IP addresses, media access control (MAC) addresses, agent health, hostname where the agent resides, and agent version for each agent.

    ", + "refs": { + "AgentsInfo$member": null + } + }, + "AgentNetworkInfo": { + "base": "

    Network details about the host where the agent resides.

    ", + "refs": { + "AgentNetworkInfoList$member": null + } + }, + "AgentNetworkInfoList": { + "base": null, + "refs": { + "AgentInfo$agentNetworkInfoList": "

    Network details about the host where the agent resides.

    " + } + }, + "AgentStatus": { + "base": null, + "refs": { + "AgentInfo$health": "

    The health of the agent.

    " + } + }, + "AgentsInfo": { + "base": null, + "refs": { + "DescribeAgentsResponse$agentsInfo": "

    Lists AWS agents by ID or lists all agents associated with your user account if you did not specify an agent ID. The output includes agent IDs, IP addresses, media access control (MAC) addresses, agent health, host name where the agent resides, and the version number of each agent.

    " + } + }, + "AuthorizationErrorException": { + "base": "

    The AWS user account does not have permission to perform the action. Check the IAM policy associated with this account.

    ", + "refs": { + } + }, + "Boolean": { + "base": null, + "refs": { + "AgentConfigurationStatus$operationSucceeded": "

    Information about the status of the StartDataCollection and StopDataCollection operations. The system has recorded the data collection operation. The agent receives this command the next time it polls for a new command.

    " + } + }, + "Condition": { + "base": null, + "refs": { + "Filter$condition": "

    A conditional operator. The following operators are valid: EQUALS, NOT_EQUALS, CONTAINS, NOT_CONTAINS. If you specify multiple filters, the system utilizes all filters as though concatenated by AND. If you specify multiple values for a particular filter, the system differentiates the values using OR. Calling either DescribeConfigurations or ListConfigurations returns attributes of matching configuration items.

    " + } + }, + "Configuration": { + "base": null, + "refs": { + "Configurations$member": null + } + }, + "ConfigurationId": { + "base": null, + "refs": { + "ConfigurationIdList$member": null, + "ConfigurationTag$configurationId": "

    The configuration ID for the item you want to tag. You can specify a list of keys and values.

    " + } + }, + "ConfigurationIdList": { + "base": null, + "refs": { + "CreateTagsRequest$configurationIds": "

    A list of configuration items that you want to tag.

    ", + "DeleteTagsRequest$configurationIds": "

    A list of configuration items with tags that you want to delete.

    ", + "DescribeConfigurationsRequest$configurationIds": "

    One or more configuration IDs.

    " + } + }, + "ConfigurationItemType": { + "base": null, + "refs": { + "ConfigurationTag$configurationType": "

    A type of IT asset that you want to tag.

    ", + "ListConfigurationsRequest$configurationType": "

    A valid configuration item type identified by the Discovery Service, such as SERVER, PROCESS, or CONNECTION.

    " + } + }, + "ConfigurationTag": { + "base": "

    Tags for a configuration item. Tags are metadata that help you categorize IT assets.

    ", + "refs": { + "ConfigurationTagSet$member": null + } + }, + "ConfigurationTagSet": { + "base": null, + "refs": { + "DescribeTagsResponse$tags": "

    Depending on the input, this is a list of configuration items tagged with a specific tag, or a list of tags for a specific configuration item.

    " + } + }, + "Configurations": { + "base": null, + "refs": { + "ListConfigurationsResponse$configurations": "

    Returns configuration details, including the configuration ID, attribute names, and attribute values.

    " + } + }, + "ConfigurationsDownloadUrl": { + "base": null, + "refs": { + "ExportInfo$configurationsDownloadUrl": "

    A URL for an Amazon S3 bucket where you can review the configuration data. The URL is displayed only if the export succeeded.

    " + } + }, + "ConfigurationsExportId": { + "base": null, + "refs": { + "ExportConfigurationsResponse$exportId": "

    A unique identifier that you can use to query the export status.

    ", + "ExportIds$member": null, + "ExportInfo$exportId": "

    A unique identifier that you can use to query the export.

    " + } + }, + "CreateTagsRequest": { + "base": null, + "refs": { + } + }, + "CreateTagsResponse": { + "base": null, + "refs": { + } + }, + "DeleteTagsRequest": { + "base": null, + "refs": { + } + }, + "DeleteTagsResponse": { + "base": null, + "refs": { + } + }, + "DescribeAgentsRequest": { + "base": null, + "refs": { + } + }, + "DescribeAgentsResponse": { + "base": null, + "refs": { + } + }, + "DescribeConfigurationsAttribute": { + "base": null, + "refs": { + "DescribeConfigurationsAttributes$member": null + } + }, + "DescribeConfigurationsAttributes": { + "base": null, + "refs": { + "DescribeConfigurationsResponse$configurations": "

    An array of attribute maps for the requested configuration items. Each map contains the key-value pairs that describe one configuration item.

    " + } + }, + "DescribeConfigurationsRequest": { + "base": null, + "refs": { + } + }, + "DescribeConfigurationsResponse": { + "base": null, + "refs": { + } + }, + "DescribeExportConfigurationsRequest": { + "base": null, + "refs": { + } + }, + "DescribeExportConfigurationsResponse": { + "base": null, + "refs": { + } + }, + "DescribeTagsRequest": { + "base": null, + "refs": { + } + }, + "DescribeTagsResponse": { + "base": null, + "refs": { + } + }, + "ExportConfigurationsResponse": { + "base": null, + "refs": { + } + }, + "ExportIds": { + "base": null, + "refs": { + "DescribeExportConfigurationsRequest$exportIds": "

    One or more unique identifiers that you can use to query the export status.

    " + } + }, + "ExportInfo": { + "base": "

    Information regarding the export status of the discovered data. The value is an array of objects.

    ", + "refs": { + "ExportsInfo$member": null + } + }, + "ExportRequestTime": { + "base": null, + "refs": { + "ExportInfo$exportRequestTime": "

    The time the configuration data export was initiated.

    " + } + }, + "ExportStatus": { + "base": null, + "refs": { + "ExportInfo$exportStatus": "

    The status of the configuration data export. The status can succeed, fail, or be in-progress.

    " + } + }, + "ExportStatusMessage": { + "base": null, + "refs": { + "ExportInfo$statusMessage": "

    Helpful status messages for API callers. For example: \"Too many exports in the last 6 hours,\" \"Export in progress,\" or \"Export was successful.\"

    " + } + }, + "ExportsInfo": { + "base": null, + "refs": { + "DescribeExportConfigurationsResponse$exportsInfo": "

    Returns export details. When the status is complete, the response includes a URL for an Amazon S3 bucket where you can view the data in a CSV file.

    " + } + }, + "Filter": { + "base": "

    A filter that can use conditional operators.

    ", + "refs": { + "Filters$member": null + } + }, + "FilterName": { + "base": null, + "refs": { + "TagFilter$name": "

    A name of a tag filter.

    " + } + }, + "FilterValue": { + "base": null, + "refs": { + "FilterValues$member": null + } + }, + "FilterValues": { + "base": null, + "refs": { + "Filter$values": "

    A string value that you want to filter on. For example, if you choose the destinationServer.osVersion filter name, you could specify Ubuntu for the value.

    ", + "TagFilter$values": "

    Values of a tag filter.

    " + } + }, + "Filters": { + "base": null, + "refs": { + "ListConfigurationsRequest$filters": "

    You can filter the list using a key-value format. For example:

    {\"key\": \"serverType\", \"value\": \"webServer\"}

    You can combine multiple filter items by using logical operators.

    " + } + }, + "Integer": { + "base": null, + "refs": { + "DescribeAgentsRequest$maxResults": "

    The total number of agents to return. The maximum value is 100.

    ", + "DescribeExportConfigurationsRequest$maxResults": "

    The maximum number of results that you want to display as a part of the query.

    ", + "DescribeTagsRequest$maxResults": "

    The total number of items to return. The maximum value is 100.

    ", + "ListConfigurationsRequest$maxResults": "

    The total number of items to return. The maximum value is 100.

    " + } + }, + "InvalidParameterException": { + "base": "

    One or more parameters are not valid. Verify the parameters and try again.

    ", + "refs": { + } + }, + "InvalidParameterValueException": { + "base": "

    The values of one or more parameters are either invalid or out of range. Verify the parameter values and try again.

    ", + "refs": { + } + }, + "ListConfigurationsRequest": { + "base": null, + "refs": { + } + }, + "ListConfigurationsResponse": { + "base": null, + "refs": { + } + }, + "Message": { + "base": null, + "refs": { + "AuthorizationErrorException$message": null, + "InvalidParameterException$message": null, + "InvalidParameterValueException$message": null, + "OperationNotPermittedException$message": null, + "ResourceNotFoundException$message": null, + "ServerInternalErrorException$message": null + } + }, + "NextToken": { + "base": null, + "refs": { + "DescribeAgentsRequest$nextToken": "

    A token to start the list. Use this token to get the next set of results.

    ", + "DescribeAgentsResponse$nextToken": "

    The call returns a token. Use this token to get the next set of results.

    ", + "DescribeExportConfigurationsRequest$nextToken": "

    A token to get the next set of results. For example, if you specified 100 IDs for DescribeConfigurationsRequest$configurationIds but set DescribeExportConfigurationsRequest$maxResults to 10, you will get results in a set of 10. Use the token in the query to get the next set of 10.

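    A paging loop over DescribeAgents, sketched with the vendored Go SDK under the same token convention (the page size and the pre-built client are assumptions):

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/applicationdiscoveryservice"
    )

    func describeAllAgents(svc *applicationdiscoveryservice.ApplicationDiscoveryService) {
        input := &applicationdiscoveryservice.DescribeAgentsInput{
            MaxResults: aws.Int64(10), // fetch ten agents per call
        }
        for {
            page, err := svc.DescribeAgents(input)
            if err != nil {
                log.Fatal(err)
            }
            for _, a := range page.AgentsInfo {
                fmt.Println(aws.StringValue(a.AgentId), aws.StringValue(a.Health))
            }
            if aws.StringValue(page.NextToken) == "" {
                break // no token means no further results
            }
            input.NextToken = page.NextToken // feed the token back to get the next set
        }
    }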
    ", + "DescribeExportConfigurationsResponse$nextToken": "

    A token to get the next set of results. For example, if you specified 100 IDs for DescribeConfigurationsRequest$configurationIds but set DescribeExportConfigurationsRequest$maxResults to 10, you will get results in a set of 10. Use the token in the query to get the next set of 10.

    ", + "DescribeTagsRequest$nextToken": "

    A token to start the list. Use this token to get the next set of results.

    ", + "DescribeTagsResponse$nextToken": "

    The call returns a token. Use this token to get the next set of results.

    ", + "ListConfigurationsRequest$nextToken": "

    A token to start the list. Use this token to get the next set of results.

    ", + "ListConfigurationsResponse$nextToken": "

    The call returns a token. Use this token to get the next set of results.

    " + } + }, + "OperationNotPermittedException": { + "base": "

    This operation is not permitted.

    ", + "refs": { + } + }, + "ResourceNotFoundException": { + "base": "

    The specified configuration ID was not located. Verify the configuration ID and try again.

    ", + "refs": { + } + }, + "ServerInternalErrorException": { + "base": "

    The server experienced an internal error. Try again.

    ", + "refs": { + } + }, + "StartDataCollectionByAgentIdsRequest": { + "base": null, + "refs": { + } + }, + "StartDataCollectionByAgentIdsResponse": { + "base": null, + "refs": { + } + }, + "StopDataCollectionByAgentIdsRequest": { + "base": null, + "refs": { + } + }, + "StopDataCollectionByAgentIdsResponse": { + "base": null, + "refs": { + } + }, + "String": { + "base": null, + "refs": { + "AgentConfigurationStatus$agentId": "

    The agent ID.

    ", + "AgentConfigurationStatus$description": "

    A description of the operation performed.

    ", + "AgentInfo$hostName": "

    The name of the host where the agent resides. The host can be a server or virtual machine.

    ", + "AgentInfo$connectorId": "

    This data type is currently not valid.

    ", + "AgentInfo$version": "

    The agent version.

    ", + "AgentNetworkInfo$ipAddress": "

    The IP address for the host where the agent resides.

    ", + "AgentNetworkInfo$macAddress": "

    The MAC address for the host where the agent resides.

    ", + "Configuration$key": null, + "Configuration$value": null, + "DescribeConfigurationsAttribute$key": null, + "DescribeConfigurationsAttribute$value": null, + "Filter$name": "

    The name of the filter. The following filter names are allowed, grouped by configuration item type.

    Server

    • server.hostName
    • server.osName
    • server.osVersion
    • server.configurationid
    • server.agentid

    Process

    • process.configurationid
    • process.name
    • process.commandLine
    • server.configurationid
    • server.hostName
    • server.osName
    • server.osVersion
    • server.agentId

    Connection

    • connection.sourceIp
    • connection.destinationIp
    • connection.destinationPort
    • sourceProcess.configurationId
    • sourceProcess.name
    • sourceProcess.commandLine
    • destinationProcess.configurationId
    • destinationProcess.name
    • destinationProcess.commandLine
    • sourceServer.configurationId
    • sourceServer.hostName
    • sourceServer.osName
    • sourceServer.osVersion
    • sourceServer.agentId
    • destinationServer.configurationId
    • destinationServer.hostName
    • destinationServer.osName
    • destinationServer.osVersion
    • destinationServer.agentId

    " + } + }, + "Tag": { + "base": "

    Metadata that helps you categorize IT assets.

    ", + "refs": { + "TagSet$member": null + } + }, + "TagFilter": { + "base": "

    A tag filter. Valid filter names are: tagKey, tagValue, and configurationId.

    ", + "refs": { + "TagFilters$member": null + } + }, + "TagFilters": { + "base": null, + "refs": { + "DescribeTagsRequest$filters": "

    You can filter the list using a key-value format, and you can combine multiple filter items by using logical operators. Allowed filters include tagKey, tagValue, and configurationId.

    " + } + }, + "TagKey": { + "base": null, + "refs": { + "ConfigurationTag$key": "

    A type of tag to filter on. For example, serverType.

    ", + "Tag$key": "

    A type of tag to filter on.

    " + } + }, + "TagSet": { + "base": null, + "refs": { + "CreateTagsRequest$tags": "

    Tags that you want to associate with one or more configuration items. Specify the tags that you want to create in a key-value format. For example:

    {\"key\": \"serverType\", \"value\": \"webServer\"}

    ", + "DeleteTagsRequest$tags": "

    Tags that you want to delete from one or more configuration items. Specify the tags that you want to delete in a key-value format. For example:

    {\"key\": \"serverType\", \"value\": \"webServer\"}

    " + } + }, + "TagValue": { + "base": null, + "refs": { + "ConfigurationTag$value": "

    A value to filter on. For example, key = serverType and value = web server.

    ", + "Tag$value": "

    A value for a tag key to filter on.

    " + } + }, + "TimeStamp": { + "base": null, + "refs": { + "ConfigurationTag$timeOfCreation": "

    The time the configuration tag was created in Coordinated Universal Time (UTC).

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/discovery/2015-11-01/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/discovery/2015-11-01/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/discovery/2015-11-01/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/dms/2016-01-01/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/dms/2016-01-01/api-2.json new file mode 100644 index 000000000..6429f891f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/dms/2016-01-01/api-2.json @@ -0,0 +1,1325 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2016-01-01", + "endpointPrefix":"dms", + "jsonVersion":"1.1", + "protocol":"json", + "serviceFullName":"AWS Database Migration Service", + "signatureVersion":"v4", + "targetPrefix":"AmazonDMSv20160101" + }, + "operations":{ + "AddTagsToResource":{ + "name":"AddTagsToResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddTagsToResourceMessage"}, + "output":{"shape":"AddTagsToResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"} + ] + }, + "CreateEndpoint":{ + "name":"CreateEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateEndpointMessage"}, + "output":{"shape":"CreateEndpointResponse"}, + "errors":[ + {"shape":"KMSKeyNotAccessibleFault"}, + {"shape":"ResourceAlreadyExistsFault"}, + {"shape":"ResourceQuotaExceededFault"}, + {"shape":"InvalidResourceStateFault"}, + {"shape":"ResourceNotFoundFault"} + ] + }, + "CreateReplicationInstance":{ + "name":"CreateReplicationInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateReplicationInstanceMessage"}, + "output":{"shape":"CreateReplicationInstanceResponse"}, + "errors":[ + {"shape":"AccessDeniedFault"}, + {"shape":"ResourceAlreadyExistsFault"}, + {"shape":"InsufficientResourceCapacityFault"}, + {"shape":"ResourceQuotaExceededFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"ResourceNotFoundFault"}, + {"shape":"ReplicationSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidResourceStateFault"}, + {"shape":"InvalidSubnet"}, + {"shape":"KMSKeyNotAccessibleFault"} + ] + }, + "CreateReplicationSubnetGroup":{ + "name":"CreateReplicationSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateReplicationSubnetGroupMessage"}, + "output":{"shape":"CreateReplicationSubnetGroupResponse"}, + "errors":[ + {"shape":"AccessDeniedFault"}, + {"shape":"ResourceAlreadyExistsFault"}, + {"shape":"ResourceNotFoundFault"}, + {"shape":"ResourceQuotaExceededFault"}, + {"shape":"ReplicationSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"} + ] + }, + "CreateReplicationTask":{ + "name":"CreateReplicationTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateReplicationTaskMessage"}, + "output":{"shape":"CreateReplicationTaskResponse"}, + "errors":[ + {"shape":"InvalidResourceStateFault"}, + {"shape":"ResourceAlreadyExistsFault"}, + {"shape":"ResourceNotFoundFault"}, + {"shape":"KMSKeyNotAccessibleFault"}, + {"shape":"ResourceQuotaExceededFault"} + ] + }, + "DeleteEndpoint":{ + "name":"DeleteEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteEndpointMessage"}, + "output":{"shape":"DeleteEndpointResponse"}, + "errors":[ + 
{"shape":"ResourceNotFoundFault"}, + {"shape":"InvalidResourceStateFault"} + ] + }, + "DeleteReplicationInstance":{ + "name":"DeleteReplicationInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteReplicationInstanceMessage"}, + "output":{"shape":"DeleteReplicationInstanceResponse"}, + "errors":[ + {"shape":"InvalidResourceStateFault"}, + {"shape":"ResourceNotFoundFault"} + ] + }, + "DeleteReplicationSubnetGroup":{ + "name":"DeleteReplicationSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteReplicationSubnetGroupMessage"}, + "output":{"shape":"DeleteReplicationSubnetGroupResponse"}, + "errors":[ + {"shape":"InvalidResourceStateFault"}, + {"shape":"ResourceNotFoundFault"} + ] + }, + "DeleteReplicationTask":{ + "name":"DeleteReplicationTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteReplicationTaskMessage"}, + "output":{"shape":"DeleteReplicationTaskResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"}, + {"shape":"InvalidResourceStateFault"} + ] + }, + "DescribeAccountAttributes":{ + "name":"DescribeAccountAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAccountAttributesMessage"}, + "output":{"shape":"DescribeAccountAttributesResponse"} + }, + "DescribeConnections":{ + "name":"DescribeConnections", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeConnectionsMessage"}, + "output":{"shape":"DescribeConnectionsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"} + ] + }, + "DescribeEndpointTypes":{ + "name":"DescribeEndpointTypes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEndpointTypesMessage"}, + "output":{"shape":"DescribeEndpointTypesResponse"} + }, + "DescribeEndpoints":{ + "name":"DescribeEndpoints", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEndpointsMessage"}, + "output":{"shape":"DescribeEndpointsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"} + ] + }, + "DescribeOrderableReplicationInstances":{ + "name":"DescribeOrderableReplicationInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOrderableReplicationInstancesMessage"}, + "output":{"shape":"DescribeOrderableReplicationInstancesResponse"} + }, + "DescribeRefreshSchemasStatus":{ + "name":"DescribeRefreshSchemasStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRefreshSchemasStatusMessage"}, + "output":{"shape":"DescribeRefreshSchemasStatusResponse"}, + "errors":[ + {"shape":"InvalidResourceStateFault"}, + {"shape":"ResourceNotFoundFault"} + ] + }, + "DescribeReplicationInstances":{ + "name":"DescribeReplicationInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReplicationInstancesMessage"}, + "output":{"shape":"DescribeReplicationInstancesResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"} + ] + }, + "DescribeReplicationSubnetGroups":{ + "name":"DescribeReplicationSubnetGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReplicationSubnetGroupsMessage"}, + "output":{"shape":"DescribeReplicationSubnetGroupsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"} + ] + }, + "DescribeReplicationTasks":{ + "name":"DescribeReplicationTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReplicationTasksMessage"}, + 
"output":{"shape":"DescribeReplicationTasksResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"} + ] + }, + "DescribeSchemas":{ + "name":"DescribeSchemas", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSchemasMessage"}, + "output":{"shape":"DescribeSchemasResponse"}, + "errors":[ + {"shape":"InvalidResourceStateFault"}, + {"shape":"ResourceNotFoundFault"} + ] + }, + "DescribeTableStatistics":{ + "name":"DescribeTableStatistics", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTableStatisticsMessage"}, + "output":{"shape":"DescribeTableStatisticsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"}, + {"shape":"InvalidResourceStateFault"} + ] + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceMessage"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"} + ] + }, + "ModifyEndpoint":{ + "name":"ModifyEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyEndpointMessage"}, + "output":{"shape":"ModifyEndpointResponse"}, + "errors":[ + {"shape":"InvalidResourceStateFault"}, + {"shape":"ResourceNotFoundFault"}, + {"shape":"ResourceAlreadyExistsFault"}, + {"shape":"KMSKeyNotAccessibleFault"} + ] + }, + "ModifyReplicationInstance":{ + "name":"ModifyReplicationInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyReplicationInstanceMessage"}, + "output":{"shape":"ModifyReplicationInstanceResponse"}, + "errors":[ + {"shape":"InvalidResourceStateFault"}, + {"shape":"ResourceAlreadyExistsFault"}, + {"shape":"ResourceNotFoundFault"}, + {"shape":"InsufficientResourceCapacityFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"UpgradeDependencyFailureFault"} + ] + }, + "ModifyReplicationSubnetGroup":{ + "name":"ModifyReplicationSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyReplicationSubnetGroupMessage"}, + "output":{"shape":"ModifyReplicationSubnetGroupResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"}, + {"shape":"ResourceQuotaExceededFault"}, + {"shape":"SubnetAlreadyInUse"}, + {"shape":"ReplicationSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"} + ] + }, + "RefreshSchemas":{ + "name":"RefreshSchemas", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RefreshSchemasMessage"}, + "output":{"shape":"RefreshSchemasResponse"}, + "errors":[ + {"shape":"InvalidResourceStateFault"}, + {"shape":"ResourceNotFoundFault"}, + {"shape":"KMSKeyNotAccessibleFault"}, + {"shape":"ResourceQuotaExceededFault"} + ] + }, + "RemoveTagsFromResource":{ + "name":"RemoveTagsFromResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveTagsFromResourceMessage"}, + "output":{"shape":"RemoveTagsFromResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"} + ] + }, + "StartReplicationTask":{ + "name":"StartReplicationTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartReplicationTaskMessage"}, + "output":{"shape":"StartReplicationTaskResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"}, + {"shape":"InvalidResourceStateFault"} + ] + }, + "StopReplicationTask":{ + "name":"StopReplicationTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopReplicationTaskMessage"}, + 
"output":{"shape":"StopReplicationTaskResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"}, + {"shape":"InvalidResourceStateFault"} + ] + }, + "TestConnection":{ + "name":"TestConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TestConnectionMessage"}, + "output":{"shape":"TestConnectionResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"}, + {"shape":"InvalidResourceStateFault"}, + {"shape":"KMSKeyNotAccessibleFault"}, + {"shape":"ResourceQuotaExceededFault"} + ] + } + }, + "shapes":{ + "AccessDeniedFault":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "AccountQuota":{ + "type":"structure", + "members":{ + "AccountQuotaName":{"shape":"String"}, + "Used":{"shape":"Long"}, + "Max":{"shape":"Long"} + } + }, + "AccountQuotaList":{ + "type":"list", + "member":{ + "shape":"AccountQuota", + "locationName":"AccountQuota" + } + }, + "AddTagsToResourceMessage":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "AddTagsToResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "AvailabilityZone":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"} + } + }, + "Boolean":{"type":"boolean"}, + "BooleanOptional":{"type":"boolean"}, + "Connection":{ + "type":"structure", + "members":{ + "ReplicationInstanceArn":{"shape":"String"}, + "EndpointArn":{"shape":"String"}, + "Status":{"shape":"String"}, + "LastFailureMessage":{"shape":"String"}, + "EndpointIdentifier":{"shape":"String"}, + "ReplicationInstanceIdentifier":{"shape":"String"} + } + }, + "ConnectionList":{ + "type":"list", + "member":{ + "shape":"Connection", + "locationName":"Connection" + } + }, + "CreateEndpointMessage":{ + "type":"structure", + "required":[ + "EndpointIdentifier", + "EndpointType", + "EngineName", + "Username", + "Password", + "ServerName", + "Port" + ], + "members":{ + "EndpointIdentifier":{"shape":"String"}, + "EndpointType":{"shape":"ReplicationEndpointTypeValue"}, + "EngineName":{"shape":"String"}, + "Username":{"shape":"String"}, + "Password":{"shape":"SecretString"}, + "ServerName":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "DatabaseName":{"shape":"String"}, + "ExtraConnectionAttributes":{"shape":"String"}, + "KmsKeyId":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateEndpointResponse":{ + "type":"structure", + "members":{ + "Endpoint":{"shape":"Endpoint"} + } + }, + "CreateReplicationInstanceMessage":{ + "type":"structure", + "required":[ + "ReplicationInstanceIdentifier", + "ReplicationInstanceClass" + ], + "members":{ + "ReplicationInstanceIdentifier":{"shape":"String"}, + "AllocatedStorage":{"shape":"IntegerOptional"}, + "ReplicationInstanceClass":{"shape":"String"}, + "VpcSecurityGroupIds":{"shape":"VpcSecurityGroupIdList"}, + "AvailabilityZone":{"shape":"String"}, + "ReplicationSubnetGroupIdentifier":{"shape":"String"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "Tags":{"shape":"TagList"}, + "KmsKeyId":{"shape":"String"}, + "PubliclyAccessible":{"shape":"BooleanOptional"} + } + }, + "CreateReplicationInstanceResponse":{ + "type":"structure", + "members":{ + "ReplicationInstance":{"shape":"ReplicationInstance"} + } + }, + "CreateReplicationSubnetGroupMessage":{ + "type":"structure", + "required":[ + "ReplicationSubnetGroupIdentifier", + 
"ReplicationSubnetGroupDescription", + "SubnetIds" + ], + "members":{ + "ReplicationSubnetGroupIdentifier":{"shape":"String"}, + "ReplicationSubnetGroupDescription":{"shape":"String"}, + "SubnetIds":{"shape":"SubnetIdentifierList"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateReplicationSubnetGroupResponse":{ + "type":"structure", + "members":{ + "ReplicationSubnetGroup":{"shape":"ReplicationSubnetGroup"} + } + }, + "CreateReplicationTaskMessage":{ + "type":"structure", + "required":[ + "ReplicationTaskIdentifier", + "SourceEndpointArn", + "TargetEndpointArn", + "ReplicationInstanceArn", + "MigrationType", + "TableMappings" + ], + "members":{ + "ReplicationTaskIdentifier":{"shape":"String"}, + "SourceEndpointArn":{"shape":"String"}, + "TargetEndpointArn":{"shape":"String"}, + "ReplicationInstanceArn":{"shape":"String"}, + "MigrationType":{"shape":"MigrationTypeValue"}, + "TableMappings":{"shape":"String"}, + "ReplicationTaskSettings":{"shape":"String"}, + "CdcStartTime":{"shape":"TStamp"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateReplicationTaskResponse":{ + "type":"structure", + "members":{ + "ReplicationTask":{"shape":"ReplicationTask"} + } + }, + "DeleteEndpointMessage":{ + "type":"structure", + "required":["EndpointArn"], + "members":{ + "EndpointArn":{"shape":"String"} + } + }, + "DeleteEndpointResponse":{ + "type":"structure", + "members":{ + "Endpoint":{"shape":"Endpoint"} + } + }, + "DeleteReplicationInstanceMessage":{ + "type":"structure", + "required":["ReplicationInstanceArn"], + "members":{ + "ReplicationInstanceArn":{"shape":"String"} + } + }, + "DeleteReplicationInstanceResponse":{ + "type":"structure", + "members":{ + "ReplicationInstance":{"shape":"ReplicationInstance"} + } + }, + "DeleteReplicationSubnetGroupMessage":{ + "type":"structure", + "required":["ReplicationSubnetGroupIdentifier"], + "members":{ + "ReplicationSubnetGroupIdentifier":{"shape":"String"} + } + }, + "DeleteReplicationSubnetGroupResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteReplicationTaskMessage":{ + "type":"structure", + "required":["ReplicationTaskArn"], + "members":{ + "ReplicationTaskArn":{"shape":"String"} + } + }, + "DeleteReplicationTaskResponse":{ + "type":"structure", + "members":{ + "ReplicationTask":{"shape":"ReplicationTask"} + } + }, + "DescribeAccountAttributesMessage":{ + "type":"structure", + "members":{ + } + }, + "DescribeAccountAttributesResponse":{ + "type":"structure", + "members":{ + "AccountQuotas":{"shape":"AccountQuotaList"} + } + }, + "DescribeConnectionsMessage":{ + "type":"structure", + "members":{ + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeConnectionsResponse":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "Connections":{"shape":"ConnectionList"} + } + }, + "DescribeEndpointTypesMessage":{ + "type":"structure", + "members":{ + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeEndpointTypesResponse":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "SupportedEndpointTypes":{"shape":"SupportedEndpointTypeList"} + } + }, + "DescribeEndpointsMessage":{ + "type":"structure", + "members":{ + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeEndpointsResponse":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "Endpoints":{"shape":"EndpointList"} + } 
+ }, + "DescribeOrderableReplicationInstancesMessage":{ + "type":"structure", + "members":{ + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeOrderableReplicationInstancesResponse":{ + "type":"structure", + "members":{ + "OrderableReplicationInstances":{"shape":"OrderableReplicationInstanceList"}, + "Marker":{"shape":"String"} + } + }, + "DescribeRefreshSchemasStatusMessage":{ + "type":"structure", + "required":["EndpointArn"], + "members":{ + "EndpointArn":{"shape":"String"} + } + }, + "DescribeRefreshSchemasStatusResponse":{ + "type":"structure", + "members":{ + "RefreshSchemasStatus":{"shape":"RefreshSchemasStatus"} + } + }, + "DescribeReplicationInstancesMessage":{ + "type":"structure", + "members":{ + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeReplicationInstancesResponse":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ReplicationInstances":{"shape":"ReplicationInstanceList"} + } + }, + "DescribeReplicationSubnetGroupsMessage":{ + "type":"structure", + "members":{ + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeReplicationSubnetGroupsResponse":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ReplicationSubnetGroups":{"shape":"ReplicationSubnetGroups"} + } + }, + "DescribeReplicationTasksMessage":{ + "type":"structure", + "members":{ + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeReplicationTasksResponse":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ReplicationTasks":{"shape":"ReplicationTaskList"} + } + }, + "DescribeSchemasMessage":{ + "type":"structure", + "required":["EndpointArn"], + "members":{ + "EndpointArn":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeSchemasResponse":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "Schemas":{"shape":"SchemaList"} + } + }, + "DescribeTableStatisticsMessage":{ + "type":"structure", + "required":["ReplicationTaskArn"], + "members":{ + "ReplicationTaskArn":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeTableStatisticsResponse":{ + "type":"structure", + "members":{ + "ReplicationTaskArn":{"shape":"String"}, + "TableStatistics":{"shape":"TableStatisticsList"}, + "Marker":{"shape":"String"} + } + }, + "Endpoint":{ + "type":"structure", + "members":{ + "EndpointIdentifier":{"shape":"String"}, + "EndpointType":{"shape":"ReplicationEndpointTypeValue"}, + "EngineName":{"shape":"String"}, + "Username":{"shape":"String"}, + "ServerName":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "DatabaseName":{"shape":"String"}, + "ExtraConnectionAttributes":{"shape":"String"}, + "Status":{"shape":"String"}, + "KmsKeyId":{"shape":"String"}, + "EndpointArn":{"shape":"String"} + } + }, + "EndpointList":{ + "type":"list", + "member":{ + "shape":"Endpoint", + "locationName":"Endpoint" + } + }, + "ExceptionMessage":{"type":"string"}, + "Filter":{ + "type":"structure", + "required":[ + "Name", + "Values" + ], + "members":{ + "Name":{"shape":"String"}, + "Values":{"shape":"FilterValueList"} + } + }, + "FilterList":{ + "type":"list", + "member":{ + "shape":"Filter", + "locationName":"Filter" + } + }, + "FilterValueList":{ + "type":"list", + 
"member":{ + "shape":"String", + "locationName":"Value" + } + }, + "InsufficientResourceCapacityFault":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "Integer":{"type":"integer"}, + "IntegerOptional":{"type":"integer"}, + "InvalidResourceStateFault":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "InvalidSubnet":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "KMSKeyNotAccessibleFault":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "KeyList":{ + "type":"list", + "member":{"shape":"String"} + }, + "ListTagsForResourceMessage":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{"shape":"String"} + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "TagList":{"shape":"TagList"} + } + }, + "Long":{"type":"long"}, + "MigrationTypeValue":{ + "type":"string", + "enum":[ + "full-load", + "cdc", + "full-load-and-cdc" + ] + }, + "ModifyEndpointMessage":{ + "type":"structure", + "required":["EndpointArn"], + "members":{ + "EndpointArn":{"shape":"String"}, + "EndpointIdentifier":{"shape":"String"}, + "EndpointType":{"shape":"ReplicationEndpointTypeValue"}, + "EngineName":{"shape":"String"}, + "Username":{"shape":"String"}, + "Password":{"shape":"SecretString"}, + "ServerName":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "DatabaseName":{"shape":"String"}, + "ExtraConnectionAttributes":{"shape":"String"} + } + }, + "ModifyEndpointResponse":{ + "type":"structure", + "members":{ + "Endpoint":{"shape":"Endpoint"} + } + }, + "ModifyReplicationInstanceMessage":{ + "type":"structure", + "required":["ReplicationInstanceArn"], + "members":{ + "ReplicationInstanceArn":{"shape":"String"}, + "AllocatedStorage":{"shape":"IntegerOptional"}, + "ApplyImmediately":{"shape":"Boolean"}, + "ReplicationInstanceClass":{"shape":"String"}, + "VpcSecurityGroupIds":{"shape":"VpcSecurityGroupIdList"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "AllowMajorVersionUpgrade":{"shape":"Boolean"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "ReplicationInstanceIdentifier":{"shape":"String"} + } + }, + "ModifyReplicationInstanceResponse":{ + "type":"structure", + "members":{ + "ReplicationInstance":{"shape":"ReplicationInstance"} + } + }, + "ModifyReplicationSubnetGroupMessage":{ + "type":"structure", + "required":[ + "ReplicationSubnetGroupIdentifier", + "SubnetIds" + ], + "members":{ + "ReplicationSubnetGroupIdentifier":{"shape":"String"}, + "ReplicationSubnetGroupDescription":{"shape":"String"}, + "SubnetIds":{"shape":"SubnetIdentifierList"} + } + }, + "ModifyReplicationSubnetGroupResponse":{ + "type":"structure", + "members":{ + "ReplicationSubnetGroup":{"shape":"ReplicationSubnetGroup"} + } + }, + "OrderableReplicationInstance":{ + "type":"structure", + "members":{ + "EngineVersion":{"shape":"String"}, + "ReplicationInstanceClass":{"shape":"String"}, + "StorageType":{"shape":"String"}, + "MinAllocatedStorage":{"shape":"Integer"}, + "MaxAllocatedStorage":{"shape":"Integer"}, + "DefaultAllocatedStorage":{"shape":"Integer"}, + "IncludedAllocatedStorage":{"shape":"Integer"} + } + }, + "OrderableReplicationInstanceList":{ + "type":"list", + "member":{ + "shape":"OrderableReplicationInstance", + "locationName":"OrderableReplicationInstance" + } + }, + 
"RefreshSchemasMessage":{ + "type":"structure", + "required":[ + "EndpointArn", + "ReplicationInstanceArn" + ], + "members":{ + "EndpointArn":{"shape":"String"}, + "ReplicationInstanceArn":{"shape":"String"} + } + }, + "RefreshSchemasResponse":{ + "type":"structure", + "members":{ + "RefreshSchemasStatus":{"shape":"RefreshSchemasStatus"} + } + }, + "RefreshSchemasStatus":{ + "type":"structure", + "members":{ + "EndpointArn":{"shape":"String"}, + "ReplicationInstanceArn":{"shape":"String"}, + "Status":{"shape":"RefreshSchemasStatusTypeValue"}, + "LastRefreshDate":{"shape":"TStamp"}, + "LastFailureMessage":{"shape":"String"} + } + }, + "RefreshSchemasStatusTypeValue":{ + "type":"string", + "enum":[ + "successful", + "failed", + "refreshing" + ] + }, + "RemoveTagsFromResourceMessage":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{"shape":"String"}, + "TagKeys":{"shape":"KeyList"} + } + }, + "RemoveTagsFromResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "ReplicationEndpointTypeValue":{ + "type":"string", + "enum":[ + "source", + "target" + ] + }, + "ReplicationInstance":{ + "type":"structure", + "members":{ + "ReplicationInstanceIdentifier":{"shape":"String"}, + "ReplicationInstanceClass":{"shape":"String"}, + "ReplicationInstanceStatus":{"shape":"String"}, + "AllocatedStorage":{"shape":"Integer"}, + "InstanceCreateTime":{"shape":"TStamp"}, + "VpcSecurityGroups":{"shape":"VpcSecurityGroupMembershipList"}, + "AvailabilityZone":{"shape":"String"}, + "ReplicationSubnetGroup":{"shape":"ReplicationSubnetGroup"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "PendingModifiedValues":{"shape":"ReplicationPendingModifiedValues"}, + "EngineVersion":{"shape":"String"}, + "AutoMinorVersionUpgrade":{"shape":"Boolean"}, + "KmsKeyId":{"shape":"String"}, + "ReplicationInstanceArn":{"shape":"String"}, + "ReplicationInstancePublicIpAddress":{"shape":"String"}, + "ReplicationInstancePrivateIpAddress":{"shape":"String"}, + "PubliclyAccessible":{"shape":"Boolean"} + } + }, + "ReplicationInstanceList":{ + "type":"list", + "member":{ + "shape":"ReplicationInstance", + "locationName":"ReplicationInstance" + } + }, + "ReplicationPendingModifiedValues":{ + "type":"structure", + "members":{ + "ReplicationInstanceClass":{"shape":"String"}, + "AllocatedStorage":{"shape":"IntegerOptional"}, + "EngineVersion":{"shape":"String"} + } + }, + "ReplicationSubnetGroup":{ + "type":"structure", + "members":{ + "ReplicationSubnetGroupIdentifier":{"shape":"String"}, + "ReplicationSubnetGroupDescription":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "SubnetGroupStatus":{"shape":"String"}, + "Subnets":{"shape":"SubnetList"} + } + }, + "ReplicationSubnetGroupDoesNotCoverEnoughAZs":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "ReplicationSubnetGroups":{ + "type":"list", + "member":{ + "shape":"ReplicationSubnetGroup", + "locationName":"ReplicationSubnetGroup" + } + }, + "ReplicationTask":{ + "type":"structure", + "members":{ + "ReplicationTaskIdentifier":{"shape":"String"}, + "SourceEndpointArn":{"shape":"String"}, + "TargetEndpointArn":{"shape":"String"}, + "ReplicationInstanceArn":{"shape":"String"}, + "MigrationType":{"shape":"MigrationTypeValue"}, + "TableMappings":{"shape":"String"}, + "ReplicationTaskSettings":{"shape":"String"}, + "Status":{"shape":"String"}, + "LastFailureMessage":{"shape":"String"}, + "ReplicationTaskCreationDate":{"shape":"TStamp"}, + 
"ReplicationTaskStartDate":{"shape":"TStamp"}, + "ReplicationTaskArn":{"shape":"String"}, + "ReplicationTaskStats":{"shape":"ReplicationTaskStats"} + } + }, + "ReplicationTaskList":{ + "type":"list", + "member":{ + "shape":"ReplicationTask", + "locationName":"ReplicationTask" + } + }, + "ReplicationTaskStats":{ + "type":"structure", + "members":{ + "FullLoadProgressPercent":{"shape":"Integer"}, + "ElapsedTimeMillis":{"shape":"Long"}, + "TablesLoaded":{"shape":"Integer"}, + "TablesLoading":{"shape":"Integer"}, + "TablesQueued":{"shape":"Integer"}, + "TablesErrored":{"shape":"Integer"} + } + }, + "ResourceAlreadyExistsFault":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "ResourceNotFoundFault":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "ResourceQuotaExceededFault":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "SchemaList":{ + "type":"list", + "member":{"shape":"String"} + }, + "SecretString":{ + "type":"string", + "sensitive":true + }, + "StartReplicationTaskMessage":{ + "type":"structure", + "required":[ + "ReplicationTaskArn", + "StartReplicationTaskType" + ], + "members":{ + "ReplicationTaskArn":{"shape":"String"}, + "StartReplicationTaskType":{"shape":"StartReplicationTaskTypeValue"}, + "CdcStartTime":{"shape":"TStamp"} + } + }, + "StartReplicationTaskResponse":{ + "type":"structure", + "members":{ + "ReplicationTask":{"shape":"ReplicationTask"} + } + }, + "StartReplicationTaskTypeValue":{ + "type":"string", + "enum":[ + "start-replication", + "resume-processing", + "reload-target" + ] + }, + "StopReplicationTaskMessage":{ + "type":"structure", + "required":["ReplicationTaskArn"], + "members":{ + "ReplicationTaskArn":{"shape":"String"} + } + }, + "StopReplicationTaskResponse":{ + "type":"structure", + "members":{ + "ReplicationTask":{"shape":"ReplicationTask"} + } + }, + "StorageQuotaExceededFault":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "String":{"type":"string"}, + "Subnet":{ + "type":"structure", + "members":{ + "SubnetIdentifier":{"shape":"String"}, + "SubnetAvailabilityZone":{"shape":"AvailabilityZone"}, + "SubnetStatus":{"shape":"String"} + } + }, + "SubnetAlreadyInUse":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "SubnetIdentifierList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SubnetIdentifier" + } + }, + "SubnetList":{ + "type":"list", + "member":{ + "shape":"Subnet", + "locationName":"Subnet" + } + }, + "SupportedEndpointType":{ + "type":"structure", + "members":{ + "EngineName":{"shape":"String"}, + "SupportsCDC":{"shape":"Boolean"}, + "EndpointType":{"shape":"ReplicationEndpointTypeValue"} + } + }, + "SupportedEndpointTypeList":{ + "type":"list", + "member":{ + "shape":"SupportedEndpointType", + "locationName":"SupportedEndpointType" + } + }, + "TStamp":{"type":"timestamp"}, + "TableStatistics":{ + "type":"structure", + "members":{ + "SchemaName":{"shape":"String"}, + "TableName":{"shape":"String"}, + "Inserts":{"shape":"Long"}, + "Deletes":{"shape":"Long"}, + "Updates":{"shape":"Long"}, + "Ddls":{"shape":"Long"}, + "FullLoadRows":{"shape":"Long"}, + "LastUpdateTime":{"shape":"TStamp"}, + "TableState":{"shape":"String"} + } + }, + "TableStatisticsList":{ + "type":"list", + "member":{"shape":"TableStatistics"} + }, + "Tag":{ + 
"type":"structure", + "members":{ + "Key":{"shape":"String"}, + "Value":{"shape":"String"} + } + }, + "TagList":{ + "type":"list", + "member":{ + "shape":"Tag", + "locationName":"Tag" + } + }, + "TestConnectionMessage":{ + "type":"structure", + "required":[ + "ReplicationInstanceArn", + "EndpointArn" + ], + "members":{ + "ReplicationInstanceArn":{"shape":"String"}, + "EndpointArn":{"shape":"String"} + } + }, + "TestConnectionResponse":{ + "type":"structure", + "members":{ + "Connection":{"shape":"Connection"} + } + }, + "UpgradeDependencyFailureFault":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "VpcSecurityGroupIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"VpcSecurityGroupId" + } + }, + "VpcSecurityGroupMembership":{ + "type":"structure", + "members":{ + "VpcSecurityGroupId":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "VpcSecurityGroupMembershipList":{ + "type":"list", + "member":{ + "shape":"VpcSecurityGroupMembership", + "locationName":"VpcSecurityGroupMembership" + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/dms/2016-01-01/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/dms/2016-01-01/docs-2.json new file mode 100644 index 000000000..19c7853ca --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/dms/2016-01-01/docs-2.json @@ -0,0 +1,894 @@ +{ + "version": "2.0", + "service": "AWS Database Migration Service

    AWS Database Migration Service (AWS DMS) can migrate your data to and from the most widely used commercial and open-source databases such as Oracle, PostgreSQL, Microsoft SQL Server, Amazon Redshift, MariaDB, Amazon Aurora, and MySQL. The service supports homogeneous migrations such as Oracle to Oracle, as well as heterogeneous migrations between different database platforms, such as Oracle to MySQL or SQL Server to PostgreSQL.

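    By way of orientation, constructing the DMS client from this SDK and reading the account quotas described below might look like this minimal sketch (the region is a placeholder; field names follow the AccountQuota shape in the accompanying api-2.json):

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/databasemigrationservice"
    )

    func main() {
        sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")})) // placeholder region
        svc := databasemigrationservice.New(sess)

        out, err := svc.DescribeAccountAttributes(&databasemigrationservice.DescribeAccountAttributesInput{})
        if err != nil {
            log.Fatal(err)
        }
        for _, q := range out.AccountQuotas {
            fmt.Printf("%s: %d/%d\n",
                aws.StringValue(q.AccountQuotaName), aws.Int64Value(q.Used), aws.Int64Value(q.Max))
        }
    }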
    ", + "operations": { + "AddTagsToResource": "

    Adds metadata tags to a DMS resource, including replication instance, endpoint, security group, and migration task. These tags can also be used with cost allocation reporting to track cost associated with DMS resources, or used in a Condition statement in an IAM policy for DMS.

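    For example, tagging a resource might look like this sketch (the ARN and tag values are placeholders, and client construction is omitted):

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/databasemigrationservice"
    )

    func tagResource(svc *databasemigrationservice.DatabaseMigrationService, arn string) {
        _, err := svc.AddTagsToResource(&databasemigrationservice.AddTagsToResourceInput{
            ResourceArn: aws.String(arn), // e.g. a replication instance ARN
            Tags: []*databasemigrationservice.Tag{
                {Key: aws.String("CostCenter"), Value: aws.String("migration")}, // placeholder tag
            },
        })
        if err != nil {
            log.Fatal(err)
        }
    }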
    ", + "CreateEndpoint": "

    Creates an endpoint using the provided settings.

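    A sketch of a source endpoint, populating the members that CreateEndpointMessage marks required in the accompanying api-2.json (all values below are placeholders):

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/databasemigrationservice"
    )

    func createSourceEndpoint(svc *databasemigrationservice.DatabaseMigrationService) {
        out, err := svc.CreateEndpoint(&databasemigrationservice.CreateEndpointInput{
            EndpointIdentifier: aws.String("source-mysql"),   // placeholder name
            EndpointType:       aws.String("source"),         // "source" or "target"
            EngineName:         aws.String("mysql"),
            Username:           aws.String("admin"),          // placeholder credentials
            Password:           aws.String("example-password"),
            ServerName:         aws.String("db.example.com"), // placeholder host
            Port:               aws.Int64(3306),
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(aws.StringValue(out.Endpoint.EndpointArn))
    }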
    ", + "CreateReplicationInstance": "

    Creates the replication instance using the specified parameters.

    ", + "CreateReplicationSubnetGroup": "

    Creates a replication subnet group given a list of the subnet IDs in a VPC.

    ", + "CreateReplicationTask": "

    Creates a replication task using the specified parameters.

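    A sketch with the members that CreateReplicationTaskMessage marks required; the ARNs are placeholders, and the table-mapping JSON is one assumed example of a select-everything rule, not a mapping taken from this model:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/databasemigrationservice"
    )

    func createTask(svc *databasemigrationservice.DatabaseMigrationService, srcArn, tgtArn, instArn string) {
        // Assumed "include every table" selection rule.
        mappings := `{"rules":[{"rule-type":"selection","rule-id":"1","rule-name":"1","object-locator":{"schema-name":"%","table-name":"%"},"rule-action":"include"}]}`

        out, err := svc.CreateReplicationTask(&databasemigrationservice.CreateReplicationTaskInput{
            ReplicationTaskIdentifier: aws.String("example-task"), // placeholder name
            SourceEndpointArn:         aws.String(srcArn),
            TargetEndpointArn:         aws.String(tgtArn),
            ReplicationInstanceArn:    aws.String(instArn),
            MigrationType:             aws.String("full-load"), // full-load | cdc | full-load-and-cdc
            TableMappings:             aws.String(mappings),
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(aws.StringValue(out.ReplicationTask.ReplicationTaskArn))
    }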
    ", + "DeleteEndpoint": "

    Deletes the specified endpoint.

    All tasks associated with the endpoint must be deleted before you can delete the endpoint.

    ", + "DeleteReplicationInstance": "

    Deletes the specified replication instance.

    You must delete any migration tasks that are associated with the replication instance before you can delete it.

    ", + "DeleteReplicationSubnetGroup": "

    Deletes a subnet group.

    ", + "DeleteReplicationTask": "

    Deletes the specified replication task.

    ", + "DescribeAccountAttributes": "

    Lists all of the AWS DMS attributes for a customer account. The attributes include AWS DMS quotas for the account, such as the number of replication instances allowed. The description for a quota includes the quota name, current usage toward that quota, and the quota's maximum value.

    This command does not take any parameters.

    ", + "DescribeConnections": "

    Describes the status of the connections that have been made between the replication instance and an endpoint. Connections are created when you test an endpoint.

    ", + "DescribeEndpointTypes": "

    Returns information about the type of endpoints available.

    ", + "DescribeEndpoints": "

    Returns information about the endpoints for your account in the current region.

    ", + "DescribeOrderableReplicationInstances": "

    Returns information about the replication instance types that can be created in the specified region.

    ", + "DescribeRefreshSchemasStatus": "

    Returns the status of the RefreshSchemas operation.

    ", + "DescribeReplicationInstances": "

    Returns information about replication instances for your account in the current region.

    ", + "DescribeReplicationSubnetGroups": "

    Returns information about the replication subnet groups.

    ", + "DescribeReplicationTasks": "

    Returns information about replication tasks for your account in the current region.

    ", + "DescribeSchemas": "

    Returns information about the schema for the specified endpoint.

    ", + "DescribeTableStatistics": "

    Returns table statistics on the database migration task, including table name, rows inserted, rows updated, and rows deleted.

    ", + "ListTagsForResource": "

    Lists all tags for an AWS DMS resource.

    ", + "ModifyEndpoint": "

    Modifies the specified endpoint.

    ", + "ModifyReplicationInstance": "

    Modifies the replication instance to apply new settings. You can change one or more parameters by specifying these parameters and the new values in the request.

    Some settings are applied during the maintenance window.

    ", + "ModifyReplicationSubnetGroup": "

    Modifies the settings for the specified replication subnet group.

    ", + "RefreshSchemas": "

    Populates the schema for the specified endpoint. This is an asynchronous operation and can take several minutes. You can check the status of this operation by calling the DescribeRefreshSchemasStatus operation.

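    Because the operation is asynchronous, a caller typically polls DescribeRefreshSchemasStatus until the status leaves \"refreshing\" (per RefreshSchemasStatusTypeValue in the api model). A sketch, with an arbitrary polling interval and placeholder ARNs:

    package main

    import (
        "fmt"
        "log"
        "time"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/databasemigrationservice"
    )

    func refreshAndWait(svc *databasemigrationservice.DatabaseMigrationService, endpointArn, instanceArn string) {
        _, err := svc.RefreshSchemas(&databasemigrationservice.RefreshSchemasInput{
            EndpointArn:            aws.String(endpointArn),
            ReplicationInstanceArn: aws.String(instanceArn),
        })
        if err != nil {
            log.Fatal(err)
        }
        for {
            out, err := svc.DescribeRefreshSchemasStatus(&databasemigrationservice.DescribeRefreshSchemasStatusInput{
                EndpointArn: aws.String(endpointArn),
            })
            if err != nil {
                log.Fatal(err)
            }
            status := aws.StringValue(out.RefreshSchemasStatus.Status) // "successful", "failed", or "refreshing"
            if status != "refreshing" {
                fmt.Println("finished:", status)
                return
            }
            time.Sleep(30 * time.Second) // polling interval is an arbitrary choice
        }
    }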
    ", + "RemoveTagsFromResource": "

    Removes metadata tags from a DMS resource.

    ", + "StartReplicationTask": "

    Starts the replication task.

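    A sketch of starting a task (the ARN is a placeholder; the task-type values come from StartReplicationTaskTypeValue in the api model):

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/databasemigrationservice"
    )

    func startTask(svc *databasemigrationservice.DatabaseMigrationService, taskArn string) {
        _, err := svc.StartReplicationTask(&databasemigrationservice.StartReplicationTaskInput{
            ReplicationTaskArn: aws.String(taskArn),
            // Also accepts "resume-processing" and "reload-target".
            StartReplicationTaskType: aws.String("start-replication"),
        })
        if err != nil {
            log.Fatal(err)
        }
    }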
    ", + "StopReplicationTask": "

    Stops the replication task.

    ", + "TestConnection": "

    Tests the connection between the replication instance and the endpoint.

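    A sketch of kicking off a connection test (placeholder ARNs). The returned Connection carries the initial status; the final result can be read later via DescribeConnections, since connections are created by testing an endpoint:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/databasemigrationservice"
    )

    func testEndpoint(svc *databasemigrationservice.DatabaseMigrationService, instanceArn, endpointArn string) {
        out, err := svc.TestConnection(&databasemigrationservice.TestConnectionInput{
            ReplicationInstanceArn: aws.String(instanceArn),
            EndpointArn:            aws.String(endpointArn),
        })
        if err != nil {
            log.Fatal(err)
        }
        // The test runs asynchronously; poll DescribeConnections for the final status.
        fmt.Println(aws.StringValue(out.Connection.Status))
    }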
    " + }, + "shapes": { + "AccessDeniedFault": { + "base": "

    AWS DMS was denied access to the endpoint.

    ", + "refs": { + } + }, + "AccountQuota": { + "base": "

    Describes a quota for an AWS account, for example, the number of replication instances allowed.

    ", + "refs": { + "AccountQuotaList$member": null + } + }, + "AccountQuotaList": { + "base": null, + "refs": { + "DescribeAccountAttributesResponse$AccountQuotas": "

    Account quota information.

    " + } + }, + "AddTagsToResourceMessage": { + "base": "

    ", + "refs": { + } + }, + "AddTagsToResourceResponse": { + "base": "

    ", + "refs": { + } + }, + "AvailabilityZone": { + "base": "

    ", + "refs": { + "Subnet$SubnetAvailabilityZone": "

    The Availability Zone of the subnet.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "ModifyReplicationInstanceMessage$ApplyImmediately": "

    Indicates whether the changes should be applied immediately or during the next maintenance window.

    ", + "ModifyReplicationInstanceMessage$AllowMajorVersionUpgrade": "

    Indicates that major version upgrades are allowed. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible.

    Constraints: This parameter must be set to true when specifying a value for the EngineVersion parameter that is a different major version than the replication instance's current version.

    ", + "ReplicationInstance$AutoMinorVersionUpgrade": "

    Boolean value indicating if minor version upgrades will be automatically applied to the instance.

    ", + "ReplicationInstance$PubliclyAccessible": "

    Specifies the accessibility options for the replication instance. A value of true represents an instance with a public IP address. A value of false represents an instance with a private IP address. The default value is true.

    ", + "SupportedEndpointType$SupportsCDC": "

    Indicates if Change Data Capture (CDC) is supported.

    " + } + }, + "BooleanOptional": { + "base": null, + "refs": { + "CreateReplicationInstanceMessage$AutoMinorVersionUpgrade": "

    Indicates that minor engine upgrades will be applied automatically to the replication instance during the maintenance window.

    Default: true

    ", + "CreateReplicationInstanceMessage$PubliclyAccessible": "

    Specifies the accessibility options for the replication instance. A value of true represents an instance with a public IP address. A value of false represents an instance with a private IP address. The default value is true.

    ", + "ModifyReplicationInstanceMessage$AutoMinorVersionUpgrade": "

    Indicates that minor version upgrades will be applied automatically to the replication instance during the maintenance window. Changing this parameter does not result in an outage, and the change is asynchronously applied as soon as possible. An outage results only if this parameter is set to true during the maintenance window, a newer minor version is available, and AWS DMS has enabled auto patching for that engine version.

    " + } + }, + "Connection": { + "base": "

    ", + "refs": { + "ConnectionList$member": null, + "TestConnectionResponse$Connection": "

    The connection tested.

    " + } + }, + "ConnectionList": { + "base": null, + "refs": { + "DescribeConnectionsResponse$Connections": "

    A description of the connections.

    " + } + }, + "CreateEndpointMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateEndpointResponse": { + "base": "

    ", + "refs": { + } + }, + "CreateReplicationInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateReplicationInstanceResponse": { + "base": "

    ", + "refs": { + } + }, + "CreateReplicationSubnetGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateReplicationSubnetGroupResponse": { + "base": "

    ", + "refs": { + } + }, + "CreateReplicationTaskMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateReplicationTaskResponse": { + "base": "

    ", + "refs": { + } + }, + "DeleteEndpointMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteEndpointResponse": { + "base": "

    ", + "refs": { + } + }, + "DeleteReplicationInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteReplicationInstanceResponse": { + "base": "

    ", + "refs": { + } + }, + "DeleteReplicationSubnetGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteReplicationSubnetGroupResponse": { + "base": "

    ", + "refs": { + } + }, + "DeleteReplicationTaskMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteReplicationTaskResponse": { + "base": "

    ", + "refs": { + } + }, + "DescribeAccountAttributesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeAccountAttributesResponse": { + "base": "

    ", + "refs": { + } + }, + "DescribeConnectionsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeConnectionsResponse": { + "base": "

    ", + "refs": { + } + }, + "DescribeEndpointTypesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEndpointTypesResponse": { + "base": "

    ", + "refs": { + } + }, + "DescribeEndpointsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEndpointsResponse": { + "base": "

    ", + "refs": { + } + }, + "DescribeOrderableReplicationInstancesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeOrderableReplicationInstancesResponse": { + "base": "

    ", + "refs": { + } + }, + "DescribeRefreshSchemasStatusMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeRefreshSchemasStatusResponse": { + "base": "

    ", + "refs": { + } + }, + "DescribeReplicationInstancesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeReplicationInstancesResponse": { + "base": "

    ", + "refs": { + } + }, + "DescribeReplicationSubnetGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeReplicationSubnetGroupsResponse": { + "base": "

    ", + "refs": { + } + }, + "DescribeReplicationTasksMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeReplicationTasksResponse": { + "base": "

    ", + "refs": { + } + }, + "DescribeSchemasMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeSchemasResponse": { + "base": "

    ", + "refs": { + } + }, + "DescribeTableStatisticsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeTableStatisticsResponse": { + "base": "

    ", + "refs": { + } + }, + "Endpoint": { + "base": "

    ", + "refs": { + "CreateEndpointResponse$Endpoint": "

    The endpoint that was created.

    ", + "DeleteEndpointResponse$Endpoint": "

    The endpoint that was deleted.

    ", + "EndpointList$member": null, + "ModifyEndpointResponse$Endpoint": "

    The modified endpoint.

    " + } + }, + "EndpointList": { + "base": null, + "refs": { + "DescribeEndpointsResponse$Endpoints": "

    The endpoint descriptions.

    " + } + }, + "ExceptionMessage": { + "base": null, + "refs": { + "AccessDeniedFault$message": "

    ", + "InsufficientResourceCapacityFault$message": "

    ", + "InvalidResourceStateFault$message": "

    ", + "InvalidSubnet$message": "

    ", + "KMSKeyNotAccessibleFault$message": "

    ", + "ReplicationSubnetGroupDoesNotCoverEnoughAZs$message": "

    ", + "ResourceAlreadyExistsFault$message": "

    ", + "ResourceNotFoundFault$message": "

    ", + "ResourceQuotaExceededFault$message": "

    ", + "StorageQuotaExceededFault$message": "

    ", + "SubnetAlreadyInUse$message": "

    ", + "UpgradeDependencyFailureFault$message": "

    " + } + }, + "Filter": { + "base": "

    ", + "refs": { + "FilterList$member": null + } + }, + "FilterList": { + "base": null, + "refs": { + "DescribeConnectionsMessage$Filters": "

    The filters applied to the connection.

    Valid filter names: endpoint-arn | replication-instance-arn

    ", + "DescribeEndpointTypesMessage$Filters": "

    Filters applied to the describe action.

    Valid filter names: engine-name | endpoint-type

    ", + "DescribeEndpointsMessage$Filters": "

    Filters applied to the describe action.

    Valid filter names: endpoint-arn | endpoint-type | endpoint-id | engine-name

    ", + "DescribeReplicationInstancesMessage$Filters": "

    Filters applied to the describe action.

    Valid filter names: replication-instance-arn | replication-instance-id | replication-instance-class | engine-version

    ", + "DescribeReplicationSubnetGroupsMessage$Filters": "

    Filters applied to the describe action.

    ", + "DescribeReplicationTasksMessage$Filters": "

    Filters applied to the describe action.

    Valid filter names: replication-task-arn | replication-task-id | migration-type | endpoint-arn | replication-instance-arn

    " + } + }, + "FilterValueList": { + "base": null, + "refs": { + "Filter$Values": "

    The filter values.

    " + } + }, + "InsufficientResourceCapacityFault": { + "base": "

    There are not enough resources allocated to the database migration.

    ", + "refs": { + } + }, + "Integer": { + "base": null, + "refs": { + "OrderableReplicationInstance$MinAllocatedStorage": "

    The minimum amount of storage (in gigabytes) that can be allocated for the replication instance.

    ", + "OrderableReplicationInstance$MaxAllocatedStorage": "

    The maximum amount of storage (in gigabytes) that can be allocated for the replication instance.

    ", + "OrderableReplicationInstance$DefaultAllocatedStorage": "

    The default amount of storage (in gigabytes) that is allocated for the replication instance.

    ", + "OrderableReplicationInstance$IncludedAllocatedStorage": "

    The amount of storage (in gigabytes) that is allocated for the replication instance.

    ", + "ReplicationInstance$AllocatedStorage": "

    The amount of storage (in gigabytes) that is allocated for the replication instance.

    ", + "ReplicationTaskStats$FullLoadProgressPercent": "

    The percent complete for the full load migration task.

    ", + "ReplicationTaskStats$TablesLoaded": "

    The number of tables loaded for this task.

    ", + "ReplicationTaskStats$TablesLoading": "

    The number of tables currently loading for this task.

    ", + "ReplicationTaskStats$TablesQueued": "

    The number of tables queued for this task.

    ", + "ReplicationTaskStats$TablesErrored": "

    The number of tables that errored during this task.

    " + } + }, + "IntegerOptional": { + "base": null, + "refs": { + "CreateEndpointMessage$Port": "

    The port used by the endpoint database.

    ", + "CreateReplicationInstanceMessage$AllocatedStorage": "

    The amount of storage (in gigabytes) to be initially allocated for the replication instance.

    ", + "DescribeConnectionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeEndpointTypesMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeEndpointsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeOrderableReplicationInstancesMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeReplicationInstancesMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeReplicationSubnetGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeReplicationTasksMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeSchemasMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeTableStatisticsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "Endpoint$Port": "

    The port value used to access the endpoint.

    ", + "ModifyEndpointMessage$Port": "

    The port used by the endpoint database.

    ", + "ModifyReplicationInstanceMessage$AllocatedStorage": "

    The amount of storage (in gigabytes) to be allocated for the replication instance.

    ", + "ReplicationPendingModifiedValues$AllocatedStorage": "

    The amount of storage (in gigabytes) that is allocated for the replication instance.

    " + } + }, + "InvalidResourceStateFault": { + "base": "

    The resource is in a state that prevents it from being used for database migration.

    ", + "refs": { + } + }, + "InvalidSubnet": { + "base": "

    The subnet provided is invalid.

    ", + "refs": { + } + }, + "KMSKeyNotAccessibleFault": { + "base": "

    AWS DMS cannot access the KMS key.

    ", + "refs": { + } + }, + "KeyList": { + "base": null, + "refs": { + "RemoveTagsFromResourceMessage$TagKeys": "

    The tag keys (names) of the tags to be removed.

    " + } + }, + "ListTagsForResourceMessage": { + "base": "

    ", + "refs": { + } + }, + "ListTagsForResourceResponse": { + "base": "

    ", + "refs": { + } + }, + "Long": { + "base": null, + "refs": { + "AccountQuota$Used": "

    The amount currently used toward the quota maximum.

    ", + "AccountQuota$Max": "

    The maximum allowed value for the quota.

    ", + "ReplicationTaskStats$ElapsedTimeMillis": "

    The elapsed time of the task, in milliseconds.

    ", + "TableStatistics$Inserts": "

    The number of insert actions performed on a table.

    ", + "TableStatistics$Deletes": "

    The number of delete actions performed on a table.

    ", + "TableStatistics$Updates": "

    The number of update actions performed on a table.

    ", + "TableStatistics$Ddls": "

    The number of Data Definition Language (DDL) statements used to build and modify the structure of the table.

    ", + "TableStatistics$FullLoadRows": "

    The number of rows added during the Full Load operation.

    " + } + }, + "MigrationTypeValue": { + "base": null, + "refs": { + "CreateReplicationTaskMessage$MigrationType": "

    The migration type.

    ", + "ReplicationTask$MigrationType": "

    The type of migration.

    " + } + }, + "ModifyEndpointMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyEndpointResponse": { + "base": "

    ", + "refs": { + } + }, + "ModifyReplicationInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyReplicationInstanceResponse": { + "base": "

    ", + "refs": { + } + }, + "ModifyReplicationSubnetGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyReplicationSubnetGroupResponse": { + "base": "

    ", + "refs": { + } + }, + "OrderableReplicationInstance": { + "base": "

    ", + "refs": { + "OrderableReplicationInstanceList$member": null + } + }, + "OrderableReplicationInstanceList": { + "base": null, + "refs": { + "DescribeOrderableReplicationInstancesResponse$OrderableReplicationInstances": "

    The orderable replication instances available.

    " + } + }, + "RefreshSchemasMessage": { + "base": "

    ", + "refs": { + } + }, + "RefreshSchemasResponse": { + "base": "

    ", + "refs": { + } + }, + "RefreshSchemasStatus": { + "base": "

    ", + "refs": { + "DescribeRefreshSchemasStatusResponse$RefreshSchemasStatus": "

    The status of the schema.

    ", + "RefreshSchemasResponse$RefreshSchemasStatus": "

    The status of the refreshed schema.

    " + } + }, + "RefreshSchemasStatusTypeValue": { + "base": null, + "refs": { + "RefreshSchemasStatus$Status": "

    The status of the schema.

    " + } + }, + "RemoveTagsFromResourceMessage": { + "base": "

    ", + "refs": { + } + }, + "RemoveTagsFromResourceResponse": { + "base": "

    ", + "refs": { + } + }, + "ReplicationEndpointTypeValue": { + "base": null, + "refs": { + "CreateEndpointMessage$EndpointType": "

    The type of endpoint.

    ", + "Endpoint$EndpointType": "

    The type of endpoint.

    ", + "ModifyEndpointMessage$EndpointType": "

    The type of endpoint.

    ", + "SupportedEndpointType$EndpointType": "

    The type of endpoint.

    " + } + }, + "ReplicationInstance": { + "base": "

    ", + "refs": { + "CreateReplicationInstanceResponse$ReplicationInstance": "

    The replication instance that was created.

    ", + "DeleteReplicationInstanceResponse$ReplicationInstance": "

    The replication instance that was deleted.

    ", + "ModifyReplicationInstanceResponse$ReplicationInstance": "

    The modified replication instance.

    ", + "ReplicationInstanceList$member": null + } + }, + "ReplicationInstanceList": { + "base": null, + "refs": { + "DescribeReplicationInstancesResponse$ReplicationInstances": "

    The replication instances described.

    " + } + }, + "ReplicationPendingModifiedValues": { + "base": "

    ", + "refs": { + "ReplicationInstance$PendingModifiedValues": "

    The pending modification values.

    " + } + }, + "ReplicationSubnetGroup": { + "base": "

    ", + "refs": { + "CreateReplicationSubnetGroupResponse$ReplicationSubnetGroup": "

    The replication subnet group that was created.

    ", + "ModifyReplicationSubnetGroupResponse$ReplicationSubnetGroup": "

    The modified replication subnet group.

    ", + "ReplicationInstance$ReplicationSubnetGroup": "

    The subnet group for the replication instance.

    ", + "ReplicationSubnetGroups$member": null + } + }, + "ReplicationSubnetGroupDoesNotCoverEnoughAZs": { + "base": "

    The replication subnet group does not cover enough Availability Zones (AZs). Edit the replication subnet group and add more AZs.

    ", + "refs": { + } + }, + "ReplicationSubnetGroups": { + "base": null, + "refs": { + "DescribeReplicationSubnetGroupsResponse$ReplicationSubnetGroups": "

    A description of the replication subnet groups.

    " + } + }, + "ReplicationTask": { + "base": "

    ", + "refs": { + "CreateReplicationTaskResponse$ReplicationTask": "

    The replication task that was created.

    ", + "DeleteReplicationTaskResponse$ReplicationTask": "

    The deleted replication task.

    ", + "ReplicationTaskList$member": null, + "StartReplicationTaskResponse$ReplicationTask": "

    The replication task started.

    ", + "StopReplicationTaskResponse$ReplicationTask": "

    The replication task stopped.

    " + } + }, + "ReplicationTaskList": { + "base": null, + "refs": { + "DescribeReplicationTasksResponse$ReplicationTasks": "

    A description of the replication tasks.

    " + } + }, + "ReplicationTaskStats": { + "base": "

    ", + "refs": { + "ReplicationTask$ReplicationTaskStats": "

    The statistics for the task, including elapsed time, tables loaded, and table errors.

    " + } + }, + "ResourceAlreadyExistsFault": { + "base": "

    The resource you are attempting to create already exists.

    ", + "refs": { + } + }, + "ResourceNotFoundFault": { + "base": "

    The resource could not be found.

    ", + "refs": { + } + }, + "ResourceQuotaExceededFault": { + "base": "

    The quota for this resource has been exceeded.

    ", + "refs": { + } + }, + "SchemaList": { + "base": null, + "refs": { + "DescribeSchemasResponse$Schemas": "

    The described schemas.

    " + } + }, + "SecretString": { + "base": null, + "refs": { + "CreateEndpointMessage$Password": "

    The password to be used to log in to the endpoint database.

    ", + "ModifyEndpointMessage$Password": "

    The password to be used to log in to the endpoint database.

    " + } + }, + "StartReplicationTaskMessage": { + "base": "

    ", + "refs": { + } + }, + "StartReplicationTaskResponse": { + "base": "

    ", + "refs": { + } + }, + "StartReplicationTaskTypeValue": { + "base": null, + "refs": { + "StartReplicationTaskMessage$StartReplicationTaskType": "

    The type of replication task.

    " + } + }, + "StopReplicationTaskMessage": { + "base": "

    ", + "refs": { + } + }, + "StopReplicationTaskResponse": { + "base": "

    ", + "refs": { + } + }, + "StorageQuotaExceededFault": { + "base": "

    The storage quota has been exceeded.

    ", + "refs": { + } + }, + "String": { + "base": null, + "refs": { + "AccountQuota$AccountQuotaName": "

    The name of the AWS DMS quota for this AWS account.

    ", + "AddTagsToResourceMessage$ResourceArn": "

    The Amazon Resource Name (ARN) of the AWS DMS resource the tag is to be added to. AWS DMS resources include a replication instance, endpoint, and a replication task.

    ", + "AvailabilityZone$Name": "

    The name of the availability zone.

    ", + "Connection$ReplicationInstanceArn": "

    The Amazon Resource Name (ARN) of the replication instance.

    ", + "Connection$EndpointArn": "

    The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.

    ", + "Connection$Status": "

    The connection status.

    ", + "Connection$LastFailureMessage": "

    The error message when the connection last failed.

    ", + "Connection$EndpointIdentifier": "

    The identifier of the endpoint. Identifiers must begin with a letter; must contain only ASCII letters, digits, and hyphens; and must not end with a hyphen or contain two consecutive hyphens.

    ", + "Connection$ReplicationInstanceIdentifier": "

    The replication instance identifier. This parameter is stored as a lowercase string.

    ", + "CreateEndpointMessage$EndpointIdentifier": "

    The database endpoint identifier. Identifiers must begin with a letter; must contain only ASCII letters, digits, and hyphens; and must not end with a hyphen or contain two consecutive hyphens.

    ", + "CreateEndpointMessage$EngineName": "

    The type of engine for the endpoint. Valid values include MYSQL, ORACLE, POSTGRES, MARIADB, AURORA, REDSHIFT, and SQLSERVER.

    ", + "CreateEndpointMessage$Username": "

    The user name to be used to log in to the endpoint database.

    ", + "CreateEndpointMessage$ServerName": "

    The name of the server where the endpoint database resides.

    ", + "CreateEndpointMessage$DatabaseName": "

    The name of the endpoint database.

    ", + "CreateEndpointMessage$ExtraConnectionAttributes": "

    Additional attributes associated with the connection.

    ", + "CreateEndpointMessage$KmsKeyId": "

    The KMS key identifier that will be used to encrypt the connection parameters. If you do not specify a value for the KmsKeyId parameter, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region.

    ", + "CreateReplicationInstanceMessage$ReplicationInstanceIdentifier": "

    The replication instance identifier. This parameter is stored as a lowercase string.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens.

    • First character must be a letter.

    • Cannot end with a hyphen or contain two consecutive hyphens.

    Example: myrepinstance

    ", + "CreateReplicationInstanceMessage$ReplicationInstanceClass": "

    The compute and memory capacity of the replication instance as specified by the replication instance class.

    Valid Values: dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large | dms.c4.large | dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge

    ", + "CreateReplicationInstanceMessage$AvailabilityZone": "

    The EC2 Availability Zone that the replication instance will be created in.

    Default: A random, system-chosen Availability Zone in the endpoint's region.

    Example: us-east-1d

    ", + "CreateReplicationInstanceMessage$ReplicationSubnetGroupIdentifier": "

    A subnet group to associate with the replication instance.

    ", + "CreateReplicationInstanceMessage$PreferredMaintenanceWindow": "

    The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

    Format: ddd:hh24:mi-ddd:hh24:mi

    Default: A 30-minute window selected at random from an 8-hour block of time per region, occurring on a random day of the week.

    Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun

    Constraints: Minimum 30-minute window.

    ", + "CreateReplicationInstanceMessage$EngineVersion": "

    The engine version number of the replication instance.

    ", + "CreateReplicationInstanceMessage$KmsKeyId": "

    The KMS key identifier that will be used to encrypt the content on the replication instance. If you do not specify a value for the KmsKeyId parameter, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region.

    ", + "CreateReplicationSubnetGroupMessage$ReplicationSubnetGroupIdentifier": "

    The name for the replication subnet group. This value is stored as a lowercase string.

    Constraints: Must contain no more than 255 alphanumeric characters, periods, spaces, underscores, or hyphens. Must not be \"default\".

    Example: mySubnetgroup

    ", + "CreateReplicationSubnetGroupMessage$ReplicationSubnetGroupDescription": "

    The description for the subnet group.

    ", + "CreateReplicationTaskMessage$ReplicationTaskIdentifier": "

    The replication task identifier.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens.

    • First character must be a letter.

    • Cannot end with a hyphen or contain two consecutive hyphens.

    ", + "CreateReplicationTaskMessage$SourceEndpointArn": "

    The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.

    ", + "CreateReplicationTaskMessage$TargetEndpointArn": "

    The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.

    ", + "CreateReplicationTaskMessage$ReplicationInstanceArn": "

    The Amazon Resource Name (ARN) of the replication instance.

    ", + "CreateReplicationTaskMessage$TableMappings": "

    The path of the JSON file that contains the table mappings.

    ", + "CreateReplicationTaskMessage$ReplicationTaskSettings": "

    Settings for the task, such as target metadata settings.

    ", + "DeleteEndpointMessage$EndpointArn": "

    The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.

    ", + "DeleteReplicationInstanceMessage$ReplicationInstanceArn": "

    The Amazon Resource Name (ARN) of the replication instance to be deleted.

    ", + "DeleteReplicationSubnetGroupMessage$ReplicationSubnetGroupIdentifier": "

    The subnet group name of the replication instance.

    ", + "DeleteReplicationTaskMessage$ReplicationTaskArn": "

    The Amazon Resource Name (ARN) of the replication task to be deleted.

    ", + "DescribeConnectionsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeConnectionsResponse$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeEndpointTypesMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeEndpointTypesResponse$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeEndpointsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeEndpointsResponse$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeOrderableReplicationInstancesMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeOrderableReplicationInstancesResponse$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeRefreshSchemasStatusMessage$EndpointArn": "

    The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.

    ", + "DescribeReplicationInstancesMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeReplicationInstancesResponse$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeReplicationSubnetGroupsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeReplicationSubnetGroupsResponse$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeReplicationTasksMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeReplicationTasksResponse$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeSchemasMessage$EndpointArn": "

    The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.

    ", + "DescribeSchemasMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeSchemasResponse$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeTableStatisticsMessage$ReplicationTaskArn": "

    The Amazon Resource Name (ARN) of the replication task.

    ", + "DescribeTableStatisticsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeTableStatisticsResponse$ReplicationTaskArn": "

    The Amazon Resource Name (ARN) of the replication task.

    ", + "DescribeTableStatisticsResponse$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "Endpoint$EndpointIdentifier": "

    The database endpoint identifier. Identifiers must begin with a letter; must contain only ASCII letters, digits, and hyphens; and must not end with a hyphen or contain two consecutive hyphens.

    ", + "Endpoint$EngineName": "

    The database engine name.

    ", + "Endpoint$Username": "

    The user name used to connect to the endpoint.

    ", + "Endpoint$ServerName": "

    The name of the server at the endpoint.

    ", + "Endpoint$DatabaseName": "

    The name of the database at the endpoint.

    ", + "Endpoint$ExtraConnectionAttributes": "

    Additional connection attributes used to connect to the endpoint.

    ", + "Endpoint$Status": "

    The status of the endpoint.

    ", + "Endpoint$KmsKeyId": "

    The KMS key identifier that will be used to encrypt the connection parameters. If you do not specify a value for the KmsKeyId parameter, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region.

    ", + "Endpoint$EndpointArn": "

    The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.

    ", + "Filter$Name": "

    The name of the filter.

    ", + "FilterValueList$member": null, + "KeyList$member": null, + "ListTagsForResourceMessage$ResourceArn": "

    The Amazon Resource Name (ARN) string that uniquely identifies the AWS DMS resource.

    ", + "ModifyEndpointMessage$EndpointArn": "

    The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.

    ", + "ModifyEndpointMessage$EndpointIdentifier": "

    The database endpoint identifier. Identifiers must begin with a letter; must contain only ASCII letters, digits, and hyphens; and must not end with a hyphen or contain two consecutive hyphens.

    ", + "ModifyEndpointMessage$EngineName": "

    The type of engine for the endpoint. Valid values include MYSQL, ORACLE, POSTGRES, MARIADB, AURORA, REDSHIFT, and SQLSERVER.

    ", + "ModifyEndpointMessage$Username": "

    The user name to be used to log in to the endpoint database.

    ", + "ModifyEndpointMessage$ServerName": "

    The name of the server where the endpoint database resides.

    ", + "ModifyEndpointMessage$DatabaseName": "

    The name of the endpoint database.

    ", + "ModifyEndpointMessage$ExtraConnectionAttributes": "

    Additional attributes associated with the connection.

    ", + "ModifyReplicationInstanceMessage$ReplicationInstanceArn": "

    The Amazon Resource Name (ARN) of the replication instance.

    ", + "ModifyReplicationInstanceMessage$ReplicationInstanceClass": "

    The compute and memory capacity of the replication instance.

    Valid Values: dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large | dms.c4.large | dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge

    ", + "ModifyReplicationInstanceMessage$PreferredMaintenanceWindow": "

    The weekly time range (in UTC) during which system maintenance can occur, which might result in an outage. Changing this parameter does not result in an outage, except in the following situation, and the change is applied asynchronously as soon as possible: if you move this window to the current time, there must be at least 30 minutes between the current time and the end of the window to ensure that pending changes are applied.

    Default: Uses existing setting

    Format: ddd:hh24:mi-ddd:hh24:mi

    Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun

    Constraints: Must be at least 30 minutes

    ", + "ModifyReplicationInstanceMessage$EngineVersion": "

    The engine version number of the replication instance.

    ", + "ModifyReplicationInstanceMessage$ReplicationInstanceIdentifier": "

    The replication instance identifier. This parameter is stored as a lowercase string.

    ", + "ModifyReplicationSubnetGroupMessage$ReplicationSubnetGroupIdentifier": "

    The name of the replication instance subnet group.

    ", + "ModifyReplicationSubnetGroupMessage$ReplicationSubnetGroupDescription": "

    The description of the replication instance subnet group.

    ", + "OrderableReplicationInstance$EngineVersion": "

    The version of the replication engine.

    ", + "OrderableReplicationInstance$ReplicationInstanceClass": "

    The compute and memory capacity of the replication instance.

    Valid Values: dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large | dms.c4.large | dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge

    ", + "OrderableReplicationInstance$StorageType": "

    The type of storage used by the replication instance.

    ", + "RefreshSchemasMessage$EndpointArn": "

    The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.

    ", + "RefreshSchemasMessage$ReplicationInstanceArn": "

    The Amazon Resource Name (ARN) of the replication instance.

    ", + "RefreshSchemasStatus$EndpointArn": "

    The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.

    ", + "RefreshSchemasStatus$ReplicationInstanceArn": "

    The Amazon Resource Name (ARN) of the replication instance.

    ", + "RefreshSchemasStatus$LastFailureMessage": "

    The last failure message for the schema.

    ", + "RemoveTagsFromResourceMessage$ResourceArn": "

    The Amazon Resource Name (ARN) of the AWS DMS resource the tag is to be removed from.

    ", + "ReplicationInstance$ReplicationInstanceIdentifier": "

    The replication instance identifier. This parameter is stored as a lowercase string.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens.

    • First character must be a letter.

    • Cannot end with a hyphen or contain two consecutive hyphens.

    Example: myrepinstance

    ", + "ReplicationInstance$ReplicationInstanceClass": "

    The compute and memory capacity of the replication instance.

    Valid Values: dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large | dms.c4.large | dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge

    ", + "ReplicationInstance$ReplicationInstanceStatus": "

    The status of the replication instance.

    ", + "ReplicationInstance$AvailabilityZone": "

    The Availability Zone for the instance.

    ", + "ReplicationInstance$PreferredMaintenanceWindow": "

    The maintenance window times for the replication instance.

    ", + "ReplicationInstance$EngineVersion": "

    The engine version number of the replication instance.

    ", + "ReplicationInstance$KmsKeyId": "

    The KMS key identifier that is used to encrypt the content on the replication instance. If you do not specify a value for the KmsKeyId parameter, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region.

    ", + "ReplicationInstance$ReplicationInstanceArn": "

    The Amazon Resource Name (ARN) of the replication instance.

    ", + "ReplicationInstance$ReplicationInstancePublicIpAddress": "

    The public IP address of the replication instance.

    ", + "ReplicationInstance$ReplicationInstancePrivateIpAddress": "

    The private IP address of the replication instance.

    ", + "ReplicationPendingModifiedValues$ReplicationInstanceClass": "

    The compute and memory capacity of the replication instance.

    Valid Values: dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large | dms.c4.large | dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge

    ", + "ReplicationPendingModifiedValues$EngineVersion": "

    The engine version number of the replication instance.

    ", + "ReplicationSubnetGroup$ReplicationSubnetGroupIdentifier": "

    The identifier of the replication instance subnet group.

    ", + "ReplicationSubnetGroup$ReplicationSubnetGroupDescription": "

    The description of the replication subnet group.

    ", + "ReplicationSubnetGroup$VpcId": "

    The ID of the VPC.

    ", + "ReplicationSubnetGroup$SubnetGroupStatus": "

    The status of the subnet group.

    ", + "ReplicationTask$ReplicationTaskIdentifier": "

    The replication task identifier.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens.

    • First character must be a letter.

    • Cannot end with a hyphen or contain two consecutive hyphens.

    ", + "ReplicationTask$SourceEndpointArn": "

    The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.

    ", + "ReplicationTask$TargetEndpointArn": "

    The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.

    ", + "ReplicationTask$ReplicationInstanceArn": "

    The Amazon Resource Name (ARN) of the replication instance.

    ", + "ReplicationTask$TableMappings": "

    Table mappings specified in the task.

    ", + "ReplicationTask$ReplicationTaskSettings": "

    The settings for the replication task.

    ", + "ReplicationTask$Status": "

    The status of the replication task.

    ", + "ReplicationTask$LastFailureMessage": "

    The last error (failure) message generated for the replication task.

    ", + "ReplicationTask$ReplicationTaskArn": "

    The Amazon Resource Name (ARN) of the replication task.

    ", + "SchemaList$member": null, + "StartReplicationTaskMessage$ReplicationTaskArn": "

    The Amazon Resource Name (ARN) of the replication task to be started.

    ", + "StopReplicationTaskMessage$ReplicationTaskArn": "

    The Amazon Resource Name (ARN) of the replication task to be stopped.

    ", + "Subnet$SubnetIdentifier": "

    The subnet identifier.

    ", + "Subnet$SubnetStatus": "

    The status of the subnet.

    ", + "SubnetIdentifierList$member": null, + "SupportedEndpointType$EngineName": "

    The database engine name.

    ", + "TableStatistics$SchemaName": "

    The schema name.

    ", + "TableStatistics$TableName": "

    The name of the table.

    ", + "TableStatistics$TableState": "

    The state of the table.

    ", + "Tag$Key": "

    A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and cannot be prefixed with \"aws:\" or \"dms:\". The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").

    ", + "Tag$Value": "

    A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and cannot be prefixed with \"aws:\" or \"dms:\". The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").

    ", + "TestConnectionMessage$ReplicationInstanceArn": "

    The Amazon Resource Name (ARN) of the replication instance.

    ", + "TestConnectionMessage$EndpointArn": "

    The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.

    ", + "VpcSecurityGroupIdList$member": null, + "VpcSecurityGroupMembership$VpcSecurityGroupId": "

    The VPC security group ID.

    ", + "VpcSecurityGroupMembership$Status": "

    The status of the VPC security group.

    " + } + }, + "Subnet": { + "base": "

    ", + "refs": { + "SubnetList$member": null + } + }, + "SubnetAlreadyInUse": { + "base": "

    The specified subnet is already in use.

    ", + "refs": { + } + }, + "SubnetIdentifierList": { + "base": null, + "refs": { + "CreateReplicationSubnetGroupMessage$SubnetIds": "

    The EC2 subnet IDs for the subnet group.

    ", + "ModifyReplicationSubnetGroupMessage$SubnetIds": "

    A list of subnet IDs.

    " + } + }, + "SubnetList": { + "base": null, + "refs": { + "ReplicationSubnetGroup$Subnets": "

    The subnets that are in the subnet group.

    " + } + }, + "SupportedEndpointType": { + "base": "

    ", + "refs": { + "SupportedEndpointTypeList$member": null + } + }, + "SupportedEndpointTypeList": { + "base": null, + "refs": { + "DescribeEndpointTypesResponse$SupportedEndpointTypes": "

    The type of endpoints that are supported.

    " + } + }, + "TStamp": { + "base": null, + "refs": { + "CreateReplicationTaskMessage$CdcStartTime": "

    The start time for the Change Data Capture (CDC) operation.

    ", + "RefreshSchemasStatus$LastRefreshDate": "

    The date the schema was last refreshed.

    ", + "ReplicationInstance$InstanceCreateTime": "

    The time the replication instance was created.

    ", + "ReplicationTask$ReplicationTaskCreationDate": "

    The date the replication task was created.

    ", + "ReplicationTask$ReplicationTaskStartDate": "

    The date the replication task is scheduled to start.

    ", + "StartReplicationTaskMessage$CdcStartTime": "

    The start time for the Change Data Capture (CDC) operation.

    ", + "TableStatistics$LastUpdateTime": "

    The last time the table was updated.

    " + } + }, + "TableStatistics": { + "base": "

    ", + "refs": { + "TableStatisticsList$member": null + } + }, + "TableStatisticsList": { + "base": null, + "refs": { + "DescribeTableStatisticsResponse$TableStatistics": "

    The table statistics.

    " + } + }, + "Tag": { + "base": "

    ", + "refs": { + "TagList$member": null + } + }, + "TagList": { + "base": null, + "refs": { + "AddTagsToResourceMessage$Tags": "

    The tags to be assigned to the AWS DMS resource.

    ", + "CreateEndpointMessage$Tags": "

    Tags to be added to the endpoint.

    ", + "CreateReplicationInstanceMessage$Tags": "

    Tags to be associated with the replication instance.

    ", + "CreateReplicationSubnetGroupMessage$Tags": "

    The tags to be assigned to the subnet group.

    ", + "CreateReplicationTaskMessage$Tags": "

    Tags to be added to the replication task.

    ", + "ListTagsForResourceResponse$TagList": "

    A list of tags for the resource.

    " + } + }, + "TestConnectionMessage": { + "base": "

    ", + "refs": { + } + }, + "TestConnectionResponse": { + "base": "

    ", + "refs": { + } + }, + "UpgradeDependencyFailureFault": { + "base": "

    An upgrade dependency is preventing the database migration.

    ", + "refs": { + } + }, + "VpcSecurityGroupIdList": { + "base": null, + "refs": { + "CreateReplicationInstanceMessage$VpcSecurityGroupIds": "

    Specifies the VPC security groups to be used with the replication instance. The VPC security groups must belong to the VPC containing the replication instance.

    ", + "ModifyReplicationInstanceMessage$VpcSecurityGroupIds": "

    Specifies the VPC security groups to be used with the replication instance. The VPC security groups must belong to the VPC containing the replication instance.

    " + } + }, + "VpcSecurityGroupMembership": { + "base": "

    ", + "refs": { + "VpcSecurityGroupMembershipList$member": null + } + }, + "VpcSecurityGroupMembershipList": { + "base": null, + "refs": { + "ReplicationInstance$VpcSecurityGroups": "

    The VPC security groups for the instance.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/dms/2016-01-01/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/dms/2016-01-01/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/dms/2016-01-01/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/ds/2015-04-16/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/ds/2015-04-16/api-2.json new file mode 100644 index 000000000..91286bb57 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/ds/2015-04-16/api-2.json @@ -0,0 +1,1674 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-04-16", + "endpointPrefix":"ds", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"Directory Service", + "serviceFullName":"AWS Directory Service", + "signatureVersion":"v4", + "targetPrefix":"DirectoryService_20150416" + }, + "operations":{ + "AddTagsToResource":{ + "name":"AddTagsToResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddTagsToResourceRequest"}, + "output":{"shape":"AddTagsToResourceResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"InvalidParameterException"}, + {"shape":"TagLimitExceededException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "ConnectDirectory":{ + "name":"ConnectDirectory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ConnectDirectoryRequest"}, + "output":{"shape":"ConnectDirectoryResult"}, + "errors":[ + {"shape":"DirectoryLimitExceededException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "CreateAlias":{ + "name":"CreateAlias", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateAliasRequest"}, + "output":{"shape":"CreateAliasResult"}, + "errors":[ + {"shape":"EntityAlreadyExistsException"}, + {"shape":"EntityDoesNotExistException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "CreateComputer":{ + "name":"CreateComputer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateComputerRequest"}, + "output":{"shape":"CreateComputerResult"}, + "errors":[ + {"shape":"AuthenticationFailedException"}, + {"shape":"DirectoryUnavailableException"}, + {"shape":"EntityAlreadyExistsException"}, + {"shape":"EntityDoesNotExistException"}, + {"shape":"InvalidParameterException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "CreateConditionalForwarder":{ + "name":"CreateConditionalForwarder", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateConditionalForwarderRequest"}, + "output":{"shape":"CreateConditionalForwarderResult"}, + "errors":[ + {"shape":"EntityAlreadyExistsException"}, + {"shape":"EntityDoesNotExistException"}, + {"shape":"DirectoryUnavailableException"}, + {"shape":"InvalidParameterException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "CreateDirectory":{ + "name":"CreateDirectory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDirectoryRequest"}, + "output":{"shape":"CreateDirectoryResult"}, + "errors":[ + {"shape":"DirectoryLimitExceededException"}, + 
{"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "CreateMicrosoftAD":{ + "name":"CreateMicrosoftAD", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateMicrosoftADRequest"}, + "output":{"shape":"CreateMicrosoftADResult"}, + "errors":[ + {"shape":"DirectoryLimitExceededException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"}, + {"shape":"UnsupportedOperationException"} + ] + }, + "CreateSnapshot":{ + "name":"CreateSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSnapshotRequest"}, + "output":{"shape":"CreateSnapshotResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"InvalidParameterException"}, + {"shape":"SnapshotLimitExceededException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "CreateTrust":{ + "name":"CreateTrust", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateTrustRequest"}, + "output":{"shape":"CreateTrustResult"}, + "errors":[ + {"shape":"EntityAlreadyExistsException"}, + {"shape":"EntityDoesNotExistException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"}, + {"shape":"UnsupportedOperationException"} + ] + }, + "DeleteConditionalForwarder":{ + "name":"DeleteConditionalForwarder", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteConditionalForwarderRequest"}, + "output":{"shape":"DeleteConditionalForwarderResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"DirectoryUnavailableException"}, + {"shape":"InvalidParameterException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "DeleteDirectory":{ + "name":"DeleteDirectory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDirectoryRequest"}, + "output":{"shape":"DeleteDirectoryResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "DeleteSnapshot":{ + "name":"DeleteSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSnapshotRequest"}, + "output":{"shape":"DeleteSnapshotResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "DeleteTrust":{ + "name":"DeleteTrust", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteTrustRequest"}, + "output":{"shape":"DeleteTrustResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"}, + {"shape":"UnsupportedOperationException"} + ] + }, + "DeregisterEventTopic":{ + "name":"DeregisterEventTopic", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeregisterEventTopicRequest"}, + "output":{"shape":"DeregisterEventTopicResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "DescribeConditionalForwarders":{ + "name":"DescribeConditionalForwarders", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeConditionalForwardersRequest"}, + 
"output":{"shape":"DescribeConditionalForwardersResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"DirectoryUnavailableException"}, + {"shape":"InvalidParameterException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "DescribeDirectories":{ + "name":"DescribeDirectories", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDirectoriesRequest"}, + "output":{"shape":"DescribeDirectoriesResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "DescribeEventTopics":{ + "name":"DescribeEventTopics", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventTopicsRequest"}, + "output":{"shape":"DescribeEventTopicsResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "DescribeSnapshots":{ + "name":"DescribeSnapshots", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSnapshotsRequest"}, + "output":{"shape":"DescribeSnapshotsResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "DescribeTrusts":{ + "name":"DescribeTrusts", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTrustsRequest"}, + "output":{"shape":"DescribeTrustsResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"}, + {"shape":"UnsupportedOperationException"} + ] + }, + "DisableRadius":{ + "name":"DisableRadius", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableRadiusRequest"}, + "output":{"shape":"DisableRadiusResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "DisableSso":{ + "name":"DisableSso", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableSsoRequest"}, + "output":{"shape":"DisableSsoResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"InsufficientPermissionsException"}, + {"shape":"AuthenticationFailedException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "EnableRadius":{ + "name":"EnableRadius", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableRadiusRequest"}, + "output":{"shape":"EnableRadiusResult"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"EntityAlreadyExistsException"}, + {"shape":"EntityDoesNotExistException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "EnableSso":{ + "name":"EnableSso", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableSsoRequest"}, + "output":{"shape":"EnableSsoResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"InsufficientPermissionsException"}, + {"shape":"AuthenticationFailedException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "GetDirectoryLimits":{ + "name":"GetDirectoryLimits", + "http":{ + 
"method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDirectoryLimitsRequest"}, + "output":{"shape":"GetDirectoryLimitsResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "GetSnapshotLimits":{ + "name":"GetSnapshotLimits", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetSnapshotLimitsRequest"}, + "output":{"shape":"GetSnapshotLimitsResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "RegisterEventTopic":{ + "name":"RegisterEventTopic", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterEventTopicRequest"}, + "output":{"shape":"RegisterEventTopicResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "RemoveTagsFromResource":{ + "name":"RemoveTagsFromResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveTagsFromResourceRequest"}, + "output":{"shape":"RemoveTagsFromResourceResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "RestoreFromSnapshot":{ + "name":"RestoreFromSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreFromSnapshotRequest"}, + "output":{"shape":"RestoreFromSnapshotResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "UpdateConditionalForwarder":{ + "name":"UpdateConditionalForwarder", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateConditionalForwarderRequest"}, + "output":{"shape":"UpdateConditionalForwarderResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"DirectoryUnavailableException"}, + {"shape":"InvalidParameterException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "UpdateRadius":{ + "name":"UpdateRadius", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateRadiusRequest"}, + "output":{"shape":"UpdateRadiusResult"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"EntityDoesNotExistException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "VerifyTrust":{ + "name":"VerifyTrust", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"VerifyTrustRequest"}, + "output":{"shape":"VerifyTrustResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"}, + {"shape":"UnsupportedOperationException"} + ] + } + }, + "shapes":{ + "AccessUrl":{ + "type":"string", + "max":128, + "min":1 + }, + "AddTagsToResourceRequest":{ + "type":"structure", + 
"required":[ + "ResourceId", + "Tags" + ], + "members":{ + "ResourceId":{"shape":"ResourceId"}, + "Tags":{"shape":"Tags"} + } + }, + "AddTagsToResourceResult":{ + "type":"structure", + "members":{ + } + }, + "AliasName":{ + "type":"string", + "max":62, + "min":1, + "pattern":"^(?!d-)([\\da-zA-Z]+)([-]*[\\da-zA-Z])*" + }, + "Attribute":{ + "type":"structure", + "members":{ + "Name":{"shape":"AttributeName"}, + "Value":{"shape":"AttributeValue"} + } + }, + "AttributeName":{ + "type":"string", + "min":1 + }, + "AttributeValue":{"type":"string"}, + "Attributes":{ + "type":"list", + "member":{"shape":"Attribute"} + }, + "AuthenticationFailedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "RequestId":{"shape":"RequestId"} + }, + "exception":true + }, + "AvailabilityZone":{"type":"string"}, + "AvailabilityZones":{ + "type":"list", + "member":{"shape":"AvailabilityZone"} + }, + "ClientException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "RequestId":{"shape":"RequestId"} + }, + "exception":true + }, + "CloudOnlyDirectoriesLimitReached":{"type":"boolean"}, + "Computer":{ + "type":"structure", + "members":{ + "ComputerId":{"shape":"SID"}, + "ComputerName":{"shape":"ComputerName"}, + "ComputerAttributes":{"shape":"Attributes"} + } + }, + "ComputerName":{ + "type":"string", + "max":15, + "min":1 + }, + "ComputerPassword":{ + "type":"string", + "max":64, + "min":8, + "pattern":"[\\u0020-\\u00FF]+", + "sensitive":true + }, + "ConditionalForwarder":{ + "type":"structure", + "members":{ + "RemoteDomainName":{"shape":"RemoteDomainName"}, + "DnsIpAddrs":{"shape":"DnsIpAddrs"}, + "ReplicationScope":{"shape":"ReplicationScope"} + } + }, + "ConditionalForwarders":{ + "type":"list", + "member":{"shape":"ConditionalForwarder"} + }, + "ConnectDirectoryRequest":{ + "type":"structure", + "required":[ + "Name", + "Password", + "Size", + "ConnectSettings" + ], + "members":{ + "Name":{"shape":"DirectoryName"}, + "ShortName":{"shape":"DirectoryShortName"}, + "Password":{"shape":"ConnectPassword"}, + "Description":{"shape":"Description"}, + "Size":{"shape":"DirectorySize"}, + "ConnectSettings":{"shape":"DirectoryConnectSettings"} + } + }, + "ConnectDirectoryResult":{ + "type":"structure", + "members":{ + "DirectoryId":{"shape":"DirectoryId"} + } + }, + "ConnectPassword":{ + "type":"string", + "max":128, + "min":1, + "sensitive":true + }, + "ConnectedDirectoriesLimitReached":{"type":"boolean"}, + "CreateAliasRequest":{ + "type":"structure", + "required":[ + "DirectoryId", + "Alias" + ], + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "Alias":{"shape":"AliasName"} + } + }, + "CreateAliasResult":{ + "type":"structure", + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "Alias":{"shape":"AliasName"} + } + }, + "CreateComputerRequest":{ + "type":"structure", + "required":[ + "DirectoryId", + "ComputerName", + "Password" + ], + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "ComputerName":{"shape":"ComputerName"}, + "Password":{"shape":"ComputerPassword"}, + "OrganizationalUnitDistinguishedName":{"shape":"OrganizationalUnitDN"}, + "ComputerAttributes":{"shape":"Attributes"} + } + }, + "CreateComputerResult":{ + "type":"structure", + "members":{ + "Computer":{"shape":"Computer"} + } + }, + "CreateConditionalForwarderRequest":{ + "type":"structure", + "required":[ + "DirectoryId", + "RemoteDomainName", + "DnsIpAddrs" + ], + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + 
"RemoteDomainName":{"shape":"RemoteDomainName"}, + "DnsIpAddrs":{"shape":"DnsIpAddrs"} + } + }, + "CreateConditionalForwarderResult":{ + "type":"structure", + "members":{ + } + }, + "CreateDirectoryRequest":{ + "type":"structure", + "required":[ + "Name", + "Password", + "Size" + ], + "members":{ + "Name":{"shape":"DirectoryName"}, + "ShortName":{"shape":"DirectoryShortName"}, + "Password":{"shape":"Password"}, + "Description":{"shape":"Description"}, + "Size":{"shape":"DirectorySize"}, + "VpcSettings":{"shape":"DirectoryVpcSettings"} + } + }, + "CreateDirectoryResult":{ + "type":"structure", + "members":{ + "DirectoryId":{"shape":"DirectoryId"} + } + }, + "CreateMicrosoftADRequest":{ + "type":"structure", + "required":[ + "Name", + "Password", + "VpcSettings" + ], + "members":{ + "Name":{"shape":"DirectoryName"}, + "ShortName":{"shape":"DirectoryShortName"}, + "Password":{"shape":"Password"}, + "Description":{"shape":"Description"}, + "VpcSettings":{"shape":"DirectoryVpcSettings"} + } + }, + "CreateMicrosoftADResult":{ + "type":"structure", + "members":{ + "DirectoryId":{"shape":"DirectoryId"} + } + }, + "CreateSnapshotRequest":{ + "type":"structure", + "required":["DirectoryId"], + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "Name":{"shape":"SnapshotName"} + } + }, + "CreateSnapshotResult":{ + "type":"structure", + "members":{ + "SnapshotId":{"shape":"SnapshotId"} + } + }, + "CreateTrustRequest":{ + "type":"structure", + "required":[ + "DirectoryId", + "RemoteDomainName", + "TrustPassword", + "TrustDirection" + ], + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "RemoteDomainName":{"shape":"RemoteDomainName"}, + "TrustPassword":{"shape":"TrustPassword"}, + "TrustDirection":{"shape":"TrustDirection"}, + "TrustType":{"shape":"TrustType"}, + "ConditionalForwarderIpAddrs":{"shape":"DnsIpAddrs"} + } + }, + "CreateTrustResult":{ + "type":"structure", + "members":{ + "TrustId":{"shape":"TrustId"} + } + }, + "CreatedDateTime":{"type":"timestamp"}, + "DeleteAssociatedConditionalForwarder":{"type":"boolean"}, + "DeleteConditionalForwarderRequest":{ + "type":"structure", + "required":[ + "DirectoryId", + "RemoteDomainName" + ], + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "RemoteDomainName":{"shape":"RemoteDomainName"} + } + }, + "DeleteConditionalForwarderResult":{ + "type":"structure", + "members":{ + } + }, + "DeleteDirectoryRequest":{ + "type":"structure", + "required":["DirectoryId"], + "members":{ + "DirectoryId":{"shape":"DirectoryId"} + } + }, + "DeleteDirectoryResult":{ + "type":"structure", + "members":{ + "DirectoryId":{"shape":"DirectoryId"} + } + }, + "DeleteSnapshotRequest":{ + "type":"structure", + "required":["SnapshotId"], + "members":{ + "SnapshotId":{"shape":"SnapshotId"} + } + }, + "DeleteSnapshotResult":{ + "type":"structure", + "members":{ + "SnapshotId":{"shape":"SnapshotId"} + } + }, + "DeleteTrustRequest":{ + "type":"structure", + "required":["TrustId"], + "members":{ + "TrustId":{"shape":"TrustId"}, + "DeleteAssociatedConditionalForwarder":{"shape":"DeleteAssociatedConditionalForwarder"} + } + }, + "DeleteTrustResult":{ + "type":"structure", + "members":{ + "TrustId":{"shape":"TrustId"} + } + }, + "DeregisterEventTopicRequest":{ + "type":"structure", + "required":[ + "DirectoryId", + "TopicName" + ], + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "TopicName":{"shape":"TopicName"} + } + }, + "DeregisterEventTopicResult":{ + "type":"structure", + "members":{ + } + }, + "DescribeConditionalForwardersRequest":{ + "type":"structure", + 
"required":["DirectoryId"], + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "RemoteDomainNames":{"shape":"RemoteDomainNames"} + } + }, + "DescribeConditionalForwardersResult":{ + "type":"structure", + "members":{ + "ConditionalForwarders":{"shape":"ConditionalForwarders"} + } + }, + "DescribeDirectoriesRequest":{ + "type":"structure", + "members":{ + "DirectoryIds":{"shape":"DirectoryIds"}, + "NextToken":{"shape":"NextToken"}, + "Limit":{"shape":"Limit"} + } + }, + "DescribeDirectoriesResult":{ + "type":"structure", + "members":{ + "DirectoryDescriptions":{"shape":"DirectoryDescriptions"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeEventTopicsRequest":{ + "type":"structure", + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "TopicNames":{"shape":"TopicNames"} + } + }, + "DescribeEventTopicsResult":{ + "type":"structure", + "members":{ + "EventTopics":{"shape":"EventTopics"} + } + }, + "DescribeSnapshotsRequest":{ + "type":"structure", + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "SnapshotIds":{"shape":"SnapshotIds"}, + "NextToken":{"shape":"NextToken"}, + "Limit":{"shape":"Limit"} + } + }, + "DescribeSnapshotsResult":{ + "type":"structure", + "members":{ + "Snapshots":{"shape":"Snapshots"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeTrustsRequest":{ + "type":"structure", + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "TrustIds":{"shape":"TrustIds"}, + "NextToken":{"shape":"NextToken"}, + "Limit":{"shape":"Limit"} + } + }, + "DescribeTrustsResult":{ + "type":"structure", + "members":{ + "Trusts":{"shape":"Trusts"}, + "NextToken":{"shape":"NextToken"} + } + }, + "Description":{ + "type":"string", + "max":128, + "min":0, + "pattern":"^([a-zA-Z0-9_])[\\\\a-zA-Z0-9_@#%*+=:?./!\\s-]*$" + }, + "DirectoryConnectSettings":{ + "type":"structure", + "required":[ + "VpcId", + "SubnetIds", + "CustomerDnsIps", + "CustomerUserName" + ], + "members":{ + "VpcId":{"shape":"VpcId"}, + "SubnetIds":{"shape":"SubnetIds"}, + "CustomerDnsIps":{"shape":"DnsIpAddrs"}, + "CustomerUserName":{"shape":"UserName"} + } + }, + "DirectoryConnectSettingsDescription":{ + "type":"structure", + "members":{ + "VpcId":{"shape":"VpcId"}, + "SubnetIds":{"shape":"SubnetIds"}, + "CustomerUserName":{"shape":"UserName"}, + "SecurityGroupId":{"shape":"SecurityGroupId"}, + "AvailabilityZones":{"shape":"AvailabilityZones"}, + "ConnectIps":{"shape":"IpAddrs"} + } + }, + "DirectoryDescription":{ + "type":"structure", + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "Name":{"shape":"DirectoryName"}, + "ShortName":{"shape":"DirectoryShortName"}, + "Size":{"shape":"DirectorySize"}, + "Alias":{"shape":"AliasName"}, + "AccessUrl":{"shape":"AccessUrl"}, + "Description":{"shape":"Description"}, + "DnsIpAddrs":{"shape":"DnsIpAddrs"}, + "Stage":{"shape":"DirectoryStage"}, + "LaunchTime":{"shape":"LaunchTime"}, + "StageLastUpdatedDateTime":{"shape":"LastUpdatedDateTime"}, + "Type":{"shape":"DirectoryType"}, + "VpcSettings":{"shape":"DirectoryVpcSettingsDescription"}, + "ConnectSettings":{"shape":"DirectoryConnectSettingsDescription"}, + "RadiusSettings":{"shape":"RadiusSettings"}, + "RadiusStatus":{"shape":"RadiusStatus"}, + "StageReason":{"shape":"StageReason"}, + "SsoEnabled":{"shape":"SsoEnabled"} + } + }, + "DirectoryDescriptions":{ + "type":"list", + "member":{"shape":"DirectoryDescription"} + }, + "DirectoryId":{ + "type":"string", + "pattern":"^d-[0-9a-f]{10}$" + }, + "DirectoryIds":{ + "type":"list", + "member":{"shape":"DirectoryId"} + }, + 
"DirectoryLimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "RequestId":{"shape":"RequestId"} + }, + "exception":true + }, + "DirectoryLimits":{ + "type":"structure", + "members":{ + "CloudOnlyDirectoriesLimit":{"shape":"Limit"}, + "CloudOnlyDirectoriesCurrentCount":{"shape":"Limit"}, + "CloudOnlyDirectoriesLimitReached":{"shape":"CloudOnlyDirectoriesLimitReached"}, + "CloudOnlyMicrosoftADLimit":{"shape":"Limit"}, + "CloudOnlyMicrosoftADCurrentCount":{"shape":"Limit"}, + "CloudOnlyMicrosoftADLimitReached":{"shape":"CloudOnlyDirectoriesLimitReached"}, + "ConnectedDirectoriesLimit":{"shape":"Limit"}, + "ConnectedDirectoriesCurrentCount":{"shape":"Limit"}, + "ConnectedDirectoriesLimitReached":{"shape":"ConnectedDirectoriesLimitReached"} + } + }, + "DirectoryName":{ + "type":"string", + "pattern":"^([a-zA-Z0-9]+[\\\\.-])+([a-zA-Z0-9])+$" + }, + "DirectoryShortName":{ + "type":"string", + "pattern":"^[^\\\\/:*?\\\"\\<\\>|.]+[^\\\\/:*?\\\"<>|]*$" + }, + "DirectorySize":{ + "type":"string", + "enum":[ + "Small", + "Large" + ] + }, + "DirectoryStage":{ + "type":"string", + "enum":[ + "Requested", + "Creating", + "Created", + "Active", + "Inoperable", + "Impaired", + "Restoring", + "RestoreFailed", + "Deleting", + "Deleted", + "Failed" + ] + }, + "DirectoryType":{ + "type":"string", + "enum":[ + "SimpleAD", + "ADConnector", + "MicrosoftAD" + ] + }, + "DirectoryUnavailableException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "RequestId":{"shape":"RequestId"} + }, + "exception":true + }, + "DirectoryVpcSettings":{ + "type":"structure", + "required":[ + "VpcId", + "SubnetIds" + ], + "members":{ + "VpcId":{"shape":"VpcId"}, + "SubnetIds":{"shape":"SubnetIds"} + } + }, + "DirectoryVpcSettingsDescription":{ + "type":"structure", + "members":{ + "VpcId":{"shape":"VpcId"}, + "SubnetIds":{"shape":"SubnetIds"}, + "SecurityGroupId":{"shape":"SecurityGroupId"}, + "AvailabilityZones":{"shape":"AvailabilityZones"} + } + }, + "DisableRadiusRequest":{ + "type":"structure", + "required":["DirectoryId"], + "members":{ + "DirectoryId":{"shape":"DirectoryId"} + } + }, + "DisableRadiusResult":{ + "type":"structure", + "members":{ + } + }, + "DisableSsoRequest":{ + "type":"structure", + "required":["DirectoryId"], + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "UserName":{"shape":"UserName"}, + "Password":{"shape":"ConnectPassword"} + } + }, + "DisableSsoResult":{ + "type":"structure", + "members":{ + } + }, + "DnsIpAddrs":{ + "type":"list", + "member":{"shape":"IpAddr"} + }, + "EnableRadiusRequest":{ + "type":"structure", + "required":[ + "DirectoryId", + "RadiusSettings" + ], + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "RadiusSettings":{"shape":"RadiusSettings"} + } + }, + "EnableRadiusResult":{ + "type":"structure", + "members":{ + } + }, + "EnableSsoRequest":{ + "type":"structure", + "required":["DirectoryId"], + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "UserName":{"shape":"UserName"}, + "Password":{"shape":"ConnectPassword"} + } + }, + "EnableSsoResult":{ + "type":"structure", + "members":{ + } + }, + "EntityAlreadyExistsException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "RequestId":{"shape":"RequestId"} + }, + "exception":true + }, + "EntityDoesNotExistException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "RequestId":{"shape":"RequestId"} + }, + "exception":true + }, + "EventTopic":{ + "type":"structure", 
+ "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "TopicName":{"shape":"TopicName"}, + "TopicArn":{"shape":"TopicArn"}, + "CreatedDateTime":{"shape":"CreatedDateTime"}, + "Status":{"shape":"TopicStatus"} + } + }, + "EventTopics":{ + "type":"list", + "member":{"shape":"EventTopic"} + }, + "ExceptionMessage":{"type":"string"}, + "GetDirectoryLimitsRequest":{ + "type":"structure", + "members":{ + } + }, + "GetDirectoryLimitsResult":{ + "type":"structure", + "members":{ + "DirectoryLimits":{"shape":"DirectoryLimits"} + } + }, + "GetSnapshotLimitsRequest":{ + "type":"structure", + "required":["DirectoryId"], + "members":{ + "DirectoryId":{"shape":"DirectoryId"} + } + }, + "GetSnapshotLimitsResult":{ + "type":"structure", + "members":{ + "SnapshotLimits":{"shape":"SnapshotLimits"} + } + }, + "InsufficientPermissionsException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "RequestId":{"shape":"RequestId"} + }, + "exception":true + }, + "InvalidNextTokenException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "RequestId":{"shape":"RequestId"} + }, + "exception":true + }, + "InvalidParameterException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "RequestId":{"shape":"RequestId"} + }, + "exception":true + }, + "IpAddr":{ + "type":"string", + "pattern":"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$" + }, + "IpAddrs":{ + "type":"list", + "member":{"shape":"IpAddr"} + }, + "LastUpdatedDateTime":{"type":"timestamp"}, + "LaunchTime":{"type":"timestamp"}, + "Limit":{ + "type":"integer", + "min":0 + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceId"], + "members":{ + "ResourceId":{"shape":"ResourceId"}, + "NextToken":{"shape":"NextToken"}, + "Limit":{"shape":"Limit"} + } + }, + "ListTagsForResourceResult":{ + "type":"structure", + "members":{ + "Tags":{"shape":"Tags"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ManualSnapshotsLimitReached":{"type":"boolean"}, + "NextToken":{"type":"string"}, + "OrganizationalUnitDN":{ + "type":"string", + "max":2000, + "min":1 + }, + "Password":{ + "type":"string", + "pattern":"(?=^.{8,64}$)((?=.*\\d)(?=.*[A-Z])(?=.*[a-z])|(?=.*\\d)(?=.*[^A-Za-z0-9\\s])(?=.*[a-z])|(?=.*[^A-Za-z0-9\\s])(?=.*[A-Z])(?=.*[a-z])|(?=.*\\d)(?=.*[A-Z])(?=.*[^A-Za-z0-9\\s]))^.*", + "sensitive":true + }, + "PortNumber":{ + "type":"integer", + "max":65535, + "min":1025 + }, + "RadiusAuthenticationProtocol":{ + "type":"string", + "enum":[ + "PAP", + "CHAP", + "MS-CHAPv1", + "MS-CHAPv2" + ] + }, + "RadiusDisplayLabel":{ + "type":"string", + "max":64, + "min":1 + }, + "RadiusRetries":{ + "type":"integer", + "max":10, + "min":0 + }, + "RadiusSettings":{ + "type":"structure", + "members":{ + "RadiusServers":{"shape":"Servers"}, + "RadiusPort":{"shape":"PortNumber"}, + "RadiusTimeout":{"shape":"RadiusTimeout"}, + "RadiusRetries":{"shape":"RadiusRetries"}, + "SharedSecret":{"shape":"RadiusSharedSecret"}, + "AuthenticationProtocol":{"shape":"RadiusAuthenticationProtocol"}, + "DisplayLabel":{"shape":"RadiusDisplayLabel"}, + "UseSameUsername":{"shape":"UseSameUsername"} + } + }, + "RadiusSharedSecret":{ + "type":"string", + "max":512, + "min":8, + "sensitive":true + }, + "RadiusStatus":{ + "type":"string", + "enum":[ + "Creating", + "Completed", + "Failed" + ] + }, + "RadiusTimeout":{ + "type":"integer", + "max":20, + "min":1 + }, + "RegisterEventTopicRequest":{ + "type":"structure", + "required":[ + 
"DirectoryId", + "TopicName" + ], + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "TopicName":{"shape":"TopicName"} + } + }, + "RegisterEventTopicResult":{ + "type":"structure", + "members":{ + } + }, + "RemoteDomainName":{ + "type":"string", + "pattern":"^([a-zA-Z0-9]+[\\\\.-])+([a-zA-Z0-9])+[.]?$" + }, + "RemoteDomainNames":{ + "type":"list", + "member":{"shape":"RemoteDomainName"} + }, + "RemoveTagsFromResourceRequest":{ + "type":"structure", + "required":[ + "ResourceId", + "TagKeys" + ], + "members":{ + "ResourceId":{"shape":"ResourceId"}, + "TagKeys":{"shape":"TagKeys"} + } + }, + "RemoveTagsFromResourceResult":{ + "type":"structure", + "members":{ + } + }, + "ReplicationScope":{ + "type":"string", + "enum":["Domain"] + }, + "RequestId":{ + "type":"string", + "pattern":"^([A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12})$" + }, + "ResourceId":{ + "type":"string", + "pattern":"^[d]-[0-9a-f]{10}$" + }, + "RestoreFromSnapshotRequest":{ + "type":"structure", + "required":["SnapshotId"], + "members":{ + "SnapshotId":{"shape":"SnapshotId"} + } + }, + "RestoreFromSnapshotResult":{ + "type":"structure", + "members":{ + } + }, + "SID":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[&\\w+-.@]+" + }, + "SecurityGroupId":{ + "type":"string", + "pattern":"^(sg-[0-9a-f]{8})$" + }, + "Server":{ + "type":"string", + "max":256, + "min":1 + }, + "Servers":{ + "type":"list", + "member":{"shape":"Server"} + }, + "ServiceException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "RequestId":{"shape":"RequestId"} + }, + "exception":true, + "fault":true + }, + "Snapshot":{ + "type":"structure", + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "SnapshotId":{"shape":"SnapshotId"}, + "Type":{"shape":"SnapshotType"}, + "Name":{"shape":"SnapshotName"}, + "Status":{"shape":"SnapshotStatus"}, + "StartTime":{"shape":"StartTime"} + } + }, + "SnapshotId":{ + "type":"string", + "pattern":"^s-[0-9a-f]{10}$" + }, + "SnapshotIds":{ + "type":"list", + "member":{"shape":"SnapshotId"} + }, + "SnapshotLimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "RequestId":{"shape":"RequestId"} + }, + "exception":true + }, + "SnapshotLimits":{ + "type":"structure", + "members":{ + "ManualSnapshotsLimit":{"shape":"Limit"}, + "ManualSnapshotsCurrentCount":{"shape":"Limit"}, + "ManualSnapshotsLimitReached":{"shape":"ManualSnapshotsLimitReached"} + } + }, + "SnapshotName":{ + "type":"string", + "max":128, + "min":0, + "pattern":"^([a-zA-Z0-9_])[\\\\a-zA-Z0-9_@#%*+=:?./!\\s-]*$" + }, + "SnapshotStatus":{ + "type":"string", + "enum":[ + "Creating", + "Completed", + "Failed" + ] + }, + "SnapshotType":{ + "type":"string", + "enum":[ + "Auto", + "Manual" + ] + }, + "Snapshots":{ + "type":"list", + "member":{"shape":"Snapshot"} + }, + "SsoEnabled":{"type":"boolean"}, + "StageReason":{"type":"string"}, + "StartTime":{"type":"timestamp"}, + "StateLastUpdatedDateTime":{"type":"timestamp"}, + "SubnetId":{ + "type":"string", + "pattern":"^(subnet-[0-9a-f]{8})$" + }, + "SubnetIds":{ + "type":"list", + "member":{"shape":"SubnetId"} + }, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{"shape":"TagKey"}, + "Value":{"shape":"TagValue"} + } + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, + "TagKeys":{ + "type":"list", + "member":{"shape":"TagKey"} + }, + "TagLimitExceededException":{ + 
"type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "RequestId":{"shape":"RequestId"} + }, + "exception":true + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, + "Tags":{ + "type":"list", + "member":{"shape":"Tag"} + }, + "TopicArn":{"type":"string"}, + "TopicName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[a-zA-Z0-9_-]+" + }, + "TopicNames":{ + "type":"list", + "member":{"shape":"TopicName"} + }, + "TopicStatus":{ + "type":"string", + "enum":[ + "Registered", + "Topic not found", + "Failed", + "Deleted" + ] + }, + "Trust":{ + "type":"structure", + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "TrustId":{"shape":"TrustId"}, + "RemoteDomainName":{"shape":"RemoteDomainName"}, + "TrustType":{"shape":"TrustType"}, + "TrustDirection":{"shape":"TrustDirection"}, + "TrustState":{"shape":"TrustState"}, + "CreatedDateTime":{"shape":"CreatedDateTime"}, + "LastUpdatedDateTime":{"shape":"LastUpdatedDateTime"}, + "StateLastUpdatedDateTime":{"shape":"StateLastUpdatedDateTime"}, + "TrustStateReason":{"shape":"TrustStateReason"} + } + }, + "TrustDirection":{ + "type":"string", + "enum":[ + "One-Way: Outgoing", + "One-Way: Incoming", + "Two-Way" + ] + }, + "TrustId":{ + "type":"string", + "pattern":"^t-[0-9a-f]{10}$" + }, + "TrustIds":{ + "type":"list", + "member":{"shape":"TrustId"} + }, + "TrustPassword":{ + "type":"string", + "max":128, + "min":1, + "sensitive":true + }, + "TrustState":{ + "type":"string", + "enum":[ + "Creating", + "Created", + "Verifying", + "VerifyFailed", + "Verified", + "Deleting", + "Deleted", + "Failed" + ] + }, + "TrustStateReason":{"type":"string"}, + "TrustType":{ + "type":"string", + "enum":["Forest"] + }, + "Trusts":{ + "type":"list", + "member":{"shape":"Trust"} + }, + "UnsupportedOperationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "RequestId":{"shape":"RequestId"} + }, + "exception":true + }, + "UpdateConditionalForwarderRequest":{ + "type":"structure", + "required":[ + "DirectoryId", + "RemoteDomainName", + "DnsIpAddrs" + ], + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "RemoteDomainName":{"shape":"RemoteDomainName"}, + "DnsIpAddrs":{"shape":"DnsIpAddrs"} + } + }, + "UpdateConditionalForwarderResult":{ + "type":"structure", + "members":{ + } + }, + "UpdateRadiusRequest":{ + "type":"structure", + "required":[ + "DirectoryId", + "RadiusSettings" + ], + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "RadiusSettings":{"shape":"RadiusSettings"} + } + }, + "UpdateRadiusResult":{ + "type":"structure", + "members":{ + } + }, + "UseSameUsername":{"type":"boolean"}, + "UserName":{ + "type":"string", + "min":1, + "pattern":"[a-zA-Z0-9._-]+" + }, + "VerifyTrustRequest":{ + "type":"structure", + "required":["TrustId"], + "members":{ + "TrustId":{"shape":"TrustId"} + } + }, + "VerifyTrustResult":{ + "type":"structure", + "members":{ + "TrustId":{"shape":"TrustId"} + } + }, + "VpcId":{ + "type":"string", + "pattern":"^(vpc-[0-9a-f]{8})$" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/ds/2015-04-16/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/ds/2015-04-16/docs-2.json new file mode 100644 index 000000000..b52697b04 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/ds/2015-04-16/docs-2.json @@ -0,0 +1,1152 @@ +{ + "version": "2.0", + "service": "AWS Directory Service

    This is the AWS Directory Service API Reference. This guide provides detailed information about AWS Directory Service operations, data types, parameters, and errors.

    ", + "operations": { + "AddTagsToResource": "

    Adds or overwrites one or more tags for the specified AWS Directory Service directory. Each directory can have a maximum of 10 tags. Each tag consists of a key and an optional value. Tag keys must be unique per resource.

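    As a usage sketch only (assuming this model is exposed through the vendored SDK's generated directoryservice package; the region, directory ID, and tag values are placeholders):

        package main

        import (
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/directoryservice"
        )

        func main() {
            // Build a client from the default credential/region chain.
            sess := session.Must(session.NewSession())
            svc := directoryservice.New(sess)

            // ResourceId and Tags mirror the AddTagsToResourceRequest shape above.
            _, err := svc.AddTagsToResource(&directoryservice.AddTagsToResourceInput{
                ResourceId: aws.String("d-1234567890"), // placeholder directory ID
                Tags: []*directoryservice.Tag{
                    {Key: aws.String("environment"), Value: aws.String("staging")},
                },
            })
            if err != nil {
                log.Fatal(err)
            }
        }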
    ", + "ConnectDirectory": "

    Creates an AD Connector to connect to an on-premises directory.

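    A minimal sketch of the corresponding call, reusing sess/svc from the AddTagsToResource sketch above (all names, IDs, and addresses are placeholders):

        out, err := svc.ConnectDirectory(&directoryservice.ConnectDirectoryInput{
            Name:     aws.String("corp.example.com"),
            Password: aws.String("on-prem-service-account-password"), // sensitive
            Size:     aws.String("Small"),                            // enum: Small | Large
            ConnectSettings: &directoryservice.DirectoryConnectSettings{
                VpcId:            aws.String("vpc-0a1b2c3d"),
                SubnetIds:        []*string{aws.String("subnet-0a1b2c3d"), aws.String("subnet-1a1b2c3d")},
                CustomerDnsIps:   []*string{aws.String("10.0.0.10")}, // on-premises DNS/domain controller
                CustomerUserName: aws.String("Admin"),
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        log.Println("new AD Connector:", aws.StringValue(out.DirectoryId))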
    ", + "CreateAlias": "

    Creates an alias for a directory and assigns the alias to the directory. The alias is used to construct the access URL for the directory, such as http://<alias>.awsapps.com.

    After an alias has been created, it cannot be deleted or reused, so this operation should only be used when absolutely necessary.

    ", + "CreateComputer": "

    Creates a computer account in the specified directory, and joins the computer to the directory.

    ", + "CreateConditionalForwarder": "

    Creates a conditional forwarder associated with your AWS directory. Conditional forwarders are required in order to set up a trust relationship with another domain. The conditional forwarder points to the trusted domain.

    ", + "CreateDirectory": "

    Creates a Simple AD directory.

    ", + "CreateMicrosoftAD": "

    Creates a Microsoft AD in the AWS cloud.

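    For example (same hedged setup as the sketches above; the domain, password, and VPC details are placeholders, and the password must satisfy the Password shape's policy described later in this file):

        out, err := svc.CreateMicrosoftAD(&directoryservice.CreateMicrosoftADInput{
            Name:     aws.String("corp.example.com"),      // resolves inside the VPC only
            Password: aws.String("Str0ngAdminPassw0rd!"),  // for the default Admin user
            VpcSettings: &directoryservice.DirectoryVpcSettings{
                VpcId:     aws.String("vpc-0a1b2c3d"),
                SubnetIds: []*string{aws.String("subnet-0a1b2c3d"), aws.String("subnet-1a1b2c3d")},
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        log.Println("new Microsoft AD:", aws.StringValue(out.DirectoryId))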
    ", + "CreateSnapshot": "

    Creates a snapshot of a Simple AD or Microsoft AD directory in the AWS cloud.

    You cannot take snapshots of AD Connector directories.

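    A hedged sketch of the call (placeholder IDs, client as above):

        snap, err := svc.CreateSnapshot(&directoryservice.CreateSnapshotInput{
            DirectoryId: aws.String("d-1234567890"), // must be a Simple AD or Microsoft AD directory
            Name:        aws.String("pre-schema-change"),
        })
        if err != nil {
            log.Fatal(err)
        }
        log.Println("snapshot:", aws.StringValue(snap.SnapshotId))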
    ", + "CreateTrust": "

    AWS Directory Service for Microsoft Active Directory allows you to configure trust relationships. For example, you can establish a trust between your Microsoft AD in the AWS cloud, and your existing on-premises Microsoft Active Directory. This would allow you to provide users and groups access to resources in either domain, with a single set of credentials.

    This action initiates the creation of the AWS side of a trust relationship between a Microsoft AD in the AWS cloud and an external domain.

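    A sketch of initiating the AWS side of a trust (placeholders throughout; the enum strings come from the TrustDirection and TrustType shapes below):

        trust, err := svc.CreateTrust(&directoryservice.CreateTrustInput{
            DirectoryId:      aws.String("d-1234567890"),
            RemoteDomainName: aws.String("onprem.example.com"),
            TrustPassword:    aws.String("trust-secret"), // must match the password set on the remote side
            TrustDirection:   aws.String("Two-Way"),      // One-Way: Outgoing | One-Way: Incoming | Two-Way
            TrustType:        aws.String("Forest"),
            ConditionalForwarderIpAddrs: []*string{aws.String("192.0.2.10")},
        })
        if err != nil {
            log.Fatal(err)
        }
        log.Println("trust:", aws.StringValue(trust.TrustId))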
    ", + "DeleteConditionalForwarder": "

    Deletes a conditional forwarder that has been set up for your AWS directory.

    ", + "DeleteDirectory": "

    Deletes an AWS Directory Service directory.

    ", + "DeleteSnapshot": "

    Deletes a directory snapshot.

    ", + "DeleteTrust": "

    Deletes an existing trust relationship between your Microsoft AD in the AWS cloud and an external domain.

    ", + "DeregisterEventTopic": "

    Removes the specified directory as a publisher to the specified SNS topic.

    ", + "DescribeConditionalForwarders": "

    Obtains information about the conditional forwarders for this account.

    If no input parameters are provided for RemoteDomainNames, this request describes all conditional forwarders for the specified directory ID.

    ", + "DescribeDirectories": "

    Obtains information about the directories that belong to this account.

    You can retrieve information about specific directories by passing the directory identifiers in the DirectoryIds parameter. Otherwise, all directories that belong to the current account are returned.

    This operation supports pagination with the use of the NextToken request and response parameters. If more results are available, the DescribeDirectoriesResult.NextToken member contains a token that you pass in the next call to DescribeDirectories to retrieve the next set of items.

    You can also specify a maximum number of results to return with the Limit parameter.

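    The NextToken handshake looks roughly like this (hedged sketch, client as above; error handling kept minimal):

        input := &directoryservice.DescribeDirectoriesInput{Limit: aws.Int64(50)}
        for {
            page, err := svc.DescribeDirectories(input)
            if err != nil {
                log.Fatal(err)
            }
            for _, d := range page.DirectoryDescriptions {
                log.Println(aws.StringValue(d.DirectoryId), aws.StringValue(d.Name), aws.StringValue(d.Stage))
            }
            if page.NextToken == nil { // no more pages
                break
            }
            input.NextToken = page.NextToken // feed the token back into the next call
        }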
    ", + "DescribeEventTopics": "

    Obtains information about which SNS topics receive status messages from the specified directory.

    If no input parameters are provided, such as DirectoryId or TopicName, this request describes all of the associations in the account.

    ", + "DescribeSnapshots": "

    Obtains information about the directory snapshots that belong to this account.

    This operation supports pagination with the use of the NextToken request and response parameters. If more results are available, the DescribeSnapshots.NextToken member contains a token that you pass in the next call to DescribeSnapshots to retrieve the next set of items.

    You can also specify a maximum number of results to return with the Limit parameter.

    ", + "DescribeTrusts": "

    Obtains information about the trust relationships for this account.

    If no input parameters are provided, such as DirectoryId or TrustIds, this request describes all the trust relationships belonging to the account.

    ", + "DisableRadius": "

    Disables multi-factor authentication (MFA) with the Remote Authentication Dial In User Service (RADIUS) server for an AD Connector directory.

    ", + "DisableSso": "

    Disables single sign-on for a directory.

    ", + "EnableRadius": "

    Enables multi-factor authentication (MFA) with the Remote Authentication Dial In User Service (RADIUS) server for an AD Connector directory.

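    A sketch of the call (placeholders; the RadiusSettings fields and their bounds come from the shapes below):

        _, err := svc.EnableRadius(&directoryservice.EnableRadiusInput{
            DirectoryId: aws.String("d-1234567890"),
            RadiusSettings: &directoryservice.RadiusSettings{
                RadiusServers:          []*string{aws.String("radius1.example.com")},
                RadiusPort:             aws.Int64(1812), // model allows 1025-65535
                RadiusTimeout:          aws.Int64(5),    // seconds, 1-20
                RadiusRetries:          aws.Int64(3),    // 0-10
                SharedSecret:           aws.String("radius-shared-secret"), // sensitive
                AuthenticationProtocol: aws.String("PAP"), // PAP | CHAP | MS-CHAPv1 | MS-CHAPv2
                UseSameUsername:        aws.Bool(true),
            },
        })
        if err != nil {
            log.Fatal(err)
        }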
    ", + "EnableSso": "

    Enables single sign-on for a directory.

    ", + "GetDirectoryLimits": "

    Obtains directory limit information for the current region.

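    For example, reading back the DirectoryLimits shape defined below (same hedged client setup):

        limits, err := svc.GetDirectoryLimits(&directoryservice.GetDirectoryLimitsInput{})
        if err != nil {
            log.Fatal(err)
        }
        dl := limits.DirectoryLimits
        log.Printf("cloud directories: %d/%d (limit reached: %t)",
            aws.Int64Value(dl.CloudOnlyDirectoriesCurrentCount),
            aws.Int64Value(dl.CloudOnlyDirectoriesLimit),
            aws.BoolValue(dl.CloudOnlyDirectoriesLimitReached))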
    ", + "GetSnapshotLimits": "

    Obtains the manual snapshot limits for a directory.

    ", + "ListTagsForResource": "

    Lists all tags on an AWS Directory Service directory.

    ", + "RegisterEventTopic": "

    Associates a directory with an SNS topic. This establishes the directory as a publisher to the specified SNS topic. You can then receive email or text (SMS) messages when the status of your directory changes. You get notified if your directory goes from an Active status to an Impaired or Inoperable status. You also receive a notification when the directory returns to an Active status.

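    A sketch of the association call (placeholder IDs; the SNS topic is assumed to exist already):

        _, err := svc.RegisterEventTopic(&directoryservice.RegisterEventTopicInput{
            DirectoryId: aws.String("d-1234567890"),
            TopicName:   aws.String("directory-status-alerts"), // name of an existing SNS topic
        })
        if err != nil {
            log.Fatal(err)
        }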
    ", + "RemoveTagsFromResource": "

    Removes tags from an AWS Directory Service directory.

    ", + "RestoreFromSnapshot": "

    Restores a directory using an existing directory snapshot.

    When you restore a directory from a snapshot, any changes made to the directory after the snapshot date are overwritten.

    This action returns as soon as the restore operation is initiated. You can monitor the progress of the restore operation by calling the DescribeDirectories operation with the directory identifier. When the DirectoryDescription.Stage value changes to Active, the restore operation is complete.

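    A hedged sketch of that initiate-then-poll pattern (placeholder IDs; also needs the standard time package):

        _, err := svc.RestoreFromSnapshot(&directoryservice.RestoreFromSnapshotInput{
            SnapshotId: aws.String("s-1234567890"),
        })
        if err != nil {
            log.Fatal(err)
        }
        // Poll until the directory stage returns to Active.
        for {
            out, err := svc.DescribeDirectories(&directoryservice.DescribeDirectoriesInput{
                DirectoryIds: []*string{aws.String("d-1234567890")},
            })
            if err != nil {
                log.Fatal(err)
            }
            if len(out.DirectoryDescriptions) > 0 &&
                aws.StringValue(out.DirectoryDescriptions[0].Stage) == "Active" {
                break
            }
            time.Sleep(30 * time.Second)
        }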
    ", + "UpdateConditionalForwarder": "

    Updates a conditional forwarder that has been set up for your AWS directory.

    ", + "UpdateRadius": "

    Updates the Remote Authentication Dial In User Service (RADIUS) server information for an AD Connector directory.

    ", + "VerifyTrust": "

    AWS Directory Service for Microsoft Active Directory allows you to configure and verify trust relationships.

    This action verifies a trust relationship between your Microsoft AD in the AWS cloud and an external domain.

    " + }, + "shapes": { + "AccessUrl": { + "base": null, + "refs": { + "DirectoryDescription$AccessUrl": "

    The access URL for the directory, such as http://<alias>.awsapps.com. If no alias has been created for the directory, <alias> is the directory identifier, such as d-XXXXXXXXXX.

    " + } + }, + "AddTagsToResourceRequest": { + "base": null, + "refs": { + } + }, + "AddTagsToResourceResult": { + "base": null, + "refs": { + } + }, + "AliasName": { + "base": null, + "refs": { + "CreateAliasRequest$Alias": "

    The requested alias.

    The alias must be unique amongst all aliases in AWS. This operation throws an EntityAlreadyExistsException error if the alias already exists.

    ", + "CreateAliasResult$Alias": "

    The alias for the directory.

    ", + "DirectoryDescription$Alias": "

    The alias for the directory. If no alias has been created for the directory, the alias is the directory identifier, such as d-XXXXXXXXXX.

    " + } + }, + "Attribute": { + "base": "

    Represents a named directory attribute.

    ", + "refs": { + "Attributes$member": null + } + }, + "AttributeName": { + "base": null, + "refs": { + "Attribute$Name": "

    The name of the attribute.

    " + } + }, + "AttributeValue": { + "base": null, + "refs": { + "Attribute$Value": "

    The value of the attribute.

    " + } + }, + "Attributes": { + "base": null, + "refs": { + "Computer$ComputerAttributes": "

    An array of Attribute objects containing the LDAP attributes that belong to the computer account.

    ", + "CreateComputerRequest$ComputerAttributes": "

    An array of Attribute objects that contain any LDAP attributes to apply to the computer account.

    " + } + }, + "AuthenticationFailedException": { + "base": "

    An authentication error occurred.

    ", + "refs": { + } + }, + "AvailabilityZone": { + "base": null, + "refs": { + "AvailabilityZones$member": null + } + }, + "AvailabilityZones": { + "base": null, + "refs": { + "DirectoryConnectSettingsDescription$AvailabilityZones": "

    A list of the Availability Zones that the directory is in.

    ", + "DirectoryVpcSettingsDescription$AvailabilityZones": "

    The list of Availability Zones that the directory is in.

    " + } + }, + "ClientException": { + "base": "

    A client exception has occurred.

    ", + "refs": { + } + }, + "CloudOnlyDirectoriesLimitReached": { + "base": null, + "refs": { + "DirectoryLimits$CloudOnlyDirectoriesLimitReached": "

    Indicates if the cloud directory limit has been reached.

    ", + "DirectoryLimits$CloudOnlyMicrosoftADLimitReached": "

    Indicates if the Microsoft AD directory limit has been reached.

    " + } + }, + "Computer": { + "base": "

    Contains information about a computer account in a directory.

    ", + "refs": { + "CreateComputerResult$Computer": "

    A Computer object that represents the computer account.

    " + } + }, + "ComputerName": { + "base": null, + "refs": { + "Computer$ComputerName": "

    The computer name.

    ", + "CreateComputerRequest$ComputerName": "

    The name of the computer account.

    " + } + }, + "ComputerPassword": { + "base": null, + "refs": { + "CreateComputerRequest$Password": "

    A one-time password that is used to join the computer to the directory. You should generate a random, strong password to use for this parameter.

    " + } + }, + "ConditionalForwarder": { + "base": "

    Points to a remote domain with which you are setting up a trust relationship. Conditional forwarders are required in order to set up a trust relationship with another domain.

    ", + "refs": { + "ConditionalForwarders$member": null + } + }, + "ConditionalForwarders": { + "base": null, + "refs": { + "DescribeConditionalForwardersResult$ConditionalForwarders": "

    The list of conditional forwarders that have been created.

    " + } + }, + "ConnectDirectoryRequest": { + "base": "

    Contains the inputs for the ConnectDirectory operation.

    ", + "refs": { + } + }, + "ConnectDirectoryResult": { + "base": "

    Contains the results of the ConnectDirectory operation.

    ", + "refs": { + } + }, + "ConnectPassword": { + "base": null, + "refs": { + "ConnectDirectoryRequest$Password": "

    The password for the on-premises user account.

    ", + "DisableSsoRequest$Password": "

    The password of an alternate account to use to disable single sign-on. This is only used for AD Connector directories. For more information, see the UserName parameter.

    ", + "EnableSsoRequest$Password": "

    The password of an alternate account to use to enable single sign-on. This is only used for AD Connector directories. For more information, see the UserName parameter.

    " + } + }, + "ConnectedDirectoriesLimitReached": { + "base": null, + "refs": { + "DirectoryLimits$ConnectedDirectoriesLimitReached": "

    Indicates if the connected directory limit has been reached.

    " + } + }, + "CreateAliasRequest": { + "base": "

    Contains the inputs for the CreateAlias operation.

    ", + "refs": { + } + }, + "CreateAliasResult": { + "base": "

    Contains the results of the CreateAlias operation.

    ", + "refs": { + } + }, + "CreateComputerRequest": { + "base": "

    Contains the inputs for the CreateComputer operation.

    ", + "refs": { + } + }, + "CreateComputerResult": { + "base": "

    Contains the results for the CreateComputer operation.

    ", + "refs": { + } + }, + "CreateConditionalForwarderRequest": { + "base": "

    Initiates the creation of a conditional forwarder for your AWS Directory Service for Microsoft Active Directory. Conditional forwarders are required in order to set up a trust relationship with another domain.

    ", + "refs": { + } + }, + "CreateConditionalForwarderResult": { + "base": "

    The result of a CreateConditionalForwarder request.

    ", + "refs": { + } + }, + "CreateDirectoryRequest": { + "base": "

    Contains the inputs for the CreateDirectory operation.

    ", + "refs": { + } + }, + "CreateDirectoryResult": { + "base": "

    Contains the results of the CreateDirectory operation.

    ", + "refs": { + } + }, + "CreateMicrosoftADRequest": { + "base": "

    Creates a Microsoft AD in the AWS cloud.

    ", + "refs": { + } + }, + "CreateMicrosoftADResult": { + "base": "

    Result of a CreateMicrosoftAD request.

    ", + "refs": { + } + }, + "CreateSnapshotRequest": { + "base": "

    Contains the inputs for the CreateSnapshot operation.

    ", + "refs": { + } + }, + "CreateSnapshotResult": { + "base": "

    Contains the results of the CreateSnapshot operation.

    ", + "refs": { + } + }, + "CreateTrustRequest": { + "base": "

    AWS Directory Service for Microsoft Active Directory allows you to configure trust relationships. For example, you can establish a trust between your Microsoft AD in the AWS cloud, and your existing on-premises Microsoft Active Directory. This would allow you to provide users and groups access to resources in either domain, with a single set of credentials.

    This action initiates the creation of the AWS side of a trust relationship between a Microsoft AD in the AWS cloud and an external domain.

    ", + "refs": { + } + }, + "CreateTrustResult": { + "base": "

    The result of a CreateTrust request.

    ", + "refs": { + } + }, + "CreatedDateTime": { + "base": null, + "refs": { + "EventTopic$CreatedDateTime": "

    The date and time of when you associated your directory with the SNS topic.

    ", + "Trust$CreatedDateTime": "

    The date and time that the trust relationship was created.

    " + } + }, + "DeleteAssociatedConditionalForwarder": { + "base": null, + "refs": { + "DeleteTrustRequest$DeleteAssociatedConditionalForwarder": "

    Delete a conditional forwarder as part of a DeleteTrustRequest.

    " + } + }, + "DeleteConditionalForwarderRequest": { + "base": "

    Deletes a conditional forwarder.

    ", + "refs": { + } + }, + "DeleteConditionalForwarderResult": { + "base": "

    The result of a DeleteConditionalForwarder request.

    ", + "refs": { + } + }, + "DeleteDirectoryRequest": { + "base": "

    Contains the inputs for the DeleteDirectory operation.

    ", + "refs": { + } + }, + "DeleteDirectoryResult": { + "base": "

    Contains the results of the DeleteDirectory operation.

    ", + "refs": { + } + }, + "DeleteSnapshotRequest": { + "base": "

    Contains the inputs for the DeleteSnapshot operation.

    ", + "refs": { + } + }, + "DeleteSnapshotResult": { + "base": "

    Contains the results of the DeleteSnapshot operation.

    ", + "refs": { + } + }, + "DeleteTrustRequest": { + "base": "

    Deletes the local side of an existing trust relationship between the Microsoft AD in the AWS cloud and the external domain.

    ", + "refs": { + } + }, + "DeleteTrustResult": { + "base": "

    The result of a DeleteTrust request.

    ", + "refs": { + } + }, + "DeregisterEventTopicRequest": { + "base": "

    Removes the specified directory as a publisher to the specified SNS topic.

    ", + "refs": { + } + }, + "DeregisterEventTopicResult": { + "base": "

    The result of a DeregisterEventTopic request.

    ", + "refs": { + } + }, + "DescribeConditionalForwardersRequest": { + "base": "

    Describes a conditional forwarder.

    ", + "refs": { + } + }, + "DescribeConditionalForwardersResult": { + "base": "

    The result of a DescribeConditionalForwarders request.

    ", + "refs": { + } + }, + "DescribeDirectoriesRequest": { + "base": "

    Contains the inputs for the DescribeDirectories operation.

    ", + "refs": { + } + }, + "DescribeDirectoriesResult": { + "base": "

    Contains the results of the DescribeDirectories operation.

    ", + "refs": { + } + }, + "DescribeEventTopicsRequest": { + "base": "

    Describes event topics.

    ", + "refs": { + } + }, + "DescribeEventTopicsResult": { + "base": "

    The result of a DescribeEventTopics request.

    ", + "refs": { + } + }, + "DescribeSnapshotsRequest": { + "base": "

    Contains the inputs for the DescribeSnapshots operation.

    ", + "refs": { + } + }, + "DescribeSnapshotsResult": { + "base": "

    Contains the results of the DescribeSnapshots operation.

    ", + "refs": { + } + }, + "DescribeTrustsRequest": { + "base": "

    Describes the trust relationships for a particular Microsoft AD in the AWS cloud. If no input parameters are provided, such as directory ID or trust ID, this request describes all the trust relationships.

    ", + "refs": { + } + }, + "DescribeTrustsResult": { + "base": "

    The result of a DescribeTrusts request.

    ", + "refs": { + } + }, + "Description": { + "base": null, + "refs": { + "ConnectDirectoryRequest$Description": "

    A textual description for the directory.

    ", + "CreateDirectoryRequest$Description": "

    A textual description for the directory.

    ", + "CreateMicrosoftADRequest$Description": "

    A textual description for the directory. This label will appear on the AWS console Directory Details page after the directory is created.

    ", + "DirectoryDescription$Description": "

    The textual description for the directory.

    " + } + }, + "DirectoryConnectSettings": { + "base": "

    Contains information for the ConnectDirectory operation when an AD Connector directory is being created.

    ", + "refs": { + "ConnectDirectoryRequest$ConnectSettings": "

    A DirectoryConnectSettings object that contains additional information for the operation.

    " + } + }, + "DirectoryConnectSettingsDescription": { + "base": "

    Contains information about an AD Connector directory.

    ", + "refs": { + "DirectoryDescription$ConnectSettings": "

    A DirectoryConnectSettingsDescription object that contains additional information about an AD Connector directory. This member is only present if the directory is an AD Connector directory.

    " + } + }, + "DirectoryDescription": { + "base": "

    Contains information about an AWS Directory Service directory.

    ", + "refs": { + "DirectoryDescriptions$member": null + } + }, + "DirectoryDescriptions": { + "base": "

    A list of directory descriptions.

    ", + "refs": { + "DescribeDirectoriesResult$DirectoryDescriptions": "

    The list of DirectoryDescription objects that were retrieved.

    It is possible that this list contains fewer than the number of items specified in the Limit member of the request. This occurs if there are fewer than the requested number of items left to retrieve, or if the limitations of the operation have been exceeded.

    " + } + }, + "DirectoryId": { + "base": null, + "refs": { + "ConnectDirectoryResult$DirectoryId": "

    The identifier of the new directory.

    ", + "CreateAliasRequest$DirectoryId": "

    The identifier of the directory for which to create the alias.

    ", + "CreateAliasResult$DirectoryId": "

    The identifier of the directory.

    ", + "CreateComputerRequest$DirectoryId": "

    The identifier of the directory in which to create the computer account.

    ", + "CreateConditionalForwarderRequest$DirectoryId": "

    The directory ID of the AWS directory for which you are creating the conditional forwarder.

    ", + "CreateDirectoryResult$DirectoryId": "

    The identifier of the directory that was created.

    ", + "CreateMicrosoftADResult$DirectoryId": "

    The identifier of the directory that was created.

    ", + "CreateSnapshotRequest$DirectoryId": "

    The identifier of the directory of which to take a snapshot.

    ", + "CreateTrustRequest$DirectoryId": "

    The Directory ID of the Microsoft AD in the AWS cloud for which to establish the trust relationship.

    ", + "DeleteConditionalForwarderRequest$DirectoryId": "

    The directory ID for which you are deleting the conditional forwarder.

    ", + "DeleteDirectoryRequest$DirectoryId": "

    The identifier of the directory to delete.

    ", + "DeleteDirectoryResult$DirectoryId": "

    The directory identifier.

    ", + "DeregisterEventTopicRequest$DirectoryId": "

    The Directory ID to remove as a publisher. This directory will no longer send messages to the specified SNS topic.

    ", + "DescribeConditionalForwardersRequest$DirectoryId": "

    The directory ID for which to get the list of associated conditional forwarders.

    ", + "DescribeEventTopicsRequest$DirectoryId": "

    The Directory ID for which to get the list of associated SNS topics. If this member is null, associations for all Directory IDs are returned.

    ", + "DescribeSnapshotsRequest$DirectoryId": "

    The identifier of the directory for which to retrieve snapshot information.

    ", + "DescribeTrustsRequest$DirectoryId": "

    The Directory ID of the AWS directory that is a part of the requested trust relationship.

    ", + "DirectoryDescription$DirectoryId": "

    The directory identifier.

    ", + "DirectoryIds$member": null, + "DisableRadiusRequest$DirectoryId": "

    The identifier of the directory for which to disable MFA.

    ", + "DisableSsoRequest$DirectoryId": "

    The identifier of the directory for which to disable single sign-on.

    ", + "EnableRadiusRequest$DirectoryId": "

    The identifier of the directory for which to enable MFA.

    ", + "EnableSsoRequest$DirectoryId": "

    The identifier of the directory for which to enable single sign-on.

    ", + "EventTopic$DirectoryId": "

    The Directory ID of an AWS Directory Service directory that will publish status messages to an SNS topic.

    ", + "GetSnapshotLimitsRequest$DirectoryId": "

    Contains the identifier of the directory to obtain the limits for.

    ", + "RegisterEventTopicRequest$DirectoryId": "

    The Directory ID that will publish status messages to the SNS topic.

    ", + "Snapshot$DirectoryId": "

    The directory identifier.

    ", + "Trust$DirectoryId": "

    The Directory ID of the AWS directory involved in the trust relationship.

    ", + "UpdateConditionalForwarderRequest$DirectoryId": "

    The directory ID of the AWS directory for which to update the conditional forwarder.

    ", + "UpdateRadiusRequest$DirectoryId": "

    The identifier of the directory for which to update the RADIUS server information.

    " + } + }, + "DirectoryIds": { + "base": "

    A list of directory identifiers.

    ", + "refs": { + "DescribeDirectoriesRequest$DirectoryIds": "

    A list of identifiers of the directories for which to obtain the information. If this member is null, all directories that belong to the current account are returned.

    An empty list results in an InvalidParameterException being thrown.

    " + } + }, + "DirectoryLimitExceededException": { + "base": "

    The maximum number of directories in the region has been reached. You can use the GetDirectoryLimits operation to determine your directory limits in the region.

    ", + "refs": { + } + }, + "DirectoryLimits": { + "base": "

    Contains directory limit information for a region.

    ", + "refs": { + "GetDirectoryLimitsResult$DirectoryLimits": "

    A DirectoryLimits object that contains the directory limits for the current region.

    " + } + }, + "DirectoryName": { + "base": null, + "refs": { + "ConnectDirectoryRequest$Name": "

    The fully-qualified name of the on-premises directory, such as corp.example.com.

    ", + "CreateDirectoryRequest$Name": "

    The fully qualified name for the directory, such as corp.example.com.

    ", + "CreateMicrosoftADRequest$Name": "

    The fully qualified domain name for the directory, such as corp.example.com. This name will resolve inside your VPC only. It does not need to be publicly resolvable.

    ", + "DirectoryDescription$Name": "

    The fully-qualified name of the directory.

    " + } + }, + "DirectoryShortName": { + "base": null, + "refs": { + "ConnectDirectoryRequest$ShortName": "

    The NetBIOS name of the on-premises directory, such as CORP.

    ", + "CreateDirectoryRequest$ShortName": "

    The short name of the directory, such as CORP.

    ", + "CreateMicrosoftADRequest$ShortName": "

    The NetBIOS name for your domain. A short identifier for your domain, such as CORP. If you don't specify a NetBIOS name, it will default to the first part of your directory DNS. For example, CORP for the directory DNS corp.example.com.

    ", + "DirectoryDescription$ShortName": "

    The short name of the directory.

    " + } + }, + "DirectorySize": { + "base": null, + "refs": { + "ConnectDirectoryRequest$Size": "

    The size of the directory.

    ", + "CreateDirectoryRequest$Size": "

    The size of the directory.

    ", + "DirectoryDescription$Size": "

    The directory size.

    " + } + }, + "DirectoryStage": { + "base": null, + "refs": { + "DirectoryDescription$Stage": "

    The current stage of the directory.

    " + } + }, + "DirectoryType": { + "base": null, + "refs": { + "DirectoryDescription$Type": "

    The directory type.

    " + } + }, + "DirectoryUnavailableException": { + "base": "

    The specified directory is unavailable or could not be found.

    ", + "refs": { + } + }, + "DirectoryVpcSettings": { + "base": "

    Contains VPC information for the CreateDirectory or CreateMicrosoftAD operation.

    ", + "refs": { + "CreateDirectoryRequest$VpcSettings": "

    A DirectoryVpcSettings object that contains additional information for the operation.

    ", + "CreateMicrosoftADRequest$VpcSettings": null + } + }, + "DirectoryVpcSettingsDescription": { + "base": "

    Contains information about the directory.

    ", + "refs": { + "DirectoryDescription$VpcSettings": "

    A DirectoryVpcSettingsDescription object that contains additional information about a directory. This member is only present if the directory is a Simple AD or Managed AD directory.

    " + } + }, + "DisableRadiusRequest": { + "base": "

    Contains the inputs for the DisableRadius operation.

    ", + "refs": { + } + }, + "DisableRadiusResult": { + "base": "

    Contains the results of the DisableRadius operation.

    ", + "refs": { + } + }, + "DisableSsoRequest": { + "base": "

    Contains the inputs for the DisableSso operation.

    ", + "refs": { + } + }, + "DisableSsoResult": { + "base": "

    Contains the results of the DisableSso operation.

    ", + "refs": { + } + }, + "DnsIpAddrs": { + "base": null, + "refs": { + "ConditionalForwarder$DnsIpAddrs": "

    The IP addresses of the remote DNS servers associated with RemoteDomainName. These are the IP addresses of the DNS servers that your conditional forwarder points to.

    ", + "CreateConditionalForwarderRequest$DnsIpAddrs": "

    The IP addresses of the remote DNS server associated with RemoteDomainName.

    ", + "CreateTrustRequest$ConditionalForwarderIpAddrs": "

    The IP addresses of the remote DNS server associated with RemoteDomainName.

    ", + "DirectoryConnectSettings$CustomerDnsIps": "

    A list of one or more IP addresses of DNS servers or domain controllers in the on-premises directory.

    ", + "DirectoryDescription$DnsIpAddrs": "

    The IP addresses of the DNS servers for the directory. For a Simple AD or Microsoft AD directory, these are the IP addresses of the Simple AD or Microsoft AD directory servers. For an AD Connector directory, these are the IP addresses of the DNS servers or domain controllers in the on-premises directory to which the AD Connector is connected.

    ", + "UpdateConditionalForwarderRequest$DnsIpAddrs": "

    The updated IP addresses of the remote DNS server associated with the conditional forwarder.

    " + } + }, + "EnableRadiusRequest": { + "base": "

    Contains the inputs for the EnableRadius operation.

    ", + "refs": { + } + }, + "EnableRadiusResult": { + "base": "

    Contains the results of the EnableRadius operation.

    ", + "refs": { + } + }, + "EnableSsoRequest": { + "base": "

    Contains the inputs for the EnableSso operation.

    ", + "refs": { + } + }, + "EnableSsoResult": { + "base": "

    Contains the results of the EnableSso operation.

    ", + "refs": { + } + }, + "EntityAlreadyExistsException": { + "base": "

    The specified entity already exists.

    ", + "refs": { + } + }, + "EntityDoesNotExistException": { + "base": "

    The specified entity could not be found.

    ", + "refs": { + } + }, + "EventTopic": { + "base": "

    Information about SNS topic and AWS Directory Service directory associations.

    ", + "refs": { + "EventTopics$member": null + } + }, + "EventTopics": { + "base": null, + "refs": { + "DescribeEventTopicsResult$EventTopics": "

    A list of SNS topic names that receive status messages from the specified Directory ID.

    " + } + }, + "ExceptionMessage": { + "base": "

    The descriptive message for the exception.

    ", + "refs": { + "AuthenticationFailedException$Message": "

    The textual message for the exception.

    ", + "ClientException$Message": null, + "DirectoryLimitExceededException$Message": null, + "DirectoryUnavailableException$Message": null, + "EntityAlreadyExistsException$Message": null, + "EntityDoesNotExistException$Message": null, + "InsufficientPermissionsException$Message": null, + "InvalidNextTokenException$Message": null, + "InvalidParameterException$Message": null, + "ServiceException$Message": null, + "SnapshotLimitExceededException$Message": null, + "TagLimitExceededException$Message": null, + "UnsupportedOperationException$Message": null + } + }, + "GetDirectoryLimitsRequest": { + "base": "

    Contains the inputs for the GetDirectoryLimits operation.

    ", + "refs": { + } + }, + "GetDirectoryLimitsResult": { + "base": "

    Contains the results of the GetDirectoryLimits operation.

    ", + "refs": { + } + }, + "GetSnapshotLimitsRequest": { + "base": "

    Contains the inputs for the GetSnapshotLimits operation.

    ", + "refs": { + } + }, + "GetSnapshotLimitsResult": { + "base": "

    Contains the results of the GetSnapshotLimits operation.

    ", + "refs": { + } + }, + "InsufficientPermissionsException": { + "base": "

    The account does not have sufficient permission to perform the operation.

    ", + "refs": { + } + }, + "InvalidNextTokenException": { + "base": "

    The NextToken value is not valid.

    ", + "refs": { + } + }, + "InvalidParameterException": { + "base": "

    One or more parameters are not valid.

    ", + "refs": { + } + }, + "IpAddr": { + "base": null, + "refs": { + "DnsIpAddrs$member": null, + "IpAddrs$member": null + } + }, + "IpAddrs": { + "base": null, + "refs": { + "DirectoryConnectSettingsDescription$ConnectIps": "

    The IP addresses of the AD Connector servers.

    " + } + }, + "LastUpdatedDateTime": { + "base": null, + "refs": { + "DirectoryDescription$StageLastUpdatedDateTime": "

    The date and time that the stage was last updated.

    ", + "Trust$LastUpdatedDateTime": "

    The date and time that the trust relationship was last updated.

    " + } + }, + "LaunchTime": { + "base": null, + "refs": { + "DirectoryDescription$LaunchTime": "

    Specifies when the directory was created.

    " + } + }, + "Limit": { + "base": null, + "refs": { + "DescribeDirectoriesRequest$Limit": "

    The maximum number of items to return. If this value is zero, the maximum number of items is specified by the limitations of the operation.

    ", + "DescribeSnapshotsRequest$Limit": "

    The maximum number of objects to return.

    ", + "DescribeTrustsRequest$Limit": "

    The maximum number of objects to return.

    ", + "DirectoryLimits$CloudOnlyDirectoriesLimit": "

    The maximum number of cloud directories allowed in the region.

    ", + "DirectoryLimits$CloudOnlyDirectoriesCurrentCount": "

    The current number of cloud directories in the region.

    ", + "DirectoryLimits$CloudOnlyMicrosoftADLimit": "

    The maximum number of Microsoft AD directories allowed in the region.

    ", + "DirectoryLimits$CloudOnlyMicrosoftADCurrentCount": "

    The current number of Microsoft AD directories in the region.

    ", + "DirectoryLimits$ConnectedDirectoriesLimit": "

    The maximum number of connected directories allowed in the region.

    ", + "DirectoryLimits$ConnectedDirectoriesCurrentCount": "

    The current number of connected directories in the region.

    ", + "ListTagsForResourceRequest$Limit": "

    Reserved for future use.

    ", + "SnapshotLimits$ManualSnapshotsLimit": "

    The maximum number of manual snapshots allowed.

    ", + "SnapshotLimits$ManualSnapshotsCurrentCount": "

    The current number of manual snapshots of the directory.

    " + } + }, + "ListTagsForResourceRequest": { + "base": null, + "refs": { + } + }, + "ListTagsForResourceResult": { + "base": null, + "refs": { + } + }, + "ManualSnapshotsLimitReached": { + "base": null, + "refs": { + "SnapshotLimits$ManualSnapshotsLimitReached": "

    Indicates if the manual snapshot limit has been reached.

    " + } + }, + "NextToken": { + "base": null, + "refs": { + "DescribeDirectoriesRequest$NextToken": "

    The DescribeDirectoriesResult.NextToken value from a previous call to DescribeDirectories. Pass null if this is the first call.

    ", + "DescribeDirectoriesResult$NextToken": "

    If not null, more results are available. Pass this value for the NextToken parameter in a subsequent call to DescribeDirectories to retrieve the next set of items.

    ", + "DescribeSnapshotsRequest$NextToken": "

    The DescribeSnapshotsResult.NextToken value from a previous call to DescribeSnapshots. Pass null if this is the first call.

    ", + "DescribeSnapshotsResult$NextToken": "

    If not null, more results are available. Pass this value in the NextToken member of a subsequent call to DescribeSnapshots.

    ", + "DescribeTrustsRequest$NextToken": "

    The DescribeTrustsResult.NextToken value from a previous call to DescribeTrusts. Pass null if this is the first call.

    ", + "DescribeTrustsResult$NextToken": "

    If not null, more results are available. Pass this value for the NextToken parameter in a subsequent call to DescribeTrusts to retrieve the next set of items.

    ", + "ListTagsForResourceRequest$NextToken": "

    Reserved for future use.

    ", + "ListTagsForResourceResult$NextToken": "

    Reserved for future use.

    " + } + }, + "OrganizationalUnitDN": { + "base": null, + "refs": { + "CreateComputerRequest$OrganizationalUnitDistinguishedName": "

    The fully-qualified distinguished name of the organizational unit to place the computer account in.

    " + } + }, + "Password": { + "base": null, + "refs": { + "CreateDirectoryRequest$Password": "

    The password for the directory administrator. The directory creation process creates a directory administrator account with the username Administrator and this password.

    ", + "CreateMicrosoftADRequest$Password": "

    The password for the default administrative user named Admin.

    " + } + }, + "PortNumber": { + "base": null, + "refs": { + "RadiusSettings$RadiusPort": "

    The port that your RADIUS server is using for communications. Your on-premises network must allow inbound traffic over this port from the AWS Directory Service servers.

    " + } + }, + "RadiusAuthenticationProtocol": { + "base": null, + "refs": { + "RadiusSettings$AuthenticationProtocol": "

    The protocol specified for your RADIUS endpoints.

    " + } + }, + "RadiusDisplayLabel": { + "base": null, + "refs": { + "RadiusSettings$DisplayLabel": "

    Not currently used.

    " + } + }, + "RadiusRetries": { + "base": null, + "refs": { + "RadiusSettings$RadiusRetries": "

    The maximum number of times that communication with the RADIUS server is attempted.

    " + } + }, + "RadiusSettings": { + "base": "

    Contains information about a Remote Authentication Dial In User Service (RADIUS) server.

    ", + "refs": { + "DirectoryDescription$RadiusSettings": "

    A RadiusSettings object that contains information about the RADIUS server configured for this directory.

    ", + "EnableRadiusRequest$RadiusSettings": "

    A RadiusSettings object that contains information about the RADIUS server.

    ", + "UpdateRadiusRequest$RadiusSettings": "

    A RadiusSettings object that contains information about the RADIUS server.

    " + } + }, + "RadiusSharedSecret": { + "base": null, + "refs": { + "RadiusSettings$SharedSecret": "

    The shared secret code that was specified when your RADIUS endpoints were created.

    " + } + }, + "RadiusStatus": { + "base": null, + "refs": { + "DirectoryDescription$RadiusStatus": "

    The status of the RADIUS MFA server connection.

    " + } + }, + "RadiusTimeout": { + "base": null, + "refs": { + "RadiusSettings$RadiusTimeout": "

    The amount of time, in seconds, to wait for the RADIUS server to respond.
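
    Taken together, the RadiusSettings fields documented in this section map onto an EnableRadius call. A hedged sketch, reusing the directoryservice client from the pagination example above; the directory ID, server address, and secret are placeholders:

        settings := &directoryservice.RadiusSettings{
            RadiusServers:          aws.StringSlice([]string{"10.0.0.10"}),
            RadiusPort:             aws.Int64(1812),
            RadiusTimeout:          aws.Int64(5), // seconds to wait for a response
            RadiusRetries:          aws.Int64(3), // maximum communication attempts
            SharedSecret:           aws.String("placeholder-shared-secret"),
            AuthenticationProtocol: aws.String("PAP"),
        }
        _, err := svc.EnableRadius(&directoryservice.EnableRadiusInput{
            DirectoryId:    aws.String("d-1234567890"), // placeholder
            RadiusSettings: settings,
        })
        if err != nil {
            fmt.Println("EnableRadius failed:", err)
        }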

    " + } + }, + "RegisterEventTopicRequest": { + "base": "

    Registers a new event topic.

    ", + "refs": { + } + }, + "RegisterEventTopicResult": { + "base": "

    The result of a RegisterEventTopic request.

    ", + "refs": { + } + }, + "RemoteDomainName": { + "base": null, + "refs": { + "ConditionalForwarder$RemoteDomainName": "

    The fully qualified domain name (FQDN) of the remote domain pointed to by the conditional forwarder.

    ", + "CreateConditionalForwarderRequest$RemoteDomainName": "

    The fully qualified domain name (FQDN) of the remote domain with which you will set up a trust relationship.

    ", + "CreateTrustRequest$RemoteDomainName": "

    The Fully Qualified Domain Name (FQDN) of the external domain for which to create the trust relationship.

    ", + "DeleteConditionalForwarderRequest$RemoteDomainName": "

    The fully qualified domain name (FQDN) of the remote domain with which you are deleting the conditional forwarder.

    ", + "RemoteDomainNames$member": null, + "Trust$RemoteDomainName": "

    The Fully Qualified Domain Name (FQDN) of the external domain involved in the trust relationship.

    ", + "UpdateConditionalForwarderRequest$RemoteDomainName": "

    The fully qualified domain name (FQDN) of the remote domain with which you will set up a trust relationship.

    " + } + }, + "RemoteDomainNames": { + "base": null, + "refs": { + "DescribeConditionalForwardersRequest$RemoteDomainNames": "

    The fully qualified domain names (FQDNs) of the remote domains for which to get the list of associated conditional forwarders. If this member is null, all conditional forwarders are returned.

    " + } + }, + "RemoveTagsFromResourceRequest": { + "base": null, + "refs": { + } + }, + "RemoveTagsFromResourceResult": { + "base": null, + "refs": { + } + }, + "ReplicationScope": { + "base": null, + "refs": { + "ConditionalForwarder$ReplicationScope": "

    The replication scope of the conditional forwarder. The only allowed value is Domain, which will replicate the conditional forwarder to all of the domain controllers for your AWS directory.

    " + } + }, + "RequestId": { + "base": "

    The AWS request identifier.

    ", + "refs": { + "AuthenticationFailedException$RequestId": "

    The identifier of the request that caused the exception.

    ", + "ClientException$RequestId": null, + "DirectoryLimitExceededException$RequestId": null, + "DirectoryUnavailableException$RequestId": null, + "EntityAlreadyExistsException$RequestId": null, + "EntityDoesNotExistException$RequestId": null, + "InsufficientPermissionsException$RequestId": null, + "InvalidNextTokenException$RequestId": null, + "InvalidParameterException$RequestId": null, + "ServiceException$RequestId": null, + "SnapshotLimitExceededException$RequestId": null, + "TagLimitExceededException$RequestId": null, + "UnsupportedOperationException$RequestId": null + } + }, + "ResourceId": { + "base": null, + "refs": { + "AddTagsToResourceRequest$ResourceId": "

    The ID of the directory to which to add the tag.

    ", + "ListTagsForResourceRequest$ResourceId": "

    The ID of the directory for which you want to retrieve tags.

    ", + "RemoveTagsFromResourceRequest$ResourceId": "

    The ID of the directory from which to remove the tag.

    " + } + }, + "RestoreFromSnapshotRequest": { + "base": "

    An object representing the inputs for the RestoreFromSnapshot operation.

    ", + "refs": { + } + }, + "RestoreFromSnapshotResult": { + "base": "

    Contains the results of the RestoreFromSnapshot operation.

    ", + "refs": { + } + }, + "SID": { + "base": null, + "refs": { + "Computer$ComputerId": "

    The identifier of the computer.

    " + } + }, + "SecurityGroupId": { + "base": null, + "refs": { + "DirectoryConnectSettingsDescription$SecurityGroupId": "

    The security group identifier for the AD Connector directory.

    ", + "DirectoryVpcSettingsDescription$SecurityGroupId": "

    The security group identifier for the directory. If the directory was created before 8/1/2014, this is the identifier of the directory members' security group that was created when the directory was created. If the directory was created after this date, this value is null.

    " + } + }, + "Server": { + "base": null, + "refs": { + "Servers$member": null + } + }, + "Servers": { + "base": null, + "refs": { + "RadiusSettings$RadiusServers": "

    An array of strings that contains the IP addresses of the RADIUS server endpoints, or the IP addresses of your RADIUS server load balancer.

    " + } + }, + "ServiceException": { + "base": "

    An exception has occurred in AWS Directory Service.

    ", + "refs": { + } + }, + "Snapshot": { + "base": "

    Describes a directory snapshot.

    ", + "refs": { + "Snapshots$member": null + } + }, + "SnapshotId": { + "base": null, + "refs": { + "CreateSnapshotResult$SnapshotId": "

    The identifier of the snapshot that was created.

    ", + "DeleteSnapshotRequest$SnapshotId": "

    The identifier of the directory snapshot to be deleted.

    ", + "DeleteSnapshotResult$SnapshotId": "

    The identifier of the directory snapshot that was deleted.

    ", + "RestoreFromSnapshotRequest$SnapshotId": "

    The identifier of the snapshot to restore from.

    ", + "Snapshot$SnapshotId": "

    The snapshot identifier.

    ", + "SnapshotIds$member": null + } + }, + "SnapshotIds": { + "base": "

    A list of directory snapshot identifiers.

    ", + "refs": { + "DescribeSnapshotsRequest$SnapshotIds": "

    A list of identifiers of the snapshots to obtain the information for. If this member is null or empty, all snapshots are returned using the Limit and NextToken members.

    " + } + }, + "SnapshotLimitExceededException": { + "base": "

    The maximum number of manual snapshots for the directory has been reached. You can use the GetSnapshotLimits operation to determine the snapshot limits for a directory.

    ", + "refs": { + } + }, + "SnapshotLimits": { + "base": "

    Contains manual snapshot limit information for a directory.

    ", + "refs": { + "GetSnapshotLimitsResult$SnapshotLimits": "

    A SnapshotLimits object that contains the manual snapshot limits for the specified directory.

    " + } + }, + "SnapshotName": { + "base": null, + "refs": { + "CreateSnapshotRequest$Name": "

    The descriptive name to apply to the snapshot.

    ", + "Snapshot$Name": "

    The descriptive name of the snapshot.

    " + } + }, + "SnapshotStatus": { + "base": null, + "refs": { + "Snapshot$Status": "

    The snapshot status.

    " + } + }, + "SnapshotType": { + "base": null, + "refs": { + "Snapshot$Type": "

    The snapshot type.

    " + } + }, + "Snapshots": { + "base": "

    A list of descriptions of directory snapshots.

    ", + "refs": { + "DescribeSnapshotsResult$Snapshots": "

    The list of Snapshot objects that were retrieved.

    It is possible that this list contains fewer than the number of items specified in the Limit member of the request. This occurs if there are fewer than the requested number of items left to retrieve, or if the limitations of the operation have been exceeded.

    " + } + }, + "SsoEnabled": { + "base": null, + "refs": { + "DirectoryDescription$SsoEnabled": "

    Indicates if single sign-on is enabled for the directory. For more information, see EnableSso and DisableSso.

    " + } + }, + "StageReason": { + "base": null, + "refs": { + "DirectoryDescription$StageReason": "

    Additional information about the directory stage.

    " + } + }, + "StartTime": { + "base": null, + "refs": { + "Snapshot$StartTime": "

    The date and time that the snapshot was taken.

    " + } + }, + "StateLastUpdatedDateTime": { + "base": null, + "refs": { + "Trust$StateLastUpdatedDateTime": "

    The date and time that the TrustState was last updated.

    " + } + }, + "SubnetId": { + "base": null, + "refs": { + "SubnetIds$member": null + } + }, + "SubnetIds": { + "base": null, + "refs": { + "DirectoryConnectSettings$SubnetIds": "

    A list of subnet identifiers in the VPC in which the AD Connector is created.

    ", + "DirectoryConnectSettingsDescription$SubnetIds": "

    A list of subnet identifiers in the VPC that the AD connector is in.

    ", + "DirectoryVpcSettings$SubnetIds": "

    The identifiers of the subnets for the directory servers. The two subnets must be in different Availability Zones. AWS Directory Service creates a directory server and a DNS server in each of these subnets.

    ", + "DirectoryVpcSettingsDescription$SubnetIds": "

    The identifiers of the subnets for the directory servers.

    " + } + }, + "Tag": { + "base": "

    Metadata assigned to an Amazon Directory Services directory consisting of a key-value pair.

    ", + "refs": { + "Tags$member": null + } + }, + "TagKey": { + "base": null, + "refs": { + "Tag$Key": "

    A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and cannot be prefixed with \"aws:\". The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").

    ", + "TagKeys$member": null + } + }, + "TagKeys": { + "base": null, + "refs": { + "RemoveTagsFromResourceRequest$TagKeys": "

    The tag key (name) of the tag to be removed.

    " + } + }, + "TagLimitExceededException": { + "base": "

    The maximum allowed number of tags was exceeded.

    ", + "refs": { + } + }, + "TagValue": { + "base": null, + "refs": { + "Tag$Value": "

    A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length. The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").

    " + } + }, + "Tags": { + "base": null, + "refs": { + "AddTagsToResourceRequest$Tags": "

    The tags to be assigned to the Amazon Directory Services directory.

    ", + "ListTagsForResourceResult$Tags": "

    List of tags returned by the ListTagsForResource operation.
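
    The Tag, TagKey, and TagValue shapes above combine in an AddTagsToResource call. A short sketch, under the same assumed directoryservice client as the earlier examples; the directory ID and tag are placeholders:

        _, err := svc.AddTagsToResource(&directoryservice.AddTagsToResourceInput{
            ResourceId: aws.String("d-1234567890"), // placeholder directory ID
            Tags: []*directoryservice.Tag{
                {Key: aws.String("environment"), Value: aws.String("test")},
            },
        })
        if err != nil {
            fmt.Println("AddTagsToResource failed:", err)
        }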

    " + } + }, + "TopicArn": { + "base": null, + "refs": { + "EventTopic$TopicArn": "

    The SNS topic ARN (Amazon Resource Name).

    " + } + }, + "TopicName": { + "base": null, + "refs": { + "DeregisterEventTopicRequest$TopicName": "

    The name of the SNS topic from which to remove the directory as a publisher.

    ", + "EventTopic$TopicName": "

    The name of an AWS SNS topic that receives status messages from the directory.

    ", + "RegisterEventTopicRequest$TopicName": "

    The SNS topic name to which the directory will publish status messages. This SNS topic must be in the same region as the specified Directory ID.

    ", + "TopicNames$member": null + } + }, + "TopicNames": { + "base": null, + "refs": { + "DescribeEventTopicsRequest$TopicNames": "

    A list of SNS topic names for which to obtain the information. If this member is null, all associations for the specified Directory ID are returned.

    An empty list results in an InvalidParameterException being thrown.

    " + } + }, + "TopicStatus": { + "base": null, + "refs": { + "EventTopic$Status": "

    The topic registration status.

    " + } + }, + "Trust": { + "base": "

    Describes a trust relationship between a Microsoft AD in the AWS cloud and an external domain.

    ", + "refs": { + "Trusts$member": null + } + }, + "TrustDirection": { + "base": null, + "refs": { + "CreateTrustRequest$TrustDirection": "

    The direction of the trust relationship.

    ", + "Trust$TrustDirection": "

    The trust relationship direction.

    " + } + }, + "TrustId": { + "base": null, + "refs": { + "CreateTrustResult$TrustId": "

    A unique identifier for the trust relationship that was created.

    ", + "DeleteTrustRequest$TrustId": "

    The Trust ID of the trust relationship to be deleted.

    ", + "DeleteTrustResult$TrustId": "

    The Trust ID of the trust relationship that was deleted.

    ", + "Trust$TrustId": "

    The unique ID of the trust relationship.

    ", + "TrustIds$member": null, + "VerifyTrustRequest$TrustId": "

    The unique Trust ID of the trust relationship to verify.

    ", + "VerifyTrustResult$TrustId": "

    The unique Trust ID of the trust relationship that was verified.

    " + } + }, + "TrustIds": { + "base": null, + "refs": { + "DescribeTrustsRequest$TrustIds": "

    A list of identifiers of the trust relationships for which to obtain the information. If this member is null, all trust relationships that belong to the current account are returned.

    An empty list results in an InvalidParameterException being thrown.

    " + } + }, + "TrustPassword": { + "base": null, + "refs": { + "CreateTrustRequest$TrustPassword": "

    The trust password. This must be the same password that was used when creating the trust relationship on the external domain.
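
    For illustration, a hedged CreateTrust sketch using the fields documented in this section (same assumed directoryservice client; every value shown is a placeholder):

        _, err := svc.CreateTrust(&directoryservice.CreateTrustInput{
            DirectoryId:      aws.String("d-1234567890"),
            RemoteDomainName: aws.String("corp.example.com"),
            TrustPassword:    aws.String("same-password-as-remote-side"),
            TrustDirection:   aws.String("Two-Way"),
            TrustType:        aws.String("Forest"),
        })
        if err != nil {
            fmt.Println("CreateTrust failed:", err)
        }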

    " + } + }, + "TrustState": { + "base": null, + "refs": { + "Trust$TrustState": "

    The trust relationship state.

    " + } + }, + "TrustStateReason": { + "base": null, + "refs": { + "Trust$TrustStateReason": "

    The reason for the TrustState.

    " + } + }, + "TrustType": { + "base": null, + "refs": { + "CreateTrustRequest$TrustType": "

    The trust relationship type.

    ", + "Trust$TrustType": "

    The trust relationship type.

    " + } + }, + "Trusts": { + "base": null, + "refs": { + "DescribeTrustsResult$Trusts": "

    The list of Trust objects that were retrieved.

    It is possible that this list contains fewer than the number of items specified in the Limit member of the request. This occurs if there are fewer than the requested number of items left to retrieve, or if the limitations of the operation have been exceeded.

    " + } + }, + "UnsupportedOperationException": { + "base": "

    The operation is not supported.

    ", + "refs": { + } + }, + "UpdateConditionalForwarderRequest": { + "base": "

    Updates a conditional forwarder.

    ", + "refs": { + } + }, + "UpdateConditionalForwarderResult": { + "base": "

    The result of an UpdateConditionalForwarder request.

    ", + "refs": { + } + }, + "UpdateRadiusRequest": { + "base": "

    Contains the inputs for the UpdateRadius operation.

    ", + "refs": { + } + }, + "UpdateRadiusResult": { + "base": "

    Contains the results of the UpdateRadius operation.

    ", + "refs": { + } + }, + "UseSameUsername": { + "base": null, + "refs": { + "RadiusSettings$UseSameUsername": "

    Not currently used.

    " + } + }, + "UserName": { + "base": null, + "refs": { + "DirectoryConnectSettings$CustomerUserName": "

    The username of an account in the on-premises directory that is used to connect to the directory. This account must have the following privileges:

    • Read users and groups

    • Create computer objects

    • Join computers to the domain

    ", + "DirectoryConnectSettingsDescription$CustomerUserName": "

    The username of the service account in the on-premises directory.

    ", + "DisableSsoRequest$UserName": "

    The username of an alternate account to use to disable single sign-on. This is only used for AD Connector directories. This account must have privileges to remove a service principal name.

    If the AD Connector service account does not have privileges to remove a service principal name, you can specify an alternate account with the UserName and Password parameters. These credentials are only used to disable single sign-on and are not stored by the service. The AD Connector service account is not changed.

    ", + "EnableSsoRequest$UserName": "

    The username of an alternate account to use to enable single sign-on. This is only used for AD Connector directories. This account must have privileges to add a service principal name.

    If the AD Connector service account does not have privileges to add a service principal name, you can specify an alternate account with the UserName and Password parameters. These credentials are only used to enable single sign-on and are not stored by the service. The AD Connector service account is not changed.

    " + } + }, + "VerifyTrustRequest": { + "base": "

    Initiates the verification of an existing trust relationship between a Microsoft AD in the AWS cloud and an external domain.

    ", + "refs": { + } + }, + "VerifyTrustResult": { + "base": "

    Result of a VerifyTrust request.

    ", + "refs": { + } + }, + "VpcId": { + "base": null, + "refs": { + "DirectoryConnectSettings$VpcId": "

    The identifier of the VPC in which the AD Connector is created.

    ", + "DirectoryConnectSettingsDescription$VpcId": "

    The identifier of the VPC that the AD Connector is in.

    ", + "DirectoryVpcSettings$VpcId": "

    The identifier of the VPC in which to create the directory.

    ", + "DirectoryVpcSettingsDescription$VpcId": "

    The identifier of the VPC that the directory is in.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/ds/2015-04-16/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/ds/2015-04-16/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/ds/2015-04-16/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/dynamodb/2011-12-05/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/dynamodb/2011-12-05/api-2.json new file mode 100644 index 000000000..89de839f0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/dynamodb/2011-12-05/api-2.json @@ -0,0 +1,801 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2011-12-05", + "endpointPrefix":"dynamodb", + "jsonVersion":"1.0", + "protocol":"json", + "serviceAbbreviation":"DynamoDB", + "serviceFullName":"Amazon DynamoDB", + "signatureVersion":"v4", + "targetPrefix":"DynamoDB_20111205" + }, + "operations":{ + "BatchGetItem":{ + "name":"BatchGetItem", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchGetItemInput"}, + "output":{"shape":"BatchGetItemOutput"}, + "errors":[ + {"shape":"ProvisionedThroughputExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerError"} + ] + }, + "BatchWriteItem":{ + "name":"BatchWriteItem", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchWriteItemInput"}, + "output":{"shape":"BatchWriteItemOutput"}, + "errors":[ + {"shape":"ProvisionedThroughputExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalServerError"} + ] + }, + "CreateTable":{ + "name":"CreateTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateTableInput"}, + "output":{"shape":"CreateTableOutput"}, + "errors":[ + {"shape":"ResourceInUseException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalServerError"} + ] + }, + "DeleteItem":{ + "name":"DeleteItem", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteItemInput"}, + "output":{"shape":"DeleteItemOutput"}, + "errors":[ + {"shape":"ConditionalCheckFailedException"}, + {"shape":"ProvisionedThroughputExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalServerError"} + ] + }, + "DeleteTable":{ + "name":"DeleteTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteTableInput"}, + "output":{"shape":"DeleteTableOutput"}, + "errors":[ + {"shape":"ResourceInUseException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalServerError"} + ] + }, + "DescribeTable":{ + "name":"DescribeTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTableInput"}, + "output":{"shape":"DescribeTableOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerError"} + ] + }, + "GetItem":{ + "name":"GetItem", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetItemInput"}, + "output":{"shape":"GetItemOutput"}, + "errors":[ + {"shape":"ProvisionedThroughputExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerError"} + ] + }, + "ListTables":{ + "name":"ListTables", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTablesInput"}, + 
"output":{"shape":"ListTablesOutput"}, + "errors":[ + {"shape":"InternalServerError"} + ] + }, + "PutItem":{ + "name":"PutItem", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutItemInput"}, + "output":{"shape":"PutItemOutput"}, + "errors":[ + {"shape":"ConditionalCheckFailedException"}, + {"shape":"ProvisionedThroughputExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalServerError"} + ] + }, + "Query":{ + "name":"Query", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"QueryInput"}, + "output":{"shape":"QueryOutput"}, + "errors":[ + {"shape":"ProvisionedThroughputExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerError"} + ] + }, + "Scan":{ + "name":"Scan", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ScanInput"}, + "output":{"shape":"ScanOutput"}, + "errors":[ + {"shape":"ProvisionedThroughputExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerError"} + ] + }, + "UpdateItem":{ + "name":"UpdateItem", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateItemInput"}, + "output":{"shape":"UpdateItemOutput"}, + "errors":[ + {"shape":"ConditionalCheckFailedException"}, + {"shape":"ProvisionedThroughputExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalServerError"} + ] + }, + "UpdateTable":{ + "name":"UpdateTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateTableInput"}, + "output":{"shape":"UpdateTableOutput"}, + "errors":[ + {"shape":"ResourceInUseException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalServerError"} + ] + } + }, + "shapes":{ + "AttributeAction":{ + "type":"string", + "enum":[ + "ADD", + "PUT", + "DELETE" + ] + }, + "AttributeMap":{ + "type":"map", + "key":{"shape":"AttributeName"}, + "value":{"shape":"AttributeValue"} + }, + "AttributeName":{ + "type":"string", + "max":65535 + }, + "AttributeNameList":{ + "type":"list", + "member":{"shape":"AttributeName"}, + "min":1 + }, + "AttributeUpdates":{ + "type":"map", + "key":{"shape":"AttributeName"}, + "value":{"shape":"AttributeValueUpdate"} + }, + "AttributeValue":{ + "type":"structure", + "members":{ + "S":{"shape":"StringAttributeValue"}, + "N":{"shape":"NumberAttributeValue"}, + "B":{"shape":"BinaryAttributeValue"}, + "SS":{"shape":"StringSetAttributeValue"}, + "NS":{"shape":"NumberSetAttributeValue"}, + "BS":{"shape":"BinarySetAttributeValue"} + } + }, + "AttributeValueList":{ + "type":"list", + "member":{"shape":"AttributeValue"} + }, + "AttributeValueUpdate":{ + "type":"structure", + "members":{ + "Value":{"shape":"AttributeValue"}, + "Action":{"shape":"AttributeAction"} + } + }, + "BatchGetItemInput":{ + "type":"structure", + "required":["RequestItems"], + "members":{ + "RequestItems":{"shape":"BatchGetRequestMap"} + } + }, + "BatchGetItemOutput":{ + "type":"structure", + "members":{ + "Responses":{"shape":"BatchGetResponseMap"}, + "UnprocessedKeys":{"shape":"BatchGetRequestMap"} + } + }, + "BatchGetRequestMap":{ + "type":"map", + "key":{"shape":"TableName"}, + "value":{"shape":"KeysAndAttributes"}, + "max":100, + "min":1 + }, + "BatchGetResponseMap":{ + "type":"map", + "key":{"shape":"TableName"}, + "value":{"shape":"BatchResponse"} + }, + "BatchResponse":{ + "type":"structure", + "members":{ + 
"Items":{"shape":"ItemList"}, + "ConsumedCapacityUnits":{"shape":"ConsumedCapacityUnits"} + } + }, + "BatchWriteItemInput":{ + "type":"structure", + "required":["RequestItems"], + "members":{ + "RequestItems":{"shape":"BatchWriteItemRequestMap"} + } + }, + "BatchWriteItemOutput":{ + "type":"structure", + "members":{ + "Responses":{"shape":"BatchWriteResponseMap"}, + "UnprocessedItems":{"shape":"BatchWriteItemRequestMap"} + } + }, + "BatchWriteItemRequestMap":{ + "type":"map", + "key":{"shape":"TableName"}, + "value":{"shape":"WriteRequests"}, + "max":25, + "min":1 + }, + "BatchWriteResponse":{ + "type":"structure", + "members":{ + "ConsumedCapacityUnits":{"shape":"ConsumedCapacityUnits"} + } + }, + "BatchWriteResponseMap":{ + "type":"map", + "key":{"shape":"TableName"}, + "value":{"shape":"BatchWriteResponse"} + }, + "BinaryAttributeValue":{"type":"blob"}, + "BinarySetAttributeValue":{ + "type":"list", + "member":{"shape":"BinaryAttributeValue"} + }, + "BooleanObject":{"type":"boolean"}, + "ComparisonOperator":{ + "type":"string", + "enum":[ + "EQ", + "NE", + "IN", + "LE", + "LT", + "GE", + "GT", + "BETWEEN", + "NOT_NULL", + "NULL", + "CONTAINS", + "NOT_CONTAINS", + "BEGINS_WITH" + ] + }, + "Condition":{ + "type":"structure", + "required":["ComparisonOperator"], + "members":{ + "AttributeValueList":{"shape":"AttributeValueList"}, + "ComparisonOperator":{"shape":"ComparisonOperator"} + } + }, + "ConditionalCheckFailedException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "ConsistentRead":{"type":"boolean"}, + "ConsumedCapacityUnits":{"type":"double"}, + "CreateTableInput":{ + "type":"structure", + "required":[ + "TableName", + "KeySchema", + "ProvisionedThroughput" + ], + "members":{ + "TableName":{"shape":"TableName"}, + "KeySchema":{"shape":"KeySchema"}, + "ProvisionedThroughput":{"shape":"ProvisionedThroughput"} + } + }, + "CreateTableOutput":{ + "type":"structure", + "members":{ + "TableDescription":{"shape":"TableDescription"} + } + }, + "Date":{"type":"timestamp"}, + "DeleteItemInput":{ + "type":"structure", + "required":[ + "TableName", + "Key" + ], + "members":{ + "TableName":{"shape":"TableName"}, + "Key":{"shape":"Key"}, + "Expected":{"shape":"ExpectedAttributeMap"}, + "ReturnValues":{"shape":"ReturnValue"} + } + }, + "DeleteItemOutput":{ + "type":"structure", + "members":{ + "Attributes":{"shape":"AttributeMap"}, + "ConsumedCapacityUnits":{"shape":"ConsumedCapacityUnits"} + } + }, + "DeleteRequest":{ + "type":"structure", + "required":["Key"], + "members":{ + "Key":{"shape":"Key"} + } + }, + "DeleteTableInput":{ + "type":"structure", + "required":["TableName"], + "members":{ + "TableName":{"shape":"TableName"} + } + }, + "DeleteTableOutput":{ + "type":"structure", + "members":{ + "TableDescription":{"shape":"TableDescription"} + } + }, + "DescribeTableInput":{ + "type":"structure", + "required":["TableName"], + "members":{ + "TableName":{"shape":"TableName"} + } + }, + "DescribeTableOutput":{ + "type":"structure", + "members":{ + "Table":{"shape":"TableDescription"} + } + }, + "ErrorMessage":{"type":"string"}, + "ExpectedAttributeMap":{ + "type":"map", + "key":{"shape":"AttributeName"}, + "value":{"shape":"ExpectedAttributeValue"} + }, + "ExpectedAttributeValue":{ + "type":"structure", + "members":{ + "Value":{"shape":"AttributeValue"}, + "Exists":{"shape":"BooleanObject"} + } + }, + "FilterConditionMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"Condition"} + }, + "GetItemInput":{ + 
"type":"structure", + "required":[ + "TableName", + "Key" + ], + "members":{ + "TableName":{"shape":"TableName"}, + "Key":{"shape":"Key"}, + "AttributesToGet":{"shape":"AttributeNameList"}, + "ConsistentRead":{"shape":"ConsistentRead"} + } + }, + "GetItemOutput":{ + "type":"structure", + "members":{ + "Item":{"shape":"AttributeMap"}, + "ConsumedCapacityUnits":{"shape":"ConsumedCapacityUnits"} + } + }, + "Integer":{"type":"integer"}, + "InternalServerError":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true, + "fault":true + }, + "ItemList":{ + "type":"list", + "member":{"shape":"AttributeMap"} + }, + "Key":{ + "type":"structure", + "required":["HashKeyElement"], + "members":{ + "HashKeyElement":{"shape":"AttributeValue"}, + "RangeKeyElement":{"shape":"AttributeValue"} + } + }, + "KeyList":{ + "type":"list", + "member":{"shape":"Key"}, + "max":100, + "min":1 + }, + "KeySchema":{ + "type":"structure", + "required":["HashKeyElement"], + "members":{ + "HashKeyElement":{"shape":"KeySchemaElement"}, + "RangeKeyElement":{"shape":"KeySchemaElement"} + } + }, + "KeySchemaAttributeName":{ + "type":"string", + "max":255, + "min":1 + }, + "KeySchemaElement":{ + "type":"structure", + "required":[ + "AttributeName", + "AttributeType" + ], + "members":{ + "AttributeName":{"shape":"KeySchemaAttributeName"}, + "AttributeType":{"shape":"ScalarAttributeType"} + } + }, + "KeysAndAttributes":{ + "type":"structure", + "required":["Keys"], + "members":{ + "Keys":{"shape":"KeyList"}, + "AttributesToGet":{"shape":"AttributeNameList"}, + "ConsistentRead":{"shape":"ConsistentRead"} + } + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "ListTablesInput":{ + "type":"structure", + "members":{ + "ExclusiveStartTableName":{"shape":"TableName"}, + "Limit":{"shape":"ListTablesInputLimit"} + } + }, + "ListTablesInputLimit":{ + "type":"integer", + "max":100, + "min":1 + }, + "ListTablesOutput":{ + "type":"structure", + "members":{ + "TableNames":{"shape":"TableNameList"}, + "LastEvaluatedTableName":{"shape":"TableName"} + } + }, + "Long":{"type":"long"}, + "NumberAttributeValue":{"type":"string"}, + "NumberSetAttributeValue":{ + "type":"list", + "member":{"shape":"NumberAttributeValue"} + }, + "PositiveIntegerObject":{ + "type":"integer", + "min":1 + }, + "PositiveLongObject":{ + "type":"long", + "min":1 + }, + "ProvisionedThroughput":{ + "type":"structure", + "required":[ + "ReadCapacityUnits", + "WriteCapacityUnits" + ], + "members":{ + "ReadCapacityUnits":{"shape":"PositiveLongObject"}, + "WriteCapacityUnits":{"shape":"PositiveLongObject"} + } + }, + "ProvisionedThroughputDescription":{ + "type":"structure", + "members":{ + "LastIncreaseDateTime":{"shape":"Date"}, + "LastDecreaseDateTime":{"shape":"Date"}, + "NumberOfDecreasesToday":{"shape":"PositiveLongObject"}, + "ReadCapacityUnits":{"shape":"PositiveLongObject"}, + "WriteCapacityUnits":{"shape":"PositiveLongObject"} + } + }, + "ProvisionedThroughputExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "PutItemInput":{ + "type":"structure", + "required":[ + "TableName", + "Item" + ], + "members":{ + "TableName":{"shape":"TableName"}, + "Item":{"shape":"PutItemInputAttributeMap"}, + "Expected":{"shape":"ExpectedAttributeMap"}, + "ReturnValues":{"shape":"ReturnValue"} + } + }, + "PutItemInputAttributeMap":{ + "type":"map", + "key":{"shape":"AttributeName"}, + 
"value":{"shape":"AttributeValue"} + }, + "PutItemOutput":{ + "type":"structure", + "members":{ + "Attributes":{"shape":"AttributeMap"}, + "ConsumedCapacityUnits":{"shape":"ConsumedCapacityUnits"} + } + }, + "PutRequest":{ + "type":"structure", + "required":["Item"], + "members":{ + "Item":{"shape":"PutItemInputAttributeMap"} + } + }, + "QueryInput":{ + "type":"structure", + "required":[ + "TableName", + "HashKeyValue" + ], + "members":{ + "TableName":{"shape":"TableName"}, + "AttributesToGet":{"shape":"AttributeNameList"}, + "Limit":{"shape":"PositiveIntegerObject"}, + "ConsistentRead":{"shape":"ConsistentRead"}, + "Count":{"shape":"BooleanObject"}, + "HashKeyValue":{"shape":"AttributeValue"}, + "RangeKeyCondition":{"shape":"Condition"}, + "ScanIndexForward":{"shape":"BooleanObject"}, + "ExclusiveStartKey":{"shape":"Key"} + } + }, + "QueryOutput":{ + "type":"structure", + "members":{ + "Items":{"shape":"ItemList"}, + "Count":{"shape":"Integer"}, + "LastEvaluatedKey":{"shape":"Key"}, + "ConsumedCapacityUnits":{"shape":"ConsumedCapacityUnits"} + } + }, + "ResourceInUseException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "ReturnValue":{ + "type":"string", + "enum":[ + "NONE", + "ALL_OLD", + "UPDATED_OLD", + "ALL_NEW", + "UPDATED_NEW" + ] + }, + "ScalarAttributeType":{ + "type":"string", + "enum":[ + "S", + "N", + "B" + ] + }, + "ScanInput":{ + "type":"structure", + "required":["TableName"], + "members":{ + "TableName":{"shape":"TableName"}, + "AttributesToGet":{"shape":"AttributeNameList"}, + "Limit":{"shape":"PositiveIntegerObject"}, + "Count":{"shape":"BooleanObject"}, + "ScanFilter":{"shape":"FilterConditionMap"}, + "ExclusiveStartKey":{"shape":"Key"} + } + }, + "ScanOutput":{ + "type":"structure", + "members":{ + "Items":{"shape":"ItemList"}, + "Count":{"shape":"Integer"}, + "ScannedCount":{"shape":"Integer"}, + "LastEvaluatedKey":{"shape":"Key"}, + "ConsumedCapacityUnits":{"shape":"ConsumedCapacityUnits"} + } + }, + "String":{"type":"string"}, + "StringAttributeValue":{"type":"string"}, + "StringSetAttributeValue":{ + "type":"list", + "member":{"shape":"StringAttributeValue"} + }, + "TableDescription":{ + "type":"structure", + "members":{ + "TableName":{"shape":"TableName"}, + "KeySchema":{"shape":"KeySchema"}, + "TableStatus":{"shape":"TableStatus"}, + "CreationDateTime":{"shape":"Date"}, + "ProvisionedThroughput":{"shape":"ProvisionedThroughputDescription"}, + "TableSizeBytes":{"shape":"Long"}, + "ItemCount":{"shape":"Long"} + } + }, + "TableName":{ + "type":"string", + "max":255, + "min":3, + "pattern":"[a-zA-Z0-9_.-]+" + }, + "TableNameList":{ + "type":"list", + "member":{"shape":"TableName"} + }, + "TableStatus":{ + "type":"string", + "enum":[ + "CREATING", + "UPDATING", + "DELETING", + "ACTIVE" + ] + }, + "UpdateItemInput":{ + "type":"structure", + "required":[ + "TableName", + "Key", + "AttributeUpdates" + ], + "members":{ + "TableName":{"shape":"TableName"}, + "Key":{"shape":"Key"}, + "AttributeUpdates":{"shape":"AttributeUpdates"}, + "Expected":{"shape":"ExpectedAttributeMap"}, + "ReturnValues":{"shape":"ReturnValue"} + } + }, + "UpdateItemOutput":{ + "type":"structure", + "members":{ + "Attributes":{"shape":"AttributeMap"}, + "ConsumedCapacityUnits":{"shape":"ConsumedCapacityUnits"} + } + }, + "UpdateTableInput":{ + "type":"structure", + "required":[ + "TableName", + 
"ProvisionedThroughput" + ], + "members":{ + "TableName":{"shape":"TableName"}, + "ProvisionedThroughput":{"shape":"ProvisionedThroughput"} + } + }, + "UpdateTableOutput":{ + "type":"structure", + "members":{ + "TableDescription":{"shape":"TableDescription"} + } + }, + "WriteRequest":{ + "type":"structure", + "members":{ + "PutRequest":{"shape":"PutRequest"}, + "DeleteRequest":{"shape":"DeleteRequest"} + } + }, + "WriteRequests":{ + "type":"list", + "member":{"shape":"WriteRequest"}, + "max":25, + "min":1 + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/dynamodb/2011-12-05/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/dynamodb/2011-12-05/docs-2.json new file mode 100644 index 000000000..80242d9ee --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/dynamodb/2011-12-05/docs-2.json @@ -0,0 +1,606 @@ +{ + "version": "2.0", + "service": "

    Amazon DynamoDB is a fast, highly scalable, highly available, cost-effective non-relational database service.

    Amazon DynamoDB removes traditional scalability limitations on data storage while maintaining low latency and predictable performance.

    ", + "operations": { + "BatchGetItem": "

    Retrieves the attributes for multiple items from multiple tables using their primary keys.

    The maximum number of item attributes that can be retrieved for a single operation is 100. Also, the number of items retrieved is constrained by a 1 MB size limit. If the response size limit is exceeded or a partial result is returned due to an internal processing failure, Amazon DynamoDB returns an UnprocessedKeys value so you can retry the operation starting with the next item to get.

    Amazon DynamoDB automatically adjusts the number of items returned per page to enforce this limit. For example, if you ask to retrieve 100 items but each individual item is 50 KB in size, the system returns 20 items and an appropriate UnprocessedKeys value so you can get the next page of results. If necessary, your application needs its own logic to assemble the pages of results into one set.
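
    The retry loop this paragraph describes is sketched below. Note that the vendored dynamodb package implements the newer (2012-08-10) API, so the sketch uses its map-based key shape rather than this legacy model's HashKeyElement form; the UnprocessedKeys pattern is identical. Assumes a client svc := dynamodb.New(session.Must(session.NewSession())), a placeholder table "MyTable", and a surrounding function that returns error:

        req := map[string]*dynamodb.KeysAndAttributes{
            "MyTable": {
                Keys: []map[string]*dynamodb.AttributeValue{
                    {"Id": {S: aws.String("item-1")}},
                },
            },
        }
        for len(req) > 0 {
            out, err := svc.BatchGetItem(&dynamodb.BatchGetItemInput{RequestItems: req})
            if err != nil {
                return err
            }
            for table, items := range out.Responses {
                fmt.Println(table, len(items)) // one page of results per table
            }
            req = out.UnprocessedKeys // empty once everything has been returned
        }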

    ", + "BatchWriteItem": "

    Allows you to execute a batch of Put and/or Delete requests against many tables in a single call. A total of 25 requests are allowed.

    There are no transaction guarantees provided by this API. It does not allow conditional puts, nor does it support return values.
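
    The same drain-the-unprocessed-map pattern applies to writes; a sketch under the same client and API-version assumptions as the BatchGetItem example above:

        writes := map[string][]*dynamodb.WriteRequest{
            "MyTable": {
                {PutRequest: &dynamodb.PutRequest{Item: map[string]*dynamodb.AttributeValue{
                    "Id": {S: aws.String("item-1")},
                }}},
            },
        }
        for len(writes) > 0 {
            out, err := svc.BatchWriteItem(&dynamodb.BatchWriteItemInput{RequestItems: writes})
            if err != nil {
                return err
            }
            writes = out.UnprocessedItems // retry anything the service deferred
        }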

    ", + "CreateTable": "

    Adds a new table to your account.

    The table name must be unique among those associated with the AWS Account issuing the request, and the AWS Region that receives the request (e.g. us-east-1).

    The CreateTable operation triggers an asynchronous workflow to begin creating the table. Amazon DynamoDB immediately returns the state of the table (CREATING) until the table is in the ACTIVE state. Once the table is in the ACTIVE state, you can perform data plane operations.

    ", + "DeleteItem": "

    Deletes a single item in a table by primary key.

    You can perform a conditional delete operation that deletes the item if it exists, or if it has an expected attribute value.

    ", + "DeleteTable": "

    Deletes a table and all of its items.

    If the table is in the ACTIVE state, you can delete it. If a table is in the CREATING or UPDATING state, Amazon DynamoDB returns a ResourceInUseException. If the specified table does not exist, Amazon DynamoDB returns a ResourceNotFoundException.

    ", + "DescribeTable": "

    Retrieves information about the table, including the current status of the table, the primary key schema and when the table was created.

    If the table does not exist, Amazon DynamoDB returns a ResourceNotFoundException.

    ", + "GetItem": "

    Retrieves a set of Attributes for an item that matches the primary key.

    The GetItem operation provides an eventually-consistent read by default. If eventually-consistent reads are not acceptable for your application, use ConsistentRead. Although this operation might take longer than a standard read, it always returns the last updated value.

    ", + "ListTables": "

    Retrieves a paginated list of table names created by the AWS Account of the caller in the AWS Region (e.g. us-east-1).

    ", + "PutItem": "

    Creates a new item, or replaces an old item with a new item (including all the attributes).

    If an item already exists in the specified table with the same primary key, the new item completely replaces the existing item. You can perform a conditional put (insert a new item if one with the specified primary key doesn't exist), or replace an existing item if it has certain attribute values.
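
    A conditional-put sketch: insert only if no item with this key exists, using the Expected/Exists parameter documented below (still accepted by the vendored dynamodb package as a legacy parameter; table and values are placeholders):

        _, err := svc.PutItem(&dynamodb.PutItemInput{
            TableName: aws.String("MyTable"),
            Item: map[string]*dynamodb.AttributeValue{
                "Id":   {S: aws.String("item-1")},
                "Name": {S: aws.String("first write")},
            },
            Expected: map[string]*dynamodb.ExpectedAttributeValue{
                "Id": {Exists: aws.Bool(false)}, // fail if the key already exists
            },
        })
        if err != nil {
            fmt.Println("conditional PutItem failed:", err)
        }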

    ", + "Query": "

    Gets the values of one or more items and their attributes by primary key (composite primary keys only).

    Narrow the scope of the query using comparison operators on the RangeKeyValue of the composite key. Use the ScanIndexForward parameter to get results in forward or reverse order by range key.

    ", + "Scan": "

    Retrieves one or more items and their attributes by performing a full scan of a table.

    Provide a ScanFilter to get more specific results.

    ", + "UpdateItem": "

    Edits an existing item's attributes.

    You can perform a conditional update (insert a new attribute name-value pair if it doesn't exist, or replace an existing name-value pair if it has certain expected attribute values).

    ", + "UpdateTable": "

    Updates the provisioned throughput for the given table.

    Setting the throughput for a table helps you manage performance and is part of the Provisioned Throughput feature of Amazon DynamoDB.

    " + }, + "shapes": { + "AttributeAction": { + "base": "

    The type of action for an item update operation: ADD (use only for numbers or sets; the specified value is added to the existing value, or, for a set, the specified values are added to the existing set), PUT (adds the specified attribute; if the attribute already exists, it is replaced by the new value), and DELETE (if no value is specified, removes the attribute and its value; if a set of values is specified, those values are removed from the existing set).
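
    As a concrete case, an ADD update that atomically increments a numeric attribute, via the legacy AttributeUpdates parameter the vendored dynamodb package still accepts (table, key, and attribute names are placeholders):

        _, err := svc.UpdateItem(&dynamodb.UpdateItemInput{
            TableName: aws.String("MyTable"),
            Key: map[string]*dynamodb.AttributeValue{
                "Id": {S: aws.String("item-1")},
            },
            AttributeUpdates: map[string]*dynamodb.AttributeValueUpdate{
                "Views": {
                    Action: aws.String("ADD"), // add to the existing number
                    Value:  &dynamodb.AttributeValue{N: aws.String("1")},
                },
            },
        })
        if err != nil {
            fmt.Println("UpdateItem failed:", err)
        }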

    ", + "refs": { + "AttributeValueUpdate$Action": null + } + }, + "AttributeMap": { + "base": null, + "refs": { + "DeleteItemOutput$Attributes": "

    If the ReturnValues parameter is provided as ALL_OLD in the request, Amazon DynamoDB returns an array of attribute name-value pairs (essentially, the deleted item). Otherwise, the response contains an empty set.

    ", + "GetItemOutput$Item": "

    Contains the requested attributes.

    ", + "ItemList$member": null, + "PutItemOutput$Attributes": "

    Attribute values before the put operation, but only if the ReturnValues parameter is specified as ALL_OLD in the request.

    ", + "UpdateItemOutput$Attributes": "

    A map of attribute name-value pairs, but only if the ReturnValues parameter is specified as something other than NONE in the request.

    " + } + }, + "AttributeName": { + "base": null, + "refs": { + "AttributeMap$key": null, + "AttributeNameList$member": null, + "AttributeUpdates$key": null, + "ExpectedAttributeMap$key": null, + "PutItemInputAttributeMap$key": null + } + }, + "AttributeNameList": { + "base": "

    A list of attribute names. If no attribute names are specified, all attributes are returned. If some attributes are not found, they do not appear in the result.

    ", + "refs": { + "GetItemInput$AttributesToGet": null, + "KeysAndAttributes$AttributesToGet": null, + "QueryInput$AttributesToGet": null, + "ScanInput$AttributesToGet": null + } + }, + "AttributeUpdates": { + "base": "

    Map of attribute name to the new value and action for the update. The attribute names specify the attributes to modify, and cannot contain any primary key attributes.

    ", + "refs": { + "UpdateItemInput$AttributeUpdates": null + } + }, + "AttributeValue": { + "base": "

    AttributeValue can be String, Number, Binary, StringSet, NumberSet, BinarySet.

    ", + "refs": { + "AttributeMap$value": null, + "AttributeValueList$member": null, + "AttributeValueUpdate$Value": null, + "ExpectedAttributeValue$Value": "

    Specify whether or not a value already exists and has a specific content for the attribute name-value pair.

    ", + "Key$HashKeyElement": "

    A hash key element is treated as the primary key, and can be a string or a number. Single attribute primary keys have one index value. The value can be String, Number, StringSet, NumberSet.

    ", + "Key$RangeKeyElement": "

    A range key element is treated as a secondary key (used in conjunction with the primary key), and can be a string or a number, and is only used for hash-and-range primary keys. The value can be String, Number, StringSet, NumberSet.

    ", + "PutItemInputAttributeMap$value": null, + "QueryInput$HashKeyValue": "

    Attribute value of the hash component of the composite primary key.

    " + } + }, + "AttributeValueList": { + "base": "

    A list of attribute values to be used with a comparison operator for a scan or query operation. For comparisons that require more than one value, such as a BETWEEN comparison, the AttributeValueList contains two attribute values and the comparison operator.

    ", + "refs": { + "Condition$AttributeValueList": null + } + }, + "AttributeValueUpdate": { + "base": "

    Specifies the attribute to update and how to perform the update. Possible values: PUT (default), ADD or DELETE.

    ", + "refs": { + "AttributeUpdates$value": null + } + }, + "BatchGetItemInput": { + "base": null, + "refs": { + } + }, + "BatchGetItemOutput": { + "base": null, + "refs": { + } + }, + "BatchGetRequestMap": { + "base": "

    A map of the table name and corresponding items to get by primary key. Each table name can appear only once per operation.

    ", + "refs": { + "BatchGetItemInput$RequestItems": null, + "BatchGetItemOutput$UnprocessedKeys": "

    Contains a map of tables and their respective keys that were not processed with the current response, possibly due to reaching a limit on the response size. The UnprocessedKeys value is in the same form as a RequestItems parameter (so the value can be provided directly to a subsequent BatchGetItem operation). For more information, see the above RequestItems parameter.

    " + } + }, + "BatchGetResponseMap": { + "base": "

    Table names and the respective item attributes from the tables.

    ", + "refs": { + "BatchGetItemOutput$Responses": null + } + }, + "BatchResponse": { + "base": "

    The item attributes from a response in a specific table, along with the read resources consumed on the table during the request.

    ", + "refs": { + "BatchGetResponseMap$value": null + } + }, + "BatchWriteItemInput": { + "base": null, + "refs": { + } + }, + "BatchWriteItemOutput": { + "base": "

    A container for the BatchWriteItem response.

    ", + "refs": { + } + }, + "BatchWriteItemRequestMap": { + "base": "

    A map of table name to list-of-write-requests.

    Key: The table name corresponding to the list of requests

    Value: Essentially a list of request items. Each request item contains either a PutRequest or a DeleteRequest, never both.

    ", + "refs": { + "BatchWriteItemInput$RequestItems": "

    A map of table name to list-of-write-requests, used as input to the BatchWriteItem API call.

    ", + "BatchWriteItemOutput$UnprocessedItems": "

    The items that could not be successfully processed in a BatchWriteItem call are returned as UnprocessedItems.

    " + } + }, + "BatchWriteResponse": { + "base": null, + "refs": { + "BatchWriteResponseMap$value": null + } + }, + "BatchWriteResponseMap": { + "base": null, + "refs": { + "BatchWriteItemOutput$Responses": "

    The response object as a result of a BatchWriteItem call. This is essentially a map of table name to ConsumedCapacityUnits.

    " + } + }, + "BinaryAttributeValue": { + "base": null, + "refs": { + "AttributeValue$B": "

    Binary attributes are sequences of unsigned bytes.

    ", + "BinarySetAttributeValue$member": null + } + }, + "BinarySetAttributeValue": { + "base": null, + "refs": { + "AttributeValue$BS": "

    A set of binary attributes.

    " + } + }, + "BooleanObject": { + "base": null, + "refs": { + "ExpectedAttributeValue$Exists": "

    Specify whether or not a value already exists for the attribute name-value pair.

    ", + "QueryInput$Count": "

    If set to true, Amazon DynamoDB returns a total number of items that match the query parameters, instead of a list of the matching items and their attributes. Do not set Count to true while providing a list of AttributesToGet; otherwise, Amazon DynamoDB returns a validation error.

    ", + "QueryInput$ScanIndexForward": "

    Specifies forward or backward traversal of the index. Amazon DynamoDB returns results reflecting the requested order, determined by the range key. The default value is true (forward).

    ", + "ScanInput$Count": "

    If set to true, Amazon DynamoDB returns a total number of items for the Scan operation, even if the operation has no matching items for the assigned filter. Do not set Count to true while providing a list of AttributesToGet; otherwise, Amazon DynamoDB returns a validation error.

    " + } + }, + "ComparisonOperator": { + "base": "

    A comparison operator is an enumeration of several operations:

    • EQ for equal.
    • NE for not equal.
    • IN checks for exact matches.
    • LE for less than or equal to.
    • LT for less than.
    • GE for greater than or equal to.
    • GT for greater than.
    • BETWEEN for between.
    • NOT_NULL for exists.
    • NULL for not exists.
    • CONTAINS for substring or value in a set.
    • NOT_CONTAINS for absence of a substring or absence of a value in a set.
    • BEGINS_WITH for a substring prefix.

    Scan operations support all available comparison operators.

    Query operations support a subset of the available comparison operators: EQ, LE, LT, GE, GT, BETWEEN, and BEGINS_WITH.
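
    For example, a BETWEEN condition pairs the operator with a two-element AttributeValueList, as described above (shown with the vendored dynamodb package's Condition shape; the bounds are placeholders):

        cond := &dynamodb.Condition{
            ComparisonOperator: aws.String("BETWEEN"),
            AttributeValueList: []*dynamodb.AttributeValue{
                {N: aws.String("100")}, // inclusive lower bound
                {N: aws.String("200")}, // inclusive upper bound
            },
        }
        // cond can then be supplied in ScanInput.ScanFilter or, for queries,
        // the legacy QueryInput.KeyConditions map in the current package.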

    ", + "refs": { + "Condition$ComparisonOperator": null + } + }, + "Condition": { + "base": null, + "refs": { + "FilterConditionMap$value": null, + "QueryInput$RangeKeyCondition": "

    A container for the attribute values and comparison operators to use for the query.

    " + } + }, + "ConditionalCheckFailedException": { + "base": "

    This exception is thrown when an expected value does not match what was found in the system.

    ", + "refs": { + } + }, + "ConsistentRead": { + "base": "

    If set to true, then a consistent read is issued. Otherwise eventually-consistent is used.

    ", + "refs": { + "GetItemInput$ConsistentRead": null, + "KeysAndAttributes$ConsistentRead": null, + "QueryInput$ConsistentRead": null + } + }, + "ConsumedCapacityUnits": { + "base": "

    The number of Capacity Units of the provisioned throughput of the table consumed during the operation. GetItem, BatchGetItem, BatchWriteItem, Query, and Scan operations consume ReadCapacityUnits, while PutItem, UpdateItem, and DeleteItem operations consume WriteCapacityUnits.

    ", + "refs": { + "BatchResponse$ConsumedCapacityUnits": null, + "BatchWriteResponse$ConsumedCapacityUnits": null, + "DeleteItemOutput$ConsumedCapacityUnits": null, + "GetItemOutput$ConsumedCapacityUnits": null, + "PutItemOutput$ConsumedCapacityUnits": null, + "QueryOutput$ConsumedCapacityUnits": null, + "ScanOutput$ConsumedCapacityUnits": null, + "UpdateItemOutput$ConsumedCapacityUnits": null + } + }, + "CreateTableInput": { + "base": null, + "refs": { + } + }, + "CreateTableOutput": { + "base": null, + "refs": { + } + }, + "Date": { + "base": null, + "refs": { + "ProvisionedThroughputDescription$LastIncreaseDateTime": null, + "ProvisionedThroughputDescription$LastDecreaseDateTime": null, + "TableDescription$CreationDateTime": null + } + }, + "DeleteItemInput": { + "base": null, + "refs": { + } + }, + "DeleteItemOutput": { + "base": null, + "refs": { + } + }, + "DeleteRequest": { + "base": "

    A container for a Delete request within a BatchWriteItem operation.

    ", + "refs": { + "WriteRequest$DeleteRequest": null + } + }, + "DeleteTableInput": { + "base": null, + "refs": { + } + }, + "DeleteTableOutput": { + "base": null, + "refs": { + } + }, + "DescribeTableInput": { + "base": null, + "refs": { + } + }, + "DescribeTableOutput": { + "base": null, + "refs": { + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "ConditionalCheckFailedException$message": null, + "InternalServerError$message": null, + "LimitExceededException$message": null, + "ProvisionedThroughputExceededException$message": null, + "ResourceInUseException$message": null, + "ResourceNotFoundException$message": null + } + }, + "ExpectedAttributeMap": { + "base": "

    Designates an attribute for a conditional modification. The Expected parameter allows you to provide an attribute name, and whether or not Amazon DynamoDB should check to see if the attribute has a particular value before modifying it.

    ", + "refs": { + "DeleteItemInput$Expected": null, + "PutItemInput$Expected": null, + "UpdateItemInput$Expected": null + } + }, + "ExpectedAttributeValue": { + "base": "

    Allows you to provide an attribute name and to specify whether Amazon DynamoDB should check that the attribute value already exists, or that it exists and has a particular value, before changing it.

    ", + "refs": { + "ExpectedAttributeMap$value": null + } + }, + "FilterConditionMap": { + "base": null, + "refs": { + "ScanInput$ScanFilter": "

    Evaluates the scan results and returns only the desired values.

    " + } + }, + "GetItemInput": { + "base": null, + "refs": { + } + }, + "GetItemOutput": { + "base": null, + "refs": { + } + }, + "Integer": { + "base": null, + "refs": { + "QueryOutput$Count": "

    Number of items in the response.

    ", + "ScanOutput$Count": "

    Number of items in the response.

    ", + "ScanOutput$ScannedCount": "

    Number of items in the complete scan before any filters are applied. A high ScannedCount value with few, or no, Count results indicates an inefficient Scan operation.

    " + } + }, + "InternalServerError": { + "base": "

    This exception is thrown when the service has a problem when trying to process the request.

    ", + "refs": { + } + }, + "ItemList": { + "base": null, + "refs": { + "BatchResponse$Items": null, + "QueryOutput$Items": null, + "ScanOutput$Items": null + } + }, + "Key": { + "base": "

    The primary key that uniquely identifies each item in a table. A primary key can be a single-attribute (hash) primary key or a two-attribute (hash-and-range) primary key.

    ", + "refs": { + "DeleteItemInput$Key": null, + "DeleteRequest$Key": "

    The key of the item to delete.

    ", + "GetItemInput$Key": null, + "KeyList$member": null, + "QueryInput$ExclusiveStartKey": "

    Primary key of the item from which to continue an earlier query. An earlier query might provide this value as the LastEvaluatedKey if that query operation was interrupted before completing the query; either because of the result set size or the Limit parameter. The LastEvaluatedKey can be passed back in a new query request to continue the operation from that point.

    ", + "QueryOutput$LastEvaluatedKey": "

    Primary key of the item where the query operation stopped, inclusive of the previous result set. Use this value to start a new operation excluding this value in the new request. The LastEvaluatedKey is null when the entire query result set is complete (i.e. the operation processed the \"last page\").

    ", + "ScanInput$ExclusiveStartKey": "

    Primary key of the item from which to continue an earlier scan. An earlier scan might provide this value if that scan operation was interrupted before scanning the entire table; either because of the result set size or the Limit parameter. The LastEvaluatedKey can be passed back in a new scan request to continue the operation from that point.

    ", + "ScanOutput$LastEvaluatedKey": "

    Primary key of the item where the scan operation stopped. Provide this value in a subsequent scan operation to continue the operation from that point. The LastEvaluatedKey is null when the entire scan result set is complete (i.e. the operation processed the \"last page\").
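
    The ExclusiveStartKey/LastEvaluatedKey handoff described here is a simple loop; a sketch under the same dynamodb-client assumptions as the earlier examples, inside a function returning error:

        var start map[string]*dynamodb.AttributeValue
        for {
            out, err := svc.Scan(&dynamodb.ScanInput{
                TableName:         aws.String("MyTable"), // placeholder
                Limit:             aws.Int64(100),
                ExclusiveStartKey: start,
            })
            if err != nil {
                return err
            }
            fmt.Println("page items:", len(out.Items))
            if len(out.LastEvaluatedKey) == 0 {
                break // the "last page" has been processed
            }
            start = out.LastEvaluatedKey
        }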

    ", + "UpdateItemInput$Key": null + } + }, + "KeyList": { + "base": null, + "refs": { + "KeysAndAttributes$Keys": null + } + }, + "KeySchema": { + "base": "

    The KeySchema identifies the primary key as either a single-attribute (hash) primary key or a composite two-attribute (hash-and-range) primary key. Single-attribute primary keys have one index value: a HashKeyElement. A composite hash-and-range primary key contains two attribute values: a HashKeyElement and a RangeKeyElement.

    ", + "refs": { + "CreateTableInput$KeySchema": null, + "TableDescription$KeySchema": null + } + }, + "KeySchemaAttributeName": { + "base": null, + "refs": { + "KeySchemaElement$AttributeName": "

    The AttributeName of the KeySchemaElement.

    " + } + }, + "KeySchemaElement": { + "base": "

    KeySchemaElement is the primary key (hash or hash-and-range) structure for the table.

    ", + "refs": { + "KeySchema$HashKeyElement": "

    A hash key element is treated as the primary key, and can be a string or a number. Single attribute primary keys have one index value. The value can be String, Number, StringSet, NumberSet.

    ", + "KeySchema$RangeKeyElement": "

    A range key element is treated as a secondary key (used in conjunction with the primary key), and can be a string or a number, and is only used for hash-and-range primary keys. The value can be String, Number, StringSet, NumberSet.

    " + } + }, + "KeysAndAttributes": { + "base": null, + "refs": { + "BatchGetRequestMap$value": null + } + }, + "LimitExceededException": { + "base": "

    This exception is thrown when the subscriber exceeded the limits on the number of objects or operations.

    ", + "refs": { + } + }, + "ListTablesInput": { + "base": null, + "refs": { + } + }, + "ListTablesInputLimit": { + "base": "

    The maximum number of table names to return.

    ", + "refs": { + "ListTablesInput$Limit": null + } + }, + "ListTablesOutput": { + "base": null, + "refs": { + } + }, + "Long": { + "base": null, + "refs": { + "TableDescription$TableSizeBytes": null, + "TableDescription$ItemCount": null + } + }, + "NumberAttributeValue": { + "base": null, + "refs": { + "AttributeValue$N": "

    Numbers are positive or negative exact-value decimals and integers. A number can have up to 38 digits of precision and can be between 10^-128 and 10^+126.

    ", + "NumberSetAttributeValue$member": null + } + }, + "NumberSetAttributeValue": { + "base": null, + "refs": { + "AttributeValue$NS": "

    A set of numbers.

    " + } + }, + "PositiveIntegerObject": { + "base": null, + "refs": { + "QueryInput$Limit": "

    The maximum number of items to return. If Amazon DynamoDB hits this limit while querying the table, it stops the query and returns the matching values up to the limit, and a LastEvaluatedKey to apply in a subsequent operation to continue the query. Also, if the result set size exceeds 1MB before Amazon DynamoDB hits this limit, it stops the query and returns the matching values, and a LastEvaluatedKey to apply in a subsequent operation to continue the query.

    ", + "ScanInput$Limit": "

    The maximum number of items to return. If Amazon DynamoDB hits this limit while scanning the table, it stops the scan and returns the matching values up to the limit, and a LastEvaluatedKey to apply in a subsequent operation to continue the scan. Also, if the scanned data set size exceeds 1 MB before Amazon DynamoDB hits this limit, it stops the scan and returns the matching values up to the limit, and a LastEvaluatedKey to apply in a subsequent operation to continue the scan.

    " + } + }, + "PositiveLongObject": { + "base": null, + "refs": { + "ProvisionedThroughput$ReadCapacityUnits": "

    ReadCapacityUnits are in terms of strictly consistent reads, assuming items of 1k. 2k items require twice the ReadCapacityUnits. Eventually-consistent reads only require half the ReadCapacityUnits of strictly consistent reads.

    ", + "ProvisionedThroughput$WriteCapacityUnits": "

    WriteCapacityUnits are in terms of writes, assuming items of 1k. 2k items require twice the WriteCapacityUnits.

    ", + "ProvisionedThroughputDescription$NumberOfDecreasesToday": null, + "ProvisionedThroughputDescription$ReadCapacityUnits": null, + "ProvisionedThroughputDescription$WriteCapacityUnits": null + } + }, + "ProvisionedThroughput": { + "base": "

    Provisioned throughput reserves the required read and write resources for your table in terms of ReadCapacityUnits and WriteCapacityUnits. Values for provisioned throughput depend upon your expected read/write rates, item size, and consistency. Provide the expected number of read and write operations, assuming an item size of 1k and strictly consistent reads. For 2k item size, double the value. For 3k, triple the value, etc. Eventually-consistent reads consume half the resources of strictly consistent reads.
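    To make this arithmetic concrete, here is a back-of-the-envelope helper in the spirit of the description above; the function name and rounding choices are our own, not part of the SDK:

    // estimateUnits is a hypothetical helper, not part of aws-sdk-go. It follows
    // the rule above: one unit per 1k of item size per operation per second, with
    // eventually consistent reads costing half as much as strictly consistent ones.
    func estimateUnits(itemSizeKB, opsPerSecond int64, eventuallyConsistentRead bool) int64 {
        units := itemSizeKB * opsPerSecond // 2k items cost double, 3k triple, etc.
        if eventuallyConsistentRead {
            units = (units + 1) / 2 // halve, rounding up
        }
        return units
    }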

    ", + "refs": { + "CreateTableInput$ProvisionedThroughput": null, + "UpdateTableInput$ProvisionedThroughput": null + } + }, + "ProvisionedThroughputDescription": { + "base": null, + "refs": { + "TableDescription$ProvisionedThroughput": null + } + }, + "ProvisionedThroughputExceededException": { + "base": "

    This exception is thrown when the level of provisioned throughput defined for the table is exceeded.

    ", + "refs": { + } + }, + "PutItemInput": { + "base": null, + "refs": { + } + }, + "PutItemInputAttributeMap": { + "base": "

    A map of the attributes for the item; it must include the primary key values that define the item. Other attribute name-value pairs can be provided for the item.

    ", + "refs": { + "PutItemInput$Item": null, + "PutRequest$Item": "

    The item to put.

    " + } + }, + "PutItemOutput": { + "base": null, + "refs": { + } + }, + "PutRequest": { + "base": "

    A container for a Put request in a BatchWriteItem operation.

    ", + "refs": { + "WriteRequest$PutRequest": null + } + }, + "QueryInput": { + "base": null, + "refs": { + } + }, + "QueryOutput": { + "base": null, + "refs": { + } + }, + "ResourceInUseException": { + "base": "

    This exception is thrown when the resource that you are attempting to change is in use.

    ", + "refs": { + } + }, + "ResourceNotFoundException": { + "base": "

    This exception is thrown when the resource that is being requested does not exist.

    ", + "refs": { + } + }, + "ReturnValue": { + "base": "

    Use this parameter if you want to get the attribute name-value pairs before or after they are modified. For PUT operations, the possible parameter values are NONE (default) or ALL_OLD. For update operations, the possible parameter values are NONE (default), ALL_OLD, UPDATED_OLD, ALL_NEW, or UPDATED_NEW.

    • NONE: Nothing is returned.
    • ALL_OLD: Returns the attributes of the item as they were before the operation.
    • UPDATED_OLD: Returns the values of the updated attributes, only, as they were before the operation.
    • ALL_NEW: Returns all the attributes and their new values after the operation.
    • UPDATED_NEW: Returns the values of the updated attributes, only, as they are after the operation.
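    As a hedged illustration of the ALL_OLD case, the sketch below requests the pre-deletion image of an item with the vendored Go client; it assumes an existing *dynamodb.DynamoDB value, and the table name and key are placeholders:

    // deleteAndReturnOld returns the item as it was before the delete.
    // Assumes the aws, fmt, and dynamodb imports from github.com/aws/aws-sdk-go.
    func deleteAndReturnOld(svc *dynamodb.DynamoDB) error {
        out, err := svc.DeleteItem(&dynamodb.DeleteItemInput{
            TableName: aws.String("Music"), // hypothetical table
            Key: map[string]*dynamodb.AttributeValue{
                "Artist": {S: aws.String("No One You Know")},
            },
            ReturnValues: aws.String(dynamodb.ReturnValueAllOld),
        })
        if err != nil {
            return err
        }
        fmt.Println(out.Attributes) // the old attribute name-value pairs
        return nil
    }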
    ", + "refs": { + "DeleteItemInput$ReturnValues": null, + "PutItemInput$ReturnValues": null, + "UpdateItemInput$ReturnValues": null + } + }, + "ScalarAttributeType": { + "base": null, + "refs": { + "KeySchemaElement$AttributeType": "

    The AttributeType of the KeySchemaElement, which can be a String or a Number.

    " + } + }, + "ScanInput": { + "base": null, + "refs": { + } + }, + "ScanOutput": { + "base": null, + "refs": { + } + }, + "String": { + "base": null, + "refs": { + "FilterConditionMap$key": null + } + }, + "StringAttributeValue": { + "base": null, + "refs": { + "AttributeValue$S": "

    Strings are Unicode with UTF-8 binary encoding. The maximum size is limited by the size of the primary key (1024 bytes as a range part of a key or 2048 bytes as a single part hash key) or the item size (64k).

    ", + "StringSetAttributeValue$member": null + } + }, + "StringSetAttributeValue": { + "base": null, + "refs": { + "AttributeValue$SS": "

    A set of strings.

    " + } + }, + "TableDescription": { + "base": null, + "refs": { + "CreateTableOutput$TableDescription": null, + "DeleteTableOutput$TableDescription": null, + "DescribeTableOutput$Table": null, + "UpdateTableOutput$TableDescription": null + } + }, + "TableName": { + "base": null, + "refs": { + "BatchGetRequestMap$key": null, + "BatchGetResponseMap$key": null, + "BatchWriteItemRequestMap$key": null, + "BatchWriteResponseMap$key": null, + "CreateTableInput$TableName": "

    The name of the table you want to create. Allowed characters are a-z, A-Z, 0-9, _ (underscore), - (hyphen) and . (period).

    ", + "DeleteItemInput$TableName": "

    The name of the table in which you want to delete an item. Allowed characters are a-z, A-Z, 0-9, _ (underscore), - (hyphen) and . (period).

    ", + "DeleteTableInput$TableName": "

    The name of the table you want to delete. Allowed characters are a-z, A-Z, 0-9, _ (underscore), - (hyphen) and . (period).

    ", + "DescribeTableInput$TableName": "

    The name of the table you want to describe. Allowed characters are a-z, A-Z, 0-9, _ (underscore), - (hyphen) and . (period).

    ", + "GetItemInput$TableName": "

    The name of the table in which you want to get an item. Allowed characters are a-z, A-Z, 0-9, _ (underscore), - (hyphen) and . (period).

    ", + "ListTablesInput$ExclusiveStartTableName": "

    The name of the table that starts the list. If you already ran a ListTables operation and received a LastEvaluatedTableName value in the response, use that value here to continue the list.

    ", + "ListTablesOutput$LastEvaluatedTableName": "

    The name of the last table in the current list. Use this value as the ExclusiveStartTableName in a new request to continue the list until all the table names are returned. If this value is null, all table names have been returned.

    ", + "PutItemInput$TableName": "

    The name of the table in which you want to put an item. Allowed characters are a-z, A-Z, 0-9, _ (underscore), - (hyphen) and . (period).

    ", + "QueryInput$TableName": "

    The name of the table you want to query. Allowed characters are a-z, A-Z, 0-9, _ (underscore), - (hyphen) and . (period).

    ", + "ScanInput$TableName": "

    The name of the table you want to scan. Allowed characters are a-z, A-Z, 0-9, _ (underscore), - (hyphen) and . (period).

    ", + "TableDescription$TableName": "

    The name of the table being described.

    ", + "TableNameList$member": null, + "UpdateItemInput$TableName": "

    The name of the table in which you want to update an item. Allowed characters are a-z, A-Z, 0-9, _ (underscore), - (hyphen) and . (period).

    ", + "UpdateTableInput$TableName": "

    The name of the table you want to update. Allowed characters are a-z, A-Z, 0-9, _ (underscore), - (hyphen) and . (period).

    " + } + }, + "TableNameList": { + "base": null, + "refs": { + "ListTablesOutput$TableNames": null + } + }, + "TableStatus": { + "base": null, + "refs": { + "TableDescription$TableStatus": null + } + }, + "UpdateItemInput": { + "base": null, + "refs": { + } + }, + "UpdateItemOutput": { + "base": null, + "refs": { + } + }, + "UpdateTableInput": { + "base": null, + "refs": { + } + }, + "UpdateTableOutput": { + "base": null, + "refs": { + } + }, + "WriteRequest": { + "base": "

    This structure is a union of PutRequest and DeleteRequest. It can contain exactly one of PutRequest or DeleteRequest, never both. This is enforced in the code.
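    A minimal sketch of this union constraint with the generated Go types (the attribute values are placeholders, and this uses the 2012-08-10 client shapes):

    // buildWriteRequests shows that each WriteRequest carries exactly one of
    // PutRequest or DeleteRequest; setting both on one element is invalid.
    func buildWriteRequests() []*dynamodb.WriteRequest {
        return []*dynamodb.WriteRequest{
            {PutRequest: &dynamodb.PutRequest{
                Item: map[string]*dynamodb.AttributeValue{
                    "Artist": {S: aws.String("No One You Know")},
                },
            }},
            {DeleteRequest: &dynamodb.DeleteRequest{
                Key: map[string]*dynamodb.AttributeValue{
                    "Artist": {S: aws.String("Somebody Else")},
                },
            }},
        }
    }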

    ", + "refs": { + "WriteRequests$member": null + } + }, + "WriteRequests": { + "base": null, + "refs": { + "BatchWriteItemRequestMap$value": null + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/dynamodb/2011-12-05/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/dynamodb/2011-12-05/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/dynamodb/2011-12-05/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/dynamodb/2011-12-05/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/dynamodb/2011-12-05/paginators-1.json new file mode 100644 index 000000000..d4075e120 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/dynamodb/2011-12-05/paginators-1.json @@ -0,0 +1,26 @@ +{ + "pagination": { + "BatchGetItem": { + "input_token": "RequestItems", + "output_token": "UnprocessedKeys" + }, + "ListTables": { + "input_token": "ExclusiveStartTableName", + "output_token": "LastEvaluatedTableName", + "limit_key": "Limit", + "result_key": "TableNames" + }, + "Query": { + "input_token": "ExclusiveStartKey", + "output_token": "LastEvaluatedKey", + "limit_key": "Limit", + "result_key": "Items" + }, + "Scan": { + "input_token": "ExclusiveStartKey", + "output_token": "LastEvaluatedKey", + "limit_key": "Limit", + "result_key": "Items" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/dynamodb/2011-12-05/waiters-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/dynamodb/2011-12-05/waiters-2.json new file mode 100644 index 000000000..43a55ca7b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/dynamodb/2011-12-05/waiters-2.json @@ -0,0 +1,35 @@ +{ + "version": 2, + "waiters": { + "TableExists": { + "delay": 20, + "operation": "DescribeTable", + "maxAttempts": 25, + "acceptors": [ + { + "expected": "ACTIVE", + "matcher": "path", + "state": "success", + "argument": "Table.TableStatus" + }, + { + "expected": "ResourceNotFoundException", + "matcher": "error", + "state": "retry" + } + ] + }, + "TableNotExists": { + "delay": 20, + "operation": "DescribeTable", + "maxAttempts": 25, + "acceptors": [ + { + "expected": "ResourceNotFoundException", + "matcher": "error", + "state": "success" + } + ] + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/dynamodb/2012-08-10/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/dynamodb/2012-08-10/api-2.json new file mode 100644 index 000000000..a72e8da65 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/dynamodb/2012-08-10/api-2.json @@ -0,0 +1,1200 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2012-08-10", + "endpointPrefix":"dynamodb", + "jsonVersion":"1.0", + "protocol":"json", + "serviceAbbreviation":"DynamoDB", + "serviceFullName":"Amazon DynamoDB", + "signatureVersion":"v4", + "targetPrefix":"DynamoDB_20120810" + }, + "operations":{ + "BatchGetItem":{ + "name":"BatchGetItem", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchGetItemInput"}, + "output":{"shape":"BatchGetItemOutput"}, + "errors":[ + {"shape":"ProvisionedThroughputExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerError"} + ] + }, + "BatchWriteItem":{ + "name":"BatchWriteItem", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchWriteItemInput"}, + "output":{"shape":"BatchWriteItemOutput"}, + "errors":[ + 
{"shape":"ProvisionedThroughputExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ItemCollectionSizeLimitExceededException"}, + {"shape":"InternalServerError"} + ] + }, + "CreateTable":{ + "name":"CreateTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateTableInput"}, + "output":{"shape":"CreateTableOutput"}, + "errors":[ + {"shape":"ResourceInUseException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalServerError"} + ] + }, + "DeleteItem":{ + "name":"DeleteItem", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteItemInput"}, + "output":{"shape":"DeleteItemOutput"}, + "errors":[ + {"shape":"ConditionalCheckFailedException"}, + {"shape":"ProvisionedThroughputExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ItemCollectionSizeLimitExceededException"}, + {"shape":"InternalServerError"} + ] + }, + "DeleteTable":{ + "name":"DeleteTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteTableInput"}, + "output":{"shape":"DeleteTableOutput"}, + "errors":[ + {"shape":"ResourceInUseException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalServerError"} + ] + }, + "DescribeLimits":{ + "name":"DescribeLimits", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeLimitsInput"}, + "output":{"shape":"DescribeLimitsOutput"}, + "errors":[ + {"shape":"InternalServerError"} + ] + }, + "DescribeTable":{ + "name":"DescribeTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTableInput"}, + "output":{"shape":"DescribeTableOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerError"} + ] + }, + "GetItem":{ + "name":"GetItem", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetItemInput"}, + "output":{"shape":"GetItemOutput"}, + "errors":[ + {"shape":"ProvisionedThroughputExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerError"} + ] + }, + "ListTables":{ + "name":"ListTables", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTablesInput"}, + "output":{"shape":"ListTablesOutput"}, + "errors":[ + {"shape":"InternalServerError"} + ] + }, + "PutItem":{ + "name":"PutItem", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutItemInput"}, + "output":{"shape":"PutItemOutput"}, + "errors":[ + {"shape":"ConditionalCheckFailedException"}, + {"shape":"ProvisionedThroughputExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ItemCollectionSizeLimitExceededException"}, + {"shape":"InternalServerError"} + ] + }, + "Query":{ + "name":"Query", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"QueryInput"}, + "output":{"shape":"QueryOutput"}, + "errors":[ + {"shape":"ProvisionedThroughputExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerError"} + ] + }, + "Scan":{ + "name":"Scan", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ScanInput"}, + "output":{"shape":"ScanOutput"}, + "errors":[ + {"shape":"ProvisionedThroughputExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerError"} + ] + }, + "UpdateItem":{ + "name":"UpdateItem", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateItemInput"}, + "output":{"shape":"UpdateItemOutput"}, + 
"errors":[ + {"shape":"ConditionalCheckFailedException"}, + {"shape":"ProvisionedThroughputExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ItemCollectionSizeLimitExceededException"}, + {"shape":"InternalServerError"} + ] + }, + "UpdateTable":{ + "name":"UpdateTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateTableInput"}, + "output":{"shape":"UpdateTableOutput"}, + "errors":[ + {"shape":"ResourceInUseException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalServerError"} + ] + } + }, + "shapes":{ + "AttributeAction":{ + "type":"string", + "enum":[ + "ADD", + "PUT", + "DELETE" + ] + }, + "AttributeDefinition":{ + "type":"structure", + "required":[ + "AttributeName", + "AttributeType" + ], + "members":{ + "AttributeName":{"shape":"KeySchemaAttributeName"}, + "AttributeType":{"shape":"ScalarAttributeType"} + } + }, + "AttributeDefinitions":{ + "type":"list", + "member":{"shape":"AttributeDefinition"} + }, + "AttributeMap":{ + "type":"map", + "key":{"shape":"AttributeName"}, + "value":{"shape":"AttributeValue"} + }, + "AttributeName":{ + "type":"string", + "max":65535 + }, + "AttributeNameList":{ + "type":"list", + "member":{"shape":"AttributeName"}, + "min":1 + }, + "AttributeUpdates":{ + "type":"map", + "key":{"shape":"AttributeName"}, + "value":{"shape":"AttributeValueUpdate"} + }, + "AttributeValue":{ + "type":"structure", + "members":{ + "S":{"shape":"StringAttributeValue"}, + "N":{"shape":"NumberAttributeValue"}, + "B":{"shape":"BinaryAttributeValue"}, + "SS":{"shape":"StringSetAttributeValue"}, + "NS":{"shape":"NumberSetAttributeValue"}, + "BS":{"shape":"BinarySetAttributeValue"}, + "M":{"shape":"MapAttributeValue"}, + "L":{"shape":"ListAttributeValue"}, + "NULL":{"shape":"NullAttributeValue"}, + "BOOL":{"shape":"BooleanAttributeValue"} + } + }, + "AttributeValueList":{ + "type":"list", + "member":{"shape":"AttributeValue"} + }, + "AttributeValueUpdate":{ + "type":"structure", + "members":{ + "Value":{"shape":"AttributeValue"}, + "Action":{"shape":"AttributeAction"} + } + }, + "Backfilling":{"type":"boolean"}, + "BatchGetItemInput":{ + "type":"structure", + "required":["RequestItems"], + "members":{ + "RequestItems":{"shape":"BatchGetRequestMap"}, + "ReturnConsumedCapacity":{"shape":"ReturnConsumedCapacity"} + } + }, + "BatchGetItemOutput":{ + "type":"structure", + "members":{ + "Responses":{"shape":"BatchGetResponseMap"}, + "UnprocessedKeys":{"shape":"BatchGetRequestMap"}, + "ConsumedCapacity":{"shape":"ConsumedCapacityMultiple"} + } + }, + "BatchGetRequestMap":{ + "type":"map", + "key":{"shape":"TableName"}, + "value":{"shape":"KeysAndAttributes"}, + "max":100, + "min":1 + }, + "BatchGetResponseMap":{ + "type":"map", + "key":{"shape":"TableName"}, + "value":{"shape":"ItemList"} + }, + "BatchWriteItemInput":{ + "type":"structure", + "required":["RequestItems"], + "members":{ + "RequestItems":{"shape":"BatchWriteItemRequestMap"}, + "ReturnConsumedCapacity":{"shape":"ReturnConsumedCapacity"}, + "ReturnItemCollectionMetrics":{"shape":"ReturnItemCollectionMetrics"} + } + }, + "BatchWriteItemOutput":{ + "type":"structure", + "members":{ + "UnprocessedItems":{"shape":"BatchWriteItemRequestMap"}, + "ItemCollectionMetrics":{"shape":"ItemCollectionMetricsPerTable"}, + "ConsumedCapacity":{"shape":"ConsumedCapacityMultiple"} + } + }, + "BatchWriteItemRequestMap":{ + "type":"map", + "key":{"shape":"TableName"}, + "value":{"shape":"WriteRequests"}, + "max":25, + "min":1 + }, + 
"BinaryAttributeValue":{"type":"blob"}, + "BinarySetAttributeValue":{ + "type":"list", + "member":{"shape":"BinaryAttributeValue"} + }, + "BooleanAttributeValue":{"type":"boolean"}, + "BooleanObject":{"type":"boolean"}, + "Capacity":{ + "type":"structure", + "members":{ + "CapacityUnits":{"shape":"ConsumedCapacityUnits"} + } + }, + "ComparisonOperator":{ + "type":"string", + "enum":[ + "EQ", + "NE", + "IN", + "LE", + "LT", + "GE", + "GT", + "BETWEEN", + "NOT_NULL", + "NULL", + "CONTAINS", + "NOT_CONTAINS", + "BEGINS_WITH" + ] + }, + "Condition":{ + "type":"structure", + "required":["ComparisonOperator"], + "members":{ + "AttributeValueList":{"shape":"AttributeValueList"}, + "ComparisonOperator":{"shape":"ComparisonOperator"} + } + }, + "ConditionExpression":{"type":"string"}, + "ConditionalCheckFailedException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "ConditionalOperator":{ + "type":"string", + "enum":[ + "AND", + "OR" + ] + }, + "ConsistentRead":{"type":"boolean"}, + "ConsumedCapacity":{ + "type":"structure", + "members":{ + "TableName":{"shape":"TableName"}, + "CapacityUnits":{"shape":"ConsumedCapacityUnits"}, + "Table":{"shape":"Capacity"}, + "LocalSecondaryIndexes":{"shape":"SecondaryIndexesCapacityMap"}, + "GlobalSecondaryIndexes":{"shape":"SecondaryIndexesCapacityMap"} + } + }, + "ConsumedCapacityMultiple":{ + "type":"list", + "member":{"shape":"ConsumedCapacity"} + }, + "ConsumedCapacityUnits":{"type":"double"}, + "CreateGlobalSecondaryIndexAction":{ + "type":"structure", + "required":[ + "IndexName", + "KeySchema", + "Projection", + "ProvisionedThroughput" + ], + "members":{ + "IndexName":{"shape":"IndexName"}, + "KeySchema":{"shape":"KeySchema"}, + "Projection":{"shape":"Projection"}, + "ProvisionedThroughput":{"shape":"ProvisionedThroughput"} + } + }, + "CreateTableInput":{ + "type":"structure", + "required":[ + "AttributeDefinitions", + "TableName", + "KeySchema", + "ProvisionedThroughput" + ], + "members":{ + "AttributeDefinitions":{"shape":"AttributeDefinitions"}, + "TableName":{"shape":"TableName"}, + "KeySchema":{"shape":"KeySchema"}, + "LocalSecondaryIndexes":{"shape":"LocalSecondaryIndexList"}, + "GlobalSecondaryIndexes":{"shape":"GlobalSecondaryIndexList"}, + "ProvisionedThroughput":{"shape":"ProvisionedThroughput"}, + "StreamSpecification":{"shape":"StreamSpecification"} + } + }, + "CreateTableOutput":{ + "type":"structure", + "members":{ + "TableDescription":{"shape":"TableDescription"} + } + }, + "Date":{"type":"timestamp"}, + "DeleteGlobalSecondaryIndexAction":{ + "type":"structure", + "required":["IndexName"], + "members":{ + "IndexName":{"shape":"IndexName"} + } + }, + "DeleteItemInput":{ + "type":"structure", + "required":[ + "TableName", + "Key" + ], + "members":{ + "TableName":{"shape":"TableName"}, + "Key":{"shape":"Key"}, + "Expected":{"shape":"ExpectedAttributeMap"}, + "ConditionalOperator":{"shape":"ConditionalOperator"}, + "ReturnValues":{"shape":"ReturnValue"}, + "ReturnConsumedCapacity":{"shape":"ReturnConsumedCapacity"}, + "ReturnItemCollectionMetrics":{"shape":"ReturnItemCollectionMetrics"}, + "ConditionExpression":{"shape":"ConditionExpression"}, + "ExpressionAttributeNames":{"shape":"ExpressionAttributeNameMap"}, + "ExpressionAttributeValues":{"shape":"ExpressionAttributeValueMap"} + } + }, + "DeleteItemOutput":{ + "type":"structure", + "members":{ + "Attributes":{"shape":"AttributeMap"}, + "ConsumedCapacity":{"shape":"ConsumedCapacity"}, + 
"ItemCollectionMetrics":{"shape":"ItemCollectionMetrics"} + } + }, + "DeleteRequest":{ + "type":"structure", + "required":["Key"], + "members":{ + "Key":{"shape":"Key"} + } + }, + "DeleteTableInput":{ + "type":"structure", + "required":["TableName"], + "members":{ + "TableName":{"shape":"TableName"} + } + }, + "DeleteTableOutput":{ + "type":"structure", + "members":{ + "TableDescription":{"shape":"TableDescription"} + } + }, + "DescribeLimitsInput":{ + "type":"structure", + "members":{ + } + }, + "DescribeLimitsOutput":{ + "type":"structure", + "members":{ + "AccountMaxReadCapacityUnits":{"shape":"PositiveLongObject"}, + "AccountMaxWriteCapacityUnits":{"shape":"PositiveLongObject"}, + "TableMaxReadCapacityUnits":{"shape":"PositiveLongObject"}, + "TableMaxWriteCapacityUnits":{"shape":"PositiveLongObject"} + } + }, + "DescribeTableInput":{ + "type":"structure", + "required":["TableName"], + "members":{ + "TableName":{"shape":"TableName"} + } + }, + "DescribeTableOutput":{ + "type":"structure", + "members":{ + "Table":{"shape":"TableDescription"} + } + }, + "ErrorMessage":{"type":"string"}, + "ExpectedAttributeMap":{ + "type":"map", + "key":{"shape":"AttributeName"}, + "value":{"shape":"ExpectedAttributeValue"} + }, + "ExpectedAttributeValue":{ + "type":"structure", + "members":{ + "Value":{"shape":"AttributeValue"}, + "Exists":{"shape":"BooleanObject"}, + "ComparisonOperator":{"shape":"ComparisonOperator"}, + "AttributeValueList":{"shape":"AttributeValueList"} + } + }, + "ExpressionAttributeNameMap":{ + "type":"map", + "key":{"shape":"ExpressionAttributeNameVariable"}, + "value":{"shape":"AttributeName"} + }, + "ExpressionAttributeNameVariable":{"type":"string"}, + "ExpressionAttributeValueMap":{ + "type":"map", + "key":{"shape":"ExpressionAttributeValueVariable"}, + "value":{"shape":"AttributeValue"} + }, + "ExpressionAttributeValueVariable":{"type":"string"}, + "FilterConditionMap":{ + "type":"map", + "key":{"shape":"AttributeName"}, + "value":{"shape":"Condition"} + }, + "GetItemInput":{ + "type":"structure", + "required":[ + "TableName", + "Key" + ], + "members":{ + "TableName":{"shape":"TableName"}, + "Key":{"shape":"Key"}, + "AttributesToGet":{"shape":"AttributeNameList"}, + "ConsistentRead":{"shape":"ConsistentRead"}, + "ReturnConsumedCapacity":{"shape":"ReturnConsumedCapacity"}, + "ProjectionExpression":{"shape":"ProjectionExpression"}, + "ExpressionAttributeNames":{"shape":"ExpressionAttributeNameMap"} + } + }, + "GetItemOutput":{ + "type":"structure", + "members":{ + "Item":{"shape":"AttributeMap"}, + "ConsumedCapacity":{"shape":"ConsumedCapacity"} + } + }, + "GlobalSecondaryIndex":{ + "type":"structure", + "required":[ + "IndexName", + "KeySchema", + "Projection", + "ProvisionedThroughput" + ], + "members":{ + "IndexName":{"shape":"IndexName"}, + "KeySchema":{"shape":"KeySchema"}, + "Projection":{"shape":"Projection"}, + "ProvisionedThroughput":{"shape":"ProvisionedThroughput"} + } + }, + "GlobalSecondaryIndexDescription":{ + "type":"structure", + "members":{ + "IndexName":{"shape":"IndexName"}, + "KeySchema":{"shape":"KeySchema"}, + "Projection":{"shape":"Projection"}, + "IndexStatus":{"shape":"IndexStatus"}, + "Backfilling":{"shape":"Backfilling"}, + "ProvisionedThroughput":{"shape":"ProvisionedThroughputDescription"}, + "IndexSizeBytes":{"shape":"Long"}, + "ItemCount":{"shape":"Long"}, + "IndexArn":{"shape":"String"} + } + }, + "GlobalSecondaryIndexDescriptionList":{ + "type":"list", + "member":{"shape":"GlobalSecondaryIndexDescription"} + }, + "GlobalSecondaryIndexList":{ + 
"type":"list", + "member":{"shape":"GlobalSecondaryIndex"} + }, + "GlobalSecondaryIndexUpdate":{ + "type":"structure", + "members":{ + "Update":{"shape":"UpdateGlobalSecondaryIndexAction"}, + "Create":{"shape":"CreateGlobalSecondaryIndexAction"}, + "Delete":{"shape":"DeleteGlobalSecondaryIndexAction"} + } + }, + "GlobalSecondaryIndexUpdateList":{ + "type":"list", + "member":{"shape":"GlobalSecondaryIndexUpdate"} + }, + "IndexName":{ + "type":"string", + "max":255, + "min":3, + "pattern":"[a-zA-Z0-9_.-]+" + }, + "IndexStatus":{ + "type":"string", + "enum":[ + "CREATING", + "UPDATING", + "DELETING", + "ACTIVE" + ] + }, + "Integer":{"type":"integer"}, + "InternalServerError":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true, + "fault":true + }, + "ItemCollectionKeyAttributeMap":{ + "type":"map", + "key":{"shape":"AttributeName"}, + "value":{"shape":"AttributeValue"} + }, + "ItemCollectionMetrics":{ + "type":"structure", + "members":{ + "ItemCollectionKey":{"shape":"ItemCollectionKeyAttributeMap"}, + "SizeEstimateRangeGB":{"shape":"ItemCollectionSizeEstimateRange"} + } + }, + "ItemCollectionMetricsMultiple":{ + "type":"list", + "member":{"shape":"ItemCollectionMetrics"} + }, + "ItemCollectionMetricsPerTable":{ + "type":"map", + "key":{"shape":"TableName"}, + "value":{"shape":"ItemCollectionMetricsMultiple"} + }, + "ItemCollectionSizeEstimateBound":{"type":"double"}, + "ItemCollectionSizeEstimateRange":{ + "type":"list", + "member":{"shape":"ItemCollectionSizeEstimateBound"} + }, + "ItemCollectionSizeLimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "ItemList":{ + "type":"list", + "member":{"shape":"AttributeMap"} + }, + "Key":{ + "type":"map", + "key":{"shape":"AttributeName"}, + "value":{"shape":"AttributeValue"} + }, + "KeyConditions":{ + "type":"map", + "key":{"shape":"AttributeName"}, + "value":{"shape":"Condition"} + }, + "KeyExpression":{"type":"string"}, + "KeyList":{ + "type":"list", + "member":{"shape":"Key"}, + "max":100, + "min":1 + }, + "KeySchema":{ + "type":"list", + "member":{"shape":"KeySchemaElement"}, + "max":2, + "min":1 + }, + "KeySchemaAttributeName":{ + "type":"string", + "max":255, + "min":1 + }, + "KeySchemaElement":{ + "type":"structure", + "required":[ + "AttributeName", + "KeyType" + ], + "members":{ + "AttributeName":{"shape":"KeySchemaAttributeName"}, + "KeyType":{"shape":"KeyType"} + } + }, + "KeyType":{ + "type":"string", + "enum":[ + "HASH", + "RANGE" + ] + }, + "KeysAndAttributes":{ + "type":"structure", + "required":["Keys"], + "members":{ + "Keys":{"shape":"KeyList"}, + "AttributesToGet":{"shape":"AttributeNameList"}, + "ConsistentRead":{"shape":"ConsistentRead"}, + "ProjectionExpression":{"shape":"ProjectionExpression"}, + "ExpressionAttributeNames":{"shape":"ExpressionAttributeNameMap"} + } + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "ListAttributeValue":{ + "type":"list", + "member":{"shape":"AttributeValue"} + }, + "ListTablesInput":{ + "type":"structure", + "members":{ + "ExclusiveStartTableName":{"shape":"TableName"}, + "Limit":{"shape":"ListTablesInputLimit"} + } + }, + "ListTablesInputLimit":{ + "type":"integer", + "max":100, + "min":1 + }, + "ListTablesOutput":{ + "type":"structure", + "members":{ + "TableNames":{"shape":"TableNameList"}, + "LastEvaluatedTableName":{"shape":"TableName"} + } + }, + "LocalSecondaryIndex":{ + 
"type":"structure", + "required":[ + "IndexName", + "KeySchema", + "Projection" + ], + "members":{ + "IndexName":{"shape":"IndexName"}, + "KeySchema":{"shape":"KeySchema"}, + "Projection":{"shape":"Projection"} + } + }, + "LocalSecondaryIndexDescription":{ + "type":"structure", + "members":{ + "IndexName":{"shape":"IndexName"}, + "KeySchema":{"shape":"KeySchema"}, + "Projection":{"shape":"Projection"}, + "IndexSizeBytes":{"shape":"Long"}, + "ItemCount":{"shape":"Long"}, + "IndexArn":{"shape":"String"} + } + }, + "LocalSecondaryIndexDescriptionList":{ + "type":"list", + "member":{"shape":"LocalSecondaryIndexDescription"} + }, + "LocalSecondaryIndexList":{ + "type":"list", + "member":{"shape":"LocalSecondaryIndex"} + }, + "Long":{"type":"long"}, + "MapAttributeValue":{ + "type":"map", + "key":{"shape":"AttributeName"}, + "value":{"shape":"AttributeValue"} + }, + "NonKeyAttributeName":{ + "type":"string", + "max":255, + "min":1 + }, + "NonKeyAttributeNameList":{ + "type":"list", + "member":{"shape":"NonKeyAttributeName"}, + "max":20, + "min":1 + }, + "NullAttributeValue":{"type":"boolean"}, + "NumberAttributeValue":{"type":"string"}, + "NumberSetAttributeValue":{ + "type":"list", + "member":{"shape":"NumberAttributeValue"} + }, + "PositiveIntegerObject":{ + "type":"integer", + "min":1 + }, + "PositiveLongObject":{ + "type":"long", + "min":1 + }, + "Projection":{ + "type":"structure", + "members":{ + "ProjectionType":{"shape":"ProjectionType"}, + "NonKeyAttributes":{"shape":"NonKeyAttributeNameList"} + } + }, + "ProjectionExpression":{"type":"string"}, + "ProjectionType":{ + "type":"string", + "enum":[ + "ALL", + "KEYS_ONLY", + "INCLUDE" + ] + }, + "ProvisionedThroughput":{ + "type":"structure", + "required":[ + "ReadCapacityUnits", + "WriteCapacityUnits" + ], + "members":{ + "ReadCapacityUnits":{"shape":"PositiveLongObject"}, + "WriteCapacityUnits":{"shape":"PositiveLongObject"} + } + }, + "ProvisionedThroughputDescription":{ + "type":"structure", + "members":{ + "LastIncreaseDateTime":{"shape":"Date"}, + "LastDecreaseDateTime":{"shape":"Date"}, + "NumberOfDecreasesToday":{"shape":"PositiveLongObject"}, + "ReadCapacityUnits":{"shape":"PositiveLongObject"}, + "WriteCapacityUnits":{"shape":"PositiveLongObject"} + } + }, + "ProvisionedThroughputExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "PutItemInput":{ + "type":"structure", + "required":[ + "TableName", + "Item" + ], + "members":{ + "TableName":{"shape":"TableName"}, + "Item":{"shape":"PutItemInputAttributeMap"}, + "Expected":{"shape":"ExpectedAttributeMap"}, + "ReturnValues":{"shape":"ReturnValue"}, + "ReturnConsumedCapacity":{"shape":"ReturnConsumedCapacity"}, + "ReturnItemCollectionMetrics":{"shape":"ReturnItemCollectionMetrics"}, + "ConditionalOperator":{"shape":"ConditionalOperator"}, + "ConditionExpression":{"shape":"ConditionExpression"}, + "ExpressionAttributeNames":{"shape":"ExpressionAttributeNameMap"}, + "ExpressionAttributeValues":{"shape":"ExpressionAttributeValueMap"} + } + }, + "PutItemInputAttributeMap":{ + "type":"map", + "key":{"shape":"AttributeName"}, + "value":{"shape":"AttributeValue"} + }, + "PutItemOutput":{ + "type":"structure", + "members":{ + "Attributes":{"shape":"AttributeMap"}, + "ConsumedCapacity":{"shape":"ConsumedCapacity"}, + "ItemCollectionMetrics":{"shape":"ItemCollectionMetrics"} + } + }, + "PutRequest":{ + "type":"structure", + "required":["Item"], + "members":{ + "Item":{"shape":"PutItemInputAttributeMap"} + } + }, + 
"QueryInput":{ + "type":"structure", + "required":["TableName"], + "members":{ + "TableName":{"shape":"TableName"}, + "IndexName":{"shape":"IndexName"}, + "Select":{"shape":"Select"}, + "AttributesToGet":{"shape":"AttributeNameList"}, + "Limit":{"shape":"PositiveIntegerObject"}, + "ConsistentRead":{"shape":"ConsistentRead"}, + "KeyConditions":{"shape":"KeyConditions"}, + "QueryFilter":{"shape":"FilterConditionMap"}, + "ConditionalOperator":{"shape":"ConditionalOperator"}, + "ScanIndexForward":{"shape":"BooleanObject"}, + "ExclusiveStartKey":{"shape":"Key"}, + "ReturnConsumedCapacity":{"shape":"ReturnConsumedCapacity"}, + "ProjectionExpression":{"shape":"ProjectionExpression"}, + "FilterExpression":{"shape":"ConditionExpression"}, + "KeyConditionExpression":{"shape":"KeyExpression"}, + "ExpressionAttributeNames":{"shape":"ExpressionAttributeNameMap"}, + "ExpressionAttributeValues":{"shape":"ExpressionAttributeValueMap"} + } + }, + "QueryOutput":{ + "type":"structure", + "members":{ + "Items":{"shape":"ItemList"}, + "Count":{"shape":"Integer"}, + "ScannedCount":{"shape":"Integer"}, + "LastEvaluatedKey":{"shape":"Key"}, + "ConsumedCapacity":{"shape":"ConsumedCapacity"} + } + }, + "ResourceInUseException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "ReturnConsumedCapacity":{ + "type":"string", + "enum":[ + "INDEXES", + "TOTAL", + "NONE" + ] + }, + "ReturnItemCollectionMetrics":{ + "type":"string", + "enum":[ + "SIZE", + "NONE" + ] + }, + "ReturnValue":{ + "type":"string", + "enum":[ + "NONE", + "ALL_OLD", + "UPDATED_OLD", + "ALL_NEW", + "UPDATED_NEW" + ] + }, + "ScalarAttributeType":{ + "type":"string", + "enum":[ + "S", + "N", + "B" + ] + }, + "ScanInput":{ + "type":"structure", + "required":["TableName"], + "members":{ + "TableName":{"shape":"TableName"}, + "IndexName":{"shape":"IndexName"}, + "AttributesToGet":{"shape":"AttributeNameList"}, + "Limit":{"shape":"PositiveIntegerObject"}, + "Select":{"shape":"Select"}, + "ScanFilter":{"shape":"FilterConditionMap"}, + "ConditionalOperator":{"shape":"ConditionalOperator"}, + "ExclusiveStartKey":{"shape":"Key"}, + "ReturnConsumedCapacity":{"shape":"ReturnConsumedCapacity"}, + "TotalSegments":{"shape":"ScanTotalSegments"}, + "Segment":{"shape":"ScanSegment"}, + "ProjectionExpression":{"shape":"ProjectionExpression"}, + "FilterExpression":{"shape":"ConditionExpression"}, + "ExpressionAttributeNames":{"shape":"ExpressionAttributeNameMap"}, + "ExpressionAttributeValues":{"shape":"ExpressionAttributeValueMap"}, + "ConsistentRead":{"shape":"ConsistentRead"} + } + }, + "ScanOutput":{ + "type":"structure", + "members":{ + "Items":{"shape":"ItemList"}, + "Count":{"shape":"Integer"}, + "ScannedCount":{"shape":"Integer"}, + "LastEvaluatedKey":{"shape":"Key"}, + "ConsumedCapacity":{"shape":"ConsumedCapacity"} + } + }, + "ScanSegment":{ + "type":"integer", + "max":999999, + "min":0 + }, + "ScanTotalSegments":{ + "type":"integer", + "max":1000000, + "min":1 + }, + "SecondaryIndexesCapacityMap":{ + "type":"map", + "key":{"shape":"IndexName"}, + "value":{"shape":"Capacity"} + }, + "Select":{ + "type":"string", + "enum":[ + "ALL_ATTRIBUTES", + "ALL_PROJECTED_ATTRIBUTES", + "SPECIFIC_ATTRIBUTES", + "COUNT" + ] + }, + "StreamArn":{ + "type":"string", + "max":1024, + "min":37 + }, + "StreamEnabled":{"type":"boolean"}, + "StreamSpecification":{ + "type":"structure", + 
"members":{ + "StreamEnabled":{"shape":"StreamEnabled"}, + "StreamViewType":{"shape":"StreamViewType"} + } + }, + "StreamViewType":{ + "type":"string", + "enum":[ + "NEW_IMAGE", + "OLD_IMAGE", + "NEW_AND_OLD_IMAGES", + "KEYS_ONLY" + ] + }, + "String":{"type":"string"}, + "StringAttributeValue":{"type":"string"}, + "StringSetAttributeValue":{ + "type":"list", + "member":{"shape":"StringAttributeValue"} + }, + "TableDescription":{ + "type":"structure", + "members":{ + "AttributeDefinitions":{"shape":"AttributeDefinitions"}, + "TableName":{"shape":"TableName"}, + "KeySchema":{"shape":"KeySchema"}, + "TableStatus":{"shape":"TableStatus"}, + "CreationDateTime":{"shape":"Date"}, + "ProvisionedThroughput":{"shape":"ProvisionedThroughputDescription"}, + "TableSizeBytes":{"shape":"Long"}, + "ItemCount":{"shape":"Long"}, + "TableArn":{"shape":"String"}, + "LocalSecondaryIndexes":{"shape":"LocalSecondaryIndexDescriptionList"}, + "GlobalSecondaryIndexes":{"shape":"GlobalSecondaryIndexDescriptionList"}, + "StreamSpecification":{"shape":"StreamSpecification"}, + "LatestStreamLabel":{"shape":"String"}, + "LatestStreamArn":{"shape":"StreamArn"} + } + }, + "TableName":{ + "type":"string", + "max":255, + "min":3, + "pattern":"[a-zA-Z0-9_.-]+" + }, + "TableNameList":{ + "type":"list", + "member":{"shape":"TableName"} + }, + "TableStatus":{ + "type":"string", + "enum":[ + "CREATING", + "UPDATING", + "DELETING", + "ACTIVE" + ] + }, + "UpdateExpression":{"type":"string"}, + "UpdateGlobalSecondaryIndexAction":{ + "type":"structure", + "required":[ + "IndexName", + "ProvisionedThroughput" + ], + "members":{ + "IndexName":{"shape":"IndexName"}, + "ProvisionedThroughput":{"shape":"ProvisionedThroughput"} + } + }, + "UpdateItemInput":{ + "type":"structure", + "required":[ + "TableName", + "Key" + ], + "members":{ + "TableName":{"shape":"TableName"}, + "Key":{"shape":"Key"}, + "AttributeUpdates":{"shape":"AttributeUpdates"}, + "Expected":{"shape":"ExpectedAttributeMap"}, + "ConditionalOperator":{"shape":"ConditionalOperator"}, + "ReturnValues":{"shape":"ReturnValue"}, + "ReturnConsumedCapacity":{"shape":"ReturnConsumedCapacity"}, + "ReturnItemCollectionMetrics":{"shape":"ReturnItemCollectionMetrics"}, + "UpdateExpression":{"shape":"UpdateExpression"}, + "ConditionExpression":{"shape":"ConditionExpression"}, + "ExpressionAttributeNames":{"shape":"ExpressionAttributeNameMap"}, + "ExpressionAttributeValues":{"shape":"ExpressionAttributeValueMap"} + } + }, + "UpdateItemOutput":{ + "type":"structure", + "members":{ + "Attributes":{"shape":"AttributeMap"}, + "ConsumedCapacity":{"shape":"ConsumedCapacity"}, + "ItemCollectionMetrics":{"shape":"ItemCollectionMetrics"} + } + }, + "UpdateTableInput":{ + "type":"structure", + "required":["TableName"], + "members":{ + "AttributeDefinitions":{"shape":"AttributeDefinitions"}, + "TableName":{"shape":"TableName"}, + "ProvisionedThroughput":{"shape":"ProvisionedThroughput"}, + "GlobalSecondaryIndexUpdates":{"shape":"GlobalSecondaryIndexUpdateList"}, + "StreamSpecification":{"shape":"StreamSpecification"} + } + }, + "UpdateTableOutput":{ + "type":"structure", + "members":{ + "TableDescription":{"shape":"TableDescription"} + } + }, + "WriteRequest":{ + "type":"structure", + "members":{ + "PutRequest":{"shape":"PutRequest"}, + "DeleteRequest":{"shape":"DeleteRequest"} + } + }, + "WriteRequests":{ + "type":"list", + "member":{"shape":"WriteRequest"}, + "max":25, + "min":1 + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/dynamodb/2012-08-10/docs-2.json 
b/vendor/github.com/aws/aws-sdk-go/models/apis/dynamodb/2012-08-10/docs-2.json new file mode 100644 index 000000000..38082d233 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/dynamodb/2012-08-10/docs-2.json @@ -0,0 +1,1028 @@ +{ + "version": "2.0", + "service": "Amazon DynamoDB

    This is the Amazon DynamoDB API Reference. This guide provides descriptions of the low-level DynamoDB API.

    This guide is intended for use with the following DynamoDB documentation:

    Instead of making the requests to the low-level DynamoDB API directly from your application, we recommend that you use the AWS Software Development Kits (SDKs). The easy-to-use libraries in the AWS SDKs make it unnecessary to call the low-level DynamoDB API directly from your application. The libraries take care of request authentication, serialization, and connection management. For more information, see Using the AWS SDKs with DynamoDB in the Amazon DynamoDB Developer Guide.

    If you decide to code against the low-level DynamoDB API directly, you will need to write the necessary code to authenticate your requests. For more information on signing your requests, see Using the DynamoDB API in the Amazon DynamoDB Developer Guide.

    The following are short descriptions of each low-level API action, organized by function.

    Managing Tables

    • CreateTable - Creates a table with user-specified provisioned throughput settings. You must define a primary key for the table - either a simple primary key (partition key), or a composite primary key (partition key and sort key). Optionally, you can create one or more secondary indexes, which provide fast data access using non-key attributes.

    • DescribeTable - Returns metadata for a table, such as table size, status, and index information.

    • UpdateTable - Modifies the provisioned throughput settings for a table. Optionally, you can modify the provisioned throughput settings for global secondary indexes on the table.

    • ListTables - Returns a list of all tables associated with the current AWS account and endpoint.

    • DeleteTable - Deletes a table and all of its indexes.

    For conceptual information about managing tables, see Working with Tables in the Amazon DynamoDB Developer Guide.

    Reading Data

    • GetItem - Returns a set of attributes for the item that has a given primary key. By default, GetItem performs an eventually consistent read; however, applications can request a strongly consistent read instead (a minimal sketch follows this list).

    • BatchGetItem - Performs multiple GetItem requests for data items using their primary keys, from one table or multiple tables. The response from BatchGetItem has a size limit of 16 MB and returns a maximum of 100 items. Both eventually consistent and strongly consistent reads can be used.

    • Query - Returns one or more items from a table or a secondary index. You must provide a specific value for the partition key. You can narrow the scope of the query using comparison operators against a sort key value, or on the index key. Query supports either eventual or strong consistency. A single response has a size limit of 1 MB.

    • Scan - Reads every item in a table; the result set is eventually consistent. You can limit the number of items returned by filtering the data attributes, using conditional expressions. Scan can be used to enable ad-hoc querying of a table against non-key attributes; however, since this is a full table scan without using an index, Scan should not be used for any application query use case that requires predictable performance.

    For conceptual information about reading data, see Working with Items and Query and Scan Operations in the Amazon DynamoDB Developer Guide.
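    The sketch referenced above, using the vendored Go client; the table, key attributes, and values are illustrative placeholders:

    // getStrong performs a strongly consistent GetItem read.
    // Assumes the aws and dynamodb imports from github.com/aws/aws-sdk-go.
    func getStrong(svc *dynamodb.DynamoDB) (map[string]*dynamodb.AttributeValue, error) {
        out, err := svc.GetItem(&dynamodb.GetItemInput{
            TableName: aws.String("Music"),
            Key: map[string]*dynamodb.AttributeValue{
                "Artist":    {S: aws.String("No One You Know")},
                "SongTitle": {S: aws.String("Call Me Today")},
            },
            ConsistentRead: aws.Bool(true), // default is eventually consistent
        })
        if err != nil {
            return nil, err
        }
        return out.Item, nil
    }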

    Modifying Data

    • PutItem - Creates a new item, or replaces an existing item with a new item (including all the attributes). By default, if an item in the table already exists with the same primary key, the new item completely replaces the existing item. You can use conditional operators to replace an item only if its attribute values match certain conditions, or to insert a new item only if that item doesn't already exist.

    • UpdateItem - Modifies the attributes of an existing item. You can also use conditional operators to perform an update only if the item's attribute values match certain conditions.

    • DeleteItem - Deletes an item in a table by primary key. You can use conditional operators to delete an item only if its attribute values match certain conditions.

    • BatchWriteItem - Performs multiple PutItem and DeleteItem requests across multiple tables in a single request. A failure of any request(s) in the batch will not cause the entire BatchWriteItem operation to fail. Supports batches of up to 25 items to put or delete, with a maximum total request size of 16 MB.

    For conceptual information about modifying data, see Working with Items and Query and Scan Operations in the Amazon DynamoDB Developer Guide.

    ", + "operations": { + "BatchGetItem": "

    The BatchGetItem operation returns the attributes of one or more items from one or more tables. You identify requested items by primary key.

    A single operation can retrieve up to 16 MB of data, which can contain as many as 100 items. BatchGetItem will return a partial result if the response size limit is exceeded, the table's provisioned throughput is exceeded, or an internal processing failure occurs. If a partial result is returned, the operation returns a value for UnprocessedKeys. You can use this value to retry the operation starting with the next item to get.

    If you request more than 100 items, BatchGetItem will return a ValidationException with the message \"Too many items requested for the BatchGetItem call\".

    For example, if you ask to retrieve 100 items, but each individual item is 300 KB in size, the system returns 52 items (so as not to exceed the 16 MB limit). It also returns an appropriate UnprocessedKeys value so you can get the next page of results. If desired, your application can include its own logic to assemble the pages of results into one data set.

    If none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchGetItem will return a ProvisionedThroughputExceededException. If at least one of the items is successfully processed, then BatchGetItem completes successfully, while returning the keys of the unread items in UnprocessedKeys.

    If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed.

    For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide.

    By default, BatchGetItem performs eventually consistent reads on every table in the request. If you want strongly consistent reads instead, you can set ConsistentRead to true for any or all tables.

    In order to minimize response latency, BatchGetItem retrieves items in parallel.

    When designing your application, keep in mind that DynamoDB does not return items in any particular order. To help parse the response by item, include the primary key values for the items in your request in the AttributesToGet parameter.

    If a requested item does not exist, it is not returned in the result. Requests for nonexistent items consume the minimum read capacity units according to the type of read. For more information, see Capacity Units Calculations in the Amazon DynamoDB Developer Guide.
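    A hedged sketch of the retry guidance above, looping on UnprocessedKeys with exponential backoff; the table name is a placeholder and error handling is abbreviated:

    // batchGetAll re-requests UnprocessedKeys until none remain.
    // Assumes the time, aws, and dynamodb imports from github.com/aws/aws-sdk-go.
    func batchGetAll(svc *dynamodb.DynamoDB, keys []map[string]*dynamodb.AttributeValue) error {
        req := map[string]*dynamodb.KeysAndAttributes{
            "Music": {Keys: keys}, // hypothetical table
        }
        backoff := 50 * time.Millisecond
        for len(req) > 0 {
            out, err := svc.BatchGetItem(&dynamodb.BatchGetItemInput{RequestItems: req})
            if err != nil {
                return err
            }
            // ... consume out.Responses["Music"] here ...
            req = out.UnprocessedKeys
            if len(req) > 0 {
                time.Sleep(backoff) // exponential backoff before retrying
                backoff *= 2
            }
        }
        return nil
    }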

    ", + "BatchWriteItem": "

    The BatchWriteItem operation puts or deletes multiple items in one or more tables. A single call to BatchWriteItem can write up to 16 MB of data, which can comprise as many as 25 put or delete requests. Individual items to be written can be as large as 400 KB.

    BatchWriteItem cannot update items. To update items, use the UpdateItem API.

    The individual PutItem and DeleteItem operations specified in BatchWriteItem are atomic; however BatchWriteItem as a whole is not. If any requested operations fail because the table's provisioned throughput is exceeded or an internal processing failure occurs, the failed operations are returned in the UnprocessedItems response parameter. You can investigate and optionally resend the requests. Typically, you would call BatchWriteItem in a loop. Each iteration would check for unprocessed items and submit a new BatchWriteItem request with those unprocessed items until all items have been processed.
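    For instance, the loop described above might look like the following sketch with the vendored Go client (placeholder table name; production code would add exponential backoff between iterations, as recommended below):

    // writeBatch resubmits UnprocessedItems until the map comes back empty.
    func writeBatch(svc *dynamodb.DynamoDB, reqs []*dynamodb.WriteRequest) error {
        pending := map[string][]*dynamodb.WriteRequest{"Music": reqs}
        for len(pending) > 0 {
            out, err := svc.BatchWriteItem(&dynamodb.BatchWriteItemInput{
                RequestItems: pending,
            })
            if err != nil {
                return err
            }
            pending = out.UnprocessedItems
        }
        return nil
    }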

    Note that if none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchWriteItem will return a ProvisionedThroughputExceededException.

    If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed.

    For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide.

    With BatchWriteItem, you can efficiently write or delete large amounts of data, such as from Amazon Elastic MapReduce (EMR), or copy data from another database into DynamoDB. In order to improve performance with these large-scale operations, BatchWriteItem does not behave in the same way as individual PutItem and DeleteItem calls would. For example, you cannot specify conditions on individual put and delete requests, and BatchWriteItem does not return deleted items in the response.

    If you use a programming language that supports concurrency, you can use threads to write items in parallel. Your application must include the necessary logic to manage the threads. With languages that don't support threading, you must update or delete the specified items one at a time. In both situations, BatchWriteItem provides an alternative where the API performs the specified put and delete operations in parallel, giving you the power of the thread pool approach without having to introduce complexity into your application.

    Parallel processing reduces latency, but each specified put and delete request consumes the same number of write capacity units whether it is processed in parallel or not. Delete operations on nonexistent items consume one write capacity unit.

    If one or more of the following is true, DynamoDB rejects the entire batch write operation:

    • One or more tables specified in the BatchWriteItem request does not exist.

    • Primary key attributes specified on an item in the request do not match those in the corresponding table's primary key schema.

    • You try to perform multiple operations on the same item in the same BatchWriteItem request. For example, you cannot put and delete the same item in the same BatchWriteItem request.

    • There are more than 25 requests in the batch.

    • Any individual item in a batch exceeds 400 KB.

    • The total request size exceeds 16 MB.

    ", + "CreateTable": "

    The CreateTable operation adds a new table to your account. In an AWS account, table names must be unique within each region. That is, you can have two tables with the same name if you create the tables in different regions.

    CreateTable is an asynchronous operation. Upon receiving a CreateTable request, DynamoDB immediately returns a response with a TableStatus of CREATING. After the table is created, DynamoDB sets the TableStatus to ACTIVE. You can perform read and write operations only on an ACTIVE table.

    You can optionally define secondary indexes on the new table, as part of the CreateTable operation. If you want to create multiple tables with secondary indexes on them, you must create the tables sequentially. Only one table with secondary indexes can be in the CREATING state at any given time.

    You can use the DescribeTable API to check the table status.
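    As one way to wire this together, the sketch below pairs CreateTable with the SDK's generated TableExists waiter, which polls DescribeTable until the table is ACTIVE; the table, attribute names, and throughput values are placeholders:

    // createAndWait creates a hash-and-range table, then blocks until it is ACTIVE.
    func createAndWait(svc *dynamodb.DynamoDB) error {
        _, err := svc.CreateTable(&dynamodb.CreateTableInput{
            TableName: aws.String("Music"),
            AttributeDefinitions: []*dynamodb.AttributeDefinition{
                {AttributeName: aws.String("Artist"), AttributeType: aws.String("S")},
                {AttributeName: aws.String("SongTitle"), AttributeType: aws.String("S")},
            },
            KeySchema: []*dynamodb.KeySchemaElement{
                {AttributeName: aws.String("Artist"), KeyType: aws.String("HASH")},
                {AttributeName: aws.String("SongTitle"), KeyType: aws.String("RANGE")},
            },
            ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
                ReadCapacityUnits:  aws.Int64(5),
                WriteCapacityUnits: aws.Int64(5),
            },
        })
        if err != nil {
            return err
        }
        return svc.WaitUntilTableExists(&dynamodb.DescribeTableInput{
            TableName: aws.String("Music"),
        })
    }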

    ", + "DeleteItem": "

    Deletes a single item in a table by primary key. You can perform a conditional delete operation that deletes the item if it exists, or if it has an expected attribute value.

    In addition to deleting an item, you can also return the item's attribute values in the same operation, using the ReturnValues parameter.

    Unless you specify conditions, DeleteItem is an idempotent operation; running it multiple times on the same item or attribute does not result in an error response.

    Conditional deletes are useful for deleting items only if specific conditions are met. If those conditions are met, DynamoDB performs the delete. Otherwise, the item is not deleted.

    ", + "DeleteTable": "

    The DeleteTable operation deletes a table and all of its items. After a DeleteTable request, the specified table is in the DELETING state until DynamoDB completes the deletion. If the table is in the ACTIVE state, you can delete it. If a table is in the CREATING or UPDATING state, DynamoDB returns a ResourceInUseException. If the specified table does not exist, DynamoDB returns a ResourceNotFoundException. If the table is already in the DELETING state, no error is returned.

    DynamoDB might continue to accept data read and write operations, such as GetItem and PutItem, on a table in the DELETING state until the table deletion is complete.

    When you delete a table, any indexes on that table are also deleted.

    If you have DynamoDB Streams enabled on the table, then the corresponding stream on that table goes into the DISABLED state, and the stream is automatically deleted after 24 hours.

    Use the DescribeTable API to check the status of the table.

    ", + "DescribeLimits": "

    Returns the current provisioned-capacity limits for your AWS account in a region, both for the region as a whole and for any one DynamoDB table that you create there.

    When you establish an AWS account, the account has initial limits on the maximum read capacity units and write capacity units that you can provision across all of your DynamoDB tables in a given region. Also, there are per-table limits that apply when you create a table there. For more information, see the Limits page in the Amazon DynamoDB Developer Guide.

    Although you can increase these limits by filing a case at AWS Support Center, obtaining the increase is not instantaneous. The DescribeLimits API lets you write code to compare the capacity you are currently using to those limits imposed by your account so that you have enough time to apply for an increase before you hit a limit.

    For example, you could use one of the AWS SDKs to do the following:

    1. Call DescribeLimits for a particular region to obtain your current account limits on provisioned capacity there.

    2. Create a variable to hold the aggregate read capacity units provisioned for all your tables in that region, and one to hold the aggregate write capacity units. Zero them both.

    3. Call ListTables to obtain a list of all your DynamoDB tables.

    4. For each table name listed by ListTables, do the following:

      • Call DescribeTable with the table name.

      • Use the data returned by DescribeTable to add the read capacity units and write capacity units provisioned for the table itself to your variables.

      • If the table has one or more global secondary indexes (GSIs), loop over these GSIs and add their provisioned capacity values to your variables as well.

    5. Report the account limits for that region returned by DescribeLimits, along with the total current provisioned capacity levels you have calculated.

    This will let you see whether you are getting close to your account-level limits.

    The per-table limits apply only when you are creating a new table. They restrict the sum of the provisioned capacity of the new table itself and all its global secondary indexes.

    For existing tables and their GSIs, DynamoDB will not let you increase provisioned capacity extremely rapidly, but the only upper limit that applies is that the aggregate provisioned capacity over all your tables and GSIs cannot exceed either of the per-account limits.

    DescribeLimits should only be called periodically. You can expect throttling errors if you call it more than once in a minute.

    The DescribeLimits Request element has no content.
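
    A Go sketch of the five-step procedure above; backoff between DescribeTable calls and nicer reporting are left out for brevity:

```go
import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// reportCapacity sums the provisioned read/write capacity of every table
// and GSI in the region and reports it next to the account limits.
func reportCapacity(svc *dynamodb.DynamoDB) error {
	limits, err := svc.DescribeLimits(&dynamodb.DescribeLimitsInput{}) // step 1
	if err != nil {
		return err
	}
	var reads, writes int64 // step 2
	var inner error
	err = svc.ListTablesPages(&dynamodb.ListTablesInput{}, // step 3
		func(page *dynamodb.ListTablesOutput, last bool) bool {
			for _, name := range page.TableNames { // step 4
				desc, derr := svc.DescribeTable(&dynamodb.DescribeTableInput{TableName: name})
				if derr != nil {
					inner = derr
					return false
				}
				reads += aws.Int64Value(desc.Table.ProvisionedThroughput.ReadCapacityUnits)
				writes += aws.Int64Value(desc.Table.ProvisionedThroughput.WriteCapacityUnits)
				for _, gsi := range desc.Table.GlobalSecondaryIndexes {
					reads += aws.Int64Value(gsi.ProvisionedThroughput.ReadCapacityUnits)
					writes += aws.Int64Value(gsi.ProvisionedThroughput.WriteCapacityUnits)
				}
			}
			return true
		})
	if err != nil {
		return err
	}
	if inner != nil {
		return inner
	}
	fmt.Printf("limits: %d RCU / %d WCU; provisioned: %d RCU / %d WCU\n", // step 5
		aws.Int64Value(limits.AccountMaxReadCapacityUnits),
		aws.Int64Value(limits.AccountMaxWriteCapacityUnits),
		reads, writes)
	return nil
}
```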

    ", + "DescribeTable": "

    Returns information about the table, including the current status of the table, when it was created, the primary key schema, and any indexes on the table.

    If you issue a DescribeTable request immediately after a CreateTable request, DynamoDB might return a ResourceNotFoundException. This is because DescribeTable uses an eventually consistent query, and the metadata for your table might not be available at that moment. Wait for a few seconds, and then try the DescribeTable request again.

    ", + "GetItem": "

    The GetItem operation returns a set of attributes for the item with the given primary key. If there is no matching item, GetItem does not return any data.

    GetItem provides an eventually consistent read by default. If your application requires a strongly consistent read, set ConsistentRead to true. Although a strongly consistent read might take more time than an eventually consistent read, it always returns the last updated value.
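
    A minimal Go sketch of a strongly consistent read; the Music table and its key attributes are hypothetical:

```go
import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// strongRead fetches one item with ConsistentRead set to true, trading a
// little latency for the last updated value.
func strongRead(svc *dynamodb.DynamoDB) (map[string]*dynamodb.AttributeValue, error) {
	out, err := svc.GetItem(&dynamodb.GetItemInput{
		TableName:      aws.String("Music"),
		ConsistentRead: aws.Bool(true), // default is an eventually consistent read
		Key: map[string]*dynamodb.AttributeValue{
			"Artist":    {S: aws.String("Acme Band")},
			"SongTitle": {S: aws.String("Happy Day")},
		},
	})
	if err != nil {
		return nil, err
	}
	return out.Item, nil // empty when no matching item exists
}
```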

    ", + "ListTables": "

    Returns an array of table names associated with the current account and endpoint. The output from ListTables is paginated, with each page returning a maximum of 100 table names.

    ", + "PutItem": "

    Creates a new item, or replaces an old item with a new item. If an item that has the same primary key as the new item already exists in the specified table, the new item completely replaces the existing item. You can perform a conditional put operation (add a new item if one with the specified primary key doesn't exist), or replace an existing item if it has certain attribute values.

    In addition to putting an item, you can also return the item's attribute values in the same operation, using the ReturnValues parameter.

    When you add an item, the primary key attribute(s) are the only required attributes. Attribute values cannot be null. String and Binary type attributes must have lengths greater than zero. Set type attributes cannot be empty. Requests with empty values will be rejected with a ValidationException exception.

    You can request that PutItem return either a copy of the original item (before the update) or a copy of the updated item (after the update). For more information, see the ReturnValues description below.

    To prevent a new item from replacing an existing item, use a conditional expression that contains the attribute_not_exists function with the name of the attribute being used as the partition key for the table. Since every record must contain that attribute, the attribute_not_exists function will only succeed if no matching item exists.

    For more information about using this API, see Working with Items in the Amazon DynamoDB Developer Guide.
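
    For example, a hedged Go sketch of the attribute_not_exists pattern described above, with a hypothetical Music table whose partition key is Artist:

```go
import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// putIfAbsent writes the item only when no item with this partition key
// exists; otherwise the call fails with ConditionalCheckFailedException.
func putIfAbsent(svc *dynamodb.DynamoDB) error {
	_, err := svc.PutItem(&dynamodb.PutItemInput{
		TableName: aws.String("Music"),
		Item: map[string]*dynamodb.AttributeValue{
			"Artist":    {S: aws.String("Acme Band")},
			"SongTitle": {S: aws.String("Happy Day")},
		},
		ConditionExpression: aws.String("attribute_not_exists(Artist)"),
	})
	return err
}
```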

    ", + "Query": "

    A Query operation uses the primary key of a table or a secondary index to directly access items from that table or index.

    Use the KeyConditionExpression parameter to provide a specific value for the partition key. The Query operation will return all of the items from the table or index with that partition key value. You can optionally narrow the scope of the Query operation by specifying a sort key value and a comparison operator in KeyConditionExpression. You can use the ScanIndexForward parameter to get results in forward or reverse order, by sort key.

    Queries that do not return results consume the minimum number of read capacity units for that type of read operation.

    If the total number of items meeting the query criteria exceeds the result set size limit of 1 MB, the query stops and results are returned to the user with the LastEvaluatedKey element to continue the query in a subsequent operation. Unlike a Scan operation, a Query operation never returns both an empty result set and a LastEvaluatedKey value. LastEvaluatedKey is only provided if you have used the Limit parameter, or if the result set exceeds 1 MB (prior to applying a filter).

    You can query a table, a local secondary index, or a global secondary index. For a query on a table or on a local secondary index, you can set the ConsistentRead parameter to true and obtain a strongly consistent result. Global secondary indexes support eventually consistent reads only, so do not specify ConsistentRead when querying a global secondary index.
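
    A Go sketch that follows LastEvaluatedKey across pages; the key condition, the :artist placeholder, and the table name are hypothetical:

```go
import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// queryAll collects every item in one partition, following
// LastEvaluatedKey until no further pages remain.
func queryAll(svc *dynamodb.DynamoDB) ([]map[string]*dynamodb.AttributeValue, error) {
	var items []map[string]*dynamodb.AttributeValue
	input := &dynamodb.QueryInput{
		TableName:              aws.String("Music"),
		KeyConditionExpression: aws.String("Artist = :artist"),
		ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
			":artist": {S: aws.String("Acme Band")},
		},
		ScanIndexForward: aws.Bool(false), // descending order by sort key
	}
	for {
		out, err := svc.Query(input)
		if err != nil {
			return nil, err
		}
		items = append(items, out.Items...)
		if len(out.LastEvaluatedKey) == 0 {
			return items, nil // final page
		}
		input.ExclusiveStartKey = out.LastEvaluatedKey
	}
}
```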

    ", + "Scan": "

    The Scan operation returns one or more items and item attributes by accessing every item in a table or a secondary index. To have DynamoDB return fewer items, you can provide a ScanFilter.

    If the total number of scanned items exceeds the maximum data set size limit of 1 MB, the scan stops and results are returned to the user with a LastEvaluatedKey value to continue the scan in a subsequent operation. The results also include the number of items exceeding the limit. A scan can result in no table data meeting the filter criteria.

    By default, Scan operations proceed sequentially; however, for faster performance on a large table or secondary index, applications can request a parallel Scan operation by providing the Segment and TotalSegments parameters. For more information, see Parallel Scan in the Amazon DynamoDB Developer Guide.

    By default, Scan uses eventually consistent reads when accessing the data in a table; therefore, the result set might not include the changes to data in the table immediately before the operation began. If you need a consistent copy of the data, as of the time that the Scan begins, you can set the ConsistentRead parameter to true.
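
    A minimal Go sketch of a parallel Scan with one goroutine per segment; the table name and segment count are hypothetical, and per-item processing is elided:

```go
import (
	"sync"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// parallelScan runs one goroutine per segment; DynamoDB partitions the
// key space across Segment/TotalSegments so the workers do not overlap.
func parallelScan(svc *dynamodb.DynamoDB, totalSegments int64) error {
	var wg sync.WaitGroup
	errs := make(chan error, totalSegments)
	for seg := int64(0); seg < totalSegments; seg++ {
		wg.Add(1)
		go func(segment int64) {
			defer wg.Done()
			input := &dynamodb.ScanInput{
				TableName:     aws.String("Music"),
				Segment:       aws.Int64(segment),
				TotalSegments: aws.Int64(totalSegments),
			}
			for {
				out, err := svc.Scan(input)
				if err != nil {
					errs <- err
					return
				}
				// ... process out.Items for this segment ...
				if len(out.LastEvaluatedKey) == 0 {
					return // this segment is exhausted
				}
				input.ExclusiveStartKey = out.LastEvaluatedKey
			}
		}(seg)
	}
	wg.Wait()
	close(errs)
	return <-errs // nil when every worker finished cleanly
}
```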

    ", + "UpdateItem": "

    Edits an existing item's attributes, or adds a new item to the table if it does not already exist. You can put, delete, or add attribute values. You can also perform a conditional update on an existing item (insert a new attribute name-value pair if it doesn't exist, or replace an existing name-value pair if it has certain expected attribute values).

    You can also return the item's attribute values in the same UpdateItem operation using the ReturnValues parameter.
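
    A short Go sketch of such an upsert-style update; the ADD action initializes a missing itemcount attribute to 0 before adding, as described for AttributeAction later in this file (all names are hypothetical):

```go
import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// bumpCounter adds 3 to itemcount, creating both the item and the
// attribute (initialized to 0) if they do not exist yet.
func bumpCounter(svc *dynamodb.DynamoDB) error {
	_, err := svc.UpdateItem(&dynamodb.UpdateItemInput{
		TableName:        aws.String("Music"),
		Key:              map[string]*dynamodb.AttributeValue{"Artist": {S: aws.String("Acme Band")}},
		UpdateExpression: aws.String("ADD itemcount :n"),
		ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
			":n": {N: aws.String("3")},
		},
		ReturnValues: aws.String("UPDATED_NEW"), // return only the changed attribute
	})
	return err
}
```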

    ", + "UpdateTable": "

    Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB Streams settings for a given table.

    You can only perform one of the following operations at once:

    • Modify the provisioned throughput settings of the table.

    • Enable or disable Streams on the table.

    • Remove a global secondary index from the table.

    • Create a new global secondary index on the table. Once the index begins backfilling, you can use UpdateTable to perform other operations.

    UpdateTable is an asynchronous operation; while it is executing, the table status changes from ACTIVE to UPDATING. While it is UPDATING, you cannot issue another UpdateTable request. When the table returns to the ACTIVE state, the UpdateTable operation is complete.
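
    A Go sketch performing exactly one of the operations above (modifying provisioned throughput) and then waiting for the table to return to ACTIVE; the capacity values are hypothetical:

```go
import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// raiseThroughput changes only the table-level provisioned throughput,
// then blocks until DescribeTable reports ACTIVE again.
func raiseThroughput(svc *dynamodb.DynamoDB) error {
	_, err := svc.UpdateTable(&dynamodb.UpdateTableInput{
		TableName: aws.String("Music"),
		ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
			ReadCapacityUnits:  aws.Int64(10),
			WriteCapacityUnits: aws.Int64(10),
		},
	})
	if err != nil {
		return err
	}
	// The waiter polls DescribeTable until TableStatus is ACTIVE.
	return svc.WaitUntilTableExists(&dynamodb.DescribeTableInput{
		TableName: aws.String("Music"),
	})
}
```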

    " + }, + "shapes": { + "AttributeAction": { + "base": null, + "refs": { + "AttributeValueUpdate$Action": "

    Specifies how to perform the update. Valid values are PUT (default), DELETE, and ADD. The behavior depends on whether the specified primary key already exists in the table.

    If an item with the specified Key is found in the table:

    • PUT - Adds the specified attribute to the item. If the attribute already exists, it is replaced by the new value.

    • DELETE - If no value is specified, the attribute and its value are removed from the item. The data type of the specified value must match the existing value's data type.

      If a set of values is specified, then those values are subtracted from the old set. For example, if the attribute value was the set [a,b,c] and the DELETE action specified [a,c], then the final attribute value would be [b]. Specifying an empty set is an error.

    • ADD - If the attribute does not already exist, then the attribute and its values are added to the item. If the attribute does exist, then the behavior of ADD depends on the data type of the attribute:

      • If the existing attribute is a number, and if Value is also a number, then the Value is mathematically added to the existing attribute. If Value is a negative number, then it is subtracted from the existing attribute.

        If you use ADD to increment or decrement a number value for an item that doesn't exist before the update, DynamoDB uses 0 as the initial value.

        In addition, if you use ADD to update an existing item, and intend to increment or decrement an attribute value which does not yet exist, DynamoDB uses 0 as the initial value. For example, suppose that the item you want to update does not yet have an attribute named itemcount, but you decide to ADD the number 3 to this attribute anyway, even though it currently does not exist. DynamoDB will create the itemcount attribute, set its initial value to 0, and finally add 3 to it. The result will be a new itemcount attribute in the item, with a value of 3.

      • If the existing data type is a set, and if the Value is also a set, then the Value is added to the existing set. (This is a set operation, not mathematical addition.) For example, if the attribute value was the set [1,2], and the ADD action specified [3], then the final attribute value would be [1,2,3]. An error occurs if an ADD action is specified for a set attribute and the attribute type specified does not match the existing set type.

        Both sets must have the same primitive data type. For example, if the existing data type is a set of strings, the Value must also be a set of strings. The same holds true for number sets and binary sets.

      This action is only valid for an existing attribute whose data type is number or is a set. Do not use ADD for any other data types.

    If no item with the specified Key is found:

    • PUT - DynamoDB creates a new item with the specified primary key, and then adds the attribute.

    • DELETE - Nothing happens; there is no attribute to delete.

    • ADD - DynamoDB creates an item with the supplied primary key and number (or set of numbers) for the attribute value. The only data types allowed are number and number set; no other data types can be specified.

    " + } + }, + "AttributeDefinition": { + "base": "

    Represents an attribute for describing the key schema for the table and indexes.

    ", + "refs": { + "AttributeDefinitions$member": null + } + }, + "AttributeDefinitions": { + "base": null, + "refs": { + "CreateTableInput$AttributeDefinitions": "

    An array of attributes that describe the key schema for the table and indexes.

    ", + "TableDescription$AttributeDefinitions": "

    An array of AttributeDefinition objects. Each of these objects describes one attribute in the table and index key schema.

    Each AttributeDefinition object in this array is composed of:

    • AttributeName - The name of the attribute.

    • AttributeType - The data type for the attribute.

    ", + "UpdateTableInput$AttributeDefinitions": "

    An array of attributes that describe the key schema for the table and indexes. If you are adding a new global secondary index to the table, AttributeDefinitions must include the key element(s) of the new index.

    " + } + }, + "AttributeMap": { + "base": null, + "refs": { + "DeleteItemOutput$Attributes": "

    A map of attribute names to AttributeValue objects, representing the item as it appeared before the DeleteItem operation. This map appears in the response only if ReturnValues was specified as ALL_OLD in the request.

    ", + "GetItemOutput$Item": "

    A map of attribute names to AttributeValue objects, as specified by AttributesToGet.

    ", + "ItemList$member": null, + "PutItemOutput$Attributes": "

    The attribute values as they appeared before the PutItem operation, but only if ReturnValues is specified as ALL_OLD in the request. Each element consists of an attribute name and an attribute value.

    ", + "UpdateItemOutput$Attributes": "

    A map of attribute values as they appeared before the UpdateItem operation. This map only appears if ReturnValues was specified as something other than NONE in the request. Each element represents one attribute.

    " + } + }, + "AttributeName": { + "base": null, + "refs": { + "AttributeMap$key": null, + "AttributeNameList$member": null, + "AttributeUpdates$key": null, + "ExpectedAttributeMap$key": null, + "ExpressionAttributeNameMap$value": null, + "FilterConditionMap$key": null, + "ItemCollectionKeyAttributeMap$key": null, + "Key$key": null, + "KeyConditions$key": null, + "MapAttributeValue$key": null, + "PutItemInputAttributeMap$key": null + } + }, + "AttributeNameList": { + "base": null, + "refs": { + "GetItemInput$AttributesToGet": "

    This is a legacy parameter, for backward compatibility. New applications should use ProjectionExpression instead. Do not combine legacy parameters and expression parameters in a single API call; otherwise, DynamoDB will return a ValidationException exception.

    This parameter allows you to retrieve attributes of type List or Map; however, it cannot retrieve individual elements within a List or a Map.

    The names of one or more attributes to retrieve. If no attribute names are provided, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result.

    Note that AttributesToGet has no effect on provisioned throughput consumption. DynamoDB determines capacity units consumed based on item size, not on the amount of data that is returned to an application.

    ", + "KeysAndAttributes$AttributesToGet": "

    One or more attributes to retrieve from the table or index. If no attribute names are specified then all attributes will be returned. If any of the specified attributes are not found, they will not appear in the result.

    ", + "QueryInput$AttributesToGet": "

    This is a legacy parameter, for backward compatibility. New applications should use ProjectionExpression instead. Do not combine legacy parameters and expression parameters in a single API call; otherwise, DynamoDB will return a ValidationException exception.

    This parameter allows you to retrieve attributes of type List or Map; however, it cannot retrieve individual elements within a List or a Map.

    The names of one or more attributes to retrieve. If no attribute names are provided, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result.

    Note that AttributesToGet has no effect on provisioned throughput consumption. DynamoDB determines capacity units consumed based on item size, not on the amount of data that is returned to an application.

    You cannot use both AttributesToGet and Select together in a Query request, unless the value for Select is SPECIFIC_ATTRIBUTES. (This usage is equivalent to specifying AttributesToGet without any value for Select.)

    If you query a local secondary index and request only attributes that are projected into that index, the operation will read only the index and not the table. If any of the requested attributes are not projected into the local secondary index, DynamoDB will fetch each of these attributes from the parent table. This extra fetching incurs additional throughput cost and latency.

    If you query a global secondary index, you can only request attributes that are projected into the index. Global secondary index queries cannot fetch attributes from the parent table.

    ", + "ScanInput$AttributesToGet": "

    This is a legacy parameter, for backward compatibility. New applications should use ProjectionExpression instead. Do not combine legacy parameters and expression parameters in a single API call; otherwise, DynamoDB will return a ValidationException exception.

    This parameter allows you to retrieve attributes of type List or Map; however, it cannot retrieve individual elements within a List or a Map.

    The names of one or more attributes to retrieve. If no attribute names are provided, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result.

    Note that AttributesToGet has no effect on provisioned throughput consumption. DynamoDB determines capacity units consumed based on item size, not on the amount of data that is returned to an application.

    " + } + }, + "AttributeUpdates": { + "base": null, + "refs": { + "UpdateItemInput$AttributeUpdates": "

    This is a legacy parameter, for backward compatibility. New applications should use UpdateExpression instead. Do not combine legacy parameters and expression parameters in a single API call; otherwise, DynamoDB will return a ValidationException exception.

    This parameter can be used for modifying top-level attributes; however, it does not support individual list or map elements.

    The names of attributes to be modified, the action to perform on each, and the new value for each. If you are updating an attribute that is an index key attribute for any indexes on that table, the attribute type must match the index key type defined in the AttributesDefinition of the table description. You can use UpdateItem to update any non-key attributes.

    Attribute values cannot be null. String and Binary type attributes must have lengths greater than zero. Set type attributes must not be empty. Requests with empty values will be rejected with a ValidationException exception.

    Each AttributeUpdates element consists of an attribute name to modify, along with the following:

    • Value - The new value, if applicable, for this attribute.

    • Action - A value that specifies how to perform the update. This action is only valid for an existing attribute whose data type is Number or is a set; do not use ADD for other data types.

      If an item with the specified primary key is found in the table, the following values perform the following actions:

      • PUT - Adds the specified attribute to the item. If the attribute already exists, it is replaced by the new value.

      • DELETE - Removes the attribute and its value, if no value is specified for DELETE. The data type of the specified value must match the existing value's data type.

        If a set of values is specified, then those values are subtracted from the old set. For example, if the attribute value was the set [a,b,c] and the DELETE action specifies [a,c], then the final attribute value is [b]. Specifying an empty set is an error.

      • ADD - Adds the specified value to the item, if the attribute does not already exist. If the attribute does exist, then the behavior of ADD depends on the data type of the attribute:

        • If the existing attribute is a number, and if Value is also a number, then Value is mathematically added to the existing attribute. If Value is a negative number, then it is subtracted from the existing attribute.

          If you use ADD to increment or decrement a number value for an item that doesn't exist before the update, DynamoDB uses 0 as the initial value.

          Similarly, if you use ADD for an existing item to increment or decrement an attribute value that doesn't exist before the update, DynamoDB uses 0 as the initial value. For example, suppose that the item you want to update doesn't have an attribute named itemcount, but you decide to ADD the number 3 to this attribute anyway. DynamoDB will create the itemcount attribute, set its initial value to 0, and finally add 3 to it. The result will be a new itemcount attribute, with a value of 3.

        • If the existing data type is a set, and if Value is also a set, then Value is appended to the existing set. For example, if the attribute value is the set [1,2], and the ADD action specified [3], then the final attribute value is [1,2,3]. An error occurs if an ADD action is specified for a set attribute and the attribute type specified does not match the existing set type.

          Both sets must have the same primitive data type. For example, if the existing data type is a set of strings, Value must also be a set of strings.

      If no item with the specified key is found in the table, the following values perform the following actions:

      • PUT - Causes DynamoDB to create a new item with the specified primary key, and then adds the attribute.

      • DELETE - Nothing happens, because attributes cannot be deleted from a nonexistent item. The operation succeeds, but DynamoDB does not create a new item.

      • ADD - Causes DynamoDB to create an item with the supplied primary key and number (or set of numbers) for the attribute value. The only data types allowed are Number and Number Set.

    If you provide any attributes that are part of an index key, then the data types for those attributes must match those of the schema in the table's attribute definition.

    " + } + }, + "AttributeValue": { + "base": "

    Represents the data for an attribute. You can set one, and only one, of the elements.

    Each attribute in an item is a name-value pair. An attribute can be single-valued or a multi-valued set. For example, a book item can have title and authors attributes. Each book has one title but can have many authors. The multi-valued attribute is a set; duplicate values are not allowed.

    ", + "refs": { + "AttributeMap$value": null, + "AttributeValueList$member": null, + "AttributeValueUpdate$Value": null, + "ExpectedAttributeValue$Value": null, + "ExpressionAttributeValueMap$value": null, + "ItemCollectionKeyAttributeMap$value": null, + "Key$value": null, + "ListAttributeValue$member": null, + "MapAttributeValue$value": null, + "PutItemInputAttributeMap$value": null + } + }, + "AttributeValueList": { + "base": null, + "refs": { + "Condition$AttributeValueList": "

    One or more values to evaluate against the supplied attribute. The number of values in the list depends on the ComparisonOperator being used.

    For type Number, value comparisons are numeric.

    String value comparisons for greater than, equals, or less than are based on ASCII character code values. For example, a is greater than A, and a is greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters.

    For Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values.

    ", + "ExpectedAttributeValue$AttributeValueList": "

    One or more values to evaluate against the supplied attribute. The number of values in the list depends on the ComparisonOperator being used.

    For type Number, value comparisons are numeric.

    String value comparisons for greater than, equals, or less than are based on ASCII character code values. For example, a is greater than A, and a is greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters.

    For Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values.

    For information on specifying data types in JSON, see JSON Data Format in the Amazon DynamoDB Developer Guide.

    " + } + }, + "AttributeValueUpdate": { + "base": "

    For the UpdateItem operation, represents the attributes to be modified, the action to perform on each, and the new value for each.

    You cannot use UpdateItem to update any primary key attributes. Instead, you will need to delete the item, and then use PutItem to create a new item with new attributes.

    Attribute values cannot be null; string and binary type attributes must have lengths greater than zero; and set type attributes must not be empty. Requests with empty values will be rejected with a ValidationException exception.

    ", + "refs": { + "AttributeUpdates$value": null + } + }, + "Backfilling": { + "base": null, + "refs": { + "GlobalSecondaryIndexDescription$Backfilling": "

    Indicates whether the index is currently backfilling. Backfilling is the process of reading items from the table and determining whether they can be added to the index. (Not all items will qualify: For example, a partition key cannot have any duplicate values.) If an item can be added to the index, DynamoDB will do so. After all items have been processed, the backfilling operation is complete and Backfilling is false.

    For indexes that were created during a CreateTable operation, the Backfilling attribute does not appear in the DescribeTable output.

    " + } + }, + "BatchGetItemInput": { + "base": "

    Represents the input of a BatchGetItem operation.

    ", + "refs": { + } + }, + "BatchGetItemOutput": { + "base": "

    Represents the output of a BatchGetItem operation.

    ", + "refs": { + } + }, + "BatchGetRequestMap": { + "base": null, + "refs": { + "BatchGetItemInput$RequestItems": "

    A map of one or more table names and, for each table, a map that describes one or more items to retrieve from that table. Each table name can be used only once per BatchGetItem request.

    Each element in the map of items to retrieve consists of the following:

    • ConsistentRead - If true, a strongly consistent read is used; if false (the default), an eventually consistent read is used.

    • ExpressionAttributeNames - One or more substitution tokens for attribute names in the ProjectionExpression parameter. The following are some use cases for using ExpressionAttributeNames:

      • To access an attribute whose name conflicts with a DynamoDB reserved word.

      • To create a placeholder for repeating occurrences of an attribute name in an expression.

      • To prevent special characters in an attribute name from being misinterpreted in an expression.

      Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:

      • Percentile

      The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:

      • {\"#P\":\"Percentile\"}

      You could then use this substitution in an expression, as in this example:

      • #P = :val

      Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.

      For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.

    • Keys - An array of primary key attribute values that define specific items in the table. For each primary key, you must provide all of the key attributes. For example, with a simple primary key, you only need to provide the partition key value. For a composite key, you must provide both the partition key value and the sort key value.

    • ProjectionExpression - A string that identifies one or more attributes to retrieve from the table. These attributes can include scalars, sets, or elements of a JSON document. The attributes in the expression must be separated by commas.

      If no attribute names are specified, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result.

      For more information, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.

    • AttributesToGet -

      This is a legacy parameter, for backward compatibility. New applications should use ProjectionExpression instead. Do not combine legacy parameters and expression parameters in a single API call; otherwise, DynamoDB will return a ValidationException exception.

      This parameter allows you to retrieve attributes of type List or Map; however, it cannot retrieve individual elements within a List or a Map.

      The names of one or more attributes to retrieve. If no attribute names are provided, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result.

      Note that AttributesToGet has no effect on provisioned throughput consumption. DynamoDB determines capacity units consumed based on item size, not on the amount of data that is returned to an application.
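
    A hedged Go sketch tying these parameters together: a strongly consistent BatchGetItem that projects the reserved word Percentile through the #P placeholder; the Stats table and its Id key are hypothetical:

```go
import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// batchGet retrieves two items in one round trip, substituting #P for
// the reserved word "Percentile" in the projection expression.
func batchGet(svc *dynamodb.DynamoDB) (*dynamodb.BatchGetItemOutput, error) {
	return svc.BatchGetItem(&dynamodb.BatchGetItemInput{
		RequestItems: map[string]*dynamodb.KeysAndAttributes{
			"Stats": {
				ConsistentRead: aws.Bool(true),
				Keys: []map[string]*dynamodb.AttributeValue{
					{"Id": {N: aws.String("1")}},
					{"Id": {N: aws.String("2")}},
				},
				ProjectionExpression:     aws.String("Id, #P"),
				ExpressionAttributeNames: map[string]*string{"#P": aws.String("Percentile")},
			},
		},
	})
}
```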

    ", + "BatchGetItemOutput$UnprocessedKeys": "

    A map of tables and their respective keys that were not processed with the current response. The UnprocessedKeys value is in the same form as RequestItems, so the value can be provided directly to a subsequent BatchGetItem operation. For more information, see RequestItems in the Request Parameters section.

    Each element consists of:

    • Keys - An array of primary key attribute values that define specific items in the table.

    • AttributesToGet - One or more attributes to be retrieved from the table or index. By default, all attributes are returned. If a requested attribute is not found, it does not appear in the result.

    • ConsistentRead - The consistency of a read operation. If set to true, then a strongly consistent read is used; otherwise, an eventually consistent read is used.

    If there are no unprocessed keys remaining, the response contains an empty UnprocessedKeys map.
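
    A minimal Go sketch of the resubmission loop this implies; a production version would back off between attempts:

```go
import (
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// getAllKeys drains a BatchGetItem request by feeding UnprocessedKeys
// back in until the map comes back empty.
func getAllKeys(svc *dynamodb.DynamoDB, requests map[string]*dynamodb.KeysAndAttributes) ([]map[string]*dynamodb.AttributeValue, error) {
	var items []map[string]*dynamodb.AttributeValue
	for len(requests) > 0 {
		out, err := svc.BatchGetItem(&dynamodb.BatchGetItemInput{RequestItems: requests})
		if err != nil {
			return nil, err
		}
		for _, tableItems := range out.Responses {
			items = append(items, tableItems...)
		}
		requests = out.UnprocessedKeys // empty once all keys are processed
	}
	return items, nil
}
```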

    " + } + }, + "BatchGetResponseMap": { + "base": null, + "refs": { + "BatchGetItemOutput$Responses": "

    A map of table name to a list of items. Each object in Responses consists of a table name, along with a map of attribute data consisting of the data type and attribute value.

    " + } + }, + "BatchWriteItemInput": { + "base": "

    Represents the input of a BatchWriteItem operation.

    ", + "refs": { + } + }, + "BatchWriteItemOutput": { + "base": "

    Represents the output of a BatchWriteItem operation.

    ", + "refs": { + } + }, + "BatchWriteItemRequestMap": { + "base": null, + "refs": { + "BatchWriteItemInput$RequestItems": "

    A map of one or more table names and, for each table, a list of operations to be performed (DeleteRequest or PutRequest). Each element in the map consists of the following:

    • DeleteRequest - Perform a DeleteItem operation on the specified item. The item to be deleted is identified by a Key subelement:

      • Key - A map of primary key attribute values that uniquely identify the item. Each entry in this map consists of an attribute name and an attribute value. For each primary key, you must provide all of the key attributes. For example, with a simple primary key, you only need to provide a value for the partition key. For a composite primary key, you must provide values for both the partition key and the sort key.

    • PutRequest - Perform a PutItem operation on the specified item. The item to be put is identified by an Item subelement:

      • Item - A map of attributes and their values. Each entry in this map consists of an attribute name and an attribute value. Attribute values must not be null; string and binary type attributes must have lengths greater than zero; and set type attributes must not be empty. Requests that contain empty values will be rejected with a ValidationException exception.

        If you specify any attributes that are part of an index key, then the data types for those attributes must match those of the schema in the table's attribute definition.

    ", + "BatchWriteItemOutput$UnprocessedItems": "

    A map of tables and requests against those tables that were not processed. The UnprocessedItems value is in the same form as RequestItems, so you can provide this value directly to a subsequent BatchWriteItem operation. For more information, see RequestItems in the Request Parameters section.

    Each UnprocessedItems entry consists of a table name and, for that table, a list of operations to perform (DeleteRequest or PutRequest).

    • DeleteRequest - Perform a DeleteItem operation on the specified item. The item to be deleted is identified by a Key subelement:

      • Key - A map of primary key attribute values that uniquely identify the item. Each entry in this map consists of an attribute name and an attribute value.

    • PutRequest - Perform a PutItem operation on the specified item. The item to be put is identified by an Item subelement:

      • Item - A map of attributes and their values. Each entry in this map consists of an attribute name and an attribute value. Attribute values must not be null; string and binary type attributes must have lengths greater than zero; and set type attributes must not be empty. Requests that contain empty values will be rejected with a ValidationException exception.

        If you specify any attributes that are part of an index key, then the data types for those attributes must match those of the schema in the table's attribute definition.

    If there are no unprocessed items remaining, the response contains an empty UnprocessedItems map.

    " + } + }, + "BinaryAttributeValue": { + "base": null, + "refs": { + "AttributeValue$B": "

    A Binary data type.

    ", + "BinarySetAttributeValue$member": null + } + }, + "BinarySetAttributeValue": { + "base": null, + "refs": { + "AttributeValue$BS": "

    A Binary Set data type.

    " + } + }, + "BooleanAttributeValue": { + "base": null, + "refs": { + "AttributeValue$BOOL": "

    A Boolean data type.

    " + } + }, + "BooleanObject": { + "base": null, + "refs": { + "ExpectedAttributeValue$Exists": "

    Causes DynamoDB to evaluate the value before attempting a conditional operation:

    • If Exists is true, DynamoDB will check to see if that attribute value already exists in the table. If it is found, then the operation succeeds. If it is not found, the operation fails with a ConditionalCheckFailedException.

    • If Exists is false, DynamoDB assumes that the attribute value does not exist in the table. If in fact the value does not exist, then the assumption is valid and the operation succeeds. If the value is found, despite the assumption that it does not exist, the operation fails with a ConditionalCheckFailedException.

    The default setting for Exists is true. If you supply a Value all by itself, DynamoDB assumes the attribute exists: You don't have to set Exists to true, because it is implied.

    DynamoDB returns a ValidationException if:

    • Exists is true but there is no Value to check. (You expect a value to exist, but don't specify what that value is.)

    • Exists is false but you also provide a Value. (You cannot expect an attribute to have a value, while also expecting it not to exist.)

    ", + "QueryInput$ScanIndexForward": "

    Specifies the order for index traversal: If true (default), the traversal is performed in ascending order; if false, the traversal is performed in descending order.

    Items with the same partition key value are stored in sorted order by sort key. If the sort key data type is Number, the results are stored in numeric order. For type String, the results are stored in order of ASCII character code values. For type Binary, DynamoDB treats each byte of the binary data as unsigned.

    If ScanIndexForward is true, DynamoDB returns the results in the order in which they are stored (by sort key value). This is the default behavior. If ScanIndexForward is false, DynamoDB reads the results in reverse order by sort key value, and then returns the results to the client.

    " + } + }, + "Capacity": { + "base": "

    Represents the amount of provisioned throughput capacity consumed on a table or an index.

    ", + "refs": { + "ConsumedCapacity$Table": "

    The amount of throughput consumed on the table affected by the operation.

    ", + "SecondaryIndexesCapacityMap$value": null + } + }, + "ComparisonOperator": { + "base": null, + "refs": { + "Condition$ComparisonOperator": "

    A comparator for evaluating attributes. For example, equals, greater than, less than, etc.

    The following comparison operators are available:

    EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN

    The following are descriptions of each comparison operator.

    • EQ : Equal. EQ is supported for all datatypes, including lists and maps.

      AttributeValueList can contain only one AttributeValue element of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not equal {\"NS\":[\"6\", \"2\", \"1\"]}.

    • NE : Not equal. NE is supported for all datatypes, including lists and maps.

      AttributeValueList can contain only one AttributeValue of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not equal {\"NS\":[\"6\", \"2\", \"1\"]}.

    • LE : Less than or equal.

      AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

    • LT : Less than.

      AttributeValueList can contain only one AttributeValue of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

    • GE : Greater than or equal.

      AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

    • GT : Greater than.

      AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

    • NOT_NULL : The attribute exists. NOT_NULL is supported for all datatypes, including lists and maps.

      This operator tests for the existence of an attribute, not its data type. If the data type of attribute \"a\" is null, and you evaluate it using NOT_NULL, the result is a Boolean true. This result is because the attribute \"a\" exists; its data type is not relevant to the NOT_NULL comparison operator.

    • NULL : The attribute does not exist. NULL is supported for all datatypes, including lists and maps.

      This operator tests for the nonexistence of an attribute, not its data type. If the data type of attribute \"a\" is null, and you evaluate it using NULL, the result is a Boolean false. This is because the attribute \"a\" exists; its data type is not relevant to the NULL comparison operator.

    • CONTAINS : Checks for a subsequence, or value in a set.

      AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is of type String, then the operator checks for a substring match. If the target attribute of the comparison is of type Binary, then the operator looks for a subsequence of the target that matches the input. If the target attribute of the comparison is a set (\"SS\", \"NS\", or \"BS\"), then the operator evaluates to true if it finds an exact match with any member of the set.

      CONTAINS is supported for lists: When evaluating \"a CONTAINS b\", \"a\" can be a list; however, \"b\" cannot be a set, a map, or a list.

    • NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value in a set.

      AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is a String, then the operator checks for the absence of a substring match. If the target attribute of the comparison is Binary, then the operator checks for the absence of a subsequence of the target that matches the input. If the target attribute of the comparison is a set (\"SS\", \"NS\", or \"BS\"), then the operator evaluates to true if it does not find an exact match with any member of the set.

      NOT_CONTAINS is supported for lists: When evaluating \"a NOT CONTAINS b\", \"a\" can be a list; however, \"b\" cannot be a set, a map, or a list.

    • BEGINS_WITH : Checks for a prefix.

      AttributeValueList can contain only one AttributeValue of type String or Binary (not a Number or a set type). The target attribute of the comparison must be of type String or Binary (not a Number or a set type).

    • IN : Checks for matching elements within two sets.

      AttributeValueList can contain one or more AttributeValue elements of type String, Number, or Binary (not a set type). These attributes are compared against an existing set type attribute of an item. If any elements of the input set are present in the item attribute, the expression evaluates to true.

    • BETWEEN : Greater than or equal to the first value, and less than or equal to the second value.

      AttributeValueList must contain two AttributeValue elements of the same type, either String, Number, or Binary (not a set type). A target attribute matches if the target value is greater than, or equal to, the first element and less than, or equal to, the second element. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not compare to {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

    For usage examples of AttributeValueList and ComparisonOperator, see Legacy Conditional Parameters in the Amazon DynamoDB Developer Guide.

    ", + "ExpectedAttributeValue$ComparisonOperator": "

    A comparator for evaluating attributes in the AttributeValueList. For example, equals, greater than, less than, etc.

    The following comparison operators are available:

    EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN

    The following are descriptions of each comparison operator.

    • EQ : Equal. EQ is supported for all datatypes, including lists and maps.

      AttributeValueList can contain only one AttributeValue element of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not equal {\"NS\":[\"6\", \"2\", \"1\"]}.

    • NE : Not equal. NE is supported for all datatypes, including lists and maps.

      AttributeValueList can contain only one AttributeValue of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not equal {\"NS\":[\"6\", \"2\", \"1\"]}.

    • LE : Less than or equal.

      AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

    • LT : Less than.

      AttributeValueList can contain only one AttributeValue of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

    • GE : Greater than or equal.

      AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

    • GT : Greater than.

      AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

    • NOT_NULL : The attribute exists. NOT_NULL is supported for all datatypes, including lists and maps.

      This operator tests for the existence of an attribute, not its data type. If the data type of attribute \"a\" is null, and you evaluate it using NOT_NULL, the result is a Boolean true. This result is because the attribute \"a\" exists; its data type is not relevant to the NOT_NULL comparison operator.

    • NULL : The attribute does not exist. NULL is supported for all datatypes, including lists and maps.

      This operator tests for the nonexistence of an attribute, not its data type. If the data type of attribute \"a\" is null, and you evaluate it using NULL, the result is a Boolean false. This is because the attribute \"a\" exists; its data type is not relevant to the NULL comparison operator.

    • CONTAINS : Checks for a subsequence, or value in a set.

      AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is of type String, then the operator checks for a substring match. If the target attribute of the comparison is of type Binary, then the operator looks for a subsequence of the target that matches the input. If the target attribute of the comparison is a set (\"SS\", \"NS\", or \"BS\"), then the operator evaluates to true if it finds an exact match with any member of the set.

      CONTAINS is supported for lists: When evaluating \"a CONTAINS b\", \"a\" can be a list; however, \"b\" cannot be a set, a map, or a list.

    • NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value in a set.

      AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is a String, then the operator checks for the absence of a substring match. If the target attribute of the comparison is Binary, then the operator checks for the absence of a subsequence of the target that matches the input. If the target attribute of the comparison is a set (\"SS\", \"NS\", or \"BS\"), then the operator evaluates to true if it does not find an exact match with any member of the set.

      NOT_CONTAINS is supported for lists: When evaluating \"a NOT CONTAINS b\", \"a\" can be a list; however, \"b\" cannot be a set, a map, or a list.

    • BEGINS_WITH : Checks for a prefix.

      AttributeValueList can contain only one AttributeValue of type String or Binary (not a Number or a set type). The target attribute of the comparison must be of type String or Binary (not a Number or a set type).

    • IN : Checks for matching elements within two sets.

      AttributeValueList can contain one or more AttributeValue elements of type String, Number, or Binary (not a set type). These attributes are compared against an existing set type attribute of an item. If any elements of the input set are present in the item attribute, the expression evaluates to true.

    • BETWEEN : Greater than or equal to the first value, and less than or equal to the second value.

      AttributeValueList must contain two AttributeValue elements of the same type, either String, Number, or Binary (not a set type). A target attribute matches if the target value is greater than, or equal to, the first element and less than, or equal to, the second element. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not compare to {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

    " + } + }, + "Condition": { + "base": "

    Represents the selection criteria for a Query or Scan operation:

    • For a Query operation, Condition is used for specifying the KeyConditions to use when querying a table or an index. For KeyConditions, only the following comparison operators are supported:

      EQ | LE | LT | GE | GT | BEGINS_WITH | BETWEEN

      Condition is also used in a QueryFilter, which evaluates the query results and returns only the desired values.

    • For a Scan operation, Condition is used in a ScanFilter, which evaluates the scan results and returns only the desired values.

    ", + "refs": { + "FilterConditionMap$value": null, + "KeyConditions$value": null + } + }, + "ConditionExpression": { + "base": null, + "refs": { + "DeleteItemInput$ConditionExpression": "

    A condition that must be satisfied in order for a conditional DeleteItem to succeed.

    An expression can contain any of the following:

    • Functions: attribute_exists | attribute_not_exists | attribute_type | contains | begins_with | size

      These function names are case-sensitive.

    • Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN

    • Logical operators: AND | OR | NOT

    For more information on condition expressions, see Specifying Conditions in the Amazon DynamoDB Developer Guide.

    ConditionExpression replaces the legacy ConditionalOperator and Expected parameters.
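
    For example, a hedged Go sketch that combines a function, a comparison operator, and a logical operator in a single ConditionExpression; the ProductCatalog table, its Id key, and the price threshold are hypothetical:

```go
import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// conditionalDelete removes the item only while it exists and its Price
// is still at or below a threshold; otherwise the call fails with
// ConditionalCheckFailedException.
func conditionalDelete(svc *dynamodb.DynamoDB) error {
	_, err := svc.DeleteItem(&dynamodb.DeleteItemInput{
		TableName:           aws.String("ProductCatalog"),
		Key:                 map[string]*dynamodb.AttributeValue{"Id": {N: aws.String("456")}},
		ConditionExpression: aws.String("attribute_exists(Id) AND Price <= :limit"),
		ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
			":limit": {N: aws.String("100")},
		},
	})
	return err
}
```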

    ", + "PutItemInput$ConditionExpression": "

    A condition that must be satisfied in order for a conditional PutItem operation to succeed.

    An expression can contain any of the following:

    • Functions: attribute_exists | attribute_not_exists | attribute_type | contains | begins_with | size

      These function names are case-sensitive.

    • Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN

    • Logical operators: AND | OR | NOT

    For more information on condition expressions, see Specifying Conditions in the Amazon DynamoDB Developer Guide.

    ConditionExpression replaces the legacy ConditionalOperator and Expected parameters.

    ", + "QueryInput$FilterExpression": "

    A string that contains conditions that DynamoDB applies after the Query operation, but before the data is returned to you. Items that do not satisfy the FilterExpression criteria are not returned.

    A FilterExpression is applied after the items have already been read; the process of filtering does not consume any additional read capacity units.

    For more information, see Filter Expressions in the Amazon DynamoDB Developer Guide.

    FilterExpression replaces the legacy QueryFilter and ConditionalOperator parameters.

    ", + "ScanInput$FilterExpression": "

    A string that contains conditions that DynamoDB applies after the Scan operation, but before the data is returned to you. Items that do not satisfy the FilterExpression criteria are not returned.

    A FilterExpression is applied after the items have already been read; the process of filtering does not consume any additional read capacity units.

    For more information, see Filter Expressions in the Amazon DynamoDB Developer Guide.

    FilterExpression replaces the legacy ScanFilter and ConditionalOperator parameters.

    ", + "UpdateItemInput$ConditionExpression": "

    A condition that must be satisfied in order for a conditional update to succeed.

    An expression can contain any of the following:

    • Functions: attribute_exists | attribute_not_exists | attribute_type | contains | begins_with | size

      These function names are case-sensitive.

    • Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN

    • Logical operators: AND | OR | NOT

    For more information on condition expressions, see Specifying Conditions in the Amazon DynamoDB Developer Guide.

    ConditionExpression replaces the legacy ConditionalOperator and Expected parameters.

    " + } + }, + "ConditionalCheckFailedException": { + "base": "

    A condition specified in the operation could not be evaluated.

    ", + "refs": { + } + }, + "ConditionalOperator": { + "base": null, + "refs": { + "DeleteItemInput$ConditionalOperator": "

    This is a legacy parameter, for backward compatibility. New applications should use ConditionExpression instead. Do not combine legacy parameters and expression parameters in a single API call; otherwise, DynamoDB will return a ValidationException exception.

    A logical operator to apply to the conditions in the Expected map:

    • AND - If all of the conditions evaluate to true, then the entire map evaluates to true.

    • OR - If at least one of the conditions evaluates to true, then the entire map evaluates to true.

    If you omit ConditionalOperator, then AND is the default.

    The operation will succeed only if the entire map evaluates to true.

    This parameter does not support attributes of type List or Map.

    ", + "PutItemInput$ConditionalOperator": "

    This is a legacy parameter, for backward compatibility. New applications should use ConditionExpression instead. Do not combine legacy parameters and expression parameters in a single API call; otherwise, DynamoDB will return a ValidationException.

    A logical operator to apply to the conditions in the Expected map:

    • AND - If all of the conditions evaluate to true, then the entire map evaluates to true.

    • OR - If at least one of the conditions evaluates to true, then the entire map evaluates to true.

    If you omit ConditionalOperator, then AND is the default.

    The operation will succeed only if the entire map evaluates to true.

    This parameter does not support attributes of type List or Map.

    ", + "QueryInput$ConditionalOperator": "

    This is a legacy parameter, for backward compatibility. New applications should use FilterExpression instead. Do not combine legacy parameters and expression parameters in a single API call; otherwise, DynamoDB will return a ValidationException.

    A logical operator to apply to the conditions in a QueryFilter map:

    • AND - If all of the conditions evaluate to true, then the entire map evaluates to true.

    • OR - If at least one of the conditions evaluates to true, then the entire map evaluates to true.

    If you omit ConditionalOperator, then AND is the default.

    The operation will succeed only if the entire map evaluates to true.

    This parameter does not support attributes of type List or Map.

    ", + "ScanInput$ConditionalOperator": "

    This is a legacy parameter, for backward compatibility. New applications should use FilterExpression instead. Do not combine legacy parameters and expression parameters in a single API call; otherwise, DynamoDB will return a ValidationException.

    A logical operator to apply to the conditions in a ScanFilter map:

    • AND - If all of the conditions evaluate to true, then the entire map evaluates to true.

    • OR - If at least one of the conditions evaluates to true, then the entire map evaluates to true.

    If you omit ConditionalOperator, then AND is the default.

    The operation will succeed only if the entire map evaluates to true.

    This parameter does not support attributes of type List or Map.

    ", + "UpdateItemInput$ConditionalOperator": "

    This is a legacy parameter, for backward compatibility. New applications should use ConditionExpression instead. Do not combine legacy parameters and expression parameters in a single API call; otherwise, DynamoDB will return a ValidationException.

    A logical operator to apply to the conditions in the Expected map:

    • AND - If all of the conditions evaluate to true, then the entire map evaluates to true.

    • OR - If at least one of the conditions evaluates to true, then the entire map evaluates to true.

    If you omit ConditionalOperator, then AND is the default.

    The operation will succeed only if the entire map evaluates to true.

    This parameter does not support attributes of type List or Map.

    " + } + }, + "ConsistentRead": { + "base": null, + "refs": { + "GetItemInput$ConsistentRead": "

    Determines the read consistency model: If set to true, then the operation uses strongly consistent reads; otherwise, the operation uses eventually consistent reads.

    ", + "KeysAndAttributes$ConsistentRead": "

    The consistency of a read operation. If set to true, then a strongly consistent read is used; otherwise, an eventually consistent read is used.

    ", + "QueryInput$ConsistentRead": "

    Determines the read consistency model: If set to true, then the operation uses strongly consistent reads; otherwise, the operation uses eventually consistent reads.

    Strongly consistent reads are not supported on global secondary indexes. If you query a global secondary index with ConsistentRead set to true, you will receive a ValidationException.

    ", + "ScanInput$ConsistentRead": "

    A Boolean value that determines the read consistency model during the scan:

    • If ConsistentRead is false, then the data returned from Scan might not contain the results from other recently completed write operations (PutItem, UpdateItem or DeleteItem).

    • If ConsistentRead is true, then all of the write operations that completed before the Scan began are guaranteed to be contained in the Scan response.

    The default setting for ConsistentRead is false.

    The ConsistentRead parameter is not supported on global secondary indexes. If you scan a global secondary index with ConsistentRead set to true, you will receive a ValidationException.
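
    For illustration, a GetItem sketch that requests a strongly consistent read; the table name and key are placeholders:

      package main

      import (
          "fmt"

          "github.com/aws/aws-sdk-go/aws"
          "github.com/aws/aws-sdk-go/aws/session"
          "github.com/aws/aws-sdk-go/service/dynamodb"
      )

      func main() {
          svc := dynamodb.New(session.Must(session.NewSession(&aws.Config{
              Region: aws.String("us-west-2"), // example region
          })))

          // ConsistentRead(true) asks for a strongly consistent read; the
          // default, false, is eventually consistent. Strongly consistent
          // reads are not supported on global secondary indexes.
          out, err := svc.GetItem(&dynamodb.GetItemInput{
              TableName: aws.String("ProductCatalog"), // hypothetical table
              Key: map[string]*dynamodb.AttributeValue{
                  "Id": {N: aws.String("456")},
              },
              ConsistentRead: aws.Bool(true),
          })
          if err != nil {
              fmt.Println(err)
              return
          }
          fmt.Println(out.Item)
      }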

    " + } + }, + "ConsumedCapacity": { + "base": "

    The capacity units consumed by an operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the request asked for it. For more information, see Provisioned Throughput in the Amazon DynamoDB Developer Guide.
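
    As a sketch, a write that asks DynamoDB to report the capacity it consumed; the Music table is a placeholder:

      package main

      import (
          "fmt"

          "github.com/aws/aws-sdk-go/aws"
          "github.com/aws/aws-sdk-go/aws/session"
          "github.com/aws/aws-sdk-go/service/dynamodb"
      )

      func main() {
          svc := dynamodb.New(session.Must(session.NewSession(&aws.Config{
              Region: aws.String("us-west-2"), // example region
          })))

          // Without ReturnConsumedCapacity the ConsumedCapacity field is nil.
          out, err := svc.PutItem(&dynamodb.PutItemInput{
              TableName: aws.String("Music"), // hypothetical table
              Item: map[string]*dynamodb.AttributeValue{
                  "Artist": {S: aws.String("No One You Know")},
              },
              ReturnConsumedCapacity: aws.String(dynamodb.ReturnConsumedCapacityTotal),
          })
          if err != nil {
              fmt.Println(err)
              return
          }
          fmt.Println("capacity units:", *out.ConsumedCapacity.CapacityUnits)
      }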

    ", + "refs": { + "ConsumedCapacityMultiple$member": null, + "DeleteItemOutput$ConsumedCapacity": null, + "GetItemOutput$ConsumedCapacity": null, + "PutItemOutput$ConsumedCapacity": null, + "QueryOutput$ConsumedCapacity": null, + "ScanOutput$ConsumedCapacity": null, + "UpdateItemOutput$ConsumedCapacity": null + } + }, + "ConsumedCapacityMultiple": { + "base": null, + "refs": { + "BatchGetItemOutput$ConsumedCapacity": "

    The read capacity units consumed by the operation.

    Each element consists of:

    • TableName - The table that consumed the provisioned throughput.

    • CapacityUnits - The total number of capacity units consumed.

    ", + "BatchWriteItemOutput$ConsumedCapacity": "

    The capacity units consumed by the operation.

    Each element consists of:

    • TableName - The table that consumed the provisioned throughput.

    • CapacityUnits - The total number of capacity units consumed.

    " + } + }, + "ConsumedCapacityUnits": { + "base": null, + "refs": { + "Capacity$CapacityUnits": "

    The total number of capacity units consumed on a table or an index.

    ", + "ConsumedCapacity$CapacityUnits": "

    The total number of capacity units consumed by the operation.

    " + } + }, + "CreateGlobalSecondaryIndexAction": { + "base": "

    Represents a new global secondary index to be added to an existing table.

    ", + "refs": { + "GlobalSecondaryIndexUpdate$Create": "

    The parameters required for creating a global secondary index on an existing table:

    • IndexName

    • KeySchema

    • AttributeDefinitions

    • Projection

    • ProvisionedThroughput
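
    Putting those parameters together, a hedged UpdateTable sketch that adds a hypothetical GenreIndex to an existing table:

      package main

      import (
          "fmt"

          "github.com/aws/aws-sdk-go/aws"
          "github.com/aws/aws-sdk-go/aws/session"
          "github.com/aws/aws-sdk-go/service/dynamodb"
      )

      func main() {
          svc := dynamodb.New(session.Must(session.NewSession(&aws.Config{
              Region: aws.String("us-west-2"), // example region
          })))

          // Any attribute used in the index key schema must also appear in
          // AttributeDefinitions on the UpdateTable request.
          _, err := svc.UpdateTable(&dynamodb.UpdateTableInput{
              TableName: aws.String("Music"), // hypothetical table
              AttributeDefinitions: []*dynamodb.AttributeDefinition{
                  {AttributeName: aws.String("Genre"), AttributeType: aws.String("S")},
              },
              GlobalSecondaryIndexUpdates: []*dynamodb.GlobalSecondaryIndexUpdate{
                  {
                      Create: &dynamodb.CreateGlobalSecondaryIndexAction{
                          IndexName: aws.String("GenreIndex"), // hypothetical index
                          KeySchema: []*dynamodb.KeySchemaElement{
                              {AttributeName: aws.String("Genre"), KeyType: aws.String(dynamodb.KeyTypeHash)},
                          },
                          Projection: &dynamodb.Projection{
                              ProjectionType: aws.String(dynamodb.ProjectionTypeAll),
                          },
                          ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
                              ReadCapacityUnits:  aws.Int64(5),
                              WriteCapacityUnits: aws.Int64(5),
                          },
                      },
                  },
              },
          })
          if err != nil {
              fmt.Println(err)
          }
      }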

    " + } + }, + "CreateTableInput": { + "base": "

    Represents the input of a CreateTable operation.

    ", + "refs": { + } + }, + "CreateTableOutput": { + "base": "

    Represents the output of a CreateTable operation.

    ", + "refs": { + } + }, + "Date": { + "base": null, + "refs": { + "ProvisionedThroughputDescription$LastIncreaseDateTime": "

    The date and time of the last provisioned throughput increase for this table.

    ", + "ProvisionedThroughputDescription$LastDecreaseDateTime": "

    The date and time of the last provisioned throughput decrease for this table.

    ", + "TableDescription$CreationDateTime": "

    The date and time when the table was created, in UNIX epoch time format.

    " + } + }, + "DeleteGlobalSecondaryIndexAction": { + "base": "

    Represents a global secondary index to be deleted from an existing table.

    ", + "refs": { + "GlobalSecondaryIndexUpdate$Delete": "

    The name of an existing global secondary index to be removed.

    " + } + }, + "DeleteItemInput": { + "base": "

    Represents the input of a DeleteItem operation.

    ", + "refs": { + } + }, + "DeleteItemOutput": { + "base": "

    Represents the output of a DeleteItem operation.

    ", + "refs": { + } + }, + "DeleteRequest": { + "base": "

    Represents a request to perform a DeleteItem operation on an item.

    ", + "refs": { + "WriteRequest$DeleteRequest": "

    A request to perform a DeleteItem operation.

    " + } + }, + "DeleteTableInput": { + "base": "

    Represents the input of a DeleteTable operation.

    ", + "refs": { + } + }, + "DeleteTableOutput": { + "base": "

    Represents the output of a DeleteTable operation.

    ", + "refs": { + } + }, + "DescribeLimitsInput": { + "base": "

    Represents the input of a DescribeLimits operation. Has no content.

    ", + "refs": { + } + }, + "DescribeLimitsOutput": { + "base": "

    Represents the output of a DescribeLimits operation.

    ", + "refs": { + } + }, + "DescribeTableInput": { + "base": "

    Represents the input of a DescribeTable operation.

    ", + "refs": { + } + }, + "DescribeTableOutput": { + "base": "

    Represents the output of a DescribeTable operation.

    ", + "refs": { + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "ConditionalCheckFailedException$message": "

    The conditional request failed.

    ", + "InternalServerError$message": "

    The server encountered an internal error trying to fulfill the request.

    ", + "ItemCollectionSizeLimitExceededException$message": "

    The total size of an item collection has exceeded the maximum limit of 10 gigabytes.

    ", + "LimitExceededException$message": "

    Too many operations for a given subscriber.

    ", + "ProvisionedThroughputExceededException$message": "

    You exceeded your maximum allowed provisioned throughput.

    ", + "ResourceInUseException$message": "

    The resource that you are attempting to change is in use.

    ", + "ResourceNotFoundException$message": "

    The requested resource does not exist.

    " + } + }, + "ExpectedAttributeMap": { + "base": null, + "refs": { + "DeleteItemInput$Expected": "

    This is a legacy parameter, for backward compatibility. New applications should use ConditionExpression instead. Do not combine legacy parameters and expression parameters in a single API call; otherwise, DynamoDB will return a ValidationException.

    A map of attribute/condition pairs. Expected provides a conditional block for the DeleteItem operation.

    Each element of Expected consists of an attribute name, a comparison operator, and one or more values. DynamoDB compares the attribute with the value(s) you supplied, using the comparison operator. For each Expected element, the result of the evaluation is either true or false.

    If you specify more than one element in the Expected map, then by default all of the conditions must evaluate to true. In other words, the conditions are ANDed together. (You can use the ConditionalOperator parameter to OR the conditions instead. If you do this, then at least one of the conditions must evaluate to true, rather than all of them.)

    If the Expected map evaluates to true, then the conditional operation succeeds; otherwise, it fails.

    Expected contains the following:

    • AttributeValueList - One or more values to evaluate against the supplied attribute. The number of values in the list depends on the ComparisonOperator being used.

      For type Number, value comparisons are numeric.

      String value comparisons for greater than, equals, or less than are based on ASCII character code values. For example, a is greater than A, and a is greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters.

      For type Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values.

    • ComparisonOperator - A comparator for evaluating attributes in the AttributeValueList. When performing the comparison, DynamoDB uses strongly consistent reads.

      The following comparison operators are available:

      EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN

      The following are descriptions of each comparison operator.

      • EQ : Equal. EQ is supported for all datatypes, including lists and maps.

        AttributeValueList can contain only one AttributeValue element of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not equal {\"NS\":[\"6\", \"2\", \"1\"]}.

      • NE : Not equal. NE is supported for all datatypes, including lists and maps.

        AttributeValueList can contain only one AttributeValue of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not equal {\"NS\":[\"6\", \"2\", \"1\"]}.

      • LE : Less than or equal.

        AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

      • LT : Less than.

        AttributeValueList can contain only one AttributeValue of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

      • GE : Greater than or equal.

        AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

      • GT : Greater than.

        AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

      • NOT_NULL : The attribute exists. NOT_NULL is supported for all datatypes, including lists and maps.

        This operator tests for the existence of an attribute, not its data type. If the data type of attribute \"a\" is null, and you evaluate it using NOT_NULL, the result is a Boolean true. This result is because the attribute \"a\" exists; its data type is not relevant to the NOT_NULL comparison operator.

      • NULL : The attribute does not exist. NULL is supported for all datatypes, including lists and maps.

        This operator tests for the nonexistence of an attribute, not its data type. If the data type of attribute \"a\" is null, and you evaluate it using NULL, the result is a Boolean false. This is because the attribute \"a\" exists; its data type is not relevant to the NULL comparison operator.

      • CONTAINS : Checks for a subsequence, or value in a set.

        AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is of type String, then the operator checks for a substring match. If the target attribute of the comparison is of type Binary, then the operator looks for a subsequence of the target that matches the input. If the target attribute of the comparison is a set (\"SS\", \"NS\", or \"BS\"), then the operator evaluates to true if it finds an exact match with any member of the set.

        CONTAINS is supported for lists: When evaluating \"a CONTAINS b\", \"a\" can be a list; however, \"b\" cannot be a set, a map, or a list.

      • NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value in a set.

        AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is a String, then the operator checks for the absence of a substring match. If the target attribute of the comparison is Binary, then the operator checks for the absence of a subsequence of the target that matches the input. If the target attribute of the comparison is a set (\"SS\", \"NS\", or \"BS\"), then the operator evaluates to true if it does not find an exact match with any member of the set.

        NOT_CONTAINS is supported for lists: When evaluating \"a NOT CONTAINS b\", \"a\" can be a list; however, \"b\" cannot be a set, a map, or a list.

      • BEGINS_WITH : Checks for a prefix.

        AttributeValueList can contain only one AttributeValue of type String or Binary (not a Number or a set type). The target attribute of the comparison must be of type String or Binary (not a Number or a set type).

      • IN : Checks for matching elements within two sets.

        AttributeValueList can contain one or more AttributeValue elements of type String, Number, or Binary (not a set type). These attributes are compared against an existing set type attribute of an item. If any elements of the input set are present in the item attribute, the expression evaluates to true.

      • BETWEEN : Greater than or equal to the first value, and less than or equal to the second value.

        AttributeValueList must contain two AttributeValue elements of the same type, either String, Number, or Binary (not a set type). A target attribute matches if the target value is greater than, or equal to, the first element and less than, or equal to, the second element. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not compare to {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}

    For usage examples of AttributeValueList and ComparisonOperator, see Legacy Conditional Parameters in the Amazon DynamoDB Developer Guide.

    For backward compatibility with previous DynamoDB releases, the following parameters can be used instead of AttributeValueList and ComparisonOperator:

    • Value - A value for DynamoDB to compare with an attribute.

    • Exists - A Boolean value that causes DynamoDB to evaluate the value before attempting the conditional operation:

      • If Exists is true, DynamoDB will check to see if that attribute value already exists in the table. If it is found, then the condition evaluates to true; otherwise the condition evaluates to false.

      • If Exists is false, DynamoDB assumes that the attribute value does not exist in the table. If in fact the value does not exist, then the assumption is valid and the condition evaluates to true. If the value is found, despite the assumption that it does not exist, the condition evaluates to false.

      Note that the default value for Exists is true.

    The Value and Exists parameters are incompatible with AttributeValueList and ComparisonOperator. Note that if you use both sets of parameters at once, DynamoDB will return a ValidationException.

    This parameter does not support attributes of type List or Map.

    ", + "PutItemInput$Expected": "

    This is a legacy parameter, for backward compatibility. New applications should use ConditionExpression instead. Do not combine legacy parameters and expression parameters in a single API call; otherwise, DynamoDB will return a ValidationException.

    A map of attribute/condition pairs. Expected provides a conditional block for the PutItem operation.

    This parameter does not support attributes of type List or Map.

    Each element of Expected consists of an attribute name, a comparison operator, and one or more values. DynamoDB compares the attribute with the value(s) you supplied, using the comparison operator. For each Expected element, the result of the evaluation is either true or false.

    If you specify more than one element in the Expected map, then by default all of the conditions must evaluate to true. In other words, the conditions are ANDed together. (You can use the ConditionalOperator parameter to OR the conditions instead. If you do this, then at least one of the conditions must evaluate to true, rather than all of them.)

    If the Expected map evaluates to true, then the conditional operation succeeds; otherwise, it fails.

    Expected contains the following:

    • AttributeValueList - One or more values to evaluate against the supplied attribute. The number of values in the list depends on the ComparisonOperator being used.

      For type Number, value comparisons are numeric.

      String value comparisons for greater than, equals, or less than are based on ASCII character code values. For example, a is greater than A, and a is greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters.

      For type Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values.

    • ComparisonOperator - A comparator for evaluating attributes in the AttributeValueList. When performing the comparison, DynamoDB uses strongly consistent reads.

      The following comparison operators are available:

      EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN

      The following are descriptions of each comparison operator.

      • EQ : Equal. EQ is supported for all datatypes, including lists and maps.

        AttributeValueList can contain only one AttributeValue element of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not equal {\"NS\":[\"6\", \"2\", \"1\"]}.

      • NE : Not equal. NE is supported for all datatypes, including lists and maps.

        AttributeValueList can contain only one AttributeValue of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not equal {\"NS\":[\"6\", \"2\", \"1\"]}.

      • LE : Less than or equal.

        AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

      • LT : Less than.

        AttributeValueList can contain only one AttributeValue of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

      • GE : Greater than or equal.

        AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

      • GT : Greater than.

        AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

      • NOT_NULL : The attribute exists. NOT_NULL is supported for all datatypes, including lists and maps.

        This operator tests for the existence of an attribute, not its data type. If the data type of attribute \"a\" is null, and you evaluate it using NOT_NULL, the result is a Boolean true. This result is because the attribute \"a\" exists; its data type is not relevant to the NOT_NULL comparison operator.

      • NULL : The attribute does not exist. NULL is supported for all datatypes, including lists and maps.

        This operator tests for the nonexistence of an attribute, not its data type. If the data type of attribute \"a\" is null, and you evaluate it using NULL, the result is a Boolean false. This is because the attribute \"a\" exists; its data type is not relevant to the NULL comparison operator.

      • CONTAINS : Checks for a subsequence, or value in a set.

        AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is of type String, then the operator checks for a substring match. If the target attribute of the comparison is of type Binary, then the operator looks for a subsequence of the target that matches the input. If the target attribute of the comparison is a set (\"SS\", \"NS\", or \"BS\"), then the operator evaluates to true if it finds an exact match with any member of the set.

        CONTAINS is supported for lists: When evaluating \"a CONTAINS b\", \"a\" can be a list; however, \"b\" cannot be a set, a map, or a list.

      • NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value in a set.

        AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is a String, then the operator checks for the absence of a substring match. If the target attribute of the comparison is Binary, then the operator checks for the absence of a subsequence of the target that matches the input. If the target attribute of the comparison is a set (\"SS\", \"NS\", or \"BS\"), then the operator evaluates to true if it does not find an exact match with any member of the set.

        NOT_CONTAINS is supported for lists: When evaluating \"a NOT CONTAINS b\", \"a\" can be a list; however, \"b\" cannot be a set, a map, or a list.

      • BEGINS_WITH : Checks for a prefix.

        AttributeValueList can contain only one AttributeValue of type String or Binary (not a Number or a set type). The target attribute of the comparison must be of type String or Binary (not a Number or a set type).

      • IN : Checks for matching elements within two sets.

        AttributeValueList can contain one or more AttributeValue elements of type String, Number, or Binary (not a set type). These attributes are compared against an existing set type attribute of an item. If any elements of the input set are present in the item attribute, the expression evaluates to true.

      • BETWEEN : Greater than or equal to the first value, and less than or equal to the second value.

        AttributeValueList must contain two AttributeValue elements of the same type, either String, Number, or Binary (not a set type). A target attribute matches if the target value is greater than, or equal to, the first element and less than, or equal to, the second element. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not compare to {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}

    For usage examples of AttributeValueList and ComparisonOperator, see Legacy Conditional Parameters in the Amazon DynamoDB Developer Guide.

    For backward compatibility with previous DynamoDB releases, the following parameters can be used instead of AttributeValueList and ComparisonOperator:

    • Value - A value for DynamoDB to compare with an attribute.

    • Exists - A Boolean value that causes DynamoDB to evaluate the value before attempting the conditional operation:

      • If Exists is true, DynamoDB will check to see if that attribute value already exists in the table. If it is found, then the condition evaluates to true; otherwise the condition evaluates to false.

      • If Exists is false, DynamoDB assumes that the attribute value does not exist in the table. If in fact the value does not exist, then the assumption is valid and the condition evaluates to true. If the value is found, despite the assumption that it does not exist, the condition evaluates to false.

      Note that the default value for Exists is true.

    The Value and Exists parameters are incompatible with AttributeValueList and ComparisonOperator. Note that if you use both sets of parameters at once, DynamoDB will return a ValidationException.

    ", + "UpdateItemInput$Expected": "

    This is a legacy parameter, for backward compatibility. New applications should use ConditionExpression instead. Do not combine legacy parameters and expression parameters in a single API call; otherwise, DynamoDB will return a ValidationException.

    A map of attribute/condition pairs. Expected provides a conditional block for the UpdateItem operation.

    Each element of Expected consists of an attribute name, a comparison operator, and one or more values. DynamoDB compares the attribute with the value(s) you supplied, using the comparison operator. For each Expected element, the result of the evaluation is either true or false.

    If you specify more than one element in the Expected map, then by default all of the conditions must evaluate to true. In other words, the conditions are ANDed together. (You can use the ConditionalOperator parameter to OR the conditions instead. If you do this, then at least one of the conditions must evaluate to true, rather than all of them.)

    If the Expected map evaluates to true, then the conditional operation succeeds; otherwise, it fails.

    Expected contains the following:

    • AttributeValueList - One or more values to evaluate against the supplied attribute. The number of values in the list depends on the ComparisonOperator being used.

      For type Number, value comparisons are numeric.

      String value comparisons for greater than, equals, or less than are based on ASCII character code values. For example, a is greater than A, and a is greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters.

      For type Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values.

    • ComparisonOperator - A comparator for evaluating attributes in the AttributeValueList. When performing the comparison, DynamoDB uses strongly consistent reads.

      The following comparison operators are available:

      EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN

      The following are descriptions of each comparison operator.

      • EQ : Equal. EQ is supported for all datatypes, including lists and maps.

        AttributeValueList can contain only one AttributeValue element of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not equal {\"NS\":[\"6\", \"2\", \"1\"]}.

      • NE : Not equal. NE is supported for all datatypes, including lists and maps.

        AttributeValueList can contain only one AttributeValue of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not equal {\"NS\":[\"6\", \"2\", \"1\"]}.

      • LE : Less than or equal.

        AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

      • LT : Less than.

        AttributeValueList can contain only one AttributeValue of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

      • GE : Greater than or equal.

        AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

      • GT : Greater than.

        AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

      • NOT_NULL : The attribute exists. NOT_NULL is supported for all datatypes, including lists and maps.

        This operator tests for the existence of an attribute, not its data type. If the data type of attribute \"a\" is null, and you evaluate it using NOT_NULL, the result is a Boolean true. This result is because the attribute \"a\" exists; its data type is not relevant to the NOT_NULL comparison operator.

      • NULL : The attribute does not exist. NULL is supported for all datatypes, including lists and maps.

        This operator tests for the nonexistence of an attribute, not its data type. If the data type of attribute \"a\" is null, and you evaluate it using NULL, the result is a Boolean false. This is because the attribute \"a\" exists; its data type is not relevant to the NULL comparison operator.

      • CONTAINS : Checks for a subsequence, or value in a set.

        AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is of type String, then the operator checks for a substring match. If the target attribute of the comparison is of type Binary, then the operator looks for a subsequence of the target that matches the input. If the target attribute of the comparison is a set (\"SS\", \"NS\", or \"BS\"), then the operator evaluates to true if it finds an exact match with any member of the set.

        CONTAINS is supported for lists: When evaluating \"a CONTAINS b\", \"a\" can be a list; however, \"b\" cannot be a set, a map, or a list.

      • NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value in a set.

        AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is a String, then the operator checks for the absence of a substring match. If the target attribute of the comparison is Binary, then the operator checks for the absence of a subsequence of the target that matches the input. If the target attribute of the comparison is a set (\"SS\", \"NS\", or \"BS\"), then the operator evaluates to true if it does not find an exact match with any member of the set.

        NOT_CONTAINS is supported for lists: When evaluating \"a NOT CONTAINS b\", \"a\" can be a list; however, \"b\" cannot be a set, a map, or a list.

      • BEGINS_WITH : Checks for a prefix.

        AttributeValueList can contain only one AttributeValue of type String or Binary (not a Number or a set type). The target attribute of the comparison must be of type String or Binary (not a Number or a set type).

      • IN : Checks for matching elements within two sets.

        AttributeValueList can contain one or more AttributeValue elements of type String, Number, or Binary (not a set type). These attributes are compared against an existing set type attribute of an item. If any elements of the input set are present in the item attribute, the expression evaluates to true.

      • BETWEEN : Greater than or equal to the first value, and less than or equal to the second value.

        AttributeValueList must contain two AttributeValue elements of the same type, either String, Number, or Binary (not a set type). A target attribute matches if the target value is greater than, or equal to, the first element and less than, or equal to, the second element. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not compare to {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}

    For usage examples of AttributeValueList and ComparisonOperator, see Legacy Conditional Parameters in the Amazon DynamoDB Developer Guide.

    For backward compatibility with previous DynamoDB releases, the following parameters can be used instead of AttributeValueList and ComparisonOperator:

    • Value - A value for DynamoDB to compare with an attribute.

    • Exists - A Boolean value that causes DynamoDB to evaluate the value before attempting the conditional operation:

      • If Exists is true, DynamoDB will check to see if that attribute value already exists in the table. If it is found, then the condition evaluates to true; otherwise the condition evaluates to false.

      • If Exists is false, DynamoDB assumes that the attribute value does not exist in the table. If in fact the value does not exist, then the assumption is valid and the condition evaluates to true. If the value is found, despite the assumption that it does not exist, the condition evaluates to false.

      Note that the default value for Exists is true.

    The Value and Exists parameters are incompatible with AttributeValueList and ComparisonOperator. Note that if you use both sets of parameters at once, DynamoDB will return a ValidationException.

    This parameter does not support attributes of type List or Map.

    " + } + }, + "ExpectedAttributeValue": { + "base": "

    Represents a condition to be compared with an attribute value. This condition can be used with DeleteItem, PutItem or UpdateItem operations; if the comparison evaluates to true, the operation succeeds; if not, the operation fails. You can use ExpectedAttributeValue in one of two different ways:

    • Use AttributeValueList to specify one or more values to compare against an attribute. Use ComparisonOperator to specify how you want to perform the comparison. If the comparison evaluates to true, then the conditional operation succeeds.

    • Use Value to specify a value that DynamoDB will compare against an attribute. If the values match, then ExpectedAttributeValue evaluates to true and the conditional operation succeeds. Optionally, you can also set Exists to false, indicating that you do not expect to find the attribute value in the table. In this case, the conditional operation succeeds only if the comparison evaluates to false.

    Value and Exists are incompatible with AttributeValueList and ComparisonOperator. Note that if you use both sets of parameters at once, DynamoDB will return a ValidationException.
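
    For the legacy style only, a sketch of a DeleteItem guarded by Expected with the Value shorthand; ProductCatalog and ProductStatus are placeholders, and new code should prefer ConditionExpression as noted above:

      package main

      import (
          "fmt"

          "github.com/aws/aws-sdk-go/aws"
          "github.com/aws/aws-sdk-go/aws/session"
          "github.com/aws/aws-sdk-go/service/dynamodb"
      )

      func main() {
          svc := dynamodb.New(session.Must(session.NewSession(&aws.Config{
              Region: aws.String("us-west-2"), // example region
          })))

          // Legacy conditional delete: succeeds only if ProductStatus currently
          // equals "Discontinued" (Value implies Exists=true, an equality check).
          _, err := svc.DeleteItem(&dynamodb.DeleteItemInput{
              TableName: aws.String("ProductCatalog"), // hypothetical table
              Key: map[string]*dynamodb.AttributeValue{
                  "Id": {N: aws.String("456")},
              },
              Expected: map[string]*dynamodb.ExpectedAttributeValue{
                  "ProductStatus": {
                      Value: &dynamodb.AttributeValue{S: aws.String("Discontinued")},
                  },
              },
          })
          if err != nil {
              fmt.Println(err)
          }
      }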

    ", + "refs": { + "ExpectedAttributeMap$value": null + } + }, + "ExpressionAttributeNameMap": { + "base": null, + "refs": { + "DeleteItemInput$ExpressionAttributeNames": "

    One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:

    • To access an attribute whose name conflicts with a DynamoDB reserved word.

    • To create a placeholder for repeating occurrences of an attribute name in an expression.

    • To prevent special characters in an attribute name from being misinterpreted in an expression.

    Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:

    • Percentile

    The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:

    • {\"#P\":\"Percentile\"}

    You could then use this substitution in an expression, as in this example:

    • #P = :val

    Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.

    For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.
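
    A short sketch of the #P substitution above in a conditional DeleteItem; the Stats table and its attributes are placeholders:

      package main

      import (
          "fmt"

          "github.com/aws/aws-sdk-go/aws"
          "github.com/aws/aws-sdk-go/aws/session"
          "github.com/aws/aws-sdk-go/service/dynamodb"
      )

      func main() {
          svc := dynamodb.New(session.Must(session.NewSession(&aws.Config{
              Region: aws.String("us-west-2"), // example region
          })))

          // Percentile is a reserved word, so the expression reaches it through
          // the #P placeholder, exactly as in the documentation above.
          _, err := svc.DeleteItem(&dynamodb.DeleteItemInput{
              TableName: aws.String("Stats"), // hypothetical table
              Key: map[string]*dynamodb.AttributeValue{
                  "Id": {N: aws.String("1")},
              },
              ConditionExpression: aws.String("#P = :val"),
              ExpressionAttributeNames: map[string]*string{
                  "#P": aws.String("Percentile"),
              },
              ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
                  ":val": {N: aws.String("99")},
              },
          })
          if err != nil {
              fmt.Println(err)
          }
      }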

    ", + "GetItemInput$ExpressionAttributeNames": "

    One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:

    • To access an attribute whose name conflicts with a DynamoDB reserved word.

    • To create a placeholder for repeating occurrences of an attribute name in an expression.

    • To prevent special characters in an attribute name from being misinterpreted in an expression.

    Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:

    • Percentile

    The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:

    • {\"#P\":\"Percentile\"}

    You could then use this substitution in an expression, as in this example:

    • #P = :val

    Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.

    For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.

    ", + "KeysAndAttributes$ExpressionAttributeNames": "

    One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:

    • To access an attribute whose name conflicts with a DynamoDB reserved word.

    • To create a placeholder for repeating occurrences of an attribute name in an expression.

    • To prevent special characters in an attribute name from being misinterpreted in an expression.

    Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:

    • Percentile

    The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:

    • {\"#P\":\"Percentile\"}

    You could then use this substitution in an expression, as in this example:

    • #P = :val

    Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.

    For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.

    ", + "PutItemInput$ExpressionAttributeNames": "

    One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:

    • To access an attribute whose name conflicts with a DynamoDB reserved word.

    • To create a placeholder for repeating occurrences of an attribute name in an expression.

    • To prevent special characters in an attribute name from being misinterpreted in an expression.

    Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:

    • Percentile

    The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:

    • {\"#P\":\"Percentile\"}

    You could then use this substitution in an expression, as in this example:

    • #P = :val

    Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.

    For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.

    ", + "QueryInput$ExpressionAttributeNames": "

    One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:

    • To access an attribute whose name conflicts with a DynamoDB reserved word.

    • To create a placeholder for repeating occurrences of an attribute name in an expression.

    • To prevent special characters in an attribute name from being misinterpreted in an expression.

    Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:

    • Percentile

    The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:

    • {\"#P\":\"Percentile\"}

    You could then use this substitution in an expression, as in this example:

    • #P = :val

    Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.

    For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.

    ", + "ScanInput$ExpressionAttributeNames": "

    One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:

    • To access an attribute whose name conflicts with a DynamoDB reserved word.

    • To create a placeholder for repeating occurrences of an attribute name in an expression.

    • To prevent special characters in an attribute name from being misinterpreted in an expression.

    Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:

    • Percentile

    The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:

    • {\"#P\":\"Percentile\"}

    You could then use this substitution in an expression, as in this example:

    • #P = :val

    Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.

    For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.

    ", + "UpdateItemInput$ExpressionAttributeNames": "

    One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:

    • To access an attribute whose name conflicts with a DynamoDB reserved word.

    • To create a placeholder for repeating occurrences of an attribute name in an expression.

    • To prevent special characters in an attribute name from being misinterpreted in an expression.

    Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:

    • Percentile

    The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:

    • {\"#P\":\"Percentile\"}

    You could then use this substitution in an expression, as in this example:

    • #P = :val

    Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.

    For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.

    " + } + }, + "ExpressionAttributeNameVariable": { + "base": null, + "refs": { + "ExpressionAttributeNameMap$key": null + } + }, + "ExpressionAttributeValueMap": { + "base": null, + "refs": { + "DeleteItemInput$ExpressionAttributeValues": "

    One or more values that can be substituted in an expression.

    Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following:

    Available | Backordered | Discontinued

    You would first need to specify ExpressionAttributeValues as follows:

    { \":avail\":{\"S\":\"Available\"}, \":back\":{\"S\":\"Backordered\"}, \":disc\":{\"S\":\"Discontinued\"} }

    You could then use these values in an expression, such as this:

    ProductStatus IN (:avail, :back, :disc)

    For more information on expression attribute values, see Specifying Conditions in the Amazon DynamoDB Developer Guide.
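
    The ProductStatus example above, as a hedged DeleteItem sketch; the table name and key are placeholders:

      package main

      import (
          "fmt"

          "github.com/aws/aws-sdk-go/aws"
          "github.com/aws/aws-sdk-go/aws/session"
          "github.com/aws/aws-sdk-go/service/dynamodb"
      )

      func main() {
          svc := dynamodb.New(session.Must(session.NewSession(&aws.Config{
              Region: aws.String("us-west-2"), // example region
          })))

          // Delete only when the status is one of the three listed values;
          // each :token is resolved from ExpressionAttributeValues.
          _, err := svc.DeleteItem(&dynamodb.DeleteItemInput{
              TableName: aws.String("ProductCatalog"), // hypothetical table
              Key: map[string]*dynamodb.AttributeValue{
                  "Id": {N: aws.String("456")},
              },
              ConditionExpression: aws.String("ProductStatus IN (:avail, :back, :disc)"),
              ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
                  ":avail": {S: aws.String("Available")},
                  ":back":  {S: aws.String("Backordered")},
                  ":disc":  {S: aws.String("Discontinued")},
              },
          })
          if err != nil {
              fmt.Println(err)
          }
      }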

    ", + "PutItemInput$ExpressionAttributeValues": "

    One or more values that can be substituted in an expression.

    Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following:

    Available | Backordered | Discontinued

    You would first need to specify ExpressionAttributeValues as follows:

    { \":avail\":{\"S\":\"Available\"}, \":back\":{\"S\":\"Backordered\"}, \":disc\":{\"S\":\"Discontinued\"} }

    You could then use these values in an expression, such as this:

    ProductStatus IN (:avail, :back, :disc)

    For more information on expression attribute values, see Specifying Conditions in the Amazon DynamoDB Developer Guide.

    ", + "QueryInput$ExpressionAttributeValues": "

    One or more values that can be substituted in an expression.

    Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following:

    Available | Backordered | Discontinued

    You would first need to specify ExpressionAttributeValues as follows:

    { \":avail\":{\"S\":\"Available\"}, \":back\":{\"S\":\"Backordered\"}, \":disc\":{\"S\":\"Discontinued\"} }

    You could then use these values in an expression, such as this:

    ProductStatus IN (:avail, :back, :disc)

    For more information on expression attribute values, see Specifying Conditions in the Amazon DynamoDB Developer Guide.

    ", + "ScanInput$ExpressionAttributeValues": "

    One or more values that can be substituted in an expression.

    Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following:

    Available | Backordered | Discontinued

    You would first need to specify ExpressionAttributeValues as follows:

    { \":avail\":{\"S\":\"Available\"}, \":back\":{\"S\":\"Backordered\"}, \":disc\":{\"S\":\"Discontinued\"} }

    You could then use these values in an expression, such as this:

    ProductStatus IN (:avail, :back, :disc)

    For more information on expression attribute values, see Specifying Conditions in the Amazon DynamoDB Developer Guide.

    ", + "UpdateItemInput$ExpressionAttributeValues": "

    One or more values that can be substituted in an expression.

    Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following:

    Available | Backordered | Discontinued

    You would first need to specify ExpressionAttributeValues as follows:

    { \":avail\":{\"S\":\"Available\"}, \":back\":{\"S\":\"Backordered\"}, \":disc\":{\"S\":\"Discontinued\"} }

    You could then use these values in an expression, such as this:

    ProductStatus IN (:avail, :back, :disc)

    For more information on expression attribute values, see Specifying Conditions in the Amazon DynamoDB Developer Guide.

    " + } + }, + "ExpressionAttributeValueVariable": { + "base": null, + "refs": { + "ExpressionAttributeValueMap$key": null + } + }, + "FilterConditionMap": { + "base": null, + "refs": { + "QueryInput$QueryFilter": "

    This is a legacy parameter, for backward compatibility. New applications should use FilterExpression instead. Do not combine legacy parameters and expression parameters in a single API call; otherwise, DynamoDB will return a ValidationException.

    A condition that evaluates the query results after the items are read and returns only the desired values.

    This parameter does not support attributes of type List or Map.

    A QueryFilter is applied after the items have already been read; the process of filtering does not consume any additional read capacity units.

    If you provide more than one condition in the QueryFilter map, then by default all of the conditions must evaluate to true. In other words, the conditions are ANDed together. (You can use the ConditionalOperator parameter to OR the conditions instead. If you do this, then at least one of the conditions must evaluate to true, rather than all of them.)

    Note that QueryFilter does not allow key attributes. You cannot define a filter condition on a partition key or a sort key.

    Each QueryFilter element consists of an attribute name to compare, along with the following:

    • AttributeValueList - One or more values to evaluate against the supplied attribute. The number of values in the list depends on the operator specified in ComparisonOperator.

      For type Number, value comparisons are numeric.

      String value comparisons for greater than, equals, or less than are based on ASCII character code values. For example, a is greater than A, and a is greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters.

      For type Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values.

      For information on specifying data types in JSON, see JSON Data Format in the Amazon DynamoDB Developer Guide.

    • ComparisonOperator - A comparator for evaluating attributes. For example, equals, greater than, less than, etc.

      The following comparison operators are available:

      EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN

      For complete descriptions of all comparison operators, see the Condition data type.

    ", + "ScanInput$ScanFilter": "

    This is a legacy parameter, for backward compatibility. New applications should use FilterExpression instead. Do not combine legacy parameters and expression parameters in a single API call; otherwise, DynamoDB will return a ValidationException.

    A condition that evaluates the scan results and returns only the desired values.

    This parameter does not support attributes of type List or Map.

    If you specify more than one condition in the ScanFilter map, then by default all of the conditions must evaluate to true. In other words, the conditions are ANDed together. (You can use the ConditionalOperator parameter to OR the conditions instead. If you do this, then at least one of the conditions must evaluate to true, rather than all of them.)

    Each ScanFilter element consists of an attribute name to compare, along with the following:

    • AttributeValueList - One or more values to evaluate against the supplied attribute. The number of values in the list depends on the operator specified in ComparisonOperator .

      For type Number, value comparisons are numeric.

      String value comparisons for greater than, equals, or less than are based on ASCII character code values. For example, a is greater than A, and a is greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters.

      For Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values.

      For information on specifying data types in JSON, see JSON Data Format in the Amazon DynamoDB Developer Guide.

    • ComparisonOperator - A comparator for evaluating attributes. For example, equals, greater than, less than, etc.

      The following comparison operators are available:

      EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN

      For complete descriptions of all comparison operators, see Condition.

    " + } + }, + "GetItemInput": { + "base": "

    Represents the input of a GetItem operation.

    ", + "refs": { + } + }, + "GetItemOutput": { + "base": "

    Represents the output of a GetItem operation.

    ", + "refs": { + } + }, + "GlobalSecondaryIndex": { + "base": "

    Represents the properties of a global secondary index.

    ", + "refs": { + "GlobalSecondaryIndexList$member": null + } + }, + "GlobalSecondaryIndexDescription": { + "base": "

    Represents the properties of a global secondary index.

    ", + "refs": { + "GlobalSecondaryIndexDescriptionList$member": null + } + }, + "GlobalSecondaryIndexDescriptionList": { + "base": null, + "refs": { + "TableDescription$GlobalSecondaryIndexes": "

    The global secondary indexes, if any, on the table. Each element is composed of:

    • Backfilling - If true, then the index is currently in the backfilling phase. Backfilling occurs only when a new global secondary index is added to the table; it is the process by which DynamoDB populates the new index with data from the table. (This attribute does not appear for indexes that were created during a CreateTable operation.)

    • IndexName - The name of the global secondary index.

    • IndexSizeBytes - The total size of the global secondary index, in bytes. DynamoDB updates this value approximately every six hours. Recent changes might not be reflected in this value.

    • IndexStatus - The current status of the global secondary index:

      • CREATING - The index is being created.

      • UPDATING - The index is being updated.

      • DELETING - The index is being deleted.

      • ACTIVE - The index is ready for use.

    • ItemCount - The number of items in the global secondary index. DynamoDB updates this value approximately every six hours. Recent changes might not be reflected in this value.

    • KeySchema - Specifies the complete index key schema. The attribute names in the key schema must be between 1 and 255 characters (inclusive). The key schema must begin with the same partition key as the table.

    • Projection - Specifies attributes that are copied (projected) from the table into the index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Each attribute specification is composed of:

      • ProjectionType - One of the following:

        • KEYS_ONLY - Only the index and primary keys are projected into the index.

        • INCLUDE - Only the specified table attributes are projected into the index. The list of projected attributes is in NonKeyAttributes.

        • ALL - All of the table attributes are projected into the index.

      • NonKeyAttributes - A list of one or more non-key attribute names that are projected into the secondary index. The total count of attributes provided in NonKeyAttributes, summed across all of the secondary indexes, must not exceed 20. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.

    • ProvisionedThroughput - The provisioned throughput settings for the global secondary index, consisting of read and write capacity units, along with data about increases and decreases.

    If the table is in the DELETING state, no information about indexes will be returned.

    " + } + }, + "GlobalSecondaryIndexList": { + "base": null, + "refs": { + "CreateTableInput$GlobalSecondaryIndexes": "

    One or more global secondary indexes (the maximum is five) to be created on the table. Each global secondary index in the array includes the following:

    • IndexName - The name of the global secondary index. Must be unique only for this table.

    • KeySchema - Specifies the key schema for the global secondary index.

    • Projection - Specifies attributes that are copied (projected) from the table into the index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Each attribute specification is composed of:

      • ProjectionType - One of the following:

        • KEYS_ONLY - Only the index and primary keys are projected into the index.

        • INCLUDE - Only the specified table attributes are projected into the index. The list of projected attributes is in NonKeyAttributes.

        • ALL - All of the table attributes are projected into the index.

      • NonKeyAttributes - A list of one or more non-key attribute names that are projected into the secondary index. The total count of attributes provided in NonKeyAttributes, summed across all of the secondary indexes, must not exceed 20. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.

    • ProvisionedThroughput - The provisioned throughput settings for the global secondary index, consisting of read and write capacity units.
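
    A sketch of a CreateTable call in Go carrying one such global secondary index; all table, index, and attribute names here are hypothetical:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/dynamodb"
    )

    func main() {
        svc := dynamodb.New(session.Must(session.NewSession()))

        _, err := svc.CreateTable(&dynamodb.CreateTableInput{
            TableName: aws.String("GameScores"), // hypothetical
            // Index key attributes (GameTitle) must also be defined here.
            AttributeDefinitions: []*dynamodb.AttributeDefinition{
                {AttributeName: aws.String("UserId"), AttributeType: aws.String("S")},
                {AttributeName: aws.String("GameTitle"), AttributeType: aws.String("S")},
            },
            KeySchema: []*dynamodb.KeySchemaElement{
                {AttributeName: aws.String("UserId"), KeyType: aws.String("HASH")},
            },
            ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
                ReadCapacityUnits:  aws.Int64(5),
                WriteCapacityUnits: aws.Int64(5),
            },
            GlobalSecondaryIndexes: []*dynamodb.GlobalSecondaryIndex{{
                IndexName: aws.String("GameTitleIndex"), // unique within this table
                KeySchema: []*dynamodb.KeySchemaElement{
                    {AttributeName: aws.String("GameTitle"), KeyType: aws.String("HASH")},
                },
                Projection: &dynamodb.Projection{
                    ProjectionType:   aws.String("INCLUDE"),
                    NonKeyAttributes: []*string{aws.String("TopScore")},
                },
                ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
                    ReadCapacityUnits:  aws.Int64(5),
                    WriteCapacityUnits: aws.Int64(5),
                },
            }},
        })
        if err != nil {
            log.Fatal(err)
        }
    }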

    " + } + }, + "GlobalSecondaryIndexUpdate": { + "base": "

    Represents one of the following:

    • A new global secondary index to be added to an existing table.

    • New provisioned throughput parameters for an existing global secondary index.

    • An existing global secondary index to be removed from an existing table.

    ", + "refs": { + "GlobalSecondaryIndexUpdateList$member": null + } + }, + "GlobalSecondaryIndexUpdateList": { + "base": null, + "refs": { + "UpdateTableInput$GlobalSecondaryIndexUpdates": "

    An array of one or more global secondary indexes for the table. For each index in the array, you can request one action:

    • Create - add a new global secondary index to the table.

    • Update - modify the provisioned throughput settings of an existing global secondary index.

    • Delete - remove a global secondary index from the table.

    For more information, see Managing Global Secondary Indexes in the Amazon DynamoDB Developer Guide.
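
    A brief Go sketch of the Update action, assuming a hypothetical GameScores table with an existing GameTitleIndex:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/dynamodb"
    )

    func main() {
        svc := dynamodb.New(session.Must(session.NewSession()))

        // One element per requested action; here, a single Update that raises
        // the index's provisioned throughput.
        _, err := svc.UpdateTable(&dynamodb.UpdateTableInput{
            TableName: aws.String("GameScores"), // hypothetical
            GlobalSecondaryIndexUpdates: []*dynamodb.GlobalSecondaryIndexUpdate{{
                Update: &dynamodb.UpdateGlobalSecondaryIndexAction{
                    IndexName: aws.String("GameTitleIndex"),
                    ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
                        ReadCapacityUnits:  aws.Int64(10),
                        WriteCapacityUnits: aws.Int64(10),
                    },
                },
            }},
        })
        if err != nil {
            log.Fatal(err)
        }
    }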

    " + } + }, + "IndexName": { + "base": null, + "refs": { + "CreateGlobalSecondaryIndexAction$IndexName": "

    The name of the global secondary index to be created.

    ", + "DeleteGlobalSecondaryIndexAction$IndexName": "

    The name of the global secondary index to be deleted.

    ", + "GlobalSecondaryIndex$IndexName": "

    The name of the global secondary index. The name must be unique among all other indexes on this table.

    ", + "GlobalSecondaryIndexDescription$IndexName": "

    The name of the global secondary index.

    ", + "LocalSecondaryIndex$IndexName": "

    The name of the local secondary index. The name must be unique among all other indexes on this table.

    ", + "LocalSecondaryIndexDescription$IndexName": "

    Represents the name of the local secondary index.

    ", + "QueryInput$IndexName": "

    The name of an index to query. This index can be any local secondary index or global secondary index on the table. Note that if you use the IndexName parameter, you must also provide TableName.

    ", + "ScanInput$IndexName": "

    The name of a secondary index to scan. This index can be any local secondary index or global secondary index. Note that if you use the IndexName parameter, you must also provide TableName.

    ", + "SecondaryIndexesCapacityMap$key": null, + "UpdateGlobalSecondaryIndexAction$IndexName": "

    The name of the global secondary index to be updated.

    " + } + }, + "IndexStatus": { + "base": null, + "refs": { + "GlobalSecondaryIndexDescription$IndexStatus": "

    The current state of the global secondary index:

    • CREATING - The index is being created.

    • UPDATING - The index is being updated.

    • DELETING - The index is being deleted.

    • ACTIVE - The index is ready for use.

    " + } + }, + "Integer": { + "base": null, + "refs": { + "QueryOutput$Count": "

    The number of items in the response.

    If you used a QueryFilter in the request, then Count is the number of items returned after the filter was applied, and ScannedCount is the number of matching items before the filter was applied.

    If you did not use a filter in the request, then Count and ScannedCount are the same.

    ", + "QueryOutput$ScannedCount": "

    The number of items evaluated, before any QueryFilter is applied. A high ScannedCount value with few, or no, Count results indicates an inefficient Query operation. For more information, see Count and ScannedCount in the Amazon DynamoDB Developer Guide.

    If you did not use a filter in the request, then ScannedCount is the same as Count.

    ", + "ScanOutput$Count": "

    The number of items in the response.

    If you set ScanFilter in the request, then Count is the number of items returned after the filter was applied, and ScannedCount is the number of matching items before the filter was applied.

    If you did not use a filter in the request, then Count is the same as ScannedCount.

    ", + "ScanOutput$ScannedCount": "

    The number of items evaluated, before any ScanFilter is applied. A high ScannedCount value with few, or no, Count results indicates an inefficient Scan operation. For more information, see Count and ScannedCount in the Amazon DynamoDB Developer Guide.

    If you did not use a filter in the request, then ScannedCount is the same as Count.

    " + } + }, + "InternalServerError": { + "base": "

    An error occurred on the server side.

    ", + "refs": { + } + }, + "ItemCollectionKeyAttributeMap": { + "base": null, + "refs": { + "ItemCollectionMetrics$ItemCollectionKey": "

    The partition key value of the item collection. This value is the same as the partition key value of the item.

    " + } + }, + "ItemCollectionMetrics": { + "base": "

    Information about item collections, if any, that were affected by the operation. ItemCollectionMetrics is only returned if the request asked for it. If the table does not have any local secondary indexes, this information is not returned in the response.

    ", + "refs": { + "DeleteItemOutput$ItemCollectionMetrics": "

    Information about item collections, if any, that were affected by the operation. ItemCollectionMetrics is only returned if the request asked for it. If the table does not have any local secondary indexes, this information is not returned in the response.

    Each ItemCollectionMetrics element consists of:

    • ItemCollectionKey - The partition key value of the item collection. This is the same as the partition key value of the item itself.

    • SizeEstimateRange - An estimate of item collection size, in gigabytes. This value is a two-element array containing a lower bound and an upper bound for the estimate. The estimate includes the size of all the items in the table, plus the size of all attributes projected into all of the local secondary indexes on that table. Use this estimate to measure whether a local secondary index is approaching its size limit.

      The estimate is subject to change over time; therefore, do not rely on the precision or accuracy of the estimate.

    ", + "ItemCollectionMetricsMultiple$member": null, + "PutItemOutput$ItemCollectionMetrics": "

    Information about item collections, if any, that were affected by the operation. ItemCollectionMetrics is only returned if the request asked for it. If the table does not have any local secondary indexes, this information is not returned in the response.

    Each ItemCollectionMetrics element consists of:

    • ItemCollectionKey - The partition key value of the item collection. This is the same as the partition key value of the item itself.

    • SizeEstimateRange - An estimate of item collection size, in gigabytes. This value is a two-element array containing a lower bound and an upper bound for the estimate. The estimate includes the size of all the items in the table, plus the size of all attributes projected into all of the local secondary indexes on that table. Use this estimate to measure whether a local secondary index is approaching its size limit.

      The estimate is subject to change over time; therefore, do not rely on the precision or accuracy of the estimate.

    ", + "UpdateItemOutput$ItemCollectionMetrics": null + } + }, + "ItemCollectionMetricsMultiple": { + "base": null, + "refs": { + "ItemCollectionMetricsPerTable$value": null + } + }, + "ItemCollectionMetricsPerTable": { + "base": null, + "refs": { + "BatchWriteItemOutput$ItemCollectionMetrics": "

    A list of tables that were processed by BatchWriteItem and, for each table, information about any item collections that were affected by individual DeleteItem or PutItem operations.

    Each entry consists of the following subelements:

    • ItemCollectionKey - The partition key value of the item collection. This is the same as the partition key value of the item.

    • SizeEstimateRange - An estimate of item collection size, expressed in GB. This is a two-element array containing a lower bound and an upper bound for the estimate. The estimate includes the size of all the items in the table, plus the size of all attributes projected into all of the local secondary indexes on the table. Use this estimate to measure whether a local secondary index is approaching its size limit.

      The estimate is subject to change over time; therefore, do not rely on the precision or accuracy of the estimate.

    " + } + }, + "ItemCollectionSizeEstimateBound": { + "base": null, + "refs": { + "ItemCollectionSizeEstimateRange$member": null + } + }, + "ItemCollectionSizeEstimateRange": { + "base": null, + "refs": { + "ItemCollectionMetrics$SizeEstimateRangeGB": "

    An estimate of item collection size, in gigabytes. This value is a two-element array containing a lower bound and an upper bound for the estimate. The estimate includes the size of all the items in the table, plus the size of all attributes projected into all of the local secondary indexes on that table. Use this estimate to measure whether a local secondary index is approaching its size limit.

    The estimate is subject to change over time; therefore, do not rely on the precision or accuracy of the estimate.

    " + } + }, + "ItemCollectionSizeLimitExceededException": { + "base": "

    An item collection is too large. This exception is only returned for tables that have one or more local secondary indexes.

    ", + "refs": { + } + }, + "ItemList": { + "base": null, + "refs": { + "BatchGetResponseMap$value": null, + "QueryOutput$Items": "

    An array of item attributes that match the query criteria. Each element in this array consists of an attribute name and the value for that attribute.

    ", + "ScanOutput$Items": "

    An array of item attributes that match the scan criteria. Each element in this array consists of an attribute name and the value for that attribute.

    " + } + }, + "Key": { + "base": null, + "refs": { + "DeleteItemInput$Key": "

    A map of attribute names to AttributeValue objects, representing the primary key of the item to delete.

    For the primary key, you must provide all of the attributes. For example, with a simple primary key, you only need to provide a value for the partition key. For a composite primary key, you must provide values for both the partition key and the sort key.
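
    For illustration, a Go sketch of a DeleteItem key map for an assumed composite-key table (Music, with partition key Artist and sort key SongTitle):

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/dynamodb"
    )

    func main() {
        svc := dynamodb.New(session.Must(session.NewSession()))

        // Both key attributes are required because the table's primary key
        // is composite; a simple-key table would need only the partition key.
        _, err := svc.DeleteItem(&dynamodb.DeleteItemInput{
            TableName: aws.String("Music"), // hypothetical
            Key: map[string]*dynamodb.AttributeValue{
                "Artist":    {S: aws.String("No One You Know")},   // partition key
                "SongTitle": {S: aws.String("Scared of My Shadow")}, // sort key
            },
        })
        if err != nil {
            log.Fatal(err)
        }
    }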

    ", + "DeleteRequest$Key": "

    A map of attribute names to attribute values, representing the primary key of the item to delete. All of the table's primary key attributes must be specified, and their data types must match those of the table's key schema.

    ", + "GetItemInput$Key": "

    A map of attribute names to AttributeValue objects, representing the primary key of the item to retrieve.

    For the primary key, you must provide all of the attributes. For example, with a simple primary key, you only need to provide a value for the partition key. For a composite primary key, you must provide values for both the partition key and the sort key.

    ", + "KeyList$member": null, + "QueryInput$ExclusiveStartKey": "

    The primary key of the first item that this operation will evaluate. Use the value that was returned for LastEvaluatedKey in the previous operation.

    The data type for ExclusiveStartKey must be String, Number or Binary. No set data types are allowed.

    ", + "QueryOutput$LastEvaluatedKey": "

    The primary key of the item where the operation stopped, inclusive of the previous result set. Use this value to start a new operation, excluding this value in the new request.

    If LastEvaluatedKey is empty, then the \"last page\" of results has been processed and there is no more data to be retrieved.

    If LastEvaluatedKey is not empty, it does not necessarily mean that there is more data in the result set. The only way to know when you have reached the end of the result set is when LastEvaluatedKey is empty.
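
    This suggests the usual pagination loop; a Go sketch, assuming a hypothetical Music table keyed on Artist:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/dynamodb"
    )

    func main() {
        svc := dynamodb.New(session.Must(session.NewSession()))

        input := &dynamodb.QueryInput{
            TableName:              aws.String("Music"), // hypothetical
            KeyConditionExpression: aws.String("Artist = :a"),
            ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
                ":a": {S: aws.String("No One You Know")},
            },
        }
        for {
            out, err := svc.Query(input)
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println("items in this page:", len(out.Items))
            // An empty LastEvaluatedKey is the only reliable end-of-results signal.
            if len(out.LastEvaluatedKey) == 0 {
                break
            }
            input.ExclusiveStartKey = out.LastEvaluatedKey
        }
    }

    Checking the emptiness of LastEvaluatedKey, rather than the size of each page, implements the rule stated above: a short or empty page does not by itself mean the result set is exhausted.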

    ", + "ScanInput$ExclusiveStartKey": "

    The primary key of the first item that this operation will evaluate. Use the value that was returned for LastEvaluatedKey in the previous operation.

    The data type for ExclusiveStartKey must be String, Number or Binary. No set data types are allowed.

    In a parallel scan, a Scan request that includes ExclusiveStartKey must specify the same segment whose previous Scan returned the corresponding value of LastEvaluatedKey.

    ", + "ScanOutput$LastEvaluatedKey": "

    The primary key of the item where the operation stopped, inclusive of the previous result set. Use this value to start a new operation, excluding this value in the new request.

    If LastEvaluatedKey is empty, then the \"last page\" of results has been processed and there is no more data to be retrieved.

    If LastEvaluatedKey is not empty, it does not necessarily mean that there is more data in the result set. The only way to know when you have reached the end of the result set is when LastEvaluatedKey is empty.

    ", + "UpdateItemInput$Key": "

    The primary key of the item to be updated. Each element consists of an attribute name and a value for that attribute.

    For the primary key, you must provide all of the attributes. For example, with a simple primary key, you only need to provide a value for the partition key. For a composite primary key, you must provide values for both the partition key and the sort key.

    " + } + }, + "KeyConditions": { + "base": null, + "refs": { + "QueryInput$KeyConditions": "

    This is a legacy parameter, for backward compatibility. New applications should use KeyConditionExpression instead. Do not combine legacy parameters and expression parameters in a single API call; otherwise, DynamoDB will return a ValidationException exception.

    The selection criteria for the query. For a query on a table, you can have conditions only on the table primary key attributes. You must provide the partition key name and value as an EQ condition. You can optionally provide a second condition, referring to the sort key.

    If you don't provide a sort key condition, all of the items that match the partition key will be retrieved. If a FilterExpression or QueryFilter is present, it will be applied after the items are retrieved.

    For a query on an index, you can have conditions only on the index key attributes. You must provide the index partition key name and value as an EQ condition. You can optionally provide a second condition, referring to the index sort key.

    Each KeyConditions element consists of an attribute name to compare, along with the following:

    • AttributeValueList - One or more values to evaluate against the supplied attribute. The number of values in the list depends on the ComparisonOperator being used.

      For type Number, value comparisons are numeric.

      String value comparisons for greater than, equals, or less than are based on ASCII character code values. For example, a is greater than A, and a is greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters.

      For Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values.

    • ComparisonOperator - A comparator for evaluating attributes, for example, equals, greater than, less than, and so on.

      For KeyConditions, only the following comparison operators are supported:

      EQ | LE | LT | GE | GT | BEGINS_WITH | BETWEEN

      The following are descriptions of these comparison operators.

      • EQ : Equal.

        AttributeValueList can contain only one AttributeValue of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one specified in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not equal {\"NS\":[\"6\", \"2\", \"1\"]}.

      • LE : Less than or equal.

        AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

      • LT : Less than.

        AttributeValueList can contain only one AttributeValue of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

      • GE : Greater than or equal.

        AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

      • GT : Greater than.

        AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

      • BEGINS_WITH : Checks for a prefix.

        AttributeValueList can contain only one AttributeValue of type String or Binary (not a Number or a set type). The target attribute of the comparison must be of type String or Binary (not a Number or a set type).

      • BETWEEN : Greater than or equal to the first value, and less than or equal to the second value.

        AttributeValueList must contain two AttributeValue elements of the same type, either String, Number, or Binary (not a set type). A target attribute matches if the target value is greater than, or equal to, the first element and less than, or equal to, the second element. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not compare to {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}

    For usage examples of AttributeValueList and ComparisonOperator, see Legacy Conditional Parameters in the Amazon DynamoDB Developer Guide.

    " + } + }, + "KeyExpression": { + "base": null, + "refs": { + "QueryInput$KeyConditionExpression": "

    The condition that specifies the key value(s) for items to be retrieved by the Query action.

    The condition must perform an equality test on a single partition key value. The condition can also perform one of several comparison tests on a single sort key value. Query can use KeyConditionExpression to retrieve one item with a given partition key value and sort key value, or several items that have the same partition key value but different sort key values.

    The partition key equality test is required, and must be specified in the following format:

    partitionKeyName = :partitionkeyval

    If you also want to provide a condition for the sort key, it must be combined using AND with the condition for the partition key. Following is an example, using the = comparison operator for the sort key:

    partitionKeyName = :partitionkeyval AND sortKeyName = :sortkeyval

    Valid comparisons for the sort key condition are as follows:

    • sortKeyName = :sortkeyval - true if the sort key value is equal to :sortkeyval.

    • sortKeyName < :sortkeyval - true if the sort key value is less than :sortkeyval.

    • sortKeyName <= :sortkeyval - true if the sort key value is less than or equal to :sortkeyval.

    • sortKeyName > :sortkeyval - true if the sort key value is greater than :sortkeyval.

    • sortKeyName >= :sortkeyval - true if the sort key value is greater than or equal to :sortkeyval.

    • sortKeyName BETWEEN :sortkeyval1 AND :sortkeyval2 - true if the sort key value is greater than or equal to :sortkeyval1, and less than or equal to :sortkeyval2.

    • begins_with ( sortKeyName, :sortkeyval ) - true if the sort key value begins with a particular operand. (You cannot use this function with a sort key that is of type Number.) Note that the function name begins_with is case-sensitive.

    Use the ExpressionAttributeValues parameter to replace tokens such as :partitionkeyval and :sortkeyval with actual values at runtime.

    You can optionally use the ExpressionAttributeNames parameter to replace the names of the partition key and sort key with placeholder tokens. This option might be necessary if an attribute name conflicts with a DynamoDB reserved word. For example, the following KeyConditionExpression parameter causes an error because Size is a reserved word:

    • Size = :myval

    To work around this, define a placeholder (such as #S) to represent the attribute name Size. KeyConditionExpression then is as follows:

    • #S = :myval

    For a list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide.

    For more information on ExpressionAttributeNames and ExpressionAttributeValues, see Using Placeholders for Attribute Names and Values in the Amazon DynamoDB Developer Guide.

    KeyConditionExpression replaces the legacy KeyConditions parameter.
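
    A Go sketch of the placeholder workaround, assuming a hypothetical Inventory table whose partition key happens to be named Size:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/dynamodb"
    )

    func main() {
        svc := dynamodb.New(session.Must(session.NewSession()))

        out, err := svc.Query(&dynamodb.QueryInput{
            TableName:              aws.String("Inventory"), // hypothetical
            KeyConditionExpression: aws.String("#S = :myval"),
            // "#S" stands in for "Size", which is a DynamoDB reserved word.
            ExpressionAttributeNames: map[string]*string{"#S": aws.String("Size")},
            ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
                ":myval": {N: aws.String("10")},
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("matching items:", aws.Int64Value(out.Count))
    }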

    " + } + }, + "KeyList": { + "base": null, + "refs": { + "KeysAndAttributes$Keys": "

    The primary key attribute values that define the items and the attributes associated with the items.

    " + } + }, + "KeySchema": { + "base": null, + "refs": { + "CreateGlobalSecondaryIndexAction$KeySchema": "

    The key schema for the global secondary index.

    ", + "CreateTableInput$KeySchema": "

    Specifies the attributes that make up the primary key for a table or an index. The attributes in KeySchema must also be defined in the AttributeDefinitions array. For more information, see Data Model in the Amazon DynamoDB Developer Guide.

    Each KeySchemaElement in the array is composed of:

    • AttributeName - The name of this key attribute.

    • KeyType - The role that the key attribute will assume:

      • HASH - partition key

      • RANGE - sort key

    The partition key of an item is also known as its hash attribute. The term \"hash attribute\" derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values.

    The sort key of an item is also known as its range attribute. The term \"range attribute\" derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.

    For a simple primary key (partition key), you must provide exactly one element with a KeyType of HASH.

    For a composite primary key (partition key and sort key), you must provide exactly two elements, in this order: The first element must have a KeyType of HASH, and the second element must have a KeyType of RANGE.

    For more information, see Specifying the Primary Key in the Amazon DynamoDB Developer Guide.
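
    A compact Go sketch of a composite-key CreateTable call; the Music table and its attribute names are assumptions. Note that every attribute named in KeySchema also appears in AttributeDefinitions:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/dynamodb"
    )

    func main() {
        svc := dynamodb.New(session.Must(session.NewSession()))

        _, err := svc.CreateTable(&dynamodb.CreateTableInput{
            TableName: aws.String("Music"), // hypothetical
            AttributeDefinitions: []*dynamodb.AttributeDefinition{
                {AttributeName: aws.String("Artist"), AttributeType: aws.String("S")},
                {AttributeName: aws.String("SongTitle"), AttributeType: aws.String("S")},
            },
            // HASH first, then RANGE, as required for a composite primary key.
            KeySchema: []*dynamodb.KeySchemaElement{
                {AttributeName: aws.String("Artist"), KeyType: aws.String("HASH")},     // partition key
                {AttributeName: aws.String("SongTitle"), KeyType: aws.String("RANGE")}, // sort key
            },
            ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
                ReadCapacityUnits:  aws.Int64(5),
                WriteCapacityUnits: aws.Int64(5),
            },
        })
        if err != nil {
            log.Fatal(err)
        }
    }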

    ", + "GlobalSecondaryIndex$KeySchema": "

    The complete key schema for a global secondary index, which consists of one or more pairs of attribute names and key types:

    • HASH - partition key

    • RANGE - sort key

    The partition key of an item is also known as its hash attribute. The term \"hash attribute\" derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values.

    The sort key of an item is also known as its range attribute. The term \"range attribute\" derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.

    ", + "GlobalSecondaryIndexDescription$KeySchema": "

    The complete key schema for a global secondary index, which consists of one or more pairs of attribute names and key types:

    • HASH - partition key

    • RANGE - sort key

    The partition key of an item is also known as its hash attribute. The term \"hash attribute\" derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values.

    The sort key of an item is also known as its range attribute. The term \"range attribute\" derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.

    ", + "LocalSecondaryIndex$KeySchema": "

    The complete key schema for the local secondary index, consisting of one or more pairs of attribute names and key types:

    • HASH - partition key

    • RANGE - sort key

    The partition key of an item is also known as its hash attribute. The term \"hash attribute\" derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values.

    The sort key of an item is also known as its range attribute. The term \"range attribute\" derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.

    ", + "LocalSecondaryIndexDescription$KeySchema": "

    The complete key schema for the local secondary index, consisting of one or more pairs of attribute names and key types:

    • HASH - partition key

    • RANGE - sort key

    The partition key of an item is also known as its hash attribute. The term \"hash attribute\" derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values.

    The sort key of an item is also known as its range attribute. The term \"range attribute\" derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.

    ", + "TableDescription$KeySchema": "

    The primary key structure for the table. Each KeySchemaElement consists of:

    • AttributeName - The name of the attribute.

    • KeyType - The role of the attribute:

      • HASH - partition key

      • RANGE - sort key

      The partition key of an item is also known as its hash attribute. The term \"hash attribute\" derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values.

      The sort key of an item is also known as its range attribute. The term \"range attribute\" derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.

    For more information about primary keys, see Primary Key in the Amazon DynamoDB Developer Guide.

    " + } + }, + "KeySchemaAttributeName": { + "base": null, + "refs": { + "AttributeDefinition$AttributeName": "

    A name for the attribute.

    ", + "KeySchemaElement$AttributeName": "

    The name of a key attribute.

    " + } + }, + "KeySchemaElement": { + "base": "

    Represents a single element of a key schema. A key schema specifies the attributes that make up the primary key of a table, or the key attributes of an index.

    A KeySchemaElement represents exactly one attribute of the primary key. For example, a simple primary key would be represented by one KeySchemaElement (for the partition key). A composite primary key would require one KeySchemaElement for the partition key, and another KeySchemaElement for the sort key.

    A KeySchemaElement must be a scalar, top-level attribute (not a nested attribute). The data type must be one of String, Number, or Binary. The attribute cannot be nested within a List or a Map.

    ", + "refs": { + "KeySchema$member": null + } + }, + "KeyType": { + "base": null, + "refs": { + "KeySchemaElement$KeyType": "

    The role that this key attribute will assume:

    • HASH - partition key

    • RANGE - sort key

    The partition key of an item is also known as its hash attribute. The term \"hash attribute\" derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values.

    The sort key of an item is also known as its range attribute. The term \"range attribute\" derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.

    " + } + }, + "KeysAndAttributes": { + "base": "

    Represents a set of primary keys and, for each key, the attributes to retrieve from the table.

    For each primary key, you must provide all of the key attributes. For example, with a simple primary key, you only need to provide the partition key. For a composite primary key, you must provide both the partition key and the sort key.
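
    A Go sketch of one KeysAndAttributes entry inside a BatchGetItem request, again assuming the hypothetical Music table with partition key Artist and sort key SongTitle:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/dynamodb"
    )

    func main() {
        svc := dynamodb.New(session.Must(session.NewSession()))

        out, err := svc.BatchGetItem(&dynamodb.BatchGetItemInput{
            RequestItems: map[string]*dynamodb.KeysAndAttributes{
                "Music": { // hypothetical table
                    // Each entry in Keys is one complete primary key.
                    Keys: []map[string]*dynamodb.AttributeValue{
                        {
                            "Artist":    {S: aws.String("No One You Know")}, // partition key
                            "SongTitle": {S: aws.String("Call Me Today")},   // sort key
                        },
                    },
                },
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("items returned:", len(out.Responses["Music"]))
    }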

    ", + "refs": { + "BatchGetRequestMap$value": null + } + }, + "LimitExceededException": { + "base": "

    The number of concurrent table requests (cumulative number of tables in the CREATING, DELETING or UPDATING state) exceeds the maximum allowed of 10.

    Also, for tables with secondary indexes, only one of those tables can be in the CREATING state at any point in time. Do not attempt to create more than one such table simultaneously.

    The total limit of tables in the ACTIVE state is 250.

    ", + "refs": { + } + }, + "ListAttributeValue": { + "base": null, + "refs": { + "AttributeValue$L": "

    A List of attribute values.

    " + } + }, + "ListTablesInput": { + "base": "

    Represents the input of a ListTables operation.

    ", + "refs": { + } + }, + "ListTablesInputLimit": { + "base": null, + "refs": { + "ListTablesInput$Limit": "

    A maximum number of table names to return. If this parameter is not specified, the limit is 100.

    " + } + }, + "ListTablesOutput": { + "base": "

    Represents the output of a ListTables operation.

    ", + "refs": { + } + }, + "LocalSecondaryIndex": { + "base": "

    Represents the properties of a local secondary index.

    ", + "refs": { + "LocalSecondaryIndexList$member": null + } + }, + "LocalSecondaryIndexDescription": { + "base": "

    Represents the properties of a local secondary index.

    ", + "refs": { + "LocalSecondaryIndexDescriptionList$member": null + } + }, + "LocalSecondaryIndexDescriptionList": { + "base": null, + "refs": { + "TableDescription$LocalSecondaryIndexes": "

    Represents one or more local secondary indexes on the table. Each index is scoped to a given partition key value. Tables with one or more local secondary indexes are subject to an item collection size limit, where the amount of data within a given item collection cannot exceed 10 GB. Each element is composed of:

    • IndexName - The name of the local secondary index.

    • KeySchema - Specifies the complete index key schema. The attribute names in the key schema must be between 1 and 255 characters (inclusive). The key schema must begin with the same partition key as the table.

    • Projection - Specifies attributes that are copied (projected) from the table into the index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Each attribute specification is composed of:

      • ProjectionType - One of the following:

        • KEYS_ONLY - Only the index and primary keys are projected into the index.

        • INCLUDE - Only the specified table attributes are projected into the index. The list of projected attributes is in NonKeyAttributes.

        • ALL - All of the table attributes are projected into the index.

      • NonKeyAttributes - A list of one or more non-key attribute names that are projected into the secondary index. The total count of attributes provided in NonKeyAttributes, summed across all of the secondary indexes, must not exceed 20. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.

    • IndexSizeBytes - Represents the total size of the index, in bytes. DynamoDB updates this value approximately every six hours. Recent changes might not be reflected in this value.

    • ItemCount - Represents the number of items in the index. DynamoDB updates this value approximately every six hours. Recent changes might not be reflected in this value.

    If the table is in the DELETING state, no information about indexes will be returned.

    " + } + }, + "LocalSecondaryIndexList": { + "base": null, + "refs": { + "CreateTableInput$LocalSecondaryIndexes": "

    One or more local secondary indexes (the maximum is five) to be created on the table. Each index is scoped to a given partition key value. There is a 10 GB size limit per partition key value; otherwise, the size of a local secondary index is unconstrained.

    Each local secondary index in the array includes the following:

    • IndexName - The name of the local secondary index. Must be unique only for this table.

    • KeySchema - Specifies the key schema for the local secondary index. The key schema must begin with the same partition key as the table.

    • Projection - Specifies attributes that are copied (projected) from the table into the index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Each attribute specification is composed of:

      • ProjectionType - One of the following:

        • KEYS_ONLY - Only the index and primary keys are projected into the index.

        • INCLUDE - Only the specified table attributes are projected into the index. The list of projected attributes is in NonKeyAttributes.

        • ALL - All of the table attributes are projected into the index.

      • NonKeyAttributes - A list of one or more non-key attribute names that are projected into the secondary index. The total count of attributes provided in NonKeyAttributes, summed across all of the secondary indexes, must not exceed 20. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.

    " + } + }, + "Long": { + "base": null, + "refs": { + "GlobalSecondaryIndexDescription$IndexSizeBytes": "

    The total size of the specified index, in bytes. DynamoDB updates this value approximately every six hours. Recent changes might not be reflected in this value.

    ", + "GlobalSecondaryIndexDescription$ItemCount": "

    The number of items in the specified index. DynamoDB updates this value approximately every six hours. Recent changes might not be reflected in this value.

    ", + "LocalSecondaryIndexDescription$IndexSizeBytes": "

    The total size of the specified index, in bytes. DynamoDB updates this value approximately every six hours. Recent changes might not be reflected in this value.

    ", + "LocalSecondaryIndexDescription$ItemCount": "

    The number of items in the specified index. DynamoDB updates this value approximately every six hours. Recent changes might not be reflected in this value.

    ", + "TableDescription$TableSizeBytes": "

    The total size of the specified table, in bytes. DynamoDB updates this value approximately every six hours. Recent changes might not be reflected in this value.

    ", + "TableDescription$ItemCount": "

    The number of items in the specified table. DynamoDB updates this value approximately every six hours. Recent changes might not be reflected in this value.

    " + } + }, + "MapAttributeValue": { + "base": null, + "refs": { + "AttributeValue$M": "

    A Map of attribute values.

    " + } + }, + "NonKeyAttributeName": { + "base": null, + "refs": { + "NonKeyAttributeNameList$member": null + } + }, + "NonKeyAttributeNameList": { + "base": null, + "refs": { + "Projection$NonKeyAttributes": "

    Represents the non-key attribute names which will be projected into the index.

    For local secondary indexes, the total count of NonKeyAttributes summed across all of the local secondary indexes must not exceed 20. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.

    " + } + }, + "NullAttributeValue": { + "base": null, + "refs": { + "AttributeValue$NULL": "

    A Null data type.

    " + } + }, + "NumberAttributeValue": { + "base": null, + "refs": { + "AttributeValue$N": "

    A Number data type.

    ", + "NumberSetAttributeValue$member": null + } + }, + "NumberSetAttributeValue": { + "base": null, + "refs": { + "AttributeValue$NS": "

    A Number Set data type.

    " + } + }, + "PositiveIntegerObject": { + "base": null, + "refs": { + "QueryInput$Limit": "

    The maximum number of items to evaluate (not necessarily the number of matching items). If DynamoDB evaluates this many items while processing the results, it stops the operation and returns the matching values up to that point, along with a key in LastEvaluatedKey that you can apply in a subsequent operation to pick up where you left off. DynamoDB also stops the operation and returns a LastEvaluatedKey if the processed data set size exceeds 1 MB before it reaches this limit. For more information, see Query and Scan in the Amazon DynamoDB Developer Guide.

    ", + "ScanInput$Limit": "

    The maximum number of items to evaluate (not necessarily the number of matching items). If DynamoDB evaluates this many items while processing the results, it stops the operation and returns the matching values up to that point, along with a key in LastEvaluatedKey that you can apply in a subsequent operation to pick up where you left off. DynamoDB also stops the operation and returns a LastEvaluatedKey if the processed data set size exceeds 1 MB before it reaches this limit. For more information, see Query and Scan in the Amazon DynamoDB Developer Guide.

    " + } + }, + "PositiveLongObject": { + "base": null, + "refs": { + "DescribeLimitsOutput$AccountMaxReadCapacityUnits": "

    The maximum total read capacity units that your account allows you to provision across all of your tables in this region.

    ", + "DescribeLimitsOutput$AccountMaxWriteCapacityUnits": "

    The maximum total write capacity units that your account allows you to provision across all of your tables in this region.

    ", + "DescribeLimitsOutput$TableMaxReadCapacityUnits": "

    The maximum read capacity units that your account allows you to provision for a new table that you are creating in this region, including the read capacity units provisioned for its global secondary indexes (GSIs).

    ", + "DescribeLimitsOutput$TableMaxWriteCapacityUnits": "

    The maximum write capacity units that your account allows you to provision for a new table that you are creating in this region, including the write capacity units provisioned for its global secondary indexes (GSIs).

    ", + "ProvisionedThroughput$ReadCapacityUnits": "

    The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide.

    ", + "ProvisionedThroughput$WriteCapacityUnits": "

    The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide.

    ", + "ProvisionedThroughputDescription$NumberOfDecreasesToday": "

    The number of provisioned throughput decreases for this table during this UTC calendar day. For current maximums on provisioned throughput decreases, see Limits in the Amazon DynamoDB Developer Guide.

    ", + "ProvisionedThroughputDescription$ReadCapacityUnits": "

    The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException. Eventually consistent reads require less effort than strongly consistent reads, so a setting of 50 ReadCapacityUnits per second provides 100 eventually consistent ReadCapacityUnits per second.

    ", + "ProvisionedThroughputDescription$WriteCapacityUnits": "

    The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException.

    " + } + }, + "Projection": { + "base": "

    Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.

    ", + "refs": { + "CreateGlobalSecondaryIndexAction$Projection": null, + "GlobalSecondaryIndex$Projection": null, + "GlobalSecondaryIndexDescription$Projection": null, + "LocalSecondaryIndex$Projection": null, + "LocalSecondaryIndexDescription$Projection": null + } + }, + "ProjectionExpression": { + "base": null, + "refs": { + "GetItemInput$ProjectionExpression": "

    A string that identifies one or more attributes to retrieve from the table. These attributes can include scalars, sets, or elements of a JSON document. The attributes in the expression must be separated by commas.

    If no attribute names are specified, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result.

    For more information, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.

    ProjectionExpression replaces the legacy AttributesToGet parameter.
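
    A Go sketch of GetItem with a ProjectionExpression; the table, key, and projected attribute names are assumptions:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/dynamodb"
    )

    func main() {
        svc := dynamodb.New(session.Must(session.NewSession()))

        out, err := svc.GetItem(&dynamodb.GetItemInput{
            TableName: aws.String("Music"), // hypothetical
            Key: map[string]*dynamodb.AttributeValue{
                "Artist":    {S: aws.String("No One You Know")},
                "SongTitle": {S: aws.String("Call Me Today")},
            },
            // Comma-separated attributes to retrieve; attributes that are
            // absent from the item simply do not appear in the result.
            ProjectionExpression: aws.String("AlbumTitle, Price"),
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(out.Item)
    }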

    ", + "KeysAndAttributes$ProjectionExpression": "

    A string that identifies one or more attributes to retrieve from the table. These attributes can include scalars, sets, or elements of a JSON document. The attributes in the ProjectionExpression must be separated by commas.

    If no attribute names are specified, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result.

    For more information, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.

    ProjectionExpression replaces the legacy AttributesToGet parameter.

    ", + "QueryInput$ProjectionExpression": "

    A string that identifies one or more attributes to retrieve from the table. These attributes can include scalars, sets, or elements of a JSON document. The attributes in the expression must be separated by commas.

    If no attribute names are specified, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result.

    For more information, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.

    ProjectionExpression replaces the legacy AttributesToGet parameter.

    ", + "ScanInput$ProjectionExpression": "

    A string that identifies one or more attributes to retrieve from the specified table or index. These attributes can include scalars, sets, or elements of a JSON document. The attributes in the expression must be separated by commas.

    If no attribute names are specified, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result.

    For more information, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.

    ProjectionExpression replaces the legacy AttributesToGet parameter.

    " + } + }, + "ProjectionType": { + "base": null, + "refs": { + "Projection$ProjectionType": "

    The set of attributes that are projected into the index:

    • KEYS_ONLY - Only the index and primary keys are projected into the index.

    • INCLUDE - Only the specified table attributes are projected into the index. The list of projected attributes is in NonKeyAttributes.

    • ALL - All of the table attributes are projected into the index.

    " + } + }, + "ProvisionedThroughput": { + "base": "

    Represents the provisioned throughput settings for a specified table or index. The settings can be modified using the UpdateTable operation.

    For current minimum and maximum provisioned throughput values, see Limits in the Amazon DynamoDB Developer Guide.

    ", + "refs": { + "CreateGlobalSecondaryIndexAction$ProvisionedThroughput": null, + "CreateTableInput$ProvisionedThroughput": null, + "GlobalSecondaryIndex$ProvisionedThroughput": null, + "UpdateGlobalSecondaryIndexAction$ProvisionedThroughput": null, + "UpdateTableInput$ProvisionedThroughput": null + } + }, + "ProvisionedThroughputDescription": { + "base": "

    Represents the provisioned throughput settings for the table, consisting of read and write capacity units, along with data about increases and decreases.

    ", + "refs": { + "GlobalSecondaryIndexDescription$ProvisionedThroughput": null, + "TableDescription$ProvisionedThroughput": "

    The provisioned throughput settings for the table, consisting of read and write capacity units, along with data about increases and decreases.

    " + } + }, + "ProvisionedThroughputExceededException": { + "base": "

    Your request rate is too high. The AWS SDKs for DynamoDB automatically retry requests that receive this exception. Your request is eventually successful, unless your retry queue is too large to finish. Reduce the frequency of requests and use exponential backoff. For more information, go to Error Retries and Exponential Backoff in the Amazon DynamoDB Developer Guide.

    ", + "refs": { + } + }, + "PutItemInput": { + "base": "

    Represents the input of a PutItem operation.

    ", + "refs": { + } + }, + "PutItemInputAttributeMap": { + "base": null, + "refs": { + "PutItemInput$Item": "

    A map of attribute name/value pairs, one for each attribute. Only the primary key attributes are required; you can optionally provide other attribute name-value pairs for the item.

    You must provide all of the attributes for the primary key. For example, with a simple primary key, you only need to provide a value for the partition key. For a composite primary key, you must provide values for both the partition key and the sort key.

    If you specify any attributes that are part of an index key, then the data types for those attributes must match those of the schema in the table's attribute definition.

    For more information about primary keys, see Primary Key in the Amazon DynamoDB Developer Guide.

    Each element in the Item map is an AttributeValue object.
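
    A Go sketch of the Item map for PutItem, assuming the hypothetical Music table (composite primary key plus one optional attribute):

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/dynamodb"
    )

    func main() {
        svc := dynamodb.New(session.Must(session.NewSession()))

        _, err := svc.PutItem(&dynamodb.PutItemInput{
            TableName: aws.String("Music"), // hypothetical
            Item: map[string]*dynamodb.AttributeValue{
                "Artist":     {S: aws.String("No One You Know")}, // partition key (required)
                "SongTitle":  {S: aws.String("Call Me Today")},   // sort key (required)
                "AlbumTitle": {S: aws.String("Somewhat Famous")}, // optional non-key attribute
            },
        })
        if err != nil {
            log.Fatal(err)
        }
    }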

    ", + "PutRequest$Item": "

    A map of attribute names to attribute values, representing an item to be processed by PutItem. All of the table's primary key attributes must be specified, and their data types must match those of the table's key schema. If any attributes present in the item are part of an index key schema for the table, their types must match the index key schema.

    " + } + }, + "PutItemOutput": { + "base": "

    Represents the output of a PutItem operation.

    ", + "refs": { + } + }, + "PutRequest": { + "base": "

    Represents a request to perform a PutItem operation on an item.

    ", + "refs": { + "WriteRequest$PutRequest": "

    A request to perform a PutItem operation.

    " + } + }, + "QueryInput": { + "base": "

    Represents the input of a Query operation.

    ", + "refs": { + } + }, + "QueryOutput": { + "base": "

    Represents the output of a Query operation.

    ", + "refs": { + } + }, + "ResourceInUseException": { + "base": "

    The operation conflicts with the resource's availability. For example, you attempted to recreate an existing table, or tried to delete a table currently in the CREATING state.

    ", + "refs": { + } + }, + "ResourceNotFoundException": { + "base": "

    The operation tried to access a nonexistent table or index. The resource might not be specified correctly, or its status might not be ACTIVE.

    ", + "refs": { + } + }, + "ReturnConsumedCapacity": { + "base": "

    Determines the level of detail about provisioned throughput consumption that is returned in the response:

    • INDEXES - The response includes the aggregate ConsumedCapacity for the operation, together with ConsumedCapacity for each table and secondary index that was accessed.

      Note that some operations, such as GetItem and BatchGetItem, do not access any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity information for table(s).

    • TOTAL - The response includes only the aggregate ConsumedCapacity for the operation.

    • NONE - No ConsumedCapacity details are included in the response.

    ", + "refs": { + "BatchGetItemInput$ReturnConsumedCapacity": null, + "BatchWriteItemInput$ReturnConsumedCapacity": null, + "DeleteItemInput$ReturnConsumedCapacity": null, + "GetItemInput$ReturnConsumedCapacity": null, + "PutItemInput$ReturnConsumedCapacity": null, + "QueryInput$ReturnConsumedCapacity": null, + "ScanInput$ReturnConsumedCapacity": null, + "UpdateItemInput$ReturnConsumedCapacity": null + } + }, + "ReturnItemCollectionMetrics": { + "base": null, + "refs": { + "BatchWriteItemInput$ReturnItemCollectionMetrics": "

    Determines whether item collection metrics are returned. If set to SIZE, the response includes statistics about item collections, if any, that were modified during the operation. If set to NONE (the default), no statistics are returned.

    ", + "DeleteItemInput$ReturnItemCollectionMetrics": "

    Determines whether item collection metrics are returned. If set to SIZE, the response includes statistics about item collections, if any, that were modified during the operation. If set to NONE (the default), no statistics are returned.

    ", + "PutItemInput$ReturnItemCollectionMetrics": "

    Determines whether item collection metrics are returned. If set to SIZE, the response includes statistics about item collections, if any, that were modified during the operation. If set to NONE (the default), no statistics are returned.

    ", + "UpdateItemInput$ReturnItemCollectionMetrics": "

    Determines whether item collection metrics are returned. If set to SIZE, the response includes statistics about item collections, if any, that were modified during the operation. If set to NONE (the default), no statistics are returned.

    " + } + }, + "ReturnValue": { + "base": null, + "refs": { + "DeleteItemInput$ReturnValues": "

    Use ReturnValues if you want to get the item attributes as they appeared before they were deleted. For DeleteItem, the valid values are:

    • NONE - If ReturnValues is not specified, or if its value is NONE, then nothing is returned. (This setting is the default for ReturnValues.)

    • ALL_OLD - The content of the old item is returned.

    The ReturnValues parameter is used by several DynamoDB operations; however, DeleteItem does not recognize any values other than NONE or ALL_OLD.

    ", + "PutItemInput$ReturnValues": "

    Use ReturnValues if you want to get the item attributes as they appeared before they were updated with the PutItem request. For PutItem, the valid values are:

    • NONE - If ReturnValues is not specified, or if its value is NONE, then nothing is returned. (This setting is the default for ReturnValues.)

    • ALL_OLD - If PutItem overwrote an attribute name-value pair, then the content of the old item is returned.

    The ReturnValues parameter is used by several DynamoDB operations; however, PutItem does not recognize any values other than NONE or ALL_OLD.

    ", + "UpdateItemInput$ReturnValues": "

    Use ReturnValues if you want to get the item attributes as they appeared either before or after they were updated. For UpdateItem, the valid values are:

    • NONE - If ReturnValues is not specified, or if its value is NONE, then nothing is returned. (This setting is the default for ReturnValues.)

    • ALL_OLD - If UpdateItem overwrote an attribute name-value pair, then the content of the old item is returned.

    • UPDATED_OLD - The old versions of only the updated attributes are returned.

    • ALL_NEW - All of the attributes of the new version of the item are returned.

    • UPDATED_NEW - The new versions of only the updated attributes are returned.

    There is no additional cost associated with requesting a return value aside from the small network and processing overhead of receiving a larger response. No Read Capacity Units are consumed.

    The values returned are strongly consistent.
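
    As a hedged sketch, an UpdateItem call that asks for only the post-update values of the changed attributes might look like this in Go; the table, key, and attribute names are invented:

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/dynamodb"
        )

        func main() {
            svc := dynamodb.New(session.Must(session.NewSession()))

            out, err := svc.UpdateItem(&dynamodb.UpdateItemInput{
                TableName: aws.String("Users"), // hypothetical table
                Key: map[string]*dynamodb.AttributeValue{
                    "Id": {S: aws.String("u-1")},
                },
                UpdateExpression: aws.String("SET lastLogin = :t"),
                ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
                    ":t": {S: aws.String("2017-07-26T12:00:00Z")},
                },
                // Return only the new values of the attributes we changed.
                ReturnValues: aws.String("UPDATED_NEW"),
            })
            if err != nil {
                fmt.Println(err)
                return
            }
            fmt.Println(out.Attributes) // e.g. just the lastLogin attribute
        }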

    " + } + }, + "ScalarAttributeType": { + "base": null, + "refs": { + "AttributeDefinition$AttributeType": "

    The data type for the attribute, where:

    • S - the attribute is of type String

    • N - the attribute is of type Number

    • B - the attribute is of type Binary

    " + } + }, + "ScanInput": { + "base": "

    Represents the input of a Scan operation.

    ", + "refs": { + } + }, + "ScanOutput": { + "base": "

    Represents the output of a Scan operation.

    ", + "refs": { + } + }, + "ScanSegment": { + "base": null, + "refs": { + "ScanInput$Segment": "

    For a parallel Scan request, Segment identifies an individual segment to be scanned by an application worker.

    Segment IDs are zero-based, so the first segment is always 0. For example, if you want to use four application threads to scan a table or an index, then the first thread specifies a Segment value of 0, the second thread specifies 1, and so on.

    The value of LastEvaluatedKey returned from a parallel Scan request must be used as ExclusiveStartKey with the same segment ID in a subsequent Scan operation.

    The value for Segment must be greater than or equal to 0, and less than the value provided for TotalSegments.

    If you provide Segment, you must also provide TotalSegments.

    " + } + }, + "ScanTotalSegments": { + "base": null, + "refs": { + "ScanInput$TotalSegments": "

    For a parallel Scan request, TotalSegments represents the total number of segments into which the Scan operation will be divided. The value of TotalSegments corresponds to the number of application workers that will perform the parallel scan. For example, if you want to use four application threads to scan a table or an index, specify a TotalSegments value of 4.

    The value for TotalSegments must be greater than or equal to 1, and less than or equal to 1000000. If you specify a TotalSegments value of 1, the Scan operation will be sequential rather than parallel.

    If you specify TotalSegments, you must also specify Segment.
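
    A sketch of a four-segment parallel scan in Go, assuming the vendored SDK and an invented "Orders" table; each worker pages through its own segment by feeding LastEvaluatedKey back in as ExclusiveStartKey:

        package main

        import (
            "fmt"
            "sync"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/dynamodb"
        )

        func main() {
            svc := dynamodb.New(session.Must(session.NewSession()))
            const totalSegments = 4 // one worker per segment

            var wg sync.WaitGroup
            for seg := int64(0); seg < totalSegments; seg++ {
                wg.Add(1)
                go func(segment int64) {
                    defer wg.Done()
                    input := &dynamodb.ScanInput{
                        TableName:     aws.String("Orders"), // hypothetical table
                        Segment:       aws.Int64(segment),
                        TotalSegments: aws.Int64(totalSegments),
                    }
                    // Keep scanning this segment until LastEvaluatedKey is empty.
                    for {
                        out, err := svc.Scan(input)
                        if err != nil {
                            fmt.Println(err)
                            return
                        }
                        fmt.Printf("segment %d: %d items\n", segment, len(out.Items))
                        if len(out.LastEvaluatedKey) == 0 {
                            break
                        }
                        input.ExclusiveStartKey = out.LastEvaluatedKey
                    }
                }(seg)
            }
            wg.Wait()
        }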

    " + } + }, + "SecondaryIndexesCapacityMap": { + "base": null, + "refs": { + "ConsumedCapacity$LocalSecondaryIndexes": "

    The amount of throughput consumed on each local index affected by the operation.

    ", + "ConsumedCapacity$GlobalSecondaryIndexes": "

    The amount of throughput consumed on each global index affected by the operation.

    " + } + }, + "Select": { + "base": null, + "refs": { + "QueryInput$Select": "

    The attributes to be returned in the result. You can retrieve all item attributes, specific item attributes, the count of matching items, or in the case of an index, some or all of the attributes projected into the index.

    • ALL_ATTRIBUTES - Returns all of the item attributes from the specified table or index. If you query a local secondary index, then for each matching item in the index DynamoDB will fetch the entire item from the parent table. If the index is configured to project all item attributes, then all of the data can be obtained from the local secondary index, and no fetching is required.

    • ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves all attributes that have been projected into the index. If the index is configured to project all attributes, this return value is equivalent to specifying ALL_ATTRIBUTES.

    • COUNT - Returns the number of matching items, rather than the matching items themselves.

    • SPECIFIC_ATTRIBUTES - Returns only the attributes listed in AttributesToGet. This return value is equivalent to specifying AttributesToGet without specifying any value for Select.

      If you query a local secondary index and request only attributes that are projected into that index, the operation will read only the index and not the table. If any of the requested attributes are not projected into the local secondary index, DynamoDB will fetch each of these attributes from the parent table. This extra fetching incurs additional throughput cost and latency.

      If you query a global secondary index, you can only request attributes that are projected into the index. Global secondary index queries cannot fetch attributes from the parent table.

    If neither Select nor AttributesToGet are specified, DynamoDB defaults to ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when accessing an index. You cannot use both Select and AttributesToGet together in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES. (This usage is equivalent to specifying AttributesToGet without any value for Select.)

    If you use the ProjectionExpression parameter, then the value for Select can only be SPECIFIC_ATTRIBUTES. Any other value for Select will return an error.
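
    For example, a COUNT-only query might be issued like this (a sketch; the "Music" table and key condition are invented):

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/dynamodb"
        )

        func main() {
            svc := dynamodb.New(session.Must(session.NewSession()))

            out, err := svc.Query(&dynamodb.QueryInput{
                TableName:              aws.String("Music"), // hypothetical table
                KeyConditionExpression: aws.String("Artist = :a"),
                ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
                    ":a": {S: aws.String("No One You Know")},
                },
                Select: aws.String("COUNT"), // only the count, no Items
            })
            if err != nil {
                fmt.Println(err)
                return
            }
            fmt.Println(aws.Int64Value(out.Count))
        }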

    ", + "ScanInput$Select": "

    The attributes to be returned in the result. You can retrieve all item attributes, specific item attributes, or the count of matching items.

    • ALL_ATTRIBUTES - Returns all of the item attributes.

    • ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves all attributes that have been projected into the index. If the index is configured to project all attributes, this return value is equivalent to specifying ALL_ATTRIBUTES.

    • COUNT - Returns the number of matching items, rather than the matching items themselves.

    • SPECIFIC_ATTRIBUTES - Returns only the attributes listed in AttributesToGet. This return value is equivalent to specifying AttributesToGet without specifying any value for Select.

    If neither Select nor AttributesToGet are specified, DynamoDB defaults to ALL_ATTRIBUTES. You cannot use both AttributesToGet and Select together in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES. (This usage is equivalent to specifying AttributesToGet without any value for Select.)

    " + } + }, + "StreamArn": { + "base": null, + "refs": { + "TableDescription$LatestStreamArn": "

    The Amazon Resource Name (ARN) that uniquely identifies the latest stream for this table.

    " + } + }, + "StreamEnabled": { + "base": null, + "refs": { + "StreamSpecification$StreamEnabled": "

    Indicates whether DynamoDB Streams is enabled (true) or disabled (false) on the table.

    " + } + }, + "StreamSpecification": { + "base": "

    Represents the DynamoDB Streams configuration for a table in DynamoDB.

    ", + "refs": { + "CreateTableInput$StreamSpecification": "

    The settings for DynamoDB Streams on the table. These settings consist of:

    • StreamEnabled - Indicates whether Streams is to be enabled (true) or disabled (false).

    • StreamViewType - When an item in the table is modified, StreamViewType determines what information is written to the table's stream. Valid values for StreamViewType are:

      • KEYS_ONLY - Only the key attributes of the modified item are written to the stream.

      • NEW_IMAGE - The entire item, as it appears after it was modified, is written to the stream.

      • OLD_IMAGE - The entire item, as it appeared before it was modified, is written to the stream.

      • NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are written to the stream.
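
    A sketch of creating a table with a stream enabled, using invented table and attribute names:

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/dynamodb"
        )

        func main() {
            svc := dynamodb.New(session.Must(session.NewSession()))

            _, err := svc.CreateTable(&dynamodb.CreateTableInput{
                TableName: aws.String("Audit"), // hypothetical table
                AttributeDefinitions: []*dynamodb.AttributeDefinition{
                    {AttributeName: aws.String("Id"), AttributeType: aws.String("S")},
                },
                KeySchema: []*dynamodb.KeySchemaElement{
                    {AttributeName: aws.String("Id"), KeyType: aws.String("HASH")},
                },
                ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
                    ReadCapacityUnits:  aws.Int64(5),
                    WriteCapacityUnits: aws.Int64(5),
                },
                // Write both the old and new item images to the table's stream.
                StreamSpecification: &dynamodb.StreamSpecification{
                    StreamEnabled:  aws.Bool(true),
                    StreamViewType: aws.String("NEW_AND_OLD_IMAGES"),
                },
            })
            fmt.Println(err)
        }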

    ", + "TableDescription$StreamSpecification": "

    The current DynamoDB Streams configuration for the table.

    ", + "UpdateTableInput$StreamSpecification": "

    Represents the DynamoDB Streams configuration for the table.

    You will receive a ResourceInUseException if you attempt to enable a stream on a table that already has a stream, or if you attempt to disable a stream on a table that does not have a stream.

    " + } + }, + "StreamViewType": { + "base": null, + "refs": { + "StreamSpecification$StreamViewType": "

    The DynamoDB Streams settings for the table. These settings consist of:

    • StreamEnabled - Indicates whether DynamoDB Streams is enabled (true) or disabled (false) on the table.

    • StreamViewType - When an item in the table is modified, StreamViewType determines what information is written to the stream for this table. Valid values for StreamViewType are:

      • KEYS_ONLY - Only the key attributes of the modified item are written to the stream.

      • NEW_IMAGE - The entire item, as it appears after it was modified, is written to the stream.

      • OLD_IMAGE - The entire item, as it appeared before it was modified, is written to the stream.

      • NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are written to the stream.

    " + } + }, + "String": { + "base": null, + "refs": { + "GlobalSecondaryIndexDescription$IndexArn": "

    The Amazon Resource Name (ARN) that uniquely identifies the index.

    ", + "LocalSecondaryIndexDescription$IndexArn": "

    The Amazon Resource Name (ARN) that uniquely identifies the index.

    ", + "TableDescription$TableArn": "

    The Amazon Resource Name (ARN) that uniquely identifies the table.

    ", + "TableDescription$LatestStreamLabel": "

    A timestamp, in ISO 8601 format, for this stream.

    Note that LatestStreamLabel is not a unique identifier for the stream, because it is possible that a stream from another table might have the same timestamp. However, the combination of the following three elements is guaranteed to be unique:

    • the AWS customer ID.

    • the table name.

    • the StreamLabel.

    " + } + }, + "StringAttributeValue": { + "base": null, + "refs": { + "AttributeValue$S": "

    A String data type.

    ", + "StringSetAttributeValue$member": null + } + }, + "StringSetAttributeValue": { + "base": null, + "refs": { + "AttributeValue$SS": "

    A String Set data type.

    " + } + }, + "TableDescription": { + "base": "

    Represents the properties of a table.

    ", + "refs": { + "CreateTableOutput$TableDescription": null, + "DeleteTableOutput$TableDescription": null, + "DescribeTableOutput$Table": null, + "UpdateTableOutput$TableDescription": null + } + }, + "TableName": { + "base": null, + "refs": { + "BatchGetRequestMap$key": null, + "BatchGetResponseMap$key": null, + "BatchWriteItemRequestMap$key": null, + "ConsumedCapacity$TableName": "

    The name of the table that was affected by the operation.

    ", + "CreateTableInput$TableName": "

    The name of the table to create.

    ", + "DeleteItemInput$TableName": "

    The name of the table from which to delete the item.

    ", + "DeleteTableInput$TableName": "

    The name of the table to delete.

    ", + "DescribeTableInput$TableName": "

    The name of the table to describe.

    ", + "GetItemInput$TableName": "

    The name of the table containing the requested item.

    ", + "ItemCollectionMetricsPerTable$key": null, + "ListTablesInput$ExclusiveStartTableName": "

    The first table name that this operation will evaluate. Use the value that was returned for LastEvaluatedTableName in a previous operation, so that you can obtain the next page of results.

    ", + "ListTablesOutput$LastEvaluatedTableName": "

    The name of the last table in the current page of results. Use this value as the ExclusiveStartTableName in a new request to obtain the next page of results, until all the table names are returned.

    If you do not receive a LastEvaluatedTableName value in the response, this means that there are no more table names to be retrieved.
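
    A sketch of the manual pagination loop this describes, with an invented page size; the loop stops once LastEvaluatedTableName is absent:

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/dynamodb"
        )

        func main() {
            svc := dynamodb.New(session.Must(session.NewSession()))

            input := &dynamodb.ListTablesInput{Limit: aws.Int64(10)}
            for {
                out, err := svc.ListTables(input)
                if err != nil {
                    fmt.Println(err)
                    return
                }
                for _, name := range out.TableNames {
                    fmt.Println(aws.StringValue(name))
                }
                // No LastEvaluatedTableName means there are no more pages.
                if out.LastEvaluatedTableName == nil {
                    break
                }
                input.ExclusiveStartTableName = out.LastEvaluatedTableName
            }
        }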

    ", + "PutItemInput$TableName": "

    The name of the table to contain the item.

    ", + "QueryInput$TableName": "

    The name of the table containing the requested items.

    ", + "ScanInput$TableName": "

    The name of the table containing the requested items; or, if you provide IndexName, the name of the table to which that index belongs.

    ", + "TableDescription$TableName": "

    The name of the table.

    ", + "TableNameList$member": null, + "UpdateItemInput$TableName": "

    The name of the table containing the item to update.

    ", + "UpdateTableInput$TableName": "

    The name of the table to be updated.

    " + } + }, + "TableNameList": { + "base": null, + "refs": { + "ListTablesOutput$TableNames": "

    The names of the tables associated with the current account at the current endpoint. The maximum size of this array is 100.

    If LastEvaluatedTableName also appears in the output, you can use this value as the ExclusiveStartTableName parameter in a subsequent ListTables request and obtain the next page of results.
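
    Alternatively, the ListTables entry in the pagination model added later in this patch generates a helper that follows these tokens automatically; a sketch:

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/dynamodb"
        )

        func main() {
            svc := dynamodb.New(session.Must(session.NewSession()))

            // ListTablesPages chains LastEvaluatedTableName into
            // ExclusiveStartTableName for us, page by page.
            err := svc.ListTablesPages(&dynamodb.ListTablesInput{},
                func(page *dynamodb.ListTablesOutput, lastPage bool) bool {
                    for _, name := range page.TableNames {
                        fmt.Println(aws.StringValue(name))
                    }
                    return true // keep paging
                })
            fmt.Println(err)
        }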

    " + } + }, + "TableStatus": { + "base": null, + "refs": { + "TableDescription$TableStatus": "

    The current state of the table:

    • CREATING - The table is being created.

    • UPDATING - The table is being updated.

    • DELETING - The table is being deleted.

    • ACTIVE - The table is ready for use.
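
    The TableExists waiter defined in the waiters model added later in this patch polls DescribeTable until this status reaches ACTIVE; a sketch of using it (table name invented):

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/dynamodb"
        )

        func main() {
            svc := dynamodb.New(session.Must(session.NewSession()))

            // Blocks until Table.TableStatus == "ACTIVE" or the waiter
            // exhausts its retry budget.
            err := svc.WaitUntilTableExists(&dynamodb.DescribeTableInput{
                TableName: aws.String("Audit"), // hypothetical table
            })
            fmt.Println(err)
        }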

    " + } + }, + "UpdateExpression": { + "base": null, + "refs": { + "UpdateItemInput$UpdateExpression": "

    An expression that defines one or more attributes to be updated, the action to be performed on them, and new value(s) for them.

    The following action values are available for UpdateExpression.

    • SET - Adds one or more attributes and values to an item. If any of these attributes already exist, they are replaced by the new values. You can also use SET to add or subtract from an attribute that is of type Number. For example: SET myNum = myNum + :val

      SET supports the following functions:

      • if_not_exists (path, operand) - if the item does not contain an attribute at the specified path, then if_not_exists evaluates to operand; otherwise, it evaluates to path. You can use this function to avoid overwriting an attribute that may already be present in the item.

      • list_append (operand, operand) - evaluates to a list with a new element added to it. You can append the new element to the start or the end of the list by reversing the order of the operands.

      These function names are case-sensitive.

    • REMOVE - Removes one or more attributes from an item.

    • ADD - Adds the specified value to the item, if the attribute does not already exist. If the attribute does exist, then the behavior of ADD depends on the data type of the attribute:

      • If the existing attribute is a number, and if Value is also a number, then Value is mathematically added to the existing attribute. If Value is a negative number, then it is subtracted from the existing attribute.

        If you use ADD to increment or decrement a number value for an item that doesn't exist before the update, DynamoDB uses 0 as the initial value.

        Similarly, if you use ADD for an existing item to increment or decrement an attribute value that doesn't exist before the update, DynamoDB uses 0 as the initial value. For example, suppose that the item you want to update doesn't have an attribute named itemcount, but you decide to ADD the number 3 to this attribute anyway. DynamoDB will create the itemcount attribute, set its initial value to 0, and finally add 3 to it. The result will be a new itemcount attribute in the item, with a value of 3.

      • If the existing data type is a set and if Value is also a set, then Value is added to the existing set. For example, if the attribute value is the set [1,2], and the ADD action specified [3], then the final attribute value is [1,2,3]. An error occurs if an ADD action is specified for a set attribute and the attribute type specified does not match the existing set type.

        Both sets must have the same primitive data type. For example, if the existing data type is a set of strings, the Value must also be a set of strings.

      The ADD action only supports Number and set data types. In addition, ADD can only be used on top-level attributes, not nested attributes.

    • DELETE - Deletes an element from a set.

      If a set of values is specified, then those values are subtracted from the old set. For example, if the attribute value was the set [a,b,c] and the DELETE action specified [a,c], then the final attribute value is [b]. Specifying an empty set is an error.

      The DELETE action only supports set data types. In addition, DELETE can only be used on top-level attributes, not nested attributes.

    You can have many actions in a single expression, such as the following: SET a=:value1, b=:value2 DELETE :value3, :value4, :value5

    For more information on update expressions, see Modifying Items and Attributes in the Amazon DynamoDB Developer Guide.

    UpdateExpression replaces the legacy AttributeUpdates parameter.
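
    A sketch combining a SET and an ADD action in one expression, with invented names; as described above, ADD treats the missing itemcount as 0 on the first update:

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/dynamodb"
        )

        func main() {
            svc := dynamodb.New(session.Must(session.NewSession()))

            out, err := svc.UpdateItem(&dynamodb.UpdateItemInput{
                TableName: aws.String("Products"), // hypothetical table
                Key: map[string]*dynamodb.AttributeValue{
                    "Id": {S: aws.String("p-1")},
                },
                // SET writes an attribute; ADD increments a Number attribute,
                // starting from 0 if it does not exist yet.
                UpdateExpression: aws.String("SET updatedAt = :t ADD itemcount :n"),
                ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
                    ":t": {S: aws.String("2017-07-26T12:00:00Z")},
                    ":n": {N: aws.String("3")},
                },
                ReturnValues: aws.String("ALL_NEW"),
            })
            if err != nil {
                fmt.Println(err)
                return
            }
            fmt.Println(out.Attributes)
        }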

    " + } + }, + "UpdateGlobalSecondaryIndexAction": { + "base": "

    Represents the new provisioned throughput settings to be applied to a global secondary index.

    ", + "refs": { + "GlobalSecondaryIndexUpdate$Update": "

    The name of an existing global secondary index, along with new provisioned throughput settings to be applied to that index.

    " + } + }, + "UpdateItemInput": { + "base": "

    Represents the input of an UpdateItem operation.

    ", + "refs": { + } + }, + "UpdateItemOutput": { + "base": "

    Represents the output of an UpdateItem operation.

    ", + "refs": { + } + }, + "UpdateTableInput": { + "base": "

    Represents the input of an UpdateTable operation.

    ", + "refs": { + } + }, + "UpdateTableOutput": { + "base": "

    Represents the output of an UpdateTable operation.

    ", + "refs": { + } + }, + "WriteRequest": { + "base": "

    Represents an operation to perform - either DeleteItem or PutItem. You can only request one of these operations, not both, in a single WriteRequest. If you do need to perform both of these operations, you will need to provide two separate WriteRequest objects.
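
    A sketch of a BatchWriteItem call carrying one put and one delete as two separate WriteRequest entries (table and item contents invented):

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/dynamodb"
        )

        func main() {
            svc := dynamodb.New(session.Must(session.NewSession()))

            // One WriteRequest per operation: a put and a delete need two entries.
            _, err := svc.BatchWriteItem(&dynamodb.BatchWriteItemInput{
                RequestItems: map[string][]*dynamodb.WriteRequest{
                    "Music": { // hypothetical table
                        {PutRequest: &dynamodb.PutRequest{
                            Item: map[string]*dynamodb.AttributeValue{
                                "Artist": {S: aws.String("No One You Know")},
                            },
                        }},
                        {DeleteRequest: &dynamodb.DeleteRequest{
                            Key: map[string]*dynamodb.AttributeValue{
                                "Artist": {S: aws.String("The Acme Band")},
                            },
                        }},
                    },
                },
            })
            fmt.Println(err)
        }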

    ", + "refs": { + "WriteRequests$member": null + } + }, + "WriteRequests": { + "base": null, + "refs": { + "BatchWriteItemRequestMap$value": null + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/dynamodb/2012-08-10/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/dynamodb/2012-08-10/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/dynamodb/2012-08-10/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/dynamodb/2012-08-10/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/dynamodb/2012-08-10/paginators-1.json new file mode 100644 index 000000000..d4075e120 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/dynamodb/2012-08-10/paginators-1.json @@ -0,0 +1,26 @@ +{ + "pagination": { + "BatchGetItem": { + "input_token": "RequestItems", + "output_token": "UnprocessedKeys" + }, + "ListTables": { + "input_token": "ExclusiveStartTableName", + "output_token": "LastEvaluatedTableName", + "limit_key": "Limit", + "result_key": "TableNames" + }, + "Query": { + "input_token": "ExclusiveStartKey", + "output_token": "LastEvaluatedKey", + "limit_key": "Limit", + "result_key": "Items" + }, + "Scan": { + "input_token": "ExclusiveStartKey", + "output_token": "LastEvaluatedKey", + "limit_key": "Limit", + "result_key": "Items" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/dynamodb/2012-08-10/waiters-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/dynamodb/2012-08-10/waiters-2.json new file mode 100644 index 000000000..43a55ca7b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/dynamodb/2012-08-10/waiters-2.json @@ -0,0 +1,35 @@ +{ + "version": 2, + "waiters": { + "TableExists": { + "delay": 20, + "operation": "DescribeTable", + "maxAttempts": 25, + "acceptors": [ + { + "expected": "ACTIVE", + "matcher": "path", + "state": "success", + "argument": "Table.TableStatus" + }, + { + "expected": "ResourceNotFoundException", + "matcher": "error", + "state": "retry" + } + ] + }, + "TableNotExists": { + "delay": 20, + "operation": "DescribeTable", + "maxAttempts": 25, + "acceptors": [ + { + "expected": "ResourceNotFoundException", + "matcher": "error", + "state": "success" + } + ] + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2015-04-15/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2015-04-15/api-2.json new file mode 100644 index 000000000..3281d2ea0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2015-04-15/api-2.json @@ -0,0 +1,12049 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-04-15", + "endpointPrefix":"ec2", + "serviceAbbreviation":"Amazon EC2", + "serviceFullName":"Amazon Elastic Compute Cloud", + "signatureVersion":"v4", + "xmlNamespace":"http://ec2.amazonaws.com/doc/2015-04-15", + "protocol":"ec2" + }, + "operations":{ + "AcceptVpcPeeringConnection":{ + "name":"AcceptVpcPeeringConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AcceptVpcPeeringConnectionRequest"}, + "output":{"shape":"AcceptVpcPeeringConnectionResult"} + }, + "AllocateAddress":{ + "name":"AllocateAddress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AllocateAddressRequest"}, + "output":{"shape":"AllocateAddressResult"} + }, + "AssignPrivateIpAddresses":{ + "name":"AssignPrivateIpAddresses", + "http":{ + "method":"POST", + 
"requestUri":"/" + }, + "input":{"shape":"AssignPrivateIpAddressesRequest"} + }, + "AssociateAddress":{ + "name":"AssociateAddress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateAddressRequest"}, + "output":{"shape":"AssociateAddressResult"} + }, + "AssociateDhcpOptions":{ + "name":"AssociateDhcpOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateDhcpOptionsRequest"} + }, + "AssociateRouteTable":{ + "name":"AssociateRouteTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateRouteTableRequest"}, + "output":{"shape":"AssociateRouteTableResult"} + }, + "AttachClassicLinkVpc":{ + "name":"AttachClassicLinkVpc", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachClassicLinkVpcRequest"}, + "output":{"shape":"AttachClassicLinkVpcResult"} + }, + "AttachInternetGateway":{ + "name":"AttachInternetGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachInternetGatewayRequest"} + }, + "AttachNetworkInterface":{ + "name":"AttachNetworkInterface", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachNetworkInterfaceRequest"}, + "output":{"shape":"AttachNetworkInterfaceResult"} + }, + "AttachVolume":{ + "name":"AttachVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachVolumeRequest"}, + "output":{ + "shape":"VolumeAttachment", + "locationName":"attachment" + } + }, + "AttachVpnGateway":{ + "name":"AttachVpnGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachVpnGatewayRequest"}, + "output":{"shape":"AttachVpnGatewayResult"} + }, + "AuthorizeSecurityGroupEgress":{ + "name":"AuthorizeSecurityGroupEgress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AuthorizeSecurityGroupEgressRequest"} + }, + "AuthorizeSecurityGroupIngress":{ + "name":"AuthorizeSecurityGroupIngress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AuthorizeSecurityGroupIngressRequest"} + }, + "BundleInstance":{ + "name":"BundleInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BundleInstanceRequest"}, + "output":{"shape":"BundleInstanceResult"} + }, + "CancelBundleTask":{ + "name":"CancelBundleTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelBundleTaskRequest"}, + "output":{"shape":"CancelBundleTaskResult"} + }, + "CancelConversionTask":{ + "name":"CancelConversionTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelConversionRequest"} + }, + "CancelExportTask":{ + "name":"CancelExportTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelExportTaskRequest"} + }, + "CancelImportTask":{ + "name":"CancelImportTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelImportTaskRequest"}, + "output":{"shape":"CancelImportTaskResult"} + }, + "CancelReservedInstancesListing":{ + "name":"CancelReservedInstancesListing", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelReservedInstancesListingRequest"}, + "output":{"shape":"CancelReservedInstancesListingResult"} + }, + "CancelSpotFleetRequests":{ + "name":"CancelSpotFleetRequests", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelSpotFleetRequestsRequest"}, + "output":{"shape":"CancelSpotFleetRequestsResponse"} + }, + 
"CancelSpotInstanceRequests":{ + "name":"CancelSpotInstanceRequests", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelSpotInstanceRequestsRequest"}, + "output":{"shape":"CancelSpotInstanceRequestsResult"} + }, + "ConfirmProductInstance":{ + "name":"ConfirmProductInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ConfirmProductInstanceRequest"}, + "output":{"shape":"ConfirmProductInstanceResult"} + }, + "CopyImage":{ + "name":"CopyImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CopyImageRequest"}, + "output":{"shape":"CopyImageResult"} + }, + "CopySnapshot":{ + "name":"CopySnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CopySnapshotRequest"}, + "output":{"shape":"CopySnapshotResult"} + }, + "CreateCustomerGateway":{ + "name":"CreateCustomerGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateCustomerGatewayRequest"}, + "output":{"shape":"CreateCustomerGatewayResult"} + }, + "CreateDhcpOptions":{ + "name":"CreateDhcpOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDhcpOptionsRequest"}, + "output":{"shape":"CreateDhcpOptionsResult"} + }, + "CreateFlowLogs":{ + "name":"CreateFlowLogs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateFlowLogsRequest"}, + "output":{"shape":"CreateFlowLogsResult"} + }, + "CreateImage":{ + "name":"CreateImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateImageRequest"}, + "output":{"shape":"CreateImageResult"} + }, + "CreateInstanceExportTask":{ + "name":"CreateInstanceExportTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateInstanceExportTaskRequest"}, + "output":{"shape":"CreateInstanceExportTaskResult"} + }, + "CreateInternetGateway":{ + "name":"CreateInternetGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateInternetGatewayRequest"}, + "output":{"shape":"CreateInternetGatewayResult"} + }, + "CreateKeyPair":{ + "name":"CreateKeyPair", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateKeyPairRequest"}, + "output":{ + "shape":"KeyPair", + "locationName":"keyPair" + } + }, + "CreateNetworkAcl":{ + "name":"CreateNetworkAcl", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateNetworkAclRequest"}, + "output":{"shape":"CreateNetworkAclResult"} + }, + "CreateNetworkAclEntry":{ + "name":"CreateNetworkAclEntry", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateNetworkAclEntryRequest"} + }, + "CreateNetworkInterface":{ + "name":"CreateNetworkInterface", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateNetworkInterfaceRequest"}, + "output":{"shape":"CreateNetworkInterfaceResult"} + }, + "CreatePlacementGroup":{ + "name":"CreatePlacementGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePlacementGroupRequest"} + }, + "CreateReservedInstancesListing":{ + "name":"CreateReservedInstancesListing", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateReservedInstancesListingRequest"}, + "output":{"shape":"CreateReservedInstancesListingResult"} + }, + "CreateRoute":{ + "name":"CreateRoute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateRouteRequest"}, + "output":{"shape":"CreateRouteResult"} + }, + 
"CreateRouteTable":{ + "name":"CreateRouteTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateRouteTableRequest"}, + "output":{"shape":"CreateRouteTableResult"} + }, + "CreateSecurityGroup":{ + "name":"CreateSecurityGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSecurityGroupRequest"}, + "output":{"shape":"CreateSecurityGroupResult"} + }, + "CreateSnapshot":{ + "name":"CreateSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSnapshotRequest"}, + "output":{ + "shape":"Snapshot", + "locationName":"snapshot" + } + }, + "CreateSpotDatafeedSubscription":{ + "name":"CreateSpotDatafeedSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSpotDatafeedSubscriptionRequest"}, + "output":{"shape":"CreateSpotDatafeedSubscriptionResult"} + }, + "CreateSubnet":{ + "name":"CreateSubnet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSubnetRequest"}, + "output":{"shape":"CreateSubnetResult"} + }, + "CreateTags":{ + "name":"CreateTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateTagsRequest"} + }, + "CreateVolume":{ + "name":"CreateVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVolumeRequest"}, + "output":{ + "shape":"Volume", + "locationName":"volume" + } + }, + "CreateVpc":{ + "name":"CreateVpc", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVpcRequest"}, + "output":{"shape":"CreateVpcResult"} + }, + "CreateVpcEndpoint":{ + "name":"CreateVpcEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVpcEndpointRequest"}, + "output":{"shape":"CreateVpcEndpointResult"} + }, + "CreateVpcPeeringConnection":{ + "name":"CreateVpcPeeringConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVpcPeeringConnectionRequest"}, + "output":{"shape":"CreateVpcPeeringConnectionResult"} + }, + "CreateVpnConnection":{ + "name":"CreateVpnConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVpnConnectionRequest"}, + "output":{"shape":"CreateVpnConnectionResult"} + }, + "CreateVpnConnectionRoute":{ + "name":"CreateVpnConnectionRoute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVpnConnectionRouteRequest"} + }, + "CreateVpnGateway":{ + "name":"CreateVpnGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVpnGatewayRequest"}, + "output":{"shape":"CreateVpnGatewayResult"} + }, + "DeleteCustomerGateway":{ + "name":"DeleteCustomerGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteCustomerGatewayRequest"} + }, + "DeleteDhcpOptions":{ + "name":"DeleteDhcpOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDhcpOptionsRequest"} + }, + "DeleteFlowLogs":{ + "name":"DeleteFlowLogs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteFlowLogsRequest"}, + "output":{"shape":"DeleteFlowLogsResult"} + }, + "DeleteInternetGateway":{ + "name":"DeleteInternetGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteInternetGatewayRequest"} + }, + "DeleteKeyPair":{ + "name":"DeleteKeyPair", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteKeyPairRequest"} + }, + "DeleteNetworkAcl":{ + 
"name":"DeleteNetworkAcl", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteNetworkAclRequest"} + }, + "DeleteNetworkAclEntry":{ + "name":"DeleteNetworkAclEntry", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteNetworkAclEntryRequest"} + }, + "DeleteNetworkInterface":{ + "name":"DeleteNetworkInterface", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteNetworkInterfaceRequest"} + }, + "DeletePlacementGroup":{ + "name":"DeletePlacementGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeletePlacementGroupRequest"} + }, + "DeleteRoute":{ + "name":"DeleteRoute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRouteRequest"} + }, + "DeleteRouteTable":{ + "name":"DeleteRouteTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRouteTableRequest"} + }, + "DeleteSecurityGroup":{ + "name":"DeleteSecurityGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSecurityGroupRequest"} + }, + "DeleteSnapshot":{ + "name":"DeleteSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSnapshotRequest"} + }, + "DeleteSpotDatafeedSubscription":{ + "name":"DeleteSpotDatafeedSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSpotDatafeedSubscriptionRequest"} + }, + "DeleteSubnet":{ + "name":"DeleteSubnet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSubnetRequest"} + }, + "DeleteTags":{ + "name":"DeleteTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteTagsRequest"} + }, + "DeleteVolume":{ + "name":"DeleteVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVolumeRequest"} + }, + "DeleteVpc":{ + "name":"DeleteVpc", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVpcRequest"} + }, + "DeleteVpcEndpoints":{ + "name":"DeleteVpcEndpoints", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVpcEndpointsRequest"}, + "output":{"shape":"DeleteVpcEndpointsResult"} + }, + "DeleteVpcPeeringConnection":{ + "name":"DeleteVpcPeeringConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVpcPeeringConnectionRequest"}, + "output":{"shape":"DeleteVpcPeeringConnectionResult"} + }, + "DeleteVpnConnection":{ + "name":"DeleteVpnConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVpnConnectionRequest"} + }, + "DeleteVpnConnectionRoute":{ + "name":"DeleteVpnConnectionRoute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVpnConnectionRouteRequest"} + }, + "DeleteVpnGateway":{ + "name":"DeleteVpnGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVpnGatewayRequest"} + }, + "DeregisterImage":{ + "name":"DeregisterImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeregisterImageRequest"} + }, + "DescribeAccountAttributes":{ + "name":"DescribeAccountAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAccountAttributesRequest"}, + "output":{"shape":"DescribeAccountAttributesResult"} + }, + "DescribeAddresses":{ + "name":"DescribeAddresses", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAddressesRequest"}, + 
"output":{"shape":"DescribeAddressesResult"} + }, + "DescribeAvailabilityZones":{ + "name":"DescribeAvailabilityZones", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAvailabilityZonesRequest"}, + "output":{"shape":"DescribeAvailabilityZonesResult"} + }, + "DescribeBundleTasks":{ + "name":"DescribeBundleTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeBundleTasksRequest"}, + "output":{"shape":"DescribeBundleTasksResult"} + }, + "DescribeClassicLinkInstances":{ + "name":"DescribeClassicLinkInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeClassicLinkInstancesRequest"}, + "output":{"shape":"DescribeClassicLinkInstancesResult"} + }, + "DescribeConversionTasks":{ + "name":"DescribeConversionTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeConversionTasksRequest"}, + "output":{"shape":"DescribeConversionTasksResult"} + }, + "DescribeCustomerGateways":{ + "name":"DescribeCustomerGateways", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeCustomerGatewaysRequest"}, + "output":{"shape":"DescribeCustomerGatewaysResult"} + }, + "DescribeDhcpOptions":{ + "name":"DescribeDhcpOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDhcpOptionsRequest"}, + "output":{"shape":"DescribeDhcpOptionsResult"} + }, + "DescribeExportTasks":{ + "name":"DescribeExportTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeExportTasksRequest"}, + "output":{"shape":"DescribeExportTasksResult"} + }, + "DescribeFlowLogs":{ + "name":"DescribeFlowLogs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeFlowLogsRequest"}, + "output":{"shape":"DescribeFlowLogsResult"} + }, + "DescribeImageAttribute":{ + "name":"DescribeImageAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeImageAttributeRequest"}, + "output":{ + "shape":"ImageAttribute", + "locationName":"imageAttribute" + } + }, + "DescribeImages":{ + "name":"DescribeImages", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeImagesRequest"}, + "output":{"shape":"DescribeImagesResult"} + }, + "DescribeImportImageTasks":{ + "name":"DescribeImportImageTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeImportImageTasksRequest"}, + "output":{"shape":"DescribeImportImageTasksResult"} + }, + "DescribeImportSnapshotTasks":{ + "name":"DescribeImportSnapshotTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeImportSnapshotTasksRequest"}, + "output":{"shape":"DescribeImportSnapshotTasksResult"} + }, + "DescribeInstanceAttribute":{ + "name":"DescribeInstanceAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeInstanceAttributeRequest"}, + "output":{"shape":"InstanceAttribute"} + }, + "DescribeInstanceStatus":{ + "name":"DescribeInstanceStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeInstanceStatusRequest"}, + "output":{"shape":"DescribeInstanceStatusResult"} + }, + "DescribeInstances":{ + "name":"DescribeInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeInstancesRequest"}, + "output":{"shape":"DescribeInstancesResult"} + }, + "DescribeInternetGateways":{ + "name":"DescribeInternetGateways", + "http":{ + 
"method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeInternetGatewaysRequest"}, + "output":{"shape":"DescribeInternetGatewaysResult"} + }, + "DescribeKeyPairs":{ + "name":"DescribeKeyPairs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeKeyPairsRequest"}, + "output":{"shape":"DescribeKeyPairsResult"} + }, + "DescribeMovingAddresses":{ + "name":"DescribeMovingAddresses", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeMovingAddressesRequest"}, + "output":{"shape":"DescribeMovingAddressesResult"} + }, + "DescribeNetworkAcls":{ + "name":"DescribeNetworkAcls", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeNetworkAclsRequest"}, + "output":{"shape":"DescribeNetworkAclsResult"} + }, + "DescribeNetworkInterfaceAttribute":{ + "name":"DescribeNetworkInterfaceAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeNetworkInterfaceAttributeRequest"}, + "output":{"shape":"DescribeNetworkInterfaceAttributeResult"} + }, + "DescribeNetworkInterfaces":{ + "name":"DescribeNetworkInterfaces", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeNetworkInterfacesRequest"}, + "output":{"shape":"DescribeNetworkInterfacesResult"} + }, + "DescribePlacementGroups":{ + "name":"DescribePlacementGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribePlacementGroupsRequest"}, + "output":{"shape":"DescribePlacementGroupsResult"} + }, + "DescribePrefixLists":{ + "name":"DescribePrefixLists", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribePrefixListsRequest"}, + "output":{"shape":"DescribePrefixListsResult"} + }, + "DescribeRegions":{ + "name":"DescribeRegions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRegionsRequest"}, + "output":{"shape":"DescribeRegionsResult"} + }, + "DescribeReservedInstances":{ + "name":"DescribeReservedInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedInstancesRequest"}, + "output":{"shape":"DescribeReservedInstancesResult"} + }, + "DescribeReservedInstancesListings":{ + "name":"DescribeReservedInstancesListings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedInstancesListingsRequest"}, + "output":{"shape":"DescribeReservedInstancesListingsResult"} + }, + "DescribeReservedInstancesModifications":{ + "name":"DescribeReservedInstancesModifications", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedInstancesModificationsRequest"}, + "output":{"shape":"DescribeReservedInstancesModificationsResult"} + }, + "DescribeReservedInstancesOfferings":{ + "name":"DescribeReservedInstancesOfferings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedInstancesOfferingsRequest"}, + "output":{"shape":"DescribeReservedInstancesOfferingsResult"} + }, + "DescribeRouteTables":{ + "name":"DescribeRouteTables", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRouteTablesRequest"}, + "output":{"shape":"DescribeRouteTablesResult"} + }, + "DescribeSecurityGroups":{ + "name":"DescribeSecurityGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSecurityGroupsRequest"}, + "output":{"shape":"DescribeSecurityGroupsResult"} + }, + "DescribeSnapshotAttribute":{ + 
"name":"DescribeSnapshotAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSnapshotAttributeRequest"}, + "output":{"shape":"DescribeSnapshotAttributeResult"} + }, + "DescribeSnapshots":{ + "name":"DescribeSnapshots", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSnapshotsRequest"}, + "output":{"shape":"DescribeSnapshotsResult"} + }, + "DescribeSpotDatafeedSubscription":{ + "name":"DescribeSpotDatafeedSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSpotDatafeedSubscriptionRequest"}, + "output":{"shape":"DescribeSpotDatafeedSubscriptionResult"} + }, + "DescribeSpotFleetInstances":{ + "name":"DescribeSpotFleetInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSpotFleetInstancesRequest"}, + "output":{"shape":"DescribeSpotFleetInstancesResponse"} + }, + "DescribeSpotFleetRequestHistory":{ + "name":"DescribeSpotFleetRequestHistory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSpotFleetRequestHistoryRequest"}, + "output":{"shape":"DescribeSpotFleetRequestHistoryResponse"} + }, + "DescribeSpotFleetRequests":{ + "name":"DescribeSpotFleetRequests", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSpotFleetRequestsRequest"}, + "output":{"shape":"DescribeSpotFleetRequestsResponse"} + }, + "DescribeSpotInstanceRequests":{ + "name":"DescribeSpotInstanceRequests", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSpotInstanceRequestsRequest"}, + "output":{"shape":"DescribeSpotInstanceRequestsResult"} + }, + "DescribeSpotPriceHistory":{ + "name":"DescribeSpotPriceHistory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSpotPriceHistoryRequest"}, + "output":{"shape":"DescribeSpotPriceHistoryResult"} + }, + "DescribeSubnets":{ + "name":"DescribeSubnets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSubnetsRequest"}, + "output":{"shape":"DescribeSubnetsResult"} + }, + "DescribeTags":{ + "name":"DescribeTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTagsRequest"}, + "output":{"shape":"DescribeTagsResult"} + }, + "DescribeVolumeAttribute":{ + "name":"DescribeVolumeAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVolumeAttributeRequest"}, + "output":{"shape":"DescribeVolumeAttributeResult"} + }, + "DescribeVolumeStatus":{ + "name":"DescribeVolumeStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVolumeStatusRequest"}, + "output":{"shape":"DescribeVolumeStatusResult"} + }, + "DescribeVolumes":{ + "name":"DescribeVolumes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVolumesRequest"}, + "output":{"shape":"DescribeVolumesResult"} + }, + "DescribeVpcAttribute":{ + "name":"DescribeVpcAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpcAttributeRequest"}, + "output":{"shape":"DescribeVpcAttributeResult"} + }, + "DescribeVpcClassicLink":{ + "name":"DescribeVpcClassicLink", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpcClassicLinkRequest"}, + "output":{"shape":"DescribeVpcClassicLinkResult"} + }, + "DescribeVpcEndpointServices":{ + "name":"DescribeVpcEndpointServices", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"DescribeVpcEndpointServicesRequest"}, + "output":{"shape":"DescribeVpcEndpointServicesResult"} + }, + "DescribeVpcEndpoints":{ + "name":"DescribeVpcEndpoints", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpcEndpointsRequest"}, + "output":{"shape":"DescribeVpcEndpointsResult"} + }, + "DescribeVpcPeeringConnections":{ + "name":"DescribeVpcPeeringConnections", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpcPeeringConnectionsRequest"}, + "output":{"shape":"DescribeVpcPeeringConnectionsResult"} + }, + "DescribeVpcs":{ + "name":"DescribeVpcs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpcsRequest"}, + "output":{"shape":"DescribeVpcsResult"} + }, + "DescribeVpnConnections":{ + "name":"DescribeVpnConnections", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpnConnectionsRequest"}, + "output":{"shape":"DescribeVpnConnectionsResult"} + }, + "DescribeVpnGateways":{ + "name":"DescribeVpnGateways", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpnGatewaysRequest"}, + "output":{"shape":"DescribeVpnGatewaysResult"} + }, + "DetachClassicLinkVpc":{ + "name":"DetachClassicLinkVpc", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachClassicLinkVpcRequest"}, + "output":{"shape":"DetachClassicLinkVpcResult"} + }, + "DetachInternetGateway":{ + "name":"DetachInternetGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachInternetGatewayRequest"} + }, + "DetachNetworkInterface":{ + "name":"DetachNetworkInterface", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachNetworkInterfaceRequest"} + }, + "DetachVolume":{ + "name":"DetachVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachVolumeRequest"}, + "output":{ + "shape":"VolumeAttachment", + "locationName":"attachment" + } + }, + "DetachVpnGateway":{ + "name":"DetachVpnGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachVpnGatewayRequest"} + }, + "DisableVgwRoutePropagation":{ + "name":"DisableVgwRoutePropagation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableVgwRoutePropagationRequest"} + }, + "DisableVpcClassicLink":{ + "name":"DisableVpcClassicLink", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableVpcClassicLinkRequest"}, + "output":{"shape":"DisableVpcClassicLinkResult"} + }, + "DisassociateAddress":{ + "name":"DisassociateAddress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisassociateAddressRequest"} + }, + "DisassociateRouteTable":{ + "name":"DisassociateRouteTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisassociateRouteTableRequest"} + }, + "EnableVgwRoutePropagation":{ + "name":"EnableVgwRoutePropagation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableVgwRoutePropagationRequest"} + }, + "EnableVolumeIO":{ + "name":"EnableVolumeIO", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableVolumeIORequest"} + }, + "EnableVpcClassicLink":{ + "name":"EnableVpcClassicLink", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableVpcClassicLinkRequest"}, + "output":{"shape":"EnableVpcClassicLinkResult"} + }, + "GetConsoleOutput":{ + "name":"GetConsoleOutput", + "http":{ + 
"method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetConsoleOutputRequest"}, + "output":{"shape":"GetConsoleOutputResult"} + }, + "GetPasswordData":{ + "name":"GetPasswordData", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetPasswordDataRequest"}, + "output":{"shape":"GetPasswordDataResult"} + }, + "ImportImage":{ + "name":"ImportImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ImportImageRequest"}, + "output":{"shape":"ImportImageResult"} + }, + "ImportInstance":{ + "name":"ImportInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ImportInstanceRequest"}, + "output":{"shape":"ImportInstanceResult"} + }, + "ImportKeyPair":{ + "name":"ImportKeyPair", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ImportKeyPairRequest"}, + "output":{"shape":"ImportKeyPairResult"} + }, + "ImportSnapshot":{ + "name":"ImportSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ImportSnapshotRequest"}, + "output":{"shape":"ImportSnapshotResult"} + }, + "ImportVolume":{ + "name":"ImportVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ImportVolumeRequest"}, + "output":{"shape":"ImportVolumeResult"} + }, + "ModifyImageAttribute":{ + "name":"ModifyImageAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyImageAttributeRequest"} + }, + "ModifyInstanceAttribute":{ + "name":"ModifyInstanceAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyInstanceAttributeRequest"} + }, + "ModifyNetworkInterfaceAttribute":{ + "name":"ModifyNetworkInterfaceAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyNetworkInterfaceAttributeRequest"} + }, + "ModifyReservedInstances":{ + "name":"ModifyReservedInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyReservedInstancesRequest"}, + "output":{"shape":"ModifyReservedInstancesResult"} + }, + "ModifySnapshotAttribute":{ + "name":"ModifySnapshotAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifySnapshotAttributeRequest"} + }, + "ModifySubnetAttribute":{ + "name":"ModifySubnetAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifySubnetAttributeRequest"} + }, + "ModifyVolumeAttribute":{ + "name":"ModifyVolumeAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyVolumeAttributeRequest"} + }, + "ModifyVpcAttribute":{ + "name":"ModifyVpcAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyVpcAttributeRequest"} + }, + "ModifyVpcEndpoint":{ + "name":"ModifyVpcEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyVpcEndpointRequest"}, + "output":{"shape":"ModifyVpcEndpointResult"} + }, + "MonitorInstances":{ + "name":"MonitorInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"MonitorInstancesRequest"}, + "output":{"shape":"MonitorInstancesResult"} + }, + "MoveAddressToVpc":{ + "name":"MoveAddressToVpc", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"MoveAddressToVpcRequest"}, + "output":{"shape":"MoveAddressToVpcResult"} + }, + "PurchaseReservedInstancesOffering":{ + "name":"PurchaseReservedInstancesOffering", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"PurchaseReservedInstancesOfferingRequest"}, + "output":{"shape":"PurchaseReservedInstancesOfferingResult"} + }, + "RebootInstances":{ + "name":"RebootInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RebootInstancesRequest"} + }, + "RegisterImage":{ + "name":"RegisterImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterImageRequest"}, + "output":{"shape":"RegisterImageResult"} + }, + "RejectVpcPeeringConnection":{ + "name":"RejectVpcPeeringConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RejectVpcPeeringConnectionRequest"}, + "output":{"shape":"RejectVpcPeeringConnectionResult"} + }, + "ReleaseAddress":{ + "name":"ReleaseAddress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReleaseAddressRequest"} + }, + "ReplaceNetworkAclAssociation":{ + "name":"ReplaceNetworkAclAssociation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReplaceNetworkAclAssociationRequest"}, + "output":{"shape":"ReplaceNetworkAclAssociationResult"} + }, + "ReplaceNetworkAclEntry":{ + "name":"ReplaceNetworkAclEntry", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReplaceNetworkAclEntryRequest"} + }, + "ReplaceRoute":{ + "name":"ReplaceRoute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReplaceRouteRequest"} + }, + "ReplaceRouteTableAssociation":{ + "name":"ReplaceRouteTableAssociation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReplaceRouteTableAssociationRequest"}, + "output":{"shape":"ReplaceRouteTableAssociationResult"} + }, + "ReportInstanceStatus":{ + "name":"ReportInstanceStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReportInstanceStatusRequest"} + }, + "RequestSpotFleet":{ + "name":"RequestSpotFleet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RequestSpotFleetRequest"}, + "output":{"shape":"RequestSpotFleetResponse"} + }, + "RequestSpotInstances":{ + "name":"RequestSpotInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RequestSpotInstancesRequest"}, + "output":{"shape":"RequestSpotInstancesResult"} + }, + "ResetImageAttribute":{ + "name":"ResetImageAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetImageAttributeRequest"} + }, + "ResetInstanceAttribute":{ + "name":"ResetInstanceAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetInstanceAttributeRequest"} + }, + "ResetNetworkInterfaceAttribute":{ + "name":"ResetNetworkInterfaceAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetNetworkInterfaceAttributeRequest"} + }, + "ResetSnapshotAttribute":{ + "name":"ResetSnapshotAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetSnapshotAttributeRequest"} + }, + "RestoreAddressToClassic":{ + "name":"RestoreAddressToClassic", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreAddressToClassicRequest"}, + "output":{"shape":"RestoreAddressToClassicResult"} + }, + "RevokeSecurityGroupEgress":{ + "name":"RevokeSecurityGroupEgress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RevokeSecurityGroupEgressRequest"} + }, + "RevokeSecurityGroupIngress":{ + "name":"RevokeSecurityGroupIngress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"RevokeSecurityGroupIngressRequest"} + }, + "RunInstances":{ + "name":"RunInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RunInstancesRequest"}, + "output":{ + "shape":"Reservation", + "locationName":"reservation" + } + }, + "StartInstances":{ + "name":"StartInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartInstancesRequest"}, + "output":{"shape":"StartInstancesResult"} + }, + "StopInstances":{ + "name":"StopInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopInstancesRequest"}, + "output":{"shape":"StopInstancesResult"} + }, + "TerminateInstances":{ + "name":"TerminateInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TerminateInstancesRequest"}, + "output":{"shape":"TerminateInstancesResult"} + }, + "UnassignPrivateIpAddresses":{ + "name":"UnassignPrivateIpAddresses", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UnassignPrivateIpAddressesRequest"} + }, + "UnmonitorInstances":{ + "name":"UnmonitorInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UnmonitorInstancesRequest"}, + "output":{"shape":"UnmonitorInstancesResult"} + } + }, + "shapes":{ + "AcceptVpcPeeringConnectionRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcPeeringConnectionId":{ + "shape":"String", + "locationName":"vpcPeeringConnectionId" + } + } + }, + "AcceptVpcPeeringConnectionResult":{ + "type":"structure", + "members":{ + "VpcPeeringConnection":{ + "shape":"VpcPeeringConnection", + "locationName":"vpcPeeringConnection" + } + } + }, + "AccountAttribute":{ + "type":"structure", + "members":{ + "AttributeName":{ + "shape":"String", + "locationName":"attributeName" + }, + "AttributeValues":{ + "shape":"AccountAttributeValueList", + "locationName":"attributeValueSet" + } + } + }, + "AccountAttributeList":{ + "type":"list", + "member":{ + "shape":"AccountAttribute", + "locationName":"item" + } + }, + "AccountAttributeName":{ + "type":"string", + "enum":[ + "supported-platforms", + "default-vpc" + ] + }, + "AccountAttributeNameStringList":{ + "type":"list", + "member":{ + "shape":"AccountAttributeName", + "locationName":"attributeName" + } + }, + "AccountAttributeValue":{ + "type":"structure", + "members":{ + "AttributeValue":{ + "shape":"String", + "locationName":"attributeValue" + } + } + }, + "AccountAttributeValueList":{ + "type":"list", + "member":{ + "shape":"AccountAttributeValue", + "locationName":"item" + } + }, + "ActiveInstance":{ + "type":"structure", + "members":{ + "InstanceType":{ + "shape":"String", + "locationName":"instanceType" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "SpotInstanceRequestId":{ + "shape":"String", + "locationName":"spotInstanceRequestId" + } + } + }, + "ActiveInstanceSet":{ + "type":"list", + "member":{ + "shape":"ActiveInstance", + "locationName":"item" + } + }, + "Address":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + }, + "AllocationId":{ + "shape":"String", + "locationName":"allocationId" + }, + "AssociationId":{ + "shape":"String", + "locationName":"associationId" + }, + "Domain":{ + "shape":"DomainType", + "locationName":"domain" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + 
"NetworkInterfaceOwnerId":{ + "shape":"String", + "locationName":"networkInterfaceOwnerId" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + } + } + }, + "AddressList":{ + "type":"list", + "member":{ + "shape":"Address", + "locationName":"item" + } + }, + "AllocateAddressRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Domain":{"shape":"DomainType"} + } + }, + "AllocateAddressResult":{ + "type":"structure", + "members":{ + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + }, + "Domain":{ + "shape":"DomainType", + "locationName":"domain" + }, + "AllocationId":{ + "shape":"String", + "locationName":"allocationId" + } + } + }, + "AllocationIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"AllocationId" + } + }, + "AllocationStrategy":{ + "type":"string", + "enum":[ + "lowestPrice", + "diversified" + ] + }, + "ArchitectureValues":{ + "type":"string", + "enum":[ + "i386", + "x86_64" + ] + }, + "AssignPrivateIpAddressesRequest":{ + "type":"structure", + "required":["NetworkInterfaceId"], + "members":{ + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "PrivateIpAddresses":{ + "shape":"PrivateIpAddressStringList", + "locationName":"privateIpAddress" + }, + "SecondaryPrivateIpAddressCount":{ + "shape":"Integer", + "locationName":"secondaryPrivateIpAddressCount" + }, + "AllowReassignment":{ + "shape":"Boolean", + "locationName":"allowReassignment" + } + } + }, + "AssociateAddressRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{"shape":"String"}, + "PublicIp":{"shape":"String"}, + "AllocationId":{"shape":"String"}, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "AllowReassociation":{ + "shape":"Boolean", + "locationName":"allowReassociation" + } + } + }, + "AssociateAddressResult":{ + "type":"structure", + "members":{ + "AssociationId":{ + "shape":"String", + "locationName":"associationId" + } + } + }, + "AssociateDhcpOptionsRequest":{ + "type":"structure", + "required":[ + "DhcpOptionsId", + "VpcId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "DhcpOptionsId":{"shape":"String"}, + "VpcId":{"shape":"String"} + } + }, + "AssociateRouteTableRequest":{ + "type":"structure", + "required":[ + "SubnetId", + "RouteTableId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "RouteTableId":{ + "shape":"String", + "locationName":"routeTableId" + } + } + }, + "AssociateRouteTableResult":{ + "type":"structure", + "members":{ + "AssociationId":{ + "shape":"String", + "locationName":"associationId" + } + } + }, + "AttachClassicLinkVpcRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "VpcId", + "Groups" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "Groups":{ + "shape":"GroupIdStringList", + "locationName":"SecurityGroupId" + } + } + }, + "AttachClassicLinkVpcResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + 
"AttachInternetGatewayRequest":{ + "type":"structure", + "required":[ + "InternetGatewayId", + "VpcId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InternetGatewayId":{ + "shape":"String", + "locationName":"internetGatewayId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + } + } + }, + "AttachNetworkInterfaceRequest":{ + "type":"structure", + "required":[ + "NetworkInterfaceId", + "InstanceId", + "DeviceIndex" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "DeviceIndex":{ + "shape":"Integer", + "locationName":"deviceIndex" + } + } + }, + "AttachNetworkInterfaceResult":{ + "type":"structure", + "members":{ + "AttachmentId":{ + "shape":"String", + "locationName":"attachmentId" + } + } + }, + "AttachVolumeRequest":{ + "type":"structure", + "required":[ + "VolumeId", + "InstanceId", + "Device" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeId":{"shape":"String"}, + "InstanceId":{"shape":"String"}, + "Device":{"shape":"String"} + } + }, + "AttachVpnGatewayRequest":{ + "type":"structure", + "required":[ + "VpnGatewayId", + "VpcId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpnGatewayId":{"shape":"String"}, + "VpcId":{"shape":"String"} + } + }, + "AttachVpnGatewayResult":{ + "type":"structure", + "members":{ + "VpcAttachment":{ + "shape":"VpcAttachment", + "locationName":"attachment" + } + } + }, + "AttachmentStatus":{ + "type":"string", + "enum":[ + "attaching", + "attached", + "detaching", + "detached" + ] + }, + "AttributeBooleanValue":{ + "type":"structure", + "members":{ + "Value":{ + "shape":"Boolean", + "locationName":"value" + } + } + }, + "AttributeValue":{ + "type":"structure", + "members":{ + "Value":{ + "shape":"String", + "locationName":"value" + } + } + }, + "AuthorizeSecurityGroupEgressRequest":{ + "type":"structure", + "required":["GroupId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupId":{ + "shape":"String", + "locationName":"groupId" + }, + "SourceSecurityGroupName":{ + "shape":"String", + "locationName":"sourceSecurityGroupName" + }, + "SourceSecurityGroupOwnerId":{ + "shape":"String", + "locationName":"sourceSecurityGroupOwnerId" + }, + "IpProtocol":{ + "shape":"String", + "locationName":"ipProtocol" + }, + "FromPort":{ + "shape":"Integer", + "locationName":"fromPort" + }, + "ToPort":{ + "shape":"Integer", + "locationName":"toPort" + }, + "CidrIp":{ + "shape":"String", + "locationName":"cidrIp" + }, + "IpPermissions":{ + "shape":"IpPermissionList", + "locationName":"ipPermissions" + } + } + }, + "AuthorizeSecurityGroupIngressRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupName":{"shape":"String"}, + "GroupId":{"shape":"String"}, + "SourceSecurityGroupName":{"shape":"String"}, + "SourceSecurityGroupOwnerId":{"shape":"String"}, + "IpProtocol":{"shape":"String"}, + "FromPort":{"shape":"Integer"}, + "ToPort":{"shape":"Integer"}, + "CidrIp":{"shape":"String"}, + "IpPermissions":{"shape":"IpPermissionList"} + } + }, + "AvailabilityZone":{ + "type":"structure", + "members":{ + "ZoneName":{ + "shape":"String", + "locationName":"zoneName" + }, + "State":{ + "shape":"AvailabilityZoneState", + "locationName":"zoneState" + }, + 
"RegionName":{ + "shape":"String", + "locationName":"regionName" + }, + "Messages":{ + "shape":"AvailabilityZoneMessageList", + "locationName":"messageSet" + } + } + }, + "AvailabilityZoneList":{ + "type":"list", + "member":{ + "shape":"AvailabilityZone", + "locationName":"item" + } + }, + "AvailabilityZoneMessage":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "AvailabilityZoneMessageList":{ + "type":"list", + "member":{ + "shape":"AvailabilityZoneMessage", + "locationName":"item" + } + }, + "AvailabilityZoneState":{ + "type":"string", + "enum":["available"] + }, + "BatchState":{ + "type":"string", + "enum":[ + "submitted", + "active", + "cancelled", + "failed", + "cancelled_running", + "cancelled_terminating" + ] + }, + "BlockDeviceMapping":{ + "type":"structure", + "members":{ + "VirtualName":{ + "shape":"String", + "locationName":"virtualName" + }, + "DeviceName":{ + "shape":"String", + "locationName":"deviceName" + }, + "Ebs":{ + "shape":"EbsBlockDevice", + "locationName":"ebs" + }, + "NoDevice":{ + "shape":"String", + "locationName":"noDevice" + } + } + }, + "BlockDeviceMappingList":{ + "type":"list", + "member":{ + "shape":"BlockDeviceMapping", + "locationName":"item" + } + }, + "BlockDeviceMappingRequestList":{ + "type":"list", + "member":{ + "shape":"BlockDeviceMapping", + "locationName":"BlockDeviceMapping" + } + }, + "Boolean":{"type":"boolean"}, + "BundleIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"BundleId" + } + }, + "BundleInstanceRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "Storage" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{"shape":"String"}, + "Storage":{"shape":"Storage"} + } + }, + "BundleInstanceResult":{ + "type":"structure", + "members":{ + "BundleTask":{ + "shape":"BundleTask", + "locationName":"bundleInstanceTask" + } + } + }, + "BundleTask":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "BundleId":{ + "shape":"String", + "locationName":"bundleId" + }, + "State":{ + "shape":"BundleTaskState", + "locationName":"state" + }, + "StartTime":{ + "shape":"DateTime", + "locationName":"startTime" + }, + "UpdateTime":{ + "shape":"DateTime", + "locationName":"updateTime" + }, + "Storage":{ + "shape":"Storage", + "locationName":"storage" + }, + "Progress":{ + "shape":"String", + "locationName":"progress" + }, + "BundleTaskError":{ + "shape":"BundleTaskError", + "locationName":"error" + } + } + }, + "BundleTaskError":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"String", + "locationName":"code" + }, + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "BundleTaskList":{ + "type":"list", + "member":{ + "shape":"BundleTask", + "locationName":"item" + } + }, + "BundleTaskState":{ + "type":"string", + "enum":[ + "pending", + "waiting-for-shutdown", + "bundling", + "storing", + "cancelling", + "complete", + "failed" + ] + }, + "CancelBatchErrorCode":{ + "type":"string", + "enum":[ + "fleetRequestIdDoesNotExist", + "fleetRequestIdMalformed", + "fleetRequestNotInCancellableState", + "unexpectedError" + ] + }, + "CancelBundleTaskRequest":{ + "type":"structure", + "required":["BundleId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "BundleId":{"shape":"String"} + } + }, + "CancelBundleTaskResult":{ + "type":"structure", + "members":{ + "BundleTask":{ + 
"shape":"BundleTask", + "locationName":"bundleInstanceTask" + } + } + }, + "CancelConversionRequest":{ + "type":"structure", + "required":["ConversionTaskId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ConversionTaskId":{ + "shape":"String", + "locationName":"conversionTaskId" + }, + "ReasonMessage":{ + "shape":"String", + "locationName":"reasonMessage" + } + } + }, + "CancelExportTaskRequest":{ + "type":"structure", + "required":["ExportTaskId"], + "members":{ + "ExportTaskId":{ + "shape":"String", + "locationName":"exportTaskId" + } + } + }, + "CancelImportTaskRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "ImportTaskId":{"shape":"String"}, + "CancelReason":{"shape":"String"} + } + }, + "CancelImportTaskResult":{ + "type":"structure", + "members":{ + "ImportTaskId":{ + "shape":"String", + "locationName":"importTaskId" + }, + "State":{ + "shape":"String", + "locationName":"state" + }, + "PreviousState":{ + "shape":"String", + "locationName":"previousState" + } + } + }, + "CancelReservedInstancesListingRequest":{ + "type":"structure", + "required":["ReservedInstancesListingId"], + "members":{ + "ReservedInstancesListingId":{ + "shape":"String", + "locationName":"reservedInstancesListingId" + } + } + }, + "CancelReservedInstancesListingResult":{ + "type":"structure", + "members":{ + "ReservedInstancesListings":{ + "shape":"ReservedInstancesListingList", + "locationName":"reservedInstancesListingsSet" + } + } + }, + "CancelSpotFleetRequestsError":{ + "type":"structure", + "required":[ + "Code", + "Message" + ], + "members":{ + "Code":{ + "shape":"CancelBatchErrorCode", + "locationName":"code" + }, + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "CancelSpotFleetRequestsErrorItem":{ + "type":"structure", + "required":[ + "SpotFleetRequestId", + "Error" + ], + "members":{ + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + }, + "Error":{ + "shape":"CancelSpotFleetRequestsError", + "locationName":"error" + } + } + }, + "CancelSpotFleetRequestsErrorSet":{ + "type":"list", + "member":{ + "shape":"CancelSpotFleetRequestsErrorItem", + "locationName":"item" + } + }, + "CancelSpotFleetRequestsRequest":{ + "type":"structure", + "required":[ + "SpotFleetRequestIds", + "TerminateInstances" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SpotFleetRequestIds":{ + "shape":"ValueStringList", + "locationName":"spotFleetRequestId" + }, + "TerminateInstances":{ + "shape":"Boolean", + "locationName":"terminateInstances" + } + } + }, + "CancelSpotFleetRequestsResponse":{ + "type":"structure", + "members":{ + "UnsuccessfulFleetRequests":{ + "shape":"CancelSpotFleetRequestsErrorSet", + "locationName":"unsuccessfulFleetRequestSet" + }, + "SuccessfulFleetRequests":{ + "shape":"CancelSpotFleetRequestsSuccessSet", + "locationName":"successfulFleetRequestSet" + } + } + }, + "CancelSpotFleetRequestsSuccessItem":{ + "type":"structure", + "required":[ + "SpotFleetRequestId", + "CurrentSpotFleetRequestState", + "PreviousSpotFleetRequestState" + ], + "members":{ + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + }, + "CurrentSpotFleetRequestState":{ + "shape":"BatchState", + "locationName":"currentSpotFleetRequestState" + }, + "PreviousSpotFleetRequestState":{ + "shape":"BatchState", + "locationName":"previousSpotFleetRequestState" + } + } + }, + "CancelSpotFleetRequestsSuccessSet":{ + "type":"list", + "member":{ + 
"shape":"CancelSpotFleetRequestsSuccessItem", + "locationName":"item" + } + }, + "CancelSpotInstanceRequestState":{ + "type":"string", + "enum":[ + "active", + "open", + "closed", + "cancelled", + "completed" + ] + }, + "CancelSpotInstanceRequestsRequest":{ + "type":"structure", + "required":["SpotInstanceRequestIds"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SpotInstanceRequestIds":{ + "shape":"SpotInstanceRequestIdList", + "locationName":"SpotInstanceRequestId" + } + } + }, + "CancelSpotInstanceRequestsResult":{ + "type":"structure", + "members":{ + "CancelledSpotInstanceRequests":{ + "shape":"CancelledSpotInstanceRequestList", + "locationName":"spotInstanceRequestSet" + } + } + }, + "CancelledSpotInstanceRequest":{ + "type":"structure", + "members":{ + "SpotInstanceRequestId":{ + "shape":"String", + "locationName":"spotInstanceRequestId" + }, + "State":{ + "shape":"CancelSpotInstanceRequestState", + "locationName":"state" + } + } + }, + "CancelledSpotInstanceRequestList":{ + "type":"list", + "member":{ + "shape":"CancelledSpotInstanceRequest", + "locationName":"item" + } + }, + "ClassicLinkInstance":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "Groups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "ClassicLinkInstanceList":{ + "type":"list", + "member":{ + "shape":"ClassicLinkInstance", + "locationName":"item" + } + }, + "ClientData":{ + "type":"structure", + "members":{ + "UploadStart":{"shape":"DateTime"}, + "UploadEnd":{"shape":"DateTime"}, + "UploadSize":{"shape":"Double"}, + "Comment":{"shape":"String"} + } + }, + "ConfirmProductInstanceRequest":{ + "type":"structure", + "required":[ + "ProductCode", + "InstanceId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ProductCode":{"shape":"String"}, + "InstanceId":{"shape":"String"} + } + }, + "ConfirmProductInstanceResult":{ + "type":"structure", + "members":{ + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "ContainerFormat":{ + "type":"string", + "enum":["ova"] + }, + "ConversionIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"item" + } + }, + "ConversionTask":{ + "type":"structure", + "required":[ + "ConversionTaskId", + "State" + ], + "members":{ + "ConversionTaskId":{ + "shape":"String", + "locationName":"conversionTaskId" + }, + "ExpirationTime":{ + "shape":"String", + "locationName":"expirationTime" + }, + "ImportInstance":{ + "shape":"ImportInstanceTaskDetails", + "locationName":"importInstance" + }, + "ImportVolume":{ + "shape":"ImportVolumeTaskDetails", + "locationName":"importVolume" + }, + "State":{ + "shape":"ConversionTaskState", + "locationName":"state" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "ConversionTaskState":{ + "type":"string", + "enum":[ + "active", + "cancelling", + "cancelled", + "completed" + ] + }, + "CopyImageRequest":{ + "type":"structure", + "required":[ + "SourceRegion", + "SourceImageId", + "Name" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SourceRegion":{"shape":"String"}, + "SourceImageId":{"shape":"String"}, + 
"Name":{"shape":"String"}, + "Description":{"shape":"String"}, + "ClientToken":{"shape":"String"} + } + }, + "CopyImageResult":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"String", + "locationName":"imageId" + } + } + }, + "CopySnapshotRequest":{ + "type":"structure", + "required":[ + "SourceRegion", + "SourceSnapshotId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SourceRegion":{"shape":"String"}, + "SourceSnapshotId":{"shape":"String"}, + "Description":{"shape":"String"}, + "DestinationRegion":{ + "shape":"String", + "locationName":"destinationRegion" + }, + "PresignedUrl":{ + "shape":"String", + "locationName":"presignedUrl" + }, + "Encrypted":{ + "shape":"Boolean", + "locationName":"encrypted" + }, + "KmsKeyId":{ + "shape":"String", + "locationName":"kmsKeyId" + } + } + }, + "CopySnapshotResult":{ + "type":"structure", + "members":{ + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + } + } + }, + "CreateCustomerGatewayRequest":{ + "type":"structure", + "required":[ + "Type", + "PublicIp", + "BgpAsn" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Type":{"shape":"GatewayType"}, + "PublicIp":{ + "shape":"String", + "locationName":"IpAddress" + }, + "BgpAsn":{"shape":"Integer"} + } + }, + "CreateCustomerGatewayResult":{ + "type":"structure", + "members":{ + "CustomerGateway":{ + "shape":"CustomerGateway", + "locationName":"customerGateway" + } + } + }, + "CreateDhcpOptionsRequest":{ + "type":"structure", + "required":["DhcpConfigurations"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "DhcpConfigurations":{ + "shape":"NewDhcpConfigurationList", + "locationName":"dhcpConfiguration" + } + } + }, + "CreateDhcpOptionsResult":{ + "type":"structure", + "members":{ + "DhcpOptions":{ + "shape":"DhcpOptions", + "locationName":"dhcpOptions" + } + } + }, + "CreateFlowLogsRequest":{ + "type":"structure", + "required":[ + "ResourceIds", + "ResourceType", + "TrafficType", + "LogGroupName", + "DeliverLogsPermissionArn" + ], + "members":{ + "ResourceIds":{ + "shape":"ValueStringList", + "locationName":"ResourceId" + }, + "ResourceType":{"shape":"FlowLogsResourceType"}, + "TrafficType":{"shape":"TrafficType"}, + "LogGroupName":{"shape":"String"}, + "DeliverLogsPermissionArn":{"shape":"String"}, + "ClientToken":{"shape":"String"} + } + }, + "CreateFlowLogsResult":{ + "type":"structure", + "members":{ + "FlowLogIds":{ + "shape":"ValueStringList", + "locationName":"flowLogIdSet" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + }, + "Unsuccessful":{ + "shape":"UnsuccessfulItemSet", + "locationName":"unsuccessful" + } + } + }, + "CreateImageRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "Name" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Name":{ + "shape":"String", + "locationName":"name" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "NoReboot":{ + "shape":"Boolean", + "locationName":"noReboot" + }, + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappingRequestList", + "locationName":"blockDeviceMapping" + } + } + }, + "CreateImageResult":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"String", + "locationName":"imageId" + } + } + }, + "CreateInstanceExportTaskRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "Description":{ + 
"shape":"String", + "locationName":"description" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "TargetEnvironment":{ + "shape":"ExportEnvironment", + "locationName":"targetEnvironment" + }, + "ExportToS3Task":{ + "shape":"ExportToS3TaskSpecification", + "locationName":"exportToS3" + } + } + }, + "CreateInstanceExportTaskResult":{ + "type":"structure", + "members":{ + "ExportTask":{ + "shape":"ExportTask", + "locationName":"exportTask" + } + } + }, + "CreateInternetGatewayRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + } + } + }, + "CreateInternetGatewayResult":{ + "type":"structure", + "members":{ + "InternetGateway":{ + "shape":"InternetGateway", + "locationName":"internetGateway" + } + } + }, + "CreateKeyPairRequest":{ + "type":"structure", + "required":["KeyName"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "KeyName":{"shape":"String"} + } + }, + "CreateNetworkAclEntryRequest":{ + "type":"structure", + "required":[ + "NetworkAclId", + "RuleNumber", + "Protocol", + "RuleAction", + "Egress", + "CidrBlock" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkAclId":{ + "shape":"String", + "locationName":"networkAclId" + }, + "RuleNumber":{ + "shape":"Integer", + "locationName":"ruleNumber" + }, + "Protocol":{ + "shape":"String", + "locationName":"protocol" + }, + "RuleAction":{ + "shape":"RuleAction", + "locationName":"ruleAction" + }, + "Egress":{ + "shape":"Boolean", + "locationName":"egress" + }, + "CidrBlock":{ + "shape":"String", + "locationName":"cidrBlock" + }, + "IcmpTypeCode":{ + "shape":"IcmpTypeCode", + "locationName":"Icmp" + }, + "PortRange":{ + "shape":"PortRange", + "locationName":"portRange" + } + } + }, + "CreateNetworkAclRequest":{ + "type":"structure", + "required":["VpcId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + } + } + }, + "CreateNetworkAclResult":{ + "type":"structure", + "members":{ + "NetworkAcl":{ + "shape":"NetworkAcl", + "locationName":"networkAcl" + } + } + }, + "CreateNetworkInterfaceRequest":{ + "type":"structure", + "required":["SubnetId"], + "members":{ + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "Groups":{ + "shape":"SecurityGroupIdStringList", + "locationName":"SecurityGroupId" + }, + "PrivateIpAddresses":{ + "shape":"PrivateIpAddressSpecificationList", + "locationName":"privateIpAddresses" + }, + "SecondaryPrivateIpAddressCount":{ + "shape":"Integer", + "locationName":"secondaryPrivateIpAddressCount" + }, + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + } + } + }, + "CreateNetworkInterfaceResult":{ + "type":"structure", + "members":{ + "NetworkInterface":{ + "shape":"NetworkInterface", + "locationName":"networkInterface" + } + } + }, + "CreatePlacementGroupRequest":{ + "type":"structure", + "required":[ + "GroupName", + "Strategy" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupName":{ + "shape":"String", + "locationName":"groupName" + }, + "Strategy":{ + "shape":"PlacementStrategy", + "locationName":"strategy" + } + } + }, + "CreateReservedInstancesListingRequest":{ + "type":"structure", + "required":[ + "ReservedInstancesId", + "InstanceCount", + 
"PriceSchedules", + "ClientToken" + ], + "members":{ + "ReservedInstancesId":{ + "shape":"String", + "locationName":"reservedInstancesId" + }, + "InstanceCount":{ + "shape":"Integer", + "locationName":"instanceCount" + }, + "PriceSchedules":{ + "shape":"PriceScheduleSpecificationList", + "locationName":"priceSchedules" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + } + } + }, + "CreateReservedInstancesListingResult":{ + "type":"structure", + "members":{ + "ReservedInstancesListings":{ + "shape":"ReservedInstancesListingList", + "locationName":"reservedInstancesListingsSet" + } + } + }, + "CreateRouteRequest":{ + "type":"structure", + "required":[ + "RouteTableId", + "DestinationCidrBlock" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "RouteTableId":{ + "shape":"String", + "locationName":"routeTableId" + }, + "DestinationCidrBlock":{ + "shape":"String", + "locationName":"destinationCidrBlock" + }, + "GatewayId":{ + "shape":"String", + "locationName":"gatewayId" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "VpcPeeringConnectionId":{ + "shape":"String", + "locationName":"vpcPeeringConnectionId" + } + } + }, + "CreateRouteResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "CreateRouteTableRequest":{ + "type":"structure", + "required":["VpcId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + } + } + }, + "CreateRouteTableResult":{ + "type":"structure", + "members":{ + "RouteTable":{ + "shape":"RouteTable", + "locationName":"routeTable" + } + } + }, + "CreateSecurityGroupRequest":{ + "type":"structure", + "required":[ + "GroupName", + "Description" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupName":{"shape":"String"}, + "Description":{ + "shape":"String", + "locationName":"GroupDescription" + }, + "VpcId":{"shape":"String"} + } + }, + "CreateSecurityGroupResult":{ + "type":"structure", + "members":{ + "GroupId":{ + "shape":"String", + "locationName":"groupId" + } + } + }, + "CreateSnapshotRequest":{ + "type":"structure", + "required":["VolumeId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeId":{"shape":"String"}, + "Description":{"shape":"String"} + } + }, + "CreateSpotDatafeedSubscriptionRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Bucket":{ + "shape":"String", + "locationName":"bucket" + }, + "Prefix":{ + "shape":"String", + "locationName":"prefix" + } + } + }, + "CreateSpotDatafeedSubscriptionResult":{ + "type":"structure", + "members":{ + "SpotDatafeedSubscription":{ + "shape":"SpotDatafeedSubscription", + "locationName":"spotDatafeedSubscription" + } + } + }, + "CreateSubnetRequest":{ + "type":"structure", + "required":[ + "VpcId", + "CidrBlock" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcId":{"shape":"String"}, + "CidrBlock":{"shape":"String"}, + "AvailabilityZone":{"shape":"String"} + } + }, + "CreateSubnetResult":{ + "type":"structure", + "members":{ + "Subnet":{ + "shape":"Subnet", + "locationName":"subnet" + } + } + }, + "CreateTagsRequest":{ + "type":"structure", + "required":[ + "Resources", + "Tags" + ], + 
"members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Resources":{ + "shape":"ResourceIdList", + "locationName":"ResourceId" + }, + "Tags":{ + "shape":"TagList", + "locationName":"Tag" + } + } + }, + "CreateVolumePermission":{ + "type":"structure", + "members":{ + "UserId":{ + "shape":"String", + "locationName":"userId" + }, + "Group":{ + "shape":"PermissionGroup", + "locationName":"group" + } + } + }, + "CreateVolumePermissionList":{ + "type":"list", + "member":{ + "shape":"CreateVolumePermission", + "locationName":"item" + } + }, + "CreateVolumePermissionModifications":{ + "type":"structure", + "members":{ + "Add":{"shape":"CreateVolumePermissionList"}, + "Remove":{"shape":"CreateVolumePermissionList"} + } + }, + "CreateVolumeRequest":{ + "type":"structure", + "required":["AvailabilityZone"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Size":{"shape":"Integer"}, + "SnapshotId":{"shape":"String"}, + "AvailabilityZone":{"shape":"String"}, + "VolumeType":{"shape":"VolumeType"}, + "Iops":{"shape":"Integer"}, + "Encrypted":{ + "shape":"Boolean", + "locationName":"encrypted" + }, + "KmsKeyId":{"shape":"String"} + } + }, + "CreateVpcEndpointRequest":{ + "type":"structure", + "required":[ + "VpcId", + "ServiceName" + ], + "members":{ + "DryRun":{"shape":"Boolean"}, + "VpcId":{"shape":"String"}, + "ServiceName":{"shape":"String"}, + "PolicyDocument":{"shape":"String"}, + "RouteTableIds":{ + "shape":"ValueStringList", + "locationName":"RouteTableId" + }, + "ClientToken":{"shape":"String"} + } + }, + "CreateVpcEndpointResult":{ + "type":"structure", + "members":{ + "VpcEndpoint":{ + "shape":"VpcEndpoint", + "locationName":"vpcEndpoint" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + } + } + }, + "CreateVpcPeeringConnectionRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "PeerVpcId":{ + "shape":"String", + "locationName":"peerVpcId" + }, + "PeerOwnerId":{ + "shape":"String", + "locationName":"peerOwnerId" + } + } + }, + "CreateVpcPeeringConnectionResult":{ + "type":"structure", + "members":{ + "VpcPeeringConnection":{ + "shape":"VpcPeeringConnection", + "locationName":"vpcPeeringConnection" + } + } + }, + "CreateVpcRequest":{ + "type":"structure", + "required":["CidrBlock"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "CidrBlock":{"shape":"String"}, + "InstanceTenancy":{ + "shape":"Tenancy", + "locationName":"instanceTenancy" + } + } + }, + "CreateVpcResult":{ + "type":"structure", + "members":{ + "Vpc":{ + "shape":"Vpc", + "locationName":"vpc" + } + } + }, + "CreateVpnConnectionRequest":{ + "type":"structure", + "required":[ + "Type", + "CustomerGatewayId", + "VpnGatewayId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Type":{"shape":"String"}, + "CustomerGatewayId":{"shape":"String"}, + "VpnGatewayId":{"shape":"String"}, + "Options":{ + "shape":"VpnConnectionOptionsSpecification", + "locationName":"options" + } + } + }, + "CreateVpnConnectionResult":{ + "type":"structure", + "members":{ + "VpnConnection":{ + "shape":"VpnConnection", + "locationName":"vpnConnection" + } + } + }, + "CreateVpnConnectionRouteRequest":{ + "type":"structure", + "required":[ + "VpnConnectionId", + "DestinationCidrBlock" + ], + "members":{ + "VpnConnectionId":{"shape":"String"}, + "DestinationCidrBlock":{"shape":"String"} + } + }, 
+ "CreateVpnGatewayRequest":{ + "type":"structure", + "required":["Type"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Type":{"shape":"GatewayType"}, + "AvailabilityZone":{"shape":"String"} + } + }, + "CreateVpnGatewayResult":{ + "type":"structure", + "members":{ + "VpnGateway":{ + "shape":"VpnGateway", + "locationName":"vpnGateway" + } + } + }, + "CurrencyCodeValues":{ + "type":"string", + "enum":["USD"] + }, + "CustomerGateway":{ + "type":"structure", + "members":{ + "CustomerGatewayId":{ + "shape":"String", + "locationName":"customerGatewayId" + }, + "State":{ + "shape":"String", + "locationName":"state" + }, + "Type":{ + "shape":"String", + "locationName":"type" + }, + "IpAddress":{ + "shape":"String", + "locationName":"ipAddress" + }, + "BgpAsn":{ + "shape":"String", + "locationName":"bgpAsn" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "CustomerGatewayIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"CustomerGatewayId" + } + }, + "CustomerGatewayList":{ + "type":"list", + "member":{ + "shape":"CustomerGateway", + "locationName":"item" + } + }, + "DatafeedSubscriptionState":{ + "type":"string", + "enum":[ + "Active", + "Inactive" + ] + }, + "DateTime":{"type":"timestamp"}, + "DeleteCustomerGatewayRequest":{ + "type":"structure", + "required":["CustomerGatewayId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "CustomerGatewayId":{"shape":"String"} + } + }, + "DeleteDhcpOptionsRequest":{ + "type":"structure", + "required":["DhcpOptionsId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "DhcpOptionsId":{"shape":"String"} + } + }, + "DeleteFlowLogsRequest":{ + "type":"structure", + "required":["FlowLogIds"], + "members":{ + "FlowLogIds":{ + "shape":"ValueStringList", + "locationName":"FlowLogId" + } + } + }, + "DeleteFlowLogsResult":{ + "type":"structure", + "members":{ + "Unsuccessful":{ + "shape":"UnsuccessfulItemSet", + "locationName":"unsuccessful" + } + } + }, + "DeleteInternetGatewayRequest":{ + "type":"structure", + "required":["InternetGatewayId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InternetGatewayId":{ + "shape":"String", + "locationName":"internetGatewayId" + } + } + }, + "DeleteKeyPairRequest":{ + "type":"structure", + "required":["KeyName"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "KeyName":{"shape":"String"} + } + }, + "DeleteNetworkAclEntryRequest":{ + "type":"structure", + "required":[ + "NetworkAclId", + "RuleNumber", + "Egress" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkAclId":{ + "shape":"String", + "locationName":"networkAclId" + }, + "RuleNumber":{ + "shape":"Integer", + "locationName":"ruleNumber" + }, + "Egress":{ + "shape":"Boolean", + "locationName":"egress" + } + } + }, + "DeleteNetworkAclRequest":{ + "type":"structure", + "required":["NetworkAclId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkAclId":{ + "shape":"String", + "locationName":"networkAclId" + } + } + }, + "DeleteNetworkInterfaceRequest":{ + "type":"structure", + "required":["NetworkInterfaceId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + } + } + }, + "DeletePlacementGroupRequest":{ + "type":"structure", + 
"required":["GroupName"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupName":{ + "shape":"String", + "locationName":"groupName" + } + } + }, + "DeleteRouteRequest":{ + "type":"structure", + "required":[ + "RouteTableId", + "DestinationCidrBlock" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "RouteTableId":{ + "shape":"String", + "locationName":"routeTableId" + }, + "DestinationCidrBlock":{ + "shape":"String", + "locationName":"destinationCidrBlock" + } + } + }, + "DeleteRouteTableRequest":{ + "type":"structure", + "required":["RouteTableId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "RouteTableId":{ + "shape":"String", + "locationName":"routeTableId" + } + } + }, + "DeleteSecurityGroupRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupName":{"shape":"String"}, + "GroupId":{"shape":"String"} + } + }, + "DeleteSnapshotRequest":{ + "type":"structure", + "required":["SnapshotId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SnapshotId":{"shape":"String"} + } + }, + "DeleteSpotDatafeedSubscriptionRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + } + } + }, + "DeleteSubnetRequest":{ + "type":"structure", + "required":["SubnetId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SubnetId":{"shape":"String"} + } + }, + "DeleteTagsRequest":{ + "type":"structure", + "required":["Resources"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Resources":{ + "shape":"ResourceIdList", + "locationName":"resourceId" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tag" + } + } + }, + "DeleteVolumeRequest":{ + "type":"structure", + "required":["VolumeId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeId":{"shape":"String"} + } + }, + "DeleteVpcEndpointsRequest":{ + "type":"structure", + "required":["VpcEndpointIds"], + "members":{ + "DryRun":{"shape":"Boolean"}, + "VpcEndpointIds":{ + "shape":"ValueStringList", + "locationName":"VpcEndpointId" + } + } + }, + "DeleteVpcEndpointsResult":{ + "type":"structure", + "members":{ + "Unsuccessful":{ + "shape":"UnsuccessfulItemSet", + "locationName":"unsuccessful" + } + } + }, + "DeleteVpcPeeringConnectionRequest":{ + "type":"structure", + "required":["VpcPeeringConnectionId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcPeeringConnectionId":{ + "shape":"String", + "locationName":"vpcPeeringConnectionId" + } + } + }, + "DeleteVpcPeeringConnectionResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "DeleteVpcRequest":{ + "type":"structure", + "required":["VpcId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcId":{"shape":"String"} + } + }, + "DeleteVpnConnectionRequest":{ + "type":"structure", + "required":["VpnConnectionId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpnConnectionId":{"shape":"String"} + } + }, + "DeleteVpnConnectionRouteRequest":{ + "type":"structure", + "required":[ + "VpnConnectionId", + "DestinationCidrBlock" + ], + "members":{ + "VpnConnectionId":{"shape":"String"}, + "DestinationCidrBlock":{"shape":"String"} + } + }, + "DeleteVpnGatewayRequest":{ + 
"type":"structure", + "required":["VpnGatewayId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpnGatewayId":{"shape":"String"} + } + }, + "DeregisterImageRequest":{ + "type":"structure", + "required":["ImageId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ImageId":{"shape":"String"} + } + }, + "DescribeAccountAttributesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "AttributeNames":{ + "shape":"AccountAttributeNameStringList", + "locationName":"attributeName" + } + } + }, + "DescribeAccountAttributesResult":{ + "type":"structure", + "members":{ + "AccountAttributes":{ + "shape":"AccountAttributeList", + "locationName":"accountAttributeSet" + } + } + }, + "DescribeAddressesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "PublicIps":{ + "shape":"PublicIpStringList", + "locationName":"PublicIp" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "AllocationIds":{ + "shape":"AllocationIdList", + "locationName":"AllocationId" + } + } + }, + "DescribeAddressesResult":{ + "type":"structure", + "members":{ + "Addresses":{ + "shape":"AddressList", + "locationName":"addressesSet" + } + } + }, + "DescribeAvailabilityZonesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ZoneNames":{ + "shape":"ZoneNameStringList", + "locationName":"ZoneName" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeAvailabilityZonesResult":{ + "type":"structure", + "members":{ + "AvailabilityZones":{ + "shape":"AvailabilityZoneList", + "locationName":"availabilityZoneInfo" + } + } + }, + "DescribeBundleTasksRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "BundleIds":{ + "shape":"BundleIdStringList", + "locationName":"BundleId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeBundleTasksResult":{ + "type":"structure", + "members":{ + "BundleTasks":{ + "shape":"BundleTaskList", + "locationName":"bundleInstanceTasksSet" + } + } + }, + "DescribeClassicLinkInstancesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + } + } + }, + "DescribeClassicLinkInstancesResult":{ + "type":"structure", + "members":{ + "Instances":{ + "shape":"ClassicLinkInstanceList", + "locationName":"instancesSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeConversionTaskList":{ + "type":"list", + "member":{ + "shape":"ConversionTask", + "locationName":"item" + } + }, + "DescribeConversionTasksRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"filter" + }, + "ConversionTaskIds":{ + "shape":"ConversionIdStringList", + "locationName":"conversionTaskId" + } + } + }, + "DescribeConversionTasksResult":{ + "type":"structure", + "members":{ + "ConversionTasks":{ + "shape":"DescribeConversionTaskList", + 
"locationName":"conversionTasks" + } + } + }, + "DescribeCustomerGatewaysRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "CustomerGatewayIds":{ + "shape":"CustomerGatewayIdStringList", + "locationName":"CustomerGatewayId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeCustomerGatewaysResult":{ + "type":"structure", + "members":{ + "CustomerGateways":{ + "shape":"CustomerGatewayList", + "locationName":"customerGatewaySet" + } + } + }, + "DescribeDhcpOptionsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "DhcpOptionsIds":{ + "shape":"DhcpOptionsIdStringList", + "locationName":"DhcpOptionsId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeDhcpOptionsResult":{ + "type":"structure", + "members":{ + "DhcpOptions":{ + "shape":"DhcpOptionsList", + "locationName":"dhcpOptionsSet" + } + } + }, + "DescribeExportTasksRequest":{ + "type":"structure", + "members":{ + "ExportTaskIds":{ + "shape":"ExportTaskIdStringList", + "locationName":"exportTaskId" + } + } + }, + "DescribeExportTasksResult":{ + "type":"structure", + "members":{ + "ExportTasks":{ + "shape":"ExportTaskList", + "locationName":"exportTaskSet" + } + } + }, + "DescribeFlowLogsRequest":{ + "type":"structure", + "members":{ + "FlowLogIds":{ + "shape":"ValueStringList", + "locationName":"FlowLogId" + }, + "Filter":{"shape":"FilterList"}, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"Integer"} + } + }, + "DescribeFlowLogsResult":{ + "type":"structure", + "members":{ + "FlowLogs":{ + "shape":"FlowLogSet", + "locationName":"flowLogSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeImageAttributeRequest":{ + "type":"structure", + "required":[ + "ImageId", + "Attribute" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ImageId":{"shape":"String"}, + "Attribute":{"shape":"ImageAttributeName"} + } + }, + "DescribeImagesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ImageIds":{ + "shape":"ImageIdStringList", + "locationName":"ImageId" + }, + "Owners":{ + "shape":"OwnerStringList", + "locationName":"Owner" + }, + "ExecutableUsers":{ + "shape":"ExecutableByStringList", + "locationName":"ExecutableBy" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeImagesResult":{ + "type":"structure", + "members":{ + "Images":{ + "shape":"ImageList", + "locationName":"imagesSet" + } + } + }, + "DescribeImportImageTasksRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "ImportTaskIds":{ + "shape":"ImportTaskIdList", + "locationName":"ImportTaskId" + }, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"Integer"}, + "Filters":{"shape":"FilterList"} + } + }, + "DescribeImportImageTasksResult":{ + "type":"structure", + "members":{ + "ImportImageTasks":{ + "shape":"ImportImageTaskList", + "locationName":"importImageTaskSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeImportSnapshotTasksRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "ImportTaskIds":{ + "shape":"ImportTaskIdList", + "locationName":"ImportTaskId" + }, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"Integer"}, + "Filters":{"shape":"FilterList"} + } + }, 
+ "DescribeImportSnapshotTasksResult":{ + "type":"structure", + "members":{ + "ImportSnapshotTasks":{ + "shape":"ImportSnapshotTaskList", + "locationName":"importSnapshotTaskSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeInstanceAttributeRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "Attribute" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Attribute":{ + "shape":"InstanceAttributeName", + "locationName":"attribute" + } + } + }, + "DescribeInstanceStatusRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"Integer"}, + "IncludeAllInstances":{ + "shape":"Boolean", + "locationName":"includeAllInstances" + } + } + }, + "DescribeInstanceStatusResult":{ + "type":"structure", + "members":{ + "InstanceStatuses":{ + "shape":"InstanceStatusList", + "locationName":"instanceStatusSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeInstancesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + } + } + }, + "DescribeInstancesResult":{ + "type":"structure", + "members":{ + "Reservations":{ + "shape":"ReservationList", + "locationName":"reservationSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeInternetGatewaysRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InternetGatewayIds":{ + "shape":"ValueStringList", + "locationName":"internetGatewayId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeInternetGatewaysResult":{ + "type":"structure", + "members":{ + "InternetGateways":{ + "shape":"InternetGatewayList", + "locationName":"internetGatewaySet" + } + } + }, + "DescribeKeyPairsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "KeyNames":{ + "shape":"KeyNameStringList", + "locationName":"KeyName" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeKeyPairsResult":{ + "type":"structure", + "members":{ + "KeyPairs":{ + "shape":"KeyPairList", + "locationName":"keySet" + } + } + }, + "DescribeMovingAddressesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "PublicIps":{ + "shape":"ValueStringList", + "locationName":"publicIp" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"filter" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + } + } + }, + "DescribeMovingAddressesResult":{ + "type":"structure", + "members":{ + "MovingAddressStatuses":{ + "shape":"MovingAddressStatusSet", + "locationName":"movingAddressStatusSet" + }, + "NextToken":{ + 
"shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeNetworkAclsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkAclIds":{ + "shape":"ValueStringList", + "locationName":"NetworkAclId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeNetworkAclsResult":{ + "type":"structure", + "members":{ + "NetworkAcls":{ + "shape":"NetworkAclList", + "locationName":"networkAclSet" + } + } + }, + "DescribeNetworkInterfaceAttributeRequest":{ + "type":"structure", + "required":["NetworkInterfaceId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "Attribute":{ + "shape":"NetworkInterfaceAttribute", + "locationName":"attribute" + } + } + }, + "DescribeNetworkInterfaceAttributeResult":{ + "type":"structure", + "members":{ + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "Description":{ + "shape":"AttributeValue", + "locationName":"description" + }, + "SourceDestCheck":{ + "shape":"AttributeBooleanValue", + "locationName":"sourceDestCheck" + }, + "Groups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + }, + "Attachment":{ + "shape":"NetworkInterfaceAttachment", + "locationName":"attachment" + } + } + }, + "DescribeNetworkInterfacesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkInterfaceIds":{ + "shape":"NetworkInterfaceIdList", + "locationName":"NetworkInterfaceId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"filter" + } + } + }, + "DescribeNetworkInterfacesResult":{ + "type":"structure", + "members":{ + "NetworkInterfaces":{ + "shape":"NetworkInterfaceList", + "locationName":"networkInterfaceSet" + } + } + }, + "DescribePlacementGroupsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupNames":{ + "shape":"PlacementGroupStringList", + "locationName":"groupName" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribePlacementGroupsResult":{ + "type":"structure", + "members":{ + "PlacementGroups":{ + "shape":"PlacementGroupList", + "locationName":"placementGroupSet" + } + } + }, + "DescribePrefixListsRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "PrefixListIds":{ + "shape":"ValueStringList", + "locationName":"PrefixListId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "MaxResults":{"shape":"Integer"}, + "NextToken":{"shape":"String"} + } + }, + "DescribePrefixListsResult":{ + "type":"structure", + "members":{ + "PrefixLists":{ + "shape":"PrefixListSet", + "locationName":"prefixListSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeRegionsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "RegionNames":{ + "shape":"RegionNameStringList", + "locationName":"RegionName" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeRegionsResult":{ + "type":"structure", + "members":{ + "Regions":{ + "shape":"RegionList", + "locationName":"regionInfo" + } + } + }, + "DescribeReservedInstancesListingsRequest":{ + "type":"structure", + "members":{ + "ReservedInstancesId":{ + "shape":"String", + 
"locationName":"reservedInstancesId" + }, + "ReservedInstancesListingId":{ + "shape":"String", + "locationName":"reservedInstancesListingId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"filters" + } + } + }, + "DescribeReservedInstancesListingsResult":{ + "type":"structure", + "members":{ + "ReservedInstancesListings":{ + "shape":"ReservedInstancesListingList", + "locationName":"reservedInstancesListingsSet" + } + } + }, + "DescribeReservedInstancesModificationsRequest":{ + "type":"structure", + "members":{ + "ReservedInstancesModificationIds":{ + "shape":"ReservedInstancesModificationIdStringList", + "locationName":"ReservedInstancesModificationId" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeReservedInstancesModificationsResult":{ + "type":"structure", + "members":{ + "ReservedInstancesModifications":{ + "shape":"ReservedInstancesModificationList", + "locationName":"reservedInstancesModificationsSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeReservedInstancesOfferingsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ReservedInstancesOfferingIds":{ + "shape":"ReservedInstancesOfferingIdStringList", + "locationName":"ReservedInstancesOfferingId" + }, + "InstanceType":{"shape":"InstanceType"}, + "AvailabilityZone":{"shape":"String"}, + "ProductDescription":{"shape":"RIProductDescription"}, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "InstanceTenancy":{ + "shape":"Tenancy", + "locationName":"instanceTenancy" + }, + "OfferingType":{ + "shape":"OfferingTypeValues", + "locationName":"offeringType" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + }, + "IncludeMarketplace":{"shape":"Boolean"}, + "MinDuration":{"shape":"Long"}, + "MaxDuration":{"shape":"Long"}, + "MaxInstanceCount":{"shape":"Integer"} + } + }, + "DescribeReservedInstancesOfferingsResult":{ + "type":"structure", + "members":{ + "ReservedInstancesOfferings":{ + "shape":"ReservedInstancesOfferingList", + "locationName":"reservedInstancesOfferingsSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeReservedInstancesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ReservedInstancesIds":{ + "shape":"ReservedInstancesIdStringList", + "locationName":"ReservedInstancesId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "OfferingType":{ + "shape":"OfferingTypeValues", + "locationName":"offeringType" + } + } + }, + "DescribeReservedInstancesResult":{ + "type":"structure", + "members":{ + "ReservedInstances":{ + "shape":"ReservedInstancesList", + "locationName":"reservedInstancesSet" + } + } + }, + "DescribeRouteTablesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "RouteTableIds":{ + "shape":"ValueStringList", + "locationName":"RouteTableId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeRouteTablesResult":{ + "type":"structure", + "members":{ + "RouteTables":{ + "shape":"RouteTableList", + "locationName":"routeTableSet" + } + } + }, + "DescribeSecurityGroupsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + 
"shape":"Boolean", + "locationName":"dryRun" + }, + "GroupNames":{ + "shape":"GroupNameStringList", + "locationName":"GroupName" + }, + "GroupIds":{ + "shape":"GroupIdStringList", + "locationName":"GroupId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeSecurityGroupsResult":{ + "type":"structure", + "members":{ + "SecurityGroups":{ + "shape":"SecurityGroupList", + "locationName":"securityGroupInfo" + } + } + }, + "DescribeSnapshotAttributeRequest":{ + "type":"structure", + "required":[ + "SnapshotId", + "Attribute" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SnapshotId":{"shape":"String"}, + "Attribute":{"shape":"SnapshotAttributeName"} + } + }, + "DescribeSnapshotAttributeResult":{ + "type":"structure", + "members":{ + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + }, + "CreateVolumePermissions":{ + "shape":"CreateVolumePermissionList", + "locationName":"createVolumePermission" + }, + "ProductCodes":{ + "shape":"ProductCodeList", + "locationName":"productCodes" + } + } + }, + "DescribeSnapshotsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SnapshotIds":{ + "shape":"SnapshotIdStringList", + "locationName":"SnapshotId" + }, + "OwnerIds":{ + "shape":"OwnerStringList", + "locationName":"Owner" + }, + "RestorableByUserIds":{ + "shape":"RestorableByStringList", + "locationName":"RestorableBy" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"Integer"} + } + }, + "DescribeSnapshotsResult":{ + "type":"structure", + "members":{ + "Snapshots":{ + "shape":"SnapshotList", + "locationName":"snapshotSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeSpotDatafeedSubscriptionRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + } + } + }, + "DescribeSpotDatafeedSubscriptionResult":{ + "type":"structure", + "members":{ + "SpotDatafeedSubscription":{ + "shape":"SpotDatafeedSubscription", + "locationName":"spotDatafeedSubscription" + } + } + }, + "DescribeSpotFleetInstancesRequest":{ + "type":"structure", + "required":["SpotFleetRequestId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + } + } + }, + "DescribeSpotFleetInstancesResponse":{ + "type":"structure", + "required":[ + "SpotFleetRequestId", + "ActiveInstances" + ], + "members":{ + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + }, + "ActiveInstances":{ + "shape":"ActiveInstanceSet", + "locationName":"activeInstanceSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeSpotFleetRequestHistoryRequest":{ + "type":"structure", + "required":[ + "SpotFleetRequestId", + "StartTime" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + }, + "EventType":{ + "shape":"EventType", + "locationName":"eventType" + }, + "StartTime":{ + "shape":"DateTime", + "locationName":"startTime" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + 
"MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + } + } + }, + "DescribeSpotFleetRequestHistoryResponse":{ + "type":"structure", + "required":[ + "SpotFleetRequestId", + "StartTime", + "LastEvaluatedTime", + "HistoryRecords" + ], + "members":{ + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + }, + "StartTime":{ + "shape":"DateTime", + "locationName":"startTime" + }, + "LastEvaluatedTime":{ + "shape":"DateTime", + "locationName":"lastEvaluatedTime" + }, + "HistoryRecords":{ + "shape":"HistoryRecords", + "locationName":"historyRecordSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeSpotFleetRequestsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SpotFleetRequestIds":{ + "shape":"ValueStringList", + "locationName":"spotFleetRequestId" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + } + } + }, + "DescribeSpotFleetRequestsResponse":{ + "type":"structure", + "required":["SpotFleetRequestConfigs"], + "members":{ + "SpotFleetRequestConfigs":{ + "shape":"SpotFleetRequestConfigSet", + "locationName":"spotFleetRequestConfigSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeSpotInstanceRequestsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SpotInstanceRequestIds":{ + "shape":"SpotInstanceRequestIdList", + "locationName":"SpotInstanceRequestId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeSpotInstanceRequestsResult":{ + "type":"structure", + "members":{ + "SpotInstanceRequests":{ + "shape":"SpotInstanceRequestList", + "locationName":"spotInstanceRequestSet" + } + } + }, + "DescribeSpotPriceHistoryRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "StartTime":{ + "shape":"DateTime", + "locationName":"startTime" + }, + "EndTime":{ + "shape":"DateTime", + "locationName":"endTime" + }, + "InstanceTypes":{ + "shape":"InstanceTypeList", + "locationName":"InstanceType" + }, + "ProductDescriptions":{ + "shape":"ProductDescriptionList", + "locationName":"ProductDescription" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeSpotPriceHistoryResult":{ + "type":"structure", + "members":{ + "SpotPriceHistory":{ + "shape":"SpotPriceHistoryList", + "locationName":"spotPriceHistorySet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeSubnetsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SubnetIds":{ + "shape":"SubnetIdStringList", + "locationName":"SubnetId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeSubnetsResult":{ + "type":"structure", + "members":{ + "Subnets":{ + "shape":"SubnetList", + "locationName":"subnetSet" + } + } + }, + "DescribeTagsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Filters":{ + "shape":"FilterList", + 
"locationName":"Filter" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeTagsResult":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagDescriptionList", + "locationName":"tagSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeVolumeAttributeRequest":{ + "type":"structure", + "required":["VolumeId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeId":{"shape":"String"}, + "Attribute":{"shape":"VolumeAttributeName"} + } + }, + "DescribeVolumeAttributeResult":{ + "type":"structure", + "members":{ + "VolumeId":{ + "shape":"String", + "locationName":"volumeId" + }, + "AutoEnableIO":{ + "shape":"AttributeBooleanValue", + "locationName":"autoEnableIO" + }, + "ProductCodes":{ + "shape":"ProductCodeList", + "locationName":"productCodes" + } + } + }, + "DescribeVolumeStatusRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeIds":{ + "shape":"VolumeIdStringList", + "locationName":"VolumeId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"Integer"} + } + }, + "DescribeVolumeStatusResult":{ + "type":"structure", + "members":{ + "VolumeStatuses":{ + "shape":"VolumeStatusList", + "locationName":"volumeStatusSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeVolumesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeIds":{ + "shape":"VolumeIdStringList", + "locationName":"VolumeId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + } + } + }, + "DescribeVolumesResult":{ + "type":"structure", + "members":{ + "Volumes":{ + "shape":"VolumeList", + "locationName":"volumeSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeVpcAttributeRequest":{ + "type":"structure", + "required":["VpcId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcId":{"shape":"String"}, + "Attribute":{"shape":"VpcAttributeName"} + } + }, + "DescribeVpcAttributeResult":{ + "type":"structure", + "members":{ + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "EnableDnsSupport":{ + "shape":"AttributeBooleanValue", + "locationName":"enableDnsSupport" + }, + "EnableDnsHostnames":{ + "shape":"AttributeBooleanValue", + "locationName":"enableDnsHostnames" + } + } + }, + "DescribeVpcClassicLinkRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcIds":{ + "shape":"VpcClassicLinkIdList", + "locationName":"VpcId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeVpcClassicLinkResult":{ + "type":"structure", + "members":{ + "Vpcs":{ + "shape":"VpcClassicLinkList", + "locationName":"vpcSet" + } + } + }, + "DescribeVpcEndpointServicesRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "MaxResults":{"shape":"Integer"}, + "NextToken":{"shape":"String"} + } + }, + "DescribeVpcEndpointServicesResult":{ + "type":"structure", + "members":{ + "ServiceNames":{ + "shape":"ValueStringList", + 
"locationName":"serviceNameSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeVpcEndpointsRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "VpcEndpointIds":{ + "shape":"ValueStringList", + "locationName":"VpcEndpointId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "MaxResults":{"shape":"Integer"}, + "NextToken":{"shape":"String"} + } + }, + "DescribeVpcEndpointsResult":{ + "type":"structure", + "members":{ + "VpcEndpoints":{ + "shape":"VpcEndpointSet", + "locationName":"vpcEndpointSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeVpcPeeringConnectionsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcPeeringConnectionIds":{ + "shape":"ValueStringList", + "locationName":"VpcPeeringConnectionId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeVpcPeeringConnectionsResult":{ + "type":"structure", + "members":{ + "VpcPeeringConnections":{ + "shape":"VpcPeeringConnectionList", + "locationName":"vpcPeeringConnectionSet" + } + } + }, + "DescribeVpcsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcIds":{ + "shape":"VpcIdStringList", + "locationName":"VpcId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeVpcsResult":{ + "type":"structure", + "members":{ + "Vpcs":{ + "shape":"VpcList", + "locationName":"vpcSet" + } + } + }, + "DescribeVpnConnectionsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpnConnectionIds":{ + "shape":"VpnConnectionIdStringList", + "locationName":"VpnConnectionId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeVpnConnectionsResult":{ + "type":"structure", + "members":{ + "VpnConnections":{ + "shape":"VpnConnectionList", + "locationName":"vpnConnectionSet" + } + } + }, + "DescribeVpnGatewaysRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpnGatewayIds":{ + "shape":"VpnGatewayIdStringList", + "locationName":"VpnGatewayId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeVpnGatewaysResult":{ + "type":"structure", + "members":{ + "VpnGateways":{ + "shape":"VpnGatewayList", + "locationName":"vpnGatewaySet" + } + } + }, + "DetachClassicLinkVpcRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "VpcId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + } + } + }, + "DetachClassicLinkVpcResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "DetachInternetGatewayRequest":{ + "type":"structure", + "required":[ + "InternetGatewayId", + "VpcId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InternetGatewayId":{ + "shape":"String", + "locationName":"internetGatewayId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + } + } + }, + "DetachNetworkInterfaceRequest":{ + "type":"structure", + "required":["AttachmentId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + 
"AttachmentId":{ + "shape":"String", + "locationName":"attachmentId" + }, + "Force":{ + "shape":"Boolean", + "locationName":"force" + } + } + }, + "DetachVolumeRequest":{ + "type":"structure", + "required":["VolumeId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeId":{"shape":"String"}, + "InstanceId":{"shape":"String"}, + "Device":{"shape":"String"}, + "Force":{"shape":"Boolean"} + } + }, + "DetachVpnGatewayRequest":{ + "type":"structure", + "required":[ + "VpnGatewayId", + "VpcId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpnGatewayId":{"shape":"String"}, + "VpcId":{"shape":"String"} + } + }, + "DeviceType":{ + "type":"string", + "enum":[ + "ebs", + "instance-store" + ] + }, + "DhcpConfiguration":{ + "type":"structure", + "members":{ + "Key":{ + "shape":"String", + "locationName":"key" + }, + "Values":{ + "shape":"DhcpConfigurationValueList", + "locationName":"valueSet" + } + } + }, + "DhcpConfigurationList":{ + "type":"list", + "member":{ + "shape":"DhcpConfiguration", + "locationName":"item" + } + }, + "DhcpOptions":{ + "type":"structure", + "members":{ + "DhcpOptionsId":{ + "shape":"String", + "locationName":"dhcpOptionsId" + }, + "DhcpConfigurations":{ + "shape":"DhcpConfigurationList", + "locationName":"dhcpConfigurationSet" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "DhcpOptionsIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"DhcpOptionsId" + } + }, + "DhcpOptionsList":{ + "type":"list", + "member":{ + "shape":"DhcpOptions", + "locationName":"item" + } + }, + "DisableVgwRoutePropagationRequest":{ + "type":"structure", + "required":[ + "RouteTableId", + "GatewayId" + ], + "members":{ + "RouteTableId":{"shape":"String"}, + "GatewayId":{"shape":"String"} + } + }, + "DisableVpcClassicLinkRequest":{ + "type":"structure", + "required":["VpcId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + } + } + }, + "DisableVpcClassicLinkResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "DisassociateAddressRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "PublicIp":{"shape":"String"}, + "AssociationId":{"shape":"String"} + } + }, + "DisassociateRouteTableRequest":{ + "type":"structure", + "required":["AssociationId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "AssociationId":{ + "shape":"String", + "locationName":"associationId" + } + } + }, + "DiskImage":{ + "type":"structure", + "members":{ + "Image":{"shape":"DiskImageDetail"}, + "Description":{"shape":"String"}, + "Volume":{"shape":"VolumeDetail"} + } + }, + "DiskImageDescription":{ + "type":"structure", + "required":[ + "Format", + "Size", + "ImportManifestUrl" + ], + "members":{ + "Format":{ + "shape":"DiskImageFormat", + "locationName":"format" + }, + "Size":{ + "shape":"Long", + "locationName":"size" + }, + "ImportManifestUrl":{ + "shape":"String", + "locationName":"importManifestUrl" + }, + "Checksum":{ + "shape":"String", + "locationName":"checksum" + } + } + }, + "DiskImageDetail":{ + "type":"structure", + "required":[ + "Format", + "Bytes", + "ImportManifestUrl" + ], + "members":{ + "Format":{ + "shape":"DiskImageFormat", + "locationName":"format" + }, + "Bytes":{ + "shape":"Long", + "locationName":"bytes" + }, + 
"ImportManifestUrl":{ + "shape":"String", + "locationName":"importManifestUrl" + } + } + }, + "DiskImageFormat":{ + "type":"string", + "enum":[ + "VMDK", + "RAW", + "VHD" + ] + }, + "DiskImageList":{ + "type":"list", + "member":{"shape":"DiskImage"} + }, + "DiskImageVolumeDescription":{ + "type":"structure", + "required":["Id"], + "members":{ + "Size":{ + "shape":"Long", + "locationName":"size" + }, + "Id":{ + "shape":"String", + "locationName":"id" + } + } + }, + "DomainType":{ + "type":"string", + "enum":[ + "vpc", + "standard" + ] + }, + "Double":{"type":"double"}, + "EbsBlockDevice":{ + "type":"structure", + "members":{ + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + }, + "VolumeSize":{ + "shape":"Integer", + "locationName":"volumeSize" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + "locationName":"deleteOnTermination" + }, + "VolumeType":{ + "shape":"VolumeType", + "locationName":"volumeType" + }, + "Iops":{ + "shape":"Integer", + "locationName":"iops" + }, + "Encrypted":{ + "shape":"Boolean", + "locationName":"encrypted" + } + } + }, + "EbsInstanceBlockDevice":{ + "type":"structure", + "members":{ + "VolumeId":{ + "shape":"String", + "locationName":"volumeId" + }, + "Status":{ + "shape":"AttachmentStatus", + "locationName":"status" + }, + "AttachTime":{ + "shape":"DateTime", + "locationName":"attachTime" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + "locationName":"deleteOnTermination" + } + } + }, + "EbsInstanceBlockDeviceSpecification":{ + "type":"structure", + "members":{ + "VolumeId":{ + "shape":"String", + "locationName":"volumeId" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + "locationName":"deleteOnTermination" + } + } + }, + "EnableVgwRoutePropagationRequest":{ + "type":"structure", + "required":[ + "RouteTableId", + "GatewayId" + ], + "members":{ + "RouteTableId":{"shape":"String"}, + "GatewayId":{"shape":"String"} + } + }, + "EnableVolumeIORequest":{ + "type":"structure", + "required":["VolumeId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeId":{ + "shape":"String", + "locationName":"volumeId" + } + } + }, + "EnableVpcClassicLinkRequest":{ + "type":"structure", + "required":["VpcId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + } + } + }, + "EnableVpcClassicLinkResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "EventCode":{ + "type":"string", + "enum":[ + "instance-reboot", + "system-reboot", + "system-maintenance", + "instance-retirement", + "instance-stop" + ] + }, + "EventInformation":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "EventSubType":{ + "shape":"String", + "locationName":"eventSubType" + }, + "EventDescription":{ + "shape":"String", + "locationName":"eventDescription" + } + } + }, + "EventType":{ + "type":"string", + "enum":[ + "instanceChange", + "fleetRequestChange", + "error" + ] + }, + "ExecutableByStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ExecutableBy" + } + }, + "ExportEnvironment":{ + "type":"string", + "enum":[ + "citrix", + "vmware", + "microsoft" + ] + }, + "ExportTask":{ + "type":"structure", + "members":{ + "ExportTaskId":{ + "shape":"String", + "locationName":"exportTaskId" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "State":{ + "shape":"ExportTaskState", + 
"locationName":"state" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "InstanceExportDetails":{ + "shape":"InstanceExportDetails", + "locationName":"instanceExport" + }, + "ExportToS3Task":{ + "shape":"ExportToS3Task", + "locationName":"exportToS3" + } + } + }, + "ExportTaskIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ExportTaskId" + } + }, + "ExportTaskList":{ + "type":"list", + "member":{ + "shape":"ExportTask", + "locationName":"item" + } + }, + "ExportTaskState":{ + "type":"string", + "enum":[ + "active", + "cancelling", + "cancelled", + "completed" + ] + }, + "ExportToS3Task":{ + "type":"structure", + "members":{ + "DiskImageFormat":{ + "shape":"DiskImageFormat", + "locationName":"diskImageFormat" + }, + "ContainerFormat":{ + "shape":"ContainerFormat", + "locationName":"containerFormat" + }, + "S3Bucket":{ + "shape":"String", + "locationName":"s3Bucket" + }, + "S3Key":{ + "shape":"String", + "locationName":"s3Key" + } + } + }, + "ExportToS3TaskSpecification":{ + "type":"structure", + "members":{ + "DiskImageFormat":{ + "shape":"DiskImageFormat", + "locationName":"diskImageFormat" + }, + "ContainerFormat":{ + "shape":"ContainerFormat", + "locationName":"containerFormat" + }, + "S3Bucket":{ + "shape":"String", + "locationName":"s3Bucket" + }, + "S3Prefix":{ + "shape":"String", + "locationName":"s3Prefix" + } + } + }, + "Filter":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "Values":{ + "shape":"ValueStringList", + "locationName":"Value" + } + } + }, + "FilterList":{ + "type":"list", + "member":{ + "shape":"Filter", + "locationName":"Filter" + } + }, + "Float":{"type":"float"}, + "FlowLog":{ + "type":"structure", + "members":{ + "CreationTime":{ + "shape":"DateTime", + "locationName":"creationTime" + }, + "FlowLogId":{ + "shape":"String", + "locationName":"flowLogId" + }, + "FlowLogStatus":{ + "shape":"String", + "locationName":"flowLogStatus" + }, + "ResourceId":{ + "shape":"String", + "locationName":"resourceId" + }, + "TrafficType":{ + "shape":"TrafficType", + "locationName":"trafficType" + }, + "LogGroupName":{ + "shape":"String", + "locationName":"logGroupName" + }, + "DeliverLogsStatus":{ + "shape":"String", + "locationName":"deliverLogsStatus" + }, + "DeliverLogsErrorMessage":{ + "shape":"String", + "locationName":"deliverLogsErrorMessage" + }, + "DeliverLogsPermissionArn":{ + "shape":"String", + "locationName":"deliverLogsPermissionArn" + } + } + }, + "FlowLogSet":{ + "type":"list", + "member":{ + "shape":"FlowLog", + "locationName":"item" + } + }, + "FlowLogsResourceType":{ + "type":"string", + "enum":[ + "VPC", + "Subnet", + "NetworkInterface" + ] + }, + "GatewayType":{ + "type":"string", + "enum":["ipsec.1"] + }, + "GetConsoleOutputRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{"shape":"String"} + } + }, + "GetConsoleOutputResult":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Timestamp":{ + "shape":"DateTime", + "locationName":"timestamp" + }, + "Output":{ + "shape":"String", + "locationName":"output" + } + } + }, + "GetPasswordDataRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{"shape":"String"} + } + }, + "GetPasswordDataResult":{ + "type":"structure", + "members":{ + "InstanceId":{ + 
"shape":"String", + "locationName":"instanceId" + }, + "Timestamp":{ + "shape":"DateTime", + "locationName":"timestamp" + }, + "PasswordData":{ + "shape":"String", + "locationName":"passwordData" + } + } + }, + "GroupIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"groupId" + } + }, + "GroupIdentifier":{ + "type":"structure", + "members":{ + "GroupName":{ + "shape":"String", + "locationName":"groupName" + }, + "GroupId":{ + "shape":"String", + "locationName":"groupId" + } + } + }, + "GroupIdentifierList":{ + "type":"list", + "member":{ + "shape":"GroupIdentifier", + "locationName":"item" + } + }, + "GroupNameStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"GroupName" + } + }, + "HistoryRecord":{ + "type":"structure", + "required":[ + "Timestamp", + "EventType", + "EventInformation" + ], + "members":{ + "Timestamp":{ + "shape":"DateTime", + "locationName":"timestamp" + }, + "EventType":{ + "shape":"EventType", + "locationName":"eventType" + }, + "EventInformation":{ + "shape":"EventInformation", + "locationName":"eventInformation" + } + } + }, + "HistoryRecords":{ + "type":"list", + "member":{ + "shape":"HistoryRecord", + "locationName":"item" + } + }, + "HypervisorType":{ + "type":"string", + "enum":[ + "ovm", + "xen" + ] + }, + "IamInstanceProfile":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"String", + "locationName":"arn" + }, + "Id":{ + "shape":"String", + "locationName":"id" + } + } + }, + "IamInstanceProfileSpecification":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"String", + "locationName":"arn" + }, + "Name":{ + "shape":"String", + "locationName":"name" + } + } + }, + "IcmpTypeCode":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"Integer", + "locationName":"type" + }, + "Code":{ + "shape":"Integer", + "locationName":"code" + } + } + }, + "Image":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"String", + "locationName":"imageId" + }, + "ImageLocation":{ + "shape":"String", + "locationName":"imageLocation" + }, + "State":{ + "shape":"ImageState", + "locationName":"imageState" + }, + "OwnerId":{ + "shape":"String", + "locationName":"imageOwnerId" + }, + "CreationDate":{ + "shape":"String", + "locationName":"creationDate" + }, + "Public":{ + "shape":"Boolean", + "locationName":"isPublic" + }, + "ProductCodes":{ + "shape":"ProductCodeList", + "locationName":"productCodes" + }, + "Architecture":{ + "shape":"ArchitectureValues", + "locationName":"architecture" + }, + "ImageType":{ + "shape":"ImageTypeValues", + "locationName":"imageType" + }, + "KernelId":{ + "shape":"String", + "locationName":"kernelId" + }, + "RamdiskId":{ + "shape":"String", + "locationName":"ramdiskId" + }, + "Platform":{ + "shape":"PlatformValues", + "locationName":"platform" + }, + "SriovNetSupport":{ + "shape":"String", + "locationName":"sriovNetSupport" + }, + "StateReason":{ + "shape":"StateReason", + "locationName":"stateReason" + }, + "ImageOwnerAlias":{ + "shape":"String", + "locationName":"imageOwnerAlias" + }, + "Name":{ + "shape":"String", + "locationName":"name" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "RootDeviceType":{ + "shape":"DeviceType", + "locationName":"rootDeviceType" + }, + "RootDeviceName":{ + "shape":"String", + "locationName":"rootDeviceName" + }, + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappingList", + "locationName":"blockDeviceMapping" + }, + "VirtualizationType":{ + "shape":"VirtualizationType", + 
"locationName":"virtualizationType" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "Hypervisor":{ + "shape":"HypervisorType", + "locationName":"hypervisor" + } + } + }, + "ImageAttribute":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"String", + "locationName":"imageId" + }, + "LaunchPermissions":{ + "shape":"LaunchPermissionList", + "locationName":"launchPermission" + }, + "ProductCodes":{ + "shape":"ProductCodeList", + "locationName":"productCodes" + }, + "KernelId":{ + "shape":"AttributeValue", + "locationName":"kernel" + }, + "RamdiskId":{ + "shape":"AttributeValue", + "locationName":"ramdisk" + }, + "Description":{ + "shape":"AttributeValue", + "locationName":"description" + }, + "SriovNetSupport":{ + "shape":"AttributeValue", + "locationName":"sriovNetSupport" + }, + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappingList", + "locationName":"blockDeviceMapping" + } + } + }, + "ImageAttributeName":{ + "type":"string", + "enum":[ + "description", + "kernel", + "ramdisk", + "launchPermission", + "productCodes", + "blockDeviceMapping", + "sriovNetSupport" + ] + }, + "ImageDiskContainer":{ + "type":"structure", + "members":{ + "Description":{"shape":"String"}, + "Format":{"shape":"String"}, + "Url":{"shape":"String"}, + "UserBucket":{"shape":"UserBucket"}, + "DeviceName":{"shape":"String"}, + "SnapshotId":{"shape":"String"} + } + }, + "ImageDiskContainerList":{ + "type":"list", + "member":{ + "shape":"ImageDiskContainer", + "locationName":"item" + } + }, + "ImageIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ImageId" + } + }, + "ImageList":{ + "type":"list", + "member":{ + "shape":"Image", + "locationName":"item" + } + }, + "ImageState":{ + "type":"string", + "enum":[ + "pending", + "available", + "invalid", + "deregistered", + "transient", + "failed", + "error" + ] + }, + "ImageTypeValues":{ + "type":"string", + "enum":[ + "machine", + "kernel", + "ramdisk" + ] + }, + "ImportImageRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "Description":{"shape":"String"}, + "DiskContainers":{ + "shape":"ImageDiskContainerList", + "locationName":"DiskContainer" + }, + "LicenseType":{"shape":"String"}, + "Hypervisor":{"shape":"String"}, + "Architecture":{"shape":"String"}, + "Platform":{"shape":"String"}, + "ClientData":{"shape":"ClientData"}, + "ClientToken":{"shape":"String"}, + "RoleName":{"shape":"String"} + } + }, + "ImportImageResult":{ + "type":"structure", + "members":{ + "ImportTaskId":{ + "shape":"String", + "locationName":"importTaskId" + }, + "Architecture":{ + "shape":"String", + "locationName":"architecture" + }, + "LicenseType":{ + "shape":"String", + "locationName":"licenseType" + }, + "Platform":{ + "shape":"String", + "locationName":"platform" + }, + "Hypervisor":{ + "shape":"String", + "locationName":"hypervisor" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "SnapshotDetails":{ + "shape":"SnapshotDetailList", + "locationName":"snapshotDetailSet" + }, + "ImageId":{ + "shape":"String", + "locationName":"imageId" + }, + "Progress":{ + "shape":"String", + "locationName":"progress" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "Status":{ + "shape":"String", + "locationName":"status" + } + } + }, + "ImportImageTask":{ + "type":"structure", + "members":{ + "ImportTaskId":{ + "shape":"String", + "locationName":"importTaskId" + }, + "Architecture":{ + "shape":"String", + "locationName":"architecture" + }, + 
"LicenseType":{ + "shape":"String", + "locationName":"licenseType" + }, + "Platform":{ + "shape":"String", + "locationName":"platform" + }, + "Hypervisor":{ + "shape":"String", + "locationName":"hypervisor" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "SnapshotDetails":{ + "shape":"SnapshotDetailList", + "locationName":"snapshotDetailSet" + }, + "ImageId":{ + "shape":"String", + "locationName":"imageId" + }, + "Progress":{ + "shape":"String", + "locationName":"progress" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "Status":{ + "shape":"String", + "locationName":"status" + } + } + }, + "ImportImageTaskList":{ + "type":"list", + "member":{ + "shape":"ImportImageTask", + "locationName":"item" + } + }, + "ImportInstanceLaunchSpecification":{ + "type":"structure", + "members":{ + "Architecture":{ + "shape":"ArchitectureValues", + "locationName":"architecture" + }, + "GroupNames":{ + "shape":"SecurityGroupStringList", + "locationName":"GroupName" + }, + "GroupIds":{ + "shape":"SecurityGroupIdStringList", + "locationName":"GroupId" + }, + "AdditionalInfo":{ + "shape":"String", + "locationName":"additionalInfo" + }, + "UserData":{ + "shape":"UserData", + "locationName":"userData" + }, + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + }, + "Placement":{ + "shape":"Placement", + "locationName":"placement" + }, + "Monitoring":{ + "shape":"Boolean", + "locationName":"monitoring" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "InstanceInitiatedShutdownBehavior":{ + "shape":"ShutdownBehavior", + "locationName":"instanceInitiatedShutdownBehavior" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + } + } + }, + "ImportInstanceRequest":{ + "type":"structure", + "required":["Platform"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "LaunchSpecification":{ + "shape":"ImportInstanceLaunchSpecification", + "locationName":"launchSpecification" + }, + "DiskImages":{ + "shape":"DiskImageList", + "locationName":"diskImage" + }, + "Platform":{ + "shape":"PlatformValues", + "locationName":"platform" + } + } + }, + "ImportInstanceResult":{ + "type":"structure", + "members":{ + "ConversionTask":{ + "shape":"ConversionTask", + "locationName":"conversionTask" + } + } + }, + "ImportInstanceTaskDetails":{ + "type":"structure", + "required":["Volumes"], + "members":{ + "Volumes":{ + "shape":"ImportInstanceVolumeDetailSet", + "locationName":"volumes" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Platform":{ + "shape":"PlatformValues", + "locationName":"platform" + }, + "Description":{ + "shape":"String", + "locationName":"description" + } + } + }, + "ImportInstanceVolumeDetailItem":{ + "type":"structure", + "required":[ + "BytesConverted", + "AvailabilityZone", + "Image", + "Volume", + "Status" + ], + "members":{ + "BytesConverted":{ + "shape":"Long", + "locationName":"bytesConverted" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Image":{ + "shape":"DiskImageDescription", + "locationName":"image" + }, + "Volume":{ + "shape":"DiskImageVolumeDescription", + "locationName":"volume" + }, + "Status":{ + "shape":"String", + "locationName":"status" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "Description":{ + "shape":"String", + 
"locationName":"description" + } + } + }, + "ImportInstanceVolumeDetailSet":{ + "type":"list", + "member":{ + "shape":"ImportInstanceVolumeDetailItem", + "locationName":"item" + } + }, + "ImportKeyPairRequest":{ + "type":"structure", + "required":[ + "KeyName", + "PublicKeyMaterial" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "KeyName":{ + "shape":"String", + "locationName":"keyName" + }, + "PublicKeyMaterial":{ + "shape":"Blob", + "locationName":"publicKeyMaterial" + } + } + }, + "ImportKeyPairResult":{ + "type":"structure", + "members":{ + "KeyName":{ + "shape":"String", + "locationName":"keyName" + }, + "KeyFingerprint":{ + "shape":"String", + "locationName":"keyFingerprint" + } + } + }, + "ImportSnapshotRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "Description":{"shape":"String"}, + "DiskContainer":{"shape":"SnapshotDiskContainer"}, + "ClientData":{"shape":"ClientData"}, + "ClientToken":{"shape":"String"}, + "RoleName":{"shape":"String"} + } + }, + "ImportSnapshotResult":{ + "type":"structure", + "members":{ + "ImportTaskId":{ + "shape":"String", + "locationName":"importTaskId" + }, + "SnapshotTaskDetail":{ + "shape":"SnapshotTaskDetail", + "locationName":"snapshotTaskDetail" + }, + "Description":{ + "shape":"String", + "locationName":"description" + } + } + }, + "ImportSnapshotTask":{ + "type":"structure", + "members":{ + "ImportTaskId":{ + "shape":"String", + "locationName":"importTaskId" + }, + "SnapshotTaskDetail":{ + "shape":"SnapshotTaskDetail", + "locationName":"snapshotTaskDetail" + }, + "Description":{ + "shape":"String", + "locationName":"description" + } + } + }, + "ImportSnapshotTaskList":{ + "type":"list", + "member":{ + "shape":"ImportSnapshotTask", + "locationName":"item" + } + }, + "ImportTaskIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ImportTaskId" + } + }, + "ImportVolumeRequest":{ + "type":"structure", + "required":[ + "AvailabilityZone", + "Image", + "Volume" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Image":{ + "shape":"DiskImageDetail", + "locationName":"image" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "Volume":{ + "shape":"VolumeDetail", + "locationName":"volume" + } + } + }, + "ImportVolumeResult":{ + "type":"structure", + "members":{ + "ConversionTask":{ + "shape":"ConversionTask", + "locationName":"conversionTask" + } + } + }, + "ImportVolumeTaskDetails":{ + "type":"structure", + "required":[ + "BytesConverted", + "AvailabilityZone", + "Image", + "Volume" + ], + "members":{ + "BytesConverted":{ + "shape":"Long", + "locationName":"bytesConverted" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "Image":{ + "shape":"DiskImageDescription", + "locationName":"image" + }, + "Volume":{ + "shape":"DiskImageVolumeDescription", + "locationName":"volume" + } + } + }, + "Instance":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "ImageId":{ + "shape":"String", + "locationName":"imageId" + }, + "State":{ + "shape":"InstanceState", + "locationName":"instanceState" + }, + "PrivateDnsName":{ + "shape":"String", + "locationName":"privateDnsName" + }, + "PublicDnsName":{ + "shape":"String", + "locationName":"dnsName" + }, + 
"StateTransitionReason":{ + "shape":"String", + "locationName":"reason" + }, + "KeyName":{ + "shape":"String", + "locationName":"keyName" + }, + "AmiLaunchIndex":{ + "shape":"Integer", + "locationName":"amiLaunchIndex" + }, + "ProductCodes":{ + "shape":"ProductCodeList", + "locationName":"productCodes" + }, + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + }, + "LaunchTime":{ + "shape":"DateTime", + "locationName":"launchTime" + }, + "Placement":{ + "shape":"Placement", + "locationName":"placement" + }, + "KernelId":{ + "shape":"String", + "locationName":"kernelId" + }, + "RamdiskId":{ + "shape":"String", + "locationName":"ramdiskId" + }, + "Platform":{ + "shape":"PlatformValues", + "locationName":"platform" + }, + "Monitoring":{ + "shape":"Monitoring", + "locationName":"monitoring" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "PublicIpAddress":{ + "shape":"String", + "locationName":"ipAddress" + }, + "StateReason":{ + "shape":"StateReason", + "locationName":"stateReason" + }, + "Architecture":{ + "shape":"ArchitectureValues", + "locationName":"architecture" + }, + "RootDeviceType":{ + "shape":"DeviceType", + "locationName":"rootDeviceType" + }, + "RootDeviceName":{ + "shape":"String", + "locationName":"rootDeviceName" + }, + "BlockDeviceMappings":{ + "shape":"InstanceBlockDeviceMappingList", + "locationName":"blockDeviceMapping" + }, + "VirtualizationType":{ + "shape":"VirtualizationType", + "locationName":"virtualizationType" + }, + "InstanceLifecycle":{ + "shape":"InstanceLifecycleType", + "locationName":"instanceLifecycle" + }, + "SpotInstanceRequestId":{ + "shape":"String", + "locationName":"spotInstanceRequestId" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "SecurityGroups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + }, + "SourceDestCheck":{ + "shape":"Boolean", + "locationName":"sourceDestCheck" + }, + "Hypervisor":{ + "shape":"HypervisorType", + "locationName":"hypervisor" + }, + "NetworkInterfaces":{ + "shape":"InstanceNetworkInterfaceList", + "locationName":"networkInterfaceSet" + }, + "IamInstanceProfile":{ + "shape":"IamInstanceProfile", + "locationName":"iamInstanceProfile" + }, + "EbsOptimized":{ + "shape":"Boolean", + "locationName":"ebsOptimized" + }, + "SriovNetSupport":{ + "shape":"String", + "locationName":"sriovNetSupport" + } + } + }, + "InstanceAttribute":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "InstanceType":{ + "shape":"AttributeValue", + "locationName":"instanceType" + }, + "KernelId":{ + "shape":"AttributeValue", + "locationName":"kernel" + }, + "RamdiskId":{ + "shape":"AttributeValue", + "locationName":"ramdisk" + }, + "UserData":{ + "shape":"AttributeValue", + "locationName":"userData" + }, + "DisableApiTermination":{ + "shape":"AttributeBooleanValue", + "locationName":"disableApiTermination" + }, + "InstanceInitiatedShutdownBehavior":{ + "shape":"AttributeValue", + "locationName":"instanceInitiatedShutdownBehavior" + }, + "RootDeviceName":{ + "shape":"AttributeValue", + "locationName":"rootDeviceName" + }, + "BlockDeviceMappings":{ + "shape":"InstanceBlockDeviceMappingList", + "locationName":"blockDeviceMapping" + }, + "ProductCodes":{ + "shape":"ProductCodeList", + 
"locationName":"productCodes" + }, + "EbsOptimized":{ + "shape":"AttributeBooleanValue", + "locationName":"ebsOptimized" + }, + "SriovNetSupport":{ + "shape":"AttributeValue", + "locationName":"sriovNetSupport" + }, + "SourceDestCheck":{ + "shape":"AttributeBooleanValue", + "locationName":"sourceDestCheck" + }, + "Groups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + } + } + }, + "InstanceAttributeName":{ + "type":"string", + "enum":[ + "instanceType", + "kernel", + "ramdisk", + "userData", + "disableApiTermination", + "instanceInitiatedShutdownBehavior", + "rootDeviceName", + "blockDeviceMapping", + "productCodes", + "sourceDestCheck", + "groupSet", + "ebsOptimized", + "sriovNetSupport" + ] + }, + "InstanceBlockDeviceMapping":{ + "type":"structure", + "members":{ + "DeviceName":{ + "shape":"String", + "locationName":"deviceName" + }, + "Ebs":{ + "shape":"EbsInstanceBlockDevice", + "locationName":"ebs" + } + } + }, + "InstanceBlockDeviceMappingList":{ + "type":"list", + "member":{ + "shape":"InstanceBlockDeviceMapping", + "locationName":"item" + } + }, + "InstanceBlockDeviceMappingSpecification":{ + "type":"structure", + "members":{ + "DeviceName":{ + "shape":"String", + "locationName":"deviceName" + }, + "Ebs":{ + "shape":"EbsInstanceBlockDeviceSpecification", + "locationName":"ebs" + }, + "VirtualName":{ + "shape":"String", + "locationName":"virtualName" + }, + "NoDevice":{ + "shape":"String", + "locationName":"noDevice" + } + } + }, + "InstanceBlockDeviceMappingSpecificationList":{ + "type":"list", + "member":{ + "shape":"InstanceBlockDeviceMappingSpecification", + "locationName":"item" + } + }, + "InstanceCount":{ + "type":"structure", + "members":{ + "State":{ + "shape":"ListingState", + "locationName":"state" + }, + "InstanceCount":{ + "shape":"Integer", + "locationName":"instanceCount" + } + } + }, + "InstanceCountList":{ + "type":"list", + "member":{ + "shape":"InstanceCount", + "locationName":"item" + } + }, + "InstanceExportDetails":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "TargetEnvironment":{ + "shape":"ExportEnvironment", + "locationName":"targetEnvironment" + } + } + }, + "InstanceIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"InstanceId" + } + }, + "InstanceLifecycleType":{ + "type":"string", + "enum":["spot"] + }, + "InstanceList":{ + "type":"list", + "member":{ + "shape":"Instance", + "locationName":"item" + } + }, + "InstanceMonitoring":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Monitoring":{ + "shape":"Monitoring", + "locationName":"monitoring" + } + } + }, + "InstanceMonitoringList":{ + "type":"list", + "member":{ + "shape":"InstanceMonitoring", + "locationName":"item" + } + }, + "InstanceNetworkInterface":{ + "type":"structure", + "members":{ + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "Status":{ + "shape":"NetworkInterfaceStatus", + "locationName":"status" + }, + "MacAddress":{ + "shape":"String", + "locationName":"macAddress" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "PrivateDnsName":{ + "shape":"String", + 
"locationName":"privateDnsName" + }, + "SourceDestCheck":{ + "shape":"Boolean", + "locationName":"sourceDestCheck" + }, + "Groups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + }, + "Attachment":{ + "shape":"InstanceNetworkInterfaceAttachment", + "locationName":"attachment" + }, + "Association":{ + "shape":"InstanceNetworkInterfaceAssociation", + "locationName":"association" + }, + "PrivateIpAddresses":{ + "shape":"InstancePrivateIpAddressList", + "locationName":"privateIpAddressesSet" + } + } + }, + "InstanceNetworkInterfaceAssociation":{ + "type":"structure", + "members":{ + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + }, + "PublicDnsName":{ + "shape":"String", + "locationName":"publicDnsName" + }, + "IpOwnerId":{ + "shape":"String", + "locationName":"ipOwnerId" + } + } + }, + "InstanceNetworkInterfaceAttachment":{ + "type":"structure", + "members":{ + "AttachmentId":{ + "shape":"String", + "locationName":"attachmentId" + }, + "DeviceIndex":{ + "shape":"Integer", + "locationName":"deviceIndex" + }, + "Status":{ + "shape":"AttachmentStatus", + "locationName":"status" + }, + "AttachTime":{ + "shape":"DateTime", + "locationName":"attachTime" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + "locationName":"deleteOnTermination" + } + } + }, + "InstanceNetworkInterfaceList":{ + "type":"list", + "member":{ + "shape":"InstanceNetworkInterface", + "locationName":"item" + } + }, + "InstanceNetworkInterfaceSpecification":{ + "type":"structure", + "members":{ + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "DeviceIndex":{ + "shape":"Integer", + "locationName":"deviceIndex" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "Groups":{ + "shape":"SecurityGroupIdStringList", + "locationName":"SecurityGroupId" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + "locationName":"deleteOnTermination" + }, + "PrivateIpAddresses":{ + "shape":"PrivateIpAddressSpecificationList", + "locationName":"privateIpAddressesSet", + "queryName":"PrivateIpAddresses" + }, + "SecondaryPrivateIpAddressCount":{ + "shape":"Integer", + "locationName":"secondaryPrivateIpAddressCount" + }, + "AssociatePublicIpAddress":{ + "shape":"Boolean", + "locationName":"associatePublicIpAddress" + } + } + }, + "InstanceNetworkInterfaceSpecificationList":{ + "type":"list", + "member":{ + "shape":"InstanceNetworkInterfaceSpecification", + "locationName":"item" + } + }, + "InstancePrivateIpAddress":{ + "type":"structure", + "members":{ + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "PrivateDnsName":{ + "shape":"String", + "locationName":"privateDnsName" + }, + "Primary":{ + "shape":"Boolean", + "locationName":"primary" + }, + "Association":{ + "shape":"InstanceNetworkInterfaceAssociation", + "locationName":"association" + } + } + }, + "InstancePrivateIpAddressList":{ + "type":"list", + "member":{ + "shape":"InstancePrivateIpAddress", + "locationName":"item" + } + }, + "InstanceState":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"Integer", + "locationName":"code" + }, + "Name":{ + "shape":"InstanceStateName", + "locationName":"name" + } + } + }, + "InstanceStateChange":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "CurrentState":{ + "shape":"InstanceState", + 
"locationName":"currentState" + }, + "PreviousState":{ + "shape":"InstanceState", + "locationName":"previousState" + } + } + }, + "InstanceStateChangeList":{ + "type":"list", + "member":{ + "shape":"InstanceStateChange", + "locationName":"item" + } + }, + "InstanceStateName":{ + "type":"string", + "enum":[ + "pending", + "running", + "shutting-down", + "terminated", + "stopping", + "stopped" + ] + }, + "InstanceStatus":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Events":{ + "shape":"InstanceStatusEventList", + "locationName":"eventsSet" + }, + "InstanceState":{ + "shape":"InstanceState", + "locationName":"instanceState" + }, + "SystemStatus":{ + "shape":"InstanceStatusSummary", + "locationName":"systemStatus" + }, + "InstanceStatus":{ + "shape":"InstanceStatusSummary", + "locationName":"instanceStatus" + } + } + }, + "InstanceStatusDetails":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"StatusName", + "locationName":"name" + }, + "Status":{ + "shape":"StatusType", + "locationName":"status" + }, + "ImpairedSince":{ + "shape":"DateTime", + "locationName":"impairedSince" + } + } + }, + "InstanceStatusDetailsList":{ + "type":"list", + "member":{ + "shape":"InstanceStatusDetails", + "locationName":"item" + } + }, + "InstanceStatusEvent":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"EventCode", + "locationName":"code" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "NotBefore":{ + "shape":"DateTime", + "locationName":"notBefore" + }, + "NotAfter":{ + "shape":"DateTime", + "locationName":"notAfter" + } + } + }, + "InstanceStatusEventList":{ + "type":"list", + "member":{ + "shape":"InstanceStatusEvent", + "locationName":"item" + } + }, + "InstanceStatusList":{ + "type":"list", + "member":{ + "shape":"InstanceStatus", + "locationName":"item" + } + }, + "InstanceStatusSummary":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"SummaryStatus", + "locationName":"status" + }, + "Details":{ + "shape":"InstanceStatusDetailsList", + "locationName":"details" + } + } + }, + "InstanceType":{ + "type":"string", + "enum":[ + "t1.micro", + "m1.small", + "m1.medium", + "m1.large", + "m1.xlarge", + "m3.medium", + "m3.large", + "m3.xlarge", + "m3.2xlarge", + "m4.large", + "m4.xlarge", + "m4.2xlarge", + "m4.4xlarge", + "m4.10xlarge", + "t2.micro", + "t2.small", + "t2.medium", + "t2.large", + "m2.xlarge", + "m2.2xlarge", + "m2.4xlarge", + "cr1.8xlarge", + "i2.xlarge", + "i2.2xlarge", + "i2.4xlarge", + "i2.8xlarge", + "hi1.4xlarge", + "hs1.8xlarge", + "c1.medium", + "c1.xlarge", + "c3.large", + "c3.xlarge", + "c3.2xlarge", + "c3.4xlarge", + "c3.8xlarge", + "c4.large", + "c4.xlarge", + "c4.2xlarge", + "c4.4xlarge", + "c4.8xlarge", + "cc1.4xlarge", + "cc2.8xlarge", + "g2.2xlarge", + "cg1.4xlarge", + "r3.large", + "r3.xlarge", + "r3.2xlarge", + "r3.4xlarge", + "r3.8xlarge", + "d2.xlarge", + "d2.2xlarge", + "d2.4xlarge", + "d2.8xlarge" + ] + }, + "InstanceTypeList":{ + "type":"list", + "member":{"shape":"InstanceType"} + }, + "Integer":{"type":"integer"}, + "InternetGateway":{ + "type":"structure", + "members":{ + "InternetGatewayId":{ + "shape":"String", + "locationName":"internetGatewayId" + }, + "Attachments":{ + "shape":"InternetGatewayAttachmentList", + "locationName":"attachmentSet" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "InternetGatewayAttachment":{ + 
"type":"structure", + "members":{ + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "State":{ + "shape":"AttachmentStatus", + "locationName":"state" + } + } + }, + "InternetGatewayAttachmentList":{ + "type":"list", + "member":{ + "shape":"InternetGatewayAttachment", + "locationName":"item" + } + }, + "InternetGatewayList":{ + "type":"list", + "member":{ + "shape":"InternetGateway", + "locationName":"item" + } + }, + "IpPermission":{ + "type":"structure", + "members":{ + "IpProtocol":{ + "shape":"String", + "locationName":"ipProtocol" + }, + "FromPort":{ + "shape":"Integer", + "locationName":"fromPort" + }, + "ToPort":{ + "shape":"Integer", + "locationName":"toPort" + }, + "UserIdGroupPairs":{ + "shape":"UserIdGroupPairList", + "locationName":"groups" + }, + "IpRanges":{ + "shape":"IpRangeList", + "locationName":"ipRanges" + }, + "PrefixListIds":{ + "shape":"PrefixListIdList", + "locationName":"prefixListIds" + } + } + }, + "IpPermissionList":{ + "type":"list", + "member":{ + "shape":"IpPermission", + "locationName":"item" + } + }, + "IpRange":{ + "type":"structure", + "members":{ + "CidrIp":{ + "shape":"String", + "locationName":"cidrIp" + } + } + }, + "IpRangeList":{ + "type":"list", + "member":{ + "shape":"IpRange", + "locationName":"item" + } + }, + "KeyNameStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"KeyName" + } + }, + "KeyPair":{ + "type":"structure", + "members":{ + "KeyName":{ + "shape":"String", + "locationName":"keyName" + }, + "KeyFingerprint":{ + "shape":"String", + "locationName":"keyFingerprint" + }, + "KeyMaterial":{ + "shape":"String", + "locationName":"keyMaterial" + } + } + }, + "KeyPairInfo":{ + "type":"structure", + "members":{ + "KeyName":{ + "shape":"String", + "locationName":"keyName" + }, + "KeyFingerprint":{ + "shape":"String", + "locationName":"keyFingerprint" + } + } + }, + "KeyPairList":{ + "type":"list", + "member":{ + "shape":"KeyPairInfo", + "locationName":"item" + } + }, + "LaunchPermission":{ + "type":"structure", + "members":{ + "UserId":{ + "shape":"String", + "locationName":"userId" + }, + "Group":{ + "shape":"PermissionGroup", + "locationName":"group" + } + } + }, + "LaunchPermissionList":{ + "type":"list", + "member":{ + "shape":"LaunchPermission", + "locationName":"item" + } + }, + "LaunchPermissionModifications":{ + "type":"structure", + "members":{ + "Add":{"shape":"LaunchPermissionList"}, + "Remove":{"shape":"LaunchPermissionList"} + } + }, + "LaunchSpecification":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"String", + "locationName":"imageId" + }, + "KeyName":{ + "shape":"String", + "locationName":"keyName" + }, + "SecurityGroups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + }, + "UserData":{ + "shape":"String", + "locationName":"userData" + }, + "AddressingType":{ + "shape":"String", + "locationName":"addressingType" + }, + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + }, + "Placement":{ + "shape":"SpotPlacement", + "locationName":"placement" + }, + "KernelId":{ + "shape":"String", + "locationName":"kernelId" + }, + "RamdiskId":{ + "shape":"String", + "locationName":"ramdiskId" + }, + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappingList", + "locationName":"blockDeviceMapping" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "NetworkInterfaces":{ + "shape":"InstanceNetworkInterfaceSpecificationList", + "locationName":"networkInterfaceSet" + }, + "IamInstanceProfile":{ + 
"shape":"IamInstanceProfileSpecification", + "locationName":"iamInstanceProfile" + }, + "EbsOptimized":{ + "shape":"Boolean", + "locationName":"ebsOptimized" + }, + "Monitoring":{ + "shape":"RunInstancesMonitoringEnabled", + "locationName":"monitoring" + } + } + }, + "LaunchSpecsList":{ + "type":"list", + "member":{ + "shape":"SpotFleetLaunchSpecification", + "locationName":"item" + }, + "min":1 + }, + "ListingState":{ + "type":"string", + "enum":[ + "available", + "sold", + "cancelled", + "pending" + ] + }, + "ListingStatus":{ + "type":"string", + "enum":[ + "active", + "pending", + "cancelled", + "closed" + ] + }, + "Long":{"type":"long"}, + "ModifyImageAttributeRequest":{ + "type":"structure", + "required":["ImageId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ImageId":{"shape":"String"}, + "Attribute":{"shape":"String"}, + "OperationType":{"shape":"OperationType"}, + "UserIds":{ + "shape":"UserIdStringList", + "locationName":"UserId" + }, + "UserGroups":{ + "shape":"UserGroupStringList", + "locationName":"UserGroup" + }, + "ProductCodes":{ + "shape":"ProductCodeStringList", + "locationName":"ProductCode" + }, + "Value":{"shape":"String"}, + "LaunchPermission":{"shape":"LaunchPermissionModifications"}, + "Description":{"shape":"AttributeValue"} + } + }, + "ModifyInstanceAttributeRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Attribute":{ + "shape":"InstanceAttributeName", + "locationName":"attribute" + }, + "Value":{ + "shape":"String", + "locationName":"value" + }, + "BlockDeviceMappings":{ + "shape":"InstanceBlockDeviceMappingSpecificationList", + "locationName":"blockDeviceMapping" + }, + "SourceDestCheck":{"shape":"AttributeBooleanValue"}, + "DisableApiTermination":{ + "shape":"AttributeBooleanValue", + "locationName":"disableApiTermination" + }, + "InstanceType":{ + "shape":"AttributeValue", + "locationName":"instanceType" + }, + "Kernel":{ + "shape":"AttributeValue", + "locationName":"kernel" + }, + "Ramdisk":{ + "shape":"AttributeValue", + "locationName":"ramdisk" + }, + "UserData":{ + "shape":"BlobAttributeValue", + "locationName":"userData" + }, + "InstanceInitiatedShutdownBehavior":{ + "shape":"AttributeValue", + "locationName":"instanceInitiatedShutdownBehavior" + }, + "Groups":{ + "shape":"GroupIdStringList", + "locationName":"GroupId" + }, + "EbsOptimized":{ + "shape":"AttributeBooleanValue", + "locationName":"ebsOptimized" + }, + "SriovNetSupport":{ + "shape":"AttributeValue", + "locationName":"sriovNetSupport" + } + } + }, + "ModifyNetworkInterfaceAttributeRequest":{ + "type":"structure", + "required":["NetworkInterfaceId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "Description":{ + "shape":"AttributeValue", + "locationName":"description" + }, + "SourceDestCheck":{ + "shape":"AttributeBooleanValue", + "locationName":"sourceDestCheck" + }, + "Groups":{ + "shape":"SecurityGroupIdStringList", + "locationName":"SecurityGroupId" + }, + "Attachment":{ + "shape":"NetworkInterfaceAttachmentChanges", + "locationName":"attachment" + } + } + }, + "ModifyReservedInstancesRequest":{ + "type":"structure", + "required":[ + "ReservedInstancesIds", + "TargetConfigurations" + ], + "members":{ + "ClientToken":{ + "shape":"String", + 
"locationName":"clientToken" + }, + "ReservedInstancesIds":{ + "shape":"ReservedInstancesIdStringList", + "locationName":"ReservedInstancesId" + }, + "TargetConfigurations":{ + "shape":"ReservedInstancesConfigurationList", + "locationName":"ReservedInstancesConfigurationSetItemType" + } + } + }, + "ModifyReservedInstancesResult":{ + "type":"structure", + "members":{ + "ReservedInstancesModificationId":{ + "shape":"String", + "locationName":"reservedInstancesModificationId" + } + } + }, + "ModifySnapshotAttributeRequest":{ + "type":"structure", + "required":["SnapshotId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SnapshotId":{"shape":"String"}, + "Attribute":{"shape":"SnapshotAttributeName"}, + "OperationType":{"shape":"OperationType"}, + "UserIds":{ + "shape":"UserIdStringList", + "locationName":"UserId" + }, + "GroupNames":{ + "shape":"GroupNameStringList", + "locationName":"UserGroup" + }, + "CreateVolumePermission":{"shape":"CreateVolumePermissionModifications"} + } + }, + "ModifySubnetAttributeRequest":{ + "type":"structure", + "required":["SubnetId"], + "members":{ + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "MapPublicIpOnLaunch":{"shape":"AttributeBooleanValue"} + } + }, + "ModifyVolumeAttributeRequest":{ + "type":"structure", + "required":["VolumeId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeId":{"shape":"String"}, + "AutoEnableIO":{"shape":"AttributeBooleanValue"} + } + }, + "ModifyVpcAttributeRequest":{ + "type":"structure", + "required":["VpcId"], + "members":{ + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "EnableDnsSupport":{"shape":"AttributeBooleanValue"}, + "EnableDnsHostnames":{"shape":"AttributeBooleanValue"} + } + }, + "ModifyVpcEndpointRequest":{ + "type":"structure", + "required":["VpcEndpointId"], + "members":{ + "DryRun":{"shape":"Boolean"}, + "VpcEndpointId":{"shape":"String"}, + "ResetPolicy":{"shape":"Boolean"}, + "PolicyDocument":{"shape":"String"}, + "AddRouteTableIds":{ + "shape":"ValueStringList", + "locationName":"AddRouteTableId" + }, + "RemoveRouteTableIds":{ + "shape":"ValueStringList", + "locationName":"RemoveRouteTableId" + } + } + }, + "ModifyVpcEndpointResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "MonitorInstancesRequest":{ + "type":"structure", + "required":["InstanceIds"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + } + } + }, + "MonitorInstancesResult":{ + "type":"structure", + "members":{ + "InstanceMonitorings":{ + "shape":"InstanceMonitoringList", + "locationName":"instancesSet" + } + } + }, + "Monitoring":{ + "type":"structure", + "members":{ + "State":{ + "shape":"MonitoringState", + "locationName":"state" + } + } + }, + "MonitoringState":{ + "type":"string", + "enum":[ + "disabled", + "disabling", + "enabled", + "pending" + ] + }, + "MoveAddressToVpcRequest":{ + "type":"structure", + "required":["PublicIp"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + } + } + }, + "MoveAddressToVpcResult":{ + "type":"structure", + "members":{ + "AllocationId":{ + "shape":"String", + "locationName":"allocationId" + }, + "Status":{ + "shape":"Status", + "locationName":"status" + } + } + }, + "MoveStatus":{ + "type":"string", + "enum":[ + 
"movingToVpc", + "restoringToClassic" + ] + }, + "MovingAddressStatus":{ + "type":"structure", + "members":{ + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + }, + "MoveStatus":{ + "shape":"MoveStatus", + "locationName":"moveStatus" + } + } + }, + "MovingAddressStatusSet":{ + "type":"list", + "member":{ + "shape":"MovingAddressStatus", + "locationName":"item" + } + }, + "NetworkAcl":{ + "type":"structure", + "members":{ + "NetworkAclId":{ + "shape":"String", + "locationName":"networkAclId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "IsDefault":{ + "shape":"Boolean", + "locationName":"default" + }, + "Entries":{ + "shape":"NetworkAclEntryList", + "locationName":"entrySet" + }, + "Associations":{ + "shape":"NetworkAclAssociationList", + "locationName":"associationSet" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "NetworkAclAssociation":{ + "type":"structure", + "members":{ + "NetworkAclAssociationId":{ + "shape":"String", + "locationName":"networkAclAssociationId" + }, + "NetworkAclId":{ + "shape":"String", + "locationName":"networkAclId" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + } + } + }, + "NetworkAclAssociationList":{ + "type":"list", + "member":{ + "shape":"NetworkAclAssociation", + "locationName":"item" + } + }, + "NetworkAclEntry":{ + "type":"structure", + "members":{ + "RuleNumber":{ + "shape":"Integer", + "locationName":"ruleNumber" + }, + "Protocol":{ + "shape":"String", + "locationName":"protocol" + }, + "RuleAction":{ + "shape":"RuleAction", + "locationName":"ruleAction" + }, + "Egress":{ + "shape":"Boolean", + "locationName":"egress" + }, + "CidrBlock":{ + "shape":"String", + "locationName":"cidrBlock" + }, + "IcmpTypeCode":{ + "shape":"IcmpTypeCode", + "locationName":"icmpTypeCode" + }, + "PortRange":{ + "shape":"PortRange", + "locationName":"portRange" + } + } + }, + "NetworkAclEntryList":{ + "type":"list", + "member":{ + "shape":"NetworkAclEntry", + "locationName":"item" + } + }, + "NetworkAclList":{ + "type":"list", + "member":{ + "shape":"NetworkAcl", + "locationName":"item" + } + }, + "NetworkInterface":{ + "type":"structure", + "members":{ + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "RequesterId":{ + "shape":"String", + "locationName":"requesterId" + }, + "RequesterManaged":{ + "shape":"Boolean", + "locationName":"requesterManaged" + }, + "Status":{ + "shape":"NetworkInterfaceStatus", + "locationName":"status" + }, + "MacAddress":{ + "shape":"String", + "locationName":"macAddress" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "PrivateDnsName":{ + "shape":"String", + "locationName":"privateDnsName" + }, + "SourceDestCheck":{ + "shape":"Boolean", + "locationName":"sourceDestCheck" + }, + "Groups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + }, + "Attachment":{ + "shape":"NetworkInterfaceAttachment", + "locationName":"attachment" + }, + "Association":{ + "shape":"NetworkInterfaceAssociation", + "locationName":"association" + }, + "TagSet":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "PrivateIpAddresses":{ + 
"shape":"NetworkInterfacePrivateIpAddressList", + "locationName":"privateIpAddressesSet" + } + } + }, + "NetworkInterfaceAssociation":{ + "type":"structure", + "members":{ + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + }, + "PublicDnsName":{ + "shape":"String", + "locationName":"publicDnsName" + }, + "IpOwnerId":{ + "shape":"String", + "locationName":"ipOwnerId" + }, + "AllocationId":{ + "shape":"String", + "locationName":"allocationId" + }, + "AssociationId":{ + "shape":"String", + "locationName":"associationId" + } + } + }, + "NetworkInterfaceAttachment":{ + "type":"structure", + "members":{ + "AttachmentId":{ + "shape":"String", + "locationName":"attachmentId" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "InstanceOwnerId":{ + "shape":"String", + "locationName":"instanceOwnerId" + }, + "DeviceIndex":{ + "shape":"Integer", + "locationName":"deviceIndex" + }, + "Status":{ + "shape":"AttachmentStatus", + "locationName":"status" + }, + "AttachTime":{ + "shape":"DateTime", + "locationName":"attachTime" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + "locationName":"deleteOnTermination" + } + } + }, + "NetworkInterfaceAttachmentChanges":{ + "type":"structure", + "members":{ + "AttachmentId":{ + "shape":"String", + "locationName":"attachmentId" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + "locationName":"deleteOnTermination" + } + } + }, + "NetworkInterfaceAttribute":{ + "type":"string", + "enum":[ + "description", + "groupSet", + "sourceDestCheck", + "attachment" + ] + }, + "NetworkInterfaceIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"item" + } + }, + "NetworkInterfaceList":{ + "type":"list", + "member":{ + "shape":"NetworkInterface", + "locationName":"item" + } + }, + "NetworkInterfacePrivateIpAddress":{ + "type":"structure", + "members":{ + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "PrivateDnsName":{ + "shape":"String", + "locationName":"privateDnsName" + }, + "Primary":{ + "shape":"Boolean", + "locationName":"primary" + }, + "Association":{ + "shape":"NetworkInterfaceAssociation", + "locationName":"association" + } + } + }, + "NetworkInterfacePrivateIpAddressList":{ + "type":"list", + "member":{ + "shape":"NetworkInterfacePrivateIpAddress", + "locationName":"item" + } + }, + "NetworkInterfaceStatus":{ + "type":"string", + "enum":[ + "available", + "attaching", + "in-use", + "detaching" + ] + }, + "OfferingTypeValues":{ + "type":"string", + "enum":[ + "Heavy Utilization", + "Medium Utilization", + "Light Utilization", + "No Upfront", + "Partial Upfront", + "All Upfront" + ] + }, + "OperationType":{ + "type":"string", + "enum":[ + "add", + "remove" + ] + }, + "OwnerStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"Owner" + } + }, + "PermissionGroup":{ + "type":"string", + "enum":["all"] + }, + "Placement":{ + "type":"structure", + "members":{ + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "GroupName":{ + "shape":"String", + "locationName":"groupName" + }, + "Tenancy":{ + "shape":"Tenancy", + "locationName":"tenancy" + } + } + }, + "PlacementGroup":{ + "type":"structure", + "members":{ + "GroupName":{ + "shape":"String", + "locationName":"groupName" + }, + "Strategy":{ + "shape":"PlacementStrategy", + "locationName":"strategy" + }, + "State":{ + "shape":"PlacementGroupState", + "locationName":"state" + } + } + }, + "PlacementGroupList":{ + "type":"list", + "member":{ + 
"shape":"PlacementGroup", + "locationName":"item" + } + }, + "PlacementGroupState":{ + "type":"string", + "enum":[ + "pending", + "available", + "deleting", + "deleted" + ] + }, + "PlacementGroupStringList":{ + "type":"list", + "member":{"shape":"String"} + }, + "PlacementStrategy":{ + "type":"string", + "enum":["cluster"] + }, + "PlatformValues":{ + "type":"string", + "enum":["Windows"] + }, + "PortRange":{ + "type":"structure", + "members":{ + "From":{ + "shape":"Integer", + "locationName":"from" + }, + "To":{ + "shape":"Integer", + "locationName":"to" + } + } + }, + "PrefixList":{ + "type":"structure", + "members":{ + "PrefixListId":{ + "shape":"String", + "locationName":"prefixListId" + }, + "PrefixListName":{ + "shape":"String", + "locationName":"prefixListName" + }, + "Cidrs":{ + "shape":"ValueStringList", + "locationName":"cidrSet" + } + } + }, + "PrefixListId":{ + "type":"structure", + "members":{ + "PrefixListId":{ + "shape":"String", + "locationName":"prefixListId" + } + } + }, + "PrefixListIdList":{ + "type":"list", + "member":{ + "shape":"PrefixListId", + "locationName":"item" + } + }, + "PrefixListSet":{ + "type":"list", + "member":{ + "shape":"PrefixList", + "locationName":"item" + } + }, + "PriceSchedule":{ + "type":"structure", + "members":{ + "Term":{ + "shape":"Long", + "locationName":"term" + }, + "Price":{ + "shape":"Double", + "locationName":"price" + }, + "CurrencyCode":{ + "shape":"CurrencyCodeValues", + "locationName":"currencyCode" + }, + "Active":{ + "shape":"Boolean", + "locationName":"active" + } + } + }, + "PriceScheduleList":{ + "type":"list", + "member":{ + "shape":"PriceSchedule", + "locationName":"item" + } + }, + "PriceScheduleSpecification":{ + "type":"structure", + "members":{ + "Term":{ + "shape":"Long", + "locationName":"term" + }, + "Price":{ + "shape":"Double", + "locationName":"price" + }, + "CurrencyCode":{ + "shape":"CurrencyCodeValues", + "locationName":"currencyCode" + } + } + }, + "PriceScheduleSpecificationList":{ + "type":"list", + "member":{ + "shape":"PriceScheduleSpecification", + "locationName":"item" + } + }, + "PricingDetail":{ + "type":"structure", + "members":{ + "Price":{ + "shape":"Double", + "locationName":"price" + }, + "Count":{ + "shape":"Integer", + "locationName":"count" + } + } + }, + "PricingDetailsList":{ + "type":"list", + "member":{ + "shape":"PricingDetail", + "locationName":"item" + } + }, + "PrivateIpAddressSpecification":{ + "type":"structure", + "required":["PrivateIpAddress"], + "members":{ + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "Primary":{ + "shape":"Boolean", + "locationName":"primary" + } + } + }, + "PrivateIpAddressSpecificationList":{ + "type":"list", + "member":{ + "shape":"PrivateIpAddressSpecification", + "locationName":"item" + } + }, + "PrivateIpAddressStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"PrivateIpAddress" + } + }, + "ProductCode":{ + "type":"structure", + "members":{ + "ProductCodeId":{ + "shape":"String", + "locationName":"productCode" + }, + "ProductCodeType":{ + "shape":"ProductCodeValues", + "locationName":"type" + } + } + }, + "ProductCodeList":{ + "type":"list", + "member":{ + "shape":"ProductCode", + "locationName":"item" + } + }, + "ProductCodeStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ProductCode" + } + }, + "ProductCodeValues":{ + "type":"string", + "enum":[ + "devpay", + "marketplace" + ] + }, + "ProductDescriptionList":{ + "type":"list", + 
"member":{"shape":"String"} + }, + "PropagatingVgw":{ + "type":"structure", + "members":{ + "GatewayId":{ + "shape":"String", + "locationName":"gatewayId" + } + } + }, + "PropagatingVgwList":{ + "type":"list", + "member":{ + "shape":"PropagatingVgw", + "locationName":"item" + } + }, + "PublicIpStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"PublicIp" + } + }, + "PurchaseReservedInstancesOfferingRequest":{ + "type":"structure", + "required":[ + "ReservedInstancesOfferingId", + "InstanceCount" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ReservedInstancesOfferingId":{"shape":"String"}, + "InstanceCount":{"shape":"Integer"}, + "LimitPrice":{ + "shape":"ReservedInstanceLimitPrice", + "locationName":"limitPrice" + } + } + }, + "PurchaseReservedInstancesOfferingResult":{ + "type":"structure", + "members":{ + "ReservedInstancesId":{ + "shape":"String", + "locationName":"reservedInstancesId" + } + } + }, + "RIProductDescription":{ + "type":"string", + "enum":[ + "Linux/UNIX", + "Linux/UNIX (Amazon VPC)", + "Windows", + "Windows (Amazon VPC)" + ] + }, + "ReasonCodesList":{ + "type":"list", + "member":{ + "shape":"ReportInstanceReasonCodes", + "locationName":"item" + } + }, + "RebootInstancesRequest":{ + "type":"structure", + "required":["InstanceIds"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + } + } + }, + "RecurringCharge":{ + "type":"structure", + "members":{ + "Frequency":{ + "shape":"RecurringChargeFrequency", + "locationName":"frequency" + }, + "Amount":{ + "shape":"Double", + "locationName":"amount" + } + } + }, + "RecurringChargeFrequency":{ + "type":"string", + "enum":["Hourly"] + }, + "RecurringChargesList":{ + "type":"list", + "member":{ + "shape":"RecurringCharge", + "locationName":"item" + } + }, + "Region":{ + "type":"structure", + "members":{ + "RegionName":{ + "shape":"String", + "locationName":"regionName" + }, + "Endpoint":{ + "shape":"String", + "locationName":"regionEndpoint" + } + } + }, + "RegionList":{ + "type":"list", + "member":{ + "shape":"Region", + "locationName":"item" + } + }, + "RegionNameStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"RegionName" + } + }, + "RegisterImageRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ImageLocation":{"shape":"String"}, + "Name":{ + "shape":"String", + "locationName":"name" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "Architecture":{ + "shape":"ArchitectureValues", + "locationName":"architecture" + }, + "KernelId":{ + "shape":"String", + "locationName":"kernelId" + }, + "RamdiskId":{ + "shape":"String", + "locationName":"ramdiskId" + }, + "RootDeviceName":{ + "shape":"String", + "locationName":"rootDeviceName" + }, + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappingRequestList", + "locationName":"BlockDeviceMapping" + }, + "VirtualizationType":{ + "shape":"String", + "locationName":"virtualizationType" + }, + "SriovNetSupport":{ + "shape":"String", + "locationName":"sriovNetSupport" + } + } + }, + "RegisterImageResult":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"String", + "locationName":"imageId" + } + } + }, + "RejectVpcPeeringConnectionRequest":{ + "type":"structure", + "required":["VpcPeeringConnectionId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + 
"locationName":"dryRun" + }, + "VpcPeeringConnectionId":{ + "shape":"String", + "locationName":"vpcPeeringConnectionId" + } + } + }, + "RejectVpcPeeringConnectionResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "ReleaseAddressRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "PublicIp":{"shape":"String"}, + "AllocationId":{"shape":"String"} + } + }, + "ReplaceNetworkAclAssociationRequest":{ + "type":"structure", + "required":[ + "AssociationId", + "NetworkAclId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "AssociationId":{ + "shape":"String", + "locationName":"associationId" + }, + "NetworkAclId":{ + "shape":"String", + "locationName":"networkAclId" + } + } + }, + "ReplaceNetworkAclAssociationResult":{ + "type":"structure", + "members":{ + "NewAssociationId":{ + "shape":"String", + "locationName":"newAssociationId" + } + } + }, + "ReplaceNetworkAclEntryRequest":{ + "type":"structure", + "required":[ + "NetworkAclId", + "RuleNumber", + "Protocol", + "RuleAction", + "Egress", + "CidrBlock" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkAclId":{ + "shape":"String", + "locationName":"networkAclId" + }, + "RuleNumber":{ + "shape":"Integer", + "locationName":"ruleNumber" + }, + "Protocol":{ + "shape":"String", + "locationName":"protocol" + }, + "RuleAction":{ + "shape":"RuleAction", + "locationName":"ruleAction" + }, + "Egress":{ + "shape":"Boolean", + "locationName":"egress" + }, + "CidrBlock":{ + "shape":"String", + "locationName":"cidrBlock" + }, + "IcmpTypeCode":{ + "shape":"IcmpTypeCode", + "locationName":"Icmp" + }, + "PortRange":{ + "shape":"PortRange", + "locationName":"portRange" + } + } + }, + "ReplaceRouteRequest":{ + "type":"structure", + "required":[ + "RouteTableId", + "DestinationCidrBlock" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "RouteTableId":{ + "shape":"String", + "locationName":"routeTableId" + }, + "DestinationCidrBlock":{ + "shape":"String", + "locationName":"destinationCidrBlock" + }, + "GatewayId":{ + "shape":"String", + "locationName":"gatewayId" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "VpcPeeringConnectionId":{ + "shape":"String", + "locationName":"vpcPeeringConnectionId" + } + } + }, + "ReplaceRouteTableAssociationRequest":{ + "type":"structure", + "required":[ + "AssociationId", + "RouteTableId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "AssociationId":{ + "shape":"String", + "locationName":"associationId" + }, + "RouteTableId":{ + "shape":"String", + "locationName":"routeTableId" + } + } + }, + "ReplaceRouteTableAssociationResult":{ + "type":"structure", + "members":{ + "NewAssociationId":{ + "shape":"String", + "locationName":"newAssociationId" + } + } + }, + "ReportInstanceReasonCodes":{ + "type":"string", + "enum":[ + "instance-stuck-in-state", + "unresponsive", + "not-accepting-credentials", + "password-not-available", + "performance-network", + "performance-instance-store", + "performance-ebs-volume", + "performance-other", + "other" + ] + }, + "ReportInstanceStatusRequest":{ + "type":"structure", + "required":[ + "Instances", + "Status", + "ReasonCodes" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + 
"locationName":"dryRun" + }, + "Instances":{ + "shape":"InstanceIdStringList", + "locationName":"instanceId" + }, + "Status":{ + "shape":"ReportStatusType", + "locationName":"status" + }, + "StartTime":{ + "shape":"DateTime", + "locationName":"startTime" + }, + "EndTime":{ + "shape":"DateTime", + "locationName":"endTime" + }, + "ReasonCodes":{ + "shape":"ReasonCodesList", + "locationName":"reasonCode" + }, + "Description":{ + "shape":"String", + "locationName":"description" + } + } + }, + "ReportStatusType":{ + "type":"string", + "enum":[ + "ok", + "impaired" + ] + }, + "RequestSpotFleetRequest":{ + "type":"structure", + "required":["SpotFleetRequestConfig"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SpotFleetRequestConfig":{ + "shape":"SpotFleetRequestConfigData", + "locationName":"spotFleetRequestConfig" + } + } + }, + "RequestSpotFleetResponse":{ + "type":"structure", + "required":["SpotFleetRequestId"], + "members":{ + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + } + } + }, + "RequestSpotInstancesRequest":{ + "type":"structure", + "required":["SpotPrice"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SpotPrice":{ + "shape":"String", + "locationName":"spotPrice" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + }, + "InstanceCount":{ + "shape":"Integer", + "locationName":"instanceCount" + }, + "Type":{ + "shape":"SpotInstanceType", + "locationName":"type" + }, + "ValidFrom":{ + "shape":"DateTime", + "locationName":"validFrom" + }, + "ValidUntil":{ + "shape":"DateTime", + "locationName":"validUntil" + }, + "LaunchGroup":{ + "shape":"String", + "locationName":"launchGroup" + }, + "AvailabilityZoneGroup":{ + "shape":"String", + "locationName":"availabilityZoneGroup" + }, + "LaunchSpecification":{"shape":"RequestSpotLaunchSpecification"} + } + }, + "RequestSpotInstancesResult":{ + "type":"structure", + "members":{ + "SpotInstanceRequests":{ + "shape":"SpotInstanceRequestList", + "locationName":"spotInstanceRequestSet" + } + } + }, + "Reservation":{ + "type":"structure", + "members":{ + "ReservationId":{ + "shape":"String", + "locationName":"reservationId" + }, + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "RequesterId":{ + "shape":"String", + "locationName":"requesterId" + }, + "Groups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + }, + "Instances":{ + "shape":"InstanceList", + "locationName":"instancesSet" + } + } + }, + "ReservationList":{ + "type":"list", + "member":{ + "shape":"Reservation", + "locationName":"item" + } + }, + "ReservedInstanceLimitPrice":{ + "type":"structure", + "members":{ + "Amount":{ + "shape":"Double", + "locationName":"amount" + }, + "CurrencyCode":{ + "shape":"CurrencyCodeValues", + "locationName":"currencyCode" + } + } + }, + "ReservedInstanceState":{ + "type":"string", + "enum":[ + "payment-pending", + "active", + "payment-failed", + "retired" + ] + }, + "ReservedInstances":{ + "type":"structure", + "members":{ + "ReservedInstancesId":{ + "shape":"String", + "locationName":"reservedInstancesId" + }, + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Start":{ + "shape":"DateTime", + "locationName":"start" + }, + "End":{ + "shape":"DateTime", + "locationName":"end" + }, + "Duration":{ + "shape":"Long", + "locationName":"duration" + }, + "UsagePrice":{ + 
"shape":"Float", + "locationName":"usagePrice" + }, + "FixedPrice":{ + "shape":"Float", + "locationName":"fixedPrice" + }, + "InstanceCount":{ + "shape":"Integer", + "locationName":"instanceCount" + }, + "ProductDescription":{ + "shape":"RIProductDescription", + "locationName":"productDescription" + }, + "State":{ + "shape":"ReservedInstanceState", + "locationName":"state" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "InstanceTenancy":{ + "shape":"Tenancy", + "locationName":"instanceTenancy" + }, + "CurrencyCode":{ + "shape":"CurrencyCodeValues", + "locationName":"currencyCode" + }, + "OfferingType":{ + "shape":"OfferingTypeValues", + "locationName":"offeringType" + }, + "RecurringCharges":{ + "shape":"RecurringChargesList", + "locationName":"recurringCharges" + } + } + }, + "ReservedInstancesConfiguration":{ + "type":"structure", + "members":{ + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Platform":{ + "shape":"String", + "locationName":"platform" + }, + "InstanceCount":{ + "shape":"Integer", + "locationName":"instanceCount" + }, + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + } + } + }, + "ReservedInstancesConfigurationList":{ + "type":"list", + "member":{ + "shape":"ReservedInstancesConfiguration", + "locationName":"item" + } + }, + "ReservedInstancesId":{ + "type":"structure", + "members":{ + "ReservedInstancesId":{ + "shape":"String", + "locationName":"reservedInstancesId" + } + } + }, + "ReservedInstancesIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ReservedInstancesId" + } + }, + "ReservedInstancesList":{ + "type":"list", + "member":{ + "shape":"ReservedInstances", + "locationName":"item" + } + }, + "ReservedInstancesListing":{ + "type":"structure", + "members":{ + "ReservedInstancesListingId":{ + "shape":"String", + "locationName":"reservedInstancesListingId" + }, + "ReservedInstancesId":{ + "shape":"String", + "locationName":"reservedInstancesId" + }, + "CreateDate":{ + "shape":"DateTime", + "locationName":"createDate" + }, + "UpdateDate":{ + "shape":"DateTime", + "locationName":"updateDate" + }, + "Status":{ + "shape":"ListingStatus", + "locationName":"status" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "InstanceCounts":{ + "shape":"InstanceCountList", + "locationName":"instanceCounts" + }, + "PriceSchedules":{ + "shape":"PriceScheduleList", + "locationName":"priceSchedules" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + } + } + }, + "ReservedInstancesListingList":{ + "type":"list", + "member":{ + "shape":"ReservedInstancesListing", + "locationName":"item" + } + }, + "ReservedInstancesModification":{ + "type":"structure", + "members":{ + "ReservedInstancesModificationId":{ + "shape":"String", + "locationName":"reservedInstancesModificationId" + }, + "ReservedInstancesIds":{ + "shape":"ReservedIntancesIds", + "locationName":"reservedInstancesSet" + }, + "ModificationResults":{ + "shape":"ReservedInstancesModificationResultList", + "locationName":"modificationResultSet" + }, + "CreateDate":{ + "shape":"DateTime", + "locationName":"createDate" + }, + "UpdateDate":{ + "shape":"DateTime", + "locationName":"updateDate" + }, + "EffectiveDate":{ + "shape":"DateTime", + "locationName":"effectiveDate" + }, + "Status":{ + "shape":"String", + "locationName":"status" + }, + "StatusMessage":{ + "shape":"String", + 
"locationName":"statusMessage" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + } + } + }, + "ReservedInstancesModificationIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ReservedInstancesModificationId" + } + }, + "ReservedInstancesModificationList":{ + "type":"list", + "member":{ + "shape":"ReservedInstancesModification", + "locationName":"item" + } + }, + "ReservedInstancesModificationResult":{ + "type":"structure", + "members":{ + "ReservedInstancesId":{ + "shape":"String", + "locationName":"reservedInstancesId" + }, + "TargetConfiguration":{ + "shape":"ReservedInstancesConfiguration", + "locationName":"targetConfiguration" + } + } + }, + "ReservedInstancesModificationResultList":{ + "type":"list", + "member":{ + "shape":"ReservedInstancesModificationResult", + "locationName":"item" + } + }, + "ReservedInstancesOffering":{ + "type":"structure", + "members":{ + "ReservedInstancesOfferingId":{ + "shape":"String", + "locationName":"reservedInstancesOfferingId" + }, + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Duration":{ + "shape":"Long", + "locationName":"duration" + }, + "UsagePrice":{ + "shape":"Float", + "locationName":"usagePrice" + }, + "FixedPrice":{ + "shape":"Float", + "locationName":"fixedPrice" + }, + "ProductDescription":{ + "shape":"RIProductDescription", + "locationName":"productDescription" + }, + "InstanceTenancy":{ + "shape":"Tenancy", + "locationName":"instanceTenancy" + }, + "CurrencyCode":{ + "shape":"CurrencyCodeValues", + "locationName":"currencyCode" + }, + "OfferingType":{ + "shape":"OfferingTypeValues", + "locationName":"offeringType" + }, + "RecurringCharges":{ + "shape":"RecurringChargesList", + "locationName":"recurringCharges" + }, + "Marketplace":{ + "shape":"Boolean", + "locationName":"marketplace" + }, + "PricingDetails":{ + "shape":"PricingDetailsList", + "locationName":"pricingDetailsSet" + } + } + }, + "ReservedInstancesOfferingIdStringList":{ + "type":"list", + "member":{"shape":"String"} + }, + "ReservedInstancesOfferingList":{ + "type":"list", + "member":{ + "shape":"ReservedInstancesOffering", + "locationName":"item" + } + }, + "ReservedIntancesIds":{ + "type":"list", + "member":{ + "shape":"ReservedInstancesId", + "locationName":"item" + } + }, + "ResetImageAttributeName":{ + "type":"string", + "enum":["launchPermission"] + }, + "ResetImageAttributeRequest":{ + "type":"structure", + "required":[ + "ImageId", + "Attribute" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ImageId":{"shape":"String"}, + "Attribute":{"shape":"ResetImageAttributeName"} + } + }, + "ResetInstanceAttributeRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "Attribute" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Attribute":{ + "shape":"InstanceAttributeName", + "locationName":"attribute" + } + } + }, + "ResetNetworkInterfaceAttributeRequest":{ + "type":"structure", + "required":["NetworkInterfaceId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "SourceDestCheck":{ + "shape":"String", + "locationName":"sourceDestCheck" + } + } + }, + "ResetSnapshotAttributeRequest":{ + "type":"structure", + "required":[ + 
"SnapshotId", + "Attribute" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SnapshotId":{"shape":"String"}, + "Attribute":{"shape":"SnapshotAttributeName"} + } + }, + "ResourceIdList":{ + "type":"list", + "member":{"shape":"String"} + }, + "ResourceType":{ + "type":"string", + "enum":[ + "customer-gateway", + "dhcp-options", + "image", + "instance", + "internet-gateway", + "network-acl", + "network-interface", + "reserved-instances", + "route-table", + "snapshot", + "spot-instances-request", + "subnet", + "security-group", + "volume", + "vpc", + "vpn-connection", + "vpn-gateway" + ] + }, + "RestorableByStringList":{ + "type":"list", + "member":{"shape":"String"} + }, + "RestoreAddressToClassicRequest":{ + "type":"structure", + "required":["PublicIp"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + } + } + }, + "RestoreAddressToClassicResult":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"Status", + "locationName":"status" + }, + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + } + } + }, + "RevokeSecurityGroupEgressRequest":{ + "type":"structure", + "required":["GroupId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupId":{ + "shape":"String", + "locationName":"groupId" + }, + "SourceSecurityGroupName":{ + "shape":"String", + "locationName":"sourceSecurityGroupName" + }, + "SourceSecurityGroupOwnerId":{ + "shape":"String", + "locationName":"sourceSecurityGroupOwnerId" + }, + "IpProtocol":{ + "shape":"String", + "locationName":"ipProtocol" + }, + "FromPort":{ + "shape":"Integer", + "locationName":"fromPort" + }, + "ToPort":{ + "shape":"Integer", + "locationName":"toPort" + }, + "CidrIp":{ + "shape":"String", + "locationName":"cidrIp" + }, + "IpPermissions":{ + "shape":"IpPermissionList", + "locationName":"ipPermissions" + } + } + }, + "RevokeSecurityGroupIngressRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupName":{"shape":"String"}, + "GroupId":{"shape":"String"}, + "SourceSecurityGroupName":{"shape":"String"}, + "SourceSecurityGroupOwnerId":{"shape":"String"}, + "IpProtocol":{"shape":"String"}, + "FromPort":{"shape":"Integer"}, + "ToPort":{"shape":"Integer"}, + "CidrIp":{"shape":"String"}, + "IpPermissions":{"shape":"IpPermissionList"} + } + }, + "Route":{ + "type":"structure", + "members":{ + "DestinationCidrBlock":{ + "shape":"String", + "locationName":"destinationCidrBlock" + }, + "DestinationPrefixListId":{ + "shape":"String", + "locationName":"destinationPrefixListId" + }, + "GatewayId":{ + "shape":"String", + "locationName":"gatewayId" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "InstanceOwnerId":{ + "shape":"String", + "locationName":"instanceOwnerId" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "VpcPeeringConnectionId":{ + "shape":"String", + "locationName":"vpcPeeringConnectionId" + }, + "State":{ + "shape":"RouteState", + "locationName":"state" + }, + "Origin":{ + "shape":"RouteOrigin", + "locationName":"origin" + } + } + }, + "RouteList":{ + "type":"list", + "member":{ + "shape":"Route", + "locationName":"item" + } + }, + "RouteOrigin":{ + "type":"string", + "enum":[ + "CreateRouteTable", + "CreateRoute", + "EnableVgwRoutePropagation" + ] + }, + "RouteState":{ + "type":"string", + "enum":[ + "active", + "blackhole" + ] + 
}, + "RouteTable":{ + "type":"structure", + "members":{ + "RouteTableId":{ + "shape":"String", + "locationName":"routeTableId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "Routes":{ + "shape":"RouteList", + "locationName":"routeSet" + }, + "Associations":{ + "shape":"RouteTableAssociationList", + "locationName":"associationSet" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "PropagatingVgws":{ + "shape":"PropagatingVgwList", + "locationName":"propagatingVgwSet" + } + } + }, + "RouteTableAssociation":{ + "type":"structure", + "members":{ + "RouteTableAssociationId":{ + "shape":"String", + "locationName":"routeTableAssociationId" + }, + "RouteTableId":{ + "shape":"String", + "locationName":"routeTableId" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "Main":{ + "shape":"Boolean", + "locationName":"main" + } + } + }, + "RouteTableAssociationList":{ + "type":"list", + "member":{ + "shape":"RouteTableAssociation", + "locationName":"item" + } + }, + "RouteTableList":{ + "type":"list", + "member":{ + "shape":"RouteTable", + "locationName":"item" + } + }, + "RuleAction":{ + "type":"string", + "enum":[ + "allow", + "deny" + ] + }, + "RunInstancesMonitoringEnabled":{ + "type":"structure", + "required":["Enabled"], + "members":{ + "Enabled":{ + "shape":"Boolean", + "locationName":"enabled" + } + } + }, + "RunInstancesRequest":{ + "type":"structure", + "required":[ + "ImageId", + "MinCount", + "MaxCount" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ImageId":{"shape":"String"}, + "MinCount":{"shape":"Integer"}, + "MaxCount":{"shape":"Integer"}, + "KeyName":{"shape":"String"}, + "SecurityGroups":{ + "shape":"SecurityGroupStringList", + "locationName":"SecurityGroup" + }, + "SecurityGroupIds":{ + "shape":"SecurityGroupIdStringList", + "locationName":"SecurityGroupId" + }, + "UserData":{"shape":"String"}, + "InstanceType":{"shape":"InstanceType"}, + "Placement":{"shape":"Placement"}, + "KernelId":{"shape":"String"}, + "RamdiskId":{"shape":"String"}, + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappingRequestList", + "locationName":"BlockDeviceMapping" + }, + "Monitoring":{"shape":"RunInstancesMonitoringEnabled"}, + "SubnetId":{"shape":"String"}, + "DisableApiTermination":{ + "shape":"Boolean", + "locationName":"disableApiTermination" + }, + "InstanceInitiatedShutdownBehavior":{ + "shape":"ShutdownBehavior", + "locationName":"instanceInitiatedShutdownBehavior" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + }, + "AdditionalInfo":{ + "shape":"String", + "locationName":"additionalInfo" + }, + "NetworkInterfaces":{ + "shape":"InstanceNetworkInterfaceSpecificationList", + "locationName":"networkInterface" + }, + "IamInstanceProfile":{ + "shape":"IamInstanceProfileSpecification", + "locationName":"iamInstanceProfile" + }, + "EbsOptimized":{ + "shape":"Boolean", + "locationName":"ebsOptimized" + } + } + }, + "S3Storage":{ + "type":"structure", + "members":{ + "Bucket":{ + "shape":"String", + "locationName":"bucket" + }, + "Prefix":{ + "shape":"String", + "locationName":"prefix" + }, + "AWSAccessKeyId":{"shape":"String"}, + "UploadPolicy":{ + "shape":"Blob", + "locationName":"uploadPolicy" + }, + "UploadPolicySignature":{ + "shape":"String", + "locationName":"uploadPolicySignature" + } + } + }, + "SecurityGroup":{ + "type":"structure", + "members":{ + "OwnerId":{ + "shape":"String", + 
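RunInstancesRequest above is the broadest request shape in this chunk, but only ImageId, MinCount and MaxCount are required; the call returns a Reservation (defined earlier), whose instancesSet carries the launched instances. A sketch, again with placeholders:

    resv, err := svc.RunInstances(&ec2.RunInstancesInput{
        ImageId:      aws.String("ami-12345678"), // placeholder
        MinCount:     aws.Int64(1),
        MaxCount:     aws.Int64(1),
        InstanceType: aws.String(ec2.InstanceTypeT2Micro),
        // Enabled is required by the RunInstancesMonitoringEnabled shape above.
        Monitoring: &ec2.RunInstancesMonitoringEnabled{Enabled: aws.Bool(true)},
    })
    if err == nil {
        for _, inst := range resv.Instances {
            fmt.Println("launched:", aws.StringValue(inst.InstanceId))
        }
    }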
"locationName":"ownerId" + }, + "GroupName":{ + "shape":"String", + "locationName":"groupName" + }, + "GroupId":{ + "shape":"String", + "locationName":"groupId" + }, + "Description":{ + "shape":"String", + "locationName":"groupDescription" + }, + "IpPermissions":{ + "shape":"IpPermissionList", + "locationName":"ipPermissions" + }, + "IpPermissionsEgress":{ + "shape":"IpPermissionList", + "locationName":"ipPermissionsEgress" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "SecurityGroupIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SecurityGroupId" + } + }, + "SecurityGroupList":{ + "type":"list", + "member":{ + "shape":"SecurityGroup", + "locationName":"item" + } + }, + "SecurityGroupStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SecurityGroup" + } + }, + "ShutdownBehavior":{ + "type":"string", + "enum":[ + "stop", + "terminate" + ] + }, + "Snapshot":{ + "type":"structure", + "members":{ + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + }, + "VolumeId":{ + "shape":"String", + "locationName":"volumeId" + }, + "State":{ + "shape":"SnapshotState", + "locationName":"status" + }, + "StateMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "StartTime":{ + "shape":"DateTime", + "locationName":"startTime" + }, + "Progress":{ + "shape":"String", + "locationName":"progress" + }, + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "VolumeSize":{ + "shape":"Integer", + "locationName":"volumeSize" + }, + "OwnerAlias":{ + "shape":"String", + "locationName":"ownerAlias" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "Encrypted":{ + "shape":"Boolean", + "locationName":"encrypted" + }, + "KmsKeyId":{ + "shape":"String", + "locationName":"kmsKeyId" + }, + "DataEncryptionKeyId":{ + "shape":"String", + "locationName":"dataEncryptionKeyId" + } + } + }, + "SnapshotAttributeName":{ + "type":"string", + "enum":[ + "productCodes", + "createVolumePermission" + ] + }, + "SnapshotDetail":{ + "type":"structure", + "members":{ + "DiskImageSize":{ + "shape":"Double", + "locationName":"diskImageSize" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "Format":{ + "shape":"String", + "locationName":"format" + }, + "Url":{ + "shape":"String", + "locationName":"url" + }, + "UserBucket":{ + "shape":"UserBucketDetails", + "locationName":"userBucket" + }, + "DeviceName":{ + "shape":"String", + "locationName":"deviceName" + }, + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + }, + "Progress":{ + "shape":"String", + "locationName":"progress" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "Status":{ + "shape":"String", + "locationName":"status" + } + } + }, + "SnapshotDetailList":{ + "type":"list", + "member":{ + "shape":"SnapshotDetail", + "locationName":"item" + } + }, + "SnapshotDiskContainer":{ + "type":"structure", + "members":{ + "Description":{"shape":"String"}, + "Format":{"shape":"String"}, + "Url":{"shape":"String"}, + "UserBucket":{"shape":"UserBucket"} + } + }, + "SnapshotIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SnapshotId" + } + }, + "SnapshotList":{ + "type":"list", + "member":{ + "shape":"Snapshot", + "locationName":"item" + } + }, + "SnapshotState":{ + "type":"string", + 
"enum":[ + "pending", + "completed", + "error" + ] + }, + "SnapshotTaskDetail":{ + "type":"structure", + "members":{ + "DiskImageSize":{ + "shape":"Double", + "locationName":"diskImageSize" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "Format":{ + "shape":"String", + "locationName":"format" + }, + "Url":{ + "shape":"String", + "locationName":"url" + }, + "UserBucket":{ + "shape":"UserBucketDetails", + "locationName":"userBucket" + }, + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + }, + "Progress":{ + "shape":"String", + "locationName":"progress" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "Status":{ + "shape":"String", + "locationName":"status" + } + } + }, + "SpotDatafeedSubscription":{ + "type":"structure", + "members":{ + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "Bucket":{ + "shape":"String", + "locationName":"bucket" + }, + "Prefix":{ + "shape":"String", + "locationName":"prefix" + }, + "State":{ + "shape":"DatafeedSubscriptionState", + "locationName":"state" + }, + "Fault":{ + "shape":"SpotInstanceStateFault", + "locationName":"fault" + } + } + }, + "SpotFleetLaunchSpecification":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"String", + "locationName":"imageId" + }, + "KeyName":{ + "shape":"String", + "locationName":"keyName" + }, + "SecurityGroups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + }, + "UserData":{ + "shape":"String", + "locationName":"userData" + }, + "AddressingType":{ + "shape":"String", + "locationName":"addressingType" + }, + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + }, + "Placement":{ + "shape":"SpotPlacement", + "locationName":"placement" + }, + "KernelId":{ + "shape":"String", + "locationName":"kernelId" + }, + "RamdiskId":{ + "shape":"String", + "locationName":"ramdiskId" + }, + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappingList", + "locationName":"blockDeviceMapping" + }, + "Monitoring":{ + "shape":"SpotFleetMonitoring", + "locationName":"monitoring" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "NetworkInterfaces":{ + "shape":"InstanceNetworkInterfaceSpecificationList", + "locationName":"networkInterfaceSet" + }, + "IamInstanceProfile":{ + "shape":"IamInstanceProfileSpecification", + "locationName":"iamInstanceProfile" + }, + "EbsOptimized":{ + "shape":"Boolean", + "locationName":"ebsOptimized" + }, + "WeightedCapacity":{ + "shape":"Double", + "locationName":"weightedCapacity" + }, + "SpotPrice":{ + "shape":"String", + "locationName":"spotPrice" + } + } + }, + "SpotFleetMonitoring":{ + "type":"structure", + "members":{ + "Enabled":{ + "shape":"Boolean", + "locationName":"enabled" + } + } + }, + "SpotFleetRequestConfig":{ + "type":"structure", + "required":[ + "SpotFleetRequestId", + "SpotFleetRequestState", + "SpotFleetRequestConfig" + ], + "members":{ + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + }, + "SpotFleetRequestState":{ + "shape":"BatchState", + "locationName":"spotFleetRequestState" + }, + "SpotFleetRequestConfig":{ + "shape":"SpotFleetRequestConfigData", + "locationName":"spotFleetRequestConfig" + } + } + }, + "SpotFleetRequestConfigData":{ + "type":"structure", + "required":[ + "SpotPrice", + "TargetCapacity", + "IamFleetRole", + "LaunchSpecifications" + ], + "members":{ + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + }, + "SpotPrice":{ + "shape":"String", + 
"locationName":"spotPrice" + }, + "TargetCapacity":{ + "shape":"Integer", + "locationName":"targetCapacity" + }, + "ValidFrom":{ + "shape":"DateTime", + "locationName":"validFrom" + }, + "ValidUntil":{ + "shape":"DateTime", + "locationName":"validUntil" + }, + "TerminateInstancesWithExpiration":{ + "shape":"Boolean", + "locationName":"terminateInstancesWithExpiration" + }, + "IamFleetRole":{ + "shape":"String", + "locationName":"iamFleetRole" + }, + "LaunchSpecifications":{ + "shape":"LaunchSpecsList", + "locationName":"launchSpecifications" + }, + "AllocationStrategy":{ + "shape":"AllocationStrategy", + "locationName":"allocationStrategy" + } + } + }, + "SpotFleetRequestConfigSet":{ + "type":"list", + "member":{ + "shape":"SpotFleetRequestConfig", + "locationName":"item" + } + }, + "SpotInstanceRequest":{ + "type":"structure", + "members":{ + "SpotInstanceRequestId":{ + "shape":"String", + "locationName":"spotInstanceRequestId" + }, + "SpotPrice":{ + "shape":"String", + "locationName":"spotPrice" + }, + "Type":{ + "shape":"SpotInstanceType", + "locationName":"type" + }, + "State":{ + "shape":"SpotInstanceState", + "locationName":"state" + }, + "Fault":{ + "shape":"SpotInstanceStateFault", + "locationName":"fault" + }, + "Status":{ + "shape":"SpotInstanceStatus", + "locationName":"status" + }, + "ValidFrom":{ + "shape":"DateTime", + "locationName":"validFrom" + }, + "ValidUntil":{ + "shape":"DateTime", + "locationName":"validUntil" + }, + "LaunchGroup":{ + "shape":"String", + "locationName":"launchGroup" + }, + "AvailabilityZoneGroup":{ + "shape":"String", + "locationName":"availabilityZoneGroup" + }, + "LaunchSpecification":{ + "shape":"LaunchSpecification", + "locationName":"launchSpecification" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "CreateTime":{ + "shape":"DateTime", + "locationName":"createTime" + }, + "ProductDescription":{ + "shape":"RIProductDescription", + "locationName":"productDescription" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "LaunchedAvailabilityZone":{ + "shape":"String", + "locationName":"launchedAvailabilityZone" + } + } + }, + "SpotInstanceRequestIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SpotInstanceRequestId" + } + }, + "SpotInstanceRequestList":{ + "type":"list", + "member":{ + "shape":"SpotInstanceRequest", + "locationName":"item" + } + }, + "SpotInstanceState":{ + "type":"string", + "enum":[ + "open", + "active", + "closed", + "cancelled", + "failed" + ] + }, + "SpotInstanceStateFault":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"String", + "locationName":"code" + }, + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "SpotInstanceStatus":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"String", + "locationName":"code" + }, + "UpdateTime":{ + "shape":"DateTime", + "locationName":"updateTime" + }, + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "SpotInstanceType":{ + "type":"string", + "enum":[ + "one-time", + "persistent" + ] + }, + "SpotPlacement":{ + "type":"structure", + "members":{ + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "GroupName":{ + "shape":"String", + "locationName":"groupName" + } + } + }, + "SpotPrice":{ + "type":"structure", + "members":{ + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + }, + "ProductDescription":{ + "shape":"RIProductDescription", + "locationName":"productDescription" + }, + 
"SpotPrice":{ + "shape":"String", + "locationName":"spotPrice" + }, + "Timestamp":{ + "shape":"DateTime", + "locationName":"timestamp" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + } + } + }, + "SpotPriceHistoryList":{ + "type":"list", + "member":{ + "shape":"SpotPrice", + "locationName":"item" + } + }, + "StartInstancesRequest":{ + "type":"structure", + "required":["InstanceIds"], + "members":{ + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + }, + "AdditionalInfo":{ + "shape":"String", + "locationName":"additionalInfo" + }, + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + } + } + }, + "StartInstancesResult":{ + "type":"structure", + "members":{ + "StartingInstances":{ + "shape":"InstanceStateChangeList", + "locationName":"instancesSet" + } + } + }, + "State":{ + "type":"string", + "enum":[ + "Pending", + "Available", + "Deleting", + "Deleted" + ] + }, + "StateReason":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"String", + "locationName":"code" + }, + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "Status":{ + "type":"string", + "enum":[ + "MoveInProgress", + "InVpc", + "InClassic" + ] + }, + "StatusName":{ + "type":"string", + "enum":["reachability"] + }, + "StatusType":{ + "type":"string", + "enum":[ + "passed", + "failed", + "insufficient-data", + "initializing" + ] + }, + "StopInstancesRequest":{ + "type":"structure", + "required":["InstanceIds"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + }, + "Force":{ + "shape":"Boolean", + "locationName":"force" + } + } + }, + "StopInstancesResult":{ + "type":"structure", + "members":{ + "StoppingInstances":{ + "shape":"InstanceStateChangeList", + "locationName":"instancesSet" + } + } + }, + "Storage":{ + "type":"structure", + "members":{ + "S3":{"shape":"S3Storage"} + } + }, + "String":{"type":"string"}, + "Subnet":{ + "type":"structure", + "members":{ + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "State":{ + "shape":"SubnetState", + "locationName":"state" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "CidrBlock":{ + "shape":"String", + "locationName":"cidrBlock" + }, + "AvailableIpAddressCount":{ + "shape":"Integer", + "locationName":"availableIpAddressCount" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "DefaultForAz":{ + "shape":"Boolean", + "locationName":"defaultForAz" + }, + "MapPublicIpOnLaunch":{ + "shape":"Boolean", + "locationName":"mapPublicIpOnLaunch" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "SubnetIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SubnetId" + } + }, + "SubnetList":{ + "type":"list", + "member":{ + "shape":"Subnet", + "locationName":"item" + } + }, + "SubnetState":{ + "type":"string", + "enum":[ + "pending", + "available" + ] + }, + "SummaryStatus":{ + "type":"string", + "enum":[ + "ok", + "impaired", + "insufficient-data", + "not-applicable", + "initializing" + ] + }, + "Tag":{ + "type":"structure", + "members":{ + "Key":{ + "shape":"String", + "locationName":"key" + }, + "Value":{ + "shape":"String", + "locationName":"value" + } + } + }, + "TagDescription":{ + "type":"structure", + "members":{ + "ResourceId":{ + "shape":"String", + "locationName":"resourceId" + }, + "ResourceType":{ + "shape":"ResourceType", + 
"locationName":"resourceType" + }, + "Key":{ + "shape":"String", + "locationName":"key" + }, + "Value":{ + "shape":"String", + "locationName":"value" + } + } + }, + "TagDescriptionList":{ + "type":"list", + "member":{ + "shape":"TagDescription", + "locationName":"item" + } + }, + "TagList":{ + "type":"list", + "member":{ + "shape":"Tag", + "locationName":"item" + } + }, + "TelemetryStatus":{ + "type":"string", + "enum":[ + "UP", + "DOWN" + ] + }, + "Tenancy":{ + "type":"string", + "enum":[ + "default", + "dedicated" + ] + }, + "TerminateInstancesRequest":{ + "type":"structure", + "required":["InstanceIds"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + } + } + }, + "TerminateInstancesResult":{ + "type":"structure", + "members":{ + "TerminatingInstances":{ + "shape":"InstanceStateChangeList", + "locationName":"instancesSet" + } + } + }, + "TrafficType":{ + "type":"string", + "enum":[ + "ACCEPT", + "REJECT", + "ALL" + ] + }, + "UnassignPrivateIpAddressesRequest":{ + "type":"structure", + "required":[ + "NetworkInterfaceId", + "PrivateIpAddresses" + ], + "members":{ + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "PrivateIpAddresses":{ + "shape":"PrivateIpAddressStringList", + "locationName":"privateIpAddress" + } + } + }, + "UnmonitorInstancesRequest":{ + "type":"structure", + "required":["InstanceIds"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + } + } + }, + "UnmonitorInstancesResult":{ + "type":"structure", + "members":{ + "InstanceMonitorings":{ + "shape":"InstanceMonitoringList", + "locationName":"instancesSet" + } + } + }, + "UnsuccessfulItem":{ + "type":"structure", + "required":["Error"], + "members":{ + "ResourceId":{ + "shape":"String", + "locationName":"resourceId" + }, + "Error":{ + "shape":"UnsuccessfulItemError", + "locationName":"error" + } + } + }, + "UnsuccessfulItemError":{ + "type":"structure", + "required":[ + "Code", + "Message" + ], + "members":{ + "Code":{ + "shape":"String", + "locationName":"code" + }, + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "UnsuccessfulItemSet":{ + "type":"list", + "member":{ + "shape":"UnsuccessfulItem", + "locationName":"item" + } + }, + "UserBucket":{ + "type":"structure", + "members":{ + "S3Bucket":{"shape":"String"}, + "S3Key":{"shape":"String"} + } + }, + "UserBucketDetails":{ + "type":"structure", + "members":{ + "S3Bucket":{ + "shape":"String", + "locationName":"s3Bucket" + }, + "S3Key":{ + "shape":"String", + "locationName":"s3Key" + } + } + }, + "UserData":{ + "type":"structure", + "members":{ + "Data":{ + "shape":"String", + "locationName":"data" + } + } + }, + "UserGroupStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"UserGroup" + } + }, + "UserIdGroupPair":{ + "type":"structure", + "members":{ + "UserId":{ + "shape":"String", + "locationName":"userId" + }, + "GroupName":{ + "shape":"String", + "locationName":"groupName" + }, + "GroupId":{ + "shape":"String", + "locationName":"groupId" + } + } + }, + "UserIdGroupPairList":{ + "type":"list", + "member":{ + "shape":"UserIdGroupPair", + "locationName":"item" + } + }, + "UserIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"UserId" + } + }, + "ValueStringList":{ + "type":"list", + "member":{ + "shape":"String", + 
"locationName":"item" + } + }, + "VgwTelemetry":{ + "type":"structure", + "members":{ + "OutsideIpAddress":{ + "shape":"String", + "locationName":"outsideIpAddress" + }, + "Status":{ + "shape":"TelemetryStatus", + "locationName":"status" + }, + "LastStatusChange":{ + "shape":"DateTime", + "locationName":"lastStatusChange" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "AcceptedRouteCount":{ + "shape":"Integer", + "locationName":"acceptedRouteCount" + } + } + }, + "VgwTelemetryList":{ + "type":"list", + "member":{ + "shape":"VgwTelemetry", + "locationName":"item" + } + }, + "VirtualizationType":{ + "type":"string", + "enum":[ + "hvm", + "paravirtual" + ] + }, + "Volume":{ + "type":"structure", + "members":{ + "VolumeId":{ + "shape":"String", + "locationName":"volumeId" + }, + "Size":{ + "shape":"Integer", + "locationName":"size" + }, + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "State":{ + "shape":"VolumeState", + "locationName":"status" + }, + "CreateTime":{ + "shape":"DateTime", + "locationName":"createTime" + }, + "Attachments":{ + "shape":"VolumeAttachmentList", + "locationName":"attachmentSet" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "VolumeType":{ + "shape":"VolumeType", + "locationName":"volumeType" + }, + "Iops":{ + "shape":"Integer", + "locationName":"iops" + }, + "Encrypted":{ + "shape":"Boolean", + "locationName":"encrypted" + }, + "KmsKeyId":{ + "shape":"String", + "locationName":"kmsKeyId" + } + } + }, + "VolumeAttachment":{ + "type":"structure", + "members":{ + "VolumeId":{ + "shape":"String", + "locationName":"volumeId" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Device":{ + "shape":"String", + "locationName":"device" + }, + "State":{ + "shape":"VolumeAttachmentState", + "locationName":"status" + }, + "AttachTime":{ + "shape":"DateTime", + "locationName":"attachTime" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + "locationName":"deleteOnTermination" + } + } + }, + "VolumeAttachmentList":{ + "type":"list", + "member":{ + "shape":"VolumeAttachment", + "locationName":"item" + } + }, + "VolumeAttachmentState":{ + "type":"string", + "enum":[ + "attaching", + "attached", + "detaching", + "detached" + ] + }, + "VolumeAttributeName":{ + "type":"string", + "enum":[ + "autoEnableIO", + "productCodes" + ] + }, + "VolumeDetail":{ + "type":"structure", + "required":["Size"], + "members":{ + "Size":{ + "shape":"Long", + "locationName":"size" + } + } + }, + "VolumeIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"VolumeId" + } + }, + "VolumeList":{ + "type":"list", + "member":{ + "shape":"Volume", + "locationName":"item" + } + }, + "VolumeState":{ + "type":"string", + "enum":[ + "creating", + "available", + "in-use", + "deleting", + "deleted", + "error" + ] + }, + "VolumeStatusAction":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"String", + "locationName":"code" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "EventType":{ + "shape":"String", + "locationName":"eventType" + }, + "EventId":{ + "shape":"String", + "locationName":"eventId" + } + } + }, + "VolumeStatusActionsList":{ + "type":"list", + "member":{ + "shape":"VolumeStatusAction", + "locationName":"item" + } + }, + "VolumeStatusDetails":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"VolumeStatusName", + 
"locationName":"name" + }, + "Status":{ + "shape":"String", + "locationName":"status" + } + } + }, + "VolumeStatusDetailsList":{ + "type":"list", + "member":{ + "shape":"VolumeStatusDetails", + "locationName":"item" + } + }, + "VolumeStatusEvent":{ + "type":"structure", + "members":{ + "EventType":{ + "shape":"String", + "locationName":"eventType" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "NotBefore":{ + "shape":"DateTime", + "locationName":"notBefore" + }, + "NotAfter":{ + "shape":"DateTime", + "locationName":"notAfter" + }, + "EventId":{ + "shape":"String", + "locationName":"eventId" + } + } + }, + "VolumeStatusEventsList":{ + "type":"list", + "member":{ + "shape":"VolumeStatusEvent", + "locationName":"item" + } + }, + "VolumeStatusInfo":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"VolumeStatusInfoStatus", + "locationName":"status" + }, + "Details":{ + "shape":"VolumeStatusDetailsList", + "locationName":"details" + } + } + }, + "VolumeStatusInfoStatus":{ + "type":"string", + "enum":[ + "ok", + "impaired", + "insufficient-data" + ] + }, + "VolumeStatusItem":{ + "type":"structure", + "members":{ + "VolumeId":{ + "shape":"String", + "locationName":"volumeId" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "VolumeStatus":{ + "shape":"VolumeStatusInfo", + "locationName":"volumeStatus" + }, + "Events":{ + "shape":"VolumeStatusEventsList", + "locationName":"eventsSet" + }, + "Actions":{ + "shape":"VolumeStatusActionsList", + "locationName":"actionsSet" + } + } + }, + "VolumeStatusList":{ + "type":"list", + "member":{ + "shape":"VolumeStatusItem", + "locationName":"item" + } + }, + "VolumeStatusName":{ + "type":"string", + "enum":[ + "io-enabled", + "io-performance" + ] + }, + "VolumeType":{ + "type":"string", + "enum":[ + "standard", + "io1", + "gp2" + ] + }, + "Vpc":{ + "type":"structure", + "members":{ + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "State":{ + "shape":"VpcState", + "locationName":"state" + }, + "CidrBlock":{ + "shape":"String", + "locationName":"cidrBlock" + }, + "DhcpOptionsId":{ + "shape":"String", + "locationName":"dhcpOptionsId" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "InstanceTenancy":{ + "shape":"Tenancy", + "locationName":"instanceTenancy" + }, + "IsDefault":{ + "shape":"Boolean", + "locationName":"isDefault" + } + } + }, + "VpcAttachment":{ + "type":"structure", + "members":{ + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "State":{ + "shape":"AttachmentStatus", + "locationName":"state" + } + } + }, + "VpcAttachmentList":{ + "type":"list", + "member":{ + "shape":"VpcAttachment", + "locationName":"item" + } + }, + "VpcAttributeName":{ + "type":"string", + "enum":[ + "enableDnsSupport", + "enableDnsHostnames" + ] + }, + "VpcClassicLink":{ + "type":"structure", + "members":{ + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "ClassicLinkEnabled":{ + "shape":"Boolean", + "locationName":"classicLinkEnabled" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "VpcClassicLinkIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"VpcId" + } + }, + "VpcClassicLinkList":{ + "type":"list", + "member":{ + "shape":"VpcClassicLink", + "locationName":"item" + } + }, + "VpcEndpoint":{ + "type":"structure", + "members":{ + "VpcEndpointId":{ + "shape":"String", + "locationName":"vpcEndpointId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + 
"ServiceName":{ + "shape":"String", + "locationName":"serviceName" + }, + "State":{ + "shape":"State", + "locationName":"state" + }, + "PolicyDocument":{ + "shape":"String", + "locationName":"policyDocument" + }, + "RouteTableIds":{ + "shape":"ValueStringList", + "locationName":"routeTableIdSet" + }, + "CreationTimestamp":{ + "shape":"DateTime", + "locationName":"creationTimestamp" + } + } + }, + "VpcEndpointSet":{ + "type":"list", + "member":{ + "shape":"VpcEndpoint", + "locationName":"item" + } + }, + "VpcIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"VpcId" + } + }, + "VpcList":{ + "type":"list", + "member":{ + "shape":"Vpc", + "locationName":"item" + } + }, + "VpcPeeringConnection":{ + "type":"structure", + "members":{ + "AccepterVpcInfo":{ + "shape":"VpcPeeringConnectionVpcInfo", + "locationName":"accepterVpcInfo" + }, + "ExpirationTime":{ + "shape":"DateTime", + "locationName":"expirationTime" + }, + "RequesterVpcInfo":{ + "shape":"VpcPeeringConnectionVpcInfo", + "locationName":"requesterVpcInfo" + }, + "Status":{ + "shape":"VpcPeeringConnectionStateReason", + "locationName":"status" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "VpcPeeringConnectionId":{ + "shape":"String", + "locationName":"vpcPeeringConnectionId" + } + } + }, + "VpcPeeringConnectionList":{ + "type":"list", + "member":{ + "shape":"VpcPeeringConnection", + "locationName":"item" + } + }, + "VpcPeeringConnectionStateReason":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"VpcPeeringConnectionStateReasonCode", + "locationName":"code" + }, + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "VpcPeeringConnectionStateReasonCode":{ + "type":"string", + "enum":[ + "initiating-request", + "pending-acceptance", + "active", + "deleted", + "rejected", + "failed", + "expired", + "provisioning", + "deleting" + ] + }, + "VpcPeeringConnectionVpcInfo":{ + "type":"structure", + "members":{ + "CidrBlock":{ + "shape":"String", + "locationName":"cidrBlock" + }, + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + } + } + }, + "VpcState":{ + "type":"string", + "enum":[ + "pending", + "available" + ] + }, + "VpnConnection":{ + "type":"structure", + "members":{ + "VpnConnectionId":{ + "shape":"String", + "locationName":"vpnConnectionId" + }, + "State":{ + "shape":"VpnState", + "locationName":"state" + }, + "CustomerGatewayConfiguration":{ + "shape":"String", + "locationName":"customerGatewayConfiguration" + }, + "Type":{ + "shape":"GatewayType", + "locationName":"type" + }, + "CustomerGatewayId":{ + "shape":"String", + "locationName":"customerGatewayId" + }, + "VpnGatewayId":{ + "shape":"String", + "locationName":"vpnGatewayId" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "VgwTelemetry":{ + "shape":"VgwTelemetryList", + "locationName":"vgwTelemetry" + }, + "Options":{ + "shape":"VpnConnectionOptions", + "locationName":"options" + }, + "Routes":{ + "shape":"VpnStaticRouteList", + "locationName":"routes" + } + } + }, + "VpnConnectionIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"VpnConnectionId" + } + }, + "VpnConnectionList":{ + "type":"list", + "member":{ + "shape":"VpnConnection", + "locationName":"item" + } + }, + "VpnConnectionOptions":{ + "type":"structure", + "members":{ + "StaticRoutesOnly":{ + "shape":"Boolean", + "locationName":"staticRoutesOnly" + } + } + }, + "VpnConnectionOptionsSpecification":{ + 
"type":"structure", + "members":{ + "StaticRoutesOnly":{ + "shape":"Boolean", + "locationName":"staticRoutesOnly" + } + } + }, + "VpnGateway":{ + "type":"structure", + "members":{ + "VpnGatewayId":{ + "shape":"String", + "locationName":"vpnGatewayId" + }, + "State":{ + "shape":"VpnState", + "locationName":"state" + }, + "Type":{ + "shape":"GatewayType", + "locationName":"type" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "VpcAttachments":{ + "shape":"VpcAttachmentList", + "locationName":"attachments" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "VpnGatewayIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"VpnGatewayId" + } + }, + "VpnGatewayList":{ + "type":"list", + "member":{ + "shape":"VpnGateway", + "locationName":"item" + } + }, + "VpnState":{ + "type":"string", + "enum":[ + "pending", + "available", + "deleting", + "deleted" + ] + }, + "VpnStaticRoute":{ + "type":"structure", + "members":{ + "DestinationCidrBlock":{ + "shape":"String", + "locationName":"destinationCidrBlock" + }, + "Source":{ + "shape":"VpnStaticRouteSource", + "locationName":"source" + }, + "State":{ + "shape":"VpnState", + "locationName":"state" + } + } + }, + "VpnStaticRouteList":{ + "type":"list", + "member":{ + "shape":"VpnStaticRoute", + "locationName":"item" + } + }, + "VpnStaticRouteSource":{ + "type":"string", + "enum":["Static"] + }, + "ZoneNameStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ZoneName" + } + }, + "NewDhcpConfigurationList":{ + "type":"list", + "member":{ + "shape":"NewDhcpConfiguration", + "locationName":"item" + } + }, + "NewDhcpConfiguration":{ + "type":"structure", + "members":{ + "Key":{ + "shape":"String", + "locationName":"key" + }, + "Values":{ + "shape":"ValueStringList", + "locationName":"Value" + } + } + }, + "DhcpConfigurationValueList":{ + "type":"list", + "member":{ + "shape":"AttributeValue", + "locationName":"item" + } + }, + "Blob":{"type":"blob"}, + "BlobAttributeValue":{ + "type":"structure", + "members":{ + "Value":{ + "shape":"Blob", + "locationName":"value" + } + } + }, + "RequestSpotLaunchSpecification":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"String", + "locationName":"imageId" + }, + "KeyName":{ + "shape":"String", + "locationName":"keyName" + }, + "SecurityGroups":{ + "shape":"ValueStringList", + "locationName":"SecurityGroup" + }, + "UserData":{ + "shape":"String", + "locationName":"userData" + }, + "AddressingType":{ + "shape":"String", + "locationName":"addressingType" + }, + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + }, + "Placement":{ + "shape":"SpotPlacement", + "locationName":"placement" + }, + "KernelId":{ + "shape":"String", + "locationName":"kernelId" + }, + "RamdiskId":{ + "shape":"String", + "locationName":"ramdiskId" + }, + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappingList", + "locationName":"blockDeviceMapping" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "NetworkInterfaces":{ + "shape":"InstanceNetworkInterfaceSpecificationList", + "locationName":"NetworkInterface" + }, + "IamInstanceProfile":{ + "shape":"IamInstanceProfileSpecification", + "locationName":"iamInstanceProfile" + }, + "EbsOptimized":{ + "shape":"Boolean", + "locationName":"ebsOptimized" + }, + "Monitoring":{ + "shape":"RunInstancesMonitoringEnabled", + "locationName":"monitoring" + }, + "SecurityGroupIds":{ + "shape":"ValueStringList", + 
"locationName":"SecurityGroupId" + } + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2015-04-15/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2015-04-15/docs-2.json new file mode 100644 index 000000000..a970264b7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2015-04-15/docs-2.json @@ -0,0 +1,5495 @@ +{ + "version": "2.0", + "operations": { + "AcceptVpcPeeringConnection": "

    Accept a VPC peering connection request. To accept a request, the VPC peering connection must be in the pending-acceptance state, and you must be the owner of the peer VPC. Use the DescribeVpcPeeringConnections request to view your outstanding VPC peering connection requests.

    ", + "AllocateAddress": "

    Acquires an Elastic IP address.

    An Elastic IP address is for use either in the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

    ", + "AssignPrivateIpAddresses": "

    Assigns one or more secondary private IP addresses to the specified network interface. You can specify one or more specific secondary IP addresses, or you can specify the number of secondary IP addresses to be automatically assigned within the subnet's CIDR block range. The number of secondary IP addresses that you can assign to an instance varies by instance type. For information about instance types, see Instance Types in the Amazon Elastic Compute Cloud User Guide. For more information about Elastic IP addresses, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

    AssignPrivateIpAddresses is available only in EC2-VPC.

    ", + "AssociateAddress": "

    Associates an Elastic IP address with an instance or a network interface.

    An Elastic IP address is for use in either the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

    [EC2-Classic, VPC in an EC2-VPC-only account] If the Elastic IP address is already associated with a different instance, it is disassociated from that instance and associated with the specified instance.

    [VPC in an EC2-Classic account] If you don't specify a private IP address, the Elastic IP address is associated with the primary IP address. If the Elastic IP address is already associated with a different instance or a network interface, you get an error unless you allow reassociation.

    This is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error.

    ", + "AssociateDhcpOptions": "

    Associates a set of DHCP options (that you've previously created) with the specified VPC, or associates no DHCP options with the VPC.

    After you associate the options with the VPC, any existing instances and all new instances that you launch in that VPC use the options. You don't need to restart or relaunch the instances. They automatically pick up the changes within a few hours, depending on how frequently the instance renews its DHCP lease. You can explicitly renew the lease using the operating system on the instance.

    For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

    ", + "AssociateRouteTable": "

    Associates a subnet with a route table. The subnet and route table must be in the same VPC. This association causes traffic originating from the subnet to be routed according to the routes in the route table. The action returns an association ID, which you need in order to disassociate the route table from the subnet later. A route table can be associated with multiple subnets.

    For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

    ", + "AttachClassicLinkVpc": "

    Links an EC2-Classic instance to a ClassicLink-enabled VPC through one or more of the VPC's security groups. You cannot link an EC2-Classic instance to more than one VPC at a time. You can only link an instance that's in the running state. An instance is automatically unlinked from a VPC when it's stopped - you can link it to the VPC again when you restart it.

    After you've linked an instance, you cannot change the VPC security groups that are associated with it. To change the security groups, you must first unlink the instance, and then link it again.

    Linking your instance to a VPC is sometimes referred to as attaching your instance.

    ", + "AttachInternetGateway": "

    Attaches an Internet gateway to a VPC, enabling connectivity between the Internet and the VPC. For more information about your VPC and Internet gateway, see the Amazon Virtual Private Cloud User Guide.

    ", + "AttachNetworkInterface": "

    Attaches a network interface to an instance.

    ", + "AttachVolume": "

    Attaches an EBS volume to a running or stopped instance and exposes it to the instance with the specified device name.

    Encrypted EBS volumes may only be attached to instances that support Amazon EBS encryption. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

    For a list of supported device names, see Attaching an EBS Volume to an Instance. Any device names that aren't reserved for instance store volumes can be used for EBS volumes. For more information, see Amazon EC2 Instance Store in the Amazon Elastic Compute Cloud User Guide.

    If a volume has an AWS Marketplace product code:

    • The volume can be attached only to a stopped instance.
    • AWS Marketplace product codes are copied from the volume to the instance.
    • You must be subscribed to the product.
    • The instance type and operating system of the instance must support the product. For example, you can't detach a volume from a Windows instance and attach it to a Linux instance.

    For an overview of the AWS Marketplace, see Introducing AWS Marketplace.

    For more information about EBS volumes, see Attaching Amazon EBS Volumes in the Amazon Elastic Compute Cloud User Guide.
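
    As an illustration only of how this call is made through the vendored SDK, here is a minimal aws-sdk-go sketch; the region, volume ID, instance ID, and device name are hypothetical placeholders, not values from this patch:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/ec2"
    )

    func main() {
        svc := ec2.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))
        // Attach an EBS volume to a running or stopped instance as /dev/sdf.
        // Both IDs below are placeholders.
        att, err := svc.AttachVolume(&ec2.AttachVolumeInput{
            VolumeId:   aws.String("vol-049df61146c4d7901"),
            InstanceId: aws.String("i-1234567890abcdef0"),
            Device:     aws.String("/dev/sdf"),
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("attachment state:", aws.StringValue(att.State))
    }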

    ", + "AttachVpnGateway": "

    Attaches a virtual private gateway to a VPC. For more information, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "AuthorizeSecurityGroupEgress": "

    Adds one or more egress rules to a security group for use with a VPC. Specifically, this action permits instances to send traffic to one or more destination CIDR IP address ranges, or to one or more destination security groups for the same VPC.

    You can have up to 50 rules per security group (covering both ingress and egress rules).

    A security group is for use with instances either in the EC2-Classic platform or in a specific VPC. This action doesn't apply to security groups for use in EC2-Classic. For more information, see Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.

    Each rule consists of the protocol (for example, TCP), plus either a CIDR range or a source group. For the TCP and UDP protocols, you must also specify the destination port or port range. For the ICMP protocol, you must also specify the ICMP type and code. You can use -1 for the type or code to mean all types or all codes.

    Rule changes are propagated to affected instances as quickly as possible. However, a small delay might occur.

    ", + "AuthorizeSecurityGroupIngress": "

    Adds one or more ingress rules to a security group.

    EC2-Classic: You can have up to 100 rules per group.

    EC2-VPC: You can have up to 50 rules per group (covering both ingress and egress rules).

    Rule changes are propagated to instances within the security group as quickly as possible. However, a small delay might occur.

    [EC2-Classic] This action gives one or more CIDR IP address ranges permission to access a security group in your account, or gives one or more security groups (called the source groups) permission to access a security group for your account. A source group can be in your own AWS account or in another account.

    [EC2-VPC] This action gives one or more CIDR IP address ranges permission to access a security group in your VPC, or gives one or more other security groups (called the source groups) permission to access a security group for your VPC. The security groups must all be for the same VPC.
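
    To make the rule structure concrete (protocol, port range, and a CIDR range), a hedged sketch that opens TCP port 22 from one CIDR block; the group ID and CIDR are placeholders, and the snippet assumes the imports and *ec2.EC2 client from the AttachVolume sketch above:

    // allowSSH adds one ingress rule: TCP/22 from 203.0.113.0/24 (placeholder values).
    func allowSSH(svc *ec2.EC2) error {
        _, err := svc.AuthorizeSecurityGroupIngress(&ec2.AuthorizeSecurityGroupIngressInput{
            GroupId:    aws.String("sg-0123456789abcdef0"),
            IpProtocol: aws.String("tcp"),
            FromPort:   aws.Int64(22),
            ToPort:     aws.Int64(22),
            CidrIp:     aws.String("203.0.113.0/24"),
        })
        return err
    }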

    ", + "BundleInstance": "

    Bundles an Amazon instance store-backed Windows instance.

    During bundling, only the root device volume (C:\\) is bundled. Data on other instance store volumes is not preserved.

    This action is not applicable for Linux/Unix instances or Windows instances that are backed by Amazon EBS.

    For more information, see Creating an Instance Store-Backed Windows AMI.

    ", + "CancelBundleTask": "

    Cancels a bundling operation for an instance store-backed Windows instance.

    ", + "CancelConversionTask": "

    Cancels an active conversion task. The task can be the import of an instance or volume. The action removes all artifacts of the conversion, including a partially uploaded volume or instance. If the conversion is complete or is in the process of transferring the final disk image, the command fails and returns an exception.

    For more information, see Using the Command Line Tools to Import Your Virtual Machine to Amazon EC2 in the Amazon Elastic Compute Cloud User Guide.

    ", + "CancelExportTask": "

    Cancels an active export task. The request removes all artifacts of the export, including any partially-created Amazon S3 objects. If the export task is complete or is in the process of transferring the final disk image, the command fails and returns an error.

    ", + "CancelImportTask": "

    Cancels an in-process import virtual machine or import snapshot task.

    ", + "CancelReservedInstancesListing": "

    Cancels the specified Reserved Instance listing in the Reserved Instance Marketplace.

    For more information, see Reserved Instance Marketplace in the Amazon Elastic Compute Cloud User Guide.

    ", + "CancelSpotFleetRequests": "

    Cancels the specified Spot fleet requests.

    ", + "CancelSpotInstanceRequests": "

    Cancels one or more Spot instance requests. Spot instances are instances that Amazon EC2 starts on your behalf when the bid price that you specify exceeds the current Spot price. Amazon EC2 periodically sets the Spot price based on available Spot instance capacity and current Spot instance requests. For more information, see Spot Instance Requests in the Amazon Elastic Compute Cloud User Guide.

    Canceling a Spot instance request does not terminate running Spot instances associated with the request.

    ", + "ConfirmProductInstance": "

    Determines whether a product code is associated with an instance. This action can only be used by the owner of the product code. It is useful when a product code owner needs to verify whether another user's instance is eligible for support.

    ", + "CopyImage": "

    Initiates the copy of an AMI from the specified source region to the current region. You specify the destination region by using its endpoint when making the request. AMIs that use encrypted EBS snapshots cannot be copied with this method.

    For more information, see Copying AMIs in the Amazon Elastic Compute Cloud User Guide.

    ", + "CopySnapshot": "

    Copies a point-in-time snapshot of an EBS volume and stores it in Amazon S3. You can copy the snapshot within the same region or from one region to another. You can use the snapshot to create EBS volumes or Amazon Machine Images (AMIs). The snapshot is copied to the regional endpoint that you send the HTTP request to.

    Copies of encrypted EBS snapshots remain encrypted. Copies of unencrypted snapshots remain unencrypted, unless the Encrypted flag is specified during the snapshot copy operation. By default, encrypted snapshot copies use the default AWS Key Management Service (AWS KMS) customer master key (CMK); however, you can specify a non-default CMK with the KmsKeyId parameter.

    For more information, see Copying an Amazon EBS Snapshot in the Amazon Elastic Compute Cloud User Guide.
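
    A minimal sketch of an encrypted copy using a non-default CMK, as described above; the snapshot ID, regions, and key ARN are placeholders, and the snippet assumes an *ec2.EC2 client built in the destination region:

    // copyEncrypted copies a snapshot from us-west-2, encrypting it with a
    // non-default CMK (all identifiers are placeholders).
    func copyEncrypted(svc *ec2.EC2) (string, error) {
        out, err := svc.CopySnapshot(&ec2.CopySnapshotInput{
            SourceRegion:     aws.String("us-west-2"),
            SourceSnapshotId: aws.String("snap-0123456789abcdef0"),
            Encrypted:        aws.Bool(true),
            KmsKeyId:         aws.String("arn:aws:kms:us-east-1:123456789012:key/EXAMPLE"),
            Description:      aws.String("encrypted copy"),
        })
        if err != nil {
            return "", err
        }
        return aws.StringValue(out.SnapshotId), nil
    }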

    ", + "CreateCustomerGateway": "

    Provides information to AWS about your VPN customer gateway device. The customer gateway is the appliance at your end of the VPN connection. (The device on the AWS side of the VPN connection is the virtual private gateway.) You must provide the Internet-routable IP address of the customer gateway's external interface. The IP address must be static and can't be behind a device performing network address translation (NAT).

    For devices that use Border Gateway Protocol (BGP), you can also provide the device's BGP Autonomous System Number (ASN). You can use an existing ASN assigned to your network. If you don't have an ASN already, you can use a private ASN (in the 64512 - 65534 range).

    Amazon EC2 supports all 2-byte ASNs in the range of 1 - 65534, with the exception of 7224, which is reserved in the us-east-1 region, and 9059, which is reserved in the eu-west-1 region.

    For more information about VPN customer gateways, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide.

    You cannot create more than one customer gateway with the same VPN type, IP address, and BGP ASN parameter values. If you run an identical request more than one time, the first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent requests do not create new customer gateway resources.

    ", + "CreateDhcpOptions": "

    Creates a set of DHCP options for your VPC. After creating the set, you must associate it with the VPC, causing all existing and new instances that you launch in the VPC to use this set of DHCP options. The following are the individual DHCP options you can specify. For more information about the options, see RFC 2132.

    • domain-name-servers - The IP addresses of up to four domain name servers, or AmazonProvidedDNS. The default DHCP option set specifies AmazonProvidedDNS. If specifying more than one domain name server, specify the IP addresses in a single parameter, separated by commas.
    • domain-name - If you're using AmazonProvidedDNS in us-east-1, specify ec2.internal. If you're using AmazonProvidedDNS in another region, specify region.compute.internal (for example, ap-northeast-1.compute.internal). Otherwise, specify a domain name (for example, MyCompany.com). Important: Some Linux operating systems accept multiple domain names separated by spaces. However, Windows and other Linux operating systems treat the value as a single domain, which results in unexpected behavior. If your DHCP options set is associated with a VPC that has instances with multiple operating systems, specify only one domain name.
    • ntp-servers - The IP addresses of up to four Network Time Protocol (NTP) servers.
    • netbios-name-servers - The IP addresses of up to four NetBIOS name servers.
    • netbios-node-type - The NetBIOS node type (1, 2, 4, or 8). We recommend that you specify 2 (broadcast and multicast are not currently supported). For more information about these node types, see RFC 2132.

    Your VPC automatically starts out with a set of DHCP options that includes only a DNS server that we provide (AmazonProvidedDNS). If you create a set of options, and if your VPC has an Internet gateway, make sure to set the domain-name-servers option either to AmazonProvidedDNS or to a domain name server of your choice. For more information about DHCP options, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.
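
    A hedged sketch of the request shape, passing two of the options listed above via the NewDhcpConfiguration structure defined earlier in this patch; the domain name and server addresses are placeholders, and the snippet assumes the client and imports from the AttachVolume sketch:

    // createDhcpOptions creates a set with a domain name and custom DNS servers
    // (placeholder values).
    func createDhcpOptions(svc *ec2.EC2) error {
        _, err := svc.CreateDhcpOptions(&ec2.CreateDhcpOptionsInput{
            DhcpConfigurations: []*ec2.NewDhcpConfiguration{
                {Key: aws.String("domain-name"), Values: []*string{aws.String("example.com")}},
                {Key: aws.String("domain-name-servers"), Values: []*string{aws.String("10.2.5.1"), aws.String("10.2.5.2")}},
            },
        })
        return err
    }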

    ", + "CreateFlowLogs": "

    Creates one or more flow logs to capture IP traffic for a specific network interface, subnet, or VPC. Flow logs are delivered to a specified log group in Amazon CloudWatch Logs. If you specify a VPC or subnet in the request, a log stream is created in CloudWatch Logs for each network interface in the subnet or VPC. Log streams can include information about accepted and rejected traffic to a network interface. You can view the data in your log streams using Amazon CloudWatch Logs.

    In your request, you must also specify an IAM role that has permission to publish logs to CloudWatch Logs.

    ", + "CreateImage": "

    Creates an Amazon EBS-backed AMI from an Amazon EBS-backed instance that is either running or stopped.

    If you customized your instance with instance store volumes or EBS volumes in addition to the root device volume, the new AMI contains block device mapping information for those volumes. When you launch an instance from this new AMI, the instance automatically launches with those additional volumes.

    For more information, see Creating Amazon EBS-Backed Linux AMIs in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateInstanceExportTask": "

    Exports a running or stopped instance to an S3 bucket.

    For information about the supported operating systems, image formats, and known limitations for the types of instances you can export, see Exporting EC2 Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateInternetGateway": "

    Creates an Internet gateway for use with a VPC. After creating the Internet gateway, you attach it to a VPC using AttachInternetGateway.

    For more information about your VPC and Internet gateway, see the Amazon Virtual Private Cloud User Guide.

    ", + "CreateKeyPair": "

    Creates a 2048-bit RSA key pair with the specified name. Amazon EC2 stores the public key and displays the private key for you to save to a file. The private key is returned as an unencrypted PEM encoded PKCS#8 private key. If a key with the specified name already exists, Amazon EC2 returns an error.

    You can have up to five thousand key pairs per region.

    The key pair returned to you is available only in the region in which you create it. To create a key pair that is available in all regions, use ImportKeyPair.

    For more information about key pairs, see Key Pairs in the Amazon Elastic Compute Cloud User Guide.
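
    A minimal sketch that creates a key pair and prints the private key material, which is returned only on this call; the key name is a placeholder, and the snippet assumes the imports and client from the AttachVolume sketch:

    // createKeyPair creates a 2048-bit RSA key pair; KeyMaterial is returned once.
    func createKeyPair(svc *ec2.EC2) error {
        out, err := svc.CreateKeyPair(&ec2.CreateKeyPairInput{
            KeyName: aws.String("my-key-pair"), // placeholder name
        })
        if err != nil {
            return err
        }
        fmt.Println(aws.StringValue(out.KeyMaterial)) // save this to a .pem file
        return nil
    }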

    ", + "CreateNetworkAcl": "

    Creates a network ACL in a VPC. Network ACLs provide an optional layer of security (in addition to security groups) for the instances in your VPC.

    For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

    ", + "CreateNetworkAclEntry": "

    Creates an entry (a rule) in a network ACL with the specified rule number. Each network ACL has a set of numbered ingress rules and a separate set of numbered egress rules. When determining whether a packet should be allowed in or out of a subnet associated with the ACL, we process the entries in the ACL according to the rule numbers, in ascending order.

    We recommend that you leave room between the rule numbers (for example, 100, 110, 120, ...), and not number them one right after the other (for example, 101, 102, 103, ...). This makes it easier to add a rule between existing ones without having to renumber the rules.

    After you add an entry, you can't modify it; you must either replace it, or create an entry and delete the old one.

    For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide.
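
    Following the numbering advice above, a hedged sketch that inserts rule 110 allowing ingress HTTP; the ACL ID is a placeholder, and the snippet assumes the client and imports from the AttachVolume sketch:

    // allowHTTPEntry adds ingress rule 110: allow TCP/80 from anywhere
    // (placeholder ACL ID; NACL entries take the protocol number, "6" = TCP).
    func allowHTTPEntry(svc *ec2.EC2) error {
        _, err := svc.CreateNetworkAclEntry(&ec2.CreateNetworkAclEntryInput{
            NetworkAclId: aws.String("acl-0123456789abcdef0"),
            RuleNumber:   aws.Int64(110),
            Egress:       aws.Bool(false),
            Protocol:     aws.String("6"),
            RuleAction:   aws.String("allow"),
            CidrBlock:    aws.String("0.0.0.0/0"),
            PortRange:    &ec2.PortRange{From: aws.Int64(80), To: aws.Int64(80)},
        })
        return err
    }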

    ", + "CreateNetworkInterface": "

    Creates a network interface in the specified subnet.

    For more information about network interfaces, see Elastic Network Interfaces in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreatePlacementGroup": "

    Creates a placement group that you launch cluster instances into. You must give the group a name that's unique within the scope of your account.

    For more information about placement groups and cluster instances, see Cluster Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateReservedInstancesListing": "

    Creates a listing for Amazon EC2 Reserved Instances to be sold in the Reserved Instance Marketplace. You can submit one Reserved Instance listing at a time. To get a list of your Reserved Instances, you can use the DescribeReservedInstances operation.

    The Reserved Instance Marketplace matches sellers who want to resell Reserved Instance capacity that they no longer need with buyers who want to purchase additional capacity. Reserved Instances bought and sold through the Reserved Instance Marketplace work like any other Reserved Instances.

    To sell your Reserved Instances, you must first register as a seller in the Reserved Instance Marketplace. After completing the registration process, you can create a Reserved Instance Marketplace listing of some or all of your Reserved Instances, and specify the upfront price to receive for them. Your Reserved Instance listings then become available for purchase. To view the details of your Reserved Instance listing, you can use the DescribeReservedInstancesListings operation.

    For more information, see Reserved Instance Marketplace in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateRoute": "

    Creates a route in a route table within a VPC.

    You must specify one of the following targets: Internet gateway or virtual private gateway, NAT instance, VPC peering connection, or network interface.

    When determining how to route traffic, we use the route with the most specific match. For example, let's say the traffic is destined for 192.0.2.3, and the route table includes the following two routes:

    • 192.0.2.0/24 (goes to some target A)

    • 192.0.2.0/28 (goes to some target B)

    Both routes apply to the traffic destined for 192.0.2.3. However, the second route in the list covers a smaller number of IP addresses and is therefore more specific, so we use that route to determine where to target the traffic.

    For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.
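
    A minimal sketch adding the broader of the two example routes above, pointing 192.0.2.0/24 at an Internet gateway; the route table and gateway IDs are placeholders, and the snippet assumes the client and imports from the AttachVolume sketch:

    // addRoute routes 192.0.2.0/24 through an Internet gateway (placeholder IDs).
    func addRoute(svc *ec2.EC2) error {
        _, err := svc.CreateRoute(&ec2.CreateRouteInput{
            RouteTableId:         aws.String("rtb-0123456789abcdef0"),
            DestinationCidrBlock: aws.String("192.0.2.0/24"),
            GatewayId:            aws.String("igw-0123456789abcdef0"),
        })
        return err
    }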

    ", + "CreateRouteTable": "

    Creates a route table for the specified VPC. After you create a route table, you can add routes and associate the table with a subnet.

    For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

    ", + "CreateSecurityGroup": "

    Creates a security group.

    A security group is for use with instances either in the EC2-Classic platform or in a specific VPC. For more information, see Amazon EC2 Security Groups in the Amazon Elastic Compute Cloud User Guide and Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.

    EC2-Classic: You can have up to 500 security groups.

    EC2-VPC: You can create up to 100 security groups per VPC.

    When you create a security group, you specify a friendly name of your choice. You can have a security group for use in EC2-Classic with the same name as a security group for use in a VPC. However, you can't have two security groups for use in EC2-Classic with the same name or two security groups for use in a VPC with the same name.

    You have a default security group for use in EC2-Classic and a default security group for use in your VPC. If you don't specify a security group when you launch an instance, the instance is launched into the appropriate default security group. A default security group includes a default rule that grants instances unrestricted network access to each other.

    You can add or remove rules from your security groups using AuthorizeSecurityGroupIngress, AuthorizeSecurityGroupEgress, RevokeSecurityGroupIngress, and RevokeSecurityGroupEgress.

    ", + "CreateSnapshot": "

    Creates a snapshot of an EBS volume and stores it in Amazon S3. You can use snapshots for backups, to make copies of EBS volumes, and to save data before shutting down an instance.

    When a snapshot is created, any AWS Marketplace product codes that are associated with the source volume are propagated to the snapshot.

    You can take a snapshot of an attached volume that is in use. However, snapshots only capture data that has been written to your EBS volume at the time the snapshot command is issued; this may exclude any data that has been cached by any applications or the operating system. If you can pause any file systems on the volume long enough to take a snapshot, your snapshot should be complete. However, if you cannot pause all file writes to the volume, you should unmount the volume from within the instance, issue the snapshot command, and then remount the volume to ensure a consistent and complete snapshot. You may remount and use your volume while the snapshot status is pending.

    To create a snapshot for EBS volumes that serve as root devices, you should stop the instance before taking the snapshot.

    Snapshots that are taken from encrypted volumes are automatically encrypted. Volumes that are created from encrypted snapshots are also automatically encrypted. Your encrypted volumes and any associated snapshots always remain protected.

    For more information, see Amazon Elastic Block Store and Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.
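
    A hedged sketch of the call; the volume ID is a placeholder, the snippet assumes the client and imports from the AttachVolume sketch, and the returned snapshot starts out in the pending state:

    // snapshotVolume snapshots an EBS volume with a description (placeholder ID).
    func snapshotVolume(svc *ec2.EC2) (string, error) {
        snap, err := svc.CreateSnapshot(&ec2.CreateSnapshotInput{
            VolumeId:    aws.String("vol-049df61146c4d7901"),
            Description: aws.String("nightly backup"),
        })
        if err != nil {
            return "", err
        }
        return aws.StringValue(snap.SnapshotId), nil
    }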

    ", + "CreateSpotDatafeedSubscription": "

    Creates a data feed for Spot instances, enabling you to view Spot instance usage logs. You can create one data feed per AWS account. For more information, see Spot Instance Data Feed in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateSubnet": "

    Creates a subnet in an existing VPC.

    When you create each subnet, you provide the VPC ID and the CIDR block you want for the subnet. After you create a subnet, you can't change its CIDR block. The subnet's CIDR block can be the same as the VPC's CIDR block (assuming you want only a single subnet in the VPC), or a subset of the VPC's CIDR block. If you create more than one subnet in a VPC, the subnets' CIDR blocks must not overlap. The smallest subnet (and VPC) you can create uses a /28 netmask (16 IP addresses), and the largest uses a /16 netmask (65,536 IP addresses).

    AWS reserves both the first four IP addresses and the last IP address in each subnet's CIDR block. They're not available for use.

    If you add more than one subnet to a VPC, they're set up in a star topology with a logical router in the middle.

    If you launch an instance in a VPC using an Amazon EBS-backed AMI, the IP address doesn't change if you stop and restart the instance (unlike a similar instance launched outside a VPC, which gets a new IP address when restarted). It's therefore possible to have a subnet with no running instances (they're all stopped), but no remaining IP addresses available.

    For more information about subnets, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.
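
    A minimal sketch that carves a /24 subnet out of a VPC's /16 block, within the netmask limits described above; the VPC ID and CIDRs are placeholders, and the snippet assumes the client and imports from the AttachVolume sketch:

    // createSubnet creates a 10.0.1.0/24 subnet inside a 10.0.0.0/16 VPC
    // (placeholder VPC ID and CIDR).
    func createSubnet(svc *ec2.EC2) error {
        _, err := svc.CreateSubnet(&ec2.CreateSubnetInput{
            VpcId:     aws.String("vpc-0123456789abcdef0"),
            CidrBlock: aws.String("10.0.1.0/24"),
        })
        return err
    }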

    ", + "CreateTags": "

    Adds or overwrites one or more tags for the specified Amazon EC2 resource or resources. Each resource can have a maximum of 10 tags. Each tag consists of a key and optional value. Tag keys must be unique per resource.

    For more information about tags, see Tagging Your Resources in the Amazon Elastic Compute Cloud User Guide.
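
    A hedged sketch tagging two resources in one call, per the key/value model above; the resource IDs are placeholders, and the snippet assumes the client and imports from the AttachVolume sketch:

    // nameResources sets a Name tag on an instance and a volume together
    // (placeholder IDs).
    func nameResources(svc *ec2.EC2) error {
        _, err := svc.CreateTags(&ec2.CreateTagsInput{
            Resources: []*string{aws.String("i-1234567890abcdef0"), aws.String("vol-049df61146c4d7901")},
            Tags:      []*ec2.Tag{{Key: aws.String("Name"), Value: aws.String("web-01")}},
        })
        return err
    }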

    ", + "CreateVolume": "

    Creates an EBS volume that can be attached to an instance in the same Availability Zone. The volume is created in the regional endpoint that you send the HTTP request to. For more information see Regions and Endpoints.

    You can create a new empty volume or restore a volume from an EBS snapshot. Any AWS Marketplace product codes from the snapshot are propagated to the volume.

    You can create encrypted volumes with the Encrypted parameter. Encrypted volumes may only be attached to instances that support Amazon EBS encryption. Volumes that are created from encrypted snapshots are also automatically encrypted. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

    For more information, see Creating or Restoring an Amazon EBS Volume in the Amazon Elastic Compute Cloud User Guide.
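
    A minimal sketch creating an encrypted gp2 volume in a single Availability Zone, using the Encrypted parameter described above; the zone and size are placeholders, and the snippet assumes the client and imports from the AttachVolume sketch:

    // createVolume provisions a 100 GiB encrypted gp2 volume (placeholder zone).
    func createVolume(svc *ec2.EC2) (string, error) {
        vol, err := svc.CreateVolume(&ec2.CreateVolumeInput{
            AvailabilityZone: aws.String("us-east-1a"),
            Size:             aws.Int64(100),
            VolumeType:       aws.String("gp2"),
            Encrypted:        aws.Bool(true),
        })
        if err != nil {
            return "", err
        }
        return aws.StringValue(vol.VolumeId), nil
    }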

    ", + "CreateVpc": "

    Creates a VPC with the specified CIDR block.

    The smallest VPC you can create uses a /28 netmask (16 IP addresses), and the largest uses a /16 netmask (65,536 IP addresses). To help you decide how big to make your VPC, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

    By default, each instance you launch in the VPC has the default DHCP options, which includes only a default DNS server that we provide (AmazonProvidedDNS). For more information about DHCP options, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.
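
    A hedged sketch pairing the netmask guidance above with the call; the CIDR is a placeholder, and the snippet assumes the client and imports from the AttachVolume sketch:

    // createVpc creates a VPC with a /16 CIDR block, the largest allowed
    // (placeholder CIDR).
    func createVpc(svc *ec2.EC2) (string, error) {
        out, err := svc.CreateVpc(&ec2.CreateVpcInput{
            CidrBlock: aws.String("10.0.0.0/16"),
        })
        if err != nil {
            return "", err
        }
        return aws.StringValue(out.Vpc.VpcId), nil
    }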

    ", + "CreateVpcEndpoint": "

    Creates a VPC endpoint for a specified AWS service. An endpoint enables you to create a private connection between your VPC and another AWS service in your account. You can specify an endpoint policy to attach to the endpoint that will control access to the service from your VPC. You can also specify the VPC route tables that use the endpoint.

    Currently, only endpoints to Amazon S3 are supported.

    ", + "CreateVpcPeeringConnection": "

    Requests a VPC peering connection between two VPCs: a requester VPC that you own and a peer VPC with which to create the connection. The peer VPC can belong to another AWS account. The requester VPC and peer VPC cannot have overlapping CIDR blocks.

    The owner of the peer VPC must accept the peering request to activate the peering connection. The VPC peering connection request expires after 7 days, after which it cannot be accepted or rejected.

    A CreateVpcPeeringConnection request between VPCs with overlapping CIDR blocks results in the VPC peering connection having a status of failed.

    ", + "CreateVpnConnection": "

    Creates a VPN connection between an existing virtual private gateway and a VPN customer gateway. The only supported connection type is ipsec.1.

    The response includes information that you need to give to your network administrator to configure your customer gateway.

    We strongly recommend that you use HTTPS when calling this operation because the response contains sensitive cryptographic information for configuring your customer gateway.

    If you decide to shut down your VPN connection for any reason and later create a new VPN connection, you must reconfigure your customer gateway with the new information returned from this call.

    For more information about VPN connections, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "CreateVpnConnectionRoute": "

    Creates a static route associated with a VPN connection between an existing virtual private gateway and a VPN customer gateway. The static route allows traffic to be routed from the virtual private gateway to the VPN customer gateway.

    For more information about VPN connections, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "CreateVpnGateway": "

    Creates a virtual private gateway. A virtual private gateway is the endpoint on the VPC side of your VPN connection. You can create a virtual private gateway before creating the VPC itself.

    For more information about virtual private gateways, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "DeleteCustomerGateway": "

    Deletes the specified customer gateway. You must delete the VPN connection before you can delete the customer gateway.

    ", + "DeleteDhcpOptions": "

    Deletes the specified set of DHCP options. You must disassociate the set of DHCP options before you can delete it. You can disassociate the set of DHCP options by associating either a new set of options or the default set of options with the VPC.

    ", + "DeleteFlowLogs": "

    Deletes one or more flow logs.

    ", + "DeleteInternetGateway": "

    Deletes the specified Internet gateway. You must detach the Internet gateway from the VPC before you can delete it.

    ", + "DeleteKeyPair": "

    Deletes the specified key pair, by removing the public key from Amazon EC2.

    ", + "DeleteNetworkAcl": "

    Deletes the specified network ACL. You can't delete the ACL if it's associated with any subnets. You can't delete the default network ACL.

    ", + "DeleteNetworkAclEntry": "

    Deletes the specified ingress or egress entry (rule) from the specified network ACL.

    ", + "DeleteNetworkInterface": "

    Deletes the specified network interface. You must detach the network interface before you can delete it.

    ", + "DeletePlacementGroup": "

    Deletes the specified placement group. You must terminate all instances in the placement group before you can delete the placement group. For more information about placement groups and cluster instances, see Cluster Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "DeleteRoute": "

    Deletes the specified route from the specified route table.

    ", + "DeleteRouteTable": "

    Deletes the specified route table. You must disassociate the route table from any subnets before you can delete it. You can't delete the main route table.

    ", + "DeleteSecurityGroup": "

    Deletes a security group.

    If you attempt to delete a security group that is associated with an instance, or is referenced by another security group, the operation fails with InvalidGroup.InUse in EC2-Classic or DependencyViolation in EC2-VPC.

    ", + "DeleteSnapshot": "

    Deletes the specified snapshot.

    When you make periodic snapshots of a volume, the snapshots are incremental, and only the blocks on the device that have changed since your last snapshot are saved in the new snapshot. When you delete a snapshot, only the data not needed for any other snapshot is removed. So regardless of which prior snapshots have been deleted, all active snapshots will have access to all the information needed to restore the volume.

    You cannot delete a snapshot of the root device of an EBS volume used by a registered AMI. You must first de-register the AMI before you can delete the snapshot.

    For more information, see Deleting an Amazon EBS Snapshot in the Amazon Elastic Compute Cloud User Guide.

    ", + "DeleteSpotDatafeedSubscription": "

    Deletes the data feed for Spot instances.

    ", + "DeleteSubnet": "

    Deletes the specified subnet. You must terminate all running instances in the subnet before you can delete the subnet.

    ", + "DeleteTags": "

    Deletes the specified set of tags from the specified set of resources. This call is designed to follow a DescribeTags request.

    For more information about tags, see Tagging Your Resources in the Amazon Elastic Compute Cloud User Guide.

    ", + "DeleteVolume": "

    Deletes the specified EBS volume. The volume must be in the available state (not attached to an instance).

    The volume may remain in the deleting state for several minutes.

    For more information, see Deleting an Amazon EBS Volume in the Amazon Elastic Compute Cloud User Guide.

    ", + "DeleteVpc": "

    Deletes the specified VPC. You must detach or delete all gateways and resources that are associated with the VPC before you can delete it. For example, you must terminate all instances running in the VPC, delete all security groups associated with the VPC (except the default one), delete all route tables associated with the VPC (except the default one), and so on.

    ", + "DeleteVpcEndpoints": "

    Deletes one or more specified VPC endpoints. Deleting the endpoint also deletes the endpoint routes in the route tables that were associated with the endpoint.

    ", + "DeleteVpcPeeringConnection": "

    Deletes a VPC peering connection. Either the owner of the requester VPC or the owner of the peer VPC can delete the VPC peering connection if it's in the active state. The owner of the requester VPC can delete a VPC peering connection in the pending-acceptance state.

    ", + "DeleteVpnConnection": "

    Deletes the specified VPN connection.

    If you're deleting the VPC and its associated components, we recommend that you detach the virtual private gateway from the VPC and delete the VPC before deleting the VPN connection. If you believe that the tunnel credentials for your VPN connection have been compromised, you can delete the VPN connection and create a new one that has new keys, without needing to delete the VPC or virtual private gateway. If you create a new VPN connection, you must reconfigure the customer gateway using the new configuration information returned with the new VPN connection ID.

    ", + "DeleteVpnConnectionRoute": "

    Deletes the specified static route associated with a VPN connection between an existing virtual private gateway and a VPN customer gateway. The static route allows traffic to be routed from the virtual private gateway to the VPN customer gateway.

    ", + "DeleteVpnGateway": "

    Deletes the specified virtual private gateway. We recommend that before you delete a virtual private gateway, you detach it from the VPC and delete the VPN connection. Note that you don't need to delete the virtual private gateway if you plan to delete and recreate the VPN connection between your VPC and your network.

    ", + "DeregisterImage": "

    Deregisters the specified AMI. After you deregister an AMI, it can't be used to launch new instances.

    This command does not delete the AMI.

    ", + "DescribeAccountAttributes": "

    Describes attributes of your AWS account. The following are the supported account attributes:

    • supported-platforms: Indicates whether your account can launch instances into EC2-Classic and EC2-VPC, or only into EC2-VPC.

    • default-vpc: The ID of the default VPC for your account, or none.

    • max-instances: The maximum number of On-Demand instances that you can run.

    • vpc-max-security-groups-per-interface: The maximum number of security groups that you can assign to a network interface.

    • max-elastic-ips: The maximum number of Elastic IP addresses that you can allocate for use with EC2-Classic.

    • vpc-max-elastic-ips: The maximum number of Elastic IP addresses that you can allocate for use with EC2-VPC.

    ", + "DescribeAddresses": "

    Describes one or more of your Elastic IP addresses.

    An Elastic IP address is for use in either the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeAvailabilityZones": "

    Describes one or more of the Availability Zones that are available to you. The results include zones only for the region you're currently using. If there is an event impacting an Availability Zone, you can use this request to view the state and any provided message for that Availability Zone.

    For more information, see Regions and Availability Zones in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeBundleTasks": "

    Describes one or more of your bundling tasks.

    Completed bundle tasks are listed for only a limited time. If your bundle task is no longer in the list, you can still register an AMI from it. Just use RegisterImage with the Amazon S3 bucket name and image manifest name you provided to the bundle task.

    ", + "DescribeClassicLinkInstances": "

    Describes one or more of your linked EC2-Classic instances. This request only returns information about EC2-Classic instances linked to a VPC through ClassicLink; you cannot use this request to return information about other instances.

    ", + "DescribeConversionTasks": "

    Describes one or more of your conversion tasks. For more information, see Using the Command Line Tools to Import Your Virtual Machine to Amazon EC2 in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeCustomerGateways": "

    Describes one or more of your VPN customer gateways.

    For more information about VPN customer gateways, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "DescribeDhcpOptions": "

    Describes one or more of your DHCP options sets.

    For more information about DHCP options sets, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

    ", + "DescribeExportTasks": "

    Describes one or more of your export tasks.

    ", + "DescribeFlowLogs": "

    Describes one or more flow logs. To view the information in your flow logs (the log streams for the network interfaces), you must use the CloudWatch Logs console or the CloudWatch Logs API.

    ", + "DescribeImageAttribute": "

    Describes the specified attribute of the specified AMI. You can specify only one attribute at a time.

    ", + "DescribeImages": "

    Describes one or more of the images (AMIs, AKIs, and ARIs) available to you. Images available to you include public images, private images that you own, and private images owned by other AWS accounts but for which you have explicit launch permissions.

    Deregistered images are included in the returned results for an unspecified interval after deregistration.

    ", + "DescribeImportImageTasks": "

    Displays details about import virtual machine or import snapshot tasks that are already created.

    ", + "DescribeImportSnapshotTasks": "

    Describes your import snapshot tasks.

    ", + "DescribeInstanceAttribute": "

    Describes the specified attribute of the specified instance. You can specify only one attribute at a time. Valid attribute values are: instanceType | kernel | ramdisk | userData | disableApiTermination | instanceInitiatedShutdownBehavior | rootDeviceName | blockDeviceMapping | productCodes | sourceDestCheck | groupSet | ebsOptimized | sriovNetSupport

    ", + "DescribeInstanceStatus": "

    Describes the status of one or more instances.

    Instance status includes the following components:

    • Status checks - Amazon EC2 performs status checks on running EC2 instances to identify hardware and software issues. For more information, see Status Checks for Your Instances and Troubleshooting Instances with Failed Status Checks in the Amazon Elastic Compute Cloud User Guide.

    • Scheduled events - Amazon EC2 can schedule events (such as reboot, stop, or terminate) for your instances related to hardware issues, software updates, or system maintenance. For more information, see Scheduled Events for Your Instances in the Amazon Elastic Compute Cloud User Guide.

    • Instance state - You can manage your instances from the moment you launch them through their termination. For more information, see Instance Lifecycle in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeInstances": "

    Describes one or more of your instances.

    If you specify one or more instance IDs, Amazon EC2 returns information for those instances. If you do not specify instance IDs, Amazon EC2 returns information for all relevant instances. If you specify an instance ID that is not valid, an error is returned. If you specify an instance that you do not own, it is not included in the returned results.

    Recently terminated instances might appear in the returned results. This interval is usually less than one hour.
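
    A minimal sketch that looks up two specific instances and walks the reservation/instance nesting of the response; the instance IDs are placeholders, and the snippet assumes the client and imports from the AttachVolume sketch:

    // printStates prints the state of each requested instance (placeholder IDs).
    func printStates(svc *ec2.EC2) error {
        out, err := svc.DescribeInstances(&ec2.DescribeInstancesInput{
            InstanceIds: []*string{aws.String("i-1234567890abcdef0"), aws.String("i-0598c7d356eba48d7")},
        })
        if err != nil {
            return err
        }
        for _, res := range out.Reservations {
            for _, inst := range res.Instances {
                fmt.Println(aws.StringValue(inst.InstanceId), aws.StringValue(inst.State.Name))
            }
        }
        return nil
    }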

    ", + "DescribeInternetGateways": "

    Describes one or more of your Internet gateways.

    ", + "DescribeKeyPairs": "

    Describes one or more of your key pairs.

    For more information about key pairs, see Key Pairs in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeMovingAddresses": "

    Describes your Elastic IP addresses that are being moved to the EC2-VPC platform, or that are being restored to the EC2-Classic platform. This request does not return information about any other Elastic IP addresses in your account.

    ", + "DescribeNetworkAcls": "

    Describes one or more of your network ACLs.

    For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

    ", + "DescribeNetworkInterfaceAttribute": "

    Describes a network interface attribute. You can specify only one attribute at a time.

    ", + "DescribeNetworkInterfaces": "

    Describes one or more of your network interfaces.

    ", + "DescribePlacementGroups": "

    Describes one or more of your placement groups. For more information about placement groups and cluster instances, see Cluster Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribePrefixLists": "

    Describes available AWS services in a prefix list format, which includes the prefix list name and prefix list ID of the service and the IP address range for the service. A prefix list ID is required for creating an outbound security group rule that allows traffic from a VPC to access an AWS service through a VPC endpoint.

    ", + "DescribeRegions": "

    Describes one or more regions that are currently available to you.

    For a list of the regions supported by Amazon EC2, see Regions and Endpoints.

    ", + "DescribeReservedInstances": "

    Describes one or more of the Reserved Instances that you purchased.

    For more information about Reserved Instances, see Reserved Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeReservedInstancesListings": "

    Describes your account's Reserved Instance listings in the Reserved Instance Marketplace.

    The Reserved Instance Marketplace matches sellers who want to resell Reserved Instance capacity that they no longer need with buyers who want to purchase additional capacity. Reserved Instances bought and sold through the Reserved Instance Marketplace work like any other Reserved Instances.

    As a seller, you choose to list some or all of your Reserved Instances, and you specify the upfront price to receive for them. Your Reserved Instances are then listed in the Reserved Instance Marketplace and are available for purchase.

    As a buyer, you specify the configuration of the Reserved Instance to purchase, and the Marketplace matches what you're searching for with what's available. The Marketplace first sells the lowest priced Reserved Instances to you, and continues to sell available Reserved Instance listings to you until your demand is met. You are charged based on the total price of all of the listings that you purchase.

    For more information, see Reserved Instance Marketplace in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeReservedInstancesModifications": "

    Describes the modifications made to your Reserved Instances. If no parameter is specified, information about all your Reserved Instances modification requests is returned. If a modification ID is specified, only information about the specific modification is returned.

    For more information, see Modifying Reserved Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeReservedInstancesOfferings": "

    Describes Reserved Instance offerings that are available for purchase. With Reserved Instances, you purchase the right to launch instances for a period of time. During that time period, you do not receive insufficient capacity errors, and you pay a lower usage rate than the rate charged for On-Demand instances for the actual time used.

    For more information, see Reserved Instance Marketplace in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeRouteTables": "

    Describes one or more of your route tables.

    Each subnet in your VPC must be associated with a route table. If a subnet is not explicitly associated with any route table, it is implicitly associated with the main route table. This command does not return the subnet ID for implicit associations.

    For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

    ", + "DescribeSecurityGroups": "

    Describes one or more of your security groups.

    A security group is for use with instances either in the EC2-Classic platform or in a specific VPC. For more information, see Amazon EC2 Security Groups in the Amazon Elastic Compute Cloud User Guide and Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "DescribeSnapshotAttribute": "

    Describes the specified attribute of the specified snapshot. You can specify only one attribute at a time.

    For more information about EBS snapshots, see Amazon EBS Snapshots in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeSnapshots": "

    Describes one or more of the EBS snapshots available to you. Available snapshots include public snapshots that any AWS account can use, private snapshots that you own, and private snapshots owned by another AWS account for which you've been given explicit create volume permissions.

    The create volume permissions fall into the following categories:

    • public: The owner of the snapshot granted create volume permissions for the snapshot to the all group. All AWS accounts have create volume permissions for these snapshots.
    • explicit: The owner of the snapshot granted create volume permissions to a specific AWS account.
    • implicit: An AWS account has implicit create volume permissions for all snapshots it owns.

    The list of snapshots returned can be modified by specifying snapshot IDs, snapshot owners, or AWS accounts with create volume permissions. If no options are specified, Amazon EC2 returns all snapshots for which you have create volume permissions.

    If you specify one or more snapshot IDs, only snapshots that have the specified IDs are returned. If you specify an invalid snapshot ID, an error is returned. If you specify a snapshot ID to which you do not have access, it is not included in the returned results.

    If you specify one or more snapshot owners, only snapshots from the specified owners and for which you have access are returned. The results can include the AWS account IDs of the specified owners, amazon for snapshots owned by Amazon, or self for snapshots that you own.

    If you specify a list of restorable users, only snapshots with create volume permissions for those users are returned. You can specify AWS account IDs (if you own the snapshots), self for snapshots that you own or for which you have explicit permissions, or all for public snapshots.

    If you are describing a long list of snapshots, you can paginate the output to make the list more manageable. The MaxResults parameter sets the maximum number of results returned in a single page. If the list of results exceeds your MaxResults value, then that number of results is returned along with a NextToken value that can be passed to a subsequent DescribeSnapshots request to retrieve the remaining results.
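
    As a concrete illustration of the MaxResults/NextToken flow described above, here is a minimal Go sketch using this SDK's DescribeSnapshotsPages helper, which follows NextToken across pages automatically. The region, the page size, and the "self" owner filter are assumptions for the example.

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/ec2"
        )

        func main() {
            // Assumed region; credentials come from the default provider chain.
            sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
            svc := ec2.New(sess)

            input := &ec2.DescribeSnapshotsInput{
                OwnerIds:   []*string{aws.String("self")}, // only snapshots you own
                MaxResults: aws.Int64(100),                // results per page
            }
            // The callback runs once per page; returning true requests the next page.
            err := svc.DescribeSnapshotsPages(input, func(page *ec2.DescribeSnapshotsOutput, lastPage bool) bool {
                for _, s := range page.Snapshots {
                    fmt.Println(aws.StringValue(s.SnapshotId))
                }
                return true
            })
            if err != nil {
                fmt.Println("DescribeSnapshots failed:", err)
            }
        }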

    For more information about EBS snapshots, see Amazon EBS Snapshots in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeSpotDatafeedSubscription": "

    Describes the data feed for Spot instances. For more information, see Spot Instance Data Feed in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeSpotFleetInstances": "

    Describes the running instances for the specified Spot fleet.

    ", + "DescribeSpotFleetRequestHistory": "

    Describes the events for the specified Spot fleet request during the specified time.

    Spot fleet events are delayed by up to 30 seconds before they can be described. This ensures that you can query by the last evaluated time and not miss a recorded event.

    ", + "DescribeSpotFleetRequests": "

    Describes your Spot fleet requests.

    ", + "DescribeSpotInstanceRequests": "

    Describes the Spot instance requests that belong to your account. Spot instances are instances that Amazon EC2 launches when the bid price that you specify exceeds the current Spot price. Amazon EC2 periodically sets the Spot price based on available Spot instance capacity and current Spot instance requests. For more information, see Spot Instance Requests in the Amazon Elastic Compute Cloud User Guide.

    You can use DescribeSpotInstanceRequests to find a running Spot instance by examining the response. If the status of the Spot instance is fulfilled, the instance ID appears in the response and contains the identifier of the instance. Alternatively, you can use DescribeInstances with a filter to look for instances where the instance lifecycle is spot.
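
    A minimal sketch of the DescribeInstances alternative mentioned above, assuming an *ec2.EC2 client svc constructed as in the DescribeSnapshots example earlier:

        // listSpotInstances prints instances whose lifecycle is "spot".
        func listSpotInstances(svc *ec2.EC2) error {
            out, err := svc.DescribeInstances(&ec2.DescribeInstancesInput{
                Filters: []*ec2.Filter{{
                    Name:   aws.String("instance-lifecycle"),
                    Values: []*string{aws.String("spot")},
                }},
            })
            if err != nil {
                return err
            }
            for _, r := range out.Reservations {
                for _, i := range r.Instances {
                    fmt.Println(aws.StringValue(i.InstanceId))
                }
            }
            return nil
        }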

    ", + "DescribeSpotPriceHistory": "

    Describes the Spot price history. The prices returned are listed in chronological order, from the oldest to the most recent, for up to the past 90 days. For more information, see Spot Instance Pricing History in the Amazon Elastic Compute Cloud User Guide.

    When you specify a start and end time, this operation returns the prices of the instance types within the time range that you specified and the time when the price changed. The price is valid within the time period that you specified; the response merely indicates the last time that the price changed.

    ", + "DescribeSubnets": "

    Describes one or more of your subnets.

    For more information about subnets, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

    ", + "DescribeTags": "

    Describes one or more of the tags for your EC2 resources.

    For more information about tags, see Tagging Your Resources in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeVolumeAttribute": "

    Describes the specified attribute of the specified volume. You can specify only one attribute at a time.

    For more information about EBS volumes, see Amazon EBS Volumes in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeVolumeStatus": "

    Describes the status of the specified volumes. Volume status provides the result of the checks performed on your volumes to determine events that can impair the performance of your volumes. The performance of a volume can be affected if an issue occurs on the volume's underlying host. If the volume's underlying host experiences a power outage or system issue, after the system is restored, there could be data inconsistencies on the volume. Volume events notify you if this occurs. Volume actions notify you if any action needs to be taken in response to the event.

    The DescribeVolumeStatus operation provides the following information about the specified volumes:

    • Status: Reflects the current status of the volume. The possible values are ok, impaired, warning, or insufficient-data. If all checks pass, the overall status of the volume is ok. If the check fails, the overall status is impaired. If the status is insufficient-data, the checks might still be in progress on the volume; we recommend that you retry the request. For more information on volume status, see Monitoring the Status of Your Volumes.

    • Events: Reflect the cause of a volume status and may require you to take action. For example, if your volume returns an impaired status, the volume event might be potential-data-inconsistency. This means that your volume has been affected by an issue with the underlying host, has all I/O operations disabled, and may have inconsistent data.

    • Actions: Reflect the actions you may have to take in response to an event. For example, if the status of the volume is impaired and the volume event shows potential-data-inconsistency, the action shows enable-volume-io. This means that you may want to enable the I/O operations for the volume by calling the EnableVolumeIO action and then check the volume for data consistency.

    Volume status is based on the volume status checks, and does not reflect the volume state. Therefore, volume status does not indicate volumes in the error state (for example, when a volume is incapable of accepting I/O).
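
    The three result fields map directly onto the SDK types; a short sketch of reading them, assuming an *ec2.EC2 client svc as in the earlier examples and a hypothetical volume ID:

        // printVolumeStatus prints the status, events, and recommended actions
        // for one volume.
        func printVolumeStatus(svc *ec2.EC2, volumeID string) error {
            out, err := svc.DescribeVolumeStatus(&ec2.DescribeVolumeStatusInput{
                VolumeIds: []*string{aws.String(volumeID)},
            })
            if err != nil {
                return err
            }
            for _, v := range out.VolumeStatuses {
                // ok | impaired | warning | insufficient-data
                fmt.Println("status:", aws.StringValue(v.VolumeStatus.Status))
                for _, e := range v.Events {
                    fmt.Println("event:", aws.StringValue(e.EventType))
                }
                for _, a := range v.Actions {
                    fmt.Println("action:", aws.StringValue(a.Code)) // e.g. enable-volume-io
                }
            }
            return nil
        }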

    ", + "DescribeVolumes": "

    Describes the specified EBS volumes.

    If you are describing a long list of volumes, you can paginate the output to make the list more manageable. The MaxResults parameter sets the maximum number of results returned in a single page. If the list of results exceeds your MaxResults value, then that number of results is returned along with a NextToken value that can be passed to a subsequent DescribeVolumes request to retrieve the remaining results.

    For more information about EBS volumes, see Amazon EBS Volumes in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeVpcAttribute": "

    Describes the specified attribute of the specified VPC. You can specify only one attribute at a time.

    ", + "DescribeVpcClassicLink": "

    Describes the ClassicLink status of one or more VPCs.

    ", + "DescribeVpcEndpointServices": "

    Describes all supported AWS services that can be specified when creating a VPC endpoint.

    ", + "DescribeVpcEndpoints": "

    Describes one or more of your VPC endpoints.

    ", + "DescribeVpcPeeringConnections": "

    Describes one or more of your VPC peering connections.

    ", + "DescribeVpcs": "

    Describes one or more of your VPCs.

    ", + "DescribeVpnConnections": "

    Describes one or more of your VPN connections.

    For more information about VPN connections, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "DescribeVpnGateways": "

    Describes one or more of your virtual private gateways.

    For more information about virtual private gateways, see Adding an IPsec Hardware VPN to Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "DetachClassicLinkVpc": "

    Unlinks (detaches) a linked EC2-Classic instance from a VPC. After the instance has been unlinked, the VPC security groups are no longer associated with it. An instance is automatically unlinked from a VPC when it's stopped.

    ", + "DetachInternetGateway": "

    Detaches an Internet gateway from a VPC, disabling connectivity between the Internet and the VPC. The VPC must not contain any running instances with Elastic IP addresses.

    ", + "DetachNetworkInterface": "

    Detaches a network interface from an instance.

    ", + "DetachVolume": "

    Detaches an EBS volume from an instance. Make sure to unmount any file systems on the device within your operating system before detaching the volume. Failure to do so results in the volume being stuck in a busy state while detaching.

    If an Amazon EBS volume is the root device of an instance, it can't be detached while the instance is running. To detach the root volume, stop the instance first.

    When a volume with an AWS Marketplace product code is detached from an instance, the product code is no longer associated with the instance.

    For more information, see Detaching an Amazon EBS Volume in the Amazon Elastic Compute Cloud User Guide.

    ", + "DetachVpnGateway": "

    Detaches a virtual private gateway from a VPC. Do this if you plan to shut down the VPC and no longer use it. You can confirm a virtual private gateway has been completely detached from a VPC by describing the virtual private gateway (any attachments to the virtual private gateway are also described).

    You must wait for the attachment's state to switch to detached before you can delete the VPC or attach a different VPC to the virtual private gateway.

    ", + "DisableVgwRoutePropagation": "

    Disables a virtual private gateway (VGW) from propagating routes to a specified route table of a VPC.

    ", + "DisableVpcClassicLink": "

    Disables ClassicLink for a VPC. You cannot disable ClassicLink for a VPC that has EC2-Classic instances linked to it.

    ", + "DisassociateAddress": "

    Disassociates an Elastic IP address from the instance or network interface it's associated with.

    An Elastic IP address is for use in either the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

    This is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error.

    ", + "DisassociateRouteTable": "

    Disassociates a subnet from a route table.

    After you perform this action, the subnet no longer uses the routes in the route table. Instead, it uses the routes in the VPC's main route table. For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

    ", + "EnableVgwRoutePropagation": "

    Enables a virtual private gateway (VGW) to propagate routes to the specified route table of a VPC.

    ", + "EnableVolumeIO": "

    Enables I/O operations for a volume that had I/O operations disabled because the data on the volume was potentially inconsistent.

    ", + "EnableVpcClassicLink": "

    Enables a VPC for ClassicLink. You can then link EC2-Classic instances to your ClassicLink-enabled VPC to allow communication over private IP addresses. You cannot enable your VPC for ClassicLink if any of your VPC's route tables have existing routes for address ranges within the 10.0.0.0/8 IP address range, excluding local routes for VPCs in the 10.0.0.0/16 and 10.1.0.0/16 IP address ranges. For more information, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

    ", + "GetConsoleOutput": "

    Gets the console output for the specified instance.

    Instances do not have a physical monitor through which you can view their console output. They also lack physical controls that allow you to power up, reboot, or shut them down. To allow these actions, we provide them through the Amazon EC2 API and command line interface.

    Instance console output is buffered and posted shortly after instance boot, reboot, and termination. Amazon EC2 preserves the most recent 64 KB of output, which is available for at least one hour after the most recent post.

    For Linux instances, the instance console output displays the exact console output that would normally be displayed on a physical monitor attached to a computer. This output is buffered because the instance produces it and then posts it to a store where the instance's owner can retrieve it.

    For Windows instances, the instance console output includes output from the EC2Config service.
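
    Since the Output field is returned base64-encoded, a caller must decode it before display; a minimal sketch, assuming an *ec2.EC2 client svc as above and the standard library's encoding/base64:

        // consoleOutput fetches and decodes an instance's console output.
        func consoleOutput(svc *ec2.EC2, instanceID string) (string, error) {
            out, err := svc.GetConsoleOutput(&ec2.GetConsoleOutputInput{
                InstanceId: aws.String(instanceID),
            })
            if err != nil {
                return "", err
            }
            decoded, err := base64.StdEncoding.DecodeString(aws.StringValue(out.Output))
            if err != nil {
                return "", err
            }
            return string(decoded), nil
        }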

    ", + "GetPasswordData": "

    Retrieves the encrypted administrator password for an instance running Windows.

    The Windows password is generated at boot if the EC2Config service plugin, Ec2SetPassword, is enabled. This usually only happens the first time an AMI is launched, and then Ec2SetPassword is automatically disabled. The password is not generated for rebundled AMIs unless Ec2SetPassword is enabled before bundling.

    The password is encrypted using the key pair that you specified when you launched the instance. You must provide the corresponding key pair file.

    Password generation and encryption takes a few moments. We recommend that you wait up to 15 minutes after launching an instance before trying to retrieve the generated password.
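
    A sketch of the retrieve-and-decrypt flow, assuming an *ec2.EC2 client svc as above and that the key pair file is an unencrypted PKCS#1 RSA PEM (the usual format for EC2 key pairs); it uses crypto/rsa, crypto/x509, encoding/base64, encoding/pem, and errors from the standard library:

        // windowsPassword decrypts the administrator password with the
        // private half of the key pair used at launch.
        func windowsPassword(svc *ec2.EC2, instanceID string, keyPEM []byte) (string, error) {
            out, err := svc.GetPasswordData(&ec2.GetPasswordDataInput{
                InstanceId: aws.String(instanceID),
            })
            if err != nil {
                return "", err
            }
            // PasswordData is base64-encoded ciphertext.
            ciphertext, err := base64.StdEncoding.DecodeString(aws.StringValue(out.PasswordData))
            if err != nil {
                return "", err
            }
            block, _ := pem.Decode(keyPEM) // contents of the .pem key pair file
            if block == nil {
                return "", errors.New("no PEM block in key file")
            }
            priv, err := x509.ParsePKCS1PrivateKey(block.Bytes)
            if err != nil {
                return "", err
            }
            pw, err := rsa.DecryptPKCS1v15(nil, priv, ciphertext)
            if err != nil {
                return "", err
            }
            return string(pw), nil
        }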

    ", + "ImportImage": "

    Import single or multi-volume disk images or EBS snapshots into an Amazon Machine Image (AMI).

    ", + "ImportInstance": "

    Creates an import instance task using metadata from the specified disk image. ImportInstance only supports single-volume VMs. To import multi-volume VMs, use ImportImage. After importing the image, you upload it using the ec2-import-volume command in the EC2 command line tools. For more information, see Using the Command Line Tools to Import Your Virtual Machine to Amazon EC2 in the Amazon Elastic Compute Cloud User Guide.

    ", + "ImportKeyPair": "

    Imports the public key from an RSA key pair that you created with a third-party tool. Compare this with CreateKeyPair, in which AWS creates the key pair and gives the keys to you (AWS keeps a copy of the public key). With ImportKeyPair, you create the key pair and give AWS just the public key. The private key is never transferred between you and AWS.

    For more information about key pairs, see Key Pairs in the Amazon Elastic Compute Cloud User Guide.
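
    A minimal sketch, assuming an *ec2.EC2 client svc as above; the key name and the path to the public key file are hypothetical, and io/ioutil supplies the file read:

        // importPublicKey uploads only the public half of a locally
        // generated key pair.
        func importPublicKey(svc *ec2.EC2, name, pubKeyPath string) error {
            pub, err := ioutil.ReadFile(pubKeyPath) // e.g. an OpenSSH id_rsa.pub
            if err != nil {
                return err
            }
            _, err = svc.ImportKeyPair(&ec2.ImportKeyPairInput{
                KeyName:           aws.String(name),
                PublicKeyMaterial: pub, // the private key never leaves your machine
            })
            return err
        }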

    ", + "ImportSnapshot": "

    Imports a disk into an EBS snapshot.

    ", + "ImportVolume": "

    Creates an import volume task using metadata from the specified disk image. After importing the image, you upload it using the ec2-import-volume command in the Amazon EC2 command-line interface (CLI) tools. For more information, see Using the Command Line Tools to Import Your Virtual Machine to Amazon EC2 in the Amazon Elastic Compute Cloud User Guide.

    ", + "ModifyImageAttribute": "

    Modifies the specified attribute of the specified AMI. You can specify only one attribute at a time.

    AWS Marketplace product codes cannot be modified. Images with an AWS Marketplace product code cannot be made public.

    ", + "ModifyInstanceAttribute": "

    Modifies the specified attribute of the specified instance. You can specify only one attribute at a time.

    To modify some attributes, the instance must be stopped. For more information, see Modifying Attributes of a Stopped Instance in the Amazon Elastic Compute Cloud User Guide.
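
    A short sketch of the one-attribute-per-call rule, assuming an *ec2.EC2 client svc as above; changing the instance type requires the instance to be stopped first:

        // resizeStoppedInstance changes only the instance type; any other
        // attribute change needs its own ModifyInstanceAttribute call.
        func resizeStoppedInstance(svc *ec2.EC2, instanceID, newType string) error {
            _, err := svc.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{
                InstanceId:   aws.String(instanceID),
                InstanceType: &ec2.AttributeValue{Value: aws.String(newType)},
            })
            return err
        }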

    ", + "ModifyNetworkInterfaceAttribute": "

    Modifies the specified network interface attribute. You can specify only one attribute at a time.

    ", + "ModifyReservedInstances": "

    Modifies the Availability Zone, instance count, instance type, or network platform (EC2-Classic or EC2-VPC) of your Reserved Instances. The Reserved Instances to be modified must be identical, except for Availability Zone, network platform, and instance type.

    For more information, see Modifying Reserved Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "ModifySnapshotAttribute": "

    Adds or removes permission settings for the specified snapshot. You may add or remove specified AWS account IDs from a snapshot's list of create volume permissions, but you cannot do both in a single API call. If you need to both add and remove account IDs for a snapshot, you must use multiple API calls.

    For more information on modifying snapshot permissions, see Sharing Snapshots in the Amazon Elastic Compute Cloud User Guide.

    Snapshots with AWS Marketplace product codes cannot be made public.
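
    A minimal sketch of the add side, assuming an *ec2.EC2 client svc as above; revoking the same permission would be a second call with OperationType "remove", since add and remove cannot share one request:

        // shareSnapshot grants another account create volume permission.
        func shareSnapshot(svc *ec2.EC2, snapshotID, accountID string) error {
            _, err := svc.ModifySnapshotAttribute(&ec2.ModifySnapshotAttributeInput{
                SnapshotId:    aws.String(snapshotID),
                Attribute:     aws.String("createVolumePermission"),
                OperationType: aws.String("add"),
                UserIds:       []*string{aws.String(accountID)},
            })
            return err
        }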

    ", + "ModifySubnetAttribute": "

    Modifies a subnet attribute.

    ", + "ModifyVolumeAttribute": "

    Modifies a volume attribute.

    By default, all I/O operations for the volume are suspended when the data on the volume is determined to be potentially inconsistent, to prevent undetectable, latent data corruption. The I/O access to the volume can be resumed by first enabling I/O access and then checking the data consistency on your volume.

    You can change the default behavior to resume I/O operations. We recommend that you change this only for boot volumes or for volumes that are stateless or disposable.

    ", + "ModifyVpcAttribute": "

    Modifies the specified attribute of the specified VPC.

    ", + "ModifyVpcEndpoint": "

    Modifies attributes of a specified VPC endpoint. You can modify the policy associated with the endpoint, and you can add and remove route tables associated with the endpoint.

    ", + "MonitorInstances": "

    Enables monitoring for a running instance. For more information about monitoring instances, see Monitoring Your Instances and Volumes in the Amazon Elastic Compute Cloud User Guide.

    ", + "MoveAddressToVpc": "

    Moves an Elastic IP address from the EC2-Classic platform to the EC2-VPC platform. The Elastic IP address must be allocated to your account, and it must not be associated with an instance. After the Elastic IP address is moved, it is no longer available for use in the EC2-Classic platform, unless you move it back using the RestoreAddressToClassic request. You cannot move an Elastic IP address that's allocated for use in the EC2-VPC platform to the EC2-Classic platform.

    ", + "PurchaseReservedInstancesOffering": "

    Purchases a Reserved Instance for use with your account. With Amazon EC2 Reserved Instances, you obtain a capacity reservation for a certain instance configuration over a specified period of time and pay a lower hourly rate compared to On-Demand Instance pricing.

    Use DescribeReservedInstancesOfferings to get a list of Reserved Instance offerings that match your specifications. After you've purchased a Reserved Instance, you can check for your new Reserved Instance with DescribeReservedInstances.

    For more information, see Reserved Instances and Reserved Instance Marketplace in the Amazon Elastic Compute Cloud User Guide.

    ", + "RebootInstances": "

    Requests a reboot of one or more instances. This operation is asynchronous; it only queues a request to reboot the specified instances. The operation succeeds if the instances are valid and belong to you. Requests to reboot terminated instances are ignored.

    If a Linux/Unix instance does not cleanly shut down within four minutes, Amazon EC2 performs a hard reboot.

    For more information about troubleshooting, see Getting Console Output and Rebooting Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "RegisterImage": "

    Registers an AMI. When you're creating an AMI, this is the final step you must complete before you can launch an instance from the AMI. For more information about creating AMIs, see Creating Your Own AMIs in the Amazon Elastic Compute Cloud User Guide.

    For Amazon EBS-backed instances, CreateImage creates and registers the AMI in a single request, so you don't have to register the AMI yourself.

    You can also use RegisterImage to create an Amazon EBS-backed Linux AMI from a snapshot of a root device volume. For more information, see Launching an Instance from a Snapshot in the Amazon Elastic Compute Cloud User Guide.

    Some Linux distributions, such as Red Hat Enterprise Linux (RHEL) and SUSE Linux Enterprise Server (SLES), use the EC2 billingProduct code associated with an AMI to verify subscription status for package updates. Creating an AMI from an EBS snapshot does not maintain this billing code, and subsequent instances launched from such an AMI will not be able to connect to package update infrastructure.

    Similarly, although you can create a Windows AMI from a snapshot, you can't successfully launch an instance from the AMI.

    To create Windows AMIs or to create AMIs for Linux operating systems that must retain AMI billing codes to work properly, see CreateImage.

    If needed, you can deregister an AMI at any time. Any modifications you make to an AMI backed by an instance store volume invalidate its registration. If you make changes to an image, deregister the previous image and register the new image.

    You can't register an image where a secondary (non-root) snapshot has AWS Marketplace product codes.
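
    A sketch of registering an EBS-backed Linux AMI from a root-volume snapshot, assuming an *ec2.EC2 client svc as above; the architecture, virtualization type, and device name are assumptions that must match the snapshot's source instance:

        // registerFromSnapshot builds an AMI whose root device is restored
        // from the given snapshot.
        func registerFromSnapshot(svc *ec2.EC2, name, snapshotID string) (string, error) {
            out, err := svc.RegisterImage(&ec2.RegisterImageInput{
                Name:               aws.String(name),
                Architecture:       aws.String("x86_64"),
                VirtualizationType: aws.String("hvm"),
                RootDeviceName:     aws.String("/dev/xvda"),
                BlockDeviceMappings: []*ec2.BlockDeviceMapping{{
                    DeviceName: aws.String("/dev/xvda"),
                    Ebs:        &ec2.EbsBlockDevice{SnapshotId: aws.String(snapshotID)},
                }},
            })
            if err != nil {
                return "", err
            }
            return aws.StringValue(out.ImageId), nil
        }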

    ", + "RejectVpcPeeringConnection": "

    Rejects a VPC peering connection request. The VPC peering connection must be in the pending-acceptance state. Use the DescribeVpcPeeringConnections request to view your outstanding VPC peering connection requests. To delete an active VPC peering connection, or to delete a VPC peering connection request that you initiated, use DeleteVpcPeeringConnection.

    ", + "ReleaseAddress": "

    Releases the specified Elastic IP address.

    After you release an Elastic IP address, it is returned to the IP address pool and might no longer be available to you. Be sure to update your DNS records and any servers or devices that communicate with the address. If you attempt to release an Elastic IP address that you already released, you'll get an AuthFailure error if the address is already allocated to another AWS account.

    [EC2-Classic, default VPC] Releasing an Elastic IP address automatically disassociates it from any instance that it's associated with. To disassociate an Elastic IP address without releasing it, use DisassociateAddress.

    [Nondefault VPC] You must use DisassociateAddress to disassociate the Elastic IP address before you try to release it. Otherwise, Amazon EC2 returns an error (InvalidIPAddress.InUse).
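
    A sketch of the required ordering for a nondefault VPC, assuming an *ec2.EC2 client svc as above and hypothetical association and allocation IDs:

        // releaseVpcAddress disassociates first; releasing while still
        // associated returns InvalidIPAddress.InUse.
        func releaseVpcAddress(svc *ec2.EC2, associationID, allocationID string) error {
            if _, err := svc.DisassociateAddress(&ec2.DisassociateAddressInput{
                AssociationId: aws.String(associationID),
            }); err != nil {
                return err
            }
            _, err := svc.ReleaseAddress(&ec2.ReleaseAddressInput{
                AllocationId: aws.String(allocationID),
            })
            return err
        }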

    ", + "ReplaceNetworkAclAssociation": "

    Changes which network ACL a subnet is associated with. By default when you create a subnet, it's automatically associated with the default network ACL. For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

    ", + "ReplaceNetworkAclEntry": "

    Replaces an entry (rule) in a network ACL. For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

    ", + "ReplaceRoute": "

    Replaces an existing route within a route table in a VPC. You must provide only one of the following: Internet gateway or virtual private gateway, NAT instance, VPC peering connection, or network interface.

    For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

    ", + "ReplaceRouteTableAssociation": "

    Changes the route table associated with a given subnet in a VPC. After the operation completes, the subnet uses the routes in the new route table it's associated with. For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

    You can also use ReplaceRouteTableAssociation to change which table is the main route table in the VPC. You just specify the main route table's association ID and the route table to be the new main route table.

    ", + "ReportInstanceStatus": "

    Submits feedback about the status of an instance. The instance must be in the running state. If your experience with the instance differs from the instance status returned by DescribeInstanceStatus, use ReportInstanceStatus to report your experience with the instance. Amazon EC2 collects this information to improve the accuracy of status checks.

    Use of this action does not change the value returned by DescribeInstanceStatus.

    ", + "RequestSpotFleet": "

    Creates a Spot fleet request.

    You can submit a single request that includes multiple launch specifications that vary by instance type, AMI, Availability Zone, or subnet.

    By default, the Spot fleet requests Spot instances in the Spot pool where the price per unit is the lowest. Each launch specification can include its own instance weighting that reflects the value of the instance type to your application workload.

    Alternatively, you can specify that the Spot fleet distribute the target capacity across the Spot pools included in its launch specifications. By ensuring that the Spot instances in your Spot fleet are in different Spot pools, you can improve the availability of your fleet.

    For more information, see Spot Fleet Requests in the Amazon Elastic Compute Cloud User Guide.
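
    A sketch of a diversified fleet request with per-type weights, assuming an *ec2.EC2 client svc as above; the fleet role ARN, AMI ID, bid price, and instance types are all hypothetical values:

        // requestFleet asks for 4 units of capacity spread across two pools.
        func requestFleet(svc *ec2.EC2, fleetRoleARN string) error {
            _, err := svc.RequestSpotFleet(&ec2.RequestSpotFleetInput{
                SpotFleetRequestConfig: &ec2.SpotFleetRequestConfigData{
                    IamFleetRole:       aws.String(fleetRoleARN),
                    SpotPrice:          aws.String("0.05"), // bid per unit, assumed
                    TargetCapacity:     aws.Int64(4),
                    AllocationStrategy: aws.String("diversified"),
                    LaunchSpecifications: []*ec2.SpotFleetLaunchSpecification{
                        {
                            ImageId:          aws.String("ami-12345678"), // hypothetical
                            InstanceType:     aws.String("m4.large"),
                            WeightedCapacity: aws.Float64(1),
                        },
                        {
                            ImageId:          aws.String("ami-12345678"), // hypothetical
                            InstanceType:     aws.String("m4.xlarge"),
                            WeightedCapacity: aws.Float64(2),
                        },
                    },
                },
            })
            return err
        }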

    ", + "RequestSpotInstances": "

    Creates a Spot instance request. Spot instances are instances that Amazon EC2 launches when the bid price that you specify exceeds the current Spot price. Amazon EC2 periodically sets the Spot price based on available Spot instance capacity and current Spot instance requests. For more information, see Spot Instance Requests in the Amazon Elastic Compute Cloud User Guide.

    ", + "ResetImageAttribute": "

    Resets an attribute of an AMI to its default value.

    The productCodes attribute can't be reset.

    ", + "ResetInstanceAttribute": "

    Resets an attribute of an instance to its default value. To reset the kernel or ramdisk, the instance must be in a stopped state. To reset the SourceDestCheck, the instance can be either running or stopped.

    The SourceDestCheck attribute controls whether source/destination checking is enabled. The default value is true, which means checking is enabled. This value must be false for a NAT instance to perform NAT. For more information, see NAT Instances in the Amazon Virtual Private Cloud User Guide.

    ", + "ResetNetworkInterfaceAttribute": "

    Resets a network interface attribute. You can specify only one attribute at a time.

    ", + "ResetSnapshotAttribute": "

    Resets permission settings for the specified snapshot.

    For more information on modifying snapshot permissions, see Sharing Snapshots in the Amazon Elastic Compute Cloud User Guide.

    ", + "RestoreAddressToClassic": "

    Restores an Elastic IP address that was previously moved to the EC2-VPC platform back to the EC2-Classic platform. You cannot move an Elastic IP address that was originally allocated for use in EC2-VPC. The Elastic IP address must not be associated with an instance or network interface.

    ", + "RevokeSecurityGroupEgress": "

    Removes one or more egress rules from a security group for EC2-VPC. The values that you specify in the revoke request (for example, ports) must match the existing rule's values for the rule to be revoked.

    Each rule consists of the protocol and the CIDR range or source security group. For the TCP and UDP protocols, you must also specify the destination port or range of ports. For the ICMP protocol, you must also specify the ICMP type and code.

    Rule changes are propagated to instances within the security group as quickly as possible. However, a small delay might occur.

    ", + "RevokeSecurityGroupIngress": "

    Removes one or more ingress rules from a security group. The values that you specify in the revoke request (for example, ports) must match the existing rule's values for the rule to be removed.

    Each rule consists of the protocol and the CIDR range or source security group. For the TCP and UDP protocols, you must also specify the destination port or range of ports. For the ICMP protocol, you must also specify the ICMP type and code.

    Rule changes are propagated to instances within the security group as quickly as possible. However, a small delay might occur.
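
    A minimal sketch, assuming an *ec2.EC2 client svc as above; the group ID and CIDR are hypothetical, and every value must match the existing rule exactly:

        // revokeSSH removes a TCP port 22 ingress rule.
        func revokeSSH(svc *ec2.EC2, groupID string) error {
            _, err := svc.RevokeSecurityGroupIngress(&ec2.RevokeSecurityGroupIngressInput{
                GroupId:    aws.String(groupID),
                IpProtocol: aws.String("tcp"),
                FromPort:   aws.Int64(22),
                ToPort:     aws.Int64(22),
                CidrIp:     aws.String("203.0.113.0/24"), // example range
            })
            return err
        }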

    ", + "RunInstances": "

    Launches the specified number of instances using an AMI for which you have permissions.

    When you launch an instance, it enters the pending state. After the instance is ready for you, it enters the running state. To check the state of your instance, call DescribeInstances.

    If you don't specify a security group when launching an instance, Amazon EC2 uses the default security group. For more information, see Security Groups in the Amazon Elastic Compute Cloud User Guide.

    [EC2-VPC only accounts] If you don't specify a subnet in the request, we choose a default subnet from your default VPC for you.

    [EC2-Classic accounts] If you're launching into EC2-Classic and you don't specify an Availability Zone, we choose one for you.

    Linux instances have access to the public key of the key pair at boot. You can use this key to provide secure access to the instance. Amazon EC2 public images use this feature to provide secure access without passwords. For more information, see Key Pairs in the Amazon Elastic Compute Cloud User Guide.

    You can provide optional user data when launching an instance. For more information, see Instance Metadata in the Amazon Elastic Compute Cloud User Guide.

    If any of the AMIs have a product code attached for which the user has not subscribed, RunInstances fails.

    T2 instance types can only be launched into a VPC. If you do not have a default VPC, or if you do not specify a subnet ID in the request, RunInstances fails.

    For more information about troubleshooting, see What To Do If An Instance Immediately Terminates, and Troubleshooting Connecting to Your Instance in the Amazon Elastic Compute Cloud User Guide.
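
    A minimal launch sketch, assuming an *ec2.EC2 client svc as above; the AMI ID, key pair name, and subnet ID are hypothetical, and the subnet satisfies the T2-in-a-VPC requirement noted above:

        // launchOne starts a single t2.micro and returns its instance ID.
        func launchOne(svc *ec2.EC2) (string, error) {
            out, err := svc.RunInstances(&ec2.RunInstancesInput{
                ImageId:      aws.String("ami-12345678"), // hypothetical
                InstanceType: aws.String("t2.micro"),
                MinCount:     aws.Int64(1),
                MaxCount:     aws.Int64(1),
                KeyName:      aws.String("my-key-pair"),              // hypothetical
                SubnetId:     aws.String("subnet-0123456789abcdef0"), // hypothetical
            })
            if err != nil {
                return "", err
            }
            return aws.StringValue(out.Instances[0].InstanceId), nil
        }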

    ", + "StartInstances": "

    Starts an Amazon EBS-backed instance that you've previously stopped.

    Instances that use Amazon EBS volumes as their root devices can be quickly stopped and started. When an instance is stopped, the compute resources are released and you are not billed for hourly instance usage. However, your root partition Amazon EBS volume remains, continues to persist your data, and you are charged for Amazon EBS volume usage. You can restart your instance at any time. Each time you transition an instance from stopped to started, Amazon EC2 charges a full instance hour, even if transitions happen multiple times within a single hour.

    Before stopping an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM.

    Performing this operation on an instance that uses an instance store as its root device returns an error.

    For more information, see Stopping Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "StopInstances": "

    Stops an Amazon EBS-backed instance. Each time you transition an instance from stopped to started, Amazon EC2 charges a full instance hour, even if transitions happen multiple times within a single hour.

    You can't start or stop Spot Instances.

    Instances that use Amazon EBS volumes as their root devices can be quickly stopped and started. When an instance is stopped, the compute resources are released and you are not billed for hourly instance usage. However, your root partition Amazon EBS volume remains, continues to persist your data, and you are charged for Amazon EBS volume usage. You can restart your instance at any time.

    Before stopping an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM.

    Performing this operation on an instance that uses an instance store as its root device returns an error.

    You can stop, start, and terminate EBS-backed instances. You can only terminate instance store-backed instances. What happens to an instance differs if you stop it or terminate it. For example, when you stop an instance, the root device and any other devices attached to the instance persist. When you terminate an instance, the root device and any other devices attached during the instance launch are automatically deleted. For more information about the differences between stopping and terminating instances, see Instance Lifecycle in the Amazon Elastic Compute Cloud User Guide.

    For more information about troubleshooting, see Troubleshooting Stopping Your Instance in the Amazon Elastic Compute Cloud User Guide.

    ", + "TerminateInstances": "

    Shuts down one or more instances. This operation is idempotent; if you terminate an instance more than once, each call succeeds.

    Terminated instances remain visible after termination (for approximately one hour).

    By default, Amazon EC2 deletes all EBS volumes that were attached when the instance launched. Volumes attached after instance launch persist.

    You can stop, start, and terminate EBS-backed instances. You can only terminate instance store-backed instances. What happens to an instance differs if you stop it or terminate it. For example, when you stop an instance, the root device and any other devices attached to the instance persist. When you terminate an instance, any attached EBS volumes with the DeleteOnTermination block device mapping parameter set to true are automatically deleted. For more information about the differences between stopping and terminating instances, see Instance Lifecycle in the Amazon Elastic Compute Cloud User Guide.

    For more information about troubleshooting, see Troubleshooting Terminating Your Instance in the Amazon Elastic Compute Cloud User Guide.

    ", + "UnassignPrivateIpAddresses": "

    Unassigns one or more secondary private IP addresses from a network interface.

    ", + "UnmonitorInstances": "

    Disables monitoring for a running instance. For more information about monitoring instances, see Monitoring Your Instances and Volumes in the Amazon Elastic Compute Cloud User Guide.

    " + }, + "service": "Amazon Elastic Compute Cloud

    Amazon Elastic Compute Cloud (Amazon EC2) provides resizable computing capacity in the Amazon Web Services (AWS) cloud. Using Amazon EC2 eliminates your need to invest in hardware up front, so you can develop and deploy applications faster.

    ", + "shapes": { + "AcceptVpcPeeringConnectionRequest": { + "base": null, + "refs": { + } + }, + "AcceptVpcPeeringConnectionResult": { + "base": null, + "refs": { + } + }, + "AccountAttribute": { + "base": "

    Describes an account attribute.

    ", + "refs": { + "AccountAttributeList$member": null + } + }, + "AccountAttributeList": { + "base": null, + "refs": { + "DescribeAccountAttributesResult$AccountAttributes": "

    Information about one or more account attributes.

    " + } + }, + "AccountAttributeName": { + "base": null, + "refs": { + "AccountAttributeNameStringList$member": null + } + }, + "AccountAttributeNameStringList": { + "base": null, + "refs": { + "DescribeAccountAttributesRequest$AttributeNames": "

    One or more account attribute names.

    " + } + }, + "AccountAttributeValue": { + "base": "

    Describes a value of an account attribute.

    ", + "refs": { + "AccountAttributeValueList$member": null + } + }, + "AccountAttributeValueList": { + "base": null, + "refs": { + "AccountAttribute$AttributeValues": "

    One or more values for the account attribute.

    " + } + }, + "ActiveInstance": { + "base": "

    Describes a running instance in a Spot fleet.

    ", + "refs": { + "ActiveInstanceSet$member": null + } + }, + "ActiveInstanceSet": { + "base": null, + "refs": { + "DescribeSpotFleetInstancesResponse$ActiveInstances": "

    The running instances. Note that this list is refreshed periodically and might be out of date.

    " + } + }, + "Address": { + "base": "

    Describes an Elastic IP address.

    ", + "refs": { + "AddressList$member": null + } + }, + "AddressList": { + "base": null, + "refs": { + "DescribeAddressesResult$Addresses": "

    Information about one or more Elastic IP addresses.

    " + } + }, + "AllocateAddressRequest": { + "base": null, + "refs": { + } + }, + "AllocateAddressResult": { + "base": null, + "refs": { + } + }, + "AllocationIdList": { + "base": null, + "refs": { + "DescribeAddressesRequest$AllocationIds": "

    [EC2-VPC] One or more allocation IDs.

    Default: Describes all your Elastic IP addresses.

    " + } + }, + "AllocationStrategy": { + "base": null, + "refs": { + "SpotFleetRequestConfigData$AllocationStrategy": "

    Determines how to allocate the target capacity across the Spot pools specified by the Spot fleet request. The default is lowestPrice.

    " + } + }, + "ArchitectureValues": { + "base": null, + "refs": { + "Image$Architecture": "

    The architecture of the image.

    ", + "ImportInstanceLaunchSpecification$Architecture": "

    The architecture of the instance.

    ", + "Instance$Architecture": "

    The architecture of the image.

    ", + "RegisterImageRequest$Architecture": "

    The architecture of the AMI.

    Default: For Amazon EBS-backed AMIs, i386. For instance store-backed AMIs, the architecture specified in the manifest file.

    " + } + }, + "AssignPrivateIpAddressesRequest": { + "base": null, + "refs": { + } + }, + "AssociateAddressRequest": { + "base": null, + "refs": { + } + }, + "AssociateAddressResult": { + "base": null, + "refs": { + } + }, + "AssociateDhcpOptionsRequest": { + "base": null, + "refs": { + } + }, + "AssociateRouteTableRequest": { + "base": null, + "refs": { + } + }, + "AssociateRouteTableResult": { + "base": null, + "refs": { + } + }, + "AttachClassicLinkVpcRequest": { + "base": null, + "refs": { + } + }, + "AttachClassicLinkVpcResult": { + "base": null, + "refs": { + } + }, + "AttachInternetGatewayRequest": { + "base": null, + "refs": { + } + }, + "AttachNetworkInterfaceRequest": { + "base": null, + "refs": { + } + }, + "AttachNetworkInterfaceResult": { + "base": null, + "refs": { + } + }, + "AttachVolumeRequest": { + "base": null, + "refs": { + } + }, + "AttachVpnGatewayRequest": { + "base": null, + "refs": { + } + }, + "AttachVpnGatewayResult": { + "base": null, + "refs": { + } + }, + "AttachmentStatus": { + "base": null, + "refs": { + "EbsInstanceBlockDevice$Status": "

    The attachment state.

    ", + "InstanceNetworkInterfaceAttachment$Status": "

    The attachment state.

    ", + "InternetGatewayAttachment$State": "

    The current state of the attachment.

    ", + "NetworkInterfaceAttachment$Status": "

    The attachment state.

    ", + "VpcAttachment$State": "

    The current state of the attachment.

    " + } + }, + "AttributeBooleanValue": { + "base": "

    The value to use when a resource attribute accepts a Boolean value.

    ", + "refs": { + "DescribeNetworkInterfaceAttributeResult$SourceDestCheck": "

    Indicates whether source/destination checking is enabled.

    ", + "DescribeVolumeAttributeResult$AutoEnableIO": "

    The state of the autoEnableIO attribute.

    ", + "DescribeVpcAttributeResult$EnableDnsSupport": "

    Indicates whether DNS resolution is enabled for the VPC. If this attribute is true, the Amazon DNS server resolves DNS hostnames for your instances to their corresponding IP addresses; otherwise, it does not.

    ", + "DescribeVpcAttributeResult$EnableDnsHostnames": "

    Indicates whether the instances launched in the VPC get DNS hostnames. If this attribute is true, instances in the VPC get DNS hostnames; otherwise, they do not.

    ", + "InstanceAttribute$DisableApiTermination": "

    If the value is true, you can't terminate the instance through the Amazon EC2 console, CLI, or API; otherwise, you can.

    ", + "InstanceAttribute$EbsOptimized": "

    Indicates whether the instance is optimized for EBS I/O.

    ", + "InstanceAttribute$SourceDestCheck": "

    Indicates whether source/destination checking is enabled. A value of true means checking is enabled, and false means checking is disabled. This value must be false for a NAT instance to perform NAT.

    ", + "ModifyInstanceAttributeRequest$SourceDestCheck": "

    Specifies whether source/destination checking is enabled. A value of true means that checking is enabled, and false means checking is disabled. This value must be false for a NAT instance to perform NAT.

    ", + "ModifyInstanceAttributeRequest$DisableApiTermination": "

    If the value is true, you can't terminate the instance using the Amazon EC2 console, CLI, or API; otherwise, you can. You cannot use this parameter for Spot Instances.

    ", + "ModifyInstanceAttributeRequest$EbsOptimized": "

    Specifies whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS Optimized instance.

    ", + "ModifyNetworkInterfaceAttributeRequest$SourceDestCheck": "

    Indicates whether source/destination checking is enabled. A value of true means checking is enabled, and false means checking is disabled. This value must be false for a NAT instance to perform NAT. For more information, see NAT Instances in the Amazon Virtual Private Cloud User Guide.

    ", + "ModifySubnetAttributeRequest$MapPublicIpOnLaunch": "

    Specify true to indicate that instances launched into the specified subnet should be assigned a public IP address.

    ", + "ModifyVolumeAttributeRequest$AutoEnableIO": "

    Indicates whether the volume should be auto-enabled for I/O operations.

    ", + "ModifyVpcAttributeRequest$EnableDnsSupport": "

    Indicates whether DNS resolution is supported for the VPC. If enabled, queries to the Amazon-provided DNS server at the 169.254.169.253 IP address, or to the reserved IP address at the base of the VPC network range \"plus two\", succeed. If disabled, the Amazon-provided DNS service in the VPC that resolves public DNS hostnames to IP addresses is not enabled.

    ", + "ModifyVpcAttributeRequest$EnableDnsHostnames": "

    Indicates whether the instances launched in the VPC get DNS hostnames. If enabled, instances in the VPC get DNS hostnames; otherwise, they do not.

    You can only enable DNS hostnames if you also enable DNS support.

    " + } + }, + "AttributeValue": { + "base": "

    The value to use for a resource attribute.

    ", + "refs": { + "DescribeNetworkInterfaceAttributeResult$Description": "

    The description of the network interface.

    ", + "ImageAttribute$KernelId": "

    The kernel ID.

    ", + "ImageAttribute$RamdiskId": "

    The RAM disk ID.

    ", + "ImageAttribute$Description": "

    A description for the AMI.

    ", + "ImageAttribute$SriovNetSupport": null, + "InstanceAttribute$InstanceType": "

    The instance type.

    ", + "InstanceAttribute$KernelId": "

    The kernel ID.

    ", + "InstanceAttribute$RamdiskId": "

    The RAM disk ID.

    ", + "InstanceAttribute$UserData": "

    The Base64-encoded MIME user data.

    ", + "InstanceAttribute$InstanceInitiatedShutdownBehavior": "

    Indicates whether an instance stops or terminates when you initiate shutdown from the instance (using the operating system command for system shutdown).

    ", + "InstanceAttribute$RootDeviceName": "

    The name of the root device (for example, /dev/sda1 or /dev/xvda).

    ", + "InstanceAttribute$SriovNetSupport": null, + "ModifyImageAttributeRequest$Description": "

    A description for the AMI.

    ", + "ModifyInstanceAttributeRequest$InstanceType": "

    Changes the instance type to the specified value. For more information, see Instance Types. If the instance type is not valid, the error returned is InvalidInstanceAttributeValue.

    ", + "ModifyInstanceAttributeRequest$Kernel": "

    Changes the instance's kernel to the specified value. We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB.

    ", + "ModifyInstanceAttributeRequest$Ramdisk": "

    Changes the instance's RAM disk to the specified value. We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB.

    ", + "ModifyInstanceAttributeRequest$InstanceInitiatedShutdownBehavior": "

    Specifies whether an instance stops or terminates when you initiate shutdown from the instance (using the operating system command for system shutdown).

    ", + "ModifyInstanceAttributeRequest$SriovNetSupport": "

    Set to simple to enable enhanced networking for the instance.

    There is no way to disable enhanced networking at this time.

    This option is supported only for HVM instances. Specifying this option with a PV instance can make it unreachable.

    ", + "ModifyNetworkInterfaceAttributeRequest$Description": "

    A description for the network interface.

    ", + "DhcpConfigurationValueList$member": null + } + }, + "AuthorizeSecurityGroupEgressRequest": { + "base": null, + "refs": { + } + }, + "AuthorizeSecurityGroupIngressRequest": { + "base": null, + "refs": { + } + }, + "AvailabilityZone": { + "base": "

    Describes an Availability Zone.

    ", + "refs": { + "AvailabilityZoneList$member": null + } + }, + "AvailabilityZoneList": { + "base": null, + "refs": { + "DescribeAvailabilityZonesResult$AvailabilityZones": "

    Information about one or more Availability Zones.

    " + } + }, + "AvailabilityZoneMessage": { + "base": "

    Describes a message about an Availability Zone.

    ", + "refs": { + "AvailabilityZoneMessageList$member": null + } + }, + "AvailabilityZoneMessageList": { + "base": null, + "refs": { + "AvailabilityZone$Messages": "

    Any messages about the Availability Zone.

    " + } + }, + "AvailabilityZoneState": { + "base": null, + "refs": { + "AvailabilityZone$State": "

    The state of the Availability Zone (available | impaired | unavailable).

    " + } + }, + "BatchState": { + "base": null, + "refs": { + "CancelSpotFleetRequestsSuccessItem$CurrentSpotFleetRequestState": "

    The current state of the Spot fleet request.

    ", + "CancelSpotFleetRequestsSuccessItem$PreviousSpotFleetRequestState": "

    The previous state of the Spot fleet request.

    ", + "SpotFleetRequestConfig$SpotFleetRequestState": "

    The state of the Spot fleet request.

    " + } + }, + "BlockDeviceMapping": { + "base": "

    Describes a block device mapping.

    ", + "refs": { + "BlockDeviceMappingList$member": null, + "BlockDeviceMappingRequestList$member": null + } + }, + "BlockDeviceMappingList": { + "base": null, + "refs": { + "Image$BlockDeviceMappings": "

    Any block device mapping entries.

    ", + "ImageAttribute$BlockDeviceMappings": "

    One or more block device mapping entries.

    ", + "LaunchSpecification$BlockDeviceMappings": "

    One or more block device mapping entries.

    ", + "SpotFleetLaunchSpecification$BlockDeviceMappings": "

    One or more block device mapping entries.

    ", + "RequestSpotLaunchSpecification$BlockDeviceMappings": "

    One or more block device mapping entries.

    " + } + }, + "BlockDeviceMappingRequestList": { + "base": null, + "refs": { + "CreateImageRequest$BlockDeviceMappings": "

    Information about one or more block device mappings.

    ", + "RegisterImageRequest$BlockDeviceMappings": "

    One or more block device mapping entries.

    ", + "RunInstancesRequest$BlockDeviceMappings": "

    The block device mapping.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "AcceptVpcPeeringConnectionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
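
    This DryRun pattern recurs on every request shape below. A minimal sketch of checking it from Go, assuming an *ec2.EC2 client svc as in the earlier examples; the error inspection uses the SDK's aws/awserr package:

        // canStart reports whether the caller is permitted to start the
        // instance, without actually starting it.
        func canStart(svc *ec2.EC2, instanceID string) (bool, error) {
            _, err := svc.StartInstances(&ec2.StartInstancesInput{
                InstanceIds: []*string{aws.String(instanceID)},
                DryRun:      aws.Bool(true),
            })
            if aerr, ok := err.(awserr.Error); ok {
                switch aerr.Code() {
                case "DryRunOperation":
                    return true, nil // the real call would have succeeded
                case "UnauthorizedOperation":
                    return false, nil
                }
            }
            return false, err
        }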

    ", + "AllocateAddressRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AssignPrivateIpAddressesRequest$AllowReassignment": "

    Indicates whether to allow an IP address that is already assigned to another network interface or instance to be reassigned to the specified network interface.

    ", + "AssociateAddressRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AssociateAddressRequest$AllowReassociation": "

    [EC2-VPC] Allows an Elastic IP address that is already associated with an instance or network interface to be re-associated with the specified instance or network interface. Otherwise, the operation fails.

    Default: false

    ", + "AssociateDhcpOptionsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AssociateRouteTableRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AttachClassicLinkVpcRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AttachClassicLinkVpcResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "AttachInternetGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AttachNetworkInterfaceRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AttachVolumeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AttachVpnGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AttributeBooleanValue$Value": "

    Valid values are true or false.

    ", + "AuthorizeSecurityGroupEgressRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AuthorizeSecurityGroupIngressRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "BundleInstanceRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CancelBundleTaskRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CancelConversionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CancelImportTaskRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CancelSpotFleetRequestsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CancelSpotFleetRequestsRequest$TerminateInstances": "

    Indicates whether to terminate instances for a Spot fleet request if it is canceled successfully.

    ", + "CancelSpotInstanceRequestsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ConfirmProductInstanceRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

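Nearly every request shape in this Boolean map carries the same DryRun flag, so one sketch covers them all. The following fragment is illustrative only, not part of the vendored diff; the region is a placeholder and the DescribeInstances call stands in for any DryRun-capable operation. Note that, as the documentation above says, a permitted dry run still comes back as an error, with code DryRunOperation.

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
    // Placeholder region; use your own configuration and credentials.
    svc := ec2.New(session.New(), aws.NewConfig().WithRegion("us-east-1"))

    // DryRun makes no change; the service answers only with an error code.
    _, err := svc.DescribeInstances(&ec2.DescribeInstancesInput{
        DryRun: aws.Bool(true),
    })
    if aerr, ok := err.(awserr.Error); ok {
        switch aerr.Code() {
        case "DryRunOperation":
            fmt.Println("permitted: the real request would have succeeded")
        case "UnauthorizedOperation":
            fmt.Println("not permitted")
        default:
            fmt.Println("unexpected error:", aerr.Message())
        }
    }
}

The later sketches below reuse this assumed svc client rather than repeating the setup.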
    ", + "ConfirmProductInstanceResult$Return": "

    The return value of the request. Returns true if the specified product code is owned by the requester and associated with the specified instance.

    ", + "CopyImageRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CopySnapshotRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CopySnapshotRequest$Encrypted": "

    Specifies whether the destination snapshot should be encrypted. There is no way to create an unencrypted snapshot copy from an encrypted snapshot; however, you can encrypt a copy of an unencrypted snapshot with this flag. The default CMK for EBS is used unless a non-default AWS Key Management Service (AWS KMS) CMK is specified with KmsKeyId. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateCustomerGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateDhcpOptionsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateImageRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateImageRequest$NoReboot": "

    By default, this parameter is set to false, which means Amazon EC2 attempts to shut down the instance cleanly before image creation and then reboots the instance. When the parameter is set to true, Amazon EC2 doesn't shut down the instance before creating the image. When this option is used, file system integrity on the created image can't be guaranteed.

    ", + "CreateInternetGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateKeyPairRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateNetworkAclEntryRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateNetworkAclEntryRequest$Egress": "

    Indicates whether this is an egress rule (rule is applied to traffic leaving the subnet).

    ", + "CreateNetworkAclRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateNetworkInterfaceRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreatePlacementGroupRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateRouteRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateRouteResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "CreateRouteTableRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateSecurityGroupRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateSnapshotRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateSpotDatafeedSubscriptionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateSubnetRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateTagsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateVolumeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateVolumeRequest$Encrypted": "

    Specifies whether the volume should be encrypted. Encrypted Amazon EBS volumes may only be attached to instances that support Amazon EBS encryption. Volumes that are created from encrypted snapshots are automatically encrypted. There is no way to create an encrypted volume from an unencrypted snapshot or vice versa. If your AMI uses encrypted volumes, you can only launch it on supported instance types. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

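As a hedged illustration of the Encrypted flag just described (reusing the assumed svc client from the earlier sketch; the zone and size are placeholders), creating a volume encrypted under the account's default EBS CMK could look like this. Note that CreateVolume in this SDK returns the Volume shape directly.

// Sketch only: Encrypted with no KmsKeyId falls back to the default CMK,
// as the documentation above states.
out, err := svc.CreateVolume(&ec2.CreateVolumeInput{
    AvailabilityZone: aws.String("us-east-1a"), // placeholder zone
    Size:             aws.Int64(8),             // GiB, placeholder size
    VolumeType:       aws.String("gp2"),
    Encrypted:        aws.Bool(true),
})
if err == nil {
    fmt.Println("created encrypted volume", aws.StringValue(out.VolumeId))
}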
    ", + "CreateVpcEndpointRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateVpcPeeringConnectionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateVpcRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateVpnConnectionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateVpnGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteCustomerGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteDhcpOptionsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteInternetGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteKeyPairRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteNetworkAclEntryRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteNetworkAclEntryRequest$Egress": "

    Indicates whether the rule is an egress rule.

    ", + "DeleteNetworkAclRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteNetworkInterfaceRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeletePlacementGroupRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteRouteRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteRouteTableRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteSecurityGroupRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteSnapshotRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteSpotDatafeedSubscriptionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteSubnetRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteTagsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteVolumeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteVpcEndpointsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteVpcPeeringConnectionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteVpcPeeringConnectionResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "DeleteVpcRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteVpnConnectionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteVpnGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeregisterImageRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeAccountAttributesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeAddressesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeAvailabilityZonesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeBundleTasksRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeClassicLinkInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeConversionTasksRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeCustomerGatewaysRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeDhcpOptionsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeImageAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeImagesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeImportImageTasksRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeImportSnapshotTasksRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeInstanceAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeInstanceStatusRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeInstanceStatusRequest$IncludeAllInstances": "

    When true, includes the health status for all instances. When false, includes the health status for running instances only.

    Default: false

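A small sketch of the IncludeAllInstances parameter above (same assumed svc client): with the flag unset, the call reports running instances only, so surfacing stopped instances requires opting in.

// Sketch: include health status for stopped as well as running instances.
status, err := svc.DescribeInstanceStatus(&ec2.DescribeInstanceStatusInput{
    IncludeAllInstances: aws.Bool(true), // default is false (running only)
})
if err == nil {
    fmt.Println(len(status.InstanceStatuses), "instance statuses returned")
}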
    ", + "DescribeInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeInternetGatewaysRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeKeyPairsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeMovingAddressesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeNetworkAclsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeNetworkInterfaceAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeNetworkInterfacesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribePlacementGroupsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribePrefixListsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeRegionsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeReservedInstancesOfferingsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeReservedInstancesOfferingsRequest$IncludeMarketplace": "

    Include Marketplace offerings in the response.

    ", + "DescribeReservedInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeRouteTablesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSecurityGroupsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSnapshotAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSnapshotsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSpotDatafeedSubscriptionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSpotFleetInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSpotFleetRequestHistoryRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSpotFleetRequestsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSpotInstanceRequestsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSpotPriceHistoryRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSubnetsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeTagsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVolumeAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVolumeStatusRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVolumesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVpcAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVpcClassicLinkRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVpcEndpointServicesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVpcEndpointsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVpcPeeringConnectionsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVpcsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVpnConnectionsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVpnGatewaysRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DetachClassicLinkVpcRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DetachClassicLinkVpcResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "DetachInternetGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DetachNetworkInterfaceRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DetachNetworkInterfaceRequest$Force": "

    Specifies whether to force a detachment.

    ", + "DetachVolumeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DetachVolumeRequest$Force": "

    Forces detachment if the previous detachment attempt did not occur cleanly (for example, logging into an instance, unmounting the volume, and detaching normally). This option can lead to data loss or a corrupted file system. Use this option only as a last resort to detach a volume from a failed instance. The instance won't have an opportunity to flush file system caches or file system metadata. If you use this option, you must perform file system check and repair procedures.

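Given the data-loss warning attached to DetachVolumeRequest$Force above, a hedged sketch of the last-resort path (placeholder volume ID; same assumed svc client):

// Sketch: force-detach skips the clean unmount, so per the documentation
// above, plan on running file system check and repair afterwards.
_, err := svc.DetachVolume(&ec2.DetachVolumeInput{
    VolumeId: aws.String("vol-0123456789abcdef0"), // placeholder ID
    Force:    aws.Bool(true),
})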
    ", + "DetachVpnGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DisableVpcClassicLinkRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DisableVpcClassicLinkResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "DisassociateAddressRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DisassociateRouteTableRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "EbsBlockDevice$DeleteOnTermination": "

    Indicates whether the EBS volume is deleted on instance termination.

    ", + "EbsBlockDevice$Encrypted": "

    Indicates whether the EBS volume is encrypted. Encrypted Amazon EBS volumes may only be attached to instances that support Amazon EBS encryption.

    ", + "EbsInstanceBlockDevice$DeleteOnTermination": "

    Indicates whether the volume is deleted on instance termination.

    ", + "EbsInstanceBlockDeviceSpecification$DeleteOnTermination": "

    Indicates whether the volume is deleted on instance termination.

    ", + "EnableVolumeIORequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "EnableVpcClassicLinkRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "EnableVpcClassicLinkResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "GetConsoleOutputRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "GetPasswordDataRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "Image$Public": "

    Indicates whether the image has public launch permissions. The value is true if this image has public launch permissions or false if it has only implicit and explicit launch permissions.

    ", + "ImportImageRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ImportInstanceLaunchSpecification$Monitoring": "

    Indicates whether monitoring is enabled.

    ", + "ImportInstanceRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ImportKeyPairRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ImportSnapshotRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ImportVolumeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "Instance$SourceDestCheck": "

    Specifies whether to enable an instance launched in a VPC to perform NAT. This controls whether source/destination checking is enabled on the instance. A value of true means checking is enabled, and false means checking is disabled. The value must be false for the instance to perform NAT. For more information, see NAT Instances in the Amazon Virtual Private Cloud User Guide.

    ", + "Instance$EbsOptimized": "

    Indicates whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS Optimized instance.

    ", + "InstanceNetworkInterface$SourceDestCheck": "

    Indicates whether to validate network traffic to or from this network interface.

    ", + "InstanceNetworkInterfaceAttachment$DeleteOnTermination": "

    Indicates whether the network interface is deleted when the instance is terminated.

    ", + "InstanceNetworkInterfaceSpecification$DeleteOnTermination": "

    If set to true, the interface is deleted when the instance is terminated. You can specify true only if creating a new network interface when launching an instance.

    ", + "InstanceNetworkInterfaceSpecification$AssociatePublicIpAddress": "

    Indicates whether to assign a public IP address to an instance you launch in a VPC. The public IP address can only be assigned to a network interface for eth0, and can only be assigned to a new network interface, not an existing one. You cannot specify more than one network interface in the request. If launching into a default subnet, the default value is true.

    ", + "InstancePrivateIpAddress$Primary": "

    Indicates whether this IP address is the primary private IP address of the network interface.

    ", + "LaunchSpecification$EbsOptimized": "

    Indicates whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS Optimized instance.

    Default: false

    ", + "ModifyImageAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ModifyInstanceAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ModifyNetworkInterfaceAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ModifySnapshotAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ModifyVolumeAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ModifyVpcEndpointRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ModifyVpcEndpointRequest$ResetPolicy": "

    Specify true to reset the policy document to the default policy. The default policy allows access to the service.

    ", + "ModifyVpcEndpointResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "MonitorInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "MoveAddressToVpcRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "NetworkAcl$IsDefault": "

    Indicates whether this is the default network ACL for the VPC.

    ", + "NetworkAclEntry$Egress": "

    Indicates whether the rule is an egress rule (applied to traffic leaving the subnet).

    ", + "NetworkInterface$RequesterManaged": "

    Indicates whether the network interface is being managed by AWS.

    ", + "NetworkInterface$SourceDestCheck": "

    Indicates whether traffic to or from the instance is validated.

    ", + "NetworkInterfaceAttachment$DeleteOnTermination": "

    Indicates whether the network interface is deleted when the instance is terminated.

    ", + "NetworkInterfaceAttachmentChanges$DeleteOnTermination": "

    Indicates whether the network interface is deleted when the instance is terminated.

    ", + "NetworkInterfacePrivateIpAddress$Primary": "

    Indicates whether this IP address is the primary private IP address of the network interface.

    ", + "PriceSchedule$Active": "

    The current price schedule, as determined by the term remaining for the Reserved Instance in the listing.

    A specific price schedule is always in effect, but only one price schedule can be active at any time. Take, for example, a Reserved Instance listing that has five months remaining in its term. When you specify price schedules for five months and two months, this means that schedule 1, covering the first three months of the remaining term, will be active during months 5, 4, and 3. Then schedule 2, covering the last two months of the term, will be active for months 2 and 1.

    ", + "PrivateIpAddressSpecification$Primary": "

    Indicates whether the private IP address is the primary private IP address. Only one IP address can be designated as primary.

    ", + "PurchaseReservedInstancesOfferingRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RebootInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RegisterImageRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RejectVpcPeeringConnectionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RejectVpcPeeringConnectionResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "ReleaseAddressRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ReplaceNetworkAclAssociationRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ReplaceNetworkAclEntryRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ReplaceNetworkAclEntryRequest$Egress": "

    Indicates whether to replace the egress rule.

    Default: If no value is specified, we replace the ingress rule.

    ", + "ReplaceRouteRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ReplaceRouteTableAssociationRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ReportInstanceStatusRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RequestSpotFleetRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RequestSpotInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ReservedInstancesOffering$Marketplace": "

    Indicates whether the offering is available through the Reserved Instance Marketplace (resale) or AWS. If it's a Reserved Instance Marketplace offering, this is true.

    ", + "ResetImageAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ResetInstanceAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ResetNetworkInterfaceAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ResetSnapshotAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RestoreAddressToClassicRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RevokeSecurityGroupEgressRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RevokeSecurityGroupIngressRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RouteTableAssociation$Main": "

    Indicates whether this is the main route table.

    ", + "RunInstancesMonitoringEnabled$Enabled": "

    Indicates whether monitoring is enabled for the instance.

    ", + "RunInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RunInstancesRequest$DisableApiTermination": "

    If you set this parameter to true, you can't terminate the instance using the Amazon EC2 console, CLI, or API; otherwise, you can. If you set this parameter to true and then later want to be able to terminate the instance, you must first change the value of the disableApiTermination attribute to false using ModifyInstanceAttribute. Alternatively, if you set InstanceInitiatedShutdownBehavior to terminate, you can terminate the instance by running the shutdown command from the instance.

    Default: false

    ", + "RunInstancesRequest$EbsOptimized": "

    Indicates whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS-optimized instance.

    Default: false

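Pulling several of the RunInstances flags documented above into one hedged sketch (the AMI and instance type are placeholders; same assumed svc client). RunInstances in this SDK returns the Reservation shape.

// Sketch: termination protection plus EBS optimization and monitoring.
// Per the documentation above, DisableApiTermination must later be flipped
// back via ModifyInstanceAttribute before TerminateInstances can succeed.
res, err := svc.RunInstances(&ec2.RunInstancesInput{
    ImageId:               aws.String("ami-12345678"), // placeholder AMI
    InstanceType:          aws.String("m4.large"),     // placeholder type
    MinCount:              aws.Int64(1),
    MaxCount:              aws.Int64(1),
    DisableApiTermination: aws.Bool(true),
    EbsOptimized:          aws.Bool(true), // additional usage charges apply
    Monitoring: &ec2.RunInstancesMonitoringEnabled{
        Enabled: aws.Bool(true),
    },
})
if err == nil {
    fmt.Println("launched", len(res.Instances), "instance(s)")
}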
    ", + "Snapshot$Encrypted": "

    Indicates whether the snapshot is encrypted.

    ", + "SpotFleetLaunchSpecification$EbsOptimized": "

    Indicates whether the instances are optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS Optimized instance.

    Default: false

    ", + "SpotFleetMonitoring$Enabled": "

    Enables monitoring for the instance.

    Default: false

    ", + "SpotFleetRequestConfigData$TerminateInstancesWithExpiration": "

    Indicates whether running Spot instances should be terminated when the Spot fleet request expires.

    ", + "StartInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "StopInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "StopInstancesRequest$Force": "

    Forces the instances to stop. The instances do not have an opportunity to flush file system caches or file system metadata. If you use this option, you must perform file system check and repair procedures. This option is not recommended for Windows instances.

    Default: false

    ", + "Subnet$DefaultForAz": "

    Indicates whether this is the default subnet for the Availability Zone.

    ", + "Subnet$MapPublicIpOnLaunch": "

    Indicates whether instances launched in this subnet receive a public IP address.

    ", + "TerminateInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "UnmonitorInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "Volume$Encrypted": "

    Indicates whether the volume will be encrypted.

    ", + "VolumeAttachment$DeleteOnTermination": "

    Indicates whether the EBS volume is deleted on instance termination.

    ", + "Vpc$IsDefault": "

    Indicates whether the VPC is the default VPC.

    ", + "VpcClassicLink$ClassicLinkEnabled": "

    Indicates whether the VPC is enabled for ClassicLink.

    ", + "VpnConnectionOptions$StaticRoutesOnly": "

    Indicates whether the VPN connection uses static routes only. Static routes must be used for devices that don't support BGP.

    ", + "VpnConnectionOptionsSpecification$StaticRoutesOnly": "

    Indicates whether the VPN connection uses static routes only. Static routes must be used for devices that don't support BGP.

    ", + "RequestSpotLaunchSpecification$EbsOptimized": "

    Indicates whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS-optimized instance.

    Default: false

    " + } + }, + "BundleIdStringList": { + "base": null, + "refs": { + "DescribeBundleTasksRequest$BundleIds": "

    One or more bundle task IDs.

    Default: Describes all your bundle tasks.

    " + } + }, + "BundleInstanceRequest": { + "base": null, + "refs": { + } + }, + "BundleInstanceResult": { + "base": null, + "refs": { + } + }, + "BundleTask": { + "base": "

    Describes a bundle task.

    ", + "refs": { + "BundleInstanceResult$BundleTask": "

    Information about the bundle task.

    ", + "BundleTaskList$member": null, + "CancelBundleTaskResult$BundleTask": "

    Information about the bundle task.

    " + } + }, + "BundleTaskError": { + "base": "

    Describes an error for BundleInstance.

    ", + "refs": { + "BundleTask$BundleTaskError": "

    If the task fails, a description of the error.

    " + } + }, + "BundleTaskList": { + "base": null, + "refs": { + "DescribeBundleTasksResult$BundleTasks": "

    Information about one or more bundle tasks.

    " + } + }, + "BundleTaskState": { + "base": null, + "refs": { + "BundleTask$State": "

    The state of the task.

    " + } + }, + "CancelBatchErrorCode": { + "base": null, + "refs": { + "CancelSpotFleetRequestsError$Code": "

    The error code.

    " + } + }, + "CancelBundleTaskRequest": { + "base": null, + "refs": { + } + }, + "CancelBundleTaskResult": { + "base": null, + "refs": { + } + }, + "CancelConversionRequest": { + "base": null, + "refs": { + } + }, + "CancelExportTaskRequest": { + "base": null, + "refs": { + } + }, + "CancelImportTaskRequest": { + "base": null, + "refs": { + } + }, + "CancelImportTaskResult": { + "base": null, + "refs": { + } + }, + "CancelReservedInstancesListingRequest": { + "base": null, + "refs": { + } + }, + "CancelReservedInstancesListingResult": { + "base": null, + "refs": { + } + }, + "CancelSpotFleetRequestsError": { + "base": "

    Describes a Spot fleet error.

    ", + "refs": { + "CancelSpotFleetRequestsErrorItem$Error": "

    The error.

    " + } + }, + "CancelSpotFleetRequestsErrorItem": { + "base": "

    Describes a Spot fleet request that was not successfully canceled.

    ", + "refs": { + "CancelSpotFleetRequestsErrorSet$member": null + } + }, + "CancelSpotFleetRequestsErrorSet": { + "base": null, + "refs": { + "CancelSpotFleetRequestsResponse$UnsuccessfulFleetRequests": "

    Information about the Spot fleet requests that were not successfully canceled.

    " + } + }, + "CancelSpotFleetRequestsRequest": { + "base": "

    Contains the parameters for CancelSpotFleetRequests.

    ", + "refs": { + } + }, + "CancelSpotFleetRequestsResponse": { + "base": "

    Contains the output of CancelSpotFleetRequests.

    ", + "refs": { + } + }, + "CancelSpotFleetRequestsSuccessItem": { + "base": "

    Describes a Spot fleet request that was successfully canceled.

    ", + "refs": { + "CancelSpotFleetRequestsSuccessSet$member": null + } + }, + "CancelSpotFleetRequestsSuccessSet": { + "base": null, + "refs": { + "CancelSpotFleetRequestsResponse$SuccessfulFleetRequests": "

    Information about the Spot fleet requests that were successfully canceled.
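
    To show how the success and error sets are consumed together, a hedged aws-sdk-go sketch; the fleet request ID is a placeholder:

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/ec2"
        )

        func main() {
            svc := ec2.New(session.Must(session.NewSession()))

            out, err := svc.CancelSpotFleetRequests(&ec2.CancelSpotFleetRequestsInput{
                SpotFleetRequestIds: []*string{aws.String("sfr-placeholder-id")},
                TerminateInstances:  aws.Bool(true),
            })
            if err != nil {
                log.Fatal(err)
            }
            for _, s := range out.SuccessfulFleetRequests {
                fmt.Println("canceled:", aws.StringValue(s.SpotFleetRequestId))
            }
            for _, e := range out.UnsuccessfulFleetRequests {
                fmt.Printf("not canceled: %s (%s)\n",
                    aws.StringValue(e.SpotFleetRequestId),
                    aws.StringValue(e.Error.Code))
            }
        }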

    " + } + }, + "CancelSpotInstanceRequestState": { + "base": null, + "refs": { + "CancelledSpotInstanceRequest$State": "

    The state of the Spot instance request.

    " + } + }, + "CancelSpotInstanceRequestsRequest": { + "base": "

    Contains the parameters for CancelSpotInstanceRequests.

    ", + "refs": { + } + }, + "CancelSpotInstanceRequestsResult": { + "base": "

    Contains the output of CancelSpotInstanceRequests.

    ", + "refs": { + } + }, + "CancelledSpotInstanceRequest": { + "base": "

    Describes a request to cancel a Spot instance.

    ", + "refs": { + "CancelledSpotInstanceRequestList$member": null + } + }, + "CancelledSpotInstanceRequestList": { + "base": null, + "refs": { + "CancelSpotInstanceRequestsResult$CancelledSpotInstanceRequests": "

    One or more Spot instance requests.

    " + } + }, + "ClassicLinkInstance": { + "base": "

    Describes a linked EC2-Classic instance.

    ", + "refs": { + "ClassicLinkInstanceList$member": null + } + }, + "ClassicLinkInstanceList": { + "base": null, + "refs": { + "DescribeClassicLinkInstancesResult$Instances": "

    Information about one or more linked EC2-Classic instances.

    " + } + }, + "ClientData": { + "base": "

    Describes the client-specific data.

    ", + "refs": { + "ImportImageRequest$ClientData": "

    The client-specific data.

    ", + "ImportSnapshotRequest$ClientData": "

    The client-specific data.

    " + } + }, + "ConfirmProductInstanceRequest": { + "base": null, + "refs": { + } + }, + "ConfirmProductInstanceResult": { + "base": null, + "refs": { + } + }, + "ContainerFormat": { + "base": null, + "refs": { + "ExportToS3Task$ContainerFormat": "

    The container format used to combine disk images with metadata (such as OVF). If absent, only the disk image is exported.

    ", + "ExportToS3TaskSpecification$ContainerFormat": "

    The container format used to combine disk images with metadata (such as OVF). If absent, only the disk image is exported.

    " + } + }, + "ConversionIdStringList": { + "base": null, + "refs": { + "DescribeConversionTasksRequest$ConversionTaskIds": "

    One or more conversion task IDs.

    " + } + }, + "ConversionTask": { + "base": "

    Describes a conversion task.

    ", + "refs": { + "DescribeConversionTaskList$member": null, + "ImportInstanceResult$ConversionTask": "

    Information about the conversion task.

    ", + "ImportVolumeResult$ConversionTask": "

    Information about the conversion task.

    " + } + }, + "ConversionTaskState": { + "base": null, + "refs": { + "ConversionTask$State": "

    The state of the conversion task.

    " + } + }, + "CopyImageRequest": { + "base": null, + "refs": { + } + }, + "CopyImageResult": { + "base": null, + "refs": { + } + }, + "CopySnapshotRequest": { + "base": null, + "refs": { + } + }, + "CopySnapshotResult": { + "base": null, + "refs": { + } + }, + "CreateCustomerGatewayRequest": { + "base": null, + "refs": { + } + }, + "CreateCustomerGatewayResult": { + "base": null, + "refs": { + } + }, + "CreateDhcpOptionsRequest": { + "base": null, + "refs": { + } + }, + "CreateDhcpOptionsResult": { + "base": null, + "refs": { + } + }, + "CreateFlowLogsRequest": { + "base": null, + "refs": { + } + }, + "CreateFlowLogsResult": { + "base": null, + "refs": { + } + }, + "CreateImageRequest": { + "base": null, + "refs": { + } + }, + "CreateImageResult": { + "base": null, + "refs": { + } + }, + "CreateInstanceExportTaskRequest": { + "base": null, + "refs": { + } + }, + "CreateInstanceExportTaskResult": { + "base": null, + "refs": { + } + }, + "CreateInternetGatewayRequest": { + "base": null, + "refs": { + } + }, + "CreateInternetGatewayResult": { + "base": null, + "refs": { + } + }, + "CreateKeyPairRequest": { + "base": null, + "refs": { + } + }, + "CreateNetworkAclEntryRequest": { + "base": null, + "refs": { + } + }, + "CreateNetworkAclRequest": { + "base": null, + "refs": { + } + }, + "CreateNetworkAclResult": { + "base": null, + "refs": { + } + }, + "CreateNetworkInterfaceRequest": { + "base": null, + "refs": { + } + }, + "CreateNetworkInterfaceResult": { + "base": null, + "refs": { + } + }, + "CreatePlacementGroupRequest": { + "base": null, + "refs": { + } + }, + "CreateReservedInstancesListingRequest": { + "base": null, + "refs": { + } + }, + "CreateReservedInstancesListingResult": { + "base": null, + "refs": { + } + }, + "CreateRouteRequest": { + "base": null, + "refs": { + } + }, + "CreateRouteResult": { + "base": null, + "refs": { + } + }, + "CreateRouteTableRequest": { + "base": null, + "refs": { + } + }, + "CreateRouteTableResult": { + "base": null, + "refs": { + } + }, + "CreateSecurityGroupRequest": { + "base": null, + "refs": { + } + }, + "CreateSecurityGroupResult": { + "base": null, + "refs": { + } + }, + "CreateSnapshotRequest": { + "base": null, + "refs": { + } + }, + "CreateSpotDatafeedSubscriptionRequest": { + "base": "

    Contains the parameters for CreateSpotDatafeedSubscription.

    ", + "refs": { + } + }, + "CreateSpotDatafeedSubscriptionResult": { + "base": "

    Contains the output of CreateSpotDatafeedSubscription.

    ", + "refs": { + } + }, + "CreateSubnetRequest": { + "base": null, + "refs": { + } + }, + "CreateSubnetResult": { + "base": null, + "refs": { + } + }, + "CreateTagsRequest": { + "base": null, + "refs": { + } + }, + "CreateVolumePermission": { + "base": "

    Describes the user or group to be added or removed from the permissions for a volume.

    ", + "refs": { + "CreateVolumePermissionList$member": null + } + }, + "CreateVolumePermissionList": { + "base": null, + "refs": { + "CreateVolumePermissionModifications$Add": "

    Adds a specific AWS account ID or group to a volume's list of create volume permissions.

    ", + "CreateVolumePermissionModifications$Remove": "

    Removes a specific AWS account ID or group from a volume's list of create volume permissions.

    ", + "DescribeSnapshotAttributeResult$CreateVolumePermissions": "

    A list of permissions for creating volumes from the snapshot.

    " + } + }, + "CreateVolumePermissionModifications": { + "base": "

    Describes modifications to the permissions for a volume.

    ", + "refs": { + "ModifySnapshotAttributeRequest$CreateVolumePermission": "

    A JSON representation of the snapshot attribute modification.
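
    A sketch of such a modification, granting a hypothetical account create-volume permission on a placeholder snapshot:

        package main

        import (
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/ec2"
        )

        func main() {
            svc := ec2.New(session.Must(session.NewSession()))

            // Snapshot ID and account ID are placeholders.
            _, err := svc.ModifySnapshotAttribute(&ec2.ModifySnapshotAttributeInput{
                SnapshotId: aws.String("snap-1234567890abcdef0"),
                CreateVolumePermission: &ec2.CreateVolumePermissionModifications{
                    Add: []*ec2.CreateVolumePermission{
                        {UserId: aws.String("123456789012")},
                    },
                },
            })
            if err != nil {
                log.Fatal(err)
            }
        }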

    " + } + }, + "CreateVolumeRequest": { + "base": null, + "refs": { + } + }, + "CreateVpcEndpointRequest": { + "base": null, + "refs": { + } + }, + "CreateVpcEndpointResult": { + "base": null, + "refs": { + } + }, + "CreateVpcPeeringConnectionRequest": { + "base": null, + "refs": { + } + }, + "CreateVpcPeeringConnectionResult": { + "base": null, + "refs": { + } + }, + "CreateVpcRequest": { + "base": null, + "refs": { + } + }, + "CreateVpcResult": { + "base": null, + "refs": { + } + }, + "CreateVpnConnectionRequest": { + "base": null, + "refs": { + } + }, + "CreateVpnConnectionResult": { + "base": null, + "refs": { + } + }, + "CreateVpnConnectionRouteRequest": { + "base": null, + "refs": { + } + }, + "CreateVpnGatewayRequest": { + "base": null, + "refs": { + } + }, + "CreateVpnGatewayResult": { + "base": null, + "refs": { + } + }, + "CurrencyCodeValues": { + "base": null, + "refs": { + "PriceSchedule$CurrencyCode": "

    The currency for transacting the Reserved Instance resale. At this time, the only supported currency is USD.

    ", + "PriceScheduleSpecification$CurrencyCode": "

    The currency for transacting the Reserved Instance resale. At this time, the only supported currency is USD.

    ", + "ReservedInstanceLimitPrice$CurrencyCode": "

    The currency in which the limitPrice amount is specified. At this time, the only supported currency is USD.

    ", + "ReservedInstances$CurrencyCode": "

    The currency of the Reserved Instance. It's specified using ISO 4217 standard currency codes. At this time, the only supported currency is USD.

    ", + "ReservedInstancesOffering$CurrencyCode": "

    The currency of the Reserved Instance offering you are purchasing. It's specified using ISO 4217 standard currency codes. At this time, the only supported currency is USD.

    " + } + }, + "CustomerGateway": { + "base": "

    Describes a customer gateway.

    ", + "refs": { + "CreateCustomerGatewayResult$CustomerGateway": "

    Information about the customer gateway.

    ", + "CustomerGatewayList$member": null + } + }, + "CustomerGatewayIdStringList": { + "base": null, + "refs": { + "DescribeCustomerGatewaysRequest$CustomerGatewayIds": "

    One or more customer gateway IDs.

    Default: Describes all your customer gateways.

    " + } + }, + "CustomerGatewayList": { + "base": null, + "refs": { + "DescribeCustomerGatewaysResult$CustomerGateways": "

    Information about one or more customer gateways.

    " + } + }, + "DatafeedSubscriptionState": { + "base": null, + "refs": { + "SpotDatafeedSubscription$State": "

    The state of the Spot instance data feed subscription.

    " + } + }, + "DateTime": { + "base": null, + "refs": { + "BundleTask$StartTime": "

    The time this task started.

    ", + "BundleTask$UpdateTime": "

    The time of the most recent update for the task.

    ", + "ClientData$UploadStart": "

    The time that the disk upload starts.

    ", + "ClientData$UploadEnd": "

    The time that the disk upload ends.

    ", + "DescribeSpotFleetRequestHistoryRequest$StartTime": "

    The starting date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).

    ", + "DescribeSpotFleetRequestHistoryResponse$StartTime": "

    The starting date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).

    ", + "DescribeSpotFleetRequestHistoryResponse$LastEvaluatedTime": "

    The last date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). All records up to this time were retrieved.

    If nextToken indicates that there are more results, this value is not present.

    ", + "DescribeSpotPriceHistoryRequest$StartTime": "

    The date and time, up to the past 90 days, from which to start retrieving the price history data, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).

    ", + "DescribeSpotPriceHistoryRequest$EndTime": "

    The date and time, up to the current date, from which to stop retrieving the price history data, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).
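
    A sketch of querying a bounded window of price history in the UTC format noted above; the window and instance type are arbitrary:

        package main

        import (
            "fmt"
            "log"
            "time"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/ec2"
        )

        func main() {
            svc := ec2.New(session.Must(session.NewSession()))

            out, err := svc.DescribeSpotPriceHistory(&ec2.DescribeSpotPriceHistoryInput{
                StartTime:     aws.Time(time.Date(2017, 7, 1, 0, 0, 0, 0, time.UTC)),
                EndTime:       aws.Time(time.Date(2017, 7, 2, 0, 0, 0, 0, time.UTC)),
                InstanceTypes: []*string{aws.String("m4.large")},
            })
            if err != nil {
                log.Fatal(err)
            }
            for _, p := range out.SpotPriceHistory {
                fmt.Println(aws.StringValue(p.AvailabilityZone), aws.StringValue(p.SpotPrice))
            }
        }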

    ", + "EbsInstanceBlockDevice$AttachTime": "

    The time stamp when the attachment was initiated.

    ", + "FlowLog$CreationTime": "

    The date and time the flow log was created.

    ", + "GetConsoleOutputResult$Timestamp": "

    The time the output was last updated.

    ", + "GetPasswordDataResult$Timestamp": "

    The time the data was last updated.

    ", + "HistoryRecord$Timestamp": "

    The date and time of the event, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).

    ", + "Instance$LaunchTime": "

    The time the instance was launched.

    ", + "InstanceNetworkInterfaceAttachment$AttachTime": "

    The time stamp when the attachment was initiated.

    ", + "InstanceStatusDetails$ImpairedSince": "

    The time when a status check failed. For an instance that was launched and impaired, this is the time when the instance was launched.

    ", + "InstanceStatusEvent$NotBefore": "

    The earliest scheduled start time for the event.

    ", + "InstanceStatusEvent$NotAfter": "

    The latest scheduled end time for the event.

    ", + "NetworkInterfaceAttachment$AttachTime": "

    The timestamp indicating when the attachment was initiated.

    ", + "ReportInstanceStatusRequest$StartTime": "

    The time at which the reported instance health state began.

    ", + "ReportInstanceStatusRequest$EndTime": "

    The time at which the reported instance health state ended.

    ", + "RequestSpotInstancesRequest$ValidFrom": "

    The start date of the request. If this is a one-time request, the request becomes active at this date and time and remains active until all instances launch, the request expires, or the request is canceled. If the request is persistent, the request becomes active at this date and time and remains active until it expires or is canceled.

    Default: The request is effective indefinitely.

    ", + "RequestSpotInstancesRequest$ValidUntil": "

    The end date of the request. If this is a one-time request, the request remains active until all instances launch, the request is canceled, or this date is reached. If the request is persistent, it remains active until it is canceled or this date and time is reached.

    Default: The request is effective indefinitely.
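
    A sketch of a one-time request that overrides the indefinite default with an explicit end date; the price, AMI ID, and instance type are placeholders:

        package main

        import (
            "log"
            "time"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/ec2"
        )

        func main() {
            svc := ec2.New(session.Must(session.NewSession()))

            // Expire the request after 24 hours instead of leaving it open-ended.
            _, err := svc.RequestSpotInstances(&ec2.RequestSpotInstancesInput{
                SpotPrice:  aws.String("0.05"), // placeholder bid
                ValidUntil: aws.Time(time.Now().Add(24 * time.Hour)),
                LaunchSpecification: &ec2.RequestSpotLaunchSpecification{
                    ImageId:      aws.String("ami-12345678"), // placeholder
                    InstanceType: aws.String("m4.large"),
                },
            })
            if err != nil {
                log.Fatal(err)
            }
        }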

    ", + "ReservedInstances$Start": "

    The date and time the Reserved Instance started.

    ", + "ReservedInstances$End": "

    The time when the Reserved Instance expires.

    ", + "ReservedInstancesListing$CreateDate": "

    The time the listing was created.

    ", + "ReservedInstancesListing$UpdateDate": "

    The last modified timestamp of the listing.

    ", + "ReservedInstancesModification$CreateDate": "

    The time when the modification request was created.

    ", + "ReservedInstancesModification$UpdateDate": "

    The time when the modification request was last updated.

    ", + "ReservedInstancesModification$EffectiveDate": "

    The time for the modification to become effective.

    ", + "Snapshot$StartTime": "

    The time stamp when the snapshot was initiated.

    ", + "SpotFleetRequestConfigData$ValidFrom": "

    The start date and time of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). The default is to start fulfilling the request immediately.

    ", + "SpotFleetRequestConfigData$ValidUntil": "

    The end date and time of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). At this point, no new Spot instance requests are placed or enabled to fulfill the request.

    ", + "SpotInstanceRequest$ValidFrom": "

    The start date of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). If this is a one-time request, the request becomes active at this date and time and remains active until all instances launch, the request expires, or the request is canceled. If the request is persistent, the request becomes active at this date and time and remains active until it expires or is canceled.

    ", + "SpotInstanceRequest$ValidUntil": "

    The end date of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). If this is a one-time request, the request remains active until all instances launch, the request is canceled, or this date is reached. If the request is persistent, it remains active until it is canceled or this date is reached.

    ", + "SpotInstanceRequest$CreateTime": "

    The date and time when the Spot instance request was created, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).

    ", + "SpotInstanceStatus$UpdateTime": "

    The date and time of the most recent status update, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).

    ", + "SpotPrice$Timestamp": "

    The date and time the request was created, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).

    ", + "VgwTelemetry$LastStatusChange": "

    The date and time of the last change in status.

    ", + "Volume$CreateTime": "

    The time stamp when volume creation was initiated.

    ", + "VolumeAttachment$AttachTime": "

    The time stamp when the attachment was initiated.

    ", + "VolumeStatusEvent$NotBefore": "

    The earliest start time of the event.

    ", + "VolumeStatusEvent$NotAfter": "

    The latest end time of the event.

    ", + "VpcEndpoint$CreationTimestamp": "

    The date and time the VPC endpoint was created.

    ", + "VpcPeeringConnection$ExpirationTime": "

    The time that an unaccepted VPC peering connection will expire.

    " + } + }, + "DeleteCustomerGatewayRequest": { + "base": null, + "refs": { + } + }, + "DeleteDhcpOptionsRequest": { + "base": null, + "refs": { + } + }, + "DeleteFlowLogsRequest": { + "base": null, + "refs": { + } + }, + "DeleteFlowLogsResult": { + "base": null, + "refs": { + } + }, + "DeleteInternetGatewayRequest": { + "base": null, + "refs": { + } + }, + "DeleteKeyPairRequest": { + "base": null, + "refs": { + } + }, + "DeleteNetworkAclEntryRequest": { + "base": null, + "refs": { + } + }, + "DeleteNetworkAclRequest": { + "base": null, + "refs": { + } + }, + "DeleteNetworkInterfaceRequest": { + "base": null, + "refs": { + } + }, + "DeletePlacementGroupRequest": { + "base": null, + "refs": { + } + }, + "DeleteRouteRequest": { + "base": null, + "refs": { + } + }, + "DeleteRouteTableRequest": { + "base": null, + "refs": { + } + }, + "DeleteSecurityGroupRequest": { + "base": null, + "refs": { + } + }, + "DeleteSnapshotRequest": { + "base": null, + "refs": { + } + }, + "DeleteSpotDatafeedSubscriptionRequest": { + "base": "

    Contains the parameters for DeleteSpotDatafeedSubscription.

    ", + "refs": { + } + }, + "DeleteSubnetRequest": { + "base": null, + "refs": { + } + }, + "DeleteTagsRequest": { + "base": null, + "refs": { + } + }, + "DeleteVolumeRequest": { + "base": null, + "refs": { + } + }, + "DeleteVpcEndpointsRequest": { + "base": null, + "refs": { + } + }, + "DeleteVpcEndpointsResult": { + "base": null, + "refs": { + } + }, + "DeleteVpcPeeringConnectionRequest": { + "base": null, + "refs": { + } + }, + "DeleteVpcPeeringConnectionResult": { + "base": null, + "refs": { + } + }, + "DeleteVpcRequest": { + "base": null, + "refs": { + } + }, + "DeleteVpnConnectionRequest": { + "base": null, + "refs": { + } + }, + "DeleteVpnConnectionRouteRequest": { + "base": null, + "refs": { + } + }, + "DeleteVpnGatewayRequest": { + "base": null, + "refs": { + } + }, + "DeregisterImageRequest": { + "base": null, + "refs": { + } + }, + "DescribeAccountAttributesRequest": { + "base": null, + "refs": { + } + }, + "DescribeAccountAttributesResult": { + "base": null, + "refs": { + } + }, + "DescribeAddressesRequest": { + "base": null, + "refs": { + } + }, + "DescribeAddressesResult": { + "base": null, + "refs": { + } + }, + "DescribeAvailabilityZonesRequest": { + "base": null, + "refs": { + } + }, + "DescribeAvailabilityZonesResult": { + "base": null, + "refs": { + } + }, + "DescribeBundleTasksRequest": { + "base": null, + "refs": { + } + }, + "DescribeBundleTasksResult": { + "base": null, + "refs": { + } + }, + "DescribeClassicLinkInstancesRequest": { + "base": null, + "refs": { + } + }, + "DescribeClassicLinkInstancesResult": { + "base": null, + "refs": { + } + }, + "DescribeConversionTaskList": { + "base": null, + "refs": { + "DescribeConversionTasksResult$ConversionTasks": "

    Information about the conversion tasks.

    " + } + }, + "DescribeConversionTasksRequest": { + "base": null, + "refs": { + } + }, + "DescribeConversionTasksResult": { + "base": null, + "refs": { + } + }, + "DescribeCustomerGatewaysRequest": { + "base": null, + "refs": { + } + }, + "DescribeCustomerGatewaysResult": { + "base": null, + "refs": { + } + }, + "DescribeDhcpOptionsRequest": { + "base": null, + "refs": { + } + }, + "DescribeDhcpOptionsResult": { + "base": null, + "refs": { + } + }, + "DescribeExportTasksRequest": { + "base": null, + "refs": { + } + }, + "DescribeExportTasksResult": { + "base": null, + "refs": { + } + }, + "DescribeFlowLogsRequest": { + "base": null, + "refs": { + } + }, + "DescribeFlowLogsResult": { + "base": null, + "refs": { + } + }, + "DescribeImageAttributeRequest": { + "base": null, + "refs": { + } + }, + "DescribeImagesRequest": { + "base": null, + "refs": { + } + }, + "DescribeImagesResult": { + "base": null, + "refs": { + } + }, + "DescribeImportImageTasksRequest": { + "base": null, + "refs": { + } + }, + "DescribeImportImageTasksResult": { + "base": null, + "refs": { + } + }, + "DescribeImportSnapshotTasksRequest": { + "base": null, + "refs": { + } + }, + "DescribeImportSnapshotTasksResult": { + "base": null, + "refs": { + } + }, + "DescribeInstanceAttributeRequest": { + "base": null, + "refs": { + } + }, + "DescribeInstanceStatusRequest": { + "base": null, + "refs": { + } + }, + "DescribeInstanceStatusResult": { + "base": null, + "refs": { + } + }, + "DescribeInstancesRequest": { + "base": null, + "refs": { + } + }, + "DescribeInstancesResult": { + "base": null, + "refs": { + } + }, + "DescribeInternetGatewaysRequest": { + "base": null, + "refs": { + } + }, + "DescribeInternetGatewaysResult": { + "base": null, + "refs": { + } + }, + "DescribeKeyPairsRequest": { + "base": null, + "refs": { + } + }, + "DescribeKeyPairsResult": { + "base": null, + "refs": { + } + }, + "DescribeMovingAddressesRequest": { + "base": null, + "refs": { + } + }, + "DescribeMovingAddressesResult": { + "base": null, + "refs": { + } + }, + "DescribeNetworkAclsRequest": { + "base": null, + "refs": { + } + }, + "DescribeNetworkAclsResult": { + "base": null, + "refs": { + } + }, + "DescribeNetworkInterfaceAttributeRequest": { + "base": null, + "refs": { + } + }, + "DescribeNetworkInterfaceAttributeResult": { + "base": null, + "refs": { + } + }, + "DescribeNetworkInterfacesRequest": { + "base": null, + "refs": { + } + }, + "DescribeNetworkInterfacesResult": { + "base": null, + "refs": { + } + }, + "DescribePlacementGroupsRequest": { + "base": null, + "refs": { + } + }, + "DescribePlacementGroupsResult": { + "base": null, + "refs": { + } + }, + "DescribePrefixListsRequest": { + "base": null, + "refs": { + } + }, + "DescribePrefixListsResult": { + "base": null, + "refs": { + } + }, + "DescribeRegionsRequest": { + "base": null, + "refs": { + } + }, + "DescribeRegionsResult": { + "base": null, + "refs": { + } + }, + "DescribeReservedInstancesListingsRequest": { + "base": null, + "refs": { + } + }, + "DescribeReservedInstancesListingsResult": { + "base": null, + "refs": { + } + }, + "DescribeReservedInstancesModificationsRequest": { + "base": null, + "refs": { + } + }, + "DescribeReservedInstancesModificationsResult": { + "base": null, + "refs": { + } + }, + "DescribeReservedInstancesOfferingsRequest": { + "base": null, + "refs": { + } + }, + "DescribeReservedInstancesOfferingsResult": { + "base": null, + "refs": { + } + }, + "DescribeReservedInstancesRequest": { + "base": null, + "refs": { + } + }, + 
"DescribeReservedInstancesResult": { + "base": null, + "refs": { + } + }, + "DescribeRouteTablesRequest": { + "base": null, + "refs": { + } + }, + "DescribeRouteTablesResult": { + "base": null, + "refs": { + } + }, + "DescribeSecurityGroupsRequest": { + "base": null, + "refs": { + } + }, + "DescribeSecurityGroupsResult": { + "base": null, + "refs": { + } + }, + "DescribeSnapshotAttributeRequest": { + "base": null, + "refs": { + } + }, + "DescribeSnapshotAttributeResult": { + "base": null, + "refs": { + } + }, + "DescribeSnapshotsRequest": { + "base": null, + "refs": { + } + }, + "DescribeSnapshotsResult": { + "base": null, + "refs": { + } + }, + "DescribeSpotDatafeedSubscriptionRequest": { + "base": "

    Contains the parameters for DescribeSpotDatafeedSubscription.

    ", + "refs": { + } + }, + "DescribeSpotDatafeedSubscriptionResult": { + "base": "

    Contains the output of DescribeSpotDatafeedSubscription.

    ", + "refs": { + } + }, + "DescribeSpotFleetInstancesRequest": { + "base": "

    Contains the parameters for DescribeSpotFleetInstances.

    ", + "refs": { + } + }, + "DescribeSpotFleetInstancesResponse": { + "base": "

    Contains the output of DescribeSpotFleetInstances.

    ", + "refs": { + } + }, + "DescribeSpotFleetRequestHistoryRequest": { + "base": "

    Contains the parameters for DescribeSpotFleetRequestHistory.

    ", + "refs": { + } + }, + "DescribeSpotFleetRequestHistoryResponse": { + "base": "

    Contains the output of DescribeSpotFleetRequestHistory.

    ", + "refs": { + } + }, + "DescribeSpotFleetRequestsRequest": { + "base": "

    Contains the parameters for DescribeSpotFleetRequests.

    ", + "refs": { + } + }, + "DescribeSpotFleetRequestsResponse": { + "base": "

    Contains the output of DescribeSpotFleetRequests.

    ", + "refs": { + } + }, + "DescribeSpotInstanceRequestsRequest": { + "base": "

    Contains the parameters for DescribeSpotInstanceRequests.

    ", + "refs": { + } + }, + "DescribeSpotInstanceRequestsResult": { + "base": "

    Contains the output of DescribeSpotInstanceRequests.

    ", + "refs": { + } + }, + "DescribeSpotPriceHistoryRequest": { + "base": "

    Contains the parameters for DescribeSpotPriceHistory.

    ", + "refs": { + } + }, + "DescribeSpotPriceHistoryResult": { + "base": "

    Contains the output of DescribeSpotPriceHistory.

    ", + "refs": { + } + }, + "DescribeSubnetsRequest": { + "base": null, + "refs": { + } + }, + "DescribeSubnetsResult": { + "base": null, + "refs": { + } + }, + "DescribeTagsRequest": { + "base": null, + "refs": { + } + }, + "DescribeTagsResult": { + "base": null, + "refs": { + } + }, + "DescribeVolumeAttributeRequest": { + "base": null, + "refs": { + } + }, + "DescribeVolumeAttributeResult": { + "base": null, + "refs": { + } + }, + "DescribeVolumeStatusRequest": { + "base": null, + "refs": { + } + }, + "DescribeVolumeStatusResult": { + "base": null, + "refs": { + } + }, + "DescribeVolumesRequest": { + "base": null, + "refs": { + } + }, + "DescribeVolumesResult": { + "base": null, + "refs": { + } + }, + "DescribeVpcAttributeRequest": { + "base": null, + "refs": { + } + }, + "DescribeVpcAttributeResult": { + "base": null, + "refs": { + } + }, + "DescribeVpcClassicLinkRequest": { + "base": null, + "refs": { + } + }, + "DescribeVpcClassicLinkResult": { + "base": null, + "refs": { + } + }, + "DescribeVpcEndpointServicesRequest": { + "base": null, + "refs": { + } + }, + "DescribeVpcEndpointServicesResult": { + "base": null, + "refs": { + } + }, + "DescribeVpcEndpointsRequest": { + "base": null, + "refs": { + } + }, + "DescribeVpcEndpointsResult": { + "base": null, + "refs": { + } + }, + "DescribeVpcPeeringConnectionsRequest": { + "base": null, + "refs": { + } + }, + "DescribeVpcPeeringConnectionsResult": { + "base": null, + "refs": { + } + }, + "DescribeVpcsRequest": { + "base": null, + "refs": { + } + }, + "DescribeVpcsResult": { + "base": null, + "refs": { + } + }, + "DescribeVpnConnectionsRequest": { + "base": null, + "refs": { + } + }, + "DescribeVpnConnectionsResult": { + "base": null, + "refs": { + } + }, + "DescribeVpnGatewaysRequest": { + "base": null, + "refs": { + } + }, + "DescribeVpnGatewaysResult": { + "base": null, + "refs": { + } + }, + "DetachClassicLinkVpcRequest": { + "base": null, + "refs": { + } + }, + "DetachClassicLinkVpcResult": { + "base": null, + "refs": { + } + }, + "DetachInternetGatewayRequest": { + "base": null, + "refs": { + } + }, + "DetachNetworkInterfaceRequest": { + "base": null, + "refs": { + } + }, + "DetachVolumeRequest": { + "base": null, + "refs": { + } + }, + "DetachVpnGatewayRequest": { + "base": null, + "refs": { + } + }, + "DeviceType": { + "base": null, + "refs": { + "Image$RootDeviceType": "

    The type of root device used by the AMI. The AMI can use an EBS volume or an instance store volume.

    ", + "Instance$RootDeviceType": "

    The root device type used by the AMI. The AMI can use an EBS volume or an instance store volume.

    " + } + }, + "DhcpConfiguration": { + "base": "

    Describes a DHCP configuration option.

    ", + "refs": { + "DhcpConfigurationList$member": null + } + }, + "DhcpConfigurationList": { + "base": null, + "refs": { + "DhcpOptions$DhcpConfigurations": "

    One or more DHCP options in the set.

    " + } + }, + "DhcpOptions": { + "base": "

    Describes a set of DHCP options.

    ", + "refs": { + "CreateDhcpOptionsResult$DhcpOptions": "

    A set of DHCP options.

    ", + "DhcpOptionsList$member": null + } + }, + "DhcpOptionsIdStringList": { + "base": null, + "refs": { + "DescribeDhcpOptionsRequest$DhcpOptionsIds": "

    The IDs of one or more DHCP options sets.

    Default: Describes all your DHCP options sets.

    " + } + }, + "DhcpOptionsList": { + "base": null, + "refs": { + "DescribeDhcpOptionsResult$DhcpOptions": "

    Information about one or more DHCP options sets.

    " + } + }, + "DisableVgwRoutePropagationRequest": { + "base": null, + "refs": { + } + }, + "DisableVpcClassicLinkRequest": { + "base": null, + "refs": { + } + }, + "DisableVpcClassicLinkResult": { + "base": null, + "refs": { + } + }, + "DisassociateAddressRequest": { + "base": null, + "refs": { + } + }, + "DisassociateRouteTableRequest": { + "base": null, + "refs": { + } + }, + "DiskImage": { + "base": "

    Describes a disk image.

    ", + "refs": { + "DiskImageList$member": null + } + }, + "DiskImageDescription": { + "base": "

    Describes a disk image.

    ", + "refs": { + "ImportInstanceVolumeDetailItem$Image": "

    The image.

    ", + "ImportVolumeTaskDetails$Image": "

    The image.

    " + } + }, + "DiskImageDetail": { + "base": "

    Describes a disk image.

    ", + "refs": { + "DiskImage$Image": "

    Information about the disk image.

    ", + "ImportVolumeRequest$Image": "

    The disk image.

    " + } + }, + "DiskImageFormat": { + "base": null, + "refs": { + "DiskImageDescription$Format": "

    The disk image format.

    ", + "DiskImageDetail$Format": "

    The disk image format.

    ", + "ExportToS3Task$DiskImageFormat": "

    The format for the exported image.

    ", + "ExportToS3TaskSpecification$DiskImageFormat": "

    The format for the exported image.

    " + } + }, + "DiskImageList": { + "base": null, + "refs": { + "ImportInstanceRequest$DiskImages": "

    The disk image.

    " + } + }, + "DiskImageVolumeDescription": { + "base": "

    Describes a disk image volume.

    ", + "refs": { + "ImportInstanceVolumeDetailItem$Volume": "

    The volume.

    ", + "ImportVolumeTaskDetails$Volume": "

    The volume.

    " + } + }, + "DomainType": { + "base": null, + "refs": { + "Address$Domain": "

    Indicates whether this Elastic IP address is for use with instances in EC2-Classic (standard) or instances in a VPC (vpc).

    ", + "AllocateAddressRequest$Domain": "

    Set to vpc to allocate the address for use with instances in a VPC.

    Default: The address is for use with instances in EC2-Classic.

    ", + "AllocateAddressResult$Domain": "

    Indicates whether this Elastic IP address is for use with instances in EC2-Classic (standard) or instances in a VPC (vpc).

    " + } + }, + "Double": { + "base": null, + "refs": { + "ClientData$UploadSize": "

    The size of the uploaded disk image, in GiB.

    ", + "PriceSchedule$Price": "

    The fixed price for the term.

    ", + "PriceScheduleSpecification$Price": "

    The fixed price for the term.

    ", + "PricingDetail$Price": "

    The price per instance.

    ", + "RecurringCharge$Amount": "

    The amount of the recurring charge.

    ", + "ReservedInstanceLimitPrice$Amount": "

    Used for Reserved Instance Marketplace offerings. Specifies the limit price on the total order (instanceCount * price).

    ", + "SnapshotDetail$DiskImageSize": "

    The size of the disk in the snapshot, in GiB.

    ", + "SnapshotTaskDetail$DiskImageSize": "

    The size of the disk in the snapshot, in GiB.

    ", + "SpotFleetLaunchSpecification$WeightedCapacity": "

    The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of (instances or a performance characteristic such as vCPUs, memory, or I/O).

    If the target capacity divided by this value is not a whole number, we round the number of instances to the next whole number. For example, a target capacity of 10 with a weighted capacity of 4 results in 3 instances (10/4, rounded up). If this value is not specified, the default is 1.

    " + } + }, + "EbsBlockDevice": { + "base": "

    Describes a block device for an EBS volume.

    ", + "refs": { + "BlockDeviceMapping$Ebs": "

    Parameters used to automatically set up EBS volumes when the instance is launched.
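
    As a sketch, a mapping that provisions a gp2 volume at launch and deletes it on termination; the device name and size are arbitrary:

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/ec2"
        )

        func main() {
            // Passed via RunInstancesInput.BlockDeviceMappings at launch time.
            mapping := &ec2.BlockDeviceMapping{
                DeviceName: aws.String("/dev/sdh"),
                Ebs: &ec2.EbsBlockDevice{
                    VolumeSize:          aws.Int64(100), // GiB
                    VolumeType:          aws.String("gp2"),
                    DeleteOnTermination: aws.Bool(true),
                },
            }
            fmt.Println(mapping)
        }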

    " + } + }, + "EbsInstanceBlockDevice": { + "base": "

    Describes a parameter used to set up an EBS volume in a block device mapping.

    ", + "refs": { + "InstanceBlockDeviceMapping$Ebs": "

    Parameters used to automatically set up EBS volumes when the instance is launched.

    " + } + }, + "EbsInstanceBlockDeviceSpecification": { + "base": null, + "refs": { + "InstanceBlockDeviceMappingSpecification$Ebs": "

    Parameters used to automatically set up EBS volumes when the instance is launched.

    " + } + }, + "EnableVgwRoutePropagationRequest": { + "base": null, + "refs": { + } + }, + "EnableVolumeIORequest": { + "base": null, + "refs": { + } + }, + "EnableVpcClassicLinkRequest": { + "base": null, + "refs": { + } + }, + "EnableVpcClassicLinkResult": { + "base": null, + "refs": { + } + }, + "EventCode": { + "base": null, + "refs": { + "InstanceStatusEvent$Code": "

    The event code.

    " + } + }, + "EventInformation": { + "base": "

    Describes a Spot fleet event.

    ", + "refs": { + "HistoryRecord$EventInformation": "

    Information about the event.

    " + } + }, + "EventType": { + "base": null, + "refs": { + "DescribeSpotFleetRequestHistoryRequest$EventType": "

    The type of events to describe. By default, all events are described.

    ", + "HistoryRecord$EventType": "

    The event type.

    • error - Indicates an error with the Spot fleet request.

    • fleetRequestChange - Indicates a change in the status or configuration of the Spot fleet request.

    • instanceChange - Indicates that an instance was launched or terminated.

    " + } + }, + "ExecutableByStringList": { + "base": null, + "refs": { + "DescribeImagesRequest$ExecutableUsers": "

    Scopes the images by users with explicit launch permissions. Specify an AWS account ID, self (the sender of the request), or all (public AMIs).

    " + } + }, + "ExportEnvironment": { + "base": null, + "refs": { + "CreateInstanceExportTaskRequest$TargetEnvironment": "

    The target virtualization environment.

    ", + "InstanceExportDetails$TargetEnvironment": "

    The target virtualization environment.

    " + } + }, + "ExportTask": { + "base": "

    Describes an instance export task.

    ", + "refs": { + "CreateInstanceExportTaskResult$ExportTask": "

    Information about the instance export task.

    ", + "ExportTaskList$member": null + } + }, + "ExportTaskIdStringList": { + "base": null, + "refs": { + "DescribeExportTasksRequest$ExportTaskIds": "

    One or more export task IDs.

    " + } + }, + "ExportTaskList": { + "base": null, + "refs": { + "DescribeExportTasksResult$ExportTasks": "

    Information about the export tasks.

    " + } + }, + "ExportTaskState": { + "base": null, + "refs": { + "ExportTask$State": "

    The state of the export task.

    " + } + }, + "ExportToS3Task": { + "base": "

    Describes the format and location for an instance export task.

    ", + "refs": { + "ExportTask$ExportToS3Task": "

    Information about the export task.

    " + } + }, + "ExportToS3TaskSpecification": { + "base": "

    Describes an instance export task.

    ", + "refs": { + "CreateInstanceExportTaskRequest$ExportToS3Task": "

    The format and location for an instance export task.

    " + } + }, + "Filter": { + "base": "

    A filter name and value pair that is used to return a more specific list of results. Filters can be used to match a set of resources by various criteria, such as tags, attributes, or IDs.
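
    For example, a single name/value filter as consumed by the Describe* calls below; a sketch matching running instances:

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/ec2"
        )

        func main() {
            svc := ec2.New(session.Must(session.NewSession()))

            out, err := svc.DescribeInstances(&ec2.DescribeInstancesInput{
                Filters: []*ec2.Filter{{
                    Name:   aws.String("instance-state-name"),
                    Values: []*string{aws.String("running")},
                }},
            })
            if err != nil {
                log.Fatal(err)
            }
            for _, r := range out.Reservations {
                for _, i := range r.Instances {
                    fmt.Println(aws.StringValue(i.InstanceId))
                }
            }
        }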

    ", + "refs": { + "FilterList$member": null + } + }, + "FilterList": { + "base": null, + "refs": { + "DescribeAddressesRequest$Filters": "

    One or more filters. Filter names and values are case-sensitive.

    • allocation-id - [EC2-VPC] The allocation ID for the address.

    • association-id - [EC2-VPC] The association ID for the address.

    • domain - Indicates whether the address is for use in EC2-Classic (standard) or in a VPC (vpc).

    • instance-id - The ID of the instance the address is associated with, if any.

    • network-interface-id - [EC2-VPC] The ID of the network interface that the address is associated with, if any.

    • network-interface-owner-id - The AWS account ID of the owner.

    • private-ip-address - [EC2-VPC] The private IP address associated with the Elastic IP address.

    • public-ip - The Elastic IP address.

    ", + "DescribeAvailabilityZonesRequest$Filters": "

    One or more filters.

    • message - Information about the Availability Zone.

    • region-name - The name of the region for the Availability Zone (for example, us-east-1).

    • state - The state of the Availability Zone (available | impaired | unavailable).

    • zone-name - The name of the Availability Zone (for example, us-east-1a).

    ", + "DescribeBundleTasksRequest$Filters": "

    One or more filters.

    • bundle-id - The ID of the bundle task.

    • error-code - If the task failed, the error code returned.

    • error-message - If the task failed, the error message returned.

    • instance-id - The ID of the instance.

    • progress - The level of task completion, as a percentage (for example, 20%).

    • s3-bucket - The Amazon S3 bucket to store the AMI.

    • s3-prefix - The beginning of the AMI name.

    • start-time - The time the task started (for example, 2013-09-15T17:15:20.000Z).

    • state - The state of the task (pending | waiting-for-shutdown | bundling | storing | cancelling | complete | failed).

    • update-time - The time of the most recent update for the task.

    ", + "DescribeClassicLinkInstancesRequest$Filters": "

    One or more filters.

    • group-id - The ID of a VPC security group that's associated with the instance.

    • instance-id - The ID of the instance.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • vpc-id - The ID of the VPC that the instance is linked to.

    ", + "DescribeConversionTasksRequest$Filters": "

    One or more filters.

    ", + "DescribeCustomerGatewaysRequest$Filters": "

    One or more filters.

    • bgp-asn - The customer gateway's Border Gateway Protocol (BGP) Autonomous System Number (ASN).

    • customer-gateway-id - The ID of the customer gateway.

    • ip-address - The IP address of the customer gateway's Internet-routable external interface.

    • state - The state of the customer gateway (pending | available | deleting | deleted).

    • type - The type of customer gateway. Currently, the only supported type is ipsec.1.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    ", + "DescribeDhcpOptionsRequest$Filters": "

    One or more filters.

    • dhcp-options-id - The ID of a set of DHCP options.

    • key - The key for one of the options (for example, domain-name).

    • value - The value for one of the options.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    ", + "DescribeFlowLogsRequest$Filter": "

    One or more filters.

    • deliver-log-status - The status of the logs delivery (SUCCESS | FAILED).

    • flow-log-id - The ID of the flow log.

    • log-group-name - The name of the log group.

    • resource-id - The ID of the VPC, subnet, or network interface.

    • traffic-type - The type of traffic (ACCEPT | REJECT | ALL).

    ", + "DescribeImagesRequest$Filters": "

    One or more filters.

    • architecture - The image architecture (i386 | x86_64).

    • block-device-mapping.delete-on-termination - A Boolean value that indicates whether the Amazon EBS volume is deleted on instance termination.

    • block-device-mapping.device-name - The device name for the EBS volume (for example, /dev/sdh).

    • block-device-mapping.snapshot-id - The ID of the snapshot used for the EBS volume.

    • block-device-mapping.volume-size - The volume size of the EBS volume, in GiB.

    • block-device-mapping.volume-type - The volume type of the EBS volume (gp2 | standard | io1).

    • description - The description of the image (provided during image creation).

    • hypervisor - The hypervisor type (ovm | xen).

    • image-id - The ID of the image.

    • image-type - The image type (machine | kernel | ramdisk).

    • is-public - A Boolean that indicates whether the image is public.

    • kernel-id - The kernel ID.

    • manifest-location - The location of the image manifest.

    • name - The name of the AMI (provided during image creation).

    • owner-alias - The AWS account alias (for example, amazon).

    • owner-id - The AWS account ID of the image owner.

    • platform - The platform. To only list Windows-based AMIs, use windows.

    • product-code - The product code.

    • product-code.type - The type of the product code (devpay | marketplace).

    • ramdisk-id - The RAM disk ID.

    • root-device-name - The name of the root device volume (for example, /dev/sda1).

    • root-device-type - The type of the root device volume (ebs | instance-store).

    • state - The state of the image (available | pending | failed).

    • state-reason-code - The reason code for the state change.

    • state-reason-message - The message for the state change.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • virtualization-type - The virtualization type (paravirtual | hvm).

    ", + "DescribeImportImageTasksRequest$Filters": "

    One or more filters.

    ", + "DescribeImportSnapshotTasksRequest$Filters": "

    One or more filters.

    ", + "DescribeInstanceStatusRequest$Filters": "

    One or more filters.

    • availability-zone - The Availability Zone of the instance.

    • event.code - The code for the scheduled event (instance-reboot | system-reboot | system-maintenance | instance-retirement | instance-stop).

    • event.description - A description of the event.

    • event.not-after - The latest end time for the scheduled event (for example, 2014-09-15T17:15:20.000Z).

    • event.not-before - The earliest start time for the scheduled event (for example, 2014-09-15T17:15:20.000Z).

    • instance-state-code - The code for the instance state, as a 16-bit unsigned integer. The high byte is an opaque internal value and should be ignored. The low byte is set based on the state represented. The valid values are 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped).

    • instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped).

    • instance-status.reachability - Filters on instance status where the name is reachability (passed | failed | initializing | insufficient-data).

    • instance-status.status - The status of the instance (ok | impaired | initializing | insufficient-data | not-applicable).

    • system-status.reachability - Filters on system status where the name is reachability (passed | failed | initializing | insufficient-data).

    • system-status.status - The system status of the instance (ok | impaired | initializing | insufficient-data | not-applicable).

    ", + "DescribeInstancesRequest$Filters": "

    One or more filters.

    • architecture - The instance architecture (i386 | x86_64).

    • availability-zone - The Availability Zone of the instance.

    • block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2010-09-15T17:15:20.000Z.

    • block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination.

    • block-device-mapping.device-name - The device name for the EBS volume (for example, /dev/sdh or xvdh).

    • block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached).

    • block-device-mapping.volume-id - The volume ID of the EBS volume.

    • client-token - The idempotency token you provided when you launched the instance.

    • dns-name - The public DNS name of the instance.

    • group-id - The ID of the security group for the instance. EC2-Classic only.

    • group-name - The name of the security group for the instance. EC2-Classic only.

    • hypervisor - The hypervisor type of the instance (ovm | xen).

    • iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN.

    • image-id - The ID of the image used to launch the instance.

    • instance-id - The ID of the instance.

    • instance-lifecycle - Indicates whether this is a Spot Instance (spot).

    • instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is an opaque internal value and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped).

    • instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped).

    • instance-type - The type of instance (for example, t2.micro).

    • instance.group-id - The ID of the security group for the instance.

    • instance.group-name - The name of the security group for the instance.

    • ip-address - The public IP address of the instance.

    • kernel-id - The kernel ID.

    • key-name - The name of the key pair used when the instance was launched.

    • launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on).

    • launch-time - The time when the instance was launched.

    • monitoring-state - Indicates whether monitoring is enabled for the instance (disabled | enabled).

    • owner-id - The AWS account ID of the instance owner.

    • placement-group-name - The name of the placement group for the instance.

    • platform - The platform. Use windows if you have Windows instances; otherwise, leave blank.

    • private-dns-name - The private DNS name of the instance.

    • private-ip-address - The private IP address of the instance.

    • product-code - The product code associated with the AMI used to launch the instance.

    • product-code.type - The type of product code (devpay | marketplace).

    • ramdisk-id - The RAM disk ID.

    • reason - The reason for the current state of the instance (for example, shows \"User Initiated [date]\" when you stop or terminate the instance). Similar to the state-reason-code filter.

    • requester-id - The ID of the entity that launched the instance on your behalf (for example, AWS Management Console, Auto Scaling, and so on).

    • reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you'll get one reservation ID. If you launch ten instances using the same launch request, you'll also get one reservation ID.

    • root-device-name - The name of the root device for the instance (for example, /dev/sda1 or /dev/xvda).

    • root-device-type - The type of root device that the instance uses (ebs | instance-store).

    • source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC.

    • spot-instance-request-id - The ID of the Spot Instance request.

    • state-reason-code - The reason code for the state change.

    • state-reason-message - A message that describes the state change.

    • subnet-id - The ID of the subnet for the instance.

    • tag:key=value - The key/value combination of a tag assigned to the resource, where tag:key is the tag's key.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • tenancy - The tenancy of an instance (dedicated | default).

    • virtualization-type - The virtualization type of the instance (paravirtual | hvm).

    • vpc-id - The ID of the VPC that the instance is running in.

    • network-interface.description - The description of the network interface.

    • network-interface.subnet-id - The ID of the subnet for the network interface.

    • network-interface.vpc-id - The ID of the VPC for the network interface.

    • network-interface.network-interface.id - The ID of the network interface.

    • network-interface.owner-id - The ID of the owner of the network interface.

    • network-interface.availability-zone - The Availability Zone for the network interface.

    • network-interface.requester-id - The requester ID for the network interface.

    • network-interface.requester-managed - Indicates whether the network interface is being managed by AWS.

    • network-interface.status - The status of the network interface (available | in-use).

    • network-interface.mac-address - The MAC address of the network interface.

    • network-interface-private-dns-name - The private DNS name of the network interface.

    • network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means checking is enabled, and false means checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC.

    • network-interface.group-id - The ID of a security group associated with the network interface.

    • network-interface.group-name - The name of a security group associated with the network interface.

    • network-interface.attachment.attachment-id - The ID of the interface attachment.

    • network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached.

    • network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.

    • network-interface.addresses.private-ip-address - The private IP address associated with the network interface.

    • network-interface.attachment.device-index - The device index to which the network interface is attached.

    • network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached).

    • network-interface.attachment.attach-time - The time that the network interface was attached to an instance.

    • network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated.

    • network-interface.addresses.primary - Specifies whether the IP address of the network interface is the primary private IP address.

    • network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address with a network interface.

    • network-interface.addresses.association.ip-owner-id - The owner ID of the private IP address associated with the network interface.

    • association.public-ip - The address of the Elastic IP address bound to the network interface.

    • association.ip-owner-id - The owner of the Elastic IP address associated with the network interface.

    • association.allocation-id - The allocation ID returned when you allocated the Elastic IP address for your network interface.

    • association.association-id - The association ID returned when the network interface was associated with an IP address.
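
    As a rough usage sketch (illustrative only, not part of the vendored JSON): each item above is a Filter name accepted by the Go client's DescribeInstances call. The region, tag key, and VPC ID below are placeholders, and the snippet assumes an aws-sdk-go release that provides session.NewSession.

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/ec2"
        )

        func main() {
            // Credentials come from the default provider chain; the region is a placeholder.
            sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
            svc := ec2.New(sess)

            // Combine two of the documented filters: tag:key=value and vpc-id.
            out, err := svc.DescribeInstances(&ec2.DescribeInstancesInput{
                Filters: []*ec2.Filter{
                    {Name: aws.String("tag:Purpose"), Values: []*string{aws.String("X")}},
                    {Name: aws.String("vpc-id"), Values: []*string{aws.String("vpc-12345678")}},
                },
            })
            if err != nil {
                log.Fatal(err)
            }
            for _, r := range out.Reservations {
                for _, i := range r.Instances {
                    fmt.Println(aws.StringValue(i.InstanceId))
                }
            }
        }

    The shorter sketches after the other filter lists below reuse this svc client and these imports rather than repeating the boilerplate.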

    ", + "DescribeInternetGatewaysRequest$Filters": "

    One or more filters.

    • attachment.state - The current state of the attachment between the gateway and the VPC (available). Present only if a VPC is attached.

    • attachment.vpc-id - The ID of an attached VPC.

    • internet-gateway-id - The ID of the Internet gateway.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.
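
    A minimal sketch of the same pattern for this operation, reusing the svc client from the DescribeInstances example above (the VPC ID is a placeholder):

        // Find the Internet gateway attached to a specific VPC.
        igws, err := svc.DescribeInternetGateways(&ec2.DescribeInternetGatewaysInput{
            Filters: []*ec2.Filter{
                {Name: aws.String("attachment.vpc-id"), Values: []*string{aws.String("vpc-12345678")}},
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        for _, g := range igws.InternetGateways {
            fmt.Println(aws.StringValue(g.InternetGatewayId))
        }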

    ", + "DescribeKeyPairsRequest$Filters": "

    One or more filters.

    • fingerprint - The fingerprint of the key pair.

    • key-name - The name of the key pair.

    ", + "DescribeMovingAddressesRequest$Filters": "

    One or more filters.

    • moving-status - The status of the Elastic IP address (MovingToVpc | RestoringToClassic).

    ", + "DescribeNetworkAclsRequest$Filters": "

    One or more filters.

    • association.association-id - The ID of an association for the ACL.

    • association.network-acl-id - The ID of the network ACL involved in the association.

    • association.subnet-id - The ID of the subnet involved in the association.

    • default - Indicates whether the ACL is the default network ACL for the VPC.

    • entry.cidr - The CIDR range specified in the entry.

    • entry.egress - Indicates whether the entry applies to egress traffic.

    • entry.icmp.code - The ICMP code specified in the entry, if any.

    • entry.icmp.type - The ICMP type specified in the entry, if any.

    • entry.port-range.from - The start of the port range specified in the entry.

    • entry.port-range.to - The end of the port range specified in the entry.

    • entry.protocol - The protocol specified in the entry (tcp | udp | icmp or a protocol number).

    • entry.rule-action - Allows or denies the matching traffic (allow | deny).

    • entry.rule-number - The number of an entry (in other words, rule) in the ACL's set of entries.

    • network-acl-id - The ID of the network ACL.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • vpc-id - The ID of the VPC for the network ACL.
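
    Sketch (same assumptions and svc client as above): list the default network ACL of a placeholder VPC by combining the default and vpc-id filters.

        acls, err := svc.DescribeNetworkAcls(&ec2.DescribeNetworkAclsInput{
            Filters: []*ec2.Filter{
                {Name: aws.String("vpc-id"), Values: []*string{aws.String("vpc-12345678")}},
                {Name: aws.String("default"), Values: []*string{aws.String("true")}},
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        for _, a := range acls.NetworkAcls {
            fmt.Println(aws.StringValue(a.NetworkAclId))
        }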

    ", + "DescribeNetworkInterfacesRequest$Filters": "

    One or more filters.

    • addresses.private-ip-address - The private IP addresses associated with the network interface.

    • addresses.primary - Whether the private IP address is the primary IP address associated with the network interface.

    • addresses.association.public-ip - The association ID returned when the network interface was associated with the Elastic IP address.

    • addresses.association.owner-id - The owner ID of the addresses associated with the network interface.

    • association.association-id - The association ID returned when the network interface was associated with an IP address.

    • association.allocation-id - The allocation ID returned when you allocated the Elastic IP address for your network interface.

    • association.ip-owner-id - The owner of the Elastic IP address associated with the network interface.

    • association.public-ip - The address of the Elastic IP address bound to the network interface.

    • association.public-dns-name - The public DNS name for the network interface.

    • attachment.attachment-id - The ID of the interface attachment.

    • attachment.instance-id - The ID of the instance to which the network interface is attached.

    • attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.

    • attachment.device-index - The device index to which the network interface is attached.

    • attachment.status - The status of the attachment (attaching | attached | detaching | detached).

    • attachment.attach-time - The time that the network interface was attached to an instance.

    • attachment.delete-on-termination - Indicates whether the attachment is deleted when an instance is terminated.

    • availability-zone - The Availability Zone of the network interface.

    • description - The description of the network interface.

    • group-id - The ID of a security group associated with the network interface.

    • group-name - The name of a security group associated with the network interface.

    • mac-address - The MAC address of the network interface.

    • network-interface-id - The ID of the network interface.

    • owner-id - The AWS account ID of the network interface owner.

    • private-ip-address - The private IP address or addresses of the network interface.

    • private-dns-name - The private DNS name of the network interface.

    • requester-id - The ID of the entity that launched the instance on your behalf (for example, AWS Management Console, Auto Scaling, and so on).

    • requester-managed - Indicates whether the network interface is being managed by an AWS service (for example, AWS Management Console, Auto Scaling, and so on).

    • source-dest-check - Indicates whether the network interface performs source/destination checking. A value of true means checking is enabled, and false means checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC.

    • status - The status of the network interface. If the network interface is not attached to an instance, the status is available; if a network interface is attached to an instance the status is in-use.

    • subnet-id - The ID of the subnet for the network interface.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • vpc-id - The ID of the VPC for the network interface.
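
    Sketch (same assumptions): find unattached interfaces in a placeholder security group using the group-id and status filters.

        enis, err := svc.DescribeNetworkInterfaces(&ec2.DescribeNetworkInterfacesInput{
            Filters: []*ec2.Filter{
                {Name: aws.String("group-id"), Values: []*string{aws.String("sg-12345678")}},
                {Name: aws.String("status"), Values: []*string{aws.String("available")}},
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        for _, n := range enis.NetworkInterfaces {
            fmt.Println(aws.StringValue(n.NetworkInterfaceId))
        }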

    ", + "DescribePlacementGroupsRequest$Filters": "

    One or more filters.

    • group-name - The name of the placement group.

    • state - The state of the placement group (pending | available | deleting | deleted).

    • strategy - The strategy of the placement group (cluster).

    ", + "DescribePrefixListsRequest$Filters": "

    One or more filters.

    • prefix-list-id - The ID of a prefix list.

    • prefix-list-name - The name of a prefix list.

    ", + "DescribeRegionsRequest$Filters": "

    One or more filters.

    • endpoint - The endpoint of the region (for example, ec2.us-east-1.amazonaws.com).

    • region-name - The name of the region (for example, us-east-1).

    ", + "DescribeReservedInstancesListingsRequest$Filters": "

    One or more filters.

    • reserved-instances-id - The ID of the Reserved Instances.

    • reserved-instances-listing-id - The ID of the Reserved Instances listing.

    • status - The status of the Reserved Instance listing (pending | active | cancelled | closed).

    • status-message - The reason for the status.

    ", + "DescribeReservedInstancesModificationsRequest$Filters": "

    One or more filters.

    • client-token - The idempotency token for the modification request.

    • create-date - The time when the modification request was created.

    • effective-date - The time when the modification becomes effective.

    • modification-result.reserved-instances-id - The ID for the Reserved Instances created as part of the modification request. This ID is only available when the status of the modification is fulfilled.

    • modification-result.target-configuration.availability-zone - The Availability Zone for the new Reserved Instances.

    • modification-result.target-configuration.instance-count - The number of new Reserved Instances.

    • modification-result.target-configuration.instance-type - The instance type of the new Reserved Instances.

    • modification-result.target-configuration.platform - The network platform of the new Reserved Instances (EC2-Classic | EC2-VPC).

    • reserved-instances-id - The ID of the Reserved Instances modified.

    • reserved-instances-modification-id - The ID of the modification request.

    • status - The status of the Reserved Instances modification request (processing | fulfilled | failed).

    • status-message - The reason for the status.

    • update-date - The time when the modification request was last updated.

    ", + "DescribeReservedInstancesOfferingsRequest$Filters": "

    One or more filters.

    • availability-zone - The Availability Zone where the Reserved Instance can be used.

    • duration - The duration of the Reserved Instance (for example, one year or three years), in seconds (31536000 | 94608000).

    • fixed-price - The purchase price of the Reserved Instance (for example, 9800.0).

    • instance-type - The instance type on which the Reserved Instance can be used.

    • marketplace - Set to true to show only Reserved Instance Marketplace offerings. When this filter is not used, which is the default behavior, all offerings from AWS and Reserved Instance Marketplace are listed.

    • product-description - The Reserved Instance product platform description. Instances that include (Amazon VPC) in the product platform description will only be displayed to EC2-Classic account holders and are for use with Amazon VPC. (Linux/UNIX | Linux/UNIX (Amazon VPC) | SUSE Linux | SUSE Linux (Amazon VPC) | Red Hat Enterprise Linux | Red Hat Enterprise Linux (Amazon VPC) | Windows | Windows (Amazon VPC) | Windows with SQL Server Standard | Windows with SQL Server Standard (Amazon VPC) | Windows with SQL Server Web | Windows with SQL Server Web (Amazon VPC) | Windows with SQL Server Enterprise | Windows with SQL Server Enterprise (Amazon VPC))

    • reserved-instances-offering-id - The Reserved Instances offering ID.

    • usage-price - The usage price of the Reserved Instance, per hour (for example, 0.84).
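
    Sketch (same assumptions): query one-year offerings for a placeholder instance type via the instance-type and duration filters.

        offers, err := svc.DescribeReservedInstancesOfferings(&ec2.DescribeReservedInstancesOfferingsInput{
            Filters: []*ec2.Filter{
                {Name: aws.String("instance-type"), Values: []*string{aws.String("m4.large")}},
                {Name: aws.String("duration"), Values: []*string{aws.String("31536000")}},
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        for _, o := range offers.ReservedInstancesOfferings {
            fmt.Println(aws.StringValue(o.ReservedInstancesOfferingId), aws.Float64Value(o.FixedPrice))
        }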

    ", + "DescribeReservedInstancesRequest$Filters": "

    One or more filters.

    • availability-zone - The Availability Zone where the Reserved Instance can be used.

    • duration - The duration of the Reserved Instance (one year or three years), in seconds (31536000 | 94608000).

    • end - The time when the Reserved Instance expires (for example, 2015-08-07T11:54:42.000Z).

    • fixed-price - The purchase price of the Reserved Instance (for example, 9800.0).

    • instance-type - The instance type on which the Reserved Instance can be used.

    • product-description - The Reserved Instance product platform description. Instances that include (Amazon VPC) in the product platform description will only be displayed to EC2-Classic account holders and are for use with Amazon VPC. (Linux/UNIX | Linux/UNIX (Amazon VPC) | SUSE Linux | SUSE Linux (Amazon VPC) | Red Hat Enterprise Linux | Red Hat Enterprise Linux (Amazon VPC) | Windows | Windows (Amazon VPC) | Windows with SQL Server Standard | Windows with SQL Server Standard (Amazon VPC) | Windows with SQL Server Web | Windows with SQL Server Web (Amazon VPC) | Windows with SQL Server Enterprise | Windows with SQL Server Enterprise (Amazon VPC)).

    • reserved-instances-id - The ID of the Reserved Instance.

    • start - The time at which the Reserved Instance purchase request was placed (for example, 2014-08-07T11:54:42.000Z).

    • state - The state of the Reserved Instance (payment-pending | active | payment-failed | retired).

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • usage-price - The usage price of the Reserved Instance, per hour (for example, 0.84).

    ", + "DescribeRouteTablesRequest$Filters": "

    One or more filters.

    • association.route-table-association-id - The ID of an association for the route table.

    • association.route-table-id - The ID of the route table involved in the association.

    • association.subnet-id - The ID of the subnet involved in the association.

    • association.main - Indicates whether the route table is the main route table for the VPC.

    • route-table-id - The ID of the route table.

    • route.destination-cidr-block - The CIDR range specified in a route in the table.

    • route.destination-prefix-list-id - The ID (prefix) of the AWS service specified in a route in the table.

    • route.gateway-id - The ID of a gateway specified in a route in the table.

    • route.instance-id - The ID of an instance specified in a route in the table.

    • route.origin - Describes how the route was created. CreateRouteTable indicates that the route was automatically created when the route table was created; CreateRoute indicates that the route was manually added to the route table; EnableVgwRoutePropagation indicates that the route was propagated by route propagation.

    • route.state - The state of a route in the route table (active | blackhole). The blackhole state indicates that the route's target isn't available (for example, the specified gateway isn't attached to the VPC, the specified NAT instance has been terminated, and so on).

    • route.vpc-peering-connection-id - The ID of a VPC peering connection specified in a route in the table.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • vpc-id - The ID of the VPC for the route table.
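
    Sketch (same assumptions): fetch the main route table of a placeholder VPC with the association.main and vpc-id filters.

        rts, err := svc.DescribeRouteTables(&ec2.DescribeRouteTablesInput{
            Filters: []*ec2.Filter{
                {Name: aws.String("vpc-id"), Values: []*string{aws.String("vpc-12345678")}},
                {Name: aws.String("association.main"), Values: []*string{aws.String("true")}},
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        for _, rt := range rts.RouteTables {
            fmt.Println(aws.StringValue(rt.RouteTableId))
        }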

    ", + "DescribeSecurityGroupsRequest$Filters": "

    One or more filters. If using multiple filters for rules, the results include security groups for which any combination of rules - not necessarily a single rule - match all filters.

    • description - The description of the security group.

    • egress.ip-permission.prefix-list-id - The ID (prefix) of the AWS service to which the security group allows access.

    • group-id - The ID of the security group.

    • group-name - The name of the security group.

    • ip-permission.cidr - A CIDR range that has been granted permission.

    • ip-permission.from-port - The start of port range for the TCP and UDP protocols, or an ICMP type number.

    • ip-permission.group-id - The ID of a security group that has been granted permission.

    • ip-permission.group-name - The name of a security group that has been granted permission.

    • ip-permission.protocol - The IP protocol for the permission (tcp | udp | icmp or a protocol number).

    • ip-permission.to-port - The end of port range for the TCP and UDP protocols, or an ICMP code.

    • ip-permission.user-id - The ID of an AWS account that has been granted permission.

    • owner-id - The AWS account ID of the owner of the security group.

    • tag-key - The key of a tag assigned to the security group.

    • tag-value - The value of a tag assigned to the security group.

    • vpc-id - The ID of the VPC specified when the security group was created.
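
    Sketch (same assumptions): describe a group by name inside a nondefault VPC, using the group-name filter as the list above suggests (both values are placeholders).

        sgs, err := svc.DescribeSecurityGroups(&ec2.DescribeSecurityGroupsInput{
            Filters: []*ec2.Filter{
                {Name: aws.String("group-name"), Values: []*string{aws.String("web-servers")}},
                {Name: aws.String("vpc-id"), Values: []*string{aws.String("vpc-12345678")}},
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        for _, g := range sgs.SecurityGroups {
            fmt.Println(aws.StringValue(g.GroupId), aws.StringValue(g.GroupName))
        }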

    ", + "DescribeSnapshotsRequest$Filters": "

    One or more filters.

    • description - A description of the snapshot.

    • owner-alias - The AWS account alias (for example, amazon) that owns the snapshot.

    • owner-id - The ID of the AWS account that owns the snapshot.

    • progress - The progress of the snapshot, as a percentage (for example, 80%).

    • snapshot-id - The snapshot ID.

    • start-time - The time stamp when the snapshot was initiated.

    • status - The status of the snapshot (pending | completed | error).

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • volume-id - The ID of the volume the snapshot is for.

    • volume-size - The size of the volume, in GiB.
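
    Sketch (same assumptions): list completed snapshots of a placeholder volume by combining the status and volume-id filters.

        snaps, err := svc.DescribeSnapshots(&ec2.DescribeSnapshotsInput{
            Filters: []*ec2.Filter{
                {Name: aws.String("volume-id"), Values: []*string{aws.String("vol-12345678")}},
                {Name: aws.String("status"), Values: []*string{aws.String("completed")}},
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        for _, s := range snaps.Snapshots {
            fmt.Println(aws.StringValue(s.SnapshotId))
        }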

    ", + "DescribeSpotInstanceRequestsRequest$Filters": "

    One or more filters.

    • availability-zone-group - The Availability Zone group.

    • create-time - The time stamp when the Spot instance request was created.

    • fault-code - The fault code related to the request.

    • fault-message - The fault message related to the request.

    • instance-id - The ID of the instance that fulfilled the request.

    • launch-group - The Spot instance launch group.

    • launch.block-device-mapping.delete-on-termination - Indicates whether the Amazon EBS volume is deleted on instance termination.

    • launch.block-device-mapping.device-name - The device name for the Amazon EBS volume (for example, /dev/sdh).

    • launch.block-device-mapping.snapshot-id - The ID of the snapshot used for the Amazon EBS volume.

    • launch.block-device-mapping.volume-size - The size of the Amazon EBS volume, in GiB.

    • launch.block-device-mapping.volume-type - The type of the Amazon EBS volume (gp2 | standard | io1).

    • launch.group-id - The security group for the instance.

    • launch.image-id - The ID of the AMI.

    • launch.instance-type - The type of instance (for example, m1.small).

    • launch.kernel-id - The kernel ID.

    • launch.key-name - The name of the key pair the instance launched with.

    • launch.monitoring-enabled - Whether monitoring is enabled for the Spot instance.

    • launch.ramdisk-id - The RAM disk ID.

    • network-interface.network-interface-id - The ID of the network interface.

    • network-interface.device-index - The index of the device for the network interface attachment on the instance.

    • network-interface.subnet-id - The ID of the subnet for the instance.

    • network-interface.description - A description of the network interface.

    • network-interface.private-ip-address - The primary private IP address of the network interface.

    • network-interface.delete-on-termination - Indicates whether the network interface is deleted when the instance is terminated.

    • network-interface.group-id - The ID of the security group associated with the network interface.

    • network-interface.group-name - The name of the security group associated with the network interface.

    • network-interface.addresses.primary - Indicates whether the IP address is the primary private IP address.

    • product-description - The product description associated with the instance (Linux/UNIX | Windows).

    • spot-instance-request-id - The Spot instance request ID.

    • spot-price - The maximum hourly price for any Spot instance launched to fulfill the request.

    • state - The state of the Spot instance request (open | active | closed | cancelled | failed). Spot bid status information can help you track your Amazon EC2 Spot instance requests. For more information, see Spot Bid Status in the Amazon Elastic Compute Cloud User Guide.

    • status-code - The short code describing the most recent evaluation of your Spot instance request.

    • status-message - The message explaining the status of the Spot instance request.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • type - The type of Spot instance request (one-time | persistent).

    • launched-availability-zone - The Availability Zone in which the bid is launched.

    • valid-from - The start date of the request.

    • valid-until - The end date of the request.
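
    Sketch (same assumptions): list open one-time Spot instance requests via the state and type filters.

        reqs, err := svc.DescribeSpotInstanceRequests(&ec2.DescribeSpotInstanceRequestsInput{
            Filters: []*ec2.Filter{
                {Name: aws.String("state"), Values: []*string{aws.String("open")}},
                {Name: aws.String("type"), Values: []*string{aws.String("one-time")}},
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        for _, r := range reqs.SpotInstanceRequests {
            fmt.Println(aws.StringValue(r.SpotInstanceRequestId), aws.StringValue(r.State))
        }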

    ", + "DescribeSpotPriceHistoryRequest$Filters": "

    One or more filters.

    • availability-zone - The Availability Zone for which prices should be returned.

    • instance-type - The type of instance (for example, m1.small).

    • product-description - The product description for the Spot price (Linux/UNIX | SUSE Linux | Windows | Linux/UNIX (Amazon VPC) | SUSE Linux (Amazon VPC) | Windows (Amazon VPC)).

    • spot-price - The Spot price. The value must match exactly (or use wildcards; greater than or less than comparison is not supported).

    • timestamp - The timestamp of the Spot price history, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). You can use wildcards (* and ?). Greater than or less than comparison is not supported.

    ", + "DescribeSubnetsRequest$Filters": "

    One or more filters.

    • availabilityZone - The Availability Zone for the subnet. You can also use availability-zone as the filter name.

    • available-ip-address-count - The number of IP addresses in the subnet that are available.

    • cidrBlock - The CIDR block of the subnet. The CIDR block you specify must exactly match the subnet's CIDR block for information to be returned for the subnet. You can also use cidr or cidr-block as the filter names.

    • defaultForAz - Indicates whether this is the default subnet for the Availability Zone. You can also use default-for-az as the filter name.

    • state - The state of the subnet (pending | available).

    • subnet-id - The ID of the subnet.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • vpc-id - The ID of the VPC for the subnet.
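
    Sketch (same assumptions): list the available subnets of a placeholder VPC with the vpc-id and state filters.

        subs, err := svc.DescribeSubnets(&ec2.DescribeSubnetsInput{
            Filters: []*ec2.Filter{
                {Name: aws.String("vpc-id"), Values: []*string{aws.String("vpc-12345678")}},
                {Name: aws.String("state"), Values: []*string{aws.String("available")}},
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        for _, s := range subs.Subnets {
            fmt.Println(aws.StringValue(s.SubnetId), aws.Int64Value(s.AvailableIpAddressCount))
        }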

    ", + "DescribeTagsRequest$Filters": "

    One or more filters.

    • key - The tag key.

    • resource-id - The resource ID.

    • resource-type - The resource type (customer-gateway | dhcp-options | image | instance | internet-gateway | network-acl | network-interface | reserved-instances | route-table | security-group | snapshot | spot-instances-request | subnet | volume | vpc | vpn-connection | vpn-gateway).

    • value - The tag value.
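
    Sketch (same assumptions): list a placeholder tag key across all instances using the resource-type and key filters.

        tags, err := svc.DescribeTags(&ec2.DescribeTagsInput{
            Filters: []*ec2.Filter{
                {Name: aws.String("resource-type"), Values: []*string{aws.String("instance")}},
                {Name: aws.String("key"), Values: []*string{aws.String("Purpose")}},
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        for _, t := range tags.Tags {
            fmt.Println(aws.StringValue(t.ResourceId), aws.StringValue(t.Value))
        }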

    ", + "DescribeVolumeStatusRequest$Filters": "

    One or more filters.

    • action.code - The action code for the event (for example, enable-volume-io).

    • action.description - A description of the action.

    • action.event-id - The event ID associated with the action.

    • availability-zone - The Availability Zone of the instance.

    • event.description - A description of the event.

    • event.event-id - The event ID.

    • event.event-type - The event type (for io-enabled: passed | failed; for io-performance: io-performance:degraded | io-performance:severely-degraded | io-performance:stalled).

    • event.not-after - The latest end time for the event.

    • event.not-before - The earliest start time for the event.

    • volume-status.details-name - The cause for volume-status.status (io-enabled | io-performance).

    • volume-status.details-status - The status of volume-status.details-name (for io-enabled: passed | failed; for io-performance: normal | degraded | severely-degraded | stalled).

    • volume-status.status - The status of the volume (ok | impaired | warning | insufficient-data).

    ", + "DescribeVolumesRequest$Filters": "

    One or more filters.

    • attachment.attach-time - The time stamp when the attachment initiated.

    • attachment.delete-on-termination - Whether the volume is deleted on instance termination.

    • attachment.device - The device name that is exposed to the instance (for example, /dev/sda1).

    • attachment.instance-id - The ID of the instance the volume is attached to.

    • attachment.status - The attachment state (attaching | attached | detaching | detached).

    • availability-zone - The Availability Zone in which the volume was created.

    • create-time - The time stamp when the volume was created.

    • encrypted - The encryption status of the volume.

    • size - The size of the volume, in GiB.

    • snapshot-id - The snapshot from which the volume was created.

    • status - The status of the volume (creating | available | in-use | deleting | deleted | error).

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • volume-id - The volume ID.

    • volume-type - The Amazon EBS volume type. This can be gp2 for General Purpose (SSD) volumes, io1 for Provisioned IOPS (SSD) volumes, or standard for Magnetic volumes.
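
    Sketch (same assumptions): list the in-use volumes attached to a placeholder instance via the attachment.instance-id and status filters.

        vols, err := svc.DescribeVolumes(&ec2.DescribeVolumesInput{
            Filters: []*ec2.Filter{
                {Name: aws.String("attachment.instance-id"), Values: []*string{aws.String("i-1234567890abcdef0")}},
                {Name: aws.String("status"), Values: []*string{aws.String("in-use")}},
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        for _, v := range vols.Volumes {
            fmt.Println(aws.StringValue(v.VolumeId))
        }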

    ", + "DescribeVpcClassicLinkRequest$Filters": "

    One or more filters.

    • is-classic-link-enabled - Whether the VPC is enabled for ClassicLink (true | false).

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    ", + "DescribeVpcEndpointsRequest$Filters": "

    One or more filters.

    • service-name - The name of the AWS service.

    • vpc-id - The ID of the VPC in which the endpoint resides.

    • vpc-endpoint-id - The ID of the endpoint.

    • vpc-endpoint-state - The state of the endpoint (pending | available | deleting | deleted).

    ", + "DescribeVpcPeeringConnectionsRequest$Filters": "

    One or more filters.

    • accepter-vpc-info.cidr-block - The CIDR block of the peer VPC.

    • accepter-vpc-info.owner-id - The AWS account ID of the owner of the peer VPC.

    • accepter-vpc-info.vpc-id - The ID of the peer VPC.

    • expiration-time - The expiration date and time for the VPC peering connection.

    • requester-vpc-info.cidr-block - The CIDR block of the requester's VPC.

    • requester-vpc-info.owner-id - The AWS account ID of the owner of the requester VPC.

    • requester-vpc-info.vpc-id - The ID of the requester VPC.

    • status-code - The status of the VPC peering connection (pending-acceptance | failed | expired | provisioning | active | deleted | rejected).

    • status-message - A message that provides more information about the status of the VPC peering connection, if applicable.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • vpc-peering-connection-id - The ID of the VPC peering connection.

    ", + "DescribeVpcsRequest$Filters": "

    One or more filters.

    • cidr - The CIDR block of the VPC. The CIDR block you specify must exactly match the VPC's CIDR block for information to be returned for the VPC. Must contain the slash followed by one or two digits (for example, /28).

    • dhcp-options-id - The ID of a set of DHCP options.

    • isDefault - Indicates whether the VPC is the default VPC.

    • state - The state of the VPC (pending | available).

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • vpc-id - The ID of the VPC.
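
    Sketch (same assumptions): find the account's default VPC with the isDefault filter.

        vpcs, err := svc.DescribeVpcs(&ec2.DescribeVpcsInput{
            Filters: []*ec2.Filter{
                {Name: aws.String("isDefault"), Values: []*string{aws.String("true")}},
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        for _, v := range vpcs.Vpcs {
            fmt.Println(aws.StringValue(v.VpcId), aws.StringValue(v.CidrBlock))
        }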

    ", + "DescribeVpnConnectionsRequest$Filters": "

    One or more filters.

    • customer-gateway-configuration - The configuration information for the customer gateway.

    • customer-gateway-id - The ID of a customer gateway associated with the VPN connection.

    • state - The state of the VPN connection (pending | available | deleting | deleted).

    • option.static-routes-only - Indicates whether the connection has static routes only. Used for devices that do not support Border Gateway Protocol (BGP).

    • route.destination-cidr-block - The destination CIDR block. This corresponds to the subnet used in a customer data center.

    • bgp-asn - The BGP Autonomous System Number (ASN) associated with a BGP device.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • type - The type of VPN connection. Currently the only supported type is ipsec.1.

    • vpn-connection-id - The ID of the VPN connection.

    • vpn-gateway-id - The ID of a virtual private gateway associated with the VPN connection.

    ", + "DescribeVpnGatewaysRequest$Filters": "

    One or more filters.

    • attachment.state - The current state of the attachment between the gateway and the VPC (attaching | attached | detaching | detached).

    • attachment.vpc-id - The ID of an attached VPC.

    • availability-zone - The Availability Zone for the virtual private gateway.

    • state - The state of the virtual private gateway (pending | available | deleting | deleted).

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • type - The type of virtual private gateway. Currently the only supported type is ipsec.1.

    • vpn-gateway-id - The ID of the virtual private gateway.

    " + } + }, + "Float": { + "base": null, + "refs": { + "ReservedInstances$UsagePrice": "

    The usage price of the Reserved Instance, per hour.

    ", + "ReservedInstances$FixedPrice": "

    The purchase price of the Reserved Instance.

    ", + "ReservedInstancesOffering$UsagePrice": "

    The usage price of the Reserved Instance, per hour.

    ", + "ReservedInstancesOffering$FixedPrice": "

    The purchase price of the Reserved Instance.

    " + } + }, + "FlowLog": { + "base": "

    Describes a flow log.

    ", + "refs": { + "FlowLogSet$member": null + } + }, + "FlowLogSet": { + "base": null, + "refs": { + "DescribeFlowLogsResult$FlowLogs": "

    Information about the flow logs.

    " + } + }, + "FlowLogsResourceType": { + "base": null, + "refs": { + "CreateFlowLogsRequest$ResourceType": "

    The type of resource on which to create the flow log.

    " + } + }, + "GatewayType": { + "base": null, + "refs": { + "CreateCustomerGatewayRequest$Type": "

    The type of VPN connection that this customer gateway supports (ipsec.1).

    ", + "CreateVpnGatewayRequest$Type": "

    The type of VPN connection this virtual private gateway supports.

    ", + "VpnConnection$Type": "

    The type of VPN connection.

    ", + "VpnGateway$Type": "

    The type of VPN connection the virtual private gateway supports.

    " + } + }, + "GetConsoleOutputRequest": { + "base": null, + "refs": { + } + }, + "GetConsoleOutputResult": { + "base": null, + "refs": { + } + }, + "GetPasswordDataRequest": { + "base": null, + "refs": { + } + }, + "GetPasswordDataResult": { + "base": null, + "refs": { + } + }, + "GroupIdStringList": { + "base": null, + "refs": { + "AttachClassicLinkVpcRequest$Groups": "

    The ID of one or more of the VPC's security groups. You cannot specify security groups from a different VPC.

    ", + "DescribeSecurityGroupsRequest$GroupIds": "

    One or more security group IDs. Required for security groups in a nondefault VPC.

    Default: Describes all your security groups.

    ", + "ModifyInstanceAttributeRequest$Groups": "

    [EC2-VPC] Changes the security groups of the instance. You must specify at least one security group, even if it's just the default security group for the VPC. You must specify the security group ID, not the security group name.
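
    Sketch (same assumptions; instance and group IDs are placeholders): replacing an instance's security groups by ID, per the note above that names are not accepted here.

        _, err := svc.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{
            InstanceId: aws.String("i-1234567890abcdef0"),
            Groups:     []*string{aws.String("sg-12345678")},
        })
        if err != nil {
            log.Fatal(err)
        }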

    " + } + }, + "GroupIdentifier": { + "base": "

    Describes a security group.

    ", + "refs": { + "GroupIdentifierList$member": null + } + }, + "GroupIdentifierList": { + "base": null, + "refs": { + "ClassicLinkInstance$Groups": "

    A list of security groups.

    ", + "DescribeNetworkInterfaceAttributeResult$Groups": "

    The security groups associated with the network interface.

    ", + "Instance$SecurityGroups": "

    One or more security groups for the instance.

    ", + "InstanceAttribute$Groups": "

    The security groups associated with the instance.

    ", + "InstanceNetworkInterface$Groups": "

    One or more security groups.

    ", + "LaunchSpecification$SecurityGroups": "

    One or more security groups. To request an instance in a nondefault VPC, you must specify the ID of the security group. To request an instance in EC2-Classic or a default VPC, you can specify the name or the ID of the security group.

    ", + "NetworkInterface$Groups": "

    Any security groups for the network interface.

    ", + "Reservation$Groups": "

    One or more security groups.

    ", + "SpotFleetLaunchSpecification$SecurityGroups": "

    One or more security groups. To request an instance in a nondefault VPC, you must specify the ID of the security group. To request an instance in EC2-Classic or a default VPC, you can specify the name or the ID of the security group.

    " + } + }, + "GroupNameStringList": { + "base": null, + "refs": { + "DescribeSecurityGroupsRequest$GroupNames": "

    [EC2-Classic and default VPC only] One or more security group names. You can specify either the security group name or the security group ID. For security groups in a nondefault VPC, use the group-name filter to describe security groups by name.

    Default: Describes all your security groups.

    ", + "ModifySnapshotAttributeRequest$GroupNames": "

    The group to modify for the snapshot.

    " + } + }, + "HistoryRecord": { + "base": "

    Describes an event in the history of the Spot fleet request.

    ", + "refs": { + "HistoryRecords$member": null + } + }, + "HistoryRecords": { + "base": null, + "refs": { + "DescribeSpotFleetRequestHistoryResponse$HistoryRecords": "

    Information about the events in the history of the Spot fleet request.

    " + } + }, + "HypervisorType": { + "base": null, + "refs": { + "Image$Hypervisor": "

    The hypervisor type of the image.

    ", + "Instance$Hypervisor": "

    The hypervisor type of the instance.

    " + } + }, + "IamInstanceProfile": { + "base": "

    Describes an IAM instance profile.

    ", + "refs": { + "Instance$IamInstanceProfile": "

    The IAM instance profile associated with the instance.

    " + } + }, + "IamInstanceProfileSpecification": { + "base": "

    Describes an IAM instance profile.

    ", + "refs": { + "LaunchSpecification$IamInstanceProfile": "

    The IAM instance profile.

    ", + "RunInstancesRequest$IamInstanceProfile": "

    The IAM instance profile.

    ", + "SpotFleetLaunchSpecification$IamInstanceProfile": "

    The IAM instance profile.

    ", + "RequestSpotLaunchSpecification$IamInstanceProfile": "

    The IAM instance profile.

    " + } + }, + "IcmpTypeCode": { + "base": "

    Describes the ICMP type and code.

    ", + "refs": { + "CreateNetworkAclEntryRequest$IcmpTypeCode": "

    ICMP protocol: The ICMP type and code. Required if specifying ICMP for the protocol.

    ", + "NetworkAclEntry$IcmpTypeCode": "

    ICMP protocol: The ICMP type and code.

    ", + "ReplaceNetworkAclEntryRequest$IcmpTypeCode": "

    ICMP protocol: The ICMP type and code. Required if specifying 1 (ICMP) for the protocol.

    " + } + }, + "Image": { + "base": "

    Describes an image.

    ", + "refs": { + "ImageList$member": null + } + }, + "ImageAttribute": { + "base": "

    Describes an image attribute.

    ", + "refs": { + } + }, + "ImageAttributeName": { + "base": null, + "refs": { + "DescribeImageAttributeRequest$Attribute": "

    The AMI attribute.

    Note: Depending on your account privileges, the blockDeviceMapping attribute may return a Client.AuthFailure error. If this happens, use DescribeImages to get information about the block device mapping for the AMI.

    " + } + }, + "ImageDiskContainer": { + "base": "

    Describes the disk container object for an import image task.

    ", + "refs": { + "ImageDiskContainerList$member": null + } + }, + "ImageDiskContainerList": { + "base": null, + "refs": { + "ImportImageRequest$DiskContainers": "

    Information about the disk containers.

    " + } + }, + "ImageIdStringList": { + "base": null, + "refs": { + "DescribeImagesRequest$ImageIds": "

    One or more image IDs.

    Default: Describes all images available to you.

    " + } + }, + "ImageList": { + "base": null, + "refs": { + "DescribeImagesResult$Images": "

    Information about one or more images.

    " + } + }, + "ImageState": { + "base": null, + "refs": { + "Image$State": "

    The current state of the AMI. If the state is available, the image is successfully registered and can be used to launch an instance.

    " + } + }, + "ImageTypeValues": { + "base": null, + "refs": { + "Image$ImageType": "

    The type of image.

    " + } + }, + "ImportImageRequest": { + "base": null, + "refs": { + } + }, + "ImportImageResult": { + "base": null, + "refs": { + } + }, + "ImportImageTask": { + "base": "

    Describes an import image task.

    ", + "refs": { + "ImportImageTaskList$member": null + } + }, + "ImportImageTaskList": { + "base": null, + "refs": { + "DescribeImportImageTasksResult$ImportImageTasks": "

    A list of zero or more import image tasks that are currently active or were completed or canceled in the previous 7 days.

    " + } + }, + "ImportInstanceLaunchSpecification": { + "base": "

    Describes the launch specification for VM import.

    ", + "refs": { + "ImportInstanceRequest$LaunchSpecification": "

    The launch specification.

    " + } + }, + "ImportInstanceRequest": { + "base": null, + "refs": { + } + }, + "ImportInstanceResult": { + "base": null, + "refs": { + } + }, + "ImportInstanceTaskDetails": { + "base": "

    Describes an import instance task.

    ", + "refs": { + "ConversionTask$ImportInstance": "

    If the task is for importing an instance, this contains information about the import instance task.

    " + } + }, + "ImportInstanceVolumeDetailItem": { + "base": "

    Describes an import volume task.

    ", + "refs": { + "ImportInstanceVolumeDetailSet$member": null + } + }, + "ImportInstanceVolumeDetailSet": { + "base": null, + "refs": { + "ImportInstanceTaskDetails$Volumes": "

    One or more volumes.

    " + } + }, + "ImportKeyPairRequest": { + "base": null, + "refs": { + } + }, + "ImportKeyPairResult": { + "base": null, + "refs": { + } + }, + "ImportSnapshotRequest": { + "base": null, + "refs": { + } + }, + "ImportSnapshotResult": { + "base": null, + "refs": { + } + }, + "ImportSnapshotTask": { + "base": "

    Describes an import snapshot task.

    ", + "refs": { + "ImportSnapshotTaskList$member": null + } + }, + "ImportSnapshotTaskList": { + "base": null, + "refs": { + "DescribeImportSnapshotTasksResult$ImportSnapshotTasks": "

    A list of zero or more import snapshot tasks that are currently active or were completed or canceled in the previous 7 days.

    " + } + }, + "ImportTaskIdList": { + "base": null, + "refs": { + "DescribeImportImageTasksRequest$ImportTaskIds": "

    A list of import image task IDs.

    ", + "DescribeImportSnapshotTasksRequest$ImportTaskIds": "

    A list of import snapshot task IDs.

    " + } + }, + "ImportVolumeRequest": { + "base": null, + "refs": { + } + }, + "ImportVolumeResult": { + "base": null, + "refs": { + } + }, + "ImportVolumeTaskDetails": { + "base": "

    Describes an import volume task.

    ", + "refs": { + "ConversionTask$ImportVolume": "

    If the task is for importing a volume, this contains information about the import volume task.

    " + } + }, + "Instance": { + "base": "

    Describes an instance.

    ", + "refs": { + "InstanceList$member": null + } + }, + "InstanceAttribute": { + "base": "

    Describes an instance attribute.

    ", + "refs": { + } + }, + "InstanceAttributeName": { + "base": null, + "refs": { + "DescribeInstanceAttributeRequest$Attribute": "

    The instance attribute.

    ", + "ModifyInstanceAttributeRequest$Attribute": "

    The name of the attribute.

    ", + "ResetInstanceAttributeRequest$Attribute": "

    The attribute to reset.

    " + } + }, + "InstanceBlockDeviceMapping": { + "base": "

    Describes a block device mapping.

    ", + "refs": { + "InstanceBlockDeviceMappingList$member": null + } + }, + "InstanceBlockDeviceMappingList": { + "base": null, + "refs": { + "Instance$BlockDeviceMappings": "

    Any block device mapping entries for the instance.

    ", + "InstanceAttribute$BlockDeviceMappings": "

    The block device mapping of the instance.

    " + } + }, + "InstanceBlockDeviceMappingSpecification": { + "base": "

    Describes a block device mapping entry.

    ", + "refs": { + "InstanceBlockDeviceMappingSpecificationList$member": null + } + }, + "InstanceBlockDeviceMappingSpecificationList": { + "base": null, + "refs": { + "ModifyInstanceAttributeRequest$BlockDeviceMappings": "

    Modifies the DeleteOnTermination attribute for volumes that are currently attached. The volume must be owned by the caller. If no value is specified for DeleteOnTermination, the default is true and the volume is deleted when the instance is terminated.

    To add instance store volumes to an Amazon EBS-backed instance, you must add them when you launch the instance. For more information, see Updating the Block Device Mapping when Launching an Instance in the Amazon Elastic Compute Cloud User Guide.
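
    Sketch (same assumptions; the instance ID and device name are placeholders): keeping the root volume after termination by setting DeleteOnTermination to false for an attached device.

        _, err := svc.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{
            InstanceId: aws.String("i-1234567890abcdef0"),
            BlockDeviceMappings: []*ec2.InstanceBlockDeviceMappingSpecification{
                {
                    DeviceName: aws.String("/dev/sda1"),
                    Ebs: &ec2.EbsInstanceBlockDeviceSpecification{
                        DeleteOnTermination: aws.Bool(false),
                    },
                },
            },
        })
        if err != nil {
            log.Fatal(err)
        }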

    " + } + }, + "InstanceCount": { + "base": "

    Describes a Reserved Instance listing state.

    ", + "refs": { + "InstanceCountList$member": null + } + }, + "InstanceCountList": { + "base": null, + "refs": { + "ReservedInstancesListing$InstanceCounts": "

    The number of instances in this state.

    " + } + }, + "InstanceExportDetails": { + "base": "

    Describes an instance to export.

    ", + "refs": { + "ExportTask$InstanceExportDetails": "

    Information about the instance to export.

    " + } + }, + "InstanceIdStringList": { + "base": null, + "refs": { + "DescribeClassicLinkInstancesRequest$InstanceIds": "

    One or more instance IDs. Must be instances linked to a VPC through ClassicLink.

    ", + "DescribeInstanceStatusRequest$InstanceIds": "

    One or more instance IDs.

    Default: Describes all your instances.

    Constraints: Maximum 100 explicitly specified instance IDs.

    ", + "DescribeInstancesRequest$InstanceIds": "

    One or more instance IDs.

    Default: Describes all your instances.

    ", + "MonitorInstancesRequest$InstanceIds": "

    One or more instance IDs.

    ", + "RebootInstancesRequest$InstanceIds": "

    One or more instance IDs.

    ", + "ReportInstanceStatusRequest$Instances": "

    One or more instances.

    ", + "StartInstancesRequest$InstanceIds": "

    One or more instance IDs.

    ", + "StopInstancesRequest$InstanceIds": "

    One or more instance IDs.

    ", + "TerminateInstancesRequest$InstanceIds": "

    One or more instance IDs.

    ", + "UnmonitorInstancesRequest$InstanceIds": "

    One or more instance IDs.

    " + } + }, + "InstanceLifecycleType": { + "base": null, + "refs": { + "Instance$InstanceLifecycle": "

    Indicates whether this is a Spot Instance.

    " + } + }, + "InstanceList": { + "base": null, + "refs": { + "Reservation$Instances": "

    One or more instances.

    " + } + }, + "InstanceMonitoring": { + "base": "

    Describes the monitoring information of the instance.

    ", + "refs": { + "InstanceMonitoringList$member": null + } + }, + "InstanceMonitoringList": { + "base": null, + "refs": { + "MonitorInstancesResult$InstanceMonitorings": "

    Monitoring information for one or more instances.

    ", + "UnmonitorInstancesResult$InstanceMonitorings": "

    Monitoring information for one or more instances.

    " + } + }, + "InstanceNetworkInterface": { + "base": "

    Describes a network interface.

    ", + "refs": { + "InstanceNetworkInterfaceList$member": null + } + }, + "InstanceNetworkInterfaceAssociation": { + "base": "

    Describes association information for an Elastic IP address.

    ", + "refs": { + "InstanceNetworkInterface$Association": "

    The association information for an Elastic IP associated with the network interface.

    ", + "InstancePrivateIpAddress$Association": "

    The association information for an Elastic IP address for the network interface.

    " + } + }, + "InstanceNetworkInterfaceAttachment": { + "base": "

    Describes a network interface attachment.

    ", + "refs": { + "InstanceNetworkInterface$Attachment": "

    The network interface attachment.

    " + } + }, + "InstanceNetworkInterfaceList": { + "base": null, + "refs": { + "Instance$NetworkInterfaces": "

    [EC2-VPC] One or more network interfaces for the instance.

    " + } + }, + "InstanceNetworkInterfaceSpecification": { + "base": "

    Describes a network interface.

    ", + "refs": { + "InstanceNetworkInterfaceSpecificationList$member": null + } + }, + "InstanceNetworkInterfaceSpecificationList": { + "base": null, + "refs": { + "LaunchSpecification$NetworkInterfaces": "

    One or more network interfaces.

    ", + "RunInstancesRequest$NetworkInterfaces": "

    One or more network interfaces.

    ", + "SpotFleetLaunchSpecification$NetworkInterfaces": "

    One or more network interfaces.

    ", + "RequestSpotLaunchSpecification$NetworkInterfaces": "

    One or more network interfaces.

    " + } + }, + "InstancePrivateIpAddress": { + "base": "

    Describes a private IP address.

    ", + "refs": { + "InstancePrivateIpAddressList$member": null + } + }, + "InstancePrivateIpAddressList": { + "base": null, + "refs": { + "InstanceNetworkInterface$PrivateIpAddresses": "

    The private IP addresses associated with the network interface.

    " + } + }, + "InstanceState": { + "base": "

    Describes the current state of the instance.

    ", + "refs": { + "Instance$State": "

    The current state of the instance.

    ", + "InstanceStateChange$CurrentState": "

    The current state of the instance.

    ", + "InstanceStateChange$PreviousState": "

    The previous state of the instance.

    ", + "InstanceStatus$InstanceState": "

    The intended state of the instance. DescribeInstanceStatus requires that an instance be in the running state.

    " + } + }, + "InstanceStateChange": { + "base": "

    Describes an instance state change.

    ", + "refs": { + "InstanceStateChangeList$member": null + } + }, + "InstanceStateChangeList": { + "base": null, + "refs": { + "StartInstancesResult$StartingInstances": "

    Information about one or more started instances.

    ", + "StopInstancesResult$StoppingInstances": "

    Information about one or more stopped instances.

    ", + "TerminateInstancesResult$TerminatingInstances": "

    Information about one or more terminated instances.

    " + } + }, + "InstanceStateName": { + "base": null, + "refs": { + "InstanceState$Name": "

    The current state of the instance.

    " + } + }, + "InstanceStatus": { + "base": "

    Describes the status of an instance.

    ", + "refs": { + "InstanceStatusList$member": null + } + }, + "InstanceStatusDetails": { + "base": "

    Describes the instance status.

    ", + "refs": { + "InstanceStatusDetailsList$member": null + } + }, + "InstanceStatusDetailsList": { + "base": null, + "refs": { + "InstanceStatusSummary$Details": "

    The system instance health or application instance health.

    " + } + }, + "InstanceStatusEvent": { + "base": "

    Describes a scheduled event for an instance.

    ", + "refs": { + "InstanceStatusEventList$member": null + } + }, + "InstanceStatusEventList": { + "base": null, + "refs": { + "InstanceStatus$Events": "

    Any scheduled events associated with the instance.

    " + } + }, + "InstanceStatusList": { + "base": null, + "refs": { + "DescribeInstanceStatusResult$InstanceStatuses": "

    One or more instance status descriptions.

    " + } + }, + "InstanceStatusSummary": { + "base": "

    Describes the status of an instance.

    ", + "refs": { + "InstanceStatus$SystemStatus": "

    Reports impaired functionality that stems from issues related to the systems that support an instance, such as hardware failures and network connectivity problems.

    ", + "InstanceStatus$InstanceStatus": "

    Reports impaired functionality that stems from issues internal to the instance, such as impaired reachability.

    " + } + }, + "InstanceType": { + "base": null, + "refs": { + "DescribeReservedInstancesOfferingsRequest$InstanceType": "

    The instance type on which the Reserved Instance can be used. For more information, see Instance Types in the Amazon Elastic Compute Cloud User Guide.

    ", + "ImportInstanceLaunchSpecification$InstanceType": "

    The instance type. For more information about the instance types that you can import, see Before You Get Started in the Amazon Elastic Compute Cloud User Guide.

    ", + "Instance$InstanceType": "

    The instance type.

    ", + "InstanceTypeList$member": null, + "LaunchSpecification$InstanceType": "

    The instance type.

    ", + "ReservedInstances$InstanceType": "

    The instance type on which the Reserved Instance can be used.

    ", + "ReservedInstancesConfiguration$InstanceType": "

    The instance type for the modified Reserved Instances.

    ", + "ReservedInstancesOffering$InstanceType": "

    The instance type on which the Reserved Instance can be used.

    ", + "RunInstancesRequest$InstanceType": "

    The instance type. For more information, see Instance Types in the Amazon Elastic Compute Cloud User Guide.

    Default: m1.small

    ", + "SpotFleetLaunchSpecification$InstanceType": "

    The instance type.

    ", + "SpotPrice$InstanceType": "

    The instance type.

    ", + "RequestSpotLaunchSpecification$InstanceType": "

    The instance type.

    " + } + }, + "InstanceTypeList": { + "base": null, + "refs": { + "DescribeSpotPriceHistoryRequest$InstanceTypes": "

    Filters the results by the specified instance types.

    " + } + }, + "Integer": { + "base": null, + "refs": { + "AssignPrivateIpAddressesRequest$SecondaryPrivateIpAddressCount": "

    The number of secondary IP addresses to assign to the network interface. You can't specify this parameter when also specifying private IP addresses.

    ", + "AttachNetworkInterfaceRequest$DeviceIndex": "

    The index of the device for the network interface attachment.

    ", + "AuthorizeSecurityGroupEgressRequest$FromPort": "

    The start of port range for the TCP and UDP protocols, or an ICMP type number. For the ICMP type number, use -1 to specify all ICMP types.

    ", + "AuthorizeSecurityGroupEgressRequest$ToPort": "

    The end of port range for the TCP and UDP protocols, or an ICMP code number. For the ICMP code number, use -1 to specify all ICMP codes for the ICMP type.

    ", + "AuthorizeSecurityGroupIngressRequest$FromPort": "

    The start of port range for the TCP and UDP protocols, or an ICMP type number. For the ICMP type number, use -1 to specify all ICMP types.

    ", + "AuthorizeSecurityGroupIngressRequest$ToPort": "

    The end of port range for the TCP and UDP protocols, or an ICMP code number. For the ICMP code number, use -1 to specify all ICMP codes for the ICMP type.

    ", + "CreateCustomerGatewayRequest$BgpAsn": "

    For devices that support BGP, the customer gateway's BGP ASN.

    Default: 65000

    ", + "CreateNetworkAclEntryRequest$RuleNumber": "

    The rule number for the entry (for example, 100). ACL entries are processed in ascending order by rule number.

    Constraints: Positive integer from 1 to 32766

    ", + "CreateNetworkInterfaceRequest$SecondaryPrivateIpAddressCount": "

    The number of secondary private IP addresses to assign to a network interface. When you specify a number of secondary IP addresses, Amazon EC2 selects these IP addresses within the subnet range. You can't specify this option and specify more than one private IP address using privateIpAddresses.

    The number of IP addresses you can assign to a network interface varies by instance type. For more information, see Private IP Addresses Per ENI Per Instance Type in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateReservedInstancesListingRequest$InstanceCount": "

    The number of instances that are a part of a Reserved Instance account to be listed in the Reserved Instance Marketplace. This number should be less than or equal to the instance count associated with the Reserved Instance ID specified in this call.

    ", + "CreateVolumeRequest$Size": "

    The size of the volume, in GiBs.

    Constraints: 1-1024 for standard volumes, 1-16384 for gp2 volumes, and 4-16384 for io1 volumes. If you specify a snapshot, the volume size must be equal to or larger than the snapshot size.

    Default: If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size.

    ", + "CreateVolumeRequest$Iops": "

    Only valid for Provisioned IOPS (SSD) volumes. The number of I/O operations per second (IOPS) to provision for the volume, with a maximum ratio of 30 IOPS/GiB.

    Constraint: Range is 100 to 20000 for Provisioned IOPS (SSD) volumes

    ", + "DeleteNetworkAclEntryRequest$RuleNumber": "

    The rule number of the entry to delete.

    ", + "DescribeClassicLinkInstancesRequest$MaxResults": "

    The maximum number of results to return for the request in a single page. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned. You cannot specify this parameter and the instance IDs parameter in the same request.

    Constraint: If the value is greater than 1000, we return only 1000 items.

    ", + "DescribeFlowLogsRequest$MaxResults": "

    The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned. You cannot specify this parameter and the flow log IDs parameter in the same request.

    ", + "DescribeImportImageTasksRequest$MaxResults": "

    The maximum number of results to return in a single request.

    ", + "DescribeImportSnapshotTasksRequest$MaxResults": "

    The maximum number of results to return in a single request.

    ", + "DescribeInstanceStatusRequest$MaxResults": "

    The maximum number of results to return for the request in a single page. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned. You cannot specify this parameter and the instance IDs parameter in the same request.

    ", + "DescribeInstancesRequest$MaxResults": "

    The maximum number of results to return for the request in a single page. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned. You cannot specify this parameter and the instance IDs parameter in the same request.

    ", + "DescribeMovingAddressesRequest$MaxResults": "

    The maximum number of results to return for the request in a single page. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value outside of this range, an error is returned.

    Default: If no value is provided, the default is 1000.

    ", + "DescribePrefixListsRequest$MaxResults": "

    The maximum number of items to return for this request. The request returns a token that you can specify in a subsequent call to get the next set of results.

    Constraint: If the value specified is greater than 1000, we return only 1000 items.

    ", + "DescribeReservedInstancesOfferingsRequest$MaxResults": "

    The maximum number of results to return for the request in a single page. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. The maximum is 100.

    Default: 100

    ", + "DescribeReservedInstancesOfferingsRequest$MaxInstanceCount": "

    The maximum number of instances to filter when searching for offerings.

    Default: 20

    ", + "DescribeSnapshotsRequest$MaxResults": "

    The maximum number of snapshot results returned by DescribeSnapshots in paginated output. When this parameter is used, DescribeSnapshots only returns MaxResults results in a single page along with a NextToken response element. The remaining results of the initial request can be seen by sending another DescribeSnapshots request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned. If this parameter is not used, then DescribeSnapshots returns all results. You cannot specify this parameter and the snapshot IDs parameter in the same request.

    ", + "DescribeSpotFleetInstancesRequest$MaxResults": "

    The maximum number of results to return in a single call. Specify a value between 1 and 1000. The default value is 1000. To retrieve the remaining results, make another call with the returned NextToken value.

    ", + "DescribeSpotFleetRequestHistoryRequest$MaxResults": "

    The maximum number of results to return in a single call. Specify a value between 1 and 1000. The default value is 1000. To retrieve the remaining results, make another call with the returned NextToken value.

    ", + "DescribeSpotFleetRequestsRequest$MaxResults": "

    The maximum number of results to return in a single call. Specify a value between 1 and 1000. The default value is 1000. To retrieve the remaining results, make another call with the returned NextToken value.

    ", + "DescribeSpotPriceHistoryRequest$MaxResults": "

    The maximum number of results to return in a single call. Specify a value between 1 and 1000. The default value is 1000. To retrieve the remaining results, make another call with the returned NextToken value.

    ", + "DescribeTagsRequest$MaxResults": "

    The maximum number of results to return for the request in a single page. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned.

    ", + "DescribeVolumeStatusRequest$MaxResults": "

    The maximum number of volume results returned by DescribeVolumeStatus in paginated output. When this parameter is used, the request only returns MaxResults results in a single page along with a NextToken response element. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned. If this parameter is not used, then DescribeVolumeStatus returns all results. You cannot specify this parameter and the volume IDs parameter in the same request.

    ", + "DescribeVolumesRequest$MaxResults": "

    The maximum number of volume results returned by DescribeVolumes in paginated output. When this parameter is used, DescribeVolumes only returns MaxResults results in a single page along with a NextToken response element. The remaining results of the initial request can be seen by sending another DescribeVolumes request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned. If this parameter is not used, then DescribeVolumes returns all results. You cannot specify this parameter and the volume IDs parameter in the same request.

    ", + "DescribeVpcEndpointServicesRequest$MaxResults": "

    The maximum number of items to return for this request. The request returns a token that you can specify in a subsequent call to get the next set of results.

    Constraint: If the value is greater than 1000, we return only 1000 items.

    ", + "DescribeVpcEndpointsRequest$MaxResults": "

    The maximum number of items to return for this request. The request returns a token that you can specify in a subsequent call to get the next set of results.

    Constraint: If the value is greater than 1000, we return only 1000 items.

    ", + "EbsBlockDevice$VolumeSize": "

    The size of the volume, in GiB.

    Constraints: 1-1024 for standard volumes, 1-16384 for gp2 volumes, and 4-16384 for io1 volumes. If you specify a snapshot, the volume size must be equal to or larger than the snapshot size.

    Default: If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size.

    ", + "EbsBlockDevice$Iops": "

    The number of I/O operations per second (IOPS) that the volume supports. For Provisioned IOPS (SSD) volumes, this represents the number of IOPS that are provisioned for the volume. For General Purpose (SSD) volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. For more information on General Purpose (SSD) baseline performance, I/O credits, and bursting, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.

    Constraint: Range is 100 to 20000 for Provisioned IOPS (SSD) volumes and 3 to 10000 for General Purpose (SSD) volumes.

    Condition: This parameter is required for requests to create io1 volumes; it is not used in requests to create standard or gp2 volumes.

    ", + "IcmpTypeCode$Type": "

    The ICMP code. A value of -1 means all codes for the specified ICMP type.

    ", + "IcmpTypeCode$Code": "

    The ICMP type. A value of -1 means all types.

    ", + "Instance$AmiLaunchIndex": "

    The AMI launch index, which can be used to find this instance in the launch group.

    ", + "InstanceCount$InstanceCount": "

    The number of listed Reserved Instances in the state specified by the state.

    ", + "InstanceNetworkInterfaceAttachment$DeviceIndex": "

    The index of the device on the instance for the network interface attachment.

    ", + "InstanceNetworkInterfaceSpecification$DeviceIndex": "

    The index of the device on the instance for the network interface attachment. If you are specifying a network interface in a RunInstances request, you must provide the device index.

    ", + "InstanceNetworkInterfaceSpecification$SecondaryPrivateIpAddressCount": "

    The number of secondary private IP addresses. You can't specify this option and specify more than one private IP address using the private IP addresses option.

    ", + "InstanceState$Code": "

    The low byte represents the state. The high byte is an opaque internal value and should be ignored.

    • 0 : pending

    • 16 : running

    • 32 : shutting-down

    • 48 : terminated

    • 64 : stopping

    • 80 : stopped

    ", + "IpPermission$FromPort": "

    The start of port range for the TCP and UDP protocols, or an ICMP type number. A value of -1 indicates all ICMP types.

    ", + "IpPermission$ToPort": "

    The end of port range for the TCP and UDP protocols, or an ICMP code. A value of -1 indicates all ICMP codes for the specified ICMP type.

    ", + "NetworkAclEntry$RuleNumber": "

    The rule number for the entry. ACL entries are processed in ascending order by rule number.

    ", + "NetworkInterfaceAttachment$DeviceIndex": "

    The device index of the network interface attachment on the instance.

    ", + "PortRange$From": "

    The first port in the range.

    ", + "PortRange$To": "

    The last port in the range.

    ", + "PricingDetail$Count": "

    The number of instances available for the price.

    ", + "PurchaseReservedInstancesOfferingRequest$InstanceCount": "

    The number of Reserved Instances to purchase.

    ", + "ReplaceNetworkAclEntryRequest$RuleNumber": "

    The rule number of the entry to replace.

    ", + "RequestSpotInstancesRequest$InstanceCount": "

    The maximum number of Spot instances to launch.

    Default: 1

    ", + "ReservedInstances$InstanceCount": "

    The number of Reserved Instances purchased.

    ", + "ReservedInstancesConfiguration$InstanceCount": "

    The number of modified Reserved Instances.

    ", + "RevokeSecurityGroupEgressRequest$FromPort": "

    The start of port range for the TCP and UDP protocols, or an ICMP type number. For the ICMP type number, use -1 to specify all ICMP types.

    ", + "RevokeSecurityGroupEgressRequest$ToPort": "

    The end of port range for the TCP and UDP protocols, or an ICMP code number. For the ICMP code number, use -1 to specify all ICMP codes for the ICMP type.

    ", + "RevokeSecurityGroupIngressRequest$FromPort": "

    The start of port range for the TCP and UDP protocols, or an ICMP type number. For the ICMP type number, use -1 to specify all ICMP types.

    ", + "RevokeSecurityGroupIngressRequest$ToPort": "

    The end of port range for the TCP and UDP protocols, or an ICMP code number. For the ICMP code number, use -1 to specify all ICMP codes for the ICMP type.

    ", + "RunInstancesRequest$MinCount": "

    The minimum number of instances to launch. If you specify a minimum that is more instances than Amazon EC2 can launch in the target Availability Zone, Amazon EC2 launches no instances.

    Constraints: Between 1 and the maximum number you're allowed for the specified instance type. For more information about the default limits, and how to request an increase, see How many instances can I run in Amazon EC2 in the Amazon EC2 General FAQ.

    ", + "RunInstancesRequest$MaxCount": "

    The maximum number of instances to launch. If you specify more instances than Amazon EC2 can launch in the target Availability Zone, Amazon EC2 launches the largest possible number of instances above MinCount.

    Constraints: Between 1 and the maximum number you're allowed for the specified instance type. For more information about the default limits, and how to request an increase, see How many instances can I run in Amazon EC2 in the Amazon EC2 General FAQ.

    ", + "Snapshot$VolumeSize": "

    The size of the volume, in GiB.

    ", + "SpotFleetRequestConfigData$TargetCapacity": "

    The number of units to request. You can choose to set the target capacity in terms of instances or a performance characteristic that is important to your application workload, such as vCPUs, memory, or I/O.

    ", + "Subnet$AvailableIpAddressCount": "

    The number of unused IP addresses in the subnet. Note that the IP addresses for any stopped instances are considered unavailable.

    ", + "VgwTelemetry$AcceptedRouteCount": "

    The number of accepted routes.

    ", + "Volume$Size": "

    The size of the volume, in GiBs.

    ", + "Volume$Iops": "

    The number of I/O operations per second (IOPS) that the volume supports. For Provisioned IOPS (SSD) volumes, this represents the number of IOPS that are provisioned for the volume. For General Purpose (SSD) volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. For more information on General Purpose (SSD) baseline performance, I/O credits, and bursting, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.

    Constraint: Range is 100 to 20000 for Provisioned IOPS (SSD) volumes and 3 to 10000 for General Purpose (SSD) volumes.

    Condition: This parameter is required for requests to create io1 volumes; it is not used in requests to create standard or gp2 volumes.

    " + } + }, + "InternetGateway": { + "base": "

    Describes an Internet gateway.

    ", + "refs": { + "CreateInternetGatewayResult$InternetGateway": "

    Information about the Internet gateway.

    ", + "InternetGatewayList$member": null + } + }, + "InternetGatewayAttachment": { + "base": "

    Describes the attachment of a VPC to an Internet gateway.

    ", + "refs": { + "InternetGatewayAttachmentList$member": null + } + }, + "InternetGatewayAttachmentList": { + "base": null, + "refs": { + "InternetGateway$Attachments": "

    Any VPCs attached to the Internet gateway.

    " + } + }, + "InternetGatewayList": { + "base": null, + "refs": { + "DescribeInternetGatewaysResult$InternetGateways": "

    Information about one or more Internet gateways.

    " + } + }, + "IpPermission": { + "base": "

    Describes a security group rule.

    ", + "refs": { + "IpPermissionList$member": null + } + }, + "IpPermissionList": { + "base": null, + "refs": { + "AuthorizeSecurityGroupEgressRequest$IpPermissions": "

    A set of IP permissions. You can't specify a destination security group and a CIDR IP address range.

    ", + "AuthorizeSecurityGroupIngressRequest$IpPermissions": "

    A set of IP permissions. Can be used to specify multiple rules in a single command.

    ", + "RevokeSecurityGroupEgressRequest$IpPermissions": "

    A set of IP permissions. You can't specify a destination security group and a CIDR IP address range.

    ", + "RevokeSecurityGroupIngressRequest$IpPermissions": "

    A set of IP permissions. You can't specify a source security group and a CIDR IP address range.

    ", + "SecurityGroup$IpPermissions": "

    One or more inbound rules associated with the security group.

    ", + "SecurityGroup$IpPermissionsEgress": "

    [EC2-VPC] One or more outbound rules associated with the security group.

    " + } + }, + "IpRange": { + "base": "

    Describes an IP range.

    ", + "refs": { + "IpRangeList$member": null + } + }, + "IpRangeList": { + "base": null, + "refs": { + "IpPermission$IpRanges": "

    One or more IP ranges.

    " + } + }, + "KeyNameStringList": { + "base": null, + "refs": { + "DescribeKeyPairsRequest$KeyNames": "

    One or more key pair names.

    Default: Describes all your key pairs.

    " + } + }, + "KeyPair": { + "base": "

    Describes a key pair.

    ", + "refs": { + } + }, + "KeyPairInfo": { + "base": "

    Describes a key pair.

    ", + "refs": { + "KeyPairList$member": null + } + }, + "KeyPairList": { + "base": null, + "refs": { + "DescribeKeyPairsResult$KeyPairs": "

    Information about one or more key pairs.

    " + } + }, + "LaunchPermission": { + "base": "

    Describes a launch permission.

    ", + "refs": { + "LaunchPermissionList$member": null + } + }, + "LaunchPermissionList": { + "base": null, + "refs": { + "ImageAttribute$LaunchPermissions": "

    One or more launch permissions.

    ", + "LaunchPermissionModifications$Add": "

    The AWS account ID to add to the list of launch permissions for the AMI.

    ", + "LaunchPermissionModifications$Remove": "

    The AWS account ID to remove from the list of launch permissions for the AMI.

    " + } + }, + "LaunchPermissionModifications": { + "base": "

    Describes a launch permission modification.

    ", + "refs": { + "ModifyImageAttributeRequest$LaunchPermission": "

    A launch permission modification.

    " + } + }, + "LaunchSpecification": { + "base": "

    Describes the launch specification for an instance.

    ", + "refs": { + "SpotInstanceRequest$LaunchSpecification": "

    Additional information for launching instances.

    " + } + }, + "LaunchSpecsList": { + "base": null, + "refs": { + "SpotFleetRequestConfigData$LaunchSpecifications": "

    Information about the launch specifications for the Spot fleet request.

    " + } + }, + "ListingState": { + "base": null, + "refs": { + "InstanceCount$State": "

    The states of the listed Reserved Instances.

    " + } + }, + "ListingStatus": { + "base": null, + "refs": { + "ReservedInstancesListing$Status": "

    The status of the Reserved Instance listing.

    " + } + }, + "Long": { + "base": null, + "refs": { + "DescribeReservedInstancesOfferingsRequest$MinDuration": "

    The minimum duration (in seconds) to filter when searching for offerings.

    Default: 2592000 (1 month)

    ", + "DescribeReservedInstancesOfferingsRequest$MaxDuration": "

    The maximum duration (in seconds) to filter when searching for offerings.

    Default: 94608000 (3 years)

    ", + "DiskImageDescription$Size": "

    The size of the disk image, in GiB.

    ", + "DiskImageDetail$Bytes": "

    The size of the disk image, in GiB.

    ", + "DiskImageVolumeDescription$Size": "

    The size of the volume, in GiB.

    ", + "ImportInstanceVolumeDetailItem$BytesConverted": "

    The number of bytes converted so far.

    ", + "ImportVolumeTaskDetails$BytesConverted": "

    The number of bytes converted so far.

    ", + "PriceSchedule$Term": "

    The number of months remaining in the reservation. For example, 2 is the second to the last month before the capacity reservation expires.

    ", + "PriceScheduleSpecification$Term": "

    The number of months remaining in the reservation. For example, 2 is the second to the last month before the capacity reservation expires.

    ", + "ReservedInstances$Duration": "

    The duration of the Reserved Instance, in seconds.

    ", + "ReservedInstancesOffering$Duration": "

    The duration of the Reserved Instance, in seconds.

    ", + "VolumeDetail$Size": "

    The size of the volume, in GiB.

    " + } + }, + "ModifyImageAttributeRequest": { + "base": null, + "refs": { + } + }, + "ModifyInstanceAttributeRequest": { + "base": null, + "refs": { + } + }, + "ModifyNetworkInterfaceAttributeRequest": { + "base": null, + "refs": { + } + }, + "ModifyReservedInstancesRequest": { + "base": null, + "refs": { + } + }, + "ModifyReservedInstancesResult": { + "base": null, + "refs": { + } + }, + "ModifySnapshotAttributeRequest": { + "base": null, + "refs": { + } + }, + "ModifySubnetAttributeRequest": { + "base": null, + "refs": { + } + }, + "ModifyVolumeAttributeRequest": { + "base": null, + "refs": { + } + }, + "ModifyVpcAttributeRequest": { + "base": null, + "refs": { + } + }, + "ModifyVpcEndpointRequest": { + "base": null, + "refs": { + } + }, + "ModifyVpcEndpointResult": { + "base": null, + "refs": { + } + }, + "MonitorInstancesRequest": { + "base": null, + "refs": { + } + }, + "MonitorInstancesResult": { + "base": null, + "refs": { + } + }, + "Monitoring": { + "base": "

    Describes the monitoring for the instance.

    ", + "refs": { + "Instance$Monitoring": "

    The monitoring information for the instance.

    ", + "InstanceMonitoring$Monitoring": "

    The monitoring information.

    " + } + }, + "MonitoringState": { + "base": null, + "refs": { + "Monitoring$State": "

    Indicates whether monitoring is enabled for the instance.

    " + } + }, + "MoveAddressToVpcRequest": { + "base": null, + "refs": { + } + }, + "MoveAddressToVpcResult": { + "base": null, + "refs": { + } + }, + "MoveStatus": { + "base": null, + "refs": { + "MovingAddressStatus$MoveStatus": "

    The status of the Elastic IP address that's being moved to the EC2-VPC platform, or restored to the EC2-Classic platform.

    " + } + }, + "MovingAddressStatus": { + "base": "

    Describes the status of a moving Elastic IP address.

    ", + "refs": { + "MovingAddressStatusSet$member": null + } + }, + "MovingAddressStatusSet": { + "base": null, + "refs": { + "DescribeMovingAddressesResult$MovingAddressStatuses": "

    The status for each Elastic IP address.

    " + } + }, + "NetworkAcl": { + "base": "

    Describes a network ACL.

    ", + "refs": { + "CreateNetworkAclResult$NetworkAcl": "

    Information about the network ACL.

    ", + "NetworkAclList$member": null + } + }, + "NetworkAclAssociation": { + "base": "

    Describes an association between a network ACL and a subnet.

    ", + "refs": { + "NetworkAclAssociationList$member": null + } + }, + "NetworkAclAssociationList": { + "base": null, + "refs": { + "NetworkAcl$Associations": "

    Any associations between the network ACL and one or more subnets

    " + } + }, + "NetworkAclEntry": { + "base": "

    Describes an entry in a network ACL.

    ", + "refs": { + "NetworkAclEntryList$member": null + } + }, + "NetworkAclEntryList": { + "base": null, + "refs": { + "NetworkAcl$Entries": "

    One or more entries (rules) in the network ACL.

    " + } + }, + "NetworkAclList": { + "base": null, + "refs": { + "DescribeNetworkAclsResult$NetworkAcls": "

    Information about one or more network ACLs.

    " + } + }, + "NetworkInterface": { + "base": "

    Describes a network interface.

    ", + "refs": { + "CreateNetworkInterfaceResult$NetworkInterface": "

    Information about the network interface.

    ", + "NetworkInterfaceList$member": null + } + }, + "NetworkInterfaceAssociation": { + "base": "

    Describes association information for an Elastic IP address.

    ", + "refs": { + "NetworkInterface$Association": "

    The association information for an Elastic IP associated with the network interface.

    ", + "NetworkInterfacePrivateIpAddress$Association": "

    The association information for an Elastic IP address associated with the network interface.

    " + } + }, + "NetworkInterfaceAttachment": { + "base": "

    Describes a network interface attachment.

    ", + "refs": { + "DescribeNetworkInterfaceAttributeResult$Attachment": "

    The attachment (if any) of the network interface.

    ", + "NetworkInterface$Attachment": "

    The network interface attachment.

    " + } + }, + "NetworkInterfaceAttachmentChanges": { + "base": "

    Describes an attachment change.

    ", + "refs": { + "ModifyNetworkInterfaceAttributeRequest$Attachment": "

    Information about the interface attachment. If modifying the 'delete on termination' attribute, you must specify the ID of the interface attachment.

    " + } + }, + "NetworkInterfaceAttribute": { + "base": null, + "refs": { + "DescribeNetworkInterfaceAttributeRequest$Attribute": "

    The attribute of the network interface.

    " + } + }, + "NetworkInterfaceIdList": { + "base": null, + "refs": { + "DescribeNetworkInterfacesRequest$NetworkInterfaceIds": "

    One or more network interface IDs.

    Default: Describes all your network interfaces.

    " + } + }, + "NetworkInterfaceList": { + "base": null, + "refs": { + "DescribeNetworkInterfacesResult$NetworkInterfaces": "

    Information about one or more network interfaces.

    " + } + }, + "NetworkInterfacePrivateIpAddress": { + "base": "

    Describes the private IP address of a network interface.

    ", + "refs": { + "NetworkInterfacePrivateIpAddressList$member": null + } + }, + "NetworkInterfacePrivateIpAddressList": { + "base": null, + "refs": { + "NetworkInterface$PrivateIpAddresses": "

    The private IP addresses associated with the network interface.

    " + } + }, + "NetworkInterfaceStatus": { + "base": null, + "refs": { + "InstanceNetworkInterface$Status": "

    The status of the network interface.

    ", + "NetworkInterface$Status": "

    The status of the network interface.

    " + } + }, + "OfferingTypeValues": { + "base": null, + "refs": { + "DescribeReservedInstancesOfferingsRequest$OfferingType": "

    The Reserved Instance offering type. If you are using tools that predate the 2011-11-01 API version, you only have access to the Medium Utilization Reserved Instance offering type.

    ", + "DescribeReservedInstancesRequest$OfferingType": "

    The Reserved Instance offering type. If you are using tools that predate the 2011-11-01 API version, you only have access to the Medium Utilization Reserved Instance offering type.

    ", + "ReservedInstances$OfferingType": "

    The Reserved Instance offering type.

    ", + "ReservedInstancesOffering$OfferingType": "

    The Reserved Instance offering type.

    " + } + }, + "OperationType": { + "base": null, + "refs": { + "ModifyImageAttributeRequest$OperationType": "

    The operation type.

    ", + "ModifySnapshotAttributeRequest$OperationType": "

    The type of operation to perform to the attribute.

    " + } + }, + "OwnerStringList": { + "base": null, + "refs": { + "DescribeImagesRequest$Owners": "

    Filters the images by the owner. Specify an AWS account ID, amazon (owner is Amazon), aws-marketplace (owner is AWS Marketplace), self (owner is the sender of the request). Omitting this option returns all images for which you have launch permissions, regardless of ownership.

    ", + "DescribeSnapshotsRequest$OwnerIds": "

    Returns the snapshots owned by the specified owner. Multiple owners can be specified.

    " + } + }, + "PermissionGroup": { + "base": null, + "refs": { + "CreateVolumePermission$Group": "

    The specific group that is to be added or removed from a volume's list of create volume permissions.

    ", + "LaunchPermission$Group": "

    The name of the group.

    " + } + }, + "Placement": { + "base": "

    Describes the placement for the instance.

    ", + "refs": { + "ImportInstanceLaunchSpecification$Placement": "

    The placement information for the instance.

    ", + "Instance$Placement": "

    The location where the instance launched.

    ", + "RunInstancesRequest$Placement": "

    The placement for the instance.

    " + } + }, + "PlacementGroup": { + "base": "

    Describes a placement group.

    ", + "refs": { + "PlacementGroupList$member": null + } + }, + "PlacementGroupList": { + "base": null, + "refs": { + "DescribePlacementGroupsResult$PlacementGroups": "

    One or more placement groups.

    " + } + }, + "PlacementGroupState": { + "base": null, + "refs": { + "PlacementGroup$State": "

    The state of the placement group.

    " + } + }, + "PlacementGroupStringList": { + "base": null, + "refs": { + "DescribePlacementGroupsRequest$GroupNames": "

    One or more placement group names.

    Default: Describes all your placement groups, or only those otherwise specified.

    " + } + }, + "PlacementStrategy": { + "base": null, + "refs": { + "CreatePlacementGroupRequest$Strategy": "

    The placement strategy.

    ", + "PlacementGroup$Strategy": "

    The placement strategy.

    " + } + }, + "PlatformValues": { + "base": null, + "refs": { + "Image$Platform": "

    The value is Windows for Windows AMIs; otherwise blank.

    ", + "ImportInstanceRequest$Platform": "

    The instance operating system.

    ", + "ImportInstanceTaskDetails$Platform": "

    The instance operating system.

    ", + "Instance$Platform": "

    The value is Windows for Windows instances; otherwise blank.

    " + } + }, + "PortRange": { + "base": "

    Describes a range of ports.

    ", + "refs": { + "CreateNetworkAclEntryRequest$PortRange": "

    TCP or UDP protocols: The range of ports the rule applies to.

    ", + "NetworkAclEntry$PortRange": "

    TCP or UDP protocols: The range of ports the rule applies to.

    ", + "ReplaceNetworkAclEntryRequest$PortRange": "

    TCP or UDP protocols: The range of ports the rule applies to. Required if specifying 6 (TCP) or 17 (UDP) for the protocol.

    " + } + }, + "PrefixList": { + "base": "

    Describes prefixes for AWS services.

    ", + "refs": { + "PrefixListSet$member": null + } + }, + "PrefixListId": { + "base": "

    The ID of the prefix.

    ", + "refs": { + "PrefixListIdList$member": null + } + }, + "PrefixListIdList": { + "base": null, + "refs": { + "IpPermission$PrefixListIds": "

    (Valid for AuthorizeSecurityGroupEgress, RevokeSecurityGroupEgress and DescribeSecurityGroups only) One or more prefix list IDs for an AWS service. In an AuthorizeSecurityGroupEgress request, this is the AWS service that you want to access through a VPC endpoint from instances associated with the security group.

    " + } + }, + "PrefixListSet": { + "base": null, + "refs": { + "DescribePrefixListsResult$PrefixLists": "

    All available prefix lists.

    " + } + }, + "PriceSchedule": { + "base": "

    Describes the price for a Reserved Instance.

    ", + "refs": { + "PriceScheduleList$member": null + } + }, + "PriceScheduleList": { + "base": null, + "refs": { + "ReservedInstancesListing$PriceSchedules": "

    The price of the Reserved Instance listing.

    " + } + }, + "PriceScheduleSpecification": { + "base": "

    Describes the price for a Reserved Instance.

    ", + "refs": { + "PriceScheduleSpecificationList$member": null + } + }, + "PriceScheduleSpecificationList": { + "base": null, + "refs": { + "CreateReservedInstancesListingRequest$PriceSchedules": "

    A list specifying the price of the Reserved Instance for each month remaining in the Reserved Instance term.

    " + } + }, + "PricingDetail": { + "base": "

    Describes a Reserved Instance offering.

    ", + "refs": { + "PricingDetailsList$member": null + } + }, + "PricingDetailsList": { + "base": null, + "refs": { + "ReservedInstancesOffering$PricingDetails": "

    The pricing details of the Reserved Instance offering.

    " + } + }, + "PrivateIpAddressSpecification": { + "base": "

    Describes a secondary private IP address for a network interface.

    ", + "refs": { + "PrivateIpAddressSpecificationList$member": null + } + }, + "PrivateIpAddressSpecificationList": { + "base": null, + "refs": { + "CreateNetworkInterfaceRequest$PrivateIpAddresses": "

    One or more private IP addresses.

    ", + "InstanceNetworkInterfaceSpecification$PrivateIpAddresses": "

    One or more private IP addresses to assign to the network interface. Only one private IP address can be designated as primary.

    " + } + }, + "PrivateIpAddressStringList": { + "base": null, + "refs": { + "AssignPrivateIpAddressesRequest$PrivateIpAddresses": "

    One or more IP addresses to be assigned as a secondary private IP address to the network interface. You can't specify this parameter when also specifying a number of secondary IP addresses.

    If you don't specify an IP address, Amazon EC2 automatically selects an IP address within the subnet range.

    ", + "UnassignPrivateIpAddressesRequest$PrivateIpAddresses": "

    The secondary private IP addresses to unassign from the network interface. You can specify this option multiple times to unassign more than one IP address.

    " + } + }, + "ProductCode": { + "base": "

    Describes a product code.

    ", + "refs": { + "ProductCodeList$member": null + } + }, + "ProductCodeList": { + "base": null, + "refs": { + "DescribeSnapshotAttributeResult$ProductCodes": "

    A list of product codes.

    ", + "DescribeVolumeAttributeResult$ProductCodes": "

    A list of product codes.

    ", + "Image$ProductCodes": "

    Any product codes associated with the AMI.

    ", + "ImageAttribute$ProductCodes": "

    One or more product codes.

    ", + "Instance$ProductCodes": "

    The product codes attached to this instance.

    ", + "InstanceAttribute$ProductCodes": "

    A list of product codes.

    " + } + }, + "ProductCodeStringList": { + "base": null, + "refs": { + "ModifyImageAttributeRequest$ProductCodes": "

    One or more product codes. After you add a product code to an AMI, it can't be removed. This is only valid when modifying the productCodes attribute.

    " + } + }, + "ProductCodeValues": { + "base": null, + "refs": { + "ProductCode$ProductCodeType": "

    The type of product code.

    " + } + }, + "ProductDescriptionList": { + "base": null, + "refs": { + "DescribeSpotPriceHistoryRequest$ProductDescriptions": "

    Filters the results by the specified basic product descriptions.

    " + } + }, + "PropagatingVgw": { + "base": "

    Describes a virtual private gateway propagating route.

    ", + "refs": { + "PropagatingVgwList$member": null + } + }, + "PropagatingVgwList": { + "base": null, + "refs": { + "RouteTable$PropagatingVgws": "

    Any virtual private gateway (VGW) propagating routes.

    " + } + }, + "PublicIpStringList": { + "base": null, + "refs": { + "DescribeAddressesRequest$PublicIps": "

    [EC2-Classic] One or more Elastic IP addresses.

    Default: Describes all your Elastic IP addresses.

    " + } + }, + "PurchaseReservedInstancesOfferingRequest": { + "base": null, + "refs": { + } + }, + "PurchaseReservedInstancesOfferingResult": { + "base": null, + "refs": { + } + }, + "RIProductDescription": { + "base": null, + "refs": { + "DescribeReservedInstancesOfferingsRequest$ProductDescription": "

    The Reserved Instance product platform description. Instances that include (Amazon VPC) in the description are for use with Amazon VPC.

    ", + "ReservedInstances$ProductDescription": "

    The Reserved Instance product platform description.

    ", + "ReservedInstancesOffering$ProductDescription": "

    The Reserved Instance product platform description.

    ", + "SpotInstanceRequest$ProductDescription": "

    The product description associated with the Spot instance.

    ", + "SpotPrice$ProductDescription": "

    A general description of the AMI.

    " + } + }, + "ReasonCodesList": { + "base": null, + "refs": { + "ReportInstanceStatusRequest$ReasonCodes": "

    One or more reason codes that describes the health state of your instance.

    • instance-stuck-in-state: My instance is stuck in a state.

    • unresponsive: My instance is unresponsive.

    • not-accepting-credentials: My instance is not accepting my credentials.

    • password-not-available: A password is not available for my instance.

    • performance-network: My instance is experiencing performance problems which I believe are network related.

    • performance-instance-store: My instance is experiencing performance problems which I believe are related to the instance stores.

    • performance-ebs-volume: My instance is experiencing performance problems which I believe are related to an EBS volume.

    • performance-other: My instance is experiencing performance problems.

    • other: [explain using the description parameter]

    " + } + }, + "RebootInstancesRequest": { + "base": null, + "refs": { + } + }, + "RecurringCharge": { + "base": "

    Describes a recurring charge.

    ", + "refs": { + "RecurringChargesList$member": null + } + }, + "RecurringChargeFrequency": { + "base": null, + "refs": { + "RecurringCharge$Frequency": "

    The frequency of the recurring charge.

    " + } + }, + "RecurringChargesList": { + "base": null, + "refs": { + "ReservedInstances$RecurringCharges": "

    The recurring charge tag assigned to the resource.

    ", + "ReservedInstancesOffering$RecurringCharges": "

    The recurring charge tag assigned to the resource.

    " + } + }, + "Region": { + "base": "

    Describes a region.

    ", + "refs": { + "RegionList$member": null + } + }, + "RegionList": { + "base": null, + "refs": { + "DescribeRegionsResult$Regions": "

    Information about one or more regions.

    " + } + }, + "RegionNameStringList": { + "base": null, + "refs": { + "DescribeRegionsRequest$RegionNames": "

    The names of one or more regions.

    " + } + }, + "RegisterImageRequest": { + "base": null, + "refs": { + } + }, + "RegisterImageResult": { + "base": null, + "refs": { + } + }, + "RejectVpcPeeringConnectionRequest": { + "base": null, + "refs": { + } + }, + "RejectVpcPeeringConnectionResult": { + "base": null, + "refs": { + } + }, + "ReleaseAddressRequest": { + "base": null, + "refs": { + } + }, + "ReplaceNetworkAclAssociationRequest": { + "base": null, + "refs": { + } + }, + "ReplaceNetworkAclAssociationResult": { + "base": null, + "refs": { + } + }, + "ReplaceNetworkAclEntryRequest": { + "base": null, + "refs": { + } + }, + "ReplaceRouteRequest": { + "base": null, + "refs": { + } + }, + "ReplaceRouteTableAssociationRequest": { + "base": null, + "refs": { + } + }, + "ReplaceRouteTableAssociationResult": { + "base": null, + "refs": { + } + }, + "ReportInstanceReasonCodes": { + "base": null, + "refs": { + "ReasonCodesList$member": null + } + }, + "ReportInstanceStatusRequest": { + "base": null, + "refs": { + } + }, + "ReportStatusType": { + "base": null, + "refs": { + "ReportInstanceStatusRequest$Status": "

    The status of all instances listed.

    " + } + }, + "RequestSpotFleetRequest": { + "base": "

    Contains the parameters for RequestSpotFleet.

    ", + "refs": { + } + }, + "RequestSpotFleetResponse": { + "base": "

    Contains the output of RequestSpotFleet.

    ", + "refs": { + } + }, + "RequestSpotInstancesRequest": { + "base": "

    Contains the parameters for RequestSpotInstances.

    ", + "refs": { + } + }, + "RequestSpotInstancesResult": { + "base": "

    Contains the output of RequestSpotInstances.

    ", + "refs": { + } + }, + "Reservation": { + "base": "

    Describes a reservation.

    ", + "refs": { + "ReservationList$member": null + } + }, + "ReservationList": { + "base": null, + "refs": { + "DescribeInstancesResult$Reservations": "

    One or more reservations.

    " + } + }, + "ReservedInstanceLimitPrice": { + "base": "

    Describes the limit price of a Reserved Instance offering.

    ", + "refs": { + "PurchaseReservedInstancesOfferingRequest$LimitPrice": "

    Specified for Reserved Instance Marketplace offerings to limit the total order and ensure that the Reserved Instances are not purchased at unexpected prices.

    " + } + }, + "ReservedInstanceState": { + "base": null, + "refs": { + "ReservedInstances$State": "

    The state of the Reserved Instance purchase.

    " + } + }, + "ReservedInstances": { + "base": "

    Describes a Reserved Instance.

    ", + "refs": { + "ReservedInstancesList$member": null + } + }, + "ReservedInstancesConfiguration": { + "base": "

    Describes the configuration settings for the modified Reserved Instances.

    ", + "refs": { + "ReservedInstancesConfigurationList$member": null, + "ReservedInstancesModificationResult$TargetConfiguration": "

    The target Reserved Instances configurations supplied as part of the modification request.

    " + } + }, + "ReservedInstancesConfigurationList": { + "base": null, + "refs": { + "ModifyReservedInstancesRequest$TargetConfigurations": "

    The configuration settings for the Reserved Instances to modify.

    " + } + }, + "ReservedInstancesId": { + "base": "

    Describes the ID of a Reserved Instance.

    ", + "refs": { + "ReservedIntancesIds$member": null + } + }, + "ReservedInstancesIdStringList": { + "base": null, + "refs": { + "DescribeReservedInstancesRequest$ReservedInstancesIds": "

    One or more Reserved Instance IDs.

    Default: Describes all your Reserved Instances, or only those otherwise specified.

    ", + "ModifyReservedInstancesRequest$ReservedInstancesIds": "

    The IDs of the Reserved Instances to modify.

    " + } + }, + "ReservedInstancesList": { + "base": null, + "refs": { + "DescribeReservedInstancesResult$ReservedInstances": "

    A list of Reserved Instances.

    " + } + }, + "ReservedInstancesListing": { + "base": "

    Describes a Reserved Instance listing.

    ", + "refs": { + "ReservedInstancesListingList$member": null + } + }, + "ReservedInstancesListingList": { + "base": null, + "refs": { + "CancelReservedInstancesListingResult$ReservedInstancesListings": "

    The Reserved Instance listing.

    ", + "CreateReservedInstancesListingResult$ReservedInstancesListings": "

    Information about the Reserved Instances listing.

    ", + "DescribeReservedInstancesListingsResult$ReservedInstancesListings": "

    Information about the Reserved Instance listing.

    " + } + }, + "ReservedInstancesModification": { + "base": "

    Describes a Reserved Instance modification.

    ", + "refs": { + "ReservedInstancesModificationList$member": null + } + }, + "ReservedInstancesModificationIdStringList": { + "base": null, + "refs": { + "DescribeReservedInstancesModificationsRequest$ReservedInstancesModificationIds": "

    IDs for the submitted modification request.

    " + } + }, + "ReservedInstancesModificationList": { + "base": null, + "refs": { + "DescribeReservedInstancesModificationsResult$ReservedInstancesModifications": "

    The Reserved Instance modification information.

    " + } + }, + "ReservedInstancesModificationResult": { + "base": null, + "refs": { + "ReservedInstancesModificationResultList$member": null + } + }, + "ReservedInstancesModificationResultList": { + "base": null, + "refs": { + "ReservedInstancesModification$ModificationResults": "

    Contains target configurations along with their corresponding new Reserved Instance IDs.

    " + } + }, + "ReservedInstancesOffering": { + "base": "

    Describes a Reserved Instance offering.

    ", + "refs": { + "ReservedInstancesOfferingList$member": null + } + }, + "ReservedInstancesOfferingIdStringList": { + "base": null, + "refs": { + "DescribeReservedInstancesOfferingsRequest$ReservedInstancesOfferingIds": "

    One or more Reserved Instances offering IDs.

    " + } + }, + "ReservedInstancesOfferingList": { + "base": null, + "refs": { + "DescribeReservedInstancesOfferingsResult$ReservedInstancesOfferings": "

    A list of Reserved Instances offerings.

    " + } + }, + "ReservedIntancesIds": { + "base": null, + "refs": { + "ReservedInstancesModification$ReservedInstancesIds": "

    The IDs of one or more Reserved Instances.

    " + } + }, + "ResetImageAttributeName": { + "base": null, + "refs": { + "ResetImageAttributeRequest$Attribute": "

    The attribute to reset (currently you can only reset the launch permission attribute).

    " + } + }, + "ResetImageAttributeRequest": { + "base": null, + "refs": { + } + }, + "ResetInstanceAttributeRequest": { + "base": null, + "refs": { + } + }, + "ResetNetworkInterfaceAttributeRequest": { + "base": null, + "refs": { + } + }, + "ResetSnapshotAttributeRequest": { + "base": null, + "refs": { + } + }, + "ResourceIdList": { + "base": null, + "refs": { + "CreateTagsRequest$Resources": "

    The IDs of one or more resources to tag. For example, ami-1a2b3c4d.

    ", + "DeleteTagsRequest$Resources": "

    The ID of the resource. For example, ami-1a2b3c4d. You can specify more than one resource ID.

    " + } + }, + "ResourceType": { + "base": null, + "refs": { + "TagDescription$ResourceType": "

    The resource type.

    " + } + }, + "RestorableByStringList": { + "base": null, + "refs": { + "DescribeSnapshotsRequest$RestorableByUserIds": "

    One or more AWS accounts IDs that can create volumes from the snapshot.

    " + } + }, + "RestoreAddressToClassicRequest": { + "base": null, + "refs": { + } + }, + "RestoreAddressToClassicResult": { + "base": null, + "refs": { + } + }, + "RevokeSecurityGroupEgressRequest": { + "base": null, + "refs": { + } + }, + "RevokeSecurityGroupIngressRequest": { + "base": null, + "refs": { + } + }, + "Route": { + "base": "

    Describes a route in a route table.

    ", + "refs": { + "RouteList$member": null + } + }, + "RouteList": { + "base": null, + "refs": { + "RouteTable$Routes": "

    The routes in the route table.

    " + } + }, + "RouteOrigin": { + "base": null, + "refs": { + "Route$Origin": "

    Describes how the route was created.

    • CreateRouteTable indicates that route was automatically created when the route table was created.
    • CreateRoute indicates that the route was manually added to the route table.
    • EnableVgwRoutePropagation indicates that the route was propagated by route propagation.
    " + } + }, + "RouteState": { + "base": null, + "refs": { + "Route$State": "

    The state of the route. The blackhole state indicates that the route's target isn't available (for example, the specified gateway isn't attached to the VPC, or the specified NAT instance has been terminated).

    " + } + }, + "RouteTable": { + "base": "

    Describes a route table.

    ", + "refs": { + "CreateRouteTableResult$RouteTable": "

    Information about the route table.

    ", + "RouteTableList$member": null + } + }, + "RouteTableAssociation": { + "base": "

    Describes an association between a route table and a subnet.

    ", + "refs": { + "RouteTableAssociationList$member": null + } + }, + "RouteTableAssociationList": { + "base": null, + "refs": { + "RouteTable$Associations": "

    The associations between the route table and one or more subnets.

    " + } + }, + "RouteTableList": { + "base": null, + "refs": { + "DescribeRouteTablesResult$RouteTables": "

    Information about one or more route tables.

    " + } + }, + "RuleAction": { + "base": null, + "refs": { + "CreateNetworkAclEntryRequest$RuleAction": "

    Indicates whether to allow or deny the traffic that matches the rule.

    ", + "NetworkAclEntry$RuleAction": "

    Indicates whether to allow or deny the traffic that matches the rule.

    ", + "ReplaceNetworkAclEntryRequest$RuleAction": "

    Indicates whether to allow or deny the traffic that matches the rule.

    " + } + }, + "RunInstancesMonitoringEnabled": { + "base": "

    Describes the monitoring for the instance.

    ", + "refs": { + "LaunchSpecification$Monitoring": null, + "RunInstancesRequest$Monitoring": "

    The monitoring for the instance.

    ", + "RequestSpotLaunchSpecification$Monitoring": null + } + }, + "RunInstancesRequest": { + "base": null, + "refs": { + } + }, + "S3Storage": { + "base": "

    Describes the storage parameters for S3 and S3 buckets for an instance store-backed AMI.

    ", + "refs": { + "Storage$S3": "

    An Amazon S3 storage location.

    " + } + }, + "SecurityGroup": { + "base": "

    Describes a security group

    ", + "refs": { + "SecurityGroupList$member": null + } + }, + "SecurityGroupIdStringList": { + "base": null, + "refs": { + "CreateNetworkInterfaceRequest$Groups": "

    The IDs of one or more security groups.

    ", + "ImportInstanceLaunchSpecification$GroupIds": "

    One or more security group IDs.

    ", + "InstanceNetworkInterfaceSpecification$Groups": "

    The IDs of the security groups for the network interface. Applies only if creating a network interface when launching an instance.

    ", + "ModifyNetworkInterfaceAttributeRequest$Groups": "

    Changes the security groups for the network interface. The new set of groups you specify replaces the current set. You must specify at least one group, even if it's just the default security group in the VPC. You must specify the ID of the security group, not the name.

    ", + "RunInstancesRequest$SecurityGroupIds": "

    One or more security group IDs. You can create a security group using CreateSecurityGroup.

    Default: Amazon EC2 uses the default security group.

    " + } + }, + "SecurityGroupList": { + "base": null, + "refs": { + "DescribeSecurityGroupsResult$SecurityGroups": "

    Information about one or more security groups.

    " + } + }, + "SecurityGroupStringList": { + "base": null, + "refs": { + "ImportInstanceLaunchSpecification$GroupNames": "

    One or more security group names.

    ", + "RunInstancesRequest$SecurityGroups": "

    [EC2-Classic, default VPC] One or more security group names. For a nondefault VPC, you must use security group IDs instead.

    Default: Amazon EC2 uses the default security group.

    " + } + }, + "ShutdownBehavior": { + "base": null, + "refs": { + "ImportInstanceLaunchSpecification$InstanceInitiatedShutdownBehavior": "

    Indicates whether an instance stops or terminates when you initiate shutdown from the instance (using the operating system command for system shutdown).

    ", + "RunInstancesRequest$InstanceInitiatedShutdownBehavior": "

    Indicates whether an instance stops or terminates when you initiate shutdown from the instance (using the operating system command for system shutdown).

    Default: stop

    " + } + }, + "Snapshot": { + "base": "

    Describes a snapshot.

    ", + "refs": { + "SnapshotList$member": null + } + }, + "SnapshotAttributeName": { + "base": null, + "refs": { + "DescribeSnapshotAttributeRequest$Attribute": "

    The snapshot attribute you would like to view.

    ", + "ModifySnapshotAttributeRequest$Attribute": "

    The snapshot attribute to modify.

    Only volume creation permissions may be modified at the customer level.

    ", + "ResetSnapshotAttributeRequest$Attribute": "

    The attribute to reset. Currently, only the attribute for permission to create volumes can be reset.

    " + } + }, + "SnapshotDetail": { + "base": "

    Describes the snapshot created from the imported disk.

    ", + "refs": { + "SnapshotDetailList$member": null + } + }, + "SnapshotDetailList": { + "base": null, + "refs": { + "ImportImageResult$SnapshotDetails": "

    Information about the snapshots.

    ", + "ImportImageTask$SnapshotDetails": "

    Information about the snapshots.

    " + } + }, + "SnapshotDiskContainer": { + "base": "

    The disk container object for the import snapshot request.

    ", + "refs": { + "ImportSnapshotRequest$DiskContainer": "

    Information about the disk container.

    " + } + }, + "SnapshotIdStringList": { + "base": null, + "refs": { + "DescribeSnapshotsRequest$SnapshotIds": "

    One or more snapshot IDs.

    Default: Describes snapshots for which you have launch permissions.

    " + } + }, + "SnapshotList": { + "base": null, + "refs": { + "DescribeSnapshotsResult$Snapshots": "

    Information about the snapshots.

    " + } + }, + "SnapshotState": { + "base": null, + "refs": { + "Snapshot$State": "

    The snapshot state.

    " + } + }, + "SnapshotTaskDetail": { + "base": "

    Details about the import snapshot task.

    ", + "refs": { + "ImportSnapshotResult$SnapshotTaskDetail": "

    Information about the import snapshot task.

    ", + "ImportSnapshotTask$SnapshotTaskDetail": "

    Describes an import snapshot task.

    " + } + }, + "SpotDatafeedSubscription": { + "base": "

    Describes the data feed for a Spot instance.

    ", + "refs": { + "CreateSpotDatafeedSubscriptionResult$SpotDatafeedSubscription": "

    The Spot instance data feed subscription.

    ", + "DescribeSpotDatafeedSubscriptionResult$SpotDatafeedSubscription": "

    The Spot instance data feed subscription.

    " + } + }, + "SpotFleetLaunchSpecification": { + "base": "

    Describes the launch specification for one or more Spot instances.

    ", + "refs": { + "LaunchSpecsList$member": null + } + }, + "SpotFleetMonitoring": { + "base": "

    Describes whether monitoring is enabled.

    ", + "refs": { + "SpotFleetLaunchSpecification$Monitoring": "

    Enable or disable monitoring for the instances.

    " + } + }, + "SpotFleetRequestConfig": { + "base": "

    Describes a Spot fleet request.

    ", + "refs": { + "SpotFleetRequestConfigSet$member": null + } + }, + "SpotFleetRequestConfigData": { + "base": "

    Describes the configuration of a Spot fleet request.

    ", + "refs": { + "RequestSpotFleetRequest$SpotFleetRequestConfig": "

    The configuration for the Spot fleet request.

    ", + "SpotFleetRequestConfig$SpotFleetRequestConfig": "

    Information about the configuration of the Spot fleet request.

    " + } + }, + "SpotFleetRequestConfigSet": { + "base": null, + "refs": { + "DescribeSpotFleetRequestsResponse$SpotFleetRequestConfigs": "

    Information about the configuration of your Spot fleet.

    " + } + }, + "SpotInstanceRequest": { + "base": "

    Describes a Spot instance request.

    ", + "refs": { + "SpotInstanceRequestList$member": null + } + }, + "SpotInstanceRequestIdList": { + "base": null, + "refs": { + "CancelSpotInstanceRequestsRequest$SpotInstanceRequestIds": "

    One or more Spot instance request IDs.

    ", + "DescribeSpotInstanceRequestsRequest$SpotInstanceRequestIds": "

    One or more Spot instance request IDs.

    " + } + }, + "SpotInstanceRequestList": { + "base": null, + "refs": { + "DescribeSpotInstanceRequestsResult$SpotInstanceRequests": "

    One or more Spot instance requests.

    ", + "RequestSpotInstancesResult$SpotInstanceRequests": "

    One or more Spot instance requests.

    " + } + }, + "SpotInstanceState": { + "base": null, + "refs": { + "SpotInstanceRequest$State": "

    The state of the Spot instance request. Spot bid status information can help you track your Spot instance requests. For more information, see Spot Bid Status in the Amazon Elastic Compute Cloud User Guide.

    " + } + }, + "SpotInstanceStateFault": { + "base": "

    Describes a Spot instance state change.

    ", + "refs": { + "SpotDatafeedSubscription$Fault": "

    The fault codes for the Spot instance request, if any.

    ", + "SpotInstanceRequest$Fault": "

    The fault codes for the Spot instance request, if any.

    " + } + }, + "SpotInstanceStatus": { + "base": "

    Describes the status of a Spot instance request.

    ", + "refs": { + "SpotInstanceRequest$Status": "

    The status code and status message describing the Spot instance request.

    " + } + }, + "SpotInstanceType": { + "base": null, + "refs": { + "RequestSpotInstancesRequest$Type": "

    The Spot instance request type.

    Default: one-time

    ", + "SpotInstanceRequest$Type": "

    The Spot instance request type.

    " + } + }, + "SpotPlacement": { + "base": "

    Describes Spot instance placement.

    ", + "refs": { + "LaunchSpecification$Placement": "

    The placement information for the instance.

    ", + "SpotFleetLaunchSpecification$Placement": "

    The placement information.

    ", + "RequestSpotLaunchSpecification$Placement": "

    The placement information for the instance.

    " + } + }, + "SpotPrice": { + "base": "

    Describes the maximum hourly price (bid) for any Spot instance launched to fulfill the request.

    ", + "refs": { + "SpotPriceHistoryList$member": null + } + }, + "SpotPriceHistoryList": { + "base": null, + "refs": { + "DescribeSpotPriceHistoryResult$SpotPriceHistory": "

    The historical Spot prices.
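A short aws-sdk-go sketch of pulling a day of price history for one zone and instance type; the zone and type are arbitrary examples:

```go
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	out, err := svc.DescribeSpotPriceHistory(&ec2.DescribeSpotPriceHistoryInput{
		AvailabilityZone:    aws.String("us-east-1a"), // filter to one zone
		InstanceTypes:       []*string{aws.String("m3.medium")},
		ProductDescriptions: []*string{aws.String("Linux/UNIX")},
		StartTime:           aws.Time(time.Now().Add(-24 * time.Hour)),
	})
	if err != nil {
		fmt.Println("describe failed:", err)
		return
	}
	for _, p := range out.SpotPriceHistory {
		fmt.Printf("%s %s %s\n", aws.TimeValue(p.Timestamp),
			aws.StringValue(p.InstanceType), aws.StringValue(p.SpotPrice))
	}
}
```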

    " + } + }, + "StartInstancesRequest": { + "base": null, + "refs": { + } + }, + "StartInstancesResult": { + "base": null, + "refs": { + } + }, + "State": { + "base": null, + "refs": { + "VpcEndpoint$State": "

    The state of the VPC endpoint.

    " + } + }, + "StateReason": { + "base": "

    Describes a state change.

    ", + "refs": { + "Image$StateReason": "

    The reason for the state change.

    ", + "Instance$StateReason": "

    The reason for the most recent state transition.

    " + } + }, + "Status": { + "base": null, + "refs": { + "MoveAddressToVpcResult$Status": "

    The status of the move of the IP address.

    ", + "RestoreAddressToClassicResult$Status": "

    The move status for the IP address.

    " + } + }, + "StatusName": { + "base": null, + "refs": { + "InstanceStatusDetails$Name": "

    The type of instance status.

    " + } + }, + "StatusType": { + "base": null, + "refs": { + "InstanceStatusDetails$Status": "

    The status.

    " + } + }, + "StopInstancesRequest": { + "base": null, + "refs": { + } + }, + "StopInstancesResult": { + "base": null, + "refs": { + } + }, + "Storage": { + "base": "

    Describes the storage location for an instance store-backed AMI.

    ", + "refs": { + "BundleInstanceRequest$Storage": "

    The bucket in which to store the AMI. You can specify a bucket that you already own or a new bucket that Amazon EC2 creates on your behalf. If you specify a bucket that belongs to someone else, Amazon EC2 returns an error.
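A minimal aws-sdk-go sketch of starting a bundle task; the instance ID and bucket are placeholders, and a real call additionally needs an S3 upload policy and signature authorizing Amazon EC2 to write into the bucket:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	// Instance ID and bucket are placeholders; a production call also sets
	// the S3Storage upload policy and signature fields.
	out, err := svc.BundleInstance(&ec2.BundleInstanceInput{
		InstanceId: aws.String("i-1234567890abcdef0"),
		Storage: &ec2.Storage{
			S3: &ec2.S3Storage{
				Bucket: aws.String("my-bundle-bucket"),
				Prefix: aws.String("winami"),
			},
		},
	})
	if err != nil {
		fmt.Println("bundle failed:", err)
		return
	}
	fmt.Println("bundle task:", aws.StringValue(out.BundleTask.BundleId))
}
```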

    ", + "BundleTask$Storage": "

    The Amazon S3 storage locations.

    " + } + }, + "String": { + "base": null, + "refs": { + "AcceptVpcPeeringConnectionRequest$VpcPeeringConnectionId": "

    The ID of the VPC peering connection.

    ", + "AccountAttribute$AttributeName": "

    The name of the account attribute.

    ", + "AccountAttributeValue$AttributeValue": "

    The value of the attribute.

    ", + "ActiveInstance$InstanceType": "

    The instance type.

    ", + "ActiveInstance$InstanceId": "

    The ID of the instance.

    ", + "ActiveInstance$SpotInstanceRequestId": "

    The ID of the Spot instance request.

    ", + "Address$InstanceId": "

    The ID of the instance that the address is associated with (if any).

    ", + "Address$PublicIp": "

    The Elastic IP address.

    ", + "Address$AllocationId": "

    The ID representing the allocation of the address for use with EC2-VPC.

    ", + "Address$AssociationId": "

    The ID representing the association of the address with an instance in a VPC.

    ", + "Address$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "Address$NetworkInterfaceOwnerId": "

    The ID of the AWS account that owns the network interface.

    ", + "Address$PrivateIpAddress": "

    The private IP address associated with the Elastic IP address.

    ", + "AllocateAddressResult$PublicIp": "

    The Elastic IP address.

    ", + "AllocateAddressResult$AllocationId": "

    [EC2-VPC] The ID that AWS assigns to represent the allocation of the Elastic IP address for use with instances in a VPC.

    ", + "AllocationIdList$member": null, + "AssignPrivateIpAddressesRequest$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "AssociateAddressRequest$InstanceId": "

    The ID of the instance. This is required for EC2-Classic. For EC2-VPC, you can specify either the instance ID or the network interface ID, but not both. The operation fails if you specify an instance ID unless exactly one network interface is attached.

    ", + "AssociateAddressRequest$PublicIp": "

    The Elastic IP address. This is required for EC2-Classic.

    ", + "AssociateAddressRequest$AllocationId": "

    [EC2-VPC] The allocation ID. This is required for EC2-VPC.

    ", + "AssociateAddressRequest$NetworkInterfaceId": "

    [EC2-VPC] The ID of the network interface. If the instance has more than one network interface, you must specify a network interface ID.

    ", + "AssociateAddressRequest$PrivateIpAddress": "

    [EC2-VPC] The primary or secondary private IP address to associate with the Elastic IP address. If no private IP address is specified, the Elastic IP address is associated with the primary private IP address.
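A short aws-sdk-go sketch of the EC2-VPC form of this call; both IDs are placeholders, and PrivateIpAddress is omitted so the address is associated with the primary private IP:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	// EC2-VPC form: associate by allocation ID. IDs are placeholders.
	out, err := svc.AssociateAddress(&ec2.AssociateAddressInput{
		InstanceId:   aws.String("i-1234567890abcdef0"),
		AllocationId: aws.String("eipalloc-12345678"),
	})
	if err != nil {
		fmt.Println("associate failed:", err)
		return
	}
	fmt.Println("association:", aws.StringValue(out.AssociationId))
}
```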

    ", + "AssociateAddressResult$AssociationId": "

    [EC2-VPC] The ID that represents the association of the Elastic IP address with an instance.

    ", + "AssociateDhcpOptionsRequest$DhcpOptionsId": "

    The ID of the DHCP options set, or default to associate no DHCP options with the VPC.

    ", + "AssociateDhcpOptionsRequest$VpcId": "

    The ID of the VPC.

    ", + "AssociateRouteTableRequest$SubnetId": "

    The ID of the subnet.

    ", + "AssociateRouteTableRequest$RouteTableId": "

    The ID of the route table.

    ", + "AssociateRouteTableResult$AssociationId": "

    The route table association ID (needed to disassociate the route table).

    ", + "AttachClassicLinkVpcRequest$InstanceId": "

    The ID of an EC2-Classic instance to link to the ClassicLink-enabled VPC.

    ", + "AttachClassicLinkVpcRequest$VpcId": "

    The ID of a ClassicLink-enabled VPC.

    ", + "AttachInternetGatewayRequest$InternetGatewayId": "

    The ID of the Internet gateway.

    ", + "AttachInternetGatewayRequest$VpcId": "

    The ID of the VPC.

    ", + "AttachNetworkInterfaceRequest$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "AttachNetworkInterfaceRequest$InstanceId": "

    The ID of the instance.

    ", + "AttachNetworkInterfaceResult$AttachmentId": "

    The ID of the network interface attachment.

    ", + "AttachVolumeRequest$VolumeId": "

    The ID of the EBS volume. The volume and instance must be within the same Availability Zone.

    ", + "AttachVolumeRequest$InstanceId": "

    The ID of the instance.

    ", + "AttachVolumeRequest$Device": "

    The device name to expose to the instance (for example, /dev/sdh or xvdh).

    ", + "AttachVpnGatewayRequest$VpnGatewayId": "

    The ID of the virtual private gateway.

    ", + "AttachVpnGatewayRequest$VpcId": "

    The ID of the VPC.

    ", + "AttributeValue$Value": "

    Valid values are case-sensitive and vary by action.

    ", + "AuthorizeSecurityGroupEgressRequest$GroupId": "

    The ID of the security group.

    ", + "AuthorizeSecurityGroupEgressRequest$SourceSecurityGroupName": "

    The name of a destination security group. To authorize outbound access to a destination security group, we recommend that you use a set of IP permissions instead.

    ", + "AuthorizeSecurityGroupEgressRequest$SourceSecurityGroupOwnerId": "

    The AWS account number for a destination security group. To authorize outbound access to a destination security group, we recommend that you use a set of IP permissions instead.

    ", + "AuthorizeSecurityGroupEgressRequest$IpProtocol": "

    The IP protocol name (tcp, udp, icmp) or number (see Protocol Numbers). Use -1 to specify all.

    ", + "AuthorizeSecurityGroupEgressRequest$CidrIp": "

    The CIDR IP address range. You can't specify this parameter when specifying a source security group.

    ", + "AuthorizeSecurityGroupIngressRequest$GroupName": "

    [EC2-Classic, default VPC] The name of the security group.

    ", + "AuthorizeSecurityGroupIngressRequest$GroupId": "

    The ID of the security group. Required for a nondefault VPC.

    ", + "AuthorizeSecurityGroupIngressRequest$SourceSecurityGroupName": "

    [EC2-Classic, default VPC] The name of the source security group. You can't specify this parameter in combination with the following parameters: the CIDR IP address range, the start of the port range, the IP protocol, and the end of the port range. For EC2-VPC, the source security group must be in the same VPC.

    ", + "AuthorizeSecurityGroupIngressRequest$SourceSecurityGroupOwnerId": "

    [EC2-Classic, default VPC] The AWS account number for the source security group. For EC2-VPC, the source security group must be in the same VPC. You can't specify this parameter in combination with the following parameters: the CIDR IP address range, the IP protocol, the start of the port range, and the end of the port range. Creates rules that grant full ICMP, UDP, and TCP access. To create a rule with a specific IP protocol and port range, use a set of IP permissions instead.
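Following that recommendation, a minimal aws-sdk-go sketch that uses a set of IP permissions rather than the flat parameters; the group ID and CIDR are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	// Grant a specific protocol and port range via IpPermissions.
	// Group ID and CIDR are placeholders.
	_, err := svc.AuthorizeSecurityGroupIngress(&ec2.AuthorizeSecurityGroupIngressInput{
		GroupId: aws.String("sg-12345678"),
		IpPermissions: []*ec2.IpPermission{{
			IpProtocol: aws.String("tcp"),
			FromPort:   aws.Int64(22),
			ToPort:     aws.Int64(22),
			IpRanges:   []*ec2.IpRange{{CidrIp: aws.String("203.0.113.0/24")}},
		}},
	})
	fmt.Println("authorize err:", err)
}
```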

    ", + "AuthorizeSecurityGroupIngressRequest$IpProtocol": "

    The IP protocol name (tcp, udp, icmp) or number (see Protocol Numbers). (VPC only) Use -1 to specify all.

    ", + "AuthorizeSecurityGroupIngressRequest$CidrIp": "

    The CIDR IP address range. You can't specify this parameter when specifying a source security group.

    ", + "AvailabilityZone$ZoneName": "

    The name of the Availability Zone.

    ", + "AvailabilityZone$RegionName": "

    The name of the region.

    ", + "AvailabilityZoneMessage$Message": "

    The message about the Availability Zone.

    ", + "BlockDeviceMapping$VirtualName": "

    The virtual device name (ephemeralN). Instance store volumes are numbered starting from 0. An instance type with 2 available instance store volumes can specify mappings for ephemeral0 and ephemeral1. The number of available instance store volumes depends on the instance type. After you connect to the instance, you must mount the volume.

    Constraints: For M3 instances, you must specify instance store volumes in the block device mapping for the instance. When you launch an M3 instance, we ignore any instance store volumes specified in the block device mapping for the AMI.

    ", + "BlockDeviceMapping$DeviceName": "

    The device name exposed to the instance (for example, /dev/sdh or xvdh).

    ", + "BlockDeviceMapping$NoDevice": "

    Suppresses the specified device included in the block device mapping of the AMI.

    ", + "BundleIdStringList$member": null, + "BundleInstanceRequest$InstanceId": "

    The ID of the instance to bundle.

    Type: String

    Default: None

    Required: Yes

    ", + "BundleTask$InstanceId": "

    The ID of the instance associated with this bundle task.

    ", + "BundleTask$BundleId": "

    The ID of the bundle task.

    ", + "BundleTask$Progress": "

    The level of task completion, as a percent (for example, 20%).

    ", + "BundleTaskError$Code": "

    The error code.

    ", + "BundleTaskError$Message": "

    The error message.

    ", + "CancelBundleTaskRequest$BundleId": "

    The ID of the bundle task.

    ", + "CancelConversionRequest$ConversionTaskId": "

    The ID of the conversion task.

    ", + "CancelConversionRequest$ReasonMessage": "

    The reason for canceling the conversion task.

    ", + "CancelExportTaskRequest$ExportTaskId": "

    The ID of the export task. This is the ID returned by CreateInstanceExportTask.

    ", + "CancelImportTaskRequest$ImportTaskId": "

    The ID of the import image or import snapshot task to be canceled.

    ", + "CancelImportTaskRequest$CancelReason": "

    The reason for canceling the task.

    ", + "CancelImportTaskResult$ImportTaskId": "

    The ID of the task being canceled.

    ", + "CancelImportTaskResult$State": "

    The current state of the task being canceled.

    ", + "CancelImportTaskResult$PreviousState": "

    The state of the task before it was canceled.

    ", + "CancelReservedInstancesListingRequest$ReservedInstancesListingId": "

    The ID of the Reserved Instance listing.

    ", + "CancelSpotFleetRequestsError$Message": "

    The description for the error code.

    ", + "CancelSpotFleetRequestsErrorItem$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "CancelSpotFleetRequestsSuccessItem$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "CancelledSpotInstanceRequest$SpotInstanceRequestId": "

    The ID of the Spot instance request.

    ", + "ClassicLinkInstance$InstanceId": "

    The ID of the instance.

    ", + "ClassicLinkInstance$VpcId": "

    The ID of the VPC.

    ", + "ClientData$Comment": "

    A user-defined comment about the disk upload.

    ", + "ConfirmProductInstanceRequest$ProductCode": "

    The product code. This must be a product code that you own.

    ", + "ConfirmProductInstanceRequest$InstanceId": "

    The ID of the instance.

    ", + "ConfirmProductInstanceResult$OwnerId": "

    The AWS account ID of the instance owner. This is only present if the product code is attached to the instance.

    ", + "ConversionIdStringList$member": null, + "ConversionTask$ConversionTaskId": "

    The ID of the conversion task.

    ", + "ConversionTask$ExpirationTime": "

    The time when the task expires. If the upload isn't complete before the expiration time, we automatically cancel the task.

    ", + "ConversionTask$StatusMessage": "

    The status message related to the conversion task.

    ", + "CopyImageRequest$SourceRegion": "

    The name of the region that contains the AMI to copy.

    ", + "CopyImageRequest$SourceImageId": "

    The ID of the AMI to copy.

    ", + "CopyImageRequest$Name": "

    The name of the new AMI in the destination region.

    ", + "CopyImageRequest$Description": "

    A description for the new AMI in the destination region.

    ", + "CopyImageRequest$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure idempotency of the request. For more information, see How to Ensure Idempotency in the Amazon Elastic Compute Cloud User Guide.

    ", + "CopyImageResult$ImageId": "

    The ID of the new AMI.

    ", + "CopySnapshotRequest$SourceRegion": "

    The ID of the region that contains the snapshot to be copied.

    ", + "CopySnapshotRequest$SourceSnapshotId": "

    The ID of the EBS snapshot to copy.

    ", + "CopySnapshotRequest$Description": "

    A description for the EBS snapshot.

    ", + "CopySnapshotRequest$DestinationRegion": "

    The destination region to use in the PresignedUrl parameter of a snapshot copy operation. This parameter is only valid for specifying the destination region in a PresignedUrl parameter, where it is required.

    CopySnapshot sends the snapshot copy to the regional endpoint that you send the HTTP request to, such as ec2.us-east-1.amazonaws.com (in the AWS CLI, this is specified with the --region parameter or the default region in your AWS configuration file).

    ", + "CopySnapshotRequest$PresignedUrl": "

    The pre-signed URL that facilitates copying an encrypted snapshot. This parameter is only required when copying an encrypted snapshot with the Amazon EC2 Query API; it is available as an optional parameter in all other cases. The PresignedUrl should use the snapshot source endpoint, the CopySnapshot action, and include the SourceRegion, SourceSnapshotId, and DestinationRegion parameters. The PresignedUrl must be signed using AWS Signature Version 4. Because EBS snapshots are stored in Amazon S3, the signing algorithm for this parameter uses the same logic that is described in Authenticating Requests by Using Query Parameters (AWS Signature Version 4) in the Amazon Simple Storage Service API Reference. An invalid or improperly signed PresignedUrl will cause the copy operation to fail asynchronously, and the snapshot will move to an error state.

    ", + "CopySnapshotRequest$KmsKeyId": "

    The full ARN of the AWS Key Management Service (AWS KMS) CMK to use when creating the snapshot copy. This parameter is only required if you want to use a non-default CMK; if this parameter is not specified, the default CMK for EBS is used. The ARN contains the arn:aws:kms namespace, followed by the region of the CMK, the AWS account ID of the CMK owner, the key namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. The specified CMK must exist in the region that the snapshot is being copied to. If a KmsKeyId is specified, the Encrypted flag must also be set.
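A minimal aws-sdk-go sketch of an encrypted cross-region copy with a non-default CMK; the IDs and ARN are placeholders. In the Go SDK the PresignedUrl and DestinationRegion for encrypted copies are normally filled in by a client customization, so they are not set by hand here:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// The client's region is the destination of the copy.
	svc := ec2.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")})))

	out, err := svc.CopySnapshot(&ec2.CopySnapshotInput{
		SourceRegion:     aws.String("us-east-1"),
		SourceSnapshotId: aws.String("snap-1234567890abcdef0"), // placeholder
		Description:      aws.String("encrypted cross-region copy"),
		Encrypted:        aws.Bool(true),
		// Optional non-default CMK; requires Encrypted to be set.
		KmsKeyId: aws.String("arn:aws:kms:us-west-2:123456789012:key/abcd1234-a123-456a-a12b-a123b4cd56ef"),
	})
	if err != nil {
		fmt.Println("copy failed:", err)
		return
	}
	fmt.Println("new snapshot:", aws.StringValue(out.SnapshotId))
}
```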

    ", + "CopySnapshotResult$SnapshotId": "

    The ID of the new snapshot.

    ", + "CreateCustomerGatewayRequest$PublicIp": "

    The Internet-routable IP address for the customer gateway's outside interface. The address must be static.

    ", + "CreateFlowLogsRequest$LogGroupName": "

    The name of the CloudWatch log group.

    ", + "CreateFlowLogsRequest$DeliverLogsPermissionArn": "

    The ARN for the IAM role that's used to post flow logs to a CloudWatch Logs log group.

    ", + "CreateFlowLogsRequest$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.
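A minimal aws-sdk-go sketch of creating flow logs with an idempotency token; the VPC ID, log group, and role ARN are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	// All identifiers are placeholders; the role must be allowed to
	// publish to CloudWatch Logs.
	out, err := svc.CreateFlowLogs(&ec2.CreateFlowLogsInput{
		ResourceIds:              []*string{aws.String("vpc-12345678")},
		ResourceType:             aws.String("VPC"),
		TrafficType:              aws.String("ALL"),
		LogGroupName:             aws.String("my-flow-logs"),
		DeliverLogsPermissionArn: aws.String("arn:aws:iam::123456789012:role/flow-logs-role"),
		ClientToken:              aws.String("create-flow-logs-001"), // idempotency token
	})
	if err != nil {
		fmt.Println("create failed:", err)
		return
	}
	fmt.Println("flow log IDs:", aws.StringValueSlice(out.FlowLogIds))
}
```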

    ", + "CreateFlowLogsResult$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure the idempotency of the request.

    ", + "CreateImageRequest$InstanceId": "

    The ID of the instance.

    ", + "CreateImageRequest$Name": "

    A name for the new image.

    Constraints: 3-128 alphanumeric characters, parentheses (()), square brackets ([]), spaces ( ), periods (.), slashes (/), dashes (-), single quotes ('), at-signs (@), or underscores (_)

    ", + "CreateImageRequest$Description": "

    A description for the new image.

    ", + "CreateImageResult$ImageId": "

    The ID of the new AMI.

    ", + "CreateInstanceExportTaskRequest$Description": "

    A description for the conversion task or the resource being exported. The maximum length is 255 bytes.

    ", + "CreateInstanceExportTaskRequest$InstanceId": "

    The ID of the instance.

    ", + "CreateKeyPairRequest$KeyName": "

    A unique name for the key pair.

    Constraints: Up to 255 ASCII characters

    ", + "CreateNetworkAclEntryRequest$NetworkAclId": "

    The ID of the network ACL.

    ", + "CreateNetworkAclEntryRequest$Protocol": "

    The protocol. A value of -1 means all protocols.

    ", + "CreateNetworkAclEntryRequest$CidrBlock": "

    The network range to allow or deny, in CIDR notation (for example, 172.16.0.0/24).

    ", + "CreateNetworkAclRequest$VpcId": "

    The ID of the VPC.

    ", + "CreateNetworkInterfaceRequest$SubnetId": "

    The ID of the subnet to associate with the network interface.

    ", + "CreateNetworkInterfaceRequest$Description": "

    A description for the network interface.

    ", + "CreateNetworkInterfaceRequest$PrivateIpAddress": "

    The primary private IP address of the network interface. If you don't specify an IP address, Amazon EC2 selects one for you from the subnet range. If you specify an IP address, you cannot indicate any IP addresses specified in privateIpAddresses as primary (only one IP address can be designated as primary).

    ", + "CreatePlacementGroupRequest$GroupName": "

    A name for the placement group.

    Constraints: Up to 255 ASCII characters

    ", + "CreateReservedInstancesListingRequest$ReservedInstancesId": "

    The ID of the active Reserved Instance.

    ", + "CreateReservedInstancesListingRequest$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure idempotency of your listings. This helps avoid duplicate listings. For more information, see Ensuring Idempotency.

    ", + "CreateRouteRequest$RouteTableId": "

    The ID of the route table for the route.

    ", + "CreateRouteRequest$DestinationCidrBlock": "

    The CIDR address block used for the destination match. Routing decisions are based on the most specific match.

    ", + "CreateRouteRequest$GatewayId": "

    The ID of an Internet gateway or virtual private gateway attached to your VPC.

    ", + "CreateRouteRequest$InstanceId": "

    The ID of a NAT instance in your VPC. The operation fails if you specify an instance ID unless exactly one network interface is attached.

    ", + "CreateRouteRequest$NetworkInterfaceId": "

    The ID of a network interface.

    ", + "CreateRouteRequest$VpcPeeringConnectionId": "

    The ID of a VPC peering connection.

    ", + "CreateRouteTableRequest$VpcId": "

    The ID of the VPC.

    ", + "CreateSecurityGroupRequest$GroupName": "

    The name of the security group.

    Constraints: Up to 255 characters in length

    Constraints for EC2-Classic: ASCII characters

    Constraints for EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!$*

    ", + "CreateSecurityGroupRequest$Description": "

    A description for the security group. This is informational only.

    Constraints: Up to 255 characters in length

    Constraints for EC2-Classic: ASCII characters

    Constraints for EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!$*

    ", + "CreateSecurityGroupRequest$VpcId": "

    [EC2-VPC] The ID of the VPC. Required for EC2-VPC.

    ", + "CreateSecurityGroupResult$GroupId": "

    The ID of the security group.

    ", + "CreateSnapshotRequest$VolumeId": "

    The ID of the EBS volume.

    ", + "CreateSnapshotRequest$Description": "

    A description for the snapshot.

    ", + "CreateSpotDatafeedSubscriptionRequest$Bucket": "

    The Amazon S3 bucket in which to store the Spot instance data feed.

    ", + "CreateSpotDatafeedSubscriptionRequest$Prefix": "

    A prefix for the data feed file names.

    ", + "CreateSubnetRequest$VpcId": "

    The ID of the VPC.

    ", + "CreateSubnetRequest$CidrBlock": "

    The network range for the subnet, in CIDR notation. For example, 10.0.0.0/24.

    ", + "CreateSubnetRequest$AvailabilityZone": "

    The Availability Zone for the subnet.

    Default: Amazon EC2 selects one for you (recommended).

    ", + "CreateVolumePermission$UserId": "

    The specific AWS account ID that is to be added or removed from a volume's list of create volume permissions.

    ", + "CreateVolumeRequest$SnapshotId": "

    The snapshot from which to create the volume.

    ", + "CreateVolumeRequest$AvailabilityZone": "

    The Availability Zone in which to create the volume. Use DescribeAvailabilityZones to list the Availability Zones that are currently available to you.

    ", + "CreateVolumeRequest$KmsKeyId": "

    The full ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use when creating the encrypted volume. This parameter is only required if you want to use a non-default CMK; if this parameter is not specified, the default CMK for EBS is used. The ARN contains the arn:aws:kms namespace, followed by the region of the CMK, the AWS account ID of the CMK owner, the key namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. If a KmsKeyId is specified, the Encrypted flag must also be set.
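A minimal aws-sdk-go sketch of creating an encrypted volume with a non-default CMK; the example ARN is the one from the description above, not a real key:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	// Setting KmsKeyId requires the Encrypted flag; the ARN is a placeholder.
	out, err := svc.CreateVolume(&ec2.CreateVolumeInput{
		AvailabilityZone: aws.String("us-east-1a"),
		Size:             aws.Int64(100), // GiB
		VolumeType:       aws.String("gp2"),
		Encrypted:        aws.Bool(true),
		KmsKeyId:         aws.String("arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef"),
	})
	if err != nil {
		fmt.Println("create failed:", err)
		return
	}
	fmt.Println("volume:", aws.StringValue(out.VolumeId))
}
```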

    ", + "CreateVpcEndpointRequest$VpcId": "

    The ID of the VPC in which the endpoint will be used.

    ", + "CreateVpcEndpointRequest$ServiceName": "

    The AWS service name, in the form com.amazonaws.region.service. To get a list of available services, use the DescribeVpcEndpointServices request.

    ", + "CreateVpcEndpointRequest$PolicyDocument": "

    A policy to attach to the endpoint that controls access to the service. The policy must be in valid JSON format. If this parameter is not specified, we attach a default policy that allows full access to the service.

    ", + "CreateVpcEndpointRequest$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.
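A minimal aws-sdk-go sketch of creating an S3 gateway endpoint with a client token; the VPC and route table IDs are placeholders, and PolicyDocument is omitted so the default full-access policy applies:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	// VPC and route table IDs are placeholders.
	out, err := svc.CreateVpcEndpoint(&ec2.CreateVpcEndpointInput{
		VpcId:         aws.String("vpc-12345678"),
		ServiceName:   aws.String("com.amazonaws.us-east-1.s3"),
		RouteTableIds: []*string{aws.String("rtb-12345678")},
		ClientToken:   aws.String("s3-endpoint-001"), // idempotency token
	})
	if err != nil {
		fmt.Println("create failed:", err)
		return
	}
	fmt.Println("endpoint:", aws.StringValue(out.VpcEndpoint.VpcEndpointId))
}
```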

    ", + "CreateVpcEndpointResult$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure the idempotency of the request.

    ", + "CreateVpcPeeringConnectionRequest$VpcId": "

    The ID of the requester VPC.

    ", + "CreateVpcPeeringConnectionRequest$PeerVpcId": "

    The ID of the VPC with which you are creating the VPC peering connection.

    ", + "CreateVpcPeeringConnectionRequest$PeerOwnerId": "

    The AWS account ID of the owner of the peer VPC.

    Default: Your AWS account ID

    ", + "CreateVpcRequest$CidrBlock": "

    The network range for the VPC, in CIDR notation. For example, 10.0.0.0/16.

    ", + "CreateVpnConnectionRequest$Type": "

    The type of VPN connection (ipsec.1).

    ", + "CreateVpnConnectionRequest$CustomerGatewayId": "

    The ID of the customer gateway.

    ", + "CreateVpnConnectionRequest$VpnGatewayId": "

    The ID of the virtual private gateway.

    ", + "CreateVpnConnectionRouteRequest$VpnConnectionId": "

    The ID of the VPN connection.

    ", + "CreateVpnConnectionRouteRequest$DestinationCidrBlock": "

    The CIDR block associated with the local subnet of the customer network.

    ", + "CreateVpnGatewayRequest$AvailabilityZone": "

    The Availability Zone for the virtual private gateway.

    ", + "CustomerGateway$CustomerGatewayId": "

    The ID of the customer gateway.

    ", + "CustomerGateway$State": "

    The current state of the customer gateway (pending | available | deleting | deleted).

    ", + "CustomerGateway$Type": "

    The type of VPN connection the customer gateway supports (ipsec.1).

    ", + "CustomerGateway$IpAddress": "

    The Internet-routable IP address of the customer gateway's outside interface.

    ", + "CustomerGateway$BgpAsn": "

    The customer gateway's Border Gateway Protocol (BGP) Autonomous System Number (ASN).

    ", + "CustomerGatewayIdStringList$member": null, + "DeleteCustomerGatewayRequest$CustomerGatewayId": "

    The ID of the customer gateway.

    ", + "DeleteDhcpOptionsRequest$DhcpOptionsId": "

    The ID of the DHCP options set.

    ", + "DeleteInternetGatewayRequest$InternetGatewayId": "

    The ID of the Internet gateway.

    ", + "DeleteKeyPairRequest$KeyName": "

    The name of the key pair.

    ", + "DeleteNetworkAclEntryRequest$NetworkAclId": "

    The ID of the network ACL.

    ", + "DeleteNetworkAclRequest$NetworkAclId": "

    The ID of the network ACL.

    ", + "DeleteNetworkInterfaceRequest$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "DeletePlacementGroupRequest$GroupName": "

    The name of the placement group.

    ", + "DeleteRouteRequest$RouteTableId": "

    The ID of the route table.

    ", + "DeleteRouteRequest$DestinationCidrBlock": "

    The CIDR range for the route. The value you specify must match the CIDR for the route exactly.

    ", + "DeleteRouteTableRequest$RouteTableId": "

    The ID of the route table.

    ", + "DeleteSecurityGroupRequest$GroupName": "

    [EC2-Classic, default VPC] The name of the security group. You can specify either the security group name or the security group ID.

    ", + "DeleteSecurityGroupRequest$GroupId": "

    The ID of the security group. Required for a nondefault VPC.

    ", + "DeleteSnapshotRequest$SnapshotId": "

    The ID of the EBS snapshot.

    ", + "DeleteSubnetRequest$SubnetId": "

    The ID of the subnet.

    ", + "DeleteVolumeRequest$VolumeId": "

    The ID of the volume.

    ", + "DeleteVpcPeeringConnectionRequest$VpcPeeringConnectionId": "

    The ID of the VPC peering connection.

    ", + "DeleteVpcRequest$VpcId": "

    The ID of the VPC.

    ", + "DeleteVpnConnectionRequest$VpnConnectionId": "

    The ID of the VPN connection.

    ", + "DeleteVpnConnectionRouteRequest$VpnConnectionId": "

    The ID of the VPN connection.

    ", + "DeleteVpnConnectionRouteRequest$DestinationCidrBlock": "

    The CIDR block associated with the local subnet of the customer network.

    ", + "DeleteVpnGatewayRequest$VpnGatewayId": "

    The ID of the virtual private gateway.

    ", + "DeregisterImageRequest$ImageId": "

    The ID of the AMI.

    ", + "DescribeClassicLinkInstancesRequest$NextToken": "

    The token to retrieve the next page of results.

    ", + "DescribeClassicLinkInstancesResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeFlowLogsRequest$NextToken": "

    The token to retrieve the next page of results.

    ", + "DescribeFlowLogsResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeImageAttributeRequest$ImageId": "

    The ID of the AMI.

    ", + "DescribeImportImageTasksRequest$NextToken": "

    A token that indicates the next page of results.

    ", + "DescribeImportImageTasksResult$NextToken": "

    The token to use to get the next page of results. This value is null when there are no more results to return.

    ", + "DescribeImportSnapshotTasksRequest$NextToken": "

    A token that indicates the next page of results.

    ", + "DescribeImportSnapshotTasksResult$NextToken": "

    The token to use to get the next page of results. This value is null when there are no more results to return.

    ", + "DescribeInstanceAttributeRequest$InstanceId": "

    The ID of the instance.

    ", + "DescribeInstanceStatusRequest$NextToken": "

    The token to retrieve the next page of results.

    ", + "DescribeInstanceStatusResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeInstancesRequest$NextToken": "

    The token to request the next page of results.

    ", + "DescribeInstancesResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeMovingAddressesRequest$NextToken": "

    The token to use to retrieve the next page of results.

    ", + "DescribeMovingAddressesResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeNetworkInterfaceAttributeRequest$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "DescribeNetworkInterfaceAttributeResult$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "DescribePrefixListsRequest$NextToken": "

    The token for the next set of items to return. (You received this token from a prior call.)

    ", + "DescribePrefixListsResult$NextToken": "

    The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

    ", + "DescribeReservedInstancesListingsRequest$ReservedInstancesId": "

    One or more Reserved Instance IDs.

    ", + "DescribeReservedInstancesListingsRequest$ReservedInstancesListingId": "

    One or more Reserved Instance Listing IDs.

    ", + "DescribeReservedInstancesModificationsRequest$NextToken": "

    The token to retrieve the next page of results.

    ", + "DescribeReservedInstancesModificationsResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeReservedInstancesOfferingsRequest$AvailabilityZone": "

    The Availability Zone in which the Reserved Instance can be used.

    ", + "DescribeReservedInstancesOfferingsRequest$NextToken": "

    The token to retrieve the next page of results.

    ", + "DescribeReservedInstancesOfferingsResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeSnapshotAttributeRequest$SnapshotId": "

    The ID of the EBS snapshot.

    ", + "DescribeSnapshotAttributeResult$SnapshotId": "

    The ID of the EBS snapshot.

    ", + "DescribeSnapshotsRequest$NextToken": "

    The NextToken value returned from a previous paginated DescribeSnapshots request where MaxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the NextToken value. This value is null when there are no more results to return.

    ", + "DescribeSnapshotsResult$NextToken": "

    The NextToken value to include in a future DescribeSnapshots request. When the results of a DescribeSnapshots request exceed MaxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.
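A sketch of the paging loop in aws-sdk-go, which threads NextToken through successive calls via the generated Pages helper until the service returns a null token:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	input := &ec2.DescribeSnapshotsInput{
		OwnerIds:   []*string{aws.String("self")},
		MaxResults: aws.Int64(100), // page size
	}
	// The callback runs once per page; returning true continues paging.
	err := svc.DescribeSnapshotsPages(input, func(page *ec2.DescribeSnapshotsOutput, lastPage bool) bool {
		for _, s := range page.Snapshots {
			fmt.Println(aws.StringValue(s.SnapshotId), aws.StringValue(s.State))
		}
		return !lastPage
	})
	if err != nil {
		fmt.Println("describe failed:", err)
	}
}
```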

    ", + "DescribeSpotFleetInstancesRequest$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "DescribeSpotFleetInstancesRequest$NextToken": "

    The token for the next set of results.

    ", + "DescribeSpotFleetInstancesResponse$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "DescribeSpotFleetInstancesResponse$NextToken": "

    The token required to retrieve the next set of results. This value is null when there are no more results to return.

    ", + "DescribeSpotFleetRequestHistoryRequest$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "DescribeSpotFleetRequestHistoryRequest$NextToken": "

    The token for the next set of results.

    ", + "DescribeSpotFleetRequestHistoryResponse$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "DescribeSpotFleetRequestHistoryResponse$NextToken": "

    The token required to retrieve the next set of results. This value is null when there are no more results to return.

    ", + "DescribeSpotFleetRequestsRequest$NextToken": "

    The token for the next set of results.

    ", + "DescribeSpotFleetRequestsResponse$NextToken": "

    The token required to retrieve the next set of results. This value is null when there are no more results to return.

    ", + "DescribeSpotPriceHistoryRequest$AvailabilityZone": "

    Filters the results by the specified Availability Zone.

    ", + "DescribeSpotPriceHistoryRequest$NextToken": "

    The token for the next set of results.

    ", + "DescribeSpotPriceHistoryResult$NextToken": "

    The token required to retrieve the next set of results. This value is null when there are no more results to return.

    ", + "DescribeTagsRequest$NextToken": "

    The token to retrieve the next page of results.

    ", + "DescribeTagsResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeVolumeAttributeRequest$VolumeId": "

    The ID of the volume.

    ", + "DescribeVolumeAttributeResult$VolumeId": "

    The ID of the volume.

    ", + "DescribeVolumeStatusRequest$NextToken": "

    The NextToken value to include in a future DescribeVolumeStatus request. When the results of the request exceed MaxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeVolumeStatusResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeVolumesRequest$NextToken": "

    The NextToken value returned from a previous paginated DescribeVolumes request where MaxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the NextToken value. This value is null when there are no more results to return.

    ", + "DescribeVolumesResult$NextToken": "

    The NextToken value to include in a future DescribeVolumes request. When the results of a DescribeVolumes request exceed MaxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeVpcAttributeRequest$VpcId": "

    The ID of the VPC.

    ", + "DescribeVpcAttributeResult$VpcId": "

    The ID of the VPC.

    ", + "DescribeVpcEndpointServicesRequest$NextToken": "

    The token for the next set of items to return. (You received this token from a prior call.)

    ", + "DescribeVpcEndpointServicesResult$NextToken": "

    The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

    ", + "DescribeVpcEndpointsRequest$NextToken": "

    The token for the next set of items to return. (You received this token from a prior call.)

    ", + "DescribeVpcEndpointsResult$NextToken": "

    The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

    ", + "DetachClassicLinkVpcRequest$InstanceId": "

    The ID of the instance to unlink from the VPC.

    ", + "DetachClassicLinkVpcRequest$VpcId": "

    The ID of the VPC to which the instance is linked.

    ", + "DetachInternetGatewayRequest$InternetGatewayId": "

    The ID of the Internet gateway.

    ", + "DetachInternetGatewayRequest$VpcId": "

    The ID of the VPC.

    ", + "DetachNetworkInterfaceRequest$AttachmentId": "

    The ID of the attachment.

    ", + "DetachVolumeRequest$VolumeId": "

    The ID of the volume.

    ", + "DetachVolumeRequest$InstanceId": "

    The ID of the instance.

    ", + "DetachVolumeRequest$Device": "

    The device name.

    ", + "DetachVpnGatewayRequest$VpnGatewayId": "

    The ID of the virtual private gateway.

    ", + "DetachVpnGatewayRequest$VpcId": "

    The ID of the VPC.

    ", + "DhcpConfiguration$Key": "

    The name of a DHCP option.

    ", + "DhcpOptions$DhcpOptionsId": "

    The ID of the set of DHCP options.

    ", + "DhcpOptionsIdStringList$member": null, + "DisableVgwRoutePropagationRequest$RouteTableId": "

    The ID of the route table.

    ", + "DisableVgwRoutePropagationRequest$GatewayId": "

    The ID of the virtual private gateway.

    ", + "DisableVpcClassicLinkRequest$VpcId": "

    The ID of the VPC.

    ", + "DisassociateAddressRequest$PublicIp": "

    [EC2-Classic] The Elastic IP address. Required for EC2-Classic.

    ", + "DisassociateAddressRequest$AssociationId": "

    [EC2-VPC] The association ID. Required for EC2-VPC.

    ", + "DisassociateRouteTableRequest$AssociationId": "

    The association ID representing the current association between the route table and subnet.

    ", + "DiskImage$Description": "

    A description of the disk image.

    ", + "DiskImageDescription$ImportManifestUrl": "

    A presigned URL for the import manifest stored in Amazon S3. For information about creating a presigned URL for an Amazon S3 object, read the \"Query String Request Authentication Alternative\" section of the Authenticating REST Requests topic in the Amazon Simple Storage Service Developer Guide.

    ", + "DiskImageDescription$Checksum": "

    The checksum computed for the disk image.

    ", + "DiskImageDetail$ImportManifestUrl": "

    A presigned URL for the import manifest stored in Amazon S3 and presented here as an Amazon S3 presigned URL. For information about creating a presigned URL for an Amazon S3 object, read the \"Query String Request Authentication Alternative\" section of the Authenticating REST Requests topic in the Amazon Simple Storage Service Developer Guide.

    ", + "DiskImageVolumeDescription$Id": "

    The volume identifier.

    ", + "EbsBlockDevice$SnapshotId": "

    The ID of the snapshot.

    ", + "EbsInstanceBlockDevice$VolumeId": "

    The ID of the EBS volume.

    ", + "EbsInstanceBlockDeviceSpecification$VolumeId": "

    The ID of the EBS volume.

    ", + "EnableVgwRoutePropagationRequest$RouteTableId": "

    The ID of the route table.

    ", + "EnableVgwRoutePropagationRequest$GatewayId": "

    The ID of the virtual private gateway.

    ", + "EnableVolumeIORequest$VolumeId": "

    The ID of the volume.

    ", + "EnableVpcClassicLinkRequest$VpcId": "

    The ID of the VPC.

    ", + "EventInformation$InstanceId": "

    The ID of the instance. This information is available only for instanceChange events.

    ", + "EventInformation$EventSubType": "

    The event. (A short Go sketch for retrieving this event history follows the list below.)

    The following are the error events.

    • iamFleetRoleInvalid - Spot fleet did not have the required permissions either to launch or terminate an instance.

    • spotFleetRequestConfigurationInvalid - The configuration is not valid. For more information, see the description.

    • spotInstanceCountLimitExceeded - You've reached the limit on the number of Spot instances that you can launch.

    The following are the fleetRequestChange events.

    • active - The Spot fleet has been validated and Amazon EC2 is attempting to maintain the target number of running Spot instances.

    • cancelled - The Spot fleet is canceled and has no running Spot instances. The Spot fleet will be deleted two days after its instances were terminated.

    • cancelled_running - The Spot fleet is canceled and will not launch additional Spot instances, but its existing Spot instances continue to run until they are interrupted or terminated.

    • cancelled_terminating - The Spot fleet is canceled and its Spot instances are terminating.

    • expired - The Spot fleet request has expired. A subsequent event indicates that the instances were terminated, if the request was created with TerminateInstancesWithExpiration set.

    • price_update - The bid price for a launch configuration was adjusted because it was too high. This change is permanent.

    • submitted - The Spot fleet request is being evaluated and Amazon EC2 is preparing to launch the target number of Spot instances.

    The following are the instanceChange events.

    • launched - A bid was fulfilled and a new instance was launched.

    • terminated - An instance was terminated by the user.
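The sketch referenced above, retrieving these events with aws-sdk-go; the fleet request ID is a placeholder:

```go
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	// The fleet request ID is a placeholder; StartTime is required.
	out, err := svc.DescribeSpotFleetRequestHistory(&ec2.DescribeSpotFleetRequestHistoryInput{
		SpotFleetRequestId: aws.String("sfr-12345678-1234-1234-1234-123456789012"),
		StartTime:          aws.Time(time.Now().Add(-24 * time.Hour)),
	})
	if err != nil {
		fmt.Println("describe failed:", err)
		return
	}
	for _, rec := range out.HistoryRecords {
		fmt.Println(aws.TimeValue(rec.Timestamp), aws.StringValue(rec.EventType),
			aws.StringValue(rec.EventInformation.EventSubType))
	}
}
```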

    ", + "EventInformation$EventDescription": "

    The description of the event.

    ", + "ExecutableByStringList$member": null, + "ExportTask$ExportTaskId": "

    The ID of the export task.

    ", + "ExportTask$Description": "

    A description of the resource being exported.

    ", + "ExportTask$StatusMessage": "

    The status message related to the export task.

    ", + "ExportTaskIdStringList$member": null, + "ExportToS3Task$S3Bucket": "

    The S3 bucket for the destination image. The destination bucket must exist and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com.

    ", + "ExportToS3Task$S3Key": "

    The encryption key for your S3 bucket.

    ", + "ExportToS3TaskSpecification$S3Bucket": "

    The S3 bucket for the destination image. The destination bucket must exist and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com.

    ", + "ExportToS3TaskSpecification$S3Prefix": "

    The image is written to a single object in the S3 bucket at the S3 key s3prefix + exportTaskId + '.' + diskImageFormat.

    ", + "Filter$Name": "

    The name of the filter. Filter names are case-sensitive.

    ", + "FlowLog$FlowLogId": "

    The flow log ID.

    ", + "FlowLog$FlowLogStatus": "

    The status of the flow log (ACTIVE).

    ", + "FlowLog$ResourceId": "

    The ID of the resource on which the flow log was created.

    ", + "FlowLog$LogGroupName": "

    The name of the flow log group.

    ", + "FlowLog$DeliverLogsStatus": "

    The status of the logs delivery (SUCCESS | FAILED).

    ", + "FlowLog$DeliverLogsErrorMessage": "

    Information about the error that occurred. Rate limited indicates that CloudWatch Logs throttling has been applied for one or more network interfaces. Access error indicates that the IAM role associated with the flow log does not have sufficient permissions to publish to CloudWatch Logs. Unknown error indicates an internal error.

    ", + "FlowLog$DeliverLogsPermissionArn": "

    The ARN of the IAM role that posts logs to CloudWatch Logs.

    ", + "GetConsoleOutputRequest$InstanceId": "

    The ID of the instance.

    ", + "GetConsoleOutputResult$InstanceId": "

    The ID of the instance.

    ", + "GetConsoleOutputResult$Output": "

    The console output, Base64 encoded.
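A minimal aws-sdk-go sketch that fetches the output and decodes the Base64 payload; the instance ID is a placeholder:

```go
package main

import (
	"encoding/base64"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	// Instance ID is a placeholder; Output comes back Base64 encoded.
	out, err := svc.GetConsoleOutput(&ec2.GetConsoleOutputInput{
		InstanceId: aws.String("i-1234567890abcdef0"),
	})
	if err != nil {
		fmt.Println("get failed:", err)
		return
	}
	decoded, err := base64.StdEncoding.DecodeString(aws.StringValue(out.Output))
	if err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Print(string(decoded))
}
```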

    ", + "GetPasswordDataRequest$InstanceId": "

    The ID of the Windows instance.

    ", + "GetPasswordDataResult$InstanceId": "

    The ID of the Windows instance.

    ", + "GetPasswordDataResult$PasswordData": "

    The password of the instance.

    ", + "GroupIdStringList$member": null, + "GroupIdentifier$GroupName": "

    The name of the security group.

    ", + "GroupIdentifier$GroupId": "

    The ID of the security group.

    ", + "GroupNameStringList$member": null, + "IamInstanceProfile$Arn": "

    The Amazon Resource Name (ARN) of the instance profile.

    ", + "IamInstanceProfile$Id": "

    The ID of the instance profile.

    ", + "IamInstanceProfileSpecification$Arn": "

    The Amazon Resource Name (ARN) of the instance profile.

    ", + "IamInstanceProfileSpecification$Name": "

    The name of the instance profile.

    ", + "Image$ImageId": "

    The ID of the AMI.

    ", + "Image$ImageLocation": "

    The location of the AMI.

    ", + "Image$OwnerId": "

    The AWS account ID of the image owner.

    ", + "Image$CreationDate": "

    The date and time the image was created.

    ", + "Image$KernelId": "

    The kernel associated with the image, if any. Only applicable for machine images.

    ", + "Image$RamdiskId": "

    The RAM disk associated with the image, if any. Only applicable for machine images.

    ", + "Image$SriovNetSupport": "

    Specifies whether enhanced networking is enabled.

    ", + "Image$ImageOwnerAlias": "

    The AWS account alias (for example, amazon, self) or the AWS account ID of the AMI owner.

    ", + "Image$Name": "

    The name of the AMI that was provided during image creation.

    ", + "Image$Description": "

    The description of the AMI that was provided during image creation.

    ", + "Image$RootDeviceName": "

    The device name of the root device (for example, /dev/sda1 or /dev/xvda).

    ", + "ImageAttribute$ImageId": "

    The ID of the AMI.

    ", + "ImageDiskContainer$Description": "

    The description of the disk image.

    ", + "ImageDiskContainer$Format": "

    The format of the disk image being imported.

    Valid values: RAW | VHD | VMDK | OVA

    ", + "ImageDiskContainer$Url": "

    The URL to the Amazon S3-based disk image being imported. The URL can either be an https URL (https://..) or an Amazon S3 URL (s3://..).

    ", + "ImageDiskContainer$DeviceName": "

    The block device mapping for the disk.

    ", + "ImageDiskContainer$SnapshotId": "

    The ID of the EBS snapshot to be used for importing the snapshot.

    ", + "ImageIdStringList$member": null, + "ImportImageRequest$Description": "

    A description string for the import image task.

    ", + "ImportImageRequest$LicenseType": "

    The license type to be used for the Amazon Machine Image (AMI) after importing.

    Note: You may only use BYOL if you have existing licenses with rights to use these licenses in a third party cloud like AWS. For more information, see VM Import/Export Prerequisites in the Amazon Elastic Compute Cloud User Guide.

    Valid values: AWS | BYOL

    ", + "ImportImageRequest$Hypervisor": "

    The target hypervisor platform.

    Valid values: xen

    ", + "ImportImageRequest$Architecture": "

    The architecture of the virtual machine.

    Valid values: i386 | x86_64

    ", + "ImportImageRequest$Platform": "

    The operating system of the virtual machine.

    Valid values: Windows | Linux

    ", + "ImportImageRequest$ClientToken": "

    The token to enable idempotency for VM import requests.

    ", + "ImportImageRequest$RoleName": "

    The name of the role to use when not using the default role, 'vmimport'.
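A minimal aws-sdk-go sketch of an import image task for a single VMDK disk; the S3 URL is a placeholder, and the image must already be uploaded where the 'vmimport' role (or the role named here) can read it:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	// The S3 URL is a placeholder; omitting RoleName uses the default
	// 'vmimport' role.
	out, err := svc.ImportImage(&ec2.ImportImageInput{
		Description: aws.String("imported web server"),
		LicenseType: aws.String("BYOL"),
		Platform:    aws.String("Linux"),
		DiskContainers: []*ec2.ImageDiskContainer{{
			Description: aws.String("boot disk"),
			Format:      aws.String("VMDK"),
			Url:         aws.String("s3://my-import-bucket/disks/webserver.vmdk"),
		}},
	})
	if err != nil {
		fmt.Println("import failed:", err)
		return
	}
	fmt.Println("import task:", aws.StringValue(out.ImportTaskId),
		"status:", aws.StringValue(out.Status))
}
```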

    ", + "ImportImageResult$ImportTaskId": "

    The task ID of the import image task.

    ", + "ImportImageResult$Architecture": "

    The architecture of the virtual machine.

    ", + "ImportImageResult$LicenseType": "

    The license type of the virtual machine.

    ", + "ImportImageResult$Platform": "

    The operating system of the virtual machine.

    ", + "ImportImageResult$Hypervisor": "

    The target hypervisor of the import task.

    ", + "ImportImageResult$Description": "

    A description of the import task.

    ", + "ImportImageResult$ImageId": "

    The ID of the Amazon Machine Image (AMI) created by the import task.

    ", + "ImportImageResult$Progress": "

    The progress of the task.

    ", + "ImportImageResult$StatusMessage": "

    A detailed status message of the import task.

    ", + "ImportImageResult$Status": "

    A brief status of the task.

    ", + "ImportImageTask$ImportTaskId": "

    The ID of the import image task.

    ", + "ImportImageTask$Architecture": "

    The architecture of the virtual machine.

    Valid values: i386 | x86_64

    ", + "ImportImageTask$LicenseType": "

    The license type of the virtual machine.

    ", + "ImportImageTask$Platform": "

    The operating system of the virtual machine.

    ", + "ImportImageTask$Hypervisor": "

    The target hypervisor for the import task.

    Valid values: xen

    ", + "ImportImageTask$Description": "

    A description of the import task.

    ", + "ImportImageTask$ImageId": "

    The ID of the Amazon Machine Image (AMI) of the imported virtual machine.

    ", + "ImportImageTask$Progress": "

    The percentage of progress of the import image task.

    ", + "ImportImageTask$StatusMessage": "

    A descriptive status message for the import image task.

    ", + "ImportImageTask$Status": "

    A brief status for the import image task.

    ", + "ImportInstanceLaunchSpecification$AdditionalInfo": "

    Reserved.

    ", + "ImportInstanceLaunchSpecification$SubnetId": "

    [EC2-VPC] The ID of the subnet in which to launch the instance.

    ", + "ImportInstanceLaunchSpecification$PrivateIpAddress": "

    [EC2-VPC] An available IP address from the IP address range of the subnet.

    ", + "ImportInstanceRequest$Description": "

    A description for the instance being imported.

    ", + "ImportInstanceTaskDetails$InstanceId": "

    The ID of the instance.

    ", + "ImportInstanceTaskDetails$Description": "

    A description of the task.

    ", + "ImportInstanceVolumeDetailItem$AvailabilityZone": "

    The Availability Zone where the resulting instance will reside.

    ", + "ImportInstanceVolumeDetailItem$Status": "

    The status of the import of this particular disk image.

    ", + "ImportInstanceVolumeDetailItem$StatusMessage": "

    The status information or errors related to the disk image.

    ", + "ImportInstanceVolumeDetailItem$Description": "

    A description of the task.

    ", + "ImportKeyPairRequest$KeyName": "

    A unique name for the key pair.

    ", + "ImportKeyPairResult$KeyName": "

    The key pair name you provided.

    ", + "ImportKeyPairResult$KeyFingerprint": "

    The MD5 public key fingerprint as specified in section 4 of RFC 4716.

    ", + "ImportSnapshotRequest$Description": "

    The description string for the import snapshot task.

    ", + "ImportSnapshotRequest$ClientToken": "

    Token to enable idempotency for VM import requests.

    ", + "ImportSnapshotRequest$RoleName": "

    The name of the role to use when not using the default role, 'vmimport'.

    ", + "ImportSnapshotResult$ImportTaskId": "

    The ID of the import snapshot task.

    ", + "ImportSnapshotResult$Description": "

    A description of the import snapshot task.

    ", + "ImportSnapshotTask$ImportTaskId": "

    The ID of the import snapshot task.

    ", + "ImportSnapshotTask$Description": "

    A description of the import snapshot task.

    ", + "ImportTaskIdList$member": null, + "ImportVolumeRequest$AvailabilityZone": "

    The Availability Zone for the resulting EBS volume.

    ", + "ImportVolumeRequest$Description": "

    A description of the volume.

    ", + "ImportVolumeTaskDetails$AvailabilityZone": "

    The Availability Zone where the resulting volume will reside.

    ", + "ImportVolumeTaskDetails$Description": "

    The description you provided when starting the import volume task.

    ", + "Instance$InstanceId": "

    The ID of the instance.

    ", + "Instance$ImageId": "

    The ID of the AMI used to launch the instance.

    ", + "Instance$PrivateDnsName": "

    The private DNS name assigned to the instance. This DNS name can only be used inside the Amazon EC2 network. This name is not available until the instance enters the running state.

    ", + "Instance$PublicDnsName": "

    The public DNS name assigned to the instance. This name is not available until the instance enters the running state.

    ", + "Instance$StateTransitionReason": "

    The reason for the most recent state transition. This might be an empty string.

    ", + "Instance$KeyName": "

    The name of the key pair, if this instance was launched with an associated key pair.

    ", + "Instance$KernelId": "

    The kernel associated with this instance.

    ", + "Instance$RamdiskId": "

    The RAM disk associated with this instance.

    ", + "Instance$SubnetId": "

    The ID of the subnet in which the instance is running.

    ", + "Instance$VpcId": "

    The ID of the VPC in which the instance is running.

    ", + "Instance$PrivateIpAddress": "

    The private IP address assigned to the instance.

    ", + "Instance$PublicIpAddress": "

    The public IP address assigned to the instance.

    ", + "Instance$RootDeviceName": "

    The root device name (for example, /dev/sda1 or /dev/xvda).

    ", + "Instance$SpotInstanceRequestId": "

    The ID of the Spot Instance request.

    ", + "Instance$ClientToken": "

    The idempotency token you provided when you launched the instance.

    ", + "Instance$SriovNetSupport": "

    Specifies whether enhanced networking is enabled.

    ", + "InstanceAttribute$InstanceId": "

    The ID of the instance.

    ", + "InstanceBlockDeviceMapping$DeviceName": "

    The device name exposed to the instance (for example, /dev/sdh or xvdh).

    ", + "InstanceBlockDeviceMappingSpecification$DeviceName": "

    The device name exposed to the instance (for example, /dev/sdh or xvdh).

    ", + "InstanceBlockDeviceMappingSpecification$VirtualName": "

    The virtual device name.

    ", + "InstanceBlockDeviceMappingSpecification$NoDevice": "

    Suppresses the specified device included in the block device mapping.

    ", + "InstanceExportDetails$InstanceId": "

    The ID of the resource being exported.

    ", + "InstanceIdStringList$member": null, + "InstanceMonitoring$InstanceId": "

    The ID of the instance.

    ", + "InstanceNetworkInterface$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "InstanceNetworkInterface$SubnetId": "

    The ID of the subnet.

    ", + "InstanceNetworkInterface$VpcId": "

    The ID of the VPC.

    ", + "InstanceNetworkInterface$Description": "

    The description.

    ", + "InstanceNetworkInterface$OwnerId": "

    The ID of the AWS account that created the network interface.

    ", + "InstanceNetworkInterface$MacAddress": "

    The MAC address.

    ", + "InstanceNetworkInterface$PrivateIpAddress": "

    The IP address of the network interface within the subnet.

    ", + "InstanceNetworkInterface$PrivateDnsName": "

    The private DNS name.

    ", + "InstanceNetworkInterfaceAssociation$PublicIp": "

    The public IP address or Elastic IP address bound to the network interface.

    ", + "InstanceNetworkInterfaceAssociation$PublicDnsName": "

    The public DNS name.

    ", + "InstanceNetworkInterfaceAssociation$IpOwnerId": "

    The ID of the owner of the Elastic IP address.

    ", + "InstanceNetworkInterfaceAttachment$AttachmentId": "

    The ID of the network interface attachment.

    ", + "InstanceNetworkInterfaceSpecification$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "InstanceNetworkInterfaceSpecification$SubnetId": "

    The ID of the subnet associated with the network interface. Applies only if creating a network interface when launching an instance.

    ", + "InstanceNetworkInterfaceSpecification$Description": "

    The description of the network interface. Applies only if creating a network interface when launching an instance.

    ", + "InstanceNetworkInterfaceSpecification$PrivateIpAddress": "

    The private IP address of the network interface. Applies only if creating a network interface when launching an instance.
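
    A sketch of how these InstanceNetworkInterfaceSpecification fields are used when launching an instance; IDs and addresses are placeholders:

    package example

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ec2"
    )

    // runWithNetworkInterface launches an instance and creates its primary
    // network interface in the same call, so the subnet, description, and
    // private IP address are set on the interface spec rather than on the
    // top-level request.
    func runWithNetworkInterface(svc *ec2.EC2) (*ec2.Reservation, error) {
        return svc.RunInstances(&ec2.RunInstancesInput{
            ImageId:      aws.String("ami-1a2b3c4d"),
            InstanceType: aws.String("t2.micro"),
            MinCount:     aws.Int64(1),
            MaxCount:     aws.Int64(1),
            NetworkInterfaces: []*ec2.InstanceNetworkInterfaceSpecification{{
                DeviceIndex:      aws.Int64(0), // primary interface
                SubnetId:         aws.String("subnet-1a2b3c4d"),
                Description:      aws.String("primary interface"),
                PrivateIpAddress: aws.String("10.0.0.10"), // must be in the subnet's range
            }},
        })
    }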

    ", + "InstancePrivateIpAddress$PrivateIpAddress": "

    The private IP address of the network interface.

    ", + "InstancePrivateIpAddress$PrivateDnsName": "

    The private DNS name.

    ", + "InstanceStateChange$InstanceId": "

    The ID of the instance.

    ", + "InstanceStatus$InstanceId": "

    The ID of the instance.

    ", + "InstanceStatus$AvailabilityZone": "

    The Availability Zone of the instance.

    ", + "InstanceStatusEvent$Description": "

    A description of the event.

    After a scheduled event is completed, it can still be described for up to a week. If the event has been completed, this description starts with the following text: [Completed].

    ", + "InternetGateway$InternetGatewayId": "

    The ID of the Internet gateway.

    ", + "InternetGatewayAttachment$VpcId": "

    The ID of the VPC.

    ", + "IpPermission$IpProtocol": "

    The protocol.

    When you call DescribeSecurityGroups, the protocol value returned is the number. Exception: For TCP, UDP, and ICMP, the value returned is the name (for example, tcp, udp, or icmp). For a list of protocol numbers, see Protocol Numbers. (VPC only) When you call AuthorizeSecurityGroupIngress, you can use -1 to specify all.

    ", + "IpRange$CidrIp": "

    The CIDR range. You can either specify a CIDR range or a source security group, not both.
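
    A sketch showing an IpPermission with an IpRange in practice, authorizing inbound TCP/22 from a CIDR block; the group ID and CIDR are placeholders:

    package example

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ec2"
    )

    // allowSSH opens TCP port 22 from one CIDR range on a security group.
    func allowSSH(svc *ec2.EC2) error {
        _, err := svc.AuthorizeSecurityGroupIngress(&ec2.AuthorizeSecurityGroupIngressInput{
            GroupId: aws.String("sg-1a2b3c4d"),
            IpPermissions: []*ec2.IpPermission{{
                IpProtocol: aws.String("tcp"), // protocol name or number; -1 for all (VPC only)
                FromPort:   aws.Int64(22),
                ToPort:     aws.Int64(22),
                IpRanges:   []*ec2.IpRange{{CidrIp: aws.String("203.0.113.0/24")}},
            }},
        })
        return err
    }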

    ", + "KeyNameStringList$member": null, + "KeyPair$KeyName": "

    The name of the key pair.

    ", + "KeyPair$KeyFingerprint": "

    The SHA-1 digest of the DER encoded private key.

    ", + "KeyPair$KeyMaterial": "

    An unencrypted PEM encoded RSA private key.

    ", + "KeyPairInfo$KeyName": "

    The name of the key pair.

    ", + "KeyPairInfo$KeyFingerprint": "

    If you used CreateKeyPair to create the key pair, this is the SHA-1 digest of the DER encoded private key. If you used ImportKeyPair to provide AWS the public key, this is the MD5 public key fingerprint as specified in section 4 of RFC4716.
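
    A sketch of CreateKeyPair showing where KeyName, KeyFingerprint, and KeyMaterial come back; the key name is a placeholder:

    package example

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ec2"
    )

    // createKeyPair creates a key pair and prints its SHA-1 fingerprint.
    // KeyMaterial, the unencrypted PEM-encoded RSA private key, is returned
    // only by this call, so it must be persisted immediately.
    func createKeyPair(svc *ec2.EC2) error {
        out, err := svc.CreateKeyPair(&ec2.CreateKeyPairInput{
            KeyName: aws.String("my-key"), // placeholder name
        })
        if err != nil {
            return err
        }
        fmt.Println(aws.StringValue(out.KeyName), aws.StringValue(out.KeyFingerprint))
        // Write aws.StringValue(out.KeyMaterial) to disk here.
        return nil
    }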

    ", + "LaunchPermission$UserId": "

    The AWS account ID.

    ", + "LaunchSpecification$ImageId": "

    The ID of the AMI.

    ", + "LaunchSpecification$KeyName": "

    The name of the key pair.

    ", + "LaunchSpecification$UserData": "

    The Base64-encoded MIME user data to make available to the instances.

    ", + "LaunchSpecification$AddressingType": "

    Deprecated.

    ", + "LaunchSpecification$KernelId": "

    The ID of the kernel.

    ", + "LaunchSpecification$RamdiskId": "

    The ID of the RAM disk.

    ", + "LaunchSpecification$SubnetId": "

    The ID of the subnet in which to launch the instance.

    ", + "ModifyImageAttributeRequest$ImageId": "

    The ID of the AMI.

    ", + "ModifyImageAttributeRequest$Attribute": "

    The name of the attribute to modify.

    ", + "ModifyImageAttributeRequest$Value": "

    The value of the attribute being modified. This is only valid when modifying the description attribute.

    ", + "ModifyInstanceAttributeRequest$InstanceId": "

    The ID of the instance.

    ", + "ModifyInstanceAttributeRequest$Value": "

    A new value for the attribute. Use only with the kernel, ramdisk, userData, disableApiTermination, or instanceInitiatedShutdownBehavior attribute.
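
    A sketch of the generic Attribute/Value form of ModifyInstanceAttribute for one of the attributes listed above; the instance ID is a placeholder:

    package example

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ec2"
    )

    // setShutdownBehavior sets instanceInitiatedShutdownBehavior via the
    // plain string Value field, which is valid only for the attributes
    // named in the description above.
    func setShutdownBehavior(svc *ec2.EC2) error {
        _, err := svc.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{
            InstanceId: aws.String("i-1234567890abcdef0"),
            Attribute:  aws.String("instanceInitiatedShutdownBehavior"),
            Value:      aws.String("terminate"), // or "stop"
        })
        return err
    }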

    ", + "ModifyNetworkInterfaceAttributeRequest$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "ModifyReservedInstancesRequest$ClientToken": "

    A unique, case-sensitive token you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

    ", + "ModifyReservedInstancesResult$ReservedInstancesModificationId": "

    The ID for the modification.

    ", + "ModifySnapshotAttributeRequest$SnapshotId": "

    The ID of the snapshot.

    ", + "ModifySubnetAttributeRequest$SubnetId": "

    The ID of the subnet.

    ", + "ModifyVolumeAttributeRequest$VolumeId": "

    The ID of the volume.

    ", + "ModifyVpcAttributeRequest$VpcId": "

    The ID of the VPC.

    ", + "ModifyVpcEndpointRequest$VpcEndpointId": "

    The ID of the endpoint.

    ", + "ModifyVpcEndpointRequest$PolicyDocument": "

    A policy document to attach to the endpoint. The policy must be in valid JSON format.

    ", + "MoveAddressToVpcRequest$PublicIp": "

    The Elastic IP address.

    ", + "MoveAddressToVpcResult$AllocationId": "

    The allocation ID for the Elastic IP address.

    ", + "MovingAddressStatus$PublicIp": "

    The Elastic IP address.

    ", + "NetworkAcl$NetworkAclId": "

    The ID of the network ACL.

    ", + "NetworkAcl$VpcId": "

    The ID of the VPC for the network ACL.

    ", + "NetworkAclAssociation$NetworkAclAssociationId": "

    The ID of the association between a network ACL and a subnet.

    ", + "NetworkAclAssociation$NetworkAclId": "

    The ID of the network ACL.

    ", + "NetworkAclAssociation$SubnetId": "

    The ID of the subnet.

    ", + "NetworkAclEntry$Protocol": "

    The protocol. A value of -1 means all protocols.

    ", + "NetworkAclEntry$CidrBlock": "

    The network range to allow or deny, in CIDR notation.

    ", + "NetworkInterface$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "NetworkInterface$SubnetId": "

    The ID of the subnet.

    ", + "NetworkInterface$VpcId": "

    The ID of the VPC.

    ", + "NetworkInterface$AvailabilityZone": "

    The Availability Zone.

    ", + "NetworkInterface$Description": "

    A description.

    ", + "NetworkInterface$OwnerId": "

    The AWS account ID of the owner of the network interface.

    ", + "NetworkInterface$RequesterId": "

    The ID of the entity that launched the instance on your behalf (for example, AWS Management Console or Auto Scaling).

    ", + "NetworkInterface$MacAddress": "

    The MAC address.

    ", + "NetworkInterface$PrivateIpAddress": "

    The IP address of the network interface within the subnet.

    ", + "NetworkInterface$PrivateDnsName": "

    The private DNS name.

    ", + "NetworkInterfaceAssociation$PublicIp": "

    The address of the Elastic IP address bound to the network interface.

    ", + "NetworkInterfaceAssociation$PublicDnsName": "

    The public DNS name.

    ", + "NetworkInterfaceAssociation$IpOwnerId": "

    The ID of the Elastic IP address owner.

    ", + "NetworkInterfaceAssociation$AllocationId": "

    The allocation ID.

    ", + "NetworkInterfaceAssociation$AssociationId": "

    The association ID.

    ", + "NetworkInterfaceAttachment$AttachmentId": "

    The ID of the network interface attachment.

    ", + "NetworkInterfaceAttachment$InstanceId": "

    The ID of the instance.

    ", + "NetworkInterfaceAttachment$InstanceOwnerId": "

    The AWS account ID of the owner of the instance.

    ", + "NetworkInterfaceAttachmentChanges$AttachmentId": "

    The ID of the network interface attachment.

    ", + "NetworkInterfaceIdList$member": null, + "NetworkInterfacePrivateIpAddress$PrivateIpAddress": "

    The private IP address.

    ", + "NetworkInterfacePrivateIpAddress$PrivateDnsName": "

    The private DNS name.

    ", + "OwnerStringList$member": null, + "Placement$AvailabilityZone": "

    The Availability Zone of the instance.

    ", + "Placement$GroupName": "

    The name of the placement group the instance is in (for cluster compute instances).

    ", + "PlacementGroup$GroupName": "

    The name of the placement group.

    ", + "PlacementGroupStringList$member": null, + "PrefixList$PrefixListId": "

    The ID of the prefix.

    ", + "PrefixList$PrefixListName": "

    The name of the prefix.

    ", + "PrefixListId$PrefixListId": "

    The ID of the prefix.

    ", + "PrivateIpAddressSpecification$PrivateIpAddress": "

    The private IP address.

    ", + "PrivateIpAddressStringList$member": null, + "ProductCode$ProductCodeId": "

    The product code.

    ", + "ProductCodeStringList$member": null, + "ProductDescriptionList$member": null, + "PropagatingVgw$GatewayId": "

    The ID of the virtual private gateway (VGW).

    ", + "PublicIpStringList$member": null, + "PurchaseReservedInstancesOfferingRequest$ReservedInstancesOfferingId": "

    The ID of the Reserved Instance offering to purchase.

    ", + "PurchaseReservedInstancesOfferingResult$ReservedInstancesId": "

    The IDs of the purchased Reserved Instances.

    ", + "Region$RegionName": "

    The name of the region.

    ", + "Region$Endpoint": "

    The region service endpoint.

    ", + "RegionNameStringList$member": null, + "RegisterImageRequest$ImageLocation": "

    The full path to your AMI manifest in Amazon S3 storage.

    ", + "RegisterImageRequest$Name": "

    A name for your AMI.

    Constraints: 3-128 alphanumeric characters, parentheses (()), square brackets ([]), spaces ( ), periods (.), slashes (/), dashes (-), single quotes ('), at-signs (@), or underscores (_)

    ", + "RegisterImageRequest$Description": "

    A description for your AMI.

    ", + "RegisterImageRequest$KernelId": "

    The ID of the kernel.

    ", + "RegisterImageRequest$RamdiskId": "

    The ID of the RAM disk.

    ", + "RegisterImageRequest$RootDeviceName": "

    The name of the root device (for example, /dev/sda1 or /dev/xvda).

    ", + "RegisterImageRequest$VirtualizationType": "

    The type of virtualization.

    Default: paravirtual

    ", + "RegisterImageRequest$SriovNetSupport": "

    Set to simple to enable enhanced networking for the AMI and any instances that you launch from the AMI.

    There is no way to disable enhanced networking at this time.

    This option is supported only for HVM AMIs. Specifying this option with a PV AMI can make instances launched from the AMI unreachable.

    ", + "RegisterImageResult$ImageId": "

    The ID of the newly registered AMI.

    ", + "RejectVpcPeeringConnectionRequest$VpcPeeringConnectionId": "

    The ID of the VPC peering connection.

    ", + "ReleaseAddressRequest$PublicIp": "

    [EC2-Classic] The Elastic IP address. Required for EC2-Classic.

    ", + "ReleaseAddressRequest$AllocationId": "

    [EC2-VPC] The allocation ID. Required for EC2-VPC.

    ", + "ReplaceNetworkAclAssociationRequest$AssociationId": "

    The ID of the current association between the original network ACL and the subnet.

    ", + "ReplaceNetworkAclAssociationRequest$NetworkAclId": "

    The ID of the new network ACL to associate with the subnet.

    ", + "ReplaceNetworkAclAssociationResult$NewAssociationId": "

    The ID of the new association.

    ", + "ReplaceNetworkAclEntryRequest$NetworkAclId": "

    The ID of the ACL.

    ", + "ReplaceNetworkAclEntryRequest$Protocol": "

    The IP protocol. You can specify all or -1 to mean all protocols.

    ", + "ReplaceNetworkAclEntryRequest$CidrBlock": "

    The network range to allow or deny, in CIDR notation.

    ", + "ReplaceRouteRequest$RouteTableId": "

    The ID of the route table.

    ", + "ReplaceRouteRequest$DestinationCidrBlock": "

    The CIDR address block used for the destination match. The value you provide must match the CIDR of an existing route in the table.

    ", + "ReplaceRouteRequest$GatewayId": "

    The ID of an Internet gateway or virtual private gateway.

    ", + "ReplaceRouteRequest$InstanceId": "

    The ID of a NAT instance in your VPC.

    ", + "ReplaceRouteRequest$NetworkInterfaceId": "

    The ID of a network interface.

    ", + "ReplaceRouteRequest$VpcPeeringConnectionId": "

    The ID of a VPC peering connection.

    ", + "ReplaceRouteTableAssociationRequest$AssociationId": "

    The association ID.

    ", + "ReplaceRouteTableAssociationRequest$RouteTableId": "

    The ID of the new route table to associate with the subnet.

    ", + "ReplaceRouteTableAssociationResult$NewAssociationId": "

    The ID of the new association.

    ", + "ReportInstanceStatusRequest$Description": "

    Descriptive text about the health state of your instance.

    ", + "RequestSpotFleetResponse$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "RequestSpotInstancesRequest$SpotPrice": "

    The maximum hourly price (bid) for any Spot instance launched to fulfill the request.

    ", + "RequestSpotInstancesRequest$ClientToken": "

    Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency in the Amazon Elastic Compute Cloud User Guide.

    ", + "RequestSpotInstancesRequest$LaunchGroup": "

    The instance launch group. Launch groups are Spot instances that launch together and terminate together.

    Default: Instances are launched and terminated individually

    ", + "RequestSpotInstancesRequest$AvailabilityZoneGroup": "

    The user-specified name for a logical grouping of bids.

    When you specify an Availability Zone group in a Spot Instance request, all Spot instances in the request are launched in the same Availability Zone. Instance proximity is maintained with this parameter, but the choice of Availability Zone is not. The group applies only to bids for Spot Instances of the same instance type. Any additional Spot instance requests that are specified with the same Availability Zone group name are launched in that same Availability Zone, as long as at least one instance from the group is still active.

    If there is no active instance running in the Availability Zone group that you specify for a new Spot instance request (all instances are terminated, the bid is expired, or the bid falls below the current market price), then Amazon EC2 launches the instance in any Availability Zone where the constraint can be met. Consequently, the subsequent set of Spot instances could be placed in a different zone from the original request, even if you specified the same Availability Zone group.

    Default: Instances are launched in any available Availability Zone.
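
    A sketch tying the Spot request fields above together: a launch group plus an Availability Zone group keeps the instances co-located and co-terminated. The bid price, count, IDs, and group names are placeholders:

    package example

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ec2"
    )

    // requestSpot places a Spot instance request whose instances launch and
    // terminate together, in a single Availability Zone.
    func requestSpot(svc *ec2.EC2) (*ec2.RequestSpotInstancesOutput, error) {
        return svc.RequestSpotInstances(&ec2.RequestSpotInstancesInput{
            SpotPrice:             aws.String("0.05"), // maximum hourly bid
            InstanceCount:         aws.Int64(2),
            LaunchGroup:           aws.String("my-launch-group"),
            AvailabilityZoneGroup: aws.String("my-az-group"),
            ClientToken:           aws.String("spot-2017-07-26-01"), // idempotency
            LaunchSpecification: &ec2.RequestSpotLaunchSpecification{
                ImageId:      aws.String("ami-1a2b3c4d"),
                InstanceType: aws.String("m3.medium"),
            },
        })
    }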

    ", + "Reservation$ReservationId": "

    The ID of the reservation.

    ", + "Reservation$OwnerId": "

    The ID of the AWS account that owns the reservation.

    ", + "Reservation$RequesterId": "

    The ID of the requester that launched the instances on your behalf (for example, AWS Management Console or Auto Scaling).

    ", + "ReservedInstances$ReservedInstancesId": "

    The ID of the Reserved Instance.

    ", + "ReservedInstances$AvailabilityZone": "

    The Availability Zone in which the Reserved Instance can be used.

    ", + "ReservedInstancesConfiguration$AvailabilityZone": "

    The Availability Zone for the modified Reserved Instances.

    ", + "ReservedInstancesConfiguration$Platform": "

    The network platform of the modified Reserved Instances, which is either EC2-Classic or EC2-VPC.

    ", + "ReservedInstancesId$ReservedInstancesId": "

    The ID of the Reserved Instance.

    ", + "ReservedInstancesIdStringList$member": null, + "ReservedInstancesListing$ReservedInstancesListingId": "

    The ID of the Reserved Instance listing.

    ", + "ReservedInstancesListing$ReservedInstancesId": "

    The ID of the Reserved Instance.

    ", + "ReservedInstancesListing$StatusMessage": "

    The reason for the current status of the Reserved Instance listing. The response can be blank.

    ", + "ReservedInstancesListing$ClientToken": "

    A unique, case-sensitive key supplied by the client to ensure that the request is idempotent. For more information, see Ensuring Idempotency.

    ", + "ReservedInstancesModification$ReservedInstancesModificationId": "

    A unique ID for the Reserved Instance modification.

    ", + "ReservedInstancesModification$Status": "

    The status of the Reserved Instances modification request.

    ", + "ReservedInstancesModification$StatusMessage": "

    The reason for the status.

    ", + "ReservedInstancesModification$ClientToken": "

    A unique, case-sensitive key supplied by the client to ensure that the request is idempotent. For more information, see Ensuring Idempotency.

    ", + "ReservedInstancesModificationIdStringList$member": null, + "ReservedInstancesModificationResult$ReservedInstancesId": "

    The ID for the Reserved Instances that were created as part of the modification request. This field is only available when the modification is fulfilled.

    ", + "ReservedInstancesOffering$ReservedInstancesOfferingId": "

    The ID of the Reserved Instance offering.

    ", + "ReservedInstancesOffering$AvailabilityZone": "

    The Availability Zone in which the Reserved Instance can be used.

    ", + "ReservedInstancesOfferingIdStringList$member": null, + "ResetImageAttributeRequest$ImageId": "

    The ID of the AMI.

    ", + "ResetInstanceAttributeRequest$InstanceId": "

    The ID of the instance.

    ", + "ResetNetworkInterfaceAttributeRequest$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "ResetNetworkInterfaceAttributeRequest$SourceDestCheck": "

    The source/destination checking attribute. Resets the value to true.

    ", + "ResetSnapshotAttributeRequest$SnapshotId": "

    The ID of the snapshot.

    ", + "ResourceIdList$member": null, + "RestorableByStringList$member": null, + "RestoreAddressToClassicRequest$PublicIp": "

    The Elastic IP address.

    ", + "RestoreAddressToClassicResult$PublicIp": "

    The Elastic IP address.

    ", + "RevokeSecurityGroupEgressRequest$GroupId": "

    The ID of the security group.

    ", + "RevokeSecurityGroupEgressRequest$SourceSecurityGroupName": "

    The name of a destination security group. To revoke outbound access to a destination security group, we recommend that you use a set of IP permissions instead.

    ", + "RevokeSecurityGroupEgressRequest$SourceSecurityGroupOwnerId": "

    The AWS account number for a destination security group. To revoke outbound access to a destination security group, we recommend that you use a set of IP permissions instead.

    ", + "RevokeSecurityGroupEgressRequest$IpProtocol": "

    The IP protocol name (tcp, udp, icmp) or number (see Protocol Numbers). Use -1 to specify all.

    ", + "RevokeSecurityGroupEgressRequest$CidrIp": "

    The CIDR IP address range. You can't specify this parameter when specifying a source security group.

    ", + "RevokeSecurityGroupIngressRequest$GroupName": "

    [EC2-Classic, default VPC] The name of the security group.

    ", + "RevokeSecurityGroupIngressRequest$GroupId": "

    The ID of the security group. Required for a security group in a nondefault VPC.

    ", + "RevokeSecurityGroupIngressRequest$SourceSecurityGroupName": "

    [EC2-Classic, default VPC] The name of the source security group. You can't specify this parameter in combination with the following parameters: the CIDR IP address range, the start of the port range, the IP protocol, and the end of the port range. For EC2-VPC, the source security group must be in the same VPC.

    ", + "RevokeSecurityGroupIngressRequest$SourceSecurityGroupOwnerId": "

    [EC2-Classic, default VPC] The AWS account ID of the source security group. For EC2-VPC, the source security group must be in the same VPC. You can't specify this parameter in combination with the following parameters: the CIDR IP address range, the IP protocol, the start of the port range, and the end of the port range. To revoke a specific rule for an IP protocol and port range, use a set of IP permissions instead.

    ", + "RevokeSecurityGroupIngressRequest$IpProtocol": "

    The IP protocol name (tcp, udp, icmp) or number (see Protocol Numbers). Use -1 to specify all.

    ", + "RevokeSecurityGroupIngressRequest$CidrIp": "

    The CIDR IP address range. You can't specify this parameter when specifying a source security group.
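
    A sketch of revoking the kind of rule described above; the rule must be given exactly as it was authorized (same protocol, port range, and CIDR), with placeholder values here:

    package example

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ec2"
    )

    // revokeSSH removes an inbound TCP/22 rule from a security group.
    func revokeSSH(svc *ec2.EC2) error {
        _, err := svc.RevokeSecurityGroupIngress(&ec2.RevokeSecurityGroupIngressInput{
            GroupId:    aws.String("sg-1a2b3c4d"), // required for nondefault-VPC groups
            IpProtocol: aws.String("tcp"),
            FromPort:   aws.Int64(22),
            ToPort:     aws.Int64(22),
            CidrIp:     aws.String("203.0.113.0/24"), // can't combine with a source group
        })
        return err
    }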

    ", + "Route$DestinationCidrBlock": "

    The CIDR block used for the destination match.

    ", + "Route$DestinationPrefixListId": "

    The prefix of the AWS service.

    ", + "Route$GatewayId": "

    The ID of a gateway attached to your VPC.

    ", + "Route$InstanceId": "

    The ID of a NAT instance in your VPC.

    ", + "Route$InstanceOwnerId": "

    The AWS account ID of the owner of the instance.

    ", + "Route$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "Route$VpcPeeringConnectionId": "

    The ID of the VPC peering connection.

    ", + "RouteTable$RouteTableId": "

    The ID of the route table.

    ", + "RouteTable$VpcId": "

    The ID of the VPC.

    ", + "RouteTableAssociation$RouteTableAssociationId": "

    The ID of the association between a route table and a subnet.

    ", + "RouteTableAssociation$RouteTableId": "

    The ID of the route table.

    ", + "RouteTableAssociation$SubnetId": "

    The ID of the subnet. A subnet ID is not returned for an implicit association.

    ", + "RunInstancesRequest$ImageId": "

    The ID of the AMI, which you can get by calling DescribeImages.

    ", + "RunInstancesRequest$KeyName": "

    The name of the key pair. You can create a key pair using CreateKeyPair or ImportKeyPair.

    If you do not specify a key pair, you can't connect to the instance unless you choose an AMI that is configured to allow users another way to log in.

    ", + "RunInstancesRequest$UserData": "

    The Base64-encoded MIME user data for the instances.

    ", + "RunInstancesRequest$KernelId": "

    The ID of the kernel.

    We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB in the Amazon Elastic Compute Cloud User Guide.

    ", + "RunInstancesRequest$RamdiskId": "

    The ID of the RAM disk.

    We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB in the Amazon Elastic Compute Cloud User Guide.

    ", + "RunInstancesRequest$SubnetId": "

    [EC2-VPC] The ID of the subnet to launch the instance into.

    ", + "RunInstancesRequest$PrivateIpAddress": "

    [EC2-VPC] The primary IP address. You must specify a value from the IP address range of the subnet.

    Only one private IP address can be designated as primary. Therefore, you can't specify this parameter if PrivateIpAddresses.n.Primary is set to true and PrivateIpAddresses.n.PrivateIpAddress is set to an IP address.

    Default: We select an IP address from the IP address range of the subnet.

    ", + "RunInstancesRequest$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

    Constraints: Maximum 64 ASCII characters

    ", + "RunInstancesRequest$AdditionalInfo": "

    Reserved.
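
    A sketch combining several RunInstancesRequest fields from above: a key pair for login, a client token for idempotent retries, and Base64-encoded user data. All IDs and names are placeholders:

    package example

    import (
        "encoding/base64"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ec2"
    )

    // launch starts one instance in a subnet. Retrying with the same
    // ClientToken will not launch a second instance.
    func launch(svc *ec2.EC2) (*ec2.Reservation, error) {
        userData := base64.StdEncoding.EncodeToString([]byte("#!/bin/bash\necho hello\n"))
        return svc.RunInstances(&ec2.RunInstancesInput{
            ImageId:      aws.String("ami-1a2b3c4d"),
            InstanceType: aws.String("t2.micro"),
            MinCount:     aws.Int64(1),
            MaxCount:     aws.Int64(1),
            KeyName:      aws.String("my-key"),
            SubnetId:     aws.String("subnet-1a2b3c4d"),
            ClientToken:  aws.String("launch-2017-07-26-01"), // max 64 ASCII characters
            UserData:     aws.String(userData),               // Base64-encoded MIME user data
        })
    }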

    ", + "S3Storage$Bucket": "

    The bucket in which to store the AMI. You can specify a bucket that you already own or a new bucket that Amazon EC2 creates on your behalf. If you specify a bucket that belongs to someone else, Amazon EC2 returns an error.

    ", + "S3Storage$Prefix": "

    The beginning of the file name of the AMI.

    ", + "S3Storage$AWSAccessKeyId": "

    The access key ID of the owner of the bucket. Before you specify a value for your access key ID, review and follow the guidance in Best Practices for Managing AWS Access Keys.

    ", + "S3Storage$UploadPolicySignature": "

    The signature of the Base64 encoded JSON document.

    ", + "SecurityGroup$OwnerId": "

    The AWS account ID of the owner of the security group.

    ", + "SecurityGroup$GroupName": "

    The name of the security group.

    ", + "SecurityGroup$GroupId": "

    The ID of the security group.

    ", + "SecurityGroup$Description": "

    A description of the security group.

    ", + "SecurityGroup$VpcId": "

    [EC2-VPC] The ID of the VPC for the security group.

    ", + "SecurityGroupIdStringList$member": null, + "SecurityGroupStringList$member": null, + "Snapshot$SnapshotId": "

    The ID of the snapshot. Each snapshot receives a unique identifier when it is created.

    ", + "Snapshot$VolumeId": "

    The ID of the volume that was used to create the snapshot.

    ", + "Snapshot$StateMessage": "

    Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy operation fails (for example, if the proper AWS Key Management Service (AWS KMS) permissions are not obtained), this field displays error state details to help you diagnose why the error occurred. This parameter is only returned by the DescribeSnapshots API operation.

    ", + "Snapshot$Progress": "

    The progress of the snapshot, as a percentage.

    ", + "Snapshot$OwnerId": "

    The AWS account ID of the EBS snapshot owner.

    ", + "Snapshot$Description": "

    The description for the snapshot.

    ", + "Snapshot$OwnerAlias": "

    The AWS account alias (for example, amazon, self) or AWS account ID that owns the snapshot.

    ", + "Snapshot$KmsKeyId": "

    The full ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to protect the volume encryption key for the parent volume.

    ", + "Snapshot$DataEncryptionKeyId": "

    The data encryption key identifier for the snapshot. This value is a unique identifier that corresponds to the data encryption key that was used to encrypt the original volume or snapshot copy. Because data encryption keys are inherited by volumes created from snapshots, and vice versa, if snapshots share the same data encryption key identifier, then they belong to the same volume/snapshot lineage. This parameter is only returned by the DescribeSnapshots API operation.
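
    A sketch that reads the Snapshot fields above for snapshots the caller owns; per the descriptions, DescribeSnapshots is the only operation that returns StateMessage and DataEncryptionKeyId:

    package example

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ec2"
    )

    // listOwnSnapshots prints ID, progress, and any copy-error details for
    // each snapshot owned by the calling account.
    func listOwnSnapshots(svc *ec2.EC2) error {
        out, err := svc.DescribeSnapshots(&ec2.DescribeSnapshotsInput{
            OwnerIds: []*string{aws.String("self")},
        })
        if err != nil {
            return err
        }
        for _, s := range out.Snapshots {
            fmt.Println(aws.StringValue(s.SnapshotId),
                aws.StringValue(s.Progress),
                aws.StringValue(s.StateMessage)) // error details for failed copies
        }
        return nil
    }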

    ", + "SnapshotDetail$Description": "

    A description for the snapshot.

    ", + "SnapshotDetail$Format": "

    The format of the disk image from which the snapshot is created.

    ", + "SnapshotDetail$Url": "

    The URL used to access the disk image.

    ", + "SnapshotDetail$DeviceName": "

    The block device mapping for the snapshot.

    ", + "SnapshotDetail$SnapshotId": "

    The snapshot ID of the disk being imported.

    ", + "SnapshotDetail$Progress": "

    The percentage of progress for the task.

    ", + "SnapshotDetail$StatusMessage": "

    A detailed status message for the snapshot creation.

    ", + "SnapshotDetail$Status": "

    A brief status of the snapshot creation.

    ", + "SnapshotDiskContainer$Description": "

    The description of the disk image being imported.

    ", + "SnapshotDiskContainer$Format": "

    The format of the disk image being imported.

    Valid values: RAW | VHD | VMDK | OVA

    ", + "SnapshotDiskContainer$Url": "

    The URL to the Amazon S3-based disk image being imported. It can be either an https URL (https://..) or an Amazon S3 URL (s3://..).

    ", + "SnapshotIdStringList$member": null, + "SnapshotTaskDetail$Description": "

    The description of the snapshot.

    ", + "SnapshotTaskDetail$Format": "

    The format of the disk image from which the snapshot is created.

    ", + "SnapshotTaskDetail$Url": "

    The URL of the disk image from which the snapshot is created.

    ", + "SnapshotTaskDetail$SnapshotId": "

    The snapshot ID of the disk being imported.

    ", + "SnapshotTaskDetail$Progress": "

    The percentage of completion for the import snapshot task.

    ", + "SnapshotTaskDetail$StatusMessage": "

    A detailed status message for the import snapshot task.

    ", + "SnapshotTaskDetail$Status": "

    A brief status for the import snapshot task.

    ", + "SpotDatafeedSubscription$OwnerId": "

    The AWS account ID of the account.

    ", + "SpotDatafeedSubscription$Bucket": "

    The Amazon S3 bucket where the Spot instance data feed is located.

    ", + "SpotDatafeedSubscription$Prefix": "

    The prefix that is prepended to data feed files.

    ", + "SpotFleetLaunchSpecification$ImageId": "

    The ID of the AMI.

    ", + "SpotFleetLaunchSpecification$KeyName": "

    The name of the key pair.

    ", + "SpotFleetLaunchSpecification$UserData": "

    The Base64-encoded MIME user data to make available to the instances.

    ", + "SpotFleetLaunchSpecification$AddressingType": "

    Deprecated.

    ", + "SpotFleetLaunchSpecification$KernelId": "

    The ID of the kernel.

    ", + "SpotFleetLaunchSpecification$RamdiskId": "

    The ID of the RAM disk.

    ", + "SpotFleetLaunchSpecification$SubnetId": "

    The ID of the subnet in which to launch the instances.

    ", + "SpotFleetLaunchSpecification$SpotPrice": "

    The bid price per unit hour for the specified instance type. If this value is not specified, the default is the Spot bid price specified for the fleet. To determine the bid price per unit hour, divide the Spot bid price by the value of WeightedCapacity.

    ", + "SpotFleetRequestConfig$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "SpotFleetRequestConfigData$ClientToken": "

    A unique, case-sensitive identifier you provide to ensure idempotency of your listings. This helps avoid duplicate listings. For more information, see Ensuring Idempotency.

    ", + "SpotFleetRequestConfigData$SpotPrice": "

    The bid price per unit hour.

    ", + "SpotFleetRequestConfigData$IamFleetRole": "

    Grants the Spot fleet permission to terminate Spot instances on your behalf when you cancel its Spot fleet request using CancelSpotFleetRequests or when the Spot fleet request expires, if you set terminateInstancesWithExpiration.

    ", + "SpotInstanceRequest$SpotInstanceRequestId": "

    The ID of the Spot instance request.

    ", + "SpotInstanceRequest$SpotPrice": "

    The maximum hourly price (bid) for any Spot instance launched to fulfill the request.

    ", + "SpotInstanceRequest$LaunchGroup": "

    The instance launch group. Launch groups are Spot instances that launch together and terminate together.

    ", + "SpotInstanceRequest$AvailabilityZoneGroup": "

    The Availability Zone group. If you specify the same Availability Zone group for all Spot instance requests, all Spot instances are launched in the same Availability Zone.

    ", + "SpotInstanceRequest$InstanceId": "

    The instance ID, if an instance has been launched to fulfill the Spot instance request.

    ", + "SpotInstanceRequest$LaunchedAvailabilityZone": "

    The Availability Zone in which the bid is launched.

    ", + "SpotInstanceRequestIdList$member": null, + "SpotInstanceStateFault$Code": "

    The reason code for the Spot instance state change.

    ", + "SpotInstanceStateFault$Message": "

    The message for the Spot instance state change.

    ", + "SpotInstanceStatus$Code": "

    The status code.

    ", + "SpotInstanceStatus$Message": "

    The description for the status code.

    ", + "SpotPlacement$AvailabilityZone": "

    The Availability Zone.

    ", + "SpotPlacement$GroupName": "

    The name of the placement group (for cluster instances).

    ", + "SpotPrice$SpotPrice": "

    The maximum price (bid) that you are willing to pay for a Spot instance.

    ", + "SpotPrice$AvailabilityZone": "

    The Availability Zone.

    ", + "StartInstancesRequest$AdditionalInfo": "

    Reserved.

    ", + "StateReason$Code": "

    The reason code for the state change.

    ", + "StateReason$Message": "

    The message for the state change.

    • Server.SpotInstanceTermination: A Spot Instance was terminated due to an increase in the market price.

    • Server.InternalError: An internal error occurred during instance launch, resulting in termination.

    • Server.InsufficientInstanceCapacity: There was insufficient instance capacity to satisfy the launch request.

    • Client.InternalError: A client error caused the instance to terminate on launch.

    • Client.InstanceInitiatedShutdown: The instance was shut down using the shutdown -h command from the instance.

    • Client.UserInitiatedShutdown: The instance was shut down using the Amazon EC2 API.

    • Client.VolumeLimitExceeded: The volume limit was exceeded.

    • Client.InvalidSnapshot.NotFound: The specified snapshot was not found.

    ", + "Subnet$SubnetId": "

    The ID of the subnet.

    ", + "Subnet$VpcId": "

    The ID of the VPC the subnet is in.

    ", + "Subnet$CidrBlock": "

    The CIDR block assigned to the subnet.

    ", + "Subnet$AvailabilityZone": "

    The Availability Zone of the subnet.

    ", + "SubnetIdStringList$member": null, + "Tag$Key": "

    The key of the tag.

    Constraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode characters. May not begin with aws:

    ", + "Tag$Value": "

    The value of the tag.

    Constraints: Tag values are case-sensitive and accept a maximum of 255 Unicode characters.
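
    A sketch applying the Tag constraints above with CreateTags; the resource ID and tag values are placeholders:

    package example

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ec2"
    )

    // tagResource applies two tags to an AMI. Keys may not begin with
    // "aws:"; an empty Value is stored as an empty string.
    func tagResource(svc *ec2.EC2) error {
        _, err := svc.CreateTags(&ec2.CreateTagsInput{
            Resources: []*string{aws.String("ami-1a2b3c4d")},
            Tags: []*ec2.Tag{
                {Key: aws.String("Name"), Value: aws.String("web-server")},
                {Key: aws.String("Environment"), Value: aws.String("")}, // intentionally empty
            },
        })
        return err
    }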

    ", + "TagDescription$ResourceId": "

    The ID of the resource. For example, ami-1a2b3c4d.

    ", + "TagDescription$Key": "

    The tag key.

    ", + "TagDescription$Value": "

    The tag value.

    ", + "UnassignPrivateIpAddressesRequest$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "UnsuccessfulItem$ResourceId": "

    The ID of the resource.

    ", + "UnsuccessfulItemError$Code": "

    The error code.

    ", + "UnsuccessfulItemError$Message": "

    The error message accompanying the error code.

    ", + "UserBucket$S3Bucket": "

    The name of the S3 bucket where the disk image is located.

    ", + "UserBucket$S3Key": "

    The key for the disk image.

    ", + "UserBucketDetails$S3Bucket": "

    The S3 bucket from which the disk image was created.

    ", + "UserBucketDetails$S3Key": "

    The key from which the disk image was created.

    ", + "UserData$Data": "

    The Base64-encoded MIME user data for the instance.

    ", + "UserGroupStringList$member": null, + "UserIdGroupPair$UserId": "

    The ID of an AWS account. EC2-Classic only.

    ", + "UserIdGroupPair$GroupName": "

    The name of the security group. In a request, use this parameter for a security group in EC2-Classic or a default VPC only. For a security group in a nondefault VPC, use GroupId.

    ", + "UserIdGroupPair$GroupId": "

    The ID of the security group.

    ", + "UserIdStringList$member": null, + "ValueStringList$member": null, + "VgwTelemetry$OutsideIpAddress": "

    The Internet-routable IP address of the virtual private gateway's outside interface.

    ", + "VgwTelemetry$StatusMessage": "

    If an error occurs, a description of the error.

    ", + "Volume$VolumeId": "

    The ID of the volume.

    ", + "Volume$SnapshotId": "

    The snapshot from which the volume was created, if applicable.

    ", + "Volume$AvailabilityZone": "

    The Availability Zone for the volume.

    ", + "Volume$KmsKeyId": "

    The full ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to protect the volume encryption key for the volume.

    ", + "VolumeAttachment$VolumeId": "

    The ID of the volume.

    ", + "VolumeAttachment$InstanceId": "

    The ID of the instance.

    ", + "VolumeAttachment$Device": "

    The device name.

    ", + "VolumeIdStringList$member": null, + "VolumeStatusAction$Code": "

    The code identifying the operation, for example, enable-volume-io.

    ", + "VolumeStatusAction$Description": "

    A description of the operation.

    ", + "VolumeStatusAction$EventType": "

    The event type associated with this operation.

    ", + "VolumeStatusAction$EventId": "

    The ID of the event associated with this operation.

    ", + "VolumeStatusDetails$Status": "

    The intended status of the volume.

    ", + "VolumeStatusEvent$EventType": "

    The type of this event.

    ", + "VolumeStatusEvent$Description": "

    A description of the event.

    ", + "VolumeStatusEvent$EventId": "

    The ID of this event.

    ", + "VolumeStatusItem$VolumeId": "

    The volume ID.

    ", + "VolumeStatusItem$AvailabilityZone": "

    The Availability Zone of the volume.

    ", + "Vpc$VpcId": "

    The ID of the VPC.

    ", + "Vpc$CidrBlock": "

    The CIDR block for the VPC.

    ", + "Vpc$DhcpOptionsId": "

    The ID of the set of DHCP options you've associated with the VPC (or default if the default options are associated with the VPC).

    ", + "VpcAttachment$VpcId": "

    The ID of the VPC.

    ", + "VpcClassicLink$VpcId": "

    The ID of the VPC.

    ", + "VpcClassicLinkIdList$member": null, + "VpcEndpoint$VpcEndpointId": "

    The ID of the VPC endpoint.

    ", + "VpcEndpoint$VpcId": "

    The ID of the VPC to which the endpoint is associated.

    ", + "VpcEndpoint$ServiceName": "

    The name of the AWS service to which the endpoint is associated.

    ", + "VpcEndpoint$PolicyDocument": "

    The policy document associated with the endpoint.

    ", + "VpcIdStringList$member": null, + "VpcPeeringConnection$VpcPeeringConnectionId": "

    The ID of the VPC peering connection.

    ", + "VpcPeeringConnectionStateReason$Message": "

    A message that provides more information about the status, if applicable.

    ", + "VpcPeeringConnectionVpcInfo$CidrBlock": "

    The CIDR block for the VPC.

    ", + "VpcPeeringConnectionVpcInfo$OwnerId": "

    The AWS account ID of the VPC owner.

    ", + "VpcPeeringConnectionVpcInfo$VpcId": "

    The ID of the VPC.

    ", + "VpnConnection$VpnConnectionId": "

    The ID of the VPN connection.

    ", + "VpnConnection$CustomerGatewayConfiguration": "

    The configuration information for the VPN connection's customer gateway (in the native XML format). This element is always present in the CreateVpnConnection response; however, it's present in the DescribeVpnConnections response only if the VPN connection is in the pending or available state.

    ", + "VpnConnection$CustomerGatewayId": "

    The ID of the customer gateway at your end of the VPN connection.

    ", + "VpnConnection$VpnGatewayId": "

    The ID of the virtual private gateway at the AWS side of the VPN connection.

    ", + "VpnConnectionIdStringList$member": null, + "VpnGateway$VpnGatewayId": "

    The ID of the virtual private gateway.

    ", + "VpnGateway$AvailabilityZone": "

    The Availability Zone where the virtual private gateway was created.

    ", + "VpnGatewayIdStringList$member": null, + "VpnStaticRoute$DestinationCidrBlock": "

    The CIDR block associated with the local subnet of the customer data center.

    ", + "ZoneNameStringList$member": null, + "NewDhcpConfiguration$Key": null, + "RequestSpotLaunchSpecification$ImageId": "

    The ID of the AMI.

    ", + "RequestSpotLaunchSpecification$KeyName": "

    The name of the key pair.

    ", + "RequestSpotLaunchSpecification$UserData": "

    The Base64-encoded MIME user data to make available to the instances.

    ", + "RequestSpotLaunchSpecification$AddressingType": "

    Deprecated.

    ", + "RequestSpotLaunchSpecification$KernelId": "

    The ID of the kernel.

    ", + "RequestSpotLaunchSpecification$RamdiskId": "

    The ID of the RAM disk.

    ", + "RequestSpotLaunchSpecification$SubnetId": "

    The ID of the subnet in which to launch the instance.

    " + } + }, + "Subnet": { + "base": "

    Describes a subnet.

    ", + "refs": { + "CreateSubnetResult$Subnet": "

    Information about the subnet.

    ", + "SubnetList$member": null + } + }, + "SubnetIdStringList": { + "base": null, + "refs": { + "DescribeSubnetsRequest$SubnetIds": "

    One or more subnet IDs.

    Default: Describes all your subnets.

    " + } + }, + "SubnetList": { + "base": null, + "refs": { + "DescribeSubnetsResult$Subnets": "

    Information about one or more subnets.

    " + } + }, + "SubnetState": { + "base": null, + "refs": { + "Subnet$State": "

    The current state of the subnet.

    " + } + }, + "SummaryStatus": { + "base": null, + "refs": { + "InstanceStatusSummary$Status": "

    The status.

    " + } + }, + "Tag": { + "base": "

    Describes a tag.

    ", + "refs": { + "TagList$member": null + } + }, + "TagDescription": { + "base": "

    Describes a tag.

    ", + "refs": { + "TagDescriptionList$member": null + } + }, + "TagDescriptionList": { + "base": null, + "refs": { + "DescribeTagsResult$Tags": "

    A list of tags.

    " + } + }, + "TagList": { + "base": null, + "refs": { + "ClassicLinkInstance$Tags": "

    Any tags assigned to the instance.

    ", + "ConversionTask$Tags": "

    Any tags assigned to the task.

    ", + "CreateTagsRequest$Tags": "

    One or more tags. The value parameter is required, but if you don't want the tag to have a value, specify the parameter with no value, and we set the value to an empty string.

    ", + "CustomerGateway$Tags": "

    Any tags assigned to the customer gateway.

    ", + "DeleteTagsRequest$Tags": "

    One or more tags to delete. If you omit the value parameter, we delete the tag regardless of its value. If you specify this parameter with an empty string as the value, we delete the key only if its value is an empty string.

    ", + "DhcpOptions$Tags": "

    Any tags assigned to the DHCP options set.

    ", + "Image$Tags": "

    Any tags assigned to the image.

    ", + "Instance$Tags": "

    Any tags assigned to the instance.

    ", + "InternetGateway$Tags": "

    Any tags assigned to the Internet gateway.

    ", + "NetworkAcl$Tags": "

    Any tags assigned to the network ACL.

    ", + "NetworkInterface$TagSet": "

    Any tags assigned to the network interface.

    ", + "ReservedInstances$Tags": "

    Any tags assigned to the resource.

    ", + "ReservedInstancesListing$Tags": "

    Any tags assigned to the resource.

    ", + "RouteTable$Tags": "

    Any tags assigned to the route table.

    ", + "SecurityGroup$Tags": "

    Any tags assigned to the security group.

    ", + "Snapshot$Tags": "

    Any tags assigned to the snapshot.

    ", + "SpotInstanceRequest$Tags": "

    Any tags assigned to the resource.

    ", + "Subnet$Tags": "

    Any tags assigned to the subnet.

    ", + "Volume$Tags": "

    Any tags assigned to the volume.

    ", + "Vpc$Tags": "

    Any tags assigned to the VPC.

    ", + "VpcClassicLink$Tags": "

    Any tags assigned to the VPC.

    ", + "VpcPeeringConnection$Tags": "

    Any tags assigned to the resource.

    ", + "VpnConnection$Tags": "

    Any tags assigned to the VPN connection.

    ", + "VpnGateway$Tags": "

    Any tags assigned to the virtual private gateway.

    " + } + }, + "TelemetryStatus": { + "base": null, + "refs": { + "VgwTelemetry$Status": "

    The status of the VPN tunnel.

    " + } + }, + "Tenancy": { + "base": null, + "refs": { + "CreateVpcRequest$InstanceTenancy": "

    The supported tenancy options for instances launched into the VPC. A value of default means that instances can be launched with any tenancy; a value of dedicated means all instances launched into the VPC are launched as dedicated tenancy instances regardless of the tenancy assigned to the instance at launch. Dedicated tenancy instances run on single-tenant hardware.

    Default: default

    ", + "DescribeReservedInstancesOfferingsRequest$InstanceTenancy": "

    The tenancy of the Reserved Instance offering. A Reserved Instance with dedicated tenancy runs on single-tenant hardware and can only be launched within a VPC.

    Default: default

    ", + "Placement$Tenancy": "

    The tenancy of the instance (if the instance is running in a VPC). An instance with a tenancy of dedicated runs on single-tenant hardware.

    ", + "ReservedInstances$InstanceTenancy": "

    The tenancy of the reserved instance.

    ", + "ReservedInstancesOffering$InstanceTenancy": "

    The tenancy of the reserved instance.

    ", + "Vpc$InstanceTenancy": "

    The allowed tenancy of instances launched into the VPC.

    " + } + }, + "TerminateInstancesRequest": { + "base": null, + "refs": { + } + }, + "TerminateInstancesResult": { + "base": null, + "refs": { + } + }, + "TrafficType": { + "base": null, + "refs": { + "CreateFlowLogsRequest$TrafficType": "

    The type of traffic to log.

    ", + "FlowLog$TrafficType": "

    The type of traffic captured for the flow log.

    " + } + }, + "UnassignPrivateIpAddressesRequest": { + "base": null, + "refs": { + } + }, + "UnmonitorInstancesRequest": { + "base": null, + "refs": { + } + }, + "UnmonitorInstancesResult": { + "base": null, + "refs": { + } + }, + "UnsuccessfulItem": { + "base": "

    Information about items that were not successfully processed in a batch call.

    ", + "refs": { + "UnsuccessfulItemSet$member": null + } + }, + "UnsuccessfulItemError": { + "base": "

    Information about the error that occurred. For more information about errors, see Error Codes.

    ", + "refs": { + "UnsuccessfulItem$Error": "

    Information about the error.

    " + } + }, + "UnsuccessfulItemSet": { + "base": null, + "refs": { + "CreateFlowLogsResult$Unsuccessful": "

    Information about the flow logs that could not be created successfully.

    ", + "DeleteFlowLogsResult$Unsuccessful": "

    Information about the flow logs that could not be deleted successfully.

    ", + "DeleteVpcEndpointsResult$Unsuccessful": "

    Information about the endpoints that were not successfully deleted.

    " + } + }, + "UserBucket": { + "base": "

    Describes the S3 bucket for the disk image.

    ", + "refs": { + "ImageDiskContainer$UserBucket": "

    The S3 bucket for the disk image.

    ", + "SnapshotDiskContainer$UserBucket": null + } + }, + "UserBucketDetails": { + "base": "

    Describes the S3 bucket for the disk image.

    ", + "refs": { + "SnapshotDetail$UserBucket": null, + "SnapshotTaskDetail$UserBucket": "

    The S3 bucket for the disk image.

    " + } + }, + "UserData": { + "base": "

    Describes the user data to be made available to an instance.

    ", + "refs": { + "ImportInstanceLaunchSpecification$UserData": "

    The Base64-encoded MIME user data to be made available to the instance.

    " + } + }, + "UserGroupStringList": { + "base": null, + "refs": { + "ModifyImageAttributeRequest$UserGroups": "

    One or more user groups. This is only valid when modifying the launchPermission attribute.

    " + } + }, + "UserIdGroupPair": { + "base": "

    Describes a security group and AWS account ID pair.

    ", + "refs": { + "UserIdGroupPairList$member": null + } + }, + "UserIdGroupPairList": { + "base": null, + "refs": { + "IpPermission$UserIdGroupPairs": "

    One or more security group and AWS account ID pairs.

    " + } + }, + "UserIdStringList": { + "base": null, + "refs": { + "ModifyImageAttributeRequest$UserIds": "

    One or more AWS account IDs. This is only valid when modifying the launchPermission attribute.

    ", + "ModifySnapshotAttributeRequest$UserIds": "

    The account ID to modify for the snapshot.

    " + } + }, + "ValueStringList": { + "base": null, + "refs": { + "CancelSpotFleetRequestsRequest$SpotFleetRequestIds": "

    The IDs of the Spot fleet requests.

    ", + "CreateFlowLogsRequest$ResourceIds": "

    One or more subnet, network interface, or VPC IDs.

    ", + "CreateFlowLogsResult$FlowLogIds": "

    The IDs of the flow logs.
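
    A sketch of the flow log fields above in a CreateFlowLogs call; the VPC ID, log group, and role ARN are placeholders:

    package example

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ec2"
    )

    // enableFlowLogs captures all traffic for one VPC. Per-resource failures
    // come back in the Unsuccessful list rather than as an error.
    func enableFlowLogs(svc *ec2.EC2) (*ec2.CreateFlowLogsOutput, error) {
        return svc.CreateFlowLogs(&ec2.CreateFlowLogsInput{
            ResourceIds:              []*string{aws.String("vpc-1a2b3c4d")},
            ResourceType:             aws.String("VPC"), // or Subnet, NetworkInterface
            TrafficType:              aws.String("ALL"), // ACCEPT, REJECT, or ALL
            LogGroupName:             aws.String("my-flow-logs"),
            DeliverLogsPermissionArn: aws.String("arn:aws:iam::123456789012:role/flow-logs"),
        })
    }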

    ", + "CreateVpcEndpointRequest$RouteTableIds": "

    One or more route table IDs.

    ", + "DeleteFlowLogsRequest$FlowLogIds": "

    One or more flow log IDs.

    ", + "DeleteVpcEndpointsRequest$VpcEndpointIds": "

    One or more endpoint IDs.

    ", + "DescribeFlowLogsRequest$FlowLogIds": "

    One or more flow log IDs.

    ", + "DescribeInternetGatewaysRequest$InternetGatewayIds": "

    One or more Internet gateway IDs.

    Default: Describes all your Internet gateways.

    ", + "DescribeMovingAddressesRequest$PublicIps": "

    One or more Elastic IP addresses.

    ", + "DescribeNetworkAclsRequest$NetworkAclIds": "

    One or more network ACL IDs.

    Default: Describes all your network ACLs.

    ", + "DescribePrefixListsRequest$PrefixListIds": "

    One or more prefix list IDs.

    ", + "DescribeRouteTablesRequest$RouteTableIds": "

    One or more route table IDs.

    Default: Describes all your route tables.

    ", + "DescribeSpotFleetRequestsRequest$SpotFleetRequestIds": "

    The IDs of the Spot fleet requests.

    ", + "DescribeVpcEndpointServicesResult$ServiceNames": "

    A list of supported AWS services.

    ", + "DescribeVpcEndpointsRequest$VpcEndpointIds": "

    One or more endpoint IDs.

    ", + "DescribeVpcPeeringConnectionsRequest$VpcPeeringConnectionIds": "

    One or more VPC peering connection IDs.

    Default: Describes all your VPC peering connections.

    ", + "Filter$Values": "

    One or more filter values. Filter values are case-sensitive.

    ", + "ModifyVpcEndpointRequest$AddRouteTableIds": "

    One or more route tables IDs to associate with the endpoint.

    ", + "ModifyVpcEndpointRequest$RemoveRouteTableIds": "

    One or more route table IDs to disassociate from the endpoint.

    ", + "PrefixList$Cidrs": "

    The IP address range of the AWS service.

    ", + "VpcEndpoint$RouteTableIds": "

    One or more route tables associated with the endpoint.

    ", + "NewDhcpConfiguration$Values": null, + "RequestSpotLaunchSpecification$SecurityGroups": null, + "RequestSpotLaunchSpecification$SecurityGroupIds": null + } + }, + "VgwTelemetry": { + "base": "

    Describes telemetry for a VPN tunnel.

    ", + "refs": { + "VgwTelemetryList$member": null + } + }, + "VgwTelemetryList": { + "base": null, + "refs": { + "VpnConnection$VgwTelemetry": "

    Information about the VPN tunnel.

    " + } + }, + "VirtualizationType": { + "base": null, + "refs": { + "Image$VirtualizationType": "

    The type of virtualization of the AMI.

    ", + "Instance$VirtualizationType": "

    The virtualization type of the instance.

    " + } + }, + "Volume": { + "base": "

    Describes a volume.

    ", + "refs": { + "VolumeList$member": null + } + }, + "VolumeAttachment": { + "base": "

    Describes volume attachment details.

    ", + "refs": { + "VolumeAttachmentList$member": null + } + }, + "VolumeAttachmentList": { + "base": null, + "refs": { + "Volume$Attachments": "

    Information about the volume attachments.

    " + } + }, + "VolumeAttachmentState": { + "base": null, + "refs": { + "VolumeAttachment$State": "

    The attachment state of the volume.

    " + } + }, + "VolumeAttributeName": { + "base": null, + "refs": { + "DescribeVolumeAttributeRequest$Attribute": "

    The attribute of the volume.

    " + } + }, + "VolumeDetail": { + "base": "

    Describes an EBS volume.

    ", + "refs": { + "DiskImage$Volume": "

    Information about the volume.

    ", + "ImportVolumeRequest$Volume": "

    The volume size.

    " + } + }, + "VolumeIdStringList": { + "base": null, + "refs": { + "DescribeVolumeStatusRequest$VolumeIds": "

    One or more volume IDs.

    Default: Describes all your volumes.

    ", + "DescribeVolumesRequest$VolumeIds": "

    One or more volume IDs.

    " + } + }, + "VolumeList": { + "base": null, + "refs": { + "DescribeVolumesResult$Volumes": "

    Information about the volumes.

    " + } + }, + "VolumeState": { + "base": null, + "refs": { + "Volume$State": "

    The volume state.

    " + } + }, + "VolumeStatusAction": { + "base": "

    Describes a volume status operation code.

    ", + "refs": { + "VolumeStatusActionsList$member": null + } + }, + "VolumeStatusActionsList": { + "base": null, + "refs": { + "VolumeStatusItem$Actions": "

    The details of the operation.

    " + } + }, + "VolumeStatusDetails": { + "base": "

    Describes a volume status.

    ", + "refs": { + "VolumeStatusDetailsList$member": null + } + }, + "VolumeStatusDetailsList": { + "base": null, + "refs": { + "VolumeStatusInfo$Details": "

    The details of the volume status.

    " + } + }, + "VolumeStatusEvent": { + "base": "

    Describes a volume status event.

    ", + "refs": { + "VolumeStatusEventsList$member": null + } + }, + "VolumeStatusEventsList": { + "base": null, + "refs": { + "VolumeStatusItem$Events": "

    A list of events associated with the volume.

    " + } + }, + "VolumeStatusInfo": { + "base": "

    Describes the status of a volume.

    ", + "refs": { + "VolumeStatusItem$VolumeStatus": "

    The volume status.

    " + } + }, + "VolumeStatusInfoStatus": { + "base": null, + "refs": { + "VolumeStatusInfo$Status": "

    The status of the volume.

    " + } + }, + "VolumeStatusItem": { + "base": "

    Describes the volume status.

    ", + "refs": { + "VolumeStatusList$member": null + } + }, + "VolumeStatusList": { + "base": null, + "refs": { + "DescribeVolumeStatusResult$VolumeStatuses": "

    A list of volumes.

    " + } + }, + "VolumeStatusName": { + "base": null, + "refs": { + "VolumeStatusDetails$Name": "

    The name of the volume status.

    " + } + }, + "VolumeType": { + "base": null, + "refs": { + "CreateVolumeRequest$VolumeType": "

    The volume type. This can be gp2 for General Purpose (SSD) volumes, io1 for Provisioned IOPS (SSD) volumes, or standard for Magnetic volumes.

    Default: standard

    ", + "EbsBlockDevice$VolumeType": "

    The volume type: gp2 for General Purpose (SSD) volumes, io1 for Provisioned IOPS (SSD) volumes, or standard for Magnetic volumes.

    Default: standard

    ", + "Volume$VolumeType": "

    The volume type. This can be gp2 for General Purpose (SSD) volumes, io1 for Provisioned IOPS (SSD) volumes, or standard for Magnetic volumes.
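
    A sketch of choosing a volume type at creation time; the Availability Zone is a placeholder, and io1 would additionally require Iops:

    package example

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ec2"
    )

    // createGP2Volume creates a 100 GiB General Purpose (SSD) volume.
    func createGP2Volume(svc *ec2.EC2) (*ec2.Volume, error) {
        return svc.CreateVolume(&ec2.CreateVolumeInput{
            AvailabilityZone: aws.String("us-east-1a"),
            Size:             aws.Int64(100),    // GiB
            VolumeType:       aws.String("gp2"), // gp2 | io1 | standard
        })
    }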

    " + } + }, + "Vpc": { + "base": "

    Describes a VPC.

    ", + "refs": { + "CreateVpcResult$Vpc": "

    Information about the VPC.

    ", + "VpcList$member": null + } + }, + "VpcAttachment": { + "base": "

    Describes an attachment between a virtual private gateway and a VPC.

    ", + "refs": { + "AttachVpnGatewayResult$VpcAttachment": "

    Information about the attachment.

    ", + "VpcAttachmentList$member": null + } + }, + "VpcAttachmentList": { + "base": null, + "refs": { + "VpnGateway$VpcAttachments": "

    Any VPCs attached to the virtual private gateway.

    " + } + }, + "VpcAttributeName": { + "base": null, + "refs": { + "DescribeVpcAttributeRequest$Attribute": "

    The VPC attribute.

    " + } + }, + "VpcClassicLink": { + "base": "

    Describes whether a VPC is enabled for ClassicLink.

    ", + "refs": { + "VpcClassicLinkList$member": null + } + }, + "VpcClassicLinkIdList": { + "base": null, + "refs": { + "DescribeVpcClassicLinkRequest$VpcIds": "

    One or more VPCs for which you want to describe the ClassicLink status.

    " + } + }, + "VpcClassicLinkList": { + "base": null, + "refs": { + "DescribeVpcClassicLinkResult$Vpcs": "

    The ClassicLink status of one or more VPCs.

    " + } + }, + "VpcEndpoint": { + "base": "

    Describes a VPC endpoint.

    ", + "refs": { + "CreateVpcEndpointResult$VpcEndpoint": "

    Information about the endpoint.

    ", + "VpcEndpointSet$member": null + } + }, + "VpcEndpointSet": { + "base": null, + "refs": { + "DescribeVpcEndpointsResult$VpcEndpoints": "

    Information about the endpoints.

    " + } + }, + "VpcIdStringList": { + "base": null, + "refs": { + "DescribeVpcsRequest$VpcIds": "

    One or more VPC IDs.

    Default: Describes all your VPCs.

    " + } + }, + "VpcList": { + "base": null, + "refs": { + "DescribeVpcsResult$Vpcs": "

    Information about one or more VPCs.

    " + } + }, + "VpcPeeringConnection": { + "base": "

    Describes a VPC peering connection.

    ", + "refs": { + "AcceptVpcPeeringConnectionResult$VpcPeeringConnection": "

    Information about the VPC peering connection.

    ", + "CreateVpcPeeringConnectionResult$VpcPeeringConnection": "

    Information about the VPC peering connection.

    ", + "VpcPeeringConnectionList$member": null + } + }, + "VpcPeeringConnectionList": { + "base": null, + "refs": { + "DescribeVpcPeeringConnectionsResult$VpcPeeringConnections": "

    Information about the VPC peering connections.

    " + } + }, + "VpcPeeringConnectionStateReason": { + "base": "

    Describes the status of a VPC peering connection.

    ", + "refs": { + "VpcPeeringConnection$Status": "

    The status of the VPC peering connection.

    " + } + }, + "VpcPeeringConnectionStateReasonCode": { + "base": null, + "refs": { + "VpcPeeringConnectionStateReason$Code": "

    The status of the VPC peering connection.

    " + } + }, + "VpcPeeringConnectionVpcInfo": { + "base": "

    Describes a VPC in a VPC peering connection.

    ", + "refs": { + "VpcPeeringConnection$AccepterVpcInfo": "

    Information about the accepter (peer) VPC.

    ", + "VpcPeeringConnection$RequesterVpcInfo": "

    Information about the requester VPC.

    " + } + }, + "VpcState": { + "base": null, + "refs": { + "Vpc$State": "

    The current state of the VPC.

    " + } + }, + "VpnConnection": { + "base": "

    Describes a VPN connection.

    ", + "refs": { + "CreateVpnConnectionResult$VpnConnection": "

    Information about the VPN connection.

    ", + "VpnConnectionList$member": null + } + }, + "VpnConnectionIdStringList": { + "base": null, + "refs": { + "DescribeVpnConnectionsRequest$VpnConnectionIds": "

    One or more VPN connection IDs.

    Default: Describes your VPN connections.

    " + } + }, + "VpnConnectionList": { + "base": null, + "refs": { + "DescribeVpnConnectionsResult$VpnConnections": "

    Information about one or more VPN connections.

    " + } + }, + "VpnConnectionOptions": { + "base": "

    Describes VPN connection options.

    ", + "refs": { + "VpnConnection$Options": "

    The VPN connection options.

    " + } + }, + "VpnConnectionOptionsSpecification": { + "base": "

    Describes VPN connection options.

    ", + "refs": { + "CreateVpnConnectionRequest$Options": "

    Indicates whether the VPN connection requires static routes. If you are creating a VPN connection for a device that does not support BGP, you must specify true.

    Default: false

    " + } + }, + "VpnGateway": { + "base": "

    Describes a virtual private gateway.

    ", + "refs": { + "CreateVpnGatewayResult$VpnGateway": "

    Information about the virtual private gateway.

    ", + "VpnGatewayList$member": null + } + }, + "VpnGatewayIdStringList": { + "base": null, + "refs": { + "DescribeVpnGatewaysRequest$VpnGatewayIds": "

    One or more virtual private gateway IDs.

    Default: Describes all your virtual private gateways.

    " + } + }, + "VpnGatewayList": { + "base": null, + "refs": { + "DescribeVpnGatewaysResult$VpnGateways": "

    Information about one or more virtual private gateways.

    " + } + }, + "VpnState": { + "base": null, + "refs": { + "VpnConnection$State": "

    The current state of the VPN connection.

    ", + "VpnGateway$State": "

    The current state of the virtual private gateway.

    ", + "VpnStaticRoute$State": "

    The current state of the static route.

    " + } + }, + "VpnStaticRoute": { + "base": "

    Describes a static route for a VPN connection.

    ", + "refs": { + "VpnStaticRouteList$member": null + } + }, + "VpnStaticRouteList": { + "base": null, + "refs": { + "VpnConnection$Routes": "

    The static routes associated with the VPN connection.

    " + } + }, + "VpnStaticRouteSource": { + "base": null, + "refs": { + "VpnStaticRoute$Source": "

    Indicates how the routes were provided.

    " + } + }, + "ZoneNameStringList": { + "base": null, + "refs": { + "DescribeAvailabilityZonesRequest$ZoneNames": "

    The names of one or more Availability Zones.

    " + } + }, + "NewDhcpConfigurationList": { + "base": null, + "refs": { + "CreateDhcpOptionsRequest$DhcpConfigurations": "

    A DHCP configuration option.

    " + } + }, + "NewDhcpConfiguration": { + "base": null, + "refs": { + "NewDhcpConfigurationList$member": null + } + }, + "DhcpConfigurationValueList": { + "base": null, + "refs": { + "DhcpConfiguration$Values": "

    One or more values for the DHCP option.

    " + } + }, + "Blob": { + "base": null, + "refs": { + "ImportKeyPairRequest$PublicKeyMaterial": "

    The public key. You must base64-encode the public key material before sending it to AWS.

    ", + "S3Storage$UploadPolicy": "

    A Base64-encoded Amazon S3 upload policy that gives Amazon EC2 permission to upload items into Amazon S3 on your behalf.

    ", + "BlobAttributeValue$Value": null + } + }, + "BlobAttributeValue": { + "base": null, + "refs": { + "ModifyInstanceAttributeRequest$UserData": "

    Changes the instance's user data to the specified value.

    " + } + }, + "RequestSpotLaunchSpecification": { + "base": "

    Describes the launch specification for an instance.

    ", + "refs": { + "RequestSpotInstancesRequest$LaunchSpecification": null + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2015-04-15/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2015-04-15/paginators-1.json new file mode 100644 index 000000000..740f2e36a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2015-04-15/paginators-1.json @@ -0,0 +1,125 @@ +{ + "pagination": { + "DescribeAccountAttributes": { + "result_key": "AccountAttributes" + }, + "DescribeAddresses": { + "result_key": "Addresses" + }, + "DescribeAvailabilityZones": { + "result_key": "AvailabilityZones" + }, + "DescribeBundleTasks": { + "result_key": "BundleTasks" + }, + "DescribeConversionTasks": { + "result_key": "ConversionTasks" + }, + "DescribeCustomerGateways": { + "result_key": "CustomerGateways" + }, + "DescribeDhcpOptions": { + "result_key": "DhcpOptions" + }, + "DescribeExportTasks": { + "result_key": "ExportTasks" + }, + "DescribeImages": { + "result_key": "Images" + }, + "DescribeInstanceStatus": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "InstanceStatuses" + }, + "DescribeInstances": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Reservations" + }, + "DescribeInternetGateways": { + "result_key": "InternetGateways" + }, + "DescribeKeyPairs": { + "result_key": "KeyPairs" + }, + "DescribeNetworkAcls": { + "result_key": "NetworkAcls" + }, + "DescribeNetworkInterfaces": { + "result_key": "NetworkInterfaces" + }, + "DescribePlacementGroups": { + "result_key": "PlacementGroups" + }, + "DescribeRegions": { + "result_key": "Regions" + }, + "DescribeReservedInstances": { + "result_key": "ReservedInstances" + }, + "DescribeReservedInstancesListings": { + "result_key": "ReservedInstancesListings" + }, + "DescribeReservedInstancesOfferings": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "ReservedInstancesOfferings" + }, + "DescribeReservedInstancesModifications": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "ReservedInstancesModifications" + }, + "DescribeRouteTables": { + "result_key": "RouteTables" + }, + "DescribeSecurityGroups": { + "result_key": "SecurityGroups" + }, + "DescribeSnapshots": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "Snapshots" + }, + "DescribeSpotInstanceRequests": { + "result_key": "SpotInstanceRequests" + }, + "DescribeSpotPriceHistory": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "SpotPriceHistory" + }, + "DescribeSubnets": { + "result_key": "Subnets" + }, + "DescribeTags": { + "result_key": "Tags" + }, + "DescribeVolumeStatus": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "VolumeStatuses" + }, + "DescribeVolumes": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Volumes" + }, + "DescribeVpcs": { + "result_key": "Vpcs" + }, + "DescribeVpnConnections": { + "result_key": "VpnConnections" + }, + "DescribeVpnGateways": { + "result_key": "VpnGateways" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2015-04-15/waiters-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2015-04-15/waiters-2.json new file mode 100644 index 000000000..0599f2422 --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2015-04-15/waiters-2.json @@ -0,0 +1,494 @@ +{ + "version": 2, + "waiters": { + "InstanceExists": { + "delay": 5, + "maxAttempts": 40, + "operation": "DescribeInstances", + "acceptors": [ + { + "matcher": "status", + "expected": 200, + "state": "success" + }, + { + "matcher": "error", + "expected": "InvalidInstanceID.NotFound", + "state": "retry" + } + ] + }, + "BundleTaskComplete": { + "delay": 15, + "operation": "DescribeBundleTasks", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "complete", + "matcher": "pathAll", + "state": "success", + "argument": "BundleTasks[].State" + }, + { + "expected": "failed", + "matcher": "pathAny", + "state": "failure", + "argument": "BundleTasks[].State" + } + ] + }, + "ConversionTaskCancelled": { + "delay": 15, + "operation": "DescribeConversionTasks", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "cancelled", + "matcher": "pathAll", + "state": "success", + "argument": "ConversionTasks[].State" + } + ] + }, + "ConversionTaskCompleted": { + "delay": 15, + "operation": "DescribeConversionTasks", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "completed", + "matcher": "pathAll", + "state": "success", + "argument": "ConversionTasks[].State" + }, + { + "expected": "cancelled", + "matcher": "pathAny", + "state": "failure", + "argument": "ConversionTasks[].State" + }, + { + "expected": "cancelling", + "matcher": "pathAny", + "state": "failure", + "argument": "ConversionTasks[].State" + } + ] + }, + "ConversionTaskDeleted": { + "delay": 15, + "operation": "DescribeConversionTasks", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "deleted", + "matcher": "pathAll", + "state": "success", + "argument": "ConversionTasks[].State" + } + ] + }, + "CustomerGatewayAvailable": { + "delay": 15, + "operation": "DescribeCustomerGateways", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "CustomerGateways[].State" + }, + { + "expected": "deleted", + "matcher": "pathAny", + "state": "failure", + "argument": "CustomerGateways[].State" + }, + { + "expected": "deleting", + "matcher": "pathAny", + "state": "failure", + "argument": "CustomerGateways[].State" + } + ] + }, + "ExportTaskCancelled": { + "delay": 15, + "operation": "DescribeExportTasks", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "cancelled", + "matcher": "pathAll", + "state": "success", + "argument": "ExportTasks[].State" + } + ] + }, + "ExportTaskCompleted": { + "delay": 15, + "operation": "DescribeExportTasks", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "completed", + "matcher": "pathAll", + "state": "success", + "argument": "ExportTasks[].State" + } + ] + }, + "ImageAvailable": { + "operation": "DescribeImages", + "maxAttempts": 40, + "delay": 15, + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "argument": "Images[].State", + "expected": "available" + }, + { + "state": "failure", + "matcher": "pathAny", + "argument": "Images[].State", + "expected": "failed" + } + ] + }, + "InstanceRunning": { + "delay": 15, + "operation": "DescribeInstances", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "running", + "matcher": "pathAll", + "state": "success", + "argument": "Reservations[].Instances[].State.Name" + }, + { + "expected": "shutting-down", + "matcher": "pathAny", + "state": "failure", + "argument": "Reservations[].Instances[].State.Name" + }, + { + "expected": "terminated", + "matcher": "pathAny", + 
"state": "failure", + "argument": "Reservations[].Instances[].State.Name" + }, + { + "expected": "stopping", + "matcher": "pathAny", + "state": "failure", + "argument": "Reservations[].Instances[].State.Name" + } + ] + }, + "InstanceStatusOk": { + "operation": "DescribeInstanceStatus", + "maxAttempts": 40, + "delay": 15, + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "argument": "InstanceStatuses[].InstanceStatus.Status", + "expected": "ok" + } + ] + }, + "InstanceStopped": { + "delay": 15, + "operation": "DescribeInstances", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "stopped", + "matcher": "pathAll", + "state": "success", + "argument": "Reservations[].Instances[].State.Name" + }, + { + "expected": "pending", + "matcher": "pathAny", + "state": "failure", + "argument": "Reservations[].Instances[].State.Name" + }, + { + "expected": "terminated", + "matcher": "pathAny", + "state": "failure", + "argument": "Reservations[].Instances[].State.Name" + } + ] + }, + "InstanceTerminated": { + "delay": 15, + "operation": "DescribeInstances", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "terminated", + "matcher": "pathAll", + "state": "success", + "argument": "Reservations[].Instances[].State.Name" + }, + { + "expected": "pending", + "matcher": "pathAny", + "state": "failure", + "argument": "Reservations[].Instances[].State.Name" + }, + { + "expected": "stopping", + "matcher": "pathAny", + "state": "failure", + "argument": "Reservations[].Instances[].State.Name" + } + ] + }, + "KeyPairExists": { + "operation": "DescribeKeyPairs", + "delay": 5, + "maxAttempts": 6, + "acceptors": [ + { + "expected": true, + "matcher": "pathAll", + "state": "success", + "argument": "length(KeyPairs[].KeyName) > `0`" + }, + { + "expected": "InvalidKeyPairNotFound", + "matcher": "error", + "state": "retry" + } + ] + }, + "NetworkInterfaceAvailable": { + "operation": "DescribeNetworkInterfaces", + "delay": 20, + "maxAttempts": 10, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "NetworkInterfaces[].Status" + }, + { + "expected": "InvalidNetworkInterfaceIDNotFound", + "matcher": "error", + "state": "failure" + } + ] + }, + "PasswordDataAvailable": { + "operation": "GetPasswordData", + "maxAttempts": 40, + "delay": 15, + "acceptors": [ + { + "state": "success", + "matcher": "path", + "argument": "length(PasswordData) > `0`", + "expected": true + } + ] + }, + "SnapshotCompleted": { + "delay": 15, + "operation": "DescribeSnapshots", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "completed", + "matcher": "pathAll", + "state": "success", + "argument": "Snapshots[].State" + } + ] + }, + "SpotInstanceRequestFulfilled": { + "operation": "DescribeSpotInstanceRequests", + "maxAttempts": 40, + "delay": 15, + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "argument": "SpotInstanceRequests[].Status.Code", + "expected": "fulfilled" + }, + { + "state": "failure", + "matcher": "pathAny", + "argument": "SpotInstanceRequests[].Status.Code", + "expected": "schedule-expired" + }, + { + "state": "failure", + "matcher": "pathAny", + "argument": "SpotInstanceRequests[].Status.Code", + "expected": "canceled-before-fulfillment" + }, + { + "state": "failure", + "matcher": "pathAny", + "argument": "SpotInstanceRequests[].Status.Code", + "expected": "bad-parameters" + }, + { + "state": "failure", + "matcher": "pathAny", + "argument": "SpotInstanceRequests[].Status.Code", + "expected": "system-error" + } + ] + }, + 
"SubnetAvailable": { + "delay": 15, + "operation": "DescribeSubnets", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "Subnets[].State" + } + ] + }, + "SystemStatusOk": { + "operation": "DescribeInstanceStatus", + "maxAttempts": 40, + "delay": 15, + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "argument": "InstanceStatuses[].SystemStatus.Status", + "expected": "ok" + } + ] + }, + "VolumeAvailable": { + "delay": 15, + "operation": "DescribeVolumes", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "Volumes[].State" + }, + { + "expected": "deleted", + "matcher": "pathAny", + "state": "failure", + "argument": "Volumes[].State" + } + ] + }, + "VolumeDeleted": { + "delay": 15, + "operation": "DescribeVolumes", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "deleted", + "matcher": "pathAll", + "state": "success", + "argument": "Volumes[].State" + }, + { + "matcher": "error", + "expected": "InvalidVolumeNotFound", + "state": "success" + } + ] + }, + "VolumeInUse": { + "delay": 15, + "operation": "DescribeVolumes", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "in-use", + "matcher": "pathAll", + "state": "success", + "argument": "Volumes[].State" + }, + { + "expected": "deleted", + "matcher": "pathAny", + "state": "failure", + "argument": "Volumes[].State" + } + ] + }, + "VpcAvailable": { + "delay": 15, + "operation": "DescribeVpcs", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "Vpcs[].State" + } + ] + }, + "VpnConnectionAvailable": { + "delay": 15, + "operation": "DescribeVpnConnections", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "VpnConnections[].State" + }, + { + "expected": "deleting", + "matcher": "pathAny", + "state": "failure", + "argument": "VpnConnections[].State" + }, + { + "expected": "deleted", + "matcher": "pathAny", + "state": "failure", + "argument": "VpnConnections[].State" + } + ] + }, + "VpnConnectionDeleted": { + "delay": 15, + "operation": "DescribeVpnConnections", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "deleted", + "matcher": "pathAll", + "state": "success", + "argument": "VpnConnections[].State" + }, + { + "expected": "pending", + "matcher": "pathAny", + "state": "failure", + "argument": "VpnConnections[].State" + } + ] + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2015-10-01/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2015-10-01/api-2.json new file mode 100644 index 000000000..d5853bafb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2015-10-01/api-2.json @@ -0,0 +1,13759 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-10-01", + "endpointPrefix":"ec2", + "protocol":"ec2", + "serviceAbbreviation":"Amazon EC2", + "serviceFullName":"Amazon Elastic Compute Cloud", + "signatureVersion":"v4", + "xmlNamespace":"http://ec2.amazonaws.com/doc/2015-10-01" + }, + "operations":{ + "AcceptVpcPeeringConnection":{ + "name":"AcceptVpcPeeringConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AcceptVpcPeeringConnectionRequest"}, + "output":{"shape":"AcceptVpcPeeringConnectionResult"} + }, + "AllocateAddress":{ + "name":"AllocateAddress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"AllocateAddressRequest"}, + "output":{"shape":"AllocateAddressResult"} + }, + "AllocateHosts":{ + "name":"AllocateHosts", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AllocateHostsRequest"}, + "output":{"shape":"AllocateHostsResult"} + }, + "AssignPrivateIpAddresses":{ + "name":"AssignPrivateIpAddresses", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssignPrivateIpAddressesRequest"} + }, + "AssociateAddress":{ + "name":"AssociateAddress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateAddressRequest"}, + "output":{"shape":"AssociateAddressResult"} + }, + "AssociateDhcpOptions":{ + "name":"AssociateDhcpOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateDhcpOptionsRequest"} + }, + "AssociateRouteTable":{ + "name":"AssociateRouteTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateRouteTableRequest"}, + "output":{"shape":"AssociateRouteTableResult"} + }, + "AttachClassicLinkVpc":{ + "name":"AttachClassicLinkVpc", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachClassicLinkVpcRequest"}, + "output":{"shape":"AttachClassicLinkVpcResult"} + }, + "AttachInternetGateway":{ + "name":"AttachInternetGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachInternetGatewayRequest"} + }, + "AttachNetworkInterface":{ + "name":"AttachNetworkInterface", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachNetworkInterfaceRequest"}, + "output":{"shape":"AttachNetworkInterfaceResult"} + }, + "AttachVolume":{ + "name":"AttachVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachVolumeRequest"}, + "output":{"shape":"VolumeAttachment"} + }, + "AttachVpnGateway":{ + "name":"AttachVpnGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachVpnGatewayRequest"}, + "output":{"shape":"AttachVpnGatewayResult"} + }, + "AuthorizeSecurityGroupEgress":{ + "name":"AuthorizeSecurityGroupEgress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AuthorizeSecurityGroupEgressRequest"} + }, + "AuthorizeSecurityGroupIngress":{ + "name":"AuthorizeSecurityGroupIngress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AuthorizeSecurityGroupIngressRequest"} + }, + "BundleInstance":{ + "name":"BundleInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BundleInstanceRequest"}, + "output":{"shape":"BundleInstanceResult"} + }, + "CancelBundleTask":{ + "name":"CancelBundleTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelBundleTaskRequest"}, + "output":{"shape":"CancelBundleTaskResult"} + }, + "CancelConversionTask":{ + "name":"CancelConversionTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelConversionRequest"} + }, + "CancelExportTask":{ + "name":"CancelExportTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelExportTaskRequest"} + }, + "CancelImportTask":{ + "name":"CancelImportTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelImportTaskRequest"}, + "output":{"shape":"CancelImportTaskResult"} + }, + "CancelReservedInstancesListing":{ + "name":"CancelReservedInstancesListing", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"CancelReservedInstancesListingRequest"}, + "output":{"shape":"CancelReservedInstancesListingResult"} + }, + "CancelSpotFleetRequests":{ + "name":"CancelSpotFleetRequests", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelSpotFleetRequestsRequest"}, + "output":{"shape":"CancelSpotFleetRequestsResponse"} + }, + "CancelSpotInstanceRequests":{ + "name":"CancelSpotInstanceRequests", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelSpotInstanceRequestsRequest"}, + "output":{"shape":"CancelSpotInstanceRequestsResult"} + }, + "ConfirmProductInstance":{ + "name":"ConfirmProductInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ConfirmProductInstanceRequest"}, + "output":{"shape":"ConfirmProductInstanceResult"} + }, + "CopyImage":{ + "name":"CopyImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CopyImageRequest"}, + "output":{"shape":"CopyImageResult"} + }, + "CopySnapshot":{ + "name":"CopySnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CopySnapshotRequest"}, + "output":{"shape":"CopySnapshotResult"} + }, + "CreateCustomerGateway":{ + "name":"CreateCustomerGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateCustomerGatewayRequest"}, + "output":{"shape":"CreateCustomerGatewayResult"} + }, + "CreateDhcpOptions":{ + "name":"CreateDhcpOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDhcpOptionsRequest"}, + "output":{"shape":"CreateDhcpOptionsResult"} + }, + "CreateFlowLogs":{ + "name":"CreateFlowLogs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateFlowLogsRequest"}, + "output":{"shape":"CreateFlowLogsResult"} + }, + "CreateImage":{ + "name":"CreateImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateImageRequest"}, + "output":{"shape":"CreateImageResult"} + }, + "CreateInstanceExportTask":{ + "name":"CreateInstanceExportTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateInstanceExportTaskRequest"}, + "output":{"shape":"CreateInstanceExportTaskResult"} + }, + "CreateInternetGateway":{ + "name":"CreateInternetGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateInternetGatewayRequest"}, + "output":{"shape":"CreateInternetGatewayResult"} + }, + "CreateKeyPair":{ + "name":"CreateKeyPair", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateKeyPairRequest"}, + "output":{"shape":"KeyPair"} + }, + "CreateNatGateway":{ + "name":"CreateNatGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateNatGatewayRequest"}, + "output":{"shape":"CreateNatGatewayResult"} + }, + "CreateNetworkAcl":{ + "name":"CreateNetworkAcl", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateNetworkAclRequest"}, + "output":{"shape":"CreateNetworkAclResult"} + }, + "CreateNetworkAclEntry":{ + "name":"CreateNetworkAclEntry", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateNetworkAclEntryRequest"} + }, + "CreateNetworkInterface":{ + "name":"CreateNetworkInterface", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateNetworkInterfaceRequest"}, + "output":{"shape":"CreateNetworkInterfaceResult"} + }, + "CreatePlacementGroup":{ + "name":"CreatePlacementGroup", + "http":{ + "method":"POST", + 
"requestUri":"/" + }, + "input":{"shape":"CreatePlacementGroupRequest"} + }, + "CreateReservedInstancesListing":{ + "name":"CreateReservedInstancesListing", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateReservedInstancesListingRequest"}, + "output":{"shape":"CreateReservedInstancesListingResult"} + }, + "CreateRoute":{ + "name":"CreateRoute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateRouteRequest"}, + "output":{"shape":"CreateRouteResult"} + }, + "CreateRouteTable":{ + "name":"CreateRouteTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateRouteTableRequest"}, + "output":{"shape":"CreateRouteTableResult"} + }, + "CreateSecurityGroup":{ + "name":"CreateSecurityGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSecurityGroupRequest"}, + "output":{"shape":"CreateSecurityGroupResult"} + }, + "CreateSnapshot":{ + "name":"CreateSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSnapshotRequest"}, + "output":{"shape":"Snapshot"} + }, + "CreateSpotDatafeedSubscription":{ + "name":"CreateSpotDatafeedSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSpotDatafeedSubscriptionRequest"}, + "output":{"shape":"CreateSpotDatafeedSubscriptionResult"} + }, + "CreateSubnet":{ + "name":"CreateSubnet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSubnetRequest"}, + "output":{"shape":"CreateSubnetResult"} + }, + "CreateTags":{ + "name":"CreateTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateTagsRequest"} + }, + "CreateVolume":{ + "name":"CreateVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVolumeRequest"}, + "output":{"shape":"Volume"} + }, + "CreateVpc":{ + "name":"CreateVpc", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVpcRequest"}, + "output":{"shape":"CreateVpcResult"} + }, + "CreateVpcEndpoint":{ + "name":"CreateVpcEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVpcEndpointRequest"}, + "output":{"shape":"CreateVpcEndpointResult"} + }, + "CreateVpcPeeringConnection":{ + "name":"CreateVpcPeeringConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVpcPeeringConnectionRequest"}, + "output":{"shape":"CreateVpcPeeringConnectionResult"} + }, + "CreateVpnConnection":{ + "name":"CreateVpnConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVpnConnectionRequest"}, + "output":{"shape":"CreateVpnConnectionResult"} + }, + "CreateVpnConnectionRoute":{ + "name":"CreateVpnConnectionRoute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVpnConnectionRouteRequest"} + }, + "CreateVpnGateway":{ + "name":"CreateVpnGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVpnGatewayRequest"}, + "output":{"shape":"CreateVpnGatewayResult"} + }, + "DeleteCustomerGateway":{ + "name":"DeleteCustomerGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteCustomerGatewayRequest"} + }, + "DeleteDhcpOptions":{ + "name":"DeleteDhcpOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDhcpOptionsRequest"} + }, + "DeleteFlowLogs":{ + "name":"DeleteFlowLogs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"DeleteFlowLogsRequest"}, + "output":{"shape":"DeleteFlowLogsResult"} + }, + "DeleteInternetGateway":{ + "name":"DeleteInternetGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteInternetGatewayRequest"} + }, + "DeleteKeyPair":{ + "name":"DeleteKeyPair", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteKeyPairRequest"} + }, + "DeleteNatGateway":{ + "name":"DeleteNatGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteNatGatewayRequest"}, + "output":{"shape":"DeleteNatGatewayResult"} + }, + "DeleteNetworkAcl":{ + "name":"DeleteNetworkAcl", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteNetworkAclRequest"} + }, + "DeleteNetworkAclEntry":{ + "name":"DeleteNetworkAclEntry", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteNetworkAclEntryRequest"} + }, + "DeleteNetworkInterface":{ + "name":"DeleteNetworkInterface", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteNetworkInterfaceRequest"} + }, + "DeletePlacementGroup":{ + "name":"DeletePlacementGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeletePlacementGroupRequest"} + }, + "DeleteRoute":{ + "name":"DeleteRoute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRouteRequest"} + }, + "DeleteRouteTable":{ + "name":"DeleteRouteTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRouteTableRequest"} + }, + "DeleteSecurityGroup":{ + "name":"DeleteSecurityGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSecurityGroupRequest"} + }, + "DeleteSnapshot":{ + "name":"DeleteSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSnapshotRequest"} + }, + "DeleteSpotDatafeedSubscription":{ + "name":"DeleteSpotDatafeedSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSpotDatafeedSubscriptionRequest"} + }, + "DeleteSubnet":{ + "name":"DeleteSubnet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSubnetRequest"} + }, + "DeleteTags":{ + "name":"DeleteTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteTagsRequest"} + }, + "DeleteVolume":{ + "name":"DeleteVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVolumeRequest"} + }, + "DeleteVpc":{ + "name":"DeleteVpc", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVpcRequest"} + }, + "DeleteVpcEndpoints":{ + "name":"DeleteVpcEndpoints", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVpcEndpointsRequest"}, + "output":{"shape":"DeleteVpcEndpointsResult"} + }, + "DeleteVpcPeeringConnection":{ + "name":"DeleteVpcPeeringConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVpcPeeringConnectionRequest"}, + "output":{"shape":"DeleteVpcPeeringConnectionResult"} + }, + "DeleteVpnConnection":{ + "name":"DeleteVpnConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVpnConnectionRequest"} + }, + "DeleteVpnConnectionRoute":{ + "name":"DeleteVpnConnectionRoute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVpnConnectionRouteRequest"} + }, + "DeleteVpnGateway":{ + "name":"DeleteVpnGateway", + "http":{ + "method":"POST", 
+ "requestUri":"/" + }, + "input":{"shape":"DeleteVpnGatewayRequest"} + }, + "DeregisterImage":{ + "name":"DeregisterImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeregisterImageRequest"} + }, + "DescribeAccountAttributes":{ + "name":"DescribeAccountAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAccountAttributesRequest"}, + "output":{"shape":"DescribeAccountAttributesResult"} + }, + "DescribeAddresses":{ + "name":"DescribeAddresses", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAddressesRequest"}, + "output":{"shape":"DescribeAddressesResult"} + }, + "DescribeAvailabilityZones":{ + "name":"DescribeAvailabilityZones", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAvailabilityZonesRequest"}, + "output":{"shape":"DescribeAvailabilityZonesResult"} + }, + "DescribeBundleTasks":{ + "name":"DescribeBundleTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeBundleTasksRequest"}, + "output":{"shape":"DescribeBundleTasksResult"} + }, + "DescribeClassicLinkInstances":{ + "name":"DescribeClassicLinkInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeClassicLinkInstancesRequest"}, + "output":{"shape":"DescribeClassicLinkInstancesResult"} + }, + "DescribeConversionTasks":{ + "name":"DescribeConversionTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeConversionTasksRequest"}, + "output":{"shape":"DescribeConversionTasksResult"} + }, + "DescribeCustomerGateways":{ + "name":"DescribeCustomerGateways", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeCustomerGatewaysRequest"}, + "output":{"shape":"DescribeCustomerGatewaysResult"} + }, + "DescribeDhcpOptions":{ + "name":"DescribeDhcpOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDhcpOptionsRequest"}, + "output":{"shape":"DescribeDhcpOptionsResult"} + }, + "DescribeExportTasks":{ + "name":"DescribeExportTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeExportTasksRequest"}, + "output":{"shape":"DescribeExportTasksResult"} + }, + "DescribeFlowLogs":{ + "name":"DescribeFlowLogs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeFlowLogsRequest"}, + "output":{"shape":"DescribeFlowLogsResult"} + }, + "DescribeHosts":{ + "name":"DescribeHosts", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeHostsRequest"}, + "output":{"shape":"DescribeHostsResult"} + }, + "DescribeIdFormat":{ + "name":"DescribeIdFormat", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeIdFormatRequest"}, + "output":{"shape":"DescribeIdFormatResult"} + }, + "DescribeImageAttribute":{ + "name":"DescribeImageAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeImageAttributeRequest"}, + "output":{"shape":"ImageAttribute"} + }, + "DescribeImages":{ + "name":"DescribeImages", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeImagesRequest"}, + "output":{"shape":"DescribeImagesResult"} + }, + "DescribeImportImageTasks":{ + "name":"DescribeImportImageTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeImportImageTasksRequest"}, + "output":{"shape":"DescribeImportImageTasksResult"} + }, + 
"DescribeImportSnapshotTasks":{ + "name":"DescribeImportSnapshotTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeImportSnapshotTasksRequest"}, + "output":{"shape":"DescribeImportSnapshotTasksResult"} + }, + "DescribeInstanceAttribute":{ + "name":"DescribeInstanceAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeInstanceAttributeRequest"}, + "output":{"shape":"InstanceAttribute"} + }, + "DescribeInstanceStatus":{ + "name":"DescribeInstanceStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeInstanceStatusRequest"}, + "output":{"shape":"DescribeInstanceStatusResult"} + }, + "DescribeInstances":{ + "name":"DescribeInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeInstancesRequest"}, + "output":{"shape":"DescribeInstancesResult"} + }, + "DescribeInternetGateways":{ + "name":"DescribeInternetGateways", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeInternetGatewaysRequest"}, + "output":{"shape":"DescribeInternetGatewaysResult"} + }, + "DescribeKeyPairs":{ + "name":"DescribeKeyPairs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeKeyPairsRequest"}, + "output":{"shape":"DescribeKeyPairsResult"} + }, + "DescribeMovingAddresses":{ + "name":"DescribeMovingAddresses", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeMovingAddressesRequest"}, + "output":{"shape":"DescribeMovingAddressesResult"} + }, + "DescribeNatGateways":{ + "name":"DescribeNatGateways", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeNatGatewaysRequest"}, + "output":{"shape":"DescribeNatGatewaysResult"} + }, + "DescribeNetworkAcls":{ + "name":"DescribeNetworkAcls", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeNetworkAclsRequest"}, + "output":{"shape":"DescribeNetworkAclsResult"} + }, + "DescribeNetworkInterfaceAttribute":{ + "name":"DescribeNetworkInterfaceAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeNetworkInterfaceAttributeRequest"}, + "output":{"shape":"DescribeNetworkInterfaceAttributeResult"} + }, + "DescribeNetworkInterfaces":{ + "name":"DescribeNetworkInterfaces", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeNetworkInterfacesRequest"}, + "output":{"shape":"DescribeNetworkInterfacesResult"} + }, + "DescribePlacementGroups":{ + "name":"DescribePlacementGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribePlacementGroupsRequest"}, + "output":{"shape":"DescribePlacementGroupsResult"} + }, + "DescribePrefixLists":{ + "name":"DescribePrefixLists", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribePrefixListsRequest"}, + "output":{"shape":"DescribePrefixListsResult"} + }, + "DescribeRegions":{ + "name":"DescribeRegions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRegionsRequest"}, + "output":{"shape":"DescribeRegionsResult"} + }, + "DescribeReservedInstances":{ + "name":"DescribeReservedInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedInstancesRequest"}, + "output":{"shape":"DescribeReservedInstancesResult"} + }, + "DescribeReservedInstancesListings":{ + "name":"DescribeReservedInstancesListings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"DescribeReservedInstancesListingsRequest"}, + "output":{"shape":"DescribeReservedInstancesListingsResult"} + }, + "DescribeReservedInstancesModifications":{ + "name":"DescribeReservedInstancesModifications", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedInstancesModificationsRequest"}, + "output":{"shape":"DescribeReservedInstancesModificationsResult"} + }, + "DescribeReservedInstancesOfferings":{ + "name":"DescribeReservedInstancesOfferings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedInstancesOfferingsRequest"}, + "output":{"shape":"DescribeReservedInstancesOfferingsResult"} + }, + "DescribeRouteTables":{ + "name":"DescribeRouteTables", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRouteTablesRequest"}, + "output":{"shape":"DescribeRouteTablesResult"} + }, + "DescribeScheduledInstanceAvailability":{ + "name":"DescribeScheduledInstanceAvailability", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeScheduledInstanceAvailabilityRequest"}, + "output":{"shape":"DescribeScheduledInstanceAvailabilityResult"} + }, + "DescribeScheduledInstances":{ + "name":"DescribeScheduledInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeScheduledInstancesRequest"}, + "output":{"shape":"DescribeScheduledInstancesResult"} + }, + "DescribeSecurityGroupReferences":{ + "name":"DescribeSecurityGroupReferences", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSecurityGroupReferencesRequest"}, + "output":{"shape":"DescribeSecurityGroupReferencesResult"} + }, + "DescribeSecurityGroups":{ + "name":"DescribeSecurityGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSecurityGroupsRequest"}, + "output":{"shape":"DescribeSecurityGroupsResult"} + }, + "DescribeSnapshotAttribute":{ + "name":"DescribeSnapshotAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSnapshotAttributeRequest"}, + "output":{"shape":"DescribeSnapshotAttributeResult"} + }, + "DescribeSnapshots":{ + "name":"DescribeSnapshots", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSnapshotsRequest"}, + "output":{"shape":"DescribeSnapshotsResult"} + }, + "DescribeSpotDatafeedSubscription":{ + "name":"DescribeSpotDatafeedSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSpotDatafeedSubscriptionRequest"}, + "output":{"shape":"DescribeSpotDatafeedSubscriptionResult"} + }, + "DescribeSpotFleetInstances":{ + "name":"DescribeSpotFleetInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSpotFleetInstancesRequest"}, + "output":{"shape":"DescribeSpotFleetInstancesResponse"} + }, + "DescribeSpotFleetRequestHistory":{ + "name":"DescribeSpotFleetRequestHistory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSpotFleetRequestHistoryRequest"}, + "output":{"shape":"DescribeSpotFleetRequestHistoryResponse"} + }, + "DescribeSpotFleetRequests":{ + "name":"DescribeSpotFleetRequests", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSpotFleetRequestsRequest"}, + "output":{"shape":"DescribeSpotFleetRequestsResponse"} + }, + "DescribeSpotInstanceRequests":{ + "name":"DescribeSpotInstanceRequests", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"DescribeSpotInstanceRequestsRequest"}, + "output":{"shape":"DescribeSpotInstanceRequestsResult"} + }, + "DescribeSpotPriceHistory":{ + "name":"DescribeSpotPriceHistory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSpotPriceHistoryRequest"}, + "output":{"shape":"DescribeSpotPriceHistoryResult"} + }, + "DescribeStaleSecurityGroups":{ + "name":"DescribeStaleSecurityGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeStaleSecurityGroupsRequest"}, + "output":{"shape":"DescribeStaleSecurityGroupsResult"} + }, + "DescribeSubnets":{ + "name":"DescribeSubnets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSubnetsRequest"}, + "output":{"shape":"DescribeSubnetsResult"} + }, + "DescribeTags":{ + "name":"DescribeTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTagsRequest"}, + "output":{"shape":"DescribeTagsResult"} + }, + "DescribeVolumeAttribute":{ + "name":"DescribeVolumeAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVolumeAttributeRequest"}, + "output":{"shape":"DescribeVolumeAttributeResult"} + }, + "DescribeVolumeStatus":{ + "name":"DescribeVolumeStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVolumeStatusRequest"}, + "output":{"shape":"DescribeVolumeStatusResult"} + }, + "DescribeVolumes":{ + "name":"DescribeVolumes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVolumesRequest"}, + "output":{"shape":"DescribeVolumesResult"} + }, + "DescribeVpcAttribute":{ + "name":"DescribeVpcAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpcAttributeRequest"}, + "output":{"shape":"DescribeVpcAttributeResult"} + }, + "DescribeVpcClassicLink":{ + "name":"DescribeVpcClassicLink", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpcClassicLinkRequest"}, + "output":{"shape":"DescribeVpcClassicLinkResult"} + }, + "DescribeVpcClassicLinkDnsSupport":{ + "name":"DescribeVpcClassicLinkDnsSupport", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpcClassicLinkDnsSupportRequest"}, + "output":{"shape":"DescribeVpcClassicLinkDnsSupportResult"} + }, + "DescribeVpcEndpointServices":{ + "name":"DescribeVpcEndpointServices", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpcEndpointServicesRequest"}, + "output":{"shape":"DescribeVpcEndpointServicesResult"} + }, + "DescribeVpcEndpoints":{ + "name":"DescribeVpcEndpoints", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpcEndpointsRequest"}, + "output":{"shape":"DescribeVpcEndpointsResult"} + }, + "DescribeVpcPeeringConnections":{ + "name":"DescribeVpcPeeringConnections", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpcPeeringConnectionsRequest"}, + "output":{"shape":"DescribeVpcPeeringConnectionsResult"} + }, + "DescribeVpcs":{ + "name":"DescribeVpcs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpcsRequest"}, + "output":{"shape":"DescribeVpcsResult"} + }, + "DescribeVpnConnections":{ + "name":"DescribeVpnConnections", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpnConnectionsRequest"}, + "output":{"shape":"DescribeVpnConnectionsResult"} + }, + "DescribeVpnGateways":{ + 
"name":"DescribeVpnGateways", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpnGatewaysRequest"}, + "output":{"shape":"DescribeVpnGatewaysResult"} + }, + "DetachClassicLinkVpc":{ + "name":"DetachClassicLinkVpc", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachClassicLinkVpcRequest"}, + "output":{"shape":"DetachClassicLinkVpcResult"} + }, + "DetachInternetGateway":{ + "name":"DetachInternetGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachInternetGatewayRequest"} + }, + "DetachNetworkInterface":{ + "name":"DetachNetworkInterface", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachNetworkInterfaceRequest"} + }, + "DetachVolume":{ + "name":"DetachVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachVolumeRequest"}, + "output":{"shape":"VolumeAttachment"} + }, + "DetachVpnGateway":{ + "name":"DetachVpnGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachVpnGatewayRequest"} + }, + "DisableVgwRoutePropagation":{ + "name":"DisableVgwRoutePropagation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableVgwRoutePropagationRequest"} + }, + "DisableVpcClassicLink":{ + "name":"DisableVpcClassicLink", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableVpcClassicLinkRequest"}, + "output":{"shape":"DisableVpcClassicLinkResult"} + }, + "DisableVpcClassicLinkDnsSupport":{ + "name":"DisableVpcClassicLinkDnsSupport", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableVpcClassicLinkDnsSupportRequest"}, + "output":{"shape":"DisableVpcClassicLinkDnsSupportResult"} + }, + "DisassociateAddress":{ + "name":"DisassociateAddress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisassociateAddressRequest"} + }, + "DisassociateRouteTable":{ + "name":"DisassociateRouteTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisassociateRouteTableRequest"} + }, + "EnableVgwRoutePropagation":{ + "name":"EnableVgwRoutePropagation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableVgwRoutePropagationRequest"} + }, + "EnableVolumeIO":{ + "name":"EnableVolumeIO", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableVolumeIORequest"} + }, + "EnableVpcClassicLink":{ + "name":"EnableVpcClassicLink", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableVpcClassicLinkRequest"}, + "output":{"shape":"EnableVpcClassicLinkResult"} + }, + "EnableVpcClassicLinkDnsSupport":{ + "name":"EnableVpcClassicLinkDnsSupport", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableVpcClassicLinkDnsSupportRequest"}, + "output":{"shape":"EnableVpcClassicLinkDnsSupportResult"} + }, + "GetConsoleOutput":{ + "name":"GetConsoleOutput", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetConsoleOutputRequest"}, + "output":{"shape":"GetConsoleOutputResult"} + }, + "GetConsoleScreenshot":{ + "name":"GetConsoleScreenshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetConsoleScreenshotRequest"}, + "output":{"shape":"GetConsoleScreenshotResult"} + }, + "GetPasswordData":{ + "name":"GetPasswordData", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetPasswordDataRequest"}, + "output":{"shape":"GetPasswordDataResult"} + }, + 
"ImportImage":{ + "name":"ImportImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ImportImageRequest"}, + "output":{"shape":"ImportImageResult"} + }, + "ImportInstance":{ + "name":"ImportInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ImportInstanceRequest"}, + "output":{"shape":"ImportInstanceResult"} + }, + "ImportKeyPair":{ + "name":"ImportKeyPair", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ImportKeyPairRequest"}, + "output":{"shape":"ImportKeyPairResult"} + }, + "ImportSnapshot":{ + "name":"ImportSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ImportSnapshotRequest"}, + "output":{"shape":"ImportSnapshotResult"} + }, + "ImportVolume":{ + "name":"ImportVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ImportVolumeRequest"}, + "output":{"shape":"ImportVolumeResult"} + }, + "ModifyHosts":{ + "name":"ModifyHosts", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyHostsRequest"}, + "output":{"shape":"ModifyHostsResult"} + }, + "ModifyIdFormat":{ + "name":"ModifyIdFormat", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyIdFormatRequest"} + }, + "ModifyImageAttribute":{ + "name":"ModifyImageAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyImageAttributeRequest"} + }, + "ModifyInstanceAttribute":{ + "name":"ModifyInstanceAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyInstanceAttributeRequest"} + }, + "ModifyInstancePlacement":{ + "name":"ModifyInstancePlacement", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyInstancePlacementRequest"}, + "output":{"shape":"ModifyInstancePlacementResult"} + }, + "ModifyNetworkInterfaceAttribute":{ + "name":"ModifyNetworkInterfaceAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyNetworkInterfaceAttributeRequest"} + }, + "ModifyReservedInstances":{ + "name":"ModifyReservedInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyReservedInstancesRequest"}, + "output":{"shape":"ModifyReservedInstancesResult"} + }, + "ModifySnapshotAttribute":{ + "name":"ModifySnapshotAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifySnapshotAttributeRequest"} + }, + "ModifySpotFleetRequest":{ + "name":"ModifySpotFleetRequest", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifySpotFleetRequestRequest"}, + "output":{"shape":"ModifySpotFleetRequestResponse"} + }, + "ModifySubnetAttribute":{ + "name":"ModifySubnetAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifySubnetAttributeRequest"} + }, + "ModifyVolumeAttribute":{ + "name":"ModifyVolumeAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyVolumeAttributeRequest"} + }, + "ModifyVpcAttribute":{ + "name":"ModifyVpcAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyVpcAttributeRequest"} + }, + "ModifyVpcEndpoint":{ + "name":"ModifyVpcEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyVpcEndpointRequest"}, + "output":{"shape":"ModifyVpcEndpointResult"} + }, + "ModifyVpcPeeringConnectionOptions":{ + "name":"ModifyVpcPeeringConnectionOptions", + "http":{ + "method":"POST", + 
"requestUri":"/" + }, + "input":{"shape":"ModifyVpcPeeringConnectionOptionsRequest"}, + "output":{"shape":"ModifyVpcPeeringConnectionOptionsResult"} + }, + "MonitorInstances":{ + "name":"MonitorInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"MonitorInstancesRequest"}, + "output":{"shape":"MonitorInstancesResult"} + }, + "MoveAddressToVpc":{ + "name":"MoveAddressToVpc", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"MoveAddressToVpcRequest"}, + "output":{"shape":"MoveAddressToVpcResult"} + }, + "PurchaseReservedInstancesOffering":{ + "name":"PurchaseReservedInstancesOffering", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PurchaseReservedInstancesOfferingRequest"}, + "output":{"shape":"PurchaseReservedInstancesOfferingResult"} + }, + "PurchaseScheduledInstances":{ + "name":"PurchaseScheduledInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PurchaseScheduledInstancesRequest"}, + "output":{"shape":"PurchaseScheduledInstancesResult"} + }, + "RebootInstances":{ + "name":"RebootInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RebootInstancesRequest"} + }, + "RegisterImage":{ + "name":"RegisterImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterImageRequest"}, + "output":{"shape":"RegisterImageResult"} + }, + "RejectVpcPeeringConnection":{ + "name":"RejectVpcPeeringConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RejectVpcPeeringConnectionRequest"}, + "output":{"shape":"RejectVpcPeeringConnectionResult"} + }, + "ReleaseAddress":{ + "name":"ReleaseAddress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReleaseAddressRequest"} + }, + "ReleaseHosts":{ + "name":"ReleaseHosts", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReleaseHostsRequest"}, + "output":{"shape":"ReleaseHostsResult"} + }, + "ReplaceNetworkAclAssociation":{ + "name":"ReplaceNetworkAclAssociation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReplaceNetworkAclAssociationRequest"}, + "output":{"shape":"ReplaceNetworkAclAssociationResult"} + }, + "ReplaceNetworkAclEntry":{ + "name":"ReplaceNetworkAclEntry", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReplaceNetworkAclEntryRequest"} + }, + "ReplaceRoute":{ + "name":"ReplaceRoute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReplaceRouteRequest"} + }, + "ReplaceRouteTableAssociation":{ + "name":"ReplaceRouteTableAssociation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReplaceRouteTableAssociationRequest"}, + "output":{"shape":"ReplaceRouteTableAssociationResult"} + }, + "ReportInstanceStatus":{ + "name":"ReportInstanceStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReportInstanceStatusRequest"} + }, + "RequestSpotFleet":{ + "name":"RequestSpotFleet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RequestSpotFleetRequest"}, + "output":{"shape":"RequestSpotFleetResponse"} + }, + "RequestSpotInstances":{ + "name":"RequestSpotInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RequestSpotInstancesRequest"}, + "output":{"shape":"RequestSpotInstancesResult"} + }, + "ResetImageAttribute":{ + "name":"ResetImageAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"ResetImageAttributeRequest"} + }, + "ResetInstanceAttribute":{ + "name":"ResetInstanceAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetInstanceAttributeRequest"} + }, + "ResetNetworkInterfaceAttribute":{ + "name":"ResetNetworkInterfaceAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetNetworkInterfaceAttributeRequest"} + }, + "ResetSnapshotAttribute":{ + "name":"ResetSnapshotAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetSnapshotAttributeRequest"} + }, + "RestoreAddressToClassic":{ + "name":"RestoreAddressToClassic", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreAddressToClassicRequest"}, + "output":{"shape":"RestoreAddressToClassicResult"} + }, + "RevokeSecurityGroupEgress":{ + "name":"RevokeSecurityGroupEgress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RevokeSecurityGroupEgressRequest"} + }, + "RevokeSecurityGroupIngress":{ + "name":"RevokeSecurityGroupIngress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RevokeSecurityGroupIngressRequest"} + }, + "RunInstances":{ + "name":"RunInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RunInstancesRequest"}, + "output":{"shape":"Reservation"} + }, + "RunScheduledInstances":{ + "name":"RunScheduledInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RunScheduledInstancesRequest"}, + "output":{"shape":"RunScheduledInstancesResult"} + }, + "StartInstances":{ + "name":"StartInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartInstancesRequest"}, + "output":{"shape":"StartInstancesResult"} + }, + "StopInstances":{ + "name":"StopInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopInstancesRequest"}, + "output":{"shape":"StopInstancesResult"} + }, + "TerminateInstances":{ + "name":"TerminateInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TerminateInstancesRequest"}, + "output":{"shape":"TerminateInstancesResult"} + }, + "UnassignPrivateIpAddresses":{ + "name":"UnassignPrivateIpAddresses", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UnassignPrivateIpAddressesRequest"} + }, + "UnmonitorInstances":{ + "name":"UnmonitorInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UnmonitorInstancesRequest"}, + "output":{"shape":"UnmonitorInstancesResult"} + } + }, + "shapes":{ + "AcceptVpcPeeringConnectionRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcPeeringConnectionId":{ + "shape":"String", + "locationName":"vpcPeeringConnectionId" + } + } + }, + "AcceptVpcPeeringConnectionResult":{ + "type":"structure", + "members":{ + "VpcPeeringConnection":{ + "shape":"VpcPeeringConnection", + "locationName":"vpcPeeringConnection" + } + } + }, + "AccountAttribute":{ + "type":"structure", + "members":{ + "AttributeName":{ + "shape":"String", + "locationName":"attributeName" + }, + "AttributeValues":{ + "shape":"AccountAttributeValueList", + "locationName":"attributeValueSet" + } + } + }, + "AccountAttributeList":{ + "type":"list", + "member":{ + "shape":"AccountAttribute", + "locationName":"item" + } + }, + "AccountAttributeName":{ + "type":"string", + "enum":[ + "supported-platforms", + "default-vpc" + ] + }, + 
"AccountAttributeNameStringList":{ + "type":"list", + "member":{ + "shape":"AccountAttributeName", + "locationName":"attributeName" + } + }, + "AccountAttributeValue":{ + "type":"structure", + "members":{ + "AttributeValue":{ + "shape":"String", + "locationName":"attributeValue" + } + } + }, + "AccountAttributeValueList":{ + "type":"list", + "member":{ + "shape":"AccountAttributeValue", + "locationName":"item" + } + }, + "ActiveInstance":{ + "type":"structure", + "members":{ + "InstanceType":{ + "shape":"String", + "locationName":"instanceType" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "SpotInstanceRequestId":{ + "shape":"String", + "locationName":"spotInstanceRequestId" + } + } + }, + "ActiveInstanceSet":{ + "type":"list", + "member":{ + "shape":"ActiveInstance", + "locationName":"item" + } + }, + "Address":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + }, + "AllocationId":{ + "shape":"String", + "locationName":"allocationId" + }, + "AssociationId":{ + "shape":"String", + "locationName":"associationId" + }, + "Domain":{ + "shape":"DomainType", + "locationName":"domain" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "NetworkInterfaceOwnerId":{ + "shape":"String", + "locationName":"networkInterfaceOwnerId" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + } + } + }, + "AddressList":{ + "type":"list", + "member":{ + "shape":"Address", + "locationName":"item" + } + }, + "Affinity":{ + "type":"string", + "enum":[ + "default", + "host" + ] + }, + "AllocateAddressRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Domain":{"shape":"DomainType"} + } + }, + "AllocateAddressResult":{ + "type":"structure", + "members":{ + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + }, + "Domain":{ + "shape":"DomainType", + "locationName":"domain" + }, + "AllocationId":{ + "shape":"String", + "locationName":"allocationId" + } + } + }, + "AllocateHostsRequest":{ + "type":"structure", + "required":[ + "InstanceType", + "Quantity", + "AvailabilityZone" + ], + "members":{ + "AutoPlacement":{ + "shape":"AutoPlacement", + "locationName":"autoPlacement" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + }, + "InstanceType":{ + "shape":"String", + "locationName":"instanceType" + }, + "Quantity":{ + "shape":"Integer", + "locationName":"quantity" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + } + } + }, + "AllocateHostsResult":{ + "type":"structure", + "members":{ + "HostIds":{ + "shape":"ResponseHostIdList", + "locationName":"hostIdSet" + } + } + }, + "AllocationIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"AllocationId" + } + }, + "AllocationState":{ + "type":"string", + "enum":[ + "available", + "under-assessment", + "permanent-failure", + "released", + "released-permanent-failure" + ] + }, + "AllocationStrategy":{ + "type":"string", + "enum":[ + "lowestPrice", + "diversified" + ] + }, + "ArchitectureValues":{ + "type":"string", + "enum":[ + "i386", + "x86_64" + ] + }, + "AssignPrivateIpAddressesRequest":{ + "type":"structure", + "required":["NetworkInterfaceId"], + "members":{ + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "PrivateIpAddresses":{ + 
"shape":"PrivateIpAddressStringList", + "locationName":"privateIpAddress" + }, + "SecondaryPrivateIpAddressCount":{ + "shape":"Integer", + "locationName":"secondaryPrivateIpAddressCount" + }, + "AllowReassignment":{ + "shape":"Boolean", + "locationName":"allowReassignment" + } + } + }, + "AssociateAddressRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{"shape":"String"}, + "PublicIp":{"shape":"String"}, + "AllocationId":{"shape":"String"}, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "AllowReassociation":{ + "shape":"Boolean", + "locationName":"allowReassociation" + } + } + }, + "AssociateAddressResult":{ + "type":"structure", + "members":{ + "AssociationId":{ + "shape":"String", + "locationName":"associationId" + } + } + }, + "AssociateDhcpOptionsRequest":{ + "type":"structure", + "required":[ + "DhcpOptionsId", + "VpcId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "DhcpOptionsId":{"shape":"String"}, + "VpcId":{"shape":"String"} + } + }, + "AssociateRouteTableRequest":{ + "type":"structure", + "required":[ + "SubnetId", + "RouteTableId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "RouteTableId":{ + "shape":"String", + "locationName":"routeTableId" + } + } + }, + "AssociateRouteTableResult":{ + "type":"structure", + "members":{ + "AssociationId":{ + "shape":"String", + "locationName":"associationId" + } + } + }, + "AttachClassicLinkVpcRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "VpcId", + "Groups" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "Groups":{ + "shape":"GroupIdStringList", + "locationName":"SecurityGroupId" + } + } + }, + "AttachClassicLinkVpcResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "AttachInternetGatewayRequest":{ + "type":"structure", + "required":[ + "InternetGatewayId", + "VpcId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InternetGatewayId":{ + "shape":"String", + "locationName":"internetGatewayId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + } + } + }, + "AttachNetworkInterfaceRequest":{ + "type":"structure", + "required":[ + "NetworkInterfaceId", + "InstanceId", + "DeviceIndex" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "DeviceIndex":{ + "shape":"Integer", + "locationName":"deviceIndex" + } + } + }, + "AttachNetworkInterfaceResult":{ + "type":"structure", + "members":{ + "AttachmentId":{ + "shape":"String", + "locationName":"attachmentId" + } + } + }, + "AttachVolumeRequest":{ + "type":"structure", + "required":[ + "VolumeId", + "InstanceId", + "Device" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeId":{"shape":"String"}, + "InstanceId":{"shape":"String"}, + "Device":{"shape":"String"} + } + }, + "AttachVpnGatewayRequest":{ + 
"type":"structure", + "required":[ + "VpnGatewayId", + "VpcId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpnGatewayId":{"shape":"String"}, + "VpcId":{"shape":"String"} + } + }, + "AttachVpnGatewayResult":{ + "type":"structure", + "members":{ + "VpcAttachment":{ + "shape":"VpcAttachment", + "locationName":"attachment" + } + } + }, + "AttachmentStatus":{ + "type":"string", + "enum":[ + "attaching", + "attached", + "detaching", + "detached" + ] + }, + "AttributeBooleanValue":{ + "type":"structure", + "members":{ + "Value":{ + "shape":"Boolean", + "locationName":"value" + } + } + }, + "AttributeValue":{ + "type":"structure", + "members":{ + "Value":{ + "shape":"String", + "locationName":"value" + } + } + }, + "AuthorizeSecurityGroupEgressRequest":{ + "type":"structure", + "required":["GroupId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupId":{ + "shape":"String", + "locationName":"groupId" + }, + "SourceSecurityGroupName":{ + "shape":"String", + "locationName":"sourceSecurityGroupName" + }, + "SourceSecurityGroupOwnerId":{ + "shape":"String", + "locationName":"sourceSecurityGroupOwnerId" + }, + "IpProtocol":{ + "shape":"String", + "locationName":"ipProtocol" + }, + "FromPort":{ + "shape":"Integer", + "locationName":"fromPort" + }, + "ToPort":{ + "shape":"Integer", + "locationName":"toPort" + }, + "CidrIp":{ + "shape":"String", + "locationName":"cidrIp" + }, + "IpPermissions":{ + "shape":"IpPermissionList", + "locationName":"ipPermissions" + } + } + }, + "AuthorizeSecurityGroupIngressRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupName":{"shape":"String"}, + "GroupId":{"shape":"String"}, + "SourceSecurityGroupName":{"shape":"String"}, + "SourceSecurityGroupOwnerId":{"shape":"String"}, + "IpProtocol":{"shape":"String"}, + "FromPort":{"shape":"Integer"}, + "ToPort":{"shape":"Integer"}, + "CidrIp":{"shape":"String"}, + "IpPermissions":{"shape":"IpPermissionList"} + } + }, + "AutoPlacement":{ + "type":"string", + "enum":[ + "on", + "off" + ] + }, + "AvailabilityZone":{ + "type":"structure", + "members":{ + "ZoneName":{ + "shape":"String", + "locationName":"zoneName" + }, + "State":{ + "shape":"AvailabilityZoneState", + "locationName":"zoneState" + }, + "RegionName":{ + "shape":"String", + "locationName":"regionName" + }, + "Messages":{ + "shape":"AvailabilityZoneMessageList", + "locationName":"messageSet" + } + } + }, + "AvailabilityZoneList":{ + "type":"list", + "member":{ + "shape":"AvailabilityZone", + "locationName":"item" + } + }, + "AvailabilityZoneMessage":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "AvailabilityZoneMessageList":{ + "type":"list", + "member":{ + "shape":"AvailabilityZoneMessage", + "locationName":"item" + } + }, + "AvailabilityZoneState":{ + "type":"string", + "enum":[ + "available", + "information", + "impaired", + "unavailable" + ] + }, + "AvailableCapacity":{ + "type":"structure", + "members":{ + "AvailableInstanceCapacity":{ + "shape":"AvailableInstanceCapacityList", + "locationName":"availableInstanceCapacity" + }, + "AvailableVCpus":{ + "shape":"Integer", + "locationName":"availableVCpus" + } + } + }, + "AvailableInstanceCapacityList":{ + "type":"list", + "member":{ + "shape":"InstanceCapacity", + "locationName":"item" + } + }, + "BatchState":{ + "type":"string", + "enum":[ + "submitted", + "active", + "cancelled", + "failed", + 
"cancelled_running", + "cancelled_terminating", + "modifying" + ] + }, + "Blob":{"type":"blob"}, + "BlobAttributeValue":{ + "type":"structure", + "members":{ + "Value":{ + "shape":"Blob", + "locationName":"value" + } + } + }, + "BlockDeviceMapping":{ + "type":"structure", + "members":{ + "VirtualName":{ + "shape":"String", + "locationName":"virtualName" + }, + "DeviceName":{ + "shape":"String", + "locationName":"deviceName" + }, + "Ebs":{ + "shape":"EbsBlockDevice", + "locationName":"ebs" + }, + "NoDevice":{ + "shape":"String", + "locationName":"noDevice" + } + } + }, + "BlockDeviceMappingList":{ + "type":"list", + "member":{ + "shape":"BlockDeviceMapping", + "locationName":"item" + } + }, + "BlockDeviceMappingRequestList":{ + "type":"list", + "member":{ + "shape":"BlockDeviceMapping", + "locationName":"BlockDeviceMapping" + } + }, + "Boolean":{"type":"boolean"}, + "BundleIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"BundleId" + } + }, + "BundleInstanceRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "Storage" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{"shape":"String"}, + "Storage":{"shape":"Storage"} + } + }, + "BundleInstanceResult":{ + "type":"structure", + "members":{ + "BundleTask":{ + "shape":"BundleTask", + "locationName":"bundleInstanceTask" + } + } + }, + "BundleTask":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "BundleId":{ + "shape":"String", + "locationName":"bundleId" + }, + "State":{ + "shape":"BundleTaskState", + "locationName":"state" + }, + "StartTime":{ + "shape":"DateTime", + "locationName":"startTime" + }, + "UpdateTime":{ + "shape":"DateTime", + "locationName":"updateTime" + }, + "Storage":{ + "shape":"Storage", + "locationName":"storage" + }, + "Progress":{ + "shape":"String", + "locationName":"progress" + }, + "BundleTaskError":{ + "shape":"BundleTaskError", + "locationName":"error" + } + } + }, + "BundleTaskError":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"String", + "locationName":"code" + }, + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "BundleTaskList":{ + "type":"list", + "member":{ + "shape":"BundleTask", + "locationName":"item" + } + }, + "BundleTaskState":{ + "type":"string", + "enum":[ + "pending", + "waiting-for-shutdown", + "bundling", + "storing", + "cancelling", + "complete", + "failed" + ] + }, + "CancelBatchErrorCode":{ + "type":"string", + "enum":[ + "fleetRequestIdDoesNotExist", + "fleetRequestIdMalformed", + "fleetRequestNotInCancellableState", + "unexpectedError" + ] + }, + "CancelBundleTaskRequest":{ + "type":"structure", + "required":["BundleId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "BundleId":{"shape":"String"} + } + }, + "CancelBundleTaskResult":{ + "type":"structure", + "members":{ + "BundleTask":{ + "shape":"BundleTask", + "locationName":"bundleInstanceTask" + } + } + }, + "CancelConversionRequest":{ + "type":"structure", + "required":["ConversionTaskId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ConversionTaskId":{ + "shape":"String", + "locationName":"conversionTaskId" + }, + "ReasonMessage":{ + "shape":"String", + "locationName":"reasonMessage" + } + } + }, + "CancelExportTaskRequest":{ + "type":"structure", + "required":["ExportTaskId"], + "members":{ + "ExportTaskId":{ + "shape":"String", + "locationName":"exportTaskId" + } + } + 
}, + "CancelImportTaskRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "ImportTaskId":{"shape":"String"}, + "CancelReason":{"shape":"String"} + } + }, + "CancelImportTaskResult":{ + "type":"structure", + "members":{ + "ImportTaskId":{ + "shape":"String", + "locationName":"importTaskId" + }, + "State":{ + "shape":"String", + "locationName":"state" + }, + "PreviousState":{ + "shape":"String", + "locationName":"previousState" + } + } + }, + "CancelReservedInstancesListingRequest":{ + "type":"structure", + "required":["ReservedInstancesListingId"], + "members":{ + "ReservedInstancesListingId":{ + "shape":"String", + "locationName":"reservedInstancesListingId" + } + } + }, + "CancelReservedInstancesListingResult":{ + "type":"structure", + "members":{ + "ReservedInstancesListings":{ + "shape":"ReservedInstancesListingList", + "locationName":"reservedInstancesListingsSet" + } + } + }, + "CancelSpotFleetRequestsError":{ + "type":"structure", + "required":[ + "Code", + "Message" + ], + "members":{ + "Code":{ + "shape":"CancelBatchErrorCode", + "locationName":"code" + }, + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "CancelSpotFleetRequestsErrorItem":{ + "type":"structure", + "required":[ + "SpotFleetRequestId", + "Error" + ], + "members":{ + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + }, + "Error":{ + "shape":"CancelSpotFleetRequestsError", + "locationName":"error" + } + } + }, + "CancelSpotFleetRequestsErrorSet":{ + "type":"list", + "member":{ + "shape":"CancelSpotFleetRequestsErrorItem", + "locationName":"item" + } + }, + "CancelSpotFleetRequestsRequest":{ + "type":"structure", + "required":[ + "SpotFleetRequestIds", + "TerminateInstances" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SpotFleetRequestIds":{ + "shape":"ValueStringList", + "locationName":"spotFleetRequestId" + }, + "TerminateInstances":{ + "shape":"Boolean", + "locationName":"terminateInstances" + } + } + }, + "CancelSpotFleetRequestsResponse":{ + "type":"structure", + "members":{ + "UnsuccessfulFleetRequests":{ + "shape":"CancelSpotFleetRequestsErrorSet", + "locationName":"unsuccessfulFleetRequestSet" + }, + "SuccessfulFleetRequests":{ + "shape":"CancelSpotFleetRequestsSuccessSet", + "locationName":"successfulFleetRequestSet" + } + } + }, + "CancelSpotFleetRequestsSuccessItem":{ + "type":"structure", + "required":[ + "SpotFleetRequestId", + "CurrentSpotFleetRequestState", + "PreviousSpotFleetRequestState" + ], + "members":{ + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + }, + "CurrentSpotFleetRequestState":{ + "shape":"BatchState", + "locationName":"currentSpotFleetRequestState" + }, + "PreviousSpotFleetRequestState":{ + "shape":"BatchState", + "locationName":"previousSpotFleetRequestState" + } + } + }, + "CancelSpotFleetRequestsSuccessSet":{ + "type":"list", + "member":{ + "shape":"CancelSpotFleetRequestsSuccessItem", + "locationName":"item" + } + }, + "CancelSpotInstanceRequestState":{ + "type":"string", + "enum":[ + "active", + "open", + "closed", + "cancelled", + "completed" + ] + }, + "CancelSpotInstanceRequestsRequest":{ + "type":"structure", + "required":["SpotInstanceRequestIds"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SpotInstanceRequestIds":{ + "shape":"SpotInstanceRequestIdList", + "locationName":"SpotInstanceRequestId" + } + } + }, + "CancelSpotInstanceRequestsResult":{ + "type":"structure", + 
"members":{ + "CancelledSpotInstanceRequests":{ + "shape":"CancelledSpotInstanceRequestList", + "locationName":"spotInstanceRequestSet" + } + } + }, + "CancelledSpotInstanceRequest":{ + "type":"structure", + "members":{ + "SpotInstanceRequestId":{ + "shape":"String", + "locationName":"spotInstanceRequestId" + }, + "State":{ + "shape":"CancelSpotInstanceRequestState", + "locationName":"state" + } + } + }, + "CancelledSpotInstanceRequestList":{ + "type":"list", + "member":{ + "shape":"CancelledSpotInstanceRequest", + "locationName":"item" + } + }, + "ClassicLinkDnsSupport":{ + "type":"structure", + "members":{ + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "ClassicLinkDnsSupported":{ + "shape":"Boolean", + "locationName":"classicLinkDnsSupported" + } + } + }, + "ClassicLinkDnsSupportList":{ + "type":"list", + "member":{ + "shape":"ClassicLinkDnsSupport", + "locationName":"item" + } + }, + "ClassicLinkInstance":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "Groups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "ClassicLinkInstanceList":{ + "type":"list", + "member":{ + "shape":"ClassicLinkInstance", + "locationName":"item" + } + }, + "ClientData":{ + "type":"structure", + "members":{ + "UploadStart":{"shape":"DateTime"}, + "UploadEnd":{"shape":"DateTime"}, + "UploadSize":{"shape":"Double"}, + "Comment":{"shape":"String"} + } + }, + "ConfirmProductInstanceRequest":{ + "type":"structure", + "required":[ + "ProductCode", + "InstanceId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ProductCode":{"shape":"String"}, + "InstanceId":{"shape":"String"} + } + }, + "ConfirmProductInstanceResult":{ + "type":"structure", + "members":{ + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "ContainerFormat":{ + "type":"string", + "enum":["ova"] + }, + "ConversionIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"item" + } + }, + "ConversionTask":{ + "type":"structure", + "required":[ + "ConversionTaskId", + "State" + ], + "members":{ + "ConversionTaskId":{ + "shape":"String", + "locationName":"conversionTaskId" + }, + "ExpirationTime":{ + "shape":"String", + "locationName":"expirationTime" + }, + "ImportInstance":{ + "shape":"ImportInstanceTaskDetails", + "locationName":"importInstance" + }, + "ImportVolume":{ + "shape":"ImportVolumeTaskDetails", + "locationName":"importVolume" + }, + "State":{ + "shape":"ConversionTaskState", + "locationName":"state" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "ConversionTaskState":{ + "type":"string", + "enum":[ + "active", + "cancelling", + "cancelled", + "completed" + ] + }, + "CopyImageRequest":{ + "type":"structure", + "required":[ + "SourceRegion", + "SourceImageId", + "Name" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SourceRegion":{"shape":"String"}, + "SourceImageId":{"shape":"String"}, + "Name":{"shape":"String"}, + "Description":{"shape":"String"}, + "ClientToken":{"shape":"String"}, + "Encrypted":{ + "shape":"Boolean", + "locationName":"encrypted" + }, + "KmsKeyId":{ + "shape":"String", + "locationName":"kmsKeyId" + 
} + } + }, + "CopyImageResult":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"String", + "locationName":"imageId" + } + } + }, + "CopySnapshotRequest":{ + "type":"structure", + "required":[ + "SourceRegion", + "SourceSnapshotId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SourceRegion":{"shape":"String"}, + "SourceSnapshotId":{"shape":"String"}, + "Description":{"shape":"String"}, + "DestinationRegion":{ + "shape":"String", + "locationName":"destinationRegion" + }, + "PresignedUrl":{ + "shape":"String", + "locationName":"presignedUrl" + }, + "Encrypted":{ + "shape":"Boolean", + "locationName":"encrypted" + }, + "KmsKeyId":{ + "shape":"String", + "locationName":"kmsKeyId" + } + } + }, + "CopySnapshotResult":{ + "type":"structure", + "members":{ + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + } + } + }, + "CreateCustomerGatewayRequest":{ + "type":"structure", + "required":[ + "Type", + "PublicIp", + "BgpAsn" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Type":{"shape":"GatewayType"}, + "PublicIp":{ + "shape":"String", + "locationName":"IpAddress" + }, + "BgpAsn":{"shape":"Integer"} + } + }, + "CreateCustomerGatewayResult":{ + "type":"structure", + "members":{ + "CustomerGateway":{ + "shape":"CustomerGateway", + "locationName":"customerGateway" + } + } + }, + "CreateDhcpOptionsRequest":{ + "type":"structure", + "required":["DhcpConfigurations"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "DhcpConfigurations":{ + "shape":"NewDhcpConfigurationList", + "locationName":"dhcpConfiguration" + } + } + }, + "CreateDhcpOptionsResult":{ + "type":"structure", + "members":{ + "DhcpOptions":{ + "shape":"DhcpOptions", + "locationName":"dhcpOptions" + } + } + }, + "CreateFlowLogsRequest":{ + "type":"structure", + "required":[ + "ResourceIds", + "ResourceType", + "TrafficType", + "LogGroupName", + "DeliverLogsPermissionArn" + ], + "members":{ + "ResourceIds":{ + "shape":"ValueStringList", + "locationName":"ResourceId" + }, + "ResourceType":{"shape":"FlowLogsResourceType"}, + "TrafficType":{"shape":"TrafficType"}, + "LogGroupName":{"shape":"String"}, + "DeliverLogsPermissionArn":{"shape":"String"}, + "ClientToken":{"shape":"String"} + } + }, + "CreateFlowLogsResult":{ + "type":"structure", + "members":{ + "FlowLogIds":{ + "shape":"ValueStringList", + "locationName":"flowLogIdSet" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + }, + "Unsuccessful":{ + "shape":"UnsuccessfulItemSet", + "locationName":"unsuccessful" + } + } + }, + "CreateImageRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "Name" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Name":{ + "shape":"String", + "locationName":"name" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "NoReboot":{ + "shape":"Boolean", + "locationName":"noReboot" + }, + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappingRequestList", + "locationName":"blockDeviceMapping" + } + } + }, + "CreateImageResult":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"String", + "locationName":"imageId" + } + } + }, + "CreateInstanceExportTaskRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "Description":{ + "shape":"String", + "locationName":"description" + }, + "InstanceId":{ + "shape":"String", + 
"locationName":"instanceId" + }, + "TargetEnvironment":{ + "shape":"ExportEnvironment", + "locationName":"targetEnvironment" + }, + "ExportToS3Task":{ + "shape":"ExportToS3TaskSpecification", + "locationName":"exportToS3" + } + } + }, + "CreateInstanceExportTaskResult":{ + "type":"structure", + "members":{ + "ExportTask":{ + "shape":"ExportTask", + "locationName":"exportTask" + } + } + }, + "CreateInternetGatewayRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + } + } + }, + "CreateInternetGatewayResult":{ + "type":"structure", + "members":{ + "InternetGateway":{ + "shape":"InternetGateway", + "locationName":"internetGateway" + } + } + }, + "CreateKeyPairRequest":{ + "type":"structure", + "required":["KeyName"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "KeyName":{"shape":"String"} + } + }, + "CreateNatGatewayRequest":{ + "type":"structure", + "required":[ + "SubnetId", + "AllocationId" + ], + "members":{ + "SubnetId":{"shape":"String"}, + "AllocationId":{"shape":"String"}, + "ClientToken":{"shape":"String"} + } + }, + "CreateNatGatewayResult":{ + "type":"structure", + "members":{ + "NatGateway":{ + "shape":"NatGateway", + "locationName":"natGateway" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + } + } + }, + "CreateNetworkAclEntryRequest":{ + "type":"structure", + "required":[ + "NetworkAclId", + "RuleNumber", + "Protocol", + "RuleAction", + "Egress", + "CidrBlock" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkAclId":{ + "shape":"String", + "locationName":"networkAclId" + }, + "RuleNumber":{ + "shape":"Integer", + "locationName":"ruleNumber" + }, + "Protocol":{ + "shape":"String", + "locationName":"protocol" + }, + "RuleAction":{ + "shape":"RuleAction", + "locationName":"ruleAction" + }, + "Egress":{ + "shape":"Boolean", + "locationName":"egress" + }, + "CidrBlock":{ + "shape":"String", + "locationName":"cidrBlock" + }, + "IcmpTypeCode":{ + "shape":"IcmpTypeCode", + "locationName":"Icmp" + }, + "PortRange":{ + "shape":"PortRange", + "locationName":"portRange" + } + } + }, + "CreateNetworkAclRequest":{ + "type":"structure", + "required":["VpcId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + } + } + }, + "CreateNetworkAclResult":{ + "type":"structure", + "members":{ + "NetworkAcl":{ + "shape":"NetworkAcl", + "locationName":"networkAcl" + } + } + }, + "CreateNetworkInterfaceRequest":{ + "type":"structure", + "required":["SubnetId"], + "members":{ + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "Groups":{ + "shape":"SecurityGroupIdStringList", + "locationName":"SecurityGroupId" + }, + "PrivateIpAddresses":{ + "shape":"PrivateIpAddressSpecificationList", + "locationName":"privateIpAddresses" + }, + "SecondaryPrivateIpAddressCount":{ + "shape":"Integer", + "locationName":"secondaryPrivateIpAddressCount" + }, + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + } + } + }, + "CreateNetworkInterfaceResult":{ + "type":"structure", + "members":{ + "NetworkInterface":{ + "shape":"NetworkInterface", + "locationName":"networkInterface" + } + } + }, + "CreatePlacementGroupRequest":{ + "type":"structure", + "required":[ + "GroupName", + "Strategy" + ], + 
"members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupName":{ + "shape":"String", + "locationName":"groupName" + }, + "Strategy":{ + "shape":"PlacementStrategy", + "locationName":"strategy" + } + } + }, + "CreateReservedInstancesListingRequest":{ + "type":"structure", + "required":[ + "ReservedInstancesId", + "InstanceCount", + "PriceSchedules", + "ClientToken" + ], + "members":{ + "ReservedInstancesId":{ + "shape":"String", + "locationName":"reservedInstancesId" + }, + "InstanceCount":{ + "shape":"Integer", + "locationName":"instanceCount" + }, + "PriceSchedules":{ + "shape":"PriceScheduleSpecificationList", + "locationName":"priceSchedules" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + } + } + }, + "CreateReservedInstancesListingResult":{ + "type":"structure", + "members":{ + "ReservedInstancesListings":{ + "shape":"ReservedInstancesListingList", + "locationName":"reservedInstancesListingsSet" + } + } + }, + "CreateRouteRequest":{ + "type":"structure", + "required":[ + "RouteTableId", + "DestinationCidrBlock" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "RouteTableId":{ + "shape":"String", + "locationName":"routeTableId" + }, + "DestinationCidrBlock":{ + "shape":"String", + "locationName":"destinationCidrBlock" + }, + "GatewayId":{ + "shape":"String", + "locationName":"gatewayId" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "VpcPeeringConnectionId":{ + "shape":"String", + "locationName":"vpcPeeringConnectionId" + }, + "NatGatewayId":{ + "shape":"String", + "locationName":"natGatewayId" + } + } + }, + "CreateRouteResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "CreateRouteTableRequest":{ + "type":"structure", + "required":["VpcId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + } + } + }, + "CreateRouteTableResult":{ + "type":"structure", + "members":{ + "RouteTable":{ + "shape":"RouteTable", + "locationName":"routeTable" + } + } + }, + "CreateSecurityGroupRequest":{ + "type":"structure", + "required":[ + "GroupName", + "Description" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupName":{"shape":"String"}, + "Description":{ + "shape":"String", + "locationName":"GroupDescription" + }, + "VpcId":{"shape":"String"} + } + }, + "CreateSecurityGroupResult":{ + "type":"structure", + "members":{ + "GroupId":{ + "shape":"String", + "locationName":"groupId" + } + } + }, + "CreateSnapshotRequest":{ + "type":"structure", + "required":["VolumeId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeId":{"shape":"String"}, + "Description":{"shape":"String"} + } + }, + "CreateSpotDatafeedSubscriptionRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Bucket":{ + "shape":"String", + "locationName":"bucket" + }, + "Prefix":{ + "shape":"String", + "locationName":"prefix" + } + } + }, + "CreateSpotDatafeedSubscriptionResult":{ + "type":"structure", + "members":{ + "SpotDatafeedSubscription":{ + "shape":"SpotDatafeedSubscription", + "locationName":"spotDatafeedSubscription" + } + } + }, + "CreateSubnetRequest":{ + "type":"structure", + "required":[ + "VpcId", + 
"CidrBlock" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcId":{"shape":"String"}, + "CidrBlock":{"shape":"String"}, + "AvailabilityZone":{"shape":"String"} + } + }, + "CreateSubnetResult":{ + "type":"structure", + "members":{ + "Subnet":{ + "shape":"Subnet", + "locationName":"subnet" + } + } + }, + "CreateTagsRequest":{ + "type":"structure", + "required":[ + "Resources", + "Tags" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Resources":{ + "shape":"ResourceIdList", + "locationName":"ResourceId" + }, + "Tags":{ + "shape":"TagList", + "locationName":"Tag" + } + } + }, + "CreateVolumePermission":{ + "type":"structure", + "members":{ + "UserId":{ + "shape":"String", + "locationName":"userId" + }, + "Group":{ + "shape":"PermissionGroup", + "locationName":"group" + } + } + }, + "CreateVolumePermissionList":{ + "type":"list", + "member":{ + "shape":"CreateVolumePermission", + "locationName":"item" + } + }, + "CreateVolumePermissionModifications":{ + "type":"structure", + "members":{ + "Add":{"shape":"CreateVolumePermissionList"}, + "Remove":{"shape":"CreateVolumePermissionList"} + } + }, + "CreateVolumeRequest":{ + "type":"structure", + "required":["AvailabilityZone"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Size":{"shape":"Integer"}, + "SnapshotId":{"shape":"String"}, + "AvailabilityZone":{"shape":"String"}, + "VolumeType":{"shape":"VolumeType"}, + "Iops":{"shape":"Integer"}, + "Encrypted":{ + "shape":"Boolean", + "locationName":"encrypted" + }, + "KmsKeyId":{"shape":"String"} + } + }, + "CreateVpcEndpointRequest":{ + "type":"structure", + "required":[ + "VpcId", + "ServiceName" + ], + "members":{ + "DryRun":{"shape":"Boolean"}, + "VpcId":{"shape":"String"}, + "ServiceName":{"shape":"String"}, + "PolicyDocument":{"shape":"String"}, + "RouteTableIds":{ + "shape":"ValueStringList", + "locationName":"RouteTableId" + }, + "ClientToken":{"shape":"String"} + } + }, + "CreateVpcEndpointResult":{ + "type":"structure", + "members":{ + "VpcEndpoint":{ + "shape":"VpcEndpoint", + "locationName":"vpcEndpoint" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + } + } + }, + "CreateVpcPeeringConnectionRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "PeerVpcId":{ + "shape":"String", + "locationName":"peerVpcId" + }, + "PeerOwnerId":{ + "shape":"String", + "locationName":"peerOwnerId" + } + } + }, + "CreateVpcPeeringConnectionResult":{ + "type":"structure", + "members":{ + "VpcPeeringConnection":{ + "shape":"VpcPeeringConnection", + "locationName":"vpcPeeringConnection" + } + } + }, + "CreateVpcRequest":{ + "type":"structure", + "required":["CidrBlock"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "CidrBlock":{"shape":"String"}, + "InstanceTenancy":{ + "shape":"Tenancy", + "locationName":"instanceTenancy" + } + } + }, + "CreateVpcResult":{ + "type":"structure", + "members":{ + "Vpc":{ + "shape":"Vpc", + "locationName":"vpc" + } + } + }, + "CreateVpnConnectionRequest":{ + "type":"structure", + "required":[ + "Type", + "CustomerGatewayId", + "VpnGatewayId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Type":{"shape":"String"}, + "CustomerGatewayId":{"shape":"String"}, + "VpnGatewayId":{"shape":"String"}, + "Options":{ + "shape":"VpnConnectionOptionsSpecification", 
+ "locationName":"options" + } + } + }, + "CreateVpnConnectionResult":{ + "type":"structure", + "members":{ + "VpnConnection":{ + "shape":"VpnConnection", + "locationName":"vpnConnection" + } + } + }, + "CreateVpnConnectionRouteRequest":{ + "type":"structure", + "required":[ + "VpnConnectionId", + "DestinationCidrBlock" + ], + "members":{ + "VpnConnectionId":{"shape":"String"}, + "DestinationCidrBlock":{"shape":"String"} + } + }, + "CreateVpnGatewayRequest":{ + "type":"structure", + "required":["Type"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Type":{"shape":"GatewayType"}, + "AvailabilityZone":{"shape":"String"} + } + }, + "CreateVpnGatewayResult":{ + "type":"structure", + "members":{ + "VpnGateway":{ + "shape":"VpnGateway", + "locationName":"vpnGateway" + } + } + }, + "CurrencyCodeValues":{ + "type":"string", + "enum":["USD"] + }, + "CustomerGateway":{ + "type":"structure", + "members":{ + "CustomerGatewayId":{ + "shape":"String", + "locationName":"customerGatewayId" + }, + "State":{ + "shape":"String", + "locationName":"state" + }, + "Type":{ + "shape":"String", + "locationName":"type" + }, + "IpAddress":{ + "shape":"String", + "locationName":"ipAddress" + }, + "BgpAsn":{ + "shape":"String", + "locationName":"bgpAsn" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "CustomerGatewayIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"CustomerGatewayId" + } + }, + "CustomerGatewayList":{ + "type":"list", + "member":{ + "shape":"CustomerGateway", + "locationName":"item" + } + }, + "DatafeedSubscriptionState":{ + "type":"string", + "enum":[ + "Active", + "Inactive" + ] + }, + "DateTime":{"type":"timestamp"}, + "DeleteCustomerGatewayRequest":{ + "type":"structure", + "required":["CustomerGatewayId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "CustomerGatewayId":{"shape":"String"} + } + }, + "DeleteDhcpOptionsRequest":{ + "type":"structure", + "required":["DhcpOptionsId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "DhcpOptionsId":{"shape":"String"} + } + }, + "DeleteFlowLogsRequest":{ + "type":"structure", + "required":["FlowLogIds"], + "members":{ + "FlowLogIds":{ + "shape":"ValueStringList", + "locationName":"FlowLogId" + } + } + }, + "DeleteFlowLogsResult":{ + "type":"structure", + "members":{ + "Unsuccessful":{ + "shape":"UnsuccessfulItemSet", + "locationName":"unsuccessful" + } + } + }, + "DeleteInternetGatewayRequest":{ + "type":"structure", + "required":["InternetGatewayId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InternetGatewayId":{ + "shape":"String", + "locationName":"internetGatewayId" + } + } + }, + "DeleteKeyPairRequest":{ + "type":"structure", + "required":["KeyName"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "KeyName":{"shape":"String"} + } + }, + "DeleteNatGatewayRequest":{ + "type":"structure", + "required":["NatGatewayId"], + "members":{ + "NatGatewayId":{"shape":"String"} + } + }, + "DeleteNatGatewayResult":{ + "type":"structure", + "members":{ + "NatGatewayId":{ + "shape":"String", + "locationName":"natGatewayId" + } + } + }, + "DeleteNetworkAclEntryRequest":{ + "type":"structure", + "required":[ + "NetworkAclId", + "RuleNumber", + "Egress" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkAclId":{ + "shape":"String", + "locationName":"networkAclId" + }, + "RuleNumber":{ 
+ "shape":"Integer", + "locationName":"ruleNumber" + }, + "Egress":{ + "shape":"Boolean", + "locationName":"egress" + } + } + }, + "DeleteNetworkAclRequest":{ + "type":"structure", + "required":["NetworkAclId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkAclId":{ + "shape":"String", + "locationName":"networkAclId" + } + } + }, + "DeleteNetworkInterfaceRequest":{ + "type":"structure", + "required":["NetworkInterfaceId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + } + } + }, + "DeletePlacementGroupRequest":{ + "type":"structure", + "required":["GroupName"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupName":{ + "shape":"String", + "locationName":"groupName" + } + } + }, + "DeleteRouteRequest":{ + "type":"structure", + "required":[ + "RouteTableId", + "DestinationCidrBlock" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "RouteTableId":{ + "shape":"String", + "locationName":"routeTableId" + }, + "DestinationCidrBlock":{ + "shape":"String", + "locationName":"destinationCidrBlock" + } + } + }, + "DeleteRouteTableRequest":{ + "type":"structure", + "required":["RouteTableId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "RouteTableId":{ + "shape":"String", + "locationName":"routeTableId" + } + } + }, + "DeleteSecurityGroupRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupName":{"shape":"String"}, + "GroupId":{"shape":"String"} + } + }, + "DeleteSnapshotRequest":{ + "type":"structure", + "required":["SnapshotId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SnapshotId":{"shape":"String"} + } + }, + "DeleteSpotDatafeedSubscriptionRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + } + } + }, + "DeleteSubnetRequest":{ + "type":"structure", + "required":["SubnetId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SubnetId":{"shape":"String"} + } + }, + "DeleteTagsRequest":{ + "type":"structure", + "required":["Resources"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Resources":{ + "shape":"ResourceIdList", + "locationName":"resourceId" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tag" + } + } + }, + "DeleteVolumeRequest":{ + "type":"structure", + "required":["VolumeId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeId":{"shape":"String"} + } + }, + "DeleteVpcEndpointsRequest":{ + "type":"structure", + "required":["VpcEndpointIds"], + "members":{ + "DryRun":{"shape":"Boolean"}, + "VpcEndpointIds":{ + "shape":"ValueStringList", + "locationName":"VpcEndpointId" + } + } + }, + "DeleteVpcEndpointsResult":{ + "type":"structure", + "members":{ + "Unsuccessful":{ + "shape":"UnsuccessfulItemSet", + "locationName":"unsuccessful" + } + } + }, + "DeleteVpcPeeringConnectionRequest":{ + "type":"structure", + "required":["VpcPeeringConnectionId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcPeeringConnectionId":{ + "shape":"String", + "locationName":"vpcPeeringConnectionId" + } + } + }, + "DeleteVpcPeeringConnectionResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + 
"locationName":"return" + } + } + }, + "DeleteVpcRequest":{ + "type":"structure", + "required":["VpcId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcId":{"shape":"String"} + } + }, + "DeleteVpnConnectionRequest":{ + "type":"structure", + "required":["VpnConnectionId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpnConnectionId":{"shape":"String"} + } + }, + "DeleteVpnConnectionRouteRequest":{ + "type":"structure", + "required":[ + "VpnConnectionId", + "DestinationCidrBlock" + ], + "members":{ + "VpnConnectionId":{"shape":"String"}, + "DestinationCidrBlock":{"shape":"String"} + } + }, + "DeleteVpnGatewayRequest":{ + "type":"structure", + "required":["VpnGatewayId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpnGatewayId":{"shape":"String"} + } + }, + "DeregisterImageRequest":{ + "type":"structure", + "required":["ImageId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ImageId":{"shape":"String"} + } + }, + "DescribeAccountAttributesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "AttributeNames":{ + "shape":"AccountAttributeNameStringList", + "locationName":"attributeName" + } + } + }, + "DescribeAccountAttributesResult":{ + "type":"structure", + "members":{ + "AccountAttributes":{ + "shape":"AccountAttributeList", + "locationName":"accountAttributeSet" + } + } + }, + "DescribeAddressesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "PublicIps":{ + "shape":"PublicIpStringList", + "locationName":"PublicIp" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "AllocationIds":{ + "shape":"AllocationIdList", + "locationName":"AllocationId" + } + } + }, + "DescribeAddressesResult":{ + "type":"structure", + "members":{ + "Addresses":{ + "shape":"AddressList", + "locationName":"addressesSet" + } + } + }, + "DescribeAvailabilityZonesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ZoneNames":{ + "shape":"ZoneNameStringList", + "locationName":"ZoneName" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeAvailabilityZonesResult":{ + "type":"structure", + "members":{ + "AvailabilityZones":{ + "shape":"AvailabilityZoneList", + "locationName":"availabilityZoneInfo" + } + } + }, + "DescribeBundleTasksRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "BundleIds":{ + "shape":"BundleIdStringList", + "locationName":"BundleId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeBundleTasksResult":{ + "type":"structure", + "members":{ + "BundleTasks":{ + "shape":"BundleTaskList", + "locationName":"bundleInstanceTasksSet" + } + } + }, + "DescribeClassicLinkInstancesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + } + } + }, + "DescribeClassicLinkInstancesResult":{ + "type":"structure", + "members":{ + "Instances":{ + 
"shape":"ClassicLinkInstanceList", + "locationName":"instancesSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeConversionTaskList":{ + "type":"list", + "member":{ + "shape":"ConversionTask", + "locationName":"item" + } + }, + "DescribeConversionTasksRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"filter" + }, + "ConversionTaskIds":{ + "shape":"ConversionIdStringList", + "locationName":"conversionTaskId" + } + } + }, + "DescribeConversionTasksResult":{ + "type":"structure", + "members":{ + "ConversionTasks":{ + "shape":"DescribeConversionTaskList", + "locationName":"conversionTasks" + } + } + }, + "DescribeCustomerGatewaysRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "CustomerGatewayIds":{ + "shape":"CustomerGatewayIdStringList", + "locationName":"CustomerGatewayId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeCustomerGatewaysResult":{ + "type":"structure", + "members":{ + "CustomerGateways":{ + "shape":"CustomerGatewayList", + "locationName":"customerGatewaySet" + } + } + }, + "DescribeDhcpOptionsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "DhcpOptionsIds":{ + "shape":"DhcpOptionsIdStringList", + "locationName":"DhcpOptionsId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeDhcpOptionsResult":{ + "type":"structure", + "members":{ + "DhcpOptions":{ + "shape":"DhcpOptionsList", + "locationName":"dhcpOptionsSet" + } + } + }, + "DescribeExportTasksRequest":{ + "type":"structure", + "members":{ + "ExportTaskIds":{ + "shape":"ExportTaskIdStringList", + "locationName":"exportTaskId" + } + } + }, + "DescribeExportTasksResult":{ + "type":"structure", + "members":{ + "ExportTasks":{ + "shape":"ExportTaskList", + "locationName":"exportTaskSet" + } + } + }, + "DescribeFlowLogsRequest":{ + "type":"structure", + "members":{ + "FlowLogIds":{ + "shape":"ValueStringList", + "locationName":"FlowLogId" + }, + "Filter":{"shape":"FilterList"}, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"Integer"} + } + }, + "DescribeFlowLogsResult":{ + "type":"structure", + "members":{ + "FlowLogs":{ + "shape":"FlowLogSet", + "locationName":"flowLogSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeHostsRequest":{ + "type":"structure", + "members":{ + "HostIds":{ + "shape":"RequestHostIdList", + "locationName":"hostId" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + }, + "Filter":{ + "shape":"FilterList", + "locationName":"filter" + } + } + }, + "DescribeHostsResult":{ + "type":"structure", + "members":{ + "Hosts":{ + "shape":"HostList", + "locationName":"hostSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeIdFormatRequest":{ + "type":"structure", + "members":{ + "Resource":{"shape":"String"} + } + }, + "DescribeIdFormatResult":{ + "type":"structure", + "members":{ + "Statuses":{ + "shape":"IdFormatList", + "locationName":"statusSet" + } + } + }, + "DescribeImageAttributeRequest":{ + "type":"structure", + "required":[ + "ImageId", + "Attribute" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, 
+ "ImageId":{"shape":"String"}, + "Attribute":{"shape":"ImageAttributeName"} + } + }, + "DescribeImagesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ImageIds":{ + "shape":"ImageIdStringList", + "locationName":"ImageId" + }, + "Owners":{ + "shape":"OwnerStringList", + "locationName":"Owner" + }, + "ExecutableUsers":{ + "shape":"ExecutableByStringList", + "locationName":"ExecutableBy" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeImagesResult":{ + "type":"structure", + "members":{ + "Images":{ + "shape":"ImageList", + "locationName":"imagesSet" + } + } + }, + "DescribeImportImageTasksRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "ImportTaskIds":{ + "shape":"ImportTaskIdList", + "locationName":"ImportTaskId" + }, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"Integer"}, + "Filters":{"shape":"FilterList"} + } + }, + "DescribeImportImageTasksResult":{ + "type":"structure", + "members":{ + "ImportImageTasks":{ + "shape":"ImportImageTaskList", + "locationName":"importImageTaskSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeImportSnapshotTasksRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "ImportTaskIds":{ + "shape":"ImportTaskIdList", + "locationName":"ImportTaskId" + }, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"Integer"}, + "Filters":{"shape":"FilterList"} + } + }, + "DescribeImportSnapshotTasksResult":{ + "type":"structure", + "members":{ + "ImportSnapshotTasks":{ + "shape":"ImportSnapshotTaskList", + "locationName":"importSnapshotTaskSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeInstanceAttributeRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "Attribute" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Attribute":{ + "shape":"InstanceAttributeName", + "locationName":"attribute" + } + } + }, + "DescribeInstanceStatusRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"Integer"}, + "IncludeAllInstances":{ + "shape":"Boolean", + "locationName":"includeAllInstances" + } + } + }, + "DescribeInstanceStatusResult":{ + "type":"structure", + "members":{ + "InstanceStatuses":{ + "shape":"InstanceStatusList", + "locationName":"instanceStatusSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeInstancesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + } + } + }, + "DescribeInstancesResult":{ + "type":"structure", + "members":{ + "Reservations":{ + "shape":"ReservationList", + "locationName":"reservationSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + 
"DescribeInternetGatewaysRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InternetGatewayIds":{ + "shape":"ValueStringList", + "locationName":"internetGatewayId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeInternetGatewaysResult":{ + "type":"structure", + "members":{ + "InternetGateways":{ + "shape":"InternetGatewayList", + "locationName":"internetGatewaySet" + } + } + }, + "DescribeKeyPairsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "KeyNames":{ + "shape":"KeyNameStringList", + "locationName":"KeyName" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeKeyPairsResult":{ + "type":"structure", + "members":{ + "KeyPairs":{ + "shape":"KeyPairList", + "locationName":"keySet" + } + } + }, + "DescribeMovingAddressesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "PublicIps":{ + "shape":"ValueStringList", + "locationName":"publicIp" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"filter" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + } + } + }, + "DescribeMovingAddressesResult":{ + "type":"structure", + "members":{ + "MovingAddressStatuses":{ + "shape":"MovingAddressStatusSet", + "locationName":"movingAddressStatusSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeNatGatewaysRequest":{ + "type":"structure", + "members":{ + "NatGatewayIds":{ + "shape":"ValueStringList", + "locationName":"NatGatewayId" + }, + "Filter":{"shape":"FilterList"}, + "MaxResults":{"shape":"Integer"}, + "NextToken":{"shape":"String"} + } + }, + "DescribeNatGatewaysResult":{ + "type":"structure", + "members":{ + "NatGateways":{ + "shape":"NatGatewayList", + "locationName":"natGatewaySet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeNetworkAclsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkAclIds":{ + "shape":"ValueStringList", + "locationName":"NetworkAclId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeNetworkAclsResult":{ + "type":"structure", + "members":{ + "NetworkAcls":{ + "shape":"NetworkAclList", + "locationName":"networkAclSet" + } + } + }, + "DescribeNetworkInterfaceAttributeRequest":{ + "type":"structure", + "required":["NetworkInterfaceId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "Attribute":{ + "shape":"NetworkInterfaceAttribute", + "locationName":"attribute" + } + } + }, + "DescribeNetworkInterfaceAttributeResult":{ + "type":"structure", + "members":{ + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "Description":{ + "shape":"AttributeValue", + "locationName":"description" + }, + "SourceDestCheck":{ + "shape":"AttributeBooleanValue", + "locationName":"sourceDestCheck" + }, + "Groups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + }, + "Attachment":{ + "shape":"NetworkInterfaceAttachment", + "locationName":"attachment" + } + } + }, + "DescribeNetworkInterfacesRequest":{ + "type":"structure", + "members":{ + 
"DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkInterfaceIds":{ + "shape":"NetworkInterfaceIdList", + "locationName":"NetworkInterfaceId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"filter" + } + } + }, + "DescribeNetworkInterfacesResult":{ + "type":"structure", + "members":{ + "NetworkInterfaces":{ + "shape":"NetworkInterfaceList", + "locationName":"networkInterfaceSet" + } + } + }, + "DescribePlacementGroupsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupNames":{ + "shape":"PlacementGroupStringList", + "locationName":"groupName" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribePlacementGroupsResult":{ + "type":"structure", + "members":{ + "PlacementGroups":{ + "shape":"PlacementGroupList", + "locationName":"placementGroupSet" + } + } + }, + "DescribePrefixListsRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "PrefixListIds":{ + "shape":"ValueStringList", + "locationName":"PrefixListId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "MaxResults":{"shape":"Integer"}, + "NextToken":{"shape":"String"} + } + }, + "DescribePrefixListsResult":{ + "type":"structure", + "members":{ + "PrefixLists":{ + "shape":"PrefixListSet", + "locationName":"prefixListSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeRegionsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "RegionNames":{ + "shape":"RegionNameStringList", + "locationName":"RegionName" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeRegionsResult":{ + "type":"structure", + "members":{ + "Regions":{ + "shape":"RegionList", + "locationName":"regionInfo" + } + } + }, + "DescribeReservedInstancesListingsRequest":{ + "type":"structure", + "members":{ + "ReservedInstancesId":{ + "shape":"String", + "locationName":"reservedInstancesId" + }, + "ReservedInstancesListingId":{ + "shape":"String", + "locationName":"reservedInstancesListingId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"filters" + } + } + }, + "DescribeReservedInstancesListingsResult":{ + "type":"structure", + "members":{ + "ReservedInstancesListings":{ + "shape":"ReservedInstancesListingList", + "locationName":"reservedInstancesListingsSet" + } + } + }, + "DescribeReservedInstancesModificationsRequest":{ + "type":"structure", + "members":{ + "ReservedInstancesModificationIds":{ + "shape":"ReservedInstancesModificationIdStringList", + "locationName":"ReservedInstancesModificationId" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeReservedInstancesModificationsResult":{ + "type":"structure", + "members":{ + "ReservedInstancesModifications":{ + "shape":"ReservedInstancesModificationList", + "locationName":"reservedInstancesModificationsSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeReservedInstancesOfferingsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ReservedInstancesOfferingIds":{ + "shape":"ReservedInstancesOfferingIdStringList", + "locationName":"ReservedInstancesOfferingId" + }, + "InstanceType":{"shape":"InstanceType"}, + "AvailabilityZone":{"shape":"String"}, + 
"ProductDescription":{"shape":"RIProductDescription"}, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "InstanceTenancy":{ + "shape":"Tenancy", + "locationName":"instanceTenancy" + }, + "OfferingType":{ + "shape":"OfferingTypeValues", + "locationName":"offeringType" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + }, + "IncludeMarketplace":{"shape":"Boolean"}, + "MinDuration":{"shape":"Long"}, + "MaxDuration":{"shape":"Long"}, + "MaxInstanceCount":{"shape":"Integer"} + } + }, + "DescribeReservedInstancesOfferingsResult":{ + "type":"structure", + "members":{ + "ReservedInstancesOfferings":{ + "shape":"ReservedInstancesOfferingList", + "locationName":"reservedInstancesOfferingsSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeReservedInstancesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ReservedInstancesIds":{ + "shape":"ReservedInstancesIdStringList", + "locationName":"ReservedInstancesId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "OfferingType":{ + "shape":"OfferingTypeValues", + "locationName":"offeringType" + } + } + }, + "DescribeReservedInstancesResult":{ + "type":"structure", + "members":{ + "ReservedInstances":{ + "shape":"ReservedInstancesList", + "locationName":"reservedInstancesSet" + } + } + }, + "DescribeRouteTablesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "RouteTableIds":{ + "shape":"ValueStringList", + "locationName":"RouteTableId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeRouteTablesResult":{ + "type":"structure", + "members":{ + "RouteTables":{ + "shape":"RouteTableList", + "locationName":"routeTableSet" + } + } + }, + "DescribeScheduledInstanceAvailabilityRequest":{ + "type":"structure", + "required":[ + "Recurrence", + "FirstSlotStartTimeRange" + ], + "members":{ + "DryRun":{"shape":"Boolean"}, + "Recurrence":{"shape":"ScheduledInstanceRecurrenceRequest"}, + "FirstSlotStartTimeRange":{"shape":"SlotDateTimeRangeRequest"}, + "MinSlotDurationInHours":{"shape":"Integer"}, + "MaxSlotDurationInHours":{"shape":"Integer"}, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"Integer"}, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeScheduledInstanceAvailabilityResult":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "ScheduledInstanceAvailabilitySet":{ + "shape":"ScheduledInstanceAvailabilitySet", + "locationName":"scheduledInstanceAvailabilitySet" + } + } + }, + "DescribeScheduledInstancesRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "ScheduledInstanceIds":{ + "shape":"ScheduledInstanceIdRequestSet", + "locationName":"ScheduledInstanceId" + }, + "SlotStartTimeRange":{"shape":"SlotStartTimeRangeRequest"}, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"Integer"}, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeScheduledInstancesResult":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "ScheduledInstanceSet":{ + "shape":"ScheduledInstanceSet", + "locationName":"scheduledInstanceSet" + } + } + }, + "DescribeSecurityGroupReferencesRequest":{ + 
"type":"structure", + "required":["GroupId"], + "members":{ + "DryRun":{"shape":"Boolean"}, + "GroupId":{"shape":"GroupIds"} + } + }, + "DescribeSecurityGroupReferencesResult":{ + "type":"structure", + "members":{ + "SecurityGroupReferenceSet":{ + "shape":"SecurityGroupReferences", + "locationName":"securityGroupReferenceSet" + } + } + }, + "DescribeSecurityGroupsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupNames":{ + "shape":"GroupNameStringList", + "locationName":"GroupName" + }, + "GroupIds":{ + "shape":"GroupIdStringList", + "locationName":"GroupId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeSecurityGroupsResult":{ + "type":"structure", + "members":{ + "SecurityGroups":{ + "shape":"SecurityGroupList", + "locationName":"securityGroupInfo" + } + } + }, + "DescribeSnapshotAttributeRequest":{ + "type":"structure", + "required":[ + "SnapshotId", + "Attribute" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SnapshotId":{"shape":"String"}, + "Attribute":{"shape":"SnapshotAttributeName"} + } + }, + "DescribeSnapshotAttributeResult":{ + "type":"structure", + "members":{ + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + }, + "CreateVolumePermissions":{ + "shape":"CreateVolumePermissionList", + "locationName":"createVolumePermission" + }, + "ProductCodes":{ + "shape":"ProductCodeList", + "locationName":"productCodes" + } + } + }, + "DescribeSnapshotsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SnapshotIds":{ + "shape":"SnapshotIdStringList", + "locationName":"SnapshotId" + }, + "OwnerIds":{ + "shape":"OwnerStringList", + "locationName":"Owner" + }, + "RestorableByUserIds":{ + "shape":"RestorableByStringList", + "locationName":"RestorableBy" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"Integer"} + } + }, + "DescribeSnapshotsResult":{ + "type":"structure", + "members":{ + "Snapshots":{ + "shape":"SnapshotList", + "locationName":"snapshotSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeSpotDatafeedSubscriptionRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + } + } + }, + "DescribeSpotDatafeedSubscriptionResult":{ + "type":"structure", + "members":{ + "SpotDatafeedSubscription":{ + "shape":"SpotDatafeedSubscription", + "locationName":"spotDatafeedSubscription" + } + } + }, + "DescribeSpotFleetInstancesRequest":{ + "type":"structure", + "required":["SpotFleetRequestId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + } + } + }, + "DescribeSpotFleetInstancesResponse":{ + "type":"structure", + "required":[ + "SpotFleetRequestId", + "ActiveInstances" + ], + "members":{ + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + }, + "ActiveInstances":{ + "shape":"ActiveInstanceSet", + "locationName":"activeInstanceSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeSpotFleetRequestHistoryRequest":{ + "type":"structure", + "required":[ + 
"SpotFleetRequestId", + "StartTime" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + }, + "EventType":{ + "shape":"EventType", + "locationName":"eventType" + }, + "StartTime":{ + "shape":"DateTime", + "locationName":"startTime" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + } + } + }, + "DescribeSpotFleetRequestHistoryResponse":{ + "type":"structure", + "required":[ + "SpotFleetRequestId", + "StartTime", + "LastEvaluatedTime", + "HistoryRecords" + ], + "members":{ + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + }, + "StartTime":{ + "shape":"DateTime", + "locationName":"startTime" + }, + "LastEvaluatedTime":{ + "shape":"DateTime", + "locationName":"lastEvaluatedTime" + }, + "HistoryRecords":{ + "shape":"HistoryRecords", + "locationName":"historyRecordSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeSpotFleetRequestsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SpotFleetRequestIds":{ + "shape":"ValueStringList", + "locationName":"spotFleetRequestId" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + } + } + }, + "DescribeSpotFleetRequestsResponse":{ + "type":"structure", + "required":["SpotFleetRequestConfigs"], + "members":{ + "SpotFleetRequestConfigs":{ + "shape":"SpotFleetRequestConfigSet", + "locationName":"spotFleetRequestConfigSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeSpotInstanceRequestsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SpotInstanceRequestIds":{ + "shape":"SpotInstanceRequestIdList", + "locationName":"SpotInstanceRequestId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeSpotInstanceRequestsResult":{ + "type":"structure", + "members":{ + "SpotInstanceRequests":{ + "shape":"SpotInstanceRequestList", + "locationName":"spotInstanceRequestSet" + } + } + }, + "DescribeSpotPriceHistoryRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "StartTime":{ + "shape":"DateTime", + "locationName":"startTime" + }, + "EndTime":{ + "shape":"DateTime", + "locationName":"endTime" + }, + "InstanceTypes":{ + "shape":"InstanceTypeList", + "locationName":"InstanceType" + }, + "ProductDescriptions":{ + "shape":"ProductDescriptionList", + "locationName":"ProductDescription" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeSpotPriceHistoryResult":{ + "type":"structure", + "members":{ + "SpotPriceHistory":{ + "shape":"SpotPriceHistoryList", + "locationName":"spotPriceHistorySet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeStaleSecurityGroupsRequest":{ + "type":"structure", + "required":["VpcId"], + "members":{ + "DryRun":{"shape":"Boolean"}, + "VpcId":{"shape":"String"}, + 
"MaxResults":{"shape":"MaxResults"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeStaleSecurityGroupsResult":{ + "type":"structure", + "members":{ + "StaleSecurityGroupSet":{ + "shape":"StaleSecurityGroupSet", + "locationName":"staleSecurityGroupSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeSubnetsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SubnetIds":{ + "shape":"SubnetIdStringList", + "locationName":"SubnetId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeSubnetsResult":{ + "type":"structure", + "members":{ + "Subnets":{ + "shape":"SubnetList", + "locationName":"subnetSet" + } + } + }, + "DescribeTagsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeTagsResult":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagDescriptionList", + "locationName":"tagSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeVolumeAttributeRequest":{ + "type":"structure", + "required":["VolumeId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeId":{"shape":"String"}, + "Attribute":{"shape":"VolumeAttributeName"} + } + }, + "DescribeVolumeAttributeResult":{ + "type":"structure", + "members":{ + "VolumeId":{ + "shape":"String", + "locationName":"volumeId" + }, + "AutoEnableIO":{ + "shape":"AttributeBooleanValue", + "locationName":"autoEnableIO" + }, + "ProductCodes":{ + "shape":"ProductCodeList", + "locationName":"productCodes" + } + } + }, + "DescribeVolumeStatusRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeIds":{ + "shape":"VolumeIdStringList", + "locationName":"VolumeId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"Integer"} + } + }, + "DescribeVolumeStatusResult":{ + "type":"structure", + "members":{ + "VolumeStatuses":{ + "shape":"VolumeStatusList", + "locationName":"volumeStatusSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeVolumesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeIds":{ + "shape":"VolumeIdStringList", + "locationName":"VolumeId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + } + } + }, + "DescribeVolumesResult":{ + "type":"structure", + "members":{ + "Volumes":{ + "shape":"VolumeList", + "locationName":"volumeSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeVpcAttributeRequest":{ + "type":"structure", + "required":[ + "VpcId", + "Attribute" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcId":{"shape":"String"}, + "Attribute":{"shape":"VpcAttributeName"} + } + }, + "DescribeVpcAttributeResult":{ + "type":"structure", + "members":{ + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + 
"EnableDnsSupport":{ + "shape":"AttributeBooleanValue", + "locationName":"enableDnsSupport" + }, + "EnableDnsHostnames":{ + "shape":"AttributeBooleanValue", + "locationName":"enableDnsHostnames" + } + } + }, + "DescribeVpcClassicLinkDnsSupportRequest":{ + "type":"structure", + "members":{ + "VpcIds":{"shape":"VpcClassicLinkIdList"}, + "MaxResults":{ + "shape":"MaxResults", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"NextToken", + "locationName":"nextToken" + } + } + }, + "DescribeVpcClassicLinkDnsSupportResult":{ + "type":"structure", + "members":{ + "Vpcs":{ + "shape":"ClassicLinkDnsSupportList", + "locationName":"vpcs" + }, + "NextToken":{ + "shape":"NextToken", + "locationName":"nextToken" + } + } + }, + "DescribeVpcClassicLinkRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcIds":{ + "shape":"VpcClassicLinkIdList", + "locationName":"VpcId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeVpcClassicLinkResult":{ + "type":"structure", + "members":{ + "Vpcs":{ + "shape":"VpcClassicLinkList", + "locationName":"vpcSet" + } + } + }, + "DescribeVpcEndpointServicesRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "MaxResults":{"shape":"Integer"}, + "NextToken":{"shape":"String"} + } + }, + "DescribeVpcEndpointServicesResult":{ + "type":"structure", + "members":{ + "ServiceNames":{ + "shape":"ValueStringList", + "locationName":"serviceNameSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeVpcEndpointsRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "VpcEndpointIds":{ + "shape":"ValueStringList", + "locationName":"VpcEndpointId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "MaxResults":{"shape":"Integer"}, + "NextToken":{"shape":"String"} + } + }, + "DescribeVpcEndpointsResult":{ + "type":"structure", + "members":{ + "VpcEndpoints":{ + "shape":"VpcEndpointSet", + "locationName":"vpcEndpointSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeVpcPeeringConnectionsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcPeeringConnectionIds":{ + "shape":"ValueStringList", + "locationName":"VpcPeeringConnectionId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeVpcPeeringConnectionsResult":{ + "type":"structure", + "members":{ + "VpcPeeringConnections":{ + "shape":"VpcPeeringConnectionList", + "locationName":"vpcPeeringConnectionSet" + } + } + }, + "DescribeVpcsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcIds":{ + "shape":"VpcIdStringList", + "locationName":"VpcId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeVpcsResult":{ + "type":"structure", + "members":{ + "Vpcs":{ + "shape":"VpcList", + "locationName":"vpcSet" + } + } + }, + "DescribeVpnConnectionsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpnConnectionIds":{ + "shape":"VpnConnectionIdStringList", + "locationName":"VpnConnectionId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeVpnConnectionsResult":{ + "type":"structure", + "members":{ + "VpnConnections":{ + "shape":"VpnConnectionList", + 
"locationName":"vpnConnectionSet" + } + } + }, + "DescribeVpnGatewaysRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpnGatewayIds":{ + "shape":"VpnGatewayIdStringList", + "locationName":"VpnGatewayId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeVpnGatewaysResult":{ + "type":"structure", + "members":{ + "VpnGateways":{ + "shape":"VpnGatewayList", + "locationName":"vpnGatewaySet" + } + } + }, + "DetachClassicLinkVpcRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "VpcId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + } + } + }, + "DetachClassicLinkVpcResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "DetachInternetGatewayRequest":{ + "type":"structure", + "required":[ + "InternetGatewayId", + "VpcId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InternetGatewayId":{ + "shape":"String", + "locationName":"internetGatewayId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + } + } + }, + "DetachNetworkInterfaceRequest":{ + "type":"structure", + "required":["AttachmentId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "AttachmentId":{ + "shape":"String", + "locationName":"attachmentId" + }, + "Force":{ + "shape":"Boolean", + "locationName":"force" + } + } + }, + "DetachVolumeRequest":{ + "type":"structure", + "required":["VolumeId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeId":{"shape":"String"}, + "InstanceId":{"shape":"String"}, + "Device":{"shape":"String"}, + "Force":{"shape":"Boolean"} + } + }, + "DetachVpnGatewayRequest":{ + "type":"structure", + "required":[ + "VpnGatewayId", + "VpcId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpnGatewayId":{"shape":"String"}, + "VpcId":{"shape":"String"} + } + }, + "DeviceType":{ + "type":"string", + "enum":[ + "ebs", + "instance-store" + ] + }, + "DhcpConfiguration":{ + "type":"structure", + "members":{ + "Key":{ + "shape":"String", + "locationName":"key" + }, + "Values":{ + "shape":"DhcpConfigurationValueList", + "locationName":"valueSet" + } + } + }, + "DhcpConfigurationList":{ + "type":"list", + "member":{ + "shape":"DhcpConfiguration", + "locationName":"item" + } + }, + "DhcpConfigurationValueList":{ + "type":"list", + "member":{ + "shape":"AttributeValue", + "locationName":"item" + } + }, + "DhcpOptions":{ + "type":"structure", + "members":{ + "DhcpOptionsId":{ + "shape":"String", + "locationName":"dhcpOptionsId" + }, + "DhcpConfigurations":{ + "shape":"DhcpConfigurationList", + "locationName":"dhcpConfigurationSet" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "DhcpOptionsIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"DhcpOptionsId" + } + }, + "DhcpOptionsList":{ + "type":"list", + "member":{ + "shape":"DhcpOptions", + "locationName":"item" + } + }, + "DisableVgwRoutePropagationRequest":{ + "type":"structure", + "required":[ + "RouteTableId", + "GatewayId" + ], + "members":{ + "RouteTableId":{"shape":"String"}, + "GatewayId":{"shape":"String"} + } + }, + "DisableVpcClassicLinkDnsSupportRequest":{ + "type":"structure", + "members":{ + 
"VpcId":{"shape":"String"} + } + }, + "DisableVpcClassicLinkDnsSupportResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "DisableVpcClassicLinkRequest":{ + "type":"structure", + "required":["VpcId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + } + } + }, + "DisableVpcClassicLinkResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "DisassociateAddressRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "PublicIp":{"shape":"String"}, + "AssociationId":{"shape":"String"} + } + }, + "DisassociateRouteTableRequest":{ + "type":"structure", + "required":["AssociationId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "AssociationId":{ + "shape":"String", + "locationName":"associationId" + } + } + }, + "DiskImage":{ + "type":"structure", + "members":{ + "Image":{"shape":"DiskImageDetail"}, + "Description":{"shape":"String"}, + "Volume":{"shape":"VolumeDetail"} + } + }, + "DiskImageDescription":{ + "type":"structure", + "required":[ + "Format", + "Size", + "ImportManifestUrl" + ], + "members":{ + "Format":{ + "shape":"DiskImageFormat", + "locationName":"format" + }, + "Size":{ + "shape":"Long", + "locationName":"size" + }, + "ImportManifestUrl":{ + "shape":"String", + "locationName":"importManifestUrl" + }, + "Checksum":{ + "shape":"String", + "locationName":"checksum" + } + } + }, + "DiskImageDetail":{ + "type":"structure", + "required":[ + "Format", + "Bytes", + "ImportManifestUrl" + ], + "members":{ + "Format":{ + "shape":"DiskImageFormat", + "locationName":"format" + }, + "Bytes":{ + "shape":"Long", + "locationName":"bytes" + }, + "ImportManifestUrl":{ + "shape":"String", + "locationName":"importManifestUrl" + } + } + }, + "DiskImageFormat":{ + "type":"string", + "enum":[ + "VMDK", + "RAW", + "VHD" + ] + }, + "DiskImageList":{ + "type":"list", + "member":{"shape":"DiskImage"} + }, + "DiskImageVolumeDescription":{ + "type":"structure", + "required":["Id"], + "members":{ + "Size":{ + "shape":"Long", + "locationName":"size" + }, + "Id":{ + "shape":"String", + "locationName":"id" + } + } + }, + "DomainType":{ + "type":"string", + "enum":[ + "vpc", + "standard" + ] + }, + "Double":{"type":"double"}, + "EbsBlockDevice":{ + "type":"structure", + "members":{ + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + }, + "VolumeSize":{ + "shape":"Integer", + "locationName":"volumeSize" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + "locationName":"deleteOnTermination" + }, + "VolumeType":{ + "shape":"VolumeType", + "locationName":"volumeType" + }, + "Iops":{ + "shape":"Integer", + "locationName":"iops" + }, + "Encrypted":{ + "shape":"Boolean", + "locationName":"encrypted" + } + } + }, + "EbsInstanceBlockDevice":{ + "type":"structure", + "members":{ + "VolumeId":{ + "shape":"String", + "locationName":"volumeId" + }, + "Status":{ + "shape":"AttachmentStatus", + "locationName":"status" + }, + "AttachTime":{ + "shape":"DateTime", + "locationName":"attachTime" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + "locationName":"deleteOnTermination" + } + } + }, + "EbsInstanceBlockDeviceSpecification":{ + "type":"structure", + "members":{ + "VolumeId":{ + "shape":"String", + "locationName":"volumeId" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + 
"locationName":"deleteOnTermination" + } + } + }, + "EnableVgwRoutePropagationRequest":{ + "type":"structure", + "required":[ + "RouteTableId", + "GatewayId" + ], + "members":{ + "RouteTableId":{"shape":"String"}, + "GatewayId":{"shape":"String"} + } + }, + "EnableVolumeIORequest":{ + "type":"structure", + "required":["VolumeId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeId":{ + "shape":"String", + "locationName":"volumeId" + } + } + }, + "EnableVpcClassicLinkDnsSupportRequest":{ + "type":"structure", + "members":{ + "VpcId":{"shape":"String"} + } + }, + "EnableVpcClassicLinkDnsSupportResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "EnableVpcClassicLinkRequest":{ + "type":"structure", + "required":["VpcId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + } + } + }, + "EnableVpcClassicLinkResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "EventCode":{ + "type":"string", + "enum":[ + "instance-reboot", + "system-reboot", + "system-maintenance", + "instance-retirement", + "instance-stop" + ] + }, + "EventInformation":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "EventSubType":{ + "shape":"String", + "locationName":"eventSubType" + }, + "EventDescription":{ + "shape":"String", + "locationName":"eventDescription" + } + } + }, + "EventType":{ + "type":"string", + "enum":[ + "instanceChange", + "fleetRequestChange", + "error" + ] + }, + "ExcessCapacityTerminationPolicy":{ + "type":"string", + "enum":[ + "noTermination", + "default" + ] + }, + "ExecutableByStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ExecutableBy" + } + }, + "ExportEnvironment":{ + "type":"string", + "enum":[ + "citrix", + "vmware", + "microsoft" + ] + }, + "ExportTask":{ + "type":"structure", + "members":{ + "ExportTaskId":{ + "shape":"String", + "locationName":"exportTaskId" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "State":{ + "shape":"ExportTaskState", + "locationName":"state" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "InstanceExportDetails":{ + "shape":"InstanceExportDetails", + "locationName":"instanceExport" + }, + "ExportToS3Task":{ + "shape":"ExportToS3Task", + "locationName":"exportToS3" + } + } + }, + "ExportTaskIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ExportTaskId" + } + }, + "ExportTaskList":{ + "type":"list", + "member":{ + "shape":"ExportTask", + "locationName":"item" + } + }, + "ExportTaskState":{ + "type":"string", + "enum":[ + "active", + "cancelling", + "cancelled", + "completed" + ] + }, + "ExportToS3Task":{ + "type":"structure", + "members":{ + "DiskImageFormat":{ + "shape":"DiskImageFormat", + "locationName":"diskImageFormat" + }, + "ContainerFormat":{ + "shape":"ContainerFormat", + "locationName":"containerFormat" + }, + "S3Bucket":{ + "shape":"String", + "locationName":"s3Bucket" + }, + "S3Key":{ + "shape":"String", + "locationName":"s3Key" + } + } + }, + "ExportToS3TaskSpecification":{ + "type":"structure", + "members":{ + "DiskImageFormat":{ + "shape":"DiskImageFormat", + "locationName":"diskImageFormat" + }, + "ContainerFormat":{ + "shape":"ContainerFormat", + "locationName":"containerFormat" + }, + 
"S3Bucket":{ + "shape":"String", + "locationName":"s3Bucket" + }, + "S3Prefix":{ + "shape":"String", + "locationName":"s3Prefix" + } + } + }, + "Filter":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "Values":{ + "shape":"ValueStringList", + "locationName":"Value" + } + } + }, + "FilterList":{ + "type":"list", + "member":{ + "shape":"Filter", + "locationName":"Filter" + } + }, + "FleetType":{ + "type":"string", + "enum":[ + "request", + "maintain" + ] + }, + "Float":{"type":"float"}, + "FlowLog":{ + "type":"structure", + "members":{ + "CreationTime":{ + "shape":"DateTime", + "locationName":"creationTime" + }, + "FlowLogId":{ + "shape":"String", + "locationName":"flowLogId" + }, + "FlowLogStatus":{ + "shape":"String", + "locationName":"flowLogStatus" + }, + "ResourceId":{ + "shape":"String", + "locationName":"resourceId" + }, + "TrafficType":{ + "shape":"TrafficType", + "locationName":"trafficType" + }, + "LogGroupName":{ + "shape":"String", + "locationName":"logGroupName" + }, + "DeliverLogsStatus":{ + "shape":"String", + "locationName":"deliverLogsStatus" + }, + "DeliverLogsErrorMessage":{ + "shape":"String", + "locationName":"deliverLogsErrorMessage" + }, + "DeliverLogsPermissionArn":{ + "shape":"String", + "locationName":"deliverLogsPermissionArn" + } + } + }, + "FlowLogSet":{ + "type":"list", + "member":{ + "shape":"FlowLog", + "locationName":"item" + } + }, + "FlowLogsResourceType":{ + "type":"string", + "enum":[ + "VPC", + "Subnet", + "NetworkInterface" + ] + }, + "GatewayType":{ + "type":"string", + "enum":["ipsec.1"] + }, + "GetConsoleOutputRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{"shape":"String"} + } + }, + "GetConsoleOutputResult":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Timestamp":{ + "shape":"DateTime", + "locationName":"timestamp" + }, + "Output":{ + "shape":"String", + "locationName":"output" + } + } + }, + "GetConsoleScreenshotRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "DryRun":{"shape":"Boolean"}, + "InstanceId":{"shape":"String"}, + "WakeUp":{"shape":"Boolean"} + } + }, + "GetConsoleScreenshotResult":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "ImageData":{ + "shape":"String", + "locationName":"imageData" + } + } + }, + "GetPasswordDataRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{"shape":"String"} + } + }, + "GetPasswordDataResult":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Timestamp":{ + "shape":"DateTime", + "locationName":"timestamp" + }, + "PasswordData":{ + "shape":"String", + "locationName":"passwordData" + } + } + }, + "GroupIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"groupId" + } + }, + "GroupIdentifier":{ + "type":"structure", + "members":{ + "GroupName":{ + "shape":"String", + "locationName":"groupName" + }, + "GroupId":{ + "shape":"String", + "locationName":"groupId" + } + } + }, + "GroupIdentifierList":{ + "type":"list", + "member":{ + "shape":"GroupIdentifier", + "locationName":"item" + } + }, + "GroupIds":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"item" + } + }, + "GroupNameStringList":{ + "type":"list", + 
"member":{ + "shape":"String", + "locationName":"GroupName" + } + }, + "HistoryRecord":{ + "type":"structure", + "required":[ + "Timestamp", + "EventType", + "EventInformation" + ], + "members":{ + "Timestamp":{ + "shape":"DateTime", + "locationName":"timestamp" + }, + "EventType":{ + "shape":"EventType", + "locationName":"eventType" + }, + "EventInformation":{ + "shape":"EventInformation", + "locationName":"eventInformation" + } + } + }, + "HistoryRecords":{ + "type":"list", + "member":{ + "shape":"HistoryRecord", + "locationName":"item" + } + }, + "Host":{ + "type":"structure", + "members":{ + "HostId":{ + "shape":"String", + "locationName":"hostId" + }, + "AutoPlacement":{ + "shape":"AutoPlacement", + "locationName":"autoPlacement" + }, + "HostReservationId":{ + "shape":"String", + "locationName":"hostReservationId" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + }, + "HostProperties":{ + "shape":"HostProperties", + "locationName":"hostProperties" + }, + "State":{ + "shape":"AllocationState", + "locationName":"state" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Instances":{ + "shape":"HostInstanceList", + "locationName":"instances" + }, + "AvailableCapacity":{ + "shape":"AvailableCapacity", + "locationName":"availableCapacity" + } + } + }, + "HostInstance":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "InstanceType":{ + "shape":"String", + "locationName":"instanceType" + } + } + }, + "HostInstanceList":{ + "type":"list", + "member":{ + "shape":"HostInstance", + "locationName":"item" + } + }, + "HostList":{ + "type":"list", + "member":{ + "shape":"Host", + "locationName":"item" + } + }, + "HostProperties":{ + "type":"structure", + "members":{ + "Sockets":{ + "shape":"Integer", + "locationName":"sockets" + }, + "Cores":{ + "shape":"Integer", + "locationName":"cores" + }, + "TotalVCpus":{ + "shape":"Integer", + "locationName":"totalVCpus" + }, + "InstanceType":{ + "shape":"String", + "locationName":"instanceType" + } + } + }, + "HostTenancy":{ + "type":"string", + "enum":[ + "dedicated", + "host" + ] + }, + "HypervisorType":{ + "type":"string", + "enum":[ + "ovm", + "xen" + ] + }, + "IamInstanceProfile":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"String", + "locationName":"arn" + }, + "Id":{ + "shape":"String", + "locationName":"id" + } + } + }, + "IamInstanceProfileSpecification":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"String", + "locationName":"arn" + }, + "Name":{ + "shape":"String", + "locationName":"name" + } + } + }, + "IcmpTypeCode":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"Integer", + "locationName":"type" + }, + "Code":{ + "shape":"Integer", + "locationName":"code" + } + } + }, + "IdFormat":{ + "type":"structure", + "members":{ + "Resource":{ + "shape":"String", + "locationName":"resource" + }, + "UseLongIds":{ + "shape":"Boolean", + "locationName":"useLongIds" + }, + "Deadline":{ + "shape":"DateTime", + "locationName":"deadline" + } + } + }, + "IdFormatList":{ + "type":"list", + "member":{ + "shape":"IdFormat", + "locationName":"item" + } + }, + "Image":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"String", + "locationName":"imageId" + }, + "ImageLocation":{ + "shape":"String", + "locationName":"imageLocation" + }, + "State":{ + "shape":"ImageState", + "locationName":"imageState" + }, + "OwnerId":{ + "shape":"String", + "locationName":"imageOwnerId" + }, + "CreationDate":{ 
+ "shape":"String", + "locationName":"creationDate" + }, + "Public":{ + "shape":"Boolean", + "locationName":"isPublic" + }, + "ProductCodes":{ + "shape":"ProductCodeList", + "locationName":"productCodes" + }, + "Architecture":{ + "shape":"ArchitectureValues", + "locationName":"architecture" + }, + "ImageType":{ + "shape":"ImageTypeValues", + "locationName":"imageType" + }, + "KernelId":{ + "shape":"String", + "locationName":"kernelId" + }, + "RamdiskId":{ + "shape":"String", + "locationName":"ramdiskId" + }, + "Platform":{ + "shape":"PlatformValues", + "locationName":"platform" + }, + "SriovNetSupport":{ + "shape":"String", + "locationName":"sriovNetSupport" + }, + "StateReason":{ + "shape":"StateReason", + "locationName":"stateReason" + }, + "ImageOwnerAlias":{ + "shape":"String", + "locationName":"imageOwnerAlias" + }, + "Name":{ + "shape":"String", + "locationName":"name" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "RootDeviceType":{ + "shape":"DeviceType", + "locationName":"rootDeviceType" + }, + "RootDeviceName":{ + "shape":"String", + "locationName":"rootDeviceName" + }, + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappingList", + "locationName":"blockDeviceMapping" + }, + "VirtualizationType":{ + "shape":"VirtualizationType", + "locationName":"virtualizationType" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "Hypervisor":{ + "shape":"HypervisorType", + "locationName":"hypervisor" + } + } + }, + "ImageAttribute":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"String", + "locationName":"imageId" + }, + "LaunchPermissions":{ + "shape":"LaunchPermissionList", + "locationName":"launchPermission" + }, + "ProductCodes":{ + "shape":"ProductCodeList", + "locationName":"productCodes" + }, + "KernelId":{ + "shape":"AttributeValue", + "locationName":"kernel" + }, + "RamdiskId":{ + "shape":"AttributeValue", + "locationName":"ramdisk" + }, + "Description":{ + "shape":"AttributeValue", + "locationName":"description" + }, + "SriovNetSupport":{ + "shape":"AttributeValue", + "locationName":"sriovNetSupport" + }, + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappingList", + "locationName":"blockDeviceMapping" + } + } + }, + "ImageAttributeName":{ + "type":"string", + "enum":[ + "description", + "kernel", + "ramdisk", + "launchPermission", + "productCodes", + "blockDeviceMapping", + "sriovNetSupport" + ] + }, + "ImageDiskContainer":{ + "type":"structure", + "members":{ + "Description":{"shape":"String"}, + "Format":{"shape":"String"}, + "Url":{"shape":"String"}, + "UserBucket":{"shape":"UserBucket"}, + "DeviceName":{"shape":"String"}, + "SnapshotId":{"shape":"String"} + } + }, + "ImageDiskContainerList":{ + "type":"list", + "member":{ + "shape":"ImageDiskContainer", + "locationName":"item" + } + }, + "ImageIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ImageId" + } + }, + "ImageList":{ + "type":"list", + "member":{ + "shape":"Image", + "locationName":"item" + } + }, + "ImageState":{ + "type":"string", + "enum":[ + "pending", + "available", + "invalid", + "deregistered", + "transient", + "failed", + "error" + ] + }, + "ImageTypeValues":{ + "type":"string", + "enum":[ + "machine", + "kernel", + "ramdisk" + ] + }, + "ImportImageRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "Description":{"shape":"String"}, + "DiskContainers":{ + "shape":"ImageDiskContainerList", + "locationName":"DiskContainer" + }, + "LicenseType":{"shape":"String"}, + 
"Hypervisor":{"shape":"String"}, + "Architecture":{"shape":"String"}, + "Platform":{"shape":"String"}, + "ClientData":{"shape":"ClientData"}, + "ClientToken":{"shape":"String"}, + "RoleName":{"shape":"String"} + } + }, + "ImportImageResult":{ + "type":"structure", + "members":{ + "ImportTaskId":{ + "shape":"String", + "locationName":"importTaskId" + }, + "Architecture":{ + "shape":"String", + "locationName":"architecture" + }, + "LicenseType":{ + "shape":"String", + "locationName":"licenseType" + }, + "Platform":{ + "shape":"String", + "locationName":"platform" + }, + "Hypervisor":{ + "shape":"String", + "locationName":"hypervisor" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "SnapshotDetails":{ + "shape":"SnapshotDetailList", + "locationName":"snapshotDetailSet" + }, + "ImageId":{ + "shape":"String", + "locationName":"imageId" + }, + "Progress":{ + "shape":"String", + "locationName":"progress" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "Status":{ + "shape":"String", + "locationName":"status" + } + } + }, + "ImportImageTask":{ + "type":"structure", + "members":{ + "ImportTaskId":{ + "shape":"String", + "locationName":"importTaskId" + }, + "Architecture":{ + "shape":"String", + "locationName":"architecture" + }, + "LicenseType":{ + "shape":"String", + "locationName":"licenseType" + }, + "Platform":{ + "shape":"String", + "locationName":"platform" + }, + "Hypervisor":{ + "shape":"String", + "locationName":"hypervisor" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "SnapshotDetails":{ + "shape":"SnapshotDetailList", + "locationName":"snapshotDetailSet" + }, + "ImageId":{ + "shape":"String", + "locationName":"imageId" + }, + "Progress":{ + "shape":"String", + "locationName":"progress" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "Status":{ + "shape":"String", + "locationName":"status" + } + } + }, + "ImportImageTaskList":{ + "type":"list", + "member":{ + "shape":"ImportImageTask", + "locationName":"item" + } + }, + "ImportInstanceLaunchSpecification":{ + "type":"structure", + "members":{ + "Architecture":{ + "shape":"ArchitectureValues", + "locationName":"architecture" + }, + "GroupNames":{ + "shape":"SecurityGroupStringList", + "locationName":"GroupName" + }, + "GroupIds":{ + "shape":"SecurityGroupIdStringList", + "locationName":"GroupId" + }, + "AdditionalInfo":{ + "shape":"String", + "locationName":"additionalInfo" + }, + "UserData":{ + "shape":"UserData", + "locationName":"userData" + }, + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + }, + "Placement":{ + "shape":"Placement", + "locationName":"placement" + }, + "Monitoring":{ + "shape":"Boolean", + "locationName":"monitoring" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "InstanceInitiatedShutdownBehavior":{ + "shape":"ShutdownBehavior", + "locationName":"instanceInitiatedShutdownBehavior" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + } + } + }, + "ImportInstanceRequest":{ + "type":"structure", + "required":["Platform"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "LaunchSpecification":{ + "shape":"ImportInstanceLaunchSpecification", + "locationName":"launchSpecification" + }, + "DiskImages":{ + "shape":"DiskImageList", + "locationName":"diskImage" + }, + "Platform":{ + 
"shape":"PlatformValues", + "locationName":"platform" + } + } + }, + "ImportInstanceResult":{ + "type":"structure", + "members":{ + "ConversionTask":{ + "shape":"ConversionTask", + "locationName":"conversionTask" + } + } + }, + "ImportInstanceTaskDetails":{ + "type":"structure", + "required":["Volumes"], + "members":{ + "Volumes":{ + "shape":"ImportInstanceVolumeDetailSet", + "locationName":"volumes" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Platform":{ + "shape":"PlatformValues", + "locationName":"platform" + }, + "Description":{ + "shape":"String", + "locationName":"description" + } + } + }, + "ImportInstanceVolumeDetailItem":{ + "type":"structure", + "required":[ + "BytesConverted", + "AvailabilityZone", + "Image", + "Volume", + "Status" + ], + "members":{ + "BytesConverted":{ + "shape":"Long", + "locationName":"bytesConverted" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Image":{ + "shape":"DiskImageDescription", + "locationName":"image" + }, + "Volume":{ + "shape":"DiskImageVolumeDescription", + "locationName":"volume" + }, + "Status":{ + "shape":"String", + "locationName":"status" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "Description":{ + "shape":"String", + "locationName":"description" + } + } + }, + "ImportInstanceVolumeDetailSet":{ + "type":"list", + "member":{ + "shape":"ImportInstanceVolumeDetailItem", + "locationName":"item" + } + }, + "ImportKeyPairRequest":{ + "type":"structure", + "required":[ + "KeyName", + "PublicKeyMaterial" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "KeyName":{ + "shape":"String", + "locationName":"keyName" + }, + "PublicKeyMaterial":{ + "shape":"Blob", + "locationName":"publicKeyMaterial" + } + } + }, + "ImportKeyPairResult":{ + "type":"structure", + "members":{ + "KeyName":{ + "shape":"String", + "locationName":"keyName" + }, + "KeyFingerprint":{ + "shape":"String", + "locationName":"keyFingerprint" + } + } + }, + "ImportSnapshotRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "Description":{"shape":"String"}, + "DiskContainer":{"shape":"SnapshotDiskContainer"}, + "ClientData":{"shape":"ClientData"}, + "ClientToken":{"shape":"String"}, + "RoleName":{"shape":"String"} + } + }, + "ImportSnapshotResult":{ + "type":"structure", + "members":{ + "ImportTaskId":{ + "shape":"String", + "locationName":"importTaskId" + }, + "SnapshotTaskDetail":{ + "shape":"SnapshotTaskDetail", + "locationName":"snapshotTaskDetail" + }, + "Description":{ + "shape":"String", + "locationName":"description" + } + } + }, + "ImportSnapshotTask":{ + "type":"structure", + "members":{ + "ImportTaskId":{ + "shape":"String", + "locationName":"importTaskId" + }, + "SnapshotTaskDetail":{ + "shape":"SnapshotTaskDetail", + "locationName":"snapshotTaskDetail" + }, + "Description":{ + "shape":"String", + "locationName":"description" + } + } + }, + "ImportSnapshotTaskList":{ + "type":"list", + "member":{ + "shape":"ImportSnapshotTask", + "locationName":"item" + } + }, + "ImportTaskIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ImportTaskId" + } + }, + "ImportVolumeRequest":{ + "type":"structure", + "required":[ + "AvailabilityZone", + "Image", + "Volume" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Image":{ + "shape":"DiskImageDetail", + 
"locationName":"image" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "Volume":{ + "shape":"VolumeDetail", + "locationName":"volume" + } + } + }, + "ImportVolumeResult":{ + "type":"structure", + "members":{ + "ConversionTask":{ + "shape":"ConversionTask", + "locationName":"conversionTask" + } + } + }, + "ImportVolumeTaskDetails":{ + "type":"structure", + "required":[ + "BytesConverted", + "AvailabilityZone", + "Image", + "Volume" + ], + "members":{ + "BytesConverted":{ + "shape":"Long", + "locationName":"bytesConverted" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "Image":{ + "shape":"DiskImageDescription", + "locationName":"image" + }, + "Volume":{ + "shape":"DiskImageVolumeDescription", + "locationName":"volume" + } + } + }, + "Instance":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "ImageId":{ + "shape":"String", + "locationName":"imageId" + }, + "State":{ + "shape":"InstanceState", + "locationName":"instanceState" + }, + "PrivateDnsName":{ + "shape":"String", + "locationName":"privateDnsName" + }, + "PublicDnsName":{ + "shape":"String", + "locationName":"dnsName" + }, + "StateTransitionReason":{ + "shape":"String", + "locationName":"reason" + }, + "KeyName":{ + "shape":"String", + "locationName":"keyName" + }, + "AmiLaunchIndex":{ + "shape":"Integer", + "locationName":"amiLaunchIndex" + }, + "ProductCodes":{ + "shape":"ProductCodeList", + "locationName":"productCodes" + }, + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + }, + "LaunchTime":{ + "shape":"DateTime", + "locationName":"launchTime" + }, + "Placement":{ + "shape":"Placement", + "locationName":"placement" + }, + "KernelId":{ + "shape":"String", + "locationName":"kernelId" + }, + "RamdiskId":{ + "shape":"String", + "locationName":"ramdiskId" + }, + "Platform":{ + "shape":"PlatformValues", + "locationName":"platform" + }, + "Monitoring":{ + "shape":"Monitoring", + "locationName":"monitoring" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "PublicIpAddress":{ + "shape":"String", + "locationName":"ipAddress" + }, + "StateReason":{ + "shape":"StateReason", + "locationName":"stateReason" + }, + "Architecture":{ + "shape":"ArchitectureValues", + "locationName":"architecture" + }, + "RootDeviceType":{ + "shape":"DeviceType", + "locationName":"rootDeviceType" + }, + "RootDeviceName":{ + "shape":"String", + "locationName":"rootDeviceName" + }, + "BlockDeviceMappings":{ + "shape":"InstanceBlockDeviceMappingList", + "locationName":"blockDeviceMapping" + }, + "VirtualizationType":{ + "shape":"VirtualizationType", + "locationName":"virtualizationType" + }, + "InstanceLifecycle":{ + "shape":"InstanceLifecycleType", + "locationName":"instanceLifecycle" + }, + "SpotInstanceRequestId":{ + "shape":"String", + "locationName":"spotInstanceRequestId" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "SecurityGroups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + }, + "SourceDestCheck":{ + "shape":"Boolean", + "locationName":"sourceDestCheck" + }, + "Hypervisor":{ + "shape":"HypervisorType", + "locationName":"hypervisor" + }, 
+ "NetworkInterfaces":{ + "shape":"InstanceNetworkInterfaceList", + "locationName":"networkInterfaceSet" + }, + "IamInstanceProfile":{ + "shape":"IamInstanceProfile", + "locationName":"iamInstanceProfile" + }, + "EbsOptimized":{ + "shape":"Boolean", + "locationName":"ebsOptimized" + }, + "SriovNetSupport":{ + "shape":"String", + "locationName":"sriovNetSupport" + } + } + }, + "InstanceAttribute":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "InstanceType":{ + "shape":"AttributeValue", + "locationName":"instanceType" + }, + "KernelId":{ + "shape":"AttributeValue", + "locationName":"kernel" + }, + "RamdiskId":{ + "shape":"AttributeValue", + "locationName":"ramdisk" + }, + "UserData":{ + "shape":"AttributeValue", + "locationName":"userData" + }, + "DisableApiTermination":{ + "shape":"AttributeBooleanValue", + "locationName":"disableApiTermination" + }, + "InstanceInitiatedShutdownBehavior":{ + "shape":"AttributeValue", + "locationName":"instanceInitiatedShutdownBehavior" + }, + "RootDeviceName":{ + "shape":"AttributeValue", + "locationName":"rootDeviceName" + }, + "BlockDeviceMappings":{ + "shape":"InstanceBlockDeviceMappingList", + "locationName":"blockDeviceMapping" + }, + "ProductCodes":{ + "shape":"ProductCodeList", + "locationName":"productCodes" + }, + "EbsOptimized":{ + "shape":"AttributeBooleanValue", + "locationName":"ebsOptimized" + }, + "SriovNetSupport":{ + "shape":"AttributeValue", + "locationName":"sriovNetSupport" + }, + "SourceDestCheck":{ + "shape":"AttributeBooleanValue", + "locationName":"sourceDestCheck" + }, + "Groups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + } + } + }, + "InstanceAttributeName":{ + "type":"string", + "enum":[ + "instanceType", + "kernel", + "ramdisk", + "userData", + "disableApiTermination", + "instanceInitiatedShutdownBehavior", + "rootDeviceName", + "blockDeviceMapping", + "productCodes", + "sourceDestCheck", + "groupSet", + "ebsOptimized", + "sriovNetSupport" + ] + }, + "InstanceBlockDeviceMapping":{ + "type":"structure", + "members":{ + "DeviceName":{ + "shape":"String", + "locationName":"deviceName" + }, + "Ebs":{ + "shape":"EbsInstanceBlockDevice", + "locationName":"ebs" + } + } + }, + "InstanceBlockDeviceMappingList":{ + "type":"list", + "member":{ + "shape":"InstanceBlockDeviceMapping", + "locationName":"item" + } + }, + "InstanceBlockDeviceMappingSpecification":{ + "type":"structure", + "members":{ + "DeviceName":{ + "shape":"String", + "locationName":"deviceName" + }, + "Ebs":{ + "shape":"EbsInstanceBlockDeviceSpecification", + "locationName":"ebs" + }, + "VirtualName":{ + "shape":"String", + "locationName":"virtualName" + }, + "NoDevice":{ + "shape":"String", + "locationName":"noDevice" + } + } + }, + "InstanceBlockDeviceMappingSpecificationList":{ + "type":"list", + "member":{ + "shape":"InstanceBlockDeviceMappingSpecification", + "locationName":"item" + } + }, + "InstanceCapacity":{ + "type":"structure", + "members":{ + "InstanceType":{ + "shape":"String", + "locationName":"instanceType" + }, + "AvailableCapacity":{ + "shape":"Integer", + "locationName":"availableCapacity" + }, + "TotalCapacity":{ + "shape":"Integer", + "locationName":"totalCapacity" + } + } + }, + "InstanceCount":{ + "type":"structure", + "members":{ + "State":{ + "shape":"ListingState", + "locationName":"state" + }, + "InstanceCount":{ + "shape":"Integer", + "locationName":"instanceCount" + } + } + }, + "InstanceCountList":{ + "type":"list", + "member":{ + "shape":"InstanceCount", + 
"locationName":"item" + } + }, + "InstanceExportDetails":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "TargetEnvironment":{ + "shape":"ExportEnvironment", + "locationName":"targetEnvironment" + } + } + }, + "InstanceIdSet":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"item" + } + }, + "InstanceIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"InstanceId" + } + }, + "InstanceLifecycleType":{ + "type":"string", + "enum":[ + "spot", + "scheduled" + ] + }, + "InstanceList":{ + "type":"list", + "member":{ + "shape":"Instance", + "locationName":"item" + } + }, + "InstanceMonitoring":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Monitoring":{ + "shape":"Monitoring", + "locationName":"monitoring" + } + } + }, + "InstanceMonitoringList":{ + "type":"list", + "member":{ + "shape":"InstanceMonitoring", + "locationName":"item" + } + }, + "InstanceNetworkInterface":{ + "type":"structure", + "members":{ + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "Status":{ + "shape":"NetworkInterfaceStatus", + "locationName":"status" + }, + "MacAddress":{ + "shape":"String", + "locationName":"macAddress" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "PrivateDnsName":{ + "shape":"String", + "locationName":"privateDnsName" + }, + "SourceDestCheck":{ + "shape":"Boolean", + "locationName":"sourceDestCheck" + }, + "Groups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + }, + "Attachment":{ + "shape":"InstanceNetworkInterfaceAttachment", + "locationName":"attachment" + }, + "Association":{ + "shape":"InstanceNetworkInterfaceAssociation", + "locationName":"association" + }, + "PrivateIpAddresses":{ + "shape":"InstancePrivateIpAddressList", + "locationName":"privateIpAddressesSet" + } + } + }, + "InstanceNetworkInterfaceAssociation":{ + "type":"structure", + "members":{ + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + }, + "PublicDnsName":{ + "shape":"String", + "locationName":"publicDnsName" + }, + "IpOwnerId":{ + "shape":"String", + "locationName":"ipOwnerId" + } + } + }, + "InstanceNetworkInterfaceAttachment":{ + "type":"structure", + "members":{ + "AttachmentId":{ + "shape":"String", + "locationName":"attachmentId" + }, + "DeviceIndex":{ + "shape":"Integer", + "locationName":"deviceIndex" + }, + "Status":{ + "shape":"AttachmentStatus", + "locationName":"status" + }, + "AttachTime":{ + "shape":"DateTime", + "locationName":"attachTime" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + "locationName":"deleteOnTermination" + } + } + }, + "InstanceNetworkInterfaceList":{ + "type":"list", + "member":{ + "shape":"InstanceNetworkInterface", + "locationName":"item" + } + }, + "InstanceNetworkInterfaceSpecification":{ + "type":"structure", + "members":{ + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "DeviceIndex":{ + "shape":"Integer", + "locationName":"deviceIndex" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + 
"PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "Groups":{ + "shape":"SecurityGroupIdStringList", + "locationName":"SecurityGroupId" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + "locationName":"deleteOnTermination" + }, + "PrivateIpAddresses":{ + "shape":"PrivateIpAddressSpecificationList", + "locationName":"privateIpAddressesSet", + "queryName":"PrivateIpAddresses" + }, + "SecondaryPrivateIpAddressCount":{ + "shape":"Integer", + "locationName":"secondaryPrivateIpAddressCount" + }, + "AssociatePublicIpAddress":{ + "shape":"Boolean", + "locationName":"associatePublicIpAddress" + } + } + }, + "InstanceNetworkInterfaceSpecificationList":{ + "type":"list", + "member":{ + "shape":"InstanceNetworkInterfaceSpecification", + "locationName":"item" + } + }, + "InstancePrivateIpAddress":{ + "type":"structure", + "members":{ + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "PrivateDnsName":{ + "shape":"String", + "locationName":"privateDnsName" + }, + "Primary":{ + "shape":"Boolean", + "locationName":"primary" + }, + "Association":{ + "shape":"InstanceNetworkInterfaceAssociation", + "locationName":"association" + } + } + }, + "InstancePrivateIpAddressList":{ + "type":"list", + "member":{ + "shape":"InstancePrivateIpAddress", + "locationName":"item" + } + }, + "InstanceState":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"Integer", + "locationName":"code" + }, + "Name":{ + "shape":"InstanceStateName", + "locationName":"name" + } + } + }, + "InstanceStateChange":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "CurrentState":{ + "shape":"InstanceState", + "locationName":"currentState" + }, + "PreviousState":{ + "shape":"InstanceState", + "locationName":"previousState" + } + } + }, + "InstanceStateChangeList":{ + "type":"list", + "member":{ + "shape":"InstanceStateChange", + "locationName":"item" + } + }, + "InstanceStateName":{ + "type":"string", + "enum":[ + "pending", + "running", + "shutting-down", + "terminated", + "stopping", + "stopped" + ] + }, + "InstanceStatus":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Events":{ + "shape":"InstanceStatusEventList", + "locationName":"eventsSet" + }, + "InstanceState":{ + "shape":"InstanceState", + "locationName":"instanceState" + }, + "SystemStatus":{ + "shape":"InstanceStatusSummary", + "locationName":"systemStatus" + }, + "InstanceStatus":{ + "shape":"InstanceStatusSummary", + "locationName":"instanceStatus" + } + } + }, + "InstanceStatusDetails":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"StatusName", + "locationName":"name" + }, + "Status":{ + "shape":"StatusType", + "locationName":"status" + }, + "ImpairedSince":{ + "shape":"DateTime", + "locationName":"impairedSince" + } + } + }, + "InstanceStatusDetailsList":{ + "type":"list", + "member":{ + "shape":"InstanceStatusDetails", + "locationName":"item" + } + }, + "InstanceStatusEvent":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"EventCode", + "locationName":"code" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "NotBefore":{ + "shape":"DateTime", + "locationName":"notBefore" + }, + "NotAfter":{ + "shape":"DateTime", + "locationName":"notAfter" + } + } + }, + "InstanceStatusEventList":{ + "type":"list", + "member":{ + 
"shape":"InstanceStatusEvent", + "locationName":"item" + } + }, + "InstanceStatusList":{ + "type":"list", + "member":{ + "shape":"InstanceStatus", + "locationName":"item" + } + }, + "InstanceStatusSummary":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"SummaryStatus", + "locationName":"status" + }, + "Details":{ + "shape":"InstanceStatusDetailsList", + "locationName":"details" + } + } + }, + "InstanceType":{ + "type":"string", + "enum":[ + "t1.micro", + "m1.small", + "m1.medium", + "m1.large", + "m1.xlarge", + "m3.medium", + "m3.large", + "m3.xlarge", + "m3.2xlarge", + "m4.large", + "m4.xlarge", + "m4.2xlarge", + "m4.4xlarge", + "m4.10xlarge", + "t2.nano", + "t2.micro", + "t2.small", + "t2.medium", + "t2.large", + "m2.xlarge", + "m2.2xlarge", + "m2.4xlarge", + "cr1.8xlarge", + "x1.4xlarge", + "x1.8xlarge", + "x1.16xlarge", + "x1.32xlarge", + "i2.xlarge", + "i2.2xlarge", + "i2.4xlarge", + "i2.8xlarge", + "hi1.4xlarge", + "hs1.8xlarge", + "c1.medium", + "c1.xlarge", + "c3.large", + "c3.xlarge", + "c3.2xlarge", + "c3.4xlarge", + "c3.8xlarge", + "c4.large", + "c4.xlarge", + "c4.2xlarge", + "c4.4xlarge", + "c4.8xlarge", + "cc1.4xlarge", + "cc2.8xlarge", + "g2.2xlarge", + "g2.8xlarge", + "cg1.4xlarge", + "r3.large", + "r3.xlarge", + "r3.2xlarge", + "r3.4xlarge", + "r3.8xlarge", + "d2.xlarge", + "d2.2xlarge", + "d2.4xlarge", + "d2.8xlarge" + ] + }, + "InstanceTypeList":{ + "type":"list", + "member":{"shape":"InstanceType"} + }, + "Integer":{"type":"integer"}, + "InternetGateway":{ + "type":"structure", + "members":{ + "InternetGatewayId":{ + "shape":"String", + "locationName":"internetGatewayId" + }, + "Attachments":{ + "shape":"InternetGatewayAttachmentList", + "locationName":"attachmentSet" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "InternetGatewayAttachment":{ + "type":"structure", + "members":{ + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "State":{ + "shape":"AttachmentStatus", + "locationName":"state" + } + } + }, + "InternetGatewayAttachmentList":{ + "type":"list", + "member":{ + "shape":"InternetGatewayAttachment", + "locationName":"item" + } + }, + "InternetGatewayList":{ + "type":"list", + "member":{ + "shape":"InternetGateway", + "locationName":"item" + } + }, + "IpPermission":{ + "type":"structure", + "members":{ + "IpProtocol":{ + "shape":"String", + "locationName":"ipProtocol" + }, + "FromPort":{ + "shape":"Integer", + "locationName":"fromPort" + }, + "ToPort":{ + "shape":"Integer", + "locationName":"toPort" + }, + "UserIdGroupPairs":{ + "shape":"UserIdGroupPairList", + "locationName":"groups" + }, + "IpRanges":{ + "shape":"IpRangeList", + "locationName":"ipRanges" + }, + "PrefixListIds":{ + "shape":"PrefixListIdList", + "locationName":"prefixListIds" + } + } + }, + "IpPermissionList":{ + "type":"list", + "member":{ + "shape":"IpPermission", + "locationName":"item" + } + }, + "IpRange":{ + "type":"structure", + "members":{ + "CidrIp":{ + "shape":"String", + "locationName":"cidrIp" + } + } + }, + "IpRangeList":{ + "type":"list", + "member":{ + "shape":"IpRange", + "locationName":"item" + } + }, + "IpRanges":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"item" + } + }, + "KeyNameStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"KeyName" + } + }, + "KeyPair":{ + "type":"structure", + "members":{ + "KeyName":{ + "shape":"String", + "locationName":"keyName" + }, + "KeyFingerprint":{ + "shape":"String", + "locationName":"keyFingerprint" + }, + "KeyMaterial":{ + 
"shape":"String", + "locationName":"keyMaterial" + } + } + }, + "KeyPairInfo":{ + "type":"structure", + "members":{ + "KeyName":{ + "shape":"String", + "locationName":"keyName" + }, + "KeyFingerprint":{ + "shape":"String", + "locationName":"keyFingerprint" + } + } + }, + "KeyPairList":{ + "type":"list", + "member":{ + "shape":"KeyPairInfo", + "locationName":"item" + } + }, + "LaunchPermission":{ + "type":"structure", + "members":{ + "UserId":{ + "shape":"String", + "locationName":"userId" + }, + "Group":{ + "shape":"PermissionGroup", + "locationName":"group" + } + } + }, + "LaunchPermissionList":{ + "type":"list", + "member":{ + "shape":"LaunchPermission", + "locationName":"item" + } + }, + "LaunchPermissionModifications":{ + "type":"structure", + "members":{ + "Add":{"shape":"LaunchPermissionList"}, + "Remove":{"shape":"LaunchPermissionList"} + } + }, + "LaunchSpecification":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"String", + "locationName":"imageId" + }, + "KeyName":{ + "shape":"String", + "locationName":"keyName" + }, + "SecurityGroups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + }, + "UserData":{ + "shape":"String", + "locationName":"userData" + }, + "AddressingType":{ + "shape":"String", + "locationName":"addressingType" + }, + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + }, + "Placement":{ + "shape":"SpotPlacement", + "locationName":"placement" + }, + "KernelId":{ + "shape":"String", + "locationName":"kernelId" + }, + "RamdiskId":{ + "shape":"String", + "locationName":"ramdiskId" + }, + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappingList", + "locationName":"blockDeviceMapping" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "NetworkInterfaces":{ + "shape":"InstanceNetworkInterfaceSpecificationList", + "locationName":"networkInterfaceSet" + }, + "IamInstanceProfile":{ + "shape":"IamInstanceProfileSpecification", + "locationName":"iamInstanceProfile" + }, + "EbsOptimized":{ + "shape":"Boolean", + "locationName":"ebsOptimized" + }, + "Monitoring":{ + "shape":"RunInstancesMonitoringEnabled", + "locationName":"monitoring" + } + } + }, + "LaunchSpecsList":{ + "type":"list", + "member":{ + "shape":"SpotFleetLaunchSpecification", + "locationName":"item" + }, + "min":1 + }, + "ListingState":{ + "type":"string", + "enum":[ + "available", + "sold", + "cancelled", + "pending" + ] + }, + "ListingStatus":{ + "type":"string", + "enum":[ + "active", + "pending", + "cancelled", + "closed" + ] + }, + "Long":{"type":"long"}, + "MaxResults":{ + "type":"integer", + "max":255, + "min":5 + }, + "ModifyHostsRequest":{ + "type":"structure", + "required":[ + "HostIds", + "AutoPlacement" + ], + "members":{ + "HostIds":{ + "shape":"RequestHostIdList", + "locationName":"hostId" + }, + "AutoPlacement":{ + "shape":"AutoPlacement", + "locationName":"autoPlacement" + } + } + }, + "ModifyHostsResult":{ + "type":"structure", + "members":{ + "Successful":{ + "shape":"ResponseHostIdList", + "locationName":"successful" + }, + "Unsuccessful":{ + "shape":"UnsuccessfulItemList", + "locationName":"unsuccessful" + } + } + }, + "ModifyIdFormatRequest":{ + "type":"structure", + "required":[ + "Resource", + "UseLongIds" + ], + "members":{ + "Resource":{"shape":"String"}, + "UseLongIds":{"shape":"Boolean"} + } + }, + "ModifyImageAttributeRequest":{ + "type":"structure", + "required":["ImageId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ImageId":{"shape":"String"}, + 
"Attribute":{"shape":"String"}, + "OperationType":{"shape":"OperationType"}, + "UserIds":{ + "shape":"UserIdStringList", + "locationName":"UserId" + }, + "UserGroups":{ + "shape":"UserGroupStringList", + "locationName":"UserGroup" + }, + "ProductCodes":{ + "shape":"ProductCodeStringList", + "locationName":"ProductCode" + }, + "Value":{"shape":"String"}, + "LaunchPermission":{"shape":"LaunchPermissionModifications"}, + "Description":{"shape":"AttributeValue"} + } + }, + "ModifyInstanceAttributeRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Attribute":{ + "shape":"InstanceAttributeName", + "locationName":"attribute" + }, + "Value":{ + "shape":"String", + "locationName":"value" + }, + "BlockDeviceMappings":{ + "shape":"InstanceBlockDeviceMappingSpecificationList", + "locationName":"blockDeviceMapping" + }, + "SourceDestCheck":{"shape":"AttributeBooleanValue"}, + "DisableApiTermination":{ + "shape":"AttributeBooleanValue", + "locationName":"disableApiTermination" + }, + "InstanceType":{ + "shape":"AttributeValue", + "locationName":"instanceType" + }, + "Kernel":{ + "shape":"AttributeValue", + "locationName":"kernel" + }, + "Ramdisk":{ + "shape":"AttributeValue", + "locationName":"ramdisk" + }, + "UserData":{ + "shape":"BlobAttributeValue", + "locationName":"userData" + }, + "InstanceInitiatedShutdownBehavior":{ + "shape":"AttributeValue", + "locationName":"instanceInitiatedShutdownBehavior" + }, + "Groups":{ + "shape":"GroupIdStringList", + "locationName":"GroupId" + }, + "EbsOptimized":{ + "shape":"AttributeBooleanValue", + "locationName":"ebsOptimized" + }, + "SriovNetSupport":{ + "shape":"AttributeValue", + "locationName":"sriovNetSupport" + } + } + }, + "ModifyInstancePlacementRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Tenancy":{ + "shape":"HostTenancy", + "locationName":"tenancy" + }, + "Affinity":{ + "shape":"Affinity", + "locationName":"affinity" + }, + "HostId":{ + "shape":"String", + "locationName":"hostId" + } + } + }, + "ModifyInstancePlacementResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "ModifyNetworkInterfaceAttributeRequest":{ + "type":"structure", + "required":["NetworkInterfaceId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "Description":{ + "shape":"AttributeValue", + "locationName":"description" + }, + "SourceDestCheck":{ + "shape":"AttributeBooleanValue", + "locationName":"sourceDestCheck" + }, + "Groups":{ + "shape":"SecurityGroupIdStringList", + "locationName":"SecurityGroupId" + }, + "Attachment":{ + "shape":"NetworkInterfaceAttachmentChanges", + "locationName":"attachment" + } + } + }, + "ModifyReservedInstancesRequest":{ + "type":"structure", + "required":[ + "ReservedInstancesIds", + "TargetConfigurations" + ], + "members":{ + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + }, + "ReservedInstancesIds":{ + "shape":"ReservedInstancesIdStringList", + "locationName":"ReservedInstancesId" + }, + "TargetConfigurations":{ + "shape":"ReservedInstancesConfigurationList", + "locationName":"ReservedInstancesConfigurationSetItemType" + } + } + }, + "ModifyReservedInstancesResult":{ + 
"type":"structure", + "members":{ + "ReservedInstancesModificationId":{ + "shape":"String", + "locationName":"reservedInstancesModificationId" + } + } + }, + "ModifySnapshotAttributeRequest":{ + "type":"structure", + "required":["SnapshotId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SnapshotId":{"shape":"String"}, + "Attribute":{"shape":"SnapshotAttributeName"}, + "OperationType":{"shape":"OperationType"}, + "UserIds":{ + "shape":"UserIdStringList", + "locationName":"UserId" + }, + "GroupNames":{ + "shape":"GroupNameStringList", + "locationName":"UserGroup" + }, + "CreateVolumePermission":{"shape":"CreateVolumePermissionModifications"} + } + }, + "ModifySpotFleetRequestRequest":{ + "type":"structure", + "required":["SpotFleetRequestId"], + "members":{ + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + }, + "TargetCapacity":{ + "shape":"Integer", + "locationName":"targetCapacity" + }, + "ExcessCapacityTerminationPolicy":{ + "shape":"ExcessCapacityTerminationPolicy", + "locationName":"excessCapacityTerminationPolicy" + } + } + }, + "ModifySpotFleetRequestResponse":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "ModifySubnetAttributeRequest":{ + "type":"structure", + "required":["SubnetId"], + "members":{ + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "MapPublicIpOnLaunch":{"shape":"AttributeBooleanValue"} + } + }, + "ModifyVolumeAttributeRequest":{ + "type":"structure", + "required":["VolumeId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeId":{"shape":"String"}, + "AutoEnableIO":{"shape":"AttributeBooleanValue"} + } + }, + "ModifyVpcAttributeRequest":{ + "type":"structure", + "required":["VpcId"], + "members":{ + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "EnableDnsSupport":{"shape":"AttributeBooleanValue"}, + "EnableDnsHostnames":{"shape":"AttributeBooleanValue"} + } + }, + "ModifyVpcEndpointRequest":{ + "type":"structure", + "required":["VpcEndpointId"], + "members":{ + "DryRun":{"shape":"Boolean"}, + "VpcEndpointId":{"shape":"String"}, + "ResetPolicy":{"shape":"Boolean"}, + "PolicyDocument":{"shape":"String"}, + "AddRouteTableIds":{ + "shape":"ValueStringList", + "locationName":"AddRouteTableId" + }, + "RemoveRouteTableIds":{ + "shape":"ValueStringList", + "locationName":"RemoveRouteTableId" + } + } + }, + "ModifyVpcEndpointResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "ModifyVpcPeeringConnectionOptionsRequest":{ + "type":"structure", + "required":["VpcPeeringConnectionId"], + "members":{ + "DryRun":{"shape":"Boolean"}, + "VpcPeeringConnectionId":{"shape":"String"}, + "RequesterPeeringConnectionOptions":{"shape":"PeeringConnectionOptionsRequest"}, + "AccepterPeeringConnectionOptions":{"shape":"PeeringConnectionOptionsRequest"} + } + }, + "ModifyVpcPeeringConnectionOptionsResult":{ + "type":"structure", + "members":{ + "RequesterPeeringConnectionOptions":{ + "shape":"PeeringConnectionOptions", + "locationName":"requesterPeeringConnectionOptions" + }, + "AccepterPeeringConnectionOptions":{ + "shape":"PeeringConnectionOptions", + "locationName":"accepterPeeringConnectionOptions" + } + } + }, + "MonitorInstancesRequest":{ + "type":"structure", + "required":["InstanceIds"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceIds":{ + 
"shape":"InstanceIdStringList", + "locationName":"InstanceId" + } + } + }, + "MonitorInstancesResult":{ + "type":"structure", + "members":{ + "InstanceMonitorings":{ + "shape":"InstanceMonitoringList", + "locationName":"instancesSet" + } + } + }, + "Monitoring":{ + "type":"structure", + "members":{ + "State":{ + "shape":"MonitoringState", + "locationName":"state" + } + } + }, + "MonitoringState":{ + "type":"string", + "enum":[ + "disabled", + "disabling", + "enabled", + "pending" + ] + }, + "MoveAddressToVpcRequest":{ + "type":"structure", + "required":["PublicIp"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + } + } + }, + "MoveAddressToVpcResult":{ + "type":"structure", + "members":{ + "AllocationId":{ + "shape":"String", + "locationName":"allocationId" + }, + "Status":{ + "shape":"Status", + "locationName":"status" + } + } + }, + "MoveStatus":{ + "type":"string", + "enum":[ + "movingToVpc", + "restoringToClassic" + ] + }, + "MovingAddressStatus":{ + "type":"structure", + "members":{ + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + }, + "MoveStatus":{ + "shape":"MoveStatus", + "locationName":"moveStatus" + } + } + }, + "MovingAddressStatusSet":{ + "type":"list", + "member":{ + "shape":"MovingAddressStatus", + "locationName":"item" + } + }, + "NatGateway":{ + "type":"structure", + "members":{ + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "NatGatewayId":{ + "shape":"String", + "locationName":"natGatewayId" + }, + "CreateTime":{ + "shape":"DateTime", + "locationName":"createTime" + }, + "DeleteTime":{ + "shape":"DateTime", + "locationName":"deleteTime" + }, + "NatGatewayAddresses":{ + "shape":"NatGatewayAddressList", + "locationName":"natGatewayAddressSet" + }, + "State":{ + "shape":"NatGatewayState", + "locationName":"state" + }, + "FailureCode":{ + "shape":"String", + "locationName":"failureCode" + }, + "FailureMessage":{ + "shape":"String", + "locationName":"failureMessage" + }, + "ProvisionedBandwidth":{ + "shape":"ProvisionedBandwidth", + "locationName":"provisionedBandwidth" + } + } + }, + "NatGatewayAddress":{ + "type":"structure", + "members":{ + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + }, + "AllocationId":{ + "shape":"String", + "locationName":"allocationId" + }, + "PrivateIp":{ + "shape":"String", + "locationName":"privateIp" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + } + } + }, + "NatGatewayAddressList":{ + "type":"list", + "member":{ + "shape":"NatGatewayAddress", + "locationName":"item" + } + }, + "NatGatewayList":{ + "type":"list", + "member":{ + "shape":"NatGateway", + "locationName":"item" + } + }, + "NatGatewayState":{ + "type":"string", + "enum":[ + "pending", + "failed", + "available", + "deleting", + "deleted" + ] + }, + "NetworkAcl":{ + "type":"structure", + "members":{ + "NetworkAclId":{ + "shape":"String", + "locationName":"networkAclId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "IsDefault":{ + "shape":"Boolean", + "locationName":"default" + }, + "Entries":{ + "shape":"NetworkAclEntryList", + "locationName":"entrySet" + }, + "Associations":{ + "shape":"NetworkAclAssociationList", + "locationName":"associationSet" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "NetworkAclAssociation":{ + "type":"structure", + "members":{ + 
"NetworkAclAssociationId":{ + "shape":"String", + "locationName":"networkAclAssociationId" + }, + "NetworkAclId":{ + "shape":"String", + "locationName":"networkAclId" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + } + } + }, + "NetworkAclAssociationList":{ + "type":"list", + "member":{ + "shape":"NetworkAclAssociation", + "locationName":"item" + } + }, + "NetworkAclEntry":{ + "type":"structure", + "members":{ + "RuleNumber":{ + "shape":"Integer", + "locationName":"ruleNumber" + }, + "Protocol":{ + "shape":"String", + "locationName":"protocol" + }, + "RuleAction":{ + "shape":"RuleAction", + "locationName":"ruleAction" + }, + "Egress":{ + "shape":"Boolean", + "locationName":"egress" + }, + "CidrBlock":{ + "shape":"String", + "locationName":"cidrBlock" + }, + "IcmpTypeCode":{ + "shape":"IcmpTypeCode", + "locationName":"icmpTypeCode" + }, + "PortRange":{ + "shape":"PortRange", + "locationName":"portRange" + } + } + }, + "NetworkAclEntryList":{ + "type":"list", + "member":{ + "shape":"NetworkAclEntry", + "locationName":"item" + } + }, + "NetworkAclList":{ + "type":"list", + "member":{ + "shape":"NetworkAcl", + "locationName":"item" + } + }, + "NetworkInterface":{ + "type":"structure", + "members":{ + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "RequesterId":{ + "shape":"String", + "locationName":"requesterId" + }, + "RequesterManaged":{ + "shape":"Boolean", + "locationName":"requesterManaged" + }, + "Status":{ + "shape":"NetworkInterfaceStatus", + "locationName":"status" + }, + "MacAddress":{ + "shape":"String", + "locationName":"macAddress" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "PrivateDnsName":{ + "shape":"String", + "locationName":"privateDnsName" + }, + "SourceDestCheck":{ + "shape":"Boolean", + "locationName":"sourceDestCheck" + }, + "Groups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + }, + "Attachment":{ + "shape":"NetworkInterfaceAttachment", + "locationName":"attachment" + }, + "Association":{ + "shape":"NetworkInterfaceAssociation", + "locationName":"association" + }, + "TagSet":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "PrivateIpAddresses":{ + "shape":"NetworkInterfacePrivateIpAddressList", + "locationName":"privateIpAddressesSet" + }, + "InterfaceType":{ + "shape":"NetworkInterfaceType", + "locationName":"interfaceType" + } + } + }, + "NetworkInterfaceAssociation":{ + "type":"structure", + "members":{ + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + }, + "PublicDnsName":{ + "shape":"String", + "locationName":"publicDnsName" + }, + "IpOwnerId":{ + "shape":"String", + "locationName":"ipOwnerId" + }, + "AllocationId":{ + "shape":"String", + "locationName":"allocationId" + }, + "AssociationId":{ + "shape":"String", + "locationName":"associationId" + } + } + }, + "NetworkInterfaceAttachment":{ + "type":"structure", + "members":{ + "AttachmentId":{ + "shape":"String", + "locationName":"attachmentId" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "InstanceOwnerId":{ + "shape":"String", + "locationName":"instanceOwnerId" + }, + "DeviceIndex":{ + 
"shape":"Integer", + "locationName":"deviceIndex" + }, + "Status":{ + "shape":"AttachmentStatus", + "locationName":"status" + }, + "AttachTime":{ + "shape":"DateTime", + "locationName":"attachTime" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + "locationName":"deleteOnTermination" + } + } + }, + "NetworkInterfaceAttachmentChanges":{ + "type":"structure", + "members":{ + "AttachmentId":{ + "shape":"String", + "locationName":"attachmentId" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + "locationName":"deleteOnTermination" + } + } + }, + "NetworkInterfaceAttribute":{ + "type":"string", + "enum":[ + "description", + "groupSet", + "sourceDestCheck", + "attachment" + ] + }, + "NetworkInterfaceIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"item" + } + }, + "NetworkInterfaceList":{ + "type":"list", + "member":{ + "shape":"NetworkInterface", + "locationName":"item" + } + }, + "NetworkInterfacePrivateIpAddress":{ + "type":"structure", + "members":{ + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "PrivateDnsName":{ + "shape":"String", + "locationName":"privateDnsName" + }, + "Primary":{ + "shape":"Boolean", + "locationName":"primary" + }, + "Association":{ + "shape":"NetworkInterfaceAssociation", + "locationName":"association" + } + } + }, + "NetworkInterfacePrivateIpAddressList":{ + "type":"list", + "member":{ + "shape":"NetworkInterfacePrivateIpAddress", + "locationName":"item" + } + }, + "NetworkInterfaceStatus":{ + "type":"string", + "enum":[ + "available", + "attaching", + "in-use", + "detaching" + ] + }, + "NetworkInterfaceType":{ + "type":"string", + "enum":[ + "interface", + "natGateway" + ] + }, + "NewDhcpConfiguration":{ + "type":"structure", + "members":{ + "Key":{ + "shape":"String", + "locationName":"key" + }, + "Values":{ + "shape":"ValueStringList", + "locationName":"Value" + } + } + }, + "NewDhcpConfigurationList":{ + "type":"list", + "member":{ + "shape":"NewDhcpConfiguration", + "locationName":"item" + } + }, + "NextToken":{ + "type":"string", + "max":1024, + "min":1 + }, + "OccurrenceDayRequestSet":{ + "type":"list", + "member":{ + "shape":"Integer", + "locationName":"OccurenceDay" + } + }, + "OccurrenceDaySet":{ + "type":"list", + "member":{ + "shape":"Integer", + "locationName":"item" + } + }, + "OfferingTypeValues":{ + "type":"string", + "enum":[ + "Heavy Utilization", + "Medium Utilization", + "Light Utilization", + "No Upfront", + "Partial Upfront", + "All Upfront" + ] + }, + "OperationType":{ + "type":"string", + "enum":[ + "add", + "remove" + ] + }, + "OwnerStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"Owner" + } + }, + "PeeringConnectionOptions":{ + "type":"structure", + "members":{ + "AllowEgressFromLocalClassicLinkToRemoteVpc":{ + "shape":"Boolean", + "locationName":"allowEgressFromLocalClassicLinkToRemoteVpc" + }, + "AllowEgressFromLocalVpcToRemoteClassicLink":{ + "shape":"Boolean", + "locationName":"allowEgressFromLocalVpcToRemoteClassicLink" + } + } + }, + "PeeringConnectionOptionsRequest":{ + "type":"structure", + "required":[ + "AllowEgressFromLocalClassicLinkToRemoteVpc", + "AllowEgressFromLocalVpcToRemoteClassicLink" + ], + "members":{ + "AllowEgressFromLocalClassicLinkToRemoteVpc":{"shape":"Boolean"}, + "AllowEgressFromLocalVpcToRemoteClassicLink":{"shape":"Boolean"} + } + }, + "PermissionGroup":{ + "type":"string", + "enum":["all"] + }, + "Placement":{ + "type":"structure", + "members":{ + "AvailabilityZone":{ + "shape":"String", + 
"locationName":"availabilityZone" + }, + "GroupName":{ + "shape":"String", + "locationName":"groupName" + }, + "Tenancy":{ + "shape":"Tenancy", + "locationName":"tenancy" + }, + "HostId":{ + "shape":"String", + "locationName":"hostId" + }, + "Affinity":{ + "shape":"String", + "locationName":"affinity" + } + } + }, + "PlacementGroup":{ + "type":"structure", + "members":{ + "GroupName":{ + "shape":"String", + "locationName":"groupName" + }, + "Strategy":{ + "shape":"PlacementStrategy", + "locationName":"strategy" + }, + "State":{ + "shape":"PlacementGroupState", + "locationName":"state" + } + } + }, + "PlacementGroupList":{ + "type":"list", + "member":{ + "shape":"PlacementGroup", + "locationName":"item" + } + }, + "PlacementGroupState":{ + "type":"string", + "enum":[ + "pending", + "available", + "deleting", + "deleted" + ] + }, + "PlacementGroupStringList":{ + "type":"list", + "member":{"shape":"String"} + }, + "PlacementStrategy":{ + "type":"string", + "enum":["cluster"] + }, + "PlatformValues":{ + "type":"string", + "enum":["Windows"] + }, + "PortRange":{ + "type":"structure", + "members":{ + "From":{ + "shape":"Integer", + "locationName":"from" + }, + "To":{ + "shape":"Integer", + "locationName":"to" + } + } + }, + "PrefixList":{ + "type":"structure", + "members":{ + "PrefixListId":{ + "shape":"String", + "locationName":"prefixListId" + }, + "PrefixListName":{ + "shape":"String", + "locationName":"prefixListName" + }, + "Cidrs":{ + "shape":"ValueStringList", + "locationName":"cidrSet" + } + } + }, + "PrefixListId":{ + "type":"structure", + "members":{ + "PrefixListId":{ + "shape":"String", + "locationName":"prefixListId" + } + } + }, + "PrefixListIdList":{ + "type":"list", + "member":{ + "shape":"PrefixListId", + "locationName":"item" + } + }, + "PrefixListIdSet":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"item" + } + }, + "PrefixListSet":{ + "type":"list", + "member":{ + "shape":"PrefixList", + "locationName":"item" + } + }, + "PriceSchedule":{ + "type":"structure", + "members":{ + "Term":{ + "shape":"Long", + "locationName":"term" + }, + "Price":{ + "shape":"Double", + "locationName":"price" + }, + "CurrencyCode":{ + "shape":"CurrencyCodeValues", + "locationName":"currencyCode" + }, + "Active":{ + "shape":"Boolean", + "locationName":"active" + } + } + }, + "PriceScheduleList":{ + "type":"list", + "member":{ + "shape":"PriceSchedule", + "locationName":"item" + } + }, + "PriceScheduleSpecification":{ + "type":"structure", + "members":{ + "Term":{ + "shape":"Long", + "locationName":"term" + }, + "Price":{ + "shape":"Double", + "locationName":"price" + }, + "CurrencyCode":{ + "shape":"CurrencyCodeValues", + "locationName":"currencyCode" + } + } + }, + "PriceScheduleSpecificationList":{ + "type":"list", + "member":{ + "shape":"PriceScheduleSpecification", + "locationName":"item" + } + }, + "PricingDetail":{ + "type":"structure", + "members":{ + "Price":{ + "shape":"Double", + "locationName":"price" + }, + "Count":{ + "shape":"Integer", + "locationName":"count" + } + } + }, + "PricingDetailsList":{ + "type":"list", + "member":{ + "shape":"PricingDetail", + "locationName":"item" + } + }, + "PrivateIpAddressConfigSet":{ + "type":"list", + "member":{ + "shape":"ScheduledInstancesPrivateIpAddressConfig", + "locationName":"PrivateIpAddressConfigSet" + } + }, + "PrivateIpAddressSpecification":{ + "type":"structure", + "required":["PrivateIpAddress"], + "members":{ + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "Primary":{ + 
"shape":"Boolean", + "locationName":"primary" + } + } + }, + "PrivateIpAddressSpecificationList":{ + "type":"list", + "member":{ + "shape":"PrivateIpAddressSpecification", + "locationName":"item" + } + }, + "PrivateIpAddressStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"PrivateIpAddress" + } + }, + "ProductCode":{ + "type":"structure", + "members":{ + "ProductCodeId":{ + "shape":"String", + "locationName":"productCode" + }, + "ProductCodeType":{ + "shape":"ProductCodeValues", + "locationName":"type" + } + } + }, + "ProductCodeList":{ + "type":"list", + "member":{ + "shape":"ProductCode", + "locationName":"item" + } + }, + "ProductCodeStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ProductCode" + } + }, + "ProductCodeValues":{ + "type":"string", + "enum":[ + "devpay", + "marketplace" + ] + }, + "ProductDescriptionList":{ + "type":"list", + "member":{"shape":"String"} + }, + "PropagatingVgw":{ + "type":"structure", + "members":{ + "GatewayId":{ + "shape":"String", + "locationName":"gatewayId" + } + } + }, + "PropagatingVgwList":{ + "type":"list", + "member":{ + "shape":"PropagatingVgw", + "locationName":"item" + } + }, + "ProvisionedBandwidth":{ + "type":"structure", + "members":{ + "Provisioned":{ + "shape":"String", + "locationName":"provisioned" + }, + "Requested":{ + "shape":"String", + "locationName":"requested" + }, + "RequestTime":{ + "shape":"DateTime", + "locationName":"requestTime" + }, + "ProvisionTime":{ + "shape":"DateTime", + "locationName":"provisionTime" + }, + "Status":{ + "shape":"String", + "locationName":"status" + } + } + }, + "PublicIpStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"PublicIp" + } + }, + "PurchaseRequest":{ + "type":"structure", + "required":[ + "PurchaseToken", + "InstanceCount" + ], + "members":{ + "PurchaseToken":{"shape":"String"}, + "InstanceCount":{"shape":"Integer"} + } + }, + "PurchaseRequestSet":{ + "type":"list", + "member":{ + "shape":"PurchaseRequest", + "locationName":"PurchaseRequest" + }, + "min":1 + }, + "PurchaseReservedInstancesOfferingRequest":{ + "type":"structure", + "required":[ + "ReservedInstancesOfferingId", + "InstanceCount" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ReservedInstancesOfferingId":{"shape":"String"}, + "InstanceCount":{"shape":"Integer"}, + "LimitPrice":{ + "shape":"ReservedInstanceLimitPrice", + "locationName":"limitPrice" + } + } + }, + "PurchaseReservedInstancesOfferingResult":{ + "type":"structure", + "members":{ + "ReservedInstancesId":{ + "shape":"String", + "locationName":"reservedInstancesId" + } + } + }, + "PurchaseScheduledInstancesRequest":{ + "type":"structure", + "required":["PurchaseRequests"], + "members":{ + "DryRun":{"shape":"Boolean"}, + "ClientToken":{ + "shape":"String", + "idempotencyToken":true + }, + "PurchaseRequests":{ + "shape":"PurchaseRequestSet", + "locationName":"PurchaseRequest" + } + } + }, + "PurchaseScheduledInstancesResult":{ + "type":"structure", + "members":{ + "ScheduledInstanceSet":{ + "shape":"PurchasedScheduledInstanceSet", + "locationName":"scheduledInstanceSet" + } + } + }, + "PurchasedScheduledInstanceSet":{ + "type":"list", + "member":{ + "shape":"ScheduledInstance", + "locationName":"item" + } + }, + "RIProductDescription":{ + "type":"string", + "enum":[ + "Linux/UNIX", + "Linux/UNIX (Amazon VPC)", + "Windows", + "Windows (Amazon VPC)" + ] + }, + "ReasonCodesList":{ + "type":"list", + "member":{ + 
"shape":"ReportInstanceReasonCodes", + "locationName":"item" + } + }, + "RebootInstancesRequest":{ + "type":"structure", + "required":["InstanceIds"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + } + } + }, + "RecurringCharge":{ + "type":"structure", + "members":{ + "Frequency":{ + "shape":"RecurringChargeFrequency", + "locationName":"frequency" + }, + "Amount":{ + "shape":"Double", + "locationName":"amount" + } + } + }, + "RecurringChargeFrequency":{ + "type":"string", + "enum":["Hourly"] + }, + "RecurringChargesList":{ + "type":"list", + "member":{ + "shape":"RecurringCharge", + "locationName":"item" + } + }, + "Region":{ + "type":"structure", + "members":{ + "RegionName":{ + "shape":"String", + "locationName":"regionName" + }, + "Endpoint":{ + "shape":"String", + "locationName":"regionEndpoint" + } + } + }, + "RegionList":{ + "type":"list", + "member":{ + "shape":"Region", + "locationName":"item" + } + }, + "RegionNameStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"RegionName" + } + }, + "RegisterImageRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ImageLocation":{"shape":"String"}, + "Name":{ + "shape":"String", + "locationName":"name" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "Architecture":{ + "shape":"ArchitectureValues", + "locationName":"architecture" + }, + "KernelId":{ + "shape":"String", + "locationName":"kernelId" + }, + "RamdiskId":{ + "shape":"String", + "locationName":"ramdiskId" + }, + "RootDeviceName":{ + "shape":"String", + "locationName":"rootDeviceName" + }, + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappingRequestList", + "locationName":"BlockDeviceMapping" + }, + "VirtualizationType":{ + "shape":"String", + "locationName":"virtualizationType" + }, + "SriovNetSupport":{ + "shape":"String", + "locationName":"sriovNetSupport" + } + } + }, + "RegisterImageResult":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"String", + "locationName":"imageId" + } + } + }, + "RejectVpcPeeringConnectionRequest":{ + "type":"structure", + "required":["VpcPeeringConnectionId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcPeeringConnectionId":{ + "shape":"String", + "locationName":"vpcPeeringConnectionId" + } + } + }, + "RejectVpcPeeringConnectionResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "ReleaseAddressRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "PublicIp":{"shape":"String"}, + "AllocationId":{"shape":"String"} + } + }, + "ReleaseHostsRequest":{ + "type":"structure", + "required":["HostIds"], + "members":{ + "HostIds":{ + "shape":"RequestHostIdList", + "locationName":"hostId" + } + } + }, + "ReleaseHostsResult":{ + "type":"structure", + "members":{ + "Successful":{ + "shape":"ResponseHostIdList", + "locationName":"successful" + }, + "Unsuccessful":{ + "shape":"UnsuccessfulItemList", + "locationName":"unsuccessful" + } + } + }, + "ReplaceNetworkAclAssociationRequest":{ + "type":"structure", + "required":[ + "AssociationId", + "NetworkAclId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "AssociationId":{ + "shape":"String", + "locationName":"associationId" + }, + 
"NetworkAclId":{ + "shape":"String", + "locationName":"networkAclId" + } + } + }, + "ReplaceNetworkAclAssociationResult":{ + "type":"structure", + "members":{ + "NewAssociationId":{ + "shape":"String", + "locationName":"newAssociationId" + } + } + }, + "ReplaceNetworkAclEntryRequest":{ + "type":"structure", + "required":[ + "NetworkAclId", + "RuleNumber", + "Protocol", + "RuleAction", + "Egress", + "CidrBlock" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkAclId":{ + "shape":"String", + "locationName":"networkAclId" + }, + "RuleNumber":{ + "shape":"Integer", + "locationName":"ruleNumber" + }, + "Protocol":{ + "shape":"String", + "locationName":"protocol" + }, + "RuleAction":{ + "shape":"RuleAction", + "locationName":"ruleAction" + }, + "Egress":{ + "shape":"Boolean", + "locationName":"egress" + }, + "CidrBlock":{ + "shape":"String", + "locationName":"cidrBlock" + }, + "IcmpTypeCode":{ + "shape":"IcmpTypeCode", + "locationName":"Icmp" + }, + "PortRange":{ + "shape":"PortRange", + "locationName":"portRange" + } + } + }, + "ReplaceRouteRequest":{ + "type":"structure", + "required":[ + "RouteTableId", + "DestinationCidrBlock" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "RouteTableId":{ + "shape":"String", + "locationName":"routeTableId" + }, + "DestinationCidrBlock":{ + "shape":"String", + "locationName":"destinationCidrBlock" + }, + "GatewayId":{ + "shape":"String", + "locationName":"gatewayId" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "VpcPeeringConnectionId":{ + "shape":"String", + "locationName":"vpcPeeringConnectionId" + }, + "NatGatewayId":{ + "shape":"String", + "locationName":"natGatewayId" + } + } + }, + "ReplaceRouteTableAssociationRequest":{ + "type":"structure", + "required":[ + "AssociationId", + "RouteTableId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "AssociationId":{ + "shape":"String", + "locationName":"associationId" + }, + "RouteTableId":{ + "shape":"String", + "locationName":"routeTableId" + } + } + }, + "ReplaceRouteTableAssociationResult":{ + "type":"structure", + "members":{ + "NewAssociationId":{ + "shape":"String", + "locationName":"newAssociationId" + } + } + }, + "ReportInstanceReasonCodes":{ + "type":"string", + "enum":[ + "instance-stuck-in-state", + "unresponsive", + "not-accepting-credentials", + "password-not-available", + "performance-network", + "performance-instance-store", + "performance-ebs-volume", + "performance-other", + "other" + ] + }, + "ReportInstanceStatusRequest":{ + "type":"structure", + "required":[ + "Instances", + "Status", + "ReasonCodes" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Instances":{ + "shape":"InstanceIdStringList", + "locationName":"instanceId" + }, + "Status":{ + "shape":"ReportStatusType", + "locationName":"status" + }, + "StartTime":{ + "shape":"DateTime", + "locationName":"startTime" + }, + "EndTime":{ + "shape":"DateTime", + "locationName":"endTime" + }, + "ReasonCodes":{ + "shape":"ReasonCodesList", + "locationName":"reasonCode" + }, + "Description":{ + "shape":"String", + "locationName":"description" + } + } + }, + "ReportStatusType":{ + "type":"string", + "enum":[ + "ok", + "impaired" + ] + }, + "RequestHostIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"item" + } + }, + "RequestSpotFleetRequest":{ 
+ "type":"structure", + "required":["SpotFleetRequestConfig"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SpotFleetRequestConfig":{ + "shape":"SpotFleetRequestConfigData", + "locationName":"spotFleetRequestConfig" + } + } + }, + "RequestSpotFleetResponse":{ + "type":"structure", + "required":["SpotFleetRequestId"], + "members":{ + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + } + } + }, + "RequestSpotInstancesRequest":{ + "type":"structure", + "required":["SpotPrice"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SpotPrice":{ + "shape":"String", + "locationName":"spotPrice" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + }, + "InstanceCount":{ + "shape":"Integer", + "locationName":"instanceCount" + }, + "Type":{ + "shape":"SpotInstanceType", + "locationName":"type" + }, + "ValidFrom":{ + "shape":"DateTime", + "locationName":"validFrom" + }, + "ValidUntil":{ + "shape":"DateTime", + "locationName":"validUntil" + }, + "LaunchGroup":{ + "shape":"String", + "locationName":"launchGroup" + }, + "AvailabilityZoneGroup":{ + "shape":"String", + "locationName":"availabilityZoneGroup" + }, + "BlockDurationMinutes":{ + "shape":"Integer", + "locationName":"blockDurationMinutes" + }, + "LaunchSpecification":{"shape":"RequestSpotLaunchSpecification"} + } + }, + "RequestSpotInstancesResult":{ + "type":"structure", + "members":{ + "SpotInstanceRequests":{ + "shape":"SpotInstanceRequestList", + "locationName":"spotInstanceRequestSet" + } + } + }, + "RequestSpotLaunchSpecification":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"String", + "locationName":"imageId" + }, + "KeyName":{ + "shape":"String", + "locationName":"keyName" + }, + "SecurityGroups":{ + "shape":"ValueStringList", + "locationName":"SecurityGroup" + }, + "UserData":{ + "shape":"String", + "locationName":"userData" + }, + "AddressingType":{ + "shape":"String", + "locationName":"addressingType" + }, + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + }, + "Placement":{ + "shape":"SpotPlacement", + "locationName":"placement" + }, + "KernelId":{ + "shape":"String", + "locationName":"kernelId" + }, + "RamdiskId":{ + "shape":"String", + "locationName":"ramdiskId" + }, + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappingList", + "locationName":"blockDeviceMapping" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "NetworkInterfaces":{ + "shape":"InstanceNetworkInterfaceSpecificationList", + "locationName":"NetworkInterface" + }, + "IamInstanceProfile":{ + "shape":"IamInstanceProfileSpecification", + "locationName":"iamInstanceProfile" + }, + "EbsOptimized":{ + "shape":"Boolean", + "locationName":"ebsOptimized" + }, + "Monitoring":{ + "shape":"RunInstancesMonitoringEnabled", + "locationName":"monitoring" + }, + "SecurityGroupIds":{ + "shape":"ValueStringList", + "locationName":"SecurityGroupId" + } + } + }, + "Reservation":{ + "type":"structure", + "members":{ + "ReservationId":{ + "shape":"String", + "locationName":"reservationId" + }, + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "RequesterId":{ + "shape":"String", + "locationName":"requesterId" + }, + "Groups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + }, + "Instances":{ + "shape":"InstanceList", + "locationName":"instancesSet" + } + } + }, + "ReservationList":{ + "type":"list", + "member":{ + "shape":"Reservation", + "locationName":"item" + } + 
}, + "ReservedInstanceLimitPrice":{ + "type":"structure", + "members":{ + "Amount":{ + "shape":"Double", + "locationName":"amount" + }, + "CurrencyCode":{ + "shape":"CurrencyCodeValues", + "locationName":"currencyCode" + } + } + }, + "ReservedInstanceState":{ + "type":"string", + "enum":[ + "payment-pending", + "active", + "payment-failed", + "retired" + ] + }, + "ReservedInstances":{ + "type":"structure", + "members":{ + "ReservedInstancesId":{ + "shape":"String", + "locationName":"reservedInstancesId" + }, + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Start":{ + "shape":"DateTime", + "locationName":"start" + }, + "End":{ + "shape":"DateTime", + "locationName":"end" + }, + "Duration":{ + "shape":"Long", + "locationName":"duration" + }, + "UsagePrice":{ + "shape":"Float", + "locationName":"usagePrice" + }, + "FixedPrice":{ + "shape":"Float", + "locationName":"fixedPrice" + }, + "InstanceCount":{ + "shape":"Integer", + "locationName":"instanceCount" + }, + "ProductDescription":{ + "shape":"RIProductDescription", + "locationName":"productDescription" + }, + "State":{ + "shape":"ReservedInstanceState", + "locationName":"state" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "InstanceTenancy":{ + "shape":"Tenancy", + "locationName":"instanceTenancy" + }, + "CurrencyCode":{ + "shape":"CurrencyCodeValues", + "locationName":"currencyCode" + }, + "OfferingType":{ + "shape":"OfferingTypeValues", + "locationName":"offeringType" + }, + "RecurringCharges":{ + "shape":"RecurringChargesList", + "locationName":"recurringCharges" + } + } + }, + "ReservedInstancesConfiguration":{ + "type":"structure", + "members":{ + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Platform":{ + "shape":"String", + "locationName":"platform" + }, + "InstanceCount":{ + "shape":"Integer", + "locationName":"instanceCount" + }, + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + } + } + }, + "ReservedInstancesConfigurationList":{ + "type":"list", + "member":{ + "shape":"ReservedInstancesConfiguration", + "locationName":"item" + } + }, + "ReservedInstancesId":{ + "type":"structure", + "members":{ + "ReservedInstancesId":{ + "shape":"String", + "locationName":"reservedInstancesId" + } + } + }, + "ReservedInstancesIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ReservedInstancesId" + } + }, + "ReservedInstancesList":{ + "type":"list", + "member":{ + "shape":"ReservedInstances", + "locationName":"item" + } + }, + "ReservedInstancesListing":{ + "type":"structure", + "members":{ + "ReservedInstancesListingId":{ + "shape":"String", + "locationName":"reservedInstancesListingId" + }, + "ReservedInstancesId":{ + "shape":"String", + "locationName":"reservedInstancesId" + }, + "CreateDate":{ + "shape":"DateTime", + "locationName":"createDate" + }, + "UpdateDate":{ + "shape":"DateTime", + "locationName":"updateDate" + }, + "Status":{ + "shape":"ListingStatus", + "locationName":"status" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "InstanceCounts":{ + "shape":"InstanceCountList", + "locationName":"instanceCounts" + }, + "PriceSchedules":{ + "shape":"PriceScheduleList", + "locationName":"priceSchedules" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + } + } + }, + 
"ReservedInstancesListingList":{ + "type":"list", + "member":{ + "shape":"ReservedInstancesListing", + "locationName":"item" + } + }, + "ReservedInstancesModification":{ + "type":"structure", + "members":{ + "ReservedInstancesModificationId":{ + "shape":"String", + "locationName":"reservedInstancesModificationId" + }, + "ReservedInstancesIds":{ + "shape":"ReservedIntancesIds", + "locationName":"reservedInstancesSet" + }, + "ModificationResults":{ + "shape":"ReservedInstancesModificationResultList", + "locationName":"modificationResultSet" + }, + "CreateDate":{ + "shape":"DateTime", + "locationName":"createDate" + }, + "UpdateDate":{ + "shape":"DateTime", + "locationName":"updateDate" + }, + "EffectiveDate":{ + "shape":"DateTime", + "locationName":"effectiveDate" + }, + "Status":{ + "shape":"String", + "locationName":"status" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + } + } + }, + "ReservedInstancesModificationIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ReservedInstancesModificationId" + } + }, + "ReservedInstancesModificationList":{ + "type":"list", + "member":{ + "shape":"ReservedInstancesModification", + "locationName":"item" + } + }, + "ReservedInstancesModificationResult":{ + "type":"structure", + "members":{ + "ReservedInstancesId":{ + "shape":"String", + "locationName":"reservedInstancesId" + }, + "TargetConfiguration":{ + "shape":"ReservedInstancesConfiguration", + "locationName":"targetConfiguration" + } + } + }, + "ReservedInstancesModificationResultList":{ + "type":"list", + "member":{ + "shape":"ReservedInstancesModificationResult", + "locationName":"item" + } + }, + "ReservedInstancesOffering":{ + "type":"structure", + "members":{ + "ReservedInstancesOfferingId":{ + "shape":"String", + "locationName":"reservedInstancesOfferingId" + }, + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Duration":{ + "shape":"Long", + "locationName":"duration" + }, + "UsagePrice":{ + "shape":"Float", + "locationName":"usagePrice" + }, + "FixedPrice":{ + "shape":"Float", + "locationName":"fixedPrice" + }, + "ProductDescription":{ + "shape":"RIProductDescription", + "locationName":"productDescription" + }, + "InstanceTenancy":{ + "shape":"Tenancy", + "locationName":"instanceTenancy" + }, + "CurrencyCode":{ + "shape":"CurrencyCodeValues", + "locationName":"currencyCode" + }, + "OfferingType":{ + "shape":"OfferingTypeValues", + "locationName":"offeringType" + }, + "RecurringCharges":{ + "shape":"RecurringChargesList", + "locationName":"recurringCharges" + }, + "Marketplace":{ + "shape":"Boolean", + "locationName":"marketplace" + }, + "PricingDetails":{ + "shape":"PricingDetailsList", + "locationName":"pricingDetailsSet" + } + } + }, + "ReservedInstancesOfferingIdStringList":{ + "type":"list", + "member":{"shape":"String"} + }, + "ReservedInstancesOfferingList":{ + "type":"list", + "member":{ + "shape":"ReservedInstancesOffering", + "locationName":"item" + } + }, + "ReservedIntancesIds":{ + "type":"list", + "member":{ + "shape":"ReservedInstancesId", + "locationName":"item" + } + }, + "ResetImageAttributeName":{ + "type":"string", + "enum":["launchPermission"] + }, + "ResetImageAttributeRequest":{ + "type":"structure", + "required":[ + "ImageId", + "Attribute" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" 
+ }, + "ImageId":{"shape":"String"}, + "Attribute":{"shape":"ResetImageAttributeName"} + } + }, + "ResetInstanceAttributeRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "Attribute" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Attribute":{ + "shape":"InstanceAttributeName", + "locationName":"attribute" + } + } + }, + "ResetNetworkInterfaceAttributeRequest":{ + "type":"structure", + "required":["NetworkInterfaceId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "SourceDestCheck":{ + "shape":"String", + "locationName":"sourceDestCheck" + } + } + }, + "ResetSnapshotAttributeRequest":{ + "type":"structure", + "required":[ + "SnapshotId", + "Attribute" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SnapshotId":{"shape":"String"}, + "Attribute":{"shape":"SnapshotAttributeName"} + } + }, + "ResourceIdList":{ + "type":"list", + "member":{"shape":"String"} + }, + "ResourceType":{ + "type":"string", + "enum":[ + "customer-gateway", + "dhcp-options", + "image", + "instance", + "internet-gateway", + "network-acl", + "network-interface", + "reserved-instances", + "route-table", + "snapshot", + "spot-instances-request", + "subnet", + "security-group", + "volume", + "vpc", + "vpn-connection", + "vpn-gateway" + ] + }, + "ResponseHostIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"item" + } + }, + "RestorableByStringList":{ + "type":"list", + "member":{"shape":"String"} + }, + "RestoreAddressToClassicRequest":{ + "type":"structure", + "required":["PublicIp"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + } + } + }, + "RestoreAddressToClassicResult":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"Status", + "locationName":"status" + }, + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + } + } + }, + "RevokeSecurityGroupEgressRequest":{ + "type":"structure", + "required":["GroupId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupId":{ + "shape":"String", + "locationName":"groupId" + }, + "SourceSecurityGroupName":{ + "shape":"String", + "locationName":"sourceSecurityGroupName" + }, + "SourceSecurityGroupOwnerId":{ + "shape":"String", + "locationName":"sourceSecurityGroupOwnerId" + }, + "IpProtocol":{ + "shape":"String", + "locationName":"ipProtocol" + }, + "FromPort":{ + "shape":"Integer", + "locationName":"fromPort" + }, + "ToPort":{ + "shape":"Integer", + "locationName":"toPort" + }, + "CidrIp":{ + "shape":"String", + "locationName":"cidrIp" + }, + "IpPermissions":{ + "shape":"IpPermissionList", + "locationName":"ipPermissions" + } + } + }, + "RevokeSecurityGroupIngressRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupName":{"shape":"String"}, + "GroupId":{"shape":"String"}, + "SourceSecurityGroupName":{"shape":"String"}, + "SourceSecurityGroupOwnerId":{"shape":"String"}, + "IpProtocol":{"shape":"String"}, + "FromPort":{"shape":"Integer"}, + "ToPort":{"shape":"Integer"}, + "CidrIp":{"shape":"String"}, + "IpPermissions":{"shape":"IpPermissionList"} + } + }, + "Route":{ + "type":"structure", + "members":{ + "DestinationCidrBlock":{ + "shape":"String", + 
"locationName":"destinationCidrBlock" + }, + "DestinationPrefixListId":{ + "shape":"String", + "locationName":"destinationPrefixListId" + }, + "GatewayId":{ + "shape":"String", + "locationName":"gatewayId" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "InstanceOwnerId":{ + "shape":"String", + "locationName":"instanceOwnerId" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "VpcPeeringConnectionId":{ + "shape":"String", + "locationName":"vpcPeeringConnectionId" + }, + "NatGatewayId":{ + "shape":"String", + "locationName":"natGatewayId" + }, + "State":{ + "shape":"RouteState", + "locationName":"state" + }, + "Origin":{ + "shape":"RouteOrigin", + "locationName":"origin" + } + } + }, + "RouteList":{ + "type":"list", + "member":{ + "shape":"Route", + "locationName":"item" + } + }, + "RouteOrigin":{ + "type":"string", + "enum":[ + "CreateRouteTable", + "CreateRoute", + "EnableVgwRoutePropagation" + ] + }, + "RouteState":{ + "type":"string", + "enum":[ + "active", + "blackhole" + ] + }, + "RouteTable":{ + "type":"structure", + "members":{ + "RouteTableId":{ + "shape":"String", + "locationName":"routeTableId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "Routes":{ + "shape":"RouteList", + "locationName":"routeSet" + }, + "Associations":{ + "shape":"RouteTableAssociationList", + "locationName":"associationSet" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "PropagatingVgws":{ + "shape":"PropagatingVgwList", + "locationName":"propagatingVgwSet" + } + } + }, + "RouteTableAssociation":{ + "type":"structure", + "members":{ + "RouteTableAssociationId":{ + "shape":"String", + "locationName":"routeTableAssociationId" + }, + "RouteTableId":{ + "shape":"String", + "locationName":"routeTableId" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "Main":{ + "shape":"Boolean", + "locationName":"main" + } + } + }, + "RouteTableAssociationList":{ + "type":"list", + "member":{ + "shape":"RouteTableAssociation", + "locationName":"item" + } + }, + "RouteTableList":{ + "type":"list", + "member":{ + "shape":"RouteTable", + "locationName":"item" + } + }, + "RuleAction":{ + "type":"string", + "enum":[ + "allow", + "deny" + ] + }, + "RunInstancesMonitoringEnabled":{ + "type":"structure", + "required":["Enabled"], + "members":{ + "Enabled":{ + "shape":"Boolean", + "locationName":"enabled" + } + } + }, + "RunInstancesRequest":{ + "type":"structure", + "required":[ + "ImageId", + "MinCount", + "MaxCount" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ImageId":{"shape":"String"}, + "MinCount":{"shape":"Integer"}, + "MaxCount":{"shape":"Integer"}, + "KeyName":{"shape":"String"}, + "SecurityGroups":{ + "shape":"SecurityGroupStringList", + "locationName":"SecurityGroup" + }, + "SecurityGroupIds":{ + "shape":"SecurityGroupIdStringList", + "locationName":"SecurityGroupId" + }, + "UserData":{"shape":"String"}, + "InstanceType":{"shape":"InstanceType"}, + "Placement":{"shape":"Placement"}, + "KernelId":{"shape":"String"}, + "RamdiskId":{"shape":"String"}, + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappingRequestList", + "locationName":"BlockDeviceMapping" + }, + "Monitoring":{"shape":"RunInstancesMonitoringEnabled"}, + "SubnetId":{"shape":"String"}, + "DisableApiTermination":{ + "shape":"Boolean", + "locationName":"disableApiTermination" + }, + "InstanceInitiatedShutdownBehavior":{ + "shape":"ShutdownBehavior", + 
"locationName":"instanceInitiatedShutdownBehavior" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + }, + "AdditionalInfo":{ + "shape":"String", + "locationName":"additionalInfo" + }, + "NetworkInterfaces":{ + "shape":"InstanceNetworkInterfaceSpecificationList", + "locationName":"networkInterface" + }, + "IamInstanceProfile":{ + "shape":"IamInstanceProfileSpecification", + "locationName":"iamInstanceProfile" + }, + "EbsOptimized":{ + "shape":"Boolean", + "locationName":"ebsOptimized" + } + } + }, + "RunScheduledInstancesRequest":{ + "type":"structure", + "required":[ + "ScheduledInstanceId", + "LaunchSpecification" + ], + "members":{ + "DryRun":{"shape":"Boolean"}, + "ClientToken":{ + "shape":"String", + "idempotencyToken":true + }, + "InstanceCount":{"shape":"Integer"}, + "ScheduledInstanceId":{"shape":"String"}, + "LaunchSpecification":{"shape":"ScheduledInstancesLaunchSpecification"} + } + }, + "RunScheduledInstancesResult":{ + "type":"structure", + "members":{ + "InstanceIdSet":{ + "shape":"InstanceIdSet", + "locationName":"instanceIdSet" + } + } + }, + "S3Storage":{ + "type":"structure", + "members":{ + "Bucket":{ + "shape":"String", + "locationName":"bucket" + }, + "Prefix":{ + "shape":"String", + "locationName":"prefix" + }, + "AWSAccessKeyId":{"shape":"String"}, + "UploadPolicy":{ + "shape":"Blob", + "locationName":"uploadPolicy" + }, + "UploadPolicySignature":{ + "shape":"String", + "locationName":"uploadPolicySignature" + } + } + }, + "ScheduledInstance":{ + "type":"structure", + "members":{ + "ScheduledInstanceId":{ + "shape":"String", + "locationName":"scheduledInstanceId" + }, + "InstanceType":{ + "shape":"String", + "locationName":"instanceType" + }, + "Platform":{ + "shape":"String", + "locationName":"platform" + }, + "NetworkPlatform":{ + "shape":"String", + "locationName":"networkPlatform" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "SlotDurationInHours":{ + "shape":"Integer", + "locationName":"slotDurationInHours" + }, + "Recurrence":{ + "shape":"ScheduledInstanceRecurrence", + "locationName":"recurrence" + }, + "PreviousSlotEndTime":{ + "shape":"DateTime", + "locationName":"previousSlotEndTime" + }, + "NextSlotStartTime":{ + "shape":"DateTime", + "locationName":"nextSlotStartTime" + }, + "HourlyPrice":{ + "shape":"String", + "locationName":"hourlyPrice" + }, + "TotalScheduledInstanceHours":{ + "shape":"Integer", + "locationName":"totalScheduledInstanceHours" + }, + "InstanceCount":{ + "shape":"Integer", + "locationName":"instanceCount" + }, + "TermStartDate":{ + "shape":"DateTime", + "locationName":"termStartDate" + }, + "TermEndDate":{ + "shape":"DateTime", + "locationName":"termEndDate" + }, + "CreateDate":{ + "shape":"DateTime", + "locationName":"createDate" + } + } + }, + "ScheduledInstanceAvailability":{ + "type":"structure", + "members":{ + "InstanceType":{ + "shape":"String", + "locationName":"instanceType" + }, + "Platform":{ + "shape":"String", + "locationName":"platform" + }, + "NetworkPlatform":{ + "shape":"String", + "locationName":"networkPlatform" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "PurchaseToken":{ + "shape":"String", + "locationName":"purchaseToken" + }, + "SlotDurationInHours":{ + "shape":"Integer", + "locationName":"slotDurationInHours" + }, + "Recurrence":{ + "shape":"ScheduledInstanceRecurrence", + "locationName":"recurrence" + }, + 
"FirstSlotStartTime":{ + "shape":"DateTime", + "locationName":"firstSlotStartTime" + }, + "HourlyPrice":{ + "shape":"String", + "locationName":"hourlyPrice" + }, + "TotalScheduledInstanceHours":{ + "shape":"Integer", + "locationName":"totalScheduledInstanceHours" + }, + "AvailableInstanceCount":{ + "shape":"Integer", + "locationName":"availableInstanceCount" + }, + "MinTermDurationInDays":{ + "shape":"Integer", + "locationName":"minTermDurationInDays" + }, + "MaxTermDurationInDays":{ + "shape":"Integer", + "locationName":"maxTermDurationInDays" + } + } + }, + "ScheduledInstanceAvailabilitySet":{ + "type":"list", + "member":{ + "shape":"ScheduledInstanceAvailability", + "locationName":"item" + } + }, + "ScheduledInstanceIdRequestSet":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ScheduledInstanceId" + } + }, + "ScheduledInstanceRecurrence":{ + "type":"structure", + "members":{ + "Frequency":{ + "shape":"String", + "locationName":"frequency" + }, + "Interval":{ + "shape":"Integer", + "locationName":"interval" + }, + "OccurrenceDaySet":{ + "shape":"OccurrenceDaySet", + "locationName":"occurrenceDaySet" + }, + "OccurrenceRelativeToEnd":{ + "shape":"Boolean", + "locationName":"occurrenceRelativeToEnd" + }, + "OccurrenceUnit":{ + "shape":"String", + "locationName":"occurrenceUnit" + } + } + }, + "ScheduledInstanceRecurrenceRequest":{ + "type":"structure", + "members":{ + "Frequency":{"shape":"String"}, + "Interval":{"shape":"Integer"}, + "OccurrenceDays":{ + "shape":"OccurrenceDayRequestSet", + "locationName":"OccurrenceDay" + }, + "OccurrenceRelativeToEnd":{"shape":"Boolean"}, + "OccurrenceUnit":{"shape":"String"} + } + }, + "ScheduledInstanceSet":{ + "type":"list", + "member":{ + "shape":"ScheduledInstance", + "locationName":"item" + } + }, + "ScheduledInstancesBlockDeviceMapping":{ + "type":"structure", + "members":{ + "DeviceName":{"shape":"String"}, + "NoDevice":{"shape":"String"}, + "VirtualName":{"shape":"String"}, + "Ebs":{"shape":"ScheduledInstancesEbs"} + } + }, + "ScheduledInstancesBlockDeviceMappingSet":{ + "type":"list", + "member":{ + "shape":"ScheduledInstancesBlockDeviceMapping", + "locationName":"BlockDeviceMapping" + } + }, + "ScheduledInstancesEbs":{ + "type":"structure", + "members":{ + "SnapshotId":{"shape":"String"}, + "VolumeSize":{"shape":"Integer"}, + "DeleteOnTermination":{"shape":"Boolean"}, + "VolumeType":{"shape":"String"}, + "Iops":{"shape":"Integer"}, + "Encrypted":{"shape":"Boolean"} + } + }, + "ScheduledInstancesIamInstanceProfile":{ + "type":"structure", + "members":{ + "Arn":{"shape":"String"}, + "Name":{"shape":"String"} + } + }, + "ScheduledInstancesLaunchSpecification":{ + "type":"structure", + "required":["ImageId"], + "members":{ + "ImageId":{"shape":"String"}, + "KeyName":{"shape":"String"}, + "SecurityGroupIds":{ + "shape":"ScheduledInstancesSecurityGroupIdSet", + "locationName":"SecurityGroupId" + }, + "UserData":{"shape":"String"}, + "Placement":{"shape":"ScheduledInstancesPlacement"}, + "KernelId":{"shape":"String"}, + "InstanceType":{"shape":"String"}, + "RamdiskId":{"shape":"String"}, + "BlockDeviceMappings":{ + "shape":"ScheduledInstancesBlockDeviceMappingSet", + "locationName":"BlockDeviceMapping" + }, + "Monitoring":{"shape":"ScheduledInstancesMonitoring"}, + "SubnetId":{"shape":"String"}, + "NetworkInterfaces":{ + "shape":"ScheduledInstancesNetworkInterfaceSet", + "locationName":"NetworkInterface" + }, + "IamInstanceProfile":{"shape":"ScheduledInstancesIamInstanceProfile"}, + "EbsOptimized":{"shape":"Boolean"} + } + }, + 
"ScheduledInstancesMonitoring":{ + "type":"structure", + "members":{ + "Enabled":{"shape":"Boolean"} + } + }, + "ScheduledInstancesNetworkInterface":{ + "type":"structure", + "members":{ + "NetworkInterfaceId":{"shape":"String"}, + "DeviceIndex":{"shape":"Integer"}, + "SubnetId":{"shape":"String"}, + "Description":{"shape":"String"}, + "PrivateIpAddress":{"shape":"String"}, + "PrivateIpAddressConfigs":{ + "shape":"PrivateIpAddressConfigSet", + "locationName":"PrivateIpAddressConfig" + }, + "SecondaryPrivateIpAddressCount":{"shape":"Integer"}, + "AssociatePublicIpAddress":{"shape":"Boolean"}, + "Groups":{ + "shape":"ScheduledInstancesSecurityGroupIdSet", + "locationName":"Group" + }, + "DeleteOnTermination":{"shape":"Boolean"} + } + }, + "ScheduledInstancesNetworkInterfaceSet":{ + "type":"list", + "member":{ + "shape":"ScheduledInstancesNetworkInterface", + "locationName":"NetworkInterface" + } + }, + "ScheduledInstancesPlacement":{ + "type":"structure", + "members":{ + "AvailabilityZone":{"shape":"String"}, + "GroupName":{"shape":"String"} + } + }, + "ScheduledInstancesPrivateIpAddressConfig":{ + "type":"structure", + "members":{ + "PrivateIpAddress":{"shape":"String"}, + "Primary":{"shape":"Boolean"} + } + }, + "ScheduledInstancesSecurityGroupIdSet":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SecurityGroupId" + } + }, + "SecurityGroup":{ + "type":"structure", + "members":{ + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "GroupName":{ + "shape":"String", + "locationName":"groupName" + }, + "GroupId":{ + "shape":"String", + "locationName":"groupId" + }, + "Description":{ + "shape":"String", + "locationName":"groupDescription" + }, + "IpPermissions":{ + "shape":"IpPermissionList", + "locationName":"ipPermissions" + }, + "IpPermissionsEgress":{ + "shape":"IpPermissionList", + "locationName":"ipPermissionsEgress" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "SecurityGroupIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SecurityGroupId" + } + }, + "SecurityGroupList":{ + "type":"list", + "member":{ + "shape":"SecurityGroup", + "locationName":"item" + } + }, + "SecurityGroupReference":{ + "type":"structure", + "required":[ + "GroupId", + "ReferencingVpcId" + ], + "members":{ + "GroupId":{ + "shape":"String", + "locationName":"groupId" + }, + "ReferencingVpcId":{ + "shape":"String", + "locationName":"referencingVpcId" + }, + "VpcPeeringConnectionId":{ + "shape":"String", + "locationName":"vpcPeeringConnectionId" + } + } + }, + "SecurityGroupReferences":{ + "type":"list", + "member":{ + "shape":"SecurityGroupReference", + "locationName":"item" + } + }, + "SecurityGroupStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SecurityGroup" + } + }, + "ShutdownBehavior":{ + "type":"string", + "enum":[ + "stop", + "terminate" + ] + }, + "SlotDateTimeRangeRequest":{ + "type":"structure", + "required":[ + "EarliestTime", + "LatestTime" + ], + "members":{ + "EarliestTime":{"shape":"DateTime"}, + "LatestTime":{"shape":"DateTime"} + } + }, + "SlotStartTimeRangeRequest":{ + "type":"structure", + "members":{ + "EarliestTime":{"shape":"DateTime"}, + "LatestTime":{"shape":"DateTime"} + } + }, + "Snapshot":{ + "type":"structure", + "members":{ + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + }, + "VolumeId":{ + "shape":"String", + "locationName":"volumeId" + }, + "State":{ + 
"shape":"SnapshotState", + "locationName":"status" + }, + "StateMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "StartTime":{ + "shape":"DateTime", + "locationName":"startTime" + }, + "Progress":{ + "shape":"String", + "locationName":"progress" + }, + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "VolumeSize":{ + "shape":"Integer", + "locationName":"volumeSize" + }, + "OwnerAlias":{ + "shape":"String", + "locationName":"ownerAlias" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "Encrypted":{ + "shape":"Boolean", + "locationName":"encrypted" + }, + "KmsKeyId":{ + "shape":"String", + "locationName":"kmsKeyId" + }, + "DataEncryptionKeyId":{ + "shape":"String", + "locationName":"dataEncryptionKeyId" + } + } + }, + "SnapshotAttributeName":{ + "type":"string", + "enum":[ + "productCodes", + "createVolumePermission" + ] + }, + "SnapshotDetail":{ + "type":"structure", + "members":{ + "DiskImageSize":{ + "shape":"Double", + "locationName":"diskImageSize" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "Format":{ + "shape":"String", + "locationName":"format" + }, + "Url":{ + "shape":"String", + "locationName":"url" + }, + "UserBucket":{ + "shape":"UserBucketDetails", + "locationName":"userBucket" + }, + "DeviceName":{ + "shape":"String", + "locationName":"deviceName" + }, + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + }, + "Progress":{ + "shape":"String", + "locationName":"progress" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "Status":{ + "shape":"String", + "locationName":"status" + } + } + }, + "SnapshotDetailList":{ + "type":"list", + "member":{ + "shape":"SnapshotDetail", + "locationName":"item" + } + }, + "SnapshotDiskContainer":{ + "type":"structure", + "members":{ + "Description":{"shape":"String"}, + "Format":{"shape":"String"}, + "Url":{"shape":"String"}, + "UserBucket":{"shape":"UserBucket"} + } + }, + "SnapshotIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SnapshotId" + } + }, + "SnapshotList":{ + "type":"list", + "member":{ + "shape":"Snapshot", + "locationName":"item" + } + }, + "SnapshotState":{ + "type":"string", + "enum":[ + "pending", + "completed", + "error" + ] + }, + "SnapshotTaskDetail":{ + "type":"structure", + "members":{ + "DiskImageSize":{ + "shape":"Double", + "locationName":"diskImageSize" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "Format":{ + "shape":"String", + "locationName":"format" + }, + "Url":{ + "shape":"String", + "locationName":"url" + }, + "UserBucket":{ + "shape":"UserBucketDetails", + "locationName":"userBucket" + }, + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + }, + "Progress":{ + "shape":"String", + "locationName":"progress" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "Status":{ + "shape":"String", + "locationName":"status" + } + } + }, + "SpotDatafeedSubscription":{ + "type":"structure", + "members":{ + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "Bucket":{ + "shape":"String", + "locationName":"bucket" + }, + "Prefix":{ + "shape":"String", + "locationName":"prefix" + }, + "State":{ + "shape":"DatafeedSubscriptionState", + "locationName":"state" + }, + "Fault":{ + "shape":"SpotInstanceStateFault", + "locationName":"fault" + } + } + }, + 
"SpotFleetLaunchSpecification":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"String", + "locationName":"imageId" + }, + "KeyName":{ + "shape":"String", + "locationName":"keyName" + }, + "SecurityGroups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + }, + "UserData":{ + "shape":"String", + "locationName":"userData" + }, + "AddressingType":{ + "shape":"String", + "locationName":"addressingType" + }, + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + }, + "Placement":{ + "shape":"SpotPlacement", + "locationName":"placement" + }, + "KernelId":{ + "shape":"String", + "locationName":"kernelId" + }, + "RamdiskId":{ + "shape":"String", + "locationName":"ramdiskId" + }, + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappingList", + "locationName":"blockDeviceMapping" + }, + "Monitoring":{ + "shape":"SpotFleetMonitoring", + "locationName":"monitoring" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "NetworkInterfaces":{ + "shape":"InstanceNetworkInterfaceSpecificationList", + "locationName":"networkInterfaceSet" + }, + "IamInstanceProfile":{ + "shape":"IamInstanceProfileSpecification", + "locationName":"iamInstanceProfile" + }, + "EbsOptimized":{ + "shape":"Boolean", + "locationName":"ebsOptimized" + }, + "WeightedCapacity":{ + "shape":"Double", + "locationName":"weightedCapacity" + }, + "SpotPrice":{ + "shape":"String", + "locationName":"spotPrice" + } + } + }, + "SpotFleetMonitoring":{ + "type":"structure", + "members":{ + "Enabled":{ + "shape":"Boolean", + "locationName":"enabled" + } + } + }, + "SpotFleetRequestConfig":{ + "type":"structure", + "required":[ + "SpotFleetRequestId", + "SpotFleetRequestState", + "SpotFleetRequestConfig", + "CreateTime" + ], + "members":{ + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + }, + "SpotFleetRequestState":{ + "shape":"BatchState", + "locationName":"spotFleetRequestState" + }, + "SpotFleetRequestConfig":{ + "shape":"SpotFleetRequestConfigData", + "locationName":"spotFleetRequestConfig" + }, + "CreateTime":{ + "shape":"DateTime", + "locationName":"createTime" + } + } + }, + "SpotFleetRequestConfigData":{ + "type":"structure", + "required":[ + "SpotPrice", + "TargetCapacity", + "IamFleetRole", + "LaunchSpecifications" + ], + "members":{ + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + }, + "SpotPrice":{ + "shape":"String", + "locationName":"spotPrice" + }, + "TargetCapacity":{ + "shape":"Integer", + "locationName":"targetCapacity" + }, + "ValidFrom":{ + "shape":"DateTime", + "locationName":"validFrom" + }, + "ValidUntil":{ + "shape":"DateTime", + "locationName":"validUntil" + }, + "TerminateInstancesWithExpiration":{ + "shape":"Boolean", + "locationName":"terminateInstancesWithExpiration" + }, + "IamFleetRole":{ + "shape":"String", + "locationName":"iamFleetRole" + }, + "LaunchSpecifications":{ + "shape":"LaunchSpecsList", + "locationName":"launchSpecifications" + }, + "ExcessCapacityTerminationPolicy":{ + "shape":"ExcessCapacityTerminationPolicy", + "locationName":"excessCapacityTerminationPolicy" + }, + "AllocationStrategy":{ + "shape":"AllocationStrategy", + "locationName":"allocationStrategy" + }, + "FulfilledCapacity":{ + "shape":"Double", + "locationName":"fulfilledCapacity" + }, + "Type":{ + "shape":"FleetType", + "locationName":"type" + } + } + }, + "SpotFleetRequestConfigSet":{ + "type":"list", + "member":{ + "shape":"SpotFleetRequestConfig", + "locationName":"item" + } + }, + "SpotInstanceRequest":{ + 
"type":"structure", + "members":{ + "SpotInstanceRequestId":{ + "shape":"String", + "locationName":"spotInstanceRequestId" + }, + "SpotPrice":{ + "shape":"String", + "locationName":"spotPrice" + }, + "Type":{ + "shape":"SpotInstanceType", + "locationName":"type" + }, + "State":{ + "shape":"SpotInstanceState", + "locationName":"state" + }, + "Fault":{ + "shape":"SpotInstanceStateFault", + "locationName":"fault" + }, + "Status":{ + "shape":"SpotInstanceStatus", + "locationName":"status" + }, + "ValidFrom":{ + "shape":"DateTime", + "locationName":"validFrom" + }, + "ValidUntil":{ + "shape":"DateTime", + "locationName":"validUntil" + }, + "LaunchGroup":{ + "shape":"String", + "locationName":"launchGroup" + }, + "AvailabilityZoneGroup":{ + "shape":"String", + "locationName":"availabilityZoneGroup" + }, + "LaunchSpecification":{ + "shape":"LaunchSpecification", + "locationName":"launchSpecification" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "CreateTime":{ + "shape":"DateTime", + "locationName":"createTime" + }, + "ProductDescription":{ + "shape":"RIProductDescription", + "locationName":"productDescription" + }, + "BlockDurationMinutes":{ + "shape":"Integer", + "locationName":"blockDurationMinutes" + }, + "ActualBlockHourlyPrice":{ + "shape":"String", + "locationName":"actualBlockHourlyPrice" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "LaunchedAvailabilityZone":{ + "shape":"String", + "locationName":"launchedAvailabilityZone" + } + } + }, + "SpotInstanceRequestIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SpotInstanceRequestId" + } + }, + "SpotInstanceRequestList":{ + "type":"list", + "member":{ + "shape":"SpotInstanceRequest", + "locationName":"item" + } + }, + "SpotInstanceState":{ + "type":"string", + "enum":[ + "open", + "active", + "closed", + "cancelled", + "failed" + ] + }, + "SpotInstanceStateFault":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"String", + "locationName":"code" + }, + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "SpotInstanceStatus":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"String", + "locationName":"code" + }, + "UpdateTime":{ + "shape":"DateTime", + "locationName":"updateTime" + }, + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "SpotInstanceType":{ + "type":"string", + "enum":[ + "one-time", + "persistent" + ] + }, + "SpotPlacement":{ + "type":"structure", + "members":{ + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "GroupName":{ + "shape":"String", + "locationName":"groupName" + } + } + }, + "SpotPrice":{ + "type":"structure", + "members":{ + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + }, + "ProductDescription":{ + "shape":"RIProductDescription", + "locationName":"productDescription" + }, + "SpotPrice":{ + "shape":"String", + "locationName":"spotPrice" + }, + "Timestamp":{ + "shape":"DateTime", + "locationName":"timestamp" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + } + } + }, + "SpotPriceHistoryList":{ + "type":"list", + "member":{ + "shape":"SpotPrice", + "locationName":"item" + } + }, + "StaleIpPermission":{ + "type":"structure", + "members":{ + "FromPort":{ + "shape":"Integer", + "locationName":"fromPort" + }, + "IpProtocol":{ + "shape":"String", + "locationName":"ipProtocol" + }, + "IpRanges":{ + "shape":"IpRanges", + "locationName":"ipRanges" + }, + "PrefixListIds":{ + 
"shape":"PrefixListIdSet", + "locationName":"prefixListIds" + }, + "ToPort":{ + "shape":"Integer", + "locationName":"toPort" + }, + "UserIdGroupPairs":{ + "shape":"UserIdGroupPairSet", + "locationName":"groups" + } + } + }, + "StaleIpPermissionSet":{ + "type":"list", + "member":{ + "shape":"StaleIpPermission", + "locationName":"item" + } + }, + "StaleSecurityGroup":{ + "type":"structure", + "required":["GroupId"], + "members":{ + "GroupId":{ + "shape":"String", + "locationName":"groupId" + }, + "GroupName":{ + "shape":"String", + "locationName":"groupName" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "StaleIpPermissions":{ + "shape":"StaleIpPermissionSet", + "locationName":"staleIpPermissions" + }, + "StaleIpPermissionsEgress":{ + "shape":"StaleIpPermissionSet", + "locationName":"staleIpPermissionsEgress" + } + } + }, + "StaleSecurityGroupSet":{ + "type":"list", + "member":{ + "shape":"StaleSecurityGroup", + "locationName":"item" + } + }, + "StartInstancesRequest":{ + "type":"structure", + "required":["InstanceIds"], + "members":{ + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + }, + "AdditionalInfo":{ + "shape":"String", + "locationName":"additionalInfo" + }, + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + } + } + }, + "StartInstancesResult":{ + "type":"structure", + "members":{ + "StartingInstances":{ + "shape":"InstanceStateChangeList", + "locationName":"instancesSet" + } + } + }, + "State":{ + "type":"string", + "enum":[ + "Pending", + "Available", + "Deleting", + "Deleted" + ] + }, + "StateReason":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"String", + "locationName":"code" + }, + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "Status":{ + "type":"string", + "enum":[ + "MoveInProgress", + "InVpc", + "InClassic" + ] + }, + "StatusName":{ + "type":"string", + "enum":["reachability"] + }, + "StatusType":{ + "type":"string", + "enum":[ + "passed", + "failed", + "insufficient-data", + "initializing" + ] + }, + "StopInstancesRequest":{ + "type":"structure", + "required":["InstanceIds"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + }, + "Force":{ + "shape":"Boolean", + "locationName":"force" + } + } + }, + "StopInstancesResult":{ + "type":"structure", + "members":{ + "StoppingInstances":{ + "shape":"InstanceStateChangeList", + "locationName":"instancesSet" + } + } + }, + "Storage":{ + "type":"structure", + "members":{ + "S3":{"shape":"S3Storage"} + } + }, + "String":{"type":"string"}, + "Subnet":{ + "type":"structure", + "members":{ + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "State":{ + "shape":"SubnetState", + "locationName":"state" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "CidrBlock":{ + "shape":"String", + "locationName":"cidrBlock" + }, + "AvailableIpAddressCount":{ + "shape":"Integer", + "locationName":"availableIpAddressCount" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "DefaultForAz":{ + "shape":"Boolean", + "locationName":"defaultForAz" + }, + "MapPublicIpOnLaunch":{ + "shape":"Boolean", + "locationName":"mapPublicIpOnLaunch" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "SubnetIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + 
"locationName":"SubnetId" + } + }, + "SubnetList":{ + "type":"list", + "member":{ + "shape":"Subnet", + "locationName":"item" + } + }, + "SubnetState":{ + "type":"string", + "enum":[ + "pending", + "available" + ] + }, + "SummaryStatus":{ + "type":"string", + "enum":[ + "ok", + "impaired", + "insufficient-data", + "not-applicable", + "initializing" + ] + }, + "Tag":{ + "type":"structure", + "members":{ + "Key":{ + "shape":"String", + "locationName":"key" + }, + "Value":{ + "shape":"String", + "locationName":"value" + } + } + }, + "TagDescription":{ + "type":"structure", + "members":{ + "ResourceId":{ + "shape":"String", + "locationName":"resourceId" + }, + "ResourceType":{ + "shape":"ResourceType", + "locationName":"resourceType" + }, + "Key":{ + "shape":"String", + "locationName":"key" + }, + "Value":{ + "shape":"String", + "locationName":"value" + } + } + }, + "TagDescriptionList":{ + "type":"list", + "member":{ + "shape":"TagDescription", + "locationName":"item" + } + }, + "TagList":{ + "type":"list", + "member":{ + "shape":"Tag", + "locationName":"item" + } + }, + "TelemetryStatus":{ + "type":"string", + "enum":[ + "UP", + "DOWN" + ] + }, + "Tenancy":{ + "type":"string", + "enum":[ + "default", + "dedicated", + "host" + ] + }, + "TerminateInstancesRequest":{ + "type":"structure", + "required":["InstanceIds"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + } + } + }, + "TerminateInstancesResult":{ + "type":"structure", + "members":{ + "TerminatingInstances":{ + "shape":"InstanceStateChangeList", + "locationName":"instancesSet" + } + } + }, + "TrafficType":{ + "type":"string", + "enum":[ + "ACCEPT", + "REJECT", + "ALL" + ] + }, + "UnassignPrivateIpAddressesRequest":{ + "type":"structure", + "required":[ + "NetworkInterfaceId", + "PrivateIpAddresses" + ], + "members":{ + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "PrivateIpAddresses":{ + "shape":"PrivateIpAddressStringList", + "locationName":"privateIpAddress" + } + } + }, + "UnmonitorInstancesRequest":{ + "type":"structure", + "required":["InstanceIds"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + } + } + }, + "UnmonitorInstancesResult":{ + "type":"structure", + "members":{ + "InstanceMonitorings":{ + "shape":"InstanceMonitoringList", + "locationName":"instancesSet" + } + } + }, + "UnsuccessfulItem":{ + "type":"structure", + "required":["Error"], + "members":{ + "Error":{ + "shape":"UnsuccessfulItemError", + "locationName":"error" + }, + "ResourceId":{ + "shape":"String", + "locationName":"resourceId" + } + } + }, + "UnsuccessfulItemError":{ + "type":"structure", + "required":[ + "Code", + "Message" + ], + "members":{ + "Code":{ + "shape":"String", + "locationName":"code" + }, + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "UnsuccessfulItemList":{ + "type":"list", + "member":{ + "shape":"UnsuccessfulItem", + "locationName":"item" + } + }, + "UnsuccessfulItemSet":{ + "type":"list", + "member":{ + "shape":"UnsuccessfulItem", + "locationName":"item" + } + }, + "UserBucket":{ + "type":"structure", + "members":{ + "S3Bucket":{"shape":"String"}, + "S3Key":{"shape":"String"} + } + }, + "UserBucketDetails":{ + "type":"structure", + "members":{ + "S3Bucket":{ + "shape":"String", + "locationName":"s3Bucket" + }, + "S3Key":{ + "shape":"String", 
+ "locationName":"s3Key" + } + } + }, + "UserData":{ + "type":"structure", + "members":{ + "Data":{ + "shape":"String", + "locationName":"data" + } + } + }, + "UserGroupStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"UserGroup" + } + }, + "UserIdGroupPair":{ + "type":"structure", + "members":{ + "UserId":{ + "shape":"String", + "locationName":"userId" + }, + "GroupName":{ + "shape":"String", + "locationName":"groupName" + }, + "GroupId":{ + "shape":"String", + "locationName":"groupId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "VpcPeeringConnectionId":{ + "shape":"String", + "locationName":"vpcPeeringConnectionId" + }, + "PeeringStatus":{ + "shape":"String", + "locationName":"peeringStatus" + } + } + }, + "UserIdGroupPairList":{ + "type":"list", + "member":{ + "shape":"UserIdGroupPair", + "locationName":"item" + } + }, + "UserIdGroupPairSet":{ + "type":"list", + "member":{ + "shape":"UserIdGroupPair", + "locationName":"item" + } + }, + "UserIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"UserId" + } + }, + "ValueStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"item" + } + }, + "VgwTelemetry":{ + "type":"structure", + "members":{ + "OutsideIpAddress":{ + "shape":"String", + "locationName":"outsideIpAddress" + }, + "Status":{ + "shape":"TelemetryStatus", + "locationName":"status" + }, + "LastStatusChange":{ + "shape":"DateTime", + "locationName":"lastStatusChange" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "AcceptedRouteCount":{ + "shape":"Integer", + "locationName":"acceptedRouteCount" + } + } + }, + "VgwTelemetryList":{ + "type":"list", + "member":{ + "shape":"VgwTelemetry", + "locationName":"item" + } + }, + "VirtualizationType":{ + "type":"string", + "enum":[ + "hvm", + "paravirtual" + ] + }, + "Volume":{ + "type":"structure", + "members":{ + "VolumeId":{ + "shape":"String", + "locationName":"volumeId" + }, + "Size":{ + "shape":"Integer", + "locationName":"size" + }, + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "State":{ + "shape":"VolumeState", + "locationName":"status" + }, + "CreateTime":{ + "shape":"DateTime", + "locationName":"createTime" + }, + "Attachments":{ + "shape":"VolumeAttachmentList", + "locationName":"attachmentSet" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "VolumeType":{ + "shape":"VolumeType", + "locationName":"volumeType" + }, + "Iops":{ + "shape":"Integer", + "locationName":"iops" + }, + "Encrypted":{ + "shape":"Boolean", + "locationName":"encrypted" + }, + "KmsKeyId":{ + "shape":"String", + "locationName":"kmsKeyId" + } + } + }, + "VolumeAttachment":{ + "type":"structure", + "members":{ + "VolumeId":{ + "shape":"String", + "locationName":"volumeId" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Device":{ + "shape":"String", + "locationName":"device" + }, + "State":{ + "shape":"VolumeAttachmentState", + "locationName":"status" + }, + "AttachTime":{ + "shape":"DateTime", + "locationName":"attachTime" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + "locationName":"deleteOnTermination" + } + } + }, + "VolumeAttachmentList":{ + "type":"list", + "member":{ + "shape":"VolumeAttachment", + "locationName":"item" + } + }, + "VolumeAttachmentState":{ + "type":"string", + "enum":[ + "attaching", + "attached", + "detaching", + 
"detached" + ] + }, + "VolumeAttributeName":{ + "type":"string", + "enum":[ + "autoEnableIO", + "productCodes" + ] + }, + "VolumeDetail":{ + "type":"structure", + "required":["Size"], + "members":{ + "Size":{ + "shape":"Long", + "locationName":"size" + } + } + }, + "VolumeIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"VolumeId" + } + }, + "VolumeList":{ + "type":"list", + "member":{ + "shape":"Volume", + "locationName":"item" + } + }, + "VolumeState":{ + "type":"string", + "enum":[ + "creating", + "available", + "in-use", + "deleting", + "deleted", + "error" + ] + }, + "VolumeStatusAction":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"String", + "locationName":"code" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "EventType":{ + "shape":"String", + "locationName":"eventType" + }, + "EventId":{ + "shape":"String", + "locationName":"eventId" + } + } + }, + "VolumeStatusActionsList":{ + "type":"list", + "member":{ + "shape":"VolumeStatusAction", + "locationName":"item" + } + }, + "VolumeStatusDetails":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"VolumeStatusName", + "locationName":"name" + }, + "Status":{ + "shape":"String", + "locationName":"status" + } + } + }, + "VolumeStatusDetailsList":{ + "type":"list", + "member":{ + "shape":"VolumeStatusDetails", + "locationName":"item" + } + }, + "VolumeStatusEvent":{ + "type":"structure", + "members":{ + "EventType":{ + "shape":"String", + "locationName":"eventType" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "NotBefore":{ + "shape":"DateTime", + "locationName":"notBefore" + }, + "NotAfter":{ + "shape":"DateTime", + "locationName":"notAfter" + }, + "EventId":{ + "shape":"String", + "locationName":"eventId" + } + } + }, + "VolumeStatusEventsList":{ + "type":"list", + "member":{ + "shape":"VolumeStatusEvent", + "locationName":"item" + } + }, + "VolumeStatusInfo":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"VolumeStatusInfoStatus", + "locationName":"status" + }, + "Details":{ + "shape":"VolumeStatusDetailsList", + "locationName":"details" + } + } + }, + "VolumeStatusInfoStatus":{ + "type":"string", + "enum":[ + "ok", + "impaired", + "insufficient-data" + ] + }, + "VolumeStatusItem":{ + "type":"structure", + "members":{ + "VolumeId":{ + "shape":"String", + "locationName":"volumeId" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "VolumeStatus":{ + "shape":"VolumeStatusInfo", + "locationName":"volumeStatus" + }, + "Events":{ + "shape":"VolumeStatusEventsList", + "locationName":"eventsSet" + }, + "Actions":{ + "shape":"VolumeStatusActionsList", + "locationName":"actionsSet" + } + } + }, + "VolumeStatusList":{ + "type":"list", + "member":{ + "shape":"VolumeStatusItem", + "locationName":"item" + } + }, + "VolumeStatusName":{ + "type":"string", + "enum":[ + "io-enabled", + "io-performance" + ] + }, + "VolumeType":{ + "type":"string", + "enum":[ + "standard", + "io1", + "gp2", + "sc1", + "st1" + ] + }, + "Vpc":{ + "type":"structure", + "members":{ + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "State":{ + "shape":"VpcState", + "locationName":"state" + }, + "CidrBlock":{ + "shape":"String", + "locationName":"cidrBlock" + }, + "DhcpOptionsId":{ + "shape":"String", + "locationName":"dhcpOptionsId" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "InstanceTenancy":{ + "shape":"Tenancy", + "locationName":"instanceTenancy" + }, + 
"IsDefault":{ + "shape":"Boolean", + "locationName":"isDefault" + } + } + }, + "VpcAttachment":{ + "type":"structure", + "members":{ + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "State":{ + "shape":"AttachmentStatus", + "locationName":"state" + } + } + }, + "VpcAttachmentList":{ + "type":"list", + "member":{ + "shape":"VpcAttachment", + "locationName":"item" + } + }, + "VpcAttributeName":{ + "type":"string", + "enum":[ + "enableDnsSupport", + "enableDnsHostnames" + ] + }, + "VpcClassicLink":{ + "type":"structure", + "members":{ + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "ClassicLinkEnabled":{ + "shape":"Boolean", + "locationName":"classicLinkEnabled" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "VpcClassicLinkIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"VpcId" + } + }, + "VpcClassicLinkList":{ + "type":"list", + "member":{ + "shape":"VpcClassicLink", + "locationName":"item" + } + }, + "VpcEndpoint":{ + "type":"structure", + "members":{ + "VpcEndpointId":{ + "shape":"String", + "locationName":"vpcEndpointId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "ServiceName":{ + "shape":"String", + "locationName":"serviceName" + }, + "State":{ + "shape":"State", + "locationName":"state" + }, + "PolicyDocument":{ + "shape":"String", + "locationName":"policyDocument" + }, + "RouteTableIds":{ + "shape":"ValueStringList", + "locationName":"routeTableIdSet" + }, + "CreationTimestamp":{ + "shape":"DateTime", + "locationName":"creationTimestamp" + } + } + }, + "VpcEndpointSet":{ + "type":"list", + "member":{ + "shape":"VpcEndpoint", + "locationName":"item" + } + }, + "VpcIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"VpcId" + } + }, + "VpcList":{ + "type":"list", + "member":{ + "shape":"Vpc", + "locationName":"item" + } + }, + "VpcPeeringConnection":{ + "type":"structure", + "members":{ + "AccepterVpcInfo":{ + "shape":"VpcPeeringConnectionVpcInfo", + "locationName":"accepterVpcInfo" + }, + "ExpirationTime":{ + "shape":"DateTime", + "locationName":"expirationTime" + }, + "RequesterVpcInfo":{ + "shape":"VpcPeeringConnectionVpcInfo", + "locationName":"requesterVpcInfo" + }, + "Status":{ + "shape":"VpcPeeringConnectionStateReason", + "locationName":"status" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "VpcPeeringConnectionId":{ + "shape":"String", + "locationName":"vpcPeeringConnectionId" + } + } + }, + "VpcPeeringConnectionList":{ + "type":"list", + "member":{ + "shape":"VpcPeeringConnection", + "locationName":"item" + } + }, + "VpcPeeringConnectionOptionsDescription":{ + "type":"structure", + "members":{ + "AllowEgressFromLocalClassicLinkToRemoteVpc":{ + "shape":"Boolean", + "locationName":"allowEgressFromLocalClassicLinkToRemoteVpc" + }, + "AllowEgressFromLocalVpcToRemoteClassicLink":{ + "shape":"Boolean", + "locationName":"allowEgressFromLocalVpcToRemoteClassicLink" + } + } + }, + "VpcPeeringConnectionStateReason":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"VpcPeeringConnectionStateReasonCode", + "locationName":"code" + }, + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "VpcPeeringConnectionStateReasonCode":{ + "type":"string", + "enum":[ + "initiating-request", + "pending-acceptance", + "active", + "deleted", + "rejected", + "failed", + "expired", + "provisioning", + "deleting" + ] + }, + "VpcPeeringConnectionVpcInfo":{ + "type":"structure", + "members":{ + "CidrBlock":{ + 
"shape":"String", + "locationName":"cidrBlock" + }, + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "PeeringOptions":{ + "shape":"VpcPeeringConnectionOptionsDescription", + "locationName":"peeringOptions" + } + } + }, + "VpcState":{ + "type":"string", + "enum":[ + "pending", + "available" + ] + }, + "VpnConnection":{ + "type":"structure", + "members":{ + "VpnConnectionId":{ + "shape":"String", + "locationName":"vpnConnectionId" + }, + "State":{ + "shape":"VpnState", + "locationName":"state" + }, + "CustomerGatewayConfiguration":{ + "shape":"String", + "locationName":"customerGatewayConfiguration" + }, + "Type":{ + "shape":"GatewayType", + "locationName":"type" + }, + "CustomerGatewayId":{ + "shape":"String", + "locationName":"customerGatewayId" + }, + "VpnGatewayId":{ + "shape":"String", + "locationName":"vpnGatewayId" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "VgwTelemetry":{ + "shape":"VgwTelemetryList", + "locationName":"vgwTelemetry" + }, + "Options":{ + "shape":"VpnConnectionOptions", + "locationName":"options" + }, + "Routes":{ + "shape":"VpnStaticRouteList", + "locationName":"routes" + } + } + }, + "VpnConnectionIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"VpnConnectionId" + } + }, + "VpnConnectionList":{ + "type":"list", + "member":{ + "shape":"VpnConnection", + "locationName":"item" + } + }, + "VpnConnectionOptions":{ + "type":"structure", + "members":{ + "StaticRoutesOnly":{ + "shape":"Boolean", + "locationName":"staticRoutesOnly" + } + } + }, + "VpnConnectionOptionsSpecification":{ + "type":"structure", + "members":{ + "StaticRoutesOnly":{ + "shape":"Boolean", + "locationName":"staticRoutesOnly" + } + } + }, + "VpnGateway":{ + "type":"structure", + "members":{ + "VpnGatewayId":{ + "shape":"String", + "locationName":"vpnGatewayId" + }, + "State":{ + "shape":"VpnState", + "locationName":"state" + }, + "Type":{ + "shape":"GatewayType", + "locationName":"type" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "VpcAttachments":{ + "shape":"VpcAttachmentList", + "locationName":"attachments" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "VpnGatewayIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"VpnGatewayId" + } + }, + "VpnGatewayList":{ + "type":"list", + "member":{ + "shape":"VpnGateway", + "locationName":"item" + } + }, + "VpnState":{ + "type":"string", + "enum":[ + "pending", + "available", + "deleting", + "deleted" + ] + }, + "VpnStaticRoute":{ + "type":"structure", + "members":{ + "DestinationCidrBlock":{ + "shape":"String", + "locationName":"destinationCidrBlock" + }, + "Source":{ + "shape":"VpnStaticRouteSource", + "locationName":"source" + }, + "State":{ + "shape":"VpnState", + "locationName":"state" + } + } + }, + "VpnStaticRouteList":{ + "type":"list", + "member":{ + "shape":"VpnStaticRoute", + "locationName":"item" + } + }, + "VpnStaticRouteSource":{ + "type":"string", + "enum":["Static"] + }, + "ZoneNameStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ZoneName" + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2015-10-01/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2015-10-01/docs-2.json new file mode 100644 index 000000000..8d9b760b9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2015-10-01/docs-2.json @@ -0,0 +1,6382 @@ 
+{ + "version": "2.0", + "service": "Amazon Elastic Compute Cloud

    Amazon Elastic Compute Cloud (Amazon EC2) provides resizable computing capacity in the Amazon Web Services (AWS) cloud. Using Amazon EC2 eliminates your need to invest in hardware up front, so you can develop and deploy applications faster.

    ", + "operations": { + "AcceptVpcPeeringConnection": "

    Accept a VPC peering connection request. To accept a request, the VPC peering connection must be in the pending-acceptance state, and you must be the owner of the peer VPC. Use the DescribeVpcPeeringConnections request to view your outstanding VPC peering connection requests.

    ", + "AllocateAddress": "

    Acquires an Elastic IP address.

    An Elastic IP address is for use either in the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

    ", + "AllocateHosts": "

    Allocates a Dedicated host to your account. At minimum you need to specify the instance size type, Availability Zone, and quantity of hosts you want to allocate.

    ", + "AssignPrivateIpAddresses": "

    Assigns one or more secondary private IP addresses to the specified network interface. You can specify one or more specific secondary IP addresses, or you can specify the number of secondary IP addresses to be automatically assigned within the subnet's CIDR block range. The number of secondary IP addresses that you can assign to an instance varies by instance type. For information about instance types, see Instance Types in the Amazon Elastic Compute Cloud User Guide. For more information about Elastic IP addresses, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

    AssignPrivateIpAddresses is available only in EC2-VPC.

    ", + "AssociateAddress": "

    Associates an Elastic IP address with an instance or a network interface.

    An Elastic IP address is for use in either the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

    [EC2-Classic, VPC in an EC2-VPC-only account] If the Elastic IP address is already associated with a different instance, it is disassociated from that instance and associated with the specified instance.

    [VPC in an EC2-Classic account] If you don't specify a private IP address, the Elastic IP address is associated with the primary IP address. If the Elastic IP address is already associated with a different instance or a network interface, you get an error unless you allow reassociation.

    This is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error.

    ", + "AssociateDhcpOptions": "

    Associates a set of DHCP options (that you've previously created) with the specified VPC, or associates no DHCP options with the VPC.

    After you associate the options with the VPC, any existing instances and all new instances that you launch in that VPC use the options. You don't need to restart or relaunch the instances. They automatically pick up the changes within a few hours, depending on how frequently the instance renews its DHCP lease. You can explicitly renew the lease using the operating system on the instance.

    For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

    ", + "AssociateRouteTable": "

    Associates a subnet with a route table. The subnet and route table must be in the same VPC. This association causes traffic originating from the subnet to be routed according to the routes in the route table. The action returns an association ID, which you need in order to disassociate the route table from the subnet later. A route table can be associated with multiple subnets.

    For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

    ", + "AttachClassicLinkVpc": "

    Links an EC2-Classic instance to a ClassicLink-enabled VPC through one or more of the VPC's security groups. You cannot link an EC2-Classic instance to more than one VPC at a time. You can only link an instance that's in the running state. An instance is automatically unlinked from a VPC when it's stopped - you can link it to the VPC again when you restart it.

    After you've linked an instance, you cannot change the VPC security groups that are associated with it. To change the security groups, you must first unlink the instance, and then link it again.

    Linking your instance to a VPC is sometimes referred to as attaching your instance.

    ", + "AttachInternetGateway": "

    Attaches an Internet gateway to a VPC, enabling connectivity between the Internet and the VPC. For more information about your VPC and Internet gateway, see the Amazon Virtual Private Cloud User Guide.

    ", + "AttachNetworkInterface": "

    Attaches a network interface to an instance.

    ", + "AttachVolume": "

    Attaches an EBS volume to a running or stopped instance and exposes it to the instance with the specified device name.

    Encrypted EBS volumes may only be attached to instances that support Amazon EBS encryption. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

    For a list of supported device names, see Attaching an EBS Volume to an Instance. Any device names that aren't reserved for instance store volumes can be used for EBS volumes. For more information, see Amazon EC2 Instance Store in the Amazon Elastic Compute Cloud User Guide.

    If a volume has an AWS Marketplace product code:

    • The volume can be attached only to a stopped instance.

    • AWS Marketplace product codes are copied from the volume to the instance.

    • You must be subscribed to the product.

    • The instance type and operating system of the instance must support the product. For example, you can't detach a volume from a Windows instance and attach it to a Linux instance.

    For an overview of the AWS Marketplace, see Introducing AWS Marketplace.

    For more information about EBS volumes, see Attaching Amazon EBS Volumes in the Amazon Elastic Compute Cloud User Guide.

    ", + "AttachVpnGateway": "

    Attaches a virtual private gateway to a VPC. For more information, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "AuthorizeSecurityGroupEgress": "

    [EC2-VPC only] Adds one or more egress rules to a security group for use with a VPC. Specifically, this action permits instances to send traffic to one or more destination CIDR IP address ranges, or to one or more destination security groups for the same VPC. This action doesn't apply to security groups for use in EC2-Classic. For more information, see Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.

    You can have up to 50 rules per security group (covering both ingress and egress rules).

    Each rule consists of the protocol (for example, TCP), plus either a CIDR range or a source group. For the TCP and UDP protocols, you must also specify the destination port or port range. For the ICMP protocol, you must also specify the ICMP type and code. You can use -1 for the type or code to mean all types or all codes.

    Rule changes are propagated to affected instances as quickly as possible. However, a small delay might occur.

    ", + "AuthorizeSecurityGroupIngress": "

    Adds one or more ingress rules to a security group.

    EC2-Classic: You can have up to 100 rules per group.

    EC2-VPC: You can have up to 50 rules per group (covering both ingress and egress rules).

    Rule changes are propagated to instances within the security group as quickly as possible. However, a small delay might occur.

    [EC2-Classic] This action gives one or more CIDR IP address ranges permission to access a security group in your account, or gives one or more security groups (called the source groups) permission to access a security group for your account. A source group can be for your own AWS account, or another.

    [EC2-VPC] This action gives one or more CIDR IP address ranges permission to access a security group in your VPC, or gives one or more other security groups (called the source groups) permission to access a security group for your VPC. The security groups must all be for the same VPC.

    ", + "BundleInstance": "

    Bundles an Amazon instance store-backed Windows instance.

    During bundling, only the root device volume (C:\\) is bundled. Data on other instance store volumes is not preserved.

    This action is not applicable for Linux/Unix instances or Windows instances that are backed by Amazon EBS.

    For more information, see Creating an Instance Store-Backed Windows AMI.

    ", + "CancelBundleTask": "

    Cancels a bundling operation for an instance store-backed Windows instance.

    ", + "CancelConversionTask": "

    Cancels an active conversion task. The task can be the import of an instance or volume. The action removes all artifacts of the conversion, including a partially uploaded volume or instance. If the conversion is complete or is in the process of transferring the final disk image, the command fails and returns an exception.

    For more information, see Using the Command Line Tools to Import Your Virtual Machine to Amazon EC2 in the Amazon Elastic Compute Cloud User Guide.

    ", + "CancelExportTask": "

    Cancels an active export task. The request removes all artifacts of the export, including any partially-created Amazon S3 objects. If the export task is complete or is in the process of transferring the final disk image, the command fails and returns an error.

    ", + "CancelImportTask": "

    Cancels an in-process import virtual machine or import snapshot task.

    ", + "CancelReservedInstancesListing": "

    Cancels the specified Reserved Instance listing in the Reserved Instance Marketplace.

    For more information, see Reserved Instance Marketplace in the Amazon Elastic Compute Cloud User Guide.

    ", + "CancelSpotFleetRequests": "

    Cancels the specified Spot fleet requests.

    After you cancel a Spot fleet request, the Spot fleet launches no new Spot instances. You must specify whether the Spot fleet should also terminate its Spot instances. If you terminate the instances, the Spot fleet request enters the cancelled_terminating state. Otherwise, the Spot fleet request enters the cancelled_running state and the instances continue to run until they are interrupted or you terminate them manually.

    ", + "CancelSpotInstanceRequests": "

    Cancels one or more Spot instance requests. Spot instances are instances that Amazon EC2 starts on your behalf when the bid price that you specify exceeds the current Spot price. Amazon EC2 periodically sets the Spot price based on available Spot instance capacity and current Spot instance requests. For more information, see Spot Instance Requests in the Amazon Elastic Compute Cloud User Guide.

    Canceling a Spot instance request does not terminate running Spot instances associated with the request.

    ", + "ConfirmProductInstance": "

    Determines whether a product code is associated with an instance. This action can only be used by the owner of the product code. It is useful when a product code owner needs to verify whether another user's instance is eligible for support.

    ", + "CopyImage": "

    Initiates the copy of an AMI from the specified source region to the current region. You specify the destination region by using its endpoint when making the request.

    For more information, see Copying AMIs in the Amazon Elastic Compute Cloud User Guide.

    ", + "CopySnapshot": "

    Copies a point-in-time snapshot of an EBS volume and stores it in Amazon S3. You can copy the snapshot within the same region or from one region to another. You can use the snapshot to create EBS volumes or Amazon Machine Images (AMIs). The snapshot is copied to the regional endpoint that you send the HTTP request to.

    Copies of encrypted EBS snapshots remain encrypted. Copies of unencrypted snapshots remain unencrypted, unless the Encrypted flag is specified during the snapshot copy operation. By default, encrypted snapshot copies use the default AWS Key Management Service (AWS KMS) customer master key (CMK); however, you can specify a non-default CMK with the KmsKeyId parameter.

    For more information, see Copying an Amazon EBS Snapshot in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateCustomerGateway": "

    Provides information to AWS about your VPN customer gateway device. The customer gateway is the appliance at your end of the VPN connection. (The device on the AWS side of the VPN connection is the virtual private gateway.) You must provide the Internet-routable IP address of the customer gateway's external interface. The IP address must be static and may be behind a device performing network address translation (NAT).

    For devices that use Border Gateway Protocol (BGP), you can also provide the device's BGP Autonomous System Number (ASN). You can use an existing ASN assigned to your network. If you don't have an ASN already, you can use a private ASN (in the 64512 - 65534 range).

    Amazon EC2 supports all 2-byte ASN numbers in the range of 1 - 65534, with the exception of 7224, which is reserved in the us-east-1 region, and 9059, which is reserved in the eu-west-1 region.

    For more information about VPN customer gateways, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide.

    You cannot create more than one customer gateway with the same VPN type, IP address, and BGP ASN parameter values. If you run an identical request more than one time, the first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent requests do not create new customer gateway resources.

    ", + "CreateDhcpOptions": "

    Creates a set of DHCP options for your VPC. After creating the set, you must associate it with the VPC, causing all existing and new instances that you launch in the VPC to use this set of DHCP options. The following are the individual DHCP options you can specify. For more information about the options, see RFC 2132.

    • domain-name-servers - The IP addresses of up to four domain name servers, or AmazonProvidedDNS. The default DHCP option set specifies AmazonProvidedDNS. If specifying more than one domain name server, specify the IP addresses in a single parameter, separated by commas.

    • domain-name - If you're using AmazonProvidedDNS in \"us-east-1\", specify \"ec2.internal\". If you're using AmazonProvidedDNS in another region, specify \"region.compute.internal\" (for example, \"ap-northeast-1.compute.internal\"). Otherwise, specify a domain name (for example, \"MyCompany.com\"). Important: Some Linux operating systems accept multiple domain names separated by spaces. However, Windows and other Linux operating systems treat the value as a single domain, which results in unexpected behavior. If your DHCP options set is associated with a VPC that has instances with multiple operating systems, specify only one domain name.

    • ntp-servers - The IP addresses of up to four Network Time Protocol (NTP) servers.

    • netbios-name-servers - The IP addresses of up to four NetBIOS name servers.

    • netbios-node-type - The NetBIOS node type (1, 2, 4, or 8). We recommend that you specify 2 (broadcast and multicast are not currently supported). For more information about these node types, see RFC 2132.

    Your VPC automatically starts out with a set of DHCP options that includes only a DNS server that we provide (AmazonProvidedDNS). If you create a set of options, and if your VPC has an Internet gateway, make sure to set the domain-name-servers option either to AmazonProvidedDNS or to a domain name server of your choice. For more information about DHCP options, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.
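
    A sketch of creating such a set and associating it with a VPC; the domain name and VPC ID are hypothetical placeholders:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := ec2.New(sess)

	opts, err := svc.CreateDhcpOptions(&ec2.CreateDhcpOptionsInput{
		DhcpConfigurations: []*ec2.NewDhcpConfiguration{
			{Key: aws.String("domain-name-servers"), Values: []*string{aws.String("AmazonProvidedDNS")}},
			{Key: aws.String("domain-name"), Values: []*string{aws.String("ec2.internal")}}, // us-east-1 value
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// The new set has no effect until it is associated with a VPC.
	_, err = svc.AssociateDhcpOptions(&ec2.AssociateDhcpOptionsInput{
		DhcpOptionsId: opts.DhcpOptions.DhcpOptionsId,
		VpcId:         aws.String("vpc-0123456789abcdef0"), // hypothetical VPC ID
	})
	if err != nil {
		log.Fatal(err)
	}
}
```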

    ", + "CreateFlowLogs": "

    Creates one or more flow logs to capture IP traffic for a specific network interface, subnet, or VPC. Flow logs are delivered to a specified log group in Amazon CloudWatch Logs. If you specify a VPC or subnet in the request, a log stream is created in CloudWatch Logs for each network interface in the subnet or VPC. Log streams can include information about accepted and rejected traffic to a network interface. You can view the data in your log streams using Amazon CloudWatch Logs.

    In your request, you must also specify an IAM role that has permission to publish logs to CloudWatch Logs.

    ", + "CreateImage": "

    Creates an Amazon EBS-backed AMI from an Amazon EBS-backed instance that is either running or stopped.

    If you customized your instance with instance store volumes or EBS volumes in addition to the root device volume, the new AMI contains block device mapping information for those volumes. When you launch an instance from this new AMI, the instance automatically launches with those additional volumes.

    For more information, see Creating Amazon EBS-Backed Linux AMIs in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateInstanceExportTask": "

    Exports a running or stopped instance to an S3 bucket.

    For information about the supported operating systems, image formats, and known limitations for the types of instances you can export, see Exporting EC2 Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateInternetGateway": "

    Creates an Internet gateway for use with a VPC. After creating the Internet gateway, you attach it to a VPC using AttachInternetGateway.

    For more information about your VPC and Internet gateway, see the Amazon Virtual Private Cloud User Guide.

    ", + "CreateKeyPair": "

    Creates a 2048-bit RSA key pair with the specified name. Amazon EC2 stores the public key and displays the private key for you to save to a file. The private key is returned as an unencrypted PEM encoded PKCS#8 private key. If a key with the specified name already exists, Amazon EC2 returns an error.

    You can have up to five thousand key pairs per region.

    The key pair returned to you is available only in the region in which you create it. To create a key pair that is available in all regions, use ImportKeyPair.

    For more information about key pairs, see Key Pairs in the Amazon Elastic Compute Cloud User Guide.
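
    A minimal sketch; the key name is hypothetical, and the PEM material is written with owner-only permissions because this response is the only chance to capture it:

```go
package main

import (
	"io/ioutil"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := ec2.New(sess)

	out, err := svc.CreateKeyPair(&ec2.CreateKeyPairInput{
		KeyName: aws.String("my-key"), // hypothetical name; must be unique in the region
	})
	if err != nil {
		log.Fatal(err)
	}

	// KeyMaterial holds the unencrypted PEM private key; it cannot be
	// retrieved again later, so persist it with restrictive permissions.
	if err := ioutil.WriteFile("my-key.pem", []byte(aws.StringValue(out.KeyMaterial)), 0600); err != nil {
		log.Fatal(err)
	}
}
```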

    ", + "CreateNatGateway": "

    Creates a NAT gateway in the specified subnet. A NAT gateway can be used to enable instances in a private subnet to connect to the Internet. This action creates a network interface in the specified subnet with a private IP address from the IP address range of the subnet. For more information, see NAT Gateways in the Amazon Virtual Private Cloud User Guide.

    ", + "CreateNetworkAcl": "

    Creates a network ACL in a VPC. Network ACLs provide an optional layer of security (in addition to security groups) for the instances in your VPC.

    For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

    ", + "CreateNetworkAclEntry": "

    Creates an entry (a rule) in a network ACL with the specified rule number. Each network ACL has a set of numbered ingress rules and a separate set of numbered egress rules. When determining whether a packet should be allowed in or out of a subnet associated with the ACL, we process the entries in the ACL according to the rule numbers, in ascending order.

    We recommend that you leave room between the rule numbers (for example, 100, 110, 120, ...), and not number them one right after the other (for example, 101, 102, 103, ...). This makes it easier to add a rule between existing ones without having to renumber the rules.

    After you add an entry, you can't modify it; you must either replace it, or create an entry and delete the old one.

    For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

    ", + "CreateNetworkInterface": "

    Creates a network interface in the specified subnet.

    For more information about network interfaces, see Elastic Network Interfaces in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreatePlacementGroup": "

    Creates a placement group that you launch cluster instances into. You must give the group a name that's unique within the scope of your account.

    For more information about placement groups and cluster instances, see Cluster Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateReservedInstancesListing": "

    Creates a listing for Amazon EC2 Reserved Instances to be sold in the Reserved Instance Marketplace. You can submit one Reserved Instance listing at a time. To get a list of your Reserved Instances, you can use the DescribeReservedInstances operation.

    The Reserved Instance Marketplace matches sellers who want to resell Reserved Instance capacity that they no longer need with buyers who want to purchase additional capacity. Reserved Instances bought and sold through the Reserved Instance Marketplace work like any other Reserved Instances.

    To sell your Reserved Instances, you must first register as a seller in the Reserved Instance Marketplace. After completing the registration process, you can create a Reserved Instance Marketplace listing of some or all of your Reserved Instances, and specify the upfront price to receive for them. Your Reserved Instance listings then become available for purchase. To view the details of your Reserved Instance listing, you can use the DescribeReservedInstancesListings operation.

    For more information, see Reserved Instance Marketplace in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateRoute": "

    Creates a route in a route table within a VPC.

    You must specify one of the following targets: Internet gateway or virtual private gateway, NAT instance, NAT gateway, VPC peering connection, or network interface.

    When determining how to route traffic, we use the route with the most specific match. For example, let's say the traffic is destined for 192.0.2.3, and the route table includes the following two routes:

    • 192.0.2.0/24 (goes to some target A)

    • 192.0.2.0/28 (goes to some target B)

    Both routes apply to the traffic destined for 192.0.2.3. However, the second route in the list covers a smaller number of IP addresses and is therefore more specific, so we use that route to determine where to target the traffic.

    For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.
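
    For example, a default route to an Internet gateway might be created as in this sketch; the route table and gateway IDs are hypothetical placeholders:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := ec2.New(sess)

	// Send all Internet-bound traffic to an Internet gateway. Only one
	// target field (gateway, NAT gateway, instance, peering connection,
	// or network interface) should be set per route.
	_, err := svc.CreateRoute(&ec2.CreateRouteInput{
		RouteTableId:         aws.String("rtb-0123456789abcdef0"), // hypothetical route table ID
		DestinationCidrBlock: aws.String("0.0.0.0/0"),
		GatewayId:            aws.String("igw-0123456789abcdef0"), // hypothetical Internet gateway ID
	})
	if err != nil {
		log.Fatal(err)
	}
}
```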

    ", + "CreateRouteTable": "

    Creates a route table for the specified VPC. After you create a route table, you can add routes and associate the table with a subnet.

    For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

    ", + "CreateSecurityGroup": "

    Creates a security group.

    A security group is for use with instances either in the EC2-Classic platform or in a specific VPC. For more information, see Amazon EC2 Security Groups in the Amazon Elastic Compute Cloud User Guide and Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.

    EC2-Classic: You can have up to 500 security groups.

    EC2-VPC: You can create up to 500 security groups per VPC.

    When you create a security group, you specify a friendly name of your choice. You can have a security group for use in EC2-Classic with the same name as a security group for use in a VPC. However, you can't have two security groups for use in EC2-Classic with the same name or two security groups for use in a VPC with the same name.

    You have a default security group for use in EC2-Classic and a default security group for use in your VPC. If you don't specify a security group when you launch an instance, the instance is launched into the appropriate default security group. A default security group includes a default rule that grants instances unrestricted network access to each other.

    You can add or remove rules from your security groups using AuthorizeSecurityGroupIngress, AuthorizeSecurityGroupEgress, RevokeSecurityGroupIngress, and RevokeSecurityGroupEgress.

    ", + "CreateSnapshot": "

    Creates a snapshot of an EBS volume and stores it in Amazon S3. You can use snapshots for backups, to make copies of EBS volumes, and to save data before shutting down an instance.

    When a snapshot is created, any AWS Marketplace product codes that are associated with the source volume are propagated to the snapshot.

    You can take a snapshot of an attached volume that is in use. However, snapshots only capture data that has been written to your EBS volume at the time the snapshot command is issued; this may exclude any data that has been cached by any applications or the operating system. If you can pause any file systems on the volume long enough to take a snapshot, your snapshot should be complete. However, if you cannot pause all file writes to the volume, you should unmount the volume from within the instance, issue the snapshot command, and then remount the volume to ensure a consistent and complete snapshot. You may remount and use your volume while the snapshot status is pending.

    To create a snapshot for EBS volumes that serve as root devices, you should stop the instance before taking the snapshot.

    Snapshots that are taken from encrypted volumes are automatically encrypted. Volumes that are created from encrypted snapshots are also automatically encrypted. Your encrypted volumes and any associated snapshots always remain protected.

    For more information, see Amazon Elastic Block Store and Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.
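
    A sketch that creates a snapshot and then blocks on the SDK's snapshot-completed waiter; the volume ID and description are hypothetical placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := ec2.New(sess)

	snap, err := svc.CreateSnapshot(&ec2.CreateSnapshotInput{
		VolumeId:    aws.String("vol-0123456789abcdef0"), // hypothetical volume ID
		Description: aws.String("pre-upgrade backup"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Poll until the snapshot leaves the pending state.
	err = svc.WaitUntilSnapshotCompleted(&ec2.DescribeSnapshotsInput{
		SnapshotIds: []*string{snap.SnapshotId},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("completed:", aws.StringValue(snap.SnapshotId))
}
```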

    ", + "CreateSpotDatafeedSubscription": "

    Creates a data feed for Spot instances, enabling you to view Spot instance usage logs. You can create one data feed per AWS account. For more information, see Spot Instance Data Feed in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateSubnet": "

    Creates a subnet in an existing VPC.

    When you create each subnet, you provide the VPC ID and the CIDR block you want for the subnet. After you create a subnet, you can't change its CIDR block. The subnet's CIDR block can be the same as the VPC's CIDR block (assuming you want only a single subnet in the VPC), or a subset of the VPC's CIDR block. If you create more than one subnet in a VPC, the subnets' CIDR blocks must not overlap. The smallest subnet (and VPC) you can create uses a /28 netmask (16 IP addresses), and the largest uses a /16 netmask (65,536 IP addresses).

    AWS reserves the first four IP addresses and the last IP address in each subnet's CIDR block; they're not available for use.

    If you add more than one subnet to a VPC, they're set up in a star topology with a logical router in the middle.

    If you launch an instance in a VPC using an Amazon EBS-backed AMI, the IP address doesn't change if you stop and restart the instance (unlike a similar instance launched outside a VPC, which gets a new IP address when restarted). It's therefore possible to have a subnet with no running instances (they're all stopped), but no remaining IP addresses available.

    For more information about subnets, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.
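
    A minimal sketch with hypothetical VPC ID, CIDR block, and Availability Zone:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := ec2.New(sess)

	out, err := svc.CreateSubnet(&ec2.CreateSubnetInput{
		VpcId:            aws.String("vpc-0123456789abcdef0"), // hypothetical VPC ID
		CidrBlock:        aws.String("10.0.1.0/24"),           // must fall within the VPC's CIDR block
		AvailabilityZone: aws.String("us-east-1a"),            // optional; if omitted, AWS picks a zone
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("subnet:", aws.StringValue(out.Subnet.SubnetId))
}
```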

    ", + "CreateTags": "

    Adds or overwrites one or more tags for the specified Amazon EC2 resource or resources. Each resource can have a maximum of 10 tags. Each tag consists of a key and an optional value. Tag keys must be unique per resource.

    For more information about tags, see Tagging Your Resources in the Amazon Elastic Compute Cloud User Guide. For more information about creating IAM policies that control users' access to resources based on tags, see Supported Resource-Level Permissions for Amazon EC2 API Actions in the Amazon Elastic Compute Cloud User Guide.
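
    A sketch of tagging a single (hypothetical) instance with two tags; re-using an existing key overwrites its value:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := ec2.New(sess)

	// Multiple resource IDs may be passed to tag several resources at once.
	_, err := svc.CreateTags(&ec2.CreateTagsInput{
		Resources: []*string{aws.String("i-0123456789abcdef0")}, // hypothetical instance ID
		Tags: []*ec2.Tag{
			{Key: aws.String("Name"), Value: aws.String("web-01")},
			{Key: aws.String("Environment"), Value: aws.String("staging")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```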

    ", + "CreateVolume": "

    Creates an EBS volume that can be attached to an instance in the same Availability Zone. The volume is created in the regional endpoint that you send the HTTP request to. For more information see Regions and Endpoints.

    You can create a new empty volume or restore a volume from an EBS snapshot. Any AWS Marketplace product codes from the snapshot are propagated to the volume.

    You can create encrypted volumes with the Encrypted parameter. Encrypted volumes may only be attached to instances that support Amazon EBS encryption. Volumes that are created from encrypted snapshots are also automatically encrypted. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

    For more information, see Creating or Restoring an Amazon EBS Volume in the Amazon Elastic Compute Cloud User Guide.
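
    A sketch of creating an encrypted gp2 volume; the zone and size are arbitrary examples:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := ec2.New(sess)

	// The volume must be created in the same Availability Zone as the
	// instance it will later be attached to.
	vol, err := svc.CreateVolume(&ec2.CreateVolumeInput{
		AvailabilityZone: aws.String("us-east-1a"),
		Size:             aws.Int64(100), // GiB
		VolumeType:       aws.String("gp2"),
		Encrypted:        aws.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("volume:", aws.StringValue(vol.VolumeId))
}
```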

    ", + "CreateVpc": "

    Creates a VPC with the specified CIDR block.

    The smallest VPC you can create uses a /28 netmask (16 IP addresses), and the largest uses a /16 netmask (65,536 IP addresses). To help you decide how big to make your VPC, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

    By default, each instance you launch in the VPC has the default DHCP options, which includes only a default DNS server that we provide (AmazonProvidedDNS). For more information about DHCP options, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

    You can specify the instance tenancy value for the VPC when you create it. You can't change this value for the VPC after you create it. For more information, see Dedicated Instances in the Amazon Virtual Private Cloud User Guide.

    ", + "CreateVpcEndpoint": "

    Creates a VPC endpoint for a specified AWS service. An endpoint enables you to create a private connection between your VPC and another AWS service in your account. You can specify an endpoint policy to attach to the endpoint, which controls access to the service from your VPC. You can also specify the VPC route tables that use the endpoint.

    Currently, only endpoints to Amazon S3 are supported.

    ", + "CreateVpcPeeringConnection": "

    Requests a VPC peering connection between two VPCs: a requester VPC that you own and a peer VPC with which to create the connection. The peer VPC can belong to another AWS account. The requester VPC and peer VPC cannot have overlapping CIDR blocks.

    The owner of the peer VPC must accept the peering request to activate the peering connection. The VPC peering connection request expires after 7 days, after which it cannot be accepted or rejected.

    A CreateVpcPeeringConnection request between VPCs with overlapping CIDR blocks results in the VPC peering connection having a status of failed.

    ", + "CreateVpnConnection": "

    Creates a VPN connection between an existing virtual private gateway and a VPN customer gateway. The only supported connection type is ipsec.1.

    The response includes information that you need to give to your network administrator to configure your customer gateway.

    We strongly recommend that you use HTTPS when calling this operation because the response contains sensitive cryptographic information for configuring your customer gateway.

    If you decide to shut down your VPN connection for any reason and later create a new VPN connection, you must reconfigure your customer gateway with the new information returned from this call.

    This is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error.

    For more information about VPN connections, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "CreateVpnConnectionRoute": "

    Creates a static route associated with a VPN connection between an existing virtual private gateway and a VPN customer gateway. The static route allows traffic to be routed from the virtual private gateway to the VPN customer gateway.

    For more information about VPN connections, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "CreateVpnGateway": "

    Creates a virtual private gateway. A virtual private gateway is the endpoint on the VPC side of your VPN connection. You can create a virtual private gateway before creating the VPC itself.

    For more information about virtual private gateways, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "DeleteCustomerGateway": "

    Deletes the specified customer gateway. You must delete the VPN connection before you can delete the customer gateway.

    ", + "DeleteDhcpOptions": "

    Deletes the specified set of DHCP options. You must disassociate the set of DHCP options before you can delete it. You can disassociate the set of DHCP options by associating either a new set of options or the default set of options with the VPC.

    ", + "DeleteFlowLogs": "

    Deletes one or more flow logs.

    ", + "DeleteInternetGateway": "

    Deletes the specified Internet gateway. You must detach the Internet gateway from the VPC before you can delete it.

    ", + "DeleteKeyPair": "

    Deletes the specified key pair, by removing the public key from Amazon EC2.

    ", + "DeleteNatGateway": "

    Deletes the specified NAT gateway. Deleting a NAT gateway disassociates its Elastic IP address, but does not release the address from your account. Deleting a NAT gateway does not delete any NAT gateway routes in your route tables.

    ", + "DeleteNetworkAcl": "

    Deletes the specified network ACL. You can't delete the ACL if it's associated with any subnets. You can't delete the default network ACL.

    ", + "DeleteNetworkAclEntry": "

    Deletes the specified ingress or egress entry (rule) from the specified network ACL.

    ", + "DeleteNetworkInterface": "

    Deletes the specified network interface. You must detach the network interface before you can delete it.

    ", + "DeletePlacementGroup": "

    Deletes the specified placement group. You must terminate all instances in the placement group before you can delete the placement group. For more information about placement groups and cluster instances, see Cluster Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "DeleteRoute": "

    Deletes the specified route from the specified route table.

    ", + "DeleteRouteTable": "

    Deletes the specified route table. You must disassociate the route table from any subnets before you can delete it. You can't delete the main route table.

    ", + "DeleteSecurityGroup": "

    Deletes a security group.

    If you attempt to delete a security group that is associated with an instance, or is referenced by another security group, the operation fails with InvalidGroup.InUse in EC2-Classic or DependencyViolation in EC2-VPC.

    ", + "DeleteSnapshot": "

    Deletes the specified snapshot.

    When you make periodic snapshots of a volume, the snapshots are incremental, and only the blocks on the device that have changed since your last snapshot are saved in the new snapshot. When you delete a snapshot, only the data not needed for any other snapshot is removed. So regardless of which prior snapshots have been deleted, all active snapshots will have access to all the information needed to restore the volume.

    You cannot delete a snapshot of the root device of an EBS volume used by a registered AMI. You must first deregister the AMI before you can delete the snapshot.

    For more information, see Deleting an Amazon EBS Snapshot in the Amazon Elastic Compute Cloud User Guide.

    ", + "DeleteSpotDatafeedSubscription": "

    Deletes the data feed for Spot instances.

    ", + "DeleteSubnet": "

    Deletes the specified subnet. You must terminate all running instances in the subnet before you can delete the subnet.

    ", + "DeleteTags": "

    Deletes the specified set of tags from the specified set of resources. This call is designed to follow a DescribeTags request.

    For more information about tags, see Tagging Your Resources in the Amazon Elastic Compute Cloud User Guide.

    ", + "DeleteVolume": "

    Deletes the specified EBS volume. The volume must be in the available state (not attached to an instance).

    The volume may remain in the deleting state for several minutes.

    For more information, see Deleting an Amazon EBS Volume in the Amazon Elastic Compute Cloud User Guide.

    ", + "DeleteVpc": "

    Deletes the specified VPC. You must detach or delete all gateways and resources that are associated with the VPC before you can delete it. For example, you must terminate all instances running in the VPC, delete all security groups associated with the VPC (except the default one), delete all route tables associated with the VPC (except the default one), and so on.

    ", + "DeleteVpcEndpoints": "

    Deletes one or more specified VPC endpoints. Deleting the endpoint also deletes the endpoint routes in the route tables that were associated with the endpoint.

    ", + "DeleteVpcPeeringConnection": "

    Deletes a VPC peering connection. Either the owner of the requester VPC or the owner of the peer VPC can delete the VPC peering connection if it's in the active state. The owner of the requester VPC can delete a VPC peering connection in the pending-acceptance state.

    ", + "DeleteVpnConnection": "

    Deletes the specified VPN connection.

    If you're deleting the VPC and its associated components, we recommend that you detach the virtual private gateway from the VPC and delete the VPC before deleting the VPN connection. If you believe that the tunnel credentials for your VPN connection have been compromised, you can delete the VPN connection and create a new one that has new keys, without needing to delete the VPC or virtual private gateway. If you create a new VPN connection, you must reconfigure the customer gateway using the new configuration information returned with the new VPN connection ID.

    ", + "DeleteVpnConnectionRoute": "

    Deletes the specified static route associated with a VPN connection between an existing virtual private gateway and a VPN customer gateway. The static route allows traffic to be routed from the virtual private gateway to the VPN customer gateway.

    ", + "DeleteVpnGateway": "

    Deletes the specified virtual private gateway. We recommend that before you delete a virtual private gateway, you detach it from the VPC and delete the VPN connection. Note that you don't need to delete the virtual private gateway if you plan to delete and recreate the VPN connection between your VPC and your network.

    ", + "DeregisterImage": "

    Deregisters the specified AMI. After you deregister an AMI, it can't be used to launch new instances.

    This command does not delete the AMI.

    ", + "DescribeAccountAttributes": "

    Describes attributes of your AWS account. The following are the supported account attributes:

    • supported-platforms: Indicates whether your account can launch instances into EC2-Classic and EC2-VPC, or only into EC2-VPC.

    • default-vpc: The ID of the default VPC for your account, or none.

    • max-instances: The maximum number of On-Demand instances that you can run.

    • vpc-max-security-groups-per-interface: The maximum number of security groups that you can assign to a network interface.

    • max-elastic-ips: The maximum number of Elastic IP addresses that you can allocate for use with EC2-Classic.

    • vpc-max-elastic-ips: The maximum number of Elastic IP addresses that you can allocate for use with EC2-VPC.

    ", + "DescribeAddresses": "

    Describes one or more of your Elastic IP addresses.

    An Elastic IP address is for use in either the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeAvailabilityZones": "

    Describes one or more of the Availability Zones that are available to you. The results include zones only for the region you're currently using. If there is an event impacting an Availability Zone, you can use this request to view the state and any provided message for that Availability Zone.

    For more information, see Regions and Availability Zones in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeBundleTasks": "

    Describes one or more of your bundling tasks.

    Completed bundle tasks are listed for only a limited time. If your bundle task is no longer in the list, you can still register an AMI from it. Just use RegisterImage with the Amazon S3 bucket name and image manifest name you provided to the bundle task.

    ", + "DescribeClassicLinkInstances": "

    Describes one or more of your linked EC2-Classic instances. This request only returns information about EC2-Classic instances linked to a VPC through ClassicLink; you cannot use this request to return information about other instances.

    ", + "DescribeConversionTasks": "

    Describes one or more of your conversion tasks. For more information, see Using the Command Line Tools to Import Your Virtual Machine to Amazon EC2 in the Amazon Elastic Compute Cloud User Guide.

    For information about the import manifest referenced by this API action, see VM Import Manifest.

    ", + "DescribeCustomerGateways": "

    Describes one or more of your VPN customer gateways.

    For more information about VPN customer gateways, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "DescribeDhcpOptions": "

    Describes one or more of your DHCP options sets.

    For more information about DHCP options sets, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

    ", + "DescribeExportTasks": "

    Describes one or more of your export tasks.

    ", + "DescribeFlowLogs": "

    Describes one or more flow logs. To view the information in your flow logs (the log streams for the network interfaces), you must use the CloudWatch Logs console or the CloudWatch Logs API.

    ", + "DescribeHosts": "

    Describes one or more of your Dedicated hosts.

    The results describe only the Dedicated hosts in the region you're currently using. All listed instances consume capacity on your Dedicated host. Dedicated hosts that have recently been released will be listed with the state released.

    ", + "DescribeIdFormat": "

    Describes the ID format settings for your resources on a per-region basis, for example, to view which resource types are enabled for longer IDs. This request only returns information about resource types whose ID formats can be modified; it does not return information about other resource types.

    The following resource types support longer IDs: instance | reservation | snapshot | volume.

    These settings apply to the IAM user who makes the request; they do not apply to the entire AWS account. An IAM user defaults to the same settings as the root user, unless they explicitly override the settings by running the ModifyIdFormat command. Resources created with longer IDs are visible to all IAM users, regardless of these settings and provided that they have permission to use the relevant Describe command for the resource type.

    ", + "DescribeImageAttribute": "

    Describes the specified attribute of the specified AMI. You can specify only one attribute at a time.

    ", + "DescribeImages": "

    Describes one or more of the images (AMIs, AKIs, and ARIs) available to you. Images available to you include public images, private images that you own, and private images owned by other AWS accounts but for which you have explicit launch permissions.

    Deregistered images are included in the returned results for an unspecified interval after deregistration.

    ", + "DescribeImportImageTasks": "

    Displays details about the import virtual machine tasks or import snapshot tasks that have already been created.

    ", + "DescribeImportSnapshotTasks": "

    Describes your import snapshot tasks.

    ", + "DescribeInstanceAttribute": "

    Describes the specified attribute of the specified instance. You can specify only one attribute at a time. Valid attribute values are: instanceType | kernel | ramdisk | userData | disableApiTermination | instanceInitiatedShutdownBehavior | rootDeviceName | blockDeviceMapping | productCodes | sourceDestCheck | groupSet | ebsOptimized | sriovNetSupport

    ", + "DescribeInstanceStatus": "

    Describes the status of one or more instances. By default, only running instances are described, unless specified otherwise.

    Instance status includes the following components:

    • Status checks - Amazon EC2 performs status checks on running EC2 instances to identify hardware and software issues. For more information, see Status Checks for Your Instances and Troubleshooting Instances with Failed Status Checks in the Amazon Elastic Compute Cloud User Guide.

    • Scheduled events - Amazon EC2 can schedule events (such as reboot, stop, or terminate) for your instances related to hardware issues, software updates, or system maintenance. For more information, see Scheduled Events for Your Instances in the Amazon Elastic Compute Cloud User Guide.

    • Instance state - You can manage your instances from the moment you launch them through their termination. For more information, see Instance Lifecycle in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeInstances": "

    Describes one or more of your instances.

    If you specify one or more instance IDs, Amazon EC2 returns information for those instances. If you do not specify instance IDs, Amazon EC2 returns information for all relevant instances. If you specify an instance ID that is not valid, an error is returned. If you specify an instance that you do not own, it is not included in the returned results.

    Recently terminated instances might appear in the returned results. This interval is usually less than one hour.
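
    A sketch that filters for running instances; note that the response groups instances by reservation. The filter name and values follow the EC2 filter syntax:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := ec2.New(sess)

	out, err := svc.DescribeInstances(&ec2.DescribeInstancesInput{
		Filters: []*ec2.Filter{
			{Name: aws.String("instance-state-name"), Values: []*string{aws.String("running")}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	// Instances are grouped by reservation in the response.
	for _, res := range out.Reservations {
		for _, inst := range res.Instances {
			fmt.Println(aws.StringValue(inst.InstanceId), aws.StringValue(inst.InstanceType))
		}
	}
}
```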

    ", + "DescribeInternetGateways": "

    Describes one or more of your Internet gateways.

    ", + "DescribeKeyPairs": "

    Describes one or more of your key pairs.

    For more information about key pairs, see Key Pairs in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeMovingAddresses": "

    Describes your Elastic IP addresses that are being moved to the EC2-VPC platform, or that are being restored to the EC2-Classic platform. This request does not return information about any other Elastic IP addresses in your account.

    ", + "DescribeNatGateways": "

    Describes one or more of your NAT gateways.

    ", + "DescribeNetworkAcls": "

    Describes one or more of your network ACLs.

    For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

    ", + "DescribeNetworkInterfaceAttribute": "

    Describes a network interface attribute. You can specify only one attribute at a time.

    ", + "DescribeNetworkInterfaces": "

    Describes one or more of your network interfaces.

    ", + "DescribePlacementGroups": "

    Describes one or more of your placement groups. For more information about placement groups and cluster instances, see Cluster Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribePrefixLists": "

    Describes available AWS services in a prefix list format, which includes the prefix list name and prefix list ID of the service and the IP address range for the service. A prefix list ID is required for creating an outbound security group rule that allows traffic from a VPC to access an AWS service through a VPC endpoint.

    ", + "DescribeRegions": "

    Describes one or more regions that are currently available to you.

    For a list of the regions supported by Amazon EC2, see Regions and Endpoints.

    ", + "DescribeReservedInstances": "

    Describes one or more of the Reserved Instances that you purchased.

    For more information about Reserved Instances, see Reserved Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeReservedInstancesListings": "

    Describes your account's Reserved Instance listings in the Reserved Instance Marketplace.

    The Reserved Instance Marketplace matches sellers who want to resell Reserved Instance capacity that they no longer need with buyers who want to purchase additional capacity. Reserved Instances bought and sold through the Reserved Instance Marketplace work like any other Reserved Instances.

    As a seller, you choose to list some or all of your Reserved Instances, and you specify the upfront price to receive for them. Your Reserved Instances are then listed in the Reserved Instance Marketplace and are available for purchase.

    As a buyer, you specify the configuration of the Reserved Instance to purchase, and the Marketplace matches what you're searching for with what's available. The Marketplace first sells the lowest priced Reserved Instances to you, and continues to sell available Reserved Instance listings to you until your demand is met. You are charged based on the total price of all of the listings that you purchase.

    For more information, see Reserved Instance Marketplace in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeReservedInstancesModifications": "

    Describes the modifications made to your Reserved Instances. If no parameter is specified, information about all your Reserved Instances modification requests is returned. If a modification ID is specified, only information about the specific modification is returned.

    For more information, see Modifying Reserved Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeReservedInstancesOfferings": "

    Describes Reserved Instance offerings that are available for purchase. With Reserved Instances, you purchase the right to launch instances for a period of time. During that time period, you do not receive insufficient capacity errors, and you pay a lower usage rate than the rate charged for On-Demand instances for the actual time used.

    If you have listed your own Reserved Instances for sale in the Reserved Instance Marketplace, they will be excluded from these results. This is to ensure that you do not purchase your own Reserved Instances.

    For more information, see Reserved Instance Marketplace in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeRouteTables": "

    Describes one or more of your route tables.

    Each subnet in your VPC must be associated with a route table. If a subnet is not explicitly associated with any route table, it is implicitly associated with the main route table. This command does not return the subnet ID for implicit associations.

    For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

    ", + "DescribeScheduledInstanceAvailability": "

    Finds available schedules that meet the specified criteria.

    You can search for an available schedule no more than 3 months in advance. You must meet the minimum required duration of 1,200 hours per year. For example, the minimum daily schedule is 4 hours, the minimum weekly schedule is 24 hours, and the minimum monthly schedule is 100 hours.

    After you find a schedule that meets your needs, call PurchaseScheduledInstances to purchase Scheduled Instances with that schedule.

    ", + "DescribeScheduledInstances": "

    Describes one or more of your Scheduled Instances.

    ", + "DescribeSecurityGroupReferences": "

    [EC2-VPC only] Describes the VPCs on the other side of a VPC peering connection that are referencing the security groups you've specified in this request.

    ", + "DescribeSecurityGroups": "

    Describes one or more of your security groups.

    A security group is for use with instances either in the EC2-Classic platform or in a specific VPC. For more information, see Amazon EC2 Security Groups in the Amazon Elastic Compute Cloud User Guide and Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "DescribeSnapshotAttribute": "

    Describes the specified attribute of the specified snapshot. You can specify only one attribute at a time.

    For more information about EBS snapshots, see Amazon EBS Snapshots in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeSnapshots": "

    Describes one or more of the EBS snapshots available to you. Available snapshots include public snapshots available for any AWS account to launch, private snapshots that you own, and private snapshots owned by another AWS account but for which you've been given explicit create volume permissions.

    The create volume permissions fall into the following categories:

    • public: The owner of the snapshot granted create volume permissions for the snapshot to the all group. All AWS accounts have create volume permissions for these snapshots.

    • explicit: The owner of the snapshot granted create volume permissions to a specific AWS account.

    • implicit: An AWS account has implicit create volume permissions for all snapshots it owns.

    The list of snapshots returned can be modified by specifying snapshot IDs, snapshot owners, or AWS accounts with create volume permissions. If no options are specified, Amazon EC2 returns all snapshots for which you have create volume permissions.

    If you specify one or more snapshot IDs, only snapshots that have the specified IDs are returned. If you specify an invalid snapshot ID, an error is returned. If you specify a snapshot ID for which you do not have access, it is not included in the returned results.

    If you specify one or more snapshot owners, only snapshots from the specified owners and for which you have access are returned. The results can include the AWS account IDs of the specified owners, amazon for snapshots owned by Amazon, or self for snapshots that you own.

    If you specify a list of restorable users, only snapshots with create snapshot permissions for those users are returned. You can specify AWS account IDs (if you own the snapshots), self for snapshots that you own or for which you have explicit permissions, or all for public snapshots.

    If you are describing a long list of snapshots, you can paginate the output to make the list more manageable. The MaxResults parameter sets the maximum number of results returned in a single page. If the list of results exceeds your MaxResults value, then that number of results is returned along with a NextToken value that can be passed to a subsequent DescribeSnapshots request to retrieve the remaining results.

    For more information about EBS snapshots, see Amazon EBS Snapshots in the Amazon Elastic Compute Cloud User Guide.
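
    The MaxResults/NextToken loop is wrapped by the SDK's Pages helper; a sketch listing your own snapshots, with an arbitrary page size:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := ec2.New(sess)

	// The Pages variant follows NextToken automatically until the callback
	// returns false or the last page is reached.
	err := svc.DescribeSnapshotsPages(&ec2.DescribeSnapshotsInput{
		OwnerIds:   []*string{aws.String("self")},
		MaxResults: aws.Int64(100), // page size
	}, func(page *ec2.DescribeSnapshotsOutput, lastPage bool) bool {
		for _, s := range page.Snapshots {
			fmt.Println(aws.StringValue(s.SnapshotId), aws.StringValue(s.State))
		}
		return true
	})
	if err != nil {
		log.Fatal(err)
	}
}
```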

    ", + "DescribeSpotDatafeedSubscription": "

    Describes the data feed for Spot instances. For more information, see Spot Instance Data Feed in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeSpotFleetInstances": "

    Describes the running instances for the specified Spot fleet.

    ", + "DescribeSpotFleetRequestHistory": "

    Describes the events for the specified Spot fleet request during the specified time.

    Spot fleet events are delayed by up to 30 seconds before they can be described. This ensures that you can query by the last evaluated time and not miss a recorded event.

    ", + "DescribeSpotFleetRequests": "

    Describes your Spot fleet requests.

    ", + "DescribeSpotInstanceRequests": "

    Describes the Spot instance requests that belong to your account. Spot instances are instances that Amazon EC2 launches when the bid price that you specify exceeds the current Spot price. Amazon EC2 periodically sets the Spot price based on available Spot instance capacity and current Spot instance requests. For more information, see Spot Instance Requests in the Amazon Elastic Compute Cloud User Guide.

    You can use DescribeSpotInstanceRequests to find a running Spot instance by examining the response. If the status of the Spot instance request is fulfilled, the response contains the ID of the instance. Alternatively, you can use DescribeInstances with a filter to look for instances where the instance lifecycle is spot.

    ", + "DescribeSpotPriceHistory": "

    Describes the Spot price history. The prices returned are listed in chronological order, from the oldest to the most recent, for up to the past 90 days. For more information, see Spot Instance Pricing History in the Amazon Elastic Compute Cloud User Guide.

    When you specify a start and end time, this operation returns the prices of the instance types within the time range that you specified and the time when the price changed. The price is valid within the time period that you specified; the response merely indicates the last time that the price changed.
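
    A sketch querying the last 24 hours of prices; the instance type and product description are arbitrary examples:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := ec2.New(sess)

	out, err := svc.DescribeSpotPriceHistory(&ec2.DescribeSpotPriceHistoryInput{
		StartTime:           aws.Time(time.Now().Add(-24 * time.Hour)),
		EndTime:             aws.Time(time.Now()),
		InstanceTypes:       []*string{aws.String("m4.large")},
		ProductDescriptions: []*string{aws.String("Linux/UNIX")},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range out.SpotPriceHistory {
		fmt.Println(p.Timestamp, aws.StringValue(p.AvailabilityZone), aws.StringValue(p.SpotPrice))
	}
}
```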

    ", + "DescribeStaleSecurityGroups": "

    [EC2-VPC only] Describes the stale security group rules for security groups in a specified VPC. Rules are stale when they reference a deleted security group in a peer VPC, or a security group in a peer VPC for which the VPC peering connection has been deleted.

    ", + "DescribeSubnets": "

    Describes one or more of your subnets.

    For more information about subnets, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

    ", + "DescribeTags": "

    Describes one or more of the tags for your EC2 resources.

    For more information about tags, see Tagging Your Resources in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeVolumeAttribute": "

    Describes the specified attribute of the specified volume. You can specify only one attribute at a time.

    For more information about EBS volumes, see Amazon EBS Volumes in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeVolumeStatus": "

    Describes the status of the specified volumes. Volume status provides the result of the checks performed on your volumes to determine events that can impair the performance of your volumes. The performance of a volume can be affected if an issue occurs on the volume's underlying host. If the volume's underlying host experiences a power outage or system issue, after the system is restored, there could be data inconsistencies on the volume. Volume events notify you if this occurs. Volume actions notify you if any action needs to be taken in response to the event.

    The DescribeVolumeStatus operation provides the following information about the specified volumes:

    Status: Reflects the current status of the volume. The possible values are ok, impaired, warning, or insufficient-data. If all checks pass, the overall status of the volume is ok. If the check fails, the overall status is impaired. If the status is insufficient-data, then the checks may still be taking place on your volume at the time. We recommend that you retry the request. For more information on volume status, see Monitoring the Status of Your Volumes.

    Events: Reflect the cause of a volume status and may require you to take action. For example, if your volume returns an impaired status, then the volume event might be potential-data-inconsistency. This means that your volume has been affected by an issue with the underlying host, has all I/O operations disabled, and may have inconsistent data.

    Actions: Reflect the actions you may have to take in response to an event. For example, if the status of the volume is impaired and the volume event shows potential-data-inconsistency, then the action shows enable-volume-io. This means that you may want to enable the I/O operations for the volume by calling the EnableVolumeIO action and then check the volume for data consistency.

    Volume status is based on the volume status checks, and does not reflect the volume state. Therefore, volume status does not indicate volumes in the error state (for example, when a volume is incapable of accepting I/O).

    ", + "DescribeVolumes": "

    Describes the specified EBS volumes.

    If you are describing a long list of volumes, you can paginate the output to make the list more manageable. The MaxResults parameter sets the maximum number of results returned in a single page. If the list of results exceeds your MaxResults value, then that number of results is returned along with a NextToken value that can be passed to a subsequent DescribeVolumes request to retrieve the remaining results.

    For more information about EBS volumes, see Amazon EBS Volumes in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeVpcAttribute": "

    Describes the specified attribute of the specified VPC. You can specify only one attribute at a time.

    ", + "DescribeVpcClassicLink": "

    Describes the ClassicLink status of one or more VPCs.

    ", + "DescribeVpcClassicLinkDnsSupport": "

    Describes the ClassicLink DNS support status of one or more VPCs. If enabled, the DNS hostname of a linked EC2-Classic instance resolves to its private IP address when addressed from an instance in the VPC to which it's linked. Similarly, the DNS hostname of an instance in a VPC resolves to its private IP address when addressed from a linked EC2-Classic instance. For more information about ClassicLink, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeVpcEndpointServices": "

    Describes all supported AWS services that can be specified when creating a VPC endpoint.

    ", + "DescribeVpcEndpoints": "

    Describes one or more of your VPC endpoints.

    ", + "DescribeVpcPeeringConnections": "

    Describes one or more of your VPC peering connections.

    ", + "DescribeVpcs": "

    Describes one or more of your VPCs.

    ", + "DescribeVpnConnections": "

    Describes one or more of your VPN connections.

    For more information about VPN connections, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "DescribeVpnGateways": "

    Describes one or more of your virtual private gateways.

    For more information about virtual private gateways, see Adding an IPsec Hardware VPN to Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "DetachClassicLinkVpc": "

    Unlinks (detaches) a linked EC2-Classic instance from a VPC. After the instance has been unlinked, the VPC security groups are no longer associated with it. An instance is automatically unlinked from a VPC when it's stopped.

    ", + "DetachInternetGateway": "

    Detaches an Internet gateway from a VPC, disabling connectivity between the Internet and the VPC. The VPC must not contain any running instances with Elastic IP addresses.

    ", + "DetachNetworkInterface": "

    Detaches a network interface from an instance.

    ", + "DetachVolume": "

    Detaches an EBS volume from an instance. Make sure to unmount any file systems on the device within your operating system before detaching the volume. Failure to do so results in the volume being stuck in a busy state while detaching.

    If an Amazon EBS volume is the root device of an instance, it can't be detached while the instance is running. To detach the root volume, stop the instance first.

    When a volume with an AWS Marketplace product code is detached from an instance, the product code is no longer associated with the instance.

    For more information, see Detaching an Amazon EBS Volume in the Amazon Elastic Compute Cloud User Guide.

    ", + "DetachVpnGateway": "

    Detaches a virtual private gateway from a VPC. You do this if you're planning to turn off the VPC and not use it anymore. You can confirm a virtual private gateway has been completely detached from a VPC by describing the virtual private gateway (any attachments to the virtual private gateway are also described).

    You must wait for the attachment's state to switch to detached before you can delete the VPC or attach a different VPC to the virtual private gateway.

    ", + "DisableVgwRoutePropagation": "

    Disables a virtual private gateway (VGW) from propagating routes to a specified route table of a VPC.

    ", + "DisableVpcClassicLink": "

    Disables ClassicLink for a VPC. You cannot disable ClassicLink for a VPC that has EC2-Classic instances linked to it.

    ", + "DisableVpcClassicLinkDnsSupport": "

    Disables ClassicLink DNS support for a VPC. If disabled, DNS hostnames resolve to public IP addresses when addressed between a linked EC2-Classic instance and instances in the VPC to which it's linked. For more information about ClassicLink, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

    ", + "DisassociateAddress": "

    Disassociates an Elastic IP address from the instance or network interface it's associated with.

    An Elastic IP address is for use in either the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

    This is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error.

    ", + "DisassociateRouteTable": "

    Disassociates a subnet from a route table.

    After you perform this action, the subnet no longer uses the routes in the route table. Instead, it uses the routes in the VPC's main route table. For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

    ", + "EnableVgwRoutePropagation": "

    Enables a virtual private gateway (VGW) to propagate routes to the specified route table of a VPC.

    ", + "EnableVolumeIO": "

    Enables I/O operations for a volume that had I/O operations disabled because the data on the volume was potentially inconsistent.

    ", + "EnableVpcClassicLink": "

    Enables a VPC for ClassicLink. You can then link EC2-Classic instances to your ClassicLink-enabled VPC to allow communication over private IP addresses. You cannot enable your VPC for ClassicLink if any of your VPC's route tables have existing routes for address ranges within the 10.0.0.0/8 IP address range, excluding local routes for VPCs in the 10.0.0.0/16 and 10.1.0.0/16 IP address ranges. For more information, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

    ", + "EnableVpcClassicLinkDnsSupport": "

    Enables a VPC to support DNS hostname resolution for ClassicLink. If enabled, the DNS hostname of a linked EC2-Classic instance resolves to its private IP address when addressed from an instance in the VPC to which it's linked. Similarly, the DNS hostname of an instance in a VPC resolves to its private IP address when addressed from a linked EC2-Classic instance. For more information about ClassicLink, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

    ", + "GetConsoleOutput": "

    Gets the console output for the specified instance.

    Instances do not have a physical monitor through which you can view their console output, and they lack physical controls for powering up, rebooting, or shutting down. Those actions are instead provided through the Amazon EC2 API and command line interface.

    Instance console output is buffered and posted shortly after instance boot, reboot, and termination. Amazon EC2 preserves the most recent 64 KB output which is available for at least one hour after the most recent post.

    For Linux instances, the instance console output displays the exact console output that would normally be displayed on a physical monitor attached to a computer. This output is buffered because the instance produces it and then posts it to a store where the instance's owner can retrieve it.

    For Windows instances, the instance console output includes output from the EC2Config service.
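
    A sketch that fetches and decodes the output for a hypothetical instance; the Output field is base64-encoded:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := ec2.New(sess)

	out, err := svc.GetConsoleOutput(&ec2.GetConsoleOutputInput{
		InstanceId: aws.String("i-0123456789abcdef0"), // hypothetical instance ID
	})
	if err != nil {
		log.Fatal(err)
	}

	// Decode the base64-encoded console text before display.
	text, err := base64.StdEncoding.DecodeString(aws.StringValue(out.Output))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", text)
}
```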

    ", + "GetConsoleScreenshot": "

    Retrieves a JPG-format screenshot of a running instance to help with troubleshooting.

    The returned content is base64-encoded.

    ", + "GetPasswordData": "

    Retrieves the encrypted administrator password for an instance running Windows.

    The Windows password is generated at boot if the EC2Config service plugin, Ec2SetPassword, is enabled. This usually only happens the first time an AMI is launched, and then Ec2SetPassword is automatically disabled. The password is not generated for rebundled AMIs unless Ec2SetPassword is enabled before bundling.

    The password is encrypted using the key pair that you specified when you launched the instance. You must provide the corresponding key pair file.

    Password generation and encryption takes a few moments. We recommend that you wait up to 15 minutes after launching an instance before trying to retrieve the generated password.

    ", + "ImportImage": "

    Imports single or multi-volume disk images or EBS snapshots into an Amazon Machine Image (AMI).

    ", + "ImportInstance": "

    Creates an import instance task using metadata from the specified disk image. ImportInstance only supports single-volume VMs. To import multi-volume VMs, use ImportImage. After importing the image, you then upload it using the ec2-import-volume command in the EC2 command line tools. For more information, see Using the Command Line Tools to Import Your Virtual Machine to Amazon EC2 in the Amazon Elastic Compute Cloud User Guide.

    For information about the import manifest referenced by this API action, see VM Import Manifest.

    ", + "ImportKeyPair": "

    Imports the public key from an RSA key pair that you created with a third-party tool. Compare this with CreateKeyPair, in which AWS creates the key pair and gives the keys to you (AWS keeps a copy of the public key). With ImportKeyPair, you create the key pair and give AWS just the public key. The private key is never transferred between you and AWS.

    For more information about key pairs, see Key Pairs in the Amazon Elastic Compute Cloud User Guide.

    ", + "ImportSnapshot": "

    Imports a disk into an EBS snapshot.

    ", + "ImportVolume": "

    Creates an import volume task using metadata from the specified disk image. After importing the image, you then upload it using the ec2-import-volume command in the Amazon EC2 command-line interface (CLI) tools. For more information, see Using the Command Line Tools to Import Your Virtual Machine to Amazon EC2 in the Amazon Elastic Compute Cloud User Guide.

    For information about the import manifest referenced by this API action, see VM Import Manifest.

    ", + "ModifyHosts": "

    Modifies the auto-placement setting of a Dedicated host. When auto-placement is enabled, AWS places instances that you launch with a tenancy of host, but without a specific host ID, onto any available Dedicated host in your account that has auto-placement enabled. When auto-placement is disabled, you must provide a host ID to launch an instance onto a specific host. If no host ID is provided, the instance is launched onto a suitable host that has auto-placement enabled.

    ", + "ModifyIdFormat": "

    Modifies the ID format for the specified resource on a per-region basis. You can specify that resources should receive longer IDs (17-character IDs) when they are created. The following resource types support longer IDs: instance | reservation | snapshot | volume.

    This setting applies to the IAM user who makes the request; it does not apply to the entire AWS account. By default, an IAM user inherits the same settings as the root user. If you're using this action as the root user or as an IAM role that has permission to use this action, then these settings apply to the entire account, unless an IAM user explicitly overrides these settings for themselves. For more information, see Controlling Access to Longer ID Settings in the Amazon Elastic Compute Cloud User Guide.

    Resources created with longer IDs are visible to all IAM users, regardless of these settings and provided that they have permission to use the relevant Describe command for the resource type.
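
    A sketch opting the calling identity in to longer IDs, assuming an *ec2.EC2 client as above; enableLongInstanceIDs is an illustrative name.

        func enableLongInstanceIDs(svc *ec2.EC2) error {
            _, err := svc.ModifyIdFormat(&ec2.ModifyIdFormatInput{
                // One of: instance | reservation | snapshot | volume.
                Resource:   aws.String("instance"),
                UseLongIds: aws.Bool(true),
            })
            return err
        }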

    ", + "ModifyImageAttribute": "

    Modifies the specified attribute of the specified AMI. You can specify only one attribute at a time.

    AWS Marketplace product codes cannot be modified. Images with an AWS Marketplace product code cannot be made public.

    ", + "ModifyInstanceAttribute": "

    Modifies the specified attribute of the specified instance. You can specify only one attribute at a time.

    To modify some attributes, the instance must be stopped. For more information, see Modifying Attributes of a Stopped Instance in the Amazon Elastic Compute Cloud User Guide.
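
    A sketch changing one attribute (the instance type) of a stopped instance, assuming an *ec2.EC2 client as above; resizeInstance and the type value are illustrative.

        func resizeInstance(svc *ec2.EC2, instanceID, newType string) error {
            _, err := svc.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{
                InstanceId: aws.String(instanceID),
                // Only one attribute may be modified per call.
                InstanceType: &ec2.AttributeValue{Value: aws.String(newType)},
            })
            return err
        }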

    ", + "ModifyInstancePlacement": "

    Sets the instance affinity value for a specific stopped instance and modifies the instance tenancy setting.

    Instance affinity is disabled by default. When instance affinity is host and it is not associated with a specific Dedicated host, the next time it is launched it will automatically be associated with the host it lands on. This relationship will persist if the instance is stopped/started, or rebooted.

    You can modify the host ID associated with a stopped instance. If a stopped instance has a new host ID association, the instance will target that host when restarted.

    You can modify the tenancy of a stopped instance with a tenancy of host or dedicated.

    Affinity, hostID, and tenancy are not required parameters, but at least one of them must be specified in the request. Affinity and tenancy can be modified in the same request, but tenancy can only be modified on instances that are stopped.

    ", + "ModifyNetworkInterfaceAttribute": "

    Modifies the specified network interface attribute. You can specify only one attribute at a time.

    ", + "ModifyReservedInstances": "

    Modifies the Availability Zone, instance count, instance type, or network platform (EC2-Classic or EC2-VPC) of your Reserved Instances. The Reserved Instances to be modified must be identical, except for Availability Zone, network platform, and instance type.

    For more information, see Modifying Reserved Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "ModifySnapshotAttribute": "

    Adds or removes permission settings for the specified snapshot. You may add or remove specified AWS account IDs from a snapshot's list of create volume permissions, but you cannot do both in a single API call. If you need to both add and remove account IDs for a snapshot, you must use multiple API calls.

    For more information on modifying snapshot permissions, see Sharing Snapshots in the Amazon Elastic Compute Cloud User Guide.

    Snapshots with AWS Marketplace product codes cannot be made public.
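
    A sketch granting another account create-volume permission on a snapshot, assuming an *ec2.EC2 client as above; a second call with OperationType "remove" would revoke it. shareSnapshot is an illustrative name.

        func shareSnapshot(svc *ec2.EC2, snapshotID, accountID string) error {
            _, err := svc.ModifySnapshotAttribute(&ec2.ModifySnapshotAttributeInput{
                SnapshotId:    aws.String(snapshotID),
                Attribute:     aws.String("createVolumePermission"),
                OperationType: aws.String("add"), // add and remove cannot be combined
                UserIds:       []*string{aws.String(accountID)},
            })
            return err
        }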

    ", + "ModifySpotFleetRequest": "

    Modifies the specified Spot fleet request.

    While the Spot fleet request is being modified, it is in the modifying state.

    To scale up your Spot fleet, increase its target capacity. The Spot fleet launches the additional Spot instances according to the allocation strategy for the Spot fleet request. If the allocation strategy is lowestPrice, the Spot fleet launches instances using the Spot pool with the lowest price. If the allocation strategy is diversified, the Spot fleet distributes the instances across the Spot pools.

    To scale down your Spot fleet, decrease its target capacity. First, the Spot fleet cancels any open bids that exceed the new target capacity. You can request that the Spot fleet terminate Spot instances until the size of the fleet no longer exceeds the new target capacity. If the allocation strategy is lowestPrice, the Spot fleet terminates the instances with the highest price per unit. If the allocation strategy is diversified, the Spot fleet terminates instances across the Spot pools. Alternatively, you can request that the Spot fleet keep the fleet at its current size, but not replace any Spot instances that are interrupted or that you terminate manually.
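
    A sketch scaling a fleet by adjusting its target capacity, assuming an *ec2.EC2 client as above; scaleSpotFleet is an illustrative name.

        func scaleSpotFleet(svc *ec2.EC2, fleetID string, capacity int64) error {
            _, err := svc.ModifySpotFleetRequest(&ec2.ModifySpotFleetRequestInput{
                SpotFleetRequestId: aws.String(fleetID),
                TargetCapacity:     aws.Int64(capacity),
            })
            return err
        }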

    ", + "ModifySubnetAttribute": "

    Modifies a subnet attribute.

    ", + "ModifyVolumeAttribute": "

    Modifies a volume attribute.

    By default, all I/O operations for the volume are suspended when the data on the volume is determined to be potentially inconsistent, to prevent undetectable, latent data corruption. The I/O access to the volume can be resumed by first enabling I/O access and then checking the data consistency on your volume.

    You can change the default behavior to resume I/O operations. We recommend that you change this only for boot volumes or for volumes that are stateless or disposable.

    ", + "ModifyVpcAttribute": "

    Modifies the specified attribute of the specified VPC.

    ", + "ModifyVpcEndpoint": "

    Modifies attributes of a specified VPC endpoint. You can modify the policy associated with the endpoint, and you can add and remove route tables associated with the endpoint.

    ", + "ModifyVpcPeeringConnectionOptions": "

    Modifies the VPC peering connection options on one side of a VPC peering connection. You can do the following:

    • Enable/disable communication over the peering connection between an EC2-Classic instance that's linked to your VPC (using ClassicLink) and instances in the peer VPC.

    • Enable/disable communication over the peering connection between instances in your VPC and an EC2-Classic instance that's linked to the peer VPC.

    If the peered VPCs are in different accounts, each owner must initiate a separate request to enable or disable communication in either direction, depending on whether their VPC was the requester or accepter for the VPC peering connection. If the peered VPCs are in the same account, you can modify the requester and accepter options in the same request. To confirm which VPC is the accepter and requester for a VPC peering connection, use the DescribeVpcPeeringConnections command.

    ", + "MonitorInstances": "

    Enables monitoring for a running instance. For more information about monitoring instances, see Monitoring Your Instances and Volumes in the Amazon Elastic Compute Cloud User Guide.

    ", + "MoveAddressToVpc": "

    Moves an Elastic IP address from the EC2-Classic platform to the EC2-VPC platform. The Elastic IP address must be allocated to your account for more than 24 hours, and it must not be associated with an instance. After the Elastic IP address is moved, it is no longer available for use in the EC2-Classic platform, unless you move it back using the RestoreAddressToClassic request. You cannot move an Elastic IP address that was originally allocated for use in the EC2-VPC platform to the EC2-Classic platform.

    ", + "PurchaseReservedInstancesOffering": "

    Purchases a Reserved Instance for use with your account. With Reserved Instances, you obtain a capacity reservation for a certain instance configuration over a specified period of time and pay a lower hourly rate compared to On-Demand instance pricing.

    Use DescribeReservedInstancesOfferings to get a list of Reserved Instance offerings that match your specifications. After you've purchased a Reserved Instance, you can check for your new Reserved Instance with DescribeReservedInstances.

    For more information, see Reserved Instances and Reserved Instance Marketplace in the Amazon Elastic Compute Cloud User Guide.

    ", + "PurchaseScheduledInstances": "

    Purchases one or more Scheduled Instances with the specified schedule.

    Scheduled Instances enable you to purchase Amazon EC2 compute capacity by the hour for a one-year term. Before you can purchase a Scheduled Instance, you must call DescribeScheduledInstanceAvailability to check for available schedules and obtain a purchase token. After you purchase a Scheduled Instance, you must call RunScheduledInstances during each scheduled time period.

    After you purchase a Scheduled Instance, you can't cancel, modify, or resell your purchase.

    ", + "RebootInstances": "

    Requests a reboot of one or more instances. This operation is asynchronous; it only queues a request to reboot the specified instances. The operation succeeds if the instances are valid and belong to you. Requests to reboot terminated instances are ignored.

    If an instance does not cleanly shut down within four minutes, Amazon EC2 performs a hard reboot.

    For more information about troubleshooting, see Getting Console Output and Rebooting Instances in the Amazon Elastic Compute Cloud User Guide.
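
    A sketch queueing an asynchronous reboot, assuming an *ec2.EC2 client as above; rebootInstances is an illustrative name.

        func rebootInstances(svc *ec2.EC2, ids ...string) error {
            _, err := svc.RebootInstances(&ec2.RebootInstancesInput{
                InstanceIds: aws.StringSlice(ids),
            })
            return err
        }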

    ", + "RegisterImage": "

    Registers an AMI. When you're creating an AMI, this is the final step you must complete before you can launch an instance from the AMI. For more information about creating AMIs, see Creating Your Own AMIs in the Amazon Elastic Compute Cloud User Guide.

    For Amazon EBS-backed instances, CreateImage creates and registers the AMI in a single request, so you don't have to register the AMI yourself.

    You can also use RegisterImage to create an Amazon EBS-backed Linux AMI from a snapshot of a root device volume. For more information, see Launching an Instance from a Snapshot in the Amazon Elastic Compute Cloud User Guide.

    Some Linux distributions, such as Red Hat Enterprise Linux (RHEL) and SUSE Linux Enterprise Server (SLES), use the EC2 billingProduct code associated with an AMI to verify subscription status for package updates. Creating an AMI from an EBS snapshot does not maintain this billing code, and subsequent instances launched from such an AMI will not be able to connect to package update infrastructure.

    Similarly, although you can create a Windows AMI from a snapshot, you can't successfully launch an instance from the AMI.

    To create Windows AMIs or to create AMIs for Linux operating systems that must retain AMI billing codes to work properly, see CreateImage.

    If needed, you can deregister an AMI at any time. Any modifications you make to an AMI backed by an instance store volume invalidate its registration. If you make changes to an image, deregister the previous image and register the new image.

    You can't register an image where a secondary (non-root) snapshot has AWS Marketplace product codes.

    ", + "RejectVpcPeeringConnection": "

    Rejects a VPC peering connection request. The VPC peering connection must be in the pending-acceptance state. Use the DescribeVpcPeeringConnections request to view your outstanding VPC peering connection requests. To delete an active VPC peering connection, or to delete a VPC peering connection request that you initiated, use DeleteVpcPeeringConnection.

    ", + "ReleaseAddress": "

    Releases the specified Elastic IP address.

    After you release an Elastic IP address, it is returned to the IP address pool and might become unavailable to you. Be sure to update your DNS records and any servers or devices that communicate with the address. If you attempt to release an Elastic IP address that you have already released, you'll get an AuthFailure error if the address is already allocated to another AWS account.

    [EC2-Classic, default VPC] Releasing an Elastic IP address automatically disassociates it from any instance that it's associated with. To disassociate an Elastic IP address without releasing it, use DisassociateAddress.

    [Nondefault VPC] You must use DisassociateAddress to disassociate the Elastic IP address before you try to release it. Otherwise, Amazon EC2 returns an error (InvalidIPAddress.InUse).
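
    A sketch for the VPC case, assuming an *ec2.EC2 client as above; EC2-Classic would set PublicIp instead of AllocationId. releaseAddress is an illustrative name.

        func releaseAddress(svc *ec2.EC2, allocationID string) error {
            _, err := svc.ReleaseAddress(&ec2.ReleaseAddressInput{
                AllocationId: aws.String(allocationID), // e.g. "eipalloc-..."
            })
            return err
        }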

    ", + "ReleaseHosts": "

    When you no longer want to use a Dedicated host, it can be released. On-Demand billing is stopped and the host goes into the released state. The host ID of Dedicated hosts that have been released can no longer be specified in another request, e.g., ModifyHosts. You must stop or terminate all instances on a host before it can be released.

    When Dedicated hosts are released, it may take some time for them to stop counting toward your limit, and you may receive capacity errors when trying to allocate new Dedicated hosts. Wait a few minutes, and then try again.

    Released hosts will still appear in a DescribeHosts response.

    ", + "ReplaceNetworkAclAssociation": "

    Changes which network ACL a subnet is associated with. By default, when you create a subnet, it's automatically associated with the default network ACL. For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

    ", + "ReplaceNetworkAclEntry": "

    Replaces an entry (rule) in a network ACL. For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

    ", + "ReplaceRoute": "

    Replaces an existing route within a route table in a VPC. You must provide only one of the following: Internet gateway or virtual private gateway, NAT instance, NAT gateway, VPC peering connection, or network interface.

    For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

    ", + "ReplaceRouteTableAssociation": "

    Changes the route table associated with a given subnet in a VPC. After the operation completes, the subnet uses the routes in the new route table it's associated with. For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

    You can also use ReplaceRouteTableAssociation to change which table is the main route table in the VPC. You just specify the main route table's association ID and the route table to be the new main route table.

    ", + "ReportInstanceStatus": "

    Submits feedback about the status of an instance. The instance must be in the running state. If your experience with the instance differs from the instance status returned by DescribeInstanceStatus, use ReportInstanceStatus to report your experience with the instance. Amazon EC2 collects this information to improve the accuracy of status checks.

    Use of this action does not change the value returned by DescribeInstanceStatus.

    ", + "RequestSpotFleet": "

    Creates a Spot fleet request.

    You can submit a single request that includes multiple launch specifications that vary by instance type, AMI, Availability Zone, or subnet.

    By default, the Spot fleet requests Spot instances in the Spot pool where the price per unit is the lowest. Each launch specification can include its own instance weighting that reflects the value of the instance type to your application workload.

    Alternatively, you can specify that the Spot fleet distribute the target capacity across the Spot pools included in its launch specifications. By ensuring that the Spot instances in your Spot fleet are in different Spot pools, you can improve the availability of your fleet.

    For more information, see Spot Fleet Requests in the Amazon Elastic Compute Cloud User Guide.

    ", + "RequestSpotInstances": "

    Creates a Spot instance request. Spot instances are instances that Amazon EC2 launches when the bid price that you specify exceeds the current Spot price. Amazon EC2 periodically sets the Spot price based on available Spot Instance capacity and current Spot instance requests. For more information, see Spot Instance Requests in the Amazon Elastic Compute Cloud User Guide.
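
    A sketch placing a one-instance Spot request, assuming an *ec2.EC2 client as above; the bid price, AMI, and instance type are placeholders.

        func requestSpot(svc *ec2.EC2, amiID string) (*ec2.RequestSpotInstancesOutput, error) {
            return svc.RequestSpotInstances(&ec2.RequestSpotInstancesInput{
                SpotPrice:     aws.String("0.05"), // maximum hourly bid, in USD
                InstanceCount: aws.Int64(1),
                LaunchSpecification: &ec2.RequestSpotLaunchSpecification{
                    ImageId:      aws.String(amiID),
                    InstanceType: aws.String("m4.large"),
                },
            })
        }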

    ", + "ResetImageAttribute": "

    Resets an attribute of an AMI to its default value.

    The productCodes attribute can't be reset.

    ", + "ResetInstanceAttribute": "

    Resets an attribute of an instance to its default value. To reset the kernel or ramdisk, the instance must be in a stopped state. To reset the sourceDestCheck, the instance can be either running or stopped.

    The sourceDestCheck attribute controls whether source/destination checking is enabled. The default value is true, which means checking is enabled. This value must be false for a NAT instance to perform NAT. For more information, see NAT Instances in the Amazon Virtual Private Cloud User Guide.
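
    A sketch disabling source/destination checking for a NAT instance via ModifyInstanceAttribute, assuming an *ec2.EC2 client as above (ResetInstanceAttribute restores the default of true); disableSourceDestCheck is an illustrative name.

        func disableSourceDestCheck(svc *ec2.EC2, instanceID string) error {
            _, err := svc.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{
                InstanceId:      aws.String(instanceID),
                SourceDestCheck: &ec2.AttributeBooleanValue{Value: aws.Bool(false)},
            })
            return err
        }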

    ", + "ResetNetworkInterfaceAttribute": "

    Resets a network interface attribute. You can specify only one attribute at a time.

    ", + "ResetSnapshotAttribute": "

    Resets permission settings for the specified snapshot.

    For more information on modifying snapshot permissions, see Sharing Snapshots in the Amazon Elastic Compute Cloud User Guide.

    ", + "RestoreAddressToClassic": "

    Restores an Elastic IP address that was previously moved to the EC2-VPC platform back to the EC2-Classic platform. You cannot move an Elastic IP address that was originally allocated for use in EC2-VPC. The Elastic IP address must not be associated with an instance or network interface.

    ", + "RevokeSecurityGroupEgress": "

    [EC2-VPC only] Removes one or more egress rules from a security group for EC2-VPC. This action doesn't apply to security groups for use in EC2-Classic. The values that you specify in the revoke request (for example, ports) must match the existing rule's values for the rule to be revoked.

    Each rule consists of the protocol and the CIDR range or source security group. For the TCP and UDP protocols, you must also specify the destination port or range of ports. For the ICMP protocol, you must also specify the ICMP type and code.

    Rule changes are propagated to instances within the security group as quickly as possible. However, a small delay might occur.

    ", + "RevokeSecurityGroupIngress": "

    Removes one or more ingress rules from a security group. The values that you specify in the revoke request (for example, ports) must match the existing rule's values for the rule to be removed.

    Each rule consists of the protocol and the CIDR range or source security group. For the TCP and UDP protocols, you must also specify the destination port or range of ports. For the ICMP protocol, you must also specify the ICMP type and code.

    Rule changes are propagated to instances within the security group as quickly as possible. However, a small delay might occur.

    ", + "RunInstances": "

    Launches the specified number of instances using an AMI for which you have permissions.

    When you launch an instance, it enters the pending state. After the instance is ready for you, it enters the running state. To check the state of your instance, call DescribeInstances.

    To ensure faster instance launches, break up large requests into smaller batches. For example, create five separate launch requests for 100 instances each instead of one launch request for 500 instances.

    To tag your instance, ensure that it is running, because CreateTags requires a resource ID. For more information about tagging, see Tagging Your Amazon EC2 Resources.

    If you don't specify a security group when launching an instance, Amazon EC2 uses the default security group. For more information, see Security Groups in the Amazon Elastic Compute Cloud User Guide.

    [EC2-VPC only accounts] If you don't specify a subnet in the request, we choose a default subnet from your default VPC for you.

    [EC2-Classic accounts] If you're launching into EC2-Classic and you don't specify an Availability Zone, we choose one for you.

    Linux instances have access to the public key of the key pair at boot. You can use this key to provide secure access to the instance. Amazon EC2 public images use this feature to provide secure access without passwords. For more information, see Key Pairs in the Amazon Elastic Compute Cloud User Guide.

    You can provide optional user data when launching an instance. For more information, see Instance Metadata in the Amazon Elastic Compute Cloud User Guide.

    If any of the AMIs have a product code attached for which the user has not subscribed, RunInstances fails.

    Some instance types can only be launched into a VPC. If you do not have a default VPC, or if you do not specify a subnet ID in the request, RunInstances fails. For more information, see Instance Types Available Only in a VPC.

    For more information about troubleshooting, see What To Do If An Instance Immediately Terminates, and Troubleshooting Connecting to Your Instance in the Amazon Elastic Compute Cloud User Guide.
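
    A sketch launching a single instance and tagging it once its resource ID exists, per the tagging note above; it assumes an *ec2.EC2 client as above, and the AMI, type, and tag values are placeholders.

        func launchAndTag(svc *ec2.EC2, amiID string) (string, error) {
            res, err := svc.RunInstances(&ec2.RunInstancesInput{
                ImageId:      aws.String(amiID),
                InstanceType: aws.String("t2.micro"),
                MinCount:     aws.Int64(1),
                MaxCount:     aws.Int64(1),
            })
            if err != nil {
                return "", err
            }
            id := aws.StringValue(res.Instances[0].InstanceId)
            // CreateTags needs the resource ID returned by RunInstances.
            _, err = svc.CreateTags(&ec2.CreateTagsInput{
                Resources: []*string{aws.String(id)},
                Tags:      []*ec2.Tag{{Key: aws.String("Name"), Value: aws.String("demo")}},
            })
            return id, err
        }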

    ", + "RunScheduledInstances": "

    Launches the specified Scheduled Instances.

    Before you can launch a Scheduled Instance, you must purchase it and obtain an identifier using PurchaseScheduledInstances.

    You must launch a Scheduled Instance during its scheduled time period. You can't stop or reboot a Scheduled Instance, but you can terminate it as needed. If you terminate a Scheduled Instance before the current scheduled time period ends, you can launch it again after a few minutes. For more information, see Scheduled Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "StartInstances": "

    Starts an Amazon EBS-backed instance that you've previously stopped.

    Instances that use Amazon EBS volumes as their root devices can be quickly stopped and started. When an instance is stopped, the compute resources are released and you are not billed for hourly instance usage. However, your root partition Amazon EBS volume remains, continues to persist your data, and you are charged for Amazon EBS volume usage. You can restart your instance at any time. Each time you transition an instance from stopped to started, Amazon EC2 charges a full instance hour, even if transitions happen multiple times within a single hour.

    Before stopping an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM.

    Performing this operation on an instance that uses an instance store as its root device returns an error.

    For more information, see Stopping Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "StopInstances": "

    Stops an Amazon EBS-backed instance.

    We don't charge hourly usage or data transfer fees for a stopped instance; however, your root partition Amazon EBS volume remains, continues to persist your data, and you are charged for Amazon EBS volume usage. Each time you transition an instance from stopped to started, Amazon EC2 charges a full instance hour, even if transitions happen multiple times within a single hour.

    You can't start or stop Spot instances, and you can't stop instance store-backed instances.

    When you stop an instance, we shut it down. You can restart your instance at any time. Before stopping an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM.

    Stopping an instance is different from rebooting or terminating it. For example, when you stop an instance, the root device and any other devices attached to the instance persist. When you terminate an instance, the root device and any other devices attached during the instance launch are automatically deleted. For more information about the differences between rebooting, stopping, and terminating instances, see Instance Lifecycle in the Amazon Elastic Compute Cloud User Guide.

    When you stop an instance, we attempt to shut it down forcibly after a short while. If your instance appears stuck in the stopping state after a period of time, there may be an issue with the underlying host computer. For more information, see Troubleshooting Stopping Your Instance in the Amazon Elastic Compute Cloud User Guide.
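
    A sketch of the stop/start cycle, assuming an *ec2.EC2 client as above; WaitUntilInstanceStopped is the SDK's polling waiter for the stopped state.

        func stopThenStart(svc *ec2.EC2, id string) error {
            ids := []*string{aws.String(id)}
            if _, err := svc.StopInstances(&ec2.StopInstancesInput{InstanceIds: ids}); err != nil {
                return err
            }
            // Block until the instance reaches the stopped state.
            if err := svc.WaitUntilInstanceStopped(&ec2.DescribeInstancesInput{InstanceIds: ids}); err != nil {
                return err
            }
            _, err := svc.StartInstances(&ec2.StartInstancesInput{InstanceIds: ids})
            return err
        }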

    ", + "TerminateInstances": "

    Shuts down one or more instances. This operation is idempotent; if you terminate an instance more than once, each call succeeds.

    Terminated instances remain visible after termination (for approximately one hour).

    By default, Amazon EC2 deletes all EBS volumes that were attached when the instance launched. Volumes attached after instance launch persist.

    You can stop, start, and terminate EBS-backed instances. You can only terminate instance store-backed instances. What happens to an instance differs if you stop it or terminate it. For example, when you stop an instance, the root device and any other devices attached to the instance persist. When you terminate an instance, any attached EBS volumes with the DeleteOnTermination block device mapping parameter set to true are automatically deleted. For more information about the differences between stopping and terminating instances, see Instance Lifecycle in the Amazon Elastic Compute Cloud User Guide.

    For more information about troubleshooting, see Troubleshooting Terminating Your Instance in the Amazon Elastic Compute Cloud User Guide.
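
    A sketch, assuming an *ec2.EC2 client as above; the returned state changes show each instance moving toward the shutting-down and terminated states.

        func terminate(svc *ec2.EC2, ids ...string) (*ec2.TerminateInstancesOutput, error) {
            return svc.TerminateInstances(&ec2.TerminateInstancesInput{
                InstanceIds: aws.StringSlice(ids),
            })
        }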

    ", + "UnassignPrivateIpAddresses": "

    Unassigns one or more secondary private IP addresses from a network interface.

    ", + "UnmonitorInstances": "

    Disables monitoring for a running instance. For more information about monitoring instances, see Monitoring Your Instances and Volumes in the Amazon Elastic Compute Cloud User Guide.

    " + }, + "shapes": { + "AcceptVpcPeeringConnectionRequest": { + "base": "

    Contains the parameters for AcceptVpcPeeringConnection.

    ", + "refs": { + } + }, + "AcceptVpcPeeringConnectionResult": { + "base": "

    Contains the output of AcceptVpcPeeringConnection.

    ", + "refs": { + } + }, + "AccountAttribute": { + "base": "

    Describes an account attribute.

    ", + "refs": { + "AccountAttributeList$member": null + } + }, + "AccountAttributeList": { + "base": null, + "refs": { + "DescribeAccountAttributesResult$AccountAttributes": "

    Information about one or more account attributes.

    " + } + }, + "AccountAttributeName": { + "base": null, + "refs": { + "AccountAttributeNameStringList$member": null + } + }, + "AccountAttributeNameStringList": { + "base": null, + "refs": { + "DescribeAccountAttributesRequest$AttributeNames": "

    One or more account attribute names.

    " + } + }, + "AccountAttributeValue": { + "base": "

    Describes a value of an account attribute.

    ", + "refs": { + "AccountAttributeValueList$member": null + } + }, + "AccountAttributeValueList": { + "base": null, + "refs": { + "AccountAttribute$AttributeValues": "

    One or more values for the account attribute.

    " + } + }, + "ActiveInstance": { + "base": "

    Describes a running instance in a Spot fleet.

    ", + "refs": { + "ActiveInstanceSet$member": null + } + }, + "ActiveInstanceSet": { + "base": null, + "refs": { + "DescribeSpotFleetInstancesResponse$ActiveInstances": "

    The running instances. Note that this list is refreshed periodically and might be out of date.

    " + } + }, + "Address": { + "base": "

    Describes an Elastic IP address.

    ", + "refs": { + "AddressList$member": null + } + }, + "AddressList": { + "base": null, + "refs": { + "DescribeAddressesResult$Addresses": "

    Information about one or more Elastic IP addresses.

    " + } + }, + "Affinity": { + "base": null, + "refs": { + "ModifyInstancePlacementRequest$Affinity": "

    The new affinity setting for the instance.

    " + } + }, + "AllocateAddressRequest": { + "base": "

    Contains the parameters for AllocateAddress.

    ", + "refs": { + } + }, + "AllocateAddressResult": { + "base": "

    Contains the output of AllocateAddress.

    ", + "refs": { + } + }, + "AllocateHostsRequest": { + "base": "

    Contains the parameters for AllocateHosts.

    ", + "refs": { + } + }, + "AllocateHostsResult": { + "base": "

    Contains the output of AllocateHosts.

    ", + "refs": { + } + }, + "AllocationIdList": { + "base": null, + "refs": { + "DescribeAddressesRequest$AllocationIds": "

    [EC2-VPC] One or more allocation IDs.

    Default: Describes all your Elastic IP addresses.

    " + } + }, + "AllocationState": { + "base": null, + "refs": { + "Host$State": "

    The Dedicated host's state.

    " + } + }, + "AllocationStrategy": { + "base": null, + "refs": { + "SpotFleetRequestConfigData$AllocationStrategy": "

    Indicates how to allocate the target capacity across the Spot pools specified by the Spot fleet request. The default is lowestPrice.

    " + } + }, + "ArchitectureValues": { + "base": null, + "refs": { + "Image$Architecture": "

    The architecture of the image.

    ", + "ImportInstanceLaunchSpecification$Architecture": "

    The architecture of the instance.

    ", + "Instance$Architecture": "

    The architecture of the image.

    ", + "RegisterImageRequest$Architecture": "

    The architecture of the AMI.

    Default: For Amazon EBS-backed AMIs, i386. For instance store-backed AMIs, the architecture specified in the manifest file.

    " + } + }, + "AssignPrivateIpAddressesRequest": { + "base": "

    Contains the parameters for AssignPrivateIpAddresses.

    ", + "refs": { + } + }, + "AssociateAddressRequest": { + "base": "

    Contains the parameters for AssociateAddress.

    ", + "refs": { + } + }, + "AssociateAddressResult": { + "base": "

    Contains the output of AssociateAddress.

    ", + "refs": { + } + }, + "AssociateDhcpOptionsRequest": { + "base": "

    Contains the parameters for AssociateDhcpOptions.

    ", + "refs": { + } + }, + "AssociateRouteTableRequest": { + "base": "

    Contains the parameters for AssociateRouteTable.

    ", + "refs": { + } + }, + "AssociateRouteTableResult": { + "base": "

    Contains the output of AssociateRouteTable.

    ", + "refs": { + } + }, + "AttachClassicLinkVpcRequest": { + "base": "

    Contains the parameters for AttachClassicLinkVpc.

    ", + "refs": { + } + }, + "AttachClassicLinkVpcResult": { + "base": "

    Contains the output of AttachClassicLinkVpc.

    ", + "refs": { + } + }, + "AttachInternetGatewayRequest": { + "base": "

    Contains the parameters for AttachInternetGateway.

    ", + "refs": { + } + }, + "AttachNetworkInterfaceRequest": { + "base": "

    Contains the parameters for AttachNetworkInterface.

    ", + "refs": { + } + }, + "AttachNetworkInterfaceResult": { + "base": "

    Contains the output of AttachNetworkInterface.

    ", + "refs": { + } + }, + "AttachVolumeRequest": { + "base": "

    Contains the parameters for AttachVolume.

    ", + "refs": { + } + }, + "AttachVpnGatewayRequest": { + "base": "

    Contains the parameters for AttachVpnGateway.

    ", + "refs": { + } + }, + "AttachVpnGatewayResult": { + "base": "

    Contains the output of AttachVpnGateway.

    ", + "refs": { + } + }, + "AttachmentStatus": { + "base": null, + "refs": { + "EbsInstanceBlockDevice$Status": "

    The attachment state.

    ", + "InstanceNetworkInterfaceAttachment$Status": "

    The attachment state.

    ", + "InternetGatewayAttachment$State": "

    The current state of the attachment.

    ", + "NetworkInterfaceAttachment$Status": "

    The attachment state.

    ", + "VpcAttachment$State": "

    The current state of the attachment.

    " + } + }, + "AttributeBooleanValue": { + "base": "

    The value to use when a resource attribute accepts a Boolean value.

    ", + "refs": { + "DescribeNetworkInterfaceAttributeResult$SourceDestCheck": "

    Indicates whether source/destination checking is enabled.

    ", + "DescribeVolumeAttributeResult$AutoEnableIO": "

    The state of autoEnableIO attribute.

    ", + "DescribeVpcAttributeResult$EnableDnsSupport": "

    Indicates whether DNS resolution is enabled for the VPC. If this attribute is true, the Amazon DNS server resolves DNS hostnames for your instances to their corresponding IP addresses; otherwise, it does not.

    ", + "DescribeVpcAttributeResult$EnableDnsHostnames": "

    Indicates whether the instances launched in the VPC get DNS hostnames. If this attribute is true, instances in the VPC get DNS hostnames; otherwise, they do not.

    ", + "InstanceAttribute$DisableApiTermination": "

    If the value is true, you can't terminate the instance through the Amazon EC2 console, CLI, or API; otherwise, you can.

    ", + "InstanceAttribute$EbsOptimized": "

    Indicates whether the instance is optimized for EBS I/O.

    ", + "InstanceAttribute$SourceDestCheck": "

    Indicates whether source/destination checking is enabled. A value of true means checking is enabled, and false means checking is disabled. This value must be false for a NAT instance to perform NAT.

    ", + "ModifyInstanceAttributeRequest$SourceDestCheck": "

    Specifies whether source/destination checking is enabled. A value of true means that checking is enabled, and false means checking is disabled. This value must be false for a NAT instance to perform NAT.

    ", + "ModifyInstanceAttributeRequest$DisableApiTermination": "

    If the value is true, you can't terminate the instance using the Amazon EC2 console, CLI, or API; otherwise, you can. You cannot use this parameter for Spot Instances.

    ", + "ModifyInstanceAttributeRequest$EbsOptimized": "

    Specifies whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS Optimized instance.

    ", + "ModifyNetworkInterfaceAttributeRequest$SourceDestCheck": "

    Indicates whether source/destination checking is enabled. A value of true means checking is enabled, and false means checking is disabled. This value must be false for a NAT instance to perform NAT. For more information, see NAT Instances in the Amazon Virtual Private Cloud User Guide.

    ", + "ModifySubnetAttributeRequest$MapPublicIpOnLaunch": "

    Specify true to indicate that instances launched into the specified subnet should be assigned a public IP address.

    ", + "ModifyVolumeAttributeRequest$AutoEnableIO": "

    Indicates whether the volume should be auto-enabled for I/O operations.

    ", + "ModifyVpcAttributeRequest$EnableDnsSupport": "

    Indicates whether the DNS resolution is supported for the VPC. If enabled, queries to the Amazon provided DNS server at the 169.254.169.253 IP address, or the reserved IP address at the base of the VPC network range \"plus two\" will succeed. If disabled, the Amazon provided DNS service in the VPC that resolves public DNS hostnames to IP addresses is not enabled.

    You cannot modify the DNS resolution and DNS hostnames attributes in the same request. Use separate requests for each attribute.

    ", + "ModifyVpcAttributeRequest$EnableDnsHostnames": "

    Indicates whether the instances launched in the VPC get DNS hostnames. If enabled, instances in the VPC get DNS hostnames; otherwise, they do not.

    You cannot modify the DNS resolution and DNS hostnames attributes in the same request. Use separate requests for each attribute. You can only enable DNS hostnames if you've enabled DNS support.

    " + } + }, + "AttributeValue": { + "base": "

    The value to use for a resource attribute.

    ", + "refs": { + "DescribeNetworkInterfaceAttributeResult$Description": "

    The description of the network interface.

    ", + "DhcpConfigurationValueList$member": null, + "ImageAttribute$KernelId": "

    The kernel ID.

    ", + "ImageAttribute$RamdiskId": "

    The RAM disk ID.

    ", + "ImageAttribute$Description": "

    A description for the AMI.

    ", + "ImageAttribute$SriovNetSupport": null, + "InstanceAttribute$InstanceType": "

    The instance type.

    ", + "InstanceAttribute$KernelId": "

    The kernel ID.

    ", + "InstanceAttribute$RamdiskId": "

    The RAM disk ID.

    ", + "InstanceAttribute$UserData": "

    The Base64-encoded MIME user data.

    ", + "InstanceAttribute$InstanceInitiatedShutdownBehavior": "

    Indicates whether an instance stops or terminates when you initiate shutdown from the instance (using the operating system command for system shutdown).

    ", + "InstanceAttribute$RootDeviceName": "

    The name of the root device (for example, /dev/sda1 or /dev/xvda).

    ", + "InstanceAttribute$SriovNetSupport": null, + "ModifyImageAttributeRequest$Description": "

    A description for the AMI.

    ", + "ModifyInstanceAttributeRequest$InstanceType": "

    Changes the instance type to the specified value. For more information, see Instance Types. If the instance type is not valid, the error returned is InvalidInstanceAttributeValue.

    ", + "ModifyInstanceAttributeRequest$Kernel": "

    Changes the instance's kernel to the specified value. We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB.

    ", + "ModifyInstanceAttributeRequest$Ramdisk": "

    Changes the instance's RAM disk to the specified value. We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB.

    ", + "ModifyInstanceAttributeRequest$InstanceInitiatedShutdownBehavior": "

    Specifies whether an instance stops or terminates when you initiate shutdown from the instance (using the operating system command for system shutdown).

    ", + "ModifyInstanceAttributeRequest$SriovNetSupport": "

    Set to simple to enable enhanced networking for the instance.

    There is no way to disable enhanced networking at this time.

    This option is supported only for HVM instances. Specifying this option with a PV instance can make it unreachable.

    ", + "ModifyNetworkInterfaceAttributeRequest$Description": "

    A description for the network interface.

    " + } + }, + "AuthorizeSecurityGroupEgressRequest": { + "base": "

    Contains the parameters for AuthorizeSecurityGroupEgress.

    ", + "refs": { + } + }, + "AuthorizeSecurityGroupIngressRequest": { + "base": "

    Contains the parameters for AuthorizeSecurityGroupIngress.

    ", + "refs": { + } + }, + "AutoPlacement": { + "base": null, + "refs": { + "AllocateHostsRequest$AutoPlacement": "

    This is enabled by default. This property allows instances to be automatically placed onto available Dedicated hosts when you launch instances without specifying a host ID.

    Default: Enabled

    ", + "Host$AutoPlacement": "

    Whether auto-placement is on or off.

    ", + "ModifyHostsRequest$AutoPlacement": "

    Specify whether to enable or disable auto-placement.

    " + } + }, + "AvailabilityZone": { + "base": "

    Describes an Availability Zone.

    ", + "refs": { + "AvailabilityZoneList$member": null + } + }, + "AvailabilityZoneList": { + "base": null, + "refs": { + "DescribeAvailabilityZonesResult$AvailabilityZones": "

    Information about one or more Availability Zones.

    " + } + }, + "AvailabilityZoneMessage": { + "base": "

    Describes a message about an Availability Zone.

    ", + "refs": { + "AvailabilityZoneMessageList$member": null + } + }, + "AvailabilityZoneMessageList": { + "base": null, + "refs": { + "AvailabilityZone$Messages": "

    Any messages about the Availability Zone.

    " + } + }, + "AvailabilityZoneState": { + "base": null, + "refs": { + "AvailabilityZone$State": "

    The state of the Availability Zone.

    " + } + }, + "AvailableCapacity": { + "base": "

    The capacity information for instances launched onto the Dedicated host.

    ", + "refs": { + "Host$AvailableCapacity": "

    The number of new instances that can be launched onto the Dedicated host.

    " + } + }, + "AvailableInstanceCapacityList": { + "base": null, + "refs": { + "AvailableCapacity$AvailableInstanceCapacity": "

    The total number of instances that the Dedicated host supports.

    " + } + }, + "BatchState": { + "base": null, + "refs": { + "CancelSpotFleetRequestsSuccessItem$CurrentSpotFleetRequestState": "

    The current state of the Spot fleet request.

    ", + "CancelSpotFleetRequestsSuccessItem$PreviousSpotFleetRequestState": "

    The previous state of the Spot fleet request.

    ", + "SpotFleetRequestConfig$SpotFleetRequestState": "

    The state of the Spot fleet request.

    " + } + }, + "Blob": { + "base": null, + "refs": { + "BlobAttributeValue$Value": null, + "ImportKeyPairRequest$PublicKeyMaterial": "

    The public key. For API calls, the text must be base64-encoded. For command line tools, base64 encoding is performed for you.

    ", + "S3Storage$UploadPolicy": "

    A base64-encoded Amazon S3 upload policy that gives Amazon EC2 permission to upload items into Amazon S3 on your behalf. For command line tools, base64 encoding is performed for you.

    " + } + }, + "BlobAttributeValue": { + "base": null, + "refs": { + "ModifyInstanceAttributeRequest$UserData": "

    Changes the instance's user data to the specified base64-encoded value. For command line tools, base64 encoding is performed for you.

    " + } + }, + "BlockDeviceMapping": { + "base": "

    Describes a block device mapping.

    ", + "refs": { + "BlockDeviceMappingList$member": null, + "BlockDeviceMappingRequestList$member": null + } + }, + "BlockDeviceMappingList": { + "base": null, + "refs": { + "Image$BlockDeviceMappings": "

    Any block device mapping entries.

    ", + "ImageAttribute$BlockDeviceMappings": "

    One or more block device mapping entries.

    ", + "LaunchSpecification$BlockDeviceMappings": "

    One or more block device mapping entries.

    Although you can specify encrypted EBS volumes in this block device mapping for your Spot Instances, these volumes are not encrypted.

    ", + "RequestSpotLaunchSpecification$BlockDeviceMappings": "

    One or more block device mapping entries.

    Although you can specify encrypted EBS volumes in this block device mapping for your Spot Instances, these volumes are not encrypted.

    ", + "SpotFleetLaunchSpecification$BlockDeviceMappings": "

    One or more block device mapping entries.

    " + } + }, + "BlockDeviceMappingRequestList": { + "base": null, + "refs": { + "CreateImageRequest$BlockDeviceMappings": "

    Information about one or more block device mappings.

    ", + "RegisterImageRequest$BlockDeviceMappings": "

    One or more block device mapping entries.

    ", + "RunInstancesRequest$BlockDeviceMappings": "

    The block device mapping.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "AcceptVpcPeeringConnectionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
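
    A sketch of the DryRun pattern described here, assuming an *ec2.EC2 client as above and the SDK's github.com/aws/aws-sdk-go/aws/awserr package; CreateVolume and the Availability Zone are arbitrary examples.

        func canCreateVolume(svc *ec2.EC2) (bool, error) {
            _, err := svc.CreateVolume(&ec2.CreateVolumeInput{
                AvailabilityZone: aws.String("us-east-1a"), // placeholder
                Size:             aws.Int64(8),
                DryRun:           aws.Bool(true),
            })
            if aerr, ok := err.(awserr.Error); ok {
                switch aerr.Code() {
                case "DryRunOperation":
                    return true, nil // authorized; the request was not executed
                case "UnauthorizedOperation":
                    return false, nil
                }
            }
            return false, err
        }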

    ", + "AllocateAddressRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AssignPrivateIpAddressesRequest$AllowReassignment": "

    Indicates whether to allow an IP address that is already assigned to another network interface or instance to be reassigned to the specified network interface.

    ", + "AssociateAddressRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AssociateAddressRequest$AllowReassociation": "

    [EC2-VPC] For a VPC in an EC2-Classic account, specify true to allow an Elastic IP address that is already associated with an instance or network interface to be reassociated with the specified instance or network interface. Otherwise, the operation fails. In a VPC in an EC2-VPC-only account, reassociation is automatic; therefore, you can specify false to ensure the operation fails if the Elastic IP address is already associated with another resource.

    ", + "AssociateDhcpOptionsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AssociateRouteTableRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AttachClassicLinkVpcRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AttachClassicLinkVpcResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "AttachInternetGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AttachNetworkInterfaceRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AttachVolumeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AttachVpnGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AttributeBooleanValue$Value": "

    Valid values are true or false.

    ", + "AuthorizeSecurityGroupEgressRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AuthorizeSecurityGroupIngressRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "BundleInstanceRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CancelBundleTaskRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CancelConversionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CancelImportTaskRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CancelSpotFleetRequestsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CancelSpotFleetRequestsRequest$TerminateInstances": "

    Indicates whether to terminate instances for a Spot fleet request if it is canceled successfully.

    ", + "CancelSpotInstanceRequestsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ClassicLinkDnsSupport$ClassicLinkDnsSupported": "

    Indicates whether ClassicLink DNS support is enabled for the VPC.

    ", + "ConfirmProductInstanceRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ConfirmProductInstanceResult$Return": "

    The return value of the request. Returns true if the specified product code is owned by the requester and associated with the specified instance.

    ", + "CopyImageRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CopyImageRequest$Encrypted": "

    Specifies whether the destination snapshots of the copied image should be encrypted. The default CMK for EBS is used unless a non-default AWS Key Management Service (AWS KMS) CMK is specified with KmsKeyId. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

    ", + "CopySnapshotRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CopySnapshotRequest$Encrypted": "

    Specifies whether the destination snapshot should be encrypted. There is no way to create an unencrypted snapshot copy from an encrypted snapshot; however, you can encrypt a copy of an unencrypted snapshot with this flag. The default CMK for EBS is used unless a non-default AWS Key Management Service (AWS KMS) CMK is specified with KmsKeyId. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateCustomerGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateDhcpOptionsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateImageRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateImageRequest$NoReboot": "

    By default, Amazon EC2 attempts to shut down and reboot the instance before creating the image. If the 'No Reboot' option is set, Amazon EC2 doesn't shut down the instance before creating the image. When this option is used, file system integrity on the created image can't be guaranteed.

    ", + "CreateInternetGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateKeyPairRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateNetworkAclEntryRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateNetworkAclEntryRequest$Egress": "

    Indicates whether this is an egress rule (rule is applied to traffic leaving the subnet).

    ", + "CreateNetworkAclRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateNetworkInterfaceRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreatePlacementGroupRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateRouteRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateRouteResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "CreateRouteTableRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateSecurityGroupRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateSnapshotRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateSpotDatafeedSubscriptionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateSubnetRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateTagsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateVolumeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateVolumeRequest$Encrypted": "

    Specifies whether the volume should be encrypted. Encrypted Amazon EBS volumes may only be attached to instances that support Amazon EBS encryption. Volumes that are created from encrypted snapshots are automatically encrypted. There is no way to create an encrypted volume from an unencrypted snapshot or vice versa. If your AMI uses encrypted volumes, you can only launch it on supported instance types. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

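Since the Encrypted description above carries real constraints (encryption is fixed at creation time, and encrypted volumes attach only to instance types that support EBS encryption), here is a hedged sketch of creating an encrypted volume. The zone, size, and volume type are placeholder values, svc is an *ec2.EC2 client, and imports match the DryRun example earlier:

// createEncryptedVolume is illustrative only; adjust zone, size, and type.
func createEncryptedVolume(svc *ec2.EC2) (*ec2.Volume, error) {
	return svc.CreateVolume(&ec2.CreateVolumeInput{
		AvailabilityZone: aws.String("us-east-1a"),
		Size:             aws.Int64(8), // GiB
		VolumeType:       aws.String("gp2"),
		// Encryption cannot be added to or removed from a volume later.
		Encrypted: aws.Bool(true),
	})
}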
    ", + "CreateVpcEndpointRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateVpcPeeringConnectionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateVpcRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateVpnConnectionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateVpnGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteCustomerGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteDhcpOptionsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteInternetGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteKeyPairRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteNetworkAclEntryRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteNetworkAclEntryRequest$Egress": "

    Indicates whether the rule is an egress rule.

    ", + "DeleteNetworkAclRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteNetworkInterfaceRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeletePlacementGroupRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteRouteRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteRouteTableRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteSecurityGroupRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteSnapshotRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteSpotDatafeedSubscriptionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteSubnetRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteTagsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteVolumeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteVpcEndpointsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteVpcPeeringConnectionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteVpcPeeringConnectionResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "DeleteVpcRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteVpnConnectionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteVpnGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeregisterImageRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeAccountAttributesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeAddressesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeAvailabilityZonesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeBundleTasksRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeClassicLinkInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeConversionTasksRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeCustomerGatewaysRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeDhcpOptionsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeImageAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeImagesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeImportImageTasksRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeImportSnapshotTasksRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeInstanceAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeInstanceStatusRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeInstanceStatusRequest$IncludeAllInstances": "

    When true, includes the health status for all instances. When false, includes the health status for running instances only.

    Default: false

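To make the IncludeAllInstances default concrete, a small hedged sketch (client setup as in the DryRun example): with the flag unset only running instances are reported, while setting it to true also surfaces pending and stopped instances:

// printAllStatuses lists status for every instance, not just running ones.
func printAllStatuses(svc *ec2.EC2) error {
	out, err := svc.DescribeInstanceStatus(&ec2.DescribeInstanceStatusInput{
		IncludeAllInstances: aws.Bool(true), // default is false
	})
	if err != nil {
		return err
	}
	for _, st := range out.InstanceStatuses {
		fmt.Println(aws.StringValue(st.InstanceId), aws.StringValue(st.InstanceState.Name))
	}
	return nil
}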
    ", + "DescribeInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeInternetGatewaysRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeKeyPairsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeMovingAddressesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeNetworkAclsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeNetworkInterfaceAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeNetworkInterfacesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribePlacementGroupsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribePrefixListsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeRegionsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeReservedInstancesOfferingsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeReservedInstancesOfferingsRequest$IncludeMarketplace": "

    Include Reserved Instance Marketplace offerings in the response.

    ", + "DescribeReservedInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeRouteTablesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeScheduledInstanceAvailabilityRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeScheduledInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSecurityGroupReferencesRequest$DryRun": "

    Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSecurityGroupsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSnapshotAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSnapshotsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSpotDatafeedSubscriptionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSpotFleetInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSpotFleetRequestHistoryRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSpotFleetRequestsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSpotInstanceRequestsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSpotPriceHistoryRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeStaleSecurityGroupsRequest$DryRun": "

    Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSubnetsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeTagsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVolumeAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVolumeStatusRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVolumesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVpcAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVpcClassicLinkRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVpcEndpointServicesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVpcEndpointsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVpcPeeringConnectionsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVpcsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVpnConnectionsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVpnGatewaysRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DetachClassicLinkVpcRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DetachClassicLinkVpcResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "DetachInternetGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DetachNetworkInterfaceRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DetachNetworkInterfaceRequest$Force": "

    Specifies whether to force a detachment.

    ", + "DetachVolumeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DetachVolumeRequest$Force": "

    Forces detachment if the previous detachment attempt did not occur cleanly (for example, logging into an instance, unmounting the volume, and detaching normally). This option can lead to data loss or a corrupted file system. Use this option only as a last resort to detach a volume from a failed instance. The instance won't have an opportunity to flush file system caches or file system metadata. If you use this option, you must perform file system check and repair procedures.

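Given the data-loss warning in the Force description above, a hedged sketch that keeps the forced path an explicit, last-resort option; the helper name is hypothetical and the volume ID is a placeholder:

// detachVolume wraps DetachVolume; force skips file system cache and
// metadata flushes, so callers should pass true only for failed instances.
func detachVolume(svc *ec2.EC2, volumeID string, force bool) error {
	_, err := svc.DetachVolume(&ec2.DetachVolumeInput{
		VolumeId: aws.String(volumeID),
		Force:    aws.Bool(force),
	})
	return err
}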
    ", + "DetachVpnGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DisableVpcClassicLinkDnsSupportResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "DisableVpcClassicLinkRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DisableVpcClassicLinkResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "DisassociateAddressRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DisassociateRouteTableRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "EbsBlockDevice$DeleteOnTermination": "

    Indicates whether the EBS volume is deleted on instance termination.

    ", + "EbsBlockDevice$Encrypted": "

    Indicates whether the EBS volume is encrypted. Encrypted Amazon EBS volumes may only be attached to instances that support Amazon EBS encryption.

    ", + "EbsInstanceBlockDevice$DeleteOnTermination": "

    Indicates whether the volume is deleted on instance termination.

    ", + "EbsInstanceBlockDeviceSpecification$DeleteOnTermination": "

    Indicates whether the volume is deleted on instance termination.

    ", + "EnableVolumeIORequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "EnableVpcClassicLinkDnsSupportResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "EnableVpcClassicLinkRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "EnableVpcClassicLinkResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "GetConsoleOutputRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "GetConsoleScreenshotRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "GetConsoleScreenshotRequest$WakeUp": "

    When set to true, acts as keystroke input and wakes up an instance that's in standby or \"sleep\" mode.

    ", + "GetPasswordDataRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "IdFormat$UseLongIds": "

    Indicates whether longer IDs (17-character IDs) are enabled for the resource.

    ", + "Image$Public": "

    Indicates whether the image has public launch permissions. The value is true if this image has public launch permissions or false if it has only implicit and explicit launch permissions.

    ", + "ImportImageRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ImportInstanceLaunchSpecification$Monitoring": "

    Indicates whether monitoring is enabled.

    ", + "ImportInstanceRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ImportKeyPairRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ImportSnapshotRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ImportVolumeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "Instance$SourceDestCheck": "

    Specifies whether to enable an instance launched in a VPC to perform NAT. This controls whether source/destination checking is enabled on the instance. A value of true means checking is enabled, and false means checking is disabled. The value must be false for the instance to perform NAT. For more information, see NAT Instances in the Amazon Virtual Private Cloud User Guide.

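The SourceDestCheck description above implies a concrete step when building a NAT instance: the check must be switched off before the instance may forward traffic. A hedged sketch, with a hypothetical helper name and a placeholder instance ID:

// disableSourceDestCheck prepares an instance to perform NAT; the attribute
// must be false or the instance will drop traffic it did not originate.
func disableSourceDestCheck(svc *ec2.EC2, instanceID string) error {
	_, err := svc.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{
		InstanceId:      aws.String(instanceID),
		SourceDestCheck: &ec2.AttributeBooleanValue{Value: aws.Bool(false)},
	})
	return err
}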
    ", + "Instance$EbsOptimized": "

    Indicates whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS Optimized instance.

    ", + "InstanceNetworkInterface$SourceDestCheck": "

    Indicates whether to validate network traffic to or from this network interface.

    ", + "InstanceNetworkInterfaceAttachment$DeleteOnTermination": "

    Indicates whether the network interface is deleted when the instance is terminated.

    ", + "InstanceNetworkInterfaceSpecification$DeleteOnTermination": "

    If set to true, the interface is deleted when the instance is terminated. You can specify true only if creating a new network interface when launching an instance.

    ", + "InstanceNetworkInterfaceSpecification$AssociatePublicIpAddress": "

    Indicates whether to assign a public IP address to an instance you launch in a VPC. The public IP address can only be assigned to a network interface for eth0, and can only be assigned to a new network interface, not an existing one. You cannot specify more than one network interface in the request. If launching into a default subnet, the default value is true.

    ", + "InstancePrivateIpAddress$Primary": "

    Indicates whether this IP address is the primary private IP address of the network interface.

    ", + "LaunchSpecification$EbsOptimized": "

    Indicates whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS Optimized instance.

    Default: false

    ", + "ModifyIdFormatRequest$UseLongIds": "

    Indicate whether the resource should use longer IDs (17-character IDs).

    ", + "ModifyImageAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ModifyInstanceAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ModifyInstancePlacementResult$Return": "

    Is true if the request succeeds, and an error otherwise.

    ", + "ModifyNetworkInterfaceAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ModifySnapshotAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ModifySpotFleetRequestResponse$Return": "

    Is true if the request succeeds, and an error otherwise.

    ", + "ModifyVolumeAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ModifyVpcEndpointRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ModifyVpcEndpointRequest$ResetPolicy": "

    Specify true to reset the policy document to the default policy. The default policy allows access to the service.

    ", + "ModifyVpcEndpointResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "ModifyVpcPeeringConnectionOptionsRequest$DryRun": "

    Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "MonitorInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "MoveAddressToVpcRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "NetworkAcl$IsDefault": "

    Indicates whether this is the default network ACL for the VPC.

    ", + "NetworkAclEntry$Egress": "

    Indicates whether the rule is an egress rule (applied to traffic leaving the subnet).

    ", + "NetworkInterface$RequesterManaged": "

    Indicates whether the network interface is being managed by AWS.

    ", + "NetworkInterface$SourceDestCheck": "

    Indicates whether traffic to or from the instance is validated.

    ", + "NetworkInterfaceAttachment$DeleteOnTermination": "

    Indicates whether the network interface is deleted when the instance is terminated.

    ", + "NetworkInterfaceAttachmentChanges$DeleteOnTermination": "

    Indicates whether the network interface is deleted when the instance is terminated.

    ", + "NetworkInterfacePrivateIpAddress$Primary": "

    Indicates whether this IP address is the primary private IP address of the network interface.

    ", + "PeeringConnectionOptions$AllowEgressFromLocalClassicLinkToRemoteVpc": "

    If true, enables outbound communication from an EC2-Classic instance that's linked to a local VPC via ClassicLink to instances in a peer VPC.

    ", + "PeeringConnectionOptions$AllowEgressFromLocalVpcToRemoteClassicLink": "

    If true, enables outbound communication from instances in a local VPC to an EC2-Classic instance that's linked to a peer VPC via ClassicLink.

    ", + "PeeringConnectionOptionsRequest$AllowEgressFromLocalClassicLinkToRemoteVpc": "

    If true, enables outbound communication from an EC2-Classic instance that's linked to a local VPC via ClassicLink to instances in a peer VPC.

    ", + "PeeringConnectionOptionsRequest$AllowEgressFromLocalVpcToRemoteClassicLink": "

    If true, enables outbound communication from instances in a local VPC to an EC2-Classic instance that's linked to a peer VPC via ClassicLink.

    ", + "PriceSchedule$Active": "

    The current price schedule, as determined by the term remaining for the Reserved Instance in the listing.

    A specific price schedule is always in effect, but only one price schedule can be active at any time. Take, for example, a Reserved Instance listing that has five months remaining in its term. When you specify price schedules for five months and two months, this means that schedule 1, covering the first three months of the remaining term, will be active during months 5, 4, and 3. Then schedule 2, covering the last two months of the term, will be active for months 2 and 1.

    ", + "PrivateIpAddressSpecification$Primary": "

    Indicates whether the private IP address is the primary private IP address. Only one IP address can be designated as primary.

    ", + "PurchaseReservedInstancesOfferingRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "PurchaseScheduledInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RebootInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RegisterImageRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RejectVpcPeeringConnectionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RejectVpcPeeringConnectionResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "ReleaseAddressRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ReplaceNetworkAclAssociationRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ReplaceNetworkAclEntryRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ReplaceNetworkAclEntryRequest$Egress": "

    Indicates whether to replace the egress rule.

    Default: If no value is specified, we replace the ingress rule.

    ", + "ReplaceRouteRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ReplaceRouteTableAssociationRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ReportInstanceStatusRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RequestSpotFleetRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RequestSpotInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RequestSpotLaunchSpecification$EbsOptimized": "

    Indicates whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS Optimized instance.

    Default: false

    ", + "ReservedInstancesOffering$Marketplace": "

    Indicates whether the offering is available through the Reserved Instance Marketplace (resale) or AWS. If it's a Reserved Instance Marketplace offering, this is true.

    ", + "ResetImageAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ResetInstanceAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ResetNetworkInterfaceAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ResetSnapshotAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RestoreAddressToClassicRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RevokeSecurityGroupEgressRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RevokeSecurityGroupIngressRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RouteTableAssociation$Main": "

    Indicates whether this is the main route table.

    ", + "RunInstancesMonitoringEnabled$Enabled": "

    Indicates whether monitoring is enabled for the instance.

    ", + "RunInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RunInstancesRequest$DisableApiTermination": "

    If you set this parameter to true, you can't terminate the instance using the Amazon EC2 console, CLI, or API; otherwise, you can. If you set this parameter to true and then later want to be able to terminate the instance, you must first change the value of the disableApiTermination attribute to false using ModifyInstanceAttribute. Alternatively, if you set InstanceInitiatedShutdownBehavior to terminate, you can terminate the instance by running the shutdown command from the instance.

    Default: false

    ", + "RunInstancesRequest$EbsOptimized": "

    Indicates whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS-optimized instance.

    Default: false

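The DisableApiTermination description above implies a two-step workflow: launch with the guard enabled, then clear the attribute through ModifyInstanceAttribute before a planned termination. A hedged sketch with hypothetical helper names and placeholder AMI and instance type:

// launchProtected starts one termination-protected, EBS-optimized instance.
func launchProtected(svc *ec2.EC2) (*string, error) {
	res, err := svc.RunInstances(&ec2.RunInstancesInput{
		ImageId:               aws.String("ami-12345678"), // placeholder AMI
		InstanceType:          aws.String("m4.large"),
		MinCount:              aws.Int64(1),
		MaxCount:              aws.Int64(1),
		DisableApiTermination: aws.Bool(true),
		EbsOptimized:          aws.Bool(true), // extra usage charges apply
	})
	if err != nil {
		return nil, err
	}
	return res.Instances[0].InstanceId, nil
}

// allowTermination lifts the guard so TerminateInstances can succeed.
func allowTermination(svc *ec2.EC2, instanceID *string) error {
	_, err := svc.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{
		InstanceId:            instanceID,
		DisableApiTermination: &ec2.AttributeBooleanValue{Value: aws.Bool(false)},
	})
	return err
}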
    ", + "RunScheduledInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ScheduledInstanceRecurrence$OccurrenceRelativeToEnd": "

    Indicates whether the occurrence is relative to the end of the specified week or month.

    ", + "ScheduledInstanceRecurrenceRequest$OccurrenceRelativeToEnd": "

    Indicates whether the occurrence is relative to the end of the specified week or month. You can't specify this value with a daily schedule.

    ", + "ScheduledInstancesEbs$DeleteOnTermination": "

    Indicates whether the volume is deleted on instance termination.

    ", + "ScheduledInstancesEbs$Encrypted": "

    Indicates whether the volume is encrypted. You can attached encrypted volumes only to instances that support them.

    ", + "ScheduledInstancesLaunchSpecification$EbsOptimized": "

    Indicates whether the instances are optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS-optimized instance.

    Default: false

    ", + "ScheduledInstancesMonitoring$Enabled": "

    Indicates whether monitoring is enabled.

    ", + "ScheduledInstancesNetworkInterface$AssociatePublicIpAddress": "

    Indicates whether to assign a public IP address to instances launched in a VPC. The public IP address can only be assigned to a network interface for eth0, and can only be assigned to a new network interface, not an existing one. You cannot specify more than one network interface in the request. If launching into a default subnet, the default value is true.

    ", + "ScheduledInstancesNetworkInterface$DeleteOnTermination": "

    Indicates whether to delete the interface when the instance is terminated.

    ", + "ScheduledInstancesPrivateIpAddressConfig$Primary": "

    Indicates whether this is a primary IP address. Otherwise, this is a secondary IP address.

    ", + "Snapshot$Encrypted": "

    Indicates whether the snapshot is encrypted.

    ", + "SpotFleetLaunchSpecification$EbsOptimized": "

    Indicates whether the instances are optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS Optimized instance.

    Default: false

    ", + "SpotFleetMonitoring$Enabled": "

    Enables monitoring for the instance.

    Default: false

    ", + "SpotFleetRequestConfigData$TerminateInstancesWithExpiration": "

    Indicates whether running Spot instances should be terminated when the Spot fleet request expires.

    ", + "StartInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "StopInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "StopInstancesRequest$Force": "

    Forces the instances to stop. The instances do not have an opportunity to flush file system caches or file system metadata. If you use this option, you must perform file system check and repair procedures. This option is not recommended for Windows instances.

    Default: false

    ", + "Subnet$DefaultForAz": "

    Indicates whether this is the default subnet for the Availability Zone.

    ", + "Subnet$MapPublicIpOnLaunch": "

    Indicates whether instances launched in this subnet receive a public IP address.

    ", + "TerminateInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "UnmonitorInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "Volume$Encrypted": "

    Indicates whether the volume will be encrypted.

    ", + "VolumeAttachment$DeleteOnTermination": "

    Indicates whether the EBS volume is deleted on instance termination.

    ", + "Vpc$IsDefault": "

    Indicates whether the VPC is the default VPC.

    ", + "VpcClassicLink$ClassicLinkEnabled": "

    Indicates whether the VPC is enabled for ClassicLink.

    ", + "VpcPeeringConnectionOptionsDescription$AllowEgressFromLocalClassicLinkToRemoteVpc": "

    Indicates whether a local ClassicLink connection can communicate with the peer VPC over the VPC peering connection.

    ", + "VpcPeeringConnectionOptionsDescription$AllowEgressFromLocalVpcToRemoteClassicLink": "

    Indicates whether a local VPC can communicate with a ClassicLink connection in the peer VPC over the VPC peering connection.

    ", + "VpnConnectionOptions$StaticRoutesOnly": "

    Indicates whether the VPN connection uses static routes only. Static routes must be used for devices that don't support BGP.

    ", + "VpnConnectionOptionsSpecification$StaticRoutesOnly": "

    Indicates whether the VPN connection uses static routes only. Static routes must be used for devices that don't support BGP.

    " + } + }, + "BundleIdStringList": { + "base": null, + "refs": { + "DescribeBundleTasksRequest$BundleIds": "

    One or more bundle task IDs.

    Default: Describes all your bundle tasks.

    " + } + }, + "BundleInstanceRequest": { + "base": "

    Contains the parameters for BundleInstance.

    ", + "refs": { + } + }, + "BundleInstanceResult": { + "base": "

    Contains the output of BundleInstance.

    ", + "refs": { + } + }, + "BundleTask": { + "base": "

    Describes a bundle task.

    ", + "refs": { + "BundleInstanceResult$BundleTask": "

    Information about the bundle task.

    ", + "BundleTaskList$member": null, + "CancelBundleTaskResult$BundleTask": "

    Information about the bundle task.

    " + } + }, + "BundleTaskError": { + "base": "

    Describes an error for BundleInstance.

    ", + "refs": { + "BundleTask$BundleTaskError": "

    If the task fails, a description of the error.

    " + } + }, + "BundleTaskList": { + "base": null, + "refs": { + "DescribeBundleTasksResult$BundleTasks": "

    Information about one or more bundle tasks.

    " + } + }, + "BundleTaskState": { + "base": null, + "refs": { + "BundleTask$State": "

    The state of the task.

    " + } + }, + "CancelBatchErrorCode": { + "base": null, + "refs": { + "CancelSpotFleetRequestsError$Code": "

    The error code.

    " + } + }, + "CancelBundleTaskRequest": { + "base": "

    Contains the parameters for CancelBundleTask.

    ", + "refs": { + } + }, + "CancelBundleTaskResult": { + "base": "

    Contains the output of CancelBundleTask.

    ", + "refs": { + } + }, + "CancelConversionRequest": { + "base": "

    Contains the parameters for CancelConversionTask.

    ", + "refs": { + } + }, + "CancelExportTaskRequest": { + "base": "

    Contains the parameters for CancelExportTask.

    ", + "refs": { + } + }, + "CancelImportTaskRequest": { + "base": "

    Contains the parameters for CancelImportTask.

    ", + "refs": { + } + }, + "CancelImportTaskResult": { + "base": "

    Contains the output for CancelImportTask.

    ", + "refs": { + } + }, + "CancelReservedInstancesListingRequest": { + "base": "

    Contains the parameters for CancelReservedInstancesListing.

    ", + "refs": { + } + }, + "CancelReservedInstancesListingResult": { + "base": "

    Contains the output of CancelReservedInstancesListing.

    ", + "refs": { + } + }, + "CancelSpotFleetRequestsError": { + "base": "

    Describes a Spot fleet error.

    ", + "refs": { + "CancelSpotFleetRequestsErrorItem$Error": "

    The error.

    " + } + }, + "CancelSpotFleetRequestsErrorItem": { + "base": "

    Describes a Spot fleet request that was not successfully canceled.

    ", + "refs": { + "CancelSpotFleetRequestsErrorSet$member": null + } + }, + "CancelSpotFleetRequestsErrorSet": { + "base": null, + "refs": { + "CancelSpotFleetRequestsResponse$UnsuccessfulFleetRequests": "

    Information about the Spot fleet requests that are not successfully canceled.

    " + } + }, + "CancelSpotFleetRequestsRequest": { + "base": "

    Contains the parameters for CancelSpotFleetRequests.

    ", + "refs": { + } + }, + "CancelSpotFleetRequestsResponse": { + "base": "

    Contains the output of CancelSpotFleetRequests.

    ", + "refs": { + } + }, + "CancelSpotFleetRequestsSuccessItem": { + "base": "

    Describes a Spot fleet request that was successfully canceled.

    ", + "refs": { + "CancelSpotFleetRequestsSuccessSet$member": null + } + }, + "CancelSpotFleetRequestsSuccessSet": { + "base": null, + "refs": { + "CancelSpotFleetRequestsResponse$SuccessfulFleetRequests": "

    Information about the Spot fleet requests that are successfully canceled.
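
    The success and error sets above are returned together from one cancellation call. A sketch in aws-sdk-go that reports both (the function name is illustrative; `svc` is an *ec2.EC2 client as set up in the earlier DryRun sketch):

```go
package fleetexample

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// cancelFleets cancels the given Spot fleet requests and reports which
// cancellations succeeded and which failed.
func cancelFleets(svc *ec2.EC2, ids []string) error {
	out, err := svc.CancelSpotFleetRequests(&ec2.CancelSpotFleetRequestsInput{
		SpotFleetRequestIds: aws.StringSlice(ids),
		TerminateInstances:  aws.Bool(true), // also terminate the fleets' instances
	})
	if err != nil {
		return err
	}
	for _, ok := range out.SuccessfulFleetRequests {
		fmt.Println("canceled:", aws.StringValue(ok.SpotFleetRequestId))
	}
	for _, bad := range out.UnsuccessfulFleetRequests {
		fmt.Printf("failed: %s (%s)\n",
			aws.StringValue(bad.SpotFleetRequestId),
			aws.StringValue(bad.Error.Code))
	}
	return nil
}
```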

    " + } + }, + "CancelSpotInstanceRequestState": { + "base": null, + "refs": { + "CancelledSpotInstanceRequest$State": "

    The state of the Spot instance request.

    " + } + }, + "CancelSpotInstanceRequestsRequest": { + "base": "

    Contains the parameters for CancelSpotInstanceRequests.

    ", + "refs": { + } + }, + "CancelSpotInstanceRequestsResult": { + "base": "

    Contains the output of CancelSpotInstanceRequests.

    ", + "refs": { + } + }, + "CancelledSpotInstanceRequest": { + "base": "

    Describes a request to cancel a Spot instance.

    ", + "refs": { + "CancelledSpotInstanceRequestList$member": null + } + }, + "CancelledSpotInstanceRequestList": { + "base": null, + "refs": { + "CancelSpotInstanceRequestsResult$CancelledSpotInstanceRequests": "

    One or more Spot instance requests.

    " + } + }, + "ClassicLinkDnsSupport": { + "base": "

    Describes the ClassicLink DNS support status of a VPC.

    ", + "refs": { + "ClassicLinkDnsSupportList$member": null + } + }, + "ClassicLinkDnsSupportList": { + "base": null, + "refs": { + "DescribeVpcClassicLinkDnsSupportResult$Vpcs": "

    Information about the ClassicLink DNS support status of the VPCs.

    " + } + }, + "ClassicLinkInstance": { + "base": "

    Describes a linked EC2-Classic instance.

    ", + "refs": { + "ClassicLinkInstanceList$member": null + } + }, + "ClassicLinkInstanceList": { + "base": null, + "refs": { + "DescribeClassicLinkInstancesResult$Instances": "

    Information about one or more linked EC2-Classic instances.

    " + } + }, + "ClientData": { + "base": "

    Describes the client-specific data.

    ", + "refs": { + "ImportImageRequest$ClientData": "

    The client-specific data.

    ", + "ImportSnapshotRequest$ClientData": "

    The client-specific data.

    " + } + }, + "ConfirmProductInstanceRequest": { + "base": "

    Contains the parameters for ConfirmProductInstance.

    ", + "refs": { + } + }, + "ConfirmProductInstanceResult": { + "base": "

    Contains the output of ConfirmProductInstance.

    ", + "refs": { + } + }, + "ContainerFormat": { + "base": null, + "refs": { + "ExportToS3Task$ContainerFormat": "

    The container format used to combine disk images with metadata (such as OVF). If absent, only the disk image is exported.

    ", + "ExportToS3TaskSpecification$ContainerFormat": "

    The container format used to combine disk images with metadata (such as OVF). If absent, only the disk image is exported.

    " + } + }, + "ConversionIdStringList": { + "base": null, + "refs": { + "DescribeConversionTasksRequest$ConversionTaskIds": "

    One or more conversion task IDs.

    " + } + }, + "ConversionTask": { + "base": "

    Describes a conversion task.

    ", + "refs": { + "DescribeConversionTaskList$member": null, + "ImportInstanceResult$ConversionTask": "

    Information about the conversion task.

    ", + "ImportVolumeResult$ConversionTask": "

    Information about the conversion task.

    " + } + }, + "ConversionTaskState": { + "base": null, + "refs": { + "ConversionTask$State": "

    The state of the conversion task.

    " + } + }, + "CopyImageRequest": { + "base": "

    Contains the parameters for CopyImage.

    ", + "refs": { + } + }, + "CopyImageResult": { + "base": "

    Contains the output of CopyImage.

    ", + "refs": { + } + }, + "CopySnapshotRequest": { + "base": "

    Contains the parameters for CopySnapshot.

    ", + "refs": { + } + }, + "CopySnapshotResult": { + "base": "

    Contains the output of CopySnapshot.

    ", + "refs": { + } + }, + "CreateCustomerGatewayRequest": { + "base": "

    Contains the parameters for CreateCustomerGateway.

    ", + "refs": { + } + }, + "CreateCustomerGatewayResult": { + "base": "

    Contains the output of CreateCustomerGateway.

    ", + "refs": { + } + }, + "CreateDhcpOptionsRequest": { + "base": "

    Contains the parameters for CreateDhcpOptions.

    ", + "refs": { + } + }, + "CreateDhcpOptionsResult": { + "base": "

    Contains the output of CreateDhcpOptions.

    ", + "refs": { + } + }, + "CreateFlowLogsRequest": { + "base": "

    Contains the parameters for CreateFlowLogs.

    ", + "refs": { + } + }, + "CreateFlowLogsResult": { + "base": "

    Contains the output of CreateFlowLogs.

    ", + "refs": { + } + }, + "CreateImageRequest": { + "base": "

    Contains the parameters for CreateImage.

    ", + "refs": { + } + }, + "CreateImageResult": { + "base": "

    Contains the output of CreateImage.

    ", + "refs": { + } + }, + "CreateInstanceExportTaskRequest": { + "base": "

    Contains the parameters for CreateInstanceExportTask.

    ", + "refs": { + } + }, + "CreateInstanceExportTaskResult": { + "base": "

    Contains the output for CreateInstanceExportTask.

    ", + "refs": { + } + }, + "CreateInternetGatewayRequest": { + "base": "

    Contains the parameters for CreateInternetGateway.

    ", + "refs": { + } + }, + "CreateInternetGatewayResult": { + "base": "

    Contains the output of CreateInternetGateway.

    ", + "refs": { + } + }, + "CreateKeyPairRequest": { + "base": "

    Contains the parameters for CreateKeyPair.

    ", + "refs": { + } + }, + "CreateNatGatewayRequest": { + "base": "

    Contains the parameters for CreateNatGateway.

    ", + "refs": { + } + }, + "CreateNatGatewayResult": { + "base": "

    Contains the output of CreateNatGateway.

    ", + "refs": { + } + }, + "CreateNetworkAclEntryRequest": { + "base": "

    Contains the parameters for CreateNetworkAclEntry.

    ", + "refs": { + } + }, + "CreateNetworkAclRequest": { + "base": "

    Contains the parameters for CreateNetworkAcl.

    ", + "refs": { + } + }, + "CreateNetworkAclResult": { + "base": "

    Contains the output of CreateNetworkAcl.

    ", + "refs": { + } + }, + "CreateNetworkInterfaceRequest": { + "base": "

    Contains the parameters for CreateNetworkInterface.

    ", + "refs": { + } + }, + "CreateNetworkInterfaceResult": { + "base": "

    Contains the output of CreateNetworkInterface.

    ", + "refs": { + } + }, + "CreatePlacementGroupRequest": { + "base": "

    Contains the parameters for CreatePlacementGroup.

    ", + "refs": { + } + }, + "CreateReservedInstancesListingRequest": { + "base": "

    Contains the parameters for CreateReservedInstancesListing.

    ", + "refs": { + } + }, + "CreateReservedInstancesListingResult": { + "base": "

    Contains the output of CreateReservedInstancesListing.

    ", + "refs": { + } + }, + "CreateRouteRequest": { + "base": "

    Contains the parameters for CreateRoute.

    ", + "refs": { + } + }, + "CreateRouteResult": { + "base": "

    Contains the output of CreateRoute.

    ", + "refs": { + } + }, + "CreateRouteTableRequest": { + "base": "

    Contains the parameters for CreateRouteTable.

    ", + "refs": { + } + }, + "CreateRouteTableResult": { + "base": "

    Contains the output of CreateRouteTable.

    ", + "refs": { + } + }, + "CreateSecurityGroupRequest": { + "base": "

    Contains the parameters for CreateSecurityGroup.

    ", + "refs": { + } + }, + "CreateSecurityGroupResult": { + "base": "

    Contains the output of CreateSecurityGroup.

    ", + "refs": { + } + }, + "CreateSnapshotRequest": { + "base": "

    Contains the parameters for CreateSnapshot.

    ", + "refs": { + } + }, + "CreateSpotDatafeedSubscriptionRequest": { + "base": "

    Contains the parameters for CreateSpotDatafeedSubscription.

    ", + "refs": { + } + }, + "CreateSpotDatafeedSubscriptionResult": { + "base": "

    Contains the output of CreateSpotDatafeedSubscription.

    ", + "refs": { + } + }, + "CreateSubnetRequest": { + "base": "

    Contains the parameters for CreateSubnet.

    ", + "refs": { + } + }, + "CreateSubnetResult": { + "base": "

    Contains the output of CreateSubnet.

    ", + "refs": { + } + }, + "CreateTagsRequest": { + "base": "

    Contains the parameters for CreateTags.

    ", + "refs": { + } + }, + "CreateVolumePermission": { + "base": "

    Describes the user or group to be added or removed from the permissions for a volume.

    ", + "refs": { + "CreateVolumePermissionList$member": null + } + }, + "CreateVolumePermissionList": { + "base": null, + "refs": { + "CreateVolumePermissionModifications$Add": "

    Adds a specific AWS account ID or group to a volume's list of create volume permissions.

    ", + "CreateVolumePermissionModifications$Remove": "

    Removes a specific AWS account ID or group from a volume's list of create volume permissions.

    ", + "DescribeSnapshotAttributeResult$CreateVolumePermissions": "

    A list of permissions for creating volumes from the snapshot.

    " + } + }, + "CreateVolumePermissionModifications": { + "base": "

    Describes modifications to the permissions for a volume.

    ", + "refs": { + "ModifySnapshotAttributeRequest$CreateVolumePermission": "

    A JSON representation of the snapshot attribute modification.
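
    The Add/Remove lists above map directly onto ModifySnapshotAttribute in aws-sdk-go. A sketch, assuming a placeholder snapshot ID and AWS account ID (the function name is illustrative):

```go
package snapexample

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// shareSnapshot grants the given account create-volume permission on a
// snapshot; populating Remove instead revokes it in the same shape.
func shareSnapshot(svc *ec2.EC2, snapshotID, accountID string) error {
	_, err := svc.ModifySnapshotAttribute(&ec2.ModifySnapshotAttributeInput{
		SnapshotId: aws.String(snapshotID),
		CreateVolumePermission: &ec2.CreateVolumePermissionModifications{
			Add: []*ec2.CreateVolumePermission{{UserId: aws.String(accountID)}},
		},
	})
	return err
}
```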

    " + } + }, + "CreateVolumeRequest": { + "base": "

    Contains the parameters for CreateVolume.

    ", + "refs": { + } + }, + "CreateVpcEndpointRequest": { + "base": "

    Contains the parameters for CreateVpcEndpoint.

    ", + "refs": { + } + }, + "CreateVpcEndpointResult": { + "base": "

    Contains the output of CreateVpcEndpoint.

    ", + "refs": { + } + }, + "CreateVpcPeeringConnectionRequest": { + "base": "

    Contains the parameters for CreateVpcPeeringConnection.

    ", + "refs": { + } + }, + "CreateVpcPeeringConnectionResult": { + "base": "

    Contains the output of CreateVpcPeeringConnection.

    ", + "refs": { + } + }, + "CreateVpcRequest": { + "base": "

    Contains the parameters for CreateVpc.

    ", + "refs": { + } + }, + "CreateVpcResult": { + "base": "

    Contains the output of CreateVpc.

    ", + "refs": { + } + }, + "CreateVpnConnectionRequest": { + "base": "

    Contains the parameters for CreateVpnConnection.

    ", + "refs": { + } + }, + "CreateVpnConnectionResult": { + "base": "

    Contains the output of CreateVpnConnection.

    ", + "refs": { + } + }, + "CreateVpnConnectionRouteRequest": { + "base": "

    Contains the parameters for CreateVpnConnectionRoute.

    ", + "refs": { + } + }, + "CreateVpnGatewayRequest": { + "base": "

    Contains the parameters for CreateVpnGateway.

    ", + "refs": { + } + }, + "CreateVpnGatewayResult": { + "base": "

    Contains the output of CreateVpnGateway.

    ", + "refs": { + } + }, + "CurrencyCodeValues": { + "base": null, + "refs": { + "PriceSchedule$CurrencyCode": "

    The currency for transacting the Reserved Instance resale. At this time, the only supported currency is USD.

    ", + "PriceScheduleSpecification$CurrencyCode": "

    The currency for transacting the Reserved Instance resale. At this time, the only supported currency is USD.

    ", + "ReservedInstanceLimitPrice$CurrencyCode": "

    The currency in which the limitPrice amount is specified. At this time, the only supported currency is USD.

    ", + "ReservedInstances$CurrencyCode": "

    The currency of the Reserved Instance. It's specified using ISO 4217 standard currency codes. At this time, the only supported currency is USD.

    ", + "ReservedInstancesOffering$CurrencyCode": "

    The currency of the Reserved Instance offering you are purchasing. It's specified using ISO 4217 standard currency codes. At this time, the only supported currency is USD.

    " + } + }, + "CustomerGateway": { + "base": "

    Describes a customer gateway.

    ", + "refs": { + "CreateCustomerGatewayResult$CustomerGateway": "

    Information about the customer gateway.

    ", + "CustomerGatewayList$member": null + } + }, + "CustomerGatewayIdStringList": { + "base": null, + "refs": { + "DescribeCustomerGatewaysRequest$CustomerGatewayIds": "

    One or more customer gateway IDs.

    Default: Describes all your customer gateways.

    " + } + }, + "CustomerGatewayList": { + "base": null, + "refs": { + "DescribeCustomerGatewaysResult$CustomerGateways": "

    Information about one or more customer gateways.

    " + } + }, + "DatafeedSubscriptionState": { + "base": null, + "refs": { + "SpotDatafeedSubscription$State": "

    The state of the Spot instance data feed subscription.

    " + } + }, + "DateTime": { + "base": null, + "refs": { + "BundleTask$StartTime": "

    The time this task started.

    ", + "BundleTask$UpdateTime": "

    The time of the most recent update for the task.

    ", + "ClientData$UploadStart": "

    The time that the disk upload starts.

    ", + "ClientData$UploadEnd": "

    The time that the disk upload ends.

    ", + "DescribeSpotFleetRequestHistoryRequest$StartTime": "

    The starting date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).

    ", + "DescribeSpotFleetRequestHistoryResponse$StartTime": "

    The starting date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).

    ", + "DescribeSpotFleetRequestHistoryResponse$LastEvaluatedTime": "

    The last date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). All records up to this time were retrieved.

    If nextToken indicates that there are more results, this value is not present.
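
    StartTime, LastEvaluatedTime, and nextToken work together when paging through fleet history. A sketch of the loop in aws-sdk-go (function name and printed fields are illustrative):

```go
package spothistory

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// fleetHistory pages through a Spot fleet's event history from a start time,
// following NextToken until the service reports LastEvaluatedTime.
func fleetHistory(svc *ec2.EC2, fleetID string, since time.Time) error {
	input := &ec2.DescribeSpotFleetRequestHistoryInput{
		SpotFleetRequestId: aws.String(fleetID),
		StartTime:          aws.Time(since),
	}
	for {
		out, err := svc.DescribeSpotFleetRequestHistory(input)
		if err != nil {
			return err
		}
		for _, rec := range out.HistoryRecords {
			fmt.Println(aws.TimeValue(rec.Timestamp), aws.StringValue(rec.EventType))
		}
		if out.NextToken == nil {
			// No more pages: LastEvaluatedTime marks how far the history was read.
			fmt.Println("read up to", aws.TimeValue(out.LastEvaluatedTime))
			return nil
		}
		input.NextToken = out.NextToken
	}
}
```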

    ", + "DescribeSpotPriceHistoryRequest$StartTime": "

    The date and time, up to the past 90 days, from which to start retrieving the price history data, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).

    ", + "DescribeSpotPriceHistoryRequest$EndTime": "

    The date and time, up to the current date, from which to stop retrieving the price history data, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).
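
    These StartTime/EndTime bounds take Go time.Time values via aws.Time. A sketch fetching one day of price history (the instance type and product description are illustrative choices):

```go
package spotprice

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// lastDayPrices fetches the past 24 hours of m4.large Linux Spot prices.
func lastDayPrices(svc *ec2.EC2) error {
	now := time.Now().UTC()
	out, err := svc.DescribeSpotPriceHistory(&ec2.DescribeSpotPriceHistoryInput{
		StartTime:           aws.Time(now.Add(-24 * time.Hour)),
		EndTime:             aws.Time(now),
		InstanceTypes:       []*string{aws.String("m4.large")},
		ProductDescriptions: []*string{aws.String("Linux/UNIX")},
	})
	if err != nil {
		return err
	}
	for _, p := range out.SpotPriceHistory {
		fmt.Println(aws.TimeValue(p.Timestamp), aws.StringValue(p.SpotPrice))
	}
	return nil
}
```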

    ", + "EbsInstanceBlockDevice$AttachTime": "

    The time stamp when the attachment initiated.

    ", + "FlowLog$CreationTime": "

    The date and time the flow log was created.

    ", + "GetConsoleOutputResult$Timestamp": "

    The time the output was last updated.

    ", + "GetPasswordDataResult$Timestamp": "

    The time the data was last updated.

    ", + "HistoryRecord$Timestamp": "

    The date and time of the event, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).

    ", + "IdFormat$Deadline": "

    The date in UTC at which you are permanently switched over to using longer IDs. If a deadline is not yet available for this resource type, this field is not returned.

    ", + "Instance$LaunchTime": "

    The time the instance was launched.

    ", + "InstanceNetworkInterfaceAttachment$AttachTime": "

    The time stamp when the attachment initiated.

    ", + "InstanceStatusDetails$ImpairedSince": "

    The time when a status check failed. For an instance that was launched and impaired, this is the time when the instance was launched.

    ", + "InstanceStatusEvent$NotBefore": "

    The earliest scheduled start time for the event.

    ", + "InstanceStatusEvent$NotAfter": "

    The latest scheduled end time for the event.

    ", + "NatGateway$CreateTime": "

    The date and time the NAT gateway was created.

    ", + "NatGateway$DeleteTime": "

    The date and time the NAT gateway was deleted, if applicable.

    ", + "NetworkInterfaceAttachment$AttachTime": "

    The timestamp indicating when the attachment initiated.

    ", + "ProvisionedBandwidth$RequestTime": "

    Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

    ", + "ProvisionedBandwidth$ProvisionTime": "

    Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

    ", + "ReportInstanceStatusRequest$StartTime": "

    The time at which the reported instance health state began.

    ", + "ReportInstanceStatusRequest$EndTime": "

    The time at which the reported instance health state ended.

    ", + "RequestSpotInstancesRequest$ValidFrom": "

    The start date of the request. If this is a one-time request, the request becomes active at this date and time and remains active until all instances launch, the request expires, or the request is canceled. If the request is persistent, the request becomes active at this date and time and remains active until it expires or is canceled.

    Default: The request is effective indefinitely.

    ", + "RequestSpotInstancesRequest$ValidUntil": "

    The end date of the request. If this is a one-time request, the request remains active until all instances launch, the request is canceled, or this date is reached. If the request is persistent, it remains active until it is canceled or this date and time is reached.

    Default: The request is effective indefinitely.

    ", + "ReservedInstances$Start": "

    The date and time the Reserved Instance started.

    ", + "ReservedInstances$End": "

    The time when the Reserved Instance expires.

    ", + "ReservedInstancesListing$CreateDate": "

    The time the listing was created.

    ", + "ReservedInstancesListing$UpdateDate": "

    The last modified timestamp of the listing.

    ", + "ReservedInstancesModification$CreateDate": "

    The time when the modification request was created.

    ", + "ReservedInstancesModification$UpdateDate": "

    The time when the modification request was last updated.

    ", + "ReservedInstancesModification$EffectiveDate": "

    The time for the modification to become effective.

    ", + "ScheduledInstance$PreviousSlotEndTime": "

    The time that the previous schedule ended or will end.

    ", + "ScheduledInstance$NextSlotStartTime": "

    The time for the next schedule to start.

    ", + "ScheduledInstance$TermStartDate": "

    The start date for the Scheduled Instance.

    ", + "ScheduledInstance$TermEndDate": "

    The end date for the Scheduled Instance.

    ", + "ScheduledInstance$CreateDate": "

    The date when the Scheduled Instance was purchased.

    ", + "ScheduledInstanceAvailability$FirstSlotStartTime": "

    The time period for the first schedule to start.

    ", + "SlotDateTimeRangeRequest$EarliestTime": "

    The earliest date and time, in UTC, for the Scheduled Instance to start.

    ", + "SlotDateTimeRangeRequest$LatestTime": "

    The latest date and time, in UTC, for the Scheduled Instance to start. This value must be later than or equal to the earliest date and at most three months in the future.

    ", + "SlotStartTimeRangeRequest$EarliestTime": "

    The earliest date and time, in UTC, for the Scheduled Instance to start.

    ", + "SlotStartTimeRangeRequest$LatestTime": "

    The latest date and time, in UTC, for the Scheduled Instance to start.

    ", + "Snapshot$StartTime": "

    The time stamp when the snapshot was initiated.

    ", + "SpotFleetRequestConfig$CreateTime": "

    The creation date and time of the request.

    ", + "SpotFleetRequestConfigData$ValidFrom": "

    The start date and time of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). The default is to start fulfilling the request immediately.

    ", + "SpotFleetRequestConfigData$ValidUntil": "

    The end date and time of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). At this point, no new Spot instance requests are placed or enabled to fulfill the request.

    ", + "SpotInstanceRequest$ValidFrom": "

    The start date of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). The request becomes active at this date and time.

    ", + "SpotInstanceRequest$ValidUntil": "

    The end date of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). If this is a one-time request, it remains active until all instances launch, the request is canceled, or this date is reached. If the request is persistent, it remains active until it is canceled or this date is reached.

    ", + "SpotInstanceRequest$CreateTime": "

    The date and time when the Spot instance request was created, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).

    ", + "SpotInstanceStatus$UpdateTime": "

    The date and time of the most recent status update, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).

    ", + "SpotPrice$Timestamp": "

    The date and time the request was created, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).

    ", + "VgwTelemetry$LastStatusChange": "

    The date and time of the last change in status.

    ", + "Volume$CreateTime": "

    The time stamp when volume creation was initiated.

    ", + "VolumeAttachment$AttachTime": "

    The time stamp when the attachment initiated.

    ", + "VolumeStatusEvent$NotBefore": "

    The earliest start time of the event.

    ", + "VolumeStatusEvent$NotAfter": "

    The latest end time of the event.

    ", + "VpcEndpoint$CreationTimestamp": "

    The date and time the VPC endpoint was created.

    ", + "VpcPeeringConnection$ExpirationTime": "

    The time that an unaccepted VPC peering connection will expire.

    " + } + }, + "DeleteCustomerGatewayRequest": { + "base": "

    Contains the parameters for DeleteCustomerGateway.

    ", + "refs": { + } + }, + "DeleteDhcpOptionsRequest": { + "base": "

    Contains the parameters for DeleteDhcpOptions.

    ", + "refs": { + } + }, + "DeleteFlowLogsRequest": { + "base": "

    Contains the parameters for DeleteFlowLogs.

    ", + "refs": { + } + }, + "DeleteFlowLogsResult": { + "base": "

    Contains the output of DeleteFlowLogs.

    ", + "refs": { + } + }, + "DeleteInternetGatewayRequest": { + "base": "

    Contains the parameters for DeleteInternetGateway.

    ", + "refs": { + } + }, + "DeleteKeyPairRequest": { + "base": "

    Contains the parameters for DeleteKeyPair.

    ", + "refs": { + } + }, + "DeleteNatGatewayRequest": { + "base": "

    Contains the parameters for DeleteNatGateway.

    ", + "refs": { + } + }, + "DeleteNatGatewayResult": { + "base": "

    Contains the output of DeleteNatGateway.

    ", + "refs": { + } + }, + "DeleteNetworkAclEntryRequest": { + "base": "

    Contains the parameters for DeleteNetworkAclEntry.

    ", + "refs": { + } + }, + "DeleteNetworkAclRequest": { + "base": "

    Contains the parameters for DeleteNetworkAcl.

    ", + "refs": { + } + }, + "DeleteNetworkInterfaceRequest": { + "base": "

    Contains the parameters for DeleteNetworkInterface.

    ", + "refs": { + } + }, + "DeletePlacementGroupRequest": { + "base": "

    Contains the parameters for DeletePlacementGroup.

    ", + "refs": { + } + }, + "DeleteRouteRequest": { + "base": "

    Contains the parameters for DeleteRoute.

    ", + "refs": { + } + }, + "DeleteRouteTableRequest": { + "base": "

    Contains the parameters for DeleteRouteTable.

    ", + "refs": { + } + }, + "DeleteSecurityGroupRequest": { + "base": "

    Contains the parameters for DeleteSecurityGroup.

    ", + "refs": { + } + }, + "DeleteSnapshotRequest": { + "base": "

    Contains the parameters for DeleteSnapshot.

    ", + "refs": { + } + }, + "DeleteSpotDatafeedSubscriptionRequest": { + "base": "

    Contains the parameters for DeleteSpotDatafeedSubscription.

    ", + "refs": { + } + }, + "DeleteSubnetRequest": { + "base": "

    Contains the parameters for DeleteSubnet.

    ", + "refs": { + } + }, + "DeleteTagsRequest": { + "base": "

    Contains the parameters for DeleteTags.

    ", + "refs": { + } + }, + "DeleteVolumeRequest": { + "base": "

    Contains the parameters for DeleteVolume.

    ", + "refs": { + } + }, + "DeleteVpcEndpointsRequest": { + "base": "

    Contains the parameters for DeleteVpcEndpoints.

    ", + "refs": { + } + }, + "DeleteVpcEndpointsResult": { + "base": "

    Contains the output of DeleteVpcEndpoints.

    ", + "refs": { + } + }, + "DeleteVpcPeeringConnectionRequest": { + "base": "

    Contains the parameters for DeleteVpcPeeringConnection.

    ", + "refs": { + } + }, + "DeleteVpcPeeringConnectionResult": { + "base": "

    Contains the output of DeleteVpcPeeringConnection.

    ", + "refs": { + } + }, + "DeleteVpcRequest": { + "base": "

    Contains the parameters for DeleteVpc.

    ", + "refs": { + } + }, + "DeleteVpnConnectionRequest": { + "base": "

    Contains the parameters for DeleteVpnConnection.

    ", + "refs": { + } + }, + "DeleteVpnConnectionRouteRequest": { + "base": "

    Contains the parameters for DeleteVpnConnectionRoute.

    ", + "refs": { + } + }, + "DeleteVpnGatewayRequest": { + "base": "

    Contains the parameters for DeleteVpnGateway.

    ", + "refs": { + } + }, + "DeregisterImageRequest": { + "base": "

    Contains the parameters for DeregisterImage.

    ", + "refs": { + } + }, + "DescribeAccountAttributesRequest": { + "base": "

    Contains the parameters for DescribeAccountAttributes.

    ", + "refs": { + } + }, + "DescribeAccountAttributesResult": { + "base": "

    Contains the output of DescribeAccountAttributes.

    ", + "refs": { + } + }, + "DescribeAddressesRequest": { + "base": "

    Contains the parameters for DescribeAddresses.

    ", + "refs": { + } + }, + "DescribeAddressesResult": { + "base": "

    Contains the output of DescribeAddresses.

    ", + "refs": { + } + }, + "DescribeAvailabilityZonesRequest": { + "base": "

    Contains the parameters for DescribeAvailabilityZones.

    ", + "refs": { + } + }, + "DescribeAvailabilityZonesResult": { + "base": "

    Contains the output of DescribeAvailabilityZones.

    ", + "refs": { + } + }, + "DescribeBundleTasksRequest": { + "base": "

    Contains the parameters for DescribeBundleTasks.

    ", + "refs": { + } + }, + "DescribeBundleTasksResult": { + "base": "

    Contains the output of DescribeBundleTasks.

    ", + "refs": { + } + }, + "DescribeClassicLinkInstancesRequest": { + "base": "

    Contains the parameters for DescribeClassicLinkInstances.

    ", + "refs": { + } + }, + "DescribeClassicLinkInstancesResult": { + "base": "

    Contains the output of DescribeClassicLinkInstances.

    ", + "refs": { + } + }, + "DescribeConversionTaskList": { + "base": null, + "refs": { + "DescribeConversionTasksResult$ConversionTasks": "

    Information about the conversion tasks.

    " + } + }, + "DescribeConversionTasksRequest": { + "base": "

    Contains the parameters for DescribeConversionTasks.

    ", + "refs": { + } + }, + "DescribeConversionTasksResult": { + "base": "

    Contains the output for DescribeConversionTasks.

    ", + "refs": { + } + }, + "DescribeCustomerGatewaysRequest": { + "base": "

    Contains the parameters for DescribeCustomerGateways.

    ", + "refs": { + } + }, + "DescribeCustomerGatewaysResult": { + "base": "

    Contains the output of DescribeCustomerGateways.

    ", + "refs": { + } + }, + "DescribeDhcpOptionsRequest": { + "base": "

    Contains the parameters for DescribeDhcpOptions.

    ", + "refs": { + } + }, + "DescribeDhcpOptionsResult": { + "base": "

    Contains the output of DescribeDhcpOptions.

    ", + "refs": { + } + }, + "DescribeExportTasksRequest": { + "base": "

    Contains the parameters for DescribeExportTasks.

    ", + "refs": { + } + }, + "DescribeExportTasksResult": { + "base": "

    Contains the output for DescribeExportTasks.

    ", + "refs": { + } + }, + "DescribeFlowLogsRequest": { + "base": "

    Contains the parameters for DescribeFlowLogs.

    ", + "refs": { + } + }, + "DescribeFlowLogsResult": { + "base": "

    Contains the output of DescribeFlowLogs.

    ", + "refs": { + } + }, + "DescribeHostsRequest": { + "base": "

    Contains the parameters for DescribeHosts.

    ", + "refs": { + } + }, + "DescribeHostsResult": { + "base": "

    Contains the output of DescribeHosts.

    ", + "refs": { + } + }, + "DescribeIdFormatRequest": { + "base": "

    Contains the parameters for DescribeIdFormat.

    ", + "refs": { + } + }, + "DescribeIdFormatResult": { + "base": "

    Contains the output of DescribeIdFormat.

    ", + "refs": { + } + }, + "DescribeImageAttributeRequest": { + "base": "

    Contains the parameters for DescribeImageAttribute.

    ", + "refs": { + } + }, + "DescribeImagesRequest": { + "base": "

    Contains the parameters for DescribeImages.

    ", + "refs": { + } + }, + "DescribeImagesResult": { + "base": "

    Contains the output of DescribeImages.

    ", + "refs": { + } + }, + "DescribeImportImageTasksRequest": { + "base": "

    Contains the parameters for DescribeImportImageTasks.

    ", + "refs": { + } + }, + "DescribeImportImageTasksResult": { + "base": "

    Contains the output for DescribeImportImageTasks.

    ", + "refs": { + } + }, + "DescribeImportSnapshotTasksRequest": { + "base": "

    Contains the parameters for DescribeImportSnapshotTasks.

    ", + "refs": { + } + }, + "DescribeImportSnapshotTasksResult": { + "base": "

    Contains the output for DescribeImportSnapshotTasks.

    ", + "refs": { + } + }, + "DescribeInstanceAttributeRequest": { + "base": "

    Contains the parameters for DescribeInstanceAttribute.

    ", + "refs": { + } + }, + "DescribeInstanceStatusRequest": { + "base": "

    Contains the parameters for DescribeInstanceStatus.

    ", + "refs": { + } + }, + "DescribeInstanceStatusResult": { + "base": "

    Contains the output of DescribeInstanceStatus.

    ", + "refs": { + } + }, + "DescribeInstancesRequest": { + "base": "

    Contains the parameters for DescribeInstances.

    ", + "refs": { + } + }, + "DescribeInstancesResult": { + "base": "

    Contains the output of DescribeInstances.

    ", + "refs": { + } + }, + "DescribeInternetGatewaysRequest": { + "base": "

    Contains the parameters for DescribeInternetGateways.

    ", + "refs": { + } + }, + "DescribeInternetGatewaysResult": { + "base": "

    Contains the output of DescribeInternetGateways.

    ", + "refs": { + } + }, + "DescribeKeyPairsRequest": { + "base": "

    Contains the parameters for DescribeKeyPairs.

    ", + "refs": { + } + }, + "DescribeKeyPairsResult": { + "base": "

    Contains the output of DescribeKeyPairs.

    ", + "refs": { + } + }, + "DescribeMovingAddressesRequest": { + "base": "

    Contains the parameters for DescribeMovingAddresses.

    ", + "refs": { + } + }, + "DescribeMovingAddressesResult": { + "base": "

    Contains the output of DescribeMovingAddresses.

    ", + "refs": { + } + }, + "DescribeNatGatewaysRequest": { + "base": "

    Contains the parameters for DescribeNatGateways.

    ", + "refs": { + } + }, + "DescribeNatGatewaysResult": { + "base": "

    Contains the output of DescribeNatGateways.

    ", + "refs": { + } + }, + "DescribeNetworkAclsRequest": { + "base": "

    Contains the parameters for DescribeNetworkAcls.

    ", + "refs": { + } + }, + "DescribeNetworkAclsResult": { + "base": "

    Contains the output of DescribeNetworkAcls.

    ", + "refs": { + } + }, + "DescribeNetworkInterfaceAttributeRequest": { + "base": "

    Contains the parameters for DescribeNetworkInterfaceAttribute.

    ", + "refs": { + } + }, + "DescribeNetworkInterfaceAttributeResult": { + "base": "

    Contains the output of DescribeNetworkInterfaceAttribute.

    ", + "refs": { + } + }, + "DescribeNetworkInterfacesRequest": { + "base": "

    Contains the parameters for DescribeNetworkInterfaces.

    ", + "refs": { + } + }, + "DescribeNetworkInterfacesResult": { + "base": "

    Contains the output of DescribeNetworkInterfaces.

    ", + "refs": { + } + }, + "DescribePlacementGroupsRequest": { + "base": "

    Contains the parameters for DescribePlacementGroups.

    ", + "refs": { + } + }, + "DescribePlacementGroupsResult": { + "base": "

    Contains the output of DescribePlacementGroups.

    ", + "refs": { + } + }, + "DescribePrefixListsRequest": { + "base": "

    Contains the parameters for DescribePrefixLists.

    ", + "refs": { + } + }, + "DescribePrefixListsResult": { + "base": "

    Contains the output of DescribePrefixLists.

    ", + "refs": { + } + }, + "DescribeRegionsRequest": { + "base": "

    Contains the parameters for DescribeRegions.

    ", + "refs": { + } + }, + "DescribeRegionsResult": { + "base": "

    Contains the output of DescribeRegions.

    ", + "refs": { + } + }, + "DescribeReservedInstancesListingsRequest": { + "base": "

    Contains the parameters for DescribeReservedInstancesListings.

    ", + "refs": { + } + }, + "DescribeReservedInstancesListingsResult": { + "base": "

    Contains the output of DescribeReservedInstancesListings.

    ", + "refs": { + } + }, + "DescribeReservedInstancesModificationsRequest": { + "base": "

    Contains the parameters for DescribeReservedInstancesModifications.

    ", + "refs": { + } + }, + "DescribeReservedInstancesModificationsResult": { + "base": "

    Contains the output of DescribeReservedInstancesModifications.

    ", + "refs": { + } + }, + "DescribeReservedInstancesOfferingsRequest": { + "base": "

    Contains the parameters for DescribeReservedInstancesOfferings.

    ", + "refs": { + } + }, + "DescribeReservedInstancesOfferingsResult": { + "base": "

    Contains the output of DescribeReservedInstancesOfferings.

    ", + "refs": { + } + }, + "DescribeReservedInstancesRequest": { + "base": "

    Contains the parameters for DescribeReservedInstances.

    ", + "refs": { + } + }, + "DescribeReservedInstancesResult": { + "base": "

    Contains the output for DescribeReservedInstances.

    ", + "refs": { + } + }, + "DescribeRouteTablesRequest": { + "base": "

    Contains the parameters for DescribeRouteTables.

    ", + "refs": { + } + }, + "DescribeRouteTablesResult": { + "base": "

    Contains the output of DescribeRouteTables.

    ", + "refs": { + } + }, + "DescribeScheduledInstanceAvailabilityRequest": { + "base": "

    Contains the parameters for DescribeScheduledInstanceAvailability.

    ", + "refs": { + } + }, + "DescribeScheduledInstanceAvailabilityResult": { + "base": "

    Contains the output of DescribeScheduledInstanceAvailability.

    ", + "refs": { + } + }, + "DescribeScheduledInstancesRequest": { + "base": "

    Contains the parameters for DescribeScheduledInstances.

    ", + "refs": { + } + }, + "DescribeScheduledInstancesResult": { + "base": "

    Contains the output of DescribeScheduledInstances.

    ", + "refs": { + } + }, + "DescribeSecurityGroupReferencesRequest": { + "base": null, + "refs": { + } + }, + "DescribeSecurityGroupReferencesResult": { + "base": null, + "refs": { + } + }, + "DescribeSecurityGroupsRequest": { + "base": "

    Contains the parameters for DescribeSecurityGroups.

    ", + "refs": { + } + }, + "DescribeSecurityGroupsResult": { + "base": "

    Contains the output of DescribeSecurityGroups.

    ", + "refs": { + } + }, + "DescribeSnapshotAttributeRequest": { + "base": "

    Contains the parameters for DescribeSnapshotAttribute.

    ", + "refs": { + } + }, + "DescribeSnapshotAttributeResult": { + "base": "

    Contains the output of DescribeSnapshotAttribute.

    ", + "refs": { + } + }, + "DescribeSnapshotsRequest": { + "base": "

    Contains the parameters for DescribeSnapshots.

    ", + "refs": { + } + }, + "DescribeSnapshotsResult": { + "base": "

    Contains the output of DescribeSnapshots.

    ", + "refs": { + } + }, + "DescribeSpotDatafeedSubscriptionRequest": { + "base": "

    Contains the parameters for DescribeSpotDatafeedSubscription.

    ", + "refs": { + } + }, + "DescribeSpotDatafeedSubscriptionResult": { + "base": "

    Contains the output of DescribeSpotDatafeedSubscription.

    ", + "refs": { + } + }, + "DescribeSpotFleetInstancesRequest": { + "base": "

    Contains the parameters for DescribeSpotFleetInstances.

    ", + "refs": { + } + }, + "DescribeSpotFleetInstancesResponse": { + "base": "

    Contains the output of DescribeSpotFleetInstances.

    ", + "refs": { + } + }, + "DescribeSpotFleetRequestHistoryRequest": { + "base": "

    Contains the parameters for DescribeSpotFleetRequestHistory.

    ", + "refs": { + } + }, + "DescribeSpotFleetRequestHistoryResponse": { + "base": "

    Contains the output of DescribeSpotFleetRequestHistory.

    ", + "refs": { + } + }, + "DescribeSpotFleetRequestsRequest": { + "base": "

    Contains the parameters for DescribeSpotFleetRequests.

    ", + "refs": { + } + }, + "DescribeSpotFleetRequestsResponse": { + "base": "

    Contains the output of DescribeSpotFleetRequests.

    ", + "refs": { + } + }, + "DescribeSpotInstanceRequestsRequest": { + "base": "

    Contains the parameters for DescribeSpotInstanceRequests.

    ", + "refs": { + } + }, + "DescribeSpotInstanceRequestsResult": { + "base": "

    Contains the output of DescribeSpotInstanceRequests.

    ", + "refs": { + } + }, + "DescribeSpotPriceHistoryRequest": { + "base": "

    Contains the parameters for DescribeSpotPriceHistory.

    ", + "refs": { + } + }, + "DescribeSpotPriceHistoryResult": { + "base": "

    Contains the output of DescribeSpotPriceHistory.

    ", + "refs": { + } + }, + "DescribeStaleSecurityGroupsRequest": { + "base": null, + "refs": { + } + }, + "DescribeStaleSecurityGroupsResult": { + "base": null, + "refs": { + } + }, + "DescribeSubnetsRequest": { + "base": "

    Contains the parameters for DescribeSubnets.

    ", + "refs": { + } + }, + "DescribeSubnetsResult": { + "base": "

    Contains the output of DescribeSubnets.

    ", + "refs": { + } + }, + "DescribeTagsRequest": { + "base": "

    Contains the parameters for DescribeTags.

    ", + "refs": { + } + }, + "DescribeTagsResult": { + "base": "

    Contains the output of DescribeTags.

    ", + "refs": { + } + }, + "DescribeVolumeAttributeRequest": { + "base": "

    Contains the parameters for DescribeVolumeAttribute.

    ", + "refs": { + } + }, + "DescribeVolumeAttributeResult": { + "base": "

    Contains the output of DescribeVolumeAttribute.

    ", + "refs": { + } + }, + "DescribeVolumeStatusRequest": { + "base": "

    Contains the parameters for DescribeVolumeStatus.

    ", + "refs": { + } + }, + "DescribeVolumeStatusResult": { + "base": "

    Contains the output of DescribeVolumeStatus.

    ", + "refs": { + } + }, + "DescribeVolumesRequest": { + "base": "

    Contains the parameters for DescribeVolumes.

    ", + "refs": { + } + }, + "DescribeVolumesResult": { + "base": "

    Contains the output of DescribeVolumes.

    ", + "refs": { + } + }, + "DescribeVpcAttributeRequest": { + "base": "

    Contains the parameters for DescribeVpcAttribute.

    ", + "refs": { + } + }, + "DescribeVpcAttributeResult": { + "base": "

    Contains the output of DescribeVpcAttribute.

    ", + "refs": { + } + }, + "DescribeVpcClassicLinkDnsSupportRequest": { + "base": "

    Contains the parameters for DescribeVpcClassicLinkDnsSupport.

    ", + "refs": { + } + }, + "DescribeVpcClassicLinkDnsSupportResult": { + "base": "

    Contains the output of DescribeVpcClassicLinkDnsSupport.

    ", + "refs": { + } + }, + "DescribeVpcClassicLinkRequest": { + "base": "

    Contains the parameters for DescribeVpcClassicLink.

    ", + "refs": { + } + }, + "DescribeVpcClassicLinkResult": { + "base": "

    Contains the output of DescribeVpcClassicLink.

    ", + "refs": { + } + }, + "DescribeVpcEndpointServicesRequest": { + "base": "

    Contains the parameters for DescribeVpcEndpointServices.

    ", + "refs": { + } + }, + "DescribeVpcEndpointServicesResult": { + "base": "

    Contains the output of DescribeVpcEndpointServices.

    ", + "refs": { + } + }, + "DescribeVpcEndpointsRequest": { + "base": "

    Contains the parameters for DescribeVpcEndpoints.

    ", + "refs": { + } + }, + "DescribeVpcEndpointsResult": { + "base": "

    Contains the output of DescribeVpcEndpoints.

    ", + "refs": { + } + }, + "DescribeVpcPeeringConnectionsRequest": { + "base": "

    Contains the parameters for DescribeVpcPeeringConnections.

    ", + "refs": { + } + }, + "DescribeVpcPeeringConnectionsResult": { + "base": "

    Contains the output of DescribeVpcPeeringConnections.

    ", + "refs": { + } + }, + "DescribeVpcsRequest": { + "base": "

    Contains the parameters for DescribeVpcs.

    ", + "refs": { + } + }, + "DescribeVpcsResult": { + "base": "

    Contains the output of DescribeVpcs.

    ", + "refs": { + } + }, + "DescribeVpnConnectionsRequest": { + "base": "

    Contains the parameters for DescribeVpnConnections.

    ", + "refs": { + } + }, + "DescribeVpnConnectionsResult": { + "base": "

    Contains the output of DescribeVpnConnections.

    ", + "refs": { + } + }, + "DescribeVpnGatewaysRequest": { + "base": "

    Contains the parameters for DescribeVpnGateways.

    ", + "refs": { + } + }, + "DescribeVpnGatewaysResult": { + "base": "

    Contains the output of DescribeVpnGateways.

    ", + "refs": { + } + }, + "DetachClassicLinkVpcRequest": { + "base": "

    Contains the parameters for DetachClassicLinkVpc.

    ", + "refs": { + } + }, + "DetachClassicLinkVpcResult": { + "base": "

    Contains the output of DetachClassicLinkVpc.

    ", + "refs": { + } + }, + "DetachInternetGatewayRequest": { + "base": "

    Contains the parameters for DetachInternetGateway.

    ", + "refs": { + } + }, + "DetachNetworkInterfaceRequest": { + "base": "

    Contains the parameters for DetachNetworkInterface.

    ", + "refs": { + } + }, + "DetachVolumeRequest": { + "base": "

    Contains the parameters for DetachVolume.

    ", + "refs": { + } + }, + "DetachVpnGatewayRequest": { + "base": "

    Contains the parameters for DetachVpnGateway.

    ", + "refs": { + } + }, + "DeviceType": { + "base": null, + "refs": { + "Image$RootDeviceType": "

    The type of root device used by the AMI. The AMI can use an EBS volume or an instance store volume.

    ", + "Instance$RootDeviceType": "

    The root device type used by the AMI. The AMI can use an EBS volume or an instance store volume.

    " + } + }, + "DhcpConfiguration": { + "base": "

    Describes a DHCP configuration option.

    ", + "refs": { + "DhcpConfigurationList$member": null + } + }, + "DhcpConfigurationList": { + "base": null, + "refs": { + "DhcpOptions$DhcpConfigurations": "

    One or more DHCP options in the set.

    " + } + }, + "DhcpConfigurationValueList": { + "base": null, + "refs": { + "DhcpConfiguration$Values": "

    One or more values for the DHCP option.

    " + } + }, + "DhcpOptions": { + "base": "

    Describes a set of DHCP options.

    ", + "refs": { + "CreateDhcpOptionsResult$DhcpOptions": "

    A set of DHCP options.

    ", + "DhcpOptionsList$member": null + } + }, + "DhcpOptionsIdStringList": { + "base": null, + "refs": { + "DescribeDhcpOptionsRequest$DhcpOptionsIds": "

    The IDs of one or more DHCP options sets.

    Default: Describes all your DHCP options sets.

    " + } + }, + "DhcpOptionsList": { + "base": null, + "refs": { + "DescribeDhcpOptionsResult$DhcpOptions": "

    Information about one or more DHCP options sets.

    " + } + }, + "DisableVgwRoutePropagationRequest": { + "base": "

    Contains the parameters for DisableVgwRoutePropagation.

    ", + "refs": { + } + }, + "DisableVpcClassicLinkDnsSupportRequest": { + "base": "

    Contains the parameters for DisableVpcClassicLinkDnsSupport.

    ", + "refs": { + } + }, + "DisableVpcClassicLinkDnsSupportResult": { + "base": "

    Contains the output of DisableVpcClassicLinkDnsSupport.

    ", + "refs": { + } + }, + "DisableVpcClassicLinkRequest": { + "base": "

    Contains the parameters for DisableVpcClassicLink.

    ", + "refs": { + } + }, + "DisableVpcClassicLinkResult": { + "base": "

    Contains the output of DisableVpcClassicLink.

    ", + "refs": { + } + }, + "DisassociateAddressRequest": { + "base": "

    Contains the parameters for DisassociateAddress.

    ", + "refs": { + } + }, + "DisassociateRouteTableRequest": { + "base": "

    Contains the parameters for DisassociateRouteTable.

    ", + "refs": { + } + }, + "DiskImage": { + "base": "

    Describes a disk image.

    ", + "refs": { + "DiskImageList$member": null + } + }, + "DiskImageDescription": { + "base": "

    Describes a disk image.

    ", + "refs": { + "ImportInstanceVolumeDetailItem$Image": "

    The image.

    ", + "ImportVolumeTaskDetails$Image": "

    The image.

    " + } + }, + "DiskImageDetail": { + "base": "

    Describes a disk image.

    ", + "refs": { + "DiskImage$Image": "

    Information about the disk image.

    ", + "ImportVolumeRequest$Image": "

    The disk image.

    " + } + }, + "DiskImageFormat": { + "base": null, + "refs": { + "DiskImageDescription$Format": "

    The disk image format.

    ", + "DiskImageDetail$Format": "

    The disk image format.

    ", + "ExportToS3Task$DiskImageFormat": "

    The format for the exported image.

    ", + "ExportToS3TaskSpecification$DiskImageFormat": "

    The format for the exported image.

    " + } + }, + "DiskImageList": { + "base": null, + "refs": { + "ImportInstanceRequest$DiskImages": "

    The disk image.

    " + } + }, + "DiskImageVolumeDescription": { + "base": "

    Describes a disk image volume.

    ", + "refs": { + "ImportInstanceVolumeDetailItem$Volume": "

    The volume.

    ", + "ImportVolumeTaskDetails$Volume": "

    The volume.

    " + } + }, + "DomainType": { + "base": null, + "refs": { + "Address$Domain": "

    Indicates whether this Elastic IP address is for use with instances in EC2-Classic (standard) or instances in a VPC (vpc).

    ", + "AllocateAddressRequest$Domain": "

    Set to vpc to allocate the address for use with instances in a VPC.

    Default: The address is for use with instances in EC2-Classic.

    ", + "AllocateAddressResult$Domain": "

    Indicates whether this Elastic IP address is for use with instances in EC2-Classic (standard) or instances in a VPC (vpc).

    " + } + }, + "Double": { + "base": null, + "refs": { + "ClientData$UploadSize": "

    The size of the uploaded disk image, in GiB.

    ", + "PriceSchedule$Price": "

    The fixed price for the term.

    ", + "PriceScheduleSpecification$Price": "

    The fixed price for the term.

    ", + "PricingDetail$Price": "

    The price per instance.

    ", + "RecurringCharge$Amount": "

    The amount of the recurring charge.

    ", + "ReservedInstanceLimitPrice$Amount": "

    Used for Reserved Instance Marketplace offerings. Specifies the limit price on the total order (instanceCount * price).

    ", + "SnapshotDetail$DiskImageSize": "

    The size of the disk in the snapshot, in GiB.

    ", + "SnapshotTaskDetail$DiskImageSize": "

    The size of the disk in the snapshot, in GiB.

    ", + "SpotFleetLaunchSpecification$WeightedCapacity": "

    The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of (instances or a performance characteristic such as vCPUs, memory, or I/O).

    If the target capacity divided by this value is not a whole number, we round the number of instances to the next whole number. If this value is not specified, the default is 1.
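
    To make the rounding concrete (an illustrative calculation, not part of the upstream reference): with a target capacity of 10 units and a WeightedCapacity of 3, 10 / 3 ≈ 3.33, so the fleet launches 4 instances and fulfills 12 units.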

    ", + "SpotFleetRequestConfigData$FulfilledCapacity": "

    The number of units fulfilled by this request compared to the set target capacity.

    " + } + }, + "EbsBlockDevice": { + "base": "

    Describes a block device for an EBS volume.

    ", + "refs": { + "BlockDeviceMapping$Ebs": "

    Parameters used to automatically set up EBS volumes when the instance is launched.

    " + } + }, + "EbsInstanceBlockDevice": { + "base": "

    Describes a parameter used to set up an EBS volume in a block device mapping.

    ", + "refs": { + "InstanceBlockDeviceMapping$Ebs": "

    Parameters used to automatically set up EBS volumes when the instance is launched.

    " + } + }, + "EbsInstanceBlockDeviceSpecification": { + "base": "

    Describes information used to set up an EBS volume specified in a block device mapping.

    ", + "refs": { + "InstanceBlockDeviceMappingSpecification$Ebs": "

    Parameters used to automatically set up EBS volumes when the instance is launched.

    " + } + }, + "EnableVgwRoutePropagationRequest": { + "base": "

    Contains the parameters for EnableVgwRoutePropagation.

    ", + "refs": { + } + }, + "EnableVolumeIORequest": { + "base": "

    Contains the parameters for EnableVolumeIO.

    ", + "refs": { + } + }, + "EnableVpcClassicLinkDnsSupportRequest": { + "base": "

    Contains the parameters for EnableVpcClassicLinkDnsSupport.

    ", + "refs": { + } + }, + "EnableVpcClassicLinkDnsSupportResult": { + "base": "

    Contains the output of EnableVpcClassicLinkDnsSupport.

    ", + "refs": { + } + }, + "EnableVpcClassicLinkRequest": { + "base": "

    Contains the parameters for EnableVpcClassicLink.

    ", + "refs": { + } + }, + "EnableVpcClassicLinkResult": { + "base": "

    Contains the output of EnableVpcClassicLink.

    ", + "refs": { + } + }, + "EventCode": { + "base": null, + "refs": { + "InstanceStatusEvent$Code": "

    The event code.

    " + } + }, + "EventInformation": { + "base": "

    Describes a Spot fleet event.

    ", + "refs": { + "HistoryRecord$EventInformation": "

    Information about the event.

    " + } + }, + "EventType": { + "base": null, + "refs": { + "DescribeSpotFleetRequestHistoryRequest$EventType": "

    The type of events to describe. By default, all events are described.

    ", + "HistoryRecord$EventType": "

    The event type.

    • error - Indicates an error with the Spot fleet request.

    • fleetRequestChange - Indicates a change in the status or configuration of the Spot fleet request.

    • instanceChange - Indicates that an instance was launched or terminated.

    " + } + }, + "ExcessCapacityTerminationPolicy": { + "base": null, + "refs": { + "ModifySpotFleetRequestRequest$ExcessCapacityTerminationPolicy": "

    Indicates whether running Spot instances should be terminated if the target capacity of the Spot fleet request is decreased below the current size of the Spot fleet.

    ", + "SpotFleetRequestConfigData$ExcessCapacityTerminationPolicy": "

    Indicates whether running Spot instances should be terminated if the target capacity of the Spot fleet request is decreased below the current size of the Spot fleet.

    " + } + }, + "ExecutableByStringList": { + "base": null, + "refs": { + "DescribeImagesRequest$ExecutableUsers": "

    Scopes the images by users with explicit launch permissions. Specify an AWS account ID, self (the sender of the request), or all (public AMIs).

    " + } + }, + "ExportEnvironment": { + "base": null, + "refs": { + "CreateInstanceExportTaskRequest$TargetEnvironment": "

    The target virtualization environment.

    ", + "InstanceExportDetails$TargetEnvironment": "

    The target virtualization environment.

    " + } + }, + "ExportTask": { + "base": "

    Describes an instance export task.

    ", + "refs": { + "CreateInstanceExportTaskResult$ExportTask": "

    Information about the instance export task.

    ", + "ExportTaskList$member": null + } + }, + "ExportTaskIdStringList": { + "base": null, + "refs": { + "DescribeExportTasksRequest$ExportTaskIds": "

    One or more export task IDs.

    " + } + }, + "ExportTaskList": { + "base": null, + "refs": { + "DescribeExportTasksResult$ExportTasks": "

    Information about the export tasks.

    " + } + }, + "ExportTaskState": { + "base": null, + "refs": { + "ExportTask$State": "

    The state of the export task.

    " + } + }, + "ExportToS3Task": { + "base": "

    Describes the format and location for an instance export task.

    ", + "refs": { + "ExportTask$ExportToS3Task": "

    Information about the export task.

    " + } + }, + "ExportToS3TaskSpecification": { + "base": "

    Describes an instance export task.

    ", + "refs": { + "CreateInstanceExportTaskRequest$ExportToS3Task": "

    The format and location for an instance export task.

    " + } + }, + "Filter": { + "base": "

    A filter name and value pair that is used to return a more specific list of results. Filters can be used to match a set of resources by various criteria, such as tags, attributes, or IDs.

    ", + "refs": { + "FilterList$member": null + } + }, + "FilterList": { + "base": null, + "refs": { + "DescribeAddressesRequest$Filters": "

    One or more filters. Filter names and values are case-sensitive; a usage sketch follows this list.

    • allocation-id - [EC2-VPC] The allocation ID for the address.

    • association-id - [EC2-VPC] The association ID for the address.

    • domain - Indicates whether the address is for use in EC2-Classic (standard) or in a VPC (vpc).

    • instance-id - The ID of the instance the address is associated with, if any.

    • network-interface-id - [EC2-VPC] The ID of the network interface that the address is associated with, if any.

    • network-interface-owner-id - The AWS account ID of the owner.

    • private-ip-address - [EC2-VPC] The private IP address associated with the Elastic IP address.

    • public-ip - The Elastic IP address.
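
    As referenced above, a sketch of these filters in aws-sdk-go, using the domain filter to keep only VPC-scoped addresses (the function name and printed fields are illustrative):

```go
package addrexample

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// vpcAddresses lists Elastic IP addresses whose domain is "vpc".
func vpcAddresses(svc *ec2.EC2) error {
	out, err := svc.DescribeAddresses(&ec2.DescribeAddressesInput{
		Filters: []*ec2.Filter{{
			Name:   aws.String("domain"),
			Values: []*string{aws.String("vpc")},
		}},
	})
	if err != nil {
		return err
	}
	for _, addr := range out.Addresses {
		fmt.Println(aws.StringValue(addr.PublicIp), aws.StringValue(addr.AllocationId))
	}
	return nil
}
```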

    ", + "DescribeAvailabilityZonesRequest$Filters": "

    One or more filters.

    • message - Information about the Availability Zone.

    • region-name - The name of the region for the Availability Zone (for example, us-east-1).

    • state - The state of the Availability Zone (available | information | impaired | unavailable).

    • zone-name - The name of the Availability Zone (for example, us-east-1a).

    ", + "DescribeBundleTasksRequest$Filters": "

    One or more filters.

    • bundle-id - The ID of the bundle task.

    • error-code - If the task failed, the error code returned.

    • error-message - If the task failed, the error message returned.

    • instance-id - The ID of the instance.

    • progress - The level of task completion, as a percentage (for example, 20%).

    • s3-bucket - The Amazon S3 bucket to store the AMI.

    • s3-prefix - The beginning of the AMI name.

    • start-time - The time the task started (for example, 2013-09-15T17:15:20.000Z).

    • state - The state of the task (pending | waiting-for-shutdown | bundling | storing | cancelling | complete | failed).

    • update-time - The time of the most recent update for the task.

    ", + "DescribeClassicLinkInstancesRequest$Filters": "

    One or more filters.

    • group-id - The ID of a VPC security group that's associated with the instance.

    • instance-id - The ID of the instance.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • vpc-id - The ID of the VPC that the instance is linked to.

    ", + "DescribeConversionTasksRequest$Filters": "

    One or more filters.

    ", + "DescribeCustomerGatewaysRequest$Filters": "

    One or more filters.

    • bgp-asn - The customer gateway's Border Gateway Protocol (BGP) Autonomous System Number (ASN).

    • customer-gateway-id - The ID of the customer gateway.

    • ip-address - The IP address of the customer gateway's Internet-routable external interface.

    • state - The state of the customer gateway (pending | available | deleting | deleted).

    • type - The type of customer gateway. Currently, the only supported type is ipsec.1.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    ", + "DescribeDhcpOptionsRequest$Filters": "

    One or more filters.

    • dhcp-options-id - The ID of a set of DHCP options.

    • key - The key for one of the options (for example, domain-name).

    • value - The value for one of the options.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    ", + "DescribeFlowLogsRequest$Filter": "

    One or more filters.

    • deliver-log-status - The status of the logs delivery (SUCCESS | FAILED).

    • flow-log-id - The ID of the flow log.

    • log-group-name - The name of the log group.

    • resource-id - The ID of the VPC, subnet, or network interface.

    • traffic-type - The type of traffic (ACCEPT | REJECT | ALL).

    ", + "DescribeHostsRequest$Filter": "

    One or more filters.

    • instance-type - The instance type size that the Dedicated host is configured to support.

    • auto-placement - Whether auto-placement is enabled or disabled (on | off).

    • host-reservation-id - The ID of the reservation associated with this host.

    • client-token - The idempotency token you provided when you launched the instance.

    • state - The allocation state of the Dedicated host (available | under-assessment | permanent-failure | released | released-permanent-failure).

    • availability-zone - The Availability Zone of the host.

    ", + "DescribeImagesRequest$Filters": "

    One or more filters.

    • architecture - The image architecture (i386 | x86_64).

    • block-device-mapping.delete-on-termination - A Boolean value that indicates whether the Amazon EBS volume is deleted on instance termination.

    • block-device-mapping.device-name - The device name for the EBS volume (for example, /dev/sdh).

    • block-device-mapping.snapshot-id - The ID of the snapshot used for the EBS volume.

    • block-device-mapping.volume-size - The volume size of the EBS volume, in GiB.

    • block-device-mapping.volume-type - The volume type of the EBS volume (gp2 | io1 | st1 | sc1 | standard).

    • description - The description of the image (provided during image creation).

    • hypervisor - The hypervisor type (ovm | xen).

    • image-id - The ID of the image.

    • image-type - The image type (machine | kernel | ramdisk).

    • is-public - A Boolean that indicates whether the image is public.

    • kernel-id - The kernel ID.

    • manifest-location - The location of the image manifest.

    • name - The name of the AMI (provided during image creation).

    • owner-alias - The AWS account alias (for example, amazon).

    • owner-id - The AWS account ID of the image owner.

    • platform - The platform. To only list Windows-based AMIs, use windows.

    • product-code - The product code.

    • product-code.type - The type of the product code (devpay | marketplace).

    • ramdisk-id - The RAM disk ID.

    • root-device-name - The name of the root device volume (for example, /dev/sda1).

    • root-device-type - The type of the root device volume (ebs | instance-store).

    • state - The state of the image (available | pending | failed).

    • state-reason-code - The reason code for the state change.

    • state-reason-message - The message for the state change.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • virtualization-type - The virtualization type (paravirtual | hvm).

    ", + "DescribeImportImageTasksRequest$Filters": "

    Filter tasks using the task-state filter and one of the following values: active, completed, deleting, deleted.

    ", + "DescribeImportSnapshotTasksRequest$Filters": "

    One or more filters.

    ", + "DescribeInstanceStatusRequest$Filters": "

    One or more filters.

    • availability-zone - The Availability Zone of the instance.

    • event.code - The code for the scheduled event (instance-reboot | system-reboot | system-maintenance | instance-retirement | instance-stop).

    • event.description - A description of the event.

    • event.not-after - The latest end time for the scheduled event (for example, 2014-09-15T17:15:20.000Z).

    • event.not-before - The earliest start time for the scheduled event (for example, 2014-09-15T17:15:20.000Z).

    • instance-state-code - The code for the instance state, as a 16-bit unsigned integer. The high byte is an opaque internal value and should be ignored. The low byte is set based on the state represented. The valid values are 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped).

    • instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped).

    • instance-status.reachability - Filters on instance status where the name is reachability (passed | failed | initializing | insufficient-data).

    • instance-status.status - The status of the instance (ok | impaired | initializing | insufficient-data | not-applicable).

    • system-status.reachability - Filters on system status where the name is reachability (passed | failed | initializing | insufficient-data).

    • system-status.status - The system status of the instance (ok | impaired | initializing | insufficient-data | not-applicable).

    ", + "DescribeInstancesRequest$Filters": "

    One or more filters.

    • affinity - The affinity setting for an instance running on a Dedicated host (default | host).

    • architecture - The instance architecture (i386 | x86_64).

    • availability-zone - The Availability Zone of the instance.

    • block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2010-09-15T17:15:20.000Z.

    • block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination.

    • block-device-mapping.device-name - The device name for the EBS volume (for example, /dev/sdh or xvdh).

    • block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached).

    • block-device-mapping.volume-id - The volume ID of the EBS volume.

    • client-token - The idempotency token you provided when you launched the instance.

    • dns-name - The public DNS name of the instance.

    • group-id - The ID of the security group for the instance. EC2-Classic only.

    • group-name - The name of the security group for the instance. EC2-Classic only.

    • host-id - The ID of the Dedicated host on which the instance is running, if applicable.

    • hypervisor - The hypervisor type of the instance (ovm | xen).

    • iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN.

    • image-id - The ID of the image used to launch the instance.

    • instance-id - The ID of the instance.

    • instance-lifecycle - Indicates whether this is a Spot Instance or a Scheduled Instance (spot | scheduled).

    • instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is an opaque internal value and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped).

    • instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped).

    • instance-type - The type of instance (for example, t2.micro).

    • instance.group-id - The ID of the security group for the instance.

    • instance.group-name - The name of the security group for the instance.

    • ip-address - The public IP address of the instance.

    • kernel-id - The kernel ID.

    • key-name - The name of the key pair used when the instance was launched.

    • launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on).

    • launch-time - The time when the instance was launched.

    • monitoring-state - Indicates whether monitoring is enabled for the instance (disabled | enabled).

    • owner-id - The AWS account ID of the instance owner.

    • placement-group-name - The name of the placement group for the instance.

    • platform - The platform. Use windows if you have Windows instances; otherwise, leave blank.

    • private-dns-name - The private DNS name of the instance.

    • private-ip-address - The private IP address of the instance.

    • product-code - The product code associated with the AMI used to launch the instance.

    • product-code.type - The type of product code (devpay | marketplace).

    • ramdisk-id - The RAM disk ID.

    • reason - The reason for the current state of the instance (for example, shows \"User Initiated [date]\" when you stop or terminate the instance). Similar to the state-reason-code filter.

    • requester-id - The ID of the entity that launched the instance on your behalf (for example, AWS Management Console, Auto Scaling, and so on).

    • reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you'll get one reservation ID. If you launch ten instances using the same launch request, you'll also get one reservation ID.

    • root-device-name - The name of the root device for the instance (for example, /dev/sda1 or /dev/xvda).

    • root-device-type - The type of root device that the instance uses (ebs | instance-store).

    • source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC.

    • spot-instance-request-id - The ID of the Spot instance request.

    • state-reason-code - The reason code for the state change.

    • state-reason-message - A message that describes the state change.

    • subnet-id - The ID of the subnet for the instance.

    • tag:key=value - The key/value combination of a tag assigned to the resource, where tag:key is the tag's key.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • tenancy - The tenancy of an instance (dedicated | default | host).

    • virtualization-type - The virtualization type of the instance (paravirtual | hvm).

    • vpc-id - The ID of the VPC that the instance is running in.

    • network-interface.description - The description of the network interface.

    • network-interface.subnet-id - The ID of the subnet for the network interface.

    • network-interface.vpc-id - The ID of the VPC for the network interface.

    • network-interface.network-interface-id - The ID of the network interface.

    • network-interface.owner-id - The ID of the owner of the network interface.

    • network-interface.availability-zone - The Availability Zone for the network interface.

    • network-interface.requester-id - The requester ID for the network interface.

    • network-interface.requester-managed - Indicates whether the network interface is being managed by AWS.

    • network-interface.status - The status of the network interface (available | in-use).

    • network-interface.mac-address - The MAC address of the network interface.

    • network-interface.private-dns-name - The private DNS name of the network interface.

    • network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means checking is enabled, and false means checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC.

    • network-interface.group-id - The ID of a security group associated with the network interface.

    • network-interface.group-name - The name of a security group associated with the network interface.

    • network-interface.attachment.attachment-id - The ID of the interface attachment.

    • network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached.

    • network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.

    • network-interface.addresses.private-ip-address - The private IP address associated with the network interface.

    • network-interface.attachment.device-index - The device index to which the network interface is attached.

    • network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached).

    • network-interface.attachment.attach-time - The time that the network interface was attached to an instance.

    • network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated.

    • network-interface.addresses.primary - Specifies whether the IP address of the network interface is the primary private IP address.

    • network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address with a network interface.

    • network-interface.addresses.association.ip-owner-id - The owner ID of the private IP address associated with the network interface.

    • association.public-ip - The address of the Elastic IP address bound to the network interface.

    • association.ip-owner-id - The owner of the Elastic IP address associated with the network interface.

    • association.allocation-id - The allocation ID returned when you allocated the Elastic IP address for your network interface.

    • association.association-id - The association ID returned when the network interface was associated with an IP address.

    ", + "DescribeInternetGatewaysRequest$Filters": "

    One or more filters.

    • attachment.state - The current state of the attachment between the gateway and the VPC (available). Present only if a VPC is attached.

    • attachment.vpc-id - The ID of an attached VPC.

    • internet-gateway-id - The ID of the Internet gateway.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    ", + "DescribeKeyPairsRequest$Filters": "

    One or more filters.

    • fingerprint - The fingerprint of the key pair.

    • key-name - The name of the key pair.

    ", + "DescribeMovingAddressesRequest$Filters": "

    One or more filters.

    • moving-status - The status of the Elastic IP address (MovingToVpc | RestoringToClassic).

    ", + "DescribeNatGatewaysRequest$Filter": "

    One or more filters.

    • nat-gateway-id - The ID of the NAT gateway.

    • state - The state of the NAT gateway (pending | failed | available | deleting | deleted).

    • subnet-id - The ID of the subnet in which the NAT gateway resides.

    • vpc-id - The ID of the VPC in which the NAT gateway resides.

    ", + "DescribeNetworkAclsRequest$Filters": "

    One or more filters.

    • association.association-id - The ID of an association for the ACL.

    • association.network-acl-id - The ID of the network ACL involved in the association.

    • association.subnet-id - The ID of the subnet involved in the association.

    • default - Indicates whether the ACL is the default network ACL for the VPC.

    • entry.cidr - The CIDR range specified in the entry.

    • entry.egress - Indicates whether the entry applies to egress traffic.

    • entry.icmp.code - The ICMP code specified in the entry, if any.

    • entry.icmp.type - The ICMP type specified in the entry, if any.

    • entry.port-range.from - The start of the port range specified in the entry.

    • entry.port-range.to - The end of the port range specified in the entry.

    • entry.protocol - The protocol specified in the entry (tcp | udp | icmp or a protocol number).

    • entry.rule-action - Allows or denies the matching traffic (allow | deny).

    • entry.rule-number - The number of an entry (in other words, rule) in the ACL's set of entries.

    • network-acl-id - The ID of the network ACL.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • vpc-id - The ID of the VPC for the network ACL.

    ", + "DescribeNetworkInterfacesRequest$Filters": "

    One or more filters.

    • addresses.private-ip-address - The private IP addresses associated with the network interface.

    • addresses.primary - Whether the private IP address is the primary IP address associated with the network interface.

    • addresses.association.public-ip - The association ID returned when the network interface was associated with the Elastic IP address.

    • addresses.association.owner-id - The owner ID of the addresses associated with the network interface.

    • association.association-id - The association ID returned when the network interface was associated with an IP address.

    • association.allocation-id - The allocation ID returned when you allocated the Elastic IP address for your network interface.

    • association.ip-owner-id - The owner of the Elastic IP address associated with the network interface.

    • association.public-ip - The address of the Elastic IP address bound to the network interface.

    • association.public-dns-name - The public DNS name for the network interface.

    • attachment.attachment-id - The ID of the interface attachment.

    • attachment.attach-time - The time that the network interface was attached to an instance.

    • attachment.delete-on-termination - Indicates whether the attachment is deleted when an instance is terminated.

    • attachment.device-index - The device index to which the network interface is attached.

    • attachment.instance-id - The ID of the instance to which the network interface is attached.

    • attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.

    • attachment.nat-gateway-id - The ID of the NAT gateway to which the network interface is attached.

    • attachment.status - The status of the attachment (attaching | attached | detaching | detached).

    • availability-zone - The Availability Zone of the network interface.

    • description - The description of the network interface.

    • group-id - The ID of a security group associated with the network interface.

    • group-name - The name of a security group associated with the network interface.

    • mac-address - The MAC address of the network interface.

    • network-interface-id - The ID of the network interface.

    • owner-id - The AWS account ID of the network interface owner.

    • private-ip-address - The private IP address or addresses of the network interface.

    • private-dns-name - The private DNS name of the network interface.

    • requester-id - The ID of the entity that launched the instance on your behalf (for example, AWS Management Console, Auto Scaling, and so on).

    • requester-managed - Indicates whether the network interface is being managed by an AWS service (for example, AWS Management Console, Auto Scaling, and so on).

    • source-dest-check - Indicates whether the network interface performs source/destination checking. A value of true means checking is enabled, and false means checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC.

    • status - The status of the network interface. If the network interface is not attached to an instance, the status is available; if a network interface is attached to an instance, the status is in-use.

    • subnet-id - The ID of the subnet for the network interface.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • vpc-id - The ID of the VPC for the network interface.

    ", + "DescribePlacementGroupsRequest$Filters": "

    One or more filters.

    • group-name - The name of the placement group.

    • state - The state of the placement group (pending | available | deleting | deleted).

    • strategy - The strategy of the placement group (cluster).

    ", + "DescribePrefixListsRequest$Filters": "

    One or more filters.

    • prefix-list-id: The ID of a prefix list.

    • prefix-list-name: The name of a prefix list.

    ", + "DescribeRegionsRequest$Filters": "

    One or more filters.

    • endpoint - The endpoint of the region (for example, ec2.us-east-1.amazonaws.com).

    • region-name - The name of the region (for example, us-east-1).

    ", + "DescribeReservedInstancesListingsRequest$Filters": "

    One or more filters.

    • reserved-instances-id - The ID of the Reserved Instances.

    • reserved-instances-listing-id - The ID of the Reserved Instances listing.

    • status - The status of the Reserved Instance listing (pending | active | cancelled | closed).

    • status-message - The reason for the status.

    ", + "DescribeReservedInstancesModificationsRequest$Filters": "

    One or more filters.

    • client-token - The idempotency token for the modification request.

    • create-date - The time when the modification request was created.

    • effective-date - The time when the modification becomes effective.

    • modification-result.reserved-instances-id - The ID for the Reserved Instances created as part of the modification request. This ID is only available when the status of the modification is fulfilled.

    • modification-result.target-configuration.availability-zone - The Availability Zone for the new Reserved Instances.

    • modification-result.target-configuration.instance-count - The number of new Reserved Instances.

    • modification-result.target-configuration.instance-type - The instance type of the new Reserved Instances.

    • modification-result.target-configuration.platform - The network platform of the new Reserved Instances (EC2-Classic | EC2-VPC).

    • reserved-instances-id - The ID of the Reserved Instances modified.

    • reserved-instances-modification-id - The ID of the modification request.

    • status - The status of the Reserved Instances modification request (processing | fulfilled | failed).

    • status-message - The reason for the status.

    • update-date - The time when the modification request was last updated.

    ", + "DescribeReservedInstancesOfferingsRequest$Filters": "

    One or more filters.

    • availability-zone - The Availability Zone where the Reserved Instance can be used.

    • duration - The duration of the Reserved Instance (for example, one year or three years), in seconds (31536000 | 94608000).

    • fixed-price - The purchase price of the Reserved Instance (for example, 9800.0).

    • instance-type - The instance type that is covered by the reservation.

    • marketplace - Set to true to show only Reserved Instance Marketplace offerings. When this filter is not used, which is the default behavior, all offerings from both AWS and the Reserved Instance Marketplace are listed.

    • product-description - The Reserved Instance product platform description. Instances that include (Amazon VPC) in the product platform description will only be displayed to EC2-Classic account holders and are for use with Amazon VPC. (Linux/UNIX | Linux/UNIX (Amazon VPC) | SUSE Linux | SUSE Linux (Amazon VPC) | Red Hat Enterprise Linux | Red Hat Enterprise Linux (Amazon VPC) | Windows | Windows (Amazon VPC) | Windows with SQL Server Standard | Windows with SQL Server Standard (Amazon VPC) | Windows with SQL Server Web | Windows with SQL Server Web (Amazon VPC) | Windows with SQL Server Enterprise | Windows with SQL Server Enterprise (Amazon VPC))

    • reserved-instances-offering-id - The Reserved Instances offering ID.

    • usage-price - The usage price of the Reserved Instance, per hour (for example, 0.84).

    ", + "DescribeReservedInstancesRequest$Filters": "

    One or more filters.

    • availability-zone - The Availability Zone where the Reserved Instance can be used.

    • duration - The duration of the Reserved Instance (one year or three years), in seconds (31536000 | 94608000).

    • end - The time when the Reserved Instance expires (for example, 2015-08-07T11:54:42.000Z).

    • fixed-price - The purchase price of the Reserved Instance (for example, 9800.0).

    • instance-type - The instance type that is covered by the reservation.

    • product-description - The Reserved Instance product platform description. Instances that include (Amazon VPC) in the product platform description will only be displayed to EC2-Classic account holders and are for use with Amazon VPC (Linux/UNIX | Linux/UNIX (Amazon VPC) | SUSE Linux | SUSE Linux (Amazon VPC) | Red Hat Enterprise Linux | Red Hat Enterprise Linux (Amazon VPC) | Windows | Windows (Amazon VPC) | Windows with SQL Server Standard | Windows with SQL Server Standard (Amazon VPC) | Windows with SQL Server Web | Windows with SQL Server Web (Amazon VPC) | Windows with SQL Server Enterprise | Windows with SQL Server Enterprise (Amazon VPC)).

    • reserved-instances-id - The ID of the Reserved Instance.

    • start - The time at which the Reserved Instance purchase request was placed (for example, 2014-08-07T11:54:42.000Z).

    • state - The state of the Reserved Instance (payment-pending | active | payment-failed | retired).

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • usage-price - The usage price of the Reserved Instance, per hour (for example, 0.84).

    ", + "DescribeRouteTablesRequest$Filters": "

    One or more filters.

    • association.route-table-association-id - The ID of an association for the route table.

    • association.route-table-id - The ID of the route table involved in the association.

    • association.subnet-id - The ID of the subnet involved in the association.

    • association.main - Indicates whether the route table is the main route table for the VPC (true | false).

    • route-table-id - The ID of the route table.

    • route.destination-cidr-block - The CIDR range specified in a route in the table.

    • route.destination-prefix-list-id - The ID (prefix) of the AWS service specified in a route in the table.

    • route.gateway-id - The ID of a gateway specified in a route in the table.

    • route.instance-id - The ID of an instance specified in a route in the table.

    • route.nat-gateway-id - The ID of a NAT gateway.

    • route.origin - Describes how the route was created. CreateRouteTable indicates that the route was automatically created when the route table was created; CreateRoute indicates that the route was manually added to the route table; EnableVgwRoutePropagation indicates that the route was propagated by route propagation.

    • route.state - The state of a route in the route table (active | blackhole). The blackhole state indicates that the route's target isn't available (for example, the specified gateway isn't attached to the VPC, the specified NAT instance has been terminated, and so on).

    • route.vpc-peering-connection-id - The ID of a VPC peering connection specified in a route in the table.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • vpc-id - The ID of the VPC for the route table.

    ", + "DescribeScheduledInstanceAvailabilityRequest$Filters": "

    One or more filters.

    • availability-zone - The Availability Zone (for example, us-west-2a).

    • instance-type - The instance type (for example, c4.large).

    • network-platform - The network platform (EC2-Classic or EC2-VPC).

    • platform - The platform (Linux/UNIX or Windows).

    ", + "DescribeScheduledInstancesRequest$Filters": "

    One or more filters.

    • availability-zone - The Availability Zone (for example, us-west-2a).

    • instance-type - The instance type (for example, c4.large).

    • network-platform - The network platform (EC2-Classic or EC2-VPC).

    • platform - The platform (Linux/UNIX or Windows).

    ", + "DescribeSecurityGroupsRequest$Filters": "

    One or more filters. If using multiple filters for rules, the results include security groups for which any combination of rules - not necessarily a single rule - matches all filters.

    • description - The description of the security group.

    • egress.ip-permission.prefix-list-id - The ID (prefix) of the AWS service to which the security group allows access.

    • group-id - The ID of the security group.

    • group-name - The name of the security group.

    • ip-permission.cidr - A CIDR range that has been granted permission.

    • ip-permission.from-port - The start of port range for the TCP and UDP protocols, or an ICMP type number.

    • ip-permission.group-id - The ID of a security group that has been granted permission.

    • ip-permission.group-name - The name of a security group that has been granted permission.

    • ip-permission.protocol - The IP protocol for the permission (tcp | udp | icmp or a protocol number).

    • ip-permission.to-port - The end of port range for the TCP and UDP protocols, or an ICMP code.

    • ip-permission.user-id - The ID of an AWS account that has been granted permission.

    • owner-id - The AWS account ID of the owner of the security group.

    • tag-key - The key of a tag assigned to the security group.

    • tag-value - The value of a tag assigned to the security group.

    • vpc-id - The ID of the VPC specified when the security group was created.

    ", + "DescribeSnapshotsRequest$Filters": "

    One or more filters.

    • description - A description of the snapshot.

    • owner-alias - The AWS account alias (for example, amazon) that owns the snapshot.

    • owner-id - The ID of the AWS account that owns the snapshot.

    • progress - The progress of the snapshot, as a percentage (for example, 80%).

    • snapshot-id - The snapshot ID.

    • start-time - The time stamp when the snapshot was initiated.

    • status - The status of the snapshot (pending | completed | error).

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • volume-id - The ID of the volume the snapshot is for.

    • volume-size - The size of the volume, in GiB.

    ", + "DescribeSpotInstanceRequestsRequest$Filters": "

    One or more filters.

    • availability-zone-group - The Availability Zone group.

    • create-time - The time stamp when the Spot instance request was created.

    • fault-code - The fault code related to the request.

    • fault-message - The fault message related to the request.

    • instance-id - The ID of the instance that fulfilled the request.

    • launch-group - The Spot instance launch group.

    • launch.block-device-mapping.delete-on-termination - Indicates whether the Amazon EBS volume is deleted on instance termination.

    • launch.block-device-mapping.device-name - The device name for the Amazon EBS volume (for example, /dev/sdh).

    • launch.block-device-mapping.snapshot-id - The ID of the snapshot used for the Amazon EBS volume.

    • launch.block-device-mapping.volume-size - The size of the Amazon EBS volume, in GiB.

    • launch.block-device-mapping.volume-type - The type of the Amazon EBS volume: gp2 for General Purpose SSD, io1 for Provisioned IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard for Magnetic.

    • launch.group-id - The security group for the instance.

    • launch.image-id - The ID of the AMI.

    • launch.instance-type - The type of instance (for example, m3.medium).

    • launch.kernel-id - The kernel ID.

    • launch.key-name - The name of the key pair the instance launched with.

    • launch.monitoring-enabled - Whether monitoring is enabled for the Spot instance.

    • launch.ramdisk-id - The RAM disk ID.

    • network-interface.network-interface-id - The ID of the network interface.

    • network-interface.device-index - The index of the device for the network interface attachment on the instance.

    • network-interface.subnet-id - The ID of the subnet for the instance.

    • network-interface.description - A description of the network interface.

    • network-interface.private-ip-address - The primary private IP address of the network interface.

    • network-interface.delete-on-termination - Indicates whether the network interface is deleted when the instance is terminated.

    • network-interface.group-id - The ID of the security group associated with the network interface.

    • network-interface.group-name - The name of the security group associated with the network interface.

    • network-interface.addresses.primary - Indicates whether the IP address is the primary private IP address.

    • product-description - The product description associated with the instance (Linux/UNIX | Windows).

    • spot-instance-request-id - The Spot instance request ID.

    • spot-price - The maximum hourly price for any Spot instance launched to fulfill the request.

    • state - The state of the Spot instance request (open | active | closed | cancelled | failed). Spot bid status information can help you track your Amazon EC2 Spot instance requests. For more information, see Spot Bid Status in the Amazon Elastic Compute Cloud User Guide.

    • status-code - The short code describing the most recent evaluation of your Spot instance request.

    • status-message - The message explaining the status of the Spot instance request.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • type - The type of Spot instance request (one-time | persistent).

    • launched-availability-zone - The Availability Zone in which the bid is launched.

    • valid-from - The start date of the request.

    • valid-until - The end date of the request.

    ", + "DescribeSpotPriceHistoryRequest$Filters": "

    One or more filters.

    • availability-zone - The Availability Zone for which prices should be returned.

    • instance-type - The type of instance (for example, m3.medium).

    • product-description - The product description for the Spot price (Linux/UNIX | SUSE Linux | Windows | Linux/UNIX (Amazon VPC) | SUSE Linux (Amazon VPC) | Windows (Amazon VPC)).

    • spot-price - The Spot price. The value must match exactly (or use wildcards; greater than or less than comparison is not supported).

    • timestamp - The timestamp of the Spot price history, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). You can use wildcards (* and ?). Greater than or less than comparison is not supported.

    ", + "DescribeSubnetsRequest$Filters": "

    One or more filters.

    • availabilityZone - The Availability Zone for the subnet. You can also use availability-zone as the filter name.

    • available-ip-address-count - The number of IP addresses in the subnet that are available.

    • cidrBlock - The CIDR block of the subnet. The CIDR block you specify must exactly match the subnet's CIDR block for information to be returned for the subnet. You can also use cidr or cidr-block as the filter names.

    • defaultForAz - Indicates whether this is the default subnet for the Availability Zone. You can also use default-for-az as the filter name.

    • state - The state of the subnet (pending | available).

    • subnet-id - The ID of the subnet.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • vpc-id - The ID of the VPC for the subnet.

    ", + "DescribeTagsRequest$Filters": "

    One or more filters.

    • key - The tag key.

    • resource-id - The resource ID.

    • resource-type - The resource type (customer-gateway | dhcp-options | image | instance | internet-gateway | network-acl | network-interface | reserved-instances | route-table | security-group | snapshot | spot-instances-request | subnet | volume | vpc | vpn-connection | vpn-gateway).

    • value - The tag value.

    ", + "DescribeVolumeStatusRequest$Filters": "

    One or more filters.

    • action.code - The action code for the event (for example, enable-volume-io).

    • action.description - A description of the action.

    • action.event-id - The event ID associated with the action.

    • availability-zone - The Availability Zone of the instance.

    • event.description - A description of the event.

    • event.event-id - The event ID.

    • event.event-type - The event type (for io-enabled: passed | failed; for io-performance: io-performance:degraded | io-performance:severely-degraded | io-performance:stalled).

    • event.not-after - The latest end time for the event.

    • event.not-before - The earliest start time for the event.

    • volume-status.details-name - The cause for volume-status.status (io-enabled | io-performance).

    • volume-status.details-status - The status of volume-status.details-name (for io-enabled: passed | failed; for io-performance: normal | degraded | severely-degraded | stalled).

    • volume-status.status - The status of the volume (ok | impaired | warning | insufficient-data).

    ", + "DescribeVolumesRequest$Filters": "

    One or more filters.

    • attachment.attach-time - The time stamp when the attachment initiated.

    • attachment.delete-on-termination - Whether the volume is deleted on instance termination.

    • attachment.device - The device name that is exposed to the instance (for example, /dev/sda1).

    • attachment.instance-id - The ID of the instance the volume is attached to.

    • attachment.status - The attachment state (attaching | attached | detaching | detached).

    • availability-zone - The Availability Zone in which the volume was created.

    • create-time - The time stamp when the volume was created.

    • encrypted - The encryption status of the volume.

    • size - The size of the volume, in GiB.

    • snapshot-id - The snapshot from which the volume was created.

    • status - The status of the volume (creating | available | in-use | deleting | deleted | error).

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • volume-id - The volume ID.

    • volume-type - The Amazon EBS volume type. This can be gp2 for General Purpose SSD, io1 for Provisioned IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard for Magnetic volumes.

    ", + "DescribeVpcClassicLinkRequest$Filters": "

    One or more filters.

    • is-classic-link-enabled - Whether the VPC is enabled for ClassicLink (true | false).

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    ", + "DescribeVpcEndpointsRequest$Filters": "

    One or more filters.

    • service-name: The name of the AWS service.

    • vpc-id: The ID of the VPC in which the endpoint resides.

    • vpc-endpoint-id: The ID of the endpoint.

    • vpc-endpoint-state: The state of the endpoint (pending | available | deleting | deleted).

    ", + "DescribeVpcPeeringConnectionsRequest$Filters": "

    One or more filters.

    • accepter-vpc-info.cidr-block - The CIDR block of the peer VPC.

    • accepter-vpc-info.owner-id - The AWS account ID of the owner of the peer VPC.

    • accepter-vpc-info.vpc-id - The ID of the peer VPC.

    • expiration-time - The expiration date and time for the VPC peering connection.

    • requester-vpc-info.cidr-block - The CIDR block of the requester's VPC.

    • requester-vpc-info.owner-id - The AWS account ID of the owner of the requester VPC.

    • requester-vpc-info.vpc-id - The ID of the requester VPC.

    • status-code - The status of the VPC peering connection (pending-acceptance | failed | expired | provisioning | active | deleted | rejected).

    • status-message - A message that provides more information about the status of the VPC peering connection, if applicable.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • vpc-peering-connection-id - The ID of the VPC peering connection.

    ", + "DescribeVpcsRequest$Filters": "

    One or more filters.

    • cidr - The CIDR block of the VPC. The CIDR block you specify must exactly match the VPC's CIDR block for information to be returned for the VPC. Must contain the slash followed by one or two digits (for example, /28).

    • dhcp-options-id - The ID of a set of DHCP options.

    • isDefault - Indicates whether the VPC is the default VPC.

    • state - The state of the VPC (pending | available).

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • vpc-id - The ID of the VPC.

    ", + "DescribeVpnConnectionsRequest$Filters": "

    One or more filters.

    • customer-gateway-configuration - The configuration information for the customer gateway.

    • customer-gateway-id - The ID of a customer gateway associated with the VPN connection.

    • state - The state of the VPN connection (pending | available | deleting | deleted).

    • option.static-routes-only - Indicates whether the connection has static routes only. Used for devices that do not support Border Gateway Protocol (BGP).

    • route.destination-cidr-block - The destination CIDR block. This corresponds to the subnet used in a customer data center.

    • bgp-asn - The BGP Autonomous System Number (ASN) associated with a BGP device.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • type - The type of VPN connection. Currently the only supported type is ipsec.1.

    • vpn-connection-id - The ID of the VPN connection.

    • vpn-gateway-id - The ID of a virtual private gateway associated with the VPN connection.

    ", + "DescribeVpnGatewaysRequest$Filters": "

    One or more filters.

    • attachment.state - The current state of the attachment between the gateway and the VPC (attaching | attached | detaching | detached).

    • attachment.vpc-id - The ID of an attached VPC.

    • availability-zone - The Availability Zone for the virtual private gateway (if applicable).

    • state - The state of the virtual private gateway (pending | available | deleting | deleted).

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • type - The type of virtual private gateway. Currently the only supported type is ipsec.1.

    • vpn-gateway-id - The ID of the virtual private gateway.

    " + } + }, + "FleetType": { + "base": null, + "refs": { + "SpotFleetRequestConfigData$Type": "

    The type of request. Indicates whether the fleet will only request the target capacity or also attempt to maintain it. When you request a certain target capacity, the fleet will only place the required bids. It will not attempt to replenish Spot instances if capacity is diminished, nor will it submit bids in alternative Spot pools if capacity is not available. When you want to maintain a certain target capacity, the fleet will place the required bids to meet this target capacity and automatically replenish any interrupted instances. Default: maintain.

    " + } + }, + "Float": { + "base": null, + "refs": { + "ReservedInstances$UsagePrice": "

    The usage price of the Reserved Instance, per hour.

    ", + "ReservedInstances$FixedPrice": "

    The purchase price of the Reserved Instance.

    ", + "ReservedInstancesOffering$UsagePrice": "

    The usage price of the Reserved Instance, per hour.

    ", + "ReservedInstancesOffering$FixedPrice": "

    The purchase price of the Reserved Instance.

    " + } + }, + "FlowLog": { + "base": "

    Describes a flow log.

    ", + "refs": { + "FlowLogSet$member": null + } + }, + "FlowLogSet": { + "base": null, + "refs": { + "DescribeFlowLogsResult$FlowLogs": "

    Information about the flow logs.

    " + } + }, + "FlowLogsResourceType": { + "base": null, + "refs": { + "CreateFlowLogsRequest$ResourceType": "

    The type of resource on which to create the flow log.

    " + } + }, + "GatewayType": { + "base": null, + "refs": { + "CreateCustomerGatewayRequest$Type": "

    The type of VPN connection that this customer gateway supports (ipsec.1).

    ", + "CreateVpnGatewayRequest$Type": "

    The type of VPN connection this virtual private gateway supports.

    ", + "VpnConnection$Type": "

    The type of VPN connection.

    ", + "VpnGateway$Type": "

    The type of VPN connection the virtual private gateway supports.

    " + } + }, + "GetConsoleOutputRequest": { + "base": "

    Contains the parameters for GetConsoleOutput.

    ", + "refs": { + } + }, + "GetConsoleOutputResult": { + "base": "

    Contains the output of GetConsoleOutput.

    ", + "refs": { + } + }, + "GetConsoleScreenshotRequest": { + "base": "

    Contains the parameters for the request.

    ", + "refs": { + } + }, + "GetConsoleScreenshotResult": { + "base": "

    Contains the output of the request.

    ", + "refs": { + } + }, + "GetPasswordDataRequest": { + "base": "

    Contains the parameters for GetPasswordData.

    ", + "refs": { + } + }, + "GetPasswordDataResult": { + "base": "

    Contains the output of GetPasswordData.

    ", + "refs": { + } + }, + "GroupIdStringList": { + "base": null, + "refs": { + "AttachClassicLinkVpcRequest$Groups": "

    The ID of one or more of the VPC's security groups. You cannot specify security groups from a different VPC.

    ", + "DescribeSecurityGroupsRequest$GroupIds": "

    One or more security group IDs. Required for security groups in a nondefault VPC.

    Default: Describes all your security groups.

    ", + "ModifyInstanceAttributeRequest$Groups": "

    [EC2-VPC] Changes the security groups of the instance. You must specify at least one security group, even if it's just the default security group for the VPC. You must specify the security group ID, not the security group name.

    " + } + }, + "GroupIdentifier": { + "base": "

    Describes a security group.

    ", + "refs": { + "GroupIdentifierList$member": null + } + }, + "GroupIdentifierList": { + "base": null, + "refs": { + "ClassicLinkInstance$Groups": "

    A list of security groups.

    ", + "DescribeNetworkInterfaceAttributeResult$Groups": "

    The security groups associated with the network interface.

    ", + "Instance$SecurityGroups": "

    One or more security groups for the instance.

    ", + "InstanceAttribute$Groups": "

    The security groups associated with the instance.

    ", + "InstanceNetworkInterface$Groups": "

    One or more security groups.

    ", + "LaunchSpecification$SecurityGroups": "

    One or more security groups. When requesting instances in a VPC, you must specify the IDs of the security groups. When requesting instances in EC2-Classic, you can specify the names or the IDs of the security groups.

    ", + "NetworkInterface$Groups": "

    Any security groups for the network interface.

    ", + "Reservation$Groups": "

    [EC2-Classic only] One or more security groups.

    ", + "SpotFleetLaunchSpecification$SecurityGroups": "

    One or more security groups. When requesting instances in a VPC, you must specify the IDs of the security groups. When requesting instances in EC2-Classic, you can specify the names or the IDs of the security groups.

    " + } + }, + "GroupIds": { + "base": null, + "refs": { + "DescribeSecurityGroupReferencesRequest$GroupId": "

    One or more security group IDs in your account.

    " + } + }, + "GroupNameStringList": { + "base": null, + "refs": { + "DescribeSecurityGroupsRequest$GroupNames": "

    [EC2-Classic and default VPC only] One or more security group names. You can specify either the security group name or the security group ID. For security groups in a nondefault VPC, use the group-name filter to describe security groups by name.

    Default: Describes all your security groups.
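
    A sketch contrasting the three lookups (IDs and names are placeholders): group IDs resolve in any VPC, bare names only in EC2-Classic or the default VPC, and the group-name filter covers nondefault VPCs.

        package snippets

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/ec2"
        )

        func describeGroups(svc *ec2.EC2) (*ec2.DescribeSecurityGroupsOutput, error) {
            return svc.DescribeSecurityGroups(&ec2.DescribeSecurityGroupsInput{
                // GroupIds:   []*string{aws.String("sg-12345678")},      // by ID, any VPC
                // GroupNames: []*string{aws.String("my-classic-group")}, // EC2-Classic or default VPC only
                Filters: []*ec2.Filter{ // by name in a nondefault VPC
                    {Name: aws.String("group-name"), Values: []*string{aws.String("web-tier")}},
                },
            })
        }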

    ", + "ModifySnapshotAttributeRequest$GroupNames": "

    The group to modify for the snapshot.

    " + } + }, + "HistoryRecord": { + "base": "

    Describes an event in the history of the Spot fleet request.

    ", + "refs": { + "HistoryRecords$member": null + } + }, + "HistoryRecords": { + "base": null, + "refs": { + "DescribeSpotFleetRequestHistoryResponse$HistoryRecords": "

    Information about the events in the history of the Spot fleet request.

    " + } + }, + "Host": { + "base": "

    Describes the properties of the Dedicated host.

    ", + "refs": { + "HostList$member": null + } + }, + "HostInstance": { + "base": "

    Describes an instance running on a Dedicated host.

    ", + "refs": { + "HostInstanceList$member": null + } + }, + "HostInstanceList": { + "base": null, + "refs": { + "Host$Instances": "

    The IDs and instance type of the instances currently running on the Dedicated host.

    " + } + }, + "HostList": { + "base": null, + "refs": { + "DescribeHostsResult$Hosts": "

    Information about the Dedicated hosts.

    " + } + }, + "HostProperties": { + "base": "

    Describes properties of a Dedicated host.

    ", + "refs": { + "Host$HostProperties": "

    The hardware specifications of the Dedicated host.

    " + } + }, + "HostTenancy": { + "base": null, + "refs": { + "ModifyInstancePlacementRequest$Tenancy": "

    The tenancy of the instance that you are modifying.

    " + } + }, + "HypervisorType": { + "base": null, + "refs": { + "Image$Hypervisor": "

    The hypervisor type of the image.

    ", + "Instance$Hypervisor": "

    The hypervisor type of the instance.

    " + } + }, + "IamInstanceProfile": { + "base": "

    Describes an IAM instance profile.

    ", + "refs": { + "Instance$IamInstanceProfile": "

    The IAM instance profile associated with the instance, if applicable.

    " + } + }, + "IamInstanceProfileSpecification": { + "base": "

    Describes an IAM instance profile.

    ", + "refs": { + "LaunchSpecification$IamInstanceProfile": "

    The IAM instance profile.

    ", + "RequestSpotLaunchSpecification$IamInstanceProfile": "

    The IAM instance profile.

    ", + "RunInstancesRequest$IamInstanceProfile": "

    The IAM instance profile.

    ", + "SpotFleetLaunchSpecification$IamInstanceProfile": "

    The IAM instance profile.

    " + } + }, + "IcmpTypeCode": { + "base": "

    Describes the ICMP type and code.

    ", + "refs": { + "CreateNetworkAclEntryRequest$IcmpTypeCode": "

    ICMP protocol: The ICMP type and code. Required if specifying ICMP for the protocol.

    ", + "NetworkAclEntry$IcmpTypeCode": "

    ICMP protocol: The ICMP type and code.

    ", + "ReplaceNetworkAclEntryRequest$IcmpTypeCode": "

    ICMP protocol: The ICMP type and code. Required if specifying 1 (ICMP) for the protocol.
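
    A sketch of supplying the type and code when creating an ICMP network ACL rule (the ACL ID is a placeholder):

        package snippets

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/ec2"
        )

        // allowPing admits inbound ICMP echo requests (type 8); Code -1 matches
        // every code defined for that type.
        func allowPing(svc *ec2.EC2) error {
            _, err := svc.CreateNetworkAclEntry(&ec2.CreateNetworkAclEntryInput{
                NetworkAclId: aws.String("acl-12345678"), // placeholder
                RuleNumber:   aws.Int64(100),
                Protocol:     aws.String("1"), // 1 = ICMP, so IcmpTypeCode is required
                RuleAction:   aws.String("allow"),
                Egress:       aws.Bool(false),
                CidrBlock:    aws.String("0.0.0.0/0"),
                IcmpTypeCode: &ec2.IcmpTypeCode{Type: aws.Int64(8), Code: aws.Int64(-1)},
            })
            return err
        }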

    " + } + }, + "IdFormat": { + "base": "

    Describes the ID format for a resource.

    ", + "refs": { + "IdFormatList$member": null + } + }, + "IdFormatList": { + "base": null, + "refs": { + "DescribeIdFormatResult$Statuses": "

    Information about the ID format for the resource.

    " + } + }, + "Image": { + "base": "

    Describes an image.

    ", + "refs": { + "ImageList$member": null + } + }, + "ImageAttribute": { + "base": "

    Describes an image attribute.

    ", + "refs": { + } + }, + "ImageAttributeName": { + "base": null, + "refs": { + "DescribeImageAttributeRequest$Attribute": "

    The AMI attribute.

    Note: Depending on your account privileges, the blockDeviceMapping attribute may return a Client.AuthFailure error. If this happens, use DescribeImages to get information about the block device mapping for the AMI.

    " + } + }, + "ImageDiskContainer": { + "base": "

    Describes the disk container object for an import image task.

    ", + "refs": { + "ImageDiskContainerList$member": null + } + }, + "ImageDiskContainerList": { + "base": null, + "refs": { + "ImportImageRequest$DiskContainers": "

    Information about the disk containers.

    " + } + }, + "ImageIdStringList": { + "base": null, + "refs": { + "DescribeImagesRequest$ImageIds": "

    One or more image IDs.

    Default: Describes all images available to you.

    " + } + }, + "ImageList": { + "base": null, + "refs": { + "DescribeImagesResult$Images": "

    Information about one or more images.

    " + } + }, + "ImageState": { + "base": null, + "refs": { + "Image$State": "

    The current state of the AMI. If the state is available, the image is successfully registered and can be used to launch an instance.

    " + } + }, + "ImageTypeValues": { + "base": null, + "refs": { + "Image$ImageType": "

    The type of image.

    " + } + }, + "ImportImageRequest": { + "base": "

    Contains the parameters for ImportImage.

    ", + "refs": { + } + }, + "ImportImageResult": { + "base": "

    Contains the output for ImportImage.

    ", + "refs": { + } + }, + "ImportImageTask": { + "base": "

    Describes an import image task.

    ", + "refs": { + "ImportImageTaskList$member": null + } + }, + "ImportImageTaskList": { + "base": null, + "refs": { + "DescribeImportImageTasksResult$ImportImageTasks": "

    A list of zero or more import image tasks that are currently active or were completed or canceled in the previous 7 days.

    " + } + }, + "ImportInstanceLaunchSpecification": { + "base": "

    Describes the launch specification for VM import.

    ", + "refs": { + "ImportInstanceRequest$LaunchSpecification": "

    The launch specification.

    " + } + }, + "ImportInstanceRequest": { + "base": "

    Contains the parameters for ImportInstance.

    ", + "refs": { + } + }, + "ImportInstanceResult": { + "base": "

    Contains the output for ImportInstance.

    ", + "refs": { + } + }, + "ImportInstanceTaskDetails": { + "base": "

    Describes an import instance task.

    ", + "refs": { + "ConversionTask$ImportInstance": "

    If the task is for importing an instance, this contains information about the import instance task.

    " + } + }, + "ImportInstanceVolumeDetailItem": { + "base": "

    Describes an import volume task.

    ", + "refs": { + "ImportInstanceVolumeDetailSet$member": null + } + }, + "ImportInstanceVolumeDetailSet": { + "base": null, + "refs": { + "ImportInstanceTaskDetails$Volumes": "

    One or more volumes.

    " + } + }, + "ImportKeyPairRequest": { + "base": "

    Contains the parameters for ImportKeyPair.

    ", + "refs": { + } + }, + "ImportKeyPairResult": { + "base": "

    Contains the output of ImportKeyPair.

    ", + "refs": { + } + }, + "ImportSnapshotRequest": { + "base": "

    Contains the parameters for ImportSnapshot.

    ", + "refs": { + } + }, + "ImportSnapshotResult": { + "base": "

    Contains the output for ImportSnapshot.

    ", + "refs": { + } + }, + "ImportSnapshotTask": { + "base": "

    Describes an import snapshot task.

    ", + "refs": { + "ImportSnapshotTaskList$member": null + } + }, + "ImportSnapshotTaskList": { + "base": null, + "refs": { + "DescribeImportSnapshotTasksResult$ImportSnapshotTasks": "

    A list of zero or more import snapshot tasks that are currently active or were completed or canceled in the previous 7 days.

    " + } + }, + "ImportTaskIdList": { + "base": null, + "refs": { + "DescribeImportImageTasksRequest$ImportTaskIds": "

    A list of import image task IDs.

    ", + "DescribeImportSnapshotTasksRequest$ImportTaskIds": "

    A list of import snapshot task IDs.

    " + } + }, + "ImportVolumeRequest": { + "base": "

    Contains the parameters for ImportVolume.

    ", + "refs": { + } + }, + "ImportVolumeResult": { + "base": "

    Contains the output for ImportVolume.

    ", + "refs": { + } + }, + "ImportVolumeTaskDetails": { + "base": "

    Describes an import volume task.

    ", + "refs": { + "ConversionTask$ImportVolume": "

    If the task is for importing a volume, this contains information about the import volume task.

    " + } + }, + "Instance": { + "base": "

    Describes an instance.

    ", + "refs": { + "InstanceList$member": null + } + }, + "InstanceAttribute": { + "base": "

    Describes an instance attribute.

    ", + "refs": { + } + }, + "InstanceAttributeName": { + "base": null, + "refs": { + "DescribeInstanceAttributeRequest$Attribute": "

    The instance attribute.

    ", + "ModifyInstanceAttributeRequest$Attribute": "

    The name of the attribute.

    ", + "ResetInstanceAttributeRequest$Attribute": "

    The attribute to reset.

    You can only reset the following attributes: kernel | ramdisk | sourceDestCheck. To change an instance attribute, use ModifyInstanceAttribute.

    " + } + }, + "InstanceBlockDeviceMapping": { + "base": "

    Describes a block device mapping.

    ", + "refs": { + "InstanceBlockDeviceMappingList$member": null + } + }, + "InstanceBlockDeviceMappingList": { + "base": null, + "refs": { + "Instance$BlockDeviceMappings": "

    Any block device mapping entries for the instance.

    ", + "InstanceAttribute$BlockDeviceMappings": "

    The block device mapping of the instance.

    " + } + }, + "InstanceBlockDeviceMappingSpecification": { + "base": "

    Describes a block device mapping entry.

    ", + "refs": { + "InstanceBlockDeviceMappingSpecificationList$member": null + } + }, + "InstanceBlockDeviceMappingSpecificationList": { + "base": null, + "refs": { + "ModifyInstanceAttributeRequest$BlockDeviceMappings": "

    Modifies the DeleteOnTermination attribute for volumes that are currently attached. The volume must be owned by the caller. If no value is specified for DeleteOnTermination, the default is true and the volume is deleted when the instance is terminated.

    To add instance store volumes to an Amazon EBS-backed instance, you must add them when you launch the instance. For more information, see Updating the Block Device Mapping when Launching an Instance in the Amazon Elastic Compute Cloud User Guide.
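
    A sketch of flipping DeleteOnTermination off for an attached volume, so the volume outlives instance termination (instance ID and device name are placeholders):

        package snippets

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/ec2"
        )

        func keepRootVolume(svc *ec2.EC2) error {
            _, err := svc.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{
                InstanceId: aws.String("i-1234567890abcdef0"), // placeholder
                BlockDeviceMappings: []*ec2.InstanceBlockDeviceMappingSpecification{
                    {
                        DeviceName: aws.String("/dev/sda1"), // placeholder
                        Ebs: &ec2.EbsInstanceBlockDeviceSpecification{
                            DeleteOnTermination: aws.Bool(false),
                        },
                    },
                },
            })
            return err
        }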

    " + } + }, + "InstanceCapacity": { + "base": "

    Information about the instance type that the Dedicated host supports.

    ", + "refs": { + "AvailableInstanceCapacityList$member": null + } + }, + "InstanceCount": { + "base": "

    Describes a Reserved Instance listing state.

    ", + "refs": { + "InstanceCountList$member": null + } + }, + "InstanceCountList": { + "base": null, + "refs": { + "ReservedInstancesListing$InstanceCounts": "

    The number of instances in this state.

    " + } + }, + "InstanceExportDetails": { + "base": "

    Describes an instance to export.

    ", + "refs": { + "ExportTask$InstanceExportDetails": "

    Information about the instance to export.

    " + } + }, + "InstanceIdSet": { + "base": null, + "refs": { + "RunScheduledInstancesResult$InstanceIdSet": "

    The IDs of the newly launched instances.

    " + } + }, + "InstanceIdStringList": { + "base": null, + "refs": { + "DescribeClassicLinkInstancesRequest$InstanceIds": "

    One or more instance IDs. Must be instances linked to a VPC through ClassicLink.

    ", + "DescribeInstanceStatusRequest$InstanceIds": "

    One or more instance IDs.

    Default: Describes all your instances.

    Constraints: Maximum 100 explicitly specified instance IDs.

    ", + "DescribeInstancesRequest$InstanceIds": "

    One or more instance IDs.

    Default: Describes all your instances.

    ", + "MonitorInstancesRequest$InstanceIds": "

    One or more instance IDs.

    ", + "RebootInstancesRequest$InstanceIds": "

    One or more instance IDs.

    ", + "ReportInstanceStatusRequest$Instances": "

    One or more instances.

    ", + "StartInstancesRequest$InstanceIds": "

    One or more instance IDs.

    ", + "StopInstancesRequest$InstanceIds": "

    One or more instance IDs.

    ", + "TerminateInstancesRequest$InstanceIds": "

    One or more instance IDs.

    ", + "UnmonitorInstancesRequest$InstanceIds": "

    One or more instance IDs.

    " + } + }, + "InstanceLifecycleType": { + "base": null, + "refs": { + "Instance$InstanceLifecycle": "

    Indicates whether this is a Spot instance or a Scheduled Instance.

    " + } + }, + "InstanceList": { + "base": null, + "refs": { + "Reservation$Instances": "

    One or more instances.

    " + } + }, + "InstanceMonitoring": { + "base": "

    Describes the monitoring information of the instance.

    ", + "refs": { + "InstanceMonitoringList$member": null + } + }, + "InstanceMonitoringList": { + "base": null, + "refs": { + "MonitorInstancesResult$InstanceMonitorings": "

    Monitoring information for one or more instances.

    ", + "UnmonitorInstancesResult$InstanceMonitorings": "

    Monitoring information for one or more instances.

    " + } + }, + "InstanceNetworkInterface": { + "base": "

    Describes a network interface.

    ", + "refs": { + "InstanceNetworkInterfaceList$member": null + } + }, + "InstanceNetworkInterfaceAssociation": { + "base": "

    Describes association information for an Elastic IP address.

    ", + "refs": { + "InstanceNetworkInterface$Association": "

    The association information for an Elastic IP associated with the network interface.

    ", + "InstancePrivateIpAddress$Association": "

    The association information for an Elastic IP address for the network interface.

    " + } + }, + "InstanceNetworkInterfaceAttachment": { + "base": "

    Describes a network interface attachment.

    ", + "refs": { + "InstanceNetworkInterface$Attachment": "

    The network interface attachment.

    " + } + }, + "InstanceNetworkInterfaceList": { + "base": null, + "refs": { + "Instance$NetworkInterfaces": "

    [EC2-VPC] One or more network interfaces for the instance.

    " + } + }, + "InstanceNetworkInterfaceSpecification": { + "base": "

    Describes a network interface.

    ", + "refs": { + "InstanceNetworkInterfaceSpecificationList$member": null + } + }, + "InstanceNetworkInterfaceSpecificationList": { + "base": null, + "refs": { + "LaunchSpecification$NetworkInterfaces": "

    One or more network interfaces.

    ", + "RequestSpotLaunchSpecification$NetworkInterfaces": "

    One or more network interfaces.

    ", + "RunInstancesRequest$NetworkInterfaces": "

    One or more network interfaces.

    ", + "SpotFleetLaunchSpecification$NetworkInterfaces": "

    One or more network interfaces.

    " + } + }, + "InstancePrivateIpAddress": { + "base": "

    Describes a private IP address.

    ", + "refs": { + "InstancePrivateIpAddressList$member": null + } + }, + "InstancePrivateIpAddressList": { + "base": null, + "refs": { + "InstanceNetworkInterface$PrivateIpAddresses": "

    The private IP addresses associated with the network interface.

    " + } + }, + "InstanceState": { + "base": "

    Describes the current state of the instance.

    ", + "refs": { + "Instance$State": "

    The current state of the instance.

    ", + "InstanceStateChange$CurrentState": "

    The current state of the instance.

    ", + "InstanceStateChange$PreviousState": "

    The previous state of the instance.

    ", + "InstanceStatus$InstanceState": "

    The intended state of the instance. DescribeInstanceStatus requires that an instance be in the running state.

    " + } + }, + "InstanceStateChange": { + "base": "

    Describes an instance state change.

    ", + "refs": { + "InstanceStateChangeList$member": null + } + }, + "InstanceStateChangeList": { + "base": null, + "refs": { + "StartInstancesResult$StartingInstances": "

    Information about one or more started instances.

    ", + "StopInstancesResult$StoppingInstances": "

    Information about one or more stopped instances.

    ", + "TerminateInstancesResult$TerminatingInstances": "

    Information about one or more terminated instances.

    " + } + }, + "InstanceStateName": { + "base": null, + "refs": { + "InstanceState$Name": "

    The current state of the instance.

    " + } + }, + "InstanceStatus": { + "base": "

    Describes the status of an instance.

    ", + "refs": { + "InstanceStatusList$member": null + } + }, + "InstanceStatusDetails": { + "base": "

    Describes the instance status.

    ", + "refs": { + "InstanceStatusDetailsList$member": null + } + }, + "InstanceStatusDetailsList": { + "base": null, + "refs": { + "InstanceStatusSummary$Details": "

    The system instance health or application instance health.

    " + } + }, + "InstanceStatusEvent": { + "base": "

    Describes a scheduled event for an instance.

    ", + "refs": { + "InstanceStatusEventList$member": null + } + }, + "InstanceStatusEventList": { + "base": null, + "refs": { + "InstanceStatus$Events": "

    Any scheduled events associated with the instance.

    " + } + }, + "InstanceStatusList": { + "base": null, + "refs": { + "DescribeInstanceStatusResult$InstanceStatuses": "

    One or more instance status descriptions.

    " + } + }, + "InstanceStatusSummary": { + "base": "

    Describes the status of an instance.

    ", + "refs": { + "InstanceStatus$SystemStatus": "

    Reports impaired functionality that stems from issues related to the systems that support an instance, such as hardware failures and network connectivity problems.

    ", + "InstanceStatus$InstanceStatus": "

    Reports impaired functionality that stems from issues internal to the instance, such as impaired reachability.

    " + } + }, + "InstanceType": { + "base": null, + "refs": { + "DescribeReservedInstancesOfferingsRequest$InstanceType": "

    The instance type that the reservation will cover (for example, m1.small). For more information, see Instance Types in the Amazon Elastic Compute Cloud User Guide.

    ", + "ImportInstanceLaunchSpecification$InstanceType": "

    The instance type. For more information about the instance types that you can import, see Before You Get Started in the Amazon Elastic Compute Cloud User Guide.

    ", + "Instance$InstanceType": "

    The instance type.

    ", + "InstanceTypeList$member": null, + "LaunchSpecification$InstanceType": "

    The instance type.

    ", + "RequestSpotLaunchSpecification$InstanceType": "

    The instance type.

    ", + "ReservedInstances$InstanceType": "

    The instance type on which the Reserved Instance can be used.

    ", + "ReservedInstancesConfiguration$InstanceType": "

    The instance type for the modified Reserved Instances.

    ", + "ReservedInstancesOffering$InstanceType": "

    The instance type on which the Reserved Instance can be used.

    ", + "RunInstancesRequest$InstanceType": "

    The instance type. For more information, see Instance Types in the Amazon Elastic Compute Cloud User Guide.

    Default: m1.small

    ", + "SpotFleetLaunchSpecification$InstanceType": "

    The instance type.

    ", + "SpotPrice$InstanceType": "

    The instance type.

    " + } + }, + "InstanceTypeList": { + "base": null, + "refs": { + "DescribeSpotPriceHistoryRequest$InstanceTypes": "

    Filters the results by the specified instance types.

    " + } + }, + "Integer": { + "base": null, + "refs": { + "AllocateHostsRequest$Quantity": "

    The number of Dedicated hosts you want to allocate to your account with these parameters.

    ", + "AssignPrivateIpAddressesRequest$SecondaryPrivateIpAddressCount": "

    The number of secondary IP addresses to assign to the network interface. You can't specify this parameter when also specifying private IP addresses.

    ", + "AttachNetworkInterfaceRequest$DeviceIndex": "

    The index of the device for the network interface attachment.

    ", + "AuthorizeSecurityGroupEgressRequest$FromPort": "

    The start of port range for the TCP and UDP protocols, or an ICMP type number. We recommend that you specify the port range in a set of IP permissions instead.

    ", + "AuthorizeSecurityGroupEgressRequest$ToPort": "

    The end of port range for the TCP and UDP protocols, or an ICMP code number. We recommend that you specify the port range in a set of IP permissions instead.

    ", + "AuthorizeSecurityGroupIngressRequest$FromPort": "

    The start of port range for the TCP and UDP protocols, or an ICMP type number. For the ICMP type number, use -1 to specify all ICMP types.

    ", + "AuthorizeSecurityGroupIngressRequest$ToPort": "

    The end of port range for the TCP and UDP protocols, or an ICMP code number. For the ICMP code number, use -1 to specify all ICMP codes for the ICMP type.

    ", + "AvailableCapacity$AvailableVCpus": "

    The number of vCPUs available on the Dedicated host.

    ", + "CreateCustomerGatewayRequest$BgpAsn": "

    For devices that support BGP, the customer gateway's BGP ASN.

    Default: 65000

    ", + "CreateNetworkAclEntryRequest$RuleNumber": "

    The rule number for the entry (for example, 100). ACL entries are processed in ascending order by rule number.

    Constraints: Positive integer from 1 to 32766. The range 32767 to 65535 is reserved for internal use.

    ", + "CreateNetworkInterfaceRequest$SecondaryPrivateIpAddressCount": "

    The number of secondary private IP addresses to assign to a network interface. When you specify a number of secondary IP addresses, Amazon EC2 selects these IP addresses within the subnet range. You can't specify this option and specify more than one private IP address using privateIpAddresses.

    The number of IP addresses you can assign to a network interface varies by instance type. For more information, see Private IP Addresses Per ENI Per Instance Type in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateReservedInstancesListingRequest$InstanceCount": "

    The number of instances that are a part of a Reserved Instance account to be listed in the Reserved Instance Marketplace. This number should be less than or equal to the instance count associated with the Reserved Instance ID specified in this call.

    ", + "CreateVolumeRequest$Size": "

    The size of the volume, in GiBs.

    Constraints: 1-16384 for gp2, 4-16384 for io1, 500-16384 for st1, 500-16384 for sc1, and 1-1024 for standard. If you specify a snapshot, the volume size must be equal to or larger than the snapshot size.

    Default: If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size.

    ", + "CreateVolumeRequest$Iops": "

    Only valid for Provisioned IOPS SSD volumes. The number of I/O operations per second (IOPS) to provision for the volume, with a maximum ratio of 30 IOPS/GiB.

    Constraint: Range is 100 to 20000 for Provisioned IOPS SSD volumes
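
    A sketch that honors both constraints at once, a 100 GiB io1 volume at the 30 IOPS/GiB ceiling (the zone is a placeholder):

        package snippets

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/ec2"
        )

        // createIo1Volume provisions 100 GiB * 30 IOPS/GiB = 3000 IOPS, inside
        // the 100-20000 range allowed for Provisioned IOPS SSD.
        func createIo1Volume(svc *ec2.EC2) (*ec2.Volume, error) {
            return svc.CreateVolume(&ec2.CreateVolumeInput{
                AvailabilityZone: aws.String("us-east-1a"), // placeholder
                VolumeType:       aws.String("io1"),
                Size:             aws.Int64(100),
                Iops:             aws.Int64(3000),
            })
        }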

    ", + "DeleteNetworkAclEntryRequest$RuleNumber": "

    The rule number of the entry to delete.

    ", + "DescribeClassicLinkInstancesRequest$MaxResults": "

    The maximum number of results to return for the request in a single page. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned. You cannot specify this parameter and the instance IDs parameter in the same request.

    Constraint: If the value is greater than 1000, we return only 1000 items.

    ", + "DescribeFlowLogsRequest$MaxResults": "

    The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned. You cannot specify this parameter and the flow log IDs parameter in the same request.

    ", + "DescribeHostsRequest$MaxResults": "

    The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned nextToken value. This value can be between 5 and 500; if maxResults is given a larger value than 500, you will receive an error. You cannot specify this parameter and the host IDs parameter in the same request.

    ", + "DescribeImportImageTasksRequest$MaxResults": "

    The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned NextToken value.

    ", + "DescribeImportSnapshotTasksRequest$MaxResults": "

    The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned NextToken value.

    ", + "DescribeInstanceStatusRequest$MaxResults": "

    The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned NextToken value. This value can be between 5 and 1000. You cannot specify this parameter and the instance IDs parameter in the same call.
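
    A sketch of draining such a paginated call, here DescribeInstanceStatus, by following NextToken until it comes back empty:

        package snippets

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/ec2"
        )

        func listStatuses(svc *ec2.EC2) error {
            input := &ec2.DescribeInstanceStatusInput{MaxResults: aws.Int64(100)}
            for {
                page, err := svc.DescribeInstanceStatus(input)
                if err != nil {
                    return err
                }
                for _, s := range page.InstanceStatuses {
                    fmt.Println(aws.StringValue(s.InstanceId))
                }
                if aws.StringValue(page.NextToken) == "" {
                    return nil // no more pages
                }
                input.NextToken = page.NextToken
            }
        }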

    ", + "DescribeInstancesRequest$MaxResults": "

    The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned NextToken value. This value can be between 5 and 1000. You cannot specify this parameter and the instance IDs parameter or tag filters in the same call.

    ", + "DescribeMovingAddressesRequest$MaxResults": "

    The maximum number of results to return for the request in a single page. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value outside of this range, an error is returned.

    Default: If no value is provided, the default is 1000.

    ", + "DescribeNatGatewaysRequest$MaxResults": "

    The maximum number of items to return for this request. The request returns a token that you can specify in a subsequent call to get the next set of results.

    Constraint: If the value specified is greater than 1000, we return only 1000 items.

    ", + "DescribePrefixListsRequest$MaxResults": "

    The maximum number of items to return for this request. The request returns a token that you can specify in a subsequent call to get the next set of results.

    Constraint: If the value specified is greater than 1000, we return only 1000 items.

    ", + "DescribeReservedInstancesOfferingsRequest$MaxResults": "

    The maximum number of results to return for the request in a single page. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. The maximum is 100.

    Default: 100

    ", + "DescribeReservedInstancesOfferingsRequest$MaxInstanceCount": "

    The maximum number of instances to filter when searching for offerings.

    Default: 20

    ", + "DescribeScheduledInstanceAvailabilityRequest$MinSlotDurationInHours": "

    The minimum available duration, in hours. The minimum required duration is 1,200 hours per year. For example, the minimum daily schedule is 4 hours, the minimum weekly schedule is 24 hours, and the minimum monthly schedule is 100 hours.

    ", + "DescribeScheduledInstanceAvailabilityRequest$MaxSlotDurationInHours": "

    The maximum available duration, in hours. This value must be greater than MinSlotDurationInHours and less than 1,720.

    ", + "DescribeScheduledInstanceAvailabilityRequest$MaxResults": "

    The maximum number of results to return in a single call. This value can be between 5 and 300. The default value is 300. To retrieve the remaining results, make another call with the returned NextToken value.

    ", + "DescribeScheduledInstancesRequest$MaxResults": "

    The maximum number of results to return in a single call. This value can be between 5 and 300. The default value is 100. To retrieve the remaining results, make another call with the returned NextToken value.

    ", + "DescribeSnapshotsRequest$MaxResults": "

    The maximum number of snapshot results returned by DescribeSnapshots in paginated output. When this parameter is used, DescribeSnapshots only returns MaxResults results in a single page along with a NextToken response element. The remaining results of the initial request can be seen by sending another DescribeSnapshots request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned. If this parameter is not used, then DescribeSnapshots returns all results. You cannot specify this parameter and the snapshot IDs parameter in the same request.

    ", + "DescribeSpotFleetInstancesRequest$MaxResults": "

    The maximum number of results to return in a single call. Specify a value between 1 and 1000. The default value is 1000. To retrieve the remaining results, make another call with the returned NextToken value.

    ", + "DescribeSpotFleetRequestHistoryRequest$MaxResults": "

    The maximum number of results to return in a single call. Specify a value between 1 and 1000. The default value is 1000. To retrieve the remaining results, make another call with the returned NextToken value.

    ", + "DescribeSpotFleetRequestsRequest$MaxResults": "

    The maximum number of results to return in a single call. Specify a value between 1 and 1000. The default value is 1000. To retrieve the remaining results, make another call with the returned NextToken value.

    ", + "DescribeSpotPriceHistoryRequest$MaxResults": "

    The maximum number of results to return in a single call. Specify a value between 1 and 1000. The default value is 1000. To retrieve the remaining results, make another call with the returned NextToken value.

    ", + "DescribeTagsRequest$MaxResults": "

    The maximum number of results to return in a single call. This value can be between 5 and 1000. To retrieve the remaining results, make another call with the returned NextToken value.

    ", + "DescribeVolumeStatusRequest$MaxResults": "

    The maximum number of volume results returned by DescribeVolumeStatus in paginated output. When this parameter is used, the request only returns MaxResults results in a single page along with a NextToken response element. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned. If this parameter is not used, then DescribeVolumeStatus returns all results. You cannot specify this parameter and the volume IDs parameter in the same request.

    ", + "DescribeVolumesRequest$MaxResults": "

    The maximum number of volume results returned by DescribeVolumes in paginated output. When this parameter is used, DescribeVolumes only returns MaxResults results in a single page along with a NextToken response element. The remaining results of the initial request can be seen by sending another DescribeVolumes request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned. If this parameter is not used, then DescribeVolumes returns all results. You cannot specify this parameter and the volume IDs parameter in the same request.

    ", + "DescribeVpcEndpointServicesRequest$MaxResults": "

    The maximum number of items to return for this request. The request returns a token that you can specify in a subsequent call to get the next set of results.

    Constraint: If the value is greater than 1000, we return only 1000 items.

    ", + "DescribeVpcEndpointsRequest$MaxResults": "

    The maximum number of items to return for this request. The request returns a token that you can specify in a subsequent call to get the next set of results.

    Constraint: If the value is greater than 1000, we return only 1000 items.

    ", + "EbsBlockDevice$VolumeSize": "

    The size of the volume, in GiB.

    Constraints: 1-16384 for General Purpose SSD (gp2), 4-16384 for Provisioned IOPS SSD (io1), 500-16384 for Throughput Optimized HDD (st1), 500-16384 for Cold HDD (sc1), and 1-1024 for Magnetic (standard) volumes. If you specify a snapshot, the volume size must be equal to or larger than the snapshot size.

    Default: If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size.

    ", + "EbsBlockDevice$Iops": "

    The number of I/O operations per second (IOPS) that the volume supports. For io1, this represents the number of IOPS that are provisioned for the volume. For gp2, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. For more information on General Purpose SSD baseline performance, I/O credits, and bursting, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.

    Constraint: Range is 100-20000 IOPS for io1 volumes and 100-10000 IOPS for gp2 volumes.

    Condition: This parameter is required for requests to create io1 volumes; it is not used in requests to create gp2, st1, sc1, or standard volumes.

    ", + "HostProperties$Sockets": "

    The number of sockets on the Dedicated host.

    ", + "HostProperties$Cores": "

    The number of cores on the Dedicated host.

    ", + "HostProperties$TotalVCpus": "

    The number of vCPUs on the Dedicated host.

    ", + "IcmpTypeCode$Type": "

    The ICMP type. A value of -1 means all types.

    ", + "IcmpTypeCode$Code": "

    The ICMP code. A value of -1 means all codes for the specified ICMP type.

    ", + "Instance$AmiLaunchIndex": "

    The AMI launch index, which can be used to find this instance in the launch group.

    ", + "InstanceCapacity$AvailableCapacity": "

    The number of instances that can still be launched onto the Dedicated host.

    ", + "InstanceCapacity$TotalCapacity": "

    The total number of instances that can be launched onto the Dedicated host.

    ", + "InstanceCount$InstanceCount": "

    The number of listed Reserved Instances in the specified state.

    ", + "InstanceNetworkInterfaceAttachment$DeviceIndex": "

    The index of the device on the instance for the network interface attachment.

    ", + "InstanceNetworkInterfaceSpecification$DeviceIndex": "

    The index of the device on the instance for the network interface attachment. If you are specifying a network interface in a RunInstances request, you must provide the device index.

    ", + "InstanceNetworkInterfaceSpecification$SecondaryPrivateIpAddressCount": "

    The number of secondary private IP addresses. You can't specify this option and specify more than one private IP address using the private IP addresses option.

    ", + "InstanceState$Code": "

    The low byte represents the state. The high byte is an opaque internal value and should be ignored.

    • 0 : pending

    • 16 : running

    • 32 : shutting-down

    • 48 : terminated

    • 64 : stopping

    • 80 : stopped
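
    A short sketch of honoring that contract when reading the code client-side:

        package snippets

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/ec2"
        )

        // stateCode masks off the opaque high byte, leaving only the documented
        // values (0, 16, 32, 48, 64, 80).
        func stateCode(inst *ec2.Instance) int64 {
            return aws.Int64Value(inst.State.Code) & 0xFF
        }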

    ", + "IpPermission$FromPort": "

    The start of port range for the TCP and UDP protocols, or an ICMP type number. A value of -1 indicates all ICMP types.

    ", + "IpPermission$ToPort": "

    The end of port range for the TCP and UDP protocols, or an ICMP code. A value of -1 indicates all ICMP codes for the specified ICMP type.

    ", + "ModifySpotFleetRequestRequest$TargetCapacity": "

    The size of the fleet.

    ", + "NetworkAclEntry$RuleNumber": "

    The rule number for the entry. ACL entries are processed in ascending order by rule number.

    ", + "NetworkInterfaceAttachment$DeviceIndex": "

    The device index of the network interface attachment on the instance.

    ", + "OccurrenceDayRequestSet$member": null, + "OccurrenceDaySet$member": null, + "PortRange$From": "

    The first port in the range.

    ", + "PortRange$To": "

    The last port in the range.

    ", + "PricingDetail$Count": "

    The number of reservations available for the price.

    ", + "PurchaseRequest$InstanceCount": "

    The number of instances.

    ", + "PurchaseReservedInstancesOfferingRequest$InstanceCount": "

    The number of Reserved Instances to purchase.

    ", + "ReplaceNetworkAclEntryRequest$RuleNumber": "

    The rule number of the entry to replace.

    ", + "RequestSpotInstancesRequest$InstanceCount": "

    The maximum number of Spot instances to launch.

    Default: 1

    ", + "RequestSpotInstancesRequest$BlockDurationMinutes": "

    The required duration for the Spot instances (also known as Spot blocks), in minutes. This value must be a multiple of 60 (60, 120, 180, 240, 300, or 360).

    The duration period starts as soon as your Spot instance receives its instance ID. At the end of the duration period, Amazon EC2 marks the Spot instance for termination and provides a Spot instance termination notice, which gives the instance a two-minute warning before it terminates.

    Note that you can't specify an Availability Zone group or a launch group if you specify a duration.
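
    A sketch of requesting a two-hour Spot block (price, AMI, and instance type are placeholders); the duration is a multiple of 60 and no Availability Zone group or launch group is set:

        package snippets

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/ec2"
        )

        func requestSpotBlock(svc *ec2.EC2) error {
            _, err := svc.RequestSpotInstances(&ec2.RequestSpotInstancesInput{
                SpotPrice:            aws.String("0.05"), // placeholder
                InstanceCount:        aws.Int64(1),
                BlockDurationMinutes: aws.Int64(120), // must be 60, 120, 180, 240, 300, or 360
                LaunchSpecification: &ec2.RequestSpotLaunchSpecification{
                    ImageId:      aws.String("ami-12345678"), // placeholder
                    InstanceType: aws.String("m3.medium"),
                },
            })
            return err
        }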

    ", + "ReservedInstances$InstanceCount": "

    The number of reservations purchased.

    ", + "ReservedInstancesConfiguration$InstanceCount": "

    The number of modified Reserved Instances.

    ", + "RevokeSecurityGroupEgressRequest$FromPort": "

    The start of port range for the TCP and UDP protocols, or an ICMP type number. We recommend that you specify the port range in a set of IP permissions instead.

    ", + "RevokeSecurityGroupEgressRequest$ToPort": "

    The end of port range for the TCP and UDP protocols, or an ICMP code number. We recommend that you specify the port range in a set of IP permissions instead.

    ", + "RevokeSecurityGroupIngressRequest$FromPort": "

    The start of port range for the TCP and UDP protocols, or an ICMP type number. For the ICMP type number, use -1 to specify all ICMP types.

    ", + "RevokeSecurityGroupIngressRequest$ToPort": "

    The end of port range for the TCP and UDP protocols, or an ICMP code number. For the ICMP code number, use -1 to specify all ICMP codes for the ICMP type.

    ", + "RunInstancesRequest$MinCount": "

    The minimum number of instances to launch. If you specify a minimum that is more instances than Amazon EC2 can launch in the target Availability Zone, Amazon EC2 launches no instances.

    Constraints: Between 1 and the maximum number you're allowed for the specified instance type. For more information about the default limits, and how to request an increase, see How many instances can I run in Amazon EC2 in the Amazon EC2 General FAQ.

    ", + "RunInstancesRequest$MaxCount": "

    The maximum number of instances to launch. If you specify more instances than Amazon EC2 can launch in the target Availability Zone, Amazon EC2 launches the largest possible number of instances above MinCount.

    Constraints: Between 1 and the maximum number you're allowed for the specified instance type. For more information about the default limits, and how to request an increase, see How many instances can I run in Amazon EC2 in the Amazon EC2 FAQ.
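
    A sketch of the best-effort semantics (the AMI is a placeholder): this call returns anywhere from one to three instances, or fails entirely if even one cannot be placed.

        package snippets

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/ec2"
        )

        func launchBestEffort(svc *ec2.EC2) (*ec2.Reservation, error) {
            return svc.RunInstances(&ec2.RunInstancesInput{
                ImageId:      aws.String("ami-12345678"), // placeholder
                InstanceType: aws.String("t2.micro"),
                MinCount:     aws.Int64(1),
                MaxCount:     aws.Int64(3),
            })
        }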

    ", + "RunScheduledInstancesRequest$InstanceCount": "

    The number of instances.

    Default: 1

    ", + "ScheduledInstance$SlotDurationInHours": "

    The number of hours in the schedule.

    ", + "ScheduledInstance$TotalScheduledInstanceHours": "

    The total number of hours for a single instance for the entire term.

    ", + "ScheduledInstance$InstanceCount": "

    The number of instances.

    ", + "ScheduledInstanceAvailability$SlotDurationInHours": "

    The number of hours in the schedule.

    ", + "ScheduledInstanceAvailability$TotalScheduledInstanceHours": "

    The total number of hours for a single instance for the entire term.

    ", + "ScheduledInstanceAvailability$AvailableInstanceCount": "

    The number of available instances.

    ", + "ScheduledInstanceAvailability$MinTermDurationInDays": "

    The minimum term. The only possible value is 365 days.

    ", + "ScheduledInstanceAvailability$MaxTermDurationInDays": "

    The maximum term. The only possible value is 365 days.

    ", + "ScheduledInstanceRecurrence$Interval": "

    The interval quantity. The interval unit depends on the value of frequency. For example, every 2 weeks or every 2 months.

    ", + "ScheduledInstanceRecurrenceRequest$Interval": "

    The interval quantity. The interval unit depends on the value of Frequency. For example, every 2 weeks or every 2 months.

    ", + "ScheduledInstancesEbs$VolumeSize": "

    The size of the volume, in GiB.

    Default: If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size.

    ", + "ScheduledInstancesEbs$Iops": "

    The number of I/O operations per second (IOPS) that the volume supports. For io1 volumes, this represents the number of IOPS that are provisioned for the volume. For gp2 volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. For more information about gp2 baseline performance, I/O credits, and bursting, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.

    Constraint: Range is 100-20000 IOPS for io1 volumes and 100-10000 IOPS for gp2 volumes.

    Condition: This parameter is required for requests to create io1 volumes; it is not used in requests to create gp2, st1, sc1, or standard volumes.

    ", + "ScheduledInstancesNetworkInterface$DeviceIndex": "

    The index of the device for the network interface attachment.

    ", + "ScheduledInstancesNetworkInterface$SecondaryPrivateIpAddressCount": "

    The number of secondary private IP addresses.

    ", + "Snapshot$VolumeSize": "

    The size of the volume, in GiB.

    ", + "SpotFleetRequestConfigData$TargetCapacity": "

    The number of units to request. You can choose to set the target capacity in terms of instances or a performance characteristic that is important to your application workload, such as vCPUs, memory, or I/O.

    ", + "SpotInstanceRequest$BlockDurationMinutes": "

    The duration for the Spot instance, in minutes.

    ", + "StaleIpPermission$FromPort": "

    The start of the port range for the TCP and UDP protocols, or an ICMP type number. A value of -1 indicates all ICMP types.

    ", + "StaleIpPermission$ToPort": "

    The end of the port range for the TCP and UDP protocols, or an ICMP code number. A value of -1 indicates all ICMP codes for the specified ICMP type.

    ", + "Subnet$AvailableIpAddressCount": "

    The number of unused IP addresses in the subnet. Note that the IP addresses for any stopped instances are considered unavailable.

    ", + "VgwTelemetry$AcceptedRouteCount": "

    The number of accepted routes.

    ", + "Volume$Size": "

    The size of the volume, in GiBs.

    ", + "Volume$Iops": "

    The number of I/O operations per second (IOPS) that the volume supports. For Provisioned IOPS SSD volumes, this represents the number of IOPS that are provisioned for the volume. For General Purpose SSD volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. For more information on General Purpose SSD baseline performance, I/O credits, and bursting, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.

    Constraint: Range is 100-20000 IOPS for io1 volumes and 100-10000 IOPS for gp2 volumes.

    Condition: This parameter is required for requests to create io1 volumes; it is not used in requests to create gp2, st1, sc1, or standard volumes.

    " + } + }, + "InternetGateway": { + "base": "

    Describes an Internet gateway.

    ", + "refs": { + "CreateInternetGatewayResult$InternetGateway": "

    Information about the Internet gateway.

    ", + "InternetGatewayList$member": null + } + }, + "InternetGatewayAttachment": { + "base": "

    Describes the attachment of a VPC to an Internet gateway.

    ", + "refs": { + "InternetGatewayAttachmentList$member": null + } + }, + "InternetGatewayAttachmentList": { + "base": null, + "refs": { + "InternetGateway$Attachments": "

    Any VPCs attached to the Internet gateway.

    " + } + }, + "InternetGatewayList": { + "base": null, + "refs": { + "DescribeInternetGatewaysResult$InternetGateways": "

    Information about one or more Internet gateways.

    " + } + }, + "IpPermission": { + "base": "

    Describes a security group rule.

    ", + "refs": { + "IpPermissionList$member": null + } + }, + "IpPermissionList": { + "base": null, + "refs": { + "AuthorizeSecurityGroupEgressRequest$IpPermissions": "

    A set of IP permissions. You can't specify a destination security group and a CIDR IP address range.

    ", + "AuthorizeSecurityGroupIngressRequest$IpPermissions": "

    A set of IP permissions. Can be used to specify multiple rules in a single command.
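
    A sketch of batching two ingress rules into one call this way (the group ID is a placeholder):

        package snippets

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/ec2"
        )

        func openWebPorts(svc *ec2.EC2) error {
            anywhere := []*ec2.IpRange{{CidrIp: aws.String("0.0.0.0/0")}}
            _, err := svc.AuthorizeSecurityGroupIngress(&ec2.AuthorizeSecurityGroupIngressInput{
                GroupId: aws.String("sg-12345678"), // placeholder
                IpPermissions: []*ec2.IpPermission{
                    {IpProtocol: aws.String("tcp"), FromPort: aws.Int64(80), ToPort: aws.Int64(80), IpRanges: anywhere},
                    {IpProtocol: aws.String("tcp"), FromPort: aws.Int64(443), ToPort: aws.Int64(443), IpRanges: anywhere},
                },
            })
            return err
        }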

    ", + "RevokeSecurityGroupEgressRequest$IpPermissions": "

    A set of IP permissions. You can't specify a destination security group and a CIDR IP address range.

    ", + "RevokeSecurityGroupIngressRequest$IpPermissions": "

    A set of IP permissions. You can't specify a source security group and a CIDR IP address range.

    ", + "SecurityGroup$IpPermissions": "

    One or more inbound rules associated with the security group.

    ", + "SecurityGroup$IpPermissionsEgress": "

    [EC2-VPC] One or more outbound rules associated with the security group.

    " + } + }, + "IpRange": { + "base": "

    Describes an IP range.

    ", + "refs": { + "IpRangeList$member": null + } + }, + "IpRangeList": { + "base": null, + "refs": { + "IpPermission$IpRanges": "

    One or more IP ranges.

    " + } + }, + "IpRanges": { + "base": null, + "refs": { + "StaleIpPermission$IpRanges": "

    One or more IP ranges. Not applicable for stale security group rules.

    " + } + }, + "KeyNameStringList": { + "base": null, + "refs": { + "DescribeKeyPairsRequest$KeyNames": "

    One or more key pair names.

    Default: Describes all your key pairs.

    " + } + }, + "KeyPair": { + "base": "

    Describes a key pair.

    ", + "refs": { + } + }, + "KeyPairInfo": { + "base": "

    Describes a key pair.

    ", + "refs": { + "KeyPairList$member": null + } + }, + "KeyPairList": { + "base": null, + "refs": { + "DescribeKeyPairsResult$KeyPairs": "

    Information about one or more key pairs.

    " + } + }, + "LaunchPermission": { + "base": "

    Describes a launch permission.

    ", + "refs": { + "LaunchPermissionList$member": null + } + }, + "LaunchPermissionList": { + "base": null, + "refs": { + "ImageAttribute$LaunchPermissions": "

    One or more launch permissions.

    ", + "LaunchPermissionModifications$Add": "

    The AWS account ID to add to the list of launch permissions for the AMI.

    ", + "LaunchPermissionModifications$Remove": "

    The AWS account ID to remove from the list of launch permissions for the AMI.

    " + } + }, + "LaunchPermissionModifications": { + "base": "

    Describes a launch permission modification.

    ", + "refs": { + "ModifyImageAttributeRequest$LaunchPermission": "

    A launch permission modification.
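
    A sketch of adding and removing accounts in one modification (all IDs are placeholders):

        package snippets

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/ec2"
        )

        func shareAMI(svc *ec2.EC2) error {
            _, err := svc.ModifyImageAttribute(&ec2.ModifyImageAttributeInput{
                ImageId: aws.String("ami-12345678"), // placeholder
                LaunchPermission: &ec2.LaunchPermissionModifications{
                    Add:    []*ec2.LaunchPermission{{UserId: aws.String("111122223333")}},
                    Remove: []*ec2.LaunchPermission{{UserId: aws.String("444455556666")}},
                },
            })
            return err
        }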

    " + } + }, + "LaunchSpecification": { + "base": "

    Describes the launch specification for an instance.

    ", + "refs": { + "SpotInstanceRequest$LaunchSpecification": "

    Additional information for launching instances.

    " + } + }, + "LaunchSpecsList": { + "base": null, + "refs": { + "SpotFleetRequestConfigData$LaunchSpecifications": "

    Information about the launch specifications for the Spot fleet request.

    " + } + }, + "ListingState": { + "base": null, + "refs": { + "InstanceCount$State": "

    The states of the listed Reserved Instances.

    " + } + }, + "ListingStatus": { + "base": null, + "refs": { + "ReservedInstancesListing$Status": "

    The status of the Reserved Instance listing.

    " + } + }, + "Long": { + "base": null, + "refs": { + "DescribeReservedInstancesOfferingsRequest$MinDuration": "

    The minimum duration (in seconds) to filter when searching for offerings.

    Default: 2592000 (1 month)

    ", + "DescribeReservedInstancesOfferingsRequest$MaxDuration": "

    The maximum duration (in seconds) to filter when searching for offerings.

    Default: 94608000 (3 years)

    ", + "DiskImageDescription$Size": "

    The size of the disk image, in GiB.

    ", + "DiskImageDetail$Bytes": "

    The size of the disk image, in bytes.

    ", + "DiskImageVolumeDescription$Size": "

    The size of the volume, in GiB.

    ", + "ImportInstanceVolumeDetailItem$BytesConverted": "

    The number of bytes converted so far.

    ", + "ImportVolumeTaskDetails$BytesConverted": "

    The number of bytes converted so far.

    ", + "PriceSchedule$Term": "

    The number of months remaining in the reservation. For example, 2 is the second to the last month before the capacity reservation expires.

    ", + "PriceScheduleSpecification$Term": "

    The number of months remaining in the reservation. For example, 2 is the second to the last month before the capacity reservation expires.

    ", + "ReservedInstances$Duration": "

    The duration of the Reserved Instance, in seconds.

    ", + "ReservedInstancesOffering$Duration": "

    The duration of the Reserved Instance, in seconds.

    ", + "VolumeDetail$Size": "

    The size of the volume, in GiB.

    " + } + }, + "MaxResults": { + "base": null, + "refs": { + "DescribeStaleSecurityGroupsRequest$MaxResults": "

    The maximum number of items to return for this request. The request returns a token that you can specify in a subsequent call to get the next set of results.

    ", + "DescribeVpcClassicLinkDnsSupportRequest$MaxResults": "

    The maximum number of items to return for this request. The request returns a token that you can specify in a subsequent call to get the next set of results.

    " + } + }, + "ModifyHostsRequest": { + "base": "

    Contains the parameters for ModifyHosts.

    ", + "refs": { + } + }, + "ModifyHostsResult": { + "base": "

    Contains the output of ModifyHosts.

    ", + "refs": { + } + }, + "ModifyIdFormatRequest": { + "base": "

    Contains the parameters of ModifyIdFormat.

    ", + "refs": { + } + }, + "ModifyImageAttributeRequest": { + "base": "

    Contains the parameters for ModifyImageAttribute.

    ", + "refs": { + } + }, + "ModifyInstanceAttributeRequest": { + "base": "

    Contains the parameters for ModifyInstanceAttribute.

    ", + "refs": { + } + }, + "ModifyInstancePlacementRequest": { + "base": "

    Contains the parameters for ModifyInstancePlacement.

    ", + "refs": { + } + }, + "ModifyInstancePlacementResult": { + "base": "

    Contains the output of ModifyInstancePlacement.

    ", + "refs": { + } + }, + "ModifyNetworkInterfaceAttributeRequest": { + "base": "

    Contains the parameters for ModifyNetworkInterfaceAttribute.

    ", + "refs": { + } + }, + "ModifyReservedInstancesRequest": { + "base": "

    Contains the parameters for ModifyReservedInstances.

    ", + "refs": { + } + }, + "ModifyReservedInstancesResult": { + "base": "

    Contains the output of ModifyReservedInstances.

    ", + "refs": { + } + }, + "ModifySnapshotAttributeRequest": { + "base": "

    Contains the parameters for ModifySnapshotAttribute.

    ", + "refs": { + } + }, + "ModifySpotFleetRequestRequest": { + "base": "

    Contains the parameters for ModifySpotFleetRequest.

    ", + "refs": { + } + }, + "ModifySpotFleetRequestResponse": { + "base": "

    Contains the output of ModifySpotFleetRequest.

    ", + "refs": { + } + }, + "ModifySubnetAttributeRequest": { + "base": "

    Contains the parameters for ModifySubnetAttribute.

    ", + "refs": { + } + }, + "ModifyVolumeAttributeRequest": { + "base": "

    Contains the parameters for ModifyVolumeAttribute.

    ", + "refs": { + } + }, + "ModifyVpcAttributeRequest": { + "base": "

    Contains the parameters for ModifyVpcAttribute.

    ", + "refs": { + } + }, + "ModifyVpcEndpointRequest": { + "base": "

    Contains the parameters for ModifyVpcEndpoint.

    ", + "refs": { + } + }, + "ModifyVpcEndpointResult": { + "base": "

    Contains the output of ModifyVpcEndpoint.

    ", + "refs": { + } + }, + "ModifyVpcPeeringConnectionOptionsRequest": { + "base": null, + "refs": { + } + }, + "ModifyVpcPeeringConnectionOptionsResult": { + "base": null, + "refs": { + } + }, + "MonitorInstancesRequest": { + "base": "

    Contains the parameters for MonitorInstances.

    ", + "refs": { + } + }, + "MonitorInstancesResult": { + "base": "

    Contains the output of MonitorInstances.

    ", + "refs": { + } + }, + "Monitoring": { + "base": "

    Describes the monitoring for the instance.

    ", + "refs": { + "Instance$Monitoring": "

    The monitoring information for the instance.

    ", + "InstanceMonitoring$Monitoring": "

    The monitoring information.

    " + } + }, + "MonitoringState": { + "base": null, + "refs": { + "Monitoring$State": "

    Indicates whether monitoring is enabled for the instance.

    " + } + }, + "MoveAddressToVpcRequest": { + "base": "

    Contains the parameters for MoveAddressToVpc.

    ", + "refs": { + } + }, + "MoveAddressToVpcResult": { + "base": "

    Contains the output of MoveAddressToVpc.

    ", + "refs": { + } + }, + "MoveStatus": { + "base": null, + "refs": { + "MovingAddressStatus$MoveStatus": "

    The status of the Elastic IP address that's being moved to the EC2-VPC platform, or restored to the EC2-Classic platform.

    " + } + }, + "MovingAddressStatus": { + "base": "

    Describes the status of a moving Elastic IP address.

    ", + "refs": { + "MovingAddressStatusSet$member": null + } + }, + "MovingAddressStatusSet": { + "base": null, + "refs": { + "DescribeMovingAddressesResult$MovingAddressStatuses": "

    The status for each Elastic IP address.

    " + } + }, + "NatGateway": { + "base": "

    Describes a NAT gateway.

    ", + "refs": { + "CreateNatGatewayResult$NatGateway": "

    Information about the NAT gateway.

    ", + "NatGatewayList$member": null + } + }, + "NatGatewayAddress": { + "base": "

    Describes the IP addresses and network interface associated with a NAT gateway.

    ", + "refs": { + "NatGatewayAddressList$member": null + } + }, + "NatGatewayAddressList": { + "base": null, + "refs": { + "NatGateway$NatGatewayAddresses": "

    Information about the IP addresses and network interface associated with the NAT gateway.

    " + } + }, + "NatGatewayList": { + "base": null, + "refs": { + "DescribeNatGatewaysResult$NatGateways": "

    Information about the NAT gateways.

    " + } + }, + "NatGatewayState": { + "base": null, + "refs": { + "NatGateway$State": "

    The state of the NAT gateway.

    • pending: The NAT gateway is being created and is not ready to process traffic.

    • failed: The NAT gateway could not be created. Check the failureCode and failureMessage fields for the reason.

    • available: The NAT gateway is able to process traffic. This status remains until you delete the NAT gateway, and does not indicate the health of the NAT gateway.

    • deleting: The NAT gateway is in the process of being terminated and may still be processing traffic.

    • deleted: The NAT gateway has been terminated and is no longer processing traffic.

    " + } + }, + "NetworkAcl": { + "base": "

    Describes a network ACL.

    ", + "refs": { + "CreateNetworkAclResult$NetworkAcl": "

    Information about the network ACL.

    ", + "NetworkAclList$member": null + } + }, + "NetworkAclAssociation": { + "base": "

    Describes an association between a network ACL and a subnet.

    ", + "refs": { + "NetworkAclAssociationList$member": null + } + }, + "NetworkAclAssociationList": { + "base": null, + "refs": { + "NetworkAcl$Associations": "

    Any associations between the network ACL and one or more subnets

    " + } + }, + "NetworkAclEntry": { + "base": "

    Describes an entry in a network ACL.

    ", + "refs": { + "NetworkAclEntryList$member": null + } + }, + "NetworkAclEntryList": { + "base": null, + "refs": { + "NetworkAcl$Entries": "

    One or more entries (rules) in the network ACL.

    " + } + }, + "NetworkAclList": { + "base": null, + "refs": { + "DescribeNetworkAclsResult$NetworkAcls": "

    Information about one or more network ACLs.

    " + } + }, + "NetworkInterface": { + "base": "

    Describes a network interface.

    ", + "refs": { + "CreateNetworkInterfaceResult$NetworkInterface": "

    Information about the network interface.

    ", + "NetworkInterfaceList$member": null + } + }, + "NetworkInterfaceAssociation": { + "base": "

    Describes association information for an Elastic IP address.

    ", + "refs": { + "NetworkInterface$Association": "

    The association information for an Elastic IP associated with the network interface.

    ", + "NetworkInterfacePrivateIpAddress$Association": "

    The association information for an Elastic IP address associated with the network interface.

    " + } + }, + "NetworkInterfaceAttachment": { + "base": "

    Describes a network interface attachment.

    ", + "refs": { + "DescribeNetworkInterfaceAttributeResult$Attachment": "

    The attachment (if any) of the network interface.

    ", + "NetworkInterface$Attachment": "

    The network interface attachment.

    " + } + }, + "NetworkInterfaceAttachmentChanges": { + "base": "

    Describes an attachment change.

    ", + "refs": { + "ModifyNetworkInterfaceAttributeRequest$Attachment": "

    Information about the interface attachment. If modifying the 'delete on termination' attribute, you must specify the ID of the interface attachment.

    " + } + }, + "NetworkInterfaceAttribute": { + "base": null, + "refs": { + "DescribeNetworkInterfaceAttributeRequest$Attribute": "

    The attribute of the network interface.

    " + } + }, + "NetworkInterfaceIdList": { + "base": null, + "refs": { + "DescribeNetworkInterfacesRequest$NetworkInterfaceIds": "

    One or more network interface IDs.

    Default: Describes all your network interfaces.

    " + } + }, + "NetworkInterfaceList": { + "base": null, + "refs": { + "DescribeNetworkInterfacesResult$NetworkInterfaces": "

    Information about one or more network interfaces.

    " + } + }, + "NetworkInterfacePrivateIpAddress": { + "base": "

    Describes the private IP address of a network interface.

    ", + "refs": { + "NetworkInterfacePrivateIpAddressList$member": null + } + }, + "NetworkInterfacePrivateIpAddressList": { + "base": null, + "refs": { + "NetworkInterface$PrivateIpAddresses": "

    The private IP addresses associated with the network interface.

    " + } + }, + "NetworkInterfaceStatus": { + "base": null, + "refs": { + "InstanceNetworkInterface$Status": "

    The status of the network interface.

    ", + "NetworkInterface$Status": "

    The status of the network interface.

    " + } + }, + "NetworkInterfaceType": { + "base": null, + "refs": { + "NetworkInterface$InterfaceType": "

    The type of interface.

    " + } + }, + "NewDhcpConfiguration": { + "base": null, + "refs": { + "NewDhcpConfigurationList$member": null + } + }, + "NewDhcpConfigurationList": { + "base": null, + "refs": { + "CreateDhcpOptionsRequest$DhcpConfigurations": "

    A DHCP configuration option.

    " + } + }, + "NextToken": { + "base": null, + "refs": { + "DescribeStaleSecurityGroupsRequest$NextToken": "

    The token for the next set of items to return. (You received this token from a prior call.)

    ", + "DescribeVpcClassicLinkDnsSupportRequest$NextToken": "

    The token for the next set of items to return. (You received this token from a prior call.)

    ", + "DescribeVpcClassicLinkDnsSupportResult$NextToken": "

    The token to use when requesting the next set of items.

    " + } + }, + "OccurrenceDayRequestSet": { + "base": null, + "refs": { + "ScheduledInstanceRecurrenceRequest$OccurrenceDays": "

    The days. For a monthly schedule, this is one or more days of the month (1-31). For a weekly schedule, this is one or more days of the week (1-7, where 1 is Sunday). You can't specify this value with a daily schedule. If the occurrence is relative to the end of the month, you can specify only a single day.

    " + } + }, + "OccurrenceDaySet": { + "base": null, + "refs": { + "ScheduledInstanceRecurrence$OccurrenceDaySet": "

    The days. For a monthly schedule, this is one or more days of the month (1-31). For a weekly schedule, this is one or more days of the week (1-7, where 1 is Sunday).

    " + } + }, + "OfferingTypeValues": { + "base": null, + "refs": { + "DescribeReservedInstancesOfferingsRequest$OfferingType": "

    The Reserved Instance offering type. If you are using tools that predate the 2011-11-01 API version, you only have access to the Medium Utilization Reserved Instance offering type.

    ", + "DescribeReservedInstancesRequest$OfferingType": "

    The Reserved Instance offering type. If you are using tools that predate the 2011-11-01 API version, you only have access to the Medium Utilization Reserved Instance offering type.

    ", + "ReservedInstances$OfferingType": "

    The Reserved Instance offering type.

    ", + "ReservedInstancesOffering$OfferingType": "

    The Reserved Instance offering type.

    " + } + }, + "OperationType": { + "base": null, + "refs": { + "ModifyImageAttributeRequest$OperationType": "

    The operation type.

    ", + "ModifySnapshotAttributeRequest$OperationType": "

    The type of operation to perform to the attribute.

    " + } + }, + "OwnerStringList": { + "base": null, + "refs": { + "DescribeImagesRequest$Owners": "

    Filters the images by the owner. Specify an AWS account ID, amazon (owner is Amazon), aws-marketplace (owner is AWS Marketplace), self (owner is the sender of the request). Omitting this option returns all images for which you have launch permissions, regardless of ownership.

    ", + "DescribeSnapshotsRequest$OwnerIds": "

    Returns the snapshots owned by the specified owner. Multiple owners can be specified.

    " + } + }, + "PeeringConnectionOptions": { + "base": "

    Describes the VPC peering connection options.

    ", + "refs": { + "ModifyVpcPeeringConnectionOptionsResult$RequesterPeeringConnectionOptions": "

    Information about the VPC peering connection options for the requester VPC.

    ", + "ModifyVpcPeeringConnectionOptionsResult$AccepterPeeringConnectionOptions": "

    Information about the VPC peering connection options for the accepter VPC.

    " + } + }, + "PeeringConnectionOptionsRequest": { + "base": "

    The VPC peering connection options.

    ", + "refs": { + "ModifyVpcPeeringConnectionOptionsRequest$RequesterPeeringConnectionOptions": "

    The VPC peering connection options for the requester VPC.

    ", + "ModifyVpcPeeringConnectionOptionsRequest$AccepterPeeringConnectionOptions": "

    The VPC peering connection options for the accepter VPC.

    " + } + }, + "PermissionGroup": { + "base": null, + "refs": { + "CreateVolumePermission$Group": "

    The specific group that is to be added or removed from a volume's list of create volume permissions.

    ", + "LaunchPermission$Group": "

    The name of the group.

    " + } + }, + "Placement": { + "base": "

    Describes the placement for the instance.

    ", + "refs": { + "ImportInstanceLaunchSpecification$Placement": "

    The placement information for the instance.

    ", + "Instance$Placement": "

    The location where the instance launched, if applicable.

    ", + "RunInstancesRequest$Placement": "

    The placement for the instance.

    " + } + }, + "PlacementGroup": { + "base": "

    Describes a placement group.

    ", + "refs": { + "PlacementGroupList$member": null + } + }, + "PlacementGroupList": { + "base": null, + "refs": { + "DescribePlacementGroupsResult$PlacementGroups": "

    One or more placement groups.

    " + } + }, + "PlacementGroupState": { + "base": null, + "refs": { + "PlacementGroup$State": "

    The state of the placement group.

    " + } + }, + "PlacementGroupStringList": { + "base": null, + "refs": { + "DescribePlacementGroupsRequest$GroupNames": "

    One or more placement group names.

    Default: Describes all your placement groups, or only those otherwise specified.

    " + } + }, + "PlacementStrategy": { + "base": null, + "refs": { + "CreatePlacementGroupRequest$Strategy": "

    The placement strategy.

    ", + "PlacementGroup$Strategy": "

    The placement strategy.

    " + } + }, + "PlatformValues": { + "base": null, + "refs": { + "Image$Platform": "

    The value is Windows for Windows AMIs; otherwise blank.

    ", + "ImportInstanceRequest$Platform": "

    The instance operating system.

    ", + "ImportInstanceTaskDetails$Platform": "

    The instance operating system.

    ", + "Instance$Platform": "

    The value is Windows for Windows instances; otherwise blank.

    " + } + }, + "PortRange": { + "base": "

    Describes a range of ports.

    ", + "refs": { + "CreateNetworkAclEntryRequest$PortRange": "

    TCP or UDP protocols: The range of ports the rule applies to.

    ", + "NetworkAclEntry$PortRange": "

    TCP or UDP protocols: The range of ports the rule applies to.

    ", + "ReplaceNetworkAclEntryRequest$PortRange": "

    TCP or UDP protocols: The range of ports the rule applies to. Required if specifying 6 (TCP) or 17 (UDP) for the protocol.

    " + } + }, + "PrefixList": { + "base": "

    Describes prefixes for AWS services.

    ", + "refs": { + "PrefixListSet$member": null + } + }, + "PrefixListId": { + "base": "

    The ID of the prefix.

    ", + "refs": { + "PrefixListIdList$member": null + } + }, + "PrefixListIdList": { + "base": null, + "refs": { + "IpPermission$PrefixListIds": "

    (Valid for AuthorizeSecurityGroupEgress, RevokeSecurityGroupEgress and DescribeSecurityGroups only) One or more prefix list IDs for an AWS service. In an AuthorizeSecurityGroupEgress request, this is the AWS service that you want to access through a VPC endpoint from instances associated with the security group.

    " + } + }, + "PrefixListIdSet": { + "base": null, + "refs": { + "StaleIpPermission$PrefixListIds": "

    One or more prefix list IDs for an AWS service. Not applicable for stale security group rules.

    " + } + }, + "PrefixListSet": { + "base": null, + "refs": { + "DescribePrefixListsResult$PrefixLists": "

    All available prefix lists.

    " + } + }, + "PriceSchedule": { + "base": "

    Describes the price for a Reserved Instance.

    ", + "refs": { + "PriceScheduleList$member": null + } + }, + "PriceScheduleList": { + "base": null, + "refs": { + "ReservedInstancesListing$PriceSchedules": "

    The price of the Reserved Instance listing.

    " + } + }, + "PriceScheduleSpecification": { + "base": "

    Describes the price for a Reserved Instance.

    ", + "refs": { + "PriceScheduleSpecificationList$member": null + } + }, + "PriceScheduleSpecificationList": { + "base": null, + "refs": { + "CreateReservedInstancesListingRequest$PriceSchedules": "

    A list specifying the price of the Reserved Instance for each month remaining in the Reserved Instance term.

    " + } + }, + "PricingDetail": { + "base": "

    Describes a Reserved Instance offering.

    ", + "refs": { + "PricingDetailsList$member": null + } + }, + "PricingDetailsList": { + "base": null, + "refs": { + "ReservedInstancesOffering$PricingDetails": "

    The pricing details of the Reserved Instance offering.

    " + } + }, + "PrivateIpAddressConfigSet": { + "base": null, + "refs": { + "ScheduledInstancesNetworkInterface$PrivateIpAddressConfigs": "

    The private IP addresses.

    " + } + }, + "PrivateIpAddressSpecification": { + "base": "

    Describes a secondary private IP address for a network interface.

    ", + "refs": { + "PrivateIpAddressSpecificationList$member": null + } + }, + "PrivateIpAddressSpecificationList": { + "base": null, + "refs": { + "CreateNetworkInterfaceRequest$PrivateIpAddresses": "

    One or more private IP addresses.

    ", + "InstanceNetworkInterfaceSpecification$PrivateIpAddresses": "

    One or more private IP addresses to assign to the network interface. Only one private IP address can be designated as primary.

    " + } + }, + "PrivateIpAddressStringList": { + "base": null, + "refs": { + "AssignPrivateIpAddressesRequest$PrivateIpAddresses": "

    One or more IP addresses to be assigned as a secondary private IP address to the network interface. You can't specify this parameter when also specifying a number of secondary IP addresses.

    If you don't specify an IP address, Amazon EC2 automatically selects an IP address within the subnet range.

    ", + "UnassignPrivateIpAddressesRequest$PrivateIpAddresses": "

    The secondary private IP addresses to unassign from the network interface. You can specify this option multiple times to unassign more than one IP address.

    " + } + }, + "ProductCode": { + "base": "

    Describes a product code.

    ", + "refs": { + "ProductCodeList$member": null + } + }, + "ProductCodeList": { + "base": null, + "refs": { + "DescribeSnapshotAttributeResult$ProductCodes": "

    A list of product codes.

    ", + "DescribeVolumeAttributeResult$ProductCodes": "

    A list of product codes.

    ", + "Image$ProductCodes": "

    Any product codes associated with the AMI.

    ", + "ImageAttribute$ProductCodes": "

    One or more product codes.

    ", + "Instance$ProductCodes": "

    The product codes attached to this instance, if applicable.

    ", + "InstanceAttribute$ProductCodes": "

    A list of product codes.

    " + } + }, + "ProductCodeStringList": { + "base": null, + "refs": { + "ModifyImageAttributeRequest$ProductCodes": "

    One or more product codes. After you add a product code to an AMI, it can't be removed. This is only valid when modifying the productCodes attribute.

    " + } + }, + "ProductCodeValues": { + "base": null, + "refs": { + "ProductCode$ProductCodeType": "

    The type of product code.

    " + } + }, + "ProductDescriptionList": { + "base": null, + "refs": { + "DescribeSpotPriceHistoryRequest$ProductDescriptions": "

    Filters the results by the specified basic product descriptions.

    " + } + }, + "PropagatingVgw": { + "base": "

    Describes a virtual private gateway propagating route.

    ", + "refs": { + "PropagatingVgwList$member": null + } + }, + "PropagatingVgwList": { + "base": null, + "refs": { + "RouteTable$PropagatingVgws": "

    Any virtual private gateway (VGW) propagating routes.

    " + } + }, + "ProvisionedBandwidth": { + "base": "

    Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

    ", + "refs": { + "NatGateway$ProvisionedBandwidth": "

    Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

    " + } + }, + "PublicIpStringList": { + "base": null, + "refs": { + "DescribeAddressesRequest$PublicIps": "

    [EC2-Classic] One or more Elastic IP addresses.

    Default: Describes all your Elastic IP addresses.

    " + } + }, + "PurchaseRequest": { + "base": "

    Describes a request to purchase Scheduled Instances.

    ", + "refs": { + "PurchaseRequestSet$member": null + } + }, + "PurchaseRequestSet": { + "base": null, + "refs": { + "PurchaseScheduledInstancesRequest$PurchaseRequests": "

    One or more purchase requests.

    " + } + }, + "PurchaseReservedInstancesOfferingRequest": { + "base": "

    Contains the parameters for PurchaseReservedInstancesOffering.

    ", + "refs": { + } + }, + "PurchaseReservedInstancesOfferingResult": { + "base": "

    Contains the output of PurchaseReservedInstancesOffering.

    ", + "refs": { + } + }, + "PurchaseScheduledInstancesRequest": { + "base": "

    Contains the parameters for PurchaseScheduledInstances.

    ", + "refs": { + } + }, + "PurchaseScheduledInstancesResult": { + "base": "

    Contains the output of PurchaseScheduledInstances.

    ", + "refs": { + } + }, + "PurchasedScheduledInstanceSet": { + "base": null, + "refs": { + "PurchaseScheduledInstancesResult$ScheduledInstanceSet": "

    Information about the Scheduled Instances.

    " + } + }, + "RIProductDescription": { + "base": null, + "refs": { + "DescribeReservedInstancesOfferingsRequest$ProductDescription": "

    The Reserved Instance product platform description. Instances that include (Amazon VPC) in the description are for use with Amazon VPC.

    ", + "ReservedInstances$ProductDescription": "

    The Reserved Instance product platform description.

    ", + "ReservedInstancesOffering$ProductDescription": "

    The Reserved Instance product platform description.

    ", + "SpotInstanceRequest$ProductDescription": "

    The product description associated with the Spot instance.

    ", + "SpotPrice$ProductDescription": "

    A general description of the AMI.

    " + } + }, + "ReasonCodesList": { + "base": null, + "refs": { + "ReportInstanceStatusRequest$ReasonCodes": "

    One or more reason codes that describes the health state of your instance.

    • instance-stuck-in-state: My instance is stuck in a state.

    • unresponsive: My instance is unresponsive.

    • not-accepting-credentials: My instance is not accepting my credentials.

    • password-not-available: A password is not available for my instance.

    • performance-network: My instance is experiencing performance problems which I believe are network related.

    • performance-instance-store: My instance is experiencing performance problems which I believe are related to the instance stores.

    • performance-ebs-volume: My instance is experiencing performance problems which I believe are related to an EBS volume.

    • performance-other: My instance is experiencing performance problems.

    • other: [explain using the description parameter]

    " + } + }, + "RebootInstancesRequest": { + "base": "

    Contains the parameters for RebootInstances.

    ", + "refs": { + } + }, + "RecurringCharge": { + "base": "

    Describes a recurring charge.

    ", + "refs": { + "RecurringChargesList$member": null + } + }, + "RecurringChargeFrequency": { + "base": null, + "refs": { + "RecurringCharge$Frequency": "

    The frequency of the recurring charge.

    " + } + }, + "RecurringChargesList": { + "base": null, + "refs": { + "ReservedInstances$RecurringCharges": "

    The recurring charge tag assigned to the resource.

    ", + "ReservedInstancesOffering$RecurringCharges": "

    The recurring charge tag assigned to the resource.

    " + } + }, + "Region": { + "base": "

    Describes a region.

    ", + "refs": { + "RegionList$member": null + } + }, + "RegionList": { + "base": null, + "refs": { + "DescribeRegionsResult$Regions": "

    Information about one or more regions.

    " + } + }, + "RegionNameStringList": { + "base": null, + "refs": { + "DescribeRegionsRequest$RegionNames": "

    The names of one or more regions.

    " + } + }, + "RegisterImageRequest": { + "base": "

    Contains the parameters for RegisterImage.

    ", + "refs": { + } + }, + "RegisterImageResult": { + "base": "

    Contains the output of RegisterImage.

    ", + "refs": { + } + }, + "RejectVpcPeeringConnectionRequest": { + "base": "

    Contains the parameters for RejectVpcPeeringConnection.

    ", + "refs": { + } + }, + "RejectVpcPeeringConnectionResult": { + "base": "

    Contains the output of RejectVpcPeeringConnection.

    ", + "refs": { + } + }, + "ReleaseAddressRequest": { + "base": "

    Contains the parameters for ReleaseAddress.

    ", + "refs": { + } + }, + "ReleaseHostsRequest": { + "base": "

    Contains the parameters for ReleaseHosts.

    ", + "refs": { + } + }, + "ReleaseHostsResult": { + "base": "

    Contains the output of ReleaseHosts.

    ", + "refs": { + } + }, + "ReplaceNetworkAclAssociationRequest": { + "base": "

    Contains the parameters for ReplaceNetworkAclAssociation.

    ", + "refs": { + } + }, + "ReplaceNetworkAclAssociationResult": { + "base": "

    Contains the output of ReplaceNetworkAclAssociation.

    ", + "refs": { + } + }, + "ReplaceNetworkAclEntryRequest": { + "base": "

    Contains the parameters for ReplaceNetworkAclEntry.

    ", + "refs": { + } + }, + "ReplaceRouteRequest": { + "base": "

    Contains the parameters for ReplaceRoute.

    ", + "refs": { + } + }, + "ReplaceRouteTableAssociationRequest": { + "base": "

    Contains the parameters for ReplaceRouteTableAssociation.

    ", + "refs": { + } + }, + "ReplaceRouteTableAssociationResult": { + "base": "

    Contains the output of ReplaceRouteTableAssociation.

    ", + "refs": { + } + }, + "ReportInstanceReasonCodes": { + "base": null, + "refs": { + "ReasonCodesList$member": null + } + }, + "ReportInstanceStatusRequest": { + "base": "

    Contains the parameters for ReportInstanceStatus.

    ", + "refs": { + } + }, + "ReportStatusType": { + "base": null, + "refs": { + "ReportInstanceStatusRequest$Status": "

    The status of all instances listed.

    " + } + }, + "RequestHostIdList": { + "base": null, + "refs": { + "DescribeHostsRequest$HostIds": "

    The IDs of the Dedicated hosts. The IDs are used for targeted instance launches.

    ", + "ModifyHostsRequest$HostIds": "

    The host IDs of the Dedicated hosts you want to modify.

    ", + "ReleaseHostsRequest$HostIds": "

    The IDs of the Dedicated hosts you want to release.

    " + } + }, + "RequestSpotFleetRequest": { + "base": "

    Contains the parameters for RequestSpotFleet.

    ", + "refs": { + } + }, + "RequestSpotFleetResponse": { + "base": "

    Contains the output of RequestSpotFleet.

    ", + "refs": { + } + }, + "RequestSpotInstancesRequest": { + "base": "

    Contains the parameters for RequestSpotInstances.

    ", + "refs": { + } + }, + "RequestSpotInstancesResult": { + "base": "

    Contains the output of RequestSpotInstances.

    ", + "refs": { + } + }, + "RequestSpotLaunchSpecification": { + "base": "

    Describes the launch specification for an instance.

    ", + "refs": { + "RequestSpotInstancesRequest$LaunchSpecification": null + } + }, + "Reservation": { + "base": "

    Describes a reservation.

    ", + "refs": { + "ReservationList$member": null + } + }, + "ReservationList": { + "base": null, + "refs": { + "DescribeInstancesResult$Reservations": "

    Zero or more reservations.

    " + } + }, + "ReservedInstanceLimitPrice": { + "base": "

    Describes the limit price of a Reserved Instance offering.

    ", + "refs": { + "PurchaseReservedInstancesOfferingRequest$LimitPrice": "

    Specified for Reserved Instance Marketplace offerings to limit the total order and ensure that the Reserved Instances are not purchased at unexpected prices.

    " + } + }, + "ReservedInstanceState": { + "base": null, + "refs": { + "ReservedInstances$State": "

    The state of the Reserved Instance purchase.

    " + } + }, + "ReservedInstances": { + "base": "

    Describes a Reserved Instance.

    ", + "refs": { + "ReservedInstancesList$member": null + } + }, + "ReservedInstancesConfiguration": { + "base": "

    Describes the configuration settings for the modified Reserved Instances.

    ", + "refs": { + "ReservedInstancesConfigurationList$member": null, + "ReservedInstancesModificationResult$TargetConfiguration": "

    The target Reserved Instances configurations supplied as part of the modification request.

    " + } + }, + "ReservedInstancesConfigurationList": { + "base": null, + "refs": { + "ModifyReservedInstancesRequest$TargetConfigurations": "

    The configuration settings for the Reserved Instances to modify.

    " + } + }, + "ReservedInstancesId": { + "base": "

    Describes the ID of a Reserved Instance.

    ", + "refs": { + "ReservedIntancesIds$member": null + } + }, + "ReservedInstancesIdStringList": { + "base": null, + "refs": { + "DescribeReservedInstancesRequest$ReservedInstancesIds": "

    One or more Reserved Instance IDs.

    Default: Describes all your Reserved Instances, or only those otherwise specified.

    ", + "ModifyReservedInstancesRequest$ReservedInstancesIds": "

    The IDs of the Reserved Instances to modify.

    " + } + }, + "ReservedInstancesList": { + "base": null, + "refs": { + "DescribeReservedInstancesResult$ReservedInstances": "

    A list of Reserved Instances.

    " + } + }, + "ReservedInstancesListing": { + "base": "

    Describes a Reserved Instance listing.

    ", + "refs": { + "ReservedInstancesListingList$member": null + } + }, + "ReservedInstancesListingList": { + "base": null, + "refs": { + "CancelReservedInstancesListingResult$ReservedInstancesListings": "

    The Reserved Instance listing.

    ", + "CreateReservedInstancesListingResult$ReservedInstancesListings": "

    Information about the Reserved Instance listing.

    ", + "DescribeReservedInstancesListingsResult$ReservedInstancesListings": "

    Information about the Reserved Instance listing.

    " + } + }, + "ReservedInstancesModification": { + "base": "

    Describes a Reserved Instance modification.

    ", + "refs": { + "ReservedInstancesModificationList$member": null + } + }, + "ReservedInstancesModificationIdStringList": { + "base": null, + "refs": { + "DescribeReservedInstancesModificationsRequest$ReservedInstancesModificationIds": "

    IDs for the submitted modification request.

    " + } + }, + "ReservedInstancesModificationList": { + "base": null, + "refs": { + "DescribeReservedInstancesModificationsResult$ReservedInstancesModifications": "

    The Reserved Instance modification information.

    " + } + }, + "ReservedInstancesModificationResult": { + "base": "

    Describes the modification request/s.

    ", + "refs": { + "ReservedInstancesModificationResultList$member": null + } + }, + "ReservedInstancesModificationResultList": { + "base": null, + "refs": { + "ReservedInstancesModification$ModificationResults": "

    Contains target configurations along with their corresponding new Reserved Instance IDs.

    " + } + }, + "ReservedInstancesOffering": { + "base": "

    Describes a Reserved Instance offering.

    ", + "refs": { + "ReservedInstancesOfferingList$member": null + } + }, + "ReservedInstancesOfferingIdStringList": { + "base": null, + "refs": { + "DescribeReservedInstancesOfferingsRequest$ReservedInstancesOfferingIds": "

    One or more Reserved Instances offering IDs.

    " + } + }, + "ReservedInstancesOfferingList": { + "base": null, + "refs": { + "DescribeReservedInstancesOfferingsResult$ReservedInstancesOfferings": "

    A list of Reserved Instances offerings.

    " + } + }, + "ReservedIntancesIds": { + "base": null, + "refs": { + "ReservedInstancesModification$ReservedInstancesIds": "

    The IDs of one or more Reserved Instances.

    " + } + }, + "ResetImageAttributeName": { + "base": null, + "refs": { + "ResetImageAttributeRequest$Attribute": "

    The attribute to reset (currently you can only reset the launch permission attribute).

    " + } + }, + "ResetImageAttributeRequest": { + "base": "

    Contains the parameters for ResetImageAttribute.

    ", + "refs": { + } + }, + "ResetInstanceAttributeRequest": { + "base": "

    Contains the parameters for ResetInstanceAttribute.

    ", + "refs": { + } + }, + "ResetNetworkInterfaceAttributeRequest": { + "base": "

    Contains the parameters for ResetNetworkInterfaceAttribute.

    ", + "refs": { + } + }, + "ResetSnapshotAttributeRequest": { + "base": "

    Contains the parameters for ResetSnapshotAttribute.

    ", + "refs": { + } + }, + "ResourceIdList": { + "base": null, + "refs": { + "CreateTagsRequest$Resources": "

    The IDs of one or more resources to tag. For example, ami-1a2b3c4d.

    ", + "DeleteTagsRequest$Resources": "

    The ID of the resource. For example, ami-1a2b3c4d. You can specify more than one resource ID.

    " + } + }, + "ResourceType": { + "base": null, + "refs": { + "TagDescription$ResourceType": "

    The resource type.

    " + } + }, + "ResponseHostIdList": { + "base": null, + "refs": { + "AllocateHostsResult$HostIds": "

    The ID of the allocated Dedicated host. This is used when you want to launch an instance onto a specific host.

    ", + "ModifyHostsResult$Successful": "

    The IDs of the Dedicated hosts that were successfully modified.

    ", + "ReleaseHostsResult$Successful": "

    The IDs of the Dedicated hosts that were successfully released.

    " + } + }, + "RestorableByStringList": { + "base": null, + "refs": { + "DescribeSnapshotsRequest$RestorableByUserIds": "

    One or more AWS accounts IDs that can create volumes from the snapshot.

    " + } + }, + "RestoreAddressToClassicRequest": { + "base": "

    Contains the parameters for RestoreAddressToClassic.

    ", + "refs": { + } + }, + "RestoreAddressToClassicResult": { + "base": "

    Contains the output of RestoreAddressToClassic.

    ", + "refs": { + } + }, + "RevokeSecurityGroupEgressRequest": { + "base": "

    Contains the parameters for RevokeSecurityGroupEgress.

    ", + "refs": { + } + }, + "RevokeSecurityGroupIngressRequest": { + "base": "

    Contains the parameters for RevokeSecurityGroupIngress.

    ", + "refs": { + } + }, + "Route": { + "base": "

    Describes a route in a route table.

    ", + "refs": { + "RouteList$member": null + } + }, + "RouteList": { + "base": null, + "refs": { + "RouteTable$Routes": "

    The routes in the route table.

    " + } + }, + "RouteOrigin": { + "base": null, + "refs": { + "Route$Origin": "

    Describes how the route was created.

    • CreateRouteTable - The route was automatically created when the route table was created.

    • CreateRoute - The route was manually added to the route table.

    • EnableVgwRoutePropagation - The route was propagated by route propagation.

    " + } + }, + "RouteState": { + "base": null, + "refs": { + "Route$State": "

    The state of the route. The blackhole state indicates that the route's target isn't available (for example, the specified gateway isn't attached to the VPC, or the specified NAT instance has been terminated).

    " + } + }, + "RouteTable": { + "base": "

    Describes a route table.

    ", + "refs": { + "CreateRouteTableResult$RouteTable": "

    Information about the route table.

    ", + "RouteTableList$member": null + } + }, + "RouteTableAssociation": { + "base": "

    Describes an association between a route table and a subnet.

    ", + "refs": { + "RouteTableAssociationList$member": null + } + }, + "RouteTableAssociationList": { + "base": null, + "refs": { + "RouteTable$Associations": "

    The associations between the route table and one or more subnets.

    " + } + }, + "RouteTableList": { + "base": null, + "refs": { + "DescribeRouteTablesResult$RouteTables": "

    Information about one or more route tables.

    " + } + }, + "RuleAction": { + "base": null, + "refs": { + "CreateNetworkAclEntryRequest$RuleAction": "

    Indicates whether to allow or deny the traffic that matches the rule.

    ", + "NetworkAclEntry$RuleAction": "

    Indicates whether to allow or deny the traffic that matches the rule.

    ", + "ReplaceNetworkAclEntryRequest$RuleAction": "

    Indicates whether to allow or deny the traffic that matches the rule.

    " + } + }, + "RunInstancesMonitoringEnabled": { + "base": "

    Describes the monitoring for the instance.

    ", + "refs": { + "LaunchSpecification$Monitoring": null, + "RequestSpotLaunchSpecification$Monitoring": null, + "RunInstancesRequest$Monitoring": "

    The monitoring for the instance.

    " + } + }, + "RunInstancesRequest": { + "base": "

    Contains the parameters for RunInstances.

    ", + "refs": { + } + }, + "RunScheduledInstancesRequest": { + "base": "

    Contains the parameters for RunScheduledInstances.

    ", + "refs": { + } + }, + "RunScheduledInstancesResult": { + "base": "

    Contains the output of RunScheduledInstances.

    ", + "refs": { + } + }, + "S3Storage": { + "base": "

    Describes the storage parameters for S3 and S3 buckets for an instance store-backed AMI.

    ", + "refs": { + "Storage$S3": "

    An Amazon S3 storage location.

    " + } + }, + "ScheduledInstance": { + "base": "

    Describes a Scheduled Instance.

    ", + "refs": { + "PurchasedScheduledInstanceSet$member": null, + "ScheduledInstanceSet$member": null + } + }, + "ScheduledInstanceAvailability": { + "base": "

    Describes a schedule that is available for your Scheduled Instances.

    ", + "refs": { + "ScheduledInstanceAvailabilitySet$member": null + } + }, + "ScheduledInstanceAvailabilitySet": { + "base": null, + "refs": { + "DescribeScheduledInstanceAvailabilityResult$ScheduledInstanceAvailabilitySet": "

    Information about the available Scheduled Instances.

    " + } + }, + "ScheduledInstanceIdRequestSet": { + "base": null, + "refs": { + "DescribeScheduledInstancesRequest$ScheduledInstanceIds": "

    One or more Scheduled Instance IDs.

    " + } + }, + "ScheduledInstanceRecurrence": { + "base": "

    Describes the recurring schedule for a Scheduled Instance.

    ", + "refs": { + "ScheduledInstance$Recurrence": "

    The schedule recurrence.

    ", + "ScheduledInstanceAvailability$Recurrence": "

    The schedule recurrence.

    " + } + }, + "ScheduledInstanceRecurrenceRequest": { + "base": "

    Describes the recurring schedule for a Scheduled Instance.

    ", + "refs": { + "DescribeScheduledInstanceAvailabilityRequest$Recurrence": "

    The schedule recurrence.

    " + } + }, + "ScheduledInstanceSet": { + "base": null, + "refs": { + "DescribeScheduledInstancesResult$ScheduledInstanceSet": "

    Information about the Scheduled Instances.

    " + } + }, + "ScheduledInstancesBlockDeviceMapping": { + "base": "

    Describes a block device mapping for a Scheduled Instance.

    ", + "refs": { + "ScheduledInstancesBlockDeviceMappingSet$member": null + } + }, + "ScheduledInstancesBlockDeviceMappingSet": { + "base": null, + "refs": { + "ScheduledInstancesLaunchSpecification$BlockDeviceMappings": "

    One or more block device mapping entries.

    " + } + }, + "ScheduledInstancesEbs": { + "base": "

    Describes an EBS volume for a Scheduled Instance.

    ", + "refs": { + "ScheduledInstancesBlockDeviceMapping$Ebs": "

    Parameters used to set up EBS volumes automatically when the instance is launched.

    " + } + }, + "ScheduledInstancesIamInstanceProfile": { + "base": "

    Describes an IAM instance profile for a Scheduled Instance.

    ", + "refs": { + "ScheduledInstancesLaunchSpecification$IamInstanceProfile": "

    The IAM instance profile.

    " + } + }, + "ScheduledInstancesLaunchSpecification": { + "base": "

    Describes the launch specification for a Scheduled Instance.

    If you are launching the Scheduled Instance in EC2-VPC, you must specify the ID of the subnet. You can specify the subnet using either SubnetId or NetworkInterface.

    ", + "refs": { + "RunScheduledInstancesRequest$LaunchSpecification": "

    The launch specification.

    " + } + }, + "ScheduledInstancesMonitoring": { + "base": "

    Describes whether monitoring is enabled for a Scheduled Instance.

    ", + "refs": { + "ScheduledInstancesLaunchSpecification$Monitoring": "

    Enable or disable monitoring for the instances.

    " + } + }, + "ScheduledInstancesNetworkInterface": { + "base": "

    Describes a network interface for a Scheduled Instance.

    ", + "refs": { + "ScheduledInstancesNetworkInterfaceSet$member": null + } + }, + "ScheduledInstancesNetworkInterfaceSet": { + "base": null, + "refs": { + "ScheduledInstancesLaunchSpecification$NetworkInterfaces": "

    One or more network interfaces.

    " + } + }, + "ScheduledInstancesPlacement": { + "base": "

    Describes the placement for a Scheduled Instance.

    ", + "refs": { + "ScheduledInstancesLaunchSpecification$Placement": "

    The placement information.

    " + } + }, + "ScheduledInstancesPrivateIpAddressConfig": { + "base": "

    Describes a private IP address for a Scheduled Instance.

    ", + "refs": { + "PrivateIpAddressConfigSet$member": null + } + }, + "ScheduledInstancesSecurityGroupIdSet": { + "base": null, + "refs": { + "ScheduledInstancesLaunchSpecification$SecurityGroupIds": "

    The IDs of one or more security groups.

    ", + "ScheduledInstancesNetworkInterface$Groups": "

    The IDs of one or more security groups.

    " + } + }, + "SecurityGroup": { + "base": "

    Describes a security group

    ", + "refs": { + "SecurityGroupList$member": null + } + }, + "SecurityGroupIdStringList": { + "base": null, + "refs": { + "CreateNetworkInterfaceRequest$Groups": "

    The IDs of one or more security groups.

    ", + "ImportInstanceLaunchSpecification$GroupIds": "

    One or more security group IDs.

    ", + "InstanceNetworkInterfaceSpecification$Groups": "

    The IDs of the security groups for the network interface. Applies only if creating a network interface when launching an instance.

    ", + "ModifyNetworkInterfaceAttributeRequest$Groups": "

    Changes the security groups for the network interface. The new set of groups you specify replaces the current set. You must specify at least one group, even if it's just the default security group in the VPC. You must specify the ID of the security group, not the name.

    ", + "RunInstancesRequest$SecurityGroupIds": "

    One or more security group IDs. You can create a security group using CreateSecurityGroup.

    Default: Amazon EC2 uses the default security group.

    " + } + }, + "SecurityGroupList": { + "base": null, + "refs": { + "DescribeSecurityGroupsResult$SecurityGroups": "

    Information about one or more security groups.

    " + } + }, + "SecurityGroupReference": { + "base": "

    Describes a VPC with a security group that references your security group.

    ", + "refs": { + "SecurityGroupReferences$member": null + } + }, + "SecurityGroupReferences": { + "base": null, + "refs": { + "DescribeSecurityGroupReferencesResult$SecurityGroupReferenceSet": "

    Information about the VPCs with the referencing security groups.

    " + } + }, + "SecurityGroupStringList": { + "base": null, + "refs": { + "ImportInstanceLaunchSpecification$GroupNames": "

    One or more security group names.

    ", + "RunInstancesRequest$SecurityGroups": "

    [EC2-Classic, default VPC] One or more security group names. For a nondefault VPC, you must use security group IDs instead.

    Default: Amazon EC2 uses the default security group.

    " + } + }, + "ShutdownBehavior": { + "base": null, + "refs": { + "ImportInstanceLaunchSpecification$InstanceInitiatedShutdownBehavior": "

    Indicates whether an instance stops or terminates when you initiate shutdown from the instance (using the operating system command for system shutdown).

    ", + "RunInstancesRequest$InstanceInitiatedShutdownBehavior": "

    Indicates whether an instance stops or terminates when you initiate shutdown from the instance (using the operating system command for system shutdown).

    Default: stop

    " + } + }, + "SlotDateTimeRangeRequest": { + "base": "

    Describes the time period for a Scheduled Instance to start its first schedule. The time period must span less than one day.

    ", + "refs": { + "DescribeScheduledInstanceAvailabilityRequest$FirstSlotStartTimeRange": "

    The time period for the first schedule to start.

    " + } + }, + "SlotStartTimeRangeRequest": { + "base": "

    Describes the time period for a Scheduled Instance to start its first schedule.

    ", + "refs": { + "DescribeScheduledInstancesRequest$SlotStartTimeRange": "

    The time period for the first schedule to start.

    " + } + }, + "Snapshot": { + "base": "

    Describes a snapshot.

    ", + "refs": { + "SnapshotList$member": null + } + }, + "SnapshotAttributeName": { + "base": null, + "refs": { + "DescribeSnapshotAttributeRequest$Attribute": "

    The snapshot attribute you would like to view.

    ", + "ModifySnapshotAttributeRequest$Attribute": "

    The snapshot attribute to modify.

    Only volume creation permissions may be modified at the customer level.

    ", + "ResetSnapshotAttributeRequest$Attribute": "

    The attribute to reset. Currently, only the attribute for permission to create volumes can be reset.

    " + } + }, + "SnapshotDetail": { + "base": "

    Describes the snapshot created from the imported disk.

    ", + "refs": { + "SnapshotDetailList$member": null + } + }, + "SnapshotDetailList": { + "base": null, + "refs": { + "ImportImageResult$SnapshotDetails": "

    Information about the snapshots.

    ", + "ImportImageTask$SnapshotDetails": "

    Information about the snapshots.

    " + } + }, + "SnapshotDiskContainer": { + "base": "

    The disk container object for the import snapshot request.

    ", + "refs": { + "ImportSnapshotRequest$DiskContainer": "

    Information about the disk container.

    " + } + }, + "SnapshotIdStringList": { + "base": null, + "refs": { + "DescribeSnapshotsRequest$SnapshotIds": "

    One or more snapshot IDs.

    Default: Describes snapshots for which you have launch permissions.

    " + } + }, + "SnapshotList": { + "base": null, + "refs": { + "DescribeSnapshotsResult$Snapshots": "

    Information about the snapshots.

    " + } + }, + "SnapshotState": { + "base": null, + "refs": { + "Snapshot$State": "

    The snapshot state.

    " + } + }, + "SnapshotTaskDetail": { + "base": "

    Details about the import snapshot task.

    ", + "refs": { + "ImportSnapshotResult$SnapshotTaskDetail": "

    Information about the import snapshot task.

    ", + "ImportSnapshotTask$SnapshotTaskDetail": "

    Describes an import snapshot task.

    " + } + }, + "SpotDatafeedSubscription": { + "base": "

    Describes the data feed for a Spot instance.

    ", + "refs": { + "CreateSpotDatafeedSubscriptionResult$SpotDatafeedSubscription": "

    The Spot instance data feed subscription.

    ", + "DescribeSpotDatafeedSubscriptionResult$SpotDatafeedSubscription": "

    The Spot instance data feed subscription.

    " + } + }, + "SpotFleetLaunchSpecification": { + "base": "

    Describes the launch specification for one or more Spot instances.

    ", + "refs": { + "LaunchSpecsList$member": null + } + }, + "SpotFleetMonitoring": { + "base": "

    Describes whether monitoring is enabled.

    ", + "refs": { + "SpotFleetLaunchSpecification$Monitoring": "

    Enable or disable monitoring for the instances.

    " + } + }, + "SpotFleetRequestConfig": { + "base": "

    Describes a Spot fleet request.

    ", + "refs": { + "SpotFleetRequestConfigSet$member": null + } + }, + "SpotFleetRequestConfigData": { + "base": "

    Describes the configuration of a Spot fleet request.

    ", + "refs": { + "RequestSpotFleetRequest$SpotFleetRequestConfig": "

    The configuration for the Spot fleet request.

    ", + "SpotFleetRequestConfig$SpotFleetRequestConfig": "

    Information about the configuration of the Spot fleet request.

    " + } + }, + "SpotFleetRequestConfigSet": { + "base": null, + "refs": { + "DescribeSpotFleetRequestsResponse$SpotFleetRequestConfigs": "

    Information about the configuration of your Spot fleet.

    " + } + }, + "SpotInstanceRequest": { + "base": "

    Describes a Spot instance request.

    ", + "refs": { + "SpotInstanceRequestList$member": null + } + }, + "SpotInstanceRequestIdList": { + "base": null, + "refs": { + "CancelSpotInstanceRequestsRequest$SpotInstanceRequestIds": "

    One or more Spot instance request IDs.

    ", + "DescribeSpotInstanceRequestsRequest$SpotInstanceRequestIds": "

    One or more Spot instance request IDs.

    " + } + }, + "SpotInstanceRequestList": { + "base": null, + "refs": { + "DescribeSpotInstanceRequestsResult$SpotInstanceRequests": "

    One or more Spot instance requests.

    ", + "RequestSpotInstancesResult$SpotInstanceRequests": "

    One or more Spot instance requests.

    " + } + }, + "SpotInstanceState": { + "base": null, + "refs": { + "SpotInstanceRequest$State": "

    The state of the Spot instance request. Spot bid status information can help you track your Spot instance requests. For more information, see Spot Bid Status in the Amazon Elastic Compute Cloud User Guide.

    " + } + }, + "SpotInstanceStateFault": { + "base": "

    Describes a Spot instance state change.

    ", + "refs": { + "SpotDatafeedSubscription$Fault": "

    The fault codes for the Spot instance request, if any.

    ", + "SpotInstanceRequest$Fault": "

    The fault codes for the Spot instance request, if any.

    " + } + }, + "SpotInstanceStatus": { + "base": "

    Describes the status of a Spot instance request.

    ", + "refs": { + "SpotInstanceRequest$Status": "

    The status code and status message describing the Spot instance request.

    " + } + }, + "SpotInstanceType": { + "base": null, + "refs": { + "RequestSpotInstancesRequest$Type": "

    The Spot instance request type.

    Default: one-time

    ", + "SpotInstanceRequest$Type": "

    The Spot instance request type.

    " + } + }, + "SpotPlacement": { + "base": "

    Describes Spot instance placement.

    ", + "refs": { + "LaunchSpecification$Placement": "

    The placement information for the instance.

    ", + "RequestSpotLaunchSpecification$Placement": "

    The placement information for the instance.

    ", + "SpotFleetLaunchSpecification$Placement": "

    The placement information.

    " + } + }, + "SpotPrice": { + "base": "

    Describes the maximum hourly price (bid) for any Spot instance launched to fulfill the request.

    ", + "refs": { + "SpotPriceHistoryList$member": null + } + }, + "SpotPriceHistoryList": { + "base": null, + "refs": { + "DescribeSpotPriceHistoryResult$SpotPriceHistory": "

    The historical Spot prices.

    " + } + }, + "StaleIpPermission": { + "base": "

    Describes a stale rule in a security group.

    ", + "refs": { + "StaleIpPermissionSet$member": null + } + }, + "StaleIpPermissionSet": { + "base": null, + "refs": { + "StaleSecurityGroup$StaleIpPermissions": "

    Information about the stale inbound rules in the security group.

    ", + "StaleSecurityGroup$StaleIpPermissionsEgress": "

    Information about the stale outbound rules in the security group.

    " + } + }, + "StaleSecurityGroup": { + "base": "

    Describes a stale security group (a security group that contains stale rules).

    ", + "refs": { + "StaleSecurityGroupSet$member": null + } + }, + "StaleSecurityGroupSet": { + "base": null, + "refs": { + "DescribeStaleSecurityGroupsResult$StaleSecurityGroupSet": "

    Information about the stale security groups.

    " + } + }, + "StartInstancesRequest": { + "base": "

    Contains the parameters for StartInstances.

    ", + "refs": { + } + }, + "StartInstancesResult": { + "base": "

    Contains the output of StartInstances.

    ", + "refs": { + } + }, + "State": { + "base": null, + "refs": { + "VpcEndpoint$State": "

    The state of the VPC endpoint.

    " + } + }, + "StateReason": { + "base": "

    Describes a state change.

    ", + "refs": { + "Image$StateReason": "

    The reason for the state change.

    ", + "Instance$StateReason": "

    The reason for the most recent state transition.

    " + } + }, + "Status": { + "base": null, + "refs": { + "MoveAddressToVpcResult$Status": "

    The status of the move of the IP address.

    ", + "RestoreAddressToClassicResult$Status": "

    The move status for the IP address.

    " + } + }, + "StatusName": { + "base": null, + "refs": { + "InstanceStatusDetails$Name": "

    The type of instance status.

    " + } + }, + "StatusType": { + "base": null, + "refs": { + "InstanceStatusDetails$Status": "

    The status.

    " + } + }, + "StopInstancesRequest": { + "base": "

    Contains the parameters for StopInstances.

    ", + "refs": { + } + }, + "StopInstancesResult": { + "base": "

    Contains the output of StopInstances.

    ", + "refs": { + } + }, + "Storage": { + "base": "

    Describes the storage location for an instance store-backed AMI.

    ", + "refs": { + "BundleInstanceRequest$Storage": "

    The bucket in which to store the AMI. You can specify a bucket that you already own or a new bucket that Amazon EC2 creates on your behalf. If you specify a bucket that belongs to someone else, Amazon EC2 returns an error.

    ", + "BundleTask$Storage": "

    The Amazon S3 storage locations.

    " + } + }, + "String": { + "base": null, + "refs": { + "AcceptVpcPeeringConnectionRequest$VpcPeeringConnectionId": "

    The ID of the VPC peering connection.

    ", + "AccountAttribute$AttributeName": "

    The name of the account attribute.

    ", + "AccountAttributeValue$AttributeValue": "

    The value of the attribute.

    ", + "ActiveInstance$InstanceType": "

    The instance type.

    ", + "ActiveInstance$InstanceId": "

    The ID of the instance.

    ", + "ActiveInstance$SpotInstanceRequestId": "

    The ID of the Spot instance request.

    ", + "Address$InstanceId": "

    The ID of the instance that the address is associated with (if any).

    ", + "Address$PublicIp": "

    The Elastic IP address.

    ", + "Address$AllocationId": "

    The ID representing the allocation of the address for use with EC2-VPC.

    ", + "Address$AssociationId": "

    The ID representing the association of the address with an instance in a VPC.

    ", + "Address$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "Address$NetworkInterfaceOwnerId": "

    The ID of the AWS account that owns the network interface.

    ", + "Address$PrivateIpAddress": "

    The private IP address associated with the Elastic IP address.

    ", + "AllocateAddressResult$PublicIp": "

    The Elastic IP address.

    ", + "AllocateAddressResult$AllocationId": "

    [EC2-VPC] The ID that AWS assigns to represent the allocation of the Elastic IP address for use with instances in a VPC.

    ", + "AllocateHostsRequest$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure idempotency of the request. For more information, see How to Ensure Idempotency in the Amazon Elastic Compute Cloud User Guide.

    ", + "AllocateHostsRequest$InstanceType": "

    Specify the instance type that you want your Dedicated hosts to be configured for. When you specify the instance type, that is the only instance type that you can launch onto that host.

    ", + "AllocateHostsRequest$AvailabilityZone": "

    The Availability Zone for the Dedicated hosts.

    ", + "AllocationIdList$member": null, + "AssignPrivateIpAddressesRequest$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "AssociateAddressRequest$InstanceId": "

    The ID of the instance. This is required for EC2-Classic. For EC2-VPC, you can specify either the instance ID or the network interface ID, but not both. The operation fails if you specify an instance ID unless exactly one network interface is attached.

    ", + "AssociateAddressRequest$PublicIp": "

    The Elastic IP address. This is required for EC2-Classic.

    ", + "AssociateAddressRequest$AllocationId": "

    [EC2-VPC] The allocation ID. This is required for EC2-VPC.

    ", + "AssociateAddressRequest$NetworkInterfaceId": "

    [EC2-VPC] The ID of the network interface. If the instance has more than one network interface, you must specify a network interface ID.

    ", + "AssociateAddressRequest$PrivateIpAddress": "

    [EC2-VPC] The primary or secondary private IP address to associate with the Elastic IP address. If no private IP address is specified, the Elastic IP address is associated with the primary private IP address.

    ", + "AssociateAddressResult$AssociationId": "

    [EC2-VPC] The ID that represents the association of the Elastic IP address with an instance.

    ", + "AssociateDhcpOptionsRequest$DhcpOptionsId": "

    The ID of the DHCP options set, or default to associate no DHCP options with the VPC.

    ", + "AssociateDhcpOptionsRequest$VpcId": "

    The ID of the VPC.

    ", + "AssociateRouteTableRequest$SubnetId": "

    The ID of the subnet.

    ", + "AssociateRouteTableRequest$RouteTableId": "

    The ID of the route table.

    ", + "AssociateRouteTableResult$AssociationId": "

    The route table association ID (needed to disassociate the route table).

    ", + "AttachClassicLinkVpcRequest$InstanceId": "

    The ID of an EC2-Classic instance to link to the ClassicLink-enabled VPC.

    ", + "AttachClassicLinkVpcRequest$VpcId": "

    The ID of a ClassicLink-enabled VPC.

    ", + "AttachInternetGatewayRequest$InternetGatewayId": "

    The ID of the Internet gateway.

    ", + "AttachInternetGatewayRequest$VpcId": "

    The ID of the VPC.

    ", + "AttachNetworkInterfaceRequest$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "AttachNetworkInterfaceRequest$InstanceId": "

    The ID of the instance.

    ", + "AttachNetworkInterfaceResult$AttachmentId": "

    The ID of the network interface attachment.

    ", + "AttachVolumeRequest$VolumeId": "

    The ID of the EBS volume. The volume and instance must be within the same Availability Zone.

    ", + "AttachVolumeRequest$InstanceId": "

    The ID of the instance.

    ", + "AttachVolumeRequest$Device": "

    The device name to expose to the instance (for example, /dev/sdh or xvdh).

    ", + "AttachVpnGatewayRequest$VpnGatewayId": "

    The ID of the virtual private gateway.

    ", + "AttachVpnGatewayRequest$VpcId": "

    The ID of the VPC.

    ", + "AttributeValue$Value": "

    Valid values are case-sensitive and vary by action.

    ", + "AuthorizeSecurityGroupEgressRequest$GroupId": "

    The ID of the security group.

    ", + "AuthorizeSecurityGroupEgressRequest$SourceSecurityGroupName": "

    The name of a destination security group. To authorize outbound access to a destination security group, we recommend that you use a set of IP permissions instead.

    ", + "AuthorizeSecurityGroupEgressRequest$SourceSecurityGroupOwnerId": "

    The AWS account number for a destination security group. To authorize outbound access to a destination security group, we recommend that you use a set of IP permissions instead.

    ", + "AuthorizeSecurityGroupEgressRequest$IpProtocol": "

    The IP protocol name or number. We recommend that you specify the protocol in a set of IP permissions instead.

    ", + "AuthorizeSecurityGroupEgressRequest$CidrIp": "

    The CIDR IP address range. We recommend that you specify the CIDR range in a set of IP permissions instead.

    ", + "AuthorizeSecurityGroupIngressRequest$GroupName": "

    [EC2-Classic, default VPC] The name of the security group.

    ", + "AuthorizeSecurityGroupIngressRequest$GroupId": "

    The ID of the security group. Required for a nondefault VPC.

    ", + "AuthorizeSecurityGroupIngressRequest$SourceSecurityGroupName": "

    [EC2-Classic, default VPC] The name of the source security group. You can't specify this parameter in combination with the following parameters: the CIDR IP address range, the start of the port range, the IP protocol, and the end of the port range. Creates rules that grant full ICMP, UDP, and TCP access. To create a rule with a specific IP protocol and port range, use a set of IP permissions instead. For EC2-VPC, the source security group must be in the same VPC.

    ", + "AuthorizeSecurityGroupIngressRequest$SourceSecurityGroupOwnerId": "

    [EC2-Classic] The AWS account number for the source security group, if the source security group is in a different account. You can't specify this parameter in combination with the following parameters: the CIDR IP address range, the IP protocol, the start of the port range, and the end of the port range. Creates rules that grant full ICMP, UDP, and TCP access. To create a rule with a specific IP protocol and port range, use a set of IP permissions instead.

    ", + "AuthorizeSecurityGroupIngressRequest$IpProtocol": "

    The IP protocol name (tcp, udp, icmp) or number (see Protocol Numbers). (VPC only) Use -1 to specify all.

    ", + "AuthorizeSecurityGroupIngressRequest$CidrIp": "

    The CIDR IP address range. You can't specify this parameter when specifying a source security group.

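Several of the descriptions above recommend a set of IP permissions over the flat CidrIp/IpProtocol/port parameters. A minimal sketch of that form, assuming the vendored aws-sdk-go and placeholder group and CIDR values:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))
	// One IpPermission entry replaces the flat IpProtocol/FromPort/ToPort/CidrIp fields.
	_, err := svc.AuthorizeSecurityGroupIngress(&ec2.AuthorizeSecurityGroupIngressInput{
		GroupId: aws.String("sg-12345678"), // hypothetical security group ID
		IpPermissions: []*ec2.IpPermission{{
			IpProtocol: aws.String("tcp"),
			FromPort:   aws.Int64(22),
			ToPort:     aws.Int64(22),
			IpRanges:   []*ec2.IpRange{{CidrIp: aws.String("203.0.113.0/24")}},
		}},
	})
	if err != nil {
		panic(err)
	}
}
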
    ", + "AvailabilityZone$ZoneName": "

    The name of the Availability Zone.

    ", + "AvailabilityZone$RegionName": "

    The name of the region.

    ", + "AvailabilityZoneMessage$Message": "

    The message about the Availability Zone.

    ", + "BlockDeviceMapping$VirtualName": "

    The virtual device name (ephemeralN). Instance store volumes are numbered starting from 0. An instance type with 2 available instance store volumes can specify mappings for ephemeral0 and ephemeral1.The number of available instance store volumes depends on the instance type. After you connect to the instance, you must mount the volume.

    Constraints: For M3 instances, you must specify instance store volumes in the block device mapping for the instance. When you launch an M3 instance, we ignore any instance store volumes specified in the block device mapping for the AMI.

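As an illustration of the ephemeralN naming just described, a hedged sketch of a block device mapping that exposes the first instance store volume, using the same imports as the sketch above (device names are placeholders):

	// Maps the first instance store volume (ephemeral0) to /dev/sdb.
	mapping := &ec2.BlockDeviceMapping{
		DeviceName:  aws.String("/dev/sdb"),
		VirtualName: aws.String("ephemeral0"),
	}
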
    ", + "BlockDeviceMapping$DeviceName": "

    The device name exposed to the instance (for example, /dev/sdh or xvdh).

    ", + "BlockDeviceMapping$NoDevice": "

    Suppresses the specified device included in the block device mapping of the AMI.

    ", + "BundleIdStringList$member": null, + "BundleInstanceRequest$InstanceId": "

    The ID of the instance to bundle.

    Type: String

    Default: None

    Required: Yes

    ", + "BundleTask$InstanceId": "

    The ID of the instance associated with this bundle task.

    ", + "BundleTask$BundleId": "

    The ID of the bundle task.

    ", + "BundleTask$Progress": "

    The level of task completion, as a percent (for example, 20%).

    ", + "BundleTaskError$Code": "

    The error code.

    ", + "BundleTaskError$Message": "

    The error message.

    ", + "CancelBundleTaskRequest$BundleId": "

    The ID of the bundle task.

    ", + "CancelConversionRequest$ConversionTaskId": "

    The ID of the conversion task.

    ", + "CancelConversionRequest$ReasonMessage": "

    The reason for canceling the conversion task.

    ", + "CancelExportTaskRequest$ExportTaskId": "

    The ID of the export task. This is the ID returned by CreateInstanceExportTask.

    ", + "CancelImportTaskRequest$ImportTaskId": "

    The ID of the import image or import snapshot task to be canceled.

    ", + "CancelImportTaskRequest$CancelReason": "

    The reason for canceling the task.

    ", + "CancelImportTaskResult$ImportTaskId": "

    The ID of the task being canceled.

    ", + "CancelImportTaskResult$State": "

    The current state of the task being canceled.

    ", + "CancelImportTaskResult$PreviousState": "

    The current state of the task being canceled.

    ", + "CancelReservedInstancesListingRequest$ReservedInstancesListingId": "

    The ID of the Reserved Instance listing.

    ", + "CancelSpotFleetRequestsError$Message": "

    The description for the error code.

    ", + "CancelSpotFleetRequestsErrorItem$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "CancelSpotFleetRequestsSuccessItem$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "CancelledSpotInstanceRequest$SpotInstanceRequestId": "

    The ID of the Spot instance request.

    ", + "ClassicLinkDnsSupport$VpcId": "

    The ID of the VPC.

    ", + "ClassicLinkInstance$InstanceId": "

    The ID of the instance.

    ", + "ClassicLinkInstance$VpcId": "

    The ID of the VPC.

    ", + "ClientData$Comment": "

    A user-defined comment about the disk upload.

    ", + "ConfirmProductInstanceRequest$ProductCode": "

    The product code. This must be a product code that you own.

    ", + "ConfirmProductInstanceRequest$InstanceId": "

    The ID of the instance.

    ", + "ConfirmProductInstanceResult$OwnerId": "

    The AWS account ID of the instance owner. This is only present if the product code is attached to the instance.

    ", + "ConversionIdStringList$member": null, + "ConversionTask$ConversionTaskId": "

    The ID of the conversion task.

    ", + "ConversionTask$ExpirationTime": "

    The time when the task expires. If the upload isn't complete before the expiration time, we automatically cancel the task.

    ", + "ConversionTask$StatusMessage": "

    The status message related to the conversion task.

    ", + "CopyImageRequest$SourceRegion": "

    The name of the region that contains the AMI to copy.

    ", + "CopyImageRequest$SourceImageId": "

    The ID of the AMI to copy.

    ", + "CopyImageRequest$Name": "

    The name of the new AMI in the destination region.

    ", + "CopyImageRequest$Description": "

    A description for the new AMI in the destination region.

    ", + "CopyImageRequest$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure idempotency of the request. For more information, see How to Ensure Idempotency in the Amazon Elastic Compute Cloud User Guide.

    ", + "CopyImageRequest$KmsKeyId": "

    The full ARN of the AWS Key Management Service (AWS KMS) CMK to use when encrypting the snapshots of an image during a copy operation. This parameter is only required if you want to use a non-default CMK; if this parameter is not specified, the default CMK for EBS is used. The ARN contains the arn:aws:kms namespace, followed by the region of the CMK, the AWS account ID of the CMK owner, the key namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. The specified CMK must exist in the region that the snapshot is being copied to. If a KmsKeyId is specified, the Encrypted flag must also be set.

    ", + "CopyImageResult$ImageId": "

    The ID of the new AMI.

    ", + "CopySnapshotRequest$SourceRegion": "

    The ID of the region that contains the snapshot to be copied.

    ", + "CopySnapshotRequest$SourceSnapshotId": "

    The ID of the EBS snapshot to copy.

    ", + "CopySnapshotRequest$Description": "

    A description for the EBS snapshot.

    ", + "CopySnapshotRequest$DestinationRegion": "

    The destination region to use in the PresignedUrl parameter of a snapshot copy operation. This parameter is only valid for specifying the destination region in a PresignedUrl parameter, where it is required.

    CopySnapshot sends the snapshot copy to the regional endpoint that you send the HTTP request to, such as ec2.us-east-1.amazonaws.com (in the AWS CLI, this is specified with the --region parameter or the default region in your AWS configuration file).

    ", + "CopySnapshotRequest$PresignedUrl": "

    The pre-signed URL that facilitates copying an encrypted snapshot. This parameter is only required when copying an encrypted snapshot with the Amazon EC2 Query API; it is available as an optional parameter in all other cases. The PresignedUrl should use the snapshot source endpoint, the CopySnapshot action, and include the SourceRegion, SourceSnapshotId, and DestinationRegion parameters. The PresignedUrl must be signed using AWS Signature Version 4. Because EBS snapshots are stored in Amazon S3, the signing algorithm for this parameter uses the same logic that is described in Authenticating Requests by Using Query Parameters (AWS Signature Version 4) in the Amazon Simple Storage Service API Reference. An invalid or improperly signed PresignedUrl will cause the copy operation to fail asynchronously, and the snapshot will move to an error state.

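The SigV4 presigning described here can be produced with the vendored SDK's request presigner; a minimal sketch, assuming placeholder regions and a placeholder snapshot ID:

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// Build the request against the snapshot's source region, as described above.
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	req, _ := ec2.New(sess).CopySnapshotRequest(&ec2.CopySnapshotInput{
		SourceRegion:      aws.String("us-west-2"),
		SourceSnapshotId:  aws.String("snap-1234567890abcdef0"), // placeholder
		DestinationRegion: aws.String("us-east-1"),
	})
	// Presign emits a Signature Version 4 query-string URL for the call.
	url, err := req.Presign(15 * time.Minute)
	if err != nil {
		panic(err)
	}
	fmt.Println(url)
}
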
    ", + "CopySnapshotRequest$KmsKeyId": "

    The full ARN of the AWS Key Management Service (AWS KMS) CMK to use when creating the snapshot copy. This parameter is only required if you want to use a non-default CMK; if this parameter is not specified, the default CMK for EBS is used. The ARN contains the arn:aws:kms namespace, followed by the region of the CMK, the AWS account ID of the CMK owner, the key namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. The specified CMK must exist in the region that the snapshot is being copied to. If a KmsKeyId is specified, the Encrypted flag must also be set.

    ", + "CopySnapshotResult$SnapshotId": "

    The ID of the new snapshot.

    ", + "CreateCustomerGatewayRequest$PublicIp": "

    The Internet-routable IP address for the customer gateway's outside interface. The address must be static.

    ", + "CreateFlowLogsRequest$LogGroupName": "

    The name of the CloudWatch log group.

    ", + "CreateFlowLogsRequest$DeliverLogsPermissionArn": "

    The ARN for the IAM role that's used to post flow logs to a CloudWatch Logs log group.

    ", + "CreateFlowLogsRequest$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

    ", + "CreateFlowLogsResult$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure the idempotency of the request.

    ", + "CreateImageRequest$InstanceId": "

    The ID of the instance.

    ", + "CreateImageRequest$Name": "

    A name for the new image.

    Constraints: 3-128 alphanumeric characters, parentheses (()), square brackets ([]), spaces ( ), periods (.), slashes (/), dashes (-), single quotes ('), at-signs (@), or underscores(_)

    ", + "CreateImageRequest$Description": "

    A description for the new image.

    ", + "CreateImageResult$ImageId": "

    The ID of the new AMI.

    ", + "CreateInstanceExportTaskRequest$Description": "

    A description for the conversion task or the resource being exported. The maximum length is 255 bytes.

    ", + "CreateInstanceExportTaskRequest$InstanceId": "

    The ID of the instance.

    ", + "CreateKeyPairRequest$KeyName": "

    A unique name for the key pair.

    Constraints: Up to 255 ASCII characters

    ", + "CreateNatGatewayRequest$SubnetId": "

    The subnet in which to create the NAT gateway.

    ", + "CreateNatGatewayRequest$AllocationId": "

    The allocation ID of an Elastic IP address to associate with the NAT gateway. If the Elastic IP address is associated with another resource, you must first disassociate it.

    ", + "CreateNatGatewayRequest$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

    Constraint: Maximum 64 ASCII characters.

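A short sketch of the idempotency pattern these ClientToken fields enable: pick one token and reuse it on retries, so a repeated CreateNatGateway returns the original gateway rather than creating a second one. IDs below are placeholders and the client construction is as in the sketches above:

	token := "create-nat-2017-07-26-0001" // caller-chosen, at most 64 ASCII characters
	out, err := svc.CreateNatGateway(&ec2.CreateNatGatewayInput{
		SubnetId:     aws.String("subnet-12345678"),   // placeholder
		AllocationId: aws.String("eipalloc-12345678"), // placeholder
		ClientToken:  aws.String(token),
	})
	// Retrying with the same token is safe; a duplicate call is not re-executed.
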
    ", + "CreateNatGatewayResult$ClientToken": "

    Unique, case-sensitive identifier to ensure the idempotency of the request. Only returned if a client token was provided in the request.

    ", + "CreateNetworkAclEntryRequest$NetworkAclId": "

    The ID of the network ACL.

    ", + "CreateNetworkAclEntryRequest$Protocol": "

    The protocol. A value of -1 means all protocols.

    ", + "CreateNetworkAclEntryRequest$CidrBlock": "

    The network range to allow or deny, in CIDR notation (for example 172.16.0.0/24).

    ", + "CreateNetworkAclRequest$VpcId": "

    The ID of the VPC.

    ", + "CreateNetworkInterfaceRequest$SubnetId": "

    The ID of the subnet to associate with the network interface.

    ", + "CreateNetworkInterfaceRequest$Description": "

    A description for the network interface.

    ", + "CreateNetworkInterfaceRequest$PrivateIpAddress": "

    The primary private IP address of the network interface. If you don't specify an IP address, Amazon EC2 selects one for you from the subnet range. If you specify an IP address, you cannot indicate any IP addresses specified in privateIpAddresses as primary (only one IP address can be designated as primary).

    ", + "CreatePlacementGroupRequest$GroupName": "

    A name for the placement group.

    Constraints: Up to 255 ASCII characters

    ", + "CreateReservedInstancesListingRequest$ReservedInstancesId": "

    The ID of the active Reserved Instance.

    ", + "CreateReservedInstancesListingRequest$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure idempotency of your listings. This helps avoid duplicate listings. For more information, see Ensuring Idempotency.

    ", + "CreateRouteRequest$RouteTableId": "

    The ID of the route table for the route.

    ", + "CreateRouteRequest$DestinationCidrBlock": "

    The CIDR address block used for the destination match. Routing decisions are based on the most specific match.

    ", + "CreateRouteRequest$GatewayId": "

    The ID of an Internet gateway or virtual private gateway attached to your VPC.

    ", + "CreateRouteRequest$InstanceId": "

    The ID of a NAT instance in your VPC. The operation fails if you specify an instance ID unless exactly one network interface is attached.

    ", + "CreateRouteRequest$NetworkInterfaceId": "

    The ID of a network interface.

    ", + "CreateRouteRequest$VpcPeeringConnectionId": "

    The ID of a VPC peering connection.

    ", + "CreateRouteRequest$NatGatewayId": "

    The ID of a NAT gateway.

    ", + "CreateRouteTableRequest$VpcId": "

    The ID of the VPC.

    ", + "CreateSecurityGroupRequest$GroupName": "

    The name of the security group.

    Constraints: Up to 255 characters in length

    Constraints for EC2-Classic: ASCII characters

    Constraints for EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=;{}!$*

    ", + "CreateSecurityGroupRequest$Description": "

    A description for the security group. This is informational only.

    Constraints: Up to 255 characters in length

    Constraints for EC2-Classic: ASCII characters

    Constraints for EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=;{}!$*

    ", + "CreateSecurityGroupRequest$VpcId": "

    [EC2-VPC] The ID of the VPC. Required for EC2-VPC.

    ", + "CreateSecurityGroupResult$GroupId": "

    The ID of the security group.

    ", + "CreateSnapshotRequest$VolumeId": "

    The ID of the EBS volume.

    ", + "CreateSnapshotRequest$Description": "

    A description for the snapshot.

    ", + "CreateSpotDatafeedSubscriptionRequest$Bucket": "

    The Amazon S3 bucket in which to store the Spot instance data feed.

    ", + "CreateSpotDatafeedSubscriptionRequest$Prefix": "

    A prefix for the data feed file names.

    ", + "CreateSubnetRequest$VpcId": "

    The ID of the VPC.

    ", + "CreateSubnetRequest$CidrBlock": "

    The network range for the subnet, in CIDR notation. For example, 10.0.0.0/24.

    ", + "CreateSubnetRequest$AvailabilityZone": "

    The Availability Zone for the subnet.

    Default: AWS selects one for you. If you create more than one subnet in your VPC, we may not necessarily select a different zone for each subnet.

    ", + "CreateVolumePermission$UserId": "

    The specific AWS account ID that is to be added or removed from a volume's list of create volume permissions.

    ", + "CreateVolumeRequest$SnapshotId": "

    The snapshot from which to create the volume.

    ", + "CreateVolumeRequest$AvailabilityZone": "

    The Availability Zone in which to create the volume. Use DescribeAvailabilityZones to list the Availability Zones that are currently available to you.

    ", + "CreateVolumeRequest$KmsKeyId": "

    The full ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use when creating the encrypted volume. This parameter is only required if you want to use a non-default CMK; if this parameter is not specified, the default CMK for EBS is used. The ARN contains the arn:aws:kms namespace, followed by the region of the CMK, the AWS account ID of the CMK owner, the key namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. If a KmsKeyId is specified, the Encrypted flag must also be set.

    ", + "CreateVpcEndpointRequest$VpcId": "

    The ID of the VPC in which the endpoint will be used.

    ", + "CreateVpcEndpointRequest$ServiceName": "

    The AWS service name, in the form com.amazonaws.region.service. To get a list of available services, use the DescribeVpcEndpointServices request.

    ", + "CreateVpcEndpointRequest$PolicyDocument": "

    A policy to attach to the endpoint that controls access to the service. The policy must be in valid JSON format. If this parameter is not specified, we attach a default policy that allows full access to the service.

    ", + "CreateVpcEndpointRequest$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

    ", + "CreateVpcEndpointResult$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure the idempotency of the request.

    ", + "CreateVpcPeeringConnectionRequest$VpcId": "

    The ID of the requester VPC.

    ", + "CreateVpcPeeringConnectionRequest$PeerVpcId": "

    The ID of the VPC with which you are creating the VPC peering connection.

    ", + "CreateVpcPeeringConnectionRequest$PeerOwnerId": "

    The AWS account ID of the owner of the peer VPC.

    Default: Your AWS account ID

    ", + "CreateVpcRequest$CidrBlock": "

    The network range for the VPC, in CIDR notation. For example, 10.0.0.0/16.

    ", + "CreateVpnConnectionRequest$Type": "

    The type of VPN connection (ipsec.1).

    ", + "CreateVpnConnectionRequest$CustomerGatewayId": "

    The ID of the customer gateway.

    ", + "CreateVpnConnectionRequest$VpnGatewayId": "

    The ID of the virtual private gateway.

    ", + "CreateVpnConnectionRouteRequest$VpnConnectionId": "

    The ID of the VPN connection.

    ", + "CreateVpnConnectionRouteRequest$DestinationCidrBlock": "

    The CIDR block associated with the local subnet of the customer network.

    ", + "CreateVpnGatewayRequest$AvailabilityZone": "

    The Availability Zone for the virtual private gateway.

    ", + "CustomerGateway$CustomerGatewayId": "

    The ID of the customer gateway.

    ", + "CustomerGateway$State": "

    The current state of the customer gateway (pending | available | deleting | deleted).

    ", + "CustomerGateway$Type": "

    The type of VPN connection the customer gateway supports (ipsec.1).

    ", + "CustomerGateway$IpAddress": "

    The Internet-routable IP address of the customer gateway's outside interface.

    ", + "CustomerGateway$BgpAsn": "

    The customer gateway's Border Gateway Protocol (BGP) Autonomous System Number (ASN).

    ", + "CustomerGatewayIdStringList$member": null, + "DeleteCustomerGatewayRequest$CustomerGatewayId": "

    The ID of the customer gateway.

    ", + "DeleteDhcpOptionsRequest$DhcpOptionsId": "

    The ID of the DHCP options set.

    ", + "DeleteInternetGatewayRequest$InternetGatewayId": "

    The ID of the Internet gateway.

    ", + "DeleteKeyPairRequest$KeyName": "

    The name of the key pair.

    ", + "DeleteNatGatewayRequest$NatGatewayId": "

    The ID of the NAT gateway.

    ", + "DeleteNatGatewayResult$NatGatewayId": "

    The ID of the NAT gateway.

    ", + "DeleteNetworkAclEntryRequest$NetworkAclId": "

    The ID of the network ACL.

    ", + "DeleteNetworkAclRequest$NetworkAclId": "

    The ID of the network ACL.

    ", + "DeleteNetworkInterfaceRequest$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "DeletePlacementGroupRequest$GroupName": "

    The name of the placement group.

    ", + "DeleteRouteRequest$RouteTableId": "

    The ID of the route table.

    ", + "DeleteRouteRequest$DestinationCidrBlock": "

    The CIDR range for the route. The value you specify must match the CIDR for the route exactly.

    ", + "DeleteRouteTableRequest$RouteTableId": "

    The ID of the route table.

    ", + "DeleteSecurityGroupRequest$GroupName": "

    [EC2-Classic, default VPC] The name of the security group. You can specify either the security group name or the security group ID.

    ", + "DeleteSecurityGroupRequest$GroupId": "

    The ID of the security group. Required for a nondefault VPC.

    ", + "DeleteSnapshotRequest$SnapshotId": "

    The ID of the EBS snapshot.

    ", + "DeleteSubnetRequest$SubnetId": "

    The ID of the subnet.

    ", + "DeleteVolumeRequest$VolumeId": "

    The ID of the volume.

    ", + "DeleteVpcPeeringConnectionRequest$VpcPeeringConnectionId": "

    The ID of the VPC peering connection.

    ", + "DeleteVpcRequest$VpcId": "

    The ID of the VPC.

    ", + "DeleteVpnConnectionRequest$VpnConnectionId": "

    The ID of the VPN connection.

    ", + "DeleteVpnConnectionRouteRequest$VpnConnectionId": "

    The ID of the VPN connection.

    ", + "DeleteVpnConnectionRouteRequest$DestinationCidrBlock": "

    The CIDR block associated with the local subnet of the customer network.

    ", + "DeleteVpnGatewayRequest$VpnGatewayId": "

    The ID of the virtual private gateway.

    ", + "DeregisterImageRequest$ImageId": "

    The ID of the AMI.

    ", + "DescribeClassicLinkInstancesRequest$NextToken": "

    The token to retrieve the next page of results.

    ", + "DescribeClassicLinkInstancesResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeFlowLogsRequest$NextToken": "

    The token to retrieve the next page of results.

    ", + "DescribeFlowLogsResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeHostsRequest$NextToken": "

    The token to retrieve the next page of results.

    ", + "DescribeHostsResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeIdFormatRequest$Resource": "

    The type of resource.

    ", + "DescribeImageAttributeRequest$ImageId": "

    The ID of the AMI.

    ", + "DescribeImportImageTasksRequest$NextToken": "

    A token that indicates the next page of results.

    ", + "DescribeImportImageTasksResult$NextToken": "

    The token to use to get the next page of results. This value is null when there are no more results to return.

    ", + "DescribeImportSnapshotTasksRequest$NextToken": "

    A token that indicates the next page of results.

    ", + "DescribeImportSnapshotTasksResult$NextToken": "

    The token to use to get the next page of results. This value is null when there are no more results to return.

    ", + "DescribeInstanceAttributeRequest$InstanceId": "

    The ID of the instance.

    ", + "DescribeInstanceStatusRequest$NextToken": "

    The token to retrieve the next page of results.

    ", + "DescribeInstanceStatusResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeInstancesRequest$NextToken": "

    The token to request the next page of results.

    ", + "DescribeInstancesResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeMovingAddressesRequest$NextToken": "

    The token to use to retrieve the next page of results.

    ", + "DescribeMovingAddressesResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeNatGatewaysRequest$NextToken": "

    The token to retrieve the next page of results.

    ", + "DescribeNatGatewaysResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeNetworkInterfaceAttributeRequest$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "DescribeNetworkInterfaceAttributeResult$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "DescribePrefixListsRequest$NextToken": "

    The token for the next set of items to return. (You received this token from a prior call.)

    ", + "DescribePrefixListsResult$NextToken": "

    The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

    ", + "DescribeReservedInstancesListingsRequest$ReservedInstancesId": "

    One or more Reserved Instance IDs.

    ", + "DescribeReservedInstancesListingsRequest$ReservedInstancesListingId": "

    One or more Reserved Instance listing IDs.

    ", + "DescribeReservedInstancesModificationsRequest$NextToken": "

    The token to retrieve the next page of results.

    ", + "DescribeReservedInstancesModificationsResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeReservedInstancesOfferingsRequest$AvailabilityZone": "

    The Availability Zone in which the Reserved Instance can be used.

    ", + "DescribeReservedInstancesOfferingsRequest$NextToken": "

    The token to retrieve the next page of results.

    ", + "DescribeReservedInstancesOfferingsResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeScheduledInstanceAvailabilityRequest$NextToken": "

    The token for the next set of results.

    ", + "DescribeScheduledInstanceAvailabilityResult$NextToken": "

    The token required to retrieve the next set of results. This value is null when there are no more results to return.

    ", + "DescribeScheduledInstancesRequest$NextToken": "

    The token for the next set of results.

    ", + "DescribeScheduledInstancesResult$NextToken": "

    The token required to retrieve the next set of results. This value is null when there are no more results to return.

    ", + "DescribeSnapshotAttributeRequest$SnapshotId": "

    The ID of the EBS snapshot.

    ", + "DescribeSnapshotAttributeResult$SnapshotId": "

    The ID of the EBS snapshot.

    ", + "DescribeSnapshotsRequest$NextToken": "

    The NextToken value returned from a previous paginated DescribeSnapshots request where MaxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the NextToken value. This value is null when there are no more results to return.

    ", + "DescribeSnapshotsResult$NextToken": "

    The NextToken value to include in a future DescribeSnapshots request. When the results of a DescribeSnapshots request exceed MaxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

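The NextToken contract these fields describe is what the SDK's *Pages helpers drive for you; a minimal sketch, assuming an *ec2.EC2 client `svc` as constructed earlier and an "fmt" import:

	err := svc.DescribeSnapshotsPages(
		&ec2.DescribeSnapshotsInput{OwnerIds: []*string{aws.String("self")}},
		func(page *ec2.DescribeSnapshotsOutput, lastPage bool) bool {
			for _, s := range page.Snapshots {
				fmt.Println(aws.StringValue(s.SnapshotId))
			}
			return !lastPage // the helper resubmits with the returned NextToken
		})
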
    ", + "DescribeSpotFleetInstancesRequest$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "DescribeSpotFleetInstancesRequest$NextToken": "

    The token for the next set of results.

    ", + "DescribeSpotFleetInstancesResponse$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "DescribeSpotFleetInstancesResponse$NextToken": "

    The token required to retrieve the next set of results. This value is null when there are no more results to return.

    ", + "DescribeSpotFleetRequestHistoryRequest$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "DescribeSpotFleetRequestHistoryRequest$NextToken": "

    The token for the next set of results.

    ", + "DescribeSpotFleetRequestHistoryResponse$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "DescribeSpotFleetRequestHistoryResponse$NextToken": "

    The token required to retrieve the next set of results. This value is null when there are no more results to return.

    ", + "DescribeSpotFleetRequestsRequest$NextToken": "

    The token for the next set of results.

    ", + "DescribeSpotFleetRequestsResponse$NextToken": "

    The token required to retrieve the next set of results. This value is null when there are no more results to return.

    ", + "DescribeSpotPriceHistoryRequest$AvailabilityZone": "

    Filters the results by the specified Availability Zone.

    ", + "DescribeSpotPriceHistoryRequest$NextToken": "

    The token for the next set of results.

    ", + "DescribeSpotPriceHistoryResult$NextToken": "

    The token required to retrieve the next set of results. This value is null when there are no more results to return.

    ", + "DescribeStaleSecurityGroupsRequest$VpcId": "

    The ID of the VPC.

    ", + "DescribeStaleSecurityGroupsResult$NextToken": "

    The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

    ", + "DescribeTagsRequest$NextToken": "

    The token to retrieve the next page of results.

    ", + "DescribeTagsResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return..

    ", + "DescribeVolumeAttributeRequest$VolumeId": "

    The ID of the volume.

    ", + "DescribeVolumeAttributeResult$VolumeId": "

    The ID of the volume.

    ", + "DescribeVolumeStatusRequest$NextToken": "

    The NextToken value to include in a future DescribeVolumeStatus request. When the results of the request exceed MaxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeVolumeStatusResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeVolumesRequest$NextToken": "

    The NextToken value returned from a previous paginated DescribeVolumes request where MaxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the NextToken value. This value is null when there are no more results to return.

    ", + "DescribeVolumesResult$NextToken": "

    The NextToken value to include in a future DescribeVolumes request. When the results of a DescribeVolumes request exceed MaxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeVpcAttributeRequest$VpcId": "

    The ID of the VPC.

    ", + "DescribeVpcAttributeResult$VpcId": "

    The ID of the VPC.

    ", + "DescribeVpcEndpointServicesRequest$NextToken": "

    The token for the next set of items to return. (You received this token from a prior call.)

    ", + "DescribeVpcEndpointServicesResult$NextToken": "

    The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

    ", + "DescribeVpcEndpointsRequest$NextToken": "

    The token for the next set of items to return. (You received this token from a prior call.)

    ", + "DescribeVpcEndpointsResult$NextToken": "

    The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

    ", + "DetachClassicLinkVpcRequest$InstanceId": "

    The ID of the instance to unlink from the VPC.

    ", + "DetachClassicLinkVpcRequest$VpcId": "

    The ID of the VPC to which the instance is linked.

    ", + "DetachInternetGatewayRequest$InternetGatewayId": "

    The ID of the Internet gateway.

    ", + "DetachInternetGatewayRequest$VpcId": "

    The ID of the VPC.

    ", + "DetachNetworkInterfaceRequest$AttachmentId": "

    The ID of the attachment.

    ", + "DetachVolumeRequest$VolumeId": "

    The ID of the volume.

    ", + "DetachVolumeRequest$InstanceId": "

    The ID of the instance.

    ", + "DetachVolumeRequest$Device": "

    The device name.

    ", + "DetachVpnGatewayRequest$VpnGatewayId": "

    The ID of the virtual private gateway.

    ", + "DetachVpnGatewayRequest$VpcId": "

    The ID of the VPC.

    ", + "DhcpConfiguration$Key": "

    The name of a DHCP option.

    ", + "DhcpOptions$DhcpOptionsId": "

    The ID of the set of DHCP options.

    ", + "DhcpOptionsIdStringList$member": null, + "DisableVgwRoutePropagationRequest$RouteTableId": "

    The ID of the route table.

    ", + "DisableVgwRoutePropagationRequest$GatewayId": "

    The ID of the virtual private gateway.

    ", + "DisableVpcClassicLinkDnsSupportRequest$VpcId": "

    The ID of the VPC.

    ", + "DisableVpcClassicLinkRequest$VpcId": "

    The ID of the VPC.

    ", + "DisassociateAddressRequest$PublicIp": "

    [EC2-Classic] The Elastic IP address. Required for EC2-Classic.

    ", + "DisassociateAddressRequest$AssociationId": "

    [EC2-VPC] The association ID. Required for EC2-VPC.

    ", + "DisassociateRouteTableRequest$AssociationId": "

    The association ID representing the current association between the route table and subnet.

    ", + "DiskImage$Description": "

    A description of the disk image.

    ", + "DiskImageDescription$ImportManifestUrl": "

    A presigned URL for the import manifest stored in Amazon S3. For information about creating a presigned URL for an Amazon S3 object, read the \"Query String Request Authentication Alternative\" section of the Authenticating REST Requests topic in the Amazon Simple Storage Service Developer Guide.

    For information about the import manifest referenced by this API action, see VM Import Manifest.

    ", + "DiskImageDescription$Checksum": "

    The checksum computed for the disk image.

    ", + "DiskImageDetail$ImportManifestUrl": "

    A presigned URL for the import manifest stored in Amazon S3 and presented here as an Amazon S3 presigned URL. For information about creating a presigned URL for an Amazon S3 object, read the \"Query String Request Authentication Alternative\" section of the Authenticating REST Requests topic in the Amazon Simple Storage Service Developer Guide.

    For information about the import manifest referenced by this API action, see VM Import Manifest.

    ", + "DiskImageVolumeDescription$Id": "

    The volume identifier.

    ", + "EbsBlockDevice$SnapshotId": "

    The ID of the snapshot.

    ", + "EbsInstanceBlockDevice$VolumeId": "

    The ID of the EBS volume.

    ", + "EbsInstanceBlockDeviceSpecification$VolumeId": "

    The ID of the EBS volume.

    ", + "EnableVgwRoutePropagationRequest$RouteTableId": "

    The ID of the route table.

    ", + "EnableVgwRoutePropagationRequest$GatewayId": "

    The ID of the virtual private gateway.

    ", + "EnableVolumeIORequest$VolumeId": "

    The ID of the volume.

    ", + "EnableVpcClassicLinkDnsSupportRequest$VpcId": "

    The ID of the VPC.

    ", + "EnableVpcClassicLinkRequest$VpcId": "

    The ID of the VPC.

    ", + "EventInformation$InstanceId": "

    The ID of the instance. This information is available only for instanceChange events.

    ", + "EventInformation$EventSubType": "

    The event.

    The following are the error events.

    • iamFleetRoleInvalid - The Spot fleet did not have the required permissions either to launch or terminate an instance.

    • launchSpecTemporarilyBlacklisted - The configuration is not valid and several attempts to launch instances have failed. For more information, see the description of the event.

    • spotFleetRequestConfigurationInvalid - The configuration is not valid. For more information, see the description of the event.

    • spotInstanceCountLimitExceeded - You've reached the limit on the number of Spot instances that you can launch.

    The following are the fleetRequestChange events.

    • active - The Spot fleet has been validated and Amazon EC2 is attempting to maintain the target number of running Spot instances.

    • cancelled - The Spot fleet is canceled and has no running Spot instances. The Spot fleet will be deleted two days after its instances were terminated.

    • cancelled_running - The Spot fleet is canceled and will not launch additional Spot instances, but its existing Spot instances continue to run until they are interrupted or terminated.

    • cancelled_terminating - The Spot fleet is canceled and its Spot instances are terminating.

    • expired - The Spot fleet request has expired. A subsequent event indicates that the instances were terminated, if the request was created with TerminateInstancesWithExpiration set.

    • modify_in_progress - A request to modify the Spot fleet request was accepted and is in progress.

    • modify_successful - The Spot fleet request was modified.

    • price_update - The bid price for a launch configuration was adjusted because it was too high. This change is permanent.

    • submitted - The Spot fleet request is being evaluated and Amazon EC2 is preparing to launch the target number of Spot instances.

    The following are the instanceChange events.

    • launched - A bid was fulfilled and a new instance was launched.

    • terminated - An instance was terminated by the user.

    ", + "EventInformation$EventDescription": "

    The description of the event.

    ", + "ExecutableByStringList$member": null, + "ExportTask$ExportTaskId": "

    The ID of the export task.

    ", + "ExportTask$Description": "

    A description of the resource being exported.

    ", + "ExportTask$StatusMessage": "

    The status message related to the export task.

    ", + "ExportTaskIdStringList$member": null, + "ExportToS3Task$S3Bucket": "

    The S3 bucket for the destination image. The destination bucket must exist and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com.

    ", + "ExportToS3Task$S3Key": "

    The encryption key for your S3 bucket.

    ", + "ExportToS3TaskSpecification$S3Bucket": "

    The S3 bucket for the destination image. The destination bucket must exist and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com.

    ", + "ExportToS3TaskSpecification$S3Prefix": "

    The image is written to a single object in the S3 bucket at the S3 key s3prefix + exportTaskId + '.' + diskImageFormat.

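The key layout in that last description is literal string concatenation; a one-line sketch with hypothetical values:

	// "exports/" + "export-i-abcd1234" + "." + "vmdk" -> "exports/export-i-abcd1234.vmdk"
	key := s3Prefix + exportTaskID + "." + diskImageFormat
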
    ", + "Filter$Name": "

    The name of the filter. Filter names are case-sensitive.

    ", + "FlowLog$FlowLogId": "

    The flow log ID.

    ", + "FlowLog$FlowLogStatus": "

    The status of the flow log (ACTIVE).

    ", + "FlowLog$ResourceId": "

    The ID of the resource on which the flow log was created.

    ", + "FlowLog$LogGroupName": "

    The name of the flow log group.

    ", + "FlowLog$DeliverLogsStatus": "

    The status of the logs delivery (SUCCESS | FAILED).

    ", + "FlowLog$DeliverLogsErrorMessage": "

    Information about the error that occurred. Rate limited indicates that CloudWatch logs throttling has been applied for one or more network interfaces, or that you've reached the limit on the number of CloudWatch Logs log groups that you can create. Access error indicates that the IAM role associated with the flow log does not have sufficient permissions to publish to CloudWatch Logs. Unknown error indicates an internal error.

    ", + "FlowLog$DeliverLogsPermissionArn": "

    The ARN of the IAM role that posts logs to CloudWatch Logs.

    ", + "GetConsoleOutputRequest$InstanceId": "

    The ID of the instance.

    ", + "GetConsoleOutputResult$InstanceId": "

    The ID of the instance.

    ", + "GetConsoleOutputResult$Output": "

    The console output, base64-encoded. If using a command line tool, the tools decode the output for you.

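Since Output is base64-encoded, SDK callers decode it themselves; a hedged sketch, assuming an *ec2.EC2 client `svc`, "fmt" and "encoding/base64" imports, and a placeholder instance ID:

	out, err := svc.GetConsoleOutput(&ec2.GetConsoleOutputInput{
		InstanceId: aws.String("i-1234567890abcdef0"), // placeholder
	})
	if err == nil && out.Output != nil {
		text, _ := base64.StdEncoding.DecodeString(*out.Output)
		fmt.Println(string(text))
	}
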
    ", + "GetConsoleScreenshotRequest$InstanceId": "

    The ID of the instance.

    ", + "GetConsoleScreenshotResult$InstanceId": "

    The ID of the instance.

    ", + "GetConsoleScreenshotResult$ImageData": "

    The data that comprises the image.

    ", + "GetPasswordDataRequest$InstanceId": "

    The ID of the Windows instance.

    ", + "GetPasswordDataResult$InstanceId": "

    The ID of the Windows instance.

    ", + "GetPasswordDataResult$PasswordData": "

    The password of the instance.

    ", + "GroupIdStringList$member": null, + "GroupIdentifier$GroupName": "

    The name of the security group.

    ", + "GroupIdentifier$GroupId": "

    The ID of the security group.

    ", + "GroupIds$member": null, + "GroupNameStringList$member": null, + "Host$HostId": "

    The ID of the Dedicated host.

    ", + "Host$HostReservationId": "

    The reservation ID of the Dedicated host. This returns a null response if the Dedicated host doesn't have an associated reservation.

    ", + "Host$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure idempotency of the request. For more information, see How to Ensure Idempotency in the Amazon Elastic Compute Cloud User Guide.

    ", + "Host$AvailabilityZone": "

    The Availability Zone of the Dedicated host.

    ", + "HostInstance$InstanceId": "

    the IDs of instances that are running on the Dedicated host.

    ", + "HostInstance$InstanceType": "

    The instance type size (for example, m3.medium) of the running instance.

    ", + "HostProperties$InstanceType": "

    The instance type size that the Dedicated host supports (for example, m3.medium).

    ", + "IamInstanceProfile$Arn": "

    The Amazon Resource Name (ARN) of the instance profile.

    ", + "IamInstanceProfile$Id": "

    The ID of the instance profile.

    ", + "IamInstanceProfileSpecification$Arn": "

    The Amazon Resource Name (ARN) of the instance profile.

    ", + "IamInstanceProfileSpecification$Name": "

    The name of the instance profile.

    ", + "IdFormat$Resource": "

    The type of resource.

    ", + "Image$ImageId": "

    The ID of the AMI.

    ", + "Image$ImageLocation": "

    The location of the AMI.

    ", + "Image$OwnerId": "

    The AWS account ID of the image owner.

    ", + "Image$CreationDate": "

    The date and time the image was created.

    ", + "Image$KernelId": "

    The kernel associated with the image, if any. Only applicable for machine images.

    ", + "Image$RamdiskId": "

    The RAM disk associated with the image, if any. Only applicable for machine images.

    ", + "Image$SriovNetSupport": "

    Specifies whether enhanced networking is enabled.

    ", + "Image$ImageOwnerAlias": "

    The AWS account alias (for example, amazon, self) or the AWS account ID of the AMI owner.

    ", + "Image$Name": "

    The name of the AMI that was provided during image creation.

    ", + "Image$Description": "

    The description of the AMI that was provided during image creation.

    ", + "Image$RootDeviceName": "

    The device name of the root device (for example, /dev/sda1 or /dev/xvda).

    ", + "ImageAttribute$ImageId": "

    The ID of the AMI.

    ", + "ImageDiskContainer$Description": "

    The description of the disk image.

    ", + "ImageDiskContainer$Format": "

    The format of the disk image being imported.

    Valid values: RAW | VHD | VMDK | OVA

    ", + "ImageDiskContainer$Url": "

    The URL to the Amazon S3-based disk image being imported. The URL can either be a https URL (https://..) or an Amazon S3 URL (s3://..)

    ", + "ImageDiskContainer$DeviceName": "

    The block device mapping for the disk.

    ", + "ImageDiskContainer$SnapshotId": "

    The ID of the EBS snapshot to be used for importing the snapshot.

    ", + "ImageIdStringList$member": null, + "ImportImageRequest$Description": "

    A description string for the import image task.

    ", + "ImportImageRequest$LicenseType": "

    The license type to be used for the Amazon Machine Image (AMI) after importing.

    Note: You may only use BYOL if you have existing licenses with rights to use these licenses in a third party cloud like AWS. For more information, see VM Import/Export Prerequisites in the Amazon Elastic Compute Cloud User Guide.

    Valid values: AWS | BYOL

    ", + "ImportImageRequest$Hypervisor": "

    The target hypervisor platform.

    Valid values: xen

    ", + "ImportImageRequest$Architecture": "

    The architecture of the virtual machine.

    Valid values: i386 | x86_64

    ", + "ImportImageRequest$Platform": "

    The operating system of the virtual machine.

    Valid values: Windows | Linux

    ", + "ImportImageRequest$ClientToken": "

    The token to enable idempotency for VM import requests.

    ", + "ImportImageRequest$RoleName": "

    The name of the role to use when not using the default role, 'vmimport'.

    ", + "ImportImageResult$ImportTaskId": "

    The task ID of the import image task.

    ", + "ImportImageResult$Architecture": "

    The architecture of the virtual machine.

    ", + "ImportImageResult$LicenseType": "

    The license type of the virtual machine.

    ", + "ImportImageResult$Platform": "

    The operating system of the virtual machine.

    ", + "ImportImageResult$Hypervisor": "

    The target hypervisor of the import task.

    ", + "ImportImageResult$Description": "

    A description of the import task.

    ", + "ImportImageResult$ImageId": "

    The ID of the Amazon Machine Image (AMI) created by the import task.

    ", + "ImportImageResult$Progress": "

    The progress of the task.

    ", + "ImportImageResult$StatusMessage": "

    A detailed status message of the import task.

    ", + "ImportImageResult$Status": "

    A brief status of the task.

    ", + "ImportImageTask$ImportTaskId": "

    The ID of the import image task.

    ", + "ImportImageTask$Architecture": "

    The architecture of the virtual machine.

    Valid values: i386 | x86_64

    ", + "ImportImageTask$LicenseType": "

    The license type of the virtual machine.

    ", + "ImportImageTask$Platform": "

    The description string for the import image task.

    ", + "ImportImageTask$Hypervisor": "

    The target hypervisor for the import task.

    Valid values: xen

    ", + "ImportImageTask$Description": "

    A description of the import task.

    ", + "ImportImageTask$ImageId": "

    The ID of the Amazon Machine Image (AMI) of the imported virtual machine.

    ", + "ImportImageTask$Progress": "

    The percentage of progress of the import image task.

    ", + "ImportImageTask$StatusMessage": "

    A descriptive status message for the import image task.

    ", + "ImportImageTask$Status": "

    A brief status for the import image task.

    ", + "ImportInstanceLaunchSpecification$AdditionalInfo": "

    Reserved.

    ", + "ImportInstanceLaunchSpecification$SubnetId": "

    [EC2-VPC] The ID of the subnet in which to launch the instance.

    ", + "ImportInstanceLaunchSpecification$PrivateIpAddress": "

    [EC2-VPC] An available IP address from the IP address range of the subnet.

    ", + "ImportInstanceRequest$Description": "

    A description for the instance being imported.

    ", + "ImportInstanceTaskDetails$InstanceId": "

    The ID of the instance.

    ", + "ImportInstanceTaskDetails$Description": "

    A description of the task.

    ", + "ImportInstanceVolumeDetailItem$AvailabilityZone": "

    The Availability Zone where the resulting instance will reside.

    ", + "ImportInstanceVolumeDetailItem$Status": "

    The status of the import of this particular disk image.

    ", + "ImportInstanceVolumeDetailItem$StatusMessage": "

    The status information or errors related to the disk image.

    ", + "ImportInstanceVolumeDetailItem$Description": "

    A description of the task.

    ", + "ImportKeyPairRequest$KeyName": "

    A unique name for the key pair.

    ", + "ImportKeyPairResult$KeyName": "

    The key pair name you provided.

    ", + "ImportKeyPairResult$KeyFingerprint": "

    The MD5 public key fingerprint as specified in section 4 of RFC 4716.

    ", + "ImportSnapshotRequest$Description": "

    The description string for the import snapshot task.

    ", + "ImportSnapshotRequest$ClientToken": "

    Token to enable idempotency for VM import requests.

    ", + "ImportSnapshotRequest$RoleName": "

    The name of the role to use when not using the default role, 'vmimport'.

    ", + "ImportSnapshotResult$ImportTaskId": "

    The ID of the import snapshot task.

    ", + "ImportSnapshotResult$Description": "

    A description of the import snapshot task.

    ", + "ImportSnapshotTask$ImportTaskId": "

    The ID of the import snapshot task.

    ", + "ImportSnapshotTask$Description": "

    A description of the import snapshot task.

    ", + "ImportTaskIdList$member": null, + "ImportVolumeRequest$AvailabilityZone": "

    The Availability Zone for the resulting EBS volume.

    ", + "ImportVolumeRequest$Description": "

    A description of the volume.

    ", + "ImportVolumeTaskDetails$AvailabilityZone": "

    The Availability Zone where the resulting volume will reside.

    ", + "ImportVolumeTaskDetails$Description": "

    The description you provided when starting the import volume task.

    ", + "Instance$InstanceId": "

    The ID of the instance.

    ", + "Instance$ImageId": "

    The ID of the AMI used to launch the instance.

    ", + "Instance$PrivateDnsName": "

    The private DNS name assigned to the instance. This DNS name can only be used inside the Amazon EC2 network. This name is not available until the instance enters the running state. For EC2-VPC, this name is only available if you've enabled DNS hostnames for your VPC.

    ", + "Instance$PublicDnsName": "

    The public DNS name assigned to the instance. This name is not available until the instance enters the running state. For EC2-VPC, this name is only available if you've enabled DNS hostnames for your VPC.

    ", + "Instance$StateTransitionReason": "

    The reason for the most recent state transition. This might be an empty string.

    ", + "Instance$KeyName": "

    The name of the key pair, if this instance was launched with an associated key pair.

    ", + "Instance$KernelId": "

    The kernel associated with this instance, if applicable.

    ", + "Instance$RamdiskId": "

    The RAM disk associated with this instance, if applicable.

    ", + "Instance$SubnetId": "

    [EC2-VPC] The ID of the subnet in which the instance is running.

    ", + "Instance$VpcId": "

    [EC2-VPC] The ID of the VPC in which the instance is running.

    ", + "Instance$PrivateIpAddress": "

    The private IP address assigned to the instance.

    ", + "Instance$PublicIpAddress": "

    The public IP address assigned to the instance, if applicable.

    ", + "Instance$RootDeviceName": "

    The root device name (for example, /dev/sda1 or /dev/xvda).

    ", + "Instance$SpotInstanceRequestId": "

    If the request is a Spot instance request, the ID of the request.

    ", + "Instance$ClientToken": "

    The idempotency token you provided when you launched the instance, if applicable.

    ", + "Instance$SriovNetSupport": "

    Specifies whether enhanced networking is enabled.

    ", + "InstanceAttribute$InstanceId": "

    The ID of the instance.

    ", + "InstanceBlockDeviceMapping$DeviceName": "

    The device name exposed to the instance (for example, /dev/sdh or xvdh).

    ", + "InstanceBlockDeviceMappingSpecification$DeviceName": "

    The device name exposed to the instance (for example, /dev/sdh or xvdh).

    ", + "InstanceBlockDeviceMappingSpecification$VirtualName": "

    The virtual device name.

    ", + "InstanceBlockDeviceMappingSpecification$NoDevice": "

    suppress the specified device included in the block device mapping.

    ", + "InstanceCapacity$InstanceType": "

    The instance type size supported by the Dedicated host.

    ", + "InstanceExportDetails$InstanceId": "

    The ID of the resource being exported.

    ", + "InstanceIdSet$member": null, + "InstanceIdStringList$member": null, + "InstanceMonitoring$InstanceId": "

    The ID of the instance.

    ", + "InstanceNetworkInterface$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "InstanceNetworkInterface$SubnetId": "

    The ID of the subnet.

    ", + "InstanceNetworkInterface$VpcId": "

    The ID of the VPC.

    ", + "InstanceNetworkInterface$Description": "

    The description.

    ", + "InstanceNetworkInterface$OwnerId": "

    The ID of the AWS account that created the network interface.

    ", + "InstanceNetworkInterface$MacAddress": "

    The MAC address.

    ", + "InstanceNetworkInterface$PrivateIpAddress": "

    The IP address of the network interface within the subnet.

    ", + "InstanceNetworkInterface$PrivateDnsName": "

    The private DNS name.

    ", + "InstanceNetworkInterfaceAssociation$PublicIp": "

    The public IP address or Elastic IP address bound to the network interface.

    ", + "InstanceNetworkInterfaceAssociation$PublicDnsName": "

    The public DNS name.

    ", + "InstanceNetworkInterfaceAssociation$IpOwnerId": "

    The ID of the owner of the Elastic IP address.

    ", + "InstanceNetworkInterfaceAttachment$AttachmentId": "

    The ID of the network interface attachment.

    ", + "InstanceNetworkInterfaceSpecification$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "InstanceNetworkInterfaceSpecification$SubnetId": "

    The ID of the subnet associated with the network string. Applies only if creating a network interface when launching an instance.

    ", + "InstanceNetworkInterfaceSpecification$Description": "

    The description of the network interface. Applies only if creating a network interface when launching an instance.

    ", + "InstanceNetworkInterfaceSpecification$PrivateIpAddress": "

    The private IP address of the network interface. Applies only if creating a network interface when launching an instance.

    ", + "InstancePrivateIpAddress$PrivateIpAddress": "

    The private IP address of the network interface.

    ", + "InstancePrivateIpAddress$PrivateDnsName": "

    The private DNS name.

    ", + "InstanceStateChange$InstanceId": "

    The ID of the instance.

    ", + "InstanceStatus$InstanceId": "

    The ID of the instance.

    ", + "InstanceStatus$AvailabilityZone": "

    The Availability Zone of the instance.

    ", + "InstanceStatusEvent$Description": "

    A description of the event.

    After a scheduled event is completed, it can still be described for up to a week. If the event has been completed, this description starts with the following text: [Completed].

    ", + "InternetGateway$InternetGatewayId": "

    The ID of the Internet gateway.

    ", + "InternetGatewayAttachment$VpcId": "

    The ID of the VPC.

    ", + "IpPermission$IpProtocol": "

    The IP protocol name (for tcp, udp, and icmp) or number (see Protocol Numbers).

    [EC2-VPC only] When you authorize or revoke security group rules, you can use -1 to specify all.

    ", + "IpRange$CidrIp": "

    The CIDR range. You can either specify a CIDR range or a source security group, not both.

    ", + "IpRanges$member": null, + "KeyNameStringList$member": null, + "KeyPair$KeyName": "

    The name of the key pair.

    ", + "KeyPair$KeyFingerprint": "

    The SHA-1 digest of the DER encoded private key.

    ", + "KeyPair$KeyMaterial": "

    An unencrypted PEM encoded RSA private key.

    ", + "KeyPairInfo$KeyName": "

    The name of the key pair.

    ", + "KeyPairInfo$KeyFingerprint": "

    If you used CreateKeyPair to create the key pair, this is the SHA-1 digest of the DER encoded private key. If you used ImportKeyPair to provide AWS the public key, this is the MD5 public key fingerprint as specified in section 4 of RFC4716.

    ", + "LaunchPermission$UserId": "

    The AWS account ID.

    ", + "LaunchSpecification$ImageId": "

    The ID of the AMI.

    ", + "LaunchSpecification$KeyName": "

    The name of the key pair.

    ", + "LaunchSpecification$UserData": "

    The Base64-encoded MIME user data to make available to the instances.

    ", + "LaunchSpecification$AddressingType": "

    Deprecated.

    ", + "LaunchSpecification$KernelId": "

    The ID of the kernel.

    ", + "LaunchSpecification$RamdiskId": "

    The ID of the RAM disk.

    ", + "LaunchSpecification$SubnetId": "

    The ID of the subnet in which to launch the instance.

    ", + "ModifyIdFormatRequest$Resource": "

    The type of resource.

    ", + "ModifyImageAttributeRequest$ImageId": "

    The ID of the AMI.

    ", + "ModifyImageAttributeRequest$Attribute": "

    The name of the attribute to modify.

    ", + "ModifyImageAttributeRequest$Value": "

    The value of the attribute being modified. This is only valid when modifying the description attribute.

    ", + "ModifyInstanceAttributeRequest$InstanceId": "

    The ID of the instance.

    ", + "ModifyInstanceAttributeRequest$Value": "

    A new value for the attribute. Use only with the kernel, ramdisk, userData, disableApiTermination, or instanceInitiatedShutdownBehavior attribute.

    ", + "ModifyInstancePlacementRequest$InstanceId": "

    The ID of the instance that you are modifying.

    ", + "ModifyInstancePlacementRequest$HostId": "

    The ID of the Dedicated host that the instance will have affinity with.

    ", + "ModifyNetworkInterfaceAttributeRequest$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "ModifyReservedInstancesRequest$ClientToken": "

    A unique, case-sensitive token you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

    ", + "ModifyReservedInstancesResult$ReservedInstancesModificationId": "

    The ID for the modification.

    ", + "ModifySnapshotAttributeRequest$SnapshotId": "

    The ID of the snapshot.

    ", + "ModifySpotFleetRequestRequest$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "ModifySubnetAttributeRequest$SubnetId": "

    The ID of the subnet.

    ", + "ModifyVolumeAttributeRequest$VolumeId": "

    The ID of the volume.

    ", + "ModifyVpcAttributeRequest$VpcId": "

    The ID of the VPC.

    ", + "ModifyVpcEndpointRequest$VpcEndpointId": "

    The ID of the endpoint.

    ", + "ModifyVpcEndpointRequest$PolicyDocument": "

    A policy document to attach to the endpoint. The policy must be in valid JSON format.

    ", + "ModifyVpcPeeringConnectionOptionsRequest$VpcPeeringConnectionId": "

    The ID of the VPC peering connection.

    ", + "MoveAddressToVpcRequest$PublicIp": "

    The Elastic IP address.

    ", + "MoveAddressToVpcResult$AllocationId": "

    The allocation ID for the Elastic IP address.

    ", + "MovingAddressStatus$PublicIp": "

    The Elastic IP address.

    ", + "NatGateway$VpcId": "

    The ID of the VPC in which the NAT gateway is located.

    ", + "NatGateway$SubnetId": "

    The ID of the subnet in which the NAT gateway is located.

    ", + "NatGateway$NatGatewayId": "

    The ID of the NAT gateway.

    ", + "NatGateway$FailureCode": "

    If the NAT gateway could not be created, specifies the error code for the failure. (InsufficientFreeAddressesInSubnet | Gateway.NotAttached | InvalidAllocationID.NotFound | Resource.AlreadyAssociated | InternalError | InvalidSubnetID.NotFound)

    ", + "NatGateway$FailureMessage": "

    If the NAT gateway could not be created, specifies the error message for the failure, that corresponds to the error code.

    • For InsufficientFreeAddressesInSubnet: \"Subnet has insufficient free addresses to create this NAT gateway\"

    • For Gateway.NotAttached: \"Network vpc-xxxxxxxx has no Internet gateway attached\"

    • For InvalidAllocationID.NotFound: \"Elastic IP address eipalloc-xxxxxxxx could not be associated with this NAT gateway\"

    • For Resource.AlreadyAssociated: \"Elastic IP address eipalloc-xxxxxxxx is already associated\"

    • For InternalError: \"Network interface eni-xxxxxxxx, created and used internally by this NAT gateway is in an invalid state. Please try again.\"

    • For InvalidSubnetID.NotFound: \"The specified subnet subnet-xxxxxxxx does not exist or could not be found.\"
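    For reference only (not part of the vendored file): a minimal aws-sdk-go sketch of reading these failure fields after a NAT gateway fails to create; FailureCode/FailureMessage are only populated on failure. The gateway ID is a placeholder.

    ```go
    package main

    import (
    	"fmt"
    	"log"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/ec2"
    )

    func main() {
    	svc := ec2.New(session.Must(session.NewSession()))

    	out, err := svc.DescribeNatGateways(&ec2.DescribeNatGatewaysInput{
    		NatGatewayIds: []*string{aws.String("nat-0123456789abcdef0")}, // placeholder ID
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	for _, gw := range out.NatGateways {
    		// The code is one of the enumerated values above; the message
    		// is the corresponding human-readable text.
    		if gw.FailureCode != nil {
    			fmt.Printf("%s failed: %s (%s)\n",
    				aws.StringValue(gw.NatGatewayId),
    				aws.StringValue(gw.FailureCode),
    				aws.StringValue(gw.FailureMessage))
    		}
    	}
    }
    ```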

    ", + "NatGatewayAddress$PublicIp": "

    The Elastic IP address associated with the NAT gateway.

    ", + "NatGatewayAddress$AllocationId": "

    The allocation ID of the Elastic IP address that's associated with the NAT gateway.

    ", + "NatGatewayAddress$PrivateIp": "

    The private IP address associated with the Elastic IP address.

    ", + "NatGatewayAddress$NetworkInterfaceId": "

    The ID of the network interface associated with the NAT gateway.

    ", + "NetworkAcl$NetworkAclId": "

    The ID of the network ACL.

    ", + "NetworkAcl$VpcId": "

    The ID of the VPC for the network ACL.

    ", + "NetworkAclAssociation$NetworkAclAssociationId": "

    The ID of the association between a network ACL and a subnet.

    ", + "NetworkAclAssociation$NetworkAclId": "

    The ID of the network ACL.

    ", + "NetworkAclAssociation$SubnetId": "

    The ID of the subnet.

    ", + "NetworkAclEntry$Protocol": "

    The protocol. A value of -1 means all protocols.

    ", + "NetworkAclEntry$CidrBlock": "

    The network range to allow or deny, in CIDR notation.

    ", + "NetworkInterface$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "NetworkInterface$SubnetId": "

    The ID of the subnet.

    ", + "NetworkInterface$VpcId": "

    The ID of the VPC.

    ", + "NetworkInterface$AvailabilityZone": "

    The Availability Zone.

    ", + "NetworkInterface$Description": "

    A description.

    ", + "NetworkInterface$OwnerId": "

    The AWS account ID of the owner of the network interface.

    ", + "NetworkInterface$RequesterId": "

    The ID of the entity that launched the instance on your behalf (for example, AWS Management Console or Auto Scaling).

    ", + "NetworkInterface$MacAddress": "

    The MAC address.

    ", + "NetworkInterface$PrivateIpAddress": "

    The IP address of the network interface within the subnet.

    ", + "NetworkInterface$PrivateDnsName": "

    The private DNS name.

    ", + "NetworkInterfaceAssociation$PublicIp": "

    The Elastic IP address bound to the network interface.

    ", + "NetworkInterfaceAssociation$PublicDnsName": "

    The public DNS name.

    ", + "NetworkInterfaceAssociation$IpOwnerId": "

    The ID of the Elastic IP address owner.

    ", + "NetworkInterfaceAssociation$AllocationId": "

    The allocation ID.

    ", + "NetworkInterfaceAssociation$AssociationId": "

    The association ID.

    ", + "NetworkInterfaceAttachment$AttachmentId": "

    The ID of the network interface attachment.

    ", + "NetworkInterfaceAttachment$InstanceId": "

    The ID of the instance.

    ", + "NetworkInterfaceAttachment$InstanceOwnerId": "

    The AWS account ID of the owner of the instance.

    ", + "NetworkInterfaceAttachmentChanges$AttachmentId": "

    The ID of the network interface attachment.

    ", + "NetworkInterfaceIdList$member": null, + "NetworkInterfacePrivateIpAddress$PrivateIpAddress": "

    The private IP address.

    ", + "NetworkInterfacePrivateIpAddress$PrivateDnsName": "

    The private DNS name.

    ", + "NewDhcpConfiguration$Key": null, + "OwnerStringList$member": null, + "Placement$AvailabilityZone": "

    The Availability Zone of the instance.

    ", + "Placement$GroupName": "

    The name of the placement group the instance is in (for cluster compute instances).

    ", + "Placement$HostId": "

    The ID of the Dedicated host on which the instance resides. This parameter is not supported for the ImportInstance command.

    ", + "Placement$Affinity": "

    The affinity setting for the instance on the Dedicated host. This parameter is not supported for the ImportInstance command.

    ", + "PlacementGroup$GroupName": "

    The name of the placement group.

    ", + "PlacementGroupStringList$member": null, + "PrefixList$PrefixListId": "

    The ID of the prefix list.

    ", + "PrefixList$PrefixListName": "

    The name of the prefix list.

    ", + "PrefixListId$PrefixListId": "

    The ID of the prefix list.

    ", + "PrefixListIdSet$member": null, + "PrivateIpAddressSpecification$PrivateIpAddress": "

    The private IP address.

    ", + "PrivateIpAddressStringList$member": null, + "ProductCode$ProductCodeId": "

    The product code.

    ", + "ProductCodeStringList$member": null, + "ProductDescriptionList$member": null, + "PropagatingVgw$GatewayId": "

    The ID of the virtual private gateway (VGW).

    ", + "ProvisionedBandwidth$Provisioned": "

    Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

    ", + "ProvisionedBandwidth$Requested": "

    Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

    ", + "ProvisionedBandwidth$Status": "

    Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

    ", + "PublicIpStringList$member": null, + "PurchaseRequest$PurchaseToken": "

    The purchase token.

    ", + "PurchaseReservedInstancesOfferingRequest$ReservedInstancesOfferingId": "

    The ID of the Reserved Instance offering to purchase.

    ", + "PurchaseReservedInstancesOfferingResult$ReservedInstancesId": "

    The IDs of the purchased Reserved Instances.

    ", + "PurchaseScheduledInstancesRequest$ClientToken": "

    Unique, case-sensitive identifier that ensures the idempotency of the request. For more information, see Ensuring Idempotency.

    ", + "Region$RegionName": "

    The name of the region.

    ", + "Region$Endpoint": "

    The region service endpoint.

    ", + "RegionNameStringList$member": null, + "RegisterImageRequest$ImageLocation": "

    The full path to your AMI manifest in Amazon S3 storage.

    ", + "RegisterImageRequest$Name": "

    A name for your AMI.

    Constraints: 3-128 alphanumeric characters, parentheses (()), square brackets ([]), spaces ( ), periods (.), slashes (/), dashes (-), single quotes ('), at-signs (@), or underscores (_)

    ", + "RegisterImageRequest$Description": "

    A description for your AMI.

    ", + "RegisterImageRequest$KernelId": "

    The ID of the kernel.

    ", + "RegisterImageRequest$RamdiskId": "

    The ID of the RAM disk.

    ", + "RegisterImageRequest$RootDeviceName": "

    The name of the root device (for example, /dev/sda1, or /dev/xvda).

    ", + "RegisterImageRequest$VirtualizationType": "

    The type of virtualization.

    Default: paravirtual

    ", + "RegisterImageRequest$SriovNetSupport": "

    Set to simple to enable enhanced networking for the AMI and any instances that you launch from the AMI.

    There is no way to disable enhanced networking at this time.

    This option is supported only for HVM AMIs. Specifying this option with a PV AMI can make instances launched from the AMI unreachable.

    ", + "RegisterImageResult$ImageId": "

    The ID of the newly registered AMI.

    ", + "RejectVpcPeeringConnectionRequest$VpcPeeringConnectionId": "

    The ID of the VPC peering connection.

    ", + "ReleaseAddressRequest$PublicIp": "

    [EC2-Classic] The Elastic IP address. Required for EC2-Classic.

    ", + "ReleaseAddressRequest$AllocationId": "

    [EC2-VPC] The allocation ID. Required for EC2-VPC.

    ", + "ReplaceNetworkAclAssociationRequest$AssociationId": "

    The ID of the current association between the original network ACL and the subnet.

    ", + "ReplaceNetworkAclAssociationRequest$NetworkAclId": "

    The ID of the new network ACL to associate with the subnet.

    ", + "ReplaceNetworkAclAssociationResult$NewAssociationId": "

    The ID of the new association.

    ", + "ReplaceNetworkAclEntryRequest$NetworkAclId": "

    The ID of the ACL.

    ", + "ReplaceNetworkAclEntryRequest$Protocol": "

    The IP protocol. You can specify all or -1 to mean all protocols.

    ", + "ReplaceNetworkAclEntryRequest$CidrBlock": "

    The network range to allow or deny, in CIDR notation.

    ", + "ReplaceRouteRequest$RouteTableId": "

    The ID of the route table.

    ", + "ReplaceRouteRequest$DestinationCidrBlock": "

    The CIDR address block used for the destination match. The value you provide must match the CIDR of an existing route in the table.

    ", + "ReplaceRouteRequest$GatewayId": "

    The ID of an Internet gateway or virtual private gateway.

    ", + "ReplaceRouteRequest$InstanceId": "

    The ID of a NAT instance in your VPC.

    ", + "ReplaceRouteRequest$NetworkInterfaceId": "

    The ID of a network interface.

    ", + "ReplaceRouteRequest$VpcPeeringConnectionId": "

    The ID of a VPC peering connection.

    ", + "ReplaceRouteRequest$NatGatewayId": "

    The ID of a NAT gateway.

    ", + "ReplaceRouteTableAssociationRequest$AssociationId": "

    The association ID.

    ", + "ReplaceRouteTableAssociationRequest$RouteTableId": "

    The ID of the new route table to associate with the subnet.

    ", + "ReplaceRouteTableAssociationResult$NewAssociationId": "

    The ID of the new association.

    ", + "ReportInstanceStatusRequest$Description": "

    Descriptive text about the health state of your instance.

    ", + "RequestHostIdList$member": null, + "RequestSpotFleetResponse$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "RequestSpotInstancesRequest$SpotPrice": "

    The maximum hourly price (bid) for any Spot instance launched to fulfill the request.

    ", + "RequestSpotInstancesRequest$ClientToken": "

    Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency in the Amazon Elastic Compute Cloud User Guide.

    ", + "RequestSpotInstancesRequest$LaunchGroup": "

    The instance launch group. Launch groups are Spot instances that launch together and terminate together.

    Default: Instances are launched and terminated individually

    ", + "RequestSpotInstancesRequest$AvailabilityZoneGroup": "

    The user-specified name for a logical grouping of bids.

    When you specify an Availability Zone group in a Spot Instance request, all Spot instances in the request are launched in the same Availability Zone. Instance proximity is maintained with this parameter, but the choice of Availability Zone is not. The group applies only to bids for Spot Instances of the same instance type. Any additional Spot instance requests that are specified with the same Availability Zone group name are launched in that same Availability Zone, as long as at least one instance from the group is still active.

    If there is no active instance running in the Availability Zone group that you specify for a new Spot instance request (all instances are terminated, the bid is expired, or the bid falls below the current market price), then Amazon EC2 launches the instance in any Availability Zone where the constraint can be met. Consequently, the subsequent set of Spot instances could be placed in a different zone from the original request, even if you specified the same Availability Zone group.

    Default: Instances are launched in any available Availability Zone.

    ", + "RequestSpotLaunchSpecification$ImageId": "

    The ID of the AMI.

    ", + "RequestSpotLaunchSpecification$KeyName": "

    The name of the key pair.

    ", + "RequestSpotLaunchSpecification$UserData": "

    The Base64-encoded MIME user data to make available to the instances.

    ", + "RequestSpotLaunchSpecification$AddressingType": "

    Deprecated.

    ", + "RequestSpotLaunchSpecification$KernelId": "

    The ID of the kernel.

    ", + "RequestSpotLaunchSpecification$RamdiskId": "

    The ID of the RAM disk.

    ", + "RequestSpotLaunchSpecification$SubnetId": "

    The ID of the subnet in which to launch the instance.

    ", + "Reservation$ReservationId": "

    The ID of the reservation.

    ", + "Reservation$OwnerId": "

    The ID of the AWS account that owns the reservation.

    ", + "Reservation$RequesterId": "

    The ID of the requester that launched the instances on your behalf (for example, AWS Management Console or Auto Scaling).

    ", + "ReservedInstances$ReservedInstancesId": "

    The ID of the Reserved Instance.

    ", + "ReservedInstances$AvailabilityZone": "

    The Availability Zone in which the Reserved Instance can be used.

    ", + "ReservedInstancesConfiguration$AvailabilityZone": "

    The Availability Zone for the modified Reserved Instances.

    ", + "ReservedInstancesConfiguration$Platform": "

    The network platform of the modified Reserved Instances, which is either EC2-Classic or EC2-VPC.

    ", + "ReservedInstancesId$ReservedInstancesId": "

    The ID of the Reserved Instance.

    ", + "ReservedInstancesIdStringList$member": null, + "ReservedInstancesListing$ReservedInstancesListingId": "

    The ID of the Reserved Instance listing.

    ", + "ReservedInstancesListing$ReservedInstancesId": "

    The ID of the Reserved Instance.

    ", + "ReservedInstancesListing$StatusMessage": "

    The reason for the current status of the Reserved Instance listing. The response can be blank.

    ", + "ReservedInstancesListing$ClientToken": "

    A unique, case-sensitive key supplied by the client to ensure that the request is idempotent. For more information, see Ensuring Idempotency.

    ", + "ReservedInstancesModification$ReservedInstancesModificationId": "

    A unique ID for the Reserved Instance modification.

    ", + "ReservedInstancesModification$Status": "

    The status of the Reserved Instances modification request.

    ", + "ReservedInstancesModification$StatusMessage": "

    The reason for the status.

    ", + "ReservedInstancesModification$ClientToken": "

    A unique, case-sensitive key supplied by the client to ensure that the request is idempotent. For more information, see Ensuring Idempotency.

    ", + "ReservedInstancesModificationIdStringList$member": null, + "ReservedInstancesModificationResult$ReservedInstancesId": "

    The ID for the Reserved Instances that were created as part of the modification request. This field is only available when the modification is fulfilled.

    ", + "ReservedInstancesOffering$ReservedInstancesOfferingId": "

    The ID of the Reserved Instance offering.

    ", + "ReservedInstancesOffering$AvailabilityZone": "

    The Availability Zone in which the Reserved Instance can be used.

    ", + "ReservedInstancesOfferingIdStringList$member": null, + "ResetImageAttributeRequest$ImageId": "

    The ID of the AMI.

    ", + "ResetInstanceAttributeRequest$InstanceId": "

    The ID of the instance.

    ", + "ResetNetworkInterfaceAttributeRequest$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "ResetNetworkInterfaceAttributeRequest$SourceDestCheck": "

    The source/destination checking attribute. Resets the value to true.

    ", + "ResetSnapshotAttributeRequest$SnapshotId": "

    The ID of the snapshot.

    ", + "ResourceIdList$member": null, + "ResponseHostIdList$member": null, + "RestorableByStringList$member": null, + "RestoreAddressToClassicRequest$PublicIp": "

    The Elastic IP address.

    ", + "RestoreAddressToClassicResult$PublicIp": "

    The Elastic IP address.

    ", + "RevokeSecurityGroupEgressRequest$GroupId": "

    The ID of the security group.

    ", + "RevokeSecurityGroupEgressRequest$SourceSecurityGroupName": "

    The name of a destination security group. To revoke outbound access to a destination security group, we recommend that you use a set of IP permissions instead.

    ", + "RevokeSecurityGroupEgressRequest$SourceSecurityGroupOwnerId": "

    The AWS account number for a destination security group. To revoke outbound access to a destination security group, we recommend that you use a set of IP permissions instead.

    ", + "RevokeSecurityGroupEgressRequest$IpProtocol": "

    The IP protocol name or number. We recommend that you specify the protocol in a set of IP permissions instead.

    ", + "RevokeSecurityGroupEgressRequest$CidrIp": "

    The CIDR IP address range. We recommend that you specify the CIDR range in a set of IP permissions instead.

    ", + "RevokeSecurityGroupIngressRequest$GroupName": "

    [EC2-Classic, default VPC] The name of the security group.

    ", + "RevokeSecurityGroupIngressRequest$GroupId": "

    The ID of the security group. Required for a security group in a nondefault VPC.

    ", + "RevokeSecurityGroupIngressRequest$SourceSecurityGroupName": "

    [EC2-Classic, default VPC] The name of the source security group. You can't specify this parameter in combination with the following parameters: the CIDR IP address range, the start of the port range, the IP protocol, and the end of the port range. For EC2-VPC, the source security group must be in the same VPC. To revoke a specific rule for an IP protocol and port range, use a set of IP permissions instead.

    ", + "RevokeSecurityGroupIngressRequest$SourceSecurityGroupOwnerId": "

    [EC2-Classic] The AWS account ID of the source security group, if the source security group is in a different account. You can't specify this parameter in combination with the following parameters: the CIDR IP address range, the IP protocol, the start of the port range, and the end of the port range. To revoke a specific rule for an IP protocol and port range, use a set of IP permissions instead.

    ", + "RevokeSecurityGroupIngressRequest$IpProtocol": "

    The IP protocol name (tcp, udp, icmp) or number (see Protocol Numbers). Use -1 to specify all.

    ", + "RevokeSecurityGroupIngressRequest$CidrIp": "

    The CIDR IP address range. You can't specify this parameter when specifying a source security group.
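    For reference only (not part of the vendored file): the preceding entries repeatedly recommend a set of IP permissions over the flat parameters. A minimal aws-sdk-go sketch of revoking one specific rule that way; the group ID and CIDR are placeholders.

    ```go
    package main

    import (
    	"log"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/ec2"
    )

    func main() {
    	svc := ec2.New(session.Must(session.NewSession()))

    	// An IpPermission pins down exactly one protocol/port/CIDR
    	// combination, which the individual request parameters cannot.
    	_, err := svc.RevokeSecurityGroupIngress(&ec2.RevokeSecurityGroupIngressInput{
    		GroupId: aws.String("sg-12345678"), // placeholder; required for nondefault-VPC groups
    		IpPermissions: []*ec2.IpPermission{{
    			IpProtocol: aws.String("tcp"),
    			FromPort:   aws.Int64(22),
    			ToPort:     aws.Int64(22),
    			IpRanges:   []*ec2.IpRange{{CidrIp: aws.String("203.0.113.0/24")}}, // placeholder CIDR
    		}},
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    }
    ```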

    ", + "Route$DestinationCidrBlock": "

    The CIDR block used for the destination match.

    ", + "Route$DestinationPrefixListId": "

    The ID of the prefix list for the AWS service.

    ", + "Route$GatewayId": "

    The ID of a gateway attached to your VPC.

    ", + "Route$InstanceId": "

    The ID of a NAT instance in your VPC.

    ", + "Route$InstanceOwnerId": "

    The AWS account ID of the owner of the instance.

    ", + "Route$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "Route$VpcPeeringConnectionId": "

    The ID of the VPC peering connection.

    ", + "Route$NatGatewayId": "

    The ID of a NAT gateway.

    ", + "RouteTable$RouteTableId": "

    The ID of the route table.

    ", + "RouteTable$VpcId": "

    The ID of the VPC.

    ", + "RouteTableAssociation$RouteTableAssociationId": "

    The ID of the association between a route table and a subnet.

    ", + "RouteTableAssociation$RouteTableId": "

    The ID of the route table.

    ", + "RouteTableAssociation$SubnetId": "

    The ID of the subnet. A subnet ID is not returned for an implicit association.

    ", + "RunInstancesRequest$ImageId": "

    The ID of the AMI, which you can get by calling DescribeImages.

    ", + "RunInstancesRequest$KeyName": "

    The name of the key pair. You can create a key pair using CreateKeyPair or ImportKeyPair.

    If you do not specify a key pair, you can't connect to the instance unless you choose an AMI that is configured to allow users another way to log in.

    ", + "RunInstancesRequest$UserData": "

    Data to configure the instance, or a script to run during instance launch. For more information, see Running Commands on Your Linux Instance at Launch (Linux) and Adding User Data (Windows). For API calls, the text must be base64-encoded. For command line tools, the encoding is performed for you, and you can load the text from a file.
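    For reference only (not part of the vendored file): a minimal aws-sdk-go sketch of the base64 step this entry describes for API calls. The AMI ID is a placeholder.

    ```go
    package main

    import (
    	"encoding/base64"
    	"log"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/ec2"
    )

    func main() {
    	svc := ec2.New(session.Must(session.NewSession()))

    	script := "#!/bin/bash\nyum update -y\n"

    	_, err := svc.RunInstances(&ec2.RunInstancesInput{
    		ImageId:      aws.String("ami-12345678"), // placeholder AMI ID
    		InstanceType: aws.String("t2.micro"),
    		MinCount:     aws.Int64(1),
    		MaxCount:     aws.Int64(1),
    		// API calls must carry user data base64-encoded; the command
    		// line tools perform this encoding for you.
    		UserData: aws.String(base64.StdEncoding.EncodeToString([]byte(script))),
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    }
    ```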

    ", + "RunInstancesRequest$KernelId": "

    The ID of the kernel.

    We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB in the Amazon Elastic Compute Cloud User Guide.

    ", + "RunInstancesRequest$RamdiskId": "

    The ID of the RAM disk.

    We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB in the Amazon Elastic Compute Cloud User Guide.

    ", + "RunInstancesRequest$SubnetId": "

    [EC2-VPC] The ID of the subnet to launch the instance into.

    ", + "RunInstancesRequest$PrivateIpAddress": "

    [EC2-VPC] The primary IP address. You must specify a value from the IP address range of the subnet.

    Only one private IP address can be designated as primary. Therefore, you can't specify this parameter if PrivateIpAddresses.n.Primary is set to true and PrivateIpAddresses.n.PrivateIpAddress is set to an IP address.

    Default: We select an IP address from the IP address range of the subnet.

    ", + "RunInstancesRequest$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

    Constraints: Maximum 64 ASCII characters
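    For reference only (not part of the vendored file): a minimal sketch of generating a client token within the 64-ASCII-character constraint and passing it to RunInstances. The AMI ID is a placeholder.

    ```go
    package main

    import (
    	"crypto/rand"
    	"encoding/hex"
    	"log"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/ec2"
    )

    func main() {
    	svc := ec2.New(session.Must(session.NewSession()))

    	// 16 random bytes encode to 32 hex characters, well under the
    	// 64-ASCII-character limit for client tokens.
    	buf := make([]byte, 16)
    	if _, err := rand.Read(buf); err != nil {
    		log.Fatal(err)
    	}

    	// Retrying this call with the same token cannot launch duplicates.
    	_, err := svc.RunInstances(&ec2.RunInstancesInput{
    		ImageId:      aws.String("ami-12345678"), // placeholder AMI ID
    		InstanceType: aws.String("t2.micro"),
    		MinCount:     aws.Int64(1),
    		MaxCount:     aws.Int64(1),
    		ClientToken:  aws.String(hex.EncodeToString(buf)),
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    }
    ```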

    ", + "RunInstancesRequest$AdditionalInfo": "

    Reserved.

    ", + "RunScheduledInstancesRequest$ClientToken": "

    Unique, case-sensitive identifier that ensures the idempotency of the request. For more information, see Ensuring Idempotency.

    ", + "RunScheduledInstancesRequest$ScheduledInstanceId": "

    The Scheduled Instance ID.

    ", + "S3Storage$Bucket": "

    The bucket in which to store the AMI. You can specify a bucket that you already own or a new bucket that Amazon EC2 creates on your behalf. If you specify a bucket that belongs to someone else, Amazon EC2 returns an error.

    ", + "S3Storage$Prefix": "

    The beginning of the file name of the AMI.

    ", + "S3Storage$AWSAccessKeyId": "

    The access key ID of the owner of the bucket. Before you specify a value for your access key ID, review and follow the guidance in Best Practices for Managing AWS Access Keys.

    ", + "S3Storage$UploadPolicySignature": "

    The signature of the Base64-encoded JSON document.

    ", + "ScheduledInstance$ScheduledInstanceId": "

    The Scheduled Instance ID.

    ", + "ScheduledInstance$InstanceType": "

    The instance type.

    ", + "ScheduledInstance$Platform": "

    The platform (Linux/UNIX or Windows).

    ", + "ScheduledInstance$NetworkPlatform": "

    The network platform (EC2-Classic or EC2-VPC).

    ", + "ScheduledInstance$AvailabilityZone": "

    The Availability Zone.

    ", + "ScheduledInstance$HourlyPrice": "

    The hourly price for a single instance.

    ", + "ScheduledInstanceAvailability$InstanceType": "

    The instance type. You can specify one of the C3, C4, M4, or R3 instance types.

    ", + "ScheduledInstanceAvailability$Platform": "

    The platform (Linux/UNIX or Windows).

    ", + "ScheduledInstanceAvailability$NetworkPlatform": "

    The network platform (EC2-Classic or EC2-VPC).

    ", + "ScheduledInstanceAvailability$AvailabilityZone": "

    The Availability Zone.

    ", + "ScheduledInstanceAvailability$PurchaseToken": "

    The purchase token. This token expires in two hours.

    ", + "ScheduledInstanceAvailability$HourlyPrice": "

    The hourly price for a single instance.

    ", + "ScheduledInstanceIdRequestSet$member": null, + "ScheduledInstanceRecurrence$Frequency": "

    The frequency (Daily, Weekly, or Monthly).

    ", + "ScheduledInstanceRecurrence$OccurrenceUnit": "

    The unit for occurrenceDaySet (DayOfWeek or DayOfMonth).

    ", + "ScheduledInstanceRecurrenceRequest$Frequency": "

    The frequency (Daily, Weekly, or Monthly).

    ", + "ScheduledInstanceRecurrenceRequest$OccurrenceUnit": "

    The unit for OccurrenceDays (DayOfWeek or DayOfMonth). This value is required for a monthly schedule. You can't specify DayOfWeek with a weekly schedule. You can't specify this value with a daily schedule.

    ", + "ScheduledInstancesBlockDeviceMapping$DeviceName": "

    The device name exposed to the instance (for example, /dev/sdh or xvdh).

    ", + "ScheduledInstancesBlockDeviceMapping$NoDevice": "

    Suppresses the specified device included in the block device mapping of the AMI.

    ", + "ScheduledInstancesBlockDeviceMapping$VirtualName": "

    The virtual device name (ephemeralN). Instance store volumes are numbered starting from 0. An instance type with two available instance store volumes can specify mappings for ephemeral0 and ephemeral1. The number of available instance store volumes depends on the instance type. After you connect to the instance, you must mount the volume.

    Constraints: For M3 instances, you must specify instance store volumes in the block device mapping for the instance. When you launch an M3 instance, we ignore any instance store volumes specified in the block device mapping for the AMI.

    ", + "ScheduledInstancesEbs$SnapshotId": "

    The ID of the snapshot.

    ", + "ScheduledInstancesEbs$VolumeType": "

    The volume type. gp2 for General Purpose SSD, io1 for Provisioned IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard for Magnetic.

    Default: standard

    ", + "ScheduledInstancesIamInstanceProfile$Arn": "

    The Amazon Resource Name (ARN).

    ", + "ScheduledInstancesIamInstanceProfile$Name": "

    The name.

    ", + "ScheduledInstancesLaunchSpecification$ImageId": "

    The ID of the Amazon Machine Image (AMI).

    ", + "ScheduledInstancesLaunchSpecification$KeyName": "

    The name of the key pair.

    ", + "ScheduledInstancesLaunchSpecification$UserData": "

    The base64-encoded MIME user data.

    ", + "ScheduledInstancesLaunchSpecification$KernelId": "

    The ID of the kernel.

    ", + "ScheduledInstancesLaunchSpecification$InstanceType": "

    The instance type.

    ", + "ScheduledInstancesLaunchSpecification$RamdiskId": "

    The ID of the RAM disk.

    ", + "ScheduledInstancesLaunchSpecification$SubnetId": "

    The ID of the subnet in which to launch the instances.

    ", + "ScheduledInstancesNetworkInterface$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "ScheduledInstancesNetworkInterface$SubnetId": "

    The ID of the subnet.

    ", + "ScheduledInstancesNetworkInterface$Description": "

    The description.

    ", + "ScheduledInstancesNetworkInterface$PrivateIpAddress": "

    The IP address of the network interface within the subnet.

    ", + "ScheduledInstancesPlacement$AvailabilityZone": "

    The Availability Zone.

    ", + "ScheduledInstancesPlacement$GroupName": "

    The name of the placement group.

    ", + "ScheduledInstancesPrivateIpAddressConfig$PrivateIpAddress": "

    The IP address.

    ", + "ScheduledInstancesSecurityGroupIdSet$member": null, + "SecurityGroup$OwnerId": "

    The AWS account ID of the owner of the security group.

    ", + "SecurityGroup$GroupName": "

    The name of the security group.

    ", + "SecurityGroup$GroupId": "

    The ID of the security group.

    ", + "SecurityGroup$Description": "

    A description of the security group.

    ", + "SecurityGroup$VpcId": "

    [EC2-VPC] The ID of the VPC for the security group.

    ", + "SecurityGroupIdStringList$member": null, + "SecurityGroupReference$GroupId": "

    The ID of your security group.

    ", + "SecurityGroupReference$ReferencingVpcId": "

    The ID of the VPC with the referencing security group.

    ", + "SecurityGroupReference$VpcPeeringConnectionId": "

    The ID of the VPC peering connection.

    ", + "SecurityGroupStringList$member": null, + "Snapshot$SnapshotId": "

    The ID of the snapshot. Each snapshot receives a unique identifier when it is created.

    ", + "Snapshot$VolumeId": "

    The ID of the volume that was used to create the snapshot.

    ", + "Snapshot$StateMessage": "

    Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy operation fails (for example, if the proper AWS Key Management Service (AWS KMS) permissions are not obtained), this field displays error state details to help you diagnose why the error occurred. This parameter is only returned by the DescribeSnapshots API operation.

    ", + "Snapshot$Progress": "

    The progress of the snapshot, as a percentage.

    ", + "Snapshot$OwnerId": "

    The AWS account ID of the EBS snapshot owner.

    ", + "Snapshot$Description": "

    The description for the snapshot.

    ", + "Snapshot$OwnerAlias": "

    The AWS account alias (for example, amazon, self) or AWS account ID that owns the snapshot.

    ", + "Snapshot$KmsKeyId": "

    The full ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to protect the volume encryption key for the parent volume.

    ", + "Snapshot$DataEncryptionKeyId": "

    The data encryption key identifier for the snapshot. This value is a unique identifier that corresponds to the data encryption key that was used to encrypt the original volume or snapshot copy. Because data encryption keys are inherited by volumes created from snapshots, and vice versa, if snapshots share the same data encryption key identifier, then they belong to the same volume/snapshot lineage. This parameter is only returned by the DescribeSnapshots API operation.

    ", + "SnapshotDetail$Description": "

    A description for the snapshot.

    ", + "SnapshotDetail$Format": "

    The format of the disk image from which the snapshot is created.

    ", + "SnapshotDetail$Url": "

    The URL used to access the disk image.

    ", + "SnapshotDetail$DeviceName": "

    The block device mapping for the snapshot.

    ", + "SnapshotDetail$SnapshotId": "

    The snapshot ID of the disk being imported.

    ", + "SnapshotDetail$Progress": "

    The percentage of progress for the task.

    ", + "SnapshotDetail$StatusMessage": "

    A detailed status message for the snapshot creation.

    ", + "SnapshotDetail$Status": "

    A brief status of the snapshot creation.

    ", + "SnapshotDiskContainer$Description": "

    The description of the disk image being imported.

    ", + "SnapshotDiskContainer$Format": "

    The format of the disk image being imported.

    Valid values: RAW | VHD | VMDK | OVA

    ", + "SnapshotDiskContainer$Url": "

    The URL to the Amazon S3-based disk image being imported. It can be either an https URL (https://..) or an Amazon S3 URL (s3://..).

    ", + "SnapshotIdStringList$member": null, + "SnapshotTaskDetail$Description": "

    The description of the snapshot.

    ", + "SnapshotTaskDetail$Format": "

    The format of the disk image from which the snapshot is created.

    ", + "SnapshotTaskDetail$Url": "

    The URL of the disk image from which the snapshot is created.

    ", + "SnapshotTaskDetail$SnapshotId": "

    The snapshot ID of the disk being imported.

    ", + "SnapshotTaskDetail$Progress": "

    The percentage of completion for the import snapshot task.

    ", + "SnapshotTaskDetail$StatusMessage": "

    A detailed status message for the import snapshot task.

    ", + "SnapshotTaskDetail$Status": "

    A brief status for the import snapshot task.

    ", + "SpotDatafeedSubscription$OwnerId": "

    The ID of the AWS account that owns the data feed subscription.

    ", + "SpotDatafeedSubscription$Bucket": "

    The Amazon S3 bucket where the Spot instance data feed is located.

    ", + "SpotDatafeedSubscription$Prefix": "

    The prefix that is prepended to data feed files.

    ", + "SpotFleetLaunchSpecification$ImageId": "

    The ID of the AMI.

    ", + "SpotFleetLaunchSpecification$KeyName": "

    The name of the key pair.

    ", + "SpotFleetLaunchSpecification$UserData": "

    The Base64-encoded MIME user data to make available to the instances.

    ", + "SpotFleetLaunchSpecification$AddressingType": "

    Deprecated.

    ", + "SpotFleetLaunchSpecification$KernelId": "

    The ID of the kernel.

    ", + "SpotFleetLaunchSpecification$RamdiskId": "

    The ID of the RAM disk.

    ", + "SpotFleetLaunchSpecification$SubnetId": "

    The ID of the subnet in which to launch the instances. To specify multiple subnets, separate them using commas; for example, \"subnet-a61dafcf, subnet-65ea5f08\".

    ", + "SpotFleetLaunchSpecification$SpotPrice": "

    The bid price per unit hour for the specified instance type. If this value is not specified, the default is the Spot bid price specified for the fleet. To determine the bid price per unit hour, divide the Spot bid price by the value of WeightedCapacity.

    ", + "SpotFleetRequestConfig$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "SpotFleetRequestConfigData$ClientToken": "

    A unique, case-sensitive identifier you provide to ensure idempotency of your listings. This helps avoid duplicate listings. For more information, see Ensuring Idempotency.

    ", + "SpotFleetRequestConfigData$SpotPrice": "

    The bid price per unit hour.

    ", + "SpotFleetRequestConfigData$IamFleetRole": "

    Grants the Spot fleet permission to terminate Spot instances on your behalf when you cancel its Spot fleet request using CancelSpotFleetRequests or when the Spot fleet request expires, if you set terminateInstancesWithExpiration.

    ", + "SpotInstanceRequest$SpotInstanceRequestId": "

    The ID of the Spot instance request.

    ", + "SpotInstanceRequest$SpotPrice": "

    The maximum hourly price (bid) for the Spot instance launched to fulfill the request.

    ", + "SpotInstanceRequest$LaunchGroup": "

    The instance launch group. Launch groups are Spot instances that launch together and terminate together.

    ", + "SpotInstanceRequest$AvailabilityZoneGroup": "

    The Availability Zone group. If you specify the same Availability Zone group for all Spot instance requests, all Spot instances are launched in the same Availability Zone.

    ", + "SpotInstanceRequest$InstanceId": "

    The instance ID, if an instance has been launched to fulfill the Spot instance request.

    ", + "SpotInstanceRequest$ActualBlockHourlyPrice": "

    If you specified a duration and your Spot instance request was fulfilled, this is the fixed hourly price in effect for the Spot instance while it runs.

    ", + "SpotInstanceRequest$LaunchedAvailabilityZone": "

    The Availability Zone in which the bid is launched.

    ", + "SpotInstanceRequestIdList$member": null, + "SpotInstanceStateFault$Code": "

    The reason code for the Spot instance state change.

    ", + "SpotInstanceStateFault$Message": "

    The message for the Spot instance state change.

    ", + "SpotInstanceStatus$Code": "

    The status code. For a list of status codes, see Spot Bid Status Codes in the Amazon Elastic Compute Cloud User Guide.

    ", + "SpotInstanceStatus$Message": "

    The description for the status code.

    ", + "SpotPlacement$AvailabilityZone": "

    The Availability Zone.

    [Spot fleet only] To specify multiple Availability Zones, separate them using commas; for example, \"us-west-2a, us-west-2b\".

    ", + "SpotPlacement$GroupName": "

    The name of the placement group (for cluster instances).

    ", + "SpotPrice$SpotPrice": "

    The maximum price (bid) that you are willing to pay for a Spot instance.

    ", + "SpotPrice$AvailabilityZone": "

    The Availability Zone.

    ", + "StaleIpPermission$IpProtocol": "

    The IP protocol name (for tcp, udp, and icmp) or number (see Protocol Numbers).

    ", + "StaleSecurityGroup$GroupId": "

    The ID of the security group.

    ", + "StaleSecurityGroup$GroupName": "

    The name of the security group.

    ", + "StaleSecurityGroup$Description": "

    The description of the security group.

    ", + "StaleSecurityGroup$VpcId": "

    The ID of the VPC for the security group.

    ", + "StartInstancesRequest$AdditionalInfo": "

    Reserved.

    ", + "StateReason$Code": "

    The reason code for the state change.

    ", + "StateReason$Message": "

    The message for the state change.

    • Server.SpotInstanceTermination: A Spot instance was terminated due to an increase in the market price.

    • Server.InternalError: An internal error occurred during instance launch, resulting in termination.

    • Server.InsufficientInstanceCapacity: There was insufficient instance capacity to satisfy the launch request.

    • Client.InternalError: A client error caused the instance to terminate on launch.

    • Client.InstanceInitiatedShutdown: The instance was shut down using the shutdown -h command from the instance.

    • Client.UserInitiatedShutdown: The instance was shut down using the Amazon EC2 API.

    • Client.VolumeLimitExceeded: The limit on the number of EBS volumes or total storage was exceeded. Decrease usage or request an increase in your limits.

    • Client.InvalidSnapshot.NotFound: The specified snapshot was not found.
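    For reference only (not part of the vendored file): a minimal aws-sdk-go sketch of surfacing these state-change codes and messages from a described instance. The instance ID is a placeholder.

    ```go
    package main

    import (
    	"fmt"
    	"log"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/ec2"
    )

    func main() {
    	svc := ec2.New(session.Must(session.NewSession()))

    	out, err := svc.DescribeInstances(&ec2.DescribeInstancesInput{
    		InstanceIds: []*string{aws.String("i-0123456789abcdef0")}, // placeholder ID
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	for _, r := range out.Reservations {
    		for _, inst := range r.Instances {
    			// StateReason is set when the instance changed state, e.g.
    			// Code=Server.SpotInstanceTermination with its message.
    			if inst.StateReason != nil {
    				fmt.Printf("%s: %s (%s)\n",
    					aws.StringValue(inst.InstanceId),
    					aws.StringValue(inst.StateReason.Code),
    					aws.StringValue(inst.StateReason.Message))
    			}
    		}
    	}
    }
    ```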

    ", + "Subnet$SubnetId": "

    The ID of the subnet.

    ", + "Subnet$VpcId": "

    The ID of the VPC the subnet is in.

    ", + "Subnet$CidrBlock": "

    The CIDR block assigned to the subnet.

    ", + "Subnet$AvailabilityZone": "

    The Availability Zone of the subnet.

    ", + "SubnetIdStringList$member": null, + "Tag$Key": "

    The key of the tag.

    Constraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode characters. May not begin with aws:

    ", + "Tag$Value": "

    The value of the tag.

    Constraints: Tag values are case-sensitive and accept a maximum of 255 Unicode characters.
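    For reference only (not part of the vendored file): a minimal sketch of checking the documented tag constraints client-side before calling CreateTags. The resource ID is a placeholder.

    ```go
    package main

    import (
    	"log"
    	"strings"
    	"unicode/utf8"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/ec2"
    )

    func main() {
    	svc := ec2.New(session.Must(session.NewSession()))

    	key, value := "Environment", "staging"
    	// Keys may not begin with "aws:" and take at most 127 Unicode
    	// characters; values take at most 255.
    	if strings.HasPrefix(strings.ToLower(key), "aws:") ||
    		utf8.RuneCountInString(key) > 127 ||
    		utf8.RuneCountInString(value) > 255 {
    		log.Fatalf("invalid tag %q=%q", key, value)
    	}

    	_, err := svc.CreateTags(&ec2.CreateTagsInput{
    		Resources: []*string{aws.String("i-0123456789abcdef0")}, // placeholder resource ID
    		Tags:      []*ec2.Tag{{Key: aws.String(key), Value: aws.String(value)}},
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    }
    ```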

    ", + "TagDescription$ResourceId": "

    The ID of the resource. For example, ami-1a2b3c4d.

    ", + "TagDescription$Key": "

    The tag key.

    ", + "TagDescription$Value": "

    The tag value.

    ", + "UnassignPrivateIpAddressesRequest$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "UnsuccessfulItem$ResourceId": "

    The ID of the resource.

    ", + "UnsuccessfulItemError$Code": "

    The error code.

    ", + "UnsuccessfulItemError$Message": "

    The error message accompanying the error code.

    ", + "UserBucket$S3Bucket": "

    The name of the S3 bucket where the disk image is located.

    ", + "UserBucket$S3Key": "

    The file name of the disk image.

    ", + "UserBucketDetails$S3Bucket": "

    The S3 bucket from which the disk image was created.

    ", + "UserBucketDetails$S3Key": "

    The file name of the disk image.

    ", + "UserData$Data": "

    The Base64-encoded MIME user data for the instance.

    ", + "UserGroupStringList$member": null, + "UserIdGroupPair$UserId": "

    The ID of an AWS account. For a referenced security group in another VPC, the account ID of the referenced security group is returned.

    [EC2-Classic] Required when adding or removing rules that reference a security group in another AWS account.

    ", + "UserIdGroupPair$GroupName": "

    The name of the security group. In a request, use this parameter for a security group in EC2-Classic or a default VPC only. For a security group in a nondefault VPC, use the security group ID.

    ", + "UserIdGroupPair$GroupId": "

    The ID of the security group.

    ", + "UserIdGroupPair$VpcId": "

    The ID of the VPC for the referenced security group, if applicable.

    ", + "UserIdGroupPair$VpcPeeringConnectionId": "

    The ID of the VPC peering connection, if applicable.

    ", + "UserIdGroupPair$PeeringStatus": "

    The status of a VPC peering connection, if applicable.

    ", + "UserIdStringList$member": null, + "ValueStringList$member": null, + "VgwTelemetry$OutsideIpAddress": "

    The Internet-routable IP address of the virtual private gateway's outside interface.

    ", + "VgwTelemetry$StatusMessage": "

    If an error occurs, a description of the error.

    ", + "Volume$VolumeId": "

    The ID of the volume.

    ", + "Volume$SnapshotId": "

    The snapshot from which the volume was created, if applicable.

    ", + "Volume$AvailabilityZone": "

    The Availability Zone for the volume.

    ", + "Volume$KmsKeyId": "

    The full ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to protect the volume encryption key for the volume.

    ", + "VolumeAttachment$VolumeId": "

    The ID of the volume.

    ", + "VolumeAttachment$InstanceId": "

    The ID of the instance.

    ", + "VolumeAttachment$Device": "

    The device name.

    ", + "VolumeIdStringList$member": null, + "VolumeStatusAction$Code": "

    The code identifying the operation, for example, enable-volume-io.

    ", + "VolumeStatusAction$Description": "

    A description of the operation.

    ", + "VolumeStatusAction$EventType": "

    The event type associated with this operation.

    ", + "VolumeStatusAction$EventId": "

    The ID of the event associated with this operation.

    ", + "VolumeStatusDetails$Status": "

    The intended status of the volume.

    ", + "VolumeStatusEvent$EventType": "

    The type of this event.

    ", + "VolumeStatusEvent$Description": "

    A description of the event.

    ", + "VolumeStatusEvent$EventId": "

    The ID of this event.

    ", + "VolumeStatusItem$VolumeId": "

    The volume ID.

    ", + "VolumeStatusItem$AvailabilityZone": "

    The Availability Zone of the volume.

    ", + "Vpc$VpcId": "

    The ID of the VPC.

    ", + "Vpc$CidrBlock": "

    The CIDR block for the VPC.

    ", + "Vpc$DhcpOptionsId": "

    The ID of the set of DHCP options you've associated with the VPC (or default if the default options are associated with the VPC).

    ", + "VpcAttachment$VpcId": "

    The ID of the VPC.

    ", + "VpcClassicLink$VpcId": "

    The ID of the VPC.

    ", + "VpcClassicLinkIdList$member": null, + "VpcEndpoint$VpcEndpointId": "

    The ID of the VPC endpoint.

    ", + "VpcEndpoint$VpcId": "

    The ID of the VPC to which the endpoint is associated.

    ", + "VpcEndpoint$ServiceName": "

    The name of the AWS service to which the endpoint is associated.

    ", + "VpcEndpoint$PolicyDocument": "

    The policy document associated with the endpoint.

    ", + "VpcIdStringList$member": null, + "VpcPeeringConnection$VpcPeeringConnectionId": "

    The ID of the VPC peering connection.

    ", + "VpcPeeringConnectionStateReason$Message": "

    A message that provides more information about the status, if applicable.

    ", + "VpcPeeringConnectionVpcInfo$CidrBlock": "

    The CIDR block for the VPC.

    ", + "VpcPeeringConnectionVpcInfo$OwnerId": "

    The AWS account ID of the VPC owner.

    ", + "VpcPeeringConnectionVpcInfo$VpcId": "

    The ID of the VPC.

    ", + "VpnConnection$VpnConnectionId": "

    The ID of the VPN connection.

    ", + "VpnConnection$CustomerGatewayConfiguration": "

    The configuration information for the VPN connection's customer gateway (in the native XML format). This element is always present in the CreateVpnConnection response; however, it's present in the DescribeVpnConnections response only if the VPN connection is in the pending or available state.

    ", + "VpnConnection$CustomerGatewayId": "

    The ID of the customer gateway at your end of the VPN connection.

    ", + "VpnConnection$VpnGatewayId": "

    The ID of the virtual private gateway at the AWS side of the VPN connection.

    ", + "VpnConnectionIdStringList$member": null, + "VpnGateway$VpnGatewayId": "

    The ID of the virtual private gateway.

    ", + "VpnGateway$AvailabilityZone": "

    The Availability Zone where the virtual private gateway was created, if applicable. This field may be empty or not returned.

    ", + "VpnGatewayIdStringList$member": null, + "VpnStaticRoute$DestinationCidrBlock": "

    The CIDR block associated with the local subnet of the customer data center.

    ", + "ZoneNameStringList$member": null + } + }, + "Subnet": { + "base": "

    Describes a subnet.

    ", + "refs": { + "CreateSubnetResult$Subnet": "

    Information about the subnet.

    ", + "SubnetList$member": null + } + }, + "SubnetIdStringList": { + "base": null, + "refs": { + "DescribeSubnetsRequest$SubnetIds": "

    One or more subnet IDs.

    Default: Describes all your subnets.

    " + } + }, + "SubnetList": { + "base": null, + "refs": { + "DescribeSubnetsResult$Subnets": "

    Information about one or more subnets.

    " + } + }, + "SubnetState": { + "base": null, + "refs": { + "Subnet$State": "

    The current state of the subnet.

    " + } + }, + "SummaryStatus": { + "base": null, + "refs": { + "InstanceStatusSummary$Status": "

    The status.

    " + } + }, + "Tag": { + "base": "

    Describes a tag.

    ", + "refs": { + "TagList$member": null + } + }, + "TagDescription": { + "base": "

    Describes a tag.

    ", + "refs": { + "TagDescriptionList$member": null + } + }, + "TagDescriptionList": { + "base": null, + "refs": { + "DescribeTagsResult$Tags": "

    A list of tags.

    " + } + }, + "TagList": { + "base": null, + "refs": { + "ClassicLinkInstance$Tags": "

    Any tags assigned to the instance.

    ", + "ConversionTask$Tags": "

    Any tags assigned to the task.

    ", + "CreateTagsRequest$Tags": "

    One or more tags. The value parameter is required, but if you don't want the tag to have a value, specify the parameter with no value, and we set the value to an empty string.

    ", + "CustomerGateway$Tags": "

    Any tags assigned to the customer gateway.

    ", + "DeleteTagsRequest$Tags": "

    One or more tags to delete. If you omit the value parameter, we delete the tag regardless of its value. If you specify this parameter with an empty string as the value, we delete the key only if its value is an empty string.

    ", + "DhcpOptions$Tags": "

    Any tags assigned to the DHCP options set.

    ", + "Image$Tags": "

    Any tags assigned to the image.

    ", + "Instance$Tags": "

    Any tags assigned to the instance.

    ", + "InternetGateway$Tags": "

    Any tags assigned to the Internet gateway.

    ", + "NetworkAcl$Tags": "

    Any tags assigned to the network ACL.

    ", + "NetworkInterface$TagSet": "

    Any tags assigned to the network interface.

    ", + "ReservedInstances$Tags": "

    Any tags assigned to the resource.

    ", + "ReservedInstancesListing$Tags": "

    Any tags assigned to the resource.

    ", + "RouteTable$Tags": "

    Any tags assigned to the route table.

    ", + "SecurityGroup$Tags": "

    Any tags assigned to the security group.

    ", + "Snapshot$Tags": "

    Any tags assigned to the snapshot.

    ", + "SpotInstanceRequest$Tags": "

    Any tags assigned to the resource.

    ", + "Subnet$Tags": "

    Any tags assigned to the subnet.

    ", + "Volume$Tags": "

    Any tags assigned to the volume.

    ", + "Vpc$Tags": "

    Any tags assigned to the VPC.

    ", + "VpcClassicLink$Tags": "

    Any tags assigned to the VPC.

    ", + "VpcPeeringConnection$Tags": "

    Any tags assigned to the resource.

    ", + "VpnConnection$Tags": "

    Any tags assigned to the VPN connection.

    ", + "VpnGateway$Tags": "

    Any tags assigned to the virtual private gateway.

    " + } + }, + "TelemetryStatus": { + "base": null, + "refs": { + "VgwTelemetry$Status": "

    The status of the VPN tunnel.

    " + } + }, + "Tenancy": { + "base": null, + "refs": { + "CreateVpcRequest$InstanceTenancy": "

    The tenancy options for instances launched into the VPC. For default, instances are launched with shared tenancy by default. You can launch instances with any tenancy into a shared tenancy VPC. For dedicated, instances are launched as dedicated tenancy instances by default. You can only launch instances with a tenancy of dedicated or host into a dedicated tenancy VPC.

    Important: The host value cannot be used with this parameter. Use the default or dedicated values only.

    Default: default
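    For reference only (not part of the vendored file): a minimal aws-sdk-go sketch of creating a dedicated-tenancy VPC as described above. The CIDR block is a placeholder.

    ```go
    package main

    import (
    	"fmt"
    	"log"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/ec2"
    )

    func main() {
    	svc := ec2.New(session.Must(session.NewSession()))

    	// Only "default" and "dedicated" are accepted here; "host" is not
    	// a valid VPC tenancy even though it is a valid instance tenancy.
    	out, err := svc.CreateVpc(&ec2.CreateVpcInput{
    		CidrBlock:       aws.String("10.0.0.0/16"), // placeholder CIDR
    		InstanceTenancy: aws.String("dedicated"),
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(aws.StringValue(out.Vpc.VpcId))
    }
    ```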

    ", + "DescribeReservedInstancesOfferingsRequest$InstanceTenancy": "

    The tenancy of the instances covered by the reservation. A Reserved Instance with a tenancy of dedicated is applied to instances that run in a VPC on single-tenant hardware (i.e., Dedicated Instances).

    Default: default

    ", + "Placement$Tenancy": "

    The tenancy of the instance (if the instance is running in a VPC). An instance with a tenancy of dedicated runs on single-tenant hardware. The host tenancy is not supported for the ImportInstance command.

    ", + "ReservedInstances$InstanceTenancy": "

    The tenancy of the instance.

    ", + "ReservedInstancesOffering$InstanceTenancy": "

    The tenancy of the instance.

    ", + "Vpc$InstanceTenancy": "

    The allowed tenancy of instances launched into the VPC.

    " + } + }, + "TerminateInstancesRequest": { + "base": "

    Contains the parameters for TerminateInstances.

    ", + "refs": { + } + }, + "TerminateInstancesResult": { + "base": "

    Contains the output of TerminateInstances.

    ", + "refs": { + } + }, + "TrafficType": { + "base": null, + "refs": { + "CreateFlowLogsRequest$TrafficType": "

    The type of traffic to log.

    ", + "FlowLog$TrafficType": "

    The type of traffic captured for the flow log.

    " + } + }, + "UnassignPrivateIpAddressesRequest": { + "base": "

    Contains the parameters for UnassignPrivateIpAddresses.

    ", + "refs": { + } + }, + "UnmonitorInstancesRequest": { + "base": "

    Contains the parameters for UnmonitorInstances.

    ", + "refs": { + } + }, + "UnmonitorInstancesResult": { + "base": "

    Contains the output of UnmonitorInstances.

    ", + "refs": { + } + }, + "UnsuccessfulItem": { + "base": "

    Information about items that were not successfully processed in a batch call.

    ", + "refs": { + "UnsuccessfulItemList$member": null, + "UnsuccessfulItemSet$member": null + } + }, + "UnsuccessfulItemError": { + "base": "

    Information about the error that occurred. For more information about errors, see Error Codes.

    ", + "refs": { + "UnsuccessfulItem$Error": "

    Information about the error.

    " + } + }, + "UnsuccessfulItemList": { + "base": null, + "refs": { + "ModifyHostsResult$Unsuccessful": "

    The IDs of the Dedicated hosts that could not be modified. Check whether the setting you requested can be used.

    ", + "ReleaseHostsResult$Unsuccessful": "

    The IDs of the Dedicated hosts that could not be released, including an error message.

    " + } + }, + "UnsuccessfulItemSet": { + "base": null, + "refs": { + "CreateFlowLogsResult$Unsuccessful": "

    Information about the flow logs that could not be created successfully.

    ", + "DeleteFlowLogsResult$Unsuccessful": "

    Information about the flow logs that could not be deleted successfully.

    ", + "DeleteVpcEndpointsResult$Unsuccessful": "

    Information about the endpoints that were not successfully deleted.

    " + } + }, + "UserBucket": { + "base": "

    Describes the S3 bucket for the disk image.

    ", + "refs": { + "ImageDiskContainer$UserBucket": "

    The S3 bucket for the disk image.

    ", + "SnapshotDiskContainer$UserBucket": "

    The S3 bucket for the disk image.

    " + } + }, + "UserBucketDetails": { + "base": "

    Describes the S3 bucket for the disk image.

    ", + "refs": { + "SnapshotDetail$UserBucket": "

    The S3 bucket for the disk image.

    ", + "SnapshotTaskDetail$UserBucket": "

    The S3 bucket for the disk image.

    " + } + }, + "UserData": { + "base": "

    Describes the user data to be made available to an instance.

    ", + "refs": { + "ImportInstanceLaunchSpecification$UserData": "

    The Base64-encoded MIME user data to be made available to the instance.

    " + } + }, + "UserGroupStringList": { + "base": null, + "refs": { + "ModifyImageAttributeRequest$UserGroups": "

    One or more user groups. This is only valid when modifying the launchPermission attribute.

    " + } + }, + "UserIdGroupPair": { + "base": "

    Describes a security group and AWS account ID pair.

    ", + "refs": { + "UserIdGroupPairList$member": null, + "UserIdGroupPairSet$member": null + } + }, + "UserIdGroupPairList": { + "base": null, + "refs": { + "IpPermission$UserIdGroupPairs": "

    One or more security group and AWS account ID pairs.

    " + } + }, + "UserIdGroupPairSet": { + "base": null, + "refs": { + "StaleIpPermission$UserIdGroupPairs": "

    One or more security group pairs. Returns the ID of the referenced security group and VPC, and the ID and status of the VPC peering connection.

    " + } + }, + "UserIdStringList": { + "base": null, + "refs": { + "ModifyImageAttributeRequest$UserIds": "

    One or more AWS account IDs. This is only valid when modifying the launchPermission attribute.

    ", + "ModifySnapshotAttributeRequest$UserIds": "

    The account ID to modify for the snapshot.

    " + } + }, + "ValueStringList": { + "base": null, + "refs": { + "CancelSpotFleetRequestsRequest$SpotFleetRequestIds": "

    The IDs of the Spot fleet requests.

    ", + "CreateFlowLogsRequest$ResourceIds": "

    One or more subnet, network interface, or VPC IDs.

    Constraints: Maximum of 1000 resources

    ", + "CreateFlowLogsResult$FlowLogIds": "

    The IDs of the flow logs.

    ", + "CreateVpcEndpointRequest$RouteTableIds": "

    One or more route table IDs.

    ", + "DeleteFlowLogsRequest$FlowLogIds": "

    One or more flow log IDs.

    ", + "DeleteVpcEndpointsRequest$VpcEndpointIds": "

    One or more endpoint IDs.

    ", + "DescribeFlowLogsRequest$FlowLogIds": "

    One or more flow log IDs.

    ", + "DescribeInternetGatewaysRequest$InternetGatewayIds": "

    One or more Internet gateway IDs.

    Default: Describes all your Internet gateways.

    ", + "DescribeMovingAddressesRequest$PublicIps": "

    One or more Elastic IP addresses.

    ", + "DescribeNatGatewaysRequest$NatGatewayIds": "

    One or more NAT gateway IDs.

    ", + "DescribeNetworkAclsRequest$NetworkAclIds": "

    One or more network ACL IDs.

    Default: Describes all your network ACLs.

    ", + "DescribePrefixListsRequest$PrefixListIds": "

    One or more prefix list IDs.

    ", + "DescribeRouteTablesRequest$RouteTableIds": "

    One or more route table IDs.

    Default: Describes all your route tables.

    ", + "DescribeSpotFleetRequestsRequest$SpotFleetRequestIds": "

    The IDs of the Spot fleet requests.

    ", + "DescribeVpcEndpointServicesResult$ServiceNames": "

    A list of supported AWS services.

    ", + "DescribeVpcEndpointsRequest$VpcEndpointIds": "

    One or more endpoint IDs.

    ", + "DescribeVpcPeeringConnectionsRequest$VpcPeeringConnectionIds": "

    One or more VPC peering connection IDs.

    Default: Describes all your VPC peering connections.

    ", + "Filter$Values": "

    One or more filter values. Filter values are case-sensitive.

    ", + "ModifyVpcEndpointRequest$AddRouteTableIds": "

    One or more route table IDs to associate with the endpoint.

    ", + "ModifyVpcEndpointRequest$RemoveRouteTableIds": "

    One or more route table IDs to disassociate from the endpoint.

    ", + "NewDhcpConfiguration$Values": null, + "PrefixList$Cidrs": "

    The IP address range of the AWS service.

    ", + "RequestSpotLaunchSpecification$SecurityGroups": null, + "RequestSpotLaunchSpecification$SecurityGroupIds": null, + "VpcEndpoint$RouteTableIds": "

    One or more route tables associated with the endpoint.

    " + } + }, + "VgwTelemetry": { + "base": "

    Describes telemetry for a VPN tunnel.

    ", + "refs": { + "VgwTelemetryList$member": null + } + }, + "VgwTelemetryList": { + "base": null, + "refs": { + "VpnConnection$VgwTelemetry": "

    Information about the VPN tunnel.

    " + } + }, + "VirtualizationType": { + "base": null, + "refs": { + "Image$VirtualizationType": "

    The type of virtualization of the AMI.

    ", + "Instance$VirtualizationType": "

    The virtualization type of the instance.

    " + } + }, + "Volume": { + "base": "

    Describes a volume.

    ", + "refs": { + "VolumeList$member": null + } + }, + "VolumeAttachment": { + "base": "

    Describes volume attachment details.

    ", + "refs": { + "VolumeAttachmentList$member": null + } + }, + "VolumeAttachmentList": { + "base": null, + "refs": { + "Volume$Attachments": "

    Information about the volume attachments.

    " + } + }, + "VolumeAttachmentState": { + "base": null, + "refs": { + "VolumeAttachment$State": "

    The attachment state of the volume.

    " + } + }, + "VolumeAttributeName": { + "base": null, + "refs": { + "DescribeVolumeAttributeRequest$Attribute": "

    The volume attribute.

    " + } + }, + "VolumeDetail": { + "base": "

    Describes an EBS volume.

    ", + "refs": { + "DiskImage$Volume": "

    Information about the volume.

    ", + "ImportVolumeRequest$Volume": "

    The volume size.

    " + } + }, + "VolumeIdStringList": { + "base": null, + "refs": { + "DescribeVolumeStatusRequest$VolumeIds": "

    One or more volume IDs.

    Default: Describes all your volumes.

    ", + "DescribeVolumesRequest$VolumeIds": "

    One or more volume IDs.

    " + } + }, + "VolumeList": { + "base": null, + "refs": { + "DescribeVolumesResult$Volumes": "

    Information about the volumes.

    " + } + }, + "VolumeState": { + "base": null, + "refs": { + "Volume$State": "

    The volume state.

    " + } + }, + "VolumeStatusAction": { + "base": "

    Describes a volume status operation code.

    ", + "refs": { + "VolumeStatusActionsList$member": null + } + }, + "VolumeStatusActionsList": { + "base": null, + "refs": { + "VolumeStatusItem$Actions": "

    The details of the operation.

    " + } + }, + "VolumeStatusDetails": { + "base": "

    Describes a volume status.

    ", + "refs": { + "VolumeStatusDetailsList$member": null + } + }, + "VolumeStatusDetailsList": { + "base": null, + "refs": { + "VolumeStatusInfo$Details": "

    The details of the volume status.

    " + } + }, + "VolumeStatusEvent": { + "base": "

    Describes a volume status event.

    ", + "refs": { + "VolumeStatusEventsList$member": null + } + }, + "VolumeStatusEventsList": { + "base": null, + "refs": { + "VolumeStatusItem$Events": "

    A list of events associated with the volume.

    " + } + }, + "VolumeStatusInfo": { + "base": "

    Describes the status of a volume.

    ", + "refs": { + "VolumeStatusItem$VolumeStatus": "

    The volume status.

    " + } + }, + "VolumeStatusInfoStatus": { + "base": null, + "refs": { + "VolumeStatusInfo$Status": "

    The status of the volume.

    " + } + }, + "VolumeStatusItem": { + "base": "

    Describes the volume status.

    ", + "refs": { + "VolumeStatusList$member": null + } + }, + "VolumeStatusList": { + "base": null, + "refs": { + "DescribeVolumeStatusResult$VolumeStatuses": "

    A list of volume statuses.

    " + } + }, + "VolumeStatusName": { + "base": null, + "refs": { + "VolumeStatusDetails$Name": "

    The name of the volume status.

    " + } + }, + "VolumeType": { + "base": null, + "refs": { + "CreateVolumeRequest$VolumeType": "

    The volume type. This can be gp2 for General Purpose SSD, io1 for Provisioned IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard for Magnetic volumes.

    Default: standard

    ", + "EbsBlockDevice$VolumeType": "

    The volume type: gp2, io1, st1, sc1, or standard.

    Default: standard

    ", + "Volume$VolumeType": "

    The volume type. This can be gp2 for General Purpose SSD, io1 for Provisioned IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard for Magnetic volumes.

    " + } + }, + "Vpc": { + "base": "

    Describes a VPC.

    ", + "refs": { + "CreateVpcResult$Vpc": "

    Information about the VPC.

    ", + "VpcList$member": null + } + }, + "VpcAttachment": { + "base": "

    Describes an attachment between a virtual private gateway and a VPC.

    ", + "refs": { + "AttachVpnGatewayResult$VpcAttachment": "

    Information about the attachment.

    ", + "VpcAttachmentList$member": null + } + }, + "VpcAttachmentList": { + "base": null, + "refs": { + "VpnGateway$VpcAttachments": "

    Any VPCs attached to the virtual private gateway.

    " + } + }, + "VpcAttributeName": { + "base": null, + "refs": { + "DescribeVpcAttributeRequest$Attribute": "

    The VPC attribute.

    " + } + }, + "VpcClassicLink": { + "base": "

    Describes whether a VPC is enabled for ClassicLink.

    ", + "refs": { + "VpcClassicLinkList$member": null + } + }, + "VpcClassicLinkIdList": { + "base": null, + "refs": { + "DescribeVpcClassicLinkDnsSupportRequest$VpcIds": "

    One or more VPC IDs.

    ", + "DescribeVpcClassicLinkRequest$VpcIds": "

    One or more VPCs for which you want to describe the ClassicLink status.

    " + } + }, + "VpcClassicLinkList": { + "base": null, + "refs": { + "DescribeVpcClassicLinkResult$Vpcs": "

    The ClassicLink status of one or more VPCs.

    " + } + }, + "VpcEndpoint": { + "base": "

    Describes a VPC endpoint.

    ", + "refs": { + "CreateVpcEndpointResult$VpcEndpoint": "

    Information about the endpoint.

    ", + "VpcEndpointSet$member": null + } + }, + "VpcEndpointSet": { + "base": null, + "refs": { + "DescribeVpcEndpointsResult$VpcEndpoints": "

    Information about the endpoints.

    " + } + }, + "VpcIdStringList": { + "base": null, + "refs": { + "DescribeVpcsRequest$VpcIds": "

    One or more VPC IDs.

    Default: Describes all your VPCs.

    " + } + }, + "VpcList": { + "base": null, + "refs": { + "DescribeVpcsResult$Vpcs": "

    Information about one or more VPCs.

    " + } + }, + "VpcPeeringConnection": { + "base": "

    Describes a VPC peering connection.

    ", + "refs": { + "AcceptVpcPeeringConnectionResult$VpcPeeringConnection": "

    Information about the VPC peering connection.

    ", + "CreateVpcPeeringConnectionResult$VpcPeeringConnection": "

    Information about the VPC peering connection.

    ", + "VpcPeeringConnectionList$member": null + } + }, + "VpcPeeringConnectionList": { + "base": null, + "refs": { + "DescribeVpcPeeringConnectionsResult$VpcPeeringConnections": "

    Information about the VPC peering connections.

    " + } + }, + "VpcPeeringConnectionOptionsDescription": { + "base": "

    Describes the VPC peering connection options.

    ", + "refs": { + "VpcPeeringConnectionVpcInfo$PeeringOptions": "

    Information about the VPC peering connection options for the accepter or requester VPC.

    " + } + }, + "VpcPeeringConnectionStateReason": { + "base": "

    Describes the status of a VPC peering connection.

    ", + "refs": { + "VpcPeeringConnection$Status": "

    The status of the VPC peering connection.

    " + } + }, + "VpcPeeringConnectionStateReasonCode": { + "base": null, + "refs": { + "VpcPeeringConnectionStateReason$Code": "

    The status of the VPC peering connection.

    " + } + }, + "VpcPeeringConnectionVpcInfo": { + "base": "

    Describes a VPC in a VPC peering connection.

    ", + "refs": { + "VpcPeeringConnection$AccepterVpcInfo": "

    Information about the accepter VPC. CIDR block information is not returned when creating a VPC peering connection, or when describing a VPC peering connection that's in the initiating-request or pending-acceptance state.

    ", + "VpcPeeringConnection$RequesterVpcInfo": "

    Information about the requester VPC.

    " + } + }, + "VpcState": { + "base": null, + "refs": { + "Vpc$State": "

    The current state of the VPC.

    " + } + }, + "VpnConnection": { + "base": "

    Describes a VPN connection.

    ", + "refs": { + "CreateVpnConnectionResult$VpnConnection": "

    Information about the VPN connection.

    ", + "VpnConnectionList$member": null + } + }, + "VpnConnectionIdStringList": { + "base": null, + "refs": { + "DescribeVpnConnectionsRequest$VpnConnectionIds": "

    One or more VPN connection IDs.

    Default: Describes your VPN connections.

    " + } + }, + "VpnConnectionList": { + "base": null, + "refs": { + "DescribeVpnConnectionsResult$VpnConnections": "

    Information about one or more VPN connections.

    " + } + }, + "VpnConnectionOptions": { + "base": "

    Describes VPN connection options.

    ", + "refs": { + "VpnConnection$Options": "

    The VPN connection options.

    " + } + }, + "VpnConnectionOptionsSpecification": { + "base": "

    Describes VPN connection options.

    ", + "refs": { + "CreateVpnConnectionRequest$Options": "

    Indicates whether the VPN connection requires static routes. If you are creating a VPN connection for a device that does not support BGP, you must specify true.

    Default: false

    " + } + }, + "VpnGateway": { + "base": "

    Describes a virtual private gateway.

    ", + "refs": { + "CreateVpnGatewayResult$VpnGateway": "

    Information about the virtual private gateway.

    ", + "VpnGatewayList$member": null + } + }, + "VpnGatewayIdStringList": { + "base": null, + "refs": { + "DescribeVpnGatewaysRequest$VpnGatewayIds": "

    One or more virtual private gateway IDs.

    Default: Describes all your virtual private gateways.

    " + } + }, + "VpnGatewayList": { + "base": null, + "refs": { + "DescribeVpnGatewaysResult$VpnGateways": "

    Information about one or more virtual private gateways.

    " + } + }, + "VpnState": { + "base": null, + "refs": { + "VpnConnection$State": "

    The current state of the VPN connection.

    ", + "VpnGateway$State": "

    The current state of the virtual private gateway.

    ", + "VpnStaticRoute$State": "

    The current state of the static route.

    " + } + }, + "VpnStaticRoute": { + "base": "

    Describes a static route for a VPN connection.

    ", + "refs": { + "VpnStaticRouteList$member": null + } + }, + "VpnStaticRouteList": { + "base": null, + "refs": { + "VpnConnection$Routes": "

    The static routes associated with the VPN connection.

    " + } + }, + "VpnStaticRouteSource": { + "base": null, + "refs": { + "VpnStaticRoute$Source": "

    Indicates how the routes were provided.

    " + } + }, + "ZoneNameStringList": { + "base": null, + "refs": { + "DescribeAvailabilityZonesRequest$ZoneNames": "

    The names of one or more Availability Zones.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2015-10-01/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2015-10-01/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2015-10-01/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2015-10-01/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2015-10-01/paginators-1.json new file mode 100644 index 000000000..9d04d89ab --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2015-10-01/paginators-1.json @@ -0,0 +1,138 @@ +{ + "pagination": { + "DescribeAccountAttributes": { + "result_key": "AccountAttributes" + }, + "DescribeAddresses": { + "result_key": "Addresses" + }, + "DescribeAvailabilityZones": { + "result_key": "AvailabilityZones" + }, + "DescribeBundleTasks": { + "result_key": "BundleTasks" + }, + "DescribeConversionTasks": { + "result_key": "ConversionTasks" + }, + "DescribeCustomerGateways": { + "result_key": "CustomerGateways" + }, + "DescribeDhcpOptions": { + "result_key": "DhcpOptions" + }, + "DescribeExportTasks": { + "result_key": "ExportTasks" + }, + "DescribeImages": { + "result_key": "Images" + }, + "DescribeInstanceStatus": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "InstanceStatuses" + }, + "DescribeInstances": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Reservations" + }, + "DescribeInternetGateways": { + "result_key": "InternetGateways" + }, + "DescribeKeyPairs": { + "result_key": "KeyPairs" + }, + "DescribeNetworkAcls": { + "result_key": "NetworkAcls" + }, + "DescribeNetworkInterfaces": { + "result_key": "NetworkInterfaces" + }, + "DescribePlacementGroups": { + "result_key": "PlacementGroups" + }, + "DescribeRegions": { + "result_key": "Regions" + }, + "DescribeReservedInstances": { + "result_key": "ReservedInstances" + }, + "DescribeReservedInstancesListings": { + "result_key": "ReservedInstancesListings" + }, + "DescribeReservedInstancesOfferings": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "ReservedInstancesOfferings" + }, + "DescribeReservedInstancesModifications": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "ReservedInstancesModifications" + }, + "DescribeRouteTables": { + "result_key": "RouteTables" + }, + "DescribeSecurityGroups": { + "result_key": "SecurityGroups" + }, + "DescribeSnapshots": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Snapshots" + }, + "DescribeSpotInstanceRequests": { + "result_key": "SpotInstanceRequests" + }, + "DescribeSpotFleetRequests": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "SpotFleetRequestConfigs" + }, + "DescribeSpotPriceHistory": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "SpotPriceHistory" + }, + "DescribeSubnets": { + "result_key": "Subnets" + }, + "DescribeTags": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Tags" + }, + "DescribeVolumeStatus": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + 
"result_key": "VolumeStatuses" + }, + "DescribeVolumes": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Volumes" + }, + "DescribeVpcs": { + "result_key": "Vpcs" + }, + "DescribeVpcPeeringConnections": { + "result_key": "VpcPeeringConnections" + }, + "DescribeVpnConnections": { + "result_key": "VpnConnections" + }, + "DescribeVpnGateways": { + "result_key": "VpnGateways" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2015-10-01/waiters-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2015-10-01/waiters-2.json new file mode 100644 index 000000000..ecc9f1b6f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2015-10-01/waiters-2.json @@ -0,0 +1,593 @@ +{ + "version": 2, + "waiters": { + "InstanceExists": { + "delay": 5, + "maxAttempts": 40, + "operation": "DescribeInstances", + "acceptors": [ + { + "matcher": "path", + "expected": true, + "argument": "length(Reservations[]) > `0`", + "state": "success" + }, + { + "matcher": "error", + "expected": "InvalidInstanceID.NotFound", + "state": "retry" + } + ] + }, + "BundleTaskComplete": { + "delay": 15, + "operation": "DescribeBundleTasks", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "complete", + "matcher": "pathAll", + "state": "success", + "argument": "BundleTasks[].State" + }, + { + "expected": "failed", + "matcher": "pathAny", + "state": "failure", + "argument": "BundleTasks[].State" + } + ] + }, + "ConversionTaskCancelled": { + "delay": 15, + "operation": "DescribeConversionTasks", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "cancelled", + "matcher": "pathAll", + "state": "success", + "argument": "ConversionTasks[].State" + } + ] + }, + "ConversionTaskCompleted": { + "delay": 15, + "operation": "DescribeConversionTasks", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "completed", + "matcher": "pathAll", + "state": "success", + "argument": "ConversionTasks[].State" + }, + { + "expected": "cancelled", + "matcher": "pathAny", + "state": "failure", + "argument": "ConversionTasks[].State" + }, + { + "expected": "cancelling", + "matcher": "pathAny", + "state": "failure", + "argument": "ConversionTasks[].State" + } + ] + }, + "ConversionTaskDeleted": { + "delay": 15, + "operation": "DescribeConversionTasks", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "deleted", + "matcher": "pathAll", + "state": "success", + "argument": "ConversionTasks[].State" + } + ] + }, + "CustomerGatewayAvailable": { + "delay": 15, + "operation": "DescribeCustomerGateways", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "CustomerGateways[].State" + }, + { + "expected": "deleted", + "matcher": "pathAny", + "state": "failure", + "argument": "CustomerGateways[].State" + }, + { + "expected": "deleting", + "matcher": "pathAny", + "state": "failure", + "argument": "CustomerGateways[].State" + } + ] + }, + "ExportTaskCancelled": { + "delay": 15, + "operation": "DescribeExportTasks", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "cancelled", + "matcher": "pathAll", + "state": "success", + "argument": "ExportTasks[].State" + } + ] + }, + "ExportTaskCompleted": { + "delay": 15, + "operation": "DescribeExportTasks", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "completed", + "matcher": "pathAll", + "state": "success", + "argument": "ExportTasks[].State" + } + ] + }, + "ImageExists": { + "operation": "DescribeImages", + 
"maxAttempts": 40, + "delay": 15, + "acceptors": [ + { + "matcher": "path", + "expected": true, + "argument": "length(Images[]) > `0`", + "state": "success" + }, + { + "matcher": "error", + "expected": "InvalidAMIID.NotFound", + "state": "retry" + } + ] + }, + "ImageAvailable": { + "operation": "DescribeImages", + "maxAttempts": 40, + "delay": 15, + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "argument": "Images[].State", + "expected": "available" + }, + { + "state": "failure", + "matcher": "pathAny", + "argument": "Images[].State", + "expected": "failed" + } + ] + }, + "InstanceRunning": { + "delay": 15, + "operation": "DescribeInstances", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "running", + "matcher": "pathAll", + "state": "success", + "argument": "Reservations[].Instances[].State.Name" + }, + { + "expected": "shutting-down", + "matcher": "pathAny", + "state": "failure", + "argument": "Reservations[].Instances[].State.Name" + }, + { + "expected": "terminated", + "matcher": "pathAny", + "state": "failure", + "argument": "Reservations[].Instances[].State.Name" + }, + { + "expected": "stopping", + "matcher": "pathAny", + "state": "failure", + "argument": "Reservations[].Instances[].State.Name" + }, + { + "matcher": "error", + "expected": "InvalidInstanceID.NotFound", + "state": "retry" + } + ] + }, + "InstanceStatusOk": { + "operation": "DescribeInstanceStatus", + "maxAttempts": 40, + "delay": 15, + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "argument": "InstanceStatuses[].InstanceStatus.Status", + "expected": "ok" + }, + { + "matcher": "error", + "expected": "InvalidInstanceID.NotFound", + "state": "retry" + } + ] + }, + "InstanceStopped": { + "delay": 15, + "operation": "DescribeInstances", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "stopped", + "matcher": "pathAll", + "state": "success", + "argument": "Reservations[].Instances[].State.Name" + }, + { + "expected": "pending", + "matcher": "pathAny", + "state": "failure", + "argument": "Reservations[].Instances[].State.Name" + }, + { + "expected": "terminated", + "matcher": "pathAny", + "state": "failure", + "argument": "Reservations[].Instances[].State.Name" + } + ] + }, + "InstanceTerminated": { + "delay": 15, + "operation": "DescribeInstances", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "terminated", + "matcher": "pathAll", + "state": "success", + "argument": "Reservations[].Instances[].State.Name" + }, + { + "expected": "pending", + "matcher": "pathAny", + "state": "failure", + "argument": "Reservations[].Instances[].State.Name" + }, + { + "expected": "stopping", + "matcher": "pathAny", + "state": "failure", + "argument": "Reservations[].Instances[].State.Name" + } + ] + }, + "KeyPairExists": { + "operation": "DescribeKeyPairs", + "delay": 5, + "maxAttempts": 6, + "acceptors": [ + { + "expected": true, + "matcher": "pathAll", + "state": "success", + "argument": "length(KeyPairs[].KeyName) > `0`" + }, + { + "expected": "InvalidKeyPair.NotFound", + "matcher": "error", + "state": "retry" + } + ] + }, + "NatGatewayAvailable": { + "operation": "DescribeNatGateways", + "delay": 15, + "maxAttempts": 40, + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "argument": "NatGateways[].State", + "expected": "available" + }, + { + "state": "failure", + "matcher": "pathAny", + "argument": "NatGateways[].State", + "expected": "failed" + }, + { + "state": "failure", + "matcher": "pathAny", + "argument": "NatGateways[].State", + "expected": 
"deleting" + }, + { + "state": "failure", + "matcher": "pathAny", + "argument": "NatGateways[].State", + "expected": "deleted" + }, + { + "state": "retry", + "matcher": "error", + "expected": "NatGatewayNotFound" + } + ] + }, + "NetworkInterfaceAvailable": { + "operation": "DescribeNetworkInterfaces", + "delay": 20, + "maxAttempts": 10, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "NetworkInterfaces[].Status" + }, + { + "expected": "InvalidNetworkInterfaceID.NotFound", + "matcher": "error", + "state": "failure" + } + ] + }, + "PasswordDataAvailable": { + "operation": "GetPasswordData", + "maxAttempts": 40, + "delay": 15, + "acceptors": [ + { + "state": "success", + "matcher": "path", + "argument": "length(PasswordData) > `0`", + "expected": true + } + ] + }, + "SnapshotCompleted": { + "delay": 15, + "operation": "DescribeSnapshots", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "completed", + "matcher": "pathAll", + "state": "success", + "argument": "Snapshots[].State" + } + ] + }, + "SpotInstanceRequestFulfilled": { + "operation": "DescribeSpotInstanceRequests", + "maxAttempts": 40, + "delay": 15, + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "argument": "SpotInstanceRequests[].Status.Code", + "expected": "fulfilled" + }, + { + "state": "failure", + "matcher": "pathAny", + "argument": "SpotInstanceRequests[].Status.Code", + "expected": "schedule-expired" + }, + { + "state": "failure", + "matcher": "pathAny", + "argument": "SpotInstanceRequests[].Status.Code", + "expected": "canceled-before-fulfillment" + }, + { + "state": "failure", + "matcher": "pathAny", + "argument": "SpotInstanceRequests[].Status.Code", + "expected": "bad-parameters" + }, + { + "state": "failure", + "matcher": "pathAny", + "argument": "SpotInstanceRequests[].Status.Code", + "expected": "system-error" + } + ] + }, + "SubnetAvailable": { + "delay": 15, + "operation": "DescribeSubnets", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "Subnets[].State" + } + ] + }, + "SystemStatusOk": { + "operation": "DescribeInstanceStatus", + "maxAttempts": 40, + "delay": 15, + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "argument": "InstanceStatuses[].SystemStatus.Status", + "expected": "ok" + } + ] + }, + "VolumeAvailable": { + "delay": 15, + "operation": "DescribeVolumes", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "Volumes[].State" + }, + { + "expected": "deleted", + "matcher": "pathAny", + "state": "failure", + "argument": "Volumes[].State" + } + ] + }, + "VolumeDeleted": { + "delay": 15, + "operation": "DescribeVolumes", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "deleted", + "matcher": "pathAll", + "state": "success", + "argument": "Volumes[].State" + }, + { + "matcher": "error", + "expected": "InvalidVolume.NotFound", + "state": "success" + } + ] + }, + "VolumeInUse": { + "delay": 15, + "operation": "DescribeVolumes", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "in-use", + "matcher": "pathAll", + "state": "success", + "argument": "Volumes[].State" + }, + { + "expected": "deleted", + "matcher": "pathAny", + "state": "failure", + "argument": "Volumes[].State" + } + ] + }, + "VpcAvailable": { + "delay": 15, + "operation": "DescribeVpcs", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "available", + "matcher": 
"pathAll", + "state": "success", + "argument": "Vpcs[].State" + } + ] + }, + "VpcExists": { + "operation": "DescribeVpcs", + "delay": 1, + "maxAttempts": 5, + "acceptors": [ + { + "matcher": "status", + "expected": 200, + "state": "success" + }, + { + "matcher": "error", + "expected": "InvalidVpcID.NotFound", + "state": "retry" + } + ] + }, + "VpnConnectionAvailable": { + "delay": 15, + "operation": "DescribeVpnConnections", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "VpnConnections[].State" + }, + { + "expected": "deleting", + "matcher": "pathAny", + "state": "failure", + "argument": "VpnConnections[].State" + }, + { + "expected": "deleted", + "matcher": "pathAny", + "state": "failure", + "argument": "VpnConnections[].State" + } + ] + }, + "VpnConnectionDeleted": { + "delay": 15, + "operation": "DescribeVpnConnections", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "deleted", + "matcher": "pathAll", + "state": "success", + "argument": "VpnConnections[].State" + }, + { + "expected": "pending", + "matcher": "pathAny", + "state": "failure", + "argument": "VpnConnections[].State" + } + ] + }, + "VpcPeeringConnectionExists": { + "delay": 15, + "operation": "DescribeVpcPeeringConnections", + "maxAttempts": 40, + "acceptors": [ + { + "matcher": "status", + "expected": 200, + "state": "success" + }, + { + "matcher": "error", + "expected": "InvalidVpcPeeringConnectionID.NotFound", + "state": "retry" + } + ] + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2016-04-01/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2016-04-01/api-2.json new file mode 100644 index 000000000..88269543f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2016-04-01/api-2.json @@ -0,0 +1,13842 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2016-04-01", + "endpointPrefix":"ec2", + "protocol":"ec2", + "serviceAbbreviation":"Amazon EC2", + "serviceFullName":"Amazon Elastic Compute Cloud", + "signatureVersion":"v4", + "xmlNamespace":"http://ec2.amazonaws.com/doc/2016-04-01" + }, + "operations":{ + "AcceptVpcPeeringConnection":{ + "name":"AcceptVpcPeeringConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AcceptVpcPeeringConnectionRequest"}, + "output":{"shape":"AcceptVpcPeeringConnectionResult"} + }, + "AllocateAddress":{ + "name":"AllocateAddress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AllocateAddressRequest"}, + "output":{"shape":"AllocateAddressResult"} + }, + "AllocateHosts":{ + "name":"AllocateHosts", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AllocateHostsRequest"}, + "output":{"shape":"AllocateHostsResult"} + }, + "AssignPrivateIpAddresses":{ + "name":"AssignPrivateIpAddresses", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssignPrivateIpAddressesRequest"} + }, + "AssociateAddress":{ + "name":"AssociateAddress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateAddressRequest"}, + "output":{"shape":"AssociateAddressResult"} + }, + "AssociateDhcpOptions":{ + "name":"AssociateDhcpOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateDhcpOptionsRequest"} + }, + "AssociateRouteTable":{ + "name":"AssociateRouteTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateRouteTableRequest"}, + 
"output":{"shape":"AssociateRouteTableResult"} + }, + "AttachClassicLinkVpc":{ + "name":"AttachClassicLinkVpc", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachClassicLinkVpcRequest"}, + "output":{"shape":"AttachClassicLinkVpcResult"} + }, + "AttachInternetGateway":{ + "name":"AttachInternetGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachInternetGatewayRequest"} + }, + "AttachNetworkInterface":{ + "name":"AttachNetworkInterface", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachNetworkInterfaceRequest"}, + "output":{"shape":"AttachNetworkInterfaceResult"} + }, + "AttachVolume":{ + "name":"AttachVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachVolumeRequest"}, + "output":{"shape":"VolumeAttachment"} + }, + "AttachVpnGateway":{ + "name":"AttachVpnGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachVpnGatewayRequest"}, + "output":{"shape":"AttachVpnGatewayResult"} + }, + "AuthorizeSecurityGroupEgress":{ + "name":"AuthorizeSecurityGroupEgress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AuthorizeSecurityGroupEgressRequest"} + }, + "AuthorizeSecurityGroupIngress":{ + "name":"AuthorizeSecurityGroupIngress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AuthorizeSecurityGroupIngressRequest"} + }, + "BundleInstance":{ + "name":"BundleInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BundleInstanceRequest"}, + "output":{"shape":"BundleInstanceResult"} + }, + "CancelBundleTask":{ + "name":"CancelBundleTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelBundleTaskRequest"}, + "output":{"shape":"CancelBundleTaskResult"} + }, + "CancelConversionTask":{ + "name":"CancelConversionTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelConversionRequest"} + }, + "CancelExportTask":{ + "name":"CancelExportTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelExportTaskRequest"} + }, + "CancelImportTask":{ + "name":"CancelImportTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelImportTaskRequest"}, + "output":{"shape":"CancelImportTaskResult"} + }, + "CancelReservedInstancesListing":{ + "name":"CancelReservedInstancesListing", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelReservedInstancesListingRequest"}, + "output":{"shape":"CancelReservedInstancesListingResult"} + }, + "CancelSpotFleetRequests":{ + "name":"CancelSpotFleetRequests", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelSpotFleetRequestsRequest"}, + "output":{"shape":"CancelSpotFleetRequestsResponse"} + }, + "CancelSpotInstanceRequests":{ + "name":"CancelSpotInstanceRequests", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelSpotInstanceRequestsRequest"}, + "output":{"shape":"CancelSpotInstanceRequestsResult"} + }, + "ConfirmProductInstance":{ + "name":"ConfirmProductInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ConfirmProductInstanceRequest"}, + "output":{"shape":"ConfirmProductInstanceResult"} + }, + "CopyImage":{ + "name":"CopyImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CopyImageRequest"}, + "output":{"shape":"CopyImageResult"} + }, + "CopySnapshot":{ + 
"name":"CopySnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CopySnapshotRequest"}, + "output":{"shape":"CopySnapshotResult"} + }, + "CreateCustomerGateway":{ + "name":"CreateCustomerGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateCustomerGatewayRequest"}, + "output":{"shape":"CreateCustomerGatewayResult"} + }, + "CreateDhcpOptions":{ + "name":"CreateDhcpOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDhcpOptionsRequest"}, + "output":{"shape":"CreateDhcpOptionsResult"} + }, + "CreateFlowLogs":{ + "name":"CreateFlowLogs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateFlowLogsRequest"}, + "output":{"shape":"CreateFlowLogsResult"} + }, + "CreateImage":{ + "name":"CreateImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateImageRequest"}, + "output":{"shape":"CreateImageResult"} + }, + "CreateInstanceExportTask":{ + "name":"CreateInstanceExportTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateInstanceExportTaskRequest"}, + "output":{"shape":"CreateInstanceExportTaskResult"} + }, + "CreateInternetGateway":{ + "name":"CreateInternetGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateInternetGatewayRequest"}, + "output":{"shape":"CreateInternetGatewayResult"} + }, + "CreateKeyPair":{ + "name":"CreateKeyPair", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateKeyPairRequest"}, + "output":{"shape":"KeyPair"} + }, + "CreateNatGateway":{ + "name":"CreateNatGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateNatGatewayRequest"}, + "output":{"shape":"CreateNatGatewayResult"} + }, + "CreateNetworkAcl":{ + "name":"CreateNetworkAcl", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateNetworkAclRequest"}, + "output":{"shape":"CreateNetworkAclResult"} + }, + "CreateNetworkAclEntry":{ + "name":"CreateNetworkAclEntry", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateNetworkAclEntryRequest"} + }, + "CreateNetworkInterface":{ + "name":"CreateNetworkInterface", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateNetworkInterfaceRequest"}, + "output":{"shape":"CreateNetworkInterfaceResult"} + }, + "CreatePlacementGroup":{ + "name":"CreatePlacementGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePlacementGroupRequest"} + }, + "CreateReservedInstancesListing":{ + "name":"CreateReservedInstancesListing", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateReservedInstancesListingRequest"}, + "output":{"shape":"CreateReservedInstancesListingResult"} + }, + "CreateRoute":{ + "name":"CreateRoute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateRouteRequest"}, + "output":{"shape":"CreateRouteResult"} + }, + "CreateRouteTable":{ + "name":"CreateRouteTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateRouteTableRequest"}, + "output":{"shape":"CreateRouteTableResult"} + }, + "CreateSecurityGroup":{ + "name":"CreateSecurityGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSecurityGroupRequest"}, + "output":{"shape":"CreateSecurityGroupResult"} + }, + "CreateSnapshot":{ + "name":"CreateSnapshot", + "http":{ + "method":"POST", + 
"requestUri":"/" + }, + "input":{"shape":"CreateSnapshotRequest"}, + "output":{"shape":"Snapshot"} + }, + "CreateSpotDatafeedSubscription":{ + "name":"CreateSpotDatafeedSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSpotDatafeedSubscriptionRequest"}, + "output":{"shape":"CreateSpotDatafeedSubscriptionResult"} + }, + "CreateSubnet":{ + "name":"CreateSubnet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSubnetRequest"}, + "output":{"shape":"CreateSubnetResult"} + }, + "CreateTags":{ + "name":"CreateTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateTagsRequest"} + }, + "CreateVolume":{ + "name":"CreateVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVolumeRequest"}, + "output":{"shape":"Volume"} + }, + "CreateVpc":{ + "name":"CreateVpc", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVpcRequest"}, + "output":{"shape":"CreateVpcResult"} + }, + "CreateVpcEndpoint":{ + "name":"CreateVpcEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVpcEndpointRequest"}, + "output":{"shape":"CreateVpcEndpointResult"} + }, + "CreateVpcPeeringConnection":{ + "name":"CreateVpcPeeringConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVpcPeeringConnectionRequest"}, + "output":{"shape":"CreateVpcPeeringConnectionResult"} + }, + "CreateVpnConnection":{ + "name":"CreateVpnConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVpnConnectionRequest"}, + "output":{"shape":"CreateVpnConnectionResult"} + }, + "CreateVpnConnectionRoute":{ + "name":"CreateVpnConnectionRoute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVpnConnectionRouteRequest"} + }, + "CreateVpnGateway":{ + "name":"CreateVpnGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVpnGatewayRequest"}, + "output":{"shape":"CreateVpnGatewayResult"} + }, + "DeleteCustomerGateway":{ + "name":"DeleteCustomerGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteCustomerGatewayRequest"} + }, + "DeleteDhcpOptions":{ + "name":"DeleteDhcpOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDhcpOptionsRequest"} + }, + "DeleteFlowLogs":{ + "name":"DeleteFlowLogs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteFlowLogsRequest"}, + "output":{"shape":"DeleteFlowLogsResult"} + }, + "DeleteInternetGateway":{ + "name":"DeleteInternetGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteInternetGatewayRequest"} + }, + "DeleteKeyPair":{ + "name":"DeleteKeyPair", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteKeyPairRequest"} + }, + "DeleteNatGateway":{ + "name":"DeleteNatGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteNatGatewayRequest"}, + "output":{"shape":"DeleteNatGatewayResult"} + }, + "DeleteNetworkAcl":{ + "name":"DeleteNetworkAcl", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteNetworkAclRequest"} + }, + "DeleteNetworkAclEntry":{ + "name":"DeleteNetworkAclEntry", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteNetworkAclEntryRequest"} + }, + "DeleteNetworkInterface":{ + "name":"DeleteNetworkInterface", + "http":{ + 
"method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteNetworkInterfaceRequest"} + }, + "DeletePlacementGroup":{ + "name":"DeletePlacementGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeletePlacementGroupRequest"} + }, + "DeleteRoute":{ + "name":"DeleteRoute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRouteRequest"} + }, + "DeleteRouteTable":{ + "name":"DeleteRouteTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRouteTableRequest"} + }, + "DeleteSecurityGroup":{ + "name":"DeleteSecurityGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSecurityGroupRequest"} + }, + "DeleteSnapshot":{ + "name":"DeleteSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSnapshotRequest"} + }, + "DeleteSpotDatafeedSubscription":{ + "name":"DeleteSpotDatafeedSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSpotDatafeedSubscriptionRequest"} + }, + "DeleteSubnet":{ + "name":"DeleteSubnet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSubnetRequest"} + }, + "DeleteTags":{ + "name":"DeleteTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteTagsRequest"} + }, + "DeleteVolume":{ + "name":"DeleteVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVolumeRequest"} + }, + "DeleteVpc":{ + "name":"DeleteVpc", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVpcRequest"} + }, + "DeleteVpcEndpoints":{ + "name":"DeleteVpcEndpoints", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVpcEndpointsRequest"}, + "output":{"shape":"DeleteVpcEndpointsResult"} + }, + "DeleteVpcPeeringConnection":{ + "name":"DeleteVpcPeeringConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVpcPeeringConnectionRequest"}, + "output":{"shape":"DeleteVpcPeeringConnectionResult"} + }, + "DeleteVpnConnection":{ + "name":"DeleteVpnConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVpnConnectionRequest"} + }, + "DeleteVpnConnectionRoute":{ + "name":"DeleteVpnConnectionRoute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVpnConnectionRouteRequest"} + }, + "DeleteVpnGateway":{ + "name":"DeleteVpnGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVpnGatewayRequest"} + }, + "DeregisterImage":{ + "name":"DeregisterImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeregisterImageRequest"} + }, + "DescribeAccountAttributes":{ + "name":"DescribeAccountAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAccountAttributesRequest"}, + "output":{"shape":"DescribeAccountAttributesResult"} + }, + "DescribeAddresses":{ + "name":"DescribeAddresses", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAddressesRequest"}, + "output":{"shape":"DescribeAddressesResult"} + }, + "DescribeAvailabilityZones":{ + "name":"DescribeAvailabilityZones", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAvailabilityZonesRequest"}, + "output":{"shape":"DescribeAvailabilityZonesResult"} + }, + "DescribeBundleTasks":{ + "name":"DescribeBundleTasks", + "http":{ + "method":"POST", + 
"requestUri":"/" + }, + "input":{"shape":"DescribeBundleTasksRequest"}, + "output":{"shape":"DescribeBundleTasksResult"} + }, + "DescribeClassicLinkInstances":{ + "name":"DescribeClassicLinkInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeClassicLinkInstancesRequest"}, + "output":{"shape":"DescribeClassicLinkInstancesResult"} + }, + "DescribeConversionTasks":{ + "name":"DescribeConversionTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeConversionTasksRequest"}, + "output":{"shape":"DescribeConversionTasksResult"} + }, + "DescribeCustomerGateways":{ + "name":"DescribeCustomerGateways", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeCustomerGatewaysRequest"}, + "output":{"shape":"DescribeCustomerGatewaysResult"} + }, + "DescribeDhcpOptions":{ + "name":"DescribeDhcpOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDhcpOptionsRequest"}, + "output":{"shape":"DescribeDhcpOptionsResult"} + }, + "DescribeExportTasks":{ + "name":"DescribeExportTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeExportTasksRequest"}, + "output":{"shape":"DescribeExportTasksResult"} + }, + "DescribeFlowLogs":{ + "name":"DescribeFlowLogs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeFlowLogsRequest"}, + "output":{"shape":"DescribeFlowLogsResult"} + }, + "DescribeHosts":{ + "name":"DescribeHosts", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeHostsRequest"}, + "output":{"shape":"DescribeHostsResult"} + }, + "DescribeIdFormat":{ + "name":"DescribeIdFormat", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeIdFormatRequest"}, + "output":{"shape":"DescribeIdFormatResult"} + }, + "DescribeIdentityIdFormat":{ + "name":"DescribeIdentityIdFormat", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeIdentityIdFormatRequest"}, + "output":{"shape":"DescribeIdentityIdFormatResult"} + }, + "DescribeImageAttribute":{ + "name":"DescribeImageAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeImageAttributeRequest"}, + "output":{"shape":"ImageAttribute"} + }, + "DescribeImages":{ + "name":"DescribeImages", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeImagesRequest"}, + "output":{"shape":"DescribeImagesResult"} + }, + "DescribeImportImageTasks":{ + "name":"DescribeImportImageTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeImportImageTasksRequest"}, + "output":{"shape":"DescribeImportImageTasksResult"} + }, + "DescribeImportSnapshotTasks":{ + "name":"DescribeImportSnapshotTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeImportSnapshotTasksRequest"}, + "output":{"shape":"DescribeImportSnapshotTasksResult"} + }, + "DescribeInstanceAttribute":{ + "name":"DescribeInstanceAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeInstanceAttributeRequest"}, + "output":{"shape":"InstanceAttribute"} + }, + "DescribeInstanceStatus":{ + "name":"DescribeInstanceStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeInstanceStatusRequest"}, + "output":{"shape":"DescribeInstanceStatusResult"} + }, + "DescribeInstances":{ + "name":"DescribeInstances", + "http":{ + "method":"POST", + 
"requestUri":"/" + }, + "input":{"shape":"DescribeInstancesRequest"}, + "output":{"shape":"DescribeInstancesResult"} + }, + "DescribeInternetGateways":{ + "name":"DescribeInternetGateways", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeInternetGatewaysRequest"}, + "output":{"shape":"DescribeInternetGatewaysResult"} + }, + "DescribeKeyPairs":{ + "name":"DescribeKeyPairs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeKeyPairsRequest"}, + "output":{"shape":"DescribeKeyPairsResult"} + }, + "DescribeMovingAddresses":{ + "name":"DescribeMovingAddresses", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeMovingAddressesRequest"}, + "output":{"shape":"DescribeMovingAddressesResult"} + }, + "DescribeNatGateways":{ + "name":"DescribeNatGateways", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeNatGatewaysRequest"}, + "output":{"shape":"DescribeNatGatewaysResult"} + }, + "DescribeNetworkAcls":{ + "name":"DescribeNetworkAcls", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeNetworkAclsRequest"}, + "output":{"shape":"DescribeNetworkAclsResult"} + }, + "DescribeNetworkInterfaceAttribute":{ + "name":"DescribeNetworkInterfaceAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeNetworkInterfaceAttributeRequest"}, + "output":{"shape":"DescribeNetworkInterfaceAttributeResult"} + }, + "DescribeNetworkInterfaces":{ + "name":"DescribeNetworkInterfaces", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeNetworkInterfacesRequest"}, + "output":{"shape":"DescribeNetworkInterfacesResult"} + }, + "DescribePlacementGroups":{ + "name":"DescribePlacementGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribePlacementGroupsRequest"}, + "output":{"shape":"DescribePlacementGroupsResult"} + }, + "DescribePrefixLists":{ + "name":"DescribePrefixLists", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribePrefixListsRequest"}, + "output":{"shape":"DescribePrefixListsResult"} + }, + "DescribeRegions":{ + "name":"DescribeRegions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRegionsRequest"}, + "output":{"shape":"DescribeRegionsResult"} + }, + "DescribeReservedInstances":{ + "name":"DescribeReservedInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedInstancesRequest"}, + "output":{"shape":"DescribeReservedInstancesResult"} + }, + "DescribeReservedInstancesListings":{ + "name":"DescribeReservedInstancesListings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedInstancesListingsRequest"}, + "output":{"shape":"DescribeReservedInstancesListingsResult"} + }, + "DescribeReservedInstancesModifications":{ + "name":"DescribeReservedInstancesModifications", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedInstancesModificationsRequest"}, + "output":{"shape":"DescribeReservedInstancesModificationsResult"} + }, + "DescribeReservedInstancesOfferings":{ + "name":"DescribeReservedInstancesOfferings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedInstancesOfferingsRequest"}, + "output":{"shape":"DescribeReservedInstancesOfferingsResult"} + }, + "DescribeRouteTables":{ + "name":"DescribeRouteTables", + "http":{ + "method":"POST", 
+ "requestUri":"/" + }, + "input":{"shape":"DescribeRouteTablesRequest"}, + "output":{"shape":"DescribeRouteTablesResult"} + }, + "DescribeScheduledInstanceAvailability":{ + "name":"DescribeScheduledInstanceAvailability", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeScheduledInstanceAvailabilityRequest"}, + "output":{"shape":"DescribeScheduledInstanceAvailabilityResult"} + }, + "DescribeScheduledInstances":{ + "name":"DescribeScheduledInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeScheduledInstancesRequest"}, + "output":{"shape":"DescribeScheduledInstancesResult"} + }, + "DescribeSecurityGroupReferences":{ + "name":"DescribeSecurityGroupReferences", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSecurityGroupReferencesRequest"}, + "output":{"shape":"DescribeSecurityGroupReferencesResult"} + }, + "DescribeSecurityGroups":{ + "name":"DescribeSecurityGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSecurityGroupsRequest"}, + "output":{"shape":"DescribeSecurityGroupsResult"} + }, + "DescribeSnapshotAttribute":{ + "name":"DescribeSnapshotAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSnapshotAttributeRequest"}, + "output":{"shape":"DescribeSnapshotAttributeResult"} + }, + "DescribeSnapshots":{ + "name":"DescribeSnapshots", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSnapshotsRequest"}, + "output":{"shape":"DescribeSnapshotsResult"} + }, + "DescribeSpotDatafeedSubscription":{ + "name":"DescribeSpotDatafeedSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSpotDatafeedSubscriptionRequest"}, + "output":{"shape":"DescribeSpotDatafeedSubscriptionResult"} + }, + "DescribeSpotFleetInstances":{ + "name":"DescribeSpotFleetInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSpotFleetInstancesRequest"}, + "output":{"shape":"DescribeSpotFleetInstancesResponse"} + }, + "DescribeSpotFleetRequestHistory":{ + "name":"DescribeSpotFleetRequestHistory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSpotFleetRequestHistoryRequest"}, + "output":{"shape":"DescribeSpotFleetRequestHistoryResponse"} + }, + "DescribeSpotFleetRequests":{ + "name":"DescribeSpotFleetRequests", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSpotFleetRequestsRequest"}, + "output":{"shape":"DescribeSpotFleetRequestsResponse"} + }, + "DescribeSpotInstanceRequests":{ + "name":"DescribeSpotInstanceRequests", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSpotInstanceRequestsRequest"}, + "output":{"shape":"DescribeSpotInstanceRequestsResult"} + }, + "DescribeSpotPriceHistory":{ + "name":"DescribeSpotPriceHistory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSpotPriceHistoryRequest"}, + "output":{"shape":"DescribeSpotPriceHistoryResult"} + }, + "DescribeStaleSecurityGroups":{ + "name":"DescribeStaleSecurityGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeStaleSecurityGroupsRequest"}, + "output":{"shape":"DescribeStaleSecurityGroupsResult"} + }, + "DescribeSubnets":{ + "name":"DescribeSubnets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSubnetsRequest"}, + "output":{"shape":"DescribeSubnetsResult"} + 
}, + "DescribeTags":{ + "name":"DescribeTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTagsRequest"}, + "output":{"shape":"DescribeTagsResult"} + }, + "DescribeVolumeAttribute":{ + "name":"DescribeVolumeAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVolumeAttributeRequest"}, + "output":{"shape":"DescribeVolumeAttributeResult"} + }, + "DescribeVolumeStatus":{ + "name":"DescribeVolumeStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVolumeStatusRequest"}, + "output":{"shape":"DescribeVolumeStatusResult"} + }, + "DescribeVolumes":{ + "name":"DescribeVolumes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVolumesRequest"}, + "output":{"shape":"DescribeVolumesResult"} + }, + "DescribeVpcAttribute":{ + "name":"DescribeVpcAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpcAttributeRequest"}, + "output":{"shape":"DescribeVpcAttributeResult"} + }, + "DescribeVpcClassicLink":{ + "name":"DescribeVpcClassicLink", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpcClassicLinkRequest"}, + "output":{"shape":"DescribeVpcClassicLinkResult"} + }, + "DescribeVpcClassicLinkDnsSupport":{ + "name":"DescribeVpcClassicLinkDnsSupport", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpcClassicLinkDnsSupportRequest"}, + "output":{"shape":"DescribeVpcClassicLinkDnsSupportResult"} + }, + "DescribeVpcEndpointServices":{ + "name":"DescribeVpcEndpointServices", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpcEndpointServicesRequest"}, + "output":{"shape":"DescribeVpcEndpointServicesResult"} + }, + "DescribeVpcEndpoints":{ + "name":"DescribeVpcEndpoints", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpcEndpointsRequest"}, + "output":{"shape":"DescribeVpcEndpointsResult"} + }, + "DescribeVpcPeeringConnections":{ + "name":"DescribeVpcPeeringConnections", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpcPeeringConnectionsRequest"}, + "output":{"shape":"DescribeVpcPeeringConnectionsResult"} + }, + "DescribeVpcs":{ + "name":"DescribeVpcs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpcsRequest"}, + "output":{"shape":"DescribeVpcsResult"} + }, + "DescribeVpnConnections":{ + "name":"DescribeVpnConnections", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpnConnectionsRequest"}, + "output":{"shape":"DescribeVpnConnectionsResult"} + }, + "DescribeVpnGateways":{ + "name":"DescribeVpnGateways", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpnGatewaysRequest"}, + "output":{"shape":"DescribeVpnGatewaysResult"} + }, + "DetachClassicLinkVpc":{ + "name":"DetachClassicLinkVpc", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachClassicLinkVpcRequest"}, + "output":{"shape":"DetachClassicLinkVpcResult"} + }, + "DetachInternetGateway":{ + "name":"DetachInternetGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachInternetGatewayRequest"} + }, + "DetachNetworkInterface":{ + "name":"DetachNetworkInterface", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachNetworkInterfaceRequest"} + }, + "DetachVolume":{ + "name":"DetachVolume", + "http":{ + 
"method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachVolumeRequest"}, + "output":{"shape":"VolumeAttachment"} + }, + "DetachVpnGateway":{ + "name":"DetachVpnGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachVpnGatewayRequest"} + }, + "DisableVgwRoutePropagation":{ + "name":"DisableVgwRoutePropagation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableVgwRoutePropagationRequest"} + }, + "DisableVpcClassicLink":{ + "name":"DisableVpcClassicLink", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableVpcClassicLinkRequest"}, + "output":{"shape":"DisableVpcClassicLinkResult"} + }, + "DisableVpcClassicLinkDnsSupport":{ + "name":"DisableVpcClassicLinkDnsSupport", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableVpcClassicLinkDnsSupportRequest"}, + "output":{"shape":"DisableVpcClassicLinkDnsSupportResult"} + }, + "DisassociateAddress":{ + "name":"DisassociateAddress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisassociateAddressRequest"} + }, + "DisassociateRouteTable":{ + "name":"DisassociateRouteTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisassociateRouteTableRequest"} + }, + "EnableVgwRoutePropagation":{ + "name":"EnableVgwRoutePropagation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableVgwRoutePropagationRequest"} + }, + "EnableVolumeIO":{ + "name":"EnableVolumeIO", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableVolumeIORequest"} + }, + "EnableVpcClassicLink":{ + "name":"EnableVpcClassicLink", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableVpcClassicLinkRequest"}, + "output":{"shape":"EnableVpcClassicLinkResult"} + }, + "EnableVpcClassicLinkDnsSupport":{ + "name":"EnableVpcClassicLinkDnsSupport", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableVpcClassicLinkDnsSupportRequest"}, + "output":{"shape":"EnableVpcClassicLinkDnsSupportResult"} + }, + "GetConsoleOutput":{ + "name":"GetConsoleOutput", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetConsoleOutputRequest"}, + "output":{"shape":"GetConsoleOutputResult"} + }, + "GetConsoleScreenshot":{ + "name":"GetConsoleScreenshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetConsoleScreenshotRequest"}, + "output":{"shape":"GetConsoleScreenshotResult"} + }, + "GetPasswordData":{ + "name":"GetPasswordData", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetPasswordDataRequest"}, + "output":{"shape":"GetPasswordDataResult"} + }, + "ImportImage":{ + "name":"ImportImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ImportImageRequest"}, + "output":{"shape":"ImportImageResult"} + }, + "ImportInstance":{ + "name":"ImportInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ImportInstanceRequest"}, + "output":{"shape":"ImportInstanceResult"} + }, + "ImportKeyPair":{ + "name":"ImportKeyPair", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ImportKeyPairRequest"}, + "output":{"shape":"ImportKeyPairResult"} + }, + "ImportSnapshot":{ + "name":"ImportSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ImportSnapshotRequest"}, + "output":{"shape":"ImportSnapshotResult"} + }, + "ImportVolume":{ + "name":"ImportVolume", + 
"http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ImportVolumeRequest"}, + "output":{"shape":"ImportVolumeResult"} + }, + "ModifyHosts":{ + "name":"ModifyHosts", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyHostsRequest"}, + "output":{"shape":"ModifyHostsResult"} + }, + "ModifyIdFormat":{ + "name":"ModifyIdFormat", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyIdFormatRequest"} + }, + "ModifyIdentityIdFormat":{ + "name":"ModifyIdentityIdFormat", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyIdentityIdFormatRequest"} + }, + "ModifyImageAttribute":{ + "name":"ModifyImageAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyImageAttributeRequest"} + }, + "ModifyInstanceAttribute":{ + "name":"ModifyInstanceAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyInstanceAttributeRequest"} + }, + "ModifyInstancePlacement":{ + "name":"ModifyInstancePlacement", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyInstancePlacementRequest"}, + "output":{"shape":"ModifyInstancePlacementResult"} + }, + "ModifyNetworkInterfaceAttribute":{ + "name":"ModifyNetworkInterfaceAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyNetworkInterfaceAttributeRequest"} + }, + "ModifyReservedInstances":{ + "name":"ModifyReservedInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyReservedInstancesRequest"}, + "output":{"shape":"ModifyReservedInstancesResult"} + }, + "ModifySnapshotAttribute":{ + "name":"ModifySnapshotAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifySnapshotAttributeRequest"} + }, + "ModifySpotFleetRequest":{ + "name":"ModifySpotFleetRequest", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifySpotFleetRequestRequest"}, + "output":{"shape":"ModifySpotFleetRequestResponse"} + }, + "ModifySubnetAttribute":{ + "name":"ModifySubnetAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifySubnetAttributeRequest"} + }, + "ModifyVolumeAttribute":{ + "name":"ModifyVolumeAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyVolumeAttributeRequest"} + }, + "ModifyVpcAttribute":{ + "name":"ModifyVpcAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyVpcAttributeRequest"} + }, + "ModifyVpcEndpoint":{ + "name":"ModifyVpcEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyVpcEndpointRequest"}, + "output":{"shape":"ModifyVpcEndpointResult"} + }, + "ModifyVpcPeeringConnectionOptions":{ + "name":"ModifyVpcPeeringConnectionOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyVpcPeeringConnectionOptionsRequest"}, + "output":{"shape":"ModifyVpcPeeringConnectionOptionsResult"} + }, + "MonitorInstances":{ + "name":"MonitorInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"MonitorInstancesRequest"}, + "output":{"shape":"MonitorInstancesResult"} + }, + "MoveAddressToVpc":{ + "name":"MoveAddressToVpc", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"MoveAddressToVpcRequest"}, + "output":{"shape":"MoveAddressToVpcResult"} + }, + "PurchaseReservedInstancesOffering":{ + "name":"PurchaseReservedInstancesOffering", 
+ "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PurchaseReservedInstancesOfferingRequest"}, + "output":{"shape":"PurchaseReservedInstancesOfferingResult"} + }, + "PurchaseScheduledInstances":{ + "name":"PurchaseScheduledInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PurchaseScheduledInstancesRequest"}, + "output":{"shape":"PurchaseScheduledInstancesResult"} + }, + "RebootInstances":{ + "name":"RebootInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RebootInstancesRequest"} + }, + "RegisterImage":{ + "name":"RegisterImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterImageRequest"}, + "output":{"shape":"RegisterImageResult"} + }, + "RejectVpcPeeringConnection":{ + "name":"RejectVpcPeeringConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RejectVpcPeeringConnectionRequest"}, + "output":{"shape":"RejectVpcPeeringConnectionResult"} + }, + "ReleaseAddress":{ + "name":"ReleaseAddress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReleaseAddressRequest"} + }, + "ReleaseHosts":{ + "name":"ReleaseHosts", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReleaseHostsRequest"}, + "output":{"shape":"ReleaseHostsResult"} + }, + "ReplaceNetworkAclAssociation":{ + "name":"ReplaceNetworkAclAssociation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReplaceNetworkAclAssociationRequest"}, + "output":{"shape":"ReplaceNetworkAclAssociationResult"} + }, + "ReplaceNetworkAclEntry":{ + "name":"ReplaceNetworkAclEntry", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReplaceNetworkAclEntryRequest"} + }, + "ReplaceRoute":{ + "name":"ReplaceRoute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReplaceRouteRequest"} + }, + "ReplaceRouteTableAssociation":{ + "name":"ReplaceRouteTableAssociation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReplaceRouteTableAssociationRequest"}, + "output":{"shape":"ReplaceRouteTableAssociationResult"} + }, + "ReportInstanceStatus":{ + "name":"ReportInstanceStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReportInstanceStatusRequest"} + }, + "RequestSpotFleet":{ + "name":"RequestSpotFleet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RequestSpotFleetRequest"}, + "output":{"shape":"RequestSpotFleetResponse"} + }, + "RequestSpotInstances":{ + "name":"RequestSpotInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RequestSpotInstancesRequest"}, + "output":{"shape":"RequestSpotInstancesResult"} + }, + "ResetImageAttribute":{ + "name":"ResetImageAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetImageAttributeRequest"} + }, + "ResetInstanceAttribute":{ + "name":"ResetInstanceAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetInstanceAttributeRequest"} + }, + "ResetNetworkInterfaceAttribute":{ + "name":"ResetNetworkInterfaceAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetNetworkInterfaceAttributeRequest"} + }, + "ResetSnapshotAttribute":{ + "name":"ResetSnapshotAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetSnapshotAttributeRequest"} + }, + "RestoreAddressToClassic":{ + 
"name":"RestoreAddressToClassic", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreAddressToClassicRequest"}, + "output":{"shape":"RestoreAddressToClassicResult"} + }, + "RevokeSecurityGroupEgress":{ + "name":"RevokeSecurityGroupEgress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RevokeSecurityGroupEgressRequest"} + }, + "RevokeSecurityGroupIngress":{ + "name":"RevokeSecurityGroupIngress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RevokeSecurityGroupIngressRequest"} + }, + "RunInstances":{ + "name":"RunInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RunInstancesRequest"}, + "output":{"shape":"Reservation"} + }, + "RunScheduledInstances":{ + "name":"RunScheduledInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RunScheduledInstancesRequest"}, + "output":{"shape":"RunScheduledInstancesResult"} + }, + "StartInstances":{ + "name":"StartInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartInstancesRequest"}, + "output":{"shape":"StartInstancesResult"} + }, + "StopInstances":{ + "name":"StopInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopInstancesRequest"}, + "output":{"shape":"StopInstancesResult"} + }, + "TerminateInstances":{ + "name":"TerminateInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TerminateInstancesRequest"}, + "output":{"shape":"TerminateInstancesResult"} + }, + "UnassignPrivateIpAddresses":{ + "name":"UnassignPrivateIpAddresses", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UnassignPrivateIpAddressesRequest"} + }, + "UnmonitorInstances":{ + "name":"UnmonitorInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UnmonitorInstancesRequest"}, + "output":{"shape":"UnmonitorInstancesResult"} + } + }, + "shapes":{ + "AcceptVpcPeeringConnectionRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcPeeringConnectionId":{ + "shape":"String", + "locationName":"vpcPeeringConnectionId" + } + } + }, + "AcceptVpcPeeringConnectionResult":{ + "type":"structure", + "members":{ + "VpcPeeringConnection":{ + "shape":"VpcPeeringConnection", + "locationName":"vpcPeeringConnection" + } + } + }, + "AccountAttribute":{ + "type":"structure", + "members":{ + "AttributeName":{ + "shape":"String", + "locationName":"attributeName" + }, + "AttributeValues":{ + "shape":"AccountAttributeValueList", + "locationName":"attributeValueSet" + } + } + }, + "AccountAttributeList":{ + "type":"list", + "member":{ + "shape":"AccountAttribute", + "locationName":"item" + } + }, + "AccountAttributeName":{ + "type":"string", + "enum":[ + "supported-platforms", + "default-vpc" + ] + }, + "AccountAttributeNameStringList":{ + "type":"list", + "member":{ + "shape":"AccountAttributeName", + "locationName":"attributeName" + } + }, + "AccountAttributeValue":{ + "type":"structure", + "members":{ + "AttributeValue":{ + "shape":"String", + "locationName":"attributeValue" + } + } + }, + "AccountAttributeValueList":{ + "type":"list", + "member":{ + "shape":"AccountAttributeValue", + "locationName":"item" + } + }, + "ActiveInstance":{ + "type":"structure", + "members":{ + "InstanceType":{ + "shape":"String", + "locationName":"instanceType" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + 
"SpotInstanceRequestId":{ + "shape":"String", + "locationName":"spotInstanceRequestId" + } + } + }, + "ActiveInstanceSet":{ + "type":"list", + "member":{ + "shape":"ActiveInstance", + "locationName":"item" + } + }, + "Address":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + }, + "AllocationId":{ + "shape":"String", + "locationName":"allocationId" + }, + "AssociationId":{ + "shape":"String", + "locationName":"associationId" + }, + "Domain":{ + "shape":"DomainType", + "locationName":"domain" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "NetworkInterfaceOwnerId":{ + "shape":"String", + "locationName":"networkInterfaceOwnerId" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + } + } + }, + "AddressList":{ + "type":"list", + "member":{ + "shape":"Address", + "locationName":"item" + } + }, + "Affinity":{ + "type":"string", + "enum":[ + "default", + "host" + ] + }, + "AllocateAddressRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Domain":{"shape":"DomainType"} + } + }, + "AllocateAddressResult":{ + "type":"structure", + "members":{ + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + }, + "Domain":{ + "shape":"DomainType", + "locationName":"domain" + }, + "AllocationId":{ + "shape":"String", + "locationName":"allocationId" + } + } + }, + "AllocateHostsRequest":{ + "type":"structure", + "required":[ + "InstanceType", + "Quantity", + "AvailabilityZone" + ], + "members":{ + "AutoPlacement":{ + "shape":"AutoPlacement", + "locationName":"autoPlacement" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + }, + "InstanceType":{ + "shape":"String", + "locationName":"instanceType" + }, + "Quantity":{ + "shape":"Integer", + "locationName":"quantity" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + } + } + }, + "AllocateHostsResult":{ + "type":"structure", + "members":{ + "HostIds":{ + "shape":"ResponseHostIdList", + "locationName":"hostIdSet" + } + } + }, + "AllocationIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"AllocationId" + } + }, + "AllocationState":{ + "type":"string", + "enum":[ + "available", + "under-assessment", + "permanent-failure", + "released", + "released-permanent-failure" + ] + }, + "AllocationStrategy":{ + "type":"string", + "enum":[ + "lowestPrice", + "diversified" + ] + }, + "ArchitectureValues":{ + "type":"string", + "enum":[ + "i386", + "x86_64" + ] + }, + "AssignPrivateIpAddressesRequest":{ + "type":"structure", + "required":["NetworkInterfaceId"], + "members":{ + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "PrivateIpAddresses":{ + "shape":"PrivateIpAddressStringList", + "locationName":"privateIpAddress" + }, + "SecondaryPrivateIpAddressCount":{ + "shape":"Integer", + "locationName":"secondaryPrivateIpAddressCount" + }, + "AllowReassignment":{ + "shape":"Boolean", + "locationName":"allowReassignment" + } + } + }, + "AssociateAddressRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{"shape":"String"}, + "PublicIp":{"shape":"String"}, + "AllocationId":{"shape":"String"}, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "PrivateIpAddress":{ + 
"shape":"String", + "locationName":"privateIpAddress" + }, + "AllowReassociation":{ + "shape":"Boolean", + "locationName":"allowReassociation" + } + } + }, + "AssociateAddressResult":{ + "type":"structure", + "members":{ + "AssociationId":{ + "shape":"String", + "locationName":"associationId" + } + } + }, + "AssociateDhcpOptionsRequest":{ + "type":"structure", + "required":[ + "DhcpOptionsId", + "VpcId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "DhcpOptionsId":{"shape":"String"}, + "VpcId":{"shape":"String"} + } + }, + "AssociateRouteTableRequest":{ + "type":"structure", + "required":[ + "SubnetId", + "RouteTableId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "RouteTableId":{ + "shape":"String", + "locationName":"routeTableId" + } + } + }, + "AssociateRouteTableResult":{ + "type":"structure", + "members":{ + "AssociationId":{ + "shape":"String", + "locationName":"associationId" + } + } + }, + "AttachClassicLinkVpcRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "VpcId", + "Groups" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "Groups":{ + "shape":"GroupIdStringList", + "locationName":"SecurityGroupId" + } + } + }, + "AttachClassicLinkVpcResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "AttachInternetGatewayRequest":{ + "type":"structure", + "required":[ + "InternetGatewayId", + "VpcId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InternetGatewayId":{ + "shape":"String", + "locationName":"internetGatewayId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + } + } + }, + "AttachNetworkInterfaceRequest":{ + "type":"structure", + "required":[ + "NetworkInterfaceId", + "InstanceId", + "DeviceIndex" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "DeviceIndex":{ + "shape":"Integer", + "locationName":"deviceIndex" + } + } + }, + "AttachNetworkInterfaceResult":{ + "type":"structure", + "members":{ + "AttachmentId":{ + "shape":"String", + "locationName":"attachmentId" + } + } + }, + "AttachVolumeRequest":{ + "type":"structure", + "required":[ + "VolumeId", + "InstanceId", + "Device" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeId":{"shape":"String"}, + "InstanceId":{"shape":"String"}, + "Device":{"shape":"String"} + } + }, + "AttachVpnGatewayRequest":{ + "type":"structure", + "required":[ + "VpnGatewayId", + "VpcId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpnGatewayId":{"shape":"String"}, + "VpcId":{"shape":"String"} + } + }, + "AttachVpnGatewayResult":{ + "type":"structure", + "members":{ + "VpcAttachment":{ + "shape":"VpcAttachment", + "locationName":"attachment" + } + } + }, + "AttachmentStatus":{ + "type":"string", + "enum":[ + "attaching", + "attached", + "detaching", + "detached" + ] + }, + "AttributeBooleanValue":{ + "type":"structure", + "members":{ + "Value":{ + "shape":"Boolean", + "locationName":"value" + } + } + }, + "AttributeValue":{ 
+ "type":"structure", + "members":{ + "Value":{ + "shape":"String", + "locationName":"value" + } + } + }, + "AuthorizeSecurityGroupEgressRequest":{ + "type":"structure", + "required":["GroupId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupId":{ + "shape":"String", + "locationName":"groupId" + }, + "SourceSecurityGroupName":{ + "shape":"String", + "locationName":"sourceSecurityGroupName" + }, + "SourceSecurityGroupOwnerId":{ + "shape":"String", + "locationName":"sourceSecurityGroupOwnerId" + }, + "IpProtocol":{ + "shape":"String", + "locationName":"ipProtocol" + }, + "FromPort":{ + "shape":"Integer", + "locationName":"fromPort" + }, + "ToPort":{ + "shape":"Integer", + "locationName":"toPort" + }, + "CidrIp":{ + "shape":"String", + "locationName":"cidrIp" + }, + "IpPermissions":{ + "shape":"IpPermissionList", + "locationName":"ipPermissions" + } + } + }, + "AuthorizeSecurityGroupIngressRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupName":{"shape":"String"}, + "GroupId":{"shape":"String"}, + "SourceSecurityGroupName":{"shape":"String"}, + "SourceSecurityGroupOwnerId":{"shape":"String"}, + "IpProtocol":{"shape":"String"}, + "FromPort":{"shape":"Integer"}, + "ToPort":{"shape":"Integer"}, + "CidrIp":{"shape":"String"}, + "IpPermissions":{"shape":"IpPermissionList"} + } + }, + "AutoPlacement":{ + "type":"string", + "enum":[ + "on", + "off" + ] + }, + "AvailabilityZone":{ + "type":"structure", + "members":{ + "ZoneName":{ + "shape":"String", + "locationName":"zoneName" + }, + "State":{ + "shape":"AvailabilityZoneState", + "locationName":"zoneState" + }, + "RegionName":{ + "shape":"String", + "locationName":"regionName" + }, + "Messages":{ + "shape":"AvailabilityZoneMessageList", + "locationName":"messageSet" + } + } + }, + "AvailabilityZoneList":{ + "type":"list", + "member":{ + "shape":"AvailabilityZone", + "locationName":"item" + } + }, + "AvailabilityZoneMessage":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "AvailabilityZoneMessageList":{ + "type":"list", + "member":{ + "shape":"AvailabilityZoneMessage", + "locationName":"item" + } + }, + "AvailabilityZoneState":{ + "type":"string", + "enum":[ + "available", + "information", + "impaired", + "unavailable" + ] + }, + "AvailableCapacity":{ + "type":"structure", + "members":{ + "AvailableInstanceCapacity":{ + "shape":"AvailableInstanceCapacityList", + "locationName":"availableInstanceCapacity" + }, + "AvailableVCpus":{ + "shape":"Integer", + "locationName":"availableVCpus" + } + } + }, + "AvailableInstanceCapacityList":{ + "type":"list", + "member":{ + "shape":"InstanceCapacity", + "locationName":"item" + } + }, + "BatchState":{ + "type":"string", + "enum":[ + "submitted", + "active", + "cancelled", + "failed", + "cancelled_running", + "cancelled_terminating", + "modifying" + ] + }, + "Blob":{"type":"blob"}, + "BlobAttributeValue":{ + "type":"structure", + "members":{ + "Value":{ + "shape":"Blob", + "locationName":"value" + } + } + }, + "BlockDeviceMapping":{ + "type":"structure", + "members":{ + "VirtualName":{ + "shape":"String", + "locationName":"virtualName" + }, + "DeviceName":{ + "shape":"String", + "locationName":"deviceName" + }, + "Ebs":{ + "shape":"EbsBlockDevice", + "locationName":"ebs" + }, + "NoDevice":{ + "shape":"String", + "locationName":"noDevice" + } + } + }, + "BlockDeviceMappingList":{ + "type":"list", + "member":{ + "shape":"BlockDeviceMapping", 
+ "locationName":"item" + } + }, + "BlockDeviceMappingRequestList":{ + "type":"list", + "member":{ + "shape":"BlockDeviceMapping", + "locationName":"BlockDeviceMapping" + } + }, + "Boolean":{"type":"boolean"}, + "BundleIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"BundleId" + } + }, + "BundleInstanceRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "Storage" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{"shape":"String"}, + "Storage":{"shape":"Storage"} + } + }, + "BundleInstanceResult":{ + "type":"structure", + "members":{ + "BundleTask":{ + "shape":"BundleTask", + "locationName":"bundleInstanceTask" + } + } + }, + "BundleTask":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "BundleId":{ + "shape":"String", + "locationName":"bundleId" + }, + "State":{ + "shape":"BundleTaskState", + "locationName":"state" + }, + "StartTime":{ + "shape":"DateTime", + "locationName":"startTime" + }, + "UpdateTime":{ + "shape":"DateTime", + "locationName":"updateTime" + }, + "Storage":{ + "shape":"Storage", + "locationName":"storage" + }, + "Progress":{ + "shape":"String", + "locationName":"progress" + }, + "BundleTaskError":{ + "shape":"BundleTaskError", + "locationName":"error" + } + } + }, + "BundleTaskError":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"String", + "locationName":"code" + }, + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "BundleTaskList":{ + "type":"list", + "member":{ + "shape":"BundleTask", + "locationName":"item" + } + }, + "BundleTaskState":{ + "type":"string", + "enum":[ + "pending", + "waiting-for-shutdown", + "bundling", + "storing", + "cancelling", + "complete", + "failed" + ] + }, + "CancelBatchErrorCode":{ + "type":"string", + "enum":[ + "fleetRequestIdDoesNotExist", + "fleetRequestIdMalformed", + "fleetRequestNotInCancellableState", + "unexpectedError" + ] + }, + "CancelBundleTaskRequest":{ + "type":"structure", + "required":["BundleId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "BundleId":{"shape":"String"} + } + }, + "CancelBundleTaskResult":{ + "type":"structure", + "members":{ + "BundleTask":{ + "shape":"BundleTask", + "locationName":"bundleInstanceTask" + } + } + }, + "CancelConversionRequest":{ + "type":"structure", + "required":["ConversionTaskId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ConversionTaskId":{ + "shape":"String", + "locationName":"conversionTaskId" + }, + "ReasonMessage":{ + "shape":"String", + "locationName":"reasonMessage" + } + } + }, + "CancelExportTaskRequest":{ + "type":"structure", + "required":["ExportTaskId"], + "members":{ + "ExportTaskId":{ + "shape":"String", + "locationName":"exportTaskId" + } + } + }, + "CancelImportTaskRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "ImportTaskId":{"shape":"String"}, + "CancelReason":{"shape":"String"} + } + }, + "CancelImportTaskResult":{ + "type":"structure", + "members":{ + "ImportTaskId":{ + "shape":"String", + "locationName":"importTaskId" + }, + "State":{ + "shape":"String", + "locationName":"state" + }, + "PreviousState":{ + "shape":"String", + "locationName":"previousState" + } + } + }, + "CancelReservedInstancesListingRequest":{ + "type":"structure", + "required":["ReservedInstancesListingId"], + "members":{ + "ReservedInstancesListingId":{ + "shape":"String", + 
"locationName":"reservedInstancesListingId" + } + } + }, + "CancelReservedInstancesListingResult":{ + "type":"structure", + "members":{ + "ReservedInstancesListings":{ + "shape":"ReservedInstancesListingList", + "locationName":"reservedInstancesListingsSet" + } + } + }, + "CancelSpotFleetRequestsError":{ + "type":"structure", + "required":[ + "Code", + "Message" + ], + "members":{ + "Code":{ + "shape":"CancelBatchErrorCode", + "locationName":"code" + }, + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "CancelSpotFleetRequestsErrorItem":{ + "type":"structure", + "required":[ + "SpotFleetRequestId", + "Error" + ], + "members":{ + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + }, + "Error":{ + "shape":"CancelSpotFleetRequestsError", + "locationName":"error" + } + } + }, + "CancelSpotFleetRequestsErrorSet":{ + "type":"list", + "member":{ + "shape":"CancelSpotFleetRequestsErrorItem", + "locationName":"item" + } + }, + "CancelSpotFleetRequestsRequest":{ + "type":"structure", + "required":[ + "SpotFleetRequestIds", + "TerminateInstances" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SpotFleetRequestIds":{ + "shape":"ValueStringList", + "locationName":"spotFleetRequestId" + }, + "TerminateInstances":{ + "shape":"Boolean", + "locationName":"terminateInstances" + } + } + }, + "CancelSpotFleetRequestsResponse":{ + "type":"structure", + "members":{ + "UnsuccessfulFleetRequests":{ + "shape":"CancelSpotFleetRequestsErrorSet", + "locationName":"unsuccessfulFleetRequestSet" + }, + "SuccessfulFleetRequests":{ + "shape":"CancelSpotFleetRequestsSuccessSet", + "locationName":"successfulFleetRequestSet" + } + } + }, + "CancelSpotFleetRequestsSuccessItem":{ + "type":"structure", + "required":[ + "SpotFleetRequestId", + "CurrentSpotFleetRequestState", + "PreviousSpotFleetRequestState" + ], + "members":{ + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + }, + "CurrentSpotFleetRequestState":{ + "shape":"BatchState", + "locationName":"currentSpotFleetRequestState" + }, + "PreviousSpotFleetRequestState":{ + "shape":"BatchState", + "locationName":"previousSpotFleetRequestState" + } + } + }, + "CancelSpotFleetRequestsSuccessSet":{ + "type":"list", + "member":{ + "shape":"CancelSpotFleetRequestsSuccessItem", + "locationName":"item" + } + }, + "CancelSpotInstanceRequestState":{ + "type":"string", + "enum":[ + "active", + "open", + "closed", + "cancelled", + "completed" + ] + }, + "CancelSpotInstanceRequestsRequest":{ + "type":"structure", + "required":["SpotInstanceRequestIds"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SpotInstanceRequestIds":{ + "shape":"SpotInstanceRequestIdList", + "locationName":"SpotInstanceRequestId" + } + } + }, + "CancelSpotInstanceRequestsResult":{ + "type":"structure", + "members":{ + "CancelledSpotInstanceRequests":{ + "shape":"CancelledSpotInstanceRequestList", + "locationName":"spotInstanceRequestSet" + } + } + }, + "CancelledSpotInstanceRequest":{ + "type":"structure", + "members":{ + "SpotInstanceRequestId":{ + "shape":"String", + "locationName":"spotInstanceRequestId" + }, + "State":{ + "shape":"CancelSpotInstanceRequestState", + "locationName":"state" + } + } + }, + "CancelledSpotInstanceRequestList":{ + "type":"list", + "member":{ + "shape":"CancelledSpotInstanceRequest", + "locationName":"item" + } + }, + "ClassicLinkDnsSupport":{ + "type":"structure", + "members":{ + "VpcId":{ + "shape":"String", + 
"locationName":"vpcId" + }, + "ClassicLinkDnsSupported":{ + "shape":"Boolean", + "locationName":"classicLinkDnsSupported" + } + } + }, + "ClassicLinkDnsSupportList":{ + "type":"list", + "member":{ + "shape":"ClassicLinkDnsSupport", + "locationName":"item" + } + }, + "ClassicLinkInstance":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "Groups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "ClassicLinkInstanceList":{ + "type":"list", + "member":{ + "shape":"ClassicLinkInstance", + "locationName":"item" + } + }, + "ClientData":{ + "type":"structure", + "members":{ + "UploadStart":{"shape":"DateTime"}, + "UploadEnd":{"shape":"DateTime"}, + "UploadSize":{"shape":"Double"}, + "Comment":{"shape":"String"} + } + }, + "ConfirmProductInstanceRequest":{ + "type":"structure", + "required":[ + "ProductCode", + "InstanceId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ProductCode":{"shape":"String"}, + "InstanceId":{"shape":"String"} + } + }, + "ConfirmProductInstanceResult":{ + "type":"structure", + "members":{ + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "ContainerFormat":{ + "type":"string", + "enum":["ova"] + }, + "ConversionIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"item" + } + }, + "ConversionTask":{ + "type":"structure", + "required":[ + "ConversionTaskId", + "State" + ], + "members":{ + "ConversionTaskId":{ + "shape":"String", + "locationName":"conversionTaskId" + }, + "ExpirationTime":{ + "shape":"String", + "locationName":"expirationTime" + }, + "ImportInstance":{ + "shape":"ImportInstanceTaskDetails", + "locationName":"importInstance" + }, + "ImportVolume":{ + "shape":"ImportVolumeTaskDetails", + "locationName":"importVolume" + }, + "State":{ + "shape":"ConversionTaskState", + "locationName":"state" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "ConversionTaskState":{ + "type":"string", + "enum":[ + "active", + "cancelling", + "cancelled", + "completed" + ] + }, + "CopyImageRequest":{ + "type":"structure", + "required":[ + "SourceRegion", + "SourceImageId", + "Name" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SourceRegion":{"shape":"String"}, + "SourceImageId":{"shape":"String"}, + "Name":{"shape":"String"}, + "Description":{"shape":"String"}, + "ClientToken":{"shape":"String"}, + "Encrypted":{ + "shape":"Boolean", + "locationName":"encrypted" + }, + "KmsKeyId":{ + "shape":"String", + "locationName":"kmsKeyId" + } + } + }, + "CopyImageResult":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"String", + "locationName":"imageId" + } + } + }, + "CopySnapshotRequest":{ + "type":"structure", + "required":[ + "SourceRegion", + "SourceSnapshotId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SourceRegion":{"shape":"String"}, + "SourceSnapshotId":{"shape":"String"}, + "Description":{"shape":"String"}, + "DestinationRegion":{ + "shape":"String", + "locationName":"destinationRegion" + }, + "PresignedUrl":{ + "shape":"String", + "locationName":"presignedUrl" + }, + "Encrypted":{ + "shape":"Boolean", + 
"locationName":"encrypted" + }, + "KmsKeyId":{ + "shape":"String", + "locationName":"kmsKeyId" + } + } + }, + "CopySnapshotResult":{ + "type":"structure", + "members":{ + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + } + } + }, + "CreateCustomerGatewayRequest":{ + "type":"structure", + "required":[ + "Type", + "PublicIp", + "BgpAsn" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Type":{"shape":"GatewayType"}, + "PublicIp":{ + "shape":"String", + "locationName":"IpAddress" + }, + "BgpAsn":{"shape":"Integer"} + } + }, + "CreateCustomerGatewayResult":{ + "type":"structure", + "members":{ + "CustomerGateway":{ + "shape":"CustomerGateway", + "locationName":"customerGateway" + } + } + }, + "CreateDhcpOptionsRequest":{ + "type":"structure", + "required":["DhcpConfigurations"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "DhcpConfigurations":{ + "shape":"NewDhcpConfigurationList", + "locationName":"dhcpConfiguration" + } + } + }, + "CreateDhcpOptionsResult":{ + "type":"structure", + "members":{ + "DhcpOptions":{ + "shape":"DhcpOptions", + "locationName":"dhcpOptions" + } + } + }, + "CreateFlowLogsRequest":{ + "type":"structure", + "required":[ + "ResourceIds", + "ResourceType", + "TrafficType", + "LogGroupName", + "DeliverLogsPermissionArn" + ], + "members":{ + "ResourceIds":{ + "shape":"ValueStringList", + "locationName":"ResourceId" + }, + "ResourceType":{"shape":"FlowLogsResourceType"}, + "TrafficType":{"shape":"TrafficType"}, + "LogGroupName":{"shape":"String"}, + "DeliverLogsPermissionArn":{"shape":"String"}, + "ClientToken":{"shape":"String"} + } + }, + "CreateFlowLogsResult":{ + "type":"structure", + "members":{ + "FlowLogIds":{ + "shape":"ValueStringList", + "locationName":"flowLogIdSet" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + }, + "Unsuccessful":{ + "shape":"UnsuccessfulItemSet", + "locationName":"unsuccessful" + } + } + }, + "CreateImageRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "Name" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Name":{ + "shape":"String", + "locationName":"name" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "NoReboot":{ + "shape":"Boolean", + "locationName":"noReboot" + }, + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappingRequestList", + "locationName":"blockDeviceMapping" + } + } + }, + "CreateImageResult":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"String", + "locationName":"imageId" + } + } + }, + "CreateInstanceExportTaskRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "Description":{ + "shape":"String", + "locationName":"description" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "TargetEnvironment":{ + "shape":"ExportEnvironment", + "locationName":"targetEnvironment" + }, + "ExportToS3Task":{ + "shape":"ExportToS3TaskSpecification", + "locationName":"exportToS3" + } + } + }, + "CreateInstanceExportTaskResult":{ + "type":"structure", + "members":{ + "ExportTask":{ + "shape":"ExportTask", + "locationName":"exportTask" + } + } + }, + "CreateInternetGatewayRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + } + } + }, + "CreateInternetGatewayResult":{ + "type":"structure", + "members":{ + "InternetGateway":{ + 
"shape":"InternetGateway", + "locationName":"internetGateway" + } + } + }, + "CreateKeyPairRequest":{ + "type":"structure", + "required":["KeyName"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "KeyName":{"shape":"String"} + } + }, + "CreateNatGatewayRequest":{ + "type":"structure", + "required":[ + "SubnetId", + "AllocationId" + ], + "members":{ + "SubnetId":{"shape":"String"}, + "AllocationId":{"shape":"String"}, + "ClientToken":{"shape":"String"} + } + }, + "CreateNatGatewayResult":{ + "type":"structure", + "members":{ + "NatGateway":{ + "shape":"NatGateway", + "locationName":"natGateway" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + } + } + }, + "CreateNetworkAclEntryRequest":{ + "type":"structure", + "required":[ + "NetworkAclId", + "RuleNumber", + "Protocol", + "RuleAction", + "Egress", + "CidrBlock" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkAclId":{ + "shape":"String", + "locationName":"networkAclId" + }, + "RuleNumber":{ + "shape":"Integer", + "locationName":"ruleNumber" + }, + "Protocol":{ + "shape":"String", + "locationName":"protocol" + }, + "RuleAction":{ + "shape":"RuleAction", + "locationName":"ruleAction" + }, + "Egress":{ + "shape":"Boolean", + "locationName":"egress" + }, + "CidrBlock":{ + "shape":"String", + "locationName":"cidrBlock" + }, + "IcmpTypeCode":{ + "shape":"IcmpTypeCode", + "locationName":"Icmp" + }, + "PortRange":{ + "shape":"PortRange", + "locationName":"portRange" + } + } + }, + "CreateNetworkAclRequest":{ + "type":"structure", + "required":["VpcId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + } + } + }, + "CreateNetworkAclResult":{ + "type":"structure", + "members":{ + "NetworkAcl":{ + "shape":"NetworkAcl", + "locationName":"networkAcl" + } + } + }, + "CreateNetworkInterfaceRequest":{ + "type":"structure", + "required":["SubnetId"], + "members":{ + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "Groups":{ + "shape":"SecurityGroupIdStringList", + "locationName":"SecurityGroupId" + }, + "PrivateIpAddresses":{ + "shape":"PrivateIpAddressSpecificationList", + "locationName":"privateIpAddresses" + }, + "SecondaryPrivateIpAddressCount":{ + "shape":"Integer", + "locationName":"secondaryPrivateIpAddressCount" + }, + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + } + } + }, + "CreateNetworkInterfaceResult":{ + "type":"structure", + "members":{ + "NetworkInterface":{ + "shape":"NetworkInterface", + "locationName":"networkInterface" + } + } + }, + "CreatePlacementGroupRequest":{ + "type":"structure", + "required":[ + "GroupName", + "Strategy" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupName":{ + "shape":"String", + "locationName":"groupName" + }, + "Strategy":{ + "shape":"PlacementStrategy", + "locationName":"strategy" + } + } + }, + "CreateReservedInstancesListingRequest":{ + "type":"structure", + "required":[ + "ReservedInstancesId", + "InstanceCount", + "PriceSchedules", + "ClientToken" + ], + "members":{ + "ReservedInstancesId":{ + "shape":"String", + "locationName":"reservedInstancesId" + }, + "InstanceCount":{ + "shape":"Integer", + "locationName":"instanceCount" + }, + "PriceSchedules":{ + 
"shape":"PriceScheduleSpecificationList", + "locationName":"priceSchedules" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + } + } + }, + "CreateReservedInstancesListingResult":{ + "type":"structure", + "members":{ + "ReservedInstancesListings":{ + "shape":"ReservedInstancesListingList", + "locationName":"reservedInstancesListingsSet" + } + } + }, + "CreateRouteRequest":{ + "type":"structure", + "required":[ + "RouteTableId", + "DestinationCidrBlock" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "RouteTableId":{ + "shape":"String", + "locationName":"routeTableId" + }, + "DestinationCidrBlock":{ + "shape":"String", + "locationName":"destinationCidrBlock" + }, + "GatewayId":{ + "shape":"String", + "locationName":"gatewayId" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "VpcPeeringConnectionId":{ + "shape":"String", + "locationName":"vpcPeeringConnectionId" + }, + "NatGatewayId":{ + "shape":"String", + "locationName":"natGatewayId" + } + } + }, + "CreateRouteResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "CreateRouteTableRequest":{ + "type":"structure", + "required":["VpcId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + } + } + }, + "CreateRouteTableResult":{ + "type":"structure", + "members":{ + "RouteTable":{ + "shape":"RouteTable", + "locationName":"routeTable" + } + } + }, + "CreateSecurityGroupRequest":{ + "type":"structure", + "required":[ + "GroupName", + "Description" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupName":{"shape":"String"}, + "Description":{ + "shape":"String", + "locationName":"GroupDescription" + }, + "VpcId":{"shape":"String"} + } + }, + "CreateSecurityGroupResult":{ + "type":"structure", + "members":{ + "GroupId":{ + "shape":"String", + "locationName":"groupId" + } + } + }, + "CreateSnapshotRequest":{ + "type":"structure", + "required":["VolumeId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeId":{"shape":"String"}, + "Description":{"shape":"String"} + } + }, + "CreateSpotDatafeedSubscriptionRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Bucket":{ + "shape":"String", + "locationName":"bucket" + }, + "Prefix":{ + "shape":"String", + "locationName":"prefix" + } + } + }, + "CreateSpotDatafeedSubscriptionResult":{ + "type":"structure", + "members":{ + "SpotDatafeedSubscription":{ + "shape":"SpotDatafeedSubscription", + "locationName":"spotDatafeedSubscription" + } + } + }, + "CreateSubnetRequest":{ + "type":"structure", + "required":[ + "VpcId", + "CidrBlock" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcId":{"shape":"String"}, + "CidrBlock":{"shape":"String"}, + "AvailabilityZone":{"shape":"String"} + } + }, + "CreateSubnetResult":{ + "type":"structure", + "members":{ + "Subnet":{ + "shape":"Subnet", + "locationName":"subnet" + } + } + }, + "CreateTagsRequest":{ + "type":"structure", + "required":[ + "Resources", + "Tags" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Resources":{ + "shape":"ResourceIdList", + "locationName":"ResourceId" + }, + "Tags":{ + 
"shape":"TagList", + "locationName":"Tag" + } + } + }, + "CreateVolumePermission":{ + "type":"structure", + "members":{ + "UserId":{ + "shape":"String", + "locationName":"userId" + }, + "Group":{ + "shape":"PermissionGroup", + "locationName":"group" + } + } + }, + "CreateVolumePermissionList":{ + "type":"list", + "member":{ + "shape":"CreateVolumePermission", + "locationName":"item" + } + }, + "CreateVolumePermissionModifications":{ + "type":"structure", + "members":{ + "Add":{"shape":"CreateVolumePermissionList"}, + "Remove":{"shape":"CreateVolumePermissionList"} + } + }, + "CreateVolumeRequest":{ + "type":"structure", + "required":["AvailabilityZone"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Size":{"shape":"Integer"}, + "SnapshotId":{"shape":"String"}, + "AvailabilityZone":{"shape":"String"}, + "VolumeType":{"shape":"VolumeType"}, + "Iops":{"shape":"Integer"}, + "Encrypted":{ + "shape":"Boolean", + "locationName":"encrypted" + }, + "KmsKeyId":{"shape":"String"} + } + }, + "CreateVpcEndpointRequest":{ + "type":"structure", + "required":[ + "VpcId", + "ServiceName" + ], + "members":{ + "DryRun":{"shape":"Boolean"}, + "VpcId":{"shape":"String"}, + "ServiceName":{"shape":"String"}, + "PolicyDocument":{"shape":"String"}, + "RouteTableIds":{ + "shape":"ValueStringList", + "locationName":"RouteTableId" + }, + "ClientToken":{"shape":"String"} + } + }, + "CreateVpcEndpointResult":{ + "type":"structure", + "members":{ + "VpcEndpoint":{ + "shape":"VpcEndpoint", + "locationName":"vpcEndpoint" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + } + } + }, + "CreateVpcPeeringConnectionRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "PeerVpcId":{ + "shape":"String", + "locationName":"peerVpcId" + }, + "PeerOwnerId":{ + "shape":"String", + "locationName":"peerOwnerId" + } + } + }, + "CreateVpcPeeringConnectionResult":{ + "type":"structure", + "members":{ + "VpcPeeringConnection":{ + "shape":"VpcPeeringConnection", + "locationName":"vpcPeeringConnection" + } + } + }, + "CreateVpcRequest":{ + "type":"structure", + "required":["CidrBlock"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "CidrBlock":{"shape":"String"}, + "InstanceTenancy":{ + "shape":"Tenancy", + "locationName":"instanceTenancy" + } + } + }, + "CreateVpcResult":{ + "type":"structure", + "members":{ + "Vpc":{ + "shape":"Vpc", + "locationName":"vpc" + } + } + }, + "CreateVpnConnectionRequest":{ + "type":"structure", + "required":[ + "Type", + "CustomerGatewayId", + "VpnGatewayId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Type":{"shape":"String"}, + "CustomerGatewayId":{"shape":"String"}, + "VpnGatewayId":{"shape":"String"}, + "Options":{ + "shape":"VpnConnectionOptionsSpecification", + "locationName":"options" + } + } + }, + "CreateVpnConnectionResult":{ + "type":"structure", + "members":{ + "VpnConnection":{ + "shape":"VpnConnection", + "locationName":"vpnConnection" + } + } + }, + "CreateVpnConnectionRouteRequest":{ + "type":"structure", + "required":[ + "VpnConnectionId", + "DestinationCidrBlock" + ], + "members":{ + "VpnConnectionId":{"shape":"String"}, + "DestinationCidrBlock":{"shape":"String"} + } + }, + "CreateVpnGatewayRequest":{ + "type":"structure", + "required":["Type"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + 
"Type":{"shape":"GatewayType"}, + "AvailabilityZone":{"shape":"String"} + } + }, + "CreateVpnGatewayResult":{ + "type":"structure", + "members":{ + "VpnGateway":{ + "shape":"VpnGateway", + "locationName":"vpnGateway" + } + } + }, + "CurrencyCodeValues":{ + "type":"string", + "enum":["USD"] + }, + "CustomerGateway":{ + "type":"structure", + "members":{ + "CustomerGatewayId":{ + "shape":"String", + "locationName":"customerGatewayId" + }, + "State":{ + "shape":"String", + "locationName":"state" + }, + "Type":{ + "shape":"String", + "locationName":"type" + }, + "IpAddress":{ + "shape":"String", + "locationName":"ipAddress" + }, + "BgpAsn":{ + "shape":"String", + "locationName":"bgpAsn" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "CustomerGatewayIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"CustomerGatewayId" + } + }, + "CustomerGatewayList":{ + "type":"list", + "member":{ + "shape":"CustomerGateway", + "locationName":"item" + } + }, + "DatafeedSubscriptionState":{ + "type":"string", + "enum":[ + "Active", + "Inactive" + ] + }, + "DateTime":{"type":"timestamp"}, + "DeleteCustomerGatewayRequest":{ + "type":"structure", + "required":["CustomerGatewayId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "CustomerGatewayId":{"shape":"String"} + } + }, + "DeleteDhcpOptionsRequest":{ + "type":"structure", + "required":["DhcpOptionsId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "DhcpOptionsId":{"shape":"String"} + } + }, + "DeleteFlowLogsRequest":{ + "type":"structure", + "required":["FlowLogIds"], + "members":{ + "FlowLogIds":{ + "shape":"ValueStringList", + "locationName":"FlowLogId" + } + } + }, + "DeleteFlowLogsResult":{ + "type":"structure", + "members":{ + "Unsuccessful":{ + "shape":"UnsuccessfulItemSet", + "locationName":"unsuccessful" + } + } + }, + "DeleteInternetGatewayRequest":{ + "type":"structure", + "required":["InternetGatewayId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InternetGatewayId":{ + "shape":"String", + "locationName":"internetGatewayId" + } + } + }, + "DeleteKeyPairRequest":{ + "type":"structure", + "required":["KeyName"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "KeyName":{"shape":"String"} + } + }, + "DeleteNatGatewayRequest":{ + "type":"structure", + "required":["NatGatewayId"], + "members":{ + "NatGatewayId":{"shape":"String"} + } + }, + "DeleteNatGatewayResult":{ + "type":"structure", + "members":{ + "NatGatewayId":{ + "shape":"String", + "locationName":"natGatewayId" + } + } + }, + "DeleteNetworkAclEntryRequest":{ + "type":"structure", + "required":[ + "NetworkAclId", + "RuleNumber", + "Egress" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkAclId":{ + "shape":"String", + "locationName":"networkAclId" + }, + "RuleNumber":{ + "shape":"Integer", + "locationName":"ruleNumber" + }, + "Egress":{ + "shape":"Boolean", + "locationName":"egress" + } + } + }, + "DeleteNetworkAclRequest":{ + "type":"structure", + "required":["NetworkAclId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkAclId":{ + "shape":"String", + "locationName":"networkAclId" + } + } + }, + "DeleteNetworkInterfaceRequest":{ + "type":"structure", + "required":["NetworkInterfaceId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkInterfaceId":{ + 
"shape":"String", + "locationName":"networkInterfaceId" + } + } + }, + "DeletePlacementGroupRequest":{ + "type":"structure", + "required":["GroupName"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupName":{ + "shape":"String", + "locationName":"groupName" + } + } + }, + "DeleteRouteRequest":{ + "type":"structure", + "required":[ + "RouteTableId", + "DestinationCidrBlock" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "RouteTableId":{ + "shape":"String", + "locationName":"routeTableId" + }, + "DestinationCidrBlock":{ + "shape":"String", + "locationName":"destinationCidrBlock" + } + } + }, + "DeleteRouteTableRequest":{ + "type":"structure", + "required":["RouteTableId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "RouteTableId":{ + "shape":"String", + "locationName":"routeTableId" + } + } + }, + "DeleteSecurityGroupRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupName":{"shape":"String"}, + "GroupId":{"shape":"String"} + } + }, + "DeleteSnapshotRequest":{ + "type":"structure", + "required":["SnapshotId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SnapshotId":{"shape":"String"} + } + }, + "DeleteSpotDatafeedSubscriptionRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + } + } + }, + "DeleteSubnetRequest":{ + "type":"structure", + "required":["SubnetId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SubnetId":{"shape":"String"} + } + }, + "DeleteTagsRequest":{ + "type":"structure", + "required":["Resources"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Resources":{ + "shape":"ResourceIdList", + "locationName":"resourceId" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tag" + } + } + }, + "DeleteVolumeRequest":{ + "type":"structure", + "required":["VolumeId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeId":{"shape":"String"} + } + }, + "DeleteVpcEndpointsRequest":{ + "type":"structure", + "required":["VpcEndpointIds"], + "members":{ + "DryRun":{"shape":"Boolean"}, + "VpcEndpointIds":{ + "shape":"ValueStringList", + "locationName":"VpcEndpointId" + } + } + }, + "DeleteVpcEndpointsResult":{ + "type":"structure", + "members":{ + "Unsuccessful":{ + "shape":"UnsuccessfulItemSet", + "locationName":"unsuccessful" + } + } + }, + "DeleteVpcPeeringConnectionRequest":{ + "type":"structure", + "required":["VpcPeeringConnectionId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcPeeringConnectionId":{ + "shape":"String", + "locationName":"vpcPeeringConnectionId" + } + } + }, + "DeleteVpcPeeringConnectionResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "DeleteVpcRequest":{ + "type":"structure", + "required":["VpcId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcId":{"shape":"String"} + } + }, + "DeleteVpnConnectionRequest":{ + "type":"structure", + "required":["VpnConnectionId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpnConnectionId":{"shape":"String"} + } + }, + "DeleteVpnConnectionRouteRequest":{ + "type":"structure", + "required":[ + "VpnConnectionId", + "DestinationCidrBlock" + ], + "members":{ + 
"VpnConnectionId":{"shape":"String"}, + "DestinationCidrBlock":{"shape":"String"} + } + }, + "DeleteVpnGatewayRequest":{ + "type":"structure", + "required":["VpnGatewayId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpnGatewayId":{"shape":"String"} + } + }, + "DeregisterImageRequest":{ + "type":"structure", + "required":["ImageId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ImageId":{"shape":"String"} + } + }, + "DescribeAccountAttributesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "AttributeNames":{ + "shape":"AccountAttributeNameStringList", + "locationName":"attributeName" + } + } + }, + "DescribeAccountAttributesResult":{ + "type":"structure", + "members":{ + "AccountAttributes":{ + "shape":"AccountAttributeList", + "locationName":"accountAttributeSet" + } + } + }, + "DescribeAddressesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "PublicIps":{ + "shape":"PublicIpStringList", + "locationName":"PublicIp" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "AllocationIds":{ + "shape":"AllocationIdList", + "locationName":"AllocationId" + } + } + }, + "DescribeAddressesResult":{ + "type":"structure", + "members":{ + "Addresses":{ + "shape":"AddressList", + "locationName":"addressesSet" + } + } + }, + "DescribeAvailabilityZonesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ZoneNames":{ + "shape":"ZoneNameStringList", + "locationName":"ZoneName" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeAvailabilityZonesResult":{ + "type":"structure", + "members":{ + "AvailabilityZones":{ + "shape":"AvailabilityZoneList", + "locationName":"availabilityZoneInfo" + } + } + }, + "DescribeBundleTasksRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "BundleIds":{ + "shape":"BundleIdStringList", + "locationName":"BundleId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeBundleTasksResult":{ + "type":"structure", + "members":{ + "BundleTasks":{ + "shape":"BundleTaskList", + "locationName":"bundleInstanceTasksSet" + } + } + }, + "DescribeClassicLinkInstancesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + } + } + }, + "DescribeClassicLinkInstancesResult":{ + "type":"structure", + "members":{ + "Instances":{ + "shape":"ClassicLinkInstanceList", + "locationName":"instancesSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeConversionTaskList":{ + "type":"list", + "member":{ + "shape":"ConversionTask", + "locationName":"item" + } + }, + "DescribeConversionTasksRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"filter" + }, + "ConversionTaskIds":{ + "shape":"ConversionIdStringList", + "locationName":"conversionTaskId" + } + } + }, + 
"DescribeConversionTasksResult":{ + "type":"structure", + "members":{ + "ConversionTasks":{ + "shape":"DescribeConversionTaskList", + "locationName":"conversionTasks" + } + } + }, + "DescribeCustomerGatewaysRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "CustomerGatewayIds":{ + "shape":"CustomerGatewayIdStringList", + "locationName":"CustomerGatewayId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeCustomerGatewaysResult":{ + "type":"structure", + "members":{ + "CustomerGateways":{ + "shape":"CustomerGatewayList", + "locationName":"customerGatewaySet" + } + } + }, + "DescribeDhcpOptionsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "DhcpOptionsIds":{ + "shape":"DhcpOptionsIdStringList", + "locationName":"DhcpOptionsId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeDhcpOptionsResult":{ + "type":"structure", + "members":{ + "DhcpOptions":{ + "shape":"DhcpOptionsList", + "locationName":"dhcpOptionsSet" + } + } + }, + "DescribeExportTasksRequest":{ + "type":"structure", + "members":{ + "ExportTaskIds":{ + "shape":"ExportTaskIdStringList", + "locationName":"exportTaskId" + } + } + }, + "DescribeExportTasksResult":{ + "type":"structure", + "members":{ + "ExportTasks":{ + "shape":"ExportTaskList", + "locationName":"exportTaskSet" + } + } + }, + "DescribeFlowLogsRequest":{ + "type":"structure", + "members":{ + "FlowLogIds":{ + "shape":"ValueStringList", + "locationName":"FlowLogId" + }, + "Filter":{"shape":"FilterList"}, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"Integer"} + } + }, + "DescribeFlowLogsResult":{ + "type":"structure", + "members":{ + "FlowLogs":{ + "shape":"FlowLogSet", + "locationName":"flowLogSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeHostsRequest":{ + "type":"structure", + "members":{ + "HostIds":{ + "shape":"RequestHostIdList", + "locationName":"hostId" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + }, + "Filter":{ + "shape":"FilterList", + "locationName":"filter" + } + } + }, + "DescribeHostsResult":{ + "type":"structure", + "members":{ + "Hosts":{ + "shape":"HostList", + "locationName":"hostSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeIdFormatRequest":{ + "type":"structure", + "members":{ + "Resource":{"shape":"String"} + } + }, + "DescribeIdFormatResult":{ + "type":"structure", + "members":{ + "Statuses":{ + "shape":"IdFormatList", + "locationName":"statusSet" + } + } + }, + "DescribeIdentityIdFormatRequest":{ + "type":"structure", + "required":["PrincipalArn"], + "members":{ + "Resource":{ + "shape":"String", + "locationName":"resource" + }, + "PrincipalArn":{ + "shape":"String", + "locationName":"principalArn" + } + } + }, + "DescribeIdentityIdFormatResult":{ + "type":"structure", + "members":{ + "Statuses":{ + "shape":"IdFormatList", + "locationName":"statusSet" + } + } + }, + "DescribeImageAttributeRequest":{ + "type":"structure", + "required":[ + "ImageId", + "Attribute" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ImageId":{"shape":"String"}, + "Attribute":{"shape":"ImageAttributeName"} + } + }, + "DescribeImagesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + 
"shape":"Boolean", + "locationName":"dryRun" + }, + "ImageIds":{ + "shape":"ImageIdStringList", + "locationName":"ImageId" + }, + "Owners":{ + "shape":"OwnerStringList", + "locationName":"Owner" + }, + "ExecutableUsers":{ + "shape":"ExecutableByStringList", + "locationName":"ExecutableBy" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeImagesResult":{ + "type":"structure", + "members":{ + "Images":{ + "shape":"ImageList", + "locationName":"imagesSet" + } + } + }, + "DescribeImportImageTasksRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "ImportTaskIds":{ + "shape":"ImportTaskIdList", + "locationName":"ImportTaskId" + }, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"Integer"}, + "Filters":{"shape":"FilterList"} + } + }, + "DescribeImportImageTasksResult":{ + "type":"structure", + "members":{ + "ImportImageTasks":{ + "shape":"ImportImageTaskList", + "locationName":"importImageTaskSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeImportSnapshotTasksRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "ImportTaskIds":{ + "shape":"ImportTaskIdList", + "locationName":"ImportTaskId" + }, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"Integer"}, + "Filters":{"shape":"FilterList"} + } + }, + "DescribeImportSnapshotTasksResult":{ + "type":"structure", + "members":{ + "ImportSnapshotTasks":{ + "shape":"ImportSnapshotTaskList", + "locationName":"importSnapshotTaskSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeInstanceAttributeRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "Attribute" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Attribute":{ + "shape":"InstanceAttributeName", + "locationName":"attribute" + } + } + }, + "DescribeInstanceStatusRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"Integer"}, + "IncludeAllInstances":{ + "shape":"Boolean", + "locationName":"includeAllInstances" + } + } + }, + "DescribeInstanceStatusResult":{ + "type":"structure", + "members":{ + "InstanceStatuses":{ + "shape":"InstanceStatusList", + "locationName":"instanceStatusSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeInstancesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + } + } + }, + "DescribeInstancesResult":{ + "type":"structure", + "members":{ + "Reservations":{ + "shape":"ReservationList", + "locationName":"reservationSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeInternetGatewaysRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InternetGatewayIds":{ + 
"shape":"ValueStringList", + "locationName":"internetGatewayId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeInternetGatewaysResult":{ + "type":"structure", + "members":{ + "InternetGateways":{ + "shape":"InternetGatewayList", + "locationName":"internetGatewaySet" + } + } + }, + "DescribeKeyPairsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "KeyNames":{ + "shape":"KeyNameStringList", + "locationName":"KeyName" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeKeyPairsResult":{ + "type":"structure", + "members":{ + "KeyPairs":{ + "shape":"KeyPairList", + "locationName":"keySet" + } + } + }, + "DescribeMovingAddressesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "PublicIps":{ + "shape":"ValueStringList", + "locationName":"publicIp" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"filter" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + } + } + }, + "DescribeMovingAddressesResult":{ + "type":"structure", + "members":{ + "MovingAddressStatuses":{ + "shape":"MovingAddressStatusSet", + "locationName":"movingAddressStatusSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeNatGatewaysRequest":{ + "type":"structure", + "members":{ + "NatGatewayIds":{ + "shape":"ValueStringList", + "locationName":"NatGatewayId" + }, + "Filter":{"shape":"FilterList"}, + "MaxResults":{"shape":"Integer"}, + "NextToken":{"shape":"String"} + } + }, + "DescribeNatGatewaysResult":{ + "type":"structure", + "members":{ + "NatGateways":{ + "shape":"NatGatewayList", + "locationName":"natGatewaySet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeNetworkAclsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkAclIds":{ + "shape":"ValueStringList", + "locationName":"NetworkAclId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeNetworkAclsResult":{ + "type":"structure", + "members":{ + "NetworkAcls":{ + "shape":"NetworkAclList", + "locationName":"networkAclSet" + } + } + }, + "DescribeNetworkInterfaceAttributeRequest":{ + "type":"structure", + "required":["NetworkInterfaceId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "Attribute":{ + "shape":"NetworkInterfaceAttribute", + "locationName":"attribute" + } + } + }, + "DescribeNetworkInterfaceAttributeResult":{ + "type":"structure", + "members":{ + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "Description":{ + "shape":"AttributeValue", + "locationName":"description" + }, + "SourceDestCheck":{ + "shape":"AttributeBooleanValue", + "locationName":"sourceDestCheck" + }, + "Groups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + }, + "Attachment":{ + "shape":"NetworkInterfaceAttachment", + "locationName":"attachment" + } + } + }, + "DescribeNetworkInterfacesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkInterfaceIds":{ + "shape":"NetworkInterfaceIdList", + "locationName":"NetworkInterfaceId" + }, + 
"Filters":{ + "shape":"FilterList", + "locationName":"filter" + } + } + }, + "DescribeNetworkInterfacesResult":{ + "type":"structure", + "members":{ + "NetworkInterfaces":{ + "shape":"NetworkInterfaceList", + "locationName":"networkInterfaceSet" + } + } + }, + "DescribePlacementGroupsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupNames":{ + "shape":"PlacementGroupStringList", + "locationName":"groupName" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribePlacementGroupsResult":{ + "type":"structure", + "members":{ + "PlacementGroups":{ + "shape":"PlacementGroupList", + "locationName":"placementGroupSet" + } + } + }, + "DescribePrefixListsRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "PrefixListIds":{ + "shape":"ValueStringList", + "locationName":"PrefixListId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "MaxResults":{"shape":"Integer"}, + "NextToken":{"shape":"String"} + } + }, + "DescribePrefixListsResult":{ + "type":"structure", + "members":{ + "PrefixLists":{ + "shape":"PrefixListSet", + "locationName":"prefixListSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeRegionsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "RegionNames":{ + "shape":"RegionNameStringList", + "locationName":"RegionName" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeRegionsResult":{ + "type":"structure", + "members":{ + "Regions":{ + "shape":"RegionList", + "locationName":"regionInfo" + } + } + }, + "DescribeReservedInstancesListingsRequest":{ + "type":"structure", + "members":{ + "ReservedInstancesId":{ + "shape":"String", + "locationName":"reservedInstancesId" + }, + "ReservedInstancesListingId":{ + "shape":"String", + "locationName":"reservedInstancesListingId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"filters" + } + } + }, + "DescribeReservedInstancesListingsResult":{ + "type":"structure", + "members":{ + "ReservedInstancesListings":{ + "shape":"ReservedInstancesListingList", + "locationName":"reservedInstancesListingsSet" + } + } + }, + "DescribeReservedInstancesModificationsRequest":{ + "type":"structure", + "members":{ + "ReservedInstancesModificationIds":{ + "shape":"ReservedInstancesModificationIdStringList", + "locationName":"ReservedInstancesModificationId" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeReservedInstancesModificationsResult":{ + "type":"structure", + "members":{ + "ReservedInstancesModifications":{ + "shape":"ReservedInstancesModificationList", + "locationName":"reservedInstancesModificationsSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeReservedInstancesOfferingsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ReservedInstancesOfferingIds":{ + "shape":"ReservedInstancesOfferingIdStringList", + "locationName":"ReservedInstancesOfferingId" + }, + "InstanceType":{"shape":"InstanceType"}, + "AvailabilityZone":{"shape":"String"}, + "ProductDescription":{"shape":"RIProductDescription"}, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "InstanceTenancy":{ + "shape":"Tenancy", + 
"locationName":"instanceTenancy" + }, + "OfferingType":{ + "shape":"OfferingTypeValues", + "locationName":"offeringType" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + }, + "IncludeMarketplace":{"shape":"Boolean"}, + "MinDuration":{"shape":"Long"}, + "MaxDuration":{"shape":"Long"}, + "MaxInstanceCount":{"shape":"Integer"} + } + }, + "DescribeReservedInstancesOfferingsResult":{ + "type":"structure", + "members":{ + "ReservedInstancesOfferings":{ + "shape":"ReservedInstancesOfferingList", + "locationName":"reservedInstancesOfferingsSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeReservedInstancesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ReservedInstancesIds":{ + "shape":"ReservedInstancesIdStringList", + "locationName":"ReservedInstancesId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "OfferingType":{ + "shape":"OfferingTypeValues", + "locationName":"offeringType" + } + } + }, + "DescribeReservedInstancesResult":{ + "type":"structure", + "members":{ + "ReservedInstances":{ + "shape":"ReservedInstancesList", + "locationName":"reservedInstancesSet" + } + } + }, + "DescribeRouteTablesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "RouteTableIds":{ + "shape":"ValueStringList", + "locationName":"RouteTableId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeRouteTablesResult":{ + "type":"structure", + "members":{ + "RouteTables":{ + "shape":"RouteTableList", + "locationName":"routeTableSet" + } + } + }, + "DescribeScheduledInstanceAvailabilityRequest":{ + "type":"structure", + "required":[ + "Recurrence", + "FirstSlotStartTimeRange" + ], + "members":{ + "DryRun":{"shape":"Boolean"}, + "Recurrence":{"shape":"ScheduledInstanceRecurrenceRequest"}, + "FirstSlotStartTimeRange":{"shape":"SlotDateTimeRangeRequest"}, + "MinSlotDurationInHours":{"shape":"Integer"}, + "MaxSlotDurationInHours":{"shape":"Integer"}, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"Integer"}, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeScheduledInstanceAvailabilityResult":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "ScheduledInstanceAvailabilitySet":{ + "shape":"ScheduledInstanceAvailabilitySet", + "locationName":"scheduledInstanceAvailabilitySet" + } + } + }, + "DescribeScheduledInstancesRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "ScheduledInstanceIds":{ + "shape":"ScheduledInstanceIdRequestSet", + "locationName":"ScheduledInstanceId" + }, + "SlotStartTimeRange":{"shape":"SlotStartTimeRangeRequest"}, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"Integer"}, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeScheduledInstancesResult":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "ScheduledInstanceSet":{ + "shape":"ScheduledInstanceSet", + "locationName":"scheduledInstanceSet" + } + } + }, + "DescribeSecurityGroupReferencesRequest":{ + "type":"structure", + "required":["GroupId"], + "members":{ + "DryRun":{"shape":"Boolean"}, + "GroupId":{"shape":"GroupIds"} + } + }, + 
"DescribeSecurityGroupReferencesResult":{ + "type":"structure", + "members":{ + "SecurityGroupReferenceSet":{ + "shape":"SecurityGroupReferences", + "locationName":"securityGroupReferenceSet" + } + } + }, + "DescribeSecurityGroupsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupNames":{ + "shape":"GroupNameStringList", + "locationName":"GroupName" + }, + "GroupIds":{ + "shape":"GroupIdStringList", + "locationName":"GroupId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeSecurityGroupsResult":{ + "type":"structure", + "members":{ + "SecurityGroups":{ + "shape":"SecurityGroupList", + "locationName":"securityGroupInfo" + } + } + }, + "DescribeSnapshotAttributeRequest":{ + "type":"structure", + "required":[ + "SnapshotId", + "Attribute" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SnapshotId":{"shape":"String"}, + "Attribute":{"shape":"SnapshotAttributeName"} + } + }, + "DescribeSnapshotAttributeResult":{ + "type":"structure", + "members":{ + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + }, + "CreateVolumePermissions":{ + "shape":"CreateVolumePermissionList", + "locationName":"createVolumePermission" + }, + "ProductCodes":{ + "shape":"ProductCodeList", + "locationName":"productCodes" + } + } + }, + "DescribeSnapshotsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SnapshotIds":{ + "shape":"SnapshotIdStringList", + "locationName":"SnapshotId" + }, + "OwnerIds":{ + "shape":"OwnerStringList", + "locationName":"Owner" + }, + "RestorableByUserIds":{ + "shape":"RestorableByStringList", + "locationName":"RestorableBy" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"Integer"} + } + }, + "DescribeSnapshotsResult":{ + "type":"structure", + "members":{ + "Snapshots":{ + "shape":"SnapshotList", + "locationName":"snapshotSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeSpotDatafeedSubscriptionRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + } + } + }, + "DescribeSpotDatafeedSubscriptionResult":{ + "type":"structure", + "members":{ + "SpotDatafeedSubscription":{ + "shape":"SpotDatafeedSubscription", + "locationName":"spotDatafeedSubscription" + } + } + }, + "DescribeSpotFleetInstancesRequest":{ + "type":"structure", + "required":["SpotFleetRequestId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + } + } + }, + "DescribeSpotFleetInstancesResponse":{ + "type":"structure", + "required":[ + "SpotFleetRequestId", + "ActiveInstances" + ], + "members":{ + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + }, + "ActiveInstances":{ + "shape":"ActiveInstanceSet", + "locationName":"activeInstanceSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeSpotFleetRequestHistoryRequest":{ + "type":"structure", + "required":[ + "SpotFleetRequestId", + "StartTime" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + 
"SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + }, + "EventType":{ + "shape":"EventType", + "locationName":"eventType" + }, + "StartTime":{ + "shape":"DateTime", + "locationName":"startTime" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + } + } + }, + "DescribeSpotFleetRequestHistoryResponse":{ + "type":"structure", + "required":[ + "SpotFleetRequestId", + "StartTime", + "LastEvaluatedTime", + "HistoryRecords" + ], + "members":{ + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + }, + "StartTime":{ + "shape":"DateTime", + "locationName":"startTime" + }, + "LastEvaluatedTime":{ + "shape":"DateTime", + "locationName":"lastEvaluatedTime" + }, + "HistoryRecords":{ + "shape":"HistoryRecords", + "locationName":"historyRecordSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeSpotFleetRequestsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SpotFleetRequestIds":{ + "shape":"ValueStringList", + "locationName":"spotFleetRequestId" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + } + } + }, + "DescribeSpotFleetRequestsResponse":{ + "type":"structure", + "required":["SpotFleetRequestConfigs"], + "members":{ + "SpotFleetRequestConfigs":{ + "shape":"SpotFleetRequestConfigSet", + "locationName":"spotFleetRequestConfigSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeSpotInstanceRequestsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SpotInstanceRequestIds":{ + "shape":"SpotInstanceRequestIdList", + "locationName":"SpotInstanceRequestId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeSpotInstanceRequestsResult":{ + "type":"structure", + "members":{ + "SpotInstanceRequests":{ + "shape":"SpotInstanceRequestList", + "locationName":"spotInstanceRequestSet" + } + } + }, + "DescribeSpotPriceHistoryRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "StartTime":{ + "shape":"DateTime", + "locationName":"startTime" + }, + "EndTime":{ + "shape":"DateTime", + "locationName":"endTime" + }, + "InstanceTypes":{ + "shape":"InstanceTypeList", + "locationName":"InstanceType" + }, + "ProductDescriptions":{ + "shape":"ProductDescriptionList", + "locationName":"ProductDescription" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeSpotPriceHistoryResult":{ + "type":"structure", + "members":{ + "SpotPriceHistory":{ + "shape":"SpotPriceHistoryList", + "locationName":"spotPriceHistorySet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeStaleSecurityGroupsRequest":{ + "type":"structure", + "required":["VpcId"], + "members":{ + "DryRun":{"shape":"Boolean"}, + "VpcId":{"shape":"String"}, + "MaxResults":{"shape":"MaxResults"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeStaleSecurityGroupsResult":{ + "type":"structure", + 
"members":{ + "StaleSecurityGroupSet":{ + "shape":"StaleSecurityGroupSet", + "locationName":"staleSecurityGroupSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeSubnetsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SubnetIds":{ + "shape":"SubnetIdStringList", + "locationName":"SubnetId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeSubnetsResult":{ + "type":"structure", + "members":{ + "Subnets":{ + "shape":"SubnetList", + "locationName":"subnetSet" + } + } + }, + "DescribeTagsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeTagsResult":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagDescriptionList", + "locationName":"tagSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeVolumeAttributeRequest":{ + "type":"structure", + "required":["VolumeId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeId":{"shape":"String"}, + "Attribute":{"shape":"VolumeAttributeName"} + } + }, + "DescribeVolumeAttributeResult":{ + "type":"structure", + "members":{ + "VolumeId":{ + "shape":"String", + "locationName":"volumeId" + }, + "AutoEnableIO":{ + "shape":"AttributeBooleanValue", + "locationName":"autoEnableIO" + }, + "ProductCodes":{ + "shape":"ProductCodeList", + "locationName":"productCodes" + } + } + }, + "DescribeVolumeStatusRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeIds":{ + "shape":"VolumeIdStringList", + "locationName":"VolumeId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"Integer"} + } + }, + "DescribeVolumeStatusResult":{ + "type":"structure", + "members":{ + "VolumeStatuses":{ + "shape":"VolumeStatusList", + "locationName":"volumeStatusSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeVolumesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeIds":{ + "shape":"VolumeIdStringList", + "locationName":"VolumeId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + } + } + }, + "DescribeVolumesResult":{ + "type":"structure", + "members":{ + "Volumes":{ + "shape":"VolumeList", + "locationName":"volumeSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeVpcAttributeRequest":{ + "type":"structure", + "required":[ + "VpcId", + "Attribute" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcId":{"shape":"String"}, + "Attribute":{"shape":"VpcAttributeName"} + } + }, + "DescribeVpcAttributeResult":{ + "type":"structure", + "members":{ + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "EnableDnsSupport":{ + "shape":"AttributeBooleanValue", + "locationName":"enableDnsSupport" + }, + "EnableDnsHostnames":{ + 
"shape":"AttributeBooleanValue", + "locationName":"enableDnsHostnames" + } + } + }, + "DescribeVpcClassicLinkDnsSupportRequest":{ + "type":"structure", + "members":{ + "VpcIds":{"shape":"VpcClassicLinkIdList"}, + "MaxResults":{ + "shape":"MaxResults", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"NextToken", + "locationName":"nextToken" + } + } + }, + "DescribeVpcClassicLinkDnsSupportResult":{ + "type":"structure", + "members":{ + "Vpcs":{ + "shape":"ClassicLinkDnsSupportList", + "locationName":"vpcs" + }, + "NextToken":{ + "shape":"NextToken", + "locationName":"nextToken" + } + } + }, + "DescribeVpcClassicLinkRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcIds":{ + "shape":"VpcClassicLinkIdList", + "locationName":"VpcId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeVpcClassicLinkResult":{ + "type":"structure", + "members":{ + "Vpcs":{ + "shape":"VpcClassicLinkList", + "locationName":"vpcSet" + } + } + }, + "DescribeVpcEndpointServicesRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "MaxResults":{"shape":"Integer"}, + "NextToken":{"shape":"String"} + } + }, + "DescribeVpcEndpointServicesResult":{ + "type":"structure", + "members":{ + "ServiceNames":{ + "shape":"ValueStringList", + "locationName":"serviceNameSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeVpcEndpointsRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "VpcEndpointIds":{ + "shape":"ValueStringList", + "locationName":"VpcEndpointId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "MaxResults":{"shape":"Integer"}, + "NextToken":{"shape":"String"} + } + }, + "DescribeVpcEndpointsResult":{ + "type":"structure", + "members":{ + "VpcEndpoints":{ + "shape":"VpcEndpointSet", + "locationName":"vpcEndpointSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeVpcPeeringConnectionsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcPeeringConnectionIds":{ + "shape":"ValueStringList", + "locationName":"VpcPeeringConnectionId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeVpcPeeringConnectionsResult":{ + "type":"structure", + "members":{ + "VpcPeeringConnections":{ + "shape":"VpcPeeringConnectionList", + "locationName":"vpcPeeringConnectionSet" + } + } + }, + "DescribeVpcsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcIds":{ + "shape":"VpcIdStringList", + "locationName":"VpcId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeVpcsResult":{ + "type":"structure", + "members":{ + "Vpcs":{ + "shape":"VpcList", + "locationName":"vpcSet" + } + } + }, + "DescribeVpnConnectionsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpnConnectionIds":{ + "shape":"VpnConnectionIdStringList", + "locationName":"VpnConnectionId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeVpnConnectionsResult":{ + "type":"structure", + "members":{ + "VpnConnections":{ + "shape":"VpnConnectionList", + "locationName":"vpnConnectionSet" + } + } + }, + "DescribeVpnGatewaysRequest":{ + "type":"structure", + "members":{ + 
"DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpnGatewayIds":{ + "shape":"VpnGatewayIdStringList", + "locationName":"VpnGatewayId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeVpnGatewaysResult":{ + "type":"structure", + "members":{ + "VpnGateways":{ + "shape":"VpnGatewayList", + "locationName":"vpnGatewaySet" + } + } + }, + "DetachClassicLinkVpcRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "VpcId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + } + } + }, + "DetachClassicLinkVpcResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "DetachInternetGatewayRequest":{ + "type":"structure", + "required":[ + "InternetGatewayId", + "VpcId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InternetGatewayId":{ + "shape":"String", + "locationName":"internetGatewayId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + } + } + }, + "DetachNetworkInterfaceRequest":{ + "type":"structure", + "required":["AttachmentId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "AttachmentId":{ + "shape":"String", + "locationName":"attachmentId" + }, + "Force":{ + "shape":"Boolean", + "locationName":"force" + } + } + }, + "DetachVolumeRequest":{ + "type":"structure", + "required":["VolumeId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeId":{"shape":"String"}, + "InstanceId":{"shape":"String"}, + "Device":{"shape":"String"}, + "Force":{"shape":"Boolean"} + } + }, + "DetachVpnGatewayRequest":{ + "type":"structure", + "required":[ + "VpnGatewayId", + "VpcId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpnGatewayId":{"shape":"String"}, + "VpcId":{"shape":"String"} + } + }, + "DeviceType":{ + "type":"string", + "enum":[ + "ebs", + "instance-store" + ] + }, + "DhcpConfiguration":{ + "type":"structure", + "members":{ + "Key":{ + "shape":"String", + "locationName":"key" + }, + "Values":{ + "shape":"DhcpConfigurationValueList", + "locationName":"valueSet" + } + } + }, + "DhcpConfigurationList":{ + "type":"list", + "member":{ + "shape":"DhcpConfiguration", + "locationName":"item" + } + }, + "DhcpConfigurationValueList":{ + "type":"list", + "member":{ + "shape":"AttributeValue", + "locationName":"item" + } + }, + "DhcpOptions":{ + "type":"structure", + "members":{ + "DhcpOptionsId":{ + "shape":"String", + "locationName":"dhcpOptionsId" + }, + "DhcpConfigurations":{ + "shape":"DhcpConfigurationList", + "locationName":"dhcpConfigurationSet" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "DhcpOptionsIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"DhcpOptionsId" + } + }, + "DhcpOptionsList":{ + "type":"list", + "member":{ + "shape":"DhcpOptions", + "locationName":"item" + } + }, + "DisableVgwRoutePropagationRequest":{ + "type":"structure", + "required":[ + "RouteTableId", + "GatewayId" + ], + "members":{ + "RouteTableId":{"shape":"String"}, + "GatewayId":{"shape":"String"} + } + }, + "DisableVpcClassicLinkDnsSupportRequest":{ + "type":"structure", + "members":{ + "VpcId":{"shape":"String"} + } + }, + "DisableVpcClassicLinkDnsSupportResult":{ + "type":"structure", + "members":{ + 
"Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "DisableVpcClassicLinkRequest":{ + "type":"structure", + "required":["VpcId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + } + } + }, + "DisableVpcClassicLinkResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "DisassociateAddressRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "PublicIp":{"shape":"String"}, + "AssociationId":{"shape":"String"} + } + }, + "DisassociateRouteTableRequest":{ + "type":"structure", + "required":["AssociationId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "AssociationId":{ + "shape":"String", + "locationName":"associationId" + } + } + }, + "DiskImage":{ + "type":"structure", + "members":{ + "Image":{"shape":"DiskImageDetail"}, + "Description":{"shape":"String"}, + "Volume":{"shape":"VolumeDetail"} + } + }, + "DiskImageDescription":{ + "type":"structure", + "required":[ + "Format", + "Size", + "ImportManifestUrl" + ], + "members":{ + "Format":{ + "shape":"DiskImageFormat", + "locationName":"format" + }, + "Size":{ + "shape":"Long", + "locationName":"size" + }, + "ImportManifestUrl":{ + "shape":"String", + "locationName":"importManifestUrl" + }, + "Checksum":{ + "shape":"String", + "locationName":"checksum" + } + } + }, + "DiskImageDetail":{ + "type":"structure", + "required":[ + "Format", + "Bytes", + "ImportManifestUrl" + ], + "members":{ + "Format":{ + "shape":"DiskImageFormat", + "locationName":"format" + }, + "Bytes":{ + "shape":"Long", + "locationName":"bytes" + }, + "ImportManifestUrl":{ + "shape":"String", + "locationName":"importManifestUrl" + } + } + }, + "DiskImageFormat":{ + "type":"string", + "enum":[ + "VMDK", + "RAW", + "VHD" + ] + }, + "DiskImageList":{ + "type":"list", + "member":{"shape":"DiskImage"} + }, + "DiskImageVolumeDescription":{ + "type":"structure", + "required":["Id"], + "members":{ + "Size":{ + "shape":"Long", + "locationName":"size" + }, + "Id":{ + "shape":"String", + "locationName":"id" + } + } + }, + "DomainType":{ + "type":"string", + "enum":[ + "vpc", + "standard" + ] + }, + "Double":{"type":"double"}, + "EbsBlockDevice":{ + "type":"structure", + "members":{ + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + }, + "VolumeSize":{ + "shape":"Integer", + "locationName":"volumeSize" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + "locationName":"deleteOnTermination" + }, + "VolumeType":{ + "shape":"VolumeType", + "locationName":"volumeType" + }, + "Iops":{ + "shape":"Integer", + "locationName":"iops" + }, + "Encrypted":{ + "shape":"Boolean", + "locationName":"encrypted" + } + } + }, + "EbsInstanceBlockDevice":{ + "type":"structure", + "members":{ + "VolumeId":{ + "shape":"String", + "locationName":"volumeId" + }, + "Status":{ + "shape":"AttachmentStatus", + "locationName":"status" + }, + "AttachTime":{ + "shape":"DateTime", + "locationName":"attachTime" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + "locationName":"deleteOnTermination" + } + } + }, + "EbsInstanceBlockDeviceSpecification":{ + "type":"structure", + "members":{ + "VolumeId":{ + "shape":"String", + "locationName":"volumeId" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + "locationName":"deleteOnTermination" + } + } + }, + "EnableVgwRoutePropagationRequest":{ + "type":"structure", + "required":[ + 
"RouteTableId", + "GatewayId" + ], + "members":{ + "RouteTableId":{"shape":"String"}, + "GatewayId":{"shape":"String"} + } + }, + "EnableVolumeIORequest":{ + "type":"structure", + "required":["VolumeId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeId":{ + "shape":"String", + "locationName":"volumeId" + } + } + }, + "EnableVpcClassicLinkDnsSupportRequest":{ + "type":"structure", + "members":{ + "VpcId":{"shape":"String"} + } + }, + "EnableVpcClassicLinkDnsSupportResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "EnableVpcClassicLinkRequest":{ + "type":"structure", + "required":["VpcId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + } + } + }, + "EnableVpcClassicLinkResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "EventCode":{ + "type":"string", + "enum":[ + "instance-reboot", + "system-reboot", + "system-maintenance", + "instance-retirement", + "instance-stop" + ] + }, + "EventInformation":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "EventSubType":{ + "shape":"String", + "locationName":"eventSubType" + }, + "EventDescription":{ + "shape":"String", + "locationName":"eventDescription" + } + } + }, + "EventType":{ + "type":"string", + "enum":[ + "instanceChange", + "fleetRequestChange", + "error" + ] + }, + "ExcessCapacityTerminationPolicy":{ + "type":"string", + "enum":[ + "noTermination", + "default" + ] + }, + "ExecutableByStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ExecutableBy" + } + }, + "ExportEnvironment":{ + "type":"string", + "enum":[ + "citrix", + "vmware", + "microsoft" + ] + }, + "ExportTask":{ + "type":"structure", + "members":{ + "ExportTaskId":{ + "shape":"String", + "locationName":"exportTaskId" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "State":{ + "shape":"ExportTaskState", + "locationName":"state" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "InstanceExportDetails":{ + "shape":"InstanceExportDetails", + "locationName":"instanceExport" + }, + "ExportToS3Task":{ + "shape":"ExportToS3Task", + "locationName":"exportToS3" + } + } + }, + "ExportTaskIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ExportTaskId" + } + }, + "ExportTaskList":{ + "type":"list", + "member":{ + "shape":"ExportTask", + "locationName":"item" + } + }, + "ExportTaskState":{ + "type":"string", + "enum":[ + "active", + "cancelling", + "cancelled", + "completed" + ] + }, + "ExportToS3Task":{ + "type":"structure", + "members":{ + "DiskImageFormat":{ + "shape":"DiskImageFormat", + "locationName":"diskImageFormat" + }, + "ContainerFormat":{ + "shape":"ContainerFormat", + "locationName":"containerFormat" + }, + "S3Bucket":{ + "shape":"String", + "locationName":"s3Bucket" + }, + "S3Key":{ + "shape":"String", + "locationName":"s3Key" + } + } + }, + "ExportToS3TaskSpecification":{ + "type":"structure", + "members":{ + "DiskImageFormat":{ + "shape":"DiskImageFormat", + "locationName":"diskImageFormat" + }, + "ContainerFormat":{ + "shape":"ContainerFormat", + "locationName":"containerFormat" + }, + "S3Bucket":{ + "shape":"String", + "locationName":"s3Bucket" + }, + "S3Prefix":{ + "shape":"String", + "locationName":"s3Prefix" + 
} + } + }, + "Filter":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "Values":{ + "shape":"ValueStringList", + "locationName":"Value" + } + } + }, + "FilterList":{ + "type":"list", + "member":{ + "shape":"Filter", + "locationName":"Filter" + } + }, + "FleetType":{ + "type":"string", + "enum":[ + "request", + "maintain" + ] + }, + "Float":{"type":"float"}, + "FlowLog":{ + "type":"structure", + "members":{ + "CreationTime":{ + "shape":"DateTime", + "locationName":"creationTime" + }, + "FlowLogId":{ + "shape":"String", + "locationName":"flowLogId" + }, + "FlowLogStatus":{ + "shape":"String", + "locationName":"flowLogStatus" + }, + "ResourceId":{ + "shape":"String", + "locationName":"resourceId" + }, + "TrafficType":{ + "shape":"TrafficType", + "locationName":"trafficType" + }, + "LogGroupName":{ + "shape":"String", + "locationName":"logGroupName" + }, + "DeliverLogsStatus":{ + "shape":"String", + "locationName":"deliverLogsStatus" + }, + "DeliverLogsErrorMessage":{ + "shape":"String", + "locationName":"deliverLogsErrorMessage" + }, + "DeliverLogsPermissionArn":{ + "shape":"String", + "locationName":"deliverLogsPermissionArn" + } + } + }, + "FlowLogSet":{ + "type":"list", + "member":{ + "shape":"FlowLog", + "locationName":"item" + } + }, + "FlowLogsResourceType":{ + "type":"string", + "enum":[ + "VPC", + "Subnet", + "NetworkInterface" + ] + }, + "GatewayType":{ + "type":"string", + "enum":["ipsec.1"] + }, + "GetConsoleOutputRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{"shape":"String"} + } + }, + "GetConsoleOutputResult":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Timestamp":{ + "shape":"DateTime", + "locationName":"timestamp" + }, + "Output":{ + "shape":"String", + "locationName":"output" + } + } + }, + "GetConsoleScreenshotRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "DryRun":{"shape":"Boolean"}, + "InstanceId":{"shape":"String"}, + "WakeUp":{"shape":"Boolean"} + } + }, + "GetConsoleScreenshotResult":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "ImageData":{ + "shape":"String", + "locationName":"imageData" + } + } + }, + "GetPasswordDataRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{"shape":"String"} + } + }, + "GetPasswordDataResult":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Timestamp":{ + "shape":"DateTime", + "locationName":"timestamp" + }, + "PasswordData":{ + "shape":"String", + "locationName":"passwordData" + } + } + }, + "GroupIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"groupId" + } + }, + "GroupIdentifier":{ + "type":"structure", + "members":{ + "GroupName":{ + "shape":"String", + "locationName":"groupName" + }, + "GroupId":{ + "shape":"String", + "locationName":"groupId" + } + } + }, + "GroupIdentifierList":{ + "type":"list", + "member":{ + "shape":"GroupIdentifier", + "locationName":"item" + } + }, + "GroupIds":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"item" + } + }, + "GroupNameStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"GroupName" + } + }, + "HistoryRecord":{ + "type":"structure", + "required":[ + 
"Timestamp", + "EventType", + "EventInformation" + ], + "members":{ + "Timestamp":{ + "shape":"DateTime", + "locationName":"timestamp" + }, + "EventType":{ + "shape":"EventType", + "locationName":"eventType" + }, + "EventInformation":{ + "shape":"EventInformation", + "locationName":"eventInformation" + } + } + }, + "HistoryRecords":{ + "type":"list", + "member":{ + "shape":"HistoryRecord", + "locationName":"item" + } + }, + "Host":{ + "type":"structure", + "members":{ + "HostId":{ + "shape":"String", + "locationName":"hostId" + }, + "AutoPlacement":{ + "shape":"AutoPlacement", + "locationName":"autoPlacement" + }, + "HostReservationId":{ + "shape":"String", + "locationName":"hostReservationId" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + }, + "HostProperties":{ + "shape":"HostProperties", + "locationName":"hostProperties" + }, + "State":{ + "shape":"AllocationState", + "locationName":"state" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Instances":{ + "shape":"HostInstanceList", + "locationName":"instances" + }, + "AvailableCapacity":{ + "shape":"AvailableCapacity", + "locationName":"availableCapacity" + } + } + }, + "HostInstance":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "InstanceType":{ + "shape":"String", + "locationName":"instanceType" + } + } + }, + "HostInstanceList":{ + "type":"list", + "member":{ + "shape":"HostInstance", + "locationName":"item" + } + }, + "HostList":{ + "type":"list", + "member":{ + "shape":"Host", + "locationName":"item" + } + }, + "HostProperties":{ + "type":"structure", + "members":{ + "Sockets":{ + "shape":"Integer", + "locationName":"sockets" + }, + "Cores":{ + "shape":"Integer", + "locationName":"cores" + }, + "TotalVCpus":{ + "shape":"Integer", + "locationName":"totalVCpus" + }, + "InstanceType":{ + "shape":"String", + "locationName":"instanceType" + } + } + }, + "HostTenancy":{ + "type":"string", + "enum":[ + "dedicated", + "host" + ] + }, + "HypervisorType":{ + "type":"string", + "enum":[ + "ovm", + "xen" + ] + }, + "IamInstanceProfile":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"String", + "locationName":"arn" + }, + "Id":{ + "shape":"String", + "locationName":"id" + } + } + }, + "IamInstanceProfileSpecification":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"String", + "locationName":"arn" + }, + "Name":{ + "shape":"String", + "locationName":"name" + } + } + }, + "IcmpTypeCode":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"Integer", + "locationName":"type" + }, + "Code":{ + "shape":"Integer", + "locationName":"code" + } + } + }, + "IdFormat":{ + "type":"structure", + "members":{ + "Resource":{ + "shape":"String", + "locationName":"resource" + }, + "UseLongIds":{ + "shape":"Boolean", + "locationName":"useLongIds" + }, + "Deadline":{ + "shape":"DateTime", + "locationName":"deadline" + } + } + }, + "IdFormatList":{ + "type":"list", + "member":{ + "shape":"IdFormat", + "locationName":"item" + } + }, + "Image":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"String", + "locationName":"imageId" + }, + "ImageLocation":{ + "shape":"String", + "locationName":"imageLocation" + }, + "State":{ + "shape":"ImageState", + "locationName":"imageState" + }, + "OwnerId":{ + "shape":"String", + "locationName":"imageOwnerId" + }, + "CreationDate":{ + "shape":"String", + "locationName":"creationDate" + }, + "Public":{ + "shape":"Boolean", + "locationName":"isPublic" + }, + 
"ProductCodes":{ + "shape":"ProductCodeList", + "locationName":"productCodes" + }, + "Architecture":{ + "shape":"ArchitectureValues", + "locationName":"architecture" + }, + "ImageType":{ + "shape":"ImageTypeValues", + "locationName":"imageType" + }, + "KernelId":{ + "shape":"String", + "locationName":"kernelId" + }, + "RamdiskId":{ + "shape":"String", + "locationName":"ramdiskId" + }, + "Platform":{ + "shape":"PlatformValues", + "locationName":"platform" + }, + "SriovNetSupport":{ + "shape":"String", + "locationName":"sriovNetSupport" + }, + "EnaSupport":{ + "shape":"Boolean", + "locationName":"enaSupport" + }, + "StateReason":{ + "shape":"StateReason", + "locationName":"stateReason" + }, + "ImageOwnerAlias":{ + "shape":"String", + "locationName":"imageOwnerAlias" + }, + "Name":{ + "shape":"String", + "locationName":"name" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "RootDeviceType":{ + "shape":"DeviceType", + "locationName":"rootDeviceType" + }, + "RootDeviceName":{ + "shape":"String", + "locationName":"rootDeviceName" + }, + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappingList", + "locationName":"blockDeviceMapping" + }, + "VirtualizationType":{ + "shape":"VirtualizationType", + "locationName":"virtualizationType" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "Hypervisor":{ + "shape":"HypervisorType", + "locationName":"hypervisor" + } + } + }, + "ImageAttribute":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"String", + "locationName":"imageId" + }, + "LaunchPermissions":{ + "shape":"LaunchPermissionList", + "locationName":"launchPermission" + }, + "ProductCodes":{ + "shape":"ProductCodeList", + "locationName":"productCodes" + }, + "KernelId":{ + "shape":"AttributeValue", + "locationName":"kernel" + }, + "RamdiskId":{ + "shape":"AttributeValue", + "locationName":"ramdisk" + }, + "Description":{ + "shape":"AttributeValue", + "locationName":"description" + }, + "SriovNetSupport":{ + "shape":"AttributeValue", + "locationName":"sriovNetSupport" + }, + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappingList", + "locationName":"blockDeviceMapping" + } + } + }, + "ImageAttributeName":{ + "type":"string", + "enum":[ + "description", + "kernel", + "ramdisk", + "launchPermission", + "productCodes", + "blockDeviceMapping", + "sriovNetSupport" + ] + }, + "ImageDiskContainer":{ + "type":"structure", + "members":{ + "Description":{"shape":"String"}, + "Format":{"shape":"String"}, + "Url":{"shape":"String"}, + "UserBucket":{"shape":"UserBucket"}, + "DeviceName":{"shape":"String"}, + "SnapshotId":{"shape":"String"} + } + }, + "ImageDiskContainerList":{ + "type":"list", + "member":{ + "shape":"ImageDiskContainer", + "locationName":"item" + } + }, + "ImageIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ImageId" + } + }, + "ImageList":{ + "type":"list", + "member":{ + "shape":"Image", + "locationName":"item" + } + }, + "ImageState":{ + "type":"string", + "enum":[ + "pending", + "available", + "invalid", + "deregistered", + "transient", + "failed", + "error" + ] + }, + "ImageTypeValues":{ + "type":"string", + "enum":[ + "machine", + "kernel", + "ramdisk" + ] + }, + "ImportImageRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "Description":{"shape":"String"}, + "DiskContainers":{ + "shape":"ImageDiskContainerList", + "locationName":"DiskContainer" + }, + "LicenseType":{"shape":"String"}, + "Hypervisor":{"shape":"String"}, + "Architecture":{"shape":"String"}, + 
"Platform":{"shape":"String"}, + "ClientData":{"shape":"ClientData"}, + "ClientToken":{"shape":"String"}, + "RoleName":{"shape":"String"} + } + }, + "ImportImageResult":{ + "type":"structure", + "members":{ + "ImportTaskId":{ + "shape":"String", + "locationName":"importTaskId" + }, + "Architecture":{ + "shape":"String", + "locationName":"architecture" + }, + "LicenseType":{ + "shape":"String", + "locationName":"licenseType" + }, + "Platform":{ + "shape":"String", + "locationName":"platform" + }, + "Hypervisor":{ + "shape":"String", + "locationName":"hypervisor" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "SnapshotDetails":{ + "shape":"SnapshotDetailList", + "locationName":"snapshotDetailSet" + }, + "ImageId":{ + "shape":"String", + "locationName":"imageId" + }, + "Progress":{ + "shape":"String", + "locationName":"progress" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "Status":{ + "shape":"String", + "locationName":"status" + } + } + }, + "ImportImageTask":{ + "type":"structure", + "members":{ + "ImportTaskId":{ + "shape":"String", + "locationName":"importTaskId" + }, + "Architecture":{ + "shape":"String", + "locationName":"architecture" + }, + "LicenseType":{ + "shape":"String", + "locationName":"licenseType" + }, + "Platform":{ + "shape":"String", + "locationName":"platform" + }, + "Hypervisor":{ + "shape":"String", + "locationName":"hypervisor" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "SnapshotDetails":{ + "shape":"SnapshotDetailList", + "locationName":"snapshotDetailSet" + }, + "ImageId":{ + "shape":"String", + "locationName":"imageId" + }, + "Progress":{ + "shape":"String", + "locationName":"progress" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "Status":{ + "shape":"String", + "locationName":"status" + } + } + }, + "ImportImageTaskList":{ + "type":"list", + "member":{ + "shape":"ImportImageTask", + "locationName":"item" + } + }, + "ImportInstanceLaunchSpecification":{ + "type":"structure", + "members":{ + "Architecture":{ + "shape":"ArchitectureValues", + "locationName":"architecture" + }, + "GroupNames":{ + "shape":"SecurityGroupStringList", + "locationName":"GroupName" + }, + "GroupIds":{ + "shape":"SecurityGroupIdStringList", + "locationName":"GroupId" + }, + "AdditionalInfo":{ + "shape":"String", + "locationName":"additionalInfo" + }, + "UserData":{ + "shape":"UserData", + "locationName":"userData" + }, + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + }, + "Placement":{ + "shape":"Placement", + "locationName":"placement" + }, + "Monitoring":{ + "shape":"Boolean", + "locationName":"monitoring" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "InstanceInitiatedShutdownBehavior":{ + "shape":"ShutdownBehavior", + "locationName":"instanceInitiatedShutdownBehavior" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + } + } + }, + "ImportInstanceRequest":{ + "type":"structure", + "required":["Platform"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "LaunchSpecification":{ + "shape":"ImportInstanceLaunchSpecification", + "locationName":"launchSpecification" + }, + "DiskImages":{ + "shape":"DiskImageList", + "locationName":"diskImage" + }, + "Platform":{ + "shape":"PlatformValues", + "locationName":"platform" + } + } + }, + 
"ImportInstanceResult":{ + "type":"structure", + "members":{ + "ConversionTask":{ + "shape":"ConversionTask", + "locationName":"conversionTask" + } + } + }, + "ImportInstanceTaskDetails":{ + "type":"structure", + "required":["Volumes"], + "members":{ + "Volumes":{ + "shape":"ImportInstanceVolumeDetailSet", + "locationName":"volumes" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Platform":{ + "shape":"PlatformValues", + "locationName":"platform" + }, + "Description":{ + "shape":"String", + "locationName":"description" + } + } + }, + "ImportInstanceVolumeDetailItem":{ + "type":"structure", + "required":[ + "BytesConverted", + "AvailabilityZone", + "Image", + "Volume", + "Status" + ], + "members":{ + "BytesConverted":{ + "shape":"Long", + "locationName":"bytesConverted" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Image":{ + "shape":"DiskImageDescription", + "locationName":"image" + }, + "Volume":{ + "shape":"DiskImageVolumeDescription", + "locationName":"volume" + }, + "Status":{ + "shape":"String", + "locationName":"status" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "Description":{ + "shape":"String", + "locationName":"description" + } + } + }, + "ImportInstanceVolumeDetailSet":{ + "type":"list", + "member":{ + "shape":"ImportInstanceVolumeDetailItem", + "locationName":"item" + } + }, + "ImportKeyPairRequest":{ + "type":"structure", + "required":[ + "KeyName", + "PublicKeyMaterial" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "KeyName":{ + "shape":"String", + "locationName":"keyName" + }, + "PublicKeyMaterial":{ + "shape":"Blob", + "locationName":"publicKeyMaterial" + } + } + }, + "ImportKeyPairResult":{ + "type":"structure", + "members":{ + "KeyName":{ + "shape":"String", + "locationName":"keyName" + }, + "KeyFingerprint":{ + "shape":"String", + "locationName":"keyFingerprint" + } + } + }, + "ImportSnapshotRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "Description":{"shape":"String"}, + "DiskContainer":{"shape":"SnapshotDiskContainer"}, + "ClientData":{"shape":"ClientData"}, + "ClientToken":{"shape":"String"}, + "RoleName":{"shape":"String"} + } + }, + "ImportSnapshotResult":{ + "type":"structure", + "members":{ + "ImportTaskId":{ + "shape":"String", + "locationName":"importTaskId" + }, + "SnapshotTaskDetail":{ + "shape":"SnapshotTaskDetail", + "locationName":"snapshotTaskDetail" + }, + "Description":{ + "shape":"String", + "locationName":"description" + } + } + }, + "ImportSnapshotTask":{ + "type":"structure", + "members":{ + "ImportTaskId":{ + "shape":"String", + "locationName":"importTaskId" + }, + "SnapshotTaskDetail":{ + "shape":"SnapshotTaskDetail", + "locationName":"snapshotTaskDetail" + }, + "Description":{ + "shape":"String", + "locationName":"description" + } + } + }, + "ImportSnapshotTaskList":{ + "type":"list", + "member":{ + "shape":"ImportSnapshotTask", + "locationName":"item" + } + }, + "ImportTaskIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ImportTaskId" + } + }, + "ImportVolumeRequest":{ + "type":"structure", + "required":[ + "AvailabilityZone", + "Image", + "Volume" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Image":{ + "shape":"DiskImageDetail", + "locationName":"image" + }, + "Description":{ + "shape":"String", + 
"locationName":"description" + }, + "Volume":{ + "shape":"VolumeDetail", + "locationName":"volume" + } + } + }, + "ImportVolumeResult":{ + "type":"structure", + "members":{ + "ConversionTask":{ + "shape":"ConversionTask", + "locationName":"conversionTask" + } + } + }, + "ImportVolumeTaskDetails":{ + "type":"structure", + "required":[ + "BytesConverted", + "AvailabilityZone", + "Image", + "Volume" + ], + "members":{ + "BytesConverted":{ + "shape":"Long", + "locationName":"bytesConverted" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "Image":{ + "shape":"DiskImageDescription", + "locationName":"image" + }, + "Volume":{ + "shape":"DiskImageVolumeDescription", + "locationName":"volume" + } + } + }, + "Instance":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "ImageId":{ + "shape":"String", + "locationName":"imageId" + }, + "State":{ + "shape":"InstanceState", + "locationName":"instanceState" + }, + "PrivateDnsName":{ + "shape":"String", + "locationName":"privateDnsName" + }, + "PublicDnsName":{ + "shape":"String", + "locationName":"dnsName" + }, + "StateTransitionReason":{ + "shape":"String", + "locationName":"reason" + }, + "KeyName":{ + "shape":"String", + "locationName":"keyName" + }, + "AmiLaunchIndex":{ + "shape":"Integer", + "locationName":"amiLaunchIndex" + }, + "ProductCodes":{ + "shape":"ProductCodeList", + "locationName":"productCodes" + }, + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + }, + "LaunchTime":{ + "shape":"DateTime", + "locationName":"launchTime" + }, + "Placement":{ + "shape":"Placement", + "locationName":"placement" + }, + "KernelId":{ + "shape":"String", + "locationName":"kernelId" + }, + "RamdiskId":{ + "shape":"String", + "locationName":"ramdiskId" + }, + "Platform":{ + "shape":"PlatformValues", + "locationName":"platform" + }, + "Monitoring":{ + "shape":"Monitoring", + "locationName":"monitoring" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "PublicIpAddress":{ + "shape":"String", + "locationName":"ipAddress" + }, + "StateReason":{ + "shape":"StateReason", + "locationName":"stateReason" + }, + "Architecture":{ + "shape":"ArchitectureValues", + "locationName":"architecture" + }, + "RootDeviceType":{ + "shape":"DeviceType", + "locationName":"rootDeviceType" + }, + "RootDeviceName":{ + "shape":"String", + "locationName":"rootDeviceName" + }, + "BlockDeviceMappings":{ + "shape":"InstanceBlockDeviceMappingList", + "locationName":"blockDeviceMapping" + }, + "VirtualizationType":{ + "shape":"VirtualizationType", + "locationName":"virtualizationType" + }, + "InstanceLifecycle":{ + "shape":"InstanceLifecycleType", + "locationName":"instanceLifecycle" + }, + "SpotInstanceRequestId":{ + "shape":"String", + "locationName":"spotInstanceRequestId" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "SecurityGroups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + }, + "SourceDestCheck":{ + "shape":"Boolean", + "locationName":"sourceDestCheck" + }, + "Hypervisor":{ + "shape":"HypervisorType", + "locationName":"hypervisor" + }, + "NetworkInterfaces":{ + "shape":"InstanceNetworkInterfaceList", + 
"locationName":"networkInterfaceSet" + }, + "IamInstanceProfile":{ + "shape":"IamInstanceProfile", + "locationName":"iamInstanceProfile" + }, + "EbsOptimized":{ + "shape":"Boolean", + "locationName":"ebsOptimized" + }, + "SriovNetSupport":{ + "shape":"String", + "locationName":"sriovNetSupport" + }, + "EnaSupport":{ + "shape":"Boolean", + "locationName":"enaSupport" + } + } + }, + "InstanceAttribute":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "InstanceType":{ + "shape":"AttributeValue", + "locationName":"instanceType" + }, + "KernelId":{ + "shape":"AttributeValue", + "locationName":"kernel" + }, + "RamdiskId":{ + "shape":"AttributeValue", + "locationName":"ramdisk" + }, + "UserData":{ + "shape":"AttributeValue", + "locationName":"userData" + }, + "DisableApiTermination":{ + "shape":"AttributeBooleanValue", + "locationName":"disableApiTermination" + }, + "InstanceInitiatedShutdownBehavior":{ + "shape":"AttributeValue", + "locationName":"instanceInitiatedShutdownBehavior" + }, + "RootDeviceName":{ + "shape":"AttributeValue", + "locationName":"rootDeviceName" + }, + "BlockDeviceMappings":{ + "shape":"InstanceBlockDeviceMappingList", + "locationName":"blockDeviceMapping" + }, + "ProductCodes":{ + "shape":"ProductCodeList", + "locationName":"productCodes" + }, + "EbsOptimized":{ + "shape":"AttributeBooleanValue", + "locationName":"ebsOptimized" + }, + "SriovNetSupport":{ + "shape":"AttributeValue", + "locationName":"sriovNetSupport" + }, + "EnaSupport":{ + "shape":"AttributeBooleanValue", + "locationName":"enaSupport" + }, + "SourceDestCheck":{ + "shape":"AttributeBooleanValue", + "locationName":"sourceDestCheck" + }, + "Groups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + } + } + }, + "InstanceAttributeName":{ + "type":"string", + "enum":[ + "instanceType", + "kernel", + "ramdisk", + "userData", + "disableApiTermination", + "instanceInitiatedShutdownBehavior", + "rootDeviceName", + "blockDeviceMapping", + "productCodes", + "sourceDestCheck", + "groupSet", + "ebsOptimized", + "sriovNetSupport", + "enaSupport" + ] + }, + "InstanceBlockDeviceMapping":{ + "type":"structure", + "members":{ + "DeviceName":{ + "shape":"String", + "locationName":"deviceName" + }, + "Ebs":{ + "shape":"EbsInstanceBlockDevice", + "locationName":"ebs" + } + } + }, + "InstanceBlockDeviceMappingList":{ + "type":"list", + "member":{ + "shape":"InstanceBlockDeviceMapping", + "locationName":"item" + } + }, + "InstanceBlockDeviceMappingSpecification":{ + "type":"structure", + "members":{ + "DeviceName":{ + "shape":"String", + "locationName":"deviceName" + }, + "Ebs":{ + "shape":"EbsInstanceBlockDeviceSpecification", + "locationName":"ebs" + }, + "VirtualName":{ + "shape":"String", + "locationName":"virtualName" + }, + "NoDevice":{ + "shape":"String", + "locationName":"noDevice" + } + } + }, + "InstanceBlockDeviceMappingSpecificationList":{ + "type":"list", + "member":{ + "shape":"InstanceBlockDeviceMappingSpecification", + "locationName":"item" + } + }, + "InstanceCapacity":{ + "type":"structure", + "members":{ + "InstanceType":{ + "shape":"String", + "locationName":"instanceType" + }, + "AvailableCapacity":{ + "shape":"Integer", + "locationName":"availableCapacity" + }, + "TotalCapacity":{ + "shape":"Integer", + "locationName":"totalCapacity" + } + } + }, + "InstanceCount":{ + "type":"structure", + "members":{ + "State":{ + "shape":"ListingState", + "locationName":"state" + }, + "InstanceCount":{ + "shape":"Integer", + 
"locationName":"instanceCount" + } + } + }, + "InstanceCountList":{ + "type":"list", + "member":{ + "shape":"InstanceCount", + "locationName":"item" + } + }, + "InstanceExportDetails":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "TargetEnvironment":{ + "shape":"ExportEnvironment", + "locationName":"targetEnvironment" + } + } + }, + "InstanceIdSet":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"item" + } + }, + "InstanceIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"InstanceId" + } + }, + "InstanceLifecycleType":{ + "type":"string", + "enum":[ + "spot", + "scheduled" + ] + }, + "InstanceList":{ + "type":"list", + "member":{ + "shape":"Instance", + "locationName":"item" + } + }, + "InstanceMonitoring":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Monitoring":{ + "shape":"Monitoring", + "locationName":"monitoring" + } + } + }, + "InstanceMonitoringList":{ + "type":"list", + "member":{ + "shape":"InstanceMonitoring", + "locationName":"item" + } + }, + "InstanceNetworkInterface":{ + "type":"structure", + "members":{ + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "Status":{ + "shape":"NetworkInterfaceStatus", + "locationName":"status" + }, + "MacAddress":{ + "shape":"String", + "locationName":"macAddress" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "PrivateDnsName":{ + "shape":"String", + "locationName":"privateDnsName" + }, + "SourceDestCheck":{ + "shape":"Boolean", + "locationName":"sourceDestCheck" + }, + "Groups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + }, + "Attachment":{ + "shape":"InstanceNetworkInterfaceAttachment", + "locationName":"attachment" + }, + "Association":{ + "shape":"InstanceNetworkInterfaceAssociation", + "locationName":"association" + }, + "PrivateIpAddresses":{ + "shape":"InstancePrivateIpAddressList", + "locationName":"privateIpAddressesSet" + } + } + }, + "InstanceNetworkInterfaceAssociation":{ + "type":"structure", + "members":{ + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + }, + "PublicDnsName":{ + "shape":"String", + "locationName":"publicDnsName" + }, + "IpOwnerId":{ + "shape":"String", + "locationName":"ipOwnerId" + } + } + }, + "InstanceNetworkInterfaceAttachment":{ + "type":"structure", + "members":{ + "AttachmentId":{ + "shape":"String", + "locationName":"attachmentId" + }, + "DeviceIndex":{ + "shape":"Integer", + "locationName":"deviceIndex" + }, + "Status":{ + "shape":"AttachmentStatus", + "locationName":"status" + }, + "AttachTime":{ + "shape":"DateTime", + "locationName":"attachTime" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + "locationName":"deleteOnTermination" + } + } + }, + "InstanceNetworkInterfaceList":{ + "type":"list", + "member":{ + "shape":"InstanceNetworkInterface", + "locationName":"item" + } + }, + "InstanceNetworkInterfaceSpecification":{ + "type":"structure", + "members":{ + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "DeviceIndex":{ + "shape":"Integer", + "locationName":"deviceIndex" + }, + "SubnetId":{ + 
"shape":"String", + "locationName":"subnetId" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "Groups":{ + "shape":"SecurityGroupIdStringList", + "locationName":"SecurityGroupId" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + "locationName":"deleteOnTermination" + }, + "PrivateIpAddresses":{ + "shape":"PrivateIpAddressSpecificationList", + "locationName":"privateIpAddressesSet", + "queryName":"PrivateIpAddresses" + }, + "SecondaryPrivateIpAddressCount":{ + "shape":"Integer", + "locationName":"secondaryPrivateIpAddressCount" + }, + "AssociatePublicIpAddress":{ + "shape":"Boolean", + "locationName":"associatePublicIpAddress" + } + } + }, + "InstanceNetworkInterfaceSpecificationList":{ + "type":"list", + "member":{ + "shape":"InstanceNetworkInterfaceSpecification", + "locationName":"item" + } + }, + "InstancePrivateIpAddress":{ + "type":"structure", + "members":{ + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "PrivateDnsName":{ + "shape":"String", + "locationName":"privateDnsName" + }, + "Primary":{ + "shape":"Boolean", + "locationName":"primary" + }, + "Association":{ + "shape":"InstanceNetworkInterfaceAssociation", + "locationName":"association" + } + } + }, + "InstancePrivateIpAddressList":{ + "type":"list", + "member":{ + "shape":"InstancePrivateIpAddress", + "locationName":"item" + } + }, + "InstanceState":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"Integer", + "locationName":"code" + }, + "Name":{ + "shape":"InstanceStateName", + "locationName":"name" + } + } + }, + "InstanceStateChange":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "CurrentState":{ + "shape":"InstanceState", + "locationName":"currentState" + }, + "PreviousState":{ + "shape":"InstanceState", + "locationName":"previousState" + } + } + }, + "InstanceStateChangeList":{ + "type":"list", + "member":{ + "shape":"InstanceStateChange", + "locationName":"item" + } + }, + "InstanceStateName":{ + "type":"string", + "enum":[ + "pending", + "running", + "shutting-down", + "terminated", + "stopping", + "stopped" + ] + }, + "InstanceStatus":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Events":{ + "shape":"InstanceStatusEventList", + "locationName":"eventsSet" + }, + "InstanceState":{ + "shape":"InstanceState", + "locationName":"instanceState" + }, + "SystemStatus":{ + "shape":"InstanceStatusSummary", + "locationName":"systemStatus" + }, + "InstanceStatus":{ + "shape":"InstanceStatusSummary", + "locationName":"instanceStatus" + } + } + }, + "InstanceStatusDetails":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"StatusName", + "locationName":"name" + }, + "Status":{ + "shape":"StatusType", + "locationName":"status" + }, + "ImpairedSince":{ + "shape":"DateTime", + "locationName":"impairedSince" + } + } + }, + "InstanceStatusDetailsList":{ + "type":"list", + "member":{ + "shape":"InstanceStatusDetails", + "locationName":"item" + } + }, + "InstanceStatusEvent":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"EventCode", + "locationName":"code" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "NotBefore":{ + "shape":"DateTime", + "locationName":"notBefore" + }, + "NotAfter":{ + "shape":"DateTime", 
+ "locationName":"notAfter" + } + } + }, + "InstanceStatusEventList":{ + "type":"list", + "member":{ + "shape":"InstanceStatusEvent", + "locationName":"item" + } + }, + "InstanceStatusList":{ + "type":"list", + "member":{ + "shape":"InstanceStatus", + "locationName":"item" + } + }, + "InstanceStatusSummary":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"SummaryStatus", + "locationName":"status" + }, + "Details":{ + "shape":"InstanceStatusDetailsList", + "locationName":"details" + } + } + }, + "InstanceType":{ + "type":"string", + "enum":[ + "t1.micro", + "t2.nano", + "t2.micro", + "t2.small", + "t2.medium", + "t2.large", + "m1.small", + "m1.medium", + "m1.large", + "m1.xlarge", + "m3.medium", + "m3.large", + "m3.xlarge", + "m3.2xlarge", + "m4.large", + "m4.xlarge", + "m4.2xlarge", + "m4.4xlarge", + "m4.10xlarge", + "m2.xlarge", + "m2.2xlarge", + "m2.4xlarge", + "cr1.8xlarge", + "r3.large", + "r3.xlarge", + "r3.2xlarge", + "r3.4xlarge", + "r3.8xlarge", + "x1.4xlarge", + "x1.8xlarge", + "x1.16xlarge", + "x1.32xlarge", + "i2.xlarge", + "i2.2xlarge", + "i2.4xlarge", + "i2.8xlarge", + "hi1.4xlarge", + "hs1.8xlarge", + "c1.medium", + "c1.xlarge", + "c3.large", + "c3.xlarge", + "c3.2xlarge", + "c3.4xlarge", + "c3.8xlarge", + "c4.large", + "c4.xlarge", + "c4.2xlarge", + "c4.4xlarge", + "c4.8xlarge", + "cc1.4xlarge", + "cc2.8xlarge", + "g2.2xlarge", + "g2.8xlarge", + "cg1.4xlarge", + "d2.xlarge", + "d2.2xlarge", + "d2.4xlarge", + "d2.8xlarge" + ] + }, + "InstanceTypeList":{ + "type":"list", + "member":{"shape":"InstanceType"} + }, + "Integer":{"type":"integer"}, + "InternetGateway":{ + "type":"structure", + "members":{ + "InternetGatewayId":{ + "shape":"String", + "locationName":"internetGatewayId" + }, + "Attachments":{ + "shape":"InternetGatewayAttachmentList", + "locationName":"attachmentSet" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "InternetGatewayAttachment":{ + "type":"structure", + "members":{ + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "State":{ + "shape":"AttachmentStatus", + "locationName":"state" + } + } + }, + "InternetGatewayAttachmentList":{ + "type":"list", + "member":{ + "shape":"InternetGatewayAttachment", + "locationName":"item" + } + }, + "InternetGatewayList":{ + "type":"list", + "member":{ + "shape":"InternetGateway", + "locationName":"item" + } + }, + "IpPermission":{ + "type":"structure", + "members":{ + "IpProtocol":{ + "shape":"String", + "locationName":"ipProtocol" + }, + "FromPort":{ + "shape":"Integer", + "locationName":"fromPort" + }, + "ToPort":{ + "shape":"Integer", + "locationName":"toPort" + }, + "UserIdGroupPairs":{ + "shape":"UserIdGroupPairList", + "locationName":"groups" + }, + "IpRanges":{ + "shape":"IpRangeList", + "locationName":"ipRanges" + }, + "PrefixListIds":{ + "shape":"PrefixListIdList", + "locationName":"prefixListIds" + } + } + }, + "IpPermissionList":{ + "type":"list", + "member":{ + "shape":"IpPermission", + "locationName":"item" + } + }, + "IpRange":{ + "type":"structure", + "members":{ + "CidrIp":{ + "shape":"String", + "locationName":"cidrIp" + } + } + }, + "IpRangeList":{ + "type":"list", + "member":{ + "shape":"IpRange", + "locationName":"item" + } + }, + "IpRanges":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"item" + } + }, + "KeyNameStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"KeyName" + } + }, + "KeyPair":{ + "type":"structure", + "members":{ + "KeyName":{ + "shape":"String", + "locationName":"keyName" + }, + 
"KeyFingerprint":{ + "shape":"String", + "locationName":"keyFingerprint" + }, + "KeyMaterial":{ + "shape":"String", + "locationName":"keyMaterial" + } + } + }, + "KeyPairInfo":{ + "type":"structure", + "members":{ + "KeyName":{ + "shape":"String", + "locationName":"keyName" + }, + "KeyFingerprint":{ + "shape":"String", + "locationName":"keyFingerprint" + } + } + }, + "KeyPairList":{ + "type":"list", + "member":{ + "shape":"KeyPairInfo", + "locationName":"item" + } + }, + "LaunchPermission":{ + "type":"structure", + "members":{ + "UserId":{ + "shape":"String", + "locationName":"userId" + }, + "Group":{ + "shape":"PermissionGroup", + "locationName":"group" + } + } + }, + "LaunchPermissionList":{ + "type":"list", + "member":{ + "shape":"LaunchPermission", + "locationName":"item" + } + }, + "LaunchPermissionModifications":{ + "type":"structure", + "members":{ + "Add":{"shape":"LaunchPermissionList"}, + "Remove":{"shape":"LaunchPermissionList"} + } + }, + "LaunchSpecification":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"String", + "locationName":"imageId" + }, + "KeyName":{ + "shape":"String", + "locationName":"keyName" + }, + "SecurityGroups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + }, + "UserData":{ + "shape":"String", + "locationName":"userData" + }, + "AddressingType":{ + "shape":"String", + "locationName":"addressingType" + }, + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + }, + "Placement":{ + "shape":"SpotPlacement", + "locationName":"placement" + }, + "KernelId":{ + "shape":"String", + "locationName":"kernelId" + }, + "RamdiskId":{ + "shape":"String", + "locationName":"ramdiskId" + }, + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappingList", + "locationName":"blockDeviceMapping" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "NetworkInterfaces":{ + "shape":"InstanceNetworkInterfaceSpecificationList", + "locationName":"networkInterfaceSet" + }, + "IamInstanceProfile":{ + "shape":"IamInstanceProfileSpecification", + "locationName":"iamInstanceProfile" + }, + "EbsOptimized":{ + "shape":"Boolean", + "locationName":"ebsOptimized" + }, + "Monitoring":{ + "shape":"RunInstancesMonitoringEnabled", + "locationName":"monitoring" + } + } + }, + "LaunchSpecsList":{ + "type":"list", + "member":{ + "shape":"SpotFleetLaunchSpecification", + "locationName":"item" + }, + "min":1 + }, + "ListingState":{ + "type":"string", + "enum":[ + "available", + "sold", + "cancelled", + "pending" + ] + }, + "ListingStatus":{ + "type":"string", + "enum":[ + "active", + "pending", + "cancelled", + "closed" + ] + }, + "Long":{"type":"long"}, + "MaxResults":{ + "type":"integer", + "max":255, + "min":5 + }, + "ModifyHostsRequest":{ + "type":"structure", + "required":[ + "HostIds", + "AutoPlacement" + ], + "members":{ + "HostIds":{ + "shape":"RequestHostIdList", + "locationName":"hostId" + }, + "AutoPlacement":{ + "shape":"AutoPlacement", + "locationName":"autoPlacement" + } + } + }, + "ModifyHostsResult":{ + "type":"structure", + "members":{ + "Successful":{ + "shape":"ResponseHostIdList", + "locationName":"successful" + }, + "Unsuccessful":{ + "shape":"UnsuccessfulItemList", + "locationName":"unsuccessful" + } + } + }, + "ModifyIdFormatRequest":{ + "type":"structure", + "required":[ + "Resource", + "UseLongIds" + ], + "members":{ + "Resource":{"shape":"String"}, + "UseLongIds":{"shape":"Boolean"} + } + }, + "ModifyIdentityIdFormatRequest":{ + "type":"structure", + "required":[ + "Resource", + "UseLongIds", + 
"PrincipalArn" + ], + "members":{ + "Resource":{ + "shape":"String", + "locationName":"resource" + }, + "UseLongIds":{ + "shape":"Boolean", + "locationName":"useLongIds" + }, + "PrincipalArn":{ + "shape":"String", + "locationName":"principalArn" + } + } + }, + "ModifyImageAttributeRequest":{ + "type":"structure", + "required":["ImageId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ImageId":{"shape":"String"}, + "Attribute":{"shape":"String"}, + "OperationType":{"shape":"OperationType"}, + "UserIds":{ + "shape":"UserIdStringList", + "locationName":"UserId" + }, + "UserGroups":{ + "shape":"UserGroupStringList", + "locationName":"UserGroup" + }, + "ProductCodes":{ + "shape":"ProductCodeStringList", + "locationName":"ProductCode" + }, + "Value":{"shape":"String"}, + "LaunchPermission":{"shape":"LaunchPermissionModifications"}, + "Description":{"shape":"AttributeValue"} + } + }, + "ModifyInstanceAttributeRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Attribute":{ + "shape":"InstanceAttributeName", + "locationName":"attribute" + }, + "Value":{ + "shape":"String", + "locationName":"value" + }, + "BlockDeviceMappings":{ + "shape":"InstanceBlockDeviceMappingSpecificationList", + "locationName":"blockDeviceMapping" + }, + "SourceDestCheck":{"shape":"AttributeBooleanValue"}, + "DisableApiTermination":{ + "shape":"AttributeBooleanValue", + "locationName":"disableApiTermination" + }, + "InstanceType":{ + "shape":"AttributeValue", + "locationName":"instanceType" + }, + "Kernel":{ + "shape":"AttributeValue", + "locationName":"kernel" + }, + "Ramdisk":{ + "shape":"AttributeValue", + "locationName":"ramdisk" + }, + "UserData":{ + "shape":"BlobAttributeValue", + "locationName":"userData" + }, + "InstanceInitiatedShutdownBehavior":{ + "shape":"AttributeValue", + "locationName":"instanceInitiatedShutdownBehavior" + }, + "Groups":{ + "shape":"GroupIdStringList", + "locationName":"GroupId" + }, + "EbsOptimized":{ + "shape":"AttributeBooleanValue", + "locationName":"ebsOptimized" + }, + "SriovNetSupport":{ + "shape":"AttributeValue", + "locationName":"sriovNetSupport" + }, + "EnaSupport":{ + "shape":"AttributeBooleanValue", + "locationName":"enaSupport" + } + } + }, + "ModifyInstancePlacementRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Tenancy":{ + "shape":"HostTenancy", + "locationName":"tenancy" + }, + "Affinity":{ + "shape":"Affinity", + "locationName":"affinity" + }, + "HostId":{ + "shape":"String", + "locationName":"hostId" + } + } + }, + "ModifyInstancePlacementResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "ModifyNetworkInterfaceAttributeRequest":{ + "type":"structure", + "required":["NetworkInterfaceId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "Description":{ + "shape":"AttributeValue", + "locationName":"description" + }, + "SourceDestCheck":{ + "shape":"AttributeBooleanValue", + "locationName":"sourceDestCheck" + }, + "Groups":{ + "shape":"SecurityGroupIdStringList", + "locationName":"SecurityGroupId" + }, + "Attachment":{ + "shape":"NetworkInterfaceAttachmentChanges", + 
"locationName":"attachment" + } + } + }, + "ModifyReservedInstancesRequest":{ + "type":"structure", + "required":[ + "ReservedInstancesIds", + "TargetConfigurations" + ], + "members":{ + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + }, + "ReservedInstancesIds":{ + "shape":"ReservedInstancesIdStringList", + "locationName":"ReservedInstancesId" + }, + "TargetConfigurations":{ + "shape":"ReservedInstancesConfigurationList", + "locationName":"ReservedInstancesConfigurationSetItemType" + } + } + }, + "ModifyReservedInstancesResult":{ + "type":"structure", + "members":{ + "ReservedInstancesModificationId":{ + "shape":"String", + "locationName":"reservedInstancesModificationId" + } + } + }, + "ModifySnapshotAttributeRequest":{ + "type":"structure", + "required":["SnapshotId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SnapshotId":{"shape":"String"}, + "Attribute":{"shape":"SnapshotAttributeName"}, + "OperationType":{"shape":"OperationType"}, + "UserIds":{ + "shape":"UserIdStringList", + "locationName":"UserId" + }, + "GroupNames":{ + "shape":"GroupNameStringList", + "locationName":"UserGroup" + }, + "CreateVolumePermission":{"shape":"CreateVolumePermissionModifications"} + } + }, + "ModifySpotFleetRequestRequest":{ + "type":"structure", + "required":["SpotFleetRequestId"], + "members":{ + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + }, + "TargetCapacity":{ + "shape":"Integer", + "locationName":"targetCapacity" + }, + "ExcessCapacityTerminationPolicy":{ + "shape":"ExcessCapacityTerminationPolicy", + "locationName":"excessCapacityTerminationPolicy" + } + } + }, + "ModifySpotFleetRequestResponse":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "ModifySubnetAttributeRequest":{ + "type":"structure", + "required":["SubnetId"], + "members":{ + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "MapPublicIpOnLaunch":{"shape":"AttributeBooleanValue"} + } + }, + "ModifyVolumeAttributeRequest":{ + "type":"structure", + "required":["VolumeId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeId":{"shape":"String"}, + "AutoEnableIO":{"shape":"AttributeBooleanValue"} + } + }, + "ModifyVpcAttributeRequest":{ + "type":"structure", + "required":["VpcId"], + "members":{ + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "EnableDnsSupport":{"shape":"AttributeBooleanValue"}, + "EnableDnsHostnames":{"shape":"AttributeBooleanValue"} + } + }, + "ModifyVpcEndpointRequest":{ + "type":"structure", + "required":["VpcEndpointId"], + "members":{ + "DryRun":{"shape":"Boolean"}, + "VpcEndpointId":{"shape":"String"}, + "ResetPolicy":{"shape":"Boolean"}, + "PolicyDocument":{"shape":"String"}, + "AddRouteTableIds":{ + "shape":"ValueStringList", + "locationName":"AddRouteTableId" + }, + "RemoveRouteTableIds":{ + "shape":"ValueStringList", + "locationName":"RemoveRouteTableId" + } + } + }, + "ModifyVpcEndpointResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "ModifyVpcPeeringConnectionOptionsRequest":{ + "type":"structure", + "required":["VpcPeeringConnectionId"], + "members":{ + "DryRun":{"shape":"Boolean"}, + "VpcPeeringConnectionId":{"shape":"String"}, + "RequesterPeeringConnectionOptions":{"shape":"PeeringConnectionOptionsRequest"}, + "AccepterPeeringConnectionOptions":{"shape":"PeeringConnectionOptionsRequest"} + } + }, + 
"ModifyVpcPeeringConnectionOptionsResult":{ + "type":"structure", + "members":{ + "RequesterPeeringConnectionOptions":{ + "shape":"PeeringConnectionOptions", + "locationName":"requesterPeeringConnectionOptions" + }, + "AccepterPeeringConnectionOptions":{ + "shape":"PeeringConnectionOptions", + "locationName":"accepterPeeringConnectionOptions" + } + } + }, + "MonitorInstancesRequest":{ + "type":"structure", + "required":["InstanceIds"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + } + } + }, + "MonitorInstancesResult":{ + "type":"structure", + "members":{ + "InstanceMonitorings":{ + "shape":"InstanceMonitoringList", + "locationName":"instancesSet" + } + } + }, + "Monitoring":{ + "type":"structure", + "members":{ + "State":{ + "shape":"MonitoringState", + "locationName":"state" + } + } + }, + "MonitoringState":{ + "type":"string", + "enum":[ + "disabled", + "disabling", + "enabled", + "pending" + ] + }, + "MoveAddressToVpcRequest":{ + "type":"structure", + "required":["PublicIp"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + } + } + }, + "MoveAddressToVpcResult":{ + "type":"structure", + "members":{ + "AllocationId":{ + "shape":"String", + "locationName":"allocationId" + }, + "Status":{ + "shape":"Status", + "locationName":"status" + } + } + }, + "MoveStatus":{ + "type":"string", + "enum":[ + "movingToVpc", + "restoringToClassic" + ] + }, + "MovingAddressStatus":{ + "type":"structure", + "members":{ + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + }, + "MoveStatus":{ + "shape":"MoveStatus", + "locationName":"moveStatus" + } + } + }, + "MovingAddressStatusSet":{ + "type":"list", + "member":{ + "shape":"MovingAddressStatus", + "locationName":"item" + } + }, + "NatGateway":{ + "type":"structure", + "members":{ + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "NatGatewayId":{ + "shape":"String", + "locationName":"natGatewayId" + }, + "CreateTime":{ + "shape":"DateTime", + "locationName":"createTime" + }, + "DeleteTime":{ + "shape":"DateTime", + "locationName":"deleteTime" + }, + "NatGatewayAddresses":{ + "shape":"NatGatewayAddressList", + "locationName":"natGatewayAddressSet" + }, + "State":{ + "shape":"NatGatewayState", + "locationName":"state" + }, + "FailureCode":{ + "shape":"String", + "locationName":"failureCode" + }, + "FailureMessage":{ + "shape":"String", + "locationName":"failureMessage" + }, + "ProvisionedBandwidth":{ + "shape":"ProvisionedBandwidth", + "locationName":"provisionedBandwidth" + } + } + }, + "NatGatewayAddress":{ + "type":"structure", + "members":{ + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + }, + "AllocationId":{ + "shape":"String", + "locationName":"allocationId" + }, + "PrivateIp":{ + "shape":"String", + "locationName":"privateIp" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + } + } + }, + "NatGatewayAddressList":{ + "type":"list", + "member":{ + "shape":"NatGatewayAddress", + "locationName":"item" + } + }, + "NatGatewayList":{ + "type":"list", + "member":{ + "shape":"NatGateway", + "locationName":"item" + } + }, + "NatGatewayState":{ + "type":"string", + "enum":[ + "pending", + "failed", + "available", + "deleting", + "deleted" + ] + }, + "NetworkAcl":{ + "type":"structure", + "members":{ + 
"NetworkAclId":{ + "shape":"String", + "locationName":"networkAclId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "IsDefault":{ + "shape":"Boolean", + "locationName":"default" + }, + "Entries":{ + "shape":"NetworkAclEntryList", + "locationName":"entrySet" + }, + "Associations":{ + "shape":"NetworkAclAssociationList", + "locationName":"associationSet" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "NetworkAclAssociation":{ + "type":"structure", + "members":{ + "NetworkAclAssociationId":{ + "shape":"String", + "locationName":"networkAclAssociationId" + }, + "NetworkAclId":{ + "shape":"String", + "locationName":"networkAclId" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + } + } + }, + "NetworkAclAssociationList":{ + "type":"list", + "member":{ + "shape":"NetworkAclAssociation", + "locationName":"item" + } + }, + "NetworkAclEntry":{ + "type":"structure", + "members":{ + "RuleNumber":{ + "shape":"Integer", + "locationName":"ruleNumber" + }, + "Protocol":{ + "shape":"String", + "locationName":"protocol" + }, + "RuleAction":{ + "shape":"RuleAction", + "locationName":"ruleAction" + }, + "Egress":{ + "shape":"Boolean", + "locationName":"egress" + }, + "CidrBlock":{ + "shape":"String", + "locationName":"cidrBlock" + }, + "IcmpTypeCode":{ + "shape":"IcmpTypeCode", + "locationName":"icmpTypeCode" + }, + "PortRange":{ + "shape":"PortRange", + "locationName":"portRange" + } + } + }, + "NetworkAclEntryList":{ + "type":"list", + "member":{ + "shape":"NetworkAclEntry", + "locationName":"item" + } + }, + "NetworkAclList":{ + "type":"list", + "member":{ + "shape":"NetworkAcl", + "locationName":"item" + } + }, + "NetworkInterface":{ + "type":"structure", + "members":{ + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "RequesterId":{ + "shape":"String", + "locationName":"requesterId" + }, + "RequesterManaged":{ + "shape":"Boolean", + "locationName":"requesterManaged" + }, + "Status":{ + "shape":"NetworkInterfaceStatus", + "locationName":"status" + }, + "MacAddress":{ + "shape":"String", + "locationName":"macAddress" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "PrivateDnsName":{ + "shape":"String", + "locationName":"privateDnsName" + }, + "SourceDestCheck":{ + "shape":"Boolean", + "locationName":"sourceDestCheck" + }, + "Groups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + }, + "Attachment":{ + "shape":"NetworkInterfaceAttachment", + "locationName":"attachment" + }, + "Association":{ + "shape":"NetworkInterfaceAssociation", + "locationName":"association" + }, + "TagSet":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "PrivateIpAddresses":{ + "shape":"NetworkInterfacePrivateIpAddressList", + "locationName":"privateIpAddressesSet" + }, + "InterfaceType":{ + "shape":"NetworkInterfaceType", + "locationName":"interfaceType" + } + } + }, + "NetworkInterfaceAssociation":{ + "type":"structure", + "members":{ + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + }, + "PublicDnsName":{ + "shape":"String", + "locationName":"publicDnsName" + }, + "IpOwnerId":{ + 
"shape":"String", + "locationName":"ipOwnerId" + }, + "AllocationId":{ + "shape":"String", + "locationName":"allocationId" + }, + "AssociationId":{ + "shape":"String", + "locationName":"associationId" + } + } + }, + "NetworkInterfaceAttachment":{ + "type":"structure", + "members":{ + "AttachmentId":{ + "shape":"String", + "locationName":"attachmentId" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "InstanceOwnerId":{ + "shape":"String", + "locationName":"instanceOwnerId" + }, + "DeviceIndex":{ + "shape":"Integer", + "locationName":"deviceIndex" + }, + "Status":{ + "shape":"AttachmentStatus", + "locationName":"status" + }, + "AttachTime":{ + "shape":"DateTime", + "locationName":"attachTime" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + "locationName":"deleteOnTermination" + } + } + }, + "NetworkInterfaceAttachmentChanges":{ + "type":"structure", + "members":{ + "AttachmentId":{ + "shape":"String", + "locationName":"attachmentId" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + "locationName":"deleteOnTermination" + } + } + }, + "NetworkInterfaceAttribute":{ + "type":"string", + "enum":[ + "description", + "groupSet", + "sourceDestCheck", + "attachment" + ] + }, + "NetworkInterfaceIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"item" + } + }, + "NetworkInterfaceList":{ + "type":"list", + "member":{ + "shape":"NetworkInterface", + "locationName":"item" + } + }, + "NetworkInterfacePrivateIpAddress":{ + "type":"structure", + "members":{ + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "PrivateDnsName":{ + "shape":"String", + "locationName":"privateDnsName" + }, + "Primary":{ + "shape":"Boolean", + "locationName":"primary" + }, + "Association":{ + "shape":"NetworkInterfaceAssociation", + "locationName":"association" + } + } + }, + "NetworkInterfacePrivateIpAddressList":{ + "type":"list", + "member":{ + "shape":"NetworkInterfacePrivateIpAddress", + "locationName":"item" + } + }, + "NetworkInterfaceStatus":{ + "type":"string", + "enum":[ + "available", + "attaching", + "in-use", + "detaching" + ] + }, + "NetworkInterfaceType":{ + "type":"string", + "enum":[ + "interface", + "natGateway" + ] + }, + "NewDhcpConfiguration":{ + "type":"structure", + "members":{ + "Key":{ + "shape":"String", + "locationName":"key" + }, + "Values":{ + "shape":"ValueStringList", + "locationName":"Value" + } + } + }, + "NewDhcpConfigurationList":{ + "type":"list", + "member":{ + "shape":"NewDhcpConfiguration", + "locationName":"item" + } + }, + "NextToken":{ + "type":"string", + "max":1024, + "min":1 + }, + "OccurrenceDayRequestSet":{ + "type":"list", + "member":{ + "shape":"Integer", + "locationName":"OccurenceDay" + } + }, + "OccurrenceDaySet":{ + "type":"list", + "member":{ + "shape":"Integer", + "locationName":"item" + } + }, + "OfferingTypeValues":{ + "type":"string", + "enum":[ + "Heavy Utilization", + "Medium Utilization", + "Light Utilization", + "No Upfront", + "Partial Upfront", + "All Upfront" + ] + }, + "OperationType":{ + "type":"string", + "enum":[ + "add", + "remove" + ] + }, + "OwnerStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"Owner" + } + }, + "PeeringConnectionOptions":{ + "type":"structure", + "members":{ + "AllowEgressFromLocalClassicLinkToRemoteVpc":{ + "shape":"Boolean", + "locationName":"allowEgressFromLocalClassicLinkToRemoteVpc" + }, + "AllowEgressFromLocalVpcToRemoteClassicLink":{ + "shape":"Boolean", + 
"locationName":"allowEgressFromLocalVpcToRemoteClassicLink" + } + } + }, + "PeeringConnectionOptionsRequest":{ + "type":"structure", + "required":[ + "AllowEgressFromLocalClassicLinkToRemoteVpc", + "AllowEgressFromLocalVpcToRemoteClassicLink" + ], + "members":{ + "AllowEgressFromLocalClassicLinkToRemoteVpc":{"shape":"Boolean"}, + "AllowEgressFromLocalVpcToRemoteClassicLink":{"shape":"Boolean"} + } + }, + "PermissionGroup":{ + "type":"string", + "enum":["all"] + }, + "Placement":{ + "type":"structure", + "members":{ + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "GroupName":{ + "shape":"String", + "locationName":"groupName" + }, + "Tenancy":{ + "shape":"Tenancy", + "locationName":"tenancy" + }, + "HostId":{ + "shape":"String", + "locationName":"hostId" + }, + "Affinity":{ + "shape":"String", + "locationName":"affinity" + } + } + }, + "PlacementGroup":{ + "type":"structure", + "members":{ + "GroupName":{ + "shape":"String", + "locationName":"groupName" + }, + "Strategy":{ + "shape":"PlacementStrategy", + "locationName":"strategy" + }, + "State":{ + "shape":"PlacementGroupState", + "locationName":"state" + } + } + }, + "PlacementGroupList":{ + "type":"list", + "member":{ + "shape":"PlacementGroup", + "locationName":"item" + } + }, + "PlacementGroupState":{ + "type":"string", + "enum":[ + "pending", + "available", + "deleting", + "deleted" + ] + }, + "PlacementGroupStringList":{ + "type":"list", + "member":{"shape":"String"} + }, + "PlacementStrategy":{ + "type":"string", + "enum":["cluster"] + }, + "PlatformValues":{ + "type":"string", + "enum":["Windows"] + }, + "PortRange":{ + "type":"structure", + "members":{ + "From":{ + "shape":"Integer", + "locationName":"from" + }, + "To":{ + "shape":"Integer", + "locationName":"to" + } + } + }, + "PrefixList":{ + "type":"structure", + "members":{ + "PrefixListId":{ + "shape":"String", + "locationName":"prefixListId" + }, + "PrefixListName":{ + "shape":"String", + "locationName":"prefixListName" + }, + "Cidrs":{ + "shape":"ValueStringList", + "locationName":"cidrSet" + } + } + }, + "PrefixListId":{ + "type":"structure", + "members":{ + "PrefixListId":{ + "shape":"String", + "locationName":"prefixListId" + } + } + }, + "PrefixListIdList":{ + "type":"list", + "member":{ + "shape":"PrefixListId", + "locationName":"item" + } + }, + "PrefixListIdSet":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"item" + } + }, + "PrefixListSet":{ + "type":"list", + "member":{ + "shape":"PrefixList", + "locationName":"item" + } + }, + "PriceSchedule":{ + "type":"structure", + "members":{ + "Term":{ + "shape":"Long", + "locationName":"term" + }, + "Price":{ + "shape":"Double", + "locationName":"price" + }, + "CurrencyCode":{ + "shape":"CurrencyCodeValues", + "locationName":"currencyCode" + }, + "Active":{ + "shape":"Boolean", + "locationName":"active" + } + } + }, + "PriceScheduleList":{ + "type":"list", + "member":{ + "shape":"PriceSchedule", + "locationName":"item" + } + }, + "PriceScheduleSpecification":{ + "type":"structure", + "members":{ + "Term":{ + "shape":"Long", + "locationName":"term" + }, + "Price":{ + "shape":"Double", + "locationName":"price" + }, + "CurrencyCode":{ + "shape":"CurrencyCodeValues", + "locationName":"currencyCode" + } + } + }, + "PriceScheduleSpecificationList":{ + "type":"list", + "member":{ + "shape":"PriceScheduleSpecification", + "locationName":"item" + } + }, + "PricingDetail":{ + "type":"structure", + "members":{ + "Price":{ + "shape":"Double", + "locationName":"price" + }, + 
"Count":{ + "shape":"Integer", + "locationName":"count" + } + } + }, + "PricingDetailsList":{ + "type":"list", + "member":{ + "shape":"PricingDetail", + "locationName":"item" + } + }, + "PrivateIpAddressConfigSet":{ + "type":"list", + "member":{ + "shape":"ScheduledInstancesPrivateIpAddressConfig", + "locationName":"PrivateIpAddressConfigSet" + } + }, + "PrivateIpAddressSpecification":{ + "type":"structure", + "required":["PrivateIpAddress"], + "members":{ + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "Primary":{ + "shape":"Boolean", + "locationName":"primary" + } + } + }, + "PrivateIpAddressSpecificationList":{ + "type":"list", + "member":{ + "shape":"PrivateIpAddressSpecification", + "locationName":"item" + } + }, + "PrivateIpAddressStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"PrivateIpAddress" + } + }, + "ProductCode":{ + "type":"structure", + "members":{ + "ProductCodeId":{ + "shape":"String", + "locationName":"productCode" + }, + "ProductCodeType":{ + "shape":"ProductCodeValues", + "locationName":"type" + } + } + }, + "ProductCodeList":{ + "type":"list", + "member":{ + "shape":"ProductCode", + "locationName":"item" + } + }, + "ProductCodeStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ProductCode" + } + }, + "ProductCodeValues":{ + "type":"string", + "enum":[ + "devpay", + "marketplace" + ] + }, + "ProductDescriptionList":{ + "type":"list", + "member":{"shape":"String"} + }, + "PropagatingVgw":{ + "type":"structure", + "members":{ + "GatewayId":{ + "shape":"String", + "locationName":"gatewayId" + } + } + }, + "PropagatingVgwList":{ + "type":"list", + "member":{ + "shape":"PropagatingVgw", + "locationName":"item" + } + }, + "ProvisionedBandwidth":{ + "type":"structure", + "members":{ + "Provisioned":{ + "shape":"String", + "locationName":"provisioned" + }, + "Requested":{ + "shape":"String", + "locationName":"requested" + }, + "RequestTime":{ + "shape":"DateTime", + "locationName":"requestTime" + }, + "ProvisionTime":{ + "shape":"DateTime", + "locationName":"provisionTime" + }, + "Status":{ + "shape":"String", + "locationName":"status" + } + } + }, + "PublicIpStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"PublicIp" + } + }, + "PurchaseRequest":{ + "type":"structure", + "required":[ + "PurchaseToken", + "InstanceCount" + ], + "members":{ + "PurchaseToken":{"shape":"String"}, + "InstanceCount":{"shape":"Integer"} + } + }, + "PurchaseRequestSet":{ + "type":"list", + "member":{ + "shape":"PurchaseRequest", + "locationName":"PurchaseRequest" + }, + "min":1 + }, + "PurchaseReservedInstancesOfferingRequest":{ + "type":"structure", + "required":[ + "ReservedInstancesOfferingId", + "InstanceCount" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ReservedInstancesOfferingId":{"shape":"String"}, + "InstanceCount":{"shape":"Integer"}, + "LimitPrice":{ + "shape":"ReservedInstanceLimitPrice", + "locationName":"limitPrice" + } + } + }, + "PurchaseReservedInstancesOfferingResult":{ + "type":"structure", + "members":{ + "ReservedInstancesId":{ + "shape":"String", + "locationName":"reservedInstancesId" + } + } + }, + "PurchaseScheduledInstancesRequest":{ + "type":"structure", + "required":["PurchaseRequests"], + "members":{ + "DryRun":{"shape":"Boolean"}, + "ClientToken":{ + "shape":"String", + "idempotencyToken":true + }, + "PurchaseRequests":{ + "shape":"PurchaseRequestSet", + "locationName":"PurchaseRequest" + } + } + 
}, + "PurchaseScheduledInstancesResult":{ + "type":"structure", + "members":{ + "ScheduledInstanceSet":{ + "shape":"PurchasedScheduledInstanceSet", + "locationName":"scheduledInstanceSet" + } + } + }, + "PurchasedScheduledInstanceSet":{ + "type":"list", + "member":{ + "shape":"ScheduledInstance", + "locationName":"item" + } + }, + "RIProductDescription":{ + "type":"string", + "enum":[ + "Linux/UNIX", + "Linux/UNIX (Amazon VPC)", + "Windows", + "Windows (Amazon VPC)" + ] + }, + "ReasonCodesList":{ + "type":"list", + "member":{ + "shape":"ReportInstanceReasonCodes", + "locationName":"item" + } + }, + "RebootInstancesRequest":{ + "type":"structure", + "required":["InstanceIds"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + } + } + }, + "RecurringCharge":{ + "type":"structure", + "members":{ + "Frequency":{ + "shape":"RecurringChargeFrequency", + "locationName":"frequency" + }, + "Amount":{ + "shape":"Double", + "locationName":"amount" + } + } + }, + "RecurringChargeFrequency":{ + "type":"string", + "enum":["Hourly"] + }, + "RecurringChargesList":{ + "type":"list", + "member":{ + "shape":"RecurringCharge", + "locationName":"item" + } + }, + "Region":{ + "type":"structure", + "members":{ + "RegionName":{ + "shape":"String", + "locationName":"regionName" + }, + "Endpoint":{ + "shape":"String", + "locationName":"regionEndpoint" + } + } + }, + "RegionList":{ + "type":"list", + "member":{ + "shape":"Region", + "locationName":"item" + } + }, + "RegionNameStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"RegionName" + } + }, + "RegisterImageRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ImageLocation":{"shape":"String"}, + "Name":{ + "shape":"String", + "locationName":"name" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "Architecture":{ + "shape":"ArchitectureValues", + "locationName":"architecture" + }, + "KernelId":{ + "shape":"String", + "locationName":"kernelId" + }, + "RamdiskId":{ + "shape":"String", + "locationName":"ramdiskId" + }, + "RootDeviceName":{ + "shape":"String", + "locationName":"rootDeviceName" + }, + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappingRequestList", + "locationName":"BlockDeviceMapping" + }, + "VirtualizationType":{ + "shape":"String", + "locationName":"virtualizationType" + }, + "SriovNetSupport":{ + "shape":"String", + "locationName":"sriovNetSupport" + }, + "EnaSupport":{ + "shape":"Boolean", + "locationName":"enaSupport" + } + } + }, + "RegisterImageResult":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"String", + "locationName":"imageId" + } + } + }, + "RejectVpcPeeringConnectionRequest":{ + "type":"structure", + "required":["VpcPeeringConnectionId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcPeeringConnectionId":{ + "shape":"String", + "locationName":"vpcPeeringConnectionId" + } + } + }, + "RejectVpcPeeringConnectionResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "ReleaseAddressRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "PublicIp":{"shape":"String"}, + "AllocationId":{"shape":"String"} + } + }, + "ReleaseHostsRequest":{ + "type":"structure", + "required":["HostIds"], + "members":{ + 
"HostIds":{ + "shape":"RequestHostIdList", + "locationName":"hostId" + } + } + }, + "ReleaseHostsResult":{ + "type":"structure", + "members":{ + "Successful":{ + "shape":"ResponseHostIdList", + "locationName":"successful" + }, + "Unsuccessful":{ + "shape":"UnsuccessfulItemList", + "locationName":"unsuccessful" + } + } + }, + "ReplaceNetworkAclAssociationRequest":{ + "type":"structure", + "required":[ + "AssociationId", + "NetworkAclId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "AssociationId":{ + "shape":"String", + "locationName":"associationId" + }, + "NetworkAclId":{ + "shape":"String", + "locationName":"networkAclId" + } + } + }, + "ReplaceNetworkAclAssociationResult":{ + "type":"structure", + "members":{ + "NewAssociationId":{ + "shape":"String", + "locationName":"newAssociationId" + } + } + }, + "ReplaceNetworkAclEntryRequest":{ + "type":"structure", + "required":[ + "NetworkAclId", + "RuleNumber", + "Protocol", + "RuleAction", + "Egress", + "CidrBlock" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkAclId":{ + "shape":"String", + "locationName":"networkAclId" + }, + "RuleNumber":{ + "shape":"Integer", + "locationName":"ruleNumber" + }, + "Protocol":{ + "shape":"String", + "locationName":"protocol" + }, + "RuleAction":{ + "shape":"RuleAction", + "locationName":"ruleAction" + }, + "Egress":{ + "shape":"Boolean", + "locationName":"egress" + }, + "CidrBlock":{ + "shape":"String", + "locationName":"cidrBlock" + }, + "IcmpTypeCode":{ + "shape":"IcmpTypeCode", + "locationName":"Icmp" + }, + "PortRange":{ + "shape":"PortRange", + "locationName":"portRange" + } + } + }, + "ReplaceRouteRequest":{ + "type":"structure", + "required":[ + "RouteTableId", + "DestinationCidrBlock" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "RouteTableId":{ + "shape":"String", + "locationName":"routeTableId" + }, + "DestinationCidrBlock":{ + "shape":"String", + "locationName":"destinationCidrBlock" + }, + "GatewayId":{ + "shape":"String", + "locationName":"gatewayId" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "VpcPeeringConnectionId":{ + "shape":"String", + "locationName":"vpcPeeringConnectionId" + }, + "NatGatewayId":{ + "shape":"String", + "locationName":"natGatewayId" + } + } + }, + "ReplaceRouteTableAssociationRequest":{ + "type":"structure", + "required":[ + "AssociationId", + "RouteTableId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "AssociationId":{ + "shape":"String", + "locationName":"associationId" + }, + "RouteTableId":{ + "shape":"String", + "locationName":"routeTableId" + } + } + }, + "ReplaceRouteTableAssociationResult":{ + "type":"structure", + "members":{ + "NewAssociationId":{ + "shape":"String", + "locationName":"newAssociationId" + } + } + }, + "ReportInstanceReasonCodes":{ + "type":"string", + "enum":[ + "instance-stuck-in-state", + "unresponsive", + "not-accepting-credentials", + "password-not-available", + "performance-network", + "performance-instance-store", + "performance-ebs-volume", + "performance-other", + "other" + ] + }, + "ReportInstanceStatusRequest":{ + "type":"structure", + "required":[ + "Instances", + "Status", + "ReasonCodes" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Instances":{ + "shape":"InstanceIdStringList", + 
"locationName":"instanceId" + }, + "Status":{ + "shape":"ReportStatusType", + "locationName":"status" + }, + "StartTime":{ + "shape":"DateTime", + "locationName":"startTime" + }, + "EndTime":{ + "shape":"DateTime", + "locationName":"endTime" + }, + "ReasonCodes":{ + "shape":"ReasonCodesList", + "locationName":"reasonCode" + }, + "Description":{ + "shape":"String", + "locationName":"description" + } + } + }, + "ReportStatusType":{ + "type":"string", + "enum":[ + "ok", + "impaired" + ] + }, + "RequestHostIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"item" + } + }, + "RequestSpotFleetRequest":{ + "type":"structure", + "required":["SpotFleetRequestConfig"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SpotFleetRequestConfig":{ + "shape":"SpotFleetRequestConfigData", + "locationName":"spotFleetRequestConfig" + } + } + }, + "RequestSpotFleetResponse":{ + "type":"structure", + "required":["SpotFleetRequestId"], + "members":{ + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + } + } + }, + "RequestSpotInstancesRequest":{ + "type":"structure", + "required":["SpotPrice"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SpotPrice":{ + "shape":"String", + "locationName":"spotPrice" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + }, + "InstanceCount":{ + "shape":"Integer", + "locationName":"instanceCount" + }, + "Type":{ + "shape":"SpotInstanceType", + "locationName":"type" + }, + "ValidFrom":{ + "shape":"DateTime", + "locationName":"validFrom" + }, + "ValidUntil":{ + "shape":"DateTime", + "locationName":"validUntil" + }, + "LaunchGroup":{ + "shape":"String", + "locationName":"launchGroup" + }, + "AvailabilityZoneGroup":{ + "shape":"String", + "locationName":"availabilityZoneGroup" + }, + "BlockDurationMinutes":{ + "shape":"Integer", + "locationName":"blockDurationMinutes" + }, + "LaunchSpecification":{"shape":"RequestSpotLaunchSpecification"} + } + }, + "RequestSpotInstancesResult":{ + "type":"structure", + "members":{ + "SpotInstanceRequests":{ + "shape":"SpotInstanceRequestList", + "locationName":"spotInstanceRequestSet" + } + } + }, + "RequestSpotLaunchSpecification":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"String", + "locationName":"imageId" + }, + "KeyName":{ + "shape":"String", + "locationName":"keyName" + }, + "SecurityGroups":{ + "shape":"ValueStringList", + "locationName":"SecurityGroup" + }, + "UserData":{ + "shape":"String", + "locationName":"userData" + }, + "AddressingType":{ + "shape":"String", + "locationName":"addressingType" + }, + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + }, + "Placement":{ + "shape":"SpotPlacement", + "locationName":"placement" + }, + "KernelId":{ + "shape":"String", + "locationName":"kernelId" + }, + "RamdiskId":{ + "shape":"String", + "locationName":"ramdiskId" + }, + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappingList", + "locationName":"blockDeviceMapping" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "NetworkInterfaces":{ + "shape":"InstanceNetworkInterfaceSpecificationList", + "locationName":"NetworkInterface" + }, + "IamInstanceProfile":{ + "shape":"IamInstanceProfileSpecification", + "locationName":"iamInstanceProfile" + }, + "EbsOptimized":{ + "shape":"Boolean", + "locationName":"ebsOptimized" + }, + "Monitoring":{ + "shape":"RunInstancesMonitoringEnabled", + "locationName":"monitoring" + }, + 
"SecurityGroupIds":{ + "shape":"ValueStringList", + "locationName":"SecurityGroupId" + } + } + }, + "Reservation":{ + "type":"structure", + "members":{ + "ReservationId":{ + "shape":"String", + "locationName":"reservationId" + }, + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "RequesterId":{ + "shape":"String", + "locationName":"requesterId" + }, + "Groups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + }, + "Instances":{ + "shape":"InstanceList", + "locationName":"instancesSet" + } + } + }, + "ReservationList":{ + "type":"list", + "member":{ + "shape":"Reservation", + "locationName":"item" + } + }, + "ReservedInstanceLimitPrice":{ + "type":"structure", + "members":{ + "Amount":{ + "shape":"Double", + "locationName":"amount" + }, + "CurrencyCode":{ + "shape":"CurrencyCodeValues", + "locationName":"currencyCode" + } + } + }, + "ReservedInstanceState":{ + "type":"string", + "enum":[ + "payment-pending", + "active", + "payment-failed", + "retired" + ] + }, + "ReservedInstances":{ + "type":"structure", + "members":{ + "ReservedInstancesId":{ + "shape":"String", + "locationName":"reservedInstancesId" + }, + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Start":{ + "shape":"DateTime", + "locationName":"start" + }, + "End":{ + "shape":"DateTime", + "locationName":"end" + }, + "Duration":{ + "shape":"Long", + "locationName":"duration" + }, + "UsagePrice":{ + "shape":"Float", + "locationName":"usagePrice" + }, + "FixedPrice":{ + "shape":"Float", + "locationName":"fixedPrice" + }, + "InstanceCount":{ + "shape":"Integer", + "locationName":"instanceCount" + }, + "ProductDescription":{ + "shape":"RIProductDescription", + "locationName":"productDescription" + }, + "State":{ + "shape":"ReservedInstanceState", + "locationName":"state" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "InstanceTenancy":{ + "shape":"Tenancy", + "locationName":"instanceTenancy" + }, + "CurrencyCode":{ + "shape":"CurrencyCodeValues", + "locationName":"currencyCode" + }, + "OfferingType":{ + "shape":"OfferingTypeValues", + "locationName":"offeringType" + }, + "RecurringCharges":{ + "shape":"RecurringChargesList", + "locationName":"recurringCharges" + } + } + }, + "ReservedInstancesConfiguration":{ + "type":"structure", + "members":{ + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Platform":{ + "shape":"String", + "locationName":"platform" + }, + "InstanceCount":{ + "shape":"Integer", + "locationName":"instanceCount" + }, + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + } + } + }, + "ReservedInstancesConfigurationList":{ + "type":"list", + "member":{ + "shape":"ReservedInstancesConfiguration", + "locationName":"item" + } + }, + "ReservedInstancesId":{ + "type":"structure", + "members":{ + "ReservedInstancesId":{ + "shape":"String", + "locationName":"reservedInstancesId" + } + } + }, + "ReservedInstancesIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ReservedInstancesId" + } + }, + "ReservedInstancesList":{ + "type":"list", + "member":{ + "shape":"ReservedInstances", + "locationName":"item" + } + }, + "ReservedInstancesListing":{ + "type":"structure", + "members":{ + "ReservedInstancesListingId":{ + "shape":"String", + "locationName":"reservedInstancesListingId" + }, + "ReservedInstancesId":{ + "shape":"String", + "locationName":"reservedInstancesId" 
+ }, + "CreateDate":{ + "shape":"DateTime", + "locationName":"createDate" + }, + "UpdateDate":{ + "shape":"DateTime", + "locationName":"updateDate" + }, + "Status":{ + "shape":"ListingStatus", + "locationName":"status" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "InstanceCounts":{ + "shape":"InstanceCountList", + "locationName":"instanceCounts" + }, + "PriceSchedules":{ + "shape":"PriceScheduleList", + "locationName":"priceSchedules" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + } + } + }, + "ReservedInstancesListingList":{ + "type":"list", + "member":{ + "shape":"ReservedInstancesListing", + "locationName":"item" + } + }, + "ReservedInstancesModification":{ + "type":"structure", + "members":{ + "ReservedInstancesModificationId":{ + "shape":"String", + "locationName":"reservedInstancesModificationId" + }, + "ReservedInstancesIds":{ + "shape":"ReservedIntancesIds", + "locationName":"reservedInstancesSet" + }, + "ModificationResults":{ + "shape":"ReservedInstancesModificationResultList", + "locationName":"modificationResultSet" + }, + "CreateDate":{ + "shape":"DateTime", + "locationName":"createDate" + }, + "UpdateDate":{ + "shape":"DateTime", + "locationName":"updateDate" + }, + "EffectiveDate":{ + "shape":"DateTime", + "locationName":"effectiveDate" + }, + "Status":{ + "shape":"String", + "locationName":"status" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + } + } + }, + "ReservedInstancesModificationIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ReservedInstancesModificationId" + } + }, + "ReservedInstancesModificationList":{ + "type":"list", + "member":{ + "shape":"ReservedInstancesModification", + "locationName":"item" + } + }, + "ReservedInstancesModificationResult":{ + "type":"structure", + "members":{ + "ReservedInstancesId":{ + "shape":"String", + "locationName":"reservedInstancesId" + }, + "TargetConfiguration":{ + "shape":"ReservedInstancesConfiguration", + "locationName":"targetConfiguration" + } + } + }, + "ReservedInstancesModificationResultList":{ + "type":"list", + "member":{ + "shape":"ReservedInstancesModificationResult", + "locationName":"item" + } + }, + "ReservedInstancesOffering":{ + "type":"structure", + "members":{ + "ReservedInstancesOfferingId":{ + "shape":"String", + "locationName":"reservedInstancesOfferingId" + }, + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Duration":{ + "shape":"Long", + "locationName":"duration" + }, + "UsagePrice":{ + "shape":"Float", + "locationName":"usagePrice" + }, + "FixedPrice":{ + "shape":"Float", + "locationName":"fixedPrice" + }, + "ProductDescription":{ + "shape":"RIProductDescription", + "locationName":"productDescription" + }, + "InstanceTenancy":{ + "shape":"Tenancy", + "locationName":"instanceTenancy" + }, + "CurrencyCode":{ + "shape":"CurrencyCodeValues", + "locationName":"currencyCode" + }, + "OfferingType":{ + "shape":"OfferingTypeValues", + "locationName":"offeringType" + }, + "RecurringCharges":{ + "shape":"RecurringChargesList", + "locationName":"recurringCharges" + }, + "Marketplace":{ + "shape":"Boolean", + "locationName":"marketplace" + }, + "PricingDetails":{ + "shape":"PricingDetailsList", + 
"locationName":"pricingDetailsSet" + } + } + }, + "ReservedInstancesOfferingIdStringList":{ + "type":"list", + "member":{"shape":"String"} + }, + "ReservedInstancesOfferingList":{ + "type":"list", + "member":{ + "shape":"ReservedInstancesOffering", + "locationName":"item" + } + }, + "ReservedIntancesIds":{ + "type":"list", + "member":{ + "shape":"ReservedInstancesId", + "locationName":"item" + } + }, + "ResetImageAttributeName":{ + "type":"string", + "enum":["launchPermission"] + }, + "ResetImageAttributeRequest":{ + "type":"structure", + "required":[ + "ImageId", + "Attribute" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ImageId":{"shape":"String"}, + "Attribute":{"shape":"ResetImageAttributeName"} + } + }, + "ResetInstanceAttributeRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "Attribute" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Attribute":{ + "shape":"InstanceAttributeName", + "locationName":"attribute" + } + } + }, + "ResetNetworkInterfaceAttributeRequest":{ + "type":"structure", + "required":["NetworkInterfaceId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "SourceDestCheck":{ + "shape":"String", + "locationName":"sourceDestCheck" + } + } + }, + "ResetSnapshotAttributeRequest":{ + "type":"structure", + "required":[ + "SnapshotId", + "Attribute" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SnapshotId":{"shape":"String"}, + "Attribute":{"shape":"SnapshotAttributeName"} + } + }, + "ResourceIdList":{ + "type":"list", + "member":{"shape":"String"} + }, + "ResourceType":{ + "type":"string", + "enum":[ + "customer-gateway", + "dhcp-options", + "image", + "instance", + "internet-gateway", + "network-acl", + "network-interface", + "reserved-instances", + "route-table", + "snapshot", + "spot-instances-request", + "subnet", + "security-group", + "volume", + "vpc", + "vpn-connection", + "vpn-gateway" + ] + }, + "ResponseHostIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"item" + } + }, + "RestorableByStringList":{ + "type":"list", + "member":{"shape":"String"} + }, + "RestoreAddressToClassicRequest":{ + "type":"structure", + "required":["PublicIp"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + } + } + }, + "RestoreAddressToClassicResult":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"Status", + "locationName":"status" + }, + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + } + } + }, + "RevokeSecurityGroupEgressRequest":{ + "type":"structure", + "required":["GroupId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupId":{ + "shape":"String", + "locationName":"groupId" + }, + "SourceSecurityGroupName":{ + "shape":"String", + "locationName":"sourceSecurityGroupName" + }, + "SourceSecurityGroupOwnerId":{ + "shape":"String", + "locationName":"sourceSecurityGroupOwnerId" + }, + "IpProtocol":{ + "shape":"String", + "locationName":"ipProtocol" + }, + "FromPort":{ + "shape":"Integer", + "locationName":"fromPort" + }, + "ToPort":{ + "shape":"Integer", + "locationName":"toPort" + }, + "CidrIp":{ + "shape":"String", + "locationName":"cidrIp" + }, + "IpPermissions":{ + 
"shape":"IpPermissionList", + "locationName":"ipPermissions" + } + } + }, + "RevokeSecurityGroupIngressRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupName":{"shape":"String"}, + "GroupId":{"shape":"String"}, + "SourceSecurityGroupName":{"shape":"String"}, + "SourceSecurityGroupOwnerId":{"shape":"String"}, + "IpProtocol":{"shape":"String"}, + "FromPort":{"shape":"Integer"}, + "ToPort":{"shape":"Integer"}, + "CidrIp":{"shape":"String"}, + "IpPermissions":{"shape":"IpPermissionList"} + } + }, + "Route":{ + "type":"structure", + "members":{ + "DestinationCidrBlock":{ + "shape":"String", + "locationName":"destinationCidrBlock" + }, + "DestinationPrefixListId":{ + "shape":"String", + "locationName":"destinationPrefixListId" + }, + "GatewayId":{ + "shape":"String", + "locationName":"gatewayId" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "InstanceOwnerId":{ + "shape":"String", + "locationName":"instanceOwnerId" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "VpcPeeringConnectionId":{ + "shape":"String", + "locationName":"vpcPeeringConnectionId" + }, + "NatGatewayId":{ + "shape":"String", + "locationName":"natGatewayId" + }, + "State":{ + "shape":"RouteState", + "locationName":"state" + }, + "Origin":{ + "shape":"RouteOrigin", + "locationName":"origin" + } + } + }, + "RouteList":{ + "type":"list", + "member":{ + "shape":"Route", + "locationName":"item" + } + }, + "RouteOrigin":{ + "type":"string", + "enum":[ + "CreateRouteTable", + "CreateRoute", + "EnableVgwRoutePropagation" + ] + }, + "RouteState":{ + "type":"string", + "enum":[ + "active", + "blackhole" + ] + }, + "RouteTable":{ + "type":"structure", + "members":{ + "RouteTableId":{ + "shape":"String", + "locationName":"routeTableId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "Routes":{ + "shape":"RouteList", + "locationName":"routeSet" + }, + "Associations":{ + "shape":"RouteTableAssociationList", + "locationName":"associationSet" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "PropagatingVgws":{ + "shape":"PropagatingVgwList", + "locationName":"propagatingVgwSet" + } + } + }, + "RouteTableAssociation":{ + "type":"structure", + "members":{ + "RouteTableAssociationId":{ + "shape":"String", + "locationName":"routeTableAssociationId" + }, + "RouteTableId":{ + "shape":"String", + "locationName":"routeTableId" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "Main":{ + "shape":"Boolean", + "locationName":"main" + } + } + }, + "RouteTableAssociationList":{ + "type":"list", + "member":{ + "shape":"RouteTableAssociation", + "locationName":"item" + } + }, + "RouteTableList":{ + "type":"list", + "member":{ + "shape":"RouteTable", + "locationName":"item" + } + }, + "RuleAction":{ + "type":"string", + "enum":[ + "allow", + "deny" + ] + }, + "RunInstancesMonitoringEnabled":{ + "type":"structure", + "required":["Enabled"], + "members":{ + "Enabled":{ + "shape":"Boolean", + "locationName":"enabled" + } + } + }, + "RunInstancesRequest":{ + "type":"structure", + "required":[ + "ImageId", + "MinCount", + "MaxCount" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ImageId":{"shape":"String"}, + "MinCount":{"shape":"Integer"}, + "MaxCount":{"shape":"Integer"}, + "KeyName":{"shape":"String"}, + "SecurityGroups":{ + "shape":"SecurityGroupStringList", + "locationName":"SecurityGroup" + }, + 
"SecurityGroupIds":{ + "shape":"SecurityGroupIdStringList", + "locationName":"SecurityGroupId" + }, + "UserData":{"shape":"String"}, + "InstanceType":{"shape":"InstanceType"}, + "Placement":{"shape":"Placement"}, + "KernelId":{"shape":"String"}, + "RamdiskId":{"shape":"String"}, + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappingRequestList", + "locationName":"BlockDeviceMapping" + }, + "Monitoring":{"shape":"RunInstancesMonitoringEnabled"}, + "SubnetId":{"shape":"String"}, + "DisableApiTermination":{ + "shape":"Boolean", + "locationName":"disableApiTermination" + }, + "InstanceInitiatedShutdownBehavior":{ + "shape":"ShutdownBehavior", + "locationName":"instanceInitiatedShutdownBehavior" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + }, + "AdditionalInfo":{ + "shape":"String", + "locationName":"additionalInfo" + }, + "NetworkInterfaces":{ + "shape":"InstanceNetworkInterfaceSpecificationList", + "locationName":"networkInterface" + }, + "IamInstanceProfile":{ + "shape":"IamInstanceProfileSpecification", + "locationName":"iamInstanceProfile" + }, + "EbsOptimized":{ + "shape":"Boolean", + "locationName":"ebsOptimized" + } + } + }, + "RunScheduledInstancesRequest":{ + "type":"structure", + "required":[ + "ScheduledInstanceId", + "LaunchSpecification" + ], + "members":{ + "DryRun":{"shape":"Boolean"}, + "ClientToken":{ + "shape":"String", + "idempotencyToken":true + }, + "InstanceCount":{"shape":"Integer"}, + "ScheduledInstanceId":{"shape":"String"}, + "LaunchSpecification":{"shape":"ScheduledInstancesLaunchSpecification"} + } + }, + "RunScheduledInstancesResult":{ + "type":"structure", + "members":{ + "InstanceIdSet":{ + "shape":"InstanceIdSet", + "locationName":"instanceIdSet" + } + } + }, + "S3Storage":{ + "type":"structure", + "members":{ + "Bucket":{ + "shape":"String", + "locationName":"bucket" + }, + "Prefix":{ + "shape":"String", + "locationName":"prefix" + }, + "AWSAccessKeyId":{"shape":"String"}, + "UploadPolicy":{ + "shape":"Blob", + "locationName":"uploadPolicy" + }, + "UploadPolicySignature":{ + "shape":"String", + "locationName":"uploadPolicySignature" + } + } + }, + "ScheduledInstance":{ + "type":"structure", + "members":{ + "ScheduledInstanceId":{ + "shape":"String", + "locationName":"scheduledInstanceId" + }, + "InstanceType":{ + "shape":"String", + "locationName":"instanceType" + }, + "Platform":{ + "shape":"String", + "locationName":"platform" + }, + "NetworkPlatform":{ + "shape":"String", + "locationName":"networkPlatform" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "SlotDurationInHours":{ + "shape":"Integer", + "locationName":"slotDurationInHours" + }, + "Recurrence":{ + "shape":"ScheduledInstanceRecurrence", + "locationName":"recurrence" + }, + "PreviousSlotEndTime":{ + "shape":"DateTime", + "locationName":"previousSlotEndTime" + }, + "NextSlotStartTime":{ + "shape":"DateTime", + "locationName":"nextSlotStartTime" + }, + "HourlyPrice":{ + "shape":"String", + "locationName":"hourlyPrice" + }, + "TotalScheduledInstanceHours":{ + "shape":"Integer", + "locationName":"totalScheduledInstanceHours" + }, + "InstanceCount":{ + "shape":"Integer", + "locationName":"instanceCount" + }, + "TermStartDate":{ + "shape":"DateTime", + "locationName":"termStartDate" + }, + "TermEndDate":{ + "shape":"DateTime", + "locationName":"termEndDate" + }, + "CreateDate":{ + "shape":"DateTime", + "locationName":"createDate" + } + } + }, + 
"ScheduledInstanceAvailability":{ + "type":"structure", + "members":{ + "InstanceType":{ + "shape":"String", + "locationName":"instanceType" + }, + "Platform":{ + "shape":"String", + "locationName":"platform" + }, + "NetworkPlatform":{ + "shape":"String", + "locationName":"networkPlatform" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "PurchaseToken":{ + "shape":"String", + "locationName":"purchaseToken" + }, + "SlotDurationInHours":{ + "shape":"Integer", + "locationName":"slotDurationInHours" + }, + "Recurrence":{ + "shape":"ScheduledInstanceRecurrence", + "locationName":"recurrence" + }, + "FirstSlotStartTime":{ + "shape":"DateTime", + "locationName":"firstSlotStartTime" + }, + "HourlyPrice":{ + "shape":"String", + "locationName":"hourlyPrice" + }, + "TotalScheduledInstanceHours":{ + "shape":"Integer", + "locationName":"totalScheduledInstanceHours" + }, + "AvailableInstanceCount":{ + "shape":"Integer", + "locationName":"availableInstanceCount" + }, + "MinTermDurationInDays":{ + "shape":"Integer", + "locationName":"minTermDurationInDays" + }, + "MaxTermDurationInDays":{ + "shape":"Integer", + "locationName":"maxTermDurationInDays" + } + } + }, + "ScheduledInstanceAvailabilitySet":{ + "type":"list", + "member":{ + "shape":"ScheduledInstanceAvailability", + "locationName":"item" + } + }, + "ScheduledInstanceIdRequestSet":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ScheduledInstanceId" + } + }, + "ScheduledInstanceRecurrence":{ + "type":"structure", + "members":{ + "Frequency":{ + "shape":"String", + "locationName":"frequency" + }, + "Interval":{ + "shape":"Integer", + "locationName":"interval" + }, + "OccurrenceDaySet":{ + "shape":"OccurrenceDaySet", + "locationName":"occurrenceDaySet" + }, + "OccurrenceRelativeToEnd":{ + "shape":"Boolean", + "locationName":"occurrenceRelativeToEnd" + }, + "OccurrenceUnit":{ + "shape":"String", + "locationName":"occurrenceUnit" + } + } + }, + "ScheduledInstanceRecurrenceRequest":{ + "type":"structure", + "members":{ + "Frequency":{"shape":"String"}, + "Interval":{"shape":"Integer"}, + "OccurrenceDays":{ + "shape":"OccurrenceDayRequestSet", + "locationName":"OccurrenceDay" + }, + "OccurrenceRelativeToEnd":{"shape":"Boolean"}, + "OccurrenceUnit":{"shape":"String"} + } + }, + "ScheduledInstanceSet":{ + "type":"list", + "member":{ + "shape":"ScheduledInstance", + "locationName":"item" + } + }, + "ScheduledInstancesBlockDeviceMapping":{ + "type":"structure", + "members":{ + "DeviceName":{"shape":"String"}, + "NoDevice":{"shape":"String"}, + "VirtualName":{"shape":"String"}, + "Ebs":{"shape":"ScheduledInstancesEbs"} + } + }, + "ScheduledInstancesBlockDeviceMappingSet":{ + "type":"list", + "member":{ + "shape":"ScheduledInstancesBlockDeviceMapping", + "locationName":"BlockDeviceMapping" + } + }, + "ScheduledInstancesEbs":{ + "type":"structure", + "members":{ + "SnapshotId":{"shape":"String"}, + "VolumeSize":{"shape":"Integer"}, + "DeleteOnTermination":{"shape":"Boolean"}, + "VolumeType":{"shape":"String"}, + "Iops":{"shape":"Integer"}, + "Encrypted":{"shape":"Boolean"} + } + }, + "ScheduledInstancesIamInstanceProfile":{ + "type":"structure", + "members":{ + "Arn":{"shape":"String"}, + "Name":{"shape":"String"} + } + }, + "ScheduledInstancesLaunchSpecification":{ + "type":"structure", + "required":["ImageId"], + "members":{ + "ImageId":{"shape":"String"}, + "KeyName":{"shape":"String"}, + "SecurityGroupIds":{ + "shape":"ScheduledInstancesSecurityGroupIdSet", + "locationName":"SecurityGroupId" + 
}, + "UserData":{"shape":"String"}, + "Placement":{"shape":"ScheduledInstancesPlacement"}, + "KernelId":{"shape":"String"}, + "InstanceType":{"shape":"String"}, + "RamdiskId":{"shape":"String"}, + "BlockDeviceMappings":{ + "shape":"ScheduledInstancesBlockDeviceMappingSet", + "locationName":"BlockDeviceMapping" + }, + "Monitoring":{"shape":"ScheduledInstancesMonitoring"}, + "SubnetId":{"shape":"String"}, + "NetworkInterfaces":{ + "shape":"ScheduledInstancesNetworkInterfaceSet", + "locationName":"NetworkInterface" + }, + "IamInstanceProfile":{"shape":"ScheduledInstancesIamInstanceProfile"}, + "EbsOptimized":{"shape":"Boolean"} + } + }, + "ScheduledInstancesMonitoring":{ + "type":"structure", + "members":{ + "Enabled":{"shape":"Boolean"} + } + }, + "ScheduledInstancesNetworkInterface":{ + "type":"structure", + "members":{ + "NetworkInterfaceId":{"shape":"String"}, + "DeviceIndex":{"shape":"Integer"}, + "SubnetId":{"shape":"String"}, + "Description":{"shape":"String"}, + "PrivateIpAddress":{"shape":"String"}, + "PrivateIpAddressConfigs":{ + "shape":"PrivateIpAddressConfigSet", + "locationName":"PrivateIpAddressConfig" + }, + "SecondaryPrivateIpAddressCount":{"shape":"Integer"}, + "AssociatePublicIpAddress":{"shape":"Boolean"}, + "Groups":{ + "shape":"ScheduledInstancesSecurityGroupIdSet", + "locationName":"Group" + }, + "DeleteOnTermination":{"shape":"Boolean"} + } + }, + "ScheduledInstancesNetworkInterfaceSet":{ + "type":"list", + "member":{ + "shape":"ScheduledInstancesNetworkInterface", + "locationName":"NetworkInterface" + } + }, + "ScheduledInstancesPlacement":{ + "type":"structure", + "members":{ + "AvailabilityZone":{"shape":"String"}, + "GroupName":{"shape":"String"} + } + }, + "ScheduledInstancesPrivateIpAddressConfig":{ + "type":"structure", + "members":{ + "PrivateIpAddress":{"shape":"String"}, + "Primary":{"shape":"Boolean"} + } + }, + "ScheduledInstancesSecurityGroupIdSet":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SecurityGroupId" + } + }, + "SecurityGroup":{ + "type":"structure", + "members":{ + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "GroupName":{ + "shape":"String", + "locationName":"groupName" + }, + "GroupId":{ + "shape":"String", + "locationName":"groupId" + }, + "Description":{ + "shape":"String", + "locationName":"groupDescription" + }, + "IpPermissions":{ + "shape":"IpPermissionList", + "locationName":"ipPermissions" + }, + "IpPermissionsEgress":{ + "shape":"IpPermissionList", + "locationName":"ipPermissionsEgress" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "SecurityGroupIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SecurityGroupId" + } + }, + "SecurityGroupList":{ + "type":"list", + "member":{ + "shape":"SecurityGroup", + "locationName":"item" + } + }, + "SecurityGroupReference":{ + "type":"structure", + "required":[ + "GroupId", + "ReferencingVpcId" + ], + "members":{ + "GroupId":{ + "shape":"String", + "locationName":"groupId" + }, + "ReferencingVpcId":{ + "shape":"String", + "locationName":"referencingVpcId" + }, + "VpcPeeringConnectionId":{ + "shape":"String", + "locationName":"vpcPeeringConnectionId" + } + } + }, + "SecurityGroupReferences":{ + "type":"list", + "member":{ + "shape":"SecurityGroupReference", + "locationName":"item" + } + }, + "SecurityGroupStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SecurityGroup" + } + }, + 
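The SecurityGroup shape just defined is the element type of SecurityGroupList, which DescribeSecurityGroups returns. A small sketch of reading those fields from Go; the helper name printGroups and the caller-supplied client are assumptions for illustration:

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// printGroups lists each group's ID, name, and ingress rule count; the
// caller supplies a configured *ec2.EC2 client and placeholder group IDs.
func printGroups(svc *ec2.EC2, groupIDs []string) error {
	out, err := svc.DescribeSecurityGroups(&ec2.DescribeSecurityGroupsInput{
		GroupIds: aws.StringSlice(groupIDs),
	})
	if err != nil {
		return err
	}
	for _, g := range out.SecurityGroups {
		fmt.Printf("%s (%s): %d ingress rules\n",
			aws.StringValue(g.GroupId), aws.StringValue(g.GroupName), len(g.IpPermissions))
	}
	return nil
}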
"ShutdownBehavior":{ + "type":"string", + "enum":[ + "stop", + "terminate" + ] + }, + "SlotDateTimeRangeRequest":{ + "type":"structure", + "required":[ + "EarliestTime", + "LatestTime" + ], + "members":{ + "EarliestTime":{"shape":"DateTime"}, + "LatestTime":{"shape":"DateTime"} + } + }, + "SlotStartTimeRangeRequest":{ + "type":"structure", + "members":{ + "EarliestTime":{"shape":"DateTime"}, + "LatestTime":{"shape":"DateTime"} + } + }, + "Snapshot":{ + "type":"structure", + "members":{ + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + }, + "VolumeId":{ + "shape":"String", + "locationName":"volumeId" + }, + "State":{ + "shape":"SnapshotState", + "locationName":"status" + }, + "StateMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "StartTime":{ + "shape":"DateTime", + "locationName":"startTime" + }, + "Progress":{ + "shape":"String", + "locationName":"progress" + }, + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "VolumeSize":{ + "shape":"Integer", + "locationName":"volumeSize" + }, + "OwnerAlias":{ + "shape":"String", + "locationName":"ownerAlias" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "Encrypted":{ + "shape":"Boolean", + "locationName":"encrypted" + }, + "KmsKeyId":{ + "shape":"String", + "locationName":"kmsKeyId" + }, + "DataEncryptionKeyId":{ + "shape":"String", + "locationName":"dataEncryptionKeyId" + } + } + }, + "SnapshotAttributeName":{ + "type":"string", + "enum":[ + "productCodes", + "createVolumePermission" + ] + }, + "SnapshotDetail":{ + "type":"structure", + "members":{ + "DiskImageSize":{ + "shape":"Double", + "locationName":"diskImageSize" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "Format":{ + "shape":"String", + "locationName":"format" + }, + "Url":{ + "shape":"String", + "locationName":"url" + }, + "UserBucket":{ + "shape":"UserBucketDetails", + "locationName":"userBucket" + }, + "DeviceName":{ + "shape":"String", + "locationName":"deviceName" + }, + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + }, + "Progress":{ + "shape":"String", + "locationName":"progress" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "Status":{ + "shape":"String", + "locationName":"status" + } + } + }, + "SnapshotDetailList":{ + "type":"list", + "member":{ + "shape":"SnapshotDetail", + "locationName":"item" + } + }, + "SnapshotDiskContainer":{ + "type":"structure", + "members":{ + "Description":{"shape":"String"}, + "Format":{"shape":"String"}, + "Url":{"shape":"String"}, + "UserBucket":{"shape":"UserBucket"} + } + }, + "SnapshotIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SnapshotId" + } + }, + "SnapshotList":{ + "type":"list", + "member":{ + "shape":"Snapshot", + "locationName":"item" + } + }, + "SnapshotState":{ + "type":"string", + "enum":[ + "pending", + "completed", + "error" + ] + }, + "SnapshotTaskDetail":{ + "type":"structure", + "members":{ + "DiskImageSize":{ + "shape":"Double", + "locationName":"diskImageSize" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "Format":{ + "shape":"String", + "locationName":"format" + }, + "Url":{ + "shape":"String", + "locationName":"url" + }, + "UserBucket":{ + "shape":"UserBucketDetails", + "locationName":"userBucket" + }, + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + }, + "Progress":{ + "shape":"String", 
+ "locationName":"progress" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "Status":{ + "shape":"String", + "locationName":"status" + } + } + }, + "SpotDatafeedSubscription":{ + "type":"structure", + "members":{ + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "Bucket":{ + "shape":"String", + "locationName":"bucket" + }, + "Prefix":{ + "shape":"String", + "locationName":"prefix" + }, + "State":{ + "shape":"DatafeedSubscriptionState", + "locationName":"state" + }, + "Fault":{ + "shape":"SpotInstanceStateFault", + "locationName":"fault" + } + } + }, + "SpotFleetLaunchSpecification":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"String", + "locationName":"imageId" + }, + "KeyName":{ + "shape":"String", + "locationName":"keyName" + }, + "SecurityGroups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + }, + "UserData":{ + "shape":"String", + "locationName":"userData" + }, + "AddressingType":{ + "shape":"String", + "locationName":"addressingType" + }, + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + }, + "Placement":{ + "shape":"SpotPlacement", + "locationName":"placement" + }, + "KernelId":{ + "shape":"String", + "locationName":"kernelId" + }, + "RamdiskId":{ + "shape":"String", + "locationName":"ramdiskId" + }, + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappingList", + "locationName":"blockDeviceMapping" + }, + "Monitoring":{ + "shape":"SpotFleetMonitoring", + "locationName":"monitoring" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "NetworkInterfaces":{ + "shape":"InstanceNetworkInterfaceSpecificationList", + "locationName":"networkInterfaceSet" + }, + "IamInstanceProfile":{ + "shape":"IamInstanceProfileSpecification", + "locationName":"iamInstanceProfile" + }, + "EbsOptimized":{ + "shape":"Boolean", + "locationName":"ebsOptimized" + }, + "WeightedCapacity":{ + "shape":"Double", + "locationName":"weightedCapacity" + }, + "SpotPrice":{ + "shape":"String", + "locationName":"spotPrice" + } + } + }, + "SpotFleetMonitoring":{ + "type":"structure", + "members":{ + "Enabled":{ + "shape":"Boolean", + "locationName":"enabled" + } + } + }, + "SpotFleetRequestConfig":{ + "type":"structure", + "required":[ + "SpotFleetRequestId", + "SpotFleetRequestState", + "SpotFleetRequestConfig", + "CreateTime" + ], + "members":{ + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + }, + "SpotFleetRequestState":{ + "shape":"BatchState", + "locationName":"spotFleetRequestState" + }, + "SpotFleetRequestConfig":{ + "shape":"SpotFleetRequestConfigData", + "locationName":"spotFleetRequestConfig" + }, + "CreateTime":{ + "shape":"DateTime", + "locationName":"createTime" + } + } + }, + "SpotFleetRequestConfigData":{ + "type":"structure", + "required":[ + "SpotPrice", + "TargetCapacity", + "IamFleetRole", + "LaunchSpecifications" + ], + "members":{ + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + }, + "SpotPrice":{ + "shape":"String", + "locationName":"spotPrice" + }, + "TargetCapacity":{ + "shape":"Integer", + "locationName":"targetCapacity" + }, + "ValidFrom":{ + "shape":"DateTime", + "locationName":"validFrom" + }, + "ValidUntil":{ + "shape":"DateTime", + "locationName":"validUntil" + }, + "TerminateInstancesWithExpiration":{ + "shape":"Boolean", + "locationName":"terminateInstancesWithExpiration" + }, + "IamFleetRole":{ + "shape":"String", + "locationName":"iamFleetRole" + }, + "LaunchSpecifications":{ + 
"shape":"LaunchSpecsList", + "locationName":"launchSpecifications" + }, + "ExcessCapacityTerminationPolicy":{ + "shape":"ExcessCapacityTerminationPolicy", + "locationName":"excessCapacityTerminationPolicy" + }, + "AllocationStrategy":{ + "shape":"AllocationStrategy", + "locationName":"allocationStrategy" + }, + "FulfilledCapacity":{ + "shape":"Double", + "locationName":"fulfilledCapacity" + }, + "Type":{ + "shape":"FleetType", + "locationName":"type" + } + } + }, + "SpotFleetRequestConfigSet":{ + "type":"list", + "member":{ + "shape":"SpotFleetRequestConfig", + "locationName":"item" + } + }, + "SpotInstanceRequest":{ + "type":"structure", + "members":{ + "SpotInstanceRequestId":{ + "shape":"String", + "locationName":"spotInstanceRequestId" + }, + "SpotPrice":{ + "shape":"String", + "locationName":"spotPrice" + }, + "Type":{ + "shape":"SpotInstanceType", + "locationName":"type" + }, + "State":{ + "shape":"SpotInstanceState", + "locationName":"state" + }, + "Fault":{ + "shape":"SpotInstanceStateFault", + "locationName":"fault" + }, + "Status":{ + "shape":"SpotInstanceStatus", + "locationName":"status" + }, + "ValidFrom":{ + "shape":"DateTime", + "locationName":"validFrom" + }, + "ValidUntil":{ + "shape":"DateTime", + "locationName":"validUntil" + }, + "LaunchGroup":{ + "shape":"String", + "locationName":"launchGroup" + }, + "AvailabilityZoneGroup":{ + "shape":"String", + "locationName":"availabilityZoneGroup" + }, + "LaunchSpecification":{ + "shape":"LaunchSpecification", + "locationName":"launchSpecification" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "CreateTime":{ + "shape":"DateTime", + "locationName":"createTime" + }, + "ProductDescription":{ + "shape":"RIProductDescription", + "locationName":"productDescription" + }, + "BlockDurationMinutes":{ + "shape":"Integer", + "locationName":"blockDurationMinutes" + }, + "ActualBlockHourlyPrice":{ + "shape":"String", + "locationName":"actualBlockHourlyPrice" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "LaunchedAvailabilityZone":{ + "shape":"String", + "locationName":"launchedAvailabilityZone" + } + } + }, + "SpotInstanceRequestIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SpotInstanceRequestId" + } + }, + "SpotInstanceRequestList":{ + "type":"list", + "member":{ + "shape":"SpotInstanceRequest", + "locationName":"item" + } + }, + "SpotInstanceState":{ + "type":"string", + "enum":[ + "open", + "active", + "closed", + "cancelled", + "failed" + ] + }, + "SpotInstanceStateFault":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"String", + "locationName":"code" + }, + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "SpotInstanceStatus":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"String", + "locationName":"code" + }, + "UpdateTime":{ + "shape":"DateTime", + "locationName":"updateTime" + }, + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "SpotInstanceType":{ + "type":"string", + "enum":[ + "one-time", + "persistent" + ] + }, + "SpotPlacement":{ + "type":"structure", + "members":{ + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "GroupName":{ + "shape":"String", + "locationName":"groupName" + } + } + }, + "SpotPrice":{ + "type":"structure", + "members":{ + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + }, + "ProductDescription":{ + "shape":"RIProductDescription", + "locationName":"productDescription" + }, + 
"SpotPrice":{ + "shape":"String", + "locationName":"spotPrice" + }, + "Timestamp":{ + "shape":"DateTime", + "locationName":"timestamp" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + } + } + }, + "SpotPriceHistoryList":{ + "type":"list", + "member":{ + "shape":"SpotPrice", + "locationName":"item" + } + }, + "StaleIpPermission":{ + "type":"structure", + "members":{ + "FromPort":{ + "shape":"Integer", + "locationName":"fromPort" + }, + "IpProtocol":{ + "shape":"String", + "locationName":"ipProtocol" + }, + "IpRanges":{ + "shape":"IpRanges", + "locationName":"ipRanges" + }, + "PrefixListIds":{ + "shape":"PrefixListIdSet", + "locationName":"prefixListIds" + }, + "ToPort":{ + "shape":"Integer", + "locationName":"toPort" + }, + "UserIdGroupPairs":{ + "shape":"UserIdGroupPairSet", + "locationName":"groups" + } + } + }, + "StaleIpPermissionSet":{ + "type":"list", + "member":{ + "shape":"StaleIpPermission", + "locationName":"item" + } + }, + "StaleSecurityGroup":{ + "type":"structure", + "required":["GroupId"], + "members":{ + "GroupId":{ + "shape":"String", + "locationName":"groupId" + }, + "GroupName":{ + "shape":"String", + "locationName":"groupName" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "StaleIpPermissions":{ + "shape":"StaleIpPermissionSet", + "locationName":"staleIpPermissions" + }, + "StaleIpPermissionsEgress":{ + "shape":"StaleIpPermissionSet", + "locationName":"staleIpPermissionsEgress" + } + } + }, + "StaleSecurityGroupSet":{ + "type":"list", + "member":{ + "shape":"StaleSecurityGroup", + "locationName":"item" + } + }, + "StartInstancesRequest":{ + "type":"structure", + "required":["InstanceIds"], + "members":{ + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + }, + "AdditionalInfo":{ + "shape":"String", + "locationName":"additionalInfo" + }, + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + } + } + }, + "StartInstancesResult":{ + "type":"structure", + "members":{ + "StartingInstances":{ + "shape":"InstanceStateChangeList", + "locationName":"instancesSet" + } + } + }, + "State":{ + "type":"string", + "enum":[ + "Pending", + "Available", + "Deleting", + "Deleted" + ] + }, + "StateReason":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"String", + "locationName":"code" + }, + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "Status":{ + "type":"string", + "enum":[ + "MoveInProgress", + "InVpc", + "InClassic" + ] + }, + "StatusName":{ + "type":"string", + "enum":["reachability"] + }, + "StatusType":{ + "type":"string", + "enum":[ + "passed", + "failed", + "insufficient-data", + "initializing" + ] + }, + "StopInstancesRequest":{ + "type":"structure", + "required":["InstanceIds"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + }, + "Force":{ + "shape":"Boolean", + "locationName":"force" + } + } + }, + "StopInstancesResult":{ + "type":"structure", + "members":{ + "StoppingInstances":{ + "shape":"InstanceStateChangeList", + "locationName":"instancesSet" + } + } + }, + "Storage":{ + "type":"structure", + "members":{ + "S3":{"shape":"S3Storage"} + } + }, + "String":{"type":"string"}, + "Subnet":{ + "type":"structure", + "members":{ + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "State":{ + "shape":"SubnetState", + "locationName":"state" + }, 
+ "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "CidrBlock":{ + "shape":"String", + "locationName":"cidrBlock" + }, + "AvailableIpAddressCount":{ + "shape":"Integer", + "locationName":"availableIpAddressCount" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "DefaultForAz":{ + "shape":"Boolean", + "locationName":"defaultForAz" + }, + "MapPublicIpOnLaunch":{ + "shape":"Boolean", + "locationName":"mapPublicIpOnLaunch" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "SubnetIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SubnetId" + } + }, + "SubnetList":{ + "type":"list", + "member":{ + "shape":"Subnet", + "locationName":"item" + } + }, + "SubnetState":{ + "type":"string", + "enum":[ + "pending", + "available" + ] + }, + "SummaryStatus":{ + "type":"string", + "enum":[ + "ok", + "impaired", + "insufficient-data", + "not-applicable", + "initializing" + ] + }, + "Tag":{ + "type":"structure", + "members":{ + "Key":{ + "shape":"String", + "locationName":"key" + }, + "Value":{ + "shape":"String", + "locationName":"value" + } + } + }, + "TagDescription":{ + "type":"structure", + "members":{ + "ResourceId":{ + "shape":"String", + "locationName":"resourceId" + }, + "ResourceType":{ + "shape":"ResourceType", + "locationName":"resourceType" + }, + "Key":{ + "shape":"String", + "locationName":"key" + }, + "Value":{ + "shape":"String", + "locationName":"value" + } + } + }, + "TagDescriptionList":{ + "type":"list", + "member":{ + "shape":"TagDescription", + "locationName":"item" + } + }, + "TagList":{ + "type":"list", + "member":{ + "shape":"Tag", + "locationName":"item" + } + }, + "TelemetryStatus":{ + "type":"string", + "enum":[ + "UP", + "DOWN" + ] + }, + "Tenancy":{ + "type":"string", + "enum":[ + "default", + "dedicated", + "host" + ] + }, + "TerminateInstancesRequest":{ + "type":"structure", + "required":["InstanceIds"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + } + } + }, + "TerminateInstancesResult":{ + "type":"structure", + "members":{ + "TerminatingInstances":{ + "shape":"InstanceStateChangeList", + "locationName":"instancesSet" + } + } + }, + "TrafficType":{ + "type":"string", + "enum":[ + "ACCEPT", + "REJECT", + "ALL" + ] + }, + "UnassignPrivateIpAddressesRequest":{ + "type":"structure", + "required":[ + "NetworkInterfaceId", + "PrivateIpAddresses" + ], + "members":{ + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "PrivateIpAddresses":{ + "shape":"PrivateIpAddressStringList", + "locationName":"privateIpAddress" + } + } + }, + "UnmonitorInstancesRequest":{ + "type":"structure", + "required":["InstanceIds"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + } + } + }, + "UnmonitorInstancesResult":{ + "type":"structure", + "members":{ + "InstanceMonitorings":{ + "shape":"InstanceMonitoringList", + "locationName":"instancesSet" + } + } + }, + "UnsuccessfulItem":{ + "type":"structure", + "required":["Error"], + "members":{ + "ResourceId":{ + "shape":"String", + "locationName":"resourceId" + }, + "Error":{ + "shape":"UnsuccessfulItemError", + "locationName":"error" + } + } + }, + "UnsuccessfulItemError":{ + "type":"structure", + "required":[ + "Code", + "Message" + ], + "members":{ + "Code":{ + 
"shape":"String", + "locationName":"code" + }, + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "UnsuccessfulItemList":{ + "type":"list", + "member":{ + "shape":"UnsuccessfulItem", + "locationName":"item" + } + }, + "UnsuccessfulItemSet":{ + "type":"list", + "member":{ + "shape":"UnsuccessfulItem", + "locationName":"item" + } + }, + "UserBucket":{ + "type":"structure", + "members":{ + "S3Bucket":{"shape":"String"}, + "S3Key":{"shape":"String"} + } + }, + "UserBucketDetails":{ + "type":"structure", + "members":{ + "S3Bucket":{ + "shape":"String", + "locationName":"s3Bucket" + }, + "S3Key":{ + "shape":"String", + "locationName":"s3Key" + } + } + }, + "UserData":{ + "type":"structure", + "members":{ + "Data":{ + "shape":"String", + "locationName":"data" + } + } + }, + "UserGroupStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"UserGroup" + } + }, + "UserIdGroupPair":{ + "type":"structure", + "members":{ + "UserId":{ + "shape":"String", + "locationName":"userId" + }, + "GroupName":{ + "shape":"String", + "locationName":"groupName" + }, + "GroupId":{ + "shape":"String", + "locationName":"groupId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "VpcPeeringConnectionId":{ + "shape":"String", + "locationName":"vpcPeeringConnectionId" + }, + "PeeringStatus":{ + "shape":"String", + "locationName":"peeringStatus" + } + } + }, + "UserIdGroupPairList":{ + "type":"list", + "member":{ + "shape":"UserIdGroupPair", + "locationName":"item" + } + }, + "UserIdGroupPairSet":{ + "type":"list", + "member":{ + "shape":"UserIdGroupPair", + "locationName":"item" + } + }, + "UserIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"UserId" + } + }, + "ValueStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"item" + } + }, + "VgwTelemetry":{ + "type":"structure", + "members":{ + "OutsideIpAddress":{ + "shape":"String", + "locationName":"outsideIpAddress" + }, + "Status":{ + "shape":"TelemetryStatus", + "locationName":"status" + }, + "LastStatusChange":{ + "shape":"DateTime", + "locationName":"lastStatusChange" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "AcceptedRouteCount":{ + "shape":"Integer", + "locationName":"acceptedRouteCount" + } + } + }, + "VgwTelemetryList":{ + "type":"list", + "member":{ + "shape":"VgwTelemetry", + "locationName":"item" + } + }, + "VirtualizationType":{ + "type":"string", + "enum":[ + "hvm", + "paravirtual" + ] + }, + "Volume":{ + "type":"structure", + "members":{ + "VolumeId":{ + "shape":"String", + "locationName":"volumeId" + }, + "Size":{ + "shape":"Integer", + "locationName":"size" + }, + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "State":{ + "shape":"VolumeState", + "locationName":"status" + }, + "CreateTime":{ + "shape":"DateTime", + "locationName":"createTime" + }, + "Attachments":{ + "shape":"VolumeAttachmentList", + "locationName":"attachmentSet" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "VolumeType":{ + "shape":"VolumeType", + "locationName":"volumeType" + }, + "Iops":{ + "shape":"Integer", + "locationName":"iops" + }, + "Encrypted":{ + "shape":"Boolean", + "locationName":"encrypted" + }, + "KmsKeyId":{ + "shape":"String", + "locationName":"kmsKeyId" + } + } + }, + "VolumeAttachment":{ + "type":"structure", + "members":{ + "VolumeId":{ + "shape":"String", + 
"locationName":"volumeId" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Device":{ + "shape":"String", + "locationName":"device" + }, + "State":{ + "shape":"VolumeAttachmentState", + "locationName":"status" + }, + "AttachTime":{ + "shape":"DateTime", + "locationName":"attachTime" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + "locationName":"deleteOnTermination" + } + } + }, + "VolumeAttachmentList":{ + "type":"list", + "member":{ + "shape":"VolumeAttachment", + "locationName":"item" + } + }, + "VolumeAttachmentState":{ + "type":"string", + "enum":[ + "attaching", + "attached", + "detaching", + "detached" + ] + }, + "VolumeAttributeName":{ + "type":"string", + "enum":[ + "autoEnableIO", + "productCodes" + ] + }, + "VolumeDetail":{ + "type":"structure", + "required":["Size"], + "members":{ + "Size":{ + "shape":"Long", + "locationName":"size" + } + } + }, + "VolumeIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"VolumeId" + } + }, + "VolumeList":{ + "type":"list", + "member":{ + "shape":"Volume", + "locationName":"item" + } + }, + "VolumeState":{ + "type":"string", + "enum":[ + "creating", + "available", + "in-use", + "deleting", + "deleted", + "error" + ] + }, + "VolumeStatusAction":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"String", + "locationName":"code" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "EventType":{ + "shape":"String", + "locationName":"eventType" + }, + "EventId":{ + "shape":"String", + "locationName":"eventId" + } + } + }, + "VolumeStatusActionsList":{ + "type":"list", + "member":{ + "shape":"VolumeStatusAction", + "locationName":"item" + } + }, + "VolumeStatusDetails":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"VolumeStatusName", + "locationName":"name" + }, + "Status":{ + "shape":"String", + "locationName":"status" + } + } + }, + "VolumeStatusDetailsList":{ + "type":"list", + "member":{ + "shape":"VolumeStatusDetails", + "locationName":"item" + } + }, + "VolumeStatusEvent":{ + "type":"structure", + "members":{ + "EventType":{ + "shape":"String", + "locationName":"eventType" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "NotBefore":{ + "shape":"DateTime", + "locationName":"notBefore" + }, + "NotAfter":{ + "shape":"DateTime", + "locationName":"notAfter" + }, + "EventId":{ + "shape":"String", + "locationName":"eventId" + } + } + }, + "VolumeStatusEventsList":{ + "type":"list", + "member":{ + "shape":"VolumeStatusEvent", + "locationName":"item" + } + }, + "VolumeStatusInfo":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"VolumeStatusInfoStatus", + "locationName":"status" + }, + "Details":{ + "shape":"VolumeStatusDetailsList", + "locationName":"details" + } + } + }, + "VolumeStatusInfoStatus":{ + "type":"string", + "enum":[ + "ok", + "impaired", + "insufficient-data" + ] + }, + "VolumeStatusItem":{ + "type":"structure", + "members":{ + "VolumeId":{ + "shape":"String", + "locationName":"volumeId" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "VolumeStatus":{ + "shape":"VolumeStatusInfo", + "locationName":"volumeStatus" + }, + "Events":{ + "shape":"VolumeStatusEventsList", + "locationName":"eventsSet" + }, + "Actions":{ + "shape":"VolumeStatusActionsList", + "locationName":"actionsSet" + } + } + }, + "VolumeStatusList":{ + "type":"list", + "member":{ + "shape":"VolumeStatusItem", + "locationName":"item" + } + }, + "VolumeStatusName":{ + 
"type":"string", + "enum":[ + "io-enabled", + "io-performance" + ] + }, + "VolumeType":{ + "type":"string", + "enum":[ + "standard", + "io1", + "gp2", + "sc1", + "st1" + ] + }, + "Vpc":{ + "type":"structure", + "members":{ + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "State":{ + "shape":"VpcState", + "locationName":"state" + }, + "CidrBlock":{ + "shape":"String", + "locationName":"cidrBlock" + }, + "DhcpOptionsId":{ + "shape":"String", + "locationName":"dhcpOptionsId" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "InstanceTenancy":{ + "shape":"Tenancy", + "locationName":"instanceTenancy" + }, + "IsDefault":{ + "shape":"Boolean", + "locationName":"isDefault" + } + } + }, + "VpcAttachment":{ + "type":"structure", + "members":{ + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "State":{ + "shape":"AttachmentStatus", + "locationName":"state" + } + } + }, + "VpcAttachmentList":{ + "type":"list", + "member":{ + "shape":"VpcAttachment", + "locationName":"item" + } + }, + "VpcAttributeName":{ + "type":"string", + "enum":[ + "enableDnsSupport", + "enableDnsHostnames" + ] + }, + "VpcClassicLink":{ + "type":"structure", + "members":{ + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "ClassicLinkEnabled":{ + "shape":"Boolean", + "locationName":"classicLinkEnabled" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "VpcClassicLinkIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"VpcId" + } + }, + "VpcClassicLinkList":{ + "type":"list", + "member":{ + "shape":"VpcClassicLink", + "locationName":"item" + } + }, + "VpcEndpoint":{ + "type":"structure", + "members":{ + "VpcEndpointId":{ + "shape":"String", + "locationName":"vpcEndpointId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "ServiceName":{ + "shape":"String", + "locationName":"serviceName" + }, + "State":{ + "shape":"State", + "locationName":"state" + }, + "PolicyDocument":{ + "shape":"String", + "locationName":"policyDocument" + }, + "RouteTableIds":{ + "shape":"ValueStringList", + "locationName":"routeTableIdSet" + }, + "CreationTimestamp":{ + "shape":"DateTime", + "locationName":"creationTimestamp" + } + } + }, + "VpcEndpointSet":{ + "type":"list", + "member":{ + "shape":"VpcEndpoint", + "locationName":"item" + } + }, + "VpcIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"VpcId" + } + }, + "VpcList":{ + "type":"list", + "member":{ + "shape":"Vpc", + "locationName":"item" + } + }, + "VpcPeeringConnection":{ + "type":"structure", + "members":{ + "AccepterVpcInfo":{ + "shape":"VpcPeeringConnectionVpcInfo", + "locationName":"accepterVpcInfo" + }, + "ExpirationTime":{ + "shape":"DateTime", + "locationName":"expirationTime" + }, + "RequesterVpcInfo":{ + "shape":"VpcPeeringConnectionVpcInfo", + "locationName":"requesterVpcInfo" + }, + "Status":{ + "shape":"VpcPeeringConnectionStateReason", + "locationName":"status" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "VpcPeeringConnectionId":{ + "shape":"String", + "locationName":"vpcPeeringConnectionId" + } + } + }, + "VpcPeeringConnectionList":{ + "type":"list", + "member":{ + "shape":"VpcPeeringConnection", + "locationName":"item" + } + }, + "VpcPeeringConnectionOptionsDescription":{ + "type":"structure", + "members":{ + "AllowEgressFromLocalClassicLinkToRemoteVpc":{ + "shape":"Boolean", + "locationName":"allowEgressFromLocalClassicLinkToRemoteVpc" + }, + "AllowEgressFromLocalVpcToRemoteClassicLink":{ 
+ "shape":"Boolean", + "locationName":"allowEgressFromLocalVpcToRemoteClassicLink" + } + } + }, + "VpcPeeringConnectionStateReason":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"VpcPeeringConnectionStateReasonCode", + "locationName":"code" + }, + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "VpcPeeringConnectionStateReasonCode":{ + "type":"string", + "enum":[ + "initiating-request", + "pending-acceptance", + "active", + "deleted", + "rejected", + "failed", + "expired", + "provisioning", + "deleting" + ] + }, + "VpcPeeringConnectionVpcInfo":{ + "type":"structure", + "members":{ + "CidrBlock":{ + "shape":"String", + "locationName":"cidrBlock" + }, + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "PeeringOptions":{ + "shape":"VpcPeeringConnectionOptionsDescription", + "locationName":"peeringOptions" + } + } + }, + "VpcState":{ + "type":"string", + "enum":[ + "pending", + "available" + ] + }, + "VpnConnection":{ + "type":"structure", + "members":{ + "VpnConnectionId":{ + "shape":"String", + "locationName":"vpnConnectionId" + }, + "State":{ + "shape":"VpnState", + "locationName":"state" + }, + "CustomerGatewayConfiguration":{ + "shape":"String", + "locationName":"customerGatewayConfiguration" + }, + "Type":{ + "shape":"GatewayType", + "locationName":"type" + }, + "CustomerGatewayId":{ + "shape":"String", + "locationName":"customerGatewayId" + }, + "VpnGatewayId":{ + "shape":"String", + "locationName":"vpnGatewayId" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "VgwTelemetry":{ + "shape":"VgwTelemetryList", + "locationName":"vgwTelemetry" + }, + "Options":{ + "shape":"VpnConnectionOptions", + "locationName":"options" + }, + "Routes":{ + "shape":"VpnStaticRouteList", + "locationName":"routes" + } + } + }, + "VpnConnectionIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"VpnConnectionId" + } + }, + "VpnConnectionList":{ + "type":"list", + "member":{ + "shape":"VpnConnection", + "locationName":"item" + } + }, + "VpnConnectionOptions":{ + "type":"structure", + "members":{ + "StaticRoutesOnly":{ + "shape":"Boolean", + "locationName":"staticRoutesOnly" + } + } + }, + "VpnConnectionOptionsSpecification":{ + "type":"structure", + "members":{ + "StaticRoutesOnly":{ + "shape":"Boolean", + "locationName":"staticRoutesOnly" + } + } + }, + "VpnGateway":{ + "type":"structure", + "members":{ + "VpnGatewayId":{ + "shape":"String", + "locationName":"vpnGatewayId" + }, + "State":{ + "shape":"VpnState", + "locationName":"state" + }, + "Type":{ + "shape":"GatewayType", + "locationName":"type" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "VpcAttachments":{ + "shape":"VpcAttachmentList", + "locationName":"attachments" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "VpnGatewayIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"VpnGatewayId" + } + }, + "VpnGatewayList":{ + "type":"list", + "member":{ + "shape":"VpnGateway", + "locationName":"item" + } + }, + "VpnState":{ + "type":"string", + "enum":[ + "pending", + "available", + "deleting", + "deleted" + ] + }, + "VpnStaticRoute":{ + "type":"structure", + "members":{ + "DestinationCidrBlock":{ + "shape":"String", + "locationName":"destinationCidrBlock" + }, + "Source":{ + "shape":"VpnStaticRouteSource", + "locationName":"source" + }, + "State":{ + "shape":"VpnState", + 
"locationName":"state" + } + } + }, + "VpnStaticRouteList":{ + "type":"list", + "member":{ + "shape":"VpnStaticRoute", + "locationName":"item" + } + }, + "VpnStaticRouteSource":{ + "type":"string", + "enum":["Static"] + }, + "ZoneNameStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ZoneName" + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2016-04-01/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2016-04-01/docs-2.json new file mode 100644 index 000000000..5f732e89a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2016-04-01/docs-2.json @@ -0,0 +1,6410 @@ +{ + "version": "2.0", + "service": "Amazon Elastic Compute Cloud

Amazon Elastic Compute Cloud (Amazon EC2) provides resizable computing capacity in the Amazon Web Services (AWS) cloud. Using Amazon EC2 eliminates your need to invest in hardware up front, so you can develop and deploy applications faster.
    ", + "operations": { + "AcceptVpcPeeringConnection": "

    Accept a VPC peering connection request. To accept a request, the VPC peering connection must be in the pending-acceptance state, and you must be the owner of the peer VPC. Use the DescribeVpcPeeringConnections request to view your outstanding VPC peering connection requests.

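A minimal sketch of the corresponding Go call, assuming a configured *ec2.EC2 client; the helper name and the pcx- ID are placeholders, not part of the vendored code:

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// acceptPeering accepts a pending-acceptance peering request; pcxID is a
// placeholder connection ID owned by the accepter's account.
func acceptPeering(svc *ec2.EC2, pcxID string) error {
	_, err := svc.AcceptVpcPeeringConnection(&ec2.AcceptVpcPeeringConnectionInput{
		VpcPeeringConnectionId: aws.String(pcxID),
	})
	return err
}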
    ", + "AllocateAddress": "

    Acquires an Elastic IP address.

    An Elastic IP address is for use either in the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

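A sketch of the VPC variant in Go, under the same client assumption; passing Domain "standard" would target the EC2-Classic platform instead:

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// allocateVpcAddress acquires an Elastic IP for use in a VPC and returns
// the allocation ID and public IP from the response.
func allocateVpcAddress(svc *ec2.EC2) (allocationID, publicIP string, err error) {
	out, err := svc.AllocateAddress(&ec2.AllocateAddressInput{
		Domain: aws.String("vpc"),
	})
	if err != nil {
		return "", "", err
	}
	return aws.StringValue(out.AllocationId), aws.StringValue(out.PublicIp), nil
}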
    ", + "AllocateHosts": "

    Allocates a Dedicated host to your account. At minimum you need to specify the instance size type, Availability Zone, and quantity of hosts you want to allocate.

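A sketch of supplying those three required members from Go; the instance type and Availability Zone are placeholder values:

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// allocateHosts reserves Dedicated Host capacity and returns the new host IDs.
func allocateHosts(svc *ec2.EC2, quantity int64) ([]*string, error) {
	out, err := svc.AllocateHosts(&ec2.AllocateHostsInput{
		InstanceType:     aws.String("m4.large"),   // placeholder instance type
		AvailabilityZone: aws.String("us-east-1a"), // placeholder AZ
		Quantity:         aws.Int64(quantity),
	})
	if err != nil {
		return nil, err
	}
	return out.HostIds, nil
}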
    ", + "AssignPrivateIpAddresses": "

    Assigns one or more secondary private IP addresses to the specified network interface. You can specify one or more specific secondary IP addresses, or you can specify the number of secondary IP addresses to be automatically assigned within the subnet's CIDR block range. The number of secondary IP addresses that you can assign to an instance varies by instance type. For information about instance types, see Instance Types in the Amazon Elastic Compute Cloud User Guide. For more information about Elastic IP addresses, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

    AssignPrivateIpAddresses is available only in EC2-VPC.

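A sketch of the "automatically assigned" path described above, letting EC2 pick free addresses from the subnet's range; the helper name and ENI ID are placeholders:

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// assignSecondaryIPs asks EC2 to pick n free secondary addresses from the
// subnet's CIDR block; eniID is a placeholder network-interface ID.
func assignSecondaryIPs(svc *ec2.EC2, eniID string, n int64) error {
	_, err := svc.AssignPrivateIpAddresses(&ec2.AssignPrivateIpAddressesInput{
		NetworkInterfaceId:             aws.String(eniID),
		SecondaryPrivateIpAddressCount: aws.Int64(n),
	})
	return err
}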
    ", + "AssociateAddress": "

    Associates an Elastic IP address with an instance or a network interface.

    An Elastic IP address is for use in either the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

    [EC2-Classic, VPC in an EC2-VPC-only account] If the Elastic IP address is already associated with a different instance, it is disassociated from that instance and associated with the specified instance.

    [VPC in an EC2-Classic account] If you don't specify a private IP address, the Elastic IP address is associated with the primary IP address. If the Elastic IP address is already associated with a different instance or a network interface, you get an error unless you allow reassociation.

    This is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error.

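A sketch of the VPC form in Go; AllowReassociation opts in to the reassociation behavior described above instead of getting an error. Both IDs are placeholders:

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// associateAddress attaches an allocated Elastic IP to an instance and
// returns the association ID from the response.
func associateAddress(svc *ec2.EC2, allocationID, instanceID string) (string, error) {
	out, err := svc.AssociateAddress(&ec2.AssociateAddressInput{
		AllocationId:       aws.String(allocationID),
		InstanceId:         aws.String(instanceID),
		AllowReassociation: aws.Bool(true),
	})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.AssociationId), nil
}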
    ", + "AssociateDhcpOptions": "

    Associates a set of DHCP options (that you've previously created) with the specified VPC, or associates no DHCP options with the VPC.

    After you associate the options with the VPC, any existing instances and all new instances that you launch in that VPC use the options. You don't need to restart or relaunch the instances. They automatically pick up the changes within a few hours, depending on how frequently the instance renews its DHCP lease. You can explicitly renew the lease using the operating system on the instance.

    For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

    ", + "AssociateRouteTable": "

    Associates a subnet with a route table. The subnet and route table must be in the same VPC. This association causes traffic originating from the subnet to be routed according to the routes in the route table. The action returns an association ID, which you need in order to disassociate the route table from the subnet later. A route table can be associated with multiple subnets.

    For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

    ", + "AttachClassicLinkVpc": "

    Links an EC2-Classic instance to a ClassicLink-enabled VPC through one or more of the VPC's security groups. You cannot link an EC2-Classic instance to more than one VPC at a time. You can only link an instance that's in the running state. An instance is automatically unlinked from a VPC when it's stopped - you can link it to the VPC again when you restart it.

    After you've linked an instance, you cannot change the VPC security groups that are associated with it. To change the security groups, you must first unlink the instance, and then link it again.

    Linking your instance to a VPC is sometimes referred to as attaching your instance.

    ", + "AttachInternetGateway": "

    Attaches an Internet gateway to a VPC, enabling connectivity between the Internet and the VPC. For more information about your VPC and Internet gateway, see the Amazon Virtual Private Cloud User Guide.

    ", + "AttachNetworkInterface": "

    Attaches a network interface to an instance.

    ", + "AttachVolume": "

    Attaches an EBS volume to a running or stopped instance and exposes it to the instance with the specified device name.

    Encrypted EBS volumes may only be attached to instances that support Amazon EBS encryption. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

    For a list of supported device names, see Attaching an EBS Volume to an Instance. Any device names that aren't reserved for instance store volumes can be used for EBS volumes. For more information, see Amazon EC2 Instance Store in the Amazon Elastic Compute Cloud User Guide.

    If a volume has an AWS Marketplace product code:

    • The volume can be attached only to a stopped instance.

    • AWS Marketplace product codes are copied from the volume to the instance.

    • You must be subscribed to the product.

    • The instance type and operating system of the instance must support the product. For example, you can't detach a volume from a Windows instance and attach it to a Linux instance.

    For an overview of the AWS Marketplace, see Introducing AWS Marketplace.

    For more information about EBS volumes, see Attaching Amazon EBS Volumes in the Amazon Elastic Compute Cloud User Guide.

    ", + "AttachVpnGateway": "

    Attaches a virtual private gateway to a VPC. For more information, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "AuthorizeSecurityGroupEgress": "

    [EC2-VPC only] Adds one or more egress rules to a security group for use with a VPC. Specifically, this action permits instances to send traffic to one or more destination CIDR IP address ranges, or to one or more destination security groups for the same VPC. This action doesn't apply to security groups for use in EC2-Classic. For more information, see Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.

    You can have up to 50 rules per security group (covering both ingress and egress rules).

    Each rule consists of the protocol (for example, TCP), plus either a CIDR range or a source group. For the TCP and UDP protocols, you must also specify the destination port or port range. For the ICMP protocol, you must also specify the ICMP type and code. You can use -1 for the type or code to mean all types or all codes.

    Rule changes are propagated to affected instances as quickly as possible. However, a small delay might occur.

    ", + "AuthorizeSecurityGroupIngress": "

    Adds one or more ingress rules to a security group.

    EC2-Classic: You can have up to 100 rules per group.

    EC2-VPC: You can have up to 50 rules per group (covering both ingress and egress rules).

    Rule changes are propagated to instances within the security group as quickly as possible. However, a small delay might occur.

    [EC2-Classic] This action gives one or more CIDR IP address ranges permission to access a security group in your account, or gives one or more security groups (called the source groups) permission to access a security group for your account. A source group can be for your own AWS account, or another.

    [EC2-VPC] This action gives one or more CIDR IP address ranges permission to access a security group in your VPC, or gives one or more other security groups (called the source groups) permission to access a security group for your VPC. The security groups must all be for the same VPC.
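
    To make the protocol/port/CIDR rule shape concrete, a minimal sketch using the vendored aws-sdk-go client follows; the group ID and CIDR are placeholder values:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-east-1"), // placeholder
	})))

	// One ingress rule: protocol plus port range plus a CIDR range,
	// as described above. Group ID and CIDR are placeholders.
	_, err := svc.AuthorizeSecurityGroupIngress(&ec2.AuthorizeSecurityGroupIngressInput{
		GroupId:    aws.String("sg-0123456789abcdef0"),
		IpProtocol: aws.String("tcp"),
		FromPort:   aws.Int64(443),
		ToPort:     aws.Int64(443),
		CidrIp:     aws.String("203.0.113.0/24"),
	})
	if err != nil {
		log.Fatal(err)
	}
}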

    ", + "BundleInstance": "

    Bundles an Amazon instance store-backed Windows instance.

    During bundling, only the root device volume (C:\\) is bundled. Data on other instance store volumes is not preserved.

    This action is not applicable for Linux/Unix instances or Windows instances that are backed by Amazon EBS.

    For more information, see Creating an Instance Store-Backed Windows AMI.

    ", + "CancelBundleTask": "

    Cancels a bundling operation for an instance store-backed Windows instance.

    ", + "CancelConversionTask": "

    Cancels an active conversion task. The task can be the import of an instance or volume. The action removes all artifacts of the conversion, including a partially uploaded volume or instance. If the conversion is complete or is in the process of transferring the final disk image, the command fails and returns an exception.

    For more information, see Using the Command Line Tools to Import Your Virtual Machine to Amazon EC2 in the Amazon Elastic Compute Cloud User Guide.

    ", + "CancelExportTask": "

    Cancels an active export task. The request removes all artifacts of the export, including any partially-created Amazon S3 objects. If the export task is complete or is in the process of transferring the final disk image, the command fails and returns an error.

    ", + "CancelImportTask": "

    Cancels an in-process import virtual machine or import snapshot task.

    ", + "CancelReservedInstancesListing": "

    Cancels the specified Reserved Instance listing in the Reserved Instance Marketplace.

    For more information, see Reserved Instance Marketplace in the Amazon Elastic Compute Cloud User Guide.

    ", + "CancelSpotFleetRequests": "

    Cancels the specified Spot fleet requests.

    After you cancel a Spot fleet request, the Spot fleet launches no new Spot instances. You must specify whether the Spot fleet should also terminate its Spot instances. If you terminate the instances, the Spot fleet request enters the cancelled_terminating state. Otherwise, the Spot fleet request enters the cancelled_running state and the instances continue to run until they are interrupted or you terminate them manually.

    ", + "CancelSpotInstanceRequests": "

    Cancels one or more Spot instance requests. Spot instances are instances that Amazon EC2 starts on your behalf when the bid price that you specify exceeds the current Spot price. Amazon EC2 periodically sets the Spot price based on available Spot instance capacity and current Spot instance requests. For more information, see Spot Instance Requests in the Amazon Elastic Compute Cloud User Guide.

    Canceling a Spot instance request does not terminate running Spot instances associated with the request.

    ", + "ConfirmProductInstance": "

    Determines whether a product code is associated with an instance. This action can only be used by the owner of the product code. It is useful when a product code owner needs to verify whether another user's instance is eligible for support.

    ", + "CopyImage": "

    Initiates the copy of an AMI from the specified source region to the current region. You specify the destination region by using its endpoint when making the request.

    For more information, see Copying AMIs in the Amazon Elastic Compute Cloud User Guide.

    ", + "CopySnapshot": "

    Copies a point-in-time snapshot of an EBS volume and stores it in Amazon S3. You can copy the snapshot within the same region or from one region to another. You can use the snapshot to create EBS volumes or Amazon Machine Images (AMIs). The snapshot is copied to the regional endpoint that you send the HTTP request to.

    Copies of encrypted EBS snapshots remain encrypted. Copies of unencrypted snapshots remain unencrypted, unless the Encrypted flag is specified during the snapshot copy operation. By default, encrypted snapshot copies use the default AWS Key Management Service (AWS KMS) customer master key (CMK); however, you can specify a non-default CMK with the KmsKeyId parameter.

    To copy an encrypted snapshot that has been shared from another account, you must have permissions for the CMK used to encrypt the snapshot.

    For more information, see Copying an Amazon EBS Snapshot in the Amazon Elastic Compute Cloud User Guide.
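
    A sketch of a cross-region, re-encrypted copy with a non-default CMK, assuming the vendored aws-sdk-go client; the client runs in the destination region, and the snapshot ID and key alias are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// The request is sent to the destination region's endpoint;
	// SourceRegion names where the snapshot currently lives.
	svc := ec2.New(session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-west-2"), // placeholder destination
	})))

	out, err := svc.CopySnapshot(&ec2.CopySnapshotInput{
		SourceRegion:     aws.String("us-east-1"),
		SourceSnapshotId: aws.String("snap-0123456789abcdef0"), // placeholder
		Description:      aws.String("cross-region copy"),
		Encrypted:        aws.Bool(true),
		KmsKeyId:         aws.String("alias/my-key"), // placeholder CMK
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("new snapshot:", aws.StringValue(out.SnapshotId))
}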

    ", + "CreateCustomerGateway": "

    Provides information to AWS about your VPN customer gateway device. The customer gateway is the appliance at your end of the VPN connection. (The device on the AWS side of the VPN connection is the virtual private gateway.) You must provide the Internet-routable IP address of the customer gateway's external interface. The IP address must be static and may be behind a device performing network address translation (NAT).

    For devices that use Border Gateway Protocol (BGP), you can also provide the device's BGP Autonomous System Number (ASN). You can use an existing ASN assigned to your network. If you don't have an ASN already, you can use a private ASN (in the 64512 - 65534 range).

    Amazon EC2 supports all 2-byte ASN numbers in the range of 1 - 65534, with the exception of 7224, which is reserved in the us-east-1 region, and 9059, which is reserved in the eu-west-1 region.

    For more information about VPN customer gateways, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide.

    You cannot create more than one customer gateway with the same VPN type, IP address, and BGP ASN parameter values. If you run an identical request more than one time, the first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent requests do not create new customer gateway resources.

    ", + "CreateDhcpOptions": "

    Creates a set of DHCP options for your VPC. After creating the set, you must associate it with the VPC, causing all existing and new instances that you launch in the VPC to use this set of DHCP options. The following are the individual DHCP options you can specify. For more information about the options, see RFC 2132.

    • domain-name-servers - The IP addresses of up to four domain name servers, or AmazonProvidedDNS. The default DHCP option set specifies AmazonProvidedDNS. If specifying more than one domain name server, specify the IP addresses in a single parameter, separated by commas.

    • domain-name - If you're using AmazonProvidedDNS in \"us-east-1\", specify \"ec2.internal\". If you're using AmazonProvidedDNS in another region, specify \"region.compute.internal\" (for example, \"ap-northeast-1.compute.internal\"). Otherwise, specify a domain name (for example, \"MyCompany.com\"). Important: Some Linux operating systems accept multiple domain names separated by spaces. However, Windows and other Linux operating systems treat the value as a single domain, which results in unexpected behavior. If your DHCP options set is associated with a VPC that has instances with multiple operating systems, specify only one domain name.

    • ntp-servers - The IP addresses of up to four Network Time Protocol (NTP) servers.

    • netbios-name-servers - The IP addresses of up to four NetBIOS name servers.

    • netbios-node-type - The NetBIOS node type (1, 2, 4, or 8). We recommend that you specify 2 (broadcast and multicast are not currently supported). For more information about these node types, see RFC 2132.

    Your VPC automatically starts out with a set of DHCP options that includes only a DNS server that we provide (AmazonProvidedDNS). If you create a set of options, and if your VPC has an Internet gateway, make sure to set the domain-name-servers option either to AmazonProvidedDNS or to a domain name server of your choice. For more information about DHCP options, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.
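
    A minimal sketch of creating and associating such a set with the vendored aws-sdk-go client; the option values and VPC ID below are illustrative placeholders:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-east-1"), // placeholder
	})))

	// Two of the RFC 2132 options listed above; values are illustrative.
	opts, err := svc.CreateDhcpOptions(&ec2.CreateDhcpOptionsInput{
		DhcpConfigurations: []*ec2.NewDhcpConfiguration{
			{Key: aws.String("domain-name-servers"), Values: aws.StringSlice([]string{"AmazonProvidedDNS"})},
			{Key: aws.String("domain-name"), Values: aws.StringSlice([]string{"ec2.internal"})},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// The set takes effect only once associated with a (placeholder) VPC.
	_, err = svc.AssociateDhcpOptions(&ec2.AssociateDhcpOptionsInput{
		DhcpOptionsId: opts.DhcpOptions.DhcpOptionsId,
		VpcId:         aws.String("vpc-0123456789abcdef0"),
	})
	if err != nil {
		log.Fatal(err)
	}
}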

    ", + "CreateFlowLogs": "

    Creates one or more flow logs to capture IP traffic for a specific network interface, subnet, or VPC. Flow logs are delivered to a specified log group in Amazon CloudWatch Logs. If you specify a VPC or subnet in the request, a log stream is created in CloudWatch Logs for each network interface in the subnet or VPC. Log streams can include information about accepted and rejected traffic to a network interface. You can view the data in your log streams using Amazon CloudWatch Logs.

    In your request, you must also specify an IAM role that has permission to publish logs to CloudWatch Logs.

    ", + "CreateImage": "

    Creates an Amazon EBS-backed AMI from an Amazon EBS-backed instance that is either running or stopped.

    If you customized your instance with instance store volumes or EBS volumes in addition to the root device volume, the new AMI contains block device mapping information for those volumes. When you launch an instance from this new AMI, the instance automatically launches with those additional volumes.

    For more information, see Creating Amazon EBS-Backed Linux AMIs in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateInstanceExportTask": "

    Exports a running or stopped instance to an S3 bucket.

    For information about the supported operating systems, image formats, and known limitations for the types of instances you can export, see Exporting EC2 Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateInternetGateway": "

    Creates an Internet gateway for use with a VPC. After creating the Internet gateway, you attach it to a VPC using AttachInternetGateway.

    For more information about your VPC and Internet gateway, see the Amazon Virtual Private Cloud User Guide.

    ", + "CreateKeyPair": "

    Creates a 2048-bit RSA key pair with the specified name. Amazon EC2 stores the public key and displays the private key for you to save to a file. The private key is returned as an unencrypted PEM encoded PKCS#8 private key. If a key with the specified name already exists, Amazon EC2 returns an error.

    You can have up to five thousand key pairs per region.

    The key pair returned to you is available only in the region in which you create it. To create a key pair that is available in all regions, use ImportKeyPair.

    For more information about key pairs, see Key Pairs in the Amazon Elastic Compute Cloud User Guide.
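
    Since the private key is only returned once, a sketch of creating a key pair and persisting the PEM material, using the vendored aws-sdk-go client (key name and file path are placeholders):

package main

import (
	"io/ioutil"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-east-1"), // placeholder
	})))

	out, err := svc.CreateKeyPair(&ec2.CreateKeyPairInput{
		KeyName: aws.String("my-key"), // placeholder name
	})
	if err != nil {
		log.Fatal(err)
	}

	// KeyMaterial is the unencrypted PEM private key; EC2 returns it
	// only here, so save it with restrictive permissions.
	if err := ioutil.WriteFile("my-key.pem",
		[]byte(aws.StringValue(out.KeyMaterial)), 0400); err != nil {
		log.Fatal(err)
	}
}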

    ", + "CreateNatGateway": "

    Creates a NAT gateway in the specified subnet. A NAT gateway can be used to enable instances in a private subnet to connect to the Internet. This action creates a network interface in the specified subnet with a private IP address from the IP address range of the subnet. For more information, see NAT Gateways in the Amazon Virtual Private Cloud User Guide.

    ", + "CreateNetworkAcl": "

    Creates a network ACL in a VPC. Network ACLs provide an optional layer of security (in addition to security groups) for the instances in your VPC.

    For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

    ", + "CreateNetworkAclEntry": "

    Creates an entry (a rule) in a network ACL with the specified rule number. Each network ACL has a set of numbered ingress rules and a separate set of numbered egress rules. When determining whether a packet should be allowed in or out of a subnet associated with the ACL, we process the entries in the ACL according to the rule numbers, in ascending order.

    We recommend that you leave room between the rule numbers (for example, 100, 110, 120, ...), and not number them one right after the other (for example, 101, 102, 103, ...). This makes it easier to add a rule between existing ones without having to renumber the rules.

    After you add an entry, you can't modify it; you must either replace it, or create an entry and delete the old one.

    For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide.
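
    A sketch of adding an ingress rule with a spaced-out rule number, per the advice above, using the vendored aws-sdk-go client; the ACL ID and CIDR are placeholders, and protocol "6" is TCP:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-east-1"), // placeholder
	})))

	// Rule number 110 leaves room before and after it for later inserts.
	_, err := svc.CreateNetworkAclEntry(&ec2.CreateNetworkAclEntryInput{
		NetworkAclId: aws.String("acl-0123456789abcdef0"), // placeholder
		RuleNumber:   aws.Int64(110),
		Egress:       aws.Bool(false), // ingress rule
		Protocol:     aws.String("6"), // TCP
		RuleAction:   aws.String("allow"),
		CidrBlock:    aws.String("203.0.113.0/24"), // placeholder
		PortRange:    &ec2.PortRange{From: aws.Int64(443), To: aws.Int64(443)},
	})
	if err != nil {
		log.Fatal(err)
	}
}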

    ", + "CreateNetworkInterface": "

    Creates a network interface in the specified subnet.

    For more information about network interfaces, see Elastic Network Interfaces in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreatePlacementGroup": "

    Creates a placement group that you launch cluster instances into. You must give the group a name that's unique within the scope of your account.

    For more information about placement groups and cluster instances, see Cluster Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateReservedInstancesListing": "

    Creates a listing for Amazon EC2 Reserved Instances to be sold in the Reserved Instance Marketplace. You can submit one Reserved Instance listing at a time. To get a list of your Reserved Instances, you can use the DescribeReservedInstances operation.

    The Reserved Instance Marketplace matches sellers who want to resell Reserved Instance capacity that they no longer need with buyers who want to purchase additional capacity. Reserved Instances bought and sold through the Reserved Instance Marketplace work like any other Reserved Instances.

    To sell your Reserved Instances, you must first register as a seller in the Reserved Instance Marketplace. After completing the registration process, you can create a Reserved Instance Marketplace listing of some or all of your Reserved Instances, and specify the upfront price to receive for them. Your Reserved Instance listings then become available for purchase. To view the details of your Reserved Instance listing, you can use the DescribeReservedInstancesListings operation.

    For more information, see Reserved Instance Marketplace in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateRoute": "

    Creates a route in a route table within a VPC.

    You must specify one of the following targets: Internet gateway or virtual private gateway, NAT instance, NAT gateway, VPC peering connection, or network interface.

    When determining how to route traffic, we use the route with the most specific match. For example, let's say the traffic is destined for 192.0.2.3, and the route table includes the following two routes:

    • 192.0.2.0/24 (goes to some target A)

    • 192.0.2.0/28 (goes to some target B)

    Both routes apply to the traffic destined for 192.0.2.3. However, the second route in the list covers a smaller number of IP addresses and is therefore more specific, so we use that route to determine where to target the traffic.

    For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.
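
    For illustration, a sketch of creating a default route to an Internet gateway with the vendored aws-sdk-go client; any more specific route in the table still wins by the longest-prefix matching described above. The IDs are placeholders:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-east-1"), // placeholder
	})))

	// Route all otherwise-unmatched traffic to an Internet gateway.
	_, err := svc.CreateRoute(&ec2.CreateRouteInput{
		RouteTableId:         aws.String("rtb-0123456789abcdef0"), // placeholder
		DestinationCidrBlock: aws.String("0.0.0.0/0"),
		GatewayId:            aws.String("igw-0123456789abcdef0"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
}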

    ", + "CreateRouteTable": "

    Creates a route table for the specified VPC. After you create a route table, you can add routes and associate the table with a subnet.

    For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

    ", + "CreateSecurityGroup": "

    Creates a security group.

    A security group is for use with instances either in the EC2-Classic platform or in a specific VPC. For more information, see Amazon EC2 Security Groups in the Amazon Elastic Compute Cloud User Guide and Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.

    EC2-Classic: You can have up to 500 security groups.

    EC2-VPC: You can create up to 500 security groups per VPC.

    When you create a security group, you specify a friendly name of your choice. You can have a security group for use in EC2-Classic with the same name as a security group for use in a VPC. However, you can't have two security groups for use in EC2-Classic with the same name or two security groups for use in a VPC with the same name.

    You have a default security group for use in EC2-Classic and a default security group for use in your VPC. If you don't specify a security group when you launch an instance, the instance is launched into the appropriate default security group. A default security group includes a default rule that grants instances unrestricted network access to each other.

    You can add or remove rules from your security groups using AuthorizeSecurityGroupIngress, AuthorizeSecurityGroupEgress, RevokeSecurityGroupIngress, and RevokeSecurityGroupEgress.

    ", + "CreateSnapshot": "

    Creates a snapshot of an EBS volume and stores it in Amazon S3. You can use snapshots for backups, to make copies of EBS volumes, and to save data before shutting down an instance.

    When a snapshot is created, any AWS Marketplace product codes that are associated with the source volume are propagated to the snapshot.

    You can take a snapshot of an attached volume that is in use. However, snapshots only capture data that has been written to your EBS volume at the time the snapshot command is issued; this may exclude any data that has been cached by any applications or the operating system. If you can pause any file systems on the volume long enough to take a snapshot, your snapshot should be complete. However, if you cannot pause all file writes to the volume, you should unmount the volume from within the instance, issue the snapshot command, and then remount the volume to ensure a consistent and complete snapshot. You may remount and use your volume while the snapshot status is pending.

    To create a snapshot for EBS volumes that serve as root devices, you should stop the instance before taking the snapshot.

    Snapshots that are taken from encrypted volumes are automatically encrypted. Volumes that are created from encrypted snapshots are also automatically encrypted. Your encrypted volumes and any associated snapshots always remain protected.

    For more information, see Amazon Elastic Block Store and Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateSpotDatafeedSubscription": "

    Creates a data feed for Spot instances, enabling you to view Spot instance usage logs. You can create one data feed per AWS account. For more information, see Spot Instance Data Feed in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateSubnet": "

    Creates a subnet in an existing VPC.

    When you create each subnet, you provide the VPC ID and the CIDR block you want for the subnet. After you create a subnet, you can't change its CIDR block. The subnet's CIDR block can be the same as the VPC's CIDR block (assuming you want only a single subnet in the VPC), or a subset of the VPC's CIDR block. If you create more than one subnet in a VPC, the subnets' CIDR blocks must not overlap. The smallest subnet (and VPC) you can create uses a /28 netmask (16 IP addresses), and the largest uses a /16 netmask (65,536 IP addresses).

    AWS reserves both the first four and the last IP address in each subnet's CIDR block. They're not available for use.

    If you add more than one subnet to a VPC, they're set up in a star topology with a logical router in the middle.

    If you launch an instance in a VPC using an Amazon EBS-backed AMI, the IP address doesn't change if you stop and restart the instance (unlike a similar instance launched outside a VPC, which gets a new IP address when restarted). It's therefore possible to have a subnet with no running instances (they're all stopped), but no remaining IP addresses available.

    For more information about subnets, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.
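
    A sketch of carving a /24 subnet (256 addresses, 5 of which AWS reserves as noted above) out of a hypothetical /16 VPC, using the vendored aws-sdk-go client; IDs, CIDRs, and the zone are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-east-1"), // placeholder
	})))

	// The subnet CIDR must fall within the VPC's CIDR and not overlap
	// any existing subnet in that VPC.
	out, err := svc.CreateSubnet(&ec2.CreateSubnetInput{
		VpcId:            aws.String("vpc-0123456789abcdef0"), // placeholder
		CidrBlock:        aws.String("10.0.1.0/24"),
		AvailabilityZone: aws.String("us-east-1a"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("subnet:", aws.StringValue(out.Subnet.SubnetId))
}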

    ", + "CreateTags": "

    Adds or overwrites one or more tags for the specified Amazon EC2 resource or resources. Each resource can have a maximum of 10 tags. Each tag consists of a key and optional value. Tag keys must be unique per resource.

    For more information about tags, see Tagging Your Resources in the Amazon Elastic Compute Cloud User Guide. For more information about creating IAM policies that control users' access to resources based on tags, see Supported Resource-Level Permissions for Amazon EC2 API Actions in the Amazon Elastic Compute Cloud User Guide.
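
    As a quick illustration with the vendored aws-sdk-go client, tagging two resources in one call; because keys are unique per resource, re-sending an existing key overwrites its value. The resource IDs are placeholders:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-east-1"), // placeholder
	})))

	// Apply the same pair of tags to an instance and a volume at once.
	_, err := svc.CreateTags(&ec2.CreateTagsInput{
		Resources: aws.StringSlice([]string{
			"i-0123456789abcdef0",   // placeholder instance
			"vol-0123456789abcdef0", // placeholder volume
		}),
		Tags: []*ec2.Tag{
			{Key: aws.String("Name"), Value: aws.String("web-1")},
			{Key: aws.String("Environment"), Value: aws.String("staging")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}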

    ", + "CreateVolume": "

    Creates an EBS volume that can be attached to an instance in the same Availability Zone. The volume is created in the regional endpoint that you send the HTTP request to. For more information see Regions and Endpoints.

    You can create a new empty volume or restore a volume from an EBS snapshot. Any AWS Marketplace product codes from the snapshot are propagated to the volume.

    You can create encrypted volumes with the Encrypted parameter. Encrypted volumes may only be attached to instances that support Amazon EBS encryption. Volumes that are created from encrypted snapshots are also automatically encrypted. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

    For more information, see Creating or Restoring an Amazon EBS Volume in the Amazon Elastic Compute Cloud User Guide.
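
    A sketch of creating an empty, encrypted volume with the vendored aws-sdk-go client; the zone is a placeholder, and the volume must be created in the same Availability Zone as the instance that will attach it:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-east-1"), // placeholder
	})))

	// An empty, encrypted 100 GiB general purpose (gp2) volume.
	vol, err := svc.CreateVolume(&ec2.CreateVolumeInput{
		AvailabilityZone: aws.String("us-east-1a"), // placeholder AZ
		Size:             aws.Int64(100),
		VolumeType:       aws.String("gp2"),
		Encrypted:        aws.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("volume:", aws.StringValue(vol.VolumeId))
}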

    ", + "CreateVpc": "

    Creates a VPC with the specified CIDR block.

    The smallest VPC you can create uses a /28 netmask (16 IP addresses), and the largest uses a /16 netmask (65,536 IP addresses). To help you decide how big to make your VPC, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

    By default, each instance you launch in the VPC has the default DHCP options, which includes only a default DNS server that we provide (AmazonProvidedDNS). For more information about DHCP options, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

    You can specify the instance tenancy value for the VPC when you create it. You can't change this value for the VPC after you create it. For more information, see Dedicated Instances in the Amazon Virtual Private Cloud User Guide.

    ", + "CreateVpcEndpoint": "

    Creates a VPC endpoint for a specified AWS service. An endpoint enables you to create a private connection between your VPC and another AWS service in your account. You can specify an endpoint policy to attach to the endpoint that will control access to the service from your VPC. You can also specify the VPC route tables that use the endpoint.

    Currently, only endpoints to Amazon S3 are supported.

    ", + "CreateVpcPeeringConnection": "

    Requests a VPC peering connection between two VPCs: a requester VPC that you own and a peer VPC with which to create the connection. The peer VPC can belong to another AWS account. The requester VPC and peer VPC cannot have overlapping CIDR blocks.

    The owner of the peer VPC must accept the peering request to activate the peering connection. The VPC peering connection request expires after 7 days, after which it cannot be accepted or rejected.

    A CreateVpcPeeringConnection request between VPCs with overlapping CIDR blocks results in the VPC peering connection having a status of failed.

    ", + "CreateVpnConnection": "

    Creates a VPN connection between an existing virtual private gateway and a VPN customer gateway. The only supported connection type is ipsec.1.

    The response includes information that you need to give to your network administrator to configure your customer gateway.

    We strongly recommend that you use HTTPS when calling this operation because the response contains sensitive cryptographic information for configuring your customer gateway.

    If you decide to shut down your VPN connection for any reason and later create a new VPN connection, you must reconfigure your customer gateway with the new information returned from this call.

    This is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error.

    For more information about VPN connections, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "CreateVpnConnectionRoute": "

    Creates a static route associated with a VPN connection between an existing virtual private gateway and a VPN customer gateway. The static route allows traffic to be routed from the virtual private gateway to the VPN customer gateway.

    For more information about VPN connections, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "CreateVpnGateway": "

    Creates a virtual private gateway. A virtual private gateway is the endpoint on the VPC side of your VPN connection. You can create a virtual private gateway before creating the VPC itself.

    For more information about virtual private gateways, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "DeleteCustomerGateway": "

    Deletes the specified customer gateway. You must delete the VPN connection before you can delete the customer gateway.

    ", + "DeleteDhcpOptions": "

    Deletes the specified set of DHCP options. You must disassociate the set of DHCP options before you can delete it. You can disassociate the set of DHCP options by associating either a new set of options or the default set of options with the VPC.

    ", + "DeleteFlowLogs": "

    Deletes one or more flow logs.

    ", + "DeleteInternetGateway": "

    Deletes the specified Internet gateway. You must detach the Internet gateway from the VPC before you can delete it.

    ", + "DeleteKeyPair": "

    Deletes the specified key pair, by removing the public key from Amazon EC2.

    ", + "DeleteNatGateway": "

    Deletes the specified NAT gateway. Deleting a NAT gateway disassociates its Elastic IP address, but does not release the address from your account. Deleting a NAT gateway does not delete any NAT gateway routes in your route tables.

    ", + "DeleteNetworkAcl": "

    Deletes the specified network ACL. You can't delete the ACL if it's associated with any subnets. You can't delete the default network ACL.

    ", + "DeleteNetworkAclEntry": "

    Deletes the specified ingress or egress entry (rule) from the specified network ACL.

    ", + "DeleteNetworkInterface": "

    Deletes the specified network interface. You must detach the network interface before you can delete it.

    ", + "DeletePlacementGroup": "

    Deletes the specified placement group. You must terminate all instances in the placement group before you can delete the placement group. For more information about placement groups and cluster instances, see Cluster Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "DeleteRoute": "

    Deletes the specified route from the specified route table.

    ", + "DeleteRouteTable": "

    Deletes the specified route table. You must disassociate the route table from any subnets before you can delete it. You can't delete the main route table.

    ", + "DeleteSecurityGroup": "

    Deletes a security group.

    If you attempt to delete a security group that is associated with an instance, or is referenced by another security group, the operation fails with InvalidGroup.InUse in EC2-Classic or DependencyViolation in EC2-VPC.

    ", + "DeleteSnapshot": "

    Deletes the specified snapshot.

    When you make periodic snapshots of a volume, the snapshots are incremental, and only the blocks on the device that have changed since your last snapshot are saved in the new snapshot. When you delete a snapshot, only the data not needed for any other snapshot is removed. So regardless of which prior snapshots have been deleted, all active snapshots will have access to all the information needed to restore the volume.

    You cannot delete a snapshot of the root device of an EBS volume used by a registered AMI. You must first de-register the AMI before you can delete the snapshot.

    For more information, see Deleting an Amazon EBS Snapshot in the Amazon Elastic Compute Cloud User Guide.

    ", + "DeleteSpotDatafeedSubscription": "

    Deletes the data feed for Spot instances.

    ", + "DeleteSubnet": "

    Deletes the specified subnet. You must terminate all running instances in the subnet before you can delete the subnet.

    ", + "DeleteTags": "

    Deletes the specified set of tags from the specified set of resources. This call is designed to follow a DescribeTags request.

    For more information about tags, see Tagging Your Resources in the Amazon Elastic Compute Cloud User Guide.

    ", + "DeleteVolume": "

    Deletes the specified EBS volume. The volume must be in the available state (not attached to an instance).

    The volume may remain in the deleting state for several minutes.

    For more information, see Deleting an Amazon EBS Volume in the Amazon Elastic Compute Cloud User Guide.

    ", + "DeleteVpc": "

    Deletes the specified VPC. You must detach or delete all gateways and resources that are associated with the VPC before you can delete it. For example, you must terminate all instances running in the VPC, delete all security groups associated with the VPC (except the default one), delete all route tables associated with the VPC (except the default one), and so on.

    ", + "DeleteVpcEndpoints": "

    Deletes one or more specified VPC endpoints. Deleting the endpoint also deletes the endpoint routes in the route tables that were associated with the endpoint.

    ", + "DeleteVpcPeeringConnection": "

    Deletes a VPC peering connection. Either the owner of the requester VPC or the owner of the peer VPC can delete the VPC peering connection if it's in the active state. The owner of the requester VPC can delete a VPC peering connection in the pending-acceptance state.

    ", + "DeleteVpnConnection": "

    Deletes the specified VPN connection.

    If you're deleting the VPC and its associated components, we recommend that you detach the virtual private gateway from the VPC and delete the VPC before deleting the VPN connection. If you believe that the tunnel credentials for your VPN connection have been compromised, you can delete the VPN connection and create a new one that has new keys, without needing to delete the VPC or virtual private gateway. If you create a new VPN connection, you must reconfigure the customer gateway using the new configuration information returned with the new VPN connection ID.

    ", + "DeleteVpnConnectionRoute": "

    Deletes the specified static route associated with a VPN connection between an existing virtual private gateway and a VPN customer gateway. The static route allows traffic to be routed from the virtual private gateway to the VPN customer gateway.

    ", + "DeleteVpnGateway": "

    Deletes the specified virtual private gateway. We recommend that before you delete a virtual private gateway, you detach it from the VPC and delete the VPN connection. Note that you don't need to delete the virtual private gateway if you plan to delete and recreate the VPN connection between your VPC and your network.

    ", + "DeregisterImage": "

    Deregisters the specified AMI. After you deregister an AMI, it can't be used to launch new instances.

    This command does not delete the AMI.

    ", + "DescribeAccountAttributes": "

    Describes attributes of your AWS account. The following are the supported account attributes:

    • supported-platforms: Indicates whether your account can launch instances into EC2-Classic and EC2-VPC, or only into EC2-VPC.

    • default-vpc: The ID of the default VPC for your account, or none.

    • max-instances: The maximum number of On-Demand instances that you can run.

    • vpc-max-security-groups-per-interface: The maximum number of security groups that you can assign to a network interface.

    • max-elastic-ips: The maximum number of Elastic IP addresses that you can allocate for use with EC2-Classic.

    • vpc-max-elastic-ips: The maximum number of Elastic IP addresses that you can allocate for use with EC2-VPC.

    ", + "DescribeAddresses": "

    Describes one or more of your Elastic IP addresses.

    An Elastic IP address is for use in either the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeAvailabilityZones": "

    Describes one or more of the Availability Zones that are available to you. The results include zones only for the region you're currently using. If there is an event impacting an Availability Zone, you can use this request to view the state and any provided message for that Availability Zone.

    For more information, see Regions and Availability Zones in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeBundleTasks": "

    Describes one or more of your bundling tasks.

    Completed bundle tasks are listed for only a limited time. If your bundle task is no longer in the list, you can still register an AMI from it. Just use RegisterImage with the Amazon S3 bucket name and image manifest name you provided to the bundle task.

    ", + "DescribeClassicLinkInstances": "

    Describes one or more of your linked EC2-Classic instances. This request only returns information about EC2-Classic instances linked to a VPC through ClassicLink; you cannot use this request to return information about other instances.

    ", + "DescribeConversionTasks": "

    Describes one or more of your conversion tasks. For more information, see Using the Command Line Tools to Import Your Virtual Machine to Amazon EC2 in the Amazon Elastic Compute Cloud User Guide.

    For information about the import manifest referenced by this API action, see VM Import Manifest.

    ", + "DescribeCustomerGateways": "

    Describes one or more of your VPN customer gateways.

    For more information about VPN customer gateways, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "DescribeDhcpOptions": "

    Describes one or more of your DHCP options sets.

    For more information about DHCP options sets, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

    ", + "DescribeExportTasks": "

    Describes one or more of your export tasks.

    ", + "DescribeFlowLogs": "

    Describes one or more flow logs. To view the information in your flow logs (the log streams for the network interfaces), you must use the CloudWatch Logs console or the CloudWatch Logs API.

    ", + "DescribeHosts": "

    Describes one or more of your Dedicated hosts.

    The results describe only the Dedicated hosts in the region you're currently using. All listed instances consume capacity on your Dedicated host. Dedicated hosts that have recently been released will be listed with the state released.

    ", + "DescribeIdFormat": "

    Describes the ID format settings for your resources on a per-region basis, for example, to view which resource types are enabled for longer IDs. This request only returns information about resource types whose ID formats can be modified; it does not return information about other resource types.

    The following resource types support longer IDs: instance | reservation | snapshot | volume.

    These settings apply to the IAM user who makes the request; they do not apply to the entire AWS account. An IAM user defaults to the same settings as the root user, unless they explicitly override the settings by running the ModifyIdFormat command. Resources created with longer IDs are visible to all IAM users, regardless of these settings, provided that they have permission to use the relevant Describe command for the resource type.

    ", + "DescribeIdentityIdFormat": "

    Describes the ID format settings for resources for the specified IAM user, IAM role, or root user. For example, you can view the resource types that are enabled for longer IDs. This request only returns information about resource types whose ID formats can be modified; it does not return information about other resource types. For more information, see Resource IDs in the Amazon Elastic Compute Cloud User Guide.

    The following resource types support longer IDs: instance | reservation | snapshot | volume.

    These settings apply to the principal specified in the request. They do not apply to the principal that makes the request.

    ", + "DescribeImageAttribute": "

    Describes the specified attribute of the specified AMI. You can specify only one attribute at a time.

    ", + "DescribeImages": "

    Describes one or more of the images (AMIs, AKIs, and ARIs) available to you. Images available to you include public images, private images that you own, and private images owned by other AWS accounts but for which you have explicit launch permissions.

    Deregistered images are included in the returned results for an unspecified interval after deregistration.

    ", + "DescribeImportImageTasks": "

    Displays details about import virtual machine or import snapshot tasks that have already been created.

    ", + "DescribeImportSnapshotTasks": "

    Describes your import snapshot tasks.

    ", + "DescribeInstanceAttribute": "

    Describes the specified attribute of the specified instance. You can specify only one attribute at a time. Valid attribute values are: instanceType | kernel | ramdisk | userData | disableApiTermination | instanceInitiatedShutdownBehavior | rootDeviceName | blockDeviceMapping | productCodes | sourceDestCheck | groupSet | ebsOptimized | sriovNetSupport

    ", + "DescribeInstanceStatus": "

    Describes the status of one or more instances. By default, only running instances are described, unless specified otherwise.

    Instance status includes the following components:

    • Status checks - Amazon EC2 performs status checks on running EC2 instances to identify hardware and software issues. For more information, see Status Checks for Your Instances and Troubleshooting Instances with Failed Status Checks in the Amazon Elastic Compute Cloud User Guide.

    • Scheduled events - Amazon EC2 can schedule events (such as reboot, stop, or terminate) for your instances related to hardware issues, software updates, or system maintenance. For more information, see Scheduled Events for Your Instances in the Amazon Elastic Compute Cloud User Guide.

    • Instance state - You can manage your instances from the moment you launch them through their termination. For more information, see Instance Lifecycle in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeInstances": "

    Describes one or more of your instances.

    If you specify one or more instance IDs, Amazon EC2 returns information for those instances. If you do not specify instance IDs, Amazon EC2 returns information for all relevant instances. If you specify an instance ID that is not valid, an error is returned. If you specify an instance that you do not own, it is not included in the returned results.

    Recently terminated instances might appear in the returned results. This interval is usually less than one hour.

    If you describe instances in the rare case where an Availability Zone is experiencing a service disruption and you specify instance IDs that are in the affected zone, or do not specify any instance IDs at all, the call fails. If you describe instances and specify only instance IDs that are in an unaffected zone, the call works normally.
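
    For illustration, a sketch of describing instances without instance IDs, narrowed by a filter, using the vendored aws-sdk-go client; the region is a placeholder:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-east-1"), // placeholder
	})))

	// No instance IDs: all relevant instances are returned, here
	// narrowed to those currently running.
	out, err := svc.DescribeInstances(&ec2.DescribeInstancesInput{
		Filters: []*ec2.Filter{{
			Name:   aws.String("instance-state-name"),
			Values: aws.StringSlice([]string{"running"}),
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, res := range out.Reservations {
		for _, inst := range res.Instances {
			fmt.Println(aws.StringValue(inst.InstanceId), aws.StringValue(inst.InstanceType))
		}
	}
}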

    ", + "DescribeInternetGateways": "

    Describes one or more of your Internet gateways.

    ", + "DescribeKeyPairs": "

    Describes one or more of your key pairs.

    For more information about key pairs, see Key Pairs in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeMovingAddresses": "

    Describes your Elastic IP addresses that are being moved to the EC2-VPC platform, or that are being restored to the EC2-Classic platform. This request does not return information about any other Elastic IP addresses in your account.

    ", + "DescribeNatGateways": "

    Describes one or more of your NAT gateways.

    ", + "DescribeNetworkAcls": "

    Describes one or more of your network ACLs.

    For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

    ", + "DescribeNetworkInterfaceAttribute": "

    Describes a network interface attribute. You can specify only one attribute at a time.

    ", + "DescribeNetworkInterfaces": "

    Describes one or more of your network interfaces.

    ", + "DescribePlacementGroups": "

    Describes one or more of your placement groups. For more information about placement groups and cluster instances, see Cluster Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribePrefixLists": "

    Describes available AWS services in a prefix list format, which includes the prefix list name and prefix list ID of the service and the IP address range for the service. A prefix list ID is required for creating an outbound security group rule that allows traffic from a VPC to access an AWS service through a VPC endpoint.

    ", + "DescribeRegions": "

    Describes one or more regions that are currently available to you.

    For a list of the regions supported by Amazon EC2, see Regions and Endpoints.

    ", + "DescribeReservedInstances": "

    Describes one or more of the Reserved Instances that you purchased.

    For more information about Reserved Instances, see Reserved Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeReservedInstancesListings": "

    Describes your account's Reserved Instance listings in the Reserved Instance Marketplace.

    The Reserved Instance Marketplace matches sellers who want to resell Reserved Instance capacity that they no longer need with buyers who want to purchase additional capacity. Reserved Instances bought and sold through the Reserved Instance Marketplace work like any other Reserved Instances.

    As a seller, you choose to list some or all of your Reserved Instances, and you specify the upfront price to receive for them. Your Reserved Instances are then listed in the Reserved Instance Marketplace and are available for purchase.

    As a buyer, you specify the configuration of the Reserved Instance to purchase, and the Marketplace matches what you're searching for with what's available. The Marketplace first sells the lowest priced Reserved Instances to you, and continues to sell available Reserved Instance listings to you until your demand is met. You are charged based on the total price of all of the listings that you purchase.

    For more information, see Reserved Instance Marketplace in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeReservedInstancesModifications": "

    Describes the modifications made to your Reserved Instances. If no parameter is specified, information about all your Reserved Instances modification requests is returned. If a modification ID is specified, only information about the specific modification is returned.

    For more information, see Modifying Reserved Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeReservedInstancesOfferings": "

    Describes Reserved Instance offerings that are available for purchase. With Reserved Instances, you purchase the right to launch instances for a period of time. During that time period, you do not receive insufficient capacity errors, and you pay a lower usage rate than the rate charged for On-Demand instances for the actual time used.

    If you have listed your own Reserved Instances for sale in the Reserved Instance Marketplace, they will be excluded from these results. This is to ensure that you do not purchase your own Reserved Instances.

    For more information, see Reserved Instance Marketplace in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeRouteTables": "

    Describes one or more of your route tables.

    Each subnet in your VPC must be associated with a route table. If a subnet is not explicitly associated with any route table, it is implicitly associated with the main route table. This command does not return the subnet ID for implicit associations.

    For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

    ", + "DescribeScheduledInstanceAvailability": "

    Finds available schedules that meet the specified criteria.

    You can search for an available schedule no more than 3 months in advance. You must meet the minimum required duration of 1,200 hours per year. For example, the minimum daily schedule is 4 hours, the minimum weekly schedule is 24 hours, and the minimum monthly schedule is 100 hours.

    After you find a schedule that meets your needs, call PurchaseScheduledInstances to purchase Scheduled Instances with that schedule.

    ", + "DescribeScheduledInstances": "

    Describes one or more of your Scheduled Instances.

    ", + "DescribeSecurityGroupReferences": "

    [EC2-VPC only] Describes the VPCs on the other side of a VPC peering connection that are referencing the security groups you've specified in this request.

    ", + "DescribeSecurityGroups": "

    Describes one or more of your security groups.

    A security group is for use with instances either in the EC2-Classic platform or in a specific VPC. For more information, see Amazon EC2 Security Groups in the Amazon Elastic Compute Cloud User Guide and Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "DescribeSnapshotAttribute": "

    Describes the specified attribute of the specified snapshot. You can specify only one attribute at a time.

    For more information about EBS snapshots, see Amazon EBS Snapshots in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeSnapshots": "

    Describes one or more of the EBS snapshots available to you. Available snapshots include public snapshots available for any AWS account to launch, private snapshots that you own, and private snapshots owned by another AWS account but for which you've been given explicit create volume permissions.

    The create volume permissions fall into the following categories:

    • public: The owner of the snapshot granted create volume permissions for the snapshot to the all group. All AWS accounts have create volume permissions for these snapshots.

    • explicit: The owner of the snapshot granted create volume permissions to a specific AWS account.

    • implicit: An AWS account has implicit create volume permissions for all snapshots it owns.

    The list of snapshots returned can be modified by specifying snapshot IDs, snapshot owners, or AWS accounts with create volume permissions. If no options are specified, Amazon EC2 returns all snapshots for which you have create volume permissions.

    If you specify one or more snapshot IDs, only snapshots that have the specified IDs are returned. If you specify an invalid snapshot ID, an error is returned. If you specify a snapshot ID for which you do not have access, it is not included in the returned results.

    If you specify one or more snapshot owners using the OwnerIds option, only snapshots from the specified owners and for which you have access are returned. The results can include the AWS account IDs of the specified owners, amazon for snapshots owned by Amazon, or self for snapshots that you own.

    If you specify a list of restorable users, only snapshots with create snapshot permissions for those users are returned. You can specify AWS account IDs (if you own the snapshots), self for snapshots for which you own or have explicit permissions, or all for public snapshots.

    If you are describing a long list of snapshots, you can paginate the output to make the list more manageable. The MaxResults parameter sets the maximum number of results returned in a single page. If the list of results exceeds your MaxResults value, then that number of results is returned along with a NextToken value that can be passed to a subsequent DescribeSnapshots request to retrieve the remaining results.

    For more information about EBS snapshots, see Amazon EBS Snapshots in the Amazon Elastic Compute Cloud User Guide.
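
    The MaxResults/NextToken pagination described above maps onto the SDK's page iterator; a sketch with the vendored aws-sdk-go client, listing snapshots you own (region and page size are placeholders):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-east-1"), // placeholder
	})))

	// The paginator feeds each MaxResults-sized page to the callback,
	// chasing NextToken until the callback returns false or the
	// results are exhausted.
	input := &ec2.DescribeSnapshotsInput{
		OwnerIds:   aws.StringSlice([]string{"self"}),
		MaxResults: aws.Int64(100),
	}
	err := svc.DescribeSnapshotsPages(input,
		func(page *ec2.DescribeSnapshotsOutput, lastPage bool) bool {
			for _, s := range page.Snapshots {
				fmt.Println(aws.StringValue(s.SnapshotId), aws.StringValue(s.State))
			}
			return true // keep paging
		})
	if err != nil {
		log.Fatal(err)
	}
}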

    ", + "DescribeSpotDatafeedSubscription": "

    Describes the data feed for Spot instances. For more information, see Spot Instance Data Feed in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeSpotFleetInstances": "

    Describes the running instances for the specified Spot fleet.

    ", + "DescribeSpotFleetRequestHistory": "

    Describes the events for the specified Spot fleet request during the specified time.

    Spot fleet events are delayed by up to 30 seconds before they can be described. This ensures that you can query by the last evaluated time and not miss a recorded event.

    ", + "DescribeSpotFleetRequests": "

    Describes your Spot fleet requests.

    ", + "DescribeSpotInstanceRequests": "

    Describes the Spot instance requests that belong to your account. Spot instances are instances that Amazon EC2 launches when the bid price that you specify exceeds the current Spot price. Amazon EC2 periodically sets the Spot price based on available Spot instance capacity and current Spot instance requests. For more information, see Spot Instance Requests in the Amazon Elastic Compute Cloud User Guide.

    You can use DescribeSpotInstanceRequests to find a running Spot instance by examining the response. If the status of the Spot instance request is fulfilled, the response contains the ID of the instance. Alternatively, you can use DescribeInstances with a filter to look for instances where the instance lifecycle is spot.

    ", + "DescribeSpotPriceHistory": "

    Describes the Spot price history. The prices returned are listed in chronological order, from the oldest to the most recent, for up to the past 90 days. For more information, see Spot Instance Pricing History in the Amazon Elastic Compute Cloud User Guide.

    When you specify a start and end time, this operation returns the prices of the instance types within the time range that you specified and the time when the price changed. The price is valid within the time period that you specified; the response merely indicates the last time that the price changed.

    ", + "DescribeStaleSecurityGroups": "

    [EC2-VPC only] Describes the stale security group rules for security groups in a specified VPC. Rules are stale when they reference a deleted security group in a peer VPC, or a security group in a peer VPC for which the VPC peering connection has been deleted.

    ", + "DescribeSubnets": "

    Describes one or more of your subnets.

    For more information about subnets, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

    ", + "DescribeTags": "

    Describes one or more of the tags for your EC2 resources.

    For more information about tags, see Tagging Your Resources in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeVolumeAttribute": "

    Describes the specified attribute of the specified volume. You can specify only one attribute at a time.

    For more information about EBS volumes, see Amazon EBS Volumes in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeVolumeStatus": "

    Describes the status of the specified volumes. Volume status provides the result of the checks performed on your volumes to determine events that can impair the performance of your volumes. The performance of a volume can be affected if an issue occurs on the volume's underlying host. If the volume's underlying host experiences a power outage or system issue, after the system is restored, there could be data inconsistencies on the volume. Volume events notify you if this occurs. Volume actions notify you if any action needs to be taken in response to the event.

    The DescribeVolumeStatus operation provides the following information about the specified volumes:

    Status: Reflects the current status of the volume. The possible values are ok, impaired, warning, or insufficient-data. If all checks pass, the overall status of the volume is ok. If the check fails, the overall status is impaired. If the status is insufficient-data, then the checks may still be taking place on your volume at the time. We recommend that you retry the request. For more information on volume status, see Monitoring the Status of Your Volumes.

    Events: Reflect the cause of a volume status and may require you to take action. For example, if your volume returns an impaired status, then the volume event might be potential-data-inconsistency. This means that your volume has been affected by an issue with the underlying host, has all I/O operations disabled, and may have inconsistent data.

    Actions: Reflect the actions you may have to take in response to an event. For example, if the status of the volume is impaired and the volume event shows potential-data-inconsistency, then the action shows enable-volume-io. This means that you may want to enable the I/O operations for the volume by calling the EnableVolumeIO action and then check the volume for data consistency.

    Volume status is based on the volume status checks, and does not reflect the volume state. Therefore, volume status does not indicate volumes in the error state (for example, when a volume is incapable of accepting I/O.)

    ", + "DescribeVolumes": "

    Describes the specified EBS volumes.

    If you are describing a long list of volumes, you can paginate the output to make the list more manageable. The MaxResults parameter sets the maximum number of results returned in a single page. If the list of results exceeds your MaxResults value, then that number of results is returned along with a NextToken value that can be passed to a subsequent DescribeVolumes request to retrieve the remaining results.

    For more information about EBS volumes, see Amazon EBS Volumes in the Amazon Elastic Compute Cloud User Guide.
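    As a hedged sketch of the pagination just described, the SDK's DescribeVolumesPages helper follows the NextToken chain so the caller never touches it; the listVolumes name, its package, and the MaxResults value are illustrative assumptions:

    package ec2sketches

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ec2"
    )

    // listVolumes prints every volume, one page of up to 100 results at a time.
    func listVolumes(svc *ec2.EC2) error {
        input := &ec2.DescribeVolumesInput{MaxResults: aws.Int64(100)}
        return svc.DescribeVolumesPages(input, func(page *ec2.DescribeVolumesOutput, lastPage bool) bool {
            for _, v := range page.Volumes {
                fmt.Println(aws.StringValue(v.VolumeId), aws.StringValue(v.State))
            }
            return true // keep requesting pages until NextToken is exhausted
        })
    }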

    ", + "DescribeVpcAttribute": "

    Describes the specified attribute of the specified VPC. You can specify only one attribute at a time.

    ", + "DescribeVpcClassicLink": "

    Describes the ClassicLink status of one or more VPCs.

    ", + "DescribeVpcClassicLinkDnsSupport": "

    Describes the ClassicLink DNS support status of one or more VPCs. If enabled, the DNS hostname of a linked EC2-Classic instance resolves to its private IP address when addressed from an instance in the VPC to which it's linked. Similarly, the DNS hostname of an instance in a VPC resolves to its private IP address when addressed from a linked EC2-Classic instance. For more information about ClassicLink, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeVpcEndpointServices": "

    Describes all supported AWS services that can be specified when creating a VPC endpoint.

    ", + "DescribeVpcEndpoints": "

    Describes one or more of your VPC endpoints.

    ", + "DescribeVpcPeeringConnections": "

    Describes one or more of your VPC peering connections.

    ", + "DescribeVpcs": "

    Describes one or more of your VPCs.

    ", + "DescribeVpnConnections": "

    Describes one or more of your VPN connections.

    For more information about VPN connections, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "DescribeVpnGateways": "

    Describes one or more of your virtual private gateways.

    For more information about virtual private gateways, see Adding an IPsec Hardware VPN to Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "DetachClassicLinkVpc": "

    Unlinks (detaches) a linked EC2-Classic instance from a VPC. After the instance has been unlinked, the VPC security groups are no longer associated with it. An instance is automatically unlinked from a VPC when it's stopped.

    ", + "DetachInternetGateway": "

    Detaches an Internet gateway from a VPC, disabling connectivity between the Internet and the VPC. The VPC must not contain any running instances with Elastic IP addresses.

    ", + "DetachNetworkInterface": "

    Detaches a network interface from an instance.

    ", + "DetachVolume": "

    Detaches an EBS volume from an instance. Make sure to unmount any file systems on the device within your operating system before detaching the volume. Failure to do so results in the volume being stuck in a busy state while detaching.

    If an Amazon EBS volume is the root device of an instance, it can't be detached while the instance is running. To detach the root volume, stop the instance first.

    When a volume with an AWS Marketplace product code is detached from an instance, the product code is no longer associated with the instance.

    For more information, see Detaching an Amazon EBS Volume in the Amazon Elastic Compute Cloud User Guide.

    ", + "DetachVpnGateway": "

    Detaches a virtual private gateway from a VPC. You do this if you're planning to turn off the VPC and not use it anymore. You can confirm a virtual private gateway has been completely detached from a VPC by describing the virtual private gateway (any attachments to the virtual private gateway are also described).

    You must wait for the attachment's state to switch to detached before you can delete the VPC or attach a different VPC to the virtual private gateway.

    ", + "DisableVgwRoutePropagation": "

    Disables a virtual private gateway (VGW) from propagating routes to a specified route table of a VPC.

    ", + "DisableVpcClassicLink": "

    Disables ClassicLink for a VPC. You cannot disable ClassicLink for a VPC that has EC2-Classic instances linked to it.

    ", + "DisableVpcClassicLinkDnsSupport": "

    Disables ClassicLink DNS support for a VPC. If disabled, DNS hostnames resolve to public IP addresses when addressed between a linked EC2-Classic instance and instances in the VPC to which it's linked. For more information about ClassicLink, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

    ", + "DisassociateAddress": "

    Disassociates an Elastic IP address from the instance or network interface it's associated with.

    An Elastic IP address is for use in either the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

    This is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error.

    ", + "DisassociateRouteTable": "

    Disassociates a subnet from a route table.

    After you perform this action, the subnet no longer uses the routes in the route table. Instead, it uses the routes in the VPC's main route table. For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

    ", + "EnableVgwRoutePropagation": "

    Enables a virtual private gateway (VGW) to propagate routes to the specified route table of a VPC.

    ", + "EnableVolumeIO": "

    Enables I/O operations for a volume that had I/O operations disabled because the data on the volume was potentially inconsistent.

    ", + "EnableVpcClassicLink": "

    Enables a VPC for ClassicLink. You can then link EC2-Classic instances to your ClassicLink-enabled VPC to allow communication over private IP addresses. You cannot enable your VPC for ClassicLink if any of your VPC's route tables have existing routes for address ranges within the 10.0.0.0/8 IP address range, excluding local routes for VPCs in the 10.0.0.0/16 and 10.1.0.0/16 IP address ranges. For more information, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

    ", + "EnableVpcClassicLinkDnsSupport": "

    Enables a VPC to support DNS hostname resolution for ClassicLink. If enabled, the DNS hostname of a linked EC2-Classic instance resolves to its private IP address when addressed from an instance in the VPC to which it's linked. Similarly, the DNS hostname of an instance in a VPC resolves to its private IP address when addressed from a linked EC2-Classic instance. For more information about ClassicLink, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

    ", + "GetConsoleOutput": "

    Gets the console output for the specified instance.

    Instances do not have a physical monitor through which you can view their console output. They also lack physical controls that allow you to power up, reboot, or shut them down. To allow these actions, we provide them through the Amazon EC2 API and command line interface.

    Instance console output is buffered and posted shortly after instance boot, reboot, and termination. Amazon EC2 preserves the most recent 64 KB output which is available for at least one hour after the most recent post.

    For Linux instances, the instance console output displays the exact console output that would normally be displayed on a physical monitor attached to a computer. This output is buffered because the instance produces it and then posts it to a store where the instance's owner can retrieve it.

    For Windows instances, the instance console output includes output from the EC2Config service.

    ", + "GetConsoleScreenshot": "

    Retrieves a JPG-format screenshot of a running instance to help with troubleshooting.

    The returned content is Base64-encoded.

    ", + "GetPasswordData": "

    Retrieves the encrypted administrator password for an instance running Windows.

    The Windows password is generated at boot if the EC2Config service plugin, Ec2SetPassword, is enabled. This usually only happens the first time an AMI is launched, and then Ec2SetPassword is automatically disabled. The password is not generated for rebundled AMIs unless Ec2SetPassword is enabled before bundling.

    The password is encrypted using the key pair that you specified when you launched the instance. You must provide the corresponding key pair file.

    Password generation and encryption takes a few moments. We recommend that you wait up to 15 minutes after launching an instance before trying to retrieve the generated password.
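    Rather than sleeping a fixed 15 minutes, a caller can lean on the SDK's waiter, which polls until password data is available. A minimal sketch, assuming a hypothetical fetchPasswordData helper and package; decrypting the returned blob with the key pair's private key is out of scope here:

    package ec2sketches

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ec2"
    )

    // fetchPasswordData blocks until GetPasswordData returns non-empty data.
    func fetchPasswordData(svc *ec2.EC2, instanceID string) error {
        input := &ec2.GetPasswordDataInput{InstanceId: aws.String(instanceID)}
        if err := svc.WaitUntilPasswordDataAvailable(input); err != nil {
            return err
        }
        out, err := svc.GetPasswordData(input)
        if err != nil {
            return err
        }
        // Still encrypted: decrypt with the private key of the launch key pair.
        fmt.Println("encrypted password (base64):", aws.StringValue(out.PasswordData))
        return nil
    }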

    ", + "ImportImage": "

    Imports single- or multi-volume disk images or EBS snapshots into an Amazon Machine Image (AMI).

    ", + "ImportInstance": "

    Creates an import instance task using metadata from the specified disk image. ImportInstance only supports single-volume VMs. To import multi-volume VMs, use ImportImage. After importing the image, you then upload it using the ec2-import-volume command in the EC2 command line tools. For more information, see Using the Command Line Tools to Import Your Virtual Machine to Amazon EC2 in the Amazon Elastic Compute Cloud User Guide.

    For information about the import manifest referenced by this API action, see VM Import Manifest.

    ", + "ImportKeyPair": "

    Imports the public key from an RSA key pair that you created with a third-party tool. Compare this with CreateKeyPair, in which AWS creates the key pair and gives the keys to you (AWS keeps a copy of the public key). With ImportKeyPair, you create the key pair and give AWS just the public key. The private key is never transferred between you and AWS.

    For more information about key pairs, see Key Pairs in the Amazon Elastic Compute Cloud User Guide.

    ", + "ImportSnapshot": "

    Imports a disk into an EBS snapshot.

    ", + "ImportVolume": "

    Creates an import volume task using metadata from the specified disk image. After importing the image, you then upload it using the ec2-import-volume command in the Amazon EC2 command-line interface (CLI) tools. For more information, see Using the Command Line Tools to Import Your Virtual Machine to Amazon EC2 in the Amazon Elastic Compute Cloud User Guide.

    For information about the import manifest referenced by this API action, see VM Import Manifest.

    ", + "ModifyHosts": "

    Modifies the auto-placement setting of a Dedicated host. When auto-placement is enabled, AWS places instances that you launch with a tenancy of host, but without targeting a specific host ID, onto any available Dedicated host in your account that has auto-placement enabled. When auto-placement is disabled, you need to provide a host ID if you want the instance to launch onto a specific host. If no host ID is provided, the instance is launched onto a suitable host that has auto-placement enabled.

    ", + "ModifyIdFormat": "

    Modifies the ID format for the specified resource on a per-region basis. You can specify that resources should receive longer IDs (17-character IDs) when they are created. The following resource types support longer IDs: instance | reservation | snapshot | volume.

    This setting applies to the IAM user who makes the request; it does not apply to the entire AWS account. By default, an IAM user inherits the same settings as the root user. If you're using this action as the root user, then these settings apply to the entire account, unless an IAM user explicitly overrides these settings for themselves. For more information, see Resource IDs in the Amazon Elastic Compute Cloud User Guide.

    Resources created with longer IDs are visible to all IAM roles and users, regardless of these settings and provided that they have permission to use the relevant Describe command for the resource type.
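    A minimal Go sketch of the opt-in, assuming a hypothetical optInToLongIds helper and package:

    package ec2sketches

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ec2"
    )

    // optInToLongIds opts the calling principal in to 17-character IDs for one
    // of the supported resource types listed above.
    func optInToLongIds(svc *ec2.EC2, resource string) error {
        _, err := svc.ModifyIdFormat(&ec2.ModifyIdFormatInput{
            Resource:   aws.String(resource), // "instance" | "reservation" | "snapshot" | "volume"
            UseLongIds: aws.Bool(true),
        })
        return err
    }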

    ", + "ModifyIdentityIdFormat": "

    Modifies the ID format of a resource for the specified IAM user, IAM role, or root user. You can specify that resources should receive longer IDs (17-character IDs) when they are created. The following resource types support longer IDs: instance | reservation | snapshot | volume. For more information, see Resource IDs in the Amazon Elastic Compute Cloud User Guide.

    This setting applies to the principal specified in the request; it does not apply to the principal that makes the request.

    Resources created with longer IDs are visible to all IAM roles and users, regardless of these settings and provided that they have permission to use the relevant Describe command for the resource type.

    ", + "ModifyImageAttribute": "

    Modifies the specified attribute of the specified AMI. You can specify only one attribute at a time.

    AWS Marketplace product codes cannot be modified. Images with an AWS Marketplace product code cannot be made public.

    ", + "ModifyInstanceAttribute": "

    Modifies the specified attribute of the specified instance. You can specify only one attribute at a time.

    To modify some attributes, the instance must be stopped. For more information, see Modifying Attributes of a Stopped Instance in the Amazon Elastic Compute Cloud User Guide.

    ", + "ModifyInstancePlacement": "

    Sets the instance affinity value for a specific stopped instance and modifies the instance tenancy setting.

    Instance affinity is disabled by default. When instance affinity is host and the instance is not associated with a specific Dedicated host, the next time the instance is launched it is automatically associated with the host it lands on. This relationship persists if the instance is stopped/started, or rebooted.

    You can modify the host ID associated with a stopped instance. If a stopped instance has a new host ID association, the instance will target that host when restarted.

    You can modify the tenancy of a stopped instance with a tenancy of host or dedicated.

    Affinity, hostID, and tenancy are not required parameters, but at least one of them must be specified in the request. Affinity and tenancy can be modified in the same request, but tenancy can only be modified on instances that are stopped.
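    A minimal Go sketch that sets affinity and a target host for a stopped instance; the pinToHost helper and its package are illustrative assumptions:

    package ec2sketches

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ec2"
    )

    // pinToHost associates a stopped instance with a specific Dedicated host,
    // so it targets that host when restarted.
    func pinToHost(svc *ec2.EC2, instanceID, hostID string) error {
        _, err := svc.ModifyInstancePlacement(&ec2.ModifyInstancePlacementInput{
            InstanceId: aws.String(instanceID),
            HostId:     aws.String(hostID),
            Affinity:   aws.String("host"),
        })
        return err
    }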

    ", + "ModifyNetworkInterfaceAttribute": "

    Modifies the specified network interface attribute. You can specify only one attribute at a time.

    ", + "ModifyReservedInstances": "

    Modifies the Availability Zone, instance count, instance type, or network platform (EC2-Classic or EC2-VPC) of your Reserved Instances. The Reserved Instances to be modified must be identical, except for Availability Zone, network platform, and instance type.

    For more information, see Modifying Reserved Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "ModifySnapshotAttribute": "

    Adds or removes permission settings for the specified snapshot. You may add or remove specified AWS account IDs from a snapshot's list of create volume permissions, but you cannot do both in a single API call. If you need to both add and remove account IDs for a snapshot, you must use multiple API calls.

    Encrypted snapshots and snapshots with AWS Marketplace product codes cannot be made public. Snapshots encrypted with your default CMK cannot be shared with other accounts.

    For more information on modifying snapshot permissions, see Sharing Snapshots in the Amazon Elastic Compute Cloud User Guide.
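    A minimal Go sketch of the add side, assuming a hypothetical shareSnapshot helper and package; removing account IDs would be a second, separate call with OperationType "remove", since add and remove cannot be combined:

    package ec2sketches

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ec2"
    )

    // shareSnapshot adds one AWS account ID to a snapshot's create volume
    // permissions.
    func shareSnapshot(svc *ec2.EC2, snapshotID, accountID string) error {
        _, err := svc.ModifySnapshotAttribute(&ec2.ModifySnapshotAttributeInput{
            SnapshotId:    aws.String(snapshotID),
            Attribute:     aws.String("createVolumePermission"),
            OperationType: aws.String("add"),
            UserIds:       []*string{aws.String(accountID)},
        })
        return err
    }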

    ", + "ModifySpotFleetRequest": "

    Modifies the specified Spot fleet request.

    While the Spot fleet request is being modified, it is in the modifying state.

    To scale up your Spot fleet, increase its target capacity. The Spot fleet launches the additional Spot instances according to the allocation strategy for the Spot fleet request. If the allocation strategy is lowestPrice, the Spot fleet launches instances using the Spot pool with the lowest price. If the allocation strategy is diversified, the Spot fleet distributes the instances across the Spot pools.

    To scale down your Spot fleet, decrease its target capacity. First, the Spot fleet cancels any open bids that exceed the new target capacity. You can request that the Spot fleet terminate Spot instances until the size of the fleet no longer exceeds the new target capacity. If the allocation strategy is lowestPrice, the Spot fleet terminates the instances with the highest price per unit. If the allocation strategy is diversified, the Spot fleet terminates instances across the Spot pools. Alternatively, you can request that the Spot fleet keep the fleet at its current size, but not replace any Spot instances that are interrupted or that you terminate manually.
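    A minimal Go sketch of adjusting target capacity, assuming a hypothetical scaleSpotFleet helper and package; the noTermination policy asks the fleet to shrink only through attrition, as described above:

    package ec2sketches

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ec2"
    )

    // scaleSpotFleet changes a fleet's target capacity without terminating
    // instances that now exceed it.
    func scaleSpotFleet(svc *ec2.EC2, fleetID string, capacity int64) error {
        _, err := svc.ModifySpotFleetRequest(&ec2.ModifySpotFleetRequestInput{
            SpotFleetRequestId:              aws.String(fleetID),
            TargetCapacity:                  aws.Int64(capacity),
            ExcessCapacityTerminationPolicy: aws.String("noTermination"),
        })
        return err
    }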

    ", + "ModifySubnetAttribute": "

    Modifies a subnet attribute.

    ", + "ModifyVolumeAttribute": "

    Modifies a volume attribute.

    By default, all I/O operations for the volume are suspended when the data on the volume is determined to be potentially inconsistent, to prevent undetectable, latent data corruption. The I/O access to the volume can be resumed by first enabling I/O access and then checking the data consistency on your volume.

    You can change the default behavior to resume I/O operations. We recommend that you change this only for boot volumes or for volumes that are stateless or disposable.

    ", + "ModifyVpcAttribute": "

    Modifies the specified attribute of the specified VPC.

    ", + "ModifyVpcEndpoint": "

    Modifies attributes of a specified VPC endpoint. You can modify the policy associated with the endpoint, and you can add and remove route tables associated with the endpoint.

    ", + "ModifyVpcPeeringConnectionOptions": "

    Modifies the VPC peering connection options on one side of a VPC peering connection. You can do the following:

    • Enable/disable communication over the peering connection between an EC2-Classic instance that's linked to your VPC (using ClassicLink) and instances in the peer VPC.

    • Enable/disable communication over the peering connection between instances in your VPC and an EC2-Classic instance that's linked to the peer VPC.

    If the peered VPCs are in different accounts, each owner must initiate a separate request to enable or disable communication in either direction, depending on whether their VPC was the requester or accepter for the VPC peering connection. If the peered VPCs are in the same account, you can modify the requester and accepter options in the same request. To confirm which VPC is the accepter and requester for a VPC peering connection, use the DescribeVpcPeeringConnections command.

    ", + "MonitorInstances": "

    Enables monitoring for a running instance. For more information about monitoring instances, see Monitoring Your Instances and Volumes in the Amazon Elastic Compute Cloud User Guide.

    ", + "MoveAddressToVpc": "

    Moves an Elastic IP address from the EC2-Classic platform to the EC2-VPC platform. The Elastic IP address must be allocated to your account for more than 24 hours, and it must not be associated with an instance. After the Elastic IP address is moved, it is no longer available for use in the EC2-Classic platform, unless you move it back using the RestoreAddressToClassic request. You cannot move an Elastic IP address that was originally allocated for use in the EC2-VPC platform to the EC2-Classic platform.

    ", + "PurchaseReservedInstancesOffering": "

    Purchases a Reserved Instance for use with your account. With Reserved Instances, you obtain a capacity reservation for a certain instance configuration over a specified period of time and pay a lower hourly rate compared to On-Demand instance pricing.

    Use DescribeReservedInstancesOfferings to get a list of Reserved Instance offerings that match your specifications. After you've purchased a Reserved Instance, you can check for your new Reserved Instance with DescribeReservedInstances.

    For more information, see Reserved Instances and Reserved Instance Marketplace in the Amazon Elastic Compute Cloud User Guide.

    ", + "PurchaseScheduledInstances": "

    Purchases one or more Scheduled Instances with the specified schedule.

    Scheduled Instances enable you to purchase Amazon EC2 compute capacity by the hour for a one-year term. Before you can purchase a Scheduled Instance, you must call DescribeScheduledInstanceAvailability to check for available schedules and obtain a purchase token. After you purchase a Scheduled Instance, you must call RunScheduledInstances during each scheduled time period.

    After you purchase a Scheduled Instance, you can't cancel, modify, or resell your purchase.

    ", + "RebootInstances": "

    Requests a reboot of one or more instances. This operation is asynchronous; it only queues a request to reboot the specified instances. The operation succeeds if the instances are valid and belong to you. Requests to reboot terminated instances are ignored.

    If an instance does not cleanly shut down within four minutes, Amazon EC2 performs a hard reboot.

    For more information about troubleshooting, see Getting Console Output and Rebooting Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "RegisterImage": "

    Registers an AMI. When you're creating an AMI, this is the final step you must complete before you can launch an instance from the AMI. For more information about creating AMIs, see Creating Your Own AMIs in the Amazon Elastic Compute Cloud User Guide.

    For Amazon EBS-backed instances, CreateImage creates and registers the AMI in a single request, so you don't have to register the AMI yourself.

    You can also use RegisterImage to create an Amazon EBS-backed Linux AMI from a snapshot of a root device volume. For more information, see Launching an Instance from a Snapshot in the Amazon Elastic Compute Cloud User Guide.

    Some Linux distributions, such as Red Hat Enterprise Linux (RHEL) and SUSE Linux Enterprise Server (SLES), use the EC2 billingProduct code associated with an AMI to verify subscription status for package updates. Creating an AMI from an EBS snapshot does not maintain this billing code, and subsequent instances launched from such an AMI will not be able to connect to package update infrastructure.

    Similarly, although you can create a Windows AMI from a snapshot, you can't successfully launch an instance from the AMI.

    To create Windows AMIs or to create AMIs for Linux operating systems that must retain AMI billing codes to work properly, see CreateImage.

    If needed, you can deregister an AMI at any time. Any modifications you make to an AMI backed by an instance store volume invalidate its registration. If you make changes to an image, deregister the previous image and register the new image.

    You can't register an image where a secondary (non-root) snapshot has AWS Marketplace product codes.

    ", + "RejectVpcPeeringConnection": "

    Rejects a VPC peering connection request. The VPC peering connection must be in the pending-acceptance state. Use the DescribeVpcPeeringConnections request to view your outstanding VPC peering connection requests. To delete an active VPC peering connection, or to delete a VPC peering connection request that you initiated, use DeleteVpcPeeringConnection.

    ", + "ReleaseAddress": "

    Releases the specified Elastic IP address.

    After you release an Elastic IP address, it is returned to the IP address pool and might be unavailable to you. Be sure to update your DNS records and any servers or devices that communicate with the address. If you attempt to release an Elastic IP address that you already released, you'll get an AuthFailure error if the address is already allocated to another AWS account.

    [EC2-Classic, default VPC] Releasing an Elastic IP address automatically disassociates it from any instance that it's associated with. To disassociate an Elastic IP address without releasing it, use DisassociateAddress.

    [Nondefault VPC] You must use DisassociateAddress to disassociate the Elastic IP address before you try to release it. Otherwise, Amazon EC2 returns an error (InvalidIPAddress.InUse).

    ", + "ReleaseHosts": "

    When you no longer want to use a Dedicated host, it can be released. On-Demand billing is stopped and the host goes into the released state. The host ID of Dedicated hosts that have been released can no longer be specified in another request, e.g., ModifyHosts. You must stop or terminate all instances on a host before it can be released.

    When Dedicated hosts are released, it may take some time for them to stop counting toward your limit, and you may receive capacity errors when trying to allocate new Dedicated hosts. Try waiting a few minutes, and then try again.

    Released hosts will still appear in a DescribeHosts response.

    ", + "ReplaceNetworkAclAssociation": "

    Changes which network ACL a subnet is associated with. By default when you create a subnet, it's automatically associated with the default network ACL. For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

    ", + "ReplaceNetworkAclEntry": "

    Replaces an entry (rule) in a network ACL. For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

    ", + "ReplaceRoute": "

    Replaces an existing route within a route table in a VPC. You must provide only one of the following: Internet gateway or virtual private gateway, NAT instance, NAT gateway, VPC peering connection, or network interface.

    For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

    ", + "ReplaceRouteTableAssociation": "

    Changes the route table associated with a given subnet in a VPC. After the operation completes, the subnet uses the routes in the new route table it's associated with. For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

    You can also use ReplaceRouteTableAssociation to change which table is the main route table in the VPC. You just specify the main route table's association ID and the route table to be the new main route table.

    ", + "ReportInstanceStatus": "

    Submits feedback about the status of an instance. The instance must be in the running state. If your experience with the instance differs from the instance status returned by DescribeInstanceStatus, use ReportInstanceStatus to report your experience with the instance. Amazon EC2 collects this information to improve the accuracy of status checks.

    Use of this action does not change the value returned by DescribeInstanceStatus.

    ", + "RequestSpotFleet": "

    Creates a Spot fleet request.

    You can submit a single request that includes multiple launch specifications that vary by instance type, AMI, Availability Zone, or subnet.

    By default, the Spot fleet requests Spot instances in the Spot pool where the price per unit is the lowest. Each launch specification can include its own instance weighting that reflects the value of the instance type to your application workload.

    Alternatively, you can specify that the Spot fleet distribute the target capacity across the Spot pools included in its launch specifications. By ensuring that the Spot instances in your Spot fleet are in different Spot pools, you can improve the availability of your fleet.

    For more information, see Spot Fleet Requests in the Amazon Elastic Compute Cloud User Guide.

    ", + "RequestSpotInstances": "

    Creates a Spot instance request. Spot instances are instances that Amazon EC2 launches when the bid price that you specify exceeds the current Spot price. Amazon EC2 periodically sets the Spot price based on available Spot Instance capacity and current Spot instance requests. For more information, see Spot Instance Requests in the Amazon Elastic Compute Cloud User Guide.

    ", + "ResetImageAttribute": "

    Resets an attribute of an AMI to its default value.

    The productCodes attribute can't be reset.

    ", + "ResetInstanceAttribute": "

    Resets an attribute of an instance to its default value. To reset the kernel or ramdisk, the instance must be in a stopped state. To reset the sourceDestCheck, the instance can be either running or stopped.

    The sourceDestCheck attribute controls whether source/destination checking is enabled. The default value is true, which means checking is enabled. This value must be false for a NAT instance to perform NAT. For more information, see NAT Instances in the Amazon Virtual Private Cloud User Guide.
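    A minimal Go sketch of resetting the attribute, assuming a hypothetical resetSourceDestCheck helper and package; a NAT instance would instead need the attribute set to false via ModifyInstanceAttribute:

    package ec2sketches

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ec2"
    )

    // resetSourceDestCheck restores the default (true) value on a running or
    // stopped instance.
    func resetSourceDestCheck(svc *ec2.EC2, instanceID string) error {
        _, err := svc.ResetInstanceAttribute(&ec2.ResetInstanceAttributeInput{
            InstanceId: aws.String(instanceID),
            Attribute:  aws.String("sourceDestCheck"),
        })
        return err
    }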

    ", + "ResetNetworkInterfaceAttribute": "

    Resets a network interface attribute. You can specify only one attribute at a time.

    ", + "ResetSnapshotAttribute": "

    Resets permission settings for the specified snapshot.

    For more information on modifying snapshot permissions, see Sharing Snapshots in the Amazon Elastic Compute Cloud User Guide.

    ", + "RestoreAddressToClassic": "

    Restores an Elastic IP address that was previously moved to the EC2-VPC platform back to the EC2-Classic platform. You cannot move an Elastic IP address that was originally allocated for use in EC2-VPC. The Elastic IP address must not be associated with an instance or network interface.

    ", + "RevokeSecurityGroupEgress": "

    [EC2-VPC only] Removes one or more egress rules from a security group for EC2-VPC. This action doesn't apply to security groups for use in EC2-Classic. The values that you specify in the revoke request (for example, ports) must match the existing rule's values for the rule to be revoked.

    Each rule consists of the protocol and the CIDR range or source security group. For the TCP and UDP protocols, you must also specify the destination port or range of ports. For the ICMP protocol, you must also specify the ICMP type and code.

    Rule changes are propagated to instances within the security group as quickly as possible. However, a small delay might occur.

    ", + "RevokeSecurityGroupIngress": "

    Removes one or more ingress rules from a security group. The values that you specify in the revoke request (for example, ports) must match the existing rule's values for the rule to be removed.

    Each rule consists of the protocol and the CIDR range or source security group. For the TCP and UDP protocols, you must also specify the destination port or range of ports. For the ICMP protocol, you must also specify the ICMP type and code.

    Rule changes are propagated to instances within the security group as quickly as possible. However, a small delay might occur.

    ", + "RunInstances": "

    Launches the specified number of instances using an AMI for which you have permissions.

    When you launch an instance, it enters the pending state. After the instance is ready for you, it enters the running state. To check the state of your instance, call DescribeInstances.

    To ensure faster instance launches, break up large requests into smaller batches. For example, create five separate launch requests for 100 instances each instead of one launch request for 500 instances.

    To tag your instance, ensure that it is running, as CreateTags requires a resource ID. For more information about tagging, see Tagging Your Amazon EC2 Resources.

    If you don't specify a security group when launching an instance, Amazon EC2 uses the default security group. For more information, see Security Groups in the Amazon Elastic Compute Cloud User Guide.

    [EC2-VPC only accounts] If you don't specify a subnet in the request, we choose a default subnet from your default VPC for you.

    [EC2-Classic accounts] If you're launching into EC2-Classic and you don't specify an Availability Zone, we choose one for you.

    Linux instances have access to the public key of the key pair at boot. You can use this key to provide secure access to the instance. Amazon EC2 public images use this feature to provide secure access without passwords. For more information, see Key Pairs in the Amazon Elastic Compute Cloud User Guide.

    You can provide optional user data when launching an instance. For more information, see Instance Metadata in the Amazon Elastic Compute Cloud User Guide.

    If any of the AMIs have a product code attached for which the user has not subscribed, RunInstances fails.

    Some instance types can only be launched into a VPC. If you do not have a default VPC, or if you do not specify a subnet ID in the request, RunInstances fails. For more information, see Instance Types Available Only in a VPC.

    For more information about troubleshooting, see What To Do If An Instance Immediately Terminates, and Troubleshooting Connecting to Your Instance in the Amazon Elastic Compute Cloud User Guide.
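    A minimal Go sketch of the pending-to-running flow described above, assuming a hypothetical launchAndWait helper and package; the t2.micro instance type is an illustrative assumption, and the SDK waiter polls DescribeInstances under the hood:

    package ec2sketches

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ec2"
    )

    // launchAndWait launches one instance from the given AMI and blocks until
    // it leaves pending for running.
    func launchAndWait(svc *ec2.EC2, amiID string) (*ec2.Instance, error) {
        res, err := svc.RunInstances(&ec2.RunInstancesInput{
            ImageId:      aws.String(amiID),
            InstanceType: aws.String("t2.micro"),
            MinCount:     aws.Int64(1),
            MaxCount:     aws.Int64(1),
        })
        if err != nil {
            return nil, err
        }
        inst := res.Instances[0]
        err = svc.WaitUntilInstanceRunning(&ec2.DescribeInstancesInput{
            InstanceIds: []*string{inst.InstanceId},
        })
        return inst, err
    }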

    ", + "RunScheduledInstances": "

    Launches the specified Scheduled Instances.

    Before you can launch a Scheduled Instance, you must purchase it and obtain an identifier using PurchaseScheduledInstances.

    You must launch a Scheduled Instance during its scheduled time period. You can't stop or reboot a Scheduled Instance, but you can terminate it as needed. If you terminate a Scheduled Instance before the current scheduled time period ends, you can launch it again after a few minutes. For more information, see Scheduled Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "StartInstances": "

    Starts an Amazon EBS-backed AMI that you've previously stopped.

    Instances that use Amazon EBS volumes as their root devices can be quickly stopped and started. When an instance is stopped, the compute resources are released and you are not billed for hourly instance usage. However, your root partition Amazon EBS volume remains, continues to persist your data, and you are charged for Amazon EBS volume usage. You can restart your instance at any time. Each time you transition an instance from stopped to started, Amazon EC2 charges a full instance hour, even if transitions happen multiple times within a single hour.

    Before stopping an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM.

    Performing this operation on an instance that uses an instance store as its root device returns an error.

    For more information, see Stopping Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "StopInstances": "

    Stops an Amazon EBS-backed instance.

    We don't charge hourly usage for a stopped instance, or data transfer fees; however, your root partition Amazon EBS volume remains, continues to persist your data, and you are charged for Amazon EBS volume usage. Each time you transition an instance from stopped to started, Amazon EC2 charges a full instance hour, even if transitions happen multiple times within a single hour.

    You can't start or stop Spot instances, and you can't stop instance store-backed instances.

    When you stop an instance, we shut it down. You can restart your instance at any time. Before stopping an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM.

    Stopping an instance is different from rebooting or terminating it. For example, when you stop an instance, the root device and any other devices attached to the instance persist. When you terminate an instance, the root device and any other devices attached during the instance launch are automatically deleted. For more information about the differences between rebooting, stopping, and terminating instances, see Instance Lifecycle in the Amazon Elastic Compute Cloud User Guide.

    When you stop an instance, we attempt to shut it down forcibly after a short while. If your instance appears stuck in the stopping state after a period of time, there may be an issue with the underlying host computer. For more information, see Troubleshooting Stopping Your Instance in the Amazon Elastic Compute Cloud User Guide.
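    A minimal Go sketch pairing StopInstances with the SDK's stopped-state waiter; the stopAndWait helper and its package are illustrative assumptions:

    package ec2sketches

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ec2"
    )

    // stopAndWait requests a stop and blocks until the instance reports the
    // stopped state; RAM contents are not preserved.
    func stopAndWait(svc *ec2.EC2, instanceID string) error {
        ids := []*string{aws.String(instanceID)}
        if _, err := svc.StopInstances(&ec2.StopInstancesInput{InstanceIds: ids}); err != nil {
            return err
        }
        return svc.WaitUntilInstanceStopped(&ec2.DescribeInstancesInput{InstanceIds: ids})
    }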

    ", + "TerminateInstances": "

    Shuts down one or more instances. This operation is idempotent; if you terminate an instance more than once, each call succeeds.

    Terminated instances remain visible after termination (for approximately one hour).

    By default, Amazon EC2 deletes all EBS volumes that were attached when the instance launched. Volumes attached after instance launch continue running.

    You can stop, start, and terminate EBS-backed instances. You can only terminate instance store-backed instances. What happens to an instance differs if you stop it or terminate it. For example, when you stop an instance, the root device and any other devices attached to the instance persist. When you terminate an instance, any attached EBS volumes with the DeleteOnTermination block device mapping parameter set to true are automatically deleted. For more information about the differences between stopping and terminating instances, see Instance Lifecycle in the Amazon Elastic Compute Cloud User Guide.

    For more information about troubleshooting, see Troubleshooting Terminating Your Instance in the Amazon Elastic Compute Cloud User Guide.

    ", + "UnassignPrivateIpAddresses": "

    Unassigns one or more secondary private IP addresses from a network interface.

    ", + "UnmonitorInstances": "

    Disables monitoring for a running instance. For more information about monitoring instances, see Monitoring Your Instances and Volumes in the Amazon Elastic Compute Cloud User Guide.

    " + }, + "shapes": { + "AcceptVpcPeeringConnectionRequest": { + "base": "

    Contains the parameters for AcceptVpcPeeringConnection.

    ", + "refs": { + } + }, + "AcceptVpcPeeringConnectionResult": { + "base": "

    Contains the output of AcceptVpcPeeringConnection.

    ", + "refs": { + } + }, + "AccountAttribute": { + "base": "

    Describes an account attribute.

    ", + "refs": { + "AccountAttributeList$member": null + } + }, + "AccountAttributeList": { + "base": null, + "refs": { + "DescribeAccountAttributesResult$AccountAttributes": "

    Information about one or more account attributes.

    " + } + }, + "AccountAttributeName": { + "base": null, + "refs": { + "AccountAttributeNameStringList$member": null + } + }, + "AccountAttributeNameStringList": { + "base": null, + "refs": { + "DescribeAccountAttributesRequest$AttributeNames": "

    One or more account attribute names.

    " + } + }, + "AccountAttributeValue": { + "base": "

    Describes a value of an account attribute.

    ", + "refs": { + "AccountAttributeValueList$member": null + } + }, + "AccountAttributeValueList": { + "base": null, + "refs": { + "AccountAttribute$AttributeValues": "

    One or more values for the account attribute.

    " + } + }, + "ActiveInstance": { + "base": "

    Describes a running instance in a Spot fleet.

    ", + "refs": { + "ActiveInstanceSet$member": null + } + }, + "ActiveInstanceSet": { + "base": null, + "refs": { + "DescribeSpotFleetInstancesResponse$ActiveInstances": "

    The running instances. Note that this list is refreshed periodically and might be out of date.

    " + } + }, + "Address": { + "base": "

    Describes an Elastic IP address.

    ", + "refs": { + "AddressList$member": null + } + }, + "AddressList": { + "base": null, + "refs": { + "DescribeAddressesResult$Addresses": "

    Information about one or more Elastic IP addresses.

    " + } + }, + "Affinity": { + "base": null, + "refs": { + "ModifyInstancePlacementRequest$Affinity": "

    The new affinity setting for the instance.

    " + } + }, + "AllocateAddressRequest": { + "base": "

    Contains the parameters for AllocateAddress.

    ", + "refs": { + } + }, + "AllocateAddressResult": { + "base": "

    Contains the output of AllocateAddress.

    ", + "refs": { + } + }, + "AllocateHostsRequest": { + "base": "

    Contains the parameters for AllocateHosts.

    ", + "refs": { + } + }, + "AllocateHostsResult": { + "base": "

    Contains the output of AllocateHosts.

    ", + "refs": { + } + }, + "AllocationIdList": { + "base": null, + "refs": { + "DescribeAddressesRequest$AllocationIds": "

    [EC2-VPC] One or more allocation IDs.

    Default: Describes all your Elastic IP addresses.

    " + } + }, + "AllocationState": { + "base": null, + "refs": { + "Host$State": "

    The Dedicated host's state.

    " + } + }, + "AllocationStrategy": { + "base": null, + "refs": { + "SpotFleetRequestConfigData$AllocationStrategy": "

    Indicates how to allocate the target capacity across the Spot pools specified by the Spot fleet request. The default is lowestPrice.

    " + } + }, + "ArchitectureValues": { + "base": null, + "refs": { + "Image$Architecture": "

    The architecture of the image.

    ", + "ImportInstanceLaunchSpecification$Architecture": "

    The architecture of the instance.

    ", + "Instance$Architecture": "

    The architecture of the image.

    ", + "RegisterImageRequest$Architecture": "

    The architecture of the AMI.

    Default: For Amazon EBS-backed AMIs, i386. For instance store-backed AMIs, the architecture specified in the manifest file.

    " + } + }, + "AssignPrivateIpAddressesRequest": { + "base": "

    Contains the parameters for AssignPrivateIpAddresses.

    ", + "refs": { + } + }, + "AssociateAddressRequest": { + "base": "

    Contains the parameters for AssociateAddress.

    ", + "refs": { + } + }, + "AssociateAddressResult": { + "base": "

    Contains the output of AssociateAddress.

    ", + "refs": { + } + }, + "AssociateDhcpOptionsRequest": { + "base": "

    Contains the parameters for AssociateDhcpOptions.

    ", + "refs": { + } + }, + "AssociateRouteTableRequest": { + "base": "

    Contains the parameters for AssociateRouteTable.

    ", + "refs": { + } + }, + "AssociateRouteTableResult": { + "base": "

    Contains the output of AssociateRouteTable.

    ", + "refs": { + } + }, + "AttachClassicLinkVpcRequest": { + "base": "

    Contains the parameters for AttachClassicLinkVpc.

    ", + "refs": { + } + }, + "AttachClassicLinkVpcResult": { + "base": "

    Contains the output of AttachClassicLinkVpc.

    ", + "refs": { + } + }, + "AttachInternetGatewayRequest": { + "base": "

    Contains the parameters for AttachInternetGateway.

    ", + "refs": { + } + }, + "AttachNetworkInterfaceRequest": { + "base": "

    Contains the parameters for AttachNetworkInterface.

    ", + "refs": { + } + }, + "AttachNetworkInterfaceResult": { + "base": "

    Contains the output of AttachNetworkInterface.

    ", + "refs": { + } + }, + "AttachVolumeRequest": { + "base": "

    Contains the parameters for AttachVolume.

    ", + "refs": { + } + }, + "AttachVpnGatewayRequest": { + "base": "

    Contains the parameters for AttachVpnGateway.

    ", + "refs": { + } + }, + "AttachVpnGatewayResult": { + "base": "

    Contains the output of AttachVpnGateway.

    ", + "refs": { + } + }, + "AttachmentStatus": { + "base": null, + "refs": { + "EbsInstanceBlockDevice$Status": "

    The attachment state.

    ", + "InstanceNetworkInterfaceAttachment$Status": "

    The attachment state.

    ", + "InternetGatewayAttachment$State": "

    The current state of the attachment.

    ", + "NetworkInterfaceAttachment$Status": "

    The attachment state.

    ", + "VpcAttachment$State": "

    The current state of the attachment.

    " + } + }, + "AttributeBooleanValue": { + "base": "

    Describes a value for a resource attribute that is a Boolean value.

    ", + "refs": { + "DescribeNetworkInterfaceAttributeResult$SourceDestCheck": "

    Indicates whether source/destination checking is enabled.

    ", + "DescribeVolumeAttributeResult$AutoEnableIO": "

    The state of autoEnableIO attribute.

    ", + "DescribeVpcAttributeResult$EnableDnsSupport": "

    Indicates whether DNS resolution is enabled for the VPC. If this attribute is true, the Amazon DNS server resolves DNS hostnames for your instances to their corresponding IP addresses; otherwise, it does not.

    ", + "DescribeVpcAttributeResult$EnableDnsHostnames": "

    Indicates whether the instances launched in the VPC get DNS hostnames. If this attribute is true, instances in the VPC get DNS hostnames; otherwise, they do not.

    ", + "InstanceAttribute$DisableApiTermination": "

    If the value is true, you can't terminate the instance through the Amazon EC2 console, CLI, or API; otherwise, you can.

    ", + "InstanceAttribute$EbsOptimized": "

    Indicates whether the instance is optimized for EBS I/O.

    ", + "InstanceAttribute$EnaSupport": "

    Indicates whether enhanced networking with ENA is enabled.

    ", + "InstanceAttribute$SourceDestCheck": "

    Indicates whether source/destination checking is enabled. A value of true means checking is enabled, and false means checking is disabled. This value must be false for a NAT instance to perform NAT.

    ", + "ModifyInstanceAttributeRequest$SourceDestCheck": "

    Specifies whether source/destination checking is enabled. A value of true means that checking is enabled, and false means checking is disabled. This value must be false for a NAT instance to perform NAT.

    ", + "ModifyInstanceAttributeRequest$DisableApiTermination": "

    If the value is true, you can't terminate the instance using the Amazon EC2 console, CLI, or API; otherwise, you can. You cannot use this parameter for Spot Instances.

    ", + "ModifyInstanceAttributeRequest$EbsOptimized": "

    Specifies whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS Optimized instance.

    ", + "ModifyInstanceAttributeRequest$EnaSupport": "

    Set to true to enable enhanced networking with ENA for the instance.

    This option is supported only for HVM instances. Specifying this option with a PV instance can make it unreachable.

    ", + "ModifyNetworkInterfaceAttributeRequest$SourceDestCheck": "

    Indicates whether source/destination checking is enabled. A value of true means checking is enabled, and false means checking is disabled. This value must be false for a NAT instance to perform NAT. For more information, see NAT Instances in the Amazon Virtual Private Cloud User Guide.

    ", + "ModifySubnetAttributeRequest$MapPublicIpOnLaunch": "

    Specify true to indicate that instances launched into the specified subnet should be assigned a public IP address.

    ", + "ModifyVolumeAttributeRequest$AutoEnableIO": "

    Indicates whether the volume should be auto-enabled for I/O operations.

    ", + "ModifyVpcAttributeRequest$EnableDnsSupport": "

    Indicates whether the DNS resolution is supported for the VPC. If enabled, queries to the Amazon provided DNS server at the 169.254.169.253 IP address, or the reserved IP address at the base of the VPC network range \"plus two\" will succeed. If disabled, the Amazon provided DNS service in the VPC that resolves public DNS hostnames to IP addresses is not enabled.

    You cannot modify the DNS resolution and DNS hostnames attributes in the same request. Use separate requests for each attribute.
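    Because the two attributes cannot share a request, a sketch of enabling both takes two ModifyVpcAttribute calls; the enableVpcDns helper and its package are illustrative assumptions. DNS support is enabled first, since DNS hostnames require it:

    package ec2sketches

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ec2"
    )

    // enableVpcDns turns on DNS resolution, then DNS hostnames, for a VPC.
    func enableVpcDns(svc *ec2.EC2, vpcID string) error {
        enabled := &ec2.AttributeBooleanValue{Value: aws.Bool(true)}
        if _, err := svc.ModifyVpcAttribute(&ec2.ModifyVpcAttributeInput{
            VpcId:            aws.String(vpcID),
            EnableDnsSupport: enabled,
        }); err != nil {
            return err
        }
        _, err := svc.ModifyVpcAttribute(&ec2.ModifyVpcAttributeInput{
            VpcId:              aws.String(vpcID),
            EnableDnsHostnames: enabled,
        })
        return err
    }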

    ", + "ModifyVpcAttributeRequest$EnableDnsHostnames": "

    Indicates whether the instances launched in the VPC get DNS hostnames. If enabled, instances in the VPC get DNS hostnames; otherwise, they do not.

    You cannot modify the DNS resolution and DNS hostnames attributes in the same request. Use separate requests for each attribute. You can only enable DNS hostnames if you've enabled DNS support.

    " + } + }, + "AttributeValue": { + "base": "

    Describes a value for a resource attribute that is a String.

    ", + "refs": { + "DescribeNetworkInterfaceAttributeResult$Description": "

    The description of the network interface.

    ", + "DhcpConfigurationValueList$member": null, + "ImageAttribute$KernelId": "

    The kernel ID.

    ", + "ImageAttribute$RamdiskId": "

    The RAM disk ID.

    ", + "ImageAttribute$Description": "

    A description for the AMI.

    ", + "ImageAttribute$SriovNetSupport": "

    Indicates whether enhanced networking with the Intel 82599 Virtual Function interface is enabled.

    ", + "InstanceAttribute$InstanceType": "

    The instance type.

    ", + "InstanceAttribute$KernelId": "

    The kernel ID.

    ", + "InstanceAttribute$RamdiskId": "

    The RAM disk ID.

    ", + "InstanceAttribute$UserData": "

    The user data.

    ", + "InstanceAttribute$InstanceInitiatedShutdownBehavior": "

    Indicates whether an instance stops or terminates when you initiate shutdown from the instance (using the operating system command for system shutdown).

    ", + "InstanceAttribute$RootDeviceName": "

    The name of the root device (for example, /dev/sda1 or /dev/xvda).

    ", + "InstanceAttribute$SriovNetSupport": "

    Indicates whether enhanced networking with the Intel 82599 Virtual Function interface is enabled.

    ", + "ModifyImageAttributeRequest$Description": "

    A description for the AMI.

    ", + "ModifyInstanceAttributeRequest$InstanceType": "

    Changes the instance type to the specified value. For more information, see Instance Types. If the instance type is not valid, the error returned is InvalidInstanceAttributeValue.

    ", + "ModifyInstanceAttributeRequest$Kernel": "

    Changes the instance's kernel to the specified value. We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB.

    ", + "ModifyInstanceAttributeRequest$Ramdisk": "

    Changes the instance's RAM disk to the specified value. We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB.

    ", + "ModifyInstanceAttributeRequest$InstanceInitiatedShutdownBehavior": "

    Specifies whether an instance stops or terminates when you initiate shutdown from the instance (using the operating system command for system shutdown).

    ", + "ModifyInstanceAttributeRequest$SriovNetSupport": "

    Set to simple to enable enhanced networking with the Intel 82599 Virtual Function interface for the instance.

    There is no way to disable enhanced networking with the Intel 82599 Virtual Function interface at this time.

    This option is supported only for HVM instances. Specifying this option with a PV instance can make it unreachable.

    ", + "ModifyNetworkInterfaceAttributeRequest$Description": "

    A description for the network interface.

    " + } + }, + "AuthorizeSecurityGroupEgressRequest": { + "base": "

    Contains the parameters for AuthorizeSecurityGroupEgress.

    ", + "refs": { + } + }, + "AuthorizeSecurityGroupIngressRequest": { + "base": "

    Contains the parameters for AuthorizeSecurityGroupIngress.

    ", + "refs": { + } + }, + "AutoPlacement": { + "base": null, + "refs": { + "AllocateHostsRequest$AutoPlacement": "

    This is enabled by default. This property allows instances to be automatically placed onto available Dedicated hosts when you launch instances without specifying a host ID.

    Default: Enabled

    ", + "Host$AutoPlacement": "

    Whether auto-placement is on or off.

    ", + "ModifyHostsRequest$AutoPlacement": "

    Specify whether to enable or disable auto-placement.

    " + } + }, + "AvailabilityZone": { + "base": "

    Describes an Availability Zone.

    ", + "refs": { + "AvailabilityZoneList$member": null + } + }, + "AvailabilityZoneList": { + "base": null, + "refs": { + "DescribeAvailabilityZonesResult$AvailabilityZones": "

    Information about one or more Availability Zones.

    " + } + }, + "AvailabilityZoneMessage": { + "base": "

    Describes a message about an Availability Zone.

    ", + "refs": { + "AvailabilityZoneMessageList$member": null + } + }, + "AvailabilityZoneMessageList": { + "base": null, + "refs": { + "AvailabilityZone$Messages": "

    Any messages about the Availability Zone.

    " + } + }, + "AvailabilityZoneState": { + "base": null, + "refs": { + "AvailabilityZone$State": "

    The state of the Availability Zone.

    " + } + }, + "AvailableCapacity": { + "base": "

    The capacity information for instances launched onto the Dedicated host.

    ", + "refs": { + "Host$AvailableCapacity": "

    The number of new instances that can be launched onto the Dedicated host.

    " + } + }, + "AvailableInstanceCapacityList": { + "base": null, + "refs": { + "AvailableCapacity$AvailableInstanceCapacity": "

    The total number of instances that the Dedicated host supports.

    " + } + }, + "BatchState": { + "base": null, + "refs": { + "CancelSpotFleetRequestsSuccessItem$CurrentSpotFleetRequestState": "

    The current state of the Spot fleet request.

    ", + "CancelSpotFleetRequestsSuccessItem$PreviousSpotFleetRequestState": "

    The previous state of the Spot fleet request.

    ", + "SpotFleetRequestConfig$SpotFleetRequestState": "

    The state of the Spot fleet request.

    " + } + }, + "Blob": { + "base": null, + "refs": { + "BlobAttributeValue$Value": null, + "ImportKeyPairRequest$PublicKeyMaterial": "

    The public key. For API calls, the text must be base64-encoded. For command line tools, base64 encoding is performed for you.

    ", + "S3Storage$UploadPolicy": "

    An Amazon S3 upload policy that gives Amazon EC2 permission to upload items into Amazon S3 on your behalf.

    " + } + }, + "BlobAttributeValue": { + "base": null, + "refs": { + "ModifyInstanceAttributeRequest$UserData": "

    Changes the instance's user data to the specified value. If you are using an AWS SDK or command line tool, Base64-encoding is performed for you, and you can load the text from a file. Otherwise, you must provide Base64-encoded text.
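    A minimal Go sketch, assuming a hypothetical setUserData helper and package; the Go SDK Base64-encodes the blob on the wire, so the script is passed as plain bytes, and the instance must be stopped first:

    package ec2sketches

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ec2"
    )

    // setUserData replaces the user data of a stopped instance.
    func setUserData(svc *ec2.EC2, instanceID, script string) error {
        _, err := svc.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{
            InstanceId: aws.String(instanceID),
            UserData:   &ec2.BlobAttributeValue{Value: []byte(script)},
        })
        return err
    }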

    " + } + }, + "BlockDeviceMapping": { + "base": "

    Describes a block device mapping.

    ", + "refs": { + "BlockDeviceMappingList$member": null, + "BlockDeviceMappingRequestList$member": null + } + }, + "BlockDeviceMappingList": { + "base": null, + "refs": { + "Image$BlockDeviceMappings": "

    Any block device mapping entries.

    ", + "ImageAttribute$BlockDeviceMappings": "

    One or more block device mapping entries.

    ", + "LaunchSpecification$BlockDeviceMappings": "

    One or more block device mapping entries.

    Although you can specify encrypted EBS volumes in this block device mapping for your Spot Instances, these volumes are not encrypted.

    ", + "RequestSpotLaunchSpecification$BlockDeviceMappings": "

    One or more block device mapping entries.

    Although you can specify encrypted EBS volumes in this block device mapping for your Spot Instances, these volumes are not encrypted.

    ", + "SpotFleetLaunchSpecification$BlockDeviceMappings": "

    One or more block device mapping entries.

    " + } + }, + "BlockDeviceMappingRequestList": { + "base": null, + "refs": { + "CreateImageRequest$BlockDeviceMappings": "

    Information about one or more block device mappings.

    ", + "RegisterImageRequest$BlockDeviceMappings": "

    One or more block device mapping entries.

    ", + "RunInstancesRequest$BlockDeviceMappings": "

    The block device mapping.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "AcceptVpcPeeringConnectionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
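This identical DryRun sentence repeats on nearly every request shape below, so a sketch of the calling convention may help. It assumes an *ec2.EC2 client named svc, the aws and ec2 packages from this vendor tree plus "github.com/aws/aws-sdk-go/aws/awserr", and a placeholder peering-connection ID:

// Sketch of the DryRun convention: the request is validated but never
// executed; "success" surfaces as the DryRunOperation error code.
func canAcceptPeering(svc *ec2.EC2, peeringID string) bool {
	_, err := svc.AcceptVpcPeeringConnection(&ec2.AcceptVpcPeeringConnectionInput{
		DryRun:                 aws.Bool(true),
		VpcPeeringConnectionId: aws.String(peeringID),
	})
	if aerr, ok := err.(awserr.Error); ok {
		return aerr.Code() == "DryRunOperation" // permitted; UnauthorizedOperation otherwise
	}
	return false
}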

    ", + "AllocateAddressRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AssignPrivateIpAddressesRequest$AllowReassignment": "

    Indicates whether to allow an IP address that is already assigned to another network interface or instance to be reassigned to the specified network interface.

    ", + "AssociateAddressRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AssociateAddressRequest$AllowReassociation": "

    [EC2-VPC] For a VPC in an EC2-Classic account, specify true to allow an Elastic IP address that is already associated with an instance or network interface to be reassociated with the specified instance or network interface. Otherwise, the operation fails. In a VPC in an EC2-VPC-only account, reassociation is automatic, therefore you can specify false to ensure the operation fails if the Elastic IP address is already associated with another resource.

    ", + "AssociateDhcpOptionsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AssociateRouteTableRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AttachClassicLinkVpcRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AttachClassicLinkVpcResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "AttachInternetGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AttachNetworkInterfaceRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AttachVolumeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AttachVpnGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AttributeBooleanValue$Value": "

    The attribute value. The valid values are true or false.

    ", + "AuthorizeSecurityGroupEgressRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AuthorizeSecurityGroupIngressRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "BundleInstanceRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CancelBundleTaskRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CancelConversionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CancelImportTaskRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CancelSpotFleetRequestsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CancelSpotFleetRequestsRequest$TerminateInstances": "

    Indicates whether to terminate instances for a Spot fleet request if it is canceled successfully.

    ", + "CancelSpotInstanceRequestsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ClassicLinkDnsSupport$ClassicLinkDnsSupported": "

    Indicates whether ClassicLink DNS support is enabled for the VPC.

    ", + "ConfirmProductInstanceRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ConfirmProductInstanceResult$Return": "

    The return value of the request. Returns true if the specified product code is owned by the requester and associated with the specified instance.

    ", + "CopyImageRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CopyImageRequest$Encrypted": "

    Specifies whether the destination snapshots of the copied image should be encrypted. The default CMK for EBS is used unless a non-default AWS Key Management Service (AWS KMS) CMK is specified with KmsKeyId. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

    ", + "CopySnapshotRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CopySnapshotRequest$Encrypted": "

    Specifies whether the destination snapshot should be encrypted. You can encrypt a copy of an unencrypted snapshot using this flag, but you cannot use it to create an unencrypted copy from an encrypted snapshot. Your default CMK for EBS is used unless a non-default AWS Key Management Service (AWS KMS) CMK is specified with KmsKeyId. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.
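A sketch of that one-way encryption rule, assuming the same svc client; omitting KmsKeyId falls back to the default EBS CMK, and the region and snapshot ID are placeholders:

// Same-region copy of an unencrypted snapshot into an encrypted one.
out, err := svc.CopySnapshot(&ec2.CopySnapshotInput{
	SourceRegion:     aws.String("us-east-1"),
	SourceSnapshotId: aws.String("snap-0123456789abcdef0"),
	Encrypted:        aws.Bool(true), // unencrypted -> encrypted is allowed, never the reverse
	Description:      aws.String("encrypted copy"),
})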

    ", + "CreateCustomerGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateDhcpOptionsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateImageRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateImageRequest$NoReboot": "

    By default, Amazon EC2 attempts to shut down and reboot the instance before creating the image. If the 'No Reboot' option is set, Amazon EC2 doesn't shut down the instance before creating the image. When this option is used, file system integrity on the created image can't be guaranteed.
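A sketch of the NoReboot trade-off with the same assumed svc client and a placeholder instance ID:

img, err := svc.CreateImage(&ec2.CreateImageInput{
	InstanceId: aws.String("i-0123456789abcdef0"),
	Name:       aws.String("example-image"),
	NoReboot:   aws.Bool(true), // skip the shutdown; file system integrity not guaranteed
})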

    ", + "CreateInternetGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateKeyPairRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateNetworkAclEntryRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateNetworkAclEntryRequest$Egress": "

    Indicates whether this is an egress rule (rule is applied to traffic leaving the subnet).

    ", + "CreateNetworkAclRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateNetworkInterfaceRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreatePlacementGroupRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateRouteRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateRouteResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "CreateRouteTableRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateSecurityGroupRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateSnapshotRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateSpotDatafeedSubscriptionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateSubnetRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateTagsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateVolumeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateVolumeRequest$Encrypted": "

    Specifies whether the volume should be encrypted. Encrypted Amazon EBS volumes may only be attached to instances that support Amazon EBS encryption. Volumes that are created from encrypted snapshots are automatically encrypted. There is no way to create an encrypted volume from an unencrypted snapshot or vice versa. If your AMI uses encrypted volumes, you can only launch it on supported instance types. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.
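A sketch of creating an encrypted volume from scratch, assuming the same svc client; the zone, size, and volume type are illustrative values:

vol, err := svc.CreateVolume(&ec2.CreateVolumeInput{
	AvailabilityZone: aws.String("us-east-1a"),
	Size:             aws.Int64(100), // GiB
	VolumeType:       aws.String("gp2"),
	Encrypted:        aws.Bool(true), // attachable only to instance types that support EBS encryption
})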

    ", + "CreateVpcEndpointRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateVpcPeeringConnectionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateVpcRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateVpnConnectionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateVpnGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteCustomerGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteDhcpOptionsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteInternetGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteKeyPairRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteNetworkAclEntryRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteNetworkAclEntryRequest$Egress": "

    Indicates whether the rule is an egress rule.

    ", + "DeleteNetworkAclRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteNetworkInterfaceRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeletePlacementGroupRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteRouteRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteRouteTableRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteSecurityGroupRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteSnapshotRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteSpotDatafeedSubscriptionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteSubnetRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteTagsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteVolumeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteVpcEndpointsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteVpcPeeringConnectionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteVpcPeeringConnectionResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "DeleteVpcRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteVpnConnectionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteVpnGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeregisterImageRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeAccountAttributesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeAddressesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeAvailabilityZonesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeBundleTasksRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeClassicLinkInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeConversionTasksRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeCustomerGatewaysRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeDhcpOptionsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeImageAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeImagesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeImportImageTasksRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeImportSnapshotTasksRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeInstanceAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeInstanceStatusRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeInstanceStatusRequest$IncludeAllInstances": "

    When true, includes the health status for all instances. When false, includes the health status for running instances only.

    Default: false
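A minimal sketch of overriding that default, assuming the same svc client:

out, err := svc.DescribeInstanceStatus(&ec2.DescribeInstanceStatusInput{
	IncludeAllInstances: aws.Bool(true), // the default (false) reports running instances only
})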

    ", + "DescribeInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeInternetGatewaysRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeKeyPairsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeMovingAddressesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeNetworkAclsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeNetworkInterfaceAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeNetworkInterfacesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribePlacementGroupsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribePrefixListsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeRegionsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeReservedInstancesOfferingsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeReservedInstancesOfferingsRequest$IncludeMarketplace": "

    Include Reserved Instance Marketplace offerings in the response.

    ", + "DescribeReservedInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeRouteTablesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeScheduledInstanceAvailabilityRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeScheduledInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSecurityGroupReferencesRequest$DryRun": "

    Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSecurityGroupsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSnapshotAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSnapshotsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSpotDatafeedSubscriptionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSpotFleetInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSpotFleetRequestHistoryRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSpotFleetRequestsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSpotInstanceRequestsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSpotPriceHistoryRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeStaleSecurityGroupsRequest$DryRun": "

    Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSubnetsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeTagsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVolumeAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVolumeStatusRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVolumesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVpcAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVpcClassicLinkRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVpcEndpointServicesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVpcEndpointsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVpcPeeringConnectionsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVpcsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVpnConnectionsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVpnGatewaysRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DetachClassicLinkVpcRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DetachClassicLinkVpcResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "DetachInternetGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DetachNetworkInterfaceRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DetachNetworkInterfaceRequest$Force": "

    Specifies whether to force a detachment.

    ", + "DetachVolumeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DetachVolumeRequest$Force": "

    Forces detachment if the previous detachment attempt did not occur cleanly (for example, logging into an instance, unmounting the volume, and detaching normally). This option can lead to data loss or a corrupted file system. Use this option only as a last resort to detach a volume from a failed instance. The instance won't have an opportunity to flush file system caches or file system metadata. If you use this option, you must perform file system check and repair procedures.
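A sketch of a forced detach, assuming the same svc client and a placeholder volume ID:

_, err := svc.DetachVolume(&ec2.DetachVolumeInput{
	VolumeId: aws.String("vol-0123456789abcdef0"),
	Force:    aws.Bool(true), // last resort: caches are not flushed, so fsck afterwards
})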

    ", + "DetachVpnGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DisableVpcClassicLinkDnsSupportResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "DisableVpcClassicLinkRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DisableVpcClassicLinkResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "DisassociateAddressRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DisassociateRouteTableRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "EbsBlockDevice$DeleteOnTermination": "

    Indicates whether the EBS volume is deleted on instance termination.

    ", + "EbsBlockDevice$Encrypted": "

    Indicates whether the EBS volume is encrypted. Encrypted Amazon EBS volumes may only be attached to instances that support Amazon EBS encryption.

    ", + "EbsInstanceBlockDevice$DeleteOnTermination": "

    Indicates whether the volume is deleted on instance termination.

    ", + "EbsInstanceBlockDeviceSpecification$DeleteOnTermination": "

    Indicates whether the volume is deleted on instance termination.

    ", + "EnableVolumeIORequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "EnableVpcClassicLinkDnsSupportResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "EnableVpcClassicLinkRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "EnableVpcClassicLinkResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "GetConsoleOutputRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "GetConsoleScreenshotRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "GetConsoleScreenshotRequest$WakeUp": "

    When set to true, acts as keystroke input and wakes up an instance that's in standby or \"sleep\" mode.

    ", + "GetPasswordDataRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "IdFormat$UseLongIds": "

    Indicates whether longer IDs (17-character IDs) are enabled for the resource.

    ", + "Image$Public": "

    Indicates whether the image has public launch permissions. The value is true if this image has public launch permissions or false if it has only implicit and explicit launch permissions.

    ", + "Image$EnaSupport": "

    Specifies whether enhanced networking with ENA is enabled.

    ", + "ImportImageRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ImportInstanceLaunchSpecification$Monitoring": "

    Indicates whether monitoring is enabled.

    ", + "ImportInstanceRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ImportKeyPairRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ImportSnapshotRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ImportVolumeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "Instance$SourceDestCheck": "

    Specifies whether to enable an instance launched in a VPC to perform NAT. This controls whether source/destination checking is enabled on the instance. A value of true means checking is enabled, and false means checking is disabled. The value must be false for the instance to perform NAT. For more information, see NAT Instances in the Amazon Virtual Private Cloud User Guide.
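The flag itself is toggled through ModifyInstanceAttribute; a sketch of disabling it so a NAT instance can forward traffic, with the same assumed svc client and a placeholder ID:

_, err := svc.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{
	InstanceId:      aws.String("i-0123456789abcdef0"),
	SourceDestCheck: &ec2.AttributeBooleanValue{Value: aws.Bool(false)}, // must be false for NAT
})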

    ", + "Instance$EbsOptimized": "

    Indicates whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS Optimized instance.

    ", + "Instance$EnaSupport": "

    Specifies whether enhanced networking with ENA is enabled.

    ", + "InstanceNetworkInterface$SourceDestCheck": "

    Indicates whether to validate network traffic to or from this network interface.

    ", + "InstanceNetworkInterfaceAttachment$DeleteOnTermination": "

    Indicates whether the network interface is deleted when the instance is terminated.

    ", + "InstanceNetworkInterfaceSpecification$DeleteOnTermination": "

    If set to true, the interface is deleted when the instance is terminated. You can specify true only if creating a new network interface when launching an instance.

    ", + "InstanceNetworkInterfaceSpecification$AssociatePublicIpAddress": "

    Indicates whether to assign a public IP address to an instance you launch in a VPC. The public IP address can only be assigned to a network interface for eth0, and can only be assigned to a new network interface, not an existing one. You cannot specify more than one network interface in the request. If launching into a default subnet, the default value is true.

    ", + "InstancePrivateIpAddress$Primary": "

    Indicates whether this IP address is the primary private IP address of the network interface.

    ", + "LaunchSpecification$EbsOptimized": "

    Indicates whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS Optimized instance.

    Default: false

    ", + "ModifyIdFormatRequest$UseLongIds": "

    Indicate whether the resource should use longer IDs (17-character IDs).

    ", + "ModifyIdentityIdFormatRequest$UseLongIds": "

    Indicates whether the resource should use longer IDs (17-character IDs)

    ", + "ModifyImageAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ModifyInstanceAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ModifyInstancePlacementResult$Return": "

    Is true if the request succeeds, and an error otherwise.

    ", + "ModifyNetworkInterfaceAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ModifySnapshotAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ModifySpotFleetRequestResponse$Return": "

    Is true if the request succeeds, and an error otherwise.

    ", + "ModifyVolumeAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ModifyVpcEndpointRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ModifyVpcEndpointRequest$ResetPolicy": "

    Specify true to reset the policy document to the default policy. The default policy allows access to the service.

    ", + "ModifyVpcEndpointResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "ModifyVpcPeeringConnectionOptionsRequest$DryRun": "

    Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "MonitorInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "MoveAddressToVpcRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "NetworkAcl$IsDefault": "

    Indicates whether this is the default network ACL for the VPC.

    ", + "NetworkAclEntry$Egress": "

    Indicates whether the rule is an egress rule (applied to traffic leaving the subnet).

    ", + "NetworkInterface$RequesterManaged": "

    Indicates whether the network interface is being managed by AWS.

    ", + "NetworkInterface$SourceDestCheck": "

    Indicates whether traffic to or from the instance is validated.

    ", + "NetworkInterfaceAttachment$DeleteOnTermination": "

    Indicates whether the network interface is deleted when the instance is terminated.

    ", + "NetworkInterfaceAttachmentChanges$DeleteOnTermination": "

    Indicates whether the network interface is deleted when the instance is terminated.

    ", + "NetworkInterfacePrivateIpAddress$Primary": "

    Indicates whether this IP address is the primary private IP address of the network interface.

    ", + "PeeringConnectionOptions$AllowEgressFromLocalClassicLinkToRemoteVpc": "

    If true, enables outbound communication from an EC2-Classic instance that's linked to a local VPC via ClassicLink to instances in a peer VPC.

    ", + "PeeringConnectionOptions$AllowEgressFromLocalVpcToRemoteClassicLink": "

    If true, enables outbound communication from instances in a local VPC to an EC2-Classic instance that's linked to a peer VPC via ClassicLink.

    ", + "PeeringConnectionOptionsRequest$AllowEgressFromLocalClassicLinkToRemoteVpc": "

    If true, enables outbound communication from an EC2-Classic instance that's linked to a local VPC via ClassicLink to instances in a peer VPC.

    ", + "PeeringConnectionOptionsRequest$AllowEgressFromLocalVpcToRemoteClassicLink": "

    If true, enables outbound communication from instances in a local VPC to an EC2-Classic instance that's linked to a peer VPC via ClassicLink.

    ", + "PriceSchedule$Active": "

    The current price schedule, as determined by the term remaining for the Reserved Instance in the listing.

    A specific price schedule is always in effect, but only one price schedule can be active at any time. Take, for example, a Reserved Instance listing that has five months remaining in its term. When you specify price schedules for five months and two months, this means that schedule 1, covering the first three months of the remaining term, will be active during months 5, 4, and 3. Then schedule 2, covering the last two months of the term, will be active for months 2 and 1.

    ", + "PrivateIpAddressSpecification$Primary": "

    Indicates whether the private IP address is the primary private IP address. Only one IP address can be designated as primary.

    ", + "PurchaseReservedInstancesOfferingRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "PurchaseScheduledInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RebootInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RegisterImageRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RegisterImageRequest$EnaSupport": "

    Set to true to enable enhanced networking with ENA for the AMI and any instances that you launch from the AMI.

    This option is supported only for HVM AMIs. Specifying this option with a PV AMI can make instances launched from the AMI unreachable.

    ", + "RejectVpcPeeringConnectionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RejectVpcPeeringConnectionResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "ReleaseAddressRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ReplaceNetworkAclAssociationRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ReplaceNetworkAclEntryRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ReplaceNetworkAclEntryRequest$Egress": "

    Indicates whether to replace the egress rule.

    Default: If no value is specified, we replace the ingress rule.

    ", + "ReplaceRouteRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ReplaceRouteTableAssociationRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ReportInstanceStatusRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RequestSpotFleetRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RequestSpotInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RequestSpotLaunchSpecification$EbsOptimized": "

    Indicates whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS Optimized instance.

    Default: false

    ", + "ReservedInstancesOffering$Marketplace": "

    Indicates whether the offering is available through the Reserved Instance Marketplace (resale) or AWS. If it's a Reserved Instance Marketplace offering, this is true.

    ", + "ResetImageAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ResetInstanceAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ResetNetworkInterfaceAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ResetSnapshotAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RestoreAddressToClassicRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RevokeSecurityGroupEgressRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RevokeSecurityGroupIngressRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RouteTableAssociation$Main": "

    Indicates whether this is the main route table.

    ", + "RunInstancesMonitoringEnabled$Enabled": "

    Indicates whether monitoring is enabled for the instance.

    ", + "RunInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RunInstancesRequest$DisableApiTermination": "

    If you set this parameter to true, you can't terminate the instance using the Amazon EC2 console, CLI, or API; otherwise, you can. If you set this parameter to true and then later want to be able to terminate the instance, you must first change the value of the disableApiTermination attribute to false using ModifyInstanceAttribute. Alternatively, if you set InstanceInitiatedShutdownBehavior to terminate, you can terminate the instance by running the shutdown command from the instance.

    Default: false

    ", + "RunInstancesRequest$EbsOptimized": "

    Indicates whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS-optimized instance.

    Default: false

    ", + "RunScheduledInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ScheduledInstanceRecurrence$OccurrenceRelativeToEnd": "

    Indicates whether the occurrence is relative to the end of the specified week or month.

    ", + "ScheduledInstanceRecurrenceRequest$OccurrenceRelativeToEnd": "

    Indicates whether the occurrence is relative to the end of the specified week or month. You can't specify this value with a daily schedule.

    ", + "ScheduledInstancesEbs$DeleteOnTermination": "

    Indicates whether the volume is deleted on instance termination.

    ", + "ScheduledInstancesEbs$Encrypted": "

    Indicates whether the volume is encrypted. You can attached encrypted volumes only to instances that support them.

    ", + "ScheduledInstancesLaunchSpecification$EbsOptimized": "

    Indicates whether the instances are optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS-optimized instance.

    Default: false

    ", + "ScheduledInstancesMonitoring$Enabled": "

    Indicates whether monitoring is enabled.

    ", + "ScheduledInstancesNetworkInterface$AssociatePublicIpAddress": "

    Indicates whether to assign a public IP address to instances launched in a VPC. The public IP address can only be assigned to a network interface for eth0, and can only be assigned to a new network interface, not an existing one. You cannot specify more than one network interface in the request. If launching into a default subnet, the default value is true.

    ", + "ScheduledInstancesNetworkInterface$DeleteOnTermination": "

    Indicates whether to delete the interface when the instance is terminated.

    ", + "ScheduledInstancesPrivateIpAddressConfig$Primary": "

    Indicates whether this is a primary IP address. Otherwise, this is a secondary IP address.

    ", + "Snapshot$Encrypted": "

    Indicates whether the snapshot is encrypted.

    ", + "SpotFleetLaunchSpecification$EbsOptimized": "

    Indicates whether the instances are optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS Optimized instance.

    Default: false

    ", + "SpotFleetMonitoring$Enabled": "

    Enables monitoring for the instance.

    Default: false

    ", + "SpotFleetRequestConfigData$TerminateInstancesWithExpiration": "

    Indicates whether running Spot instances should be terminated when the Spot fleet request expires.

    ", + "StartInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "StopInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "StopInstancesRequest$Force": "

    Forces the instances to stop. The instances do not have an opportunity to flush file system caches or file system metadata. If you use this option, you must perform file system check and repair procedures. This option is not recommended for Windows instances.

    Default: false

    ", + "Subnet$DefaultForAz": "

    Indicates whether this is the default subnet for the Availability Zone.

    ", + "Subnet$MapPublicIpOnLaunch": "

    Indicates whether instances launched in this subnet receive a public IP address.

    ", + "TerminateInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "UnmonitorInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "Volume$Encrypted": "

    Indicates whether the volume will be encrypted.

    ", + "VolumeAttachment$DeleteOnTermination": "

    Indicates whether the EBS volume is deleted on instance termination.

    ", + "Vpc$IsDefault": "

    Indicates whether the VPC is the default VPC.

    ", + "VpcClassicLink$ClassicLinkEnabled": "

    Indicates whether the VPC is enabled for ClassicLink.

    ", + "VpcPeeringConnectionOptionsDescription$AllowEgressFromLocalClassicLinkToRemoteVpc": "

    Indicates whether a local ClassicLink connection can communicate with the peer VPC over the VPC peering connection.

    ", + "VpcPeeringConnectionOptionsDescription$AllowEgressFromLocalVpcToRemoteClassicLink": "

    Indicates whether a local VPC can communicate with a ClassicLink connection in the peer VPC over the VPC peering connection.

    ", + "VpnConnectionOptions$StaticRoutesOnly": "

    Indicates whether the VPN connection uses static routes only. Static routes must be used for devices that don't support BGP.

    ", + "VpnConnectionOptionsSpecification$StaticRoutesOnly": "

    Indicates whether the VPN connection uses static routes only. Static routes must be used for devices that don't support BGP.

    " + } + }, + "BundleIdStringList": { + "base": null, + "refs": { + "DescribeBundleTasksRequest$BundleIds": "

    One or more bundle task IDs.

    Default: Describes all your bundle tasks.

    " + } + }, + "BundleInstanceRequest": { + "base": "

    Contains the parameters for BundleInstance.

    ", + "refs": { + } + }, + "BundleInstanceResult": { + "base": "

    Contains the output of BundleInstance.

    ", + "refs": { + } + }, + "BundleTask": { + "base": "

    Describes a bundle task.

    ", + "refs": { + "BundleInstanceResult$BundleTask": "

    Information about the bundle task.

    ", + "BundleTaskList$member": null, + "CancelBundleTaskResult$BundleTask": "

    Information about the bundle task.

    " + } + }, + "BundleTaskError": { + "base": "

    Describes an error for BundleInstance.

    ", + "refs": { + "BundleTask$BundleTaskError": "

    If the task fails, a description of the error.

    " + } + }, + "BundleTaskList": { + "base": null, + "refs": { + "DescribeBundleTasksResult$BundleTasks": "

    Information about one or more bundle tasks.

    " + } + }, + "BundleTaskState": { + "base": null, + "refs": { + "BundleTask$State": "

    The state of the task.

    " + } + }, + "CancelBatchErrorCode": { + "base": null, + "refs": { + "CancelSpotFleetRequestsError$Code": "

    The error code.

    " + } + }, + "CancelBundleTaskRequest": { + "base": "

    Contains the parameters for CancelBundleTask.

    ", + "refs": { + } + }, + "CancelBundleTaskResult": { + "base": "

    Contains the output of CancelBundleTask.

    ", + "refs": { + } + }, + "CancelConversionRequest": { + "base": "

    Contains the parameters for CancelConversionTask.

    ", + "refs": { + } + }, + "CancelExportTaskRequest": { + "base": "

    Contains the parameters for CancelExportTask.

    ", + "refs": { + } + }, + "CancelImportTaskRequest": { + "base": "

    Contains the parameters for CancelImportTask.

    ", + "refs": { + } + }, + "CancelImportTaskResult": { + "base": "

    Contains the output for CancelImportTask.

    ", + "refs": { + } + }, + "CancelReservedInstancesListingRequest": { + "base": "

    Contains the parameters for CancelReservedInstancesListing.

    ", + "refs": { + } + }, + "CancelReservedInstancesListingResult": { + "base": "

    Contains the output of CancelReservedInstancesListing.

    ", + "refs": { + } + }, + "CancelSpotFleetRequestsError": { + "base": "

    Describes a Spot fleet error.

    ", + "refs": { + "CancelSpotFleetRequestsErrorItem$Error": "

    The error.

    " + } + }, + "CancelSpotFleetRequestsErrorItem": { + "base": "

    Describes a Spot fleet request that was not successfully canceled.

    ", + "refs": { + "CancelSpotFleetRequestsErrorSet$member": null + } + }, + "CancelSpotFleetRequestsErrorSet": { + "base": null, + "refs": { + "CancelSpotFleetRequestsResponse$UnsuccessfulFleetRequests": "

    Information about the Spot fleet requests that are not successfully canceled.

    " + } + }, + "CancelSpotFleetRequestsRequest": { + "base": "

    Contains the parameters for CancelSpotFleetRequests.

    ", + "refs": { + } + }, + "CancelSpotFleetRequestsResponse": { + "base": "

    Contains the output of CancelSpotFleetRequests.

    ", + "refs": { + } + }, + "CancelSpotFleetRequestsSuccessItem": { + "base": "

    Describes a Spot fleet request that was successfully canceled.

    ", + "refs": { + "CancelSpotFleetRequestsSuccessSet$member": null + } + }, + "CancelSpotFleetRequestsSuccessSet": { + "base": null, + "refs": { + "CancelSpotFleetRequestsResponse$SuccessfulFleetRequests": "

    Information about the Spot fleet requests that are successfully canceled.

    " + } + }, + "CancelSpotInstanceRequestState": { + "base": null, + "refs": { + "CancelledSpotInstanceRequest$State": "

    The state of the Spot instance request.

    " + } + }, + "CancelSpotInstanceRequestsRequest": { + "base": "

    Contains the parameters for CancelSpotInstanceRequests.

    ", + "refs": { + } + }, + "CancelSpotInstanceRequestsResult": { + "base": "

    Contains the output of CancelSpotInstanceRequests.

    ", + "refs": { + } + }, + "CancelledSpotInstanceRequest": { + "base": "

    Describes a request to cancel a Spot instance.

    ", + "refs": { + "CancelledSpotInstanceRequestList$member": null + } + }, + "CancelledSpotInstanceRequestList": { + "base": null, + "refs": { + "CancelSpotInstanceRequestsResult$CancelledSpotInstanceRequests": "

    One or more Spot instance requests.

    " + } + }, + "ClassicLinkDnsSupport": { + "base": "

    Describes the ClassicLink DNS support status of a VPC.

    ", + "refs": { + "ClassicLinkDnsSupportList$member": null + } + }, + "ClassicLinkDnsSupportList": { + "base": null, + "refs": { + "DescribeVpcClassicLinkDnsSupportResult$Vpcs": "

    Information about the ClassicLink DNS support status of the VPCs.

    " + } + }, + "ClassicLinkInstance": { + "base": "

    Describes a linked EC2-Classic instance.

    ", + "refs": { + "ClassicLinkInstanceList$member": null + } + }, + "ClassicLinkInstanceList": { + "base": null, + "refs": { + "DescribeClassicLinkInstancesResult$Instances": "

    Information about one or more linked EC2-Classic instances.

    " + } + }, + "ClientData": { + "base": "

    Describes the client-specific data.

    ", + "refs": { + "ImportImageRequest$ClientData": "

    The client-specific data.

    ", + "ImportSnapshotRequest$ClientData": "

    The client-specific data.

    " + } + }, + "ConfirmProductInstanceRequest": { + "base": "

    Contains the parameters for ConfirmProductInstance.

    ", + "refs": { + } + }, + "ConfirmProductInstanceResult": { + "base": "

    Contains the output of ConfirmProductInstance.

    ", + "refs": { + } + }, + "ContainerFormat": { + "base": null, + "refs": { + "ExportToS3Task$ContainerFormat": "

    The container format used to combine disk images with metadata (such as OVF). If absent, only the disk image is exported.

    ", + "ExportToS3TaskSpecification$ContainerFormat": "

    The container format used to combine disk images with metadata (such as OVF). If absent, only the disk image is exported.

    " + } + }, + "ConversionIdStringList": { + "base": null, + "refs": { + "DescribeConversionTasksRequest$ConversionTaskIds": "

    One or more conversion task IDs.

    " + } + }, + "ConversionTask": { + "base": "

    Describes a conversion task.

    ", + "refs": { + "DescribeConversionTaskList$member": null, + "ImportInstanceResult$ConversionTask": "

    Information about the conversion task.

    ", + "ImportVolumeResult$ConversionTask": "

    Information about the conversion task.

    " + } + }, + "ConversionTaskState": { + "base": null, + "refs": { + "ConversionTask$State": "

    The state of the conversion task.

    " + } + }, + "CopyImageRequest": { + "base": "

    Contains the parameters for CopyImage.

    ", + "refs": { + } + }, + "CopyImageResult": { + "base": "

    Contains the output of CopyImage.

    ", + "refs": { + } + }, + "CopySnapshotRequest": { + "base": "

    Contains the parameters for CopySnapshot.

    ", + "refs": { + } + }, + "CopySnapshotResult": { + "base": "

    Contains the output of CopySnapshot.

    ", + "refs": { + } + }, + "CreateCustomerGatewayRequest": { + "base": "

    Contains the parameters for CreateCustomerGateway.

    ", + "refs": { + } + }, + "CreateCustomerGatewayResult": { + "base": "

    Contains the output of CreateCustomerGateway.

    ", + "refs": { + } + }, + "CreateDhcpOptionsRequest": { + "base": "

    Contains the parameters for CreateDhcpOptions.

    ", + "refs": { + } + }, + "CreateDhcpOptionsResult": { + "base": "

    Contains the output of CreateDhcpOptions.

    ", + "refs": { + } + }, + "CreateFlowLogsRequest": { + "base": "

    Contains the parameters for CreateFlowLogs.

    ", + "refs": { + } + }, + "CreateFlowLogsResult": { + "base": "

    Contains the output of CreateFlowLogs.

    ", + "refs": { + } + }, + "CreateImageRequest": { + "base": "

    Contains the parameters for CreateImage.

    ", + "refs": { + } + }, + "CreateImageResult": { + "base": "

    Contains the output of CreateImage.

    ", + "refs": { + } + }, + "CreateInstanceExportTaskRequest": { + "base": "

    Contains the parameters for CreateInstanceExportTask.

    ", + "refs": { + } + }, + "CreateInstanceExportTaskResult": { + "base": "

    Contains the output for CreateInstanceExportTask.

    ", + "refs": { + } + }, + "CreateInternetGatewayRequest": { + "base": "

    Contains the parameters for CreateInternetGateway.

    ", + "refs": { + } + }, + "CreateInternetGatewayResult": { + "base": "

    Contains the output of CreateInternetGateway.

    ", + "refs": { + } + }, + "CreateKeyPairRequest": { + "base": "

    Contains the parameters for CreateKeyPair.

    ", + "refs": { + } + }, + "CreateNatGatewayRequest": { + "base": "

    Contains the parameters for CreateNatGateway.

    ", + "refs": { + } + }, + "CreateNatGatewayResult": { + "base": "

    Contains the output of CreateNatGateway.

    ", + "refs": { + } + }, + "CreateNetworkAclEntryRequest": { + "base": "

    Contains the parameters for CreateNetworkAclEntry.

    ", + "refs": { + } + }, + "CreateNetworkAclRequest": { + "base": "

    Contains the parameters for CreateNetworkAcl.

    ", + "refs": { + } + }, + "CreateNetworkAclResult": { + "base": "

    Contains the output of CreateNetworkAcl.

    ", + "refs": { + } + }, + "CreateNetworkInterfaceRequest": { + "base": "

    Contains the parameters for CreateNetworkInterface.

    ", + "refs": { + } + }, + "CreateNetworkInterfaceResult": { + "base": "

    Contains the output of CreateNetworkInterface.

    ", + "refs": { + } + }, + "CreatePlacementGroupRequest": { + "base": "

    Contains the parameters for CreatePlacementGroup.

    ", + "refs": { + } + }, + "CreateReservedInstancesListingRequest": { + "base": "

    Contains the parameters for CreateReservedInstancesListing.

    ", + "refs": { + } + }, + "CreateReservedInstancesListingResult": { + "base": "

    Contains the output of CreateReservedInstancesListing.

    ", + "refs": { + } + }, + "CreateRouteRequest": { + "base": "

    Contains the parameters for CreateRoute.

    ", + "refs": { + } + }, + "CreateRouteResult": { + "base": "

    Contains the output of CreateRoute.

    ", + "refs": { + } + }, + "CreateRouteTableRequest": { + "base": "

    Contains the parameters for CreateRouteTable.

    ", + "refs": { + } + }, + "CreateRouteTableResult": { + "base": "

    Contains the output of CreateRouteTable.

    ", + "refs": { + } + }, + "CreateSecurityGroupRequest": { + "base": "

    Contains the parameters for CreateSecurityGroup.

    ", + "refs": { + } + }, + "CreateSecurityGroupResult": { + "base": "

    Contains the output of CreateSecurityGroup.

    ", + "refs": { + } + }, + "CreateSnapshotRequest": { + "base": "

    Contains the parameters for CreateSnapshot.

    ", + "refs": { + } + }, + "CreateSpotDatafeedSubscriptionRequest": { + "base": "

    Contains the parameters for CreateSpotDatafeedSubscription.

    ", + "refs": { + } + }, + "CreateSpotDatafeedSubscriptionResult": { + "base": "

    Contains the output of CreateSpotDatafeedSubscription.

    ", + "refs": { + } + }, + "CreateSubnetRequest": { + "base": "

    Contains the parameters for CreateSubnet.

    ", + "refs": { + } + }, + "CreateSubnetResult": { + "base": "

    Contains the output of CreateSubnet.

    ", + "refs": { + } + }, + "CreateTagsRequest": { + "base": "

    Contains the parameters for CreateTags.

    ", + "refs": { + } + }, + "CreateVolumePermission": { + "base": "

    Describes the user or group to be added or removed from the permissions for a volume.

    ", + "refs": { + "CreateVolumePermissionList$member": null + } + }, + "CreateVolumePermissionList": { + "base": null, + "refs": { + "CreateVolumePermissionModifications$Add": "

    Adds a specific AWS account ID or group to a volume's list of create volume permissions.

    ", + "CreateVolumePermissionModifications$Remove": "

    Removes a specific AWS account ID or group from a volume's list of create volume permissions.

    ", + "DescribeSnapshotAttributeResult$CreateVolumePermissions": "

    A list of permissions for creating volumes from the snapshot.

    " + } + }, + "CreateVolumePermissionModifications": { + "base": "

    Describes modifications to the permissions for a volume.

    ", + "refs": { + "ModifySnapshotAttributeRequest$CreateVolumePermission": "

    A JSON representation of the snapshot attribute modification.

    " + } + }, + "CreateVolumeRequest": { + "base": "

    Contains the parameters for CreateVolume.

    ", + "refs": { + } + }, + "CreateVpcEndpointRequest": { + "base": "

    Contains the parameters for CreateVpcEndpoint.

    ", + "refs": { + } + }, + "CreateVpcEndpointResult": { + "base": "

    Contains the output of CreateVpcEndpoint.

    ", + "refs": { + } + }, + "CreateVpcPeeringConnectionRequest": { + "base": "

    Contains the parameters for CreateVpcPeeringConnection.

    ", + "refs": { + } + }, + "CreateVpcPeeringConnectionResult": { + "base": "

    Contains the output of CreateVpcPeeringConnection.

    ", + "refs": { + } + }, + "CreateVpcRequest": { + "base": "

    Contains the parameters for CreateVpc.

    ", + "refs": { + } + }, + "CreateVpcResult": { + "base": "

    Contains the output of CreateVpc.

    ", + "refs": { + } + }, + "CreateVpnConnectionRequest": { + "base": "

    Contains the parameters for CreateVpnConnection.

    ", + "refs": { + } + }, + "CreateVpnConnectionResult": { + "base": "

    Contains the output of CreateVpnConnection.

    ", + "refs": { + } + }, + "CreateVpnConnectionRouteRequest": { + "base": "

    Contains the parameters for CreateVpnConnectionRoute.

    ", + "refs": { + } + }, + "CreateVpnGatewayRequest": { + "base": "

    Contains the parameters for CreateVpnGateway.

    ", + "refs": { + } + }, + "CreateVpnGatewayResult": { + "base": "

    Contains the output of CreateVpnGateway.

    ", + "refs": { + } + }, + "CurrencyCodeValues": { + "base": null, + "refs": { + "PriceSchedule$CurrencyCode": "

    The currency for transacting the Reserved Instance resale. At this time, the only supported currency is USD.

    ", + "PriceScheduleSpecification$CurrencyCode": "

    The currency for transacting the Reserved Instance resale. At this time, the only supported currency is USD.

    ", + "ReservedInstanceLimitPrice$CurrencyCode": "

    The currency in which the limitPrice amount is specified. At this time, the only supported currency is USD.

    ", + "ReservedInstances$CurrencyCode": "

    The currency of the Reserved Instance. It's specified using ISO 4217 standard currency codes. At this time, the only supported currency is USD.

    ", + "ReservedInstancesOffering$CurrencyCode": "

    The currency of the Reserved Instance offering you are purchasing. It's specified using ISO 4217 standard currency codes. At this time, the only supported currency is USD.

    " + } + }, + "CustomerGateway": { + "base": "

    Describes a customer gateway.

    ", + "refs": { + "CreateCustomerGatewayResult$CustomerGateway": "

    Information about the customer gateway.

    ", + "CustomerGatewayList$member": null + } + }, + "CustomerGatewayIdStringList": { + "base": null, + "refs": { + "DescribeCustomerGatewaysRequest$CustomerGatewayIds": "

    One or more customer gateway IDs.

    Default: Describes all your customer gateways.

    " + } + }, + "CustomerGatewayList": { + "base": null, + "refs": { + "DescribeCustomerGatewaysResult$CustomerGateways": "

    Information about one or more customer gateways.

    " + } + }, + "DatafeedSubscriptionState": { + "base": null, + "refs": { + "SpotDatafeedSubscription$State": "

    The state of the Spot instance data feed subscription.

    " + } + }, + "DateTime": { + "base": null, + "refs": { + "BundleTask$StartTime": "

    The time this task started.

    ", + "BundleTask$UpdateTime": "

    The time of the most recent update for the task.

    ", + "ClientData$UploadStart": "

    The time that the disk upload starts.

    ", + "ClientData$UploadEnd": "

    The time that the disk upload ends.

    ", + "DescribeSpotFleetRequestHistoryRequest$StartTime": "

    The starting date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).

    ", + "DescribeSpotFleetRequestHistoryResponse$StartTime": "

    The starting date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).

    ", + "DescribeSpotFleetRequestHistoryResponse$LastEvaluatedTime": "

    The last date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). All records up to this time were retrieved.

    If nextToken indicates that there are more results, this value is not present.

    ", + "DescribeSpotPriceHistoryRequest$StartTime": "

    The date and time, up to the past 90 days, from which to start retrieving the price history data, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).

    ", + "DescribeSpotPriceHistoryRequest$EndTime": "

    The date and time, up to the current date, from which to stop retrieving the price history data, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).

    ", + "EbsInstanceBlockDevice$AttachTime": "

    The time stamp when the attachment initiated.

    ", + "FlowLog$CreationTime": "

    The date and time the flow log was created.

    ", + "GetConsoleOutputResult$Timestamp": "

    The time the output was last updated.

    ", + "GetPasswordDataResult$Timestamp": "

    The time the data was last updated.

    ", + "HistoryRecord$Timestamp": "

    The date and time of the event, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).

    ", + "IdFormat$Deadline": "

    The date in UTC at which you are permanently switched over to using longer IDs. If a deadline is not yet available for this resource type, this field is not returned.

    ", + "Instance$LaunchTime": "

    The time the instance was launched.

    ", + "InstanceNetworkInterfaceAttachment$AttachTime": "

    The time stamp when the attachment initiated.

    ", + "InstanceStatusDetails$ImpairedSince": "

    The time when a status check failed. For an instance that was launched and impaired, this is the time when the instance was launched.

    ", + "InstanceStatusEvent$NotBefore": "

    The earliest scheduled start time for the event.

    ", + "InstanceStatusEvent$NotAfter": "

    The latest scheduled end time for the event.

    ", + "NatGateway$CreateTime": "

    The date and time the NAT gateway was created.

    ", + "NatGateway$DeleteTime": "

    The date and time the NAT gateway was deleted, if applicable.

    ", + "NetworkInterfaceAttachment$AttachTime": "

    The timestamp indicating when the attachment initiated.

    ", + "ProvisionedBandwidth$RequestTime": "

    Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

    ", + "ProvisionedBandwidth$ProvisionTime": "

    Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

    ", + "ReportInstanceStatusRequest$StartTime": "

    The time at which the reported instance health state began.

    ", + "ReportInstanceStatusRequest$EndTime": "

    The time at which the reported instance health state ended.

    ", + "RequestSpotInstancesRequest$ValidFrom": "

    The start date of the request. If this is a one-time request, the request becomes active at this date and time and remains active until all instances launch, the request expires, or the request is canceled. If the request is persistent, the request becomes active at this date and time and remains active until it expires or is canceled.

    Default: The request is effective indefinitely.

    ", + "RequestSpotInstancesRequest$ValidUntil": "

    The end date of the request. If this is a one-time request, the request remains active until all instances launch, the request is canceled, or this date is reached. If the request is persistent, it remains active until it is canceled or this date and time is reached.

    Default: The request is effective indefinitely.

    ", + "ReservedInstances$Start": "

    The date and time the Reserved Instance started.

    ", + "ReservedInstances$End": "

    The time when the Reserved Instance expires.

    ", + "ReservedInstancesListing$CreateDate": "

    The time the listing was created.

    ", + "ReservedInstancesListing$UpdateDate": "

    The last modified timestamp of the listing.

    ", + "ReservedInstancesModification$CreateDate": "

    The time when the modification request was created.

    ", + "ReservedInstancesModification$UpdateDate": "

    The time when the modification request was last updated.

    ", + "ReservedInstancesModification$EffectiveDate": "

    The time for the modification to become effective.

    ", + "ScheduledInstance$PreviousSlotEndTime": "

    The time that the previous schedule ended or will end.

    ", + "ScheduledInstance$NextSlotStartTime": "

    The time for the next schedule to start.

    ", + "ScheduledInstance$TermStartDate": "

    The start date for the Scheduled Instance.

    ", + "ScheduledInstance$TermEndDate": "

    The end date for the Scheduled Instance.

    ", + "ScheduledInstance$CreateDate": "

    The date when the Scheduled Instance was purchased.

    ", + "ScheduledInstanceAvailability$FirstSlotStartTime": "

    The time period for the first schedule to start.

    ", + "SlotDateTimeRangeRequest$EarliestTime": "

    The earliest date and time, in UTC, for the Scheduled Instance to start.

    ", + "SlotDateTimeRangeRequest$LatestTime": "

    The latest date and time, in UTC, for the Scheduled Instance to start. This value must be later than or equal to the earliest date and at most three months in the future.

    ", + "SlotStartTimeRangeRequest$EarliestTime": "

    The earliest date and time, in UTC, for the Scheduled Instance to start.

    ", + "SlotStartTimeRangeRequest$LatestTime": "

    The latest date and time, in UTC, for the Scheduled Instance to start.

    ", + "Snapshot$StartTime": "

    The time stamp when the snapshot was initiated.

    ", + "SpotFleetRequestConfig$CreateTime": "

    The creation date and time of the request.

    ", + "SpotFleetRequestConfigData$ValidFrom": "

    The start date and time of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). The default is to start fulfilling the request immediately.

    ", + "SpotFleetRequestConfigData$ValidUntil": "

    The end date and time of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). At this point, no new Spot instance requests are placed or enabled to fulfill the request.

    ", + "SpotInstanceRequest$ValidFrom": "

    The start date of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). The request becomes active at this date and time.

    ", + "SpotInstanceRequest$ValidUntil": "

    The end date of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). If this is a one-time request, it remains active until all instances launch, the request is canceled, or this date is reached. If the request is persistent, it remains active until it is canceled or this date is reached.

    ", + "SpotInstanceRequest$CreateTime": "

    The date and time when the Spot instance request was created, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).

    ", + "SpotInstanceStatus$UpdateTime": "

    The date and time of the most recent status update, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).

    ", + "SpotPrice$Timestamp": "

    The date and time the request was created, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).

    ", + "VgwTelemetry$LastStatusChange": "

    The date and time of the last change in status.

    ", + "Volume$CreateTime": "

    The time stamp when volume creation was initiated.

    ", + "VolumeAttachment$AttachTime": "

    The time stamp when the attachment initiated.

    ", + "VolumeStatusEvent$NotBefore": "

    The earliest start time of the event.

    ", + "VolumeStatusEvent$NotAfter": "

    The latest end time of the event.

    ", + "VpcEndpoint$CreationTimestamp": "

    The date and time the VPC endpoint was created.

    ", + "VpcPeeringConnection$ExpirationTime": "

    The time that an unaccepted VPC peering connection will expire.

    " + } + }, + "DeleteCustomerGatewayRequest": { + "base": "

    Contains the parameters for DeleteCustomerGateway.

    ", + "refs": { + } + }, + "DeleteDhcpOptionsRequest": { + "base": "

    Contains the parameters for DeleteDhcpOptions.

    ", + "refs": { + } + }, + "DeleteFlowLogsRequest": { + "base": "

    Contains the parameters for DeleteFlowLogs.

    ", + "refs": { + } + }, + "DeleteFlowLogsResult": { + "base": "

    Contains the output of DeleteFlowLogs.

    ", + "refs": { + } + }, + "DeleteInternetGatewayRequest": { + "base": "

    Contains the parameters for DeleteInternetGateway.

    ", + "refs": { + } + }, + "DeleteKeyPairRequest": { + "base": "

    Contains the parameters for DeleteKeyPair.

    ", + "refs": { + } + }, + "DeleteNatGatewayRequest": { + "base": "

    Contains the parameters for DeleteNatGateway.

    ", + "refs": { + } + }, + "DeleteNatGatewayResult": { + "base": "

    Contains the output of DeleteNatGateway.

    ", + "refs": { + } + }, + "DeleteNetworkAclEntryRequest": { + "base": "

    Contains the parameters for DeleteNetworkAclEntry.

    ", + "refs": { + } + }, + "DeleteNetworkAclRequest": { + "base": "

    Contains the parameters for DeleteNetworkAcl.

    ", + "refs": { + } + }, + "DeleteNetworkInterfaceRequest": { + "base": "

    Contains the parameters for DeleteNetworkInterface.

    ", + "refs": { + } + }, + "DeletePlacementGroupRequest": { + "base": "

    Contains the parameters for DeletePlacementGroup.

    ", + "refs": { + } + }, + "DeleteRouteRequest": { + "base": "

    Contains the parameters for DeleteRoute.

    ", + "refs": { + } + }, + "DeleteRouteTableRequest": { + "base": "

    Contains the parameters for DeleteRouteTable.

    ", + "refs": { + } + }, + "DeleteSecurityGroupRequest": { + "base": "

    Contains the parameters for DeleteSecurityGroup.

    ", + "refs": { + } + }, + "DeleteSnapshotRequest": { + "base": "

    Contains the parameters for DeleteSnapshot.

    ", + "refs": { + } + }, + "DeleteSpotDatafeedSubscriptionRequest": { + "base": "

    Contains the parameters for DeleteSpotDatafeedSubscription.

    ", + "refs": { + } + }, + "DeleteSubnetRequest": { + "base": "

    Contains the parameters for DeleteSubnet.

    ", + "refs": { + } + }, + "DeleteTagsRequest": { + "base": "

    Contains the parameters for DeleteTags.

    ", + "refs": { + } + }, + "DeleteVolumeRequest": { + "base": "

    Contains the parameters for DeleteVolume.

    ", + "refs": { + } + }, + "DeleteVpcEndpointsRequest": { + "base": "

    Contains the parameters for DeleteVpcEndpoints.

    ", + "refs": { + } + }, + "DeleteVpcEndpointsResult": { + "base": "

    Contains the output of DeleteVpcEndpoints.

    ", + "refs": { + } + }, + "DeleteVpcPeeringConnectionRequest": { + "base": "

    Contains the parameters for DeleteVpcPeeringConnection.

    ", + "refs": { + } + }, + "DeleteVpcPeeringConnectionResult": { + "base": "

    Contains the output of DeleteVpcPeeringConnection.

    ", + "refs": { + } + }, + "DeleteVpcRequest": { + "base": "

    Contains the parameters for DeleteVpc.

    ", + "refs": { + } + }, + "DeleteVpnConnectionRequest": { + "base": "

    Contains the parameters for DeleteVpnConnection.

    ", + "refs": { + } + }, + "DeleteVpnConnectionRouteRequest": { + "base": "

    Contains the parameters for DeleteVpnConnectionRoute.

    ", + "refs": { + } + }, + "DeleteVpnGatewayRequest": { + "base": "

    Contains the parameters for DeleteVpnGateway.

    ", + "refs": { + } + }, + "DeregisterImageRequest": { + "base": "

    Contains the parameters for DeregisterImage.

    ", + "refs": { + } + }, + "DescribeAccountAttributesRequest": { + "base": "

    Contains the parameters for DescribeAccountAttributes.

    ", + "refs": { + } + }, + "DescribeAccountAttributesResult": { + "base": "

    Contains the output of DescribeAccountAttributes.

    ", + "refs": { + } + }, + "DescribeAddressesRequest": { + "base": "

    Contains the parameters for DescribeAddresses.

    ", + "refs": { + } + }, + "DescribeAddressesResult": { + "base": "

    Contains the output of DescribeAddresses.

    ", + "refs": { + } + }, + "DescribeAvailabilityZonesRequest": { + "base": "

    Contains the parameters for DescribeAvailabilityZones.

    ", + "refs": { + } + }, + "DescribeAvailabilityZonesResult": { + "base": "

    Contains the output of DescribeAvailabiltyZones.

    ", + "refs": { + } + }, + "DescribeBundleTasksRequest": { + "base": "

    Contains the parameters for DescribeBundleTasks.

    ", + "refs": { + } + }, + "DescribeBundleTasksResult": { + "base": "

    Contains the output of DescribeBundleTasks.

    ", + "refs": { + } + }, + "DescribeClassicLinkInstancesRequest": { + "base": "

    Contains the parameters for DescribeClassicLinkInstances.

    ", + "refs": { + } + }, + "DescribeClassicLinkInstancesResult": { + "base": "

    Contains the output of DescribeClassicLinkInstances.

    ", + "refs": { + } + }, + "DescribeConversionTaskList": { + "base": null, + "refs": { + "DescribeConversionTasksResult$ConversionTasks": "

    Information about the conversion tasks.

    " + } + }, + "DescribeConversionTasksRequest": { + "base": "

    Contains the parameters for DescribeConversionTasks.

    ", + "refs": { + } + }, + "DescribeConversionTasksResult": { + "base": "

    Contains the output for DescribeConversionTasks.

    ", + "refs": { + } + }, + "DescribeCustomerGatewaysRequest": { + "base": "

    Contains the parameters for DescribeCustomerGateways.

    ", + "refs": { + } + }, + "DescribeCustomerGatewaysResult": { + "base": "

    Contains the output of DescribeCustomerGateways.

    ", + "refs": { + } + }, + "DescribeDhcpOptionsRequest": { + "base": "

    Contains the parameters for DescribeDhcpOptions.

    ", + "refs": { + } + }, + "DescribeDhcpOptionsResult": { + "base": "

    Contains the output of DescribeDhcpOptions.

    ", + "refs": { + } + }, + "DescribeExportTasksRequest": { + "base": "

    Contains the parameters for DescribeExportTasks.

    ", + "refs": { + } + }, + "DescribeExportTasksResult": { + "base": "

    Contains the output for DescribeExportTasks.

    ", + "refs": { + } + }, + "DescribeFlowLogsRequest": { + "base": "

    Contains the parameters for DescribeFlowLogs.

    ", + "refs": { + } + }, + "DescribeFlowLogsResult": { + "base": "

    Contains the output of DescribeFlowLogs.

    ", + "refs": { + } + }, + "DescribeHostsRequest": { + "base": "

    Contains the parameters for DescribeHosts.

    ", + "refs": { + } + }, + "DescribeHostsResult": { + "base": "

    Contains the output of DescribeHosts.

    ", + "refs": { + } + }, + "DescribeIdFormatRequest": { + "base": "

    Contains the parameters for DescribeIdFormat.

    ", + "refs": { + } + }, + "DescribeIdFormatResult": { + "base": "

    Contains the output of DescribeIdFormat.

    ", + "refs": { + } + }, + "DescribeIdentityIdFormatRequest": { + "base": "

    Contains the parameters for DescribeIdentityIdFormat.

    ", + "refs": { + } + }, + "DescribeIdentityIdFormatResult": { + "base": "

    Contains the output of DescribeIdentityIdFormat.

    ", + "refs": { + } + }, + "DescribeImageAttributeRequest": { + "base": "

    Contains the parameters for DescribeImageAttribute.

    ", + "refs": { + } + }, + "DescribeImagesRequest": { + "base": "

    Contains the parameters for DescribeImages.

    ", + "refs": { + } + }, + "DescribeImagesResult": { + "base": "

    Contains the output of DescribeImages.

    ", + "refs": { + } + }, + "DescribeImportImageTasksRequest": { + "base": "

    Contains the parameters for DescribeImportImageTasks.

    ", + "refs": { + } + }, + "DescribeImportImageTasksResult": { + "base": "

    Contains the output for DescribeImportImageTasks.

    ", + "refs": { + } + }, + "DescribeImportSnapshotTasksRequest": { + "base": "

    Contains the parameters for DescribeImportSnapshotTasks.

    ", + "refs": { + } + }, + "DescribeImportSnapshotTasksResult": { + "base": "

    Contains the output for DescribeImportSnapshotTasks.

    ", + "refs": { + } + }, + "DescribeInstanceAttributeRequest": { + "base": "

    Contains the parameters for DescribeInstanceAttribute.

    ", + "refs": { + } + }, + "DescribeInstanceStatusRequest": { + "base": "

    Contains the parameters for DescribeInstanceStatus.

    ", + "refs": { + } + }, + "DescribeInstanceStatusResult": { + "base": "

    Contains the output of DescribeInstanceStatus.

    ", + "refs": { + } + }, + "DescribeInstancesRequest": { + "base": "

    Contains the parameters for DescribeInstances.

    ", + "refs": { + } + }, + "DescribeInstancesResult": { + "base": "

    Contains the output of DescribeInstances.

    ", + "refs": { + } + }, + "DescribeInternetGatewaysRequest": { + "base": "

    Contains the parameters for DescribeInternetGateways.

    ", + "refs": { + } + }, + "DescribeInternetGatewaysResult": { + "base": "

    Contains the output of DescribeInternetGateways.

    ", + "refs": { + } + }, + "DescribeKeyPairsRequest": { + "base": "

    Contains the parameters for DescribeKeyPairs.

    ", + "refs": { + } + }, + "DescribeKeyPairsResult": { + "base": "

    Contains the output of DescribeKeyPairs.

    ", + "refs": { + } + }, + "DescribeMovingAddressesRequest": { + "base": "

    Contains the parameters for DescribeMovingAddresses.

    ", + "refs": { + } + }, + "DescribeMovingAddressesResult": { + "base": "

    Contains the output of DescribeMovingAddresses.

    ", + "refs": { + } + }, + "DescribeNatGatewaysRequest": { + "base": "

    Contains the parameters for DescribeNatGateways.

    ", + "refs": { + } + }, + "DescribeNatGatewaysResult": { + "base": "

    Contains the output of DescribeNatGateways.

    ", + "refs": { + } + }, + "DescribeNetworkAclsRequest": { + "base": "

    Contains the parameters for DescribeNetworkAcls.

    ", + "refs": { + } + }, + "DescribeNetworkAclsResult": { + "base": "

    Contains the output of DescribeNetworkAcls.

    ", + "refs": { + } + }, + "DescribeNetworkInterfaceAttributeRequest": { + "base": "

    Contains the parameters for DescribeNetworkInterfaceAttribute.

    ", + "refs": { + } + }, + "DescribeNetworkInterfaceAttributeResult": { + "base": "

    Contains the output of DescribeNetworkInterfaceAttribute.

    ", + "refs": { + } + }, + "DescribeNetworkInterfacesRequest": { + "base": "

    Contains the parameters for DescribeNetworkInterfaces.

    ", + "refs": { + } + }, + "DescribeNetworkInterfacesResult": { + "base": "

    Contains the output of DescribeNetworkInterfaces.

    ", + "refs": { + } + }, + "DescribePlacementGroupsRequest": { + "base": "

    Contains the parameters for DescribePlacementGroups.

    ", + "refs": { + } + }, + "DescribePlacementGroupsResult": { + "base": "

    Contains the output of DescribePlacementGroups.

    ", + "refs": { + } + }, + "DescribePrefixListsRequest": { + "base": "

    Contains the parameters for DescribePrefixLists.

    ", + "refs": { + } + }, + "DescribePrefixListsResult": { + "base": "

    Contains the output of DescribePrefixLists.

    ", + "refs": { + } + }, + "DescribeRegionsRequest": { + "base": "

    Contains the parameters for DescribeRegions.

    ", + "refs": { + } + }, + "DescribeRegionsResult": { + "base": "

    Contains the output of DescribeRegions.

    ", + "refs": { + } + }, + "DescribeReservedInstancesListingsRequest": { + "base": "

    Contains the parameters for DescribeReservedInstancesListings.

    ", + "refs": { + } + }, + "DescribeReservedInstancesListingsResult": { + "base": "

    Contains the output of DescribeReservedInstancesListings.

    ", + "refs": { + } + }, + "DescribeReservedInstancesModificationsRequest": { + "base": "

    Contains the parameters for DescribeReservedInstancesModifications.

    ", + "refs": { + } + }, + "DescribeReservedInstancesModificationsResult": { + "base": "

    Contains the output of DescribeReservedInstancesModifications.

    ", + "refs": { + } + }, + "DescribeReservedInstancesOfferingsRequest": { + "base": "

    Contains the parameters for DescribeReservedInstancesOfferings.

    ", + "refs": { + } + }, + "DescribeReservedInstancesOfferingsResult": { + "base": "

    Contains the output of DescribeReservedInstancesOfferings.

    ", + "refs": { + } + }, + "DescribeReservedInstancesRequest": { + "base": "

    Contains the parameters for DescribeReservedInstances.

    ", + "refs": { + } + }, + "DescribeReservedInstancesResult": { + "base": "

    Contains the output for DescribeReservedInstances.

    ", + "refs": { + } + }, + "DescribeRouteTablesRequest": { + "base": "

    Contains the parameters for DescribeRouteTables.

    ", + "refs": { + } + }, + "DescribeRouteTablesResult": { + "base": "

    Contains the output of DescribeRouteTables.

    ", + "refs": { + } + }, + "DescribeScheduledInstanceAvailabilityRequest": { + "base": "

    Contains the parameters for DescribeScheduledInstanceAvailability.

    ", + "refs": { + } + }, + "DescribeScheduledInstanceAvailabilityResult": { + "base": "

    Contains the output of DescribeScheduledInstanceAvailability.

    ", + "refs": { + } + }, + "DescribeScheduledInstancesRequest": { + "base": "

    Contains the parameters for DescribeScheduledInstances.

    ", + "refs": { + } + }, + "DescribeScheduledInstancesResult": { + "base": "

    Contains the output of DescribeScheduledInstances.

    ", + "refs": { + } + }, + "DescribeSecurityGroupReferencesRequest": { + "base": null, + "refs": { + } + }, + "DescribeSecurityGroupReferencesResult": { + "base": null, + "refs": { + } + }, + "DescribeSecurityGroupsRequest": { + "base": "

    Contains the parameters for DescribeSecurityGroups.

    ", + "refs": { + } + }, + "DescribeSecurityGroupsResult": { + "base": "

    Contains the output of DescribeSecurityGroups.

    ", + "refs": { + } + }, + "DescribeSnapshotAttributeRequest": { + "base": "

    Contains the parameters for DescribeSnapshotAttribute.

    ", + "refs": { + } + }, + "DescribeSnapshotAttributeResult": { + "base": "

    Contains the output of DescribeSnapshotAttribute.

    ", + "refs": { + } + }, + "DescribeSnapshotsRequest": { + "base": "

    Contains the parameters for DescribeSnapshots.

    ", + "refs": { + } + }, + "DescribeSnapshotsResult": { + "base": "

    Contains the output of DescribeSnapshots.

    ", + "refs": { + } + }, + "DescribeSpotDatafeedSubscriptionRequest": { + "base": "

    Contains the parameters for DescribeSpotDatafeedSubscription.

    ", + "refs": { + } + }, + "DescribeSpotDatafeedSubscriptionResult": { + "base": "

    Contains the output of DescribeSpotDatafeedSubscription.

    ", + "refs": { + } + }, + "DescribeSpotFleetInstancesRequest": { + "base": "

    Contains the parameters for DescribeSpotFleetInstances.

    ", + "refs": { + } + }, + "DescribeSpotFleetInstancesResponse": { + "base": "

    Contains the output of DescribeSpotFleetInstances.

    ", + "refs": { + } + }, + "DescribeSpotFleetRequestHistoryRequest": { + "base": "

    Contains the parameters for DescribeSpotFleetRequestHistory.

    ", + "refs": { + } + }, + "DescribeSpotFleetRequestHistoryResponse": { + "base": "

    Contains the output of DescribeSpotFleetRequestHistory.

    ", + "refs": { + } + }, + "DescribeSpotFleetRequestsRequest": { + "base": "

    Contains the parameters for DescribeSpotFleetRequests.

    ", + "refs": { + } + }, + "DescribeSpotFleetRequestsResponse": { + "base": "

    Contains the output of DescribeSpotFleetRequests.

    ", + "refs": { + } + }, + "DescribeSpotInstanceRequestsRequest": { + "base": "

    Contains the parameters for DescribeSpotInstanceRequests.

    ", + "refs": { + } + }, + "DescribeSpotInstanceRequestsResult": { + "base": "

    Contains the output of DescribeSpotInstanceRequests.

    ", + "refs": { + } + }, + "DescribeSpotPriceHistoryRequest": { + "base": "

    Contains the parameters for DescribeSpotPriceHistory.

    ", + "refs": { + } + }, + "DescribeSpotPriceHistoryResult": { + "base": "

    Contains the output of DescribeSpotPriceHistory.

    ", + "refs": { + } + }, + "DescribeStaleSecurityGroupsRequest": { + "base": null, + "refs": { + } + }, + "DescribeStaleSecurityGroupsResult": { + "base": null, + "refs": { + } + }, + "DescribeSubnetsRequest": { + "base": "

    Contains the parameters for DescribeSubnets.

    ", + "refs": { + } + }, + "DescribeSubnetsResult": { + "base": "

    Contains the output of DescribeSubnets.

    ", + "refs": { + } + }, + "DescribeTagsRequest": { + "base": "

    Contains the parameters for DescribeTags.

    ", + "refs": { + } + }, + "DescribeTagsResult": { + "base": "

    Contains the output of DescribeTags.

    ", + "refs": { + } + }, + "DescribeVolumeAttributeRequest": { + "base": "

    Contains the parameters for DescribeVolumeAttribute.

    ", + "refs": { + } + }, + "DescribeVolumeAttributeResult": { + "base": "

    Contains the output of DescribeVolumeAttribute.

    ", + "refs": { + } + }, + "DescribeVolumeStatusRequest": { + "base": "

    Contains the parameters for DescribeVolumeStatus.

    ", + "refs": { + } + }, + "DescribeVolumeStatusResult": { + "base": "

    Contains the output of DescribeVolumeStatus.

    ", + "refs": { + } + }, + "DescribeVolumesRequest": { + "base": "

    Contains the parameters for DescribeVolumes.

    ", + "refs": { + } + }, + "DescribeVolumesResult": { + "base": "

    Contains the output of DescribeVolumes.

    ", + "refs": { + } + }, + "DescribeVpcAttributeRequest": { + "base": "

    Contains the parameters for DescribeVpcAttribute.

    ", + "refs": { + } + }, + "DescribeVpcAttributeResult": { + "base": "

    Contains the output of DescribeVpcAttribute.

    ", + "refs": { + } + }, + "DescribeVpcClassicLinkDnsSupportRequest": { + "base": "

    Contains the parameters for DescribeVpcClassicLinkDnsSupport.

    ", + "refs": { + } + }, + "DescribeVpcClassicLinkDnsSupportResult": { + "base": "

    Contains the output of DescribeVpcClassicLinkDnsSupport.

    ", + "refs": { + } + }, + "DescribeVpcClassicLinkRequest": { + "base": "

    Contains the parameters for DescribeVpcClassicLink.

    ", + "refs": { + } + }, + "DescribeVpcClassicLinkResult": { + "base": "

    Contains the output of DescribeVpcClassicLink.

    ", + "refs": { + } + }, + "DescribeVpcEndpointServicesRequest": { + "base": "

    Contains the parameters for DescribeVpcEndpointServices.

    ", + "refs": { + } + }, + "DescribeVpcEndpointServicesResult": { + "base": "

    Contains the output of DescribeVpcEndpointServices.

    ", + "refs": { + } + }, + "DescribeVpcEndpointsRequest": { + "base": "

    Contains the parameters for DescribeVpcEndpoints.

    ", + "refs": { + } + }, + "DescribeVpcEndpointsResult": { + "base": "

    Contains the output of DescribeVpcEndpoints.

    ", + "refs": { + } + }, + "DescribeVpcPeeringConnectionsRequest": { + "base": "

    Contains the parameters for DescribeVpcPeeringConnections.

    ", + "refs": { + } + }, + "DescribeVpcPeeringConnectionsResult": { + "base": "

    Contains the output of DescribeVpcPeeringConnections.

    ", + "refs": { + } + }, + "DescribeVpcsRequest": { + "base": "

    Contains the parameters for DescribeVpcs.

    ", + "refs": { + } + }, + "DescribeVpcsResult": { + "base": "

    Contains the output of DescribeVpcs.

    ", + "refs": { + } + }, + "DescribeVpnConnectionsRequest": { + "base": "

    Contains the parameters for DescribeVpnConnections.

    ", + "refs": { + } + }, + "DescribeVpnConnectionsResult": { + "base": "

    Contains the output of DescribeVpnConnections.

    ", + "refs": { + } + }, + "DescribeVpnGatewaysRequest": { + "base": "

    Contains the parameters for DescribeVpnGateways.

    ", + "refs": { + } + }, + "DescribeVpnGatewaysResult": { + "base": "

    Contains the output of DescribeVpnGateways.

    ", + "refs": { + } + }, + "DetachClassicLinkVpcRequest": { + "base": "

    Contains the parameters for DetachClassicLinkVpc.

    ", + "refs": { + } + }, + "DetachClassicLinkVpcResult": { + "base": "

    Contains the output of DetachClassicLinkVpc.

    ", + "refs": { + } + }, + "DetachInternetGatewayRequest": { + "base": "

    Contains the parameters for DetachInternetGateway.

    ", + "refs": { + } + }, + "DetachNetworkInterfaceRequest": { + "base": "

    Contains the parameters for DetachNetworkInterface.

    ", + "refs": { + } + }, + "DetachVolumeRequest": { + "base": "

    Contains the parameters for DetachVolume.

    ", + "refs": { + } + }, + "DetachVpnGatewayRequest": { + "base": "

    Contains the parameters for DetachVpnGateway.

    ", + "refs": { + } + }, + "DeviceType": { + "base": null, + "refs": { + "Image$RootDeviceType": "

    The type of root device used by the AMI. The AMI can use an EBS volume or an instance store volume.

    ", + "Instance$RootDeviceType": "

    The root device type used by the AMI. The AMI can use an EBS volume or an instance store volume.

    " + } + }, + "DhcpConfiguration": { + "base": "

    Describes a DHCP configuration option.

    ", + "refs": { + "DhcpConfigurationList$member": null + } + }, + "DhcpConfigurationList": { + "base": null, + "refs": { + "DhcpOptions$DhcpConfigurations": "

    One or more DHCP options in the set.

    " + } + }, + "DhcpConfigurationValueList": { + "base": null, + "refs": { + "DhcpConfiguration$Values": "

    One or more values for the DHCP option.

    " + } + }, + "DhcpOptions": { + "base": "

    Describes a set of DHCP options.

    ", + "refs": { + "CreateDhcpOptionsResult$DhcpOptions": "

    A set of DHCP options.

    ", + "DhcpOptionsList$member": null + } + }, + "DhcpOptionsIdStringList": { + "base": null, + "refs": { + "DescribeDhcpOptionsRequest$DhcpOptionsIds": "

    The IDs of one or more DHCP options sets.

    Default: Describes all your DHCP options sets.

    " + } + }, + "DhcpOptionsList": { + "base": null, + "refs": { + "DescribeDhcpOptionsResult$DhcpOptions": "

    Information about one or more DHCP options sets.

    " + } + }, + "DisableVgwRoutePropagationRequest": { + "base": "

    Contains the parameters for DisableVgwRoutePropagation.

    ", + "refs": { + } + }, + "DisableVpcClassicLinkDnsSupportRequest": { + "base": "

    Contains the parameters for DisableVpcClassicLinkDnsSupport.

    ", + "refs": { + } + }, + "DisableVpcClassicLinkDnsSupportResult": { + "base": "

    Contains the output of DisableVpcClassicLinkDnsSupport.

    ", + "refs": { + } + }, + "DisableVpcClassicLinkRequest": { + "base": "

    Contains the parameters for DisableVpcClassicLink.

    ", + "refs": { + } + }, + "DisableVpcClassicLinkResult": { + "base": "

    Contains the output of DisableVpcClassicLink.

    ", + "refs": { + } + }, + "DisassociateAddressRequest": { + "base": "

    Contains the parameters for DisassociateAddress.

    ", + "refs": { + } + }, + "DisassociateRouteTableRequest": { + "base": "

    Contains the parameters for DisassociateRouteTable.

    ", + "refs": { + } + }, + "DiskImage": { + "base": "

    Describes a disk image.

    ", + "refs": { + "DiskImageList$member": null + } + }, + "DiskImageDescription": { + "base": "

    Describes a disk image.

    ", + "refs": { + "ImportInstanceVolumeDetailItem$Image": "

    The image.

    ", + "ImportVolumeTaskDetails$Image": "

    The image.

    " + } + }, + "DiskImageDetail": { + "base": "

    Describes a disk image.

    ", + "refs": { + "DiskImage$Image": "

    Information about the disk image.

    ", + "ImportVolumeRequest$Image": "

    The disk image.

    " + } + }, + "DiskImageFormat": { + "base": null, + "refs": { + "DiskImageDescription$Format": "

    The disk image format.

    ", + "DiskImageDetail$Format": "

    The disk image format.

    ", + "ExportToS3Task$DiskImageFormat": "

    The format for the exported image.

    ", + "ExportToS3TaskSpecification$DiskImageFormat": "

    The format for the exported image.

    " + } + }, + "DiskImageList": { + "base": null, + "refs": { + "ImportInstanceRequest$DiskImages": "

    The disk image.

    " + } + }, + "DiskImageVolumeDescription": { + "base": "

    Describes a disk image volume.

    ", + "refs": { + "ImportInstanceVolumeDetailItem$Volume": "

    The volume.

    ", + "ImportVolumeTaskDetails$Volume": "

    The volume.

    " + } + }, + "DomainType": { + "base": null, + "refs": { + "Address$Domain": "

    Indicates whether this Elastic IP address is for use with instances in EC2-Classic (standard) or instances in a VPC (vpc).

    ", + "AllocateAddressRequest$Domain": "

    Set to vpc to allocate the address for use with instances in a VPC.

    Default: The address is for use with instances in EC2-Classic.

    ", + "AllocateAddressResult$Domain": "

    Indicates whether this Elastic IP address is for use with instances in EC2-Classic (standard) or instances in a VPC (vpc).

    " + } + }, + "Double": { + "base": null, + "refs": { + "ClientData$UploadSize": "

    The size of the uploaded disk image, in GiB.

    ", + "PriceSchedule$Price": "

    The fixed price for the term.

    ", + "PriceScheduleSpecification$Price": "

    The fixed price for the term.

    ", + "PricingDetail$Price": "

    The price per instance.

    ", + "RecurringCharge$Amount": "

    The amount of the recurring charge.

    ", + "ReservedInstanceLimitPrice$Amount": "

    Used for Reserved Instance Marketplace offerings. Specifies the limit price on the total order (instanceCount * price).

    ", + "SnapshotDetail$DiskImageSize": "

    The size of the disk in the snapshot, in GiB.

    ", + "SnapshotTaskDetail$DiskImageSize": "

    The size of the disk in the snapshot, in GiB.

    ", + "SpotFleetLaunchSpecification$WeightedCapacity": "

    The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms (instances or a performance characteristic such as vCPUs, memory, or I/O).

    If the target capacity divided by this value is not a whole number, we round the number of instances to the next whole number. If this value is not specified, the default is 1.

    ", + "SpotFleetRequestConfigData$FulfilledCapacity": "

    The number of units fulfilled by this request compared to the set target capacity.

    " + } + }, + "EbsBlockDevice": { + "base": "

    Describes a block device for an EBS volume.

    ", + "refs": { + "BlockDeviceMapping$Ebs": "

    Parameters used to automatically set up EBS volumes when the instance is launched.

    " + } + }, + "EbsInstanceBlockDevice": { + "base": "

    Describes a parameter used to set up an EBS volume in a block device mapping.

    ", + "refs": { + "InstanceBlockDeviceMapping$Ebs": "

    Parameters used to automatically set up EBS volumes when the instance is launched.

    " + } + }, + "EbsInstanceBlockDeviceSpecification": { + "base": "

    Describes information used to set up an EBS volume specified in a block device mapping.

    ", + "refs": { + "InstanceBlockDeviceMappingSpecification$Ebs": "

    Parameters used to automatically set up EBS volumes when the instance is launched.

    " + } + }, + "EnableVgwRoutePropagationRequest": { + "base": "

    Contains the parameters for EnableVgwRoutePropagation.

    ", + "refs": { + } + }, + "EnableVolumeIORequest": { + "base": "

    Contains the parameters for EnableVolumeIO.

    ", + "refs": { + } + }, + "EnableVpcClassicLinkDnsSupportRequest": { + "base": "

    Contains the parameters for EnableVpcClassicLinkDnsSupport.

    ", + "refs": { + } + }, + "EnableVpcClassicLinkDnsSupportResult": { + "base": "

    Contains the output of EnableVpcClassicLinkDnsSupport.

    ", + "refs": { + } + }, + "EnableVpcClassicLinkRequest": { + "base": "

    Contains the parameters for EnableVpcClassicLink.

    ", + "refs": { + } + }, + "EnableVpcClassicLinkResult": { + "base": "

    Contains the output of EnableVpcClassicLink.

    ", + "refs": { + } + }, + "EventCode": { + "base": null, + "refs": { + "InstanceStatusEvent$Code": "

    The event code.

    " + } + }, + "EventInformation": { + "base": "

    Describes a Spot fleet event.

    ", + "refs": { + "HistoryRecord$EventInformation": "

    Information about the event.

    " + } + }, + "EventType": { + "base": null, + "refs": { + "DescribeSpotFleetRequestHistoryRequest$EventType": "

    The type of events to describe. By default, all events are described.

    ", + "HistoryRecord$EventType": "

    The event type.

    • error - Indicates an error with the Spot fleet request.

    • fleetRequestChange - Indicates a change in the status or configuration of the Spot fleet request.

    • instanceChange - Indicates that an instance was launched or terminated.

    " + } + }, + "ExcessCapacityTerminationPolicy": { + "base": null, + "refs": { + "ModifySpotFleetRequestRequest$ExcessCapacityTerminationPolicy": "

    Indicates whether running Spot instances should be terminated if the target capacity of the Spot fleet request is decreased below the current size of the Spot fleet.

    ", + "SpotFleetRequestConfigData$ExcessCapacityTerminationPolicy": "

    Indicates whether running Spot instances should be terminated if the target capacity of the Spot fleet request is decreased below the current size of the Spot fleet.

    " + } + }, + "ExecutableByStringList": { + "base": null, + "refs": { + "DescribeImagesRequest$ExecutableUsers": "

    Scopes the images by users with explicit launch permissions. Specify an AWS account ID, self (the sender of the request), or all (public AMIs).

    " + } + }, + "ExportEnvironment": { + "base": null, + "refs": { + "CreateInstanceExportTaskRequest$TargetEnvironment": "

    The target virtualization environment.

    ", + "InstanceExportDetails$TargetEnvironment": "

    The target virtualization environment.

    " + } + }, + "ExportTask": { + "base": "

    Describes an instance export task.

    ", + "refs": { + "CreateInstanceExportTaskResult$ExportTask": "

    Information about the instance export task.

    ", + "ExportTaskList$member": null + } + }, + "ExportTaskIdStringList": { + "base": null, + "refs": { + "DescribeExportTasksRequest$ExportTaskIds": "

    One or more export task IDs.

    " + } + }, + "ExportTaskList": { + "base": null, + "refs": { + "DescribeExportTasksResult$ExportTasks": "

    Information about the export tasks.

    " + } + }, + "ExportTaskState": { + "base": null, + "refs": { + "ExportTask$State": "

    The state of the export task.

    " + } + }, + "ExportToS3Task": { + "base": "

    Describes the format and location for an instance export task.

    ", + "refs": { + "ExportTask$ExportToS3Task": "

    Information about the export task.

    " + } + }, + "ExportToS3TaskSpecification": { + "base": "

    Describes an instance export task.

    ", + "refs": { + "CreateInstanceExportTaskRequest$ExportToS3Task": "

    The format and location for an instance export task.

    " + } + }, + "Filter": { + "base": "

    A filter name and value pair that is used to return a more specific list of results. Filters can be used to match a set of resources by various criteria, such as tags, attributes, or IDs.

    ", + "refs": { + "FilterList$member": null + } + }, + "FilterList": { + "base": null, + "refs": { + "DescribeAddressesRequest$Filters": "

    One or more filters. Filter names and values are case-sensitive.

    • allocation-id - [EC2-VPC] The allocation ID for the address.

    • association-id - [EC2-VPC] The association ID for the address.

    • domain - Indicates whether the address is for use in EC2-Classic (standard) or in a VPC (vpc).

    • instance-id - The ID of the instance the address is associated with, if any.

    • network-interface-id - [EC2-VPC] The ID of the network interface that the address is associated with, if any.

    • network-interface-owner-id - The AWS account ID of the owner.

    • private-ip-address - [EC2-VPC] The private IP address associated with the Elastic IP address.

    • public-ip - The Elastic IP address.

    ", + "DescribeAvailabilityZonesRequest$Filters": "

    One or more filters.

    • message - Information about the Availability Zone.

    • region-name - The name of the region for the Availability Zone (for example, us-east-1).

    • state - The state of the Availability Zone (available | information | impaired | unavailable).

    • zone-name - The name of the Availability Zone (for example, us-east-1a).

    ", + "DescribeBundleTasksRequest$Filters": "

    One or more filters.

    • bundle-id - The ID of the bundle task.

    • error-code - If the task failed, the error code returned.

    • error-message - If the task failed, the error message returned.

    • instance-id - The ID of the instance.

    • progress - The level of task completion, as a percentage (for example, 20%).

    • s3-bucket - The Amazon S3 bucket to store the AMI.

    • s3-prefix - The beginning of the AMI name.

    • start-time - The time the task started (for example, 2013-09-15T17:15:20.000Z).

    • state - The state of the task (pending | waiting-for-shutdown | bundling | storing | cancelling | complete | failed).

    • update-time - The time of the most recent update for the task.

    ", + "DescribeClassicLinkInstancesRequest$Filters": "

    One or more filters.

    • group-id - The ID of a VPC security group that's associated with the instance.

    • instance-id - The ID of the instance.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • vpc-id - The ID of the VPC that the instance is linked to.

    ", + "DescribeConversionTasksRequest$Filters": "

    One or more filters.

    ", + "DescribeCustomerGatewaysRequest$Filters": "

    One or more filters.

    • bgp-asn - The customer gateway's Border Gateway Protocol (BGP) Autonomous System Number (ASN).

    • customer-gateway-id - The ID of the customer gateway.

    • ip-address - The IP address of the customer gateway's Internet-routable external interface.

    • state - The state of the customer gateway (pending | available | deleting | deleted).

    • type - The type of customer gateway. Currently, the only supported type is ipsec.1.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    ", + "DescribeDhcpOptionsRequest$Filters": "

    One or more filters.

    • dhcp-options-id - The ID of a set of DHCP options.

    • key - The key for one of the options (for example, domain-name).

    • value - The value for one of the options.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    ", + "DescribeFlowLogsRequest$Filter": "

    One or more filters.

    • deliver-log-status - The status of the logs delivery (SUCCESS | FAILED).

    • flow-log-id - The ID of the flow log.

    • log-group-name - The name of the log group.

    • resource-id - The ID of the VPC, subnet, or network interface.

    • traffic-type - The type of traffic (ACCEPT | REJECT | ALL)

    ", + "DescribeHostsRequest$Filter": "

    One or more filters.

    • instance-type - The instance type size that the Dedicated host is configured to support.

    • auto-placement - Whether auto-placement is enabled or disabled (on | off).

    • host-reservation-id - The ID of the reservation associated with this host.

    • client-token - The idempotency token you provided when you launched the instance

    • state- The allocation state of the Dedicated host (available | under-assessment | permanent-failure | released | released-permanent-failure).

    • availability-zone - The Availability Zone of the host.

    ", + "DescribeImagesRequest$Filters": "

    One or more filters.

    • architecture - The image architecture (i386 | x86_64).

    • block-device-mapping.delete-on-termination - A Boolean value that indicates whether the Amazon EBS volume is deleted on instance termination.

    • block-device-mapping.device-name - The device name for the EBS volume (for example, /dev/sdh).

    • block-device-mapping.snapshot-id - The ID of the snapshot used for the EBS volume.

    • block-device-mapping.volume-size - The volume size of the EBS volume, in GiB.

    • block-device-mapping.volume-type - The volume type of the EBS volume (gp2 | io1 | st1 | sc1 | standard).

    • description - The description of the image (provided during image creation).

    • hypervisor - The hypervisor type (ovm | xen).

    • image-id - The ID of the image.

    • image-type - The image type (machine | kernel | ramdisk).

    • is-public - A Boolean that indicates whether the image is public.

    • kernel-id - The kernel ID.

    • manifest-location - The location of the image manifest.

    • name - The name of the AMI (provided during image creation).

    • owner-alias - The AWS account alias (for example, amazon).

    • owner-id - The AWS account ID of the image owner.

    • platform - The platform. To only list Windows-based AMIs, use windows.

    • product-code - The product code.

    • product-code.type - The type of the product code (devpay | marketplace).

    • ramdisk-id - The RAM disk ID.

    • root-device-name - The name of the root device volume (for example, /dev/sda1).

    • root-device-type - The type of the root device volume (ebs | instance-store).

    • state - The state of the image (available | pending | failed).

    • state-reason-code - The reason code for the state change.

    • state-reason-message - The message for the state change.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • virtualization-type - The virtualization type (paravirtual | hvm).

    ", + "DescribeImportImageTasksRequest$Filters": "

    Filter tasks using the task-state filter and one of the following values: active, completed, deleting, deleted.

    ", + "DescribeImportSnapshotTasksRequest$Filters": "

    One or more filters.

    ", + "DescribeInstanceStatusRequest$Filters": "

    One or more filters.

    • availability-zone - The Availability Zone of the instance.

    • event.code - The code for the scheduled event (instance-reboot | system-reboot | system-maintenance | instance-retirement | instance-stop).

    • event.description - A description of the event.

    • event.not-after - The latest end time for the scheduled event (for example, 2014-09-15T17:15:20.000Z).

    • event.not-before - The earliest start time for the scheduled event (for example, 2014-09-15T17:15:20.000Z).

    • instance-state-code - The code for the instance state, as a 16-bit unsigned integer. The high byte is an opaque internal value and should be ignored. The low byte is set based on the state represented. The valid values are 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped).

    • instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped).

    • instance-status.reachability - Filters on instance status where the name is reachability (passed | failed | initializing | insufficient-data).

    • instance-status.status - The status of the instance (ok | impaired | initializing | insufficient-data | not-applicable).

    • system-status.reachability - Filters on system status where the name is reachability (passed | failed | initializing | insufficient-data).

    • system-status.status - The system status of the instance (ok | impaired | initializing | insufficient-data | not-applicable).

    ", + "DescribeInstancesRequest$Filters": "

    One or more filters.

    • affinity - The affinity setting for an instance running on a Dedicated host (default | host).

    • architecture - The instance architecture (i386 | x86_64).

    • availability-zone - The Availability Zone of the instance.

    • block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2010-09-15T17:15:20.000Z.

    • block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination.

    • block-device-mapping.device-name - The device name for the EBS volume (for example, /dev/sdh or xvdh).

    • block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached).

    • block-device-mapping.volume-id - The volume ID of the EBS volume.

    • client-token - The idempotency token you provided when you launched the instance.

    • dns-name - The public DNS name of the instance.

    • group-id - The ID of the security group for the instance. EC2-Classic only.

    • group-name - The name of the security group for the instance. EC2-Classic only.

    • host-Id - The ID of the Dedicated host on which the instance is running, if applicable.

    • hypervisor - The hypervisor type of the instance (ovm | xen).

    • iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN.

    • image-id - The ID of the image used to launch the instance.

    • instance-id - The ID of the instance.

    • instance-lifecycle - Indicates whether this is a Spot Instance or a Scheduled Instance (spot | scheduled).

    • instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is an opaque internal value and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped).

    • instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped).

    • instance-type - The type of instance (for example, t2.micro).

    • instance.group-id - The ID of the security group for the instance.

    • instance.group-name - The name of the security group for the instance.

    • ip-address - The public IP address of the instance.

    • kernel-id - The kernel ID.

    • key-name - The name of the key pair used when the instance was launched.

    • launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on).

    • launch-time - The time when the instance was launched.

    • monitoring-state - Indicates whether monitoring is enabled for the instance (disabled | enabled).

    • owner-id - The AWS account ID of the instance owner.

    • placement-group-name - The name of the placement group for the instance.

    • platform - The platform. Use windows if you have Windows instances; otherwise, leave blank.

    • private-dns-name - The private DNS name of the instance.

    • private-ip-address - The private IP address of the instance.

    • product-code - The product code associated with the AMI used to launch the instance.

    • product-code.type - The type of product code (devpay | marketplace).

    • ramdisk-id - The RAM disk ID.

    • reason - The reason for the current state of the instance (for example, shows \"User Initiated [date]\" when you stop or terminate the instance). Similar to the state-reason-code filter.

    • requester-id - The ID of the entity that launched the instance on your behalf (for example, AWS Management Console, Auto Scaling, and so on).

    • reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you'll get one reservation ID. If you launch ten instances using the same launch request, you'll also get one reservation ID.

    • root-device-name - The name of the root device for the instance (for example, /dev/sda1 or /dev/xvda).

    • root-device-type - The type of root device that the instance uses (ebs | instance-store).

    • source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC.

    • spot-instance-request-id - The ID of the Spot instance request.

    • state-reason-code - The reason code for the state change.

    • state-reason-message - A message that describes the state change.

    • subnet-id - The ID of the subnet for the instance.

    • tag:key=value - The key/value combination of a tag assigned to the resource, where tag:key is the tag's key.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • tenancy - The tenancy of an instance (dedicated | default | host).

    • virtualization-type - The virtualization type of the instance (paravirtual | hvm).

    • vpc-id - The ID of the VPC that the instance is running in.

    • network-interface.description - The description of the network interface.

    • network-interface.subnet-id - The ID of the subnet for the network interface.

    • network-interface.vpc-id - The ID of the VPC for the network interface.

    • network-interface.network-interface-id - The ID of the network interface.

    • network-interface.owner-id - The ID of the owner of the network interface.

    • network-interface.availability-zone - The Availability Zone for the network interface.

    • network-interface.requester-id - The requester ID for the network interface.

    • network-interface.requester-managed - Indicates whether the network interface is being managed by AWS.

    • network-interface.status - The status of the network interface (available) | in-use).

    • network-interface.mac-address - The MAC address of the network interface.

    • network-interface.private-dns-name - The private DNS name of the network interface.

    • network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means checking is enabled, and false means checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC.

    • network-interface.group-id - The ID of a security group associated with the network interface.

    • network-interface.group-name - The name of a security group associated with the network interface.

    • network-interface.attachment.attachment-id - The ID of the interface attachment.

    • network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached.

    • network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.

    • network-interface.addresses.private-ip-address - The private IP address associated with the network interface.

    • network-interface.attachment.device-index - The device index to which the network interface is attached.

    • network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached).

    • network-interface.attachment.attach-time - The time that the network interface was attached to an instance.

    • network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated.

    • network-interface.addresses.primary - Specifies whether the IP address of the network interface is the primary private IP address.

    • network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address with a network interface.

    • network-interface.addresses.association.ip-owner-id - The owner ID of the private IP address associated with the network interface.

    • association.public-ip - The address of the Elastic IP address bound to the network interface.

    • association.ip-owner-id - The owner of the Elastic IP address associated with the network interface.

    • association.allocation-id - The allocation ID returned when you allocated the Elastic IP address for your network interface.

    • association.association-id - The association ID returned when the network interface was associated with an IP address.

    ", + "DescribeInternetGatewaysRequest$Filters": "

    One or more filters.

    • attachment.state - The current state of the attachment between the gateway and the VPC (available). Present only if a VPC is attached.

    • attachment.vpc-id - The ID of an attached VPC.

    • internet-gateway-id - The ID of the Internet gateway.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    ", + "DescribeKeyPairsRequest$Filters": "

    One or more filters.

    • fingerprint - The fingerprint of the key pair.

    • key-name - The name of the key pair.

    ", + "DescribeMovingAddressesRequest$Filters": "

    One or more filters.

    • moving-status - The status of the Elastic IP address (MovingToVpc | RestoringToClassic).

    ", + "DescribeNatGatewaysRequest$Filter": "

    One or more filters.

    • nat-gateway-id - The ID of the NAT gateway.

    • state - The state of the NAT gateway (pending | failed | available | deleting | deleted).

    • subnet-id - The ID of the subnet in which the NAT gateway resides.

    • vpc-id - The ID of the VPC in which the NAT gateway resides.

    ", + "DescribeNetworkAclsRequest$Filters": "

    One or more filters.

    • association.association-id - The ID of an association ID for the ACL.

    • association.network-acl-id - The ID of the network ACL involved in the association.

    • association.subnet-id - The ID of the subnet involved in the association.

    • default - Indicates whether the ACL is the default network ACL for the VPC.

    • entry.cidr - The CIDR range specified in the entry.

    • entry.egress - Indicates whether the entry applies to egress traffic.

    • entry.icmp.code - The ICMP code specified in the entry, if any.

    • entry.icmp.type - The ICMP type specified in the entry, if any.

    • entry.port-range.from - The start of the port range specified in the entry.

    • entry.port-range.to - The end of the port range specified in the entry.

    • entry.protocol - The protocol specified in the entry (tcp | udp | icmp or a protocol number).

    • entry.rule-action - Allows or denies the matching traffic (allow | deny).

    • entry.rule-number - The number of an entry (in other words, rule) in the ACL's set of entries.

    • network-acl-id - The ID of the network ACL.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • vpc-id - The ID of the VPC for the network ACL.

    ", + "DescribeNetworkInterfacesRequest$Filters": "

    One or more filters.

    • addresses.private-ip-address - The private IP addresses associated with the network interface.

    • addresses.primary - Whether the private IP address is the primary IP address associated with the network interface.

    • addresses.association.public-ip - The association ID returned when the network interface was associated with the Elastic IP address.

    • addresses.association.owner-id - The owner ID of the addresses associated with the network interface.

    • association.association-id - The association ID returned when the network interface was associated with an IP address.

    • association.allocation-id - The allocation ID returned when you allocated the Elastic IP address for your network interface.

    • association.ip-owner-id - The owner of the Elastic IP address associated with the network interface.

    • association.public-ip - The address of the Elastic IP address bound to the network interface.

    • association.public-dns-name - The public DNS name for the network interface.

    • attachment.attachment-id - The ID of the interface attachment.

    • attachment.attach.time - The time that the network interface was attached to an instance.

    • attachment.delete-on-termination - Indicates whether the attachment is deleted when an instance is terminated.

    • attachment.device-index - The device index to which the network interface is attached.

    • attachment.instance-id - The ID of the instance to which the network interface is attached.

    • attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.

    • attachment.nat-gateway-id - The ID of the NAT gateway to which the network interface is attached.

    • attachment.status - The status of the attachment (attaching | attached | detaching | detached).

    • availability-zone - The Availability Zone of the network interface.

    • description - The description of the network interface.

    • group-id - The ID of a security group associated with the network interface.

    • group-name - The name of a security group associated with the network interface.

    • mac-address - The MAC address of the network interface.

    • network-interface-id - The ID of the network interface.

    • owner-id - The AWS account ID of the network interface owner.

    • private-ip-address - The private IP address or addresses of the network interface.

    • private-dns-name - The private DNS name of the network interface.

    • requester-id - The ID of the entity that launched the instance on your behalf (for example, AWS Management Console, Auto Scaling, and so on).

    • requester-managed - Indicates whether the network interface is being managed by an AWS service (for example, AWS Management Console, Auto Scaling, and so on).

    • source-desk-check - Indicates whether the network interface performs source/destination checking. A value of true means checking is enabled, and false means checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC.

    • status - The status of the network interface. If the network interface is not attached to an instance, the status is available; if a network interface is attached to an instance the status is in-use.

    • subnet-id - The ID of the subnet for the network interface.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • vpc-id - The ID of the VPC for the network interface.

    ", + "DescribePlacementGroupsRequest$Filters": "

    One or more filters.

    • group-name - The name of the placement group.

    • state - The state of the placement group (pending | available | deleting | deleted).

    • strategy - The strategy of the placement group (cluster).

    ", + "DescribePrefixListsRequest$Filters": "

    One or more filters.

    • prefix-list-id: The ID of a prefix list.

    • prefix-list-name: The name of a prefix list.

    ", + "DescribeRegionsRequest$Filters": "

    One or more filters.

    • endpoint - The endpoint of the region (for example, ec2.us-east-1.amazonaws.com).

    • region-name - The name of the region (for example, us-east-1).

    ", + "DescribeReservedInstancesListingsRequest$Filters": "

    One or more filters.

    • reserved-instances-id - The ID of the Reserved Instances.

    • reserved-instances-listing-id - The ID of the Reserved Instances listing.

    • status - The status of the Reserved Instance listing (pending | active | cancelled | closed).

    • status-message - The reason for the status.

    ", + "DescribeReservedInstancesModificationsRequest$Filters": "

    One or more filters.

    • client-token - The idempotency token for the modification request.

    • create-date - The time when the modification request was created.

    • effective-date - The time when the modification becomes effective.

    • modification-result.reserved-instances-id - The ID for the Reserved Instances created as part of the modification request. This ID is only available when the status of the modification is fulfilled.

    • modification-result.target-configuration.availability-zone - The Availability Zone for the new Reserved Instances.

    • modification-result.target-configuration.instance-count - The number of new Reserved Instances.

    • modification-result.target-configuration.instance-type - The instance type of the new Reserved Instances.

    • modification-result.target-configuration.platform - The network platform of the new Reserved Instances (EC2-Classic | EC2-VPC).

    • reserved-instances-id - The ID of the Reserved Instances modified.

    • reserved-instances-modification-id - The ID of the modification request.

    • status - The status of the Reserved Instances modification request (processing | fulfilled | failed).

    • status-message - The reason for the status.

    • update-date - The time when the modification request was last updated.

    ", + "DescribeReservedInstancesOfferingsRequest$Filters": "

    One or more filters.

    • availability-zone - The Availability Zone where the Reserved Instance can be used.

    • duration - The duration of the Reserved Instance (for example, one year or three years), in seconds (31536000 | 94608000).

    • fixed-price - The purchase price of the Reserved Instance (for example, 9800.0).

    • instance-type - The instance type that is covered by the reservation.

    • marketplace - Set to true to show only Reserved Instance Marketplace offerings. When this filter is not used, which is the default behavior, all offerings from both AWS and the Reserved Instance Marketplace are listed.

    • product-description - The Reserved Instance product platform description. Instances that include (Amazon VPC) in the product platform description will only be displayed to EC2-Classic account holders and are for use with Amazon VPC. (Linux/UNIX | Linux/UNIX (Amazon VPC) | SUSE Linux | SUSE Linux (Amazon VPC) | Red Hat Enterprise Linux | Red Hat Enterprise Linux (Amazon VPC) | Windows | Windows (Amazon VPC) | Windows with SQL Server Standard | Windows with SQL Server Standard (Amazon VPC) | Windows with SQL Server Web | Windows with SQL Server Web (Amazon VPC) | Windows with SQL Server Enterprise | Windows with SQL Server Enterprise (Amazon VPC))

    • reserved-instances-offering-id - The Reserved Instances offering ID.

    • usage-price - The usage price of the Reserved Instance, per hour (for example, 0.84).

    ", + "DescribeReservedInstancesRequest$Filters": "

    One or more filters.

    • availability-zone - The Availability Zone where the Reserved Instance can be used.

    • duration - The duration of the Reserved Instance (one year or three years), in seconds (31536000 | 94608000).

    • end - The time when the Reserved Instance expires (for example, 2015-08-07T11:54:42.000Z).

    • fixed-price - The purchase price of the Reserved Instance (for example, 9800.0).

    • instance-type - The instance type that is covered by the reservation.

    • product-description - The Reserved Instance product platform description. Instances that include (Amazon VPC) in the product platform description will only be displayed to EC2-Classic account holders and are for use with Amazon VPC (Linux/UNIX | Linux/UNIX (Amazon VPC) | SUSE Linux | SUSE Linux (Amazon VPC) | Red Hat Enterprise Linux | Red Hat Enterprise Linux (Amazon VPC) | Windows | Windows (Amazon VPC) | Windows with SQL Server Standard | Windows with SQL Server Standard (Amazon VPC) | Windows with SQL Server Web | Windows with SQL Server Web (Amazon VPC) | Windows with SQL Server Enterprise | Windows with SQL Server Enterprise (Amazon VPC)).

    • reserved-instances-id - The ID of the Reserved Instance.

    • start - The time at which the Reserved Instance purchase request was placed (for example, 2014-08-07T11:54:42.000Z).

    • state - The state of the Reserved Instance (payment-pending | active | payment-failed | retired).

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • usage-price - The usage price of the Reserved Instance, per hour (for example, 0.84).

    ", + "DescribeRouteTablesRequest$Filters": "

    One or more filters.

    • association.route-table-association-id - The ID of an association ID for the route table.

    • association.route-table-id - The ID of the route table involved in the association.

    • association.subnet-id - The ID of the subnet involved in the association.

    • association.main - Indicates whether the route table is the main route table for the VPC (true | false).

    • route-table-id - The ID of the route table.

    • route.destination-cidr-block - The CIDR range specified in a route in the table.

    • route.destination-prefix-list-id - The ID (prefix) of the AWS service specified in a route in the table.

    • route.gateway-id - The ID of a gateway specified in a route in the table.

    • route.instance-id - The ID of an instance specified in a route in the table.

    • route.nat-gateway-id - The ID of a NAT gateway.

    • route.origin - Describes how the route was created. CreateRouteTable indicates that the route was automatically created when the route table was created; CreateRoute indicates that the route was manually added to the route table; EnableVgwRoutePropagation indicates that the route was propagated by route propagation.

    • route.state - The state of a route in the route table (active | blackhole). The blackhole state indicates that the route's target isn't available (for example, the specified gateway isn't attached to the VPC, the specified NAT instance has been terminated, and so on).

    • route.vpc-peering-connection-id - The ID of a VPC peering connection specified in a route in the table.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • vpc-id - The ID of the VPC for the route table.

    ", + "DescribeScheduledInstanceAvailabilityRequest$Filters": "

    One or more filters.

    • availability-zone - The Availability Zone (for example, us-west-2a).

    • instance-type - The instance type (for example, c4.large).

    • network-platform - The network platform (EC2-Classic or EC2-VPC).

    • platform - The platform (Linux/UNIX or Windows).

    ", + "DescribeScheduledInstancesRequest$Filters": "

    One or more filters.

    • availability-zone - The Availability Zone (for example, us-west-2a).

    • instance-type - The instance type (for example, c4.large).

    • network-platform - The network platform (EC2-Classic or EC2-VPC).

    • platform - The platform (Linux/UNIX or Windows).

    ", + "DescribeSecurityGroupsRequest$Filters": "

    One or more filters. If using multiple filters for rules, the results include security groups for which any combination of rules - not necessarily a single rule - match all filters.

    • description - The description of the security group.

    • egress.ip-permission.prefix-list-id - The ID (prefix) of the AWS service to which the security group allows access.

    • group-id - The ID of the security group.

    • group-name - The name of the security group.

    • ip-permission.cidr - A CIDR range that has been granted permission.

    • ip-permission.from-port - The start of port range for the TCP and UDP protocols, or an ICMP type number.

    • ip-permission.group-id - The ID of a security group that has been granted permission.

    • ip-permission.group-name - The name of a security group that has been granted permission.

    • ip-permission.protocol - The IP protocol for the permission (tcp | udp | icmp or a protocol number).

    • ip-permission.to-port - The end of port range for the TCP and UDP protocols, or an ICMP code.

    • ip-permission.user-id - The ID of an AWS account that has been granted permission.

    • owner-id - The AWS account ID of the owner of the security group.

    • tag-key - The key of a tag assigned to the security group.

    • tag-value - The value of a tag assigned to the security group.

    • vpc-id - The ID of the VPC specified when the security group was created.

    ", + "DescribeSnapshotsRequest$Filters": "

    One or more filters.

    • description - A description of the snapshot.

    • owner-alias - The AWS account alias (for example, amazon) that owns the snapshot.

    • owner-id - The ID of the AWS account that owns the snapshot.

    • progress - The progress of the snapshot, as a percentage (for example, 80%).

    • snapshot-id - The snapshot ID.

    • start-time - The time stamp when the snapshot was initiated.

    • status - The status of the snapshot (pending | completed | error).

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • volume-id - The ID of the volume the snapshot is for.

    • volume-size - The size of the volume, in GiB.

    ", + "DescribeSpotInstanceRequestsRequest$Filters": "

    One or more filters.

    • availability-zone-group - The Availability Zone group.

    • create-time - The time stamp when the Spot instance request was created.

    • fault-code - The fault code related to the request.

    • fault-message - The fault message related to the request.

    • instance-id - The ID of the instance that fulfilled the request.

    • launch-group - The Spot instance launch group.

    • launch.block-device-mapping.delete-on-termination - Indicates whether the Amazon EBS volume is deleted on instance termination.

    • launch.block-device-mapping.device-name - The device name for the Amazon EBS volume (for example, /dev/sdh).

    • launch.block-device-mapping.snapshot-id - The ID of the snapshot used for the Amazon EBS volume.

    • launch.block-device-mapping.volume-size - The size of the Amazon EBS volume, in GiB.

    • launch.block-device-mapping.volume-type - The type of the Amazon EBS volume: gp2 for General Purpose SSD, io1 for Provisioned IOPS SSD, st1 for Throughput Optimized HDD, sc1for Cold HDD, or standard for Magnetic.

    • launch.group-id - The security group for the instance.

    • launch.image-id - The ID of the AMI.

    • launch.instance-type - The type of instance (for example, m3.medium).

    • launch.kernel-id - The kernel ID.

    • launch.key-name - The name of the key pair the instance launched with.

    • launch.monitoring-enabled - Whether monitoring is enabled for the Spot instance.

    • launch.ramdisk-id - The RAM disk ID.

    • network-interface.network-interface-id - The ID of the network interface.

    • network-interface.device-index - The index of the device for the network interface attachment on the instance.

    • network-interface.subnet-id - The ID of the subnet for the instance.

    • network-interface.description - A description of the network interface.

    • network-interface.private-ip-address - The primary private IP address of the network interface.

    • network-interface.delete-on-termination - Indicates whether the network interface is deleted when the instance is terminated.

    • network-interface.group-id - The ID of the security group associated with the network interface.

    • network-interface.group-name - The name of the security group associated with the network interface.

    • network-interface.addresses.primary - Indicates whether the IP address is the primary private IP address.

    • product-description - The product description associated with the instance (Linux/UNIX | Windows).

    • spot-instance-request-id - The Spot instance request ID.

    • spot-price - The maximum hourly price for any Spot instance launched to fulfill the request.

    • state - The state of the Spot instance request (open | active | closed | cancelled | failed). Spot bid status information can help you track your Amazon EC2 Spot instance requests. For more information, see Spot Bid Status in the Amazon Elastic Compute Cloud User Guide.

    • status-code - The short code describing the most recent evaluation of your Spot instance request.

    • status-message - The message explaining the status of the Spot instance request.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • type - The type of Spot instance request (one-time | persistent).

    • launched-availability-zone - The Availability Zone in which the bid is launched.

    • valid-from - The start date of the request.

    • valid-until - The end date of the request.
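
    A minimal sketch of passing these filters through the vendored Go SDK; the region and the Purpose=X tag are illustrative values borrowed from the tag-key example above, and error handling is reduced to log.Fatal:

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/ec2"
        )

        func main() {
            sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
            svc := ec2.New(sess)

            // tag:Purpose matches key and value together; the independent
            // tag-key / tag-value filters would match each half separately.
            out, err := svc.DescribeSpotInstanceRequests(&ec2.DescribeSpotInstanceRequestsInput{
                Filters: []*ec2.Filter{
                    {Name: aws.String("state"), Values: []*string{aws.String("active")}},
                    {Name: aws.String("tag:Purpose"), Values: []*string{aws.String("X")}},
                },
            })
            if err != nil {
                log.Fatal(err)
            }
            for _, r := range out.SpotInstanceRequests {
                fmt.Println(aws.StringValue(r.SpotInstanceRequestId), aws.StringValue(r.State))
            }
        }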

    ", + "DescribeSpotPriceHistoryRequest$Filters": "

    One or more filters.

    • availability-zone - The Availability Zone for which prices should be returned.

    • instance-type - The type of instance (for example, m3.medium).

    • product-description - The product description for the Spot price (Linux/UNIX | SUSE Linux | Windows | Linux/UNIX (Amazon VPC) | SUSE Linux (Amazon VPC) | Windows (Amazon VPC)).

    • spot-price - The Spot price. The value must match exactly (or use wildcards; greater than or less than comparison is not supported).

    • timestamp - The timestamp of the Spot price history, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). You can use wildcards (* and ?). Greater than or less than comparison is not supported.
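
    A sketch of the wildcard matching described above, reusing the svc client and imports from the DescribeSpotInstanceRequests sketch; the date is illustrative:

        hist, err := svc.DescribeSpotPriceHistory(&ec2.DescribeSpotPriceHistoryInput{
            InstanceTypes:       []*string{aws.String("m3.medium")},
            ProductDescriptions: []*string{aws.String("Linux/UNIX")},
            Filters: []*ec2.Filter{
                // Wildcards are allowed; this matches any time on the given day.
                {Name: aws.String("timestamp"), Values: []*string{aws.String("2017-07-26T*")}},
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        for _, p := range hist.SpotPriceHistory {
            fmt.Println(p.Timestamp, aws.StringValue(p.SpotPrice))
        }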

    ", + "DescribeSubnetsRequest$Filters": "

    One or more filters.

    • availabilityZone - The Availability Zone for the subnet. You can also use availability-zone as the filter name.

    • available-ip-address-count - The number of IP addresses in the subnet that are available.

    • cidrBlock - The CIDR block of the subnet. The CIDR block you specify must exactly match the subnet's CIDR block for information to be returned for the subnet. You can also use cidr or cidr-block as the filter names.

    • defaultForAz - Indicates whether this is the default subnet for the Availability Zone. You can also use default-for-az as the filter name.

    • state - The state of the subnet (pending | available).

    • subnet-id - The ID of the subnet.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • vpc-id - The ID of the VPC for the subnet.

    ", + "DescribeTagsRequest$Filters": "

    One or more filters.

    • key - The tag key.

    • resource-id - The resource ID.

    • resource-type - The resource type (customer-gateway | dhcp-options | image | instance | internet-gateway | network-acl | network-interface | reserved-instances | route-table | security-group | snapshot | spot-instances-request | subnet | volume | vpc | vpn-connection | vpn-gateway).

    • value - The tag value.

    ", + "DescribeVolumeStatusRequest$Filters": "

    One or more filters.

    • action.code - The action code for the event (for example, enable-volume-io).

    • action.description - A description of the action.

    • action.event-id - The event ID associated with the action.

    • availability-zone - The Availability Zone of the instance.

    • event.description - A description of the event.

    • event.event-id - The event ID.

    • event.event-type - The event type (for io-enabled: passed | failed; for io-performance: io-performance:degraded | io-performance:severely-degraded | io-performance:stalled).

    • event.not-after - The latest end time for the event.

    • event.not-before - The earliest start time for the event.

    • volume-status.details-name - The cause for volume-status.status (io-enabled | io-performance).

    • volume-status.details-status - The status of volume-status.details-name (for io-enabled: passed | failed; for io-performance: normal | degraded | severely-degraded | stalled).

    • volume-status.status - The status of the volume (ok | impaired | warning | insufficient-data).

    ", + "DescribeVolumesRequest$Filters": "

    One or more filters.

    • attachment.attach-time - The time stamp when the attachment initiated.

    • attachment.delete-on-termination - Whether the volume is deleted on instance termination.

    • attachment.device - The device name that is exposed to the instance (for example, /dev/sda1).

    • attachment.instance-id - The ID of the instance the volume is attached to.

    • attachment.status - The attachment state (attaching | attached | detaching | detached).

    • availability-zone - The Availability Zone in which the volume was created.

    • create-time - The time stamp when the volume was created.

    • encrypted - The encryption status of the volume.

    • size - The size of the volume, in GiB.

    • snapshot-id - The snapshot from which the volume was created.

    • status - The status of the volume (creating | available | in-use | deleting | deleted | error).

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • volume-id - The volume ID.

    • volume-type - The Amazon EBS volume type. This can be gp2 for General Purpose SSD, io1 for Provisioned IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard for Magnetic volumes.

    ", + "DescribeVpcClassicLinkRequest$Filters": "

    One or more filters.

    • is-classic-link-enabled - Whether the VPC is enabled for ClassicLink (true | false).

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    ", + "DescribeVpcEndpointsRequest$Filters": "

    One or more filters.

    • service-name: The name of the AWS service.

    • vpc-id: The ID of the VPC in which the endpoint resides.

    • vpc-endpoint-id: The ID of the endpoint.

    • vpc-endpoint-state: The state of the endpoint. (pending | available | deleting | deleted)

    ", + "DescribeVpcPeeringConnectionsRequest$Filters": "

    One or more filters.

    • accepter-vpc-info.cidr-block - The CIDR block of the peer VPC.

    • accepter-vpc-info.owner-id - The AWS account ID of the owner of the peer VPC.

    • accepter-vpc-info.vpc-id - The ID of the peer VPC.

    • expiration-time - The expiration date and time for the VPC peering connection.

    • requester-vpc-info.cidr-block - The CIDR block of the requester's VPC.

    • requester-vpc-info.owner-id - The AWS account ID of the owner of the requester VPC.

    • requester-vpc-info.vpc-id - The ID of the requester VPC.

    • status-code - The status of the VPC peering connection (pending-acceptance | failed | expired | provisioning | active | deleted | rejected).

    • status-message - A message that provides more information about the status of the VPC peering connection, if applicable.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • vpc-peering-connection-id - The ID of the VPC peering connection.

    ", + "DescribeVpcsRequest$Filters": "

    One or more filters.

    • cidr - The CIDR block of the VPC. The CIDR block you specify must exactly match the VPC's CIDR block for information to be returned for the VPC. Must contain the slash followed by one or two digits (for example, /28).

    • dhcp-options-id - The ID of a set of DHCP options.

    • isDefault - Indicates whether the VPC is the default VPC.

    • state - The state of the VPC (pending | available).

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • vpc-id - The ID of the VPC.

    ", + "DescribeVpnConnectionsRequest$Filters": "

    One or more filters.

    • customer-gateway-configuration - The configuration information for the customer gateway.

    • customer-gateway-id - The ID of a customer gateway associated with the VPN connection.

    • state - The state of the VPN connection (pending | available | deleting | deleted).

    • option.static-routes-only - Indicates whether the connection has static routes only. Used for devices that do not support Border Gateway Protocol (BGP).

    • route.destination-cidr-block - The destination CIDR block. This corresponds to the subnet used in a customer data center.

    • bgp-asn - The BGP Autonomous System Number (ASN) associated with a BGP device.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • type - The type of VPN connection. Currently the only supported type is ipsec.1.

    • vpn-connection-id - The ID of the VPN connection.

    • vpn-gateway-id - The ID of a virtual private gateway associated with the VPN connection.

    ", + "DescribeVpnGatewaysRequest$Filters": "

    One or more filters.

    • attachment.state - The current state of the attachment between the gateway and the VPC (attaching | attached | detaching | detached).

    • attachment.vpc-id - The ID of an attached VPC.

    • availability-zone - The Availability Zone for the virtual private gateway (if applicable).

    • state - The state of the virtual private gateway (pending | available | deleting | deleted).

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • type - The type of virtual private gateway. Currently the only supported type is ipsec.1.

    • vpn-gateway-id - The ID of the virtual private gateway.

    " + } + }, + "FleetType": { + "base": null, + "refs": { + "SpotFleetRequestConfigData$Type": "

    The type of request. Indicates whether the fleet will only request the target capacity or also attempt to maintain it. When you request a certain target capacity, the fleet only places the required bids; it does not attempt to replenish Spot instances if capacity is diminished, nor does it submit bids in alternative Spot pools if capacity is not available. When you want to maintain a certain target capacity, the fleet places the required bids to meet this target capacity and automatically replenishes any interrupted instances. Default: maintain.
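
    A sketch of a maintain-type fleet (the role ARN and AMI ID are placeholders; reuses the svc client and imports from the DescribeSpotInstanceRequests sketch):

        req, err := svc.RequestSpotFleet(&ec2.RequestSpotFleetInput{
            SpotFleetRequestConfig: &ec2.SpotFleetRequestConfigData{
                IamFleetRole:   aws.String("arn:aws:iam::123456789012:role/fleet-role"), // placeholder
                SpotPrice:      aws.String("0.05"),
                TargetCapacity: aws.Int64(4),
                Type:           aws.String("maintain"), // replenish interrupted instances
                LaunchSpecifications: []*ec2.SpotFleetLaunchSpecification{{
                    ImageId:      aws.String("ami-12345678"), // placeholder
                    InstanceType: aws.String("m3.medium"),
                }},
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("fleet:", aws.StringValue(req.SpotFleetRequestId))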

    " + } + }, + "Float": { + "base": null, + "refs": { + "ReservedInstances$UsagePrice": "

    The usage price of the Reserved Instance, per hour.

    ", + "ReservedInstances$FixedPrice": "

    The purchase price of the Reserved Instance.

    ", + "ReservedInstancesOffering$UsagePrice": "

    The usage price of the Reserved Instance, per hour.

    ", + "ReservedInstancesOffering$FixedPrice": "

    The purchase price of the Reserved Instance.

    " + } + }, + "FlowLog": { + "base": "

    Describes a flow log.

    ", + "refs": { + "FlowLogSet$member": null + } + }, + "FlowLogSet": { + "base": null, + "refs": { + "DescribeFlowLogsResult$FlowLogs": "

    Information about the flow logs.

    " + } + }, + "FlowLogsResourceType": { + "base": null, + "refs": { + "CreateFlowLogsRequest$ResourceType": "

    The type of resource on which to create the flow log.

    " + } + }, + "GatewayType": { + "base": null, + "refs": { + "CreateCustomerGatewayRequest$Type": "

    The type of VPN connection that this customer gateway supports (ipsec.1).

    ", + "CreateVpnGatewayRequest$Type": "

    The type of VPN connection this virtual private gateway supports.

    ", + "VpnConnection$Type": "

    The type of VPN connection.

    ", + "VpnGateway$Type": "

    The type of VPN connection the virtual private gateway supports.

    " + } + }, + "GetConsoleOutputRequest": { + "base": "

    Contains the parameters for GetConsoleOutput.

    ", + "refs": { + } + }, + "GetConsoleOutputResult": { + "base": "

    Contains the output of GetConsoleOutput.

    ", + "refs": { + } + }, + "GetConsoleScreenshotRequest": { + "base": "

    Contains the parameters for the request.

    ", + "refs": { + } + }, + "GetConsoleScreenshotResult": { + "base": "

    Contains the output of the request.

    ", + "refs": { + } + }, + "GetPasswordDataRequest": { + "base": "

    Contains the parameters for GetPasswordData.

    ", + "refs": { + } + }, + "GetPasswordDataResult": { + "base": "

    Contains the output of GetPasswordData.

    ", + "refs": { + } + }, + "GroupIdStringList": { + "base": null, + "refs": { + "AttachClassicLinkVpcRequest$Groups": "

    The ID of one or more of the VPC's security groups. You cannot specify security groups from a different VPC.

    ", + "DescribeSecurityGroupsRequest$GroupIds": "

    One or more security group IDs. Required for security groups in a nondefault VPC.

    Default: Describes all your security groups.

    ", + "ModifyInstanceAttributeRequest$Groups": "

    [EC2-VPC] Changes the security groups of the instance. You must specify at least one security group, even if it's just the default security group for the VPC. You must specify the security group ID, not the security group name.

    " + } + }, + "GroupIdentifier": { + "base": "

    Describes a security group.

    ", + "refs": { + "GroupIdentifierList$member": null + } + }, + "GroupIdentifierList": { + "base": null, + "refs": { + "ClassicLinkInstance$Groups": "

    A list of security groups.

    ", + "DescribeNetworkInterfaceAttributeResult$Groups": "

    The security groups associated with the network interface.

    ", + "Instance$SecurityGroups": "

    One or more security groups for the instance.

    ", + "InstanceAttribute$Groups": "

    The security groups associated with the instance.

    ", + "InstanceNetworkInterface$Groups": "

    One or more security groups.

    ", + "LaunchSpecification$SecurityGroups": "

    One or more security groups. When requesting instances in a VPC, you must specify the IDs of the security groups. When requesting instances in EC2-Classic, you can specify the names or the IDs of the security groups.

    ", + "NetworkInterface$Groups": "

    Any security groups for the network interface.

    ", + "Reservation$Groups": "

    [EC2-Classic only] One or more security groups.

    ", + "SpotFleetLaunchSpecification$SecurityGroups": "

    One or more security groups. When requesting instances in a VPC, you must specify the IDs of the security groups. When requesting instances in EC2-Classic, you can specify the names or the IDs of the security groups.

    " + } + }, + "GroupIds": { + "base": null, + "refs": { + "DescribeSecurityGroupReferencesRequest$GroupId": "

    One or more security group IDs in your account.

    " + } + }, + "GroupNameStringList": { + "base": null, + "refs": { + "DescribeSecurityGroupsRequest$GroupNames": "

    [EC2-Classic and default VPC only] One or more security group names. You can specify either the security group name or the security group ID. For security groups in a nondefault VPC, use the group-name filter to describe security groups by name.

    Default: Describes all your security groups.

    ", + "ModifySnapshotAttributeRequest$GroupNames": "

    The group to modify for the snapshot.

    " + } + }, + "HistoryRecord": { + "base": "

    Describes an event in the history of the Spot fleet request.

    ", + "refs": { + "HistoryRecords$member": null + } + }, + "HistoryRecords": { + "base": null, + "refs": { + "DescribeSpotFleetRequestHistoryResponse$HistoryRecords": "

    Information about the events in the history of the Spot fleet request.

    " + } + }, + "Host": { + "base": "

    Describes the properties of the Dedicated host.

    ", + "refs": { + "HostList$member": null + } + }, + "HostInstance": { + "base": "

    Describes an instance running on a Dedicated host.

    ", + "refs": { + "HostInstanceList$member": null + } + }, + "HostInstanceList": { + "base": null, + "refs": { + "Host$Instances": "

    The IDs and instance type that are currently running on the Dedicated host.

    " + } + }, + "HostList": { + "base": null, + "refs": { + "DescribeHostsResult$Hosts": "

    Information about the Dedicated hosts.

    " + } + }, + "HostProperties": { + "base": "

    Describes properties of a Dedicated host.

    ", + "refs": { + "Host$HostProperties": "

    The hardware specifications of the Dedicated host.

    " + } + }, + "HostTenancy": { + "base": null, + "refs": { + "ModifyInstancePlacementRequest$Tenancy": "

    The tenancy of the instance that you are modifying.

    " + } + }, + "HypervisorType": { + "base": null, + "refs": { + "Image$Hypervisor": "

    The hypervisor type of the image.

    ", + "Instance$Hypervisor": "

    The hypervisor type of the instance.

    " + } + }, + "IamInstanceProfile": { + "base": "

    Describes an IAM instance profile.

    ", + "refs": { + "Instance$IamInstanceProfile": "

    The IAM instance profile associated with the instance, if applicable.

    " + } + }, + "IamInstanceProfileSpecification": { + "base": "

    Describes an IAM instance profile.

    ", + "refs": { + "LaunchSpecification$IamInstanceProfile": "

    The IAM instance profile.

    ", + "RequestSpotLaunchSpecification$IamInstanceProfile": "

    The IAM instance profile.

    ", + "RunInstancesRequest$IamInstanceProfile": "

    The IAM instance profile.

    ", + "SpotFleetLaunchSpecification$IamInstanceProfile": "

    The IAM instance profile.

    " + } + }, + "IcmpTypeCode": { + "base": "

    Describes the ICMP type and code.

    ", + "refs": { + "CreateNetworkAclEntryRequest$IcmpTypeCode": "

    ICMP protocol: The ICMP type and code. Required if specifying ICMP for the protocol.

    ", + "NetworkAclEntry$IcmpTypeCode": "

    ICMP protocol: The ICMP type and code.

    ", + "ReplaceNetworkAclEntryRequest$IcmpTypeCode": "

    ICMP protocol: The ICMP type and code. Required if specifying 1 (ICMP) for the protocol.

    " + } + }, + "IdFormat": { + "base": "

    Describes the ID format for a resource.

    ", + "refs": { + "IdFormatList$member": null + } + }, + "IdFormatList": { + "base": null, + "refs": { + "DescribeIdFormatResult$Statuses": "

    Information about the ID format for the resource.

    ", + "DescribeIdentityIdFormatResult$Statuses": "

    Information about the ID format for the resources.

    " + } + }, + "Image": { + "base": "

    Describes an image.

    ", + "refs": { + "ImageList$member": null + } + }, + "ImageAttribute": { + "base": "

    Describes an image attribute.

    ", + "refs": { + } + }, + "ImageAttributeName": { + "base": null, + "refs": { + "DescribeImageAttributeRequest$Attribute": "

    The AMI attribute.

    Note: Depending on your account privileges, the blockDeviceMapping attribute may return a Client.AuthFailure error. If this happens, use DescribeImages to get information about the block device mapping for the AMI.

    " + } + }, + "ImageDiskContainer": { + "base": "

    Describes the disk container object for an import image task.

    ", + "refs": { + "ImageDiskContainerList$member": null + } + }, + "ImageDiskContainerList": { + "base": null, + "refs": { + "ImportImageRequest$DiskContainers": "

    Information about the disk containers.

    " + } + }, + "ImageIdStringList": { + "base": null, + "refs": { + "DescribeImagesRequest$ImageIds": "

    One or more image IDs.

    Default: Describes all images available to you.

    " + } + }, + "ImageList": { + "base": null, + "refs": { + "DescribeImagesResult$Images": "

    Information about one or more images.

    " + } + }, + "ImageState": { + "base": null, + "refs": { + "Image$State": "

    The current state of the AMI. If the state is available, the image is successfully registered and can be used to launch an instance.

    " + } + }, + "ImageTypeValues": { + "base": null, + "refs": { + "Image$ImageType": "

    The type of image.

    " + } + }, + "ImportImageRequest": { + "base": "

    Contains the parameters for ImportImage.

    ", + "refs": { + } + }, + "ImportImageResult": { + "base": "

    Contains the output for ImportImage.

    ", + "refs": { + } + }, + "ImportImageTask": { + "base": "

    Describes an import image task.

    ", + "refs": { + "ImportImageTaskList$member": null + } + }, + "ImportImageTaskList": { + "base": null, + "refs": { + "DescribeImportImageTasksResult$ImportImageTasks": "

    A list of zero or more import image tasks that are currently active or were completed or canceled in the previous 7 days.

    " + } + }, + "ImportInstanceLaunchSpecification": { + "base": "

    Describes the launch specification for VM import.

    ", + "refs": { + "ImportInstanceRequest$LaunchSpecification": "

    The launch specification.

    " + } + }, + "ImportInstanceRequest": { + "base": "

    Contains the parameters for ImportInstance.

    ", + "refs": { + } + }, + "ImportInstanceResult": { + "base": "

    Contains the output for ImportInstance.

    ", + "refs": { + } + }, + "ImportInstanceTaskDetails": { + "base": "

    Describes an import instance task.

    ", + "refs": { + "ConversionTask$ImportInstance": "

    If the task is for importing an instance, this contains information about the import instance task.

    " + } + }, + "ImportInstanceVolumeDetailItem": { + "base": "

    Describes an import volume task.

    ", + "refs": { + "ImportInstanceVolumeDetailSet$member": null + } + }, + "ImportInstanceVolumeDetailSet": { + "base": null, + "refs": { + "ImportInstanceTaskDetails$Volumes": "

    One or more volumes.

    " + } + }, + "ImportKeyPairRequest": { + "base": "

    Contains the parameters for ImportKeyPair.

    ", + "refs": { + } + }, + "ImportKeyPairResult": { + "base": "

    Contains the output of ImportKeyPair.

    ", + "refs": { + } + }, + "ImportSnapshotRequest": { + "base": "

    Contains the parameters for ImportSnapshot.

    ", + "refs": { + } + }, + "ImportSnapshotResult": { + "base": "

    Contains the output for ImportSnapshot.

    ", + "refs": { + } + }, + "ImportSnapshotTask": { + "base": "

    Describes an import snapshot task.

    ", + "refs": { + "ImportSnapshotTaskList$member": null + } + }, + "ImportSnapshotTaskList": { + "base": null, + "refs": { + "DescribeImportSnapshotTasksResult$ImportSnapshotTasks": "

    A list of zero or more import snapshot tasks that are currently active or were completed or canceled in the previous 7 days.

    " + } + }, + "ImportTaskIdList": { + "base": null, + "refs": { + "DescribeImportImageTasksRequest$ImportTaskIds": "

    A list of import image task IDs.

    ", + "DescribeImportSnapshotTasksRequest$ImportTaskIds": "

    A list of import snapshot task IDs.

    " + } + }, + "ImportVolumeRequest": { + "base": "

    Contains the parameters for ImportVolume.

    ", + "refs": { + } + }, + "ImportVolumeResult": { + "base": "

    Contains the output for ImportVolume.

    ", + "refs": { + } + }, + "ImportVolumeTaskDetails": { + "base": "

    Describes an import volume task.

    ", + "refs": { + "ConversionTask$ImportVolume": "

    If the task is for importing a volume, this contains information about the import volume task.

    " + } + }, + "Instance": { + "base": "

    Describes an instance.

    ", + "refs": { + "InstanceList$member": null + } + }, + "InstanceAttribute": { + "base": "

    Describes an instance attribute.

    ", + "refs": { + } + }, + "InstanceAttributeName": { + "base": null, + "refs": { + "DescribeInstanceAttributeRequest$Attribute": "

    The instance attribute.

    Note: The enaSupport attribute is not supported at this time.

    ", + "ModifyInstanceAttributeRequest$Attribute": "

    The name of the attribute.

    ", + "ResetInstanceAttributeRequest$Attribute": "

    The attribute to reset.

    You can only reset the following attributes: kernel | ramdisk | sourceDestCheck. To change an instance attribute, use ModifyInstanceAttribute.

    " + } + }, + "InstanceBlockDeviceMapping": { + "base": "

    Describes a block device mapping.

    ", + "refs": { + "InstanceBlockDeviceMappingList$member": null + } + }, + "InstanceBlockDeviceMappingList": { + "base": null, + "refs": { + "Instance$BlockDeviceMappings": "

    Any block device mapping entries for the instance.

    ", + "InstanceAttribute$BlockDeviceMappings": "

    The block device mapping of the instance.

    " + } + }, + "InstanceBlockDeviceMappingSpecification": { + "base": "

    Describes a block device mapping entry.

    ", + "refs": { + "InstanceBlockDeviceMappingSpecificationList$member": null + } + }, + "InstanceBlockDeviceMappingSpecificationList": { + "base": null, + "refs": { + "ModifyInstanceAttributeRequest$BlockDeviceMappings": "

    Modifies the DeleteOnTermination attribute for volumes that are currently attached. The volume must be owned by the caller. If no value is specified for DeleteOnTermination, the default is true and the volume is deleted when the instance is terminated.

    To add instance store volumes to an Amazon EBS-backed instance, you must add them when you launch the instance. For more information, see Updating the Block Device Mapping when Launching an Instance in the Amazon Elastic Compute Cloud User Guide.
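
    A sketch of flipping DeleteOnTermination on an attached volume; the device name is a placeholder, and the ec2 and aws imports from the DescribeSpotInstanceRequests sketch are assumed:

        // keepVolume preserves the instance's /dev/sdh volume after termination.
        func keepVolume(svc *ec2.EC2, instanceID string) error {
            _, err := svc.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{
                InstanceId: aws.String(instanceID),
                BlockDeviceMappings: []*ec2.InstanceBlockDeviceMappingSpecification{{
                    DeviceName: aws.String("/dev/sdh"), // placeholder device
                    Ebs: &ec2.EbsInstanceBlockDeviceSpecification{
                        DeleteOnTermination: aws.Bool(false),
                    },
                }},
            })
            return err
        }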

    " + } + }, + "InstanceCapacity": { + "base": "

    Information about the instance type that the Dedicated host supports.

    ", + "refs": { + "AvailableInstanceCapacityList$member": null + } + }, + "InstanceCount": { + "base": "

    Describes a Reserved Instance listing state.

    ", + "refs": { + "InstanceCountList$member": null + } + }, + "InstanceCountList": { + "base": null, + "refs": { + "ReservedInstancesListing$InstanceCounts": "

    The number of instances in this state.

    " + } + }, + "InstanceExportDetails": { + "base": "

    Describes an instance to export.

    ", + "refs": { + "ExportTask$InstanceExportDetails": "

    Information about the instance to export.

    " + } + }, + "InstanceIdSet": { + "base": null, + "refs": { + "RunScheduledInstancesResult$InstanceIdSet": "

    The IDs of the newly launched instances.

    " + } + }, + "InstanceIdStringList": { + "base": null, + "refs": { + "DescribeClassicLinkInstancesRequest$InstanceIds": "

    One or more instance IDs. Must be instances linked to a VPC through ClassicLink.

    ", + "DescribeInstanceStatusRequest$InstanceIds": "

    One or more instance IDs.

    Default: Describes all your instances.

    Constraints: Maximum 100 explicitly specified instance IDs.

    ", + "DescribeInstancesRequest$InstanceIds": "

    One or more instance IDs.

    Default: Describes all your instances.

    ", + "MonitorInstancesRequest$InstanceIds": "

    One or more instance IDs.

    ", + "RebootInstancesRequest$InstanceIds": "

    One or more instance IDs.

    ", + "ReportInstanceStatusRequest$Instances": "

    One or more instances.

    ", + "StartInstancesRequest$InstanceIds": "

    One or more instance IDs.

    ", + "StopInstancesRequest$InstanceIds": "

    One or more instance IDs.

    ", + "TerminateInstancesRequest$InstanceIds": "

    One or more instance IDs.

    ", + "UnmonitorInstancesRequest$InstanceIds": "

    One or more instance IDs.

    " + } + }, + "InstanceLifecycleType": { + "base": null, + "refs": { + "Instance$InstanceLifecycle": "

    Indicates whether this is a Spot instance or a Scheduled Instance.

    " + } + }, + "InstanceList": { + "base": null, + "refs": { + "Reservation$Instances": "

    One or more instances.

    " + } + }, + "InstanceMonitoring": { + "base": "

    Describes the monitoring information of the instance.

    ", + "refs": { + "InstanceMonitoringList$member": null + } + }, + "InstanceMonitoringList": { + "base": null, + "refs": { + "MonitorInstancesResult$InstanceMonitorings": "

    Monitoring information for one or more instances.

    ", + "UnmonitorInstancesResult$InstanceMonitorings": "

    Monitoring information for one or more instances.

    " + } + }, + "InstanceNetworkInterface": { + "base": "

    Describes a network interface.

    ", + "refs": { + "InstanceNetworkInterfaceList$member": null + } + }, + "InstanceNetworkInterfaceAssociation": { + "base": "

    Describes association information for an Elastic IP address.

    ", + "refs": { + "InstanceNetworkInterface$Association": "

    The association information for an Elastic IP associated with the network interface.

    ", + "InstancePrivateIpAddress$Association": "

    The association information for an Elastic IP address for the network interface.

    " + } + }, + "InstanceNetworkInterfaceAttachment": { + "base": "

    Describes a network interface attachment.

    ", + "refs": { + "InstanceNetworkInterface$Attachment": "

    The network interface attachment.

    " + } + }, + "InstanceNetworkInterfaceList": { + "base": null, + "refs": { + "Instance$NetworkInterfaces": "

    [EC2-VPC] One or more network interfaces for the instance.

    " + } + }, + "InstanceNetworkInterfaceSpecification": { + "base": "

    Describes a network interface.

    ", + "refs": { + "InstanceNetworkInterfaceSpecificationList$member": null + } + }, + "InstanceNetworkInterfaceSpecificationList": { + "base": null, + "refs": { + "LaunchSpecification$NetworkInterfaces": "

    One or more network interfaces.

    ", + "RequestSpotLaunchSpecification$NetworkInterfaces": "

    One or more network interfaces.

    ", + "RunInstancesRequest$NetworkInterfaces": "

    One or more network interfaces.

    ", + "SpotFleetLaunchSpecification$NetworkInterfaces": "

    One or more network interfaces.

    " + } + }, + "InstancePrivateIpAddress": { + "base": "

    Describes a private IP address.

    ", + "refs": { + "InstancePrivateIpAddressList$member": null + } + }, + "InstancePrivateIpAddressList": { + "base": null, + "refs": { + "InstanceNetworkInterface$PrivateIpAddresses": "

    The private IP addresses associated with the network interface.

    " + } + }, + "InstanceState": { + "base": "

    Describes the current state of the instance.

    ", + "refs": { + "Instance$State": "

    The current state of the instance.

    ", + "InstanceStateChange$CurrentState": "

    The current state of the instance.

    ", + "InstanceStateChange$PreviousState": "

    The previous state of the instance.

    ", + "InstanceStatus$InstanceState": "

    The intended state of the instance. DescribeInstanceStatus requires that an instance be in the running state.

    " + } + }, + "InstanceStateChange": { + "base": "

    Describes an instance state change.

    ", + "refs": { + "InstanceStateChangeList$member": null + } + }, + "InstanceStateChangeList": { + "base": null, + "refs": { + "StartInstancesResult$StartingInstances": "

    Information about one or more started instances.

    ", + "StopInstancesResult$StoppingInstances": "

    Information about one or more stopped instances.

    ", + "TerminateInstancesResult$TerminatingInstances": "

    Information about one or more terminated instances.

    " + } + }, + "InstanceStateName": { + "base": null, + "refs": { + "InstanceState$Name": "

    The current state of the instance.

    " + } + }, + "InstanceStatus": { + "base": "

    Describes the status of an instance.

    ", + "refs": { + "InstanceStatusList$member": null + } + }, + "InstanceStatusDetails": { + "base": "

    Describes the instance status.

    ", + "refs": { + "InstanceStatusDetailsList$member": null + } + }, + "InstanceStatusDetailsList": { + "base": null, + "refs": { + "InstanceStatusSummary$Details": "

    The system instance health or application instance health.

    " + } + }, + "InstanceStatusEvent": { + "base": "

    Describes a scheduled event for an instance.

    ", + "refs": { + "InstanceStatusEventList$member": null + } + }, + "InstanceStatusEventList": { + "base": null, + "refs": { + "InstanceStatus$Events": "

    Any scheduled events associated with the instance.

    " + } + }, + "InstanceStatusList": { + "base": null, + "refs": { + "DescribeInstanceStatusResult$InstanceStatuses": "

    One or more instance status descriptions.

    " + } + }, + "InstanceStatusSummary": { + "base": "

    Describes the status of an instance.

    ", + "refs": { + "InstanceStatus$SystemStatus": "

    Reports impaired functionality that stems from issues related to the systems that support an instance, such as hardware failures and network connectivity problems.

    ", + "InstanceStatus$InstanceStatus": "

    Reports impaired functionality that stems from issues internal to the instance, such as impaired reachability.

    " + } + }, + "InstanceType": { + "base": null, + "refs": { + "DescribeReservedInstancesOfferingsRequest$InstanceType": "

    The instance type that the reservation will cover (for example, m1.small). For more information, see Instance Types in the Amazon Elastic Compute Cloud User Guide.

    ", + "ImportInstanceLaunchSpecification$InstanceType": "

    The instance type. For more information about the instance types that you can import, see Before You Get Started in the Amazon Elastic Compute Cloud User Guide.

    ", + "Instance$InstanceType": "

    The instance type.

    ", + "InstanceTypeList$member": null, + "LaunchSpecification$InstanceType": "

    The instance type.

    ", + "RequestSpotLaunchSpecification$InstanceType": "

    The instance type.

    ", + "ReservedInstances$InstanceType": "

    The instance type on which the Reserved Instance can be used.

    ", + "ReservedInstancesConfiguration$InstanceType": "

    The instance type for the modified Reserved Instances.

    ", + "ReservedInstancesOffering$InstanceType": "

    The instance type on which the Reserved Instance can be used.

    ", + "RunInstancesRequest$InstanceType": "

    The instance type. For more information, see Instance Types in the Amazon Elastic Compute Cloud User Guide.

    Default: m1.small

    ", + "SpotFleetLaunchSpecification$InstanceType": "

    The instance type.

    ", + "SpotPrice$InstanceType": "

    The instance type.

    " + } + }, + "InstanceTypeList": { + "base": null, + "refs": { + "DescribeSpotPriceHistoryRequest$InstanceTypes": "

    Filters the results by the specified instance types.

    " + } + }, + "Integer": { + "base": null, + "refs": { + "AllocateHostsRequest$Quantity": "

    The number of Dedicated hosts you want to allocate to your account with these parameters.

    ", + "AssignPrivateIpAddressesRequest$SecondaryPrivateIpAddressCount": "

    The number of secondary IP addresses to assign to the network interface. You can't specify this parameter when also specifying private IP addresses.

    ", + "AttachNetworkInterfaceRequest$DeviceIndex": "

    The index of the device for the network interface attachment.

    ", + "AuthorizeSecurityGroupEgressRequest$FromPort": "

    The start of port range for the TCP and UDP protocols, or an ICMP type number. We recommend that you specify the port range in a set of IP permissions instead.

    ", + "AuthorizeSecurityGroupEgressRequest$ToPort": "

    The end of port range for the TCP and UDP protocols, or an ICMP code number. We recommend that you specify the port range in a set of IP permissions instead.

    ", + "AuthorizeSecurityGroupIngressRequest$FromPort": "

    The start of port range for the TCP and UDP protocols, or an ICMP type number. For the ICMP type number, use -1 to specify all ICMP types.

    ", + "AuthorizeSecurityGroupIngressRequest$ToPort": "

    The end of port range for the TCP and UDP protocols, or an ICMP code number. For the ICMP code number, use -1 to specify all ICMP codes for the ICMP type.
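
    A sketch using the scalar form of the call (the group ID is a placeholder; svc as in the DescribeSpotInstanceRequests sketch); -1 in ToPort admits every code for ICMP type 8:

        _, err := svc.AuthorizeSecurityGroupIngress(&ec2.AuthorizeSecurityGroupIngressInput{
            GroupId:    aws.String("sg-12345678"), // placeholder
            IpProtocol: aws.String("icmp"),
            FromPort:   aws.Int64(8),  // ICMP type 8 (echo request)
            ToPort:     aws.Int64(-1), // all codes for this type
            CidrIp:     aws.String("0.0.0.0/0"),
        })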

    ", + "AvailableCapacity$AvailableVCpus": "

    The number of vCPUs available on the Dedicated host.

    ", + "CreateCustomerGatewayRequest$BgpAsn": "

    For devices that support BGP, the customer gateway's BGP ASN.

    Default: 65000

    ", + "CreateNetworkAclEntryRequest$RuleNumber": "

    The rule number for the entry (for example, 100). ACL entries are processed in ascending order by rule number.

    Constraints: Positive integer from 1 to 32766. The range 32767 to 65535 is reserved for internal use.
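
    A sketch that stays inside the non-reserved range (the ACL ID is a placeholder; svc as in the first sketch):

        _, err := svc.CreateNetworkAclEntry(&ec2.CreateNetworkAclEntryInput{
            NetworkAclId: aws.String("acl-12345678"), // placeholder
            RuleNumber:   aws.Int64(100),             // evaluated in ascending order
            Protocol:     aws.String("6"),            // TCP
            RuleAction:   aws.String("allow"),
            Egress:       aws.Bool(false),
            CidrBlock:    aws.String("0.0.0.0/0"),
            PortRange:    &ec2.PortRange{From: aws.Int64(443), To: aws.Int64(443)},
        })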

    ", + "CreateNetworkInterfaceRequest$SecondaryPrivateIpAddressCount": "

    The number of secondary private IP addresses to assign to a network interface. When you specify a number of secondary IP addresses, Amazon EC2 selects these IP addresses within the subnet range. You can't specify this option and specify more than one private IP address using privateIpAddresses.

    The number of IP addresses you can assign to a network interface varies by instance type. For more information, see Private IP Addresses Per ENI Per Instance Type in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateReservedInstancesListingRequest$InstanceCount": "

    The number of instances that are a part of a Reserved Instance account to be listed in the Reserved Instance Marketplace. This number should be less than or equal to the instance count associated with the Reserved Instance ID specified in this call.

    ", + "CreateVolumeRequest$Size": "

    The size of the volume, in GiBs.

    Constraints: 1-16384 for gp2, 4-16384 for io1, 500-16384 for st1, 500-16384 for sc1, and 1-1024 for standard. If you specify a snapshot, the volume size must be equal to or larger than the snapshot size.

    Default: If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size.

    ", + "CreateVolumeRequest$Iops": "

    Only valid for Provisioned IOPS SSD volumes. The number of I/O operations per second (IOPS) to provision for the volume, with a maximum ratio of 30 IOPS/GiB.

    Constraint: Range is 100 to 20000 for Provisioned IOPS SSD volumes
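
    A sketch that satisfies both constraints: 100 GiB at the maximum 30 IOPS/GiB ratio gives 3000 IOPS, inside the 100 to 20000 range (svc as in the first sketch):

        vol, err := svc.CreateVolume(&ec2.CreateVolumeInput{
            AvailabilityZone: aws.String("us-east-1a"),
            VolumeType:       aws.String("io1"),
            Size:             aws.Int64(100),  // GiB
            Iops:             aws.Int64(3000), // <= 30 IOPS/GiB * 100 GiB
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("created", aws.StringValue(vol.VolumeId))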

    ", + "DeleteNetworkAclEntryRequest$RuleNumber": "

    The rule number of the entry to delete.

    ", + "DescribeClassicLinkInstancesRequest$MaxResults": "

    The maximum number of results to return for the request in a single page. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned. You cannot specify this parameter and the instance IDs parameter in the same request.

    Constraint: If the value is greater than 1000, we return only 1000 items.

    ", + "DescribeFlowLogsRequest$MaxResults": "

    The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned. You cannot specify this parameter and the flow log IDs parameter in the same request.

    ", + "DescribeHostsRequest$MaxResults": "

    The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned nextToken value. This value can be between 5 and 500; if maxResults is given a larger value than 500, you will receive an error. You cannot specify this parameter and the host IDs parameter in the same request.

    ", + "DescribeImportImageTasksRequest$MaxResults": "

    The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned NextToken value.

    ", + "DescribeImportSnapshotTasksRequest$MaxResults": "

    The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned NextToken value.

    ", + "DescribeInstanceStatusRequest$MaxResults": "

    The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned NextToken value. This value can be between 5 and 1000. You cannot specify this parameter and the instance IDs parameter in the same call.

    ", + "DescribeInstancesRequest$MaxResults": "

    The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned NextToken value. This value can be between 5 and 1000. You cannot specify this parameter and the instance IDs parameter or tag filters in the same call.

    ", + "DescribeMovingAddressesRequest$MaxResults": "

    The maximum number of results to return for the request in a single page. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value outside of this range, an error is returned.

    Default: If no value is provided, the default is 1000.

    ", + "DescribeNatGatewaysRequest$MaxResults": "

    The maximum number of items to return for this request. The request returns a token that you can specify in a subsequent call to get the next set of results.

    Constraint: If the value specified is greater than 1000, we return only 1000 items.

    ", + "DescribePrefixListsRequest$MaxResults": "

    The maximum number of items to return for this request. The request returns a token that you can specify in a subsequent call to get the next set of results.

    Constraint: If the value specified is greater than 1000, we return only 1000 items.

    ", + "DescribeReservedInstancesOfferingsRequest$MaxResults": "

    The maximum number of results to return for the request in a single page. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. The maximum is 100.

    Default: 100

    ", + "DescribeReservedInstancesOfferingsRequest$MaxInstanceCount": "

    The maximum number of instances to filter when searching for offerings.

    Default: 20

    ", + "DescribeScheduledInstanceAvailabilityRequest$MinSlotDurationInHours": "

    The minimum available duration, in hours. The minimum required duration is 1,200 hours per year. For example, the minimum daily schedule is 4 hours, the minimum weekly schedule is 24 hours, and the minimum monthly schedule is 100 hours.

    ", + "DescribeScheduledInstanceAvailabilityRequest$MaxSlotDurationInHours": "

    The maximum available duration, in hours. This value must be greater than MinSlotDurationInHours and less than 1,720.

    ", + "DescribeScheduledInstanceAvailabilityRequest$MaxResults": "

    The maximum number of results to return in a single call. This value can be between 5 and 300. The default value is 300. To retrieve the remaining results, make another call with the returned NextToken value.

    ", + "DescribeScheduledInstancesRequest$MaxResults": "

    The maximum number of results to return in a single call. This value can be between 5 and 300. The default value is 100. To retrieve the remaining results, make another call with the returned NextToken value.

    ", + "DescribeSnapshotsRequest$MaxResults": "

    The maximum number of snapshot results returned by DescribeSnapshots in paginated output. When this parameter is used, DescribeSnapshots only returns MaxResults results in a single page along with a NextToken response element. The remaining results of the initial request can be seen by sending another DescribeSnapshots request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned. If this parameter is not used, then DescribeSnapshots returns all results. You cannot specify this parameter and the snapshot IDs parameter in the same request.
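
    The NextToken loop this describes, sketched with the svc client from the first example:

        input := &ec2.DescribeSnapshotsInput{
            OwnerIds:   []*string{aws.String("self")},
            MaxResults: aws.Int64(500), // between 5 and 1000
        }
        for {
            page, err := svc.DescribeSnapshots(input)
            if err != nil {
                log.Fatal(err)
            }
            for _, s := range page.Snapshots {
                fmt.Println(aws.StringValue(s.SnapshotId))
            }
            if aws.StringValue(page.NextToken) == "" {
                break // no more pages
            }
            input.NextToken = page.NextToken
        }

    The generated client also ships a DescribeSnapshotsPages helper that wraps this same loop in a callback.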

    ", + "DescribeSpotFleetInstancesRequest$MaxResults": "

    The maximum number of results to return in a single call. Specify a value between 1 and 1000. The default value is 1000. To retrieve the remaining results, make another call with the returned NextToken value.

    ", + "DescribeSpotFleetRequestHistoryRequest$MaxResults": "

    The maximum number of results to return in a single call. Specify a value between 1 and 1000. The default value is 1000. To retrieve the remaining results, make another call with the returned NextToken value.

    ", + "DescribeSpotFleetRequestsRequest$MaxResults": "

    The maximum number of results to return in a single call. Specify a value between 1 and 1000. The default value is 1000. To retrieve the remaining results, make another call with the returned NextToken value.

    ", + "DescribeSpotPriceHistoryRequest$MaxResults": "

    The maximum number of results to return in a single call. Specify a value between 1 and 1000. The default value is 1000. To retrieve the remaining results, make another call with the returned NextToken value.

    ", + "DescribeTagsRequest$MaxResults": "

    The maximum number of results to return in a single call. This value can be between 5 and 1000. To retrieve the remaining results, make another call with the returned NextToken value.

    ", + "DescribeVolumeStatusRequest$MaxResults": "

    The maximum number of volume results returned by DescribeVolumeStatus in paginated output. When this parameter is used, the request only returns MaxResults results in a single page along with a NextToken response element. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned. If this parameter is not used, then DescribeVolumeStatus returns all results. You cannot specify this parameter and the volume IDs parameter in the same request.

    ", + "DescribeVolumesRequest$MaxResults": "

    The maximum number of volume results returned by DescribeVolumes in paginated output. When this parameter is used, DescribeVolumes only returns MaxResults results in a single page along with a NextToken response element. The remaining results of the initial request can be seen by sending another DescribeVolumes request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned. If this parameter is not used, then DescribeVolumes returns all results. You cannot specify this parameter and the volume IDs parameter in the same request.

    ", + "DescribeVpcEndpointServicesRequest$MaxResults": "

    The maximum number of items to return for this request. The request returns a token that you can specify in a subsequent call to get the next set of results.

    Constraint: If the value is greater than 1000, we return only 1000 items.

    ", + "DescribeVpcEndpointsRequest$MaxResults": "

    The maximum number of items to return for this request. The request returns a token that you can specify in a subsequent call to get the next set of results.

    Constraint: If the value is greater than 1000, we return only 1000 items.

    ", + "EbsBlockDevice$VolumeSize": "

    The size of the volume, in GiB.

    Constraints: 1-16384 for General Purpose SSD (gp2), 4-16384 for Provisioned IOPS SSD (io1), 500-16384 for Throughput Optimized HDD (st1), 500-16384 for Cold HDD (sc1), and 1-1024 for Magnetic (standard) volumes. If you specify a snapshot, the volume size must be equal to or larger than the snapshot size.

    Default: If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size.

    ", + "EbsBlockDevice$Iops": "

    The number of I/O operations per second (IOPS) that the volume supports. For io1, this represents the number of IOPS that are provisioned for the volume. For gp2, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. For more information about General Purpose SSD baseline performance, I/O credits, and bursting, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.

    Constraint: Range is 100-20000 IOPS for io1 volumes and 100-10000 IOPS for gp2 volumes.

    Condition: This parameter is required for requests to create io1 volumes; it is not used in requests to create gp2, st1, sc1, or standard volumes.

    ", + "HostProperties$Sockets": "

    The number of sockets on the Dedicated host.

    ", + "HostProperties$Cores": "

    The number of cores on the Dedicated host.

    ", + "HostProperties$TotalVCpus": "

    The number of vCPUs on the Dedicated host.

    ", + "IcmpTypeCode$Type": "

    The ICMP type. A value of -1 means all types.

    ", + "IcmpTypeCode$Code": "

    The ICMP code. A value of -1 means all codes for the specified ICMP type.

    ", + "Instance$AmiLaunchIndex": "

    The AMI launch index, which can be used to find this instance in the launch group.

    ", + "InstanceCapacity$AvailableCapacity": "

    The number of instances that can still be launched onto the Dedicated host.

    ", + "InstanceCapacity$TotalCapacity": "

    The total number of instances that can be launched onto the Dedicated host.

    ", + "InstanceCount$InstanceCount": "

    The number of listed Reserved Instances in the specified state.

    ", + "InstanceNetworkInterfaceAttachment$DeviceIndex": "

    The index of the device on the instance for the network interface attachment.

    ", + "InstanceNetworkInterfaceSpecification$DeviceIndex": "

    The index of the device on the instance for the network interface attachment. If you are specifying a network interface in a RunInstances request, you must provide the device index.

    ", + "InstanceNetworkInterfaceSpecification$SecondaryPrivateIpAddressCount": "

    The number of secondary private IP addresses. You can't specify this option and specify more than one private IP address using the private IP addresses option.

    ", + "InstanceState$Code": "

    The low byte represents the state. The high byte is an opaque internal value and should be ignored; a short masking sketch follows this list.

    • 0 : pending

    • 16 : running

    • 32 : shutting-down

    • 48 : terminated

    • 64 : stopping

    • 80 : stopped
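
    A sketch of the masking this implies (ec2 and aws imports as in the first example):

        // isRunning drops the opaque high byte and compares the low byte
        // of the state code against 16 (running).
        func isRunning(inst *ec2.Instance) bool {
            return aws.Int64Value(inst.State.Code)&0xFF == 16
        }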

    ", + "IpPermission$FromPort": "

    The start of port range for the TCP and UDP protocols, or an ICMP type number. A value of -1 indicates all ICMP types.

    ", + "IpPermission$ToPort": "

    The end of port range for the TCP and UDP protocols, or an ICMP code. A value of -1 indicates all ICMP codes for the specified ICMP type.

    ", + "ModifySpotFleetRequestRequest$TargetCapacity": "

    The size of the fleet.

    ", + "NetworkAclEntry$RuleNumber": "

    The rule number for the entry. ACL entries are processed in ascending order by rule number.

    ", + "NetworkInterfaceAttachment$DeviceIndex": "

    The device index of the network interface attachment on the instance.

    ", + "OccurrenceDayRequestSet$member": null, + "OccurrenceDaySet$member": null, + "PortRange$From": "

    The first port in the range.

    ", + "PortRange$To": "

    The last port in the range.

    ", + "PricingDetail$Count": "

    The number of reservations available for the price.

    ", + "PurchaseRequest$InstanceCount": "

    The number of instances.

    ", + "PurchaseReservedInstancesOfferingRequest$InstanceCount": "

    The number of Reserved Instances to purchase.

    ", + "ReplaceNetworkAclEntryRequest$RuleNumber": "

    The rule number of the entry to replace.

    ", + "RequestSpotInstancesRequest$InstanceCount": "

    The maximum number of Spot instances to launch.

    Default: 1

    ", + "RequestSpotInstancesRequest$BlockDurationMinutes": "

    The required duration for the Spot instances (also known as Spot blocks), in minutes. This value must be a multiple of 60 (60, 120, 180, 240, 300, or 360).

    The duration period starts as soon as your Spot instance receives its instance ID. At the end of the duration period, Amazon EC2 marks the Spot instance for termination and provides a Spot instance termination notice, which gives the instance a two-minute warning before it terminates.

    Note that you can't specify an Availability Zone group or a launch group if you specify a duration.
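
    A sketch of a two-hour Spot block (the AMI ID is a placeholder; svc as in the first sketch):

        blk, err := svc.RequestSpotInstances(&ec2.RequestSpotInstancesInput{
            SpotPrice:            aws.String("0.05"),
            InstanceCount:        aws.Int64(1),
            BlockDurationMinutes: aws.Int64(120), // a multiple of 60, up to 360
            LaunchSpecification: &ec2.RequestSpotLaunchSpecification{
                ImageId:      aws.String("ami-12345678"), // placeholder
                InstanceType: aws.String("m3.medium"),
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        for _, r := range blk.SpotInstanceRequests {
            fmt.Println(aws.StringValue(r.SpotInstanceRequestId))
        }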

    ", + "ReservedInstances$InstanceCount": "

    The number of reservations purchased.

    ", + "ReservedInstancesConfiguration$InstanceCount": "

    The number of modified Reserved Instances.

    ", + "RevokeSecurityGroupEgressRequest$FromPort": "

    The start of port range for the TCP and UDP protocols, or an ICMP type number. We recommend that you specify the port range in a set of IP permissions instead.

    ", + "RevokeSecurityGroupEgressRequest$ToPort": "

    The end of port range for the TCP and UDP protocols, or an ICMP code number. We recommend that you specify the port range in a set of IP permissions instead.

    ", + "RevokeSecurityGroupIngressRequest$FromPort": "

    The start of port range for the TCP and UDP protocols, or an ICMP type number. For the ICMP type number, use -1 to specify all ICMP types.

    ", + "RevokeSecurityGroupIngressRequest$ToPort": "

    The end of port range for the TCP and UDP protocols, or an ICMP code number. For the ICMP code number, use -1 to specify all ICMP codes for the ICMP type.

    ", + "RunInstancesRequest$MinCount": "

    The minimum number of instances to launch. If you specify a minimum that is more instances than Amazon EC2 can launch in the target Availability Zone, Amazon EC2 launches no instances.

    Constraints: Between 1 and the maximum number you're allowed for the specified instance type. For more information about the default limits, and how to request an increase, see How many instances can I run in Amazon EC2 in the Amazon EC2 General FAQ.

    ", + "RunInstancesRequest$MaxCount": "

    The maximum number of instances to launch. If you specify more instances than Amazon EC2 can launch in the target Availability Zone, Amazon EC2 launches the largest possible number of instances above MinCount.

    Constraints: Between 1 and the maximum number you're allowed for the specified instance type. For more information about the default limits, and how to request an increase, see How many instances can I run in Amazon EC2 in the Amazon EC2 FAQ.
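
    The MinCount/MaxCount pair gives RunInstances its all-or-nothing floor and best-effort ceiling. A minimal Go sketch (the AMI ID is a placeholder):

```go
package sketches

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// launchBestEffort asks for up to five instances but succeeds as long as at
// least two can be placed: fewer than MinCount available means zero launch;
// anything between MinCount and MaxCount comes back in the reservation.
func launchBestEffort(svc *ec2.EC2) (*ec2.Reservation, error) {
	return svc.RunInstances(&ec2.RunInstancesInput{
		ImageId:      aws.String("ami-12345678"), // placeholder
		InstanceType: aws.String("t2.micro"),
		MinCount:     aws.Int64(2),
		MaxCount:     aws.Int64(5),
	})
}
```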

    ", + "RunScheduledInstancesRequest$InstanceCount": "

    The number of instances.

    Default: 1

    ", + "ScheduledInstance$SlotDurationInHours": "

    The number of hours in the schedule.

    ", + "ScheduledInstance$TotalScheduledInstanceHours": "

    The total number of hours for a single instance for the entire term.

    ", + "ScheduledInstance$InstanceCount": "

    The number of instances.

    ", + "ScheduledInstanceAvailability$SlotDurationInHours": "

    The number of hours in the schedule.

    ", + "ScheduledInstanceAvailability$TotalScheduledInstanceHours": "

    The total number of hours for a single instance for the entire term.

    ", + "ScheduledInstanceAvailability$AvailableInstanceCount": "

    The number of available instances.

    ", + "ScheduledInstanceAvailability$MinTermDurationInDays": "

    The minimum term. The only possible value is 365 days.

    ", + "ScheduledInstanceAvailability$MaxTermDurationInDays": "

    The maximum term. The only possible value is 365 days.

    ", + "ScheduledInstanceRecurrence$Interval": "

    The interval quantity. The interval unit depends on the value of frequency. For example, every 2 weeks or every 2 months.

    ", + "ScheduledInstanceRecurrenceRequest$Interval": "

    The interval quantity. The interval unit depends on the value of Frequency. For example, every 2 weeks or every 2 months.

    ", + "ScheduledInstancesEbs$VolumeSize": "

    The size of the volume, in GiB.

    Default: If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size.

    ", + "ScheduledInstancesEbs$Iops": "

    The number of I/O operations per second (IOPS) that the volume supports. For io1 volumes, this represents the number of IOPS that are provisioned for the volume. For gp2 volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. For more information about gp2 baseline performance, I/O credits, and bursting, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.

    Constraint: Range is 100-20000 IOPS for io1 volumes and 100-10000 IOPS for gp2 volumes.

    Condition: This parameter is required for requests to create io1 volumes; it is not used in requests to create gp2, st1, sc1, or standard volumes.

    ", + "ScheduledInstancesNetworkInterface$DeviceIndex": "

    The index of the device for the network interface attachment.

    ", + "ScheduledInstancesNetworkInterface$SecondaryPrivateIpAddressCount": "

    The number of secondary private IP addresses.

    ", + "Snapshot$VolumeSize": "

    The size of the volume, in GiB.

    ", + "SpotFleetRequestConfigData$TargetCapacity": "

    The number of units to request. You can choose to set the target capacity in terms of instances or a performance characteristic that is important to your application workload, such as vCPUs, memory, or I/O.

    ", + "SpotInstanceRequest$BlockDurationMinutes": "

    The duration for the Spot instance, in minutes.

    ", + "StaleIpPermission$FromPort": "

    The start of the port range for the TCP and UDP protocols, or an ICMP type number. A value of -1 indicates all ICMP types.

    ", + "StaleIpPermission$ToPort": "

    The end of the port range for the TCP and UDP protocols, or an ICMP type number. A value of -1 indicates all ICMP types.

    ", + "Subnet$AvailableIpAddressCount": "

    The number of unused IP addresses in the subnet. Note that the IP addresses for any stopped instances are considered unavailable.

    ", + "VgwTelemetry$AcceptedRouteCount": "

    The number of accepted routes.

    ", + "Volume$Size": "

    The size of the volume, in GiBs.

    ", + "Volume$Iops": "

    The number of I/O operations per second (IOPS) that the volume supports. For Provisioned IOPS SSD volumes, this represents the number of IOPS that are provisioned for the volume. For General Purpose SSD volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. For more information on General Purpose SSD baseline performance, I/O credits, and bursting, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.

    Constraint: Range is 100-20000 IOPS for io1 volumes and 100-10000 IOPS for gp2 volumes.

    Condition: This parameter is required for requests to create io1 volumes; it is not used in requests to create gp2, st1, sc1, or standard volumes.

    " + } + }, + "InternetGateway": { + "base": "

    Describes an Internet gateway.

    ", + "refs": { + "CreateInternetGatewayResult$InternetGateway": "

    Information about the Internet gateway.

    ", + "InternetGatewayList$member": null + } + }, + "InternetGatewayAttachment": { + "base": "

    Describes the attachment of a VPC to an Internet gateway.

    ", + "refs": { + "InternetGatewayAttachmentList$member": null + } + }, + "InternetGatewayAttachmentList": { + "base": null, + "refs": { + "InternetGateway$Attachments": "

    Any VPCs attached to the Internet gateway.

    " + } + }, + "InternetGatewayList": { + "base": null, + "refs": { + "DescribeInternetGatewaysResult$InternetGateways": "

    Information about one or more Internet gateways.

    " + } + }, + "IpPermission": { + "base": "

    Describes a security group rule.

    ", + "refs": { + "IpPermissionList$member": null + } + }, + "IpPermissionList": { + "base": null, + "refs": { + "AuthorizeSecurityGroupEgressRequest$IpPermissions": "

    A set of IP permissions. You can't specify a destination security group and a CIDR IP address range.

    ", + "AuthorizeSecurityGroupIngressRequest$IpPermissions": "

    A set of IP permissions. You can use this parameter to specify multiple rules in a single command.

    ", + "RevokeSecurityGroupEgressRequest$IpPermissions": "

    A set of IP permissions. You can't specify a destination security group and a CIDR IP address range.

    ", + "RevokeSecurityGroupIngressRequest$IpPermissions": "

    A set of IP permissions. You can't specify a source security group and a CIDR IP address range.

    ", + "SecurityGroup$IpPermissions": "

    One or more inbound rules associated with the security group.

    ", + "SecurityGroup$IpPermissionsEgress": "

    [EC2-VPC] One or more outbound rules associated with the security group.
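
    Each rule in these sets is an IpPermission (protocol plus port range plus ranges), and one call can carry several. A hedged Go sketch, with placeholder group ID and open CIDR:

```go
package sketches

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// openWebPorts adds two ingress rules in one call by passing multiple
// IpPermission entries.
func openWebPorts(svc *ec2.EC2) error {
	_, err := svc.AuthorizeSecurityGroupIngress(&ec2.AuthorizeSecurityGroupIngressInput{
		GroupId: aws.String("sg-12345678"), // placeholder
		IpPermissions: []*ec2.IpPermission{
			{
				IpProtocol: aws.String("tcp"),
				FromPort:   aws.Int64(80),
				ToPort:     aws.Int64(80),
				IpRanges:   []*ec2.IpRange{{CidrIp: aws.String("0.0.0.0/0")}},
			},
			{
				IpProtocol: aws.String("tcp"),
				FromPort:   aws.Int64(443),
				ToPort:     aws.Int64(443),
				IpRanges:   []*ec2.IpRange{{CidrIp: aws.String("0.0.0.0/0")}},
			},
		},
	})
	return err
}
```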

    " + } + }, + "IpRange": { + "base": "

    Describes an IP range.

    ", + "refs": { + "IpRangeList$member": null + } + }, + "IpRangeList": { + "base": null, + "refs": { + "IpPermission$IpRanges": "

    One or more IP ranges.

    " + } + }, + "IpRanges": { + "base": null, + "refs": { + "StaleIpPermission$IpRanges": "

    One or more IP ranges. Not applicable for stale security group rules.

    " + } + }, + "KeyNameStringList": { + "base": null, + "refs": { + "DescribeKeyPairsRequest$KeyNames": "

    One or more key pair names.

    Default: Describes all your key pairs.

    " + } + }, + "KeyPair": { + "base": "

    Describes a key pair.

    ", + "refs": { + } + }, + "KeyPairInfo": { + "base": "

    Describes a key pair.

    ", + "refs": { + "KeyPairList$member": null + } + }, + "KeyPairList": { + "base": null, + "refs": { + "DescribeKeyPairsResult$KeyPairs": "

    Information about one or more key pairs.

    " + } + }, + "LaunchPermission": { + "base": "

    Describes a launch permission.

    ", + "refs": { + "LaunchPermissionList$member": null + } + }, + "LaunchPermissionList": { + "base": null, + "refs": { + "ImageAttribute$LaunchPermissions": "

    One or more launch permissions.

    ", + "LaunchPermissionModifications$Add": "

    The AWS account ID to add to the list of launch permissions for the AMI.

    ", + "LaunchPermissionModifications$Remove": "

    The AWS account ID to remove from the list of launch permissions for the AMI.

    " + } + }, + "LaunchPermissionModifications": { + "base": "

    Describes a launch permission modification.

    ", + "refs": { + "ModifyImageAttributeRequest$LaunchPermission": "

    A launch permission modification.

    " + } + }, + "LaunchSpecification": { + "base": "

    Describes the launch specification for an instance.

    ", + "refs": { + "SpotInstanceRequest$LaunchSpecification": "

    Additional information for launching instances.

    " + } + }, + "LaunchSpecsList": { + "base": null, + "refs": { + "SpotFleetRequestConfigData$LaunchSpecifications": "

    Information about the launch specifications for the Spot fleet request.

    " + } + }, + "ListingState": { + "base": null, + "refs": { + "InstanceCount$State": "

    The states of the listed Reserved Instances.

    " + } + }, + "ListingStatus": { + "base": null, + "refs": { + "ReservedInstancesListing$Status": "

    The status of the Reserved Instance listing.

    " + } + }, + "Long": { + "base": null, + "refs": { + "DescribeReservedInstancesOfferingsRequest$MinDuration": "

    The minimum duration (in seconds) to filter when searching for offerings.

    Default: 2592000 (1 month)

    ", + "DescribeReservedInstancesOfferingsRequest$MaxDuration": "

    The maximum duration (in seconds) to filter when searching for offerings.

    Default: 94608000 (3 years)
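
    For instance, a sketch that overrides the 2592000/94608000-second defaults above to search only one-month-to-one-year terms:

```go
package sketches

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// oneYearOfferings narrows the offering search by duration, in seconds.
func oneYearOfferings(svc *ec2.EC2) ([]*ec2.ReservedInstancesOffering, error) {
	out, err := svc.DescribeReservedInstancesOfferings(&ec2.DescribeReservedInstancesOfferingsInput{
		MinDuration: aws.Int64(2592000),  // 1 month
		MaxDuration: aws.Int64(31536000), // 1 year
	})
	if err != nil {
		return nil, err
	}
	return out.ReservedInstancesOfferings, nil
}
```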

    ", + "DiskImageDescription$Size": "

    The size of the disk image, in GiB.

    ", + "DiskImageDetail$Bytes": "

    The size of the disk image, in GiB.

    ", + "DiskImageVolumeDescription$Size": "

    The size of the volume, in GiB.

    ", + "ImportInstanceVolumeDetailItem$BytesConverted": "

    The number of bytes converted so far.

    ", + "ImportVolumeTaskDetails$BytesConverted": "

    The number of bytes converted so far.

    ", + "PriceSchedule$Term": "

    The number of months remaining in the reservation. For example, 2 is the second to the last month before the capacity reservation expires.

    ", + "PriceScheduleSpecification$Term": "

    The number of months remaining in the reservation. For example, 2 is the second to the last month before the capacity reservation expires.

    ", + "ReservedInstances$Duration": "

    The duration of the Reserved Instance, in seconds.

    ", + "ReservedInstancesOffering$Duration": "

    The duration of the Reserved Instance, in seconds.

    ", + "VolumeDetail$Size": "

    The size of the volume, in GiB.

    " + } + }, + "MaxResults": { + "base": null, + "refs": { + "DescribeStaleSecurityGroupsRequest$MaxResults": "

    The maximum number of items to return for this request. The request returns a token that you can specify in a subsequent call to get the next set of results.

    ", + "DescribeVpcClassicLinkDnsSupportRequest$MaxResults": "

    The maximum number of items to return for this request. The request returns a token that you can specify in a subsequent call to get the next set of results.

    " + } + }, + "ModifyHostsRequest": { + "base": "

    Contains the parameters for ModifyHosts.

    ", + "refs": { + } + }, + "ModifyHostsResult": { + "base": "

    Contains the output of ModifyHosts.

    ", + "refs": { + } + }, + "ModifyIdFormatRequest": { + "base": "

    Contains the parameters of ModifyIdFormat.

    ", + "refs": { + } + }, + "ModifyIdentityIdFormatRequest": { + "base": "

    Contains the parameters of ModifyIdentityIdFormat.

    ", + "refs": { + } + }, + "ModifyImageAttributeRequest": { + "base": "

    Contains the parameters for ModifyImageAttribute.

    ", + "refs": { + } + }, + "ModifyInstanceAttributeRequest": { + "base": "

    Contains the parameters for ModifyInstanceAttribute.

    ", + "refs": { + } + }, + "ModifyInstancePlacementRequest": { + "base": "

    Contains the parameters for ModifyInstancePlacement.

    ", + "refs": { + } + }, + "ModifyInstancePlacementResult": { + "base": "

    Contains the output of ModifyInstancePlacement.

    ", + "refs": { + } + }, + "ModifyNetworkInterfaceAttributeRequest": { + "base": "

    Contains the parameters for ModifyNetworkInterfaceAttribute.

    ", + "refs": { + } + }, + "ModifyReservedInstancesRequest": { + "base": "

    Contains the parameters for ModifyReservedInstances.

    ", + "refs": { + } + }, + "ModifyReservedInstancesResult": { + "base": "

    Contains the output of ModifyReservedInstances.

    ", + "refs": { + } + }, + "ModifySnapshotAttributeRequest": { + "base": "

    Contains the parameters for ModifySnapshotAttribute.

    ", + "refs": { + } + }, + "ModifySpotFleetRequestRequest": { + "base": "

    Contains the parameters for ModifySpotFleetRequest.

    ", + "refs": { + } + }, + "ModifySpotFleetRequestResponse": { + "base": "

    Contains the output of ModifySpotFleetRequest.

    ", + "refs": { + } + }, + "ModifySubnetAttributeRequest": { + "base": "

    Contains the parameters for ModifySubnetAttribute.

    ", + "refs": { + } + }, + "ModifyVolumeAttributeRequest": { + "base": "

    Contains the parameters for ModifyVolumeAttribute.

    ", + "refs": { + } + }, + "ModifyVpcAttributeRequest": { + "base": "

    Contains the parameters for ModifyVpcAttribute.

    ", + "refs": { + } + }, + "ModifyVpcEndpointRequest": { + "base": "

    Contains the parameters for ModifyVpcEndpoint.

    ", + "refs": { + } + }, + "ModifyVpcEndpointResult": { + "base": "

    Contains the output of ModifyVpcEndpoint.

    ", + "refs": { + } + }, + "ModifyVpcPeeringConnectionOptionsRequest": { + "base": null, + "refs": { + } + }, + "ModifyVpcPeeringConnectionOptionsResult": { + "base": null, + "refs": { + } + }, + "MonitorInstancesRequest": { + "base": "

    Contains the parameters for MonitorInstances.

    ", + "refs": { + } + }, + "MonitorInstancesResult": { + "base": "

    Contains the output of MonitorInstances.

    ", + "refs": { + } + }, + "Monitoring": { + "base": "

    Describes the monitoring for the instance.

    ", + "refs": { + "Instance$Monitoring": "

    The monitoring information for the instance.

    ", + "InstanceMonitoring$Monitoring": "

    The monitoring information.

    " + } + }, + "MonitoringState": { + "base": null, + "refs": { + "Monitoring$State": "

    Indicates whether monitoring is enabled for the instance.

    " + } + }, + "MoveAddressToVpcRequest": { + "base": "

    Contains the parameters for MoveAddressToVpc.

    ", + "refs": { + } + }, + "MoveAddressToVpcResult": { + "base": "

    Contains the output of MoveAddressToVpc.

    ", + "refs": { + } + }, + "MoveStatus": { + "base": null, + "refs": { + "MovingAddressStatus$MoveStatus": "

    The status of the Elastic IP address that's being moved to the EC2-VPC platform, or restored to the EC2-Classic platform.

    " + } + }, + "MovingAddressStatus": { + "base": "

    Describes the status of a moving Elastic IP address.

    ", + "refs": { + "MovingAddressStatusSet$member": null + } + }, + "MovingAddressStatusSet": { + "base": null, + "refs": { + "DescribeMovingAddressesResult$MovingAddressStatuses": "

    The status for each Elastic IP address.

    " + } + }, + "NatGateway": { + "base": "

    Describes a NAT gateway.

    ", + "refs": { + "CreateNatGatewayResult$NatGateway": "

    Information about the NAT gateway.

    ", + "NatGatewayList$member": null + } + }, + "NatGatewayAddress": { + "base": "

    Describes the IP addresses and network interface associated with a NAT gateway.

    ", + "refs": { + "NatGatewayAddressList$member": null + } + }, + "NatGatewayAddressList": { + "base": null, + "refs": { + "NatGateway$NatGatewayAddresses": "

    Information about the IP addresses and network interface associated with the NAT gateway.

    " + } + }, + "NatGatewayList": { + "base": null, + "refs": { + "DescribeNatGatewaysResult$NatGateways": "

    Information about the NAT gateways.

    " + } + }, + "NatGatewayState": { + "base": null, + "refs": { + "NatGateway$State": "

    The state of the NAT gateway.

    • pending: The NAT gateway is being created and is not ready to process traffic.

    • failed: The NAT gateway could not be created. Check the failureCode and failureMessage fields for the reason.

    • available: The NAT gateway is able to process traffic. This status remains until you delete the NAT gateway, and does not indicate the health of the NAT gateway.

    • deleting: The NAT gateway is in the process of being terminated and may still be processing traffic.

    • deleted: The NAT gateway has been terminated and is no longer processing traffic.
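
    These states form a simple lifecycle, so a caller typically polls DescribeNatGateways and switches on State, consulting the failure fields when creation failed. A minimal sketch; the gateway ID is a placeholder:

```go
package sketches

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// natGatewayReady reports whether a NAT gateway can process traffic,
// surfacing failureCode/failureMessage when creation failed.
func natGatewayReady(svc *ec2.EC2) (bool, error) {
	out, err := svc.DescribeNatGateways(&ec2.DescribeNatGatewaysInput{
		NatGatewayIds: []*string{aws.String("nat-0123456789abcdef0")}, // placeholder
	})
	if err != nil || len(out.NatGateways) == 0 {
		return false, err
	}
	gw := out.NatGateways[0]
	switch aws.StringValue(gw.State) {
	case "available":
		return true, nil
	case "failed":
		return false, fmt.Errorf("NAT gateway failed: %s: %s",
			aws.StringValue(gw.FailureCode), aws.StringValue(gw.FailureMessage))
	default: // pending, deleting, deleted
		return false, nil
	}
}
```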

    " + } + }, + "NetworkAcl": { + "base": "

    Describes a network ACL.

    ", + "refs": { + "CreateNetworkAclResult$NetworkAcl": "

    Information about the network ACL.

    ", + "NetworkAclList$member": null + } + }, + "NetworkAclAssociation": { + "base": "

    Describes an association between a network ACL and a subnet.

    ", + "refs": { + "NetworkAclAssociationList$member": null + } + }, + "NetworkAclAssociationList": { + "base": null, + "refs": { + "NetworkAcl$Associations": "

    Any associations between the network ACL and one or more subnets.

    " + } + }, + "NetworkAclEntry": { + "base": "

    Describes an entry in a network ACL.

    ", + "refs": { + "NetworkAclEntryList$member": null + } + }, + "NetworkAclEntryList": { + "base": null, + "refs": { + "NetworkAcl$Entries": "

    One or more entries (rules) in the network ACL.

    " + } + }, + "NetworkAclList": { + "base": null, + "refs": { + "DescribeNetworkAclsResult$NetworkAcls": "

    Information about one or more network ACLs.

    " + } + }, + "NetworkInterface": { + "base": "

    Describes a network interface.

    ", + "refs": { + "CreateNetworkInterfaceResult$NetworkInterface": "

    Information about the network interface.

    ", + "NetworkInterfaceList$member": null + } + }, + "NetworkInterfaceAssociation": { + "base": "

    Describes association information for an Elastic IP address.

    ", + "refs": { + "NetworkInterface$Association": "

    The association information for an Elastic IP associated with the network interface.

    ", + "NetworkInterfacePrivateIpAddress$Association": "

    The association information for an Elastic IP address associated with the network interface.

    " + } + }, + "NetworkInterfaceAttachment": { + "base": "

    Describes a network interface attachment.

    ", + "refs": { + "DescribeNetworkInterfaceAttributeResult$Attachment": "

    The attachment (if any) of the network interface.

    ", + "NetworkInterface$Attachment": "

    The network interface attachment.

    " + } + }, + "NetworkInterfaceAttachmentChanges": { + "base": "

    Describes an attachment change.

    ", + "refs": { + "ModifyNetworkInterfaceAttributeRequest$Attachment": "

    Information about the interface attachment. If modifying the 'delete on termination' attribute, you must specify the ID of the interface attachment.
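
    A sketch of that requirement in Go; both IDs are placeholders:

```go
package sketches

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// setDeleteOnTermination flips the 'delete on termination' flag; as the doc
// string above notes, the attachment ID must accompany the change.
func setDeleteOnTermination(svc *ec2.EC2) error {
	_, err := svc.ModifyNetworkInterfaceAttribute(&ec2.ModifyNetworkInterfaceAttributeInput{
		NetworkInterfaceId: aws.String("eni-12345678"), // placeholder
		Attachment: &ec2.NetworkInterfaceAttachmentChanges{
			AttachmentId:        aws.String("eni-attach-12345678"), // placeholder
			DeleteOnTermination: aws.Bool(true),
		},
	})
	return err
}
```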

    " + } + }, + "NetworkInterfaceAttribute": { + "base": null, + "refs": { + "DescribeNetworkInterfaceAttributeRequest$Attribute": "

    The attribute of the network interface.

    " + } + }, + "NetworkInterfaceIdList": { + "base": null, + "refs": { + "DescribeNetworkInterfacesRequest$NetworkInterfaceIds": "

    One or more network interface IDs.

    Default: Describes all your network interfaces.

    " + } + }, + "NetworkInterfaceList": { + "base": null, + "refs": { + "DescribeNetworkInterfacesResult$NetworkInterfaces": "

    Information about one or more network interfaces.

    " + } + }, + "NetworkInterfacePrivateIpAddress": { + "base": "

    Describes the private IP address of a network interface.

    ", + "refs": { + "NetworkInterfacePrivateIpAddressList$member": null + } + }, + "NetworkInterfacePrivateIpAddressList": { + "base": null, + "refs": { + "NetworkInterface$PrivateIpAddresses": "

    The private IP addresses associated with the network interface.

    " + } + }, + "NetworkInterfaceStatus": { + "base": null, + "refs": { + "InstanceNetworkInterface$Status": "

    The status of the network interface.

    ", + "NetworkInterface$Status": "

    The status of the network interface.

    " + } + }, + "NetworkInterfaceType": { + "base": null, + "refs": { + "NetworkInterface$InterfaceType": "

    The type of interface.

    " + } + }, + "NewDhcpConfiguration": { + "base": null, + "refs": { + "NewDhcpConfigurationList$member": null + } + }, + "NewDhcpConfigurationList": { + "base": null, + "refs": { + "CreateDhcpOptionsRequest$DhcpConfigurations": "

    A DHCP configuration option.

    " + } + }, + "NextToken": { + "base": null, + "refs": { + "DescribeStaleSecurityGroupsRequest$NextToken": "

    The token for the next set of items to return. (You received this token from a prior call.)

    ", + "DescribeVpcClassicLinkDnsSupportRequest$NextToken": "

    The token for the next set of items to return. (You received this token from a prior call.)

    ", + "DescribeVpcClassicLinkDnsSupportResult$NextToken": "

    The token to use when requesting the next set of items.
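
    The NextToken pair follows the usual pagination contract: feed each response token into the next request until the service stops returning one. A minimal Go loop:

```go
package sketches

import (
	"github.com/aws/aws-sdk-go/service/ec2"
)

// allClassicLinkDnsVpcs follows NextToken page by page, accumulating every
// result before returning.
func allClassicLinkDnsVpcs(svc *ec2.EC2) ([]*ec2.ClassicLinkDnsSupport, error) {
	var vpcs []*ec2.ClassicLinkDnsSupport
	input := &ec2.DescribeVpcClassicLinkDnsSupportInput{}
	for {
		out, err := svc.DescribeVpcClassicLinkDnsSupport(input)
		if err != nil {
			return nil, err
		}
		vpcs = append(vpcs, out.Vpcs...)
		if out.NextToken == nil {
			return vpcs, nil
		}
		input.NextToken = out.NextToken // token from the prior call
	}
}
```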

    " + } + }, + "OccurrenceDayRequestSet": { + "base": null, + "refs": { + "ScheduledInstanceRecurrenceRequest$OccurrenceDays": "

    The days. For a monthly schedule, this is one or more days of the month (1-31). For a weekly schedule, this is one or more days of the week (1-7, where 1 is Sunday). You can't specify this value with a daily schedule. If the occurrence is relative to the end of the month, you can specify only a single day.

    " + } + }, + "OccurrenceDaySet": { + "base": null, + "refs": { + "ScheduledInstanceRecurrence$OccurrenceDaySet": "

    The days. For a monthly schedule, this is one or more days of the month (1-31). For a weekly schedule, this is one or more days of the week (1-7, where 1 is Sunday).

    " + } + }, + "OfferingTypeValues": { + "base": null, + "refs": { + "DescribeReservedInstancesOfferingsRequest$OfferingType": "

    The Reserved Instance offering type. If you are using tools that predate the 2011-11-01 API version, you only have access to the Medium Utilization Reserved Instance offering type.

    ", + "DescribeReservedInstancesRequest$OfferingType": "

    The Reserved Instance offering type. If you are using tools that predate the 2011-11-01 API version, you only have access to the Medium Utilization Reserved Instance offering type.

    ", + "ReservedInstances$OfferingType": "

    The Reserved Instance offering type.

    ", + "ReservedInstancesOffering$OfferingType": "

    The Reserved Instance offering type.

    " + } + }, + "OperationType": { + "base": null, + "refs": { + "ModifyImageAttributeRequest$OperationType": "

    The operation type.

    ", + "ModifySnapshotAttributeRequest$OperationType": "

    The type of operation to perform to the attribute.

    " + } + }, + "OwnerStringList": { + "base": null, + "refs": { + "DescribeImagesRequest$Owners": "

    Filters the images by the owner. Specify an AWS account ID, amazon (owner is Amazon), aws-marketplace (owner is AWS Marketplace), self (owner is the sender of the request). Omitting this option returns all images for which you have launch permissions, regardless of ownership.

    ", + "DescribeSnapshotsRequest$OwnerIds": "

    Returns the snapshots owned by the specified owner. Multiple owners can be specified.

    " + } + }, + "PeeringConnectionOptions": { + "base": "

    Describes the VPC peering connection options.

    ", + "refs": { + "ModifyVpcPeeringConnectionOptionsResult$RequesterPeeringConnectionOptions": "

    Information about the VPC peering connection options for the requester VPC.

    ", + "ModifyVpcPeeringConnectionOptionsResult$AccepterPeeringConnectionOptions": "

    Information about the VPC peering connection options for the accepter VPC.

    " + } + }, + "PeeringConnectionOptionsRequest": { + "base": "

    The VPC peering connection options.

    ", + "refs": { + "ModifyVpcPeeringConnectionOptionsRequest$RequesterPeeringConnectionOptions": "

    The VPC peering connection options for the requester VPC.

    ", + "ModifyVpcPeeringConnectionOptionsRequest$AccepterPeeringConnectionOptions": "

    The VPC peering connection options for the accepter VPC.

    " + } + }, + "PermissionGroup": { + "base": null, + "refs": { + "CreateVolumePermission$Group": "

    The specific group that is to be added or removed from a volume's list of create volume permissions.

    ", + "LaunchPermission$Group": "

    The name of the group.

    " + } + }, + "Placement": { + "base": "

    Describes the placement for the instance.

    ", + "refs": { + "ImportInstanceLaunchSpecification$Placement": "

    The placement information for the instance.

    ", + "Instance$Placement": "

    The location where the instance launched, if applicable.

    ", + "RunInstancesRequest$Placement": "

    The placement for the instance.

    " + } + }, + "PlacementGroup": { + "base": "

    Describes a placement group.

    ", + "refs": { + "PlacementGroupList$member": null + } + }, + "PlacementGroupList": { + "base": null, + "refs": { + "DescribePlacementGroupsResult$PlacementGroups": "

    One or more placement groups.

    " + } + }, + "PlacementGroupState": { + "base": null, + "refs": { + "PlacementGroup$State": "

    The state of the placement group.

    " + } + }, + "PlacementGroupStringList": { + "base": null, + "refs": { + "DescribePlacementGroupsRequest$GroupNames": "

    One or more placement group names.

    Default: Describes all your placement groups, or only those otherwise specified.

    " + } + }, + "PlacementStrategy": { + "base": null, + "refs": { + "CreatePlacementGroupRequest$Strategy": "

    The placement strategy.

    ", + "PlacementGroup$Strategy": "

    The placement strategy.

    " + } + }, + "PlatformValues": { + "base": null, + "refs": { + "Image$Platform": "

    The value is Windows for Windows AMIs; otherwise blank.

    ", + "ImportInstanceRequest$Platform": "

    The instance operating system.

    ", + "ImportInstanceTaskDetails$Platform": "

    The instance operating system.

    ", + "Instance$Platform": "

    The value is Windows for Windows instances; otherwise blank.

    " + } + }, + "PortRange": { + "base": "

    Describes a range of ports.

    ", + "refs": { + "CreateNetworkAclEntryRequest$PortRange": "

    TCP or UDP protocols: The range of ports the rule applies to.

    ", + "NetworkAclEntry$PortRange": "

    TCP or UDP protocols: The range of ports the rule applies to.

    ", + "ReplaceNetworkAclEntryRequest$PortRange": "

    TCP or UDP protocols: The range of ports the rule applies to. Required if specifying 6 (TCP) or 17 (UDP) for the protocol.
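
    A sketch of that condition in Go: because the protocol is 6 (TCP), a PortRange must be supplied. The ACL ID and rule number are placeholders:

```go
package sketches

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// replaceHTTPRule swaps in a TCP allow rule on port 80.
func replaceHTTPRule(svc *ec2.EC2) error {
	_, err := svc.ReplaceNetworkAclEntry(&ec2.ReplaceNetworkAclEntryInput{
		NetworkAclId: aws.String("acl-12345678"), // placeholder
		RuleNumber:   aws.Int64(100),             // entries are evaluated in ascending order
		Protocol:     aws.String("6"),            // TCP, so PortRange is required
		RuleAction:   aws.String("allow"),
		Egress:       aws.Bool(false),
		CidrBlock:    aws.String("0.0.0.0/0"),
		PortRange: &ec2.PortRange{
			From: aws.Int64(80),
			To:   aws.Int64(80),
		},
	})
	return err
}
```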

    " + } + }, + "PrefixList": { + "base": "

    Describes prefixes for AWS services.

    ", + "refs": { + "PrefixListSet$member": null + } + }, + "PrefixListId": { + "base": "

    The ID of the prefix.

    ", + "refs": { + "PrefixListIdList$member": null + } + }, + "PrefixListIdList": { + "base": null, + "refs": { + "IpPermission$PrefixListIds": "

    (Valid for AuthorizeSecurityGroupEgress, RevokeSecurityGroupEgress and DescribeSecurityGroups only) One or more prefix list IDs for an AWS service. In an AuthorizeSecurityGroupEgress request, this is the AWS service that you want to access through a VPC endpoint from instances associated with the security group.

    " + } + }, + "PrefixListIdSet": { + "base": null, + "refs": { + "StaleIpPermission$PrefixListIds": "

    One or more prefix list IDs for an AWS service. Not applicable for stale security group rules.

    " + } + }, + "PrefixListSet": { + "base": null, + "refs": { + "DescribePrefixListsResult$PrefixLists": "

    All available prefix lists.

    " + } + }, + "PriceSchedule": { + "base": "

    Describes the price for a Reserved Instance.

    ", + "refs": { + "PriceScheduleList$member": null + } + }, + "PriceScheduleList": { + "base": null, + "refs": { + "ReservedInstancesListing$PriceSchedules": "

    The price of the Reserved Instance listing.

    " + } + }, + "PriceScheduleSpecification": { + "base": "

    Describes the price for a Reserved Instance.

    ", + "refs": { + "PriceScheduleSpecificationList$member": null + } + }, + "PriceScheduleSpecificationList": { + "base": null, + "refs": { + "CreateReservedInstancesListingRequest$PriceSchedules": "

    A list specifying the price of the Reserved Instance for each month remaining in the Reserved Instance term.

    " + } + }, + "PricingDetail": { + "base": "

    Describes a Reserved Instance offering.

    ", + "refs": { + "PricingDetailsList$member": null + } + }, + "PricingDetailsList": { + "base": null, + "refs": { + "ReservedInstancesOffering$PricingDetails": "

    The pricing details of the Reserved Instance offering.

    " + } + }, + "PrivateIpAddressConfigSet": { + "base": null, + "refs": { + "ScheduledInstancesNetworkInterface$PrivateIpAddressConfigs": "

    The private IP addresses.

    " + } + }, + "PrivateIpAddressSpecification": { + "base": "

    Describes a secondary private IP address for a network interface.

    ", + "refs": { + "PrivateIpAddressSpecificationList$member": null + } + }, + "PrivateIpAddressSpecificationList": { + "base": null, + "refs": { + "CreateNetworkInterfaceRequest$PrivateIpAddresses": "

    One or more private IP addresses.

    ", + "InstanceNetworkInterfaceSpecification$PrivateIpAddresses": "

    One or more private IP addresses to assign to the network interface. Only one private IP address can be designated as primary.

    " + } + }, + "PrivateIpAddressStringList": { + "base": null, + "refs": { + "AssignPrivateIpAddressesRequest$PrivateIpAddresses": "

    One or more IP addresses to be assigned as a secondary private IP address to the network interface. You can't specify this parameter when also specifying a number of secondary IP addresses.

    If you don't specify an IP address, Amazon EC2 automatically selects an IP address within the subnet range.
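
    For example, letting Amazon EC2 pick the addresses via the count variant (explicit addresses and a count are mutually exclusive, per the doc string above); the interface ID is a placeholder:

```go
package sketches

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// addSecondaryIps asks EC2 to choose two free addresses from the subnet range.
func addSecondaryIps(svc *ec2.EC2) error {
	_, err := svc.AssignPrivateIpAddresses(&ec2.AssignPrivateIpAddressesInput{
		NetworkInterfaceId:             aws.String("eni-12345678"), // placeholder
		SecondaryPrivateIpAddressCount: aws.Int64(2),
	})
	return err
}
```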

    ", + "UnassignPrivateIpAddressesRequest$PrivateIpAddresses": "

    The secondary private IP addresses to unassign from the network interface. You can specify this option multiple times to unassign more than one IP address.

    " + } + }, + "ProductCode": { + "base": "

    Describes a product code.

    ", + "refs": { + "ProductCodeList$member": null + } + }, + "ProductCodeList": { + "base": null, + "refs": { + "DescribeSnapshotAttributeResult$ProductCodes": "

    A list of product codes.

    ", + "DescribeVolumeAttributeResult$ProductCodes": "

    A list of product codes.

    ", + "Image$ProductCodes": "

    Any product codes associated with the AMI.

    ", + "ImageAttribute$ProductCodes": "

    One or more product codes.

    ", + "Instance$ProductCodes": "

    The product codes attached to this instance, if applicable.

    ", + "InstanceAttribute$ProductCodes": "

    A list of product codes.

    " + } + }, + "ProductCodeStringList": { + "base": null, + "refs": { + "ModifyImageAttributeRequest$ProductCodes": "

    One or more product codes. After you add a product code to an AMI, it can't be removed. This is only valid when modifying the productCodes attribute.

    " + } + }, + "ProductCodeValues": { + "base": null, + "refs": { + "ProductCode$ProductCodeType": "

    The type of product code.

    " + } + }, + "ProductDescriptionList": { + "base": null, + "refs": { + "DescribeSpotPriceHistoryRequest$ProductDescriptions": "

    Filters the results by the specified basic product descriptions.

    " + } + }, + "PropagatingVgw": { + "base": "

    Describes a virtual private gateway propagating route.

    ", + "refs": { + "PropagatingVgwList$member": null + } + }, + "PropagatingVgwList": { + "base": null, + "refs": { + "RouteTable$PropagatingVgws": "

    Any virtual private gateway (VGW) propagating routes.

    " + } + }, + "ProvisionedBandwidth": { + "base": "

    Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

    ", + "refs": { + "NatGateway$ProvisionedBandwidth": "

    Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

    " + } + }, + "PublicIpStringList": { + "base": null, + "refs": { + "DescribeAddressesRequest$PublicIps": "

    [EC2-Classic] One or more Elastic IP addresses.

    Default: Describes all your Elastic IP addresses.

    " + } + }, + "PurchaseRequest": { + "base": "

    Describes a request to purchase Scheduled Instances.

    ", + "refs": { + "PurchaseRequestSet$member": null + } + }, + "PurchaseRequestSet": { + "base": null, + "refs": { + "PurchaseScheduledInstancesRequest$PurchaseRequests": "

    One or more purchase requests.

    " + } + }, + "PurchaseReservedInstancesOfferingRequest": { + "base": "

    Contains the parameters for PurchaseReservedInstancesOffering.

    ", + "refs": { + } + }, + "PurchaseReservedInstancesOfferingResult": { + "base": "

    Contains the output of PurchaseReservedInstancesOffering.

    ", + "refs": { + } + }, + "PurchaseScheduledInstancesRequest": { + "base": "

    Contains the parameters for PurchaseScheduledInstances.

    ", + "refs": { + } + }, + "PurchaseScheduledInstancesResult": { + "base": "

    Contains the output of PurchaseScheduledInstances.

    ", + "refs": { + } + }, + "PurchasedScheduledInstanceSet": { + "base": null, + "refs": { + "PurchaseScheduledInstancesResult$ScheduledInstanceSet": "

    Information about the Scheduled Instances.

    " + } + }, + "RIProductDescription": { + "base": null, + "refs": { + "DescribeReservedInstancesOfferingsRequest$ProductDescription": "

    The Reserved Instance product platform description. Instances that include (Amazon VPC) in the description are for use with Amazon VPC.

    ", + "ReservedInstances$ProductDescription": "

    The Reserved Instance product platform description.

    ", + "ReservedInstancesOffering$ProductDescription": "

    The Reserved Instance product platform description.

    ", + "SpotInstanceRequest$ProductDescription": "

    The product description associated with the Spot instance.

    ", + "SpotPrice$ProductDescription": "

    A general description of the AMI.

    " + } + }, + "ReasonCodesList": { + "base": null, + "refs": { + "ReportInstanceStatusRequest$ReasonCodes": "

    One or more reason codes that describes the health state of your instance.

    • instance-stuck-in-state: My instance is stuck in a state.

    • unresponsive: My instance is unresponsive.

    • not-accepting-credentials: My instance is not accepting my credentials.

    • password-not-available: A password is not available for my instance.

    • performance-network: My instance is experiencing performance problems which I believe are network-related.

    • performance-instance-store: My instance is experiencing performance problems which I believe are related to the instance stores.

    • performance-ebs-volume: My instance is experiencing performance problems which I believe are related to an EBS volume.

    • performance-other: My instance is experiencing performance problems.

    • other: [explain using the description parameter]
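
    A sketch of filing such a report with one of the codes above; the instance ID is a placeholder:

```go
package sketches

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// reportUnresponsive marks an instance impaired with a reason code drawn from
// the list above.
func reportUnresponsive(svc *ec2.EC2) error {
	_, err := svc.ReportInstanceStatus(&ec2.ReportInstanceStatusInput{
		Instances:   []*string{aws.String("i-1234567890abcdef0")}, // placeholder
		Status:      aws.String("impaired"),
		ReasonCodes: []*string{aws.String("unresponsive")},
	})
	return err
}
```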

    " + } + }, + "RebootInstancesRequest": { + "base": "

    Contains the parameters for RebootInstances.

    ", + "refs": { + } + }, + "RecurringCharge": { + "base": "

    Describes a recurring charge.

    ", + "refs": { + "RecurringChargesList$member": null + } + }, + "RecurringChargeFrequency": { + "base": null, + "refs": { + "RecurringCharge$Frequency": "

    The frequency of the recurring charge.

    " + } + }, + "RecurringChargesList": { + "base": null, + "refs": { + "ReservedInstances$RecurringCharges": "

    The recurring charge tag assigned to the resource.

    ", + "ReservedInstancesOffering$RecurringCharges": "

    The recurring charge tag assigned to the resource.

    " + } + }, + "Region": { + "base": "

    Describes a region.

    ", + "refs": { + "RegionList$member": null + } + }, + "RegionList": { + "base": null, + "refs": { + "DescribeRegionsResult$Regions": "

    Information about one or more regions.

    " + } + }, + "RegionNameStringList": { + "base": null, + "refs": { + "DescribeRegionsRequest$RegionNames": "

    The names of one or more regions.

    " + } + }, + "RegisterImageRequest": { + "base": "

    Contains the parameters for RegisterImage.

    ", + "refs": { + } + }, + "RegisterImageResult": { + "base": "

    Contains the output of RegisterImage.

    ", + "refs": { + } + }, + "RejectVpcPeeringConnectionRequest": { + "base": "

    Contains the parameters for RejectVpcPeeringConnection.

    ", + "refs": { + } + }, + "RejectVpcPeeringConnectionResult": { + "base": "

    Contains the output of RejectVpcPeeringConnection.

    ", + "refs": { + } + }, + "ReleaseAddressRequest": { + "base": "

    Contains the parameters for ReleaseAddress.

    ", + "refs": { + } + }, + "ReleaseHostsRequest": { + "base": "

    Contains the parameters for ReleaseHosts.

    ", + "refs": { + } + }, + "ReleaseHostsResult": { + "base": "

    Contains the output of ReleaseHosts.

    ", + "refs": { + } + }, + "ReplaceNetworkAclAssociationRequest": { + "base": "

    Contains the parameters for ReplaceNetworkAclAssociation.

    ", + "refs": { + } + }, + "ReplaceNetworkAclAssociationResult": { + "base": "

    Contains the output of ReplaceNetworkAclAssociation.

    ", + "refs": { + } + }, + "ReplaceNetworkAclEntryRequest": { + "base": "

    Contains the parameters for ReplaceNetworkAclEntry.

    ", + "refs": { + } + }, + "ReplaceRouteRequest": { + "base": "

    Contains the parameters for ReplaceRoute.

    ", + "refs": { + } + }, + "ReplaceRouteTableAssociationRequest": { + "base": "

    Contains the parameters for ReplaceRouteTableAssociation.

    ", + "refs": { + } + }, + "ReplaceRouteTableAssociationResult": { + "base": "

    Contains the output of ReplaceRouteTableAssociation.

    ", + "refs": { + } + }, + "ReportInstanceReasonCodes": { + "base": null, + "refs": { + "ReasonCodesList$member": null + } + }, + "ReportInstanceStatusRequest": { + "base": "

    Contains the parameters for ReportInstanceStatus.

    ", + "refs": { + } + }, + "ReportStatusType": { + "base": null, + "refs": { + "ReportInstanceStatusRequest$Status": "

    The status of all instances listed.

    " + } + }, + "RequestHostIdList": { + "base": null, + "refs": { + "DescribeHostsRequest$HostIds": "

    The IDs of the Dedicated hosts. The IDs are used for targeted instance launches.

    ", + "ModifyHostsRequest$HostIds": "

    The host IDs of the Dedicated hosts you want to modify.

    ", + "ReleaseHostsRequest$HostIds": "

    The IDs of the Dedicated hosts you want to release.

    " + } + }, + "RequestSpotFleetRequest": { + "base": "

    Contains the parameters for RequestSpotFleet.

    ", + "refs": { + } + }, + "RequestSpotFleetResponse": { + "base": "

    Contains the output of RequestSpotFleet.

    ", + "refs": { + } + }, + "RequestSpotInstancesRequest": { + "base": "

    Contains the parameters for RequestSpotInstances.

    ", + "refs": { + } + }, + "RequestSpotInstancesResult": { + "base": "

    Contains the output of RequestSpotInstances.

    ", + "refs": { + } + }, + "RequestSpotLaunchSpecification": { + "base": "

    Describes the launch specification for an instance.

    ", + "refs": { + "RequestSpotInstancesRequest$LaunchSpecification": null + } + }, + "Reservation": { + "base": "

    Describes a reservation.

    ", + "refs": { + "ReservationList$member": null + } + }, + "ReservationList": { + "base": null, + "refs": { + "DescribeInstancesResult$Reservations": "

    Zero or more reservations.

    " + } + }, + "ReservedInstanceLimitPrice": { + "base": "

    Describes the limit price of a Reserved Instance offering.

    ", + "refs": { + "PurchaseReservedInstancesOfferingRequest$LimitPrice": "

    Specified for Reserved Instance Marketplace offerings to limit the total order and ensure that the Reserved Instances are not purchased at unexpected prices.
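
    A hedged sketch of capping a Marketplace purchase; the offering ID and amount are placeholders:

```go
package sketches

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// buyWithCeiling caps the total order so the purchase can't clear at an
// unexpected price.
func buyWithCeiling(svc *ec2.EC2) (*string, error) {
	out, err := svc.PurchaseReservedInstancesOffering(&ec2.PurchaseReservedInstancesOfferingInput{
		ReservedInstancesOfferingId: aws.String("9a06095a-bdc6-47fe-a94a-2a382f016040"), // placeholder
		InstanceCount:               aws.Int64(2),
		LimitPrice: &ec2.ReservedInstanceLimitPrice{
			Amount:       aws.Float64(500.0),
			CurrencyCode: aws.String("USD"),
		},
	})
	if err != nil {
		return nil, err
	}
	return out.ReservedInstancesId, nil
}
```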

    " + } + }, + "ReservedInstanceState": { + "base": null, + "refs": { + "ReservedInstances$State": "

    The state of the Reserved Instance purchase.

    " + } + }, + "ReservedInstances": { + "base": "

    Describes a Reserved Instance.

    ", + "refs": { + "ReservedInstancesList$member": null + } + }, + "ReservedInstancesConfiguration": { + "base": "

    Describes the configuration settings for the modified Reserved Instances.

    ", + "refs": { + "ReservedInstancesConfigurationList$member": null, + "ReservedInstancesModificationResult$TargetConfiguration": "

    The target Reserved Instances configurations supplied as part of the modification request.

    " + } + }, + "ReservedInstancesConfigurationList": { + "base": null, + "refs": { + "ModifyReservedInstancesRequest$TargetConfigurations": "

    The configuration settings for the Reserved Instances to modify.

    " + } + }, + "ReservedInstancesId": { + "base": "

    Describes the ID of a Reserved Instance.

    ", + "refs": { + "ReservedIntancesIds$member": null + } + }, + "ReservedInstancesIdStringList": { + "base": null, + "refs": { + "DescribeReservedInstancesRequest$ReservedInstancesIds": "

    One or more Reserved Instance IDs.

    Default: Describes all your Reserved Instances, or only those otherwise specified.

    ", + "ModifyReservedInstancesRequest$ReservedInstancesIds": "

    The IDs of the Reserved Instances to modify.

    " + } + }, + "ReservedInstancesList": { + "base": null, + "refs": { + "DescribeReservedInstancesResult$ReservedInstances": "

    A list of Reserved Instances.

    " + } + }, + "ReservedInstancesListing": { + "base": "

    Describes a Reserved Instance listing.

    ", + "refs": { + "ReservedInstancesListingList$member": null + } + }, + "ReservedInstancesListingList": { + "base": null, + "refs": { + "CancelReservedInstancesListingResult$ReservedInstancesListings": "

    The Reserved Instance listing.

    ", + "CreateReservedInstancesListingResult$ReservedInstancesListings": "

    Information about the Reserved Instance listing.

    ", + "DescribeReservedInstancesListingsResult$ReservedInstancesListings": "

    Information about the Reserved Instance listing.

    " + } + }, + "ReservedInstancesModification": { + "base": "

    Describes a Reserved Instance modification.

    ", + "refs": { + "ReservedInstancesModificationList$member": null + } + }, + "ReservedInstancesModificationIdStringList": { + "base": null, + "refs": { + "DescribeReservedInstancesModificationsRequest$ReservedInstancesModificationIds": "

    IDs for the submitted modification request.

    " + } + }, + "ReservedInstancesModificationList": { + "base": null, + "refs": { + "DescribeReservedInstancesModificationsResult$ReservedInstancesModifications": "

    The Reserved Instance modification information.

    " + } + }, + "ReservedInstancesModificationResult": { + "base": "

    Describes the modification request or requests.

    ", + "refs": { + "ReservedInstancesModificationResultList$member": null + } + }, + "ReservedInstancesModificationResultList": { + "base": null, + "refs": { + "ReservedInstancesModification$ModificationResults": "

    Contains target configurations along with their corresponding new Reserved Instance IDs.

    " + } + }, + "ReservedInstancesOffering": { + "base": "

    Describes a Reserved Instance offering.

    ", + "refs": { + "ReservedInstancesOfferingList$member": null + } + }, + "ReservedInstancesOfferingIdStringList": { + "base": null, + "refs": { + "DescribeReservedInstancesOfferingsRequest$ReservedInstancesOfferingIds": "

    One or more Reserved Instances offering IDs.

    " + } + }, + "ReservedInstancesOfferingList": { + "base": null, + "refs": { + "DescribeReservedInstancesOfferingsResult$ReservedInstancesOfferings": "

    A list of Reserved Instances offerings.

    " + } + }, + "ReservedIntancesIds": { + "base": null, + "refs": { + "ReservedInstancesModification$ReservedInstancesIds": "

    The IDs of one or more Reserved Instances.

    " + } + }, + "ResetImageAttributeName": { + "base": null, + "refs": { + "ResetImageAttributeRequest$Attribute": "

    The attribute to reset (currently you can only reset the launch permission attribute).

    " + } + }, + "ResetImageAttributeRequest": { + "base": "

    Contains the parameters for ResetImageAttribute.

    ", + "refs": { + } + }, + "ResetInstanceAttributeRequest": { + "base": "

    Contains the parameters for ResetInstanceAttribute.

    ", + "refs": { + } + }, + "ResetNetworkInterfaceAttributeRequest": { + "base": "

    Contains the parameters for ResetNetworkInterfaceAttribute.

    ", + "refs": { + } + }, + "ResetSnapshotAttributeRequest": { + "base": "

    Contains the parameters for ResetSnapshotAttribute.

    ", + "refs": { + } + }, + "ResourceIdList": { + "base": null, + "refs": { + "CreateTagsRequest$Resources": "

    The IDs of one or more resources to tag. For example, ami-1a2b3c4d.
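
    Because Resources is a list, one call can tag several resources of mixed types. A minimal sketch with placeholder IDs:

```go
package sketches

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// tagResources applies one tag to an AMI and an instance in a single call.
func tagResources(svc *ec2.EC2) error {
	_, err := svc.CreateTags(&ec2.CreateTagsInput{
		Resources: []*string{
			aws.String("ami-1a2b3c4d"),        // placeholder
			aws.String("i-1234567890abcdef0"), // placeholder
		},
		Tags: []*ec2.Tag{
			{Key: aws.String("Stack"), Value: aws.String("production")},
		},
	})
	return err
}
```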

    ", + "DeleteTagsRequest$Resources": "

    The ID of the resource. For example, ami-1a2b3c4d. You can specify more than one resource ID.

    " + } + }, + "ResourceType": { + "base": null, + "refs": { + "TagDescription$ResourceType": "

    The resource type.

    " + } + }, + "ResponseHostIdList": { + "base": null, + "refs": { + "AllocateHostsResult$HostIds": "

    The ID of the allocated Dedicated host. This is used when you want to launch an instance onto a specific host.

    ", + "ModifyHostsResult$Successful": "

    The IDs of the Dedicated hosts that were successfully modified.

    ", + "ReleaseHostsResult$Successful": "

    The IDs of the Dedicated hosts that were successfully released.

    " + } + }, + "RestorableByStringList": { + "base": null, + "refs": { + "DescribeSnapshotsRequest$RestorableByUserIds": "

    One or more AWS account IDs that can create volumes from the snapshot.

    " + } + }, + "RestoreAddressToClassicRequest": { + "base": "

    Contains the parameters for RestoreAddressToClassic.

    ", + "refs": { + } + }, + "RestoreAddressToClassicResult": { + "base": "

    Contains the output of RestoreAddressToClassic.

    ", + "refs": { + } + }, + "RevokeSecurityGroupEgressRequest": { + "base": "

    Contains the parameters for RevokeSecurityGroupEgress.

    ", + "refs": { + } + }, + "RevokeSecurityGroupIngressRequest": { + "base": "

    Contains the parameters for RevokeSecurityGroupIngress.

    ", + "refs": { + } + }, + "Route": { + "base": "

    Describes a route in a route table.

    ", + "refs": { + "RouteList$member": null + } + }, + "RouteList": { + "base": null, + "refs": { + "RouteTable$Routes": "

    The routes in the route table.

    " + } + }, + "RouteOrigin": { + "base": null, + "refs": { + "Route$Origin": "

    Describes how the route was created.

    • CreateRouteTable - The route was automatically created when the route table was created.

    • CreateRoute - The route was manually added to the route table.

    • EnableVgwRoutePropagation - The route was propagated by route propagation.

    " + } + }, + "RouteState": { + "base": null, + "refs": { + "Route$State": "

    The state of the route. The blackhole state indicates that the route's target isn't available (for example, the specified gateway isn't attached to the VPC, or the specified NAT instance has been terminated).

    " + } + }, + "RouteTable": { + "base": "

    Describes a route table.

    ", + "refs": { + "CreateRouteTableResult$RouteTable": "

    Information about the route table.

    ", + "RouteTableList$member": null + } + }, + "RouteTableAssociation": { + "base": "

    Describes an association between a route table and a subnet.

    ", + "refs": { + "RouteTableAssociationList$member": null + } + }, + "RouteTableAssociationList": { + "base": null, + "refs": { + "RouteTable$Associations": "

    The associations between the route table and one or more subnets.

    " + } + }, + "RouteTableList": { + "base": null, + "refs": { + "DescribeRouteTablesResult$RouteTables": "

    Information about one or more route tables.

    " + } + }, + "RuleAction": { + "base": null, + "refs": { + "CreateNetworkAclEntryRequest$RuleAction": "

    Indicates whether to allow or deny the traffic that matches the rule.

    ", + "NetworkAclEntry$RuleAction": "

    Indicates whether to allow or deny the traffic that matches the rule.

    ", + "ReplaceNetworkAclEntryRequest$RuleAction": "

    Indicates whether to allow or deny the traffic that matches the rule.

    " + } + }, + "RunInstancesMonitoringEnabled": { + "base": "

    Describes the monitoring for the instance.

    ", + "refs": { + "LaunchSpecification$Monitoring": null, + "RequestSpotLaunchSpecification$Monitoring": null, + "RunInstancesRequest$Monitoring": "

    The monitoring for the instance.

    " + } + }, + "RunInstancesRequest": { + "base": "

    Contains the parameters for RunInstances.

    ", + "refs": { + } + }, + "RunScheduledInstancesRequest": { + "base": "

    Contains the parameters for RunScheduledInstances.

    ", + "refs": { + } + }, + "RunScheduledInstancesResult": { + "base": "

    Contains the output of RunScheduledInstances.

    ", + "refs": { + } + }, + "S3Storage": { + "base": "

    Describes the Amazon S3 storage parameters for an instance store-backed AMI.

    ", + "refs": { + "Storage$S3": "

    An Amazon S3 storage location.

    " + } + }, + "ScheduledInstance": { + "base": "

    Describes a Scheduled Instance.

    ", + "refs": { + "PurchasedScheduledInstanceSet$member": null, + "ScheduledInstanceSet$member": null + } + }, + "ScheduledInstanceAvailability": { + "base": "

    Describes a schedule that is available for your Scheduled Instances.

    ", + "refs": { + "ScheduledInstanceAvailabilitySet$member": null + } + }, + "ScheduledInstanceAvailabilitySet": { + "base": null, + "refs": { + "DescribeScheduledInstanceAvailabilityResult$ScheduledInstanceAvailabilitySet": "

    Information about the available Scheduled Instances.

    " + } + }, + "ScheduledInstanceIdRequestSet": { + "base": null, + "refs": { + "DescribeScheduledInstancesRequest$ScheduledInstanceIds": "

    One or more Scheduled Instance IDs.

    " + } + }, + "ScheduledInstanceRecurrence": { + "base": "

    Describes the recurring schedule for a Scheduled Instance.

    ", + "refs": { + "ScheduledInstance$Recurrence": "

    The schedule recurrence.

    ", + "ScheduledInstanceAvailability$Recurrence": "

    The schedule recurrence.

    " + } + }, + "ScheduledInstanceRecurrenceRequest": { + "base": "

    Describes the recurring schedule for a Scheduled Instance.

    ", + "refs": { + "DescribeScheduledInstanceAvailabilityRequest$Recurrence": "

    The schedule recurrence.

    " + } + }, + "ScheduledInstanceSet": { + "base": null, + "refs": { + "DescribeScheduledInstancesResult$ScheduledInstanceSet": "

    Information about the Scheduled Instances.

    " + } + }, + "ScheduledInstancesBlockDeviceMapping": { + "base": "

    Describes a block device mapping for a Scheduled Instance.

    ", + "refs": { + "ScheduledInstancesBlockDeviceMappingSet$member": null + } + }, + "ScheduledInstancesBlockDeviceMappingSet": { + "base": null, + "refs": { + "ScheduledInstancesLaunchSpecification$BlockDeviceMappings": "

    One or more block device mapping entries.

    " + } + }, + "ScheduledInstancesEbs": { + "base": "

    Describes an EBS volume for a Scheduled Instance.

    ", + "refs": { + "ScheduledInstancesBlockDeviceMapping$Ebs": "

    Parameters used to set up EBS volumes automatically when the instance is launched.

    " + } + }, + "ScheduledInstancesIamInstanceProfile": { + "base": "

    Describes an IAM instance profile for a Scheduled Instance.

    ", + "refs": { + "ScheduledInstancesLaunchSpecification$IamInstanceProfile": "

    The IAM instance profile.

    " + } + }, + "ScheduledInstancesLaunchSpecification": { + "base": "

    Describes the launch specification for a Scheduled Instance.

    If you are launching the Scheduled Instance in EC2-VPC, you must specify the ID of the subnet. You can specify the subnet using either SubnetId or NetworkInterface.
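
    A sketch of the SubnetId variant of that requirement; all IDs are placeholders:

```go
package sketches

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// runScheduled launches one Scheduled Instance into EC2-VPC, satisfying the
// subnet requirement via SubnetId rather than a network interface.
func runScheduled(svc *ec2.EC2) ([]*string, error) {
	out, err := svc.RunScheduledInstances(&ec2.RunScheduledInstancesInput{
		ScheduledInstanceId: aws.String("sci-1234-1234-1234-1234-123456789012"), // placeholder
		InstanceCount:       aws.Int64(1),
		LaunchSpecification: &ec2.ScheduledInstancesLaunchSpecification{
			ImageId:      aws.String("ami-12345678"),    // placeholder
			InstanceType: aws.String("c4.large"),
			SubnetId:     aws.String("subnet-12345678"), // placeholder
		},
	})
	if err != nil {
		return nil, err
	}
	return out.InstanceIdSet, nil
}
```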

    ", + "refs": { + "RunScheduledInstancesRequest$LaunchSpecification": "

    The launch specification.

    " + } + }, + "ScheduledInstancesMonitoring": { + "base": "

    Describes whether monitoring is enabled for a Scheduled Instance.

    ", + "refs": { + "ScheduledInstancesLaunchSpecification$Monitoring": "

    Enable or disable monitoring for the instances.

    " + } + }, + "ScheduledInstancesNetworkInterface": { + "base": "

    Describes a network interface for a Scheduled Instance.

    ", + "refs": { + "ScheduledInstancesNetworkInterfaceSet$member": null + } + }, + "ScheduledInstancesNetworkInterfaceSet": { + "base": null, + "refs": { + "ScheduledInstancesLaunchSpecification$NetworkInterfaces": "

    One or more network interfaces.

    " + } + }, + "ScheduledInstancesPlacement": { + "base": "

    Describes the placement for a Scheduled Instance.

    ", + "refs": { + "ScheduledInstancesLaunchSpecification$Placement": "

    The placement information.

    " + } + }, + "ScheduledInstancesPrivateIpAddressConfig": { + "base": "

    Describes a private IP address for a Scheduled Instance.

    ", + "refs": { + "PrivateIpAddressConfigSet$member": null + } + }, + "ScheduledInstancesSecurityGroupIdSet": { + "base": null, + "refs": { + "ScheduledInstancesLaunchSpecification$SecurityGroupIds": "

    The IDs of one or more security groups.

    ", + "ScheduledInstancesNetworkInterface$Groups": "

    The IDs of one or more security groups.

    " + } + }, + "SecurityGroup": { + "base": "

    Describes a security group.

    ", + "refs": { + "SecurityGroupList$member": null + } + }, + "SecurityGroupIdStringList": { + "base": null, + "refs": { + "CreateNetworkInterfaceRequest$Groups": "

    The IDs of one or more security groups.

    ", + "ImportInstanceLaunchSpecification$GroupIds": "

    One or more security group IDs.

    ", + "InstanceNetworkInterfaceSpecification$Groups": "

    The IDs of the security groups for the network interface. Applies only if creating a network interface when launching an instance.

    ", + "ModifyNetworkInterfaceAttributeRequest$Groups": "

    Changes the security groups for the network interface. The new set of groups you specify replaces the current set. You must specify at least one group, even if it's just the default security group in the VPC. You must specify the ID of the security group, not the name.

    ", + "RunInstancesRequest$SecurityGroupIds": "

    One or more security group IDs. You can create a security group using CreateSecurityGroup.

    Default: Amazon EC2 uses the default security group.

    " + } + }, + "SecurityGroupList": { + "base": null, + "refs": { + "DescribeSecurityGroupsResult$SecurityGroups": "

    Information about one or more security groups.

    " + } + }, + "SecurityGroupReference": { + "base": "

    Describes a VPC with a security group that references your security group.

    ", + "refs": { + "SecurityGroupReferences$member": null + } + }, + "SecurityGroupReferences": { + "base": null, + "refs": { + "DescribeSecurityGroupReferencesResult$SecurityGroupReferenceSet": "

    Information about the VPCs with the referencing security groups.

    " + } + }, + "SecurityGroupStringList": { + "base": null, + "refs": { + "ImportInstanceLaunchSpecification$GroupNames": "

    One or more security group names.

    ", + "RunInstancesRequest$SecurityGroups": "

    [EC2-Classic, default VPC] One or more security group names. For a nondefault VPC, you must use security group IDs instead.

    Default: Amazon EC2 uses the default security group.

    " + } + }, + "ShutdownBehavior": { + "base": null, + "refs": { + "ImportInstanceLaunchSpecification$InstanceInitiatedShutdownBehavior": "

    Indicates whether an instance stops or terminates when you initiate shutdown from the instance (using the operating system command for system shutdown).

    ", + "RunInstancesRequest$InstanceInitiatedShutdownBehavior": "

    Indicates whether an instance stops or terminates when you initiate shutdown from the instance (using the operating system command for system shutdown).

    Default: stop

    " + } + }, + "SlotDateTimeRangeRequest": { + "base": "

    Describes the time period for a Scheduled Instance to start its first schedule. The time period must span less than one day.

    ", + "refs": { + "DescribeScheduledInstanceAvailabilityRequest$FirstSlotStartTimeRange": "

    The time period for the first schedule to start.

    " + } + }, + "SlotStartTimeRangeRequest": { + "base": "

    Describes the time period for a Scheduled Instance to start its first schedule.

    ", + "refs": { + "DescribeScheduledInstancesRequest$SlotStartTimeRange": "

    The time period for the first schedule to start.

    " + } + }, + "Snapshot": { + "base": "

    Describes a snapshot.

    ", + "refs": { + "SnapshotList$member": null + } + }, + "SnapshotAttributeName": { + "base": null, + "refs": { + "DescribeSnapshotAttributeRequest$Attribute": "

    The snapshot attribute you would like to view.

    ", + "ModifySnapshotAttributeRequest$Attribute": "

    The snapshot attribute to modify.

    Only volume creation permissions may be modified at the customer level.

    ", + "ResetSnapshotAttributeRequest$Attribute": "

    The attribute to reset. Currently, only the attribute for permission to create volumes can be reset.

    " + } + }, + "SnapshotDetail": { + "base": "

    Describes the snapshot created from the imported disk.

    ", + "refs": { + "SnapshotDetailList$member": null + } + }, + "SnapshotDetailList": { + "base": null, + "refs": { + "ImportImageResult$SnapshotDetails": "

    Information about the snapshots.

    ", + "ImportImageTask$SnapshotDetails": "

    Information about the snapshots.

    " + } + }, + "SnapshotDiskContainer": { + "base": "

    The disk container object for the import snapshot request.

    ", + "refs": { + "ImportSnapshotRequest$DiskContainer": "

    Information about the disk container.

    " + } + }, + "SnapshotIdStringList": { + "base": null, + "refs": { + "DescribeSnapshotsRequest$SnapshotIds": "

    One or more snapshot IDs.

    Default: Describes snapshots for which you have launch permissions.

    " + } + }, + "SnapshotList": { + "base": null, + "refs": { + "DescribeSnapshotsResult$Snapshots": "

    Information about the snapshots.

    " + } + }, + "SnapshotState": { + "base": null, + "refs": { + "Snapshot$State": "

    The snapshot state.

    " + } + }, + "SnapshotTaskDetail": { + "base": "

    Details about the import snapshot task.

    ", + "refs": { + "ImportSnapshotResult$SnapshotTaskDetail": "

    Information about the import snapshot task.

    ", + "ImportSnapshotTask$SnapshotTaskDetail": "

    Describes an import snapshot task.

    " + } + }, + "SpotDatafeedSubscription": { + "base": "

    Describes the data feed for a Spot instance.

    ", + "refs": { + "CreateSpotDatafeedSubscriptionResult$SpotDatafeedSubscription": "

    The Spot instance data feed subscription.

    ", + "DescribeSpotDatafeedSubscriptionResult$SpotDatafeedSubscription": "

    The Spot instance data feed subscription.

    " + } + }, + "SpotFleetLaunchSpecification": { + "base": "

    Describes the launch specification for one or more Spot instances.

    ", + "refs": { + "LaunchSpecsList$member": null + } + }, + "SpotFleetMonitoring": { + "base": "

    Describes whether monitoring is enabled.

    ", + "refs": { + "SpotFleetLaunchSpecification$Monitoring": "

    Enable or disable monitoring for the instances.

    " + } + }, + "SpotFleetRequestConfig": { + "base": "

    Describes a Spot fleet request.

    ", + "refs": { + "SpotFleetRequestConfigSet$member": null + } + }, + "SpotFleetRequestConfigData": { + "base": "

    Describes the configuration of a Spot fleet request.

    ", + "refs": { + "RequestSpotFleetRequest$SpotFleetRequestConfig": "

    The configuration for the Spot fleet request.

    ", + "SpotFleetRequestConfig$SpotFleetRequestConfig": "

    Information about the configuration of the Spot fleet request.

    " + } + }, + "SpotFleetRequestConfigSet": { + "base": null, + "refs": { + "DescribeSpotFleetRequestsResponse$SpotFleetRequestConfigs": "

    Information about the configuration of your Spot fleet.

    " + } + }, + "SpotInstanceRequest": { + "base": "

    Describes a Spot instance request.

    ", + "refs": { + "SpotInstanceRequestList$member": null + } + }, + "SpotInstanceRequestIdList": { + "base": null, + "refs": { + "CancelSpotInstanceRequestsRequest$SpotInstanceRequestIds": "

    One or more Spot instance request IDs.

    ", + "DescribeSpotInstanceRequestsRequest$SpotInstanceRequestIds": "

    One or more Spot instance request IDs.

    " + } + }, + "SpotInstanceRequestList": { + "base": null, + "refs": { + "DescribeSpotInstanceRequestsResult$SpotInstanceRequests": "

    One or more Spot instance requests.

    ", + "RequestSpotInstancesResult$SpotInstanceRequests": "

    One or more Spot instance requests.

    " + } + }, + "SpotInstanceState": { + "base": null, + "refs": { + "SpotInstanceRequest$State": "

    The state of the Spot instance request. Spot bid status information can help you track your Spot instance requests. For more information, see Spot Bid Status in the Amazon Elastic Compute Cloud User Guide.

    " + } + }, + "SpotInstanceStateFault": { + "base": "

    Describes a Spot instance state change.

    ", + "refs": { + "SpotDatafeedSubscription$Fault": "

    The fault codes for the Spot instance request, if any.

    ", + "SpotInstanceRequest$Fault": "

    The fault codes for the Spot instance request, if any.

    " + } + }, + "SpotInstanceStatus": { + "base": "

    Describes the status of a Spot instance request.

    ", + "refs": { + "SpotInstanceRequest$Status": "

    The status code and status message describing the Spot instance request.

    " + } + }, + "SpotInstanceType": { + "base": null, + "refs": { + "RequestSpotInstancesRequest$Type": "

    The Spot instance request type.

    Default: one-time

    ", + "SpotInstanceRequest$Type": "

    The Spot instance request type.

    " + } + }, + "SpotPlacement": { + "base": "

    Describes Spot instance placement.

    ", + "refs": { + "LaunchSpecification$Placement": "

    The placement information for the instance.

    ", + "RequestSpotLaunchSpecification$Placement": "

    The placement information for the instance.

    ", + "SpotFleetLaunchSpecification$Placement": "

    The placement information.

    " + } + }, + "SpotPrice": { + "base": "

    Describes the maximum hourly price (bid) for any Spot instance launched to fulfill the request.

    ", + "refs": { + "SpotPriceHistoryList$member": null + } + }, + "SpotPriceHistoryList": { + "base": null, + "refs": { + "DescribeSpotPriceHistoryResult$SpotPriceHistory": "

    The historical Spot prices.

    " + } + }, + "StaleIpPermission": { + "base": "

    Describes a stale rule in a security group.

    ", + "refs": { + "StaleIpPermissionSet$member": null + } + }, + "StaleIpPermissionSet": { + "base": null, + "refs": { + "StaleSecurityGroup$StaleIpPermissions": "

    Information about the stale inbound rules in the security group.

    ", + "StaleSecurityGroup$StaleIpPermissionsEgress": "

    Information about the stale outbound rules in the security group.

    " + } + }, + "StaleSecurityGroup": { + "base": "

    Describes a stale security group (a security group that contains stale rules).

    ", + "refs": { + "StaleSecurityGroupSet$member": null + } + }, + "StaleSecurityGroupSet": { + "base": null, + "refs": { + "DescribeStaleSecurityGroupsResult$StaleSecurityGroupSet": "

    Information about the stale security groups.

    " + } + }, + "StartInstancesRequest": { + "base": "

    Contains the parameters for StartInstances.

    ", + "refs": { + } + }, + "StartInstancesResult": { + "base": "

    Contains the output of StartInstances.

    ", + "refs": { + } + }, + "State": { + "base": null, + "refs": { + "VpcEndpoint$State": "

    The state of the VPC endpoint.

    " + } + }, + "StateReason": { + "base": "

    Describes a state change.

    ", + "refs": { + "Image$StateReason": "

    The reason for the state change.

    ", + "Instance$StateReason": "

    The reason for the most recent state transition.

    " + } + }, + "Status": { + "base": null, + "refs": { + "MoveAddressToVpcResult$Status": "

    The status of the move of the IP address.

    ", + "RestoreAddressToClassicResult$Status": "

    The move status for the IP address.

    " + } + }, + "StatusName": { + "base": null, + "refs": { + "InstanceStatusDetails$Name": "

    The type of instance status.

    " + } + }, + "StatusType": { + "base": null, + "refs": { + "InstanceStatusDetails$Status": "

    The status.

    " + } + }, + "StopInstancesRequest": { + "base": "

    Contains the parameters for StopInstances.

    ", + "refs": { + } + }, + "StopInstancesResult": { + "base": "

    Contains the output of StopInstances.

    ", + "refs": { + } + }, + "Storage": { + "base": "

    Describes the storage location for an instance store-backed AMI.

    ", + "refs": { + "BundleInstanceRequest$Storage": "

    The bucket in which to store the AMI. You can specify a bucket that you already own or a new bucket that Amazon EC2 creates on your behalf. If you specify a bucket that belongs to someone else, Amazon EC2 returns an error.

    ", + "BundleTask$Storage": "

    The Amazon S3 storage locations.

    " + } + }, + "String": { + "base": null, + "refs": { + "AcceptVpcPeeringConnectionRequest$VpcPeeringConnectionId": "

    The ID of the VPC peering connection.

    ", + "AccountAttribute$AttributeName": "

    The name of the account attribute.

    ", + "AccountAttributeValue$AttributeValue": "

    The value of the attribute.

    ", + "ActiveInstance$InstanceType": "

    The instance type.

    ", + "ActiveInstance$InstanceId": "

    The ID of the instance.

    ", + "ActiveInstance$SpotInstanceRequestId": "

    The ID of the Spot instance request.

    ", + "Address$InstanceId": "

    The ID of the instance that the address is associated with (if any).

    ", + "Address$PublicIp": "

    The Elastic IP address.

    ", + "Address$AllocationId": "

    The ID representing the allocation of the address for use with EC2-VPC.

    ", + "Address$AssociationId": "

    The ID representing the association of the address with an instance in a VPC.

    ", + "Address$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "Address$NetworkInterfaceOwnerId": "

    The ID of the AWS account that owns the network interface.

    ", + "Address$PrivateIpAddress": "

    The private IP address associated with the Elastic IP address.

    ", + "AllocateAddressResult$PublicIp": "

    The Elastic IP address.

    ", + "AllocateAddressResult$AllocationId": "

    [EC2-VPC] The ID that AWS assigns to represent the allocation of the Elastic IP address for use with instances in a VPC.

    ", + "AllocateHostsRequest$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure idempotency of the request. For more information, see How to Ensure Idempotency in the Amazon Elastic Compute Cloud User Guide.

    ", + "AllocateHostsRequest$InstanceType": "

    Specify the instance type that you want your Dedicated hosts to be configured for. When you specify the instance type, that is the only instance type that you can launch onto that host.

    ", + "AllocateHostsRequest$AvailabilityZone": "

    The Availability Zone for the Dedicated hosts.

    ", + "AllocationIdList$member": null, + "AssignPrivateIpAddressesRequest$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "AssociateAddressRequest$InstanceId": "

    The ID of the instance. This is required for EC2-Classic. For EC2-VPC, you can specify either the instance ID or the network interface ID, but not both. The operation fails if you specify an instance ID unless exactly one network interface is attached.

    ", + "AssociateAddressRequest$PublicIp": "

    The Elastic IP address. This is required for EC2-Classic.

    ", + "AssociateAddressRequest$AllocationId": "

    [EC2-VPC] The allocation ID. This is required for EC2-VPC.

    ", + "AssociateAddressRequest$NetworkInterfaceId": "

    [EC2-VPC] The ID of the network interface. If the instance has more than one network interface, you must specify a network interface ID.

    ", + "AssociateAddressRequest$PrivateIpAddress": "

    [EC2-VPC] The primary or secondary private IP address to associate with the Elastic IP address. If no private IP address is specified, the Elastic IP address is associated with the primary private IP address.

    ", + "AssociateAddressResult$AssociationId": "

    [EC2-VPC] The ID that represents the association of the Elastic IP address with an instance.

    ", + "AssociateDhcpOptionsRequest$DhcpOptionsId": "

    The ID of the DHCP options set, or default to associate no DHCP options with the VPC.

    ", + "AssociateDhcpOptionsRequest$VpcId": "

    The ID of the VPC.

    ", + "AssociateRouteTableRequest$SubnetId": "

    The ID of the subnet.

    ", + "AssociateRouteTableRequest$RouteTableId": "

    The ID of the route table.

    ", + "AssociateRouteTableResult$AssociationId": "

    The route table association ID (needed to disassociate the route table).

    ", + "AttachClassicLinkVpcRequest$InstanceId": "

    The ID of an EC2-Classic instance to link to the ClassicLink-enabled VPC.

    ", + "AttachClassicLinkVpcRequest$VpcId": "

    The ID of a ClassicLink-enabled VPC.

    ", + "AttachInternetGatewayRequest$InternetGatewayId": "

    The ID of the Internet gateway.

    ", + "AttachInternetGatewayRequest$VpcId": "

    The ID of the VPC.

    ", + "AttachNetworkInterfaceRequest$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "AttachNetworkInterfaceRequest$InstanceId": "

    The ID of the instance.

    ", + "AttachNetworkInterfaceResult$AttachmentId": "

    The ID of the network interface attachment.

    ", + "AttachVolumeRequest$VolumeId": "

    The ID of the EBS volume. The volume and instance must be within the same Availability Zone.

    ", + "AttachVolumeRequest$InstanceId": "

    The ID of the instance.

    ", + "AttachVolumeRequest$Device": "

    The device name to expose to the instance (for example, /dev/sdh or xvdh).

    ", + "AttachVpnGatewayRequest$VpnGatewayId": "

    The ID of the virtual private gateway.

    ", + "AttachVpnGatewayRequest$VpcId": "

    The ID of the VPC.

    ", + "AttributeValue$Value": "

    The attribute value. Note that the value is case-sensitive.

    ", + "AuthorizeSecurityGroupEgressRequest$GroupId": "

    The ID of the security group.

    ", + "AuthorizeSecurityGroupEgressRequest$SourceSecurityGroupName": "

    The name of a destination security group. To authorize outbound access to a destination security group, we recommend that you use a set of IP permissions instead.

    ", + "AuthorizeSecurityGroupEgressRequest$SourceSecurityGroupOwnerId": "

    The AWS account number for a destination security group. To authorize outbound access to a destination security group, we recommend that you use a set of IP permissions instead.

    ", + "AuthorizeSecurityGroupEgressRequest$IpProtocol": "

    The IP protocol name or number. We recommend that you specify the protocol in a set of IP permissions instead.

    ", + "AuthorizeSecurityGroupEgressRequest$CidrIp": "

    The CIDR IP address range. We recommend that you specify the CIDR range in a set of IP permissions instead.

    ", + "AuthorizeSecurityGroupIngressRequest$GroupName": "

    [EC2-Classic, default VPC] The name of the security group.

    ", + "AuthorizeSecurityGroupIngressRequest$GroupId": "

    The ID of the security group. Required for a nondefault VPC.

    ", + "AuthorizeSecurityGroupIngressRequest$SourceSecurityGroupName": "

    [EC2-Classic, default VPC] The name of the source security group. You can't specify this parameter in combination with the following parameters: the CIDR IP address range, the start of the port range, the IP protocol, and the end of the port range. Creates rules that grant full ICMP, UDP, and TCP access. To create a rule with a specific IP protocol and port range, use a set of IP permissions instead. For EC2-VPC, the source security group must be in the same VPC.

    ", + "AuthorizeSecurityGroupIngressRequest$SourceSecurityGroupOwnerId": "

    [EC2-Classic] The AWS account number for the source security group, if the source security group is in a different account. You can't specify this parameter in combination with the following parameters: the CIDR IP address range, the IP protocol, the start of the port range, and the end of the port range. Creates rules that grant full ICMP, UDP, and TCP access. To create a rule with a specific IP protocol and port range, use a set of IP permissions instead.

    ", + "AuthorizeSecurityGroupIngressRequest$IpProtocol": "

    The IP protocol name (tcp, udp, icmp) or number (see Protocol Numbers). (VPC only) Use -1 to specify all.

    ", + "AuthorizeSecurityGroupIngressRequest$CidrIp": "

    The CIDR IP address range. You can't specify this parameter when specifying a source security group.

    ", + "AvailabilityZone$ZoneName": "

    The name of the Availability Zone.

    ", + "AvailabilityZone$RegionName": "

    The name of the region.

    ", + "AvailabilityZoneMessage$Message": "

    The message about the Availability Zone.

    ", + "BlockDeviceMapping$VirtualName": "

    The virtual device name (ephemeralN). Instance store volumes are numbered starting from 0. An instance type with 2 available instance store volumes can specify mappings for ephemeral0 and ephemeral1.The number of available instance store volumes depends on the instance type. After you connect to the instance, you must mount the volume.

    Constraints: For M3 instances, you must specify instance store volumes in the block device mapping for the instance. When you launch an M3 instance, we ignore any instance store volumes specified in the block device mapping for the AMI.

    ", + "BlockDeviceMapping$DeviceName": "

    The device name exposed to the instance (for example, /dev/sdh or xvdh).

    ", + "BlockDeviceMapping$NoDevice": "

    Suppresses the specified device included in the block device mapping of the AMI.

    ", + "BundleIdStringList$member": null, + "BundleInstanceRequest$InstanceId": "

    The ID of the instance to bundle.

    Type: String

    Default: None

    Required: Yes

    ", + "BundleTask$InstanceId": "

    The ID of the instance associated with this bundle task.

    ", + "BundleTask$BundleId": "

    The ID of the bundle task.

    ", + "BundleTask$Progress": "

    The level of task completion, as a percent (for example, 20%).

    ", + "BundleTaskError$Code": "

    The error code.

    ", + "BundleTaskError$Message": "

    The error message.

    ", + "CancelBundleTaskRequest$BundleId": "

    The ID of the bundle task.

    ", + "CancelConversionRequest$ConversionTaskId": "

    The ID of the conversion task.

    ", + "CancelConversionRequest$ReasonMessage": "

    The reason for canceling the conversion task.

    ", + "CancelExportTaskRequest$ExportTaskId": "

    The ID of the export task. This is the ID returned by CreateInstanceExportTask.

    ", + "CancelImportTaskRequest$ImportTaskId": "

    The ID of the import image or import snapshot task to be canceled.

    ", + "CancelImportTaskRequest$CancelReason": "

    The reason for canceling the task.

    ", + "CancelImportTaskResult$ImportTaskId": "

    The ID of the task being canceled.

    ", + "CancelImportTaskResult$State": "

    The current state of the task being canceled.

    ", + "CancelImportTaskResult$PreviousState": "

    The current state of the task being canceled.

    ", + "CancelReservedInstancesListingRequest$ReservedInstancesListingId": "

    The ID of the Reserved Instance listing.

    ", + "CancelSpotFleetRequestsError$Message": "

    The description for the error code.

    ", + "CancelSpotFleetRequestsErrorItem$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "CancelSpotFleetRequestsSuccessItem$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "CancelledSpotInstanceRequest$SpotInstanceRequestId": "

    The ID of the Spot instance request.

    ", + "ClassicLinkDnsSupport$VpcId": "

    The ID of the VPC.

    ", + "ClassicLinkInstance$InstanceId": "

    The ID of the instance.

    ", + "ClassicLinkInstance$VpcId": "

    The ID of the VPC.

    ", + "ClientData$Comment": "

    A user-defined comment about the disk upload.

    ", + "ConfirmProductInstanceRequest$ProductCode": "

    The product code. This must be a product code that you own.

    ", + "ConfirmProductInstanceRequest$InstanceId": "

    The ID of the instance.

    ", + "ConfirmProductInstanceResult$OwnerId": "

    The AWS account ID of the instance owner. This is only present if the product code is attached to the instance.

    ", + "ConversionIdStringList$member": null, + "ConversionTask$ConversionTaskId": "

    The ID of the conversion task.

    ", + "ConversionTask$ExpirationTime": "

    The time when the task expires. If the upload isn't complete before the expiration time, we automatically cancel the task.

    ", + "ConversionTask$StatusMessage": "

    The status message related to the conversion task.

    ", + "CopyImageRequest$SourceRegion": "

    The name of the region that contains the AMI to copy.

    ", + "CopyImageRequest$SourceImageId": "

    The ID of the AMI to copy.

    ", + "CopyImageRequest$Name": "

    The name of the new AMI in the destination region.

    ", + "CopyImageRequest$Description": "

    A description for the new AMI in the destination region.

    ", + "CopyImageRequest$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure idempotency of the request. For more information, see How to Ensure Idempotency in the Amazon Elastic Compute Cloud User Guide.

    ", + "CopyImageRequest$KmsKeyId": "

    The full ARN of the AWS Key Management Service (AWS KMS) CMK to use when encrypting the snapshots of an image during a copy operation. This parameter is only required if you want to use a non-default CMK; if this parameter is not specified, the default CMK for EBS is used. The ARN contains the arn:aws:kms namespace, followed by the region of the CMK, the AWS account ID of the CMK owner, the key namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. The specified CMK must exist in the region that the snapshot is being copied to. If a KmsKeyId is specified, the Encrypted flag must also be set.

    ", + "CopyImageResult$ImageId": "

    The ID of the new AMI.

    ", + "CopySnapshotRequest$SourceRegion": "

    The ID of the region that contains the snapshot to be copied.

    ", + "CopySnapshotRequest$SourceSnapshotId": "

    The ID of the EBS snapshot to copy.

    ", + "CopySnapshotRequest$Description": "

    A description for the EBS snapshot.

    ", + "CopySnapshotRequest$DestinationRegion": "

    The destination region to use in the PresignedUrl parameter of a snapshot copy operation. This parameter is only valid for specifying the destination region in a PresignedUrl parameter, where it is required.

    CopySnapshot sends the snapshot copy to the regional endpoint that you send the HTTP request to, such as ec2.us-east-1.amazonaws.com (in the AWS CLI, this is specified with the --region parameter or the default region in your AWS configuration file).

    ", + "CopySnapshotRequest$PresignedUrl": "

    The pre-signed URL that facilitates copying an encrypted snapshot. This parameter is only required when copying an encrypted snapshot with the Amazon EC2 Query API; it is available as an optional parameter in all other cases. The PresignedUrl should use the snapshot source endpoint, the CopySnapshot action, and include the SourceRegion, SourceSnapshotId, and DestinationRegion parameters. The PresignedUrl must be signed using AWS Signature Version 4. Because EBS snapshots are stored in Amazon S3, the signing algorithm for this parameter uses the same logic that is described in Authenticating Requests by Using Query Parameters (AWS Signature Version 4) in the Amazon Simple Storage Service API Reference. An invalid or improperly signed PresignedUrl will cause the copy operation to fail asynchronously, and the snapshot will move to an error state.

    ", + "CopySnapshotRequest$KmsKeyId": "

    The full ARN of the AWS Key Management Service (AWS KMS) CMK to use when creating the snapshot copy. This parameter is only required if you want to use a non-default CMK; if this parameter is not specified, the default CMK for EBS is used. The ARN contains the arn:aws:kms namespace, followed by the region of the CMK, the AWS account ID of the CMK owner, the key namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. The specified CMK must exist in the region that the snapshot is being copied to. If a KmsKeyId is specified, the Encrypted flag must also be set.

    ", + "CopySnapshotResult$SnapshotId": "

    The ID of the new snapshot.

    ", + "CreateCustomerGatewayRequest$PublicIp": "

    The Internet-routable IP address for the customer gateway's outside interface. The address must be static.

    ", + "CreateFlowLogsRequest$LogGroupName": "

    The name of the CloudWatch log group.

    ", + "CreateFlowLogsRequest$DeliverLogsPermissionArn": "

    The ARN for the IAM role that's used to post flow logs to a CloudWatch Logs log group.

    ", + "CreateFlowLogsRequest$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

    ", + "CreateFlowLogsResult$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure the idempotency of the request.

    ", + "CreateImageRequest$InstanceId": "

    The ID of the instance.

    ", + "CreateImageRequest$Name": "

    A name for the new image.

    Constraints: 3-128 alphanumeric characters, parentheses (()), square brackets ([]), spaces ( ), periods (.), slashes (/), dashes (-), single quotes ('), at-signs (@), or underscores(_)

    ", + "CreateImageRequest$Description": "

    A description for the new image.

    ", + "CreateImageResult$ImageId": "

    The ID of the new AMI.

    ", + "CreateInstanceExportTaskRequest$Description": "

    A description for the conversion task or the resource being exported. The maximum length is 255 bytes.

    ", + "CreateInstanceExportTaskRequest$InstanceId": "

    The ID of the instance.

    ", + "CreateKeyPairRequest$KeyName": "

    A unique name for the key pair.

    Constraints: Up to 255 ASCII characters

    ", + "CreateNatGatewayRequest$SubnetId": "

    The subnet in which to create the NAT gateway.

    ", + "CreateNatGatewayRequest$AllocationId": "

    The allocation ID of an Elastic IP address to associate with the NAT gateway. If the Elastic IP address is associated with another resource, you must first disassociate it.

    ", + "CreateNatGatewayRequest$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

    Constraint: Maximum 64 ASCII characters.

    ", + "CreateNatGatewayResult$ClientToken": "

    Unique, case-sensitive identifier to ensure the idempotency of the request. Only returned if a client token was provided in the request.

    ", + "CreateNetworkAclEntryRequest$NetworkAclId": "

    The ID of the network ACL.

    ", + "CreateNetworkAclEntryRequest$Protocol": "

    The protocol. A value of -1 means all protocols.

    ", + "CreateNetworkAclEntryRequest$CidrBlock": "

    The network range to allow or deny, in CIDR notation (for example 172.16.0.0/24).

    ", + "CreateNetworkAclRequest$VpcId": "

    The ID of the VPC.

    ", + "CreateNetworkInterfaceRequest$SubnetId": "

    The ID of the subnet to associate with the network interface.

    ", + "CreateNetworkInterfaceRequest$Description": "

    A description for the network interface.

    ", + "CreateNetworkInterfaceRequest$PrivateIpAddress": "

    The primary private IP address of the network interface. If you don't specify an IP address, Amazon EC2 selects one for you from the subnet range. If you specify an IP address, you cannot indicate any IP addresses specified in privateIpAddresses as primary (only one IP address can be designated as primary).

    ", + "CreatePlacementGroupRequest$GroupName": "

    A name for the placement group.

    Constraints: Up to 255 ASCII characters

    ", + "CreateReservedInstancesListingRequest$ReservedInstancesId": "

    The ID of the active Reserved Instance.

    ", + "CreateReservedInstancesListingRequest$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure idempotency of your listings. This helps avoid duplicate listings. For more information, see Ensuring Idempotency.

    ", + "CreateRouteRequest$RouteTableId": "

    The ID of the route table for the route.

    ", + "CreateRouteRequest$DestinationCidrBlock": "

    The CIDR address block used for the destination match. Routing decisions are based on the most specific match.

    ", + "CreateRouteRequest$GatewayId": "

    The ID of an Internet gateway or virtual private gateway attached to your VPC.

    ", + "CreateRouteRequest$InstanceId": "

    The ID of a NAT instance in your VPC. The operation fails if you specify an instance ID unless exactly one network interface is attached.

    ", + "CreateRouteRequest$NetworkInterfaceId": "

    The ID of a network interface.

    ", + "CreateRouteRequest$VpcPeeringConnectionId": "

    The ID of a VPC peering connection.

    ", + "CreateRouteRequest$NatGatewayId": "

    The ID of a NAT gateway.

    ", + "CreateRouteTableRequest$VpcId": "

    The ID of the VPC.

    ", + "CreateSecurityGroupRequest$GroupName": "

    The name of the security group.

    Constraints: Up to 255 characters in length

    Constraints for EC2-Classic: ASCII characters

    Constraints for EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!$*

    ", + "CreateSecurityGroupRequest$Description": "

    A description for the security group. This is informational only.

    Constraints: Up to 255 characters in length

    Constraints for EC2-Classic: ASCII characters

    Constraints for EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!$*

    ", + "CreateSecurityGroupRequest$VpcId": "

    [EC2-VPC] The ID of the VPC. Required for EC2-VPC.

    ", + "CreateSecurityGroupResult$GroupId": "

    The ID of the security group.

    ", + "CreateSnapshotRequest$VolumeId": "

    The ID of the EBS volume.

    ", + "CreateSnapshotRequest$Description": "

    A description for the snapshot.

    ", + "CreateSpotDatafeedSubscriptionRequest$Bucket": "

    The Amazon S3 bucket in which to store the Spot instance data feed.

    ", + "CreateSpotDatafeedSubscriptionRequest$Prefix": "

    A prefix for the data feed file names.

    ", + "CreateSubnetRequest$VpcId": "

    The ID of the VPC.

    ", + "CreateSubnetRequest$CidrBlock": "

    The network range for the subnet, in CIDR notation. For example, 10.0.0.0/24.

    ", + "CreateSubnetRequest$AvailabilityZone": "

    The Availability Zone for the subnet.

    Default: AWS selects one for you. If you create more than one subnet in your VPC, we may not necessarily select a different zone for each subnet.

    ", + "CreateVolumePermission$UserId": "

    The specific AWS account ID that is to be added or removed from a volume's list of create volume permissions.

    ", + "CreateVolumeRequest$SnapshotId": "

    The snapshot from which to create the volume.

    ", + "CreateVolumeRequest$AvailabilityZone": "

    The Availability Zone in which to create the volume. Use DescribeAvailabilityZones to list the Availability Zones that are currently available to you.

    ", + "CreateVolumeRequest$KmsKeyId": "

    The full ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use when creating the encrypted volume. This parameter is only required if you want to use a non-default CMK; if this parameter is not specified, the default CMK for EBS is used. The ARN contains the arn:aws:kms namespace, followed by the region of the CMK, the AWS account ID of the CMK owner, the key namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. If a KmsKeyId is specified, the Encrypted flag must also be set.

    ", + "CreateVpcEndpointRequest$VpcId": "

    The ID of the VPC in which the endpoint will be used.

    ", + "CreateVpcEndpointRequest$ServiceName": "

    The AWS service name, in the form com.amazonaws.region.service . To get a list of available services, use the DescribeVpcEndpointServices request.

    ", + "CreateVpcEndpointRequest$PolicyDocument": "

    A policy to attach to the endpoint that controls access to the service. The policy must be in valid JSON format. If this parameter is not specified, we attach a default policy that allows full access to the service.

    ", + "CreateVpcEndpointRequest$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

    ", + "CreateVpcEndpointResult$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure the idempotency of the request.

    ", + "CreateVpcPeeringConnectionRequest$VpcId": "

    The ID of the requester VPC.

    ", + "CreateVpcPeeringConnectionRequest$PeerVpcId": "

    The ID of the VPC with which you are creating the VPC peering connection.

    ", + "CreateVpcPeeringConnectionRequest$PeerOwnerId": "

    The AWS account ID of the owner of the peer VPC.

    Default: Your AWS account ID

    ", + "CreateVpcRequest$CidrBlock": "

    The network range for the VPC, in CIDR notation. For example, 10.0.0.0/16.

    ", + "CreateVpnConnectionRequest$Type": "

    The type of VPN connection (ipsec.1).

    ", + "CreateVpnConnectionRequest$CustomerGatewayId": "

    The ID of the customer gateway.

    ", + "CreateVpnConnectionRequest$VpnGatewayId": "

    The ID of the virtual private gateway.

    ", + "CreateVpnConnectionRouteRequest$VpnConnectionId": "

    The ID of the VPN connection.

    ", + "CreateVpnConnectionRouteRequest$DestinationCidrBlock": "

    The CIDR block associated with the local subnet of the customer network.

    ", + "CreateVpnGatewayRequest$AvailabilityZone": "

    The Availability Zone for the virtual private gateway.

    ", + "CustomerGateway$CustomerGatewayId": "

    The ID of the customer gateway.

    ", + "CustomerGateway$State": "

    The current state of the customer gateway (pending | available | deleting | deleted).

    ", + "CustomerGateway$Type": "

    The type of VPN connection the customer gateway supports (ipsec.1).

    ", + "CustomerGateway$IpAddress": "

    The Internet-routable IP address of the customer gateway's outside interface.

    ", + "CustomerGateway$BgpAsn": "

    The customer gateway's Border Gateway Protocol (BGP) Autonomous System Number (ASN).

    ", + "CustomerGatewayIdStringList$member": null, + "DeleteCustomerGatewayRequest$CustomerGatewayId": "

    The ID of the customer gateway.

    ", + "DeleteDhcpOptionsRequest$DhcpOptionsId": "

    The ID of the DHCP options set.

    ", + "DeleteInternetGatewayRequest$InternetGatewayId": "

    The ID of the Internet gateway.

    ", + "DeleteKeyPairRequest$KeyName": "

    The name of the key pair.

    ", + "DeleteNatGatewayRequest$NatGatewayId": "

    The ID of the NAT gateway.

    ", + "DeleteNatGatewayResult$NatGatewayId": "

    The ID of the NAT gateway.

    ", + "DeleteNetworkAclEntryRequest$NetworkAclId": "

    The ID of the network ACL.

    ", + "DeleteNetworkAclRequest$NetworkAclId": "

    The ID of the network ACL.

    ", + "DeleteNetworkInterfaceRequest$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "DeletePlacementGroupRequest$GroupName": "

    The name of the placement group.

    ", + "DeleteRouteRequest$RouteTableId": "

    The ID of the route table.

    ", + "DeleteRouteRequest$DestinationCidrBlock": "

    The CIDR range for the route. The value you specify must match the CIDR for the route exactly.

    ", + "DeleteRouteTableRequest$RouteTableId": "

    The ID of the route table.

    ", + "DeleteSecurityGroupRequest$GroupName": "

    [EC2-Classic, default VPC] The name of the security group. You can specify either the security group name or the security group ID.

    ", + "DeleteSecurityGroupRequest$GroupId": "

    The ID of the security group. Required for a nondefault VPC.

    ", + "DeleteSnapshotRequest$SnapshotId": "

    The ID of the EBS snapshot.

    ", + "DeleteSubnetRequest$SubnetId": "

    The ID of the subnet.

    ", + "DeleteVolumeRequest$VolumeId": "

    The ID of the volume.

    ", + "DeleteVpcPeeringConnectionRequest$VpcPeeringConnectionId": "

    The ID of the VPC peering connection.

    ", + "DeleteVpcRequest$VpcId": "

    The ID of the VPC.

    ", + "DeleteVpnConnectionRequest$VpnConnectionId": "

    The ID of the VPN connection.

    ", + "DeleteVpnConnectionRouteRequest$VpnConnectionId": "

    The ID of the VPN connection.

    ", + "DeleteVpnConnectionRouteRequest$DestinationCidrBlock": "

    The CIDR block associated with the local subnet of the customer network.

    ", + "DeleteVpnGatewayRequest$VpnGatewayId": "

    The ID of the virtual private gateway.

    ", + "DeregisterImageRequest$ImageId": "

    The ID of the AMI.

    ", + "DescribeClassicLinkInstancesRequest$NextToken": "

    The token to retrieve the next page of results.

    ", + "DescribeClassicLinkInstancesResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeFlowLogsRequest$NextToken": "

    The token to retrieve the next page of results.

    ", + "DescribeFlowLogsResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeHostsRequest$NextToken": "

    The token to retrieve the next page of results.

    ", + "DescribeHostsResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeIdFormatRequest$Resource": "

    The type of resource.

    ", + "DescribeIdentityIdFormatRequest$Resource": "

    The type of resource.

    ", + "DescribeIdentityIdFormatRequest$PrincipalArn": "

    The ARN of the principal, which can be an IAM role, IAM user, or the root user.

    ", + "DescribeImageAttributeRequest$ImageId": "

    The ID of the AMI.

    ", + "DescribeImportImageTasksRequest$NextToken": "

    A token that indicates the next page of results.

    ", + "DescribeImportImageTasksResult$NextToken": "

    The token to use to get the next page of results. This value is null when there are no more results to return.

    ", + "DescribeImportSnapshotTasksRequest$NextToken": "

    A token that indicates the next page of results.

    ", + "DescribeImportSnapshotTasksResult$NextToken": "

    The token to use to get the next page of results. This value is null when there are no more results to return.

    ", + "DescribeInstanceAttributeRequest$InstanceId": "

    The ID of the instance.

    ", + "DescribeInstanceStatusRequest$NextToken": "

    The token to retrieve the next page of results.

    ", + "DescribeInstanceStatusResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeInstancesRequest$NextToken": "

    The token to request the next page of results.

    ", + "DescribeInstancesResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeMovingAddressesRequest$NextToken": "

    The token to use to retrieve the next page of results.

    ", + "DescribeMovingAddressesResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeNatGatewaysRequest$NextToken": "

    The token to retrieve the next page of results.

    ", + "DescribeNatGatewaysResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeNetworkInterfaceAttributeRequest$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "DescribeNetworkInterfaceAttributeResult$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "DescribePrefixListsRequest$NextToken": "

    The token for the next set of items to return. (You received this token from a prior call.)

    ", + "DescribePrefixListsResult$NextToken": "

    The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

    ", + "DescribeReservedInstancesListingsRequest$ReservedInstancesId": "

    One or more Reserved Instance IDs.

    ", + "DescribeReservedInstancesListingsRequest$ReservedInstancesListingId": "

    One or more Reserved Instance listing IDs.

    ", + "DescribeReservedInstancesModificationsRequest$NextToken": "

    The token to retrieve the next page of results.

    ", + "DescribeReservedInstancesModificationsResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeReservedInstancesOfferingsRequest$AvailabilityZone": "

    The Availability Zone in which the Reserved Instance can be used.

    ", + "DescribeReservedInstancesOfferingsRequest$NextToken": "

    The token to retrieve the next page of results.

    ", + "DescribeReservedInstancesOfferingsResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeScheduledInstanceAvailabilityRequest$NextToken": "

    The token for the next set of results.

    ", + "DescribeScheduledInstanceAvailabilityResult$NextToken": "

    The token required to retrieve the next set of results. This value is null when there are no more results to return.

    ", + "DescribeScheduledInstancesRequest$NextToken": "

    The token for the next set of results.

    ", + "DescribeScheduledInstancesResult$NextToken": "

    The token required to retrieve the next set of results. This value is null when there are no more results to return.

    ", + "DescribeSnapshotAttributeRequest$SnapshotId": "

    The ID of the EBS snapshot.

    ", + "DescribeSnapshotAttributeResult$SnapshotId": "

    The ID of the EBS snapshot.

    ", + "DescribeSnapshotsRequest$NextToken": "

    The NextToken value returned from a previous paginated DescribeSnapshots request where MaxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the NextToken value. This value is null when there are no more results to return.

    ", + "DescribeSnapshotsResult$NextToken": "

    The NextToken value to include in a future DescribeSnapshots request. When the results of a DescribeSnapshots request exceed MaxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeSpotFleetInstancesRequest$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "DescribeSpotFleetInstancesRequest$NextToken": "

    The token for the next set of results.

    ", + "DescribeSpotFleetInstancesResponse$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "DescribeSpotFleetInstancesResponse$NextToken": "

    The token required to retrieve the next set of results. This value is null when there are no more results to return.

    ", + "DescribeSpotFleetRequestHistoryRequest$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "DescribeSpotFleetRequestHistoryRequest$NextToken": "

    The token for the next set of results.

    ", + "DescribeSpotFleetRequestHistoryResponse$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "DescribeSpotFleetRequestHistoryResponse$NextToken": "

    The token required to retrieve the next set of results. This value is null when there are no more results to return.

    ", + "DescribeSpotFleetRequestsRequest$NextToken": "

    The token for the next set of results.

    ", + "DescribeSpotFleetRequestsResponse$NextToken": "

    The token required to retrieve the next set of results. This value is null when there are no more results to return.

    ", + "DescribeSpotPriceHistoryRequest$AvailabilityZone": "

    Filters the results by the specified Availability Zone.

    ", + "DescribeSpotPriceHistoryRequest$NextToken": "

    The token for the next set of results.

    ", + "DescribeSpotPriceHistoryResult$NextToken": "

    The token required to retrieve the next set of results. This value is null when there are no more results to return.

    ", + "DescribeStaleSecurityGroupsRequest$VpcId": "

    The ID of the VPC.

    ", + "DescribeStaleSecurityGroupsResult$NextToken": "

    The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

    ", + "DescribeTagsRequest$NextToken": "

    The token to retrieve the next page of results.

    ", + "DescribeTagsResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return..

    ", + "DescribeVolumeAttributeRequest$VolumeId": "

    The ID of the volume.

    ", + "DescribeVolumeAttributeResult$VolumeId": "

    The ID of the volume.

    ", + "DescribeVolumeStatusRequest$NextToken": "

    The NextToken value to include in a future DescribeVolumeStatus request. When the results of the request exceed MaxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeVolumeStatusResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeVolumesRequest$NextToken": "

    The NextToken value returned from a previous paginated DescribeVolumes request where MaxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the NextToken value. This value is null when there are no more results to return.

    ", + "DescribeVolumesResult$NextToken": "

    The NextToken value to include in a future DescribeVolumes request. When the results of a DescribeVolumes request exceed MaxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeVpcAttributeRequest$VpcId": "

    The ID of the VPC.

    ", + "DescribeVpcAttributeResult$VpcId": "

    The ID of the VPC.

    ", + "DescribeVpcEndpointServicesRequest$NextToken": "

    The token for the next set of items to return. (You received this token from a prior call.)

    ", + "DescribeVpcEndpointServicesResult$NextToken": "

    The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

    ", + "DescribeVpcEndpointsRequest$NextToken": "

    The token for the next set of items to return. (You received this token from a prior call.)

    ", + "DescribeVpcEndpointsResult$NextToken": "

    The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

    ", + "DetachClassicLinkVpcRequest$InstanceId": "

    The ID of the instance to unlink from the VPC.

    ", + "DetachClassicLinkVpcRequest$VpcId": "

    The ID of the VPC to which the instance is linked.

    ", + "DetachInternetGatewayRequest$InternetGatewayId": "

    The ID of the Internet gateway.

    ", + "DetachInternetGatewayRequest$VpcId": "

    The ID of the VPC.

    ", + "DetachNetworkInterfaceRequest$AttachmentId": "

    The ID of the attachment.

    ", + "DetachVolumeRequest$VolumeId": "

    The ID of the volume.

    ", + "DetachVolumeRequest$InstanceId": "

    The ID of the instance.

    ", + "DetachVolumeRequest$Device": "

    The device name.

    ", + "DetachVpnGatewayRequest$VpnGatewayId": "

    The ID of the virtual private gateway.

    ", + "DetachVpnGatewayRequest$VpcId": "

    The ID of the VPC.

    ", + "DhcpConfiguration$Key": "

    The name of a DHCP option.

    ", + "DhcpOptions$DhcpOptionsId": "

    The ID of the set of DHCP options.

    ", + "DhcpOptionsIdStringList$member": null, + "DisableVgwRoutePropagationRequest$RouteTableId": "

    The ID of the route table.

    ", + "DisableVgwRoutePropagationRequest$GatewayId": "

    The ID of the virtual private gateway.

    ", + "DisableVpcClassicLinkDnsSupportRequest$VpcId": "

    The ID of the VPC.

    ", + "DisableVpcClassicLinkRequest$VpcId": "

    The ID of the VPC.

    ", + "DisassociateAddressRequest$PublicIp": "

    [EC2-Classic] The Elastic IP address. Required for EC2-Classic.

    ", + "DisassociateAddressRequest$AssociationId": "

    [EC2-VPC] The association ID. Required for EC2-VPC.

    ", + "DisassociateRouteTableRequest$AssociationId": "

    The association ID representing the current association between the route table and subnet.

    ", + "DiskImage$Description": "

    A description of the disk image.

    ", + "DiskImageDescription$ImportManifestUrl": "

    A presigned URL for the import manifest stored in Amazon S3. For information about creating a presigned URL for an Amazon S3 object, read the \"Query String Request Authentication Alternative\" section of the Authenticating REST Requests topic in the Amazon Simple Storage Service Developer Guide.

    For information about the import manifest referenced by this API action, see VM Import Manifest.

    ", + "DiskImageDescription$Checksum": "

    The checksum computed for the disk image.

    ", + "DiskImageDetail$ImportManifestUrl": "

    A presigned URL for the import manifest stored in Amazon S3 and presented here as an Amazon S3 presigned URL. For information about creating a presigned URL for an Amazon S3 object, read the \"Query String Request Authentication Alternative\" section of the Authenticating REST Requests topic in the Amazon Simple Storage Service Developer Guide.

    For information about the import manifest referenced by this API action, see VM Import Manifest.

    ", + "DiskImageVolumeDescription$Id": "

    The volume identifier.

    ", + "EbsBlockDevice$SnapshotId": "

    The ID of the snapshot.

    ", + "EbsInstanceBlockDevice$VolumeId": "

    The ID of the EBS volume.

    ", + "EbsInstanceBlockDeviceSpecification$VolumeId": "

    The ID of the EBS volume.

    ", + "EnableVgwRoutePropagationRequest$RouteTableId": "

    The ID of the route table.

    ", + "EnableVgwRoutePropagationRequest$GatewayId": "

    The ID of the virtual private gateway.

    ", + "EnableVolumeIORequest$VolumeId": "

    The ID of the volume.

    ", + "EnableVpcClassicLinkDnsSupportRequest$VpcId": "

    The ID of the VPC.

    ", + "EnableVpcClassicLinkRequest$VpcId": "

    The ID of the VPC.

    ", + "EventInformation$InstanceId": "

    The ID of the instance. This information is available only for instanceChange events.

    ", + "EventInformation$EventSubType": "

    The event.

    The following are the error events.

    • iamFleetRoleInvalid - The Spot fleet did not have the required permissions either to launch or terminate an instance.

    • launchSpecTemporarilyBlacklisted - The configuration is not valid and several attempts to launch instances have failed. For more information, see the description of the event.

    • spotFleetRequestConfigurationInvalid - The configuration is not valid. For more information, see the description of the event.

    • spotInstanceCountLimitExceeded - You've reached the limit on the number of Spot instances that you can launch.

    The following are the fleetRequestChange events.

    • active - The Spot fleet has been validated and Amazon EC2 is attempting to maintain the target number of running Spot instances.

    • cancelled - The Spot fleet is canceled and has no running Spot instances. The Spot fleet will be deleted two days after its instances were terminated.

    • cancelled_running - The Spot fleet is canceled and will not launch additional Spot instances, but its existing Spot instances continue to run until they are interrupted or terminated.

    • cancelled_terminating - The Spot fleet is canceled and its Spot instances are terminating.

    • expired - The Spot fleet request has expired. A subsequent event indicates that the instances were terminated, if the request was created with TerminateInstancesWithExpiration set.

    • modify_in_progress - A request to modify the Spot fleet request was accepted and is in progress.

    • modify_successful - The Spot fleet request was modified.

    • price_update - The bid price for a launch configuration was adjusted because it was too high. This change is permanent.

    • submitted - The Spot fleet request is being evaluated and Amazon EC2 is preparing to launch the target number of Spot instances.

    The following are the instanceChange events.

    • launched - A bid was fulfilled and a new instance was launched.

    • terminated - An instance was terminated by the user.

    ", + "EventInformation$EventDescription": "

    The description of the event.

    ", + "ExecutableByStringList$member": null, + "ExportTask$ExportTaskId": "

    The ID of the export task.

    ", + "ExportTask$Description": "

    A description of the resource being exported.

    ", + "ExportTask$StatusMessage": "

    The status message related to the export task.

    ", + "ExportTaskIdStringList$member": null, + "ExportToS3Task$S3Bucket": "

    The S3 bucket for the destination image. The destination bucket must exist and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com.

    ", + "ExportToS3Task$S3Key": "

    The encryption key for your S3 bucket.

    ", + "ExportToS3TaskSpecification$S3Bucket": "

    The S3 bucket for the destination image. The destination bucket must exist and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com.

    ", + "ExportToS3TaskSpecification$S3Prefix": "

    The image is written to a single object in the S3 bucket at the S3 key s3prefix + exportTaskId + '.' + diskImageFormat.

    ", + "Filter$Name": "

    The name of the filter. Filter names are case-sensitive.

    ", + "FlowLog$FlowLogId": "

    The flow log ID.

    ", + "FlowLog$FlowLogStatus": "

    The status of the flow log (ACTIVE).

    ", + "FlowLog$ResourceId": "

    The ID of the resource on which the flow log was created.

    ", + "FlowLog$LogGroupName": "

    The name of the flow log group.

    ", + "FlowLog$DeliverLogsStatus": "

    The status of the logs delivery (SUCCESS | FAILED).

    ", + "FlowLog$DeliverLogsErrorMessage": "

    Information about the error that occurred. Rate limited indicates that CloudWatch logs throttling has been applied for one or more network interfaces, or that you've reached the limit on the number of CloudWatch Logs log groups that you can create. Access error indicates that the IAM role associated with the flow log does not have sufficient permissions to publish to CloudWatch Logs. Unknown error indicates an internal error.

    ", + "FlowLog$DeliverLogsPermissionArn": "

    The ARN of the IAM role that posts logs to CloudWatch Logs.

    ", + "GetConsoleOutputRequest$InstanceId": "

    The ID of the instance.

    ", + "GetConsoleOutputResult$InstanceId": "

    The ID of the instance.

    ", + "GetConsoleOutputResult$Output": "

    The console output, Base64-encoded. If using a command line tool, the tool decodes the output for you.

    ", + "GetConsoleScreenshotRequest$InstanceId": "

    The ID of the instance.

    ", + "GetConsoleScreenshotResult$InstanceId": "

    The ID of the instance.

    ", + "GetConsoleScreenshotResult$ImageData": "

    The data that comprises the image.

    ", + "GetPasswordDataRequest$InstanceId": "

    The ID of the Windows instance.

    ", + "GetPasswordDataResult$InstanceId": "

    The ID of the Windows instance.

    ", + "GetPasswordDataResult$PasswordData": "

    The password of the instance.

    ", + "GroupIdStringList$member": null, + "GroupIdentifier$GroupName": "

    The name of the security group.

    ", + "GroupIdentifier$GroupId": "

    The ID of the security group.

    ", + "GroupIds$member": null, + "GroupNameStringList$member": null, + "Host$HostId": "

    The ID of the Dedicated host.

    ", + "Host$HostReservationId": "

    The reservation ID of the Dedicated host. This returns a null response if the Dedicated host doesn't have an associated reservation.

    ", + "Host$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure idempotency of the request. For more information, see How to Ensure Idempotency in the Amazon Elastic Compute Cloud User Guide.

    ", + "Host$AvailabilityZone": "

    The Availability Zone of the Dedicated host.

    ", + "HostInstance$InstanceId": "

    The ID of an instance running on the Dedicated host.

    ", + "HostInstance$InstanceType": "

    The instance type size (for example, m3.medium) of the running instance.

    ", + "HostProperties$InstanceType": "

    The instance type size that the Dedicated host supports (for example, m3.medium).

    ", + "IamInstanceProfile$Arn": "

    The Amazon Resource Name (ARN) of the instance profile.

    ", + "IamInstanceProfile$Id": "

    The ID of the instance profile.

    ", + "IamInstanceProfileSpecification$Arn": "

    The Amazon Resource Name (ARN) of the instance profile.

    ", + "IamInstanceProfileSpecification$Name": "

    The name of the instance profile.

    ", + "IdFormat$Resource": "

    The type of resource.

    ", + "Image$ImageId": "

    The ID of the AMI.

    ", + "Image$ImageLocation": "

    The location of the AMI.

    ", + "Image$OwnerId": "

    The AWS account ID of the image owner.

    ", + "Image$CreationDate": "

    The date and time the image was created.

    ", + "Image$KernelId": "

    The kernel associated with the image, if any. Only applicable for machine images.

    ", + "Image$RamdiskId": "

    The RAM disk associated with the image, if any. Only applicable for machine images.

    ", + "Image$SriovNetSupport": "

    Specifies whether enhanced networking with the Intel 82599 Virtual Function interface is enabled.

    ", + "Image$ImageOwnerAlias": "

    The AWS account alias (for example, amazon, self) or the AWS account ID of the AMI owner.

    ", + "Image$Name": "

    The name of the AMI that was provided during image creation.

    ", + "Image$Description": "

    The description of the AMI that was provided during image creation.

    ", + "Image$RootDeviceName": "

    The device name of the root device (for example, /dev/sda1 or /dev/xvda).

    ", + "ImageAttribute$ImageId": "

    The ID of the AMI.

    ", + "ImageDiskContainer$Description": "

    The description of the disk image.

    ", + "ImageDiskContainer$Format": "

    The format of the disk image being imported.

    Valid values: RAW | VHD | VMDK | OVA

    ", + "ImageDiskContainer$Url": "

    The URL to the Amazon S3-based disk image being imported. The URL can be either an https URL (https://..) or an Amazon S3 URL (s3://..).

    ", + "ImageDiskContainer$DeviceName": "

    The block device mapping for the disk.

    ", + "ImageDiskContainer$SnapshotId": "

    The ID of the EBS snapshot to be used for importing the snapshot.

    ", + "ImageIdStringList$member": null, + "ImportImageRequest$Description": "

    A description string for the import image task.

    ", + "ImportImageRequest$LicenseType": "

    The license type to be used for the Amazon Machine Image (AMI) after importing.

    Note: You can use BYOL only if you have existing licenses with rights to use them in a third-party cloud, such as AWS. For more information, see VM Import/Export Prerequisites in the Amazon Elastic Compute Cloud User Guide.

    Valid values: AWS | BYOL

    ", + "ImportImageRequest$Hypervisor": "

    The target hypervisor platform.

    Valid values: xen

    ", + "ImportImageRequest$Architecture": "

    The architecture of the virtual machine.

    Valid values: i386 | x86_64

    ", + "ImportImageRequest$Platform": "

    The operating system of the virtual machine.

    Valid values: Windows | Linux

    ", + "ImportImageRequest$ClientToken": "

    The token to enable idempotency for VM import requests.

    ", + "ImportImageRequest$RoleName": "

    The name of the role to use when not using the default role, 'vmimport'.

    ", + "ImportImageResult$ImportTaskId": "

    The task ID of the import image task.

    ", + "ImportImageResult$Architecture": "

    The architecture of the virtual machine.

    ", + "ImportImageResult$LicenseType": "

    The license type of the virtual machine.

    ", + "ImportImageResult$Platform": "

    The operating system of the virtual machine.

    ", + "ImportImageResult$Hypervisor": "

    The target hypervisor of the import task.

    ", + "ImportImageResult$Description": "

    A description of the import task.

    ", + "ImportImageResult$ImageId": "

    The ID of the Amazon Machine Image (AMI) created by the import task.

    ", + "ImportImageResult$Progress": "

    The progress of the task.

    ", + "ImportImageResult$StatusMessage": "

    A detailed status message of the import task.

    ", + "ImportImageResult$Status": "

    A brief status of the task.

    ", + "ImportImageTask$ImportTaskId": "

    The ID of the import image task.

    ", + "ImportImageTask$Architecture": "

    The architecture of the virtual machine.

    Valid values: i386 | x86_64

    ", + "ImportImageTask$LicenseType": "

    The license type of the virtual machine.

    ", + "ImportImageTask$Platform": "

    The operating system of the virtual machine.

    ", + "ImportImageTask$Hypervisor": "

    The target hypervisor for the import task.

    Valid values: xen

    ", + "ImportImageTask$Description": "

    A description of the import task.

    ", + "ImportImageTask$ImageId": "

    The ID of the Amazon Machine Image (AMI) of the imported virtual machine.

    ", + "ImportImageTask$Progress": "

    The percentage of progress of the import image task.

    ", + "ImportImageTask$StatusMessage": "

    A descriptive status message for the import image task.

    ", + "ImportImageTask$Status": "

    A brief status for the import image task.

    ", + "ImportInstanceLaunchSpecification$AdditionalInfo": "

    Reserved.

    ", + "ImportInstanceLaunchSpecification$SubnetId": "

    [EC2-VPC] The ID of the subnet in which to launch the instance.

    ", + "ImportInstanceLaunchSpecification$PrivateIpAddress": "

    [EC2-VPC] An available IP address from the IP address range of the subnet.

    ", + "ImportInstanceRequest$Description": "

    A description for the instance being imported.

    ", + "ImportInstanceTaskDetails$InstanceId": "

    The ID of the instance.

    ", + "ImportInstanceTaskDetails$Description": "

    A description of the task.

    ", + "ImportInstanceVolumeDetailItem$AvailabilityZone": "

    The Availability Zone where the resulting instance will reside.

    ", + "ImportInstanceVolumeDetailItem$Status": "

    The status of the import of this particular disk image.

    ", + "ImportInstanceVolumeDetailItem$StatusMessage": "

    The status information or errors related to the disk image.

    ", + "ImportInstanceVolumeDetailItem$Description": "

    A description of the task.

    ", + "ImportKeyPairRequest$KeyName": "

    A unique name for the key pair.

    ", + "ImportKeyPairResult$KeyName": "

    The key pair name you provided.

    ", + "ImportKeyPairResult$KeyFingerprint": "

    The MD5 public key fingerprint as specified in section 4 of RFC 4716.
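
    A minimal aws-sdk-go sketch of importing a public key and reading back the name and fingerprint; the key name and key material are hypothetical, and the SDK is expected to handle the Base64 encoding of PublicKeyMaterial:

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/ec2"
        )

        func main() {
            svc := ec2.New(session.Must(session.NewSession()))

            // Hypothetical key material; normally read from e.g. ~/.ssh/id_rsa.pub.
            pub := []byte("ssh-rsa AAAA... user@host")

            resp, err := svc.ImportKeyPair(&ec2.ImportKeyPairInput{
                KeyName:           aws.String("my-imported-key"), // hypothetical
                PublicKeyMaterial: pub,
            })
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println(aws.StringValue(resp.KeyName), aws.StringValue(resp.KeyFingerprint))
        }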

    ", + "ImportSnapshotRequest$Description": "

    The description string for the import snapshot task.

    ", + "ImportSnapshotRequest$ClientToken": "

    Token to enable idempotency for VM import requests.

    ", + "ImportSnapshotRequest$RoleName": "

    The name of the role to use when not using the default role, 'vmimport'.

    ", + "ImportSnapshotResult$ImportTaskId": "

    The ID of the import snapshot task.

    ", + "ImportSnapshotResult$Description": "

    A description of the import snapshot task.

    ", + "ImportSnapshotTask$ImportTaskId": "

    The ID of the import snapshot task.

    ", + "ImportSnapshotTask$Description": "

    A description of the import snapshot task.

    ", + "ImportTaskIdList$member": null, + "ImportVolumeRequest$AvailabilityZone": "

    The Availability Zone for the resulting EBS volume.

    ", + "ImportVolumeRequest$Description": "

    A description of the volume.

    ", + "ImportVolumeTaskDetails$AvailabilityZone": "

    The Availability Zone where the resulting volume will reside.

    ", + "ImportVolumeTaskDetails$Description": "

    The description you provided when starting the import volume task.

    ", + "Instance$InstanceId": "

    The ID of the instance.

    ", + "Instance$ImageId": "

    The ID of the AMI used to launch the instance.

    ", + "Instance$PrivateDnsName": "

    The private DNS name assigned to the instance. This DNS name can only be used inside the Amazon EC2 network. This name is not available until the instance enters the running state. For EC2-VPC, this name is only available if you've enabled DNS hostnames for your VPC.

    ", + "Instance$PublicDnsName": "

    The public DNS name assigned to the instance. This name is not available until the instance enters the running state. For EC2-VPC, this name is only available if you've enabled DNS hostnames for your VPC.

    ", + "Instance$StateTransitionReason": "

    The reason for the most recent state transition. This might be an empty string.

    ", + "Instance$KeyName": "

    The name of the key pair, if this instance was launched with an associated key pair.

    ", + "Instance$KernelId": "

    The kernel associated with this instance, if applicable.

    ", + "Instance$RamdiskId": "

    The RAM disk associated with this instance, if applicable.

    ", + "Instance$SubnetId": "

    [EC2-VPC] The ID of the subnet in which the instance is running.

    ", + "Instance$VpcId": "

    [EC2-VPC] The ID of the VPC in which the instance is running.

    ", + "Instance$PrivateIpAddress": "

    The private IP address assigned to the instance.

    ", + "Instance$PublicIpAddress": "

    The public IP address assigned to the instance, if applicable.

    ", + "Instance$RootDeviceName": "

    The root device name (for example, /dev/sda1 or /dev/xvda).

    ", + "Instance$SpotInstanceRequestId": "

    If the request is a Spot instance request, the ID of the request.

    ", + "Instance$ClientToken": "

    The idempotency token you provided when you launched the instance, if applicable.

    ", + "Instance$SriovNetSupport": "

    Specifies whether enhanced networking with the Intel 82599 Virtual Function interface is enabled.

    ", + "InstanceAttribute$InstanceId": "

    The ID of the instance.

    ", + "InstanceBlockDeviceMapping$DeviceName": "

    The device name exposed to the instance (for example, /dev/sdh or xvdh).

    ", + "InstanceBlockDeviceMappingSpecification$DeviceName": "

    The device name exposed to the instance (for example, /dev/sdh or xvdh).

    ", + "InstanceBlockDeviceMappingSpecification$VirtualName": "

    The virtual device name.

    ", + "InstanceBlockDeviceMappingSpecification$NoDevice": "

    Suppresses the specified device included in the block device mapping.

    ", + "InstanceCapacity$InstanceType": "

    The instance type size supported by the Dedicated host.

    ", + "InstanceExportDetails$InstanceId": "

    The ID of the resource being exported.

    ", + "InstanceIdSet$member": null, + "InstanceIdStringList$member": null, + "InstanceMonitoring$InstanceId": "

    The ID of the instance.

    ", + "InstanceNetworkInterface$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "InstanceNetworkInterface$SubnetId": "

    The ID of the subnet.

    ", + "InstanceNetworkInterface$VpcId": "

    The ID of the VPC.

    ", + "InstanceNetworkInterface$Description": "

    The description.

    ", + "InstanceNetworkInterface$OwnerId": "

    The ID of the AWS account that created the network interface.

    ", + "InstanceNetworkInterface$MacAddress": "

    The MAC address.

    ", + "InstanceNetworkInterface$PrivateIpAddress": "

    The IP address of the network interface within the subnet.

    ", + "InstanceNetworkInterface$PrivateDnsName": "

    The private DNS name.

    ", + "InstanceNetworkInterfaceAssociation$PublicIp": "

    The public IP address or Elastic IP address bound to the network interface.

    ", + "InstanceNetworkInterfaceAssociation$PublicDnsName": "

    The public DNS name.

    ", + "InstanceNetworkInterfaceAssociation$IpOwnerId": "

    The ID of the owner of the Elastic IP address.

    ", + "InstanceNetworkInterfaceAttachment$AttachmentId": "

    The ID of the network interface attachment.

    ", + "InstanceNetworkInterfaceSpecification$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "InstanceNetworkInterfaceSpecification$SubnetId": "

    The ID of the subnet associated with the network interface. Applies only if creating a network interface when launching an instance.

    ", + "InstanceNetworkInterfaceSpecification$Description": "

    The description of the network interface. Applies only if creating a network interface when launching an instance.

    ", + "InstanceNetworkInterfaceSpecification$PrivateIpAddress": "

    The private IP address of the network interface. Applies only if creating a network interface when launching an instance.

    ", + "InstancePrivateIpAddress$PrivateIpAddress": "

    The private IP address of the network interface.

    ", + "InstancePrivateIpAddress$PrivateDnsName": "

    The private DNS name.

    ", + "InstanceStateChange$InstanceId": "

    The ID of the instance.

    ", + "InstanceStatus$InstanceId": "

    The ID of the instance.

    ", + "InstanceStatus$AvailabilityZone": "

    The Availability Zone of the instance.

    ", + "InstanceStatusEvent$Description": "

    A description of the event.

    After a scheduled event is completed, it can still be described for up to a week. If the event has been completed, this description starts with the following text: [Completed].

    ", + "InternetGateway$InternetGatewayId": "

    The ID of the Internet gateway.

    ", + "InternetGatewayAttachment$VpcId": "

    The ID of the VPC.

    ", + "IpPermission$IpProtocol": "

    The IP protocol name (for tcp, udp, and icmp) or number (see Protocol Numbers).

    [EC2-VPC only] When you authorize or revoke security group rules, you can use -1 to specify all.
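
    A minimal aws-sdk-go sketch of an IpPermission as used by the authorize/revoke calls; the group ID and CIDR range are hypothetical:

        package main

        import (
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/ec2"
        )

        func main() {
            svc := ec2.New(session.Must(session.NewSession()))

            _, err := svc.AuthorizeSecurityGroupIngress(&ec2.AuthorizeSecurityGroupIngressInput{
                GroupId: aws.String("sg-12345678"), // hypothetical
                IpPermissions: []*ec2.IpPermission{{
                    IpProtocol: aws.String("tcp"), // or "-1" for all protocols (EC2-VPC)
                    FromPort:   aws.Int64(22),
                    ToPort:     aws.Int64(22),
                    IpRanges:   []*ec2.IpRange{{CidrIp: aws.String("203.0.113.0/24")}},
                }},
            })
            if err != nil {
                log.Fatal(err)
            }
        }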

    ", + "IpRange$CidrIp": "

    The CIDR range. You can either specify a CIDR range or a source security group, not both.

    ", + "IpRanges$member": null, + "KeyNameStringList$member": null, + "KeyPair$KeyName": "

    The name of the key pair.

    ", + "KeyPair$KeyFingerprint": "

    The SHA-1 digest of the DER encoded private key.

    ", + "KeyPair$KeyMaterial": "

    An unencrypted PEM encoded RSA private key.

    ", + "KeyPairInfo$KeyName": "

    The name of the key pair.

    ", + "KeyPairInfo$KeyFingerprint": "

    If you used CreateKeyPair to create the key pair, this is the SHA-1 digest of the DER encoded private key. If you used ImportKeyPair to provide AWS the public key, this is the MD5 public key fingerprint as specified in section 4 of RFC 4716.

    ", + "LaunchPermission$UserId": "

    The AWS account ID.

    ", + "LaunchSpecification$ImageId": "

    The ID of the AMI.

    ", + "LaunchSpecification$KeyName": "

    The name of the key pair.

    ", + "LaunchSpecification$UserData": "

    The user data to make available to the instances. If you are using an AWS SDK or command line tool, Base64-encoding is performed for you, and you can load the text from a file. Otherwise, you must provide Base64-encoded text.

    ", + "LaunchSpecification$AddressingType": "

    Deprecated.

    ", + "LaunchSpecification$KernelId": "

    The ID of the kernel.

    ", + "LaunchSpecification$RamdiskId": "

    The ID of the RAM disk.

    ", + "LaunchSpecification$SubnetId": "

    The ID of the subnet in which to launch the instance.

    ", + "ModifyIdFormatRequest$Resource": "

    The type of resource.

    ", + "ModifyIdentityIdFormatRequest$Resource": "

    The type of resource.

    ", + "ModifyIdentityIdFormatRequest$PrincipalArn": "

    The ARN of the principal, which can be an IAM user, IAM role, or the root user.

    ", + "ModifyImageAttributeRequest$ImageId": "

    The ID of the AMI.

    ", + "ModifyImageAttributeRequest$Attribute": "

    The name of the attribute to modify.

    ", + "ModifyImageAttributeRequest$Value": "

    The value of the attribute being modified. This is only valid when modifying the description attribute.

    ", + "ModifyInstanceAttributeRequest$InstanceId": "

    The ID of the instance.

    ", + "ModifyInstanceAttributeRequest$Value": "

    A new value for the attribute. Use only with the kernel, ramdisk, userData, disableApiTermination, or instanceInitiatedShutdownBehavior attribute.

    ", + "ModifyInstancePlacementRequest$InstanceId": "

    The ID of the instance that you are modifying.

    ", + "ModifyInstancePlacementRequest$HostId": "

    The ID of the Dedicated host that the instance will have affinity with.

    ", + "ModifyNetworkInterfaceAttributeRequest$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "ModifyReservedInstancesRequest$ClientToken": "

    A unique, case-sensitive token you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

    ", + "ModifyReservedInstancesResult$ReservedInstancesModificationId": "

    The ID for the modification.

    ", + "ModifySnapshotAttributeRequest$SnapshotId": "

    The ID of the snapshot.

    ", + "ModifySpotFleetRequestRequest$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "ModifySubnetAttributeRequest$SubnetId": "

    The ID of the subnet.

    ", + "ModifyVolumeAttributeRequest$VolumeId": "

    The ID of the volume.

    ", + "ModifyVpcAttributeRequest$VpcId": "

    The ID of the VPC.

    ", + "ModifyVpcEndpointRequest$VpcEndpointId": "

    The ID of the endpoint.

    ", + "ModifyVpcEndpointRequest$PolicyDocument": "

    A policy document to attach to the endpoint. The policy must be in valid JSON format.

    ", + "ModifyVpcPeeringConnectionOptionsRequest$VpcPeeringConnectionId": "

    The ID of the VPC peering connection.

    ", + "MoveAddressToVpcRequest$PublicIp": "

    The Elastic IP address.

    ", + "MoveAddressToVpcResult$AllocationId": "

    The allocation ID for the Elastic IP address.

    ", + "MovingAddressStatus$PublicIp": "

    The Elastic IP address.

    ", + "NatGateway$VpcId": "

    The ID of the VPC in which the NAT gateway is located.

    ", + "NatGateway$SubnetId": "

    The ID of the subnet in which the NAT gateway is located.

    ", + "NatGateway$NatGatewayId": "

    The ID of the NAT gateway.

    ", + "NatGateway$FailureCode": "

    If the NAT gateway could not be created, specifies the error code for the failure. (InsufficientFreeAddressesInSubnet | Gateway.NotAttached | InvalidAllocationID.NotFound | Resource.AlreadyAssociated | InternalError | InvalidSubnetID.NotFound)

    ", + "NatGateway$FailureMessage": "

    If the NAT gateway could not be created, specifies the error message for the failure, that corresponds to the error code.

    • For InsufficientFreeAddressesInSubnet: \"Subnet has insufficient free addresses to create this NAT gateway\"

    • For Gateway.NotAttached: \"Network vpc-xxxxxxxx has no Internet gateway attached\"

    • For InvalidAllocationID.NotFound: \"Elastic IP address eipalloc-xxxxxxxx could not be associated with this NAT gateway\"

    • For Resource.AlreadyAssociated: \"Elastic IP address eipalloc-xxxxxxxx is already associated\"

    • For InternalError: \"Network interface eni-xxxxxxxx, created and used internally by this NAT gateway is in an invalid state. Please try again.\"

    • For InvalidSubnetID.NotFound: \"The specified subnet subnet-xxxxxxxx does not exist or could not be found.\"

    ", + "NatGatewayAddress$PublicIp": "

    The Elastic IP address associated with the NAT gateway.

    ", + "NatGatewayAddress$AllocationId": "

    The allocation ID of the Elastic IP address that's associated with the NAT gateway.

    ", + "NatGatewayAddress$PrivateIp": "

    The private IP address associated with the Elastic IP address.

    ", + "NatGatewayAddress$NetworkInterfaceId": "

    The ID of the network interface associated with the NAT gateway.

    ", + "NetworkAcl$NetworkAclId": "

    The ID of the network ACL.

    ", + "NetworkAcl$VpcId": "

    The ID of the VPC for the network ACL.

    ", + "NetworkAclAssociation$NetworkAclAssociationId": "

    The ID of the association between a network ACL and a subnet.

    ", + "NetworkAclAssociation$NetworkAclId": "

    The ID of the network ACL.

    ", + "NetworkAclAssociation$SubnetId": "

    The ID of the subnet.

    ", + "NetworkAclEntry$Protocol": "

    The protocol. A value of -1 means all protocols.

    ", + "NetworkAclEntry$CidrBlock": "

    The network range to allow or deny, in CIDR notation.

    ", + "NetworkInterface$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "NetworkInterface$SubnetId": "

    The ID of the subnet.

    ", + "NetworkInterface$VpcId": "

    The ID of the VPC.

    ", + "NetworkInterface$AvailabilityZone": "

    The Availability Zone.

    ", + "NetworkInterface$Description": "

    A description.

    ", + "NetworkInterface$OwnerId": "

    The AWS account ID of the owner of the network interface.

    ", + "NetworkInterface$RequesterId": "

    The ID of the entity that launched the instance on your behalf (for example, AWS Management Console or Auto Scaling).

    ", + "NetworkInterface$MacAddress": "

    The MAC address.

    ", + "NetworkInterface$PrivateIpAddress": "

    The IP address of the network interface within the subnet.

    ", + "NetworkInterface$PrivateDnsName": "

    The private DNS name.

    ", + "NetworkInterfaceAssociation$PublicIp": "

    The address of the Elastic IP address bound to the network interface.

    ", + "NetworkInterfaceAssociation$PublicDnsName": "

    The public DNS name.

    ", + "NetworkInterfaceAssociation$IpOwnerId": "

    The ID of the Elastic IP address owner.

    ", + "NetworkInterfaceAssociation$AllocationId": "

    The allocation ID.

    ", + "NetworkInterfaceAssociation$AssociationId": "

    The association ID.

    ", + "NetworkInterfaceAttachment$AttachmentId": "

    The ID of the network interface attachment.

    ", + "NetworkInterfaceAttachment$InstanceId": "

    The ID of the instance.

    ", + "NetworkInterfaceAttachment$InstanceOwnerId": "

    The AWS account ID of the owner of the instance.

    ", + "NetworkInterfaceAttachmentChanges$AttachmentId": "

    The ID of the network interface attachment.

    ", + "NetworkInterfaceIdList$member": null, + "NetworkInterfacePrivateIpAddress$PrivateIpAddress": "

    The private IP address.

    ", + "NetworkInterfacePrivateIpAddress$PrivateDnsName": "

    The private DNS name.

    ", + "NewDhcpConfiguration$Key": null, + "OwnerStringList$member": null, + "Placement$AvailabilityZone": "

    The Availability Zone of the instance.

    ", + "Placement$GroupName": "

    The name of the placement group the instance is in (for cluster compute instances).

    ", + "Placement$HostId": "

    The ID of the Dedicated host on which the instance resides. This parameter is not supported for the ImportInstance command.

    ", + "Placement$Affinity": "

    The affinity setting for the instance on the Dedicated host. This parameter is not supported for the ImportInstance command.

    ", + "PlacementGroup$GroupName": "

    The name of the placement group.

    ", + "PlacementGroupStringList$member": null, + "PrefixList$PrefixListId": "

    The ID of the prefix.

    ", + "PrefixList$PrefixListName": "

    The name of the prefix.

    ", + "PrefixListId$PrefixListId": "

    The ID of the prefix.

    ", + "PrefixListIdSet$member": null, + "PrivateIpAddressSpecification$PrivateIpAddress": "

    The private IP addresses.

    ", + "PrivateIpAddressStringList$member": null, + "ProductCode$ProductCodeId": "

    The product code.

    ", + "ProductCodeStringList$member": null, + "ProductDescriptionList$member": null, + "PropagatingVgw$GatewayId": "

    The ID of the virtual private gateway (VGW).

    ", + "ProvisionedBandwidth$Provisioned": "

    Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

    ", + "ProvisionedBandwidth$Requested": "

    Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

    ", + "ProvisionedBandwidth$Status": "

    Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

    ", + "PublicIpStringList$member": null, + "PurchaseRequest$PurchaseToken": "

    The purchase token.

    ", + "PurchaseReservedInstancesOfferingRequest$ReservedInstancesOfferingId": "

    The ID of the Reserved Instance offering to purchase.

    ", + "PurchaseReservedInstancesOfferingResult$ReservedInstancesId": "

    The IDs of the purchased Reserved Instances.

    ", + "PurchaseScheduledInstancesRequest$ClientToken": "

    Unique, case-sensitive identifier that ensures the idempotency of the request. For more information, see Ensuring Idempotency.

    ", + "Region$RegionName": "

    The name of the region.

    ", + "Region$Endpoint": "

    The region service endpoint.

    ", + "RegionNameStringList$member": null, + "RegisterImageRequest$ImageLocation": "

    The full path to your AMI manifest in Amazon S3 storage.

    ", + "RegisterImageRequest$Name": "

    A name for your AMI.

    Constraints: 3-128 alphanumeric characters, parentheses (()), square brackets ([]), spaces ( ), periods (.), slashes (/), dashes (-), single quotes ('), at-signs (@), or underscores (_)

    ", + "RegisterImageRequest$Description": "

    A description for your AMI.

    ", + "RegisterImageRequest$KernelId": "

    The ID of the kernel.

    ", + "RegisterImageRequest$RamdiskId": "

    The ID of the RAM disk.

    ", + "RegisterImageRequest$RootDeviceName": "

    The name of the root device (for example, /dev/sda1, or /dev/xvda).

    ", + "RegisterImageRequest$VirtualizationType": "

    The type of virtualization.

    Default: paravirtual

    ", + "RegisterImageRequest$SriovNetSupport": "

    Set to simple to enable enhanced networking with the Intel 82599 Virtual Function interface for the AMI and any instances that you launch from the AMI.

    There is no way to disable sriovNetSupport at this time.

    This option is supported only for HVM AMIs. Specifying this option with a PV AMI can make instances launched from the AMI unreachable.

    ", + "RegisterImageResult$ImageId": "

    The ID of the newly registered AMI.

    ", + "RejectVpcPeeringConnectionRequest$VpcPeeringConnectionId": "

    The ID of the VPC peering connection.

    ", + "ReleaseAddressRequest$PublicIp": "

    [EC2-Classic] The Elastic IP address. Required for EC2-Classic.

    ", + "ReleaseAddressRequest$AllocationId": "

    [EC2-VPC] The allocation ID. Required for EC2-VPC.

    ", + "ReplaceNetworkAclAssociationRequest$AssociationId": "

    The ID of the current association between the original network ACL and the subnet.

    ", + "ReplaceNetworkAclAssociationRequest$NetworkAclId": "

    The ID of the new network ACL to associate with the subnet.

    ", + "ReplaceNetworkAclAssociationResult$NewAssociationId": "

    The ID of the new association.

    ", + "ReplaceNetworkAclEntryRequest$NetworkAclId": "

    The ID of the ACL.

    ", + "ReplaceNetworkAclEntryRequest$Protocol": "

    The IP protocol. You can specify all or -1 to mean all protocols.

    ", + "ReplaceNetworkAclEntryRequest$CidrBlock": "

    The network range to allow or deny, in CIDR notation.

    ", + "ReplaceRouteRequest$RouteTableId": "

    The ID of the route table.

    ", + "ReplaceRouteRequest$DestinationCidrBlock": "

    The CIDR address block used for the destination match. The value you provide must match the CIDR of an existing route in the table.

    ", + "ReplaceRouteRequest$GatewayId": "

    The ID of an Internet gateway or virtual private gateway.

    ", + "ReplaceRouteRequest$InstanceId": "

    The ID of a NAT instance in your VPC.

    ", + "ReplaceRouteRequest$NetworkInterfaceId": "

    The ID of a network interface.

    ", + "ReplaceRouteRequest$VpcPeeringConnectionId": "

    The ID of a VPC peering connection.

    ", + "ReplaceRouteRequest$NatGatewayId": "

    The ID of a NAT gateway.

    ", + "ReplaceRouteTableAssociationRequest$AssociationId": "

    The association ID.

    ", + "ReplaceRouteTableAssociationRequest$RouteTableId": "

    The ID of the new route table to associate with the subnet.

    ", + "ReplaceRouteTableAssociationResult$NewAssociationId": "

    The ID of the new association.

    ", + "ReportInstanceStatusRequest$Description": "

    Descriptive text about the health state of your instance.

    ", + "RequestHostIdList$member": null, + "RequestSpotFleetResponse$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "RequestSpotInstancesRequest$SpotPrice": "

    The maximum hourly price (bid) for any Spot instance launched to fulfill the request.

    ", + "RequestSpotInstancesRequest$ClientToken": "

    Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency in the Amazon Elastic Compute Cloud User Guide.

    ", + "RequestSpotInstancesRequest$LaunchGroup": "

    The instance launch group. Launch groups are Spot instances that launch together and terminate together.

    Default: Instances are launched and terminated individually

    ", + "RequestSpotInstancesRequest$AvailabilityZoneGroup": "

    The user-specified name for a logical grouping of bids.

    When you specify an Availability Zone group in a Spot instance request, all Spot instances in the request are launched in the same Availability Zone. Instance proximity is maintained with this parameter, but the choice of Availability Zone is not. The group applies only to bids for Spot instances of the same instance type. Any additional Spot instance requests that are specified with the same Availability Zone group name are launched in that same Availability Zone, as long as at least one instance from the group is still active.

    If there is no active instance running in the Availability Zone group that you specify for a new Spot instance request (all instances are terminated, the bid has expired, or the bid falls below the current market price), then Amazon EC2 launches the instance in any Availability Zone where the constraint can be met. Consequently, the subsequent set of Spot instances could be placed in a different zone from the original request, even if you specified the same Availability Zone group.

    Default: Instances are launched in any available Availability Zone.

    ", + "RequestSpotLaunchSpecification$ImageId": "

    The ID of the AMI.

    ", + "RequestSpotLaunchSpecification$KeyName": "

    The name of the key pair.

    ", + "RequestSpotLaunchSpecification$UserData": "

    The user data to make available to the instances. If you are using an AWS SDK or command line tool, Base64-encoding is performed for you, and you can load the text from a file. Otherwise, you must provide Base64-encoded text.

    ", + "RequestSpotLaunchSpecification$AddressingType": "

    Deprecated.

    ", + "RequestSpotLaunchSpecification$KernelId": "

    The ID of the kernel.

    ", + "RequestSpotLaunchSpecification$RamdiskId": "

    The ID of the RAM disk.

    ", + "RequestSpotLaunchSpecification$SubnetId": "

    The ID of the subnet in which to launch the instance.

    ", + "Reservation$ReservationId": "

    The ID of the reservation.

    ", + "Reservation$OwnerId": "

    The ID of the AWS account that owns the reservation.

    ", + "Reservation$RequesterId": "

    The ID of the requester that launched the instances on your behalf (for example, AWS Management Console or Auto Scaling).

    ", + "ReservedInstances$ReservedInstancesId": "

    The ID of the Reserved Instance.

    ", + "ReservedInstances$AvailabilityZone": "

    The Availability Zone in which the Reserved Instance can be used.

    ", + "ReservedInstancesConfiguration$AvailabilityZone": "

    The Availability Zone for the modified Reserved Instances.

    ", + "ReservedInstancesConfiguration$Platform": "

    The network platform of the modified Reserved Instances, which is either EC2-Classic or EC2-VPC.

    ", + "ReservedInstancesId$ReservedInstancesId": "

    The ID of the Reserved Instance.

    ", + "ReservedInstancesIdStringList$member": null, + "ReservedInstancesListing$ReservedInstancesListingId": "

    The ID of the Reserved Instance listing.

    ", + "ReservedInstancesListing$ReservedInstancesId": "

    The ID of the Reserved Instance.

    ", + "ReservedInstancesListing$StatusMessage": "

    The reason for the current status of the Reserved Instance listing. The response can be blank.

    ", + "ReservedInstancesListing$ClientToken": "

    A unique, case-sensitive key supplied by the client to ensure that the request is idempotent. For more information, see Ensuring Idempotency.

    ", + "ReservedInstancesModification$ReservedInstancesModificationId": "

    A unique ID for the Reserved Instance modification.

    ", + "ReservedInstancesModification$Status": "

    The status of the Reserved Instances modification request.

    ", + "ReservedInstancesModification$StatusMessage": "

    The reason for the status.

    ", + "ReservedInstancesModification$ClientToken": "

    A unique, case-sensitive key supplied by the client to ensure that the request is idempotent. For more information, see Ensuring Idempotency.

    ", + "ReservedInstancesModificationIdStringList$member": null, + "ReservedInstancesModificationResult$ReservedInstancesId": "

    The ID for the Reserved Instances that were created as part of the modification request. This field is only available when the modification is fulfilled.

    ", + "ReservedInstancesOffering$ReservedInstancesOfferingId": "

    The ID of the Reserved Instance offering.

    ", + "ReservedInstancesOffering$AvailabilityZone": "

    The Availability Zone in which the Reserved Instance can be used.

    ", + "ReservedInstancesOfferingIdStringList$member": null, + "ResetImageAttributeRequest$ImageId": "

    The ID of the AMI.

    ", + "ResetInstanceAttributeRequest$InstanceId": "

    The ID of the instance.

    ", + "ResetNetworkInterfaceAttributeRequest$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "ResetNetworkInterfaceAttributeRequest$SourceDestCheck": "

    The source/destination checking attribute. Resets the value to true.

    ", + "ResetSnapshotAttributeRequest$SnapshotId": "

    The ID of the snapshot.

    ", + "ResourceIdList$member": null, + "ResponseHostIdList$member": null, + "RestorableByStringList$member": null, + "RestoreAddressToClassicRequest$PublicIp": "

    The Elastic IP address.

    ", + "RestoreAddressToClassicResult$PublicIp": "

    The Elastic IP address.

    ", + "RevokeSecurityGroupEgressRequest$GroupId": "

    The ID of the security group.

    ", + "RevokeSecurityGroupEgressRequest$SourceSecurityGroupName": "

    The name of a destination security group. To revoke outbound access to a destination security group, we recommend that you use a set of IP permissions instead.

    ", + "RevokeSecurityGroupEgressRequest$SourceSecurityGroupOwnerId": "

    The AWS account number for a destination security group. To revoke outbound access to a destination security group, we recommend that you use a set of IP permissions instead.

    ", + "RevokeSecurityGroupEgressRequest$IpProtocol": "

    The IP protocol name or number. We recommend that you specify the protocol in a set of IP permissions instead.

    ", + "RevokeSecurityGroupEgressRequest$CidrIp": "

    The CIDR IP address range. We recommend that you specify the CIDR range in a set of IP permissions instead.

    ", + "RevokeSecurityGroupIngressRequest$GroupName": "

    [EC2-Classic, default VPC] The name of the security group.

    ", + "RevokeSecurityGroupIngressRequest$GroupId": "

    The ID of the security group. Required for a security group in a nondefault VPC.

    ", + "RevokeSecurityGroupIngressRequest$SourceSecurityGroupName": "

    [EC2-Classic, default VPC] The name of the source security group. You can't specify this parameter in combination with the following parameters: the CIDR IP address range, the start of the port range, the IP protocol, and the end of the port range. For EC2-VPC, the source security group must be in the same VPC. To revoke a specific rule for an IP protocol and port range, use a set of IP permissions instead.

    ", + "RevokeSecurityGroupIngressRequest$SourceSecurityGroupOwnerId": "

    [EC2-Classic] The AWS account ID of the source security group, if the source security group is in a different account. You can't specify this parameter in combination with the following parameters: the CIDR IP address range, the IP protocol, the start of the port range, and the end of the port range. To revoke a specific rule for an IP protocol and port range, use a set of IP permissions instead.

    ", + "RevokeSecurityGroupIngressRequest$IpProtocol": "

    The IP protocol name (tcp, udp, icmp) or number (see Protocol Numbers). Use -1 to specify all.

    ", + "RevokeSecurityGroupIngressRequest$CidrIp": "

    The CIDR IP address range. You can't specify this parameter when specifying a source security group.

    ", + "Route$DestinationCidrBlock": "

    The CIDR block used for the destination match.

    ", + "Route$DestinationPrefixListId": "

    The prefix of the AWS service.

    ", + "Route$GatewayId": "

    The ID of a gateway attached to your VPC.

    ", + "Route$InstanceId": "

    The ID of a NAT instance in your VPC.

    ", + "Route$InstanceOwnerId": "

    The AWS account ID of the owner of the instance.

    ", + "Route$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "Route$VpcPeeringConnectionId": "

    The ID of the VPC peering connection.

    ", + "Route$NatGatewayId": "

    The ID of a NAT gateway.

    ", + "RouteTable$RouteTableId": "

    The ID of the route table.

    ", + "RouteTable$VpcId": "

    The ID of the VPC.

    ", + "RouteTableAssociation$RouteTableAssociationId": "

    The ID of the association between a route table and a subnet.

    ", + "RouteTableAssociation$RouteTableId": "

    The ID of the route table.

    ", + "RouteTableAssociation$SubnetId": "

    The ID of the subnet. A subnet ID is not returned for an implicit association.

    ", + "RunInstancesRequest$ImageId": "

    The ID of the AMI, which you can get by calling DescribeImages.

    ", + "RunInstancesRequest$KeyName": "

    The name of the key pair. You can create a key pair using CreateKeyPair or ImportKeyPair.

    If you do not specify a key pair, you can't connect to the instance unless you choose an AMI that is configured to allow users another way to log in.

    ", + "RunInstancesRequest$UserData": "

    The user data to make available to the instance. For more information, see Running Commands on Your Linux Instance at Launch (Linux) and Adding User Data (Windows). If you are using an AWS SDK or command line tool, Base64-encoding is performed for you, and you can load the text from a file. Otherwise, you must provide Base64-encoded text.

    ", + "RunInstancesRequest$KernelId": "

    The ID of the kernel.

    We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB in the Amazon Elastic Compute Cloud User Guide.

    ", + "RunInstancesRequest$RamdiskId": "

    The ID of the RAM disk.

    We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB in the Amazon Elastic Compute Cloud User Guide.

    ", + "RunInstancesRequest$SubnetId": "

    [EC2-VPC] The ID of the subnet to launch the instance into.

    ", + "RunInstancesRequest$PrivateIpAddress": "

    [EC2-VPC] The primary IP address. You must specify a value from the IP address range of the subnet.

    Only one private IP address can be designated as primary. Therefore, you can't specify this parameter if PrivateIpAddresses.n.Primary is set to true and PrivateIpAddresses.n.PrivateIpAddress is set to an IP address.

    Default: We select an IP address from the IP address range of the subnet.

    ", + "RunInstancesRequest$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

    Constraints: Maximum 64 ASCII characters
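
    Tying the UserData and ClientToken descriptions above together, a minimal aws-sdk-go sketch; the AMI ID and token are hypothetical, and note that with this Go SDK the caller typically supplies the Base64-encoded user data rather than relying on the SDK to encode it:

        package main

        import (
            "encoding/base64"
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/ec2"
        )

        func main() {
            svc := ec2.New(session.Must(session.NewSession()))

            // UserData is a string field that the caller Base64-encodes before sending.
            userData := base64.StdEncoding.EncodeToString(
                []byte("#!/bin/bash\necho hello > /tmp/hello\n"))

            resp, err := svc.RunInstances(&ec2.RunInstancesInput{
                ImageId:      aws.String("ami-12345678"), // hypothetical AMI
                InstanceType: aws.String("t2.micro"),
                MinCount:     aws.Int64(1),
                MaxCount:     aws.Int64(1),
                UserData:     aws.String(userData),
                // Reusing the same token makes retries idempotent (max 64 ASCII chars).
                ClientToken: aws.String("demo-launch-0001"), // hypothetical
            })
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println(len(resp.Instances), "instance(s) launched")
        }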

    ", + "RunInstancesRequest$AdditionalInfo": "

    Reserved.

    ", + "RunScheduledInstancesRequest$ClientToken": "

    Unique, case-sensitive identifier that ensures the idempotency of the request. For more information, see Ensuring Idempotency.

    ", + "RunScheduledInstancesRequest$ScheduledInstanceId": "

    The Scheduled Instance ID.

    ", + "S3Storage$Bucket": "

    The bucket in which to store the AMI. You can specify a bucket that you already own or a new bucket that Amazon EC2 creates on your behalf. If you specify a bucket that belongs to someone else, Amazon EC2 returns an error.

    ", + "S3Storage$Prefix": "

    The beginning of the file name of the AMI.

    ", + "S3Storage$AWSAccessKeyId": "

    The access key ID of the owner of the bucket. Before you specify a value for your access key ID, review and follow the guidance in Best Practices for Managing AWS Access Keys.

    ", + "S3Storage$UploadPolicySignature": "

    The signature of the JSON document.

    ", + "ScheduledInstance$ScheduledInstanceId": "

    The Scheduled Instance ID.

    ", + "ScheduledInstance$InstanceType": "

    The instance type.

    ", + "ScheduledInstance$Platform": "

    The platform (Linux/UNIX or Windows).

    ", + "ScheduledInstance$NetworkPlatform": "

    The network platform (EC2-Classic or EC2-VPC).

    ", + "ScheduledInstance$AvailabilityZone": "

    The Availability Zone.

    ", + "ScheduledInstance$HourlyPrice": "

    The hourly price for a single instance.

    ", + "ScheduledInstanceAvailability$InstanceType": "

    The instance type. You can specify one of the C3, C4, M4, or R3 instance types.

    ", + "ScheduledInstanceAvailability$Platform": "

    The platform (Linux/UNIX or Windows).

    ", + "ScheduledInstanceAvailability$NetworkPlatform": "

    The network platform (EC2-Classic or EC2-VPC).

    ", + "ScheduledInstanceAvailability$AvailabilityZone": "

    The Availability Zone.

    ", + "ScheduledInstanceAvailability$PurchaseToken": "

    The purchase token. This token expires in two hours.

    ", + "ScheduledInstanceAvailability$HourlyPrice": "

    The hourly price for a single instance.

    ", + "ScheduledInstanceIdRequestSet$member": null, + "ScheduledInstanceRecurrence$Frequency": "

    The frequency (Daily, Weekly, or Monthly).

    ", + "ScheduledInstanceRecurrence$OccurrenceUnit": "

    The unit for occurrenceDaySet (DayOfWeek or DayOfMonth).

    ", + "ScheduledInstanceRecurrenceRequest$Frequency": "

    The frequency (Daily, Weekly, or Monthly).

    ", + "ScheduledInstanceRecurrenceRequest$OccurrenceUnit": "

    The unit for OccurrenceDays (DayOfWeek or DayOfMonth). This value is required for a monthly schedule. You can't specify DayOfWeek with a weekly schedule. You can't specify this value with a daily schedule.

    ", + "ScheduledInstancesBlockDeviceMapping$DeviceName": "

    The device name exposed to the instance (for example, /dev/sdh or xvdh).

    ", + "ScheduledInstancesBlockDeviceMapping$NoDevice": "

    Suppresses the specified device included in the block device mapping of the AMI.

    ", + "ScheduledInstancesBlockDeviceMapping$VirtualName": "

    The virtual device name (ephemeralN). Instance store volumes are numbered starting from 0. An instance type with two available instance store volumes can specify mappings for ephemeral0 and ephemeral1. The number of available instance store volumes depends on the instance type. After you connect to the instance, you must mount the volume.

    Constraints: For M3 instances, you must specify instance store volumes in the block device mapping for the instance. When you launch an M3 instance, we ignore any instance store volumes specified in the block device mapping for the AMI.

    ", + "ScheduledInstancesEbs$SnapshotId": "

    The ID of the snapshot.

    ", + "ScheduledInstancesEbs$VolumeType": "

    The volume type: gp2 for General Purpose SSD, io1 for Provisioned IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard for Magnetic.

    Default: standard

    ", + "ScheduledInstancesIamInstanceProfile$Arn": "

    The Amazon Resource Name (ARN).

    ", + "ScheduledInstancesIamInstanceProfile$Name": "

    The name.

    ", + "ScheduledInstancesLaunchSpecification$ImageId": "

    The ID of the Amazon Machine Image (AMI).

    ", + "ScheduledInstancesLaunchSpecification$KeyName": "

    The name of the key pair.

    ", + "ScheduledInstancesLaunchSpecification$UserData": "

    The base64-encoded MIME user data.

    ", + "ScheduledInstancesLaunchSpecification$KernelId": "

    The ID of the kernel.

    ", + "ScheduledInstancesLaunchSpecification$InstanceType": "

    The instance type.

    ", + "ScheduledInstancesLaunchSpecification$RamdiskId": "

    The ID of the RAM disk.

    ", + "ScheduledInstancesLaunchSpecification$SubnetId": "

    The ID of the subnet in which to launch the instances.

    ", + "ScheduledInstancesNetworkInterface$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "ScheduledInstancesNetworkInterface$SubnetId": "

    The ID of the subnet.

    ", + "ScheduledInstancesNetworkInterface$Description": "

    The description.

    ", + "ScheduledInstancesNetworkInterface$PrivateIpAddress": "

    The IP address of the network interface within the subnet.

    ", + "ScheduledInstancesPlacement$AvailabilityZone": "

    The Availability Zone.

    ", + "ScheduledInstancesPlacement$GroupName": "

    The name of the placement group.

    ", + "ScheduledInstancesPrivateIpAddressConfig$PrivateIpAddress": "

    The IP address.

    ", + "ScheduledInstancesSecurityGroupIdSet$member": null, + "SecurityGroup$OwnerId": "

    The AWS account ID of the owner of the security group.

    ", + "SecurityGroup$GroupName": "

    The name of the security group.

    ", + "SecurityGroup$GroupId": "

    The ID of the security group.

    ", + "SecurityGroup$Description": "

    A description of the security group.

    ", + "SecurityGroup$VpcId": "

    [EC2-VPC] The ID of the VPC for the security group.

    ", + "SecurityGroupIdStringList$member": null, + "SecurityGroupReference$GroupId": "

    The ID of your security group.

    ", + "SecurityGroupReference$ReferencingVpcId": "

    The ID of the VPC with the referencing security group.

    ", + "SecurityGroupReference$VpcPeeringConnectionId": "

    The ID of the VPC peering connection.

    ", + "SecurityGroupStringList$member": null, + "Snapshot$SnapshotId": "

    The ID of the snapshot. Each snapshot receives a unique identifier when it is created.

    ", + "Snapshot$VolumeId": "

    The ID of the volume that was used to create the snapshot.

    ", + "Snapshot$StateMessage": "

    Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy operation fails (for example, if the proper AWS Key Management Service (AWS KMS) permissions are not obtained), this field displays error state details to help you diagnose why the error occurred. This parameter is only returned by the DescribeSnapshots API operation.

    ", + "Snapshot$Progress": "

    The progress of the snapshot, as a percentage.

    ", + "Snapshot$OwnerId": "

    The AWS account ID of the EBS snapshot owner.

    ", + "Snapshot$Description": "

    The description for the snapshot.

    ", + "Snapshot$OwnerAlias": "

    The AWS account alias (for example, amazon, self) or AWS account ID that owns the snapshot.

    ", + "Snapshot$KmsKeyId": "

    The full ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to protect the volume encryption key for the parent volume.

    ", + "Snapshot$DataEncryptionKeyId": "

    The data encryption key identifier for the snapshot. This value is a unique identifier that corresponds to the data encryption key that was used to encrypt the original volume or snapshot copy. Because data encryption keys are inherited by volumes created from snapshots, and vice versa, if snapshots share the same data encryption key identifier, then they belong to the same volume/snapshot lineage. This parameter is only returned by the DescribeSnapshots API operation.

    ", + "SnapshotDetail$Description": "

    A description for the snapshot.

    ", + "SnapshotDetail$Format": "

    The format of the disk image from which the snapshot is created.

    ", + "SnapshotDetail$Url": "

    The URL used to access the disk image.

    ", + "SnapshotDetail$DeviceName": "

    The block device mapping for the snapshot.

    ", + "SnapshotDetail$SnapshotId": "

    The snapshot ID of the disk being imported.

    ", + "SnapshotDetail$Progress": "

    The percentage of progress for the task.

    ", + "SnapshotDetail$StatusMessage": "

    A detailed status message for the snapshot creation.

    ", + "SnapshotDetail$Status": "

    A brief status of the snapshot creation.

    ", + "SnapshotDiskContainer$Description": "

    The description of the disk image being imported.

    ", + "SnapshotDiskContainer$Format": "

    The format of the disk image being imported.

    Valid values: RAW | VHD | VMDK | OVA

    ", + "SnapshotDiskContainer$Url": "

    The URL to the Amazon S3-based disk image being imported. It can be either an https URL (https://..) or an Amazon S3 URL (s3://..).

    ", + "SnapshotIdStringList$member": null, + "SnapshotTaskDetail$Description": "

    The description of the snapshot.

    ", + "SnapshotTaskDetail$Format": "

    The format of the disk image from which the snapshot is created.

    ", + "SnapshotTaskDetail$Url": "

    The URL of the disk image from which the snapshot is created.

    ", + "SnapshotTaskDetail$SnapshotId": "

    The snapshot ID of the disk being imported.

    ", + "SnapshotTaskDetail$Progress": "

    The percentage of completion for the import snapshot task.

    ", + "SnapshotTaskDetail$StatusMessage": "

    A detailed status message for the import snapshot task.

    ", + "SnapshotTaskDetail$Status": "

    A brief status for the import snapshot task.

    ", + "SpotDatafeedSubscription$OwnerId": "

    The AWS account ID of the account.

    ", + "SpotDatafeedSubscription$Bucket": "

    The Amazon S3 bucket where the Spot instance data feed is located.

    ", + "SpotDatafeedSubscription$Prefix": "

    The prefix that is prepended to data feed files.

    ", + "SpotFleetLaunchSpecification$ImageId": "

    The ID of the AMI.

    ", + "SpotFleetLaunchSpecification$KeyName": "

    The name of the key pair.

    ", + "SpotFleetLaunchSpecification$UserData": "

    The user data to make available to the instances. If you are using an AWS SDK or command line tool, Base64-encoding is performed for you, and you can load the text from a file. Otherwise, you must provide Base64-encoded text.

    ", + "SpotFleetLaunchSpecification$AddressingType": "

    Deprecated.

    ", + "SpotFleetLaunchSpecification$KernelId": "

    The ID of the kernel.

    ", + "SpotFleetLaunchSpecification$RamdiskId": "

    The ID of the RAM disk.

    ", + "SpotFleetLaunchSpecification$SubnetId": "

    The ID of the subnet in which to launch the instances. To specify multiple subnets, separate them using commas; for example, \"subnet-a61dafcf, subnet-65ea5f08\".

    ", + "SpotFleetLaunchSpecification$SpotPrice": "

    The bid price per unit hour for the specified instance type. If this value is not specified, the default is the Spot bid price specified for the fleet. To determine the bid price per unit hour, divide the Spot bid price by the value of WeightedCapacity.
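
    Spelling out that division with made-up numbers, a tiny sketch:

        package main

        import "fmt"

        func main() {
            // Hypothetical fleet-level bid and instance weight.
            spotPrice := 0.50       // USD per hour for the fleet's bid
            weightedCapacity := 4.0 // capacity units this instance type provides

            // Bid price per unit hour = Spot bid price / WeightedCapacity.
            fmt.Println(spotPrice / weightedCapacity) // 0.125
        }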

    ", + "SpotFleetRequestConfig$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "SpotFleetRequestConfigData$ClientToken": "

    A unique, case-sensitive identifier you provide to ensure idempotency of your listings. This helps avoid duplicate listings. For more information, see Ensuring Idempotency.

    ", + "SpotFleetRequestConfigData$SpotPrice": "

    The bid price per unit hour.

    ", + "SpotFleetRequestConfigData$IamFleetRole": "

    Grants the Spot fleet permission to terminate Spot instances on your behalf when you cancel its Spot fleet request using CancelSpotFleetRequests or when the Spot fleet request expires, if you set terminateInstancesWithExpiration.

    ", + "SpotInstanceRequest$SpotInstanceRequestId": "

    The ID of the Spot instance request.

    ", + "SpotInstanceRequest$SpotPrice": "

    The maximum hourly price (bid) for the Spot instance launched to fulfill the request.

    ", + "SpotInstanceRequest$LaunchGroup": "

    The instance launch group. Launch groups are Spot instances that launch together and terminate together.

    ", + "SpotInstanceRequest$AvailabilityZoneGroup": "

    The Availability Zone group. If you specify the same Availability Zone group for all Spot instance requests, all Spot instances are launched in the same Availability Zone.

    ", + "SpotInstanceRequest$InstanceId": "

    The instance ID, if an instance has been launched to fulfill the Spot instance request.

    ", + "SpotInstanceRequest$ActualBlockHourlyPrice": "

    If you specified a duration and your Spot instance request was fulfilled, this is the fixed hourly price in effect for the Spot instance while it runs.

    ", + "SpotInstanceRequest$LaunchedAvailabilityZone": "

    The Availability Zone in which the bid is launched.

    ", + "SpotInstanceRequestIdList$member": null, + "SpotInstanceStateFault$Code": "

    The reason code for the Spot instance state change.

    ", + "SpotInstanceStateFault$Message": "

    The message for the Spot instance state change.

    ", + "SpotInstanceStatus$Code": "

    The status code. For a list of status codes, see Spot Bid Status Codes in the Amazon Elastic Compute Cloud User Guide.

    ", + "SpotInstanceStatus$Message": "

    The description for the status code.

    ", + "SpotPlacement$AvailabilityZone": "

    The Availability Zone.

    [Spot fleet only] To specify multiple Availability Zones, separate them using commas; for example, \"us-west-2a, us-west-2b\".

    ", + "SpotPlacement$GroupName": "

    The name of the placement group (for cluster instances).

    ", + "SpotPrice$SpotPrice": "

    The maximum price (bid) that you are willing to pay for a Spot instance.

    ", + "SpotPrice$AvailabilityZone": "

    The Availability Zone.

    ", + "StaleIpPermission$IpProtocol": "

    The IP protocol name (for tcp, udp, and icmp) or number (see Protocol Numbers).

    ", + "StaleSecurityGroup$GroupId": "

    The ID of the security group.

    ", + "StaleSecurityGroup$GroupName": "

    The name of the security group.

    ", + "StaleSecurityGroup$Description": "

    The description of the security group.

    ", + "StaleSecurityGroup$VpcId": "

    The ID of the VPC for the security group.

    ", + "StartInstancesRequest$AdditionalInfo": "

    Reserved.

    ", + "StateReason$Code": "

    The reason code for the state change.

    ", + "StateReason$Message": "

    The message for the state change.

    • Server.SpotInstanceTermination: A Spot instance was terminated due to an increase in the market price.

    • Server.InternalError: An internal error occurred during instance launch, resulting in termination.

    • Server.InsufficientInstanceCapacity: There was insufficient instance capacity to satisfy the launch request.

    • Client.InternalError: A client error caused the instance to terminate on launch.

    • Client.InstanceInitiatedShutdown: The instance was shut down using the shutdown -h command from the instance.

    • Client.UserInitiatedShutdown: The instance was shut down using the Amazon EC2 API.

    • Client.VolumeLimitExceeded: The limit on the number of EBS volumes or total storage was exceeded. Decrease usage or request an increase in your limits.

    • Client.InvalidSnapshot.NotFound: The specified snapshot was not found.
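
    These codes arrive on DescribeInstances results as the instance's StateReason. A small sketch of surfacing them (a hypothetical helper, reusing the imports and client setup from the first sketch above):

        // printStateReason reports why an instance changed state, e.g.
        // Server.SpotInstanceTermination or Client.VolumeLimitExceeded.
        func printStateReason(svc *ec2.EC2, instanceID string) error {
                out, err := svc.DescribeInstances(&ec2.DescribeInstancesInput{
                        InstanceIds: []*string{aws.String(instanceID)},
                })
                if err != nil {
                        return err
                }
                for _, res := range out.Reservations {
                        for _, inst := range res.Instances {
                                if inst.StateReason == nil {
                                        continue // no state-change reason recorded
                                }
                                fmt.Printf("%s: %s (%s)\n",
                                        aws.StringValue(inst.InstanceId),
                                        aws.StringValue(inst.StateReason.Code),
                                        aws.StringValue(inst.StateReason.Message))
                        }
                }
                return nil
        }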

    ", + "Subnet$SubnetId": "

    The ID of the subnet.

    ", + "Subnet$VpcId": "

    The ID of the VPC the subnet is in.

    ", + "Subnet$CidrBlock": "

    The CIDR block assigned to the subnet.

    ", + "Subnet$AvailabilityZone": "

    The Availability Zone of the subnet.

    ", + "SubnetIdStringList$member": null, + "Tag$Key": "

    The key of the tag.

    Constraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode characters. May not begin with aws:

    ", + "Tag$Value": "

    The value of the tag.

    Constraints: Tag values are case-sensitive and accept a maximum of 255 Unicode characters.

    ", + "TagDescription$ResourceId": "

    The ID of the resource. For example, ami-1a2b3c4d.

    ", + "TagDescription$Key": "

    The tag key.

    ", + "TagDescription$Value": "

    The tag value.

    ", + "UnassignPrivateIpAddressesRequest$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "UnsuccessfulItem$ResourceId": "

    The ID of the resource.

    ", + "UnsuccessfulItemError$Code": "

    The error code.

    ", + "UnsuccessfulItemError$Message": "

    The error message accompanying the error code.

    ", + "UserBucket$S3Bucket": "

    The name of the S3 bucket where the disk image is located.

    ", + "UserBucket$S3Key": "

    The file name of the disk image.

    ", + "UserBucketDetails$S3Bucket": "

    The S3 bucket from which the disk image was created.

    ", + "UserBucketDetails$S3Key": "

    The file name of the disk image.

    ", + "UserData$Data": "

    The user data. If you are using an AWS SDK or command line tool, Base64-encoding is performed for you, and you can load the text from a file. Otherwise, you must provide Base64-encoded text.
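
    With aws-sdk-go specifically, RunInstances passes this field through untouched, so Base64-encoding it yourself is the safe path. A sketch with a placeholder AMI, reusing the earlier setup plus encoding/base64:

        // runWithUserData launches an instance with a shell script as user data.
        func runWithUserData(svc *ec2.EC2) error {
                script := "#!/bin/bash\necho hello > /tmp/hello.txt\n"
                _, err := svc.RunInstances(&ec2.RunInstancesInput{
                        ImageId:      aws.String("ami-12345678"),
                        InstanceType: aws.String("t2.micro"),
                        MinCount:     aws.Int64(1),
                        MaxCount:     aws.Int64(1),
                        // The Go SDK sends this as-is, so Base64-encode it here.
                        UserData: aws.String(base64.StdEncoding.EncodeToString([]byte(script))),
                })
                return err
        }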

    ", + "UserGroupStringList$member": null, + "UserIdGroupPair$UserId": "

    The ID of an AWS account. For a referenced security group in another VPC, the account ID of the referenced security group is returned.

    [EC2-Classic] Required when adding or removing rules that reference a security group in another AWS account.

    ", + "UserIdGroupPair$GroupName": "

    The name of the security group. In a request, use this parameter for a security group in EC2-Classic or a default VPC only. For a security group in a nondefault VPC, use the security group ID.

    ", + "UserIdGroupPair$GroupId": "

    The ID of the security group.

    ", + "UserIdGroupPair$VpcId": "

    The ID of the VPC for the referenced security group, if applicable.

    ", + "UserIdGroupPair$VpcPeeringConnectionId": "

    The ID of the VPC peering connection, if applicable.

    ", + "UserIdGroupPair$PeeringStatus": "

    The status of a VPC peering connection, if applicable.

    ", + "UserIdStringList$member": null, + "ValueStringList$member": null, + "VgwTelemetry$OutsideIpAddress": "

    The Internet-routable IP address of the virtual private gateway's outside interface.

    ", + "VgwTelemetry$StatusMessage": "

    If an error occurs, a description of the error.

    ", + "Volume$VolumeId": "

    The ID of the volume.

    ", + "Volume$SnapshotId": "

    The snapshot from which the volume was created, if applicable.

    ", + "Volume$AvailabilityZone": "

    The Availability Zone for the volume.

    ", + "Volume$KmsKeyId": "

    The full ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to protect the volume encryption key for the volume.

    ", + "VolumeAttachment$VolumeId": "

    The ID of the volume.

    ", + "VolumeAttachment$InstanceId": "

    The ID of the instance.

    ", + "VolumeAttachment$Device": "

    The device name.

    ", + "VolumeIdStringList$member": null, + "VolumeStatusAction$Code": "

    The code identifying the operation, for example, enable-volume-io.

    ", + "VolumeStatusAction$Description": "

    A description of the operation.

    ", + "VolumeStatusAction$EventType": "

    The event type associated with this operation.

    ", + "VolumeStatusAction$EventId": "

    The ID of the event associated with this operation.

    ", + "VolumeStatusDetails$Status": "

    The intended status of the volume status check.

    ", + "VolumeStatusEvent$EventType": "

    The type of this event.

    ", + "VolumeStatusEvent$Description": "

    A description of the event.

    ", + "VolumeStatusEvent$EventId": "

    The ID of this event.

    ", + "VolumeStatusItem$VolumeId": "

    The volume ID.

    ", + "VolumeStatusItem$AvailabilityZone": "

    The Availability Zone of the volume.

    ", + "Vpc$VpcId": "

    The ID of the VPC.

    ", + "Vpc$CidrBlock": "

    The CIDR block for the VPC.

    ", + "Vpc$DhcpOptionsId": "

    The ID of the set of DHCP options you've associated with the VPC (or default if the default options are associated with the VPC).

    ", + "VpcAttachment$VpcId": "

    The ID of the VPC.

    ", + "VpcClassicLink$VpcId": "

    The ID of the VPC.

    ", + "VpcClassicLinkIdList$member": null, + "VpcEndpoint$VpcEndpointId": "

    The ID of the VPC endpoint.

    ", + "VpcEndpoint$VpcId": "

    The ID of the VPC with which the endpoint is associated.

    ", + "VpcEndpoint$ServiceName": "

    The name of the AWS service with which the endpoint is associated.

    ", + "VpcEndpoint$PolicyDocument": "

    The policy document associated with the endpoint.

    ", + "VpcIdStringList$member": null, + "VpcPeeringConnection$VpcPeeringConnectionId": "

    The ID of the VPC peering connection.

    ", + "VpcPeeringConnectionStateReason$Message": "

    A message that provides more information about the status, if applicable.

    ", + "VpcPeeringConnectionVpcInfo$CidrBlock": "

    The CIDR block for the VPC.

    ", + "VpcPeeringConnectionVpcInfo$OwnerId": "

    The AWS account ID of the VPC owner.

    ", + "VpcPeeringConnectionVpcInfo$VpcId": "

    The ID of the VPC.

    ", + "VpnConnection$VpnConnectionId": "

    The ID of the VPN connection.

    ", + "VpnConnection$CustomerGatewayConfiguration": "

    The configuration information for the VPN connection's customer gateway (in the native XML format). This element is always present in the CreateVpnConnection response; however, it's present in the DescribeVpnConnections response only if the VPN connection is in the pending or available state.

    ", + "VpnConnection$CustomerGatewayId": "

    The ID of the customer gateway at your end of the VPN connection.

    ", + "VpnConnection$VpnGatewayId": "

    The ID of the virtual private gateway at the AWS side of the VPN connection.

    ", + "VpnConnectionIdStringList$member": null, + "VpnGateway$VpnGatewayId": "

    The ID of the virtual private gateway.

    ", + "VpnGateway$AvailabilityZone": "

    The Availability Zone where the virtual private gateway was created, if applicable. This field may be empty or not returned.

    ", + "VpnGatewayIdStringList$member": null, + "VpnStaticRoute$DestinationCidrBlock": "

    The CIDR block associated with the local subnet of the customer data center.

    ", + "ZoneNameStringList$member": null + } + }, + "Subnet": { + "base": "

    Describes a subnet.

    ", + "refs": { + "CreateSubnetResult$Subnet": "

    Information about the subnet.

    ", + "SubnetList$member": null + } + }, + "SubnetIdStringList": { + "base": null, + "refs": { + "DescribeSubnetsRequest$SubnetIds": "

    One or more subnet IDs.

    Default: Describes all your subnets.

    " + } + }, + "SubnetList": { + "base": null, + "refs": { + "DescribeSubnetsResult$Subnets": "

    Information about one or more subnets.

    " + } + }, + "SubnetState": { + "base": null, + "refs": { + "Subnet$State": "

    The current state of the subnet.

    " + } + }, + "SummaryStatus": { + "base": null, + "refs": { + "InstanceStatusSummary$Status": "

    The status.

    " + } + }, + "Tag": { + "base": "

    Describes a tag.

    ", + "refs": { + "TagList$member": null + } + }, + "TagDescription": { + "base": "

    Describes a tag.

    ", + "refs": { + "TagDescriptionList$member": null + } + }, + "TagDescriptionList": { + "base": null, + "refs": { + "DescribeTagsResult$Tags": "

    A list of tags.

    " + } + }, + "TagList": { + "base": null, + "refs": { + "ClassicLinkInstance$Tags": "

    Any tags assigned to the instance.

    ", + "ConversionTask$Tags": "

    Any tags assigned to the task.

    ", + "CreateTagsRequest$Tags": "

    One or more tags. The value parameter is required, but if you don't want the tag to have a value, specify the parameter with no value, and we set the value to an empty string.
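
    In Go this looks like the sketch below (placeholder resource ID, reusing the earlier setup); it also shows the delete-by-key behavior described under DeleteTagsRequest$Tags just after this entry:

        func tagAndUntag(svc *ec2.EC2, resourceID string) error {
                // Value is required; an empty string is stored when you don't want one.
                _, err := svc.CreateTags(&ec2.CreateTagsInput{
                        Resources: []*string{aws.String(resourceID)},
                        Tags: []*ec2.Tag{
                                {Key: aws.String("Stack"), Value: aws.String("production")},
                                {Key: aws.String("Temporary"), Value: aws.String("")},
                        },
                })
                if err != nil {
                        return err
                }
                // Omitting Value deletes the tag regardless of its current value.
                _, err = svc.DeleteTags(&ec2.DeleteTagsInput{
                        Resources: []*string{aws.String(resourceID)},
                        Tags:      []*ec2.Tag{{Key: aws.String("Temporary")}},
                })
                return err
        }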

    ", + "CustomerGateway$Tags": "

    Any tags assigned to the customer gateway.

    ", + "DeleteTagsRequest$Tags": "

    One or more tags to delete. If you omit the value parameter, we delete the tag regardless of its value. If you specify this parameter with an empty string as the value, we delete the key only if its value is an empty string.

    ", + "DhcpOptions$Tags": "

    Any tags assigned to the DHCP options set.

    ", + "Image$Tags": "

    Any tags assigned to the image.

    ", + "Instance$Tags": "

    Any tags assigned to the instance.

    ", + "InternetGateway$Tags": "

    Any tags assigned to the Internet gateway.

    ", + "NetworkAcl$Tags": "

    Any tags assigned to the network ACL.

    ", + "NetworkInterface$TagSet": "

    Any tags assigned to the network interface.

    ", + "ReservedInstances$Tags": "

    Any tags assigned to the resource.

    ", + "ReservedInstancesListing$Tags": "

    Any tags assigned to the resource.

    ", + "RouteTable$Tags": "

    Any tags assigned to the route table.

    ", + "SecurityGroup$Tags": "

    Any tags assigned to the security group.

    ", + "Snapshot$Tags": "

    Any tags assigned to the snapshot.

    ", + "SpotInstanceRequest$Tags": "

    Any tags assigned to the resource.

    ", + "Subnet$Tags": "

    Any tags assigned to the subnet.

    ", + "Volume$Tags": "

    Any tags assigned to the volume.

    ", + "Vpc$Tags": "

    Any tags assigned to the VPC.

    ", + "VpcClassicLink$Tags": "

    Any tags assigned to the VPC.

    ", + "VpcPeeringConnection$Tags": "

    Any tags assigned to the resource.

    ", + "VpnConnection$Tags": "

    Any tags assigned to the VPN connection.

    ", + "VpnGateway$Tags": "

    Any tags assigned to the virtual private gateway.

    " + } + }, + "TelemetryStatus": { + "base": null, + "refs": { + "VgwTelemetry$Status": "

    The status of the VPN tunnel.

    " + } + }, + "Tenancy": { + "base": null, + "refs": { + "CreateVpcRequest$InstanceTenancy": "

    The tenancy options for instances launched into the VPC. For default, instances are launched with shared tenancy by default. You can launch instances with any tenancy into a shared tenancy VPC. For dedicated, instances are launched as dedicated tenancy instances by default. You can only launch instances with a tenancy of dedicated or host into a dedicated tenancy VPC.

    Important: The host value cannot be used with this parameter. Use the default or dedicated values only.

    Default: default
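
    A sketch of setting tenancy at VPC creation (CIDR is a placeholder, reusing the earlier setup); per the note above, only default or dedicated are accepted here, never host:

        func createDedicatedVpc(svc *ec2.EC2) (*ec2.Vpc, error) {
                out, err := svc.CreateVpc(&ec2.CreateVpcInput{
                        CidrBlock: aws.String("10.0.0.0/16"),
                        // "default" or "dedicated"; "host" is not valid for this parameter.
                        InstanceTenancy: aws.String("dedicated"),
                })
                if err != nil {
                        return nil, err
                }
                return out.Vpc, nil
        }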

    ", + "DescribeReservedInstancesOfferingsRequest$InstanceTenancy": "

    The tenancy of the instances covered by the reservation. A Reserved Instance with a tenancy of dedicated is applied to instances that run in a VPC on single-tenant hardware (i.e., Dedicated Instances).

    Default: default

    ", + "Placement$Tenancy": "

    The tenancy of the instance (if the instance is running in a VPC). An instance with a tenancy of dedicated runs on single-tenant hardware. The host tenancy is not supported for the ImportInstance command.

    ", + "ReservedInstances$InstanceTenancy": "

    The tenancy of the instance.

    ", + "ReservedInstancesOffering$InstanceTenancy": "

    The tenancy of the instance.

    ", + "Vpc$InstanceTenancy": "

    The allowed tenancy of instances launched into the VPC.

    " + } + }, + "TerminateInstancesRequest": { + "base": "

    Contains the parameters for TerminateInstances.

    ", + "refs": { + } + }, + "TerminateInstancesResult": { + "base": "

    Contains the output of TerminateInstances.

    ", + "refs": { + } + }, + "TrafficType": { + "base": null, + "refs": { + "CreateFlowLogsRequest$TrafficType": "

    The type of traffic to log.

    ", + "FlowLog$TrafficType": "

    The type of traffic captured for the flow log.

    " + } + }, + "UnassignPrivateIpAddressesRequest": { + "base": "

    Contains the parameters for UnassignPrivateIpAddresses.

    ", + "refs": { + } + }, + "UnmonitorInstancesRequest": { + "base": "

    Contains the parameters for UnmonitorInstances.

    ", + "refs": { + } + }, + "UnmonitorInstancesResult": { + "base": "

    Contains the output of UnmonitorInstances.

    ", + "refs": { + } + }, + "UnsuccessfulItem": { + "base": "

    Information about items that were not successfully processed in a batch call.

    ", + "refs": { + "UnsuccessfulItemList$member": null, + "UnsuccessfulItemSet$member": null + } + }, + "UnsuccessfulItemError": { + "base": "

    Information about the error that occurred. For more information about errors, see Error Codes.

    ", + "refs": { + "UnsuccessfulItem$Error": "

    Information about the error.

    " + } + }, + "UnsuccessfulItemList": { + "base": null, + "refs": { + "ModifyHostsResult$Unsuccessful": "

    The IDs of the Dedicated hosts that could not be modified. Check whether the setting you requested can be used.

    ", + "ReleaseHostsResult$Unsuccessful": "

    The IDs of the Dedicated hosts that could not be released, including an error message.

    " + } + }, + "UnsuccessfulItemSet": { + "base": null, + "refs": { + "CreateFlowLogsResult$Unsuccessful": "

    Information about the flow logs that could not be created successfully.

    ", + "DeleteFlowLogsResult$Unsuccessful": "

    Information about the flow logs that could not be deleted successfully.

    ", + "DeleteVpcEndpointsResult$Unsuccessful": "

    Information about the endpoints that were not successfully deleted.

    " + } + }, + "UserBucket": { + "base": "

    Describes the S3 bucket for the disk image.

    ", + "refs": { + "ImageDiskContainer$UserBucket": "

    The S3 bucket for the disk image.

    ", + "SnapshotDiskContainer$UserBucket": "

    The S3 bucket for the disk image.

    " + } + }, + "UserBucketDetails": { + "base": "

    Describes the S3 bucket for the disk image.

    ", + "refs": { + "SnapshotDetail$UserBucket": "

    The S3 bucket for the disk image.

    ", + "SnapshotTaskDetail$UserBucket": "

    The S3 bucket for the disk image.

    " + } + }, + "UserData": { + "base": "

    Describes the user data for an instance.

    ", + "refs": { + "ImportInstanceLaunchSpecification$UserData": "

    The user data to make available to the instance. If you are using an AWS SDK or command line tool, Base64-encoding is performed for you, and you can load the text from a file. Otherwise, you must provide Base64-encoded text.

    " + } + }, + "UserGroupStringList": { + "base": null, + "refs": { + "ModifyImageAttributeRequest$UserGroups": "

    One or more user groups. This is only valid when modifying the launchPermission attribute.

    " + } + }, + "UserIdGroupPair": { + "base": "

    Describes a security group and AWS account ID pair.

    ", + "refs": { + "UserIdGroupPairList$member": null, + "UserIdGroupPairSet$member": null + } + }, + "UserIdGroupPairList": { + "base": null, + "refs": { + "IpPermission$UserIdGroupPairs": "

    One or more security group and AWS account ID pairs.

    " + } + }, + "UserIdGroupPairSet": { + "base": null, + "refs": { + "StaleIpPermission$UserIdGroupPairs": "

    One or more security group pairs. Returns the ID of the referenced security group and VPC, and the ID and status of the VPC peering connection.

    " + } + }, + "UserIdStringList": { + "base": null, + "refs": { + "ModifyImageAttributeRequest$UserIds": "

    One or more AWS account IDs. This is only valid when modifying the launchPermission attribute.
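
    Both UserGroups (described earlier under UserGroupStringList) and these UserIds apply only to the launchPermission attribute. A sketch granting launch permission to one account (placeholder IDs, reusing the earlier setup):

        func shareAmi(svc *ec2.EC2, imageID, accountID string) error {
                _, err := svc.ModifyImageAttribute(&ec2.ModifyImageAttributeInput{
                        ImageId:       aws.String(imageID),
                        Attribute:     aws.String("launchPermission"),
                        OperationType: aws.String("add"), // or "remove"
                        UserIds:       []*string{aws.String(accountID)},
                })
                return err
        }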

    ", + "ModifySnapshotAttributeRequest$UserIds": "

    The account ID to modify for the snapshot.

    " + } + }, + "ValueStringList": { + "base": null, + "refs": { + "CancelSpotFleetRequestsRequest$SpotFleetRequestIds": "

    The IDs of the Spot fleet requests.

    ", + "CreateFlowLogsRequest$ResourceIds": "

    One or more subnet, network interface, or VPC IDs.

    Constraints: Maximum of 1000 resources
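
    A sketch of CreateFlowLogs under that constraint (log group name and role ARN are placeholders, reusing the earlier setup):

        func enableVpcFlowLogs(svc *ec2.EC2, vpcID string) ([]*string, error) {
                out, err := svc.CreateFlowLogs(&ec2.CreateFlowLogsInput{
                        // Up to 1000 subnet, network interface, or VPC IDs per call.
                        ResourceIds:  []*string{aws.String(vpcID)},
                        ResourceType: aws.String("VPC"),
                        TrafficType:  aws.String("ALL"),
                        LogGroupName: aws.String("my-flow-logs"),
                        DeliverLogsPermissionArn: aws.String("arn:aws:iam::123456789012:role/flow-logs-role"),
                })
                if err != nil {
                        return nil, err
                }
                // Per-resource failures land in out.Unsuccessful (see
                // CreateFlowLogsResult$Unsuccessful described earlier).
                return out.FlowLogIds, nil
        }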

    ", + "CreateFlowLogsResult$FlowLogIds": "

    The IDs of the flow logs.

    ", + "CreateVpcEndpointRequest$RouteTableIds": "

    One or more route table IDs.

    ", + "DeleteFlowLogsRequest$FlowLogIds": "

    One or more flow log IDs.

    ", + "DeleteVpcEndpointsRequest$VpcEndpointIds": "

    One or more endpoint IDs.

    ", + "DescribeFlowLogsRequest$FlowLogIds": "

    One or more flow log IDs.

    ", + "DescribeInternetGatewaysRequest$InternetGatewayIds": "

    One or more Internet gateway IDs.

    Default: Describes all your Internet gateways.

    ", + "DescribeMovingAddressesRequest$PublicIps": "

    One or more Elastic IP addresses.

    ", + "DescribeNatGatewaysRequest$NatGatewayIds": "

    One or more NAT gateway IDs.

    ", + "DescribeNetworkAclsRequest$NetworkAclIds": "

    One or more network ACL IDs.

    Default: Describes all your network ACLs.

    ", + "DescribePrefixListsRequest$PrefixListIds": "

    One or more prefix list IDs.

    ", + "DescribeRouteTablesRequest$RouteTableIds": "

    One or more route table IDs.

    Default: Describes all your route tables.

    ", + "DescribeSpotFleetRequestsRequest$SpotFleetRequestIds": "

    The IDs of the Spot fleet requests.

    ", + "DescribeVpcEndpointServicesResult$ServiceNames": "

    A list of supported AWS services.

    ", + "DescribeVpcEndpointsRequest$VpcEndpointIds": "

    One or more endpoint IDs.

    ", + "DescribeVpcPeeringConnectionsRequest$VpcPeeringConnectionIds": "

    One or more VPC peering connection IDs.

    Default: Describes all your VPC peering connections.

    ", + "Filter$Values": "

    One or more filter values. Filter values are case-sensitive.
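
    Filter names and these case-sensitive values plug into any Describe call. A sketch listing running instances in one Availability Zone, reusing the earlier setup:

        func runningInAz(svc *ec2.EC2, az string) (*ec2.DescribeInstancesOutput, error) {
                return svc.DescribeInstances(&ec2.DescribeInstancesInput{
                        Filters: []*ec2.Filter{
                                {Name: aws.String("instance-state-name"), Values: []*string{aws.String("running")}},
                                // Values are case-sensitive: "us-west-2a" != "US-WEST-2A".
                                {Name: aws.String("availability-zone"), Values: []*string{aws.String(az)}},
                        },
                })
        }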

    ", + "ModifyVpcEndpointRequest$AddRouteTableIds": "

    One or more route table IDs to associate with the endpoint.

    ", + "ModifyVpcEndpointRequest$RemoveRouteTableIds": "

    One or more route table IDs to disassociate from the endpoint.

    ", + "NewDhcpConfiguration$Values": null, + "PrefixList$Cidrs": "

    The IP address range of the AWS service.

    ", + "RequestSpotLaunchSpecification$SecurityGroups": null, + "RequestSpotLaunchSpecification$SecurityGroupIds": null, + "VpcEndpoint$RouteTableIds": "

    One or more route table IDs associated with the endpoint.

    " + } + }, + "VgwTelemetry": { + "base": "

    Describes telemetry for a VPN tunnel.

    ", + "refs": { + "VgwTelemetryList$member": null + } + }, + "VgwTelemetryList": { + "base": null, + "refs": { + "VpnConnection$VgwTelemetry": "

    Information about the VPN tunnel.

    " + } + }, + "VirtualizationType": { + "base": null, + "refs": { + "Image$VirtualizationType": "

    The type of virtualization of the AMI.

    ", + "Instance$VirtualizationType": "

    The virtualization type of the instance.

    " + } + }, + "Volume": { + "base": "

    Describes a volume.

    ", + "refs": { + "VolumeList$member": null + } + }, + "VolumeAttachment": { + "base": "

    Describes volume attachment details.

    ", + "refs": { + "VolumeAttachmentList$member": null + } + }, + "VolumeAttachmentList": { + "base": null, + "refs": { + "Volume$Attachments": "

    Information about the volume attachments.

    " + } + }, + "VolumeAttachmentState": { + "base": null, + "refs": { + "VolumeAttachment$State": "

    The attachment state of the volume.

    " + } + }, + "VolumeAttributeName": { + "base": null, + "refs": { + "DescribeVolumeAttributeRequest$Attribute": "

    The volume attribute.

    " + } + }, + "VolumeDetail": { + "base": "

    Describes an EBS volume.

    ", + "refs": { + "DiskImage$Volume": "

    Information about the volume.

    ", + "ImportVolumeRequest$Volume": "

    The volume size.

    " + } + }, + "VolumeIdStringList": { + "base": null, + "refs": { + "DescribeVolumeStatusRequest$VolumeIds": "

    One or more volume IDs.

    Default: Describes all your volumes.

    ", + "DescribeVolumesRequest$VolumeIds": "

    One or more volume IDs.

    " + } + }, + "VolumeList": { + "base": null, + "refs": { + "DescribeVolumesResult$Volumes": "

    Information about the volumes.

    " + } + }, + "VolumeState": { + "base": null, + "refs": { + "Volume$State": "

    The volume state.

    " + } + }, + "VolumeStatusAction": { + "base": "

    Describes a volume status operation code.

    ", + "refs": { + "VolumeStatusActionsList$member": null + } + }, + "VolumeStatusActionsList": { + "base": null, + "refs": { + "VolumeStatusItem$Actions": "

    The details of the operation.

    " + } + }, + "VolumeStatusDetails": { + "base": "

    Describes a volume status.

    ", + "refs": { + "VolumeStatusDetailsList$member": null + } + }, + "VolumeStatusDetailsList": { + "base": null, + "refs": { + "VolumeStatusInfo$Details": "

    The details of the volume status.

    " + } + }, + "VolumeStatusEvent": { + "base": "

    Describes a volume status event.

    ", + "refs": { + "VolumeStatusEventsList$member": null + } + }, + "VolumeStatusEventsList": { + "base": null, + "refs": { + "VolumeStatusItem$Events": "

    A list of events associated with the volume.

    " + } + }, + "VolumeStatusInfo": { + "base": "

    Describes the status of a volume.

    ", + "refs": { + "VolumeStatusItem$VolumeStatus": "

    The volume status.

    " + } + }, + "VolumeStatusInfoStatus": { + "base": null, + "refs": { + "VolumeStatusInfo$Status": "

    The status of the volume.

    " + } + }, + "VolumeStatusItem": { + "base": "

    Describes the volume status.

    ", + "refs": { + "VolumeStatusList$member": null + } + }, + "VolumeStatusList": { + "base": null, + "refs": { + "DescribeVolumeStatusResult$VolumeStatuses": "

    A list of volumes.

    " + } + }, + "VolumeStatusName": { + "base": null, + "refs": { + "VolumeStatusDetails$Name": "

    The name of the volume status.

    " + } + }, + "VolumeType": { + "base": null, + "refs": { + "CreateVolumeRequest$VolumeType": "

    The volume type. This can be gp2 for General Purpose SSD, io1 for Provisioned IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard for Magnetic volumes.

    Default: standard
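
    A sketch of creating a Provisioned IOPS volume with the types listed above (size and AZ are placeholders, reusing the earlier setup); Iops applies only to io1:

        func createIo1Volume(svc *ec2.EC2) (*ec2.Volume, error) {
                return svc.CreateVolume(&ec2.CreateVolumeInput{
                        AvailabilityZone: aws.String("us-west-2a"),
                        Size:             aws.Int64(100), // GiB
                        VolumeType:       aws.String("io1"),
                        Iops:             aws.Int64(1000), // required for io1 only
                })
        }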

    ", + "EbsBlockDevice$VolumeType": "

    The volume type: gp2, io1, st1, sc1, or standard.

    Default: standard

    ", + "Volume$VolumeType": "

    The volume type. This can be gp2 for General Purpose SSD, io1 for Provisioned IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard for Magnetic volumes.

    " + } + }, + "Vpc": { + "base": "

    Describes a VPC.

    ", + "refs": { + "CreateVpcResult$Vpc": "

    Information about the VPC.

    ", + "VpcList$member": null + } + }, + "VpcAttachment": { + "base": "

    Describes an attachment between a virtual private gateway and a VPC.

    ", + "refs": { + "AttachVpnGatewayResult$VpcAttachment": "

    Information about the attachment.

    ", + "VpcAttachmentList$member": null + } + }, + "VpcAttachmentList": { + "base": null, + "refs": { + "VpnGateway$VpcAttachments": "

    Any VPCs attached to the virtual private gateway.

    " + } + }, + "VpcAttributeName": { + "base": null, + "refs": { + "DescribeVpcAttributeRequest$Attribute": "

    The VPC attribute.

    " + } + }, + "VpcClassicLink": { + "base": "

    Describes whether a VPC is enabled for ClassicLink.

    ", + "refs": { + "VpcClassicLinkList$member": null + } + }, + "VpcClassicLinkIdList": { + "base": null, + "refs": { + "DescribeVpcClassicLinkDnsSupportRequest$VpcIds": "

    One or more VPC IDs.

    ", + "DescribeVpcClassicLinkRequest$VpcIds": "

    One or more VPCs for which you want to describe the ClassicLink status.

    " + } + }, + "VpcClassicLinkList": { + "base": null, + "refs": { + "DescribeVpcClassicLinkResult$Vpcs": "

    The ClassicLink status of one or more VPCs.

    " + } + }, + "VpcEndpoint": { + "base": "

    Describes a VPC endpoint.

    ", + "refs": { + "CreateVpcEndpointResult$VpcEndpoint": "

    Information about the endpoint.

    ", + "VpcEndpointSet$member": null + } + }, + "VpcEndpointSet": { + "base": null, + "refs": { + "DescribeVpcEndpointsResult$VpcEndpoints": "

    Information about the endpoints.

    " + } + }, + "VpcIdStringList": { + "base": null, + "refs": { + "DescribeVpcsRequest$VpcIds": "

    One or more VPC IDs.

    Default: Describes all your VPCs.

    " + } + }, + "VpcList": { + "base": null, + "refs": { + "DescribeVpcsResult$Vpcs": "

    Information about one or more VPCs.

    " + } + }, + "VpcPeeringConnection": { + "base": "

    Describes a VPC peering connection.

    ", + "refs": { + "AcceptVpcPeeringConnectionResult$VpcPeeringConnection": "

    Information about the VPC peering connection.

    ", + "CreateVpcPeeringConnectionResult$VpcPeeringConnection": "

    Information about the VPC peering connection.

    ", + "VpcPeeringConnectionList$member": null + } + }, + "VpcPeeringConnectionList": { + "base": null, + "refs": { + "DescribeVpcPeeringConnectionsResult$VpcPeeringConnections": "

    Information about the VPC peering connections.

    " + } + }, + "VpcPeeringConnectionOptionsDescription": { + "base": "

    Describes the VPC peering connection options.

    ", + "refs": { + "VpcPeeringConnectionVpcInfo$PeeringOptions": "

    Information about the VPC peering connection options for the accepter or requester VPC.

    " + } + }, + "VpcPeeringConnectionStateReason": { + "base": "

    Describes the status of a VPC peering connection.

    ", + "refs": { + "VpcPeeringConnection$Status": "

    The status of the VPC peering connection.

    " + } + }, + "VpcPeeringConnectionStateReasonCode": { + "base": null, + "refs": { + "VpcPeeringConnectionStateReason$Code": "

    The status code of the VPC peering connection.

    " + } + }, + "VpcPeeringConnectionVpcInfo": { + "base": "

    Describes a VPC in a VPC peering connection.

    ", + "refs": { + "VpcPeeringConnection$AccepterVpcInfo": "

    Information about the accepter VPC. CIDR block information is not returned when creating a VPC peering connection, or when describing a VPC peering connection that's in the initiating-request or pending-acceptance state.

    ", + "VpcPeeringConnection$RequesterVpcInfo": "

    Information about the requester VPC.

    " + } + }, + "VpcState": { + "base": null, + "refs": { + "Vpc$State": "

    The current state of the VPC.

    " + } + }, + "VpnConnection": { + "base": "

    Describes a VPN connection.

    ", + "refs": { + "CreateVpnConnectionResult$VpnConnection": "

    Information about the VPN connection.

    ", + "VpnConnectionList$member": null + } + }, + "VpnConnectionIdStringList": { + "base": null, + "refs": { + "DescribeVpnConnectionsRequest$VpnConnectionIds": "

    One or more VPN connection IDs.

    Default: Describes your VPN connections.

    " + } + }, + "VpnConnectionList": { + "base": null, + "refs": { + "DescribeVpnConnectionsResult$VpnConnections": "

    Information about one or more VPN connections.

    " + } + }, + "VpnConnectionOptions": { + "base": "

    Describes VPN connection options.

    ", + "refs": { + "VpnConnection$Options": "

    The VPN connection options.

    " + } + }, + "VpnConnectionOptionsSpecification": { + "base": "

    Describes VPN connection options.

    ", + "refs": { + "CreateVpnConnectionRequest$Options": "

    Indicates whether the VPN connection requires static routes. If you are creating a VPN connection for a device that does not support BGP, you must specify true.

    Default: false
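
    For a device without BGP, that flag maps to StaticRoutesOnly in the options specification. A sketch with placeholder gateway IDs, reusing the earlier setup:

        func createStaticVpn(svc *ec2.EC2, cgwID, vgwID string) (*ec2.VpnConnection, error) {
                out, err := svc.CreateVpnConnection(&ec2.CreateVpnConnectionInput{
                        Type:              aws.String("ipsec.1"),
                        CustomerGatewayId: aws.String(cgwID),
                        VpnGatewayId:      aws.String(vgwID),
                        Options: &ec2.VpnConnectionOptionsSpecification{
                                // Required when the customer gateway device does not speak BGP.
                                StaticRoutesOnly: aws.Bool(true),
                        },
                })
                if err != nil {
                        return nil, err
                }
                return out.VpnConnection, nil
        }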

    " + } + }, + "VpnGateway": { + "base": "

    Describes a virtual private gateway.

    ", + "refs": { + "CreateVpnGatewayResult$VpnGateway": "

    Information about the virtual private gateway.

    ", + "VpnGatewayList$member": null + } + }, + "VpnGatewayIdStringList": { + "base": null, + "refs": { + "DescribeVpnGatewaysRequest$VpnGatewayIds": "

    One or more virtual private gateway IDs.

    Default: Describes all your virtual private gateways.

    " + } + }, + "VpnGatewayList": { + "base": null, + "refs": { + "DescribeVpnGatewaysResult$VpnGateways": "

    Information about one or more virtual private gateways.

    " + } + }, + "VpnState": { + "base": null, + "refs": { + "VpnConnection$State": "

    The current state of the VPN connection.

    ", + "VpnGateway$State": "

    The current state of the virtual private gateway.

    ", + "VpnStaticRoute$State": "

    The current state of the static route.

    " + } + }, + "VpnStaticRoute": { + "base": "

    Describes a static route for a VPN connection.

    ", + "refs": { + "VpnStaticRouteList$member": null + } + }, + "VpnStaticRouteList": { + "base": null, + "refs": { + "VpnConnection$Routes": "

    The static routes associated with the VPN connection.

    " + } + }, + "VpnStaticRouteSource": { + "base": null, + "refs": { + "VpnStaticRoute$Source": "

    Indicates how the routes were provided.

    " + } + }, + "ZoneNameStringList": { + "base": null, + "refs": { + "DescribeAvailabilityZonesRequest$ZoneNames": "

    The names of one or more Availability Zones.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2016-04-01/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2016-04-01/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2016-04-01/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2016-04-01/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2016-04-01/paginators-1.json new file mode 100644 index 000000000..9d04d89ab --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2016-04-01/paginators-1.json @@ -0,0 +1,138 @@ +{ + "pagination": { + "DescribeAccountAttributes": { + "result_key": "AccountAttributes" + }, + "DescribeAddresses": { + "result_key": "Addresses" + }, + "DescribeAvailabilityZones": { + "result_key": "AvailabilityZones" + }, + "DescribeBundleTasks": { + "result_key": "BundleTasks" + }, + "DescribeConversionTasks": { + "result_key": "ConversionTasks" + }, + "DescribeCustomerGateways": { + "result_key": "CustomerGateways" + }, + "DescribeDhcpOptions": { + "result_key": "DhcpOptions" + }, + "DescribeExportTasks": { + "result_key": "ExportTasks" + }, + "DescribeImages": { + "result_key": "Images" + }, + "DescribeInstanceStatus": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "InstanceStatuses" + }, + "DescribeInstances": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Reservations" + }, + "DescribeInternetGateways": { + "result_key": "InternetGateways" + }, + "DescribeKeyPairs": { + "result_key": "KeyPairs" + }, + "DescribeNetworkAcls": { + "result_key": "NetworkAcls" + }, + "DescribeNetworkInterfaces": { + "result_key": "NetworkInterfaces" + }, + "DescribePlacementGroups": { + "result_key": "PlacementGroups" + }, + "DescribeRegions": { + "result_key": "Regions" + }, + "DescribeReservedInstances": { + "result_key": "ReservedInstances" + }, + "DescribeReservedInstancesListings": { + "result_key": "ReservedInstancesListings" + }, + "DescribeReservedInstancesOfferings": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "ReservedInstancesOfferings" + }, + "DescribeReservedInstancesModifications": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "ReservedInstancesModifications" + }, + "DescribeRouteTables": { + "result_key": "RouteTables" + }, + "DescribeSecurityGroups": { + "result_key": "SecurityGroups" + }, + "DescribeSnapshots": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Snapshots" + }, + "DescribeSpotInstanceRequests": { + "result_key": "SpotInstanceRequests" + }, + "DescribeSpotFleetRequests": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "SpotFleetRequestConfigs" + }, + "DescribeSpotPriceHistory": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "SpotPriceHistory" + }, + "DescribeSubnets": { + "result_key": "Subnets" + }, + "DescribeTags": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Tags" + }, + "DescribeVolumeStatus": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + 
"result_key": "VolumeStatuses" + }, + "DescribeVolumes": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Volumes" + }, + "DescribeVpcs": { + "result_key": "Vpcs" + }, + "DescribeVpcPeeringConnections": { + "result_key": "VpcPeeringConnections" + }, + "DescribeVpnConnections": { + "result_key": "VpnConnections" + }, + "DescribeVpnGateways": { + "result_key": "VpnGateways" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2016-04-01/waiters-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2016-04-01/waiters-2.json new file mode 100644 index 000000000..ecc9f1b6f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2016-04-01/waiters-2.json @@ -0,0 +1,593 @@ +{ + "version": 2, + "waiters": { + "InstanceExists": { + "delay": 5, + "maxAttempts": 40, + "operation": "DescribeInstances", + "acceptors": [ + { + "matcher": "path", + "expected": true, + "argument": "length(Reservations[]) > `0`", + "state": "success" + }, + { + "matcher": "error", + "expected": "InvalidInstanceID.NotFound", + "state": "retry" + } + ] + }, + "BundleTaskComplete": { + "delay": 15, + "operation": "DescribeBundleTasks", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "complete", + "matcher": "pathAll", + "state": "success", + "argument": "BundleTasks[].State" + }, + { + "expected": "failed", + "matcher": "pathAny", + "state": "failure", + "argument": "BundleTasks[].State" + } + ] + }, + "ConversionTaskCancelled": { + "delay": 15, + "operation": "DescribeConversionTasks", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "cancelled", + "matcher": "pathAll", + "state": "success", + "argument": "ConversionTasks[].State" + } + ] + }, + "ConversionTaskCompleted": { + "delay": 15, + "operation": "DescribeConversionTasks", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "completed", + "matcher": "pathAll", + "state": "success", + "argument": "ConversionTasks[].State" + }, + { + "expected": "cancelled", + "matcher": "pathAny", + "state": "failure", + "argument": "ConversionTasks[].State" + }, + { + "expected": "cancelling", + "matcher": "pathAny", + "state": "failure", + "argument": "ConversionTasks[].State" + } + ] + }, + "ConversionTaskDeleted": { + "delay": 15, + "operation": "DescribeConversionTasks", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "deleted", + "matcher": "pathAll", + "state": "success", + "argument": "ConversionTasks[].State" + } + ] + }, + "CustomerGatewayAvailable": { + "delay": 15, + "operation": "DescribeCustomerGateways", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "CustomerGateways[].State" + }, + { + "expected": "deleted", + "matcher": "pathAny", + "state": "failure", + "argument": "CustomerGateways[].State" + }, + { + "expected": "deleting", + "matcher": "pathAny", + "state": "failure", + "argument": "CustomerGateways[].State" + } + ] + }, + "ExportTaskCancelled": { + "delay": 15, + "operation": "DescribeExportTasks", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "cancelled", + "matcher": "pathAll", + "state": "success", + "argument": "ExportTasks[].State" + } + ] + }, + "ExportTaskCompleted": { + "delay": 15, + "operation": "DescribeExportTasks", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "completed", + "matcher": "pathAll", + "state": "success", + "argument": "ExportTasks[].State" + } + ] + }, + "ImageExists": { + "operation": "DescribeImages", + 
"maxAttempts": 40, + "delay": 15, + "acceptors": [ + { + "matcher": "path", + "expected": true, + "argument": "length(Images[]) > `0`", + "state": "success" + }, + { + "matcher": "error", + "expected": "InvalidAMIID.NotFound", + "state": "retry" + } + ] + }, + "ImageAvailable": { + "operation": "DescribeImages", + "maxAttempts": 40, + "delay": 15, + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "argument": "Images[].State", + "expected": "available" + }, + { + "state": "failure", + "matcher": "pathAny", + "argument": "Images[].State", + "expected": "failed" + } + ] + }, + "InstanceRunning": { + "delay": 15, + "operation": "DescribeInstances", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "running", + "matcher": "pathAll", + "state": "success", + "argument": "Reservations[].Instances[].State.Name" + }, + { + "expected": "shutting-down", + "matcher": "pathAny", + "state": "failure", + "argument": "Reservations[].Instances[].State.Name" + }, + { + "expected": "terminated", + "matcher": "pathAny", + "state": "failure", + "argument": "Reservations[].Instances[].State.Name" + }, + { + "expected": "stopping", + "matcher": "pathAny", + "state": "failure", + "argument": "Reservations[].Instances[].State.Name" + }, + { + "matcher": "error", + "expected": "InvalidInstanceID.NotFound", + "state": "retry" + } + ] + }, + "InstanceStatusOk": { + "operation": "DescribeInstanceStatus", + "maxAttempts": 40, + "delay": 15, + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "argument": "InstanceStatuses[].InstanceStatus.Status", + "expected": "ok" + }, + { + "matcher": "error", + "expected": "InvalidInstanceID.NotFound", + "state": "retry" + } + ] + }, + "InstanceStopped": { + "delay": 15, + "operation": "DescribeInstances", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "stopped", + "matcher": "pathAll", + "state": "success", + "argument": "Reservations[].Instances[].State.Name" + }, + { + "expected": "pending", + "matcher": "pathAny", + "state": "failure", + "argument": "Reservations[].Instances[].State.Name" + }, + { + "expected": "terminated", + "matcher": "pathAny", + "state": "failure", + "argument": "Reservations[].Instances[].State.Name" + } + ] + }, + "InstanceTerminated": { + "delay": 15, + "operation": "DescribeInstances", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "terminated", + "matcher": "pathAll", + "state": "success", + "argument": "Reservations[].Instances[].State.Name" + }, + { + "expected": "pending", + "matcher": "pathAny", + "state": "failure", + "argument": "Reservations[].Instances[].State.Name" + }, + { + "expected": "stopping", + "matcher": "pathAny", + "state": "failure", + "argument": "Reservations[].Instances[].State.Name" + } + ] + }, + "KeyPairExists": { + "operation": "DescribeKeyPairs", + "delay": 5, + "maxAttempts": 6, + "acceptors": [ + { + "expected": true, + "matcher": "pathAll", + "state": "success", + "argument": "length(KeyPairs[].KeyName) > `0`" + }, + { + "expected": "InvalidKeyPair.NotFound", + "matcher": "error", + "state": "retry" + } + ] + }, + "NatGatewayAvailable": { + "operation": "DescribeNatGateways", + "delay": 15, + "maxAttempts": 40, + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "argument": "NatGateways[].State", + "expected": "available" + }, + { + "state": "failure", + "matcher": "pathAny", + "argument": "NatGateways[].State", + "expected": "failed" + }, + { + "state": "failure", + "matcher": "pathAny", + "argument": "NatGateways[].State", + "expected": 
"deleting" + }, + { + "state": "failure", + "matcher": "pathAny", + "argument": "NatGateways[].State", + "expected": "deleted" + }, + { + "state": "retry", + "matcher": "error", + "expected": "NatGatewayNotFound" + } + ] + }, + "NetworkInterfaceAvailable": { + "operation": "DescribeNetworkInterfaces", + "delay": 20, + "maxAttempts": 10, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "NetworkInterfaces[].Status" + }, + { + "expected": "InvalidNetworkInterfaceID.NotFound", + "matcher": "error", + "state": "failure" + } + ] + }, + "PasswordDataAvailable": { + "operation": "GetPasswordData", + "maxAttempts": 40, + "delay": 15, + "acceptors": [ + { + "state": "success", + "matcher": "path", + "argument": "length(PasswordData) > `0`", + "expected": true + } + ] + }, + "SnapshotCompleted": { + "delay": 15, + "operation": "DescribeSnapshots", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "completed", + "matcher": "pathAll", + "state": "success", + "argument": "Snapshots[].State" + } + ] + }, + "SpotInstanceRequestFulfilled": { + "operation": "DescribeSpotInstanceRequests", + "maxAttempts": 40, + "delay": 15, + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "argument": "SpotInstanceRequests[].Status.Code", + "expected": "fulfilled" + }, + { + "state": "failure", + "matcher": "pathAny", + "argument": "SpotInstanceRequests[].Status.Code", + "expected": "schedule-expired" + }, + { + "state": "failure", + "matcher": "pathAny", + "argument": "SpotInstanceRequests[].Status.Code", + "expected": "canceled-before-fulfillment" + }, + { + "state": "failure", + "matcher": "pathAny", + "argument": "SpotInstanceRequests[].Status.Code", + "expected": "bad-parameters" + }, + { + "state": "failure", + "matcher": "pathAny", + "argument": "SpotInstanceRequests[].Status.Code", + "expected": "system-error" + } + ] + }, + "SubnetAvailable": { + "delay": 15, + "operation": "DescribeSubnets", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "Subnets[].State" + } + ] + }, + "SystemStatusOk": { + "operation": "DescribeInstanceStatus", + "maxAttempts": 40, + "delay": 15, + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "argument": "InstanceStatuses[].SystemStatus.Status", + "expected": "ok" + } + ] + }, + "VolumeAvailable": { + "delay": 15, + "operation": "DescribeVolumes", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "Volumes[].State" + }, + { + "expected": "deleted", + "matcher": "pathAny", + "state": "failure", + "argument": "Volumes[].State" + } + ] + }, + "VolumeDeleted": { + "delay": 15, + "operation": "DescribeVolumes", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "deleted", + "matcher": "pathAll", + "state": "success", + "argument": "Volumes[].State" + }, + { + "matcher": "error", + "expected": "InvalidVolume.NotFound", + "state": "success" + } + ] + }, + "VolumeInUse": { + "delay": 15, + "operation": "DescribeVolumes", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "in-use", + "matcher": "pathAll", + "state": "success", + "argument": "Volumes[].State" + }, + { + "expected": "deleted", + "matcher": "pathAny", + "state": "failure", + "argument": "Volumes[].State" + } + ] + }, + "VpcAvailable": { + "delay": 15, + "operation": "DescribeVpcs", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "available", + "matcher": 
"pathAll", + "state": "success", + "argument": "Vpcs[].State" + } + ] + }, + "VpcExists": { + "operation": "DescribeVpcs", + "delay": 1, + "maxAttempts": 5, + "acceptors": [ + { + "matcher": "status", + "expected": 200, + "state": "success" + }, + { + "matcher": "error", + "expected": "InvalidVpcID.NotFound", + "state": "retry" + } + ] + }, + "VpnConnectionAvailable": { + "delay": 15, + "operation": "DescribeVpnConnections", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "VpnConnections[].State" + }, + { + "expected": "deleting", + "matcher": "pathAny", + "state": "failure", + "argument": "VpnConnections[].State" + }, + { + "expected": "deleted", + "matcher": "pathAny", + "state": "failure", + "argument": "VpnConnections[].State" + } + ] + }, + "VpnConnectionDeleted": { + "delay": 15, + "operation": "DescribeVpnConnections", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "deleted", + "matcher": "pathAll", + "state": "success", + "argument": "VpnConnections[].State" + }, + { + "expected": "pending", + "matcher": "pathAny", + "state": "failure", + "argument": "VpnConnections[].State" + } + ] + }, + "VpcPeeringConnectionExists": { + "delay": 15, + "operation": "DescribeVpcPeeringConnections", + "maxAttempts": 40, + "acceptors": [ + { + "matcher": "status", + "expected": 200, + "state": "success" + }, + { + "matcher": "error", + "expected": "InvalidVpcPeeringConnectionID.NotFound", + "state": "retry" + } + ] + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/ecr/2015-09-21/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/ecr/2015-09-21/api-2.json new file mode 100644 index 000000000..6b6f68763 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/ecr/2015-09-21/api-2.json @@ -0,0 +1,849 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-09-21", + "endpointPrefix":"ecr", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"Amazon ECR", + "serviceFullName":"Amazon EC2 Container Registry", + "signatureVersion":"v4", + "targetPrefix":"AmazonEC2ContainerRegistry_V20150921" + }, + "operations":{ + "BatchCheckLayerAvailability":{ + "name":"BatchCheckLayerAvailability", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchCheckLayerAvailabilityRequest"}, + "output":{"shape":"BatchCheckLayerAvailabilityResponse"}, + "errors":[ + {"shape":"RepositoryNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ServerException"} + ] + }, + "BatchDeleteImage":{ + "name":"BatchDeleteImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchDeleteImageRequest"}, + "output":{"shape":"BatchDeleteImageResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"} + ] + }, + "BatchGetImage":{ + "name":"BatchGetImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchGetImageRequest"}, + "output":{"shape":"BatchGetImageResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"} + ] + }, + "CompleteLayerUpload":{ + "name":"CompleteLayerUpload", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CompleteLayerUploadRequest"}, + "output":{"shape":"CompleteLayerUploadResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + 
{"shape":"RepositoryNotFoundException"}, + {"shape":"UploadNotFoundException"}, + {"shape":"InvalidLayerException"}, + {"shape":"LayerPartTooSmallException"}, + {"shape":"LayerAlreadyExistsException"}, + {"shape":"EmptyUploadException"} + ] + }, + "CreateRepository":{ + "name":"CreateRepository", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateRepositoryRequest"}, + "output":{"shape":"CreateRepositoryResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryAlreadyExistsException"}, + {"shape":"LimitExceededException"} + ] + }, + "DeleteRepository":{ + "name":"DeleteRepository", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRepositoryRequest"}, + "output":{"shape":"DeleteRepositoryResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"}, + {"shape":"RepositoryNotEmptyException"} + ] + }, + "DeleteRepositoryPolicy":{ + "name":"DeleteRepositoryPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRepositoryPolicyRequest"}, + "output":{"shape":"DeleteRepositoryPolicyResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"}, + {"shape":"RepositoryPolicyNotFoundException"} + ] + }, + "DescribeRepositories":{ + "name":"DescribeRepositories", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRepositoriesRequest"}, + "output":{"shape":"DescribeRepositoriesResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"} + ] + }, + "GetAuthorizationToken":{ + "name":"GetAuthorizationToken", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetAuthorizationTokenRequest"}, + "output":{"shape":"GetAuthorizationTokenResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"} + ] + }, + "GetDownloadUrlForLayer":{ + "name":"GetDownloadUrlForLayer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDownloadUrlForLayerRequest"}, + "output":{"shape":"GetDownloadUrlForLayerResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"LayersNotFoundException"}, + {"shape":"LayerInaccessibleException"}, + {"shape":"RepositoryNotFoundException"} + ] + }, + "GetRepositoryPolicy":{ + "name":"GetRepositoryPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetRepositoryPolicyRequest"}, + "output":{"shape":"GetRepositoryPolicyResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"}, + {"shape":"RepositoryPolicyNotFoundException"} + ] + }, + "InitiateLayerUpload":{ + "name":"InitiateLayerUpload", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"InitiateLayerUploadRequest"}, + "output":{"shape":"InitiateLayerUploadResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"} + ] + }, + "ListImages":{ + "name":"ListImages", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListImagesRequest"}, + "output":{"shape":"ListImagesResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + 
{"shape":"RepositoryNotFoundException"} + ] + }, + "PutImage":{ + "name":"PutImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutImageRequest"}, + "output":{"shape":"PutImageResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"}, + {"shape":"ImageAlreadyExistsException"}, + {"shape":"LayersNotFoundException"}, + {"shape":"LimitExceededException"} + ] + }, + "SetRepositoryPolicy":{ + "name":"SetRepositoryPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetRepositoryPolicyRequest"}, + "output":{"shape":"SetRepositoryPolicyResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"} + ] + }, + "UploadLayerPart":{ + "name":"UploadLayerPart", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UploadLayerPartRequest"}, + "output":{"shape":"UploadLayerPartResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidLayerPartException"}, + {"shape":"RepositoryNotFoundException"}, + {"shape":"UploadNotFoundException"}, + {"shape":"LimitExceededException"} + ] + } + }, + "shapes":{ + "Arn":{"type":"string"}, + "AuthorizationData":{ + "type":"structure", + "members":{ + "authorizationToken":{"shape":"Base64"}, + "expiresAt":{"shape":"ExpirationTimestamp"}, + "proxyEndpoint":{"shape":"ProxyEndpoint"} + } + }, + "AuthorizationDataList":{ + "type":"list", + "member":{"shape":"AuthorizationData"} + }, + "Base64":{ + "type":"string", + "pattern":"^\\S+$" + }, + "BatchCheckLayerAvailabilityRequest":{ + "type":"structure", + "required":[ + "repositoryName", + "layerDigests" + ], + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"}, + "layerDigests":{"shape":"BatchedOperationLayerDigestList"} + } + }, + "BatchCheckLayerAvailabilityResponse":{ + "type":"structure", + "members":{ + "layers":{"shape":"LayerList"}, + "failures":{"shape":"LayerFailureList"} + } + }, + "BatchDeleteImageRequest":{ + "type":"structure", + "required":[ + "repositoryName", + "imageIds" + ], + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"}, + "imageIds":{"shape":"ImageIdentifierList"} + } + }, + "BatchDeleteImageResponse":{ + "type":"structure", + "members":{ + "imageIds":{"shape":"ImageIdentifierList"}, + "failures":{"shape":"ImageFailureList"} + } + }, + "BatchGetImageRequest":{ + "type":"structure", + "required":[ + "repositoryName", + "imageIds" + ], + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"}, + "imageIds":{"shape":"ImageIdentifierList"} + } + }, + "BatchGetImageResponse":{ + "type":"structure", + "members":{ + "images":{"shape":"ImageList"}, + "failures":{"shape":"ImageFailureList"} + } + }, + "BatchedOperationLayerDigest":{ + "type":"string", + "max":1000, + "min":0 + }, + "BatchedOperationLayerDigestList":{ + "type":"list", + "member":{"shape":"BatchedOperationLayerDigest"}, + "max":100, + "min":1 + }, + "CompleteLayerUploadRequest":{ + "type":"structure", + "required":[ + "repositoryName", + "uploadId", + "layerDigests" + ], + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"}, + "uploadId":{"shape":"UploadId"}, + "layerDigests":{"shape":"LayerDigestList"} + } + }, + "CompleteLayerUploadResponse":{ + "type":"structure", + 
"members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"}, + "uploadId":{"shape":"UploadId"}, + "layerDigest":{"shape":"LayerDigest"} + } + }, + "CreateRepositoryRequest":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "repositoryName":{"shape":"RepositoryName"} + } + }, + "CreateRepositoryResponse":{ + "type":"structure", + "members":{ + "repository":{"shape":"Repository"} + } + }, + "DeleteRepositoryPolicyRequest":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"} + } + }, + "DeleteRepositoryPolicyResponse":{ + "type":"structure", + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"}, + "policyText":{"shape":"RepositoryPolicyText"} + } + }, + "DeleteRepositoryRequest":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"}, + "force":{"shape":"ForceFlag"} + } + }, + "DeleteRepositoryResponse":{ + "type":"structure", + "members":{ + "repository":{"shape":"Repository"} + } + }, + "DescribeRepositoriesRequest":{ + "type":"structure", + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryNames":{"shape":"RepositoryNameList"}, + "nextToken":{"shape":"NextToken"}, + "maxResults":{"shape":"MaxResults"} + } + }, + "DescribeRepositoriesResponse":{ + "type":"structure", + "members":{ + "repositories":{"shape":"RepositoryList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "EmptyUploadException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "ExceptionMessage":{"type":"string"}, + "ExpirationTimestamp":{"type":"timestamp"}, + "ForceFlag":{"type":"boolean"}, + "GetAuthorizationTokenRegistryIdList":{ + "type":"list", + "member":{"shape":"RegistryId"}, + "max":10, + "min":1 + }, + "GetAuthorizationTokenRequest":{ + "type":"structure", + "members":{ + "registryIds":{"shape":"GetAuthorizationTokenRegistryIdList"} + } + }, + "GetAuthorizationTokenResponse":{ + "type":"structure", + "members":{ + "authorizationData":{"shape":"AuthorizationDataList"} + } + }, + "GetDownloadUrlForLayerRequest":{ + "type":"structure", + "required":[ + "repositoryName", + "layerDigest" + ], + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"}, + "layerDigest":{"shape":"LayerDigest"} + } + }, + "GetDownloadUrlForLayerResponse":{ + "type":"structure", + "members":{ + "downloadUrl":{"shape":"Url"}, + "layerDigest":{"shape":"LayerDigest"} + } + }, + "GetRepositoryPolicyRequest":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"} + } + }, + "GetRepositoryPolicyResponse":{ + "type":"structure", + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"}, + "policyText":{"shape":"RepositoryPolicyText"} + } + }, + "Image":{ + "type":"structure", + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"}, + "imageId":{"shape":"ImageIdentifier"}, + "imageManifest":{"shape":"ImageManifest"} + } + }, + "ImageAlreadyExistsException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "ImageDigest":{"type":"string"}, + "ImageFailure":{ + "type":"structure", + "members":{ + 
"imageId":{"shape":"ImageIdentifier"}, + "failureCode":{"shape":"ImageFailureCode"}, + "failureReason":{"shape":"ImageFailureReason"} + } + }, + "ImageFailureCode":{ + "type":"string", + "enum":[ + "InvalidImageDigest", + "InvalidImageTag", + "ImageTagDoesNotMatchDigest", + "ImageNotFound", + "MissingDigestAndTag" + ] + }, + "ImageFailureList":{ + "type":"list", + "member":{"shape":"ImageFailure"} + }, + "ImageFailureReason":{"type":"string"}, + "ImageIdentifier":{ + "type":"structure", + "members":{ + "imageDigest":{"shape":"ImageDigest"}, + "imageTag":{"shape":"ImageTag"} + } + }, + "ImageIdentifierList":{ + "type":"list", + "member":{"shape":"ImageIdentifier"}, + "max":100, + "min":1 + }, + "ImageList":{ + "type":"list", + "member":{"shape":"Image"} + }, + "ImageManifest":{"type":"string"}, + "ImageTag":{"type":"string"}, + "InitiateLayerUploadRequest":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"} + } + }, + "InitiateLayerUploadResponse":{ + "type":"structure", + "members":{ + "uploadId":{"shape":"UploadId"}, + "partSize":{"shape":"PartSize"} + } + }, + "InvalidLayerException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "InvalidLayerPartException":{ + "type":"structure", + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"}, + "uploadId":{"shape":"UploadId"}, + "lastValidByteReceived":{"shape":"PartSize"}, + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "InvalidParameterException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "Layer":{ + "type":"structure", + "members":{ + "layerDigest":{"shape":"LayerDigest"}, + "layerAvailability":{"shape":"LayerAvailability"}, + "layerSize":{"shape":"LayerSizeInBytes"} + } + }, + "LayerAlreadyExistsException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "LayerAvailability":{ + "type":"string", + "enum":[ + "AVAILABLE", + "UNAVAILABLE" + ] + }, + "LayerDigest":{ + "type":"string", + "pattern":"[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+" + }, + "LayerDigestList":{ + "type":"list", + "member":{"shape":"LayerDigest"}, + "max":100, + "min":1 + }, + "LayerFailure":{ + "type":"structure", + "members":{ + "layerDigest":{"shape":"BatchedOperationLayerDigest"}, + "failureCode":{"shape":"LayerFailureCode"}, + "failureReason":{"shape":"LayerFailureReason"} + } + }, + "LayerFailureCode":{ + "type":"string", + "enum":[ + "InvalidLayerDigest", + "MissingLayerDigest" + ] + }, + "LayerFailureList":{ + "type":"list", + "member":{"shape":"LayerFailure"} + }, + "LayerFailureReason":{"type":"string"}, + "LayerInaccessibleException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "LayerList":{ + "type":"list", + "member":{"shape":"Layer"} + }, + "LayerPartBlob":{"type":"blob"}, + "LayerPartTooSmallException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "LayerSizeInBytes":{"type":"long"}, + "LayersNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "ListImagesRequest":{ + 
"type":"structure", + "required":["repositoryName"], + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"}, + "nextToken":{"shape":"NextToken"}, + "maxResults":{"shape":"MaxResults"} + } + }, + "ListImagesResponse":{ + "type":"structure", + "members":{ + "imageIds":{"shape":"ImageIdentifierList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "MaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, + "NextToken":{"type":"string"}, + "PartSize":{ + "type":"long", + "min":0 + }, + "ProxyEndpoint":{"type":"string"}, + "PutImageRequest":{ + "type":"structure", + "required":[ + "repositoryName", + "imageManifest" + ], + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"}, + "imageManifest":{"shape":"ImageManifest"} + } + }, + "PutImageResponse":{ + "type":"structure", + "members":{ + "image":{"shape":"Image"} + } + }, + "RegistryId":{ + "type":"string", + "pattern":"[0-9]{12}" + }, + "Repository":{ + "type":"structure", + "members":{ + "repositoryArn":{"shape":"Arn"}, + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"}, + "repositoryUri":{"shape":"Url"} + } + }, + "RepositoryAlreadyExistsException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "RepositoryList":{ + "type":"list", + "member":{"shape":"Repository"} + }, + "RepositoryName":{ + "type":"string", + "max":256, + "min":2, + "pattern":"(?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)*[a-z0-9]+(?:[._-][a-z0-9]+)*" + }, + "RepositoryNameList":{ + "type":"list", + "member":{"shape":"RepositoryName"}, + "max":100, + "min":1 + }, + "RepositoryNotEmptyException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "RepositoryNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "RepositoryPolicyNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "RepositoryPolicyText":{ + "type":"string", + "max":10240, + "min":0 + }, + "ServerException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true, + "fault":true + }, + "SetRepositoryPolicyRequest":{ + "type":"structure", + "required":[ + "repositoryName", + "policyText" + ], + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"}, + "policyText":{"shape":"RepositoryPolicyText"}, + "force":{"shape":"ForceFlag"} + } + }, + "SetRepositoryPolicyResponse":{ + "type":"structure", + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"}, + "policyText":{"shape":"RepositoryPolicyText"} + } + }, + "UploadId":{ + "type":"string", + "pattern":"[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}" + }, + "UploadLayerPartRequest":{ + "type":"structure", + "required":[ + "repositoryName", + "uploadId", + "partFirstByte", + "partLastByte", + "layerPartBlob" + ], + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"}, + "uploadId":{"shape":"UploadId"}, + "partFirstByte":{"shape":"PartSize"}, + "partLastByte":{"shape":"PartSize"}, + "layerPartBlob":{"shape":"LayerPartBlob"} + } + }, + "UploadLayerPartResponse":{ + "type":"structure", + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"}, + 
"uploadId":{"shape":"UploadId"}, + "lastByteReceived":{"shape":"PartSize"} + } + }, + "UploadNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "Url":{"type":"string"} + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/ecr/2015-09-21/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/ecr/2015-09-21/docs-2.json new file mode 100644 index 000000000..b8f6aa2e6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/ecr/2015-09-21/docs-2.json @@ -0,0 +1,620 @@ +{ + "version": "2.0", + "service": "

    Amazon EC2 Container Registry (Amazon ECR) is a managed AWS Docker registry service. Customers can use the familiar Docker CLI to push, pull, and manage images. Amazon ECR provides a secure, scalable, and reliable registry. Amazon ECR supports private Docker repositories with resource-based permissions using AWS IAM so that specific users or Amazon EC2 instances can access repositories and images. Developers can use the Docker CLI to author and manage images.

    ", + "operations": { + "BatchCheckLayerAvailability": "

    Checks the availability of multiple image layers in a specified registry and repository.

    This operation is used by the Amazon ECR proxy, and it is not intended for general use by customers. Use the docker CLI to pull, tag, and push images.

    ", + "BatchDeleteImage": "

    Deletes a list of specified images within a specified repository. Images are specified with either imageTag or imageDigest.

    ", + "BatchGetImage": "

    Gets detailed information for specified images within a specified repository. Images are specified with either imageTag or imageDigest.

    ", + "CompleteLayerUpload": "

    Informs Amazon ECR that the image layer upload for a specified registry, repository name, and upload ID has completed. You can optionally provide a sha256 digest of the image layer for data validation purposes.

    This operation is used by the Amazon ECR proxy, and it is not intended for general use by customers. Use the docker CLI to pull, tag, and push images.

    ", + "CreateRepository": "

    Creates an image repository.

    ", + "DeleteRepository": "

    Deletes an existing image repository. If a repository contains images, you must use the force option to delete it.

    ", + "DeleteRepositoryPolicy": "

    Deletes the repository policy from a specified repository.

    ", + "DescribeRepositories": "

    Describes image repositories in a registry.

    ", + "GetAuthorizationToken": "

    Retrieves a token that is valid for a specified registry for 12 hours. This operation allows you to use the docker CLI to push and pull images with Amazon ECR. If you do not specify a registry, the default registry is assumed.

    The authorizationToken returned for each registry specified is a base64-encoded string that can be decoded and used in a docker login command to authenticate to a registry. The AWS CLI offers an aws ecr get-login command that simplifies the login process.
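    A minimal sketch of this flow using the vendored aws-sdk-go ECR client, assuming the region and credentials come from the default chain; the panic-based error handling is only for brevity:

    package main

    import (
        "encoding/base64"
        "fmt"
        "strings"

        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/ecr"
    )

    func main() {
        svc := ecr.New(session.Must(session.NewSession()))

        // Omitting RegistryIds targets the default registry.
        out, err := svc.GetAuthorizationToken(&ecr.GetAuthorizationTokenInput{})
        if err != nil {
            panic(err)
        }
        for _, data := range out.AuthorizationData {
            // The token decodes to "user:password" for docker login.
            raw, _ := base64.StdEncoding.DecodeString(*data.AuthorizationToken)
            user := strings.SplitN(string(raw), ":", 2)[0]
            fmt.Printf("docker login -u %s ... %s (expires %s)\n",
                user, *data.ProxyEndpoint, data.ExpiresAt)
        }
    }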

    ", + "GetDownloadUrlForLayer": "

    Retrieves the pre-signed Amazon S3 download URL corresponding to an image layer. You can only get URLs for image layers that are referenced in an image.

    This operation is used by the Amazon ECR proxy, and it is not intended for general use by customers. Use the docker CLI to pull, tag, and push images.

    ", + "GetRepositoryPolicy": "

    Retrieves the repository policy for a specified repository.

    ", + "InitiateLayerUpload": "

    Notifies Amazon ECR that you intend to upload an image layer.

    This operation is used by the Amazon ECR proxy, and it is not intended for general use by customers. Use the docker CLI to pull, tag, and push images.

    ", + "ListImages": "

    Lists all the image IDs for a given repository.

    ", + "PutImage": "

    Creates or updates the image manifest associated with an image.

    This operation is used by the Amazon ECR proxy, and it is not intended for general use by customers. Use the docker CLI to pull, tag, and push images.

    ", + "SetRepositoryPolicy": "

    Applies a repository policy on a specified repository to control access permissions.

    ", + "UploadLayerPart": "

    Uploads an image layer part to Amazon ECR.

    This operation is used by the Amazon ECR proxy, and it is not intended for general use by customers. Use the docker CLI to pull, tag, and push images.
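    Although the notes above say these layer operations are meant for the Amazon ECR proxy rather than general use, a sketch of how InitiateLayerUpload, UploadLayerPart, and CompleteLayerUpload chain together may clarify the protocol; the repository name, payload, and digest are placeholders:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/ecr"
    )

    func main() {
        svc := ecr.New(session.Must(session.NewSession()))
        blob := []byte("layer bytes ...") // hypothetical layer part payload

        initOut, err := svc.InitiateLayerUpload(&ecr.InitiateLayerUploadInput{
            RepositoryName: aws.String("my-repo"), // hypothetical repository
        })
        if err != nil {
            panic(err)
        }

        // Each part carries an inclusive byte range; real layers are split
        // into parts of at least the advertised partSize (5 MiB minimum,
        // per LayerPartTooSmallException below).
        if _, err = svc.UploadLayerPart(&ecr.UploadLayerPartInput{
            RepositoryName: aws.String("my-repo"),
            UploadId:       initOut.UploadId,
            PartFirstByte:  aws.Int64(0),
            PartLastByte:   aws.Int64(int64(len(blob) - 1)),
            LayerPartBlob:  blob,
        }); err != nil {
            panic(err)
        }

        // Completing the upload hands ECR the sha256 digest for validation.
        compOut, err := svc.CompleteLayerUpload(&ecr.CompleteLayerUploadInput{
            RepositoryName: aws.String("my-repo"),
            UploadId:       initOut.UploadId,
            LayerDigests:   []*string{aws.String("sha256:<digest>")},
        })
        if err != nil {
            panic(err)
        }
        fmt.Println("accepted digest:", *compOut.LayerDigest)
    }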

    " + }, + "shapes": { + "Arn": { + "base": null, + "refs": { + "Repository$repositoryArn": "

    The Amazon Resource Name (ARN) that identifies the repository. The ARN contains the arn:aws:ecr namespace, followed by the region of the repository, the AWS account ID of the repository owner, the repository namespace, and then the repository name. For example, arn:aws:ecr:region:012345678910:repository/test.

    " + } + }, + "AuthorizationData": { + "base": "

    An object representing authorization data for an Amazon ECR registry.

    ", + "refs": { + "AuthorizationDataList$member": null + } + }, + "AuthorizationDataList": { + "base": null, + "refs": { + "GetAuthorizationTokenResponse$authorizationData": "

    A list of authorization token data objects that correspond to the registryIds values in the request.

    " + } + }, + "Base64": { + "base": null, + "refs": { + "AuthorizationData$authorizationToken": "

    A base64-encoded string that contains authorization data for the specified Amazon ECR registry. When the string is decoded, it is presented in the format user:password for private registry authentication using docker login.

    " + } + }, + "BatchCheckLayerAvailabilityRequest": { + "base": null, + "refs": { + } + }, + "BatchCheckLayerAvailabilityResponse": { + "base": null, + "refs": { + } + }, + "BatchDeleteImageRequest": { + "base": "

    Deletes specified images within a specified repository. Images are specified with either the imageTag or imageDigest.

    ", + "refs": { + } + }, + "BatchDeleteImageResponse": { + "base": null, + "refs": { + } + }, + "BatchGetImageRequest": { + "base": null, + "refs": { + } + }, + "BatchGetImageResponse": { + "base": null, + "refs": { + } + }, + "BatchedOperationLayerDigest": { + "base": null, + "refs": { + "BatchedOperationLayerDigestList$member": null, + "LayerFailure$layerDigest": "

    The layer digest associated with the failure.

    " + } + }, + "BatchedOperationLayerDigestList": { + "base": null, + "refs": { + "BatchCheckLayerAvailabilityRequest$layerDigests": "

    The digests of the image layers to check.

    " + } + }, + "CompleteLayerUploadRequest": { + "base": null, + "refs": { + } + }, + "CompleteLayerUploadResponse": { + "base": null, + "refs": { + } + }, + "CreateRepositoryRequest": { + "base": null, + "refs": { + } + }, + "CreateRepositoryResponse": { + "base": null, + "refs": { + } + }, + "DeleteRepositoryPolicyRequest": { + "base": null, + "refs": { + } + }, + "DeleteRepositoryPolicyResponse": { + "base": null, + "refs": { + } + }, + "DeleteRepositoryRequest": { + "base": null, + "refs": { + } + }, + "DeleteRepositoryResponse": { + "base": null, + "refs": { + } + }, + "DescribeRepositoriesRequest": { + "base": null, + "refs": { + } + }, + "DescribeRepositoriesResponse": { + "base": null, + "refs": { + } + }, + "EmptyUploadException": { + "base": "

    The specified layer upload does not contain any layer parts.

    ", + "refs": { + } + }, + "ExceptionMessage": { + "base": null, + "refs": { + "EmptyUploadException$message": "

    The error message associated with the exception.

    ", + "ImageAlreadyExistsException$message": "

    The error message associated with the exception.

    ", + "InvalidLayerException$message": "

    The error message associated with the exception.

    ", + "InvalidLayerPartException$message": "

    The error message associated with the exception.

    ", + "InvalidParameterException$message": "

    The error message associated with the exception.

    ", + "LayerAlreadyExistsException$message": "

    The error message associated with the exception.

    ", + "LayerInaccessibleException$message": "

    The error message associated with the exception.

    ", + "LayerPartTooSmallException$message": "

    The error message associated with the exception.

    ", + "LayersNotFoundException$message": "

    The error message associated with the exception.

    ", + "LimitExceededException$message": "

    The error message associated with the exception.

    ", + "RepositoryAlreadyExistsException$message": "

    The error message associated with the exception.

    ", + "RepositoryNotEmptyException$message": "

    The error message associated with the exception.

    ", + "RepositoryNotFoundException$message": "

    The error message associated with the exception.

    ", + "RepositoryPolicyNotFoundException$message": "

    The error message associated with the exception.

    ", + "ServerException$message": "

    The error message associated with the exception.

    ", + "UploadNotFoundException$message": "

    The error message associated with the exception.

    " + } + }, + "ExpirationTimestamp": { + "base": null, + "refs": { + "AuthorizationData$expiresAt": "

    The Unix time in seconds and milliseconds when the authorization token expires. Authorization tokens are valid for 12 hours.

    " + } + }, + "ForceFlag": { + "base": null, + "refs": { + "DeleteRepositoryRequest$force": "

    Force the deletion of the repository if it contains images.

    ", + "SetRepositoryPolicyRequest$force": "

    If the policy you are attempting to set on a repository would prevent you from setting another policy in the future, you must force the SetRepositoryPolicy operation. This is intended to prevent accidental repository lockouts.

    " + } + }, + "GetAuthorizationTokenRegistryIdList": { + "base": null, + "refs": { + "GetAuthorizationTokenRequest$registryIds": "

    A list of AWS account IDs that are associated with the registries for which to get authorization tokens. If you do not specify a registry, the default registry is assumed.

    " + } + }, + "GetAuthorizationTokenRequest": { + "base": null, + "refs": { + } + }, + "GetAuthorizationTokenResponse": { + "base": null, + "refs": { + } + }, + "GetDownloadUrlForLayerRequest": { + "base": null, + "refs": { + } + }, + "GetDownloadUrlForLayerResponse": { + "base": null, + "refs": { + } + }, + "GetRepositoryPolicyRequest": { + "base": null, + "refs": { + } + }, + "GetRepositoryPolicyResponse": { + "base": null, + "refs": { + } + }, + "Image": { + "base": "

    Object representing an image.

    ", + "refs": { + "ImageList$member": null, + "PutImageResponse$image": "

    Details of the image uploaded.

    " + } + }, + "ImageAlreadyExistsException": { + "base": "

    The specified image has already been pushed, and there are no changes to the manifest or image tag since the last push.

    ", + "refs": { + } + }, + "ImageDigest": { + "base": null, + "refs": { + "ImageIdentifier$imageDigest": "

    The sha256 digest of the image manifest.

    " + } + }, + "ImageFailure": { + "base": null, + "refs": { + "ImageFailureList$member": null + } + }, + "ImageFailureCode": { + "base": null, + "refs": { + "ImageFailure$failureCode": "

    The code associated with the failure.

    " + } + }, + "ImageFailureList": { + "base": null, + "refs": { + "BatchDeleteImageResponse$failures": "

    Any failures associated with the call.

    ", + "BatchGetImageResponse$failures": "

    Any failures associated with the call.

    " + } + }, + "ImageFailureReason": { + "base": null, + "refs": { + "ImageFailure$failureReason": "

    The reason for the failure.

    " + } + }, + "ImageIdentifier": { + "base": null, + "refs": { + "Image$imageId": "

    An object containing the image tag and image digest associated with an image.

    ", + "ImageFailure$imageId": "

    The image ID associated with the failure.

    ", + "ImageIdentifierList$member": null + } + }, + "ImageIdentifierList": { + "base": null, + "refs": { + "BatchDeleteImageRequest$imageIds": "

    A list of image ID references that correspond to images to delete. The format of the imageIds reference is imageTag=tag or imageDigest=digest.

    ", + "BatchDeleteImageResponse$imageIds": "

    The image IDs of the deleted images.

    ", + "BatchGetImageRequest$imageIds": "

    A list of image ID references that correspond to images to describe. The format of the imageIds reference is imageTag=tag or imageDigest=digest.

    ", + "ListImagesResponse$imageIds": "

    The list of image IDs for the requested repository.
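    The imageTag=tag / imageDigest=digest format described above corresponds to the SDK's ImageIdentifier struct. A sketch of BatchDeleteImage using both forms, where svc is an *ecr.ECR client built as in the earlier sketch and the repository, tag, and digest are placeholders:

    out, err := svc.BatchDeleteImage(&ecr.BatchDeleteImageInput{
        RepositoryName: aws.String("my-repo"),
        ImageIds: []*ecr.ImageIdentifier{
            {ImageTag: aws.String("v1.0")},               // delete by tag
            {ImageDigest: aws.String("sha256:<digest>")}, // or by digest
        },
    })
    if err != nil {
        panic(err)
    }
    // Per-image problems are reported in the failures list, not as an error.
    for _, f := range out.Failures {
        fmt.Println(*f.FailureCode, *f.FailureReason)
    }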

    " + } + }, + "ImageList": { + "base": null, + "refs": { + "BatchGetImageResponse$images": "

    A list of image objects corresponding to the image references in the request.

    " + } + }, + "ImageManifest": { + "base": null, + "refs": { + "Image$imageManifest": "

    The image manifest associated with the image.

    ", + "PutImageRequest$imageManifest": "

    The image manifest corresponding to the image to be uploaded.

    " + } + }, + "ImageTag": { + "base": null, + "refs": { + "ImageIdentifier$imageTag": "

    The tag used for the image.

    " + } + }, + "InitiateLayerUploadRequest": { + "base": null, + "refs": { + } + }, + "InitiateLayerUploadResponse": { + "base": null, + "refs": { + } + }, + "InvalidLayerException": { + "base": "

    The layer digest calculation performed by Amazon ECR upon receipt of the image layer does not match the digest specified.

    ", + "refs": { + } + }, + "InvalidLayerPartException": { + "base": "

    The layer part size is not valid, or the first byte specified is not consecutive to the last byte of a previous layer part upload.

    ", + "refs": { + } + }, + "InvalidParameterException": { + "base": "

    The specified parameter is invalid. Review the available parameters for the API request.

    ", + "refs": { + } + }, + "Layer": { + "base": null, + "refs": { + "LayerList$member": null + } + }, + "LayerAlreadyExistsException": { + "base": "

    The image layer already exists in the associated repository.

    ", + "refs": { + } + }, + "LayerAvailability": { + "base": null, + "refs": { + "Layer$layerAvailability": "

    The availability status of the image layer. Valid values are AVAILABLE and UNAVAILABLE.

    " + } + }, + "LayerDigest": { + "base": null, + "refs": { + "CompleteLayerUploadResponse$layerDigest": "

    The sha256 digest of the image layer.

    ", + "GetDownloadUrlForLayerRequest$layerDigest": "

    The digest of the image layer to download.

    ", + "GetDownloadUrlForLayerResponse$layerDigest": "

    The digest of the image layer to download.

    ", + "Layer$layerDigest": "

    The sha256 digest of the image layer.

    ", + "LayerDigestList$member": null + } + }, + "LayerDigestList": { + "base": null, + "refs": { + "CompleteLayerUploadRequest$layerDigests": "

    The sha256 digest of the image layer.

    " + } + }, + "LayerFailure": { + "base": null, + "refs": { + "LayerFailureList$member": null + } + }, + "LayerFailureCode": { + "base": null, + "refs": { + "LayerFailure$failureCode": "

    The failure code associated with the failure.

    " + } + }, + "LayerFailureList": { + "base": null, + "refs": { + "BatchCheckLayerAvailabilityResponse$failures": "

    Any failures associated with the call.

    " + } + }, + "LayerFailureReason": { + "base": null, + "refs": { + "LayerFailure$failureReason": "

    The reason for the failure.

    " + } + }, + "LayerInaccessibleException": { + "base": "

    The specified layer is not available because it is not associated with an image. Unassociated image layers may be cleaned up at any time.

    ", + "refs": { + } + }, + "LayerList": { + "base": null, + "refs": { + "BatchCheckLayerAvailabilityResponse$layers": "

    A list of image layer objects corresponding to the image layer references in the request.

    " + } + }, + "LayerPartBlob": { + "base": null, + "refs": { + "UploadLayerPartRequest$layerPartBlob": "

    The base64-encoded layer part payload.

    " + } + }, + "LayerPartTooSmallException": { + "base": "

    Layer parts must be at least 5 MiB in size.

    ", + "refs": { + } + }, + "LayerSizeInBytes": { + "base": null, + "refs": { + "Layer$layerSize": "

    The size, in bytes, of the image layer.

    " + } + }, + "LayersNotFoundException": { + "base": "

    The specified layers could not be found, or the specified layer is not valid for this repository.

    ", + "refs": { + } + }, + "LimitExceededException": { + "base": "

    The operation did not succeed because it would have exceeded a service limit for your account. For more information, see Amazon ECR Default Service Limits in the Amazon EC2 Container Registry User Guide.

    ", + "refs": { + } + }, + "ListImagesRequest": { + "base": null, + "refs": { + } + }, + "ListImagesResponse": { + "base": null, + "refs": { + } + }, + "MaxResults": { + "base": null, + "refs": { + "DescribeRepositoriesRequest$maxResults": "

    The maximum number of repository results returned by DescribeRepositories in paginated output. When this parameter is used, DescribeRepositories only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another DescribeRepositories request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then DescribeRepositories returns up to 100 results and a nextToken value, if applicable.

    ", + "ListImagesRequest$maxResults": "

    The maximum number of image results returned by ListImages in paginated output. When this parameter is used, ListImages only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListImages request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListImages returns up to 100 results and a nextToken value, if applicable.

    " + } + }, + "NextToken": { + "base": null, + "refs": { + "DescribeRepositoriesRequest$nextToken": "

    The nextToken value returned from a previous paginated DescribeRepositories request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value. This value is null when there are no more results to return.

    ", + "DescribeRepositoriesResponse$nextToken": "

    The nextToken value to include in a future DescribeRepositories request. When the results of a DescribeRepositories request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "ListImagesRequest$nextToken": "

    The nextToken value returned from a previous paginated ListImages request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value. This value is null when there are no more results to return.

    ", + "ListImagesResponse$nextToken": "

    The nextToken value to include in a future ListImages request. When the results of a ListImages request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.
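    DescribeRepositories and ListImages share this maxResults/nextToken contract; a sketch of the paging loop it implies, using the vendored Go client (the generated client also exposes DescribeRepositoriesPages, which wraps the same loop):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/ecr"
    )

    func main() {
        svc := ecr.New(session.Must(session.NewSession()))

        var token *string
        for {
            out, err := svc.DescribeRepositories(&ecr.DescribeRepositoriesInput{
                MaxResults: aws.Int64(100), // 1-100 per page, per the docs above
                NextToken:  token,
            })
            if err != nil {
                panic(err)
            }
            for _, repo := range out.Repositories {
                fmt.Println(*repo.RepositoryName)
            }
            if out.NextToken == nil { // nil signals the last page
                break
            }
            token = out.NextToken
        }
    }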

    " + } + }, + "PartSize": { + "base": null, + "refs": { + "InitiateLayerUploadResponse$partSize": "

    The size, in bytes, that Amazon ECR expects future layer part uploads to be.

    ", + "InvalidLayerPartException$lastValidByteReceived": "

    The last valid byte received from the layer part upload that is associated with the exception.

    ", + "UploadLayerPartRequest$partFirstByte": "

    The integer value of the first byte of the layer part.

    ", + "UploadLayerPartRequest$partLastByte": "

    The integer value of the last byte of the layer part.

    ", + "UploadLayerPartResponse$lastByteReceived": "

    The integer value of the last byte received in the request.

    " + } + }, + "ProxyEndpoint": { + "base": null, + "refs": { + "AuthorizationData$proxyEndpoint": "

    The registry URL to use for this authorization token in a docker login command. The Amazon ECR registry URL format is https://aws_account_id.dkr.ecr.region.amazonaws.com. For example, https://012345678910.dkr.ecr.us-east-1.amazonaws.com.

    " + } + }, + "PutImageRequest": { + "base": null, + "refs": { + } + }, + "PutImageResponse": { + "base": null, + "refs": { + } + }, + "RegistryId": { + "base": null, + "refs": { + "BatchCheckLayerAvailabilityRequest$registryId": "

    The AWS account ID associated with the registry that contains the image layers to check. If you do not specify a registry, the default registry is assumed.

    ", + "BatchDeleteImageRequest$registryId": "

    The AWS account ID associated with the registry that contains the image to delete. If you do not specify a registry, the default registry is assumed.

    ", + "BatchGetImageRequest$registryId": "

    The AWS account ID associated with the registry that contains the images to describe. If you do not specify a registry, the default registry is assumed.

    ", + "CompleteLayerUploadRequest$registryId": "

    The AWS account ID associated with the registry to which to upload layers. If you do not specify a registry, the default registry is assumed.

    ", + "CompleteLayerUploadResponse$registryId": "

    The registry ID associated with the request.

    ", + "DeleteRepositoryPolicyRequest$registryId": "

    The AWS account ID associated with the registry that contains the repository policy to delete. If you do not specify a registry, the default registry is assumed.

    ", + "DeleteRepositoryPolicyResponse$registryId": "

    The registry ID associated with the request.

    ", + "DeleteRepositoryRequest$registryId": "

    The AWS account ID associated with the registry that contains the repository to delete. If you do not specify a registry, the default registry is assumed.

    ", + "DescribeRepositoriesRequest$registryId": "

    The AWS account ID associated with the registry that contains the repositories to be described. If you do not specify a registry, the default registry is assumed.

    ", + "GetAuthorizationTokenRegistryIdList$member": null, + "GetDownloadUrlForLayerRequest$registryId": "

    The AWS account ID associated with the registry that contains the image layer to download. If you do not specify a registry, the default registry is assumed.

    ", + "GetRepositoryPolicyRequest$registryId": "

    The AWS account ID associated with the registry that contains the repository. If you do not specify a registry, the default registry is assumed.

    ", + "GetRepositoryPolicyResponse$registryId": "

    The registry ID associated with the request.

    ", + "Image$registryId": "

    The AWS account ID associated with the registry containing the image.

    ", + "InitiateLayerUploadRequest$registryId": "

    The AWS account ID associated with the registry that you intend to upload layers to. If you do not specify a registry, the default registry is assumed.

    ", + "InvalidLayerPartException$registryId": "

    The registry ID associated with the exception.

    ", + "ListImagesRequest$registryId": "

    The AWS account ID associated with the registry that contains the repository to list images in. If you do not specify a registry, the default registry is assumed.

    ", + "PutImageRequest$registryId": "

    The AWS account ID associated with the registry that contains the repository in which to put the image. If you do not specify a registry, the default registry is assumed.

    ", + "Repository$registryId": "

    The AWS account ID associated with the registry that contains the repository.

    ", + "SetRepositoryPolicyRequest$registryId": "

    The AWS account ID associated with the registry that contains the repository. If you do not specify a registry, the default registry is assumed.

    ", + "SetRepositoryPolicyResponse$registryId": "

    The registry ID associated with the request.

    ", + "UploadLayerPartRequest$registryId": "

    The AWS account ID associated with the registry that you are uploading layer parts to. If you do not specify a registry, the default registry is assumed.

    ", + "UploadLayerPartResponse$registryId": "

    The registry ID associated with the request.

    " + } + }, + "Repository": { + "base": "

    Object representing a repository.

    ", + "refs": { + "CreateRepositoryResponse$repository": null, + "DeleteRepositoryResponse$repository": null, + "RepositoryList$member": null + } + }, + "RepositoryAlreadyExistsException": { + "base": "

    The specified repository already exists in the specified registry.

    ", + "refs": { + } + }, + "RepositoryList": { + "base": null, + "refs": { + "DescribeRepositoriesResponse$repositories": "

    A list of repository objects corresponding to valid repositories.

    " + } + }, + "RepositoryName": { + "base": null, + "refs": { + "BatchCheckLayerAvailabilityRequest$repositoryName": "

    The name of the repository that is associated with the image layers to check.

    ", + "BatchDeleteImageRequest$repositoryName": "

    The repository that contains the image to delete.

    ", + "BatchGetImageRequest$repositoryName": "

    The repository that contains the images to describe.

    ", + "CompleteLayerUploadRequest$repositoryName": "

    The name of the repository to associate with the image layer.

    ", + "CompleteLayerUploadResponse$repositoryName": "

    The repository name associated with the request.

    ", + "CreateRepositoryRequest$repositoryName": "

    The name to use for the repository. The repository name may be specified on its own (such as nginx-web-app) or it can be prepended with a namespace to group the repository into a category (such as project-a/nginx-web-app).

    ", + "DeleteRepositoryPolicyRequest$repositoryName": "

    The name of the repository that is associated with the repository policy to delete.

    ", + "DeleteRepositoryPolicyResponse$repositoryName": "

    The repository name associated with the request.

    ", + "DeleteRepositoryRequest$repositoryName": "

    The name of the repository to delete.

    ", + "GetDownloadUrlForLayerRequest$repositoryName": "

    The name of the repository that is associated with the image layer to download.

    ", + "GetRepositoryPolicyRequest$repositoryName": "

    The name of the repository whose policy you want to retrieve.

    ", + "GetRepositoryPolicyResponse$repositoryName": "

    The repository name associated with the request.

    ", + "Image$repositoryName": "

    The name of the repository associated with the image.

    ", + "InitiateLayerUploadRequest$repositoryName": "

    The name of the repository that you intend to upload layers to.

    ", + "InvalidLayerPartException$repositoryName": "

    The repository name associated with the exception.

    ", + "ListImagesRequest$repositoryName": "

    The repository whose image IDs are to be listed.

    ", + "PutImageRequest$repositoryName": "

    The name of the repository in which to put the image.

    ", + "Repository$repositoryName": "

    The name of the repository.

    ", + "RepositoryNameList$member": null, + "SetRepositoryPolicyRequest$repositoryName": "

    The name of the repository to receive the policy.

    ", + "SetRepositoryPolicyResponse$repositoryName": "

    The repository name associated with the request.

    ", + "UploadLayerPartRequest$repositoryName": "

    The name of the repository that you are uploading layer parts to.

    ", + "UploadLayerPartResponse$repositoryName": "

    The repository name associated with the request.

    " + } + }, + "RepositoryNameList": { + "base": null, + "refs": { + "DescribeRepositoriesRequest$repositoryNames": "

    A list of repositories to describe. If this parameter is omitted, then all repositories in a registry are described.

    " + } + }, + "RepositoryNotEmptyException": { + "base": "

    The specified repository contains images. To delete a repository that contains images, you must force the deletion with the force parameter.
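    A one-call sketch of the forced deletion described above, reusing the svc client from the earlier sketches (the repository name is a placeholder):

    if _, err := svc.DeleteRepository(&ecr.DeleteRepositoryInput{
        RepositoryName: aws.String("my-repo"), // hypothetical
        Force:          aws.Bool(true),        // required when images remain
    }); err != nil {
        panic(err)
    }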

    ", + "refs": { + } + }, + "RepositoryNotFoundException": { + "base": "

    The specified repository could not be found. Check the spelling of the specified repository and ensure that you are performing operations on the correct registry.

    ", + "refs": { + } + }, + "RepositoryPolicyNotFoundException": { + "base": "

    The specified repository and registry combination does not have an associated repository policy.

    ", + "refs": { + } + }, + "RepositoryPolicyText": { + "base": null, + "refs": { + "DeleteRepositoryPolicyResponse$policyText": "

    The JSON repository policy that was deleted from the repository.

    ", + "GetRepositoryPolicyResponse$policyText": "

    The JSON repository policy text associated with the repository.

    ", + "SetRepositoryPolicyRequest$policyText": "

    The JSON repository policy text to apply to the repository.

    ", + "SetRepositoryPolicyResponse$policyText": "

    The JSON repository policy text applied to the repository.
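    A hedged sketch of applying a pull-only repository policy with SetRepositoryPolicy; the policy document below is an assumed example (the account ARN is a placeholder and the actions simply mirror operations defined in this file), and svc is the client from the earlier sketches:

    policy := `{
      "Version": "2008-10-17",
      "Statement": [{
        "Sid": "AllowPull",
        "Effect": "Allow",
        "Principal": {"AWS": "arn:aws:iam::123456789012:root"},
        "Action": ["ecr:GetDownloadUrlForLayer", "ecr:BatchGetImage"]
      }]
    }` // hypothetical policy text

    if _, err := svc.SetRepositoryPolicy(&ecr.SetRepositoryPolicyInput{
        RepositoryName: aws.String("my-repo"), // hypothetical
        PolicyText:     aws.String(policy),
    }); err != nil {
        panic(err)
    }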

    " + } + }, + "ServerException": { + "base": "

    These errors are usually caused by a server-side issue.

    ", + "refs": { + } + }, + "SetRepositoryPolicyRequest": { + "base": null, + "refs": { + } + }, + "SetRepositoryPolicyResponse": { + "base": null, + "refs": { + } + }, + "UploadId": { + "base": null, + "refs": { + "CompleteLayerUploadRequest$uploadId": "

    The upload ID from a previous InitiateLayerUpload operation to associate with the image layer.

    ", + "CompleteLayerUploadResponse$uploadId": "

    The upload ID associated with the layer.

    ", + "InitiateLayerUploadResponse$uploadId": "

    The upload ID for the layer upload. This parameter is passed to further UploadLayerPart and CompleteLayerUpload operations.

    ", + "InvalidLayerPartException$uploadId": "

    The upload ID associated with the exception.

    ", + "UploadLayerPartRequest$uploadId": "

    The upload ID from a previous InitiateLayerUpload operation to associate with the layer part upload.

    ", + "UploadLayerPartResponse$uploadId": "

    The upload ID associated with the request.

    " + } + }, + "UploadLayerPartRequest": { + "base": null, + "refs": { + } + }, + "UploadLayerPartResponse": { + "base": null, + "refs": { + } + }, + "UploadNotFoundException": { + "base": "

    The upload could not be found, or the specified upload ID is not valid for this repository.

    ", + "refs": { + } + }, + "Url": { + "base": null, + "refs": { + "GetDownloadUrlForLayerResponse$downloadUrl": "

    The pre-signed Amazon S3 download URL for the requested layer.

    ", + "Repository$repositoryUri": "

    The URI for the repository. You can use this URI for Docker push and pull operations.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/ecr/2015-09-21/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/ecr/2015-09-21/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/ecr/2015-09-21/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/api-2.json new file mode 100644 index 000000000..b556c0577 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/api-2.json @@ -0,0 +1,1393 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2014-11-13", + "endpointPrefix":"ecs", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"Amazon ECS", + "serviceFullName":"Amazon EC2 Container Service", + "signatureVersion":"v4", + "targetPrefix":"AmazonEC2ContainerServiceV20141113" + }, + "operations":{ + "CreateCluster":{ + "name":"CreateCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateClusterRequest"}, + "output":{"shape":"CreateClusterResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"} + ] + }, + "CreateService":{ + "name":"CreateService", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateServiceRequest"}, + "output":{"shape":"CreateServiceResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"} + ] + }, + "DeleteCluster":{ + "name":"DeleteCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteClusterRequest"}, + "output":{"shape":"DeleteClusterResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"}, + {"shape":"ClusterContainsContainerInstancesException"}, + {"shape":"ClusterContainsServicesException"} + ] + }, + "DeleteService":{ + "name":"DeleteService", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteServiceRequest"}, + "output":{"shape":"DeleteServiceResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"}, + {"shape":"ServiceNotFoundException"} + ] + }, + "DeregisterContainerInstance":{ + "name":"DeregisterContainerInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeregisterContainerInstanceRequest"}, + "output":{"shape":"DeregisterContainerInstanceResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"} + ] + }, + "DeregisterTaskDefinition":{ + "name":"DeregisterTaskDefinition", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeregisterTaskDefinitionRequest"}, + "output":{"shape":"DeregisterTaskDefinitionResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"} + ] + }, + "DescribeClusters":{ + "name":"DescribeClusters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeClustersRequest"}, + "output":{"shape":"DescribeClustersResponse"}, + "errors":[ + 
{"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"} + ] + }, + "DescribeContainerInstances":{ + "name":"DescribeContainerInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeContainerInstancesRequest"}, + "output":{"shape":"DescribeContainerInstancesResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"} + ] + }, + "DescribeServices":{ + "name":"DescribeServices", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeServicesRequest"}, + "output":{"shape":"DescribeServicesResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"} + ] + }, + "DescribeTaskDefinition":{ + "name":"DescribeTaskDefinition", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTaskDefinitionRequest"}, + "output":{"shape":"DescribeTaskDefinitionResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"} + ] + }, + "DescribeTasks":{ + "name":"DescribeTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTasksRequest"}, + "output":{"shape":"DescribeTasksResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"} + ] + }, + "DiscoverPollEndpoint":{ + "name":"DiscoverPollEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DiscoverPollEndpointRequest"}, + "output":{"shape":"DiscoverPollEndpointResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"} + ] + }, + "ListClusters":{ + "name":"ListClusters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListClustersRequest"}, + "output":{"shape":"ListClustersResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"} + ] + }, + "ListContainerInstances":{ + "name":"ListContainerInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListContainerInstancesRequest"}, + "output":{"shape":"ListContainerInstancesResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"} + ] + }, + "ListServices":{ + "name":"ListServices", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListServicesRequest"}, + "output":{"shape":"ListServicesResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"} + ] + }, + "ListTaskDefinitionFamilies":{ + "name":"ListTaskDefinitionFamilies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTaskDefinitionFamiliesRequest"}, + "output":{"shape":"ListTaskDefinitionFamiliesResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"} + ] + }, + "ListTaskDefinitions":{ + "name":"ListTaskDefinitions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTaskDefinitionsRequest"}, + "output":{"shape":"ListTaskDefinitionsResponse"}, + "errors":[ + 
{"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"} + ] + }, + "ListTasks":{ + "name":"ListTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTasksRequest"}, + "output":{"shape":"ListTasksResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"}, + {"shape":"ServiceNotFoundException"} + ] + }, + "RegisterContainerInstance":{ + "name":"RegisterContainerInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterContainerInstanceRequest"}, + "output":{"shape":"RegisterContainerInstanceResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"} + ] + }, + "RegisterTaskDefinition":{ + "name":"RegisterTaskDefinition", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterTaskDefinitionRequest"}, + "output":{"shape":"RegisterTaskDefinitionResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"} + ] + }, + "RunTask":{ + "name":"RunTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RunTaskRequest"}, + "output":{"shape":"RunTaskResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"} + ] + }, + "StartTask":{ + "name":"StartTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartTaskRequest"}, + "output":{"shape":"StartTaskResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"} + ] + }, + "StopTask":{ + "name":"StopTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopTaskRequest"}, + "output":{"shape":"StopTaskResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"} + ] + }, + "SubmitContainerStateChange":{ + "name":"SubmitContainerStateChange", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SubmitContainerStateChangeRequest"}, + "output":{"shape":"SubmitContainerStateChangeResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"} + ] + }, + "SubmitTaskStateChange":{ + "name":"SubmitTaskStateChange", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SubmitTaskStateChangeRequest"}, + "output":{"shape":"SubmitTaskStateChangeResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"} + ] + }, + "UpdateContainerAgent":{ + "name":"UpdateContainerAgent", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateContainerAgentRequest"}, + "output":{"shape":"UpdateContainerAgentResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"}, + {"shape":"UpdateInProgressException"}, + {"shape":"NoUpdateAvailableException"}, + {"shape":"MissingVersionException"} + ] + }, + "UpdateService":{ + "name":"UpdateService", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateServiceRequest"}, + "output":{"shape":"UpdateServiceResponse"}, + "errors":[ + {"shape":"ServerException"}, + 
{"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"}, + {"shape":"ServiceNotFoundException"}, + {"shape":"ServiceNotActiveException"} + ] + } + }, + "shapes":{ + "AgentUpdateStatus":{ + "type":"string", + "enum":[ + "PENDING", + "STAGING", + "STAGED", + "UPDATING", + "UPDATED", + "FAILED" + ] + }, + "Attribute":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{"shape":"String"}, + "value":{"shape":"String"} + } + }, + "Attributes":{ + "type":"list", + "member":{"shape":"Attribute"} + }, + "Boolean":{"type":"boolean"}, + "BoxedBoolean":{ + "type":"boolean", + "box":true + }, + "BoxedInteger":{ + "type":"integer", + "box":true + }, + "ClientException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "Cluster":{ + "type":"structure", + "members":{ + "clusterArn":{"shape":"String"}, + "clusterName":{"shape":"String"}, + "status":{"shape":"String"}, + "registeredContainerInstancesCount":{"shape":"Integer"}, + "runningTasksCount":{"shape":"Integer"}, + "pendingTasksCount":{"shape":"Integer"}, + "activeServicesCount":{"shape":"Integer"} + } + }, + "ClusterContainsContainerInstancesException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ClusterContainsServicesException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ClusterNotFoundException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "Clusters":{ + "type":"list", + "member":{"shape":"Cluster"} + }, + "Container":{ + "type":"structure", + "members":{ + "containerArn":{"shape":"String"}, + "taskArn":{"shape":"String"}, + "name":{"shape":"String"}, + "lastStatus":{"shape":"String"}, + "exitCode":{"shape":"BoxedInteger"}, + "reason":{"shape":"String"}, + "networkBindings":{"shape":"NetworkBindings"} + } + }, + "ContainerDefinition":{ + "type":"structure", + "members":{ + "name":{"shape":"String"}, + "image":{"shape":"String"}, + "cpu":{"shape":"Integer"}, + "memory":{"shape":"Integer"}, + "links":{"shape":"StringList"}, + "portMappings":{"shape":"PortMappingList"}, + "essential":{"shape":"BoxedBoolean"}, + "entryPoint":{"shape":"StringList"}, + "command":{"shape":"StringList"}, + "environment":{"shape":"EnvironmentVariables"}, + "mountPoints":{"shape":"MountPointList"}, + "volumesFrom":{"shape":"VolumeFromList"}, + "hostname":{"shape":"String"}, + "user":{"shape":"String"}, + "workingDirectory":{"shape":"String"}, + "disableNetworking":{"shape":"BoxedBoolean"}, + "privileged":{"shape":"BoxedBoolean"}, + "readonlyRootFilesystem":{"shape":"BoxedBoolean"}, + "dnsServers":{"shape":"StringList"}, + "dnsSearchDomains":{"shape":"StringList"}, + "extraHosts":{"shape":"HostEntryList"}, + "dockerSecurityOptions":{"shape":"StringList"}, + "dockerLabels":{"shape":"DockerLabelsMap"}, + "ulimits":{"shape":"UlimitList"}, + "logConfiguration":{"shape":"LogConfiguration"} + } + }, + "ContainerDefinitions":{ + "type":"list", + "member":{"shape":"ContainerDefinition"} + }, + "ContainerInstance":{ + "type":"structure", + "members":{ + "containerInstanceArn":{"shape":"String"}, + "ec2InstanceId":{"shape":"String"}, + "versionInfo":{"shape":"VersionInfo"}, + "remainingResources":{"shape":"Resources"}, + "registeredResources":{"shape":"Resources"}, + "status":{"shape":"String"}, + "agentConnected":{"shape":"Boolean"}, + "runningTasksCount":{"shape":"Integer"}, + "pendingTasksCount":{"shape":"Integer"}, + "agentUpdateStatus":{"shape":"AgentUpdateStatus"}, + 
"attributes":{"shape":"Attributes"} + } + }, + "ContainerInstances":{ + "type":"list", + "member":{"shape":"ContainerInstance"} + }, + "ContainerOverride":{ + "type":"structure", + "members":{ + "name":{"shape":"String"}, + "command":{"shape":"StringList"}, + "environment":{"shape":"EnvironmentVariables"} + } + }, + "ContainerOverrides":{ + "type":"list", + "member":{"shape":"ContainerOverride"} + }, + "Containers":{ + "type":"list", + "member":{"shape":"Container"} + }, + "CreateClusterRequest":{ + "type":"structure", + "members":{ + "clusterName":{"shape":"String"} + } + }, + "CreateClusterResponse":{ + "type":"structure", + "members":{ + "cluster":{"shape":"Cluster"} + } + }, + "CreateServiceRequest":{ + "type":"structure", + "required":[ + "serviceName", + "taskDefinition", + "desiredCount" + ], + "members":{ + "cluster":{"shape":"String"}, + "serviceName":{"shape":"String"}, + "taskDefinition":{"shape":"String"}, + "loadBalancers":{"shape":"LoadBalancers"}, + "desiredCount":{"shape":"BoxedInteger"}, + "clientToken":{"shape":"String"}, + "role":{"shape":"String"}, + "deploymentConfiguration":{"shape":"DeploymentConfiguration"} + } + }, + "CreateServiceResponse":{ + "type":"structure", + "members":{ + "service":{"shape":"Service"} + } + }, + "DeleteClusterRequest":{ + "type":"structure", + "required":["cluster"], + "members":{ + "cluster":{"shape":"String"} + } + }, + "DeleteClusterResponse":{ + "type":"structure", + "members":{ + "cluster":{"shape":"Cluster"} + } + }, + "DeleteServiceRequest":{ + "type":"structure", + "required":["service"], + "members":{ + "cluster":{"shape":"String"}, + "service":{"shape":"String"} + } + }, + "DeleteServiceResponse":{ + "type":"structure", + "members":{ + "service":{"shape":"Service"} + } + }, + "Deployment":{ + "type":"structure", + "members":{ + "id":{"shape":"String"}, + "status":{"shape":"String"}, + "taskDefinition":{"shape":"String"}, + "desiredCount":{"shape":"Integer"}, + "pendingCount":{"shape":"Integer"}, + "runningCount":{"shape":"Integer"}, + "createdAt":{"shape":"Timestamp"}, + "updatedAt":{"shape":"Timestamp"} + } + }, + "DeploymentConfiguration":{ + "type":"structure", + "members":{ + "maximumPercent":{"shape":"BoxedInteger"}, + "minimumHealthyPercent":{"shape":"BoxedInteger"} + } + }, + "Deployments":{ + "type":"list", + "member":{"shape":"Deployment"} + }, + "DeregisterContainerInstanceRequest":{ + "type":"structure", + "required":["containerInstance"], + "members":{ + "cluster":{"shape":"String"}, + "containerInstance":{"shape":"String"}, + "force":{"shape":"BoxedBoolean"} + } + }, + "DeregisterContainerInstanceResponse":{ + "type":"structure", + "members":{ + "containerInstance":{"shape":"ContainerInstance"} + } + }, + "DeregisterTaskDefinitionRequest":{ + "type":"structure", + "required":["taskDefinition"], + "members":{ + "taskDefinition":{"shape":"String"} + } + }, + "DeregisterTaskDefinitionResponse":{ + "type":"structure", + "members":{ + "taskDefinition":{"shape":"TaskDefinition"} + } + }, + "DescribeClustersRequest":{ + "type":"structure", + "members":{ + "clusters":{"shape":"StringList"} + } + }, + "DescribeClustersResponse":{ + "type":"structure", + "members":{ + "clusters":{"shape":"Clusters"}, + "failures":{"shape":"Failures"} + } + }, + "DescribeContainerInstancesRequest":{ + "type":"structure", + "required":["containerInstances"], + "members":{ + "cluster":{"shape":"String"}, + "containerInstances":{"shape":"StringList"} + } + }, + "DescribeContainerInstancesResponse":{ + "type":"structure", + "members":{ + 
"containerInstances":{"shape":"ContainerInstances"}, + "failures":{"shape":"Failures"} + } + }, + "DescribeServicesRequest":{ + "type":"structure", + "required":["services"], + "members":{ + "cluster":{"shape":"String"}, + "services":{"shape":"StringList"} + } + }, + "DescribeServicesResponse":{ + "type":"structure", + "members":{ + "services":{"shape":"Services"}, + "failures":{"shape":"Failures"} + } + }, + "DescribeTaskDefinitionRequest":{ + "type":"structure", + "required":["taskDefinition"], + "members":{ + "taskDefinition":{"shape":"String"} + } + }, + "DescribeTaskDefinitionResponse":{ + "type":"structure", + "members":{ + "taskDefinition":{"shape":"TaskDefinition"} + } + }, + "DescribeTasksRequest":{ + "type":"structure", + "required":["tasks"], + "members":{ + "cluster":{"shape":"String"}, + "tasks":{"shape":"StringList"} + } + }, + "DescribeTasksResponse":{ + "type":"structure", + "members":{ + "tasks":{"shape":"Tasks"}, + "failures":{"shape":"Failures"} + } + }, + "DesiredStatus":{ + "type":"string", + "enum":[ + "RUNNING", + "PENDING", + "STOPPED" + ] + }, + "DiscoverPollEndpointRequest":{ + "type":"structure", + "members":{ + "containerInstance":{"shape":"String"}, + "cluster":{"shape":"String"} + } + }, + "DiscoverPollEndpointResponse":{ + "type":"structure", + "members":{ + "endpoint":{"shape":"String"}, + "telemetryEndpoint":{"shape":"String"} + } + }, + "DockerLabelsMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "Double":{"type":"double"}, + "EnvironmentVariables":{ + "type":"list", + "member":{"shape":"KeyValuePair"} + }, + "Failure":{ + "type":"structure", + "members":{ + "arn":{"shape":"String"}, + "reason":{"shape":"String"} + } + }, + "Failures":{ + "type":"list", + "member":{"shape":"Failure"} + }, + "HostEntry":{ + "type":"structure", + "required":[ + "hostname", + "ipAddress" + ], + "members":{ + "hostname":{"shape":"String"}, + "ipAddress":{"shape":"String"} + } + }, + "HostEntryList":{ + "type":"list", + "member":{"shape":"HostEntry"} + }, + "HostVolumeProperties":{ + "type":"structure", + "members":{ + "sourcePath":{"shape":"String"} + } + }, + "Integer":{"type":"integer"}, + "InvalidParameterException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "KeyValuePair":{ + "type":"structure", + "members":{ + "name":{"shape":"String"}, + "value":{"shape":"String"} + } + }, + "ListClustersRequest":{ + "type":"structure", + "members":{ + "nextToken":{"shape":"String"}, + "maxResults":{"shape":"BoxedInteger"} + } + }, + "ListClustersResponse":{ + "type":"structure", + "members":{ + "clusterArns":{"shape":"StringList"}, + "nextToken":{"shape":"String"} + } + }, + "ListContainerInstancesRequest":{ + "type":"structure", + "members":{ + "cluster":{"shape":"String"}, + "nextToken":{"shape":"String"}, + "maxResults":{"shape":"BoxedInteger"} + } + }, + "ListContainerInstancesResponse":{ + "type":"structure", + "members":{ + "containerInstanceArns":{"shape":"StringList"}, + "nextToken":{"shape":"String"} + } + }, + "ListServicesRequest":{ + "type":"structure", + "members":{ + "cluster":{"shape":"String"}, + "nextToken":{"shape":"String"}, + "maxResults":{"shape":"BoxedInteger"} + } + }, + "ListServicesResponse":{ + "type":"structure", + "members":{ + "serviceArns":{"shape":"StringList"}, + "nextToken":{"shape":"String"} + } + }, + "ListTaskDefinitionFamiliesRequest":{ + "type":"structure", + "members":{ + "familyPrefix":{"shape":"String"}, + "status":{"shape":"TaskDefinitionFamilyStatus"}, + 
"nextToken":{"shape":"String"}, + "maxResults":{"shape":"BoxedInteger"} + } + }, + "ListTaskDefinitionFamiliesResponse":{ + "type":"structure", + "members":{ + "families":{"shape":"StringList"}, + "nextToken":{"shape":"String"} + } + }, + "ListTaskDefinitionsRequest":{ + "type":"structure", + "members":{ + "familyPrefix":{"shape":"String"}, + "status":{"shape":"TaskDefinitionStatus"}, + "sort":{"shape":"SortOrder"}, + "nextToken":{"shape":"String"}, + "maxResults":{"shape":"BoxedInteger"} + } + }, + "ListTaskDefinitionsResponse":{ + "type":"structure", + "members":{ + "taskDefinitionArns":{"shape":"StringList"}, + "nextToken":{"shape":"String"} + } + }, + "ListTasksRequest":{ + "type":"structure", + "members":{ + "cluster":{"shape":"String"}, + "containerInstance":{"shape":"String"}, + "family":{"shape":"String"}, + "nextToken":{"shape":"String"}, + "maxResults":{"shape":"BoxedInteger"}, + "startedBy":{"shape":"String"}, + "serviceName":{"shape":"String"}, + "desiredStatus":{"shape":"DesiredStatus"} + } + }, + "ListTasksResponse":{ + "type":"structure", + "members":{ + "taskArns":{"shape":"StringList"}, + "nextToken":{"shape":"String"} + } + }, + "LoadBalancer":{ + "type":"structure", + "members":{ + "loadBalancerName":{"shape":"String"}, + "containerName":{"shape":"String"}, + "containerPort":{"shape":"BoxedInteger"} + } + }, + "LoadBalancers":{ + "type":"list", + "member":{"shape":"LoadBalancer"} + }, + "LogConfiguration":{ + "type":"structure", + "required":["logDriver"], + "members":{ + "logDriver":{"shape":"LogDriver"}, + "options":{"shape":"LogConfigurationOptionsMap"} + } + }, + "LogConfigurationOptionsMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "LogDriver":{ + "type":"string", + "enum":[ + "json-file", + "syslog", + "journald", + "gelf", + "fluentd", + "awslogs" + ] + }, + "Long":{"type":"long"}, + "MissingVersionException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "MountPoint":{ + "type":"structure", + "members":{ + "sourceVolume":{"shape":"String"}, + "containerPath":{"shape":"String"}, + "readOnly":{"shape":"BoxedBoolean"} + } + }, + "MountPointList":{ + "type":"list", + "member":{"shape":"MountPoint"} + }, + "NetworkBinding":{ + "type":"structure", + "members":{ + "bindIP":{"shape":"String"}, + "containerPort":{"shape":"BoxedInteger"}, + "hostPort":{"shape":"BoxedInteger"}, + "protocol":{"shape":"TransportProtocol"} + } + }, + "NetworkBindings":{ + "type":"list", + "member":{"shape":"NetworkBinding"} + }, + "NoUpdateAvailableException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "PortMapping":{ + "type":"structure", + "members":{ + "containerPort":{"shape":"Integer"}, + "hostPort":{"shape":"Integer"}, + "protocol":{"shape":"TransportProtocol"} + } + }, + "PortMappingList":{ + "type":"list", + "member":{"shape":"PortMapping"} + }, + "RegisterContainerInstanceRequest":{ + "type":"structure", + "members":{ + "cluster":{"shape":"String"}, + "instanceIdentityDocument":{"shape":"String"}, + "instanceIdentityDocumentSignature":{"shape":"String"}, + "totalResources":{"shape":"Resources"}, + "versionInfo":{"shape":"VersionInfo"}, + "containerInstanceArn":{"shape":"String"}, + "attributes":{"shape":"Attributes"} + } + }, + "RegisterContainerInstanceResponse":{ + "type":"structure", + "members":{ + "containerInstance":{"shape":"ContainerInstance"} + } + }, + "RegisterTaskDefinitionRequest":{ + "type":"structure", + "required":[ + "family", + "containerDefinitions" + ], + "members":{ + 
"family":{"shape":"String"}, + "containerDefinitions":{"shape":"ContainerDefinitions"}, + "volumes":{"shape":"VolumeList"} + } + }, + "RegisterTaskDefinitionResponse":{ + "type":"structure", + "members":{ + "taskDefinition":{"shape":"TaskDefinition"} + } + }, + "RequiresAttributes":{ + "type":"list", + "member":{"shape":"Attribute"} + }, + "Resource":{ + "type":"structure", + "members":{ + "name":{"shape":"String"}, + "type":{"shape":"String"}, + "doubleValue":{"shape":"Double"}, + "longValue":{"shape":"Long"}, + "integerValue":{"shape":"Integer"}, + "stringSetValue":{"shape":"StringList"} + } + }, + "Resources":{ + "type":"list", + "member":{"shape":"Resource"} + }, + "RunTaskRequest":{ + "type":"structure", + "required":["taskDefinition"], + "members":{ + "cluster":{"shape":"String"}, + "taskDefinition":{"shape":"String"}, + "overrides":{"shape":"TaskOverride"}, + "count":{"shape":"BoxedInteger"}, + "startedBy":{"shape":"String"} + } + }, + "RunTaskResponse":{ + "type":"structure", + "members":{ + "tasks":{"shape":"Tasks"}, + "failures":{"shape":"Failures"} + } + }, + "ServerException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true, + "fault":true + }, + "Service":{ + "type":"structure", + "members":{ + "serviceArn":{"shape":"String"}, + "serviceName":{"shape":"String"}, + "clusterArn":{"shape":"String"}, + "loadBalancers":{"shape":"LoadBalancers"}, + "status":{"shape":"String"}, + "desiredCount":{"shape":"Integer"}, + "runningCount":{"shape":"Integer"}, + "pendingCount":{"shape":"Integer"}, + "taskDefinition":{"shape":"String"}, + "deploymentConfiguration":{"shape":"DeploymentConfiguration"}, + "deployments":{"shape":"Deployments"}, + "roleArn":{"shape":"String"}, + "events":{"shape":"ServiceEvents"}, + "createdAt":{"shape":"Timestamp"} + } + }, + "ServiceEvent":{ + "type":"structure", + "members":{ + "id":{"shape":"String"}, + "createdAt":{"shape":"Timestamp"}, + "message":{"shape":"String"} + } + }, + "ServiceEvents":{ + "type":"list", + "member":{"shape":"ServiceEvent"} + }, + "ServiceNotActiveException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ServiceNotFoundException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "Services":{ + "type":"list", + "member":{"shape":"Service"} + }, + "SortOrder":{ + "type":"string", + "enum":[ + "ASC", + "DESC" + ] + }, + "StartTaskRequest":{ + "type":"structure", + "required":[ + "taskDefinition", + "containerInstances" + ], + "members":{ + "cluster":{"shape":"String"}, + "taskDefinition":{"shape":"String"}, + "overrides":{"shape":"TaskOverride"}, + "containerInstances":{"shape":"StringList"}, + "startedBy":{"shape":"String"} + } + }, + "StartTaskResponse":{ + "type":"structure", + "members":{ + "tasks":{"shape":"Tasks"}, + "failures":{"shape":"Failures"} + } + }, + "StopTaskRequest":{ + "type":"structure", + "required":["task"], + "members":{ + "cluster":{"shape":"String"}, + "task":{"shape":"String"}, + "reason":{"shape":"String"} + } + }, + "StopTaskResponse":{ + "type":"structure", + "members":{ + "task":{"shape":"Task"} + } + }, + "String":{"type":"string"}, + "StringList":{ + "type":"list", + "member":{"shape":"String"} + }, + "SubmitContainerStateChangeRequest":{ + "type":"structure", + "members":{ + "cluster":{"shape":"String"}, + "task":{"shape":"String"}, + "containerName":{"shape":"String"}, + "status":{"shape":"String"}, + "exitCode":{"shape":"BoxedInteger"}, + "reason":{"shape":"String"}, + "networkBindings":{"shape":"NetworkBindings"} + } + 
}, + "SubmitContainerStateChangeResponse":{ + "type":"structure", + "members":{ + "acknowledgment":{"shape":"String"} + } + }, + "SubmitTaskStateChangeRequest":{ + "type":"structure", + "members":{ + "cluster":{"shape":"String"}, + "task":{"shape":"String"}, + "status":{"shape":"String"}, + "reason":{"shape":"String"} + } + }, + "SubmitTaskStateChangeResponse":{ + "type":"structure", + "members":{ + "acknowledgment":{"shape":"String"} + } + }, + "Task":{ + "type":"structure", + "members":{ + "taskArn":{"shape":"String"}, + "clusterArn":{"shape":"String"}, + "taskDefinitionArn":{"shape":"String"}, + "containerInstanceArn":{"shape":"String"}, + "overrides":{"shape":"TaskOverride"}, + "lastStatus":{"shape":"String"}, + "desiredStatus":{"shape":"String"}, + "containers":{"shape":"Containers"}, + "startedBy":{"shape":"String"}, + "stoppedReason":{"shape":"String"}, + "createdAt":{"shape":"Timestamp"}, + "startedAt":{"shape":"Timestamp"}, + "stoppedAt":{"shape":"Timestamp"} + } + }, + "TaskDefinition":{ + "type":"structure", + "members":{ + "taskDefinitionArn":{"shape":"String"}, + "containerDefinitions":{"shape":"ContainerDefinitions"}, + "family":{"shape":"String"}, + "revision":{"shape":"Integer"}, + "volumes":{"shape":"VolumeList"}, + "status":{"shape":"TaskDefinitionStatus"}, + "requiresAttributes":{"shape":"RequiresAttributes"} + } + }, + "TaskDefinitionFamilyStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "INACTIVE", + "ALL" + ] + }, + "TaskDefinitionStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "INACTIVE" + ] + }, + "TaskOverride":{ + "type":"structure", + "members":{ + "containerOverrides":{"shape":"ContainerOverrides"} + } + }, + "Tasks":{ + "type":"list", + "member":{"shape":"Task"} + }, + "Timestamp":{"type":"timestamp"}, + "TransportProtocol":{ + "type":"string", + "enum":[ + "tcp", + "udp" + ] + }, + "Ulimit":{ + "type":"structure", + "required":[ + "name", + "softLimit", + "hardLimit" + ], + "members":{ + "name":{"shape":"UlimitName"}, + "softLimit":{"shape":"Integer"}, + "hardLimit":{"shape":"Integer"} + } + }, + "UlimitList":{ + "type":"list", + "member":{"shape":"Ulimit"} + }, + "UlimitName":{ + "type":"string", + "enum":[ + "core", + "cpu", + "data", + "fsize", + "locks", + "memlock", + "msgqueue", + "nice", + "nofile", + "nproc", + "rss", + "rtprio", + "rttime", + "sigpending", + "stack" + ] + }, + "UpdateContainerAgentRequest":{ + "type":"structure", + "required":["containerInstance"], + "members":{ + "cluster":{"shape":"String"}, + "containerInstance":{"shape":"String"} + } + }, + "UpdateContainerAgentResponse":{ + "type":"structure", + "members":{ + "containerInstance":{"shape":"ContainerInstance"} + } + }, + "UpdateInProgressException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "UpdateServiceRequest":{ + "type":"structure", + "required":["service"], + "members":{ + "cluster":{"shape":"String"}, + "service":{"shape":"String"}, + "desiredCount":{"shape":"BoxedInteger"}, + "taskDefinition":{"shape":"String"}, + "deploymentConfiguration":{"shape":"DeploymentConfiguration"} + } + }, + "UpdateServiceResponse":{ + "type":"structure", + "members":{ + "service":{"shape":"Service"} + } + }, + "VersionInfo":{ + "type":"structure", + "members":{ + "agentVersion":{"shape":"String"}, + "agentHash":{"shape":"String"}, + "dockerVersion":{"shape":"String"} + } + }, + "Volume":{ + "type":"structure", + "members":{ + "name":{"shape":"String"}, + "host":{"shape":"HostVolumeProperties"} + } + }, + "VolumeFrom":{ + "type":"structure", + "members":{ + 
"sourceContainer":{"shape":"String"}, + "readOnly":{"shape":"BoxedBoolean"} + } + }, + "VolumeFromList":{ + "type":"list", + "member":{"shape":"VolumeFrom"} + }, + "VolumeList":{ + "type":"list", + "member":{"shape":"Volume"} + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/docs-2.json new file mode 100644 index 000000000..09e9c6c52 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/docs-2.json @@ -0,0 +1,1002 @@ +{ + "version": "2.0", + "service": "

    Amazon EC2 Container Service (Amazon ECS) is a highly scalable, fast container management service that makes it easy to run, stop, and manage Docker containers on a cluster of EC2 instances. Amazon ECS lets you launch and stop container-enabled applications with simple API calls, allows you to get the state of your cluster from a centralized service, and gives you access to many familiar Amazon EC2 features like security groups, Amazon EBS volumes, and IAM roles.

    You can use Amazon ECS to schedule the placement of containers across your cluster based on your resource needs, isolation policies, and availability requirements. Amazon EC2 Container Service eliminates the need for you to operate your own cluster management and configuration management systems or worry about scaling your management infrastructure.

    ", + "operations": { + "CreateCluster": "

    Creates a new Amazon ECS cluster. By default, your account receives a default cluster when you launch your first container instance. However, you can create your own cluster with a unique name with the CreateCluster action.

    ", + "CreateService": "

    Runs and maintains a desired number of tasks from a specified task definition. If the number of tasks running in a service drops below desiredCount, Amazon ECS spawns another instantiation of the task in the specified cluster. To update an existing service, see UpdateService.

    In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind a load balancer. The load balancer distributes traffic across the tasks that are associated with the service.

    You can optionally specify a deployment configuration for your service. During a deployment (which is triggered by changing the task definition of a service with an UpdateService operation), the service scheduler uses the minimumHealthyPercent and maximumPercent parameters to determine the deployment strategy.

    If the minimumHealthyPercent is below 100%, the scheduler can ignore the desiredCount temporarily during a deployment. For example, if your service has a desiredCount of four tasks, a minimumHealthyPercent of 50% allows the scheduler to stop two existing tasks before starting two new tasks. Tasks for services that do not use a load balancer are considered healthy if they are in the RUNNING state; tasks for services that do use a load balancer are considered healthy if they are in the RUNNING state and the container instances they are hosted on are reported as healthy by the load balancer. The default value for minimumHealthyPercent is 50% in the console and 100% for the AWS CLI, the AWS SDKs, and the APIs.

    The maximumPercent parameter represents an upper limit on the number of running tasks during a deployment, which enables you to define the deployment batch size. For example, if your service has a desiredCount of four tasks, a maximumPercent value of 200% starts four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximumPercent is 200%.

    When the service scheduler launches new tasks, it attempts to balance them across the Availability Zones in your cluster with the following logic:

    • Determine which of the container instances in your cluster can support your service's task definition (for example, they have the required CPU, memory, ports, and container instance attributes).

    • Sort the valid container instances by the fewest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement.

    • Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service.
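
    To make these parameters concrete, here is a minimal sketch against the vendored github.com/aws/aws-sdk-go/service/ecs client; the cluster, service, and task definition names are placeholders, not values from this patch:

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/ecs"
        )

        func main() {
            svc := ecs.New(session.Must(session.NewSession()))
            out, err := svc.CreateService(&ecs.CreateServiceInput{
                Cluster:        aws.String("default"), // placeholder cluster
                ServiceName:    aws.String("web"),     // placeholder service name
                TaskDefinition: aws.String("web:1"),   // placeholder family:revision
                DesiredCount:   aws.Int64(4),
                DeploymentConfiguration: &ecs.DeploymentConfiguration{
                    MinimumHealthyPercent: aws.Int64(50),  // two of four tasks may stop early
                    MaximumPercent:        aws.Int64(200), // up to eight tasks mid-deployment
                },
            })
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println(aws.StringValue(out.Service.ServiceArn))
        }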

    ", + "DeleteCluster": "

    Deletes the specified cluster. You must deregister all container instances from this cluster before you may delete it. You can list the container instances in a cluster with ListContainerInstances and deregister them with DeregisterContainerInstance.

    ", + "DeleteService": "

    Deletes a specified service within a cluster. You can delete a service if you have no running tasks in it and the desired task count is zero. If the service is actively maintaining tasks, you cannot delete it, and you must update the service to a desired task count of zero. For more information, see UpdateService.

    When you delete a service, if there are still running tasks that require cleanup, the service status moves from ACTIVE to DRAINING, and the service is no longer visible in the console or in ListServices API operations. After the tasks have stopped, then the service status moves from DRAINING to INACTIVE. Services in the DRAINING or INACTIVE status can still be viewed with DescribeServices API operations; however, in the future, INACTIVE services may be cleaned up and purged from Amazon ECS record keeping, and DescribeServices API operations on those services will return a ServiceNotFoundException error.

    ", + "DeregisterContainerInstance": "

    Deregisters an Amazon ECS container instance from the specified cluster. This instance is no longer available to run tasks.

    If you intend to use the container instance for some other purpose after deregistration, you should stop all of the tasks running on the container instance before deregistration to avoid any orphaned tasks from consuming resources.

    Deregistering a container instance removes the instance from a cluster, but it does not terminate the EC2 instance; if you are finished using the instance, be sure to terminate it in the Amazon EC2 console to stop billing.

    If you terminate a running container instance with a connected Amazon ECS container agent, the agent automatically deregisters the instance from your cluster (stopped container instances or instances with disconnected agents are not automatically deregistered when terminated).

    ", + "DeregisterTaskDefinition": "

    Deregisters the specified task definition by family and revision. Upon deregistration, the task definition is marked as INACTIVE. Existing tasks and services that reference an INACTIVE task definition continue to run without disruption. Existing services that reference an INACTIVE task definition can still scale up or down by modifying the service's desired count.

    You cannot use an INACTIVE task definition to run new tasks or create new services, and you cannot update an existing service to reference an INACTIVE task definition (although there may be up to a 10 minute window following deregistration where these restrictions have not yet taken effect).

    ", + "DescribeClusters": "

    Describes one or more of your clusters.

    ", + "DescribeContainerInstances": "

    Describes Amazon EC2 Container Service container instances. Returns metadata about registered and remaining resources on each container instance requested.

    ", + "DescribeServices": "

    Describes the specified services running in your cluster.

    ", + "DescribeTaskDefinition": "

    Describes a task definition. You can specify a family and revision to find information about a specific task definition, or you can simply specify the family to find the latest ACTIVE revision in that family.

    You can only describe INACTIVE task definitions while an active task or service references them.

    ", + "DescribeTasks": "

    Describes a specified task or tasks.

    ", + "DiscoverPollEndpoint": "

    This action is only used by the Amazon EC2 Container Service agent, and it is not intended for use outside of the agent.

    Returns an endpoint for the Amazon EC2 Container Service agent to poll for updates.

    ", + "ListClusters": "

    Returns a list of existing clusters.

    ", + "ListContainerInstances": "

    Returns a list of container instances in a specified cluster.

    ", + "ListServices": "

    Lists the services that are running in a specified cluster.

    ", + "ListTaskDefinitionFamilies": "

    Returns a list of task definition families that are registered to your account (which may include task definition families that no longer have any ACTIVE task definition revisions).

    You can filter out task definition families that do not contain any ACTIVE task definition revisions by setting the status parameter to ACTIVE. You can also filter the results with the familyPrefix parameter.

    ", + "ListTaskDefinitions": "

    Returns a list of task definitions that are registered to your account. You can filter the results by family name with the familyPrefix parameter or by status with the status parameter.

    ", + "ListTasks": "

    Returns a list of tasks for a specified cluster. You can filter the results by family name, by a particular container instance, or by the desired status of the task with the family, containerInstance, and desiredStatus parameters.

    ", + "RegisterContainerInstance": "

    This action is only used by the Amazon EC2 Container Service agent, and it is not intended for use outside of the agent.

    Registers an EC2 instance into the specified cluster. This instance becomes available to place containers on.

    ", + "RegisterTaskDefinition": "

    Registers a new task definition from the supplied family and containerDefinitions. Optionally, you can add data volumes to your containers with the volumes parameter. For more information about task definition parameters and defaults, see Amazon ECS Task Definitions in the Amazon EC2 Container Service Developer Guide.
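
    As a sketch (imports as in the CreateService example above; the family, image, and ports are illustrative):

        // registerWebTask registers a single-container task definition.
        func registerWebTask(svc *ecs.ECS) (*ecs.TaskDefinition, error) {
            out, err := svc.RegisterTaskDefinition(&ecs.RegisterTaskDefinitionInput{
                Family: aws.String("web"), // placeholder family name
                ContainerDefinitions: []*ecs.ContainerDefinition{{
                    Name:      aws.String("web"),
                    Image:     aws.String("nginx:latest"), // illustrative image
                    Memory:    aws.Int64(128),             // MiB
                    Essential: aws.Bool(true),
                    PortMappings: []*ecs.PortMapping{{
                        ContainerPort: aws.Int64(80),
                        HostPort:      aws.Int64(0), // 0 requests an ephemeral host port
                    }},
                }},
            })
            if err != nil {
                return nil, err
            }
            return out.TaskDefinition, nil
        }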

    ", + "RunTask": "

    Starts a task using random placement and the default Amazon ECS scheduler. To use your own scheduler or place a task on a specific container instance, use StartTask instead.

    The count parameter is limited to 10 tasks per call.
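
    For instance (a sketch, imports as in the CreateService example; names are placeholders), a count above one places multiple copies of the task, and per-task placement failures come back in the failures list:

        func runCopies(svc *ecs.ECS) error {
            out, err := svc.RunTask(&ecs.RunTaskInput{
                Cluster:        aws.String("default"),
                TaskDefinition: aws.String("web:1"),
                Count:          aws.Int64(2),          // must not exceed 10
                StartedBy:      aws.String("example"), // optional tag, filterable in ListTasks
            })
            if err != nil {
                return err
            }
            for _, f := range out.Failures { // e.g. not enough container instance resources
                fmt.Printf("%s: %s\n", aws.StringValue(f.Arn), aws.StringValue(f.Reason))
            }
            return nil
        }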

    ", + "StartTask": "

    Starts a new task from the specified task definition on the specified container instance or instances. To use the default Amazon ECS scheduler to place your task, use RunTask instead.

    The list of container instances to start tasks on is limited to 10.

    ", + "StopTask": "

    Stops a running task.

    When StopTask is called on a task, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM and a 30-second timeout, after which SIGKILL is sent and the containers are forcibly stopped. If the container handles the SIGTERM gracefully and exits within 30 seconds from receiving it, no SIGKILL is sent.

    ", + "SubmitContainerStateChange": "

    This action is only used by the Amazon EC2 Container Service agent, and it is not intended for use outside of the agent.

    Sent to acknowledge that a container changed states.

    ", + "SubmitTaskStateChange": "

    This action is only used by the Amazon EC2 Container Service agent, and it is not intended for use outside of the agent.

    Sent to acknowledge that a task changed states.

    ", + "UpdateContainerAgent": "

    Updates the Amazon ECS container agent on a specified container instance. Updating the Amazon ECS container agent does not interrupt running tasks or services on the container instance. The process for updating the agent differs depending on whether your container instance was launched with the Amazon ECS-optimized AMI or another operating system.

    UpdateContainerAgent requires the Amazon ECS-optimized AMI or Amazon Linux with the ecs-init service installed and running. For help updating the Amazon ECS container agent on other operating systems, see Manually Updating the Amazon ECS Container Agent in the Amazon EC2 Container Service Developer Guide.

    ", + "UpdateService": "

    Modifies the desired count, deployment configuration, or task definition used in a service.

    You can add to or subtract from the number of instantiations of a task definition in a service by specifying the cluster that the service is running in and a new desiredCount parameter.

    You can use UpdateService to modify your task definition and deploy a new version of your service.

    You can also update the deployment configuration of a service. When a deployment is triggered by updating the task definition of a service, the service scheduler uses the deployment configuration parameters, minimumHealthyPercent and maximumPercent, to determine the deployment strategy.

    If the minimumHealthyPercent is below 100%, the scheduler can ignore the desiredCount temporarily during a deployment. For example, if your service has a desiredCount of four tasks, a minimumHealthyPercent of 50% allows the scheduler to stop two existing tasks before starting two new tasks. Tasks for services that do not use a load balancer are considered healthy if they are in the RUNNING state; tasks for services that do use a load balancer are considered healthy if they are in the RUNNING state and the container instances they are hosted on are reported as healthy by the load balancer.

    The maximumPercent parameter represents an upper limit on the number of running tasks during a deployment, which enables you to define the deployment batch size. For example, if your service has a desiredCount of four tasks, a maximumPercent value of 200% starts four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available).

    When UpdateService stops a task during a deployment, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM and a 30-second timeout, after which SIGKILL is sent and the containers are forcibly stopped. If the container handles the SIGTERM gracefully and exits within 30 seconds from receiving it, no SIGKILL is sent.

    When the service scheduler launches new tasks, it attempts to balance them across the Availability Zones in your cluster with the following logic:

    • Determine which of the container instances in your cluster can support your service's task definition (for example, they have the required CPU, memory, ports, and container instance attributes).

    • Sort the valid container instances by the fewest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement.

    • Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service.
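
    As a sketch of a rolling deployment (imports as in the CreateService example; names are placeholders), pointing the service at a new task definition revision triggers a deployment governed by the parameters above:

        func deployNewRevision(svc *ecs.ECS) error {
            _, err := svc.UpdateService(&ecs.UpdateServiceInput{
                Cluster:        aws.String("default"),
                Service:        aws.String("web"),
                TaskDefinition: aws.String("web:2"), // new revision triggers the deployment
                DeploymentConfiguration: &ecs.DeploymentConfiguration{
                    MinimumHealthyPercent: aws.Int64(100), // never drop below desiredCount
                    MaximumPercent:        aws.Int64(200),
                },
            })
            return err
        }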

    " + }, + "shapes": { + "AgentUpdateStatus": { + "base": null, + "refs": { + "ContainerInstance$agentUpdateStatus": "

    The status of the most recent agent update. If an update has never been requested, this value is NULL.

    " + } + }, + "Attribute": { + "base": "

    The attributes applicable to a container instance when it is registered.

    ", + "refs": { + "Attributes$member": null, + "RequiresAttributes$member": null + } + }, + "Attributes": { + "base": null, + "refs": { + "ContainerInstance$attributes": "

    The attributes set for the container instance by the Amazon ECS container agent at instance registration.

    ", + "RegisterContainerInstanceRequest$attributes": "

    The container instance attributes that this container instance supports.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "ContainerInstance$agentConnected": "

    This parameter returns true if the agent is actually connected to Amazon ECS. Registered instances with an agent that may be unhealthy or stopped return false, and instances without a connected agent cannot accept placement requests.

    " + } + }, + "BoxedBoolean": { + "base": null, + "refs": { + "ContainerDefinition$essential": "

    If the essential parameter of a container is marked as true, and that container fails or stops for any reason, all other containers that are part of the task are stopped. If the essential parameter of a container is marked as false, then its failure does not affect the rest of the containers in a task. If this parameter is omitted, a container is assumed to be essential.

    All tasks must have at least one essential container. If you have an application that is composed of multiple containers, you should group containers that are used for a common purpose into components, and separate the different components into multiple task definitions. For more information, see Application Architecture in the Amazon EC2 Container Service Developer Guide.

    ", + "ContainerDefinition$disableNetworking": "

    When this parameter is true, networking is disabled within the container. This parameter maps to NetworkDisabled in the Create a container section of the Docker Remote API.

    ", + "ContainerDefinition$privileged": "

    When this parameter is true, the container is given elevated privileges on the host container instance (similar to the root user). This parameter maps to Privileged in the Create a container section of the Docker Remote API and the --privileged option to docker run.

    ", + "ContainerDefinition$readonlyRootFilesystem": "

    When this parameter is true, the container is given read-only access to its root file system. This parameter maps to ReadonlyRootfs in the Create a container section of the Docker Remote API and the --read-only option to docker run.

    ", + "DeregisterContainerInstanceRequest$force": "

    Forces the deregistration of the container instance. If you have tasks running on the container instance when you deregister it with the force option, these tasks remain running and they continue to pass Elastic Load Balancing load balancer health checks until you terminate the instance or the tasks stop through some other means, but they are orphaned (no longer monitored or accounted for by Amazon ECS). If an orphaned task on your container instance is part of an Amazon ECS service, then the service scheduler starts another copy of that task, on a different container instance if possible.

    ", + "MountPoint$readOnly": "

    If this value is true, the container has read-only access to the volume. If this value is false, then the container can write to the volume. The default value is false.

    ", + "VolumeFrom$readOnly": "

    If this value is true, the container has read-only access to the volume. If this value is false, then the container can write to the volume. The default value is false.

    " + } + }, + "BoxedInteger": { + "base": null, + "refs": { + "Container$exitCode": "

    The exit code returned from the container.

    ", + "CreateServiceRequest$desiredCount": "

    The number of instantiations of the specified task definition to place and keep running on your cluster.

    ", + "DeploymentConfiguration$maximumPercent": "

    The upper limit (as a percentage of the service's desiredCount) on the number of tasks that can be running in a service during a deployment. The maximum number of tasks during a deployment is the desiredCount multiplied by maximumPercent/100, rounded down to the nearest integer value.

    ", + "DeploymentConfiguration$minimumHealthyPercent": "

    The lower limit (as a percentage of the service's desiredCount) on the number of tasks that must remain running and healthy in a service during a deployment. The minimum number of healthy tasks during a deployment is the desiredCount multiplied by minimumHealthyPercent/100, rounded up to the nearest integer value.
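
    A worked example of the two rounding rules above, as a small helper (assumed values only; uses the standard library math package):

        // deploymentBounds computes the task-count window the scheduler honors
        // during a deployment: minimumHealthyPercent rounds up, maximumPercent
        // rounds down. For desired=4, minPct=50, maxPct=200 it returns (2, 8).
        func deploymentBounds(desired, minPct, maxPct int64) (minHealthy, maxTotal int64) {
            minHealthy = int64(math.Ceil(float64(desired*minPct) / 100))
            maxTotal = int64(math.Floor(float64(desired*maxPct) / 100))
            return
        }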

    ", + "ListClustersRequest$maxResults": "

    The maximum number of cluster results returned by ListClusters in paginated output. When this parameter is used, ListClusters only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListClusters request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListClusters returns up to 100 results and a nextToken value if applicable.

    ", + "ListContainerInstancesRequest$maxResults": "

    The maximum number of container instance results returned by ListContainerInstances in paginated output. When this parameter is used, ListContainerInstances only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListContainerInstances request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListContainerInstances returns up to 100 results and a nextToken value if applicable.

    ", + "ListServicesRequest$maxResults": "

    The maximum number of service results returned by ListServices in paginated output. When this parameter is used, ListServices only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListServices request with the returned nextToken value. This value can be between 1 and 10. If this parameter is not used, then ListServices returns up to 10 results and a nextToken value if applicable.

    ", + "ListTaskDefinitionFamiliesRequest$maxResults": "

    The maximum number of task definition family results returned by ListTaskDefinitionFamilies in paginated output. When this parameter is used, ListTaskDefinitionFamilies only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListTaskDefinitionFamilies request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListTaskDefinitionFamilies returns up to 100 results and a nextToken value if applicable.

    ", + "ListTaskDefinitionsRequest$maxResults": "

    The maximum number of task definition results returned by ListTaskDefinitions in paginated output. When this parameter is used, ListTaskDefinitions only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListTaskDefinitions request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListTaskDefinitions returns up to 100 results and a nextToken value if applicable.

    ", + "ListTasksRequest$maxResults": "

    The maximum number of task results returned by ListTasks in paginated output. When this parameter is used, ListTasks only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListTasks request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListTasks returns up to 100 results and a nextToken value if applicable.
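
    The nextToken contract described here looks like the following in practice (a sketch, imports as in the CreateService example):

        // allTaskARNs walks every page of ListTasks results for one cluster.
        func allTaskARNs(svc *ecs.ECS, cluster string) ([]*string, error) {
            input := &ecs.ListTasksInput{
                Cluster:    aws.String(cluster),
                MaxResults: aws.Int64(100), // page size; 1-100 per the note above
            }
            var arns []*string
            for {
                out, err := svc.ListTasks(input)
                if err != nil {
                    return nil, err
                }
                arns = append(arns, out.TaskArns...)
                if out.NextToken == nil { // no further pages
                    return arns, nil
                }
                input.NextToken = out.NextToken
            }
        }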

    ", + "LoadBalancer$containerPort": "

    The port on the container to associate with the load balancer. This port must correspond to a containerPort in the service's task definition. Your container instances must allow ingress traffic on the hostPort of the port mapping.

    ", + "NetworkBinding$containerPort": "

    The port number on the container that is used with the network binding.

    ", + "NetworkBinding$hostPort": "

    The port number on the host that is used with the network binding.

    ", + "RunTaskRequest$count": "

    The number of instantiations of the specified task to place on your cluster.

    The count parameter is limited to 10 tasks per call.

    ", + "SubmitContainerStateChangeRequest$exitCode": "

    The exit code returned for the state change request.

    ", + "UpdateServiceRequest$desiredCount": "

    The number of instantiations of the task to place and keep running in your service.

    " + } + }, + "ClientException": { + "base": "

    These errors are usually caused by a client action, such as using an action or resource on behalf of a user that doesn't have permission to use the action or resource, or specifying an identifier that is not valid.

    ", + "refs": { + } + }, + "Cluster": { + "base": "

    A regional grouping of one or more container instances on which you can run task requests. Each account receives a default cluster the first time you use the Amazon ECS service, but you may also create other clusters. Clusters may contain more than one instance type simultaneously.

    ", + "refs": { + "Clusters$member": null, + "CreateClusterResponse$cluster": "

    The full description of your new cluster.

    ", + "DeleteClusterResponse$cluster": "

    The full description of the deleted cluster.

    " + } + }, + "ClusterContainsContainerInstancesException": { + "base": "

    You cannot delete a cluster that has registered container instances. You must first deregister the container instances before you can delete the cluster. For more information, see DeregisterContainerInstance.

    ", + "refs": { + } + }, + "ClusterContainsServicesException": { + "base": "

    You cannot delete a cluster that contains services. You must first update the service to reduce its desired task count to 0 and then delete the service. For more information, see UpdateService and DeleteService.

    ", + "refs": { + } + }, + "ClusterNotFoundException": { + "base": "

    The specified cluster could not be found. You can view your available clusters with ListClusters. Amazon ECS clusters are region-specific.

    ", + "refs": { + } + }, + "Clusters": { + "base": null, + "refs": { + "DescribeClustersResponse$clusters": "

    The list of clusters.

    " + } + }, + "Container": { + "base": "

    A Docker container that is part of a task.

    ", + "refs": { + "Containers$member": null + } + }, + "ContainerDefinition": { + "base": "

    Container definitions are used in task definitions to describe the different containers that are launched as part of a task.

    ", + "refs": { + "ContainerDefinitions$member": null + } + }, + "ContainerDefinitions": { + "base": null, + "refs": { + "RegisterTaskDefinitionRequest$containerDefinitions": "

    A list of container definitions in JSON format that describe the different containers that make up your task.

    ", + "TaskDefinition$containerDefinitions": "

    A list of container definitions in JSON format that describe the different containers that make up your task. For more information about container definition parameters and defaults, see Amazon ECS Task Definitions in the Amazon EC2 Container Service Developer Guide.

    " + } + }, + "ContainerInstance": { + "base": "

    An EC2 instance that is running the Amazon ECS agent and has been registered with a cluster.

    ", + "refs": { + "ContainerInstances$member": null, + "DeregisterContainerInstanceResponse$containerInstance": null, + "RegisterContainerInstanceResponse$containerInstance": null, + "UpdateContainerAgentResponse$containerInstance": null + } + }, + "ContainerInstances": { + "base": null, + "refs": { + "DescribeContainerInstancesResponse$containerInstances": "

    The list of container instances.

    " + } + }, + "ContainerOverride": { + "base": "

    The overrides that should be sent to a container.

    ", + "refs": { + "ContainerOverrides$member": null + } + }, + "ContainerOverrides": { + "base": null, + "refs": { + "TaskOverride$containerOverrides": "

    One or more container overrides sent to a task.

    " + } + }, + "Containers": { + "base": null, + "refs": { + "Task$containers": "

    The containers associated with the task.

    " + } + }, + "CreateClusterRequest": { + "base": null, + "refs": { + } + }, + "CreateClusterResponse": { + "base": null, + "refs": { + } + }, + "CreateServiceRequest": { + "base": null, + "refs": { + } + }, + "CreateServiceResponse": { + "base": null, + "refs": { + } + }, + "DeleteClusterRequest": { + "base": null, + "refs": { + } + }, + "DeleteClusterResponse": { + "base": null, + "refs": { + } + }, + "DeleteServiceRequest": { + "base": null, + "refs": { + } + }, + "DeleteServiceResponse": { + "base": null, + "refs": { + } + }, + "Deployment": { + "base": "

    The details of an Amazon ECS service deployment.

    ", + "refs": { + "Deployments$member": null + } + }, + "DeploymentConfiguration": { + "base": "

    Optional deployment parameters that control how many tasks run during the deployment and the ordering of stopping and starting tasks.

    ", + "refs": { + "CreateServiceRequest$deploymentConfiguration": "

    Optional deployment parameters that control how many tasks run during the deployment and the ordering of stopping and starting tasks.

    ", + "Service$deploymentConfiguration": "

    Optional deployment parameters that control how many tasks run during the deployment and the ordering of stopping and starting tasks.

    ", + "UpdateServiceRequest$deploymentConfiguration": "

    Optional deployment parameters that control how many tasks run during the deployment and the ordering of stopping and starting tasks.

    " + } + }, + "Deployments": { + "base": null, + "refs": { + "Service$deployments": "

    The current state of deployments for the service.

    " + } + }, + "DeregisterContainerInstanceRequest": { + "base": null, + "refs": { + } + }, + "DeregisterContainerInstanceResponse": { + "base": null, + "refs": { + } + }, + "DeregisterTaskDefinitionRequest": { + "base": null, + "refs": { + } + }, + "DeregisterTaskDefinitionResponse": { + "base": null, + "refs": { + } + }, + "DescribeClustersRequest": { + "base": null, + "refs": { + } + }, + "DescribeClustersResponse": { + "base": null, + "refs": { + } + }, + "DescribeContainerInstancesRequest": { + "base": null, + "refs": { + } + }, + "DescribeContainerInstancesResponse": { + "base": null, + "refs": { + } + }, + "DescribeServicesRequest": { + "base": null, + "refs": { + } + }, + "DescribeServicesResponse": { + "base": null, + "refs": { + } + }, + "DescribeTaskDefinitionRequest": { + "base": null, + "refs": { + } + }, + "DescribeTaskDefinitionResponse": { + "base": null, + "refs": { + } + }, + "DescribeTasksRequest": { + "base": null, + "refs": { + } + }, + "DescribeTasksResponse": { + "base": null, + "refs": { + } + }, + "DesiredStatus": { + "base": null, + "refs": { + "ListTasksRequest$desiredStatus": "

    The task status with which to filter the ListTasks results. Specifying a desiredStatus of STOPPED limits the results to tasks that are in the STOPPED status, which can be useful for debugging tasks that are not starting properly or have died or finished. The default status filter is RUNNING.

    " + } + }, + "DiscoverPollEndpointRequest": { + "base": null, + "refs": { + } + }, + "DiscoverPollEndpointResponse": { + "base": null, + "refs": { + } + }, + "DockerLabelsMap": { + "base": null, + "refs": { + "ContainerDefinition$dockerLabels": "

    A key/value map of labels to add to the container. This parameter maps to Labels in the Create a container section of the Docker Remote API and the --label option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log into your container instance and run the following command: sudo docker version | grep \"Server API version\"

    " + } + }, + "Double": { + "base": null, + "refs": { + "Resource$doubleValue": "

    When the doubleValue type is set, the value of the resource must be a double precision floating-point type.

    " + } + }, + "EnvironmentVariables": { + "base": null, + "refs": { + "ContainerDefinition$environment": "

    The environment variables to pass to a container. This parameter maps to Env in the Create a container section of the Docker Remote API and the --env option to docker run.

    We do not recommend using plain text environment variables for sensitive information, such as credential data.

    ", + "ContainerOverride$environment": "

    The environment variables to send to the container. You can add new environment variables, which are added to the container at launch, or you can override the existing environment variables from the Docker image or the task definition.

    " + } + }, + "Failure": { + "base": "

    A failed resource.

    ", + "refs": { + "Failures$member": null + } + }, + "Failures": { + "base": null, + "refs": { + "DescribeClustersResponse$failures": "

    Any failures associated with the call.

    ", + "DescribeContainerInstancesResponse$failures": "

    Any failures associated with the call.

    ", + "DescribeServicesResponse$failures": "

    Any failures associated with the call.

    ", + "DescribeTasksResponse$failures": "

    Any failures associated with the call.

    ", + "RunTaskResponse$failures": "

    Any failures associated with the call.

    ", + "StartTaskResponse$failures": "

    Any failures associated with the call.

    " + } + }, + "HostEntry": { + "base": "

    Hostnames and IP address entries that are added to the /etc/hosts file of a container via the extraHosts parameter of its ContainerDefinition.

    ", + "refs": { + "HostEntryList$member": null + } + }, + "HostEntryList": { + "base": null, + "refs": { + "ContainerDefinition$extraHosts": "

    A list of hostnames and IP address mappings to append to the /etc/hosts file on the container. This parameter maps to ExtraHosts in the Create a container section of the Docker Remote API and the --add-host option to docker run.

    " + } + }, + "HostVolumeProperties": { + "base": "

    Details on a container instance host volume.

    ", + "refs": { + "Volume$host": "

    The contents of the host parameter determine whether your data volume persists on the host container instance and where it is stored. If the host parameter is empty, then the Docker daemon assigns a host path for your data volume, but the data is not guaranteed to persist after the containers associated with it stop running.

    " + } + }, + "Integer": { + "base": null, + "refs": { + "Cluster$registeredContainerInstancesCount": "

    The number of container instances registered into the cluster.

    ", + "Cluster$runningTasksCount": "

    The number of tasks in the cluster that are in the RUNNING state.

    ", + "Cluster$pendingTasksCount": "

    The number of tasks in the cluster that are in the PENDING state.

    ", + "Cluster$activeServicesCount": "

    The number of services that are running on the cluster in an ACTIVE state. You can view these services with ListServices.

    ", + "ContainerDefinition$cpu": "

    The number of cpu units reserved for the container. A container instance has 1,024 cpu units for every CPU core. This parameter specifies the minimum amount of CPU to reserve for a container, and containers share unallocated CPU units with other containers on the instance with the same ratio as their allocated amount. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run.

    You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the Amazon EC2 Instances detail page by 1,024.

    For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that is the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task would be guaranteed a minimum of 512 CPU units when needed, and each container could float to higher CPU usage if the other container was not using it, but if both tasks were 100% active all of the time, they would be limited to 512 CPU units.

    The Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. For more information, see CPU share constraint in the Docker documentation. The minimum valid CPU share value that the Linux kernel allows is 2; however, the CPU parameter is not required, and you can use CPU values below 2 in your container definitions. For CPU values below 2 (including null), the behavior varies based on your Amazon ECS container agent version:

    • Agent versions less than or equal to 1.1.0: Null and zero CPU values are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux kernel converts to 2 CPU shares.

    • Agent versions greater than or equal to 1.2.0: Null, zero, and CPU values of 1 are passed to Docker as 2.

    ", + "ContainerDefinition$memory": "

    The number of MiB of memory to reserve for the container. You must specify a non-zero integer for this parameter; the Docker daemon reserves a minimum of 4 MiB of memory for a container, so you should not specify fewer than 4 MiB of memory for your containers. If your container attempts to exceed the memory allocated here, the container is killed. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run.

    ", + "ContainerInstance$runningTasksCount": "

    The number of tasks on the container instance that are in the RUNNING status.

    ", + "ContainerInstance$pendingTasksCount": "

    The number of tasks on the container instance that are in the PENDING status.

    ", + "Deployment$desiredCount": "

    The most recent desired count of tasks that was specified for the service to deploy or maintain.

    ", + "Deployment$pendingCount": "

    The number of tasks in the deployment that are in the PENDING status.

    ", + "Deployment$runningCount": "

    The number of tasks in the deployment that are in the RUNNING status.

    ", + "PortMapping$containerPort": "

    The port number on the container that is bound to the user-specified or automatically assigned host port. If you specify a container port and not a host port, your container automatically receives a host port in the ephemeral port range (for more information, see hostPort). Port mappings that are automatically assigned in this way do not count toward the 100 reserved ports limit of a container instance.

    ", + "PortMapping$hostPort": "

    The port number on the container instance to reserve for your container. You can specify a non-reserved host port for your container port mapping, or you can omit the hostPort (or set it to 0) while specifying a containerPort and your container automatically receives a port in the ephemeral port range for your container instance operating system and Docker version.

    The default ephemeral port range is 49153 to 65535, and this range is used for Docker versions prior to 1.6.0. For Docker version 1.6.0 and later, the Docker daemon tries to read the ephemeral port range from /proc/sys/net/ipv4/ip_local_port_range; if this kernel parameter is unavailable, the default ephemeral port range is used. You should not attempt to specify a host port in the ephemeral port range, because these are reserved for automatic assignment. In general, ports below 32768 are outside of the ephemeral port range.

    The default reserved ports are 22 for SSH, the Docker ports 2375 and 2376, and the Amazon ECS container agent port 51678. Any host port that was previously specified in a running task is also reserved while the task is running (after a task stops, the host port is released). The current reserved ports are displayed in the remainingResources of DescribeContainerInstances output, and a container instance may have up to 100 reserved ports at a time, including the default reserved ports (automatically assigned ports do not count toward the 100 reserved ports limit).
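
    To avoid both the reserved set and the ephemeral range, a mapping can set the host port to 0 (or omit it) and let the container instance assign one, as in this sketch (imports as in the CreateService example):

        // dynamicMapping exposes container port 80 on an automatically
        // assigned host port from the ephemeral range.
        func dynamicMapping() *ecs.PortMapping {
            return &ecs.PortMapping{
                ContainerPort: aws.Int64(80),
                HostPort:      aws.Int64(0), // 0 requests automatic assignment
                Protocol:      aws.String("tcp"),
            }
        }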

    ", + "Resource$integerValue": "

    When the integerValue type is set, the value of the resource must be an integer.

    ", + "Service$desiredCount": "

    The desired number of instantiations of the task definition to keep running on the service. This value is specified when the service is created with CreateService, and it can be modified with UpdateService.

    ", + "Service$runningCount": "

    The number of tasks in the cluster that are in the RUNNING state.

    ", + "Service$pendingCount": "

    The number of tasks in the cluster that are in the PENDING state.

    ", + "TaskDefinition$revision": "

    The revision of the task in a particular family. The revision is a version number of a task definition in a family. When you register a task definition for the first time, the revision is 1; each time you register a new revision of a task definition in the same family, the revision value always increases by one (even if you have deregistered previous revisions in this family).

    ", + "Ulimit$softLimit": "

    The soft limit for the ulimit type.

    ", + "Ulimit$hardLimit": "

    The hard limit for the ulimit type.

    " + } + }, + "InvalidParameterException": { + "base": "

    The specified parameter is invalid. Review the available parameters for the API request.

    ", + "refs": { + } + }, + "KeyValuePair": { + "base": "

    A key and value pair object.

    ", + "refs": { + "EnvironmentVariables$member": null + } + }, + "ListClustersRequest": { + "base": null, + "refs": { + } + }, + "ListClustersResponse": { + "base": null, + "refs": { + } + }, + "ListContainerInstancesRequest": { + "base": null, + "refs": { + } + }, + "ListContainerInstancesResponse": { + "base": null, + "refs": { + } + }, + "ListServicesRequest": { + "base": null, + "refs": { + } + }, + "ListServicesResponse": { + "base": null, + "refs": { + } + }, + "ListTaskDefinitionFamiliesRequest": { + "base": null, + "refs": { + } + }, + "ListTaskDefinitionFamiliesResponse": { + "base": null, + "refs": { + } + }, + "ListTaskDefinitionsRequest": { + "base": null, + "refs": { + } + }, + "ListTaskDefinitionsResponse": { + "base": null, + "refs": { + } + }, + "ListTasksRequest": { + "base": null, + "refs": { + } + }, + "ListTasksResponse": { + "base": null, + "refs": { + } + }, + "LoadBalancer": { + "base": "

    Details on a load balancer that is used with a service.

    ", + "refs": { + "LoadBalancers$member": null + } + }, + "LoadBalancers": { + "base": null, + "refs": { + "CreateServiceRequest$loadBalancers": "

    A list of load balancer objects, containing the load balancer name, the container name (as it appears in a container definition), and the container port to access from the load balancer.

    ", + "Service$loadBalancers": "

    A list of load balancer objects, containing the load balancer name, the container name (as it appears in a container definition), and the container port to access from the load balancer.

    " + } + }, + "LogConfiguration": { + "base": "

    Log configuration options to send to a custom log driver for the container.

    ", + "refs": { + "ContainerDefinition$logConfiguration": "

    The log configuration specification for the container. This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run. By default, containers use the same logging driver that the Docker daemon uses; however, the container may use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

    Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type). Currently unsupported log drivers may be available in future releases of the Amazon ECS container agent.

    This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log into your container instance and run the following command: sudo docker version | grep \"Server API version\"

    The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the Amazon EC2 Container Service Developer Guide.

    " + } + }, + "LogConfigurationOptionsMap": { + "base": null, + "refs": { + "LogConfiguration$options": "

    The configuration options to send to the log driver. This parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log into your container instance and run the following command: sudo docker version | grep \"Server API version\"

    " + } + }, + "LogDriver": { + "base": null, + "refs": { + "LogConfiguration$logDriver": "

    The log driver to use for the container. The valid values listed for this parameter are log drivers that the Amazon ECS container agent can communicate with by default.

    If you have a custom driver that is not listed above that you would like to work with the Amazon ECS container agent, you can fork the Amazon ECS container agent project that is available on GitHub and customize it to work with that driver. We encourage you to submit pull requests for changes that you would like to have included. However, Amazon Web Services does not currently provide support for running modified copies of this software.

    This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log into your container instance and run the following command: sudo docker version | grep \"Server API version\"

    " + } + }, + "Long": { + "base": null, + "refs": { + "Resource$longValue": "

    When the longValue type is set, the value of the resource must be an extended precision floating-point type.

    " + } + }, + "MissingVersionException": { + "base": "

    Amazon ECS is unable to determine the current version of the Amazon ECS container agent on the container instance and does not have enough information to proceed with an update. This could be because the agent running on the container instance is an older or custom version that does not use our version information.

    ", + "refs": { + } + }, + "MountPoint": { + "base": "

    Details on a volume mount point that is used in a container definition.

    ", + "refs": { + "MountPointList$member": null + } + }, + "MountPointList": { + "base": null, + "refs": { + "ContainerDefinition$mountPoints": "

    The mount points for data volumes in your container. This parameter maps to Volumes in the Create a container section of the Docker Remote API and the --volume option to docker run.

    " + } + }, + "NetworkBinding": { + "base": "

    Details on the network bindings between a container and its host container instance. After a task reaches the RUNNING status, manual and automatic host and container port assignments are visible in the networkBindings section of DescribeTasks API responses.

    ", + "refs": { + "NetworkBindings$member": null + } + }, + "NetworkBindings": { + "base": null, + "refs": { + "Container$networkBindings": "

    The network bindings associated with the container.

    ", + "SubmitContainerStateChangeRequest$networkBindings": "

    The network bindings of the container.

    " + } + }, + "NoUpdateAvailableException": { + "base": "

    There is no update available for this Amazon ECS container agent. This could be because the agent is already running the latest version, or it is so old that there is no update path to the current version.

    ", + "refs": { + } + }, + "PortMapping": { + "base": "

    Port mappings allow containers to access ports on the host container instance to send or receive traffic. Port mappings are specified as part of the container definition. After a task reaches the RUNNING status, manual and automatic host and container port assignments are visible in the networkBindings section of DescribeTasks API responses.

    ", + "refs": { + "PortMappingList$member": null + } + }, + "PortMappingList": { + "base": null, + "refs": { + "ContainerDefinition$portMappings": "

    The list of port mappings for the container. Port mappings allow containers to access ports on the host container instance to send or receive traffic. This parameter maps to PortBindings in the Create a container section of the Docker Remote API and the --publish option to docker run.

    After a task reaches the RUNNING status, manual and automatic host and container port assignments are visible in the Network Bindings section of a container description of a selected task in the Amazon ECS console, or the networkBindings section of DescribeTasks responses.
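
    A hedged sketch of both halves of this: a PortMapping with HostPort 0 requests a dynamic host port, and once the task is RUNNING the resolved assignment can be read back from networkBindings. The task ARN is a placeholder.

```go
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

// dynamicHTTP asks Docker for a dynamic host port (HostPort 0); it would go
// in a container definition's PortMappings when registering the task definition.
var dynamicHTTP = &ecs.PortMapping{
	ContainerPort: aws.Int64(80),
	HostPort:      aws.Int64(0),
	Protocol:      aws.String("tcp"),
}

// printBindings reads the resolved assignments back once the task is RUNNING.
func printBindings(svc *ecs.ECS, taskARN string) error {
	out, err := svc.DescribeTasks(&ecs.DescribeTasksInput{
		Tasks: []*string{aws.String(taskARN)},
	})
	if err != nil {
		return err
	}
	for _, task := range out.Tasks {
		for _, c := range task.Containers {
			for _, nb := range c.NetworkBindings {
				fmt.Printf("%s: container %d -> host %d/%s\n",
					aws.StringValue(c.Name),
					aws.Int64Value(nb.ContainerPort),
					aws.Int64Value(nb.HostPort),
					aws.StringValue(nb.Protocol))
			}
		}
	}
	return nil
}
```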

    " + } + }, + "RegisterContainerInstanceRequest": { + "base": null, + "refs": { + } + }, + "RegisterContainerInstanceResponse": { + "base": null, + "refs": { + } + }, + "RegisterTaskDefinitionRequest": { + "base": null, + "refs": { + } + }, + "RegisterTaskDefinitionResponse": { + "base": null, + "refs": { + } + }, + "RequiresAttributes": { + "base": null, + "refs": { + "TaskDefinition$requiresAttributes": "

    The container instance attributes required by your task.

    " + } + }, + "Resource": { + "base": "

    Describes the resources available for a container instance.

    ", + "refs": { + "Resources$member": null + } + }, + "Resources": { + "base": null, + "refs": { + "ContainerInstance$remainingResources": "

    The remaining resources of the container instance that are available for new tasks.

    ", + "ContainerInstance$registeredResources": "

    The registered resources on the container instance that are in use by current tasks.

    ", + "RegisterContainerInstanceRequest$totalResources": "

    The resources available on the instance.

    " + } + }, + "RunTaskRequest": { + "base": null, + "refs": { + } + }, + "RunTaskResponse": { + "base": null, + "refs": { + } + }, + "ServerException": { + "base": "

    These errors are usually caused by a server issue.

    ", + "refs": { + } + }, + "Service": { + "base": "

    Details on a service within a cluster.

    ", + "refs": { + "CreateServiceResponse$service": "

    The full description of your service following the create call.

    ", + "DeleteServiceResponse$service": "

    The full description of the deleted service.

    ", + "Services$member": null, + "UpdateServiceResponse$service": "

    The full description of your service following the update call.

    " + } + }, + "ServiceEvent": { + "base": "

    Details on an event associated with a service.

    ", + "refs": { + "ServiceEvents$member": null + } + }, + "ServiceEvents": { + "base": null, + "refs": { + "Service$events": "

    The event stream for your service. A maximum of 100 of the latest events are displayed.

    " + } + }, + "ServiceNotActiveException": { + "base": "

    The specified service is not active. You cannot update a service that is not active. If you have previously deleted a service, you can re-create it with CreateService.

    ", + "refs": { + } + }, + "ServiceNotFoundException": { + "base": "

    The specified service could not be found. You can view your available services with ListServices. Amazon ECS services are cluster-specific and region-specific.

    ", + "refs": { + } + }, + "Services": { + "base": null, + "refs": { + "DescribeServicesResponse$services": "

    The list of services described.

    " + } + }, + "SortOrder": { + "base": null, + "refs": { + "ListTaskDefinitionsRequest$sort": "

    The order in which to sort the results. Valid values are ASC and DESC. By default (ASC), task definitions are listed lexicographically by family name and in ascending numerical order by revision so that the newest task definitions in a family are listed last. Setting this parameter to DESC reverses the sort order on family name and revision so that the newest task definitions in a family are listed first.
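
    For illustration only, a sketch of requesting newest-first ordering; the web family name is a placeholder.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

// newestFirst lists a family's task definition revisions newest first:
// DESC reverses the default lexicographic/ascending-revision order.
func newestFirst(svc *ecs.ECS) ([]*string, error) {
	out, err := svc.ListTaskDefinitions(&ecs.ListTaskDefinitionsInput{
		FamilyPrefix: aws.String("web"),
		Sort:         aws.String("DESC"),
	})
	if err != nil {
		return nil, err
	}
	return out.TaskDefinitionArns, nil
}
```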

    " + } + }, + "StartTaskRequest": { + "base": null, + "refs": { + } + }, + "StartTaskResponse": { + "base": null, + "refs": { + } + }, + "StopTaskRequest": { + "base": null, + "refs": { + } + }, + "StopTaskResponse": { + "base": null, + "refs": { + } + }, + "String": { + "base": null, + "refs": { + "Attribute$name": "

    The name of the container instance attribute.

    ", + "Attribute$value": "

    The value of the container instance attribute (at this time, the value here is Null, but this could change in future revisions for expandability).

    ", + "ClientException$message": null, + "Cluster$clusterArn": "

    The Amazon Resource Name (ARN) that identifies the cluster. The ARN contains the arn:aws:ecs namespace, followed by the region of the cluster, the AWS account ID of the cluster owner, the cluster namespace, and then the cluster name. For example, arn:aws:ecs:region:012345678910:cluster/test.

    ", + "Cluster$clusterName": "

    A user-generated string that you use to identify your cluster.

    ", + "Cluster$status": "

    The status of the cluster. The valid values are ACTIVE or INACTIVE. ACTIVE indicates that you can register container instances with the cluster and the associated instances can accept tasks.

    ", + "Container$containerArn": "

    The Amazon Resource Name (ARN) of the container.

    ", + "Container$taskArn": "

    The Amazon Resource Name (ARN) of the task.

    ", + "Container$name": "

    The name of the container.

    ", + "Container$lastStatus": "

    The last known status of the container.

    ", + "Container$reason": "

    A short (255 max characters) human-readable string to provide additional detail about a running or stopped container.

    ", + "ContainerDefinition$name": "

    The name of a container. If you are linking multiple containers together in a task definition, the name of one container can be entered in the links of another container to connect the containers. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed. This parameter maps to name in the Create a container section of the Docker Remote API and the --name option to docker run.

    ", + "ContainerDefinition$image": "

    The image used to start a container. This string is passed directly to the Docker daemon. Images in the Docker Hub registry are available by default. Other repositories are specified with repository-url/image:tag. Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image in the Create a container section of the Docker Remote API and the IMAGE parameter of docker run.

    • Images in official repositories on Docker Hub use a single name (for example, ubuntu or mongo).

    • Images in other repositories on Docker Hub are qualified with an organization name (for example, amazon/amazon-ecs-agent).

    • Images in other online repositories are qualified further by a domain name (for example, quay.io/assemblyline/ubuntu).

    ", + "ContainerDefinition$hostname": "

    The hostname to use for your container. This parameter maps to Hostname in the Create a container section of the Docker Remote API and the --hostname option to docker run.

    ", + "ContainerDefinition$user": "

    The user name to use inside the container. This parameter maps to User in the Create a container section of the Docker Remote API and the --user option to docker run.

    ", + "ContainerDefinition$workingDirectory": "

    The working directory in which to run commands inside the container. This parameter maps to WorkingDir in the Create a container section of the Docker Remote API and the --workdir option to docker run.

    ", + "ContainerInstance$containerInstanceArn": "

    The Amazon Resource Name (ARN) of the container instance. The ARN contains the arn:aws:ecs namespace, followed by the region of the container instance, the AWS account ID of the container instance owner, the container-instance namespace, and then the container instance ID. For example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID.

    ", + "ContainerInstance$ec2InstanceId": "

    The EC2 instance ID of the container instance.

    ", + "ContainerInstance$status": "

    The status of the container instance. The valid values are ACTIVE or INACTIVE. ACTIVE indicates that the container instance can accept tasks.

    ", + "ContainerOverride$name": "

    The name of the container that receives the override.

    ", + "CreateClusterRequest$clusterName": "

    The name of your cluster. If you do not specify a name for your cluster, you create a cluster named default. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed.

    ", + "CreateServiceRequest$cluster": "

    The short name or full Amazon Resource Name (ARN) of the cluster on which to run your service. If you do not specify a cluster, the default cluster is assumed.

    ", + "CreateServiceRequest$serviceName": "

    The name of your service. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed. Service names must be unique within a cluster, but you can have similarly named services in multiple clusters within a region or across multiple regions.

    ", + "CreateServiceRequest$taskDefinition": "

    The family and revision (family:revision) or full Amazon Resource Name (ARN) of the task definition to run in your service. If a revision is not specified, the latest ACTIVE revision is used.

    ", + "CreateServiceRequest$clientToken": "

    Unique, case-sensitive identifier you provide to ensure the idempotency of the request. Up to 32 ASCII characters are allowed.

    ", + "CreateServiceRequest$role": "

    The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon ECS to make calls to your load balancer on your behalf. This parameter is required if you are using a load balancer with your service. If you specify the role parameter, you must also specify a load balancer object with the loadBalancers parameter.

    If your specified role has a path other than /, then you must either specify the full role ARN (this is recommended) or prefix the role name with the path. For example, if a role with the name bar has a path of /foo/ then you would specify /foo/bar as the role name. For more information, see Friendly Names and Paths in the IAM User Guide.
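
    Since role and loadBalancers must be supplied together, here is a sketch of a CreateService call that pairs them; every name and ARN below is a placeholder, and the full role ARN form is used as the paragraph above recommends.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

// createBalancedService creates a service behind a load balancer. Cluster is
// omitted, so the default cluster is assumed.
func createBalancedService(svc *ecs.ECS) (*ecs.Service, error) {
	out, err := svc.CreateService(&ecs.CreateServiceInput{
		ServiceName:    aws.String("web"),
		TaskDefinition: aws.String("web:1"),
		DesiredCount:   aws.Int64(2),
		// Full role ARN, the recommended form when the role has a non-/ path.
		Role: aws.String("arn:aws:iam::012345678910:role/ecsServiceRole"),
		LoadBalancers: []*ecs.LoadBalancer{{
			LoadBalancerName: aws.String("web-elb"),
			ContainerName:    aws.String("web"),
			ContainerPort:    aws.Int64(80),
		}},
	})
	if err != nil {
		return nil, err
	}
	return out.Service, nil
}
```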

    ", + "DeleteClusterRequest$cluster": "

    The short name or full Amazon Resource Name (ARN) of the cluster to delete.

    ", + "DeleteServiceRequest$cluster": "

    The name of the cluster that hosts the service to delete. If you do not specify a cluster, the default cluster is assumed.

    ", + "DeleteServiceRequest$service": "

    The name of the service to delete.

    ", + "Deployment$id": "

    The ID of the deployment.

    ", + "Deployment$status": "

    The status of the deployment. Valid values are PRIMARY (for the most recent deployment), ACTIVE (for previous deployments that still have tasks running, but are being replaced with the PRIMARY deployment), and INACTIVE (for deployments that have been completely replaced).

    ", + "Deployment$taskDefinition": "

    The most recent task definition that was specified for the service to use.

    ", + "DeregisterContainerInstanceRequest$cluster": "

    The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instance to deregister. If you do not specify a cluster, the default cluster is assumed.

    ", + "DeregisterContainerInstanceRequest$containerInstance": "

    The container instance ID or full Amazon Resource Name (ARN) of the container instance to deregister. The ARN contains the arn:aws:ecs namespace, followed by the region of the container instance, the AWS account ID of the container instance owner, the container-instance namespace, and then the container instance ID. For example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID.

    ", + "DeregisterTaskDefinitionRequest$taskDefinition": "

    The family and revision (family:revision) or full Amazon Resource Name (ARN) of the task definition to deregister. You must specify a revision.

    ", + "DescribeContainerInstancesRequest$cluster": "

    The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instances to describe. If you do not specify a cluster, the default cluster is assumed.

    ", + "DescribeServicesRequest$cluster": "

    The name of the cluster that hosts the service to describe. If you do not specify a cluster, the default cluster is assumed.

    ", + "DescribeTaskDefinitionRequest$taskDefinition": "

    The family for the latest ACTIVE revision, family and revision (family:revision) for a specific revision in the family, or full Amazon Resource Name (ARN) of the task definition to describe.

    ", + "DescribeTasksRequest$cluster": "

    The short name or full Amazon Resource Name (ARN) of the cluster that hosts the task to describe. If you do not specify a cluster, the default cluster is assumed.

    ", + "DiscoverPollEndpointRequest$containerInstance": "

    The container instance ID or full Amazon Resource Name (ARN) of the container instance. The ARN contains the arn:aws:ecs namespace, followed by the region of the container instance, the AWS account ID of the container instance owner, the container-instance namespace, and then the container instance ID. For example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID.

    ", + "DiscoverPollEndpointRequest$cluster": "

    The cluster that the container instance belongs to.

    ", + "DiscoverPollEndpointResponse$endpoint": "

    The endpoint for the Amazon ECS agent to poll.

    ", + "DiscoverPollEndpointResponse$telemetryEndpoint": "

    The telemetry endpoint for the Amazon ECS agent.

    ", + "DockerLabelsMap$key": null, + "DockerLabelsMap$value": null, + "Failure$arn": "

    The Amazon Resource Name (ARN) of the failed resource.

    ", + "Failure$reason": "

    The reason for the failure.

    ", + "HostEntry$hostname": "

    The hostname to use in the /etc/hosts entry.

    ", + "HostEntry$ipAddress": "

    The IP address to use in the /etc/hosts entry.

    ", + "HostVolumeProperties$sourcePath": "

    The path on the host container instance that is presented to the container. If this parameter is empty, then the Docker daemon assigns a host path for you. If the host parameter contains a sourcePath file location, then the data volume persists at the specified location on the host container instance until you delete it manually. If the sourcePath value does not exist on the host container instance, the Docker daemon creates it. If the location does exist, the contents of the source path folder are exported.

    ", + "KeyValuePair$name": "

    The name of the key value pair. For environment variables, this is the name of the environment variable.

    ", + "KeyValuePair$value": "

    The value of the key value pair. For environment variables, this is the value of the environment variable.

    ", + "ListClustersRequest$nextToken": "

    The nextToken value returned from a previous paginated ListClusters request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value. This value is null when there are no more results to return.

    This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.
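
    A sketch of the token loop this describes, treating nextToken as an opaque cursor that is passed back verbatim:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

// allClusterArns follows nextToken until it comes back nil, never inspecting
// the token itself.
func allClusterArns(svc *ecs.ECS) ([]*string, error) {
	var arns []*string
	in := &ecs.ListClustersInput{MaxResults: aws.Int64(10)}
	for {
		out, err := svc.ListClusters(in)
		if err != nil {
			return nil, err
		}
		arns = append(arns, out.ClusterArns...)
		if out.NextToken == nil {
			return arns, nil
		}
		in.NextToken = out.NextToken
	}
}
```

    The SDK also generates a ListClustersPages helper from the paginator definitions added later in this patch, which wraps this same loop.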

    ", + "ListClustersResponse$nextToken": "

    The nextToken value to include in a future ListClusters request. When the results of a ListClusters request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "ListContainerInstancesRequest$cluster": "

    The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instances to list. If you do not specify a cluster, the default cluster is assumed.

    ", + "ListContainerInstancesRequest$nextToken": "

    The nextToken value returned from a previous paginated ListContainerInstances request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value. This value is null when there are no more results to return.

    This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

    ", + "ListContainerInstancesResponse$nextToken": "

    The nextToken value to include in a future ListContainerInstances request. When the results of a ListContainerInstances request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "ListServicesRequest$cluster": "

    The short name or full Amazon Resource Name (ARN) of the cluster that hosts the services to list. If you do not specify a cluster, the default cluster is assumed.

    ", + "ListServicesRequest$nextToken": "

    The nextToken value returned from a previous paginated ListServices request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value. This value is null when there are no more results to return.

    This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

    ", + "ListServicesResponse$nextToken": "

    The nextToken value to include in a future ListServices request. When the results of a ListServices request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "ListTaskDefinitionFamiliesRequest$familyPrefix": "

    The familyPrefix is a string that is used to filter the results of ListTaskDefinitionFamilies. If you specify a familyPrefix, only task definition family names that begin with the familyPrefix string are returned.

    ", + "ListTaskDefinitionFamiliesRequest$nextToken": "

    The nextToken value returned from a previous paginated ListTaskDefinitionFamilies request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value. This value is null when there are no more results to return.

    This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

    ", + "ListTaskDefinitionFamiliesResponse$nextToken": "

    The nextToken value to include in a future ListTaskDefinitionFamilies request. When the results of a ListTaskDefinitionFamilies request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "ListTaskDefinitionsRequest$familyPrefix": "

    The full family name with which to filter the ListTaskDefinitions results. Specifying a familyPrefix limits the listed task definitions to task definition revisions that belong to that family.

    ", + "ListTaskDefinitionsRequest$nextToken": "

    The nextToken value returned from a previous paginated ListTaskDefinitions request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value. This value is null when there are no more results to return.

    This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

    ", + "ListTaskDefinitionsResponse$nextToken": "

    The nextToken value to include in a future ListTaskDefinitions request. When the results of a ListTaskDefinitions request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "ListTasksRequest$cluster": "

    The short name or full Amazon Resource Name (ARN) of the cluster that hosts the tasks to list. If you do not specify a cluster, the default cluster is assumed.

    ", + "ListTasksRequest$containerInstance": "

    The container instance ID or full Amazon Resource Name (ARN) of the container instance with which to filter the ListTasks results. Specifying a containerInstance limits the results to tasks that belong to that container instance.

    ", + "ListTasksRequest$family": "

    The name of the family with which to filter the ListTasks results. Specifying a family limits the results to tasks that belong to that family.

    ", + "ListTasksRequest$nextToken": "

    The nextToken value returned from a previous paginated ListTasks request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value. This value is null when there are no more results to return.

    This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

    ", + "ListTasksRequest$startedBy": "

    The startedBy value with which to filter the task results. Specifying a startedBy value limits the results to tasks that were started with that value.

    ", + "ListTasksRequest$serviceName": "

    The name of the service with which to filter the ListTasks results. Specifying a serviceName limits the results to tasks that belong to that service.

    ", + "ListTasksResponse$nextToken": "

    The nextToken value to include in a future ListTasks request. When the results of a ListTasks request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "LoadBalancer$loadBalancerName": "

    The name of the load balancer.

    ", + "LoadBalancer$containerName": "

    The name of the container (as it appears in a container definition) to associate with the load balancer.

    ", + "LogConfigurationOptionsMap$key": null, + "LogConfigurationOptionsMap$value": null, + "MountPoint$sourceVolume": "

    The name of the volume to mount.

    ", + "MountPoint$containerPath": "

    The path on the container to mount the host volume at.

    ", + "NetworkBinding$bindIP": "

    The IP address that the container is bound to on the container instance.

    ", + "RegisterContainerInstanceRequest$cluster": "

    The short name or full Amazon Resource Name (ARN) of the cluster with which to register your container instance. If you do not specify a cluster, the default cluster is assumed.

    ", + "RegisterContainerInstanceRequest$instanceIdentityDocument": "

    The instance identity document for the EC2 instance to register. This document can be found by running the following command from the instance: curl http://169.254.169.254/latest/dynamic/instance-identity/document/

    ", + "RegisterContainerInstanceRequest$instanceIdentityDocumentSignature": "

    The instance identity document signature for the EC2 instance to register. This signature can be found by running the following command from the instance: curl http://169.254.169.254/latest/dynamic/instance-identity/signature/

    ", + "RegisterContainerInstanceRequest$containerInstanceArn": "

    The Amazon Resource Name (ARN) of the container instance (if it was previously registered).

    ", + "RegisterTaskDefinitionRequest$family": "

    You must specify a family for a task definition, which allows you to track multiple versions of the same task definition. The family is used as a name for your task definition. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed.

    ", + "Resource$name": "

    The name of the resource, such as CPU, MEMORY, PORTS, or a user-defined resource.

    ", + "Resource$type": "

    The type of the resource, such as INTEGER, DOUBLE, LONG, or STRINGSET.

    ", + "RunTaskRequest$cluster": "

    The short name or full Amazon Resource Name (ARN) of the cluster on which to run your task. If you do not specify a cluster, the default cluster is assumed.

    ", + "RunTaskRequest$taskDefinition": "

    The family and revision (family:revision) or full Amazon Resource Name (ARN) of the task definition to run. If a revision is not specified, the latest ACTIVE revision is used.

    ", + "RunTaskRequest$startedBy": "

    An optional tag specified when a task is started. For example, if you automatically trigger a task to run a batch process job, you could apply a unique identifier for that job to your task with the startedBy parameter. You can then identify which tasks belong to that job by filtering the results of a ListTasks call with the startedBy value. Up to 36 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed.

    If a task is started by an Amazon ECS service, then the startedBy parameter contains the deployment ID of the service that starts it.
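
    A sketch of the round trip this enables: tag tasks at RunTask time, then find them again with the ListTasks startedBy filter. The job identifier and task definition below are placeholders.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

// runAndFind starts a task tagged with a job ID (max 36 characters), then
// lists the task ARNs carrying that tag.
func runAndFind(svc *ecs.ECS) ([]*string, error) {
	const jobID = "nightly-batch-42"
	if _, err := svc.RunTask(&ecs.RunTaskInput{
		TaskDefinition: aws.String("batch:3"),
		StartedBy:      aws.String(jobID),
	}); err != nil {
		return nil, err
	}
	out, err := svc.ListTasks(&ecs.ListTasksInput{
		StartedBy: aws.String(jobID),
	})
	if err != nil {
		return nil, err
	}
	return out.TaskArns, nil
}
```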

    ", + "ServerException$message": null, + "Service$serviceArn": "

    The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example, arn:aws:ecs:region:012345678910:service/my-service.

    ", + "Service$serviceName": "

    The name of your service. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed. Service names must be unique within a cluster, but you can have similarly named services in multiple clusters within a region or across multiple regions.

    ", + "Service$clusterArn": "

    The Amazon Resource Name (ARN) of the cluster that hosts the service.

    ", + "Service$status": "

    The status of the service. The valid values are ACTIVE, DRAINING, or INACTIVE.

    ", + "Service$taskDefinition": "

    The task definition to use for tasks in the service. This value is specified when the service is created with CreateService, and it can be modified with UpdateService.

    ", + "Service$roleArn": "

    The Amazon Resource Name (ARN) of the IAM role associated with the service that allows the Amazon ECS container agent to register container instances with a load balancer.

    ", + "ServiceEvent$id": "

    The ID string of the event.

    ", + "ServiceEvent$message": "

    The event message.

    ", + "StartTaskRequest$cluster": "

    The short name or full Amazon Resource Name (ARN) of the cluster on which to start your task. If you do not specify a cluster, the default cluster is assumed.

    ", + "StartTaskRequest$taskDefinition": "

    The family and revision (family:revision) or full Amazon Resource Name (ARN) of the task definition to start. If a revision is not specified, the latest ACTIVE revision is used.

    ", + "StartTaskRequest$startedBy": "

    An optional tag specified when a task is started. For example, if you automatically trigger a task to run a batch process job, you could apply a unique identifier for that job to your task with the startedBy parameter. You can then identify which tasks belong to that job by filtering the results of a ListTasks call with the startedBy value. Up to 36 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed.

    If a task is started by an Amazon ECS service, then the startedBy parameter contains the deployment ID of the service that starts it.

    ", + "StopTaskRequest$cluster": "

    The short name or full Amazon Resource Name (ARN) of the cluster that hosts the task to stop. If you do not specify a cluster, the default cluster is assumed.

    ", + "StopTaskRequest$task": "

    The task ID or full Amazon Resource Name (ARN) entry of the task to stop.

    ", + "StopTaskRequest$reason": "

    An optional message specified when a task is stopped. For example, if you are using a custom scheduler, you can use this parameter to specify the reason for stopping the task, and the message will appear in subsequent DescribeTasks API operations on this task. Up to 255 characters are allowed in this message.

    ", + "StringList$member": null, + "SubmitContainerStateChangeRequest$cluster": "

    The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container.

    ", + "SubmitContainerStateChangeRequest$task": "

    The task ID or full Amazon Resource Name (ARN) of the task that hosts the container.

    ", + "SubmitContainerStateChangeRequest$containerName": "

    The name of the container.

    ", + "SubmitContainerStateChangeRequest$status": "

    The status of the state change request.

    ", + "SubmitContainerStateChangeRequest$reason": "

    The reason for the state change request.

    ", + "SubmitContainerStateChangeResponse$acknowledgment": "

    Acknowledgement of the state change.

    ", + "SubmitTaskStateChangeRequest$cluster": "

    The short name or full Amazon Resource Name (ARN) of the cluster that hosts the task.

    ", + "SubmitTaskStateChangeRequest$task": "

    The task ID or full Amazon Resource Name (ARN) of the task in the state change request.

    ", + "SubmitTaskStateChangeRequest$status": "

    The status of the state change request.

    ", + "SubmitTaskStateChangeRequest$reason": "

    The reason for the state change request.

    ", + "SubmitTaskStateChangeResponse$acknowledgment": "

    Acknowledgement of the state change.

    ", + "Task$taskArn": "

    The Amazon Resource Name (ARN) of the task.

    ", + "Task$clusterArn": "

    The Amazon Resource Name (ARN) of the cluster that hosts the task.

    ", + "Task$taskDefinitionArn": "

    The Amazon Resource Name (ARN) of the task definition that creates the task.

    ", + "Task$containerInstanceArn": "

    The Amazon Resource Name (ARN) of the container instances that host the task.

    ", + "Task$lastStatus": "

    The last known status of the task.

    ", + "Task$desiredStatus": "

    The desired status of the task.

    ", + "Task$startedBy": "

    The tag specified when a task is started. If the task is started by an Amazon ECS service, then the startedBy parameter contains the deployment ID of the service that starts it.

    ", + "Task$stoppedReason": "

    The reason the task was stopped.

    ", + "TaskDefinition$taskDefinitionArn": "

    The full Amazon Resource Name (ARN) of the task definition.

    ", + "TaskDefinition$family": "

    The family of your task definition, used as the definition name.

    ", + "UpdateContainerAgentRequest$cluster": "

    The short name or full Amazon Resource Name (ARN) of the cluster that your container instance is running on. If you do not specify a cluster, the default cluster is assumed.

    ", + "UpdateContainerAgentRequest$containerInstance": "

    The container instance ID or full Amazon Resource Name (ARN) entries for the container instance on which you would like to update the Amazon ECS container agent.

    ", + "UpdateServiceRequest$cluster": "

    The short name or full Amazon Resource Name (ARN) of the cluster that your service is running on. If you do not specify a cluster, the default cluster is assumed.

    ", + "UpdateServiceRequest$service": "

    The name of the service to update.

    ", + "UpdateServiceRequest$taskDefinition": "

    The family and revision (family:revision) or full Amazon Resource Name (ARN) of the task definition to run in your service. If a revision is not specified, the latest ACTIVE revision is used. If you modify the task definition with UpdateService, Amazon ECS spawns a task with the new version of the task definition and then stops an old task after the new version is running.

    ", + "VersionInfo$agentVersion": "

    The version number of the Amazon ECS container agent.

    ", + "VersionInfo$agentHash": "

    The Git commit hash for the Amazon ECS container agent build on the amazon-ecs-agent GitHub repository.

    ", + "VersionInfo$dockerVersion": "

    The Docker version running on the container instance.

    ", + "Volume$name": "

    The name of the volume. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed. This name is referenced in the sourceVolume parameter of container definition mountPoints.

    ", + "VolumeFrom$sourceContainer": "

    The name of the container to mount volumes from.

    " + } + }, + "StringList": { + "base": null, + "refs": { + "ContainerDefinition$links": "

    The link parameter allows containers to communicate with each other without the need for port mappings, using the name parameter and, optionally, an alias for the link. This construct is analogous to name:alias in Docker links. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed for each name and alias. For more information on linking Docker containers, see https://docs.docker.com/userguide/dockerlinks/. This parameter maps to Links in the Create a container section of the Docker Remote API and the --link option to docker run.

    Containers that are collocated on a single container instance may be able to communicate with each other without requiring links or host port mappings. Network isolation is achieved on the container instance using security groups and VPC settings.

    ", + "ContainerDefinition$entryPoint": "

    Early versions of the Amazon ECS container agent do not properly handle entryPoint parameters. If you have problems using entryPoint, update your container agent or enter your commands and arguments as command array items instead.

    The entry point that is passed to the container. This parameter maps to Entrypoint in the Create a container section of the Docker Remote API and the --entrypoint option to docker run. For more information, see https://docs.docker.com/reference/builder/#entrypoint.

    ", + "ContainerDefinition$command": "

    The command that is passed to the container. This parameter maps to Cmd in the Create a container section of the Docker Remote API and the COMMAND parameter to docker run. For more information, see https://docs.docker.com/reference/builder/#cmd.

    ", + "ContainerDefinition$dnsServers": "

    A list of DNS servers that are presented to the container. This parameter maps to Dns in the Create a container section of the Docker Remote API and the --dns option to docker run.

    ", + "ContainerDefinition$dnsSearchDomains": "

    A list of DNS search domains that are presented to the container. This parameter maps to DnsSearch in the Create a container section of the Docker Remote API and the --dns-search option to docker run.

    ", + "ContainerDefinition$dockerSecurityOptions": "

    A list of strings to provide custom labels for SELinux and AppArmor multi-level security systems. This parameter maps to SecurityOpt in the Create a container section of the Docker Remote API and the --security-opt option to docker run.

    The Amazon ECS container agent running on a container instance must register with the ECS_SELINUX_CAPABLE=true or ECS_APPARMOR_CAPABLE=true environment variables before containers placed on that instance can use these security options. For more information, see Amazon ECS Container Agent Configuration in the Amazon EC2 Container Service Developer Guide.

    ", + "ContainerOverride$command": "

    The command to send to the container that overrides the default command from the Docker image or the task definition.

    ", + "DescribeClustersRequest$clusters": "

    A space-separated list of cluster names or full cluster Amazon Resource Name (ARN) entries. If you do not specify a cluster, the default cluster is assumed.

    ", + "DescribeContainerInstancesRequest$containerInstances": "

    A space-separated list of container instance IDs or full Amazon Resource Name (ARN) entries.

    ", + "DescribeServicesRequest$services": "

    A list of services to describe.

    ", + "DescribeTasksRequest$tasks": "

    A space-separated list of task IDs or full Amazon Resource Name (ARN) entries.

    ", + "ListClustersResponse$clusterArns": "

    The list of full Amazon Resource Name (ARN) entries for each cluster associated with your account.

    ", + "ListContainerInstancesResponse$containerInstanceArns": "

    The list of container instances with full Amazon Resource Name (ARN) entries for each container instance associated with the specified cluster.

    ", + "ListServicesResponse$serviceArns": "

    The list of full Amazon Resource Name (ARN) entries for each service associated with the specified cluster.

    ", + "ListTaskDefinitionFamiliesResponse$families": "

    The list of task definition family names that match the ListTaskDefinitionFamilies request.

    ", + "ListTaskDefinitionsResponse$taskDefinitionArns": "

    The list of task definition Amazon Resource Name (ARN) entries for the ListTaskDefinitions request.

    ", + "ListTasksResponse$taskArns": "

    The list of task Amazon Resource Name (ARN) entries for the ListTasks request.

    ", + "Resource$stringSetValue": "

    When the stringSetValue type is set, the value of the resource must be a string type.

    ", + "StartTaskRequest$containerInstances": "

    The container instance IDs or full Amazon Resource Name (ARN) entries for the container instances on which you would like to place your task.

    The list of container instances to start tasks on is limited to 10.

    " + } + }, + "SubmitContainerStateChangeRequest": { + "base": null, + "refs": { + } + }, + "SubmitContainerStateChangeResponse": { + "base": null, + "refs": { + } + }, + "SubmitTaskStateChangeRequest": { + "base": null, + "refs": { + } + }, + "SubmitTaskStateChangeResponse": { + "base": null, + "refs": { + } + }, + "Task": { + "base": "

    Details on a task in a cluster.

    ", + "refs": { + "StopTaskResponse$task": null, + "Tasks$member": null + } + }, + "TaskDefinition": { + "base": "

    Details of a task definition.

    ", + "refs": { + "DeregisterTaskDefinitionResponse$taskDefinition": "

    The full description of the deregistered task definition.

    ", + "DescribeTaskDefinitionResponse$taskDefinition": "

    The full task definition description.

    ", + "RegisterTaskDefinitionResponse$taskDefinition": "

    The full description of the registered task definition.

    " + } + }, + "TaskDefinitionFamilyStatus": { + "base": null, + "refs": { + "ListTaskDefinitionFamiliesRequest$status": "

    The task definition family status with which to filter the ListTaskDefinitionFamilies results. By default, both ACTIVE and INACTIVE task definition families are listed. If this parameter is set to ACTIVE, only task definition families that have an ACTIVE task definition revision are returned. If this parameter is set to INACTIVE, only task definition families that do not have any ACTIVE task definition revisions are returned. If you paginate the resulting output, be sure to keep the status value constant in each subsequent request.

    " + } + }, + "TaskDefinitionStatus": { + "base": null, + "refs": { + "ListTaskDefinitionsRequest$status": "

    The task definition status with which to filter the ListTaskDefinitions results. By default, only ACTIVE task definitions are listed. By setting this parameter to INACTIVE, you can view task definitions that are INACTIVE as long as an active task or service still references them. If you paginate the resulting output, be sure to keep the status value constant in each subsequent request.

    ", + "TaskDefinition$status": "

    The status of the task definition.

    " + } + }, + "TaskOverride": { + "base": "

    The overrides associated with a task.

    ", + "refs": { + "RunTaskRequest$overrides": "

    A list of container overrides in JSON format that specify the name of a container in the specified task definition and the overrides it should receive. You can override the default command for a container (that is specified in the task definition or Docker image) with a command override. You can also override existing environment variables (that are specified in the task definition or Docker image) on a container or add new environment variables to it with an environment override.

    A total of 8192 characters are allowed for overrides. This limit includes the JSON formatting characters of the override structure.
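
    A minimal sketch of a one-off override, assuming a container named worker in the placeholder batch:3 task definition; the serialized structure must stay within the 8192-character limit noted above.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

// runWithOverrides replaces one container's command and adds an environment
// variable for a single run, leaving the registered task definition untouched.
func runWithOverrides(svc *ecs.ECS) error {
	_, err := svc.RunTask(&ecs.RunTaskInput{
		TaskDefinition: aws.String("batch:3"),
		Overrides: &ecs.TaskOverride{
			ContainerOverrides: []*ecs.ContainerOverride{{
				Name:    aws.String("worker"),
				Command: []*string{aws.String("process.sh"), aws.String("--once")},
				Environment: []*ecs.KeyValuePair{{
					Name:  aws.String("JOB_ID"),
					Value: aws.String("42"),
				}},
			}},
		},
	})
	return err
}
```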

    ", + "StartTaskRequest$overrides": "

    A list of container overrides in JSON format that specify the name of a container in the specified task definition and the overrides it should receive. You can override the default command for a container (that is specified in the task definition or Docker image) with a command override. You can also override existing environment variables (that are specified in the task definition or Docker image) on a container or add new environment variables to it with an environment override.

    A total of 8192 characters are allowed for overrides. This limit includes the JSON formatting characters of the override structure.

    ", + "Task$overrides": "

    One or more container overrides.

    " + } + }, + "Tasks": { + "base": null, + "refs": { + "DescribeTasksResponse$tasks": "

    The list of tasks.

    ", + "RunTaskResponse$tasks": "

    A full description of the tasks that were run. Each task that was successfully placed on your cluster is described here.

    ", + "StartTaskResponse$tasks": "

    A full description of the tasks that were started. Each task that was successfully placed on your container instances is described here.

    " + } + }, + "Timestamp": { + "base": null, + "refs": { + "Deployment$createdAt": "

    The Unix time in seconds and milliseconds when the service was created.

    ", + "Deployment$updatedAt": "

    The Unix time in seconds and milliseconds when the service was last updated.

    ", + "Service$createdAt": "

    The Unix time in seconds and milliseconds when the service was created.

    ", + "ServiceEvent$createdAt": "

    The Unix time in seconds and milliseconds when the event was triggered.

    ", + "Task$createdAt": "

    The Unix time in seconds and milliseconds when the task was created (the task entered the PENDING state).

    ", + "Task$startedAt": "

    The Unix time in seconds and milliseconds when the task was started (the task transitioned from the PENDING state to the RUNNING state).

    ", + "Task$stoppedAt": "

    The Unix time in seconds and milliseconds when the task was stopped (the task transitioned from the RUNNING state to the STOPPED state).

    " + } + }, + "TransportProtocol": { + "base": null, + "refs": { + "NetworkBinding$protocol": "

    The protocol used for the network binding.

    ", + "PortMapping$protocol": "

    The protocol used for the port mapping. Valid values are tcp and udp. The default is tcp.

    " + } + }, + "Ulimit": { + "base": "

    The ulimit settings to pass to the container.

    ", + "refs": { + "UlimitList$member": null + } + }, + "UlimitList": { + "base": null, + "refs": { + "ContainerDefinition$ulimits": "

    A list of ulimits to set in the container. This parameter maps to Ulimits in the Create a container section of the Docker Remote API and the --ulimit option to docker run. Valid naming values are displayed in the Ulimit data type. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log into your container instance and run the following command: sudo docker version | grep \"Server API version\"
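
    For illustration, a container definition fragment that raises the open-file limit; nofile is one of the names enumerated by the Ulimit data type, and the limit values shown are arbitrary.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

// withUlimit returns a container definition whose nofile soft/hard limits
// override the Docker defaults on the container instance.
func withUlimit() *ecs.ContainerDefinition {
	return &ecs.ContainerDefinition{
		Name:  aws.String("web"),
		Image: aws.String("nginx"),
		Ulimits: []*ecs.Ulimit{{
			Name:      aws.String("nofile"),
			SoftLimit: aws.Int64(4096),
			HardLimit: aws.Int64(8192),
		}},
	}
}
```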

    " + } + }, + "UlimitName": { + "base": null, + "refs": { + "Ulimit$name": "

    The type of the ulimit.

    " + } + }, + "UpdateContainerAgentRequest": { + "base": null, + "refs": { + } + }, + "UpdateContainerAgentResponse": { + "base": null, + "refs": { + } + }, + "UpdateInProgressException": { + "base": "

    There is already a current Amazon ECS container agent update in progress on the specified container instance. If the container agent becomes disconnected while it is in a transitional stage, such as PENDING or STAGING, the update process can get stuck in that state. However, when the agent reconnects, it resumes where it stopped previously.

    ", + "refs": { + } + }, + "UpdateServiceRequest": { + "base": null, + "refs": { + } + }, + "UpdateServiceResponse": { + "base": null, + "refs": { + } + }, + "VersionInfo": { + "base": "

    The Docker and Amazon ECS container agent version information about a container instance.

    ", + "refs": { + "ContainerInstance$versionInfo": "

    The version information for the Amazon ECS container agent and Docker daemon running on the container instance.

    ", + "RegisterContainerInstanceRequest$versionInfo": "

    The version information for the Amazon ECS container agent and Docker daemon running on the container instance.

    " + } + }, + "Volume": { + "base": "

    A data volume used in a task definition.

    ", + "refs": { + "VolumeList$member": null + } + }, + "VolumeFrom": { + "base": "

    Details on a data volume from another container.

    ", + "refs": { + "VolumeFromList$member": null + } + }, + "VolumeFromList": { + "base": null, + "refs": { + "ContainerDefinition$volumesFrom": "

    Data volumes to mount from another container. This parameter maps to VolumesFrom in the Create a container section of the Docker Remote API and the --volumes-from option to docker run.

    " + } + }, + "VolumeList": { + "base": null, + "refs": { + "RegisterTaskDefinitionRequest$volumes": "

    A list of volume definitions in JSON format that containers in your task may use.

    ", + "TaskDefinition$volumes": "

    The list of volumes in a task. For more information about volume definition parameters and defaults, see Amazon ECS Task Definitions in the Amazon EC2 Container Service Developer Guide.
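
    A sketch tying volumes to mountPoints: the name declared under volumes is what a container definition's sourceVolume must reference. Paths and names below are placeholders.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

// sharedVolume registers a task definition with one named host volume and a
// container that mounts it; the "data" name links the two halves.
func sharedVolume(svc *ecs.ECS) error {
	_, err := svc.RegisterTaskDefinition(&ecs.RegisterTaskDefinitionInput{
		Family: aws.String("web"),
		Volumes: []*ecs.Volume{{
			Name: aws.String("data"),
			Host: &ecs.HostVolumeProperties{SourcePath: aws.String("/var/data")},
		}},
		ContainerDefinitions: []*ecs.ContainerDefinition{{
			Name:   aws.String("web"),
			Image:  aws.String("nginx"),
			Memory: aws.Int64(128),
			MountPoints: []*ecs.MountPoint{{
				SourceVolume:  aws.String("data"),
				ContainerPath: aws.String("/usr/share/nginx/html"),
			}},
		}},
	})
	return err
}
```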

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/paginators-1.json new file mode 100644 index 000000000..081a2df00 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/paginators-1.json @@ -0,0 +1,40 @@ +{ + "pagination": { + "ListClusters": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "clusterArns" + }, + "ListContainerInstances": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "containerInstanceArns" + }, + "ListTaskDefinitions": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "taskDefinitionArns" + }, + "ListTaskDefinitionFamilies": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "families" + }, + "ListTasks": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "taskArns" + }, + "ListServices": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "serviceArns" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/waiters-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/waiters-2.json new file mode 100644 index 000000000..8866d15fd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/waiters-2.json @@ -0,0 +1,93 @@ +{ + "version": 2, + "waiters": { + "TasksRunning": { + "delay": 6, + "operation": "DescribeTasks", + "maxAttempts": 100, + "acceptors": [ + { + "expected": "STOPPED", + "matcher": "pathAny", + "state": "failure", + "argument": "tasks[].lastStatus" + }, + { + "expected": "MISSING", + "matcher": "pathAny", + "state": "failure", + "argument": "failures[].reason" + }, + { + "expected": "RUNNING", + "matcher": "pathAll", + "state": "success", + "argument": "tasks[].lastStatus" + } + ] + }, + "TasksStopped": { + "delay": 6, + "operation": "DescribeTasks", + "maxAttempts": 100, + "acceptors": [ + { + "expected": "STOPPED", + "matcher": "pathAll", + "state": "success", + "argument": "tasks[].lastStatus" + } + ] + }, + "ServicesStable": { + "delay": 15, + "operation": "DescribeServices", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "MISSING", + "matcher": "pathAny", + "state": "failure", + "argument": "failures[].reason" + }, + { + "expected": "DRAINING", + "matcher": "pathAny", + "state": "failure", + "argument": "services[].status" + }, + { + "expected": "INACTIVE", + "matcher": "pathAny", + "state": "failure", + "argument": "services[].status" + }, + { + "expected": true, + "matcher": "path", + "state": "success", + "argument": "services | [@[?length(deployments)!=`1`], @[?desiredCount!=runningCount]][] | length(@) == `0`" + } + ] + }, + "ServicesInactive": { + "delay": 15, + "operation": "DescribeServices", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "MISSING", + "matcher": "pathAny", + "state": "failure", + "argument": 
"failures[].reason" + }, + { + "expected": "INACTIVE", + "matcher": "pathAny", + "state": "success", + "argument": "services[].status" + } + ] + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/elasticache/2015-02-02/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticache/2015-02-02/api-2.json new file mode 100644 index 000000000..9039e8a0f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticache/2015-02-02/api-2.json @@ -0,0 +1,2426 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-02-02", + "endpointPrefix":"elasticache", + "protocol":"query", + "serviceFullName":"Amazon ElastiCache", + "signatureVersion":"v4", + "xmlNamespace":"http://elasticache.amazonaws.com/doc/2015-02-02/" + }, + "operations":{ + "AddTagsToResource":{ + "name":"AddTagsToResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddTagsToResourceMessage"}, + "output":{ + "shape":"TagListMessage", + "resultWrapper":"AddTagsToResourceResult" + }, + "errors":[ + {"shape":"CacheClusterNotFoundFault"}, + {"shape":"SnapshotNotFoundFault"}, + {"shape":"TagQuotaPerResourceExceeded"}, + {"shape":"InvalidARNFault"} + ] + }, + "AuthorizeCacheSecurityGroupIngress":{ + "name":"AuthorizeCacheSecurityGroupIngress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AuthorizeCacheSecurityGroupIngressMessage"}, + "output":{ + "shape":"AuthorizeCacheSecurityGroupIngressResult", + "resultWrapper":"AuthorizeCacheSecurityGroupIngressResult" + }, + "errors":[ + {"shape":"CacheSecurityGroupNotFoundFault"}, + {"shape":"InvalidCacheSecurityGroupStateFault"}, + {"shape":"AuthorizationAlreadyExistsFault"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidParameterCombinationException"} + ] + }, + "CopySnapshot":{ + "name":"CopySnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CopySnapshotMessage"}, + "output":{ + "shape":"CopySnapshotResult", + "resultWrapper":"CopySnapshotResult" + }, + "errors":[ + {"shape":"SnapshotAlreadyExistsFault"}, + {"shape":"SnapshotNotFoundFault"}, + {"shape":"SnapshotQuotaExceededFault"}, + {"shape":"InvalidSnapshotStateFault"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidParameterCombinationException"} + ] + }, + "CreateCacheCluster":{ + "name":"CreateCacheCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateCacheClusterMessage"}, + "output":{ + "shape":"CreateCacheClusterResult", + "resultWrapper":"CreateCacheClusterResult" + }, + "errors":[ + {"shape":"ReplicationGroupNotFoundFault"}, + {"shape":"InvalidReplicationGroupStateFault"}, + {"shape":"CacheClusterAlreadyExistsFault"}, + {"shape":"InsufficientCacheClusterCapacityFault"}, + {"shape":"CacheSecurityGroupNotFoundFault"}, + {"shape":"CacheSubnetGroupNotFoundFault"}, + {"shape":"ClusterQuotaForCustomerExceededFault"}, + {"shape":"NodeQuotaForClusterExceededFault"}, + {"shape":"NodeQuotaForCustomerExceededFault"}, + {"shape":"CacheParameterGroupNotFoundFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"TagQuotaPerResourceExceeded"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidParameterCombinationException"} + ] + }, + "CreateCacheParameterGroup":{ + "name":"CreateCacheParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateCacheParameterGroupMessage"}, + "output":{ + "shape":"CreateCacheParameterGroupResult", + "resultWrapper":"CreateCacheParameterGroupResult" + }, + 
"errors":[ + {"shape":"CacheParameterGroupQuotaExceededFault"}, + {"shape":"CacheParameterGroupAlreadyExistsFault"}, + {"shape":"InvalidCacheParameterGroupStateFault"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidParameterCombinationException"} + ] + }, + "CreateCacheSecurityGroup":{ + "name":"CreateCacheSecurityGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateCacheSecurityGroupMessage"}, + "output":{ + "shape":"CreateCacheSecurityGroupResult", + "resultWrapper":"CreateCacheSecurityGroupResult" + }, + "errors":[ + {"shape":"CacheSecurityGroupAlreadyExistsFault"}, + {"shape":"CacheSecurityGroupQuotaExceededFault"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidParameterCombinationException"} + ] + }, + "CreateCacheSubnetGroup":{ + "name":"CreateCacheSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateCacheSubnetGroupMessage"}, + "output":{ + "shape":"CreateCacheSubnetGroupResult", + "resultWrapper":"CreateCacheSubnetGroupResult" + }, + "errors":[ + {"shape":"CacheSubnetGroupAlreadyExistsFault"}, + {"shape":"CacheSubnetGroupQuotaExceededFault"}, + {"shape":"CacheSubnetQuotaExceededFault"}, + {"shape":"InvalidSubnet"} + ] + }, + "CreateReplicationGroup":{ + "name":"CreateReplicationGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateReplicationGroupMessage"}, + "output":{ + "shape":"CreateReplicationGroupResult", + "resultWrapper":"CreateReplicationGroupResult" + }, + "errors":[ + {"shape":"CacheClusterNotFoundFault"}, + {"shape":"InvalidCacheClusterStateFault"}, + {"shape":"ReplicationGroupAlreadyExistsFault"}, + {"shape":"InsufficientCacheClusterCapacityFault"}, + {"shape":"CacheSecurityGroupNotFoundFault"}, + {"shape":"CacheSubnetGroupNotFoundFault"}, + {"shape":"ClusterQuotaForCustomerExceededFault"}, + {"shape":"NodeQuotaForClusterExceededFault"}, + {"shape":"NodeQuotaForCustomerExceededFault"}, + {"shape":"CacheParameterGroupNotFoundFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"TagQuotaPerResourceExceeded"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidParameterCombinationException"} + ] + }, + "CreateSnapshot":{ + "name":"CreateSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSnapshotMessage"}, + "output":{ + "shape":"CreateSnapshotResult", + "resultWrapper":"CreateSnapshotResult" + }, + "errors":[ + {"shape":"SnapshotAlreadyExistsFault"}, + {"shape":"CacheClusterNotFoundFault"}, + {"shape":"InvalidCacheClusterStateFault"}, + {"shape":"SnapshotQuotaExceededFault"}, + {"shape":"SnapshotFeatureNotSupportedFault"}, + {"shape":"InvalidParameterCombinationException"}, + {"shape":"InvalidParameterValueException"} + ] + }, + "DeleteCacheCluster":{ + "name":"DeleteCacheCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteCacheClusterMessage"}, + "output":{ + "shape":"DeleteCacheClusterResult", + "resultWrapper":"DeleteCacheClusterResult" + }, + "errors":[ + {"shape":"CacheClusterNotFoundFault"}, + {"shape":"InvalidCacheClusterStateFault"}, + {"shape":"SnapshotAlreadyExistsFault"}, + {"shape":"SnapshotFeatureNotSupportedFault"}, + {"shape":"SnapshotQuotaExceededFault"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidParameterCombinationException"} + ] + }, + "DeleteCacheParameterGroup":{ + "name":"DeleteCacheParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"DeleteCacheParameterGroupMessage"}, + "errors":[ + {"shape":"InvalidCacheParameterGroupStateFault"}, + {"shape":"CacheParameterGroupNotFoundFault"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidParameterCombinationException"} + ] + }, + "DeleteCacheSecurityGroup":{ + "name":"DeleteCacheSecurityGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteCacheSecurityGroupMessage"}, + "errors":[ + {"shape":"InvalidCacheSecurityGroupStateFault"}, + {"shape":"CacheSecurityGroupNotFoundFault"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidParameterCombinationException"} + ] + }, + "DeleteCacheSubnetGroup":{ + "name":"DeleteCacheSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteCacheSubnetGroupMessage"}, + "errors":[ + {"shape":"CacheSubnetGroupInUse"}, + {"shape":"CacheSubnetGroupNotFoundFault"} + ] + }, + "DeleteReplicationGroup":{ + "name":"DeleteReplicationGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteReplicationGroupMessage"}, + "output":{ + "shape":"DeleteReplicationGroupResult", + "resultWrapper":"DeleteReplicationGroupResult" + }, + "errors":[ + {"shape":"ReplicationGroupNotFoundFault"}, + {"shape":"InvalidReplicationGroupStateFault"}, + {"shape":"SnapshotAlreadyExistsFault"}, + {"shape":"SnapshotFeatureNotSupportedFault"}, + {"shape":"SnapshotQuotaExceededFault"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidParameterCombinationException"} + ] + }, + "DeleteSnapshot":{ + "name":"DeleteSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSnapshotMessage"}, + "output":{ + "shape":"DeleteSnapshotResult", + "resultWrapper":"DeleteSnapshotResult" + }, + "errors":[ + {"shape":"SnapshotNotFoundFault"}, + {"shape":"InvalidSnapshotStateFault"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidParameterCombinationException"} + ] + }, + "DescribeCacheClusters":{ + "name":"DescribeCacheClusters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeCacheClustersMessage"}, + "output":{ + "shape":"CacheClusterMessage", + "resultWrapper":"DescribeCacheClustersResult" + }, + "errors":[ + {"shape":"CacheClusterNotFoundFault"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidParameterCombinationException"} + ] + }, + "DescribeCacheEngineVersions":{ + "name":"DescribeCacheEngineVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeCacheEngineVersionsMessage"}, + "output":{ + "shape":"CacheEngineVersionMessage", + "resultWrapper":"DescribeCacheEngineVersionsResult" + } + }, + "DescribeCacheParameterGroups":{ + "name":"DescribeCacheParameterGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeCacheParameterGroupsMessage"}, + "output":{ + "shape":"CacheParameterGroupsMessage", + "resultWrapper":"DescribeCacheParameterGroupsResult" + }, + "errors":[ + {"shape":"CacheParameterGroupNotFoundFault"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidParameterCombinationException"} + ] + }, + "DescribeCacheParameters":{ + "name":"DescribeCacheParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeCacheParametersMessage"}, + "output":{ + "shape":"CacheParameterGroupDetails", + "resultWrapper":"DescribeCacheParametersResult" + }, + "errors":[ + {"shape":"CacheParameterGroupNotFoundFault"}, + 
{"shape":"InvalidParameterValueException"}, + {"shape":"InvalidParameterCombinationException"} + ] + }, + "DescribeCacheSecurityGroups":{ + "name":"DescribeCacheSecurityGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeCacheSecurityGroupsMessage"}, + "output":{ + "shape":"CacheSecurityGroupMessage", + "resultWrapper":"DescribeCacheSecurityGroupsResult" + }, + "errors":[ + {"shape":"CacheSecurityGroupNotFoundFault"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidParameterCombinationException"} + ] + }, + "DescribeCacheSubnetGroups":{ + "name":"DescribeCacheSubnetGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeCacheSubnetGroupsMessage"}, + "output":{ + "shape":"CacheSubnetGroupMessage", + "resultWrapper":"DescribeCacheSubnetGroupsResult" + }, + "errors":[ + {"shape":"CacheSubnetGroupNotFoundFault"} + ] + }, + "DescribeEngineDefaultParameters":{ + "name":"DescribeEngineDefaultParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEngineDefaultParametersMessage"}, + "output":{ + "shape":"DescribeEngineDefaultParametersResult", + "resultWrapper":"DescribeEngineDefaultParametersResult" + }, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidParameterCombinationException"} + ] + }, + "DescribeEvents":{ + "name":"DescribeEvents", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventsMessage"}, + "output":{ + "shape":"EventsMessage", + "resultWrapper":"DescribeEventsResult" + }, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidParameterCombinationException"} + ] + }, + "DescribeReplicationGroups":{ + "name":"DescribeReplicationGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReplicationGroupsMessage"}, + "output":{ + "shape":"ReplicationGroupMessage", + "resultWrapper":"DescribeReplicationGroupsResult" + }, + "errors":[ + {"shape":"ReplicationGroupNotFoundFault"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidParameterCombinationException"} + ] + }, + "DescribeReservedCacheNodes":{ + "name":"DescribeReservedCacheNodes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedCacheNodesMessage"}, + "output":{ + "shape":"ReservedCacheNodeMessage", + "resultWrapper":"DescribeReservedCacheNodesResult" + }, + "errors":[ + {"shape":"ReservedCacheNodeNotFoundFault"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidParameterCombinationException"} + ] + }, + "DescribeReservedCacheNodesOfferings":{ + "name":"DescribeReservedCacheNodesOfferings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedCacheNodesOfferingsMessage"}, + "output":{ + "shape":"ReservedCacheNodesOfferingMessage", + "resultWrapper":"DescribeReservedCacheNodesOfferingsResult" + }, + "errors":[ + {"shape":"ReservedCacheNodesOfferingNotFoundFault"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidParameterCombinationException"} + ] + }, + "DescribeSnapshots":{ + "name":"DescribeSnapshots", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSnapshotsMessage"}, + "output":{ + "shape":"DescribeSnapshotsListMessage", + "resultWrapper":"DescribeSnapshotsResult" + }, + "errors":[ + {"shape":"CacheClusterNotFoundFault"}, + {"shape":"SnapshotNotFoundFault"}, + {"shape":"InvalidParameterValueException"}, + 
{"shape":"InvalidParameterCombinationException"} + ] + }, + "ListAllowedNodeTypeModifications":{ + "name":"ListAllowedNodeTypeModifications", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAllowedNodeTypeModificationsMessage"}, + "output":{ + "shape":"AllowedNodeTypeModificationsMessage", + "resultWrapper":"ListAllowedNodeTypeModificationsResult" + }, + "errors":[ + {"shape":"CacheClusterNotFoundFault"}, + {"shape":"ReplicationGroupNotFoundFault"}, + {"shape":"InvalidParameterCombinationException"}, + {"shape":"InvalidParameterValueException"} + ] + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceMessage"}, + "output":{ + "shape":"TagListMessage", + "resultWrapper":"ListTagsForResourceResult" + }, + "errors":[ + {"shape":"CacheClusterNotFoundFault"}, + {"shape":"SnapshotNotFoundFault"}, + {"shape":"InvalidARNFault"} + ] + }, + "ModifyCacheCluster":{ + "name":"ModifyCacheCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyCacheClusterMessage"}, + "output":{ + "shape":"ModifyCacheClusterResult", + "resultWrapper":"ModifyCacheClusterResult" + }, + "errors":[ + {"shape":"InvalidCacheClusterStateFault"}, + {"shape":"InvalidCacheSecurityGroupStateFault"}, + {"shape":"InsufficientCacheClusterCapacityFault"}, + {"shape":"CacheClusterNotFoundFault"}, + {"shape":"NodeQuotaForClusterExceededFault"}, + {"shape":"NodeQuotaForCustomerExceededFault"}, + {"shape":"CacheSecurityGroupNotFoundFault"}, + {"shape":"CacheParameterGroupNotFoundFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidParameterCombinationException"} + ] + }, + "ModifyCacheParameterGroup":{ + "name":"ModifyCacheParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyCacheParameterGroupMessage"}, + "output":{ + "shape":"CacheParameterGroupNameMessage", + "resultWrapper":"ModifyCacheParameterGroupResult" + }, + "errors":[ + {"shape":"CacheParameterGroupNotFoundFault"}, + {"shape":"InvalidCacheParameterGroupStateFault"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidParameterCombinationException"} + ] + }, + "ModifyCacheSubnetGroup":{ + "name":"ModifyCacheSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyCacheSubnetGroupMessage"}, + "output":{ + "shape":"ModifyCacheSubnetGroupResult", + "resultWrapper":"ModifyCacheSubnetGroupResult" + }, + "errors":[ + {"shape":"CacheSubnetGroupNotFoundFault"}, + {"shape":"CacheSubnetQuotaExceededFault"}, + {"shape":"SubnetInUse"}, + {"shape":"InvalidSubnet"} + ] + }, + "ModifyReplicationGroup":{ + "name":"ModifyReplicationGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyReplicationGroupMessage"}, + "output":{ + "shape":"ModifyReplicationGroupResult", + "resultWrapper":"ModifyReplicationGroupResult" + }, + "errors":[ + {"shape":"ReplicationGroupNotFoundFault"}, + {"shape":"InvalidReplicationGroupStateFault"}, + {"shape":"InvalidCacheClusterStateFault"}, + {"shape":"InvalidCacheSecurityGroupStateFault"}, + {"shape":"InsufficientCacheClusterCapacityFault"}, + {"shape":"CacheClusterNotFoundFault"}, + {"shape":"NodeQuotaForClusterExceededFault"}, + {"shape":"NodeQuotaForCustomerExceededFault"}, + {"shape":"CacheSecurityGroupNotFoundFault"}, + {"shape":"CacheParameterGroupNotFoundFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + 
{"shape":"InvalidParameterValueException"}, + {"shape":"InvalidParameterCombinationException"} + ] + }, + "PurchaseReservedCacheNodesOffering":{ + "name":"PurchaseReservedCacheNodesOffering", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PurchaseReservedCacheNodesOfferingMessage"}, + "output":{ + "shape":"PurchaseReservedCacheNodesOfferingResult", + "resultWrapper":"PurchaseReservedCacheNodesOfferingResult" + }, + "errors":[ + {"shape":"ReservedCacheNodesOfferingNotFoundFault"}, + {"shape":"ReservedCacheNodeAlreadyExistsFault"}, + {"shape":"ReservedCacheNodeQuotaExceededFault"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidParameterCombinationException"} + ] + }, + "RebootCacheCluster":{ + "name":"RebootCacheCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RebootCacheClusterMessage"}, + "output":{ + "shape":"RebootCacheClusterResult", + "resultWrapper":"RebootCacheClusterResult" + }, + "errors":[ + {"shape":"InvalidCacheClusterStateFault"}, + {"shape":"CacheClusterNotFoundFault"} + ] + }, + "RemoveTagsFromResource":{ + "name":"RemoveTagsFromResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveTagsFromResourceMessage"}, + "output":{ + "shape":"TagListMessage", + "resultWrapper":"RemoveTagsFromResourceResult" + }, + "errors":[ + {"shape":"CacheClusterNotFoundFault"}, + {"shape":"SnapshotNotFoundFault"}, + {"shape":"InvalidARNFault"}, + {"shape":"TagNotFoundFault"} + ] + }, + "ResetCacheParameterGroup":{ + "name":"ResetCacheParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetCacheParameterGroupMessage"}, + "output":{ + "shape":"CacheParameterGroupNameMessage", + "resultWrapper":"ResetCacheParameterGroupResult" + }, + "errors":[ + {"shape":"InvalidCacheParameterGroupStateFault"}, + {"shape":"CacheParameterGroupNotFoundFault"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidParameterCombinationException"} + ] + }, + "RevokeCacheSecurityGroupIngress":{ + "name":"RevokeCacheSecurityGroupIngress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RevokeCacheSecurityGroupIngressMessage"}, + "output":{ + "shape":"RevokeCacheSecurityGroupIngressResult", + "resultWrapper":"RevokeCacheSecurityGroupIngressResult" + }, + "errors":[ + {"shape":"CacheSecurityGroupNotFoundFault"}, + {"shape":"AuthorizationNotFoundFault"}, + {"shape":"InvalidCacheSecurityGroupStateFault"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidParameterCombinationException"} + ] + } + }, + "shapes":{ + "AZMode":{ + "type":"string", + "enum":[ + "single-az", + "cross-az" + ] + }, + "AddTagsToResourceMessage":{ + "type":"structure", + "required":[ + "ResourceName", + "Tags" + ], + "members":{ + "ResourceName":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "AllowedNodeTypeModificationsMessage":{ + "type":"structure", + "members":{ + "ScaleUpModifications":{"shape":"NodeTypeList"} + } + }, + "AuthorizationAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AuthorizationAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "AuthorizationNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AuthorizationNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "AuthorizeCacheSecurityGroupIngressMessage":{ + "type":"structure", + "required":[ + "CacheSecurityGroupName", + 
"EC2SecurityGroupName", + "EC2SecurityGroupOwnerId" + ], + "members":{ + "CacheSecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"} + } + }, + "AuthorizeCacheSecurityGroupIngressResult":{ + "type":"structure", + "members":{ + "CacheSecurityGroup":{"shape":"CacheSecurityGroup"} + } + }, + "AutomaticFailoverStatus":{ + "type":"string", + "enum":[ + "enabled", + "disabled", + "enabling", + "disabling" + ] + }, + "AvailabilityZone":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"} + }, + "wrapper":true + }, + "AvailabilityZonesList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"AvailabilityZone" + } + }, + "AwsQueryErrorMessage":{"type":"string"}, + "Boolean":{"type":"boolean"}, + "BooleanOptional":{"type":"boolean"}, + "CacheCluster":{ + "type":"structure", + "members":{ + "CacheClusterId":{"shape":"String"}, + "ConfigurationEndpoint":{"shape":"Endpoint"}, + "ClientDownloadLandingPage":{"shape":"String"}, + "CacheNodeType":{"shape":"String"}, + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "CacheClusterStatus":{"shape":"String"}, + "NumCacheNodes":{"shape":"IntegerOptional"}, + "PreferredAvailabilityZone":{"shape":"String"}, + "CacheClusterCreateTime":{"shape":"TStamp"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "PendingModifiedValues":{"shape":"PendingModifiedValues"}, + "NotificationConfiguration":{"shape":"NotificationConfiguration"}, + "CacheSecurityGroups":{"shape":"CacheSecurityGroupMembershipList"}, + "CacheParameterGroup":{"shape":"CacheParameterGroupStatus"}, + "CacheSubnetGroupName":{"shape":"String"}, + "CacheNodes":{"shape":"CacheNodeList"}, + "AutoMinorVersionUpgrade":{"shape":"Boolean"}, + "SecurityGroups":{"shape":"SecurityGroupMembershipList"}, + "ReplicationGroupId":{"shape":"String"}, + "SnapshotRetentionLimit":{"shape":"IntegerOptional"}, + "SnapshotWindow":{"shape":"String"} + }, + "wrapper":true + }, + "CacheClusterAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"CacheClusterAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "CacheClusterList":{ + "type":"list", + "member":{ + "shape":"CacheCluster", + "locationName":"CacheCluster" + } + }, + "CacheClusterMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "CacheClusters":{"shape":"CacheClusterList"} + } + }, + "CacheClusterNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"CacheClusterNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "CacheEngineVersion":{ + "type":"structure", + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "CacheParameterGroupFamily":{"shape":"String"}, + "CacheEngineDescription":{"shape":"String"}, + "CacheEngineVersionDescription":{"shape":"String"} + } + }, + "CacheEngineVersionList":{ + "type":"list", + "member":{ + "shape":"CacheEngineVersion", + "locationName":"CacheEngineVersion" + } + }, + "CacheEngineVersionMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "CacheEngineVersions":{"shape":"CacheEngineVersionList"} + } + }, + "CacheNode":{ + "type":"structure", + "members":{ + "CacheNodeId":{"shape":"String"}, + "CacheNodeStatus":{"shape":"String"}, + "CacheNodeCreateTime":{"shape":"TStamp"}, + "Endpoint":{"shape":"Endpoint"}, + "ParameterGroupStatus":{"shape":"String"}, + "SourceCacheNodeId":{"shape":"String"}, + 
"CustomerAvailabilityZone":{"shape":"String"} + } + }, + "CacheNodeIdsList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"CacheNodeId" + } + }, + "CacheNodeList":{ + "type":"list", + "member":{ + "shape":"CacheNode", + "locationName":"CacheNode" + } + }, + "CacheNodeTypeSpecificParameter":{ + "type":"structure", + "members":{ + "ParameterName":{"shape":"String"}, + "Description":{"shape":"String"}, + "Source":{"shape":"String"}, + "DataType":{"shape":"String"}, + "AllowedValues":{"shape":"String"}, + "IsModifiable":{"shape":"Boolean"}, + "MinimumEngineVersion":{"shape":"String"}, + "CacheNodeTypeSpecificValues":{"shape":"CacheNodeTypeSpecificValueList"}, + "ChangeType":{"shape":"ChangeType"} + } + }, + "CacheNodeTypeSpecificParametersList":{ + "type":"list", + "member":{ + "shape":"CacheNodeTypeSpecificParameter", + "locationName":"CacheNodeTypeSpecificParameter" + } + }, + "CacheNodeTypeSpecificValue":{ + "type":"structure", + "members":{ + "CacheNodeType":{"shape":"String"}, + "Value":{"shape":"String"} + } + }, + "CacheNodeTypeSpecificValueList":{ + "type":"list", + "member":{ + "shape":"CacheNodeTypeSpecificValue", + "locationName":"CacheNodeTypeSpecificValue" + } + }, + "CacheParameterGroup":{ + "type":"structure", + "members":{ + "CacheParameterGroupName":{"shape":"String"}, + "CacheParameterGroupFamily":{"shape":"String"}, + "Description":{"shape":"String"} + }, + "wrapper":true + }, + "CacheParameterGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"CacheParameterGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "CacheParameterGroupDetails":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "Parameters":{"shape":"ParametersList"}, + "CacheNodeTypeSpecificParameters":{"shape":"CacheNodeTypeSpecificParametersList"} + } + }, + "CacheParameterGroupList":{ + "type":"list", + "member":{ + "shape":"CacheParameterGroup", + "locationName":"CacheParameterGroup" + } + }, + "CacheParameterGroupNameMessage":{ + "type":"structure", + "members":{ + "CacheParameterGroupName":{"shape":"String"} + } + }, + "CacheParameterGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"CacheParameterGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "CacheParameterGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"CacheParameterGroupQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "CacheParameterGroupStatus":{ + "type":"structure", + "members":{ + "CacheParameterGroupName":{"shape":"String"}, + "ParameterApplyStatus":{"shape":"String"}, + "CacheNodeIdsToReboot":{"shape":"CacheNodeIdsList"} + } + }, + "CacheParameterGroupsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "CacheParameterGroups":{"shape":"CacheParameterGroupList"} + } + }, + "CacheSecurityGroup":{ + "type":"structure", + "members":{ + "OwnerId":{"shape":"String"}, + "CacheSecurityGroupName":{"shape":"String"}, + "Description":{"shape":"String"}, + "EC2SecurityGroups":{"shape":"EC2SecurityGroupList"} + }, + "wrapper":true + }, + "CacheSecurityGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"CacheSecurityGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "CacheSecurityGroupMembership":{ + "type":"structure", + "members":{ + 
"CacheSecurityGroupName":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "CacheSecurityGroupMembershipList":{ + "type":"list", + "member":{ + "shape":"CacheSecurityGroupMembership", + "locationName":"CacheSecurityGroup" + } + }, + "CacheSecurityGroupMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "CacheSecurityGroups":{"shape":"CacheSecurityGroups"} + } + }, + "CacheSecurityGroupNameList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"CacheSecurityGroupName" + } + }, + "CacheSecurityGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"CacheSecurityGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "CacheSecurityGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"QuotaExceeded.CacheSecurityGroup", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "CacheSecurityGroups":{ + "type":"list", + "member":{ + "shape":"CacheSecurityGroup", + "locationName":"CacheSecurityGroup" + } + }, + "CacheSubnetGroup":{ + "type":"structure", + "members":{ + "CacheSubnetGroupName":{"shape":"String"}, + "CacheSubnetGroupDescription":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "Subnets":{"shape":"SubnetList"} + }, + "wrapper":true + }, + "CacheSubnetGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"CacheSubnetGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "CacheSubnetGroupInUse":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"CacheSubnetGroupInUse", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "CacheSubnetGroupMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "CacheSubnetGroups":{"shape":"CacheSubnetGroups"} + } + }, + "CacheSubnetGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"CacheSubnetGroupNotFoundFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "CacheSubnetGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"CacheSubnetGroupQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "CacheSubnetGroups":{ + "type":"list", + "member":{ + "shape":"CacheSubnetGroup", + "locationName":"CacheSubnetGroup" + } + }, + "CacheSubnetQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"CacheSubnetQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ChangeType":{ + "type":"string", + "enum":[ + "immediate", + "requires-reboot" + ] + }, + "ClusterIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ClusterId" + } + }, + "ClusterQuotaForCustomerExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ClusterQuotaForCustomerExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "CopySnapshotMessage":{ + "type":"structure", + "required":[ + "SourceSnapshotName", + "TargetSnapshotName" + ], + "members":{ + "SourceSnapshotName":{"shape":"String"}, + "TargetSnapshotName":{"shape":"String"}, + "TargetBucket":{"shape":"String"} + } + }, + "CopySnapshotResult":{ + "type":"structure", + "members":{ + "Snapshot":{"shape":"Snapshot"} + } + }, + "CreateCacheClusterMessage":{ + "type":"structure", + "required":["CacheClusterId"], + "members":{ + 
"CacheClusterId":{"shape":"String"}, + "ReplicationGroupId":{"shape":"String"}, + "AZMode":{"shape":"AZMode"}, + "PreferredAvailabilityZone":{"shape":"String"}, + "PreferredAvailabilityZones":{"shape":"PreferredAvailabilityZoneList"}, + "NumCacheNodes":{"shape":"IntegerOptional"}, + "CacheNodeType":{"shape":"String"}, + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "CacheParameterGroupName":{"shape":"String"}, + "CacheSubnetGroupName":{"shape":"String"}, + "CacheSecurityGroupNames":{"shape":"CacheSecurityGroupNameList"}, + "SecurityGroupIds":{"shape":"SecurityGroupIdsList"}, + "Tags":{"shape":"TagList"}, + "SnapshotArns":{"shape":"SnapshotArnsList"}, + "SnapshotName":{"shape":"String"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "NotificationTopicArn":{"shape":"String"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "SnapshotRetentionLimit":{"shape":"IntegerOptional"}, + "SnapshotWindow":{"shape":"String"} + } + }, + "CreateCacheClusterResult":{ + "type":"structure", + "members":{ + "CacheCluster":{"shape":"CacheCluster"} + } + }, + "CreateCacheParameterGroupMessage":{ + "type":"structure", + "required":[ + "CacheParameterGroupName", + "CacheParameterGroupFamily", + "Description" + ], + "members":{ + "CacheParameterGroupName":{"shape":"String"}, + "CacheParameterGroupFamily":{"shape":"String"}, + "Description":{"shape":"String"} + } + }, + "CreateCacheParameterGroupResult":{ + "type":"structure", + "members":{ + "CacheParameterGroup":{"shape":"CacheParameterGroup"} + } + }, + "CreateCacheSecurityGroupMessage":{ + "type":"structure", + "required":[ + "CacheSecurityGroupName", + "Description" + ], + "members":{ + "CacheSecurityGroupName":{"shape":"String"}, + "Description":{"shape":"String"} + } + }, + "CreateCacheSecurityGroupResult":{ + "type":"structure", + "members":{ + "CacheSecurityGroup":{"shape":"CacheSecurityGroup"} + } + }, + "CreateCacheSubnetGroupMessage":{ + "type":"structure", + "required":[ + "CacheSubnetGroupName", + "CacheSubnetGroupDescription", + "SubnetIds" + ], + "members":{ + "CacheSubnetGroupName":{"shape":"String"}, + "CacheSubnetGroupDescription":{"shape":"String"}, + "SubnetIds":{"shape":"SubnetIdentifierList"} + } + }, + "CreateCacheSubnetGroupResult":{ + "type":"structure", + "members":{ + "CacheSubnetGroup":{"shape":"CacheSubnetGroup"} + } + }, + "CreateReplicationGroupMessage":{ + "type":"structure", + "required":[ + "ReplicationGroupId", + "ReplicationGroupDescription" + ], + "members":{ + "ReplicationGroupId":{"shape":"String"}, + "ReplicationGroupDescription":{"shape":"String"}, + "PrimaryClusterId":{"shape":"String"}, + "AutomaticFailoverEnabled":{"shape":"BooleanOptional"}, + "NumCacheClusters":{"shape":"IntegerOptional"}, + "PreferredCacheClusterAZs":{"shape":"AvailabilityZonesList"}, + "CacheNodeType":{"shape":"String"}, + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "CacheParameterGroupName":{"shape":"String"}, + "CacheSubnetGroupName":{"shape":"String"}, + "CacheSecurityGroupNames":{"shape":"CacheSecurityGroupNameList"}, + "SecurityGroupIds":{"shape":"SecurityGroupIdsList"}, + "Tags":{"shape":"TagList"}, + "SnapshotArns":{"shape":"SnapshotArnsList"}, + "SnapshotName":{"shape":"String"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "NotificationTopicArn":{"shape":"String"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "SnapshotRetentionLimit":{"shape":"IntegerOptional"}, + 
"SnapshotWindow":{"shape":"String"} + } + }, + "CreateReplicationGroupResult":{ + "type":"structure", + "members":{ + "ReplicationGroup":{"shape":"ReplicationGroup"} + } + }, + "CreateSnapshotMessage":{ + "type":"structure", + "required":[ + "CacheClusterId", + "SnapshotName" + ], + "members":{ + "CacheClusterId":{"shape":"String"}, + "SnapshotName":{"shape":"String"} + } + }, + "CreateSnapshotResult":{ + "type":"structure", + "members":{ + "Snapshot":{"shape":"Snapshot"} + } + }, + "DeleteCacheClusterMessage":{ + "type":"structure", + "required":["CacheClusterId"], + "members":{ + "CacheClusterId":{"shape":"String"}, + "FinalSnapshotIdentifier":{"shape":"String"} + } + }, + "DeleteCacheClusterResult":{ + "type":"structure", + "members":{ + "CacheCluster":{"shape":"CacheCluster"} + } + }, + "DeleteCacheParameterGroupMessage":{ + "type":"structure", + "required":["CacheParameterGroupName"], + "members":{ + "CacheParameterGroupName":{"shape":"String"} + } + }, + "DeleteCacheSecurityGroupMessage":{ + "type":"structure", + "required":["CacheSecurityGroupName"], + "members":{ + "CacheSecurityGroupName":{"shape":"String"} + } + }, + "DeleteCacheSubnetGroupMessage":{ + "type":"structure", + "required":["CacheSubnetGroupName"], + "members":{ + "CacheSubnetGroupName":{"shape":"String"} + } + }, + "DeleteReplicationGroupMessage":{ + "type":"structure", + "required":["ReplicationGroupId"], + "members":{ + "ReplicationGroupId":{"shape":"String"}, + "RetainPrimaryCluster":{"shape":"BooleanOptional"}, + "FinalSnapshotIdentifier":{"shape":"String"} + } + }, + "DeleteReplicationGroupResult":{ + "type":"structure", + "members":{ + "ReplicationGroup":{"shape":"ReplicationGroup"} + } + }, + "DeleteSnapshotMessage":{ + "type":"structure", + "required":["SnapshotName"], + "members":{ + "SnapshotName":{"shape":"String"} + } + }, + "DeleteSnapshotResult":{ + "type":"structure", + "members":{ + "Snapshot":{"shape":"Snapshot"} + } + }, + "DescribeCacheClustersMessage":{ + "type":"structure", + "members":{ + "CacheClusterId":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"}, + "ShowCacheNodeInfo":{"shape":"BooleanOptional"} + } + }, + "DescribeCacheEngineVersionsMessage":{ + "type":"structure", + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "CacheParameterGroupFamily":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"}, + "DefaultOnly":{"shape":"Boolean"} + } + }, + "DescribeCacheParameterGroupsMessage":{ + "type":"structure", + "members":{ + "CacheParameterGroupName":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeCacheParametersMessage":{ + "type":"structure", + "required":["CacheParameterGroupName"], + "members":{ + "CacheParameterGroupName":{"shape":"String"}, + "Source":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeCacheSecurityGroupsMessage":{ + "type":"structure", + "members":{ + "CacheSecurityGroupName":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeCacheSubnetGroupsMessage":{ + "type":"structure", + "members":{ + "CacheSubnetGroupName":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeEngineDefaultParametersMessage":{ + "type":"structure", + "required":["CacheParameterGroupFamily"], + "members":{ + 
"CacheParameterGroupFamily":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeEngineDefaultParametersResult":{ + "type":"structure", + "members":{ + "EngineDefaults":{"shape":"EngineDefaults"} + } + }, + "DescribeEventsMessage":{ + "type":"structure", + "members":{ + "SourceIdentifier":{"shape":"String"}, + "SourceType":{"shape":"SourceType"}, + "StartTime":{"shape":"TStamp"}, + "EndTime":{"shape":"TStamp"}, + "Duration":{"shape":"IntegerOptional"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeReplicationGroupsMessage":{ + "type":"structure", + "members":{ + "ReplicationGroupId":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeReservedCacheNodesMessage":{ + "type":"structure", + "members":{ + "ReservedCacheNodeId":{"shape":"String"}, + "ReservedCacheNodesOfferingId":{"shape":"String"}, + "CacheNodeType":{"shape":"String"}, + "Duration":{"shape":"String"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeReservedCacheNodesOfferingsMessage":{ + "type":"structure", + "members":{ + "ReservedCacheNodesOfferingId":{"shape":"String"}, + "CacheNodeType":{"shape":"String"}, + "Duration":{"shape":"String"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeSnapshotsListMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "Snapshots":{"shape":"SnapshotList"} + } + }, + "DescribeSnapshotsMessage":{ + "type":"structure", + "members":{ + "CacheClusterId":{"shape":"String"}, + "SnapshotName":{"shape":"String"}, + "SnapshotSource":{"shape":"String"}, + "Marker":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"} + } + }, + "Double":{"type":"double"}, + "EC2SecurityGroup":{ + "type":"structure", + "members":{ + "Status":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"} + } + }, + "EC2SecurityGroupList":{ + "type":"list", + "member":{ + "shape":"EC2SecurityGroup", + "locationName":"EC2SecurityGroup" + } + }, + "Endpoint":{ + "type":"structure", + "members":{ + "Address":{"shape":"String"}, + "Port":{"shape":"Integer"} + } + }, + "EngineDefaults":{ + "type":"structure", + "members":{ + "CacheParameterGroupFamily":{"shape":"String"}, + "Marker":{"shape":"String"}, + "Parameters":{"shape":"ParametersList"}, + "CacheNodeTypeSpecificParameters":{"shape":"CacheNodeTypeSpecificParametersList"} + }, + "wrapper":true + }, + "Event":{ + "type":"structure", + "members":{ + "SourceIdentifier":{"shape":"String"}, + "SourceType":{"shape":"SourceType"}, + "Message":{"shape":"String"}, + "Date":{"shape":"TStamp"} + } + }, + "EventList":{ + "type":"list", + "member":{ + "shape":"Event", + "locationName":"Event" + } + }, + "EventsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "Events":{"shape":"EventList"} + } + }, + "InsufficientCacheClusterCapacityFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InsufficientCacheClusterCapacity", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Integer":{"type":"integer"}, + "IntegerOptional":{"type":"integer"}, + "InvalidARNFault":{ + "type":"structure", + "members":{ + }, + "error":{ + 
"code":"InvalidARN", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidCacheClusterStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidCacheClusterState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidCacheParameterGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidCacheParameterGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidCacheSecurityGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidCacheSecurityGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidParameterCombinationException":{ + "type":"structure", + "members":{ + "message":{"shape":"AwsQueryErrorMessage"} + }, + "error":{ + "code":"InvalidParameterCombination", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidParameterValueException":{ + "type":"structure", + "members":{ + "message":{"shape":"AwsQueryErrorMessage"} + }, + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidReplicationGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidReplicationGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidSnapshotStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidSnapshotState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidSubnet":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidSubnet", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidVPCNetworkStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidVPCNetworkStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "KeyList":{ + "type":"list", + "member":{"shape":"String"} + }, + "ListAllowedNodeTypeModificationsMessage":{ + "type":"structure", + "members":{ + "CacheClusterId":{"shape":"String"}, + "ReplicationGroupId":{"shape":"String"} + } + }, + "ListTagsForResourceMessage":{ + "type":"structure", + "required":["ResourceName"], + "members":{ + "ResourceName":{"shape":"String"} + } + }, + "ModifyCacheClusterMessage":{ + "type":"structure", + "required":["CacheClusterId"], + "members":{ + "CacheClusterId":{"shape":"String"}, + "NumCacheNodes":{"shape":"IntegerOptional"}, + "CacheNodeIdsToRemove":{"shape":"CacheNodeIdsList"}, + "AZMode":{"shape":"AZMode"}, + "NewAvailabilityZones":{"shape":"PreferredAvailabilityZoneList"}, + "CacheSecurityGroupNames":{"shape":"CacheSecurityGroupNameList"}, + "SecurityGroupIds":{"shape":"SecurityGroupIdsList"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "NotificationTopicArn":{"shape":"String"}, + "CacheParameterGroupName":{"shape":"String"}, + "NotificationTopicStatus":{"shape":"String"}, + "ApplyImmediately":{"shape":"Boolean"}, + "EngineVersion":{"shape":"String"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "SnapshotRetentionLimit":{"shape":"IntegerOptional"}, + "SnapshotWindow":{"shape":"String"}, + "CacheNodeType":{"shape":"String"} + } + }, + "ModifyCacheClusterResult":{ + "type":"structure", + "members":{ + "CacheCluster":{"shape":"CacheCluster"} + } + }, + "ModifyCacheParameterGroupMessage":{ + "type":"structure", + "required":[ + "CacheParameterGroupName", + 
"ParameterNameValues" + ], + "members":{ + "CacheParameterGroupName":{"shape":"String"}, + "ParameterNameValues":{"shape":"ParameterNameValueList"} + } + }, + "ModifyCacheSubnetGroupMessage":{ + "type":"structure", + "required":["CacheSubnetGroupName"], + "members":{ + "CacheSubnetGroupName":{"shape":"String"}, + "CacheSubnetGroupDescription":{"shape":"String"}, + "SubnetIds":{"shape":"SubnetIdentifierList"} + } + }, + "ModifyCacheSubnetGroupResult":{ + "type":"structure", + "members":{ + "CacheSubnetGroup":{"shape":"CacheSubnetGroup"} + } + }, + "ModifyReplicationGroupMessage":{ + "type":"structure", + "required":["ReplicationGroupId"], + "members":{ + "ReplicationGroupId":{"shape":"String"}, + "ReplicationGroupDescription":{"shape":"String"}, + "PrimaryClusterId":{"shape":"String"}, + "SnapshottingClusterId":{"shape":"String"}, + "AutomaticFailoverEnabled":{"shape":"BooleanOptional"}, + "CacheSecurityGroupNames":{"shape":"CacheSecurityGroupNameList"}, + "SecurityGroupIds":{"shape":"SecurityGroupIdsList"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "NotificationTopicArn":{"shape":"String"}, + "CacheParameterGroupName":{"shape":"String"}, + "NotificationTopicStatus":{"shape":"String"}, + "ApplyImmediately":{"shape":"Boolean"}, + "EngineVersion":{"shape":"String"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "SnapshotRetentionLimit":{"shape":"IntegerOptional"}, + "SnapshotWindow":{"shape":"String"}, + "CacheNodeType":{"shape":"String"} + } + }, + "ModifyReplicationGroupResult":{ + "type":"structure", + "members":{ + "ReplicationGroup":{"shape":"ReplicationGroup"} + } + }, + "NodeGroup":{ + "type":"structure", + "members":{ + "NodeGroupId":{"shape":"String"}, + "Status":{"shape":"String"}, + "PrimaryEndpoint":{"shape":"Endpoint"}, + "NodeGroupMembers":{"shape":"NodeGroupMemberList"} + } + }, + "NodeGroupList":{ + "type":"list", + "member":{ + "shape":"NodeGroup", + "locationName":"NodeGroup" + } + }, + "NodeGroupMember":{ + "type":"structure", + "members":{ + "CacheClusterId":{"shape":"String"}, + "CacheNodeId":{"shape":"String"}, + "ReadEndpoint":{"shape":"Endpoint"}, + "PreferredAvailabilityZone":{"shape":"String"}, + "CurrentRole":{"shape":"String"} + } + }, + "NodeGroupMemberList":{ + "type":"list", + "member":{ + "shape":"NodeGroupMember", + "locationName":"NodeGroupMember" + } + }, + "NodeQuotaForClusterExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"NodeQuotaForClusterExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "NodeQuotaForCustomerExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"NodeQuotaForCustomerExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "NodeSnapshot":{ + "type":"structure", + "members":{ + "CacheNodeId":{"shape":"String"}, + "CacheSize":{"shape":"String"}, + "CacheNodeCreateTime":{"shape":"TStamp"}, + "SnapshotCreateTime":{"shape":"TStamp"} + }, + "wrapper":true + }, + "NodeSnapshotList":{ + "type":"list", + "member":{ + "shape":"NodeSnapshot", + "locationName":"NodeSnapshot" + } + }, + "NodeTypeList":{ + "type":"list", + "member":{"shape":"String"} + }, + "NotificationConfiguration":{ + "type":"structure", + "members":{ + "TopicArn":{"shape":"String"}, + "TopicStatus":{"shape":"String"} + } + }, + "Parameter":{ + "type":"structure", + "members":{ + "ParameterName":{"shape":"String"}, + "ParameterValue":{"shape":"String"}, + "Description":{"shape":"String"}, + "Source":{"shape":"String"}, + 
"DataType":{"shape":"String"}, + "AllowedValues":{"shape":"String"}, + "IsModifiable":{"shape":"Boolean"}, + "MinimumEngineVersion":{"shape":"String"}, + "ChangeType":{"shape":"ChangeType"} + } + }, + "ParameterNameValue":{ + "type":"structure", + "members":{ + "ParameterName":{"shape":"String"}, + "ParameterValue":{"shape":"String"} + } + }, + "ParameterNameValueList":{ + "type":"list", + "member":{ + "shape":"ParameterNameValue", + "locationName":"ParameterNameValue" + } + }, + "ParametersList":{ + "type":"list", + "member":{ + "shape":"Parameter", + "locationName":"Parameter" + } + }, + "PendingAutomaticFailoverStatus":{ + "type":"string", + "enum":[ + "enabled", + "disabled" + ] + }, + "PendingModifiedValues":{ + "type":"structure", + "members":{ + "NumCacheNodes":{"shape":"IntegerOptional"}, + "CacheNodeIdsToRemove":{"shape":"CacheNodeIdsList"}, + "EngineVersion":{"shape":"String"}, + "CacheNodeType":{"shape":"String"} + } + }, + "PreferredAvailabilityZoneList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"PreferredAvailabilityZone" + } + }, + "PurchaseReservedCacheNodesOfferingMessage":{ + "type":"structure", + "required":["ReservedCacheNodesOfferingId"], + "members":{ + "ReservedCacheNodesOfferingId":{"shape":"String"}, + "ReservedCacheNodeId":{"shape":"String"}, + "CacheNodeCount":{"shape":"IntegerOptional"} + } + }, + "PurchaseReservedCacheNodesOfferingResult":{ + "type":"structure", + "members":{ + "ReservedCacheNode":{"shape":"ReservedCacheNode"} + } + }, + "RebootCacheClusterMessage":{ + "type":"structure", + "required":[ + "CacheClusterId", + "CacheNodeIdsToReboot" + ], + "members":{ + "CacheClusterId":{"shape":"String"}, + "CacheNodeIdsToReboot":{"shape":"CacheNodeIdsList"} + } + }, + "RebootCacheClusterResult":{ + "type":"structure", + "members":{ + "CacheCluster":{"shape":"CacheCluster"} + } + }, + "RecurringCharge":{ + "type":"structure", + "members":{ + "RecurringChargeAmount":{"shape":"Double"}, + "RecurringChargeFrequency":{"shape":"String"} + }, + "wrapper":true + }, + "RecurringChargeList":{ + "type":"list", + "member":{ + "shape":"RecurringCharge", + "locationName":"RecurringCharge" + } + }, + "RemoveTagsFromResourceMessage":{ + "type":"structure", + "required":[ + "ResourceName", + "TagKeys" + ], + "members":{ + "ResourceName":{"shape":"String"}, + "TagKeys":{"shape":"KeyList"} + } + }, + "ReplicationGroup":{ + "type":"structure", + "members":{ + "ReplicationGroupId":{"shape":"String"}, + "Description":{"shape":"String"}, + "Status":{"shape":"String"}, + "PendingModifiedValues":{"shape":"ReplicationGroupPendingModifiedValues"}, + "MemberClusters":{"shape":"ClusterIdList"}, + "NodeGroups":{"shape":"NodeGroupList"}, + "SnapshottingClusterId":{"shape":"String"}, + "AutomaticFailover":{"shape":"AutomaticFailoverStatus"} + }, + "wrapper":true + }, + "ReplicationGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReplicationGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ReplicationGroupList":{ + "type":"list", + "member":{ + "shape":"ReplicationGroup", + "locationName":"ReplicationGroup" + } + }, + "ReplicationGroupMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ReplicationGroups":{"shape":"ReplicationGroupList"} + } + }, + "ReplicationGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReplicationGroupNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + 
"ReplicationGroupPendingModifiedValues":{ + "type":"structure", + "members":{ + "PrimaryClusterId":{"shape":"String"}, + "AutomaticFailoverStatus":{"shape":"PendingAutomaticFailoverStatus"} + } + }, + "ReservedCacheNode":{ + "type":"structure", + "members":{ + "ReservedCacheNodeId":{"shape":"String"}, + "ReservedCacheNodesOfferingId":{"shape":"String"}, + "CacheNodeType":{"shape":"String"}, + "StartTime":{"shape":"TStamp"}, + "Duration":{"shape":"Integer"}, + "FixedPrice":{"shape":"Double"}, + "UsagePrice":{"shape":"Double"}, + "CacheNodeCount":{"shape":"Integer"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "State":{"shape":"String"}, + "RecurringCharges":{"shape":"RecurringChargeList"} + }, + "wrapper":true + }, + "ReservedCacheNodeAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedCacheNodeAlreadyExists", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ReservedCacheNodeList":{ + "type":"list", + "member":{ + "shape":"ReservedCacheNode", + "locationName":"ReservedCacheNode" + } + }, + "ReservedCacheNodeMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ReservedCacheNodes":{"shape":"ReservedCacheNodeList"} + } + }, + "ReservedCacheNodeNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedCacheNodeNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ReservedCacheNodeQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedCacheNodeQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ReservedCacheNodesOffering":{ + "type":"structure", + "members":{ + "ReservedCacheNodesOfferingId":{"shape":"String"}, + "CacheNodeType":{"shape":"String"}, + "Duration":{"shape":"Integer"}, + "FixedPrice":{"shape":"Double"}, + "UsagePrice":{"shape":"Double"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "RecurringCharges":{"shape":"RecurringChargeList"} + }, + "wrapper":true + }, + "ReservedCacheNodesOfferingList":{ + "type":"list", + "member":{ + "shape":"ReservedCacheNodesOffering", + "locationName":"ReservedCacheNodesOffering" + } + }, + "ReservedCacheNodesOfferingMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ReservedCacheNodesOfferings":{"shape":"ReservedCacheNodesOfferingList"} + } + }, + "ReservedCacheNodesOfferingNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedCacheNodesOfferingNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ResetCacheParameterGroupMessage":{ + "type":"structure", + "required":["CacheParameterGroupName"], + "members":{ + "CacheParameterGroupName":{"shape":"String"}, + "ResetAllParameters":{"shape":"Boolean"}, + "ParameterNameValues":{"shape":"ParameterNameValueList"} + } + }, + "RevokeCacheSecurityGroupIngressMessage":{ + "type":"structure", + "required":[ + "CacheSecurityGroupName", + "EC2SecurityGroupName", + "EC2SecurityGroupOwnerId" + ], + "members":{ + "CacheSecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"} + } + }, + "RevokeCacheSecurityGroupIngressResult":{ + "type":"structure", + "members":{ + "CacheSecurityGroup":{"shape":"CacheSecurityGroup"} + } + }, + "SecurityGroupIdsList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SecurityGroupId" + } + }, 
+ "SecurityGroupMembership":{ + "type":"structure", + "members":{ + "SecurityGroupId":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "SecurityGroupMembershipList":{ + "type":"list", + "member":{"shape":"SecurityGroupMembership"} + }, + "Snapshot":{ + "type":"structure", + "members":{ + "SnapshotName":{"shape":"String"}, + "CacheClusterId":{"shape":"String"}, + "SnapshotStatus":{"shape":"String"}, + "SnapshotSource":{"shape":"String"}, + "CacheNodeType":{"shape":"String"}, + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "NumCacheNodes":{"shape":"IntegerOptional"}, + "PreferredAvailabilityZone":{"shape":"String"}, + "CacheClusterCreateTime":{"shape":"TStamp"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "TopicArn":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "CacheParameterGroupName":{"shape":"String"}, + "CacheSubnetGroupName":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "AutoMinorVersionUpgrade":{"shape":"Boolean"}, + "SnapshotRetentionLimit":{"shape":"IntegerOptional"}, + "SnapshotWindow":{"shape":"String"}, + "NodeSnapshots":{"shape":"NodeSnapshotList"} + }, + "wrapper":true + }, + "SnapshotAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SnapshotAlreadyExistsFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SnapshotArnsList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SnapshotArn" + } + }, + "SnapshotFeatureNotSupportedFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SnapshotFeatureNotSupportedFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SnapshotList":{ + "type":"list", + "member":{ + "shape":"Snapshot", + "locationName":"Snapshot" + } + }, + "SnapshotNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SnapshotNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SnapshotQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SnapshotQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SourceType":{ + "type":"string", + "enum":[ + "cache-cluster", + "cache-parameter-group", + "cache-security-group", + "cache-subnet-group" + ] + }, + "String":{"type":"string"}, + "Subnet":{ + "type":"structure", + "members":{ + "SubnetIdentifier":{"shape":"String"}, + "SubnetAvailabilityZone":{"shape":"AvailabilityZone"} + } + }, + "SubnetIdentifierList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SubnetIdentifier" + } + }, + "SubnetInUse":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubnetInUse", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SubnetList":{ + "type":"list", + "member":{ + "shape":"Subnet", + "locationName":"Subnet" + } + }, + "TStamp":{"type":"timestamp"}, + "Tag":{ + "type":"structure", + "members":{ + "Key":{"shape":"String"}, + "Value":{"shape":"String"} + } + }, + "TagList":{ + "type":"list", + "member":{ + "shape":"Tag", + "locationName":"Tag" + } + }, + "TagListMessage":{ + "type":"structure", + "members":{ + "TagList":{"shape":"TagList"} + } + }, + "TagNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"TagNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "TagQuotaPerResourceExceeded":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"TagQuotaPerResourceExceeded", + 
"httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/elasticache/2015-02-02/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticache/2015-02-02/docs-2.json new file mode 100644 index 000000000..6d0a35025 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticache/2015-02-02/docs-2.json @@ -0,0 +1,1421 @@ +{ + "version": "2.0", + "service": "Amazon ElastiCache

    Amazon ElastiCache is a web service that makes it easier to set up, operate, and scale a distributed cache in the cloud.

    With ElastiCache, customers gain all of the benefits of a high-performance, in-memory cache with far less of the administrative burden of launching and managing a distributed cache. The service makes setup, scaling, and cluster failure handling much simpler than in a self-managed cache deployment.

    In addition, through integration with Amazon CloudWatch, customers get enhanced visibility into the key performance statistics associated with their cache and can receive alarms if a part of their cache runs hot.

    ", + "operations": { + "AddTagsToResource": "

    The AddTagsToResource action adds up to 10 cost allocation tags to the named resource. A cost allocation tag is a key-value pair where the key and value are case-sensitive. Cost allocation tags can be used to categorize and track your AWS costs.

    When you apply tags to your ElastiCache resources, AWS generates a cost allocation report as a comma-separated value (CSV) file with your usage and costs aggregated by your tags. You can apply tags that represent business categories (such as cost centers, application names, or owners) to organize your costs across multiple services. For more information, see Using Cost Allocation Tags in Amazon ElastiCache in the ElastiCache User Guide.

    ", + "AuthorizeCacheSecurityGroupIngress": "

    The AuthorizeCacheSecurityGroupIngress action allows network ingress to a cache security group. Applications using ElastiCache must be running on Amazon EC2, and Amazon EC2 security groups are used as the authorization mechanism.

    You cannot authorize ingress from an Amazon EC2 security group in one region to an ElastiCache cluster in another region.

    ", + "CopySnapshot": "

    The CopySnapshot action makes a copy of an existing snapshot.

    Users or groups that have permissions to use the CopySnapshot API can create their own Amazon S3 buckets and copy snapshots to it. To control access to your snapshots, use an IAM policy to control who has the ability to use the CopySnapshot API. For more information about using IAM to control the use of ElastiCache APIs, see Exporting Snapshots and Authentication & Access Control.

    Erorr Message:

    • Error Message: The authenticated user does not have sufficient permissions to perform the desired activity.

      Solution: Contact your system administrator to get the needed permissions.

    ", + "CreateCacheCluster": "

    The CreateCacheCluster action creates a cache cluster. All nodes in the cache cluster run the same protocol-compliant cache engine software, either Memcached or Redis.

    ", + "CreateCacheParameterGroup": "

    The CreateCacheParameterGroup action creates a new cache parameter group. A cache parameter group is a collection of parameters that you apply to all of the nodes in a cache cluster.

    ", + "CreateCacheSecurityGroup": "

    The CreateCacheSecurityGroup action creates a new cache security group. Use a cache security group to control access to one or more cache clusters.

    Cache security groups are only used when you are creating a cache cluster outside of an Amazon Virtual Private Cloud (VPC). If you are creating a cache cluster inside of a VPC, use a cache subnet group instead. For more information, see CreateCacheSubnetGroup.

    ", + "CreateCacheSubnetGroup": "

    The CreateCacheSubnetGroup action creates a new cache subnet group.

    Use this parameter only when you are creating a cluster in an Amazon Virtual Private Cloud (VPC).

    ", + "CreateReplicationGroup": "

    The CreateReplicationGroup action creates a replication group. A replication group is a collection of cache clusters, where one of the cache clusters is a read/write primary and the others are read-only replicas. Writes to the primary are automatically propagated to the replicas.

    When you create a replication group, you must specify an existing cache cluster that is in the primary role. When the replication group has been successfully created, you can add one or more read replicas to it, up to a total of five read replicas.

    This action is valid only for Redis.
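    A sketch of the existing-primary path described above (group ID, description, and cluster ID are placeholders):

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.Must(session.NewSession(
		&aws.Config{Region: aws.String("us-west-2")})))

	// Promote an existing Redis cluster to the primary role of a new group;
	// read replicas can then be added up to the five-replica limit.
	_, err := svc.CreateReplicationGroup(&elasticache.CreateReplicationGroupInput{
		ReplicationGroupId:          aws.String("my-repl-group"), // placeholder
		ReplicationGroupDescription: aws.String("primary plus read replicas"),
		PrimaryClusterId:            aws.String("my-redis"), // existing primary
	})
	if err != nil {
		log.Fatal(err)
	}
}
```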

    ", + "CreateSnapshot": "

    The CreateSnapshot action creates a copy of an entire cache cluster at a specific moment in time.

    ", + "DeleteCacheCluster": "

    The DeleteCacheCluster action deletes a previously provisioned cache cluster. DeleteCacheCluster deletes all associated cache nodes, node endpoints and the cache cluster itself. When you receive a successful response from this action, Amazon ElastiCache immediately begins deleting the cache cluster; you cannot cancel or revert this action.

    This API cannot be used to delete a cache cluster that is the last read replica of a replication group that has Multi-AZ mode enabled.

    ", + "DeleteCacheParameterGroup": "

    The DeleteCacheParameterGroup action deletes the specified cache parameter group. You cannot delete a cache parameter group if it is associated with any cache clusters.

    ", + "DeleteCacheSecurityGroup": "

    The DeleteCacheSecurityGroup action deletes a cache security group.

    You cannot delete a cache security group if it is associated with any cache clusters.

    ", + "DeleteCacheSubnetGroup": "

    The DeleteCacheSubnetGroup action deletes a cache subnet group.

    You cannot delete a cache subnet group if it is associated with any cache clusters.

    ", + "DeleteReplicationGroup": "

    The DeleteReplicationGroup action deletes an existing replication group. By default, this action deletes the entire replication group, including the primary cluster and all of the read replicas. You can optionally delete only the read replicas, while retaining the primary cluster.

    When you receive a successful response from this action, Amazon ElastiCache immediately begins deleting the selected resources; you cannot cancel or revert this action.
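    A sketch of deleting only the read replicas while keeping the primary (the group ID is a placeholder):

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.Must(session.NewSession(
		&aws.Config{Region: aws.String("us-west-2")})))

	// Delete only the read replicas, keeping the primary as a standalone cluster.
	_, err := svc.DeleteReplicationGroup(&elasticache.DeleteReplicationGroupInput{
		ReplicationGroupId:   aws.String("my-repl-group"), // placeholder
		RetainPrimaryCluster: aws.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```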

    ", + "DeleteSnapshot": "

    The DeleteSnapshot action deletes an existing snapshot. When you receive a successful response from this action, ElastiCache immediately begins deleting the snapshot; you cannot cancel or revert this action.

    ", + "DescribeCacheClusters": "

    The DescribeCacheClusters action returns information about all provisioned cache clusters if no cache cluster identifier is specified, or about a specific cache cluster if a cache cluster identifier is supplied.

    By default, abbreviated information about the cache cluster(s) will be returned. You can use the optional ShowCacheNodeInfo flag to retrieve detailed information about the cache nodes associated with the cache clusters. These details include the DNS address and port for the cache node endpoint.

    If the cluster is in the CREATING state, only cluster level information will be displayed until all of the nodes are successfully provisioned.

    If the cluster is in the DELETING state, only cluster level information will be displayed.

    If cache nodes are currently being added to the cache cluster, node endpoint information and creation time for the additional nodes will not be displayed until they are completely provisioned. When the cache cluster state is available, the cluster is ready for use.

    If cache nodes are currently being removed from the cache cluster, no endpoint information for the removed nodes is displayed.
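    A sketch that requests the per-node detail described above and prints each node's endpoint; the cluster ID and region are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.Must(session.NewSession(
		&aws.Config{Region: aws.String("us-west-2")})))

	// Ask for per-node detail so each node's endpoint is populated.
	out, err := svc.DescribeCacheClusters(&elasticache.DescribeCacheClustersInput{
		CacheClusterId:    aws.String("my-cluster"), // placeholder
		ShowCacheNodeInfo: aws.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, cc := range out.CacheClusters {
		for _, n := range cc.CacheNodes {
			if n.Endpoint != nil { // nil until the node is fully provisioned
				fmt.Printf("%s %s:%d\n", aws.StringValue(n.CacheNodeId),
					aws.StringValue(n.Endpoint.Address), aws.Int64Value(n.Endpoint.Port))
			}
		}
	}
}
```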

    ", + "DescribeCacheEngineVersions": "

    The DescribeCacheEngineVersions action returns a list of the available cache engines and their versions.

    ", + "DescribeCacheParameterGroups": "

    The DescribeCacheParameterGroups action returns a list of cache parameter group descriptions. If a cache parameter group name is specified, the list will contain only the descriptions for that group.

    ", + "DescribeCacheParameters": "

    The DescribeCacheParameters action returns the detailed parameter list for a particular cache parameter group.

    ", + "DescribeCacheSecurityGroups": "

    The DescribeCacheSecurityGroups action returns a list of cache security group descriptions. If a cache security group name is specified, the list will contain only the description of that group.

    ", + "DescribeCacheSubnetGroups": "

    The DescribeCacheSubnetGroups action returns a list of cache subnet group descriptions. If a subnet group name is specified, the list will contain only the description of that group.

    ", + "DescribeEngineDefaultParameters": "

    The DescribeEngineDefaultParameters action returns the default engine and system parameter information for the specified cache engine.

    ", + "DescribeEvents": "

    The DescribeEvents action returns events related to cache clusters, cache security groups, and cache parameter groups. You can obtain events specific to a particular cache cluster, cache security group, or cache parameter group by providing the name as a parameter.

    By default, only the events occurring within the last hour are returned; however, you can retrieve up to 14 days' worth of events if necessary.
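    A sketch widening the default one-hour window to the last 24 hours of cache cluster events (parameter values are illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.Must(session.NewSession(
		&aws.Config{Region: aws.String("us-west-2")})))

	// Widen the window from the default hour to the last 24 hours (in minutes).
	out, err := svc.DescribeEvents(&elasticache.DescribeEventsInput{
		SourceType: aws.String("cache-cluster"),
		Duration:   aws.Int64(1440),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, ev := range out.Events {
		fmt.Println(ev.Date, aws.StringValue(ev.Message))
	}
}
```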

    ", + "DescribeReplicationGroups": "

    The DescribeReplicationGroups action returns information about a particular replication group. If no identifier is specified, DescribeReplicationGroups returns information about all replication groups.

    ", + "DescribeReservedCacheNodes": "

    The DescribeReservedCacheNodes action returns information about reserved cache nodes for this account, or about a specified reserved cache node.

    ", + "DescribeReservedCacheNodesOfferings": "

    The DescribeReservedCacheNodesOfferings action lists available reserved cache node offerings.

    ", + "DescribeSnapshots": "

    The DescribeSnapshots action returns information about cache cluster snapshots. By default, DescribeSnapshots lists all of your snapshots; it can optionally describe a single snapshot, or just the snapshots associated with a particular cache cluster.

    ", + "ListAllowedNodeTypeModifications": "

    The ListAllowedNodeTypeModifications action lists all available node types that you can scale your Redis cluster's or replication group's current node type up to.

    When you use the ModifyCacheCluster or ModifyReplicationGroup APIs to scale up your cluster or replication group, the value of the CacheNodeType parameter must be one of the node types returned by this action.
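    A sketch of querying the allowed scale-up targets for a replication group before a modify call (the group ID is a placeholder):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.Must(session.NewSession(
		&aws.Config{Region: aws.String("us-west-2")})))

	out, err := svc.ListAllowedNodeTypeModifications(&elasticache.ListAllowedNodeTypeModificationsInput{
		ReplicationGroupId: aws.String("my-repl-group"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	// Any of these values is a legal CacheNodeType for a scale-up request.
	fmt.Println(aws.StringValueSlice(out.ScaleUpModifications))
}
```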

    ", + "ListTagsForResource": "

    The ListTagsForResource action lists all cost allocation tags currently on the named resource. A cost allocation tag is a key-value pair where the key is case-sensitive and the value is optional. Cost allocation tags can be used to categorize and track your AWS costs.

    You can have a maximum of 10 cost allocation tags on an ElastiCache resource. For more information, see Using Cost Allocation Tags in Amazon ElastiCache.

    ", + "ModifyCacheCluster": "

    The ModifyCacheCluster action modifies the settings for a cache cluster. You can use this action to change one or more cluster configuration parameters by specifying the parameters and the new values.

    ", + "ModifyCacheParameterGroup": "

    The ModifyCacheParameterGroup action modifies the parameters of a cache parameter group. You can modify up to 20 parameters in a single request by submitting a list of parameter name and value pairs.
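    A sketch updating one parameter; the group name and the maxmemory-policy value are illustrative:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.Must(session.NewSession(
		&aws.Config{Region: aws.String("us-west-2")})))

	// Update one parameter; up to 20 name/value pairs may go in a single call.
	_, err := svc.ModifyCacheParameterGroup(&elasticache.ModifyCacheParameterGroupInput{
		CacheParameterGroupName: aws.String("my-redis-params"), // placeholder
		ParameterNameValues: []*elasticache.ParameterNameValue{
			{ParameterName: aws.String("maxmemory-policy"), ParameterValue: aws.String("allkeys-lru")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```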

    ", + "ModifyCacheSubnetGroup": "

    The ModifyCacheSubnetGroup action modifies an existing cache subnet group.

    ", + "ModifyReplicationGroup": "

    The ModifyReplicationGroup action modifies the settings for a replication group.

    ", + "PurchaseReservedCacheNodesOffering": "

    The PurchaseReservedCacheNodesOffering action allows you to purchase a reserved cache node offering.

    ", + "RebootCacheCluster": "

    The RebootCacheCluster action reboots some, or all, of the cache nodes within a provisioned cache cluster. This API will apply any modified cache parameter groups to the cache cluster. The reboot action takes place as soon as possible, and results in a momentary outage to the cache cluster. During the reboot, the cache cluster status is set to REBOOTING.

    The reboot causes the contents of the cache (for each cache node being rebooted) to be lost.

    When the reboot is complete, a cache cluster event is created.
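    A sketch rebooting two specific nodes of a cluster (cluster and node IDs are placeholders):

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.Must(session.NewSession(
		&aws.Config{Region: aws.String("us-west-2")})))

	// Reboot two specific nodes; list every node ID to reboot the whole cluster.
	_, err := svc.RebootCacheCluster(&elasticache.RebootCacheClusterInput{
		CacheClusterId:       aws.String("my-cluster"), // placeholder
		CacheNodeIdsToReboot: aws.StringSlice([]string{"0001", "0002"}),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```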

    ", + "RemoveTagsFromResource": "

    The RemoveTagsFromResource action removes the tags identified by the TagKeys list from the named resource.

    ", + "ResetCacheParameterGroup": "

    The ResetCacheParameterGroup action modifies the parameters of a cache parameter group to the engine or system default value. You can reset specific parameters by submitting a list of parameter names. To reset the entire cache parameter group, specify the ResetAllParameters and CacheParameterGroupName parameters.
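    A sketch of the named-parameter path (group and parameter names are placeholders):

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.Must(session.NewSession(
		&aws.Config{Region: aws.String("us-west-2")})))

	// Reset a single named parameter back to its default value.
	_, err := svc.ResetCacheParameterGroup(&elasticache.ResetCacheParameterGroupInput{
		CacheParameterGroupName: aws.String("my-redis-params"), // placeholder
		ResetAllParameters:      aws.Bool(false),
		ParameterNameValues: []*elasticache.ParameterNameValue{
			{ParameterName: aws.String("maxmemory-policy")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```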

    ", + "RevokeCacheSecurityGroupIngress": "

    The RevokeCacheSecurityGroupIngress action revokes ingress from a cache security group. Use this action to disallow access from an Amazon EC2 security group that had been previously authorized.

    " + }, + "shapes": { + "AZMode": { + "base": null, + "refs": { + "CreateCacheClusterMessage$AZMode": "

    Specifies whether the nodes in this Memcached node group are created in a single Availability Zone or created across multiple Availability Zones in the cluster's region.

    This parameter is only supported for Memcached cache clusters.

    If the AZMode and PreferredAvailabilityZones are not specified, ElastiCache assumes single-az mode.

    ", + "ModifyCacheClusterMessage$AZMode": "

    Specifies whether the new nodes in this Memcached cache cluster are all created in a single Availability Zone or created across multiple Availability Zones.

    Valid values: single-az | cross-az.

    This option is only supported for Memcached cache clusters.

    You cannot specify single-az if the Memcached cache cluster already has cache nodes in different Availability Zones. If cross-az is specified, existing Memcached nodes remain in their current Availability Zone.

    Only newly created nodes will be located in different Availability Zones. For instructions on how to move existing Memcached nodes to different Availability Zones, see the Availability Zone Considerations section of Cache Node Considerations for Memcached.

    " + } + }, + "AddTagsToResourceMessage": { + "base": "

    Represents the input of an AddTagsToResource action.

    ", + "refs": { + } + }, + "AllowedNodeTypeModificationsMessage": { + "base": "

    Represents the allowed node types you can use to modify your cache cluster or replication group.

    ", + "refs": { + } + }, + "AuthorizationAlreadyExistsFault": { + "base": "

    The specified Amazon EC2 security group is already authorized for the specified cache security group.

    ", + "refs": { + } + }, + "AuthorizationNotFoundFault": { + "base": "

    The specified Amazon EC2 security group is not authorized for the specified cache security group.

    ", + "refs": { + } + }, + "AuthorizeCacheSecurityGroupIngressMessage": { + "base": "

    Represents the input of an AuthorizeCacheSecurityGroupIngress action.

    ", + "refs": { + } + }, + "AuthorizeCacheSecurityGroupIngressResult": { + "base": null, + "refs": { + } + }, + "AutomaticFailoverStatus": { + "base": null, + "refs": { + "ReplicationGroup$AutomaticFailover": "

    Indicates the status of Multi-AZ for this replication group.

    ElastiCache Multi-AZ replication groups are not supported on:

    • Redis versions earlier than 2.8.6.

    • T1 and T2 cache node types.

    " + } + }, + "AvailabilityZone": { + "base": "

    Describes an Availability Zone in which the cache cluster is launched.

    ", + "refs": { + "Subnet$SubnetAvailabilityZone": "

    The Availability Zone associated with the subnet.

    " + } + }, + "AvailabilityZonesList": { + "base": null, + "refs": { + "CreateReplicationGroupMessage$PreferredCacheClusterAZs": "

    A list of EC2 availability zones in which the replication group's cache clusters will be created. The order of the availability zones in the list is not important.

    If you are creating your replication group in an Amazon VPC (recommended), you can only locate cache clusters in availability zones associated with the subnets in the selected subnet group.

    The number of availability zones listed must equal the value of NumCacheClusters.

    Default: system chosen availability zones.

    Example: One Redis cache cluster in each of three availability zones.

    PreferredAvailabilityZones.member.1=us-west-2a PreferredAvailabilityZones.member.2=us-west-2b PreferredAvailabilityZones.member.3=us-west-2c

    " + } + }, + "AwsQueryErrorMessage": { + "base": null, + "refs": { + "InvalidParameterCombinationException$message": "

    Two or more parameters that must not be used together were used together.

    ", + "InvalidParameterValueException$message": "

    A parameter value is invalid.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "CacheCluster$AutoMinorVersionUpgrade": "

    This parameter is currently disabled.

    ", + "CacheNodeTypeSpecificParameter$IsModifiable": "

    Indicates whether (true) or not (false) the parameter can be modified. Some parameters have security or operational implications that prevent them from being changed.

    ", + "DescribeCacheEngineVersionsMessage$DefaultOnly": "

    If true, specifies that only the default version of the specified engine or engine and major version combination is to be returned.

    ", + "ModifyCacheClusterMessage$ApplyImmediately": "

    If true, this parameter causes the modifications in this request and any pending modifications to be applied, asynchronously and as soon as possible, regardless of the PreferredMaintenanceWindow setting for the cache cluster.

    If false, then changes to the cache cluster are applied on the next maintenance reboot, or the next failure reboot, whichever occurs first.

    If you perform a ModifyCacheCluster before a pending modification is applied, the pending modification is replaced by the newer modification.

    Valid values: true | false

    Default: false

    ", + "ModifyReplicationGroupMessage$ApplyImmediately": "

    If true, this parameter causes the modifications in this request and any pending modifications to be applied, asynchronously and as soon as possible, regardless of the PreferredMaintenanceWindow setting for the replication group.

    If false, then changes to the nodes in the replication group are applied on the next maintenance reboot, or the next failure reboot, whichever occurs first.

    Valid values: true | false

    Default: false

    ", + "Parameter$IsModifiable": "

    Indicates whether (true) or not (false) the parameter can be modified. Some parameters have security or operational implications that prevent them from being changed.

    ", + "ResetCacheParameterGroupMessage$ResetAllParameters": "

    If true, all parameters in the cache parameter group will be reset to their default values. If false, only the parameters listed by ParameterNameValues are reset to their default values.

    Valid values: true | false

    ", + "Snapshot$AutoMinorVersionUpgrade": "

    This parameter is currently disabled.

    " + } + }, + "BooleanOptional": { + "base": null, + "refs": { + "CreateCacheClusterMessage$AutoMinorVersionUpgrade": "

    This parameter is currently disabled.

    ", + "CreateReplicationGroupMessage$AutomaticFailoverEnabled": "

    Specifies whether a read-only replica will be automatically promoted to read/write primary if the existing primary fails.

    If true, Multi-AZ is enabled for this replication group. If false, Multi-AZ is disabled for this replication group.

    Default: false

    ElastiCache Multi-AZ replication groups are not supported on:

    • Redis versions earlier than 2.8.6.

    • T1 and T2 cache node types.

    ", + "CreateReplicationGroupMessage$AutoMinorVersionUpgrade": "

    This parameter is currently disabled.

    ", + "DeleteReplicationGroupMessage$RetainPrimaryCluster": "

    If set to true, all of the read replicas will be deleted, but the primary node will be retained.

    ", + "DescribeCacheClustersMessage$ShowCacheNodeInfo": "

    An optional flag that can be included in the DescribeCacheClusters request to retrieve information about the individual cache nodes.

    ", + "ModifyCacheClusterMessage$AutoMinorVersionUpgrade": "

    This parameter is currently disabled.

    ", + "ModifyReplicationGroupMessage$AutomaticFailoverEnabled": "

    Whether a read replica will be automatically promoted to read/write primary if the existing primary encounters a failure.

    Valid values: true | false

    ElastiCache Multi-AZ replication groups are not supported on:

    • Redis versions earlier than 2.8.6.

    • T1 and T2 cache node types.

    ", + "ModifyReplicationGroupMessage$AutoMinorVersionUpgrade": "

    This parameter is currently disabled.

    " + } + }, + "CacheCluster": { + "base": "

    Contains all of the attributes of a specific cache cluster.

    ", + "refs": { + "CacheClusterList$member": null, + "CreateCacheClusterResult$CacheCluster": null, + "DeleteCacheClusterResult$CacheCluster": null, + "ModifyCacheClusterResult$CacheCluster": null, + "RebootCacheClusterResult$CacheCluster": null + } + }, + "CacheClusterAlreadyExistsFault": { + "base": "

    You already have a cache cluster with the given identifier.

    ", + "refs": { + } + }, + "CacheClusterList": { + "base": null, + "refs": { + "CacheClusterMessage$CacheClusters": "

    A list of cache clusters. Each item in the list contains detailed information about one cache cluster.

    " + } + }, + "CacheClusterMessage": { + "base": "

    Represents the output of a DescribeCacheClusters action.

    ", + "refs": { + } + }, + "CacheClusterNotFoundFault": { + "base": "

    The requested cache cluster ID does not refer to an existing cache cluster.

    ", + "refs": { + } + }, + "CacheEngineVersion": { + "base": "

    Provides all of the details about a particular cache engine version.

    ", + "refs": { + "CacheEngineVersionList$member": null + } + }, + "CacheEngineVersionList": { + "base": null, + "refs": { + "CacheEngineVersionMessage$CacheEngineVersions": "

    A list of cache engine version details. Each element in the list contains detailed information about one cache engine version.

    " + } + }, + "CacheEngineVersionMessage": { + "base": "

    Represents the output of a DescribeCacheEngineVersions action.

    ", + "refs": { + } + }, + "CacheNode": { + "base": "

    Represents an individual cache node within a cache cluster. Each cache node runs its own instance of the cluster's protocol-compliant caching software - either Memcached or Redis.

    Valid node types are as follows:

    • General purpose:

      • Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

      • Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

    • Compute optimized: cache.c1.xlarge

    • Memory optimized:

      • Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

      • Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

    Notes:

    • All t2 instances are created in an Amazon Virtual Private Cloud (VPC).

    • Redis backup/restore is not supported for t2 instances.

    • Redis Append-only files (AOF) functionality is not supported for t1 or t2 instances.

    For a complete listing of cache node types and specifications, see Amazon ElastiCache Product Features and Details and either Cache Node Type-Specific Parameters for Memcached or Cache Node Type-Specific Parameters for Redis.

    ", + "refs": { + "CacheNodeList$member": null + } + }, + "CacheNodeIdsList": { + "base": null, + "refs": { + "CacheParameterGroupStatus$CacheNodeIdsToReboot": "

    A list of the cache node IDs which need to be rebooted for parameter changes to be applied. A node ID is a numeric identifier (0001, 0002, etc.).

    ", + "ModifyCacheClusterMessage$CacheNodeIdsToRemove": "

    A list of cache node IDs to be removed. A node ID is a numeric identifier (0001, 0002, etc.). This parameter is only valid when NumCacheNodes is less than the existing number of cache nodes. The number of cache node IDs supplied in this parameter must match the difference between the existing number of cache nodes in the cluster or pending cache nodes, whichever is greater, and the value of NumCacheNodes in the request.

    For example: If you have 3 active cache nodes, 7 pending cache nodes, and the number of cache nodes in this ModifyCacheCluster call is 5, you must list 2 (7 - 5) cache node IDs to remove.

    ", + "PendingModifiedValues$CacheNodeIdsToRemove": "

    A list of cache node IDs that are being removed (or will be removed) from the cache cluster. A node ID is a numeric identifier (0001, 0002, etc.).

    ", + "RebootCacheClusterMessage$CacheNodeIdsToReboot": "

    A list of cache node IDs to reboot. A node ID is a numeric identifier (0001, 0002, etc.). To reboot an entire cache cluster, specify all of the cache node IDs.

    " + } + }, + "CacheNodeList": { + "base": null, + "refs": { + "CacheCluster$CacheNodes": "

    A list of cache nodes that are members of the cache cluster.

    " + } + }, + "CacheNodeTypeSpecificParameter": { + "base": "

    A parameter that has a different value for each cache node type it is applied to. For example, in a Redis cache cluster, a cache.m1.large cache node type would have a larger maxmemory value than a cache.m1.small type.

    ", + "refs": { + "CacheNodeTypeSpecificParametersList$member": null + } + }, + "CacheNodeTypeSpecificParametersList": { + "base": null, + "refs": { + "CacheParameterGroupDetails$CacheNodeTypeSpecificParameters": "

    A list of parameters specific to a particular cache node type. Each element in the list contains detailed information about one parameter.

    ", + "EngineDefaults$CacheNodeTypeSpecificParameters": "

    A list of parameters specific to a particular cache node type. Each element in the list contains detailed information about one parameter.

    " + } + }, + "CacheNodeTypeSpecificValue": { + "base": "

    A value that applies only to a certain cache node type.

    ", + "refs": { + "CacheNodeTypeSpecificValueList$member": null + } + }, + "CacheNodeTypeSpecificValueList": { + "base": null, + "refs": { + "CacheNodeTypeSpecificParameter$CacheNodeTypeSpecificValues": "

    A list of cache node types and their corresponding values for this parameter.

    " + } + }, + "CacheParameterGroup": { + "base": "

    Represents the output of a CreateCacheParameterGroup action.

    ", + "refs": { + "CacheParameterGroupList$member": null, + "CreateCacheParameterGroupResult$CacheParameterGroup": null + } + }, + "CacheParameterGroupAlreadyExistsFault": { + "base": "

    A cache parameter group with the requested name already exists.

    ", + "refs": { + } + }, + "CacheParameterGroupDetails": { + "base": "

    Represents the output of a DescribeCacheParameters action.

    ", + "refs": { + } + }, + "CacheParameterGroupList": { + "base": null, + "refs": { + "CacheParameterGroupsMessage$CacheParameterGroups": "

    A list of cache parameter groups. Each element in the list contains detailed information about one cache parameter group.

    " + } + }, + "CacheParameterGroupNameMessage": { + "base": "

    Represents the output of one of the following actions:

    • ModifyCacheParameterGroup

    • ResetCacheParameterGroup

    ", + "refs": { + } + }, + "CacheParameterGroupNotFoundFault": { + "base": "

    The requested cache parameter group name does not refer to an existing cache parameter group.

    ", + "refs": { + } + }, + "CacheParameterGroupQuotaExceededFault": { + "base": "

    The request cannot be processed because it would exceed the maximum number of cache security groups.

    ", + "refs": { + } + }, + "CacheParameterGroupStatus": { + "base": "

    The status of the cache parameter group.

    ", + "refs": { + "CacheCluster$CacheParameterGroup": null + } + }, + "CacheParameterGroupsMessage": { + "base": "

    Represents the output of a DescribeCacheParameterGroups action.

    ", + "refs": { + } + }, + "CacheSecurityGroup": { + "base": "

    Represents the output of one of the following actions:

    • AuthorizeCacheSecurityGroupIngress

    • CreateCacheSecurityGroup

    • RevokeCacheSecurityGroupIngress

    ", + "refs": { + "AuthorizeCacheSecurityGroupIngressResult$CacheSecurityGroup": null, + "CacheSecurityGroups$member": null, + "CreateCacheSecurityGroupResult$CacheSecurityGroup": null, + "RevokeCacheSecurityGroupIngressResult$CacheSecurityGroup": null + } + }, + "CacheSecurityGroupAlreadyExistsFault": { + "base": "

    A cache security group with the specified name already exists.

    ", + "refs": { + } + }, + "CacheSecurityGroupMembership": { + "base": "

    Represents a cache cluster's status within a particular cache security group.

    ", + "refs": { + "CacheSecurityGroupMembershipList$member": null + } + }, + "CacheSecurityGroupMembershipList": { + "base": null, + "refs": { + "CacheCluster$CacheSecurityGroups": "

    A list of cache security group elements, composed of name and status sub-elements.

    " + } + }, + "CacheSecurityGroupMessage": { + "base": "

    Represents the output of a DescribeCacheSecurityGroups action.

    ", + "refs": { + } + }, + "CacheSecurityGroupNameList": { + "base": null, + "refs": { + "CreateCacheClusterMessage$CacheSecurityGroupNames": "

    A list of security group names to associate with this cache cluster.

    Use this parameter only when you are creating a cache cluster outside of an Amazon Virtual Private Cloud (VPC).

    ", + "CreateReplicationGroupMessage$CacheSecurityGroupNames": "

    A list of cache security group names to associate with this replication group.

    ", + "ModifyCacheClusterMessage$CacheSecurityGroupNames": "

    A list of cache security group names to authorize on this cache cluster. This change is asynchronously applied as soon as possible.

    This parameter can be used only with clusters that are created outside of an Amazon Virtual Private Cloud (VPC).

    Constraints: Must contain no more than 255 alphanumeric characters. Must not be \"Default\".

    ", + "ModifyReplicationGroupMessage$CacheSecurityGroupNames": "

    A list of cache security group names to authorize for the clusters in this replication group. This change is asynchronously applied as soon as possible.

    This parameter can be used only with replication groups containing cache clusters running outside of an Amazon Virtual Private Cloud (VPC).

    Constraints: Must contain no more than 255 alphanumeric characters. Must not be \"Default\".

    " + } + }, + "CacheSecurityGroupNotFoundFault": { + "base": "

    The requested cache security group name does not refer to an existing cache security group.

    ", + "refs": { + } + }, + "CacheSecurityGroupQuotaExceededFault": { + "base": "

    The request cannot be processed because it would exceed the allowed number of cache security groups.

    ", + "refs": { + } + }, + "CacheSecurityGroups": { + "base": null, + "refs": { + "CacheSecurityGroupMessage$CacheSecurityGroups": "

    A list of cache security groups. Each element in the list contains detailed information about one group.

    " + } + }, + "CacheSubnetGroup": { + "base": "

    Represents the output of one of the following actions:

    • CreateCacheSubnetGroup

    • ModifyCacheSubnetGroup

    ", + "refs": { + "CacheSubnetGroups$member": null, + "CreateCacheSubnetGroupResult$CacheSubnetGroup": null, + "ModifyCacheSubnetGroupResult$CacheSubnetGroup": null + } + }, + "CacheSubnetGroupAlreadyExistsFault": { + "base": "

    The requested cache subnet group name is already in use by an existing cache subnet group.

    ", + "refs": { + } + }, + "CacheSubnetGroupInUse": { + "base": "

    The requested cache subnet group is currently in use.

    ", + "refs": { + } + }, + "CacheSubnetGroupMessage": { + "base": "

    Represents the output of a DescribeCacheSubnetGroups action.

    ", + "refs": { + } + }, + "CacheSubnetGroupNotFoundFault": { + "base": "

    The requested cache subnet group name does not refer to an existing cache subnet group.

    ", + "refs": { + } + }, + "CacheSubnetGroupQuotaExceededFault": { + "base": "

    The request cannot be processed because it would exceed the allowed number of cache subnet groups.

    ", + "refs": { + } + }, + "CacheSubnetGroups": { + "base": null, + "refs": { + "CacheSubnetGroupMessage$CacheSubnetGroups": "

    A list of cache subnet groups. Each element in the list contains detailed information about one group.

    " + } + }, + "CacheSubnetQuotaExceededFault": { + "base": "

    The request cannot be processed because it would exceed the allowed number of subnets in a cache subnet group.

    ", + "refs": { + } + }, + "ChangeType": { + "base": null, + "refs": { + "CacheNodeTypeSpecificParameter$ChangeType": "

    ChangeType indicates whether a change to the parameter will be applied immediately or requires a reboot for the change to be applied. You can force a reboot or wait until the next maintenance window's reboot. For more information, see Rebooting a Cluster.

    ", + "Parameter$ChangeType": "

    ChangeType indicates whether a change to the parameter will be applied immediately or requires a reboot for the change to be applied. You can force a reboot or wait until the next maintenance window's reboot. For more information, see Rebooting a Cluster.

    " + } + }, + "ClusterIdList": { + "base": null, + "refs": { + "ReplicationGroup$MemberClusters": "

    The names of all the cache clusters that are part of this replication group.

    " + } + }, + "ClusterQuotaForCustomerExceededFault": { + "base": "

    The request cannot be processed because it would exceed the allowed number of cache clusters per customer.

    ", + "refs": { + } + }, + "CopySnapshotMessage": { + "base": "

    Represents the input of a CopySnapshot action.

    ", + "refs": { + } + }, + "CopySnapshotResult": { + "base": null, + "refs": { + } + }, + "CreateCacheClusterMessage": { + "base": "

    Represents the input of a CreateCacheCluster action.

    ", + "refs": { + } + }, + "CreateCacheClusterResult": { + "base": null, + "refs": { + } + }, + "CreateCacheParameterGroupMessage": { + "base": "

    Represents the input of a CreateCacheParameterGroup action.

    ", + "refs": { + } + }, + "CreateCacheParameterGroupResult": { + "base": null, + "refs": { + } + }, + "CreateCacheSecurityGroupMessage": { + "base": "

    Represents the input of a CreateCacheSecurityGroup action.

    ", + "refs": { + } + }, + "CreateCacheSecurityGroupResult": { + "base": null, + "refs": { + } + }, + "CreateCacheSubnetGroupMessage": { + "base": "

    Represents the input of a CreateCacheSubnetGroup action.

    ", + "refs": { + } + }, + "CreateCacheSubnetGroupResult": { + "base": null, + "refs": { + } + }, + "CreateReplicationGroupMessage": { + "base": "

    Represents the input of a CreateReplicationGroup action.

    ", + "refs": { + } + }, + "CreateReplicationGroupResult": { + "base": null, + "refs": { + } + }, + "CreateSnapshotMessage": { + "base": "

    Represents the input of a CreateSnapshot action.

    ", + "refs": { + } + }, + "CreateSnapshotResult": { + "base": null, + "refs": { + } + }, + "DeleteCacheClusterMessage": { + "base": "

    Represents the input of a DeleteCacheCluster action.

    ", + "refs": { + } + }, + "DeleteCacheClusterResult": { + "base": null, + "refs": { + } + }, + "DeleteCacheParameterGroupMessage": { + "base": "

    Represents the input of a DeleteCacheParameterGroup action.

    ", + "refs": { + } + }, + "DeleteCacheSecurityGroupMessage": { + "base": "

    Represents the input of a DeleteCacheSecurityGroup action.

    ", + "refs": { + } + }, + "DeleteCacheSubnetGroupMessage": { + "base": "

    Represents the input of a DeleteCacheSubnetGroup action.

    ", + "refs": { + } + }, + "DeleteReplicationGroupMessage": { + "base": "

    Represents the input of a DeleteReplicationGroup action.

    ", + "refs": { + } + }, + "DeleteReplicationGroupResult": { + "base": null, + "refs": { + } + }, + "DeleteSnapshotMessage": { + "base": "

    Represents the input of a DeleteSnapshot action.

    ", + "refs": { + } + }, + "DeleteSnapshotResult": { + "base": null, + "refs": { + } + }, + "DescribeCacheClustersMessage": { + "base": "

    Represents the input of a DescribeCacheClusters action.

    ", + "refs": { + } + }, + "DescribeCacheEngineVersionsMessage": { + "base": "

    Represents the input of a DescribeCacheEngineVersions action.

    ", + "refs": { + } + }, + "DescribeCacheParameterGroupsMessage": { + "base": "

    Represents the input of a DescribeCacheParameterGroups action.

    ", + "refs": { + } + }, + "DescribeCacheParametersMessage": { + "base": "

    Represents the input of a DescribeCacheParameters action.

    ", + "refs": { + } + }, + "DescribeCacheSecurityGroupsMessage": { + "base": "

    Represents the input of a DescribeCacheSecurityGroups action.

    ", + "refs": { + } + }, + "DescribeCacheSubnetGroupsMessage": { + "base": "

    Represents the input of a DescribeCacheSubnetGroups action.

    ", + "refs": { + } + }, + "DescribeEngineDefaultParametersMessage": { + "base": "

    Represents the input of a DescribeEngineDefaultParameters action.

    ", + "refs": { + } + }, + "DescribeEngineDefaultParametersResult": { + "base": null, + "refs": { + } + }, + "DescribeEventsMessage": { + "base": "

    Represents the input of a DescribeEvents action.

    ", + "refs": { + } + }, + "DescribeReplicationGroupsMessage": { + "base": "

    Represents the input of a DescribeReplicationGroups action.

    ", + "refs": { + } + }, + "DescribeReservedCacheNodesMessage": { + "base": "

    Represents the input of a DescribeReservedCacheNodes action.

    ", + "refs": { + } + }, + "DescribeReservedCacheNodesOfferingsMessage": { + "base": "

    Represents the input of a DescribeReservedCacheNodesOfferings action.

    ", + "refs": { + } + }, + "DescribeSnapshotsListMessage": { + "base": "

    Represents the output of a DescribeSnapshots action.

    ", + "refs": { + } + }, + "DescribeSnapshotsMessage": { + "base": "

    Represents the input of a DescribeSnapshots action.

    ", + "refs": { + } + }, + "Double": { + "base": null, + "refs": { + "RecurringCharge$RecurringChargeAmount": "

    The monetary amount of the recurring charge.

    ", + "ReservedCacheNode$FixedPrice": "

    The fixed price charged for this reserved cache node.

    ", + "ReservedCacheNode$UsagePrice": "

    The hourly price charged for this reserved cache node.

    ", + "ReservedCacheNodesOffering$FixedPrice": "

    The fixed price charged for this offering.

    ", + "ReservedCacheNodesOffering$UsagePrice": "

    The hourly price charged for this offering.

    " + } + }, + "EC2SecurityGroup": { + "base": "

    Provides ownership and status information for an Amazon EC2 security group.

    ", + "refs": { + "EC2SecurityGroupList$member": null + } + }, + "EC2SecurityGroupList": { + "base": null, + "refs": { + "CacheSecurityGroup$EC2SecurityGroups": "

    A list of Amazon EC2 security groups that are associated with this cache security group.

    " + } + }, + "Endpoint": { + "base": "

    Represents the information required for client programs to connect to a cache node.

    ", + "refs": { + "CacheCluster$ConfigurationEndpoint": null, + "CacheNode$Endpoint": "

    The hostname for connecting to this cache node.

    ", + "NodeGroup$PrimaryEndpoint": null, + "NodeGroupMember$ReadEndpoint": null + } + }, + "EngineDefaults": { + "base": "

    Represents the output of a DescribeEngineDefaultParameters action.

    ", + "refs": { + "DescribeEngineDefaultParametersResult$EngineDefaults": null + } + }, + "Event": { + "base": "

    Represents a single occurrence of something interesting within the system. Some examples of events are creating a cache cluster, adding or removing a cache node, or rebooting a node.

    ", + "refs": { + "EventList$member": null + } + }, + "EventList": { + "base": null, + "refs": { + "EventsMessage$Events": "

    A list of events. Each element in the list contains detailed information about one event.

    " + } + }, + "EventsMessage": { + "base": "

    Represents the output of a DescribeEvents action.

    ", + "refs": { + } + }, + "InsufficientCacheClusterCapacityFault": { + "base": "

    The requested cache node type is not available in the specified Availability Zone.

    ", + "refs": { + } + }, + "Integer": { + "base": null, + "refs": { + "Endpoint$Port": "

    The port number that the cache engine is listening on.

    ", + "ReservedCacheNode$Duration": "

    The duration of the reservation in seconds.

    ", + "ReservedCacheNode$CacheNodeCount": "

    The number of cache nodes that have been reserved.

    ", + "ReservedCacheNodesOffering$Duration": "

    The duration of the offering, in seconds.

    " + } + }, + "IntegerOptional": { + "base": null, + "refs": { + "CacheCluster$NumCacheNodes": "

    The number of cache nodes in the cache cluster.

    For clusters running Redis, this value must be 1. For clusters running Memcached, this value must be between 1 and 20.

    ", + "CacheCluster$SnapshotRetentionLimit": "

    The number of days for which ElastiCache will retain automatic cache cluster snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, then a snapshot that was taken today will be retained for 5 days before being deleted.

    If the value of SnapshotRetentionLimit is set to zero (0), backups are turned off.

    ", + "CreateCacheClusterMessage$NumCacheNodes": "

    The initial number of cache nodes that the cache cluster will have.

    For clusters running Redis, this value must be 1. For clusters running Memcached, this value must be between 1 and 20.

    If you need more than 20 nodes for your Memcached cluster, please fill out the ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request/.

    ", + "CreateCacheClusterMessage$Port": "

    The port number on which each of the cache nodes will accept connections.

    ", + "CreateCacheClusterMessage$SnapshotRetentionLimit": "

    The number of days for which ElastiCache will retain automatic snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, then a snapshot that was taken today will be retained for 5 days before being deleted.

    This parameter is only valid if the Engine parameter is redis.

    Default: 0 (i.e., automatic backups are disabled for this cache cluster).

    ", + "CreateReplicationGroupMessage$NumCacheClusters": "

    The number of cache clusters this replication group will initially have.

    If Multi-AZ is enabled, the value of this parameter must be at least 2.

    The maximum permitted value for NumCacheClusters is 6 (primary plus 5 replicas). If you need to exceed this limit, please fill out the ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request.

    ", + "CreateReplicationGroupMessage$Port": "

    The port number on which each member of the replication group will accept connections.

    ", + "CreateReplicationGroupMessage$SnapshotRetentionLimit": "

    The number of days for which ElastiCache will retain automatic snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, then a snapshot that was taken today will be retained for 5 days before being deleted.

    This parameter is only valid if the Engine parameter is redis.

    Default: 0 (i.e., automatic backups are disabled for this cache cluster).

    ", + "DescribeCacheClustersMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20; maximum 100.

    ", + "DescribeCacheEngineVersionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20; maximum 100.

    ", + "DescribeCacheParameterGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20; maximum 100.

    ", + "DescribeCacheParametersMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20; maximum 100.

    ", + "DescribeCacheSecurityGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20; maximum 100.

    ", + "DescribeCacheSubnetGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20; maximum 100.

    ", + "DescribeEngineDefaultParametersMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20; maximum 100.

    ", + "DescribeEventsMessage$Duration": "

    The number of minutes' worth of events to retrieve.

    ", + "DescribeEventsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20; maximum 100.

    ", + "DescribeReplicationGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20; maximum 100.

    ", + "DescribeReservedCacheNodesMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20; maximum 100.

    ", + "DescribeReservedCacheNodesOfferingsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20; maximum 100.

    ", + "DescribeSnapshotsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a marker is included in the response so that the remaining results can be retrieved.

    Default: 50

    Constraints: minimum 20; maximum 50.

    ", + "ModifyCacheClusterMessage$NumCacheNodes": "

    The number of cache nodes that the cache cluster should have. If the value for NumCacheNodes is greater than the sum of the number of current cache nodes and the number of cache nodes pending creation (which may be zero), then more nodes will be added. If the value is less than the number of existing cache nodes, then nodes will be removed. If the value is equal to the number of current cache nodes, then any pending add or remove requests are canceled.

    If you are removing cache nodes, you must use the CacheNodeIdsToRemove parameter to provide the IDs of the specific cache nodes to remove.

    For clusters running Redis, this value must be 1. For clusters running Memcached, this value must be between 1 and 20.

    Adding or removing Memcached cache nodes can be applied immediately or as a pending action. See ApplyImmediately.

    A pending action to modify the number of cache nodes in a cluster during its maintenance window, whether by adding or removing nodes in accordance with the scale out architecture, is not queued. The customer's latest request to add or remove nodes to the cluster overrides any previous pending actions to modify the number of cache nodes in the cluster. For example, a request to remove 2 nodes would override a previous pending action to remove 3 nodes. Similarly, a request to add 2 nodes would override a previous pending action to remove 3 nodes and vice versa.

    As Memcached cache nodes may now be provisioned in different Availability Zones with flexible cache node placement, a request to add nodes does not automatically override a previous pending action to add nodes. The customer can modify the previous pending action to add more nodes or explicitly cancel the pending request and retry the new request.

    To cancel pending actions to modify the number of cache nodes in a cluster, use the ModifyCacheCluster request and set NumCacheNodes equal to the number of cache nodes currently in the cache cluster.
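    Echoing the 7-to-5 worked example given for CacheNodeIdsToRemove above, a sketch of a node-removal request with the vendored SDK; all IDs are placeholders:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.Must(session.NewSession(
		&aws.Config{Region: aws.String("us-west-2")})))

	// 7 current/pending nodes, target of 5: exactly 2 node IDs must be removed.
	_, err := svc.ModifyCacheCluster(&elasticache.ModifyCacheClusterInput{
		CacheClusterId:       aws.String("my-memcached"), // placeholder
		NumCacheNodes:        aws.Int64(5),
		CacheNodeIdsToRemove: aws.StringSlice([]string{"0006", "0007"}),
		ApplyImmediately:     aws.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```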

    ", + "ModifyCacheClusterMessage$SnapshotRetentionLimit": "

    The number of days for which ElastiCache will retain automatic cache cluster snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, then a snapshot that was taken today will be retained for 5 days before being deleted.

    If the value of SnapshotRetentionLimit is set to zero (0), backups are turned off.

    ", + "ModifyReplicationGroupMessage$SnapshotRetentionLimit": "

    The number of days for which ElastiCache will retain automatic node group snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, then a snapshot that was taken today will be retained for 5 days before being deleted.

    Important If the value of SnapshotRetentionLimit is set to zero (0), backups are turned off.

    ", + "PendingModifiedValues$NumCacheNodes": "

    The new number of cache nodes for the cache cluster.

    For clusters running Redis, this value must be 1. For clusters running Memcached, this value must be between 1 and 20.

    ", + "PurchaseReservedCacheNodesOfferingMessage$CacheNodeCount": "

    The number of cache node instances to reserve.

    Default: 1

    ", + "Snapshot$NumCacheNodes": "

    The number of cache nodes in the source cache cluster.

    For clusters running Redis, this value must be 1. For clusters running Memcached, this value must be between 1 and 20.

    ", + "Snapshot$Port": "

    The port number used by each cache node in the source cache cluster.

    ", + "Snapshot$SnapshotRetentionLimit": "

    For an automatic snapshot, the number of days for which ElastiCache will retain the snapshot before deleting it.

    For manual snapshots, this field reflects the SnapshotRetentionLimit for the source cache cluster when the snapshot was created. This field is otherwise ignored: Manual snapshots do not expire, and can only be deleted using the DeleteSnapshot action.

    Important If the value of SnapshotRetentionLimit is set to zero (0), backups are turned off.

    " + } + }, + "InvalidARNFault": { + "base": "

    The requested Amazon Resource Name (ARN) does not refer to an existing resource.

    ", + "refs": { + } + }, + "InvalidCacheClusterStateFault": { + "base": "

    The requested cache cluster is not in the available state.

    ", + "refs": { + } + }, + "InvalidCacheParameterGroupStateFault": { + "base": "

    The current state of the cache parameter group does not allow the requested action to occur.

    ", + "refs": { + } + }, + "InvalidCacheSecurityGroupStateFault": { + "base": "

    The current state of the cache security group does not allow deletion.

    ", + "refs": { + } + }, + "InvalidParameterCombinationException": { + "base": "

    Two or more incompatible parameters were specified.

    ", + "refs": { + } + }, + "InvalidParameterValueException": { + "base": "

    The value for a parameter is invalid.

    ", + "refs": { + } + }, + "InvalidReplicationGroupStateFault": { + "base": "

    The requested replication group is not in the available state.

    ", + "refs": { + } + }, + "InvalidSnapshotStateFault": { + "base": "

    The current state of the snapshot does not allow the requested action to occur.

    ", + "refs": { + } + }, + "InvalidSubnet": { + "base": "

    An invalid subnet identifier was specified.

    ", + "refs": { + } + }, + "InvalidVPCNetworkStateFault": { + "base": "

    The VPC network is in an invalid state.

    ", + "refs": { + } + }, + "KeyList": { + "base": null, + "refs": { + "RemoveTagsFromResourceMessage$TagKeys": "

    A list of TagKeys identifying the tags you want removed from the named resource. For example, TagKeys.member.1=Region removes the cost allocation tag with the key name Region from the resource named by the ResourceName parameter.

    " + } + }, + "ListAllowedNodeTypeModificationsMessage": { + "base": "

    The input parameters for the ListAllowedNodeTypeModifications action.

    ", + "refs": { + } + }, + "ListTagsForResourceMessage": { + "base": "

    The input parameters for the ListTagsForResource action.

    ", + "refs": { + } + }, + "ModifyCacheClusterMessage": { + "base": "

    Represents the input of a ModifyCacheCluster action.

    ", + "refs": { + } + }, + "ModifyCacheClusterResult": { + "base": null, + "refs": { + } + }, + "ModifyCacheParameterGroupMessage": { + "base": "

    Represents the input of a ModifyCacheParameterGroup action.

    ", + "refs": { + } + }, + "ModifyCacheSubnetGroupMessage": { + "base": "

    Represents the input of a ModifyCacheSubnetGroup action.

    ", + "refs": { + } + }, + "ModifyCacheSubnetGroupResult": { + "base": null, + "refs": { + } + }, + "ModifyReplicationGroupMessage": { + "base": "

    Represents the input of a ModifyReplicationGroup action.

    ", + "refs": { + } + }, + "ModifyReplicationGroupResult": { + "base": null, + "refs": { + } + }, + "NodeGroup": { + "base": "

    Represents a collection of cache nodes in a replication group.

    ", + "refs": { + "NodeGroupList$member": null + } + }, + "NodeGroupList": { + "base": null, + "refs": { + "ReplicationGroup$NodeGroups": "

    A single element list with information about the nodes in the replication group.

    " + } + }, + "NodeGroupMember": { + "base": "

    Represents a single node within a node group.

    ", + "refs": { + "NodeGroupMemberList$member": null + } + }, + "NodeGroupMemberList": { + "base": null, + "refs": { + "NodeGroup$NodeGroupMembers": "

    A list containing information about individual nodes within the node group.

    " + } + }, + "NodeQuotaForClusterExceededFault": { + "base": "

    The request cannot be processed because it would exceed the allowed number of cache nodes in a single cache cluster.

    ", + "refs": { + } + }, + "NodeQuotaForCustomerExceededFault": { + "base": "

    The request cannot be processed because it would exceed the allowed number of cache nodes per customer.

    ", + "refs": { + } + }, + "NodeSnapshot": { + "base": "

    Represents an individual cache node in a snapshot of a cache cluster.

    ", + "refs": { + "NodeSnapshotList$member": null + } + }, + "NodeSnapshotList": { + "base": null, + "refs": { + "Snapshot$NodeSnapshots": "

    A list of the cache nodes in the source cache cluster.

    " + } + }, + "NodeTypeList": { + "base": null, + "refs": { + "AllowedNodeTypeModificationsMessage$ScaleUpModifications": "

    A string list, each element of which specifies a cache node type which you can use to scale your cache cluster or replication group.

    When scaling up a Redis cluster or replication group using ModifyCacheCluster or ModifyReplicationGroup, use a value from this list for the CacheNodeType parameter.

    " + } + }, + "NotificationConfiguration": { + "base": "

    Describes a notification topic and its status. Notification topics are used for publishing ElastiCache events to subscribers using Amazon Simple Notification Service (SNS).

    ", + "refs": { + "CacheCluster$NotificationConfiguration": null + } + }, + "Parameter": { + "base": "

    Describes an individual setting that controls some aspect of ElastiCache behavior.

    ", + "refs": { + "ParametersList$member": null + } + }, + "ParameterNameValue": { + "base": "

    Describes a name-value pair that is used to update the value of a parameter.

    ", + "refs": { + "ParameterNameValueList$member": null + } + }, + "ParameterNameValueList": { + "base": null, + "refs": { + "ModifyCacheParameterGroupMessage$ParameterNameValues": "

    An array of parameter names and values for the parameter update. You must supply at least one parameter name and value; subsequent arguments are optional. A maximum of 20 parameters may be modified per request.

    ", + "ResetCacheParameterGroupMessage$ParameterNameValues": "

    An array of parameter names to reset to their default values. If ResetAllParameters is false, you must specify the name of at least one parameter to reset.

    " + } + }, + "ParametersList": { + "base": null, + "refs": { + "CacheParameterGroupDetails$Parameters": "

    A list of Parameter instances.

    ", + "EngineDefaults$Parameters": "

    Contains a list of engine default parameters.

    " + } + }, + "PendingAutomaticFailoverStatus": { + "base": null, + "refs": { + "ReplicationGroupPendingModifiedValues$AutomaticFailoverStatus": "

    Indicates the status of Multi-AZ for this replication group.

    ElastiCache Multi-AZ replication groups are not supported on:

    • Redis versions earlier than 2.8.6.

    • T1 and T2 cache node types.

    " + } + }, + "PendingModifiedValues": { + "base": "

    A group of settings that will be applied to the cache cluster in the future, or that are currently being applied.

    ", + "refs": { + "CacheCluster$PendingModifiedValues": null + } + }, + "PreferredAvailabilityZoneList": { + "base": null, + "refs": { + "CreateCacheClusterMessage$PreferredAvailabilityZones": "

    A list of the Availability Zones in which cache nodes will be created. The order of the zones in the list is not important.

    This option is only supported on Memcached.

    If you are creating your cache cluster in an Amazon VPC (recommended) you can only locate nodes in Availability Zones that are associated with the subnets in the selected subnet group.

    The number of Availability Zones listed must equal the value of NumCacheNodes.

    If you want all the nodes in the same Availability Zone, use PreferredAvailabilityZone instead, or repeat the Availability Zone multiple times in the list.

    Default: System chosen Availability Zones.

    Example: One Memcached node in each of three different Availability Zones: PreferredAvailabilityZones.member.1=us-west-2a&PreferredAvailabilityZones.member.2=us-west-2b&PreferredAvailabilityZones.member.3=us-west-2c

    Example: All three Memcached nodes in one Availability Zone: PreferredAvailabilityZones.member.1=us-west-2a&PreferredAvailabilityZones.member.2=us-west-2a&PreferredAvailabilityZones.member.3=us-west-2a
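    As a sketch of the first example above in Go form, assuming the vendored SDK's AZMode and PreferredAvailabilityZones fields (identifiers are placeholders):

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.Must(session.NewSession(
		&aws.Config{Region: aws.String("us-west-2")})))

	// One Memcached node in each of three zones, per the first example above.
	_, err := svc.CreateCacheCluster(&elasticache.CreateCacheClusterInput{
		CacheClusterId: aws.String("my-memcached"), // placeholder
		Engine:         aws.String("memcached"),
		CacheNodeType:  aws.String("cache.m3.medium"),
		NumCacheNodes:  aws.Int64(3),
		AZMode:         aws.String("cross-az"),
		PreferredAvailabilityZones: aws.StringSlice(
			[]string{"us-west-2a", "us-west-2b", "us-west-2c"}),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```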

    ", + "ModifyCacheClusterMessage$NewAvailabilityZones": "

    The list of Availability Zones where the new Memcached cache nodes will be created.

    This parameter is only valid when NumCacheNodes in the request is greater than the sum of the number of active cache nodes and the number of cache nodes pending creation (which may be zero). The number of Availability Zones supplied in this list must match the cache nodes being added in this request.

    This option is only supported on Memcached clusters.

    Scenarios:

    • Scenario 1: You have 3 active nodes and wish to add 2 nodes. Specify NumCacheNodes=5 (3 + 2) and optionally specify two Availability Zones for the two new nodes.

    • Scenario 2: You have 3 active nodes and 2 nodes pending creation (from the scenario 1 call) and want to add 1 more node. Specify NumCacheNodes=6 ((3 + 2) + 1) and optionally specify an Availability Zone for the new node.

    • Scenario 3: You want to cancel all pending actions. Specify NumCacheNodes=3 to cancel all pending actions.

    The Availability Zone placement of nodes pending creation cannot be modified. If you wish to cancel any nodes pending creation, add 0 nodes by setting NumCacheNodes to the number of current nodes.

    If cross-az is specified, existing Memcached nodes remain in their current Availability Zone. Only newly created nodes can be located in different Availability Zones. For guidance on how to move existing Memcached nodes to different Availability Zones, see the Availability Zone Considerations section of Cache Node Considerations for Memcached.

    Impact of new add/remove requests upon pending requests

    • Scenario-1

      • Pending Action: Delete

      • New Request: Delete

      • Result: The new delete, pending or immediate, replaces the pending delete.

    • Scenario-2

      • Pending Action: Delete

      • New Request: Create

      • Result: The new create, pending or immediate, replaces the pending delete.

    • Scenario-3

      • Pending Action: Create

      • New Request: Delete

      • Result: The new delete, pending or immediate, replaces the pending create.

    • Scenario-4

      • Pending Action: Create

      • New Request: Create

      • Result: The new create is added to the pending create.

        Important: If the new create request is Apply Immediately - Yes, all creates are performed immediately. If the new create request is Apply Immediately - No, all creates are pending.

    Example:

    NewAvailabilityZones.member.1=us-west-2a&amp;NewAvailabilityZones.member.2=us-west-2b&amp;NewAvailabilityZones.member.3=us-west-2c
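    As an illustrative sketch of Scenario 1 above (not part of the service model; the cluster ID and zones are placeholders), the same request via the vendored aws-sdk-go client:

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/elasticache"
        )

        // addTwoNodes grows a 3-node Memcached cluster to 5 nodes, placing the
        // two new nodes in specific Availability Zones.
        func addTwoNodes(svc *elasticache.ElastiCache) error {
            _, err := svc.ModifyCacheCluster(&elasticache.ModifyCacheClusterInput{
                CacheClusterId: aws.String("my-memcached"), // hypothetical ID
                NumCacheNodes:  aws.Int64(5),               // 3 existing + 2 new
                NewAvailabilityZones: aws.StringSlice([]string{
                    "us-west-2b", "us-west-2c",
                }),
                ApplyImmediately: aws.Bool(true), // create now rather than leaving them pending
            })
            return err
        }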

    " + } + }, + "PurchaseReservedCacheNodesOfferingMessage": { + "base": "

    Represents the input of a PurchaseReservedCacheNodesOffering action.

    ", + "refs": { + } + }, + "PurchaseReservedCacheNodesOfferingResult": { + "base": null, + "refs": { + } + }, + "RebootCacheClusterMessage": { + "base": "

    Represents the input of a RebootCacheCluster action.

    ", + "refs": { + } + }, + "RebootCacheClusterResult": { + "base": null, + "refs": { + } + }, + "RecurringCharge": { + "base": "

    Contains the specific price and frequency of a recurring charge for a reserved cache node, or for a reserved cache node offering.
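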

    ", + "refs": { + "RecurringChargeList$member": null + } + }, + "RecurringChargeList": { + "base": null, + "refs": { + "ReservedCacheNode$RecurringCharges": "

    The recurring price charged to run this reserved cache node.

    ", + "ReservedCacheNodesOffering$RecurringCharges": "

    The recurring price charged to run this reserved cache node.

    " + } + }, + "RemoveTagsFromResourceMessage": { + "base": "

    Represents the input of a RemoveTagsFromResource action.

    ", + "refs": { + } + }, + "ReplicationGroup": { + "base": "

    Contains all of the attributes of a specific replication group.

    ", + "refs": { + "CreateReplicationGroupResult$ReplicationGroup": null, + "DeleteReplicationGroupResult$ReplicationGroup": null, + "ModifyReplicationGroupResult$ReplicationGroup": null, + "ReplicationGroupList$member": null + } + }, + "ReplicationGroupAlreadyExistsFault": { + "base": "

    The specified replication group already exists.

    ", + "refs": { + } + }, + "ReplicationGroupList": { + "base": null, + "refs": { + "ReplicationGroupMessage$ReplicationGroups": "

    A list of replication groups. Each item in the list contains detailed information about one replication group.

    " + } + }, + "ReplicationGroupMessage": { + "base": "

    Represents the output of a DescribeReplicationGroups action.

    ", + "refs": { + } + }, + "ReplicationGroupNotFoundFault": { + "base": "

    The specified replication group does not exist.

    ", + "refs": { + } + }, + "ReplicationGroupPendingModifiedValues": { + "base": "

    The settings to be applied to the replication group, either immediately or during the next maintenance window.

    ", + "refs": { + "ReplicationGroup$PendingModifiedValues": "

    A group of settings to be applied to the replication group, either immediately or during the next maintenance window.

    " + } + }, + "ReservedCacheNode": { + "base": "

    Represents the output of a PurchaseReservedCacheNodesOffering action.

    ", + "refs": { + "PurchaseReservedCacheNodesOfferingResult$ReservedCacheNode": null, + "ReservedCacheNodeList$member": null + } + }, + "ReservedCacheNodeAlreadyExistsFault": { + "base": "

    You already have a reservation with the given identifier.

    ", + "refs": { + } + }, + "ReservedCacheNodeList": { + "base": null, + "refs": { + "ReservedCacheNodeMessage$ReservedCacheNodes": "

    A list of reserved cache nodes. Each element in the list contains detailed information about one node.

    " + } + }, + "ReservedCacheNodeMessage": { + "base": "

    Represents the output of a DescribeReservedCacheNodes action.

    ", + "refs": { + } + }, + "ReservedCacheNodeNotFoundFault": { + "base": "

    The requested reserved cache node was not found.

    ", + "refs": { + } + }, + "ReservedCacheNodeQuotaExceededFault": { + "base": "

    The request cannot be processed because it would exceed the user's cache node quota.

    ", + "refs": { + } + }, + "ReservedCacheNodesOffering": { + "base": "

    Describes all of the attributes of a reserved cache node offering.

    ", + "refs": { + "ReservedCacheNodesOfferingList$member": null + } + }, + "ReservedCacheNodesOfferingList": { + "base": null, + "refs": { + "ReservedCacheNodesOfferingMessage$ReservedCacheNodesOfferings": "

    A list of reserved cache node offerings. Each element in the list contains detailed information about one offering.

    " + } + }, + "ReservedCacheNodesOfferingMessage": { + "base": "

    Represents the output of a DescribeReservedCacheNodesOfferings action.

    ", + "refs": { + } + }, + "ReservedCacheNodesOfferingNotFoundFault": { + "base": "

    The requested cache node offering does not exist.

    ", + "refs": { + } + }, + "ResetCacheParameterGroupMessage": { + "base": "

    Represents the input of a ResetCacheParameterGroup action.

    ", + "refs": { + } + }, + "RevokeCacheSecurityGroupIngressMessage": { + "base": "

    Represents the input of a RevokeCacheSecurityGroupIngress action.

    ", + "refs": { + } + }, + "RevokeCacheSecurityGroupIngressResult": { + "base": null, + "refs": { + } + }, + "SecurityGroupIdsList": { + "base": null, + "refs": { + "CreateCacheClusterMessage$SecurityGroupIds": "

    One or more VPC security groups associated with the cache cluster.

    Use this parameter only when you are creating a cache cluster in an Amazon Virtual Private Cloud (VPC).

    ", + "CreateReplicationGroupMessage$SecurityGroupIds": "

    One or more Amazon VPC security groups associated with this replication group.

    Use this parameter only when you are creating a replication group in an Amazon Virtual Private Cloud (VPC).

    ", + "ModifyCacheClusterMessage$SecurityGroupIds": "

    Specifies the VPC Security Groups associated with the cache cluster.

    This parameter can be used only with clusters that are created in an Amazon Virtual Private Cloud (VPC).

    ", + "ModifyReplicationGroupMessage$SecurityGroupIds": "

    Specifies the VPC Security Groups associated with the cache clusters in the replication group.

    This parameter can be used only with a replication group containing cache clusters running in an Amazon Virtual Private Cloud (VPC).

    " + } + }, + "SecurityGroupMembership": { + "base": "

    Represents a single cache security group and its status.

    ", + "refs": { + "SecurityGroupMembershipList$member": null + } + }, + "SecurityGroupMembershipList": { + "base": null, + "refs": { + "CacheCluster$SecurityGroups": "

    A list of VPC Security Groups associated with the cache cluster.

    " + } + }, + "Snapshot": { + "base": "

    Represents a copy of an entire cache cluster as of the time when the snapshot was taken.

    ", + "refs": { + "CopySnapshotResult$Snapshot": null, + "CreateSnapshotResult$Snapshot": null, + "DeleteSnapshotResult$Snapshot": null, + "SnapshotList$member": null + } + }, + "SnapshotAlreadyExistsFault": { + "base": "

    You already have a snapshot with the given name.

    ", + "refs": { + } + }, + "SnapshotArnsList": { + "base": null, + "refs": { + "CreateCacheClusterMessage$SnapshotArns": "

    A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot file will be used to populate the node group. The Amazon S3 object name in the ARN cannot contain any commas.

    This parameter is only valid if the Engine parameter is redis.

    Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb
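    A hedged aws-sdk-go sketch of seeding a new node group from such an ARN; the cluster ID is a placeholder and the ARN reuses the example above.

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/elasticache"
        )

        // restoreFromRDB creates a single-node Redis cluster populated from an
        // RDB file in S3. SnapshotArns is only valid when Engine is redis, and
        // the S3 object name in the ARN must not contain commas.
        func restoreFromRDB(svc *elasticache.ElastiCache) error {
            _, err := svc.CreateCacheCluster(&elasticache.CreateCacheClusterInput{
                CacheClusterId: aws.String("restored-redis"), // hypothetical ID
                Engine:         aws.String("redis"),
                CacheNodeType:  aws.String("cache.m3.medium"),
                NumCacheNodes:  aws.Int64(1),
                SnapshotArns:   aws.StringSlice([]string{"arn:aws:s3:::my_bucket/snapshot1.rdb"}),
            })
            return err
        }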

    ", + "CreateReplicationGroupMessage$SnapshotArns": "

    A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot file will be used to populate the node group. The Amazon S3 object name in the ARN cannot contain any commas.

    This parameter is only valid if the Engine parameter is redis.

    Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb

    " + } + }, + "SnapshotFeatureNotSupportedFault": { + "base": "

    You attempted one of the following actions:

    • Creating a snapshot of a Redis cache cluster running on a t1.micro cache node.

    • Creating a snapshot of a cache cluster that is running Memcached rather than Redis.

    Neither of these are supported by ElastiCache.

    ", + "refs": { + } + }, + "SnapshotList": { + "base": null, + "refs": { + "DescribeSnapshotsListMessage$Snapshots": "

    A list of snapshots. Each item in the list contains detailed information about one snapshot.

    " + } + }, + "SnapshotNotFoundFault": { + "base": "

    The requested snapshot name does not refer to an existing snapshot.

    ", + "refs": { + } + }, + "SnapshotQuotaExceededFault": { + "base": "

    The request cannot be processed because it would exceed the maximum number of snapshots.

    ", + "refs": { + } + }, + "SourceType": { + "base": null, + "refs": { + "DescribeEventsMessage$SourceType": "

    The event source to retrieve events for. If no value is specified, all events are returned.

    Valid values are: cache-cluster | cache-parameter-group | cache-security-group | cache-subnet-group
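    For illustration (a sketch, not the service model), filtering events to a single cache cluster with the vendored aws-sdk-go client; the source identifier is a placeholder.

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/elasticache"
        )

        // clusterEvents prints recent event messages for one cache cluster.
        func clusterEvents(svc *elasticache.ElastiCache) error {
            out, err := svc.DescribeEvents(&elasticache.DescribeEventsInput{
                SourceType:       aws.String("cache-cluster"),
                SourceIdentifier: aws.String("myCluster"), // hypothetical ID
            })
            if err != nil {
                return err
            }
            for _, e := range out.Events {
                fmt.Println(aws.StringValue(e.Message))
            }
            return nil
        }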

    ", + "Event$SourceType": "

    Specifies the origin of this event - a cache cluster, a parameter group, a security group, etc.

    " + } + }, + "String": { + "base": null, + "refs": { + "AddTagsToResourceMessage$ResourceName": "

    The Amazon Resource Name (ARN) of the resource to which the tags are to be added, for example arn:aws:elasticache:us-west-2:0123456789:cluster:myCluster or arn:aws:elasticache:us-west-2:0123456789:snapshot:mySnapshot.

    For more information on ARNs, go to Amazon Resource Names (ARNs) and AWS Service Namespaces.
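    A minimal sketch of tagging by ARN via the vendored aws-sdk-go client; the ARN reuses the example above, and the tag key/value are placeholders.

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/elasticache"
        )

        // tagCluster attaches one cost-allocation tag to a cache cluster.
        func tagCluster(svc *elasticache.ElastiCache) error {
            _, err := svc.AddTagsToResource(&elasticache.AddTagsToResourceInput{
                ResourceName: aws.String("arn:aws:elasticache:us-west-2:0123456789:cluster:myCluster"),
                Tags: []*elasticache.Tag{
                    {Key: aws.String("Environment"), Value: aws.String("staging")}, // hypothetical tag
                },
            })
            return err
        }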

    ", + "AuthorizeCacheSecurityGroupIngressMessage$CacheSecurityGroupName": "

    The cache security group which will allow network ingress.

    ", + "AuthorizeCacheSecurityGroupIngressMessage$EC2SecurityGroupName": "

    The Amazon EC2 security group to be authorized for ingress to the cache security group.

    ", + "AuthorizeCacheSecurityGroupIngressMessage$EC2SecurityGroupOwnerId": "

    The AWS account number of the Amazon EC2 security group owner. Note that this is not the same thing as an AWS access key ID - you must provide a valid AWS account number for this parameter.

    ", + "AvailabilityZone$Name": "

    The name of the Availability Zone.

    ", + "AvailabilityZonesList$member": null, + "CacheCluster$CacheClusterId": "

    The user-supplied identifier of the cache cluster. This identifier is a unique key that identifies a cache cluster.

    ", + "CacheCluster$ClientDownloadLandingPage": "

    The URL of the web page where you can download the latest ElastiCache client library.

    ", + "CacheCluster$CacheNodeType": "

    The name of the compute and memory capacity node type for the cache cluster.

    Valid node types are as follows:

    • General purpose:

      • Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

      • Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

    • Compute optimized: cache.c1.xlarge

    • Memory optimized:

      • Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

      • Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

    Notes:

    • All t2 instances are created in an Amazon Virtual Private Cloud (VPC).

    • Redis backup/restore is not supported for t2 instances.

    • Redis Append-only files (AOF) functionality is not supported for t1 or t2 instances.

    For a complete listing of cache node types and specifications, see Amazon ElastiCache Product Features and Details and Cache Node Type-Specific Parameters for Memcached or Cache Node Type-Specific Parameters for Redis.

    ", + "CacheCluster$Engine": "

    The name of the cache engine (memcached or redis) to be used for this cache cluster.

    ", + "CacheCluster$EngineVersion": "

    The version of the cache engine that is used in this cache cluster.

    ", + "CacheCluster$CacheClusterStatus": "

    The current state of this cache cluster, one of the following values: available, creating, deleted, deleting, incompatible-network, modifying, rebooting cache cluster nodes, restore-failed, or snapshotting.

    ", + "CacheCluster$PreferredAvailabilityZone": "

    The name of the Availability Zone in which the cache cluster is located or \"Multiple\" if the cache nodes are located in different Availability Zones.

    ", + "CacheCluster$PreferredMaintenanceWindow": "

    Specifies the weekly time range during which maintenance on the cache cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid values for ddd are:

    • sun

    • mon

    • tue

    • wed

    • thu

    • fri

    • sat

    Example: sun:05:00-sun:09:00

    ", + "CacheCluster$CacheSubnetGroupName": "

    The name of the cache subnet group associated with the cache cluster.

    ", + "CacheCluster$ReplicationGroupId": "

    The replication group to which this cache cluster belongs. If this field is empty, the cache cluster is not associated with any replication group.

    ", + "CacheCluster$SnapshotWindow": "

    The daily time range (in UTC) during which ElastiCache will begin taking a daily snapshot of your cache cluster.

    Example: 05:00-09:00

    ", + "CacheClusterMessage$Marker": "

    Provides an identifier to allow retrieval of paginated results.

    ", + "CacheEngineVersion$Engine": "

    The name of the cache engine.

    ", + "CacheEngineVersion$EngineVersion": "

    The version number of the cache engine.

    ", + "CacheEngineVersion$CacheParameterGroupFamily": "

    The name of the cache parameter group family associated with this cache engine.

    ", + "CacheEngineVersion$CacheEngineDescription": "

    The description of the cache engine.

    ", + "CacheEngineVersion$CacheEngineVersionDescription": "

    The description of the cache engine version.

    ", + "CacheEngineVersionMessage$Marker": "

    Provides an identifier to allow retrieval of paginated results.

    ", + "CacheNode$CacheNodeId": "

    The cache node identifier. A node ID is a numeric identifier (0001, 0002, etc.). The combination of cluster ID and node ID uniquely identifies every cache node used in a customer's AWS account.

    ", + "CacheNode$CacheNodeStatus": "

    The current state of this cache node.

    ", + "CacheNode$ParameterGroupStatus": "

    The status of the parameter group applied to this cache node.

    ", + "CacheNode$SourceCacheNodeId": "

    The ID of the primary node to which this read replica node is synchronized. If this field is empty, then this node is not associated with a primary cache cluster.

    ", + "CacheNode$CustomerAvailabilityZone": "

    The Availability Zone where this node was created and now resides.

    ", + "CacheNodeIdsList$member": null, + "CacheNodeTypeSpecificParameter$ParameterName": "

    The name of the parameter.

    ", + "CacheNodeTypeSpecificParameter$Description": "

    A description of the parameter.

    ", + "CacheNodeTypeSpecificParameter$Source": "

    The source of the parameter value.

    ", + "CacheNodeTypeSpecificParameter$DataType": "

    The valid data type for the parameter.

    ", + "CacheNodeTypeSpecificParameter$AllowedValues": "

    The valid range of values for the parameter.

    ", + "CacheNodeTypeSpecificParameter$MinimumEngineVersion": "

    The earliest cache engine version to which the parameter can apply.

    ", + "CacheNodeTypeSpecificValue$CacheNodeType": "

    The cache node type for which this value applies.

    ", + "CacheNodeTypeSpecificValue$Value": "

    The value for the cache node type.

    ", + "CacheParameterGroup$CacheParameterGroupName": "

    The name of the cache parameter group.

    ", + "CacheParameterGroup$CacheParameterGroupFamily": "

    The name of the cache parameter group family that this cache parameter group is compatible with.

    ", + "CacheParameterGroup$Description": "

    The description for this cache parameter group.

    ", + "CacheParameterGroupDetails$Marker": "

    Provides an identifier to allow retrieval of paginated results.

    ", + "CacheParameterGroupNameMessage$CacheParameterGroupName": "

    The name of the cache parameter group.

    ", + "CacheParameterGroupStatus$CacheParameterGroupName": "

    The name of the cache parameter group.

    ", + "CacheParameterGroupStatus$ParameterApplyStatus": "

    The status of parameter updates.

    ", + "CacheParameterGroupsMessage$Marker": "

    Provides an identifier to allow retrieval of paginated results.

    ", + "CacheSecurityGroup$OwnerId": "

    The AWS account ID of the cache security group owner.

    ", + "CacheSecurityGroup$CacheSecurityGroupName": "

    The name of the cache security group.

    ", + "CacheSecurityGroup$Description": "

    The description of the cache security group.

    ", + "CacheSecurityGroupMembership$CacheSecurityGroupName": "

    The name of the cache security group.

    ", + "CacheSecurityGroupMembership$Status": "

    The membership status in the cache security group. The status changes when a cache security group is modified, or when the cache security groups assigned to a cache cluster are modified.

    ", + "CacheSecurityGroupMessage$Marker": "

    Provides an identifier to allow retrieval of paginated results.

    ", + "CacheSecurityGroupNameList$member": null, + "CacheSubnetGroup$CacheSubnetGroupName": "

    The name of the cache subnet group.

    ", + "CacheSubnetGroup$CacheSubnetGroupDescription": "

    The description of the cache subnet group.

    ", + "CacheSubnetGroup$VpcId": "

    The Amazon Virtual Private Cloud identifier (VPC ID) of the cache subnet group.

    ", + "CacheSubnetGroupMessage$Marker": "

    Provides an identifier to allow retrieval of paginated results.

    ", + "ClusterIdList$member": null, + "CopySnapshotMessage$SourceSnapshotName": "

    The name of an existing snapshot from which to make a copy.

    ", + "CopySnapshotMessage$TargetSnapshotName": "

    A name for the snapshot copy. ElastiCache does not permit overwriting a snapshot, therefore this name must be unique within its context - ElastiCache or an Amazon S3 bucket if exporting.

    Error Message

    • Error Message: The S3 bucket %s already contains an object with key %s.

      Solution: Give the TargetSnapshotName a new and unique value. If exporting a snapshot, you could alternatively create a new Amazon S3 bucket and use this same value for TargetSnapshotName.

    ", + "CopySnapshotMessage$TargetBucket": "

    The Amazon S3 bucket to which the snapshot will be exported. This parameter is used only when exporting a snapshot for external access.

    When using this parameter to export a snapshot, be sure Amazon ElastiCache has the needed permissions to this S3 bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the Amazon ElastiCache User Guide.

    Error Messages:

    You could receive one of the following error messages.

    Error Messages

    • Error Message: ElastiCache has not been granted READ permissions %s on the S3 Bucket.

      Solution: Add List and Read permissions on the bucket.

    • Error Message: ElastiCache has not been granted WRITE permissions %s on the S3 Bucket.

      Solution: Add Upload/Delete permissions on the bucket.

    • Error Message: ElastiCache has not been granted READ_ACP permissions %s on the S3 Bucket.

      Solution: Add View Permissions permissions on the bucket.

    • Error Message: The S3 bucket %s is outside of the region.

      Solution: Before exporting your snapshot, create a new Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket.

    • Error Message: The S3 bucket %s does not exist.

      Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket.

    • Error Message: The S3 bucket %s is not owned by the authenticated user.

      Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket.

    • Error Message: The authenticated user does not have sufficient permissions to perform the desired activity.

      Solution: Contact your system administrator to get the needed permissions.

    For more information, see Exporting a Snapshot in the Amazon ElastiCache User Guide.
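    For illustration only, a hedged aws-sdk-go sketch of the export call described above; the snapshot and bucket names are placeholders, and the bucket must already grant ElastiCache the permissions listed.

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/elasticache"
        )

        // exportSnapshot copies an existing snapshot into an S3 bucket in the
        // same region, making it available outside ElastiCache.
        func exportSnapshot(svc *elasticache.ElastiCache) error {
            _, err := svc.CopySnapshot(&elasticache.CopySnapshotInput{
                SourceSnapshotName: aws.String("my-snapshot"),        // hypothetical name
                TargetSnapshotName: aws.String("my-snapshot-export"), // must be unique within the bucket
                TargetBucket:       aws.String("my-export-bucket"),   // hypothetical bucket
            })
            return err
        }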

    ", + "CreateCacheClusterMessage$CacheClusterId": "

    The node group identifier. This parameter is stored as a lowercase string.

    Constraints:

    • A name must contain from 1 to 20 alphanumeric characters or hyphens.

    • The first character must be a letter.

    • A name cannot end with a hyphen or contain two consecutive hyphens.

    ", + "CreateCacheClusterMessage$ReplicationGroupId": "

    The ID of the replication group to which this cache cluster should belong. If this parameter is specified, the cache cluster will be added to the specified replication group as a read replica; otherwise, the cache cluster will be a standalone primary that is not part of any replication group.

    If the specified replication group is Multi-AZ enabled and the availability zone is not specified, the cache cluster will be created in availability zones that provide the best spread of read replicas across availability zones.

    This parameter is only valid if the Engine parameter is redis.

    ", + "CreateCacheClusterMessage$PreferredAvailabilityZone": "

    The EC2 Availability Zone in which the cache cluster will be created.

    All nodes belonging to this Memcached cache cluster are placed in the preferred Availability Zone. If you want to create your nodes across multiple Availability Zones, use PreferredAvailabilityZones.

    Default: System chosen Availability Zone.

    ", + "CreateCacheClusterMessage$CacheNodeType": "

    The compute and memory capacity of the nodes in the node group.

    Valid node types are as follows:

    • General purpose:

      • Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

      • Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

    • Compute optimized: cache.c1.xlarge

    • Memory optimized:

      • Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

      • Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

    Notes:

    • All t2 instances are created in an Amazon Virtual Private Cloud (VPC).

    • Redis backup/restore is not supported for t2 instances.

    • Redis Append-only files (AOF) functionality is not supported for t1 or t2 instances.

    For a complete listing of cache node types and specifications, see Amazon ElastiCache Product Features and Details and Cache Node Type-Specific Parameters for Memcached or Cache Node Type-Specific Parameters for Redis.

    ", + "CreateCacheClusterMessage$Engine": "

    The name of the cache engine to be used for this cache cluster.

    Valid values for this parameter are:

    memcached | redis

    ", + "CreateCacheClusterMessage$EngineVersion": "

    The version number of the cache engine to be used for this cache cluster. To view the supported cache engine versions, use the DescribeCacheEngineVersions action.

    Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cache cluster or replication group and create it anew with the earlier engine version.

    ", + "CreateCacheClusterMessage$CacheParameterGroupName": "

    The name of the parameter group to associate with this cache cluster. If this argument is omitted, the default parameter group for the specified engine is used.

    ", + "CreateCacheClusterMessage$CacheSubnetGroupName": "

    The name of the subnet group to be used for the cache cluster.

    Use this parameter only when you are creating a cache cluster in an Amazon Virtual Private Cloud (VPC).

    ", + "CreateCacheClusterMessage$SnapshotName": "

    The name of a snapshot from which to restore data into the new node group. The snapshot status changes to restoring while the new node group is being created.

    This parameter is only valid if the Engine parameter is redis.

    ", + "CreateCacheClusterMessage$PreferredMaintenanceWindow": "

    Specifies the weekly time range during which maintenance on the cache cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid values for ddd are:

    • sun

    • mon

    • tue

    • wed

    • thu

    • fri

    • sat

    Example: sun:05:00-sun:09:00

    ", + "CreateCacheClusterMessage$NotificationTopicArn": "

    The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications will be sent.

    The Amazon SNS topic owner must be the same as the cache cluster owner.

    ", + "CreateCacheClusterMessage$SnapshotWindow": "

    The daily time range (in UTC) during which ElastiCache will begin taking a daily snapshot of your node group.

    Example: 05:00-09:00

    If you do not specify this parameter, then ElastiCache will automatically choose an appropriate time range.

    Note: This parameter is only valid if the Engine parameter is redis.

    ", + "CreateCacheParameterGroupMessage$CacheParameterGroupName": "

    A user-specified name for the cache parameter group.

    ", + "CreateCacheParameterGroupMessage$CacheParameterGroupFamily": "

    The name of the cache parameter group family the cache parameter group can be used with.

    Valid values are: memcached1.4 | redis2.6 | redis2.8

    ", + "CreateCacheParameterGroupMessage$Description": "

    A user-specified description for the cache parameter group.

    ", + "CreateCacheSecurityGroupMessage$CacheSecurityGroupName": "

    A name for the cache security group. This value is stored as a lowercase string.

    Constraints: Must contain no more than 255 alphanumeric characters. Cannot be the word \"Default\".

    Example: mysecuritygroup

    ", + "CreateCacheSecurityGroupMessage$Description": "

    A description for the cache security group.

    ", + "CreateCacheSubnetGroupMessage$CacheSubnetGroupName": "

    A name for the cache subnet group. This value is stored as a lowercase string.

    Constraints: Must contain no more than 255 alphanumeric characters or hyphens.

    Example: mysubnetgroup

    ", + "CreateCacheSubnetGroupMessage$CacheSubnetGroupDescription": "

    A description for the cache subnet group.

    ", + "CreateReplicationGroupMessage$ReplicationGroupId": "

    The replication group identifier. This parameter is stored as a lowercase string.

    Constraints:

    • A name must contain from 1 to 20 alphanumeric characters or hyphens.

    • The first character must be a letter.

    • A name cannot end with a hyphen or contain two consecutive hyphens.

    ", + "CreateReplicationGroupMessage$ReplicationGroupDescription": "

    A user-created description for the replication group.

    ", + "CreateReplicationGroupMessage$PrimaryClusterId": "

    The identifier of the cache cluster that will serve as the primary for this replication group. This cache cluster must already exist and have a status of available.

    This parameter is not required if NumCacheClusters is specified.
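    A minimal sketch (identifiers are placeholders, not part of the service model) of building a replication group around an existing primary via the vendored aws-sdk-go client:

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/elasticache"
        )

        // createGroup wraps an existing, available Redis cluster in a new
        // replication group so read replicas can be added to it.
        func createGroup(svc *elasticache.ElastiCache) error {
            _, err := svc.CreateReplicationGroup(&elasticache.CreateReplicationGroupInput{
                ReplicationGroupId:          aws.String("my-repl-group"),    // hypothetical ID
                ReplicationGroupDescription: aws.String("example group"),
                PrimaryClusterId:            aws.String("my-redis-primary"), // must have status available
            })
            return err
        }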

    ", + "CreateReplicationGroupMessage$CacheNodeType": "

    The compute and memory capacity of the nodes in the node group.

    Valid node types are as follows:

    • General purpose:

      • Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

      • Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

    • Compute optimized: cache.c1.xlarge

    • Memory optimized:

      • Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

      • Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

    Notes:

    • All t2 instances are created in an Amazon Virtual Private Cloud (VPC).

    • Redis backup/restore is not supported for t2 instances.

    • Redis Append-only files (AOF) functionality is not supported for t1 or t2 instances.

    For a complete listing of cache node types and specifications, see Amazon ElastiCache Product Features and Details and Cache Node Type-Specific Parameters for Memcached or Cache Node Type-Specific Parameters for Redis.

    ", + "CreateReplicationGroupMessage$Engine": "

    The name of the cache engine to be used for the cache clusters in this replication group.

    Default: redis

    ", + "CreateReplicationGroupMessage$EngineVersion": "

    The version number of the cache engine to be used for the cache clusters in this replication group. To view the supported cache engine versions, use the DescribeCacheEngineVersions action.

    Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version in the ElastiCache User Guide), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cache cluster or replication group and create it anew with the earlier engine version.

    ", + "CreateReplicationGroupMessage$CacheParameterGroupName": "

    The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used.

    ", + "CreateReplicationGroupMessage$CacheSubnetGroupName": "

    The name of the cache subnet group to be used for the replication group.

    ", + "CreateReplicationGroupMessage$SnapshotName": "

    The name of a snapshot from which to restore data into the new node group. The snapshot status changes to restoring while the new node group is being created.

    This parameter is only valid if the Engine parameter is redis.

    ", + "CreateReplicationGroupMessage$PreferredMaintenanceWindow": "

    Specifies the weekly time range during which maintenance on the cache cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid values for ddd are:

    • sun

    • mon

    • tue

    • wed

    • thu

    • fri

    • sat

    Example: sun:05:00-sun:09:00

    ", + "CreateReplicationGroupMessage$NotificationTopicArn": "

    The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications will be sent.

    The Amazon SNS topic owner must be the same as the cache cluster owner.

    ", + "CreateReplicationGroupMessage$SnapshotWindow": "

    The daily time range (in UTC) during which ElastiCache will begin taking a daily snapshot of your node group.

    Example: 05:00-09:00

    If you do not specify this parameter, then ElastiCache will automatically choose an appropriate time range.

    This parameter is only valid if the Engine parameter is redis.

    ", + "CreateSnapshotMessage$CacheClusterId": "

    The identifier of an existing cache cluster. The snapshot will be created from this cache cluster.

    ", + "CreateSnapshotMessage$SnapshotName": "

    A name for the snapshot being created.

    ", + "DeleteCacheClusterMessage$CacheClusterId": "

    The cache cluster identifier for the cluster to be deleted. This parameter is not case sensitive.

    ", + "DeleteCacheClusterMessage$FinalSnapshotIdentifier": "

    The user-supplied name of a final cache cluster snapshot. This is the unique name that identifies the snapshot. ElastiCache creates the snapshot, and then deletes the cache cluster immediately afterward.

    ", + "DeleteCacheParameterGroupMessage$CacheParameterGroupName": "

    The name of the cache parameter group to delete.

    The specified cache security group must not be associated with any cache clusters.

    ", + "DeleteCacheSecurityGroupMessage$CacheSecurityGroupName": "

    The name of the cache security group to delete.

    You cannot delete the default security group.

    ", + "DeleteCacheSubnetGroupMessage$CacheSubnetGroupName": "

    The name of the cache subnet group to delete.

    Constraints: Must contain no more than 255 alphanumeric characters or hyphens.

    ", + "DeleteReplicationGroupMessage$ReplicationGroupId": "

    The identifier for the cluster to be deleted. This parameter is not case sensitive.

    ", + "DeleteReplicationGroupMessage$FinalSnapshotIdentifier": "

    The name of a final node group snapshot. ElastiCache creates the snapshot from the primary node in the cluster, rather than one of the replicas; this is to ensure that it captures the freshest data. After the final snapshot is taken, the cluster is immediately deleted.

    ", + "DeleteSnapshotMessage$SnapshotName": "

    The name of the snapshot to be deleted.

    ", + "DescribeCacheClustersMessage$CacheClusterId": "

    The user-supplied cluster identifier. If this parameter is specified, only information about that specific cache cluster is returned. This parameter isn't case sensitive.

    ", + "DescribeCacheClustersMessage$Marker": "

    An optional marker returned from a prior request. Use this marker for pagination of results from this action. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
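    The same marker convention applies across the Describe actions in this API. A minimal aws-sdk-go pagination loop, shown here as a sketch against DescribeCacheClusters:

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/elasticache"
        )

        // listAllClusters pages through every cache cluster, feeding each
        // response's Marker back into the next request until none is returned.
        func listAllClusters(svc *elasticache.ElastiCache) error {
            input := &elasticache.DescribeCacheClustersInput{MaxRecords: aws.Int64(100)}
            for {
                out, err := svc.DescribeCacheClusters(input)
                if err != nil {
                    return err
                }
                for _, c := range out.CacheClusters {
                    fmt.Println(aws.StringValue(c.CacheClusterId))
                }
                if out.Marker == nil {
                    return nil
                }
                input.Marker = out.Marker
            }
        }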

    ", + "DescribeCacheEngineVersionsMessage$Engine": "

    The cache engine to return. Valid values: memcached | redis

    ", + "DescribeCacheEngineVersionsMessage$EngineVersion": "

    The cache engine version to return.

    Example: 1.4.14

    ", + "DescribeCacheEngineVersionsMessage$CacheParameterGroupFamily": "

    The name of a specific cache parameter group family to return details for.

    Constraints:

    • Must be 1 to 255 alphanumeric characters

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    ", + "DescribeCacheEngineVersionsMessage$Marker": "

    An optional marker returned from a prior request. Use this marker for pagination of results from this action. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeCacheParameterGroupsMessage$CacheParameterGroupName": "

    The name of a specific cache parameter group to return details for.

    ", + "DescribeCacheParameterGroupsMessage$Marker": "

    An optional marker returned from a prior request. Use this marker for pagination of results from this action. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeCacheParametersMessage$CacheParameterGroupName": "

    The name of a specific cache parameter group to return details for.

    ", + "DescribeCacheParametersMessage$Source": "

    The parameter types to return.

    Valid values: user | system | engine-default

    ", + "DescribeCacheParametersMessage$Marker": "

    An optional marker returned from a prior request. Use this marker for pagination of results from this action. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeCacheSecurityGroupsMessage$CacheSecurityGroupName": "

    The name of the cache security group to return details for.

    ", + "DescribeCacheSecurityGroupsMessage$Marker": "

    An optional marker returned from a prior request. Use this marker for pagination of results from this action. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeCacheSubnetGroupsMessage$CacheSubnetGroupName": "

    The name of the cache subnet group to return details for.

    ", + "DescribeCacheSubnetGroupsMessage$Marker": "

    An optional marker returned from a prior request. Use this marker for pagination of results from this action. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeEngineDefaultParametersMessage$CacheParameterGroupFamily": "

    The name of the cache parameter group family. Valid values are: memcached1.4 | redis2.6 | redis2.8

    ", + "DescribeEngineDefaultParametersMessage$Marker": "

    An optional marker returned from a prior request. Use this marker for pagination of results from this action. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeEventsMessage$SourceIdentifier": "

    The identifier of the event source for which events will be returned. If not specified, then all sources are included in the response.

    ", + "DescribeEventsMessage$Marker": "

    An optional marker returned from a prior request. Use this marker for pagination of results from this action. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeReplicationGroupsMessage$ReplicationGroupId": "

    The identifier for the replication group to be described. This parameter is not case sensitive.

    If you do not specify this parameter, information about all replication groups is returned.

    ", + "DescribeReplicationGroupsMessage$Marker": "

    An optional marker returned from a prior request. Use this marker for pagination of results from this action. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeReservedCacheNodesMessage$ReservedCacheNodeId": "

    The reserved cache node identifier filter value. Use this parameter to show only the reservation that matches the specified reservation ID.

    ", + "DescribeReservedCacheNodesMessage$ReservedCacheNodesOfferingId": "

    The offering identifier filter value. Use this parameter to show only purchased reservations matching the specified offering identifier.

    ", + "DescribeReservedCacheNodesMessage$CacheNodeType": "

    The cache node type filter value. Use this parameter to show only those reservations matching the specified cache node type.

    Valid node types are as follows:

    • General purpose:

      • Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

      • Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

    • Compute optimized: cache.c1.xlarge

    • Memory optimized:

      • Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

      • Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

    Notes:

    • All t2 instances are created in an Amazon Virtual Private Cloud (VPC).

    • Redis backup/restore is not supported for t2 instances.

    • Redis Append-only files (AOF) functionality is not supported for t1 or t2 instances.

    For a complete listing of cache node types and specifications, see Amazon ElastiCache Product Features and Details and Cache Node Type-Specific Parameters for Memcached or Cache Node Type-Specific Parameters for Redis.

    ", + "DescribeReservedCacheNodesMessage$Duration": "

    The duration filter value, specified in years or seconds. Use this parameter to show only reservations for this duration.

    Valid Values: 1 | 3 | 31536000 | 94608000

    ", + "DescribeReservedCacheNodesMessage$ProductDescription": "

    The product description filter value. Use this parameter to show only those reservations matching the specified product description.

    ", + "DescribeReservedCacheNodesMessage$OfferingType": "

    The offering type filter value. Use this parameter to show only the available offerings matching the specified offering type.

    Valid values: \"Light Utilization\"|\"Medium Utilization\"|\"Heavy Utilization\"

    ", + "DescribeReservedCacheNodesMessage$Marker": "

    An optional marker returned from a prior request. Use this marker for pagination of results from this action. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeReservedCacheNodesOfferingsMessage$ReservedCacheNodesOfferingId": "

    The offering identifier filter value. Use this parameter to show only the available offering that matches the specified reservation identifier.

    Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706

    ", + "DescribeReservedCacheNodesOfferingsMessage$CacheNodeType": "

    The cache node type filter value. Use this parameter to show only the available offerings matching the specified cache node type.

    Valid node types are as follows:

    • General purpose:

      • Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

      • Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

    • Compute optimized: cache.c1.xlarge

    • Memory optimized:

      • Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

      • Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

    Notes:

    • All t2 instances are created in an Amazon Virtual Private Cloud (VPC).

    • Redis backup/restore is not supported for t2 instances.

    • Redis Append-only files (AOF) functionality is not supported for t1 or t2 instances.

    For a complete listing of cache node types and specifications, see Amazon ElastiCache Product Features and Details and Cache Node Type-Specific Parameters for Memcached or Cache Node Type-Specific Parameters for Redis.

    ", + "DescribeReservedCacheNodesOfferingsMessage$Duration": "

    Duration filter value, specified in years or seconds. Use this parameter to show only reservations for a given duration.

    Valid Values: 1 | 3 | 31536000 | 94608000

    ", + "DescribeReservedCacheNodesOfferingsMessage$ProductDescription": "

    The product description filter value. Use this parameter to show only the available offerings matching the specified product description.

    ", + "DescribeReservedCacheNodesOfferingsMessage$OfferingType": "

    The offering type filter value. Use this parameter to show only the available offerings matching the specified offering type.

    Valid Values: \"Light Utilization\"|\"Medium Utilization\"|\"Heavy Utilization\"

    ", + "DescribeReservedCacheNodesOfferingsMessage$Marker": "

    An optional marker returned from a prior request. Use this marker for pagination of results from this action. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeSnapshotsListMessage$Marker": "

    An optional marker returned from a prior request. Use this marker for pagination of results from this action. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeSnapshotsMessage$CacheClusterId": "

    A user-supplied cluster identifier. If this parameter is specified, only snapshots associated with that specific cache cluster will be described.

    ", + "DescribeSnapshotsMessage$SnapshotName": "

    A user-supplied name of the snapshot. If this parameter is specified, only this snapshot will be described.

    ", + "DescribeSnapshotsMessage$SnapshotSource": "

    If set to system, the output shows snapshots that were automatically created by ElastiCache. If set to user, the output shows snapshots that were manually created. If omitted, the output shows both automatically and manually created snapshots.
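    A sketch of the system filter via the vendored aws-sdk-go client (the cluster ID is a placeholder):

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/elasticache"
        )

        // autoSnapshots lists only the snapshots ElastiCache created
        // automatically for one cluster.
        func autoSnapshots(svc *elasticache.ElastiCache) error {
            out, err := svc.DescribeSnapshots(&elasticache.DescribeSnapshotsInput{
                CacheClusterId: aws.String("myCluster"), // hypothetical ID
                SnapshotSource: aws.String("system"),    // use "user" for manual snapshots
            })
            if err != nil {
                return err
            }
            for _, s := range out.Snapshots {
                fmt.Println(aws.StringValue(s.SnapshotName))
            }
            return nil
        }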

    ", + "DescribeSnapshotsMessage$Marker": "

    An optional marker returned from a prior request. Use this marker for pagination of results from this action. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "EC2SecurityGroup$Status": "

    The status of the Amazon EC2 security group.

    ", + "EC2SecurityGroup$EC2SecurityGroupName": "

    The name of the Amazon EC2 security group.

    ", + "EC2SecurityGroup$EC2SecurityGroupOwnerId": "

    The AWS account ID of the Amazon EC2 security group owner.

    ", + "Endpoint$Address": "

    The DNS hostname of the cache node.

    ", + "EngineDefaults$CacheParameterGroupFamily": "

    Specifies the name of the cache parameter group family to which the engine default parameters apply.

    ", + "EngineDefaults$Marker": "

    Provides an identifier to allow retrieval of paginated results.

    ", + "Event$SourceIdentifier": "

    The identifier for the source of the event. For example, if the event occurred at the cache cluster level, the identifier would be the name of the cache cluster.

    ", + "Event$Message": "

    The text of the event.

    ", + "EventsMessage$Marker": "

    Provides an identifier to allow retrieval of paginated results.

    ", + "KeyList$member": null, + "ListAllowedNodeTypeModificationsMessage$CacheClusterId": "

    The name of the cache cluster you want to scale up to a larger node instance type. ElastiCache uses the cluster id to identify the current node type of this cluster and from that to create a list of node types you can scale up to.

    You must provide a value for either the CacheClusterId or the ReplicationGroupId.

    ", + "ListAllowedNodeTypeModificationsMessage$ReplicationGroupId": "

    The name of the replication group you want to scale up to a larger node type. ElastiCache uses the replication group id to identify the current node type being used by this replication group, and from that to create a list of node types you can scale up to.

    You must provide a value for either the CacheClusterId or the ReplicationGroupId.
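    A hedged sketch of the two-step scale-up flow this action enables, using the vendored aws-sdk-go client; the cluster ID is a placeholder, and picking the first allowed type is purely for brevity.

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/elasticache"
        )

        // scaleUp discovers the node types a cluster may scale up to, then
        // requests the first one via ModifyCacheCluster.
        func scaleUp(svc *elasticache.ElastiCache) error {
            mods, err := svc.ListAllowedNodeTypeModifications(&elasticache.ListAllowedNodeTypeModificationsInput{
                CacheClusterId: aws.String("myCluster"), // hypothetical ID
            })
            if err != nil || len(mods.ScaleUpModifications) == 0 {
                return err
            }
            _, err = svc.ModifyCacheCluster(&elasticache.ModifyCacheClusterInput{
                CacheClusterId:   aws.String("myCluster"),
                CacheNodeType:    mods.ScaleUpModifications[0], // a value from the allowed list
                ApplyImmediately: aws.Bool(true),
            })
            return err
        }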

    ", + "ListTagsForResourceMessage$ResourceName": "

    The Amazon Resource Name (ARN) of the resource for which you want the list of tags, for example arn:aws:elasticache:us-west-2:0123456789:cluster:myCluster or arn:aws:elasticache:us-west-2:0123456789:snapshot:mySnapshot.

    For more information on ARNs, go to Amazon Resource Names (ARNs) and AWS Service Namespaces.

    ", + "ModifyCacheClusterMessage$CacheClusterId": "

    The cache cluster identifier. This value is stored as a lowercase string.

    ", + "ModifyCacheClusterMessage$PreferredMaintenanceWindow": "

    Specifies the weekly time range during which maintenance on the cache cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid values for ddd are:

    • sun

    • mon

    • tue

    • wed

    • thu

    • fri

    • sat

    Example: sun:05:00-sun:09:00

    ", + "ModifyCacheClusterMessage$NotificationTopicArn": "

    The Amazon Resource Name (ARN) of the Amazon SNS topic to which notifications will be sent.

    The Amazon SNS topic owner must be same as the cache cluster owner.

    ", + "ModifyCacheClusterMessage$CacheParameterGroupName": "

    The name of the cache parameter group to apply to this cache cluster. This change is asynchronously applied as soon as possible for parameters when the ApplyImmediately parameter is specified as true for this request.

    ", + "ModifyCacheClusterMessage$NotificationTopicStatus": "

    The status of the Amazon SNS notification topic. Notifications are sent only if the status is active.

    Valid values: active | inactive

    ", + "ModifyCacheClusterMessage$EngineVersion": "

    The upgraded version of the cache engine to be run on the cache nodes.

    Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cache cluster and create it anew with the earlier engine version.

    ", + "ModifyCacheClusterMessage$SnapshotWindow": "

    The daily time range (in UTC) during which ElastiCache will begin taking a daily snapshot of your cache cluster.

    ", + "ModifyCacheClusterMessage$CacheNodeType": "

    A valid cache node type that you want to scale this cache cluster to. The value of this parameter must be one of the ScaleUpModifications values returned by the ListAllowedNodeTypeModifications action.

    ", + "ModifyCacheParameterGroupMessage$CacheParameterGroupName": "

    The name of the cache parameter group to modify.

    ", + "ModifyCacheSubnetGroupMessage$CacheSubnetGroupName": "

    The name for the cache subnet group. This value is stored as a lowercase string.

    Constraints: Must contain no more than 255 alphanumeric characters or hyphens.

    Example: mysubnetgroup

    ", + "ModifyCacheSubnetGroupMessage$CacheSubnetGroupDescription": "

    A description for the cache subnet group.

    ", + "ModifyReplicationGroupMessage$ReplicationGroupId": "

    The identifier of the replication group to modify.

    ", + "ModifyReplicationGroupMessage$ReplicationGroupDescription": "

    A description for the replication group. Maximum length is 255 characters.

    ", + "ModifyReplicationGroupMessage$PrimaryClusterId": "

    If this parameter is specified, ElastiCache will promote the specified cluster in the specified replication group to the primary role. The nodes of all other clusters in the replication group will be read replicas.

    ", + "ModifyReplicationGroupMessage$SnapshottingClusterId": "

    The cache cluster ID that will be used as the daily snapshot source for the replication group.

    ", + "ModifyReplicationGroupMessage$PreferredMaintenanceWindow": "

    Specifies the weekly time range during which maintenance on the cache cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid values for ddd are:

    • sun

    • mon

    • tue

    • wed

    • thu

    • fri

    • sat

    Example: sun:05:00-sun:09:00

    ", + "ModifyReplicationGroupMessage$NotificationTopicArn": "

    The Amazon Resource Name (ARN) of the Amazon SNS topic to which notifications will be sent.

    The Amazon SNS topic owner must be same as the replication group owner.

    ", + "ModifyReplicationGroupMessage$CacheParameterGroupName": "

    The name of the cache parameter group to apply to all of the clusters in this replication group. This change is asynchronously applied as soon as possible for parameters when the ApplyImmediately parameter is specified as true for this request.

    ", + "ModifyReplicationGroupMessage$NotificationTopicStatus": "

    The status of the Amazon SNS notification topic for the replication group. Notifications are sent only if the status is active.

    Valid values: active | inactive

    ", + "ModifyReplicationGroupMessage$EngineVersion": "

    The upgraded version of the cache engine to be run on the cache clusters in the replication group.

    Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing replication group and create it anew with the earlier engine version.

    ", + "ModifyReplicationGroupMessage$SnapshotWindow": "

    The daily time range (in UTC) during which ElastiCache will begin taking a daily snapshot of the node group specified by SnapshottingClusterId.

    Example: 05:00-09:00

    If you do not specify this parameter, then ElastiCache will automatically choose an appropriate time range.

    ", + "ModifyReplicationGroupMessage$CacheNodeType": "

    A valid cache node type that you want to scale this replication group to. The value of this parameter must be one of the ScaleUpModifications values returned by the ListAllowedNodeTypeModifications action.

    ", + "NodeGroup$NodeGroupId": "

    The identifier for the node group. A replication group contains only one node group; therefore, the node group ID is 0001.

    ", + "NodeGroup$Status": "

    The current state of this replication group - creating, available, etc.

    ", + "NodeGroupMember$CacheClusterId": "

    The ID of the cache cluster to which the node belongs.

    ", + "NodeGroupMember$CacheNodeId": "

    The ID of the node within its cache cluster. A node ID is a numeric identifier (0001, 0002, etc.).

    ", + "NodeGroupMember$PreferredAvailabilityZone": "

    The name of the Availability Zone in which the node is located.

    ", + "NodeGroupMember$CurrentRole": "

    The role that is currently assigned to the node - primary or replica.

    ", + "NodeSnapshot$CacheNodeId": "

    The cache node identifier for the node in the source cache cluster.

    ", + "NodeSnapshot$CacheSize": "

    The size of the cache on the source cache node.

    ", + "NodeTypeList$member": null, + "NotificationConfiguration$TopicArn": "

    The Amazon Resource Name (ARN) that identifies the topic.

    ", + "NotificationConfiguration$TopicStatus": "

    The current state of the topic.

    ", + "Parameter$ParameterName": "

    The name of the parameter.

    ", + "Parameter$ParameterValue": "

    The value of the parameter.

    ", + "Parameter$Description": "

    A description of the parameter.

    ", + "Parameter$Source": "

    The source of the parameter.

    ", + "Parameter$DataType": "

    The valid data type for the parameter.

    ", + "Parameter$AllowedValues": "

    The valid range of values for the parameter.

    ", + "Parameter$MinimumEngineVersion": "

    The earliest cache engine version to which the parameter can apply.

    ", + "ParameterNameValue$ParameterName": "

    The name of the parameter.

    ", + "ParameterNameValue$ParameterValue": "

    The value of the parameter.

    ", + "PendingModifiedValues$EngineVersion": "

    The new cache engine version that the cache cluster will run.

    ", + "PendingModifiedValues$CacheNodeType": "

    The cache node type that this cache cluster or replication group will be scaled to.

    ", + "PreferredAvailabilityZoneList$member": null, + "PurchaseReservedCacheNodesOfferingMessage$ReservedCacheNodesOfferingId": "

    The ID of the reserved cache node offering to purchase.

    Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706

    ", + "PurchaseReservedCacheNodesOfferingMessage$ReservedCacheNodeId": "

    A customer-specified identifier to track this reservation.

    The Reserved Cache Node ID is a unique customer-specified identifier used to track this reservation. If this parameter is not specified, ElastiCache automatically generates an identifier for the reservation.

    Example: myreservationID

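    A minimal sketch (not part of this patch) of this purchase through the Go client, reusing the example IDs above; omit ReservedCacheNodeId to let ElastiCache generate one:

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/elasticache"
        )

        func main() {
            svc := elasticache.New(session.Must(session.NewSession()))
            out, err := svc.PurchaseReservedCacheNodesOffering(
                &elasticache.PurchaseReservedCacheNodesOfferingInput{
                    ReservedCacheNodesOfferingId: aws.String("438012d3-4052-4cc7-b2e3-8d3372e0e706"),
                    ReservedCacheNodeId:          aws.String("myreservationID"),
                })
            if err != nil {
                fmt.Println(err)
                return
            }
            fmt.Println(out.ReservedCacheNode)
        }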
    ", + "RebootCacheClusterMessage$CacheClusterId": "

    The cache cluster identifier. This parameter is stored as a lowercase string.

    ", + "RecurringCharge$RecurringChargeFrequency": "

    The frequency of the recurring charge.

    ", + "RemoveTagsFromResourceMessage$ResourceName": "

    The Amazon Resource Name (ARN) of the resource from which you want the tags removed, for example arn:aws:elasticache:us-west-2:0123456789:cluster:myCluster or arn:aws:elasticache:us-west-2:0123456789:snapshot:mySnapshot.

    For more information on ARNs, go to Amazon Resource Names (ARNs) and AWS Service Namespaces.

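    A minimal sketch (not part of this patch) of the call this message maps to in the Go client, reusing the example cluster ARN above; the tag key is a placeholder:

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/elasticache"
        )

        func main() {
            svc := elasticache.New(session.Must(session.NewSession()))
            out, err := svc.RemoveTagsFromResource(&elasticache.RemoveTagsFromResourceInput{
                ResourceName: aws.String("arn:aws:elasticache:us-west-2:0123456789:cluster:myCluster"),
                TagKeys:      []*string{aws.String("cost-center")}, // placeholder key
            })
            if err != nil {
                fmt.Println(err)
                return
            }
            fmt.Println(out.TagList) // tags remaining on the resource
        }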
    ", + "ReplicationGroup$ReplicationGroupId": "

    The identifier for the replication group.

    ", + "ReplicationGroup$Description": "

    The description of the replication group.

    ", + "ReplicationGroup$Status": "

    The current state of this replication group - creating, available, etc.

    ", + "ReplicationGroup$SnapshottingClusterId": "

    The cache cluster ID that is used as the daily snapshot source for the replication group.

    ", + "ReplicationGroupMessage$Marker": "

    Provides an identifier to allow retrieval of paginated results.

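    Marker fields like this one drive the pagination scheme declared in the paginators-1.json added later in this patch; the generated *Pages helpers feed each response's Marker back into the next request automatically. A minimal sketch (not part of this patch):

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/elasticache"
        )

        func main() {
            svc := elasticache.New(session.Must(session.NewSession()))
            err := svc.DescribeReplicationGroupsPages(
                &elasticache.DescribeReplicationGroupsInput{},
                func(page *elasticache.DescribeReplicationGroupsOutput, lastPage bool) bool {
                    for _, rg := range page.ReplicationGroups {
                        fmt.Println(aws.StringValue(rg.ReplicationGroupId))
                    }
                    return true // keep paging until the Marker is exhausted
                })
            if err != nil {
                fmt.Println(err)
            }
        }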
    ", + "ReplicationGroupPendingModifiedValues$PrimaryClusterId": "

    The primary cluster ID which will be applied immediately (if --apply-immediately was specified), or during the next maintenance window.

    ", + "ReservedCacheNode$ReservedCacheNodeId": "

    The unique identifier for the reservation.

    ", + "ReservedCacheNode$ReservedCacheNodesOfferingId": "

    The offering identifier.

    ", + "ReservedCacheNode$CacheNodeType": "

    The cache node type for the reserved cache nodes.

    Valid node types are as follows:

    • General purpose:

      • Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

      • Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

    • Compute optimized: cache.c1.xlarge

    • Memory optimized:

      • Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

      • Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

    Notes:

    • All t2 instances are created in an Amazon Virtual Private Cloud (VPC).

    • Redis backup/restore is not supported for t2 instances.

    • Redis Append-only files (AOF) functionality is not supported for t1 or t2 instances.

    For a complete listing of cache node types and specifications, see Amazon ElastiCache Product Features and Details and Cache Node Type-Specific Parameters for Memcached or Cache Node Type-Specific Parameters for Redis.

    ", + "ReservedCacheNode$ProductDescription": "

    The description of the reserved cache node.

    ", + "ReservedCacheNode$OfferingType": "

    The offering type of this reserved cache node.

    ", + "ReservedCacheNode$State": "

    The state of the reserved cache node.

    ", + "ReservedCacheNodeMessage$Marker": "

    Provides an identifier to allow retrieval of paginated results.

    ", + "ReservedCacheNodesOffering$ReservedCacheNodesOfferingId": "

    A unique identifier for the reserved cache node offering.

    ", + "ReservedCacheNodesOffering$CacheNodeType": "

    The cache node type for the reserved cache node.

    Valid node types are as follows:

    • General purpose:

      • Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

      • Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

    • Compute optimized: cache.c1.xlarge

    • Memory optimized:

      • Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

      • Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

    Notes:

    • All t2 instances are created in an Amazon Virtual Private Cloud (VPC).

    • Redis backup/restore is not supported for t2 instances.

    • Redis Append-only files (AOF) functionality is not supported for t1 or t2 instances.

    For a complete listing of cache node types and specifications, see Amazon ElastiCache Product Features and Details and Cache Node Type-Specific Parameters for Memcached or Cache Node Type-Specific Parameters for Redis.

    ", + "ReservedCacheNodesOffering$ProductDescription": "

    The cache engine used by the offering.

    ", + "ReservedCacheNodesOffering$OfferingType": "

    The offering type.

    ", + "ReservedCacheNodesOfferingMessage$Marker": "

    Provides an identifier to allow retrieval of paginated results.

    ", + "ResetCacheParameterGroupMessage$CacheParameterGroupName": "

    The name of the cache parameter group to reset.

    ", + "RevokeCacheSecurityGroupIngressMessage$CacheSecurityGroupName": "

    The name of the cache security group to revoke ingress from.

    ", + "RevokeCacheSecurityGroupIngressMessage$EC2SecurityGroupName": "

    The name of the Amazon EC2 security group to revoke access from.

    ", + "RevokeCacheSecurityGroupIngressMessage$EC2SecurityGroupOwnerId": "

    The AWS account number of the Amazon EC2 security group owner. Note that this is not the same thing as an AWS access key ID - you must provide a valid AWS account number for this parameter.

    ", + "SecurityGroupIdsList$member": null, + "SecurityGroupMembership$SecurityGroupId": "

    The identifier of the cache security group.

    ", + "SecurityGroupMembership$Status": "

    The status of the cache security group membership. The status changes whenever a cache security group is modified, or when the cache security groups assigned to a cache cluster are modified.

    ", + "Snapshot$SnapshotName": "

    The name of a snapshot. For an automatic snapshot, the name is system-generated; for a manual snapshot, this is the user-provided name.

    ", + "Snapshot$CacheClusterId": "

    The user-supplied identifier of the source cache cluster.

    ", + "Snapshot$SnapshotStatus": "

    The status of the snapshot. Valid values: creating | available | restoring | copying | deleting.

    ", + "Snapshot$SnapshotSource": "

    Indicates whether the snapshot is from an automatic backup (automated) or was created manually (manual).

    ", + "Snapshot$CacheNodeType": "

    The name of the compute and memory capacity node type for the source cache cluster.

    Valid node types are as follows:

    • General purpose:

      • Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

      • Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

    • Compute optimized: cache.c1.xlarge

    • Memory optimized:

      • Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

      • Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

    Notes:

    • All t2 instances are created in an Amazon Virtual Private Cloud (VPC).

    • Redis backup/restore is not supported for t2 instances.

    • Redis Append-only files (AOF) functionality is not supported for t1 or t2 instances.

    For a complete listing of cache node types and specifications, see Amazon ElastiCache Product Features and Details and Cache Node Type-Specific Parameters for Memcached or Cache Node Type-Specific Parameters for Redis.

    ", + "Snapshot$Engine": "

    The name of the cache engine (memcached or redis) used by the source cache cluster.

    ", + "Snapshot$EngineVersion": "

    The version of the cache engine used by the source cache cluster.

    ", + "Snapshot$PreferredAvailabilityZone": "

    The name of the Availability Zone in which the source cache cluster is located.

    ", + "Snapshot$PreferredMaintenanceWindow": "

    Specifies the weekly time range during which maintenance on the cache cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H clock, UTC). The minimum maintenance window is a 60-minute period. Valid values for ddd are:

    • sun

    • mon

    • tue

    • wed

    • thu

    • fri

    • sat

    Example: sun:05:00-sun:09:00

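    A minimal sketch (not part of this patch) of supplying a window in this format when modifying a cluster; the cluster ID is a placeholder:

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/elasticache"
        )

        func main() {
            svc := elasticache.New(session.Must(session.NewSession()))
            _, err := svc.ModifyCacheCluster(&elasticache.ModifyCacheClusterInput{
                CacheClusterId:             aws.String("myCluster"), // placeholder
                PreferredMaintenanceWindow: aws.String("sun:05:00-sun:09:00"),
            })
            if err != nil {
                fmt.Println(err)
            }
        }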
    ", + "Snapshot$TopicArn": "

    The Amazon Resource Name (ARN) for the topic used by the source cache cluster for publishing notifications.

    ", + "Snapshot$CacheParameterGroupName": "

    The cache parameter group that is associated with the source cache cluster.

    ", + "Snapshot$CacheSubnetGroupName": "

    The name of the cache subnet group associated with the source cache cluster.

    ", + "Snapshot$VpcId": "

    The Amazon Virtual Private Cloud identifier (VPC ID) of the cache subnet group for the source cache cluster.

    ", + "Snapshot$SnapshotWindow": "

    The daily time range during which ElastiCache takes snapshots of the source cache cluster.

    ", + "SnapshotArnsList$member": null, + "Subnet$SubnetIdentifier": "

    The unique identifier for the subnet.

    ", + "SubnetIdentifierList$member": null, + "Tag$Key": "

    The key for the tag.

    ", + "Tag$Value": "

    The tag's value. May be null.

    " + } + }, + "Subnet": { + "base": "

    Represents the subnet associated with a cache cluster. This parameter refers to subnets defined in Amazon Virtual Private Cloud (Amazon VPC) and used with ElastiCache.

    ", + "refs": { + "SubnetList$member": null + } + }, + "SubnetIdentifierList": { + "base": null, + "refs": { + "CreateCacheSubnetGroupMessage$SubnetIds": "

    A list of VPC subnet IDs for the cache subnet group.

    ", + "ModifyCacheSubnetGroupMessage$SubnetIds": "

    The EC2 subnet IDs for the cache subnet group.

    " + } + }, + "SubnetInUse": { + "base": "

    The requested subnet is being used by another cache subnet group.

    ", + "refs": { + } + }, + "SubnetList": { + "base": null, + "refs": { + "CacheSubnetGroup$Subnets": "

    A list of subnets associated with the cache subnet group.

    " + } + }, + "TStamp": { + "base": null, + "refs": { + "CacheCluster$CacheClusterCreateTime": "

    The date and time when the cache cluster was created.

    ", + "CacheNode$CacheNodeCreateTime": "

    The date and time when the cache node was created.

    ", + "DescribeEventsMessage$StartTime": "

    The beginning of the time interval to retrieve events for, specified in ISO 8601 format.

    ", + "DescribeEventsMessage$EndTime": "

    The end of the time interval for which to retrieve events, specified in ISO 8601 format.

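    A minimal sketch (not part of this patch) of querying such a time window with the Go client; the SDK serializes the Go time values to ISO 8601 on the wire:

        package main

        import (
            "fmt"
            "time"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/elasticache"
        )

        func main() {
            svc := elasticache.New(session.Must(session.NewSession()))
            end := time.Now()
            start := end.Add(-24 * time.Hour) // last 24 hours, for illustration
            out, err := svc.DescribeEvents(&elasticache.DescribeEventsInput{
                StartTime: aws.Time(start),
                EndTime:   aws.Time(end),
            })
            if err != nil {
                fmt.Println(err)
                return
            }
            for _, ev := range out.Events {
                fmt.Println(aws.TimeValue(ev.Date), aws.StringValue(ev.Message))
            }
        }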
    ", + "Event$Date": "

    The date and time when the event occurred.

    ", + "NodeSnapshot$CacheNodeCreateTime": "

    The date and time when the cache node was created in the source cache cluster.

    ", + "NodeSnapshot$SnapshotCreateTime": "

    The date and time when the source node's metadata and cache data set was obtained for the snapshot.

    ", + "ReservedCacheNode$StartTime": "

    The time the reservation started.

    ", + "Snapshot$CacheClusterCreateTime": "

    The date and time when the source cache cluster was created.

    " + } + }, + "Tag": { + "base": "

    A cost allocation Tag that can be added to an ElastiCache cluster or replication group. Tags are composed of a Key/Value pair. A tag with a null Value is permitted.

    ", + "refs": { + "TagList$member": null + } + }, + "TagList": { + "base": null, + "refs": { + "AddTagsToResourceMessage$Tags": "

    A list of cost allocation tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value.

    ", + "CreateCacheClusterMessage$Tags": "

    A list of cost allocation tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value.

    ", + "CreateReplicationGroupMessage$Tags": "

    A list of cost allocation tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value.

    ", + "TagListMessage$TagList": "

    A list of cost allocation tags as key-value pairs.

    " + } + }, + "TagListMessage": { + "base": "

    Represents the output from the AddTagsToResource, ListTagsOnResource, and RemoveTagsFromResource actions.

    ", + "refs": { + } + }, + "TagNotFoundFault": { + "base": "

    The requested tag was not found on this resource.

    ", + "refs": { + } + }, + "TagQuotaPerResourceExceeded": { + "base": "

    The request cannot be processed because it would cause the resource to have more than the allowed number of tags. The maximum number of tags permitted on a resource is 10.

    ", + "refs": { + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/elasticache/2015-02-02/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticache/2015-02-02/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticache/2015-02-02/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/elasticache/2015-02-02/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticache/2015-02-02/paginators-1.json new file mode 100644 index 000000000..8724740d6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticache/2015-02-02/paginators-1.json @@ -0,0 +1,76 @@ +{ + "pagination": { + "DescribeCacheClusters": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "CacheClusters" + }, + "DescribeCacheEngineVersions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "CacheEngineVersions" + }, + "DescribeCacheParameterGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "CacheParameterGroups" + }, + "DescribeCacheParameters": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "Parameters" + }, + "DescribeCacheSecurityGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "CacheSecurityGroups" + }, + "DescribeCacheSubnetGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "CacheSubnetGroups" + }, + "DescribeEngineDefaultParameters": { + "input_token": "Marker", + "output_token": "EngineDefaults.Marker", + "limit_key": "MaxRecords", + "result_key": "EngineDefaults.Parameters" + }, + "DescribeEvents": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "Events" + }, + "DescribeReservedCacheNodes": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "ReservedCacheNodes" + }, + "DescribeReservedCacheNodesOfferings": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "ReservedCacheNodesOfferings" + }, + "DescribeReplicationGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "ReplicationGroups" + }, + "DescribeSnapshots": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "Snapshots" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/elasticache/2015-02-02/waiters-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticache/2015-02-02/waiters-2.json new file mode 100644 index 000000000..c177d7b91 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticache/2015-02-02/waiters-2.json @@ -0,0 +1,143 @@ +{ + "version":2, + "waiters":{ + "CacheClusterAvailable":{ + "acceptors":[ + { + "argument":"CacheClusters[].CacheClusterStatus", + "expected":"available", + "matcher":"pathAll", + "state":"success" + }, + { + "argument":"CacheClusters[].CacheClusterStatus", + "expected":"deleted", + "matcher":"pathAny", + "state":"failure" + }, + { + "argument":"CacheClusters[].CacheClusterStatus", + "expected":"deleting", + "matcher":"pathAny", + "state":"failure" + }, + { + 
"argument":"CacheClusters[].CacheClusterStatus", + "expected":"incompatible-network", + "matcher":"pathAny", + "state":"failure" + }, + { + "argument":"CacheClusters[].CacheClusterStatus", + "expected":"restore-failed", + "matcher":"pathAny", + "state":"failure" + } + ], + "delay":15, + "description":"Wait until ElastiCache cluster is available.", + "maxAttempts":40, + "operation":"DescribeCacheClusters" + }, + "CacheClusterDeleted":{ + "acceptors":[ + { + "argument":"CacheClusters[].CacheClusterStatus", + "expected":"deleted", + "matcher":"pathAll", + "state":"success" + }, + { + "expected":"CacheClusterNotFound", + "matcher":"error", + "state":"success" + }, + { + "argument":"CacheClusters[].CacheClusterStatus", + "expected":"available", + "matcher":"pathAny", + "state":"failure" + }, + { + "argument":"CacheClusters[].CacheClusterStatus", + "expected":"creating", + "matcher":"pathAny", + "state":"failure" + }, + { + "argument":"CacheClusters[].CacheClusterStatus", + "expected":"incompatible-network", + "matcher":"pathAny", + "state":"failure" + }, + { + "argument":"CacheClusters[].CacheClusterStatus", + "expected":"modifying", + "matcher":"pathAny", + "state":"failure" + }, + { + "argument":"CacheClusters[].CacheClusterStatus", + "expected":"restore-failed", + "matcher":"pathAny", + "state":"failure" + }, + { + "argument":"CacheClusters[].CacheClusterStatus", + "expected":"snapshotting", + "matcher":"pathAny", + "state":"failure" + } + ], + "delay":15, + "description":"Wait until ElastiCache cluster is deleted.", + "maxAttempts":40, + "operation":"DescribeCacheClusters" + }, + "ReplicationGroupAvailable":{ + "acceptors":[ + { + "argument":"ReplicationGroups[].Status", + "expected":"available", + "matcher":"pathAll", + "state":"success" + }, + { + "argument":"ReplicationGroups[].Status", + "expected":"deleted", + "matcher":"pathAny", + "state":"failure" + } + ], + "delay":15, + "description":"Wait until ElastiCache replication group is available.", + "maxAttempts":40, + "operation":"DescribeReplicationGroups" + }, + "ReplicationGroupDeleted":{ + "acceptors":[ + { + "argument":"ReplicationGroups[].Status", + "expected":"deleted", + "matcher":"pathAll", + "state":"success" + }, + { + "argument":"ReplicationGroups[].Status", + "expected":"available", + "matcher":"pathAny", + "state":"failure" + }, + { + "expected":"ReplicationGroupNotFoundFault", + "matcher":"error", + "state":"success" + } + ], + "delay":15, + "description":"Wait until ElastiCache replication group is deleted.", + "maxAttempts":40, + "operation":"DescribeReplicationGroups" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/elasticbeanstalk/2010-12-01/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticbeanstalk/2010-12-01/api-2.json new file mode 100644 index 000000000..4a7d2e705 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticbeanstalk/2010-12-01/api-2.json @@ -0,0 +1,1894 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2010-12-01", + "endpointPrefix":"elasticbeanstalk", + "protocol":"query", + "serviceAbbreviation":"Elastic Beanstalk", + "serviceFullName":"AWS Elastic Beanstalk", + "signatureVersion":"v4", + "xmlNamespace":"http://elasticbeanstalk.amazonaws.com/docs/2010-12-01/" + }, + "operations":{ + "AbortEnvironmentUpdate":{ + "name":"AbortEnvironmentUpdate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AbortEnvironmentUpdateMessage"}, + "errors":[ + {"shape":"InsufficientPrivilegesException"} + ] + }, + 
"ApplyEnvironmentManagedAction":{ + "name":"ApplyEnvironmentManagedAction", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ApplyEnvironmentManagedActionRequest"}, + "output":{ + "shape":"ApplyEnvironmentManagedActionResult", + "resultWrapper":"ApplyEnvironmentManagedActionResult" + }, + "errors":[ + {"shape":"ElasticBeanstalkServiceException"}, + {"shape":"ManagedActionInvalidStateException"} + ] + }, + "CheckDNSAvailability":{ + "name":"CheckDNSAvailability", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CheckDNSAvailabilityMessage"}, + "output":{ + "shape":"CheckDNSAvailabilityResultMessage", + "resultWrapper":"CheckDNSAvailabilityResult" + } + }, + "ComposeEnvironments":{ + "name":"ComposeEnvironments", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ComposeEnvironmentsMessage"}, + "output":{ + "shape":"EnvironmentDescriptionsMessage", + "resultWrapper":"ComposeEnvironmentsResult" + }, + "errors":[ + {"shape":"TooManyEnvironmentsException"}, + {"shape":"InsufficientPrivilegesException"} + ] + }, + "CreateApplication":{ + "name":"CreateApplication", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateApplicationMessage"}, + "output":{ + "shape":"ApplicationDescriptionMessage", + "resultWrapper":"CreateApplicationResult" + }, + "errors":[ + {"shape":"TooManyApplicationsException"} + ] + }, + "CreateApplicationVersion":{ + "name":"CreateApplicationVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateApplicationVersionMessage"}, + "output":{ + "shape":"ApplicationVersionDescriptionMessage", + "resultWrapper":"CreateApplicationVersionResult" + }, + "errors":[ + {"shape":"TooManyApplicationsException"}, + {"shape":"TooManyApplicationVersionsException"}, + {"shape":"InsufficientPrivilegesException"}, + {"shape":"S3LocationNotInServiceRegionException"} + ] + }, + "CreateConfigurationTemplate":{ + "name":"CreateConfigurationTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateConfigurationTemplateMessage"}, + "output":{ + "shape":"ConfigurationSettingsDescription", + "resultWrapper":"CreateConfigurationTemplateResult" + }, + "errors":[ + {"shape":"InsufficientPrivilegesException"}, + {"shape":"TooManyBucketsException"}, + {"shape":"TooManyConfigurationTemplatesException"} + ] + }, + "CreateEnvironment":{ + "name":"CreateEnvironment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateEnvironmentMessage"}, + "output":{ + "shape":"EnvironmentDescription", + "resultWrapper":"CreateEnvironmentResult" + }, + "errors":[ + {"shape":"TooManyEnvironmentsException"}, + {"shape":"InsufficientPrivilegesException"} + ] + }, + "CreateStorageLocation":{ + "name":"CreateStorageLocation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{ + "shape":"CreateStorageLocationResultMessage", + "resultWrapper":"CreateStorageLocationResult" + }, + "errors":[ + {"shape":"TooManyBucketsException"}, + {"shape":"S3SubscriptionRequiredException"}, + {"shape":"InsufficientPrivilegesException"} + ] + }, + "DeleteApplication":{ + "name":"DeleteApplication", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteApplicationMessage"}, + "errors":[ + {"shape":"OperationInProgressException"} + ] + }, + "DeleteApplicationVersion":{ + "name":"DeleteApplicationVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteApplicationVersionMessage"}, + 
"errors":[ + {"shape":"SourceBundleDeletionException"}, + {"shape":"InsufficientPrivilegesException"}, + {"shape":"OperationInProgressException"}, + {"shape":"S3LocationNotInServiceRegionException"} + ] + }, + "DeleteConfigurationTemplate":{ + "name":"DeleteConfigurationTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteConfigurationTemplateMessage"}, + "errors":[ + {"shape":"OperationInProgressException"} + ] + }, + "DeleteEnvironmentConfiguration":{ + "name":"DeleteEnvironmentConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteEnvironmentConfigurationMessage"} + }, + "DescribeApplicationVersions":{ + "name":"DescribeApplicationVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeApplicationVersionsMessage"}, + "output":{ + "shape":"ApplicationVersionDescriptionsMessage", + "resultWrapper":"DescribeApplicationVersionsResult" + } + }, + "DescribeApplications":{ + "name":"DescribeApplications", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeApplicationsMessage"}, + "output":{ + "shape":"ApplicationDescriptionsMessage", + "resultWrapper":"DescribeApplicationsResult" + } + }, + "DescribeConfigurationOptions":{ + "name":"DescribeConfigurationOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeConfigurationOptionsMessage"}, + "output":{ + "shape":"ConfigurationOptionsDescription", + "resultWrapper":"DescribeConfigurationOptionsResult" + }, + "errors":[ + {"shape":"TooManyBucketsException"} + ] + }, + "DescribeConfigurationSettings":{ + "name":"DescribeConfigurationSettings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeConfigurationSettingsMessage"}, + "output":{ + "shape":"ConfigurationSettingsDescriptions", + "resultWrapper":"DescribeConfigurationSettingsResult" + }, + "errors":[ + {"shape":"TooManyBucketsException"} + ] + }, + "DescribeEnvironmentHealth":{ + "name":"DescribeEnvironmentHealth", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEnvironmentHealthRequest"}, + "output":{ + "shape":"DescribeEnvironmentHealthResult", + "resultWrapper":"DescribeEnvironmentHealthResult" + }, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ElasticBeanstalkServiceException"} + ] + }, + "DescribeEnvironmentManagedActionHistory":{ + "name":"DescribeEnvironmentManagedActionHistory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEnvironmentManagedActionHistoryRequest"}, + "output":{ + "shape":"DescribeEnvironmentManagedActionHistoryResult", + "resultWrapper":"DescribeEnvironmentManagedActionHistoryResult" + }, + "errors":[ + {"shape":"ElasticBeanstalkServiceException"} + ] + }, + "DescribeEnvironmentManagedActions":{ + "name":"DescribeEnvironmentManagedActions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEnvironmentManagedActionsRequest"}, + "output":{ + "shape":"DescribeEnvironmentManagedActionsResult", + "resultWrapper":"DescribeEnvironmentManagedActionsResult" + }, + "errors":[ + {"shape":"ElasticBeanstalkServiceException"} + ] + }, + "DescribeEnvironmentResources":{ + "name":"DescribeEnvironmentResources", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEnvironmentResourcesMessage"}, + "output":{ + "shape":"EnvironmentResourceDescriptionsMessage", + "resultWrapper":"DescribeEnvironmentResourcesResult" + }, + 
"errors":[ + {"shape":"InsufficientPrivilegesException"} + ] + }, + "DescribeEnvironments":{ + "name":"DescribeEnvironments", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEnvironmentsMessage"}, + "output":{ + "shape":"EnvironmentDescriptionsMessage", + "resultWrapper":"DescribeEnvironmentsResult" + } + }, + "DescribeEvents":{ + "name":"DescribeEvents", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventsMessage"}, + "output":{ + "shape":"EventDescriptionsMessage", + "resultWrapper":"DescribeEventsResult" + } + }, + "DescribeInstancesHealth":{ + "name":"DescribeInstancesHealth", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeInstancesHealthRequest"}, + "output":{ + "shape":"DescribeInstancesHealthResult", + "resultWrapper":"DescribeInstancesHealthResult" + }, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ElasticBeanstalkServiceException"} + ] + }, + "ListAvailableSolutionStacks":{ + "name":"ListAvailableSolutionStacks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{ + "shape":"ListAvailableSolutionStacksResultMessage", + "resultWrapper":"ListAvailableSolutionStacksResult" + } + }, + "RebuildEnvironment":{ + "name":"RebuildEnvironment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RebuildEnvironmentMessage"}, + "errors":[ + {"shape":"InsufficientPrivilegesException"} + ] + }, + "RequestEnvironmentInfo":{ + "name":"RequestEnvironmentInfo", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RequestEnvironmentInfoMessage"} + }, + "RestartAppServer":{ + "name":"RestartAppServer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestartAppServerMessage"} + }, + "RetrieveEnvironmentInfo":{ + "name":"RetrieveEnvironmentInfo", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RetrieveEnvironmentInfoMessage"}, + "output":{ + "shape":"RetrieveEnvironmentInfoResultMessage", + "resultWrapper":"RetrieveEnvironmentInfoResult" + } + }, + "SwapEnvironmentCNAMEs":{ + "name":"SwapEnvironmentCNAMEs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SwapEnvironmentCNAMEsMessage"} + }, + "TerminateEnvironment":{ + "name":"TerminateEnvironment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TerminateEnvironmentMessage"}, + "output":{ + "shape":"EnvironmentDescription", + "resultWrapper":"TerminateEnvironmentResult" + }, + "errors":[ + {"shape":"InsufficientPrivilegesException"} + ] + }, + "UpdateApplication":{ + "name":"UpdateApplication", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateApplicationMessage"}, + "output":{ + "shape":"ApplicationDescriptionMessage", + "resultWrapper":"UpdateApplicationResult" + } + }, + "UpdateApplicationVersion":{ + "name":"UpdateApplicationVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateApplicationVersionMessage"}, + "output":{ + "shape":"ApplicationVersionDescriptionMessage", + "resultWrapper":"UpdateApplicationVersionResult" + } + }, + "UpdateConfigurationTemplate":{ + "name":"UpdateConfigurationTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateConfigurationTemplateMessage"}, + "output":{ + "shape":"ConfigurationSettingsDescription", + "resultWrapper":"UpdateConfigurationTemplateResult" + }, + "errors":[ + {"shape":"InsufficientPrivilegesException"}, + 
{"shape":"TooManyBucketsException"} + ] + }, + "UpdateEnvironment":{ + "name":"UpdateEnvironment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateEnvironmentMessage"}, + "output":{ + "shape":"EnvironmentDescription", + "resultWrapper":"UpdateEnvironmentResult" + }, + "errors":[ + {"shape":"InsufficientPrivilegesException"}, + {"shape":"TooManyBucketsException"} + ] + }, + "ValidateConfigurationSettings":{ + "name":"ValidateConfigurationSettings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ValidateConfigurationSettingsMessage"}, + "output":{ + "shape":"ConfigurationSettingsValidationMessages", + "resultWrapper":"ValidateConfigurationSettingsResult" + }, + "errors":[ + {"shape":"InsufficientPrivilegesException"}, + {"shape":"TooManyBucketsException"} + ] + } + }, + "shapes":{ + "AbortEnvironmentUpdateMessage":{ + "type":"structure", + "members":{ + "EnvironmentId":{"shape":"EnvironmentId"}, + "EnvironmentName":{"shape":"EnvironmentName"} + } + }, + "AbortableOperationInProgress":{"type":"boolean"}, + "ActionHistoryStatus":{ + "type":"string", + "enum":[ + "Completed", + "Failed", + "Unknown" + ] + }, + "ActionStatus":{ + "type":"string", + "enum":[ + "Scheduled", + "Pending", + "Running", + "Unknown" + ] + }, + "ActionType":{ + "type":"string", + "enum":[ + "InstanceRefresh", + "PlatformUpdate", + "Unknown" + ] + }, + "ApplicationDescription":{ + "type":"structure", + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "Description":{"shape":"Description"}, + "DateCreated":{"shape":"CreationDate"}, + "DateUpdated":{"shape":"UpdateDate"}, + "Versions":{"shape":"VersionLabelsList"}, + "ConfigurationTemplates":{"shape":"ConfigurationTemplateNamesList"} + } + }, + "ApplicationDescriptionList":{ + "type":"list", + "member":{"shape":"ApplicationDescription"} + }, + "ApplicationDescriptionMessage":{ + "type":"structure", + "members":{ + "Application":{"shape":"ApplicationDescription"} + } + }, + "ApplicationDescriptionsMessage":{ + "type":"structure", + "members":{ + "Applications":{"shape":"ApplicationDescriptionList"} + } + }, + "ApplicationMetrics":{ + "type":"structure", + "members":{ + "Duration":{"shape":"NullableInteger"}, + "RequestCount":{"shape":"RequestCount"}, + "StatusCodes":{"shape":"StatusCodes"}, + "Latency":{"shape":"Latency"} + } + }, + "ApplicationName":{ + "type":"string", + "max":100, + "min":1 + }, + "ApplicationNamesList":{ + "type":"list", + "member":{"shape":"ApplicationName"} + }, + "ApplicationVersionDescription":{ + "type":"structure", + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "Description":{"shape":"Description"}, + "VersionLabel":{"shape":"VersionLabel"}, + "SourceBundle":{"shape":"S3Location"}, + "DateCreated":{"shape":"CreationDate"}, + "DateUpdated":{"shape":"UpdateDate"}, + "Status":{"shape":"ApplicationVersionStatus"} + } + }, + "ApplicationVersionDescriptionList":{ + "type":"list", + "member":{"shape":"ApplicationVersionDescription"} + }, + "ApplicationVersionDescriptionMessage":{ + "type":"structure", + "members":{ + "ApplicationVersion":{"shape":"ApplicationVersionDescription"} + } + }, + "ApplicationVersionDescriptionsMessage":{ + "type":"structure", + "members":{ + "ApplicationVersions":{"shape":"ApplicationVersionDescriptionList"} + } + }, + "ApplicationVersionProccess":{"type":"boolean"}, + "ApplicationVersionStatus":{ + "type":"string", + "enum":[ + "Processed", + "Unprocessed", + "Failed", + "Processing" + ] + }, + "ApplyEnvironmentManagedActionRequest":{ + 
"type":"structure", + "required":["ActionId"], + "members":{ + "EnvironmentName":{"shape":"String"}, + "EnvironmentId":{"shape":"String"}, + "ActionId":{"shape":"String"} + } + }, + "ApplyEnvironmentManagedActionResult":{ + "type":"structure", + "members":{ + "ActionId":{"shape":"String"}, + "ActionDescription":{"shape":"String"}, + "ActionType":{"shape":"ActionType"}, + "Status":{"shape":"String"} + } + }, + "AutoCreateApplication":{"type":"boolean"}, + "AutoScalingGroup":{ + "type":"structure", + "members":{ + "Name":{"shape":"ResourceId"} + } + }, + "AutoScalingGroupList":{ + "type":"list", + "member":{"shape":"AutoScalingGroup"} + }, + "AvailableSolutionStackDetailsList":{ + "type":"list", + "member":{"shape":"SolutionStackDescription"} + }, + "AvailableSolutionStackNamesList":{ + "type":"list", + "member":{"shape":"SolutionStackName"} + }, + "CPUUtilization":{ + "type":"structure", + "members":{ + "User":{"shape":"NullableDouble"}, + "Nice":{"shape":"NullableDouble"}, + "System":{"shape":"NullableDouble"}, + "Idle":{"shape":"NullableDouble"}, + "IOWait":{"shape":"NullableDouble"}, + "IRQ":{"shape":"NullableDouble"}, + "SoftIRQ":{"shape":"NullableDouble"} + } + }, + "Cause":{ + "type":"string", + "max":255, + "min":1 + }, + "Causes":{ + "type":"list", + "member":{"shape":"Cause"} + }, + "CheckDNSAvailabilityMessage":{ + "type":"structure", + "required":["CNAMEPrefix"], + "members":{ + "CNAMEPrefix":{"shape":"DNSCnamePrefix"} + } + }, + "CheckDNSAvailabilityResultMessage":{ + "type":"structure", + "members":{ + "Available":{"shape":"CnameAvailability"}, + "FullyQualifiedCNAME":{"shape":"DNSCname"} + } + }, + "CnameAvailability":{"type":"boolean"}, + "ComposeEnvironmentsMessage":{ + "type":"structure", + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "GroupName":{"shape":"GroupName"}, + "VersionLabels":{"shape":"VersionLabels"} + } + }, + "ConfigurationDeploymentStatus":{ + "type":"string", + "enum":[ + "deployed", + "pending", + "failed" + ] + }, + "ConfigurationOptionDefaultValue":{"type":"string"}, + "ConfigurationOptionDescription":{ + "type":"structure", + "members":{ + "Namespace":{"shape":"OptionNamespace"}, + "Name":{"shape":"ConfigurationOptionName"}, + "DefaultValue":{"shape":"ConfigurationOptionDefaultValue"}, + "ChangeSeverity":{"shape":"ConfigurationOptionSeverity"}, + "UserDefined":{"shape":"UserDefinedOption"}, + "ValueType":{"shape":"ConfigurationOptionValueType"}, + "ValueOptions":{"shape":"ConfigurationOptionPossibleValues"}, + "MinValue":{"shape":"OptionRestrictionMinValue"}, + "MaxValue":{"shape":"OptionRestrictionMaxValue"}, + "MaxLength":{"shape":"OptionRestrictionMaxLength"}, + "Regex":{"shape":"OptionRestrictionRegex"} + } + }, + "ConfigurationOptionDescriptionsList":{ + "type":"list", + "member":{"shape":"ConfigurationOptionDescription"} + }, + "ConfigurationOptionName":{"type":"string"}, + "ConfigurationOptionPossibleValue":{"type":"string"}, + "ConfigurationOptionPossibleValues":{ + "type":"list", + "member":{"shape":"ConfigurationOptionPossibleValue"} + }, + "ConfigurationOptionSetting":{ + "type":"structure", + "members":{ + "ResourceName":{"shape":"ResourceName"}, + "Namespace":{"shape":"OptionNamespace"}, + "OptionName":{"shape":"ConfigurationOptionName"}, + "Value":{"shape":"ConfigurationOptionValue"} + } + }, + "ConfigurationOptionSettingsList":{ + "type":"list", + "member":{"shape":"ConfigurationOptionSetting"} + }, + "ConfigurationOptionSeverity":{"type":"string"}, + "ConfigurationOptionValue":{"type":"string"}, + 
"ConfigurationOptionValueType":{ + "type":"string", + "enum":[ + "Scalar", + "List" + ] + }, + "ConfigurationOptionsDescription":{ + "type":"structure", + "members":{ + "SolutionStackName":{"shape":"SolutionStackName"}, + "Options":{"shape":"ConfigurationOptionDescriptionsList"} + } + }, + "ConfigurationSettingsDescription":{ + "type":"structure", + "members":{ + "SolutionStackName":{"shape":"SolutionStackName"}, + "ApplicationName":{"shape":"ApplicationName"}, + "TemplateName":{"shape":"ConfigurationTemplateName"}, + "Description":{"shape":"Description"}, + "EnvironmentName":{"shape":"EnvironmentName"}, + "DeploymentStatus":{"shape":"ConfigurationDeploymentStatus"}, + "DateCreated":{"shape":"CreationDate"}, + "DateUpdated":{"shape":"UpdateDate"}, + "OptionSettings":{"shape":"ConfigurationOptionSettingsList"} + } + }, + "ConfigurationSettingsDescriptionList":{ + "type":"list", + "member":{"shape":"ConfigurationSettingsDescription"} + }, + "ConfigurationSettingsDescriptions":{ + "type":"structure", + "members":{ + "ConfigurationSettings":{"shape":"ConfigurationSettingsDescriptionList"} + } + }, + "ConfigurationSettingsValidationMessages":{ + "type":"structure", + "members":{ + "Messages":{"shape":"ValidationMessagesList"} + } + }, + "ConfigurationTemplateName":{ + "type":"string", + "max":100, + "min":1 + }, + "ConfigurationTemplateNamesList":{ + "type":"list", + "member":{"shape":"ConfigurationTemplateName"} + }, + "CreateApplicationMessage":{ + "type":"structure", + "required":["ApplicationName"], + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "Description":{"shape":"Description"} + } + }, + "CreateApplicationVersionMessage":{ + "type":"structure", + "required":[ + "ApplicationName", + "VersionLabel" + ], + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "VersionLabel":{"shape":"VersionLabel"}, + "Description":{"shape":"Description"}, + "SourceBundle":{"shape":"S3Location"}, + "AutoCreateApplication":{"shape":"AutoCreateApplication"}, + "Process":{"shape":"ApplicationVersionProccess"} + } + }, + "CreateConfigurationTemplateMessage":{ + "type":"structure", + "required":[ + "ApplicationName", + "TemplateName" + ], + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "TemplateName":{"shape":"ConfigurationTemplateName"}, + "SolutionStackName":{"shape":"SolutionStackName"}, + "SourceConfiguration":{"shape":"SourceConfiguration"}, + "EnvironmentId":{"shape":"EnvironmentId"}, + "Description":{"shape":"Description"}, + "OptionSettings":{"shape":"ConfigurationOptionSettingsList"} + } + }, + "CreateEnvironmentMessage":{ + "type":"structure", + "required":["ApplicationName"], + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "EnvironmentName":{"shape":"EnvironmentName"}, + "GroupName":{"shape":"GroupName"}, + "Description":{"shape":"Description"}, + "CNAMEPrefix":{"shape":"DNSCnamePrefix"}, + "Tier":{"shape":"EnvironmentTier"}, + "Tags":{"shape":"Tags"}, + "VersionLabel":{"shape":"VersionLabel"}, + "TemplateName":{"shape":"ConfigurationTemplateName"}, + "SolutionStackName":{"shape":"SolutionStackName"}, + "OptionSettings":{"shape":"ConfigurationOptionSettingsList"}, + "OptionsToRemove":{"shape":"OptionsSpecifierList"} + } + }, + "CreateStorageLocationResultMessage":{ + "type":"structure", + "members":{ + "S3Bucket":{"shape":"S3Bucket"} + } + }, + "CreationDate":{"type":"timestamp"}, + "DNSCname":{ + "type":"string", + "max":255, + "min":1 + }, + "DNSCnamePrefix":{ + "type":"string", + "max":63, + "min":4 + }, + "DeleteApplicationMessage":{ 
+ "type":"structure", + "required":["ApplicationName"], + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "TerminateEnvByForce":{"shape":"TerminateEnvForce"} + } + }, + "DeleteApplicationVersionMessage":{ + "type":"structure", + "required":[ + "ApplicationName", + "VersionLabel" + ], + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "VersionLabel":{"shape":"VersionLabel"}, + "DeleteSourceBundle":{"shape":"DeleteSourceBundle"} + } + }, + "DeleteConfigurationTemplateMessage":{ + "type":"structure", + "required":[ + "ApplicationName", + "TemplateName" + ], + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "TemplateName":{"shape":"ConfigurationTemplateName"} + } + }, + "DeleteEnvironmentConfigurationMessage":{ + "type":"structure", + "required":[ + "ApplicationName", + "EnvironmentName" + ], + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "EnvironmentName":{"shape":"EnvironmentName"} + } + }, + "DeleteSourceBundle":{"type":"boolean"}, + "Deployment":{ + "type":"structure", + "members":{ + "VersionLabel":{"shape":"String"}, + "DeploymentId":{"shape":"NullableLong"}, + "Status":{"shape":"String"}, + "DeploymentTime":{"shape":"DeploymentTimestamp"} + } + }, + "DeploymentTimestamp":{"type":"timestamp"}, + "DescribeApplicationVersionsMessage":{ + "type":"structure", + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "VersionLabels":{"shape":"VersionLabelsList"} + } + }, + "DescribeApplicationsMessage":{ + "type":"structure", + "members":{ + "ApplicationNames":{"shape":"ApplicationNamesList"} + } + }, + "DescribeConfigurationOptionsMessage":{ + "type":"structure", + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "TemplateName":{"shape":"ConfigurationTemplateName"}, + "EnvironmentName":{"shape":"EnvironmentName"}, + "SolutionStackName":{"shape":"SolutionStackName"}, + "Options":{"shape":"OptionsSpecifierList"} + } + }, + "DescribeConfigurationSettingsMessage":{ + "type":"structure", + "required":["ApplicationName"], + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "TemplateName":{"shape":"ConfigurationTemplateName"}, + "EnvironmentName":{"shape":"EnvironmentName"} + } + }, + "DescribeEnvironmentHealthRequest":{ + "type":"structure", + "members":{ + "EnvironmentName":{"shape":"EnvironmentName"}, + "EnvironmentId":{"shape":"EnvironmentId"}, + "AttributeNames":{"shape":"EnvironmentHealthAttributes"} + } + }, + "DescribeEnvironmentHealthResult":{ + "type":"structure", + "members":{ + "EnvironmentName":{"shape":"EnvironmentName"}, + "HealthStatus":{"shape":"String"}, + "Status":{"shape":"EnvironmentHealth"}, + "Color":{"shape":"String"}, + "Causes":{"shape":"Causes"}, + "ApplicationMetrics":{"shape":"ApplicationMetrics"}, + "InstancesHealth":{"shape":"InstanceHealthSummary"}, + "RefreshedAt":{"shape":"RefreshedAt"} + } + }, + "DescribeEnvironmentManagedActionHistoryRequest":{ + "type":"structure", + "members":{ + "EnvironmentId":{"shape":"EnvironmentId"}, + "EnvironmentName":{"shape":"EnvironmentName"}, + "NextToken":{"shape":"String"}, + "MaxItems":{"shape":"Integer"} + } + }, + "DescribeEnvironmentManagedActionHistoryResult":{ + "type":"structure", + "members":{ + "ManagedActionHistoryItems":{"shape":"ManagedActionHistoryItems"}, + "NextToken":{"shape":"String"} + } + }, + "DescribeEnvironmentManagedActionsRequest":{ + "type":"structure", + "members":{ + "EnvironmentName":{"shape":"String"}, + "EnvironmentId":{"shape":"String"}, + "Status":{"shape":"ActionStatus"} + } + }, + 
"DescribeEnvironmentManagedActionsResult":{ + "type":"structure", + "members":{ + "ManagedActions":{"shape":"ManagedActions"} + } + }, + "DescribeEnvironmentResourcesMessage":{ + "type":"structure", + "members":{ + "EnvironmentId":{"shape":"EnvironmentId"}, + "EnvironmentName":{"shape":"EnvironmentName"} + } + }, + "DescribeEnvironmentsMessage":{ + "type":"structure", + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "VersionLabel":{"shape":"VersionLabel"}, + "EnvironmentIds":{"shape":"EnvironmentIdList"}, + "EnvironmentNames":{"shape":"EnvironmentNamesList"}, + "IncludeDeleted":{"shape":"IncludeDeleted"}, + "IncludedDeletedBackTo":{"shape":"IncludeDeletedBackTo"} + } + }, + "DescribeEventsMessage":{ + "type":"structure", + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "VersionLabel":{"shape":"VersionLabel"}, + "TemplateName":{"shape":"ConfigurationTemplateName"}, + "EnvironmentId":{"shape":"EnvironmentId"}, + "EnvironmentName":{"shape":"EnvironmentName"}, + "RequestId":{"shape":"RequestId"}, + "Severity":{"shape":"EventSeverity"}, + "StartTime":{"shape":"TimeFilterStart"}, + "EndTime":{"shape":"TimeFilterEnd"}, + "MaxRecords":{"shape":"MaxRecords"}, + "NextToken":{"shape":"Token"} + } + }, + "DescribeInstancesHealthRequest":{ + "type":"structure", + "members":{ + "EnvironmentName":{"shape":"EnvironmentName"}, + "EnvironmentId":{"shape":"EnvironmentId"}, + "AttributeNames":{"shape":"InstancesHealthAttributes"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeInstancesHealthResult":{ + "type":"structure", + "members":{ + "InstanceHealthList":{"shape":"InstanceHealthList"}, + "RefreshedAt":{"shape":"RefreshedAt"}, + "NextToken":{"shape":"NextToken"} + } + }, + "Description":{ + "type":"string", + "max":200 + }, + "Ec2InstanceId":{"type":"string"}, + "ElasticBeanstalkServiceException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "EndpointURL":{"type":"string"}, + "EnvironmentDescription":{ + "type":"structure", + "members":{ + "EnvironmentName":{"shape":"EnvironmentName"}, + "EnvironmentId":{"shape":"EnvironmentId"}, + "ApplicationName":{"shape":"ApplicationName"}, + "VersionLabel":{"shape":"VersionLabel"}, + "SolutionStackName":{"shape":"SolutionStackName"}, + "TemplateName":{"shape":"ConfigurationTemplateName"}, + "Description":{"shape":"Description"}, + "EndpointURL":{"shape":"EndpointURL"}, + "CNAME":{"shape":"DNSCname"}, + "DateCreated":{"shape":"CreationDate"}, + "DateUpdated":{"shape":"UpdateDate"}, + "Status":{"shape":"EnvironmentStatus"}, + "AbortableOperationInProgress":{"shape":"AbortableOperationInProgress"}, + "Health":{"shape":"EnvironmentHealth"}, + "HealthStatus":{"shape":"EnvironmentHealthStatus"}, + "Resources":{"shape":"EnvironmentResourcesDescription"}, + "Tier":{"shape":"EnvironmentTier"}, + "EnvironmentLinks":{"shape":"EnvironmentLinks"} + } + }, + "EnvironmentDescriptionsList":{ + "type":"list", + "member":{"shape":"EnvironmentDescription"} + }, + "EnvironmentDescriptionsMessage":{ + "type":"structure", + "members":{ + "Environments":{"shape":"EnvironmentDescriptionsList"} + } + }, + "EnvironmentHealth":{ + "type":"string", + "enum":[ + "Green", + "Yellow", + "Red", + "Grey" + ] + }, + "EnvironmentHealthAttribute":{ + "type":"string", + "enum":[ + "Status", + "Color", + "Causes", + "ApplicationMetrics", + "InstancesHealth", + "All", + "HealthStatus", + "RefreshedAt" + ] + }, + "EnvironmentHealthAttributes":{ + "type":"list", + 
"member":{"shape":"EnvironmentHealthAttribute"} + }, + "EnvironmentHealthStatus":{ + "type":"string", + "enum":[ + "NoData", + "Unknown", + "Pending", + "Ok", + "Info", + "Warning", + "Degraded", + "Severe" + ] + }, + "EnvironmentId":{"type":"string"}, + "EnvironmentIdList":{ + "type":"list", + "member":{"shape":"EnvironmentId"} + }, + "EnvironmentInfoDescription":{ + "type":"structure", + "members":{ + "InfoType":{"shape":"EnvironmentInfoType"}, + "Ec2InstanceId":{"shape":"Ec2InstanceId"}, + "SampleTimestamp":{"shape":"SampleTimestamp"}, + "Message":{"shape":"Message"} + } + }, + "EnvironmentInfoDescriptionList":{ + "type":"list", + "member":{"shape":"EnvironmentInfoDescription"} + }, + "EnvironmentInfoType":{ + "type":"string", + "enum":[ + "tail", + "bundle" + ] + }, + "EnvironmentLink":{ + "type":"structure", + "members":{ + "LinkName":{"shape":"String"}, + "EnvironmentName":{"shape":"String"} + } + }, + "EnvironmentLinks":{ + "type":"list", + "member":{"shape":"EnvironmentLink"} + }, + "EnvironmentName":{ + "type":"string", + "max":40, + "min":4 + }, + "EnvironmentNamesList":{ + "type":"list", + "member":{"shape":"EnvironmentName"} + }, + "EnvironmentResourceDescription":{ + "type":"structure", + "members":{ + "EnvironmentName":{"shape":"EnvironmentName"}, + "AutoScalingGroups":{"shape":"AutoScalingGroupList"}, + "Instances":{"shape":"InstanceList"}, + "LaunchConfigurations":{"shape":"LaunchConfigurationList"}, + "LoadBalancers":{"shape":"LoadBalancerList"}, + "Triggers":{"shape":"TriggerList"}, + "Queues":{"shape":"QueueList"} + } + }, + "EnvironmentResourceDescriptionsMessage":{ + "type":"structure", + "members":{ + "EnvironmentResources":{"shape":"EnvironmentResourceDescription"} + } + }, + "EnvironmentResourcesDescription":{ + "type":"structure", + "members":{ + "LoadBalancer":{"shape":"LoadBalancerDescription"} + } + }, + "EnvironmentStatus":{ + "type":"string", + "enum":[ + "Launching", + "Updating", + "Ready", + "Terminating", + "Terminated" + ] + }, + "EnvironmentTier":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "Type":{"shape":"String"}, + "Version":{"shape":"String"} + } + }, + "EventDate":{"type":"timestamp"}, + "EventDescription":{ + "type":"structure", + "members":{ + "EventDate":{"shape":"EventDate"}, + "Message":{"shape":"EventMessage"}, + "ApplicationName":{"shape":"ApplicationName"}, + "VersionLabel":{"shape":"VersionLabel"}, + "TemplateName":{"shape":"ConfigurationTemplateName"}, + "EnvironmentName":{"shape":"EnvironmentName"}, + "RequestId":{"shape":"RequestId"}, + "Severity":{"shape":"EventSeverity"} + } + }, + "EventDescriptionList":{ + "type":"list", + "member":{"shape":"EventDescription"} + }, + "EventDescriptionsMessage":{ + "type":"structure", + "members":{ + "Events":{"shape":"EventDescriptionList"}, + "NextToken":{"shape":"Token"} + } + }, + "EventMessage":{"type":"string"}, + "EventSeverity":{ + "type":"string", + "enum":[ + "TRACE", + "DEBUG", + "INFO", + "WARN", + "ERROR", + "FATAL" + ] + }, + "ExceptionMessage":{"type":"string"}, + "FailureType":{ + "type":"string", + "enum":[ + "UpdateCancelled", + "CancellationFailed", + "RollbackFailed", + "RollbackSuccessful", + "InternalFailure", + "InvalidEnvironmentState", + "PermissionsError" + ] + }, + "FileTypeExtension":{ + "type":"string", + "max":100, + "min":1 + }, + "ForceTerminate":{"type":"boolean"}, + "GroupName":{ + "type":"string", + "max":19, + "min":1 + }, + "IncludeDeleted":{"type":"boolean"}, + "IncludeDeletedBackTo":{"type":"timestamp"}, + "Instance":{ + 
"type":"structure", + "members":{ + "Id":{"shape":"ResourceId"} + } + }, + "InstanceHealthList":{ + "type":"list", + "member":{"shape":"SingleInstanceHealth"} + }, + "InstanceHealthSummary":{ + "type":"structure", + "members":{ + "NoData":{"shape":"NullableInteger"}, + "Unknown":{"shape":"NullableInteger"}, + "Pending":{"shape":"NullableInteger"}, + "Ok":{"shape":"NullableInteger"}, + "Info":{"shape":"NullableInteger"}, + "Warning":{"shape":"NullableInteger"}, + "Degraded":{"shape":"NullableInteger"}, + "Severe":{"shape":"NullableInteger"} + } + }, + "InstanceId":{ + "type":"string", + "max":255, + "min":1 + }, + "InstanceList":{ + "type":"list", + "member":{"shape":"Instance"} + }, + "InstancesHealthAttribute":{ + "type":"string", + "enum":[ + "HealthStatus", + "Color", + "Causes", + "ApplicationMetrics", + "RefreshedAt", + "LaunchedAt", + "System", + "Deployment", + "AvailabilityZone", + "InstanceType", + "All" + ] + }, + "InstancesHealthAttributes":{ + "type":"list", + "member":{"shape":"InstancesHealthAttribute"} + }, + "InsufficientPrivilegesException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InsufficientPrivilegesException", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "Integer":{"type":"integer"}, + "InvalidRequestException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidRequestException", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Latency":{ + "type":"structure", + "members":{ + "P999":{"shape":"NullableDouble"}, + "P99":{"shape":"NullableDouble"}, + "P95":{"shape":"NullableDouble"}, + "P90":{"shape":"NullableDouble"}, + "P85":{"shape":"NullableDouble"}, + "P75":{"shape":"NullableDouble"}, + "P50":{"shape":"NullableDouble"}, + "P10":{"shape":"NullableDouble"} + } + }, + "LaunchConfiguration":{ + "type":"structure", + "members":{ + "Name":{"shape":"ResourceId"} + } + }, + "LaunchConfigurationList":{ + "type":"list", + "member":{"shape":"LaunchConfiguration"} + }, + "LaunchedAt":{"type":"timestamp"}, + "ListAvailableSolutionStacksResultMessage":{ + "type":"structure", + "members":{ + "SolutionStacks":{"shape":"AvailableSolutionStackNamesList"}, + "SolutionStackDetails":{"shape":"AvailableSolutionStackDetailsList"} + } + }, + "Listener":{ + "type":"structure", + "members":{ + "Protocol":{"shape":"String"}, + "Port":{"shape":"Integer"} + } + }, + "LoadAverage":{ + "type":"list", + "member":{"shape":"LoadAverageValue"} + }, + "LoadAverageValue":{"type":"double"}, + "LoadBalancer":{ + "type":"structure", + "members":{ + "Name":{"shape":"ResourceId"} + } + }, + "LoadBalancerDescription":{ + "type":"structure", + "members":{ + "LoadBalancerName":{"shape":"String"}, + "Domain":{"shape":"String"}, + "Listeners":{"shape":"LoadBalancerListenersDescription"} + } + }, + "LoadBalancerList":{ + "type":"list", + "member":{"shape":"LoadBalancer"} + }, + "LoadBalancerListenersDescription":{ + "type":"list", + "member":{"shape":"Listener"} + }, + "ManagedAction":{ + "type":"structure", + "members":{ + "ActionId":{"shape":"String"}, + "ActionDescription":{"shape":"String"}, + "ActionType":{"shape":"ActionType"}, + "Status":{"shape":"ActionStatus"}, + "WindowStartTime":{"shape":"Timestamp"} + } + }, + "ManagedActionHistoryItem":{ + "type":"structure", + "members":{ + "ActionId":{"shape":"String"}, + "ActionType":{"shape":"ActionType"}, + "ActionDescription":{"shape":"String"}, + "FailureType":{"shape":"FailureType"}, + "Status":{"shape":"ActionHistoryStatus"}, + 
"FailureDescription":{"shape":"String"}, + "ExecutedTime":{"shape":"Timestamp"}, + "FinishedTime":{"shape":"Timestamp"} + } + }, + "ManagedActionHistoryItems":{ + "type":"list", + "member":{"shape":"ManagedActionHistoryItem"}, + "max":100, + "min":1 + }, + "ManagedActionInvalidStateException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ManagedActionInvalidStateException", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ManagedActions":{ + "type":"list", + "member":{"shape":"ManagedAction"}, + "max":100, + "min":1 + }, + "MaxRecords":{ + "type":"integer", + "max":1000, + "min":1 + }, + "Message":{"type":"string"}, + "NextToken":{ + "type":"string", + "max":100, + "min":1 + }, + "NullableDouble":{"type":"double"}, + "NullableInteger":{"type":"integer"}, + "NullableLong":{"type":"long"}, + "OperationInProgressException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"OperationInProgressFailure", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "OptionNamespace":{"type":"string"}, + "OptionRestrictionMaxLength":{"type":"integer"}, + "OptionRestrictionMaxValue":{"type":"integer"}, + "OptionRestrictionMinValue":{"type":"integer"}, + "OptionRestrictionRegex":{ + "type":"structure", + "members":{ + "Pattern":{"shape":"RegexPattern"}, + "Label":{"shape":"RegexLabel"} + } + }, + "OptionSpecification":{ + "type":"structure", + "members":{ + "ResourceName":{"shape":"ResourceName"}, + "Namespace":{"shape":"OptionNamespace"}, + "OptionName":{"shape":"ConfigurationOptionName"} + } + }, + "OptionsSpecifierList":{ + "type":"list", + "member":{"shape":"OptionSpecification"} + }, + "Queue":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "URL":{"shape":"String"} + } + }, + "QueueList":{ + "type":"list", + "member":{"shape":"Queue"} + }, + "RebuildEnvironmentMessage":{ + "type":"structure", + "members":{ + "EnvironmentId":{"shape":"EnvironmentId"}, + "EnvironmentName":{"shape":"EnvironmentName"} + } + }, + "RefreshedAt":{"type":"timestamp"}, + "RegexLabel":{"type":"string"}, + "RegexPattern":{"type":"string"}, + "RequestCount":{"type":"integer"}, + "RequestEnvironmentInfoMessage":{ + "type":"structure", + "required":["InfoType"], + "members":{ + "EnvironmentId":{"shape":"EnvironmentId"}, + "EnvironmentName":{"shape":"EnvironmentName"}, + "InfoType":{"shape":"EnvironmentInfoType"} + } + }, + "RequestId":{"type":"string"}, + "ResourceId":{"type":"string"}, + "ResourceName":{ + "type":"string", + "max":256, + "min":1 + }, + "RestartAppServerMessage":{ + "type":"structure", + "members":{ + "EnvironmentId":{"shape":"EnvironmentId"}, + "EnvironmentName":{"shape":"EnvironmentName"} + } + }, + "RetrieveEnvironmentInfoMessage":{ + "type":"structure", + "required":["InfoType"], + "members":{ + "EnvironmentId":{"shape":"EnvironmentId"}, + "EnvironmentName":{"shape":"EnvironmentName"}, + "InfoType":{"shape":"EnvironmentInfoType"} + } + }, + "RetrieveEnvironmentInfoResultMessage":{ + "type":"structure", + "members":{ + "EnvironmentInfo":{"shape":"EnvironmentInfoDescriptionList"} + } + }, + "S3Bucket":{ + "type":"string", + "max":255 + }, + "S3Key":{ + "type":"string", + "max":1024 + }, + "S3Location":{ + "type":"structure", + "members":{ + "S3Bucket":{"shape":"S3Bucket"}, + "S3Key":{"shape":"S3Key"} + } + }, + "S3LocationNotInServiceRegionException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"S3LocationNotInServiceRegionException", + "httpStatusCode":400, + "senderFault":true + }, + 
"exception":true + }, + "S3SubscriptionRequiredException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"S3SubscriptionRequiredException", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SampleTimestamp":{"type":"timestamp"}, + "SingleInstanceHealth":{ + "type":"structure", + "members":{ + "InstanceId":{"shape":"InstanceId"}, + "HealthStatus":{"shape":"String"}, + "Color":{"shape":"String"}, + "Causes":{"shape":"Causes"}, + "LaunchedAt":{"shape":"LaunchedAt"}, + "ApplicationMetrics":{"shape":"ApplicationMetrics"}, + "System":{"shape":"SystemStatus"}, + "Deployment":{"shape":"Deployment"}, + "AvailabilityZone":{"shape":"String"}, + "InstanceType":{"shape":"String"} + } + }, + "SolutionStackDescription":{ + "type":"structure", + "members":{ + "SolutionStackName":{"shape":"SolutionStackName"}, + "PermittedFileTypes":{"shape":"SolutionStackFileTypeList"} + } + }, + "SolutionStackFileTypeList":{ + "type":"list", + "member":{"shape":"FileTypeExtension"} + }, + "SolutionStackName":{ + "type":"string", + "max":100 + }, + "SourceBundleDeletionException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SourceBundleDeletionFailure", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SourceConfiguration":{ + "type":"structure", + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "TemplateName":{"shape":"ConfigurationTemplateName"} + } + }, + "StatusCodes":{ + "type":"structure", + "members":{ + "Status2xx":{"shape":"NullableInteger"}, + "Status3xx":{"shape":"NullableInteger"}, + "Status4xx":{"shape":"NullableInteger"}, + "Status5xx":{"shape":"NullableInteger"} + } + }, + "String":{"type":"string"}, + "SwapEnvironmentCNAMEsMessage":{ + "type":"structure", + "members":{ + "SourceEnvironmentId":{"shape":"EnvironmentId"}, + "SourceEnvironmentName":{"shape":"EnvironmentName"}, + "DestinationEnvironmentId":{"shape":"EnvironmentId"}, + "DestinationEnvironmentName":{"shape":"EnvironmentName"} + } + }, + "SystemStatus":{ + "type":"structure", + "members":{ + "CPUUtilization":{"shape":"CPUUtilization"}, + "LoadAverage":{"shape":"LoadAverage"} + } + }, + "Tag":{ + "type":"structure", + "members":{ + "Key":{"shape":"TagKey"}, + "Value":{"shape":"TagValue"} + } + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagValue":{ + "type":"string", + "max":256, + "min":1 + }, + "Tags":{ + "type":"list", + "member":{"shape":"Tag"} + }, + "TerminateEnvForce":{"type":"boolean"}, + "TerminateEnvironmentMessage":{ + "type":"structure", + "members":{ + "EnvironmentId":{"shape":"EnvironmentId"}, + "EnvironmentName":{"shape":"EnvironmentName"}, + "TerminateResources":{"shape":"TerminateEnvironmentResources"}, + "ForceTerminate":{"shape":"ForceTerminate"} + } + }, + "TerminateEnvironmentResources":{"type":"boolean"}, + "TimeFilterEnd":{"type":"timestamp"}, + "TimeFilterStart":{"type":"timestamp"}, + "Timestamp":{"type":"timestamp"}, + "Token":{"type":"string"}, + "TooManyApplicationVersionsException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "TooManyApplicationsException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"TooManyApplicationsException", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "TooManyBucketsException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"TooManyBucketsException", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "TooManyConfigurationTemplatesException":{ + 
"type":"structure", + "members":{ + }, + "error":{ + "code":"TooManyConfigurationTemplatesException", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "TooManyEnvironmentsException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"TooManyEnvironmentsException", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Trigger":{ + "type":"structure", + "members":{ + "Name":{"shape":"ResourceId"} + } + }, + "TriggerList":{ + "type":"list", + "member":{"shape":"Trigger"} + }, + "UpdateApplicationMessage":{ + "type":"structure", + "required":["ApplicationName"], + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "Description":{"shape":"Description"} + } + }, + "UpdateApplicationVersionMessage":{ + "type":"structure", + "required":[ + "ApplicationName", + "VersionLabel" + ], + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "VersionLabel":{"shape":"VersionLabel"}, + "Description":{"shape":"Description"} + } + }, + "UpdateConfigurationTemplateMessage":{ + "type":"structure", + "required":[ + "ApplicationName", + "TemplateName" + ], + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "TemplateName":{"shape":"ConfigurationTemplateName"}, + "Description":{"shape":"Description"}, + "OptionSettings":{"shape":"ConfigurationOptionSettingsList"}, + "OptionsToRemove":{"shape":"OptionsSpecifierList"} + } + }, + "UpdateDate":{"type":"timestamp"}, + "UpdateEnvironmentMessage":{ + "type":"structure", + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "EnvironmentId":{"shape":"EnvironmentId"}, + "EnvironmentName":{"shape":"EnvironmentName"}, + "GroupName":{"shape":"GroupName"}, + "Description":{"shape":"Description"}, + "Tier":{"shape":"EnvironmentTier"}, + "VersionLabel":{"shape":"VersionLabel"}, + "TemplateName":{"shape":"ConfigurationTemplateName"}, + "SolutionStackName":{"shape":"SolutionStackName"}, + "OptionSettings":{"shape":"ConfigurationOptionSettingsList"}, + "OptionsToRemove":{"shape":"OptionsSpecifierList"} + } + }, + "UserDefinedOption":{"type":"boolean"}, + "ValidateConfigurationSettingsMessage":{ + "type":"structure", + "required":[ + "ApplicationName", + "OptionSettings" + ], + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "TemplateName":{"shape":"ConfigurationTemplateName"}, + "EnvironmentName":{"shape":"EnvironmentName"}, + "OptionSettings":{"shape":"ConfigurationOptionSettingsList"} + } + }, + "ValidationMessage":{ + "type":"structure", + "members":{ + "Message":{"shape":"ValidationMessageString"}, + "Severity":{"shape":"ValidationSeverity"}, + "Namespace":{"shape":"OptionNamespace"}, + "OptionName":{"shape":"ConfigurationOptionName"} + } + }, + "ValidationMessageString":{"type":"string"}, + "ValidationMessagesList":{ + "type":"list", + "member":{"shape":"ValidationMessage"} + }, + "ValidationSeverity":{ + "type":"string", + "enum":[ + "error", + "warning" + ] + }, + "VersionLabel":{ + "type":"string", + "max":100, + "min":1 + }, + "VersionLabels":{ + "type":"list", + "member":{"shape":"VersionLabel"} + }, + "VersionLabelsList":{ + "type":"list", + "member":{"shape":"VersionLabel"} + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/elasticbeanstalk/2010-12-01/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticbeanstalk/2010-12-01/docs-2.json new file mode 100644 index 000000000..cf1ac1f8f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticbeanstalk/2010-12-01/docs-2.json @@ -0,0 +1,1528 @@ +{ + 
"version": "2.0", + "service": "AWS Elastic Beanstalk

    AWS Elastic Beanstalk makes it easy for you to create, deploy, and manage scalable, fault-tolerant applications running on the Amazon Web Services cloud.

    For more information about this product, go to the AWS Elastic Beanstalk details page. The location of the latest AWS Elastic Beanstalk WSDL is http://elasticbeanstalk.s3.amazonaws.com/doc/2010-12-01/AWSElasticBeanstalk.wsdl. To install the Software Development Kits (SDKs), Integrated Development Environment (IDE) Toolkits, and command line tools that enable you to access the API, go to Tools for Amazon Web Services.

    Endpoints

    For a list of region-specific endpoints that AWS Elastic Beanstalk supports, go to Regions and Endpoints in the Amazon Web Services Glossary.

    ", + "operations": { + "AbortEnvironmentUpdate": "

    Cancels an in-progress environment configuration update or application version deployment.

    ", + "ApplyEnvironmentManagedAction": "

    Applies a scheduled managed action immediately. A managed action can be applied only if its status is Scheduled. Get the status and action ID of a managed action with DescribeEnvironmentManagedActions.
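    As a point of reference for readers of this vendored model, a minimal sketch of invoking the operation through the generated Go client in service/elasticbeanstalk follows. The region, environment name, and action ID are placeholder values; a real action ID would come from DescribeEnvironmentManagedActions.

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/elasticbeanstalk"
        )

        func main() {
            sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
            svc := elasticbeanstalk.New(sess)

            // The action is applied only while its status is Scheduled.
            out, err := svc.ApplyEnvironmentManagedAction(&elasticbeanstalk.ApplyEnvironmentManagedActionInput{
                EnvironmentName: aws.String("my-env"),         // placeholder environment
                ActionId:        aws.String("example-action"), // placeholder action ID
            })
            if err != nil {
                fmt.Println("apply failed:", err)
                return
            }
            fmt.Println("applied:", aws.StringValue(out.ActionId), aws.StringValue(out.Status))
        }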

    ", + "CheckDNSAvailability": "

    Checks if the specified CNAME is available.
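    Under the same client setup as the sketch above (imports and svc reused; the prefix is a placeholder), checking a prefix before creating an environment might look like:

        // checkCNAME reports whether a CNAME prefix is still free.
        func checkCNAME(svc *elasticbeanstalk.ElasticBeanstalk) error {
            out, err := svc.CheckDNSAvailability(&elasticbeanstalk.CheckDNSAvailabilityInput{
                CNAMEPrefix: aws.String("my-app"), // placeholder prefix
            })
            if err != nil {
                return err
            }
            fmt.Println(aws.BoolValue(out.Available), aws.StringValue(out.FullyQualifiedCNAME))
            return nil
        }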

    ", + "ComposeEnvironments": "

    Creates or updates a group of environments that each run a separate component of a single application. Takes a list of version labels that specify application source bundles for each of the environments to create or update. The name of each environment and other required information must be included in the source bundles in an environment manifest named env.yaml. See Compose Environments for details.

    ", + "CreateApplication": "

    Creates an application that has one configuration template named default and no application versions.

    ", + "CreateApplicationVersion": "

    Creates an application version for the specified application.

    Once you create an application version with a specified Amazon S3 bucket and key location, you cannot change that Amazon S3 location. If you change the Amazon S3 location, you receive an exception when you attempt to launch an environment from the application version. ", + "CreateConfigurationTemplate": "

    Creates a configuration template. Templates are associated with a specific application and are used to deploy different versions of the application with the same configuration settings.

    Related Topics

    ", + "CreateEnvironment": "

    Launches an environment for the specified application using the specified configuration.

    ", + "CreateStorageLocation": "

    Creates the Amazon S3 storage location for the account.

    This location is used to store user log files.

    ", + "DeleteApplication": "

    Deletes the specified application along with all associated versions and configurations. The application versions will not be deleted from your Amazon S3 bucket.

    You cannot delete an application that has a running environment. ", + "DeleteApplicationVersion": "

    Deletes the specified version from the specified application.

    You cannot delete an application version that is associated with a running environment.", + "DeleteConfigurationTemplate": "

    Deletes the specified configuration template.

    When you launch an environment using a configuration template, the environment gets a copy of the template. You can delete or modify the environment's copy of the template without affecting the running environment.", + "DeleteEnvironmentConfiguration": "

    Deletes the draft configuration associated with the running environment.

    Updating a running environment with any configuration changes creates a draft configuration set. You can get the draft configuration using DescribeConfigurationSettings while the update is in progress or if the update fails. The DeploymentStatus for the draft configuration indicates whether the deployment is in process or has failed. The draft configuration remains in existence until it is deleted with this action.

    ", + "DescribeApplicationVersions": "

    Retrieves a list of application versions stored in your AWS Elastic Beanstalk storage bucket.

    ", + "DescribeApplications": "

    Returns the descriptions of existing applications.

    ", + "DescribeConfigurationOptions": "

    Describes the configuration options that are used in a particular configuration template or environment, or that a specified solution stack defines. The description includes the values of the options, their default values, and an indication of the required action on a running environment if an option value is changed.

    ", + "DescribeConfigurationSettings": "

    Returns a description of the settings for the specified configuration set, that is, either a configuration template or the configuration set associated with a running environment.

    When describing the settings for the configuration set associated with a running environment, it is possible to receive two sets of setting descriptions. One is the deployed configuration set, and the other is a draft configuration of an environment that is either in the process of deployment or that failed to deploy.

    Related Topics

    ", + "DescribeEnvironmentHealth": "

    Returns information about the overall health of the specified environment. The DescribeEnvironmentHealth operation is only available with AWS Elastic Beanstalk Enhanced Health.
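    A rough sketch of the call with the generated client (svc as in the first example; the environment name is a placeholder, and the "All" attribute requests every available response element):

        // describeHealth fetches overall health for an Enhanced Health environment.
        func describeHealth(svc *elasticbeanstalk.ElasticBeanstalk) error {
            out, err := svc.DescribeEnvironmentHealth(&elasticbeanstalk.DescribeEnvironmentHealthInput{
                EnvironmentName: aws.String("my-env"), // placeholder environment
                AttributeNames:  aws.StringSlice([]string{"All"}),
            })
            if err != nil {
                return err
            }
            fmt.Println(aws.StringValue(out.HealthStatus), aws.StringValue(out.Color))
            return nil
        }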

    ", + "DescribeEnvironmentManagedActionHistory": "

    Lists an environment's completed and failed managed actions.

    ", + "DescribeEnvironmentManagedActions": "

    Lists an environment's upcoming and in-progress managed actions.

    ", + "DescribeEnvironmentResources": "

    Returns AWS resources for this environment.

    ", + "DescribeEnvironments": "

    Returns descriptions for existing environments.

    ", + "DescribeEvents": "

    Returns a list of event descriptions matching criteria up to the last 6 weeks.

    This action returns the most recent 1,000 events from the specified NextToken.", + "DescribeInstancesHealth": "

    Returns more detailed information about the health of the specified instances (for example, CPU utilization, load average, and causes). The DescribeInstancesHealth operation is only available with AWS Elastic Beanstalk Enhanced Health.

    ", + "ListAvailableSolutionStacks": "

    Returns a list of the available solution stack names.

    ", + "RebuildEnvironment": "

    Deletes and recreates all of the AWS resources (for example, the Auto Scaling group and load balancer) for a specified environment and forces a restart.

    ", + "RequestEnvironmentInfo": "

    Initiates a request to compile the specified type of information about the deployed environment.

    Setting the InfoType to tail compiles the last lines from the application server log files of every Amazon EC2 instance in your environment.

    Setting the InfoType to bundle compresses the application server log files for every Amazon EC2 instance into a .zip file. Legacy and .NET containers do not support bundle logs.

    Use RetrieveEnvironmentInfo to obtain the set of logs.

    Related Topics
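    The two-step round trip might be sketched as follows (svc as in the first example, plus the standard time package; the sleep is a crude stand-in for polling the environment's events until the compilation finishes, and the environment name is a placeholder):

        // tailLogs requests the last log lines and then retrieves the result URLs.
        func tailLogs(svc *elasticbeanstalk.ElasticBeanstalk) error {
            env := aws.String("my-env") // placeholder environment
            if _, err := svc.RequestEnvironmentInfo(&elasticbeanstalk.RequestEnvironmentInfoInput{
                EnvironmentName: env,
                InfoType:        aws.String("tail"),
            }); err != nil {
                return err
            }
            time.Sleep(15 * time.Second) // stand-in for polling until compilation completes

            out, err := svc.RetrieveEnvironmentInfo(&elasticbeanstalk.RetrieveEnvironmentInfoInput{
                EnvironmentName: env,
                InfoType:        aws.String("tail"),
            })
            if err != nil {
                return err
            }
            for _, info := range out.EnvironmentInfo {
                // Message carries a pre-signed S3 URL for the compiled log data.
                fmt.Println(aws.StringValue(info.Ec2InstanceId), aws.StringValue(info.Message))
            }
            return nil
        }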

    ", + "RestartAppServer": "

    Causes the environment to restart the application container server running on each Amazon EC2 instance.

    ", + "RetrieveEnvironmentInfo": "

    Retrieves the compiled information from a RequestEnvironmentInfo request.

    Related Topics

    ", + "SwapEnvironmentCNAMEs": "

    Swaps the CNAMEs of two environments.
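    A minimal blue/green cutover sketch (svc as in the first example; both environment names are placeholders):

        // swapCNAMEs points the source environment's CNAME at the destination and vice versa.
        func swapCNAMEs(svc *elasticbeanstalk.ElasticBeanstalk) error {
            _, err := svc.SwapEnvironmentCNAMEs(&elasticbeanstalk.SwapEnvironmentCNAMEsInput{
                SourceEnvironmentName:      aws.String("my-env-blue"),  // placeholder
                DestinationEnvironmentName: aws.String("my-env-green"), // placeholder
            })
            return err
        }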

    ", + "TerminateEnvironment": "

    Terminates the specified environment.

    ", + "UpdateApplication": "

    Updates the specified application to have the specified properties.

    If a property (for example, description) is not provided, the value remains unchanged. To clear these properties, specify an empty string. ", + "UpdateApplicationVersion": "

    Updates the specified application version to have the specified properties.

    If a property (for example, description) is not provided, the value remains unchanged. To clear properties, specify an empty string. ", + "UpdateConfigurationTemplate": "

    Updates the specified configuration template to have the specified properties or configuration option values.

    If a property (for example, ApplicationName) is not provided, its value remains unchanged. To clear such properties, specify an empty string.

    Related Topics

    ", + "UpdateEnvironment": "

    Updates the environment description, deploys a new application version, updates the configuration settings to an entirely new configuration template, or updates select configuration option values in the running environment.

    Attempting to update both the release and configuration is not allowed and AWS Elastic Beanstalk returns an InvalidParameterCombination error.

    When updating the configuration settings to a new template or individual settings, a draft configuration is created and DescribeConfigurationSettings for this environment returns two setting descriptions with different DeploymentStatus values.
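    For illustration, a release-only update (svc as in the first example; names are placeholders) deploys an existing application version without touching configuration, which keeps it clear of the InvalidParameterCombination restriction above:

        // deployVersion points a running environment at an already-created version.
        func deployVersion(svc *elasticbeanstalk.ElasticBeanstalk) error {
            _, err := svc.UpdateEnvironment(&elasticbeanstalk.UpdateEnvironmentInput{
                EnvironmentName: aws.String("my-env"), // placeholder environment
                VersionLabel:    aws.String("v42"),    // placeholder version label
            })
            return err
        }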

    ", + "ValidateConfigurationSettings": "

    Takes a set of configuration settings and either a configuration template or environment, and determines whether those values are valid.

    This action returns a list of messages indicating any errors or warnings associated with the selection of option values.
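    A sketch of validating a single option against a running environment (svc as in the first example; application and environment names and the option value are placeholders, while aws:autoscaling:asg/MinSize is a standard Elastic Beanstalk option):

        // validateSettings checks proposed option values before applying them.
        func validateSettings(svc *elasticbeanstalk.ElasticBeanstalk) error {
            out, err := svc.ValidateConfigurationSettings(&elasticbeanstalk.ValidateConfigurationSettingsInput{
                ApplicationName: aws.String("my-app"), // placeholder application
                EnvironmentName: aws.String("my-env"), // placeholder environment
                OptionSettings: []*elasticbeanstalk.ConfigurationOptionSetting{{
                    Namespace:  aws.String("aws:autoscaling:asg"),
                    OptionName: aws.String("MinSize"),
                    Value:      aws.String("2"),
                }},
            })
            if err != nil {
                return err
            }
            for _, m := range out.Messages {
                fmt.Println(aws.StringValue(m.Severity), aws.StringValue(m.Message))
            }
            return nil
        }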

    " + }, + "shapes": { + "AbortEnvironmentUpdateMessage": { + "base": "

    ", + "refs": { + } + }, + "AbortableOperationInProgress": { + "base": null, + "refs": { + "EnvironmentDescription$AbortableOperationInProgress": "

    Indicates if there is an in-progress environment configuration update or application version deployment that you can cancel.

    true: There is an update in progress.

    false: There are no updates currently in progress.

    " + } + }, + "ActionHistoryStatus": { + "base": null, + "refs": { + "ManagedActionHistoryItem$Status": "

    The status of the action.

    " + } + }, + "ActionStatus": { + "base": null, + "refs": { + "DescribeEnvironmentManagedActionsRequest$Status": "

    To show only actions with a particular status, specify a status.

    ", + "ManagedAction$Status": "

    The status of the managed action. If the action is Scheduled, you can apply it immediately with ApplyEnvironmentManagedAction.

    " + } + }, + "ActionType": { + "base": null, + "refs": { + "ApplyEnvironmentManagedActionResult$ActionType": "

    The type of managed action.

    ", + "ManagedAction$ActionType": "

    The type of managed action.

    ", + "ManagedActionHistoryItem$ActionType": "

    The type of the managed action.

    " + } + }, + "ApplicationDescription": { + "base": "

    Describes the properties of an application.

    ", + "refs": { + "ApplicationDescriptionList$member": null, + "ApplicationDescriptionMessage$Application": "

    The ApplicationDescription of the application.

    " + } + }, + "ApplicationDescriptionList": { + "base": null, + "refs": { + "ApplicationDescriptionsMessage$Applications": "

    This parameter contains a list of ApplicationDescription.

    " + } + }, + "ApplicationDescriptionMessage": { + "base": "

    Result message containing a single description of an application.

    ", + "refs": { + } + }, + "ApplicationDescriptionsMessage": { + "base": "

    Result message containing a list of application descriptions.

    ", + "refs": { + } + }, + "ApplicationMetrics": { + "base": "

    Represents the application metrics for a specified environment.

    ", + "refs": { + "DescribeEnvironmentHealthResult$ApplicationMetrics": null, + "SingleInstanceHealth$ApplicationMetrics": null + } + }, + "ApplicationName": { + "base": null, + "refs": { + "ApplicationDescription$ApplicationName": "

    The name of the application.

    ", + "ApplicationNamesList$member": null, + "ApplicationVersionDescription$ApplicationName": "

    The name of the application associated with this release.

    ", + "ComposeEnvironmentsMessage$ApplicationName": "

    The name of the application to which the specified source bundles belong.

    ", + "ConfigurationSettingsDescription$ApplicationName": "

    The name of the application associated with this configuration set.

    ", + "CreateApplicationMessage$ApplicationName": "

    The name of the application.

    Constraint: This name must be unique within your account. If the specified name already exists, the action returns an InvalidParameterValue error.

    ", + "CreateApplicationVersionMessage$ApplicationName": "

    The name of the application. If no application is found with this name, and AutoCreateApplication is false, CreateApplicationVersion returns an InvalidParameterValue error.

    ", + "CreateConfigurationTemplateMessage$ApplicationName": "

    The name of the application to associate with this configuration template. If no application is found with this name, AWS Elastic Beanstalk returns an InvalidParameterValue error.

    ", + "CreateEnvironmentMessage$ApplicationName": "

    The name of the application that contains the version to be deployed.

    If no application is found with this name, CreateEnvironment returns an InvalidParameterValue error.

    ", + "DeleteApplicationMessage$ApplicationName": "

    The name of the application to delete.

    ", + "DeleteApplicationVersionMessage$ApplicationName": "

    The name of the application to delete releases from.

    ", + "DeleteConfigurationTemplateMessage$ApplicationName": "

    The name of the application to delete the configuration template from.

    ", + "DeleteEnvironmentConfigurationMessage$ApplicationName": "

    The name of the application the environment is associated with.

    ", + "DescribeApplicationVersionsMessage$ApplicationName": "

    If specified, AWS Elastic Beanstalk restricts the returned descriptions to only include ones that are associated with the specified application.

    ", + "DescribeConfigurationOptionsMessage$ApplicationName": "

    The name of the application associated with the configuration template or environment. Only needed if you want to describe the configuration options associated with either the configuration template or environment.

    ", + "DescribeConfigurationSettingsMessage$ApplicationName": "

    The application for the environment or configuration template.

    ", + "DescribeEnvironmentsMessage$ApplicationName": "

    If specified, AWS Elastic Beanstalk restricts the returned descriptions to include only those that are associated with this application.

    ", + "DescribeEventsMessage$ApplicationName": "

    If specified, AWS Elastic Beanstalk restricts the returned descriptions to include only those associated with this application.

    ", + "EnvironmentDescription$ApplicationName": "

    The name of the application associated with this environment.

    ", + "EventDescription$ApplicationName": "

    The application associated with the event.

    ", + "SourceConfiguration$ApplicationName": "

    The name of the application associated with the configuration.

    ", + "UpdateApplicationMessage$ApplicationName": "

    The name of the application to update. If no such application is found, UpdateApplication returns an InvalidParameterValue error.

    ", + "UpdateApplicationVersionMessage$ApplicationName": "

    The name of the application associated with this version.

    If no application is found with this name, UpdateApplicationVersion returns an InvalidParameterValue error.

    ", + "UpdateConfigurationTemplateMessage$ApplicationName": "

    The name of the application associated with the configuration template to update.

    If no application is found with this name, UpdateConfigurationTemplate returns an InvalidParameterValue error.

    ", + "UpdateEnvironmentMessage$ApplicationName": "

    The name of the application with which the environment is associated.

    ", + "ValidateConfigurationSettingsMessage$ApplicationName": "

    The name of the application that the configuration template or environment belongs to.

    " + } + }, + "ApplicationNamesList": { + "base": null, + "refs": { + "DescribeApplicationsMessage$ApplicationNames": "

    If specified, AWS Elastic Beanstalk restricts the returned descriptions to only include those with the specified names.

    " + } + }, + "ApplicationVersionDescription": { + "base": "

    Describes the properties of an application version.

    ", + "refs": { + "ApplicationVersionDescriptionList$member": null, + "ApplicationVersionDescriptionMessage$ApplicationVersion": "

    The ApplicationVersionDescription of the application version.

    " + } + }, + "ApplicationVersionDescriptionList": { + "base": null, + "refs": { + "ApplicationVersionDescriptionsMessage$ApplicationVersions": "

    List of ApplicationVersionDescription objects sorted in order of creation.

    " + } + }, + "ApplicationVersionDescriptionMessage": { + "base": "

    Result message wrapping a single description of an application version.

    ", + "refs": { + } + }, + "ApplicationVersionDescriptionsMessage": { + "base": "

    Result message wrapping a list of application version descriptions.

    ", + "refs": { + } + }, + "ApplicationVersionProccess": { + "base": null, + "refs": { + "CreateApplicationVersionMessage$Process": "

    Preprocesses and validates the environment manifest and configuration files in the source bundle. Validating configuration files can identify issues prior to deploying the application version to an environment.

    " + } + }, + "ApplicationVersionStatus": { + "base": null, + "refs": { + "ApplicationVersionDescription$Status": "

    The processing status of the application version.

    " + } + }, + "ApplyEnvironmentManagedActionRequest": { + "base": "

    Request to execute a scheduled managed action immediately.

    ", + "refs": { + } + }, + "ApplyEnvironmentManagedActionResult": { + "base": "

    The result message containing information about the managed action.

    ", + "refs": { + } + }, + "AutoCreateApplication": { + "base": null, + "refs": { + "CreateApplicationVersionMessage$AutoCreateApplication": "

    Determines how the system behaves if the specified application for this version does not already exist:

    • true : Automatically creates the specified application for this release if it does not already exist.
    • false : Throws an InvalidParameterValue error if the specified application for this release does not already exist.

    Default: false

    Valid Values: true | false

    " + } + }, + "AutoScalingGroup": { + "base": "

    Describes an Auto Scaling launch configuration.

    ", + "refs": { + "AutoScalingGroupList$member": null + } + }, + "AutoScalingGroupList": { + "base": null, + "refs": { + "EnvironmentResourceDescription$AutoScalingGroups": "

    The AutoScalingGroups used by this environment.

    " + } + }, + "AvailableSolutionStackDetailsList": { + "base": null, + "refs": { + "ListAvailableSolutionStacksResultMessage$SolutionStackDetails": "

    A list of available solution stacks and their SolutionStackDescription.

    " + } + }, + "AvailableSolutionStackNamesList": { + "base": null, + "refs": { + "ListAvailableSolutionStacksResultMessage$SolutionStacks": "

    A list of available solution stacks.

    " + } + }, + "CPUUtilization": { + "base": "

    Represents CPU utilization information from the specified instance that belongs to the AWS Elastic Beanstalk environment. Use the instanceId property to specify the application instance for which you'd like to return data.

    ", + "refs": { + "SystemStatus$CPUUtilization": null + } + }, + "Cause": { + "base": null, + "refs": { + "Causes$member": null + } + }, + "Causes": { + "base": null, + "refs": { + "DescribeEnvironmentHealthResult$Causes": "

    Returns potential causes for the reported status.

    ", + "SingleInstanceHealth$Causes": "

    Represents the causes, which provide more information about the current health status.

    " + } + }, + "CheckDNSAvailabilityMessage": { + "base": "

    Request to check whether a CNAME is available.

    ", + "refs": { + } + }, + "CheckDNSAvailabilityResultMessage": { + "base": "

    Indicates if the specified CNAME is available.

    ", + "refs": { + } + }, + "CnameAvailability": { + "base": null, + "refs": { + "CheckDNSAvailabilityResultMessage$Available": "

    Indicates if the specified CNAME is available:

    • true : The CNAME is available.
    • false : The CNAME is not available.
    " + } + }, + "ComposeEnvironmentsMessage": { + "base": "

    Request to create or update a group of environments.

    ", + "refs": { + } + }, + "ConfigurationDeploymentStatus": { + "base": null, + "refs": { + "ConfigurationSettingsDescription$DeploymentStatus": "

    If this configuration set is associated with an environment, the DeploymentStatus parameter indicates the deployment status of this configuration set:

    • null: This configuration is not associated with a running environment.
    • pending: This is a draft configuration that is not deployed to the associated environment but is in the process of deploying.
    • deployed: This is the configuration that is currently deployed to the associated running environment.
    • failed: This is a draft configuration that failed to successfully deploy.
    " + } + }, + "ConfigurationOptionDefaultValue": { + "base": null, + "refs": { + "ConfigurationOptionDescription$DefaultValue": "

    The default value for this configuration option.

    " + } + }, + "ConfigurationOptionDescription": { + "base": "

    Describes the possible values for a configuration option.

    ", + "refs": { + "ConfigurationOptionDescriptionsList$member": null + } + }, + "ConfigurationOptionDescriptionsList": { + "base": null, + "refs": { + "ConfigurationOptionsDescription$Options": "

    A list of ConfigurationOptionDescription.

    " + } + }, + "ConfigurationOptionName": { + "base": null, + "refs": { + "ConfigurationOptionDescription$Name": "

    The name of the configuration option.

    ", + "ConfigurationOptionSetting$OptionName": "

    The name of the configuration option.

    ", + "OptionSpecification$OptionName": "

    The name of the configuration option.

    ", + "ValidationMessage$OptionName": "

    " + } + }, + "ConfigurationOptionPossibleValue": { + "base": null, + "refs": { + "ConfigurationOptionPossibleValues$member": null + } + }, + "ConfigurationOptionPossibleValues": { + "base": null, + "refs": { + "ConfigurationOptionDescription$ValueOptions": "

    If specified, values for the configuration option are selected from this list.

    " + } + }, + "ConfigurationOptionSetting": { + "base": "

    A specification identifying an individual configuration option along with its current value. For a list of possible option values, go to Option Values in the AWS Elastic Beanstalk Developer Guide.

    ", + "refs": { + "ConfigurationOptionSettingsList$member": null + } + }, + "ConfigurationOptionSettingsList": { + "base": null, + "refs": { + "ConfigurationSettingsDescription$OptionSettings": "

    A list of the configuration options and their values in this configuration set.

    ", + "CreateConfigurationTemplateMessage$OptionSettings": "

    If specified, AWS Elastic Beanstalk sets the specified configuration option to the requested value. The new value overrides the value obtained from the solution stack or the source configuration template.

    ", + "CreateEnvironmentMessage$OptionSettings": "

    If specified, AWS Elastic Beanstalk sets the specified configuration options to the requested value in the configuration set for the new environment. These override the values obtained from the solution stack or the configuration template.

    ", + "UpdateConfigurationTemplateMessage$OptionSettings": "

    A list of configuration option settings to update with the new specified option value.

    ", + "UpdateEnvironmentMessage$OptionSettings": "

    If specified, AWS Elastic Beanstalk updates the configuration set associated with the running environment and sets the specified configuration options to the requested value.

    ", + "ValidateConfigurationSettingsMessage$OptionSettings": "

    A list of the options and desired values to evaluate.

    " + } + }, + "ConfigurationOptionSeverity": { + "base": null, + "refs": { + "ConfigurationOptionDescription$ChangeSeverity": "

    An indication of which action is required if the value for this configuration option changes:

    • NoInterruption : There is no interruption to the environment or application availability.
    • RestartEnvironment : The environment is entirely restarted, all AWS resources are deleted and recreated, and the environment is unavailable during the process.
    • RestartApplicationServer : The environment is available the entire time. However, a short application outage occurs when the application servers on the running Amazon EC2 instances are restarted.
    " + } + }, + "ConfigurationOptionValue": { + "base": null, + "refs": { + "ConfigurationOptionSetting$Value": "

    The current value for the configuration option.

    " + } + }, + "ConfigurationOptionValueType": { + "base": null, + "refs": { + "ConfigurationOptionDescription$ValueType": "

    An indication of which type of values this option has and whether it is allowable to select one or more than one of the possible values:

    • Scalar : Values for this option are a single selection from the possible values, an unformatted string, or a numeric value governed by the MIN/MAX/Regex constraints.
    • List : Values for this option are multiple selections from the possible values.
    • Boolean : Values for this option are either true or false .
    • Json : Values for this option are a JSON representation of a ConfigDocument.
    " + } + }, + "ConfigurationOptionsDescription": { + "base": "

    Describes the settings for a specified configuration set.

    ", + "refs": { + } + }, + "ConfigurationSettingsDescription": { + "base": "

    Describes the settings for a configuration set.

    ", + "refs": { + "ConfigurationSettingsDescriptionList$member": null + } + }, + "ConfigurationSettingsDescriptionList": { + "base": null, + "refs": { + "ConfigurationSettingsDescriptions$ConfigurationSettings": "

    A list of ConfigurationSettingsDescription.

    " + } + }, + "ConfigurationSettingsDescriptions": { + "base": "

    The results from a request to change the configuration settings of an environment.

    ", + "refs": { + } + }, + "ConfigurationSettingsValidationMessages": { + "base": "

    Provides a list of validation messages.

    ", + "refs": { + } + }, + "ConfigurationTemplateName": { + "base": null, + "refs": { + "ConfigurationSettingsDescription$TemplateName": "

    If not null, the name of the configuration template for this configuration set.

    ", + "ConfigurationTemplateNamesList$member": null, + "CreateConfigurationTemplateMessage$TemplateName": "

    The name of the configuration template.

    Constraint: This name must be unique per application.

    Default: If a configuration template already exists with this name, AWS Elastic Beanstalk returns an InvalidParameterValue error.

    ", + "CreateEnvironmentMessage$TemplateName": "

    The name of the configuration template to use in deployment. If no configuration template is found with this name, AWS Elastic Beanstalk returns an InvalidParameterValue error.

    Condition: You must specify either this parameter or a SolutionStackName, but not both. If you specify both, AWS Elastic Beanstalk returns an InvalidParameterCombination error. If you do not specify either, AWS Elastic Beanstalk returns a MissingRequiredParameter error.

    ", + "DeleteConfigurationTemplateMessage$TemplateName": "

    The name of the configuration template to delete.

    ", + "DescribeConfigurationOptionsMessage$TemplateName": "

    The name of the configuration template whose configuration options you want to describe.

    ", + "DescribeConfigurationSettingsMessage$TemplateName": "

    The name of the configuration template to describe.

    Conditional: You must specify either this parameter or an EnvironmentName, but not both. If you specify both, AWS Elastic Beanstalk returns an InvalidParameterCombination error. If you do not specify either, AWS Elastic Beanstalk returns a MissingRequiredParameter error.

    ", + "DescribeEventsMessage$TemplateName": "

    If specified, AWS Elastic Beanstalk restricts the returned descriptions to those that are associated with this environment configuration.

    ", + "EnvironmentDescription$TemplateName": "

    The name of the configuration template used to originally launch this environment.

    ", + "EventDescription$TemplateName": "

    The name of the configuration associated with this event.

    ", + "SourceConfiguration$TemplateName": "

    The name of the configuration template.

    ", + "UpdateConfigurationTemplateMessage$TemplateName": "

    The name of the configuration template to update.

    If no configuration template is found with this name, UpdateConfigurationTemplate returns an InvalidParameterValue error.

    ", + "UpdateEnvironmentMessage$TemplateName": "

    If this parameter is specified, AWS Elastic Beanstalk deploys this configuration template to the environment. If no such configuration template is found, AWS Elastic Beanstalk returns an InvalidParameterValue error.

    ", + "ValidateConfigurationSettingsMessage$TemplateName": "

    The name of the configuration template to validate the settings against.

    Condition: You cannot specify both this and an environment name.

    " + } + }, + "ConfigurationTemplateNamesList": { + "base": null, + "refs": { + "ApplicationDescription$ConfigurationTemplates": "

    The names of the configuration templates associated with this application.

    " + } + }, + "CreateApplicationMessage": { + "base": "

    Request to create an application.

    ", + "refs": { + } + }, + "CreateApplicationVersionMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateConfigurationTemplateMessage": { + "base": "

    Request to create a configuration template.

    ", + "refs": { + } + }, + "CreateEnvironmentMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateStorageLocationResultMessage": { + "base": "

    Results of a CreateStorageLocation call.

    ", + "refs": { + } + }, + "CreationDate": { + "base": null, + "refs": { + "ApplicationDescription$DateCreated": "

    The date when the application was created.

    ", + "ApplicationVersionDescription$DateCreated": "

    The creation date of the application version.

    ", + "ConfigurationSettingsDescription$DateCreated": "

    The date (in UTC time) when this configuration set was created.

    ", + "EnvironmentDescription$DateCreated": "

    The creation date for this environment.

    " + } + }, + "DNSCname": { + "base": null, + "refs": { + "CheckDNSAvailabilityResultMessage$FullyQualifiedCNAME": "

    The fully qualified CNAME to reserve when CreateEnvironment is called with the provided prefix.

    ", + "EnvironmentDescription$CNAME": "

    The URL to the CNAME for this environment.

    " + } + }, + "DNSCnamePrefix": { + "base": null, + "refs": { + "CheckDNSAvailabilityMessage$CNAMEPrefix": "

    The prefix used when this CNAME is reserved.

    ", + "CreateEnvironmentMessage$CNAMEPrefix": "

    If specified, the environment attempts to use this value as the prefix for the CNAME. If not specified, the CNAME is generated automatically by appending a random alphanumeric string to the environment name.

    " + } + }, + "DeleteApplicationMessage": { + "base": "

    Request to delete an application.

    ", + "refs": { + } + }, + "DeleteApplicationVersionMessage": { + "base": "

    Request to delete an application version.

    ", + "refs": { + } + }, + "DeleteConfigurationTemplateMessage": { + "base": "

    Request to delete a configuration template.

    ", + "refs": { + } + }, + "DeleteEnvironmentConfigurationMessage": { + "base": "

    Request to delete a draft environment configuration.

    ", + "refs": { + } + }, + "DeleteSourceBundle": { + "base": null, + "refs": { + "DeleteApplicationVersionMessage$DeleteSourceBundle": "

    Indicates whether to delete the associated source bundle from Amazon S3:

    • true: An attempt is made to delete the associated Amazon S3 source bundle specified at time of creation.
    • false: No action is taken on the Amazon S3 source bundle specified at time of creation.

    Valid Values: true | false

    " + } + }, + "Deployment": { + "base": "

    Information about an application version deployment.

    ", + "refs": { + "SingleInstanceHealth$Deployment": "

    Information about the most recent deployment to an instance.

    " + } + }, + "DeploymentTimestamp": { + "base": null, + "refs": { + "Deployment$DeploymentTime": "

    For in-progress deployments, the time that the deployment started.

    For completed deployments, the time that the deployment ended.

    " + } + }, + "DescribeApplicationVersionsMessage": { + "base": "

    Request to describe application versions.

    ", + "refs": { + } + }, + "DescribeApplicationsMessage": { + "base": "

    Request to describe one or more applications.

    ", + "refs": { + } + }, + "DescribeConfigurationOptionsMessage": { + "base": "

    Request to describe configuration options.

    ", + "refs": { + } + }, + "DescribeConfigurationSettingsMessage": { + "base": "

    Request to describe the configuration settings of a configuration template or environment.

    ", + "refs": { + } + }, + "DescribeEnvironmentHealthRequest": { + "base": "

    See the example below to learn how to create a request body.

    ", + "refs": { + } + }, + "DescribeEnvironmentHealthResult": { + "base": "

    See the example below for a sample response.

    ", + "refs": { + } + }, + "DescribeEnvironmentManagedActionHistoryRequest": { + "base": "

    Request to list completed and failed managed actions.

    ", + "refs": { + } + }, + "DescribeEnvironmentManagedActionHistoryResult": { + "base": "

    A result message containing a list of completed and failed managed actions.

    ", + "refs": { + } + }, + "DescribeEnvironmentManagedActionsRequest": { + "base": "

    Request to list an environment's upcoming and in-progress managed actions.

    ", + "refs": { + } + }, + "DescribeEnvironmentManagedActionsResult": { + "base": "

    The result message containing a list of managed actions.

    ", + "refs": { + } + }, + "DescribeEnvironmentResourcesMessage": { + "base": "

    Request to describe the resources in an environment.

    ", + "refs": { + } + }, + "DescribeEnvironmentsMessage": { + "base": "

    Request to describe one or more environments.

    ", + "refs": { + } + }, + "DescribeEventsMessage": { + "base": "

    Request to retrieve a list of events for an environment.

    ", + "refs": { + } + }, + "DescribeInstancesHealthRequest": { + "base": "

    See the example below to learn how to create a request body.

    ", + "refs": { + } + }, + "DescribeInstancesHealthResult": { + "base": "

    See the example below for a sample response.

    ", + "refs": { + } + }, + "Description": { + "base": null, + "refs": { + "ApplicationDescription$Description": "

    User-defined description of the application.

    ", + "ApplicationVersionDescription$Description": "

    The description of this application version.

    ", + "ConfigurationSettingsDescription$Description": "

    Describes this configuration set.

    ", + "CreateApplicationMessage$Description": "

    Describes the application.

    ", + "CreateApplicationVersionMessage$Description": "

    Describes this version.

    ", + "CreateConfigurationTemplateMessage$Description": "

    Describes this configuration.

    ", + "CreateEnvironmentMessage$Description": "

    Describes this environment.

    ", + "EnvironmentDescription$Description": "

    Describes this environment.

    ", + "UpdateApplicationMessage$Description": "

    A new description for the application.

    Default: If not specified, AWS Elastic Beanstalk does not update the description.

    ", + "UpdateApplicationVersionMessage$Description": "

    A new description for this release.

    ", + "UpdateConfigurationTemplateMessage$Description": "

    A new description for the configuration.

    ", + "UpdateEnvironmentMessage$Description": "

    If this parameter is specified, AWS Elastic Beanstalk updates the description of this environment.

    " + } + }, + "Ec2InstanceId": { + "base": null, + "refs": { + "EnvironmentInfoDescription$Ec2InstanceId": "

    The Amazon EC2 Instance ID for this information.

    " + } + }, + "ElasticBeanstalkServiceException": { + "base": "

    A generic service exception has occurred.

    ", + "refs": { + } + }, + "EndpointURL": { + "base": null, + "refs": { + "EnvironmentDescription$EndpointURL": "

    For load-balanced, autoscaling environments, the URL to the LoadBalancer. For single-instance environments, the IP address of the instance.

    " + } + }, + "EnvironmentDescription": { + "base": "

    Describes the properties of an environment.

    ", + "refs": { + "EnvironmentDescriptionsList$member": null + } + }, + "EnvironmentDescriptionsList": { + "base": null, + "refs": { + "EnvironmentDescriptionsMessage$Environments": "

    Returns an EnvironmentDescription list.

    " + } + }, + "EnvironmentDescriptionsMessage": { + "base": "

    Result message containing a list of environment descriptions.

    ", + "refs": { + } + }, + "EnvironmentHealth": { + "base": null, + "refs": { + "DescribeEnvironmentHealthResult$Status": "

    Returns the health status value of the environment. For more information, see Health Colors and Statuses.

    ", + "EnvironmentDescription$Health": "

    Describes the health status of the environment. AWS Elastic Beanstalk indicates the failure levels for a running environment:

    • Red: Indicates the environment is not responsive. Occurs when three or more consecutive failures occur for an environment.
    • Yellow: Indicates that something is wrong. Occurs when two consecutive failures occur for an environment.
    • Green: Indicates the environment is healthy and fully functional.
    • Grey: Default health for a new environment. The environment is not fully launched and health checks have not started or health checks are suspended during an UpdateEnvironment or RestartEnvironment request.

    Default: Grey

    " + } + }, + "EnvironmentHealthAttribute": { + "base": null, + "refs": { + "EnvironmentHealthAttributes$member": null + } + }, + "EnvironmentHealthAttributes": { + "base": null, + "refs": { + "DescribeEnvironmentHealthRequest$AttributeNames": "

    Specifies the response elements you wish to receive. If no attribute names are specified, AWS Elastic Beanstalk only returns the name of the environment.

    " + } + }, + "EnvironmentHealthStatus": { + "base": null, + "refs": { + "EnvironmentDescription$HealthStatus": "

    Returns the health status of the application running in your environment. For more information, see Health Colors and Statuses.

    " + } + }, + "EnvironmentId": { + "base": null, + "refs": { + "AbortEnvironmentUpdateMessage$EnvironmentId": "

    This specifies the ID of the environment with the in-progress update that you want to cancel.

    ", + "CreateConfigurationTemplateMessage$EnvironmentId": "

    The ID of the environment used with this configuration template.

    ", + "DescribeEnvironmentHealthRequest$EnvironmentId": "

    Specifies the AWS Elastic Beanstalk environment ID.

    Condition: You must specify either this or an EnvironmentName, or both. If you do not specify either, AWS Elastic Beanstalk returns a MissingRequiredParameter error.

    ", + "DescribeEnvironmentManagedActionHistoryRequest$EnvironmentId": "

    The environment ID of the target environment.

    ", + "DescribeEnvironmentResourcesMessage$EnvironmentId": "

    The ID of the environment to retrieve AWS resource usage data.

    Condition: You must specify either this or an EnvironmentName, or both. If you do not specify either, AWS Elastic Beanstalk returns a MissingRequiredParameter error.

    ", + "DescribeEventsMessage$EnvironmentId": "

    If specified, AWS Elastic Beanstalk restricts the returned descriptions to those associated with this environment.

    ", + "DescribeInstancesHealthRequest$EnvironmentId": "

    Specifies the AWS Elastic Beanstalk environment ID.

    ", + "EnvironmentDescription$EnvironmentId": "

    The ID of this environment.

    ", + "EnvironmentIdList$member": null, + "RebuildEnvironmentMessage$EnvironmentId": "

    The ID of the environment to rebuild.

    Condition: You must specify either this or an EnvironmentName, or both. If you do not specify either, AWS Elastic Beanstalk returns a MissingRequiredParameter error.

    ", + "RequestEnvironmentInfoMessage$EnvironmentId": "

    The ID of the environment of the requested data.

    If no such environment is found, RequestEnvironmentInfo returns an InvalidParameterValue error.

    Condition: You must specify either this or an EnvironmentName, or both. If you do not specify either, AWS Elastic Beanstalk returns a MissingRequiredParameter error.

    ", + "RestartAppServerMessage$EnvironmentId": "

    The ID of the environment to restart the server for.

    Condition: You must specify either this or an EnvironmentName, or both. If you do not specify either, AWS Elastic Beanstalk returns a MissingRequiredParameter error.

    ", + "RetrieveEnvironmentInfoMessage$EnvironmentId": "

    The ID of the data's environment.

    If no such environment is found, RetrieveEnvironmentInfo returns an InvalidParameterValue error.

    Condition: You must specify either this or an EnvironmentName, or both. If you do not specify either, AWS Elastic Beanstalk returns a MissingRequiredParameter error.

    ", + "SwapEnvironmentCNAMEsMessage$SourceEnvironmentId": "

    The ID of the source environment.

    Condition: You must specify at least the SourceEnvironmentId or the SourceEnvironmentName. You may also specify both. If you specify the SourceEnvironmentId, you must specify the DestinationEnvironmentId.

    ", + "SwapEnvironmentCNAMEsMessage$DestinationEnvironmentId": "

    The ID of the destination environment.

    Condition: You must specify at least the DestinationEnvironmentId or the DestinationEnvironmentName. You may also specify both. You must specify the SourceEnvironmentId with the DestinationEnvironmentId.

    ", + "TerminateEnvironmentMessage$EnvironmentId": "

    The ID of the environment to terminate.

    Condition: You must specify either this or an EnvironmentName, or both. If you do not specify either, AWS Elastic Beanstalk returns a MissingRequiredParameter error.

    ", + "UpdateEnvironmentMessage$EnvironmentId": "

    The ID of the environment to update.

    If no environment with this ID exists, AWS Elastic Beanstalk returns an InvalidParameterValue error.

    Condition: You must specify either this or an EnvironmentName, or both. If you do not specify either, AWS Elastic Beanstalk returns a MissingRequiredParameter error.

    " + } + }, + "EnvironmentIdList": { + "base": null, + "refs": { + "DescribeEnvironmentsMessage$EnvironmentIds": "

    If specified, AWS Elastic Beanstalk restricts the returned descriptions to include only those that have the specified IDs.

    " + } + }, + "EnvironmentInfoDescription": { + "base": "

    The information retrieved from the Amazon EC2 instances.

    ", + "refs": { + "EnvironmentInfoDescriptionList$member": null + } + }, + "EnvironmentInfoDescriptionList": { + "base": null, + "refs": { + "RetrieveEnvironmentInfoResultMessage$EnvironmentInfo": "

    The EnvironmentInfoDescription of the environment.

    " + } + }, + "EnvironmentInfoType": { + "base": null, + "refs": { + "EnvironmentInfoDescription$InfoType": "

    The type of information retrieved.

    ", + "RequestEnvironmentInfoMessage$InfoType": "

    The type of information to request.

    ", + "RetrieveEnvironmentInfoMessage$InfoType": "

    The type of information to retrieve.

    " + } + }, + "EnvironmentLink": { + "base": "

    A link to another environment, defined in the environment's manifest. Links provide connection information in system properties that can be used to connect to another environment in the same group. See Environment Manifest (env.yaml) for details.

    ", + "refs": { + "EnvironmentLinks$member": null + } + }, + "EnvironmentLinks": { + "base": null, + "refs": { + "EnvironmentDescription$EnvironmentLinks": "

    A list of links to other environments in the same group.

    " + } + }, + "EnvironmentName": { + "base": null, + "refs": { + "AbortEnvironmentUpdateMessage$EnvironmentName": "

    This specifies the name of the environment with the in-progress update that you want to cancel.

    ", + "ConfigurationSettingsDescription$EnvironmentName": "

    If not null, the name of the environment for this configuration set.

    ", + "CreateEnvironmentMessage$EnvironmentName": "

    A unique name for the deployment environment. Used in the application URL.

    Constraint: Must be from 4 to 40 characters in length. The name can contain only letters, numbers, and hyphens. It cannot start or end with a hyphen. This name must be unique in your account. If the specified name already exists, AWS Elastic Beanstalk returns an InvalidParameterValue error.

    Default: If the CNAME parameter is not specified, the environment name becomes part of the CNAME, and therefore part of the visible URL for your application.

    ", + "DeleteEnvironmentConfigurationMessage$EnvironmentName": "

    The name of the environment to delete the draft configuration from.

    ", + "DescribeConfigurationOptionsMessage$EnvironmentName": "

    The name of the environment whose configuration options you want to describe.

    ", + "DescribeConfigurationSettingsMessage$EnvironmentName": "

    The name of the environment to describe.

    Condition: You must specify either this or a TemplateName, but not both. If you specify both, AWS Elastic Beanstalk returns an InvalidParameterCombination error. If you do not specify either, AWS Elastic Beanstalk returns a MissingRequiredParameter error.

    ", + "DescribeEnvironmentHealthRequest$EnvironmentName": "

    Specifies the AWS Elastic Beanstalk environment name.

    Condition: You must specify either this or an EnvironmentId, or both. If you do not specify either, AWS Elastic Beanstalk returns a MissingRequiredParameter error.

    ", + "DescribeEnvironmentHealthResult$EnvironmentName": "

    The AWS Elastic Beanstalk environment name.

    ", + "DescribeEnvironmentManagedActionHistoryRequest$EnvironmentName": "

    The name of the target environment.

    ", + "DescribeEnvironmentResourcesMessage$EnvironmentName": "

    The name of the environment to retrieve AWS resource usage data.

    Condition: You must specify either this or an EnvironmentId, or both. If you do not specify either, AWS Elastic Beanstalk returns a MissingRequiredParameter error.

    ", + "DescribeEventsMessage$EnvironmentName": "

    If specified, AWS Elastic Beanstalk restricts the returned descriptions to those associated with this environment.

    ", + "DescribeInstancesHealthRequest$EnvironmentName": "

    Specifies the AWS Elastic Beanstalk environment name.

    ", + "EnvironmentDescription$EnvironmentName": "

    The name of this environment.

    ", + "EnvironmentNamesList$member": null, + "EnvironmentResourceDescription$EnvironmentName": "

    The name of the environment.

    ", + "EventDescription$EnvironmentName": "

    The name of the environment associated with this event.

    ", + "RebuildEnvironmentMessage$EnvironmentName": "

    The name of the environment to rebuild.

    Condition: You must specify either this or an EnvironmentId, or both. If you do not specify either, AWS Elastic Beanstalk returns a MissingRequiredParameter error.

    ", + "RequestEnvironmentInfoMessage$EnvironmentName": "

    The name of the environment of the requested data.

    If no such environment is found, RequestEnvironmentInfo returns an InvalidParameterValue error.

    Condition: You must specify either this or an EnvironmentId, or both. If you do not specify either, AWS Elastic Beanstalk returns a MissingRequiredParameter error.

    ", + "RestartAppServerMessage$EnvironmentName": "

    The name of the environment to restart the server for.

    Condition: You must specify either this or an EnvironmentId, or both. If you do not specify either, AWS Elastic Beanstalk returns a MissingRequiredParameter error.

    ", + "RetrieveEnvironmentInfoMessage$EnvironmentName": "

    The name of the data's environment.

    If no such environment is found, RetrieveEnvironmentInfo returns an InvalidParameterValue error.

    Condition: You must specify either this or an EnvironmentId, or both. If you do not specify either, AWS Elastic Beanstalk returns a MissingRequiredParameter error.

    ", + "SwapEnvironmentCNAMEsMessage$SourceEnvironmentName": "

    The name of the source environment.

    Condition: You must specify at least the SourceEnvironmentId or the SourceEnvironmentName. You may also specify both. If you specify the SourceEnvironmentName, you must specify the DestinationEnvironmentName.

    ", + "SwapEnvironmentCNAMEsMessage$DestinationEnvironmentName": "

    The name of the destination environment.

    Condition: You must specify at least the DestinationEnvironmentId or the DestinationEnvironmentName. You may also specify both. You must specify the SourceEnvironmentName with the DestinationEnvironmentName.

    ", + "TerminateEnvironmentMessage$EnvironmentName": "

    The name of the environment to terminate.

    Condition: You must specify either this or an EnvironmentId, or both. If you do not specify either, AWS Elastic Beanstalk returns a MissingRequiredParameter error.

    ", + "UpdateEnvironmentMessage$EnvironmentName": "

    The name of the environment to update. If no environment with this name exists, AWS Elastic Beanstalk returns an InvalidParameterValue error.

    Condition: You must specify either this or an EnvironmentId, or both. If you do not specify either, AWS Elastic Beanstalk returns a MissingRequiredParameter error.

    ", + "ValidateConfigurationSettingsMessage$EnvironmentName": "

    The name of the environment to validate the settings against.

    Condition: You cannot specify both this and a configuration template name.

    " + } + }, + "EnvironmentNamesList": { + "base": null, + "refs": { + "DescribeEnvironmentsMessage$EnvironmentNames": "

    If specified, AWS Elastic Beanstalk restricts the returned descriptions to include only those that have the specified names.

    " + } + }, + "EnvironmentResourceDescription": { + "base": "

    Describes the AWS resources in use by this environment. This data is live.

    ", + "refs": { + "EnvironmentResourceDescriptionsMessage$EnvironmentResources": "

    A list of EnvironmentResourceDescription.

    " + } + }, + "EnvironmentResourceDescriptionsMessage": { + "base": "

    Result message containing a list of environment resource descriptions.

    ", + "refs": { + } + }, + "EnvironmentResourcesDescription": { + "base": "

    Describes the AWS resources in use by this environment. This data is not live data.

    ", + "refs": { + "EnvironmentDescription$Resources": "

    The description of the AWS resources used by this environment.

    " + } + }, + "EnvironmentStatus": { + "base": null, + "refs": { + "EnvironmentDescription$Status": "

    The current operational status of the environment:

    • Launching: Environment is in the process of initial deployment.
    • Updating: Environment is in the process of updating its configuration settings or application version.
    • Ready: Environment is available to have an action performed on it, such as update or terminate.
    • Terminating: Environment is in the shut-down process.
    • Terminated: Environment is not running.
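
    Since Ready is the only state in which an action such as update or terminate is accepted, a common pattern is to poll DescribeEnvironments until the environment leaves its transitional state. A minimal sketch under the same vendored-SDK and placeholder-name assumptions as above:

        package main

        import (
                "fmt"
                "log"
                "time"

                "github.com/aws/aws-sdk-go/aws"
                "github.com/aws/aws-sdk-go/aws/session"
                "github.com/aws/aws-sdk-go/service/elasticbeanstalk"
        )

        func main() {
                sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
                svc := elasticbeanstalk.New(sess)

                for {
                        out, err := svc.DescribeEnvironments(&elasticbeanstalk.DescribeEnvironmentsInput{
                                EnvironmentNames: []*string{aws.String("my-env")},
                        })
                        if err != nil {
                                log.Fatal(err)
                        }
                        if len(out.Environments) == 0 {
                                log.Fatal("environment not found")
                        }
                        status := aws.StringValue(out.Environments[0].Status)
                        fmt.Println("status:", status)
                        if status == "Ready" || status == "Terminated" {
                                break // Launching/Updating/Terminating are transitional
                        }
                        time.Sleep(20 * time.Second)
                }
        }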
    " + } + }, + "EnvironmentTier": { + "base": "

    Describes the properties of an environment tier.

    ", + "refs": { + "CreateEnvironmentMessage$Tier": "

    This specifies the tier to use for creating this environment.

    ", + "EnvironmentDescription$Tier": "

    Describes the current tier of this environment.

    ", + "UpdateEnvironmentMessage$Tier": "

    This specifies the tier to use to update the environment.

    Condition: At this time, if you change the tier version, name, or type, AWS Elastic Beanstalk returns an InvalidParameterValue error.

    " + } + }, + "EventDate": { + "base": null, + "refs": { + "EventDescription$EventDate": "

    The date when the event occurred.

    " + } + }, + "EventDescription": { + "base": "

    Describes an event.

    ", + "refs": { + "EventDescriptionList$member": null + } + }, + "EventDescriptionList": { + "base": null, + "refs": { + "EventDescriptionsMessage$Events": "

    A list of EventDescription.

    " + } + }, + "EventDescriptionsMessage": { + "base": "

    Result message wrapping a list of event descriptions.

    ", + "refs": { + } + }, + "EventMessage": { + "base": null, + "refs": { + "EventDescription$Message": "

    The event message.

    " + } + }, + "EventSeverity": { + "base": null, + "refs": { + "DescribeEventsMessage$Severity": "

    If specified, limits the events returned from this call to include only those with the specified severity or higher.

    ", + "EventDescription$Severity": "

    The severity level of this event.

    " + } + }, + "ExceptionMessage": { + "base": null, + "refs": { + "ElasticBeanstalkServiceException$message": "

    The exception error message.

    " + } + }, + "FailureType": { + "base": null, + "refs": { + "ManagedActionHistoryItem$FailureType": "

    If the action failed, the type of failure.

    " + } + }, + "FileTypeExtension": { + "base": null, + "refs": { + "SolutionStackFileTypeList$member": null + } + }, + "ForceTerminate": { + "base": null, + "refs": { + "TerminateEnvironmentMessage$ForceTerminate": "

    Terminates the target environment even if another environment in the same group is dependent on it.

    " + } + }, + "GroupName": { + "base": null, + "refs": { + "ComposeEnvironmentsMessage$GroupName": "

    The name of the group to which the target environments belong. Specify a group name only if the environment name defined in each target environment's manifest ends with a + (plus) character. See Environment Manifest (env.yaml) for details.

    ", + "CreateEnvironmentMessage$GroupName": "

    The name of the group to which the target environment belongs. Specify a group name only if the environment's name is specified in an environment manifest and not with the environment name parameter. See Environment Manifest (env.yaml) for details.

    ", + "UpdateEnvironmentMessage$GroupName": "

    The name of the group to which the target environment belongs. Specify a group name only if the environment's name is specified in an environment manifest and not with the environment name or environment ID parameters. See Environment Manifest (env.yaml) for details.

    " + } + }, + "IncludeDeleted": { + "base": null, + "refs": { + "DescribeEnvironmentsMessage$IncludeDeleted": "

    Indicates whether to include deleted environments:

    true: Environments that have been deleted after IncludedDeletedBackTo are displayed.

    false: Do not include deleted environments.

    " + } + }, + "IncludeDeletedBackTo": { + "base": null, + "refs": { + "DescribeEnvironmentsMessage$IncludedDeletedBackTo": "

    If specified when IncludeDeleted is set to true, then environments deleted after this date are displayed.
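
    For example, to list environments deleted within the last three days alongside the live ones (a sketch; the 72-hour window is an arbitrary assumption):

        package main

        import (
                "fmt"
                "log"
                "time"

                "github.com/aws/aws-sdk-go/aws"
                "github.com/aws/aws-sdk-go/aws/session"
                "github.com/aws/aws-sdk-go/service/elasticbeanstalk"
        )

        func main() {
                sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
                svc := elasticbeanstalk.New(sess)

                out, err := svc.DescribeEnvironments(&elasticbeanstalk.DescribeEnvironmentsInput{
                        IncludeDeleted:        aws.Bool(true),
                        IncludedDeletedBackTo: aws.Time(time.Now().Add(-72 * time.Hour)),
                })
                if err != nil {
                        log.Fatal(err)
                }
                for _, env := range out.Environments {
                        fmt.Println(aws.StringValue(env.EnvironmentName), aws.StringValue(env.Status))
                }
        }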

    " + } + }, + "Instance": { + "base": "

    The description of an Amazon EC2 instance.

    ", + "refs": { + "InstanceList$member": null + } + }, + "InstanceHealthList": { + "base": null, + "refs": { + "DescribeInstancesHealthResult$InstanceHealthList": "

    Contains the response body with information about the health of the instance.

    " + } + }, + "InstanceHealthSummary": { + "base": "

    Represents summary information about the health of an instance. For more information, see Health Colors and Statuses.

    ", + "refs": { + "DescribeEnvironmentHealthResult$InstancesHealth": null + } + }, + "InstanceId": { + "base": null, + "refs": { + "SingleInstanceHealth$InstanceId": "

    The ID of the Amazon EC2 instance.

    " + } + }, + "InstanceList": { + "base": null, + "refs": { + "EnvironmentResourceDescription$Instances": "

    The Amazon EC2 instances used by this environment.

    " + } + }, + "InstancesHealthAttribute": { + "base": null, + "refs": { + "InstancesHealthAttributes$member": null + } + }, + "InstancesHealthAttributes": { + "base": null, + "refs": { + "DescribeInstancesHealthRequest$AttributeNames": "

    Specifies the response elements you wish to receive. If no attribute names are specified, AWS Elastic Beanstalk only returns a list of instances.
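
    A sketch requesting the full attribute set rather than the bare instance list (vendored SDK, placeholder names as before):

        package main

        import (
                "fmt"
                "log"

                "github.com/aws/aws-sdk-go/aws"
                "github.com/aws/aws-sdk-go/aws/session"
                "github.com/aws/aws-sdk-go/service/elasticbeanstalk"
        )

        func main() {
                sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
                svc := elasticbeanstalk.New(sess)

                // With no AttributeNames, only instance IDs come back; "All"
                // adds health status, causes, metrics, and system data.
                out, err := svc.DescribeInstancesHealth(&elasticbeanstalk.DescribeInstancesHealthInput{
                        AttributeNames:  []*string{aws.String("All")},
                        EnvironmentName: aws.String("my-env"),
                })
                if err != nil {
                        log.Fatal(err)
                }
                for _, ih := range out.InstanceHealthList {
                        fmt.Println(aws.StringValue(ih.InstanceId), aws.StringValue(ih.HealthStatus))
                }
        }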

    " + } + }, + "InsufficientPrivilegesException": { + "base": "

    The specified account does not have sufficient privileges for one or more AWS services.

    ", + "refs": { + } + }, + "Integer": { + "base": null, + "refs": { + "DescribeEnvironmentManagedActionHistoryRequest$MaxItems": "

    The maximum number of items to return for a single request.

    ", + "Listener$Port": "

    The port that is used by the Listener.

    " + } + }, + "InvalidRequestException": { + "base": "

    One or more input parameters are not valid. Please correct the input parameters and try the operation again.

    ", + "refs": { + } + }, + "Latency": { + "base": "

    Represents the average latency for the slowest X percent of requests over the last 10 seconds.

    ", + "refs": { + "ApplicationMetrics$Latency": "

    Represents the average latency for the slowest X percent of requests over the last 10 seconds. Latencies are in seconds with one millisecond resolution.
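
    The P-fields below encode that mapping: PN holds the average latency of the slowest (100 - N) percent of requests, so P99 covers the slowest 1 percent and P999 the slowest 0.1 percent. A small illustrative helper (printLatency is hypothetical; the sample values come from the DescribeEnvironmentHealth example later in this file):

        package main

        import (
                "fmt"

                "github.com/aws/aws-sdk-go/aws"
                "github.com/aws/aws-sdk-go/service/elasticbeanstalk"
        )

        // printLatency reports a few percentiles; values are in seconds
        // with millisecond resolution.
        func printLatency(l *elasticbeanstalk.Latency) {
                if l == nil {
                        return
                }
                fmt.Printf("p50=%.3fs p90=%.3fs p99=%.3fs p99.9=%.3fs\n",
                        aws.Float64Value(l.P50), aws.Float64Value(l.P90),
                        aws.Float64Value(l.P99), aws.Float64Value(l.P999))
        }

        func main() {
                printLatency(&elasticbeanstalk.Latency{
                        P50:  aws.Float64(0.001),
                        P90:  aws.Float64(0.003),
                        P99:  aws.Float64(0.004),
                        P999: aws.Float64(0.004),
                })
        }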

    " + } + }, + "LaunchConfiguration": { + "base": "

    Describes an Auto Scaling launch configuration.

    ", + "refs": { + "LaunchConfigurationList$member": null + } + }, + "LaunchConfigurationList": { + "base": null, + "refs": { + "EnvironmentResourceDescription$LaunchConfigurations": "

    The Auto Scaling launch configurations in use by this environment.

    " + } + }, + "LaunchedAt": { + "base": null, + "refs": { + "SingleInstanceHealth$LaunchedAt": "

    The time at which the EC2 instance was launched.

    " + } + }, + "ListAvailableSolutionStacksResultMessage": { + "base": "

    A list of available AWS Elastic Beanstalk solution stacks.

    ", + "refs": { + } + }, + "Listener": { + "base": "

    Describes the properties of a Listener for the LoadBalancer.

    ", + "refs": { + "LoadBalancerListenersDescription$member": null + } + }, + "LoadAverage": { + "base": null, + "refs": { + "SystemStatus$LoadAverage": "

    Load average in the last 1-minute and 5-minute periods. For more information, see Operating System Metrics.

    " + } + }, + "LoadAverageValue": { + "base": null, + "refs": { + "LoadAverage$member": null + } + }, + "LoadBalancer": { + "base": "

    Describes a LoadBalancer.

    ", + "refs": { + "LoadBalancerList$member": null + } + }, + "LoadBalancerDescription": { + "base": "

    Describes the details of a LoadBalancer.

    ", + "refs": { + "EnvironmentResourcesDescription$LoadBalancer": "

    Describes the LoadBalancer.

    " + } + }, + "LoadBalancerList": { + "base": null, + "refs": { + "EnvironmentResourceDescription$LoadBalancers": "

    The LoadBalancers in use by this environment.

    " + } + }, + "LoadBalancerListenersDescription": { + "base": null, + "refs": { + "LoadBalancerDescription$Listeners": "

    A list of Listeners used by the LoadBalancer.

    " + } + }, + "ManagedAction": { + "base": "

    The record of an upcoming or in-progress managed action.

    ", + "refs": { + "ManagedActions$member": null + } + }, + "ManagedActionHistoryItem": { + "base": "

    The record of a completed or failed managed action.

    ", + "refs": { + "ManagedActionHistoryItems$member": null + } + }, + "ManagedActionHistoryItems": { + "base": null, + "refs": { + "DescribeEnvironmentManagedActionHistoryResult$ManagedActionHistoryItems": "

    A list of completed and failed managed actions.

    " + } + }, + "ManagedActionInvalidStateException": { + "base": "

    Cannot modify the managed action in its current state.

    ", + "refs": { + } + }, + "ManagedActions": { + "base": null, + "refs": { + "DescribeEnvironmentManagedActionsResult$ManagedActions": "

    A list of upcoming and in-progress managed actions.

    " + } + }, + "MaxRecords": { + "base": null, + "refs": { + "DescribeEventsMessage$MaxRecords": "

    Specifies the maximum number of events that can be returned, beginning with the most recent event.

    " + } + }, + "Message": { + "base": null, + "refs": { + "EnvironmentInfoDescription$Message": "

    The retrieved information.

    " + } + }, + "NextToken": { + "base": null, + "refs": { + "DescribeInstancesHealthRequest$NextToken": "

    The pagination token returned by a previous DescribeInstancesHealth call.

    ", + "DescribeInstancesHealthResult$NextToken": "

    The pagination token to pass to a subsequent DescribeInstancesHealth call to get the next page of results.

    " + } + }, + "NullableDouble": { + "base": null, + "refs": { + "CPUUtilization$User": "

    Percentage of time that the CPU has spent in the User state over the last 10 seconds.

    ", + "CPUUtilization$Nice": "

    Percentage of time that the CPU has spent in the Nice state over the last 10 seconds.

    ", + "CPUUtilization$System": "

    Percentage of time that the CPU has spent in the System state over the last 10 seconds.

    ", + "CPUUtilization$Idle": "

    Percentage of time that the CPU has spent in the Idle state over the last 10 seconds.

    ", + "CPUUtilization$IOWait": "

    Percentage of time that the CPU has spent in the I/O Wait state over the last 10 seconds.

    ", + "CPUUtilization$IRQ": "

    Percentage of time that the CPU has spent in the IRQ state over the last 10 seconds.

    ", + "CPUUtilization$SoftIRQ": "

    Percentage of time that the CPU has spent in the SoftIRQ state over the last 10 seconds.

    ", + "Latency$P999": "

    The average latency for the slowest 0.1 percent of requests over the last 10 seconds.

    ", + "Latency$P99": "

    The average latency for the slowest 1 percent of requests over the last 10 seconds.

    ", + "Latency$P95": "

    The average latency for the slowest 5 percent of requests over the last 10 seconds.

    ", + "Latency$P90": "

    The average latency for the slowest 10 percent of requests over the last 10 seconds.

    ", + "Latency$P85": "

    The average latency for the slowest 15 percent of requests over the last 10 seconds.

    ", + "Latency$P75": "

    The average latency for the slowest 25 percent of requests over the last 10 seconds.

    ", + "Latency$P50": "

    The average latency for the slowest 50 percent of requests over the last 10 seconds.

    ", + "Latency$P10": "

    The average latency for the slowest 90 percent of requests over the last 10 seconds.

    " + } + }, + "NullableInteger": { + "base": null, + "refs": { + "ApplicationMetrics$Duration": "

    The amount of time that the metrics cover (usually 10 seconds). For example, you might have 5 requests (request_count) within the most recent time slice of 10 seconds (duration).

    ", + "InstanceHealthSummary$NoData": "

    Grey. AWS Elastic Beanstalk and the health agent are reporting no data on an instance.

    ", + "InstanceHealthSummary$Unknown": "

    Grey. AWS Elastic Beanstalk and the health agent are reporting an insufficient amount of data on an instance.

    ", + "InstanceHealthSummary$Pending": "

    Grey. An operation is in progress on an instance within the command timeout.

    ", + "InstanceHealthSummary$Ok": "

    Green. An instance is passing health checks and the health agent is not reporting any problems.

    ", + "InstanceHealthSummary$Info": "

    Green. An operation is in progress on an instance.

    ", + "InstanceHealthSummary$Warning": "

    Yellow. The health agent is reporting a moderate number of request failures or other issues for an instance or environment.

    ", + "InstanceHealthSummary$Degraded": "

    Red. The health agent is reporting a high number of request failures or other issues for an instance or environment.

    ", + "InstanceHealthSummary$Severe": "

    Red. The health agent is reporting a very high number of request failures or other issues for an instance or environment.

    ", + "StatusCodes$Status2xx": "

    The percentage of requests over the last 10 seconds that resulted in a 2xx (200, 201, etc.) status code.

    ", + "StatusCodes$Status3xx": "

    The percentage of requests over the last 10 seconds that resulted in a 3xx (300, 301, etc.) status code.

    ", + "StatusCodes$Status4xx": "

    The percentage of requests over the last 10 seconds that resulted in a 4xx (400, 401, etc.) status code.

    ", + "StatusCodes$Status5xx": "

    The percentage of requests over the last 10 seconds that resulted in a 5xx (500, 501, etc.) status code.

    " + } + }, + "NullableLong": { + "base": null, + "refs": { + "Deployment$DeploymentId": "

    The ID of the deployment. This number increases by one each time that you deploy source code or change instance configuration settings.

    " + } + }, + "OperationInProgressException": { + "base": "

    Unable to perform the specified operation because another operation that affects an element in this activity is already in progress.

    ", + "refs": { + } + }, + "OptionNamespace": { + "base": null, + "refs": { + "ConfigurationOptionDescription$Namespace": "

    A unique namespace identifying the option's associated AWS resource.

    ", + "ConfigurationOptionSetting$Namespace": "

    A unique namespace identifying the option's associated AWS resource.

    ", + "OptionSpecification$Namespace": "

    A unique namespace identifying the option's associated AWS resource.

    ", + "ValidationMessage$Namespace": "

    " + } + }, + "OptionRestrictionMaxLength": { + "base": null, + "refs": { + "ConfigurationOptionDescription$MaxLength": "

    If specified, the configuration option must be a string value no longer than this value.

    " + } + }, + "OptionRestrictionMaxValue": { + "base": null, + "refs": { + "ConfigurationOptionDescription$MaxValue": "

    If specified, the configuration option must be a numeric value less than this value.

    " + } + }, + "OptionRestrictionMinValue": { + "base": null, + "refs": { + "ConfigurationOptionDescription$MinValue": "

    If specified, the configuration option must be a numeric value greater than this value.

    " + } + }, + "OptionRestrictionRegex": { + "base": "

    A regular expression representing a restriction on a string configuration option value.

    ", + "refs": { + "ConfigurationOptionDescription$Regex": "

    If specified, the configuration option must be a string value that satisfies this regular expression.

    " + } + }, + "OptionSpecification": { + "base": "

    A specification identifying an individual configuration option.

    ", + "refs": { + "OptionsSpecifierList$member": null + } + }, + "OptionsSpecifierList": { + "base": null, + "refs": { + "CreateEnvironmentMessage$OptionsToRemove": "

    A list of custom user-defined configuration options to remove from the configuration set for this new environment.

    ", + "DescribeConfigurationOptionsMessage$Options": "

    If specified, restricts the descriptions to only the specified options.

    ", + "UpdateConfigurationTemplateMessage$OptionsToRemove": "

    A list of configuration options to remove from the configuration set.

    Constraint: You can remove only UserDefined configuration options.

    ", + "UpdateEnvironmentMessage$OptionsToRemove": "

    A list of custom user-defined configuration options to remove from the configuration set for this environment.

    " + } + }, + "Queue": { + "base": "

    Describes a queue.

    ", + "refs": { + "QueueList$member": null + } + }, + "QueueList": { + "base": null, + "refs": { + "EnvironmentResourceDescription$Queues": "

    The queues used by this environment.

    " + } + }, + "RebuildEnvironmentMessage": { + "base": "

    ", + "refs": { + } + }, + "RefreshedAt": { + "base": null, + "refs": { + "DescribeEnvironmentHealthResult$RefreshedAt": "

    The date and time the information was last refreshed.

    ", + "DescribeInstancesHealthResult$RefreshedAt": "

    The date and time the information was last refreshed.

    " + } + }, + "RegexLabel": { + "base": null, + "refs": { + "OptionRestrictionRegex$Label": "

    A unique name representing this regular expression.

    " + } + }, + "RegexPattern": { + "base": null, + "refs": { + "OptionRestrictionRegex$Pattern": "

    The regular expression pattern that a string configuration option value with this restriction must match.

    " + } + }, + "RequestCount": { + "base": null, + "refs": { + "ApplicationMetrics$RequestCount": "

    Average number of requests handled by the web server per second over the last 10 seconds.

    " + } + }, + "RequestEnvironmentInfoMessage": { + "base": "

    Request to retrieve logs from an environment and store them in your Elastic Beanstalk storage bucket.

    ", + "refs": { + } + }, + "RequestId": { + "base": null, + "refs": { + "DescribeEventsMessage$RequestId": "

    If specified, AWS Elastic Beanstalk restricts the described events to include only those associated with this request ID.

    ", + "EventDescription$RequestId": "

    The web service request ID for the activity of this event.

    " + } + }, + "ResourceId": { + "base": null, + "refs": { + "AutoScalingGroup$Name": "

    The name of the AutoScalingGroup.

    ", + "Instance$Id": "

    The ID of the Amazon EC2 instance.

    ", + "LaunchConfiguration$Name": "

    The name of the launch configuration.

    ", + "LoadBalancer$Name": "

    The name of the LoadBalancer.

    ", + "Trigger$Name": "

    The name of the trigger.

    " + } + }, + "ResourceName": { + "base": null, + "refs": { + "ConfigurationOptionSetting$ResourceName": "

    A unique resource name for a time-based scaling configuration option.

    ", + "OptionSpecification$ResourceName": "

    A unique resource name for a time-based scaling configuration option.

    " + } + }, + "RestartAppServerMessage": { + "base": "

    ", + "refs": { + } + }, + "RetrieveEnvironmentInfoMessage": { + "base": "

    Request to download logs retrieved with RequestEnvironmentInfo.

    ", + "refs": { + } + }, + "RetrieveEnvironmentInfoResultMessage": { + "base": "

    Result message containing a description of the requested environment info.

    ", + "refs": { + } + }, + "S3Bucket": { + "base": null, + "refs": { + "CreateStorageLocationResultMessage$S3Bucket": "

    The name of the Amazon S3 bucket created.

    ", + "S3Location$S3Bucket": "

    The Amazon S3 bucket where the data is located.

    " + } + }, + "S3Key": { + "base": null, + "refs": { + "S3Location$S3Key": "

    The Amazon S3 key where the data is located.

    " + } + }, + "S3Location": { + "base": "

    A specification of a location in Amazon S3.

    ", + "refs": { + "ApplicationVersionDescription$SourceBundle": "

    The location where the source bundle is located for this version.

    ", + "CreateApplicationVersionMessage$SourceBundle": "

    The Amazon S3 bucket and key that identify the location of the source bundle for this version.

    If data found at the Amazon S3 location exceeds the maximum allowed source bundle size, AWS Elastic Beanstalk returns an InvalidParameterValue error. The maximum size allowed is 512 MB.

    Default: If not specified, AWS Elastic Beanstalk uses a sample application. If only partially specified (for example, a bucket is provided but not the key) or if no data is found at the Amazon S3 location, AWS Elastic Beanstalk returns an InvalidParameterCombination error.
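
    The bucket/key and size constraints above in sketch form (vendored SDK; the bucket, key, and labels are placeholders taken from the examples later in this file):

        package main

        import (
                "fmt"
                "log"

                "github.com/aws/aws-sdk-go/aws"
                "github.com/aws/aws-sdk-go/aws/session"
                "github.com/aws/aws-sdk-go/service/elasticbeanstalk"
        )

        func main() {
                sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
                svc := elasticbeanstalk.New(sess)

                out, err := svc.CreateApplicationVersion(&elasticbeanstalk.CreateApplicationVersionInput{
                        ApplicationName: aws.String("my-app"),
                        VersionLabel:    aws.String("v1"), // must be unique per application
                        SourceBundle: &elasticbeanstalk.S3Location{
                                // Both bucket and key must be set; a partial location or a
                                // bundle over 512 MB is rejected as described above.
                                S3Bucket: aws.String("my-bucket"),
                                S3Key:    aws.String("sample.war"),
                        },
                        AutoCreateApplication: aws.Bool(true),
                })
                if err != nil {
                        log.Fatal(err)
                }
                fmt.Println(aws.StringValue(out.ApplicationVersion.VersionLabel))
        }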

    " + } + }, + "S3LocationNotInServiceRegionException": { + "base": "

    The specified S3 bucket does not belong to the S3 region in which the service is running.

    ", + "refs": { + } + }, + "S3SubscriptionRequiredException": { + "base": "

    The specified account does not have a subscription to Amazon S3.

    ", + "refs": { + } + }, + "SampleTimestamp": { + "base": null, + "refs": { + "EnvironmentInfoDescription$SampleTimestamp": "

    The time stamp when this information was retrieved.

    " + } + }, + "SingleInstanceHealth": { + "base": "

    Represents health information from the specified instance that belongs to the AWS Elastic Beanstalk environment. Use the InstanceId property to specify the application instance for which you'd like to return data.

    ", + "refs": { + "InstanceHealthList$member": null + } + }, + "SolutionStackDescription": { + "base": "

    Describes the solution stack.

    ", + "refs": { + "AvailableSolutionStackDetailsList$member": null + } + }, + "SolutionStackFileTypeList": { + "base": null, + "refs": { + "SolutionStackDescription$PermittedFileTypes": "

    The permitted file types allowed for a solution stack.

    " + } + }, + "SolutionStackName": { + "base": null, + "refs": { + "AvailableSolutionStackNamesList$member": null, + "ConfigurationOptionsDescription$SolutionStackName": "

    The name of the solution stack these configuration options belong to.

    ", + "ConfigurationSettingsDescription$SolutionStackName": "

    The name of the solution stack this configuration set uses.

    ", + "CreateConfigurationTemplateMessage$SolutionStackName": "

    The name of the solution stack used by this configuration. The solution stack specifies the operating system, architecture, and application server for a configuration template. It determines the set of configuration options as well as the possible and default values.

    Use ListAvailableSolutionStacks to obtain a list of available solution stacks.

    A solution stack name or a source configuration parameter must be specified; otherwise, AWS Elastic Beanstalk returns an InvalidParameterValue error.

    If a solution stack name is not specified and the source configuration parameter is specified, AWS Elastic Beanstalk uses the same solution stack as the source configuration template.
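
    The solution-stack variant of that rule, sketched (the stack name is copied from the examples later in this file; SourceConfiguration would be the alternative to SolutionStackName):

        package main

        import (
                "log"

                "github.com/aws/aws-sdk-go/aws"
                "github.com/aws/aws-sdk-go/aws/session"
                "github.com/aws/aws-sdk-go/service/elasticbeanstalk"
        )

        func main() {
                sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
                svc := elasticbeanstalk.New(sess)

                // At least one of SolutionStackName or SourceConfiguration is
                // required; if both are given, their solution stacks must match.
                _, err := svc.CreateConfigurationTemplate(&elasticbeanstalk.CreateConfigurationTemplateInput{
                        ApplicationName:   aws.String("my-app"),
                        TemplateName:      aws.String("my-template"),
                        SolutionStackName: aws.String("64bit Amazon Linux 2015.03 v2.0.0 running Tomcat 8 Java 8"),
                })
                if err != nil {
                        log.Fatal(err)
                }
        }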

    ", + "CreateEnvironmentMessage$SolutionStackName": "

    This is an alternative to specifying a template name. If specified, AWS Elastic Beanstalk sets the configuration values to the default values associated with the specified solution stack.

    Condition: You must specify either this or a TemplateName, but not both. If you specify both, AWS Elastic Beanstalk returns an InvalidParameterCombination error. If you do not specify either, AWS Elastic Beanstalk returns a MissingRequiredParameter error.

    ", + "DescribeConfigurationOptionsMessage$SolutionStackName": "

    The name of the solution stack whose configuration options you want to describe.

    ", + "EnvironmentDescription$SolutionStackName": "

    The name of the SolutionStack deployed with this environment.

    ", + "SolutionStackDescription$SolutionStackName": "

    The name of the solution stack.

    ", + "UpdateEnvironmentMessage$SolutionStackName": "

    This specifies the platform version that the environment will run after the environment is updated.

    " + } + }, + "SourceBundleDeletionException": { + "base": "

    Unable to delete the Amazon S3 source bundle associated with the application version. The application version was deleted successfully.

    ", + "refs": { + } + }, + "SourceConfiguration": { + "base": "

    A specification for an environment configuration.

    ", + "refs": { + "CreateConfigurationTemplateMessage$SourceConfiguration": "

    If specified, AWS Elastic Beanstalk uses the configuration values from the specified configuration template to create a new configuration.

    Values specified in the OptionSettings parameter of this call override any values obtained from the SourceConfiguration.

    If no configuration template is found, returns an InvalidParameterValue error.

    Constraint: If both the solution stack name parameter and the source configuration parameters are specified, the solution stack of the source configuration template must match the specified solution stack name or else AWS Elastic Beanstalk returns an InvalidParameterCombination error.

    " + } + }, + "StatusCodes": { + "base": "

    Represents the percentage of requests over the last 10 seconds that resulted in each type of status code response. For more information, see Status Code Definitions.

    ", + "refs": { + "ApplicationMetrics$StatusCodes": "

    Represents the percentage of requests over the last 10 seconds that resulted in each type of status code response.

    " + } + }, + "String": { + "base": null, + "refs": { + "ApplyEnvironmentManagedActionRequest$EnvironmentName": "

    The name of the target environment.

    ", + "ApplyEnvironmentManagedActionRequest$EnvironmentId": "

    The environment ID of the target environment.

    ", + "ApplyEnvironmentManagedActionRequest$ActionId": "

    The action ID of the scheduled managed action to execute.

    ", + "ApplyEnvironmentManagedActionResult$ActionId": "

    The action ID of the managed action.

    ", + "ApplyEnvironmentManagedActionResult$ActionDescription": "

    A description of the managed action.

    ", + "ApplyEnvironmentManagedActionResult$Status": "

    The status of the managed action.

    ", + "Deployment$VersionLabel": "

    The version label of the application version in the deployment.

    ", + "Deployment$Status": "

    The status of the deployment:

    • In Progress: The deployment is in progress.
    • Deployed: The deployment succeeded.
    • Failed: The deployment failed.
    ", + "DescribeEnvironmentHealthResult$HealthStatus": "

    Contains the response body with information about the health of the environment.

    ", + "DescribeEnvironmentHealthResult$Color": "

    Returns the color indicator that provides information about the health of the environment. For more information, see Health Colors and Statuses.

    ", + "DescribeEnvironmentManagedActionHistoryRequest$NextToken": "

    The pagination token returned by a previous request.

    ", + "DescribeEnvironmentManagedActionHistoryResult$NextToken": "

    A pagination token that you pass to DescribeEnvironmentManagedActionHistory to get the next page of results.

    ", + "DescribeEnvironmentManagedActionsRequest$EnvironmentName": "

    The name of the target environment.

    ", + "DescribeEnvironmentManagedActionsRequest$EnvironmentId": "

    The environment ID of the target environment.

    ", + "EnvironmentLink$LinkName": "

    The name of the link.

    ", + "EnvironmentLink$EnvironmentName": "

    The name of the linked environment (the dependency).

    ", + "EnvironmentTier$Name": "

    The name of this environment tier.

    ", + "EnvironmentTier$Type": "

    The type of this environment tier.

    ", + "EnvironmentTier$Version": "

    The version of this environment tier.

    ", + "Listener$Protocol": "

    The protocol that is used by the Listener.

    ", + "LoadBalancerDescription$LoadBalancerName": "

    The name of the LoadBalancer.

    ", + "LoadBalancerDescription$Domain": "

    The domain name of the LoadBalancer.

    ", + "ManagedAction$ActionId": "

    A unique identifier for the managed action.

    ", + "ManagedAction$ActionDescription": "

    A description of the managed action.

    ", + "ManagedActionHistoryItem$ActionId": "

    A unique identifier for the managed action.

    ", + "ManagedActionHistoryItem$ActionDescription": "

    A description of the managed action.

    ", + "ManagedActionHistoryItem$FailureDescription": "

    If the action failed, a description of the failure.

    ", + "Queue$Name": "

    The name of the queue.

    ", + "Queue$URL": "

    The URL of the queue.

    ", + "SingleInstanceHealth$HealthStatus": "

    Returns the health status of the specified instance. For more information, see Health Colors and Statuses.

    ", + "SingleInstanceHealth$Color": "

    Represents the color indicator that gives you information about the health of the EC2 instance. For more information, see Health Colors and Statuses.

    ", + "SingleInstanceHealth$AvailabilityZone": "

    The availability zone in which the instance runs.

    ", + "SingleInstanceHealth$InstanceType": "

    The instance's type.

    " + } + }, + "SwapEnvironmentCNAMEsMessage": { + "base": "

    Swaps the CNAMEs of two environments.

    ", + "refs": { + } + }, + "SystemStatus": { + "base": "

    Represents CPU utilization and load average information for applications running in the specified environment.

    ", + "refs": { + "SingleInstanceHealth$System": null + } + }, + "Tag": { + "base": "

    Describes a tag applied to a resource in an environment.

    ", + "refs": { + "Tags$member": null + } + }, + "TagKey": { + "base": null, + "refs": { + "Tag$Key": "

    The key of the tag.

    " + } + }, + "TagValue": { + "base": null, + "refs": { + "Tag$Value": "

    The value of the tag.

    " + } + }, + "Tags": { + "base": null, + "refs": { + "CreateEnvironmentMessage$Tags": "

    This specifies the tags applied to resources in the environment.

    " + } + }, + "TerminateEnvForce": { + "base": null, + "refs": { + "DeleteApplicationMessage$TerminateEnvByForce": "

    When set to true, running environments will be terminated before deleting the application.

    " + } + }, + "TerminateEnvironmentMessage": { + "base": "

    Request to terminate an environment.

    ", + "refs": { + } + }, + "TerminateEnvironmentResources": { + "base": null, + "refs": { + "TerminateEnvironmentMessage$TerminateResources": "

    Indicates whether the associated AWS resources should shut down when the environment is terminated:

    • true: The specified environment as well as the associated AWS resources, such as Auto Scaling group and LoadBalancer, are terminated.
    • false: AWS Elastic Beanstalk resource management is removed from the environment, but the AWS resources continue to operate.

    For more information, see the AWS Elastic Beanstalk User Guide.

    Default: true

    Valid Values: true | false
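
    The two TerminateResources behaviors in sketch form (vendored SDK, placeholder environment name):

        package main

        import (
                "log"

                "github.com/aws/aws-sdk-go/aws"
                "github.com/aws/aws-sdk-go/aws/session"
                "github.com/aws/aws-sdk-go/service/elasticbeanstalk"
        )

        func main() {
                sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
                svc := elasticbeanstalk.New(sess)

                _, err := svc.TerminateEnvironment(&elasticbeanstalk.TerminateEnvironmentInput{
                        EnvironmentName: aws.String("my-env"),
                        // true (the default) also deletes the Auto Scaling group and
                        // load balancer; false detaches them and leaves them running.
                        TerminateResources: aws.Bool(true),
                })
                if err != nil {
                        log.Fatal(err)
                }
        }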

    " + } + }, + "TimeFilterEnd": { + "base": null, + "refs": { + "DescribeEventsMessage$EndTime": "

    If specified, AWS Elastic Beanstalk restricts the returned descriptions to those that occur up to, but not including, the EndTime.

    " + } + }, + "TimeFilterStart": { + "base": null, + "refs": { + "DescribeEventsMessage$StartTime": "

    If specified, AWS Elastic Beanstalk restricts the returned descriptions to those that occur on or after this time.

    " + } + }, + "Timestamp": { + "base": null, + "refs": { + "ManagedAction$WindowStartTime": "

    The start time of the maintenance window in which the managed action will execute.

    ", + "ManagedActionHistoryItem$ExecutedTime": "

    The date and time that the action started executing.

    ", + "ManagedActionHistoryItem$FinishedTime": "

    The date and time that the action finished executing.

    " + } + }, + "Token": { + "base": null, + "refs": { + "DescribeEventsMessage$NextToken": "

    Pagination token. If specified, the call returns the next batch of events.

    ", + "EventDescriptionsMessage$NextToken": "

    If returned, this indicates that there are more results to obtain. Use this token in the next DescribeEvents call to get the next batch of events.
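
    That token contract makes pagination a simple loop; a sketch that also applies the Severity and MaxRecords filters described earlier:

        package main

        import (
                "fmt"
                "log"

                "github.com/aws/aws-sdk-go/aws"
                "github.com/aws/aws-sdk-go/aws/session"
                "github.com/aws/aws-sdk-go/service/elasticbeanstalk"
        )

        func main() {
                sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
                svc := elasticbeanstalk.New(sess)

                input := &elasticbeanstalk.DescribeEventsInput{
                        EnvironmentName: aws.String("my-env"),
                        Severity:        aws.String("WARN"), // WARN and above
                        MaxRecords:      aws.Int64(100),
                }
                for {
                        out, err := svc.DescribeEvents(input)
                        if err != nil {
                                log.Fatal(err)
                        }
                        for _, e := range out.Events {
                                fmt.Println(aws.TimeValue(e.EventDate), aws.StringValue(e.Message))
                        }
                        if out.NextToken == nil {
                                break // no more batches
                        }
                        input.NextToken = out.NextToken
                }
        }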

    " + } + }, + "TooManyApplicationVersionsException": { + "base": "

    The specified account has reached its limit of application versions.

    ", + "refs": { + } + }, + "TooManyApplicationsException": { + "base": "

    The specified account has reached its limit of applications.

    ", + "refs": { + } + }, + "TooManyBucketsException": { + "base": "

    The specified account has reached its limit of Amazon S3 buckets.

    ", + "refs": { + } + }, + "TooManyConfigurationTemplatesException": { + "base": "

    The specified account has reached its limit of configuration templates.

    ", + "refs": { + } + }, + "TooManyEnvironmentsException": { + "base": "

    The specified account has reached its limit of environments.

    ", + "refs": { + } + }, + "Trigger": { + "base": "

    Describes a trigger.

    ", + "refs": { + "TriggerList$member": null + } + }, + "TriggerList": { + "base": null, + "refs": { + "EnvironmentResourceDescription$Triggers": "

    The AutoScaling triggers in use by this environment.

    " + } + }, + "UpdateApplicationMessage": { + "base": "

    Request to update an application.

    ", + "refs": { + } + }, + "UpdateApplicationVersionMessage": { + "base": "

    ", + "refs": { + } + }, + "UpdateConfigurationTemplateMessage": { + "base": "

    The result message containing the options for the specified solution stack.

    ", + "refs": { + } + }, + "UpdateDate": { + "base": null, + "refs": { + "ApplicationDescription$DateUpdated": "

    The date when the application was last modified.

    ", + "ApplicationVersionDescription$DateUpdated": "

    The last modified date of the application version.

    ", + "ConfigurationSettingsDescription$DateUpdated": "

    The date (in UTC time) when this configuration set was last modified.

    ", + "EnvironmentDescription$DateUpdated": "

    The last modified date for this environment.

    " + } + }, + "UpdateEnvironmentMessage": { + "base": "

    Request to update an environment.

    ", + "refs": { + } + }, + "UserDefinedOption": { + "base": null, + "refs": { + "ConfigurationOptionDescription$UserDefined": "

    An indication of whether the user defined this configuration option:

    • true: This configuration option was defined by the user. It is a valid choice for specifying this as an option to remove when updating configuration settings.

    • false: This configuration option was not defined by the user.

    Constraint: You can remove only UserDefined options from a configuration.

    Valid Values: true | false
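
    Removing such an option looks like the following sketch; the namespace and option name are hypothetical stand-ins for a user-defined setting, since only UserDefined options can be removed:

        package main

        import (
                "log"

                "github.com/aws/aws-sdk-go/aws"
                "github.com/aws/aws-sdk-go/aws/session"
                "github.com/aws/aws-sdk-go/service/elasticbeanstalk"
        )

        func main() {
                sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
                svc := elasticbeanstalk.New(sess)

                _, err := svc.UpdateEnvironment(&elasticbeanstalk.UpdateEnvironmentInput{
                        EnvironmentName: aws.String("my-env"),
                        OptionsToRemove: []*elasticbeanstalk.OptionSpecification{{
                                // Placeholder user-defined option; built-in options
                                // (UserDefined == false) cannot be removed this way.
                                Namespace:  aws.String("aws:elasticbeanstalk:application:environment"),
                                OptionName: aws.String("MY_SETTING"),
                        }},
                })
                if err != nil {
                        log.Fatal(err)
                }
        }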

    " + } + }, + "ValidateConfigurationSettingsMessage": { + "base": "

    A list of validation messages for a specified configuration template.

    ", + "refs": { + } + }, + "ValidationMessage": { + "base": "

    An error or warning for a desired configuration option value.

    ", + "refs": { + "ValidationMessagesList$member": null + } + }, + "ValidationMessageString": { + "base": null, + "refs": { + "ValidationMessage$Message": "

    A message describing the error or warning.

    " + } + }, + "ValidationMessagesList": { + "base": null, + "refs": { + "ConfigurationSettingsValidationMessages$Messages": "

    A list of ValidationMessage.

    " + } + }, + "ValidationSeverity": { + "base": null, + "refs": { + "ValidationMessage$Severity": "

    An indication of the severity of this message:

    • error: This message indicates that this is not a valid setting for an option.
    • warning: This message is providing information you should take into account.
    " + } + }, + "VersionLabel": { + "base": null, + "refs": { + "ApplicationVersionDescription$VersionLabel": "

    A label uniquely identifying the version for the associated application.

    ", + "CreateApplicationVersionMessage$VersionLabel": "

    A label identifying this version.

    Constraint: Must be unique per application. If an application version already exists with this label for the specified application, AWS Elastic Beanstalk returns an InvalidParameterValue error.

    ", + "CreateEnvironmentMessage$VersionLabel": "

    The name of the application version to deploy.

    If the specified application has no associated application versions, AWS Elastic Beanstalk UpdateEnvironment returns an InvalidParameterValue error.

    Default: If not specified, AWS Elastic Beanstalk attempts to launch the sample application in the container.

    ", + "DeleteApplicationVersionMessage$VersionLabel": "

    The label of the version to delete.

    ", + "DescribeEnvironmentsMessage$VersionLabel": "

    If specified, AWS Elastic Beanstalk restricts the returned descriptions to include only those that are associated with this application version.

    ", + "DescribeEventsMessage$VersionLabel": "

    If specified, AWS Elastic Beanstalk restricts the returned descriptions to those associated with this application version.

    ", + "EnvironmentDescription$VersionLabel": "

    The application version deployed in this environment.

    ", + "EventDescription$VersionLabel": "

    The release label for the application version associated with this event.

    ", + "UpdateApplicationVersionMessage$VersionLabel": "

    The name of the version to update.

    If no application version is found with this label, UpdateApplicationVersion returns an InvalidParameterValue error.

    ", + "UpdateEnvironmentMessage$VersionLabel": "

    If this parameter is specified, AWS Elastic Beanstalk deploys the named application version to the environment. If no such application version is found, returns an InvalidParameterValue error.

    ", + "VersionLabels$member": null, + "VersionLabelsList$member": null + } + }, + "VersionLabels": { + "base": null, + "refs": { + "ComposeEnvironmentsMessage$VersionLabels": "

    A list of version labels, specifying one or more application source bundles that belong to the target application. Each source bundle must include an environment manifest that specifies the name of the environment and the name of the solution stack to use, and optionally can specify environment links to create.
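
    A sketch of such a call, assuming two hypothetical source bundles whose env.yaml manifests name their environments and solution stacks:

        package main

        import (
                "log"

                "github.com/aws/aws-sdk-go/aws"
                "github.com/aws/aws-sdk-go/aws/session"
                "github.com/aws/aws-sdk-go/service/elasticbeanstalk"
        )

        func main() {
                sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
                svc := elasticbeanstalk.New(sess)

                // Each label points at an application version whose bundle
                // carries an env.yaml environment manifest.
                _, err := svc.ComposeEnvironments(&elasticbeanstalk.ComposeEnvironmentsInput{
                        ApplicationName: aws.String("my-app"),
                        GroupName:       aws.String("staging"),
                        VersionLabels:   []*string{aws.String("front-v1"), aws.String("worker-v1")},
                })
                if err != nil {
                        log.Fatal(err)
                }
        }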

    " + } + }, + "VersionLabelsList": { + "base": null, + "refs": { + "ApplicationDescription$Versions": "

    The names of the versions for this application.

    ", + "DescribeApplicationVersionsMessage$VersionLabels": "

    If specified, restricts the returned descriptions to only include ones that have the specified version labels.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/elasticbeanstalk/2010-12-01/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticbeanstalk/2010-12-01/examples-1.json new file mode 100644 index 000000000..0fded6281 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticbeanstalk/2010-12-01/examples-1.json @@ -0,0 +1,1109 @@ +{ + "version": "1.0", + "examples": { + "AbortEnvironmentUpdate": [ + { + "input": { + "EnvironmentName": "my-env" + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "The following code aborts a running application version deployment for an environment named my-env:", + "id": "to-abort-a-deployment-1456267848227", + "title": "To abort a deployment" + } + ], + "CheckDNSAvailability": [ + { + "input": { + "CNAMEPrefix": "my-cname" + }, + "output": { + "Available": true, + "FullyQualifiedCNAME": "my-cname.us-west-2.elasticbeanstalk.com" + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "The following operation checks the availability of the subdomain my-cname:", + "id": "to-check-the-availability-of-a-cname-1456268589537", + "title": "To check the availability of a CNAME" + } + ], + "CreateApplication": [ + { + "input": { + "ApplicationName": "my-app", + "Description": "my application" + }, + "output": { + "Application": { + "ApplicationName": "my-app", + "ConfigurationTemplates": [ + + ], + "DateCreated": "2015-02-12T18:32:21.181Z", + "DateUpdated": "2015-02-12T18:32:21.181Z", + "Description": "my application" + } + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "The following operation creates a new application named my-app:", + "id": "to-create-a-new-application-1456268895683", + "title": "To create a new application" + } + ], + "CreateApplicationVersion": [ + { + "input": { + "ApplicationName": "my-app", + "AutoCreateApplication": true, + "Description": "my-app-v1", + "Process": true, + "SourceBundle": { + "S3Bucket": "my-bucket", + "S3Key": "sample.war" + }, + "VersionLabel": "v1" + }, + "output": { + "ApplicationVersion": { + "ApplicationName": "my-app", + "DateCreated": "2015-02-03T23:01:25.412Z", + "DateUpdated": "2015-02-03T23:01:25.412Z", + "Description": "my-app-v1", + "SourceBundle": { + "S3Bucket": "my-bucket", + "S3Key": "sample.war" + }, + "VersionLabel": "v1" + } + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "The following operation creates a new version (v1) of an application named my-app:", + "id": "to-create-a-new-application-1456268895683", + "title": "To create a new application" + } + ], + "CreateConfigurationTemplate": [ + { + "input": { + "ApplicationName": "my-app", + "EnvironmentId": "e-rpqsewtp2j", + "TemplateName": "my-app-v1" + }, + "output": { + "ApplicationName": "my-app", + "DateCreated": "2015-08-12T18:40:39Z", + "DateUpdated": "2015-08-12T18:40:39Z", + "SolutionStackName": "64bit Amazon Linux 2015.03 v2.0.0 running Tomcat 8 Java 8", + "TemplateName": "my-app-v1" + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "The following operation creates a configuration template named my-app-v1 from the settings applied to an environment with the id e-rpqsewtp2j:", + "id": "to-create-a-configuration-template-1456269283586", + "title": "To create a configuration template" + } + ], + "CreateEnvironment": [ + { + "input": { + "ApplicationName": "my-app", + "CNAMEPrefix": "my-app", + "EnvironmentName": "my-env", + "SolutionStackName": "64bit 
Amazon Linux 2015.03 v2.0.0 running Tomcat 8 Java 8", + "VersionLabel": "v1" + }, + "output": { + "ApplicationName": "my-app", + "CNAME": "my-app.elasticbeanstalk.com", + "DateCreated": "2015-02-03T23:04:54.479Z", + "DateUpdated": "2015-02-03T23:04:54.479Z", + "EnvironmentId": "e-izqpassy4h", + "EnvironmentName": "my-env", + "Health": "Grey", + "SolutionStackName": "64bit Amazon Linux 2015.03 v2.0.0 running Tomcat 8 Java 8", + "Status": "Launching", + "Tier": { + "Name": "WebServer", + "Type": "Standard", + "Version": " " + }, + "VersionLabel": "v1" + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "The following operation creates a new environment for version v1 of a java application named my-app:", + "id": "to-create-a-new-environment-for-an-application-1456269380396", + "title": "To create a new environment for an application" + } + ], + "CreateStorageLocation": [ + { + "output": { + "S3Bucket": "elasticbeanstalk-us-west-2-0123456789012" + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "The following operation creates a new environment for version v1 of a java application named my-app:", + "id": "to-create-a-new-environment-for-an-application-1456269380396", + "title": "To create a new environment for an application" + } + ], + "DeleteApplication": [ + { + "input": { + "ApplicationName": "my-app" + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "The following operation deletes an application named my-app:", + "id": "to-delete-an-application-1456269699366", + "title": "To delete an application" + } + ], + "DeleteApplicationVersion": [ + { + "input": { + "ApplicationName": "my-app", + "DeleteSourceBundle": true, + "VersionLabel": "22a0-stage-150819_182129" + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "The following operation deletes an application version named 22a0-stage-150819_182129 for an application named my-app:", + "id": "to-delete-an-application-version-1456269792956", + "title": "To delete an application version" + } + ], + "DeleteConfigurationTemplate": [ + { + "input": { + "ApplicationName": "my-app", + "TemplateName": "my-template" + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "The following operation deletes a configuration template named my-template for an application named my-app:", + "id": "to-delete-a-configuration-template-1456269836701", + "title": "To delete a configuration template" + } + ], + "DeleteEnvironmentConfiguration": [ + { + "input": { + "ApplicationName": "my-app", + "EnvironmentName": "my-env" + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "The following operation deletes a draft configuration for an environment named my-env:", + "id": "to-delete-a-draft-configuration-1456269886654", + "title": "To delete a draft configuration" + } + ], + "DescribeApplicationVersions": [ + { + "input": { + "ApplicationName": "my-app", + "VersionLabels": [ + "v2" + ] + }, + "output": { + "ApplicationVersions": [ + { + "ApplicationName": "my-app", + "DateCreated": "2015-07-23T01:32:26.079Z", + "DateUpdated": "2015-07-23T01:32:26.079Z", + "Description": "update cover page", + "SourceBundle": { + "S3Bucket": "elasticbeanstalk-us-west-2-015321684451", + "S3Key": "my-app/5026-stage-150723_224258.war" + }, + "VersionLabel": "v2" + }, + { + "ApplicationName": "my-app", + "DateCreated": "2015-07-23T22:26:10.816Z", + "DateUpdated": "2015-07-23T22:26:10.816Z", + "Description": "initial version", + 
"SourceBundle": { + "S3Bucket": "elasticbeanstalk-us-west-2-015321684451", + "S3Key": "my-app/5026-stage-150723_222618.war" + }, + "VersionLabel": "v1" + } + ] + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "The following operation retrieves information about an application version labeled v2:", + "id": "to-view-information-about-an-application-version-1456269947428", + "title": "To view information about an application version" + } + ], + "DescribeApplications": [ + { + "input": { + }, + "output": { + "Applications": [ + { + "ApplicationName": "ruby", + "ConfigurationTemplates": [ + + ], + "DateCreated": "2015-08-13T21:05:44.376Z", + "DateUpdated": "2015-08-13T21:05:44.376Z", + "Versions": [ + "Sample Application" + ] + }, + { + "ApplicationName": "pythonsample", + "ConfigurationTemplates": [ + + ], + "DateCreated": "2015-08-13T19:05:43.637Z", + "DateUpdated": "2015-08-13T19:05:43.637Z", + "Description": "Application created from the EB CLI using \"eb init\"", + "Versions": [ + "Sample Application" + ] + }, + { + "ApplicationName": "nodejs-example", + "ConfigurationTemplates": [ + + ], + "DateCreated": "2015-08-06T17:50:02.486Z", + "DateUpdated": "2015-08-06T17:50:02.486Z", + "Versions": [ + "add elasticache", + "First Release" + ] + } + ] + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "The following operation retrieves information about applications in the current region:", + "id": "to-view-a-list-of-applications-1456270027373", + "title": "To view a list of applications" + } + ], + "DescribeConfigurationOptions": [ + { + "input": { + "ApplicationName": "my-app", + "EnvironmentName": "my-env" + }, + "output": { + "Options": [ + { + "ChangeSeverity": "NoInterruption", + "DefaultValue": "30", + "MaxValue": 300, + "MinValue": 5, + "Name": "Interval", + "Namespace": "aws:elb:healthcheck", + "UserDefined": false, + "ValueType": "Scalar" + }, + { + "ChangeSeverity": "NoInterruption", + "DefaultValue": "2000000", + "MinValue": 0, + "Name": "LowerThreshold", + "Namespace": "aws:autoscaling:trigger", + "UserDefined": false, + "ValueType": "Scalar" + } + ] + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "The following operation retrieves descriptions of all available configuration options for an environment named my-env:", + "id": "to-view-configuration-options-for-an-environment-1456276763917", + "title": "To view configuration options for an environment" + } + ], + "DescribeConfigurationSettings": [ + { + "input": { + "ApplicationName": "my-app", + "EnvironmentName": "my-env" + }, + "output": { + "ConfigurationSettings": [ + { + "ApplicationName": "my-app", + "DateCreated": "2015-08-13T19:16:25Z", + "DateUpdated": "2015-08-13T23:30:07Z", + "DeploymentStatus": "deployed", + "Description": "Environment created from the EB CLI using \"eb create\"", + "EnvironmentName": "my-env", + "OptionSettings": [ + { + "Namespace": "aws:autoscaling:asg", + "OptionName": "Availability Zones", + "ResourceName": "AWSEBAutoScalingGroup", + "Value": "Any" + }, + { + "Namespace": "aws:autoscaling:asg", + "OptionName": "Cooldown", + "ResourceName": "AWSEBAutoScalingGroup", + "Value": "360" + }, + { + "Namespace": "aws:elb:policies", + "OptionName": "ConnectionDrainingTimeout", + "ResourceName": "AWSEBLoadBalancer", + "Value": "20" + }, + { + "Namespace": "aws:elb:policies", + "OptionName": "ConnectionSettingIdleTimeout", + "ResourceName": "AWSEBLoadBalancer", + "Value": "60" + } + ], + "SolutionStackName": "64bit Amazon 
Linux 2015.03 v2.0.0 running Tomcat 8 Java 8" + } + ] + }, + "comments": { + "input": { + }, + "output": { + "abbreviated": "Output is abbreviated" + } + }, + "description": "The following operation retrieves configuration settings for an environment named my-env:", + "id": "to-view-configurations-settings-for-an-environment-1456276924537", + "title": "To view configurations settings for an environment" + } + ], + "DescribeEnvironmentHealth": [ + { + "input": { + "AttributeNames": [ + "All" + ], + "EnvironmentName": "my-env" + }, + "output": { + "ApplicationMetrics": { + "Duration": 10, + "Latency": { + "P10": 0.001, + "P50": 0.001, + "P75": 0.002, + "P85": 0.003, + "P90": 0.003, + "P95": 0.004, + "P99": 0.004, + "P999": 0.004 + }, + "RequestCount": 45, + "StatusCodes": { + "Status2xx": 45, + "Status3xx": 0, + "Status4xx": 0, + "Status5xx": 0 + } + }, + "Causes": [ + + ], + "Color": "Green", + "EnvironmentName": "my-env", + "HealthStatus": "Ok", + "InstancesHealth": { + "Degraded": 0, + "Info": 0, + "NoData": 0, + "Ok": 1, + "Pending": 0, + "Severe": 0, + "Unknown": 0, + "Warning": 0 + }, + "RefreshedAt": "2015-08-20T21:09:18Z" + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "The following operation retrieves overall health information for an environment named my-env:", + "id": "to-view-environment-health-1456277109510", + "title": "To view environment health" + } + ], + "DescribeEnvironmentResources": [ + { + "input": { + "EnvironmentName": "my-env" + }, + "output": { + "EnvironmentResources": { + "AutoScalingGroups": [ + { + "Name": "awseb-e-qu3fyyjyjs-stack-AWSEBAutoScalingGroup-QSB2ZO88SXZT" + } + ], + "EnvironmentName": "my-env", + "Instances": [ + { + "Id": "i-0c91c786" + } + ], + "LaunchConfigurations": [ + { + "Name": "awseb-e-qu3fyyjyjs-stack-AWSEBAutoScalingLaunchConfiguration-1UUVQIBC96TQ2" + } + ], + "LoadBalancers": [ + { + "Name": "awseb-e-q-AWSEBLoa-1EEPZ0K98BIF0" + } + ], + "Queues": [ + + ], + "Triggers": [ + + ] + } + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "The following operation retrieves information about resources in an environment named my-env:", + "id": "to-view-information-about-the-aws-resources-in-your-environment-1456277206232", + "title": "To view information about the AWS resources in your environment" + } + ], + "DescribeEnvironments": [ + { + "input": { + "EnvironmentNames": [ + "my-env" + ] + }, + "output": { + "Environments": [ + { + "AbortableOperationInProgress": false, + "ApplicationName": "my-app", + "CNAME": "my-env.elasticbeanstalk.com", + "DateCreated": "2015-08-07T20:48:49.599Z", + "DateUpdated": "2015-08-12T18:16:55.019Z", + "EndpointURL": "awseb-e-w-AWSEBLoa-1483140XB0Q4L-109QXY8121.us-west-2.elb.amazonaws.com", + "EnvironmentId": "e-rpqsewtp2j", + "EnvironmentName": "my-env", + "Health": "Green", + "SolutionStackName": "64bit Amazon Linux 2015.03 v2.0.0 running Tomcat 8 Java 8", + "Status": "Ready", + "Tier": { + "Name": "WebServer", + "Type": "Standard", + "Version": " " + }, + "VersionLabel": "7f58-stage-150812_025409" + } + ] + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "The following operation retrieves information about an environment named my-env:", + "id": "to-view-information-about-an-environment-1456277288662", + "title": "To view information about an environment" + } + ], + "DescribeEvents": [ + { + "input": { + "EnvironmentName": "my-env" + }, + "output": { + "Events": [ + { + "ApplicationName": "my-app", + "EnvironmentName": 
"my-env", + "EventDate": "2015-08-20T07:06:53.535Z", + "Message": "Environment health has transitioned from Info to Ok.", + "Severity": "INFO" + }, + { + "ApplicationName": "my-app", + "EnvironmentName": "my-env", + "EventDate": "2015-08-20T07:06:02.049Z", + "Message": "Environment update completed successfully.", + "RequestId": "b7f3960b-4709-11e5-ba1e-07e16200da41", + "Severity": "INFO" + }, + { + "ApplicationName": "my-app", + "EnvironmentName": "my-env", + "EventDate": "2015-08-13T19:16:27.561Z", + "Message": "Using elasticbeanstalk-us-west-2-012445113685 as Amazon S3 storage bucket for environment data.", + "RequestId": "ca8dfbf6-41ef-11e5-988b-651aa638f46b", + "Severity": "INFO" + }, + { + "ApplicationName": "my-app", + "EnvironmentName": "my-env", + "EventDate": "2015-08-13T19:16:26.581Z", + "Message": "createEnvironment is starting.", + "RequestId": "cdfba8f6-41ef-11e5-988b-65638f41aa6b", + "Severity": "INFO" + } + ] + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "The following operation retrieves events for an environment named my-env:", + "id": "to-view-events-for-an-environment-1456277367589", + "title": "To view events for an environment" + } + ], + "DescribeInstancesHealth": [ + { + "input": { + "AttributeNames": [ + "All" + ], + "EnvironmentName": "my-env" + }, + "output": { + "InstanceHealthList": [ + { + "ApplicationMetrics": { + "Duration": 10, + "Latency": { + "P10": 0, + "P50": 0.001, + "P75": 0.002, + "P85": 0.003, + "P90": 0.004, + "P95": 0.005, + "P99": 0.006, + "P999": 0.006 + }, + "RequestCount": 48, + "StatusCodes": { + "Status2xx": 47, + "Status3xx": 0, + "Status4xx": 1, + "Status5xx": 0 + } + }, + "Causes": [ + + ], + "Color": "Green", + "HealthStatus": "Ok", + "InstanceId": "i-08691cc7", + "LaunchedAt": "2015-08-13T19:17:09Z", + "System": { + "CPUUtilization": { + "IOWait": 0.2, + "IRQ": 0, + "Idle": 97.8, + "Nice": 0.1, + "SoftIRQ": 0.1, + "System": 0.3, + "User": 1.5 + }, + "LoadAverage": [ + 0, + 0.02, + 0.05 + ] + } + } + ], + "RefreshedAt": "2015-08-20T21:09:08Z" + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "The following operation retrieves health information for instances in an environment named my-env:", + "id": "to-view-environment-health-1456277424757", + "title": "To view environment health" + } + ], + "ListAvailableSolutionStacks": [ + { + "output": { + "SolutionStackDetails": [ + { + "PermittedFileTypes": [ + "zip" + ], + "SolutionStackName": "64bit Amazon Linux 2015.03 v2.0.0 running Node.js" + } + ], + "SolutionStacks": [ + "64bit Amazon Linux 2015.03 v2.0.0 running Node.js", + "64bit Amazon Linux 2015.03 v2.0.0 running PHP 5.6", + "64bit Amazon Linux 2015.03 v2.0.0 running PHP 5.5", + "64bit Amazon Linux 2015.03 v2.0.0 running PHP 5.4", + "64bit Amazon Linux 2015.03 v2.0.0 running Python 3.4", + "64bit Amazon Linux 2015.03 v2.0.0 running Python 2.7", + "64bit Amazon Linux 2015.03 v2.0.0 running Python", + "64bit Amazon Linux 2015.03 v2.0.0 running Ruby 2.2 (Puma)", + "64bit Amazon Linux 2015.03 v2.0.0 running Ruby 2.2 (Passenger Standalone)", + "64bit Amazon Linux 2015.03 v2.0.0 running Ruby 2.1 (Puma)", + "64bit Amazon Linux 2015.03 v2.0.0 running Ruby 2.1 (Passenger Standalone)", + "64bit Amazon Linux 2015.03 v2.0.0 running Ruby 2.0 (Puma)", + "64bit Amazon Linux 2015.03 v2.0.0 running Ruby 2.0 (Passenger Standalone)", + "64bit Amazon Linux 2015.03 v2.0.0 running Ruby 1.9.3", + "64bit Amazon Linux 2015.03 v2.0.0 running Tomcat 8 Java 8", + "64bit Amazon Linux 2015.03 v2.0.0 
running Tomcat 7 Java 7", + "64bit Amazon Linux 2015.03 v2.0.0 running Tomcat 7 Java 6", + "64bit Windows Server Core 2012 R2 running IIS 8.5", + "64bit Windows Server 2012 R2 running IIS 8.5", + "64bit Windows Server 2012 running IIS 8", + "64bit Windows Server 2008 R2 running IIS 7.5", + "64bit Amazon Linux 2015.03 v2.0.0 running Docker 1.6.2", + "64bit Amazon Linux 2015.03 v2.0.0 running Multi-container Docker 1.6.2 (Generic)", + "64bit Debian jessie v2.0.0 running GlassFish 4.1 Java 8 (Preconfigured - Docker)", + "64bit Debian jessie v2.0.0 running GlassFish 4.0 Java 7 (Preconfigured - Docker)", + "64bit Debian jessie v2.0.0 running Go 1.4 (Preconfigured - Docker)", + "64bit Debian jessie v2.0.0 running Go 1.3 (Preconfigured - Docker)", + "64bit Debian jessie v2.0.0 running Python 3.4 (Preconfigured - Docker)" + ] + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "The following operation lists solution stacks for all currently available platform configurations and any that you have used in the past:", + "id": "to-view-solution-stacks-1456277504811", + "title": "To view solution stacks" + } + ], + "RebuildEnvironment": [ + { + "input": { + "EnvironmentName": "my-env" + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "The following operation terminates and recreates the resources in an environment named my-env:", + "id": "to-rebuild-an-environment-1456277600918", + "title": "To rebuild an environment" + } + ], + "RequestEnvironmentInfo": [ + { + "input": { + "EnvironmentName": "my-env", + "InfoType": "tail" + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "The following operation requests logs from an environment named my-env:", + "id": "to-request-tailed-logs-1456277657045", + "title": "To request tailed logs" + } + ], + "RestartAppServer": [ + { + "input": { + "EnvironmentName": "my-env" + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "The following operation restarts application servers on all instances in an environment named my-env:", + "id": "to-restart-application-servers-1456277739302", + "title": "To restart application servers" + } + ], + "RetrieveEnvironmentInfo": [ + { + "input": { + "EnvironmentName": "my-env", + "InfoType": "tail" + }, + "output": { + "EnvironmentInfo": [ + { + "Ec2InstanceId": "i-09c1c867", + "InfoType": "tail", + "Message": "https://elasticbeanstalk-us-west-2-0123456789012.s3.amazonaws.com/resources/environments/logs/tail/e-fyqyju3yjs/i-09c1c867/TailLogs-1440109397703.out?AWSAccessKeyId=AKGPT4J56IAJ2EUBL5CQ&Expires=1440195891&Signature=n%2BEalOV6A2HIOx4Rcfb7LT16bBM%3D", + "SampleTimestamp": "2015-08-20T22:23:17.703Z" + } + ] + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "The following operation retrieves a link to logs from an environment named my-env:", + "id": "to-retrieve-tailed-logs-1456277792734", + "title": "To retrieve tailed logs" + } + ], + "SwapEnvironmentCNAMEs": [ + { + "input": { + "DestinationEnvironmentName": "my-env-green", + "SourceEnvironmentName": "my-env-blue" + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "The following operation swaps the assigned subdomains of two environments:", + "id": "to-swap-environment-cnames-1456277839438", + "title": "To swap environment CNAMES" + } + ], + "TerminateEnvironment": [ + { + "input": { + "EnvironmentName": "my-env" + }, + "output": { + "AbortableOperationInProgress": false, + "ApplicationName": "my-app", + "CNAME": 
"my-env.elasticbeanstalk.com", + "DateCreated": "2015-08-12T18:52:53.622Z", + "DateUpdated": "2015-08-12T19:05:54.744Z", + "EndpointURL": "awseb-e-f-AWSEBLoa-1I9XUMP4-8492WNUP202574.us-west-2.elb.amazonaws.com", + "EnvironmentId": "e-fh2eravpns", + "EnvironmentName": "my-env", + "Health": "Grey", + "SolutionStackName": "64bit Amazon Linux 2015.03 v2.0.0 running Tomcat 8 Java 8", + "Status": "Terminating", + "Tier": { + "Name": "WebServer", + "Type": "Standard", + "Version": " " + } + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "The following operation terminates an Elastic Beanstalk environment named my-env:", + "id": "to-terminate-an-environment-1456277888556", + "title": "To terminate an environment" + } + ], + "UpdateApplication": [ + { + "input": { + "ApplicationName": "my-app", + "Description": "my Elastic Beanstalk application" + }, + "output": { + "Application": { + "ApplicationName": "my-app", + "ConfigurationTemplates": [ + + ], + "DateCreated": "2015-08-13T19:15:50.449Z", + "DateUpdated": "2015-08-20T22:34:56.195Z", + "Description": "my Elastic Beanstalk application", + "Versions": [ + "2fba-stage-150819_234450", + "bf07-stage-150820_214945", + "93f8", + "fd7c-stage-150820_000431", + "22a0-stage-150819_185942" + ] + } + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "The following operation updates the description of an application named my-app:", + "id": "to-change-an-applications-description-1456277957075", + "title": "To change an application's description" + } + ], + "UpdateApplicationVersion": [ + { + "input": { + "ApplicationName": "my-app", + "Description": "new description", + "VersionLabel": "22a0-stage-150819_185942" + }, + "output": { + "ApplicationVersion": { + "ApplicationName": "my-app", + "DateCreated": "2015-08-19T18:59:17.646Z", + "DateUpdated": "2015-08-20T22:53:28.871Z", + "Description": "new description", + "SourceBundle": { + "S3Bucket": "elasticbeanstalk-us-west-2-0123456789012", + "S3Key": "my-app/22a0-stage-150819_185942.war" + }, + "VersionLabel": "22a0-stage-150819_185942" + } + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "The following operation updates the description of an application version named 22a0-stage-150819_185942:", + "id": "to-change-an-application-versions-description-1456278019237", + "title": "To change an application version's description" + } + ], + "UpdateConfigurationTemplate": [ + { + "input": { + "ApplicationName": "my-app", + "OptionsToRemove": [ + { + "Namespace": "aws:elasticbeanstalk:healthreporting:system", + "OptionName": "ConfigDocument" + } + ], + "TemplateName": "my-template" + }, + "output": { + "ApplicationName": "my-app", + "DateCreated": "2015-08-20T22:39:31Z", + "DateUpdated": "2015-08-20T22:43:11Z", + "SolutionStackName": "64bit Amazon Linux 2015.03 v2.0.0 running Tomcat 8 Java 8", + "TemplateName": "my-template" + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "The following operation removes the configured CloudWatch custom health metrics configuration ConfigDocument from a saved configuration template named my-template:", + "id": "to-update-a-configuration-template-1456278075300", + "title": "To update a configuration template" + } + ], + "UpdateEnvironment": [ + { + "input": { + "EnvironmentName": "my-env", + "VersionLabel": "v2" + }, + "output": { + "ApplicationName": "my-app", + "CNAME": "my-env.elasticbeanstalk.com", + "DateCreated": "2015-02-03T23:04:54.453Z", + "DateUpdated": 
"2015-02-03T23:12:29.119Z", + "EndpointURL": "awseb-e-i-AWSEBLoa-1RDLX6TC9VUAO-0123456789.us-west-2.elb.amazonaws.com", + "EnvironmentId": "e-szqipays4h", + "EnvironmentName": "my-env", + "Health": "Grey", + "SolutionStackName": "64bit Amazon Linux running Tomcat 7", + "Status": "Updating", + "Tier": { + "Name": "WebServer", + "Type": "Standard", + "Version": " " + }, + "VersionLabel": "v2" + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "The following operation updates an environment named \"my-env\" to version \"v2\" of the application to which it belongs:", + "id": "to-update-an-environment-to-a-new-version-1456278210718", + "title": "To update an environment to a new version" + }, + { + "input": { + "EnvironmentName": "my-env", + "OptionSettings": [ + { + "Namespace": "aws:elb:healthcheck", + "OptionName": "Interval", + "Value": "15" + }, + { + "Namespace": "aws:elb:healthcheck", + "OptionName": "Timeout", + "Value": "8" + }, + { + "Namespace": "aws:elb:healthcheck", + "OptionName": "HealthyThreshold", + "Value": "2" + }, + { + "Namespace": "aws:elb:healthcheck", + "OptionName": "UnhealthyThreshold", + "Value": "3" + } + ] + }, + "output": { + "AbortableOperationInProgress": true, + "ApplicationName": "my-app", + "CNAME": "my-env.elasticbeanstalk.com", + "DateCreated": "2015-08-07T20:48:49.599Z", + "DateUpdated": "2015-08-12T18:15:23.804Z", + "EndpointURL": "awseb-e-w-AWSEBLoa-14XB83101Q4L-104QXY80921.sa-east-1.elb.amazonaws.com", + "EnvironmentId": "e-wtp2rpqsej", + "EnvironmentName": "my-env", + "Health": "Grey", + "SolutionStackName": "64bit Amazon Linux 2015.03 v2.0.0 running Tomcat 8 Java 8", + "Status": "Updating", + "Tier": { + "Name": "WebServer", + "Type": "Standard", + "Version": " " + }, + "VersionLabel": "7f58-stage-150812_025409" + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "The following operation configures several options in the aws:elb:loadbalancer namespace:", + "id": "to-configure-option-settings-1456278286349", + "title": "To configure option settings" + } + ], + "ValidateConfigurationSettings": [ + { + "input": { + "ApplicationName": "my-app", + "EnvironmentName": "my-env", + "OptionSettings": [ + { + "Namespace": "aws:elasticbeanstalk:healthreporting:system", + "OptionName": "ConfigDocument", + "Value": "{\"CloudWatchMetrics\": {\"Environment\": {\"ApplicationLatencyP99.9\": null,\"InstancesSevere\": 60,\"ApplicationLatencyP90\": 60,\"ApplicationLatencyP99\": null,\"ApplicationLatencyP95\": 60,\"InstancesUnknown\": 60,\"ApplicationLatencyP85\": 60,\"InstancesInfo\": null,\"ApplicationRequests2xx\": null,\"InstancesDegraded\": null,\"InstancesWarning\": 60,\"ApplicationLatencyP50\": 60,\"ApplicationRequestsTotal\": null,\"InstancesNoData\": null,\"InstancesPending\": 60,\"ApplicationLatencyP10\": null,\"ApplicationRequests5xx\": null,\"ApplicationLatencyP75\": null,\"InstancesOk\": 60,\"ApplicationRequests3xx\": null,\"ApplicationRequests4xx\": null},\"Instance\": {\"ApplicationLatencyP99.9\": null,\"ApplicationLatencyP90\": 60,\"ApplicationLatencyP99\": null,\"ApplicationLatencyP95\": null,\"ApplicationLatencyP85\": null,\"CPUUser\": 60,\"ApplicationRequests2xx\": null,\"CPUIdle\": null,\"ApplicationLatencyP50\": null,\"ApplicationRequestsTotal\": 60,\"RootFilesystemUtil\": null,\"LoadAverage1min\": null,\"CPUIrq\": null,\"CPUNice\": 60,\"CPUIowait\": 60,\"ApplicationLatencyP10\": null,\"LoadAverage5min\": null,\"ApplicationRequests5xx\": null,\"ApplicationLatencyP75\": 60,\"CPUSystem\": 
60,\"ApplicationRequests3xx\": 60,\"ApplicationRequests4xx\": null,\"InstanceHealth\": null,\"CPUSoftirq\": 60}},\"Version\": 1}" + } + ] + }, + "output": { + "Messages": [ + + ] + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "The following operation validates a CloudWatch custom metrics config document:", + "id": "to-validate-configuration-settings-1456278393654", + "title": "To validate configuration settings" + } + ] + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/elasticbeanstalk/2010-12-01/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticbeanstalk/2010-12-01/paginators-1.json new file mode 100644 index 000000000..383ce8d6f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticbeanstalk/2010-12-01/paginators-1.json @@ -0,0 +1,25 @@ +{ + "pagination": { + "DescribeApplicationVersions": { + "result_key": "ApplicationVersions" + }, + "DescribeApplications": { + "result_key": "Applications" + }, + "DescribeConfigurationOptions": { + "result_key": "Options" + }, + "DescribeEnvironments": { + "result_key": "Environments" + }, + "DescribeEvents": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxRecords", + "result_key": "Events" + }, + "ListAvailableSolutionStacks": { + "result_key": "SolutionStacks" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/elasticfilesystem/2015-02-01/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticfilesystem/2015-02-01/api-2.json new file mode 100644 index 000000000..84193647d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticfilesystem/2015-02-01/api-2.json @@ -0,0 +1,713 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-02-01", + "endpointPrefix":"elasticfilesystem", + "protocol":"rest-json", + "serviceAbbreviation":"EFS", + "serviceFullName":"Amazon Elastic File System", + "signatureVersion":"v4" + }, + "operations":{ + "CreateFileSystem":{ + "name":"CreateFileSystem", + "http":{ + "method":"POST", + "requestUri":"/2015-02-01/file-systems", + "responseCode":201 + }, + "input":{"shape":"CreateFileSystemRequest"}, + "output":{"shape":"FileSystemDescription"}, + "errors":[ + {"shape":"BadRequest"}, + {"shape":"InternalServerError"}, + {"shape":"FileSystemAlreadyExists"}, + {"shape":"FileSystemLimitExceeded"} + ] + }, + "CreateMountTarget":{ + "name":"CreateMountTarget", + "http":{ + "method":"POST", + "requestUri":"/2015-02-01/mount-targets", + "responseCode":200 + }, + "input":{"shape":"CreateMountTargetRequest"}, + "output":{"shape":"MountTargetDescription"}, + "errors":[ + {"shape":"BadRequest"}, + {"shape":"InternalServerError"}, + {"shape":"FileSystemNotFound"}, + {"shape":"IncorrectFileSystemLifeCycleState"}, + {"shape":"MountTargetConflict"}, + {"shape":"SubnetNotFound"}, + {"shape":"NoFreeAddressesInSubnet"}, + {"shape":"IpAddressInUse"}, + {"shape":"NetworkInterfaceLimitExceeded"}, + {"shape":"SecurityGroupLimitExceeded"}, + {"shape":"SecurityGroupNotFound"}, + {"shape":"UnsupportedAvailabilityZone"} + ] + }, + "CreateTags":{ + "name":"CreateTags", + "http":{ + "method":"POST", + "requestUri":"/2015-02-01/create-tags/{FileSystemId}", + "responseCode":204 + }, + "input":{"shape":"CreateTagsRequest"}, + "errors":[ + {"shape":"BadRequest"}, + {"shape":"InternalServerError"}, + {"shape":"FileSystemNotFound"} + ] + }, + "DeleteFileSystem":{ + "name":"DeleteFileSystem", + "http":{ + "method":"DELETE", + "requestUri":"/2015-02-01/file-systems/{FileSystemId}", + 
"responseCode":204 + }, + "input":{"shape":"DeleteFileSystemRequest"}, + "errors":[ + {"shape":"BadRequest"}, + {"shape":"InternalServerError"}, + {"shape":"FileSystemNotFound"}, + {"shape":"FileSystemInUse"} + ] + }, + "DeleteMountTarget":{ + "name":"DeleteMountTarget", + "http":{ + "method":"DELETE", + "requestUri":"/2015-02-01/mount-targets/{MountTargetId}", + "responseCode":204 + }, + "input":{"shape":"DeleteMountTargetRequest"}, + "errors":[ + {"shape":"BadRequest"}, + {"shape":"InternalServerError"}, + {"shape":"DependencyTimeout"}, + {"shape":"MountTargetNotFound"} + ] + }, + "DeleteTags":{ + "name":"DeleteTags", + "http":{ + "method":"POST", + "requestUri":"/2015-02-01/delete-tags/{FileSystemId}", + "responseCode":204 + }, + "input":{"shape":"DeleteTagsRequest"}, + "errors":[ + {"shape":"BadRequest"}, + {"shape":"InternalServerError"}, + {"shape":"FileSystemNotFound"} + ] + }, + "DescribeFileSystems":{ + "name":"DescribeFileSystems", + "http":{ + "method":"GET", + "requestUri":"/2015-02-01/file-systems", + "responseCode":200 + }, + "input":{"shape":"DescribeFileSystemsRequest"}, + "output":{"shape":"DescribeFileSystemsResponse"}, + "errors":[ + {"shape":"BadRequest"}, + {"shape":"InternalServerError"}, + {"shape":"FileSystemNotFound"} + ] + }, + "DescribeMountTargetSecurityGroups":{ + "name":"DescribeMountTargetSecurityGroups", + "http":{ + "method":"GET", + "requestUri":"/2015-02-01/mount-targets/{MountTargetId}/security-groups", + "responseCode":200 + }, + "input":{"shape":"DescribeMountTargetSecurityGroupsRequest"}, + "output":{"shape":"DescribeMountTargetSecurityGroupsResponse"}, + "errors":[ + {"shape":"BadRequest"}, + {"shape":"InternalServerError"}, + {"shape":"MountTargetNotFound"}, + {"shape":"IncorrectMountTargetState"} + ] + }, + "DescribeMountTargets":{ + "name":"DescribeMountTargets", + "http":{ + "method":"GET", + "requestUri":"/2015-02-01/mount-targets", + "responseCode":200 + }, + "input":{"shape":"DescribeMountTargetsRequest"}, + "output":{"shape":"DescribeMountTargetsResponse"}, + "errors":[ + {"shape":"BadRequest"}, + {"shape":"InternalServerError"}, + {"shape":"FileSystemNotFound"}, + {"shape":"MountTargetNotFound"} + ] + }, + "DescribeTags":{ + "name":"DescribeTags", + "http":{ + "method":"GET", + "requestUri":"/2015-02-01/tags/{FileSystemId}/", + "responseCode":200 + }, + "input":{"shape":"DescribeTagsRequest"}, + "output":{"shape":"DescribeTagsResponse"}, + "errors":[ + {"shape":"BadRequest"}, + {"shape":"InternalServerError"}, + {"shape":"FileSystemNotFound"} + ] + }, + "ModifyMountTargetSecurityGroups":{ + "name":"ModifyMountTargetSecurityGroups", + "http":{ + "method":"PUT", + "requestUri":"/2015-02-01/mount-targets/{MountTargetId}/security-groups", + "responseCode":204 + }, + "input":{"shape":"ModifyMountTargetSecurityGroupsRequest"}, + "errors":[ + {"shape":"BadRequest"}, + {"shape":"InternalServerError"}, + {"shape":"MountTargetNotFound"}, + {"shape":"IncorrectMountTargetState"}, + {"shape":"SecurityGroupLimitExceeded"}, + {"shape":"SecurityGroupNotFound"} + ] + } + }, + "shapes":{ + "AwsAccountId":{"type":"string"}, + "BadRequest":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "CreateFileSystemRequest":{ + "type":"structure", + "required":["CreationToken"], + "members":{ + "CreationToken":{"shape":"CreationToken"}, + "PerformanceMode":{"shape":"PerformanceMode"} + } + }, + 
"CreateMountTargetRequest":{ + "type":"structure", + "required":[ + "FileSystemId", + "SubnetId" + ], + "members":{ + "FileSystemId":{"shape":"FileSystemId"}, + "SubnetId":{"shape":"SubnetId"}, + "IpAddress":{"shape":"IpAddress"}, + "SecurityGroups":{"shape":"SecurityGroups"} + } + }, + "CreateTagsRequest":{ + "type":"structure", + "required":[ + "FileSystemId", + "Tags" + ], + "members":{ + "FileSystemId":{ + "shape":"FileSystemId", + "location":"uri", + "locationName":"FileSystemId" + }, + "Tags":{"shape":"Tags"} + } + }, + "CreationToken":{ + "type":"string", + "max":64, + "min":1 + }, + "DeleteFileSystemRequest":{ + "type":"structure", + "required":["FileSystemId"], + "members":{ + "FileSystemId":{ + "shape":"FileSystemId", + "location":"uri", + "locationName":"FileSystemId" + } + } + }, + "DeleteMountTargetRequest":{ + "type":"structure", + "required":["MountTargetId"], + "members":{ + "MountTargetId":{ + "shape":"MountTargetId", + "location":"uri", + "locationName":"MountTargetId" + } + } + }, + "DeleteTagsRequest":{ + "type":"structure", + "required":[ + "FileSystemId", + "TagKeys" + ], + "members":{ + "FileSystemId":{ + "shape":"FileSystemId", + "location":"uri", + "locationName":"FileSystemId" + }, + "TagKeys":{"shape":"TagKeys"} + } + }, + "DependencyTimeout":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":504}, + "exception":true + }, + "DescribeFileSystemsRequest":{ + "type":"structure", + "members":{ + "MaxItems":{ + "shape":"MaxItems", + "location":"querystring", + "locationName":"MaxItems" + }, + "Marker":{ + "shape":"Marker", + "location":"querystring", + "locationName":"Marker" + }, + "CreationToken":{ + "shape":"CreationToken", + "location":"querystring", + "locationName":"CreationToken" + }, + "FileSystemId":{ + "shape":"FileSystemId", + "location":"querystring", + "locationName":"FileSystemId" + } + } + }, + "DescribeFileSystemsResponse":{ + "type":"structure", + "members":{ + "Marker":{"shape":"Marker"}, + "FileSystems":{"shape":"FileSystemDescriptions"}, + "NextMarker":{"shape":"Marker"} + } + }, + "DescribeMountTargetSecurityGroupsRequest":{ + "type":"structure", + "required":["MountTargetId"], + "members":{ + "MountTargetId":{ + "shape":"MountTargetId", + "location":"uri", + "locationName":"MountTargetId" + } + } + }, + "DescribeMountTargetSecurityGroupsResponse":{ + "type":"structure", + "required":["SecurityGroups"], + "members":{ + "SecurityGroups":{"shape":"SecurityGroups"} + } + }, + "DescribeMountTargetsRequest":{ + "type":"structure", + "members":{ + "MaxItems":{ + "shape":"MaxItems", + "location":"querystring", + "locationName":"MaxItems" + }, + "Marker":{ + "shape":"Marker", + "location":"querystring", + "locationName":"Marker" + }, + "FileSystemId":{ + "shape":"FileSystemId", + "location":"querystring", + "locationName":"FileSystemId" + }, + "MountTargetId":{ + "shape":"MountTargetId", + "location":"querystring", + "locationName":"MountTargetId" + } + } + }, + "DescribeMountTargetsResponse":{ + "type":"structure", + "members":{ + "Marker":{"shape":"Marker"}, + "MountTargets":{"shape":"MountTargetDescriptions"}, + "NextMarker":{"shape":"Marker"} + } + }, + "DescribeTagsRequest":{ + "type":"structure", + "required":["FileSystemId"], + "members":{ + "MaxItems":{ + "shape":"MaxItems", + "location":"querystring", + "locationName":"MaxItems" + }, + "Marker":{ + "shape":"Marker", + "location":"querystring", + "locationName":"Marker" + }, + 
"FileSystemId":{ + "shape":"FileSystemId", + "location":"uri", + "locationName":"FileSystemId" + } + } + }, + "DescribeTagsResponse":{ + "type":"structure", + "required":["Tags"], + "members":{ + "Marker":{"shape":"Marker"}, + "Tags":{"shape":"Tags"}, + "NextMarker":{"shape":"Marker"} + } + }, + "ErrorCode":{ + "type":"string", + "min":1 + }, + "ErrorMessage":{"type":"string"}, + "FileSystemAlreadyExists":{ + "type":"structure", + "required":[ + "ErrorCode", + "FileSystemId" + ], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"}, + "FileSystemId":{"shape":"FileSystemId"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "FileSystemDescription":{ + "type":"structure", + "required":[ + "OwnerId", + "CreationToken", + "FileSystemId", + "CreationTime", + "LifeCycleState", + "NumberOfMountTargets", + "SizeInBytes", + "PerformanceMode" + ], + "members":{ + "OwnerId":{"shape":"AwsAccountId"}, + "CreationToken":{"shape":"CreationToken"}, + "FileSystemId":{"shape":"FileSystemId"}, + "CreationTime":{"shape":"Timestamp"}, + "LifeCycleState":{"shape":"LifeCycleState"}, + "Name":{"shape":"TagValue"}, + "NumberOfMountTargets":{"shape":"MountTargetCount"}, + "SizeInBytes":{"shape":"FileSystemSize"}, + "PerformanceMode":{"shape":"PerformanceMode"} + } + }, + "FileSystemDescriptions":{ + "type":"list", + "member":{"shape":"FileSystemDescription"} + }, + "FileSystemId":{"type":"string"}, + "FileSystemInUse":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "FileSystemLimitExceeded":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":403}, + "exception":true + }, + "FileSystemNotFound":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "FileSystemSize":{ + "type":"structure", + "required":["Value"], + "members":{ + "Value":{"shape":"FileSystemSizeValue"}, + "Timestamp":{"shape":"Timestamp"} + } + }, + "FileSystemSizeValue":{ + "type":"long", + "min":0 + }, + "IncorrectFileSystemLifeCycleState":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "IncorrectMountTargetState":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "InternalServerError":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":500}, + "exception":true + }, + "IpAddress":{"type":"string"}, + "IpAddressInUse":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "LifeCycleState":{ + "type":"string", + "enum":[ + "creating", + "available", + "deleting", + "deleted" + ] + }, + "Marker":{"type":"string"}, + "MaxItems":{ + "type":"integer", + "min":1 + }, + "ModifyMountTargetSecurityGroupsRequest":{ + 
"type":"structure", + "required":["MountTargetId"], + "members":{ + "MountTargetId":{ + "shape":"MountTargetId", + "location":"uri", + "locationName":"MountTargetId" + }, + "SecurityGroups":{"shape":"SecurityGroups"} + } + }, + "MountTargetConflict":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "MountTargetCount":{ + "type":"integer", + "min":0 + }, + "MountTargetDescription":{ + "type":"structure", + "required":[ + "MountTargetId", + "FileSystemId", + "SubnetId", + "LifeCycleState" + ], + "members":{ + "OwnerId":{"shape":"AwsAccountId"}, + "MountTargetId":{"shape":"MountTargetId"}, + "FileSystemId":{"shape":"FileSystemId"}, + "SubnetId":{"shape":"SubnetId"}, + "LifeCycleState":{"shape":"LifeCycleState"}, + "IpAddress":{"shape":"IpAddress"}, + "NetworkInterfaceId":{"shape":"NetworkInterfaceId"} + } + }, + "MountTargetDescriptions":{ + "type":"list", + "member":{"shape":"MountTargetDescription"} + }, + "MountTargetId":{"type":"string"}, + "MountTargetNotFound":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NetworkInterfaceId":{"type":"string"}, + "NetworkInterfaceLimitExceeded":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "NoFreeAddressesInSubnet":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "PerformanceMode":{ + "type":"string", + "enum":[ + "generalPurpose", + "maxIO" + ] + }, + "SecurityGroup":{"type":"string"}, + "SecurityGroupLimitExceeded":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "SecurityGroupNotFound":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "SecurityGroups":{ + "type":"list", + "member":{"shape":"SecurityGroup"}, + "max":5 + }, + "SubnetId":{"type":"string"}, + "SubnetNotFound":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{"shape":"TagKey"}, + "Value":{"shape":"TagValue"} + } + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeys":{ + "type":"list", + "member":{"shape":"TagKey"} + }, + "TagValue":{ + "type":"string", + "max":256 + }, + "Tags":{ + "type":"list", + "member":{"shape":"Tag"} + }, + "Timestamp":{"type":"timestamp"}, + "UnsupportedAvailabilityZone":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/elasticfilesystem/2015-02-01/docs-2.json 
b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticfilesystem/2015-02-01/docs-2.json new file mode 100644 index 000000000..c103fc621 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticfilesystem/2015-02-01/docs-2.json @@ -0,0 +1,422 @@ +{ + "version": "2.0", + "service": "Amazon Elastic File System", + "operations": { + "CreateFileSystem": "
Creates a new, empty file system. The operation requires a creation token in the request that Amazon EFS uses to ensure idempotent creation (calling the operation with the same creation token has no effect). If a file system does not currently exist that is owned by the caller's AWS account with the specified creation token, this operation does the following: • Creates a new, empty file system. The file system will have an Amazon EFS assigned ID and an initial lifecycle state of creating. • Returns the description of the created file system. Otherwise, this operation returns a FileSystemAlreadyExists error with the ID of the existing file system. For basic use cases, you can use a randomly generated UUID for the creation token. The idempotent operation allows you to retry a CreateFileSystem call without risk of creating an extra file system. This can happen when an initial call fails in a way that leaves it uncertain whether or not a file system was actually created, for example, if a transport-level timeout occurred or your connection was reset. As long as you use the same creation token, if the initial call had succeeded in creating a file system, the client can learn of its existence from the FileSystemAlreadyExists error. The CreateFileSystem call returns while the file system's lifecycle state is still creating. You can check the file system creation status by calling the DescribeFileSystems operation, which among other things returns the file system state. This operation also takes an optional PerformanceMode parameter that you choose for your file system. We recommend the generalPurpose performance mode for most file systems. File systems using the maxIO performance mode can scale to higher levels of aggregate throughput and operations per second, with a tradeoff of slightly higher latencies for most file operations. The performance mode can't be changed after the file system has been created. For more information, see Amazon EFS: Performance Modes. After the file system is fully created, Amazon EFS sets its lifecycle state to available, at which point you can create one or more mount targets for the file system in your VPC. For more information, see CreateMountTarget. You mount your Amazon EFS file system on an EC2 instance in your VPC via the mount target. For more information, see Amazon EFS: How it Works. This operation requires permissions for the elasticfilesystem:CreateFileSystem action.
", + "CreateMountTarget": "Creates a mount target for a file system. You can then mount the file system on EC2 instances via the mount target. You can create one mount target in each Availability Zone in your VPC. All EC2 instances in a VPC within a given Availability Zone share a single mount target for a given file system. If you have multiple subnets in an Availability Zone, you create a mount target in one of the subnets. EC2 instances do not need to be in the same subnet as the mount target in order to access their file system. For more information, see Amazon EFS: How it Works. In the request, you also specify a file system ID for which you are creating the mount target, and the file system's lifecycle state must be available. For more information, see DescribeFileSystems. In the request, you also provide a subnet ID, which determines the following: • VPC in which Amazon EFS creates the mount target • Availability Zone in which Amazon EFS creates the mount target • IP address range from which Amazon EFS selects the IP address of the mount target (if you don't specify an IP address in the request) After creating the mount target, Amazon EFS returns a response that includes a MountTargetId and an IpAddress. You use this IP address when mounting the file system in an EC2 instance. You can also use the mount target's DNS name when mounting the file system. The EC2 instance on which you mount the file system via the mount target can resolve the mount target's DNS name to its IP address. For more information, see How it Works: Implementation Overview. Note that you can create mount targets for a file system in only one VPC, and there can be only one mount target per Availability Zone. That is, if the file system already has one or more mount targets created for it, the subnet specified in the request to add another mount target must meet the following requirements: • Must belong to the same VPC as the subnets of the existing mount targets • Must not be in the same Availability Zone as any of the subnets of the existing mount targets If the request satisfies the requirements, Amazon EFS does the following: • Creates a new mount target in the specified subnet. • Also creates a new network interface in the subnet as follows: • If the request provides an IpAddress, Amazon EFS assigns that IP address to the network interface. Otherwise, Amazon EFS assigns a free address in the subnet (in the same way that the Amazon EC2 CreateNetworkInterface call does when a request does not specify a primary private IP address). • If the request provides SecurityGroups, this network interface is associated with those security groups. Otherwise, it belongs to the default security group for the subnet's VPC. • Assigns the description Mount target fsmt-id for file system fs-id, where fsmt-id is the mount target ID and fs-id is the FileSystemId. • Sets the requesterManaged property of the network interface to true, and the requesterId value to EFS. Each Amazon EFS mount target has one corresponding requester-managed EC2 network interface. After the network interface is created, Amazon EFS sets the NetworkInterfaceId field in the mount target's description to the network interface ID, and the IpAddress field to its address. If network interface creation fails, the entire CreateMountTarget operation fails. The CreateMountTarget call returns only after creating the network interface, but while the mount target state is still creating. You can check the mount target creation status by calling the DescribeMountTargets operation, which among other things returns the mount target state. We recommend you create a mount target in each of the Availability Zones. There are cost considerations for using a file system in an Availability Zone through a mount target created in another Availability Zone. For more information, see Amazon EFS. In addition, by always using a mount target local to the instance's Availability Zone, you eliminate a partial failure scenario: if the Availability Zone in which your mount target is created goes down, you won't be able to access your file system through that mount target. This operation requires permissions for the following action on the file system: • elasticfilesystem:CreateMountTarget This operation also requires permissions for the following Amazon EC2 actions: • ec2:DescribeSubnets • ec2:DescribeNetworkInterfaces • ec2:CreateNetworkInterface
", + "CreateTags": "Creates or overwrites tags associated with a file system. Each tag is a key-value pair. If a tag key specified in the request already exists on the file system, this operation overwrites its value with the value provided in the request. If you add the Name tag to your file system, Amazon EFS returns it in the response to the DescribeFileSystems operation. This operation requires permission for the elasticfilesystem:CreateTags action.
", + "DeleteFileSystem": "Deletes a file system, permanently severing access to its contents. Upon return, the file system no longer exists and you can't access any contents of the deleted file system. You can't delete a file system that is in use. That is, if the file system has any mount targets, you must first delete them. For more information, see DescribeMountTargets and DeleteMountTarget. The DeleteFileSystem call returns while the file system state is still deleting. You can check the file system deletion status by calling the DescribeFileSystems operation, which returns a list of file systems in your account. If you pass the file system ID or creation token of the deleted file system, DescribeFileSystems returns a 404 FileSystemNotFound error. This operation requires permissions for the elasticfilesystem:DeleteFileSystem action.
", + "DeleteMountTarget": "Deletes the specified mount target. This operation forcibly breaks any mounts of the file system via the mount target that is being deleted, which might disrupt instances or applications using those mounts. To avoid applications getting cut off abruptly, you might consider unmounting any mounts of the mount target, if feasible. The operation also deletes the associated network interface. Uncommitted writes may be lost, but breaking a mount target using this operation does not corrupt the file system itself. The file system you created remains; you can mount it on an EC2 instance in your VPC via another mount target. This operation requires permissions for the following action on the file system: • elasticfilesystem:DeleteMountTarget The DeleteMountTarget call returns while the mount target state is still deleting. You can check the mount target deletion status by calling the DescribeMountTargets operation, which returns a list of mount target descriptions for the given file system. The operation also requires permissions for the following Amazon EC2 action on the mount target's network interface: • ec2:DeleteNetworkInterface
", + "DeleteTags": "Deletes the specified tags from a file system. If the DeleteTags request includes a tag key that does not exist, Amazon EFS ignores it and doesn't cause an error. For more information about tags and related restrictions, see Tag Restrictions in the AWS Billing and Cost Management User Guide. This operation requires permissions for the elasticfilesystem:DeleteTags action.
", + "DescribeFileSystems": "Returns the description of a specific Amazon EFS file system if either the file system CreationToken or the FileSystemId is provided. Otherwise, it returns descriptions of all file systems owned by the caller's AWS account in the AWS Region of the endpoint that you're calling. When retrieving all file system descriptions, you can optionally specify the MaxItems parameter to limit the number of descriptions in a response. If more file system descriptions remain, Amazon EFS returns a NextMarker, an opaque token, in the response. In this case, you should send a subsequent request with the Marker request parameter set to the value of NextMarker. To retrieve a list of your file system descriptions, this operation is used in an iterative process: call DescribeFileSystems first without the Marker, then continue calling it with the Marker parameter set to the value of the NextMarker from the previous response, until the response has no NextMarker. The implementation may return fewer than MaxItems file system descriptions while still including a NextMarker value. The order of file systems returned in the response of one DescribeFileSystems call and the order of file systems returned across the responses of a multi-call iteration is unspecified. This operation requires permissions for the elasticfilesystem:DescribeFileSystems action.
", + "DescribeMountTargetSecurityGroups": "Returns the security groups currently in effect for a mount target. This operation requires that the network interface of the mount target has been created and the lifecycle state of the mount target is not deleted. This operation requires permissions for the following actions: • elasticfilesystem:DescribeMountTargetSecurityGroups action on the mount target's file system. • ec2:DescribeNetworkInterfaceAttribute action on the mount target's network interface.
", + "DescribeMountTargets": "Returns the descriptions of all the current mount targets, or a specific mount target, for a file system. When requesting all of the current mount targets, the order of mount targets returned in the response is unspecified. This operation requires permissions for the elasticfilesystem:DescribeMountTargets action, on either the file system ID that you specify in FileSystemId, or on the file system of the mount target that you specify in MountTargetId.
", + "DescribeTags": "Returns the tags associated with a file system. The order of tags returned in the response of one DescribeTags call and the order of tags returned across the responses of a multi-call iteration (when using pagination) is unspecified. This operation requires permissions for the elasticfilesystem:DescribeTags action.
", + "ModifyMountTargetSecurityGroups": "Modifies the set of security groups in effect for a mount target. When you create a mount target, Amazon EFS also creates a new network interface. For more information, see CreateMountTarget. This operation replaces the security groups in effect for the network interface associated with a mount target with the SecurityGroups provided in the request. This operation requires that the network interface of the mount target has been created and the lifecycle state of the mount target is not deleted. The operation requires permissions for the following actions: • elasticfilesystem:ModifyMountTargetSecurityGroups action on the mount target's file system. • ec2:ModifyNetworkInterfaceAttribute action on the mount target's network interface.
" + },
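(Aside, not part of the vendored files: the EFS documentation above describes idempotent creation keyed to CreationToken, Marker/NextMarker pagination, and whole-set security group replacement. A minimal Go sketch of how those contracts surface through the aws-sdk-go client this update vendors follows. The region, creation token, mount target ID, and security group ID are illustrative placeholders, and the FileSystemAlreadyExists error code is matched as a literal string rather than a generated constant.)

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/efs"
)

func main() {
	// Region is a placeholder; credentials come from the usual SDK chain.
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	svc := efs.New(sess)

	// CreateFileSystem is idempotent on the creation token: retrying with the
	// same token either creates the file system once or returns
	// FileSystemAlreadyExists, which carries the existing file system's ID.
	fs, err := svc.CreateFileSystem(&efs.CreateFileSystemInput{
		CreationToken:   aws.String("my-app-fs"), // placeholder token
		PerformanceMode: aws.String("generalPurpose"),
	})
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "FileSystemAlreadyExists" {
			log.Println("already created on an earlier attempt; safe to continue")
		} else {
			log.Fatal(err)
		}
	} else {
		fmt.Println("created:", aws.StringValue(fs.FileSystemId))
	}

	// Marker/NextMarker pagination for DescribeFileSystems: keep calling with
	// Marker set to the previous response's NextMarker until it is absent.
	in := &efs.DescribeFileSystemsInput{MaxItems: aws.Int64(10)}
	for {
		page, err := svc.DescribeFileSystems(in)
		if err != nil {
			log.Fatal(err)
		}
		for _, d := range page.FileSystems {
			fmt.Println(aws.StringValue(d.FileSystemId), aws.StringValue(d.LifeCycleState))
		}
		if page.NextMarker == nil {
			break
		}
		in.Marker = page.NextMarker
	}

	// ModifyMountTargetSecurityGroups replaces the full set (at most five IDs);
	// both IDs below are placeholders.
	_, err = svc.ModifyMountTargetSecurityGroups(&efs.ModifyMountTargetSecurityGroupsInput{
		MountTargetId:  aws.String("fsmt-01234567"),
		SecurityGroups: aws.StringSlice([]string{"sg-0a1b2c3d"}),
	})
	if err != nil {
		log.Fatal(err)
	}
}

Because idempotency is keyed to the creation token, the retry path needs no bookkeeping beyond reusing the token, and pagination needs only the NextMarker handoff.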
    AWS account that created the file system. If the file system was created by an IAM user, the parent account to which the user belongs is the owner.

    ", + "MountTargetDescription$OwnerId": "

    AWS account ID that owns the resource.

    " + } + }, + "BadRequest": { + "base": "

    Returned if the request is malformed or contains an error such as an invalid parameter value or a missing required parameter.

    ", + "refs": { + } + }, + "CreateFileSystemRequest": { + "base": null, + "refs": { + } + }, + "CreateMountTargetRequest": { + "base": "

    ", + "refs": { + } + }, + "CreateTagsRequest": { + "base": "

    ", + "refs": { + } + }, + "CreationToken": { + "base": null, + "refs": { + "CreateFileSystemRequest$CreationToken": "

    String of up to 64 ASCII characters. Amazon EFS uses this to ensure idempotent creation.

    ", + "DescribeFileSystemsRequest$CreationToken": "

    (Optional) Restricts the list to the file system with this creation token (String). You specify a creation token when you create an Amazon EFS file system.

    ", + "FileSystemDescription$CreationToken": "

    Opaque string specified in the request.

    " + } + }, + "DeleteFileSystemRequest": { + "base": "

    ", + "refs": { + } + }, + "DeleteMountTargetRequest": { + "base": "

    ", + "refs": { + } + }, + "DeleteTagsRequest": { + "base": "

    ", + "refs": { + } + }, + "DependencyTimeout": { + "base": "

    The service timed out trying to fulfill the request, and the client should try the call again.

    ", + "refs": { + } + }, + "DescribeFileSystemsRequest": { + "base": "

    ", + "refs": { + } + }, + "DescribeFileSystemsResponse": { + "base": null, + "refs": { + } + }, + "DescribeMountTargetSecurityGroupsRequest": { + "base": "

    ", + "refs": { + } + }, + "DescribeMountTargetSecurityGroupsResponse": { + "base": null, + "refs": { + } + }, + "DescribeMountTargetsRequest": { + "base": "

    ", + "refs": { + } + }, + "DescribeMountTargetsResponse": { + "base": "

    ", + "refs": { + } + }, + "DescribeTagsRequest": { + "base": "

    ", + "refs": { + } + }, + "DescribeTagsResponse": { + "base": "

    ", + "refs": { + } + }, + "ErrorCode": { + "base": null, + "refs": { + "BadRequest$ErrorCode": null, + "DependencyTimeout$ErrorCode": null, + "FileSystemAlreadyExists$ErrorCode": null, + "FileSystemInUse$ErrorCode": null, + "FileSystemLimitExceeded$ErrorCode": null, + "FileSystemNotFound$ErrorCode": null, + "IncorrectFileSystemLifeCycleState$ErrorCode": null, + "IncorrectMountTargetState$ErrorCode": null, + "InternalServerError$ErrorCode": null, + "IpAddressInUse$ErrorCode": null, + "MountTargetConflict$ErrorCode": null, + "MountTargetNotFound$ErrorCode": null, + "NetworkInterfaceLimitExceeded$ErrorCode": null, + "NoFreeAddressesInSubnet$ErrorCode": null, + "SecurityGroupLimitExceeded$ErrorCode": null, + "SecurityGroupNotFound$ErrorCode": null, + "SubnetNotFound$ErrorCode": null, + "UnsupportedAvailabilityZone$ErrorCode": null + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "BadRequest$Message": null, + "DependencyTimeout$Message": null, + "FileSystemAlreadyExists$Message": null, + "FileSystemInUse$Message": null, + "FileSystemLimitExceeded$Message": null, + "FileSystemNotFound$Message": null, + "IncorrectFileSystemLifeCycleState$Message": null, + "IncorrectMountTargetState$Message": null, + "InternalServerError$Message": null, + "IpAddressInUse$Message": null, + "MountTargetConflict$Message": null, + "MountTargetNotFound$Message": null, + "NetworkInterfaceLimitExceeded$Message": null, + "NoFreeAddressesInSubnet$Message": null, + "SecurityGroupLimitExceeded$Message": null, + "SecurityGroupNotFound$Message": null, + "SubnetNotFound$Message": null, + "UnsupportedAvailabilityZone$Message": null + } + }, + "FileSystemAlreadyExists": { + "base": "

    Returned if the file system you are trying to create already exists, with the creation token you provided.

    ", + "refs": { + } + }, + "FileSystemDescription": { + "base": "

    Description of the file system.

    ", + "refs": { + "FileSystemDescriptions$member": null + } + }, + "FileSystemDescriptions": { + "base": null, + "refs": { + "DescribeFileSystemsResponse$FileSystems": "

    Array of file system descriptions.

    " + } + }, + "FileSystemId": { + "base": null, + "refs": { + "CreateMountTargetRequest$FileSystemId": "

    ID of the file system for which to create the mount target.

    ", + "CreateTagsRequest$FileSystemId": "

    ID of the file system whose tags you want to modify (String). This operation modifies the tags only, not the file system.

    ", + "DeleteFileSystemRequest$FileSystemId": "

    ID of the file system you want to delete.

    ", + "DeleteTagsRequest$FileSystemId": "

    ID of the file system whose tags you want to delete (String).

    ", + "DescribeFileSystemsRequest$FileSystemId": "

    (Optional) ID of the file system whose description you want to retrieve (String).

    ", + "DescribeMountTargetsRequest$FileSystemId": "

    (Optional) ID of the file system whose mount targets you want to list (String). It must be included in your request if MountTargetId is not included.

    ", + "DescribeTagsRequest$FileSystemId": "

    ID of the file system whose tag set you want to retrieve.

    ", + "FileSystemAlreadyExists$FileSystemId": null, + "FileSystemDescription$FileSystemId": "

    ID of the file system, assigned by Amazon EFS.

    ", + "MountTargetDescription$FileSystemId": "

    ID of the file system for which the mount target is intended.

    " + } + }, + "FileSystemInUse": { + "base": "

    Returned if a file system has mount targets.

    ", + "refs": { + } + }, + "FileSystemLimitExceeded": { + "base": "

    Returned if the AWS account has already created maximum number of file systems allowed per account.

    ", + "refs": { + } + }, + "FileSystemNotFound": { + "base": "

    Returned if the specified FileSystemId does not exist in the requester's AWS account.

    ", + "refs": { + } + }, + "FileSystemSize": { + "base": "

    Latest known metered size (in bytes) of data stored in the file system, in its Value field, and the time at which that size was determined in its Timestamp field. Note that the value does not represent the size of a consistent snapshot of the file system, but it is eventually consistent when there are no writes to the file system. That is, the value will represent the actual size only if the file system is not modified for a period longer than a couple of hours. Otherwise, the value is not necessarily the exact size the file system was at any instant in time.

    ", + "refs": { + "FileSystemDescription$SizeInBytes": "

    Latest known metered size (in bytes) of data stored in the file system, in bytes, in its Value field, and the time at which that size was determined in its Timestamp field. The Timestamp value is the integer number of seconds since 1970-01-01T00:00:00Z. Note that the value does not represent the size of a consistent snapshot of the file system, but it is eventually consistent when there are no writes to the file system. That is, the value will represent actual size only if the file system is not modified for a period longer than a couple of hours. Otherwise, the value is not the exact size the file system was at any instant in time.

    " + } + }, + "FileSystemSizeValue": { + "base": null, + "refs": { + "FileSystemSize$Value": "

    Latest known metered size (in bytes) of data stored in the file system.

    " + } + }, + "IncorrectFileSystemLifeCycleState": { + "base": "

    Returned if the file system's life cycle state is not \"created\".

    ", + "refs": { + } + }, + "IncorrectMountTargetState": { + "base": "

    Returned if the mount target is not in the correct state for the operation.

    ", + "refs": { + } + }, + "InternalServerError": { + "base": "

    Returned if an error occurred on the server side.

    ", + "refs": { + } + }, + "IpAddress": { + "base": null, + "refs": { + "CreateMountTargetRequest$IpAddress": "

    Valid IPv4 address within the address range of the specified subnet.

    ", + "MountTargetDescription$IpAddress": "

    Address at which the file system may be mounted via the mount target.

    " + } + }, + "IpAddressInUse": { + "base": "

    Returned if the request specified an IpAddress that is already in use in the subnet.

    ", + "refs": { + } + }, + "LifeCycleState": { + "base": null, + "refs": { + "FileSystemDescription$LifeCycleState": "

    Lifecycle phase of the file system.

    ", + "MountTargetDescription$LifeCycleState": "

    Lifecycle state of the mount target.

    " + } + }, + "Marker": { + "base": null, + "refs": { + "DescribeFileSystemsRequest$Marker": "

    (Optional) Opaque pagination token returned from a previous DescribeFileSystems operation (String). If present, specifies to continue the list from where the returning call had left off.

    ", + "DescribeFileSystemsResponse$Marker": "

    Present if provided by caller in the request (String).

    ", + "DescribeFileSystemsResponse$NextMarker": "

    Present if there are more file systems than returned in the response (String). You can use the NextMarker in the subsequent request to fetch the descriptions.

    ", + "DescribeMountTargetsRequest$Marker": "

    (Optional) Opaque pagination token returned from a previous DescribeMountTargets operation (String). If present, it specifies to continue the list from where the previous returning call left off.

    ", + "DescribeMountTargetsResponse$Marker": "

    If the request included the Marker, the response returns that value in this field.

    ", + "DescribeMountTargetsResponse$NextMarker": "

    If a value is present, there are more mount targets to return. In a subsequent request, you can provide Marker in your request with this value to retrieve the next set of mount targets.

    ", + "DescribeTagsRequest$Marker": "

    (Optional) Opaque pagination token returned from a previous DescribeTags operation (String). If present, it specifies to continue the list from where the previous call left off.

    ", + "DescribeTagsResponse$Marker": "

    If the request included a Marker, the response returns that value in this field.

    ", + "DescribeTagsResponse$NextMarker": "

    If a value is present, there are more tags to return. In a subsequent request, you can provide the value of NextMarker as the value of the Marker parameter in your next request to retrieve the next set of tags.

    " + } + }, + "MaxItems": { + "base": null, + "refs": { + "DescribeFileSystemsRequest$MaxItems": "

    (Optional) Specifies the maximum number of file systems to return in the response (integer). This parameter value must be greater than 0. The number of items that Amazon EFS returns is the minimum of the MaxItems parameter specified in the request and the service's internal maximum number of items per page.

    ", + "DescribeMountTargetsRequest$MaxItems": "

    (Optional) Maximum number of mount targets to return in the response. It must be an integer with a value greater than zero.

    ", + "DescribeTagsRequest$MaxItems": "

    (Optional) Maximum number of file system tags to return in the response. It must be an integer with a value greater than zero.

    " + } + }, + "ModifyMountTargetSecurityGroupsRequest": { + "base": "

    ", + "refs": { + } + }, + "MountTargetConflict": { + "base": "

    Returned if the mount target would violate one of the specified restrictions based on the file system's existing mount targets.

    ", + "refs": { + } + }, + "MountTargetCount": { + "base": null, + "refs": { + "FileSystemDescription$NumberOfMountTargets": "

    Current number of mount targets that the file system has. For more information, see CreateMountTarget.

    " + } + }, + "MountTargetDescription": { + "base": "

    Provides a description of a mount target.

    ", + "refs": { + "MountTargetDescriptions$member": null + } + }, + "MountTargetDescriptions": { + "base": null, + "refs": { + "DescribeMountTargetsResponse$MountTargets": "

    Returns the file system's mount targets as an array of MountTargetDescription objects.

    " + } + }, + "MountTargetId": { + "base": null, + "refs": { + "DeleteMountTargetRequest$MountTargetId": "

    ID of the mount target to delete (String).

    ", + "DescribeMountTargetSecurityGroupsRequest$MountTargetId": "

    ID of the mount target whose security groups you want to retrieve.

    ", + "DescribeMountTargetsRequest$MountTargetId": "

    (Optional) ID of the mount target that you want to have described (String). It must be included in your request if FileSystemId is not included.

    ", + "ModifyMountTargetSecurityGroupsRequest$MountTargetId": "

    ID of the mount target whose security groups you want to modify.

    ", + "MountTargetDescription$MountTargetId": "

    System-assigned mount target ID.

    " + } + }, + "MountTargetNotFound": { + "base": "

    Returned if there is no mount target with the specified ID found in the caller's account.

    ", + "refs": { + } + }, + "NetworkInterfaceId": { + "base": null, + "refs": { + "MountTargetDescription$NetworkInterfaceId": "

    ID of the network interface that Amazon EFS created when it created the mount target.

    " + } + }, + "NetworkInterfaceLimitExceeded": { + "base": "

    The calling account has reached the ENI limit for the specific AWS region. Client should try to delete some ENIs or get its account limit raised. For more information, see Amazon VPC Limits in the Amazon Virtual Private Cloud User Guide (see the Network interfaces per VPC entry in the table).

    ", + "refs": { + } + }, + "NoFreeAddressesInSubnet": { + "base": "

    Returned if IpAddress was not specified in the request and there are no free IP addresses in the subnet.

    ", + "refs": { + } + }, + "PerformanceMode": { + "base": null, + "refs": { + "CreateFileSystemRequest$PerformanceMode": "

    The PerformanceMode of the file system. We recommend generalPurpose performance mode for most file systems. File systems using the maxIO performance mode can scale to higher levels of aggregate throughput and operations per second with a tradeoff of slightly higher latencies for most file operations. This can't be changed after the file system has been created.

    ", + "FileSystemDescription$PerformanceMode": "

    The PerformanceMode of the file system.

    " + } + }, + "SecurityGroup": { + "base": null, + "refs": { + "SecurityGroups$member": null + } + }, + "SecurityGroupLimitExceeded": { + "base": "

    Returned if the size of SecurityGroups specified in the request is greater than five.

    ", + "refs": { + } + }, + "SecurityGroupNotFound": { + "base": "

    Returned if one of the specified security groups does not exist in the subnet's VPC.

    ", + "refs": { + } + }, + "SecurityGroups": { + "base": null, + "refs": { + "CreateMountTargetRequest$SecurityGroups": "

    Up to five VPC security group IDs, of the form sg-xxxxxxxx. These must be for the same VPC as the subnet specified.

    ", + "DescribeMountTargetSecurityGroupsResponse$SecurityGroups": "

    Array of security groups.

    ", + "ModifyMountTargetSecurityGroupsRequest$SecurityGroups": "

    Array of up to five VPC security group IDs.
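
    A hedged sketch of the corresponding ModifyMountTargetSecurityGroups call (same assumed svc client; both IDs are placeholders):

        // Replaces the mount target's security groups; at most five IDs,
        // all in the same VPC as the mount target's subnet.
        _, err := svc.ModifyMountTargetSecurityGroups(&efs.ModifyMountTargetSecurityGroupsInput{
            MountTargetId:  aws.String("fsmt-12345678"),              // placeholder ID
            SecurityGroups: aws.StringSlice([]string{"sg-12345678"}), // placeholder group
        })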

    " + } + }, + "SubnetId": { + "base": null, + "refs": { + "CreateMountTargetRequest$SubnetId": "

    ID of the subnet to add the mount target in.

    ", + "MountTargetDescription$SubnetId": "

    ID of the mount target's subnet.

    " + } + }, + "SubnetNotFound": { + "base": "

    Returned if there is no subnet with the ID SubnetId provided in the request.

    ", + "refs": { + } + }, + "Tag": { + "base": "

    A tag is a key-value pair. Allowed characters are letters, whitespace, and numbers representable in UTF-8, plus the following characters: + - = . _ : /

    ", + "refs": { + "Tags$member": null + } + }, + "TagKey": { + "base": null, + "refs": { + "Tag$Key": "

    Tag key (String). The key can't start with aws:.

    ", + "TagKeys$member": null + } + }, + "TagKeys": { + "base": null, + "refs": { + "DeleteTagsRequest$TagKeys": "

    List of tag keys to delete.

    " + } + }, + "TagValue": { + "base": null, + "refs": { + "FileSystemDescription$Name": "

    You can add tags to a file system, including a Name tag. For more information, see CreateTags. If the file system has a Name tag, Amazon EFS returns the value in this field.

    ", + "Tag$Value": "

    Value of the tag key.

    " + } + }, + "Tags": { + "base": null, + "refs": { + "CreateTagsRequest$Tags": "

    Array of Tag objects to add. Each Tag object is a key-value pair.

    ", + "DescribeTagsResponse$Tags": "

    Returns tags associated with the file system as an array of Tag objects.

    " + } + }, + "Timestamp": { + "base": null, + "refs": { + "FileSystemDescription$CreationTime": "

    Time that the file system was created, in seconds (since 1970-01-01T00:00:00Z).

    ", + "FileSystemSize$Timestamp": "

    Time at which the size of data, returned in the Value field, was determined. The value is the integer number of seconds since 1970-01-01T00:00:00Z.

    " + } + }, + "UnsupportedAvailabilityZone": { + "base": null, + "refs": { + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/elasticfilesystem/2015-02-01/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticfilesystem/2015-02-01/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticfilesystem/2015-02-01/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/elasticloadbalancing/2012-06-01/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticloadbalancing/2012-06-01/api-2.json new file mode 100644 index 000000000..849d31ff7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticloadbalancing/2012-06-01/api-2.json @@ -0,0 +1,2145 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2012-06-01", + "endpointPrefix":"elasticloadbalancing", + "serviceFullName":"Elastic Load Balancing", + "signatureVersion":"v4", + "xmlNamespace":"http://elasticloadbalancing.amazonaws.com/doc/2012-06-01/", + "protocol":"query" + }, + "operations":{ + "AddTags":{ + "name":"AddTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddTagsInput"}, + "output":{ + "shape":"AddTagsOutput", + "resultWrapper":"AddTagsResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"TooManyTagsException", + "error":{ + "code":"TooManyTags", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"DuplicateTagKeysException", + "error":{ + "code":"DuplicateTagKeys", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "ApplySecurityGroupsToLoadBalancer":{ + "name":"ApplySecurityGroupsToLoadBalancer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ApplySecurityGroupsToLoadBalancerInput"}, + "output":{ + "shape":"ApplySecurityGroupsToLoadBalancerOutput", + "resultWrapper":"ApplySecurityGroupsToLoadBalancerResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidConfigurationRequestException", + "error":{ + "code":"InvalidConfigurationRequest", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidSecurityGroupException", + "error":{ + "code":"InvalidSecurityGroup", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "AttachLoadBalancerToSubnets":{ + "name":"AttachLoadBalancerToSubnets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachLoadBalancerToSubnetsInput"}, + "output":{ + "shape":"AttachLoadBalancerToSubnetsOutput", + "resultWrapper":"AttachLoadBalancerToSubnetsResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidConfigurationRequestException", + "error":{ + "code":"InvalidConfigurationRequest", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"SubnetNotFoundException", + "error":{ + "code":"SubnetNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + 
{ + "shape":"InvalidSubnetException", + "error":{ + "code":"InvalidSubnet", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "ConfigureHealthCheck":{ + "name":"ConfigureHealthCheck", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ConfigureHealthCheckInput"}, + "output":{ + "shape":"ConfigureHealthCheckOutput", + "resultWrapper":"ConfigureHealthCheckResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "CreateAppCookieStickinessPolicy":{ + "name":"CreateAppCookieStickinessPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateAppCookieStickinessPolicyInput"}, + "output":{ + "shape":"CreateAppCookieStickinessPolicyOutput", + "resultWrapper":"CreateAppCookieStickinessPolicyResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"DuplicatePolicyNameException", + "error":{ + "code":"DuplicatePolicyName", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"TooManyPoliciesException", + "error":{ + "code":"TooManyPolicies", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidConfigurationRequestException", + "error":{ + "code":"InvalidConfigurationRequest", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "CreateLBCookieStickinessPolicy":{ + "name":"CreateLBCookieStickinessPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateLBCookieStickinessPolicyInput"}, + "output":{ + "shape":"CreateLBCookieStickinessPolicyOutput", + "resultWrapper":"CreateLBCookieStickinessPolicyResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"DuplicatePolicyNameException", + "error":{ + "code":"DuplicatePolicyName", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"TooManyPoliciesException", + "error":{ + "code":"TooManyPolicies", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidConfigurationRequestException", + "error":{ + "code":"InvalidConfigurationRequest", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "CreateLoadBalancer":{ + "name":"CreateLoadBalancer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateAccessPointInput"}, + "output":{ + "shape":"CreateAccessPointOutput", + "resultWrapper":"CreateLoadBalancerResult" + }, + "errors":[ + { + "shape":"DuplicateAccessPointNameException", + "error":{ + "code":"DuplicateLoadBalancerName", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"TooManyAccessPointsException", + "error":{ + "code":"TooManyLoadBalancers", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"CertificateNotFoundException", + "error":{ + "code":"CertificateNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidConfigurationRequestException", + "error":{ + "code":"InvalidConfigurationRequest", + "httpStatusCode":409, + "senderFault":true + }, 
+ "exception":true + }, + { + "shape":"SubnetNotFoundException", + "error":{ + "code":"SubnetNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidSubnetException", + "error":{ + "code":"InvalidSubnet", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidSecurityGroupException", + "error":{ + "code":"InvalidSecurityGroup", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidSchemeException", + "error":{ + "code":"InvalidScheme", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"TooManyTagsException", + "error":{ + "code":"TooManyTags", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"DuplicateTagKeysException", + "error":{ + "code":"DuplicateTagKeys", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "CreateLoadBalancerListeners":{ + "name":"CreateLoadBalancerListeners", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateLoadBalancerListenerInput"}, + "output":{ + "shape":"CreateLoadBalancerListenerOutput", + "resultWrapper":"CreateLoadBalancerListenersResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"DuplicateListenerException", + "error":{ + "code":"DuplicateListener", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"CertificateNotFoundException", + "error":{ + "code":"CertificateNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidConfigurationRequestException", + "error":{ + "code":"InvalidConfigurationRequest", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "CreateLoadBalancerPolicy":{ + "name":"CreateLoadBalancerPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateLoadBalancerPolicyInput"}, + "output":{ + "shape":"CreateLoadBalancerPolicyOutput", + "resultWrapper":"CreateLoadBalancerPolicyResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"PolicyTypeNotFoundException", + "error":{ + "code":"PolicyTypeNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"DuplicatePolicyNameException", + "error":{ + "code":"DuplicatePolicyName", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"TooManyPoliciesException", + "error":{ + "code":"TooManyPolicies", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidConfigurationRequestException", + "error":{ + "code":"InvalidConfigurationRequest", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeleteLoadBalancer":{ + "name":"DeleteLoadBalancer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAccessPointInput"}, + "output":{ + "shape":"DeleteAccessPointOutput", + "resultWrapper":"DeleteLoadBalancerResult" + } + }, + "DeleteLoadBalancerListeners":{ + "name":"DeleteLoadBalancerListeners", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteLoadBalancerListenerInput"}, + "output":{ + 
"shape":"DeleteLoadBalancerListenerOutput", + "resultWrapper":"DeleteLoadBalancerListenersResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeleteLoadBalancerPolicy":{ + "name":"DeleteLoadBalancerPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteLoadBalancerPolicyInput"}, + "output":{ + "shape":"DeleteLoadBalancerPolicyOutput", + "resultWrapper":"DeleteLoadBalancerPolicyResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidConfigurationRequestException", + "error":{ + "code":"InvalidConfigurationRequest", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeregisterInstancesFromLoadBalancer":{ + "name":"DeregisterInstancesFromLoadBalancer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeregisterEndPointsInput"}, + "output":{ + "shape":"DeregisterEndPointsOutput", + "resultWrapper":"DeregisterInstancesFromLoadBalancerResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidEndPointException", + "error":{ + "code":"InvalidInstance", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeInstanceHealth":{ + "name":"DescribeInstanceHealth", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEndPointStateInput"}, + "output":{ + "shape":"DescribeEndPointStateOutput", + "resultWrapper":"DescribeInstanceHealthResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidEndPointException", + "error":{ + "code":"InvalidInstance", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeLoadBalancerAttributes":{ + "name":"DescribeLoadBalancerAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeLoadBalancerAttributesInput"}, + "output":{ + "shape":"DescribeLoadBalancerAttributesOutput", + "resultWrapper":"DescribeLoadBalancerAttributesResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"LoadBalancerAttributeNotFoundException", + "error":{ + "code":"LoadBalancerAttributeNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeLoadBalancerPolicies":{ + "name":"DescribeLoadBalancerPolicies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeLoadBalancerPoliciesInput"}, + "output":{ + "shape":"DescribeLoadBalancerPoliciesOutput", + "resultWrapper":"DescribeLoadBalancerPoliciesResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"PolicyNotFoundException", + "error":{ + "code":"PolicyNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + 
"DescribeLoadBalancerPolicyTypes":{ + "name":"DescribeLoadBalancerPolicyTypes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeLoadBalancerPolicyTypesInput"}, + "output":{ + "shape":"DescribeLoadBalancerPolicyTypesOutput", + "resultWrapper":"DescribeLoadBalancerPolicyTypesResult" + }, + "errors":[ + { + "shape":"PolicyTypeNotFoundException", + "error":{ + "code":"PolicyTypeNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeLoadBalancers":{ + "name":"DescribeLoadBalancers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAccessPointsInput"}, + "output":{ + "shape":"DescribeAccessPointsOutput", + "resultWrapper":"DescribeLoadBalancersResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeTags":{ + "name":"DescribeTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTagsInput"}, + "output":{ + "shape":"DescribeTagsOutput", + "resultWrapper":"DescribeTagsResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DetachLoadBalancerFromSubnets":{ + "name":"DetachLoadBalancerFromSubnets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachLoadBalancerFromSubnetsInput"}, + "output":{ + "shape":"DetachLoadBalancerFromSubnetsOutput", + "resultWrapper":"DetachLoadBalancerFromSubnetsResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidConfigurationRequestException", + "error":{ + "code":"InvalidConfigurationRequest", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DisableAvailabilityZonesForLoadBalancer":{ + "name":"DisableAvailabilityZonesForLoadBalancer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveAvailabilityZonesInput"}, + "output":{ + "shape":"RemoveAvailabilityZonesOutput", + "resultWrapper":"DisableAvailabilityZonesForLoadBalancerResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidConfigurationRequestException", + "error":{ + "code":"InvalidConfigurationRequest", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "EnableAvailabilityZonesForLoadBalancer":{ + "name":"EnableAvailabilityZonesForLoadBalancer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddAvailabilityZonesInput"}, + "output":{ + "shape":"AddAvailabilityZonesOutput", + "resultWrapper":"EnableAvailabilityZonesForLoadBalancerResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "ModifyLoadBalancerAttributes":{ + "name":"ModifyLoadBalancerAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyLoadBalancerAttributesInput"}, + "output":{ + "shape":"ModifyLoadBalancerAttributesOutput", + 
"resultWrapper":"ModifyLoadBalancerAttributesResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"LoadBalancerAttributeNotFoundException", + "error":{ + "code":"LoadBalancerAttributeNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidConfigurationRequestException", + "error":{ + "code":"InvalidConfigurationRequest", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "RegisterInstancesWithLoadBalancer":{ + "name":"RegisterInstancesWithLoadBalancer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterEndPointsInput"}, + "output":{ + "shape":"RegisterEndPointsOutput", + "resultWrapper":"RegisterInstancesWithLoadBalancerResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidEndPointException", + "error":{ + "code":"InvalidInstance", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "RemoveTags":{ + "name":"RemoveTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveTagsInput"}, + "output":{ + "shape":"RemoveTagsOutput", + "resultWrapper":"RemoveTagsResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "SetLoadBalancerListenerSSLCertificate":{ + "name":"SetLoadBalancerListenerSSLCertificate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetLoadBalancerListenerSSLCertificateInput"}, + "output":{ + "shape":"SetLoadBalancerListenerSSLCertificateOutput", + "resultWrapper":"SetLoadBalancerListenerSSLCertificateResult" + }, + "errors":[ + { + "shape":"CertificateNotFoundException", + "error":{ + "code":"CertificateNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ListenerNotFoundException", + "error":{ + "code":"ListenerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidConfigurationRequestException", + "error":{ + "code":"InvalidConfigurationRequest", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "SetLoadBalancerPoliciesForBackendServer":{ + "name":"SetLoadBalancerPoliciesForBackendServer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetLoadBalancerPoliciesForBackendServerInput"}, + "output":{ + "shape":"SetLoadBalancerPoliciesForBackendServerOutput", + "resultWrapper":"SetLoadBalancerPoliciesForBackendServerResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"PolicyNotFoundException", + "error":{ + "code":"PolicyNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidConfigurationRequestException", + "error":{ + "code":"InvalidConfigurationRequest", + "httpStatusCode":409, + "senderFault":true + }, + 
"exception":true + } + ] + }, + "SetLoadBalancerPoliciesOfListener":{ + "name":"SetLoadBalancerPoliciesOfListener", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetLoadBalancerPoliciesOfListenerInput"}, + "output":{ + "shape":"SetLoadBalancerPoliciesOfListenerOutput", + "resultWrapper":"SetLoadBalancerPoliciesOfListenerResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"PolicyNotFoundException", + "error":{ + "code":"PolicyNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ListenerNotFoundException", + "error":{ + "code":"ListenerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidConfigurationRequestException", + "error":{ + "code":"InvalidConfigurationRequest", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + } + }, + "shapes":{ + "AccessLog":{ + "type":"structure", + "required":["Enabled"], + "members":{ + "Enabled":{"shape":"AccessLogEnabled"}, + "S3BucketName":{"shape":"S3BucketName"}, + "EmitInterval":{"shape":"AccessLogInterval"}, + "S3BucketPrefix":{"shape":"AccessLogPrefix"} + } + }, + "AccessLogEnabled":{"type":"boolean"}, + "AccessLogInterval":{"type":"integer"}, + "AccessLogPrefix":{"type":"string"}, + "AccessPointName":{"type":"string"}, + "AccessPointNotFoundException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "AccessPointPort":{"type":"integer"}, + "AddAvailabilityZonesInput":{ + "type":"structure", + "required":[ + "LoadBalancerName", + "AvailabilityZones" + ], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "AvailabilityZones":{"shape":"AvailabilityZones"} + } + }, + "AddAvailabilityZonesOutput":{ + "type":"structure", + "members":{ + "AvailabilityZones":{"shape":"AvailabilityZones"} + } + }, + "AddTagsInput":{ + "type":"structure", + "required":[ + "LoadBalancerNames", + "Tags" + ], + "members":{ + "LoadBalancerNames":{"shape":"LoadBalancerNames"}, + "Tags":{"shape":"TagList"} + } + }, + "AddTagsOutput":{ + "type":"structure", + "members":{ + } + }, + "AdditionalAttribute":{ + "type":"structure", + "members":{ + "Key":{"shape":"StringVal"}, + "Value":{"shape":"StringVal"} + } + }, + "AdditionalAttributes":{ + "type":"list", + "member":{"shape":"AdditionalAttribute"} + }, + "AppCookieStickinessPolicies":{ + "type":"list", + "member":{"shape":"AppCookieStickinessPolicy"} + }, + "AppCookieStickinessPolicy":{ + "type":"structure", + "members":{ + "PolicyName":{"shape":"PolicyName"}, + "CookieName":{"shape":"CookieName"} + } + }, + "ApplySecurityGroupsToLoadBalancerInput":{ + "type":"structure", + "required":[ + "LoadBalancerName", + "SecurityGroups" + ], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "SecurityGroups":{"shape":"SecurityGroups"} + } + }, + "ApplySecurityGroupsToLoadBalancerOutput":{ + "type":"structure", + "members":{ + "SecurityGroups":{"shape":"SecurityGroups"} + } + }, + "AttachLoadBalancerToSubnetsInput":{ + "type":"structure", + "required":[ + "LoadBalancerName", + "Subnets" + ], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "Subnets":{"shape":"Subnets"} + } + }, + "AttachLoadBalancerToSubnetsOutput":{ + "type":"structure", + "members":{ + "Subnets":{"shape":"Subnets"} 
+ } + }, + "AttributeName":{"type":"string"}, + "AttributeType":{"type":"string"}, + "AttributeValue":{"type":"string"}, + "AvailabilityZone":{"type":"string"}, + "AvailabilityZones":{ + "type":"list", + "member":{"shape":"AvailabilityZone"} + }, + "BackendServerDescription":{ + "type":"structure", + "members":{ + "InstancePort":{"shape":"InstancePort"}, + "PolicyNames":{"shape":"PolicyNames"} + } + }, + "BackendServerDescriptions":{ + "type":"list", + "member":{"shape":"BackendServerDescription"} + }, + "Cardinality":{"type":"string"}, + "CertificateNotFoundException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"CertificateNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ConfigureHealthCheckInput":{ + "type":"structure", + "required":[ + "LoadBalancerName", + "HealthCheck" + ], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "HealthCheck":{"shape":"HealthCheck"} + } + }, + "ConfigureHealthCheckOutput":{ + "type":"structure", + "members":{ + "HealthCheck":{"shape":"HealthCheck"} + } + }, + "ConnectionDraining":{ + "type":"structure", + "required":["Enabled"], + "members":{ + "Enabled":{"shape":"ConnectionDrainingEnabled"}, + "Timeout":{"shape":"ConnectionDrainingTimeout"} + } + }, + "ConnectionDrainingEnabled":{"type":"boolean"}, + "ConnectionDrainingTimeout":{"type":"integer"}, + "ConnectionSettings":{ + "type":"structure", + "required":["IdleTimeout"], + "members":{ + "IdleTimeout":{"shape":"IdleTimeout"} + } + }, + "CookieExpirationPeriod":{"type":"long"}, + "CookieName":{"type":"string"}, + "CreateAccessPointInput":{ + "type":"structure", + "required":[ + "LoadBalancerName", + "Listeners" + ], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "Listeners":{"shape":"Listeners"}, + "AvailabilityZones":{"shape":"AvailabilityZones"}, + "Subnets":{"shape":"Subnets"}, + "SecurityGroups":{"shape":"SecurityGroups"}, + "Scheme":{"shape":"LoadBalancerScheme"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateAccessPointOutput":{ + "type":"structure", + "members":{ + "DNSName":{"shape":"DNSName"} + } + }, + "CreateAppCookieStickinessPolicyInput":{ + "type":"structure", + "required":[ + "LoadBalancerName", + "PolicyName", + "CookieName" + ], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "PolicyName":{"shape":"PolicyName"}, + "CookieName":{"shape":"CookieName"} + } + }, + "CreateAppCookieStickinessPolicyOutput":{ + "type":"structure", + "members":{ + } + }, + "CreateLBCookieStickinessPolicyInput":{ + "type":"structure", + "required":[ + "LoadBalancerName", + "PolicyName" + ], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "PolicyName":{"shape":"PolicyName"}, + "CookieExpirationPeriod":{"shape":"CookieExpirationPeriod"} + } + }, + "CreateLBCookieStickinessPolicyOutput":{ + "type":"structure", + "members":{ + } + }, + "CreateLoadBalancerListenerInput":{ + "type":"structure", + "required":[ + "LoadBalancerName", + "Listeners" + ], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "Listeners":{"shape":"Listeners"} + } + }, + "CreateLoadBalancerListenerOutput":{ + "type":"structure", + "members":{ + } + }, + "CreateLoadBalancerPolicyInput":{ + "type":"structure", + "required":[ + "LoadBalancerName", + "PolicyName", + "PolicyTypeName" + ], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "PolicyName":{"shape":"PolicyName"}, + "PolicyTypeName":{"shape":"PolicyTypeName"}, + "PolicyAttributes":{"shape":"PolicyAttributes"} + } + }, + 
"CreateLoadBalancerPolicyOutput":{ + "type":"structure", + "members":{ + } + }, + "CreatedTime":{"type":"timestamp"}, + "CrossZoneLoadBalancing":{ + "type":"structure", + "required":["Enabled"], + "members":{ + "Enabled":{"shape":"CrossZoneLoadBalancingEnabled"} + } + }, + "CrossZoneLoadBalancingEnabled":{"type":"boolean"}, + "DNSName":{"type":"string"}, + "DefaultValue":{"type":"string"}, + "DeleteAccessPointInput":{ + "type":"structure", + "required":["LoadBalancerName"], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"} + } + }, + "DeleteAccessPointOutput":{ + "type":"structure", + "members":{ + } + }, + "DeleteLoadBalancerListenerInput":{ + "type":"structure", + "required":[ + "LoadBalancerName", + "LoadBalancerPorts" + ], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "LoadBalancerPorts":{"shape":"Ports"} + } + }, + "DeleteLoadBalancerListenerOutput":{ + "type":"structure", + "members":{ + } + }, + "DeleteLoadBalancerPolicyInput":{ + "type":"structure", + "required":[ + "LoadBalancerName", + "PolicyName" + ], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "PolicyName":{"shape":"PolicyName"} + } + }, + "DeleteLoadBalancerPolicyOutput":{ + "type":"structure", + "members":{ + } + }, + "DeregisterEndPointsInput":{ + "type":"structure", + "required":[ + "LoadBalancerName", + "Instances" + ], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "Instances":{"shape":"Instances"} + } + }, + "DeregisterEndPointsOutput":{ + "type":"structure", + "members":{ + "Instances":{"shape":"Instances"} + } + }, + "DescribeAccessPointsInput":{ + "type":"structure", + "members":{ + "LoadBalancerNames":{"shape":"LoadBalancerNames"}, + "Marker":{"shape":"Marker"}, + "PageSize":{"shape":"PageSize"} + } + }, + "DescribeAccessPointsOutput":{ + "type":"structure", + "members":{ + "LoadBalancerDescriptions":{"shape":"LoadBalancerDescriptions"}, + "NextMarker":{"shape":"Marker"} + } + }, + "DescribeEndPointStateInput":{ + "type":"structure", + "required":["LoadBalancerName"], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "Instances":{"shape":"Instances"} + } + }, + "DescribeEndPointStateOutput":{ + "type":"structure", + "members":{ + "InstanceStates":{"shape":"InstanceStates"} + } + }, + "DescribeLoadBalancerAttributesInput":{ + "type":"structure", + "required":["LoadBalancerName"], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"} + } + }, + "DescribeLoadBalancerAttributesOutput":{ + "type":"structure", + "members":{ + "LoadBalancerAttributes":{"shape":"LoadBalancerAttributes"} + } + }, + "DescribeLoadBalancerPoliciesInput":{ + "type":"structure", + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "PolicyNames":{"shape":"PolicyNames"} + } + }, + "DescribeLoadBalancerPoliciesOutput":{ + "type":"structure", + "members":{ + "PolicyDescriptions":{"shape":"PolicyDescriptions"} + } + }, + "DescribeLoadBalancerPolicyTypesInput":{ + "type":"structure", + "members":{ + "PolicyTypeNames":{"shape":"PolicyTypeNames"} + } + }, + "DescribeLoadBalancerPolicyTypesOutput":{ + "type":"structure", + "members":{ + "PolicyTypeDescriptions":{"shape":"PolicyTypeDescriptions"} + } + }, + "DescribeTagsInput":{ + "type":"structure", + "required":["LoadBalancerNames"], + "members":{ + "LoadBalancerNames":{"shape":"LoadBalancerNamesMax20"} + } + }, + "DescribeTagsOutput":{ + "type":"structure", + "members":{ + "TagDescriptions":{"shape":"TagDescriptions"} + } + }, + "Description":{"type":"string"}, + 
"DetachLoadBalancerFromSubnetsInput":{ + "type":"structure", + "required":[ + "LoadBalancerName", + "Subnets" + ], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "Subnets":{"shape":"Subnets"} + } + }, + "DetachLoadBalancerFromSubnetsOutput":{ + "type":"structure", + "members":{ + "Subnets":{"shape":"Subnets"} + } + }, + "DuplicateAccessPointNameException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DuplicateLoadBalancerName", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DuplicateListenerException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DuplicateListener", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DuplicatePolicyNameException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DuplicatePolicyName", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DuplicateTagKeysException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DuplicateTagKeys", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "EndPointPort":{"type":"integer"}, + "HealthCheck":{ + "type":"structure", + "required":[ + "Target", + "Interval", + "Timeout", + "UnhealthyThreshold", + "HealthyThreshold" + ], + "members":{ + "Target":{"shape":"HealthCheckTarget"}, + "Interval":{"shape":"HealthCheckInterval"}, + "Timeout":{"shape":"HealthCheckTimeout"}, + "UnhealthyThreshold":{"shape":"UnhealthyThreshold"}, + "HealthyThreshold":{"shape":"HealthyThreshold"} + } + }, + "HealthCheckInterval":{ + "type":"integer", + "min":1, + "max":300 + }, + "HealthCheckTarget":{"type":"string"}, + "HealthCheckTimeout":{ + "type":"integer", + "min":1, + "max":300 + }, + "HealthyThreshold":{ + "type":"integer", + "min":2, + "max":10 + }, + "IdleTimeout":{ + "type":"integer", + "min":1, + "max":3600 + }, + "Instance":{ + "type":"structure", + "members":{ + "InstanceId":{"shape":"InstanceId"} + } + }, + "InstanceId":{"type":"string"}, + "InstancePort":{ + "type":"integer", + "min":1, + "max":65535 + }, + "InstanceState":{ + "type":"structure", + "members":{ + "InstanceId":{"shape":"InstanceId"}, + "State":{"shape":"State"}, + "ReasonCode":{"shape":"ReasonCode"}, + "Description":{"shape":"Description"} + } + }, + "InstanceStates":{ + "type":"list", + "member":{"shape":"InstanceState"} + }, + "Instances":{ + "type":"list", + "member":{"shape":"Instance"} + }, + "InvalidConfigurationRequestException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidConfigurationRequest", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "InvalidEndPointException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidInstance", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidSchemeException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidScheme", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidSecurityGroupException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidSecurityGroup", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidSubnetException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidSubnet", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "LBCookieStickinessPolicies":{ + "type":"list", + "member":{"shape":"LBCookieStickinessPolicy"} + }, + "LBCookieStickinessPolicy":{ + 
"type":"structure", + "members":{ + "PolicyName":{"shape":"PolicyName"}, + "CookieExpirationPeriod":{"shape":"CookieExpirationPeriod"} + } + }, + "Listener":{ + "type":"structure", + "required":[ + "Protocol", + "LoadBalancerPort", + "InstancePort" + ], + "members":{ + "Protocol":{"shape":"Protocol"}, + "LoadBalancerPort":{"shape":"AccessPointPort"}, + "InstanceProtocol":{"shape":"Protocol"}, + "InstancePort":{"shape":"InstancePort"}, + "SSLCertificateId":{"shape":"SSLCertificateId"} + } + }, + "ListenerDescription":{ + "type":"structure", + "members":{ + "Listener":{"shape":"Listener"}, + "PolicyNames":{"shape":"PolicyNames"} + } + }, + "ListenerDescriptions":{ + "type":"list", + "member":{"shape":"ListenerDescription"} + }, + "ListenerNotFoundException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ListenerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Listeners":{ + "type":"list", + "member":{"shape":"Listener"} + }, + "LoadBalancerAttributeNotFoundException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"LoadBalancerAttributeNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "LoadBalancerAttributes":{ + "type":"structure", + "members":{ + "CrossZoneLoadBalancing":{"shape":"CrossZoneLoadBalancing"}, + "AccessLog":{"shape":"AccessLog"}, + "ConnectionDraining":{"shape":"ConnectionDraining"}, + "ConnectionSettings":{"shape":"ConnectionSettings"}, + "AdditionalAttributes":{"shape":"AdditionalAttributes"} + } + }, + "LoadBalancerDescription":{ + "type":"structure", + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "DNSName":{"shape":"DNSName"}, + "CanonicalHostedZoneName":{"shape":"DNSName"}, + "CanonicalHostedZoneNameID":{"shape":"DNSName"}, + "ListenerDescriptions":{"shape":"ListenerDescriptions"}, + "Policies":{"shape":"Policies"}, + "BackendServerDescriptions":{"shape":"BackendServerDescriptions"}, + "AvailabilityZones":{"shape":"AvailabilityZones"}, + "Subnets":{"shape":"Subnets"}, + "VPCId":{"shape":"VPCId"}, + "Instances":{"shape":"Instances"}, + "HealthCheck":{"shape":"HealthCheck"}, + "SourceSecurityGroup":{"shape":"SourceSecurityGroup"}, + "SecurityGroups":{"shape":"SecurityGroups"}, + "CreatedTime":{"shape":"CreatedTime"}, + "Scheme":{"shape":"LoadBalancerScheme"} + } + }, + "LoadBalancerDescriptions":{ + "type":"list", + "member":{"shape":"LoadBalancerDescription"} + }, + "LoadBalancerNames":{ + "type":"list", + "member":{"shape":"AccessPointName"} + }, + "LoadBalancerNamesMax20":{ + "type":"list", + "member":{"shape":"AccessPointName"}, + "min":1, + "max":20 + }, + "LoadBalancerScheme":{"type":"string"}, + "Marker":{"type":"string"}, + "ModifyLoadBalancerAttributesInput":{ + "type":"structure", + "required":[ + "LoadBalancerName", + "LoadBalancerAttributes" + ], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "LoadBalancerAttributes":{"shape":"LoadBalancerAttributes"} + } + }, + "ModifyLoadBalancerAttributesOutput":{ + "type":"structure", + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "LoadBalancerAttributes":{"shape":"LoadBalancerAttributes"} + } + }, + "PageSize":{ + "type":"integer", + "min":1, + "max":400 + }, + "Policies":{ + "type":"structure", + "members":{ + "AppCookieStickinessPolicies":{"shape":"AppCookieStickinessPolicies"}, + "LBCookieStickinessPolicies":{"shape":"LBCookieStickinessPolicies"}, + "OtherPolicies":{"shape":"PolicyNames"} + } + }, + "PolicyAttribute":{ + "type":"structure", + "members":{ + 
"AttributeName":{"shape":"AttributeName"}, + "AttributeValue":{"shape":"AttributeValue"} + } + }, + "PolicyAttributeDescription":{ + "type":"structure", + "members":{ + "AttributeName":{"shape":"AttributeName"}, + "AttributeValue":{"shape":"AttributeValue"} + } + }, + "PolicyAttributeDescriptions":{ + "type":"list", + "member":{"shape":"PolicyAttributeDescription"} + }, + "PolicyAttributeTypeDescription":{ + "type":"structure", + "members":{ + "AttributeName":{"shape":"AttributeName"}, + "AttributeType":{"shape":"AttributeType"}, + "Description":{"shape":"Description"}, + "DefaultValue":{"shape":"DefaultValue"}, + "Cardinality":{"shape":"Cardinality"} + } + }, + "PolicyAttributeTypeDescriptions":{ + "type":"list", + "member":{"shape":"PolicyAttributeTypeDescription"} + }, + "PolicyAttributes":{ + "type":"list", + "member":{"shape":"PolicyAttribute"} + }, + "PolicyDescription":{ + "type":"structure", + "members":{ + "PolicyName":{"shape":"PolicyName"}, + "PolicyTypeName":{"shape":"PolicyTypeName"}, + "PolicyAttributeDescriptions":{"shape":"PolicyAttributeDescriptions"} + } + }, + "PolicyDescriptions":{ + "type":"list", + "member":{"shape":"PolicyDescription"} + }, + "PolicyName":{"type":"string"}, + "PolicyNames":{ + "type":"list", + "member":{"shape":"PolicyName"} + }, + "PolicyNotFoundException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"PolicyNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "PolicyTypeDescription":{ + "type":"structure", + "members":{ + "PolicyTypeName":{"shape":"PolicyTypeName"}, + "Description":{"shape":"Description"}, + "PolicyAttributeTypeDescriptions":{"shape":"PolicyAttributeTypeDescriptions"} + } + }, + "PolicyTypeDescriptions":{ + "type":"list", + "member":{"shape":"PolicyTypeDescription"} + }, + "PolicyTypeName":{"type":"string"}, + "PolicyTypeNames":{ + "type":"list", + "member":{"shape":"PolicyTypeName"} + }, + "PolicyTypeNotFoundException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"PolicyTypeNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Ports":{ + "type":"list", + "member":{"shape":"AccessPointPort"} + }, + "Protocol":{"type":"string"}, + "ReasonCode":{"type":"string"}, + "RegisterEndPointsInput":{ + "type":"structure", + "required":[ + "LoadBalancerName", + "Instances" + ], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "Instances":{"shape":"Instances"} + } + }, + "RegisterEndPointsOutput":{ + "type":"structure", + "members":{ + "Instances":{"shape":"Instances"} + } + }, + "RemoveAvailabilityZonesInput":{ + "type":"structure", + "required":[ + "LoadBalancerName", + "AvailabilityZones" + ], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "AvailabilityZones":{"shape":"AvailabilityZones"} + } + }, + "RemoveAvailabilityZonesOutput":{ + "type":"structure", + "members":{ + "AvailabilityZones":{"shape":"AvailabilityZones"} + } + }, + "RemoveTagsInput":{ + "type":"structure", + "required":[ + "LoadBalancerNames", + "Tags" + ], + "members":{ + "LoadBalancerNames":{"shape":"LoadBalancerNames"}, + "Tags":{"shape":"TagKeyList"} + } + }, + "RemoveTagsOutput":{ + "type":"structure", + "members":{ + } + }, + "S3BucketName":{"type":"string"}, + "SSLCertificateId":{"type":"string"}, + "SecurityGroupId":{"type":"string"}, + "SecurityGroupName":{"type":"string"}, + "SecurityGroupOwnerAlias":{"type":"string"}, + "SecurityGroups":{ + "type":"list", + "member":{"shape":"SecurityGroupId"} + }, + 
"SetLoadBalancerListenerSSLCertificateInput":{ + "type":"structure", + "required":[ + "LoadBalancerName", + "LoadBalancerPort", + "SSLCertificateId" + ], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "LoadBalancerPort":{"shape":"AccessPointPort"}, + "SSLCertificateId":{"shape":"SSLCertificateId"} + } + }, + "SetLoadBalancerListenerSSLCertificateOutput":{ + "type":"structure", + "members":{ + } + }, + "SetLoadBalancerPoliciesForBackendServerInput":{ + "type":"structure", + "required":[ + "LoadBalancerName", + "InstancePort", + "PolicyNames" + ], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "InstancePort":{"shape":"EndPointPort"}, + "PolicyNames":{"shape":"PolicyNames"} + } + }, + "SetLoadBalancerPoliciesForBackendServerOutput":{ + "type":"structure", + "members":{ + } + }, + "SetLoadBalancerPoliciesOfListenerInput":{ + "type":"structure", + "required":[ + "LoadBalancerName", + "LoadBalancerPort", + "PolicyNames" + ], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "LoadBalancerPort":{"shape":"AccessPointPort"}, + "PolicyNames":{"shape":"PolicyNames"} + } + }, + "SetLoadBalancerPoliciesOfListenerOutput":{ + "type":"structure", + "members":{ + } + }, + "SourceSecurityGroup":{ + "type":"structure", + "members":{ + "OwnerAlias":{"shape":"SecurityGroupOwnerAlias"}, + "GroupName":{"shape":"SecurityGroupName"} + } + }, + "State":{"type":"string"}, + "StringVal":{"type":"string"}, + "SubnetId":{"type":"string"}, + "SubnetNotFoundException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubnetNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Subnets":{ + "type":"list", + "member":{"shape":"SubnetId"} + }, + "Tag":{ + "type":"structure", + "required":["Key"], + "members":{ + "Key":{"shape":"TagKey"}, + "Value":{"shape":"TagValue"} + } + }, + "TagDescription":{ + "type":"structure", + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "Tags":{"shape":"TagList"} + } + }, + "TagDescriptions":{ + "type":"list", + "member":{"shape":"TagDescription"} + }, + "TagKey":{ + "type":"string", + "min":1, + "max":128, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKeyOnly"}, + "min":1 + }, + "TagKeyOnly":{ + "type":"structure", + "members":{ + "Key":{"shape":"TagKey"} + } + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "min":1 + }, + "TagValue":{ + "type":"string", + "min":0, + "max":256, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, + "TooManyAccessPointsException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"TooManyLoadBalancers", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "TooManyPoliciesException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"TooManyPolicies", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "TooManyTagsException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"TooManyTags", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "UnhealthyThreshold":{ + "type":"integer", + "min":2, + "max":10 + }, + "VPCId":{"type":"string"} + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/elasticloadbalancing/2012-06-01/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticloadbalancing/2012-06-01/docs-2.json new file mode 100644 index 000000000..165c0bc0f --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticloadbalancing/2012-06-01/docs-2.json @@ -0,0 +1,1078 @@ +{ + "version": "2.0", + "operations": { + "AddTags": "

    Adds the specified tags to the specified load balancer. Each load balancer can have a maximum of 10 tags.

    Each tag consists of a key and an optional value. If a tag with the same key is already associated with the load balancer, AddTags updates its value.

    For more information, see Tag Your Load Balancer in the Elastic Load Balancing Developer Guide.
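
    A minimal sketch of the call through the vendored Go client, assuming an elb client built as svc := elb.New(sess) and a placeholder load balancer name:

        _, err := svc.AddTags(&elb.AddTagsInput{
            LoadBalancerNames: aws.StringSlice([]string{"my-load-balancer"}), // placeholder
            Tags: []*elb.Tag{
                // Re-using an existing key updates its value, per the doc above.
                {Key: aws.String("environment"), Value: aws.String("test")},
            },
        })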

    ", + "ApplySecurityGroupsToLoadBalancer": "

    Associates one or more security groups with your load balancer in a virtual private cloud (VPC). The specified security groups override the previously associated security groups.

    For more information, see Security Groups for Load Balancers in a VPC in the Elastic Load Balancing Developer Guide.

    ", + "AttachLoadBalancerToSubnets": "

    Adds one or more subnets to the set of configured subnets for the specified load balancer.

    The load balancer evenly distributes requests across all registered subnets. For more information, see Add or Remove Subnets for Your Load Balancer in a VPC in the Elastic Load Balancing Developer Guide.

    ", + "ConfigureHealthCheck": "

    Specifies the health check settings to use when evaluating the health state of your back-end instances.

    For more information, see Configure Health Checks in the Elastic Load Balancing Developer Guide.
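
    As a sketch under the same assumptions (placeholder load balancer name, illustrative ping target), the health check settings map onto the HealthCheck shape defined in api-2.json:

        // Interval and Timeout are 1-300 seconds; both thresholds are 2-10,
        // per the shape constraints in this model.
        _, err := svc.ConfigureHealthCheck(&elb.ConfigureHealthCheckInput{
            LoadBalancerName: aws.String("my-load-balancer"), // placeholder
            HealthCheck: &elb.HealthCheck{
                Target:             aws.String("HTTP:80/ping"), // illustrative target
                Interval:           aws.Int64(30),
                Timeout:            aws.Int64(5),
                UnhealthyThreshold: aws.Int64(2),
                HealthyThreshold:   aws.Int64(2),
            },
        })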

    ", + "CreateAppCookieStickinessPolicy": "

    Generates a stickiness policy with sticky session lifetimes that follow that of an application-generated cookie. This policy can be associated only with HTTP/HTTPS listeners.

    This policy is similar to the policy created by CreateLBCookieStickinessPolicy, except that the lifetime of the special Elastic Load Balancing cookie, AWSELB, follows the lifetime of the application-generated cookie specified in the policy configuration. The load balancer only inserts a new stickiness cookie when the application response includes a new application cookie.

    If the application cookie is explicitly removed or expires, the session stops being sticky until a new application cookie is issued.

    For more information, see Application-Controlled Session Stickiness in the Elastic Load Balancing Developer Guide.

    ", + "CreateLBCookieStickinessPolicy": "

    Generates a stickiness policy with sticky session lifetimes controlled by the lifetime of the browser (user-agent) or a specified expiration period. This policy can be associated only with HTTP/HTTPS listeners.

    When a load balancer implements this policy, the load balancer uses a special cookie to track the back-end server instance for each request. When the load balancer receives a request, it first checks to see if this cookie is present in the request. If so, the load balancer sends the request to the application server specified in the cookie. If not, the load balancer sends the request to a server that is chosen based on the existing load-balancing algorithm.

    A cookie is inserted into the response for binding subsequent requests from the same user to that server. The validity of the cookie is based on the cookie expiration time, which is specified in the policy configuration.

    For more information, see Duration-Based Session Stickiness in the Elastic Load Balancing Developer Guide.
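
    A sketch of creating a duration-based policy with the same assumed svc client (names are placeholders):

        // Omitting CookieExpirationPeriod would instead tie the cookie's
        // lifetime to the browser session.
        _, err := svc.CreateLBCookieStickinessPolicy(&elb.CreateLBCookieStickinessPolicyInput{
            LoadBalancerName:       aws.String("my-load-balancer"),   // placeholder
            PolicyName:             aws.String("my-duration-policy"), // placeholder
            CookieExpirationPeriod: aws.Int64(300),                   // seconds
        })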

    ", + "CreateLoadBalancer": "

    Creates a load balancer.

    If the call completes successfully, a new load balancer is created with a unique Domain Name Service (DNS) name. The load balancer receives incoming traffic and routes it to the registered instances. For more information, see How Elastic Load Balancing Works in the Elastic Load Balancing Developer Guide.

    You can create up to 20 load balancers per region per account. You can request an increase for the number of load balancers for your account. For more information, see Elastic Load Balancing Limits in the Elastic Load Balancing Developer Guide.
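
    A self-contained sketch of creating a load balancer with the vendored SDK; the region, names, subnet, and security group below are all placeholders:

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/elb"
        )

        func main() {
            sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
            svc := elb.New(sess)

            out, err := svc.CreateLoadBalancer(&elb.CreateLoadBalancerInput{
                LoadBalancerName: aws.String("my-load-balancer"), // placeholder
                Listeners: []*elb.Listener{{
                    Protocol:         aws.String("HTTP"),
                    LoadBalancerPort: aws.Int64(80),
                    InstancePort:     aws.Int64(8080),
                }},
                Subnets:        aws.StringSlice([]string{"subnet-12345678"}), // placeholder
                SecurityGroups: aws.StringSlice([]string{"sg-12345678"}),     // placeholder
            })
            if err != nil {
                fmt.Println("CreateLoadBalancer failed:", err)
                return
            }
            // On success the service returns the new load balancer's unique DNS name.
            fmt.Println("DNS name:", aws.StringValue(out.DNSName))
        }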

    ", + "CreateLoadBalancerListeners": "

    Creates one or more listeners for the specified load balancer. If a listener with the specified port does not already exist, it is created; otherwise, the properties of the new listener must match the properties of the existing listener.

    For more information, see Add a Listener to Your Load Balancer in the Elastic Load Balancing Developer Guide.

    ", + "CreateLoadBalancerPolicy": "

    Creates a policy with the specified attributes for the specified load balancer.

    Policies are settings that are saved for your load balancer and that can be applied to the front-end listener or the back-end application server, depending on the policy type.

    ", + "DeleteLoadBalancer": "

    Deletes the specified load balancer.

    If you are attempting to recreate a load balancer, you must reconfigure all settings. The DNS name associated with a deleted load balancer is no longer usable. The name and associated DNS record of the deleted load balancer no longer exist, and traffic sent to any of its IP addresses is no longer delivered to back-end instances.

    If the load balancer does not exist or has already been deleted, the call to DeleteLoadBalancer still succeeds.

    ", + "DeleteLoadBalancerListeners": "

    Deletes the specified listeners from the specified load balancer.

    ", + "DeleteLoadBalancerPolicy": "

    Deletes the specified policy from the specified load balancer. This policy must not be enabled for any listeners.

    ", + "DeregisterInstancesFromLoadBalancer": "

    Deregisters the specified instances from the specified load balancer. After the instance is deregistered, it no longer receives traffic from the load balancer.

    You can use DescribeLoadBalancers to verify that the instance is deregistered from the load balancer.

    For more information, see Deregister and Register Amazon EC2 Instances in the Elastic Load Balancing Developer Guide.

    ", + "DescribeInstanceHealth": "

    Describes the state of the specified instances with respect to the specified load balancer. If no instances are specified, the call describes the state of all instances that are currently registered with the load balancer. If instances are specified, their state is returned even if they are no longer registered with the load balancer. The state of terminated instances is not returned.

    ", + "DescribeLoadBalancerAttributes": "

    Describes the attributes for the specified load balancer.

    ", + "DescribeLoadBalancerPolicies": "

    Describes the specified policies.

    If you specify a load balancer name, the action returns the descriptions of all policies created for the load balancer. If you specify a policy name associated with your load balancer, the action returns the description of that policy. If you don't specify a load balancer name, the action returns descriptions of the specified sample policies, or descriptions of all sample policies. The names of the sample policies have the ELBSample- prefix.

    ", + "DescribeLoadBalancerPolicyTypes": "

    Describes the specified load balancer policy types.

    You can use these policy types with CreateLoadBalancerPolicy to create policy configurations for a load balancer.

    ", + "DescribeLoadBalancers": "

    Describes the specified load balancers. If no load balancers are specified, the call describes all of your load balancers.

    ", + "DescribeTags": "

    Describes the tags associated with the specified load balancers.

    ", + "DetachLoadBalancerFromSubnets": "

    Removes the specified subnets from the set of configured subnets for the load balancer.

    After a subnet is removed, all EC2 instances registered with the load balancer in the removed subnet go into the OutOfService state. Then, the load balancer balances the traffic among the remaining routable subnets.

    ", + "DisableAvailabilityZonesForLoadBalancer": "

    Removes the specified Availability Zones from the set of Availability Zones for the specified load balancer.

    There must be at least one Availability Zone registered with a load balancer at all times. After an Availability Zone is removed, all instances registered with the load balancer that are in the removed Availability Zone go into the OutOfService state. Then, the load balancer attempts to equally balance the traffic among its remaining Availability Zones.

    For more information, see Disable an Availability Zone from a Load-Balanced Application in the Elastic Load Balancing Developer Guide.

    ", + "EnableAvailabilityZonesForLoadBalancer": "

    Adds the specified Availability Zones to the set of Availability Zones for the specified load balancer.

    The load balancer evenly distributes requests across all its registered Availability Zones that contain instances.

    For more information, see Add Availability Zone in the Elastic Load Balancing Developer Guide.

    ", + "ModifyLoadBalancerAttributes": "

    Modifies the attributes of the specified load balancer.

    You can modify the load balancer attributes, such as AccessLogs, ConnectionDraining, and CrossZoneLoadBalancing by either enabling or disabling them. Or, you can modify the load balancer attribute ConnectionSettings by specifying an idle connection timeout value for your load balancer.

    For more information, see the following in the Elastic Load Balancing Developer Guide:

    ", + "RegisterInstancesWithLoadBalancer": "

    Adds the specified instances to the specified load balancer.

    The instance must be a running instance in the same network as the load balancer (EC2-Classic or the same VPC). If you have EC2-Classic instances and a load balancer in a VPC with ClassicLink enabled, you can link the EC2-Classic instances to that VPC and then register the linked EC2-Classic instances with the load balancer in the VPC.

    Note that RegisterInstancesWithLoadBalancer completes when the request has been registered. Instance registration takes a little time to complete. To check the state of the registered instances, use DescribeLoadBalancers or DescribeInstanceHealth.

    After the instance is registered, it starts receiving traffic and requests from the load balancer. Any instance that is not in one of the Availability Zones registered for the load balancer is moved to the OutOfService state. If an Availability Zone is added to the load balancer later, any instances registered with the load balancer move to the InService state.

    If you stop an instance registered with a load balancer and then start it, the IP addresses associated with the instance change. Elastic Load Balancing cannot recognize the new IP address, which prevents it from routing traffic to the instances. We recommend that you use the following sequence: stop the instance, deregister the instance, start the instance, and then register the instance. To deregister instances from a load balancer, use DeregisterInstancesFromLoadBalancer.

    For more information, see Deregister and Register EC2 Instances in the Elastic Load Balancing Developer Guide.
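
    A sketch of that recommended sequence, using the EC2 waiters from this SDK to block between steps (the region, instance ID, and load balancer name are assumptions):

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/ec2"
            "github.com/aws/aws-sdk-go/service/elb"
        )

        // restartRegisteredInstance follows the documented order:
        // stop, deregister, start, then register again.
        func restartRegisteredInstance(lbName, instanceID string) error {
            sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
            elbSvc, ec2Svc := elb.New(sess), ec2.New(sess)
            ids := []*string{aws.String(instanceID)}
            lbInstances := []*elb.Instance{{InstanceId: aws.String(instanceID)}}

            if _, err := ec2Svc.StopInstances(&ec2.StopInstancesInput{InstanceIds: ids}); err != nil {
                return err
            }
            if _, err := elbSvc.DeregisterInstancesFromLoadBalancer(&elb.DeregisterInstancesFromLoadBalancerInput{
                LoadBalancerName: aws.String(lbName), Instances: lbInstances,
            }); err != nil {
                return err
            }
            if err := ec2Svc.WaitUntilInstanceStopped(&ec2.DescribeInstancesInput{InstanceIds: ids}); err != nil {
                return err
            }
            if _, err := ec2Svc.StartInstances(&ec2.StartInstancesInput{InstanceIds: ids}); err != nil {
                return err
            }
            if err := ec2Svc.WaitUntilInstanceRunning(&ec2.DescribeInstancesInput{InstanceIds: ids}); err != nil {
                return err
            }
            _, err := elbSvc.RegisterInstancesWithLoadBalancer(&elb.RegisterInstancesWithLoadBalancerInput{
                LoadBalancerName: aws.String(lbName), Instances: lbInstances,
            })
            return err
        }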

    ", + "RemoveTags": "

    Removes one or more tags from the specified load balancer.

    ", + "SetLoadBalancerListenerSSLCertificate": "

    Sets the certificate that terminates the specified listener's SSL connections. The specified certificate replaces any prior certificate that was used on the same load balancer and port.

    For more information about updating your SSL certificate, see Updating an SSL Certificate for a Load Balancer in the Elastic Load Balancing Developer Guide.

    ", + "SetLoadBalancerPoliciesForBackendServer": "

    Replaces the set of policies associated with the specified port on which the back-end server is listening with a new set of policies. At this time, only the back-end server authentication policy type can be applied to the back-end ports; this policy type is composed of multiple public key policies.

    Each time you use SetLoadBalancerPoliciesForBackendServer to enable the policies, use the PolicyNames parameter to list the policies that you want to enable.

    You can use DescribeLoadBalancers or DescribeLoadBalancerPolicies to verify that the policy is associated with the back-end server.
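
    A sketch that replaces the back-end policy set and then verifies it (the port and policy names are assumptions; remember that PolicyNames replaces the whole set on every call):

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/elb"
        )

        // setBackendPolicies replaces the policy set on the given instance port.
        // An empty policyNames slice removes all policies from the port.
        func setBackendPolicies(svc *elb.ELB, lbName string, port int64, policyNames []string) error {
            _, err := svc.SetLoadBalancerPoliciesForBackendServer(&elb.SetLoadBalancerPoliciesForBackendServerInput{
                LoadBalancerName: aws.String(lbName),
                InstancePort:     aws.Int64(port),
                PolicyNames:      aws.StringSlice(policyNames),
            })
            if err != nil {
                return err
            }
            // Verify: the enabled policies appear under BackendServerDescriptions.
            out, err := svc.DescribeLoadBalancers(&elb.DescribeLoadBalancersInput{
                LoadBalancerNames: []*string{aws.String(lbName)},
            })
            if err != nil {
                return err
            }
            for _, b := range out.LoadBalancerDescriptions[0].BackendServerDescriptions {
                _ = b.PolicyNames // inspect as needed
            }
            return nil
        }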

    ", + "SetLoadBalancerPoliciesOfListener": "

    Associates, updates, or disables a policy with a listener for the specified load balancer. You can associate multiple policies with a listener.

    " + }, + "service": "Elastic Load Balancing

    Elastic Load Balancing distributes incoming traffic across your EC2 instances.

    For information about the features of Elastic Load Balancing, see What Is Elastic Load Balancing? in the Elastic Load Balancing Developer Guide.

    For information about the AWS regions supported by Elastic Load Balancing, see Regions and Endpoints - Elastic Load Balancing in the Amazon Web Services General Reference.

    All Elastic Load Balancing operations are idempotent, which means that they complete at most one time. If you repeat an operation, it succeeds with a 200 OK response code.

    ", + "shapes": { + "AccessLog": { + "base": "

    Information about the AccessLog attribute.

    ", + "refs": { + "LoadBalancerAttributes$AccessLog": "

    If enabled, the load balancer captures detailed information of all requests and delivers the information to the Amazon S3 bucket that you specify.

    For more information, see Enable Access Logs in the Elastic Load Balancing Developer Guide.

    " + } + }, + "AccessLogEnabled": { + "base": null, + "refs": { + "AccessLog$Enabled": "

    Specifies whether access log is enabled for the load balancer.

    " + } + }, + "AccessLogInterval": { + "base": null, + "refs": { + "AccessLog$EmitInterval": "

    The interval for publishing the access logs. You can specify an interval of either 5 minutes or 60 minutes.

    Default: 60 minutes

    " + } + }, + "AccessLogPrefix": { + "base": null, + "refs": { + "AccessLog$S3BucketPrefix": "

    The logical hierarchy you created for your Amazon S3 bucket, for example my-bucket-prefix/prod. If the prefix is not provided, the log is placed at the root level of the bucket.
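
    Taken together, the AccessLog attribute fields configure log delivery. A sketch that enables access logging via ModifyLoadBalancerAttributes (the bucket name, prefix, and interval values are assumptions):

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/elb"
        )

        // enableAccessLogs publishes access logs to S3 every 60 minutes.
        func enableAccessLogs(svc *elb.ELB, lbName string) error {
            _, err := svc.ModifyLoadBalancerAttributes(&elb.ModifyLoadBalancerAttributesInput{
                LoadBalancerName: aws.String(lbName),
                LoadBalancerAttributes: &elb.LoadBalancerAttributes{
                    AccessLog: &elb.AccessLog{
                        Enabled:        aws.Bool(true),
                        S3BucketName:   aws.String("my-loadbalancer-logs"), // assumed bucket
                        S3BucketPrefix: aws.String("my-bucket-prefix/prod"),
                        EmitInterval:   aws.Int64(60), // 5 or 60 minutes
                    },
                },
            })
            return err
        }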

    " + } + }, + "AccessPointName": { + "base": null, + "refs": { + "AddAvailabilityZonesInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "ApplySecurityGroupsToLoadBalancerInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "AttachLoadBalancerToSubnetsInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "ConfigureHealthCheckInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "CreateAccessPointInput$LoadBalancerName": "

    The name of the load balancer.

    This name must be unique within your set of load balancers for the region, must have a maximum of 32 characters, must contain only alphanumeric characters or hyphens, and cannot begin or end with a hyphen.

    ", + "CreateAppCookieStickinessPolicyInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "CreateLBCookieStickinessPolicyInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "CreateLoadBalancerListenerInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "CreateLoadBalancerPolicyInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "DeleteAccessPointInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "DeleteLoadBalancerListenerInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "DeleteLoadBalancerPolicyInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "DeregisterEndPointsInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "DescribeEndPointStateInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "DescribeLoadBalancerAttributesInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "DescribeLoadBalancerPoliciesInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "DetachLoadBalancerFromSubnetsInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "LoadBalancerDescription$LoadBalancerName": "

    The name of the load balancer.

    ", + "LoadBalancerNames$member": null, + "LoadBalancerNamesMax20$member": null, + "ModifyLoadBalancerAttributesInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "ModifyLoadBalancerAttributesOutput$LoadBalancerName": "

    The name of the load balancer.

    ", + "RegisterEndPointsInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "RemoveAvailabilityZonesInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "SetLoadBalancerListenerSSLCertificateInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "SetLoadBalancerPoliciesForBackendServerInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "SetLoadBalancerPoliciesOfListenerInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "TagDescription$LoadBalancerName": "

    The name of the load balancer.

    " + } + }, + "AccessPointNotFoundException": { + "base": "

    The specified load balancer does not exist.

    ", + "refs": { + } + }, + "AccessPointPort": { + "base": null, + "refs": { + "Listener$LoadBalancerPort": "

    The port on which the load balancer is listening. On EC2-VPC, you can specify any port from the range 1-65535. On EC2-Classic, you can specify any port from the following list: 25, 80, 443, 465, 587, 1024-65535.

    ", + "Ports$member": null, + "SetLoadBalancerListenerSSLCertificateInput$LoadBalancerPort": "

    The port that uses the specified SSL certificate.

    ", + "SetLoadBalancerPoliciesOfListenerInput$LoadBalancerPort": "

    The external port of the load balancer for the policy.

    " + } + }, + "AddAvailabilityZonesInput": { + "base": null, + "refs": { + } + }, + "AddAvailabilityZonesOutput": { + "base": null, + "refs": { + } + }, + "AddTagsInput": { + "base": null, + "refs": { + } + }, + "AddTagsOutput": { + "base": null, + "refs": { + } + }, + "AdditionalAttribute": { + "base": "

    This data type is reserved.

    ", + "refs": { + "AdditionalAttributes$member": null + } + }, + "AdditionalAttributes": { + "base": null, + "refs": { + "LoadBalancerAttributes$AdditionalAttributes": "

    This parameter is reserved.

    " + } + }, + "AppCookieStickinessPolicies": { + "base": null, + "refs": { + "Policies$AppCookieStickinessPolicies": "

    The stickiness policies created using CreateAppCookieStickinessPolicy.

    " + } + }, + "AppCookieStickinessPolicy": { + "base": "

    Information about a policy for application-controlled session stickiness.

    ", + "refs": { + "AppCookieStickinessPolicies$member": null + } + }, + "ApplySecurityGroupsToLoadBalancerInput": { + "base": null, + "refs": { + } + }, + "ApplySecurityGroupsToLoadBalancerOutput": { + "base": null, + "refs": { + } + }, + "AttachLoadBalancerToSubnetsInput": { + "base": null, + "refs": { + } + }, + "AttachLoadBalancerToSubnetsOutput": { + "base": null, + "refs": { + } + }, + "AttributeName": { + "base": null, + "refs": { + "PolicyAttribute$AttributeName": "

    The name of the attribute.

    ", + "PolicyAttributeDescription$AttributeName": "

    The name of the attribute.

    ", + "PolicyAttributeTypeDescription$AttributeName": "

    The name of the attribute.

    " + } + }, + "AttributeType": { + "base": null, + "refs": { + "PolicyAttributeTypeDescription$AttributeType": "

    The type of the attribute. For example, Boolean or Integer.

    " + } + }, + "AttributeValue": { + "base": null, + "refs": { + "PolicyAttribute$AttributeValue": "

    The value of the attribute.

    ", + "PolicyAttributeDescription$AttributeValue": "

    The value of the attribute.

    " + } + }, + "AvailabilityZone": { + "base": null, + "refs": { + "AvailabilityZones$member": null + } + }, + "AvailabilityZones": { + "base": null, + "refs": { + "AddAvailabilityZonesInput$AvailabilityZones": "

    The Availability Zones. These must be in the same region as the load balancer.

    ", + "AddAvailabilityZonesOutput$AvailabilityZones": "

    The updated list of Availability Zones for the load balancer.

    ", + "CreateAccessPointInput$AvailabilityZones": "

    One or more Availability Zones from the same region as the load balancer. Traffic is equally distributed across all specified Availability Zones.

    You must specify at least one Availability Zone.

    You can add more Availability Zones after you create the load balancer using EnableAvailabilityZonesForLoadBalancer.
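
    These CreateAccessPointInput shapes back the SDK's CreateLoadBalancer call. A sketch that creates a load balancer in two Availability Zones with one HTTP listener (the name, zones, and ports are assumptions):

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/elb"
        )

        // createLoadBalancer creates a Classic load balancer and returns its DNS name.
        func createLoadBalancer(svc *elb.ELB) (string, error) {
            out, err := svc.CreateLoadBalancer(&elb.CreateLoadBalancerInput{
                LoadBalancerName:  aws.String("my-load-balancer"),
                AvailabilityZones: aws.StringSlice([]string{"us-east-1a", "us-east-1b"}),
                Listeners: []*elb.Listener{{
                    Protocol:         aws.String("HTTP"),
                    LoadBalancerPort: aws.Int64(80),
                    InstanceProtocol: aws.String("HTTP"),
                    InstancePort:     aws.Int64(8080),
                }},
            })
            if err != nil {
                return "", err
            }
            fmt.Println("created:", aws.StringValue(out.DNSName))
            return aws.StringValue(out.DNSName), nil
        }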

    ", + "LoadBalancerDescription$AvailabilityZones": "

    The Availability Zones for the load balancer.

    ", + "RemoveAvailabilityZonesInput$AvailabilityZones": "

    The Availability Zones.

    ", + "RemoveAvailabilityZonesOutput$AvailabilityZones": "

    The remaining Availability Zones for the load balancer.

    " + } + }, + "BackendServerDescription": { + "base": "

    Information about the configuration of a back-end server.

    ", + "refs": { + "BackendServerDescriptions$member": null + } + }, + "BackendServerDescriptions": { + "base": null, + "refs": { + "LoadBalancerDescription$BackendServerDescriptions": "

    Information about the back-end servers.

    " + } + }, + "Cardinality": { + "base": null, + "refs": { + "PolicyAttributeTypeDescription$Cardinality": "

    The cardinality of the attribute.

    Valid values:

    • ONE(1) : Single value required
    • ZERO_OR_ONE(0..1) : Up to one value can be supplied
    • ZERO_OR_MORE(0..*) : Optional. Multiple values are allowed
    • ONE_OR_MORE(1..*) : Required. Multiple values are allowed
    " + } + }, + "CertificateNotFoundException": { + "base": "

    The specified SSL ID does not refer to a valid SSL certificate in AWS Identity and Access Management (IAM).

    ", + "refs": { + } + }, + "ConfigureHealthCheckInput": { + "base": null, + "refs": { + } + }, + "ConfigureHealthCheckOutput": { + "base": null, + "refs": { + } + }, + "ConnectionDraining": { + "base": "

    Information about the ConnectionDraining attribute.

    ", + "refs": { + "LoadBalancerAttributes$ConnectionDraining": "

    If enabled, the load balancer allows existing requests to complete before the load balancer shifts traffic away from a deregistered or unhealthy back-end instance.

    For more information, see Enable Connection Draining in the Elastic Load Balancing Developer Guide.

    " + } + }, + "ConnectionDrainingEnabled": { + "base": null, + "refs": { + "ConnectionDraining$Enabled": "

    Specifies whether connection draining is enabled for the load balancer.

    " + } + }, + "ConnectionDrainingTimeout": { + "base": null, + "refs": { + "ConnectionDraining$Timeout": "

    The maximum time, in seconds, to keep the existing connections open before deregistering the instances.

    " + } + }, + "ConnectionSettings": { + "base": "

    Information about the ConnectionSettings attribute.

    ", + "refs": { + "LoadBalancerAttributes$ConnectionSettings": "

    If enabled, the load balancer allows the connections to remain idle (no data is sent over the connection) for the specified duration.

    By default, Elastic Load Balancing maintains a 60-second idle connection timeout for both front-end and back-end connections of your load balancer. For more information, see Configure Idle Connection Timeout in the Elastic Load Balancing Developer Guide.
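
    A sketch that raises the idle timeout above the 60-second default (the 120-second value is an assumption):

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/elb"
        )

        // setIdleTimeout adjusts the ConnectionSettings attribute.
        func setIdleTimeout(svc *elb.ELB, lbName string, seconds int64) error {
            _, err := svc.ModifyLoadBalancerAttributes(&elb.ModifyLoadBalancerAttributesInput{
                LoadBalancerName: aws.String(lbName),
                LoadBalancerAttributes: &elb.LoadBalancerAttributes{
                    ConnectionSettings: &elb.ConnectionSettings{
                        IdleTimeout: aws.Int64(seconds), // e.g. 120; the default is 60
                    },
                },
            })
            return err
        }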

    " + } + }, + "CookieExpirationPeriod": { + "base": null, + "refs": { + "CreateLBCookieStickinessPolicyInput$CookieExpirationPeriod": "

    The time period, in seconds, after which the cookie should be considered stale. If you do not specify this parameter, the sticky session lasts for the duration of the browser session.

    ", + "LBCookieStickinessPolicy$CookieExpirationPeriod": "

    The time period, in seconds, after which the cookie should be considered stale. If this parameter is not specified, the stickiness session lasts for the duration of the browser session.
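
    A sketch that creates a duration-based stickiness policy and attaches it to a listener (the policy name, port, and expiration period are assumptions):

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/elb"
        )

        // enableDurationStickiness creates an LB-cookie policy and enables it
        // on the port-80 listener.
        func enableDurationStickiness(svc *elb.ELB, lbName string) error {
            policy := "my-duration-cookie-policy"
            _, err := svc.CreateLBCookieStickinessPolicy(&elb.CreateLBCookieStickinessPolicyInput{
                LoadBalancerName:       aws.String(lbName),
                PolicyName:             aws.String(policy),
                CookieExpirationPeriod: aws.Int64(300), // omit for browser-session stickiness
            })
            if err != nil {
                return err
            }
            _, err = svc.SetLoadBalancerPoliciesOfListener(&elb.SetLoadBalancerPoliciesOfListenerInput{
                LoadBalancerName: aws.String(lbName),
                LoadBalancerPort: aws.Int64(80),
                PolicyNames:      []*string{aws.String(policy)},
            })
            return err
        }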

    " + } + }, + "CookieName": { + "base": null, + "refs": { + "AppCookieStickinessPolicy$CookieName": "

    The name of the application cookie used for stickiness.

    ", + "CreateAppCookieStickinessPolicyInput$CookieName": "

    The name of the application cookie used for stickiness.

    " + } + }, + "CreateAccessPointInput": { + "base": null, + "refs": { + } + }, + "CreateAccessPointOutput": { + "base": null, + "refs": { + } + }, + "CreateAppCookieStickinessPolicyInput": { + "base": null, + "refs": { + } + }, + "CreateAppCookieStickinessPolicyOutput": { + "base": null, + "refs": { + } + }, + "CreateLBCookieStickinessPolicyInput": { + "base": null, + "refs": { + } + }, + "CreateLBCookieStickinessPolicyOutput": { + "base": null, + "refs": { + } + }, + "CreateLoadBalancerListenerInput": { + "base": null, + "refs": { + } + }, + "CreateLoadBalancerListenerOutput": { + "base": null, + "refs": { + } + }, + "CreateLoadBalancerPolicyInput": { + "base": null, + "refs": { + } + }, + "CreateLoadBalancerPolicyOutput": { + "base": null, + "refs": { + } + }, + "CreatedTime": { + "base": null, + "refs": { + "LoadBalancerDescription$CreatedTime": "

    The date and time the load balancer was created.

    " + } + }, + "CrossZoneLoadBalancing": { + "base": "

    Information about the CrossZoneLoadBalancing attribute.

    ", + "refs": { + "LoadBalancerAttributes$CrossZoneLoadBalancing": "

    If enabled, the load balancer routes the request traffic evenly across all back-end instances regardless of the Availability Zones.

    For more information, see Enable Cross-Zone Load Balancing in the Elastic Load Balancing Developer Guide.

    " + } + }, + "CrossZoneLoadBalancingEnabled": { + "base": null, + "refs": { + "CrossZoneLoadBalancing$Enabled": "

    Specifies whether cross-zone load balancing is enabled for the load balancer.

    " + } + }, + "DNSName": { + "base": null, + "refs": { + "CreateAccessPointOutput$DNSName": "

    The DNS name of the load balancer.

    ", + "LoadBalancerDescription$DNSName": "

    The external DNS name of the load balancer.

    ", + "LoadBalancerDescription$CanonicalHostedZoneName": "

    The Amazon Route 53 hosted zone associated with the load balancer.

    For more information, see Using Domain Names With Elastic Load Balancing in the Elastic Load Balancing Developer Guide.

    ", + "LoadBalancerDescription$CanonicalHostedZoneNameID": "

    The ID of the Amazon Route 53 hosted zone name associated with the load balancer.

    " + } + }, + "DefaultValue": { + "base": null, + "refs": { + "PolicyAttributeTypeDescription$DefaultValue": "

    The default value of the attribute, if applicable.

    " + } + }, + "DeleteAccessPointInput": { + "base": null, + "refs": { + } + }, + "DeleteAccessPointOutput": { + "base": null, + "refs": { + } + }, + "DeleteLoadBalancerListenerInput": { + "base": null, + "refs": { + } + }, + "DeleteLoadBalancerListenerOutput": { + "base": null, + "refs": { + } + }, + "DeleteLoadBalancerPolicyInput": { + "base": "=", + "refs": { + } + }, + "DeleteLoadBalancerPolicyOutput": { + "base": null, + "refs": { + } + }, + "DeregisterEndPointsInput": { + "base": null, + "refs": { + } + }, + "DeregisterEndPointsOutput": { + "base": null, + "refs": { + } + }, + "DescribeAccessPointsInput": { + "base": null, + "refs": { + } + }, + "DescribeAccessPointsOutput": { + "base": null, + "refs": { + } + }, + "DescribeEndPointStateInput": { + "base": null, + "refs": { + } + }, + "DescribeEndPointStateOutput": { + "base": null, + "refs": { + } + }, + "DescribeLoadBalancerAttributesInput": { + "base": null, + "refs": { + } + }, + "DescribeLoadBalancerAttributesOutput": { + "base": null, + "refs": { + } + }, + "DescribeLoadBalancerPoliciesInput": { + "base": null, + "refs": { + } + }, + "DescribeLoadBalancerPoliciesOutput": { + "base": null, + "refs": { + } + }, + "DescribeLoadBalancerPolicyTypesInput": { + "base": null, + "refs": { + } + }, + "DescribeLoadBalancerPolicyTypesOutput": { + "base": null, + "refs": { + } + }, + "DescribeTagsInput": { + "base": null, + "refs": { + } + }, + "DescribeTagsOutput": { + "base": null, + "refs": { + } + }, + "Description": { + "base": null, + "refs": { + "InstanceState$Description": "

    A description of the instance state. This string can contain one or more of the following messages.

    • N/A

    • A transient error occurred. Please try again later.

    • Instance has failed at least the UnhealthyThreshold number of health checks consecutively.

    • Instance has not passed the configured HealthyThreshold number of health checks consecutively.

    • Instance registration is still in progress.

    • Instance is in the EC2 Availability Zone for which LoadBalancer is not configured to route traffic to.

    • Instance is not currently registered with the LoadBalancer.

    • Instance deregistration currently in progress.

    • Disable Availability Zone is currently in progress.

    • Instance is in pending state.

    • Instance is in stopped state.

    • Instance is in terminated state.

    ", + "PolicyAttributeTypeDescription$Description": "

    A description of the attribute.

    ", + "PolicyTypeDescription$Description": "

    A description of the policy type.

    " + } + }, + "DetachLoadBalancerFromSubnetsInput": { + "base": null, + "refs": { + } + }, + "DetachLoadBalancerFromSubnetsOutput": { + "base": null, + "refs": { + } + }, + "DuplicateAccessPointNameException": { + "base": "

    The specified load balancer name already exists for this account.

    ", + "refs": { + } + }, + "DuplicateListenerException": { + "base": "

    A listener already exists for the specified LoadBalancerName and LoadBalancerPort, but with a different InstancePort, Protocol, or SSLCertificateId.

    ", + "refs": { + } + }, + "DuplicatePolicyNameException": { + "base": "

    A policy with the specified name already exists for this load balancer.

    ", + "refs": { + } + }, + "DuplicateTagKeysException": { + "base": "

    A tag key was specified more than once.

    ", + "refs": { + } + }, + "EndPointPort": { + "base": null, + "refs": { + "SetLoadBalancerPoliciesForBackendServerInput$InstancePort": "

    The port number associated with the back-end server.

    " + } + }, + "HealthCheck": { + "base": "

    Information about a health check.

    ", + "refs": { + "ConfigureHealthCheckInput$HealthCheck": "

    The configuration information for the new health check.

    ", + "ConfigureHealthCheckOutput$HealthCheck": "

    The updated health check.

    ", + "LoadBalancerDescription$HealthCheck": "

    Information about the health checks conducted on the load balancer.

    " + } + }, + "HealthCheckInterval": { + "base": null, + "refs": { + "HealthCheck$Interval": "

    The approximate interval, in seconds, between health checks of an individual instance.

    " + } + }, + "HealthCheckTarget": { + "base": null, + "refs": { + "HealthCheck$Target": "

    The instance being checked. The protocol is either TCP, HTTP, HTTPS, or SSL. The range of valid ports is one (1) through 65535.

    TCP is the default, specified as a TCP:port pair, for example \"TCP:5000\". In this case, a health check simply attempts to open a TCP connection to the instance on the specified port. Failure to connect within the configured timeout is considered unhealthy.

    SSL is also specified as an SSL:port pair, for example, SSL:5000.

    For HTTP/HTTPS, you must include a ping path in the string. HTTP is specified as an HTTP:port/PathToPing grouping, for example \"HTTP:80/weather/us/wa/seattle\". In this case, an HTTP GET request is issued to the instance on the given port and path. Any answer other than \"200 OK\" within the timeout period is considered unhealthy.

    The total length of the HTTP ping target must be 1024 16-bit Unicode characters or less.
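
    A sketch configuring an HTTP health check in the Target format described above (the thresholds and intervals are assumptions):

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/elb"
        )

        // configureHTTPHealthCheck expects "200 OK" from GET /weather/us/wa/seattle.
        func configureHTTPHealthCheck(svc *elb.ELB, lbName string) error {
            _, err := svc.ConfigureHealthCheck(&elb.ConfigureHealthCheckInput{
                LoadBalancerName: aws.String(lbName),
                HealthCheck: &elb.HealthCheck{
                    Target:             aws.String("HTTP:80/weather/us/wa/seattle"),
                    Interval:           aws.Int64(30), // seconds between checks
                    Timeout:            aws.Int64(5),  // must be less than Interval
                    HealthyThreshold:   aws.Int64(2),
                    UnhealthyThreshold: aws.Int64(2),
                },
            })
            return err
        }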

    " + } + }, + "HealthCheckTimeout": { + "base": null, + "refs": { + "HealthCheck$Timeout": "

    The amount of time, in seconds, during which no response means a failed health check.

    This value must be less than the Interval value.

    " + } + }, + "HealthyThreshold": { + "base": null, + "refs": { + "HealthCheck$HealthyThreshold": "

    The number of consecutive health checks successes required before moving the instance to the Healthy state.

    " + } + }, + "IdleTimeout": { + "base": null, + "refs": { + "ConnectionSettings$IdleTimeout": "

    The time, in seconds, that the connection is allowed to be idle (no data has been sent over the connection) before it is closed by the load balancer.

    " + } + }, + "Instance": { + "base": "

    The ID of a back-end instance.

    ", + "refs": { + "Instances$member": null + } + }, + "InstanceId": { + "base": null, + "refs": { + "Instance$InstanceId": "

    The ID of the instance.

    ", + "InstanceState$InstanceId": "

    The ID of the instance.

    " + } + }, + "InstancePort": { + "base": null, + "refs": { + "BackendServerDescription$InstancePort": "

    The port on which the back-end server is listening.

    ", + "Listener$InstancePort": "

    The port on which the instance is listening.

    " + } + }, + "InstanceState": { + "base": "

    Information about the state of a back-end instance.

    ", + "refs": { + "InstanceStates$member": null + } + }, + "InstanceStates": { + "base": null, + "refs": { + "DescribeEndPointStateOutput$InstanceStates": "

    Information about the health of the instances.

    " + } + }, + "Instances": { + "base": null, + "refs": { + "DeregisterEndPointsInput$Instances": "

    The IDs of the instances.

    ", + "DeregisterEndPointsOutput$Instances": "

    The remaining instances registered with the load balancer.

    ", + "DescribeEndPointStateInput$Instances": "

    The IDs of the instances.

    ", + "LoadBalancerDescription$Instances": "

    The IDs of the instances for the load balancer.

    ", + "RegisterEndPointsInput$Instances": "

    The IDs of the instances.

    ", + "RegisterEndPointsOutput$Instances": "

    The updated list of instances for the load balancer.

    " + } + }, + "InvalidConfigurationRequestException": { + "base": "

    The requested configuration change is not valid.

    ", + "refs": { + } + }, + "InvalidEndPointException": { + "base": "

    The specified endpoint is not valid.

    ", + "refs": { + } + }, + "InvalidSchemeException": { + "base": "

    The specified value for the scheme is not valid. You can only specify a scheme for load balancers in a VPC.

    ", + "refs": { + } + }, + "InvalidSecurityGroupException": { + "base": "

    One or more of the specified security groups do not exist.

    ", + "refs": { + } + }, + "InvalidSubnetException": { + "base": "

    The specified VPC has no associated Internet gateway.

    ", + "refs": { + } + }, + "LBCookieStickinessPolicies": { + "base": null, + "refs": { + "Policies$LBCookieStickinessPolicies": "

    The stickiness policies created using CreateLBCookieStickinessPolicy.

    " + } + }, + "LBCookieStickinessPolicy": { + "base": "

    Information about a policy for duration-based session stickiness.

    ", + "refs": { + "LBCookieStickinessPolicies$member": null + } + }, + "Listener": { + "base": "

    Information about a listener.

    For information about the protocols and the ports supported by Elastic Load Balancing, see Listener Configurations for Elastic Load Balancing in the Elastic Load Balancing Developer Guide.

    ", + "refs": { + "ListenerDescription$Listener": null, + "Listeners$member": null + } + }, + "ListenerDescription": { + "base": "

    The policies enabled for a listener.

    ", + "refs": { + "ListenerDescriptions$member": null + } + }, + "ListenerDescriptions": { + "base": null, + "refs": { + "LoadBalancerDescription$ListenerDescriptions": "

    The listeners for the load balancer.

    " + } + }, + "ListenerNotFoundException": { + "base": "

    The load balancer does not have a listener configured at the specified port.

    ", + "refs": { + } + }, + "Listeners": { + "base": null, + "refs": { + "CreateAccessPointInput$Listeners": "

    The listeners.

    For more information, see Listeners for Your Load Balancer in the Elastic Load Balancing Developer Guide.

    ", + "CreateLoadBalancerListenerInput$Listeners": "

    The listeners.

    " + } + }, + "LoadBalancerAttributeNotFoundException": { + "base": "

    The specified load balancer attribute does not exist.

    ", + "refs": { + } + }, + "LoadBalancerAttributes": { + "base": "

    The attributes for a load balancer.

    ", + "refs": { + "DescribeLoadBalancerAttributesOutput$LoadBalancerAttributes": "

    Information about the load balancer attributes.

    ", + "ModifyLoadBalancerAttributesInput$LoadBalancerAttributes": "

    The attributes of the load balancer.

    ", + "ModifyLoadBalancerAttributesOutput$LoadBalancerAttributes": null + } + }, + "LoadBalancerDescription": { + "base": "

    Information about a load balancer.

    ", + "refs": { + "LoadBalancerDescriptions$member": null + } + }, + "LoadBalancerDescriptions": { + "base": null, + "refs": { + "DescribeAccessPointsOutput$LoadBalancerDescriptions": "

    Information about the load balancers.

    " + } + }, + "LoadBalancerNames": { + "base": null, + "refs": { + "AddTagsInput$LoadBalancerNames": "

    The name of the load balancer. You can specify one load balancer only.

    ", + "DescribeAccessPointsInput$LoadBalancerNames": "

    The names of the load balancers.

    ", + "RemoveTagsInput$LoadBalancerNames": "

    The name of the load balancer. You can specify a maximum of one load balancer name.

    " + } + }, + "LoadBalancerNamesMax20": { + "base": null, + "refs": { + "DescribeTagsInput$LoadBalancerNames": "

    The names of the load balancers.

    " + } + }, + "LoadBalancerScheme": { + "base": null, + "refs": { + "CreateAccessPointInput$Scheme": "

    The type of a load balancer. Valid only for load balancers in a VPC.

    By default, Elastic Load Balancing creates an Internet-facing load balancer with a publicly resolvable DNS name, which resolves to public IP addresses. For more information about Internet-facing and Internal load balancers, see Internet-facing and Internal Load Balancers in the Elastic Load Balancing Developer Guide.

    Specify internal to create an internal load balancer with a DNS name that resolves to private IP addresses.

    ", + "LoadBalancerDescription$Scheme": "

    The type of load balancer. Valid only for load balancers in a VPC.

    If Scheme is internet-facing, the load balancer has a public DNS name that resolves to a public IP address.

    If Scheme is internal, the load balancer has a public DNS name that resolves to a private IP address.

    " + } + }, + "Marker": { + "base": null, + "refs": { + "DescribeAccessPointsInput$Marker": "

    The marker for the next set of results. (You received this marker from a previous call.)

    ", + "DescribeAccessPointsOutput$NextMarker": "

    The marker to use when requesting the next set of results. If there are no additional results, the string is empty.
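
    A sketch of the Marker/NextMarker loop these fields describe (the page size is an assumption):

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/elb"
        )

        // listAllLoadBalancers pages through every load balancer in the region.
        func listAllLoadBalancers(svc *elb.ELB) error {
            input := &elb.DescribeLoadBalancersInput{PageSize: aws.Int64(100)}
            for {
                out, err := svc.DescribeLoadBalancers(input)
                if err != nil {
                    return err
                }
                for _, lb := range out.LoadBalancerDescriptions {
                    fmt.Println(aws.StringValue(lb.LoadBalancerName))
                }
                if aws.StringValue(out.NextMarker) == "" {
                    return nil // an empty NextMarker means no more results
                }
                input.Marker = out.NextMarker
            }
        }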

    " + } + }, + "ModifyLoadBalancerAttributesInput": { + "base": null, + "refs": { + } + }, + "ModifyLoadBalancerAttributesOutput": { + "base": null, + "refs": { + } + }, + "PageSize": { + "base": null, + "refs": { + "DescribeAccessPointsInput$PageSize": "

    The maximum number of results to return with this call (a number from 1 to 400). The default is 400.

    " + } + }, + "Policies": { + "base": "

    The policies for a load balancer.

    ", + "refs": { + "LoadBalancerDescription$Policies": "

    The policies defined for the load balancer.

    " + } + }, + "PolicyAttribute": { + "base": "

    Information about a policy attribute.

    ", + "refs": { + "PolicyAttributes$member": null + } + }, + "PolicyAttributeDescription": { + "base": "

    Information about a policy attribute.

    ", + "refs": { + "PolicyAttributeDescriptions$member": null + } + }, + "PolicyAttributeDescriptions": { + "base": null, + "refs": { + "PolicyDescription$PolicyAttributeDescriptions": "

    The policy attributes.

    " + } + }, + "PolicyAttributeTypeDescription": { + "base": "

    Information about a policy attribute type.

    ", + "refs": { + "PolicyAttributeTypeDescriptions$member": null + } + }, + "PolicyAttributeTypeDescriptions": { + "base": null, + "refs": { + "PolicyTypeDescription$PolicyAttributeTypeDescriptions": "

    The description of the policy attributes associated with the policies defined by Elastic Load Balancing.

    " + } + }, + "PolicyAttributes": { + "base": null, + "refs": { + "CreateLoadBalancerPolicyInput$PolicyAttributes": "

    The attributes for the policy.

    " + } + }, + "PolicyDescription": { + "base": "

    Information about a policy.

    ", + "refs": { + "PolicyDescriptions$member": null + } + }, + "PolicyDescriptions": { + "base": null, + "refs": { + "DescribeLoadBalancerPoliciesOutput$PolicyDescriptions": "

    Information about the policies.

    " + } + }, + "PolicyName": { + "base": null, + "refs": { + "AppCookieStickinessPolicy$PolicyName": "

    The mnemonic name for the policy being created. The name must be unique within a set of policies for this load balancer.

    ", + "CreateAppCookieStickinessPolicyInput$PolicyName": "

    The name of the policy being created. Policy names must consist of alphanumeric characters and dashes (-). This name must be unique within the set of policies for this load balancer.

    ", + "CreateLBCookieStickinessPolicyInput$PolicyName": "

    The name of the policy being created. Policy names must consist of alphanumeric characters and dashes (-). This name must be unique within the set of policies for this load balancer.

    ", + "CreateLoadBalancerPolicyInput$PolicyName": "

    The name of the load balancer policy to be created. This name must be unique within the set of policies for this load balancer.

    ", + "DeleteLoadBalancerPolicyInput$PolicyName": "

    The name of the policy.

    ", + "LBCookieStickinessPolicy$PolicyName": "

    The name for the policy being created. The name must be unique within the set of policies for this load balancer.

    ", + "PolicyDescription$PolicyName": "

    The name of the policy.

    ", + "PolicyNames$member": null + } + }, + "PolicyNames": { + "base": null, + "refs": { + "BackendServerDescription$PolicyNames": "

    The names of the policies enabled for the back-end server.

    ", + "DescribeLoadBalancerPoliciesInput$PolicyNames": "

    The names of the policies.

    ", + "ListenerDescription$PolicyNames": "

    The policies. If there are no policies enabled, the list is empty.

    ", + "Policies$OtherPolicies": "

    The policies other than the stickiness policies.

    ", + "SetLoadBalancerPoliciesForBackendServerInput$PolicyNames": "

    The names of the policies. If the list is empty, then all current polices are removed from the back-end server.

    ", + "SetLoadBalancerPoliciesOfListenerInput$PolicyNames": "

    The names of the policies. If the list is empty, the current policy is removed from the listener.

    " + } + }, + "PolicyNotFoundException": { + "base": "

    One or more of the specified policies do not exist.

    ", + "refs": { + } + }, + "PolicyTypeDescription": { + "base": "

    Information about a policy type.

    ", + "refs": { + "PolicyTypeDescriptions$member": null + } + }, + "PolicyTypeDescriptions": { + "base": null, + "refs": { + "DescribeLoadBalancerPolicyTypesOutput$PolicyTypeDescriptions": "

    Information about the policy types.

    " + } + }, + "PolicyTypeName": { + "base": null, + "refs": { + "CreateLoadBalancerPolicyInput$PolicyTypeName": "

    The name of the base policy type. To get the list of policy types, use DescribeLoadBalancerPolicyTypes.

    ", + "PolicyDescription$PolicyTypeName": "

    The name of the policy type.

    ", + "PolicyTypeDescription$PolicyTypeName": "

    The name of the policy type.

    ", + "PolicyTypeNames$member": null + } + }, + "PolicyTypeNames": { + "base": null, + "refs": { + "DescribeLoadBalancerPolicyTypesInput$PolicyTypeNames": "

    The names of the policy types. If no names are specified, describes all policy types defined by Elastic Load Balancing.

    " + } + }, + "PolicyTypeNotFoundException": { + "base": "

    One or more of the specified policy types do not exist.

    ", + "refs": { + } + }, + "Ports": { + "base": null, + "refs": { + "DeleteLoadBalancerListenerInput$LoadBalancerPorts": "

    The client port numbers of the listeners.

    " + } + }, + "Protocol": { + "base": null, + "refs": { + "Listener$Protocol": "

    The load balancer transport protocol to use for routing: HTTP, HTTPS, TCP, or SSL.

    ", + "Listener$InstanceProtocol": "

    The protocol to use for routing traffic to back-end instances: HTTP, HTTPS, TCP, or SSL.

    If the front-end protocol is HTTP or HTTPS, InstanceProtocol must be HTTP or HTTPS. If the front-end protocol is TCP or SSL, InstanceProtocol must be TCP or SSL.

    If there is another listener with the same InstancePort whose InstanceProtocol is secure (HTTPS or SSL), the listener's InstanceProtocol must also be secure.

    If there is another listener with the same InstancePort whose InstanceProtocol is HTTP or TCP, the listener's InstanceProtocol must be HTTP or TCP.
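
    To make the pairing rules concrete, a sketch of an HTTPS front end forwarding to HTTP on the instances, which satisfies the layer-7 rule above (the certificate ARN and ports are assumptions):

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/elb"
        )

        // addHTTPSListener terminates TLS at the load balancer and forwards
        // plain HTTP to the instances (HTTP and HTTPS are both layer 7).
        func addHTTPSListener(svc *elb.ELB, lbName string) error {
            _, err := svc.CreateLoadBalancerListeners(&elb.CreateLoadBalancerListenersInput{
                LoadBalancerName: aws.String(lbName),
                Listeners: []*elb.Listener{{
                    Protocol:         aws.String("HTTPS"),
                    LoadBalancerPort: aws.Int64(443),
                    InstanceProtocol: aws.String("HTTP"),
                    InstancePort:     aws.Int64(80),
                    SSLCertificateId: aws.String("arn:aws:iam::123456789012:server-certificate/my-cert"), // assumed ARN
                }},
            })
            return err
        }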

    " + } + }, + "ReasonCode": { + "base": null, + "refs": { + "InstanceState$ReasonCode": "

    Information about the cause of OutOfService instances. Specifically, whether the cause is Elastic Load Balancing or the instance.

    Valid values: ELB | Instance | N/A

    " + } + }, + "RegisterEndPointsInput": { + "base": null, + "refs": { + } + }, + "RegisterEndPointsOutput": { + "base": null, + "refs": { + } + }, + "RemoveAvailabilityZonesInput": { + "base": null, + "refs": { + } + }, + "RemoveAvailabilityZonesOutput": { + "base": null, + "refs": { + } + }, + "RemoveTagsInput": { + "base": null, + "refs": { + } + }, + "RemoveTagsOutput": { + "base": null, + "refs": { + } + }, + "S3BucketName": { + "base": null, + "refs": { + "AccessLog$S3BucketName": "

    The name of the Amazon S3 bucket where the access logs are stored.

    " + } + }, + "SSLCertificateId": { + "base": null, + "refs": { + "Listener$SSLCertificateId": "

    The Amazon Resource Name (ARN) of the server certificate.

    ", + "SetLoadBalancerListenerSSLCertificateInput$SSLCertificateId": "

    The Amazon Resource Name (ARN) of the SSL certificate.

    " + } + }, + "SecurityGroupId": { + "base": null, + "refs": { + "SecurityGroups$member": null + } + }, + "SecurityGroupName": { + "base": null, + "refs": { + "SourceSecurityGroup$GroupName": "

    The name of the security group.

    " + } + }, + "SecurityGroupOwnerAlias": { + "base": null, + "refs": { + "SourceSecurityGroup$OwnerAlias": "

    The owner of the security group.

    " + } + }, + "SecurityGroups": { + "base": null, + "refs": { + "ApplySecurityGroupsToLoadBalancerInput$SecurityGroups": "

    The IDs of the security groups to associate with the load balancer. Note that you cannot specify the name of the security group.

    ", + "ApplySecurityGroupsToLoadBalancerOutput$SecurityGroups": "

    The IDs of the security groups associated with the load balancer.

    ", + "CreateAccessPointInput$SecurityGroups": "

    The IDs of the security groups to assign to the load balancer.

    ", + "LoadBalancerDescription$SecurityGroups": "

    The security groups for the load balancer. Valid only for load balancers in a VPC.

    " + } + }, + "SetLoadBalancerListenerSSLCertificateInput": { + "base": null, + "refs": { + } + }, + "SetLoadBalancerListenerSSLCertificateOutput": { + "base": null, + "refs": { + } + }, + "SetLoadBalancerPoliciesForBackendServerInput": { + "base": null, + "refs": { + } + }, + "SetLoadBalancerPoliciesForBackendServerOutput": { + "base": null, + "refs": { + } + }, + "SetLoadBalancerPoliciesOfListenerInput": { + "base": null, + "refs": { + } + }, + "SetLoadBalancerPoliciesOfListenerOutput": { + "base": null, + "refs": { + } + }, + "SourceSecurityGroup": { + "base": "

    Information about a source security group.

    ", + "refs": { + "LoadBalancerDescription$SourceSecurityGroup": "

    The security group that you can use as part of your inbound rules for your load balancer's back-end application instances. To allow traffic only from load balancers, add a security group rule to your back-end instance that specifies this source security group as the inbound source.

    " + } + }, + "State": { + "base": null, + "refs": { + "InstanceState$State": "

    The current state of the instance.

    Valid values: InService | OutOfService | Unknown

    " + } + }, + "StringVal": { + "base": null, + "refs": { + "AdditionalAttribute$Key": "

    This parameter is reserved.

    ", + "AdditionalAttribute$Value": "

    This parameter is reserved.

    " + } + }, + "SubnetId": { + "base": null, + "refs": { + "Subnets$member": null + } + }, + "SubnetNotFoundException": { + "base": "

    One or more of the specified subnets do not exist.

    ", + "refs": { + } + }, + "Subnets": { + "base": null, + "refs": { + "AttachLoadBalancerToSubnetsInput$Subnets": "

    The IDs of the subnets to add for the load balancer. You can add only one subnet per Availability Zone.

    ", + "AttachLoadBalancerToSubnetsOutput$Subnets": "

    The IDs of the subnets attached to the load balancer.

    ", + "CreateAccessPointInput$Subnets": "

    The IDs of the subnets in your VPC to attach to the load balancer. Specify one subnet per Availability Zone specified in AvailabilityZones.

    ", + "DetachLoadBalancerFromSubnetsInput$Subnets": "

    The IDs of the subnets.

    ", + "DetachLoadBalancerFromSubnetsOutput$Subnets": "

    The IDs of the remaining subnets for the load balancer.

    ", + "LoadBalancerDescription$Subnets": "

    The IDs of the subnets for the load balancer.

    " + } + }, + "Tag": { + "base": "

    Information about a tag.

    ", + "refs": { + "TagList$member": null + } + }, + "TagDescription": { + "base": "

    The tags associated with a load balancer.

    ", + "refs": { + "TagDescriptions$member": null + } + }, + "TagDescriptions": { + "base": null, + "refs": { + "DescribeTagsOutput$TagDescriptions": "

    Information about the tags.

    " + } + }, + "TagKey": { + "base": null, + "refs": { + "Tag$Key": "

    The key of the tag.

    ", + "TagKeyOnly$Key": "

    The name of the key.

    " + } + }, + "TagKeyList": { + "base": null, + "refs": { + "RemoveTagsInput$Tags": "

    The list of tag keys to remove.

    " + } + }, + "TagKeyOnly": { + "base": "

    The key of a tag.

    ", + "refs": { + "TagKeyList$member": null + } + }, + "TagList": { + "base": null, + "refs": { + "AddTagsInput$Tags": "

    The tags.

    ", + "CreateAccessPointInput$Tags": "

    A list of tags to assign to the load balancer.

    For more information about tagging your load balancer, see Tagging in the Elastic Load Balancing Developer Guide.

    ", + "TagDescription$Tags": "

    The tags.

    " + } + }, + "TagValue": { + "base": null, + "refs": { + "Tag$Value": "

    The value of the tag.

    " + } + }, + "TooManyAccessPointsException": { + "base": "

    The quota for the number of load balancers has been reached.

    ", + "refs": { + } + }, + "TooManyPoliciesException": { + "base": "

    The quota for the number of policies for this load balancer has been reached.

    ", + "refs": { + } + }, + "TooManyTagsException": { + "base": "

    The quota for the number of tags that can be assigned to a load balancer has been reached.

    ", + "refs": { + } + }, + "UnhealthyThreshold": { + "base": null, + "refs": { + "HealthCheck$UnhealthyThreshold": "

    The number of consecutive health check failures required before moving the instance to the Unhealthy state.

    " + } + }, + "VPCId": { + "base": null, + "refs": { + "LoadBalancerDescription$VPCId": "

    The ID of the VPC for the load balancer.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/elasticloadbalancing/2012-06-01/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticloadbalancing/2012-06-01/paginators-1.json new file mode 100644 index 000000000..82113a706 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticloadbalancing/2012-06-01/paginators-1.json @@ -0,0 +1,18 @@ +{ + "pagination": { + "DescribeInstanceHealth": { + "result_key": "InstanceStates" + }, + "DescribeLoadBalancerPolicies": { + "result_key": "PolicyDescriptions" + }, + "DescribeLoadBalancerPolicyTypes": { + "result_key": "PolicyTypeDescriptions" + }, + "DescribeLoadBalancers": { + "input_token": "Marker", + "output_token": "NextMarker", + "result_key": "LoadBalancerDescriptions" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/elasticloadbalancing/2012-06-01/waiters-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticloadbalancing/2012-06-01/waiters-2.json new file mode 100644 index 000000000..b03901a4c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticloadbalancing/2012-06-01/waiters-2.json @@ -0,0 +1,49 @@ +{ + "version": 2, + "waiters": { + "InstanceInService": { + "delay": 15, + "operation": "DescribeInstanceHealth", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "InService", + "matcher": "pathAll", + "state": "success", + "argument": "InstanceStates[].State" + } + ] + }, + "InstanceDeregistered": { + "delay": 15, + "operation": "DescribeInstanceHealth", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "OutOfService", + "matcher": "pathAll", + "state": "success", + "argument": "InstanceStates[].State" + }, + { + "matcher": "error", + "expected": "InvalidInstance", + "state": "success" + } + ] + }, + "AnyInstanceInService": { + "delay": 15, + "operation": "DescribeInstanceHealth", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "InService", + "matcher": "pathAny", + "state": "success", + "argument": "InstanceStates[].State" + } + ] + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/api-2.json new file mode 100644 index 000000000..9b0ceff50 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/api-2.json @@ -0,0 +1,1341 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2009-03-31", + "endpointPrefix":"elasticmapreduce", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"Amazon EMR", + "serviceFullName":"Amazon Elastic MapReduce", + "signatureVersion":"v4", + "targetPrefix":"ElasticMapReduce", + "timestampFormat":"unixTimestamp" + }, + "operations":{ + "AddInstanceGroups":{ + "name":"AddInstanceGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddInstanceGroupsInput"}, + "output":{"shape":"AddInstanceGroupsOutput"}, + "errors":[ + {"shape":"InternalServerError"} + ] + }, + "AddJobFlowSteps":{ + "name":"AddJobFlowSteps", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddJobFlowStepsInput"}, + "output":{"shape":"AddJobFlowStepsOutput"}, + "errors":[ + {"shape":"InternalServerError"} + ] + }, + "AddTags":{ + "name":"AddTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddTagsInput"}, + "output":{"shape":"AddTagsOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ] + }, + 
"DescribeCluster":{ + "name":"DescribeCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeClusterInput"}, + "output":{"shape":"DescribeClusterOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ] + }, + "DescribeJobFlows":{ + "name":"DescribeJobFlows", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeJobFlowsInput"}, + "output":{"shape":"DescribeJobFlowsOutput"}, + "errors":[ + {"shape":"InternalServerError"} + ], + "deprecated":true + }, + "DescribeStep":{ + "name":"DescribeStep", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeStepInput"}, + "output":{"shape":"DescribeStepOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ] + }, + "ListBootstrapActions":{ + "name":"ListBootstrapActions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListBootstrapActionsInput"}, + "output":{"shape":"ListBootstrapActionsOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ] + }, + "ListClusters":{ + "name":"ListClusters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListClustersInput"}, + "output":{"shape":"ListClustersOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ] + }, + "ListInstanceGroups":{ + "name":"ListInstanceGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListInstanceGroupsInput"}, + "output":{"shape":"ListInstanceGroupsOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ] + }, + "ListInstances":{ + "name":"ListInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListInstancesInput"}, + "output":{"shape":"ListInstancesOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ] + }, + "ListSteps":{ + "name":"ListSteps", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListStepsInput"}, + "output":{"shape":"ListStepsOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ] + }, + "ModifyInstanceGroups":{ + "name":"ModifyInstanceGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyInstanceGroupsInput"}, + "errors":[ + {"shape":"InternalServerError"} + ] + }, + "RemoveTags":{ + "name":"RemoveTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveTagsInput"}, + "output":{"shape":"RemoveTagsOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ] + }, + "RunJobFlow":{ + "name":"RunJobFlow", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RunJobFlowInput"}, + "output":{"shape":"RunJobFlowOutput"}, + "errors":[ + {"shape":"InternalServerError"} + ] + }, + "SetTerminationProtection":{ + "name":"SetTerminationProtection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetTerminationProtectionInput"}, + "errors":[ + {"shape":"InternalServerError"} + ] + }, + "SetVisibleToAllUsers":{ + "name":"SetVisibleToAllUsers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetVisibleToAllUsersInput"}, + "errors":[ + {"shape":"InternalServerError"} + ] + }, + "TerminateJobFlows":{ + "name":"TerminateJobFlows", + "http":{ + 
"method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TerminateJobFlowsInput"}, + "errors":[ + {"shape":"InternalServerError"} + ] + } + }, + "shapes":{ + "ActionOnFailure":{ + "type":"string", + "enum":[ + "TERMINATE_JOB_FLOW", + "TERMINATE_CLUSTER", + "CANCEL_AND_WAIT", + "CONTINUE" + ] + }, + "AddInstanceGroupsInput":{ + "type":"structure", + "required":[ + "InstanceGroups", + "JobFlowId" + ], + "members":{ + "InstanceGroups":{"shape":"InstanceGroupConfigList"}, + "JobFlowId":{"shape":"XmlStringMaxLen256"} + } + }, + "AddInstanceGroupsOutput":{ + "type":"structure", + "members":{ + "JobFlowId":{"shape":"XmlStringMaxLen256"}, + "InstanceGroupIds":{"shape":"InstanceGroupIdsList"} + } + }, + "AddJobFlowStepsInput":{ + "type":"structure", + "required":[ + "JobFlowId", + "Steps" + ], + "members":{ + "JobFlowId":{"shape":"XmlStringMaxLen256"}, + "Steps":{"shape":"StepConfigList"} + } + }, + "AddJobFlowStepsOutput":{ + "type":"structure", + "members":{ + "StepIds":{"shape":"StepIdsList"} + } + }, + "AddTagsInput":{ + "type":"structure", + "required":[ + "ResourceId", + "Tags" + ], + "members":{ + "ResourceId":{"shape":"ResourceId"}, + "Tags":{"shape":"TagList"} + } + }, + "AddTagsOutput":{ + "type":"structure", + "members":{ + } + }, + "Application":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "Version":{"shape":"String"}, + "Args":{"shape":"StringList"}, + "AdditionalInfo":{"shape":"StringMap"} + } + }, + "ApplicationList":{ + "type":"list", + "member":{"shape":"Application"} + }, + "Boolean":{"type":"boolean"}, + "BooleanObject":{"type":"boolean"}, + "BootstrapActionConfig":{ + "type":"structure", + "required":[ + "Name", + "ScriptBootstrapAction" + ], + "members":{ + "Name":{"shape":"XmlStringMaxLen256"}, + "ScriptBootstrapAction":{"shape":"ScriptBootstrapActionConfig"} + } + }, + "BootstrapActionConfigList":{ + "type":"list", + "member":{"shape":"BootstrapActionConfig"} + }, + "BootstrapActionDetail":{ + "type":"structure", + "members":{ + "BootstrapActionConfig":{"shape":"BootstrapActionConfig"} + } + }, + "BootstrapActionDetailList":{ + "type":"list", + "member":{"shape":"BootstrapActionDetail"} + }, + "Cluster":{ + "type":"structure", + "members":{ + "Id":{"shape":"ClusterId"}, + "Name":{"shape":"String"}, + "Status":{"shape":"ClusterStatus"}, + "Ec2InstanceAttributes":{"shape":"Ec2InstanceAttributes"}, + "LogUri":{"shape":"String"}, + "RequestedAmiVersion":{"shape":"String"}, + "RunningAmiVersion":{"shape":"String"}, + "ReleaseLabel":{"shape":"String"}, + "AutoTerminate":{"shape":"Boolean"}, + "TerminationProtected":{"shape":"Boolean"}, + "VisibleToAllUsers":{"shape":"Boolean"}, + "Applications":{"shape":"ApplicationList"}, + "Tags":{"shape":"TagList"}, + "ServiceRole":{"shape":"String"}, + "NormalizedInstanceHours":{"shape":"Integer"}, + "MasterPublicDnsName":{"shape":"String"}, + "Configurations":{"shape":"ConfigurationList"} + } + }, + "ClusterId":{"type":"string"}, + "ClusterState":{ + "type":"string", + "enum":[ + "STARTING", + "BOOTSTRAPPING", + "RUNNING", + "WAITING", + "TERMINATING", + "TERMINATED", + "TERMINATED_WITH_ERRORS" + ] + }, + "ClusterStateChangeReason":{ + "type":"structure", + "members":{ + "Code":{"shape":"ClusterStateChangeReasonCode"}, + "Message":{"shape":"String"} + } + }, + "ClusterStateChangeReasonCode":{ + "type":"string", + "enum":[ + "INTERNAL_ERROR", + "VALIDATION_ERROR", + "INSTANCE_FAILURE", + "BOOTSTRAP_FAILURE", + "USER_REQUEST", + "STEP_FAILURE", + "ALL_STEPS_COMPLETED" + ] + }, + "ClusterStateList":{ + "type":"list", + 
"member":{"shape":"ClusterState"} + }, + "ClusterStatus":{ + "type":"structure", + "members":{ + "State":{"shape":"ClusterState"}, + "StateChangeReason":{"shape":"ClusterStateChangeReason"}, + "Timeline":{"shape":"ClusterTimeline"} + } + }, + "ClusterSummary":{ + "type":"structure", + "members":{ + "Id":{"shape":"ClusterId"}, + "Name":{"shape":"String"}, + "Status":{"shape":"ClusterStatus"}, + "NormalizedInstanceHours":{"shape":"Integer"} + } + }, + "ClusterSummaryList":{ + "type":"list", + "member":{"shape":"ClusterSummary"} + }, + "ClusterTimeline":{ + "type":"structure", + "members":{ + "CreationDateTime":{"shape":"Date"}, + "ReadyDateTime":{"shape":"Date"}, + "EndDateTime":{"shape":"Date"} + } + }, + "Command":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "ScriptPath":{"shape":"String"}, + "Args":{"shape":"StringList"} + } + }, + "CommandList":{ + "type":"list", + "member":{"shape":"Command"} + }, + "Configuration":{ + "type":"structure", + "members":{ + "Classification":{"shape":"String"}, + "Configurations":{"shape":"ConfigurationList"}, + "Properties":{"shape":"StringMap"} + } + }, + "ConfigurationList":{ + "type":"list", + "member":{"shape":"Configuration"} + }, + "Date":{"type":"timestamp"}, + "DescribeClusterInput":{ + "type":"structure", + "required":["ClusterId"], + "members":{ + "ClusterId":{"shape":"ClusterId"} + } + }, + "DescribeClusterOutput":{ + "type":"structure", + "members":{ + "Cluster":{"shape":"Cluster"} + } + }, + "DescribeJobFlowsInput":{ + "type":"structure", + "members":{ + "CreatedAfter":{"shape":"Date"}, + "CreatedBefore":{"shape":"Date"}, + "JobFlowIds":{"shape":"XmlStringList"}, + "JobFlowStates":{"shape":"JobFlowExecutionStateList"} + } + }, + "DescribeJobFlowsOutput":{ + "type":"structure", + "members":{ + "JobFlows":{"shape":"JobFlowDetailList"} + } + }, + "DescribeStepInput":{ + "type":"structure", + "required":[ + "ClusterId", + "StepId" + ], + "members":{ + "ClusterId":{"shape":"ClusterId"}, + "StepId":{"shape":"StepId"} + } + }, + "DescribeStepOutput":{ + "type":"structure", + "members":{ + "Step":{"shape":"Step"} + } + }, + "EC2InstanceIdsList":{ + "type":"list", + "member":{"shape":"InstanceId"} + }, + "EC2InstanceIdsToTerminateList":{ + "type":"list", + "member":{"shape":"InstanceId"} + }, + "EbsBlockDevice":{ + "type":"structure", + "members":{ + "VolumeSpecification":{"shape":"VolumeSpecification"}, + "Device":{"shape":"String"} + } + }, + "EbsBlockDeviceConfig":{ + "type":"structure", + "required":["VolumeSpecification"], + "members":{ + "VolumeSpecification":{"shape":"VolumeSpecification"}, + "VolumesPerInstance":{"shape":"Integer"} + } + }, + "EbsBlockDeviceConfigList":{ + "type":"list", + "member":{"shape":"EbsBlockDeviceConfig"} + }, + "EbsBlockDeviceList":{ + "type":"list", + "member":{"shape":"EbsBlockDevice"} + }, + "EbsConfiguration":{ + "type":"structure", + "members":{ + "EbsBlockDeviceConfigs":{"shape":"EbsBlockDeviceConfigList"}, + "EbsOptimized":{"shape":"BooleanObject"} + } + }, + "EbsVolume":{ + "type":"structure", + "members":{ + "Device":{"shape":"String"}, + "VolumeId":{"shape":"String"} + } + }, + "EbsVolumeList":{ + "type":"list", + "member":{"shape":"EbsVolume"} + }, + "Ec2InstanceAttributes":{ + "type":"structure", + "members":{ + "Ec2KeyName":{"shape":"String"}, + "Ec2SubnetId":{"shape":"String"}, + "Ec2AvailabilityZone":{"shape":"String"}, + "IamInstanceProfile":{"shape":"String"}, + "EmrManagedMasterSecurityGroup":{"shape":"String"}, + "EmrManagedSlaveSecurityGroup":{"shape":"String"}, + 
"ServiceAccessSecurityGroup":{"shape":"String"}, + "AdditionalMasterSecurityGroups":{"shape":"StringList"}, + "AdditionalSlaveSecurityGroups":{"shape":"StringList"} + } + }, + "ErrorCode":{ + "type":"string", + "max":256, + "min":1 + }, + "ErrorMessage":{"type":"string"}, + "HadoopJarStepConfig":{ + "type":"structure", + "required":["Jar"], + "members":{ + "Properties":{"shape":"KeyValueList"}, + "Jar":{"shape":"XmlString"}, + "MainClass":{"shape":"XmlString"}, + "Args":{"shape":"XmlStringList"} + } + }, + "HadoopStepConfig":{ + "type":"structure", + "members":{ + "Jar":{"shape":"String"}, + "Properties":{"shape":"StringMap"}, + "MainClass":{"shape":"String"}, + "Args":{"shape":"StringList"} + } + }, + "Instance":{ + "type":"structure", + "members":{ + "Id":{"shape":"InstanceId"}, + "Ec2InstanceId":{"shape":"InstanceId"}, + "PublicDnsName":{"shape":"String"}, + "PublicIpAddress":{"shape":"String"}, + "PrivateDnsName":{"shape":"String"}, + "PrivateIpAddress":{"shape":"String"}, + "Status":{"shape":"InstanceStatus"}, + "InstanceGroupId":{"shape":"String"}, + "EbsVolumes":{"shape":"EbsVolumeList"} + } + }, + "InstanceGroup":{ + "type":"structure", + "members":{ + "Id":{"shape":"InstanceGroupId"}, + "Name":{"shape":"String"}, + "Market":{"shape":"MarketType"}, + "InstanceGroupType":{"shape":"InstanceGroupType"}, + "BidPrice":{"shape":"String"}, + "InstanceType":{"shape":"InstanceType"}, + "RequestedInstanceCount":{"shape":"Integer"}, + "RunningInstanceCount":{"shape":"Integer"}, + "Status":{"shape":"InstanceGroupStatus"}, + "Configurations":{"shape":"ConfigurationList"}, + "EbsBlockDevices":{"shape":"EbsBlockDeviceList"}, + "EbsOptimized":{"shape":"BooleanObject"}, + "ShrinkPolicy":{"shape":"ShrinkPolicy"} + } + }, + "InstanceGroupConfig":{ + "type":"structure", + "required":[ + "InstanceRole", + "InstanceType", + "InstanceCount" + ], + "members":{ + "Name":{"shape":"XmlStringMaxLen256"}, + "Market":{"shape":"MarketType"}, + "InstanceRole":{"shape":"InstanceRoleType"}, + "BidPrice":{"shape":"XmlStringMaxLen256"}, + "InstanceType":{"shape":"InstanceType"}, + "InstanceCount":{"shape":"Integer"}, + "Configurations":{"shape":"ConfigurationList"}, + "EbsConfiguration":{"shape":"EbsConfiguration"} + } + }, + "InstanceGroupConfigList":{ + "type":"list", + "member":{"shape":"InstanceGroupConfig"} + }, + "InstanceGroupDetail":{ + "type":"structure", + "required":[ + "Market", + "InstanceRole", + "InstanceType", + "InstanceRequestCount", + "InstanceRunningCount", + "State", + "CreationDateTime" + ], + "members":{ + "InstanceGroupId":{"shape":"XmlStringMaxLen256"}, + "Name":{"shape":"XmlStringMaxLen256"}, + "Market":{"shape":"MarketType"}, + "InstanceRole":{"shape":"InstanceRoleType"}, + "BidPrice":{"shape":"XmlStringMaxLen256"}, + "InstanceType":{"shape":"InstanceType"}, + "InstanceRequestCount":{"shape":"Integer"}, + "InstanceRunningCount":{"shape":"Integer"}, + "State":{"shape":"InstanceGroupState"}, + "LastStateChangeReason":{"shape":"XmlString"}, + "CreationDateTime":{"shape":"Date"}, + "StartDateTime":{"shape":"Date"}, + "ReadyDateTime":{"shape":"Date"}, + "EndDateTime":{"shape":"Date"} + } + }, + "InstanceGroupDetailList":{ + "type":"list", + "member":{"shape":"InstanceGroupDetail"} + }, + "InstanceGroupId":{"type":"string"}, + "InstanceGroupIdsList":{ + "type":"list", + "member":{"shape":"XmlStringMaxLen256"} + }, + "InstanceGroupList":{ + "type":"list", + "member":{"shape":"InstanceGroup"} + }, + "InstanceGroupModifyConfig":{ + "type":"structure", + "required":["InstanceGroupId"], + "members":{ 
+ "InstanceGroupId":{"shape":"XmlStringMaxLen256"}, + "InstanceCount":{"shape":"Integer"}, + "EC2InstanceIdsToTerminate":{"shape":"EC2InstanceIdsToTerminateList"}, + "ShrinkPolicy":{"shape":"ShrinkPolicy"} + } + }, + "InstanceGroupModifyConfigList":{ + "type":"list", + "member":{"shape":"InstanceGroupModifyConfig"} + }, + "InstanceGroupState":{ + "type":"string", + "enum":[ + "PROVISIONING", + "BOOTSTRAPPING", + "RUNNING", + "RESIZING", + "SUSPENDED", + "TERMINATING", + "TERMINATED", + "ARRESTED", + "SHUTTING_DOWN", + "ENDED" + ] + }, + "InstanceGroupStateChangeReason":{ + "type":"structure", + "members":{ + "Code":{"shape":"InstanceGroupStateChangeReasonCode"}, + "Message":{"shape":"String"} + } + }, + "InstanceGroupStateChangeReasonCode":{ + "type":"string", + "enum":[ + "INTERNAL_ERROR", + "VALIDATION_ERROR", + "INSTANCE_FAILURE", + "CLUSTER_TERMINATED" + ] + }, + "InstanceGroupStatus":{ + "type":"structure", + "members":{ + "State":{"shape":"InstanceGroupState"}, + "StateChangeReason":{"shape":"InstanceGroupStateChangeReason"}, + "Timeline":{"shape":"InstanceGroupTimeline"} + } + }, + "InstanceGroupTimeline":{ + "type":"structure", + "members":{ + "CreationDateTime":{"shape":"Date"}, + "ReadyDateTime":{"shape":"Date"}, + "EndDateTime":{"shape":"Date"} + } + }, + "InstanceGroupType":{ + "type":"string", + "enum":[ + "MASTER", + "CORE", + "TASK" + ] + }, + "InstanceGroupTypeList":{ + "type":"list", + "member":{"shape":"InstanceGroupType"} + }, + "InstanceId":{"type":"string"}, + "InstanceList":{ + "type":"list", + "member":{"shape":"Instance"} + }, + "InstanceResizePolicy":{ + "type":"structure", + "members":{ + "InstancesToTerminate":{"shape":"EC2InstanceIdsList"}, + "InstancesToProtect":{"shape":"EC2InstanceIdsList"}, + "InstanceTerminationTimeout":{"shape":"Integer"} + } + }, + "InstanceRoleType":{ + "type":"string", + "enum":[ + "MASTER", + "CORE", + "TASK" + ] + }, + "InstanceState":{ + "type":"string", + "enum":[ + "AWAITING_FULFILLMENT", + "PROVISIONING", + "BOOTSTRAPPING", + "RUNNING", + "TERMINATED" + ] + }, + "InstanceStateChangeReason":{ + "type":"structure", + "members":{ + "Code":{"shape":"InstanceStateChangeReasonCode"}, + "Message":{"shape":"String"} + } + }, + "InstanceStateChangeReasonCode":{ + "type":"string", + "enum":[ + "INTERNAL_ERROR", + "VALIDATION_ERROR", + "INSTANCE_FAILURE", + "BOOTSTRAP_FAILURE", + "CLUSTER_TERMINATED" + ] + }, + "InstanceStateList":{ + "type":"list", + "member":{"shape":"InstanceState"} + }, + "InstanceStatus":{ + "type":"structure", + "members":{ + "State":{"shape":"InstanceState"}, + "StateChangeReason":{"shape":"InstanceStateChangeReason"}, + "Timeline":{"shape":"InstanceTimeline"} + } + }, + "InstanceTimeline":{ + "type":"structure", + "members":{ + "CreationDateTime":{"shape":"Date"}, + "ReadyDateTime":{"shape":"Date"}, + "EndDateTime":{"shape":"Date"} + } + }, + "InstanceType":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "Integer":{"type":"integer"}, + "InternalServerError":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "exception":true, + "fault":true + }, + "InvalidRequestException":{ + "type":"structure", + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "JobFlowDetail":{ + "type":"structure", + "required":[ + "JobFlowId", + "Name", + 
"ExecutionStatusDetail", + "Instances" + ], + "members":{ + "JobFlowId":{"shape":"XmlStringMaxLen256"}, + "Name":{"shape":"XmlStringMaxLen256"}, + "LogUri":{"shape":"XmlString"}, + "AmiVersion":{"shape":"XmlStringMaxLen256"}, + "ExecutionStatusDetail":{"shape":"JobFlowExecutionStatusDetail"}, + "Instances":{"shape":"JobFlowInstancesDetail"}, + "Steps":{"shape":"StepDetailList"}, + "BootstrapActions":{"shape":"BootstrapActionDetailList"}, + "SupportedProducts":{"shape":"SupportedProductsList"}, + "VisibleToAllUsers":{"shape":"Boolean"}, + "JobFlowRole":{"shape":"XmlString"}, + "ServiceRole":{"shape":"XmlString"} + } + }, + "JobFlowDetailList":{ + "type":"list", + "member":{"shape":"JobFlowDetail"} + }, + "JobFlowExecutionState":{ + "type":"string", + "enum":[ + "STARTING", + "BOOTSTRAPPING", + "RUNNING", + "WAITING", + "SHUTTING_DOWN", + "TERMINATED", + "COMPLETED", + "FAILED" + ] + }, + "JobFlowExecutionStateList":{ + "type":"list", + "member":{"shape":"JobFlowExecutionState"} + }, + "JobFlowExecutionStatusDetail":{ + "type":"structure", + "required":[ + "State", + "CreationDateTime" + ], + "members":{ + "State":{"shape":"JobFlowExecutionState"}, + "CreationDateTime":{"shape":"Date"}, + "StartDateTime":{"shape":"Date"}, + "ReadyDateTime":{"shape":"Date"}, + "EndDateTime":{"shape":"Date"}, + "LastStateChangeReason":{"shape":"XmlString"} + } + }, + "JobFlowInstancesConfig":{ + "type":"structure", + "members":{ + "MasterInstanceType":{"shape":"InstanceType"}, + "SlaveInstanceType":{"shape":"InstanceType"}, + "InstanceCount":{"shape":"Integer"}, + "InstanceGroups":{"shape":"InstanceGroupConfigList"}, + "Ec2KeyName":{"shape":"XmlStringMaxLen256"}, + "Placement":{"shape":"PlacementType"}, + "KeepJobFlowAliveWhenNoSteps":{"shape":"Boolean"}, + "TerminationProtected":{"shape":"Boolean"}, + "HadoopVersion":{"shape":"XmlStringMaxLen256"}, + "Ec2SubnetId":{"shape":"XmlStringMaxLen256"}, + "EmrManagedMasterSecurityGroup":{"shape":"XmlStringMaxLen256"}, + "EmrManagedSlaveSecurityGroup":{"shape":"XmlStringMaxLen256"}, + "ServiceAccessSecurityGroup":{"shape":"XmlStringMaxLen256"}, + "AdditionalMasterSecurityGroups":{"shape":"SecurityGroupsList"}, + "AdditionalSlaveSecurityGroups":{"shape":"SecurityGroupsList"} + } + }, + "JobFlowInstancesDetail":{ + "type":"structure", + "required":[ + "MasterInstanceType", + "SlaveInstanceType", + "InstanceCount" + ], + "members":{ + "MasterInstanceType":{"shape":"InstanceType"}, + "MasterPublicDnsName":{"shape":"XmlString"}, + "MasterInstanceId":{"shape":"XmlString"}, + "SlaveInstanceType":{"shape":"InstanceType"}, + "InstanceCount":{"shape":"Integer"}, + "InstanceGroups":{"shape":"InstanceGroupDetailList"}, + "NormalizedInstanceHours":{"shape":"Integer"}, + "Ec2KeyName":{"shape":"XmlStringMaxLen256"}, + "Ec2SubnetId":{"shape":"XmlStringMaxLen256"}, + "Placement":{"shape":"PlacementType"}, + "KeepJobFlowAliveWhenNoSteps":{"shape":"Boolean"}, + "TerminationProtected":{"shape":"Boolean"}, + "HadoopVersion":{"shape":"XmlStringMaxLen256"} + } + }, + "KeyValue":{ + "type":"structure", + "members":{ + "Key":{"shape":"XmlString"}, + "Value":{"shape":"XmlString"} + } + }, + "KeyValueList":{ + "type":"list", + "member":{"shape":"KeyValue"} + }, + "ListBootstrapActionsInput":{ + "type":"structure", + "required":["ClusterId"], + "members":{ + "ClusterId":{"shape":"ClusterId"}, + "Marker":{"shape":"Marker"} + } + }, + "ListBootstrapActionsOutput":{ + "type":"structure", + "members":{ + "BootstrapActions":{"shape":"CommandList"}, + "Marker":{"shape":"Marker"} + } + }, + 
"ListClustersInput":{ + "type":"structure", + "members":{ + "CreatedAfter":{"shape":"Date"}, + "CreatedBefore":{"shape":"Date"}, + "ClusterStates":{"shape":"ClusterStateList"}, + "Marker":{"shape":"Marker"} + } + }, + "ListClustersOutput":{ + "type":"structure", + "members":{ + "Clusters":{"shape":"ClusterSummaryList"}, + "Marker":{"shape":"Marker"} + } + }, + "ListInstanceGroupsInput":{ + "type":"structure", + "required":["ClusterId"], + "members":{ + "ClusterId":{"shape":"ClusterId"}, + "Marker":{"shape":"Marker"} + } + }, + "ListInstanceGroupsOutput":{ + "type":"structure", + "members":{ + "InstanceGroups":{"shape":"InstanceGroupList"}, + "Marker":{"shape":"Marker"} + } + }, + "ListInstancesInput":{ + "type":"structure", + "required":["ClusterId"], + "members":{ + "ClusterId":{"shape":"ClusterId"}, + "InstanceGroupId":{"shape":"InstanceGroupId"}, + "InstanceGroupTypes":{"shape":"InstanceGroupTypeList"}, + "InstanceStates":{"shape":"InstanceStateList"}, + "Marker":{"shape":"Marker"} + } + }, + "ListInstancesOutput":{ + "type":"structure", + "members":{ + "Instances":{"shape":"InstanceList"}, + "Marker":{"shape":"Marker"} + } + }, + "ListStepsInput":{ + "type":"structure", + "required":["ClusterId"], + "members":{ + "ClusterId":{"shape":"ClusterId"}, + "StepStates":{"shape":"StepStateList"}, + "StepIds":{"shape":"XmlStringList"}, + "Marker":{"shape":"Marker"} + } + }, + "ListStepsOutput":{ + "type":"structure", + "members":{ + "Steps":{"shape":"StepSummaryList"}, + "Marker":{"shape":"Marker"} + } + }, + "Marker":{"type":"string"}, + "MarketType":{ + "type":"string", + "enum":[ + "ON_DEMAND", + "SPOT" + ] + }, + "ModifyInstanceGroupsInput":{ + "type":"structure", + "members":{ + "InstanceGroups":{"shape":"InstanceGroupModifyConfigList"} + } + }, + "NewSupportedProductsList":{ + "type":"list", + "member":{"shape":"SupportedProductConfig"} + }, + "PlacementType":{ + "type":"structure", + "required":["AvailabilityZone"], + "members":{ + "AvailabilityZone":{"shape":"XmlString"} + } + }, + "RemoveTagsInput":{ + "type":"structure", + "required":[ + "ResourceId", + "TagKeys" + ], + "members":{ + "ResourceId":{"shape":"ResourceId"}, + "TagKeys":{"shape":"StringList"} + } + }, + "RemoveTagsOutput":{ + "type":"structure", + "members":{ + } + }, + "ResourceId":{"type":"string"}, + "RunJobFlowInput":{ + "type":"structure", + "required":[ + "Name", + "Instances" + ], + "members":{ + "Name":{"shape":"XmlStringMaxLen256"}, + "LogUri":{"shape":"XmlString"}, + "AdditionalInfo":{"shape":"XmlString"}, + "AmiVersion":{"shape":"XmlStringMaxLen256"}, + "ReleaseLabel":{"shape":"XmlStringMaxLen256"}, + "Instances":{"shape":"JobFlowInstancesConfig"}, + "Steps":{"shape":"StepConfigList"}, + "BootstrapActions":{"shape":"BootstrapActionConfigList"}, + "SupportedProducts":{"shape":"SupportedProductsList"}, + "NewSupportedProducts":{"shape":"NewSupportedProductsList"}, + "Applications":{"shape":"ApplicationList"}, + "Configurations":{"shape":"ConfigurationList"}, + "VisibleToAllUsers":{"shape":"Boolean"}, + "JobFlowRole":{"shape":"XmlString"}, + "ServiceRole":{"shape":"XmlString"}, + "Tags":{"shape":"TagList"} + } + }, + "RunJobFlowOutput":{ + "type":"structure", + "members":{ + "JobFlowId":{"shape":"XmlStringMaxLen256"} + } + }, + "ScriptBootstrapActionConfig":{ + "type":"structure", + "required":["Path"], + "members":{ + "Path":{"shape":"XmlString"}, + "Args":{"shape":"XmlStringList"} + } + }, + "SecurityGroupsList":{ + "type":"list", + "member":{"shape":"XmlStringMaxLen256"} + }, + "SetTerminationProtectionInput":{ 
+ "type":"structure", + "required":[ + "JobFlowIds", + "TerminationProtected" + ], + "members":{ + "JobFlowIds":{"shape":"XmlStringList"}, + "TerminationProtected":{"shape":"Boolean"} + } + }, + "SetVisibleToAllUsersInput":{ + "type":"structure", + "required":[ + "JobFlowIds", + "VisibleToAllUsers" + ], + "members":{ + "JobFlowIds":{"shape":"XmlStringList"}, + "VisibleToAllUsers":{"shape":"Boolean"} + } + }, + "ShrinkPolicy":{ + "type":"structure", + "members":{ + "DecommissionTimeout":{"shape":"Integer"}, + "InstanceResizePolicy":{"shape":"InstanceResizePolicy"} + } + }, + "Step":{ + "type":"structure", + "members":{ + "Id":{"shape":"StepId"}, + "Name":{"shape":"String"}, + "Config":{"shape":"HadoopStepConfig"}, + "ActionOnFailure":{"shape":"ActionOnFailure"}, + "Status":{"shape":"StepStatus"} + } + }, + "StepConfig":{ + "type":"structure", + "required":[ + "Name", + "HadoopJarStep" + ], + "members":{ + "Name":{"shape":"XmlStringMaxLen256"}, + "ActionOnFailure":{"shape":"ActionOnFailure"}, + "HadoopJarStep":{"shape":"HadoopJarStepConfig"} + } + }, + "StepConfigList":{ + "type":"list", + "member":{"shape":"StepConfig"} + }, + "StepDetail":{ + "type":"structure", + "required":[ + "StepConfig", + "ExecutionStatusDetail" + ], + "members":{ + "StepConfig":{"shape":"StepConfig"}, + "ExecutionStatusDetail":{"shape":"StepExecutionStatusDetail"} + } + }, + "StepDetailList":{ + "type":"list", + "member":{"shape":"StepDetail"} + }, + "StepExecutionState":{ + "type":"string", + "enum":[ + "PENDING", + "RUNNING", + "CONTINUE", + "COMPLETED", + "CANCELLED", + "FAILED", + "INTERRUPTED" + ] + }, + "StepExecutionStatusDetail":{ + "type":"structure", + "required":[ + "State", + "CreationDateTime" + ], + "members":{ + "State":{"shape":"StepExecutionState"}, + "CreationDateTime":{"shape":"Date"}, + "StartDateTime":{"shape":"Date"}, + "EndDateTime":{"shape":"Date"}, + "LastStateChangeReason":{"shape":"XmlString"} + } + }, + "StepId":{"type":"string"}, + "StepIdsList":{ + "type":"list", + "member":{"shape":"XmlStringMaxLen256"} + }, + "StepState":{ + "type":"string", + "enum":[ + "PENDING", + "RUNNING", + "COMPLETED", + "CANCELLED", + "FAILED", + "INTERRUPTED" + ] + }, + "StepStateChangeReason":{ + "type":"structure", + "members":{ + "Code":{"shape":"StepStateChangeReasonCode"}, + "Message":{"shape":"String"} + } + }, + "StepStateChangeReasonCode":{ + "type":"string", + "enum":["NONE"] + }, + "StepStateList":{ + "type":"list", + "member":{"shape":"StepState"} + }, + "StepStatus":{ + "type":"structure", + "members":{ + "State":{"shape":"StepState"}, + "StateChangeReason":{"shape":"StepStateChangeReason"}, + "Timeline":{"shape":"StepTimeline"} + } + }, + "StepSummary":{ + "type":"structure", + "members":{ + "Id":{"shape":"StepId"}, + "Name":{"shape":"String"}, + "Config":{"shape":"HadoopStepConfig"}, + "ActionOnFailure":{"shape":"ActionOnFailure"}, + "Status":{"shape":"StepStatus"} + } + }, + "StepSummaryList":{ + "type":"list", + "member":{"shape":"StepSummary"} + }, + "StepTimeline":{ + "type":"structure", + "members":{ + "CreationDateTime":{"shape":"Date"}, + "StartDateTime":{"shape":"Date"}, + "EndDateTime":{"shape":"Date"} + } + }, + "String":{"type":"string"}, + "StringList":{ + "type":"list", + "member":{"shape":"String"} + }, + "StringMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "SupportedProductConfig":{ + "type":"structure", + "members":{ + "Name":{"shape":"XmlStringMaxLen256"}, + "Args":{"shape":"XmlStringList"} + } + }, + "SupportedProductsList":{ + 
"type":"list", + "member":{"shape":"XmlStringMaxLen256"} + }, + "Tag":{ + "type":"structure", + "members":{ + "Key":{"shape":"String"}, + "Value":{"shape":"String"} + } + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"} + }, + "TerminateJobFlowsInput":{ + "type":"structure", + "required":["JobFlowIds"], + "members":{ + "JobFlowIds":{"shape":"XmlStringList"} + } + }, + "VolumeSpecification":{ + "type":"structure", + "required":[ + "VolumeType", + "SizeInGB" + ], + "members":{ + "VolumeType":{"shape":"String"}, + "Iops":{"shape":"Integer"}, + "SizeInGB":{"shape":"Integer"} + } + }, + "XmlString":{ + "type":"string", + "max":10280, + "min":0, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "XmlStringList":{ + "type":"list", + "member":{"shape":"XmlString"} + }, + "XmlStringMaxLen256":{ + "type":"string", + "max":256, + "min":0, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/docs-2.json new file mode 100644 index 000000000..6553815ad --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/docs-2.json @@ -0,0 +1,1087 @@ +{ + "version": "2.0", + "service": "

    Amazon Elastic MapReduce (Amazon EMR) is a web service that makes it easy to process large amounts of data efficiently. Amazon EMR uses Hadoop processing combined with several AWS products to do tasks such as web indexing, data mining, log file analysis, machine learning, scientific simulation, and data warehousing.
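    Readers driving this API through the vendored Go SDK may find a minimal client-construction sketch useful; the region is an assumption, and the later sketches call methods on a client built this way:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/emr"
)

func main() {
	// Shared session; credentials come from the default chain
	// (environment, shared config file, or instance profile).
	sess := session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-east-1"), // assumed region
	}))
	svc := emr.New(sess)
	_ = svc // the sketches below call methods on a client like this one
}
```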

    ", + "operations": { + "AddInstanceGroups": "

    AddInstanceGroups adds an instance group to a running cluster.
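    As a rough illustration with the vendored Go SDK, a sketch that adds a spot TASK group to a running cluster; the job flow ID, instance type, and bid price are hypothetical:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/emr"
)

// addTaskGroup adds a two-instance TASK group to a running cluster.
func addTaskGroup(svc *emr.EMR) (*emr.AddInstanceGroupsOutput, error) {
	return svc.AddInstanceGroups(&emr.AddInstanceGroupsInput{
		JobFlowId: aws.String("j-EXAMPLEID"), // hypothetical cluster ID
		InstanceGroups: []*emr.InstanceGroupConfig{{
			InstanceRole:  aws.String("TASK"),
			InstanceType:  aws.String("m3.xlarge"), // hypothetical
			InstanceCount: aws.Int64(2),
			Market:        aws.String("SPOT"),
			BidPrice:      aws.String("0.10"), // hypothetical USD/hour bid
		}},
	})
}
```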

    ", + "AddJobFlowSteps": "

    AddJobFlowSteps adds new steps to a running job flow. A maximum of 256 steps are allowed in each job flow.

    If your job flow is long-running (such as a Hive data warehouse) or complex, you may require more than 256 steps to process your data. You can bypass the 256-step limitation in various ways, including using the SSH shell to connect to the master node and submitting queries directly to the software running on the master node, such as Hive and Hadoop. For more information on how to do this, go to Add More than 256 Steps to a Job Flow in the Amazon Elastic MapReduce Developer's Guide.

    A step specifies the location of a JAR file stored either on the master node of the job flow or in Amazon S3. Each step is performed by the main function of the main class of the JAR file. The main class can be specified either in the manifest of the JAR or by using the MainClass parameter of the step.

    Elastic MapReduce executes each step in the order listed. For a step to be considered complete, the main function must exit with a zero exit code and all Hadoop jobs started while the step was running must have completed and run successfully.

    You can only add steps to a job flow that is in one of the following states: STARTING, BOOTSTRAPPING, RUNNING, or WAITING.
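    A minimal sketch of submitting one JAR step through the vendored Go SDK; the cluster ID, S3 path, and main class are hypothetical:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/emr"
)

// addStep submits one JAR step to a cluster in a valid state
// (STARTING, BOOTSTRAPPING, RUNNING, or WAITING).
func addStep(svc *emr.EMR) (*emr.AddJobFlowStepsOutput, error) {
	return svc.AddJobFlowSteps(&emr.AddJobFlowStepsInput{
		JobFlowId: aws.String("j-EXAMPLEID"), // hypothetical
		Steps: []*emr.StepConfig{{
			Name:            aws.String("example-step"),
			ActionOnFailure: aws.String("CONTINUE"),
			HadoopJarStep: &emr.HadoopJarStepConfig{
				Jar:       aws.String("s3://mybucket/myjob.jar"), // hypothetical
				MainClass: aws.String("com.example.MyJob"),       // hypothetical
				Args:      []*string{aws.String("--verbose")},
			},
		}},
	})
}
```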

    ", + "AddTags": "

    Adds tags to an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. For more information, see Tagging Amazon EMR Resources.
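    A minimal tagging sketch with the vendored Go SDK; the resource ID and tag values are hypothetical:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/emr"
)

// tagCluster attaches a cost-allocation tag to a cluster.
func tagCluster(svc *emr.EMR) error {
	_, err := svc.AddTags(&emr.AddTagsInput{
		ResourceId: aws.String("j-EXAMPLEID"), // hypothetical
		Tags: []*emr.Tag{{
			Key:   aws.String("Stack"),
			Value: aws.String("Prod"),
		}},
	})
	return err
}
```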

    ", + "DescribeCluster": "

    Provides cluster-level details including status, hardware and software configuration, VPC settings, and so on. For information about the cluster steps, see ListSteps.

    ", + "DescribeJobFlows": "

    This API is deprecated and will eventually be removed. We recommend you use ListClusters, DescribeCluster, ListSteps, ListInstanceGroups, and ListBootstrapActions instead.

    DescribeJobFlows returns a list of job flows that match all of the supplied parameters. The parameters can include a list of job flow IDs, job flow states, and restrictions on job flow creation date and time.

    Regardless of supplied parameters, only job flows created within the last two months are returned.

    If no parameters are supplied, then job flows matching either of the following criteria are returned:

    • Job flows created and completed in the last two weeks
    • Job flows created within the last two months that are in one of the following states: RUNNING, WAITING, SHUTTING_DOWN, STARTING

    Amazon Elastic MapReduce can return a maximum of 512 job flow descriptions.

    ", + "DescribeStep": "

    Provides more detail about the cluster step.

    ", + "ListBootstrapActions": "

    Provides information about the bootstrap actions associated with a cluster.

    ", + "ListClusters": "

    Provides the status of all clusters visible to this AWS account. You can filter the list of clusters by certain criteria, such as cluster creation date and time or status. Each call returns a maximum of 50 clusters, along with a marker to track the paging of the cluster list across multiple ListClusters calls.
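    A sketch of following that marker across pages with the vendored Go SDK; the WAITING state filter is just an example:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/emr"
)

// listWaitingClusters pages through all WAITING clusters using the
// marker returned by each ListClusters call (max 50 clusters per page).
func listWaitingClusters(svc *emr.EMR) error {
	input := &emr.ListClustersInput{
		ClusterStates: []*string{aws.String("WAITING")},
	}
	for {
		out, err := svc.ListClusters(input)
		if err != nil {
			return err
		}
		for _, c := range out.Clusters {
			fmt.Println(aws.StringValue(c.Id), aws.StringValue(c.Name))
		}
		if out.Marker == nil {
			return nil // no more pages
		}
		input.Marker = out.Marker // continue from where this page ended
	}
}
```

    The SDK's generated ListClustersPages helper wraps the same loop.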

    ", + "ListInstanceGroups": "

    Provides all available details about the instance groups in a cluster.

    ", + "ListInstances": "

    Provides information about the cluster instances that Amazon EMR provisions on behalf of a user when it creates the cluster. For example, this operation indicates when the EC2 instances reach the Ready state, when instances become available to Amazon EMR to use for jobs, and the IP addresses of the cluster instances.

    ", + "ListSteps": "

    Provides a list of steps for the cluster.

    ", + "ModifyInstanceGroups": "

    ModifyInstanceGroups modifies the number of nodes and configuration settings of an instance group. The input parameters include the new target instance count for the group and the instance group ID. The call will either succeed or fail atomically.
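    A resize sketch with the vendored Go SDK; the instance group ID and target size are hypothetical:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/emr"
)

// resizeGroup sets a new target size for one instance group.
func resizeGroup(svc *emr.EMR) error {
	_, err := svc.ModifyInstanceGroups(&emr.ModifyInstanceGroupsInput{
		InstanceGroups: []*emr.InstanceGroupModifyConfig{{
			InstanceGroupId: aws.String("ig-EXAMPLEID"), // hypothetical
			InstanceCount:   aws.Int64(5),               // new target size
		}},
	})
	return err
}
```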

    ", + "RemoveTags": "

    Removes tags from an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. For more information, see Tagging Amazon EMR Resources.

    For example, you can remove a Stack tag with the value Prod from a cluster, as sketched below.
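    A minimal sketch of that removal with the vendored Go SDK; note that tags are removed by key, so the value is not part of the request, and the cluster ID is hypothetical:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/emr"
)

// untagCluster removes the Stack tag from a cluster; tags are removed
// by key, so the value (e.g. Prod) does not appear in the request.
func untagCluster(svc *emr.EMR) error {
	_, err := svc.RemoveTags(&emr.RemoveTagsInput{
		ResourceId: aws.String("j-EXAMPLEID"), // hypothetical
		TagKeys:    []*string{aws.String("Stack")},
	})
	return err
}
```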

    ", + "RunJobFlow": "

    RunJobFlow creates and starts running a new job flow. The job flow will run the steps specified. Once the job flow completes, the cluster is stopped and the HDFS partition is lost. To prevent loss of data, configure the last step of the job flow to store results in Amazon S3. If the JobFlowInstancesConfig KeepJobFlowAliveWhenNoSteps parameter is set to TRUE, the job flow will transition to the WAITING state rather than shutting down once the steps have completed.

    For additional protection, you can set the JobFlowInstancesConfig TerminationProtected parameter to TRUE to lock the job flow and prevent it from being terminated by API call, user intervention, or in the event of a job flow error.

    A maximum of 256 steps are allowed in each job flow.

    If your job flow is long-running (such as a Hive data warehouse) or complex, you may require more than 256 steps to process your data. You can bypass the 256-step limitation in various ways, including using the SSH shell to connect to the master node and submitting queries directly to the software running on the master node, such as Hive and Hadoop. For more information on how to do this, go to Add More than 256 Steps to a Job Flow in the Amazon Elastic MapReduce Developer's Guide.

    For long-running job flows, we recommend that you periodically store your results.
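    Putting the key RunJobFlow parameters together, a sketch that launches a cluster which waits after its steps complete; the name, roles, release label, log URI, and instance types are all assumptions:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/emr"
)

// startCluster launches a small cluster that stays alive (WAITING)
// after its steps finish instead of shutting down.
func startCluster(svc *emr.EMR) (*emr.RunJobFlowOutput, error) {
	return svc.RunJobFlow(&emr.RunJobFlowInput{
		Name:         aws.String("example-cluster"),
		ReleaseLabel: aws.String("emr-5.7.0"),           // hypothetical release
		Applications: []*emr.Application{{Name: aws.String("Hadoop")}},
		ServiceRole:  aws.String("EMR_DefaultRole"),     // assumed default role
		JobFlowRole:  aws.String("EMR_EC2_DefaultRole"), // assumed default role
		LogUri:       aws.String("s3://mybucket/logs/"), // hypothetical
		Instances: &emr.JobFlowInstancesConfig{
			MasterInstanceType:          aws.String("m3.xlarge"), // hypothetical
			SlaveInstanceType:           aws.String("m3.xlarge"), // hypothetical
			InstanceCount:               aws.Int64(3),
			KeepJobFlowAliveWhenNoSteps: aws.Bool(true), // WAITING, not shutdown
		},
	})
}
```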

    ", + "SetTerminationProtection": "

    SetTerminationProtection locks a job flow so the Amazon EC2 instances in the cluster cannot be terminated by user intervention, an API call, or in the event of a job-flow error. The cluster still terminates upon successful completion of the job flow. Calling SetTerminationProtection on a job flow is analogous to calling the Amazon EC2 DisableAPITermination API on all of the EC2 instances in a cluster.

    SetTerminationProtection is used to prevent accidental termination of a job flow and to ensure that in the event of an error, the instances will persist so you can recover any data stored in their ephemeral instance storage.

    To terminate a job flow that has been locked by setting SetTerminationProtection to true, you must first unlock the job flow by a subsequent call to SetTerminationProtection in which you set the value to false.

    For more information, go to Protecting a Job Flow from Termination in the Amazon Elastic MapReduce Developer's Guide.
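    A sketch of toggling protection with the vendored Go SDK; the job flow ID is hypothetical:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/emr"
)

// setProtection locks (or, with protect=false, unlocks) job flows
// against termination by API call or user intervention.
func setProtection(svc *emr.EMR, protect bool) error {
	_, err := svc.SetTerminationProtection(&emr.SetTerminationProtectionInput{
		JobFlowIds:           []*string{aws.String("j-EXAMPLEID")}, // hypothetical
		TerminationProtected: aws.Bool(protect),
	})
	return err
}
```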

    ", + "SetVisibleToAllUsers": "

    Sets whether all AWS Identity and Access Management (IAM) users under your account can access the specified job flows. This action works on running job flows. You can also set the visibility of a job flow when you launch it using the VisibleToAllUsers parameter of RunJobFlow. The SetVisibleToAllUsers action can be called only by an IAM user who created the job flow or the AWS account that owns the job flow.

    ", + "TerminateJobFlows": "

    TerminateJobFlows shuts down a list of job flows. When a job flow is shut down, any step not yet completed is canceled and the EC2 instances on which the job flow is running are stopped. Any log files not already saved are uploaded to Amazon S3 if a LogUri was specified when the job flow was created.

    A maximum of 10 job flows can be terminated in a single call. The call to TerminateJobFlows is asynchronous. Depending on the configuration of the job flow, it may take from 5 to 20 minutes for the job flow to completely terminate and release allocated resources, such as Amazon EC2 instances.
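    A minimal termination sketch with the vendored Go SDK; the job flow ID is hypothetical:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/emr"
)

// terminate asks EMR to shut down up to ten job flows; the call
// returns before termination actually completes.
func terminate(svc *emr.EMR) error {
	_, err := svc.TerminateJobFlows(&emr.TerminateJobFlowsInput{
		JobFlowIds: []*string{aws.String("j-EXAMPLEID")}, // hypothetical
	})
	return err
}
```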

    " + }, + "shapes": { + "ActionOnFailure": { + "base": null, + "refs": { + "Step$ActionOnFailure": "

    This specifies what action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE.

    ", + "StepConfig$ActionOnFailure": "

    The action to take if the job flow step fails.

    ", + "StepSummary$ActionOnFailure": "

    This specifies what action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE.

    " + } + }, + "AddInstanceGroupsInput": { + "base": "

    Input to an AddInstanceGroups call.

    ", + "refs": { + } + }, + "AddInstanceGroupsOutput": { + "base": "

    Output from an AddInstanceGroups call.

    ", + "refs": { + } + }, + "AddJobFlowStepsInput": { + "base": "

    The input argument to the AddJobFlowSteps operation.

    ", + "refs": { + } + }, + "AddJobFlowStepsOutput": { + "base": "

    The output for the AddJobFlowSteps operation.

    ", + "refs": { + } + }, + "AddTagsInput": { + "base": "

    This input identifies a cluster and a list of tags to attach.

    ", + "refs": { + } + }, + "AddTagsOutput": { + "base": "

    This output indicates the result of adding tags to a resource.

    ", + "refs": { + } + }, + "Application": { + "base": "

    An application is any Amazon or third-party software that you can add to the cluster. This structure contains a list of strings that indicates the software to use with the cluster and accepts a user argument list. Amazon EMR accepts and forwards the argument list to the corresponding installation script as bootstrap action arguments. For more information, see Launch a Job Flow on the MapR Distribution for Hadoop. Currently supported values are:

    • \"mapr-m3\" - launch the job flow using MapR M3 Edition.
    • \"mapr-m5\" - launch the job flow using MapR M5 Edition.
    • \"mapr\" with the user arguments specifying \"--edition,m3\" or \"--edition,m5\" - launch the job flow using MapR M3 or M5 Edition, respectively.

    In Amazon EMR releases 4.0 and greater, the only accepted parameter is the application name. To pass arguments to applications, you supply a configuration for each application.

    ", + "refs": { + "ApplicationList$member": null + } + }, + "ApplicationList": { + "base": null, + "refs": { + "Cluster$Applications": "

    The applications installed on this cluster.

    ", + "RunJobFlowInput$Applications": "

    Amazon EMR releases 4.x or later.

    A list of applications for the cluster. Valid values are: \"Hadoop\", \"Hive\", \"Mahout\", \"Pig\", and \"Spark.\" They are case insensitive.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "Cluster$AutoTerminate": "

    Specifies whether the cluster should terminate after completing all steps.

    ", + "Cluster$TerminationProtected": "

    Indicates whether Amazon EMR will lock the cluster to prevent the EC2 instances from being terminated by an API call or user intervention, or in the event of a cluster error.

    ", + "Cluster$VisibleToAllUsers": "

    Indicates whether the job flow is visible to all IAM users of the AWS account associated with the job flow. If this value is set to true, all IAM users of that AWS account can view and manage the job flow if they have the proper policy permissions set. If this value is false, only the IAM user that created the cluster can view and manage it. This value can be changed using the SetVisibleToAllUsers action.

    ", + "JobFlowDetail$VisibleToAllUsers": "

    Specifies whether the job flow is visible to all IAM users of the AWS account associated with the job flow. If this value is set to true, all IAM users of that AWS account can view and (if they have the proper policy permissions set) manage the job flow. If it is set to false, only the IAM user that created the job flow can view and manage it. This value can be changed using the SetVisibleToAllUsers action.

    ", + "JobFlowInstancesConfig$KeepJobFlowAliveWhenNoSteps": "

    Specifies whether the job flow should be kept alive after completing all steps.

    ", + "JobFlowInstancesConfig$TerminationProtected": "

    Specifies whether to lock the job flow to prevent the Amazon EC2 instances from being terminated by API call, user intervention, or in the event of a job flow error.

    ", + "JobFlowInstancesDetail$KeepJobFlowAliveWhenNoSteps": "

    Specifies whether the job flow should terminate after completing all steps.

    ", + "JobFlowInstancesDetail$TerminationProtected": "

    Specifies whether the Amazon EC2 instances in the cluster are protected from termination by API calls, user intervention, or in the event of a job flow error.

    ", + "RunJobFlowInput$VisibleToAllUsers": "

    Whether the job flow is visible to all IAM users of the AWS account associated with the job flow. If this value is set to true, all IAM users of that AWS account can view and (if they have the proper policy permissions set) manage the job flow. If it is set to false, only the IAM user that created the job flow can view and manage it.

    ", + "SetTerminationProtectionInput$TerminationProtected": "

    A Boolean that indicates whether to protect the job flow and prevent the Amazon EC2 instances in the cluster from shutting down due to API calls, user intervention, or job-flow error.

    ", + "SetVisibleToAllUsersInput$VisibleToAllUsers": "

    Whether the specified job flows are visible to all IAM users of the AWS account associated with the job flow. If this value is set to True, all IAM users of that AWS account can view and, if they have the proper IAM policy permissions set, manage the job flows. If it is set to False, only the IAM user that created a job flow can view and manage it.

    " + } + }, + "BooleanObject": { + "base": null, + "refs": { + "EbsConfiguration$EbsOptimized": null, + "InstanceGroup$EbsOptimized": "

    Indicates whether the instance group is EBS-optimized. An Amazon EBS-optimized instance uses an optimized configuration stack and provides additional, dedicated capacity for Amazon EBS I/O.

    " + } + }, + "BootstrapActionConfig": { + "base": "

    Configuration of a bootstrap action.

    ", + "refs": { + "BootstrapActionConfigList$member": null, + "BootstrapActionDetail$BootstrapActionConfig": "

    A description of the bootstrap action.

    " + } + }, + "BootstrapActionConfigList": { + "base": null, + "refs": { + "RunJobFlowInput$BootstrapActions": "

    A list of bootstrap actions that will be run before Hadoop is started on the cluster nodes.

    " + } + }, + "BootstrapActionDetail": { + "base": "

    Reports the configuration of a bootstrap action in a job flow.

    ", + "refs": { + "BootstrapActionDetailList$member": null + } + }, + "BootstrapActionDetailList": { + "base": null, + "refs": { + "JobFlowDetail$BootstrapActions": "

    A list of the bootstrap actions run by the job flow.

    " + } + }, + "Cluster": { + "base": "

    The detailed description of the cluster.

    ", + "refs": { + "DescribeClusterOutput$Cluster": "

    This output contains the details for the requested cluster.

    " + } + }, + "ClusterId": { + "base": null, + "refs": { + "Cluster$Id": "

    The unique identifier for the cluster.

    ", + "ClusterSummary$Id": "

    The unique identifier for the cluster.

    ", + "DescribeClusterInput$ClusterId": "

    The identifier of the cluster to describe.

    ", + "DescribeStepInput$ClusterId": "

    The identifier of the cluster with steps to describe.

    ", + "ListBootstrapActionsInput$ClusterId": "

    The cluster identifier for the bootstrap actions to list.

    ", + "ListInstanceGroupsInput$ClusterId": "

    The identifier of the cluster for which to list the instance groups.

    ", + "ListInstancesInput$ClusterId": "

    The identifier of the cluster for which to list the instances.

    ", + "ListStepsInput$ClusterId": "

    The identifier of the cluster for which to list the steps.

    " + } + }, + "ClusterState": { + "base": null, + "refs": { + "ClusterStateList$member": null, + "ClusterStatus$State": "

    The current state of the cluster.

    " + } + }, + "ClusterStateChangeReason": { + "base": "

    The reason that the cluster changed to its current state.

    ", + "refs": { + "ClusterStatus$StateChangeReason": "

    The reason for the cluster status change.

    " + } + }, + "ClusterStateChangeReasonCode": { + "base": null, + "refs": { + "ClusterStateChangeReason$Code": "

    The programmatic code for the state change reason.

    " + } + }, + "ClusterStateList": { + "base": null, + "refs": { + "ListClustersInput$ClusterStates": "

    The cluster state filters to apply when listing clusters.

    " + } + }, + "ClusterStatus": { + "base": "

    The detailed status of the cluster.

    ", + "refs": { + "Cluster$Status": "

    The current status details about the cluster.

    ", + "ClusterSummary$Status": "

    The details about the current status of the cluster.

    " + } + }, + "ClusterSummary": { + "base": "

    The summary description of the cluster.

    ", + "refs": { + "ClusterSummaryList$member": null + } + }, + "ClusterSummaryList": { + "base": null, + "refs": { + "ListClustersOutput$Clusters": "

    The list of clusters for the account based on the given filters.

    " + } + }, + "ClusterTimeline": { + "base": "

    Represents the timeline of the cluster's lifecycle.

    ", + "refs": { + "ClusterStatus$Timeline": "

    A timeline that represents the status of a cluster over the lifetime of the cluster.

    " + } + }, + "Command": { + "base": "

    An entity describing an executable that runs on a cluster.

    ", + "refs": { + "CommandList$member": null + } + }, + "CommandList": { + "base": null, + "refs": { + "ListBootstrapActionsOutput$BootstrapActions": "

    The bootstrap actions associated with the cluster.

    " + } + }, + "Configuration": { + "base": "

    Amazon EMR releases 4.x or later.

    Specifies a hardware and software configuration of the EMR cluster. This includes configurations for applications and software bundled with Amazon EMR. The Configuration object is a JSON object which is defined by a classification and a set of properties. Configurations can be nested, so a configuration may have its own Configuration objects listed.

    ", + "refs": { + "ConfigurationList$member": null + } + }, + "ConfigurationList": { + "base": null, + "refs": { + "Cluster$Configurations": "

    Amazon EMR releases 4.x or later.

    The list of Configurations supplied to the EMR cluster.

    ", + "Configuration$Configurations": "

    A list of configurations you apply to this configuration object.

    ", + "InstanceGroup$Configurations": "

    Amazon EMR releases 4.x or later.

    The list of configurations supplied for an EMR cluster instance group. You can specify a separate configuration for each instance group (master, core, and task).

    ", + "InstanceGroupConfig$Configurations": "

    Amazon EMR releases 4.x or later.

    The list of configurations supplied for an EMR cluster instance group. You can specify a separate configuration for each instance group (master, core, and task).

    ", + "RunJobFlowInput$Configurations": "

    Amazon EMR releases 4.x or later.

    The list of configurations supplied for the EMR cluster you are creating.

    " + } + }, + "Date": { + "base": null, + "refs": { + "ClusterTimeline$CreationDateTime": "

    The creation date and time of the cluster.

    ", + "ClusterTimeline$ReadyDateTime": "

    The date and time when the cluster was ready to execute steps.

    ", + "ClusterTimeline$EndDateTime": "

    The date and time when the cluster was terminated.

    ", + "DescribeJobFlowsInput$CreatedAfter": "

    Return only job flows created after this date and time.

    ", + "DescribeJobFlowsInput$CreatedBefore": "

    Return only job flows created before this date and time.

    ", + "InstanceGroupDetail$CreationDateTime": "

    The date/time the instance group was created.

    ", + "InstanceGroupDetail$StartDateTime": "

    The date/time the instance group was started.

    ", + "InstanceGroupDetail$ReadyDateTime": "

    The date/time the instance group was available to the cluster.

    ", + "InstanceGroupDetail$EndDateTime": "

    The date/time the instance group was terminated.

    ", + "InstanceGroupTimeline$CreationDateTime": "

    The creation date and time of the instance group.

    ", + "InstanceGroupTimeline$ReadyDateTime": "

    The date and time when the instance group became ready to perform tasks.

    ", + "InstanceGroupTimeline$EndDateTime": "

    The date and time when the instance group terminated.

    ", + "InstanceTimeline$CreationDateTime": "

    The creation date and time of the instance.

    ", + "InstanceTimeline$ReadyDateTime": "

    The date and time when the instance was ready to perform tasks.

    ", + "InstanceTimeline$EndDateTime": "

    The date and time when the instance was terminated.

    ", + "JobFlowExecutionStatusDetail$CreationDateTime": "

    The creation date and time of the job flow.

    ", + "JobFlowExecutionStatusDetail$StartDateTime": "

    The start date and time of the job flow.

    ", + "JobFlowExecutionStatusDetail$ReadyDateTime": "

    The date and time when the job flow was ready to start running bootstrap actions.

    ", + "JobFlowExecutionStatusDetail$EndDateTime": "

    The completion date and time of the job flow.

    ", + "ListClustersInput$CreatedAfter": "

    The creation date and time beginning value filter for listing clusters.

    ", + "ListClustersInput$CreatedBefore": "

    The creation date and time end value filter for listing clusters.

    ", + "StepExecutionStatusDetail$CreationDateTime": "

    The creation date and time of the step.

    ", + "StepExecutionStatusDetail$StartDateTime": "

    The start date and time of the step.

    ", + "StepExecutionStatusDetail$EndDateTime": "

    The completion date and time of the step.

    ", + "StepTimeline$CreationDateTime": "

    The date and time when the cluster step was created.

    ", + "StepTimeline$StartDateTime": "

    The date and time when the cluster step execution started.

    ", + "StepTimeline$EndDateTime": "

    The date and time when the cluster step execution completed or failed.

    " + } + }, + "DescribeClusterInput": { + "base": "

    This input determines which cluster to describe.

    ", + "refs": { + } + }, + "DescribeClusterOutput": { + "base": "

    This output contains the description of the cluster.

    ", + "refs": { + } + }, + "DescribeJobFlowsInput": { + "base": "

    The input for the DescribeJobFlows operation.

    ", + "refs": { + } + }, + "DescribeJobFlowsOutput": { + "base": "

    The output for the DescribeJobFlows operation.

    ", + "refs": { + } + }, + "DescribeStepInput": { + "base": "

    This input determines which step to describe.

    ", + "refs": { + } + }, + "DescribeStepOutput": { + "base": "

    This output contains the description of the cluster step.

    ", + "refs": { + } + }, + "EC2InstanceIdsList": { + "base": null, + "refs": { + "InstanceResizePolicy$InstancesToTerminate": "

    Specific list of instances to be terminated when shrinking an instance group.

    ", + "InstanceResizePolicy$InstancesToProtect": "

    Specific list of instances to be protected when shrinking an instance group.

    " + } + }, + "EC2InstanceIdsToTerminateList": { + "base": null, + "refs": { + "InstanceGroupModifyConfig$EC2InstanceIdsToTerminate": "

    The EC2 InstanceIds to terminate. Once you terminate the instances, the instance group will not return to its original requested size.

    " + } + }, + "EbsBlockDevice": { + "base": "

    Configuration of requested EBS block device associated with the instance group.

    ", + "refs": { + "EbsBlockDeviceList$member": null + } + }, + "EbsBlockDeviceConfig": { + "base": "

    Configuration of requested EBS block devices associated with the instance group, including the number of volumes that will be associated with every instance.

    ", + "refs": { + "EbsBlockDeviceConfigList$member": null + } + }, + "EbsBlockDeviceConfigList": { + "base": null, + "refs": { + "EbsConfiguration$EbsBlockDeviceConfigs": null + } + }, + "EbsBlockDeviceList": { + "base": null, + "refs": { + "InstanceGroup$EbsBlockDevices": "

    The EBS block devices that are mapped to this instance group.

    " + } + }, + "EbsConfiguration": { + "base": null, + "refs": { + "InstanceGroupConfig$EbsConfiguration": "

    EBS configurations that will be attached to each Amazon EC2 instance in the instance group.

    " + } + }, + "EbsVolume": { + "base": "

    EBS block device that's attached to an EC2 instance.

    ", + "refs": { + "EbsVolumeList$member": null + } + }, + "EbsVolumeList": { + "base": null, + "refs": { + "Instance$EbsVolumes": "

    The list of EBS volumes that are attached to this instance.

    " + } + }, + "Ec2InstanceAttributes": { + "base": "

    Provides information about the EC2 instances in a cluster grouped by category. For example, key name, subnet ID, IAM instance profile, and so on.

    ", + "refs": { + "Cluster$Ec2InstanceAttributes": null + } + }, + "ErrorCode": { + "base": null, + "refs": { + "InvalidRequestException$ErrorCode": "

    The error code associated with the exception.

    " + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "InternalServerException$Message": "

    The message associated with the exception.

    ", + "InvalidRequestException$Message": "

    The message associated with the exception.

    " + } + }, + "HadoopJarStepConfig": { + "base": "

    A job flow step consisting of a JAR file whose main function will be executed. The main function submits a job for Hadoop to execute and waits for the job to finish or fail.

    ", + "refs": { + "StepConfig$HadoopJarStep": "

    The JAR file used for the job flow step.

    " + } + }, + "HadoopStepConfig": { + "base": "

    A cluster step consisting of a JAR file whose main function will be executed. The main function submits a job for Hadoop to execute and waits for the job to finish or fail.

    ", + "refs": { + "Step$Config": "

    The Hadoop job configuration of the cluster step.

    ", + "StepSummary$Config": "

    The Hadoop job configuration of the cluster step.

    " + } + }, + "Instance": { + "base": "

    Represents an EC2 instance provisioned as part of a cluster.

    ", + "refs": { + "InstanceList$member": null + } + }, + "InstanceGroup": { + "base": "

    This entity represents an instance group, which is a group of instances that have a common purpose. For example, the CORE instance group is used for HDFS.

    ", + "refs": { + "InstanceGroupList$member": null + } + }, + "InstanceGroupConfig": { + "base": "

    Configuration defining a new instance group.

    ", + "refs": { + "InstanceGroupConfigList$member": null + } + }, + "InstanceGroupConfigList": { + "base": null, + "refs": { + "AddInstanceGroupsInput$InstanceGroups": "

    Instance Groups to add.

    ", + "JobFlowInstancesConfig$InstanceGroups": "

    Configuration for the job flow's instance groups.

    " + } + }, + "InstanceGroupDetail": { + "base": "

    Detailed information about an instance group.

    ", + "refs": { + "InstanceGroupDetailList$member": null + } + }, + "InstanceGroupDetailList": { + "base": null, + "refs": { + "JobFlowInstancesDetail$InstanceGroups": "

    Details about the job flow's instance groups.

    " + } + }, + "InstanceGroupId": { + "base": null, + "refs": { + "InstanceGroup$Id": "

    The identifier of the instance group.

    ", + "ListInstancesInput$InstanceGroupId": "

    The identifier of the instance group for which to list the instances.

    " + } + }, + "InstanceGroupIdsList": { + "base": null, + "refs": { + "AddInstanceGroupsOutput$InstanceGroupIds": "

    Instance group IDs of the newly created instance groups.

    " + } + }, + "InstanceGroupList": { + "base": null, + "refs": { + "ListInstanceGroupsOutput$InstanceGroups": "

    The list of instance groups for the cluster and given filters.

    " + } + }, + "InstanceGroupModifyConfig": { + "base": "

    Modify an instance group size.

    ", + "refs": { + "InstanceGroupModifyConfigList$member": null + } + }, + "InstanceGroupModifyConfigList": { + "base": null, + "refs": { + "ModifyInstanceGroupsInput$InstanceGroups": "

    Instance groups to change.

    " + } + }, + "InstanceGroupState": { + "base": null, + "refs": { + "InstanceGroupDetail$State": "

    State of instance group. The following values are deprecated: STARTING, TERMINATED, and FAILED.

    ", + "InstanceGroupStatus$State": "

    The current state of the instance group.

    " + } + }, + "InstanceGroupStateChangeReason": { + "base": "

    The status change reason details for the instance group.

    ", + "refs": { + "InstanceGroupStatus$StateChangeReason": "

    The status change reason details for the instance group.

    " + } + }, + "InstanceGroupStateChangeReasonCode": { + "base": null, + "refs": { + "InstanceGroupStateChangeReason$Code": "

    The programmable code for the state change reason.

    " + } + }, + "InstanceGroupStatus": { + "base": "

    The details of the instance group status.

    ", + "refs": { + "InstanceGroup$Status": "

    The current status of the instance group.

    " + } + }, + "InstanceGroupTimeline": { + "base": "

    The timeline of the instance group lifecycle.

    ", + "refs": { + "InstanceGroupStatus$Timeline": "

    The timeline of the instance group status over time.

    " + } + }, + "InstanceGroupType": { + "base": null, + "refs": { + "InstanceGroup$InstanceGroupType": "

    The type of the instance group. Valid values are MASTER, CORE or TASK.

    ", + "InstanceGroupTypeList$member": null + } + }, + "InstanceGroupTypeList": { + "base": null, + "refs": { + "ListInstancesInput$InstanceGroupTypes": "

    The type of instance group for which to list the instances.

    " + } + }, + "InstanceId": { + "base": null, + "refs": { + "EC2InstanceIdsList$member": null, + "EC2InstanceIdsToTerminateList$member": null, + "Instance$Id": "

    The unique identifier for the instance in Amazon EMR.

    ", + "Instance$Ec2InstanceId": "

    The unique identifier of the instance in Amazon EC2.

    " + } + }, + "InstanceList": { + "base": null, + "refs": { + "ListInstancesOutput$Instances": "

    The list of instances for the cluster and given filters.

    " + } + }, + "InstanceResizePolicy": { + "base": "

    Custom policy for requesting termination protection or termination of specific instances when shrinking an instance group.

    ", + "refs": { + "ShrinkPolicy$InstanceResizePolicy": "

    Custom policy for requesting termination protection or termination of specific instances when shrinking an instance group.

    " + } + }, + "InstanceRoleType": { + "base": null, + "refs": { + "InstanceGroupConfig$InstanceRole": "

    The role of the instance group in the cluster.

    ", + "InstanceGroupDetail$InstanceRole": "

    The instance group role in the cluster.

    " + } + }, + "InstanceState": { + "base": null, + "refs": { + "InstanceStateList$member": null, + "InstanceStatus$State": "

    The current state of the instance.

    " + } + }, + "InstanceStateChangeReason": { + "base": "

    The details of the status change reason for the instance.

    ", + "refs": { + "InstanceStatus$StateChangeReason": "

    The details of the status change reason for the instance.

    " + } + }, + "InstanceStateChangeReasonCode": { + "base": null, + "refs": { + "InstanceStateChangeReason$Code": "

    The programmable code for the state change reason.

    " + } + }, + "InstanceStateList": { + "base": null, + "refs": { + "ListInstancesInput$InstanceStates": "

    A list of instance states that will filter the instances returned with this request.

    " + } + }, + "InstanceStatus": { + "base": "

    The instance status details.

    ", + "refs": { + "Instance$Status": "

    The current status of the instance.

    " + } + }, + "InstanceTimeline": { + "base": "

    The timeline of the instance lifecycle.

    ", + "refs": { + "InstanceStatus$Timeline": "

    The timeline of the instance status over time.

    " + } + }, + "InstanceType": { + "base": null, + "refs": { + "InstanceGroup$InstanceType": "

    The EC2 instance type for all instances in the instance group.

    ", + "InstanceGroupConfig$InstanceType": "

    The Amazon EC2 instance type for all instances in the instance group.

    ", + "InstanceGroupDetail$InstanceType": "

    Amazon EC2 Instance type.

    ", + "JobFlowInstancesConfig$MasterInstanceType": "

    The EC2 instance type of the master node.

    ", + "JobFlowInstancesConfig$SlaveInstanceType": "

    The EC2 instance type of the slave nodes.

    ", + "JobFlowInstancesDetail$MasterInstanceType": "

    The Amazon EC2 master node instance type.

    ", + "JobFlowInstancesDetail$SlaveInstanceType": "

    The Amazon EC2 slave node instance type.

    " + } + }, + "Integer": { + "base": null, + "refs": { + "Cluster$NormalizedInstanceHours": "

    An approximation of the cost of the job flow, represented in m1.small/hours. This value is incremented one time for every hour an m1.small instance runs. Larger instances are weighted more, so an EC2 instance that is roughly four times more expensive would result in the normalized instance hours being incremented by four. This result is only an approximation and does not reflect the actual billing rate.

    ", + "ClusterSummary$NormalizedInstanceHours": "

    An approximation of the cost of the job flow, represented in m1.small/hours. This value is incremented one time for every hour an m1.small instance runs. Larger instances are weighted more, so an EC2 instance that is roughly four times more expensive would result in the normalized instance hours being incremented by four. This result is only an approximation and does not reflect the actual billing rate.

    ", + "EbsBlockDeviceConfig$VolumesPerInstance": "

    The number of EBS volumes with a specific volume configuration that will be associated with every instance in the instance group.

    ", + "InstanceGroup$RequestedInstanceCount": "

    The target number of instances for the instance group.

    ", + "InstanceGroup$RunningInstanceCount": "

    The number of instances currently running in this instance group.

    ", + "InstanceGroupConfig$InstanceCount": "

    Target number of instances for the instance group.

    ", + "InstanceGroupDetail$InstanceRequestCount": "

    Target number of instances to run in the instance group.

    ", + "InstanceGroupDetail$InstanceRunningCount": "

    Actual count of running instances.

    ", + "InstanceGroupModifyConfig$InstanceCount": "

    Target size for the instance group.

    ", + "InstanceResizePolicy$InstanceTerminationTimeout": "

    Decommissioning timeout override for the specific list of instances to be terminated.

    ", + "JobFlowInstancesConfig$InstanceCount": "

    The number of Amazon EC2 instances used to execute the job flow.

    ", + "JobFlowInstancesDetail$InstanceCount": "

    The number of Amazon EC2 instances in the cluster. If the value is 1, the same instance serves as both the master and slave node. If the value is greater than 1, one instance is the master node and all others are slave nodes.

    ", + "JobFlowInstancesDetail$NormalizedInstanceHours": "

    An approximation of the cost of the job flow, represented in m1.small/hours. This value is incremented once for every hour an m1.small runs. Larger instances are weighted more, so an Amazon EC2 instance that is roughly four times more expensive would result in the normalized instance hours being incremented by four. This result is only an approximation and does not reflect the actual billing rate.

    ", + "ShrinkPolicy$DecommissionTimeout": "

    The desired timeout for decommissioning an instance. Overrides the default YARN decommissioning timeout.

    ", + "VolumeSpecification$Iops": "

    The number of I/O operations per second (IOPS) that the volume supports.

    ", + "VolumeSpecification$SizeInGB": "

    The volume size, in gibibytes (GiB). This can be a number from 1 to 1024. If the volume type is EBS-optimized, the minimum value is 10.

    " + } + }, + "InternalServerError": { + "base": "

    Indicates that an error occurred while processing the request and that the request was not completed.

    ", + "refs": { + } + }, + "InternalServerException": { + "base": "

    This exception occurs when there is an internal failure in the EMR service.

    ", + "refs": { + } + }, + "InvalidRequestException": { + "base": "

    This exception occurs when there is something wrong with user input.

    ", + "refs": { + } + }, + "JobFlowDetail": { + "base": "

    A description of a job flow.

    ", + "refs": { + "JobFlowDetailList$member": null + } + }, + "JobFlowDetailList": { + "base": null, + "refs": { + "DescribeJobFlowsOutput$JobFlows": "

    A list of job flows matching the parameters supplied.

    " + } + }, + "JobFlowExecutionState": { + "base": "

    The type of instance.

    A small instance

    A large instance

    ", + "refs": { + "JobFlowExecutionStateList$member": null, + "JobFlowExecutionStatusDetail$State": "

    The state of the job flow.

    " + } + }, + "JobFlowExecutionStateList": { + "base": null, + "refs": { + "DescribeJobFlowsInput$JobFlowStates": "

    Return only job flows whose state is contained in this list.

    " + } + }, + "JobFlowExecutionStatusDetail": { + "base": "

    Describes the status of the job flow.

    ", + "refs": { + "JobFlowDetail$ExecutionStatusDetail": "

    Describes the execution status of the job flow.

    " + } + }, + "JobFlowInstancesConfig": { + "base": "

    A description of the Amazon EC2 instances running the job flow. A valid JobFlowInstancesConfig must contain at least InstanceGroups, which is the recommended configuration. However, a valid alternative is to have MasterInstanceType, SlaveInstanceType, and InstanceCount (all three must be present); both forms are sketched below.

    ", + "refs": { + "RunJobFlowInput$Instances": "

    A specification of the number and type of Amazon EC2 instances on which to run the job flow.
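
    A minimal Go sketch of the two valid forms described above, using the vendored aws-sdk-go EMR types; the instance types and counts are placeholders:

        package main

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/emr"
        )

        // Recommended form: specify InstanceGroups.
        var withGroups = &emr.JobFlowInstancesConfig{
            InstanceGroups: []*emr.InstanceGroupConfig{
                {
                    InstanceRole:  aws.String("MASTER"),
                    InstanceType:  aws.String("m3.xlarge"), // placeholder
                    InstanceCount: aws.Int64(1),
                },
                {
                    InstanceRole:  aws.String("CORE"),
                    InstanceType:  aws.String("m3.xlarge"), // placeholder
                    InstanceCount: aws.Int64(2),
                },
            },
        }

        // Valid alternative: MasterInstanceType, SlaveInstanceType, and
        // InstanceCount must all three be present.
        var withTypes = &emr.JobFlowInstancesConfig{
            MasterInstanceType: aws.String("m3.xlarge"), // placeholder
            SlaveInstanceType:  aws.String("m3.xlarge"), // placeholder
            InstanceCount:      aws.Int64(3),
        }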

    " + } + }, + "JobFlowInstancesDetail": { + "base": "

    Describes the Amazon EC2 instances that run the job flow.

    ", + "refs": { + "JobFlowDetail$Instances": "

    Describes the Amazon EC2 instances of the job flow.

    " + } + }, + "KeyValue": { + "base": "

    A key value pair.

    ", + "refs": { + "KeyValueList$member": null + } + }, + "KeyValueList": { + "base": null, + "refs": { + "HadoopJarStepConfig$Properties": "

    A list of Java properties that are set when the step runs. You can use these properties to pass key value pairs to your main function.

    " + } + }, + "ListBootstrapActionsInput": { + "base": "

    This input determines which bootstrap actions to retrieve.

    ", + "refs": { + } + }, + "ListBootstrapActionsOutput": { + "base": "

    This output contains the bootstrap action details.

    ", + "refs": { + } + }, + "ListClustersInput": { + "base": "

    This input determines how the ListClusters action filters the list of clusters that it returns.

    ", + "refs": { + } + }, + "ListClustersOutput": { + "base": "

    This contains a ClusterSummaryList with the cluster details; for example, the cluster IDs, names, and status.

    ", + "refs": { + } + }, + "ListInstanceGroupsInput": { + "base": "

    This input determines which instance groups to retrieve.

    ", + "refs": { + } + }, + "ListInstanceGroupsOutput": { + "base": "

    This output contains the list of instance groups.

    ", + "refs": { + } + }, + "ListInstancesInput": { + "base": "

    This input determines which instances to list.

    ", + "refs": { + } + }, + "ListInstancesOutput": { + "base": "

    This output contains the list of instances.

    ", + "refs": { + } + }, + "ListStepsInput": { + "base": "

    This input determines which steps to list.

    ", + "refs": { + } + }, + "ListStepsOutput": { + "base": "

    This output contains the list of steps returned in reverse order. This means that the last step is the first element in the list.

    ", + "refs": { + } + }, + "Marker": { + "base": null, + "refs": { + "ListBootstrapActionsInput$Marker": "

    The pagination token that indicates the next set of results to retrieve.

    ", + "ListBootstrapActionsOutput$Marker": "

    The pagination token that indicates the next set of results to retrieve.

    ", + "ListClustersInput$Marker": "

    The pagination token that indicates the next set of results to retrieve.

    ", + "ListClustersOutput$Marker": "

    The pagination token that indicates the next set of results to retrieve.

    ", + "ListInstanceGroupsInput$Marker": "

    The pagination token that indicates the next set of results to retrieve.

    ", + "ListInstanceGroupsOutput$Marker": "

    The pagination token that indicates the next set of results to retrieve.

    ", + "ListInstancesInput$Marker": "

    The pagination token that indicates the next set of results to retrieve.

    ", + "ListInstancesOutput$Marker": "

    The pagination token that indicates the next set of results to retrieve.

    ", + "ListStepsInput$Marker": "

    The pagination token that indicates the next set of results to retrieve.

    ", + "ListStepsOutput$Marker": "

    The pagination token that indicates the next set of results to retrieve.
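
    A sketch of driving this token by hand with the Go client (region is a placeholder; the SDK's generated ListClustersPages helper wraps the same loop):

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/emr"
        )

        func main() {
            svc := emr.New(session.Must(session.NewSession(&aws.Config{
                Region: aws.String("us-east-1"), // placeholder region
            })))

            input := &emr.ListClustersInput{}
            for {
                out, err := svc.ListClusters(input)
                if err != nil {
                    panic(err)
                }
                for _, c := range out.Clusters {
                    fmt.Println(aws.StringValue(c.Id), aws.StringValue(c.Name))
                }
                if out.Marker == nil {
                    break // no further pages
                }
                input.Marker = out.Marker // feed the token back in
            }
        }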

    " + } + }, + "MarketType": { + "base": null, + "refs": { + "InstanceGroup$Market": "

    The marketplace to provision instances for this group. Valid values are ON_DEMAND or SPOT.

    ", + "InstanceGroupConfig$Market": "

    Market type of the Amazon EC2 instances used to create a cluster node.

    ", + "InstanceGroupDetail$Market": "

    Market type of the Amazon EC2 instances used to create a cluster node.

    " + } + }, + "ModifyInstanceGroupsInput": { + "base": "

    Change the size of some instance groups.

    ", + "refs": { + } + }, + "NewSupportedProductsList": { + "base": null, + "refs": { + "RunJobFlowInput$NewSupportedProducts": "

    For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and greater, use Applications.

    A list of strings that indicates third-party software to use with the job flow that accepts a user argument list. EMR accepts and forwards the argument list to the corresponding installation script as bootstrap action arguments (see the sketch after this list). For more information, see Launch a Job Flow on the MapR Distribution for Hadoop. Currently supported values are:

    • \"mapr-m3\" - launch the cluster using MapR M3 Edition.
    • \"mapr-m5\" - launch the cluster using MapR M5 Edition.
    • \"mapr\" with the user arguments specifying \"--edition,m3\" or \"--edition,m5\" - launch the job flow using MapR M3 or M5 Edition respectively.
    • \"mapr-m7\" - launch the cluster using MapR M7 Edition.
    • \"hunk\" - launch the cluster with the Hunk Big Data Analtics Platform.
    • \"hue\"- launch the cluster with Hue installed.
    • \"spark\" - launch the cluster with Apache Spark installed.
    • \"ganglia\" - launch the cluster with the Ganglia Monitoring System installed.
    " + } + }, + "PlacementType": { + "base": "

    The Amazon EC2 location for the job flow.

    ", + "refs": { + "JobFlowInstancesConfig$Placement": "

    The Availability Zone the job flow will run in.

    ", + "JobFlowInstancesDetail$Placement": "

    The Amazon EC2 Availability Zone for the job flow.

    " + } + }, + "RemoveTagsInput": { + "base": "

    This input identifies a cluster and a list of tags to remove.

    ", + "refs": { + } + }, + "RemoveTagsOutput": { + "base": "

    This output indicates the result of removing tags from a resource.

    ", + "refs": { + } + }, + "ResourceId": { + "base": null, + "refs": { + "AddTagsInput$ResourceId": "

    The Amazon EMR resource identifier to which tags will be added. This value must be a cluster identifier.

    ", + "RemoveTagsInput$ResourceId": "

    The Amazon EMR resource identifier from which tags will be removed. This value must be a cluster identifier.

    " + } + }, + "RunJobFlowInput": { + "base": "

    Input to the RunJobFlow operation.

    ", + "refs": { + } + }, + "RunJobFlowOutput": { + "base": "

    The result of the RunJobFlow operation.

    ", + "refs": { + } + }, + "ScriptBootstrapActionConfig": { + "base": "

    Configuration of the script to run during a bootstrap action.

    ", + "refs": { + "BootstrapActionConfig$ScriptBootstrapAction": "

    The script run by the bootstrap action.

    " + } + }, + "SecurityGroupsList": { + "base": null, + "refs": { + "JobFlowInstancesConfig$AdditionalMasterSecurityGroups": "

    A list of additional Amazon EC2 security group IDs for the master node.

    ", + "JobFlowInstancesConfig$AdditionalSlaveSecurityGroups": "

    A list of additional Amazon EC2 security group IDs for the slave nodes.

    " + } + }, + "SetTerminationProtectionInput": { + "base": "

    The input argument to the SetTerminationProtection operation.

    ", + "refs": { + } + }, + "SetVisibleToAllUsersInput": { + "base": "

    The input to the SetVisibleToAllUsers action.

    ", + "refs": { + } + }, + "ShrinkPolicy": { + "base": "

    Policy for customizing shrink operations. Allows configuration of decommissioning timeout and targeted instance shrinking.

    ", + "refs": { + "InstanceGroup$ShrinkPolicy": "

    Policy for customizing shrink operations.

    ", + "InstanceGroupModifyConfig$ShrinkPolicy": "

    Policy for customizing shrink operations.
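
    A sketch of attaching such a policy through ModifyInstanceGroups with the Go client; the group ID, instance ID, and timeout values are placeholders:

        package main

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/emr"
        )

        func main() {
            svc := emr.New(session.Must(session.NewSession()))

            // Shrink the group to 2 instances, naming a specific instance to
            // terminate and overriding the decommissioning timeouts.
            _, err := svc.ModifyInstanceGroups(&emr.ModifyInstanceGroupsInput{
                InstanceGroups: []*emr.InstanceGroupModifyConfig{{
                    InstanceGroupId: aws.String("ig-XXXXXXXXXXXX"), // placeholder
                    InstanceCount:   aws.Int64(2),
                    ShrinkPolicy: &emr.ShrinkPolicy{
                        DecommissionTimeout: aws.Int64(300), // placeholder override
                        InstanceResizePolicy: &emr.InstanceResizePolicy{
                            InstancesToTerminate:       []*string{aws.String("i-0123456789abcdef0")},
                            InstanceTerminationTimeout: aws.Int64(600), // placeholder override
                        },
                    },
                }},
            })
            if err != nil {
                panic(err)
            }
        }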

    " + } + }, + "Step": { + "base": "

    This represents a step in a cluster.

    ", + "refs": { + "DescribeStepOutput$Step": "

    The step details for the requested step identifier.

    " + } + }, + "StepConfig": { + "base": "

    Specification of a job flow step.

    ", + "refs": { + "StepConfigList$member": null, + "StepDetail$StepConfig": "

    The step configuration.

    " + } + }, + "StepConfigList": { + "base": null, + "refs": { + "AddJobFlowStepsInput$Steps": "

    A list of StepConfig to be executed by the job flow.

    ", + "RunJobFlowInput$Steps": "

    A list of steps to be executed by the job flow.
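
    A sketch of submitting one such step with the Go client; the cluster ID, JAR path, and arguments are placeholders:

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/emr"
        )

        func main() {
            svc := emr.New(session.Must(session.NewSession()))

            out, err := svc.AddJobFlowSteps(&emr.AddJobFlowStepsInput{
                JobFlowId: aws.String("j-XXXXXXXXXXXXX"), // placeholder cluster ID
                Steps: []*emr.StepConfig{{
                    Name:            aws.String("example-step"),
                    ActionOnFailure: aws.String("CONTINUE"),
                    HadoopJarStep: &emr.HadoopJarStepConfig{
                        Jar:  aws.String("s3://my-bucket/my-app.jar"), // placeholder
                        Args: []*string{aws.String("--input"), aws.String("s3://my-bucket/in")},
                    },
                }},
            })
            if err != nil {
                panic(err)
            }
            // Identifiers of the added steps, in the order submitted.
            fmt.Println(aws.StringValueSlice(out.StepIds))
        }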

    " + } + }, + "StepDetail": { + "base": "

    Combines the execution state and configuration of a step.

    ", + "refs": { + "StepDetailList$member": null + } + }, + "StepDetailList": { + "base": null, + "refs": { + "JobFlowDetail$Steps": "

    A list of steps run by the job flow.

    " + } + }, + "StepExecutionState": { + "base": null, + "refs": { + "StepExecutionStatusDetail$State": "

    The state of the job flow step.

    " + } + }, + "StepExecutionStatusDetail": { + "base": "

    The execution state of a step.

    ", + "refs": { + "StepDetail$ExecutionStatusDetail": "

    The description of the step status.

    " + } + }, + "StepId": { + "base": null, + "refs": { + "DescribeStepInput$StepId": "

    The identifier of the step to describe.

    ", + "Step$Id": "

    The identifier of the cluster step.

    ", + "StepSummary$Id": "

    The identifier of the cluster step.

    " + } + }, + "StepIdsList": { + "base": null, + "refs": { + "AddJobFlowStepsOutput$StepIds": "

    The identifiers of the list of steps added to the job flow.

    " + } + }, + "StepState": { + "base": null, + "refs": { + "StepStateList$member": null, + "StepStatus$State": "

    The execution state of the cluster step.

    " + } + }, + "StepStateChangeReason": { + "base": "

    The details of the step state change reason.

    ", + "refs": { + "StepStatus$StateChangeReason": "

    The reason for the step execution status change.

    " + } + }, + "StepStateChangeReasonCode": { + "base": null, + "refs": { + "StepStateChangeReason$Code": "

    The programmable code for the state change reason. Note: Currently, the service provides no code for the state change.

    " + } + }, + "StepStateList": { + "base": null, + "refs": { + "ListStepsInput$StepStates": "

    The filter to limit the step list based on certain states.

    " + } + }, + "StepStatus": { + "base": "

    The execution status details of the cluster step.

    ", + "refs": { + "Step$Status": "

    The current execution status details of the cluster step.

    ", + "StepSummary$Status": "

    The current execution status details of the cluster step.

    " + } + }, + "StepSummary": { + "base": "

    The summary of the cluster step.

    ", + "refs": { + "StepSummaryList$member": null + } + }, + "StepSummaryList": { + "base": null, + "refs": { + "ListStepsOutput$Steps": "

    The filtered list of steps for the cluster.

    " + } + }, + "StepTimeline": { + "base": "

    The timeline of the cluster step lifecycle.

    ", + "refs": { + "StepStatus$Timeline": "

    The timeline of the cluster step status over time.

    " + } + }, + "String": { + "base": null, + "refs": { + "Application$Name": "

    The name of the application.

    ", + "Application$Version": "

    The version of the application.

    ", + "Cluster$Name": "

    The name of the cluster.

    ", + "Cluster$LogUri": "

    The path to the Amazon S3 location where logs for this cluster are stored.

    ", + "Cluster$RequestedAmiVersion": "

    The AMI version requested for this cluster.

    ", + "Cluster$RunningAmiVersion": "

    The AMI version running on this cluster.

    ", + "Cluster$ReleaseLabel": "

    The release label for the Amazon EMR release. For Amazon EMR 3.x and 2.x AMIs, use amiVersion instead of ReleaseLabel.

    ", + "Cluster$ServiceRole": "

    The IAM role that will be assumed by the Amazon EMR service to access AWS resources on your behalf.

    ", + "Cluster$MasterPublicDnsName": "

    The public DNS name of the master EC2 instance.

    ", + "ClusterStateChangeReason$Message": "

    The descriptive message for the state change reason.

    ", + "ClusterSummary$Name": "

    The name of the cluster.

    ", + "Command$Name": "

    The name of the command.

    ", + "Command$ScriptPath": "

    The Amazon S3 location of the command script.

    ", + "Configuration$Classification": "

    The classification of a configuration. For more information, see Amazon EMR Configurations.

    ", + "EbsBlockDevice$Device": "

    The device name that is exposed to the instance, such as /dev/sdh.

    ", + "EbsVolume$Device": "

    The device name that is exposed to the instance, such as /dev/sdh.

    ", + "EbsVolume$VolumeId": "

    The volume identifier of the EBS volume.

    ", + "Ec2InstanceAttributes$Ec2KeyName": "

    The name of the Amazon EC2 key pair to use when connecting with SSH into the master node as a user named \"hadoop\".

    ", + "Ec2InstanceAttributes$Ec2SubnetId": "

    To launch the job flow in Amazon VPC, set this parameter to the identifier of the Amazon VPC subnet where you want the job flow to launch. If you do not specify this value, the job flow is launched in the normal AWS cloud, outside of a VPC.

    Amazon VPC currently does not support cluster compute quadruple extra large (cc1.4xlarge) instances. Thus, you cannot specify the cc1.4xlarge instance type for nodes of a job flow launched in a VPC.

    ", + "Ec2InstanceAttributes$Ec2AvailabilityZone": "

    The Availability Zone in which the cluster will run.

    ", + "Ec2InstanceAttributes$IamInstanceProfile": "

    The IAM role that was specified when the job flow was launched. The EC2 instances of the job flow assume this role.

    ", + "Ec2InstanceAttributes$EmrManagedMasterSecurityGroup": "

    The identifier of the Amazon EC2 security group for the master node.

    ", + "Ec2InstanceAttributes$EmrManagedSlaveSecurityGroup": "

    The identifier of the Amazon EC2 security group for the slave nodes.

    ", + "Ec2InstanceAttributes$ServiceAccessSecurityGroup": "

    The identifier of the Amazon EC2 security group for the Amazon EMR service to access clusters in VPC private subnets.

    ", + "HadoopStepConfig$Jar": "

    The path to the JAR file that runs during the step.

    ", + "HadoopStepConfig$MainClass": "

    The name of the main class in the specified Java file. If not specified, the JAR file should specify a main class in its manifest file.

    ", + "Instance$PublicDnsName": "

    The public DNS name of the instance.

    ", + "Instance$PublicIpAddress": "

    The public IP address of the instance.

    ", + "Instance$PrivateDnsName": "

    The private DNS name of the instance.

    ", + "Instance$PrivateIpAddress": "

    The private IP address of the instance.

    ", + "Instance$InstanceGroupId": "

    The identifier of the instance group to which this instance belongs.

    ", + "InstanceGroup$Name": "

    The name of the instance group.

    ", + "InstanceGroup$BidPrice": "

    The bid price for each EC2 instance in the instance group when launching nodes as Spot Instances, expressed in USD.

    ", + "InstanceGroupStateChangeReason$Message": "

    The status change reason description.

    ", + "InstanceStateChangeReason$Message": "

    The status change reason description.

    ", + "Step$Name": "

    The name of the cluster step.

    ", + "StepStateChangeReason$Message": "

    The descriptive message for the state change reason.

    ", + "StepSummary$Name": "

    The name of the cluster step.

    ", + "StringList$member": null, + "StringMap$key": null, + "StringMap$value": null, + "Tag$Key": "

    A user-defined key, which is the minimum required information for a valid tag. For more information, see Tagging Amazon EMR Resources.

    ", + "Tag$Value": "

    A user-defined value, which is optional in a tag. For more information, see Tagging Amazon EMR Resources.

    ", + "VolumeSpecification$VolumeType": "

    The volume type. Volume types supported are gp2, io1, standard.

    " + } + }, + "StringList": { + "base": null, + "refs": { + "Application$Args": "

    Arguments for Amazon EMR to pass to the application.

    ", + "Command$Args": "

    Arguments for Amazon EMR to pass to the command for execution.

    ", + "Ec2InstanceAttributes$AdditionalMasterSecurityGroups": "

    A list of additional Amazon EC2 security group IDs for the master node.

    ", + "Ec2InstanceAttributes$AdditionalSlaveSecurityGroups": "

    A list of additional Amazon EC2 security group IDs for the slave nodes.

    ", + "HadoopStepConfig$Args": "

    The list of command line arguments to pass to the JAR file's main function for execution.

    ", + "RemoveTagsInput$TagKeys": "

    A list of tag keys to remove from a resource.

    " + } + }, + "StringMap": { + "base": null, + "refs": { + "Application$AdditionalInfo": "

    This option is for advanced users only. This is meta information about third-party applications that third-party vendors use for testing purposes.

    ", + "Configuration$Properties": "

    A set of properties supplied to the Configuration object.

    ", + "HadoopStepConfig$Properties": "

    The list of Java properties that are set when the step runs. You can use these properties to pass key value pairs to your main function.

    " + } + }, + "SupportedProductConfig": { + "base": "

    The list of supported product configurations which allow user-supplied arguments. EMR accepts these arguments and forwards them to the corresponding installation script as bootstrap action arguments.

    ", + "refs": { + "NewSupportedProductsList$member": null + } + }, + "SupportedProductsList": { + "base": null, + "refs": { + "JobFlowDetail$SupportedProducts": "

    A list of strings set by third-party software when the job flow is launched. If you are not using third-party software to manage the job flow, this value is empty.

    ", + "RunJobFlowInput$SupportedProducts": "

    For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and greater, use Applications.

    A list of strings that indicates third-party software to use with the job flow. For more information, go to Use Third Party Applications with Amazon EMR. Currently supported values are:

    • \"mapr-m3\" - launch the job flow using MapR M3 Edition.
    • \"mapr-m5\" - launch the job flow using MapR M5 Edition.
    " + } + }, + "Tag": { + "base": "

    A key/value pair containing user-defined metadata that you can associate with an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. For more information, see Tagging Amazon EMR Resources.

    ", + "refs": { + "TagList$member": null + } + }, + "TagList": { + "base": null, + "refs": { + "AddTagsInput$Tags": "

    A list of tags to associate with a cluster and propagate to Amazon EC2 instances. Tags are user-defined key/value pairs that consist of a required key string with a maximum of 128 characters, and an optional value string with a maximum of 256 characters.

    ", + "Cluster$Tags": "

    A list of tags associated with a cluster.

    ", + "RunJobFlowInput$Tags": "

    A list of tags to associate with a cluster and propagate to Amazon EC2 instances.
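
    A sketch of tagging a cluster with the Go client, observing the limits above; the cluster ID and tag names are placeholders:

        package main

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/emr"
        )

        func main() {
            svc := emr.New(session.Must(session.NewSession()))

            // ResourceId must be a cluster identifier; each key is at most
            // 128 characters and each optional value at most 256.
            _, err := svc.AddTags(&emr.AddTagsInput{
                ResourceId: aws.String("j-XXXXXXXXXXXXX"), // placeholder
                Tags: []*emr.Tag{
                    {Key: aws.String("team"), Value: aws.String("data-eng")},
                    {Key: aws.String("env")}, // the value part is optional
                },
            })
            if err != nil {
                panic(err)
            }
        }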

    " + } + }, + "TerminateJobFlowsInput": { + "base": "

    Input to the TerminateJobFlows operation.

    ", + "refs": { + } + }, + "VolumeSpecification": { + "base": "

    EBS volume specifications such as volume type, IOPS, and size (GiB) that will be requested for the EBS volume attached to an EC2 instance in the cluster.

    ", + "refs": { + "EbsBlockDevice$VolumeSpecification": "

    EBS volume specifications such as volume type, IOPS, and size (GiB) that will be requested for the EBS volume attached to an EC2 instance in the cluster.

    ", + "EbsBlockDeviceConfig$VolumeSpecification": "

    EBS volume specifications such as volume type, IOPS, and size (GiB) that will be requested for the EBS volume attached to an EC2 instance in the cluster.
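
    A fragment showing how these pieces compose in Go: two gp2 volumes of 100 GiB per instance (an io1 volume would also set Iops); all values are placeholders:

        package main

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/emr"
        )

        // EBS configuration for one instance group: every instance in the
        // group gets two 100 GiB gp2 volumes.
        var ebsConfig = &emr.EbsConfiguration{
            EbsBlockDeviceConfigs: []*emr.EbsBlockDeviceConfig{{
                VolumeSpecification: &emr.VolumeSpecification{
                    VolumeType: aws.String("gp2"),
                    SizeInGB:   aws.Int64(100),
                },
                VolumesPerInstance: aws.Int64(2),
            }},
        }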

    " + } + }, + "XmlString": { + "base": null, + "refs": { + "HadoopJarStepConfig$Jar": "

    A path to a JAR file run during the step.

    ", + "HadoopJarStepConfig$MainClass": "

    The name of the main class in the specified Java file. If not specified, the JAR file should specify a Main-Class in its manifest file.

    ", + "InstanceGroupDetail$LastStateChangeReason": "

    Details regarding the state of the instance group.

    ", + "JobFlowDetail$LogUri": "

    The location in Amazon S3 where log files for the job are stored.

    ", + "JobFlowDetail$JobFlowRole": "

    The IAM role that was specified when the job flow was launched. The EC2 instances of the job flow assume this role.

    ", + "JobFlowDetail$ServiceRole": "

    The IAM role that will be assumed by the Amazon EMR service to access AWS resources on your behalf.

    ", + "JobFlowExecutionStatusDetail$LastStateChangeReason": "

    Description of the job flow last changed state.

    ", + "JobFlowInstancesDetail$MasterPublicDnsName": "

    The DNS name of the master node.

    ", + "JobFlowInstancesDetail$MasterInstanceId": "

    The Amazon EC2 instance identifier of the master node.

    ", + "KeyValue$Key": "

    The unique identifier of a key value pair.

    ", + "KeyValue$Value": "

    The value part of the identified key.

    ", + "PlacementType$AvailabilityZone": "

    The Amazon EC2 Availability Zone for the job flow.

    ", + "RunJobFlowInput$LogUri": "

    The location in Amazon S3 to write the log files of the job flow. If a value is not provided, logs are not created.

    ", + "RunJobFlowInput$AdditionalInfo": "

    A JSON string for selecting additional features.

    ", + "RunJobFlowInput$JobFlowRole": "

    Also called instance profile and EC2 role. An IAM role for an EMR cluster. The EC2 instances of the cluster assume this role. The default role is EMR_EC2_DefaultRole. In order to use the default role, you must have already created it using the CLI or console.

    ", + "RunJobFlowInput$ServiceRole": "

    The IAM role that will be assumed by the Amazon EMR service to access AWS resources on your behalf.
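
    A sketch of a RunJobFlow call wiring up both roles with the Go client; the name, release label, bucket, and instance settings are placeholders, and both roles must already exist (EMR_DefaultRole is the conventional default service role):

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/emr"
        )

        func main() {
            svc := emr.New(session.Must(session.NewSession()))

            out, err := svc.RunJobFlow(&emr.RunJobFlowInput{
                Name:         aws.String("example-flow"),         // placeholder
                ReleaseLabel: aws.String("emr-5.7.0"),            // placeholder
                LogUri:       aws.String("s3://my-bucket/logs/"), // placeholder
                JobFlowRole:  aws.String("EMR_EC2_DefaultRole"),  // must already exist
                ServiceRole:  aws.String("EMR_DefaultRole"),      // must already exist
                Instances: &emr.JobFlowInstancesConfig{
                    MasterInstanceType: aws.String("m3.xlarge"), // placeholder
                    SlaveInstanceType:  aws.String("m3.xlarge"), // placeholder
                    InstanceCount:      aws.Int64(3),
                },
            })
            if err != nil {
                panic(err)
            }
            fmt.Println(aws.StringValue(out.JobFlowId))
        }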

    ", + "ScriptBootstrapActionConfig$Path": "

    Location of the script to run during a bootstrap action. Can be either a location in Amazon S3 or on a local file system.

    ", + "StepExecutionStatusDetail$LastStateChangeReason": "

    A description of the step's current state.

    ", + "XmlStringList$member": null + } + }, + "XmlStringList": { + "base": null, + "refs": { + "DescribeJobFlowsInput$JobFlowIds": "

    Return only job flows whose job flow ID is contained in this list.

    ", + "HadoopJarStepConfig$Args": "

    A list of command line arguments passed to the JAR file's main function when executed.

    ", + "ListStepsInput$StepIds": "

    The filter to limit the step list based on the identifier of the steps.

    ", + "ScriptBootstrapActionConfig$Args": "

    A list of command line arguments to pass to the bootstrap action script.

    ", + "SetTerminationProtectionInput$JobFlowIds": "

    A list of strings that uniquely identify the job flows to protect. This identifier is returned by RunJobFlow and can also be obtained from DescribeJobFlows.
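
    A sketch of the call with the Go client; the job flow ID is a placeholder:

        package main

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/emr"
        )

        func main() {
            svc := emr.New(session.Must(session.NewSession()))

            // Protect the job flow; pass false later to lift the protection.
            _, err := svc.SetTerminationProtection(&emr.SetTerminationProtectionInput{
                JobFlowIds:           []*string{aws.String("j-XXXXXXXXXXXXX")}, // placeholder
                TerminationProtected: aws.Bool(true),
            })
            if err != nil {
                panic(err)
            }
        }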

    ", + "SetVisibleToAllUsersInput$JobFlowIds": "

    Identifiers of the job flows to receive the new visibility setting.

    ", + "SupportedProductConfig$Args": "

    The list of user-supplied arguments.

    ", + "TerminateJobFlowsInput$JobFlowIds": "

    A list of job flows to be shut down.

    " + } + }, + "XmlStringMaxLen256": { + "base": null, + "refs": { + "AddInstanceGroupsInput$JobFlowId": "

    Job flow in which to add the instance groups.

    ", + "AddInstanceGroupsOutput$JobFlowId": "

    The job flow ID in which the instance groups are added.

    ", + "AddJobFlowStepsInput$JobFlowId": "

    A string that uniquely identifies the job flow. This identifier is returned by RunJobFlow and can also be obtained from ListClusters.

    ", + "BootstrapActionConfig$Name": "

    The name of the bootstrap action.

    ", + "InstanceGroupConfig$Name": "

    Friendly name given to the instance group.

    ", + "InstanceGroupConfig$BidPrice": "

    Bid price for each Amazon EC2 instance in the instance group when launching nodes as Spot Instances, expressed in USD.

    ", + "InstanceGroupDetail$InstanceGroupId": "

    Unique identifier for the instance group.

    ", + "InstanceGroupDetail$Name": "

    Friendly name for the instance group.

    ", + "InstanceGroupDetail$BidPrice": "

    Bid price for EC2 Instances when launching nodes as Spot Instances, expressed in USD.

    ", + "InstanceGroupIdsList$member": null, + "InstanceGroupModifyConfig$InstanceGroupId": "

    Unique ID of the instance group to expand or shrink.

    ", + "JobFlowDetail$JobFlowId": "

    The job flow identifier.

    ", + "JobFlowDetail$Name": "

    The name of the job flow.

    ", + "JobFlowDetail$AmiVersion": "

    The version of the AMI used to initialize Amazon EC2 instances in the job flow. For a list of AMI versions currently supported by Amazon ElasticMapReduce, go to AMI Versions Supported in Elastic MapReduce in the Amazon Elastic MapReduce Developer Guide.

    ", + "JobFlowInstancesConfig$Ec2KeyName": "

    The name of the Amazon EC2 key pair that can be used to ssh to the master node as the user called \"hadoop.\"

    ", + "JobFlowInstancesConfig$HadoopVersion": "

    The Hadoop version for the job flow. Valid inputs are \"0.18\" (deprecated), \"0.20\" (deprecated), \"0.20.205\" (deprecated), \"1.0.3\", \"2.2.0\", or \"2.4.0\". If you do not set this value, the default of 0.18 is used, unless the AmiVersion parameter is set in the RunJobFlow call, in which case the default version of Hadoop for that AMI version is used.

    ", + "JobFlowInstancesConfig$Ec2SubnetId": "

    To launch the job flow in Amazon Virtual Private Cloud (Amazon VPC), set this parameter to the identifier of the Amazon VPC subnet where you want the job flow to launch. If you do not specify this value, the job flow is launched in the normal Amazon Web Services cloud, outside of an Amazon VPC.

    Amazon VPC currently does not support cluster compute quadruple extra large (cc1.4xlarge) instances. Thus, you cannot specify the cc1.4xlarge instance type for nodes of a job flow launched in an Amazon VPC.

    ", + "JobFlowInstancesConfig$EmrManagedMasterSecurityGroup": "

    The identifier of the Amazon EC2 security group for the master node.

    ", + "JobFlowInstancesConfig$EmrManagedSlaveSecurityGroup": "

    The identifier of the Amazon EC2 security group for the slave nodes.

    ", + "JobFlowInstancesConfig$ServiceAccessSecurityGroup": "

    The identifier of the Amazon EC2 security group for the Amazon EMR service to access clusters in VPC private subnets.

    ", + "JobFlowInstancesDetail$Ec2KeyName": "

    The name of an Amazon EC2 key pair that can be used to ssh to the master node of the job flow.

    ", + "JobFlowInstancesDetail$Ec2SubnetId": "

    For job flows launched within Amazon Virtual Private Cloud, this value specifies the identifier of the subnet where the job flow was launched.

    ", + "JobFlowInstancesDetail$HadoopVersion": "

    The Hadoop version for the job flow.

    ", + "RunJobFlowInput$Name": "

    The name of the job flow.

    ", + "RunJobFlowInput$AmiVersion": "

    For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and greater, use ReleaseLabel.

    The version of the Amazon Machine Image (AMI) to use when launching Amazon EC2 instances in the job flow. The following values are valid:

    • The version number of the AMI to use, for example, \"2.0.\"

    If the AMI supports multiple versions of Hadoop (for example, AMI 1.0 supports both Hadoop 0.18 and 0.20) you can use the JobFlowInstancesConfig HadoopVersion parameter to modify the version of Hadoop from the defaults shown above.

    For details about the AMI versions currently supported by Amazon Elastic MapReduce, go to AMI Versions Supported in Elastic MapReduce in the Amazon Elastic MapReduce Developer's Guide.

    ", + "RunJobFlowInput$ReleaseLabel": "

    Amazon EMR releases 4.x or later.

    The release label for the Amazon EMR release. For Amazon EMR 3.x and 2.x AMIs, use amiVersion instead of ReleaseLabel.

    ", + "RunJobFlowOutput$JobFlowId": "

    A unique identifier for the job flow.

    ", + "SecurityGroupsList$member": null, + "StepConfig$Name": "

    The name of the job flow step.

    ", + "StepIdsList$member": null, + "SupportedProductConfig$Name": "

    The name of the product configuration.

    ", + "SupportedProductsList$member": null + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/paginators-1.json new file mode 100644 index 000000000..87f282d77 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/paginators-1.json @@ -0,0 +1,32 @@ +{ + "pagination": { + "DescribeJobFlows": { + "result_key": "JobFlows" + }, + "ListBootstrapActions": { + "input_token": "Marker", + "output_token": "Marker", + "result_key": "BootstrapActions" + }, + "ListClusters": { + "input_token": "Marker", + "output_token": "Marker", + "result_key": "Clusters" + }, + "ListInstanceGroups": { + "input_token": "Marker", + "output_token": "Marker", + "result_key": "InstanceGroups" + }, + "ListInstances": { + "input_token": "Marker", + "output_token": "Marker", + "result_key": "Instances" + }, + "ListSteps": { + "input_token": "Marker", + "output_token": "Marker", + "result_key": "Steps" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/waiters-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/waiters-2.json new file mode 100644 index 000000000..829f1b1ac --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/waiters-2.json @@ -0,0 +1,67 @@ +{ + "version": 2, + "waiters": { + "ClusterRunning": { + "delay": 30, + "operation": "DescribeCluster", + "maxAttempts": 60, + "acceptors": [ + { + "state": "success", + "matcher": "path", + "argument": "Cluster.Status.State", + "expected": "RUNNING" + }, + { + "state": "success", + "matcher": "path", + "argument": "Cluster.Status.State", + "expected": "WAITING" + }, + { + "state": "failure", + "matcher": "path", + "argument": "Cluster.Status.State", + "expected": "TERMINATING" + }, + { + "state": "failure", + "matcher": "path", + "argument": "Cluster.Status.State", + "expected": "TERMINATED" + }, + { + "state": "failure", + "matcher": "path", + "argument": "Cluster.Status.State", + "expected": "TERMINATED_WITH_ERRORS" + } + ] + }, + "StepComplete": { + "delay": 30, + "operation": "DescribeStep", + "maxAttempts": 60, + "acceptors": [ + { + "state": "success", + "matcher": "path", + "argument": "Step.Status.State", + "expected": "COMPLETED" + }, + { + "state": "failure", + "matcher": "path", + "argument": "Step.Status.State", + "expected": "FAILED" + }, + { + "state": "failure", + "matcher": "path", + "argument": "Step.Status.State", + "expected": "CANCELLED" + } + ] + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/elastictranscoder/2012-09-25/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/elastictranscoder/2012-09-25/api-2.json new file mode 100644 index 000000000..419e9588d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/elastictranscoder/2012-09-25/api-2.json @@ -0,0 +1,1807 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2012-09-25", + "endpointPrefix":"elastictranscoder", + "serviceFullName":"Amazon Elastic 
Transcoder", + "signatureVersion":"v4", + "protocol":"rest-json" + }, + "operations":{ + "CancelJob":{ + "name":"CancelJob", + "http":{ + "method":"DELETE", + "requestUri":"/2012-09-25/jobs/{Id}", + "responseCode":202 + }, + "input":{"shape":"CancelJobRequest"}, + "output":{"shape":"CancelJobResponse"}, + "errors":[ + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"IncompatibleVersionException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"ResourceInUseException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"AccessDeniedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + }, + "CreateJob":{ + "name":"CreateJob", + "http":{ + "method":"POST", + "requestUri":"/2012-09-25/jobs", + "responseCode":201 + }, + "input":{"shape":"CreateJobRequest"}, + "output":{"shape":"CreateJobResponse"}, + "errors":[ + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"IncompatibleVersionException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDeniedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + }, + "CreatePipeline":{ + "name":"CreatePipeline", + "http":{ + "method":"POST", + "requestUri":"/2012-09-25/pipelines", + "responseCode":201 + }, + "input":{"shape":"CreatePipelineRequest"}, + "output":{"shape":"CreatePipelineResponse"}, + "errors":[ + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"IncompatibleVersionException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"AccessDeniedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + }, + "CreatePreset":{ + "name":"CreatePreset", + "http":{ + "method":"POST", + "requestUri":"/2012-09-25/presets", + "responseCode":201 + }, + "input":{"shape":"CreatePresetRequest"}, + "output":{"shape":"CreatePresetResponse"}, + "errors":[ + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"IncompatibleVersionException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"AccessDeniedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + }, + "DeletePipeline":{ + "name":"DeletePipeline", + "http":{ + "method":"DELETE", + "requestUri":"/2012-09-25/pipelines/{Id}", + "responseCode":202 + }, + "input":{"shape":"DeletePipelineRequest"}, + "output":{"shape":"DeletePipelineResponse"}, + "errors":[ + { + 
"shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"IncompatibleVersionException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"ResourceInUseException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"AccessDeniedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + }, + "DeletePreset":{ + "name":"DeletePreset", + "http":{ + "method":"DELETE", + "requestUri":"/2012-09-25/presets/{Id}", + "responseCode":202 + }, + "input":{"shape":"DeletePresetRequest"}, + "output":{"shape":"DeletePresetResponse"}, + "errors":[ + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"IncompatibleVersionException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDeniedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + }, + "ListJobsByPipeline":{ + "name":"ListJobsByPipeline", + "http":{ + "method":"GET", + "requestUri":"/2012-09-25/jobsByPipeline/{PipelineId}" + }, + "input":{"shape":"ListJobsByPipelineRequest"}, + "output":{"shape":"ListJobsByPipelineResponse"}, + "errors":[ + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"IncompatibleVersionException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDeniedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + }, + "ListJobsByStatus":{ + "name":"ListJobsByStatus", + "http":{ + "method":"GET", + "requestUri":"/2012-09-25/jobsByStatus/{Status}" + }, + "input":{"shape":"ListJobsByStatusRequest"}, + "output":{"shape":"ListJobsByStatusResponse"}, + "errors":[ + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"IncompatibleVersionException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDeniedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + }, + "ListPipelines":{ + "name":"ListPipelines", + "http":{ + "method":"GET", + "requestUri":"/2012-09-25/pipelines" + }, + "input":{"shape":"ListPipelinesRequest"}, + "output":{"shape":"ListPipelinesResponse"}, + "errors":[ + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"IncompatibleVersionException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"AccessDeniedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + }, + "ListPresets":{ + "name":"ListPresets", + "http":{ + "method":"GET", + "requestUri":"/2012-09-25/presets" + }, + "input":{"shape":"ListPresetsRequest"}, 
+ "output":{"shape":"ListPresetsResponse"}, + "errors":[ + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"IncompatibleVersionException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"AccessDeniedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + }, + "ReadJob":{ + "name":"ReadJob", + "http":{ + "method":"GET", + "requestUri":"/2012-09-25/jobs/{Id}" + }, + "input":{"shape":"ReadJobRequest"}, + "output":{"shape":"ReadJobResponse"}, + "errors":[ + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"IncompatibleVersionException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDeniedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + }, + "ReadPipeline":{ + "name":"ReadPipeline", + "http":{ + "method":"GET", + "requestUri":"/2012-09-25/pipelines/{Id}" + }, + "input":{"shape":"ReadPipelineRequest"}, + "output":{"shape":"ReadPipelineResponse"}, + "errors":[ + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"IncompatibleVersionException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDeniedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + }, + "ReadPreset":{ + "name":"ReadPreset", + "http":{ + "method":"GET", + "requestUri":"/2012-09-25/presets/{Id}" + }, + "input":{"shape":"ReadPresetRequest"}, + "output":{"shape":"ReadPresetResponse"}, + "errors":[ + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"IncompatibleVersionException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDeniedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + }, + "TestRole":{ + "name":"TestRole", + "http":{ + "method":"POST", + "requestUri":"/2012-09-25/roleTests", + "responseCode":200 + }, + "input":{"shape":"TestRoleRequest"}, + "output":{"shape":"TestRoleResponse"}, + "errors":[ + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"IncompatibleVersionException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDeniedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + }, + "UpdatePipeline":{ + "name":"UpdatePipeline", + "http":{ + "method":"PUT", + "requestUri":"/2012-09-25/pipelines/{Id}", + "responseCode":200 + }, + "input":{"shape":"UpdatePipelineRequest"}, + "output":{"shape":"UpdatePipelineResponse"}, + "errors":[ + { + "shape":"ValidationException", + 
"error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"IncompatibleVersionException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"AccessDeniedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"ResourceInUseException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + }, + "UpdatePipelineNotifications":{ + "name":"UpdatePipelineNotifications", + "http":{ + "method":"POST", + "requestUri":"/2012-09-25/pipelines/{Id}/notifications" + }, + "input":{"shape":"UpdatePipelineNotificationsRequest"}, + "output":{"shape":"UpdatePipelineNotificationsResponse"}, + "errors":[ + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"IncompatibleVersionException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"ResourceInUseException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"AccessDeniedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + }, + "UpdatePipelineStatus":{ + "name":"UpdatePipelineStatus", + "http":{ + "method":"POST", + "requestUri":"/2012-09-25/pipelines/{Id}/status" + }, + "input":{"shape":"UpdatePipelineStatusRequest"}, + "output":{"shape":"UpdatePipelineStatusResponse"}, + "errors":[ + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"IncompatibleVersionException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"ResourceInUseException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"AccessDeniedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + } + }, + "shapes":{ + "AccessControl":{ + "type":"string", + "pattern":"(^FullControl$)|(^Read$)|(^ReadAcp$)|(^WriteAcp$)" + }, + "AccessControls":{ + "type":"list", + "member":{"shape":"AccessControl"}, + "max":30 + }, + "AccessDeniedException":{ + "type":"structure", + "members":{ + }, + "error":{"httpStatusCode":403}, + "exception":true + }, + "Artwork":{ + "type":"structure", + "members":{ + "InputKey":{"shape":"WatermarkKey"}, + "MaxWidth":{"shape":"DigitsOrAuto"}, + "MaxHeight":{"shape":"DigitsOrAuto"}, + "SizingPolicy":{"shape":"SizingPolicy"}, + "PaddingPolicy":{"shape":"PaddingPolicy"}, + "AlbumArtFormat":{"shape":"JpgOrPng"}, + "Encryption":{"shape":"Encryption"} + } + }, + "Artworks":{ + "type":"list", + "member":{"shape":"Artwork"} + }, + "Ascending":{ + "type":"string", + "pattern":"(^true$)|(^false$)" + }, + "AspectRatio":{ + "type":"string", + "pattern":"(^auto$)|(^1:1$)|(^4:3$)|(^3:2$)|(^16:9$)" + }, + "AudioBitDepth":{ + "type":"string", + "pattern":"(^16$)|(^24$)" + }, + "AudioBitOrder":{ + "type":"string", + "pattern":"(^LittleEndian$)" + }, + "AudioBitRate":{ + "type":"string", + "pattern":"^\\d{1,3}$" + }, + "AudioChannels":{ + "type":"string", + "pattern":"(^auto$)|(^0$)|(^1$)|(^2$)" + }, + "AudioCodec":{ + "type":"string", + 
"pattern":"(^AAC$)|(^vorbis$)|(^mp3$)|(^mp2$)|(^pcm$)|(^flac$)" + }, + "AudioCodecOptions":{ + "type":"structure", + "members":{ + "Profile":{"shape":"AudioCodecProfile"}, + "BitDepth":{"shape":"AudioBitDepth"}, + "BitOrder":{"shape":"AudioBitOrder"}, + "Signed":{"shape":"AudioSigned"} + } + }, + "AudioCodecProfile":{ + "type":"string", + "pattern":"(^auto$)|(^AAC-LC$)|(^HE-AAC$)|(^HE-AACv2$)" + }, + "AudioPackingMode":{ + "type":"string", + "pattern":"(^SingleTrack$)|(^OneChannelPerTrack$)|(^OneChannelPerTrackWithMosTo8Tracks$)" + }, + "AudioParameters":{ + "type":"structure", + "members":{ + "Codec":{"shape":"AudioCodec"}, + "SampleRate":{"shape":"AudioSampleRate"}, + "BitRate":{"shape":"AudioBitRate"}, + "Channels":{"shape":"AudioChannels"}, + "AudioPackingMode":{"shape":"AudioPackingMode"}, + "CodecOptions":{"shape":"AudioCodecOptions"} + } + }, + "AudioSampleRate":{ + "type":"string", + "pattern":"(^auto$)|(^22050$)|(^32000$)|(^44100$)|(^48000$)|(^96000$)|(^192000$)" + }, + "AudioSigned":{ + "type":"string", + "pattern":"(^Signed$)" + }, + "Base64EncodedString":{ + "type":"string", + "pattern":"^$|(^(?:[A-Za-z0-9\\+/]{4})*(?:[A-Za-z0-9\\+/]{2}==|[A-Za-z0-9\\+/]{3}=)?$)" + }, + "BucketName":{ + "type":"string", + "pattern":"^(\\w|\\.|-){1,255}$" + }, + "CancelJobRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"Id", + "location":"uri", + "locationName":"Id" + } + } + }, + "CancelJobResponse":{ + "type":"structure", + "members":{ + } + }, + "CaptionFormat":{ + "type":"structure", + "members":{ + "Format":{"shape":"CaptionFormatFormat"}, + "Pattern":{"shape":"CaptionFormatPattern"}, + "Encryption":{"shape":"Encryption"} + } + }, + "CaptionFormatFormat":{ + "type":"string", + "pattern":"(^mov-text$)|(^srt$)|(^scc$)|(^webvtt$)|(^dfxp$)" + }, + "CaptionFormatPattern":{ + "type":"string", + "pattern":"(^$)|(^.*\\{language\\}.*$)" + }, + "CaptionFormats":{ + "type":"list", + "member":{"shape":"CaptionFormat"}, + "max":4 + }, + "CaptionMergePolicy":{ + "type":"string", + "pattern":"(^MergeOverride$)|(^MergeRetain$)|(^Override$)" + }, + "CaptionSource":{ + "type":"structure", + "members":{ + "Key":{"shape":"Key"}, + "Language":{"shape":"Key"}, + "TimeOffset":{"shape":"TimeOffset"}, + "Label":{"shape":"Name"}, + "Encryption":{"shape":"Encryption"} + } + }, + "CaptionSources":{ + "type":"list", + "member":{"shape":"CaptionSource"}, + "max":20 + }, + "Captions":{ + "type":"structure", + "members":{ + "MergePolicy":{"shape":"CaptionMergePolicy"}, + "CaptionSources":{"shape":"CaptionSources"}, + "CaptionFormats":{"shape":"CaptionFormats"} + } + }, + "Clip":{ + "type":"structure", + "members":{ + "TimeSpan":{"shape":"TimeSpan"} + } + }, + "CodecOption":{ + "type":"string", + "min":1, + "max":255 + }, + "CodecOptions":{ + "type":"map", + "key":{"shape":"CodecOption"}, + "value":{"shape":"CodecOption"}, + "max":30 + }, + "Composition":{ + "type":"list", + "member":{"shape":"Clip"} + }, + "CreateJobOutput":{ + "type":"structure", + "members":{ + "Key":{"shape":"Key"}, + "ThumbnailPattern":{"shape":"ThumbnailPattern"}, + "ThumbnailEncryption":{"shape":"Encryption"}, + "Rotate":{"shape":"Rotate"}, + "PresetId":{"shape":"Id"}, + "SegmentDuration":{"shape":"FloatString"}, + "Watermarks":{"shape":"JobWatermarks"}, + "AlbumArt":{"shape":"JobAlbumArt"}, + "Composition":{"shape":"Composition"}, + "Captions":{"shape":"Captions"}, + "Encryption":{"shape":"Encryption"} + } + }, + "CreateJobOutputs":{ + "type":"list", + "member":{"shape":"CreateJobOutput"}, + "max":30 + }, + 
"CreateJobPlaylist":{ + "type":"structure", + "members":{ + "Name":{"shape":"Filename"}, + "Format":{"shape":"PlaylistFormat"}, + "OutputKeys":{"shape":"OutputKeys"}, + "HlsContentProtection":{"shape":"HlsContentProtection"}, + "PlayReadyDrm":{"shape":"PlayReadyDrm"} + } + }, + "CreateJobPlaylists":{ + "type":"list", + "member":{"shape":"CreateJobPlaylist"}, + "max":30 + }, + "CreateJobRequest":{ + "type":"structure", + "required":[ + "PipelineId", + "Input" + ], + "members":{ + "PipelineId":{"shape":"Id"}, + "Input":{"shape":"JobInput"}, + "Output":{"shape":"CreateJobOutput"}, + "Outputs":{"shape":"CreateJobOutputs"}, + "OutputKeyPrefix":{"shape":"Key"}, + "Playlists":{"shape":"CreateJobPlaylists"}, + "UserMetadata":{"shape":"UserMetadata"} + } + }, + "CreateJobResponse":{ + "type":"structure", + "members":{ + "Job":{"shape":"Job"} + } + }, + "CreatePipelineRequest":{ + "type":"structure", + "required":[ + "Name", + "InputBucket", + "Role" + ], + "members":{ + "Name":{"shape":"Name"}, + "InputBucket":{"shape":"BucketName"}, + "OutputBucket":{"shape":"BucketName"}, + "Role":{"shape":"Role"}, + "AwsKmsKeyArn":{"shape":"KeyArn"}, + "Notifications":{"shape":"Notifications"}, + "ContentConfig":{"shape":"PipelineOutputConfig"}, + "ThumbnailConfig":{"shape":"PipelineOutputConfig"} + } + }, + "CreatePipelineResponse":{ + "type":"structure", + "members":{ + "Pipeline":{"shape":"Pipeline"}, + "Warnings":{"shape":"Warnings"} + } + }, + "CreatePresetRequest":{ + "type":"structure", + "required":[ + "Name", + "Container" + ], + "members":{ + "Name":{"shape":"Name"}, + "Description":{"shape":"Description"}, + "Container":{"shape":"PresetContainer"}, + "Video":{"shape":"VideoParameters"}, + "Audio":{"shape":"AudioParameters"}, + "Thumbnails":{"shape":"Thumbnails"} + } + }, + "CreatePresetResponse":{ + "type":"structure", + "members":{ + "Preset":{"shape":"Preset"}, + "Warning":{"shape":"String"} + } + }, + "DeletePipelineRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"Id", + "location":"uri", + "locationName":"Id" + } + } + }, + "DeletePipelineResponse":{ + "type":"structure", + "members":{ + } + }, + "DeletePresetRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"Id", + "location":"uri", + "locationName":"Id" + } + } + }, + "DeletePresetResponse":{ + "type":"structure", + "members":{ + } + }, + "Description":{ + "type":"string", + "min":0, + "max":255 + }, + "DetectedProperties":{ + "type":"structure", + "members":{ + "Width":{"shape":"NullableInteger"}, + "Height":{"shape":"NullableInteger"}, + "FrameRate":{"shape":"FloatString"}, + "FileSize":{"shape":"NullableLong"}, + "DurationMillis":{"shape":"NullableLong"} + } + }, + "Digits":{ + "type":"string", + "pattern":"^\\d{1,5}$" + }, + "DigitsOrAuto":{ + "type":"string", + "pattern":"(^auto$)|(^\\d{2,4}$)" + }, + "Encryption":{ + "type":"structure", + "members":{ + "Mode":{"shape":"EncryptionMode"}, + "Key":{"shape":"Base64EncodedString"}, + "KeyMd5":{"shape":"Base64EncodedString"}, + "InitializationVector":{"shape":"ZeroTo255String"} + } + }, + "EncryptionMode":{ + "type":"string", + "pattern":"(^s3$)|(^s3-aws-kms$)|(^aes-cbc-pkcs7$)|(^aes-ctr$)|(^aes-gcm$)" + }, + "ExceptionMessages":{ + "type":"list", + "member":{"shape":"String"} + }, + "Filename":{ + "type":"string", + "min":1, + "max":255 + }, + "FixedGOP":{ + "type":"string", + "pattern":"(^true$)|(^false$)" + }, + "FloatString":{ + "type":"string", + "pattern":"^\\d{1,5}(\\.\\d{0,5})?$" + }, + "FrameRate":{ + 
"type":"string", + "pattern":"(^auto$)|(^10$)|(^15$)|(^23.97$)|(^24$)|(^25$)|(^29.97$)|(^30$)|(^50$)|(^60$)" + }, + "Grantee":{ + "type":"string", + "min":1, + "max":255 + }, + "GranteeType":{ + "type":"string", + "pattern":"(^Canonical$)|(^Email$)|(^Group$)" + }, + "HlsContentProtection":{ + "type":"structure", + "members":{ + "Method":{"shape":"HlsContentProtectionMethod"}, + "Key":{"shape":"Base64EncodedString"}, + "KeyMd5":{"shape":"Base64EncodedString"}, + "InitializationVector":{"shape":"ZeroTo255String"}, + "LicenseAcquisitionUrl":{"shape":"ZeroTo512String"}, + "KeyStoragePolicy":{"shape":"KeyStoragePolicy"} + } + }, + "HlsContentProtectionMethod":{ + "type":"string", + "pattern":"(^aes-128$)" + }, + "HorizontalAlign":{ + "type":"string", + "pattern":"(^Left$)|(^Right$)|(^Center$)" + }, + "Id":{ + "type":"string", + "pattern":"^\\d{13}-\\w{6}$" + }, + "IncompatibleVersionException":{ + "type":"structure", + "members":{ + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "Interlaced":{ + "type":"string", + "pattern":"(^auto$)|(^true$)|(^false$)" + }, + "InternalServiceException":{ + "type":"structure", + "members":{ + }, + "exception":true, + "fault":true + }, + "Job":{ + "type":"structure", + "members":{ + "Id":{"shape":"Id"}, + "Arn":{"shape":"String"}, + "PipelineId":{"shape":"Id"}, + "Input":{"shape":"JobInput"}, + "Output":{"shape":"JobOutput"}, + "Outputs":{"shape":"JobOutputs"}, + "OutputKeyPrefix":{"shape":"Key"}, + "Playlists":{"shape":"Playlists"}, + "Status":{"shape":"JobStatus"}, + "UserMetadata":{"shape":"UserMetadata"}, + "Timing":{"shape":"Timing"} + } + }, + "JobAlbumArt":{ + "type":"structure", + "members":{ + "MergePolicy":{"shape":"MergePolicy"}, + "Artwork":{"shape":"Artworks"} + } + }, + "JobContainer":{ + "type":"string", + "pattern":"(^auto$)|(^3gp$)|(^asf$)|(^avi$)|(^divx$)|(^flv$)|(^mkv$)|(^mov$)|(^mp4$)|(^mpeg$)|(^mpeg-ps$)|(^mpeg-ts$)|(^mxf$)|(^ogg$)|(^ts$)|(^vob$)|(^wav$)|(^webm$)|(^mp3$)|(^m4a$)|(^aac$)" + }, + "JobInput":{ + "type":"structure", + "members":{ + "Key":{"shape":"Key"}, + "FrameRate":{"shape":"FrameRate"}, + "Resolution":{"shape":"Resolution"}, + "AspectRatio":{"shape":"AspectRatio"}, + "Interlaced":{"shape":"Interlaced"}, + "Container":{"shape":"JobContainer"}, + "Encryption":{"shape":"Encryption"}, + "DetectedProperties":{"shape":"DetectedProperties"} + } + }, + "JobOutput":{ + "type":"structure", + "members":{ + "Id":{"shape":"String"}, + "Key":{"shape":"Key"}, + "ThumbnailPattern":{"shape":"ThumbnailPattern"}, + "ThumbnailEncryption":{"shape":"Encryption"}, + "Rotate":{"shape":"Rotate"}, + "PresetId":{"shape":"Id"}, + "SegmentDuration":{"shape":"FloatString"}, + "Status":{"shape":"JobStatus"}, + "StatusDetail":{"shape":"Description"}, + "Duration":{"shape":"NullableLong"}, + "Width":{"shape":"NullableInteger"}, + "Height":{"shape":"NullableInteger"}, + "FrameRate":{"shape":"FloatString"}, + "FileSize":{"shape":"NullableLong"}, + "DurationMillis":{"shape":"NullableLong"}, + "Watermarks":{"shape":"JobWatermarks"}, + "AlbumArt":{"shape":"JobAlbumArt"}, + "Composition":{"shape":"Composition"}, + "Captions":{"shape":"Captions"}, + "Encryption":{"shape":"Encryption"}, + "AppliedColorSpaceConversion":{"shape":"String"} + } + }, + "JobOutputs":{ + "type":"list", + "member":{"shape":"JobOutput"} + }, + "JobStatus":{ + "type":"string", + "pattern":"(^Submitted$)|(^Progressing$)|(^Complete$)|(^Canceled$)|(^Error$)" + }, + "JobWatermark":{ + "type":"structure", + "members":{ + "PresetWatermarkId":{"shape":"PresetWatermarkId"}, + 
"InputKey":{"shape":"WatermarkKey"}, + "Encryption":{"shape":"Encryption"} + } + }, + "JobWatermarks":{ + "type":"list", + "member":{"shape":"JobWatermark"} + }, + "Jobs":{ + "type":"list", + "member":{"shape":"Job"} + }, + "JpgOrPng":{ + "type":"string", + "pattern":"(^jpg$)|(^png$)" + }, + "Key":{ + "type":"string", + "min":1, + "max":255 + }, + "KeyArn":{ + "type":"string", + "min":0, + "max":255 + }, + "KeyIdGuid":{ + "type":"string", + "pattern":"(^[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{12}$)|(^[0-9A-Fa-f]{32}$)" + }, + "KeyStoragePolicy":{ + "type":"string", + "pattern":"(^NoStore$)|(^WithVariantPlaylists$)" + }, + "KeyframesMaxDist":{ + "type":"string", + "pattern":"^\\d{1,6}$" + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + }, + "error":{"httpStatusCode":429}, + "exception":true + }, + "ListJobsByPipelineRequest":{ + "type":"structure", + "required":["PipelineId"], + "members":{ + "PipelineId":{ + "shape":"Id", + "location":"uri", + "locationName":"PipelineId" + }, + "Ascending":{ + "shape":"Ascending", + "location":"querystring", + "locationName":"Ascending" + }, + "PageToken":{ + "shape":"Id", + "location":"querystring", + "locationName":"PageToken" + } + } + }, + "ListJobsByPipelineResponse":{ + "type":"structure", + "members":{ + "Jobs":{"shape":"Jobs"}, + "NextPageToken":{"shape":"Id"} + } + }, + "ListJobsByStatusRequest":{ + "type":"structure", + "required":["Status"], + "members":{ + "Status":{ + "shape":"JobStatus", + "location":"uri", + "locationName":"Status" + }, + "Ascending":{ + "shape":"Ascending", + "location":"querystring", + "locationName":"Ascending" + }, + "PageToken":{ + "shape":"Id", + "location":"querystring", + "locationName":"PageToken" + } + } + }, + "ListJobsByStatusResponse":{ + "type":"structure", + "members":{ + "Jobs":{"shape":"Jobs"}, + "NextPageToken":{"shape":"Id"} + } + }, + "ListPipelinesRequest":{ + "type":"structure", + "members":{ + "Ascending":{ + "shape":"Ascending", + "location":"querystring", + "locationName":"Ascending" + }, + "PageToken":{ + "shape":"Id", + "location":"querystring", + "locationName":"PageToken" + } + } + }, + "ListPipelinesResponse":{ + "type":"structure", + "members":{ + "Pipelines":{"shape":"Pipelines"}, + "NextPageToken":{"shape":"Id"} + } + }, + "ListPresetsRequest":{ + "type":"structure", + "members":{ + "Ascending":{ + "shape":"Ascending", + "location":"querystring", + "locationName":"Ascending" + }, + "PageToken":{ + "shape":"Id", + "location":"querystring", + "locationName":"PageToken" + } + } + }, + "ListPresetsResponse":{ + "type":"structure", + "members":{ + "Presets":{"shape":"Presets"}, + "NextPageToken":{"shape":"Id"} + } + }, + "MaxFrameRate":{ + "type":"string", + "pattern":"(^10$)|(^15$)|(^23.97$)|(^24$)|(^25$)|(^29.97$)|(^30$)|(^50$)|(^60$)" + }, + "MergePolicy":{ + "type":"string", + "pattern":"(^Replace$)|(^Prepend$)|(^Append$)|(^Fallback$)" + }, + "Name":{ + "type":"string", + "min":1, + "max":40 + }, + "NonEmptyBase64EncodedString":{ + "type":"string", + "pattern":"(^(?:[A-Za-z0-9\\+/]{4})*(?:[A-Za-z0-9\\+/]{2}==|[A-Za-z0-9\\+/]{3}=)?$)" + }, + "Notifications":{ + "type":"structure", + "members":{ + "Progressing":{"shape":"SnsTopic"}, + "Completed":{"shape":"SnsTopic"}, + "Warning":{"shape":"SnsTopic"}, + "Error":{"shape":"SnsTopic"} + } + }, + "NullableInteger":{"type":"integer"}, + "NullableLong":{"type":"long"}, + "OneTo512String":{ + "type":"string", + "min":1, + "max":512 + }, + "Opacity":{ + "type":"string", + 
"pattern":"^\\d{1,3}(\\.\\d{0,20})?$" + }, + "OutputKeys":{ + "type":"list", + "member":{"shape":"Key"}, + "max":30 + }, + "PaddingPolicy":{ + "type":"string", + "pattern":"(^Pad$)|(^NoPad$)" + }, + "Permission":{ + "type":"structure", + "members":{ + "GranteeType":{"shape":"GranteeType"}, + "Grantee":{"shape":"Grantee"}, + "Access":{"shape":"AccessControls"} + } + }, + "Permissions":{ + "type":"list", + "member":{"shape":"Permission"}, + "max":30 + }, + "Pipeline":{ + "type":"structure", + "members":{ + "Id":{"shape":"Id"}, + "Arn":{"shape":"String"}, + "Name":{"shape":"Name"}, + "Status":{"shape":"PipelineStatus"}, + "InputBucket":{"shape":"BucketName"}, + "OutputBucket":{"shape":"BucketName"}, + "Role":{"shape":"Role"}, + "AwsKmsKeyArn":{"shape":"KeyArn"}, + "Notifications":{"shape":"Notifications"}, + "ContentConfig":{"shape":"PipelineOutputConfig"}, + "ThumbnailConfig":{"shape":"PipelineOutputConfig"} + } + }, + "PipelineOutputConfig":{ + "type":"structure", + "members":{ + "Bucket":{"shape":"BucketName"}, + "StorageClass":{"shape":"StorageClass"}, + "Permissions":{"shape":"Permissions"} + } + }, + "PipelineStatus":{ + "type":"string", + "pattern":"(^Active$)|(^Paused$)" + }, + "Pipelines":{ + "type":"list", + "member":{"shape":"Pipeline"} + }, + "PixelsOrPercent":{ + "type":"string", + "pattern":"(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)" + }, + "PlayReadyDrm":{ + "type":"structure", + "members":{ + "Format":{"shape":"PlayReadyDrmFormatString"}, + "Key":{"shape":"NonEmptyBase64EncodedString"}, + "KeyMd5":{"shape":"NonEmptyBase64EncodedString"}, + "KeyId":{"shape":"KeyIdGuid"}, + "InitializationVector":{"shape":"ZeroTo255String"}, + "LicenseAcquisitionUrl":{"shape":"OneTo512String"} + } + }, + "PlayReadyDrmFormatString":{ + "type":"string", + "pattern":"(^microsoft$)|(^discretix-3.0$)" + }, + "Playlist":{ + "type":"structure", + "members":{ + "Name":{"shape":"Filename"}, + "Format":{"shape":"PlaylistFormat"}, + "OutputKeys":{"shape":"OutputKeys"}, + "HlsContentProtection":{"shape":"HlsContentProtection"}, + "PlayReadyDrm":{"shape":"PlayReadyDrm"}, + "Status":{"shape":"JobStatus"}, + "StatusDetail":{"shape":"Description"} + } + }, + "PlaylistFormat":{ + "type":"string", + "pattern":"(^HLSv3$)|(^HLSv4$)|(^Smooth$)" + }, + "Playlists":{ + "type":"list", + "member":{"shape":"Playlist"} + }, + "Preset":{ + "type":"structure", + "members":{ + "Id":{"shape":"Id"}, + "Arn":{"shape":"String"}, + "Name":{"shape":"Name"}, + "Description":{"shape":"Description"}, + "Container":{"shape":"PresetContainer"}, + "Audio":{"shape":"AudioParameters"}, + "Video":{"shape":"VideoParameters"}, + "Thumbnails":{"shape":"Thumbnails"}, + "Type":{"shape":"PresetType"} + } + }, + "PresetContainer":{ + "type":"string", + "pattern":"(^mp4$)|(^ts$)|(^webm$)|(^mp3$)|(^flac$)|(^oga$)|(^ogg$)|(^fmp4$)|(^mpg$)|(^flv$)|(^gif$)|(^mxf$)" + }, + "PresetType":{ + "type":"string", + "pattern":"(^System$)|(^Custom$)" + }, + "PresetWatermark":{ + "type":"structure", + "members":{ + "Id":{"shape":"PresetWatermarkId"}, + "MaxWidth":{"shape":"PixelsOrPercent"}, + "MaxHeight":{"shape":"PixelsOrPercent"}, + "SizingPolicy":{"shape":"WatermarkSizingPolicy"}, + "HorizontalAlign":{"shape":"HorizontalAlign"}, + "HorizontalOffset":{"shape":"PixelsOrPercent"}, + "VerticalAlign":{"shape":"VerticalAlign"}, + "VerticalOffset":{"shape":"PixelsOrPercent"}, + "Opacity":{"shape":"Opacity"}, + "Target":{"shape":"Target"} + } + }, + "PresetWatermarkId":{ + "type":"string", + "min":1, + "max":40 + }, + "PresetWatermarks":{ + "type":"list", + 
"member":{"shape":"PresetWatermark"} + }, + "Presets":{ + "type":"list", + "member":{"shape":"Preset"} + }, + "ReadJobRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"Id", + "location":"uri", + "locationName":"Id" + } + } + }, + "ReadJobResponse":{ + "type":"structure", + "members":{ + "Job":{"shape":"Job"} + } + }, + "ReadPipelineRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"Id", + "location":"uri", + "locationName":"Id" + } + } + }, + "ReadPipelineResponse":{ + "type":"structure", + "members":{ + "Pipeline":{"shape":"Pipeline"}, + "Warnings":{"shape":"Warnings"} + } + }, + "ReadPresetRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"Id", + "location":"uri", + "locationName":"Id" + } + } + }, + "ReadPresetResponse":{ + "type":"structure", + "members":{ + "Preset":{"shape":"Preset"} + } + }, + "Resolution":{ + "type":"string", + "pattern":"(^auto$)|(^\\d{1,5}x\\d{1,5}$)" + }, + "ResourceInUseException":{ + "type":"structure", + "members":{ + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "Role":{ + "type":"string", + "pattern":"^arn:aws:iam::\\w{12}:role/.+$" + }, + "Rotate":{ + "type":"string", + "pattern":"(^auto$)|(^0$)|(^90$)|(^180$)|(^270$)" + }, + "SizingPolicy":{ + "type":"string", + "pattern":"(^Fit$)|(^Fill$)|(^Stretch$)|(^Keep$)|(^ShrinkToFit$)|(^ShrinkToFill$)" + }, + "SnsTopic":{ + "type":"string", + "pattern":"(^$)|(^arn:aws:sns:.*:\\w{12}:.+$)" + }, + "SnsTopics":{ + "type":"list", + "member":{"shape":"SnsTopic"}, + "max":30 + }, + "StorageClass":{ + "type":"string", + "pattern":"(^ReducedRedundancy$)|(^Standard$)" + }, + "String":{"type":"string"}, + "Success":{ + "type":"string", + "pattern":"(^true$)|(^false$)" + }, + "Target":{ + "type":"string", + "pattern":"(^Content$)|(^Frame$)" + }, + "TestRoleRequest":{ + "type":"structure", + "required":[ + "Role", + "InputBucket", + "OutputBucket", + "Topics" + ], + "members":{ + "Role":{"shape":"Role"}, + "InputBucket":{"shape":"BucketName"}, + "OutputBucket":{"shape":"BucketName"}, + "Topics":{"shape":"SnsTopics"} + } + }, + "TestRoleResponse":{ + "type":"structure", + "members":{ + "Success":{"shape":"Success"}, + "Messages":{"shape":"ExceptionMessages"} + } + }, + "ThumbnailPattern":{ + "type":"string", + "pattern":"(^$)|(^.*\\{count\\}.*$)" + }, + "ThumbnailResolution":{ + "type":"string", + "pattern":"^\\d{1,5}x\\d{1,5}$" + }, + "Thumbnails":{ + "type":"structure", + "members":{ + "Format":{"shape":"JpgOrPng"}, + "Interval":{"shape":"Digits"}, + "Resolution":{"shape":"ThumbnailResolution"}, + "AspectRatio":{"shape":"AspectRatio"}, + "MaxWidth":{"shape":"DigitsOrAuto"}, + "MaxHeight":{"shape":"DigitsOrAuto"}, + "SizingPolicy":{"shape":"SizingPolicy"}, + "PaddingPolicy":{"shape":"PaddingPolicy"} + } + }, + "Time":{ + "type":"string", + "pattern":"(^\\d{1,5}(\\.\\d{0,3})?$)|(^([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)" + }, + "TimeOffset":{ + "type":"string", + "pattern":"(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)" + }, + "TimeSpan":{ + "type":"structure", + "members":{ + "StartTime":{"shape":"Time"}, + "Duration":{"shape":"Time"} + } + }, + "Timing":{ + "type":"structure", + "members":{ + "SubmitTimeMillis":{"shape":"NullableLong"}, + "StartTimeMillis":{"shape":"NullableLong"}, + 
"FinishTimeMillis":{"shape":"NullableLong"} + } + }, + "UpdatePipelineNotificationsRequest":{ + "type":"structure", + "required":[ + "Id", + "Notifications" + ], + "members":{ + "Id":{ + "shape":"Id", + "location":"uri", + "locationName":"Id" + }, + "Notifications":{"shape":"Notifications"} + } + }, + "UpdatePipelineNotificationsResponse":{ + "type":"structure", + "members":{ + "Pipeline":{"shape":"Pipeline"} + } + }, + "UpdatePipelineRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"Id", + "location":"uri", + "locationName":"Id" + }, + "Name":{"shape":"Name"}, + "InputBucket":{"shape":"BucketName"}, + "Role":{"shape":"Role"}, + "AwsKmsKeyArn":{"shape":"KeyArn"}, + "Notifications":{"shape":"Notifications"}, + "ContentConfig":{"shape":"PipelineOutputConfig"}, + "ThumbnailConfig":{"shape":"PipelineOutputConfig"} + } + }, + "UpdatePipelineResponse":{ + "type":"structure", + "members":{ + "Pipeline":{"shape":"Pipeline"}, + "Warnings":{"shape":"Warnings"} + } + }, + "UpdatePipelineStatusRequest":{ + "type":"structure", + "required":[ + "Id", + "Status" + ], + "members":{ + "Id":{ + "shape":"Id", + "location":"uri", + "locationName":"Id" + }, + "Status":{"shape":"PipelineStatus"} + } + }, + "UpdatePipelineStatusResponse":{ + "type":"structure", + "members":{ + "Pipeline":{"shape":"Pipeline"} + } + }, + "UserMetadata":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "ValidationException":{ + "type":"structure", + "members":{ + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "VerticalAlign":{ + "type":"string", + "pattern":"(^Top$)|(^Bottom$)|(^Center$)" + }, + "VideoBitRate":{ + "type":"string", + "pattern":"(^\\d{2,5}$)|(^auto$)" + }, + "VideoCodec":{ + "type":"string", + "pattern":"(^H\\.264$)|(^vp8$)|(^mpeg2$)|(^gif$)" + }, + "VideoParameters":{ + "type":"structure", + "members":{ + "Codec":{"shape":"VideoCodec"}, + "CodecOptions":{"shape":"CodecOptions"}, + "KeyframesMaxDist":{"shape":"KeyframesMaxDist"}, + "FixedGOP":{"shape":"FixedGOP"}, + "BitRate":{"shape":"VideoBitRate"}, + "FrameRate":{"shape":"FrameRate"}, + "MaxFrameRate":{"shape":"MaxFrameRate"}, + "Resolution":{"shape":"Resolution"}, + "AspectRatio":{"shape":"AspectRatio"}, + "MaxWidth":{"shape":"DigitsOrAuto"}, + "MaxHeight":{"shape":"DigitsOrAuto"}, + "DisplayAspectRatio":{"shape":"AspectRatio"}, + "SizingPolicy":{"shape":"SizingPolicy"}, + "PaddingPolicy":{"shape":"PaddingPolicy"}, + "Watermarks":{"shape":"PresetWatermarks"} + } + }, + "Warning":{ + "type":"structure", + "members":{ + "Code":{"shape":"String"}, + "Message":{"shape":"String"} + } + }, + "Warnings":{ + "type":"list", + "member":{"shape":"Warning"} + }, + "WatermarkKey":{ + "type":"string", + "min":1, + "max":255, + "pattern":"(^.{1,}.jpg$)|(^.{1,}.jpeg$)|(^.{1,}.png$)" + }, + "WatermarkSizingPolicy":{ + "type":"string", + "pattern":"(^Fit$)|(^Stretch$)|(^ShrinkToFit$)" + }, + "ZeroTo255String":{ + "type":"string", + "min":0, + "max":255 + }, + "ZeroTo512String":{ + "type":"string", + "min":0, + "max":512 + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/elastictranscoder/2012-09-25/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/elastictranscoder/2012-09-25/docs-2.json new file mode 100644 index 000000000..0989c649b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/elastictranscoder/2012-09-25/docs-2.json @@ -0,0 +1,1152 @@ +{ + "version": "2.0", + "operations": { + "CancelJob": "

    The CancelJob operation cancels an unfinished job.

    You can only cancel a job that has a status of Submitted. To prevent a pipeline from starting to process a job while you're getting the job identifier, use UpdatePipelineStatus to temporarily pause the pipeline.", + "CreateJob": "

    When you create a job, Elastic Transcoder returns JSON data that includes the values that you specified plus information about the job that is created.

    If you have specified more than one output for your jobs (for example, one output for the Kindle Fire and another output for the Apple iPhone 4s), you currently must use the Elastic Transcoder API to list the jobs (as opposed to the AWS Console).
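
    As a rough illustration of how these request shapes surface in the generated Go client that this patch vendors, here is a minimal CreateJob sketch. The region, pipeline ID, preset ID, and object keys are placeholder values, not ones taken from this change:

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/elastictranscoder"
    )

    func createJob() error {
        svc := elastictranscoder.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))
        resp, err := svc.CreateJob(&elastictranscoder.CreateJobInput{
            PipelineId: aws.String("1111111111111-abcde1"), // placeholder; the Id pattern is 13 digits, "-", 6 word chars
            Input: &elastictranscoder.JobInput{
                Key:       aws.String("inputs/vacation.mov"),
                Container: aws.String("auto"), // let Elastic Transcoder detect the container
            },
            Outputs: []*elastictranscoder.CreateJobOutput{{
                Key:              aws.String("outputs/vacation-web.mp4"),
                PresetId:         aws.String("1351620000001-000010"), // placeholder preset ID
                ThumbnailPattern: aws.String("thumbs/{count}"),       // the pattern must contain {count}
            }},
        })
        if err != nil {
            return err
        }
        fmt.Println("created job:", aws.StringValue(resp.Job.Id))
        return nil
    }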

    ", + "CreatePipeline": "

    The CreatePipeline operation creates a pipeline with settings that you specify.

    ", + "CreatePreset": "

    The CreatePreset operation creates a preset with settings that you specify.

    Elastic Transcoder checks the CreatePreset settings to ensure that they meet Elastic Transcoder requirements and to determine whether they comply with H.264 standards. If your settings are not valid for Elastic Transcoder, Elastic Transcoder returns an HTTP 400 response (ValidationException) and does not create the preset. If the settings are valid for Elastic Transcoder but aren't strictly compliant with the H.264 standard, Elastic Transcoder creates the preset and returns a warning message in the response. This helps you determine whether your settings comply with the H.264 standard while giving you greater flexibility with respect to the video that Elastic Transcoder produces.

    Elastic Transcoder uses the H.264 video-compression format. For more information, see the International Telecommunication Union publication Recommendation ITU-T H.264: Advanced video coding for generic audiovisual services.
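
    A hedged sketch of the corresponding CreatePreset call, reusing the client from the CreateJob sketch above; every value is illustrative, and the response's Warning field is where the valid-but-not-strictly-H.264-compliant case described above shows up:

    // svc is an *elastictranscoder.ElasticTranscoder client, as in the CreateJob sketch.
    resp, err := svc.CreatePreset(&elastictranscoder.CreatePresetInput{
        Name:      aws.String("my-720p-web"), // 1-40 characters, per the Name shape
        Container: aws.String("mp4"),
        Video: &elastictranscoder.VideoParameters{
            Codec: aws.String("H.264"),
            CodecOptions: map[string]*string{
                "Profile": aws.String("main"),
                "Level":   aws.String("3.1"),
            },
            KeyframesMaxDist:   aws.String("90"),
            FixedGOP:           aws.String("false"),
            BitRate:            aws.String("2200"),
            FrameRate:          aws.String("30"),
            MaxWidth:           aws.String("1280"),
            MaxHeight:          aws.String("720"),
            DisplayAspectRatio: aws.String("auto"),
            SizingPolicy:       aws.String("ShrinkToFit"),
            PaddingPolicy:      aws.String("NoPad"),
        },
        Audio: &elastictranscoder.AudioParameters{
            Codec:      aws.String("aac"),
            SampleRate: aws.String("44100"),
            BitRate:    aws.String("128"),
            Channels:   aws.String("2"),
        },
        Thumbnails: &elastictranscoder.Thumbnails{
            Format:        aws.String("png"),
            Interval:      aws.String("60"),
            MaxWidth:      aws.String("auto"),
            MaxHeight:     aws.String("auto"),
            SizingPolicy:  aws.String("ShrinkToFit"),
            PaddingPolicy: aws.String("NoPad"),
        },
    })
    if err == nil && resp.Warning != nil {
        // Settings were accepted but are not strictly H.264-compliant.
        fmt.Println("warning:", aws.StringValue(resp.Warning))
    }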

    ", + "DeletePipeline": "

    The DeletePipeline operation removes a pipeline.

    You can only delete a pipeline that has never been used or that is not currently in use (doesn't contain any active jobs). If the pipeline is currently in use, DeletePipeline returns an error.

    ", + "DeletePreset": "

    The DeletePreset operation removes a preset that you've added in an AWS region.

    You can't delete the default presets that are included with Elastic Transcoder.

    ", + "ListJobsByPipeline": "

    The ListJobsByPipeline operation gets a list of the jobs currently in a pipeline.

    Elastic Transcoder returns all of the jobs currently in the specified pipeline. The response body contains one element for each job that satisfies the search criteria.

    ", + "ListJobsByStatus": "

    The ListJobsByStatus operation gets a list of jobs that have a specified status. The response body contains one element for each job that satisfies the search criteria.
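
    Paging is driven by the PageToken/NextPageToken pair defined in the request and response shapes earlier in this model. A small sketch of a manual paging loop, with the same assumed client (fragment of a function returning error):

    input := &elastictranscoder.ListJobsByStatusInput{
        Status:    aws.String("Submitted"),
        Ascending: aws.String("true"), // oldest jobs first
    }
    for {
        page, err := svc.ListJobsByStatus(input)
        if err != nil {
            return err
        }
        for _, job := range page.Jobs {
            fmt.Println(aws.StringValue(job.Id), aws.StringValue(job.Status))
        }
        if page.NextPageToken == nil {
            break // no more pages
        }
        input.PageToken = page.NextPageToken
    }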

    ", + "ListPipelines": "

    The ListPipelines operation gets a list of the pipelines associated with the current AWS account.

    ", + "ListPresets": "

    The ListPresets operation gets a list of the default presets included with Elastic Transcoder and the presets that you've added in an AWS region.

    ", + "ReadJob": "

    The ReadJob operation returns detailed information about a job.

    ", + "ReadPipeline": "

    The ReadPipeline operation gets detailed information about a pipeline.

    ", + "ReadPreset": "

    The ReadPreset operation gets detailed information about a preset.

    ", + "TestRole": "

    The TestRole operation tests the IAM role used to create the pipeline.

    The TestRole action lets you determine whether the IAM role you are using has sufficient permissions to let Elastic Transcoder perform tasks associated with the transcoding process. The action attempts to assume the specified IAM role, checks read access to the input and output buckets, and tries to send a test notification to Amazon SNS topics that you specify.
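
    A sketch of the TestRole call; the role ARN, bucket names, and SNS topic ARN are placeholders chosen to match the Role and SnsTopic patterns in this model:

    out, err := svc.TestRole(&elastictranscoder.TestRoleInput{
        Role:         aws.String("arn:aws:iam::111122223333:role/Elastic_Transcoder_Default_Role"),
        InputBucket:  aws.String("my-input-bucket"),
        OutputBucket: aws.String("my-output-bucket"),
        Topics:       []*string{aws.String("arn:aws:sns:us-east-1:111122223333:transcode-progress")},
    })
    if err == nil && aws.StringValue(out.Success) != "true" {
        for _, msg := range out.Messages { // one entry per failed check
            fmt.Println(aws.StringValue(msg))
        }
    }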

    ", + "UpdatePipeline": "

    Use the UpdatePipeline operation to update settings for a pipeline. When you change pipeline settings, your changes take effect immediately. Jobs that you have already submitted and that Elastic Transcoder has not started to process are affected in addition to jobs that you submit after you change settings.

    ", + "UpdatePipelineNotifications": "

    With the UpdatePipelineNotifications operation, you can update Amazon Simple Notification Service (Amazon SNS) notifications for a pipeline.

    When you update notifications for a pipeline, Elastic Transcoder returns the values that you specified in the request.

    ", + "UpdatePipelineStatus": "

    The UpdatePipelineStatus operation pauses or reactivates a pipeline, so that the pipeline stops or restarts the processing of jobs.

    Changing the pipeline status is useful if you want to cancel one or more jobs. You can't cancel jobs after Elastic Transcoder has started processing them; if you pause the pipeline to which you submitted the jobs, you have more time to get the job IDs for the jobs that you want to cancel, and to send a CancelJob request.
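
    Put together, the pause-then-cancel workflow described here might look like the fragment below; pipelineID and jobID are assumed to come from ListJobsByPipeline or your own records:

    // Pause the pipeline so no queued jobs start while job IDs are collected.
    _, err := svc.UpdatePipelineStatus(&elastictranscoder.UpdatePipelineStatusInput{
        Id:     aws.String(pipelineID),
        Status: aws.String("Paused"),
    })
    if err != nil {
        return err
    }
    // Cancel a job; this succeeds only while its status is still Submitted.
    if _, err = svc.CancelJob(&elastictranscoder.CancelJobInput{Id: aws.String(jobID)}); err != nil {
        return err
    }
    // Resume processing.
    _, err = svc.UpdatePipelineStatus(&elastictranscoder.UpdatePipelineStatusInput{
        Id:     aws.String(pipelineID),
        Status: aws.String("Active"),
    })
    return err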

    " + }, + "service": "AWS Elastic Transcoder Service

    The AWS Elastic Transcoder Service.

    ", + "shapes": { + "AccessControl": { + "base": null, + "refs": { + "AccessControls$member": null + } + }, + "AccessControls": { + "base": null, + "refs": { + "Permission$Access": "

    The permission that you want to give to the AWS user that is listed in Grantee. Valid values include:

    • READ: The grantee can read the thumbnails and metadata for thumbnails that Elastic Transcoder adds to the Amazon S3 bucket.
    • READ_ACP: The grantee can read the object ACL for thumbnails that Elastic Transcoder adds to the Amazon S3 bucket.
    • WRITE_ACP: The grantee can write the ACL for the thumbnails that Elastic Transcoder adds to the Amazon S3 bucket.
    • FULL_CONTROL: The grantee has READ, READ_ACP, and WRITE_ACP permissions for the thumbnails that Elastic Transcoder adds to the Amazon S3 bucket.

    " + } + }, + "AccessDeniedException": { + "base": "

    General authentication failure. The request was not signed correctly.

    ", + "refs": { + } + }, + "Artwork": { + "base": "

    The file to be used as album art. There can be multiple artworks associated with an audio file, to a maximum of 20.

    To remove artwork or leave the artwork empty, you can either set Artwork to null, or set the Merge Policy to \"Replace\" and use an empty Artwork array.

    To pass through existing artwork unchanged, set the Merge Policy to \"Prepend\", \"Append\", or \"Fallback\", and use an empty Artwork array.

    ", + "refs": { + "Artworks$member": null + } + }, + "Artworks": { + "base": null, + "refs": { + "JobAlbumArt$Artwork": "

    The file to be used as album art. There can be multiple artworks associated with an audio file, to a maximum of 20. Valid formats are .jpg and .png.

    " + } + }, + "Ascending": { + "base": null, + "refs": { + "ListJobsByPipelineRequest$Ascending": "

    To list jobs in chronological order by the date and time that they were submitted, enter true. To list jobs in reverse chronological order, enter false.

    ", + "ListJobsByStatusRequest$Ascending": "

    To list jobs in chronological order by the date and time that they were submitted, enter true. To list jobs in reverse chronological order, enter false.

    ", + "ListPipelinesRequest$Ascending": "

    To list pipelines in chronological order by the date and time that they were created, enter true. To list pipelines in reverse chronological order, enter false.

    ", + "ListPresetsRequest$Ascending": "

    To list presets in chronological order by the date and time that they were created, enter true. To list presets in reverse chronological order, enter false.

    " + } + }, + "AspectRatio": { + "base": null, + "refs": { + "JobInput$AspectRatio": "

    The aspect ratio of the input file. If you want Elastic Transcoder to automatically detect the aspect ratio of the input file, specify auto. If you want to specify the aspect ratio for the output file, enter one of the following values:

    1:1, 4:3, 3:2, 16:9

    If you specify a value other than auto, Elastic Transcoder disables automatic detection of the aspect ratio.

    ", + "Thumbnails$AspectRatio": "

    To better control resolution and aspect ratio of thumbnails, we recommend that you use the values MaxWidth, MaxHeight, SizingPolicy, and PaddingPolicy instead of Resolution and AspectRatio. The two groups of settings are mutually exclusive. Do not use them together.

    The aspect ratio of thumbnails. Valid values include:

    auto, 1:1, 4:3, 3:2, 16:9

    If you specify auto, Elastic Transcoder tries to preserve the aspect ratio of the video in the output file.

    ", + "VideoParameters$AspectRatio": "

    To better control resolution and aspect ratio of output videos, we recommend that you use the values MaxWidth, MaxHeight, SizingPolicy, PaddingPolicy, and DisplayAspectRatio instead of Resolution and AspectRatio. The two groups of settings are mutually exclusive. Do not use them together.

    The display aspect ratio of the video in the output file. Valid values include:

    auto, 1:1, 4:3, 3:2, 16:9

    If you specify auto, Elastic Transcoder tries to preserve the aspect ratio of the input file.

    If you specify an aspect ratio for the output file that differs from aspect ratio of the input file, Elastic Transcoder adds pillarboxing (black bars on the sides) or letterboxing (black bars on the top and bottom) to maintain the aspect ratio of the active region of the video.

    ", + "VideoParameters$DisplayAspectRatio": "

    The value that Elastic Transcoder adds to the metadata in the output file.

    " + } + }, + "AudioBitDepth": { + "base": null, + "refs": { + "AudioCodecOptions$BitDepth": "

    You can only choose an audio bit depth when you specify flac or pcm for the value of Audio:Codec.

    The bit depth of a sample is how many bits of information are included in the audio samples. The higher the bit depth, the better the audio, but the larger the file.

    Valid values are 16 and 24.

    The most common bit depth is 24.

    " + } + }, + "AudioBitOrder": { + "base": null, + "refs": { + "AudioCodecOptions$BitOrder": "

    You can only choose an audio bit order when you specify pcm for the value of Audio:Codec.

    The order the bits of a PCM sample are stored in.

    The supported value is LittleEndian.

    " + } + }, + "AudioBitRate": { + "base": null, + "refs": { + "AudioParameters$BitRate": "

    The bit rate of the audio stream in the output file, in kilobits/second. Enter an integer between 64 and 320, inclusive.

    " + } + }, + "AudioChannels": { + "base": null, + "refs": { + "AudioParameters$Channels": "

    The number of audio channels in the output file. The following values are valid:

    auto, 0, 1, 2

    One channel carries the information played by a single speaker. For example, a stereo track with two channels sends one channel to the left speaker, and the other channel to the right speaker. The output channels are organized into tracks. If you want Elastic Transcoder to automatically detect the number of audio channels in the input file and use that value for the output file, select auto.

    The output of a specific channel value and inputs are as follows:

    • auto channel specified, with any input: Pass through up to eight input channels.
    • 0 channels specified, with any input: Audio omitted from the output.
    • 1 channel specified, with at least one input channel: Mono sound.
    • 2 channels specified, with any input: Two identical mono channels or stereo. For more information about tracks, see Audio:AudioPackingMode.

    For more information about how Elastic Transcoder organizes channels and tracks, see Audio:AudioPackingMode.

    " + } + }, + "AudioCodec": { + "base": null, + "refs": { + "AudioParameters$Codec": "

    The audio codec for the output file. Valid values include aac, flac, mp2, mp3, pcm, and vorbis.

    " + } + }, + "AudioCodecOptions": { + "base": "

    Options associated with your audio codec.

    ", + "refs": { + "AudioParameters$CodecOptions": "

    If you specified AAC for Audio:Codec, this is the AAC compression profile to use. Valid values include:

    auto, AAC-LC, HE-AAC, HE-AACv2

    If you specify auto, Elastic Transcoder chooses a profile based on the bit rate of the output file.

    " + } + }, + "AudioCodecProfile": { + "base": null, + "refs": { + "AudioCodecOptions$Profile": "

    You can only choose an audio profile when you specify AAC for the value of Audio:Codec.

    Specify the AAC profile for the output file. Elastic Transcoder supports the following profiles:

    • auto: If you specify auto, Elastic Transcoder will select the profile based on the bit rate selected for the output file.
    • AAC-LC: The most common AAC profile. Use for bit rates larger than 64 kbps.
    • HE-AAC: Not supported on some older players and devices. Use for bit rates between 40 and 80 kbps.
    • HE-AACv2: Not supported on some players and devices. Use for bit rates less than 48 kbps.

    All outputs in a Smooth playlist must have the same value for Profile.

    If you created any presets before AAC profiles were added, Elastic Transcoder automatically updated your presets to use AAC-LC. You can change the value as required.

    " + } + }, + "AudioPackingMode": { + "base": null, + "refs": { + "AudioParameters$AudioPackingMode": "

    The method of organizing audio channels and tracks. Use Audio:Channels to specify the number of channels in your output, and Audio:AudioPackingMode to specify the number of tracks and their relation to the channels. If you do not specify an Audio:AudioPackingMode, Elastic Transcoder uses SingleTrack.

    The following values are valid:

    SingleTrack, OneChannelPerTrack, and OneChannelPerTrackWithMosTo8Tracks

    When you specify SingleTrack, Elastic Transcoder creates a single track for your output. The track can have up to eight channels. Use SingleTrack for all non-mxf containers.

    The outputs of SingleTrack for a specific channel value and inputs are as follows:

    • 0 channels with any input: Audio omitted from the output
    • 1, 2, or auto channels with no audio input: Audio omitted from the output
    • 1 channel with any input with audio: One track with one channel, downmixed if necessary
    • 2 channels with one track with one channel: One track with two identical channels
    • 2 or auto channels with two tracks with one channel each: One track with two channels
    • 2 or auto channels with one track with two channels: One track with two channels
    • 2 channels with one track with multiple channels: One track with two channels
    • auto channels with one track with one channel: One track with one channel
    • auto channels with one track with multiple channels: One track with multiple channels

    When you specify OneChannelPerTrack, Elastic Transcoder creates a new track for every channel in your output. Your output can have up to eight single-channel tracks.

    The outputs of OneChannelPerTrack for a specific channel value and inputs are as follows:

    • 0 channels with any input: Audio omitted from the output
    • 1, 2, or auto channels with no audio input: Audio omitted from the output
    • 1 channel with any input with audio: One track with one channel, downmixed if necessary
    • 2 channels with one track with one channel: Two tracks with one identical channel each
    • 2 or auto channels with two tracks with one channel each: Two tracks with one channel each
    • 2 or auto channels with one track with two channels: Two tracks with one channel each
    • 2 channels with one track with multiple channels: Two tracks with one channel each
    • auto channels with one track with one channel: One track with one channel
    • auto channels with one track with multiple channels: Up to eight tracks with one channel each

    When you specify OneChannelPerTrackWithMosTo8Tracks, Elastic Transcoder creates eight single-channel tracks for your output. All tracks that do not contain audio data from an input channel are MOS, or Mit Out Sound, tracks.

    The outputs of OneChannelPerTrackWithMosTo8Tracks for a specific channel value and inputs are as follows:

    • 0 channels with any input: Audio omitted from the output
    • 1, 2, or auto channels with no audio input: Audio omitted from the output
    • 1 channel with any input with audio: One track with one channel, downmixed if necessary, plus six MOS tracks
    • 2 channels with one track with one channel: Two tracks with one identical channel each, plus six MOS tracks
    • 2 or auto channels with two tracks with one channel each: Two tracks with one channel each, plus six MOS tracks
    • 2 or auto channels with one track with two channels: Two tracks with one channel each, plus six MOS tracks
    • 2 channels with one track with multiple channels: Two tracks with one channel each, plus six MOS tracks
    • auto channels with one track with one channel: One track with one channel, plus seven MOS tracks
    • auto channels with one track with multiple channels: Up to eight tracks with one channel each, plus MOS tracks until there are eight tracks in all
    " + } + }, + "AudioParameters": { + "base": "

    Parameters required for transcoding audio.

    ", + "refs": { + "CreatePresetRequest$Audio": "

    A section of the request body that specifies the audio parameters.

    ", + "Preset$Audio": "

    A section of the response body that provides information about the audio preset values.

    " + } + }, + "AudioSampleRate": { + "base": null, + "refs": { + "AudioParameters$SampleRate": "

    The sample rate of the audio stream in the output file, in Hertz. Valid values include:

    auto, 22050, 32000, 44100, 48000, 96000

    If you specify auto, Elastic Transcoder automatically detects the sample rate.

    " + } + }, + "AudioSigned": { + "base": null, + "refs": { + "AudioCodecOptions$Signed": "

    You can only choose whether an audio sample is signed when you specify pcm for the value of Audio:Codec.

    Whether audio samples are represented with negative and positive numbers (signed) or only positive numbers (unsigned).

    The supported value is Signed.

    " + } + }, + "Base64EncodedString": { + "base": null, + "refs": { + "Encryption$Key": "

    The data encryption key that you want Elastic Transcoder to use to encrypt your output file, or that was used to encrypt your input file. The key must be base64-encoded and it must be one of the following bit lengths before being base64-encoded:

    128, 192, or 256.

    The key must also be encrypted by using the Amazon Key Management Service.

    ", + "Encryption$KeyMd5": "

    The MD5 digest of the key that you used to encrypt your input file, or that you want Elastic Transcoder to use to encrypt your output file. Elastic Transcoder uses the key digest as a checksum to make sure your key was not corrupted in transit. The key MD5 must be base64-encoded, and it must be exactly 16 bytes long before being base64-encoded.

    ", + "HlsContentProtection$Key": "

    If you want Elastic Transcoder to generate a key for you, leave this field blank.

    If you choose to supply your own key, you must encrypt the key by using AWS KMS. The key must be base64-encoded, and it must be one of the following bit lengths before being base64-encoded:

    128, 192, or 256.

    ", + "HlsContentProtection$KeyMd5": "

    If Elastic Transcoder is generating your key for you, you must leave this field blank.

    The MD5 digest of the key that you want Elastic Transcoder to use to encrypt your output file, and that you want Elastic Transcoder to use as a checksum to make sure your key was not corrupted in transit. The key MD5 must be base64-encoded, and it must be exactly 16 bytes before being base64-encoded.

    " + } + }, + "BucketName": { + "base": null, + "refs": { + "CreatePipelineRequest$InputBucket": "

    The Amazon S3 bucket in which you saved the media files that you want to transcode.

    ", + "CreatePipelineRequest$OutputBucket": "

    The Amazon S3 bucket in which you want Elastic Transcoder to save the transcoded files. (Use this, or use ContentConfig:Bucket plus ThumbnailConfig:Bucket.)

    Specify this value when all of the following are true:

    • You want to save transcoded files, thumbnails (if any), and playlists (if any) together in one bucket.
    • You do not want to specify the users or groups who have access to the transcoded files, thumbnails, and playlists.
    • You do not want to specify the permissions that Elastic Transcoder grants to the files. When Elastic Transcoder saves files in OutputBucket, it grants full control over the files only to the AWS account that owns the role that is specified by Role.
    • You want to associate the transcoded files and thumbnails with the Amazon S3 Standard storage class.

    If you want to save transcoded files and playlists in one bucket and thumbnails in another bucket, to specify which users can access the transcoded files or the permissions the users have, or to change the Amazon S3 storage class, omit OutputBucket and specify values for ContentConfig and ThumbnailConfig instead.

    ", + "Pipeline$InputBucket": "

    The Amazon S3 bucket from which Elastic Transcoder gets media files for transcoding and the graphics files, if any, that you want to use for watermarks.

    ", + "Pipeline$OutputBucket": "

    The Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files, thumbnails, and playlists. Either you specify this value, or you specify both ContentConfig and ThumbnailConfig.

    ", + "PipelineOutputConfig$Bucket": "

    The Amazon S3 bucket in which you want Elastic Transcoder to save the transcoded files. Specify this value when all of the following are true:

    • You want to save transcoded files, thumbnails (if any), and playlists (if any) together in one bucket.
    • You do not want to specify the users or groups who have access to the transcoded files, thumbnails, and playlists.
    • You do not want to specify the permissions that Elastic Transcoder grants to the files.
    • You want to associate the transcoded files and thumbnails with the Amazon S3 Standard storage class.
    If you want to save transcoded files and playlists in one bucket and thumbnails in another bucket, to specify which users can access the transcoded files or the permissions the users have, or to change the Amazon S3 storage class, omit OutputBucket and specify values for ContentConfig and ThumbnailConfig instead.

    ", + "TestRoleRequest$InputBucket": "

    The Amazon S3 bucket that contains media files to be transcoded. The action attempts to read from this bucket.

    ", + "TestRoleRequest$OutputBucket": "

    The Amazon S3 bucket that Elastic Transcoder will write transcoded media files to. The action attempts to read from this bucket.

    ", + "UpdatePipelineRequest$InputBucket": "

    The Amazon S3 bucket in which you saved the media files that you want to transcode and the graphics that you want to use as watermarks.

    " + } + }, + "CancelJobRequest": { + "base": "

    The CancelJobRequest structure.

    ", + "refs": { + } + }, + "CancelJobResponse": { + "base": "

    The response body contains a JSON object. If the job is successfully canceled, the value of Success is true.

    ", + "refs": { + } + }, + "CaptionFormat": { + "base": "

    The file format of the output captions. If you leave this value blank, Elastic Transcoder returns an error.

    ", + "refs": { + "CaptionFormats$member": null + } + }, + "CaptionFormatFormat": { + "base": null, + "refs": { + "CaptionFormat$Format": "

    The format you specify determines whether Elastic Transcoder generates an embedded or sidecar caption for this output.

    • Valid Embedded Caption Formats:

      • for FLAC: None

      • For MP3: None

      • For MP4: mov-text

      • For MPEG-TS: None

      • For ogg: None

      • For webm: None

    • Valid Sidecar Caption Formats: Elastic Transcoder supports dfxp (first div element only), scc, srt, and webvtt. If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.

      • For FMP4: dfxp

      • Non-FMP4 outputs: All sidecar types

      fmp4 captions have an extension of .ismt

    " + } + }, + "CaptionFormatPattern": { + "base": null, + "refs": { + "CaptionFormat$Pattern": "

    The prefix for caption filenames, in the form description-{language}, where:

    • description is a description of the video.
    • {language} is a literal value that Elastic Transcoder replaces with the two- or three-letter code for the language of the caption in the output file names.

    If you don't include {language} in the file name pattern, Elastic Transcoder automatically appends \"{language}\" to the value that you specify for the description. In addition, Elastic Transcoder automatically appends the count to the end of the segment files.

    For example, suppose you're transcoding into srt format. When you enter \"Sydney-{language}-sunrise\", and the language of the captions is English (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.
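
    That Sydney example corresponds to a CaptionFormats entry like the fragment below (part of an output's Captions; all values are assumed for illustration):

    Captions: &elastictranscoder.Captions{
        MergePolicy: aws.String("MergeOverride"),
        CaptionFormats: []*elastictranscoder.CaptionFormat{{
            Format:  aws.String("srt"),
            Pattern: aws.String("Sydney-{language}-sunrise"), // English output: Sydney-en-sunrise00000.srt
        }},
    },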

    " + } + }, + "CaptionFormats": { + "base": null, + "refs": { + "Captions$CaptionFormats": "

    The array of file formats for the output captions. If you leave this value blank, Elastic Transcoder returns an error.

    " + } + }, + "CaptionMergePolicy": { + "base": null, + "refs": { + "Captions$MergePolicy": "

    A policy that determines how Elastic Transcoder handles the existence of multiple captions.

    • MergeOverride: Elastic Transcoder transcodes both embedded and sidecar captions into outputs. If captions for a language are embedded in the input file and also appear in a sidecar file, Elastic Transcoder uses the sidecar captions and ignores the embedded captions for that language.

    • MergeRetain: Elastic Transcoder transcodes both embedded and sidecar captions into outputs. If captions for a language are embedded in the input file and also appear in a sidecar file, Elastic Transcoder uses the embedded captions and ignores the sidecar captions for that language. If CaptionSources is empty, Elastic Transcoder omits all sidecar captions from the output files.

    • Override: Elastic Transcoder transcodes only the sidecar captions that you specify in CaptionSources.

    MergePolicy cannot be null.

    " + } + }, + "CaptionSource": { + "base": "

    A source file for the input sidecar captions used during the transcoding process.

    ", + "refs": { + "CaptionSources$member": null + } + }, + "CaptionSources": { + "base": null, + "refs": { + "Captions$CaptionSources": "

    Source files for the input sidecar captions used during the transcoding process. To omit all sidecar captions, leave CaptionSources blank.

    " + } + }, + "Captions": { + "base": "

    The captions to be created, if any.

    ", + "refs": { + "CreateJobOutput$Captions": "

    You can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another. All captions must be in UTF-8. Elastic Transcoder supports two types of captions:

    • Embedded: Embedded captions are included in the same file as the audio and video. Elastic Transcoder supports only one embedded caption per language, to a maximum of 300 embedded captions per file.

      Valid input values include: CEA-608 (EIA-608, first non-empty channel only), CEA-708 (EIA-708, first non-empty channel only), and mov-text

      Valid outputs include: mov-text

      Elastic Transcoder supports a maximum of one embedded format per output.

    • Sidecar: Sidecar captions are kept in a separate metadata file from the audio and video data. Sidecar captions require a player that is capable of understanding the relationship between the video file and the sidecar file. Elastic Transcoder supports only one sidecar caption per language, to a maximum of 20 sidecar captions per file.

      Valid input values include: dfxp (first div element only), ebu-tt, scc, smpt, srt, ttml (first div element only), and webvtt

      Valid outputs include: dfxp (first div element only), scc, srt, and webvtt.

    If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.

    Elastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does not preserve text formatting (for example, italics) during the transcoding process.

    To remove captions or leave the captions empty, set Captions to null. To pass through existing captions unchanged, set the MergePolicy to MergeRetain, and pass in a null CaptionSources array.

    For more information on embedded files, see the Subtitles Wikipedia page.

    For more information on sidecar files, see the Extensible Metadata Platform and Sidecar file Wikipedia pages.

    ", + "JobOutput$Captions": "

    You can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another. All captions must be in UTF-8. Elastic Transcoder supports two types of captions:

    • Embedded: Embedded captions are included in the same file as the audio and video. Elastic Transcoder supports only one embedded caption per language, to a maximum of 300 embedded captions per file.

      Valid input values include: CEA-608 (EIA-608, first non-empty channel only), CEA-708 (EIA-708, first non-empty channel only), and mov-text

      Valid outputs include: mov-text

      Elastic Transcoder supports a maximum of one embedded format per output.

    • Sidecar: Sidecar captions are kept in a separate metadata file from the audio and video data. Sidecar captions require a player that is capable of understanding the relationship between the video file and the sidecar file. Elastic Transcoder supports only one sidecar caption per language, to a maximum of 20 sidecar captions per file.

      Valid input values include: dfxp (first div element only), ebu-tt, scc, smpt, srt, ttml (first div element only), and webvtt

      Valid outputs include: dfxp (first div element only), scc, srt, and webvtt.

    If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.

    Elastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does not preserve text formatting (for example, italics) during the transcoding process.

    To remove captions or leave the captions empty, set Captions to null. To pass through existing captions unchanged, set the MergePolicy to MergeRetain, and pass in a null CaptionSources array.

    For more information on embedded files, see the Subtitles Wikipedia page.

    For more information on sidecar files, see the Extensible Metadata Platform and Sidecar file Wikipedia pages.

    " + } + }, + "Clip": { + "base": "

    Settings for one clip in a composition. All jobs in a playlist must have the same clip settings.

    ", + "refs": { + "Composition$member": null + } + }, + "CodecOption": { + "base": null, + "refs": { + "CodecOptions$key": null, + "CodecOptions$value": null + } + }, + "CodecOptions": { + "base": null, + "refs": { + "VideoParameters$CodecOptions": "

    Profile (H.264/VP8 Only)

    The H.264 profile that you want to use for the output file. Elastic Transcoder supports the following profiles:

    • baseline: The profile most commonly used for videoconferencing and for mobile applications.
    • main: The profile used for standard-definition digital TV broadcasts.
    • high: The profile used for high-definition digital TV broadcasts and for Blu-ray discs.

    Level (H.264 Only)

    The H.264 level that you want to use for the output file. Elastic Transcoder supports the following levels:

    1, 1b, 1.1, 1.2, 1.3, 2, 2.1, 2.2, 3, 3.1, 3.2, 4, 4.1

    MaxReferenceFrames (H.264 Only)

    Applicable only when the value of Video:Codec is H.264. The maximum number of previously decoded frames to use as a reference for decoding future frames. Valid values are integers 0 through 16, but we recommend that you not use a value greater than the following:

    Min(Floor(Maximum decoded picture buffer in macroblocks * 256 / (Width in pixels * Height in pixels)), 16)

    where Width in pixels and Height in pixels represent either MaxWidth and MaxHeight, or Resolution. Maximum decoded picture buffer in macroblocks depends on the value of the Level object. See the list below. (A macroblock is a block of pixels measuring 16x16.)

    • 1 - 396
    • 1b - 396
    • 1.1 - 900
    • 1.2 - 2376
    • 1.3 - 2376
    • 2 - 2376
    • 2.1 - 4752
    • 2.2 - 8100
    • 3 - 8100
    • 3.1 - 18000
    • 3.2 - 20480
    • 4 - 32768
    • 4.1 - 32768
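
    As a worked instance of the formula above: for Level 3.1 (18000 macroblocks) and a 1280x720 output, the ceiling is Min(Floor(18000 * 256 / (1280 * 720)), 16) = Min(Floor(5.0), 16) = 5, so MaxReferenceFrames should be 5 or less. In Go (fragment; assumes fmt and math are imported):

    // Worked check of the MaxReferenceFrames ceiling for an assumed Level 3.1, 1280x720 output.
    dpbMacroblocks := 18000.0 // decoded picture buffer for Level 3.1, from the list above
    width, height := 1280.0, 720.0
    ceiling := math.Min(math.Floor(dpbMacroblocks*256/(width*height)), 16) // = 5
    fmt.Println("MaxReferenceFrames must be <=", ceiling)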

    MaxBitRate (Optional, H.264/MPEG2/VP8 only)

    The maximum number of bits per second in a video buffer; the size of the buffer is specified by BufferSize. Specify a value between 16 and 62,500. You can reduce the bandwidth required to stream a video by reducing the maximum bit rate, but this also reduces the quality of the video.

    BufferSize (Optional, H.264/MPEG2/VP8 only)

    The maximum number of bits in any x seconds of the output video. This window is commonly 10 seconds, the standard segment duration when you're using FMP4 or MPEG-TS for the container type of the output video. Specify an integer greater than 0. If you specify MaxBitRate and omit BufferSize, Elastic Transcoder sets BufferSize to 10 times the value of MaxBitRate.

    InterlacedMode (Optional, H.264/MPEG2 Only)

    The interlace mode for the output video.

    Interlaced video is used to double the perceived frame rate for a video by interlacing two fields (one field on every other line, the other field on the other lines) so that the human eye registers multiple pictures per frame. Interlacing reduces the bandwidth required for transmitting a video, but can result in blurred images and flickering.

    Valid values include Progressive (no interlacing, top to bottom), TopFirst (top field first), BottomFirst (bottom field first), and Auto.

    If InterlacedMode is not specified, Elastic Transcoder uses Progressive for the output. If Auto is specified, Elastic Transcoder interlaces the output.

    ColorSpaceConversionMode (Optional, H.264/MPEG2 Only)

    The color space conversion Elastic Transcoder applies to the output video. Color spaces are the algorithms used by the computer to store information about how to render color. Bt.601 is the standard for standard definition video, while Bt.709 is the standard for high definition video.

    Valid values include None, Bt709toBt601, Bt601toBt709, and Auto.

    If you chose Auto for ColorSpaceConversionMode and your output is interlaced, your frame rate is one of 23.97, 24, 25, 29.97, 50, or 60, your SegmentDuration is null, and you are using one of the resolution changes from the list below, Elastic Transcoder applies the following color space conversions:

    • Standard to HD, 720x480 to 1920x1080 - Elastic Transcoder applies Bt601ToBt709
    • Standard to HD, 720x576 to 1920x1080 - Elastic Transcoder applies Bt601ToBt709
    • HD to Standard, 1920x1080 to 720x480 - Elastic Transcoder applies Bt709ToBt601
    • HD to Standard, 1920x1080 to 720x576 - Elastic Transcoder applies Bt709ToBt601
    Elastic Transcoder may change the behavior of the ColorspaceConversionMode Auto mode in the future. All outputs in a playlist must use the same ColorSpaceConversionMode.

    If you do not specify a ColorSpaceConversionMode, Elastic Transcoder does not change the color space of a file. If you are unsure what ColorSpaceConversionMode was applied to your output file, you can check the AppliedColorSpaceConversion parameter included in your job response. If your job does not have an AppliedColorSpaceConversion in its response, no ColorSpaceConversionMode was applied.

    ChromaSubsampling

    The sampling pattern for the chroma (color) channels of the output video. Valid values include yuv420p and yuv422p.

    yuv420p samples the chroma information of every other horizontal and every other vertical line; yuv422p samples the color information of every horizontal line and every other vertical line.

    LoopCount (Gif Only)

    The number of times you want the output gif to loop. Valid values include Infinite and integers between 0 and 100, inclusive.

    " + } + }, + "Composition": { + "base": null, + "refs": { + "CreateJobOutput$Composition": "

    You can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.

    ", + "JobOutput$Composition": "

    You can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.

    " + } + }, + "CreateJobOutput": { + "base": "

    The CreateJobOutput structure.

    ", + "refs": { + "CreateJobOutputs$member": null, + "CreateJobRequest$Output": null + } + }, + "CreateJobOutputs": { + "base": null, + "refs": { + "CreateJobRequest$Outputs": "

    A section of the request body that provides information about the transcoded (target) files. We recommend that you use the Outputs syntax instead of the Output syntax.

    " + } + }, + "CreateJobPlaylist": { + "base": "

    Information about the master playlist.

    ", + "refs": { + "CreateJobPlaylists$member": null + } + }, + "CreateJobPlaylists": { + "base": null, + "refs": { + "CreateJobRequest$Playlists": "

    If you specify a preset in PresetId for which the value of Container is fmp4 (Fragmented MP4) or ts (MPEG-TS), Playlists contains information about the master playlists that you want Elastic Transcoder to create.

    The maximum number of master playlists in a job is 30.
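
    A sketch of a Playlists entry for a CreateJobRequest whose outputs use an HLS (ts) preset; the name and keys are placeholders:

    Playlists: []*elastictranscoder.CreateJobPlaylist{{
        Name:   aws.String("nyc-vacation"), // .m3u8 is appended automatically for HLSv3
        Format: aws.String("HLSv3"),
        OutputKeys: []*string{ // must match the Key values of this job's ts outputs
            aws.String("hls/video-1m"),
            aws.String("hls/video-2m"),
        },
    }},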

    " + } + }, + "CreateJobRequest": { + "base": "

    The CreateJobRequest structure.

    ", + "refs": { + } + }, + "CreateJobResponse": { + "base": "

    The CreateJobResponse structure.

    ", + "refs": { + } + }, + "CreatePipelineRequest": { + "base": "

    The CreatePipelineRequest structure.

    ", + "refs": { + } + }, + "CreatePipelineResponse": { + "base": "

    When you create a pipeline, Elastic Transcoder returns the values that you specified in the request.

    ", + "refs": { + } + }, + "CreatePresetRequest": { + "base": "

    The CreatePresetRequest structure.

    ", + "refs": { + } + }, + "CreatePresetResponse": { + "base": "

    The CreatePresetResponse structure.

    ", + "refs": { + } + }, + "DeletePipelineRequest": { + "base": "

    The DeletePipelineRequest structure.

    ", + "refs": { + } + }, + "DeletePipelineResponse": { + "base": "

    The DeletePipelineResponse structure.

    ", + "refs": { + } + }, + "DeletePresetRequest": { + "base": "

    The DeletePresetRequest structure.

    ", + "refs": { + } + }, + "DeletePresetResponse": { + "base": "

    The DeletePresetResponse structure.

    ", + "refs": { + } + }, + "Description": { + "base": null, + "refs": { + "CreatePresetRequest$Description": "

    A description of the preset.

    ", + "JobOutput$StatusDetail": "

    Information that further explains Status.

    ", + "Playlist$StatusDetail": "

    Information that further explains the status.

    ", + "Preset$Description": "

    A description of the preset.

    " + } + }, + "DetectedProperties": { + "base": "

    The detected properties of the input file. Elastic Transcoder identifies these values from the input file.

    ", + "refs": { + "JobInput$DetectedProperties": "

    The detected properties of the input file.

    " + } + }, + "Digits": { + "base": null, + "refs": { + "Thumbnails$Interval": "

    The approximate number of seconds between thumbnails. Specify an integer value.

    " + } + }, + "DigitsOrAuto": { + "base": null, + "refs": { + "Artwork$MaxWidth": "

    The maximum width of the output album art in pixels. If you specify auto, Elastic Transcoder uses 600 as the default value. If you specify a numeric value, enter an even integer between 32 and 4096, inclusive.

    ", + "Artwork$MaxHeight": "

    The maximum height of the output album art in pixels. If you specify auto, Elastic Transcoder uses 600 as the default value. If you specify a numeric value, enter an even integer between 32 and 3072, inclusive.

    ", + "Thumbnails$MaxWidth": "

    The maximum width of thumbnails in pixels. If you specify auto, Elastic Transcoder uses 1920 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 32 and 4096.

    ", + "Thumbnails$MaxHeight": "

    The maximum height of thumbnails in pixels. If you specify auto, Elastic Transcoder uses 1080 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 32 and 3072.

    ", + "VideoParameters$MaxWidth": "

    The maximum width of the output video in pixels. If you specify auto, Elastic Transcoder uses 1920 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 128 and 4096.

    ", + "VideoParameters$MaxHeight": "

    The maximum height of the output video in pixels. If you specify auto, Elastic Transcoder uses 1080 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 96 and 3072.

    " + } + }, + "Encryption": { + "base": "

    The encryption settings, if any, that are used for decrypting your input files or encrypting your output files. If your input file is encrypted, you must specify the mode that Elastic Transcoder will use to decrypt your file, otherwise you must specify the mode you want Elastic Transcoder to use to encrypt your output files.

    ", + "refs": { + "Artwork$Encryption": "

    The encryption settings, if any, that you want Elastic Transcoder to apply to your artwork.

    ", + "CaptionFormat$Encryption": "

    The encryption settings, if any, that you want Elastic Transcoder to apply to your caption formats.

    ", + "CaptionSource$Encryption": "

    The encryption settings, if any, that you want Elastic Transcoder to apply to your caption sources.

    ", + "CreateJobOutput$ThumbnailEncryption": "

    The encryption settings, if any, that you want Elastic Transcoder to apply to your thumbnail.

    ", + "CreateJobOutput$Encryption": "

    You can specify encryption settings for any output files that you want to use for a transcoding job. This includes the output file and any watermarks, thumbnails, album art, or captions that you want to use. You must specify encryption settings for each file individually.

    ", + "JobInput$Encryption": "

    The encryption settings, if any, that are used for decrypting your input files. If your input file is encrypted, you must specify the mode that Elastic Transcoder will use to decrypt your file.

    ", + "JobOutput$ThumbnailEncryption": "

    The encryption settings, if any, that you want Elastic Transcoder to apply to your thumbnail.

    ", + "JobOutput$Encryption": "

    The encryption settings, if any, that you want Elastic Transcoder to apply to your output files. If you choose to use encryption, you must specify a mode to use. If you choose not to use encryption, Elastic Transcoder will write an unencrypted file to your Amazon S3 bucket.

    ", + "JobWatermark$Encryption": "

    The encryption settings, if any, that you want Elastic Transcoder to apply to your watermarks.

    " + } + }, + "EncryptionMode": { + "base": null, + "refs": { + "Encryption$Mode": "

    The specific server-side encryption mode that you want Elastic Transcoder to use when decrypting your input files or encrypting your output files. Elastic Transcoder supports the following options:

    • S3: Amazon S3 creates and manages the keys used for encrypting your files.

    • S3-AWS-KMS: Amazon S3 calls the AWS Key Management Service (AWS KMS), which creates and manages the keys that are used for encrypting your files. If you specify S3-AWS-KMS and you don't want to use the default key, you must add the AWS-KMS key that you want to use to your pipeline.

    • AES-CBC-PKCS7: A padded cipher-block mode of operation originally used for HLS files.

    • AES-CTR: AES Counter Mode.

    • AES-GCM: AES Galois Counter Mode, a mode of operation that is an authenticated encryption format, meaning that a file, key, or initialization vector that has been tampered with will fail the decryption process.

    For all three AES options, you must provide the following settings, which must be base64-encoded:

    • Key

    • Key MD5

    • Initialization Vector

    For the AES modes, your private encryption keys and your unencrypted data are never stored by AWS; therefore, it is important that you safely manage your encryption keys. If you lose them, you won't be able to decrypt your data.

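    As a sketch of how these settings map onto the vendored Go client, the snippet below builds an Encryption block for an AES-GCM output. The key, key MD5, and initialization vector values are hypothetical placeholders; as described above, all three must be base64-encoded.

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/elastictranscoder"
    )

    func main() {
        // All three values are hypothetical placeholders and, per the docs
        // above, must be base64-encoded before they are supplied.
        enc := &elastictranscoder.Encryption{
            // The API model spells the modes in lowercase, e.g. "aes-gcm".
            Mode:                 aws.String("aes-gcm"),
            Key:                  aws.String("<base64 128-, 192-, or 256-bit key>"),
            KeyMd5:               aws.String("<base64 MD5 of the key>"),
            InitializationVector: aws.String("<base64 initialization vector>"),
        }
        fmt.Println(enc)
    }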
    " + } + }, + "ExceptionMessages": { + "base": null, + "refs": { + "TestRoleResponse$Messages": "

    If the Success element contains false, this value is an array of one or more error messages that were generated during the test process.

    " + } + }, + "Filename": { + "base": null, + "refs": { + "CreateJobPlaylist$Name": "

    The name that you want Elastic Transcoder to assign to the master playlist, for example, nyc-vacation.m3u8. If the name includes a / character, the section of the name before the last / must be identical for all Name objects. If you create more than one master playlist, the values of all Name objects must be unique.

    Note: Elastic Transcoder automatically appends the relevant file extension to the file name (.m3u8 for HLSv3 and HLSv4 playlists, and .ism and .ismc for Smooth playlists). If you include a file extension in Name, the file name will have two extensions.

    ", + "Playlist$Name": "

    The name that you want Elastic Transcoder to assign to the master playlist, for example, nyc-vacation.m3u8. If the name includes a / character, the section of the name before the last / must be identical for all Name objects. If you create more than one master playlist, the values of all Name objects must be unique.

    Note: Elastic Transcoder automatically appends the relevant file extension to the file name (.m3u8 for HLSv3 and HLSv4 playlists, and .ism and .ismc for Smooth playlists). If you include a file extension in Name, the file name will have two extensions.

    " + } + }, + "FixedGOP": { + "base": null, + "refs": { + "VideoParameters$FixedGOP": "

    Applicable only when the value of Video:Codec is one of H.264, MPEG2, or VP8.

    Whether to use a fixed value for FixedGOP. Valid values are true and false:

    • true: Elastic Transcoder uses the value of KeyframesMaxDist for the distance between key frames (the number of frames in a group of pictures, or GOP).
    • false: The distance between key frames can vary.

    FixedGOP must be set to true for fmp4 containers.

    " + } + }, + "FloatString": { + "base": null, + "refs": { + "CreateJobOutput$SegmentDuration": "

    (Outputs in Fragmented MP4 or MPEG-TS format only.) If you specify a preset in PresetId for which the value of Container is fmp4 (Fragmented MP4) or ts (MPEG-TS), SegmentDuration is the target maximum duration of each segment in seconds. For HLSv3 format playlists, each media segment is stored in a separate .ts file. For HLSv4 and Smooth playlists, all media segments for an output are stored in a single file. Each segment is approximately the length of the SegmentDuration, though individual segments might be shorter or longer.

    The range of valid values is 1 to 60 seconds. If the duration of the video is not evenly divisible by SegmentDuration, the duration of the last segment is the remainder of total length/SegmentDuration.

    Elastic Transcoder creates an output-specific playlist for each HLS output that you specify in OutputKeys. To add an output to the master playlist for this job, include it in the OutputKeys of the associated playlist.

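    As an illustrative sketch against the vendored Go client (the pipeline ID, preset ID, and key names below are hypothetical), a segmented output is requested simply by setting SegmentDuration on the output:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/elastictranscoder"
    )

    func main() {
        svc := elastictranscoder.New(session.Must(session.NewSession()))

        resp, err := svc.CreateJob(&elastictranscoder.CreateJobInput{
            PipelineId: aws.String("1111111111111-abcde1"), // hypothetical
            Input: &elastictranscoder.JobInput{
                Key: aws.String("cooking/lasagna.mpg"),
            },
            Outputs: []*elastictranscoder.CreateJobOutput{{
                Key:             aws.String("hls/lasagna"),
                PresetId:        aws.String("1351620000001-200010"), // hypothetical HLS preset ID
                SegmentDuration: aws.String("10"),                   // 1-60 seconds, passed as a string
            }},
        })
        if err != nil {
            panic(err)
        }
        fmt.Println(aws.StringValue(resp.Job.Id))
    }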
    ", + "DetectedProperties$FrameRate": "

    The detected frame rate of the input file, in frames per second.

    ", + "JobOutput$SegmentDuration": "

    (Outputs in Fragmented MP4 or MPEG-TS format only.) If you specify a preset in PresetId for which the value of Container is fmp4 (Fragmented MP4) or ts (MPEG-TS), SegmentDuration is the target maximum duration of each segment in seconds. For HLSv3 format playlists, each media segment is stored in a separate .ts file. For HLSv4 and Smooth playlists, all media segments for an output are stored in a single file. Each segment is approximately the length of the SegmentDuration, though individual segments might be shorter or longer.

    The range of valid values is 1 to 60 seconds. If the duration of the video is not evenly divisible by SegmentDuration, the duration of the last segment is the remainder of total length/SegmentDuration.

    Elastic Transcoder creates an output-specific playlist for each HLS output that you specify in OutputKeys. To add an output to the master playlist for this job, include it in the OutputKeys of the associated playlist.

    ", + "JobOutput$FrameRate": "

    Frame rate of the output file, in frames per second.

    " + } + }, + "FrameRate": { + "base": null, + "refs": { + "JobInput$FrameRate": "

    The frame rate of the input file. If you want Elastic Transcoder to automatically detect the frame rate of the input file, specify auto. If you want to specify the frame rate for the input file, enter one of the following values:

    10, 15, 23.97, 24, 25, 29.97, 30, 60

    If you specify a value other than auto, Elastic Transcoder disables automatic detection of the frame rate.

    ", + "VideoParameters$FrameRate": "

    The frames per second for the video stream in the output file. Valid values include:

    auto, 10, 15, 23.97, 24, 25, 29.97, 30, 60

    If you specify auto, Elastic Transcoder uses the detected frame rate of the input source. If you specify a frame rate, we recommend that you perform the following calculation:

    Frame rate = maximum recommended decoding speed in luma samples/second / (width in pixels * height in pixels)

    where:

    • width in pixels and height in pixels represent the Resolution of the output video.
    • maximum recommended decoding speed in luma samples/second is less than or equal to the maximum value listed in the following list, based on the value that you specified for Level.

    The maximum recommended decoding speed in luma samples/second for each level is described in the following list (Level - Decoding speed):

    • 1 - 380160
    • 1b - 380160
    • 1.1 - 768000
    • 1.2 - 1536000
    • 1.3 - 3041280
    • 2 - 3041280
    • 2.1 - 5068800
    • 2.2 - 5184000
    • 3 - 10368000
    • 3.1 - 27648000
    • 3.2 - 55296000
    • 4 - 62914560
    • 4.1 - 62914560
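
    To make the calculation concrete, a minimal sketch under an assumed 1280x720 (720p) output at Level 3.1: the list above caps decoding speed at 27648000 luma samples/second, which works out to a maximum frame rate of 30.

    package main

    import "fmt"

    func main() {
        // Level 3.1 from the list above; 1280x720 is a hypothetical output
        // resolution.
        const maxLumaSamplesPerSecond = 27648000.0
        const width, height = 1280.0, 720.0

        // Frame rate = maximum recommended decoding speed in luma
        // samples/second / (width in pixels * height in pixels)
        fmt.Println(maxLumaSamplesPerSecond / (width * height)) // prints 30
    }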
    " + } + }, + "Grantee": { + "base": null, + "refs": { + "Permission$Grantee": "

    The AWS user or group that you want to have access to transcoded files and playlists. To identify the user or group, you can specify the canonical user ID for an AWS account, an origin access identity for a CloudFront distribution, the registered email address of an AWS account, or a predefined Amazon S3 group.

    " + } + }, + "GranteeType": { + "base": null, + "refs": { + "Permission$GranteeType": "

    The type of value that appears in the Grantee object:

    • Canonical: Either the canonical user ID for an AWS account or an origin access identity for an Amazon CloudFront distribution. A canonical user ID is not the same as an AWS account number.
    • Email: The registered email address of an AWS account.
    • Group: One of the following predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, or LogDelivery.

    " + } + }, + "HlsContentProtection": { + "base": "

    The HLS content protection settings, if any, that you want Elastic Transcoder to apply to your output files.

    ", + "refs": { + "CreateJobPlaylist$HlsContentProtection": "

    The HLS content protection settings, if any, that you want Elastic Transcoder to apply to the output files associated with this playlist.

    ", + "Playlist$HlsContentProtection": "

    The HLS content protection settings, if any, that you want Elastic Transcoder to apply to the output files associated with this playlist.

    " + } + }, + "HlsContentProtectionMethod": { + "base": null, + "refs": { + "HlsContentProtection$Method": "

    The content protection method for your output. The only valid value is: aes-128.

    This value will be written into the method attribute of the EXT-X-KEY metadata tag in the output playlist.

    " + } + }, + "HorizontalAlign": { + "base": null, + "refs": { + "PresetWatermark$HorizontalAlign": "

    The horizontal position of the watermark unless you specify a non-zero value for HorizontalOffset:

    • Left: The left edge of the watermark is aligned with the left border of the video.
    • Right: The right edge of the watermark is aligned with the right border of the video.
    • Center: The watermark is centered between the left and right borders.

    " + } + }, + "Id": { + "base": null, + "refs": { + "CancelJobRequest$Id": "

    The identifier of the job that you want to cancel.

    To get a list of the jobs (including their jobId) that have a status of Submitted, use the ListJobsByStatus API action.

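    A sketch of that flow with the vendored Go client, canceling every job on the first page of Submitted jobs (error handling is collapsed to a panic; only jobs that are still Submitted can be canceled):

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/elastictranscoder"
    )

    func main() {
        svc := elastictranscoder.New(session.Must(session.NewSession()))

        // First page only, for brevity; see NextPageToken for pagination.
        resp, err := svc.ListJobsByStatus(&elastictranscoder.ListJobsByStatusInput{
            Status: aws.String("Submitted"),
        })
        if err != nil {
            panic(err)
        }
        for _, job := range resp.Jobs {
            if _, err := svc.CancelJob(&elastictranscoder.CancelJobInput{Id: job.Id}); err != nil {
                panic(err)
            }
        }
    }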
    ", + "CreateJobOutput$PresetId": "

    The Id of the preset to use for this job. The preset determines the audio, video, and thumbnail settings that Elastic Transcoder uses for transcoding.

    ", + "CreateJobRequest$PipelineId": "

    The Id of the pipeline that you want Elastic Transcoder to use for transcoding. The pipeline determines several settings, including the Amazon S3 bucket from which Elastic Transcoder gets the files to transcode and the bucket into which Elastic Transcoder puts the transcoded files.

    ", + "DeletePipelineRequest$Id": "

    The identifier of the pipeline that you want to delete.

    ", + "DeletePresetRequest$Id": "

    The identifier of the preset that you want to delete.

    ", + "Job$Id": "

    The identifier that Elastic Transcoder assigned to the job. You use this value to get settings for the job or to delete the job.

    ", + "Job$PipelineId": "

    The Id of the pipeline that you want Elastic Transcoder to use for transcoding. The pipeline determines several settings, including the Amazon S3 bucket from which Elastic Transcoder gets the files to transcode and the bucket into which Elastic Transcoder puts the transcoded files.

    ", + "JobOutput$PresetId": "

    The value of the Id object for the preset that you want to use for this job. The preset determines the audio, video, and thumbnail settings that Elastic Transcoder uses for transcoding. To use a preset that you created, specify the preset ID that Elastic Transcoder returned in the response when you created the preset. You can also use the Elastic Transcoder system presets, which you can get with ListPresets.

    ", + "ListJobsByPipelineRequest$PipelineId": "

    The ID of the pipeline for which you want to get job information.

    ", + "ListJobsByPipelineRequest$PageToken": "

    When Elastic Transcoder returns more than one page of results, use pageToken in subsequent GET requests to get each successive page of results.

    ", + "ListJobsByPipelineResponse$NextPageToken": "

    A value that you use to access the second and subsequent pages of results, if any. When the jobs in the specified pipeline fit on one page or when you've reached the last page of results, the value of NextPageToken is null.

    ", + "ListJobsByStatusRequest$PageToken": "

    When Elastic Transcoder returns more than one page of results, use pageToken in subsequent GET requests to get each successive page of results.

    ", + "ListJobsByStatusResponse$NextPageToken": "

    A value that you use to access the second and subsequent pages of results, if any. When the jobs in the specified pipeline fit on one page or when you've reached the last page of results, the value of NextPageToken is null.

    ", + "ListPipelinesRequest$PageToken": "

    When Elastic Transcoder returns more than one page of results, use pageToken in subsequent GET requests to get each successive page of results.

    ", + "ListPipelinesResponse$NextPageToken": "

    A value that you use to access the second and subsequent pages of results, if any. When the pipelines fit on one page or when you've reached the last page of results, the value of NextPageToken is null.

    ", + "ListPresetsRequest$PageToken": "

    When Elastic Transcoder returns more than one page of results, use pageToken in subsequent GET requests to get each successive page of results.

    ", + "ListPresetsResponse$NextPageToken": "

    A value that you use to access the second and subsequent pages of results, if any. When the presets fit on one page or when you've reached the last page of results, the value of NextPageToken is null.

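    A minimal pagination sketch against the vendored Go client (the status value is one of those listed above; error handling is elided into a panic). The vendored client also exposes a ListJobsByStatusPages helper that wraps the same loop.

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/elastictranscoder"
    )

    func main() {
        svc := elastictranscoder.New(session.Must(session.NewSession()))

        var pageToken *string
        for {
            resp, err := svc.ListJobsByStatus(&elastictranscoder.ListJobsByStatusInput{
                Status:    aws.String("Complete"),
                PageToken: pageToken, // nil on the first request
            })
            if err != nil {
                panic(err)
            }
            for _, job := range resp.Jobs {
                fmt.Println(aws.StringValue(job.Id))
            }
            // NextPageToken is null once the last page has been read.
            if resp.NextPageToken == nil {
                break
            }
            pageToken = resp.NextPageToken
        }
    }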
    ", + "Pipeline$Id": "

    The identifier for the pipeline. You use this value to identify the pipeline in which you want to perform a variety of operations, such as creating a job or a preset.

    ", + "Preset$Id": "

    Identifier for the new preset. You use this value to get settings for the preset or to delete it.

    ", + "ReadJobRequest$Id": "

    The identifier of the job for which you want to get detailed information.

    ", + "ReadPipelineRequest$Id": "

    The identifier of the pipeline to read.

    ", + "ReadPresetRequest$Id": "

    The identifier of the preset for which you want to get detailed information.

    ", + "UpdatePipelineNotificationsRequest$Id": "

    The identifier of the pipeline for which you want to change notification settings.

    ", + "UpdatePipelineRequest$Id": "

    The ID of the pipeline that you want to update.

    ", + "UpdatePipelineStatusRequest$Id": "

    The identifier of the pipeline to update.

    " + } + }, + "IncompatibleVersionException": { + "base": null, + "refs": { + } + }, + "Interlaced": { + "base": null, + "refs": { + "JobInput$Interlaced": "

    Whether the input file is interlaced. If you want Elastic Transcoder to automatically detect whether the input file is interlaced, specify auto. If you want to specify whether the input file is interlaced, enter one of the following values:

    true, false

    If you specify a value other than auto, Elastic Transcoder disables automatic detection of interlacing.

    " + } + }, + "InternalServiceException": { + "base": "

    Elastic Transcoder encountered an unexpected exception while trying to fulfill the request.

    ", + "refs": { + } + }, + "Job": { + "base": "

    A section of the response body that provides information about the job that is created.

    ", + "refs": { + "CreateJobResponse$Job": "

    A section of the response body that provides information about the job that is created.

    ", + "Jobs$member": null, + "ReadJobResponse$Job": "

    A section of the response body that provides information about the job.

    " + } + }, + "JobAlbumArt": { + "base": "

    The .jpg or .png file associated with an audio file.

    ", + "refs": { + "CreateJobOutput$AlbumArt": "

    Information about the album art that you want Elastic Transcoder to add to the file during transcoding. You can specify up to twenty album artworks for each output. Settings for each artwork must be defined in the job for the current output.

    ", + "JobOutput$AlbumArt": "

    The album art to be associated with the output file, if any.

    " + } + }, + "JobContainer": { + "base": null, + "refs": { + "JobInput$Container": "

    The container type for the input file. If you want Elastic Transcoder to automatically detect the container type of the input file, specify auto. If you want to specify the container type for the input file, enter one of the following values:

    3gp, aac, asf, avi, divx, flv, m4a, mkv, mov, mp3, mp4, mpeg, mpeg-ps, mpeg-ts, mxf, ogg, vob, wav, webm

    " + } + }, + "JobInput": { + "base": "

    Information about the file that you're transcoding.

    ", + "refs": { + "CreateJobRequest$Input": "

    A section of the request body that provides information about the file that is being transcoded.

    ", + "Job$Input": "

    A section of the request or response body that provides information about the file that is being transcoded.

    " + } + }, + "JobOutput": { + "base": "

    Outputs recommended instead. If you specified one output for a job, information about that output. If you specified multiple outputs for a job, the Output object lists information about the first output. This duplicates the information that is listed for the first output in the Outputs object.

    ", + "refs": { + "Job$Output": "

    If you specified one output for a job, information about that output. If you specified multiple outputs for a job, the Output object lists information about the first output. This duplicates the information that is listed for the first output in the Outputs object.

    Outputs recommended instead. A section of the request or response body that provides information about the transcoded (target) file.

    ", + "JobOutputs$member": null + } + }, + "JobOutputs": { + "base": null, + "refs": { + "Job$Outputs": "

    Information about the output files. We recommend that you use the Outputs syntax for all jobs, even when you want Elastic Transcoder to transcode a file into only one format. Do not use both the Outputs and Output syntaxes in the same request. You can create a maximum of 30 outputs per job.

    If you specify more than one output for a job, Elastic Transcoder creates the files for each output in the order in which you specify them in the job.

    " + } + }, + "JobStatus": { + "base": null, + "refs": { + "Job$Status": "

    The status of the job: Submitted, Progressing, Complete, Canceled, or Error.

    ", + "JobOutput$Status": "

    The status of one output in a job. If you specified only one output for the job, Outputs:Status is always the same as Job:Status. If you specified more than one output:

    • Job:Status and Outputs:Status for all of the outputs is Submitted until Elastic Transcoder starts to process the first output.
    • When Elastic Transcoder starts to process the first output, Outputs:Status for that output and Job:Status both change to Progressing. For each output, the value of Outputs:Status remains Submitted until Elastic Transcoder starts to process the output.
    • Job:Status remains Progressing until all of the outputs reach a terminal status, either Complete or Error.
    • When all of the outputs reach a terminal status, Job:Status changes to Complete only if Outputs:Status for all of the outputs is Complete. If Outputs:Status for one or more outputs is Error, the terminal status for Job:Status is also Error.
    The value of Status is one of the following: Submitted, Progressing, Complete, Canceled, or Error.

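    Because Complete, Canceled, and Error are the terminal values described above, a caller can block on them with the waiter generated in the vendored SDK. A sketch (the job ID is hypothetical):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/elastictranscoder"
    )

    func main() {
        svc := elastictranscoder.New(session.Must(session.NewSession()))
        id := aws.String("1111111111111-abcde1") // hypothetical job ID

        // WaitUntilJobComplete polls ReadJob until Job:Status reaches a
        // terminal state (Complete, Canceled, or Error).
        if err := svc.WaitUntilJobComplete(&elastictranscoder.ReadJobInput{Id: id}); err != nil {
            panic(err)
        }

        resp, err := svc.ReadJob(&elastictranscoder.ReadJobInput{Id: id})
        if err != nil {
            panic(err)
        }
        fmt.Println(aws.StringValue(resp.Job.Status))
    }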
    ", + "ListJobsByStatusRequest$Status": "

    To get information about all of the jobs associated with the current AWS account that have a given status, specify the following status: Submitted, Progressing, Complete, Canceled, or Error.

    ", + "Playlist$Status": "

    The status of the job with which the playlist is associated.

    " + } + }, + "JobWatermark": { + "base": "

    Watermarks can be in .png or .jpg format. If you want to display a watermark that is not rectangular, use the .png format, which supports transparency.

    ", + "refs": { + "JobWatermarks$member": null + } + }, + "JobWatermarks": { + "base": null, + "refs": { + "CreateJobOutput$Watermarks": "

    Information about the watermarks that you want Elastic Transcoder to add to the video during transcoding. You can specify up to four watermarks for each output. Settings for each watermark must be defined in the preset for the current output.

    ", + "JobOutput$Watermarks": "

    Information about the watermarks that you want Elastic Transcoder to add to the video during transcoding. You can specify up to four watermarks for each output. Settings for each watermark must be defined in the preset that you specify in Preset for the current output.

    Watermarks are added to the output video in the sequence in which you list them in the job output—the first watermark in the list is added to the output video first, the second watermark in the list is added next, and so on. As a result, if the settings in a preset cause Elastic Transcoder to place all watermarks in the same location, the second watermark that you add will cover the first one, the third one will cover the second, and the fourth one will cover the third.

    " + } + }, + "Jobs": { + "base": null, + "refs": { + "ListJobsByPipelineResponse$Jobs": "

    An array of Job objects that are in the specified pipeline.

    ", + "ListJobsByStatusResponse$Jobs": "

    An array of Job objects that have the specified status.

    " + } + }, + "JpgOrPng": { + "base": null, + "refs": { + "Artwork$AlbumArtFormat": "

    The format of album art, if any. Valid formats are .jpg and .png.

    ", + "Thumbnails$Format": "

    The format of thumbnails, if any. Valid values are jpg and png.

    You specify whether you want Elastic Transcoder to create thumbnails when you create a job.

    " + } + }, + "Key": { + "base": null, + "refs": { + "CaptionSource$Key": "

    The name of the sidecar caption file that you want Elastic Transcoder to include in the output file.

    ", + "CaptionSource$Language": "

    A string that specifies the language of the caption. Specify this as one of:

    • 2-character ISO 639-1 code

    • 3-character ISO 639-2 code

    For more information on ISO language codes and language names, see the List of ISO 639-1 codes.

    ", + "CreateJobOutput$Key": "

    The name to assign to the transcoded file. Elastic Transcoder saves the file in the Amazon S3 bucket specified by the OutputBucket object in the pipeline that is specified by the pipeline ID. If a file with the specified name already exists in the output bucket, the job fails.

    ", + "CreateJobRequest$OutputKeyPrefix": "

    The value, if any, that you want Elastic Transcoder to prepend to the names of all files that this job creates, including output files, thumbnails, and playlists.

    ", + "Job$OutputKeyPrefix": "

    The value, if any, that you want Elastic Transcoder to prepend to the names of all files that this job creates, including output files, thumbnails, and playlists. We recommend that you add a / or some other delimiter to the end of the OutputKeyPrefix.

    ", + "JobInput$Key": "

    The name of the file to transcode. Elsewhere in the body of the JSON block is the ID of the pipeline to use for processing the job. The InputBucket object in that pipeline tells Elastic Transcoder which Amazon S3 bucket to get the file from.

    If the file name includes a prefix, such as cooking/lasagna.mpg, include the prefix in the key. If the file isn't in the specified bucket, Elastic Transcoder returns an error.

    ", + "JobOutput$Key": "

    The name to assign to the transcoded file. Elastic Transcoder saves the file in the Amazon S3 bucket specified by the OutputBucket object in the pipeline that is specified by the pipeline ID.

    ", + "OutputKeys$member": null + } + }, + "KeyArn": { + "base": null, + "refs": { + "CreatePipelineRequest$AwsKmsKeyArn": "

    The AWS Key Management Service (AWS KMS) key that you want to use with this pipeline.

    If you use either S3 or S3-AWS-KMS as your Encryption:Mode, you don't need to provide a key with your job because a default key, known as an AWS-KMS key, is created for you automatically. You need to provide an AWS-KMS key only if you want to use a non-default AWS-KMS key, or if you are using an Encryption:Mode of AES-CBC-PKCS7, AES-CTR, or AES-GCM.

    ", + "Pipeline$AwsKmsKeyArn": "

    The AWS Key Management Service (AWS KMS) key that you want to use with this pipeline.

    If you use either S3 or S3-AWS-KMS as your Encryption:Mode, you don't need to provide a key with your job because a default key, known as an AWS-KMS key, is created for you automatically. You need to provide an AWS-KMS key only if you want to use a non-default AWS-KMS key, or if you are using an Encryption:Mode of AES-CBC-PKCS7, AES-CTR, or AES-GCM.

    ", + "UpdatePipelineRequest$AwsKmsKeyArn": "

    The AWS Key Management Service (AWS KMS) key that you want to use with this pipeline.

    If you use either S3 or S3-AWS-KMS as your Encryption:Mode, you don't need to provide a key with your job because a default key, known as an AWS-KMS key, is created for you automatically. You need to provide an AWS-KMS key only if you want to use a non-default AWS-KMS key, or if you are using an Encryption:Mode of AES-CBC-PKCS7, AES-CTR, or AES-GCM.

    " + } + }, + "KeyIdGuid": { + "base": null, + "refs": { + "PlayReadyDrm$KeyId": "

    The ID for your DRM key, so that your DRM license provider knows which key to provide.

    The key ID must be provided in big endian, and Elastic Transcoder will convert it to little endian before inserting it into the PlayReady DRM headers. If you are unsure whether your license server provides your key ID in big or little endian, check with your DRM provider.

    " + } + }, + "KeyStoragePolicy": { + "base": null, + "refs": { + "HlsContentProtection$KeyStoragePolicy": "

    Specify whether you want Elastic Transcoder to write your HLS license key to an Amazon S3 bucket. If you choose WithVariantPlaylists, LicenseAcquisitionUrl must be left blank and Elastic Transcoder writes your data key into the same bucket as the associated playlist.

    " + } + }, + "KeyframesMaxDist": { + "base": null, + "refs": { + "VideoParameters$KeyframesMaxDist": "

    Applicable only when the value of Video:Codec is one of H.264, MPEG2, or VP8.

    The maximum number of frames between key frames. Key frames are fully encoded frames; the frames between key frames are encoded based, in part, on the content of the key frames. The value is an integer formatted as a string; valid values are between 1 (every frame is a key frame) and 100000, inclusive. A higher value results in higher compression but may also discernibly decrease video quality.

    For Smooth outputs, the FrameRate must have a constant ratio to the KeyframesMaxDist. This allows Smooth playlists to switch between different quality levels while the file is being played.

    For example, an input file can have a FrameRate of 30 with a KeyframesMaxDist of 90. The output file then needs to have a ratio of 1:3. Valid outputs would have FrameRate of 30, 25, and 10, and KeyframesMaxDist of 90, 75, and 30, respectively.

    Alternately, this can be achieved by setting FrameRate to auto and having the same values for MaxFrameRate and KeyframesMaxDist.

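    A sketch of the 1:3 ratio above expressed through the vendored preset types (all values hypothetical; other preset fields trimmed for brevity):

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/elastictranscoder"
    )

    func main() {
        // FrameRate 30 with KeyframesMaxDist 90 keeps the 1:3 ratio that
        // Smooth outputs require; FixedGOP pins every GOP to that distance.
        _ = &elastictranscoder.VideoParameters{
            Codec:            aws.String("H.264"),
            FrameRate:        aws.String("30"),
            KeyframesMaxDist: aws.String("90"),
            FixedGOP:         aws.String("true"),
        }
    }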
    " + } + }, + "LimitExceededException": { + "base": "

    Too many operations for a given AWS account. For example, the number of pipelines exceeds the maximum allowed.

    ", + "refs": { + } + }, + "ListJobsByPipelineRequest": { + "base": "

    The ListJobsByPipelineRequest structure.

    ", + "refs": { + } + }, + "ListJobsByPipelineResponse": { + "base": "

    The ListJobsByPipelineResponse structure.

    ", + "refs": { + } + }, + "ListJobsByStatusRequest": { + "base": "

    The ListJobsByStatusRequest structure.

    ", + "refs": { + } + }, + "ListJobsByStatusResponse": { + "base": "

    The ListJobsByStatusResponse structure.

    ", + "refs": { + } + }, + "ListPipelinesRequest": { + "base": "

    The ListPipelinesRequest structure.

    ", + "refs": { + } + }, + "ListPipelinesResponse": { + "base": "

    A list of the pipelines associated with the current AWS account.

    ", + "refs": { + } + }, + "ListPresetsRequest": { + "base": "

    The ListPresetsRequest structure.

    ", + "refs": { + } + }, + "ListPresetsResponse": { + "base": "

    The ListPresetsResponse structure.

    ", + "refs": { + } + }, + "MaxFrameRate": { + "base": null, + "refs": { + "VideoParameters$MaxFrameRate": "

    If you specify auto for FrameRate, Elastic Transcoder uses the frame rate of the input video for the frame rate of the output video. Specify the maximum frame rate that you want Elastic Transcoder to use when the frame rate of the input video is greater than the desired maximum frame rate of the output video. Valid values include: 10, 15, 23.97, 24, 25, 29.97, 30, 60.

    " + } + }, + "MergePolicy": { + "base": null, + "refs": { + "JobAlbumArt$MergePolicy": "

    A policy that determines how Elastic Transcoder will handle the existence of multiple album artwork files.

    • Replace: The specified album art will replace any existing album art.
    • Prepend: The specified album art will be placed in front of any existing album art.
    • Append: The specified album art will be placed after any existing album art.
    • Fallback: If the original input file contains artwork, Elastic Transcoder will use that artwork for the output. If the original input does not contain artwork, Elastic Transcoder will use the specified album art file.

    " + } + }, + "Name": { + "base": null, + "refs": { + "CaptionSource$Label": "

    The label of the caption shown in the player when choosing a language. We recommend that you put the caption language name here, in the language of the captions.

    ", + "CreatePipelineRequest$Name": "

    The name of the pipeline. We recommend that the name be unique within the AWS account, but uniqueness is not enforced.

    Constraints: Maximum 40 characters.

    ", + "CreatePresetRequest$Name": "

    The name of the preset. We recommend that the name be unique within the AWS account, but uniqueness is not enforced.

    ", + "Pipeline$Name": "

    The name of the pipeline. We recommend that the name be unique within the AWS account, but uniqueness is not enforced.

    Constraints: Maximum 40 characters

    ", + "Preset$Name": "

    The name of the preset.

    ", + "UpdatePipelineRequest$Name": "

    The name of the pipeline. We recommend that the name be unique within the AWS account, but uniqueness is not enforced.

    Constraints: Maximum 40 characters

    " + } + }, + "NonEmptyBase64EncodedString": { + "base": null, + "refs": { + "PlayReadyDrm$Key": "

    The DRM key for your file, provided by your DRM license provider. The key must be base64-encoded, and it must be one of the following bit lengths before being base64-encoded:

    128, 192, or 256.

    The key must also be encrypted by using AWS KMS.

    ", + "PlayReadyDrm$KeyMd5": "

    The MD5 digest of the key used for DRM on your file, and that you want Elastic Transcoder to use as a checksum to make sure your key was not corrupted in transit. The key MD5 must be base64-encoded, and it must be exactly 16 bytes before being base64-encoded.

    " + } + }, + "Notifications": { + "base": "

    The Amazon Simple Notification Service (Amazon SNS) topic or topics to notify in order to report job status.

    To receive notifications, you must also subscribe to the new topic in the Amazon SNS console.", + "refs": { + "CreatePipelineRequest$Notifications": "

    The Amazon Simple Notification Service (Amazon SNS) topic that you want to notify to report job status.

    To receive notifications, you must also subscribe to the new topic in the Amazon SNS console.
    • Progressing: The topic ARN for the Amazon Simple Notification Service (Amazon SNS) topic that you want to notify when Elastic Transcoder has started to process a job in this pipeline. This is the ARN that Amazon SNS returned when you created the topic. For more information, see Create a Topic in the Amazon Simple Notification Service Developer Guide.
    • Completed: The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder has finished processing a job in this pipeline. This is the ARN that Amazon SNS returned when you created the topic.
    • Warning: The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder encounters a warning condition while processing a job in this pipeline. This is the ARN that Amazon SNS returned when you created the topic.
    • Error: The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder encounters an error condition while processing a job in this pipeline. This is the ARN that Amazon SNS returned when you created the topic.
    ", + "Pipeline$Notifications": "

    The Amazon Simple Notification Service (Amazon SNS) topic that you want to notify to report job status.

    To receive notifications, you must also subscribe to the new topic in the Amazon SNS console.
    • Progressing (optional): The Amazon Simple Notification Service (Amazon SNS) topic that you want to notify when Elastic Transcoder has started to process the job.
    • Completed (optional): The Amazon SNS topic that you want to notify when Elastic Transcoder has finished processing the job.
    • Warning (optional): The Amazon SNS topic that you want to notify when Elastic Transcoder encounters a warning condition.
    • Error (optional): The Amazon SNS topic that you want to notify when Elastic Transcoder encounters an error condition.
    ", + "UpdatePipelineNotificationsRequest$Notifications": "

    The topic ARN for the Amazon Simple Notification Service (Amazon SNS) topic that you want to notify to report job status.

    To receive notifications, you must also subscribe to the new topic in the Amazon SNS console.
    • Progressing: The topic ARN for the Amazon Simple Notification Service (Amazon SNS) topic that you want to notify when Elastic Transcoder has started to process jobs that are added to this pipeline. This is the ARN that Amazon SNS returned when you created the topic.
    • Completed: The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder has finished processing a job. This is the ARN that Amazon SNS returned when you created the topic.
    • Warning: The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder encounters a warning condition. This is the ARN that Amazon SNS returned when you created the topic.
    • Error: The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder encounters an error condition. This is the ARN that Amazon SNS returned when you created the topic.
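
    As a sketch against the vendored Go client, UpdatePipelineNotifications takes all four topics as a single Notifications object; the pipeline ID and topic ARNs below are hypothetical.

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/elastictranscoder"
    )

    func main() {
        svc := elastictranscoder.New(session.Must(session.NewSession()))

        _, err := svc.UpdatePipelineNotifications(&elastictranscoder.UpdatePipelineNotificationsInput{
            Id: aws.String("1111111111111-abcde1"), // hypothetical pipeline ID
            Notifications: &elastictranscoder.Notifications{
                Progressing: aws.String("arn:aws:sns:us-east-1:111122223333:ets-progressing"),
                Completed:   aws.String("arn:aws:sns:us-east-1:111122223333:ets-completed"),
                Warning:     aws.String("arn:aws:sns:us-east-1:111122223333:ets-warning"),
                Error:       aws.String("arn:aws:sns:us-east-1:111122223333:ets-error"),
            },
        })
        if err != nil {
            panic(err)
        }
    }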
    ", + "UpdatePipelineRequest$Notifications": null + } + }, + "NullableInteger": { + "base": null, + "refs": { + "DetectedProperties$Width": "

    The detected width of the input file, in pixels.

    ", + "DetectedProperties$Height": "

    The detected height of the input file, in pixels.

    ", + "JobOutput$Width": "

    Specifies the width of the output file in pixels.

    ", + "JobOutput$Height": "

    Height of the output file, in pixels.

    " + } + }, + "NullableLong": { + "base": null, + "refs": { + "DetectedProperties$FileSize": "

    The detected file size of the input file, in bytes.

    ", + "DetectedProperties$DurationMillis": "

    The detected duration of the input file, in milliseconds.

    ", + "JobOutput$Duration": "

    Duration of the output file, in seconds.

    ", + "JobOutput$FileSize": "

    File size of the output file, in bytes.

    ", + "JobOutput$DurationMillis": "

    Duration of the output file, in milliseconds.

    ", + "Timing$SubmitTimeMillis": "

    The time the job was submitted to Elastic Transcoder, in epoch milliseconds.

    ", + "Timing$StartTimeMillis": "

    The time the job began transcoding, in epoch milliseconds.

    ", + "Timing$FinishTimeMillis": "

    The time the job finished transcoding, in epoch milliseconds.

    " + } + }, + "OneTo512String": { + "base": null, + "refs": { + "PlayReadyDrm$LicenseAcquisitionUrl": "

    The location of the license key required to play DRM content. The URL must be an absolute path, and is referenced by the PlayReady header. The PlayReady header is referenced in the protection header of the client manifest for Smooth Streaming outputs, and in the EXT-X-DXDRM and EXT-X-DXDRMINFO metadata tags for HLS playlist outputs. An example URL looks like this: https://www.example.com/exampleKey/

    " + } + }, + "Opacity": { + "base": null, + "refs": { + "PresetWatermark$Opacity": "

    A percentage that indicates how much you want a watermark to obscure the video in the location where it appears. Valid values are 0 (the watermark is invisible) to 100 (the watermark completely obscures the video in the specified location). The datatype of Opacity is float.

    Elastic Transcoder supports transparent .png graphics. If you use a transparent .png, the transparent portion of the video appears as if you had specified a value of 0 for Opacity. The .jpg file format doesn't support transparency.

    " + } + }, + "OutputKeys": { + "base": null, + "refs": { + "CreateJobPlaylist$OutputKeys": "

    For each output in this job that you want to include in a master playlist, the value of the Outputs:Key object.

    • If your output is not HLS or does not have a segment duration set, the name of the output file is a concatenation of OutputKeyPrefix and Outputs:Key:

      OutputKeyPrefixOutputs:Key

    • If your output is HLSv3 and has a segment duration set, or is not included in a playlist, Elastic Transcoder creates an output playlist file with a file extension of .m3u8, and a series of .ts files that include a five-digit sequential counter beginning with 00000:

      OutputKeyPrefixOutputs:Key.m3u8

      OutputKeyPrefixOutputs:Key00000.ts

    • If your output is HLSv4, has a segment duration set, and is included in an HLSv4 playlist, Elastic Transcoder creates an output playlist file with a file extension of _v4.m3u8. If the output is video, Elastic Transcoder also creates an output file with an extension of _iframe.m3u8:

      OutputKeyPrefixOutputs:Key_v4.m3u8

      OutputKeyPrefixOutputs:Key_iframe.m3u8

      OutputKeyPrefixOutputs:Key.ts

    Elastic Transcoder automatically appends the relevant file extension to the file name. If you include a file extension in Output Key, the file name will have two extensions.

    If you include more than one output in a playlist, any segment duration settings, clip settings, or caption settings must be the same for all outputs in the playlist. For Smooth playlists, the Audio:Profile, Video:Profile, and Video:FrameRate to Video:KeyframesMaxDist ratio must be the same for all outputs.

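    As a sketch (the playlist name and output keys are hypothetical), a master playlist references its outputs by their Outputs:Key values:

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/elastictranscoder"
    )

    func main() {
        // Both keys must match Outputs:Key values elsewhere in the same
        // CreateJob request; Elastic Transcoder appends .m3u8 itself, so
        // Name carries no extension.
        _ = &elastictranscoder.CreateJobPlaylist{
            Name:       aws.String("nyc-vacation"),
            Format:     aws.String("HLSv3"),
            OutputKeys: []*string{aws.String("hls/lasagna-1m"), aws.String("hls/lasagna-2m")},
        }
    }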
    ", + "Playlist$OutputKeys": "

    For each output in this job that you want to include in a master playlist, the value of the Outputs:Key object.

    • If your output is not HLS or does not have a segment duration set, the name of the output file is a concatenation of OutputKeyPrefix and Outputs:Key:

      OutputKeyPrefixOutputs:Key

    • If your output is HLSv3 and has a segment duration set, or is not included in a playlist, Elastic Transcoder creates an output playlist file with a file extension of .m3u8, and a series of .ts files that include a five-digit sequential counter beginning with 00000:

      OutputKeyPrefixOutputs:Key.m3u8

      OutputKeyPrefixOutputs:Key00000.ts

    • If your output is HLSv4, has a segment duration set, and is included in an HLSv4 playlist, Elastic Transcoder creates an output playlist file with a file extension of _v4.m3u8. If the output is video, Elastic Transcoder also creates an output file with an extension of _iframe.m3u8:

      OutputKeyPrefixOutputs:Key_v4.m3u8

      OutputKeyPrefixOutputs:Key_iframe.m3u8

      OutputKeyPrefixOutputs:Key.ts

    Elastic Transcoder automatically appends the relevant file extension to the file name. If you include a file extension in Output Key, the file name will have two extensions.

    If you include more than one output in a playlist, any segment duration settings, clip settings, or caption settings must be the same for all outputs in the playlist. For Smooth playlists, the Audio:Profile, Video:Profile, and Video:FrameRate to Video:KeyframesMaxDist ratio must be the same for all outputs.

    " + } + }, + "PaddingPolicy": { + "base": null, + "refs": { + "Artwork$PaddingPolicy": "

    When you set PaddingPolicy to Pad, Elastic Transcoder may add white bars to the top and bottom and/or left and right sides of the output album art to make the total size of the output art match the values that you specified for MaxWidth and MaxHeight.

    ", + "Thumbnails$PaddingPolicy": "

    When you set PaddingPolicy to Pad, Elastic Transcoder may add black bars to the top and bottom and/or left and right sides of thumbnails to make the total size of the thumbnails match the values that you specified for thumbnail MaxWidth and MaxHeight settings.

    ", + "VideoParameters$PaddingPolicy": "

    When you set PaddingPolicy to Pad, Elastic Transcoder may add black bars to the top and bottom and/or left and right sides of the output video to make the total size of the output video match the values that you specified for MaxWidth and MaxHeight.

    " + } + }, + "Permission": { + "base": "

    The Permission structure.

    ", + "refs": { + "Permissions$member": null + } + }, + "Permissions": { + "base": null, + "refs": { + "PipelineOutputConfig$Permissions": "

    Optional. The Permissions object specifies which users and/or predefined Amazon S3 groups you want to have access to transcoded files and playlists, and the type of access you want them to have. You can grant permissions to a maximum of 30 users and/or predefined Amazon S3 groups.

    If you include Permissions, Elastic Transcoder grants only the permissions that you specify. It does not grant full permissions to the owner of the role specified by Role. If you want that user to have full control, you must explicitly grant full control to the user.

    If you omit Permissions, Elastic Transcoder grants full control over the transcoded files and playlists to the owner of the role specified by Role, and grants no other permissions to any other user or group.

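    A minimal sketch of a content config built with the vendored types; the bucket and grantee are hypothetical, and note that, per the text above, listing any Permissions means the role owner no longer receives full control implicitly.

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/elastictranscoder"
    )

    func main() {
        // The API model spells the grants Read, ReadAcp, WriteAcp, and
        // FullControl, matching READ, READ_ACP, WRITE_ACP, and FULL_CONTROL
        // in the prose above.
        _ = &elastictranscoder.PipelineOutputConfig{
            Bucket:       aws.String("example-transcoded-media"),
            StorageClass: aws.String("Standard"),
            Permissions: []*elastictranscoder.Permission{{
                GranteeType: aws.String("Canonical"),
                Grantee:     aws.String("<canonical user ID or CloudFront origin access identity>"),
                Access:      []*string{aws.String("Read")},
            }},
        }
    }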
    " + } + }, + "Pipeline": { + "base": "

    The pipeline (queue) that is used to manage jobs.

    ", + "refs": { + "CreatePipelineResponse$Pipeline": "

    A section of the response body that provides information about the pipeline that is created.

    ", + "Pipelines$member": null, + "ReadPipelineResponse$Pipeline": "

    A section of the response body that provides information about the pipeline.

    ", + "UpdatePipelineNotificationsResponse$Pipeline": "

    A section of the response body that provides information about the pipeline.

    ", + "UpdatePipelineResponse$Pipeline": null, + "UpdatePipelineStatusResponse$Pipeline": "

    A section of the response body that provides information about the pipeline.

    " + } + }, + "PipelineOutputConfig": { + "base": "

    The PipelineOutputConfig structure.

    ", + "refs": { + "CreatePipelineRequest$ContentConfig": "

    The optional ContentConfig object specifies information about the Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists: which bucket to use, which users you want to have access to the files, the type of access you want users to have, and the storage class that you want to assign to the files.

    If you specify values for ContentConfig, you must also specify values for ThumbnailConfig.

    If you specify values for ContentConfig and ThumbnailConfig, omit the OutputBucket object.

    • Bucket: The Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists.
    • Permissions (Optional): The Permissions object specifies which users you want to have access to transcoded files and the type of access you want them to have. You can grant permissions to a maximum of 30 users and/or predefined Amazon S3 groups.
    • Grantee Type: Specify the type of value that appears in the Grantee object:
      • Canonical: The value in the Grantee object is either the canonical user ID for an AWS account or an origin access identity for an Amazon CloudFront distribution. For more information about canonical user IDs, see Access Control List (ACL) Overview in the Amazon Simple Storage Service Developer Guide. For more information about using CloudFront origin access identities to require that users use CloudFront URLs instead of Amazon S3 URLs, see Using an Origin Access Identity to Restrict Access to Your Amazon S3 Content. A canonical user ID is not the same as an AWS account number.
      • Email: The value in the Grantee object is the registered email address of an AWS account.
      • Group: The value in the Grantee object is one of the following predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, or LogDelivery.
    • Grantee: The AWS user or group that you want to have access to transcoded files and playlists. To identify the user or group, you can specify the canonical user ID for an AWS account, an origin access identity for a CloudFront distribution, the registered email address of an AWS account, or a predefined Amazon S3 group
    • Access: The permission that you want to give to the AWS user that you specified in Grantee. Permissions are granted on the files that Elastic Transcoder adds to the bucket, including playlists and video files. Valid values include:
      • READ: The grantee can read the objects and metadata for objects that Elastic Transcoder adds to the Amazon S3 bucket.
      • READ_ACP: The grantee can read the object ACL for objects that Elastic Transcoder adds to the Amazon S3 bucket.
      • WRITE_ACP: The grantee can write the ACL for the objects that Elastic Transcoder adds to the Amazon S3 bucket.
      • FULL_CONTROL: The grantee has READ, READ_ACP, and WRITE_ACP permissions for the objects that Elastic Transcoder adds to the Amazon S3 bucket.
    • StorageClass: The Amazon S3 storage class, Standard or ReducedRedundancy, that you want Elastic Transcoder to assign to the video files and playlists that it stores in your Amazon S3 bucket.
    ", + "CreatePipelineRequest$ThumbnailConfig": "

    The ThumbnailConfig object specifies several values, including the Amazon S3 bucket in which you want Elastic Transcoder to save thumbnail files, which users you want to have access to the files, the type of access you want users to have, and the storage class that you want to assign to the files.

    If you specify values for ContentConfig, you must also specify values for ThumbnailConfig even if you don't want to create thumbnails.

    If you specify values for ContentConfig and ThumbnailConfig, omit the OutputBucket object.

    • Bucket: The Amazon S3 bucket in which you want Elastic Transcoder to save thumbnail files.
    • Permissions (Optional): The Permissions object specifies which users and/or predefined Amazon S3 groups you want to have access to thumbnail files, and the type of access you want them to have. You can grant permissions to a maximum of 30 users and/or predefined Amazon S3 groups.
    • GranteeType: Specify the type of value that appears in the Grantee object:
      • Canonical: The value in the Grantee object is either the canonical user ID for an AWS account or an origin access identity for an Amazon CloudFront distribution. A canonical user ID is not the same as an AWS account number.
      • Email: The value in the Grantee object is the registered email address of an AWS account.
      • Group: The value in the Grantee object is one of the following predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, or LogDelivery.
    • Grantee: The AWS user or group that you want to have access to thumbnail files. To identify the user or group, you can specify the canonical user ID for an AWS account, an origin access identity for a CloudFront distribution, the registered email address of an AWS account, or a predefined Amazon S3 group.
    • Access: The permission that you want to give to the AWS user that you specified in Grantee. Permissions are granted on the thumbnail files that Elastic Transcoder adds to the bucket. Valid values include:
      • READ: The grantee can read the thumbnails and metadata for objects that Elastic Transcoder adds to the Amazon S3 bucket.
      • READ_ACP: The grantee can read the object ACL for thumbnails that Elastic Transcoder adds to the Amazon S3 bucket.
      • WRITE_ACP: The grantee can write the ACL for the thumbnails that Elastic Transcoder adds to the Amazon S3 bucket.
      • FULL_CONTROL: The grantee has READ, READ_ACP, and WRITE_ACP permissions for the thumbnails that Elastic Transcoder adds to the Amazon S3 bucket.
    • StorageClass: The Amazon S3 storage class, Standard or ReducedRedundancy, that you want Elastic Transcoder to assign to the thumbnails that it stores in your Amazon S3 bucket.
    ", + "Pipeline$ContentConfig": "

    Information about the Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists. Either you specify both ContentConfig and ThumbnailConfig, or you specify OutputBucket.

    • Bucket: The Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists.
    • Permissions: A list of the users and/or predefined Amazon S3 groups you want to have access to transcoded files and playlists, and the type of access that you want them to have.
      • GranteeType: The type of value that appears in the Grantee object:
        • Canonical: Either the canonical user ID for an AWS account or an origin access identity for an Amazon CloudFront distribution.
        • Email: The registered email address of an AWS account.
        • Group: One of the following predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, or LogDelivery.
      • Grantee: The AWS user or group that you want to have access to transcoded files and playlists.
      • Access: The permission that you want to give to the AWS user that is listed in Grantee. Valid values include:
        • READ: The grantee can read the objects and metadata for objects that Elastic Transcoder adds to the Amazon S3 bucket.
        • READ_ACP: The grantee can read the object ACL for objects that Elastic Transcoder adds to the Amazon S3 bucket.
        • WRITE_ACP: The grantee can write the ACL for the objects that Elastic Transcoder adds to the Amazon S3 bucket.
        • FULL_CONTROL: The grantee has READ, READ_ACP, and WRITE_ACP permissions for the objects that Elastic Transcoder adds to the Amazon S3 bucket.
    • StorageClass: The Amazon S3 storage class, Standard or ReducedRedundancy, that you want Elastic Transcoder to assign to the video files and playlists that it stores in your Amazon S3 bucket.
    ", + "Pipeline$ThumbnailConfig": "

    Information about the Amazon S3 bucket in which you want Elastic Transcoder to save thumbnail files. Either you specify both ContentConfig and ThumbnailConfig, or you specify OutputBucket.

    • Bucket: The Amazon S3 bucket in which you want Elastic Transcoder to save thumbnail files.
    • Permissions: A list of the users and/or predefined Amazon S3 groups you want to have access to thumbnail files, and the type of access that you want them to have.
      • GranteeType: The type of value that appears in the Grantee object:
        • Canonical: Either the canonical user ID for an AWS account or an origin access identity for an Amazon CloudFront distribution. A canonical user ID is not the same as an AWS account number.
        • Email: The registered email address of an AWS account.
        • Group: One of the following predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, or LogDelivery.
      • Grantee: The AWS user or group that you want to have access to thumbnail files.
      • Access: The permission that you want to give to the AWS user that is listed in Grantee. Valid values include:
        • READ: The grantee can read the thumbnails and metadata for thumbnails that Elastic Transcoder adds to the Amazon S3 bucket.
        • READ_ACP: The grantee can read the object ACL for thumbnails that Elastic Transcoder adds to the Amazon S3 bucket.
        • WRITE_ACP: The grantee can write the ACL for the thumbnails that Elastic Transcoder adds to the Amazon S3 bucket.
        • FULL_CONTROL: The grantee has READ, READ_ACP, and WRITE_ACP permissions for the thumbnails that Elastic Transcoder adds to the Amazon S3 bucket.
    • StorageClass: The Amazon S3 storage class, Standard or ReducedRedundancy, that you want Elastic Transcoder to assign to the thumbnails that it stores in your Amazon S3 bucket.
    ", + "UpdatePipelineRequest$ContentConfig": "

    The optional ContentConfig object specifies information about the Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists: which bucket to use, which users you want to have access to the files, the type of access you want users to have, and the storage class that you want to assign to the files.

    If you specify values for ContentConfig, you must also specify values for ThumbnailConfig.

    If you specify values for ContentConfig and ThumbnailConfig, omit the OutputBucket object.

    • Bucket: The Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists.
    • Permissions (Optional): The Permissions object specifies which users you want to have access to transcoded files and the type of access you want them to have. You can grant permissions to a maximum of 30 users and/or predefined Amazon S3 groups.
    • Grantee Type: Specify the type of value that appears in the Grantee object:
      • Canonical: The value in the Grantee object is either the canonical user ID for an AWS account or an origin access identity for an Amazon CloudFront distribution. For more information about canonical user IDs, see Access Control List (ACL) Overview in the Amazon Simple Storage Service Developer Guide. For more information about using CloudFront origin access identities to require that users use CloudFront URLs instead of Amazon S3 URLs, see Using an Origin Access Identity to Restrict Access to Your Amazon S3 Content. A canonical user ID is not the same as an AWS account number.
      • Email: The value in the Grantee object is the registered email address of an AWS account.
      • Group: The value in the Grantee object is one of the following predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, or LogDelivery.
    • Grantee: The AWS user or group that you want to have access to transcoded files and playlists. To identify the user or group, you can specify the canonical user ID for an AWS account, an origin access identity for a CloudFront distribution, the registered email address of an AWS account, or a predefined Amazon S3 group
    • Access: The permission that you want to give to the AWS user that you specified in Grantee. Permissions are granted on the files that Elastic Transcoder adds to the bucket, including playlists and video files. Valid values include:
      • READ: The grantee can read the objects and metadata for objects that Elastic Transcoder adds to the Amazon S3 bucket.
      • READ_ACP: The grantee can read the object ACL for objects that Elastic Transcoder adds to the Amazon S3 bucket.
      • WRITE_ACP: The grantee can write the ACL for the objects that Elastic Transcoder adds to the Amazon S3 bucket.
      • FULL_CONTROL: The grantee has READ, READ_ACP, and WRITE_ACP permissions for the objects that Elastic Transcoder adds to the Amazon S3 bucket.
    • StorageClass: The Amazon S3 storage class, Standard or ReducedRedundancy, that you want Elastic Transcoder to assign to the video files and playlists that it stores in your Amazon S3 bucket.
    ", + "UpdatePipelineRequest$ThumbnailConfig": "

    The ThumbnailConfig object specifies several values, including the Amazon S3 bucket in which you want Elastic Transcoder to save thumbnail files, which users you want to have access to the files, the type of access you want users to have, and the storage class that you want to assign to the files. A brief SDK sketch follows the list below.

    If you specify values for ContentConfig, you must also specify values for ThumbnailConfig even if you don't want to create thumbnails.

    If you specify values for ContentConfig and ThumbnailConfig, omit the OutputBucket object.

    • Bucket: The Amazon S3 bucket in which you want Elastic Transcoder to save thumbnail files.
    • Permissions (Optional): The Permissions object specifies which users and/or predefined Amazon S3 groups you want to have access to thumbnail files, and the type of access you want them to have. You can grant permissions to a maximum of 30 users and/or predefined Amazon S3 groups.
    • GranteeType: Specify the type of value that appears in the Grantee object:
      • Canonical: The value in the Grantee object is either the canonical user ID for an AWS account or an origin access identity for an Amazon CloudFront distribution. A canonical user ID is not the same as an AWS account number.
      • Email: The value in the Grantee object is the registered email address of an AWS account.
      • Group: The value in the Grantee object is one of the following predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, or LogDelivery.
    • Grantee: The AWS user or group that you want to have access to thumbnail files. To identify the user or group, you can specify the canonical user ID for an AWS account, an origin access identity for a CloudFront distribution, the registered email address of an AWS account, or a predefined Amazon S3 group.
    • Access: The permission that you want to give to the AWS user that you specified in Grantee. Permissions are granted on the thumbnail files that Elastic Transcoder adds to the bucket. Valid values include:
      • READ: The grantee can read the thumbnails and metadata for objects that Elastic Transcoder adds to the Amazon S3 bucket.
      • READ_ACP: The grantee can read the object ACL for thumbnails that Elastic Transcoder adds to the Amazon S3 bucket.
      • WRITE_ACP: The grantee can write the ACL for the thumbnails that Elastic Transcoder adds to the Amazon S3 bucket.
      • FULL_CONTROL: The grantee has READ, READ_ACP, and WRITE_ACP permissions for the thumbnails that Elastic Transcoder adds to the Amazon S3 bucket.
    • StorageClass: The Amazon S3 storage class, Standard or ReducedRedundancy, that you want Elastic Transcoder to assign to the thumbnails that it stores in your Amazon S3 bucket.
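
    For orientation, the ContentConfig and ThumbnailConfig objects described above correspond to the SDK's PipelineOutputConfig type. Below is a minimal, hypothetical sketch of an UpdatePipeline call that sets both; the pipeline ID, bucket names, and grantee address are placeholders, not values taken from this changeset.

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/elastictranscoder"
    )

    func updatePipelineConfigs() error {
        svc := elastictranscoder.New(session.Must(session.NewSession()))
        _, err := svc.UpdatePipeline(&elastictranscoder.UpdatePipelineInput{
            Id: aws.String("1111111111111-abcde1"), // hypothetical pipeline ID
            ContentConfig: &elastictranscoder.PipelineOutputConfig{
                Bucket:       aws.String("example-transcoded-bucket"),
                StorageClass: aws.String("Standard"),
                Permissions: []*elastictranscoder.Permission{{
                    GranteeType: aws.String("Email"),
                    Grantee:     aws.String("user@example.com"),
                    Access:      []*string{aws.String("READ")},
                }},
            },
            // ThumbnailConfig is required whenever ContentConfig is set,
            // and OutputBucket must then be omitted.
            ThumbnailConfig: &elastictranscoder.PipelineOutputConfig{
                Bucket:       aws.String("example-thumbnail-bucket"),
                StorageClass: aws.String("ReducedRedundancy"),
            },
        })
        return err
    }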
    " + } + }, + "PipelineStatus": { + "base": null, + "refs": { + "Pipeline$Status": "

    The current status of the pipeline:

    • Active: The pipeline is processing jobs.
    • Paused: The pipeline is not currently processing jobs.
    ", + "UpdatePipelineStatusRequest$Status": "

    The desired status of the pipeline:

    • Active: The pipeline is processing jobs.
    • Paused: The pipeline is not currently processing jobs.
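
    A pipeline can be toggled between these two states with UpdatePipelineStatus. A minimal sketch, assuming the aws, session, and elastictranscoder packages are imported and the pipeline ID is hypothetical:

    func pausePipeline(id string) error {
        svc := elastictranscoder.New(session.Must(session.NewSession()))
        _, err := svc.UpdatePipelineStatus(&elastictranscoder.UpdatePipelineStatusInput{
            Id:     aws.String(id),
            Status: aws.String("Paused"), // or "Active" to resume processing
        })
        return err
    }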
    " + } + }, + "Pipelines": { + "base": null, + "refs": { + "ListPipelinesResponse$Pipelines": "

    An array of Pipeline objects.

    " + } + }, + "PixelsOrPercent": { + "base": null, + "refs": { + "PresetWatermark$MaxWidth": "

    The maximum width of the watermark in one of the following formats:

    • number of pixels (px): The minimum value is 16 pixels, and the maximum value is the value of MaxWidth.
    • integer percentage (%): The range of valid values is 0 to 100. Use the value of Target to specify whether you want Elastic Transcoder to include the black bars that are added by Elastic Transcoder, if any, in the calculation.
    If you specify the value in pixels, it must be less than or equal to the value of MaxWidth.

    ", + "PresetWatermark$MaxHeight": "

    The maximum height of the watermark in one of the following formats:

    • number of pixels (px): The minimum value is 16 pixels, and the maximum value is the value of MaxHeight.
    • integer percentage (%): The range of valid values is 0 to 100. Use the value of Target to specify whether you want Elastic Transcoder to include the black bars that are added by Elastic Transcoder, if any, in the calculation.
    If you specify the value in pixels, it must be less than or equal to the value of MaxHeight.

    ", + "PresetWatermark$HorizontalOffset": "

    The amount by which you want the horizontal position of the watermark to be offset from the position specified by HorizontalAlign:

    • number of pixels (px): The minimum value is 0 pixels, and the maximum value is the value of MaxWidth.
    • integer percentage (%): The range of valid values is 0 to 100.
    For example, if you specify Left for HorizontalAlign and 5px for HorizontalOffset, the left side of the watermark appears 5 pixels from the left border of the output video.

    HorizontalOffset is only valid when the value of HorizontalAlign is Left or Right. If you specify an offset that causes the watermark to extend beyond the left or right border and Elastic Transcoder has not added black bars, the watermark is cropped. If Elastic Transcoder has added black bars, the watermark extends into the black bars. If the watermark extends beyond the black bars, it is cropped.

    Use the value of Target to specify whether you want to include the black bars that are added by Elastic Transcoder, if any, in the offset calculation.

    ", + "PresetWatermark$VerticalOffset": "VerticalOffset

    The amount by which you want the vertical position of the watermark to be offset from the position specified by VerticalAlign:

    • number of pixels (px): The minimum value is 0 pixels, and the maximum value is the value of MaxHeight.
    • integer percentage (%): The range of valid values is 0 to 100.
    For example, if you specify Top for VerticalAlign and 5px for VerticalOffset, the top of the watermark appears 5 pixels from the top border of the output video.

    VerticalOffset is only valid when the value of VerticalAlign is Top or Bottom.

    If you specify an offset that causes the watermark to extend beyond the top or bottom border and Elastic Transcoder has not added black bars, the watermark is cropped. If Elastic Transcoder has added black bars, the watermark extends into the black bars. If the watermark extends beyond the black bars, it is cropped.

    Use the value of Target to specify whether you want Elastic Transcoder to include the black bars that are added by Elastic Transcoder, if any, in the offset calculation.
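
    Putting the sizing, alignment, offset, and Target values together, here is a hypothetical watermark definition, assuming the aws and elastictranscoder packages are imported; it places a semi-transparent mark 5 pixels from the top-left corner, measured against the frame including any black bars:

    watermark := &elastictranscoder.PresetWatermark{
        Id:               aws.String("TopLeft"), // up to 40 characters
        MaxWidth:         aws.String("10%"),
        MaxHeight:        aws.String("10%"),
        SizingPolicy:     aws.String("ShrinkToFit"),
        HorizontalAlign:  aws.String("Left"),
        HorizontalOffset: aws.String("5px"),
        VerticalAlign:    aws.String("Top"),
        VerticalOffset:   aws.String("5px"),
        Opacity:          aws.String("50"),
        Target:           aws.String("Frame"), // include black bars in calculations
    }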

    " + } + }, + "PlayReadyDrm": { + "base": "

    The PlayReady DRM settings, if any, that you want Elastic Transcoder to apply to the output files associated with this playlist.

    PlayReady DRM encrypts your media files using AES-CTR encryption.

    If you use DRM for an HLSv3 playlist, your outputs must have a master playlist.

    ", + "refs": { + "CreateJobPlaylist$PlayReadyDrm": "

    The DRM settings, if any, that you want Elastic Transcoder to apply to the output files associated with this playlist.

    ", + "Playlist$PlayReadyDrm": "

    The DRM settings, if any, that you want Elastic Transcoder to apply to the output files associated with this playlist.

    " + } + }, + "PlayReadyDrmFormatString": { + "base": null, + "refs": { + "PlayReadyDrm$Format": "

    The type of DRM, if any, that you want Elastic Transcoder to apply to the output files associated with this playlist.

    " + } + }, + "Playlist": { + "base": "

    Use Only for Fragmented MP4 or MPEG-TS Outputs. If you specify a preset for which the value of Container is fmp4 (Fragmented MP4) or ts (MPEG-TS), Playlists contains information about the master playlists that you want Elastic Transcoder to create. We recommend that you create only one master playlist per output format. The maximum number of master playlists in a job is 30.

    ", + "refs": { + "Playlists$member": null + } + }, + "PlaylistFormat": { + "base": null, + "refs": { + "CreateJobPlaylist$Format": "

    The format of the output playlist. Valid formats include HLSv3, HLSv4, and Smooth.

    ", + "Playlist$Format": "

    The format of the output playlist. Valid formats include HLSv3, HLSv4, and Smooth.

    " + } + }, + "Playlists": { + "base": null, + "refs": { + "Job$Playlists": "

    Outputs in Fragmented MP4 or MPEG-TS format only. If you specify a preset in PresetId for which the value of Container is fmp4 (Fragmented MP4) or ts (MPEG-TS), Playlists contains information about the master playlists that you want Elastic Transcoder to create.

    The maximum number of master playlists in a job is 30.
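
    A hypothetical CreateJob sketch that produces one HLS master playlist; the pipeline ID is a placeholder, and the preset ID is a system HLS preset you should verify before use:

    resp, err := svc.CreateJob(&elastictranscoder.CreateJobInput{
        PipelineId:      aws.String("1111111111111-abcde1"),
        Input:           &elastictranscoder.JobInput{Key: aws.String("input/movie.mp4")},
        OutputKeyPrefix: aws.String("hls/"),
        Outputs: []*elastictranscoder.CreateJobOutput{{
            Key:             aws.String("segment"),
            PresetId:        aws.String("1351620000001-200010"), // system HLS preset (verify)
            SegmentDuration: aws.String("10"),
        }},
        Playlists: []*elastictranscoder.CreateJobPlaylist{{
            Format:     aws.String("HLSv3"),
            Name:       aws.String("master"),
            OutputKeys: []*string{aws.String("segment")},
        }},
    })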

    " + } + }, + "Preset": { + "base": "

    Presets are templates that contain most of the settings for transcoding media files from one format to another. Elastic Transcoder includes some default presets for common formats, for example, several iPod and iPhone versions. You can also create your own presets for formats that aren't included among the default presets. You specify which preset you want to use when you create a job.

    ", + "refs": { + "CreatePresetResponse$Preset": "

    A section of the response body that provides information about the preset that is created.

    ", + "Presets$member": null, + "ReadPresetResponse$Preset": "

    A section of the response body that provides information about the preset.

    " + } + }, + "PresetContainer": { + "base": null, + "refs": { + "CreatePresetRequest$Container": "

    The container type for the output file. Valid values include flac, flv, fmp4, gif, mp3, mp4, mpg, mxf, oga, ogg, ts, and webm.

    ", + "Preset$Container": "

    The container type for the output file. Valid values include flac, flv, fmp4, gif, mp3, mp4, mpg, mxf, oga, ogg, ts, and webm.

    " + } + }, + "PresetType": { + "base": null, + "refs": { + "Preset$Type": "

    Whether the preset is a default preset provided by Elastic Transcoder (System) or a preset that you have defined (Custom).

    " + } + }, + "PresetWatermark": { + "base": "

    Settings for the size, location, and opacity of graphics that you want Elastic Transcoder to overlay over videos that are transcoded using this preset. You can specify settings for up to four watermarks. Watermarks appear in the specified size and location, and with the specified opacity for the duration of the transcoded video.

    Watermarks can be in .png or .jpg format. If you want to display a watermark that is not rectangular, use the .png format, which supports transparency.

    When you create a job that uses this preset, you specify the .png or .jpg graphics that you want Elastic Transcoder to include in the transcoded videos. You can specify fewer graphics in the job than you specify watermark settings in the preset, which allows you to use the same preset for up to four watermarks that have different dimensions.

    ", + "refs": { + "PresetWatermarks$member": null + } + }, + "PresetWatermarkId": { + "base": null, + "refs": { + "JobWatermark$PresetWatermarkId": "

    The ID of the watermark settings that Elastic Transcoder uses to add watermarks to the video during transcoding. The settings are in the preset specified by Preset for the current output. In that preset, the value of Watermarks Id tells Elastic Transcoder which settings to use.

    ", + "PresetWatermark$Id": "A unique identifier for the settings for one watermark. The value of Id can be up to 40 characters long." + } + }, + "PresetWatermarks": { + "base": null, + "refs": { + "VideoParameters$Watermarks": "

    Settings for the size, location, and opacity of graphics that you want Elastic Transcoder to overlay over videos that are transcoded using this preset. You can specify settings for up to four watermarks. Watermarks appear in the specified size and location, and with the specified opacity for the duration of the transcoded video.

    Watermarks can be in .png or .jpg format. If you want to display a watermark that is not rectangular, use the .png format, which supports transparency.

    When you create a job that uses this preset, you specify the .png or .jpg graphics that you want Elastic Transcoder to include in the transcoded videos. You can specify fewer graphics in the job than you specify watermark settings in the preset, which allows you to use the same preset for up to four watermarks that have different dimensions.

    " + } + }, + "Presets": { + "base": null, + "refs": { + "ListPresetsResponse$Presets": "

    An array of Preset objects.

    " + } + }, + "ReadJobRequest": { + "base": "

    The ReadJobRequest structure.

    ", + "refs": { + } + }, + "ReadJobResponse": { + "base": "

    The ReadJobResponse structure.

    ", + "refs": { + } + }, + "ReadPipelineRequest": { + "base": "

    The ReadPipelineRequest structure.

    ", + "refs": { + } + }, + "ReadPipelineResponse": { + "base": "

    The ReadPipelineResponse structure.

    ", + "refs": { + } + }, + "ReadPresetRequest": { + "base": "

    The ReadPresetRequest structure.

    ", + "refs": { + } + }, + "ReadPresetResponse": { + "base": "

    The ReadPresetResponse structure.

    ", + "refs": { + } + }, + "Resolution": { + "base": null, + "refs": { + "JobInput$Resolution": "

    This value must be auto, which causes Elastic Transcoder to automatically detect the resolution of the input file.

    ", + "VideoParameters$Resolution": "

    To better control resolution and aspect ratio of output videos, we recommend that you use the values MaxWidth, MaxHeight, SizingPolicy, PaddingPolicy, and DisplayAspectRatio instead of Resolution and AspectRatio. The two groups of settings are mutually exclusive. Do not use them together.

    The width and height of the video in the output file, in pixels. Valid values are auto and width x height:

    • auto: Elastic Transcoder attempts to preserve the width and height of the input file, subject to the following rules.
    • width x height: The width and height of the output video in pixels.

    Note the following about specifying the width and height:

    • The width must be an even integer between 128 and 4096, inclusive.
    • The height must be an even integer between 96 and 3072, inclusive.
    • If you specify a resolution that is less than the resolution of the input file, Elastic Transcoder rescales the output file to the lower resolution.
    • If you specify a resolution that is greater than the resolution of the input file, Elastic Transcoder rescales the output to the higher resolution.
    • We recommend that you specify a resolution for which the product of width and height is less than or equal to the applicable value in the following list (Level - Max width x height value); a configuration sketch follows this list:
      • 1 - 25344
      • 1b - 25344
      • 1.1 - 101376
      • 1.2 - 101376
      • 1.3 - 101376
      • 2 - 101376
      • 2.1 - 202752
      • 2.2 - 404720
      • 3 - 404720
      • 3.1 - 921600
      • 3.2 - 1310720
      • 4 - 2097152
      • 4.1 - 2097152
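
    As the recommendation above suggests, the MaxWidth/MaxHeight family of settings is the preferred way to bound output size. A sketch of VideoParameters following that advice, assuming the aws and elastictranscoder packages are imported:

    video := &elastictranscoder.VideoParameters{
        Codec:              aws.String("H.264"),
        MaxWidth:           aws.String("1280"),
        MaxHeight:          aws.String("720"), // 1280 * 720 = 921600, within Level 3.1
        SizingPolicy:       aws.String("ShrinkToFit"),
        PaddingPolicy:      aws.String("NoPad"),
        DisplayAspectRatio: aws.String("auto"),
        // Resolution and AspectRatio are deliberately unset; the two groups
        // of settings are mutually exclusive.
    }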
    " + } + }, + "ResourceInUseException": { + "base": "

    The resource you are attempting to change is in use. For example, you are attempting to delete a pipeline that is currently in use.

    ", + "refs": { + } + }, + "ResourceNotFoundException": { + "base": "

    The requested resource does not exist or is not available. For example, the pipeline to which you're trying to add a job doesn't exist or is still being created.

    ", + "refs": { + } + }, + "Role": { + "base": null, + "refs": { + "CreatePipelineRequest$Role": "

    The IAM Amazon Resource Name (ARN) for the role that you want Elastic Transcoder to use to create the pipeline.

    ", + "Pipeline$Role": "

    The IAM Amazon Resource Name (ARN) for the role that Elastic Transcoder uses to transcode jobs for this pipeline.

    ", + "TestRoleRequest$Role": "

    The IAM Amazon Resource Name (ARN) for the role that you want Elastic Transcoder to test.

    ", + "UpdatePipelineRequest$Role": "

    The IAM Amazon Resource Name (ARN) for the role that you want Elastic Transcoder to use to transcode jobs for this pipeline.

    " + } + }, + "Rotate": { + "base": null, + "refs": { + "CreateJobOutput$Rotate": "

    The number of degrees clockwise by which you want Elastic Transcoder to rotate the output relative to the input. Enter one of the following values: auto, 0, 90, 180, 270. The value auto generally works only if the file that you're transcoding contains rotation metadata.

    ", + "JobOutput$Rotate": "

    The number of degrees clockwise by which you want Elastic Transcoder to rotate the output relative to the input. Enter one of the following values:

    auto, 0, 90, 180, 270

    The value auto generally works only if the file that you're transcoding contains rotation metadata.

    " + } + }, + "SizingPolicy": { + "base": null, + "refs": { + "Artwork$SizingPolicy": "

    Specify one of the following values to control scaling of the output album art:

    • Fit: Elastic Transcoder scales the output art so it matches the value that you specified in either MaxWidth or MaxHeight without exceeding the other value.
    • Fill: Elastic Transcoder scales the output art so it matches the value that you specified in either MaxWidth or MaxHeight and matches or exceeds the other value. Elastic Transcoder centers the output art and then crops it in the dimension (if any) that exceeds the maximum value.
    • Stretch: Elastic Transcoder stretches the output art to match the values that you specified for MaxWidth and MaxHeight. If the relative proportions of the input art and the output art are different, the output art will be distorted.
    • Keep: Elastic Transcoder does not scale the output art. If either dimension of the input art exceeds the values that you specified for MaxWidth and MaxHeight, Elastic Transcoder crops the output art.
    • ShrinkToFit: Elastic Transcoder scales the output art down so that its dimensions match the values that you specified for at least one of MaxWidth and MaxHeight without exceeding either value. If you specify this option, Elastic Transcoder does not scale the art up.
    • ShrinkToFill: Elastic Transcoder scales the output art down so that its dimensions match the values that you specified for at least one of MaxWidth and MaxHeight without dropping below either value. If you specify this option, Elastic Transcoder does not scale the art up.

    ", + "Thumbnails$SizingPolicy": "

    Specify one of the following values to control scaling of thumbnails:

    • Fit: Elastic Transcoder scales thumbnails so they match the value that you specified in thumbnail MaxWidth or MaxHeight settings without exceeding the other value.
    • Fill: Elastic Transcoder scales thumbnails so they match the value that you specified in thumbnail MaxWidth or MaxHeight settings and matches or exceeds the other value. Elastic Transcoder centers the image in thumbnails and then crops in the dimension (if any) that exceeds the maximum value.
    • Stretch: Elastic Transcoder stretches thumbnails to match the values that you specified for thumbnail MaxWidth and MaxHeight settings. If the relative proportions of the input video and thumbnails are different, the thumbnails will be distorted.
    • Keep: Elastic Transcoder does not scale thumbnails. If either dimension of the input video exceeds the values that you specified for thumbnail MaxWidth and MaxHeight settings, Elastic Transcoder crops the thumbnails.
    • ShrinkToFit: Elastic Transcoder scales thumbnails down so that their dimensions match the values that you specified for at least one of thumbnail MaxWidth and MaxHeight without exceeding either value. If you specify this option, Elastic Transcoder does not scale thumbnails up.
    • ShrinkToFill: Elastic Transcoder scales thumbnails down so that their dimensions match the values that you specified for at least one of MaxWidth and MaxHeight without dropping below either value. If you specify this option, Elastic Transcoder does not scale thumbnails up.

    ", + "VideoParameters$SizingPolicy": "

    Specify one of the following values to control scaling of the output video:

    • Fit: Elastic Transcoder scales the output video so it matches the value that you specified in either MaxWidth or MaxHeight without exceeding the other value.
    • Fill: Elastic Transcoder scales the output video so it matches the value that you specified in either MaxWidth or MaxHeight and matches or exceeds the other value. Elastic Transcoder centers the output video and then crops it in the dimension (if any) that exceeds the maximum value.
    • Stretch: Elastic Transcoder stretches the output video to match the values that you specified for MaxWidth and MaxHeight. If the relative proportions of the input video and the output video are different, the output video will be distorted.
    • Keep: Elastic Transcoder does not scale the output video. If either dimension of the input video exceeds the values that you specified for MaxWidth and MaxHeight, Elastic Transcoder crops the output video.
    • ShrinkToFit: Elastic Transcoder scales the output video down so that its dimensions match the values that you specified for at least one of MaxWidth and MaxHeight without exceeding either value. If you specify this option, Elastic Transcoder does not scale the video up.
    • ShrinkToFill: Elastic Transcoder scales the output video down so that its dimensions match the values that you specified for at least one of MaxWidth and MaxHeight without dropping below either value. If you specify this option, Elastic Transcoder does not scale the video up.

    " + } + }, + "SnsTopic": { + "base": null, + "refs": { + "Notifications$Progressing": "

    The Amazon Simple Notification Service (Amazon SNS) topic that you want to notify when Elastic Transcoder has started to process the job.

    ", + "Notifications$Completed": "

    The Amazon SNS topic that you want to notify when Elastic Transcoder has finished processing the job.

    ", + "Notifications$Warning": "

    The Amazon SNS topic that you want to notify when Elastic Transcoder encounters a warning condition.

    ", + "Notifications$Error": "

    The Amazon SNS topic that you want to notify when Elastic Transcoder encounters an error condition.
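
    The four notification hooks map onto the SDK's Notifications struct. A sketch with hypothetical topic ARNs, assuming the aws and elastictranscoder packages are imported:

    notifications := &elastictranscoder.Notifications{
        Progressing: aws.String("arn:aws:sns:us-east-1:111122223333:ets-progress"),
        Completed:   aws.String("arn:aws:sns:us-east-1:111122223333:ets-done"),
        Warning:     aws.String("arn:aws:sns:us-east-1:111122223333:ets-warn"),
        Error:       aws.String("arn:aws:sns:us-east-1:111122223333:ets-error"),
    }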

    ", + "SnsTopics$member": null + } + }, + "SnsTopics": { + "base": null, + "refs": { + "TestRoleRequest$Topics": "

    The ARNs of one or more Amazon Simple Notification Service (Amazon SNS) topics that you want the action to send a test notification to.
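
    These topic ARNs are exercised by the TestRole operation along with the role and buckets. A hypothetical sketch (all ARNs and bucket names are placeholders):

    out, err := svc.TestRole(&elastictranscoder.TestRoleInput{
        Role:         aws.String("arn:aws:iam::111122223333:role/Elastic_Transcoder_Default_Role"),
        InputBucket:  aws.String("example-input-bucket"),
        OutputBucket: aws.String("example-output-bucket"),
        Topics:       []*string{aws.String("arn:aws:sns:us-east-1:111122223333:transcoder-events")},
    })
    if err == nil && aws.StringValue(out.Success) == "true" {
        // the role can access the buckets and publish to the topics
    }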

    " + } + }, + "StorageClass": { + "base": null, + "refs": { + "PipelineOutputConfig$StorageClass": "

    The Amazon S3 storage class, Standard or ReducedRedundancy, that you want Elastic Transcoder to assign to the video files and playlists that it stores in your Amazon S3 bucket.

    " + } + }, + "String": { + "base": null, + "refs": { + "CreatePresetResponse$Warning": "

    If the preset settings don't comply with the standards for the video codec but Elastic Transcoder created the preset, this message explains the reason the preset settings don't meet the standard. Elastic Transcoder created the preset because the settings might produce acceptable output.

    ", + "ExceptionMessages$member": null, + "Job$Arn": "

    The Amazon Resource Name (ARN) for the job.

    ", + "JobOutput$Id": "

    A sequential counter, starting with 1, that identifies an output among the outputs from the current job. In the Output syntax, this value is always 1.

    ", + "JobOutput$AppliedColorSpaceConversion": "

    If Elastic Transcoder used a preset with a ColorSpaceConversionMode to transcode the output file, the AppliedColorSpaceConversion parameter shows the conversion used. If no ColorSpaceConversionMode was defined in the preset, this parameter will not be included in the job response.

    ", + "Pipeline$Arn": "

    The Amazon Resource Name (ARN) for the pipeline.

    ", + "Preset$Arn": "

    The Amazon Resource Name (ARN) for the preset.

    ", + "UserMetadata$key": null, + "UserMetadata$value": null, + "Warning$Code": "

    The code of the cross-regional warning.

    ", + "Warning$Message": "

    The message explaining what resources are in a different region from the pipeline.

    Note: AWS KMS keys must be in the same region as the pipeline.

    " + } + }, + "Success": { + "base": null, + "refs": { + "TestRoleResponse$Success": "

    If the operation is successful, this value is true; otherwise, the value is false.

    " + } + }, + "Target": { + "base": null, + "refs": { + "PresetWatermark$Target": "

    A value that determines how Elastic Transcoder interprets values that you specified for HorizontalOffset, VerticalOffset, MaxWidth, and MaxHeight:

    • Content: HorizontalOffset and VerticalOffset values are calculated based on the borders of the video excluding black bars added by Elastic Transcoder, if any. In addition, MaxWidth and MaxHeight, if specified as a percentage, are calculated based on the borders of the video excluding black bars added by Elastic Transcoder, if any.
    • Frame: HorizontalOffset and VerticalOffset values are calculated based on the borders of the video including black bars added by Elastic Transcoder, if any. In addition, MaxWidth and MaxHeight, if specified as a percentage, are calculated based on the borders of the video including black bars added by Elastic Transcoder, if any.

    " + } + }, + "TestRoleRequest": { + "base": "

    The TestRoleRequest structure.

    ", + "refs": { + } + }, + "TestRoleResponse": { + "base": "

    The TestRoleResponse structure.

    ", + "refs": { + } + }, + "ThumbnailPattern": { + "base": null, + "refs": { + "CreateJobOutput$ThumbnailPattern": "

    Whether you want Elastic Transcoder to create thumbnails for your videos and, if so, how you want Elastic Transcoder to name the files.

    If you don't want Elastic Transcoder to create thumbnails, specify \"\".

    If you do want Elastic Transcoder to create thumbnails, specify the information that you want to include in the file name for each thumbnail. You can specify the following values in any sequence:

    • {count} (Required): If you want to create thumbnails, you must include {count} in the ThumbnailPattern object. Wherever you specify {count}, Elastic Transcoder adds a five-digit sequence number (beginning with 00001) to thumbnail file names. The number indicates where a given thumbnail appears in the sequence of thumbnails for a transcoded file.

      If you specify a literal value and/or {resolution} but you omit {count}, Elastic Transcoder returns a validation error and does not create the job.
    • Literal values (Optional): You can specify literal values anywhere in the ThumbnailPattern object. For example, you can include them as a file name prefix or as a delimiter between {resolution} and {count}.

    • {resolution} (Optional): If you want Elastic Transcoder to include the resolution in the file name, include {resolution} in the ThumbnailPattern object.

    When creating thumbnails, Elastic Transcoder automatically saves the files in the format (.jpg or .png) that appears in the preset that you specified in the PresetID value of CreateJobOutput. Elastic Transcoder also appends the applicable file name extension.

    ", + "JobOutput$ThumbnailPattern": "

    Whether you want Elastic Transcoder to create thumbnails for your videos and, if so, how you want Elastic Transcoder to name the files.

    If you don't want Elastic Transcoder to create thumbnails, specify \"\".

    If you do want Elastic Transcoder to create thumbnails, specify the information that you want to include in the file name for each thumbnail. You can specify the following values in any sequence:

    • {count} (Required): If you want to create thumbnails, you must include {count} in the ThumbnailPattern object. Wherever you specify {count}, Elastic Transcoder adds a five-digit sequence number (beginning with 00001) to thumbnail file names. The number indicates where a given thumbnail appears in the sequence of thumbnails for a transcoded file.

      If you specify a literal value and/or {resolution} but you omit {count}, Elastic Transcoder returns a validation error and does not create the job.
    • Literal values (Optional): You can specify literal values anywhere in the ThumbnailPattern object. For example, you can include them as a file name prefix or as a delimiter between {resolution} and {count}.

    • {resolution} (Optional): If you want Elastic Transcoder to include the resolution in the file name, include {resolution} in the ThumbnailPattern object.

    When creating thumbnails, Elastic Transcoder automatically saves the files in the format (.jpg or .png) that appears in the preset that you specified in the PresetID value of CreateJobOutput. Elastic Transcoder also appends the applicable file name extension.
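
    Concretely, a pattern combining all three elements might look like the following hypothetical output definition (the preset ID is a placeholder):

    output := &elastictranscoder.CreateJobOutput{
        Key:      aws.String("video/output.mp4"),
        PresetId: aws.String("1351620000001-000010"), // hypothetical preset ID
        // Produces names like thumbs/1280x720-00001.jpg (or .png, per the preset).
        ThumbnailPattern: aws.String("thumbs/{resolution}-{count}"),
    }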

    " + } + }, + "ThumbnailResolution": { + "base": null, + "refs": { + "Thumbnails$Resolution": "

    To better control resolution and aspect ratio of thumbnails, we recommend that you use the values MaxWidth, MaxHeight, SizingPolicy, and PaddingPolicy instead of Resolution and AspectRatio. The two groups of settings are mutually exclusive. Do not use them together.

    The width and height of thumbnail files in pixels. Specify a value in the format width x height where both values are even integers. The values cannot exceed the width and height that you specified in the Video:Resolution object.

    " + } + }, + "Thumbnails": { + "base": "

    Thumbnails for videos.

    ", + "refs": { + "CreatePresetRequest$Thumbnails": "

    A section of the request body that specifies the thumbnail parameters, if any.

    ", + "Preset$Thumbnails": "

    A section of the response body that provides information about the thumbnail preset values, if any.

    " + } + }, + "Time": { + "base": null, + "refs": { + "TimeSpan$StartTime": "

    The place in the input file where you want a clip to start. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, Elastic Transcoder starts at the beginning of the input file.

    ", + "TimeSpan$Duration": "

    The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, Elastic Transcoder creates an output file from StartTime to the end of the file.

    If you specify a value longer than the duration of the input file, Elastic Transcoder transcodes the file and returns a warning message.
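
    A sketch of a TimeSpan that clips 30 seconds starting at the one-minute mark, attached here to a job input (assuming this SDK snapshot exposes TimeSpan on JobInput; the object key is a placeholder):

    input := &elastictranscoder.JobInput{
        Key: aws.String("input/movie.mp4"),
        TimeSpan: &elastictranscoder.TimeSpan{
            StartTime: aws.String("00:01:00.000"),
            Duration:  aws.String("00:00:30.000"),
        },
    }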

    " + } + }, + "TimeOffset": { + "base": null, + "refs": { + "CaptionSource$TimeOffset": "

    For clip generation or captions that do not start at the same time as the associated video file, the TimeOffset tells Elastic Transcoder how much of the video to encode before including captions.

    Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.

    " + } + }, + "TimeSpan": { + "base": "

    Settings that determine when a clip begins and how long it lasts.

    ", + "refs": { + "Clip$TimeSpan": "

    Settings that determine when a clip begins and how long it lasts.

    " + } + }, + "Timing": { + "base": "

    Details about the timing of a job.

    ", + "refs": { + "Job$Timing": "

    Details about the timing of a job.

    " + } + }, + "UpdatePipelineNotificationsRequest": { + "base": "

    The UpdatePipelineNotificationsRequest structure.

    ", + "refs": { + } + }, + "UpdatePipelineNotificationsResponse": { + "base": "

    The UpdatePipelineNotificationsResponse structure.

    ", + "refs": { + } + }, + "UpdatePipelineRequest": { + "base": "

    The UpdatePipelineRequest structure.

    ", + "refs": { + } + }, + "UpdatePipelineResponse": { + "base": "

    When you update a pipeline, Elastic Transcoder returns the values that you specified in the request.

    ", + "refs": { + } + }, + "UpdatePipelineStatusRequest": { + "base": "

    The UpdatePipelineStatusRequest structure.

    ", + "refs": { + } + }, + "UpdatePipelineStatusResponse": { + "base": "When you update status for a pipeline, Elastic Transcoder returns the values that you specified in the request.", + "refs": { + } + }, + "UserMetadata": { + "base": null, + "refs": { + "CreateJobRequest$UserMetadata": "

    User-defined metadata that you want to associate with an Elastic Transcoder job. You specify metadata in key/value pairs, and you can add up to 10 key/value pairs per job. Elastic Transcoder does not guarantee that key/value pairs will be returned in the same order in which you specify them.

    ", + "Job$UserMetadata": "

    User-defined metadata that you want to associate with an Elastic Transcoder job. You specify metadata in key/value pairs, and you can add up to 10 key/value pairs per job. Elastic Transcoder does not guarantee that key/value pairs will be returned in the same order in which you specify them.

    Metadata keys and values must use characters from the following list:

    • 0-9

    • A-Z and a-z

    • Space

    • The following symbols: _.:/=+-%@
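
    A sketch of attaching such metadata when creating a job; the pipeline, keys, and preset ID are hypothetical:

    job, err := svc.CreateJob(&elastictranscoder.CreateJobInput{
        PipelineId: aws.String("1111111111111-abcde1"),
        Input:      &elastictranscoder.JobInput{Key: aws.String("input/movie.mp4")},
        Output: &elastictranscoder.CreateJobOutput{
            Key:      aws.String("output/movie.mp4"),
            PresetId: aws.String("1351620000001-000010"),
        },
        UserMetadata: map[string]*string{ // up to 10 pairs; order is not preserved
            "project":  aws.String("demo"),
            "batch_id": aws.String("2017-07-26"),
        },
    })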

    " + } + }, + "ValidationException": { + "base": "

    One or more required parameter values were not provided in the request.

    ", + "refs": { + } + }, + "VerticalAlign": { + "base": null, + "refs": { + "PresetWatermark$VerticalAlign": "

    The vertical position of the watermark unless you specify a non-zero value for VerticalOffset:

    • Top: The top edge of the watermark is aligned with the top border of the video.
    • Bottom: The bottom edge of the watermark is aligned with the bottom border of the video.
    • Center: The watermark is centered between the top and bottom borders.

    " + } + }, + "VideoBitRate": { + "base": null, + "refs": { + "VideoParameters$BitRate": "

    The bit rate of the video stream in the output file, in kilobits/second. Valid values depend on the values of Level and Profile. If you specify auto, Elastic Transcoder uses the detected bit rate of the input source. If you specify a value other than auto, we recommend that you specify a value less than or equal to the maximum H.264-compliant value listed for your level and profile:

    Level - Maximum video bit rate in kilobits/second (baseline and main Profile) : maximum video bit rate in kilobits/second (high Profile)

    • 1 - 64 : 80
    • 1b - 128 : 160
    • 1.1 - 192 : 240
    • 1.2 - 384 : 480
    • 1.3 - 768 : 960
    • 2 - 2000 : 2500
    • 3 - 10000 : 12500
    • 3.1 - 14000 : 17500
    • 3.2 - 20000 : 25000
    • 4 - 20000 : 25000
    • 4.1 - 50000 : 62500
    " + } + }, + "VideoCodec": { + "base": null, + "refs": { + "VideoParameters$Codec": "

    The video codec for the output file. Valid values include gif, H.264, mpeg2, and vp8. You can only specify vp8 when the container type is webm, gif when the container type is gif, and mpeg2 when the container type is mpg.

    " + } + }, + "VideoParameters": { + "base": "

    The VideoParameters structure.

    ", + "refs": { + "CreatePresetRequest$Video": "

    A section of the request body that specifies the video parameters.

    ", + "Preset$Video": "

    A section of the response body that provides information about the video preset values.

    " + } + }, + "Warning": { + "base": "

    Elastic Transcoder returns a warning if the resources used by your pipeline are not in the same region as the pipeline.

    Using resources in the same region, such as your Amazon S3 buckets, Amazon SNS notification topics, and AWS KMS key, reduces processing time and prevents cross-regional charges.

    ", + "refs": { + "Warnings$member": null + } + }, + "Warnings": { + "base": null, + "refs": { + "CreatePipelineResponse$Warnings": "

    Elastic Transcoder returns a warning if the resources used by your pipeline are not in the same region as the pipeline.

    Using resources in the same region, such as your Amazon S3 buckets, Amazon SNS notification topics, and AWS KMS key, reduces processing time and prevents cross-regional charges.

    ", + "ReadPipelineResponse$Warnings": "

    Elastic Transcoder returns a warning if the resources used by your pipeline are not in the same region as the pipeline.

    Using resources in the same region, such as your Amazon S3 buckets, Amazon SNS notification topics, and AWS KMS key, reduces processing time and prevents cross-regional charges.

    ", + "UpdatePipelineResponse$Warnings": "

    Elastic Transcoder returns a warning if the resources used by your pipeline are not in the same region as the pipeline.

    Using resources in the same region, such as your Amazon S3 buckets, Amazon SNS notification topics, and AWS KMS key, reduces processing time and prevents cross-regional charges.

    " + } + }, + "WatermarkKey": { + "base": null, + "refs": { + "Artwork$InputKey": "

    The name of the file to be used as album art. To determine which Amazon S3 bucket contains the specified file, Elastic Transcoder checks the pipeline specified by PipelineId; the InputBucket object in that pipeline identifies the bucket.

    If the file name includes a prefix, for example, cooking/pie.jpg, include the prefix in the key. If the file isn't in the specified bucket, Elastic Transcoder returns an error.

    ", + "JobWatermark$InputKey": "

    The name of the .png or .jpg file that you want to use for the watermark. To determine which Amazon S3 bucket contains the specified file, Elastic Transcoder checks the pipeline specified by Pipeline; the Input Bucket object in that pipeline identifies the bucket.

    If the file name includes a prefix, for example, logos/128x64.png, include the prefix in the key. If the file isn't in the specified bucket, Elastic Transcoder returns an error.

    " + } + }, + "WatermarkSizingPolicy": { + "base": null, + "refs": { + "PresetWatermark$SizingPolicy": "

    A value that controls scaling of the watermark:

    • Fit: Elastic Transcoder scales the watermark so it matches the value that you specified in either MaxWidth or MaxHeight without exceeding the other value.
    • Stretch: Elastic Transcoder stretches the watermark to match the values that you specified for MaxWidth and MaxHeight. If the relative proportions of the watermark and the values of MaxWidth and MaxHeight are different, the watermark will be distorted.
    • ShrinkToFit: Elastic Transcoder scales the watermark down so that its dimensions match the values that you specified for at least one of MaxWidth and MaxHeight without exceeding either value. If you specify this option, Elastic Transcoder does not scale the watermark up.

    " + } + }, + "ZeroTo255String": { + "base": null, + "refs": { + "Encryption$InitializationVector": "

    The series of random bits created by a random bit generator, unique for every encryption operation, that you used to encrypt your input files or that you want Elastic Transcoder to use to encrypt your output files. The initialization vector must be base64-encoded, and it must be exactly 16 bytes long before being base64-encoded.

    ", + "HlsContentProtection$InitializationVector": "

    If Elastic Transcoder is generating your key for you, you must leave this field blank.

    The series of random bits created by a random bit generator, unique for every encryption operation, that you want Elastic Transcoder to use to encrypt your output files. The initialization vector must be base64-encoded, and it must be exactly 16 bytes before being base64-encoded.

    ", + "PlayReadyDrm$InitializationVector": "

    The series of random bits created by a random bit generator, unique for every encryption operation, that you want Elastic Transcoder to use to encrypt your files. The initialization vector must be base64-encoded, and it must be exactly 8 bytes long before being base64-encoded. If no initialization vector is provided, Elastic Transcoder generates one for you.
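
    Generating a suitable initialization vector needs only the Go standard library; a minimal sketch (pass 16 for Encryption and HLS content protection, 8 for PlayReady DRM):

    import (
        "crypto/rand"
        "encoding/base64"
    )

    // newIV returns size random bytes, base64-encoded as these fields require.
    func newIV(size int) (string, error) {
        iv := make([]byte, size)
        if _, err := rand.Read(iv); err != nil {
            return "", err
        }
        return base64.StdEncoding.EncodeToString(iv), nil
    }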

    " + } + }, + "ZeroTo512String": { + "base": null, + "refs": { + "HlsContentProtection$LicenseAcquisitionUrl": "

    The location of the license key required to decrypt your HLS playlist. The URL must be an absolute path, and is referenced in the URI attribute of the EXT-X-KEY metadata tag in the playlist file.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/elastictranscoder/2012-09-25/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/elastictranscoder/2012-09-25/paginators-1.json new file mode 100644 index 000000000..5a145d368 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/elastictranscoder/2012-09-25/paginators-1.json @@ -0,0 +1,24 @@ +{ + "pagination": { + "ListJobsByPipeline": { + "input_token": "PageToken", + "output_token": "NextPageToken", + "result_key": "Jobs" + }, + "ListJobsByStatus": { + "input_token": "PageToken", + "output_token": "NextPageToken", + "result_key": "Jobs" + }, + "ListPipelines": { + "input_token": "PageToken", + "output_token": "NextPageToken", + "result_key": "Pipelines" + }, + "ListPresets": { + "input_token": "PageToken", + "output_token": "NextPageToken", + "result_key": "Presets" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/elastictranscoder/2012-09-25/waiters-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/elastictranscoder/2012-09-25/waiters-2.json new file mode 100644 index 000000000..55c362807 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/elastictranscoder/2012-09-25/waiters-2.json @@ -0,0 +1,30 @@ +{ + "version": 2, + "waiters": { + "JobComplete": { + "delay": 30, + "operation": "ReadJob", + "maxAttempts": 120, + "acceptors": [ + { + "expected": "Complete", + "matcher": "path", + "state": "success", + "argument": "Job.Status" + }, + { + "expected": "Canceled", + "matcher": "path", + "state": "failure", + "argument": "Job.Status" + }, + { + "expected": "Error", + "matcher": "path", + "state": "failure", + "argument": "Job.Status" + } + ] + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/email/2010-12-01/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/email/2010-12-01/api-2.json new file mode 100644 index 000000000..7d21f193e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/email/2010-12-01/api-2.json @@ -0,0 +1,1791 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2010-12-01", + "endpointPrefix":"email", + "protocol":"query", + "serviceAbbreviation":"Amazon SES", + "serviceFullName":"Amazon Simple Email Service", + "signatureVersion":"v4", + "signingName":"ses", + "xmlNamespace":"http://ses.amazonaws.com/doc/2010-12-01/" + }, + "operations":{ + "CloneReceiptRuleSet":{ + "name":"CloneReceiptRuleSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CloneReceiptRuleSetRequest"}, + "output":{ + "shape":"CloneReceiptRuleSetResponse", + "resultWrapper":"CloneReceiptRuleSetResult" + }, + "errors":[ + {"shape":"RuleSetDoesNotExistException"}, + {"shape":"AlreadyExistsException"}, + {"shape":"LimitExceededException"} + ] + }, + "CreateReceiptFilter":{ + "name":"CreateReceiptFilter", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateReceiptFilterRequest"}, + "output":{ + "shape":"CreateReceiptFilterResponse", + "resultWrapper":"CreateReceiptFilterResult" + }, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"AlreadyExistsException"} + ] + }, + "CreateReceiptRule":{ + "name":"CreateReceiptRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateReceiptRuleRequest"}, + "output":{ + "shape":"CreateReceiptRuleResponse", + "resultWrapper":"CreateReceiptRuleResult" + }, + "errors":[ + {"shape":"InvalidSnsTopicException"}, + {"shape":"InvalidS3ConfigurationException"}, + {"shape":"InvalidLambdaFunctionException"}, + 
{"shape":"AlreadyExistsException"}, + {"shape":"RuleDoesNotExistException"}, + {"shape":"RuleSetDoesNotExistException"}, + {"shape":"LimitExceededException"} + ] + }, + "CreateReceiptRuleSet":{ + "name":"CreateReceiptRuleSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateReceiptRuleSetRequest"}, + "output":{ + "shape":"CreateReceiptRuleSetResponse", + "resultWrapper":"CreateReceiptRuleSetResult" + }, + "errors":[ + {"shape":"AlreadyExistsException"}, + {"shape":"LimitExceededException"} + ] + }, + "DeleteIdentity":{ + "name":"DeleteIdentity", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteIdentityRequest"}, + "output":{ + "shape":"DeleteIdentityResponse", + "resultWrapper":"DeleteIdentityResult" + } + }, + "DeleteIdentityPolicy":{ + "name":"DeleteIdentityPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteIdentityPolicyRequest"}, + "output":{ + "shape":"DeleteIdentityPolicyResponse", + "resultWrapper":"DeleteIdentityPolicyResult" + } + }, + "DeleteReceiptFilter":{ + "name":"DeleteReceiptFilter", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteReceiptFilterRequest"}, + "output":{ + "shape":"DeleteReceiptFilterResponse", + "resultWrapper":"DeleteReceiptFilterResult" + } + }, + "DeleteReceiptRule":{ + "name":"DeleteReceiptRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteReceiptRuleRequest"}, + "output":{ + "shape":"DeleteReceiptRuleResponse", + "resultWrapper":"DeleteReceiptRuleResult" + }, + "errors":[ + {"shape":"RuleSetDoesNotExistException"} + ] + }, + "DeleteReceiptRuleSet":{ + "name":"DeleteReceiptRuleSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteReceiptRuleSetRequest"}, + "output":{ + "shape":"DeleteReceiptRuleSetResponse", + "resultWrapper":"DeleteReceiptRuleSetResult" + }, + "errors":[ + {"shape":"CannotDeleteException"} + ] + }, + "DeleteVerifiedEmailAddress":{ + "name":"DeleteVerifiedEmailAddress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVerifiedEmailAddressRequest"} + }, + "DescribeActiveReceiptRuleSet":{ + "name":"DescribeActiveReceiptRuleSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeActiveReceiptRuleSetRequest"}, + "output":{ + "shape":"DescribeActiveReceiptRuleSetResponse", + "resultWrapper":"DescribeActiveReceiptRuleSetResult" + } + }, + "DescribeReceiptRule":{ + "name":"DescribeReceiptRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReceiptRuleRequest"}, + "output":{ + "shape":"DescribeReceiptRuleResponse", + "resultWrapper":"DescribeReceiptRuleResult" + }, + "errors":[ + {"shape":"RuleDoesNotExistException"}, + {"shape":"RuleSetDoesNotExistException"} + ] + }, + "DescribeReceiptRuleSet":{ + "name":"DescribeReceiptRuleSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReceiptRuleSetRequest"}, + "output":{ + "shape":"DescribeReceiptRuleSetResponse", + "resultWrapper":"DescribeReceiptRuleSetResult" + }, + "errors":[ + {"shape":"RuleSetDoesNotExistException"} + ] + }, + "GetIdentityDkimAttributes":{ + "name":"GetIdentityDkimAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetIdentityDkimAttributesRequest"}, + "output":{ + "shape":"GetIdentityDkimAttributesResponse", + "resultWrapper":"GetIdentityDkimAttributesResult" + } + }, + "GetIdentityMailFromDomainAttributes":{ + 
"name":"GetIdentityMailFromDomainAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetIdentityMailFromDomainAttributesRequest"}, + "output":{ + "shape":"GetIdentityMailFromDomainAttributesResponse", + "resultWrapper":"GetIdentityMailFromDomainAttributesResult" + } + }, + "GetIdentityNotificationAttributes":{ + "name":"GetIdentityNotificationAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetIdentityNotificationAttributesRequest"}, + "output":{ + "shape":"GetIdentityNotificationAttributesResponse", + "resultWrapper":"GetIdentityNotificationAttributesResult" + } + }, + "GetIdentityPolicies":{ + "name":"GetIdentityPolicies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetIdentityPoliciesRequest"}, + "output":{ + "shape":"GetIdentityPoliciesResponse", + "resultWrapper":"GetIdentityPoliciesResult" + } + }, + "GetIdentityVerificationAttributes":{ + "name":"GetIdentityVerificationAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetIdentityVerificationAttributesRequest"}, + "output":{ + "shape":"GetIdentityVerificationAttributesResponse", + "resultWrapper":"GetIdentityVerificationAttributesResult" + } + }, + "GetSendQuota":{ + "name":"GetSendQuota", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{ + "shape":"GetSendQuotaResponse", + "resultWrapper":"GetSendQuotaResult" + } + }, + "GetSendStatistics":{ + "name":"GetSendStatistics", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{ + "shape":"GetSendStatisticsResponse", + "resultWrapper":"GetSendStatisticsResult" + } + }, + "ListIdentities":{ + "name":"ListIdentities", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListIdentitiesRequest"}, + "output":{ + "shape":"ListIdentitiesResponse", + "resultWrapper":"ListIdentitiesResult" + } + }, + "ListIdentityPolicies":{ + "name":"ListIdentityPolicies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListIdentityPoliciesRequest"}, + "output":{ + "shape":"ListIdentityPoliciesResponse", + "resultWrapper":"ListIdentityPoliciesResult" + } + }, + "ListReceiptFilters":{ + "name":"ListReceiptFilters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListReceiptFiltersRequest"}, + "output":{ + "shape":"ListReceiptFiltersResponse", + "resultWrapper":"ListReceiptFiltersResult" + } + }, + "ListReceiptRuleSets":{ + "name":"ListReceiptRuleSets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListReceiptRuleSetsRequest"}, + "output":{ + "shape":"ListReceiptRuleSetsResponse", + "resultWrapper":"ListReceiptRuleSetsResult" + } + }, + "ListVerifiedEmailAddresses":{ + "name":"ListVerifiedEmailAddresses", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{ + "shape":"ListVerifiedEmailAddressesResponse", + "resultWrapper":"ListVerifiedEmailAddressesResult" + } + }, + "PutIdentityPolicy":{ + "name":"PutIdentityPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutIdentityPolicyRequest"}, + "output":{ + "shape":"PutIdentityPolicyResponse", + "resultWrapper":"PutIdentityPolicyResult" + }, + "errors":[ + {"shape":"InvalidPolicyException"} + ] + }, + "ReorderReceiptRuleSet":{ + "name":"ReorderReceiptRuleSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReorderReceiptRuleSetRequest"}, + "output":{ + "shape":"ReorderReceiptRuleSetResponse", + 
"resultWrapper":"ReorderReceiptRuleSetResult" + }, + "errors":[ + {"shape":"RuleSetDoesNotExistException"}, + {"shape":"RuleDoesNotExistException"} + ] + }, + "SendBounce":{ + "name":"SendBounce", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SendBounceRequest"}, + "output":{ + "shape":"SendBounceResponse", + "resultWrapper":"SendBounceResult" + }, + "errors":[ + {"shape":"MessageRejected"} + ] + }, + "SendEmail":{ + "name":"SendEmail", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SendEmailRequest"}, + "output":{ + "shape":"SendEmailResponse", + "resultWrapper":"SendEmailResult" + }, + "errors":[ + {"shape":"MessageRejected"}, + {"shape":"MailFromDomainNotVerifiedException"} + ] + }, + "SendRawEmail":{ + "name":"SendRawEmail", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SendRawEmailRequest"}, + "output":{ + "shape":"SendRawEmailResponse", + "resultWrapper":"SendRawEmailResult" + }, + "errors":[ + {"shape":"MessageRejected"}, + {"shape":"MailFromDomainNotVerifiedException"} + ] + }, + "SetActiveReceiptRuleSet":{ + "name":"SetActiveReceiptRuleSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetActiveReceiptRuleSetRequest"}, + "output":{ + "shape":"SetActiveReceiptRuleSetResponse", + "resultWrapper":"SetActiveReceiptRuleSetResult" + }, + "errors":[ + {"shape":"RuleSetDoesNotExistException"} + ] + }, + "SetIdentityDkimEnabled":{ + "name":"SetIdentityDkimEnabled", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetIdentityDkimEnabledRequest"}, + "output":{ + "shape":"SetIdentityDkimEnabledResponse", + "resultWrapper":"SetIdentityDkimEnabledResult" + } + }, + "SetIdentityFeedbackForwardingEnabled":{ + "name":"SetIdentityFeedbackForwardingEnabled", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetIdentityFeedbackForwardingEnabledRequest"}, + "output":{ + "shape":"SetIdentityFeedbackForwardingEnabledResponse", + "resultWrapper":"SetIdentityFeedbackForwardingEnabledResult" + } + }, + "SetIdentityHeadersInNotificationsEnabled":{ + "name":"SetIdentityHeadersInNotificationsEnabled", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetIdentityHeadersInNotificationsEnabledRequest"}, + "output":{ + "shape":"SetIdentityHeadersInNotificationsEnabledResponse", + "resultWrapper":"SetIdentityHeadersInNotificationsEnabledResult" + } + }, + "SetIdentityMailFromDomain":{ + "name":"SetIdentityMailFromDomain", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetIdentityMailFromDomainRequest"}, + "output":{ + "shape":"SetIdentityMailFromDomainResponse", + "resultWrapper":"SetIdentityMailFromDomainResult" + } + }, + "SetIdentityNotificationTopic":{ + "name":"SetIdentityNotificationTopic", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetIdentityNotificationTopicRequest"}, + "output":{ + "shape":"SetIdentityNotificationTopicResponse", + "resultWrapper":"SetIdentityNotificationTopicResult" + } + }, + "SetReceiptRulePosition":{ + "name":"SetReceiptRulePosition", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetReceiptRulePositionRequest"}, + "output":{ + "shape":"SetReceiptRulePositionResponse", + "resultWrapper":"SetReceiptRulePositionResult" + }, + "errors":[ + {"shape":"RuleSetDoesNotExistException"}, + {"shape":"RuleDoesNotExistException"} + ] + }, + "UpdateReceiptRule":{ + "name":"UpdateReceiptRule", + "http":{ + "method":"POST", + 
"requestUri":"/" + }, + "input":{"shape":"UpdateReceiptRuleRequest"}, + "output":{ + "shape":"UpdateReceiptRuleResponse", + "resultWrapper":"UpdateReceiptRuleResult" + }, + "errors":[ + {"shape":"InvalidSnsTopicException"}, + {"shape":"InvalidS3ConfigurationException"}, + {"shape":"InvalidLambdaFunctionException"}, + {"shape":"RuleSetDoesNotExistException"}, + {"shape":"RuleDoesNotExistException"}, + {"shape":"LimitExceededException"} + ] + }, + "VerifyDomainDkim":{ + "name":"VerifyDomainDkim", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"VerifyDomainDkimRequest"}, + "output":{ + "shape":"VerifyDomainDkimResponse", + "resultWrapper":"VerifyDomainDkimResult" + } + }, + "VerifyDomainIdentity":{ + "name":"VerifyDomainIdentity", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"VerifyDomainIdentityRequest"}, + "output":{ + "shape":"VerifyDomainIdentityResponse", + "resultWrapper":"VerifyDomainIdentityResult" + } + }, + "VerifyEmailAddress":{ + "name":"VerifyEmailAddress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"VerifyEmailAddressRequest"} + }, + "VerifyEmailIdentity":{ + "name":"VerifyEmailIdentity", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"VerifyEmailIdentityRequest"}, + "output":{ + "shape":"VerifyEmailIdentityResponse", + "resultWrapper":"VerifyEmailIdentityResult" + } + } + }, + "shapes":{ + "AddHeaderAction":{ + "type":"structure", + "required":[ + "HeaderName", + "HeaderValue" + ], + "members":{ + "HeaderName":{"shape":"HeaderName"}, + "HeaderValue":{"shape":"HeaderValue"} + } + }, + "Address":{"type":"string"}, + "AddressList":{ + "type":"list", + "member":{"shape":"Address"} + }, + "AlreadyExistsException":{ + "type":"structure", + "members":{ + "Name":{"shape":"RuleOrRuleSetName"} + }, + "error":{ + "code":"AlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "AmazonResourceName":{"type":"string"}, + "ArrivalDate":{"type":"timestamp"}, + "BehaviorOnMXFailure":{ + "type":"string", + "enum":[ + "UseDefaultValue", + "RejectMessage" + ] + }, + "Body":{ + "type":"structure", + "members":{ + "Text":{"shape":"Content"}, + "Html":{"shape":"Content"} + } + }, + "BounceAction":{ + "type":"structure", + "required":[ + "SmtpReplyCode", + "Message", + "Sender" + ], + "members":{ + "TopicArn":{"shape":"AmazonResourceName"}, + "SmtpReplyCode":{"shape":"BounceSmtpReplyCode"}, + "StatusCode":{"shape":"BounceStatusCode"}, + "Message":{"shape":"BounceMessage"}, + "Sender":{"shape":"Address"} + } + }, + "BounceMessage":{"type":"string"}, + "BounceSmtpReplyCode":{"type":"string"}, + "BounceStatusCode":{"type":"string"}, + "BounceType":{ + "type":"string", + "enum":[ + "DoesNotExist", + "MessageTooLarge", + "ExceededQuota", + "ContentRejected", + "Undefined", + "TemporaryFailure" + ] + }, + "BouncedRecipientInfo":{ + "type":"structure", + "required":["Recipient"], + "members":{ + "Recipient":{"shape":"Address"}, + "RecipientArn":{"shape":"AmazonResourceName"}, + "BounceType":{"shape":"BounceType"}, + "RecipientDsnFields":{"shape":"RecipientDsnFields"} + } + }, + "BouncedRecipientInfoList":{ + "type":"list", + "member":{"shape":"BouncedRecipientInfo"} + }, + "CannotDeleteException":{ + "type":"structure", + "members":{ + "Name":{"shape":"RuleOrRuleSetName"} + }, + "error":{ + "code":"CannotDelete", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Charset":{"type":"string"}, + "Cidr":{"type":"string"}, + 
"CloneReceiptRuleSetRequest":{ + "type":"structure", + "required":[ + "RuleSetName", + "OriginalRuleSetName" + ], + "members":{ + "RuleSetName":{"shape":"ReceiptRuleSetName"}, + "OriginalRuleSetName":{"shape":"ReceiptRuleSetName"} + } + }, + "CloneReceiptRuleSetResponse":{ + "type":"structure", + "members":{ + } + }, + "Content":{ + "type":"structure", + "required":["Data"], + "members":{ + "Data":{"shape":"MessageData"}, + "Charset":{"shape":"Charset"} + } + }, + "Counter":{"type":"long"}, + "CreateReceiptFilterRequest":{ + "type":"structure", + "required":["Filter"], + "members":{ + "Filter":{"shape":"ReceiptFilter"} + } + }, + "CreateReceiptFilterResponse":{ + "type":"structure", + "members":{ + } + }, + "CreateReceiptRuleRequest":{ + "type":"structure", + "required":[ + "RuleSetName", + "Rule" + ], + "members":{ + "RuleSetName":{"shape":"ReceiptRuleSetName"}, + "After":{"shape":"ReceiptRuleName"}, + "Rule":{"shape":"ReceiptRule"} + } + }, + "CreateReceiptRuleResponse":{ + "type":"structure", + "members":{ + } + }, + "CreateReceiptRuleSetRequest":{ + "type":"structure", + "required":["RuleSetName"], + "members":{ + "RuleSetName":{"shape":"ReceiptRuleSetName"} + } + }, + "CreateReceiptRuleSetResponse":{ + "type":"structure", + "members":{ + } + }, + "CustomMailFromStatus":{ + "type":"string", + "enum":[ + "Pending", + "Success", + "Failed", + "TemporaryFailure" + ] + }, + "DeleteIdentityPolicyRequest":{ + "type":"structure", + "required":[ + "Identity", + "PolicyName" + ], + "members":{ + "Identity":{"shape":"Identity"}, + "PolicyName":{"shape":"PolicyName"} + } + }, + "DeleteIdentityPolicyResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteIdentityRequest":{ + "type":"structure", + "required":["Identity"], + "members":{ + "Identity":{"shape":"Identity"} + } + }, + "DeleteIdentityResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteReceiptFilterRequest":{ + "type":"structure", + "required":["FilterName"], + "members":{ + "FilterName":{"shape":"ReceiptFilterName"} + } + }, + "DeleteReceiptFilterResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteReceiptRuleRequest":{ + "type":"structure", + "required":[ + "RuleSetName", + "RuleName" + ], + "members":{ + "RuleSetName":{"shape":"ReceiptRuleSetName"}, + "RuleName":{"shape":"ReceiptRuleName"} + } + }, + "DeleteReceiptRuleResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteReceiptRuleSetRequest":{ + "type":"structure", + "required":["RuleSetName"], + "members":{ + "RuleSetName":{"shape":"ReceiptRuleSetName"} + } + }, + "DeleteReceiptRuleSetResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteVerifiedEmailAddressRequest":{ + "type":"structure", + "required":["EmailAddress"], + "members":{ + "EmailAddress":{"shape":"Address"} + } + }, + "DescribeActiveReceiptRuleSetRequest":{ + "type":"structure", + "members":{ + } + }, + "DescribeActiveReceiptRuleSetResponse":{ + "type":"structure", + "members":{ + "Metadata":{"shape":"ReceiptRuleSetMetadata"}, + "Rules":{"shape":"ReceiptRulesList"} + } + }, + "DescribeReceiptRuleRequest":{ + "type":"structure", + "required":[ + "RuleSetName", + "RuleName" + ], + "members":{ + "RuleSetName":{"shape":"ReceiptRuleSetName"}, + "RuleName":{"shape":"ReceiptRuleName"} + } + }, + "DescribeReceiptRuleResponse":{ + "type":"structure", + "members":{ + "Rule":{"shape":"ReceiptRule"} + } + }, + "DescribeReceiptRuleSetRequest":{ + "type":"structure", + "required":["RuleSetName"], + "members":{ + "RuleSetName":{"shape":"ReceiptRuleSetName"} + } + }, + 
"DescribeReceiptRuleSetResponse":{ + "type":"structure", + "members":{ + "Metadata":{"shape":"ReceiptRuleSetMetadata"}, + "Rules":{"shape":"ReceiptRulesList"} + } + }, + "Destination":{ + "type":"structure", + "members":{ + "ToAddresses":{"shape":"AddressList"}, + "CcAddresses":{"shape":"AddressList"}, + "BccAddresses":{"shape":"AddressList"} + } + }, + "DiagnosticCode":{"type":"string"}, + "DkimAttributes":{ + "type":"map", + "key":{"shape":"Identity"}, + "value":{"shape":"IdentityDkimAttributes"} + }, + "Domain":{"type":"string"}, + "DsnAction":{ + "type":"string", + "enum":[ + "failed", + "delayed", + "delivered", + "relayed", + "expanded" + ] + }, + "DsnStatus":{"type":"string"}, + "Enabled":{"type":"boolean"}, + "Explanation":{"type":"string"}, + "ExtensionField":{ + "type":"structure", + "required":[ + "Name", + "Value" + ], + "members":{ + "Name":{"shape":"ExtensionFieldName"}, + "Value":{"shape":"ExtensionFieldValue"} + } + }, + "ExtensionFieldList":{ + "type":"list", + "member":{"shape":"ExtensionField"} + }, + "ExtensionFieldName":{"type":"string"}, + "ExtensionFieldValue":{"type":"string"}, + "GetIdentityDkimAttributesRequest":{ + "type":"structure", + "required":["Identities"], + "members":{ + "Identities":{"shape":"IdentityList"} + } + }, + "GetIdentityDkimAttributesResponse":{ + "type":"structure", + "required":["DkimAttributes"], + "members":{ + "DkimAttributes":{"shape":"DkimAttributes"} + } + }, + "GetIdentityMailFromDomainAttributesRequest":{ + "type":"structure", + "required":["Identities"], + "members":{ + "Identities":{"shape":"IdentityList"} + } + }, + "GetIdentityMailFromDomainAttributesResponse":{ + "type":"structure", + "required":["MailFromDomainAttributes"], + "members":{ + "MailFromDomainAttributes":{"shape":"MailFromDomainAttributes"} + } + }, + "GetIdentityNotificationAttributesRequest":{ + "type":"structure", + "required":["Identities"], + "members":{ + "Identities":{"shape":"IdentityList"} + } + }, + "GetIdentityNotificationAttributesResponse":{ + "type":"structure", + "required":["NotificationAttributes"], + "members":{ + "NotificationAttributes":{"shape":"NotificationAttributes"} + } + }, + "GetIdentityPoliciesRequest":{ + "type":"structure", + "required":[ + "Identity", + "PolicyNames" + ], + "members":{ + "Identity":{"shape":"Identity"}, + "PolicyNames":{"shape":"PolicyNameList"} + } + }, + "GetIdentityPoliciesResponse":{ + "type":"structure", + "required":["Policies"], + "members":{ + "Policies":{"shape":"PolicyMap"} + } + }, + "GetIdentityVerificationAttributesRequest":{ + "type":"structure", + "required":["Identities"], + "members":{ + "Identities":{"shape":"IdentityList"} + } + }, + "GetIdentityVerificationAttributesResponse":{ + "type":"structure", + "required":["VerificationAttributes"], + "members":{ + "VerificationAttributes":{"shape":"VerificationAttributes"} + } + }, + "GetSendQuotaResponse":{ + "type":"structure", + "members":{ + "Max24HourSend":{"shape":"Max24HourSend"}, + "MaxSendRate":{"shape":"MaxSendRate"}, + "SentLast24Hours":{"shape":"SentLast24Hours"} + } + }, + "GetSendStatisticsResponse":{ + "type":"structure", + "members":{ + "SendDataPoints":{"shape":"SendDataPointList"} + } + }, + "HeaderName":{"type":"string"}, + "HeaderValue":{"type":"string"}, + "Identity":{"type":"string"}, + "IdentityDkimAttributes":{ + "type":"structure", + "required":[ + "DkimEnabled", + "DkimVerificationStatus" + ], + "members":{ + "DkimEnabled":{"shape":"Enabled"}, + "DkimVerificationStatus":{"shape":"VerificationStatus"}, + 
"DkimTokens":{"shape":"VerificationTokenList"} + } + }, + "IdentityList":{ + "type":"list", + "member":{"shape":"Identity"} + }, + "IdentityMailFromDomainAttributes":{ + "type":"structure", + "required":[ + "MailFromDomain", + "MailFromDomainStatus", + "BehaviorOnMXFailure" + ], + "members":{ + "MailFromDomain":{"shape":"MailFromDomainName"}, + "MailFromDomainStatus":{"shape":"CustomMailFromStatus"}, + "BehaviorOnMXFailure":{"shape":"BehaviorOnMXFailure"} + } + }, + "IdentityNotificationAttributes":{ + "type":"structure", + "required":[ + "BounceTopic", + "ComplaintTopic", + "DeliveryTopic", + "ForwardingEnabled" + ], + "members":{ + "BounceTopic":{"shape":"NotificationTopic"}, + "ComplaintTopic":{"shape":"NotificationTopic"}, + "DeliveryTopic":{"shape":"NotificationTopic"}, + "ForwardingEnabled":{"shape":"Enabled"}, + "HeadersInBounceNotificationsEnabled":{"shape":"Enabled"}, + "HeadersInComplaintNotificationsEnabled":{"shape":"Enabled"}, + "HeadersInDeliveryNotificationsEnabled":{"shape":"Enabled"} + } + }, + "IdentityType":{ + "type":"string", + "enum":[ + "EmailAddress", + "Domain" + ] + }, + "IdentityVerificationAttributes":{ + "type":"structure", + "required":["VerificationStatus"], + "members":{ + "VerificationStatus":{"shape":"VerificationStatus"}, + "VerificationToken":{"shape":"VerificationToken"} + } + }, + "InvalidLambdaFunctionException":{ + "type":"structure", + "members":{ + "FunctionArn":{"shape":"AmazonResourceName"} + }, + "error":{ + "code":"InvalidLambdaFunction", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidPolicyException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidPolicy", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidS3ConfigurationException":{ + "type":"structure", + "members":{ + "Bucket":{"shape":"S3BucketName"} + }, + "error":{ + "code":"InvalidS3Configuration", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidSnsTopicException":{ + "type":"structure", + "members":{ + "Topic":{"shape":"AmazonResourceName"} + }, + "error":{ + "code":"InvalidSnsTopic", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvocationType":{ + "type":"string", + "enum":[ + "Event", + "RequestResponse" + ] + }, + "LambdaAction":{ + "type":"structure", + "required":["FunctionArn"], + "members":{ + "TopicArn":{"shape":"AmazonResourceName"}, + "FunctionArn":{"shape":"AmazonResourceName"}, + "InvocationType":{"shape":"InvocationType"} + } + }, + "LastAttemptDate":{"type":"timestamp"}, + "LimitExceededException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"LimitExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ListIdentitiesRequest":{ + "type":"structure", + "members":{ + "IdentityType":{"shape":"IdentityType"}, + "NextToken":{"shape":"NextToken"}, + "MaxItems":{"shape":"MaxItems"} + } + }, + "ListIdentitiesResponse":{ + "type":"structure", + "required":["Identities"], + "members":{ + "Identities":{"shape":"IdentityList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListIdentityPoliciesRequest":{ + "type":"structure", + "required":["Identity"], + "members":{ + "Identity":{"shape":"Identity"} + } + }, + "ListIdentityPoliciesResponse":{ + "type":"structure", + "required":["PolicyNames"], + "members":{ + "PolicyNames":{"shape":"PolicyNameList"} + } + }, + "ListReceiptFiltersRequest":{ + "type":"structure", + "members":{ + } + }, + "ListReceiptFiltersResponse":{ + 
"type":"structure", + "members":{ + "Filters":{"shape":"ReceiptFilterList"} + } + }, + "ListReceiptRuleSetsRequest":{ + "type":"structure", + "members":{ + "NextToken":{"shape":"NextToken"} + } + }, + "ListReceiptRuleSetsResponse":{ + "type":"structure", + "members":{ + "RuleSets":{"shape":"ReceiptRuleSetsLists"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListVerifiedEmailAddressesResponse":{ + "type":"structure", + "members":{ + "VerifiedEmailAddresses":{"shape":"AddressList"} + } + }, + "MailFromDomainAttributes":{ + "type":"map", + "key":{"shape":"Identity"}, + "value":{"shape":"IdentityMailFromDomainAttributes"} + }, + "MailFromDomainName":{"type":"string"}, + "MailFromDomainNotVerifiedException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"MailFromDomainNotVerifiedException", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Max24HourSend":{"type":"double"}, + "MaxItems":{"type":"integer"}, + "MaxSendRate":{"type":"double"}, + "Message":{ + "type":"structure", + "required":[ + "Subject", + "Body" + ], + "members":{ + "Subject":{"shape":"Content"}, + "Body":{"shape":"Body"} + } + }, + "MessageData":{"type":"string"}, + "MessageDsn":{ + "type":"structure", + "required":["ReportingMta"], + "members":{ + "ReportingMta":{"shape":"ReportingMta"}, + "ArrivalDate":{"shape":"ArrivalDate"}, + "ExtensionFields":{"shape":"ExtensionFieldList"} + } + }, + "MessageId":{"type":"string"}, + "MessageRejected":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"MessageRejected", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "NextToken":{"type":"string"}, + "NotificationAttributes":{ + "type":"map", + "key":{"shape":"Identity"}, + "value":{"shape":"IdentityNotificationAttributes"} + }, + "NotificationTopic":{"type":"string"}, + "NotificationType":{ + "type":"string", + "enum":[ + "Bounce", + "Complaint", + "Delivery" + ] + }, + "Policy":{ + "type":"string", + "min":1 + }, + "PolicyMap":{ + "type":"map", + "key":{"shape":"PolicyName"}, + "value":{"shape":"Policy"} + }, + "PolicyName":{ + "type":"string", + "max":64, + "min":1 + }, + "PolicyNameList":{ + "type":"list", + "member":{"shape":"PolicyName"} + }, + "PutIdentityPolicyRequest":{ + "type":"structure", + "required":[ + "Identity", + "PolicyName", + "Policy" + ], + "members":{ + "Identity":{"shape":"Identity"}, + "PolicyName":{"shape":"PolicyName"}, + "Policy":{"shape":"Policy"} + } + }, + "PutIdentityPolicyResponse":{ + "type":"structure", + "members":{ + } + }, + "RawMessage":{ + "type":"structure", + "required":["Data"], + "members":{ + "Data":{"shape":"RawMessageData"} + } + }, + "RawMessageData":{"type":"blob"}, + "ReceiptAction":{ + "type":"structure", + "members":{ + "S3Action":{"shape":"S3Action"}, + "BounceAction":{"shape":"BounceAction"}, + "WorkmailAction":{"shape":"WorkmailAction"}, + "LambdaAction":{"shape":"LambdaAction"}, + "StopAction":{"shape":"StopAction"}, + "AddHeaderAction":{"shape":"AddHeaderAction"}, + "SNSAction":{"shape":"SNSAction"} + } + }, + "ReceiptActionsList":{ + "type":"list", + "member":{"shape":"ReceiptAction"} + }, + "ReceiptFilter":{ + "type":"structure", + "required":[ + "Name", + "IpFilter" + ], + "members":{ + "Name":{"shape":"ReceiptFilterName"}, + "IpFilter":{"shape":"ReceiptIpFilter"} + } + }, + "ReceiptFilterList":{ + "type":"list", + "member":{"shape":"ReceiptFilter"} + }, + "ReceiptFilterName":{"type":"string"}, + "ReceiptFilterPolicy":{ + "type":"string", + "enum":[ + "Block", + "Allow" + ] + }, + 
"ReceiptIpFilter":{ + "type":"structure", + "required":[ + "Policy", + "Cidr" + ], + "members":{ + "Policy":{"shape":"ReceiptFilterPolicy"}, + "Cidr":{"shape":"Cidr"} + } + }, + "ReceiptRule":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"ReceiptRuleName"}, + "Enabled":{"shape":"Enabled"}, + "TlsPolicy":{"shape":"TlsPolicy"}, + "Recipients":{"shape":"RecipientsList"}, + "Actions":{"shape":"ReceiptActionsList"}, + "ScanEnabled":{"shape":"Enabled"} + } + }, + "ReceiptRuleName":{"type":"string"}, + "ReceiptRuleNamesList":{ + "type":"list", + "member":{"shape":"ReceiptRuleName"} + }, + "ReceiptRuleSetMetadata":{ + "type":"structure", + "members":{ + "Name":{"shape":"ReceiptRuleSetName"}, + "CreatedTimestamp":{"shape":"Timestamp"} + } + }, + "ReceiptRuleSetName":{"type":"string"}, + "ReceiptRuleSetsLists":{ + "type":"list", + "member":{"shape":"ReceiptRuleSetMetadata"} + }, + "ReceiptRulesList":{ + "type":"list", + "member":{"shape":"ReceiptRule"} + }, + "Recipient":{"type":"string"}, + "RecipientDsnFields":{ + "type":"structure", + "required":[ + "Action", + "Status" + ], + "members":{ + "FinalRecipient":{"shape":"Address"}, + "Action":{"shape":"DsnAction"}, + "RemoteMta":{"shape":"RemoteMta"}, + "Status":{"shape":"DsnStatus"}, + "DiagnosticCode":{"shape":"DiagnosticCode"}, + "LastAttemptDate":{"shape":"LastAttemptDate"}, + "ExtensionFields":{"shape":"ExtensionFieldList"} + } + }, + "RecipientsList":{ + "type":"list", + "member":{"shape":"Recipient"} + }, + "RemoteMta":{"type":"string"}, + "ReorderReceiptRuleSetRequest":{ + "type":"structure", + "required":[ + "RuleSetName", + "RuleNames" + ], + "members":{ + "RuleSetName":{"shape":"ReceiptRuleSetName"}, + "RuleNames":{"shape":"ReceiptRuleNamesList"} + } + }, + "ReorderReceiptRuleSetResponse":{ + "type":"structure", + "members":{ + } + }, + "ReportingMta":{"type":"string"}, + "RuleDoesNotExistException":{ + "type":"structure", + "members":{ + "Name":{"shape":"RuleOrRuleSetName"} + }, + "error":{ + "code":"RuleDoesNotExist", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "RuleOrRuleSetName":{"type":"string"}, + "RuleSetDoesNotExistException":{ + "type":"structure", + "members":{ + "Name":{"shape":"RuleOrRuleSetName"} + }, + "error":{ + "code":"RuleSetDoesNotExist", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "S3Action":{ + "type":"structure", + "required":["BucketName"], + "members":{ + "TopicArn":{"shape":"AmazonResourceName"}, + "BucketName":{"shape":"S3BucketName"}, + "ObjectKeyPrefix":{"shape":"S3KeyPrefix"}, + "KmsKeyArn":{"shape":"AmazonResourceName"} + } + }, + "S3BucketName":{"type":"string"}, + "S3KeyPrefix":{"type":"string"}, + "SNSAction":{ + "type":"structure", + "required":["TopicArn"], + "members":{ + "TopicArn":{"shape":"AmazonResourceName"}, + "Encoding":{"shape":"SNSActionEncoding"} + } + }, + "SNSActionEncoding":{ + "type":"string", + "enum":[ + "UTF-8", + "Base64" + ] + }, + "SendBounceRequest":{ + "type":"structure", + "required":[ + "OriginalMessageId", + "BounceSender", + "BouncedRecipientInfoList" + ], + "members":{ + "OriginalMessageId":{"shape":"MessageId"}, + "BounceSender":{"shape":"Address"}, + "Explanation":{"shape":"Explanation"}, + "MessageDsn":{"shape":"MessageDsn"}, + "BouncedRecipientInfoList":{"shape":"BouncedRecipientInfoList"}, + "BounceSenderArn":{"shape":"AmazonResourceName"} + } + }, + "SendBounceResponse":{ + "type":"structure", + "members":{ + "MessageId":{"shape":"MessageId"} + } + }, + "SendDataPoint":{ + 
"type":"structure", + "members":{ + "Timestamp":{"shape":"Timestamp"}, + "DeliveryAttempts":{"shape":"Counter"}, + "Bounces":{"shape":"Counter"}, + "Complaints":{"shape":"Counter"}, + "Rejects":{"shape":"Counter"} + } + }, + "SendDataPointList":{ + "type":"list", + "member":{"shape":"SendDataPoint"} + }, + "SendEmailRequest":{ + "type":"structure", + "required":[ + "Source", + "Destination", + "Message" + ], + "members":{ + "Source":{"shape":"Address"}, + "Destination":{"shape":"Destination"}, + "Message":{"shape":"Message"}, + "ReplyToAddresses":{"shape":"AddressList"}, + "ReturnPath":{"shape":"Address"}, + "SourceArn":{"shape":"AmazonResourceName"}, + "ReturnPathArn":{"shape":"AmazonResourceName"} + } + }, + "SendEmailResponse":{ + "type":"structure", + "required":["MessageId"], + "members":{ + "MessageId":{"shape":"MessageId"} + } + }, + "SendRawEmailRequest":{ + "type":"structure", + "required":["RawMessage"], + "members":{ + "Source":{"shape":"Address"}, + "Destinations":{"shape":"AddressList"}, + "RawMessage":{"shape":"RawMessage"}, + "FromArn":{"shape":"AmazonResourceName"}, + "SourceArn":{"shape":"AmazonResourceName"}, + "ReturnPathArn":{"shape":"AmazonResourceName"} + } + }, + "SendRawEmailResponse":{ + "type":"structure", + "required":["MessageId"], + "members":{ + "MessageId":{"shape":"MessageId"} + } + }, + "SentLast24Hours":{"type":"double"}, + "SetActiveReceiptRuleSetRequest":{ + "type":"structure", + "members":{ + "RuleSetName":{"shape":"ReceiptRuleSetName"} + } + }, + "SetActiveReceiptRuleSetResponse":{ + "type":"structure", + "members":{ + } + }, + "SetIdentityDkimEnabledRequest":{ + "type":"structure", + "required":[ + "Identity", + "DkimEnabled" + ], + "members":{ + "Identity":{"shape":"Identity"}, + "DkimEnabled":{"shape":"Enabled"} + } + }, + "SetIdentityDkimEnabledResponse":{ + "type":"structure", + "members":{ + } + }, + "SetIdentityFeedbackForwardingEnabledRequest":{ + "type":"structure", + "required":[ + "Identity", + "ForwardingEnabled" + ], + "members":{ + "Identity":{"shape":"Identity"}, + "ForwardingEnabled":{"shape":"Enabled"} + } + }, + "SetIdentityFeedbackForwardingEnabledResponse":{ + "type":"structure", + "members":{ + } + }, + "SetIdentityHeadersInNotificationsEnabledRequest":{ + "type":"structure", + "required":[ + "Identity", + "NotificationType", + "Enabled" + ], + "members":{ + "Identity":{"shape":"Identity"}, + "NotificationType":{"shape":"NotificationType"}, + "Enabled":{"shape":"Enabled"} + } + }, + "SetIdentityHeadersInNotificationsEnabledResponse":{ + "type":"structure", + "members":{ + } + }, + "SetIdentityMailFromDomainRequest":{ + "type":"structure", + "required":["Identity"], + "members":{ + "Identity":{"shape":"Identity"}, + "MailFromDomain":{"shape":"MailFromDomainName"}, + "BehaviorOnMXFailure":{"shape":"BehaviorOnMXFailure"} + } + }, + "SetIdentityMailFromDomainResponse":{ + "type":"structure", + "members":{ + } + }, + "SetIdentityNotificationTopicRequest":{ + "type":"structure", + "required":[ + "Identity", + "NotificationType" + ], + "members":{ + "Identity":{"shape":"Identity"}, + "NotificationType":{"shape":"NotificationType"}, + "SnsTopic":{"shape":"NotificationTopic"} + } + }, + "SetIdentityNotificationTopicResponse":{ + "type":"structure", + "members":{ + } + }, + "SetReceiptRulePositionRequest":{ + "type":"structure", + "required":[ + "RuleSetName", + "RuleName" + ], + "members":{ + "RuleSetName":{"shape":"ReceiptRuleSetName"}, + "RuleName":{"shape":"ReceiptRuleName"}, + "After":{"shape":"ReceiptRuleName"} + } + }, + 
"SetReceiptRulePositionResponse":{ + "type":"structure", + "members":{ + } + }, + "StopAction":{ + "type":"structure", + "required":["Scope"], + "members":{ + "Scope":{"shape":"StopScope"}, + "TopicArn":{"shape":"AmazonResourceName"} + } + }, + "StopScope":{ + "type":"string", + "enum":["RuleSet"] + }, + "Timestamp":{"type":"timestamp"}, + "TlsPolicy":{ + "type":"string", + "enum":[ + "Require", + "Optional" + ] + }, + "UpdateReceiptRuleRequest":{ + "type":"structure", + "required":[ + "RuleSetName", + "Rule" + ], + "members":{ + "RuleSetName":{"shape":"ReceiptRuleSetName"}, + "Rule":{"shape":"ReceiptRule"} + } + }, + "UpdateReceiptRuleResponse":{ + "type":"structure", + "members":{ + } + }, + "VerificationAttributes":{ + "type":"map", + "key":{"shape":"Identity"}, + "value":{"shape":"IdentityVerificationAttributes"} + }, + "VerificationStatus":{ + "type":"string", + "enum":[ + "Pending", + "Success", + "Failed", + "TemporaryFailure", + "NotStarted" + ] + }, + "VerificationToken":{"type":"string"}, + "VerificationTokenList":{ + "type":"list", + "member":{"shape":"VerificationToken"} + }, + "VerifyDomainDkimRequest":{ + "type":"structure", + "required":["Domain"], + "members":{ + "Domain":{"shape":"Domain"} + } + }, + "VerifyDomainDkimResponse":{ + "type":"structure", + "required":["DkimTokens"], + "members":{ + "DkimTokens":{"shape":"VerificationTokenList"} + } + }, + "VerifyDomainIdentityRequest":{ + "type":"structure", + "required":["Domain"], + "members":{ + "Domain":{"shape":"Domain"} + } + }, + "VerifyDomainIdentityResponse":{ + "type":"structure", + "required":["VerificationToken"], + "members":{ + "VerificationToken":{"shape":"VerificationToken"} + } + }, + "VerifyEmailAddressRequest":{ + "type":"structure", + "required":["EmailAddress"], + "members":{ + "EmailAddress":{"shape":"Address"} + } + }, + "VerifyEmailIdentityRequest":{ + "type":"structure", + "required":["EmailAddress"], + "members":{ + "EmailAddress":{"shape":"Address"} + } + }, + "VerifyEmailIdentityResponse":{ + "type":"structure", + "members":{ + } + }, + "WorkmailAction":{ + "type":"structure", + "required":["OrganizationArn"], + "members":{ + "TopicArn":{"shape":"AmazonResourceName"}, + "OrganizationArn":{"shape":"AmazonResourceName"} + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/email/2010-12-01/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/email/2010-12-01/docs-2.json new file mode 100644 index 000000000..73b9ca79e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/email/2010-12-01/docs-2.json @@ -0,0 +1,1212 @@ +{ + "version": "2.0", + "service": "Amazon Simple Email Service

    This is the API Reference for Amazon Simple Email Service (Amazon SES). This documentation is intended to be used in conjunction with the Amazon SES Developer Guide.

    For a list of Amazon SES endpoints to use in service requests, see Regions and Amazon SES in the Amazon SES Developer Guide.

    ", + "operations": { + "CloneReceiptRuleSet": "

    Creates a receipt rule set by cloning an existing one. All receipt rules and configurations are copied to the new receipt rule set and are completely independent of the source rule set.

    For information about setting up rule sets, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.
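
    A minimal Go sketch of this call, assuming the client that aws-sdk-go generates from this model (service/ses); svc is an already-constructed *ses.SES and the rule-set names are illustrative:

    package example

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ses"
    )

    // cloneRuleSet copies every rule in src into a new rule set named dst,
    // which is thereafter independent of the original.
    func cloneRuleSet(svc *ses.SES, dst, src string) error {
        _, err := svc.CloneReceiptRuleSet(&ses.CloneReceiptRuleSetInput{
            RuleSetName:         aws.String(dst),
            OriginalRuleSetName: aws.String(src),
        })
        return err
    }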

    ", + "CreateReceiptFilter": "

    Creates a new IP address filter.

    For information about setting up IP address filters, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.
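
    As an illustration, creating a filter that blocks a CIDR range could look like the sketch below (generated Go client assumed; the filter name is a placeholder):

    package example

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ses"
    )

    // blockCidr creates an IP address filter that refuses mail from cidr.
    func blockCidr(svc *ses.SES, cidr string) error {
        _, err := svc.CreateReceiptFilter(&ses.CreateReceiptFilterInput{
            Filter: &ses.ReceiptFilter{
                Name: aws.String("block-bad-sender"), // hypothetical name
                IpFilter: &ses.ReceiptIpFilter{
                    Policy: aws.String("Block"), // the other enum value is "Allow"
                    Cidr:   aws.String(cidr),
                },
            },
        })
        return err
    }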

    ", + "CreateReceiptRule": "

    Creates a receipt rule.

    For information about setting up receipt rules, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.
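
    A hedged sketch of creating a rule that stores matching mail in Amazon S3, again via the generated Go client; the rule, rule-set, and bucket names are placeholders, and the optional After member of the request positions the new rule relative to an existing one:

    package example

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ses"
    )

    // saveToS3 adds an enabled rule that writes mail for recipient to bucket.
    func saveToS3(svc *ses.SES, ruleSet, recipient, bucket string) error {
        _, err := svc.CreateReceiptRule(&ses.CreateReceiptRuleInput{
            RuleSetName: aws.String(ruleSet),
            Rule: &ses.ReceiptRule{
                Name:       aws.String("save-to-s3"), // hypothetical name
                Enabled:    aws.Bool(true),
                Recipients: []*string{aws.String(recipient)},
                Actions: []*ses.ReceiptAction{{
                    S3Action: &ses.S3Action{BucketName: aws.String(bucket)},
                }},
            },
        })
        return err
    }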

    ", + "CreateReceiptRuleSet": "

    Creates an empty receipt rule set.

    For information about setting up receipt rule sets, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.

    ", + "DeleteIdentity": "

    Deletes the specified identity (an email address or a domain) from the list of verified identities.

    This action is throttled at one request per second.

    ", + "DeleteIdentityPolicy": "

    Deletes the specified sending authorization policy for the given identity (an email address or a domain). This API returns successfully even if a policy with the specified name does not exist.

    This API is for the identity owner only. If you have not verified the identity, this API will return an error.

    Sending authorization is a feature that enables an identity owner to authorize other senders to use its identities. For information about using sending authorization, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.

    ", + "DeleteReceiptFilter": "

    Deletes the specified IP address filter.

    For information about managing IP address filters, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.

    ", + "DeleteReceiptRule": "

    Deletes the specified receipt rule.

    For information about managing receipt rules, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.

    ", + "DeleteReceiptRuleSet": "

    Deletes the specified receipt rule set and all of the receipt rules it contains.

    The currently active rule set cannot be deleted.

    For information about managing receipt rule sets, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.

    ", + "DeleteVerifiedEmailAddress": "

    Deletes the specified email address from the list of verified addresses.

    The DeleteVerifiedEmailAddress action is deprecated as of the May 15, 2012 release of Domain Verification. The DeleteIdentity action is now preferred.

    This action is throttled at one request per second.

    ", + "DescribeActiveReceiptRuleSet": "

    Returns the metadata and receipt rules for the receipt rule set that is currently active.

    For information about setting up receipt rule sets, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.

    ", + "DescribeReceiptRule": "

    Returns the details of the specified receipt rule.

    For information about setting up receipt rules, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.

    ", + "DescribeReceiptRuleSet": "

    Returns the details of the specified receipt rule set.

    For information about managing receipt rule sets, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.

    ", + "GetIdentityDkimAttributes": "

    Returns the current status of Easy DKIM signing for an entity. For domain name identities, this action also returns the DKIM tokens that are required for Easy DKIM signing, and whether Amazon SES has successfully verified that these tokens have been published.

    This action takes a list of identities as input and returns the following information for each:

    • Whether Easy DKIM signing is enabled or disabled.

    • A set of DKIM tokens that represent the identity. If the identity is an email address, the tokens represent the domain of that address.

    • Whether Amazon SES has successfully verified the DKIM tokens published in the domain's DNS. This information is only returned for domain name identities, not for email addresses.

    This action is throttled at one request per second and can only get DKIM attributes for up to 100 identities at a time.

    For more information about creating DNS records using DKIM tokens, go to the Amazon SES Developer Guide.
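
    Since the response is a map from identity to attributes, a sketch of reading it with the generated Go client (helper name illustrative):

    package example

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ses"
    )

    // dkimStatus prints the Easy DKIM state for up to 100 identities.
    func dkimStatus(svc *ses.SES, identities []string) error {
        out, err := svc.GetIdentityDkimAttributes(&ses.GetIdentityDkimAttributesInput{
            Identities: aws.StringSlice(identities),
        })
        if err != nil {
            return err
        }
        for id, attr := range out.DkimAttributes {
            fmt.Printf("%s: enabled=%t status=%s\n", id,
                aws.BoolValue(attr.DkimEnabled),
                aws.StringValue(attr.DkimVerificationStatus))
        }
        return nil
    }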

    ", + "GetIdentityMailFromDomainAttributes": "

    Returns the custom MAIL FROM attributes for a list of identities (email addresses and/or domains).

    This action is throttled at one request per second and can only get custom MAIL FROM attributes for up to 100 identities at a time.

    ", + "GetIdentityNotificationAttributes": "

    Given a list of verified identities (email addresses and/or domains), returns a structure describing identity notification attributes.

    This action is throttled at one request per second and can only get notification attributes for up to 100 identities at a time.

    For more information about using notifications with Amazon SES, see the Amazon SES Developer Guide.

    ", + "GetIdentityPolicies": "

    Returns the requested sending authorization policies for the given identity (an email address or a domain). The policies are returned as a map of policy names to policy contents. You can retrieve a maximum of 20 policies at a time.

    This API is for the identity owner only. If you have not verified the identity, this API will return an error.

    Sending authorization is a feature that enables an identity owner to authorize other senders to use its identities. For information about using sending authorization, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.

    ", + "GetIdentityVerificationAttributes": "

    Given a list of identities (email addresses and/or domains), returns the verification status and (for domain identities) the verification token for each identity.

    This action is throttled at one request per second and can only get verification attributes for up to 100 identities at a time.

    ", + "GetSendQuota": "

    Returns the user's current sending limits.

    This action is throttled at one request per second.
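
    A sketch of reading those limits with the generated Go client:

    package example

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ses"
    )

    // printQuota shows the account's sending limits and last-24-hour usage.
    func printQuota(svc *ses.SES) error {
        out, err := svc.GetSendQuota(&ses.GetSendQuotaInput{})
        if err != nil {
            return err
        }
        fmt.Printf("max per 24h: %.0f, max rate/s: %.0f, sent last 24h: %.0f\n",
            aws.Float64Value(out.Max24HourSend),
            aws.Float64Value(out.MaxSendRate),
            aws.Float64Value(out.SentLast24Hours))
        return nil
    }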

    ", + "GetSendStatistics": "

    Returns the user's sending statistics. The result is a list of data points, representing the last two weeks of sending activity.

    Each data point in the list contains statistics for a 15-minute interval.

    This action is throttled at one request per second.

    ", + "ListIdentities": "

    Returns a list containing all of the identities (email addresses and domains) for your AWS account, regardless of verification status.

    This action is throttled at one request per second.
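
    Because the response pages with NextToken (see the request shape above), a sketch of draining every page with the generated Go client:

    package example

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ses"
    )

    // listAllIdentities follows NextToken until no pages remain.
    func listAllIdentities(svc *ses.SES) ([]string, error) {
        var all []string
        input := &ses.ListIdentitiesInput{MaxItems: aws.Int64(100)}
        for {
            out, err := svc.ListIdentities(input)
            if err != nil {
                return nil, err
            }
            all = append(all, aws.StringValueSlice(out.Identities)...)
            if aws.StringValue(out.NextToken) == "" {
                return all, nil
            }
            input.NextToken = out.NextToken
        }
    }

    The generated client also derives a ListIdentitiesPages helper from the same pagination metadata, which wraps this loop.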

    ", + "ListIdentityPolicies": "

    Returns a list of sending authorization policies that are attached to the given identity (an email address or a domain). This API returns only a list. If you want the actual policy content, you can use GetIdentityPolicies.

    This API is for the identity owner only. If you have not verified the identity, this API will return an error.

    Sending authorization is a feature that enables an identity owner to authorize other senders to use its identities. For information about using sending authorization, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.

    ", + "ListReceiptFilters": "

    Lists the IP address filters associated with your AWS account.

    For information about managing IP address filters, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.

    ", + "ListReceiptRuleSets": "

    Lists the receipt rule sets that exist under your AWS account. If there are additional receipt rule sets to be retrieved, you will receive a NextToken that you can provide to the next call to ListReceiptRuleSets to retrieve the additional entries.

    For information about managing receipt rule sets, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.

    ", + "ListVerifiedEmailAddresses": "

    Returns a list containing all of the email addresses that have been verified.

    The ListVerifiedEmailAddresses action is deprecated as of the May 15, 2012 release of Domain Verification. The ListIdentities action is now preferred.

    This action is throttled at one request per second.

    ", + "PutIdentityPolicy": "

    Adds or updates a sending authorization policy for the specified identity (an email address or a domain).

    This API is for the identity owner only. If you have not verified the identity, this API will return an error.

    Sending authorization is a feature that enables an identity owner to authorize other senders to use its identities. For information about using sending authorization, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.

    ", + "ReorderReceiptRuleSet": "

    Reorders the receipt rules within a receipt rule set.

    All of the rules in the rule set must be represented in this request. That is, this API will return an error if the reorder request doesn't explicitly position all of the rules.

    For information about managing receipt rule sets, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.
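
    A sketch with the generated Go client; note that ordered must name every rule in the set, per the paragraph above:

    package example

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ses"
    )

    // reorder repositions the rules of ruleSet into the given order.
    func reorder(svc *ses.SES, ruleSet string, ordered []string) error {
        _, err := svc.ReorderReceiptRuleSet(&ses.ReorderReceiptRuleSetInput{
            RuleSetName: aws.String(ruleSet),
            RuleNames:   aws.StringSlice(ordered),
        })
        return err
    }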

    ", + "SendBounce": "

    Generates and sends a bounce message to the sender of an email you received through Amazon SES. You can only use this API on an email up to 24 hours after you receive it.

    You cannot use this API to send generic bounces for mail that was not received by Amazon SES.

    For information about receiving email through Amazon SES, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.
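
    A sketch of bouncing a received message with the generated Go client; the bounce sender must be one of your verified identities, and BounceType takes one of the enum values defined later in this model:

    package example

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ses"
    )

    // bounceUnknownUser returns a "DoesNotExist" bounce for one recipient
    // of the message identified by origMessageID.
    func bounceUnknownUser(svc *ses.SES, origMessageID, bounceFrom, recipient string) error {
        _, err := svc.SendBounce(&ses.SendBounceInput{
            OriginalMessageId: aws.String(origMessageID),
            BounceSender:      aws.String(bounceFrom), // must be verified
            BouncedRecipientInfoList: []*ses.BouncedRecipientInfo{{
                Recipient:  aws.String(recipient),
                BounceType: aws.String("DoesNotExist"),
            }},
        })
        return err
    }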

    ", + "SendEmail": "

    Composes an email message based on input data, and then immediately queues the message for sending.

    There are several important points to know about SendEmail:

    • You can only send email from verified email addresses and domains; otherwise, you will get an \"Email address not verified\" error. If your account is still in the Amazon SES sandbox, you must also verify every recipient email address except for the recipients provided by the Amazon SES mailbox simulator. For more information, go to the Amazon SES Developer Guide.

    • The total size of the message cannot exceed 10 MB. This includes any attachments that are part of the message.

    • Amazon SES has a limit on the total number of recipients per message. The combined number of To:, CC: and BCC: email addresses cannot exceed 50. If you need to send an email message to a larger audience, you can divide your recipient list into groups of 50 or fewer, and then call Amazon SES repeatedly to send the message to each group.

    • For every message that you send, the total number of recipients (To:, CC: and BCC:) is counted against your sending quota - the maximum number of emails you can send in a 24-hour period. For information about your sending quota, go to the Amazon SES Developer Guide.
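
    A runnable Go sketch of SendEmail, assuming the generated client; the region and addresses are placeholders, and the sender must already be verified as described above:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/ses"
    )

    func main() {
        // Credentials come from the SDK's default provider chain.
        sess := session.Must(session.NewSession(&aws.Config{
            Region: aws.String("us-east-1"),
        }))
        svc := ses.New(sess)

        out, err := svc.SendEmail(&ses.SendEmailInput{
            Source: aws.String("sender@example.com"), // verified identity
            Destination: &ses.Destination{
                ToAddresses: []*string{aws.String("recipient@example.com")},
            },
            Message: &ses.Message{
                Subject: &ses.Content{Data: aws.String("Hello")},
                Body: &ses.Body{
                    Text: &ses.Content{Data: aws.String("Sent through Amazon SES.")},
                },
            },
        })
        if err != nil {
            // Failures surface the error shapes above, e.g. MessageRejected.
            log.Fatal(err)
        }
        fmt.Println("message id:", aws.StringValue(out.MessageId))
    }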

    ", + "SendRawEmail": "

    Sends an email message, with header and content specified by the client. The SendRawEmail action is useful for sending multipart MIME emails. The raw text of the message must comply with Internet email standards; otherwise, the message cannot be sent.

    There are several important points to know about SendRawEmail:

    • You can only send email from verified email addresses and domains; otherwise, you will get an \"Email address not verified\" error. If your account is still in the Amazon SES sandbox, you must also verify every recipient email address except for the recipients provided by the Amazon SES mailbox simulator. For more information, go to the Amazon SES Developer Guide.

    • The total size of the message cannot exceed 10 MB. This includes any attachments that are part of the message.

    • Amazon SES has a limit on the total number of recipients per message. The combined number of To:, CC: and BCC: email addresses cannot exceed 50. If you need to send an email message to a larger audience, you can divide your recipient list into groups of 50 or fewer, and then call Amazon SES repeatedly to send the message to each group.

    • The To:, CC:, and BCC: headers in the raw message can contain a group list. Note that each recipient in a group list counts towards the 50-recipient limit.

    • For every message that you send, the total number of recipients (To:, CC: and BCC:) is counted against your sending quota - the maximum number of emails you can send in a 24-hour period. For information about your sending quota, go to the Amazon SES Developer Guide.

    • If you are using sending authorization to send on behalf of another user, SendRawEmail enables you to specify the cross-account identity for the email's \"Source,\" \"From,\" and \"Return-Path\" parameters in one of two ways: you can pass optional parameters SourceArn, FromArn, and/or ReturnPathArn to the API, or you can include the following X-headers in the header of your raw email:

      • X-SES-SOURCE-ARN

      • X-SES-FROM-ARN

      • X-SES-RETURN-PATH-ARN

      Do not include these X-headers in the DKIM signature, because they are removed by Amazon SES before sending the email.

      For the most common sending authorization use case, we recommend that you specify the SourceArn and do not specify either the FromArn or ReturnPathArn. (The same note applies to the corresponding X-headers.) If you only specify the SourceArn, Amazon SES will simply set the \"From\" address and the \"Return Path\" address to the identity specified in SourceArn. For more information about sending authorization, see the Amazon SES Developer Guide.
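
    A sketch of the raw variant with the generated Go client; the caller assembles the full MIME message (headers and body), and any of the X-headers above may be embedded directly in it:

    package example

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ses"
    )

    // sendRaw submits a pre-assembled MIME message and returns its message ID.
    func sendRaw(svc *ses.SES, raw []byte) (string, error) {
        out, err := svc.SendRawEmail(&ses.SendRawEmailInput{
            // RawMessage.Data is a blob; Source and the ARN parameters are
            // optional when the headers carry the same information.
            RawMessage: &ses.RawMessage{Data: raw},
        })
        if err != nil {
            return "", err
        }
        return aws.StringValue(out.MessageId), nil
    }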

    ", + "SetActiveReceiptRuleSet": "

    Sets the specified receipt rule set as the active receipt rule set.

    To disable your email receiving through Amazon SES completely, you can call this API with RuleSetName set to null.

    For information about managing receipt rule sets, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.
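
    A sketch with the generated Go client; RuleSetName is optional in the request shape, which is what makes the disable-by-null behavior above possible:

    package example

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ses"
    )

    // activate makes name the active receipt rule set. Sending the request
    // with RuleSetName left nil would disable email receiving instead.
    func activate(svc *ses.SES, name string) error {
        _, err := svc.SetActiveReceiptRuleSet(&ses.SetActiveReceiptRuleSetInput{
            RuleSetName: aws.String(name),
        })
        return err
    }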

    ", + "SetIdentityDkimEnabled": "

    Enables or disables Easy DKIM signing of email sent from an identity:

    • If Easy DKIM signing is enabled for a domain name identity (e.g., example.com), then Amazon SES will DKIM-sign all email sent by addresses under that domain name (e.g., user@example.com).

    • If Easy DKIM signing is enabled for an email address, then Amazon SES will DKIM-sign all email sent by that email address.

    For email addresses (e.g., user@example.com), you can only enable Easy DKIM signing if the corresponding domain (e.g., example.com) has been set up for Easy DKIM using the AWS Console or the VerifyDomainDkim action.

    This action is throttled at one request per second.

    For more information about Easy DKIM signing, go to the Amazon SES Developer Guide.

    ", + "SetIdentityFeedbackForwardingEnabled": "

    Given an identity (an email address or a domain), sets whether Amazon SES forwards bounce and complaint notifications as email. Feedback forwarding can only be disabled when Amazon Simple Notification Service (Amazon SNS) topics are specified for both bounces and complaints.

    Feedback forwarding does not apply to delivery notifications. Delivery notifications are only available through Amazon SNS.

    This action is throttled at one request per second.

    For more information about using notifications with Amazon SES, see the Amazon SES Developer Guide.

    ", + "SetIdentityHeadersInNotificationsEnabled": "

    Given an identity (an email address or a domain), sets whether Amazon SES includes the original email headers in the Amazon Simple Notification Service (Amazon SNS) notifications of a specified type.

    This action is throttled at one request per second.

    For more information about using notifications with Amazon SES, see the Amazon SES Developer Guide.

    ", + "SetIdentityMailFromDomain": "

    Enables or disables the custom MAIL FROM domain setup for a verified identity (an email address or a domain).

    To send emails using the specified MAIL FROM domain, you must add an MX record to your MAIL FROM domain's DNS settings. If you want your emails to pass Sender Policy Framework (SPF) checks, you must also add or update an SPF record. For more information, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.

    ", + "SetIdentityNotificationTopic": "

    Given an identity (an email address or a domain), sets the Amazon Simple Notification Service (Amazon SNS) topic to which Amazon SES will publish bounce, complaint, and/or delivery notifications for emails sent with that identity as the Source.

    Unless feedback forwarding is enabled, you must specify Amazon SNS topics for bounce and complaint notifications. For more information, see SetIdentityFeedbackForwardingEnabled.

    This action is throttled at one request per second.

    For more information about feedback notification, see the Amazon SES Developer Guide.
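
    A sketch of pointing bounce notifications at an SNS topic with the generated Go client (identity and topic ARN are placeholders):

    package example

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ses"
    )

    // routeBouncesToSNS publishes Bounce notifications for identity to
    // topicARN; NotificationType may also be "Complaint" or "Delivery".
    func routeBouncesToSNS(svc *ses.SES, identity, topicARN string) error {
        _, err := svc.SetIdentityNotificationTopic(&ses.SetIdentityNotificationTopicInput{
            Identity:         aws.String(identity),
            NotificationType: aws.String("Bounce"),
            SnsTopic:         aws.String(topicARN),
        })
        return err
    }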

    ", + "SetReceiptRulePosition": "

    Sets the position of the specified receipt rule in the receipt rule set.

    For information about managing receipt rules, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.

    ", + "UpdateReceiptRule": "

    Updates a receipt rule.

    For information about managing receipt rules, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.

    ", + "VerifyDomainDkim": "

    Returns a set of DKIM tokens for a domain. DKIM tokens are character strings that represent your domain's identity. Using these tokens, you will need to create DNS CNAME records that point to DKIM public keys hosted by Amazon SES. Amazon Web Services will eventually detect that you have updated your DNS records; this detection process may take up to 72 hours. Upon successful detection, Amazon SES will be able to DKIM-sign email originating from that domain.

    This action is throttled at one request per second.

    To enable or disable Easy DKIM signing for a domain, use the SetIdentityDkimEnabled action.

    For more information about creating DNS records using DKIM tokens, go to the Amazon SES Developer Guide.
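
    A sketch of fetching the tokens and printing the CNAME records to publish, assuming the generated Go client and the usual Easy DKIM record layout (token._domainkey.domain pointing at token.dkim.amazonses.com):

    package example

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ses"
    )

    // printDkimRecords prints one CNAME record per DKIM token.
    func printDkimRecords(svc *ses.SES, domain string) error {
        out, err := svc.VerifyDomainDkim(&ses.VerifyDomainDkimInput{
            Domain: aws.String(domain),
        })
        if err != nil {
            return err
        }
        for _, t := range out.DkimTokens {
            fmt.Printf("%s._domainkey.%s CNAME %s.dkim.amazonses.com\n",
                aws.StringValue(t), domain, aws.StringValue(t))
        }
        return nil
    }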

    ", + "VerifyDomainIdentity": "

    Verifies a domain.

    This action is throttled at one request per second.

    ", + "VerifyEmailAddress": "

    Verifies an email address. This action causes a confirmation email message to be sent to the specified address.

    The VerifyEmailAddress action is deprecated as of the May 15, 2012 release of Domain Verification. The VerifyEmailIdentity action is now preferred.

    This action is throttled at one request per second.

    ", + "VerifyEmailIdentity": "

    Verifies an email address. This action causes a confirmation email message to be sent to the specified address.

    This action is throttled at one request per second.
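
    A sketch of the call with the generated Go client; the address is a placeholder, and the operation only starts verification (the owner must still follow the link in the confirmation mail):

    package example

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ses"
    )

    // verifyEmail asks SES to mail a verification link to addr.
    func verifyEmail(svc *ses.SES, addr string) error {
        _, err := svc.VerifyEmailIdentity(&ses.VerifyEmailIdentityInput{
            EmailAddress: aws.String(addr),
        })
        return err
    }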

    " + }, + "shapes": { + "AddHeaderAction": { + "base": "

    When included in a receipt rule, this action adds a header to the received email.

    For information about adding a header using a receipt rule, see the Amazon SES Developer Guide.

    ", + "refs": { + "ReceiptAction$AddHeaderAction": "

    Adds a header to the received email.

    " + } + }, + "Address": { + "base": null, + "refs": { + "AddressList$member": null, + "BounceAction$Sender": "

    The email address of the sender of the bounced email. This is the address from which the bounce message will be sent.

    ", + "BouncedRecipientInfo$Recipient": "

    The email address of the recipient of the bounced email.

    ", + "DeleteVerifiedEmailAddressRequest$EmailAddress": "

    An email address to be removed from the list of verified addresses.

    ", + "RecipientDsnFields$FinalRecipient": "

    The email address to which the message was ultimately delivered. This corresponds to the Final-Recipient in the DSN. If not specified, FinalRecipient will be set to the Recipient specified in the BouncedRecipientInfo structure. Either FinalRecipient or the recipient in BouncedRecipientInfo must be a recipient of the original bounced message.

    Do not prepend the FinalRecipient email address with rfc 822;, as described in RFC 3798.

    ", + "SendBounceRequest$BounceSender": "

    The address to use in the \"From\" header of the bounce message. This must be an identity that you have verified with Amazon SES.

    ", + "SendEmailRequest$Source": "

    The email address that is sending the email. This email address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES. For information about verifying identities, see the Amazon SES Developer Guide.

    If you are sending on behalf of another user and have been permitted to do so by a sending authorization policy, then you must also specify the SourceArn parameter. For more information about sending authorization, see the Amazon SES Developer Guide.

    In all cases, the email address must be 7-bit ASCII. If the text must contain any other characters, then you must use MIME encoded-word syntax (RFC 2047) instead of a literal string. MIME encoded-word syntax uses the following form: =?charset?encoding?encoded-text?=. For more information, see RFC 2047.

    ", + "SendEmailRequest$ReturnPath": "

    The email address to which bounces and complaints are to be forwarded when feedback forwarding is enabled. If the message cannot be delivered to the recipient, then an error message will be returned from the recipient's ISP; this message will then be forwarded to the email address specified by the ReturnPath parameter. The ReturnPath parameter is never overwritten. This email address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES.

    ", + "SendRawEmailRequest$Source": "

    The identity's email address. If you do not provide a value for this parameter, you must specify a \"From\" address in the raw text of the message. (You can also specify both.)

    By default, the string must be 7-bit ASCII. If the text must contain any other characters, then you must use MIME encoded-word syntax (RFC 2047) instead of a literal string. MIME encoded-word syntax uses the following form: =?charset?encoding?encoded-text?=. For more information, see RFC 2047.

    If you specify the Source parameter and have feedback forwarding enabled, then bounces and complaints will be sent to this email address. This takes precedence over any Return-Path header that you might include in the raw text of the message.

    ", + "VerifyEmailAddressRequest$EmailAddress": "

    The email address to be verified.

    ", + "VerifyEmailIdentityRequest$EmailAddress": "

    The email address to be verified.

    " + } + }, + "AddressList": { + "base": null, + "refs": { + "Destination$ToAddresses": "

    The To: field(s) of the message.

    ", + "Destination$CcAddresses": "

    The CC: field(s) of the message.

    ", + "Destination$BccAddresses": "

    The BCC: field(s) of the message.

    ", + "ListVerifiedEmailAddressesResponse$VerifiedEmailAddresses": "

    A list of email addresses that have been verified.

    ", + "SendEmailRequest$ReplyToAddresses": "

    The reply-to email address(es) for the message. If the recipient replies to the message, each reply-to address will receive the reply.

    ", + "SendRawEmailRequest$Destinations": "

    A list of destinations for the message, consisting of To:, CC:, and BCC: addresses.

    " + } + }, + "AlreadyExistsException": { + "base": "

    Indicates that a resource could not be created due to a naming conflict.

    ", + "refs": { + } + }, + "AmazonResourceName": { + "base": null, + "refs": { + "BounceAction$TopicArn": "

    The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the bounce action is taken. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. For more information about Amazon SNS topics, see the Amazon SNS Developer Guide.

    ", + "BouncedRecipientInfo$RecipientArn": "

    This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to receive email for the recipient of the bounced email. For more information about sending authorization, see the Amazon SES Developer Guide.

    ", + "InvalidLambdaFunctionException$FunctionArn": null, + "InvalidSnsTopicException$Topic": null, + "LambdaAction$TopicArn": "

    The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the Lambda action is taken. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. For more information about Amazon SNS topics, see the Amazon SNS Developer Guide.

    ", + "LambdaAction$FunctionArn": "

    The Amazon Resource Name (ARN) of the AWS Lambda function. An example of an AWS Lambda function ARN is arn:aws:lambda:us-west-2:account-id:function:MyFunction. For more information about AWS Lambda, see the AWS Lambda Developer Guide.

    ", + "S3Action$TopicArn": "

    The ARN of the Amazon SNS topic to notify when the message is saved to the Amazon S3 bucket. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. For more information about Amazon SNS topics, see the Amazon SNS Developer Guide.

    ", + "S3Action$KmsKeyArn": "

    The customer master key that Amazon SES should use to encrypt your emails before saving them to the Amazon S3 bucket. You can use the default master key or a custom master key you created in AWS KMS as follows:

    • To use the default master key, provide an ARN in the form of arn:aws:kms:REGION:ACCOUNT-ID-WITHOUT-HYPHENS:alias/aws/ses. For example, if your AWS account ID is 123456789012 and you want to use the default master key in the US West (Oregon) region, the ARN of the default master key would be arn:aws:kms:us-west-2:123456789012:alias/aws/ses. If you use the default master key, you don't need to perform any extra steps to give Amazon SES permission to use the key.

    • To use a custom master key you created in AWS KMS, provide the ARN of the master key and ensure that you add a statement to your key's policy to give Amazon SES permission to use it. For more information about giving permissions, see the Amazon SES Developer Guide.

    For more information about key policies, see the AWS KMS Developer Guide. If you do not specify a master key, Amazon SES will not encrypt your emails.

    Your mail is encrypted by Amazon SES using the Amazon S3 encryption client before the mail is submitted to Amazon S3 for storage. It is not encrypted using Amazon S3 server-side encryption. This means that you must use the Amazon S3 encryption client to decrypt the email after retrieving it from Amazon S3, as the service has no access to use your AWS KMS keys for decryption. This encryption client is currently available with the AWS Java SDK and AWS Ruby SDK only. For more information about client-side encryption using AWS KMS master keys, see the Amazon S3 Developer Guide.

    ", + "SNSAction$TopicArn": "

    The Amazon Resource Name (ARN) of the Amazon SNS topic to notify. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. For more information about Amazon SNS topics, see the Amazon SNS Developer Guide.

    ", + "SendBounceRequest$BounceSenderArn": "

    This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to use the address in the \"From\" header of the bounce. For more information about sending authorization, see the Amazon SES Developer Guide.

    ", + "SendEmailRequest$SourceArn": "

    This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to send for the email address specified in the Source parameter.

    For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) attaches a policy to it that authorizes you to send from user@example.com, then you would specify the SourceArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, and the Source to be user@example.com.

    For more information about sending authorization, see the Amazon SES Developer Guide.

    ", + "SendEmailRequest$ReturnPathArn": "

    This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to use the email address specified in the ReturnPath parameter.

    For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) attaches a policy to it that authorizes you to use feedback@example.com, then you would specify the ReturnPathArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, and the ReturnPath to be feedback@example.com.

    For more information about sending authorization, see the Amazon SES Developer Guide.

    ", + "SendRawEmailRequest$FromArn": "

    This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to specify a particular \"From\" address in the header of the raw email.

    Instead of using this parameter, you can use the X-header X-SES-FROM-ARN in the raw message of the email. If you use both the FromArn parameter and the corresponding X-header, Amazon SES uses the value of the FromArn parameter.

    For information about when to use this parameter, see the description of SendRawEmail in this guide, or see the Amazon SES Developer Guide.

    ", + "SendRawEmailRequest$SourceArn": "

    This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to send for the email address specified in the Source parameter.

    For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) attaches a policy to it that authorizes you to send from user@example.com, then you would specify the SourceArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, and the Source to be user@example.com.

    Instead of using this parameter, you can use the X-header X-SES-SOURCE-ARN in the raw message of the email. If you use both the SourceArn parameter and the corresponding X-header, Amazon SES uses the value of the SourceArn parameter.

    For information about when to use this parameter, see the description of SendRawEmail in this guide, or see the Amazon SES Developer Guide.

    ", + "SendRawEmailRequest$ReturnPathArn": "

    This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to use the email address specified in the ReturnPath parameter.

    For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) attaches a policy to it that authorizes you to use feedback@example.com, then you would specify the ReturnPathArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, and the ReturnPath to be feedback@example.com.

    Instead of using this parameter, you can use the X-header X-SES-RETURN-PATH-ARN in the raw message of the email. If you use both the ReturnPathArn parameter and the corresponding X-header, Amazon SES uses the value of the ReturnPathArn parameter.

    For information about when to use this parameter, see the description of SendRawEmail in this guide, or see the Amazon SES Developer Guide.

    ", + "StopAction$TopicArn": "

    The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the stop action is taken. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. For more information about Amazon SNS topics, see the Amazon SNS Developer Guide.

    ", + "WorkmailAction$TopicArn": "

    The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the WorkMail action is called. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. For more information about Amazon SNS topics, see the Amazon SNS Developer Guide.

    ", + "WorkmailAction$OrganizationArn": "

    The ARN of the Amazon WorkMail organization. An example of an Amazon WorkMail organization ARN is arn:aws:workmail:us-west-2:123456789012:organization/m-68755160c4cb4e29a2b2f8fb58f359d7. For information about Amazon WorkMail organizations, see the Amazon WorkMail Administrator Guide.

    " + } + }, + "ArrivalDate": { + "base": null, + "refs": { + "MessageDsn$ArrivalDate": "

    When the message was received by the reporting mail transfer agent (MTA), in RFC 822 date-time format.

    " + } + }, + "BehaviorOnMXFailure": { + "base": null, + "refs": { + "IdentityMailFromDomainAttributes$BehaviorOnMXFailure": "

    The action that Amazon SES takes if it cannot successfully read the required MX record when you send an email. A value of UseDefaultValue indicates that if Amazon SES cannot read the required MX record, it uses amazonses.com (or a subdomain of that) as the MAIL FROM domain. A value of RejectMessage indicates that if Amazon SES cannot read the required MX record, Amazon SES returns a MailFromDomainNotVerified error and does not send the email.

    The custom MAIL FROM setup states that result in this behavior are Pending, Failed, and TemporaryFailure.

    ", + "SetIdentityMailFromDomainRequest$BehaviorOnMXFailure": "

    The action that you want Amazon SES to take if it cannot successfully read the required MX record when you send an email. If you choose UseDefaultValue, Amazon SES will use amazonses.com (or a subdomain of that) as the MAIL FROM domain. If you choose RejectMessage, Amazon SES will return a MailFromDomainNotVerified error and not send the email.

    The action specified in BehaviorOnMXFailure is taken when the custom MAIL FROM domain setup is in the Pending, Failed, and TemporaryFailure states.

    " + } + }, + "Body": { + "base": "

    Represents the body of the message. You can specify text, HTML, or both. If you use both, then the message should display correctly in the widest variety of email clients.

    ", + "refs": { + "Message$Body": "

    The message body.

    " + } + }, + "BounceAction": { + "base": "

    When included in a receipt rule, this action rejects the received email by returning a bounce response to the sender and, optionally, publishes a notification to Amazon Simple Notification Service (Amazon SNS).

    For information about sending a bounce message in response to a received email, see the Amazon SES Developer Guide.

    ", + "refs": { + "ReceiptAction$BounceAction": "

    Rejects the received email by returning a bounce response to the sender and, optionally, publishes a notification to Amazon Simple Notification Service (Amazon SNS).

    " + } + }, + "BounceMessage": { + "base": null, + "refs": { + "BounceAction$Message": "

    Human-readable text to include in the bounce message.

    " + } + }, + "BounceSmtpReplyCode": { + "base": null, + "refs": { + "BounceAction$SmtpReplyCode": "

    The SMTP reply code, as defined by RFC 5321.

    " + } + }, + "BounceStatusCode": { + "base": null, + "refs": { + "BounceAction$StatusCode": "

    The SMTP enhanced status code, as defined by RFC 3463.

    " + } + }, + "BounceType": { + "base": null, + "refs": { + "BouncedRecipientInfo$BounceType": "

    The reason for the bounce. You must provide either this parameter or RecipientDsnFields.

    " + } + }, + "BouncedRecipientInfo": { + "base": "

    Recipient-related information to include in the Delivery Status Notification (DSN) when an email that Amazon SES receives on your behalf bounces.

    For information about receiving email through Amazon SES, see the Amazon SES Developer Guide.

    ", + "refs": { + "BouncedRecipientInfoList$member": null + } + }, + "BouncedRecipientInfoList": { + "base": null, + "refs": { + "SendBounceRequest$BouncedRecipientInfoList": "

    A list of recipients of the bounced message, including the information required to create the Delivery Status Notifications (DSNs) for the recipients. You must specify at least one BouncedRecipientInfo in the list.
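
    A minimal sketch of a SendBounce call through the vendored Go SDK, assuming an *ses.SES client svc built as in the earlier sketch; the message ID and addresses are hypothetical. Supplying a BounceType lets Amazon SES generate the recipient DSN fields itself.

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ses"
    )

    // bounceMessage rejects a received message back to its sender.
    // "DoesNotExist" lets Amazon SES fill in the recipient DSN fields;
    // supply RecipientDsnFields instead of BounceType for full control.
    func bounceMessage(svc *ses.SES) error {
        _, err := svc.SendBounce(&ses.SendBounceInput{
            OriginalMessageId: aws.String("EXAMPLE-MESSAGE-ID"), // hypothetical
            BounceSender:      aws.String("mailer-daemon@example.com"),
            BouncedRecipientInfoList: []*ses.BouncedRecipientInfo{
                {
                    Recipient:  aws.String("nonexistent@example.com"),
                    BounceType: aws.String("DoesNotExist"),
                },
            },
        })
        return err
    }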

    " + } + }, + "CannotDeleteException": { + "base": "

    Indicates that the delete operation could not be completed.

    ", + "refs": { + } + }, + "Charset": { + "base": null, + "refs": { + "Content$Charset": "

    The character set of the content.

    " + } + }, + "Cidr": { + "base": null, + "refs": { + "ReceiptIpFilter$Cidr": "

    A single IP address or a range of IP addresses that you want to block or allow, specified in Classless Inter-Domain Routing (CIDR) notation. An example of a single IP address is 10.0.0.1. An example of a range of IP addresses is 10.0.0.1/24. For more information about CIDR notation, see RFC 2317.
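
    A sketch of how such a CIDR filter is expressed in the vendored Go SDK; the filter name and range are illustrative only.

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ses"
    )

    // blockRange creates an IP address filter that rejects mail from
    // the 10.0.0.0/24 range.
    func blockRange(svc *ses.SES) error {
        _, err := svc.CreateReceiptFilter(&ses.CreateReceiptFilterInput{
            Filter: &ses.ReceiptFilter{
                Name: aws.String("block-10-0-0-0-24"),
                IpFilter: &ses.ReceiptIpFilter{
                    Cidr:   aws.String("10.0.0.0/24"), // a single address such as 10.0.0.1 also works
                    Policy: aws.String("Block"),       // or "Allow"
                },
            },
        })
        return err
    }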

    " + } + }, + "CloneReceiptRuleSetRequest": { + "base": "

    Represents a request to create a receipt rule set by cloning an existing one. You use receipt rule sets to receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "CloneReceiptRuleSetResponse": { + "base": "

    An empty element returned on a successful request.

    ", + "refs": { + } + }, + "Content": { + "base": "

    Represents textual data, plus an optional character set specification.

    By default, the text must be 7-bit ASCII, due to the constraints of the SMTP protocol. If the text must contain any other characters, then you must also specify a character set. Examples include UTF-8, ISO-8859-1, and Shift_JIS.

    ", + "refs": { + "Body$Text": "

    The content of the message, in text format. Use this for text-based email clients, or clients on high-latency networks (such as mobile devices).

    ", + "Body$Html": "

    The content of the message, in HTML format. Use this for email clients that can process HTML. You can include clickable links, formatted text, and much more in an HTML message.

    ", + "Message$Subject": "

    The subject of the message: A short summary of the content, which will appear in the recipient's inbox.
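
    A short sketch of how Message, Body, and Content fit together in the vendored Go SDK; the subject and body strings are placeholders, and Charset may be omitted for 7-bit ASCII content.

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ses"
    )

    // buildMessage assembles a message with both text and HTML parts so
    // that the widest variety of clients can render it.
    func buildMessage() *ses.Message {
        return &ses.Message{
            Subject: &ses.Content{Data: aws.String("Monthly report")},
            Body: &ses.Body{
                Text: &ses.Content{Data: aws.String("The report is ready.")},
                Html: &ses.Content{
                    Data:    aws.String("<p>The report is <b>ready</b>.</p>"),
                    Charset: aws.String("UTF-8"),
                },
            },
        }
    }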

    " + } + }, + "Counter": { + "base": null, + "refs": { + "SendDataPoint$DeliveryAttempts": "

    Number of emails that have been enqueued for sending.

    ", + "SendDataPoint$Bounces": "

    Number of emails that have bounced.

    ", + "SendDataPoint$Complaints": "

    Number of unwanted emails that were rejected by recipients.

    ", + "SendDataPoint$Rejects": "

    Number of emails rejected by Amazon SES.

    " + } + }, + "CreateReceiptFilterRequest": { + "base": "

    Represents a request to create a new IP address filter. You use IP address filters when you receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "CreateReceiptFilterResponse": { + "base": "

    An empty element returned on a successful request.

    ", + "refs": { + } + }, + "CreateReceiptRuleRequest": { + "base": "

    Represents a request to create a receipt rule. You use receipt rules to receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "CreateReceiptRuleResponse": { + "base": "

    An empty element returned on a successful request.

    ", + "refs": { + } + }, + "CreateReceiptRuleSetRequest": { + "base": "

    Represents a request to create an empty receipt rule set. You use receipt rule sets to receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "CreateReceiptRuleSetResponse": { + "base": "

    An empty element returned on a successful request.

    ", + "refs": { + } + }, + "CustomMailFromStatus": { + "base": null, + "refs": { + "IdentityMailFromDomainAttributes$MailFromDomainStatus": "

    The state that indicates whether Amazon SES has successfully read the MX record required for custom MAIL FROM domain setup. If the state is Success, Amazon SES uses the specified custom MAIL FROM domain when the verified identity sends an email. All other states indicate that Amazon SES takes the action described by BehaviorOnMXFailure.

    " + } + }, + "DeleteIdentityPolicyRequest": { + "base": "

    Represents a request to delete a sending authorization policy for an identity. Sending authorization is an Amazon SES feature that enables you to authorize other senders to use your identities. For information, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "DeleteIdentityPolicyResponse": { + "base": "

    An empty element returned on a successful request.

    ", + "refs": { + } + }, + "DeleteIdentityRequest": { + "base": "

    Represents a request to delete one of your Amazon SES identities (an email address or domain).

    ", + "refs": { + } + }, + "DeleteIdentityResponse": { + "base": "

    An empty element returned on a successful request.

    ", + "refs": { + } + }, + "DeleteReceiptFilterRequest": { + "base": "

    Represents a request to delete an IP address filter. You use IP address filters when you receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "DeleteReceiptFilterResponse": { + "base": "

    An empty element returned on a successful request.

    ", + "refs": { + } + }, + "DeleteReceiptRuleRequest": { + "base": "

    Represents a request to delete a receipt rule. You use receipt rules to receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "DeleteReceiptRuleResponse": { + "base": "

    An empty element returned on a successful request.

    ", + "refs": { + } + }, + "DeleteReceiptRuleSetRequest": { + "base": "

    Represents a request to delete a receipt rule set and all of the receipt rules it contains. You use receipt rule sets to receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "DeleteReceiptRuleSetResponse": { + "base": "

    An empty element returned on a successful request.

    ", + "refs": { + } + }, + "DeleteVerifiedEmailAddressRequest": { + "base": "

    Represents a request to delete an email address from the list of email addresses you have attempted to verify under your AWS account.

    ", + "refs": { + } + }, + "DescribeActiveReceiptRuleSetRequest": { + "base": "

    Represents a request to return the metadata and receipt rules for the receipt rule set that is currently active. You use receipt rule sets to receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "DescribeActiveReceiptRuleSetResponse": { + "base": "

    Represents the metadata and receipt rules for the receipt rule set that is currently active.

    ", + "refs": { + } + }, + "DescribeReceiptRuleRequest": { + "base": "

    Represents a request to return the details of a receipt rule. You use receipt rules to receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "DescribeReceiptRuleResponse": { + "base": "

    Represents the details of a receipt rule.

    ", + "refs": { + } + }, + "DescribeReceiptRuleSetRequest": { + "base": "

    Represents a request to return the details of a receipt rule set. You use receipt rule sets to receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "DescribeReceiptRuleSetResponse": { + "base": "

    Represents the details of the specified receipt rule set.

    ", + "refs": { + } + }, + "Destination": { + "base": "

    Represents the destination of the message, consisting of To:, CC:, and BCC: fields.

    By default, the string must be 7-bit ASCII. If the text must contain any other characters, then you must use MIME encoded-word syntax (RFC 2047) instead of a literal string. MIME encoded-word syntax uses the following form: =?charset?encoding?encoded-text?=. For more information, see RFC 2047.

    ", + "refs": { + "SendEmailRequest$Destination": "

    The destination for this email, composed of To:, CC:, and BCC: fields.

    " + } + }, + "DiagnosticCode": { + "base": null, + "refs": { + "RecipientDsnFields$DiagnosticCode": "

    An extended explanation of what went wrong; this is usually an SMTP response. See RFC 3463 for the correct formatting of this parameter.

    " + } + }, + "DkimAttributes": { + "base": null, + "refs": { + "GetIdentityDkimAttributesResponse$DkimAttributes": "

    The DKIM attributes for an email address or a domain.

    " + } + }, + "Domain": { + "base": null, + "refs": { + "VerifyDomainDkimRequest$Domain": "

    The name of the domain to be verified for Easy DKIM signing.

    ", + "VerifyDomainIdentityRequest$Domain": "

    The domain to be verified.

    " + } + }, + "DsnAction": { + "base": null, + "refs": { + "RecipientDsnFields$Action": "

    The action performed by the reporting mail transfer agent (MTA) as a result of its attempt to deliver the message to the recipient address. This is required by RFC 3464.

    " + } + }, + "DsnStatus": { + "base": null, + "refs": { + "RecipientDsnFields$Status": "

    The status code that indicates what went wrong. This is required by RFC 3464.

    " + } + }, + "Enabled": { + "base": null, + "refs": { + "IdentityDkimAttributes$DkimEnabled": "

    True if DKIM signing is enabled for email sent from the identity; false otherwise.

    ", + "IdentityNotificationAttributes$ForwardingEnabled": "

    Describes whether Amazon SES will forward bounce and complaint notifications as email. true indicates that Amazon SES will forward bounce and complaint notifications as email, while false indicates that bounce and complaint notifications will be published only to the specified bounce and complaint Amazon SNS topics.

    ", + "IdentityNotificationAttributes$HeadersInBounceNotificationsEnabled": "

    Describes whether Amazon SES includes the original email headers in Amazon SNS notifications of type Bounce. A value of true specifies that Amazon SES will include headers in bounce notifications, and a value of false specifies that Amazon SES will not include headers in bounce notifications.

    ", + "IdentityNotificationAttributes$HeadersInComplaintNotificationsEnabled": "

    Describes whether Amazon SES includes the original email headers in Amazon SNS notifications of type Complaint. A value of true specifies that Amazon SES will include headers in complaint notifications, and a value of false specifies that Amazon SES will not include headers in complaint notifications.

    ", + "IdentityNotificationAttributes$HeadersInDeliveryNotificationsEnabled": "

    Describes whether Amazon SES includes the original email headers in Amazon SNS notifications of type Delivery. A value of true specifies that Amazon SES will include headers in delivery notifications, and a value of false specifies that Amazon SES will not include headers in delivery notifications.

    ", + "ReceiptRule$Enabled": "

    If true, the receipt rule is active. The default value is false.

    ", + "ReceiptRule$ScanEnabled": "

    If true, then messages to which this receipt rule applies are scanned for spam and viruses. The default value is false.

    ", + "SetIdentityDkimEnabledRequest$DkimEnabled": "

    Sets whether DKIM signing is enabled for an identity. Set to true to enable DKIM signing for this identity; false to disable it.

    ", + "SetIdentityFeedbackForwardingEnabledRequest$ForwardingEnabled": "

    Sets whether Amazon SES will forward bounce and complaint notifications as email. true specifies that Amazon SES will forward bounce and complaint notifications as email, in addition to any Amazon SNS topic publishing otherwise specified. false specifies that Amazon SES will publish bounce and complaint notifications only through Amazon SNS. This value can only be set to false when Amazon SNS topics are set for both Bounce and Complaint notification types.

    ", + "SetIdentityHeadersInNotificationsEnabledRequest$Enabled": "

    Sets whether Amazon SES includes the original email headers in Amazon SNS notifications of the specified notification type. A value of true specifies that Amazon SES will include headers in notifications, and a value of false specifies that Amazon SES will not include headers in notifications.

    This value can only be set when NotificationType is already set to use a particular Amazon SNS topic.
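
    A sketch of the corresponding call in the vendored Go SDK, assuming the identity already has an Amazon SNS topic set for the Bounce notification type.

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ses"
    )

    // enableBounceHeaders asks Amazon SES to include the original email
    // headers in Bounce notifications for the given identity.
    func enableBounceHeaders(svc *ses.SES, identity string) error {
        _, err := svc.SetIdentityHeadersInNotificationsEnabled(
            &ses.SetIdentityHeadersInNotificationsEnabledInput{
                Identity:         aws.String(identity),
                NotificationType: aws.String("Bounce"), // or "Complaint", "Delivery"
                Enabled:          aws.Bool(true),
            })
        return err
    }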

    " + } + }, + "Explanation": { + "base": null, + "refs": { + "SendBounceRequest$Explanation": "

    Human-readable text for the bounce message to explain the failure. If not specified, the text will be auto-generated based on the bounced recipient information.

    " + } + }, + "ExtensionField": { + "base": "

    Additional X-headers to include in the Delivery Status Notification (DSN) when an email that Amazon SES receives on your behalf bounces.

    For information about receiving email through Amazon SES, see the Amazon SES Developer Guide.

    ", + "refs": { + "ExtensionFieldList$member": null + } + }, + "ExtensionFieldList": { + "base": null, + "refs": { + "MessageDsn$ExtensionFields": "

    Additional X-headers to include in the DSN.

    ", + "RecipientDsnFields$ExtensionFields": "

    Additional X-headers to include in the DSN.

    " + } + }, + "ExtensionFieldName": { + "base": null, + "refs": { + "ExtensionField$Name": "

    The name of the header to add. Must be between 1 and 50 characters, inclusive, and consist of alphanumeric (a-z, A-Z, 0-9) characters and dashes only.

    " + } + }, + "ExtensionFieldValue": { + "base": null, + "refs": { + "ExtensionField$Value": "

    The value of the header to add. Must be less than 2048 characters, and must not contain newline characters (\"\\r\" or \"\\n\").

    " + } + }, + "GetIdentityDkimAttributesRequest": { + "base": "

    Represents a request for the status of Amazon SES Easy DKIM signing for an identity. For domain identities, this request also returns the DKIM tokens that are required for Easy DKIM signing, and whether Amazon SES successfully verified that these tokens were published. For more information about Easy DKIM, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "GetIdentityDkimAttributesResponse": { + "base": "

    Represents the status of Amazon SES Easy DKIM signing for an identity. For domain identities, this response also contains the DKIM tokens that are required for Easy DKIM signing, and whether Amazon SES successfully verified that these tokens were published.

    ", + "refs": { + } + }, + "GetIdentityMailFromDomainAttributesRequest": { + "base": "

    Represents a request to return the Amazon SES custom MAIL FROM attributes for a list of identities. For information about using a custom MAIL FROM domain, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "GetIdentityMailFromDomainAttributesResponse": { + "base": "

    Represents the custom MAIL FROM attributes for a list of identities.

    ", + "refs": { + } + }, + "GetIdentityNotificationAttributesRequest": { + "base": "

    Represents a request to return the notification attributes for a list of identities you verified with Amazon SES. For information about Amazon SES notifications, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "GetIdentityNotificationAttributesResponse": { + "base": "

    Represents the notification attributes for a list of identities.

    ", + "refs": { + } + }, + "GetIdentityPoliciesRequest": { + "base": "

    Represents a request to return the requested sending authorization policies for an identity. Sending authorization is an Amazon SES feature that enables you to authorize other senders to use your identities. For information, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "GetIdentityPoliciesResponse": { + "base": "

    Represents the requested sending authorization policies.

    ", + "refs": { + } + }, + "GetIdentityVerificationAttributesRequest": { + "base": "

    Represents a request to return the Amazon SES verification status of a list of identities. For domain identities, this request also returns the verification token. For information about verifying identities with Amazon SES, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "GetIdentityVerificationAttributesResponse": { + "base": "

    The Amazon SES verification status of a list of identities. For domain identities, this response also contains the verification token.

    ", + "refs": { + } + }, + "GetSendQuotaResponse": { + "base": "

    Represents your Amazon SES daily sending quota, maximum send rate, and the number of emails you have sent in the last 24 hours.

    ", + "refs": { + } + }, + "GetSendStatisticsResponse": { + "base": "

    Represents a list of data points. This list contains aggregated data from the previous two weeks of your sending activity with Amazon SES.

    ", + "refs": { + } + }, + "HeaderName": { + "base": null, + "refs": { + "AddHeaderAction$HeaderName": "

    The name of the header to add. Must be between 1 and 50 characters, inclusive, and consist of alphanumeric (a-z, A-Z, 0-9) characters and dashes only.

    " + } + }, + "HeaderValue": { + "base": null, + "refs": { + "AddHeaderAction$HeaderValue": "

    Must be less than 2048 characters, and must not contain newline characters (\"\\r\" or \"\\n\").

    " + } + }, + "Identity": { + "base": null, + "refs": { + "DeleteIdentityPolicyRequest$Identity": "

    The identity that is associated with the policy that you want to delete. You can specify the identity by using its name or by using its Amazon Resource Name (ARN). Examples: user@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com.

    To successfully call this API, you must own the identity.

    ", + "DeleteIdentityRequest$Identity": "

    The identity to be removed from the list of identities for the AWS Account.

    ", + "DkimAttributes$key": null, + "GetIdentityPoliciesRequest$Identity": "

    The identity for which the policies will be retrieved. You can specify an identity by using its name or by using its Amazon Resource Name (ARN). Examples: user@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com.

    To successfully call this API, you must own the identity.

    ", + "IdentityList$member": null, + "ListIdentityPoliciesRequest$Identity": "

    The identity that is associated with the policy for which the policies will be listed. You can specify an identity by using its name or by using its Amazon Resource Name (ARN). Examples: user@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com.

    To successfully call this API, you must own the identity.

    ", + "MailFromDomainAttributes$key": null, + "NotificationAttributes$key": null, + "PutIdentityPolicyRequest$Identity": "

    The identity to which the policy will apply. You can specify an identity by using its name or by using its Amazon Resource Name (ARN). Examples: user@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com.

    To successfully call this API, you must own the identity.

    ", + "SetIdentityDkimEnabledRequest$Identity": "

    The identity for which DKIM signing should be enabled or disabled.

    ", + "SetIdentityFeedbackForwardingEnabledRequest$Identity": "

    The identity for which to set bounce and complaint notification forwarding. Examples: user@example.com, example.com.

    ", + "SetIdentityHeadersInNotificationsEnabledRequest$Identity": "

    The identity for which to enable or disable headers in notifications. Examples: user@example.com, example.com.

    ", + "SetIdentityMailFromDomainRequest$Identity": "

    The verified identity for which you want to enable or disable the specified custom MAIL FROM domain.

    ", + "SetIdentityNotificationTopicRequest$Identity": "

    The identity for which the Amazon SNS topic will be set. You can specify an identity by using its name or by using its Amazon Resource Name (ARN). Examples: user@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com.

    ", + "VerificationAttributes$key": null + } + }, + "IdentityDkimAttributes": { + "base": "

    Represents the DKIM attributes of a verified email address or a domain.

    ", + "refs": { + "DkimAttributes$value": null + } + }, + "IdentityList": { + "base": null, + "refs": { + "GetIdentityDkimAttributesRequest$Identities": "

    A list of one or more verified identities - email addresses, domains, or both.

    ", + "GetIdentityMailFromDomainAttributesRequest$Identities": "

    A list of one or more identities.

    ", + "GetIdentityNotificationAttributesRequest$Identities": "

    A list of one or more identities. You can specify an identity by using its name or by using its Amazon Resource Name (ARN). Examples: user@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com.

    ", + "GetIdentityVerificationAttributesRequest$Identities": "

    A list of identities.

    ", + "ListIdentitiesResponse$Identities": "

    A list of identities.

    " + } + }, + "IdentityMailFromDomainAttributes": { + "base": "

    Represents the custom MAIL FROM domain attributes of a verified identity (email address or domain).

    ", + "refs": { + "MailFromDomainAttributes$value": null + } + }, + "IdentityNotificationAttributes": { + "base": "

    Represents the notification attributes of an identity, including whether an identity has Amazon Simple Notification Service (Amazon SNS) topics set for bounce, complaint, and/or delivery notifications, and whether feedback forwarding is enabled for bounce and complaint notifications.

    ", + "refs": { + "NotificationAttributes$value": null + } + }, + "IdentityType": { + "base": null, + "refs": { + "ListIdentitiesRequest$IdentityType": "

    The type of the identities to list. Possible values are \"EmailAddress\" and \"Domain\". If this parameter is omitted, then all identities will be listed.

    " + } + }, + "IdentityVerificationAttributes": { + "base": "

    Represents the verification attributes of a single identity.

    ", + "refs": { + "VerificationAttributes$value": null + } + }, + "InvalidLambdaFunctionException": { + "base": "

    Indicates that the provided AWS Lambda function is invalid, or that Amazon SES could not execute the provided function, possibly due to permissions issues. For information about giving permissions, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "InvalidPolicyException": { + "base": "

    Indicates that the provided policy is invalid. Check the error stack for more information about what caused the error.

    ", + "refs": { + } + }, + "InvalidS3ConfigurationException": { + "base": "

    Indicates that the provided Amazon S3 bucket or AWS KMS encryption key is invalid, or that Amazon SES could not publish to the bucket, possibly due to permissions issues. For information about giving permissions, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "InvalidSnsTopicException": { + "base": "

    Indicates that the provided Amazon SNS topic is invalid, or that Amazon SES could not publish to the topic, possibly due to permissions issues. For information about giving permissions, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "InvocationType": { + "base": null, + "refs": { + "LambdaAction$InvocationType": "

    The invocation type of the AWS Lambda function. An invocation type of RequestResponse means that the execution of the function will immediately result in a response, and a value of Event means that the function will be invoked asynchronously. The default value is Event. For information about AWS Lambda invocation types, see the AWS Lambda Developer Guide.

    There is a 30-second timeout on RequestResponse invocations. You should use Event invocation in most cases. Use RequestResponse only when you want to make a mail flow decision, such as whether to stop the receipt rule or the receipt rule set.

    " + } + }, + "LambdaAction": { + "base": "

    When included in a receipt rule, this action calls an AWS Lambda function and, optionally, publishes a notification to Amazon Simple Notification Service (Amazon SNS).

    To enable Amazon SES to call your AWS Lambda function or to publish to an Amazon SNS topic of another account, Amazon SES must have permission to access those resources. For information about giving permissions, see the Amazon SES Developer Guide.

    For information about using AWS Lambda actions in receipt rules, see the Amazon SES Developer Guide.

    ", + "refs": { + "ReceiptAction$LambdaAction": "

    Calls an AWS Lambda function, and optionally, publishes a notification to Amazon SNS.
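
    A sketch of a Lambda receipt action in the vendored Go SDK; the function ARN is hypothetical, and Event is the default, asynchronous invocation type.

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ses"
    )

    // lambdaAction builds a receipt action that invokes a Lambda function
    // asynchronously. Use "RequestResponse" only for synchronous mail-flow
    // decisions, and remember the 30-second timeout noted above.
    func lambdaAction() *ses.ReceiptAction {
        return &ses.ReceiptAction{
            LambdaAction: &ses.LambdaAction{
                FunctionArn:    aws.String("arn:aws:lambda:us-east-1:123456789012:function:ProcessMail"), // hypothetical
                InvocationType: aws.String("Event"),
            },
        }
    }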

    " + } + }, + "LastAttemptDate": { + "base": null, + "refs": { + "RecipientDsnFields$LastAttemptDate": "

    The time the final delivery attempt was made, in RFC 822 date-time format.

    " + } + }, + "LimitExceededException": { + "base": "

    Indicates that a resource could not be created due to service limits. For a list of Amazon SES limits, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "ListIdentitiesRequest": { + "base": "

    Represents a request to return a list of all identities (email addresses and domains) that you have attempted to verify under your AWS account, regardless of verification status.

    ", + "refs": { + } + }, + "ListIdentitiesResponse": { + "base": "

    A list of all identities that you have attempted to verify under your AWS account, regardless of verification status.

    ", + "refs": { + } + }, + "ListIdentityPoliciesRequest": { + "base": "

    Represents a request to return a list of sending authorization policies that are attached to an identity. Sending authorization is an Amazon SES feature that enables you to authorize other senders to use your identities. For information, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "ListIdentityPoliciesResponse": { + "base": "

    A list of names of sending authorization policies that apply to an identity.

    ", + "refs": { + } + }, + "ListReceiptFiltersRequest": { + "base": "

    Represents a request to list the IP address filters that exist under your AWS account. You use IP address filters when you receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "ListReceiptFiltersResponse": { + "base": "

    A list of IP address filters that exist under your AWS account.

    ", + "refs": { + } + }, + "ListReceiptRuleSetsRequest": { + "base": "

    Represents a request to list the receipt rule sets that exist under your AWS account. You use receipt rule sets to receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "ListReceiptRuleSetsResponse": { + "base": "

    A list of receipt rule sets that exist under your AWS account.

    ", + "refs": { + } + }, + "ListVerifiedEmailAddressesResponse": { + "base": "

    A list of email addresses that you have verified with Amazon SES under your AWS account.

    ", + "refs": { + } + }, + "MailFromDomainAttributes": { + "base": null, + "refs": { + "GetIdentityMailFromDomainAttributesResponse$MailFromDomainAttributes": "

    A map of identities to custom MAIL FROM attributes.

    " + } + }, + "MailFromDomainName": { + "base": null, + "refs": { + "IdentityMailFromDomainAttributes$MailFromDomain": "

    The custom MAIL FROM domain that the identity is configured to use.

    ", + "SetIdentityMailFromDomainRequest$MailFromDomain": "

    The custom MAIL FROM domain that you want the verified identity to use. The MAIL FROM domain must 1) be a subdomain of the verified identity, 2) not be used in a \"From\" address if the MAIL FROM domain is the destination of email feedback forwarding (for more information, see the Amazon SES Developer Guide), and 3) not be used to receive emails. A value of null disables the custom MAIL FROM setting for the identity.
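
    A sketch of the call in the vendored Go SDK; example.com and bounce.example.com are placeholders, and the MAIL FROM domain must satisfy the three rules above.

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ses"
    )

    // setMailFrom configures a custom MAIL FROM subdomain for a verified
    // identity; passing a null MailFromDomain would disable the setting.
    func setMailFrom(svc *ses.SES) error {
        _, err := svc.SetIdentityMailFromDomain(&ses.SetIdentityMailFromDomainInput{
            Identity:            aws.String("example.com"),
            MailFromDomain:      aws.String("bounce.example.com"),
            BehaviorOnMXFailure: aws.String("UseDefaultValue"), // or "RejectMessage"
        })
        return err
    }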

    " + } + }, + "MailFromDomainNotVerifiedException": { + "base": "

    Indicates that the message could not be sent because Amazon SES could not read the MX record required to use the specified MAIL FROM domain. For information about editing the custom MAIL FROM domain settings for an identity, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "Max24HourSend": { + "base": null, + "refs": { + "GetSendQuotaResponse$Max24HourSend": "

    The maximum number of emails the user is allowed to send in a 24-hour interval. A value of -1 signifies an unlimited quota.

    " + } + }, + "MaxItems": { + "base": null, + "refs": { + "ListIdentitiesRequest$MaxItems": "

    The maximum number of identities per page. Possible values are 1-1000 inclusive.

    " + } + }, + "MaxSendRate": { + "base": null, + "refs": { + "GetSendQuotaResponse$MaxSendRate": "

    The maximum number of emails that Amazon SES can accept from the user's account per second.

    The rate at which Amazon SES accepts the user's messages might be less than the maximum send rate.
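
    A sketch of reading these quota fields through the vendored Go SDK:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ses"
    )

    // printQuota reports the account's sending limits. A Max24HourSend of
    // -1 signifies an unlimited quota.
    func printQuota(svc *ses.SES) error {
        out, err := svc.GetSendQuota(&ses.GetSendQuotaInput{})
        if err != nil {
            return err
        }
        fmt.Printf("quota: %.0f per 24h, rate: %.2f/s, sent: %.0f\n",
            aws.Float64Value(out.Max24HourSend),
            aws.Float64Value(out.MaxSendRate),
            aws.Float64Value(out.SentLast24Hours))
        return nil
    }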

    " + } + }, + "Message": { + "base": "

    Represents the message to be sent, composed of a subject and a body.

    ", + "refs": { + "SendEmailRequest$Message": "

    The message to be sent.

    " + } + }, + "MessageData": { + "base": null, + "refs": { + "Content$Data": "

    The textual data of the content.

    " + } + }, + "MessageDsn": { + "base": "

    Message-related information to include in the Delivery Status Notification (DSN) when an email that Amazon SES receives on your behalf bounces.

    For information about receiving email through Amazon SES, see the Amazon SES Developer Guide.

    ", + "refs": { + "SendBounceRequest$MessageDsn": "

    Message-related DSN fields. If not specified, Amazon SES will choose the values.

    " + } + }, + "MessageId": { + "base": null, + "refs": { + "SendBounceRequest$OriginalMessageId": "

    The message ID of the message to be bounced.

    ", + "SendBounceResponse$MessageId": "

    The message ID of the bounce message.

    ", + "SendEmailResponse$MessageId": "

    The unique message identifier returned from the SendEmail action.

    ", + "SendRawEmailResponse$MessageId": "

    The unique message identifier returned from the SendRawEmail action.

    " + } + }, + "MessageRejected": { + "base": "

    Indicates that the action failed, and the message could not be sent. Check the error stack for more information about what caused the error.

    ", + "refs": { + } + }, + "NextToken": { + "base": null, + "refs": { + "ListIdentitiesRequest$NextToken": "

    The token to use for pagination.

    ", + "ListIdentitiesResponse$NextToken": "

    The token used for pagination.

    ", + "ListReceiptRuleSetsRequest$NextToken": "

    A token returned from a previous call to ListReceiptRuleSets to indicate the position in the receipt rule set list.

    ", + "ListReceiptRuleSetsResponse$NextToken": "

    A token indicating that there are additional receipt rule sets available to be listed. Pass this token to successive calls of ListReceiptRuleSets to retrieve up to 100 receipt rule sets at a time.
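
    The same NextToken pattern applies to ListIdentities; a sketch of draining all pages with the vendored Go SDK:

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ses"
    )

    // listAllIdentities follows NextToken until the service stops
    // returning one, collecting every identity along the way.
    func listAllIdentities(svc *ses.SES) ([]string, error) {
        var identities []string
        input := &ses.ListIdentitiesInput{MaxItems: aws.Int64(100)}
        for {
            out, err := svc.ListIdentities(input)
            if err != nil {
                return nil, err
            }
            identities = append(identities, aws.StringValueSlice(out.Identities)...)
            if aws.StringValue(out.NextToken) == "" {
                return identities, nil
            }
            input.NextToken = out.NextToken
        }
    }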

    " + } + }, + "NotificationAttributes": { + "base": null, + "refs": { + "GetIdentityNotificationAttributesResponse$NotificationAttributes": "

    A map of Identity to IdentityNotificationAttributes.

    " + } + }, + "NotificationTopic": { + "base": null, + "refs": { + "IdentityNotificationAttributes$BounceTopic": "

    The Amazon Resource Name (ARN) of the Amazon SNS topic where Amazon SES will publish bounce notifications.

    ", + "IdentityNotificationAttributes$ComplaintTopic": "

    The Amazon Resource Name (ARN) of the Amazon SNS topic where Amazon SES will publish complaint notifications.

    ", + "IdentityNotificationAttributes$DeliveryTopic": "

    The Amazon Resource Name (ARN) of the Amazon SNS topic where Amazon SES will publish delivery notifications.

    ", + "SetIdentityNotificationTopicRequest$SnsTopic": "

    The Amazon Resource Name (ARN) of the Amazon SNS topic. If the parameter is omitted from the request or a null value is passed, SnsTopic is cleared and publishing is disabled.

    " + } + }, + "NotificationType": { + "base": null, + "refs": { + "SetIdentityHeadersInNotificationsEnabledRequest$NotificationType": "

    The notification type for which to enable or disable headers in notifications.

    ", + "SetIdentityNotificationTopicRequest$NotificationType": "

    The type of notifications that will be published to the specified Amazon SNS topic.

    " + } + }, + "Policy": { + "base": null, + "refs": { + "PolicyMap$value": null, + "PutIdentityPolicyRequest$Policy": "

    The text of the policy in JSON format. The policy cannot exceed 4 KB.

    For information about the syntax of sending authorization policies, see the Amazon SES Developer Guide.
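
    A sketch of attaching such a policy with the vendored Go SDK; the principal, identity ARN, and policy name are placeholders, not a policy recommendation.

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ses"
    )

    // allowDelegateSending attaches a minimal sending authorization policy
    // to a verified identity.
    func allowDelegateSending(svc *ses.SES) error {
        policy := `{
      "Version": "2012-10-17",
      "Statement": [{
        "Effect": "Allow",
        "Principal": {"AWS": "arn:aws:iam::123456789012:root"},
        "Action": ["ses:SendEmail", "ses:SendRawEmail"],
        "Resource": "arn:aws:ses:us-east-1:123456789012:identity/example.com"
      }]
    }`
        _, err := svc.PutIdentityPolicy(&ses.PutIdentityPolicyInput{
            Identity:   aws.String("example.com"),
            PolicyName: aws.String("AllowDelegateSending"), // alphanumerics, dashes, underscores; at most 64 characters
            Policy:     aws.String(policy),
        })
        return err
    }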

    " + } + }, + "PolicyMap": { + "base": null, + "refs": { + "GetIdentityPoliciesResponse$Policies": "

    A map of policy names to policies.

    " + } + }, + "PolicyName": { + "base": null, + "refs": { + "DeleteIdentityPolicyRequest$PolicyName": "

    The name of the policy to be deleted.

    ", + "PolicyMap$key": null, + "PolicyNameList$member": null, + "PutIdentityPolicyRequest$PolicyName": "

    The name of the policy.

    The policy name cannot exceed 64 characters and can only include alphanumeric characters, dashes, and underscores.

    " + } + }, + "PolicyNameList": { + "base": null, + "refs": { + "GetIdentityPoliciesRequest$PolicyNames": "

    A list of the names of policies to be retrieved. You can retrieve a maximum of 20 policies at a time. If you do not know the names of the policies that are attached to the identity, you can use ListIdentityPolicies.

    ", + "ListIdentityPoliciesResponse$PolicyNames": "

    A list of names of policies that apply to the specified identity.

    " + } + }, + "PutIdentityPolicyRequest": { + "base": "

    Represents a request to add or update a sending authorization policy for an identity. Sending authorization is an Amazon SES feature that enables you to authorize other senders to use your identities. For information, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "PutIdentityPolicyResponse": { + "base": "

    An empty element returned on a successful request.

    ", + "refs": { + } + }, + "RawMessage": { + "base": "

    Represents the raw data of the message.

    ", + "refs": { + "SendRawEmailRequest$RawMessage": "

    The raw text of the message. The client is responsible for ensuring the following:

    • Message must contain a header and a body, separated by a blank line.

    • All required header fields must be present.

    • Each part of a multipart MIME message must be formatted properly.

    • MIME content types must be among those supported by Amazon SES. For more information, go to the Amazon SES Developer Guide.

    • Content must be base64-encoded, if MIME requires it.

    " + } + }, + "RawMessageData": { + "base": null, + "refs": { + "RawMessage$Data": "

    The raw data of the message. The client must ensure that the message format complies with Internet email standards regarding email header fields, MIME types, MIME encoding, and base64 encoding (if necessary).

    The To:, CC:, and BCC: headers in the raw message can contain a group list.

    If you are using SendRawEmail with sending authorization, you can include X-headers in the raw message to specify the \"Source,\" \"From,\" and \"Return-Path\" addresses. For more information, see the documentation for SendRawEmail.

    Do not include these X-headers in the DKIM signature, because they are removed by Amazon SES before sending the email.

    For more information, go to the Amazon SES Developer Guide.
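
    A sketch, using only the Go standard library, of assembling a multipart/alternative message that follows these rules; the addresses and content are placeholders, and the result can be passed as RawMessage.Data.

    package main

    import (
        "bytes"
        "fmt"
        "mime/multipart"
        "net/textproto"
    )

    // buildRawMessage assembles headers, a blank line, and a properly
    // delimited multipart body with CRLF line endings throughout.
    func buildRawMessage() []byte {
        var body bytes.Buffer
        w := multipart.NewWriter(&body)

        part, _ := w.CreatePart(textproto.MIMEHeader{"Content-Type": {"text/plain; charset=us-ascii"}})
        fmt.Fprint(part, "Hello in plain text.\r\n")

        part, _ = w.CreatePart(textproto.MIMEHeader{"Content-Type": {"text/html; charset=us-ascii"}})
        fmt.Fprint(part, "<p>Hello in <b>HTML</b>.</p>\r\n")
        w.Close()

        var msg bytes.Buffer
        fmt.Fprint(&msg, "From: user@example.com\r\n")
        fmt.Fprint(&msg, "To: recipient@example.com\r\n")
        fmt.Fprint(&msg, "Subject: MIME example\r\n")
        fmt.Fprint(&msg, "MIME-Version: 1.0\r\n")
        fmt.Fprintf(&msg, "Content-Type: multipart/alternative; boundary=%q\r\n\r\n", w.Boundary())
        msg.Write(body.Bytes())
        return msg.Bytes()
    }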

    " + } + }, + "ReceiptAction": { + "base": "

    An action that Amazon SES can take when it receives an email on behalf of one or more email addresses or domains that you own. An instance of this data type can represent only one action.

    For information about setting up receipt rules, see the Amazon SES Developer Guide.

    ", + "refs": { + "ReceiptActionsList$member": null + } + }, + "ReceiptActionsList": { + "base": null, + "refs": { + "ReceiptRule$Actions": "

    An ordered list of actions to perform on messages that match at least one of the recipient email addresses or domains specified in the receipt rule.

    " + } + }, + "ReceiptFilter": { + "base": "

    A receipt IP address filter enables you to specify whether to accept or reject mail originating from an IP address or range of IP addresses.

    For information about setting up IP address filters, see the Amazon SES Developer Guide.

    ", + "refs": { + "CreateReceiptFilterRequest$Filter": "

    A data structure that describes the IP address filter to create, which consists of a name, an IP address range, and whether to allow or block mail from it.

    ", + "ReceiptFilterList$member": null + } + }, + "ReceiptFilterList": { + "base": null, + "refs": { + "ListReceiptFiltersResponse$Filters": "

    A list of IP address filter data structures, which each consist of a name, an IP address range, and whether to allow or block mail from it.

    " + } + }, + "ReceiptFilterName": { + "base": null, + "refs": { + "DeleteReceiptFilterRequest$FilterName": "

    The name of the IP address filter to delete.

    ", + "ReceiptFilter$Name": "

    The name of the IP address filter. The name must:

    • Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-).

    • Start and end with a letter or number.

    • Contain fewer than 64 characters.

    " + } + }, + "ReceiptFilterPolicy": { + "base": null, + "refs": { + "ReceiptIpFilter$Policy": "

    Indicates whether to block or allow incoming mail from the specified IP addresses.

    " + } + }, + "ReceiptIpFilter": { + "base": "

    A receipt IP address filter enables you to specify whether to accept or reject mail originating from an IP address or range of IP addresses.

    For information about setting up IP address filters, see the Amazon SES Developer Guide.

    ", + "refs": { + "ReceiptFilter$IpFilter": "

    A structure that provides the IP addresses to block or allow, and whether to block or allow incoming mail from them.

    " + } + }, + "ReceiptRule": { + "base": "

    Receipt rules enable you to specify which actions Amazon SES should take when it receives mail on behalf of one or more email addresses or domains that you own.

    Each receipt rule defines a set of email addresses or domains to which it applies. If the email addresses or domains match at least one recipient address of the message, Amazon SES executes all of the receipt rule's actions on the message.

    For information about setting up receipt rules, see the Amazon SES Developer Guide.

    ", + "refs": { + "CreateReceiptRuleRequest$Rule": "

    A data structure that contains the specified rule's name, actions, recipients, domains, enabled status, scan status, and TLS policy.

    ", + "DescribeReceiptRuleResponse$Rule": "

    A data structure that contains the specified receipt rule's name, actions, recipients, domains, enabled status, scan status, and Transport Layer Security (TLS) policy.

    ", + "ReceiptRulesList$member": null, + "UpdateReceiptRuleRequest$Rule": "

    A data structure that contains the updated receipt rule information.

    " + } + }, + "ReceiptRuleName": { + "base": null, + "refs": { + "CreateReceiptRuleRequest$After": "

    The name of an existing rule after which the new rule will be placed. If this parameter is null, the new rule will be inserted at the beginning of the rule list.

    ", + "DeleteReceiptRuleRequest$RuleName": "

    The name of the receipt rule to delete.

    ", + "DescribeReceiptRuleRequest$RuleName": "

    The name of the receipt rule.

    ", + "ReceiptRule$Name": "

    The name of the receipt rule. The name must:

    • Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-).

    • Start and end with a letter or number.

    • Contain fewer than 64 characters.

    ", + "ReceiptRuleNamesList$member": null, + "SetReceiptRulePositionRequest$RuleName": "

    The name of the receipt rule to reposition.

    ", + "SetReceiptRulePositionRequest$After": "

    The name of the receipt rule after which to place the specified receipt rule.

    " + } + }, + "ReceiptRuleNamesList": { + "base": null, + "refs": { + "ReorderReceiptRuleSetRequest$RuleNames": "

    A list of the specified receipt rule set's receipt rules in the order that you want to put them.

    " + } + }, + "ReceiptRuleSetMetadata": { + "base": "

    Information about a receipt rule set.

    A receipt rule set is a collection of rules that specify what Amazon SES should do with mail it receives on behalf of your account's verified domains.

    For information about setting up receipt rule sets, see the Amazon SES Developer Guide.

    ", + "refs": { + "DescribeActiveReceiptRuleSetResponse$Metadata": "

    The metadata for the currently active receipt rule set. The metadata consists of the rule set name and a timestamp of when the rule set was created.

    ", + "DescribeReceiptRuleSetResponse$Metadata": "

    The metadata for the receipt rule set, which consists of the rule set name and the timestamp of when the rule set was created.

    ", + "ReceiptRuleSetsLists$member": null + } + }, + "ReceiptRuleSetName": { + "base": null, + "refs": { + "CloneReceiptRuleSetRequest$RuleSetName": "

    The name of the rule set to create. The name must:

    • Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-).

    • Start and end with a letter or number.

    • Contain fewer than 64 characters.

    ", + "CloneReceiptRuleSetRequest$OriginalRuleSetName": "

    The name of the rule set to clone.

    ", + "CreateReceiptRuleRequest$RuleSetName": "

    The name of the rule set to which to add the rule.

    ", + "CreateReceiptRuleSetRequest$RuleSetName": "

    The name of the rule set to create. The name must:

    • Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-).

    • Start and end with a letter or number.

    • Contain fewer than 64 characters.

    ", + "DeleteReceiptRuleRequest$RuleSetName": "

    The name of the receipt rule set that contains the receipt rule to delete.

    ", + "DeleteReceiptRuleSetRequest$RuleSetName": "

    The name of the receipt rule set to delete.

    ", + "DescribeReceiptRuleRequest$RuleSetName": "

    The name of the receipt rule set to which the receipt rule belongs.

    ", + "DescribeReceiptRuleSetRequest$RuleSetName": "

    The name of the receipt rule set to describe.

    ", + "ReceiptRuleSetMetadata$Name": "

    The name of the receipt rule set. The name must:

    • Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-).

    • Start and end with a letter or number.

    • Contain fewer than 64 characters.

    ", + "ReorderReceiptRuleSetRequest$RuleSetName": "

    The name of the receipt rule set to reorder.

    ", + "SetActiveReceiptRuleSetRequest$RuleSetName": "

    The name of the receipt rule set to make active. Setting this value to null disables all email receiving.

    ", + "SetReceiptRulePositionRequest$RuleSetName": "

    The name of the receipt rule set that contains the receipt rule to reposition.

    ", + "UpdateReceiptRuleRequest$RuleSetName": "

    The name of the receipt rule set to which the receipt rule belongs.

    " + } + }, + "ReceiptRuleSetsLists": { + "base": null, + "refs": { + "ListReceiptRuleSetsResponse$RuleSets": "

    The metadata for the receipt rule sets under your account. The metadata for each rule set consists of the rule set name and the timestamp of when the rule set was created.

    " + } + }, + "ReceiptRulesList": { + "base": null, + "refs": { + "DescribeActiveReceiptRuleSetResponse$Rules": "

    The receipt rules that belong to the active rule set.

    ", + "DescribeReceiptRuleSetResponse$Rules": "

    A list of the receipt rules that belong to the specified receipt rule set.

    " + } + }, + "Recipient": { + "base": null, + "refs": { + "RecipientsList$member": null + } + }, + "RecipientDsnFields": { + "base": "

    Recipient-related information to include in the Delivery Status Notification (DSN) when an email that Amazon SES receives on your behalf bounces.

    For information about receiving email through Amazon SES, see the Amazon SES Developer Guide.

    ", + "refs": { + "BouncedRecipientInfo$RecipientDsnFields": "

    Recipient-related DSN fields, most of which would normally be filled in automatically when provided with a BounceType. You must provide either this parameter or BounceType.

    " + } + }, + "RecipientsList": { + "base": null, + "refs": { + "ReceiptRule$Recipients": "

    The recipient domains and email addresses to which the receipt rule applies. If this field is not specified, this rule will match all recipients under all verified domains.

    " + } + }, + "RemoteMta": { + "base": null, + "refs": { + "RecipientDsnFields$RemoteMta": "

    The MTA to which the remote MTA attempted to deliver the message, formatted as specified in RFC 3464 (mta-name-type; mta-name). This parameter typically applies only to propagating synchronous bounces.

    " + } + }, + "ReorderReceiptRuleSetRequest": { + "base": "

    Represents a request to reorder the receipt rules within a receipt rule set. You use receipt rule sets to receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "ReorderReceiptRuleSetResponse": { + "base": "

    An empty element returned on a successful request.

    ", + "refs": { + } + }, + "ReportingMta": { + "base": null, + "refs": { + "MessageDsn$ReportingMta": "

    The reporting MTA that attempted to deliver the message, formatted as specified in RFC 3464 (mta-name-type; mta-name). The default value is dns; inbound-smtp.[region].amazonaws.com.

    " + } + }, + "RuleDoesNotExistException": { + "base": "

    Indicates that the provided receipt rule does not exist.

    ", + "refs": { + } + }, + "RuleOrRuleSetName": { + "base": null, + "refs": { + "AlreadyExistsException$Name": null, + "CannotDeleteException$Name": null, + "RuleDoesNotExistException$Name": null, + "RuleSetDoesNotExistException$Name": null + } + }, + "RuleSetDoesNotExistException": { + "base": "

    Indicates that the provided receipt rule set does not exist.

    ", + "refs": { + } + }, + "S3Action": { + "base": "

    When included in a receipt rule, this action saves the received message to an Amazon Simple Storage Service (Amazon S3) bucket and, optionally, publishes a notification to Amazon Simple Notification Service (Amazon SNS).

    For Amazon SES to write emails to your Amazon S3 bucket, encrypt your emails with an AWS KMS key, or publish to an Amazon SNS topic of another account, Amazon SES must have permission to access those resources. For information about giving permissions, see the Amazon SES Developer Guide.

    When you save your emails to an Amazon S3 bucket, the maximum email size (including headers) is 30 MB. Emails larger than that will bounce.

    For information about specifying Amazon S3 actions in receipt rules, see the Amazon SES Developer Guide.
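
    A sketch of a receipt rule that stores incoming mail in S3, using the vendored Go SDK; the rule set, bucket, and recipient are hypothetical, and the bucket policy must already grant Amazon SES write access.

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ses"
    )

    // saveToS3 adds a rule that writes mail for support@example.com to an
    // S3 bucket under the support/ key prefix, with spam/virus scanning on.
    func saveToS3(svc *ses.SES) error {
        _, err := svc.CreateReceiptRule(&ses.CreateReceiptRuleInput{
            RuleSetName: aws.String("default-rule-set"),
            Rule: &ses.ReceiptRule{
                Name:        aws.String("save-support-mail"),
                Enabled:     aws.Bool(true),
                ScanEnabled: aws.Bool(true),
                Recipients:  []*string{aws.String("support@example.com")},
                Actions: []*ses.ReceiptAction{{
                    S3Action: &ses.S3Action{
                        BucketName:      aws.String("example-inbound-mail"),
                        ObjectKeyPrefix: aws.String("support/"),
                    },
                }},
            },
        })
        return err
    }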

    ", + "refs": { + "ReceiptAction$S3Action": "

    Saves the received message to an Amazon Simple Storage Service (Amazon S3) bucket and, optionally, publishes a notification to Amazon SNS.

    " + } + }, + "S3BucketName": { + "base": null, + "refs": { + "InvalidS3ConfigurationException$Bucket": null, + "S3Action$BucketName": "

    The name of the Amazon S3 bucket to which to save the received email.

    " + } + }, + "S3KeyPrefix": { + "base": null, + "refs": { + "S3Action$ObjectKeyPrefix": "

    The key prefix of the Amazon S3 bucket. The key prefix is similar to a directory name that enables you to store similar data under the same directory in a bucket.

    " + } + }, + "SNSAction": { + "base": "

    When included in a receipt rule, this action publishes a notification to Amazon Simple Notification Service (Amazon SNS). This action includes a complete copy of the email content in the Amazon SNS notifications. Amazon SNS notifications for all other actions simply provide information about the email. They do not include the email content itself.

    If you own the Amazon SNS topic, you don't need to do anything to give Amazon SES permission to publish emails to it. However, if you don't own the Amazon SNS topic, you need to attach a policy to the topic to give Amazon SES permissions to access it. For information about giving permissions, see the Amazon SES Developer Guide.

    You can only publish emails that are 150 KB or less (including the header) to Amazon SNS. Larger emails will bounce. If you anticipate emails larger than 150 KB, use the S3 action instead.

    For information about using a receipt rule to publish an Amazon SNS notification, see the Amazon SES Developer Guide.

    ", + "refs": { + "ReceiptAction$SNSAction": "

    Publishes the email content within a notification to Amazon SNS.

    " + } + }, + "SNSActionEncoding": { + "base": null, + "refs": { + "SNSAction$Encoding": "

    The encoding to use for the email within the Amazon SNS notification. UTF-8 is easier to use, but may not preserve all special characters when a message was encoded with a different encoding format. Base64 preserves all special characters. The default value is UTF-8.

    " + } + }, + "SendBounceRequest": { + "base": "

    Represents a request to send a bounce message to the sender of an email you received through Amazon SES.

    ", + "refs": { + } + }, + "SendBounceResponse": { + "base": "

    Represents a unique message ID.

    ", + "refs": { + } + }, + "SendDataPoint": { + "base": "

    Represents sending statistics data. Each SendDataPoint contains statistics for a 15-minute period of sending activity.

    ", + "refs": { + "SendDataPointList$member": null + } + }, + "SendDataPointList": { + "base": null, + "refs": { + "GetSendStatisticsResponse$SendDataPoints": "

    A list of data points, each of which represents 15 minutes of activity.

    " + } + }, + "SendEmailRequest": { + "base": "

    Represents a request to send a single formatted email using Amazon SES. For more information, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "SendEmailResponse": { + "base": "

    Represents a unique message ID.

    ", + "refs": { + } + }, + "SendRawEmailRequest": { + "base": "

    Represents a request to send a single raw email using Amazon SES. For more information, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "SendRawEmailResponse": { + "base": "

    Represents a unique message ID.

    ", + "refs": { + } + }, + "SentLast24Hours": { + "base": null, + "refs": { + "GetSendQuotaResponse$SentLast24Hours": "

    The number of emails sent during the previous 24 hours.

    " + } + }, + "SetActiveReceiptRuleSetRequest": { + "base": "

    Represents a request to set a receipt rule set as the active receipt rule set. You use receipt rule sets to receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "SetActiveReceiptRuleSetResponse": { + "base": "

    An empty element returned on a successful request.

    ", + "refs": { + } + }, + "SetIdentityDkimEnabledRequest": { + "base": "

    Represents a request to enable or disable Amazon SES Easy DKIM signing for an identity. For more information about setting up Easy DKIM, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "SetIdentityDkimEnabledResponse": { + "base": "

    An empty element returned on a successful request.

    ", + "refs": { + } + }, + "SetIdentityFeedbackForwardingEnabledRequest": { + "base": "

    Represents a request to enable or disable whether Amazon SES forwards you bounce and complaint notifications through email. For information about email feedback forwarding, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "SetIdentityFeedbackForwardingEnabledResponse": { + "base": "

    An empty element returned on a successful request.

    ", + "refs": { + } + }, + "SetIdentityHeadersInNotificationsEnabledRequest": { + "base": "

    Represents a request to set whether Amazon SES includes the original email headers in the Amazon SNS notifications of a specified type. For information about notifications, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "SetIdentityHeadersInNotificationsEnabledResponse": { + "base": "

    An empty element returned on a successful request.

    ", + "refs": { + } + }, + "SetIdentityMailFromDomainRequest": { + "base": "

    Represents a request to enable or disable the Amazon SES custom MAIL FROM domain setup for a verified identity. For information about using a custom MAIL FROM domain, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "SetIdentityMailFromDomainResponse": { + "base": "

    An empty element returned on a successful request.

    ", + "refs": { + } + }, + "SetIdentityNotificationTopicRequest": { + "base": "

    Represents a request to specify the Amazon SNS topic to which Amazon SES will publish bounce, complaint, or delivery notifications for emails sent with that identity as the Source. For information about Amazon SES notifications, see the Amazon SES Developer Guide.
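A minimal sketch, assuming a placeholder identity and topic ARN, of routing bounce notifications for an identity to an SNS topic:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ses"
)

func main() {
	svc := ses.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	// NotificationType may also be "Complaint" or "Delivery".
	_, err := svc.SetIdentityNotificationTopic(&ses.SetIdentityNotificationTopicInput{
		Identity:         aws.String("example.com"), // placeholder
		NotificationType: aws.String("Bounce"),
		SnsTopic:         aws.String("arn:aws:sns:us-east-1:123456789012:ses-bounces"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
}
```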

    ", + "refs": { + } + }, + "SetIdentityNotificationTopicResponse": { + "base": "

    An empty element returned on a successful request.

    ", + "refs": { + } + }, + "SetReceiptRulePositionRequest": { + "base": "

    Represents a request to set the position of a receipt rule in a receipt rule set. You use receipt rule sets to receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "SetReceiptRulePositionResponse": { + "base": "

    An empty element returned on a successful request.

    ", + "refs": { + } + }, + "StopAction": { + "base": "

    When included in a receipt rule, this action terminates the evaluation of the receipt rule set and, optionally, publishes a notification to Amazon Simple Notification Service (Amazon SNS).

    For information about setting a stop action in a receipt rule, see the Amazon SES Developer Guide.

    ", + "refs": { + "ReceiptAction$StopAction": "

    Terminates the evaluation of the receipt rule set and optionally publishes a notification to Amazon SNS.

    " + } + }, + "StopScope": { + "base": null, + "refs": { + "StopAction$Scope": "

    The scope to which the Stop action applies. That is, what is being stopped.

    " + } + }, + "Timestamp": { + "base": null, + "refs": { + "ReceiptRuleSetMetadata$CreatedTimestamp": "

    The date and time the receipt rule set was created.

    ", + "SendDataPoint$Timestamp": "

    Time of the data point.

    " + } + }, + "TlsPolicy": { + "base": null, + "refs": { + "ReceiptRule$TlsPolicy": "

    Specifies whether Amazon SES should require that incoming email is delivered over a connection encrypted with Transport Layer Security (TLS). If this parameter is set to Require, Amazon SES will bounce emails that are not received over TLS. The default is Optional.
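A hedged sketch of enforcing TLS on a rule via UpdateReceiptRule; names are placeholders, and since the call replaces the entire rule, real code would first fetch the current rule with DescribeReceiptRule and modify it.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ses"
)

func main() {
	svc := ses.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	_, err := svc.UpdateReceiptRule(&ses.UpdateReceiptRuleInput{
		RuleSetName: aws.String("my-rule-set"), // placeholder
		Rule: &ses.ReceiptRule{
			Name:      aws.String("require-tls"), // placeholder
			Enabled:   aws.Bool(true),
			TlsPolicy: aws.String("Require"), // bounce mail not delivered over TLS
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```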

    " + } + }, + "UpdateReceiptRuleRequest": { + "base": "

    Represents a request to update a receipt rule. You use receipt rules to receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "UpdateReceiptRuleResponse": { + "base": "

    An empty element returned on a successful request.

    ", + "refs": { + } + }, + "VerificationAttributes": { + "base": null, + "refs": { + "GetIdentityVerificationAttributesResponse$VerificationAttributes": "

    A map of Identities to IdentityVerificationAttributes objects.

    " + } + }, + "VerificationStatus": { + "base": null, + "refs": { + "IdentityDkimAttributes$DkimVerificationStatus": "

    Describes whether Amazon SES has successfully verified the DKIM DNS records (tokens) published in the domain name's DNS. (This only applies to domain identities, not email address identities.)

    ", + "IdentityVerificationAttributes$VerificationStatus": "

    The verification status of the identity: \"Pending\", \"Success\", \"Failed\", or \"TemporaryFailure\".

    " + } + }, + "VerificationToken": { + "base": null, + "refs": { + "IdentityVerificationAttributes$VerificationToken": "

    The verification token for a domain identity. Null for email address identities.

    ", + "VerificationTokenList$member": null, + "VerifyDomainIdentityResponse$VerificationToken": "

    A TXT record that must be placed in the DNS settings for the domain, in order to complete domain verification.
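As an illustration, a sketch that starts domain verification and prints the TXT record to publish; the domain is a placeholder, and the `_amazonses.<domain>` record name follows the SES developer guide convention.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ses"
)

func main() {
	svc := ses.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))
	domain := "example.com" // placeholder

	out, err := svc.VerifyDomainIdentity(&ses.VerifyDomainIdentityInput{
		Domain: aws.String(domain),
	})
	if err != nil {
		log.Fatal(err)
	}
	// Publish the token as a TXT record; SES polls DNS and marks the
	// identity verified once it observes the record.
	fmt.Printf("_amazonses.%s TXT \"%s\"\n", domain, aws.StringValue(out.VerificationToken))
}
```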

    " + } + }, + "VerificationTokenList": { + "base": null, + "refs": { + "IdentityDkimAttributes$DkimTokens": "

    A set of character strings that represent the domain's identity. Using these tokens, you will need to create DNS CNAME records that point to DKIM public keys hosted by Amazon SES. Amazon Web Services will eventually detect that you have updated your DNS records; this detection process may take up to 72 hours. Upon successful detection, Amazon SES will be able to DKIM-sign email originating from that domain. (This only applies to domain identities, not email address identities.)

    For more information about creating DNS records using DKIM tokens, go to the Amazon SES Developer Guide.

    ", + "VerifyDomainDkimResponse$DkimTokens": "

    A set of character strings that represent the domain's identity. If the identity is an email address, the tokens represent the domain of that address.

    Using these tokens, you will need to create DNS CNAME records that point to DKIM public keys hosted by Amazon SES. Amazon Web Services will eventually detect that you have updated your DNS records; this detection process may take up to 72 hours. Upon successful detection, Amazon SES will be able to DKIM-sign emails originating from that domain.

    For more information about creating DNS records using DKIM tokens, go to the Amazon SES Developer Guide.
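A hedged sketch that fetches the DKIM tokens and prints the CNAME records they imply; the domain is a placeholder.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ses"
)

func main() {
	svc := ses.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))
	domain := "example.com" // placeholder

	out, err := svc.VerifyDomainDkim(&ses.VerifyDomainDkimInput{Domain: aws.String(domain)})
	if err != nil {
		log.Fatal(err)
	}
	// One CNAME per token, pointing at the DKIM public key hosted by SES.
	for _, tok := range out.DkimTokens {
		t := aws.StringValue(tok)
		fmt.Printf("%s._domainkey.%s CNAME %s.dkim.amazonses.com\n", t, domain, t)
	}
}
```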

    " + } + }, + "VerifyDomainDkimRequest": { + "base": "

    Represents a request to generate the CNAME records needed to set up Easy DKIM with Amazon SES. For more information about setting up Easy DKIM, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "VerifyDomainDkimResponse": { + "base": "

    Returns CNAME records that you must publish to the DNS server of your domain to set up Easy DKIM with Amazon SES.

    ", + "refs": { + } + }, + "VerifyDomainIdentityRequest": { + "base": "

    Represents a request to begin Amazon SES domain verification and to generate the TXT records that you must publish to the DNS server of your domain to complete the verification. For information about domain verification, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "VerifyDomainIdentityResponse": { + "base": "

    Returns a TXT record that you must publish to the DNS server of your domain to complete domain verification with Amazon SES.

    ", + "refs": { + } + }, + "VerifyEmailAddressRequest": { + "base": "

    Represents a request to begin email address verification with Amazon SES. For information about email address verification, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "VerifyEmailIdentityRequest": { + "base": "

    Represents a request to begin email address verification with Amazon SES. For information about email address verification, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "VerifyEmailIdentityResponse": { + "base": "

    An empty element returned on a successful request.

    ", + "refs": { + } + }, + "WorkmailAction": { + "base": "

    When included in a receipt rule, this action calls Amazon WorkMail and, optionally, publishes a notification to Amazon Simple Notification Service (Amazon SNS). You will typically not use this action directly because Amazon WorkMail adds the rule automatically during its setup procedure.

    For information about using a receipt rule to call Amazon WorkMail, see the Amazon SES Developer Guide.

    ", + "refs": { + "ReceiptAction$WorkmailAction": "

    Calls Amazon WorkMail and, optionally, publishes a notification to Amazon SNS.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/email/2010-12-01/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/email/2010-12-01/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/email/2010-12-01/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/email/2010-12-01/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/email/2010-12-01/paginators-1.json new file mode 100644 index 000000000..e12811f5e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/email/2010-12-01/paginators-1.json @@ -0,0 +1,13 @@ +{ + "pagination": { + "ListIdentities": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxItems", + "result_key": "Identities" + }, + "ListVerifiedEmailAddresses": { + "result_key": "VerifiedEmailAddresses" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/email/2010-12-01/waiters-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/email/2010-12-01/waiters-2.json new file mode 100644 index 000000000..b585d309e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/email/2010-12-01/waiters-2.json @@ -0,0 +1,18 @@ +{ + "version": 2, + "waiters": { + "IdentityExists": { + "delay": 3, + "operation": "GetIdentityVerificationAttributes", + "maxAttempts": 20, + "acceptors": [ + { + "expected": "Success", + "matcher": "pathAll", + "state": "success", + "argument": "VerificationAttributes.*.VerificationStatus" + } + ] + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/es/2015-01-01/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/es/2015-01-01/api-2.json new file mode 100644 index 000000000..9154cb8a5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/es/2015-01-01/api-2.json @@ -0,0 +1,764 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-01-01", + "endpointPrefix":"es", + "serviceFullName":"Amazon Elasticsearch Service", + "signatureVersion":"v4", + "protocol":"rest-json" + }, + "operations":{ + "AddTags":{ + "name":"AddTags", + "http":{ + "method":"POST", + "requestUri":"/2015-01-01/tags" + }, + "input":{"shape":"AddTagsRequest"}, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "CreateElasticsearchDomain":{ + "name":"CreateElasticsearchDomain", + "http":{ + "method":"POST", + "requestUri":"/2015-01-01/es/domain" + }, + "input":{"shape":"CreateElasticsearchDomainRequest"}, + "output":{"shape":"CreateElasticsearchDomainResponse"}, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"DisabledOperationException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"InternalException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"InvalidTypeException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"ResourceAlreadyExistsException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"ValidationException", + 
"error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "DeleteElasticsearchDomain":{ + "name":"DeleteElasticsearchDomain", + "http":{ + "method":"DELETE", + "requestUri":"/2015-01-01/es/domain/{DomainName}" + }, + "input":{"shape":"DeleteElasticsearchDomainRequest"}, + "output":{"shape":"DeleteElasticsearchDomainResponse"}, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "DescribeElasticsearchDomain":{ + "name":"DescribeElasticsearchDomain", + "http":{ + "method":"GET", + "requestUri":"/2015-01-01/es/domain/{DomainName}" + }, + "input":{"shape":"DescribeElasticsearchDomainRequest"}, + "output":{"shape":"DescribeElasticsearchDomainResponse"}, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "DescribeElasticsearchDomainConfig":{ + "name":"DescribeElasticsearchDomainConfig", + "http":{ + "method":"GET", + "requestUri":"/2015-01-01/es/domain/{DomainName}/config" + }, + "input":{"shape":"DescribeElasticsearchDomainConfigRequest"}, + "output":{"shape":"DescribeElasticsearchDomainConfigResponse"}, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "DescribeElasticsearchDomains":{ + "name":"DescribeElasticsearchDomains", + "http":{ + "method":"POST", + "requestUri":"/2015-01-01/es/domain-info" + }, + "input":{"shape":"DescribeElasticsearchDomainsRequest"}, + "output":{"shape":"DescribeElasticsearchDomainsResponse"}, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "ListDomainNames":{ + "name":"ListDomainNames", + "http":{ + "method":"GET", + "requestUri":"/2015-01-01/domain" + }, + "output":{"shape":"ListDomainNamesResponse"}, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "ListTags":{ + "name":"ListTags", + "http":{ + "method":"GET", + "requestUri":"/2015-01-01/tags/" + }, + "input":{"shape":"ListTagsRequest"}, + "output":{"shape":"ListTagsResponse"}, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "RemoveTags":{ + "name":"RemoveTags", + "http":{ + "method":"POST", + 
"requestUri":"/2015-01-01/tags-removal" + }, + "input":{"shape":"RemoveTagsRequest"}, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "UpdateElasticsearchDomainConfig":{ + "name":"UpdateElasticsearchDomainConfig", + "http":{ + "method":"POST", + "requestUri":"/2015-01-01/es/domain/{DomainName}/config" + }, + "input":{"shape":"UpdateElasticsearchDomainConfigRequest"}, + "output":{"shape":"UpdateElasticsearchDomainConfigResponse"}, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"InvalidTypeException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + } + }, + "shapes":{ + "ARN":{"type":"string"}, + "AccessPoliciesStatus":{ + "type":"structure", + "required":[ + "Options", + "Status" + ], + "members":{ + "Options":{"shape":"PolicyDocument"}, + "Status":{"shape":"OptionStatus"} + } + }, + "AddTagsRequest":{ + "type":"structure", + "required":[ + "ARN", + "TagList" + ], + "members":{ + "ARN":{"shape":"ARN"}, + "TagList":{"shape":"TagList"} + } + }, + "AdvancedOptions":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "AdvancedOptionsStatus":{ + "type":"structure", + "required":[ + "Options", + "Status" + ], + "members":{ + "Options":{"shape":"AdvancedOptions"}, + "Status":{"shape":"OptionStatus"} + } + }, + "BaseException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "Boolean":{"type":"boolean"}, + "CreateElasticsearchDomainRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"}, + "ElasticsearchClusterConfig":{"shape":"ElasticsearchClusterConfig"}, + "EBSOptions":{"shape":"EBSOptions"}, + "AccessPolicies":{"shape":"PolicyDocument"}, + "SnapshotOptions":{"shape":"SnapshotOptions"}, + "AdvancedOptions":{"shape":"AdvancedOptions"} + } + }, + "CreateElasticsearchDomainResponse":{ + "type":"structure", + "members":{ + "DomainStatus":{"shape":"ElasticsearchDomainStatus"} + } + }, + "DeleteElasticsearchDomainRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{ + "shape":"DomainName", + "location":"uri", + "locationName":"DomainName" + } + } + }, + "DeleteElasticsearchDomainResponse":{ + "type":"structure", + "members":{ + "DomainStatus":{"shape":"ElasticsearchDomainStatus"} + } + }, + "DescribeElasticsearchDomainConfigRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{ + "shape":"DomainName", + "location":"uri", + "locationName":"DomainName" + } + } + }, + "DescribeElasticsearchDomainConfigResponse":{ + "type":"structure", + "required":["DomainConfig"], + "members":{ + "DomainConfig":{"shape":"ElasticsearchDomainConfig"} + } + }, + "DescribeElasticsearchDomainRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{ + "shape":"DomainName", + "location":"uri", + "locationName":"DomainName" + } + } + 
}, + "DescribeElasticsearchDomainResponse":{ + "type":"structure", + "required":["DomainStatus"], + "members":{ + "DomainStatus":{"shape":"ElasticsearchDomainStatus"} + } + }, + "DescribeElasticsearchDomainsRequest":{ + "type":"structure", + "required":["DomainNames"], + "members":{ + "DomainNames":{"shape":"DomainNameList"} + } + }, + "DescribeElasticsearchDomainsResponse":{ + "type":"structure", + "required":["DomainStatusList"], + "members":{ + "DomainStatusList":{"shape":"ElasticsearchDomainStatusList"} + } + }, + "DisabledOperationException":{ + "type":"structure", + "members":{ + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "DomainId":{ + "type":"string", + "min":1, + "max":64 + }, + "DomainInfo":{ + "type":"structure", + "members":{ + "DomainName":{"shape":"DomainName"} + } + }, + "DomainInfoList":{ + "type":"list", + "member":{"shape":"DomainInfo"} + }, + "DomainName":{ + "type":"string", + "min":3, + "max":28, + "pattern":"[a-z][a-z0-9\\-]+" + }, + "DomainNameList":{ + "type":"list", + "member":{"shape":"DomainName"} + }, + "EBSOptions":{ + "type":"structure", + "members":{ + "EBSEnabled":{"shape":"Boolean"}, + "VolumeType":{"shape":"VolumeType"}, + "VolumeSize":{"shape":"IntegerClass"}, + "Iops":{"shape":"IntegerClass"} + } + }, + "EBSOptionsStatus":{ + "type":"structure", + "required":[ + "Options", + "Status" + ], + "members":{ + "Options":{"shape":"EBSOptions"}, + "Status":{"shape":"OptionStatus"} + } + }, + "ESPartitionInstanceType":{ + "type":"string", + "enum":[ + "m3.medium.elasticsearch", + "m3.large.elasticsearch", + "m3.xlarge.elasticsearch", + "m3.2xlarge.elasticsearch", + "t2.micro.elasticsearch", + "t2.small.elasticsearch", + "t2.medium.elasticsearch", + "r3.large.elasticsearch", + "r3.xlarge.elasticsearch", + "r3.2xlarge.elasticsearch", + "r3.4xlarge.elasticsearch", + "r3.8xlarge.elasticsearch", + "i2.xlarge.elasticsearch", + "i2.2xlarge.elasticsearch" + ] + }, + "ElasticsearchClusterConfig":{ + "type":"structure", + "members":{ + "InstanceType":{"shape":"ESPartitionInstanceType"}, + "InstanceCount":{"shape":"IntegerClass"}, + "DedicatedMasterEnabled":{"shape":"Boolean"}, + "ZoneAwarenessEnabled":{"shape":"Boolean"}, + "DedicatedMasterType":{"shape":"ESPartitionInstanceType"}, + "DedicatedMasterCount":{"shape":"IntegerClass"} + } + }, + "ElasticsearchClusterConfigStatus":{ + "type":"structure", + "required":[ + "Options", + "Status" + ], + "members":{ + "Options":{"shape":"ElasticsearchClusterConfig"}, + "Status":{"shape":"OptionStatus"} + } + }, + "ElasticsearchDomainConfig":{ + "type":"structure", + "members":{ + "ElasticsearchClusterConfig":{"shape":"ElasticsearchClusterConfigStatus"}, + "EBSOptions":{"shape":"EBSOptionsStatus"}, + "AccessPolicies":{"shape":"AccessPoliciesStatus"}, + "SnapshotOptions":{"shape":"SnapshotOptionsStatus"}, + "AdvancedOptions":{"shape":"AdvancedOptionsStatus"} + } + }, + "ElasticsearchDomainStatus":{ + "type":"structure", + "required":[ + "DomainId", + "DomainName", + "ARN", + "ElasticsearchClusterConfig" + ], + "members":{ + "DomainId":{"shape":"DomainId"}, + "DomainName":{"shape":"DomainName"}, + "ARN":{"shape":"ARN"}, + "Created":{"shape":"Boolean"}, + "Deleted":{"shape":"Boolean"}, + "Endpoint":{"shape":"ServiceUrl"}, + "Processing":{"shape":"Boolean"}, + "ElasticsearchClusterConfig":{"shape":"ElasticsearchClusterConfig"}, + "EBSOptions":{"shape":"EBSOptions"}, + "AccessPolicies":{"shape":"PolicyDocument"}, + "SnapshotOptions":{"shape":"SnapshotOptions"}, + "AdvancedOptions":{"shape":"AdvancedOptions"} + } + }, + 
"ElasticsearchDomainStatusList":{ + "type":"list", + "member":{"shape":"ElasticsearchDomainStatus"} + }, + "ErrorMessage":{"type":"string"}, + "IntegerClass":{"type":"integer"}, + "InternalException":{ + "type":"structure", + "members":{ + }, + "error":{"httpStatusCode":500}, + "exception":true + }, + "InvalidTypeException":{ + "type":"structure", + "members":{ + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "ListDomainNamesResponse":{ + "type":"structure", + "members":{ + "DomainNames":{"shape":"DomainInfoList"} + } + }, + "ListTagsRequest":{ + "type":"structure", + "required":["ARN"], + "members":{ + "ARN":{ + "shape":"ARN", + "location":"querystring", + "locationName":"arn" + } + } + }, + "ListTagsResponse":{ + "type":"structure", + "members":{ + "TagList":{"shape":"TagList"} + } + }, + "OptionState":{ + "type":"string", + "enum":[ + "RequiresIndexDocuments", + "Processing", + "Active" + ] + }, + "OptionStatus":{ + "type":"structure", + "required":[ + "CreationDate", + "UpdateDate", + "State" + ], + "members":{ + "CreationDate":{"shape":"UpdateTimestamp"}, + "UpdateDate":{"shape":"UpdateTimestamp"}, + "UpdateVersion":{"shape":"UIntValue"}, + "State":{"shape":"OptionState"}, + "PendingDeletion":{"shape":"Boolean"} + } + }, + "PolicyDocument":{"type":"string"}, + "RemoveTagsRequest":{ + "type":"structure", + "required":[ + "ARN", + "TagKeys" + ], + "members":{ + "ARN":{"shape":"ARN"}, + "TagKeys":{"shape":"StringList"} + } + }, + "ResourceAlreadyExistsException":{ + "type":"structure", + "members":{ + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "ServiceUrl":{"type":"string"}, + "SnapshotOptions":{ + "type":"structure", + "members":{ + "AutomatedSnapshotStartHour":{"shape":"IntegerClass"} + } + }, + "SnapshotOptionsStatus":{ + "type":"structure", + "required":[ + "Options", + "Status" + ], + "members":{ + "Options":{"shape":"SnapshotOptions"}, + "Status":{"shape":"OptionStatus"} + } + }, + "String":{"type":"string"}, + "StringList":{ + "type":"list", + "member":{"shape":"String"} + }, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{"shape":"TagKey"}, + "Value":{"shape":"TagValue"} + } + }, + "TagKey":{ + "type":"string", + "min":1, + "max":128 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"} + }, + "TagValue":{ + "type":"string", + "min":0, + "max":256 + }, + "UIntValue":{ + "type":"integer", + "min":0 + }, + "UpdateElasticsearchDomainConfigRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{ + "shape":"DomainName", + "location":"uri", + "locationName":"DomainName" + }, + "ElasticsearchClusterConfig":{"shape":"ElasticsearchClusterConfig"}, + "EBSOptions":{"shape":"EBSOptions"}, + "SnapshotOptions":{"shape":"SnapshotOptions"}, + "AdvancedOptions":{"shape":"AdvancedOptions"}, + "AccessPolicies":{"shape":"PolicyDocument"} + } + }, + "UpdateElasticsearchDomainConfigResponse":{ + "type":"structure", + "required":["DomainConfig"], + "members":{ + "DomainConfig":{"shape":"ElasticsearchDomainConfig"} + } + }, + "UpdateTimestamp":{"type":"timestamp"}, + "ValidationException":{ + "type":"structure", + "members":{ + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "VolumeType":{ + "type":"string", + 
"enum":[ + "standard", + "gp2", + "io1" + ] + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/es/2015-01-01/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/es/2015-01-01/docs-2.json new file mode 100644 index 000000000..b03bde836 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/es/2015-01-01/docs-2.json @@ -0,0 +1,401 @@ +{ + "version": "2.0", + "operations": { + "AddTags": "

    Attaches tags to an existing Elasticsearch domain. Tags are a set of case-sensitive key value pairs. An Elasticsearch domain may have up to 10 tags. See Tagging Amazon Elasticsearch Service Domains for more information.
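For illustration, a minimal AddTags call with the vendored elasticsearchservice client; the domain ARN is a placeholder, and the tag pair reuses the project/cost-center example from the Tag documentation later in this file.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	es "github.com/aws/aws-sdk-go/service/elasticsearchservice"
)

func main() {
	svc := es.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	// A domain can carry at most 10 tags.
	_, err := svc.AddTags(&es.AddTagsInput{
		ARN: aws.String("arn:aws:es:us-east-1:123456789012:domain/my-domain"), // placeholder
		TagList: []*es.Tag{
			{Key: aws.String("project"), Value: aws.String("Trinity")},
			{Key: aws.String("cost-center"), Value: aws.String("Trinity")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```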

    ", + "CreateElasticsearchDomain": "

    Creates a new Elasticsearch domain. For more information, see Creating Elasticsearch Domains in the Amazon Elasticsearch Service Developer Guide.
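A hedged sketch of a minimal CreateElasticsearchDomain call; the domain name and sizing values are placeholders chosen from the enums and ranges in the api-2.json above.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	es "github.com/aws/aws-sdk-go/service/elasticsearchservice"
)

func main() {
	svc := es.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	out, err := svc.CreateElasticsearchDomain(&es.CreateElasticsearchDomainInput{
		DomainName: aws.String("my-domain"), // 3-28 chars, pattern [a-z][a-z0-9-]+
		ElasticsearchClusterConfig: &es.ElasticsearchClusterConfig{
			InstanceType:  aws.String("m3.medium.elasticsearch"),
			InstanceCount: aws.Int64(2),
		},
		EBSOptions: &es.EBSOptions{
			EBSEnabled: aws.Bool(true),
			VolumeType: aws.String("gp2"),
			VolumeSize: aws.Int64(10), // GiB
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("domain ARN:", aws.StringValue(out.DomainStatus.ARN))
}
```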

    ", + "DeleteElasticsearchDomain": "

    Permanently deletes the specified Elasticsearch domain and all of its data. Once a domain is deleted, it cannot be recovered.

    ", + "DescribeElasticsearchDomain": "

    Returns domain configuration information about the specified Elasticsearch domain, including the domain ID, domain endpoint, and domain ARN.

    ", + "DescribeElasticsearchDomainConfig": "

    Provides cluster configuration information about the specified Elasticsearch domain, such as the state, creation date, update version, and update date for cluster options.

    ", + "DescribeElasticsearchDomains": "

    Returns domain configuration information about the specified Elasticsearch domains, including the domain ID, domain endpoint, and domain ARN.

    ", + "ListDomainNames": "

    Returns the names of all Elasticsearch domains owned by the current user's account.

    ", + "ListTags": "

    Returns all tags for the given Elasticsearch domain.

    ", + "RemoveTags": "

    Removes the specified set of tags from the specified Elasticsearch domain.

    ", + "UpdateElasticsearchDomainConfig": "

    Modifies the cluster configuration of the specified Elasticsearch domain, such as setting the instance type and the number of instances.
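For illustration, a sketch that resizes a cluster and moves the automated snapshot hour in one UpdateElasticsearchDomainConfig call; the domain name is a placeholder.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	es "github.com/aws/aws-sdk-go/service/elasticsearchservice"
)

func main() {
	svc := es.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	// Resize the cluster and move the daily automated snapshot to 03:00 UTC.
	_, err := svc.UpdateElasticsearchDomainConfig(&es.UpdateElasticsearchDomainConfigInput{
		DomainName: aws.String("my-domain"), // placeholder
		ElasticsearchClusterConfig: &es.ElasticsearchClusterConfig{
			InstanceCount: aws.Int64(4),
		},
		SnapshotOptions: &es.SnapshotOptions{
			AutomatedSnapshotStartHour: aws.Int64(3),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```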

    " + }, + "service": "Amazon Elasticsearch Configuration Service

    Use the Amazon Elasticsearch configuration API to create, configure, and manage Elasticsearch domains.

    The endpoint for configuration service requests is region-specific: es.region.amazonaws.com. For example, es.us-east-1.amazonaws.com. For a current list of supported regions and endpoints, see Regions and Endpoints.
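A minimal sketch of constructing the configuration-service client (the SDK resolves the region-specific endpoint, es.us-east-1.amazonaws.com here, from the configured region) and listing domains; the region is a placeholder.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	es "github.com/aws/aws-sdk-go/service/elasticsearchservice"
)

func main() {
	sess := session.New(&aws.Config{Region: aws.String("us-east-1")})
	svc := es.New(sess)

	out, err := svc.ListDomainNames(&es.ListDomainNamesInput{})
	if err != nil {
		log.Fatal(err)
	}
	for _, d := range out.DomainNames {
		fmt.Println(aws.StringValue(d.DomainName))
	}
}
```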

    ", + "shapes": { + "ARN": { + "base": "

    The Amazon Resource Name (ARN) of the Elasticsearch domain. See Identifiers for IAM Entities in Using AWS Identity and Access Management for more information.

    ", + "refs": { + "AddTagsRequest$ARN": "

    Specify the ARN of the Elasticsearch domain to which you want to attach the tags.

    ", + "ElasticsearchDomainStatus$ARN": "

    The Amazon Resource Name (ARN) of an Elasticsearch domain. See Identifiers for IAM Entities in Using AWS Identity and Access Management for more information.

    ", + "ListTagsRequest$ARN": "

    Specify the ARN for the Elasticsearch domain to which the tags are attached that you want to view.

    ", + "RemoveTagsRequest$ARN": "

    Specifies the ARN for the Elasticsearch domain from which you want to delete the specified tags.

    " + } + }, + "AccessPoliciesStatus": { + "base": "

    The configured access rules for the domain's document and search endpoints, and the current status of those rules.

    ", + "refs": { + "ElasticsearchDomainConfig$AccessPolicies": "

    IAM access policy as a JSON-formatted string.

    " + } + }, + "AddTagsRequest": { + "base": "

    Container for the parameters to the AddTags operation. Specify the tags that you want to attach to the Elasticsearch domain.

    ", + "refs": { + } + }, + "AdvancedOptions": { + "base": "

    Exposes select native Elasticsearch configuration values from elasticsearch.yml. Currently, the following advanced options are available:

    • Option to allow references to indices in an HTTP request body. Must be false when configuring access to individual sub-resources. By default, the value is true. See Configuration Advanced Options for more information.
    • Option to specify the percentage of heap space that is allocated to field data. By default, this setting is unbounded.

    For more information, see Configuring Advanced Options.
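A hedged sketch of passing advanced options as shown above; the two key names (`rest.action.multi.allow_explicit_index` and `indices.fielddata.cache.size`) are assumptions matching the two options just described, and the domain name is a placeholder.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	es "github.com/aws/aws-sdk-go/service/elasticsearchservice"
)

func main() {
	svc := es.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	_, err := svc.UpdateElasticsearchDomainConfig(&es.UpdateElasticsearchDomainConfigInput{
		DomainName: aws.String("my-domain"), // placeholder
		AdvancedOptions: map[string]*string{
			// Disallow explicit index names in request bodies (default "true").
			"rest.action.multi.allow_explicit_index": aws.String("false"),
			// Cap the field-data cache at 40% of the heap (unbounded by default).
			"indices.fielddata.cache.size": aws.String("40"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```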

    ", + "refs": { + "AdvancedOptionsStatus$Options": "

    Specifies the status of advanced options for the specified Elasticsearch domain.

    ", + "CreateElasticsearchDomainRequest$AdvancedOptions": "

    Option to allow references to indices in an HTTP request body. Must be false when configuring access to individual sub-resources. By default, the value is true. See Configuration Advanced Options for more information.

    ", + "ElasticsearchDomainStatus$AdvancedOptions": "

    Specifies the status of the AdvancedOptions.

    ", + "UpdateElasticsearchDomainConfigRequest$AdvancedOptions": "

    Modifies the advanced option to allow references to indices in an HTTP request body. Must be false when configuring access to individual sub-resources. By default, the value is true. See Configuration Advanced Options for more information.

    " + } + }, + "AdvancedOptionsStatus": { + "base": "

    Status of the advanced options for the specified Elasticsearch domain. Currently, the following advanced options are available:

    • Option to allow references to indices in an HTTP request body. Must be false when configuring access to individual sub-resources. By default, the value is true. See Configuration Advanced Options for more information.
    • Option to specify the percentage of heap space that is allocated to field data. By default, this setting is unbounded.

    For more information, see Configuring Advanced Options.

    ", + "refs": { + "ElasticsearchDomainConfig$AdvancedOptions": "

    Specifies the AdvancedOptions for the domain. See Configuring Advanced Options for more information.

    " + } + }, + "BaseException": { + "base": "

    An error occurred while processing the request.

    ", + "refs": { + } + }, + "Boolean": { + "base": null, + "refs": { + "EBSOptions$EBSEnabled": "

    Specifies whether EBS-based storage is enabled.

    ", + "ElasticsearchClusterConfig$DedicatedMasterEnabled": "

    A boolean value to indicate whether a dedicated master node is enabled. See About Dedicated Master Nodes for more information.

    ", + "ElasticsearchClusterConfig$ZoneAwarenessEnabled": "

    A boolean value to indicate whether zone awareness is enabled. See About Zone Awareness for more information.

    ", + "ElasticsearchDomainStatus$Created": "

    The domain creation status. True if the creation of an Elasticsearch domain is complete. False if domain creation is still in progress.

    ", + "ElasticsearchDomainStatus$Deleted": "

    The domain deletion status. True if a delete request has been received for the domain but resource cleanup is still in progress. False if the domain has not been deleted. Once domain deletion is complete, the status of the domain is no longer returned.

    ", + "ElasticsearchDomainStatus$Processing": "

    The status of the Elasticsearch domain configuration. True if Amazon Elasticsearch Service is processing configuration changes. False if the configuration is active.

    ", + "OptionStatus$PendingDeletion": "

    Indicates whether the Elasticsearch domain is being deleted.

    " + } + }, + "CreateElasticsearchDomainRequest": { + "base": null, + "refs": { + } + }, + "CreateElasticsearchDomainResponse": { + "base": "

    The result of a CreateElasticsearchDomain operation. Contains the status of the newly created Elasticsearch domain.

    ", + "refs": { + } + }, + "DeleteElasticsearchDomainRequest": { + "base": "

    Container for the parameters to the DeleteElasticsearchDomain operation. Specifies the name of the Elasticsearch domain that you want to delete.

    ", + "refs": { + } + }, + "DeleteElasticsearchDomainResponse": { + "base": "

    The result of a DeleteElasticsearchDomain request. Contains the status of the pending deletion, or no status if the domain and all of its resources have been deleted.

    ", + "refs": { + } + }, + "DescribeElasticsearchDomainConfigRequest": { + "base": "

    Container for the parameters to the DescribeElasticsearchDomainConfig operation. Specifies the domain name for which you want configuration information.

    ", + "refs": { + } + }, + "DescribeElasticsearchDomainConfigResponse": { + "base": "

    The result of a DescribeElasticsearchDomainConfig request. Contains the configuration information of the requested domain.

    ", + "refs": { + } + }, + "DescribeElasticsearchDomainRequest": { + "base": "

    Container for the parameters to the DescribeElasticsearchDomain operation.

    ", + "refs": { + } + }, + "DescribeElasticsearchDomainResponse": { + "base": "

    The result of a DescribeElasticsearchDomain request. Contains the status of the domain specified in the request.

    ", + "refs": { + } + }, + "DescribeElasticsearchDomainsRequest": { + "base": "

    Container for the parameters to the DescribeElasticsearchDomains operation. By default, the API returns the status of all Elasticsearch domains.

    ", + "refs": { + } + }, + "DescribeElasticsearchDomainsResponse": { + "base": "

    The result of a DescribeElasticsearchDomains request. Contains the status of the specified domains or all domains owned by the account.

    ", + "refs": { + } + }, + "DisabledOperationException": { + "base": "

    An error occurred because the client attempted to access an unsupported operation. Gives http status code of 409.

    ", + "refs": { + } + }, + "DomainId": { + "base": "

    Unique identifier for an Elasticsearch domain.

    ", + "refs": { + "ElasticsearchDomainStatus$DomainId": "

    The unique identifier for the specified Elasticsearch domain.

    " + } + }, + "DomainInfo": { + "base": null, + "refs": { + "DomainInfoList$member": null + } + }, + "DomainInfoList": { + "base": "

    Contains the list of Elasticsearch domain information.

    ", + "refs": { + "ListDomainNamesResponse$DomainNames": "

    List of Elasticsearch domain names.

    " + } + }, + "DomainName": { + "base": "

    The name of an Elasticsearch domain. Domain names are unique across the domains owned by an account within an AWS region. Domain names start with a letter or number and can contain the following characters: a-z (lowercase), 0-9, and - (hyphen).

    ", + "refs": { + "CreateElasticsearchDomainRequest$DomainName": "

    The name of the Elasticsearch domain that you are creating. Domain names are unique across the domains owned by an account within an AWS region. Domain names must start with a letter or number and can contain the following characters: a-z (lowercase), 0-9, and - (hyphen).

    ", + "DeleteElasticsearchDomainRequest$DomainName": "

    The name of the Elasticsearch domain that you want to permanently delete.

    ", + "DescribeElasticsearchDomainConfigRequest$DomainName": "

    The Elasticsearch domain that you want to get information about.

    ", + "DescribeElasticsearchDomainRequest$DomainName": "

    The name of the Elasticsearch domain for which you want information.

    ", + "DomainInfo$DomainName": "

    Specifies the DomainName.

    ", + "DomainNameList$member": null, + "ElasticsearchDomainStatus$DomainName": "

    The name of an Elasticsearch domain. Domain names are unique across the domains owned by an account within an AWS region. Domain names start with a letter or number and can contain the following characters: a-z (lowercase), 0-9, and - (hyphen).

    ", + "UpdateElasticsearchDomainConfigRequest$DomainName": "

    The name of the Elasticsearch domain that you are updating.

    " + } + }, + "DomainNameList": { + "base": "

    A list of Elasticsearch domain names.

    ", + "refs": { + "DescribeElasticsearchDomainsRequest$DomainNames": "

    The Elasticsearch domains for which you want information.

    " + } + }, + "EBSOptions": { + "base": "

    Options to enable, disable, and specify the properties of EBS storage volumes. For more information, see Configuring EBS-based Storage.

    ", + "refs": { + "CreateElasticsearchDomainRequest$EBSOptions": "

    Options to enable, disable and specify the type and size of EBS storage volumes.

    ", + "EBSOptionsStatus$Options": "

    Specifies the EBS options for the specified Elasticsearch domain.

    ", + "ElasticsearchDomainStatus$EBSOptions": "

    The EBSOptions for the specified domain. See Configuring EBS-based Storage for more information.

    ", + "UpdateElasticsearchDomainConfigRequest$EBSOptions": "

    Specify the type and size of the EBS volume that you want to use.

    " + } + }, + "EBSOptionsStatus": { + "base": "

    Status of the EBS options for the specified Elasticsearch domain.

    ", + "refs": { + "ElasticsearchDomainConfig$EBSOptions": "

    Specifies the EBSOptions for the Elasticsearch domain.

    " + } + }, + "ESPartitionInstanceType": { + "base": null, + "refs": { + "ElasticsearchClusterConfig$InstanceType": "

    The instance type for an Elasticsearch cluster.

    ", + "ElasticsearchClusterConfig$DedicatedMasterType": "

    The instance type for a dedicated master node.

    " + } + }, + "ElasticsearchClusterConfig": { + "base": "

    Specifies the configuration for the domain cluster, such as the type and number of instances.

    ", + "refs": { + "CreateElasticsearchDomainRequest$ElasticsearchClusterConfig": "

    Configuration options for an Elasticsearch domain. Specifies the instance type and number of instances in the domain cluster.

    ", + "ElasticsearchClusterConfigStatus$Options": "

    Specifies the cluster configuration for the specified Elasticsearch domain.

    ", + "ElasticsearchDomainStatus$ElasticsearchClusterConfig": "

    The type and number of instances in the domain cluster.

    ", + "UpdateElasticsearchDomainConfigRequest$ElasticsearchClusterConfig": "

    The type and number of instances to instantiate for the domain cluster.

    " + } + }, + "ElasticsearchClusterConfigStatus": { + "base": "

    Specifies the configuration status for the specified Elasticsearch domain.

    ", + "refs": { + "ElasticsearchDomainConfig$ElasticsearchClusterConfig": "

    Specifies the ElasticsearchClusterConfig for the Elasticsearch domain.

    " + } + }, + "ElasticsearchDomainConfig": { + "base": "

    The configuration of an Elasticsearch domain.

    ", + "refs": { + "DescribeElasticsearchDomainConfigResponse$DomainConfig": "

    The configuration information of the domain requested in the DescribeElasticsearchDomainConfig request.

    ", + "UpdateElasticsearchDomainConfigResponse$DomainConfig": "

    The status of the updated Elasticsearch domain.

    " + } + }, + "ElasticsearchDomainStatus": { + "base": "

    The current status of an Elasticsearch domain.

    ", + "refs": { + "CreateElasticsearchDomainResponse$DomainStatus": "

    The status of the newly created Elasticsearch domain.

    ", + "DeleteElasticsearchDomainResponse$DomainStatus": "

    The status of the Elasticsearch domain being deleted.

    ", + "DescribeElasticsearchDomainResponse$DomainStatus": "

    The current status of the Elasticsearch domain.

    ", + "ElasticsearchDomainStatusList$member": null + } + }, + "ElasticsearchDomainStatusList": { + "base": "

    A list that contains the status of each requested Elasticsearch domain.

    ", + "refs": { + "DescribeElasticsearchDomainsResponse$DomainStatusList": "

    The status of the domains requested in the DescribeElasticsearchDomains request.

    " + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "BaseException$message": "

    A description of the error.

    " + } + }, + "IntegerClass": { + "base": null, + "refs": { + "EBSOptions$VolumeSize": "

    Integer to specify the size of an EBS volume.

    ", + "EBSOptions$Iops": "

    Specifies the IOPS for a Provisioned IOPS (SSD) EBS volume.

    ", + "ElasticsearchClusterConfig$InstanceCount": "

    The number of instances in the specified domain cluster.

    ", + "ElasticsearchClusterConfig$DedicatedMasterCount": "

    Total number of dedicated master nodes, active and on standby, for the cluster.

    ", + "SnapshotOptions$AutomatedSnapshotStartHour": "

    Specifies the time, in UTC format, when the service takes a daily automated snapshot of the specified Elasticsearch domain. Default value is 0 hours.

    " + } + }, + "InternalException": { + "base": "

    The request processing has failed because of an unknown error, exception, or failure (the failure is internal to the service). Gives http status code of 500.

    ", + "refs": { + } + }, + "InvalidTypeException": { + "base": "

    An exception for trying to create or access a sub-resource that is either invalid or not supported. Gives http status code of 409.

    ", + "refs": { + } + }, + "LimitExceededException": { + "base": "

    An exception for trying to create more resources or sub-resources than are allowed. Gives http status code of 409.

    ", + "refs": { + } + }, + "ListDomainNamesResponse": { + "base": "

    The result of a ListDomainNames operation. Contains the names of all Elasticsearch domains owned by this account.

    ", + "refs": { + } + }, + "ListTagsRequest": { + "base": "

    Container for the parameters to the ListTags operation. Specify the ARN of the Elasticsearch domain whose attached tags you want to view.

    ", + "refs": { + } + }, + "ListTagsResponse": { + "base": "

    The result of a ListTags operation. Contains tags for all requested Elasticsearch domains.

    ", + "refs": { + } + }, + "OptionState": { + "base": "

    The state of a requested change. One of the following:

    • Processing: The requested change is still in progress.
    • Active: The requested change has been processed and deployed to the Elasticsearch domain.
    ", + "refs": { + "OptionStatus$State": "

    Provides the OptionState for the Elasticsearch domain.

    " + } + }, + "OptionStatus": { + "base": "

    Provides the current status of the entity.

    ", + "refs": { + "AccessPoliciesStatus$Status": "

    The status of the access policy for the Elasticsearch domain. See OptionStatus for the status information that's included.

    ", + "AdvancedOptionsStatus$Status": "

    Specifies the OptionStatus for advanced options for the specified Elasticsearch domain.

    ", + "EBSOptionsStatus$Status": "

    Specifies the status of the EBS options for the specified Elasticsearch domain.

    ", + "ElasticsearchClusterConfigStatus$Status": "

    Specifies the status of the configuration for the specified Elasticsearch domain.

    ", + "SnapshotOptionsStatus$Status": "

    Specifies the status of a daily automated snapshot.

    " + } + }, + "PolicyDocument": { + "base": "

    Access policy rules for the service endpoints of an Elasticsearch domain. For more information, see Configuring Access Policies in the Amazon Elasticsearch Service Developer Guide. The maximum size of a policy document is 100 KB.

    ", + "refs": { + "AccessPoliciesStatus$Options": "

    The access policy configured for the Elasticsearch domain. Access policies may be resource-based, IP-based, or IAM-based. See Configuring Access Policies for more information.

    ", + "CreateElasticsearchDomainRequest$AccessPolicies": "

    IAM access policy as a JSON-formatted string.

    ", + "ElasticsearchDomainStatus$AccessPolicies": "

    IAM access policy as a JSON-formatted string.

    ", + "UpdateElasticsearchDomainConfigRequest$AccessPolicies": "

    IAM access policy as a JSON-formatted string.

    " + } + }, + "RemoveTagsRequest": { + "base": "

    Container for the parameters to the RemoveTags operation. Specify the ARN for the Elasticsearch domain from which you want to remove the specified TagKey.

    ", + "refs": { + } + }, + "ResourceAlreadyExistsException": { + "base": "

    An exception for creating a resource that already exists. Gives http status code of 409.

    ", + "refs": { + } + }, + "ResourceNotFoundException": { + "base": "

    An exception for accessing or deleting a resource that does not exist. Gives http status code of 409.

    ", + "refs": { + } + }, + "ServiceUrl": { + "base": "

    The endpoint to which service requests are submitted. For example, search-imdb-movies-oopcnjfn6ugofer3zx5iadxxca.eu-west-1.es.amazonaws.com or doc-imdb-movies-oopcnjfn6ugofer3zx5iadxxca.eu-west-1.es.amazonaws.com.

    ", + "refs": { + "ElasticsearchDomainStatus$Endpoint": "

    The Elasticsearch domain endpoint that you use to submit index and search requests.

    " + } + }, + "SnapshotOptions": { + "base": "

    Specifies the time, in UTC format, when the service takes a daily automated snapshot of the specified Elasticsearch domain. Default value is 0 hours.

    ", + "refs": { + "CreateElasticsearchDomainRequest$SnapshotOptions": "

    Option to set time, in UTC format, of the daily automated snapshot. Default value is 0 hours.

    ", + "ElasticsearchDomainStatus$SnapshotOptions": "

    Specifies the status of the SnapshotOptions.

    ", + "SnapshotOptionsStatus$Options": "

    The daily snapshot options specified for the Elasticsearch domain.

    ", + "UpdateElasticsearchDomainConfigRequest$SnapshotOptions": "

    Option to set the time, in UTC format, for the daily automated snapshot. Default value is 0 hours.

    " + } + }, + "SnapshotOptionsStatus": { + "base": "

    Status of a daily automated snapshot.

    ", + "refs": { + "ElasticsearchDomainConfig$SnapshotOptions": "

    Specifies the SnapshotOptions for the Elasticsearch domain.

    " + } + }, + "String": { + "base": null, + "refs": { + "AdvancedOptions$key": null, + "AdvancedOptions$value": null, + "StringList$member": null + } + }, + "StringList": { + "base": null, + "refs": { + "RemoveTagsRequest$TagKeys": "

    Specifies the TagKey list which you want to remove from the Elasticsearch domain.

    " + } + }, + "Tag": { + "base": "

    Specifies a key value pair for a resource tag.

    ", + "refs": { + "TagList$member": null + } + }, + "TagKey": { + "base": "

    A string of length from 1 to 128 characters that specifies the key for a Tag. Tag keys must be unique for the Elasticsearch domain to which they are attached.

    ", + "refs": { + "Tag$Key": "

    Specifies the TagKey, the name of the tag. Tag keys must be unique for the Elasticsearch domain to which they are attached.

    " + } + }, + "TagList": { + "base": "

    A list of Tag elements.

    ", + "refs": { + "AddTagsRequest$TagList": "

    List of Tag elements to add to the Elasticsearch domain.

    ", + "ListTagsResponse$TagList": "

    List of Tag elements for the requested Elasticsearch domain.

    " + } + }, + "TagValue": { + "base": "

    A string of length from 0 to 256 characters that specifies the value for a Tag. Tag values can be null and do not have to be unique in a tag set.

    ", + "refs": { + "Tag$Value": "

    Specifies the TagValue, the value assigned to the corresponding tag key. Tag values can be null and do not have to be unique in a tag set. For example, you can have a key value pair in a tag set of project : Trinity and cost-center : Trinity

    " + } + }, + "UIntValue": { + "base": null, + "refs": { + "OptionStatus$UpdateVersion": "

    Specifies the latest version for the entity.

    " + } + }, + "UpdateElasticsearchDomainConfigRequest": { + "base": "

    Container for the parameters to the UpdateElasticsearchDomainConfig operation. Specifies the type and number of instances in the domain cluster.

    ", + "refs": { + } + }, + "UpdateElasticsearchDomainConfigResponse": { + "base": "

    The result of an UpdateElasticsearchDomainConfig request. Contains the status of the Elasticsearch domain being updated.

    ", + "refs": { + } + }, + "UpdateTimestamp": { + "base": null, + "refs": { + "OptionStatus$CreationDate": "

    Timestamp showing when the entity was created.

    ", + "OptionStatus$UpdateDate": "

    Timestamp showing when the entity was last updated.

    " + } + }, + "ValidationException": { + "base": "

    An exception for missing or invalid input fields. Gives http status code of 400.

    ", + "refs": { + } + }, + "VolumeType": { + "base": "

    The type of EBS volume, standard, gp2, or io1. See Configuring EBS-based Storage for more information.

    ", + "refs": { + "EBSOptions$VolumeType": "

    Specifies the volume type for EBS-based storage.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/events/2014-02-03/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/events/2014-02-03/api-2.json new file mode 100644 index 000000000..c9980d9f5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/events/2014-02-03/api-2.json @@ -0,0 +1,643 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2014-02-03", + "endpointPrefix":"events", + "jsonVersion":"1.1", + "serviceFullName":"Amazon CloudWatch Events", + "signatureVersion":"v4", + "targetPrefix":"AWSEvents", + "protocol":"json" + }, + "operations":{ + "DeleteRule":{ + "name":"DeleteRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRuleRequest"}, + "errors":[ + { + "shape":"ConcurrentModificationException", + "exception":true + }, + { + "shape":"InternalException", + "exception":true, + "fault":true + } + ] + }, + "DescribeRule":{ + "name":"DescribeRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRuleRequest"}, + "output":{"shape":"DescribeRuleResponse"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"InternalException", + "exception":true, + "fault":true + } + ] + }, + "DisableRule":{ + "name":"DisableRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableRuleRequest"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"ConcurrentModificationException", + "exception":true + }, + { + "shape":"InternalException", + "exception":true, + "fault":true + } + ] + }, + "EnableRule":{ + "name":"EnableRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableRuleRequest"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"ConcurrentModificationException", + "exception":true + }, + { + "shape":"InternalException", + "exception":true, + "fault":true + } + ] + }, + "ListRuleNamesByTarget":{ + "name":"ListRuleNamesByTarget", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListRuleNamesByTargetRequest"}, + "output":{"shape":"ListRuleNamesByTargetResponse"}, + "errors":[ + { + "shape":"InternalException", + "exception":true, + "fault":true + } + ] + }, + "ListRules":{ + "name":"ListRules", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListRulesRequest"}, + "output":{"shape":"ListRulesResponse"}, + "errors":[ + { + "shape":"InternalException", + "exception":true, + "fault":true + } + ] + }, + "ListTargetsByRule":{ + "name":"ListTargetsByRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTargetsByRuleRequest"}, + "output":{"shape":"ListTargetsByRuleResponse"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"InternalException", + "exception":true, + "fault":true + } + ] + }, + "PutEvents":{ + "name":"PutEvents", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutEventsRequest"}, + "output":{"shape":"PutEventsResponse"}, + "errors":[ + { + "shape":"InternalException", + "exception":true, + "fault":true + } + ] + }, + "PutRule":{ + "name":"PutRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutRuleRequest"}, + "output":{"shape":"PutRuleResponse"}, + "errors":[ + { + "shape":"InvalidEventPatternException", + "exception":true + }, + { + "shape":"LimitExceededException", + "exception":true + }, + { + 
"shape":"ConcurrentModificationException", + "exception":true + }, + { + "shape":"InternalException", + "exception":true, + "fault":true + } + ] + }, + "PutTargets":{ + "name":"PutTargets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutTargetsRequest"}, + "output":{"shape":"PutTargetsResponse"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"ConcurrentModificationException", + "exception":true + }, + { + "shape":"LimitExceededException", + "exception":true + }, + { + "shape":"InternalException", + "exception":true, + "fault":true + } + ] + }, + "RemoveTargets":{ + "name":"RemoveTargets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveTargetsRequest"}, + "output":{"shape":"RemoveTargetsResponse"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"ConcurrentModificationException", + "exception":true + }, + { + "shape":"InternalException", + "exception":true, + "fault":true + } + ] + }, + "TestEventPattern":{ + "name":"TestEventPattern", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TestEventPatternRequest"}, + "output":{"shape":"TestEventPatternResponse"}, + "errors":[ + { + "shape":"InvalidEventPatternException", + "exception":true + }, + { + "shape":"InternalException", + "exception":true, + "fault":true + } + ] + } + }, + "shapes":{ + "Boolean":{"type":"boolean"}, + "ConcurrentModificationException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "DeleteRuleRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"RuleName"} + } + }, + "DescribeRuleRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"RuleName"} + } + }, + "DescribeRuleResponse":{ + "type":"structure", + "members":{ + "Name":{"shape":"RuleName"}, + "Arn":{"shape":"RuleArn"}, + "EventPattern":{"shape":"EventPattern"}, + "ScheduleExpression":{"shape":"ScheduleExpression"}, + "State":{"shape":"RuleState"}, + "Description":{"shape":"RuleDescription"}, + "RoleArn":{"shape":"RoleArn"} + } + }, + "DisableRuleRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"RuleName"} + } + }, + "EnableRuleRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"RuleName"} + } + }, + "ErrorCode":{"type":"string"}, + "ErrorMessage":{"type":"string"}, + "EventId":{"type":"string"}, + "EventPattern":{ + "type":"string", + "max":2048 + }, + "EventResource":{"type":"string"}, + "EventResourceList":{ + "type":"list", + "member":{"shape":"EventResource"} + }, + "EventTime":{"type":"timestamp"}, + "Integer":{"type":"integer"}, + "InternalException":{ + "type":"structure", + "members":{ + }, + "exception":true, + "fault":true + }, + "InvalidEventPatternException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "LimitMax100":{ + "type":"integer", + "min":1, + "max":100 + }, + "ListRuleNamesByTargetRequest":{ + "type":"structure", + "required":["TargetArn"], + "members":{ + "TargetArn":{"shape":"TargetArn"}, + "NextToken":{"shape":"NextToken"}, + "Limit":{"shape":"LimitMax100"} + } + }, + "ListRuleNamesByTargetResponse":{ + "type":"structure", + "members":{ + "RuleNames":{"shape":"RuleNameList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListRulesRequest":{ + "type":"structure", + "members":{ + 
"NamePrefix":{"shape":"RuleName"}, + "NextToken":{"shape":"NextToken"}, + "Limit":{"shape":"LimitMax100"} + } + }, + "ListRulesResponse":{ + "type":"structure", + "members":{ + "Rules":{"shape":"RuleResponseList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListTargetsByRuleRequest":{ + "type":"structure", + "required":["Rule"], + "members":{ + "Rule":{"shape":"RuleName"}, + "NextToken":{"shape":"NextToken"}, + "Limit":{"shape":"LimitMax100"} + } + }, + "ListTargetsByRuleResponse":{ + "type":"structure", + "members":{ + "Targets":{"shape":"TargetList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "NextToken":{ + "type":"string", + "min":1, + "max":2048 + }, + "PutEventsRequest":{ + "type":"structure", + "required":["Entries"], + "members":{ + "Entries":{"shape":"PutEventsRequestEntryList"} + } + }, + "PutEventsRequestEntry":{ + "type":"structure", + "members":{ + "Time":{"shape":"EventTime"}, + "Source":{"shape":"String"}, + "Resources":{"shape":"EventResourceList"}, + "DetailType":{"shape":"String"}, + "Detail":{"shape":"String"} + } + }, + "PutEventsRequestEntryList":{ + "type":"list", + "member":{"shape":"PutEventsRequestEntry"}, + "min":1, + "max":10 + }, + "PutEventsResponse":{ + "type":"structure", + "members":{ + "FailedEntryCount":{"shape":"Integer"}, + "Entries":{"shape":"PutEventsResultEntryList"} + } + }, + "PutEventsResultEntry":{ + "type":"structure", + "members":{ + "EventId":{"shape":"EventId"}, + "ErrorCode":{"shape":"ErrorCode"}, + "ErrorMessage":{"shape":"ErrorMessage"} + } + }, + "PutEventsResultEntryList":{ + "type":"list", + "member":{"shape":"PutEventsResultEntry"} + }, + "PutRuleRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"RuleName"}, + "ScheduleExpression":{"shape":"ScheduleExpression"}, + "EventPattern":{"shape":"EventPattern"}, + "State":{"shape":"RuleState"}, + "Description":{"shape":"RuleDescription"}, + "RoleArn":{"shape":"RoleArn"} + } + }, + "PutRuleResponse":{ + "type":"structure", + "members":{ + "RuleArn":{"shape":"RuleArn"} + } + }, + "PutTargetsRequest":{ + "type":"structure", + "required":[ + "Rule", + "Targets" + ], + "members":{ + "Rule":{"shape":"RuleName"}, + "Targets":{"shape":"TargetList"} + } + }, + "PutTargetsResponse":{ + "type":"structure", + "members":{ + "FailedEntryCount":{"shape":"Integer"}, + "FailedEntries":{"shape":"PutTargetsResultEntryList"} + } + }, + "PutTargetsResultEntry":{ + "type":"structure", + "members":{ + "TargetId":{"shape":"TargetId"}, + "ErrorCode":{"shape":"ErrorCode"}, + "ErrorMessage":{"shape":"ErrorMessage"} + } + }, + "PutTargetsResultEntryList":{ + "type":"list", + "member":{"shape":"PutTargetsResultEntry"} + }, + "RemoveTargetsRequest":{ + "type":"structure", + "required":[ + "Rule", + "Ids" + ], + "members":{ + "Rule":{"shape":"RuleName"}, + "Ids":{"shape":"TargetIdList"} + } + }, + "RemoveTargetsResponse":{ + "type":"structure", + "members":{ + "FailedEntryCount":{"shape":"Integer"}, + "FailedEntries":{"shape":"RemoveTargetsResultEntryList"} + } + }, + "RemoveTargetsResultEntry":{ + "type":"structure", + "members":{ + "TargetId":{"shape":"TargetId"}, + "ErrorCode":{"shape":"ErrorCode"}, + "ErrorMessage":{"shape":"ErrorMessage"} + } + }, + "RemoveTargetsResultEntryList":{ + "type":"list", + "member":{"shape":"RemoveTargetsResultEntry"} + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "RoleArn":{ + "type":"string", + "min":1, + "max":1600 + }, + "Rule":{ + "type":"structure", + "members":{ + 
"Name":{"shape":"RuleName"}, + "Arn":{"shape":"RuleArn"}, + "EventPattern":{"shape":"EventPattern"}, + "State":{"shape":"RuleState"}, + "Description":{"shape":"RuleDescription"}, + "ScheduleExpression":{"shape":"ScheduleExpression"}, + "RoleArn":{"shape":"RoleArn"} + } + }, + "RuleArn":{ + "type":"string", + "min":1, + "max":1600 + }, + "RuleDescription":{ + "type":"string", + "max":512 + }, + "RuleName":{ + "type":"string", + "min":1, + "max":64, + "pattern":"[\\.\\-_A-Za-z0-9]+" + }, + "RuleNameList":{ + "type":"list", + "member":{"shape":"RuleName"} + }, + "RuleResponseList":{ + "type":"list", + "member":{"shape":"Rule"} + }, + "RuleState":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "ScheduleExpression":{ + "type":"string", + "max":256 + }, + "String":{"type":"string"}, + "Target":{ + "type":"structure", + "required":[ + "Id", + "Arn" + ], + "members":{ + "Id":{"shape":"TargetId"}, + "Arn":{"shape":"TargetArn"}, + "Input":{"shape":"TargetInput"}, + "InputPath":{"shape":"TargetInputPath"} + } + }, + "TargetArn":{ + "type":"string", + "min":1, + "max":1600 + }, + "TargetId":{ + "type":"string", + "min":1, + "max":64, + "pattern":"[\\.\\-_A-Za-z0-9]+" + }, + "TargetIdList":{ + "type":"list", + "member":{"shape":"TargetId"}, + "min":1, + "max":100 + }, + "TargetInput":{ + "type":"string", + "max":8192 + }, + "TargetInputPath":{ + "type":"string", + "max":256 + }, + "TargetList":{ + "type":"list", + "member":{"shape":"Target"} + }, + "TestEventPatternRequest":{ + "type":"structure", + "required":[ + "EventPattern", + "Event" + ], + "members":{ + "EventPattern":{"shape":"EventPattern"}, + "Event":{"shape":"String"} + } + }, + "TestEventPatternResponse":{ + "type":"structure", + "members":{ + "Result":{"shape":"Boolean"} + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/events/2014-02-03/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/events/2014-02-03/docs-2.json new file mode 100644 index 000000000..c6eaf8627 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/events/2014-02-03/docs-2.json @@ -0,0 +1,411 @@ +{ + "version": "2.0", + "operations": { + "DeleteRule": "

    Deletes a rule. You must remove all targets from a rule using RemoveTargets before you can delete the rule.

    Note: When you make a change with this action, incoming events might still continue to match the deleted rule. Please allow a short period of time for changes to take effect.
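
    For illustration, a minimal Go sketch of this delete sequence against the SDK's generated cloudwatchevents client; the rule name and target ID are placeholders:

        package main

        import (
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/cloudwatchevents"
        )

        func main() {
            svc := cloudwatchevents.New(session.Must(session.NewSession()))

            // A rule cannot be deleted while targets are still attached,
            // so remove the targets first, then delete the rule itself.
            if _, err := svc.RemoveTargets(&cloudwatchevents.RemoveTargetsInput{
                Rule: aws.String("my-rule"),             // placeholder rule name
                Ids:  []*string{aws.String("target-1")}, // placeholder target ID
            }); err != nil {
                log.Fatal(err)
            }
            if _, err := svc.DeleteRule(&cloudwatchevents.DeleteRuleInput{
                Name: aws.String("my-rule"),
            }); err != nil {
                log.Fatal(err)
            }
        }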

    ", + "DescribeRule": "

    Describes the details of the specified rule.

    ", + "DisableRule": "

    Disables a rule. A disabled rule won't match any events, and won't self-trigger if it has a schedule expression.

    Note: When you make a change with this action, incoming events might still continue to match the disabled rule. Please allow a short period of time for changes to take effect.

    ", + "EnableRule": "

    Enables a rule. If the rule does not exist, the operation fails.

    Note: When you make a change with this action, incoming events might not immediately start matching a newly enabled rule. Please allow a short period of time for changes to take effect.

    ", + "ListRuleNamesByTarget": "

    Lists the names of the rules that the given target is assigned to. Using this action, you can find out which of the rules in Amazon CloudWatch Events can invoke a specific target in your account. If you have more rules in your account than the given limit, the results will be paginated. In that case, use the NextToken returned in the response and repeat the ListRuleNamesByTarget action until the NextToken in the response is returned as null.

    ", + "ListRules": "

    Lists the Amazon CloudWatch Events rules in your account. You can either list all the rules or provide a prefix to match the rule names. If you have more rules in your account than the given limit, the results will be paginated. In that case, use the NextToken returned in the response and repeat the ListRules action until the NextToken in the response is returned as null.
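
    A sketch of that NextToken loop in Go, assuming a cloudwatchevents client built as in the earlier DeleteRule sketch:

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/cloudwatchevents"
        )

        // listAllRules pages through ListRules until NextToken is returned as nil.
        func listAllRules(svc *cloudwatchevents.CloudWatchEvents, prefix string) ([]*cloudwatchevents.Rule, error) {
            input := &cloudwatchevents.ListRulesInput{Limit: aws.Int64(50)}
            if prefix != "" {
                input.NamePrefix = aws.String(prefix)
            }
            var rules []*cloudwatchevents.Rule
            for {
                out, err := svc.ListRules(input)
                if err != nil {
                    return nil, err
                }
                rules = append(rules, out.Rules...)
                if out.NextToken == nil {
                    return rules, nil
                }
                input.NextToken = out.NextToken
            }
        }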

    ", + "ListTargetsByRule": "

    Lists the targets assigned to the rule.

    ", + "PutEvents": "

    Sends custom events to Amazon CloudWatch Events so that they can be matched to rules.
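
    A minimal Go sketch of sending one custom event; the source, detail type, and detail payload are hypothetical values for illustration:

        import (
            "fmt"
            "time"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/cloudwatchevents"
        )

        // sendCustomEvent publishes a single application event to CloudWatch Events.
        func sendCustomEvent(svc *cloudwatchevents.CloudWatchEvents) error {
            out, err := svc.PutEvents(&cloudwatchevents.PutEventsInput{
                Entries: []*cloudwatchevents.PutEventsRequestEntry{{
                    Source:     aws.String("com.example.orders"), // hypothetical source
                    DetailType: aws.String("orderPlaced"),        // free-form detail type
                    Detail:     aws.String(`{"orderId":"1234"}`), // JSON payload
                    Time:       aws.Time(time.Now()),             // optional; defaults to the call time
                }},
            })
            if err != nil {
                return err
            }
            if n := aws.Int64Value(out.FailedEntryCount); n > 0 {
                return fmt.Errorf("PutEvents: %d of %d entries failed", n, len(out.Entries))
            }
            return nil
        }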

    ", + "PutRule": "

    Creates or updates a rule. Rules are enabled by default, or according to the value of the State parameter. You can disable a rule using DisableRule.

    Note: When you make a change with this action, incoming events might not immediately start matching new or updated rules. Please allow a short period of time for changes to take effect.

    A rule must contain at least an EventPattern or ScheduleExpression. Rules with EventPatterns are triggered when a matching event is observed. Rules with ScheduleExpressions self-trigger based on the given schedule. A rule can have both an EventPattern and a ScheduleExpression, in which case the rule will trigger on matching events as well as on a schedule.

    Note: Most services in AWS treat : or / as the same character in Amazon Resource Names (ARNs). However, CloudWatch Events uses an exact match in event patterns and rules. Be sure to use the correct ARN characters when creating event patterns so that they match the ARN syntax in the event you want to match.
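
    A Go sketch of creating a scheduled rule; the name and description are placeholders, and swapping ScheduleExpression for EventPattern would give a matching rule instead:

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/cloudwatchevents"
        )

        // putScheduledRule creates (or updates) a rule that self-triggers on a schedule
        // and returns the rule's ARN.
        func putScheduledRule(svc *cloudwatchevents.CloudWatchEvents) (string, error) {
            out, err := svc.PutRule(&cloudwatchevents.PutRuleInput{
                Name:               aws.String("nightly-snapshot"), // placeholder name
                ScheduleExpression: aws.String("cron(0 20 * * ? *)"),
                State:              aws.String(cloudwatchevents.RuleStateEnabled),
                Description:        aws.String("Fires every day at 20:00 UTC"),
            })
            if err != nil {
                return "", err
            }
            return aws.StringValue(out.RuleArn), nil
        }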

    ", + "PutTargets": "

    Adds target(s) to a rule. Updates the target(s) if they are already associated with the rule. In other words, if there is already a target with the given target ID, then the target associated with that ID is updated.

    Note: After you make a change with this action, new or updated targets might not be invoked immediately when the associated rule triggers. Please allow a short period of time for changes to take effect.
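
    A Go sketch of attaching one target and surfacing any rejected entry; the rule name, target ID, and ARN are placeholders:

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/cloudwatchevents"
        )

        // addTarget attaches one target to an existing rule; re-using the same Id
        // later updates that target in place.
        func addTarget(svc *cloudwatchevents.CloudWatchEvents) error {
            out, err := svc.PutTargets(&cloudwatchevents.PutTargetsInput{
                Rule: aws.String("nightly-snapshot"),
                Targets: []*cloudwatchevents.Target{{
                    Id:  aws.String("target-1"),
                    Arn: aws.String("arn:aws:lambda:us-east-1:123456789012:function:snapshot"),
                }},
            })
            if err != nil {
                return err
            }
            if len(out.FailedEntries) > 0 {
                f := out.FailedEntries[0]
                return fmt.Errorf("target %s rejected: %s",
                    aws.StringValue(f.TargetId), aws.StringValue(f.ErrorMessage))
            }
            return nil
        }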

    ", + "RemoveTargets": "

    Removes target(s) from a rule so that when the rule is triggered, those targets will no longer be invoked.

    Note: After you make a change with this action, removed targets might still be invoked when the associated rule triggers. Please allow a short period of time for changes to take effect.

    ", + "TestEventPattern": "

    Tests whether an event pattern matches the provided event.

    Note: Most services in AWS treat : or / as the same character in Amazon Resource Names (ARNs). However, CloudWatch Events uses an exact match in event patterns and rules. Be sure to use the correct ARN characters when creating event patterns so that they match the ARN syntax in the event you want to match.
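
    A Go sketch of testing a pattern against a sample event; both JSON documents here are illustrative, and the event must carry the standard top-level envelope fields:

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/cloudwatchevents"
        )

        // patternMatches reports whether the sample event matches the pattern.
        func patternMatches(svc *cloudwatchevents.CloudWatchEvents) (bool, error) {
            event := `{"id":"1","detail-type":"EC2 Instance State-change Notification",
                "source":"aws.ec2","account":"123456789012","time":"2017-07-26T00:00:00Z",
                "region":"us-east-1","resources":[],"detail":{"state":"running"}}`
            out, err := svc.TestEventPattern(&cloudwatchevents.TestEventPatternInput{
                Event:        aws.String(event),
                EventPattern: aws.String(`{"source":["aws.ec2"]}`),
            })
            if err != nil {
                return false, err
            }
            return aws.BoolValue(out.Result), nil
        }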

    " + }, + "service": "

    Amazon CloudWatch Events helps you to respond to state changes in your AWS resources. When your resources change state they automatically send events into an event stream. You can create rules that match selected events in the stream and route them to targets to take action. You can also use rules to take action on a pre-determined schedule. For example, you can configure rules to:

    • Automatically invoke an AWS Lambda function to update DNS entries when an event notifies you that an Amazon EC2 instance enters the running state.
    • Direct specific API records from CloudTrail to an Amazon Kinesis stream for detailed analysis of potential security or availability risks.
    • Periodically invoke a built-in target to create a snapshot of an Amazon EBS volume.

    For more information about Amazon CloudWatch Events features, see the Amazon CloudWatch Developer Guide.

    ", + "shapes": { + "Boolean": { + "base": null, + "refs": { + "TestEventPatternResponse$Result": "

    Indicates whether the event matches the event pattern.

    " + } + }, + "ConcurrentModificationException": { + "base": "

    This exception occurs if there is concurrent modification on rule or target.

    ", + "refs": { + } + }, + "DeleteRuleRequest": { + "base": "

    Container for the parameters to the DeleteRule operation.

    ", + "refs": { + } + }, + "DescribeRuleRequest": { + "base": "

    Container for the parameters to the DescribeRule operation.

    ", + "refs": { + } + }, + "DescribeRuleResponse": { + "base": "

    The result of the DescribeRule operation.

    ", + "refs": { + } + }, + "DisableRuleRequest": { + "base": "

    Container for the parameters to the DisableRule operation.

    ", + "refs": { + } + }, + "EnableRuleRequest": { + "base": "

    Container for the parameters to the EnableRule operation.

    ", + "refs": { + } + }, + "ErrorCode": { + "base": null, + "refs": { + "PutEventsResultEntry$ErrorCode": "

    The error code representing why the event submission failed on this entry.

    ", + "PutTargetsResultEntry$ErrorCode": "

    The error code representing why the target submission failed on this entry.

    ", + "RemoveTargetsResultEntry$ErrorCode": "

    The error code representing why the target removal failed on this entry.

    " + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "PutEventsResultEntry$ErrorMessage": "

    The error message explaining why the event submission failed on this entry.

    ", + "PutTargetsResultEntry$ErrorMessage": "

    The error message explaining why the target submission failed on this entry.

    ", + "RemoveTargetsResultEntry$ErrorMessage": "

    The error message explaining why the target removal failed on this entry.

    " + } + }, + "EventId": { + "base": null, + "refs": { + "PutEventsResultEntry$EventId": "

    The ID of the event submitted to Amazon CloudWatch Events.

    " + } + }, + "EventPattern": { + "base": null, + "refs": { + "DescribeRuleResponse$EventPattern": "

    The event pattern.

    ", + "PutRuleRequest$EventPattern": "

    The event pattern.

    ", + "Rule$EventPattern": "

    The event pattern of the rule.

    ", + "TestEventPatternRequest$EventPattern": "

    The event pattern you want to test.

    " + } + }, + "EventResource": { + "base": null, + "refs": { + "EventResourceList$member": null + } + }, + "EventResourceList": { + "base": null, + "refs": { + "PutEventsRequestEntry$Resources": "

    AWS resources, identified by Amazon Resource Name (ARN), which the event primarily concerns. Any number, including zero, may be present.

    " + } + }, + "EventTime": { + "base": null, + "refs": { + "PutEventsRequestEntry$Time": "

    The timestamp of the event, per RFC 3339. If no timestamp is provided, the timestamp of the PutEvents call is used.

    " + } + }, + "Integer": { + "base": null, + "refs": { + "PutEventsResponse$FailedEntryCount": "

    The number of failed entries.

    ", + "PutTargetsResponse$FailedEntryCount": "

    The number of failed entries.

    ", + "RemoveTargetsResponse$FailedEntryCount": "

    The number of failed entries.

    " + } + }, + "InternalException": { + "base": "

    This exception occurs due to unexpected causes.

    ", + "refs": { + } + }, + "InvalidEventPatternException": { + "base": "

    The event pattern is invalid.

    ", + "refs": { + } + }, + "LimitExceededException": { + "base": "

    This exception occurs if you try to create more rules or add more targets to a rule than allowed by default.

    ", + "refs": { + } + }, + "LimitMax100": { + "base": null, + "refs": { + "ListRuleNamesByTargetRequest$Limit": "

    The maximum number of results to return.

    ", + "ListRulesRequest$Limit": "

    The maximum number of results to return.

    ", + "ListTargetsByRuleRequest$Limit": "

    The maximum number of results to return.

    " + } + }, + "ListRuleNamesByTargetRequest": { + "base": "

    Container for the parameters to the ListRuleNamesByTarget operation.

    ", + "refs": { + } + }, + "ListRuleNamesByTargetResponse": { + "base": "

    The result of the ListRuleNamesByTarget operation.

    ", + "refs": { + } + }, + "ListRulesRequest": { + "base": "

    Container for the parameters to the ListRules operation.

    ", + "refs": { + } + }, + "ListRulesResponse": { + "base": "

    The result of the ListRules operation.

    ", + "refs": { + } + }, + "ListTargetsByRuleRequest": { + "base": "

    Container for the parameters to the ListTargetsByRule operation.

    ", + "refs": { + } + }, + "ListTargetsByRuleResponse": { + "base": "

    The result of the ListTargetsByRule operation.

    ", + "refs": { + } + }, + "NextToken": { + "base": null, + "refs": { + "ListRuleNamesByTargetRequest$NextToken": "

    The token returned by a previous call to indicate that there is more data available.

    ", + "ListRuleNamesByTargetResponse$NextToken": "

    Indicates that there are additional results to retrieve.

    ", + "ListRulesRequest$NextToken": "

    The token returned by a previous call to indicate that there is more data available.

    ", + "ListRulesResponse$NextToken": "

    Indicates that there are additional results to retrieve.

    ", + "ListTargetsByRuleRequest$NextToken": "

    The token returned by a previous call to indicate that there is more data available.

    ", + "ListTargetsByRuleResponse$NextToken": "

    Indicates that there are additional results to retrieve.

    " + } + }, + "PutEventsRequest": { + "base": "

    Container for the parameters to the PutEvents operation.

    ", + "refs": { + } + }, + "PutEventsRequestEntry": { + "base": "

    Contains information about the event to be used in the PutEvents action.

    ", + "refs": { + "PutEventsRequestEntryList$member": null + } + }, + "PutEventsRequestEntryList": { + "base": null, + "refs": { + "PutEventsRequest$Entries": "

    The entry that defines an event in your system. You can specify several parameters for the entry such as the source and type of the event, resources associated with the event, and so on.

    " + } + }, + "PutEventsResponse": { + "base": "

    The result of the PutEvents operation.

    ", + "refs": { + } + }, + "PutEventsResultEntry": { + "base": "

    Represents the result of an individual event submitted in a PutEvents request; the PutEvents response contains a list of PutEventsResultEntry.

    ", + "refs": { + "PutEventsResultEntryList$member": null + } + }, + "PutEventsResultEntryList": { + "base": null, + "refs": { + "PutEventsResponse$Entries": "

    A list of results for the ingested events, both successful and unsuccessful. If the ingestion was successful, the entry has the event ID in it. Otherwise, the ErrorCode and ErrorMessage can be used to identify the problem with the entry.

    " + } + }, + "PutRuleRequest": { + "base": "

    Container for the parameters to the PutRule operation.

    ", + "refs": { + } + }, + "PutRuleResponse": { + "base": "

    The result of the PutRule operation.

    ", + "refs": { + } + }, + "PutTargetsRequest": { + "base": "

    Container for the parameters to the PutTargets operation.

    ", + "refs": { + } + }, + "PutTargetsResponse": { + "base": "

    The result of the PutTargets operation.

    ", + "refs": { + } + }, + "PutTargetsResultEntry": { + "base": "

    Represents a target that failed to be added or updated by a PutTargets request; the PutTargets response contains a list of PutTargetsResultEntry in FailedEntries.

    ", + "refs": { + "PutTargetsResultEntryList$member": null + } + }, + "PutTargetsResultEntryList": { + "base": null, + "refs": { + "PutTargetsResponse$FailedEntries": "

    An array of failed target entries.

    " + } + }, + "RemoveTargetsRequest": { + "base": "

    Container for the parameters to the RemoveTargets operation.

    ", + "refs": { + } + }, + "RemoveTargetsResponse": { + "base": "

    The result of the RemoveTargets operation.

    ", + "refs": { + } + }, + "RemoveTargetsResultEntry": { + "base": "

    Represents a target that could not be removed from the rule by Amazon CloudWatch Events.

    ", + "refs": { + "RemoveTargetsResultEntryList$member": null + } + }, + "RemoveTargetsResultEntryList": { + "base": null, + "refs": { + "RemoveTargetsResponse$FailedEntries": "

    An array of failed target entries.

    " + } + }, + "ResourceNotFoundException": { + "base": "

    The rule does not exist.

    ", + "refs": { + } + }, + "RoleArn": { + "base": null, + "refs": { + "DescribeRuleResponse$RoleArn": "

    The Amazon Resource Name (ARN) of the IAM role associated with the rule.

    ", + "PutRuleRequest$RoleArn": "

    The Amazon Resource Name (ARN) of the IAM role associated with the rule.

    ", + "Rule$RoleArn": "

    The Amazon Resource Name (ARN) associated with the role that is used for target invocation.

    " + } + }, + "Rule": { + "base": "

    Contains information about a rule in Amazon CloudWatch Events. A ListRulesResult contains a list of Rules.

    ", + "refs": { + "RuleResponseList$member": null + } + }, + "RuleArn": { + "base": null, + "refs": { + "DescribeRuleResponse$Arn": "

    The Amazon Resource Name (ARN) associated with the rule.

    ", + "PutRuleResponse$RuleArn": "

    The Amazon Resource Name (ARN) that identifies the rule.

    ", + "Rule$Arn": "

    The Amazon Resource Name (ARN) of the rule.

    " + } + }, + "RuleDescription": { + "base": null, + "refs": { + "DescribeRuleResponse$Description": "

    The rule's description.

    ", + "PutRuleRequest$Description": "

    A description of the rule.

    ", + "Rule$Description": "

    The description of the rule.

    " + } + }, + "RuleName": { + "base": null, + "refs": { + "DeleteRuleRequest$Name": "

    The name of the rule to be deleted.

    ", + "DescribeRuleRequest$Name": "

    The name of the rule you want to describe details for.

    ", + "DescribeRuleResponse$Name": "

    The rule's name.

    ", + "DisableRuleRequest$Name": "

    The name of the rule you want to disable.

    ", + "EnableRuleRequest$Name": "

    The name of the rule that you want to enable.

    ", + "ListRulesRequest$NamePrefix": "

    The prefix to match against the rule names.

    ", + "ListTargetsByRuleRequest$Rule": "

    The name of the rule whose targets you want to list.

    ", + "PutRuleRequest$Name": "

    The name of the rule that you are creating or updating.

    ", + "PutTargetsRequest$Rule": "

    The name of the rule you want to add targets to.

    ", + "RemoveTargetsRequest$Rule": "

    The name of the rule you want to remove targets from.

    ", + "Rule$Name": "

    The rule's name.

    ", + "RuleNameList$member": null + } + }, + "RuleNameList": { + "base": null, + "refs": { + "ListRuleNamesByTargetResponse$RuleNames": "

    List of rule names that can invoke the given target.

    " + } + }, + "RuleResponseList": { + "base": null, + "refs": { + "ListRulesResponse$Rules": "

    List of rules matching the specified criteria.

    " + } + }, + "RuleState": { + "base": null, + "refs": { + "DescribeRuleResponse$State": "

    Specifies whether the rule is enabled or disabled.

    ", + "PutRuleRequest$State": "

    Indicates whether the rule is enabled or disabled.

    ", + "Rule$State": "

    The rule's state.

    " + } + }, + "ScheduleExpression": { + "base": null, + "refs": { + "DescribeRuleResponse$ScheduleExpression": "

    The scheduling expression. For example, \"cron(0 20 * * ? *)\", \"rate(5 minutes)\".

    ", + "PutRuleRequest$ScheduleExpression": "

    The scheduling expression. For example, \"cron(0 20 * * ? *)\", \"rate(5 minutes)\".

    ", + "Rule$ScheduleExpression": "

    The scheduling expression. For example, \"cron(0 20 * * ? *)\", \"rate(5 minutes)\".

    " + } + }, + "String": { + "base": null, + "refs": { + "PutEventsRequestEntry$Source": "

    The source of the event.

    ", + "PutEventsRequestEntry$DetailType": "

    Free-form string used to decide what fields to expect in the event detail.

    ", + "PutEventsRequestEntry$Detail": "

    A valid JSON object containing fields, which may in turn contain nested sub-objects. No constraints are imposed on its contents.

    ", + "TestEventPatternRequest$Event": "

    The event in the JSON format to test against the event pattern.

    " + } + }, + "Target": { + "base": "

    Targets are the resources that can be invoked when a rule is triggered. For example, AWS Lambda functions, Amazon Kinesis streams, and built-in targets.

    Input and InputPath are mutually-exclusive and optional parameters of a target. When a rule is triggered due to a matched event, if for a target:

    • Neither Input nor InputPath is specified, then the entire event is passed to the target in JSON form.
    • InputPath is specified in the form of JSONPath (e.g. $.detail), then only the part of the event specified in the path is passed to the target (e.g. only the detail part of the event is passed).
    • Input is specified in the form of a valid JSON, then the matched event is overridden with this constant.
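
    As an illustration of the last case, a Go fragment (placeholder ID and ARN) building a target whose Input replaces the matched event with a constant document:

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/cloudwatchevents"
        )

        // constantInputTarget always receives {"mode":"scheduled"} instead of the event.
        var constantInputTarget = &cloudwatchevents.Target{
            Id:    aws.String("constant-input"),
            Arn:   aws.String("arn:aws:lambda:us-east-1:123456789012:function:handler"),
            Input: aws.String(`{"mode":"scheduled"}`),
        }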
    ", + "refs": { + "TargetList$member": null + } + }, + "TargetArn": { + "base": null, + "refs": { + "ListRuleNamesByTargetRequest$TargetArn": "

    The Amazon Resource Name (ARN) of the target resource that you want to list the rules for.

    ", + "Target$Arn": "

    The Amazon Resource Name (ARN) of the target.

    " + } + }, + "TargetId": { + "base": null, + "refs": { + "PutTargetsResultEntry$TargetId": "

    The ID of the target submitted to Amazon CloudWatch Events.

    ", + "RemoveTargetsResultEntry$TargetId": "

    The ID of the target requested to be removed by Amazon CloudWatch Events.

    ", + "Target$Id": "

    The unique target assignment ID.

    ", + "TargetIdList$member": null + } + }, + "TargetIdList": { + "base": null, + "refs": { + "RemoveTargetsRequest$Ids": "

    The list of target IDs to remove from the rule.

    " + } + }, + "TargetInput": { + "base": null, + "refs": { + "Target$Input": "

    Valid JSON text passed to the target. For more information about JSON text, see The JavaScript Object Notation (JSON) Data Interchange Format.

    " + } + }, + "TargetInputPath": { + "base": null, + "refs": { + "Target$InputPath": "

    The value of the JSONPath that is used for extracting part of the matched event when passing it to the target. For more information about JSON paths, see JSONPath.

    " + } + }, + "TargetList": { + "base": null, + "refs": { + "ListTargetsByRuleResponse$Targets": "

    Lists the targets assigned to the rule.

    ", + "PutTargetsRequest$Targets": "

    List of targets you want to update or add to the rule.

    " + } + }, + "TestEventPatternRequest": { + "base": "

    Container for the parameters to the TestEventPattern operation.

    ", + "refs": { + } + }, + "TestEventPatternResponse": { + "base": "

    The result of the TestEventPattern operation.

    ", + "refs": { + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/events/2014-02-03/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/events/2014-02-03/examples-1.json new file mode 100644 index 000000000..faff76894 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/events/2014-02-03/examples-1.json @@ -0,0 +1,5 @@ +{ + "version":"1.0", + "examples":{ + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/events/2015-10-07/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/events/2015-10-07/api-2.json new file mode 100644 index 000000000..85d852c9b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/events/2015-10-07/api-2.json @@ -0,0 +1,643 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-10-07", + "endpointPrefix":"events", + "jsonVersion":"1.1", + "serviceFullName":"Amazon CloudWatch Events", + "signatureVersion":"v4", + "targetPrefix":"AWSEvents", + "protocol":"json" + }, + "operations":{ + "DeleteRule":{ + "name":"DeleteRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRuleRequest"}, + "errors":[ + { + "shape":"ConcurrentModificationException", + "exception":true + }, + { + "shape":"InternalException", + "exception":true, + "fault":true + } + ] + }, + "DescribeRule":{ + "name":"DescribeRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRuleRequest"}, + "output":{"shape":"DescribeRuleResponse"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"InternalException", + "exception":true, + "fault":true + } + ] + }, + "DisableRule":{ + "name":"DisableRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableRuleRequest"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"ConcurrentModificationException", + "exception":true + }, + { + "shape":"InternalException", + "exception":true, + "fault":true + } + ] + }, + "EnableRule":{ + "name":"EnableRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableRuleRequest"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"ConcurrentModificationException", + "exception":true + }, + { + "shape":"InternalException", + "exception":true, + "fault":true + } + ] + }, + "ListRuleNamesByTarget":{ + "name":"ListRuleNamesByTarget", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListRuleNamesByTargetRequest"}, + "output":{"shape":"ListRuleNamesByTargetResponse"}, + "errors":[ + { + "shape":"InternalException", + "exception":true, + "fault":true + } + ] + }, + "ListRules":{ + "name":"ListRules", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListRulesRequest"}, + "output":{"shape":"ListRulesResponse"}, + "errors":[ + { + "shape":"InternalException", + "exception":true, + "fault":true + } + ] + }, + "ListTargetsByRule":{ + "name":"ListTargetsByRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTargetsByRuleRequest"}, + "output":{"shape":"ListTargetsByRuleResponse"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"InternalException", + "exception":true, + "fault":true + } + ] + }, + "PutEvents":{ + "name":"PutEvents", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutEventsRequest"}, + "output":{"shape":"PutEventsResponse"}, + "errors":[ + { + 
"shape":"InternalException", + "exception":true, + "fault":true + } + ] + }, + "PutRule":{ + "name":"PutRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutRuleRequest"}, + "output":{"shape":"PutRuleResponse"}, + "errors":[ + { + "shape":"InvalidEventPatternException", + "exception":true + }, + { + "shape":"LimitExceededException", + "exception":true + }, + { + "shape":"ConcurrentModificationException", + "exception":true + }, + { + "shape":"InternalException", + "exception":true, + "fault":true + } + ] + }, + "PutTargets":{ + "name":"PutTargets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutTargetsRequest"}, + "output":{"shape":"PutTargetsResponse"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"ConcurrentModificationException", + "exception":true + }, + { + "shape":"LimitExceededException", + "exception":true + }, + { + "shape":"InternalException", + "exception":true, + "fault":true + } + ] + }, + "RemoveTargets":{ + "name":"RemoveTargets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveTargetsRequest"}, + "output":{"shape":"RemoveTargetsResponse"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"ConcurrentModificationException", + "exception":true + }, + { + "shape":"InternalException", + "exception":true, + "fault":true + } + ] + }, + "TestEventPattern":{ + "name":"TestEventPattern", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TestEventPatternRequest"}, + "output":{"shape":"TestEventPatternResponse"}, + "errors":[ + { + "shape":"InvalidEventPatternException", + "exception":true + }, + { + "shape":"InternalException", + "exception":true, + "fault":true + } + ] + } + }, + "shapes":{ + "Boolean":{"type":"boolean"}, + "ConcurrentModificationException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "DeleteRuleRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"RuleName"} + } + }, + "DescribeRuleRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"RuleName"} + } + }, + "DescribeRuleResponse":{ + "type":"structure", + "members":{ + "Name":{"shape":"RuleName"}, + "Arn":{"shape":"RuleArn"}, + "EventPattern":{"shape":"EventPattern"}, + "ScheduleExpression":{"shape":"ScheduleExpression"}, + "State":{"shape":"RuleState"}, + "Description":{"shape":"RuleDescription"}, + "RoleArn":{"shape":"RoleArn"} + } + }, + "DisableRuleRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"RuleName"} + } + }, + "EnableRuleRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"RuleName"} + } + }, + "ErrorCode":{"type":"string"}, + "ErrorMessage":{"type":"string"}, + "EventId":{"type":"string"}, + "EventPattern":{ + "type":"string", + "max":2048 + }, + "EventResource":{"type":"string"}, + "EventResourceList":{ + "type":"list", + "member":{"shape":"EventResource"} + }, + "EventTime":{"type":"timestamp"}, + "Integer":{"type":"integer"}, + "InternalException":{ + "type":"structure", + "members":{ + }, + "exception":true, + "fault":true + }, + "InvalidEventPatternException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "LimitMax100":{ + "type":"integer", + "min":1, + "max":100 + }, + "ListRuleNamesByTargetRequest":{ + 
"type":"structure", + "required":["TargetArn"], + "members":{ + "TargetArn":{"shape":"TargetArn"}, + "NextToken":{"shape":"NextToken"}, + "Limit":{"shape":"LimitMax100"} + } + }, + "ListRuleNamesByTargetResponse":{ + "type":"structure", + "members":{ + "RuleNames":{"shape":"RuleNameList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListRulesRequest":{ + "type":"structure", + "members":{ + "NamePrefix":{"shape":"RuleName"}, + "NextToken":{"shape":"NextToken"}, + "Limit":{"shape":"LimitMax100"} + } + }, + "ListRulesResponse":{ + "type":"structure", + "members":{ + "Rules":{"shape":"RuleResponseList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListTargetsByRuleRequest":{ + "type":"structure", + "required":["Rule"], + "members":{ + "Rule":{"shape":"RuleName"}, + "NextToken":{"shape":"NextToken"}, + "Limit":{"shape":"LimitMax100"} + } + }, + "ListTargetsByRuleResponse":{ + "type":"structure", + "members":{ + "Targets":{"shape":"TargetList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "NextToken":{ + "type":"string", + "min":1, + "max":2048 + }, + "PutEventsRequest":{ + "type":"structure", + "required":["Entries"], + "members":{ + "Entries":{"shape":"PutEventsRequestEntryList"} + } + }, + "PutEventsRequestEntry":{ + "type":"structure", + "members":{ + "Time":{"shape":"EventTime"}, + "Source":{"shape":"String"}, + "Resources":{"shape":"EventResourceList"}, + "DetailType":{"shape":"String"}, + "Detail":{"shape":"String"} + } + }, + "PutEventsRequestEntryList":{ + "type":"list", + "member":{"shape":"PutEventsRequestEntry"}, + "min":1, + "max":10 + }, + "PutEventsResponse":{ + "type":"structure", + "members":{ + "FailedEntryCount":{"shape":"Integer"}, + "Entries":{"shape":"PutEventsResultEntryList"} + } + }, + "PutEventsResultEntry":{ + "type":"structure", + "members":{ + "EventId":{"shape":"EventId"}, + "ErrorCode":{"shape":"ErrorCode"}, + "ErrorMessage":{"shape":"ErrorMessage"} + } + }, + "PutEventsResultEntryList":{ + "type":"list", + "member":{"shape":"PutEventsResultEntry"} + }, + "PutRuleRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"RuleName"}, + "ScheduleExpression":{"shape":"ScheduleExpression"}, + "EventPattern":{"shape":"EventPattern"}, + "State":{"shape":"RuleState"}, + "Description":{"shape":"RuleDescription"}, + "RoleArn":{"shape":"RoleArn"} + } + }, + "PutRuleResponse":{ + "type":"structure", + "members":{ + "RuleArn":{"shape":"RuleArn"} + } + }, + "PutTargetsRequest":{ + "type":"structure", + "required":[ + "Rule", + "Targets" + ], + "members":{ + "Rule":{"shape":"RuleName"}, + "Targets":{"shape":"TargetList"} + } + }, + "PutTargetsResponse":{ + "type":"structure", + "members":{ + "FailedEntryCount":{"shape":"Integer"}, + "FailedEntries":{"shape":"PutTargetsResultEntryList"} + } + }, + "PutTargetsResultEntry":{ + "type":"structure", + "members":{ + "TargetId":{"shape":"TargetId"}, + "ErrorCode":{"shape":"ErrorCode"}, + "ErrorMessage":{"shape":"ErrorMessage"} + } + }, + "PutTargetsResultEntryList":{ + "type":"list", + "member":{"shape":"PutTargetsResultEntry"} + }, + "RemoveTargetsRequest":{ + "type":"structure", + "required":[ + "Rule", + "Ids" + ], + "members":{ + "Rule":{"shape":"RuleName"}, + "Ids":{"shape":"TargetIdList"} + } + }, + "RemoveTargetsResponse":{ + "type":"structure", + "members":{ + "FailedEntryCount":{"shape":"Integer"}, + "FailedEntries":{"shape":"RemoveTargetsResultEntryList"} + } + }, + "RemoveTargetsResultEntry":{ + "type":"structure", + "members":{ + "TargetId":{"shape":"TargetId"}, + 
"ErrorCode":{"shape":"ErrorCode"}, + "ErrorMessage":{"shape":"ErrorMessage"} + } + }, + "RemoveTargetsResultEntryList":{ + "type":"list", + "member":{"shape":"RemoveTargetsResultEntry"} + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "RoleArn":{ + "type":"string", + "min":1, + "max":1600 + }, + "Rule":{ + "type":"structure", + "members":{ + "Name":{"shape":"RuleName"}, + "Arn":{"shape":"RuleArn"}, + "EventPattern":{"shape":"EventPattern"}, + "State":{"shape":"RuleState"}, + "Description":{"shape":"RuleDescription"}, + "ScheduleExpression":{"shape":"ScheduleExpression"}, + "RoleArn":{"shape":"RoleArn"} + } + }, + "RuleArn":{ + "type":"string", + "min":1, + "max":1600 + }, + "RuleDescription":{ + "type":"string", + "max":512 + }, + "RuleName":{ + "type":"string", + "min":1, + "max":64, + "pattern":"[\\.\\-_A-Za-z0-9]+" + }, + "RuleNameList":{ + "type":"list", + "member":{"shape":"RuleName"} + }, + "RuleResponseList":{ + "type":"list", + "member":{"shape":"Rule"} + }, + "RuleState":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "ScheduleExpression":{ + "type":"string", + "max":256 + }, + "String":{"type":"string"}, + "Target":{ + "type":"structure", + "required":[ + "Id", + "Arn" + ], + "members":{ + "Id":{"shape":"TargetId"}, + "Arn":{"shape":"TargetArn"}, + "Input":{"shape":"TargetInput"}, + "InputPath":{"shape":"TargetInputPath"} + } + }, + "TargetArn":{ + "type":"string", + "min":1, + "max":1600 + }, + "TargetId":{ + "type":"string", + "min":1, + "max":64, + "pattern":"[\\.\\-_A-Za-z0-9]+" + }, + "TargetIdList":{ + "type":"list", + "member":{"shape":"TargetId"}, + "min":1, + "max":100 + }, + "TargetInput":{ + "type":"string", + "max":8192 + }, + "TargetInputPath":{ + "type":"string", + "max":256 + }, + "TargetList":{ + "type":"list", + "member":{"shape":"Target"} + }, + "TestEventPatternRequest":{ + "type":"structure", + "required":[ + "EventPattern", + "Event" + ], + "members":{ + "EventPattern":{"shape":"EventPattern"}, + "Event":{"shape":"String"} + } + }, + "TestEventPatternResponse":{ + "type":"structure", + "members":{ + "Result":{"shape":"Boolean"} + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/events/2015-10-07/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/events/2015-10-07/docs-2.json new file mode 100644 index 000000000..fed64cd20 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/events/2015-10-07/docs-2.json @@ -0,0 +1,411 @@ +{ + "version": "2.0", + "operations": { + "DeleteRule": "

    Deletes a rule. You must remove all targets from a rule using RemoveTargets before you can delete the rule.

    Note: When you delete a rule, incoming events might still continue to match the deleted rule. Please allow a short period of time for changes to take effect.

    ", + "DescribeRule": "

    Describes the details of the specified rule.

    ", + "DisableRule": "

    Disables a rule. A disabled rule won't match any events, and won't self-trigger if it has a schedule expression.

    Note: When you disable a rule, incoming events might still continue to match the disabled rule. Please allow a short period of time for changes to take effect.

    ", + "EnableRule": "

    Enables a rule. If the rule does not exist, the operation fails.

    Note: When you enable a rule, incoming events might not immediately start matching a newly enabled rule. Please allow a short period of time for changes to take effect.

    ", + "ListRuleNamesByTarget": "

    Lists the names of the rules that the given target is assigned to. You can see which of the rules in Amazon CloudWatch Events can invoke a specific target in your account. If you have more rules in your account than the given limit, the results will be paginated. In that case, use the NextToken returned in the response and repeat ListRuleNamesByTarget until the NextToken in the response is returned as null.
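
    A Go sketch of this lookup, following NextToken in the same way as ListRules; the target ARN is supplied by the caller:

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/cloudwatchevents"
        )

        // printRulesForTarget prints every rule name that can invoke the given target ARN.
        func printRulesForTarget(svc *cloudwatchevents.CloudWatchEvents, targetArn string) error {
            input := &cloudwatchevents.ListRuleNamesByTargetInput{
                TargetArn: aws.String(targetArn),
                Limit:     aws.Int64(100),
            }
            for {
                out, err := svc.ListRuleNamesByTarget(input)
                if err != nil {
                    return err
                }
                for _, name := range out.RuleNames {
                    fmt.Println(aws.StringValue(name))
                }
                if out.NextToken == nil {
                    return nil
                }
                input.NextToken = out.NextToken
            }
        }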

    ", + "ListRules": "

    Lists the Amazon CloudWatch Events rules in your account. You can either list all the rules or provide a prefix to match the rule names. If you have more rules in your account than the given limit, the results will be paginated. In that case, use the NextToken returned in the response and repeat ListRules until the NextToken in the response is returned as null.

    ", + "ListTargetsByRule": "

    Lists the targets assigned to the rule.

    ", + "PutEvents": "

    Sends custom events to Amazon CloudWatch Events so that they can be matched to rules.

    ", + "PutRule": "

    Creates or updates a rule. Rules are enabled by default, or according to the value of the State parameter. You can disable a rule using DisableRule.

    Note: When you create or update a rule, incoming events might not immediately start matching new or updated rules. Please allow a short period of time for changes to take effect.

    A rule must contain at least an EventPattern or ScheduleExpression. Rules with EventPatterns are triggered when a matching event is observed. Rules with ScheduleExpressions self-trigger based on the given schedule. A rule can have both an EventPattern and a ScheduleExpression, in which case the rule will trigger on matching events as well as on a schedule.

    Note: Most services in AWS treat : or / as the same character in Amazon Resource Names (ARNs). However, CloudWatch Events uses an exact match in event patterns and rules. Be sure to use the correct ARN characters when creating event patterns so that they match the ARN syntax in the event you want to match.

    ", + "PutTargets": "

    Adds target(s) to a rule. Targets are the resources that can be invoked when a rule is triggered; for example, AWS Lambda functions, Amazon Kinesis streams, and built-in targets. Updates the target(s) if they are already associated with the rule. In other words, if there is already a target with the given target ID, then the target associated with that ID is updated.

    In order to be able to make API calls against the resources you own, Amazon CloudWatch Events needs the appropriate permissions. For AWS Lambda and Amazon SNS resources, CloudWatch Events relies on resource-based policies. For Amazon Kinesis streams, CloudWatch Events relies on IAM roles. For more information, see Permissions for Sending Events to Targets in the Amazon CloudWatch Developer Guide.

    Input and InputPath are mutually-exclusive and optional parameters of a target. When a rule is triggered due to a matched event, if for a target:

    • Neither Input nor InputPath is specified, then the entire event is passed to the target in JSON form.
    • InputPath is specified in the form of JSONPath (e.g. $.detail), then only the part of the event specified in the path is passed to the target (e.g. only the detail part of the event is passed).
    • Input is specified in the form of a valid JSON, then the matched event is overridden with this constant.

    Note: After you add targets to a rule, new or updated targets might not be invoked immediately when the associated rule triggers. Please allow a short period of time for changes to take effect.
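
    A Go sketch of the InputPath case described above; the rule name and stream ARN are placeholders, and a Kinesis target also needs the IAM role noted earlier:

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/cloudwatchevents"
        )

        // addDetailOnlyTarget forwards only the "detail" section of each matched
        // event to a Kinesis stream target.
        func addDetailOnlyTarget(svc *cloudwatchevents.CloudWatchEvents) error {
            _, err := svc.PutTargets(&cloudwatchevents.PutTargetsInput{
                Rule: aws.String("ec2-state-change"),
                Targets: []*cloudwatchevents.Target{{
                    Id:        aws.String("detail-only"),
                    Arn:       aws.String("arn:aws:kinesis:us-east-1:123456789012:stream/events"),
                    InputPath: aws.String("$.detail"),
                }},
            })
            return err
        }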

    ", + "RemoveTargets": "

    Removes target(s) from a rule so that when the rule is triggered, those targets will no longer be invoked.

    Note: After you remove a target, it might still be invoked when the associated rule triggers. Please allow a short period of time for changes to take effect.

    ", + "TestEventPattern": "

    Tests whether an event pattern matches the provided event.

    Note: Most services in AWS treat : or / as the same character in Amazon Resource Names (ARNs). However, CloudWatch Events uses an exact match in event patterns and rules. Be sure to use the correct ARN characters when creating event patterns so that they match the ARN syntax in the event you want to match.

    " + }, + "service": "

    Amazon CloudWatch Events helps you to respond to state changes in your AWS resources. When your resources change state they automatically send events into an event stream. You can create rules that match selected events in the stream and route them to targets to take action. You can also use rules to take action on a pre-determined schedule. For example, you can configure rules to:

    • Automatically invoke an AWS Lambda function to update DNS entries when an event notifies you that an Amazon EC2 instance enters the running state.
    • Direct specific API records from CloudTrail to an Amazon Kinesis stream for detailed analysis of potential security or availability risks.
    • Periodically invoke a built-in target to create a snapshot of an Amazon EBS volume.

    For more information about Amazon CloudWatch Events features, see the Amazon CloudWatch Developer Guide.

    ", + "shapes": { + "Boolean": { + "base": null, + "refs": { + "TestEventPatternResponse$Result": "

    Indicates whether the event matches the event pattern.

    " + } + }, + "ConcurrentModificationException": { + "base": "

    This exception occurs if there is concurrent modification on rule or target.

    ", + "refs": { + } + }, + "DeleteRuleRequest": { + "base": "

    Container for the parameters to the DeleteRule operation.

    ", + "refs": { + } + }, + "DescribeRuleRequest": { + "base": "

    Container for the parameters to the DescribeRule operation.

    ", + "refs": { + } + }, + "DescribeRuleResponse": { + "base": "

    The result of the DescribeRule operation.

    ", + "refs": { + } + }, + "DisableRuleRequest": { + "base": "

    Container for the parameters to the DisableRule operation.

    ", + "refs": { + } + }, + "EnableRuleRequest": { + "base": "

    Container for the parameters to the EnableRule operation.

    ", + "refs": { + } + }, + "ErrorCode": { + "base": null, + "refs": { + "PutEventsResultEntry$ErrorCode": "

    The error code representing why the event submission failed on this entry.

    ", + "PutTargetsResultEntry$ErrorCode": "

    The error code representing why the target submission failed on this entry.

    ", + "RemoveTargetsResultEntry$ErrorCode": "

    The error code representing why the target removal failed on this entry.

    " + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "PutEventsResultEntry$ErrorMessage": "

    The error message explaining why the event submission failed on this entry.

    ", + "PutTargetsResultEntry$ErrorMessage": "

    The error message explaining why the target submission failed on this entry.

    ", + "RemoveTargetsResultEntry$ErrorMessage": "

    The error message explaining why the target removal failed on this entry.

    " + } + }, + "EventId": { + "base": null, + "refs": { + "PutEventsResultEntry$EventId": "

    The ID of the event submitted to Amazon CloudWatch Events.

    " + } + }, + "EventPattern": { + "base": null, + "refs": { + "DescribeRuleResponse$EventPattern": "

    The event pattern.

    ", + "PutRuleRequest$EventPattern": "

    The event pattern.

    ", + "Rule$EventPattern": "

    The event pattern of the rule.

    ", + "TestEventPatternRequest$EventPattern": "

    The event pattern you want to test.

    " + } + }, + "EventResource": { + "base": null, + "refs": { + "EventResourceList$member": null + } + }, + "EventResourceList": { + "base": null, + "refs": { + "PutEventsRequestEntry$Resources": "

    AWS resources, identified by Amazon Resource Name (ARN), which the event primarily concerns. Any number, including zero, may be present.

    " + } + }, + "EventTime": { + "base": null, + "refs": { + "PutEventsRequestEntry$Time": "

    The timestamp of the event, per RFC 3339. If no timestamp is provided, the timestamp of the PutEvents call is used.

    " + } + }, + "Integer": { + "base": null, + "refs": { + "PutEventsResponse$FailedEntryCount": "

    The number of failed entries.

    ", + "PutTargetsResponse$FailedEntryCount": "

    The number of failed entries.

    ", + "RemoveTargetsResponse$FailedEntryCount": "

    The number of failed entries.

    " + } + }, + "InternalException": { + "base": "

    This exception occurs due to unexpected causes.

    ", + "refs": { + } + }, + "InvalidEventPatternException": { + "base": "

    The event pattern is invalid.

    ", + "refs": { + } + }, + "LimitExceededException": { + "base": "

    This exception occurs if you try to create more rules or add more targets to a rule than allowed by default.

    ", + "refs": { + } + }, + "LimitMax100": { + "base": null, + "refs": { + "ListRuleNamesByTargetRequest$Limit": "

    The maximum number of results to return.

    ", + "ListRulesRequest$Limit": "

    The maximum number of results to return.

    ", + "ListTargetsByRuleRequest$Limit": "

    The maximum number of results to return.

    " + } + }, + "ListRuleNamesByTargetRequest": { + "base": "

    Container for the parameters to the ListRuleNamesByTarget operation.

    ", + "refs": { + } + }, + "ListRuleNamesByTargetResponse": { + "base": "

    The result of the ListRuleNamesByTarget operation.

    ", + "refs": { + } + }, + "ListRulesRequest": { + "base": "

    Container for the parameters to the ListRules operation.

    ", + "refs": { + } + }, + "ListRulesResponse": { + "base": "

    The result of the ListRules operation.

    ", + "refs": { + } + }, + "ListTargetsByRuleRequest": { + "base": "

    Container for the parameters to the ListTargetsByRule operation.

    ", + "refs": { + } + }, + "ListTargetsByRuleResponse": { + "base": "

    The result of the ListTargetsByRule operation.

    ", + "refs": { + } + }, + "NextToken": { + "base": null, + "refs": { + "ListRuleNamesByTargetRequest$NextToken": "

    The token returned by a previous call to indicate that there is more data available.

    ", + "ListRuleNamesByTargetResponse$NextToken": "

    Indicates that there are additional results to retrieve.

    ", + "ListRulesRequest$NextToken": "

    The token returned by a previous call to indicate that there is more data available.

    ", + "ListRulesResponse$NextToken": "

    Indicates that there are additional results to retrieve.

    ", + "ListTargetsByRuleRequest$NextToken": "

    The token returned by a previous call to indicate that there is more data available.

    ", + "ListTargetsByRuleResponse$NextToken": "

    Indicates that there are additional results to retrieve.

    " + } + }, + "PutEventsRequest": { + "base": "

    Container for the parameters to the PutEvents operation.

    ", + "refs": { + } + }, + "PutEventsRequestEntry": { + "base": "

    Contains information about the event to be used in PutEvents.

    ", + "refs": { + "PutEventsRequestEntryList$member": null + } + }, + "PutEventsRequestEntryList": { + "base": null, + "refs": { + "PutEventsRequest$Entries": "

    The entry that defines an event in your system. You can specify several parameters for the entry such as the source and type of the event, resources associated with the event, and so on.

    " + } + }, + "PutEventsResponse": { + "base": "

    The result of the PutEvents operation.

    ", + "refs": { + } + }, + "PutEventsResultEntry": { + "base": "

    Represents the result of an individual event submitted in a PutEvents request; the PutEvents response contains a list of PutEventsResultEntry.

    ", + "refs": { + "PutEventsResultEntryList$member": null + } + }, + "PutEventsResultEntryList": { + "base": null, + "refs": { + "PutEventsResponse$Entries": "

    A list of results for the ingested events, both successful and unsuccessful. If the ingestion was successful, the entry has the event ID in it. Otherwise, the ErrorCode and ErrorMessage can be used to identify the problem with the entry.
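
    A Go sketch of walking these entries, which parallel the submitted entries in order; it assumes a PutEventsOutput from an earlier PutEvents call:

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/cloudwatchevents"
        )

        // reportPutEvents prints the outcome of each submitted entry;
        // a set ErrorCode marks a failure, otherwise EventId is populated.
        func reportPutEvents(out *cloudwatchevents.PutEventsOutput) {
            for i, e := range out.Entries {
                if e.ErrorCode != nil {
                    fmt.Printf("entry %d failed: %s: %s\n", i,
                        aws.StringValue(e.ErrorCode), aws.StringValue(e.ErrorMessage))
                    continue
                }
                fmt.Printf("entry %d accepted as event %s\n", i, aws.StringValue(e.EventId))
            }
        }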

    " + } + }, + "PutRuleRequest": { + "base": "

    Container for the parameters to the PutRule operation.

    ", + "refs": { + } + }, + "PutRuleResponse": { + "base": "

    The result of the PutRule operation.

    ", + "refs": { + } + }, + "PutTargetsRequest": { + "base": "

    Container for the parameters to the PutTargets operation.

    ", + "refs": { + } + }, + "PutTargetsResponse": { + "base": "

    The result of the PutTargets operation.

    ", + "refs": { + } + }, + "PutTargetsResultEntry": { + "base": "

    Represents a target that failed to be added or updated by a PutTargets request; the PutTargets response contains a list of PutTargetsResultEntry in FailedEntries.

    ", + "refs": { + "PutTargetsResultEntryList$member": null + } + }, + "PutTargetsResultEntryList": { + "base": null, + "refs": { + "PutTargetsResponse$FailedEntries": "

    An array of failed target entries.

    " + } + }, + "RemoveTargetsRequest": { + "base": "

    Container for the parameters to the RemoveTargets operation.

    ", + "refs": { + } + }, + "RemoveTargetsResponse": { + "base": "

    The result of the RemoveTargets operation.

    ", + "refs": { + } + }, + "RemoveTargetsResultEntry": { + "base": "

    Represents a target that could not be removed from the rule by Amazon CloudWatch Events.

    ", + "refs": { + "RemoveTargetsResultEntryList$member": null + } + }, + "RemoveTargetsResultEntryList": { + "base": null, + "refs": { + "RemoveTargetsResponse$FailedEntries": "

    An array of failed target entries.

    " + } + }, + "ResourceNotFoundException": { + "base": "

    The rule does not exist.

    ", + "refs": { + } + }, + "RoleArn": { + "base": null, + "refs": { + "DescribeRuleResponse$RoleArn": "

    The Amazon Resource Name (ARN) of the IAM role associated with the rule.

    ", + "PutRuleRequest$RoleArn": "

    The Amazon Resource Name (ARN) of the IAM role associated with the rule.

    ", + "Rule$RoleArn": "

    The Amazon Resource Name (ARN) associated with the role that is used for target invocation.

    " + } + }, + "Rule": { + "base": "

    Contains information about a rule in Amazon CloudWatch Events. A ListRulesResult contains a list of Rules.

    ", + "refs": { + "RuleResponseList$member": null + } + }, + "RuleArn": { + "base": null, + "refs": { + "DescribeRuleResponse$Arn": "

    The Amazon Resource Name (ARN) associated with the rule.

    ", + "PutRuleResponse$RuleArn": "

    The Amazon Resource Name (ARN) that identifies the rule.

    ", + "Rule$Arn": "

    The Amazon Resource Name (ARN) of the rule.

    " + } + }, + "RuleDescription": { + "base": null, + "refs": { + "DescribeRuleResponse$Description": "

    The rule's description.

    ", + "PutRuleRequest$Description": "

    A description of the rule.

    ", + "Rule$Description": "

    The description of the rule.

    " + } + }, + "RuleName": { + "base": null, + "refs": { + "DeleteRuleRequest$Name": "

    The name of the rule to be deleted.

    ", + "DescribeRuleRequest$Name": "

    The name of the rule you want to describe details for.

    ", + "DescribeRuleResponse$Name": "

    The rule's name.

    ", + "DisableRuleRequest$Name": "

    The name of the rule you want to disable.

    ", + "EnableRuleRequest$Name": "

    The name of the rule that you want to enable.

    ", + "ListRulesRequest$NamePrefix": "

    The prefix to match against the rule names.

    ", + "ListTargetsByRuleRequest$Rule": "

    The name of the rule whose targets you want to list.

    ", + "PutRuleRequest$Name": "

    The name of the rule that you are creating or updating.

    ", + "PutTargetsRequest$Rule": "

    The name of the rule you want to add targets to.

    ", + "RemoveTargetsRequest$Rule": "

    The name of the rule you want to remove targets from.

    ", + "Rule$Name": "

    The rule's name.

    ", + "RuleNameList$member": null + } + }, + "RuleNameList": { + "base": null, + "refs": { + "ListRuleNamesByTargetResponse$RuleNames": "

    List of rule names that can invoke the given target.

    " + } + }, + "RuleResponseList": { + "base": null, + "refs": { + "ListRulesResponse$Rules": "

    List of rules matching the specified criteria.

    " + } + }, + "RuleState": { + "base": null, + "refs": { + "DescribeRuleResponse$State": "

    Specifies whether the rule is enabled or disabled.

    ", + "PutRuleRequest$State": "

    Indicates whether the rule is enabled or disabled.

    ", + "Rule$State": "

    The rule's state.

    " + } + }, + "ScheduleExpression": { + "base": null, + "refs": { + "DescribeRuleResponse$ScheduleExpression": "

    The scheduling expression. For example, \"cron(0 20 * * ? *)\", \"rate(5 minutes)\".

    ", + "PutRuleRequest$ScheduleExpression": "

    The scheduling expression. For example, \"cron(0 20 * * ? *)\", \"rate(5 minutes)\".

    ", + "Rule$ScheduleExpression": "

    The scheduling expression. For example, \"cron(0 20 * * ? *)\", \"rate(5 minutes)\".

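    For orientation, a minimal sketch (not part of the vendored model) of creating a scheduled rule with this SDK's cloudwatchevents package; the rule name is a placeholder, and the expression uses the rate() form shown above:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatchevents"
)

func main() {
	svc := cloudwatchevents.New(session.Must(session.NewSession()))
	// ScheduleExpression accepts either cron() or rate() syntax.
	out, err := svc.PutRule(&cloudwatchevents.PutRuleInput{
		Name:               aws.String("every-5-minutes"), // placeholder rule name
		ScheduleExpression: aws.String("rate(5 minutes)"),
		State:              aws.String("ENABLED"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("rule ARN:", aws.StringValue(out.RuleArn))
}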
    " + } + }, + "String": { + "base": null, + "refs": { + "PutEventsRequestEntry$Source": "

    The source of the event.

    ", + "PutEventsRequestEntry$DetailType": "

    Free-form string used to decide what fields to expect in the event detail.

    ", + "PutEventsRequestEntry$Detail": "

    In the JSON sense, an object containing fields, which may also contain nested sub-objects. No constraints are imposed on its contents.

    ", + "TestEventPatternRequest$Event": "

    The event in the JSON format to test against the event pattern.

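    A hedged sketch of how the PutEventsRequestEntry fields above fit together when sending a custom event; the source, detail type, and detail payload are all illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatchevents"
)

func main() {
	svc := cloudwatchevents.New(session.Must(session.NewSession()))
	out, err := svc.PutEvents(&cloudwatchevents.PutEventsInput{
		Entries: []*cloudwatchevents.PutEventsRequestEntry{{
			Source:     aws.String("com.example.app"),     // placeholder source
			DetailType: aws.String("exampleDetailType"),   // free-form hint for consumers
			Detail:     aws.String(`{"state":"running"}`), // arbitrary JSON object
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("failed entries:", aws.Int64Value(out.FailedEntryCount))
}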
    " + } + }, + "Target": { + "base": "

    Targets are the resources that can be invoked when a rule is triggered. For example, AWS Lambda functions, Amazon Kinesis streams, and built-in targets.

    Input and InputPath are mutually exclusive, optional parameters of a target. When a rule is triggered by a matched event, then for each target (a sketch of all three modes follows this list):

    • If neither Input nor InputPath is specified, the entire event is passed to the target in JSON form.
    • If InputPath is specified in the form of a JSONPath (e.g. $.detail), only the part of the event specified by the path is passed to the target (e.g. only the detail part of the event is passed).
    • If Input is specified in the form of a valid JSON, the matched event is overridden with this constant.
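    As noted above, a minimal sketch of the three input modes via PutTargets from this SDK; the rule name and function ARN are placeholders, and svc is assumed to be constructed as in the earlier sketch:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatchevents"
)

func main() {
	svc := cloudwatchevents.New(session.Must(session.NewSession()))
	fn := aws.String("arn:aws:lambda:us-east-1:123456789012:function:example") // placeholder ARN
	_, err := svc.PutTargets(&cloudwatchevents.PutTargetsInput{
		Rule: aws.String("every-5-minutes"),
		Targets: []*cloudwatchevents.Target{
			{Id: aws.String("1"), Arn: fn},                                        // whole event in JSON form
			{Id: aws.String("2"), Arn: fn, InputPath: aws.String("$.detail")},     // only the detail subtree
			{Id: aws.String("3"), Arn: fn, Input: aws.String(`{"mode":"fixed"}`)}, // constant JSON override
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}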
    ", + "refs": { + "TargetList$member": null + } + }, + "TargetArn": { + "base": null, + "refs": { + "ListRuleNamesByTargetRequest$TargetArn": "

    The Amazon Resource Name (ARN) of the target resource that you want to list the rules for.

    ", + "Target$Arn": "

    The Amazon Resource Name (ARN) of the target.

    " + } + }, + "TargetId": { + "base": null, + "refs": { + "PutTargetsResultEntry$TargetId": "

    The ID of the target submitted to Amazon CloudWatch Events.

    ", + "RemoveTargetsResultEntry$TargetId": "

    The ID of the target requested to be removed by Amazon CloudWatch Events.

    ", + "Target$Id": "

    The unique target assignment ID.

    ", + "TargetIdList$member": null + } + }, + "TargetIdList": { + "base": null, + "refs": { + "RemoveTargetsRequest$Ids": "

    The list of target IDs to remove from the rule.

    " + } + }, + "TargetInput": { + "base": null, + "refs": { + "Target$Input": "

    Valid JSON text passed to the target. For more information about JSON text, see The JavaScript Object Notation (JSON) Data Interchange Format.

    " + } + }, + "TargetInputPath": { + "base": null, + "refs": { + "Target$InputPath": "

    The value of the JSONPath that is used for extracting part of the matched event when passing it to the target. For more information about JSON paths, see JSONPath.

    " + } + }, + "TargetList": { + "base": null, + "refs": { + "ListTargetsByRuleResponse$Targets": "

    Lists the targets assigned to the rule.

    ", + "PutTargetsRequest$Targets": "

    List of targets you want to update or add to the rule.

    " + } + }, + "TestEventPatternRequest": { + "base": "

    Container for the parameters to the TestEventPattern operation.

    ", + "refs": { + } + }, + "TestEventPatternResponse": { + "base": "

    The result of the TestEventPattern operation.

    ", + "refs": { + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/events/2015-10-07/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/events/2015-10-07/examples-1.json new file mode 100644 index 000000000..faff76894 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/events/2015-10-07/examples-1.json @@ -0,0 +1,5 @@ +{ + "version":"1.0", + "examples":{ + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/firehose/2015-08-04/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/firehose/2015-08-04/api-2.json new file mode 100644 index 000000000..b7be95052 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/firehose/2015-08-04/api-2.json @@ -0,0 +1,719 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-08-04", + "endpointPrefix":"firehose", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"Firehose", + "serviceFullName":"Amazon Kinesis Firehose", + "signatureVersion":"v4", + "targetPrefix":"Firehose_20150804" + }, + "operations":{ + "CreateDeliveryStream":{ + "name":"CreateDeliveryStream", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDeliveryStreamInput"}, + "output":{"shape":"CreateDeliveryStreamOutput"}, + "errors":[ + {"shape":"InvalidArgumentException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceInUseException"} + ] + }, + "DeleteDeliveryStream":{ + "name":"DeleteDeliveryStream", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDeliveryStreamInput"}, + "output":{"shape":"DeleteDeliveryStreamOutput"}, + "errors":[ + {"shape":"ResourceInUseException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeDeliveryStream":{ + "name":"DescribeDeliveryStream", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDeliveryStreamInput"}, + "output":{"shape":"DescribeDeliveryStreamOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"} + ] + }, + "ListDeliveryStreams":{ + "name":"ListDeliveryStreams", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDeliveryStreamsInput"}, + "output":{"shape":"ListDeliveryStreamsOutput"} + }, + "PutRecord":{ + "name":"PutRecord", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutRecordInput"}, + "output":{"shape":"PutRecordOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "PutRecordBatch":{ + "name":"PutRecordBatch", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutRecordBatchInput"}, + "output":{"shape":"PutRecordBatchOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "UpdateDestination":{ + "name":"UpdateDestination", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateDestinationInput"}, + "output":{"shape":"UpdateDestinationOutput"}, + "errors":[ + {"shape":"InvalidArgumentException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConcurrentModificationException"} + ] + } + }, + "shapes":{ + "AWSKMSKeyARN":{ + "type":"string", + "max":512, + "min":1, + "pattern":"arn:.*" + }, + "BooleanObject":{"type":"boolean"}, + "BucketARN":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"arn:.*" + }, + "BufferingHints":{ + "type":"structure", + "members":{ + 
"SizeInMBs":{"shape":"SizeInMBs"}, + "IntervalInSeconds":{"shape":"IntervalInSeconds"} + } + }, + "CloudWatchLoggingOptions":{ + "type":"structure", + "members":{ + "Enabled":{"shape":"BooleanObject"}, + "LogGroupName":{"shape":"LogGroupName"}, + "LogStreamName":{"shape":"LogStreamName"} + } + }, + "ClusterJDBCURL":{ + "type":"string", + "min":1, + "pattern":"jdbc:(redshift|postgresql)://((?!-)[A-Za-z0-9-]{1,63}(?Amazon Kinesis Firehose API Reference

    Amazon Kinesis Firehose is a fully-managed service that delivers real-time streaming data to destinations such as Amazon Simple Storage Service (Amazon S3), Amazon Elasticsearch Service (Amazon ES), and Amazon Redshift.

    ", + "operations": { + "CreateDeliveryStream": "

    Creates a delivery stream.

    CreateDeliveryStream is an asynchronous operation that immediately returns. The initial status of the delivery stream is CREATING. After the delivery stream is created, its status is ACTIVE and it now accepts data. Attempts to send data to a delivery stream that is not in the ACTIVE state cause an exception. To check the state of a delivery stream, use DescribeDeliveryStream.

    The name of a delivery stream identifies it. You can't have two delivery streams with the same name in the same region. Two delivery streams in different AWS accounts or different regions in the same AWS account can have the same name.

    By default, you can create up to 20 delivery streams per region.

    A delivery stream can only be configured with a single destination: Amazon S3, Amazon Elasticsearch Service, or Amazon Redshift. For correct CreateDeliveryStream request syntax, specify only one destination configuration parameter: either S3DestinationConfiguration, ElasticsearchDestinationConfiguration, or RedshiftDestinationConfiguration.

    As part of S3DestinationConfiguration, optional values BufferingHints, EncryptionConfiguration, and CompressionFormat can be provided. By default, if no BufferingHints value is provided, Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. Note that BufferingHints is a hint, so there are some cases where the service cannot adhere to these conditions strictly; for example, record boundaries may be such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3.

    A few notes about RedshiftDestinationConfiguration:

    • An Amazon Redshift destination requires an S3 bucket as an intermediate location, as Firehose first delivers data to S3 and then uses COPY syntax to load data into an Amazon Redshift table. This is specified in the RedshiftDestinationConfiguration.S3Configuration parameter element.

    • The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationConfiguration.S3Configuration because the Amazon Redshift COPY operation that reads from the S3 bucket doesn't support these compression formats.

    • We strongly recommend that the username and password you provide are used exclusively for Firehose, and that the account's permissions are restricted to Amazon Redshift INSERT permissions.

    Firehose assumes the IAM role that is configured as part of the destination. The IAM role should allow the Firehose principal to assume the role, and the role should have permissions that allow the service to deliver the data. For more information, see Amazon S3 Bucket Access in the Amazon Kinesis Firehose Developer Guide.

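    A hedged sketch of the Amazon S3 case described above, using this SDK's firehose package; the stream name, role ARN, and bucket ARN are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/firehose"
)

func main() {
	svc := firehose.New(session.Must(session.NewSession()))
	out, err := svc.CreateDeliveryStream(&firehose.CreateDeliveryStreamInput{
		DeliveryStreamName: aws.String("example-stream"), // placeholder name
		// Exactly one destination configuration may be supplied.
		S3DestinationConfiguration: &firehose.S3DestinationConfiguration{
			RoleARN:   aws.String("arn:aws:iam::123456789012:role/firehose-role"), // placeholder
			BucketARN: aws.String("arn:aws:s3:::example-bucket"),                  // placeholder
			// Hints only; the service may buffer slightly over or under these values.
			BufferingHints: &firehose.BufferingHints{
				SizeInMBs:         aws.Int64(5),
				IntervalInSeconds: aws.Int64(300),
			},
			CompressionFormat: aws.String("GZIP"), // not SNAPPY/ZIP for Redshift intermediates
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("delivery stream ARN:", aws.StringValue(out.DeliveryStreamARN))
}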
    ", + "DeleteDeliveryStream": "

    Deletes a delivery stream and its data.

    You can delete a delivery stream only if it is in the ACTIVE or DELETING state, not in the CREATING state. While the deletion request is in process, the delivery stream is in the DELETING state.

    To check the state of a delivery stream, use DescribeDeliveryStream.

    While the delivery stream is in the DELETING state, the service may continue to accept records, but it doesn't make any guarantees with respect to delivering the data. Therefore, as a best practice, you should first stop any applications that are sending records before deleting a delivery stream.

    ", + "DescribeDeliveryStream": "

    Describes the specified delivery stream and gets the status. For example, after your delivery stream is created, call DescribeDeliveryStream to see if the delivery stream is ACTIVE and therefore ready for data to be sent to it.

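    For example, a small status-polling sketch along the lines described above; the stream name is a placeholder, and a production caller would bound the loop:

package main

import (
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/firehose"
)

func main() {
	svc := firehose.New(session.Must(session.NewSession()))
	for {
		out, err := svc.DescribeDeliveryStream(&firehose.DescribeDeliveryStreamInput{
			DeliveryStreamName: aws.String("example-stream"), // placeholder name
		})
		if err != nil {
			log.Fatal(err)
		}
		if aws.StringValue(out.DeliveryStreamDescription.DeliveryStreamStatus) == "ACTIVE" {
			break // the stream now accepts data
		}
		time.Sleep(10 * time.Second) // CREATING -> ACTIVE can take a while
	}
}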
    ", + "ListDeliveryStreams": "

    Lists your delivery streams.

    The number of delivery streams might be too large to return using a single call to ListDeliveryStreams. You can limit the number of delivery streams returned, using the Limit parameter. To determine whether there are more delivery streams to list, check the value of HasMoreDeliveryStreams in the output. If there are more delivery streams to list, you can request them by specifying the name of the last delivery stream returned in the call in the ExclusiveStartDeliveryStreamName parameter of a subsequent call.

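    A sketch of the pagination pattern just described, using HasMoreDeliveryStreams and ExclusiveStartDeliveryStreamName; the page size is illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/firehose"
)

func main() {
	svc := firehose.New(session.Must(session.NewSession()))
	input := &firehose.ListDeliveryStreamsInput{Limit: aws.Int64(10)}
	for {
		out, err := svc.ListDeliveryStreams(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, name := range out.DeliveryStreamNames {
			fmt.Println(aws.StringValue(name))
		}
		if !aws.BoolValue(out.HasMoreDeliveryStreams) || len(out.DeliveryStreamNames) == 0 {
			break
		}
		// Resume after the last name returned by the previous call.
		input.ExclusiveStartDeliveryStreamName = out.DeliveryStreamNames[len(out.DeliveryStreamNames)-1]
	}
}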
    ", + "PutRecord": "

    Writes a single data record into an Amazon Kinesis Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers.

    By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. Note that if you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Kinesis Firehose Limits.

    You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data, for example, a segment from a log file, geographic location data, web site clickstream data, etc.

    Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application(s) to parse individual data items when reading the data from the destination.

    The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation.

    If the PutRecord operation throws a ServiceUnavailableException, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.

    Data records sent to Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.

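    A minimal sketch of a single PutRecord call with the newline-delimiter convention mentioned above; the stream name and payload are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/firehose"
)

func main() {
	svc := firehose.New(session.Must(session.NewSession()))
	out, err := svc.PutRecord(&firehose.PutRecordInput{
		DeliveryStreamName: aws.String("example-stream"), // placeholder name
		// Trailing \n acts as the record delimiter at the destination.
		Record: &firehose.Record{Data: []byte(`{"event":"click"}` + "\n")},
	})
	if err != nil {
		log.Fatal(err) // on ServiceUnavailableException, back off and retry
	}
	fmt.Println("record ID:", aws.StringValue(out.RecordId))
}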
    ", + "PutRecordBatch": "

    Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers.

    Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. Note that if you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Kinesis Firehose Limits.

    You must specify the name of the delivery stream and the data records when using PutRecordBatch. Each data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data, for example, a segment from a log file, geographic location data, web site clickstream data, and so on.

    Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application(s) to parse individual data items when reading the data from the destination.

    The PutRecordBatch response includes a count of the failed records, FailedPutCount, and an array of responses, RequestResponses. Each entry in the RequestResponses array gives additional information about the processed record. Each entry in RequestResponses directly correlates with a record in the request array using the same ordering, from the top to the bottom of the request and response. RequestResponses always includes the same number of records as the request array, and includes both successfully and unsuccessfully processed records. Firehose attempts to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records.

    A successfully processed record includes a RecordId value, which is a unique value identified for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error and is one of the following values: ServiceUnavailable or InternalFailure. ErrorMessage provides more detailed information about the error.

    If FailedPutCount is greater than 0 (zero), retry the request. A retry of the entire batch of records is possible; however, we strongly recommend that you inspect the entire response and resend only those records that failed processing. This minimizes duplicate records and also reduces the total bytes sent (and corresponding charges).

    If the PutRecordBatch operation throws a ServiceUnavailableException, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.

    Data records sent to Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.

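    A sketch of the selective-retry practice recommended above: resend only the entries whose responses carry an ErrorCode. The stream name, payloads, and the bound of three attempts are illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/firehose"
)

func putBatch(svc *firehose.Firehose, stream string, records []*firehose.Record) error {
	for attempt := 0; attempt < 3 && len(records) > 0; attempt++ {
		out, err := svc.PutRecordBatch(&firehose.PutRecordBatchInput{
			DeliveryStreamName: aws.String(stream),
			Records:            records,
		})
		if err != nil {
			return err // on ServiceUnavailableException, back off and retry
		}
		if aws.Int64Value(out.FailedPutCount) == 0 {
			return nil
		}
		// RequestResponses is index-aligned with the request array, so
		// collect and resend only the entries that carry an ErrorCode.
		var failed []*firehose.Record
		for i, entry := range out.RequestResponses {
			if entry.ErrorCode != nil {
				failed = append(failed, records[i])
			}
		}
		records = failed
	}
	if len(records) > 0 {
		return fmt.Errorf("%d records still failing after retries", len(records))
	}
	return nil
}

func main() {
	svc := firehose.New(session.Must(session.NewSession()))
	recs := []*firehose.Record{
		{Data: []byte(`{"n":1}` + "\n")},
		{Data: []byte(`{"n":2}` + "\n")},
	}
	if err := putBatch(svc, "example-stream", recs); err != nil { // placeholder stream name
		log.Fatal(err)
	}
}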
    ", + "UpdateDestination": "

    Updates the specified destination of the specified delivery stream. Note: Switching between Elasticsearch and other services is not supported. For an Elasticsearch destination, you can only update it to another Elasticsearch destination with this operation.

    This operation can be used to change the destination type (for example, to replace the Amazon S3 destination with Amazon Redshift) or change the parameters associated with a given destination (for example, to change the bucket name of the Amazon S3 destination). The update may not occur immediately. The target delivery stream remains active while the configurations are updated, so data writes to the delivery stream can continue during this process. The updated configurations are normally effective within a few minutes.

    If the destination type is the same, Firehose merges the configuration parameters specified in the UpdateDestination request with the destination configuration that already exists on the delivery stream. If any of the parameters are not specified in the update request, then the existing configuration parameters are retained. For example, in the Amazon S3 destination, if EncryptionConfiguration is not specified then the existing EncryptionConfiguration is maintained on the destination.

    If the destination type is not the same, for example, changing the destination from Amazon S3 to Amazon Redshift, Firehose does not merge any parameters. In this case, all parameters must be specified.

    Firehose uses the CurrentDeliveryStreamVersionId to avoid race conditions and conflicting merges. This is a required field in every request, and the service updates the configuration only if the existing configuration matches the VersionId. After the update is applied successfully, the VersionId is updated, and can be retrieved with the DescribeDeliveryStream operation. The new VersionId should be used to set CurrentDeliveryStreamVersionId in the next UpdateDestination operation.

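    A sketch of the VersionId handshake just described: fetch the current VersionId with DescribeDeliveryStream, then pass it as CurrentDeliveryStreamVersionId. The stream name and prefix change are placeholders:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/firehose"
)

func main() {
	svc := firehose.New(session.Must(session.NewSession()))
	desc, err := svc.DescribeDeliveryStream(&firehose.DescribeDeliveryStreamInput{
		DeliveryStreamName: aws.String("example-stream"), // placeholder name
	})
	if err != nil {
		log.Fatal(err)
	}
	d := desc.DeliveryStreamDescription
	_, err = svc.UpdateDestination(&firehose.UpdateDestinationInput{
		DeliveryStreamName:             aws.String("example-stream"),
		CurrentDeliveryStreamVersionId: d.VersionId,                     // optimistic-concurrency token
		DestinationId:                  d.Destinations[0].DestinationId, // one destination per stream
		// For a same-type update, unspecified fields keep their existing values.
		S3DestinationUpdate: &firehose.S3DestinationUpdate{
			Prefix: aws.String("logs/"), // illustrative change
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}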
    " + }, + "shapes": { + "AWSKMSKeyARN": { + "base": null, + "refs": { + "KMSEncryptionConfig$AWSKMSKeyARN": "

    The ARN of the encryption key. Must belong to the same region as the destination Amazon S3 bucket.

    " + } + }, + "BooleanObject": { + "base": null, + "refs": { + "CloudWatchLoggingOptions$Enabled": "

    Enables or disables CloudWatch logging.

    ", + "DeliveryStreamDescription$HasMoreDestinations": "

    Indicates whether there are more destinations available to list.

    ", + "ListDeliveryStreamsOutput$HasMoreDeliveryStreams": "

    Indicates whether there are more delivery streams available to list.

    " + } + }, + "BucketARN": { + "base": null, + "refs": { + "S3DestinationConfiguration$BucketARN": "

    The ARN of the S3 bucket.

    ", + "S3DestinationDescription$BucketARN": "

    The ARN of the S3 bucket.

    ", + "S3DestinationUpdate$BucketARN": "

    The ARN of the S3 bucket.

    " + } + }, + "BufferingHints": { + "base": "

    Describes hints for the buffering to perform before delivering data to the destination. Please note that these options are treated as hints, and therefore Firehose may choose to use different values when it is optimal.

    ", + "refs": { + "S3DestinationConfiguration$BufferingHints": "

    The buffering option. If no value is specified, BufferingHints object default values are used.

    ", + "S3DestinationDescription$BufferingHints": "

    The buffering option. If no value is specified, BufferingHints object default values are used.

    ", + "S3DestinationUpdate$BufferingHints": "

    The buffering option. If no value is specified, BufferingHints object default values are used.

    " + } + }, + "CloudWatchLoggingOptions": { + "base": "

    Describes CloudWatch logging options for your delivery stream.

    ", + "refs": { + "ElasticsearchDestinationConfiguration$CloudWatchLoggingOptions": "

    Describes CloudWatch logging options for your delivery stream.

    ", + "ElasticsearchDestinationDescription$CloudWatchLoggingOptions": "

    CloudWatch logging options.

    ", + "ElasticsearchDestinationUpdate$CloudWatchLoggingOptions": "

    Describes CloudWatch logging options for your delivery stream.

    ", + "RedshiftDestinationConfiguration$CloudWatchLoggingOptions": "

    Describes CloudWatch logging options for your delivery stream.

    ", + "RedshiftDestinationDescription$CloudWatchLoggingOptions": "

    Describes CloudWatch logging options for your delivery stream.

    ", + "RedshiftDestinationUpdate$CloudWatchLoggingOptions": "

    Describes CloudWatch logging options for your delivery stream.

    ", + "S3DestinationConfiguration$CloudWatchLoggingOptions": "

    Describes CloudWatch logging options for your delivery stream.

    ", + "S3DestinationDescription$CloudWatchLoggingOptions": "

    Describes CloudWatch logging options for your delivery stream.

    ", + "S3DestinationUpdate$CloudWatchLoggingOptions": "

    Describes CloudWatch logging options for your delivery stream.

    " + } + }, + "ClusterJDBCURL": { + "base": null, + "refs": { + "RedshiftDestinationConfiguration$ClusterJDBCURL": "

    The database connection string.

    ", + "RedshiftDestinationDescription$ClusterJDBCURL": "

    The database connection string.

    ", + "RedshiftDestinationUpdate$ClusterJDBCURL": "

    The database connection string.

    " + } + }, + "CompressionFormat": { + "base": null, + "refs": { + "S3DestinationConfiguration$CompressionFormat": "

    The compression format. If no value is specified, the default is UNCOMPRESSED.

    The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.

    ", + "S3DestinationDescription$CompressionFormat": "

    The compression format. If no value is specified, the default is UNCOMPRESSED.

    ", + "S3DestinationUpdate$CompressionFormat": "

    The compression format. If no value is specified, the default is UNCOMPRESSED.

    The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.

    " + } + }, + "ConcurrentModificationException": { + "base": "

    Another modification has already happened. Fetch VersionId again and use it to update the destination.

    ", + "refs": { + } + }, + "CopyCommand": { + "base": "

    Describes a COPY command for Amazon Redshift.

    ", + "refs": { + "RedshiftDestinationConfiguration$CopyCommand": "

    The COPY command.

    ", + "RedshiftDestinationDescription$CopyCommand": "

    The COPY command.

    ", + "RedshiftDestinationUpdate$CopyCommand": "

    The COPY command.

    " + } + }, + "CopyOptions": { + "base": null, + "refs": { + "CopyCommand$CopyOptions": "

    Optional parameters to use with the Amazon Redshift COPY command. For more information, see the \"Optional Parameters\" section of Amazon Redshift COPY command. Some possible examples that would apply to Firehose are as follows.

    delimiter '\\t' lzop; - fields are delimited with \"\\t\" (TAB character) and compressed using lzop.

    delimiter '|' - fields are delimited with \"|\" (this is the default delimiter).

    delimiter '|' escape - the delimiter should be escaped.

    fixedwidth 'venueid:3,venuename:25,venuecity:12,venuestate:2,venueseats:6' - fields are fixed width in the source, with each width specified after every column in the table.

    JSON 's3://mybucket/jsonpaths.txt' - data is in JSON format, and the path specified is the format of the data.

    For more examples, see Amazon Redshift COPY command examples.

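    A brief sketch of how one of the option strings above maps onto this SDK's CopyCommand shape; the table and column names are placeholders, and the value would be embedded in a RedshiftDestinationConfiguration when creating the stream:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/firehose"
)

func main() {
	cmd := &firehose.CopyCommand{
		DataTableName:    aws.String("visits"),            // placeholder table
		DataTableColumns: aws.String("venueid,venuename"), // placeholder columns
		CopyOptions:      aws.String("delimiter '|' escape"),
	}
	fmt.Println(cmd)
}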
    " + } + }, + "CreateDeliveryStreamInput": { + "base": "

    Contains the parameters for CreateDeliveryStream.

    ", + "refs": { + } + }, + "CreateDeliveryStreamOutput": { + "base": "

    Contains the output of CreateDeliveryStream.

    ", + "refs": { + } + }, + "Data": { + "base": null, + "refs": { + "Record$Data": "

    The data blob, which is base64-encoded when the blob is serialized. The maximum size of the data blob, before base64-encoding, is 1,000 KB.

    " + } + }, + "DataTableColumns": { + "base": null, + "refs": { + "CopyCommand$DataTableColumns": "

    A comma-separated list of column names.

    " + } + }, + "DataTableName": { + "base": null, + "refs": { + "CopyCommand$DataTableName": "

    The name of the target table. The table must already exist in the database.

    " + } + }, + "DeleteDeliveryStreamInput": { + "base": "

    Contains the parameters for DeleteDeliveryStream.

    ", + "refs": { + } + }, + "DeleteDeliveryStreamOutput": { + "base": "

    Contains the output of DeleteDeliveryStream.

    ", + "refs": { + } + }, + "DeliveryStreamARN": { + "base": null, + "refs": { + "CreateDeliveryStreamOutput$DeliveryStreamARN": "

    The ARN of the delivery stream.

    ", + "DeliveryStreamDescription$DeliveryStreamARN": "

    The Amazon Resource Name (ARN) of the delivery stream.

    " + } + }, + "DeliveryStreamDescription": { + "base": "

    Contains information about a delivery stream.

    ", + "refs": { + "DescribeDeliveryStreamOutput$DeliveryStreamDescription": "

    Information about the delivery stream.

    " + } + }, + "DeliveryStreamName": { + "base": null, + "refs": { + "CreateDeliveryStreamInput$DeliveryStreamName": "

    The name of the delivery stream.

    ", + "DeleteDeliveryStreamInput$DeliveryStreamName": "

    The name of the delivery stream.

    ", + "DeliveryStreamDescription$DeliveryStreamName": "

    The name of the delivery stream.

    ", + "DeliveryStreamNameList$member": null, + "DescribeDeliveryStreamInput$DeliveryStreamName": "

    The name of the delivery stream.

    ", + "ListDeliveryStreamsInput$ExclusiveStartDeliveryStreamName": "

    The name of the delivery stream to start the list with.

    ", + "PutRecordBatchInput$DeliveryStreamName": "

    The name of the delivery stream.

    ", + "PutRecordInput$DeliveryStreamName": "

    The name of the delivery stream.

    ", + "UpdateDestinationInput$DeliveryStreamName": "

    The name of the delivery stream.

    " + } + }, + "DeliveryStreamNameList": { + "base": null, + "refs": { + "ListDeliveryStreamsOutput$DeliveryStreamNames": "

    The names of the delivery streams.

    " + } + }, + "DeliveryStreamStatus": { + "base": null, + "refs": { + "DeliveryStreamDescription$DeliveryStreamStatus": "

    The status of the delivery stream.

    " + } + }, + "DeliveryStreamVersionId": { + "base": null, + "refs": { + "DeliveryStreamDescription$VersionId": "

    Used when calling the UpdateDestination operation. Each time the destination is updated for the delivery stream, the VersionId is changed, and the current VersionId is required when updating the destination. This is so that the service knows it is applying the changes to the correct version of the delivery stream.

    ", + "UpdateDestinationInput$CurrentDeliveryStreamVersionId": "

    Obtain this value from the VersionId result of the DescribeDeliveryStream operation. This value is required, and helps the service to perform conditional operations. For example, if there is an interleaving update and this value is null, then the destination update fails. After the update is successful, the VersionId value is updated. The service then performs a merge of the old configuration with the new configuration.

    " + } + }, + "DescribeDeliveryStreamInput": { + "base": "

    Contains the parameters for DescribeDeliveryStream.

    ", + "refs": { + } + }, + "DescribeDeliveryStreamInputLimit": { + "base": null, + "refs": { + "DescribeDeliveryStreamInput$Limit": "

    The limit on the number of destinations to return. Currently, you can have one destination per delivery stream.

    " + } + }, + "DescribeDeliveryStreamOutput": { + "base": "

    Contains the output of DescribeDeliveryStream.

    ", + "refs": { + } + }, + "DestinationDescription": { + "base": "

    Describes the destination for a delivery stream.

    ", + "refs": { + "DestinationDescriptionList$member": null + } + }, + "DestinationDescriptionList": { + "base": null, + "refs": { + "DeliveryStreamDescription$Destinations": "

    The destinations.

    " + } + }, + "DestinationId": { + "base": null, + "refs": { + "DescribeDeliveryStreamInput$ExclusiveStartDestinationId": "

    Specifies the destination ID to start returning the destination information. Currently Firehose supports one destination per delivery stream.

    ", + "DestinationDescription$DestinationId": "

    The ID of the destination.

    ", + "UpdateDestinationInput$DestinationId": "

    The ID of the destination.

    " + } + }, + "ElasticsearchBufferingHints": { + "base": "

    Describes the buffering to perform before delivering data to the Amazon ES destination.

    ", + "refs": { + "ElasticsearchDestinationConfiguration$BufferingHints": "

    Buffering options. If no value is specified, ElasticsearchBufferingHints object default values are used.

    ", + "ElasticsearchDestinationDescription$BufferingHints": "

    Buffering options.

    ", + "ElasticsearchDestinationUpdate$BufferingHints": "

    Buffering options. If no value is specified, ElasticsearchBufferingHints object default values are used.

    " + } + }, + "ElasticsearchBufferingIntervalInSeconds": { + "base": null, + "refs": { + "ElasticsearchBufferingHints$IntervalInSeconds": "

    Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300 (5 minutes).

    " + } + }, + "ElasticsearchBufferingSizeInMBs": { + "base": null, + "refs": { + "ElasticsearchBufferingHints$SizeInMBs": "

    Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.

    We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to be 10 MB or higher.

    " + } + }, + "ElasticsearchDestinationConfiguration": { + "base": "

    Describes the configuration of a destination in Amazon ES.

    ", + "refs": { + "CreateDeliveryStreamInput$ElasticsearchDestinationConfiguration": "

    The destination in Amazon ES. This value cannot be specified if Amazon S3 or Amazon Redshift is the desired destination (see restrictions listed above).

    " + } + }, + "ElasticsearchDestinationDescription": { + "base": "

    The destination description in Amazon ES.

    ", + "refs": { + "DestinationDescription$ElasticsearchDestinationDescription": "

    The destination in Amazon ES.

    " + } + }, + "ElasticsearchDestinationUpdate": { + "base": "

    Describes an update for a destination in Amazon ES.

    ", + "refs": { + "UpdateDestinationInput$ElasticsearchDestinationUpdate": "

    Describes an update for a destination in Amazon ES.

    " + } + }, + "ElasticsearchDomainARN": { + "base": null, + "refs": { + "ElasticsearchDestinationConfiguration$DomainARN": "

    The ARN of the Amazon ES domain. The IAM role must have permission for DescribeElasticsearchDomain, DescribeElasticsearchDomains, and DescribeElasticsearchDomainConfig after assuming RoleARN.

    ", + "ElasticsearchDestinationDescription$DomainARN": "

    The ARN of the Amazon ES domain.

    ", + "ElasticsearchDestinationUpdate$DomainARN": "

    The ARN of the Amazon ES domain. The IAM role must have permission for DescribeElasticsearchDomain, DescribeElasticsearchDomains, and DescribeElasticsearchDomainConfig after assuming RoleARN.

    " + } + }, + "ElasticsearchIndexName": { + "base": null, + "refs": { + "ElasticsearchDestinationConfiguration$IndexName": "

    The Elasticsearch index name.

    ", + "ElasticsearchDestinationDescription$IndexName": "

    The Elasticsearch index name.

    ", + "ElasticsearchDestinationUpdate$IndexName": "

    The Elasticsearch index name.

    " + } + }, + "ElasticsearchIndexRotationPeriod": { + "base": null, + "refs": { + "ElasticsearchDestinationConfiguration$IndexRotationPeriod": "

    The Elasticsearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data. For more information, see Index Rotation for Amazon Elasticsearch Service Destination. Default value is OneDay.

    ", + "ElasticsearchDestinationDescription$IndexRotationPeriod": "

    The Elasticsearch index rotation period.

    ", + "ElasticsearchDestinationUpdate$IndexRotationPeriod": "

    The Elasticsearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate the expiration of old data. For more information, see Index Rotation for Amazon Elasticsearch Service Destination. Default value is OneDay.

    " + } + }, + "ElasticsearchRetryDurationInSeconds": { + "base": null, + "refs": { + "ElasticsearchRetryOptions$DurationInSeconds": "

    After an initial failure to deliver to Amazon ES, the total amount of time during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.

    " + } + }, + "ElasticsearchRetryOptions": { + "base": "

    Configures retry behavior in the event that Firehose is unable to deliver documents to Amazon ES.

    ", + "refs": { + "ElasticsearchDestinationConfiguration$RetryOptions": "

    Configures retry behavior in the event that Firehose is unable to deliver documents to Amazon ES. Default value is 300 (5 minutes).

    ", + "ElasticsearchDestinationDescription$RetryOptions": "

    Elasticsearch retry options.

    ", + "ElasticsearchDestinationUpdate$RetryOptions": "

    Configures retry behavior in the event that Firehose is unable to deliver documents to Amazon ES. Default value is 300 (5 minutes).

    " + } + }, + "ElasticsearchS3BackupMode": { + "base": null, + "refs": { + "ElasticsearchDestinationConfiguration$S3BackupMode": "

    Defines how documents should be delivered to Amazon S3. When set to FailedDocumentsOnly, Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with elasticsearch-failed/ appended to the key prefix. When set to AllDocuments, Firehose delivers all incoming records to Amazon S3, and also writes failed documents with elasticsearch-failed/ appended to the prefix. For more information, see Amazon S3 Backup for Amazon Elasticsearch Service Destination. Default value is FailedDocumentsOnly.

    ", + "ElasticsearchDestinationDescription$S3BackupMode": "

    Amazon S3 backup mode.

    " + } + }, + "ElasticsearchTypeName": { + "base": null, + "refs": { + "ElasticsearchDestinationConfiguration$TypeName": "

    The Elasticsearch type name.

    ", + "ElasticsearchDestinationDescription$TypeName": "

    The Elasticsearch type name.

    ", + "ElasticsearchDestinationUpdate$TypeName": "

    The Elasticsearch type name.

    " + } + }, + "EncryptionConfiguration": { + "base": "

    Describes the encryption for a destination in Amazon S3.

    ", + "refs": { + "S3DestinationConfiguration$EncryptionConfiguration": "

    The encryption configuration. If no value is specified, the default is no encryption.

    ", + "S3DestinationDescription$EncryptionConfiguration": "

    The encryption configuration. If no value is specified, the default is no encryption.

    ", + "S3DestinationUpdate$EncryptionConfiguration": "

    The encryption configuration. If no value is specified, the default is no encryption.

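    A sketch of opting into KMS encryption via these shapes; the key ARN is a placeholder, and the value would be set on an S3DestinationConfiguration or S3DestinationUpdate (by default, no encryption is performed):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/firehose"
)

func main() {
	enc := &firehose.EncryptionConfiguration{
		KMSEncryptionConfig: &firehose.KMSEncryptionConfig{
			// Placeholder key ARN; the key must be in the same region as the bucket.
			AWSKMSKeyARN: aws.String("arn:aws:kms:us-east-1:123456789012:key/1234abcd-12ab-34cd-56ef-1234567890ab"),
		},
	}
	fmt.Println(enc)
}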
    " + } + }, + "ErrorCode": { + "base": null, + "refs": { + "PutRecordBatchResponseEntry$ErrorCode": "

    The error code for an individual record result.

    " + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "ConcurrentModificationException$message": "

    A message that provides information about the error.

    ", + "InvalidArgumentException$message": "

    A message that provides information about the error.

    ", + "LimitExceededException$message": "

    A message that provides information about the error.

    ", + "PutRecordBatchResponseEntry$ErrorMessage": "

    The error message for an individual record result.

    ", + "ResourceInUseException$message": "

    A message that provides information about the error.

    ", + "ResourceNotFoundException$message": "

    A message that provides information about the error.

    ", + "ServiceUnavailableException$message": "

    A message that provides information about the error.

    " + } + }, + "IntervalInSeconds": { + "base": null, + "refs": { + "BufferingHints$IntervalInSeconds": "

    Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.

    " + } + }, + "InvalidArgumentException": { + "base": "

    The specified input parameter has a value that is not valid.

    ", + "refs": { + } + }, + "KMSEncryptionConfig": { + "base": "

    Describes an encryption key for a destination in Amazon S3.

    ", + "refs": { + "EncryptionConfiguration$KMSEncryptionConfig": "

    The encryption key.

    " + } + }, + "LimitExceededException": { + "base": "

    You have already reached the limit for a requested resource.

    ", + "refs": { + } + }, + "ListDeliveryStreamsInput": { + "base": "

    Contains the parameters for ListDeliveryStreams.

    ", + "refs": { + } + }, + "ListDeliveryStreamsInputLimit": { + "base": null, + "refs": { + "ListDeliveryStreamsInput$Limit": "

    The maximum number of delivery streams to list.

    " + } + }, + "ListDeliveryStreamsOutput": { + "base": "

    Contains the output of ListDeliveryStreams.

    ", + "refs": { + } + }, + "LogGroupName": { + "base": null, + "refs": { + "CloudWatchLoggingOptions$LogGroupName": "

    The CloudWatch group name for logging. This value is required if Enabled is true.

    " + } + }, + "LogStreamName": { + "base": null, + "refs": { + "CloudWatchLoggingOptions$LogStreamName": "

    The CloudWatch log stream name for logging. This value is required if Enabled is true.

    " + } + }, + "NoEncryptionConfig": { + "base": null, + "refs": { + "EncryptionConfiguration$NoEncryptionConfig": "

    Specifically override existing encryption information to ensure no encryption is used.

    " + } + }, + "NonNegativeIntegerObject": { + "base": null, + "refs": { + "PutRecordBatchOutput$FailedPutCount": "

    The number of unsuccessfully written records.

    " + } + }, + "Password": { + "base": null, + "refs": { + "RedshiftDestinationConfiguration$Password": "

    The user password.

    ", + "RedshiftDestinationUpdate$Password": "

    The user password.

    " + } + }, + "Prefix": { + "base": null, + "refs": { + "S3DestinationConfiguration$Prefix": "

    The \"YYYY/MM/DD/HH\" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Firehose Developer Guide.

    ", + "S3DestinationDescription$Prefix": "

    The \"YYYY/MM/DD/HH\" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Firehose Developer Guide.

    ", + "S3DestinationUpdate$Prefix": "

    The \"YYYY/MM/DD/HH\" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Firehose Developer Guide.

    " + } + }, + "PutRecordBatchInput": { + "base": "

    Contains the parameters for PutRecordBatch.

    ", + "refs": { + } + }, + "PutRecordBatchOutput": { + "base": "

    Contains the output of PutRecordBatch.

    ", + "refs": { + } + }, + "PutRecordBatchRequestEntryList": { + "base": null, + "refs": { + "PutRecordBatchInput$Records": "

    One or more records.

    " + } + }, + "PutRecordBatchResponseEntry": { + "base": "

    Contains the result for an individual record from a PutRecordBatch request. If the record is successfully added to your delivery stream, it receives a record ID. If the record fails to be added to your delivery stream, the result includes an error code and an error message.

    ", + "refs": { + "PutRecordBatchResponseEntryList$member": null + } + }, + "PutRecordBatchResponseEntryList": { + "base": null, + "refs": { + "PutRecordBatchOutput$RequestResponses": "

    The results for the individual records. The index of each element matches the same index in which records were sent.

    " + } + }, + "PutRecordInput": { + "base": "

    Contains the parameters for PutRecord.

    ", + "refs": { + } + }, + "PutRecordOutput": { + "base": "

    Contains the output of PutRecord.

    ", + "refs": { + } + }, + "PutResponseRecordId": { + "base": null, + "refs": { + "PutRecordBatchResponseEntry$RecordId": "

    The ID of the record.

    ", + "PutRecordOutput$RecordId": "

    The ID of the record.

    " + } + }, + "Record": { + "base": "

    The unit of data in a delivery stream.

    ", + "refs": { + "PutRecordBatchRequestEntryList$member": null, + "PutRecordInput$Record": "

    The record.

    " + } + }, + "RedshiftDestinationConfiguration": { + "base": "

    Describes the configuration of a destination in Amazon Redshift.

    ", + "refs": { + "CreateDeliveryStreamInput$RedshiftDestinationConfiguration": "

    The destination in Amazon Redshift. This value cannot be specified if Amazon S3 or Amazon Elasticsearch is the desired destination (see restrictions listed above).

    " + } + }, + "RedshiftDestinationDescription": { + "base": "

    Describes a destination in Amazon Redshift.

    ", + "refs": { + "DestinationDescription$RedshiftDestinationDescription": "

    The destination in Amazon Redshift.

    " + } + }, + "RedshiftDestinationUpdate": { + "base": "

    Describes an update for a destination in Amazon Redshift.

    ", + "refs": { + "UpdateDestinationInput$RedshiftDestinationUpdate": "

    Describes an update for a destination in Amazon Redshift.

    " + } + }, + "RedshiftRetryDurationInSeconds": { + "base": null, + "refs": { + "RedshiftRetryOptions$DurationInSeconds": "

    The length of time during which Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). Firehose does not retry if the value of DurationInSeconds is 0 (zero) or if the first delivery attempt takes longer than the current value.

    " + } + }, + "RedshiftRetryOptions": { + "base": "

    Configures retry behavior in the event that Firehose is unable to deliver documents to Amazon Redshift.

    ", + "refs": { + "RedshiftDestinationConfiguration$RetryOptions": "

    Configures retry behavior in the event that Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).

    ", + "RedshiftDestinationDescription$RetryOptions": "

    Configures retry behavior in the event that Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).

    ", + "RedshiftDestinationUpdate$RetryOptions": "

    Configures retry behavior in the event that Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).

    " + } + }, + "ResourceInUseException": { + "base": "

    The resource is already in use and not available for this operation.

    ", + "refs": { + } + }, + "ResourceNotFoundException": { + "base": "

    The specified resource could not be found.

    ", + "refs": { + } + }, + "RoleARN": { + "base": null, + "refs": { + "ElasticsearchDestinationConfiguration$RoleARN": "

    The ARN of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Amazon S3 Bucket Access.

    ", + "ElasticsearchDestinationDescription$RoleARN": "

    The ARN of the AWS credentials.

    ", + "ElasticsearchDestinationUpdate$RoleARN": "

    The ARN of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Amazon S3 Bucket Access.

    ", + "RedshiftDestinationConfiguration$RoleARN": "

    The ARN of the AWS credentials.

    ", + "RedshiftDestinationDescription$RoleARN": "

    The ARN of the AWS credentials.

    ", + "RedshiftDestinationUpdate$RoleARN": "

    The ARN of the AWS credentials.

    ", + "S3DestinationConfiguration$RoleARN": "

    The ARN of the AWS credentials.

    ", + "S3DestinationDescription$RoleARN": "

    The ARN of the AWS credentials.

    ", + "S3DestinationUpdate$RoleARN": "

    The ARN of the AWS credentials.

    " + } + }, + "S3DestinationConfiguration": { + "base": "

    Describes the configuration of a destination in Amazon S3.

    ", + "refs": { + "CreateDeliveryStreamInput$S3DestinationConfiguration": "

    The destination in Amazon S3. This value must be specified if ElasticsearchDestinationConfiguration or RedshiftDestinationConfiguration is specified (see restrictions listed above).

    ", + "ElasticsearchDestinationConfiguration$S3Configuration": null, + "RedshiftDestinationConfiguration$S3Configuration": "

    The S3 configuration for the intermediate location from which Amazon Redshift obtains data. Restrictions are described in the topic for CreateDeliveryStream.

    The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationConfiguration.S3Configuration because the Amazon Redshift COPY operation that reads from the S3 bucket doesn't support these compression formats.

    " + } + }, + "S3DestinationDescription": { + "base": "

    Describes a destination in Amazon S3.

    ", + "refs": { + "DestinationDescription$S3DestinationDescription": "

    The Amazon S3 destination.

    ", + "ElasticsearchDestinationDescription$S3DestinationDescription": null, + "RedshiftDestinationDescription$S3DestinationDescription": "

    The Amazon S3 destination.

    " + } + }, + "S3DestinationUpdate": { + "base": "

    Describes an update for a destination in Amazon S3.

    ", + "refs": { + "ElasticsearchDestinationUpdate$S3Update": null, + "RedshiftDestinationUpdate$S3Update": "

    The Amazon S3 destination.

    The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationUpdate.S3Update because the Amazon Redshift COPY operation that reads from the S3 bucket doesn't support these compression formats.

    ", + "UpdateDestinationInput$S3DestinationUpdate": "

    Describes an update for a destination in Amazon S3.

    " + } + }, + "ServiceUnavailableException": { + "base": "

    The service is unavailable. Back off and retry the operation. If you continue to see the exception, throughput limits for the delivery stream may have been exceeded. For more information about limits and how to request an increase, see Amazon Kinesis Firehose Limits.

    ", + "refs": { + } + }, + "SizeInMBs": { + "base": null, + "refs": { + "BufferingHints$SizeInMBs": "

    Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.

    We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to be 10 MB or higher.

    " + } + }, + "Timestamp": { + "base": null, + "refs": { + "DeliveryStreamDescription$CreateTimestamp": "

    The date and time that the delivery stream was created.

    ", + "DeliveryStreamDescription$LastUpdateTimestamp": "

    The date and time that the delivery stream was last updated.

    " + } + }, + "UpdateDestinationInput": { + "base": "

    Contains the parameters for UpdateDestination.

    ", + "refs": { + } + }, + "UpdateDestinationOutput": { + "base": "

    Contains the output of UpdateDestination.

    ", + "refs": { + } + }, + "Username": { + "base": null, + "refs": { + "RedshiftDestinationConfiguration$Username": "

    The name of the user.

    ", + "RedshiftDestinationDescription$Username": "

    The name of the user.

    ", + "RedshiftDestinationUpdate$Username": "

    The name of the user.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/gamelift/2015-10-01/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/gamelift/2015-10-01/api-2.json new file mode 100644 index 000000000..2ec85341b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/gamelift/2015-10-01/api-2.json @@ -0,0 +1,2307 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-10-01", + "endpointPrefix":"gamelift", + "jsonVersion":"1.1", + "serviceFullName":"Amazon GameLift", + "signatureVersion":"v4", + "targetPrefix":"GameLift", + "protocol":"json" + }, + "operations":{ + "CreateAlias":{ + "name":"CreateAlias", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateAliasInput"}, + "output":{"shape":"CreateAliasOutput"}, + "errors":[ + { + "shape":"UnauthorizedException", + "exception":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"ConflictException", + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + }, + { + "shape":"LimitExceededException", + "exception":true + } + ] + }, + "CreateBuild":{ + "name":"CreateBuild", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateBuildInput"}, + "output":{"shape":"CreateBuildOutput"}, + "errors":[ + { + "shape":"UnauthorizedException", + "exception":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"ConflictException", + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + }, + "CreateFleet":{ + "name":"CreateFleet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateFleetInput"}, + "output":{"shape":"CreateFleetOutput"}, + "errors":[ + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + }, + { + "shape":"NotFoundException", + "exception":true + }, + { + "shape":"ConflictException", + "exception":true + }, + { + "shape":"LimitExceededException", + "exception":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"UnauthorizedException", + "exception":true + } + ] + }, + "CreateGameSession":{ + "name":"CreateGameSession", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateGameSessionInput"}, + "output":{"shape":"CreateGameSessionOutput"}, + "errors":[ + { + "shape":"ConflictException", + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + }, + { + "shape":"UnauthorizedException", + "exception":true + }, + { + "shape":"InvalidFleetStatusException", + "exception":true + }, + { + "shape":"TerminalRoutingStrategyException", + "exception":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"NotFoundException", + "exception":true + }, + { + "shape":"FleetCapacityExceededException", + "exception":true + } + ] + }, + "CreatePlayerSession":{ + "name":"CreatePlayerSession", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePlayerSessionInput"}, + "output":{"shape":"CreatePlayerSessionOutput"}, + "errors":[ + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + }, + { + "shape":"UnauthorizedException", + "exception":true + }, + { + "shape":"InvalidGameSessionStatusException", + "exception":true + }, + { + "shape":"GameSessionFullException", + "exception":true + }, + { + "shape":"TerminalRoutingStrategyException", + "exception":true + }, + { + 
"shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"NotFoundException", + "exception":true + } + ] + }, + "CreatePlayerSessions":{ + "name":"CreatePlayerSessions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePlayerSessionsInput"}, + "output":{"shape":"CreatePlayerSessionsOutput"}, + "errors":[ + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + }, + { + "shape":"UnauthorizedException", + "exception":true + }, + { + "shape":"InvalidGameSessionStatusException", + "exception":true + }, + { + "shape":"GameSessionFullException", + "exception":true + }, + { + "shape":"TerminalRoutingStrategyException", + "exception":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"NotFoundException", + "exception":true + } + ] + }, + "DeleteAlias":{ + "name":"DeleteAlias", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAliasInput"}, + "errors":[ + { + "shape":"UnauthorizedException", + "exception":true + }, + { + "shape":"NotFoundException", + "exception":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + }, + "DeleteBuild":{ + "name":"DeleteBuild", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteBuildInput"}, + "errors":[ + { + "shape":"UnauthorizedException", + "exception":true + }, + { + "shape":"NotFoundException", + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + } + ] + }, + "DeleteFleet":{ + "name":"DeleteFleet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteFleetInput"}, + "errors":[ + { + "shape":"NotFoundException", + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + }, + { + "shape":"InvalidFleetStatusException", + "exception":true + }, + { + "shape":"UnauthorizedException", + "exception":true + }, + { + "shape":"InvalidRequestException", + "exception":true + } + ] + }, + "DeleteScalingPolicy":{ + "name":"DeleteScalingPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteScalingPolicyInput"}, + "errors":[ + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"UnauthorizedException", + "exception":true + }, + { + "shape":"NotFoundException", + "exception":true + } + ] + }, + "DescribeAlias":{ + "name":"DescribeAlias", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAliasInput"}, + "output":{"shape":"DescribeAliasOutput"}, + "errors":[ + { + "shape":"UnauthorizedException", + "exception":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"NotFoundException", + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + }, + "DescribeBuild":{ + "name":"DescribeBuild", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeBuildInput"}, + "output":{"shape":"DescribeBuildOutput"}, + "errors":[ + { + "shape":"UnauthorizedException", + "exception":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"NotFoundException", + "exception":true + }, + { + "shape":"InternalServiceException", + 
"exception":true, + "fault":true + } + ] + }, + "DescribeEC2InstanceLimits":{ + "name":"DescribeEC2InstanceLimits", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEC2InstanceLimitsInput"}, + "output":{"shape":"DescribeEC2InstanceLimitsOutput"}, + "errors":[ + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + }, + { + "shape":"UnauthorizedException", + "exception":true + } + ] + }, + "DescribeFleetAttributes":{ + "name":"DescribeFleetAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeFleetAttributesInput"}, + "output":{"shape":"DescribeFleetAttributesOutput"}, + "errors":[ + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + }, + { + "shape":"NotFoundException", + "exception":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"UnauthorizedException", + "exception":true + } + ] + }, + "DescribeFleetCapacity":{ + "name":"DescribeFleetCapacity", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeFleetCapacityInput"}, + "output":{"shape":"DescribeFleetCapacityOutput"}, + "errors":[ + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + }, + { + "shape":"NotFoundException", + "exception":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"UnauthorizedException", + "exception":true + } + ] + }, + "DescribeFleetEvents":{ + "name":"DescribeFleetEvents", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeFleetEventsInput"}, + "output":{"shape":"DescribeFleetEventsOutput"}, + "errors":[ + { + "shape":"NotFoundException", + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + }, + { + "shape":"UnauthorizedException", + "exception":true + }, + { + "shape":"InvalidRequestException", + "exception":true + } + ] + }, + "DescribeFleetPortSettings":{ + "name":"DescribeFleetPortSettings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeFleetPortSettingsInput"}, + "output":{"shape":"DescribeFleetPortSettingsOutput"}, + "errors":[ + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + }, + { + "shape":"NotFoundException", + "exception":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"UnauthorizedException", + "exception":true + } + ] + }, + "DescribeFleetUtilization":{ + "name":"DescribeFleetUtilization", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeFleetUtilizationInput"}, + "output":{"shape":"DescribeFleetUtilizationOutput"}, + "errors":[ + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + }, + { + "shape":"NotFoundException", + "exception":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"UnauthorizedException", + "exception":true + } + ] + }, + "DescribeGameSessionDetails":{ + "name":"DescribeGameSessionDetails", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeGameSessionDetailsInput"}, + "output":{"shape":"DescribeGameSessionDetailsOutput"}, + "errors":[ + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + }, + { + "shape":"NotFoundException", + "exception":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + 
"shape":"UnauthorizedException", + "exception":true + }, + { + "shape":"TerminalRoutingStrategyException", + "exception":true + } + ] + }, + "DescribeGameSessions":{ + "name":"DescribeGameSessions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeGameSessionsInput"}, + "output":{"shape":"DescribeGameSessionsOutput"}, + "errors":[ + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + }, + { + "shape":"NotFoundException", + "exception":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"UnauthorizedException", + "exception":true + }, + { + "shape":"TerminalRoutingStrategyException", + "exception":true + } + ] + }, + "DescribePlayerSessions":{ + "name":"DescribePlayerSessions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribePlayerSessionsInput"}, + "output":{"shape":"DescribePlayerSessionsOutput"}, + "errors":[ + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + }, + { + "shape":"NotFoundException", + "exception":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"UnauthorizedException", + "exception":true + } + ] + }, + "DescribeRuntimeConfiguration":{ + "name":"DescribeRuntimeConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRuntimeConfigurationInput"}, + "output":{"shape":"DescribeRuntimeConfigurationOutput"}, + "errors":[ + { + "shape":"UnauthorizedException", + "exception":true + }, + { + "shape":"NotFoundException", + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + } + ] + }, + "DescribeScalingPolicies":{ + "name":"DescribeScalingPolicies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeScalingPoliciesInput"}, + "output":{"shape":"DescribeScalingPoliciesOutput"}, + "errors":[ + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"UnauthorizedException", + "exception":true + }, + { + "shape":"NotFoundException", + "exception":true + } + ] + }, + "GetGameSessionLogUrl":{ + "name":"GetGameSessionLogUrl", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetGameSessionLogUrlInput"}, + "output":{"shape":"GetGameSessionLogUrlOutput"}, + "errors":[ + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + }, + { + "shape":"NotFoundException", + "exception":true + }, + { + "shape":"UnauthorizedException", + "exception":true + }, + { + "shape":"InvalidRequestException", + "exception":true + } + ] + }, + "ListAliases":{ + "name":"ListAliases", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAliasesInput"}, + "output":{"shape":"ListAliasesOutput"}, + "errors":[ + { + "shape":"UnauthorizedException", + "exception":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + }, + "ListBuilds":{ + "name":"ListBuilds", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListBuildsInput"}, + "output":{"shape":"ListBuildsOutput"}, + "errors":[ + { + "shape":"UnauthorizedException", + "exception":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"InternalServiceException", + 
"exception":true, + "fault":true + } + ] + }, + "ListFleets":{ + "name":"ListFleets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListFleetsInput"}, + "output":{"shape":"ListFleetsOutput"}, + "errors":[ + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + }, + { + "shape":"NotFoundException", + "exception":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"UnauthorizedException", + "exception":true + } + ] + }, + "PutScalingPolicy":{ + "name":"PutScalingPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutScalingPolicyInput"}, + "output":{"shape":"PutScalingPolicyOutput"}, + "errors":[ + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"UnauthorizedException", + "exception":true + }, + { + "shape":"NotFoundException", + "exception":true + } + ] + }, + "RequestUploadCredentials":{ + "name":"RequestUploadCredentials", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RequestUploadCredentialsInput"}, + "output":{"shape":"RequestUploadCredentialsOutput"}, + "errors":[ + { + "shape":"UnauthorizedException", + "exception":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"NotFoundException", + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + }, + "ResolveAlias":{ + "name":"ResolveAlias", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResolveAliasInput"}, + "output":{"shape":"ResolveAliasOutput"}, + "errors":[ + { + "shape":"UnauthorizedException", + "exception":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"NotFoundException", + "exception":true + }, + { + "shape":"TerminalRoutingStrategyException", + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + }, + "UpdateAlias":{ + "name":"UpdateAlias", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateAliasInput"}, + "output":{"shape":"UpdateAliasOutput"}, + "errors":[ + { + "shape":"UnauthorizedException", + "exception":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"NotFoundException", + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + }, + "UpdateBuild":{ + "name":"UpdateBuild", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateBuildInput"}, + "output":{"shape":"UpdateBuildOutput"}, + "errors":[ + { + "shape":"UnauthorizedException", + "exception":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"NotFoundException", + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + }, + "UpdateFleetAttributes":{ + "name":"UpdateFleetAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateFleetAttributesInput"}, + "output":{"shape":"UpdateFleetAttributesOutput"}, + "errors":[ + { + "shape":"NotFoundException", + "exception":true + }, + { + "shape":"ConflictException", + "exception":true + }, + { + "shape":"InvalidFleetStatusException", + "exception":true + }, + { + "shape":"LimitExceededException", + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + 
"fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"UnauthorizedException", + "exception":true + } + ] + }, + "UpdateFleetCapacity":{ + "name":"UpdateFleetCapacity", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateFleetCapacityInput"}, + "output":{"shape":"UpdateFleetCapacityOutput"}, + "errors":[ + { + "shape":"NotFoundException", + "exception":true + }, + { + "shape":"ConflictException", + "exception":true + }, + { + "shape":"LimitExceededException", + "exception":true + }, + { + "shape":"InvalidFleetStatusException", + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"UnauthorizedException", + "exception":true + } + ] + }, + "UpdateFleetPortSettings":{ + "name":"UpdateFleetPortSettings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateFleetPortSettingsInput"}, + "output":{"shape":"UpdateFleetPortSettingsOutput"}, + "errors":[ + { + "shape":"NotFoundException", + "exception":true + }, + { + "shape":"ConflictException", + "exception":true + }, + { + "shape":"InvalidFleetStatusException", + "exception":true + }, + { + "shape":"LimitExceededException", + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"UnauthorizedException", + "exception":true + } + ] + }, + "UpdateGameSession":{ + "name":"UpdateGameSession", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateGameSessionInput"}, + "output":{"shape":"UpdateGameSessionOutput"}, + "errors":[ + { + "shape":"NotFoundException", + "exception":true + }, + { + "shape":"ConflictException", + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + }, + { + "shape":"UnauthorizedException", + "exception":true + }, + { + "shape":"InvalidGameSessionStatusException", + "exception":true + }, + { + "shape":"InvalidRequestException", + "exception":true + } + ] + }, + "UpdateRuntimeConfiguration":{ + "name":"UpdateRuntimeConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateRuntimeConfigurationInput"}, + "output":{"shape":"UpdateRuntimeConfigurationOutput"}, + "errors":[ + { + "shape":"UnauthorizedException", + "exception":true + }, + { + "shape":"NotFoundException", + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"InvalidFleetStatusException", + "exception":true + } + ] + } + }, + "shapes":{ + "Alias":{ + "type":"structure", + "members":{ + "AliasId":{"shape":"AliasId"}, + "Name":{"shape":"FreeText"}, + "Description":{"shape":"FreeText"}, + "RoutingStrategy":{"shape":"RoutingStrategy"}, + "CreationTime":{"shape":"Timestamp"}, + "LastUpdatedTime":{"shape":"Timestamp"} + } + }, + "AliasId":{ + "type":"string", + "pattern":"^alias-\\S+" + }, + "AliasList":{ + "type":"list", + "member":{"shape":"Alias"} + }, + "AwsCredentials":{ + "type":"structure", + "members":{ + "AccessKeyId":{"shape":"NonEmptyString"}, + "SecretAccessKey":{"shape":"NonEmptyString"}, + "SessionToken":{"shape":"NonEmptyString"} + }, + "sensitive":true + }, + "Build":{ + "type":"structure", + "members":{ + "BuildId":{"shape":"BuildId"}, + "Name":{"shape":"FreeText"}, + 
"Version":{"shape":"FreeText"}, + "Status":{"shape":"BuildStatus"}, + "SizeOnDisk":{"shape":"PositiveLong"}, + "CreationTime":{"shape":"Timestamp"} + } + }, + "BuildId":{ + "type":"string", + "pattern":"^build-\\S+" + }, + "BuildList":{ + "type":"list", + "member":{"shape":"Build"} + }, + "BuildStatus":{ + "type":"string", + "enum":[ + "INITIALIZED", + "READY", + "FAILED" + ] + }, + "ComparisonOperatorType":{ + "type":"string", + "enum":[ + "GreaterThanOrEqualToThreshold", + "GreaterThanThreshold", + "LessThanThreshold", + "LessThanOrEqualToThreshold" + ] + }, + "ConflictException":{ + "type":"structure", + "members":{ + "Message":{"shape":"NonEmptyString"} + }, + "exception":true + }, + "CreateAliasInput":{ + "type":"structure", + "required":[ + "Name", + "RoutingStrategy" + ], + "members":{ + "Name":{"shape":"NonZeroAndMaxString"}, + "Description":{"shape":"NonZeroAndMaxString"}, + "RoutingStrategy":{"shape":"RoutingStrategy"} + } + }, + "CreateAliasOutput":{ + "type":"structure", + "members":{ + "Alias":{"shape":"Alias"} + } + }, + "CreateBuildInput":{ + "type":"structure", + "members":{ + "Name":{"shape":"NonZeroAndMaxString"}, + "Version":{"shape":"NonZeroAndMaxString"}, + "StorageLocation":{"shape":"S3Location"} + } + }, + "CreateBuildOutput":{ + "type":"structure", + "members":{ + "Build":{"shape":"Build"}, + "UploadCredentials":{"shape":"AwsCredentials"}, + "StorageLocation":{"shape":"S3Location"} + } + }, + "CreateFleetInput":{ + "type":"structure", + "required":[ + "Name", + "BuildId", + "EC2InstanceType" + ], + "members":{ + "Name":{"shape":"NonZeroAndMaxString"}, + "Description":{"shape":"NonZeroAndMaxString"}, + "BuildId":{"shape":"BuildId"}, + "ServerLaunchPath":{"shape":"NonZeroAndMaxString"}, + "ServerLaunchParameters":{"shape":"NonZeroAndMaxString"}, + "LogPaths":{"shape":"StringList"}, + "EC2InstanceType":{"shape":"EC2InstanceType"}, + "EC2InboundPermissions":{"shape":"IpPermissionsList"}, + "NewGameSessionProtectionPolicy":{"shape":"ProtectionPolicy"}, + "RuntimeConfiguration":{"shape":"RuntimeConfiguration"} + } + }, + "CreateFleetOutput":{ + "type":"structure", + "members":{ + "FleetAttributes":{"shape":"FleetAttributes"} + } + }, + "CreateGameSessionInput":{ + "type":"structure", + "required":["MaximumPlayerSessionCount"], + "members":{ + "FleetId":{"shape":"FleetId"}, + "AliasId":{"shape":"AliasId"}, + "MaximumPlayerSessionCount":{"shape":"WholeNumber"}, + "Name":{"shape":"NonZeroAndMaxString"}, + "GameProperties":{"shape":"GamePropertyList"} + } + }, + "CreateGameSessionOutput":{ + "type":"structure", + "members":{ + "GameSession":{"shape":"GameSession"} + } + }, + "CreatePlayerSessionInput":{ + "type":"structure", + "required":[ + "GameSessionId", + "PlayerId" + ], + "members":{ + "GameSessionId":{"shape":"GameSessionId"}, + "PlayerId":{"shape":"NonZeroAndMaxString"} + } + }, + "CreatePlayerSessionOutput":{ + "type":"structure", + "members":{ + "PlayerSession":{"shape":"PlayerSession"} + } + }, + "CreatePlayerSessionsInput":{ + "type":"structure", + "required":[ + "GameSessionId", + "PlayerIds" + ], + "members":{ + "GameSessionId":{"shape":"GameSessionId"}, + "PlayerIds":{"shape":"PlayerIdList"} + } + }, + "CreatePlayerSessionsOutput":{ + "type":"structure", + "members":{ + "PlayerSessions":{"shape":"PlayerSessionList"} + } + }, + "DeleteAliasInput":{ + "type":"structure", + "required":["AliasId"], + "members":{ + "AliasId":{"shape":"AliasId"} + } + }, + "DeleteBuildInput":{ + "type":"structure", + "required":["BuildId"], + "members":{ + 
"BuildId":{"shape":"BuildId"} + } + }, + "DeleteFleetInput":{ + "type":"structure", + "required":["FleetId"], + "members":{ + "FleetId":{"shape":"FleetId"} + } + }, + "DeleteScalingPolicyInput":{ + "type":"structure", + "required":[ + "Name", + "FleetId" + ], + "members":{ + "Name":{"shape":"NonZeroAndMaxString"}, + "FleetId":{"shape":"FleetId"} + } + }, + "DescribeAliasInput":{ + "type":"structure", + "required":["AliasId"], + "members":{ + "AliasId":{"shape":"AliasId"} + } + }, + "DescribeAliasOutput":{ + "type":"structure", + "members":{ + "Alias":{"shape":"Alias"} + } + }, + "DescribeBuildInput":{ + "type":"structure", + "required":["BuildId"], + "members":{ + "BuildId":{"shape":"BuildId"} + } + }, + "DescribeBuildOutput":{ + "type":"structure", + "members":{ + "Build":{"shape":"Build"} + } + }, + "DescribeEC2InstanceLimitsInput":{ + "type":"structure", + "members":{ + "EC2InstanceType":{"shape":"EC2InstanceType"} + } + }, + "DescribeEC2InstanceLimitsOutput":{ + "type":"structure", + "members":{ + "EC2InstanceLimits":{"shape":"EC2InstanceLimitList"} + } + }, + "DescribeFleetAttributesInput":{ + "type":"structure", + "members":{ + "FleetIds":{"shape":"FleetIdList"}, + "Limit":{"shape":"PositiveInteger"}, + "NextToken":{"shape":"NonZeroAndMaxString"} + } + }, + "DescribeFleetAttributesOutput":{ + "type":"structure", + "members":{ + "FleetAttributes":{"shape":"FleetAttributesList"}, + "NextToken":{"shape":"NonZeroAndMaxString"} + } + }, + "DescribeFleetCapacityInput":{ + "type":"structure", + "members":{ + "FleetIds":{"shape":"FleetIdList"}, + "Limit":{"shape":"PositiveInteger"}, + "NextToken":{"shape":"NonZeroAndMaxString"} + } + }, + "DescribeFleetCapacityOutput":{ + "type":"structure", + "members":{ + "FleetCapacity":{"shape":"FleetCapacityList"}, + "NextToken":{"shape":"NonZeroAndMaxString"} + } + }, + "DescribeFleetEventsInput":{ + "type":"structure", + "required":["FleetId"], + "members":{ + "FleetId":{"shape":"FleetId"}, + "StartTime":{"shape":"Timestamp"}, + "EndTime":{"shape":"Timestamp"}, + "Limit":{"shape":"PositiveInteger"}, + "NextToken":{"shape":"NonZeroAndMaxString"} + } + }, + "DescribeFleetEventsOutput":{ + "type":"structure", + "members":{ + "Events":{"shape":"EventList"}, + "NextToken":{"shape":"NonZeroAndMaxString"} + } + }, + "DescribeFleetPortSettingsInput":{ + "type":"structure", + "required":["FleetId"], + "members":{ + "FleetId":{"shape":"FleetId"} + } + }, + "DescribeFleetPortSettingsOutput":{ + "type":"structure", + "members":{ + "InboundPermissions":{"shape":"IpPermissionsList"} + } + }, + "DescribeFleetUtilizationInput":{ + "type":"structure", + "members":{ + "FleetIds":{"shape":"FleetIdList"}, + "Limit":{"shape":"PositiveInteger"}, + "NextToken":{"shape":"NonZeroAndMaxString"} + } + }, + "DescribeFleetUtilizationOutput":{ + "type":"structure", + "members":{ + "FleetUtilization":{"shape":"FleetUtilizationList"}, + "NextToken":{"shape":"NonZeroAndMaxString"} + } + }, + "DescribeGameSessionDetailsInput":{ + "type":"structure", + "members":{ + "FleetId":{"shape":"FleetId"}, + "GameSessionId":{"shape":"GameSessionId"}, + "AliasId":{"shape":"AliasId"}, + "StatusFilter":{"shape":"NonZeroAndMaxString"}, + "Limit":{"shape":"PositiveInteger"}, + "NextToken":{"shape":"NonZeroAndMaxString"} + } + }, + "DescribeGameSessionDetailsOutput":{ + "type":"structure", + "members":{ + "GameSessionDetails":{"shape":"GameSessionDetailList"}, + "NextToken":{"shape":"NonZeroAndMaxString"} + } + }, + "DescribeGameSessionsInput":{ + "type":"structure", + "members":{ + 
"FleetId":{"shape":"FleetId"}, + "GameSessionId":{"shape":"GameSessionId"}, + "AliasId":{"shape":"AliasId"}, + "StatusFilter":{"shape":"NonZeroAndMaxString"}, + "Limit":{"shape":"PositiveInteger"}, + "NextToken":{"shape":"NonZeroAndMaxString"} + } + }, + "DescribeGameSessionsOutput":{ + "type":"structure", + "members":{ + "GameSessions":{"shape":"GameSessionList"}, + "NextToken":{"shape":"NonZeroAndMaxString"} + } + }, + "DescribePlayerSessionsInput":{ + "type":"structure", + "members":{ + "GameSessionId":{"shape":"GameSessionId"}, + "PlayerId":{"shape":"NonZeroAndMaxString"}, + "PlayerSessionId":{"shape":"PlayerSessionId"}, + "PlayerSessionStatusFilter":{"shape":"NonZeroAndMaxString"}, + "Limit":{"shape":"PositiveInteger"}, + "NextToken":{"shape":"NonZeroAndMaxString"} + } + }, + "DescribePlayerSessionsOutput":{ + "type":"structure", + "members":{ + "PlayerSessions":{"shape":"PlayerSessionList"}, + "NextToken":{"shape":"NonZeroAndMaxString"} + } + }, + "DescribeRuntimeConfigurationInput":{ + "type":"structure", + "required":["FleetId"], + "members":{ + "FleetId":{"shape":"FleetId"} + } + }, + "DescribeRuntimeConfigurationOutput":{ + "type":"structure", + "members":{ + "RuntimeConfiguration":{"shape":"RuntimeConfiguration"} + } + }, + "DescribeScalingPoliciesInput":{ + "type":"structure", + "required":["FleetId"], + "members":{ + "FleetId":{"shape":"FleetId"}, + "StatusFilter":{"shape":"ScalingStatusType"}, + "Limit":{"shape":"PositiveInteger"}, + "NextToken":{"shape":"NonZeroAndMaxString"} + } + }, + "DescribeScalingPoliciesOutput":{ + "type":"structure", + "members":{ + "ScalingPolicies":{"shape":"ScalingPolicyList"}, + "NextToken":{"shape":"NonZeroAndMaxString"} + } + }, + "Double":{"type":"double"}, + "EC2InstanceCounts":{ + "type":"structure", + "members":{ + "DESIRED":{"shape":"WholeNumber"}, + "MINIMUM":{"shape":"WholeNumber"}, + "MAXIMUM":{"shape":"WholeNumber"}, + "PENDING":{"shape":"WholeNumber"}, + "ACTIVE":{"shape":"WholeNumber"}, + "IDLE":{"shape":"WholeNumber"}, + "TERMINATING":{"shape":"WholeNumber"} + } + }, + "EC2InstanceLimit":{ + "type":"structure", + "members":{ + "EC2InstanceType":{"shape":"EC2InstanceType"}, + "CurrentInstances":{"shape":"WholeNumber"}, + "InstanceLimit":{"shape":"WholeNumber"} + } + }, + "EC2InstanceLimitList":{ + "type":"list", + "member":{"shape":"EC2InstanceLimit"} + }, + "EC2InstanceType":{ + "type":"string", + "enum":[ + "t2.micro", + "t2.small", + "t2.medium", + "t2.large", + "c3.large", + "c3.xlarge", + "c3.2xlarge", + "c3.4xlarge", + "c3.8xlarge", + "c4.large", + "c4.xlarge", + "c4.2xlarge", + "c4.4xlarge", + "c4.8xlarge", + "r3.large", + "r3.xlarge", + "r3.2xlarge", + "r3.4xlarge", + "r3.8xlarge", + "m3.medium", + "m3.large", + "m3.xlarge", + "m3.2xlarge", + "m4.large", + "m4.xlarge", + "m4.2xlarge", + "m4.4xlarge", + "m4.10xlarge" + ] + }, + "Event":{ + "type":"structure", + "members":{ + "EventId":{"shape":"NonZeroAndMaxString"}, + "ResourceId":{"shape":"NonZeroAndMaxString"}, + "EventCode":{"shape":"EventCode"}, + "Message":{"shape":"NonEmptyString"}, + "EventTime":{"shape":"Timestamp"} + } + }, + "EventCode":{ + "type":"string", + "enum":[ + "GENERIC_EVENT", + "FLEET_CREATED", + "FLEET_DELETED", + "FLEET_SCALING_EVENT", + "FLEET_STATE_DOWNLOADING", + "FLEET_STATE_VALIDATING", + "FLEET_STATE_BUILDING", + "FLEET_STATE_ACTIVATING", + "FLEET_STATE_ACTIVE", + "FLEET_STATE_ERROR", + "FLEET_INITIALIZATION_FAILED", + "FLEET_BINARY_DOWNLOAD_FAILED", + "FLEET_VALIDATION_LAUNCH_PATH_NOT_FOUND", + "FLEET_VALIDATION_EXECUTABLE_RUNTIME_FAILURE", + 
"FLEET_VALIDATION_TIMED_OUT", + "FLEET_ACTIVATION_FAILED", + "FLEET_ACTIVATION_FAILED_NO_INSTANCES", + "FLEET_NEW_GAME_SESSION_PROTECTION_POLICY_UPDATED" + ] + }, + "EventList":{ + "type":"list", + "member":{"shape":"Event"} + }, + "FleetAttributes":{ + "type":"structure", + "members":{ + "FleetId":{"shape":"FleetId"}, + "Description":{"shape":"NonZeroAndMaxString"}, + "Name":{"shape":"NonZeroAndMaxString"}, + "CreationTime":{"shape":"Timestamp"}, + "TerminationTime":{"shape":"Timestamp"}, + "Status":{"shape":"FleetStatus"}, + "BuildId":{"shape":"BuildId"}, + "ServerLaunchPath":{"shape":"NonZeroAndMaxString"}, + "ServerLaunchParameters":{"shape":"NonZeroAndMaxString"}, + "LogPaths":{"shape":"StringList"}, + "NewGameSessionProtectionPolicy":{"shape":"ProtectionPolicy"} + } + }, + "FleetAttributesList":{ + "type":"list", + "member":{"shape":"FleetAttributes"} + }, + "FleetCapacity":{ + "type":"structure", + "members":{ + "FleetId":{"shape":"FleetId"}, + "InstanceType":{"shape":"EC2InstanceType"}, + "InstanceCounts":{"shape":"EC2InstanceCounts"} + } + }, + "FleetCapacityExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"NonEmptyString"} + }, + "exception":true + }, + "FleetCapacityList":{ + "type":"list", + "member":{"shape":"FleetCapacity"} + }, + "FleetId":{ + "type":"string", + "pattern":"^fleet-\\S+" + }, + "FleetIdList":{ + "type":"list", + "member":{"shape":"FleetId"}, + "min":1 + }, + "FleetStatus":{ + "type":"string", + "enum":[ + "NEW", + "DOWNLOADING", + "VALIDATING", + "BUILDING", + "ACTIVATING", + "ACTIVE", + "DELETING", + "ERROR", + "TERMINATED" + ] + }, + "FleetUtilization":{ + "type":"structure", + "members":{ + "FleetId":{"shape":"FleetId"}, + "ActiveServerProcessCount":{"shape":"WholeNumber"}, + "ActiveGameSessionCount":{"shape":"WholeNumber"}, + "CurrentPlayerSessionCount":{"shape":"WholeNumber"}, + "MaximumPlayerSessionCount":{"shape":"WholeNumber"} + } + }, + "FleetUtilizationList":{ + "type":"list", + "member":{"shape":"FleetUtilization"} + }, + "FreeText":{"type":"string"}, + "GameProperty":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{"shape":"GamePropertyKey"}, + "Value":{"shape":"GamePropertyValue"} + } + }, + "GamePropertyKey":{ + "type":"string", + "max":32 + }, + "GamePropertyList":{ + "type":"list", + "member":{"shape":"GameProperty"}, + "max":16 + }, + "GamePropertyValue":{ + "type":"string", + "max":96 + }, + "GameSession":{ + "type":"structure", + "members":{ + "GameSessionId":{"shape":"GameSessionId"}, + "Name":{"shape":"NonZeroAndMaxString"}, + "FleetId":{"shape":"FleetId"}, + "CreationTime":{"shape":"Timestamp"}, + "TerminationTime":{"shape":"Timestamp"}, + "CurrentPlayerSessionCount":{"shape":"WholeNumber"}, + "MaximumPlayerSessionCount":{"shape":"WholeNumber"}, + "Status":{"shape":"GameSessionStatus"}, + "GameProperties":{"shape":"GamePropertyList"}, + "IpAddress":{"shape":"IpAddress"}, + "Port":{"shape":"PortNumber"}, + "PlayerSessionCreationPolicy":{"shape":"PlayerSessionCreationPolicy"} + } + }, + "GameSessionDetail":{ + "type":"structure", + "members":{ + "GameSession":{"shape":"GameSession"}, + "ProtectionPolicy":{"shape":"ProtectionPolicy"} + } + }, + "GameSessionDetailList":{ + "type":"list", + "member":{"shape":"GameSessionDetail"} + }, + "GameSessionFullException":{ + "type":"structure", + "members":{ + "Message":{"shape":"NonEmptyString"} + }, + "exception":true + }, + "GameSessionId":{ + "type":"string", + "pattern":"^(gamei-|gsess-)\\S+" + }, + "GameSessionList":{ + 
"type":"list", + "member":{"shape":"GameSession"} + }, + "GameSessionStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "ACTIVATING", + "TERMINATED", + "TERMINATING" + ] + }, + "GetGameSessionLogUrlInput":{ + "type":"structure", + "required":["GameSessionId"], + "members":{ + "GameSessionId":{"shape":"GameSessionId"} + } + }, + "GetGameSessionLogUrlOutput":{ + "type":"structure", + "members":{ + "PreSignedUrl":{"shape":"NonZeroAndMaxString"} + } + }, + "Integer":{"type":"integer"}, + "InternalServiceException":{ + "type":"structure", + "members":{ + "Message":{"shape":"NonEmptyString"} + }, + "exception":true, + "fault":true + }, + "InvalidFleetStatusException":{ + "type":"structure", + "members":{ + "Message":{"shape":"NonEmptyString"} + }, + "exception":true + }, + "InvalidGameSessionStatusException":{ + "type":"structure", + "members":{ + "Message":{"shape":"NonEmptyString"} + }, + "exception":true + }, + "InvalidRequestException":{ + "type":"structure", + "members":{ + "Message":{"shape":"NonEmptyString"} + }, + "exception":true + }, + "IpAddress":{"type":"string"}, + "IpPermission":{ + "type":"structure", + "required":[ + "FromPort", + "ToPort", + "IpRange", + "Protocol" + ], + "members":{ + "FromPort":{"shape":"PortNumber"}, + "ToPort":{"shape":"PortNumber"}, + "IpRange":{"shape":"NonBlankString"}, + "Protocol":{"shape":"IpProtocol"} + } + }, + "IpPermissionsList":{ + "type":"list", + "member":{"shape":"IpPermission"}, + "max":50 + }, + "IpProtocol":{ + "type":"string", + "enum":[ + "TCP", + "UDP" + ] + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"NonEmptyString"} + }, + "exception":true + }, + "ListAliasesInput":{ + "type":"structure", + "members":{ + "RoutingStrategyType":{"shape":"RoutingStrategyType"}, + "Name":{"shape":"NonEmptyString"}, + "Limit":{"shape":"PositiveInteger"}, + "NextToken":{"shape":"NonEmptyString"} + } + }, + "ListAliasesOutput":{ + "type":"structure", + "members":{ + "Aliases":{"shape":"AliasList"}, + "NextToken":{"shape":"NonEmptyString"} + } + }, + "ListBuildsInput":{ + "type":"structure", + "members":{ + "Status":{"shape":"BuildStatus"}, + "Limit":{"shape":"PositiveInteger"}, + "NextToken":{"shape":"NonEmptyString"} + } + }, + "ListBuildsOutput":{ + "type":"structure", + "members":{ + "Builds":{"shape":"BuildList"}, + "NextToken":{"shape":"NonEmptyString"} + } + }, + "ListFleetsInput":{ + "type":"structure", + "members":{ + "BuildId":{"shape":"BuildId"}, + "Limit":{"shape":"PositiveInteger"}, + "NextToken":{"shape":"NonZeroAndMaxString"} + } + }, + "ListFleetsOutput":{ + "type":"structure", + "members":{ + "FleetIds":{"shape":"FleetIdList"}, + "NextToken":{"shape":"NonZeroAndMaxString"} + } + }, + "MetricName":{ + "type":"string", + "enum":[ + "ActivatingGameSessions", + "ActiveGameSessions", + "ActiveInstances", + "AvailablePlayerSessions", + "CurrentPlayerSessions", + "IdleInstances" + ] + }, + "NonBlankString":{ + "type":"string", + "pattern":"[^\\s]+" + }, + "NonEmptyString":{ + "type":"string", + "min":1 + }, + "NonZeroAndMaxString":{ + "type":"string", + "min":1, + "max":1024 + }, + "NotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"NonEmptyString"} + }, + "exception":true + }, + "PlayerIdList":{ + "type":"list", + "member":{"shape":"NonZeroAndMaxString"}, + "min":1, + "max":25 + }, + "PlayerSession":{ + "type":"structure", + "members":{ + "PlayerSessionId":{"shape":"PlayerSessionId"}, + "PlayerId":{"shape":"NonZeroAndMaxString"}, + "GameSessionId":{"shape":"GameSessionId"}, 
+ "FleetId":{"shape":"FleetId"}, + "CreationTime":{"shape":"Timestamp"}, + "TerminationTime":{"shape":"Timestamp"}, + "Status":{"shape":"PlayerSessionStatus"}, + "IpAddress":{"shape":"IpAddress"}, + "Port":{"shape":"PortNumber"} + } + }, + "PlayerSessionCreationPolicy":{ + "type":"string", + "enum":[ + "ACCEPT_ALL", + "DENY_ALL" + ] + }, + "PlayerSessionId":{ + "type":"string", + "pattern":"^psess-\\S+" + }, + "PlayerSessionList":{ + "type":"list", + "member":{"shape":"PlayerSession"} + }, + "PlayerSessionStatus":{ + "type":"string", + "enum":[ + "RESERVED", + "ACTIVE", + "COMPLETED", + "TIMEDOUT" + ] + }, + "PortNumber":{ + "type":"integer", + "min":1025, + "max":60000 + }, + "PositiveInteger":{ + "type":"integer", + "min":1 + }, + "PositiveLong":{ + "type":"long", + "min":1 + }, + "ProtectionPolicy":{ + "type":"string", + "enum":[ + "NoProtection", + "FullProtection" + ] + }, + "PutScalingPolicyInput":{ + "type":"structure", + "required":[ + "Name", + "FleetId", + "ScalingAdjustment", + "ScalingAdjustmentType", + "Threshold", + "ComparisonOperator", + "EvaluationPeriods", + "MetricName" + ], + "members":{ + "Name":{"shape":"NonZeroAndMaxString"}, + "FleetId":{"shape":"FleetId"}, + "ScalingAdjustment":{"shape":"Integer"}, + "ScalingAdjustmentType":{"shape":"ScalingAdjustmentType"}, + "Threshold":{"shape":"Double"}, + "ComparisonOperator":{"shape":"ComparisonOperatorType"}, + "EvaluationPeriods":{"shape":"PositiveInteger"}, + "MetricName":{"shape":"MetricName"} + } + }, + "PutScalingPolicyOutput":{ + "type":"structure", + "members":{ + "Name":{"shape":"NonZeroAndMaxString"} + } + }, + "RequestUploadCredentialsInput":{ + "type":"structure", + "required":["BuildId"], + "members":{ + "BuildId":{"shape":"BuildId"} + } + }, + "RequestUploadCredentialsOutput":{ + "type":"structure", + "members":{ + "UploadCredentials":{"shape":"AwsCredentials"}, + "StorageLocation":{"shape":"S3Location"} + } + }, + "ResolveAliasInput":{ + "type":"structure", + "required":["AliasId"], + "members":{ + "AliasId":{"shape":"AliasId"} + } + }, + "ResolveAliasOutput":{ + "type":"structure", + "members":{ + "FleetId":{"shape":"FleetId"} + } + }, + "RoutingStrategy":{ + "type":"structure", + "members":{ + "Type":{"shape":"RoutingStrategyType"}, + "FleetId":{"shape":"FleetId"}, + "Message":{"shape":"FreeText"} + } + }, + "RoutingStrategyType":{ + "type":"string", + "enum":[ + "SIMPLE", + "TERMINAL" + ] + }, + "RuntimeConfiguration":{ + "type":"structure", + "members":{ + "ServerProcesses":{"shape":"ServerProcessList"} + } + }, + "S3Location":{ + "type":"structure", + "members":{ + "Bucket":{"shape":"NonEmptyString"}, + "Key":{"shape":"NonEmptyString"}, + "RoleArn":{"shape":"NonEmptyString"} + } + }, + "ScalingAdjustmentType":{ + "type":"string", + "enum":[ + "ChangeInCapacity", + "ExactCapacity", + "PercentChangeInCapacity" + ] + }, + "ScalingPolicy":{ + "type":"structure", + "members":{ + "FleetId":{"shape":"FleetId"}, + "Name":{"shape":"NonZeroAndMaxString"}, + "Status":{"shape":"ScalingStatusType"}, + "ScalingAdjustment":{"shape":"Integer"}, + "ScalingAdjustmentType":{"shape":"ScalingAdjustmentType"}, + "ComparisonOperator":{"shape":"ComparisonOperatorType"}, + "Threshold":{"shape":"Double"}, + "EvaluationPeriods":{"shape":"PositiveInteger"}, + "MetricName":{"shape":"MetricName"} + } + }, + "ScalingPolicyList":{ + "type":"list", + "member":{"shape":"ScalingPolicy"} + }, + "ScalingStatusType":{ + "type":"string", + "enum":[ + "ACTIVE", + "UPDATE_REQUESTED", + "UPDATING", + "DELETE_REQUESTED", + "DELETING", + "DELETED", 
+ "ERROR" + ] + }, + "ServerProcess":{ + "type":"structure", + "required":[ + "LaunchPath", + "ConcurrentExecutions" + ], + "members":{ + "LaunchPath":{"shape":"NonZeroAndMaxString"}, + "Parameters":{"shape":"NonZeroAndMaxString"}, + "ConcurrentExecutions":{"shape":"PositiveInteger"} + } + }, + "ServerProcessList":{ + "type":"list", + "member":{"shape":"ServerProcess"}, + "min":1, + "max":50 + }, + "StringList":{ + "type":"list", + "member":{"shape":"NonZeroAndMaxString"} + }, + "TerminalRoutingStrategyException":{ + "type":"structure", + "members":{ + "Message":{"shape":"NonEmptyString"} + }, + "exception":true + }, + "Timestamp":{"type":"timestamp"}, + "UnauthorizedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"NonEmptyString"} + }, + "exception":true + }, + "UpdateAliasInput":{ + "type":"structure", + "required":["AliasId"], + "members":{ + "AliasId":{"shape":"AliasId"}, + "Name":{"shape":"NonZeroAndMaxString"}, + "Description":{"shape":"NonZeroAndMaxString"}, + "RoutingStrategy":{"shape":"RoutingStrategy"} + } + }, + "UpdateAliasOutput":{ + "type":"structure", + "members":{ + "Alias":{"shape":"Alias"} + } + }, + "UpdateBuildInput":{ + "type":"structure", + "required":["BuildId"], + "members":{ + "BuildId":{"shape":"BuildId"}, + "Name":{"shape":"NonZeroAndMaxString"}, + "Version":{"shape":"NonZeroAndMaxString"} + } + }, + "UpdateBuildOutput":{ + "type":"structure", + "members":{ + "Build":{"shape":"Build"} + } + }, + "UpdateFleetAttributesInput":{ + "type":"structure", + "required":["FleetId"], + "members":{ + "FleetId":{"shape":"FleetId"}, + "Name":{"shape":"NonZeroAndMaxString"}, + "Description":{"shape":"NonZeroAndMaxString"}, + "NewGameSessionProtectionPolicy":{"shape":"ProtectionPolicy"} + } + }, + "UpdateFleetAttributesOutput":{ + "type":"structure", + "members":{ + "FleetId":{"shape":"FleetId"} + } + }, + "UpdateFleetCapacityInput":{ + "type":"structure", + "required":["FleetId"], + "members":{ + "FleetId":{"shape":"FleetId"}, + "DesiredInstances":{"shape":"WholeNumber"}, + "MinSize":{"shape":"WholeNumber"}, + "MaxSize":{"shape":"WholeNumber"} + } + }, + "UpdateFleetCapacityOutput":{ + "type":"structure", + "members":{ + "FleetId":{"shape":"FleetId"} + } + }, + "UpdateFleetPortSettingsInput":{ + "type":"structure", + "required":["FleetId"], + "members":{ + "FleetId":{"shape":"FleetId"}, + "InboundPermissionAuthorizations":{"shape":"IpPermissionsList"}, + "InboundPermissionRevocations":{"shape":"IpPermissionsList"} + } + }, + "UpdateFleetPortSettingsOutput":{ + "type":"structure", + "members":{ + "FleetId":{"shape":"FleetId"} + } + }, + "UpdateGameSessionInput":{ + "type":"structure", + "required":["GameSessionId"], + "members":{ + "GameSessionId":{"shape":"GameSessionId"}, + "MaximumPlayerSessionCount":{"shape":"WholeNumber"}, + "Name":{"shape":"NonZeroAndMaxString"}, + "PlayerSessionCreationPolicy":{"shape":"PlayerSessionCreationPolicy"}, + "ProtectionPolicy":{"shape":"ProtectionPolicy"} + } + }, + "UpdateGameSessionOutput":{ + "type":"structure", + "members":{ + "GameSession":{"shape":"GameSession"} + } + }, + "UpdateRuntimeConfigurationInput":{ + "type":"structure", + "required":[ + "FleetId", + "RuntimeConfiguration" + ], + "members":{ + "FleetId":{"shape":"FleetId"}, + "RuntimeConfiguration":{"shape":"RuntimeConfiguration"} + } + }, + "UpdateRuntimeConfigurationOutput":{ + "type":"structure", + "members":{ + "RuntimeConfiguration":{"shape":"RuntimeConfiguration"} + } + }, + "WholeNumber":{ + "type":"integer", + "min":0 + } + } +} diff --git 
a/vendor/github.com/aws/aws-sdk-go/models/apis/gamelift/2015-10-01/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/gamelift/2015-10-01/docs-2.json new file mode 100644 index 000000000..56a26bc13 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/gamelift/2015-10-01/docs-2.json @@ -0,0 +1,1084 @@ +{ + "version": "2.0", + "operations": { + "CreateAlias": "

    Creates an alias for a fleet. You can use an alias to anonymize your fleet by referencing an alias instead of a specific fleet when you create game sessions. Amazon GameLift supports two types of routing strategies for aliases: simple and terminal. Use a simple alias to point to an active fleet. Use a terminal alias to display a message to incoming traffic instead of routing players to an active fleet. This option is useful when a game server is no longer supported but you want to provide better messaging than a standard 404 error.

    To create a fleet alias, specify an alias name, routing strategy, and optional description. If successful, a new alias record is returned, including an alias ID, which you can reference when creating a game session. To reassign the alias to another fleet ID, call UpdateAlias.
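    As an illustration only (not part of the vendored model): a minimal Go sketch of this call through the generated aws-sdk-go gamelift client. The region, alias name, and fleet ID are placeholder assumptions; the input shapes match CreateAliasInput and RoutingStrategy as defined in the api-2.json above.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/gamelift"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := gamelift.New(sess)

	// A SIMPLE routing strategy points the alias at an active fleet;
	// "fleet-1111aaaa" is a hypothetical fleet ID.
	out, err := svc.CreateAlias(&gamelift.CreateAliasInput{
		Name:        aws.String("prod-alias"),
		Description: aws.String("routes players to the current production fleet"),
		RoutingStrategy: &gamelift.RoutingStrategy{
			Type:    aws.String("SIMPLE"),
			FleetId: aws.String("fleet-1111aaaa"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created alias:", aws.StringValue(out.Alias.AliasId))
}
```

    The later sketches reuse this client construction and refer to it as svc.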

    ", + "CreateBuild": "

    Initializes a new build record and generates information required to upload a game build to Amazon GameLift. Once the build record has been created and is in an INITIALIZED state, you can upload your game build.

    Do not use this API action unless you are using your own Amazon Simple Storage Service (Amazon S3) client and need to manually upload your build files. Instead, to create a build, use the CLI command upload-build, which creates a new build record and uploads the build files in one step. (See the Amazon GameLift Developer Guide for more details on the CLI and the upload process.)

    To create a new build, optionally specify a build name and version. This metadata is stored with other properties in the build record and is displayed in the GameLift console (it is not visible to players). If successful, this action returns the newly created build record along with the Amazon S3 storage location and AWS account credentials. Use the location and credentials to upload your game build.
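    A hedged sketch of that flow, assuming the imports and *gamelift.GameLift client ("svc") from the CreateAlias example; the build name and version are placeholders. The output carries the S3 location and temporary credentials described above.

```go
// Sketch only: creates a build record; while the build is INITIALIZED,
// the returned credentials and S3 location are used to upload the files.
func createBuild(svc *gamelift.GameLift) error {
	out, err := svc.CreateBuild(&gamelift.CreateBuildInput{
		Name:    aws.String("my-game-server"), // hypothetical
		Version: aws.String("1.0.0"),          // hypothetical
	})
	if err != nil {
		return err
	}
	fmt.Println("build:", aws.StringValue(out.Build.BuildId),
		"upload bucket:", aws.StringValue(out.StorageLocation.Bucket))
	return nil
}
```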

    ", + "CreateFleet": "

    Creates a new fleet to run your game servers. A fleet is a set of Amazon Elastic Compute Cloud (Amazon EC2) instances, each of which can run multiple server processes to host game sessions. You configure a fleet to create instances with certain hardware specifications (see Amazon EC2 Instance Types for more information), and deploy a specified game build to each instance. A newly created fleet passes through several states; once it reaches the ACTIVE state, it can begin hosting game sessions.

    To create a new fleet, provide a fleet name, an EC2 instance type, and a build ID of the game build to deploy. You can also configure the new fleet with the following settings: (1) a runtime configuration describing what server processes to run on each instance in the fleet (required to create a fleet), (2) access permissions for inbound traffic, (3) fleet-wide game session protection, and (4) the location of default log files for GameLift to upload and store. A sketch of such a call appears after the lists below.

    If the CreateFleet call is successful, Amazon GameLift performs the following tasks:

    • Creates a fleet record and sets the state to NEW (followed by other states as the fleet is activated).
    • Sets the fleet's capacity to 1 \"desired\", which causes GameLift to start one new EC2 instance.
    • Starts launching server processes on the instance. If the fleet is configured to run multiple server processes per instance, GameLift staggers each launch by a few seconds.
    • Begins writing events to the fleet event log, which can be accessed in the GameLift console.
    • Sets the fleet's status to ACTIVE once one server process in the fleet is ready to host a game session.

    After a fleet is created, use the following actions to change fleet properties and configuration:

    • UpdateFleetAttributes -- Update fleet metadata, including name and description.
    • UpdateFleetCapacity -- Increase or decrease the number of instances you want the fleet to maintain.
    • UpdateFleetPortSettings -- Change the IP address and port ranges that allow access to incoming traffic.
    • UpdateRuntimeConfiguration -- Change how server processes are launched in the fleet, including launch path, launch parameters, and the number of concurrent processes.
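    The sketch promised above, again assuming the svc client from the CreateAlias example: a CreateFleet call with the three required parameters plus a one-process runtime configuration and an inbound permission. All IDs, paths, and ports are hypothetical; the instance type and protocol strings come from the enums in the api-2.json above.

```go
// Sketch only: creates a fleet that runs two concurrent copies of one
// server process per instance and opens a single UDP port.
func createFleet(svc *gamelift.GameLift) error {
	out, err := svc.CreateFleet(&gamelift.CreateFleetInput{
		Name:            aws.String("prod-fleet"),
		BuildId:         aws.String("build-1111aaaa"), // hypothetical
		EC2InstanceType: aws.String("c4.large"),
		RuntimeConfiguration: &gamelift.RuntimeConfiguration{
			ServerProcesses: []*gamelift.ServerProcess{{
				LaunchPath:           aws.String("/local/game/MyServer"), // hypothetical
				ConcurrentExecutions: aws.Int64(2),
			}},
		},
		EC2InboundPermissions: []*gamelift.IpPermission{{
			FromPort: aws.Int64(33435),
			ToPort:   aws.Int64(33435),
			IpRange:  aws.String("0.0.0.0/0"),
			Protocol: aws.String("UDP"),
		}},
	})
	if err != nil {
		return err
	}
	fmt.Println("fleet status:", aws.StringValue(out.FleetAttributes.Status))
	return nil
}
```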
    ", + "CreateGameSession": "

    Creates a multiplayer game session for players. This action creates a game session record and assigns the new session to an instance in the specified fleet, which initializes a new server process to host the game session. A fleet must be in an ACTIVE state before a game session can be created in it.

    To create a game session, specify either a fleet ID or an alias ID and indicate the maximum number of players the game session allows. You can also provide a name and a set of properties for your game (optional). If successful, a GameSession object is returned containing session properties, including an IP address. By default, newly created game sessions accept new players. Use UpdateGameSession to change this creation policy.
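    A minimal sketch under the same assumptions (svc from the CreateAlias example, hypothetical fleet ID), showing the one required field plus the optional name and a custom game property:

```go
// Sketch only: starts a game session with room for up to 8 players.
func createGameSession(svc *gamelift.GameLift) error {
	out, err := svc.CreateGameSession(&gamelift.CreateGameSessionInput{
		FleetId:                   aws.String("fleet-1111aaaa"), // hypothetical
		MaximumPlayerSessionCount: aws.Int64(8),
		Name:                      aws.String("match-42"), // hypothetical
		GameProperties: []*gamelift.GameProperty{{
			Key:   aws.String("mode"),       // hypothetical
			Value: aws.String("deathmatch"), // hypothetical
		}},
	})
	if err != nil {
		return err
	}
	// The returned IP address and port are what game clients connect to.
	fmt.Println("connect to:", aws.StringValue(out.GameSession.IpAddress),
		aws.Int64Value(out.GameSession.Port))
	return nil
}
```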

    ", + "CreatePlayerSession": "

    Adds a player to a game session and creates a player session record. A game session must be in an ACTIVE state, have a creation policy of ACCEPT_ALL, and have an open player slot before players can be added to the session.

    To create a player session, specify a game session ID and player ID. If successful, the player is added to the game session and a new PlayerSession object is returned.

    ", + "CreatePlayerSessions": "

    Adds a group of players to a game session. Similar to CreatePlayerSession, this action allows you to add multiple players in a single call, which is useful for games that provide party and/or matchmaking features. A game session must be in an ACTIVE state, have a creation policy of ACCEPT_ALL, and have an open player slot before players can be added to the session.

    To create player sessions, specify a game session ID and a list of player IDs. If successful, the players are added to the game session and a set of new PlayerSession objects is returned.
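    A sketch of the batch call, assuming svc from the CreateAlias example; the game session ID and player IDs are placeholders (the model allows 1 to 25 player IDs per request):

```go
// Sketch only: reserves slots for a whole party in a single call.
func addParty(svc *gamelift.GameLift) error {
	out, err := svc.CreatePlayerSessions(&gamelift.CreatePlayerSessionsInput{
		GameSessionId: aws.String("gsess-1111aaaa"), // hypothetical
		PlayerIds:     aws.StringSlice([]string{"player-1", "player-2", "player-3"}),
	})
	if err != nil {
		return err
	}
	for _, ps := range out.PlayerSessions {
		fmt.Println(aws.StringValue(ps.PlayerId), "->", aws.StringValue(ps.PlayerSessionId))
	}
	return nil
}
```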

    ", + "DeleteAlias": "

    Deletes an alias. This action removes all record of the alias; game clients attempting to access a server process using the deleted alias receive an error. To delete an alias, specify the alias ID to be deleted.

    ", + "DeleteBuild": "

    Deletes a build. This action permanently deletes the build record and any uploaded build files.

    To delete a build, specify its ID. Deleting a build does not affect the status of any active fleets using the build, but you can no longer create new fleets with the deleted build.

    ", + "DeleteFleet": "

    Deletes everything related to a fleet. Before deleting a fleet, you must set the fleet's desired capacity to zero. See UpdateFleetCapacity.

    This action removes the fleet's resources and the fleet record. Once a fleet is deleted, you can no longer use that fleet.

    ", + "DeleteScalingPolicy": "

    Deletes a fleet scaling policy. Once deleted, the policy is no longer in force, and all record of it is removed. To delete a scaling policy, specify both the scaling policy name and the fleet ID it is associated with.

    ", + "DescribeAlias": "

    Retrieves properties for a specified alias. To get the alias, specify an alias ID. If successful, an Alias object is returned.

    ", + "DescribeBuild": "

    Retrieves properties for a build. To get a build record, specify a build ID. If successful, an object containing the build properties is returned.

    ", + "DescribeEC2InstanceLimits": "

    Retrieves the following information for the specified EC2 instance type:

    • maximum number of instances allowed per AWS account (service limit)
    • current usage level for the AWS account

    Service limits vary depending on region. Available regions for GameLift can be found in the AWS Management Console for GameLift (see the drop-down list in the upper right corner).

    ", + "DescribeFleetAttributes": "

    Retrieves fleet properties, including metadata, status, and configuration, for one or more fleets. You can request attributes for all fleets, or specify a list of one or more fleet IDs. When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a FleetAttributes object is returned for each requested fleet ID. When specifying a list of fleet IDs, attribute objects are returned only for fleets that currently exist.

    Some API actions may limit the number of fleet IDs allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed.
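    The pagination pattern described above, sketched with svc from the CreateAlias example (the page size of 10 is arbitrary); the same Limit/NextToken loop applies to the other Describe* and List* actions in this model:

```go
// Sketch only: pages through all fleet attributes until NextToken is empty.
func listAllFleetAttributes(svc *gamelift.GameLift) error {
	var token *string
	for {
		out, err := svc.DescribeFleetAttributes(&gamelift.DescribeFleetAttributesInput{
			Limit:     aws.Int64(10),
			NextToken: token,
		})
		if err != nil {
			return err
		}
		for _, fa := range out.FleetAttributes {
			fmt.Println(aws.StringValue(fa.FleetId), aws.StringValue(fa.Status))
		}
		token = out.NextToken
		if token == nil {
			break // no more pages
		}
	}
	return nil
}
```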

    ", + "DescribeFleetCapacity": "

    Retrieves the current status of fleet capacity for one or more fleets. This information includes the number of instances that have been requested for the fleet and the number currently active. You can request capacity for all fleets, or specify a list of one or more fleet IDs. When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a FleetCapacity object is returned for each requested fleet ID. When specifying a list of fleet IDs, attribute objects are returned only for fleets that currently exist.

    Some API actions may limit the number of fleet IDs allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed.

    ", + "DescribeFleetEvents": "

    Retrieves entries from the specified fleet's event log. You can specify a time range to limit the result set. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, a collection of event log entries matching the request is returned.

    ", + "DescribeFleetPortSettings": "

    Retrieves the inbound connection permissions for a fleet. Connection permissions include a range of IP addresses and port settings that incoming traffic can use to access server processes in the fleet. To get a fleet's inbound connection permissions, specify a fleet ID. If successful, a collection of IpPermission objects is returned for the requested fleet ID. If the requested fleet has been deleted, the result set is empty.

    ", + "DescribeFleetUtilization": "

    Retrieves utilization statistics for one or more fleets. You can request utilization data for all fleets, or specify a list of one or more fleet IDs. When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a FleetUtilization object is returned for each requested fleet ID. When specifying a list of fleet IDs, utilization objects are returned only for fleets that currently exist.

    Some API actions may limit the number of fleet IDs allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed.

    ", + "DescribeGameSessionDetails": "

    Retrieves properties, including the protection policy in force, for one or more game sessions. This action can be used in several ways: (1) provide a GameSessionId to request details for a specific game session; (2) provide either a FleetId or an AliasId to request properties for all game sessions running on a fleet.

    To get game session record(s), specify just one of the following: game session ID, fleet ID, or alias ID. You can filter this request by game session status. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, a GameSessionDetail object is returned for each session matching the request.

    ", + "DescribeGameSessions": "

    Retrieves properties for one or more game sessions. This action can be used in several ways: (1) provide a GameSessionId to request properties for a specific game session; (2) provide a FleetId or an AliasId to request properties for all game sessions running on a fleet.

    To get game session record(s), specify just one of the following: game session ID, fleet ID, or alias ID. You can filter this request by game session status. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, a GameSession object is returned for each session matching the request.
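    One of those ways, sketched with svc from the CreateAlias example and a hypothetical fleet ID; the ACTIVE status string comes from the GameSessionStatus enum above:

```go
// Sketch only: lists the ACTIVE game sessions on one fleet.
func activeSessions(svc *gamelift.GameLift) error {
	out, err := svc.DescribeGameSessions(&gamelift.DescribeGameSessionsInput{
		FleetId:      aws.String("fleet-1111aaaa"), // hypothetical
		StatusFilter: aws.String("ACTIVE"),
	})
	if err != nil {
		return err
	}
	for _, gs := range out.GameSessions {
		fmt.Println(aws.StringValue(gs.GameSessionId),
			aws.Int64Value(gs.CurrentPlayerSessionCount), "players")
	}
	return nil
}
```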

    ", + "DescribePlayerSessions": "

    Retrieves properties for one or more player sessions. This action can be used in several ways: (1) provide a PlayerSessionId parameter to request properties for a specific player session; (2) provide a GameSessionId parameter to request properties for all player sessions in the specified game session; (3) provide a PlayerId parameter to request properties for all player sessions of a specified player.

    To get player session record(s), specify only one of the following: a player session ID, a game session ID, or a player ID. You can filter this request by player session status. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, a PlayerSession object is returned for each session matching the request.

    ", + "DescribeRuntimeConfiguration": "

    Retrieves the current runtime configuration for the specified fleet. The runtime configuration tells GameLift how to launch server processes on instances in the fleet.

    ", + "DescribeScalingPolicies": "

    Retrieves all scaling policies applied to a fleet.

    To get a fleet's scaling policies, specify the fleet ID. You can filter this request by policy status, such as to retrieve only active scaling policies. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, a set of ScalingPolicy objects is returned for the fleet.

    ", + "GetGameSessionLogUrl": "

    Retrieves the location of stored game session logs for a specified game session. When a game session is terminated, Amazon GameLift automatically stores the logs in Amazon S3. Use the returned URL to download the logs.
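    Sketched with svc from the CreateAlias example and a hypothetical session ID; the output is the pre-signed URL named in the GetGameSessionLogUrlOutput shape above:

```go
// Sketch only: fetches the pre-signed S3 URL for a session's stored logs.
func sessionLogs(svc *gamelift.GameLift) error {
	out, err := svc.GetGameSessionLogUrl(&gamelift.GetGameSessionLogUrlInput{
		GameSessionId: aws.String("gsess-1111aaaa"), // hypothetical
	})
	if err != nil {
		return err
	}
	fmt.Println("download logs from:", aws.StringValue(out.PreSignedUrl))
	return nil
}
```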

    See the AWS Service Limits page for maximum log file sizes. Log files that exceed this limit are not saved.

    ", + "ListAliases": "

    Retrieves a collection of alias records for this AWS account. You can filter the result set by alias name and/or routing strategy type. Use the pagination parameters to retrieve results in sequential pages.

    Aliases are not listed in any particular order.

    ", + "ListBuilds": "

    Retrieves build records for all builds associated with the AWS account in use. You can limit results to builds in a specific state using the Status parameter. Use the pagination parameters to retrieve results in a set of sequential pages.

    Build records are not listed in any particular order.

    ", + "ListFleets": "

    Retrieves a collection of fleet records for this AWS account. You can filter the result set by build ID. Use the pagination parameters to retrieve results in sequential pages.

    Fleet records are not listed in any particular order.

    ", + "PutScalingPolicy": "

    Creates or updates a scaling policy for a fleet. An active scaling policy prompts Amazon GameLift to track a certain metric for a fleet and automatically change the fleet's capacity in specific circumstances. Each scaling policy contains one rule statement. Fleets can have multiple scaling policies in force simultaneously.

    A scaling policy rule statement has the following structure:

    If [MetricName] is [ComparisonOperator] [Threshold] for [EvaluationPeriods] minutes, then [ScalingAdjustmentType] to/by [ScalingAdjustment].

    For example, this policy: \"If the number of idle instances exceeds 20 for more than 15 minutes, then reduce the fleet capacity by 10 instances\" could be implemented as the following rule statement:

    If [IdleInstances] is [GreaterThanOrEqualToThreshold] [20] for [15] minutes, then [ChangeInCapacity] by [-10].

    To create or update a scaling policy, specify a unique combination of name and fleet ID, and set the rule values. All parameters for this action are required. If successful, the policy name is returned. Scaling policies cannot be suspended or made inactive. To stop enforcing a scaling policy, call DeleteScalingPolicy.
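    The example rule above, expressed as a call sketch (svc from the CreateAlias example; the policy name and fleet ID are placeholders; the metric, operator, and adjustment-type strings come from the enums in the api-2.json above):

```go
// Sketch only: if IdleInstances >= 20 for 15 consecutive minutes,
// change capacity by -10 instances.
func putScaleDownPolicy(svc *gamelift.GameLift) error {
	out, err := svc.PutScalingPolicy(&gamelift.PutScalingPolicyInput{
		Name:                  aws.String("scale-down-idle"), // hypothetical
		FleetId:               aws.String("fleet-1111aaaa"),  // hypothetical
		MetricName:            aws.String("IdleInstances"),
		ComparisonOperator:    aws.String("GreaterThanOrEqualToThreshold"),
		Threshold:             aws.Float64(20),
		EvaluationPeriods:     aws.Int64(15),
		ScalingAdjustmentType: aws.String("ChangeInCapacity"),
		ScalingAdjustment:     aws.Int64(-10),
	})
	if err != nil {
		return err
	}
	fmt.Println("policy in force:", aws.StringValue(out.Name))
	return nil
}
```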

    ", + "RequestUploadCredentials": "

    Retrieves a fresh set of upload credentials and the assigned Amazon S3 storage location for a specific build. Valid credentials are required to upload your game build files to Amazon S3.

    Call this action only if you need credentials for a build created with CreateBuild. This is a rare situation; in most cases, builds are created using the CLI command upload-build, which creates a build record and also uploads build files.

    Upload credentials are returned when you create the build, but they have a limited lifespan. You can get fresh credentials and use them to re-upload game files until the state of that build changes to READY. Once this happens, you must create a brand new build.

    ", + "ResolveAlias": "

    Retrieves the fleet ID that a specified alias is currently pointing to.
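    A one-call sketch, assuming svc from the CreateAlias example and a hypothetical alias ID:

```go
// Sketch only: resolves an alias to the fleet it currently points to.
// A TERMINAL alias fails with TerminalRoutingStrategyException.
func resolve(svc *gamelift.GameLift) error {
	out, err := svc.ResolveAlias(&gamelift.ResolveAliasInput{
		AliasId: aws.String("alias-1111aaaa"), // hypothetical
	})
	if err != nil {
		return err
	}
	fmt.Println("alias points to:", aws.StringValue(out.FleetId))
	return nil
}
```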

    ", + "UpdateAlias": "

    Updates properties for an alias. To update properties, specify the alias ID to be updated and provide the information to be changed. To reassign an alias to another fleet, provide an updated routing strategy. If successful, the updated alias record is returned.

    ", + "UpdateBuild": "

    Updates metadata in a build record, including the build name and version. To update the metadata, specify the build ID to update and provide the new values. If successful, a build object containing the updated metadata is returned.

    ", + "UpdateFleetAttributes": "

    Updates fleet properties, including name and description, for a fleet. To update metadata, specify the fleet ID and the property values you want to change. If successful, the fleet ID for the updated fleet is returned.

    ", + "UpdateFleetCapacity": "

    Updates capacity settings for a fleet. Use this action to specify the number of EC2 instances (hosts) that you want this fleet to contain. Before calling this action, you may want to call DescribeEC2InstanceLimits to get the maximum capacity based on the fleet's EC2 instance type.

    If you're using autoscaling (see PutScalingPolicy), you may want to specify a minimum and/or maximum capacity. If you don't provide these, autoscaling can set capacity anywhere between zero and the service limits.

    To update fleet capacity, specify the fleet ID and the number of instances you want the fleet to host. If successful, Amazon GameLift starts or terminates instances so that the fleet's active instance count matches the desired instance count. You can view a fleet's current capacity information by calling DescribeFleetCapacity. If the desired instance count is higher than the instance type's limit, the \"Limit Exceeded\" exception occurs.
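
    A sketch of such a capacity update in Go (assumed gamelift package; the counts and bounds are arbitrary examples):

        package example

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/gamelift"
        )

        // setCapacity asks GameLift to converge the fleet on ten instances,
        // bounded so that autoscaling stays between one and twenty.
        func setCapacity(svc *gamelift.GameLift, fleetID string) error {
            _, err := svc.UpdateFleetCapacity(&gamelift.UpdateFleetCapacityInput{
                FleetId:          aws.String(fleetID),
                DesiredInstances: aws.Int64(10),
                MinSize:          aws.Int64(1),  // optional lower bound for autoscaling
                MaxSize:          aws.Int64(20), // optional upper bound for autoscaling
            })
            return err // a too-high desired count surfaces as a limit-exceeded error
        }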

    ", + "UpdateFleetPortSettings": "

    Updates port settings for a fleet. To update settings, specify the fleet ID to be updated and list the permissions you want to update. List the permissions you want to add in InboundPermissionAuthorizations, and permissions you want to remove in InboundPermissionRevocations. Permissions to be removed must match existing fleet permissions. If successful, the fleet ID for the updated fleet is returned.
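
    For illustration, a minimal Go sketch authorizing one inbound port range (assumed gamelift package and protocol constant; the port numbers and CIDR range are placeholders):

        package example

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/gamelift"
        )

        // openGamePorts authorizes one inbound UDP port range; revocations
        // would be listed the same way and must match existing permissions.
        func openGamePorts(svc *gamelift.GameLift, fleetID string) error {
            _, err := svc.UpdateFleetPortSettings(&gamelift.UpdateFleetPortSettingsInput{
                FleetId: aws.String(fleetID),
                InboundPermissionAuthorizations: []*gamelift.IpPermission{{
                    FromPort: aws.Int64(33430),
                    ToPort:   aws.Int64(33440),
                    IpRange:  aws.String("0.0.0.0/0"), // illustrative CIDR range
                    Protocol: aws.String(gamelift.IpProtocolUdp),
                }},
            })
            return err
        }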

    ", + "UpdateGameSession": "

    Updates game session properties. This includes the session name, maximum player count, protection policy, which controls whether or not an active game session can be terminated during a scale-down event, and the player session creation policy, which controls whether or not new players can join the session. To update a game session, specify the game session ID and the values you want to change. If successful, an updated GameSession object is returned.
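
    A minimal sketch of one such update in Go (assumed gamelift package and policy constant names; the session ID is a placeholder):

        package example

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/gamelift"
        )

        // lockSession protects a session from scale-down events and stops
        // accepting new players, leaving current players connected.
        func lockSession(svc *gamelift.GameLift, gameSessionID string) error {
            _, err := svc.UpdateGameSession(&gamelift.UpdateGameSessionInput{
                GameSessionId:               aws.String(gameSessionID),
                ProtectionPolicy:            aws.String(gamelift.ProtectionPolicyFullProtection),
                PlayerSessionCreationPolicy: aws.String(gamelift.PlayerSessionCreationPolicyDenyAll),
            })
            return err
        }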

    ", + "UpdateRuntimeConfiguration": "

    Updates the current runtime configuration for the specified fleet, which tells GameLift how to launch server processes on instances in the fleet. You can update a fleet's runtime configuration at any time after the fleet is created; it does not need to be in an ACTIVE state.

    To update runtime configuration, specify the fleet ID and provide a RuntimeConfiguration object with the updated collection of server process configurations.

    Each instance in a GameLift fleet checks regularly for an updated runtime configuration and changes how it launches server processes to comply with the latest version. Existing server processes are not affected by the update; they continue to run until they end, while GameLift simply adds new server processes to fit the current runtime configuration. As a result, the runtime configuration changes are applied gradually as existing processes shut down and new processes are launched in GameLift's normal process recycling activity.
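
    A sketch of submitting an updated runtime configuration in Go (assumed gamelift package; the launch parameters are hypothetical, while the launch path mirrors the C:\game\... convention described in this reference):

        package example

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/gamelift"
        )

        // updateRuntime replaces the fleet's server process configuration.
        func updateRuntime(svc *gamelift.GameLift, fleetID string) error {
            _, err := svc.UpdateRuntimeConfiguration(&gamelift.UpdateRuntimeConfigurationInput{
                FleetId: aws.String(fleetID),
                RuntimeConfiguration: &gamelift.RuntimeConfiguration{
                    ServerProcesses: []*gamelift.ServerProcess{{
                        LaunchPath:           aws.String(`C:\game\MyGame\latest\server.exe`),
                        Parameters:           aws.String("-port 33435"), // hypothetical launch flags
                        ConcurrentExecutions: aws.Int64(10),
                    }},
                },
            })
            return err
        }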

    " + }, + "service": "Amazon GameLift Service

    Welcome to the Amazon GameLift API Reference. Amazon GameLift is a managed Amazon Web Services (AWS) service for developers who need a scalable, server-based solution for multiplayer games. Amazon GameLift provides setup and deployment of game servers, and handles infrastructure scaling and session management.

    This reference describes the low-level service API for GameLift. You can call this API directly or use the AWS SDK for your preferred language. The AWS SDK includes a set of high-level GameLift actions for managing multiplayer game sessions. Alternatively, you can use the AWS command-line interface (CLI) tool, which includes commands for GameLift. For administrative actions, you can also use the Amazon GameLift console.

    More Resources

    Manage Games and Players Through GameLift

    Call these actions from your game clients and/or services to create and manage multiplayer game sessions and player sessions.

    Set Up and Manage Game Servers

    Use these administrative actions to configure GameLift to host your game servers. When setting up GameLift, you'll need to (1) configure a build for your game and upload build files, and (2) set up one or more fleets to host game sessions. Once you've created and activated a fleet, you can assign aliases to it, scale capacity, track performance and utilization, etc.

    To view changes to the API, see the GameLift Document History page.

    ", + "shapes": { + "Alias": { + "base": "

    Properties describing a fleet alias.

    ", + "refs": { + "AliasList$member": null, + "CreateAliasOutput$Alias": "

    Object containing the newly created alias record.

    ", + "DescribeAliasOutput$Alias": "

    Object containing the requested alias.

    ", + "UpdateAliasOutput$Alias": "

    Object containing the updated alias configuration.

    " + } + }, + "AliasId": { + "base": null, + "refs": { + "Alias$AliasId": "

    Unique identifier for a fleet alias.

    ", + "CreateGameSessionInput$AliasId": "

    Unique identifier for a fleet alias. Each request must reference either a fleet ID or alias ID, but not both.
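
    A minimal Go sketch of creating a game session through an alias (assumed gamelift package; the alias ID, player cap, and name are placeholders):

        package example

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/gamelift"
        )

        // startSession targets an alias; pass FleetId instead of AliasId to
        // target a fleet directly, but never both in one request.
        func startSession(svc *gamelift.GameLift, aliasID string) (*gamelift.GameSession, error) {
            out, err := svc.CreateGameSession(&gamelift.CreateGameSessionInput{
                AliasId:                   aws.String(aliasID),
                MaximumPlayerSessionCount: aws.Int64(16),            // required player cap
                Name:                      aws.String("my-session"), // names need not be unique
            })
            if err != nil {
                return nil, err
            }
            return out.GameSession, nil
        }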

    ", + "DeleteAliasInput$AliasId": "

    Unique identifier for a fleet alias. Specify the alias you want to delete.

    ", + "DescribeAliasInput$AliasId": "

    Unique identifier for a fleet alias. Specify the alias you want to retrieve.

    ", + "DescribeGameSessionDetailsInput$AliasId": "

    Unique identifier for a fleet alias. Specify an alias to retrieve information on all game sessions active on the fleet.

    ", + "DescribeGameSessionsInput$AliasId": "

    Unique identifier for a fleet alias. Specify an alias to retrieve information on all game sessions active on the fleet.

    ", + "ResolveAliasInput$AliasId": "

    Unique identifier for the alias you want to resolve.

    ", + "UpdateAliasInput$AliasId": "

    Unique identifier for a fleet alias. Specify the alias you want to update.

    " + } + }, + "AliasList": { + "base": null, + "refs": { + "ListAliasesOutput$Aliases": "

    Collection of alias records that match the list request.

    " + } + }, + "AwsCredentials": { + "base": "

    AWS access credentials required to upload game build files to Amazon GameLift. These credentials are generated with CreateBuild, and are valid for a limited time. If they expire before you upload your game build, get a new set by calling RequestUploadCredentials.

    ", + "refs": { + "CreateBuildOutput$UploadCredentials": "

    AWS credentials required when uploading a game build to the storage location. These credentials have a limited lifespan and are valid only for the build they were issued for. If you need to get fresh credentials, call RequestUploadCredentials.

    ", + "RequestUploadCredentialsOutput$UploadCredentials": "

    AWS credentials required when uploading a game build to the storage location. These credentials have a limited lifespan and are valid only for the build they were issued for.

    " + } + }, + "Build": { + "base": "

    Properties describing a game build.

    ", + "refs": { + "BuildList$member": null, + "CreateBuildOutput$Build": "

    Set of properties for the newly created build.

    ", + "DescribeBuildOutput$Build": "

    Set of properties describing the requested build.

    ", + "UpdateBuildOutput$Build": "

    Object containing the updated build record.

    " + } + }, + "BuildId": { + "base": null, + "refs": { + "Build$BuildId": "

    Unique identifier for a build.

    ", + "CreateFleetInput$BuildId": "

    Unique identifier of the build to be deployed on the new fleet. The build must have been successfully uploaded to GameLift and be in a READY state. This fleet setting cannot be changed once the fleet is created.

    ", + "DeleteBuildInput$BuildId": "

    Unique identifier for the build you want to delete.

    ", + "DescribeBuildInput$BuildId": "

    Unique identifier of the build that you want to retrieve properties for.

    ", + "FleetAttributes$BuildId": "

    Unique identifier for a build.

    ", + "ListFleetsInput$BuildId": "

    Unique identifier of the build to return fleets for. Use this parameter to return only fleets using the specified build. To retrieve all fleets, leave this parameter empty.

    ", + "RequestUploadCredentialsInput$BuildId": "

    Unique identifier for the build you want to get credentials for.

    ", + "UpdateBuildInput$BuildId": "

    Unique identifier of the build you want to update.

    " + } + }, + "BuildList": { + "base": null, + "refs": { + "ListBuildsOutput$Builds": "

    Collection of build records that match the request.

    " + } + }, + "BuildStatus": { + "base": null, + "refs": { + "Build$Status": "

    Current status of the build. Possible build states include the following:

    • INITIALIZED – A new build has been defined, but no files have been uploaded. You cannot create fleets for builds that are in this state. When a build is successfully created, the build state is set to this value.
    • READY – The game build has been successfully uploaded. You can now create new fleets for this build.
    • FAILED – The game build upload failed. You cannot create new fleets for this build.

    ", + "ListBuildsInput$Status": "

    Build state to filter results by. To retrieve all builds, leave this parameter empty. Possible build states include the following:

    • INITIALIZED – A new build has been defined, but no files have been uploaded. You cannot create fleets for builds that are in this state. When a build is successfully created, the build state is set to this value.
    • READY – The game build has been successfully uploaded. You can now create new fleets for this build.
    • FAILED – The game build upload failed. You cannot create new fleets for this build.

    " + } + }, + "ComparisonOperatorType": { + "base": null, + "refs": { + "PutScalingPolicyInput$ComparisonOperator": "

    Comparison operator to use when measuring the metric against the threshold value.

    ", + "ScalingPolicy$ComparisonOperator": "

    Comparison operator to use when measuring a metric against the threshold value.

    " + } + }, + "ConflictException": { + "base": "

    The requested operation would cause a conflict with the current state of a service resource associated with the request. Resolve the conflict before retrying this request.

    ", + "refs": { + } + }, + "CreateAliasInput": { + "base": "

    Represents the input for a request action.

    ", + "refs": { + } + }, + "CreateAliasOutput": { + "base": "

    Represents the returned data in response to a request action.

    ", + "refs": { + } + }, + "CreateBuildInput": { + "base": "

    Represents the input for a request action.

    ", + "refs": { + } + }, + "CreateBuildOutput": { + "base": "

    Represents the returned data in response to a request action.

    ", + "refs": { + } + }, + "CreateFleetInput": { + "base": "

    Represents the input for a request action.

    ", + "refs": { + } + }, + "CreateFleetOutput": { + "base": "

    Represents the returned data in response to a request action.

    ", + "refs": { + } + }, + "CreateGameSessionInput": { + "base": "

    Represents the input for a request action.

    ", + "refs": { + } + }, + "CreateGameSessionOutput": { + "base": "

    Represents the returned data in response to a request action.

    ", + "refs": { + } + }, + "CreatePlayerSessionInput": { + "base": "

    Represents the input for a request action.

    ", + "refs": { + } + }, + "CreatePlayerSessionOutput": { + "base": "

    Represents the returned data in response to a request action.

    ", + "refs": { + } + }, + "CreatePlayerSessionsInput": { + "base": "

    Represents the input for a request action.

    ", + "refs": { + } + }, + "CreatePlayerSessionsOutput": { + "base": "

    Represents the returned data in response to a request action.

    ", + "refs": { + } + }, + "DeleteAliasInput": { + "base": "

    Represents the input for a request action.

    ", + "refs": { + } + }, + "DeleteBuildInput": { + "base": "

    Represents the input for a request action.

    ", + "refs": { + } + }, + "DeleteFleetInput": { + "base": "

    Represents the input for a request action.

    ", + "refs": { + } + }, + "DeleteScalingPolicyInput": { + "base": "

    Represents the input for a request action.

    ", + "refs": { + } + }, + "DescribeAliasInput": { + "base": "

    Represents the input for a request action.

    ", + "refs": { + } + }, + "DescribeAliasOutput": { + "base": "

    Represents the returned data in response to a request action.

    ", + "refs": { + } + }, + "DescribeBuildInput": { + "base": "

    Represents the input for a request action.

    ", + "refs": { + } + }, + "DescribeBuildOutput": { + "base": "

    Represents the returned data in response to a request action.

    ", + "refs": { + } + }, + "DescribeEC2InstanceLimitsInput": { + "base": "

    Represents the input for a request action.

    ", + "refs": { + } + }, + "DescribeEC2InstanceLimitsOutput": { + "base": "

    Represents the returned data in response to a request action.

    ", + "refs": { + } + }, + "DescribeFleetAttributesInput": { + "base": "

    Represents the input for a request action.

    ", + "refs": { + } + }, + "DescribeFleetAttributesOutput": { + "base": "

    Represents the returned data in response to a request action.

    ", + "refs": { + } + }, + "DescribeFleetCapacityInput": { + "base": "

    Represents the input for a request action.

    ", + "refs": { + } + }, + "DescribeFleetCapacityOutput": { + "base": "

    Represents the returned data in response to a request action.

    ", + "refs": { + } + }, + "DescribeFleetEventsInput": { + "base": "

    Represents the input for a request action.

    ", + "refs": { + } + }, + "DescribeFleetEventsOutput": { + "base": "

    Represents the returned data in response to a request action.

    ", + "refs": { + } + }, + "DescribeFleetPortSettingsInput": { + "base": "

    Represents the input for a request action.

    ", + "refs": { + } + }, + "DescribeFleetPortSettingsOutput": { + "base": "

    Represents the returned data in response to a request action.

    ", + "refs": { + } + }, + "DescribeFleetUtilizationInput": { + "base": "

    Represents the input for a request action.

    ", + "refs": { + } + }, + "DescribeFleetUtilizationOutput": { + "base": "

    Represents the returned data in response to a request action.

    ", + "refs": { + } + }, + "DescribeGameSessionDetailsInput": { + "base": "

    Represents the input for a request action.

    ", + "refs": { + } + }, + "DescribeGameSessionDetailsOutput": { + "base": "

    Represents the returned data in response to a request action.

    ", + "refs": { + } + }, + "DescribeGameSessionsInput": { + "base": "

    Represents the input for a request action.

    ", + "refs": { + } + }, + "DescribeGameSessionsOutput": { + "base": "

    Represents the returned data in response to a request action.

    ", + "refs": { + } + }, + "DescribePlayerSessionsInput": { + "base": "

    Represents the input for a request action.

    ", + "refs": { + } + }, + "DescribePlayerSessionsOutput": { + "base": "

    Represents the returned data in response to a request action.

    ", + "refs": { + } + }, + "DescribeRuntimeConfigurationInput": { + "base": "

    Represents the input for a request action.

    ", + "refs": { + } + }, + "DescribeRuntimeConfigurationOutput": { + "base": "

    Represents the returned data in response to a request action.

    ", + "refs": { + } + }, + "DescribeScalingPoliciesInput": { + "base": "

    Represents the input for a request action.

    ", + "refs": { + } + }, + "DescribeScalingPoliciesOutput": { + "base": "

    Represents the returned data in response to a request action.

    ", + "refs": { + } + }, + "Double": { + "base": null, + "refs": { + "PutScalingPolicyInput$Threshold": "

    Metric value used to trigger a scaling event.

    ", + "ScalingPolicy$Threshold": "

    Metric value used to trigger a scaling event.

    " + } + }, + "EC2InstanceCounts": { + "base": "

    Current status of fleet capacity. The number of active instances should match or be in the process of matching the number of desired instances. Pending and terminating counts are non-zero only if fleet capacity is adjusting to an UpdateFleetCapacity request, or if access to resources is temporarily affected.

    ", + "refs": { + "FleetCapacity$InstanceCounts": "

    Current status of fleet capacity.

    " + } + }, + "EC2InstanceLimit": { + "base": "

    Maximum number of instances allowed based on the Amazon Elastic Compute Cloud (Amazon EC2) instance type. Instance limits can be retrieved by calling DescribeEC2InstanceLimits.

    ", + "refs": { + "EC2InstanceLimitList$member": null + } + }, + "EC2InstanceLimitList": { + "base": null, + "refs": { + "DescribeEC2InstanceLimitsOutput$EC2InstanceLimits": "

    Object containing the maximum number of instances for the specified instance type.

    " + } + }, + "EC2InstanceType": { + "base": null, + "refs": { + "CreateFleetInput$EC2InstanceType": "

    Name of an EC2 instance type that is supported in Amazon GameLift. A fleet instance type determines the computing resources of each instance in the fleet, including CPU, memory, storage, and networking capacity. GameLift supports the following EC2 instance types. See Amazon EC2 Instance Types for detailed descriptions.

    ", + "DescribeEC2InstanceLimitsInput$EC2InstanceType": "

    Name of an EC2 instance type that is supported in Amazon GameLift. A fleet instance type determines the computing resources of each instance in the fleet, including CPU, memory, storage, and networking capacity. GameLift supports the following EC2 instance types. See Amazon EC2 Instance Types for detailed descriptions. Leave this parameter blank to retrieve limits for all types.

    ", + "EC2InstanceLimit$EC2InstanceType": "

    Name of an EC2 instance type that is supported in Amazon GameLift. A fleet instance type determines the computing resources of each instance in the fleet, including CPU, memory, storage, and networking capacity. GameLift supports the following EC2 instance types. See Amazon EC2 Instance Types for detailed descriptions.

    ", + "FleetCapacity$InstanceType": "

    Name of an EC2 instance type that is supported in Amazon GameLift. A fleet instance type determines the computing resources of each instance in the fleet, including CPU, memory, storage, and networking capacity. GameLift supports the following EC2 instance types. See Amazon EC2 Instance Types for detailed descriptions.

    " + } + }, + "Event": { + "base": "

    Log entry describing an event involving an Amazon GameLift resource (such as a fleet).

    ", + "refs": { + "EventList$member": null + } + }, + "EventCode": { + "base": null, + "refs": { + "Event$EventCode": "

    Type of event being logged.

    " + } + }, + "EventList": { + "base": null, + "refs": { + "DescribeFleetEventsOutput$Events": "

    Collection of objects containing event log entries for the specified fleet.

    " + } + }, + "FleetAttributes": { + "base": "

    General properties describing a fleet.

    ", + "refs": { + "CreateFleetOutput$FleetAttributes": "

    Properties for the newly created fleet.

    ", + "FleetAttributesList$member": null + } + }, + "FleetAttributesList": { + "base": null, + "refs": { + "DescribeFleetAttributesOutput$FleetAttributes": "

    Collection of objects containing attribute metadata for each requested fleet ID.

    " + } + }, + "FleetCapacity": { + "base": "

    Information about the fleet's capacity. Fleet capacity is measured in EC2 instances. By default, new fleets have a capacity of one instance, but can be updated as needed. The maximum number of instances for a fleet is determined by the fleet's instance type.

    ", + "refs": { + "FleetCapacityList$member": null + } + }, + "FleetCapacityExceededException": { + "base": "

    The specified fleet has no available instances to fulfill a request to create a new game session. Such requests should only be retried once the fleet capacity has been increased.

    ", + "refs": { + } + }, + "FleetCapacityList": { + "base": null, + "refs": { + "DescribeFleetCapacityOutput$FleetCapacity": "

    Collection of objects containing capacity information for each requested fleet ID. Leave this parameter empty to retrieve capacity information for all fleets.

    " + } + }, + "FleetId": { + "base": null, + "refs": { + "CreateGameSessionInput$FleetId": "

    Unique identifier for a fleet. Each request must reference either a fleet ID or alias ID, but not both.

    ", + "DeleteFleetInput$FleetId": "

    Unique identifier for the fleet you want to delete.

    ", + "DeleteScalingPolicyInput$FleetId": "

    Unique identifier for a fleet.

    ", + "DescribeFleetEventsInput$FleetId": "

    Unique identifier for the fleet to get event logs for.

    ", + "DescribeFleetPortSettingsInput$FleetId": "

    Unique identifier for the fleet you want to retrieve port settings for.

    ", + "DescribeGameSessionDetailsInput$FleetId": "

    Unique identifier for a fleet. Specify a fleet to retrieve information on all game sessions active on the fleet.

    ", + "DescribeGameSessionsInput$FleetId": "

    Unique identifier for a fleet. Specify a fleet to retrieve information on all game sessions active on the fleet.

    ", + "DescribeRuntimeConfigurationInput$FleetId": "

    Unique identifier of the fleet to get the runtime configuration for.

    ", + "DescribeScalingPoliciesInput$FleetId": "

    Unique identifier for a fleet. Specify the fleet to retrieve scaling policies for.

    ", + "FleetAttributes$FleetId": "

    Unique identifier for a fleet.

    ", + "FleetCapacity$FleetId": "

    Unique identifier for a fleet.

    ", + "FleetIdList$member": null, + "FleetUtilization$FleetId": "

    Unique identifier for a fleet.

    ", + "GameSession$FleetId": "

    Unique identifier for a fleet.

    ", + "PlayerSession$FleetId": "

    Unique identifier for a fleet.

    ", + "PutScalingPolicyInput$FleetId": "

    Unique identifier for the fleet to scale with this policy.

    ", + "ResolveAliasOutput$FleetId": "

    Fleet ID associated with the requested alias.

    ", + "RoutingStrategy$FleetId": "

    Unique identifier for a fleet.

    ", + "ScalingPolicy$FleetId": "

    Unique identifier for the fleet associated with this scaling policy.

    ", + "UpdateFleetAttributesInput$FleetId": "

    Unique identifier for the fleet you want to update attribute metadata for.

    ", + "UpdateFleetAttributesOutput$FleetId": "

    Unique identifier for the updated fleet.

    ", + "UpdateFleetCapacityInput$FleetId": "

    Unique identifier for the fleet you want to update capacity for.

    ", + "UpdateFleetCapacityOutput$FleetId": "

    Unique identifier for the updated fleet.

    ", + "UpdateFleetPortSettingsInput$FleetId": "

    Unique identifier for the fleet you want to update port settings for.

    ", + "UpdateFleetPortSettingsOutput$FleetId": "

    Unique identifier for the updated fleet.

    ", + "UpdateRuntimeConfigurationInput$FleetId": "

    Unique identifier of the fleet to update runtime configuration for.

    " + } + }, + "FleetIdList": { + "base": null, + "refs": { + "DescribeFleetAttributesInput$FleetIds": "

    Unique identifiers for the fleet(s) that you want to retrieve attributes for. To request attributes for all fleets, leave this parameter empty.

    ", + "DescribeFleetCapacityInput$FleetIds": "

    Unique identifier for the fleet(s) you want to retrieve capacity information for. To request capacity information for all fleets, leave this parameter empty.

    ", + "DescribeFleetUtilizationInput$FleetIds": "

    Unique identifier for the fleet(s) you want to retrieve utilization data for. To request utilization data for all fleets, leave this parameter empty.

    ", + "ListFleetsOutput$FleetIds": "

    Set of fleet IDs matching the list request. You can retrieve additional information about all returned fleets by passing this result set to a call to DescribeFleetAttributes, DescribeFleetCapacity, or DescribeFleetUtilization.
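
    A sketch of that chaining in Go (assumed gamelift package; first page only, with pagination omitted for brevity):

        package example

        import (
            "github.com/aws/aws-sdk-go/service/gamelift"
        )

        // fleetAttributes feeds the IDs returned by ListFleets straight
        // into DescribeFleetAttributes.
        func fleetAttributes(svc *gamelift.GameLift) ([]*gamelift.FleetAttributes, error) {
            fleets, err := svc.ListFleets(&gamelift.ListFleetsInput{})
            if err != nil {
                return nil, err
            }
            out, err := svc.DescribeFleetAttributes(&gamelift.DescribeFleetAttributesInput{
                FleetIds: fleets.FleetIds,
            })
            if err != nil {
                return nil, err
            }
            return out.FleetAttributes, nil
        }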

    " + } + }, + "FleetStatus": { + "base": null, + "refs": { + "FleetAttributes$Status": "

    Current status of the fleet. Possible fleet states include the following:

    • NEW – A new fleet has been defined and desired instances is set to 1.
    • DOWNLOADING/VALIDATING/BUILDING/ACTIVATING – GameLift is setting up the new fleet, creating new instances with the game build and starting server processes.
    • ACTIVE – Hosts can now accept game sessions.
    • ERROR – An error occurred when downloading, validating, building, or activating the fleet.
    • DELETING – Hosts are responding to a delete fleet request.
    • TERMINATED – The fleet no longer exists.

    " + } + }, + "FleetUtilization": { + "base": "

    Current status of fleet utilization, including the number of game and player sessions being hosted.

    ", + "refs": { + "FleetUtilizationList$member": null + } + }, + "FleetUtilizationList": { + "base": null, + "refs": { + "DescribeFleetUtilizationOutput$FleetUtilization": "

    Collection of objects containing utilization information for each requested fleet ID.

    " + } + }, + "FreeText": { + "base": null, + "refs": { + "Alias$Name": "

    Descriptive label associated with an alias. Alias names do not need to be unique.

    ", + "Alias$Description": "

    Human-readable description of an alias.

    ", + "Build$Name": "

    Descriptive label associated with a build. Build names do not need to be unique. This value can be set using CreateBuild or UpdateBuild.

    ", + "Build$Version": "

    Version associated with this build. Version strings do not need to be unique to a build. This value can be set using CreateBuild or UpdateBuild.

    ", + "RoutingStrategy$Message": "

    Message text to be used with a terminal routing strategy.

    " + } + }, + "GameProperty": { + "base": "

    Set of key-value pairs containing information a server process requires to set up a game session. This object allows you to pass in any set of data needed for your game. For more information, see the Amazon GameLift Developer Guide.

    ", + "refs": { + "GamePropertyList$member": null + } + }, + "GamePropertyKey": { + "base": null, + "refs": { + "GameProperty$Key": null + } + }, + "GamePropertyList": { + "base": null, + "refs": { + "CreateGameSessionInput$GameProperties": "

    Set of properties used to administer a game session. These properties are passed to the server process hosting it.

    ", + "GameSession$GameProperties": "

    Set of custom properties for the game session.

    " + } + }, + "GamePropertyValue": { + "base": null, + "refs": { + "GameProperty$Value": null + } + }, + "GameSession": { + "base": "

    Properties describing a game session.

    ", + "refs": { + "CreateGameSessionOutput$GameSession": "

    Object containing the newly created game session record.

    ", + "GameSessionDetail$GameSession": null, + "GameSessionList$member": null, + "UpdateGameSessionOutput$GameSession": "

    Object containing the updated game session metadata.

    " + } + }, + "GameSessionDetail": { + "base": "

    A game session's properties and the protection policy currently in force.

    ", + "refs": { + "GameSessionDetailList$member": null + } + }, + "GameSessionDetailList": { + "base": null, + "refs": { + "DescribeGameSessionDetailsOutput$GameSessionDetails": "

    Collection of objects containing game session properties and the protection policy currently in force for each session matching the request.

    " + } + }, + "GameSessionFullException": { + "base": "

    The game instance is currently full and cannot allow the requested player(s) to join. This exception occurs in response to a CreatePlayerSession request.

    ", + "refs": { + } + }, + "GameSessionId": { + "base": null, + "refs": { + "CreatePlayerSessionInput$GameSessionId": "

    Unique identifier for a game session. Specify the game session you want to add a player to.

    ", + "CreatePlayerSessionsInput$GameSessionId": "

    Unique identifier for a game session.

    ", + "DescribeGameSessionDetailsInput$GameSessionId": "

    Unique identifier for a game session. Specify the game session to retrieve information on.

    ", + "DescribeGameSessionsInput$GameSessionId": "

    Unique identifier for a game session. Specify the game session to retrieve information on.

    ", + "DescribePlayerSessionsInput$GameSessionId": "

    Unique identifier for a game session.

    ", + "GameSession$GameSessionId": "

    Unique identifier for a game session.

    ", + "GetGameSessionLogUrlInput$GameSessionId": "

    Unique identifier for a game session. Specify the game session you want to get logs for.

    ", + "PlayerSession$GameSessionId": "

    Unique identifier for a game session.

    ", + "UpdateGameSessionInput$GameSessionId": "

    Unique identifier for a game session. Specify the game session you want to update.

    " + } + }, + "GameSessionList": { + "base": null, + "refs": { + "DescribeGameSessionsOutput$GameSessions": "

    Collection of objects containing game session properties for each session matching the request.

    " + } + }, + "GameSessionStatus": { + "base": null, + "refs": { + "GameSession$Status": "

    Current status of the game session. A game session must be in an ACTIVE state to have player sessions.

    " + } + }, + "GetGameSessionLogUrlInput": { + "base": "

    Represents the input for a request action.

    ", + "refs": { + } + }, + "GetGameSessionLogUrlOutput": { + "base": "

    Represents the returned data in response to a request action.

    ", + "refs": { + } + }, + "Integer": { + "base": null, + "refs": { + "PutScalingPolicyInput$ScalingAdjustment": "

    Amount of adjustment to make, based on the scaling adjustment type.

    ", + "ScalingPolicy$ScalingAdjustment": "

    Amount of adjustment to make, based on the scaling adjustment type.

    " + } + }, + "InternalServiceException": { + "base": "

    The service encountered an unrecoverable internal failure while processing the request. Clients can retry such requests, either immediately or after a back-off period.

    ", + "refs": { + } + }, + "InvalidFleetStatusException": { + "base": "

    The requested operation would cause a conflict with the current state of a resource associated with the request and/or the fleet. Resolve the conflict before retrying.

    ", + "refs": { + } + }, + "InvalidGameSessionStatusException": { + "base": "

    The requested operation would cause a conflict with the current state of a resource associated with the request and/or the game instance. Clients should not retry such requests without resolving the conflict.

    ", + "refs": { + } + }, + "InvalidRequestException": { + "base": "

    One or more parameters specified as part of the request are invalid. Correct the invalid parameters before retrying.

    ", + "refs": { + } + }, + "IpAddress": { + "base": null, + "refs": { + "GameSession$IpAddress": "

    IP address of the game session. To connect to a GameLift server process, an app needs both the IP address and port number.

    ", + "PlayerSession$IpAddress": "

    Game session IP address. All player sessions reference the game session location.

    " + } + }, + "IpPermission": { + "base": "

    A range of IP addresses and port settings that allow inbound traffic to connect to server processes on GameLift. Each game session hosted on a fleet is assigned a unique combination of IP address and port number, which must fall into the fleet's allowed ranges. This combination is included in the GameSession object.

    ", + "refs": { + "IpPermissionsList$member": null + } + }, + "IpPermissionsList": { + "base": null, + "refs": { + "CreateFleetInput$EC2InboundPermissions": "

    Range of IP addresses and port settings that permit inbound traffic to access server processes running on the fleet. If no inbound permissions are set, including both IP address range and port range, the server processes in the fleet cannot accept connections. You can specify one or more sets of permissions for a fleet.

    ", + "DescribeFleetPortSettingsOutput$InboundPermissions": "

    Object containing port settings for the requested fleet ID.

    ", + "UpdateFleetPortSettingsInput$InboundPermissionAuthorizations": "

    Collection of port settings to be added to the fleet record.

    ", + "UpdateFleetPortSettingsInput$InboundPermissionRevocations": "

    Collection of port settings to be removed from the fleet record.

    " + } + }, + "IpProtocol": { + "base": null, + "refs": { + "IpPermission$Protocol": "

    Network communication protocol used by the fleet.

    " + } + }, + "LimitExceededException": { + "base": "

    The requested operation would cause the resource to exceed the allowed service limit. Resolve the issue before retrying.

    ", + "refs": { + } + }, + "ListAliasesInput": { + "base": "

    Represents the input for a request action.

    ", + "refs": { + } + }, + "ListAliasesOutput": { + "base": "

    Represents the returned data in response to a request action.

    ", + "refs": { + } + }, + "ListBuildsInput": { + "base": "

    Represents the input for a request action.

    ", + "refs": { + } + }, + "ListBuildsOutput": { + "base": "

    Represents the returned data in response to a request action.

    ", + "refs": { + } + }, + "ListFleetsInput": { + "base": "

    Represents the input for a request action.

    ", + "refs": { + } + }, + "ListFleetsOutput": { + "base": "

    Represents the returned data in response to a request action.

    ", + "refs": { + } + }, + "MetricName": { + "base": null, + "refs": { + "PutScalingPolicyInput$MetricName": "

    Name of the Amazon GameLift-defined metric that is used to trigger an adjustment.

    • ActivatingGameSessions – number of game sessions in the process of being created (game session status = ACTIVATING).
    • ActiveGameSessions – number of game sessions currently running (game session status = ACTIVE).
    • CurrentPlayerSessions – number of active or reserved player sessions (player session status = ACTIVE or RESERVED).
    • AvailablePlayerSessions – number of player session slots currently available in active game sessions across the fleet, calculated by subtracting a game session's current player session count from its maximum player session count. This number includes game sessions that are not currently accepting players (game session PlayerSessionCreationPolicy = DENY_ALL).
    • ActiveInstances – number of instances currently running a game session.
    • IdleInstances – number of instances not currently running a game session.

    ", + "ScalingPolicy$MetricName": "

    Name of the GameLift-defined metric that is used to trigger an adjustment.

    • ActivatingGameSessions – number of game sessions in the process of being created (game session status = ACTIVATING).
    • ActiveGameSessions – number of game sessions currently running (game session status = ACTIVE).
    • CurrentPlayerSessions – number of active or reserved player sessions (player session status = ACTIVE or RESERVED).
    • AvailablePlayerSessions – number of player session slots currently available in active game sessions across the fleet, calculated by subtracting a game session's current player session count from its maximum player session count. This number does include game sessions that are not currently accepting players (game session PlayerSessionCreationPolicy = DENY_ALL).
    • ActiveInstances – number of instances currently running a game session.
    • IdleInstances – number of instances not currently running a game session.

    " + } + }, + "NonBlankString": { + "base": null, + "refs": { + "IpPermission$IpRange": "

    Range of allowed IP addresses. This value must be expressed in CIDR notation. Example: \"000.000.000.000/[subnet mask]\" or optionally the shortened version \"0.0.0.0/[subnet mask]\".

    " + } + }, + "NonEmptyString": { + "base": null, + "refs": { + "AwsCredentials$AccessKeyId": "

    Access key for an AWS account.

    ", + "AwsCredentials$SecretAccessKey": "

    Secret key for an AWS account.

    ", + "AwsCredentials$SessionToken": "

    Token specific to a build ID.

    ", + "ConflictException$Message": null, + "Event$Message": "

    Additional information related to the event.

    ", + "FleetCapacityExceededException$Message": null, + "GameSessionFullException$Message": null, + "InternalServiceException$Message": null, + "InvalidFleetStatusException$Message": null, + "InvalidGameSessionStatusException$Message": null, + "InvalidRequestException$Message": null, + "LimitExceededException$Message": null, + "ListAliasesInput$Name": "

    Descriptive label associated with an alias. Alias names do not need to be unique.

    ", + "ListAliasesInput$NextToken": "

    Token indicating the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To specify the start of the result set, do not specify a value.

    ", + "ListAliasesOutput$NextToken": "

    Token indicating where to resume retrieving results on the next call to this action. If no token is returned, these results represent the end of the list.

    If a request has a limit that exactly matches the number of remaining results, a token is returned even though there are no more results to retrieve.
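
    A minimal pagination loop in Go showing this token handshake (assumed gamelift package; the page size is arbitrary, and the possible trailing empty page is absorbed by the same loop):

        package example

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/gamelift"
        )

        // allAliases pages through ListAliases until no token is returned.
        func allAliases(svc *gamelift.GameLift) ([]*gamelift.Alias, error) {
            var aliases []*gamelift.Alias
            input := &gamelift.ListAliasesInput{Limit: aws.Int64(50)} // arbitrary page size
            for {
                out, err := svc.ListAliases(input)
                if err != nil {
                    return nil, err
                }
                aliases = append(aliases, out.Aliases...)
                if out.NextToken == nil {
                    return aliases, nil // no token: end of the list
                }
                input.NextToken = out.NextToken
            }
        }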

    ", + "ListBuildsInput$NextToken": "

    Token indicating the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To specify the start of the result set, do not specify a value.

    ", + "ListBuildsOutput$NextToken": "

    Token indicating where to resume retrieving results on the next call to this action. If no token is returned, these results represent the end of the list.

    If a request has a limit that exactly matches the number of remaining results, a token is returned even though there are no more results to retrieve.

    ", + "NotFoundException$Message": null, + "S3Location$Bucket": "

    Amazon S3 bucket identifier.

    ", + "S3Location$Key": "

    Amazon S3 bucket key.

    ", + "S3Location$RoleArn": "

    Amazon Resource Name (ARN) for the cross-account access role that allows GameLift access to the S3 bucket.

    ", + "TerminalRoutingStrategyException$Message": null, + "UnauthorizedException$Message": null + } + }, + "NonZeroAndMaxString": { + "base": null, + "refs": { + "CreateAliasInput$Name": "

    Descriptive label associated with an alias. Alias names do not need to be unique.

    ", + "CreateAliasInput$Description": "

    Human-readable description of an alias.

    ", + "CreateBuildInput$Name": "

    Descriptive label associated with a build. Build names do not need to be unique. A build name can be changed later using UpdateBuild.

    ", + "CreateBuildInput$Version": "

    Version associated with this build. Version strings do not need to be unique to a build. A build version can be changed later using UpdateBuild.

    ", + "CreateFleetInput$Name": "

    Descriptive label associated with a fleet. Fleet names do not need to be unique.

    ", + "CreateFleetInput$Description": "

    Human-readable description of a fleet.

    ", + "CreateFleetInput$ServerLaunchPath": "

    This parameter is no longer used. Instead, specify a server launch path using the RuntimeConfiguration parameter. (Requests that specify a server launch path and launch parameters instead of a runtime configuration will continue to work.)

    ", + "CreateFleetInput$ServerLaunchParameters": "

    This parameter is no longer used. Instead, specify server launch parameters in the RuntimeConfiguration parameter. (Requests that specify a server launch path and launch parameters instead of a runtime configuration will continue to work.)

    ", + "CreateGameSessionInput$Name": "

    Descriptive label associated with a game session. Session names do not need to be unique.

    ", + "CreatePlayerSessionInput$PlayerId": "

    Unique identifier for the player to be added.

    ", + "DeleteScalingPolicyInput$Name": "

    Descriptive label associated with a scaling policy. Policy names do not need to be unique.

    ", + "DescribeFleetAttributesInput$NextToken": "

    Token indicating the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To specify the start of the result set, do not specify a value. This parameter is ignored when the request specifies one or a list of fleet IDs.

    ", + "DescribeFleetAttributesOutput$NextToken": "

    Token indicating where to resume retrieving results on the next call to this action. If no token is returned, these results represent the end of the list.

    If a request has a limit that exactly matches the number of remaining results, a token is returned even though there are no more results to retrieve.

    ", + "DescribeFleetCapacityInput$NextToken": "

    Token indicating the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To specify the start of the result set, do not specify a value. This parameter is ignored when the request specifies one or a list of fleet IDs.

    ", + "DescribeFleetCapacityOutput$NextToken": "

    Token indicating where to resume retrieving results on the next call to this action. If no token is returned, these results represent the end of the list.

    If a request has a limit that exactly matches the number of remaining results, a token is returned even though there are no more results to retrieve.

    ", + "DescribeFleetEventsInput$NextToken": "

    Token indicating the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To specify the start of the result set, do not specify a value.

    ", + "DescribeFleetEventsOutput$NextToken": "

    Token indicating where to resume retrieving results on the next call to this action. If no token is returned, these results represent the end of the list.

    If a request has a limit that exactly matches the number of remaining results, a token is returned even though there are no more results to retrieve.

    ", + "DescribeFleetUtilizationInput$NextToken": "

    Token indicating the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To specify the start of the result set, do not specify a value. This parameter is ignored when the request specifies one or a list of fleet IDs.

    ", + "DescribeFleetUtilizationOutput$NextToken": "

    Token indicating where to resume retrieving results on the next call to this action. If no token is returned, these results represent the end of the list.

    If a request has a limit that exactly matches the number of remaining results, a token is returned even though there are no more results to retrieve.

    ", + "DescribeGameSessionDetailsInput$StatusFilter": "

    Game session status to filter results on. Possible game session states include ACTIVE, TERMINATED, ACTIVATING and TERMINATING (the last two are transitory).

    ", + "DescribeGameSessionDetailsInput$NextToken": "

    Token indicating the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To specify the start of the result set, do not specify a value.

    ", + "DescribeGameSessionDetailsOutput$NextToken": "

    Token indicating where to resume retrieving results on the next call to this action. If no token is returned, these results represent the end of the list.

    If a request has a limit that exactly matches the number of remaining results, a token is returned even though there are no more results to retrieve.

    ", + "DescribeGameSessionsInput$StatusFilter": "

    Game session status to filter results on. Possible game session states include ACTIVE, TERMINATED, ACTIVATING, and TERMINATING (the last two are transitory).

    ", + "DescribeGameSessionsInput$NextToken": "

    Token indicating the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To specify the start of the result set, do not specify a value.

    ", + "DescribeGameSessionsOutput$NextToken": "

    Token indicating where to resume retrieving results on the next call to this action. If no token is returned, these results represent the end of the list.

    If a request has a limit that exactly matches the number of remaining results, a token is returned even though there are no more results to retrieve.

    ", + "DescribePlayerSessionsInput$PlayerId": "

    Unique identifier for a player.

    ", + "DescribePlayerSessionsInput$PlayerSessionStatusFilter": "

    Player session status to filter results on. Possible player session states include the following:

    • RESERVED – The player session request has been received, but the player has not yet connected to the server process and/or been validated.
    • ACTIVE – The player has been validated by the server process and is currently connected.
    • COMPLETED – The player connection has been dropped.
    • TIMEDOUT – A player session request was received, but the player did not connect and/or was not validated within the time-out limit (60 seconds).

    ", + "DescribePlayerSessionsInput$NextToken": "

    Token indicating the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To specify the start of the result set, do not specify a value. If a player session ID is specified, this parameter is ignored.

    ", + "DescribePlayerSessionsOutput$NextToken": "

    Token indicating where to resume retrieving results on the next call to this action. If no token is returned, these results represent the end of the list.

    If a request has a limit that exactly matches the number of remaining results, a token is returned even though there are no more results to retrieve.

    ", + "DescribeScalingPoliciesInput$NextToken": "

    Token indicating the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To specify the start of the result set, do not specify a value.

    ", + "DescribeScalingPoliciesOutput$NextToken": "

    Token indicating where to resume retrieving results on the next call to this action. If no token is returned, these results represent the end of the list.

    If a request has a limit that exactly matches the number of remaining results, a token is returned even though there are no more results to retrieve.

    ", + "Event$EventId": "

    Unique identifier for a fleet event.

    ", + "Event$ResourceId": "

    Unique identifier for the resource, such as a fleet ID.

    ", + "FleetAttributes$Description": "

    Human-readable description of the fleet.

    ", + "FleetAttributes$Name": "

    Descriptive label associated with a fleet. Fleet names do not need to be unique.

    ", + "FleetAttributes$ServerLaunchPath": "

    Deprecated. Server launch paths are now set using a RuntimeConfiguration object.

    ", + "FleetAttributes$ServerLaunchParameters": "

    Deprecated. Server launch parameters are now specified using a RuntimeConfiguration object.

    ", + "GameSession$Name": "

    Descriptive label associated with a game session. Session names do not need to be unique.

    ", + "GetGameSessionLogUrlOutput$PreSignedUrl": "

    Location of the requested game session logs, available for download.

    ", + "ListFleetsInput$NextToken": "

    Token indicating the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To specify the start of the result set, do not specify a value.

    ", + "ListFleetsOutput$NextToken": "

    Token indicating where to resume retrieving results on the next call to this action. If no token is returned, these results represent the end of the list.

    If a request has a limit that exactly matches the number of remaining results, a token is returned even though there are no more results to retrieve.

    ", + "PlayerIdList$member": null, + "PlayerSession$PlayerId": "

    Unique identifier for a player.

    ", + "PutScalingPolicyInput$Name": "

    Descriptive label associated with a scaling policy. Policy names do not need to be unique. A fleet can have only one scaling policy with the same name.

    ", + "PutScalingPolicyOutput$Name": "

    Descriptive label associated with a scaling policy. Policy names do not need to be unique.

    ", + "ScalingPolicy$Name": "

    Descriptive label associated with a scaling policy. Policy names do not need to be unique.

    ", + "ServerProcess$LaunchPath": "

    Location in the game build of the server executable. All game builds are installed on instances at the root C:\\game\\..., so an executable file located at MyGame\\latest\\server.exe has a launch path of \"C:\\game\\MyGame\\latest\\server.exe\".

    ", + "ServerProcess$Parameters": "

    Optional list of parameters to pass to the server executable on launch.

    ", + "StringList$member": null, + "UpdateAliasInput$Name": "

    Descriptive label associated with an alias. Alias names do not need to be unique.

    ", + "UpdateAliasInput$Description": "

    Human-readable description of an alias.

    ", + "UpdateBuildInput$Name": "

    Descriptive label associated with a build. Build names do not need to be unique.

    ", + "UpdateBuildInput$Version": "

    Version associated with this build. Version strings do not need to be unique to a build.

    ", + "UpdateFleetAttributesInput$Name": "

    Descriptive label associated with a fleet. Fleet names do not need to be unique.

    ", + "UpdateFleetAttributesInput$Description": "

    Human-readable description of a fleet.

    ", + "UpdateGameSessionInput$Name": "

    Descriptive label associated with a game session. Session names do not need to be unique.

    " + } + }, + "NotFoundException": { + "base": "

    A service resource associated with the request could not be found. Clients should not retry such requests.

    ", + "refs": { + } + }, + "PlayerIdList": { + "base": null, + "refs": { + "CreatePlayerSessionsInput$PlayerIds": "

    List of unique identifiers for the players to be added.

    " + } + }, + "PlayerSession": { + "base": "

    Properties describing a player session.

    ", + "refs": { + "CreatePlayerSessionOutput$PlayerSession": "

    Object containing the newly created player session record.

    ", + "PlayerSessionList$member": null + } + }, + "PlayerSessionCreationPolicy": { + "base": null, + "refs": { + "GameSession$PlayerSessionCreationPolicy": "

    Indicates whether or not the game session is accepting new players.

    ", + "UpdateGameSessionInput$PlayerSessionCreationPolicy": "

    Policy determining whether or not the game session accepts new players.

    " + } + }, + "PlayerSessionId": { + "base": null, + "refs": { + "DescribePlayerSessionsInput$PlayerSessionId": "

    Unique identifier for a player session.

    ", + "PlayerSession$PlayerSessionId": "

    Unique identifier for a player session.

    " + } + }, + "PlayerSessionList": { + "base": null, + "refs": { + "CreatePlayerSessionsOutput$PlayerSessions": "

    Collection of player session objects created for the added players.

    ", + "DescribePlayerSessionsOutput$PlayerSessions": "

    Collection of objects containing properties for each player session that matches the request.

    " + } + }, + "PlayerSessionStatus": { + "base": null, + "refs": { + "PlayerSession$Status": "

    Current status of the player session. Possible player session states include the following:

    • RESERVED – The player session request has been received, but the player has not yet connected to the server process and/or been validated.
    • ACTIVE – The player has been validated by the server process and is currently connected.
    • COMPLETED – The player connection has been dropped.
    • TIMEDOUT – A player session request was received, but the player did not connect and/or was not validated within the time-out limit (60 seconds).

    " + } + }, + "PortNumber": { + "base": null, + "refs": { + "GameSession$Port": "

    Port number for the game session. To connect to a GameLift server process, an app needs both the IP address and port number.

    ", + "IpPermission$FromPort": "

    Starting value for a range of allowed port numbers.

    ", + "IpPermission$ToPort": "

    Ending value for a range of allowed port numbers. Port numbers are end-inclusive. This value must be higher than FromPort.

    ", + "PlayerSession$Port": "

    Port number for the game session. To connect to a GameLift server process, an app needs both the IP address and port number.

    " + } + }, + "PositiveInteger": { + "base": null, + "refs": { + "DescribeFleetAttributesInput$Limit": "

    Maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages. This parameter is ignored when the request specifies one or a list of fleet IDs.

    ", + "DescribeFleetCapacityInput$Limit": "

    Maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages. This parameter is ignored when the request specifies one or a list of fleet IDs.

    ", + "DescribeFleetEventsInput$Limit": "

    Maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages.

    ", + "DescribeFleetUtilizationInput$Limit": "

    Maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages. This parameter is ignored when the request specifies one or a list of fleet IDs.

    ", + "DescribeGameSessionDetailsInput$Limit": "

    Maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages.

    ", + "DescribeGameSessionsInput$Limit": "

    Maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages.

    ", + "DescribePlayerSessionsInput$Limit": "

    Maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages. If a player session ID is specified, this parameter is ignored.

    ", + "DescribeScalingPoliciesInput$Limit": "

    Maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages.

    ", + "ListAliasesInput$Limit": "

    Maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages.

    ", + "ListBuildsInput$Limit": "

    Maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages.

    ", + "ListFleetsInput$Limit": "

    Maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages.

    ", + "PutScalingPolicyInput$EvaluationPeriods": "

    Length of time (in minutes) the metric must be at or beyond the threshold before a scaling event is triggered.

    ", + "ScalingPolicy$EvaluationPeriods": "

    Length of time (in minutes) the metric must be at or beyond the threshold before a scaling event is triggered.

    ", + "ServerProcess$ConcurrentExecutions": "

    Number of server processes using this configuration to run concurrently on an instance.

    " + } + }, + "PositiveLong": { + "base": null, + "refs": { + "Build$SizeOnDisk": "

    File size of the uploaded game build, expressed in bytes. When the build state is INITIALIZED, this value is 0.

    " + } + }, + "ProtectionPolicy": { + "base": null, + "refs": { + "CreateFleetInput$NewGameSessionProtectionPolicy": "

    Game session protection policy to apply to all instances in this fleet. If this parameter is not set, instances in this fleet default to no protection. You can change a fleet's protection policy using UpdateFleetAttributes, but this change will only affect sessions created after the policy change. You can also set protection for individual game sessions using UpdateGameSession.

    • NoProtection – The game session can be terminated during a scale-down event.
    • FullProtection – If the game session is in an ACTIVE status, it cannot be terminated during a scale-down event.

    ", + "FleetAttributes$NewGameSessionProtectionPolicy": "

    Type of game session protection to set for all new instances started in the fleet.

    • NoProtection – The game session can be terminated during a scale-down event.
    • FullProtection – If the game session is in an ACTIVE status, it cannot be terminated during a scale-down event.

    ", + "GameSessionDetail$ProtectionPolicy": "

    Current status of protection for the game session.

    • NoProtection – The game session can be terminated during a scale-down event.
    • FullProtection – If the game session is in an ACTIVE status, it cannot be terminated during a scale-down event.

    ", + "UpdateFleetAttributesInput$NewGameSessionProtectionPolicy": "

    Game session protection policy to apply to all new instances created in this fleet. Instances that already exist are not affected. You can set protection for individual instances using UpdateGameSession.

    • NoProtection – The game session can be terminated during a scale-down event.
    • FullProtection – If the game session is in an ACTIVE status, it cannot be terminated during a scale-down event.

    ", + "UpdateGameSessionInput$ProtectionPolicy": "

    Game session protection policy to apply to this game session only; a usage sketch follows the list below.

    • NoProtection – The game session can be terminated during a scale-down event.
    • FullProtection – If the game session is in an ACTIVE status, it cannot be terminated during a scale-down event.
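The usage sketch referenced above: a minimal UpdateGameSession call switching one session to FullProtection. The session ID is a placeholder and svc is the client from the earlier sketch.

func protectSession(svc *gamelift.GameLift, gameSessionID string) error {
	_, err := svc.UpdateGameSession(&gamelift.UpdateGameSessionInput{
		GameSessionId:    aws.String(gameSessionID),
		ProtectionPolicy: aws.String("FullProtection"), // survives scale-down while ACTIVE
	})
	return err
}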

    " + } + }, + "PutScalingPolicyInput": { + "base": "

    Represents the input for a request action.

    ", + "refs": { + } + }, + "PutScalingPolicyOutput": { + "base": "

    Represents the returned data in response to a request action.

    ", + "refs": { + } + }, + "RequestUploadCredentialsInput": { + "base": "

    Represents the input for a request action.

    ", + "refs": { + } + }, + "RequestUploadCredentialsOutput": { + "base": "

    Represents the returned data in response to a request action.

    ", + "refs": { + } + }, + "ResolveAliasInput": { + "base": "

    Represents the input for a request action.

    ", + "refs": { + } + }, + "ResolveAliasOutput": { + "base": "

    Represents the returned data in response to a request action.

    ", + "refs": { + } + }, + "RoutingStrategy": { + "base": "

    Routing configuration for a fleet alias.

    ", + "refs": { + "Alias$RoutingStrategy": null, + "CreateAliasInput$RoutingStrategy": "

    Object specifying the fleet and routing type to use for the alias.
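A minimal sketch of supplying that object on CreateAlias; the alias name and fleet ID are placeholders.

func createSimpleAlias(svc *gamelift.GameLift) (*gamelift.Alias, error) {
	out, err := svc.CreateAlias(&gamelift.CreateAliasInput{
		Name: aws.String("prod"), // placeholder
		RoutingStrategy: &gamelift.RoutingStrategy{
			Type:    aws.String("SIMPLE"),     // resolve to one specific fleet
			FleetId: aws.String("fleet-1111"), // placeholder
		},
	})
	if err != nil {
		return nil, err
	}
	return out.Alias, nil
}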

    ", + "UpdateAliasInput$RoutingStrategy": "

    Object specifying the fleet and routing type to use for the alias.

    " + } + }, + "RoutingStrategyType": { + "base": null, + "refs": { + "ListAliasesInput$RoutingStrategyType": "

    Type of routing to filter results on. Use this parameter to retrieve only aliases of a certain type. To retrieve all aliases, leave this parameter empty. Possible routing types include the following:

    • SIMPLE – The alias resolves to one specific fleet. Use this type when routing to active fleets.
    • TERMINAL – The alias does not resolve to a fleet but instead can be used to display a message to the user. A terminal alias throws a TerminalRoutingStrategyException with the RoutingStrategy message embedded.

    ", + "RoutingStrategy$Type": "

    Type of routing strategy. Possible routing types include the following:

    • SIMPLE – The alias resolves to one specific fleet. Use this type when routing to active fleets.
    • TERMINAL – The alias does not resolve to a fleet but instead can be used to display a message to the user. A terminal alias throws a TerminalRoutingStrategyException with the RoutingStrategy message embedded.

    " + } + }, + "RuntimeConfiguration": { + "base": "

    Collection of server process configurations that describe what processes should be run on each instance in a fleet. An instance can launch and maintain multiple server processes based on the runtime configuration; it regularly checks for an updated runtime configuration and starts new server processes to match the latest version.

    The key purpose of a runtime configuration with multiple server process configurations is to be able to run more than one kind of game server in a single fleet. You can include configurations for more than one server executable so that two or more different programs run on the same instance. This option might be useful, for example, to run more than one version of your game server on the same fleet. Another option is to specify configurations for the same server executable but with different launch parameters.

    A GameLift instance is limited to 50 processes running simultaneously. To calculate the total number of processes specified in a runtime configuration, add the values of the ConcurrentExecutions parameter for each ServerProcess object in the runtime configuration.
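That calculation is easy to replicate client-side before submitting a configuration; a sketch using the generated types in this package:

func totalConcurrentProcesses(rc *gamelift.RuntimeConfiguration) int64 {
	var total int64
	for _, sp := range rc.ServerProcesses {
		total += aws.Int64Value(sp.ConcurrentExecutions)
	}
	return total // must stay at or below the 50-process instance limit
}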

    ", + "refs": { + "CreateFleetInput$RuntimeConfiguration": "

    Instructions for launching server processes on each instance in the fleet. The runtime configuration for a fleet has a collection of server process configurations, one for each type of server process to run on an instance. A server process configuration specifies the location of the server executable, launch parameters, and the number of concurrent processes with that configuration to maintain on each instance. A CreateFleet request must include a runtime configuration with at least one server process configuration; otherwise the request will fail with an invalid request exception. (This parameter replaces the parameters ServerLaunchPath and ServerLaunchParameters; requests that contain values for these parameters instead of a runtime configuration will continue to work.)
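A minimal sketch of a CreateFleet request that satisfies the at-least-one-server-process rule; the build ID, launch path, and instance type are placeholders.

func createFleet(svc *gamelift.GameLift) error {
	_, err := svc.CreateFleet(&gamelift.CreateFleetInput{
		Name:            aws.String("example-fleet"),
		BuildId:         aws.String("build-1111"), // placeholder
		EC2InstanceType: aws.String("c4.large"),
		RuntimeConfiguration: &gamelift.RuntimeConfiguration{
			ServerProcesses: []*gamelift.ServerProcess{{
				LaunchPath:           aws.String(`C:\game\server.exe`), // placeholder
				ConcurrentExecutions: aws.Int64(10),
			}},
		},
	})
	return err // an invalid request exception if no server process is included
}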

    ", + "DescribeRuntimeConfigurationOutput$RuntimeConfiguration": "

    Instructions describing how server processes should be launched and maintained on each instance in the fleet.

    ", + "UpdateRuntimeConfigurationInput$RuntimeConfiguration": "

    Instructions for launching server processes on each instance in the fleet. The runtime configuration for a fleet has a collection of server process configurations, one for each type of server process to run on an instance. A server process configuration specifies the location of the server executable, launch parameters, and the number of concurrent processes with that configuration to maintain on each instance.

    ", + "UpdateRuntimeConfigurationOutput$RuntimeConfiguration": "

    The runtime configuration currently in force. If the update was successful, this object matches the one in the request.

    " + } + }, + "S3Location": { + "base": "

    Location in Amazon Simple Storage Service (Amazon S3) where a build's files are stored. This location is assigned in response to a CreateBuild call, and is always in the same region as the service used to create the build. For more details, see the Amazon S3 documentation.

    ", + "refs": { + "CreateBuildInput$StorageLocation": null, + "CreateBuildOutput$StorageLocation": "

    Amazon S3 path and key, identifying where the game build files are stored.

    ", + "RequestUploadCredentialsOutput$StorageLocation": "

    Amazon S3 path and key, identifying where the game build files are stored.

    " + } + }, + "ScalingAdjustmentType": { + "base": null, + "refs": { + "PutScalingPolicyInput$ScalingAdjustmentType": "

    Type of adjustment to make to a fleet's instance count (see FleetCapacity); a sketch of each option's arithmetic follows this list:

    • ChangeInCapacity – add (or subtract) the scaling adjustment value from the current instance count. Positive values scale up while negative values scale down.
    • ExactCapacity – set the instance count to the scaling adjustment value.
    • PercentChangeInCapacity – increase or reduce the current instance count by the scaling adjustment, read as a percentage. Positive values scale up while negative values scale down; for example, a value of \"-10\" scales the fleet down by 10%.
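The sketch referenced above, in plain Go with no SDK calls, mirroring the three adjustment semantics:

func applyAdjustment(current, adjustment int64, adjustmentType string) int64 {
	switch adjustmentType {
	case "ChangeInCapacity":
		return current + adjustment // negative values scale down
	case "ExactCapacity":
		return adjustment
	case "PercentChangeInCapacity":
		// e.g. current=20, adjustment=-10 -> 18 instances (down by 10%)
		return current + current*adjustment/100
	}
	return current
}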

    ", + "ScalingPolicy$ScalingAdjustmentType": "

    Type of adjustment to make to a fleet's instance count (see FleetCapacity):

    • ChangeInCapacity – add (or subtract) the scaling adjustment value from the current instance count. Positive values scale up while negative values scale down.
    • ExactCapacity – set the instance count to the scaling adjustment value.
    • PercentChangeInCapacity – increase or reduce the current instance count by the scaling adjustment, read as a percentage. Positive values scale up while negative values scale down.

    " + } + }, + "ScalingPolicy": { + "base": "

    Rule that controls how a fleet is scaled. Scaling policies are uniquely identified by the combination of name and fleet ID.

    ", + "refs": { + "ScalingPolicyList$member": null + } + }, + "ScalingPolicyList": { + "base": null, + "refs": { + "DescribeScalingPoliciesOutput$ScalingPolicies": "

    Collection of objects containing the scaling policies matching the request.

    " + } + }, + "ScalingStatusType": { + "base": null, + "refs": { + "DescribeScalingPoliciesInput$StatusFilter": "

    Scaling policy status to filter results on. A scaling policy is only in force when in an ACTIVE status.

    • ACTIVE – The scaling policy is currently in force.
    • UPDATEREQUESTED – A request to update the scaling policy has been received.
    • UPDATING – A change is being made to the scaling policy.
    • DELETEREQUESTED – A request to delete the scaling policy has been received.
    • DELETING – The scaling policy is being deleted.
    • DELETED – The scaling policy has been deleted.
    • ERROR – An error occurred in creating the policy. It should be removed and recreated.

    ", + "ScalingPolicy$Status": "

    Current status of the scaling policy. The scaling policy is only in force when in an ACTIVE status.

    • ACTIVE – The scaling policy is currently in force.
    • UPDATEREQUESTED – A request to update the scaling policy has been received.
    • UPDATING – A change is being made to the scaling policy.
    • DELETEREQUESTED – A request to delete the scaling policy has been received.
    • DELETING – The scaling policy is being deleted.
    • DELETED – The scaling policy has been deleted.
    • ERROR – An error occurred in creating the policy. It should be removed and recreated.

    " + } + }, + "ServerProcess": { + "base": "

    A set of instructions for launching server processes on each instance in a fleet. Each instruction set identifies the location of the server executable, optional launch parameters, and the number of server processes with this configuration to maintain concurrently on the instance. Server process configurations make up a fleet's RuntimeConfiguration.

    ", + "refs": { + "ServerProcessList$member": null + } + }, + "ServerProcessList": { + "base": null, + "refs": { + "RuntimeConfiguration$ServerProcesses": "

    Collection of server process configurations describing what server processes to run on each instance in a fleet.

    " + } + }, + "StringList": { + "base": null, + "refs": { + "CreateFleetInput$LogPaths": "

    Location of default log files. When a server process is shut down, Amazon GameLift captures and stores any log files in this location. These logs are in addition to game session logs; see more on game session logs in the Amazon GameLift Developer Guide. If no default log path for a fleet is specified, GameLift will automatically upload logs stored on each instance at C:\\game\\logs. Use the GameLift console to access stored logs.

    ", + "FleetAttributes$LogPaths": "

    Location of default log files. When a server process is shut down, Amazon GameLift captures and stores any log files in this location. These logs are in addition to game session logs; see more on game session logs in the Amazon GameLift Developer Guide. If no default log path for a fleet is specified, GameLift will automatically upload logs stored on each instance at C:\\game\\logs. Use the GameLift console to access stored logs.

    " + } + }, + "TerminalRoutingStrategyException": { + "base": "

    The service is unable to resolve the routing for a particular alias because it has a terminal RoutingStrategy associated with it. The message returned in this exception is the message defined in the TerminalRoutingStrategy itself. Such requests should only be retried if the routing strategy for the specified alias is modified.

    ", + "refs": { + } + }, + "Timestamp": { + "base": null, + "refs": { + "Alias$CreationTime": "

    Time stamp indicating when this object was created. Format is an integer representing the number of seconds since the Unix epoch (Unix time).

    ", + "Alias$LastUpdatedTime": "

    Time stamp indicating when this object was last modified. Format is an integer representing the number of seconds since the Unix epoch (Unix time).

    ", + "Build$CreationTime": "

    Time stamp indicating when this object was created. Format is an integer representing the number of seconds since the Unix epoch (Unix time).

    ", + "DescribeFleetEventsInput$StartTime": "

    Earliest date to retrieve event logs for. If no start time is specified, this call returns entries starting from when the fleet was created to the specified end time. Format is an integer representing the number of seconds since the Unix epoch (Unix time).

    ", + "DescribeFleetEventsInput$EndTime": "

    Most recent date to retrieve event logs for. If no end time is specified, this call returns entries from the specified start time up to the present. Format is an integer representing the number of seconds since the Unix epoch (Unix time).

    ", + "Event$EventTime": "

    Time stamp indicating when this event occurred. Format is an integer representing the number of seconds since the Unix epoch (Unix time).

    ", + "FleetAttributes$CreationTime": "

    Time stamp indicating when this object was created. Format is an integer representing the number of seconds since the Unix epoch (Unix time).

    ", + "FleetAttributes$TerminationTime": "

    Time stamp indicating when this fleet was terminated. Format is an integer representing the number of seconds since the Unix epoch (Unix time).

    ", + "GameSession$CreationTime": "

    Time stamp indicating when this object was created. Format is an integer representing the number of seconds since the Unix epoch (Unix time).

    ", + "GameSession$TerminationTime": "

    Time stamp indicating when this game session was terminated. Format is an integer representing the number of seconds since the Unix epoch (Unix time).

    ", + "PlayerSession$CreationTime": "

    Time stamp indicating when this object was created. Format is an integer representing the number of seconds since the Unix epoch (Unix time).

    ", + "PlayerSession$TerminationTime": "

    Time stamp indicating when this player session was terminated. Format is an integer representing the number of seconds since the Unix epoch (Unix time).

    " + } + }, + "UnauthorizedException": { + "base": "

    The client failed authentication. Clients should not retry such requests.

    ", + "refs": { + } + }, + "UpdateAliasInput": { + "base": "

    Represents the input for a request action.

    ", + "refs": { + } + }, + "UpdateAliasOutput": { + "base": "

    Represents the returned data in response to a request action.

    ", + "refs": { + } + }, + "UpdateBuildInput": { + "base": "

    Represents the input for a request action.

    ", + "refs": { + } + }, + "UpdateBuildOutput": { + "base": "

    Represents the returned data in response to a request action.

    ", + "refs": { + } + }, + "UpdateFleetAttributesInput": { + "base": "

    Represents the input for a request action.

    ", + "refs": { + } + }, + "UpdateFleetAttributesOutput": { + "base": "

    Represents the returned data in response to a request action.

    ", + "refs": { + } + }, + "UpdateFleetCapacityInput": { + "base": "

    Represents the input for a request action.

    ", + "refs": { + } + }, + "UpdateFleetCapacityOutput": { + "base": "

    Represents the returned data in response to a request action.

    ", + "refs": { + } + }, + "UpdateFleetPortSettingsInput": { + "base": "

    Represents the input for a request action.

    ", + "refs": { + } + }, + "UpdateFleetPortSettingsOutput": { + "base": "

    Represents the returned data in response to a request action.

    ", + "refs": { + } + }, + "UpdateGameSessionInput": { + "base": "

    Represents the input for a request action.

    ", + "refs": { + } + }, + "UpdateGameSessionOutput": { + "base": "

    Represents the returned data in response to a request action.

    ", + "refs": { + } + }, + "UpdateRuntimeConfigurationInput": { + "base": "

    Represents the input for a request action.

    ", + "refs": { + } + }, + "UpdateRuntimeConfigurationOutput": { + "base": "

    Represents the returned data in response to a request action.

    ", + "refs": { + } + }, + "WholeNumber": { + "base": null, + "refs": { + "CreateGameSessionInput$MaximumPlayerSessionCount": "

    Maximum number of players that can be connected simultaneously to the game session.

    ", + "EC2InstanceCounts$DESIRED": "

    Ideal number of active instances in the fleet.

    ", + "EC2InstanceCounts$MINIMUM": "

    Minimum value allowed for the fleet's instance count.

    ", + "EC2InstanceCounts$MAXIMUM": "

    Maximum value allowed for the fleet's instance count.

    ", + "EC2InstanceCounts$PENDING": "

    Number of instances in the fleet that are starting but not yet active.

    ", + "EC2InstanceCounts$ACTIVE": "

    Actual number of active instances in the fleet.

    ", + "EC2InstanceCounts$IDLE": "

    Number of active instances in the fleet that are not currently hosting a game session.

    ", + "EC2InstanceCounts$TERMINATING": "

    Number of instances in the fleet that are no longer active but haven't yet been terminated.

    ", + "EC2InstanceLimit$CurrentInstances": "

    Number of instances of the specified type that are currently in use by this AWS account.

    ", + "EC2InstanceLimit$InstanceLimit": "

    Number of instances allowed.

    ", + "FleetUtilization$ActiveServerProcessCount": "

    Number of server processes in an ACTIVE state currently running across all instances in the fleet.

    ", + "FleetUtilization$ActiveGameSessionCount": "

    Number of active game sessions currently being hosted on all instances in the fleet.

    ", + "FleetUtilization$CurrentPlayerSessionCount": "

    Number of active player sessions currently being hosted on all instances in the fleet.

    ", + "FleetUtilization$MaximumPlayerSessionCount": "

    Maximum players allowed across all game sessions currently being hosted on all instances in the fleet.

    ", + "GameSession$CurrentPlayerSessionCount": "

    Number of players currently in the game session.

    ", + "GameSession$MaximumPlayerSessionCount": "

    Maximum number of players allowed in the game session.

    ", + "UpdateFleetCapacityInput$DesiredInstances": "

    Number of EC2 instances you want this fleet to host.

    ", + "UpdateFleetCapacityInput$MinSize": "

    Minimum value allowed for the fleet's instance count. Default if not set is 0.

    ", + "UpdateFleetCapacityInput$MaxSize": "

    Maximum value allowed for the fleet's instance count. Default if not set is 1.
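A sketch tying the three capacity fields together; DesiredInstances must land inside [MinSize, MaxSize], and the fleet ID is a placeholder.

func setCapacity(svc *gamelift.GameLift) error {
	_, err := svc.UpdateFleetCapacity(&gamelift.UpdateFleetCapacityInput{
		FleetId:          aws.String("fleet-1111"), // placeholder
		DesiredInstances: aws.Int64(4),
		MinSize:          aws.Int64(1), // defaults to 0 if unset
		MaxSize:          aws.Int64(8), // defaults to 1 if unset
	})
	return err
}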

    ", + "UpdateGameSessionInput$MaximumPlayerSessionCount": "

    Maximum number of players that can be simultaneously connected to the game session.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/gamelift/2015-10-01/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/gamelift/2015-10-01/examples-1.json new file mode 100644 index 000000000..faff76894 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/gamelift/2015-10-01/examples-1.json @@ -0,0 +1,5 @@ +{ + "version":"1.0", + "examples":{ + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/glacier/2012-06-01/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/glacier/2012-06-01/api-2.json new file mode 100644 index 000000000..d7db69087 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/glacier/2012-06-01/api-2.json @@ -0,0 +1,2144 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2012-06-01", + "checksumFormat":"sha256", + "endpointPrefix":"glacier", + "serviceFullName":"Amazon Glacier", + "signatureVersion":"v4", + "protocol":"rest-json" + }, + "operations":{ + "AbortMultipartUpload":{ + "name":"AbortMultipartUpload", + "http":{ + "method":"DELETE", + "requestUri":"/{accountId}/vaults/{vaultName}/multipart-uploads/{uploadId}", + "responseCode":204 + }, + "input":{"shape":"AbortMultipartUploadInput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "AbortVaultLock":{ + "name":"AbortVaultLock", + "http":{ + "method":"DELETE", + "requestUri":"/{accountId}/vaults/{vaultName}/lock-policy", + "responseCode":204 + }, + "input":{"shape":"AbortVaultLockInput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "AddTagsToVault":{ + "name":"AddTagsToVault", + "http":{ + "method":"POST", + "requestUri":"/{accountId}/vaults/{vaultName}/tags?operation=add", + "responseCode":204 + }, + "input":{"shape":"AddTagsToVaultInput"}, + "errors":[ + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "CompleteMultipartUpload":{ + "name":"CompleteMultipartUpload", + "http":{ + "method":"POST", + "requestUri":"/{accountId}/vaults/{vaultName}/multipart-uploads/{uploadId}", + "responseCode":201 + }, + "input":{"shape":"CompleteMultipartUploadInput"}, + "output":{"shape":"ArchiveCreationOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + 
"exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "CompleteVaultLock":{ + "name":"CompleteVaultLock", + "http":{ + "method":"POST", + "requestUri":"/{accountId}/vaults/{vaultName}/lock-policy/{lockId}", + "responseCode":204 + }, + "input":{"shape":"CompleteVaultLockInput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "CreateVault":{ + "name":"CreateVault", + "http":{ + "method":"PUT", + "requestUri":"/{accountId}/vaults/{vaultName}", + "responseCode":201 + }, + "input":{"shape":"CreateVaultInput"}, + "output":{"shape":"CreateVaultOutput"}, + "errors":[ + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "DeleteArchive":{ + "name":"DeleteArchive", + "http":{ + "method":"DELETE", + "requestUri":"/{accountId}/vaults/{vaultName}/archives/{archiveId}", + "responseCode":204 + }, + "input":{"shape":"DeleteArchiveInput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "DeleteVault":{ + "name":"DeleteVault", + "http":{ + "method":"DELETE", + "requestUri":"/{accountId}/vaults/{vaultName}", + "responseCode":204 + }, + "input":{"shape":"DeleteVaultInput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "DeleteVaultAccessPolicy":{ + "name":"DeleteVaultAccessPolicy", + "http":{ + "method":"DELETE", + "requestUri":"/{accountId}/vaults/{vaultName}/access-policy", + "responseCode":204 + }, + "input":{"shape":"DeleteVaultAccessPolicyInput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "DeleteVaultNotifications":{ 
+ "name":"DeleteVaultNotifications", + "http":{ + "method":"DELETE", + "requestUri":"/{accountId}/vaults/{vaultName}/notification-configuration", + "responseCode":204 + }, + "input":{"shape":"DeleteVaultNotificationsInput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "DescribeJob":{ + "name":"DescribeJob", + "http":{ + "method":"GET", + "requestUri":"/{accountId}/vaults/{vaultName}/jobs/{jobId}" + }, + "input":{"shape":"DescribeJobInput"}, + "output":{"shape":"GlacierJobDescription"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "DescribeVault":{ + "name":"DescribeVault", + "http":{ + "method":"GET", + "requestUri":"/{accountId}/vaults/{vaultName}" + }, + "input":{"shape":"DescribeVaultInput"}, + "output":{"shape":"DescribeVaultOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "GetDataRetrievalPolicy":{ + "name":"GetDataRetrievalPolicy", + "http":{ + "method":"GET", + "requestUri":"/{accountId}/policies/data-retrieval" + }, + "input":{"shape":"GetDataRetrievalPolicyInput"}, + "output":{"shape":"GetDataRetrievalPolicyOutput"}, + "errors":[ + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "GetJobOutput":{ + "name":"GetJobOutput", + "http":{ + "method":"GET", + "requestUri":"/{accountId}/vaults/{vaultName}/jobs/{jobId}/output" + }, + "input":{"shape":"GetJobOutputInput"}, + "output":{"shape":"GetJobOutputOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "GetVaultAccessPolicy":{ + "name":"GetVaultAccessPolicy", + "http":{ + "method":"GET", + "requestUri":"/{accountId}/vaults/{vaultName}/access-policy" + }, + "input":{"shape":"GetVaultAccessPolicyInput"}, + "output":{"shape":"GetVaultAccessPolicyOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + 
"error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "GetVaultLock":{ + "name":"GetVaultLock", + "http":{ + "method":"GET", + "requestUri":"/{accountId}/vaults/{vaultName}/lock-policy" + }, + "input":{"shape":"GetVaultLockInput"}, + "output":{"shape":"GetVaultLockOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "GetVaultNotifications":{ + "name":"GetVaultNotifications", + "http":{ + "method":"GET", + "requestUri":"/{accountId}/vaults/{vaultName}/notification-configuration" + }, + "input":{"shape":"GetVaultNotificationsInput"}, + "output":{"shape":"GetVaultNotificationsOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "InitiateJob":{ + "name":"InitiateJob", + "http":{ + "method":"POST", + "requestUri":"/{accountId}/vaults/{vaultName}/jobs", + "responseCode":202 + }, + "input":{"shape":"InitiateJobInput"}, + "output":{"shape":"InitiateJobOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"PolicyEnforcedException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "InitiateMultipartUpload":{ + "name":"InitiateMultipartUpload", + "http":{ + "method":"POST", + "requestUri":"/{accountId}/vaults/{vaultName}/multipart-uploads", + "responseCode":201 + }, + "input":{"shape":"InitiateMultipartUploadInput"}, + "output":{"shape":"InitiateMultipartUploadOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "InitiateVaultLock":{ + "name":"InitiateVaultLock", + "http":{ + "method":"POST", + "requestUri":"/{accountId}/vaults/{vaultName}/lock-policy", + "responseCode":201 + }, + "input":{"shape":"InitiateVaultLockInput"}, + "output":{"shape":"InitiateVaultLockOutput"}, + "errors":[ + { + 
"shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "ListJobs":{ + "name":"ListJobs", + "http":{ + "method":"GET", + "requestUri":"/{accountId}/vaults/{vaultName}/jobs" + }, + "input":{"shape":"ListJobsInput"}, + "output":{"shape":"ListJobsOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "ListMultipartUploads":{ + "name":"ListMultipartUploads", + "http":{ + "method":"GET", + "requestUri":"/{accountId}/vaults/{vaultName}/multipart-uploads" + }, + "input":{"shape":"ListMultipartUploadsInput"}, + "output":{"shape":"ListMultipartUploadsOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "ListParts":{ + "name":"ListParts", + "http":{ + "method":"GET", + "requestUri":"/{accountId}/vaults/{vaultName}/multipart-uploads/{uploadId}" + }, + "input":{"shape":"ListPartsInput"}, + "output":{"shape":"ListPartsOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "ListTagsForVault":{ + "name":"ListTagsForVault", + "http":{ + "method":"GET", + "requestUri":"/{accountId}/vaults/{vaultName}/tags" + }, + "input":{"shape":"ListTagsForVaultInput"}, + "output":{"shape":"ListTagsForVaultOutput"}, + "errors":[ + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "ListVaults":{ + "name":"ListVaults", + "http":{ + "method":"GET", + "requestUri":"/{accountId}/vaults" + }, + "input":{"shape":"ListVaultsInput"}, + "output":{"shape":"ListVaultsOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + 
"error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "RemoveTagsFromVault":{ + "name":"RemoveTagsFromVault", + "http":{ + "method":"POST", + "requestUri":"/{accountId}/vaults/{vaultName}/tags?operation=remove", + "responseCode":204 + }, + "input":{"shape":"RemoveTagsFromVaultInput"}, + "errors":[ + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "SetDataRetrievalPolicy":{ + "name":"SetDataRetrievalPolicy", + "http":{ + "method":"PUT", + "requestUri":"/{accountId}/policies/data-retrieval", + "responseCode":204 + }, + "input":{"shape":"SetDataRetrievalPolicyInput"}, + "errors":[ + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "SetVaultAccessPolicy":{ + "name":"SetVaultAccessPolicy", + "http":{ + "method":"PUT", + "requestUri":"/{accountId}/vaults/{vaultName}/access-policy", + "responseCode":204 + }, + "input":{"shape":"SetVaultAccessPolicyInput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "SetVaultNotifications":{ + "name":"SetVaultNotifications", + "http":{ + "method":"PUT", + "requestUri":"/{accountId}/vaults/{vaultName}/notification-configuration", + "responseCode":204 + }, + "input":{"shape":"SetVaultNotificationsInput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "UploadArchive":{ + "name":"UploadArchive", + "http":{ + "method":"POST", + "requestUri":"/{accountId}/vaults/{vaultName}/archives", + "responseCode":201 + }, + "input":{"shape":"UploadArchiveInput"}, + "output":{"shape":"ArchiveCreationOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"RequestTimeoutException", + "error":{"httpStatusCode":408}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + 
"UploadMultipartPart":{ + "name":"UploadMultipartPart", + "http":{ + "method":"PUT", + "requestUri":"/{accountId}/vaults/{vaultName}/multipart-uploads/{uploadId}", + "responseCode":204 + }, + "input":{"shape":"UploadMultipartPartInput"}, + "output":{"shape":"UploadMultipartPartOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"RequestTimeoutException", + "error":{"httpStatusCode":408}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + } + }, + "shapes":{ + "AbortMultipartUploadInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + }, + "uploadId":{ + "shape":"string", + "location":"uri", + "locationName":"uploadId" + } + }, + "required":[ + "accountId", + "vaultName", + "uploadId" + ] + }, + "AbortVaultLockInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + } + }, + "required":[ + "accountId", + "vaultName" + ] + }, + "ActionCode":{ + "type":"string", + "enum":[ + "ArchiveRetrieval", + "InventoryRetrieval" + ] + }, + "AddTagsToVaultInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + }, + "Tags":{"shape":"TagMap"} + }, + "required":[ + "accountId", + "vaultName" + ] + }, + "ArchiveCreationOutput":{ + "type":"structure", + "members":{ + "location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "checksum":{ + "shape":"string", + "location":"header", + "locationName":"x-amz-sha256-tree-hash" + }, + "archiveId":{ + "shape":"string", + "location":"header", + "locationName":"x-amz-archive-id" + } + } + }, + "CompleteMultipartUploadInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + }, + "uploadId":{ + "shape":"string", + "location":"uri", + "locationName":"uploadId" + }, + "archiveSize":{ + "shape":"string", + "location":"header", + "locationName":"x-amz-archive-size" + }, + "checksum":{ + "shape":"string", + "location":"header", + "locationName":"x-amz-sha256-tree-hash" + } + }, + "required":[ + "accountId", + "vaultName", + "uploadId" + ] + }, + "CompleteVaultLockInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + }, + "lockId":{ + "shape":"string", + "location":"uri", + "locationName":"lockId" + } + }, + "required":[ + "accountId", + "vaultName", + "lockId" + ] + }, + "CreateVaultInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + 
"location":"uri", + "locationName":"vaultName" + } + }, + "required":[ + "accountId", + "vaultName" + ] + }, + "CreateVaultOutput":{ + "type":"structure", + "members":{ + "location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + } + } + }, + "DataRetrievalPolicy":{ + "type":"structure", + "members":{ + "Rules":{"shape":"DataRetrievalRulesList"} + } + }, + "DataRetrievalRule":{ + "type":"structure", + "members":{ + "Strategy":{"shape":"string"}, + "BytesPerHour":{"shape":"NullableLong"} + } + }, + "DataRetrievalRulesList":{ + "type":"list", + "member":{"shape":"DataRetrievalRule"} + }, + "DateTime":{"type":"string"}, + "DeleteArchiveInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + }, + "archiveId":{ + "shape":"string", + "location":"uri", + "locationName":"archiveId" + } + }, + "required":[ + "accountId", + "vaultName", + "archiveId" + ] + }, + "DeleteVaultAccessPolicyInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + } + }, + "required":[ + "accountId", + "vaultName" + ] + }, + "DeleteVaultInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + } + }, + "required":[ + "accountId", + "vaultName" + ] + }, + "DeleteVaultNotificationsInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + } + }, + "required":[ + "accountId", + "vaultName" + ] + }, + "DescribeJobInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + }, + "jobId":{ + "shape":"string", + "location":"uri", + "locationName":"jobId" + } + }, + "required":[ + "accountId", + "vaultName", + "jobId" + ] + }, + "DescribeVaultInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + } + }, + "required":[ + "accountId", + "vaultName" + ] + }, + "DescribeVaultOutput":{ + "type":"structure", + "members":{ + "VaultARN":{"shape":"string"}, + "VaultName":{"shape":"string"}, + "CreationDate":{"shape":"string"}, + "LastInventoryDate":{"shape":"string"}, + "NumberOfArchives":{"shape":"long"}, + "SizeInBytes":{"shape":"long"} + } + }, + "GetDataRetrievalPolicyInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + } + }, + "required":["accountId"] + }, + "GetDataRetrievalPolicyOutput":{ + "type":"structure", + "members":{ + "Policy":{"shape":"DataRetrievalPolicy"} + } + }, + "GetJobOutputInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + }, + "jobId":{ + "shape":"string", + "location":"uri", 
+ "locationName":"jobId" + }, + "range":{ + "shape":"string", + "location":"header", + "locationName":"Range" + } + }, + "required":[ + "accountId", + "vaultName", + "jobId" + ] + }, + "GetJobOutputOutput":{ + "type":"structure", + "members":{ + "body":{"shape":"Stream"}, + "checksum":{ + "shape":"string", + "location":"header", + "locationName":"x-amz-sha256-tree-hash" + }, + "status":{ + "shape":"httpstatus", + "location":"statusCode" + }, + "contentRange":{ + "shape":"string", + "location":"header", + "locationName":"Content-Range" + }, + "acceptRanges":{ + "shape":"string", + "location":"header", + "locationName":"Accept-Ranges" + }, + "contentType":{ + "shape":"string", + "location":"header", + "locationName":"Content-Type" + }, + "archiveDescription":{ + "shape":"string", + "location":"header", + "locationName":"x-amz-archive-description" + } + }, + "payload":"body" + }, + "GetVaultAccessPolicyInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + } + }, + "required":[ + "accountId", + "vaultName" + ] + }, + "GetVaultAccessPolicyOutput":{ + "type":"structure", + "members":{ + "policy":{"shape":"VaultAccessPolicy"} + }, + "payload":"policy" + }, + "GetVaultLockInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + } + }, + "required":[ + "accountId", + "vaultName" + ] + }, + "GetVaultLockOutput":{ + "type":"structure", + "members":{ + "Policy":{"shape":"string"}, + "State":{"shape":"string"}, + "ExpirationDate":{"shape":"string"}, + "CreationDate":{"shape":"string"} + } + }, + "GetVaultNotificationsInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + } + }, + "required":[ + "accountId", + "vaultName" + ] + }, + "GetVaultNotificationsOutput":{ + "type":"structure", + "members":{ + "vaultNotificationConfig":{"shape":"VaultNotificationConfig"} + }, + "payload":"vaultNotificationConfig" + }, + "GlacierJobDescription":{ + "type":"structure", + "members":{ + "JobId":{"shape":"string"}, + "JobDescription":{"shape":"string"}, + "Action":{"shape":"ActionCode"}, + "ArchiveId":{"shape":"string"}, + "VaultARN":{"shape":"string"}, + "CreationDate":{"shape":"string"}, + "Completed":{"shape":"boolean"}, + "StatusCode":{"shape":"StatusCode"}, + "StatusMessage":{"shape":"string"}, + "ArchiveSizeInBytes":{"shape":"Size"}, + "InventorySizeInBytes":{"shape":"Size"}, + "SNSTopic":{"shape":"string"}, + "CompletionDate":{"shape":"string"}, + "SHA256TreeHash":{"shape":"string"}, + "ArchiveSHA256TreeHash":{"shape":"string"}, + "RetrievalByteRange":{"shape":"string"}, + "InventoryRetrievalParameters":{"shape":"InventoryRetrievalJobDescription"} + } + }, + "InitiateJobInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + }, + "jobParameters":{"shape":"JobParameters"} + }, + "required":[ + "accountId", + "vaultName" + ], + "payload":"jobParameters" + }, + "InitiateJobOutput":{ + "type":"structure", + "members":{ + "location":{ + "shape":"string", + 
"location":"header", + "locationName":"Location" + }, + "jobId":{ + "shape":"string", + "location":"header", + "locationName":"x-amz-job-id" + } + } + }, + "InitiateMultipartUploadInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + }, + "archiveDescription":{ + "shape":"string", + "location":"header", + "locationName":"x-amz-archive-description" + }, + "partSize":{ + "shape":"string", + "location":"header", + "locationName":"x-amz-part-size" + } + }, + "required":[ + "accountId", + "vaultName" + ] + }, + "InitiateMultipartUploadOutput":{ + "type":"structure", + "members":{ + "location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "uploadId":{ + "shape":"string", + "location":"header", + "locationName":"x-amz-multipart-upload-id" + } + } + }, + "InitiateVaultLockInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + }, + "policy":{"shape":"VaultLockPolicy"} + }, + "required":[ + "accountId", + "vaultName" + ], + "payload":"policy" + }, + "InitiateVaultLockOutput":{ + "type":"structure", + "members":{ + "lockId":{ + "shape":"string", + "location":"header", + "locationName":"x-amz-lock-id" + } + } + }, + "InvalidParameterValueException":{ + "type":"structure", + "members":{ + "type":{"shape":"string"}, + "code":{"shape":"string"}, + "message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InventoryRetrievalJobDescription":{ + "type":"structure", + "members":{ + "Format":{"shape":"string"}, + "StartDate":{"shape":"DateTime"}, + "EndDate":{"shape":"DateTime"}, + "Limit":{"shape":"string"}, + "Marker":{"shape":"string"} + } + }, + "InventoryRetrievalJobInput":{ + "type":"structure", + "members":{ + "StartDate":{"shape":"string"}, + "EndDate":{"shape":"string"}, + "Limit":{"shape":"string"}, + "Marker":{"shape":"string"} + } + }, + "JobList":{ + "type":"list", + "member":{"shape":"GlacierJobDescription"} + }, + "JobParameters":{ + "type":"structure", + "members":{ + "Format":{"shape":"string"}, + "Type":{"shape":"string"}, + "ArchiveId":{"shape":"string"}, + "Description":{"shape":"string"}, + "SNSTopic":{"shape":"string"}, + "RetrievalByteRange":{"shape":"string"}, + "InventoryRetrievalParameters":{"shape":"InventoryRetrievalJobInput"} + } + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "type":{"shape":"string"}, + "code":{"shape":"string"}, + "message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "ListJobsInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + }, + "limit":{ + "shape":"string", + "location":"querystring", + "locationName":"limit" + }, + "marker":{ + "shape":"string", + "location":"querystring", + "locationName":"marker" + }, + "statuscode":{ + "shape":"string", + "location":"querystring", + "locationName":"statuscode" + }, + "completed":{ + "shape":"string", + "location":"querystring", + "locationName":"completed" + } + }, + "required":[ + "accountId", + "vaultName" + ] + }, + "ListJobsOutput":{ + "type":"structure", + "members":{ + 
"JobList":{"shape":"JobList"}, + "Marker":{"shape":"string"} + } + }, + "ListMultipartUploadsInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + }, + "marker":{ + "shape":"string", + "location":"querystring", + "locationName":"marker" + }, + "limit":{ + "shape":"string", + "location":"querystring", + "locationName":"limit" + } + }, + "required":[ + "accountId", + "vaultName" + ] + }, + "ListMultipartUploadsOutput":{ + "type":"structure", + "members":{ + "UploadsList":{"shape":"UploadsList"}, + "Marker":{"shape":"string"} + } + }, + "ListPartsInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + }, + "uploadId":{ + "shape":"string", + "location":"uri", + "locationName":"uploadId" + }, + "marker":{ + "shape":"string", + "location":"querystring", + "locationName":"marker" + }, + "limit":{ + "shape":"string", + "location":"querystring", + "locationName":"limit" + } + }, + "required":[ + "accountId", + "vaultName", + "uploadId" + ] + }, + "ListPartsOutput":{ + "type":"structure", + "members":{ + "MultipartUploadId":{"shape":"string"}, + "VaultARN":{"shape":"string"}, + "ArchiveDescription":{"shape":"string"}, + "PartSizeInBytes":{"shape":"long"}, + "CreationDate":{"shape":"string"}, + "Parts":{"shape":"PartList"}, + "Marker":{"shape":"string"} + } + }, + "ListTagsForVaultInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + } + }, + "required":[ + "accountId", + "vaultName" + ] + }, + "ListTagsForVaultOutput":{ + "type":"structure", + "members":{ + "Tags":{"shape":"TagMap"} + } + }, + "ListVaultsInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "marker":{ + "shape":"string", + "location":"querystring", + "locationName":"marker" + }, + "limit":{ + "shape":"string", + "location":"querystring", + "locationName":"limit" + } + }, + "required":["accountId"] + }, + "ListVaultsOutput":{ + "type":"structure", + "members":{ + "VaultList":{"shape":"VaultList"}, + "Marker":{"shape":"string"} + } + }, + "MissingParameterValueException":{ + "type":"structure", + "members":{ + "type":{"shape":"string"}, + "code":{"shape":"string"}, + "message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "NotificationEventList":{ + "type":"list", + "member":{"shape":"string"} + }, + "NullableLong":{"type":"long"}, + "PartList":{ + "type":"list", + "member":{"shape":"PartListElement"} + }, + "PartListElement":{ + "type":"structure", + "members":{ + "RangeInBytes":{"shape":"string"}, + "SHA256TreeHash":{"shape":"string"} + } + }, + "PolicyEnforcedException":{ + "type":"structure", + "members":{ + "type":{"shape":"string"}, + "code":{"shape":"string"}, + "message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "RemoveTagsFromVaultInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + }, + 
"TagKeys":{"shape":"TagKeyList"} + }, + "required":[ + "accountId", + "vaultName" + ] + }, + "RequestTimeoutException":{ + "type":"structure", + "members":{ + "type":{"shape":"string"}, + "code":{"shape":"string"}, + "message":{"shape":"string"} + }, + "error":{"httpStatusCode":408}, + "exception":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "type":{"shape":"string"}, + "code":{"shape":"string"}, + "message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "ServiceUnavailableException":{ + "type":"structure", + "members":{ + "type":{"shape":"string"}, + "code":{"shape":"string"}, + "message":{"shape":"string"} + }, + "error":{"httpStatusCode":500}, + "exception":true + }, + "SetDataRetrievalPolicyInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "Policy":{"shape":"DataRetrievalPolicy"} + }, + "required":["accountId"] + }, + "SetVaultAccessPolicyInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + }, + "policy":{"shape":"VaultAccessPolicy"} + }, + "required":[ + "accountId", + "vaultName" + ], + "payload":"policy" + }, + "SetVaultNotificationsInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + }, + "vaultNotificationConfig":{"shape":"VaultNotificationConfig"} + }, + "required":[ + "accountId", + "vaultName" + ], + "payload":"vaultNotificationConfig" + }, + "Size":{"type":"long"}, + "StatusCode":{ + "type":"string", + "enum":[ + "InProgress", + "Succeeded", + "Failed" + ] + }, + "Stream":{ + "type":"blob", + "streaming":true + }, + "TagKey":{"type":"string"}, + "TagKeyList":{ + "type":"list", + "member":{"shape":"string"} + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"} + }, + "TagValue":{"type":"string"}, + "UploadArchiveInput":{ + "type":"structure", + "members":{ + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + }, + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "archiveDescription":{ + "shape":"string", + "location":"header", + "locationName":"x-amz-archive-description" + }, + "checksum":{ + "shape":"string", + "location":"header", + "locationName":"x-amz-sha256-tree-hash" + }, + "body":{"shape":"Stream"} + }, + "required":[ + "vaultName", + "accountId" + ], + "payload":"body" + }, + "UploadListElement":{ + "type":"structure", + "members":{ + "MultipartUploadId":{"shape":"string"}, + "VaultARN":{"shape":"string"}, + "ArchiveDescription":{"shape":"string"}, + "PartSizeInBytes":{"shape":"long"}, + "CreationDate":{"shape":"string"} + } + }, + "UploadMultipartPartInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + }, + "uploadId":{ + "shape":"string", + "location":"uri", + "locationName":"uploadId" + }, + "checksum":{ + "shape":"string", + "location":"header", + "locationName":"x-amz-sha256-tree-hash" + }, + "range":{ + "shape":"string", + "location":"header", + "locationName":"Content-Range" + }, + 
"body":{"shape":"Stream"} + }, + "required":[ + "accountId", + "vaultName", + "uploadId" + ], + "payload":"body" + }, + "UploadMultipartPartOutput":{ + "type":"structure", + "members":{ + "checksum":{ + "shape":"string", + "location":"header", + "locationName":"x-amz-sha256-tree-hash" + } + } + }, + "UploadsList":{ + "type":"list", + "member":{"shape":"UploadListElement"} + }, + "VaultAccessPolicy":{ + "type":"structure", + "members":{ + "Policy":{"shape":"string"} + } + }, + "VaultList":{ + "type":"list", + "member":{"shape":"DescribeVaultOutput"} + }, + "VaultLockPolicy":{ + "type":"structure", + "members":{ + "Policy":{"shape":"string"} + } + }, + "VaultNotificationConfig":{ + "type":"structure", + "members":{ + "SNSTopic":{"shape":"string"}, + "Events":{"shape":"NotificationEventList"} + } + }, + "boolean":{"type":"boolean"}, + "httpstatus":{"type":"integer"}, + "long":{"type":"long"}, + "string":{"type":"string"} + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/glacier/2012-06-01/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/glacier/2012-06-01/docs-2.json new file mode 100644 index 000000000..3138266c6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/glacier/2012-06-01/docs-2.json @@ -0,0 +1,685 @@ +{ + "version": "2.0", + "operations": { + "AbortMultipartUpload": "

    This operation aborts a multipart upload identified by the upload ID.

    After the Abort Multipart Upload request succeeds, you cannot upload any more parts to the multipart upload or complete the multipart upload. Aborting a completed upload fails. However, aborting an already-aborted upload will succeed for a short time. For more information about uploading a part and completing a multipart upload, see UploadMultipartPart and CompleteMultipartUpload.

    This operation is idempotent.

    An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

    For conceptual information and underlying REST API, go to Working with Archives in Amazon Glacier and Abort Multipart Upload in the Amazon Glacier Developer Guide.
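    As an illustration only, an abort call through the Glacier client in the vendored Go SDK might look like the sketch below; the region, vault name, and upload ID are placeholder values, not part of this API model.

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/glacier"
    )

    func main() {
        sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
        svc := glacier.New(sess)
        // AccountId "-" means the account that signed the request.
        _, err := svc.AbortMultipartUpload(&glacier.AbortMultipartUploadInput{
            AccountId: aws.String("-"),
            VaultName: aws.String("examplevault"),      // placeholder vault name
            UploadId:  aws.String("EXAMPLE-UPLOAD-ID"), // placeholder ID from InitiateMultipartUpload
        })
        if err != nil {
            log.Fatal(err)
        }
    }

    Because the operation is idempotent, retrying this call after a transient failure is safe.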

    ", + "AbortVaultLock": "

    This operation aborts the vault locking process if the vault lock is not in the Locked state. If the vault lock is in the Locked state when this operation is requested, the operation returns an AccessDeniedException error. Aborting the vault locking process removes the vault lock policy from the specified vault.

    A vault lock is put into the InProgress state by calling InitiateVaultLock. A vault lock is put into the Locked state by calling CompleteVaultLock. You can get the state of a vault lock by calling GetVaultLock. For more information about the vault locking process, see Amazon Glacier Vault Lock. For more information about vault lock policies, see Amazon Glacier Access Control with Vault Lock Policies.

    This operation is idempotent. You can successfully invoke this operation multiple times if the vault lock is in the InProgress state or if there is no policy associated with the vault.

    ", + "AddTagsToVault": "

    This operation adds the specified tags to a vault. Each tag is composed of a key and a value. Each vault can have up to 10 tags. If your request would cause the tag limit for the vault to be exceeded, the operation throws the LimitExceededException error. If a tag already exists on the vault under a specified key, the existing key value will be overwritten. For more information about tags, see Tagging Amazon Glacier Resources.

    ", + "CompleteMultipartUpload": "

    You call this operation to inform Amazon Glacier that all the archive parts have been uploaded and that Amazon Glacier can now assemble the archive from the uploaded parts. After assembling and saving the archive to the vault, Amazon Glacier returns the URI path of the newly created archive resource. Using the URI path, you can then access the archive. After you upload an archive, you should save the archive ID returned to retrieve the archive at a later point. You can also get the vault inventory to obtain a list of archive IDs in a vault. For more information, see InitiateJob.

    In the request, you must include the computed SHA256 tree hash of the entire archive you have uploaded. For information about computing a SHA256 tree hash, see Computing Checksums. On the server side, Amazon Glacier also constructs the SHA256 tree hash of the assembled archive. If the values match, Amazon Glacier saves the archive to the vault; otherwise, it returns an error, and the operation fails. The ListParts operation returns a list of parts uploaded for a specific multipart upload. It includes checksum information for each uploaded part that can be used to debug a bad checksum issue.

    Additionally, Amazon Glacier checks for any missing content ranges when assembling the archive; if missing content ranges are found, Amazon Glacier returns an error and the operation fails.

    Complete Multipart Upload is an idempotent operation. After your first successful complete multipart upload, if you call the operation again within a short period, the operation will succeed and return the same archive ID. This is useful in the event you experience a network issue that causes an aborted connection or receive a 500 server error, in which case you can repeat your Complete Multipart Upload request and get the same archive ID without creating duplicate archives. Note, however, that after the multipart upload completes, you cannot call the List Parts operation and the multipart upload will not appear in the List Multipart Uploads response, even though an idempotent complete is still possible.

    An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

    For conceptual information and underlying REST API, go to Uploading Large Archives in Parts (Multipart Upload) and Complete Multipart Upload in the Amazon Glacier Developer Guide.
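    As a sketch of the completing call with the vendored Go SDK, assuming the upload ID, total archive size, and whole-archive tree hash were carried over from the upload steps (all values below are placeholders):

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/glacier"
    )

    func main() {
        sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
        svc := glacier.New(sess)
        out, err := svc.CompleteMultipartUpload(&glacier.CompleteMultipartUploadInput{
            AccountId:   aws.String("-"),
            VaultName:   aws.String("examplevault"),      // placeholder vault name
            UploadId:    aws.String("EXAMPLE-UPLOAD-ID"), // placeholder upload ID
            ArchiveSize: aws.String("16777216"),          // total size of all uploaded parts, in bytes
            Checksum:    aws.String("EXAMPLE-TREE-HASH"), // hex SHA256 tree hash of the whole archive (placeholder)
        })
        if err != nil {
            log.Fatal(err)
        }
        // Save this ID; it is needed to retrieve or delete the archive later.
        fmt.Println("archive ID:", aws.StringValue(out.ArchiveId))
    }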

    ", + "CompleteVaultLock": "

    This operation completes the vault locking process by transitioning the vault lock from the InProgress state to the Locked state, which causes the vault lock policy to become unchangeable. A vault lock is put into the InProgress state by calling InitiateVaultLock. You can obtain the state of the vault lock by calling GetVaultLock. For more information about the vault locking process, see Amazon Glacier Vault Lock.

    This operation is idempotent. This request is always successful if the vault lock is in the Locked state and the provided lock ID matches the lock ID originally used to lock the vault.

    If an invalid lock ID is passed in the request when the vault lock is in the Locked state, the operation returns an AccessDeniedException error. If an invalid lock ID is passed in the request when the vault lock is in the InProgress state, the operation throws an InvalidParameter error.

    ", + "CreateVault": "

    This operation creates a new vault with the specified name. The name of the vault must be unique within a region for an AWS account. You can create up to 1,000 vaults per account. If you need to create more vaults, contact Amazon Glacier.

    You must use the following guidelines when naming a vault.

    • Names can be between 1 and 255 characters long.

    • Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), and '.' (period).

    This operation is idempotent.

    An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

    For conceptual information and underlying REST API, go to Creating a Vault in Amazon Glacier and Create Vault in the Amazon Glacier Developer Guide.
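    A minimal vault-creation sketch with the vendored Go SDK follows; the region and vault name are placeholder values.

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/glacier"
    )

    func main() {
        sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
        svc := glacier.New(sess)
        // The vault name is a placeholder; per the guidelines above it must be
        // 1-255 characters drawn from a-z, A-Z, 0-9, '_', '-', and '.'.
        out, err := svc.CreateVault(&glacier.CreateVaultInput{
            AccountId: aws.String("-"),
            VaultName: aws.String("examplevault"),
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("vault URI:", aws.StringValue(out.Location))
    }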

    ", + "DeleteArchive": "

    This operation deletes an archive from a vault. Subsequent requests to initiate a retrieval of this archive will fail. Archive retrievals that are in progress for this archive ID may or may not succeed according to the following scenarios:

    • If the archive retrieval job is actively preparing the data for download when Amazon Glacier receives the delete archive request, the archive retrieval operation might fail.
    • If the archive retrieval job has successfully prepared the archive for download when Amazon Glacier receives the delete archive request, you will be able to download the output.

    This operation is idempotent. Attempting to delete an already-deleted archive does not result in an error.

    An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

    For conceptual information and underlying REST API, go to Deleting an Archive in Amazon Glacier and Delete Archive in the Amazon Glacier Developer Guide.

    ", + "DeleteVault": "

    This operation deletes a vault. Amazon Glacier will delete a vault only if there are no archives in the vault as of the last inventory and there have been no writes to the vault since the last inventory. If either of these conditions is not satisfied, the vault deletion fails (that is, the vault is not removed) and Amazon Glacier returns an error. You can use DescribeVault to return the number of archives in a vault, and you can use Initiate a Job (POST jobs) to initiate a new inventory retrieval for a vault. The inventory contains the archive IDs you use to delete archives using Delete Archive (DELETE archive).

    This operation is idempotent.

    An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

    For conceptual information and underlying REST API, go to Deleting a Vault in Amazon Glacier and Delete Vault in the Amazon Glacier Developer Guide.

    ", + "DeleteVaultAccessPolicy": "

    This operation deletes the access policy associated with the specified vault. The operation is eventually consistent; that is, it might take some time for Amazon Glacier to completely remove the access policy, and you might still see the effect of the policy for a short time after you send the delete request.

    This operation is idempotent. You can invoke delete multiple times, even if there is no policy associated with the vault. For more information about vault access policies, see Amazon Glacier Access Control with Vault Access Policies.

    ", + "DeleteVaultNotifications": "

    This operation deletes the notification configuration set for a vault. The operation is eventually consistent; that is, it might take some time for Amazon Glacier to completely disable the notifications, and you might still receive some notifications for a short time after you send the delete request.

    An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

    For conceptual information and underlying REST API, go to Configuring Vault Notifications in Amazon Glacier and Delete Vault Notification Configuration in the Amazon Glacier Developer Guide.

    ", + "DescribeJob": "

    This operation returns information about a job you previously initiated, including the job initiation date, the user who initiated the job, the job status code/message and the Amazon SNS topic to notify after Amazon Glacier completes the job. For more information about initiating a job, see InitiateJob.

    This operation enables you to check the status of your job. However, it is strongly recommended that you set up an Amazon SNS topic and specify it in your initiate job request so that Amazon Glacier can notify the topic after it completes the job.

    A job ID will not expire for at least 24 hours after Amazon Glacier completes the job.

    An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

    For information about the underlying REST API, go to Working with Archives in Amazon Glacier in the Amazon Glacier Developer Guide.

    ", + "DescribeVault": "

    This operation returns information about a vault, including the vault's Amazon Resource Name (ARN), the date the vault was created, the number of archives it contains, and the total size of all the archives in the vault. The number of archives and their total size are as of the last inventory generation. This means that if you add or remove an archive from a vault, and then immediately use Describe Vault, the change in contents will not be immediately reflected. If you want to retrieve the latest inventory of the vault, use InitiateJob. Amazon Glacier generates vault inventories approximately daily. For more information, see Downloading a Vault Inventory in Amazon Glacier.

    An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

    For conceptual information and underlying REST API, go to Retrieving Vault Metadata in Amazon Glacier and Describe Vault in the Amazon Glacier Developer Guide.

    ", + "GetDataRetrievalPolicy": "

    This operation returns the current data retrieval policy for the account and region specified in the GET request. For more information about data retrieval policies, see Amazon Glacier Data Retrieval Policies.

    ", + "GetJobOutput": "

    This operation downloads the output of the job you initiated using InitiateJob. Depending on the job type you specified when you initiated the job, the output will be either the content of an archive or a vault inventory.

    A job ID will not expire for at least 24 hours after Amazon Glacier completes the job. That is, you can download the job output within the 24-hour period after Amazon Glacier completes the job.

    If the job output is large, then you can use the Range request header to retrieve a portion of the output. This allows you to download the entire output in smaller chunks of bytes. For example, suppose you have 1 GB of job output you want to download and you decide to download 128 MB chunks of data at a time, which is a total of eight Get Job Output requests. You use the following process to download the job output:

    1. Download a 128 MB chunk of output by specifying the appropriate byte range using the Range header.

    2. Along with the data, the response includes a SHA256 tree hash of the payload. You compute the checksum of the payload on the client and compare it with the checksum you received in the response to ensure you received all the expected data.

    3. Repeat steps 1 and 2 for all the eight 128 MB chunks of output data, each time specifying the appropriate byte range.

    4. After downloading all the parts of the job output, you have a list of eight checksum values. Compute the tree hash of these values to find the checksum of the entire output. Using the DescribeJob API, obtain job information of the job that provided you the output. The response includes the checksum of the entire archive stored in Amazon Glacier. You compare this value with the checksum you computed to ensure you have downloaded the entire archive content with no errors.

    An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

    For conceptual information and the underlying REST API, go to Downloading a Vault Inventory, Downloading an Archive, and Get Job Output.
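    The chunked download loop above can be sketched with the vendored Go SDK, whose glacier package ships a ComputeHashes tree-hash helper; the vault name and job ID are placeholders, and the 128 MB range is tree-hash aligned so the response carries a verifiable checksum.

    package main

    import (
        "bytes"
        "encoding/hex"
        "fmt"
        "io/ioutil"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/glacier"
    )

    func main() {
        sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
        svc := glacier.New(sess)
        out, err := svc.GetJobOutput(&glacier.GetJobOutputInput{
            AccountId: aws.String("-"),
            VaultName: aws.String("examplevault"),      // placeholder vault name
            JobId:     aws.String("EXAMPLE-JOB-ID"),    // placeholder job ID from InitiateJob
            Range:     aws.String("bytes=0-134217727"), // first 128 MB chunk (step 1)
        })
        if err != nil {
            log.Fatal(err)
        }
        defer out.Body.Close()
        data, err := ioutil.ReadAll(out.Body)
        if err != nil {
            log.Fatal(err)
        }
        // Step 2: recompute the SHA256 tree hash of the chunk and compare it
        // with the x-amz-sha256-tree-hash header echoed in out.Checksum.
        local := hex.EncodeToString(glacier.ComputeHashes(bytes.NewReader(data)).TreeHash)
        if local != aws.StringValue(out.Checksum) {
            log.Fatal("tree hash mismatch; retry this chunk")
        }
        fmt.Printf("verified %d bytes\n", len(data))
    }

    Steps 3 and 4 repeat this call with successive Range values and then fold the per-chunk hashes into the whole-archive tree hash reported by DescribeJob.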

    ", + "GetVaultAccessPolicy": "

    This operation retrieves the access-policy subresource set on the vault; for more information on setting this subresource, see Set Vault Access Policy (PUT access-policy). If there is no access policy set on the vault, the operation returns a 404 Not found error. For more information about vault access policies, see Amazon Glacier Access Control with Vault Access Policies.

    ", + "GetVaultLock": "

    This operation retrieves the following attributes from the lock-policy subresource set on the specified vault:

    • The vault lock policy set on the vault.

    • The state of the vault lock, which is either InProgress or Locked.

    • When the lock ID expires. The lock ID is used to complete the vault locking process.

    • When the vault lock was initiated and put into the InProgress state.

    A vault lock is put into the InProgress state by calling InitiateVaultLock. A vault lock is put into the Locked state by calling CompleteVaultLock. You can abort the vault locking process by calling AbortVaultLock. For more information about the vault locking process, see Amazon Glacier Vault Lock.

    If there is no vault lock policy set on the vault, the operation returns a 404 Not found error. For more information about vault lock policies, see Amazon Glacier Access Control with Vault Lock Policies.

    ", + "GetVaultNotifications": "

    This operation retrieves the notification-configuration subresource of the specified vault.

    For information about setting a notification configuration on a vault, see SetVaultNotifications. If a notification configuration for a vault is not set, the operation returns a 404 Not Found error. For more information about vault notifications, see Configuring Vault Notifications in Amazon Glacier.

    An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

    For conceptual information and underlying REST API, go to Configuring Vault Notifications in Amazon Glacier and Get Vault Notification Configuration in the Amazon Glacier Developer Guide.

    ", + "InitiateJob": "

    This operation initiates a job of the specified type. In this release, you can initiate a job to retrieve either an archive or a vault inventory (a list of archives in a vault).

    Retrieving data from Amazon Glacier is a two-step process:

    1. Initiate a retrieval job.

      A data retrieval policy can cause your initiate retrieval job request to fail with a PolicyEnforcedException exception. For more information about data retrieval policies, see Amazon Glacier Data Retrieval Policies. For more information about the PolicyEnforcedException exception, see Error Responses.

    2. After the job completes, download the bytes.

    The retrieval request is executed asynchronously. When you initiate a retrieval job, Amazon Glacier creates a job and returns a job ID in the response. When Amazon Glacier completes the job, you can get the job output (archive or inventory data). For information about getting job output, see the GetJobOutput operation.

    The job must complete before you can get its output. To determine when a job is complete, you have the following options:

    • Use Amazon SNS Notification: You can specify an Amazon Simple Notification Service (Amazon SNS) topic to which Amazon Glacier can post a notification after the job is completed. You can specify an SNS topic per job request. The notification is sent only after Amazon Glacier completes the job. In addition to specifying an SNS topic per job request, you can configure vault notifications for a vault so that job notifications are always sent. For more information, see SetVaultNotifications.

    • Get job details: You can make a DescribeJob request to obtain job status information while a job is in progress. However, it is more efficient to use an Amazon SNS notification to determine when a job is complete.

    The information you get via notification is the same as what you get by calling DescribeJob.

    If, for a specific event, you add both a notification configuration on the vault and specify an SNS topic in your initiate job request, Amazon Glacier sends both notifications. For more information, see SetVaultNotifications.

    An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

    About the Vault Inventory

    Amazon Glacier prepares an inventory for each vault periodically, every 24 hours. When you initiate a job for a vault inventory, Amazon Glacier returns the last inventory for the vault. The inventory data you get might be up to a day or two old. Also, the initiate inventory job might take some time to complete before you can download the vault inventory, so you should not retrieve a vault inventory for every vault operation. However, in some scenarios, you might find the vault inventory useful. For example, when you upload an archive, you can provide an archive description but not an archive name. Amazon Glacier provides you a unique archive ID, an opaque string of characters, so you might maintain your own database that maps archive names to their corresponding Amazon Glacier-assigned archive IDs. You might find the vault inventory useful in the event you need to reconcile information in your database with the actual vault inventory.

    Range Inventory Retrieval

    You can limit the number of inventory items retrieved by filtering on the archive creation date or by setting a limit.

    Filtering by Archive Creation Date

    You can retrieve inventory items for archives created between StartDate and EndDate by specifying values for these parameters in the InitiateJob request. Archives created on or after the StartDate and before the EndDate will be returned. If you only provide the StartDate without the EndDate, you will retrieve the inventory for all archives created on or after the StartDate. If you only provide the EndDate without the StartDate, you will get back the inventory for all archives created before the EndDate.

    Limiting Inventory Items per Retrieval

    You can limit the number of inventory items returned by setting the Limit parameter in the InitiateJob request. The inventory job output will contain inventory items up to the specified Limit. If there are more inventory items available, the result is paginated. After a job is complete you can use the DescribeJob operation to get a marker that you use in a subsequent InitiateJob request. The marker will indicate the starting point to retrieve the next set of inventory items. You can page through your entire inventory by repeatedly making InitiateJob requests with the marker from the previous DescribeJob output, until you get a marker from DescribeJob that returns null, indicating that there are no more inventory items available.

    You can use the Limit parameter together with the date range parameters.

    About Ranged Archive Retrieval

    You can initiate an archive retrieval for the whole archive or a range of the archive. In the case of ranged archive retrieval, you specify a byte range to return or the whole archive. The range specified must be megabyte (MB) aligned; that is, the range start value must be divisible by 1 MB, and the range end value plus 1 must be divisible by 1 MB or equal the end of the archive. If the ranged archive retrieval is not megabyte aligned, this operation returns a 400 response. Furthermore, to ensure that you get checksum values for data you download using the Get Job Output API, the range must be tree-hash aligned.

    An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

    For conceptual information and the underlying REST API, go to Initiate a Job and Downloading a Vault Inventory.
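    As a sketch of a range inventory retrieval with the vendored Go SDK (the vault name, start date, and limit below are placeholder values):

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/glacier"
    )

    func main() {
        sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
        svc := glacier.New(sess)
        out, err := svc.InitiateJob(&glacier.InitiateJobInput{
            AccountId: aws.String("-"),
            VaultName: aws.String("examplevault"), // placeholder vault name
            JobParameters: &glacier.JobParameters{
                Type:   aws.String("inventory-retrieval"),
                Format: aws.String("JSON"),
                // Filter by archive creation date and cap the item count.
                InventoryRetrievalParameters: &glacier.InventoryRetrievalJobInput{
                    StartDate: aws.String("2013-03-20T17:03:43Z"), // placeholder date
                    Limit:     aws.String("100"),
                },
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        // Poll DescribeJob, or subscribe via an SNS topic, until the job completes.
        fmt.Println("job ID:", aws.StringValue(out.JobId))
    }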

    ", + "InitiateMultipartUpload": "

    This operation initiates a multipart upload. Amazon Glacier creates a multipart upload resource and returns its ID in the response. The multipart upload ID is used in subsequent requests to upload parts of an archive (see UploadMultipartPart).

    When you initiate a multipart upload, you specify the part size in number of bytes. The part size must be a megabyte (1024 KB) multiplied by a power of 2; for example, 1048576 (1 MB), 2097152 (2 MB), 4194304 (4 MB), 8388608 (8 MB), and so on. The minimum allowable part size is 1 MB, and the maximum is 4 GB.

    Every part you upload to this resource (see UploadMultipartPart), except the last one, must have the same size. The last one can be the same size or smaller. For example, suppose you want to upload a 16.2 MB file. If you initiate the multipart upload with a part size of 4 MB, you will upload four parts of 4 MB each and one part of 0.2 MB.

    You don't need to know the size of the archive when you start a multipart upload because Amazon Glacier does not require you to specify the overall archive size.

    After you complete the multipart upload, Amazon Glacier removes the multipart upload resource referenced by the ID. Amazon Glacier also removes the multipart upload resource if you cancel the multipart upload, or it may remove the resource if there is no activity for a period of 24 hours.

    An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

    For conceptual information and underlying REST API, go to Uploading Large Archives in Parts (Multipart Upload) and Initiate Multipart Upload in the Amazon Glacier Developer Guide.
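    A sketch of initiating an upload with a 4 MB part size (1 MB multiplied by 2^2, so a valid size) using the vendored Go SDK; the vault name and description are placeholders.

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/glacier"
    )

    func main() {
        sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
        svc := glacier.New(sess)
        out, err := svc.InitiateMultipartUpload(&glacier.InitiateMultipartUploadInput{
            AccountId:          aws.String("-"),
            VaultName:          aws.String("examplevault"),   // placeholder vault name
            ArchiveDescription: aws.String("example archive"), // placeholder description
            PartSize:           aws.String("4194304"),         // 4 MB = 1048576 * 2^2
        })
        if err != nil {
            log.Fatal(err)
        }
        // Pass this ID to UploadMultipartPart and CompleteMultipartUpload.
        fmt.Println("upload ID:", aws.StringValue(out.UploadId))
    }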

    ", + "InitiateVaultLock": "

    This operation initiates the vault locking process by doing the following:

    • Installing a vault lock policy on the specified vault.

    • Setting the lock state of vault lock to InProgress.

    • Returning a lock ID, which is used to complete the vault locking process.

    You can set one vault lock policy for each vault and this policy can be up to 20 KB in size. For more information about vault lock policies, see Amazon Glacier Access Control with Vault Lock Policies.

    You must complete the vault locking process within 24 hours after the vault lock enters the InProgress state. After the 24-hour window ends, the lock ID expires, the vault automatically exits the InProgress state, and the vault lock policy is removed from the vault. You call CompleteVaultLock to complete the vault locking process by setting the state of the vault lock to Locked.

    After a vault lock is in the Locked state, you cannot initiate a new vault lock for the vault.

    You can abort the vault locking process by calling AbortVaultLock. You can get the state of the vault lock by calling GetVaultLock. For more information about the vault locking process, see Amazon Glacier Vault Lock.

    If this operation is called when the vault lock is in the InProgress state, the operation returns an AccessDeniedException error. When the vault lock is in the InProgress state you must call AbortVaultLock before you can initiate a new vault lock policy.
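    The locking sequence can be sketched with the vendored Go SDK as follows; the vault name and the deny-delete policy document are illustrative placeholders, not a recommended policy.

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/glacier"
    )

    func main() {
        sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
        svc := glacier.New(sess)
        // Placeholder vault lock policy document (at most 20 KB).
        policyJSON := `{"Version":"2012-10-17","Statement":[{"Sid":"deny-deletes","Principal":"*","Effect":"Deny","Action":"glacier:DeleteArchive","Resource":"*"}]}`
        out, err := svc.InitiateVaultLock(&glacier.InitiateVaultLockInput{
            AccountId: aws.String("-"),
            VaultName: aws.String("examplevault"), // placeholder vault name
            Policy:    &glacier.VaultLockPolicy{Policy: aws.String(policyJSON)},
        })
        if err != nil {
            log.Fatal(err)
        }
        // The lock ID must be passed to CompleteVaultLock within 24 hours,
        // or the lock expires and the policy is removed from the vault.
        fmt.Println("lock ID:", aws.StringValue(out.LockId))
    }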

    ", + "ListJobs": "

    This operation lists jobs for a vault, including jobs that are in progress and jobs that have recently finished.

    Amazon Glacier retains recently completed jobs for a period before deleting them; however, it eventually removes completed jobs. The output of completed jobs can be retrieved. Retaining completed jobs for a period of time after they have completed enables you to get a job output in the event you miss the job completion notification or your first attempt to download it fails. For example, suppose you start an archive retrieval job to download an archive. After the job completes, you start to download the archive but encounter a network error. In this scenario, you can retry and download the archive while the job exists.

    To retrieve an archive or retrieve a vault inventory from Amazon Glacier, you first initiate a job, and after the job completes, you download the data. For an archive retrieval, the output is the archive data, and for an inventory retrieval, it is the inventory list. The List Jobs operation returns a list of these jobs sorted by job initiation time.

    The List Jobs operation supports pagination. By default, this operation returns up to 1,000 jobs in the response. You should always check the response for a marker at which to continue the list; if there are no more items, the marker is null. To return a list of jobs that begins at a specific job, set the marker request parameter to the value you obtained from a previous List Jobs request. You can also limit the number of jobs returned in the response by specifying the limit parameter in the request.

    Additionally, you can filter the jobs list returned by specifying optional statuscode (InProgress, Succeeded, or Failed) and completed (true, false) parameters. The statuscode parameter allows you to specify that only jobs that match a specified status are returned. The completed parameter allows you to specify that only jobs in a specific completion state are returned.

    An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

    For the underlying REST API, go to List Jobs.
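    The marker-based pagination and filtering described above might be driven like this with the vendored Go SDK (the vault name and filter values are placeholders):

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/glacier"
    )

    func main() {
        sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
        svc := glacier.New(sess)
        input := &glacier.ListJobsInput{
            AccountId:  aws.String("-"),
            VaultName:  aws.String("examplevault"), // placeholder vault name
            Completed:  aws.String("true"),         // only jobs that have finished
            Statuscode: aws.String("Succeeded"),    // only jobs that succeeded
            Limit:      aws.String("50"),
        }
        for {
            out, err := svc.ListJobs(input)
            if err != nil {
                log.Fatal(err)
            }
            for _, job := range out.JobList {
                fmt.Println(aws.StringValue(job.JobId), aws.StringValue(job.StatusCode))
            }
            if out.Marker == nil { // a null marker means no more items
                break
            }
            input.Marker = out.Marker // continue the list where it left off
        }
    }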

    ", + "ListMultipartUploads": "

    This operation lists in-progress multipart uploads for the specified vault. An in-progress multipart upload is a multipart upload that has been initiated by an InitiateMultipartUpload request, but has not yet been completed or aborted. The list returned in the List Multipart Uploads response has no guaranteed order.

    The List Multipart Uploads operation supports pagination. By default, this operation returns up to 1,000 multipart uploads in the response. You should always check the response for a marker at which to continue the list; if there are no more items, the marker is null. To return a list of multipart uploads that begins at a specific upload, set the marker request parameter to the value you obtained from a previous List Multipart Uploads request. You can also limit the number of uploads returned in the response by specifying the limit parameter in the request.

    Note the difference between this operation and listing parts (ListParts). The List Multipart Uploads operation lists all multipart uploads for a vault and does not require a multipart upload ID. The List Parts operation requires a multipart upload ID since parts are associated with a single upload.

    An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

    For conceptual information and the underlying REST API, go to Working with Archives in Amazon Glacier and List Multipart Uploads in the Amazon Glacier Developer Guide.

    ", + "ListParts": "

    This operation lists the parts of an archive that have been uploaded in a specific multipart upload. You can make this request at any time during an in-progress multipart upload before you complete the upload (see CompleteMultipartUpload). List Parts returns an error for completed uploads. The list returned in the List Parts response is sorted by part range.

    The List Parts operation supports pagination. By default, this operation returns up to 1,000 uploaded parts in the response. You should always check the response for a marker at which to continue the list; if there are no more items, the marker is null. To return a list of parts that begins at a specific part, set the marker request parameter to the value you obtained from a previous List Parts request. You can also limit the number of parts returned in the response by specifying the limit parameter in the request.

    An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

    For conceptual information and the underlying REST API, go to Working with Archives in Amazon Glacier and List Parts in the Amazon Glacier Developer Guide.

    ", + "ListTagsForVault": "

    This operation lists all the tags attached to a vault. The operation returns an empty map if there are no tags. For more information about tags, see Tagging Amazon Glacier Resources.

    ", + "ListVaults": "

    This operation lists all vaults owned by the calling user's account. The list returned in the response is ASCII-sorted by vault name.

    By default, this operation returns up to 1,000 items. If there are more vaults to list, the response marker field contains the vault Amazon Resource Name (ARN) at which to continue the list with a new List Vaults request; otherwise, the marker field is null. To return a list of vaults that begins at a specific vault, set the marker request parameter to the vault ARN you obtained from a previous List Vaults request. You can also limit the number of vaults returned in the response by specifying the limit parameter in the request.

    An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

    For conceptual information and underlying REST API, go to Retrieving Vault Metadata in Amazon Glacier and List Vaults in the Amazon Glacier Developer Guide.

    ", + "RemoveTagsFromVault": "

    This operation removes one or more tags from the set of tags attached to a vault. For more information about tags, see Tagging Amazon Glacier Resources. This operation is idempotent. The operation will be successful, even if there are no tags attached to the vault.

    ", + "SetDataRetrievalPolicy": "

    This operation sets and then enacts a data retrieval policy in the region specified in the PUT request. You can set one policy per region for an AWS account. The policy is enacted within a few minutes of a successful PUT operation.

    The set policy operation does not affect retrieval jobs that were in progress before the policy was enacted. For more information about data retrieval policies, see Amazon Glacier Data Retrieval Policies.
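    A sketch with the vendored Go SDK, assuming a BytesPerHour strategy with an illustrative 1 GB/hour cap:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/glacier"
    )

    func main() {
        sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
        svc := glacier.New(sess)
        // Only one rule may be set; BytesPerHour is required (and only valid)
        // when the strategy is "BytesPerHour". The cap below is illustrative.
        _, err := svc.SetDataRetrievalPolicy(&glacier.SetDataRetrievalPolicyInput{
            AccountId: aws.String("-"),
            Policy: &glacier.DataRetrievalPolicy{
                Rules: []*glacier.DataRetrievalRule{{
                    Strategy:     aws.String("BytesPerHour"),
                    BytesPerHour: aws.Int64(1073741824), // 1 GB per hour
                }},
            },
        })
        if err != nil {
            log.Fatal(err)
        }
    }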

    ", + "SetVaultAccessPolicy": "

    This operation configures an access policy for a vault and will overwrite an existing policy. To configure a vault access policy, send a PUT request to the access-policy subresource of the vault. An access policy is specific to a vault and is also called a vault subresource. You can set one access policy per vault and the policy can be up to 20 KB in size. For more information about vault access policies, see Amazon Glacier Access Control with Vault Access Policies.

    ", + "SetVaultNotifications": "

    This operation configures notifications that will be sent when specific events happen to a vault. By default, you don't get any notifications.

    To configure vault notifications, send a PUT request to the notification-configuration subresource of the vault. The request should include a JSON document that provides an Amazon SNS topic and specific events for which you want Amazon Glacier to send notifications to the topic.

    The Amazon SNS topic must grant the vault permission to publish notifications to it. You can configure a vault to publish a notification for the following vault events:

    • ArchiveRetrievalCompleted: This event occurs when a job that was initiated for an archive retrieval is completed (InitiateJob). The status of the completed job can be \"Succeeded\" or \"Failed\". The notification sent to the SNS topic is the same output as returned from DescribeJob.
    • InventoryRetrievalCompleted: This event occurs when a job that was initiated for an inventory retrieval is completed (InitiateJob). The status of the completed job can be \"Succeeded\" or \"Failed\". The notification sent to the SNS topic is the same output as returned from DescribeJob.

    An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

    For conceptual information and underlying REST API, go to Configuring Vault Notifications in Amazon Glacier and Set Vault Notification Configuration in the Amazon Glacier Developer Guide.
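    As a sketch with the vendored Go SDK; the vault name and topic ARN are placeholders, and the topic must allow the vault to publish to it as noted above.

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/glacier"
    )

    func main() {
        sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
        svc := glacier.New(sess)
        _, err := svc.SetVaultNotifications(&glacier.SetVaultNotificationsInput{
            AccountId: aws.String("-"),
            VaultName: aws.String("examplevault"), // placeholder vault name
            VaultNotificationConfig: &glacier.VaultNotificationConfig{
                // Placeholder topic ARN.
                SNSTopic: aws.String("arn:aws:sns:us-west-2:111122223333:examplevault-events"),
                Events: aws.StringSlice([]string{
                    "ArchiveRetrievalCompleted",
                    "InventoryRetrievalCompleted",
                }),
            },
        })
        if err != nil {
            log.Fatal(err)
        }
    }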

    ", + "UploadArchive": "

    This operation adds an archive to a vault. This is a synchronous operation, and for a successful upload, your data is durably persisted. Amazon Glacier returns the archive ID in the x-amz-archive-id header of the response.

    You must use the archive ID to access your data in Amazon Glacier. After you upload an archive, you should save the archive ID returned so that you can retrieve or delete the archive later. Besides saving the archive ID, you can also index it and give it a friendly name to allow for better searching. You can also use the optional archive description field to specify how the archive is referred to in an external index of archives, such as you might create in Amazon DynamoDB. You can also get the vault inventory to obtain a list of archive IDs in a vault. For more information, see InitiateJob.

    You must provide a SHA256 tree hash of the data you are uploading. For information about computing a SHA256 tree hash, see Computing Checksums.

    You can optionally specify an archive description of up to 1,024 printable ASCII characters. You can get the archive description when you either retrieve the archive or get the vault inventory. For more information, see InitiateJob. Amazon Glacier does not interpret the description in any way. An archive description does not need to be unique. You cannot use the description to retrieve or sort the archive list.

    Archives are immutable. After you upload an archive, you cannot edit the archive or its description.

    An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

    For conceptual information and underlying REST API, go to Uploading an Archive in Amazon Glacier and Upload Archive in the Amazon Glacier Developer Guide.
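    A single-shot upload sketch with the vendored Go SDK; the file and vault names are placeholders. This SDK's Glacier customization computes the required SHA256 tree hash from a seekable body when no Checksum is supplied, which is why none is set here.

    package main

    import (
        "fmt"
        "log"
        "os"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/glacier"
    )

    func main() {
        f, err := os.Open("archive.tar") // placeholder file name
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()

        sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
        svc := glacier.New(sess)
        out, err := svc.UploadArchive(&glacier.UploadArchiveInput{
            AccountId:          aws.String("-"),
            VaultName:          aws.String("examplevault"),    // placeholder vault name
            ArchiveDescription: aws.String("example archive"), // optional, up to 1,024 printable ASCII characters
            Body:               f,                             // io.ReadSeeker; tree hash is derived from it
        })
        if err != nil {
            log.Fatal(err)
        }
        // Save this ID: it is the only handle for retrieving or deleting the archive.
        fmt.Println("archive ID:", aws.StringValue(out.ArchiveId))
    }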

    ", + "UploadMultipartPart": "

    This operation uploads a part of an archive. You can upload archive parts in any order. You can also upload them in parallel. You can upload up to 10,000 parts for a multipart upload.

    Amazon Glacier rejects your upload part request if any of the following conditions is true:

    • SHA256 tree hash does not match: To ensure that part data is not corrupted in transmission, you compute a SHA256 tree hash of the part and include it in your request. Upon receiving the part data, Amazon Glacier also computes a SHA256 tree hash. If these hash values don't match, the operation fails. For information about computing a SHA256 tree hash, see Computing Checksums.

    • Part size does not match: The size of each part except the last must match the size specified in the corresponding InitiateMultipartUpload request. The size of the last part must be the same size as, or smaller than, the specified size.

      If you upload a part whose size is smaller than the part size you specified in your initiate multipart upload request and that part is not the last part, then the upload part request will succeed. However, the subsequent Complete Multipart Upload request will fail.

    • Range does not align: The byte range value in the request does not align with the part size specified in the corresponding initiate request. For example, if you specify a part size of 4194304 bytes (4 MB), then 0 to 4194303 bytes (4 MB - 1) and 4194304 (4 MB) to 8388607 (8 MB - 1) are valid part ranges. However, if you set a range value of 2 MB to 6 MB, the range does not align with the part size and the upload will fail.

    This operation is idempotent. If you upload the same part multiple times, the data included in the most recent request overwrites the previously uploaded data.

    An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

    For conceptual information and underlying REST API, go to Uploading Large Archives in Parts (Multipart Upload) and Upload Part in the Amazon Glacier Developer Guide.
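    The range arithmetic can be sketched with the vendored Go SDK as follows; the vault name, upload ID, and in-memory part data are placeholders (a real uploader would read each part from the source file).

    package main

    import (
        "bytes"
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/glacier"
    )

    func main() {
        sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
        svc := glacier.New(sess)
        const partSize = 4194304       // must match the size given to InitiateMultipartUpload
        part := make([]byte, partSize) // placeholder part data
        offset := int64(0)             // first part; the next part starts at 4194304

        _, err := svc.UploadMultipartPart(&glacier.UploadMultipartPartInput{
            AccountId: aws.String("-"),
            VaultName: aws.String("examplevault"),      // placeholder vault name
            UploadId:  aws.String("EXAMPLE-UPLOAD-ID"), // placeholder upload ID
            // Content-Range must align with the part size: 0-4194303, 4194304-8388607, ...
            Range: aws.String(fmt.Sprintf("bytes %d-%d/*", offset, offset+partSize-1)),
            Body:  bytes.NewReader(part),
        })
        if err != nil {
            log.Fatal(err)
        }
    }

    Because the operation is idempotent, re-sending the same range after a failure simply overwrites the earlier data for that part.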

    " + }, + "service": "

    Amazon Glacier is a storage solution for \"cold data.\"

    Amazon Glacier is an extremely low-cost storage service that provides secure, durable, and easy-to-use storage for data backup and archival. With Amazon Glacier, customers can store their data cost effectively for months, years, or decades. Amazon Glacier also enables customers to offload the administrative burdens of operating and scaling storage to AWS, so they don't have to worry about capacity planning, hardware provisioning, data replication, hardware failure and recovery, or time-consuming hardware migrations.

    Amazon Glacier is a great storage choice when low storage cost is paramount, your data is rarely retrieved, and retrieval latency of several hours is acceptable. If your application requires fast or frequent access to your data, consider using Amazon S3. For more information, go to Amazon Simple Storage Service (Amazon S3).

    You can store any kind of data in any format. There is no maximum limit on the total amount of data you can store in Amazon Glacier.

    If you are a first-time user of Amazon Glacier, we recommend that you begin by reading the following sections in the Amazon Glacier Developer Guide:

    • What is Amazon Glacier - This section of the Developer Guide describes the underlying data model, the operations it supports, and the AWS SDKs that you can use to interact with the service.

    • Getting Started with Amazon Glacier - The Getting Started section walks you through the process of creating a vault, uploading archives, creating jobs to download archives, retrieving the job output, and deleting archives.

    ", + "shapes": { + "AbortMultipartUploadInput": { + "base": "

    Provides options to abort a multipart upload identified by the upload ID.

    For information about the underlying REST API, go to Abort Multipart Upload. For conceptual information, go to Working with Archives in Amazon Glacier.

    ", + "refs": { + } + }, + "AbortVaultLockInput": { + "base": "

    The input values for AbortVaultLock.

    ", + "refs": { + } + }, + "ActionCode": { + "base": null, + "refs": { + "GlacierJobDescription$Action": "

    The job type. It is either ArchiveRetrieval or InventoryRetrieval.

    " + } + }, + "AddTagsToVaultInput": { + "base": "

    The input values for AddTagsToVault.

    ", + "refs": { + } + }, + "ArchiveCreationOutput": { + "base": "

    Contains the Amazon Glacier response to your request.

    For information about the underlying REST API, go to Upload Archive. For conceptual information, go to Working with Archives in Amazon Glacier.

    ", + "refs": { + } + }, + "CompleteMultipartUploadInput": { + "base": "

    Provides options to complete a multipart upload operation. This informs Amazon Glacier that all the archive parts have been uploaded and Amazon Glacier can now assemble the archive from the uploaded parts. After assembling and saving the archive to the vault, Amazon Glacier returns the URI path of the newly created archive resource.

    ", + "refs": { + } + }, + "CompleteVaultLockInput": { + "base": "

    The input values for CompleteVaultLock.

    ", + "refs": { + } + }, + "CreateVaultInput": { + "base": "

    Provides options to create a vault.

    ", + "refs": { + } + }, + "CreateVaultOutput": { + "base": "

    Contains the Amazon Glacier response to your request.

    ", + "refs": { + } + }, + "DataRetrievalPolicy": { + "base": "

    Data retrieval policy.

    ", + "refs": { + "GetDataRetrievalPolicyOutput$Policy": "

    Contains the returned data retrieval policy in JSON format.

    ", + "SetDataRetrievalPolicyInput$Policy": "

    The data retrieval policy in JSON format.

    " + } + }, + "DataRetrievalRule": { + "base": "

    Data retrieval policy rule.

    ", + "refs": { + "DataRetrievalRulesList$member": null + } + }, + "DataRetrievalRulesList": { + "base": null, + "refs": { + "DataRetrievalPolicy$Rules": "

    The policy rule. Although this is a list type, currently there must be only one rule, which contains a Strategy field and optionally a BytesPerHour field.

    " + } + }, + "DateTime": { + "base": null, + "refs": { + "InventoryRetrievalJobDescription$StartDate": "

    The start of the date range in UTC for vault inventory retrieval that includes archives created on or after this date. A string representation of ISO 8601 date format, for example, 2013-03-20T17:03:43Z.

    ", + "InventoryRetrievalJobDescription$EndDate": "

    The end of the date range in UTC for vault inventory retrieval that includes archives created before this date. A string representation of ISO 8601 date format, for example, 2013-03-20T17:03:43Z.

    " + } + }, + "DeleteArchiveInput": { + "base": "

    Provides options for deleting an archive from an Amazon Glacier vault.

    ", + "refs": { + } + }, + "DeleteVaultAccessPolicyInput": { + "base": "

    DeleteVaultAccessPolicy input.

    ", + "refs": { + } + }, + "DeleteVaultInput": { + "base": "

    Provides options for deleting a vault from Amazon Glacier.

    ", + "refs": { + } + }, + "DeleteVaultNotificationsInput": { + "base": "

    Provides options for deleting a vault notification configuration from an Amazon Glacier vault.

    ", + "refs": { + } + }, + "DescribeJobInput": { + "base": "

    Provides options for retrieving a job description.

    ", + "refs": { + } + }, + "DescribeVaultInput": { + "base": "

    Provides options for retrieving metadata for a specific vault in Amazon Glacier.

    ", + "refs": { + } + }, + "DescribeVaultOutput": { + "base": "

    Contains the Amazon Glacier response to your request.

    ", + "refs": { + "VaultList$member": null + } + }, + "GetDataRetrievalPolicyInput": { + "base": "

    Input for GetDataRetrievalPolicy.

    ", + "refs": { + } + }, + "GetDataRetrievalPolicyOutput": { + "base": "

    Contains the Amazon Glacier response to the GetDataRetrievalPolicy request.

    ", + "refs": { + } + }, + "GetJobOutputInput": { + "base": "

    Provides options for downloading output of an Amazon Glacier job.

    ", + "refs": { + } + }, + "GetJobOutputOutput": { + "base": "

    Contains the Amazon Glacier response to your request.

    ", + "refs": { + } + }, + "GetVaultAccessPolicyInput": { + "base": "

    Input for GetVaultAccessPolicy.

    ", + "refs": { + } + }, + "GetVaultAccessPolicyOutput": { + "base": "

    Output for GetVaultAccessPolicy.

    ", + "refs": { + } + }, + "GetVaultLockInput": { + "base": "

    The input values for GetVaultLock.

    ", + "refs": { + } + }, + "GetVaultLockOutput": { + "base": "

    Contains the Amazon Glacier response to your request.

    ", + "refs": { + } + }, + "GetVaultNotificationsInput": { + "base": "

    Provides options for retrieving the notification configuration set on an Amazon Glacier vault.

    ", + "refs": { + } + }, + "GetVaultNotificationsOutput": { + "base": "

    Contains the Amazon Glacier response to your request.

    ", + "refs": { + } + }, + "GlacierJobDescription": { + "base": "

    Describes an Amazon Glacier job.

    ", + "refs": { + "JobList$member": null + } + }, + "InitiateJobInput": { + "base": "

    Provides options for initiating an Amazon Glacier job.

    ", + "refs": { + } + }, + "InitiateJobOutput": { + "base": "

    Contains the Amazon Glacier response to your request.

    ", + "refs": { + } + }, + "InitiateMultipartUploadInput": { + "base": "

    Provides options for initiating a multipart upload to an Amazon Glacier vault.

    ", + "refs": { + } + }, + "InitiateMultipartUploadOutput": { + "base": "

    The Amazon Glacier response to your request.

    ", + "refs": { + } + }, + "InitiateVaultLockInput": { + "base": "

    The input values for InitiateVaultLock.

    ", + "refs": { + } + }, + "InitiateVaultLockOutput": { + "base": "

    Contains the Amazon Glacier response to your request.

    ", + "refs": { + } + }, + "InvalidParameterValueException": { + "base": "

    Returned if a parameter of the request is incorrectly specified.

    ", + "refs": { + } + }, + "InventoryRetrievalJobDescription": { + "base": "

    Describes the options for a range inventory retrieval job.

    ", + "refs": { + "GlacierJobDescription$InventoryRetrievalParameters": "

    Parameters used for range inventory retrieval.

    " + } + }, + "InventoryRetrievalJobInput": { + "base": "

    Provides options for specifying a range inventory retrieval job.

    ", + "refs": { + "JobParameters$InventoryRetrievalParameters": "

    Input parameters used for range inventory retrieval.

    " + } + }, + "JobList": { + "base": null, + "refs": { + "ListJobsOutput$JobList": "

    A list of job objects. Each job object contains metadata describing the job.

    " + } + }, + "JobParameters": { + "base": "

    Provides options for defining a job.

    ", + "refs": { + "InitiateJobInput$jobParameters": "

    Provides options for specifying job information.

    " + } + }, + "LimitExceededException": { + "base": "

    Returned if the request results in a vault or account limit being exceeded.

    ", + "refs": { + } + }, + "ListJobsInput": { + "base": "

    Provides options for retrieving a job list for an Amazon Glacier vault.

    ", + "refs": { + } + }, + "ListJobsOutput": { + "base": "

    Contains the Amazon Glacier response to your request.

    ", + "refs": { + } + }, + "ListMultipartUploadsInput": { + "base": "

    Provides options for retrieving list of in-progress multipart uploads for an Amazon Glacier vault.

    ", + "refs": { + } + }, + "ListMultipartUploadsOutput": { + "base": "

    Contains the Amazon Glacier response to your request.

    ", + "refs": { + } + }, + "ListPartsInput": { + "base": "

    Provides options for retrieving a list of parts of an archive that have been uploaded in a specific multipart upload.

    ", + "refs": { + } + }, + "ListPartsOutput": { + "base": "

    Contains the Amazon Glacier response to your request.

    ", + "refs": { + } + }, + "ListTagsForVaultInput": { + "base": "

    The input value for ListTagsForVaultInput.

    ", + "refs": { + } + }, + "ListTagsForVaultOutput": { + "base": "

    Contains the Amazon Glacier response to your request.

    ", + "refs": { + } + }, + "ListVaultsInput": { + "base": "

    Provides options to retrieve the vault list owned by the calling user's account. The list provides metadata information for each vault.

    ", + "refs": { + } + }, + "ListVaultsOutput": { + "base": "

    Contains the Amazon Glacier response to your request.

    ", + "refs": { + } + }, + "MissingParameterValueException": { + "base": "

    Returned if a required header or parameter is missing from the request.

    ", + "refs": { + } + }, + "NotificationEventList": { + "base": null, + "refs": { + "VaultNotificationConfig$Events": "

    A list of one or more events for which Amazon Glacier will send a notification to the specified Amazon SNS topic.

    " + } + }, + "NullableLong": { + "base": null, + "refs": { + "DataRetrievalRule$BytesPerHour": "

    The maximum number of bytes that can be retrieved in an hour.

    This field is required only if the value of the Strategy field is BytesPerHour. Your PUT operation will be rejected if the Strategy field is not set to BytesPerHour and you set this field.

    " + } + }, + "PartList": { + "base": null, + "refs": { + "ListPartsOutput$Parts": "

    A list of the part sizes of the multipart upload.

    " + } + }, + "PartListElement": { + "base": "

    A list of the part sizes of the multipart upload.

    ", + "refs": { + "PartList$member": null + } + }, + "PolicyEnforcedException": { + "base": "

    Returned if a retrieval job would exceed the current data policy's retrieval rate limit. For more information about data retrieval policies, see Amazon Glacier Data Retrieval Policies.

    ", + "refs": { + } + }, + "RemoveTagsFromVaultInput": { + "base": "

    The input value for RemoveTagsFromVaultInput.

    ", + "refs": { + } + }, + "RequestTimeoutException": { + "base": "

    Returned if, when uploading an archive, Amazon Glacier times out while receiving the upload.

    ", + "refs": { + } + }, + "ResourceNotFoundException": { + "base": "

    Returned if the specified resource, such as a vault, upload ID, or job ID, does not exist.

    ", + "refs": { + } + }, + "ServiceUnavailableException": { + "base": "

    Returned if the service cannot complete the request.

    ", + "refs": { + } + }, + "SetDataRetrievalPolicyInput": { + "base": "

    SetDataRetrievalPolicy input.

    ", + "refs": { + } + }, + "SetVaultAccessPolicyInput": { + "base": "

    SetVaultAccessPolicy input.

    ", + "refs": { + } + }, + "SetVaultNotificationsInput": { + "base": "

    Provides options to configure notifications that will be sent when specific events happen to a vault.

    ", + "refs": { + } + }, + "Size": { + "base": null, + "refs": { + "GlacierJobDescription$ArchiveSizeInBytes": "

    For an ArchiveRetrieval job, this is the size in bytes of the archive being requested for download. For the InventoryRetrieval job, the value is null.

    ", + "GlacierJobDescription$InventorySizeInBytes": "

    For an InventoryRetrieval job, this is the size in bytes of the inventory requested for download. For the ArchiveRetrieval job, the value is null.

    " + } + }, + "StatusCode": { + "base": null, + "refs": { + "GlacierJobDescription$StatusCode": "

    The status code can be InProgress, Succeeded, or Failed, and indicates the status of the job.

    " + } + }, + "Stream": { + "base": null, + "refs": { + "GetJobOutputOutput$body": "

    The job data, either archive data or inventory data.

    ", + "UploadArchiveInput$body": "

    The data to upload.

    ", + "UploadMultipartPartInput$body": "

    The data to upload.

    " + } + }, + "TagKey": { + "base": null, + "refs": { + "TagMap$key": null + } + }, + "TagKeyList": { + "base": null, + "refs": { + "RemoveTagsFromVaultInput$TagKeys": "

    A list of tag keys. Each corresponding tag is removed from the vault.

    " + } + }, + "TagMap": { + "base": null, + "refs": { + "AddTagsToVaultInput$Tags": "

    The tags to add to the vault. Each tag is composed of a key and a value. The value can be an empty string.

    ", + "ListTagsForVaultOutput$Tags": "

    The tags attached to the vault. Each tag is composed of a key and a value.

    " + } + }, + "TagValue": { + "base": null, + "refs": { + "TagMap$value": null + } + }, + "UploadArchiveInput": { + "base": "

    Provides options to add an archive to a vault.

    ", + "refs": { + } + }, + "UploadListElement": { + "base": "

    A list of in-progress multipart uploads for a vault.

    ", + "refs": { + "UploadsList$member": null + } + }, + "UploadMultipartPartInput": { + "base": "

    Provides options to upload a part of an archive in a multipart upload operation.

    ", + "refs": { + } + }, + "UploadMultipartPartOutput": { + "base": "

    Contains the Amazon Glacier response to your request.

    ", + "refs": { + } + }, + "UploadsList": { + "base": null, + "refs": { + "ListMultipartUploadsOutput$UploadsList": "

    A list of in-progress multipart uploads.

    " + } + }, + "VaultAccessPolicy": { + "base": "

    Contains the vault access policy.

    ", + "refs": { + "GetVaultAccessPolicyOutput$policy": "

    Contains the returned vault access policy as a JSON string.

    ", + "SetVaultAccessPolicyInput$policy": "

    The vault access policy as a JSON string.

    " + } + }, + "VaultList": { + "base": null, + "refs": { + "ListVaultsOutput$VaultList": "

    List of vaults.

    " + } + }, + "VaultLockPolicy": { + "base": "

    Contains the vault lock policy.

    ", + "refs": { + "InitiateVaultLockInput$policy": "

    The vault lock policy as a JSON string, which uses \"\\\" as an escape character.

    " + } + }, + "VaultNotificationConfig": { + "base": "

    Represents a vault's notification configuration.

    ", + "refs": { + "GetVaultNotificationsOutput$vaultNotificationConfig": "

    Returns the notification configuration set on the vault.

    ", + "SetVaultNotificationsInput$vaultNotificationConfig": "

    Provides options for specifying notification configuration.

    " + } + }, + "boolean": { + "base": null, + "refs": { + "GlacierJobDescription$Completed": "

    The job status. When a job is completed, you get the job's output.

    " + } + }, + "httpstatus": { + "base": null, + "refs": { + "GetJobOutputOutput$status": "

    The HTTP response code for a job output request. The value depends on whether a range was specified in the request.

    " + } + }, + "long": { + "base": null, + "refs": { + "DescribeVaultOutput$NumberOfArchives": "

    The number of archives in the vault as of the last inventory date. This field will return null if an inventory has not yet run on the vault, for example, if you just created the vault.

    ", + "DescribeVaultOutput$SizeInBytes": "

    Total size, in bytes, of the archives in the vault as of the last inventory date. This field will return null if an inventory has not yet run on the vault, for example, if you just created the vault.

    ", + "ListPartsOutput$PartSizeInBytes": "

    The part size in bytes.

    ", + "UploadListElement$PartSizeInBytes": "

    The part size, in bytes, specified in the Initiate Multipart Upload request. This is the size of all the parts in the upload except the last part, which may be smaller than this size.

    " + } + }, + "string": { + "base": null, + "refs": { + "AbortMultipartUploadInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

    ", + "AbortMultipartUploadInput$vaultName": "

    The name of the vault.

    ", + "AbortMultipartUploadInput$uploadId": "

    The upload ID of the multipart upload to delete.

    ", + "AbortVaultLockInput$accountId": "

    The AccountId value is the AWS account ID. This value must match the AWS account ID associated with the credentials used to sign the request. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you specify your account ID, do not include any hyphens ('-') in the ID.

    ", + "AbortVaultLockInput$vaultName": "

    The name of the vault.

    ", + "AddTagsToVaultInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

    ", + "AddTagsToVaultInput$vaultName": "

    The name of the vault.

    ", + "ArchiveCreationOutput$location": "

    The relative URI path of the newly added archive resource.

    ", + "ArchiveCreationOutput$checksum": "

    The checksum of the archive computed by Amazon Glacier.

    ", + "ArchiveCreationOutput$archiveId": "

    The ID of the archive. This value is also included as part of the location.

    ", + "CompleteMultipartUploadInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

    ", + "CompleteMultipartUploadInput$vaultName": "

    The name of the vault.

    ", + "CompleteMultipartUploadInput$uploadId": "

    The upload ID of the multipart upload.

    ", + "CompleteMultipartUploadInput$archiveSize": "

    The total size, in bytes, of the entire archive. This value should be the sum of all the sizes of the individual parts that you uploaded.

    ", + "CompleteMultipartUploadInput$checksum": "

    The SHA256 tree hash of the entire archive. It is the tree hash of the SHA256 tree hashes of the individual parts. If the value you specify in the request does not match the SHA256 tree hash of the final assembled archive as computed by Amazon Glacier, Amazon Glacier returns an error and the request fails.

    ", + "CompleteVaultLockInput$accountId": "

    The AccountId value is the AWS account ID. This value must match the AWS account ID associated with the credentials used to sign the request. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you specify your account ID, do not include any hyphens ('-') in the ID.

    ", + "CompleteVaultLockInput$vaultName": "

    The name of the vault.

    ", + "CompleteVaultLockInput$lockId": "

    The lockId value is the lock ID obtained from an InitiateVaultLock request.

    ", + "CreateVaultInput$accountId": "

    The AccountId value is the AWS account ID. This value must match the AWS account ID associated with the credentials used to sign the request. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you specify your account ID, do not include any hyphens ('-') in the ID.

    ", + "CreateVaultInput$vaultName": "

    The name of the vault.

    ", + "CreateVaultOutput$location": "

    The URI of the vault that was created.

    ", + "DataRetrievalRule$Strategy": "

    The type of data retrieval policy to set.

    Valid values: BytesPerHour|FreeTier|None

    ", + "DeleteArchiveInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

    ", + "DeleteArchiveInput$vaultName": "

    The name of the vault.

    ", + "DeleteArchiveInput$archiveId": "

    The ID of the archive to delete.

    ", + "DeleteVaultAccessPolicyInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

    ", + "DeleteVaultAccessPolicyInput$vaultName": "

    The name of the vault.

    ", + "DeleteVaultInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

    ", + "DeleteVaultInput$vaultName": "

    The name of the vault.

    ", + "DeleteVaultNotificationsInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

    ", + "DeleteVaultNotificationsInput$vaultName": "

    The name of the vault.

    ", + "DescribeJobInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

    ", + "DescribeJobInput$vaultName": "

    The name of the vault.

    ", + "DescribeJobInput$jobId": "

    The ID of the job to describe.

    ", + "DescribeVaultInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

    ", + "DescribeVaultInput$vaultName": "

    The name of the vault.

    ", + "DescribeVaultOutput$VaultARN": "

    The Amazon Resource Name (ARN) of the vault.

    ", + "DescribeVaultOutput$VaultName": "

    The name of the vault.

    ", + "DescribeVaultOutput$CreationDate": "

    The UTC date when the vault was created. A string representation of ISO 8601 date format, for example, \"2012-03-20T17:03:43.221Z\".

    ", + "DescribeVaultOutput$LastInventoryDate": "

    The UTC date when Amazon Glacier completed the last vault inventory. A string representation of ISO 8601 date format, for example, \"2012-03-20T17:03:43.221Z\".

    ", + "GetDataRetrievalPolicyInput$accountId": "

    The AccountId value is the AWS account ID. This value must match the AWS account ID associated with the credentials used to sign the request. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you specify your account ID, do not include any hyphens ('-') in the ID.

    ", + "GetJobOutputInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

    ", + "GetJobOutputInput$vaultName": "

    The name of the vault.

    ", + "GetJobOutputInput$jobId": "

    The job ID whose data is downloaded.

    ", + "GetJobOutputInput$range": "

    The range of bytes to retrieve from the output. For example, if you want to download the first 1,048,576 bytes, specify \"Range: bytes=0-1048575\". By default, this operation downloads the entire output.

    ", + "GetJobOutputOutput$checksum": "

    The checksum of the data in the response. This header is returned only when retrieving the output for an archive retrieval job. Furthermore, this header appears only under the following conditions:

    • You get the entire range of the archive.
    • You request a range of the archive to return that starts and ends on a multiple of 1 MB. For example, if you have a 3.1 MB archive and you specify a range to return that starts at 1 MB and ends at 2 MB, then the x-amz-sha256-tree-hash is returned as a response header.
    • You request a range of the archive to return that starts on a multiple of 1 MB and goes to the end of the archive. For example, if you have a 3.1 MB archive and you specify a range that starts at 2 MB and ends at 3.1 MB (the end of the archive), then the x-amz-sha256-tree-hash is returned as a response header.

    ", + "GetJobOutputOutput$contentRange": "

    The range of bytes returned by Amazon Glacier. If only partial output is downloaded, the response provides the range of bytes Amazon Glacier returned. For example, bytes 0-1048575/8388608 returns the first 1 MB from 8 MB.

    ", + "GetJobOutputOutput$acceptRanges": "

    Indicates the range units accepted. For more information, go to RFC2616.

    ", + "GetJobOutputOutput$contentType": "

    The Content-Type depends on whether the job output is an archive or a vault inventory. For archive data, the Content-Type is application/octet-stream. For vault inventory, if you requested CSV format when you initiated the job, the Content-Type is text/csv. Otherwise, by default, vault inventory is returned as JSON, and the Content-Type is application/json.

    ", + "GetJobOutputOutput$archiveDescription": "

    The description of an archive.

    ", + "GetVaultAccessPolicyInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

    ", + "GetVaultAccessPolicyInput$vaultName": "

    The name of the vault.

    ", + "GetVaultLockInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

    ", + "GetVaultLockInput$vaultName": "

    The name of the vault.

    ", + "GetVaultLockOutput$Policy": "

    The vault lock policy as a JSON string, which uses \"\\\" as an escape character.

    ", + "GetVaultLockOutput$State": "

    The state of the vault lock. InProgress or Locked.

    ", + "GetVaultLockOutput$ExpirationDate": "

    The UTC date and time at which the lock ID expires. This value can be null if the vault lock is in a Locked state.

    ", + "GetVaultLockOutput$CreationDate": "

    The UTC date and time at which the vault lock was put into the InProgress state.

    ", + "GetVaultNotificationsInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

    ", + "GetVaultNotificationsInput$vaultName": "

    The name of the vault.

    ", + "GlacierJobDescription$JobId": "

    An opaque string that identifies an Amazon Glacier job.

    ", + "GlacierJobDescription$JobDescription": "

    The job description you provided when you initiated the job.

    ", + "GlacierJobDescription$ArchiveId": "

    For an ArchiveRetrieval job, this is the archive ID requested for download. Otherwise, this field is null.

    ", + "GlacierJobDescription$VaultARN": "

    The Amazon Resource Name (ARN) of the vault from which the archive retrieval was requested.

    ", + "GlacierJobDescription$CreationDate": "

    The UTC date when the job was created. A string representation of ISO 8601 date format, for example, \"2012-03-20T17:03:43.221Z\".

    ", + "GlacierJobDescription$StatusMessage": "

    A friendly message that describes the job status.

    ", + "GlacierJobDescription$SNSTopic": "

    An Amazon Simple Notification Service (Amazon SNS) topic that receives notification.

    ", + "GlacierJobDescription$CompletionDate": "

    The UTC time that the archive retrieval request completed. While the job is in progress, the value will be null.

    ", + "GlacierJobDescription$SHA256TreeHash": "

    For an ArchiveRetrieval job, it is the checksum of the archive. Otherwise, the value is null.

    The SHA256 tree hash value for the requested range of an archive. If the Initiate a Job request for an archive specified a tree-hash aligned range, then this field returns a value.

    For the specific case when the whole archive is retrieved, this value is the same as the ArchiveSHA256TreeHash value.

    This field is null in the following situations:

    • Archive retrieval jobs that specify a range that is not tree-hash aligned.

    • Archival jobs that specify a range that is equal to the whole archive and the job status is InProgress.

    • Inventory jobs.

    ", + "GlacierJobDescription$ArchiveSHA256TreeHash": "

    The SHA256 tree hash of the entire archive for an archive retrieval. For inventory retrieval jobs, this field is null.

    ", + "GlacierJobDescription$RetrievalByteRange": "

    The retrieved byte range for archive retrieval jobs in the form "StartByteValue-EndByteValue". If no range was specified in the archive retrieval, then the whole archive is retrieved and StartByteValue equals 0 and EndByteValue equals the size of the archive minus 1. For inventory retrieval jobs this field is null.

    ", + "InitiateJobInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

    ", + "InitiateJobInput$vaultName": "

    The name of the vault.

    ", + "InitiateJobOutput$location": "

    The relative URI path of the job.

    ", + "InitiateJobOutput$jobId": "

    The ID of the job.

    ", + "InitiateMultipartUploadInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

    ", + "InitiateMultipartUploadInput$vaultName": "

    The name of the vault.

    ", + "InitiateMultipartUploadInput$archiveDescription": "

    The archive description that you are uploading in parts.

    The part size must be a megabyte (1024 KB) multiplied by a power of 2, for example 1048576 (1 MB), 2097152 (2 MB), 4194304 (4 MB), 8388608 (8 MB), and so on. The minimum allowable part size is 1 MB, and the maximum is 4 GB (4096 MB).

    ", + "InitiateMultipartUploadInput$partSize": "

    The size of each part except the last, in bytes. The last part can be smaller than this part size.

    ", + "InitiateMultipartUploadOutput$location": "

    The relative URI path of the multipart upload ID Amazon Glacier created.

    ", + "InitiateMultipartUploadOutput$uploadId": "

    The ID of the multipart upload. This value is also included as part of the location.

    ", + "InitiateVaultLockInput$accountId": "

    The AccountId value is the AWS account ID. This value must match the AWS account ID associated with the credentials used to sign the request. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you specify your account ID, do not include any hyphens ('-') in the ID.

    ", + "InitiateVaultLockInput$vaultName": "

    The name of the vault.

    ", + "InitiateVaultLockOutput$lockId": "

    The lock ID, which is used to complete the vault locking process.

    ", + "InvalidParameterValueException$type": "

    Client

    ", + "InvalidParameterValueException$code": "

    400 Bad Request

    ", + "InvalidParameterValueException$message": null, + "InventoryRetrievalJobDescription$Format": "

    The output format for the vault inventory list, which is set by the InitiateJob request when initiating a job to retrieve a vault inventory. Valid values are \"CSV\" and \"JSON\".

    ", + "InventoryRetrievalJobDescription$Limit": "

    Specifies the maximum number of inventory items returned per vault inventory retrieval request. This limit is set when initiating the job with the InitiateJob request.

    ", + "InventoryRetrievalJobDescription$Marker": "

    An opaque string that represents where to continue pagination of the vault inventory retrieval results. You use the marker in a new InitiateJob request to obtain additional inventory items. If there are no more inventory items, this value is null. For more information, see Range Inventory Retrieval.

    ", + "InventoryRetrievalJobInput$StartDate": "

    The start of the date range in UTC for vault inventory retrieval that includes archives created on or after this date. A string representation of ISO 8601 date format, for example, 2013-03-20T17:03:43Z.

    ", + "InventoryRetrievalJobInput$EndDate": "

    The end of the date range in UTC for vault inventory retrieval that includes archives created before this date. A string representation of ISO 8601 date format, for example, 2013-03-20T17:03:43Z.

    ", + "InventoryRetrievalJobInput$Limit": "

    Specifies the maximum number of inventory items returned per vault inventory retrieval request. Valid values are greater than or equal to 1.

    ", + "InventoryRetrievalJobInput$Marker": "

    An opaque string that represents where to continue pagination of the vault inventory retrieval results. You use the marker in a new InitiateJob request to obtain additional inventory items. If there are no more inventory items, this value is null.

    ", + "JobParameters$Format": "

    When initiating a job to retrieve a vault inventory, you can optionally add this parameter to your request to specify the output format. If you are initiating an inventory job and do not specify a Format field, JSON is the default format. Valid values are \"CSV\" and \"JSON\".

    ", + "JobParameters$Type": "

    The job type. You can initiate a job to retrieve an archive or get an inventory of a vault. Valid values are \"archive-retrieval\" and \"inventory-retrieval\".

    ", + "JobParameters$ArchiveId": "

    The ID of the archive that you want to retrieve. This field is required only if Type is set to archive-retrieval. An error occurs if you specify this request parameter for an inventory retrieval job request.

    ", + "JobParameters$Description": "

    The optional description for the job. The description must be less than or equal to 1,024 bytes. The allowable characters are 7-bit ASCII without control codes; specifically, ASCII values 32-126 decimal or 0x20-0x7E hexadecimal.

    ", + "JobParameters$SNSTopic": "

    The Amazon SNS topic ARN to which Amazon Glacier sends a notification when the job is completed and the output is ready for you to download. The specified topic publishes the notification to its subscribers. The SNS topic must exist.

    ", + "JobParameters$RetrievalByteRange": "

    The byte range to retrieve for an archive retrieval, in the form "StartByteValue-EndByteValue". If not specified, the whole archive is retrieved. If specified, the byte range must be megabyte (1024*1024) aligned, which means that StartByteValue must be divisible by 1 MB and EndByteValue plus 1 must be divisible by 1 MB or be the end of the archive specified as the archive byte size value minus 1. If RetrievalByteRange is not megabyte aligned, this operation returns a 400 response.

    An error occurs if you specify this field for an inventory retrieval job request.

    ", + "LimitExceededException$type": "

    Client

    ", + "LimitExceededException$code": "

    400 Bad Request

    ", + "LimitExceededException$message": null, + "ListJobsInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

    ", + "ListJobsInput$vaultName": "

    The name of the vault.

    ", + "ListJobsInput$limit": "

    Specifies that the response be limited to the specified number of items or fewer. If not specified, the List Jobs operation returns up to 1,000 jobs.

    ", + "ListJobsInput$marker": "

    An opaque string used for pagination. This value specifies the job at which the listing of jobs should begin. Get the marker value from a previous List Jobs response. You need only include the marker if you are continuing the pagination of results started in a previous List Jobs request.

    ", + "ListJobsInput$statuscode": "

    Specifies the type of job status to return. You can specify the following values: \"InProgress\", \"Succeeded\", or \"Failed\".

    ", + "ListJobsInput$completed": "

    Specifies the state of the jobs to return. You can specify true or false.

    ", + "ListJobsOutput$Marker": "

    An opaque string that represents where to continue pagination of the results. You use this value in a new List Jobs request to obtain more jobs in the list. If there are no more jobs, this value is null.

    ", + "ListMultipartUploadsInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

    ", + "ListMultipartUploadsInput$vaultName": "

    The name of the vault.

    ", + "ListMultipartUploadsInput$marker": "

    An opaque string used for pagination. This value specifies the upload at which the listing of uploads should begin. Get the marker value from a previous List Uploads response. You need only include the marker if you are continuing the pagination of results started in a previous List Uploads request.

    ", + "ListMultipartUploadsInput$limit": "

    Specifies the maximum number of uploads returned in the response body. If this value is not specified, the List Uploads operation returns up to 1,000 uploads.

    ", + "ListMultipartUploadsOutput$Marker": "

    An opaque string that represents where to continue pagination of the results. You use the marker in a new List Multipart Uploads request to obtain more uploads in the list. If there are no more uploads, this value is null.

    ", + "ListPartsInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

    ", + "ListPartsInput$vaultName": "

    The name of the vault.

    ", + "ListPartsInput$uploadId": "

    The upload ID of the multipart upload.

    ", + "ListPartsInput$marker": "

    An opaque string used for pagination. This value specifies the part at which the listing of parts should begin. Get the marker value from the response of a previous List Parts response. You need only include the marker if you are continuing the pagination of results started in a previous List Parts request.

    ", + "ListPartsInput$limit": "

    Specifies the maximum number of parts returned in the response body. If this value is not specified, the List Parts operation returns up to 1,000 parts.

    ", + "ListPartsOutput$MultipartUploadId": "

    The ID of the upload to which the parts are associated.

    ", + "ListPartsOutput$VaultARN": "

    The Amazon Resource Name (ARN) of the vault to which the multipart upload was initiated.

    ", + "ListPartsOutput$ArchiveDescription": "

    The description of the archive that was specified in the Initiate Multipart Upload request.

    ", + "ListPartsOutput$CreationDate": "

    The UTC time at which the multipart upload was initiated.

    ", + "ListPartsOutput$Marker": "

    An opaque string that represents where to continue pagination of the results. You use the marker in a new List Parts request to obtain more parts in the list. If there are no more parts, this value is null.

    ", + "ListTagsForVaultInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

    ", + "ListTagsForVaultInput$vaultName": "

    The name of the vault.

    ", + "ListVaultsInput$accountId": "

    The AccountId value is the AWS account ID. This value must match the AWS account ID associated with the credentials used to sign the request. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you specify your account ID, do not include any hyphens ('-') in the ID.

    ", + "ListVaultsInput$marker": "

    A string used for pagination. The marker specifies the vault ARN after which the listing of vaults should begin.

    ", + "ListVaultsInput$limit": "

    The maximum number of items returned in the response. If you don't specify a value, the List Vaults operation returns up to 1,000 items.

    ", + "ListVaultsOutput$Marker": "

    The vault ARN at which to continue pagination of the results. You use the marker in another List Vaults request to obtain more vaults in the list.

    ", + "MissingParameterValueException$type": "

    Client.

    ", + "MissingParameterValueException$code": "

    400 Bad Request

    ", + "MissingParameterValueException$message": null, + "NotificationEventList$member": null, + "PartListElement$RangeInBytes": "

    The byte range of a part, inclusive of the upper value of the range.

    ", + "PartListElement$SHA256TreeHash": "

    The SHA256 tree hash value that Amazon Glacier calculated for the part. This field is never null.

    ", + "PolicyEnforcedException$type": "

    Client

    ", + "PolicyEnforcedException$code": "

    PolicyEnforcedException

    ", + "PolicyEnforcedException$message": "

    InitiateJob request denied by current data retrieval policy.

    ", + "RemoveTagsFromVaultInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

    ", + "RemoveTagsFromVaultInput$vaultName": "

    The name of the vault.

    ", + "RequestTimeoutException$type": "

    Client

    ", + "RequestTimeoutException$code": "

    408 Request Timeout

    ", + "RequestTimeoutException$message": null, + "ResourceNotFoundException$type": "

    Client

    ", + "ResourceNotFoundException$code": "

    404 Not Found

    ", + "ResourceNotFoundException$message": null, + "ServiceUnavailableException$type": "

    Server

    ", + "ServiceUnavailableException$code": "

    500 Internal Server Error

    ", + "ServiceUnavailableException$message": null, + "SetDataRetrievalPolicyInput$accountId": "

    The AccountId value is the AWS account ID. This value must match the AWS account ID associated with the credentials used to sign the request. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you specify your account ID, do not include any hyphens ('-') in the ID.

    ", + "SetVaultAccessPolicyInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

    ", + "SetVaultAccessPolicyInput$vaultName": "

    The name of the vault.

    ", + "SetVaultNotificationsInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

    ", + "SetVaultNotificationsInput$vaultName": "

    The name of the vault.

    ", + "TagKeyList$member": null, + "UploadArchiveInput$vaultName": "

    The name of the vault.

    ", + "UploadArchiveInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

    ", + "UploadArchiveInput$archiveDescription": "

    The optional description of the archive you are uploading.

    ", + "UploadArchiveInput$checksum": "

    The SHA256 tree hash of the data being uploaded.

    ", + "UploadListElement$MultipartUploadId": "

    The ID of a multipart upload.

    ", + "UploadListElement$VaultARN": "

    The Amazon Resource Name (ARN) of the vault that contains the archive.

    ", + "UploadListElement$ArchiveDescription": "

    The description of the archive that was specified in the Initiate Multipart Upload request.

    ", + "UploadListElement$CreationDate": "

    The UTC time at which the multipart upload was initiated.

    ", + "UploadMultipartPartInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

    ", + "UploadMultipartPartInput$vaultName": "

    The name of the vault.

    ", + "UploadMultipartPartInput$uploadId": "

    The upload ID of the multipart upload.

    ", + "UploadMultipartPartInput$checksum": "

    The SHA256 tree hash of the data being uploaded.

    ", + "UploadMultipartPartInput$range": "

    Identifies the range of bytes in the assembled archive that will be uploaded in this part. Amazon Glacier uses this information to assemble the archive in the proper sequence. The format of this header follows RFC 2616. An example header is Content-Range:bytes 0-4194303/*.

    ", + "UploadMultipartPartOutput$checksum": "

    The SHA256 tree hash that Amazon Glacier computed for the uploaded part.

    ", + "VaultAccessPolicy$Policy": "

    The vault access policy.

    ", + "VaultLockPolicy$Policy": "

    The vault lock policy.

    ", + "VaultNotificationConfig$SNSTopic": "

    The Amazon Simple Notification Service (Amazon SNS) topic Amazon Resource Name (ARN).

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/glacier/2012-06-01/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/glacier/2012-06-01/paginators-1.json new file mode 100644 index 000000000..69691437e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/glacier/2012-06-01/paginators-1.json @@ -0,0 +1,28 @@ +{ + "pagination": { + "ListJobs": { + "input_token": "marker", + "output_token": "Marker", + "limit_key": "limit", + "result_key": "JobList" + }, + "ListMultipartUploads": { + "input_token": "marker", + "output_token": "Marker", + "limit_key": "limit", + "result_key": "UploadsList" + }, + "ListParts": { + "input_token": "marker", + "output_token": "Marker", + "limit_key": "limit", + "result_key": "Parts" + }, + "ListVaults": { + "input_token": "marker", + "output_token": "Marker", + "limit_key": "limit", + "result_key": "VaultList" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/glacier/2012-06-01/waiters-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/glacier/2012-06-01/waiters-2.json new file mode 100644 index 000000000..07a64a056 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/glacier/2012-06-01/waiters-2.json @@ -0,0 +1,39 @@ +{ + "version": 2, + "waiters": { + "VaultExists": { + "operation": "DescribeVault", + "delay": 3, + "maxAttempts": 15, + "acceptors": [ + { + "state": "success", + "matcher": "status", + "expected": 200 + }, + { + "state": "retry", + "matcher": "error", + "expected": "ResourceNotFoundException" + } + ] + }, + "VaultNotExists": { + "operation": "DescribeVault", + "delay": 3, + "maxAttempts": 15, + "acceptors": [ + { + "state": "retry", + "matcher": "status", + "expected": 200 + }, + { + "state": "success", + "matcher": "error", + "expected": "ResourceNotFoundException" + } + ] + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/iam/2010-05-08/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/iam/2010-05-08/api-2.json new file mode 100644 index 000000000..a118fe568 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/iam/2010-05-08/api-2.json @@ -0,0 +1,4514 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2010-05-08", + "endpointPrefix":"iam", + "globalEndpoint":"iam.amazonaws.com", + "protocol":"query", + "serviceAbbreviation":"IAM", + "serviceFullName":"AWS Identity and Access Management", + "signatureVersion":"v4", + "xmlNamespace":"https://iam.amazonaws.com/doc/2010-05-08/" + }, + "operations":{ + "AddClientIDToOpenIDConnectProvider":{ + "name":"AddClientIDToOpenIDConnectProvider", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddClientIDToOpenIDConnectProviderRequest"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "AddRoleToInstanceProfile":{ + "name":"AddRoleToInstanceProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddRoleToInstanceProfileRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"EntityAlreadyExistsException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "AddUserToGroup":{ + "name":"AddUserToGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddUserToGroupRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + 
"AttachGroupPolicy":{ + "name":"AttachGroupPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachGroupPolicyRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidInputException"}, + {"shape":"ServiceFailureException"} + ] + }, + "AttachRolePolicy":{ + "name":"AttachRolePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachRolePolicyRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidInputException"}, + {"shape":"ServiceFailureException"} + ] + }, + "AttachUserPolicy":{ + "name":"AttachUserPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachUserPolicyRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidInputException"}, + {"shape":"ServiceFailureException"} + ] + }, + "ChangePassword":{ + "name":"ChangePassword", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ChangePasswordRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidUserTypeException"}, + {"shape":"LimitExceededException"}, + {"shape":"EntityTemporarilyUnmodifiableException"}, + {"shape":"PasswordPolicyViolationException"}, + {"shape":"ServiceFailureException"} + ] + }, + "CreateAccessKey":{ + "name":"CreateAccessKey", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateAccessKeyRequest"}, + "output":{ + "shape":"CreateAccessKeyResponse", + "resultWrapper":"CreateAccessKeyResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "CreateAccountAlias":{ + "name":"CreateAccountAlias", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateAccountAliasRequest"}, + "errors":[ + {"shape":"EntityAlreadyExistsException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "CreateGroup":{ + "name":"CreateGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateGroupRequest"}, + "output":{ + "shape":"CreateGroupResponse", + "resultWrapper":"CreateGroupResult" + }, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"EntityAlreadyExistsException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "CreateInstanceProfile":{ + "name":"CreateInstanceProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateInstanceProfileRequest"}, + "output":{ + "shape":"CreateInstanceProfileResponse", + "resultWrapper":"CreateInstanceProfileResult" + }, + "errors":[ + {"shape":"EntityAlreadyExistsException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "CreateLoginProfile":{ + "name":"CreateLoginProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateLoginProfileRequest"}, + "output":{ + "shape":"CreateLoginProfileResponse", + "resultWrapper":"CreateLoginProfileResult" + }, + "errors":[ + {"shape":"EntityAlreadyExistsException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"PasswordPolicyViolationException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "CreateOpenIDConnectProvider":{ + "name":"CreateOpenIDConnectProvider", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"CreateOpenIDConnectProviderRequest"}, + "output":{ + "shape":"CreateOpenIDConnectProviderResponse", + "resultWrapper":"CreateOpenIDConnectProviderResult" + }, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"EntityAlreadyExistsException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "CreatePolicy":{ + "name":"CreatePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePolicyRequest"}, + "output":{ + "shape":"CreatePolicyResponse", + "resultWrapper":"CreatePolicyResult" + }, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"LimitExceededException"}, + {"shape":"EntityAlreadyExistsException"}, + {"shape":"MalformedPolicyDocumentException"}, + {"shape":"ServiceFailureException"} + ] + }, + "CreatePolicyVersion":{ + "name":"CreatePolicyVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePolicyVersionRequest"}, + "output":{ + "shape":"CreatePolicyVersionResponse", + "resultWrapper":"CreatePolicyVersionResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"MalformedPolicyDocumentException"}, + {"shape":"InvalidInputException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "CreateRole":{ + "name":"CreateRole", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateRoleRequest"}, + "output":{ + "shape":"CreateRoleResponse", + "resultWrapper":"CreateRoleResult" + }, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"EntityAlreadyExistsException"}, + {"shape":"MalformedPolicyDocumentException"}, + {"shape":"ServiceFailureException"} + ] + }, + "CreateSAMLProvider":{ + "name":"CreateSAMLProvider", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSAMLProviderRequest"}, + "output":{ + "shape":"CreateSAMLProviderResponse", + "resultWrapper":"CreateSAMLProviderResult" + }, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"EntityAlreadyExistsException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "CreateUser":{ + "name":"CreateUser", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateUserRequest"}, + "output":{ + "shape":"CreateUserResponse", + "resultWrapper":"CreateUserResult" + }, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"EntityAlreadyExistsException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "CreateVirtualMFADevice":{ + "name":"CreateVirtualMFADevice", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVirtualMFADeviceRequest"}, + "output":{ + "shape":"CreateVirtualMFADeviceResponse", + "resultWrapper":"CreateVirtualMFADeviceResult" + }, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"EntityAlreadyExistsException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DeactivateMFADevice":{ + "name":"DeactivateMFADevice", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeactivateMFADeviceRequest"}, + "errors":[ + {"shape":"EntityTemporarilyUnmodifiableException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DeleteAccessKey":{ + "name":"DeleteAccessKey", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAccessKeyRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + 
{"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DeleteAccountAlias":{ + "name":"DeleteAccountAlias", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAccountAliasRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DeleteAccountPasswordPolicy":{ + "name":"DeleteAccountPasswordPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DeleteGroup":{ + "name":"DeleteGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteGroupRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"DeleteConflictException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DeleteGroupPolicy":{ + "name":"DeleteGroupPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteGroupPolicyRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DeleteInstanceProfile":{ + "name":"DeleteInstanceProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteInstanceProfileRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"DeleteConflictException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DeleteLoginProfile":{ + "name":"DeleteLoginProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteLoginProfileRequest"}, + "errors":[ + {"shape":"EntityTemporarilyUnmodifiableException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DeleteOpenIDConnectProvider":{ + "name":"DeleteOpenIDConnectProvider", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteOpenIDConnectProviderRequest"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DeletePolicy":{ + "name":"DeletePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeletePolicyRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidInputException"}, + {"shape":"DeleteConflictException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DeletePolicyVersion":{ + "name":"DeletePolicyVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeletePolicyVersionRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidInputException"}, + {"shape":"DeleteConflictException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DeleteRole":{ + "name":"DeleteRole", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRoleRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"DeleteConflictException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DeleteRolePolicy":{ + "name":"DeleteRolePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRolePolicyRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + 
{"shape":"ServiceFailureException"} + ] + }, + "DeleteSAMLProvider":{ + "name":"DeleteSAMLProvider", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSAMLProviderRequest"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"LimitExceededException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DeleteSSHPublicKey":{ + "name":"DeleteSSHPublicKey", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSSHPublicKeyRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"} + ] + }, + "DeleteServerCertificate":{ + "name":"DeleteServerCertificate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteServerCertificateRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"DeleteConflictException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DeleteSigningCertificate":{ + "name":"DeleteSigningCertificate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSigningCertificateRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DeleteUser":{ + "name":"DeleteUser", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteUserRequest"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"DeleteConflictException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DeleteUserPolicy":{ + "name":"DeleteUserPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteUserPolicyRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DeleteVirtualMFADevice":{ + "name":"DeleteVirtualMFADevice", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVirtualMFADeviceRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"DeleteConflictException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DetachGroupPolicy":{ + "name":"DetachGroupPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachGroupPolicyRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidInputException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DetachRolePolicy":{ + "name":"DetachRolePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachRolePolicyRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidInputException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DetachUserPolicy":{ + "name":"DetachUserPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachUserPolicyRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidInputException"}, + {"shape":"ServiceFailureException"} + ] + }, + "EnableMFADevice":{ + "name":"EnableMFADevice", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableMFADeviceRequest"}, + "errors":[ + {"shape":"EntityAlreadyExistsException"}, + {"shape":"EntityTemporarilyUnmodifiableException"}, + {"shape":"InvalidAuthenticationCodeException"}, + {"shape":"LimitExceededException"}, + 
{"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "GenerateCredentialReport":{ + "name":"GenerateCredentialReport", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{ + "shape":"GenerateCredentialReportResponse", + "resultWrapper":"GenerateCredentialReportResult" + }, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "GetAccessKeyLastUsed":{ + "name":"GetAccessKeyLastUsed", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetAccessKeyLastUsedRequest"}, + "output":{ + "shape":"GetAccessKeyLastUsedResponse", + "resultWrapper":"GetAccessKeyLastUsedResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"} + ] + }, + "GetAccountAuthorizationDetails":{ + "name":"GetAccountAuthorizationDetails", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetAccountAuthorizationDetailsRequest"}, + "output":{ + "shape":"GetAccountAuthorizationDetailsResponse", + "resultWrapper":"GetAccountAuthorizationDetailsResult" + }, + "errors":[ + {"shape":"ServiceFailureException"} + ] + }, + "GetAccountPasswordPolicy":{ + "name":"GetAccountPasswordPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{ + "shape":"GetAccountPasswordPolicyResponse", + "resultWrapper":"GetAccountPasswordPolicyResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "GetAccountSummary":{ + "name":"GetAccountSummary", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{ + "shape":"GetAccountSummaryResponse", + "resultWrapper":"GetAccountSummaryResult" + }, + "errors":[ + {"shape":"ServiceFailureException"} + ] + }, + "GetContextKeysForCustomPolicy":{ + "name":"GetContextKeysForCustomPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetContextKeysForCustomPolicyRequest"}, + "output":{ + "shape":"GetContextKeysForPolicyResponse", + "resultWrapper":"GetContextKeysForCustomPolicyResult" + }, + "errors":[ + {"shape":"InvalidInputException"} + ] + }, + "GetContextKeysForPrincipalPolicy":{ + "name":"GetContextKeysForPrincipalPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetContextKeysForPrincipalPolicyRequest"}, + "output":{ + "shape":"GetContextKeysForPolicyResponse", + "resultWrapper":"GetContextKeysForPrincipalPolicyResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidInputException"} + ] + }, + "GetCredentialReport":{ + "name":"GetCredentialReport", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{ + "shape":"GetCredentialReportResponse", + "resultWrapper":"GetCredentialReportResult" + }, + "errors":[ + {"shape":"CredentialReportNotPresentException"}, + {"shape":"CredentialReportExpiredException"}, + {"shape":"CredentialReportNotReadyException"}, + {"shape":"ServiceFailureException"} + ] + }, + "GetGroup":{ + "name":"GetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetGroupRequest"}, + "output":{ + "shape":"GetGroupResponse", + "resultWrapper":"GetGroupResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "GetGroupPolicy":{ + "name":"GetGroupPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetGroupPolicyRequest"}, + "output":{ + "shape":"GetGroupPolicyResponse", + "resultWrapper":"GetGroupPolicyResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + 
{"shape":"ServiceFailureException"} + ] + }, + "GetInstanceProfile":{ + "name":"GetInstanceProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetInstanceProfileRequest"}, + "output":{ + "shape":"GetInstanceProfileResponse", + "resultWrapper":"GetInstanceProfileResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "GetLoginProfile":{ + "name":"GetLoginProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetLoginProfileRequest"}, + "output":{ + "shape":"GetLoginProfileResponse", + "resultWrapper":"GetLoginProfileResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "GetOpenIDConnectProvider":{ + "name":"GetOpenIDConnectProvider", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetOpenIDConnectProviderRequest"}, + "output":{ + "shape":"GetOpenIDConnectProviderResponse", + "resultWrapper":"GetOpenIDConnectProviderResult" + }, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "GetPolicy":{ + "name":"GetPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetPolicyRequest"}, + "output":{ + "shape":"GetPolicyResponse", + "resultWrapper":"GetPolicyResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidInputException"}, + {"shape":"ServiceFailureException"} + ] + }, + "GetPolicyVersion":{ + "name":"GetPolicyVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetPolicyVersionRequest"}, + "output":{ + "shape":"GetPolicyVersionResponse", + "resultWrapper":"GetPolicyVersionResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidInputException"}, + {"shape":"ServiceFailureException"} + ] + }, + "GetRole":{ + "name":"GetRole", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetRoleRequest"}, + "output":{ + "shape":"GetRoleResponse", + "resultWrapper":"GetRoleResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "GetRolePolicy":{ + "name":"GetRolePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetRolePolicyRequest"}, + "output":{ + "shape":"GetRolePolicyResponse", + "resultWrapper":"GetRolePolicyResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "GetSAMLProvider":{ + "name":"GetSAMLProvider", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetSAMLProviderRequest"}, + "output":{ + "shape":"GetSAMLProviderResponse", + "resultWrapper":"GetSAMLProviderResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidInputException"}, + {"shape":"ServiceFailureException"} + ] + }, + "GetSSHPublicKey":{ + "name":"GetSSHPublicKey", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetSSHPublicKeyRequest"}, + "output":{ + "shape":"GetSSHPublicKeyResponse", + "resultWrapper":"GetSSHPublicKeyResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"UnrecognizedPublicKeyEncodingException"} + ] + }, + "GetServerCertificate":{ + "name":"GetServerCertificate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetServerCertificateRequest"}, + "output":{ + "shape":"GetServerCertificateResponse", + 
"resultWrapper":"GetServerCertificateResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "GetUser":{ + "name":"GetUser", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetUserRequest"}, + "output":{ + "shape":"GetUserResponse", + "resultWrapper":"GetUserResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "GetUserPolicy":{ + "name":"GetUserPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetUserPolicyRequest"}, + "output":{ + "shape":"GetUserPolicyResponse", + "resultWrapper":"GetUserPolicyResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "ListAccessKeys":{ + "name":"ListAccessKeys", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAccessKeysRequest"}, + "output":{ + "shape":"ListAccessKeysResponse", + "resultWrapper":"ListAccessKeysResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "ListAccountAliases":{ + "name":"ListAccountAliases", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAccountAliasesRequest"}, + "output":{ + "shape":"ListAccountAliasesResponse", + "resultWrapper":"ListAccountAliasesResult" + }, + "errors":[ + {"shape":"ServiceFailureException"} + ] + }, + "ListAttachedGroupPolicies":{ + "name":"ListAttachedGroupPolicies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAttachedGroupPoliciesRequest"}, + "output":{ + "shape":"ListAttachedGroupPoliciesResponse", + "resultWrapper":"ListAttachedGroupPoliciesResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidInputException"}, + {"shape":"ServiceFailureException"} + ] + }, + "ListAttachedRolePolicies":{ + "name":"ListAttachedRolePolicies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAttachedRolePoliciesRequest"}, + "output":{ + "shape":"ListAttachedRolePoliciesResponse", + "resultWrapper":"ListAttachedRolePoliciesResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidInputException"}, + {"shape":"ServiceFailureException"} + ] + }, + "ListAttachedUserPolicies":{ + "name":"ListAttachedUserPolicies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAttachedUserPoliciesRequest"}, + "output":{ + "shape":"ListAttachedUserPoliciesResponse", + "resultWrapper":"ListAttachedUserPoliciesResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidInputException"}, + {"shape":"ServiceFailureException"} + ] + }, + "ListEntitiesForPolicy":{ + "name":"ListEntitiesForPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListEntitiesForPolicyRequest"}, + "output":{ + "shape":"ListEntitiesForPolicyResponse", + "resultWrapper":"ListEntitiesForPolicyResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidInputException"}, + {"shape":"ServiceFailureException"} + ] + }, + "ListGroupPolicies":{ + "name":"ListGroupPolicies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListGroupPoliciesRequest"}, + "output":{ + "shape":"ListGroupPoliciesResponse", + "resultWrapper":"ListGroupPoliciesResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "ListGroups":{ + "name":"ListGroups", + "http":{ + 
"method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListGroupsRequest"}, + "output":{ + "shape":"ListGroupsResponse", + "resultWrapper":"ListGroupsResult" + }, + "errors":[ + {"shape":"ServiceFailureException"} + ] + }, + "ListGroupsForUser":{ + "name":"ListGroupsForUser", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListGroupsForUserRequest"}, + "output":{ + "shape":"ListGroupsForUserResponse", + "resultWrapper":"ListGroupsForUserResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "ListInstanceProfiles":{ + "name":"ListInstanceProfiles", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListInstanceProfilesRequest"}, + "output":{ + "shape":"ListInstanceProfilesResponse", + "resultWrapper":"ListInstanceProfilesResult" + }, + "errors":[ + {"shape":"ServiceFailureException"} + ] + }, + "ListInstanceProfilesForRole":{ + "name":"ListInstanceProfilesForRole", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListInstanceProfilesForRoleRequest"}, + "output":{ + "shape":"ListInstanceProfilesForRoleResponse", + "resultWrapper":"ListInstanceProfilesForRoleResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "ListMFADevices":{ + "name":"ListMFADevices", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListMFADevicesRequest"}, + "output":{ + "shape":"ListMFADevicesResponse", + "resultWrapper":"ListMFADevicesResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "ListOpenIDConnectProviders":{ + "name":"ListOpenIDConnectProviders", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListOpenIDConnectProvidersRequest"}, + "output":{ + "shape":"ListOpenIDConnectProvidersResponse", + "resultWrapper":"ListOpenIDConnectProvidersResult" + }, + "errors":[ + {"shape":"ServiceFailureException"} + ] + }, + "ListPolicies":{ + "name":"ListPolicies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListPoliciesRequest"}, + "output":{ + "shape":"ListPoliciesResponse", + "resultWrapper":"ListPoliciesResult" + }, + "errors":[ + {"shape":"ServiceFailureException"} + ] + }, + "ListPolicyVersions":{ + "name":"ListPolicyVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListPolicyVersionsRequest"}, + "output":{ + "shape":"ListPolicyVersionsResponse", + "resultWrapper":"ListPolicyVersionsResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidInputException"}, + {"shape":"ServiceFailureException"} + ] + }, + "ListRolePolicies":{ + "name":"ListRolePolicies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListRolePoliciesRequest"}, + "output":{ + "shape":"ListRolePoliciesResponse", + "resultWrapper":"ListRolePoliciesResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "ListRoles":{ + "name":"ListRoles", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListRolesRequest"}, + "output":{ + "shape":"ListRolesResponse", + "resultWrapper":"ListRolesResult" + }, + "errors":[ + {"shape":"ServiceFailureException"} + ] + }, + "ListSAMLProviders":{ + "name":"ListSAMLProviders", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListSAMLProvidersRequest"}, + "output":{ + "shape":"ListSAMLProvidersResponse", + 
"resultWrapper":"ListSAMLProvidersResult" + }, + "errors":[ + {"shape":"ServiceFailureException"} + ] + }, + "ListSSHPublicKeys":{ + "name":"ListSSHPublicKeys", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListSSHPublicKeysRequest"}, + "output":{ + "shape":"ListSSHPublicKeysResponse", + "resultWrapper":"ListSSHPublicKeysResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"} + ] + }, + "ListServerCertificates":{ + "name":"ListServerCertificates", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListServerCertificatesRequest"}, + "output":{ + "shape":"ListServerCertificatesResponse", + "resultWrapper":"ListServerCertificatesResult" + }, + "errors":[ + {"shape":"ServiceFailureException"} + ] + }, + "ListSigningCertificates":{ + "name":"ListSigningCertificates", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListSigningCertificatesRequest"}, + "output":{ + "shape":"ListSigningCertificatesResponse", + "resultWrapper":"ListSigningCertificatesResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "ListUserPolicies":{ + "name":"ListUserPolicies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListUserPoliciesRequest"}, + "output":{ + "shape":"ListUserPoliciesResponse", + "resultWrapper":"ListUserPoliciesResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "ListUsers":{ + "name":"ListUsers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListUsersRequest"}, + "output":{ + "shape":"ListUsersResponse", + "resultWrapper":"ListUsersResult" + }, + "errors":[ + {"shape":"ServiceFailureException"} + ] + }, + "ListVirtualMFADevices":{ + "name":"ListVirtualMFADevices", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListVirtualMFADevicesRequest"}, + "output":{ + "shape":"ListVirtualMFADevicesResponse", + "resultWrapper":"ListVirtualMFADevicesResult" + } + }, + "PutGroupPolicy":{ + "name":"PutGroupPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutGroupPolicyRequest"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"MalformedPolicyDocumentException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "PutRolePolicy":{ + "name":"PutRolePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutRolePolicyRequest"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"MalformedPolicyDocumentException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "PutUserPolicy":{ + "name":"PutUserPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutUserPolicyRequest"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"MalformedPolicyDocumentException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "RemoveClientIDFromOpenIDConnectProvider":{ + "name":"RemoveClientIDFromOpenIDConnectProvider", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveClientIDFromOpenIDConnectProviderRequest"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "RemoveRoleFromInstanceProfile":{ + "name":"RemoveRoleFromInstanceProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"RemoveRoleFromInstanceProfileRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "RemoveUserFromGroup":{ + "name":"RemoveUserFromGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveUserFromGroupRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "ResyncMFADevice":{ + "name":"ResyncMFADevice", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResyncMFADeviceRequest"}, + "errors":[ + {"shape":"InvalidAuthenticationCodeException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "SetDefaultPolicyVersion":{ + "name":"SetDefaultPolicyVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetDefaultPolicyVersionRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidInputException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "SimulateCustomPolicy":{ + "name":"SimulateCustomPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SimulateCustomPolicyRequest"}, + "output":{ + "shape":"SimulatePolicyResponse", + "resultWrapper":"SimulateCustomPolicyResult" + }, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"PolicyEvaluationException"} + ] + }, + "SimulatePrincipalPolicy":{ + "name":"SimulatePrincipalPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SimulatePrincipalPolicyRequest"}, + "output":{ + "shape":"SimulatePolicyResponse", + "resultWrapper":"SimulatePrincipalPolicyResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidInputException"}, + {"shape":"PolicyEvaluationException"} + ] + }, + "UpdateAccessKey":{ + "name":"UpdateAccessKey", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateAccessKeyRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "UpdateAccountPasswordPolicy":{ + "name":"UpdateAccountPasswordPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateAccountPasswordPolicyRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"MalformedPolicyDocumentException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "UpdateAssumeRolePolicy":{ + "name":"UpdateAssumeRolePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateAssumeRolePolicyRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"MalformedPolicyDocumentException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "UpdateGroup":{ + "name":"UpdateGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateGroupRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"EntityAlreadyExistsException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "UpdateLoginProfile":{ + "name":"UpdateLoginProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateLoginProfileRequest"}, + "errors":[ + {"shape":"EntityTemporarilyUnmodifiableException"}, + {"shape":"NoSuchEntityException"}, + 
{"shape":"PasswordPolicyViolationException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "UpdateOpenIDConnectProviderThumbprint":{ + "name":"UpdateOpenIDConnectProviderThumbprint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateOpenIDConnectProviderThumbprintRequest"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "UpdateSAMLProvider":{ + "name":"UpdateSAMLProvider", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateSAMLProviderRequest"}, + "output":{ + "shape":"UpdateSAMLProviderResponse", + "resultWrapper":"UpdateSAMLProviderResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidInputException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "UpdateSSHPublicKey":{ + "name":"UpdateSSHPublicKey", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateSSHPublicKeyRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"} + ] + }, + "UpdateServerCertificate":{ + "name":"UpdateServerCertificate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateServerCertificateRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"EntityAlreadyExistsException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "UpdateSigningCertificate":{ + "name":"UpdateSigningCertificate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateSigningCertificateRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "UpdateUser":{ + "name":"UpdateUser", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateUserRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"EntityAlreadyExistsException"}, + {"shape":"EntityTemporarilyUnmodifiableException"}, + {"shape":"ServiceFailureException"} + ] + }, + "UploadSSHPublicKey":{ + "name":"UploadSSHPublicKey", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UploadSSHPublicKeyRequest"}, + "output":{ + "shape":"UploadSSHPublicKeyResponse", + "resultWrapper":"UploadSSHPublicKeyResult" + }, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidPublicKeyException"}, + {"shape":"DuplicateSSHPublicKeyException"}, + {"shape":"UnrecognizedPublicKeyEncodingException"} + ] + }, + "UploadServerCertificate":{ + "name":"UploadServerCertificate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UploadServerCertificateRequest"}, + "output":{ + "shape":"UploadServerCertificateResponse", + "resultWrapper":"UploadServerCertificateResult" + }, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"EntityAlreadyExistsException"}, + {"shape":"MalformedCertificateException"}, + {"shape":"KeyPairMismatchException"}, + {"shape":"ServiceFailureException"} + ] + }, + "UploadSigningCertificate":{ + "name":"UploadSigningCertificate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UploadSigningCertificateRequest"}, + "output":{ + "shape":"UploadSigningCertificateResponse", + "resultWrapper":"UploadSigningCertificateResult" + }, + "errors":[ + {"shape":"LimitExceededException"}, + 
{"shape":"EntityAlreadyExistsException"}, + {"shape":"MalformedCertificateException"}, + {"shape":"InvalidCertificateException"}, + {"shape":"DuplicateCertificateException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + } + }, + "shapes":{ + "AccessKey":{ + "type":"structure", + "required":[ + "UserName", + "AccessKeyId", + "Status", + "SecretAccessKey" + ], + "members":{ + "UserName":{"shape":"userNameType"}, + "AccessKeyId":{"shape":"accessKeyIdType"}, + "Status":{"shape":"statusType"}, + "SecretAccessKey":{"shape":"accessKeySecretType"}, + "CreateDate":{"shape":"dateType"} + } + }, + "AccessKeyLastUsed":{ + "type":"structure", + "required":[ + "LastUsedDate", + "ServiceName", + "Region" + ], + "members":{ + "LastUsedDate":{"shape":"dateType"}, + "ServiceName":{"shape":"stringType"}, + "Region":{"shape":"stringType"} + } + }, + "AccessKeyMetadata":{ + "type":"structure", + "members":{ + "UserName":{"shape":"userNameType"}, + "AccessKeyId":{"shape":"accessKeyIdType"}, + "Status":{"shape":"statusType"}, + "CreateDate":{"shape":"dateType"} + } + }, + "ActionNameListType":{ + "type":"list", + "member":{"shape":"ActionNameType"} + }, + "ActionNameType":{ + "type":"string", + "max":128, + "min":3 + }, + "AddClientIDToOpenIDConnectProviderRequest":{ + "type":"structure", + "required":[ + "OpenIDConnectProviderArn", + "ClientID" + ], + "members":{ + "OpenIDConnectProviderArn":{"shape":"arnType"}, + "ClientID":{"shape":"clientIDType"} + } + }, + "AddRoleToInstanceProfileRequest":{ + "type":"structure", + "required":[ + "InstanceProfileName", + "RoleName" + ], + "members":{ + "InstanceProfileName":{"shape":"instanceProfileNameType"}, + "RoleName":{"shape":"roleNameType"} + } + }, + "AddUserToGroupRequest":{ + "type":"structure", + "required":[ + "GroupName", + "UserName" + ], + "members":{ + "GroupName":{"shape":"groupNameType"}, + "UserName":{"shape":"existingUserNameType"} + } + }, + "AttachGroupPolicyRequest":{ + "type":"structure", + "required":[ + "GroupName", + "PolicyArn" + ], + "members":{ + "GroupName":{"shape":"groupNameType"}, + "PolicyArn":{"shape":"arnType"} + } + }, + "AttachRolePolicyRequest":{ + "type":"structure", + "required":[ + "RoleName", + "PolicyArn" + ], + "members":{ + "RoleName":{"shape":"roleNameType"}, + "PolicyArn":{"shape":"arnType"} + } + }, + "AttachUserPolicyRequest":{ + "type":"structure", + "required":[ + "UserName", + "PolicyArn" + ], + "members":{ + "UserName":{"shape":"userNameType"}, + "PolicyArn":{"shape":"arnType"} + } + }, + "AttachedPolicy":{ + "type":"structure", + "members":{ + "PolicyName":{"shape":"policyNameType"}, + "PolicyArn":{"shape":"arnType"} + } + }, + "BootstrapDatum":{ + "type":"blob", + "sensitive":true + }, + "ChangePasswordRequest":{ + "type":"structure", + "required":[ + "OldPassword", + "NewPassword" + ], + "members":{ + "OldPassword":{"shape":"passwordType"}, + "NewPassword":{"shape":"passwordType"} + } + }, + "ColumnNumber":{"type":"integer"}, + "ContextEntry":{ + "type":"structure", + "members":{ + "ContextKeyName":{"shape":"ContextKeyNameType"}, + "ContextKeyValues":{"shape":"ContextKeyValueListType"}, + "ContextKeyType":{"shape":"ContextKeyTypeEnum"} + } + }, + "ContextEntryListType":{ + "type":"list", + "member":{"shape":"ContextEntry"} + }, + "ContextKeyNameType":{ + "type":"string", + "max":256, + "min":5 + }, + "ContextKeyNamesResultListType":{ + "type":"list", + "member":{"shape":"ContextKeyNameType"} + }, + "ContextKeyTypeEnum":{ + "type":"string", + "enum":[ + "string", + "stringList", + 
"numeric", + "numericList", + "boolean", + "booleanList", + "ip", + "ipList", + "binary", + "binaryList", + "date", + "dateList" + ] + }, + "ContextKeyValueListType":{ + "type":"list", + "member":{"shape":"ContextKeyValueType"} + }, + "ContextKeyValueType":{"type":"string"}, + "CreateAccessKeyRequest":{ + "type":"structure", + "members":{ + "UserName":{"shape":"existingUserNameType"} + } + }, + "CreateAccessKeyResponse":{ + "type":"structure", + "required":["AccessKey"], + "members":{ + "AccessKey":{"shape":"AccessKey"} + } + }, + "CreateAccountAliasRequest":{ + "type":"structure", + "required":["AccountAlias"], + "members":{ + "AccountAlias":{"shape":"accountAliasType"} + } + }, + "CreateGroupRequest":{ + "type":"structure", + "required":["GroupName"], + "members":{ + "Path":{"shape":"pathType"}, + "GroupName":{"shape":"groupNameType"} + } + }, + "CreateGroupResponse":{ + "type":"structure", + "required":["Group"], + "members":{ + "Group":{"shape":"Group"} + } + }, + "CreateInstanceProfileRequest":{ + "type":"structure", + "required":["InstanceProfileName"], + "members":{ + "InstanceProfileName":{"shape":"instanceProfileNameType"}, + "Path":{"shape":"pathType"} + } + }, + "CreateInstanceProfileResponse":{ + "type":"structure", + "required":["InstanceProfile"], + "members":{ + "InstanceProfile":{"shape":"InstanceProfile"} + } + }, + "CreateLoginProfileRequest":{ + "type":"structure", + "required":[ + "UserName", + "Password" + ], + "members":{ + "UserName":{"shape":"userNameType"}, + "Password":{"shape":"passwordType"}, + "PasswordResetRequired":{"shape":"booleanType"} + } + }, + "CreateLoginProfileResponse":{ + "type":"structure", + "required":["LoginProfile"], + "members":{ + "LoginProfile":{"shape":"LoginProfile"} + } + }, + "CreateOpenIDConnectProviderRequest":{ + "type":"structure", + "required":[ + "Url", + "ThumbprintList" + ], + "members":{ + "Url":{"shape":"OpenIDConnectProviderUrlType"}, + "ClientIDList":{"shape":"clientIDListType"}, + "ThumbprintList":{"shape":"thumbprintListType"} + } + }, + "CreateOpenIDConnectProviderResponse":{ + "type":"structure", + "members":{ + "OpenIDConnectProviderArn":{"shape":"arnType"} + } + }, + "CreatePolicyRequest":{ + "type":"structure", + "required":[ + "PolicyName", + "PolicyDocument" + ], + "members":{ + "PolicyName":{"shape":"policyNameType"}, + "Path":{"shape":"policyPathType"}, + "PolicyDocument":{"shape":"policyDocumentType"}, + "Description":{"shape":"policyDescriptionType"} + } + }, + "CreatePolicyResponse":{ + "type":"structure", + "members":{ + "Policy":{"shape":"Policy"} + } + }, + "CreatePolicyVersionRequest":{ + "type":"structure", + "required":[ + "PolicyArn", + "PolicyDocument" + ], + "members":{ + "PolicyArn":{"shape":"arnType"}, + "PolicyDocument":{"shape":"policyDocumentType"}, + "SetAsDefault":{"shape":"booleanType"} + } + }, + "CreatePolicyVersionResponse":{ + "type":"structure", + "members":{ + "PolicyVersion":{"shape":"PolicyVersion"} + } + }, + "CreateRoleRequest":{ + "type":"structure", + "required":[ + "RoleName", + "AssumeRolePolicyDocument" + ], + "members":{ + "Path":{"shape":"pathType"}, + "RoleName":{"shape":"roleNameType"}, + "AssumeRolePolicyDocument":{"shape":"policyDocumentType"} + } + }, + "CreateRoleResponse":{ + "type":"structure", + "required":["Role"], + "members":{ + "Role":{"shape":"Role"} + } + }, + "CreateSAMLProviderRequest":{ + "type":"structure", + "required":[ + "SAMLMetadataDocument", + "Name" + ], + "members":{ + "SAMLMetadataDocument":{"shape":"SAMLMetadataDocumentType"}, + 
"Name":{"shape":"SAMLProviderNameType"} + } + }, + "CreateSAMLProviderResponse":{ + "type":"structure", + "members":{ + "SAMLProviderArn":{"shape":"arnType"} + } + }, + "CreateUserRequest":{ + "type":"structure", + "required":["UserName"], + "members":{ + "Path":{"shape":"pathType"}, + "UserName":{"shape":"userNameType"} + } + }, + "CreateUserResponse":{ + "type":"structure", + "members":{ + "User":{"shape":"User"} + } + }, + "CreateVirtualMFADeviceRequest":{ + "type":"structure", + "required":["VirtualMFADeviceName"], + "members":{ + "Path":{"shape":"pathType"}, + "VirtualMFADeviceName":{"shape":"virtualMFADeviceName"} + } + }, + "CreateVirtualMFADeviceResponse":{ + "type":"structure", + "required":["VirtualMFADevice"], + "members":{ + "VirtualMFADevice":{"shape":"VirtualMFADevice"} + } + }, + "CredentialReportExpiredException":{ + "type":"structure", + "members":{ + "message":{"shape":"credentialReportExpiredExceptionMessage"} + }, + "error":{ + "code":"ReportExpired", + "httpStatusCode":410, + "senderFault":true + }, + "exception":true + }, + "CredentialReportNotPresentException":{ + "type":"structure", + "members":{ + "message":{"shape":"credentialReportNotPresentExceptionMessage"} + }, + "error":{ + "code":"ReportNotPresent", + "httpStatusCode":410, + "senderFault":true + }, + "exception":true + }, + "CredentialReportNotReadyException":{ + "type":"structure", + "members":{ + "message":{"shape":"credentialReportNotReadyExceptionMessage"} + }, + "error":{ + "code":"ReportInProgress", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DeactivateMFADeviceRequest":{ + "type":"structure", + "required":[ + "UserName", + "SerialNumber" + ], + "members":{ + "UserName":{"shape":"existingUserNameType"}, + "SerialNumber":{"shape":"serialNumberType"} + } + }, + "DeleteAccessKeyRequest":{ + "type":"structure", + "required":["AccessKeyId"], + "members":{ + "UserName":{"shape":"existingUserNameType"}, + "AccessKeyId":{"shape":"accessKeyIdType"} + } + }, + "DeleteAccountAliasRequest":{ + "type":"structure", + "required":["AccountAlias"], + "members":{ + "AccountAlias":{"shape":"accountAliasType"} + } + }, + "DeleteConflictException":{ + "type":"structure", + "members":{ + "message":{"shape":"deleteConflictMessage"} + }, + "error":{ + "code":"DeleteConflict", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "DeleteGroupPolicyRequest":{ + "type":"structure", + "required":[ + "GroupName", + "PolicyName" + ], + "members":{ + "GroupName":{"shape":"groupNameType"}, + "PolicyName":{"shape":"policyNameType"} + } + }, + "DeleteGroupRequest":{ + "type":"structure", + "required":["GroupName"], + "members":{ + "GroupName":{"shape":"groupNameType"} + } + }, + "DeleteInstanceProfileRequest":{ + "type":"structure", + "required":["InstanceProfileName"], + "members":{ + "InstanceProfileName":{"shape":"instanceProfileNameType"} + } + }, + "DeleteLoginProfileRequest":{ + "type":"structure", + "required":["UserName"], + "members":{ + "UserName":{"shape":"userNameType"} + } + }, + "DeleteOpenIDConnectProviderRequest":{ + "type":"structure", + "required":["OpenIDConnectProviderArn"], + "members":{ + "OpenIDConnectProviderArn":{"shape":"arnType"} + } + }, + "DeletePolicyRequest":{ + "type":"structure", + "required":["PolicyArn"], + "members":{ + "PolicyArn":{"shape":"arnType"} + } + }, + "DeletePolicyVersionRequest":{ + "type":"structure", + "required":[ + "PolicyArn", + "VersionId" + ], + "members":{ + "PolicyArn":{"shape":"arnType"}, + 
"VersionId":{"shape":"policyVersionIdType"} + } + }, + "DeleteRolePolicyRequest":{ + "type":"structure", + "required":[ + "RoleName", + "PolicyName" + ], + "members":{ + "RoleName":{"shape":"roleNameType"}, + "PolicyName":{"shape":"policyNameType"} + } + }, + "DeleteRoleRequest":{ + "type":"structure", + "required":["RoleName"], + "members":{ + "RoleName":{"shape":"roleNameType"} + } + }, + "DeleteSAMLProviderRequest":{ + "type":"structure", + "required":["SAMLProviderArn"], + "members":{ + "SAMLProviderArn":{"shape":"arnType"} + } + }, + "DeleteSSHPublicKeyRequest":{ + "type":"structure", + "required":[ + "UserName", + "SSHPublicKeyId" + ], + "members":{ + "UserName":{"shape":"userNameType"}, + "SSHPublicKeyId":{"shape":"publicKeyIdType"} + } + }, + "DeleteServerCertificateRequest":{ + "type":"structure", + "required":["ServerCertificateName"], + "members":{ + "ServerCertificateName":{"shape":"serverCertificateNameType"} + } + }, + "DeleteSigningCertificateRequest":{ + "type":"structure", + "required":["CertificateId"], + "members":{ + "UserName":{"shape":"existingUserNameType"}, + "CertificateId":{"shape":"certificateIdType"} + } + }, + "DeleteUserPolicyRequest":{ + "type":"structure", + "required":[ + "UserName", + "PolicyName" + ], + "members":{ + "UserName":{"shape":"existingUserNameType"}, + "PolicyName":{"shape":"policyNameType"} + } + }, + "DeleteUserRequest":{ + "type":"structure", + "required":["UserName"], + "members":{ + "UserName":{"shape":"existingUserNameType"} + } + }, + "DeleteVirtualMFADeviceRequest":{ + "type":"structure", + "required":["SerialNumber"], + "members":{ + "SerialNumber":{"shape":"serialNumberType"} + } + }, + "DetachGroupPolicyRequest":{ + "type":"structure", + "required":[ + "GroupName", + "PolicyArn" + ], + "members":{ + "GroupName":{"shape":"groupNameType"}, + "PolicyArn":{"shape":"arnType"} + } + }, + "DetachRolePolicyRequest":{ + "type":"structure", + "required":[ + "RoleName", + "PolicyArn" + ], + "members":{ + "RoleName":{"shape":"roleNameType"}, + "PolicyArn":{"shape":"arnType"} + } + }, + "DetachUserPolicyRequest":{ + "type":"structure", + "required":[ + "UserName", + "PolicyArn" + ], + "members":{ + "UserName":{"shape":"userNameType"}, + "PolicyArn":{"shape":"arnType"} + } + }, + "DuplicateCertificateException":{ + "type":"structure", + "members":{ + "message":{"shape":"duplicateCertificateMessage"} + }, + "error":{ + "code":"DuplicateCertificate", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "DuplicateSSHPublicKeyException":{ + "type":"structure", + "members":{ + "message":{"shape":"duplicateSSHPublicKeyMessage"} + }, + "error":{ + "code":"DuplicateSSHPublicKey", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "EnableMFADeviceRequest":{ + "type":"structure", + "required":[ + "UserName", + "SerialNumber", + "AuthenticationCode1", + "AuthenticationCode2" + ], + "members":{ + "UserName":{"shape":"existingUserNameType"}, + "SerialNumber":{"shape":"serialNumberType"}, + "AuthenticationCode1":{"shape":"authenticationCodeType"}, + "AuthenticationCode2":{"shape":"authenticationCodeType"} + } + }, + "EntityAlreadyExistsException":{ + "type":"structure", + "members":{ + "message":{"shape":"entityAlreadyExistsMessage"} + }, + "error":{ + "code":"EntityAlreadyExists", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "EntityTemporarilyUnmodifiableException":{ + "type":"structure", + "members":{ + "message":{"shape":"entityTemporarilyUnmodifiableMessage"} + }, + "error":{ + 
"code":"EntityTemporarilyUnmodifiable", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "EntityType":{ + "type":"string", + "enum":[ + "User", + "Role", + "Group", + "LocalManagedPolicy", + "AWSManagedPolicy" + ] + }, + "EvalDecisionDetailsType":{ + "type":"map", + "key":{"shape":"EvalDecisionSourceType"}, + "value":{"shape":"PolicyEvaluationDecisionType"} + }, + "EvalDecisionSourceType":{ + "type":"string", + "max":256, + "min":3 + }, + "EvaluationResult":{ + "type":"structure", + "required":[ + "EvalActionName", + "EvalDecision" + ], + "members":{ + "EvalActionName":{"shape":"ActionNameType"}, + "EvalResourceName":{"shape":"ResourceNameType"}, + "EvalDecision":{"shape":"PolicyEvaluationDecisionType"}, + "MatchedStatements":{"shape":"StatementListType"}, + "MissingContextValues":{"shape":"ContextKeyNamesResultListType"}, + "EvalDecisionDetails":{"shape":"EvalDecisionDetailsType"}, + "ResourceSpecificResults":{"shape":"ResourceSpecificResultListType"} + } + }, + "EvaluationResultsListType":{ + "type":"list", + "member":{"shape":"EvaluationResult"} + }, + "GenerateCredentialReportResponse":{ + "type":"structure", + "members":{ + "State":{"shape":"ReportStateType"}, + "Description":{"shape":"ReportStateDescriptionType"} + } + }, + "GetAccessKeyLastUsedRequest":{ + "type":"structure", + "required":["AccessKeyId"], + "members":{ + "AccessKeyId":{"shape":"accessKeyIdType"} + } + }, + "GetAccessKeyLastUsedResponse":{ + "type":"structure", + "members":{ + "UserName":{"shape":"existingUserNameType"}, + "AccessKeyLastUsed":{"shape":"AccessKeyLastUsed"} + } + }, + "GetAccountAuthorizationDetailsRequest":{ + "type":"structure", + "members":{ + "Filter":{"shape":"entityListType"}, + "MaxItems":{"shape":"maxItemsType"}, + "Marker":{"shape":"markerType"} + } + }, + "GetAccountAuthorizationDetailsResponse":{ + "type":"structure", + "members":{ + "UserDetailList":{"shape":"userDetailListType"}, + "GroupDetailList":{"shape":"groupDetailListType"}, + "RoleDetailList":{"shape":"roleDetailListType"}, + "Policies":{"shape":"ManagedPolicyDetailListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "GetAccountPasswordPolicyResponse":{ + "type":"structure", + "required":["PasswordPolicy"], + "members":{ + "PasswordPolicy":{"shape":"PasswordPolicy"} + } + }, + "GetAccountSummaryResponse":{ + "type":"structure", + "members":{ + "SummaryMap":{"shape":"summaryMapType"} + } + }, + "GetContextKeysForCustomPolicyRequest":{ + "type":"structure", + "required":["PolicyInputList"], + "members":{ + "PolicyInputList":{"shape":"SimulationPolicyListType"} + } + }, + "GetContextKeysForPolicyResponse":{ + "type":"structure", + "members":{ + "ContextKeyNames":{"shape":"ContextKeyNamesResultListType"} + } + }, + "GetContextKeysForPrincipalPolicyRequest":{ + "type":"structure", + "required":["PolicySourceArn"], + "members":{ + "PolicySourceArn":{"shape":"arnType"}, + "PolicyInputList":{"shape":"SimulationPolicyListType"} + } + }, + "GetCredentialReportResponse":{ + "type":"structure", + "members":{ + "Content":{"shape":"ReportContentType"}, + "ReportFormat":{"shape":"ReportFormatType"}, + "GeneratedTime":{"shape":"dateType"} + } + }, + "GetGroupPolicyRequest":{ + "type":"structure", + "required":[ + "GroupName", + "PolicyName" + ], + "members":{ + "GroupName":{"shape":"groupNameType"}, + "PolicyName":{"shape":"policyNameType"} + } + }, + "GetGroupPolicyResponse":{ + "type":"structure", + "required":[ + "GroupName", + "PolicyName", + "PolicyDocument" + ], + 
"members":{ + "GroupName":{"shape":"groupNameType"}, + "PolicyName":{"shape":"policyNameType"}, + "PolicyDocument":{"shape":"policyDocumentType"} + } + }, + "GetGroupRequest":{ + "type":"structure", + "required":["GroupName"], + "members":{ + "GroupName":{"shape":"groupNameType"}, + "Marker":{"shape":"markerType"}, + "MaxItems":{"shape":"maxItemsType"} + } + }, + "GetGroupResponse":{ + "type":"structure", + "required":[ + "Group", + "Users" + ], + "members":{ + "Group":{"shape":"Group"}, + "Users":{"shape":"userListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "GetInstanceProfileRequest":{ + "type":"structure", + "required":["InstanceProfileName"], + "members":{ + "InstanceProfileName":{"shape":"instanceProfileNameType"} + } + }, + "GetInstanceProfileResponse":{ + "type":"structure", + "required":["InstanceProfile"], + "members":{ + "InstanceProfile":{"shape":"InstanceProfile"} + } + }, + "GetLoginProfileRequest":{ + "type":"structure", + "required":["UserName"], + "members":{ + "UserName":{"shape":"userNameType"} + } + }, + "GetLoginProfileResponse":{ + "type":"structure", + "required":["LoginProfile"], + "members":{ + "LoginProfile":{"shape":"LoginProfile"} + } + }, + "GetOpenIDConnectProviderRequest":{ + "type":"structure", + "required":["OpenIDConnectProviderArn"], + "members":{ + "OpenIDConnectProviderArn":{"shape":"arnType"} + } + }, + "GetOpenIDConnectProviderResponse":{ + "type":"structure", + "members":{ + "Url":{"shape":"OpenIDConnectProviderUrlType"}, + "ClientIDList":{"shape":"clientIDListType"}, + "ThumbprintList":{"shape":"thumbprintListType"}, + "CreateDate":{"shape":"dateType"} + } + }, + "GetPolicyRequest":{ + "type":"structure", + "required":["PolicyArn"], + "members":{ + "PolicyArn":{"shape":"arnType"} + } + }, + "GetPolicyResponse":{ + "type":"structure", + "members":{ + "Policy":{"shape":"Policy"} + } + }, + "GetPolicyVersionRequest":{ + "type":"structure", + "required":[ + "PolicyArn", + "VersionId" + ], + "members":{ + "PolicyArn":{"shape":"arnType"}, + "VersionId":{"shape":"policyVersionIdType"} + } + }, + "GetPolicyVersionResponse":{ + "type":"structure", + "members":{ + "PolicyVersion":{"shape":"PolicyVersion"} + } + }, + "GetRolePolicyRequest":{ + "type":"structure", + "required":[ + "RoleName", + "PolicyName" + ], + "members":{ + "RoleName":{"shape":"roleNameType"}, + "PolicyName":{"shape":"policyNameType"} + } + }, + "GetRolePolicyResponse":{ + "type":"structure", + "required":[ + "RoleName", + "PolicyName", + "PolicyDocument" + ], + "members":{ + "RoleName":{"shape":"roleNameType"}, + "PolicyName":{"shape":"policyNameType"}, + "PolicyDocument":{"shape":"policyDocumentType"} + } + }, + "GetRoleRequest":{ + "type":"structure", + "required":["RoleName"], + "members":{ + "RoleName":{"shape":"roleNameType"} + } + }, + "GetRoleResponse":{ + "type":"structure", + "required":["Role"], + "members":{ + "Role":{"shape":"Role"} + } + }, + "GetSAMLProviderRequest":{ + "type":"structure", + "required":["SAMLProviderArn"], + "members":{ + "SAMLProviderArn":{"shape":"arnType"} + } + }, + "GetSAMLProviderResponse":{ + "type":"structure", + "members":{ + "SAMLMetadataDocument":{"shape":"SAMLMetadataDocumentType"}, + "CreateDate":{"shape":"dateType"}, + "ValidUntil":{"shape":"dateType"} + } + }, + "GetSSHPublicKeyRequest":{ + "type":"structure", + "required":[ + "UserName", + "SSHPublicKeyId", + "Encoding" + ], + "members":{ + "UserName":{"shape":"userNameType"}, + "SSHPublicKeyId":{"shape":"publicKeyIdType"}, + 
"Encoding":{"shape":"encodingType"} + } + }, + "GetSSHPublicKeyResponse":{ + "type":"structure", + "members":{ + "SSHPublicKey":{"shape":"SSHPublicKey"} + } + }, + "GetServerCertificateRequest":{ + "type":"structure", + "required":["ServerCertificateName"], + "members":{ + "ServerCertificateName":{"shape":"serverCertificateNameType"} + } + }, + "GetServerCertificateResponse":{ + "type":"structure", + "required":["ServerCertificate"], + "members":{ + "ServerCertificate":{"shape":"ServerCertificate"} + } + }, + "GetUserPolicyRequest":{ + "type":"structure", + "required":[ + "UserName", + "PolicyName" + ], + "members":{ + "UserName":{"shape":"existingUserNameType"}, + "PolicyName":{"shape":"policyNameType"} + } + }, + "GetUserPolicyResponse":{ + "type":"structure", + "required":[ + "UserName", + "PolicyName", + "PolicyDocument" + ], + "members":{ + "UserName":{"shape":"existingUserNameType"}, + "PolicyName":{"shape":"policyNameType"}, + "PolicyDocument":{"shape":"policyDocumentType"} + } + }, + "GetUserRequest":{ + "type":"structure", + "members":{ + "UserName":{"shape":"existingUserNameType"} + } + }, + "GetUserResponse":{ + "type":"structure", + "required":["User"], + "members":{ + "User":{"shape":"User"} + } + }, + "Group":{ + "type":"structure", + "required":[ + "Path", + "GroupName", + "GroupId", + "Arn", + "CreateDate" + ], + "members":{ + "Path":{"shape":"pathType"}, + "GroupName":{"shape":"groupNameType"}, + "GroupId":{"shape":"idType"}, + "Arn":{"shape":"arnType"}, + "CreateDate":{"shape":"dateType"} + } + }, + "GroupDetail":{ + "type":"structure", + "members":{ + "Path":{"shape":"pathType"}, + "GroupName":{"shape":"groupNameType"}, + "GroupId":{"shape":"idType"}, + "Arn":{"shape":"arnType"}, + "CreateDate":{"shape":"dateType"}, + "GroupPolicyList":{"shape":"policyDetailListType"}, + "AttachedManagedPolicies":{"shape":"attachedPoliciesListType"} + } + }, + "InstanceProfile":{ + "type":"structure", + "required":[ + "Path", + "InstanceProfileName", + "InstanceProfileId", + "Arn", + "CreateDate", + "Roles" + ], + "members":{ + "Path":{"shape":"pathType"}, + "InstanceProfileName":{"shape":"instanceProfileNameType"}, + "InstanceProfileId":{"shape":"idType"}, + "Arn":{"shape":"arnType"}, + "CreateDate":{"shape":"dateType"}, + "Roles":{"shape":"roleListType"} + } + }, + "InvalidAuthenticationCodeException":{ + "type":"structure", + "members":{ + "message":{"shape":"invalidAuthenticationCodeMessage"} + }, + "error":{ + "code":"InvalidAuthenticationCode", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "InvalidCertificateException":{ + "type":"structure", + "members":{ + "message":{"shape":"invalidCertificateMessage"} + }, + "error":{ + "code":"InvalidCertificate", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidInputException":{ + "type":"structure", + "members":{ + "message":{"shape":"invalidInputMessage"} + }, + "error":{ + "code":"InvalidInput", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidPublicKeyException":{ + "type":"structure", + "members":{ + "message":{"shape":"invalidPublicKeyMessage"} + }, + "error":{ + "code":"InvalidPublicKey", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidUserTypeException":{ + "type":"structure", + "members":{ + "message":{"shape":"invalidUserTypeMessage"} + }, + "error":{ + "code":"InvalidUserType", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "KeyPairMismatchException":{ + 
"type":"structure", + "members":{ + "message":{"shape":"keyPairMismatchMessage"} + }, + "error":{ + "code":"KeyPairMismatch", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"limitExceededMessage"} + }, + "error":{ + "code":"LimitExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "LineNumber":{"type":"integer"}, + "ListAccessKeysRequest":{ + "type":"structure", + "members":{ + "UserName":{"shape":"existingUserNameType"}, + "Marker":{"shape":"markerType"}, + "MaxItems":{"shape":"maxItemsType"} + } + }, + "ListAccessKeysResponse":{ + "type":"structure", + "required":["AccessKeyMetadata"], + "members":{ + "AccessKeyMetadata":{"shape":"accessKeyMetadataListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "ListAccountAliasesRequest":{ + "type":"structure", + "members":{ + "Marker":{"shape":"markerType"}, + "MaxItems":{"shape":"maxItemsType"} + } + }, + "ListAccountAliasesResponse":{ + "type":"structure", + "required":["AccountAliases"], + "members":{ + "AccountAliases":{"shape":"accountAliasListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "ListAttachedGroupPoliciesRequest":{ + "type":"structure", + "required":["GroupName"], + "members":{ + "GroupName":{"shape":"groupNameType"}, + "PathPrefix":{"shape":"policyPathType"}, + "Marker":{"shape":"markerType"}, + "MaxItems":{"shape":"maxItemsType"} + } + }, + "ListAttachedGroupPoliciesResponse":{ + "type":"structure", + "members":{ + "AttachedPolicies":{"shape":"attachedPoliciesListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "ListAttachedRolePoliciesRequest":{ + "type":"structure", + "required":["RoleName"], + "members":{ + "RoleName":{"shape":"roleNameType"}, + "PathPrefix":{"shape":"policyPathType"}, + "Marker":{"shape":"markerType"}, + "MaxItems":{"shape":"maxItemsType"} + } + }, + "ListAttachedRolePoliciesResponse":{ + "type":"structure", + "members":{ + "AttachedPolicies":{"shape":"attachedPoliciesListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "ListAttachedUserPoliciesRequest":{ + "type":"structure", + "required":["UserName"], + "members":{ + "UserName":{"shape":"userNameType"}, + "PathPrefix":{"shape":"policyPathType"}, + "Marker":{"shape":"markerType"}, + "MaxItems":{"shape":"maxItemsType"} + } + }, + "ListAttachedUserPoliciesResponse":{ + "type":"structure", + "members":{ + "AttachedPolicies":{"shape":"attachedPoliciesListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "ListEntitiesForPolicyRequest":{ + "type":"structure", + "required":["PolicyArn"], + "members":{ + "PolicyArn":{"shape":"arnType"}, + "EntityFilter":{"shape":"EntityType"}, + "PathPrefix":{"shape":"pathType"}, + "Marker":{"shape":"markerType"}, + "MaxItems":{"shape":"maxItemsType"} + } + }, + "ListEntitiesForPolicyResponse":{ + "type":"structure", + "members":{ + "PolicyGroups":{"shape":"PolicyGroupListType"}, + "PolicyUsers":{"shape":"PolicyUserListType"}, + "PolicyRoles":{"shape":"PolicyRoleListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "ListGroupPoliciesRequest":{ + "type":"structure", + "required":["GroupName"], + "members":{ + "GroupName":{"shape":"groupNameType"}, + "Marker":{"shape":"markerType"}, + "MaxItems":{"shape":"maxItemsType"} + } + 
+    },
+    "ListGroupPoliciesResponse":{
+      "type":"structure",
+      "required":["PolicyNames"],
+      "members":{
+        "PolicyNames":{"shape":"policyNameListType"},
+        "IsTruncated":{"shape":"booleanType"},
+        "Marker":{"shape":"markerType"}
+      }
+    },
+    "ListGroupsForUserRequest":{
+      "type":"structure",
+      "required":["UserName"],
+      "members":{
+        "UserName":{"shape":"existingUserNameType"},
+        "Marker":{"shape":"markerType"},
+        "MaxItems":{"shape":"maxItemsType"}
+      }
+    },
+    "ListGroupsForUserResponse":{
+      "type":"structure",
+      "required":["Groups"],
+      "members":{
+        "Groups":{"shape":"groupListType"},
+        "IsTruncated":{"shape":"booleanType"},
+        "Marker":{"shape":"markerType"}
+      }
+    },
+    "ListGroupsRequest":{
+      "type":"structure",
+      "members":{
+        "PathPrefix":{"shape":"pathPrefixType"},
+        "Marker":{"shape":"markerType"},
+        "MaxItems":{"shape":"maxItemsType"}
+      }
+    },
+    "ListGroupsResponse":{
+      "type":"structure",
+      "required":["Groups"],
+      "members":{
+        "Groups":{"shape":"groupListType"},
+        "IsTruncated":{"shape":"booleanType"},
+        "Marker":{"shape":"markerType"}
+      }
+    },
+    "ListInstanceProfilesForRoleRequest":{
+      "type":"structure",
+      "required":["RoleName"],
+      "members":{
+        "RoleName":{"shape":"roleNameType"},
+        "Marker":{"shape":"markerType"},
+        "MaxItems":{"shape":"maxItemsType"}
+      }
+    },
+    "ListInstanceProfilesForRoleResponse":{
+      "type":"structure",
+      "required":["InstanceProfiles"],
+      "members":{
+        "InstanceProfiles":{"shape":"instanceProfileListType"},
+        "IsTruncated":{"shape":"booleanType"},
+        "Marker":{"shape":"markerType"}
+      }
+    },
+    "ListInstanceProfilesRequest":{
+      "type":"structure",
+      "members":{
+        "PathPrefix":{"shape":"pathPrefixType"},
+        "Marker":{"shape":"markerType"},
+        "MaxItems":{"shape":"maxItemsType"}
+      }
+    },
+    "ListInstanceProfilesResponse":{
+      "type":"structure",
+      "required":["InstanceProfiles"],
+      "members":{
+        "InstanceProfiles":{"shape":"instanceProfileListType"},
+        "IsTruncated":{"shape":"booleanType"},
+        "Marker":{"shape":"markerType"}
+      }
+    },
+    "ListMFADevicesRequest":{
+      "type":"structure",
+      "members":{
+        "UserName":{"shape":"existingUserNameType"},
+        "Marker":{"shape":"markerType"},
+        "MaxItems":{"shape":"maxItemsType"}
+      }
+    },
+    "ListMFADevicesResponse":{
+      "type":"structure",
+      "required":["MFADevices"],
+      "members":{
+        "MFADevices":{"shape":"mfaDeviceListType"},
+        "IsTruncated":{"shape":"booleanType"},
+        "Marker":{"shape":"markerType"}
+      }
+    },
+    "ListOpenIDConnectProvidersRequest":{
+      "type":"structure",
+      "members":{
+      }
+    },
+    "ListOpenIDConnectProvidersResponse":{
+      "type":"structure",
+      "members":{
+        "OpenIDConnectProviderList":{"shape":"OpenIDConnectProviderListType"}
+      }
+    },
+    "ListPoliciesRequest":{
+      "type":"structure",
+      "members":{
+        "Scope":{"shape":"policyScopeType"},
+        "OnlyAttached":{"shape":"booleanType"},
+        "PathPrefix":{"shape":"policyPathType"},
+        "Marker":{"shape":"markerType"},
+        "MaxItems":{"shape":"maxItemsType"}
+      }
+    },
+    "ListPoliciesResponse":{
+      "type":"structure",
+      "members":{
+        "Policies":{"shape":"policyListType"},
+        "IsTruncated":{"shape":"booleanType"},
+        "Marker":{"shape":"markerType"}
+      }
+    },
+    "ListPolicyVersionsRequest":{
+      "type":"structure",
+      "required":["PolicyArn"],
+      "members":{
+        "PolicyArn":{"shape":"arnType"},
+        "Marker":{"shape":"markerType"},
+        "MaxItems":{"shape":"maxItemsType"}
+      }
+    },
+    "ListPolicyVersionsResponse":{
+      "type":"structure",
+      "members":{
+        "Versions":{"shape":"policyDocumentVersionListType"},
+        "IsTruncated":{"shape":"booleanType"},
+        "Marker":{"shape":"markerType"}
+      }
+    },
+    "ListRolePoliciesRequest":{
+ "type":"structure", + "required":["RoleName"], + "members":{ + "RoleName":{"shape":"roleNameType"}, + "Marker":{"shape":"markerType"}, + "MaxItems":{"shape":"maxItemsType"} + } + }, + "ListRolePoliciesResponse":{ + "type":"structure", + "required":["PolicyNames"], + "members":{ + "PolicyNames":{"shape":"policyNameListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "ListRolesRequest":{ + "type":"structure", + "members":{ + "PathPrefix":{"shape":"pathPrefixType"}, + "Marker":{"shape":"markerType"}, + "MaxItems":{"shape":"maxItemsType"} + } + }, + "ListRolesResponse":{ + "type":"structure", + "required":["Roles"], + "members":{ + "Roles":{"shape":"roleListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "ListSAMLProvidersRequest":{ + "type":"structure", + "members":{ + } + }, + "ListSAMLProvidersResponse":{ + "type":"structure", + "members":{ + "SAMLProviderList":{"shape":"SAMLProviderListType"} + } + }, + "ListSSHPublicKeysRequest":{ + "type":"structure", + "members":{ + "UserName":{"shape":"userNameType"}, + "Marker":{"shape":"markerType"}, + "MaxItems":{"shape":"maxItemsType"} + } + }, + "ListSSHPublicKeysResponse":{ + "type":"structure", + "members":{ + "SSHPublicKeys":{"shape":"SSHPublicKeyListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "ListServerCertificatesRequest":{ + "type":"structure", + "members":{ + "PathPrefix":{"shape":"pathPrefixType"}, + "Marker":{"shape":"markerType"}, + "MaxItems":{"shape":"maxItemsType"} + } + }, + "ListServerCertificatesResponse":{ + "type":"structure", + "required":["ServerCertificateMetadataList"], + "members":{ + "ServerCertificateMetadataList":{"shape":"serverCertificateMetadataListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "ListSigningCertificatesRequest":{ + "type":"structure", + "members":{ + "UserName":{"shape":"existingUserNameType"}, + "Marker":{"shape":"markerType"}, + "MaxItems":{"shape":"maxItemsType"} + } + }, + "ListSigningCertificatesResponse":{ + "type":"structure", + "required":["Certificates"], + "members":{ + "Certificates":{"shape":"certificateListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "ListUserPoliciesRequest":{ + "type":"structure", + "required":["UserName"], + "members":{ + "UserName":{"shape":"existingUserNameType"}, + "Marker":{"shape":"markerType"}, + "MaxItems":{"shape":"maxItemsType"} + } + }, + "ListUserPoliciesResponse":{ + "type":"structure", + "required":["PolicyNames"], + "members":{ + "PolicyNames":{"shape":"policyNameListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "ListUsersRequest":{ + "type":"structure", + "members":{ + "PathPrefix":{"shape":"pathPrefixType"}, + "Marker":{"shape":"markerType"}, + "MaxItems":{"shape":"maxItemsType"} + } + }, + "ListUsersResponse":{ + "type":"structure", + "required":["Users"], + "members":{ + "Users":{"shape":"userListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "ListVirtualMFADevicesRequest":{ + "type":"structure", + "members":{ + "AssignmentStatus":{"shape":"assignmentStatusType"}, + "Marker":{"shape":"markerType"}, + "MaxItems":{"shape":"maxItemsType"} + } + }, + "ListVirtualMFADevicesResponse":{ + "type":"structure", + "required":["VirtualMFADevices"], + "members":{ + "VirtualMFADevices":{"shape":"virtualMFADeviceListType"}, + 
"IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "LoginProfile":{ + "type":"structure", + "required":[ + "UserName", + "CreateDate" + ], + "members":{ + "UserName":{"shape":"userNameType"}, + "CreateDate":{"shape":"dateType"}, + "PasswordResetRequired":{"shape":"booleanType"} + } + }, + "MFADevice":{ + "type":"structure", + "required":[ + "UserName", + "SerialNumber", + "EnableDate" + ], + "members":{ + "UserName":{"shape":"userNameType"}, + "SerialNumber":{"shape":"serialNumberType"}, + "EnableDate":{"shape":"dateType"} + } + }, + "MalformedCertificateException":{ + "type":"structure", + "members":{ + "message":{"shape":"malformedCertificateMessage"} + }, + "error":{ + "code":"MalformedCertificate", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "MalformedPolicyDocumentException":{ + "type":"structure", + "members":{ + "message":{"shape":"malformedPolicyDocumentMessage"} + }, + "error":{ + "code":"MalformedPolicyDocument", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ManagedPolicyDetail":{ + "type":"structure", + "members":{ + "PolicyName":{"shape":"policyNameType"}, + "PolicyId":{"shape":"idType"}, + "Arn":{"shape":"arnType"}, + "Path":{"shape":"policyPathType"}, + "DefaultVersionId":{"shape":"policyVersionIdType"}, + "AttachmentCount":{"shape":"attachmentCountType"}, + "IsAttachable":{"shape":"booleanType"}, + "Description":{"shape":"policyDescriptionType"}, + "CreateDate":{"shape":"dateType"}, + "UpdateDate":{"shape":"dateType"}, + "PolicyVersionList":{"shape":"policyDocumentVersionListType"} + } + }, + "ManagedPolicyDetailListType":{ + "type":"list", + "member":{"shape":"ManagedPolicyDetail"} + }, + "NoSuchEntityException":{ + "type":"structure", + "members":{ + "message":{"shape":"noSuchEntityMessage"} + }, + "error":{ + "code":"NoSuchEntity", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "OpenIDConnectProviderListEntry":{ + "type":"structure", + "members":{ + "Arn":{"shape":"arnType"} + } + }, + "OpenIDConnectProviderListType":{ + "type":"list", + "member":{"shape":"OpenIDConnectProviderListEntry"} + }, + "OpenIDConnectProviderUrlType":{ + "type":"string", + "max":255, + "min":1 + }, + "PasswordPolicy":{ + "type":"structure", + "members":{ + "MinimumPasswordLength":{"shape":"minimumPasswordLengthType"}, + "RequireSymbols":{"shape":"booleanType"}, + "RequireNumbers":{"shape":"booleanType"}, + "RequireUppercaseCharacters":{"shape":"booleanType"}, + "RequireLowercaseCharacters":{"shape":"booleanType"}, + "AllowUsersToChangePassword":{"shape":"booleanType"}, + "ExpirePasswords":{"shape":"booleanType"}, + "MaxPasswordAge":{"shape":"maxPasswordAgeType"}, + "PasswordReusePrevention":{"shape":"passwordReusePreventionType"}, + "HardExpiry":{"shape":"booleanObjectType"} + } + }, + "PasswordPolicyViolationException":{ + "type":"structure", + "members":{ + "message":{"shape":"passwordPolicyViolationMessage"} + }, + "error":{ + "code":"PasswordPolicyViolation", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Policy":{ + "type":"structure", + "members":{ + "PolicyName":{"shape":"policyNameType"}, + "PolicyId":{"shape":"idType"}, + "Arn":{"shape":"arnType"}, + "Path":{"shape":"policyPathType"}, + "DefaultVersionId":{"shape":"policyVersionIdType"}, + "AttachmentCount":{"shape":"attachmentCountType"}, + "IsAttachable":{"shape":"booleanType"}, + "Description":{"shape":"policyDescriptionType"}, + "CreateDate":{"shape":"dateType"}, + 
"UpdateDate":{"shape":"dateType"} + } + }, + "PolicyDetail":{ + "type":"structure", + "members":{ + "PolicyName":{"shape":"policyNameType"}, + "PolicyDocument":{"shape":"policyDocumentType"} + } + }, + "PolicyEvaluationDecisionType":{ + "type":"string", + "enum":[ + "allowed", + "explicitDeny", + "implicitDeny" + ] + }, + "PolicyEvaluationException":{ + "type":"structure", + "members":{ + "message":{"shape":"policyEvaluationErrorMessage"} + }, + "error":{ + "code":"PolicyEvaluation", + "httpStatusCode":500 + }, + "exception":true + }, + "PolicyGroup":{ + "type":"structure", + "members":{ + "GroupName":{"shape":"groupNameType"}, + "GroupId":{"shape":"idType"} + } + }, + "PolicyGroupListType":{ + "type":"list", + "member":{"shape":"PolicyGroup"} + }, + "PolicyIdentifierType":{"type":"string"}, + "PolicyRole":{ + "type":"structure", + "members":{ + "RoleName":{"shape":"roleNameType"}, + "RoleId":{"shape":"idType"} + } + }, + "PolicyRoleListType":{ + "type":"list", + "member":{"shape":"PolicyRole"} + }, + "PolicySourceType":{ + "type":"string", + "enum":[ + "user", + "group", + "role", + "aws-managed", + "user-managed", + "resource", + "none" + ] + }, + "PolicyUser":{ + "type":"structure", + "members":{ + "UserName":{"shape":"userNameType"}, + "UserId":{"shape":"idType"} + } + }, + "PolicyUserListType":{ + "type":"list", + "member":{"shape":"PolicyUser"} + }, + "PolicyVersion":{ + "type":"structure", + "members":{ + "Document":{"shape":"policyDocumentType"}, + "VersionId":{"shape":"policyVersionIdType"}, + "IsDefaultVersion":{"shape":"booleanType"}, + "CreateDate":{"shape":"dateType"} + } + }, + "Position":{ + "type":"structure", + "members":{ + "Line":{"shape":"LineNumber"}, + "Column":{"shape":"ColumnNumber"} + } + }, + "PutGroupPolicyRequest":{ + "type":"structure", + "required":[ + "GroupName", + "PolicyName", + "PolicyDocument" + ], + "members":{ + "GroupName":{"shape":"groupNameType"}, + "PolicyName":{"shape":"policyNameType"}, + "PolicyDocument":{"shape":"policyDocumentType"} + } + }, + "PutRolePolicyRequest":{ + "type":"structure", + "required":[ + "RoleName", + "PolicyName", + "PolicyDocument" + ], + "members":{ + "RoleName":{"shape":"roleNameType"}, + "PolicyName":{"shape":"policyNameType"}, + "PolicyDocument":{"shape":"policyDocumentType"} + } + }, + "PutUserPolicyRequest":{ + "type":"structure", + "required":[ + "UserName", + "PolicyName", + "PolicyDocument" + ], + "members":{ + "UserName":{"shape":"existingUserNameType"}, + "PolicyName":{"shape":"policyNameType"}, + "PolicyDocument":{"shape":"policyDocumentType"} + } + }, + "RemoveClientIDFromOpenIDConnectProviderRequest":{ + "type":"structure", + "required":[ + "OpenIDConnectProviderArn", + "ClientID" + ], + "members":{ + "OpenIDConnectProviderArn":{"shape":"arnType"}, + "ClientID":{"shape":"clientIDType"} + } + }, + "RemoveRoleFromInstanceProfileRequest":{ + "type":"structure", + "required":[ + "InstanceProfileName", + "RoleName" + ], + "members":{ + "InstanceProfileName":{"shape":"instanceProfileNameType"}, + "RoleName":{"shape":"roleNameType"} + } + }, + "RemoveUserFromGroupRequest":{ + "type":"structure", + "required":[ + "GroupName", + "UserName" + ], + "members":{ + "GroupName":{"shape":"groupNameType"}, + "UserName":{"shape":"existingUserNameType"} + } + }, + "ReportContentType":{"type":"blob"}, + "ReportFormatType":{ + "type":"string", + "enum":["text/csv"] + }, + "ReportStateDescriptionType":{"type":"string"}, + "ReportStateType":{ + "type":"string", + "enum":[ + "STARTED", + "INPROGRESS", + "COMPLETE" + ] + }, + 
"ResourceHandlingOptionType":{ + "type":"string", + "max":64, + "min":1 + }, + "ResourceNameListType":{ + "type":"list", + "member":{"shape":"ResourceNameType"} + }, + "ResourceNameType":{ + "type":"string", + "max":2048, + "min":1 + }, + "ResourceSpecificResult":{ + "type":"structure", + "required":[ + "EvalResourceName", + "EvalResourceDecision" + ], + "members":{ + "EvalResourceName":{"shape":"ResourceNameType"}, + "EvalResourceDecision":{"shape":"PolicyEvaluationDecisionType"}, + "MatchedStatements":{"shape":"StatementListType"}, + "MissingContextValues":{"shape":"ContextKeyNamesResultListType"}, + "EvalDecisionDetails":{"shape":"EvalDecisionDetailsType"} + } + }, + "ResourceSpecificResultListType":{ + "type":"list", + "member":{"shape":"ResourceSpecificResult"} + }, + "ResyncMFADeviceRequest":{ + "type":"structure", + "required":[ + "UserName", + "SerialNumber", + "AuthenticationCode1", + "AuthenticationCode2" + ], + "members":{ + "UserName":{"shape":"existingUserNameType"}, + "SerialNumber":{"shape":"serialNumberType"}, + "AuthenticationCode1":{"shape":"authenticationCodeType"}, + "AuthenticationCode2":{"shape":"authenticationCodeType"} + } + }, + "Role":{ + "type":"structure", + "required":[ + "Path", + "RoleName", + "RoleId", + "Arn", + "CreateDate" + ], + "members":{ + "Path":{"shape":"pathType"}, + "RoleName":{"shape":"roleNameType"}, + "RoleId":{"shape":"idType"}, + "Arn":{"shape":"arnType"}, + "CreateDate":{"shape":"dateType"}, + "AssumeRolePolicyDocument":{"shape":"policyDocumentType"} + } + }, + "RoleDetail":{ + "type":"structure", + "members":{ + "Path":{"shape":"pathType"}, + "RoleName":{"shape":"roleNameType"}, + "RoleId":{"shape":"idType"}, + "Arn":{"shape":"arnType"}, + "CreateDate":{"shape":"dateType"}, + "AssumeRolePolicyDocument":{"shape":"policyDocumentType"}, + "InstanceProfileList":{"shape":"instanceProfileListType"}, + "RolePolicyList":{"shape":"policyDetailListType"}, + "AttachedManagedPolicies":{"shape":"attachedPoliciesListType"} + } + }, + "SAMLMetadataDocumentType":{ + "type":"string", + "max":10000000, + "min":1000 + }, + "SAMLProviderListEntry":{ + "type":"structure", + "members":{ + "Arn":{"shape":"arnType"}, + "ValidUntil":{"shape":"dateType"}, + "CreateDate":{"shape":"dateType"} + } + }, + "SAMLProviderListType":{ + "type":"list", + "member":{"shape":"SAMLProviderListEntry"} + }, + "SAMLProviderNameType":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[\\w._-]+" + }, + "SSHPublicKey":{ + "type":"structure", + "required":[ + "UserName", + "SSHPublicKeyId", + "Fingerprint", + "SSHPublicKeyBody", + "Status" + ], + "members":{ + "UserName":{"shape":"userNameType"}, + "SSHPublicKeyId":{"shape":"publicKeyIdType"}, + "Fingerprint":{"shape":"publicKeyFingerprintType"}, + "SSHPublicKeyBody":{"shape":"publicKeyMaterialType"}, + "Status":{"shape":"statusType"}, + "UploadDate":{"shape":"dateType"} + } + }, + "SSHPublicKeyListType":{ + "type":"list", + "member":{"shape":"SSHPublicKeyMetadata"} + }, + "SSHPublicKeyMetadata":{ + "type":"structure", + "required":[ + "UserName", + "SSHPublicKeyId", + "Status", + "UploadDate" + ], + "members":{ + "UserName":{"shape":"userNameType"}, + "SSHPublicKeyId":{"shape":"publicKeyIdType"}, + "Status":{"shape":"statusType"}, + "UploadDate":{"shape":"dateType"} + } + }, + "ServerCertificate":{ + "type":"structure", + "required":[ + "ServerCertificateMetadata", + "CertificateBody" + ], + "members":{ + "ServerCertificateMetadata":{"shape":"ServerCertificateMetadata"}, + "CertificateBody":{"shape":"certificateBodyType"}, + 
"CertificateChain":{"shape":"certificateChainType"} + } + }, + "ServerCertificateMetadata":{ + "type":"structure", + "required":[ + "Path", + "ServerCertificateName", + "ServerCertificateId", + "Arn" + ], + "members":{ + "Path":{"shape":"pathType"}, + "ServerCertificateName":{"shape":"serverCertificateNameType"}, + "ServerCertificateId":{"shape":"idType"}, + "Arn":{"shape":"arnType"}, + "UploadDate":{"shape":"dateType"}, + "Expiration":{"shape":"dateType"} + } + }, + "ServiceFailureException":{ + "type":"structure", + "members":{ + "message":{"shape":"serviceFailureExceptionMessage"} + }, + "error":{ + "code":"ServiceFailure", + "httpStatusCode":500 + }, + "exception":true + }, + "SetDefaultPolicyVersionRequest":{ + "type":"structure", + "required":[ + "PolicyArn", + "VersionId" + ], + "members":{ + "PolicyArn":{"shape":"arnType"}, + "VersionId":{"shape":"policyVersionIdType"} + } + }, + "SigningCertificate":{ + "type":"structure", + "required":[ + "UserName", + "CertificateId", + "CertificateBody", + "Status" + ], + "members":{ + "UserName":{"shape":"userNameType"}, + "CertificateId":{"shape":"certificateIdType"}, + "CertificateBody":{"shape":"certificateBodyType"}, + "Status":{"shape":"statusType"}, + "UploadDate":{"shape":"dateType"} + } + }, + "SimulateCustomPolicyRequest":{ + "type":"structure", + "required":[ + "PolicyInputList", + "ActionNames" + ], + "members":{ + "PolicyInputList":{"shape":"SimulationPolicyListType"}, + "ActionNames":{"shape":"ActionNameListType"}, + "ResourceArns":{"shape":"ResourceNameListType"}, + "ResourcePolicy":{"shape":"policyDocumentType"}, + "ResourceOwner":{"shape":"ResourceNameType"}, + "CallerArn":{"shape":"ResourceNameType"}, + "ContextEntries":{"shape":"ContextEntryListType"}, + "ResourceHandlingOption":{"shape":"ResourceHandlingOptionType"}, + "MaxItems":{"shape":"maxItemsType"}, + "Marker":{"shape":"markerType"} + } + }, + "SimulatePolicyResponse":{ + "type":"structure", + "members":{ + "EvaluationResults":{"shape":"EvaluationResultsListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "SimulatePrincipalPolicyRequest":{ + "type":"structure", + "required":[ + "PolicySourceArn", + "ActionNames" + ], + "members":{ + "PolicySourceArn":{"shape":"arnType"}, + "PolicyInputList":{"shape":"SimulationPolicyListType"}, + "ActionNames":{"shape":"ActionNameListType"}, + "ResourceArns":{"shape":"ResourceNameListType"}, + "ResourcePolicy":{"shape":"policyDocumentType"}, + "ResourceOwner":{"shape":"ResourceNameType"}, + "CallerArn":{"shape":"ResourceNameType"}, + "ContextEntries":{"shape":"ContextEntryListType"}, + "ResourceHandlingOption":{"shape":"ResourceHandlingOptionType"}, + "MaxItems":{"shape":"maxItemsType"}, + "Marker":{"shape":"markerType"} + } + }, + "SimulationPolicyListType":{ + "type":"list", + "member":{"shape":"policyDocumentType"} + }, + "Statement":{ + "type":"structure", + "members":{ + "SourcePolicyId":{"shape":"PolicyIdentifierType"}, + "SourcePolicyType":{"shape":"PolicySourceType"}, + "StartPosition":{"shape":"Position"}, + "EndPosition":{"shape":"Position"} + } + }, + "StatementListType":{ + "type":"list", + "member":{"shape":"Statement"} + }, + "UnrecognizedPublicKeyEncodingException":{ + "type":"structure", + "members":{ + "message":{"shape":"unrecognizedPublicKeyEncodingMessage"} + }, + "error":{ + "code":"UnrecognizedPublicKeyEncoding", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "UpdateAccessKeyRequest":{ + "type":"structure", + "required":[ + "AccessKeyId", + 
"Status" + ], + "members":{ + "UserName":{"shape":"existingUserNameType"}, + "AccessKeyId":{"shape":"accessKeyIdType"}, + "Status":{"shape":"statusType"} + } + }, + "UpdateAccountPasswordPolicyRequest":{ + "type":"structure", + "members":{ + "MinimumPasswordLength":{"shape":"minimumPasswordLengthType"}, + "RequireSymbols":{"shape":"booleanType"}, + "RequireNumbers":{"shape":"booleanType"}, + "RequireUppercaseCharacters":{"shape":"booleanType"}, + "RequireLowercaseCharacters":{"shape":"booleanType"}, + "AllowUsersToChangePassword":{"shape":"booleanType"}, + "MaxPasswordAge":{"shape":"maxPasswordAgeType"}, + "PasswordReusePrevention":{"shape":"passwordReusePreventionType"}, + "HardExpiry":{"shape":"booleanObjectType"} + } + }, + "UpdateAssumeRolePolicyRequest":{ + "type":"structure", + "required":[ + "RoleName", + "PolicyDocument" + ], + "members":{ + "RoleName":{"shape":"roleNameType"}, + "PolicyDocument":{"shape":"policyDocumentType"} + } + }, + "UpdateGroupRequest":{ + "type":"structure", + "required":["GroupName"], + "members":{ + "GroupName":{"shape":"groupNameType"}, + "NewPath":{"shape":"pathType"}, + "NewGroupName":{"shape":"groupNameType"} + } + }, + "UpdateLoginProfileRequest":{ + "type":"structure", + "required":["UserName"], + "members":{ + "UserName":{"shape":"userNameType"}, + "Password":{"shape":"passwordType"}, + "PasswordResetRequired":{"shape":"booleanObjectType"} + } + }, + "UpdateOpenIDConnectProviderThumbprintRequest":{ + "type":"structure", + "required":[ + "OpenIDConnectProviderArn", + "ThumbprintList" + ], + "members":{ + "OpenIDConnectProviderArn":{"shape":"arnType"}, + "ThumbprintList":{"shape":"thumbprintListType"} + } + }, + "UpdateSAMLProviderRequest":{ + "type":"structure", + "required":[ + "SAMLMetadataDocument", + "SAMLProviderArn" + ], + "members":{ + "SAMLMetadataDocument":{"shape":"SAMLMetadataDocumentType"}, + "SAMLProviderArn":{"shape":"arnType"} + } + }, + "UpdateSAMLProviderResponse":{ + "type":"structure", + "members":{ + "SAMLProviderArn":{"shape":"arnType"} + } + }, + "UpdateSSHPublicKeyRequest":{ + "type":"structure", + "required":[ + "UserName", + "SSHPublicKeyId", + "Status" + ], + "members":{ + "UserName":{"shape":"userNameType"}, + "SSHPublicKeyId":{"shape":"publicKeyIdType"}, + "Status":{"shape":"statusType"} + } + }, + "UpdateServerCertificateRequest":{ + "type":"structure", + "required":["ServerCertificateName"], + "members":{ + "ServerCertificateName":{"shape":"serverCertificateNameType"}, + "NewPath":{"shape":"pathType"}, + "NewServerCertificateName":{"shape":"serverCertificateNameType"} + } + }, + "UpdateSigningCertificateRequest":{ + "type":"structure", + "required":[ + "CertificateId", + "Status" + ], + "members":{ + "UserName":{"shape":"existingUserNameType"}, + "CertificateId":{"shape":"certificateIdType"}, + "Status":{"shape":"statusType"} + } + }, + "UpdateUserRequest":{ + "type":"structure", + "required":["UserName"], + "members":{ + "UserName":{"shape":"existingUserNameType"}, + "NewPath":{"shape":"pathType"}, + "NewUserName":{"shape":"userNameType"} + } + }, + "UploadSSHPublicKeyRequest":{ + "type":"structure", + "required":[ + "UserName", + "SSHPublicKeyBody" + ], + "members":{ + "UserName":{"shape":"userNameType"}, + "SSHPublicKeyBody":{"shape":"publicKeyMaterialType"} + } + }, + "UploadSSHPublicKeyResponse":{ + "type":"structure", + "members":{ + "SSHPublicKey":{"shape":"SSHPublicKey"} + } + }, + "UploadServerCertificateRequest":{ + "type":"structure", + "required":[ + "ServerCertificateName", + "CertificateBody", + 
"PrivateKey" + ], + "members":{ + "Path":{"shape":"pathType"}, + "ServerCertificateName":{"shape":"serverCertificateNameType"}, + "CertificateBody":{"shape":"certificateBodyType"}, + "PrivateKey":{"shape":"privateKeyType"}, + "CertificateChain":{"shape":"certificateChainType"} + } + }, + "UploadServerCertificateResponse":{ + "type":"structure", + "members":{ + "ServerCertificateMetadata":{"shape":"ServerCertificateMetadata"} + } + }, + "UploadSigningCertificateRequest":{ + "type":"structure", + "required":["CertificateBody"], + "members":{ + "UserName":{"shape":"existingUserNameType"}, + "CertificateBody":{"shape":"certificateBodyType"} + } + }, + "UploadSigningCertificateResponse":{ + "type":"structure", + "required":["Certificate"], + "members":{ + "Certificate":{"shape":"SigningCertificate"} + } + }, + "User":{ + "type":"structure", + "required":[ + "Path", + "UserName", + "UserId", + "Arn", + "CreateDate" + ], + "members":{ + "Path":{"shape":"pathType"}, + "UserName":{"shape":"userNameType"}, + "UserId":{"shape":"idType"}, + "Arn":{"shape":"arnType"}, + "CreateDate":{"shape":"dateType"}, + "PasswordLastUsed":{"shape":"dateType"} + } + }, + "UserDetail":{ + "type":"structure", + "members":{ + "Path":{"shape":"pathType"}, + "UserName":{"shape":"userNameType"}, + "UserId":{"shape":"idType"}, + "Arn":{"shape":"arnType"}, + "CreateDate":{"shape":"dateType"}, + "UserPolicyList":{"shape":"policyDetailListType"}, + "GroupList":{"shape":"groupNameListType"}, + "AttachedManagedPolicies":{"shape":"attachedPoliciesListType"} + } + }, + "VirtualMFADevice":{ + "type":"structure", + "required":["SerialNumber"], + "members":{ + "SerialNumber":{"shape":"serialNumberType"}, + "Base32StringSeed":{"shape":"BootstrapDatum"}, + "QRCodePNG":{"shape":"BootstrapDatum"}, + "User":{"shape":"User"}, + "EnableDate":{"shape":"dateType"} + } + }, + "accessKeyIdType":{ + "type":"string", + "max":32, + "min":16, + "pattern":"[\\w]+" + }, + "accessKeyMetadataListType":{ + "type":"list", + "member":{"shape":"AccessKeyMetadata"} + }, + "accessKeySecretType":{ + "type":"string", + "sensitive":true + }, + "accountAliasListType":{ + "type":"list", + "member":{"shape":"accountAliasType"} + }, + "accountAliasType":{ + "type":"string", + "max":63, + "min":3, + "pattern":"^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$" + }, + "arnType":{ + "type":"string", + "max":2048, + "min":20 + }, + "assignmentStatusType":{ + "type":"string", + "enum":[ + "Assigned", + "Unassigned", + "Any" + ] + }, + "attachedPoliciesListType":{ + "type":"list", + "member":{"shape":"AttachedPolicy"} + }, + "attachmentCountType":{"type":"integer"}, + "authenticationCodeType":{ + "type":"string", + "max":6, + "min":6, + "pattern":"[\\d]+" + }, + "booleanObjectType":{ + "type":"boolean", + "box":true + }, + "booleanType":{"type":"boolean"}, + "certificateBodyType":{ + "type":"string", + "max":16384, + "min":1, + "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u00FF]+" + }, + "certificateChainType":{ + "type":"string", + "max":2097152, + "min":1, + "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u00FF]+" + }, + "certificateIdType":{ + "type":"string", + "max":128, + "min":24, + "pattern":"[\\w]+" + }, + "certificateListType":{ + "type":"list", + "member":{"shape":"SigningCertificate"} + }, + "clientIDListType":{ + "type":"list", + "member":{"shape":"clientIDType"} + }, + "clientIDType":{ + "type":"string", + "max":255, + "min":1 + }, + "credentialReportExpiredExceptionMessage":{"type":"string"}, + "credentialReportNotPresentExceptionMessage":{"type":"string"}, + 
"credentialReportNotReadyExceptionMessage":{"type":"string"}, + "dateType":{"type":"timestamp"}, + "deleteConflictMessage":{"type":"string"}, + "duplicateCertificateMessage":{"type":"string"}, + "duplicateSSHPublicKeyMessage":{"type":"string"}, + "encodingType":{ + "type":"string", + "enum":[ + "SSH", + "PEM" + ] + }, + "entityAlreadyExistsMessage":{"type":"string"}, + "entityListType":{ + "type":"list", + "member":{"shape":"EntityType"} + }, + "entityTemporarilyUnmodifiableMessage":{"type":"string"}, + "existingUserNameType":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[\\w+=,.@-]+" + }, + "groupDetailListType":{ + "type":"list", + "member":{"shape":"GroupDetail"} + }, + "groupListType":{ + "type":"list", + "member":{"shape":"Group"} + }, + "groupNameListType":{ + "type":"list", + "member":{"shape":"groupNameType"} + }, + "groupNameType":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[\\w+=,.@-]+" + }, + "idType":{ + "type":"string", + "max":32, + "min":16, + "pattern":"[\\w]+" + }, + "instanceProfileListType":{ + "type":"list", + "member":{"shape":"InstanceProfile"} + }, + "instanceProfileNameType":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[\\w+=,.@-]+" + }, + "invalidAuthenticationCodeMessage":{"type":"string"}, + "invalidCertificateMessage":{"type":"string"}, + "invalidInputMessage":{"type":"string"}, + "invalidPublicKeyMessage":{"type":"string"}, + "invalidUserTypeMessage":{"type":"string"}, + "keyPairMismatchMessage":{"type":"string"}, + "limitExceededMessage":{"type":"string"}, + "malformedCertificateMessage":{"type":"string"}, + "malformedPolicyDocumentMessage":{"type":"string"}, + "markerType":{ + "type":"string", + "max":320, + "min":1, + "pattern":"[\\u0020-\\u00FF]+" + }, + "maxItemsType":{ + "type":"integer", + "max":1000, + "min":1 + }, + "maxPasswordAgeType":{ + "type":"integer", + "box":true, + "max":1095, + "min":1 + }, + "mfaDeviceListType":{ + "type":"list", + "member":{"shape":"MFADevice"} + }, + "minimumPasswordLengthType":{ + "type":"integer", + "max":128, + "min":6 + }, + "noSuchEntityMessage":{"type":"string"}, + "passwordPolicyViolationMessage":{"type":"string"}, + "passwordReusePreventionType":{ + "type":"integer", + "box":true, + "max":24, + "min":1 + }, + "passwordType":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u00FF]+", + "sensitive":true + }, + "pathPrefixType":{ + "type":"string", + "max":512, + "min":1, + "pattern":"\\u002F[\\u0021-\\u007F]*" + }, + "pathType":{ + "type":"string", + "max":512, + "min":1, + "pattern":"(\\u002F)|(\\u002F[\\u0021-\\u007F]+\\u002F)" + }, + "policyDescriptionType":{ + "type":"string", + "max":1000 + }, + "policyDetailListType":{ + "type":"list", + "member":{"shape":"PolicyDetail"} + }, + "policyDocumentType":{ + "type":"string", + "max":131072, + "min":1, + "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u00FF]+" + }, + "policyDocumentVersionListType":{ + "type":"list", + "member":{"shape":"PolicyVersion"} + }, + "policyEvaluationErrorMessage":{"type":"string"}, + "policyListType":{ + "type":"list", + "member":{"shape":"Policy"} + }, + "policyNameListType":{ + "type":"list", + "member":{"shape":"policyNameType"} + }, + "policyNameType":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[\\w+=,.@-]+" + }, + "policyPathType":{ + "type":"string", + "pattern":"((/[A-Za-z0-9\\.,\\+@=_-]+)*)/" + }, + "policyScopeType":{ + "type":"string", + "enum":[ + "All", + "AWS", + "Local" + ] + }, + "policyVersionIdType":{ + "type":"string", + 
"pattern":"v[1-9][0-9]*(\\.[A-Za-z0-9-]*)?" + }, + "privateKeyType":{ + "type":"string", + "max":16384, + "min":1, + "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u00FF]+", + "sensitive":true + }, + "publicKeyFingerprintType":{ + "type":"string", + "max":48, + "min":48, + "pattern":"[:\\w]+" + }, + "publicKeyIdType":{ + "type":"string", + "max":128, + "min":20, + "pattern":"[\\w]+" + }, + "publicKeyMaterialType":{ + "type":"string", + "max":16384, + "min":1, + "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u00FF]+" + }, + "roleDetailListType":{ + "type":"list", + "member":{"shape":"RoleDetail"} + }, + "roleListType":{ + "type":"list", + "member":{"shape":"Role"} + }, + "roleNameType":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[\\w+=,.@-]+" + }, + "serialNumberType":{ + "type":"string", + "max":256, + "min":9, + "pattern":"[\\w+=/:,.@-]+" + }, + "serverCertificateMetadataListType":{ + "type":"list", + "member":{"shape":"ServerCertificateMetadata"} + }, + "serverCertificateNameType":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[\\w+=,.@-]+" + }, + "serviceFailureExceptionMessage":{"type":"string"}, + "statusType":{ + "type":"string", + "enum":[ + "Active", + "Inactive" + ] + }, + "stringType":{"type":"string"}, + "summaryKeyType":{ + "type":"string", + "enum":[ + "Users", + "UsersQuota", + "Groups", + "GroupsQuota", + "ServerCertificates", + "ServerCertificatesQuota", + "UserPolicySizeQuota", + "GroupPolicySizeQuota", + "GroupsPerUserQuota", + "SigningCertificatesPerUserQuota", + "AccessKeysPerUserQuota", + "MFADevices", + "MFADevicesInUse", + "AccountMFAEnabled", + "AccountAccessKeysPresent", + "AccountSigningCertificatesPresent", + "AttachedPoliciesPerGroupQuota", + "AttachedPoliciesPerRoleQuota", + "AttachedPoliciesPerUserQuota", + "Policies", + "PoliciesQuota", + "PolicySizeQuota", + "PolicyVersionsInUse", + "PolicyVersionsInUseQuota", + "VersionsPerPolicyQuota" + ] + }, + "summaryMapType":{ + "type":"map", + "key":{"shape":"summaryKeyType"}, + "value":{"shape":"summaryValueType"} + }, + "summaryValueType":{"type":"integer"}, + "thumbprintListType":{ + "type":"list", + "member":{"shape":"thumbprintType"} + }, + "thumbprintType":{ + "type":"string", + "max":40, + "min":40 + }, + "unrecognizedPublicKeyEncodingMessage":{"type":"string"}, + "userDetailListType":{ + "type":"list", + "member":{"shape":"UserDetail"} + }, + "userListType":{ + "type":"list", + "member":{"shape":"User"} + }, + "userNameType":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[\\w+=,.@-]+" + }, + "virtualMFADeviceListType":{ + "type":"list", + "member":{"shape":"VirtualMFADevice"} + }, + "virtualMFADeviceName":{ + "type":"string", + "min":1, + "pattern":"[\\w+=,.@-]+" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/iam/2010-05-08/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/iam/2010-05-08/docs-2.json new file mode 100644 index 000000000..1eb92c480 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/iam/2010-05-08/docs-2.json @@ -0,0 +1,2523 @@ +{ + "version": "2.0", + "service": "AWS Identity and Access Management

    AWS Identity and Access Management (IAM) is a web service that you can use to manage users and user permissions under your AWS account. This guide provides descriptions of IAM actions that you can call programmatically. For general information about IAM, see AWS Identity and Access Management (IAM). For the user guide for IAM, see Using IAM.

    AWS provides SDKs that consist of libraries and sample code for various programming languages and platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient way to create programmatic access to IAM and AWS. For example, the SDKs take care of tasks such as cryptographically signing requests (see below), managing errors, and retrying requests automatically. For information about the AWS SDKs, including how to download and install them, see the Tools for Amazon Web Services page.

    We recommend that you use the AWS SDKs to make programmatic API calls to IAM. However, you can also use the IAM Query API to make direct calls to the IAM web service. To learn more about the IAM Query API, see Making Query Requests in the Using IAM guide. IAM supports GET and POST requests for all actions. That is, the API does not require you to use GET for some actions and POST for others. However, GET requests are subject to the size limitation of a URL. Therefore, for operations that require larger sizes, use a POST request.

    Signing Requests

    Requests must be signed using an access key ID and a secret access key. We strongly recommend that you do not use your AWS account access key ID and secret access key for everyday work with IAM. You can use the access key ID and secret access key for an IAM user or you can use the AWS Security Token Service to generate temporary security credentials and use those to sign requests.

    To sign requests, we recommend that you use Signature Version 4. If you have an existing application that uses Signature Version 2, you do not have to update it to use Signature Version 4. However, some operations now require Signature Version 4. The documentation for operations that require version 4 indicates this requirement.

    Additional Resources

    For more information, see the following:

    • AWS Security Credentials. This topic provides general information about the types of credentials used for accessing AWS.

    • IAM Best Practices. This topic presents a list of suggestions for using the IAM service to help secure your AWS resources.

    • Signing AWS API Requests. This set of topics walks you through the process of signing a request using an access key ID and secret access key.
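
    As a point of reference for the vendored Go SDK in this patch, below is a minimal sketch of constructing an IAM client; the SDK handles Signature Version 4 signing, retries, and error mapping automatically. The region and credential sources shown are illustrative assumptions.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iam"
)

func main() {
	// Credentials come from the environment, the shared credentials file,
	// or an EC2 instance role; every request is signed with SigV4.
	sess := session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-east-1"), // IAM is global; any region works
	}))
	svc := iam.New(sess)

	// With no UserName set, IAM resolves the caller from the signing key.
	out, err := svc.GetUser(&iam.GetUserInput{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.User.Arn))
}
```

    Later snippets reuse this `svc` client and omit the setup boilerplate.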

    ", + "operations": { + "AddClientIDToOpenIDConnectProvider": "

    Adds a new client ID (also known as audience) to the list of client IDs already registered for the specified IAM OpenID Connect (OIDC) provider resource.

    This action is idempotent; it does not fail or return an error if you add an existing client ID to the provider.

    ", + "AddRoleToInstanceProfile": "

    Adds the specified IAM role to the specified instance profile.

    The caller of this API must be granted the PassRole permission on the IAM role by a permission policy.

    For more information about roles, go to Working with Roles. For more information about instance profiles, go to About Instance Profiles.

    ", + "AddUserToGroup": "

    Adds the specified user to the specified group.

    ", + "AttachGroupPolicy": "

    Attaches the specified managed policy to the specified IAM group.

    You use this API to attach a managed policy to a group. To embed an inline policy in a group, use PutGroupPolicy.

    For more information about policies, see Managed Policies and Inline Policies in the IAM User Guide.

    ", + "AttachRolePolicy": "

    Attaches the specified managed policy to the specified IAM role.

    When you attach a managed policy to a role, the managed policy becomes part of the role's permission (access) policy. You cannot use a managed policy as the role's trust policy. The role's trust policy is created at the same time as the role, using CreateRole. You can update a role's trust policy using UpdateAssumeRolePolicy.

    Use this API to attach a managed policy to a role. To embed an inline policy in a role, use PutRolePolicy. For more information about policies, see Managed Policies and Inline Policies in the IAM User Guide.
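
    For example, a sketch with the Go SDK; the role name, policy ARN, and `trustPolicyJSON` document are placeholders, not values from this patch:

```go
// Attach a managed permissions policy to the role.
_, err := svc.AttachRolePolicy(&iam.AttachRolePolicyInput{
	RoleName:  aws.String("example-role"),
	PolicyArn: aws.String("arn:aws:iam::aws:policy/ReadOnlyAccess"),
})
if err != nil {
	log.Fatal(err)
}

// The trust policy is a separate document, updated via UpdateAssumeRolePolicy.
_, err = svc.UpdateAssumeRolePolicy(&iam.UpdateAssumeRolePolicyInput{
	RoleName:       aws.String("example-role"),
	PolicyDocument: aws.String(trustPolicyJSON), // placeholder JSON trust policy
})
if err != nil {
	log.Fatal(err)
}
```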

    ", + "AttachUserPolicy": "

    Attaches the specified managed policy to the specified user.

    You use this API to attach a managed policy to a user. To embed an inline policy in a user, use PutUserPolicy.

    For more information about policies, see Managed Policies and Inline Policies in the IAM User Guide.

    ", + "ChangePassword": "

    Changes the password of the IAM user who is calling this action. The root account password is not affected by this action.

    To change the password for a different user, see UpdateLoginProfile. For more information about modifying passwords, see Managing Passwords in the IAM User Guide.

    ", + "CreateAccessKey": "

    Creates a new AWS secret access key and corresponding AWS access key ID for the specified user. The default status for new keys is Active.

    If you do not specify a user name, IAM determines the user name implicitly based on the AWS access key ID signing the request. Because this action works for access keys under the AWS account, you can use this action to manage root credentials even if the AWS account has no associated users.

    For information about limits on the number of keys you can create, see Limitations on IAM Entities in the IAM User Guide.

    To ensure the security of your AWS account, the secret access key is accessible only during key and user creation. You must save the key (for example, in a text file) if you want to be able to access it again. If a secret key is lost, you can delete the access keys for the associated user and then create new keys.
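
    A sketch of creating a key and capturing the secret at the only moment it is available (the user name is a placeholder):

```go
out, err := svc.CreateAccessKey(&iam.CreateAccessKeyInput{
	UserName: aws.String("example-user"), // omit to target the requesting identity
})
if err != nil {
	log.Fatal(err)
}
// The secret access key appears only in this response; persist it securely now.
fmt.Println(aws.StringValue(out.AccessKey.AccessKeyId))
fmt.Println(aws.StringValue(out.AccessKey.SecretAccessKey))
```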

    ", + "CreateAccountAlias": "

    Creates an alias for your AWS account. For information about using an AWS account alias, see Using an Alias for Your AWS Account ID in the IAM User Guide.

    ", + "CreateGroup": "

    Creates a new group.

    For information about the number of groups you can create, see Limitations on IAM Entities in the IAM User Guide.

    ", + "CreateInstanceProfile": "

    Creates a new instance profile. For information about instance profiles, go to About Instance Profiles.

    For information about the number of instance profiles you can create, see Limitations on IAM Entities in the IAM User Guide.

    ", + "CreateLoginProfile": "

    Creates a password for the specified user, giving the user the ability to access AWS services through the AWS Management Console. For more information about managing passwords, see Managing Passwords in the IAM User Guide.

    ", + "CreateOpenIDConnectProvider": "

    Creates an IAM entity to describe an identity provider (IdP) that supports OpenID Connect (OIDC).

    The OIDC provider that you create with this operation can be used as a principal in a role's trust policy to establish a trust relationship between AWS and the OIDC provider.

    When you create the IAM OIDC provider, you specify the URL of the OIDC identity provider (IdP) to trust, a list of client IDs (also known as audiences) that identify the application or applications that are allowed to authenticate using the OIDC provider, and a list of thumbprints of the server certificate(s) that the IdP uses. You get all of this information from the OIDC IdP that you want to use for access to AWS.

    Because trust for the OIDC provider is ultimately derived from the IAM provider that this action creates, it is a best practice to limit access to the CreateOpenIDConnectProvider action to highly privileged users.
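
    A sketch of the call; the issuer URL, client ID, and thumbprint are illustrative values only:

```go
out, err := svc.CreateOpenIDConnectProvider(&iam.CreateOpenIDConnectProviderInput{
	Url:            aws.String("https://server.example.com"),   // the IdP's issuer URL
	ClientIDList:   []*string{aws.String("my-application-id")}, // audiences
	ThumbprintList: []*string{aws.String("3768084dfb3d2b68b7897bf5f565da8efEXAMPLE")},
})
if err != nil {
	log.Fatal(err)
}
fmt.Println(aws.StringValue(out.OpenIDConnectProviderArn))
```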

    ", + "CreatePolicy": "

    Creates a new managed policy for your AWS account.

    This operation creates a policy version with a version identifier of v1 and sets v1 as the policy's default version. For more information about policy versions, see Versioning for Managed Policies in the IAM User Guide.

    For more information about managed policies in general, see Managed Policies and Inline Policies in the IAM User Guide.

    ", + "CreatePolicyVersion": "

    Creates a new version of the specified managed policy. To update a managed policy, you create a new policy version. A managed policy can have up to five versions. If the policy has five versions, you must delete an existing version using DeletePolicyVersion before you create a new version.

    Optionally, you can set the new version as the policy's default version. The default version is the version that is in effect for the IAM users, groups, and roles to which the policy is attached.

    For more information about managed policy versions, see Versioning for Managed Policies in the IAM User Guide.
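
    For example (the policy ARN and `updatedPolicyJSON` are placeholders):

```go
out, err := svc.CreatePolicyVersion(&iam.CreatePolicyVersionInput{
	PolicyArn:      aws.String("arn:aws:iam::123456789012:policy/example"),
	PolicyDocument: aws.String(updatedPolicyJSON), // the new JSON policy document
	SetAsDefault:   aws.Bool(true),                // make the new version take effect
})
if err != nil {
	log.Fatal(err)
}
fmt.Println(aws.StringValue(out.PolicyVersion.VersionId)) // e.g. "v2"
```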

    ", + "CreateRole": "

    Creates a new role for your AWS account. For more information about roles, go to Working with Roles. For information about limitations on role names and the number of roles you can create, go to Limitations on IAM Entities in the IAM User Guide.

    ", + "CreateSAMLProvider": "

    Creates an IAM resource that describes an identity provider (IdP) that supports SAML 2.0.

    The SAML provider resource that you create with this operation can be used as a principal in an IAM role's trust policy to enable federated users who sign in using the SAML IdP to assume the role. You can create an IAM role that supports Web-based single sign-on (SSO) to the AWS Management Console or one that supports API access to AWS.

    When you create the SAML provider resource, you upload a SAML metadata document that you get from your IdP and that includes the issuer's name, expiration information, and keys that can be used to validate the SAML authentication response (assertions) that the IdP sends. You must generate the metadata document using the identity management software that is used as your organization's IdP.

    This operation requires Signature Version 4.

    For more information, see Enabling SAML 2.0 Federated Users to Access the AWS Management Console and About SAML 2.0-based Federation in the IAM User Guide.

    ", + "CreateUser": "

    Creates a new IAM user for your AWS account.

    For information about limitations on the number of IAM users you can create, see Limitations on IAM Entities in the IAM User Guide.

    ", + "CreateVirtualMFADevice": "

    Creates a new virtual MFA device for the AWS account. After creating the virtual MFA, use EnableMFADevice to attach the MFA device to an IAM user. For more information about creating and working with virtual MFA devices, go to Using a Virtual MFA Device in the IAM User Guide.

    For information about limits on the number of MFA devices you can create, see Limitations on Entities in the IAM User Guide.

    The seed information contained in the QR code and the Base32 string should be treated like any other secret access information, such as your AWS access keys or your passwords. After you provision your virtual device, you should ensure that the information is destroyed following secure procedures.
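
    A sketch of provisioning a device; the device name is a placeholder, and the seed handling shown is one reasonable approach rather than a prescribed one:

```go
out, err := svc.CreateVirtualMFADevice(&iam.CreateVirtualMFADeviceInput{
	VirtualMFADeviceName: aws.String("example-user-mfa"),
})
if err != nil {
	log.Fatal(err)
}
// Base32StringSeed and QRCodePNG are bootstrap secrets ([]byte); deliver them
// to the user over a secure channel, then destroy the local copies.
fmt.Println(aws.StringValue(out.VirtualMFADevice.SerialNumber))
// Next step (not shown): EnableMFADevice with two consecutive codes from the device.
```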

    ", + "DeactivateMFADevice": "

    Deactivates the specified MFA device and removes it from association with the user name for which it was originally enabled.

    For more information about creating and working with virtual MFA devices, go to Using a Virtual MFA Device in the IAM User Guide.

    ", + "DeleteAccessKey": "

    Deletes the access key pair associated with the specified IAM user.

    If you do not specify a user name, IAM determines the user name implicitly based on the AWS access key ID signing the request. Because this action works for access keys under the AWS account, you can use this action to manage root credentials even if the AWS account has no associated users.

    ", + "DeleteAccountAlias": "

    Deletes the specified AWS account alias. For information about using an AWS account alias, see Using an Alias for Your AWS Account ID in the IAM User Guide.

    ", + "DeleteAccountPasswordPolicy": "

    Deletes the password policy for the AWS account. There are no parameters.

    ", + "DeleteGroup": "

    Deletes the specified IAM group. The group must not contain any users or have any attached policies.

    ", + "DeleteGroupPolicy": "

    Deletes the specified inline policy that is embedded in the specified IAM group.

    A group can also have managed policies attached to it. To detach a managed policy from a group, use DetachGroupPolicy. For more information about policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

    ", + "DeleteInstanceProfile": "

    Deletes the specified instance profile. The instance profile must not have an associated role.

    Make sure you do not have any Amazon EC2 instances running with the instance profile you are about to delete. Deleting a role or instance profile that is associated with a running instance will break any applications running on the instance.

    For more information about instance profiles, go to About Instance Profiles.

    ", + "DeleteLoginProfile": "

    Deletes the password for the specified IAM user, which terminates the user's ability to access AWS services through the AWS Management Console.

    Deleting a user's password does not prevent a user from accessing AWS through the command line interface or the API. To prevent all user access, you must also either make any access keys inactive or delete them. For more information about making keys inactive or deleting them, see UpdateAccessKey and DeleteAccessKey.

    ", + "DeleteOpenIDConnectProvider": "

    Deletes an OpenID Connect identity provider (IdP) resource object in IAM.

    Deleting an IAM OIDC provider resource does not update any roles that reference the provider as a principal in their trust policies. Any attempt to assume a role that references a deleted provider fails.

    This action is idempotent; it does not fail or return an error if you call the action for a provider that does not exist.

    ", + "DeletePolicy": "

    Deletes the specified managed policy.

    Before you can delete a managed policy, you must first detach the policy from all users, groups, and roles that it is attached to, and you must delete all of the policy's versions. The following steps describe the process for deleting a managed policy (a Go sketch of the full sequence follows the list):

    • Detach the policy from all users, groups, and roles that the policy is attached to, using the DetachUserPolicy, DetachGroupPolicy, or DetachRolePolicy APIs. To list all the users, groups, and roles that a policy is attached to, use ListEntitiesForPolicy.

    • Delete all versions of the policy using DeletePolicyVersion. To list the policy's versions, use ListPolicyVersions. You cannot use DeletePolicyVersion to delete the version that is marked as the default version. You delete the policy's default version in the next step of the process.

    • Delete the policy (this automatically deletes the policy's default version) using this API.

    For information about managed policies, see Managed Policies and Inline Policies in the IAM User Guide.
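
    As referenced above, a sketch of the full teardown sequence; pagination of the list calls is elided for brevity:

```go
func deleteManagedPolicy(svc *iam.IAM, arn string) error {
	// Step 1: detach the policy from every group, role, and user.
	ents, err := svc.ListEntitiesForPolicy(&iam.ListEntitiesForPolicyInput{
		PolicyArn: aws.String(arn),
	})
	if err != nil {
		return err
	}
	for _, g := range ents.PolicyGroups {
		if _, err := svc.DetachGroupPolicy(&iam.DetachGroupPolicyInput{
			GroupName: g.GroupName, PolicyArn: aws.String(arn),
		}); err != nil {
			return err
		}
	}
	for _, r := range ents.PolicyRoles {
		if _, err := svc.DetachRolePolicy(&iam.DetachRolePolicyInput{
			RoleName: r.RoleName, PolicyArn: aws.String(arn),
		}); err != nil {
			return err
		}
	}
	for _, u := range ents.PolicyUsers {
		if _, err := svc.DetachUserPolicy(&iam.DetachUserPolicyInput{
			UserName: u.UserName, PolicyArn: aws.String(arn),
		}); err != nil {
			return err
		}
	}
	// Step 2: delete every non-default version.
	vers, err := svc.ListPolicyVersions(&iam.ListPolicyVersionsInput{
		PolicyArn: aws.String(arn),
	})
	if err != nil {
		return err
	}
	for _, v := range vers.Versions {
		if aws.BoolValue(v.IsDefaultVersion) {
			continue // the default version is removed with the policy itself
		}
		if _, err := svc.DeletePolicyVersion(&iam.DeletePolicyVersionInput{
			PolicyArn: aws.String(arn), VersionId: v.VersionId,
		}); err != nil {
			return err
		}
	}
	// Step 3: delete the policy, which also removes its default version.
	_, err = svc.DeletePolicy(&iam.DeletePolicyInput{PolicyArn: aws.String(arn)})
	return err
}
```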

    ", + "DeletePolicyVersion": "

    Deletes the specified version from the specified managed policy.

    You cannot delete the default version from a policy using this API. To delete the default version from a policy, use DeletePolicy. To find out which version of a policy is marked as the default version, use ListPolicyVersions.

    For information about versions for managed policies, see Versioning for Managed Policies in the IAM User Guide.

    ", + "DeleteRole": "

    Deletes the specified role. The role must not have any policies attached. For more information about roles, go to Working with Roles.

    Make sure you do not have any Amazon EC2 instances running with the role you are about to delete. Deleting a role or instance profile that is associated with a running instance will break any applications running on the instance.

    ", + "DeleteRolePolicy": "

    Deletes the specified inline policy that is embedded in the specified IAM role.

    A role can also have managed policies attached to it. To detach a managed policy from a role, use DetachRolePolicy. For more information about policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

    ", + "DeleteSAMLProvider": "

    Deletes a SAML provider resource in IAM.

    Deleting the provider resource from IAM does not update any roles that reference the SAML provider resource's ARN as a principal in their trust policies. Any attempt to assume a role that references a non-existent provider resource ARN fails.

    This operation requires Signature Version 4.

    ", + "DeleteSSHPublicKey": "

    Deletes the specified SSH public key.

    The SSH public key deleted by this action is used only for authenticating the associated IAM user to an AWS CodeCommit repository. For more information about using SSH keys to authenticate to an AWS CodeCommit repository, see Set up AWS CodeCommit for SSH Connections in the AWS CodeCommit User Guide.

    ", + "DeleteServerCertificate": "

    Deletes the specified server certificate.

    For more information about working with server certificates, including a list of AWS services that can use the server certificates that you manage with IAM, go to Working with Server Certificates in the IAM User Guide.

    If you are using a server certificate with Elastic Load Balancing, deleting the certificate could have implications for your application. If Elastic Load Balancing doesn't detect the deletion of bound certificates, it may continue to use the certificates. This could cause Elastic Load Balancing to stop accepting traffic. We recommend that you remove the reference to the certificate from Elastic Load Balancing before using this command to delete the certificate. For more information, go to DeleteLoadBalancerListeners in the Elastic Load Balancing API Reference.

    ", + "DeleteSigningCertificate": "

    Deletes a signing certificate associated with the specified IAM user.

    If you do not specify a user name, IAM determines the user name implicitly based on the AWS access key ID signing the request. Because this action works for access keys under the AWS account, you can use this action to manage root credentials even if the AWS account has no associated IAM users.

    ", + "DeleteUser": "

    Deletes the specified IAM user. The user must not belong to any groups or have any access keys, signing certificates, or attached policies.

    ", + "DeleteUserPolicy": "

    Deletes the specified inline policy that is embedded in the specified IAM user.

    A user can also have managed policies attached to it. To detach a managed policy from a user, use DetachUserPolicy. For more information about policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

    ", + "DeleteVirtualMFADevice": "

    Deletes a virtual MFA device.

    You must deactivate a user's virtual MFA device before you can delete it. For information about deactivating MFA devices, see DeactivateMFADevice.

    ", + "DetachGroupPolicy": "

    Removes the specified managed policy from the specified IAM group.

    A group can also have inline policies embedded with it. To delete an inline policy, use the DeleteGroupPolicy API. For information about policies, see Managed Policies and Inline Policies in the IAM User Guide.

    ", + "DetachRolePolicy": "

    Removes the specified managed policy from the specified role.

    A role can also have inline policies embedded with it. To delete an inline policy, use the DeleteRolePolicy API. For information about policies, see Managed Policies and Inline Policies in the IAM User Guide.

    ", + "DetachUserPolicy": "

    Removes the specified managed policy from the specified user.

    A user can also have inline policies embedded with it. To delete an inline policy, use the DeleteUserPolicy API. For information about policies, see Managed Policies and Inline Policies in the IAM User Guide.

    ", + "EnableMFADevice": "

    Enables the specified MFA device and associates it with the specified IAM user. When enabled, the MFA device is required for every subsequent login by the IAM user associated with the device.

    ", + "GenerateCredentialReport": "

    Generates a credential report for the AWS account. For more information about the credential report, see Getting Credential Reports in the IAM User Guide.

    ", + "GetAccessKeyLastUsed": "

    Retrieves information about when the specified access key was last used. The information includes the date and time of last use, along with the AWS service and region that were specified in the last request made with that key.

    ", + "GetAccountAuthorizationDetails": "

    Retrieves information about all IAM users, groups, roles, and policies in your AWS account, including their relationships to one another. Use this API to obtain a snapshot of the configuration of IAM permissions (users, groups, roles, and policies) in your account.

    You can optionally filter the results using the Filter parameter. You can paginate the results using the MaxItems and Marker parameters.
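
    A pagination sketch using the SDK's page helper; filtering to roles via the EntityType enum is an illustrative choice:

```go
err := svc.GetAccountAuthorizationDetailsPages(
	&iam.GetAccountAuthorizationDetailsInput{
		Filter: []*string{aws.String(iam.EntityTypeRole)}, // optional filter
	},
	func(page *iam.GetAccountAuthorizationDetailsOutput, lastPage bool) bool {
		for _, r := range page.RoleDetailList {
			fmt.Println(aws.StringValue(r.Arn))
		}
		return true // keep paging; the SDK feeds Marker back while IsTruncated
	})
if err != nil {
	log.Fatal(err)
}
```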

    ", + "GetAccountPasswordPolicy": "

    Retrieves the password policy for the AWS account. For more information about using a password policy, go to Managing an IAM Password Policy.

    ", + "GetAccountSummary": "

    Retrieves information about IAM entity usage and IAM quotas in the AWS account.

    For information about limitations on IAM entities, see Limitations on IAM Entities in the IAM User Guide.

    ", + "GetContextKeysForCustomPolicy": "

    Gets a list of all of the context keys referenced in the input policies. The policies are supplied as a list of one or more strings. To get the context keys from policies associated with an IAM user, group, or role, use GetContextKeysForPrincipalPolicy.

    Context keys are variables maintained by AWS and its services that provide details about the context of an API query request, and can be evaluated by testing against a value specified in an IAM policy. Use GetContextKeysForCustomPolicy to understand what key names and values you must supply when you call SimulateCustomPolicy. Note that all parameters are shown in unencoded form here for clarity, but must be URL encoded to be included as a part of a real HTTP request.
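
    For instance, a sketch that extracts the context keys from a candidate policy (the `policyJSON` document is a placeholder):

```go
out, err := svc.GetContextKeysForCustomPolicy(&iam.GetContextKeysForCustomPolicyInput{
	PolicyInputList: []*string{aws.String(policyJSON)}, // one or more policy documents
})
if err != nil {
	log.Fatal(err)
}
for _, k := range out.ContextKeyNames {
	// Each key (e.g. "aws:SourceIp") is a value you would supply as a
	// ContextEntry when calling SimulateCustomPolicy.
	fmt.Println(aws.StringValue(k))
}
```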

    ", + "GetContextKeysForPrincipalPolicy": "

    Gets a list of all of the context keys referenced in all of the IAM policies attached to the specified IAM entity. The entity can be an IAM user, group, or role. If you specify a user, then the request also includes all of the policies attached to groups that the user is a member of.

    You can optionally include a list of one or more additional policies, specified as strings. If you want to include only a list of policies by string, use GetContextKeysForCustomPolicy instead.

    Note: This API discloses information about the permissions granted to other users. If you do not want users to see other users' permissions, then consider allowing them to use GetContextKeysForCustomPolicy instead.

    Context keys are variables maintained by AWS and its services that provide details about the context of an API query request, and can be evaluated by testing against a value in an IAM policy. Use GetContextKeysForPrincipalPolicy to understand what key names and values you must supply when you call SimulatePrincipalPolicy.

    ", + "GetCredentialReport": "

    Retrieves a credential report for the AWS account. For more information about the credential report, see Getting Credential Reports in the IAM User Guide.

    ", + "GetGroup": "

    Returns a list of IAM users that are in the specified IAM group. You can paginate the results using the MaxItems and Marker parameters.

    ", + "GetGroupPolicy": "

    Retrieves the specified inline policy document that is embedded in the specified IAM group.

    Policies returned by this API are URL-encoded in compliance with RFC 3986. You can use a URL decoding method to convert the policy back to plain JSON text. For example, if you use Java, you can use the decode method of the java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs provide similar functionality.

    An IAM group can also have managed policies attached to it. To retrieve a managed policy document that is attached to a group, use GetPolicy to determine the policy's default version, then use GetPolicyVersion to retrieve the policy document.

    For more information about policies, see Managed Policies and Inline Policies in the IAM User Guide.
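
    In Go, one way to decode the returned document, mirroring the java.net.URLDecoder example above (`getOut` stands in for a GetGroupPolicy response):

```go
// Requires the standard library package net/url.
raw := aws.StringValue(getOut.PolicyDocument)
doc, err := url.QueryUnescape(raw) // percent-decodes, like java.net.URLDecoder
if err != nil {
	log.Fatal(err)
}
fmt.Println(doc) // plain JSON policy text
```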

    ", + "GetInstanceProfile": "

    Retrieves information about the specified instance profile, including the instance profile's path, GUID, ARN, and role. For more information about instance profiles, see About Instance Profiles in the IAM User Guide.

    ", + "GetLoginProfile": "

    Retrieves the user name and password-creation date for the specified IAM user. If the user has not been assigned a password, the action returns a 404 (NoSuchEntity) error.

    ", + "GetOpenIDConnectProvider": "

    Returns information about the specified OpenID Connect (OIDC) provider resource object in IAM.

    ", + "GetPolicy": "

    Retrieves information about the specified managed policy, including the policy's default version and the total number of IAM users, groups, and roles to which the policy is attached. To retrieve the list of the specific users, groups, and roles that the policy is attached to, use the ListEntitiesForPolicy API. This API returns metadata about the policy. To retrieve the actual policy document for a specific version of the policy, use GetPolicyVersion.

    This API retrieves information about managed policies. To retrieve information about an inline policy that is embedded with an IAM user, group, or role, use the GetUserPolicy, GetGroupPolicy, or GetRolePolicy API.

    For more information about policies, see Managed Policies and Inline Policies in the IAM User Guide.

    ", + "GetPolicyVersion": "

    Retrieves information about the specified version of the specified managed policy, including the policy document.

    Policies returned by this API are URL-encoded in compliance with RFC 3986. You can use a URL decoding method to convert the policy back to plain JSON text. For example, if you use Java, you can use the decode method of the java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs provide similar functionality.

    To list the available versions for a policy, use ListPolicyVersions.

    This API retrieves information about managed policies. To retrieve information about an inline policy that is embedded in a user, group, or role, use the GetUserPolicy, GetGroupPolicy, or GetRolePolicy API.

    For more information about the types of policies, see Managed Policies and Inline Policies in the IAM User Guide.

    For more information about managed policy versions, see Versioning for Managed Policies in the IAM User Guide.

    ", + "GetRole": "

    Retrieves information about the specified role, including the role's path, GUID, ARN, and the role's trust policy that grants permission to assume the role. For more information about roles, see Working with Roles.

    Policies returned by this API are URL-encoded in compliance with RFC 3986. You can use a URL decoding method to convert the policy back to plain JSON text. For example, if you use Java, you can use the decode method of the java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs provide similar functionality.

    ", + "GetRolePolicy": "

    Retrieves the specified inline policy document that is embedded with the specified IAM role.

    Policies returned by this API are URL-encoded in compliance with RFC 3986. You can use a URL decoding method to convert the policy back to plain JSON text. For example, if you use Java, you can use the decode method of the java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs provide similar functionality.

    An IAM role can also have managed policies attached to it. To retrieve a managed policy document that is attached to a role, use GetPolicy to determine the policy's default version, then use GetPolicyVersion to retrieve the policy document.

    For more information about policies, see Managed Policies and Inline Policies in the IAM User Guide.

    For more information about roles, see Using Roles to Delegate Permissions and Federate Identities.

    ", + "GetSAMLProvider": "

    Returns the SAML provider metadocument that was uploaded when the IAM SAML provider resource object was created or updated.

    This operation requires Signature Version 4.

    ", + "GetSSHPublicKey": "

    Retrieves the specified SSH public key, including metadata about the key.

    The SSH public key retrieved by this action is used only for authenticating the associated IAM user to an AWS CodeCommit repository. For more information about using SSH keys to authenticate to an AWS CodeCommit repository, see Set up AWS CodeCommit for SSH Connections in the AWS CodeCommit User Guide.

    ", + "GetServerCertificate": "

    Retrieves information about the specified server certificate stored in IAM.

    For more information about working with server certificates, including a list of AWS services that can use the server certificates that you manage with IAM, go to Working with Server Certificates in the IAM User Guide.

    ", + "GetUser": "

    Retrieves information about the specified IAM user, including the user's creation date, path, unique ID, and ARN.

    If you do not specify a user name, IAM determines the user name implicitly based on the AWS access key ID used to sign the request to this API.

    ", + "GetUserPolicy": "

    Retrieves the specified inline policy document that is embedded in the specified IAM user.

    Policies returned by this API are URL-encoded in compliance with RFC 3986. You can use a URL decoding method to convert the policy back to plain JSON text. For example, if you use Java, you can use the decode method of the java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs provide similar functionality.

    An IAM user can also have managed policies attached to it. To retrieve a managed policy document that is attached to a user, use GetPolicy to determine the policy's default version, then use GetPolicyVersion to retrieve the policy document.

    For more information about policies, see Managed Policies and Inline Policies in the IAM User Guide.

    ", + "ListAccessKeys": "

    Returns information about the access key IDs associated with the specified IAM user. If there are none, the action returns an empty list.

    Although each user is limited to a small number of keys, you can still paginate the results using the MaxItems and Marker parameters.

    If the UserName field is not specified, the UserName is determined implicitly based on the AWS access key ID used to sign the request. Because this action works for access keys under the AWS account, you can use this action to manage root credentials even if the AWS account has no associated users.

    To ensure the security of your AWS account, the secret access key is accessible only during key and user creation.
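
    In the vendored Go SDK, the generated ListAccessKeysPages helper drives the Marker/MaxItems loop described above; a short sketch, assuming the default credential chain and the aws and iam packages from this SDK (the user name is hypothetical):

    ```go
    // Collect the access key IDs for one user, one page at a time.
    func listKeyIDs(svc *iam.IAM) ([]string, error) {
        var ids []string
        err := svc.ListAccessKeysPages(&iam.ListAccessKeysInput{
            UserName: aws.String("example-user"), // hypothetical; omit to target the requester
            MaxItems: aws.Int64(10),              // small pages, per the pagination note above
        }, func(page *iam.ListAccessKeysOutput, lastPage bool) bool {
            for _, md := range page.AccessKeyMetadata {
                ids = append(ids, aws.StringValue(md.AccessKeyId))
            }
            return !lastPage // keep paging until the SDK reports the last page
        })
        return ids, err
    }
    ```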

    ", + "ListAccountAliases": "

    Lists the account alias associated with the AWS account (Note: you can have only one). For information about using an AWS account alias, see Using an Alias for Your AWS Account ID in the IAM User Guide.

    ", + "ListAttachedGroupPolicies": "

    Lists all managed policies that are attached to the specified IAM group.

    An IAM group can also have inline policies embedded with it. To list the inline policies for a group, use the ListGroupPolicies API. For information about policies, see Managed Policies and Inline Policies in the IAM User Guide.

    You can paginate the results using the MaxItems and Marker parameters. You can use the PathPrefix parameter to limit the list of policies to only those matching the specified path prefix. If there are no policies attached to the specified group (or none that match the specified path prefix), the action returns an empty list.

    ", + "ListAttachedRolePolicies": "

    Lists all managed policies that are attached to the specified IAM role.

    An IAM role can also have inline policies embedded with it. To list the inline policies for a role, use the ListRolePolicies API. For information about policies, see Managed Policies and Inline Policies in the IAM User Guide.

    You can paginate the results using the MaxItems and Marker parameters. You can use the PathPrefix parameter to limit the list of policies to only those matching the specified path prefix. If there are no policies attached to the specified role (or none that match the specified path prefix), the action returns an empty list.

    ", + "ListAttachedUserPolicies": "

    Lists all managed policies that are attached to the specified IAM user.

    An IAM user can also have inline policies embedded with it. To list the inline policies for a user, use the ListUserPolicies API. For information about policies, see Managed Policies and Inline Policies in the IAM User Guide.

    You can paginate the results using the MaxItems and Marker parameters. You can use the PathPrefix parameter to limit the list of policies to only those matching the specified path prefix. If there are no policies attached to the specified user (or none that match the specified path prefix), the action returns an empty list.

    ", + "ListEntitiesForPolicy": "

    Lists all IAM users, groups, and roles that the specified managed policy is attached to.

    You can use the optional EntityFilter parameter to limit the results to a particular type of entity (users, groups, or roles). For example, to list only the roles that are attached to the specified policy, set EntityFilter to Role.

    You can paginate the results using the MaxItems and Marker parameters.
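
    A sketch of the EntityFilter usage in Go (the policy ARN is hypothetical; svc is an *iam.IAM client as in the earlier sketches):

    ```go
    // List only the roles attached to a policy.
    out, err := svc.ListEntitiesForPolicy(&iam.ListEntitiesForPolicyInput{
        PolicyArn:    aws.String("arn:aws:iam::123456789012:policy/ExamplePolicy"), // hypothetical
        EntityFilter: aws.String(iam.EntityTypeRole),                               // filter to roles only
    })
    if err != nil {
        log.Fatal(err)
    }
    for _, role := range out.PolicyRoles {
        fmt.Println(aws.StringValue(role.RoleName))
    }
    ```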

    ", + "ListGroupPolicies": "

    Lists the names of the inline policies that are embedded in the specified IAM group.

    An IAM group can also have managed policies attached to it. To list the managed policies that are attached to a group, use ListAttachedGroupPolicies. For more information about policies, see Managed Policies and Inline Policies in the IAM User Guide.

    You can paginate the results using the MaxItems and Marker parameters. If there are no inline policies embedded with the specified group, the action returns an empty list.

    ", + "ListGroups": "

    Lists the IAM groups that have the specified path prefix.

    You can paginate the results using the MaxItems and Marker parameters.

    ", + "ListGroupsForUser": "

    Lists the IAM groups that the specified IAM user belongs to.

    You can paginate the results using the MaxItems and Marker parameters.

    ", + "ListInstanceProfiles": "

    Lists the instance profiles that have the specified path prefix. If there are none, the action returns an empty list. For more information about instance profiles, go to About Instance Profiles.

    You can paginate the results using the MaxItems and Marker parameters.

    ", + "ListInstanceProfilesForRole": "

    Lists the instance profiles that have the specified associated IAM role. If there are none, the action returns an empty list. For more information about instance profiles, go to About Instance Profiles.

    You can paginate the results using the MaxItems and Marker parameters.

    ", + "ListMFADevices": "

    Lists the MFA devices for an IAM user. If the request includes an IAM user name, then this action lists all the MFA devices associated with the specified user. If you do not specify a user name, IAM determines the user name implicitly based on the AWS access key ID used to sign the request for this API.

    You can paginate the results using the MaxItems and Marker parameters.

    ", + "ListOpenIDConnectProviders": "

    Lists information about the IAM OpenID Connect (OIDC) provider resource objects defined in the AWS account.

    ", + "ListPolicies": "

    Lists all the managed policies that are available in your AWS account, including your own customer-defined managed policies and all AWS managed policies.

    You can filter the list of policies that is returned using the optional OnlyAttached, Scope, and PathPrefix parameters. For example, to list only the customer managed policies in your AWS account, set Scope to Local. To list only AWS managed policies, set Scope to AWS.

    You can paginate the results using the MaxItems and Marker parameters.

    For more information about managed policies, see Managed Policies and Inline Policies in the IAM User Guide.
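
    A sketch of the Scope filter in Go, using the SDK's generated pagination helper (svc is an *iam.IAM client as before):

    ```go
    // List only customer managed policies, per the Scope=Local note above.
    err := svc.ListPoliciesPages(&iam.ListPoliciesInput{
        Scope: aws.String(iam.PolicyScopeTypeLocal), // "Local" == customer managed only
    }, func(page *iam.ListPoliciesOutput, lastPage bool) bool {
        for _, p := range page.Policies {
            fmt.Println(aws.StringValue(p.PolicyName), aws.StringValue(p.Arn))
        }
        return !lastPage
    })
    ```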

    ", + "ListPolicyVersions": "

    Lists information about the versions of the specified managed policy, including the version that is currently set as the policy's default version.

    For more information about managed policies, see Managed Policies and Inline Policies in the IAM User Guide.

    ", + "ListRolePolicies": "

    Lists the names of the inline policies that are embedded in the specified IAM role.

    An IAM role can also have managed policies attached to it. To list the managed policies that are attached to a role, use ListAttachedRolePolicies. For more information about policies, see Managed Policies and Inline Policies in the IAM User Guide.

    You can paginate the results using the MaxItems and Marker parameters. If there are no inline policies embedded with the specified role, the action returns an empty list.

    ", + "ListRoles": "

    Lists the IAM roles that have the specified path prefix. If there are none, the action returns an empty list. For more information about roles, go to Working with Roles.

    You can paginate the results using the MaxItems and Marker parameters.

    ", + "ListSAMLProviders": "

    Lists the SAML provider resource objects defined in IAM for the account.

    This operation requires Signature Version 4.

    ", + "ListSSHPublicKeys": "

    Returns information about the SSH public keys associated with the specified IAM user. If there are none, the action returns an empty list.

    The SSH public keys returned by this action are used only for authenticating the IAM user to an AWS CodeCommit repository. For more information about using SSH keys to authenticate to an AWS CodeCommit repository, see Set up AWS CodeCommit for SSH Connections in the AWS CodeCommit User Guide.

    Although each user is limited to a small number of keys, you can still paginate the results using the MaxItems and Marker parameters.

    ", + "ListServerCertificates": "

    Lists the server certificates stored in IAM that have the specified path prefix. If none exist, the action returns an empty list.

    You can paginate the results using the MaxItems and Marker parameters.

    For more information about working with server certificates, including a list of AWS services that can use the server certificates that you manage with IAM, go to Working with Server Certificates in the IAM User Guide.

    ", + "ListSigningCertificates": "

    Returns information about the signing certificates associated with the specified IAM user. If there are none, the action returns an empty list.

    Although each user is limited to a small number of signing certificates, you can still paginate the results using the MaxItems and Marker parameters.

    If the UserName field is not specified, the user name is determined implicitly based on the AWS access key ID used to sign the request for this API. Because this action works for access keys under the AWS account, you can use this action to manage root credentials even if the AWS account has no associated users.

    ", + "ListUserPolicies": "

    Lists the names of the inline policies embedded in the specified IAM user.

    An IAM user can also have managed policies attached to it. To list the managed policies that are attached to a user, use ListAttachedUserPolicies. For more information about policies, see Managed Policies and Inline Policies in the IAM User Guide.

    You can paginate the results using the MaxItems and Marker parameters. If there are no inline policies embedded with the specified user, the action returns an empty list.

    ", + "ListUsers": "

    Lists the IAM users that have the specified path prefix. If no path prefix is specified, the action returns all users in the AWS account. If there are none, the action returns an empty list.

    You can paginate the results using the MaxItems and Marker parameters.

    ", + "ListVirtualMFADevices": "

    Lists the virtual MFA devices defined in the AWS account by assignment status. If you do not specify an assignment status, the action returns a list of all virtual MFA devices. Assignment status can be Assigned, Unassigned, or Any.

    You can paginate the results using the MaxItems and Marker parameters.

    ", + "PutGroupPolicy": "

    Adds or updates an inline policy document that is embedded in the specified IAM group.

    A group can also have managed policies attached to it. To attach a managed policy to a group, use AttachGroupPolicy. To create a new managed policy, use CreatePolicy. For information about policies, see Managed Policies and Inline Policies in the IAM User Guide.

    For information about limits on the number of inline policies that you can embed in a group, see Limitations on IAM Entities in the IAM User Guide.

    Because policy documents can be large, you should use POST rather than GET when calling PutGroupPolicy. For general information about using the Query API with IAM, go to Making Query Requests in the IAM User Guide.

    ", + "PutRolePolicy": "

    Adds or updates an inline policy document that is embedded in the specified IAM role.

    When you embed an inline policy in a role, the inline policy is used as part of the role's access (permissions) policy. The role's trust policy is created at the same time as the role, using CreateRole. You can update a role's trust policy using UpdateAssumeRolePolicy. For more information about IAM roles, go to Using Roles to Delegate Permissions and Federate Identities.

    A role can also have a managed policy attached to it. To attach a managed policy to a role, use AttachRolePolicy. To create a new managed policy, use CreatePolicy. For information about policies, see Managed Policies and Inline Policies in the IAM User Guide.

    For information about limits on the number of inline policies that you can embed with a role, see Limitations on IAM Entities in the IAM User Guide.

    Because policy documents can be large, you should use POST rather than GET when calling PutRolePolicy. For general information about using the Query API with IAM, go to Making Query Requests in the IAM User Guide.
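
    The vendored Go SDK issues IAM query operations as signed POST requests, so the size caveat is handled for you; a sketch with a hypothetical role name, policy name, and policy document:

    ```go
    // Embed an inline policy in a role.
    policyDoc := `{
      "Version": "2012-10-17",
      "Statement": [{"Effect": "Allow", "Action": "s3:GetObject", "Resource": "*"}]
    }`
    _, err := svc.PutRolePolicy(&iam.PutRolePolicyInput{
        RoleName:       aws.String("example-role"),          // hypothetical
        PolicyName:     aws.String("example-inline-policy"), // hypothetical
        PolicyDocument: aws.String(policyDoc),
    })
    ```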

    ", + "PutUserPolicy": "

    Adds or updates an inline policy document that is embedded in the specified IAM user.

    An IAM user can also have a managed policy attached to it. To attach a managed policy to a user, use AttachUserPolicy. To create a new managed policy, use CreatePolicy. For information about policies, see Managed Policies and Inline Policies in the IAM User Guide.

    For information about limits on the number of inline policies that you can embed in a user, see Limitations on IAM Entities in the IAM User Guide.

    Because policy documents can be large, you should use POST rather than GET when calling PutUserPolicy. For general information about using the Query API with IAM, go to Making Query Requests in the IAM User Guide.

    ", + "RemoveClientIDFromOpenIDConnectProvider": "

    Removes the specified client ID (also known as audience) from the list of client IDs registered for the specified IAM OpenID Connect (OIDC) provider resource object.

    This action is idempotent; it does not fail or return an error if you try to remove a client ID that does not exist.

    ", + "RemoveRoleFromInstanceProfile": "

    Removes the specified IAM role from the specified EC2 instance profile.

    Make sure you do not have any Amazon EC2 instances running with the role you are about to remove from the instance profile. Removing a role from an instance profile that is associated with a running instance breaks any applications running on the instance.

    For more information about IAM roles, go to Working with Roles. For more information about instance profiles, go to About Instance Profiles.

    ", + "RemoveUserFromGroup": "

    Removes the specified user from the specified group.

    ", + "ResyncMFADevice": "

    Synchronizes the specified MFA device with its IAM resource object on the AWS servers.

    For more information about creating and working with virtual MFA devices, go to Using a Virtual MFA Device in the IAM User Guide.

    ", + "SetDefaultPolicyVersion": "

    Sets the specified version of the specified policy as the policy's default (operative) version.

    This action affects all users, groups, and roles that the policy is attached to. To list the users, groups, and roles that the policy is attached to, use the ListEntitiesForPolicy API.

    For information about managed policies, see Managed Policies and Inline Policies in the IAM User Guide.

    ", + "SimulateCustomPolicy": "

    Simulates how a set of IAM policies and optionally a resource-based policy works with a list of API actions and AWS resources to determine the policies' effective permissions. The policies are provided as strings.

    The simulation does not perform the API actions; it only checks the authorization to determine if the simulated policies allow or deny the actions.

    If you want to simulate existing policies attached to an IAM user, group, or role, use SimulatePrincipalPolicy instead.

    Context keys are variables maintained by AWS and its services that provide details about the context of an API query request. You can use the Condition element of an IAM policy to evaluate context keys. To get the list of context keys that the policies require for correct simulation, use GetContextKeysForCustomPolicy.

    If the output is long, you can use MaxItems and Marker parameters to paginate the results.
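
    A sketch of the call in Go, simulating one action against a policy supplied as a string (policyDoc holds the policy JSON, as in the PutRolePolicy sketch above; the action name is illustrative):

    ```go
    out, err := svc.SimulateCustomPolicy(&iam.SimulateCustomPolicyInput{
        PolicyInputList: []*string{aws.String(policyDoc)},
        ActionNames:     []*string{aws.String("iam:CreateUser")},
    })
    if err != nil {
        log.Fatal(err)
    }
    for _, r := range out.EvaluationResults {
        // EvalDecision is "allowed", "explicitDeny", or "implicitDeny".
        fmt.Println(aws.StringValue(r.EvalActionName), "=>", aws.StringValue(r.EvalDecision))
    }
    ```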

    ", + "SimulatePrincipalPolicy": "

    Simulates how a set of IAM policies attached to an IAM entity works with a list of API actions and AWS resources to determine the policies' effective permissions. The entity can be an IAM user, group, or role. If you specify a user, then the simulation also includes all of the policies that are attached to groups that the user belongs to.

    You can optionally include a list of one or more additional policies specified as strings to include in the simulation. If you want to simulate only policies specified as strings, use SimulateCustomPolicy instead.

    You can also optionally include one resource-based policy to be evaluated with each of the resources included in the simulation.

    The simulation does not perform the API actions; it only checks the authorization to determine if the simulated policies allow or deny the actions.

    Note: This API discloses information about the permissions granted to other users. If you do not want users to see other users' permissions, then consider allowing them to use SimulateCustomPolicy instead.

    Context keys are variables maintained by AWS and its services that provide details about the context of an API query request. You can use the Condition element of an IAM policy to evaluate context keys. To get the list of context keys that the policies require for correct simulation, use GetContextKeysForPrincipalPolicy.

    If the output is long, you can use the MaxItems and Marker parameters to paginate the results.

    ", + "UpdateAccessKey": "

    Changes the status of the specified access key from Active to Inactive, or vice versa. This action can be used to disable a user's key as part of a key rotation workflow.

    If the UserName field is not specified, the UserName is determined implicitly based on the AWS access key ID used to sign the request. Because this action works for access keys under the AWS account, you can use this action to manage root credentials even if the AWS account has no associated users.

    For information about rotating keys, see Managing Keys and Certificates in the IAM User Guide.
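
    A sketch of the disable step in such a rotation, in Go; the user name is hypothetical and the key ID is AWS's documented example value:

    ```go
    _, err := svc.UpdateAccessKey(&iam.UpdateAccessKeyInput{
        UserName:    aws.String("example-user"),         // omit to target the requester
        AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"), // example key ID
        Status:      aws.String(iam.StatusTypeInactive), // "Inactive"
    })
    ```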

    ", + "UpdateAccountPasswordPolicy": "

    Updates the password policy settings for the AWS account.

    This action does not support partial updates. No parameters are required, but if you do not specify a parameter, that parameter's value reverts to its default value. See the Request Parameters section for each parameter's default value.

    For more information about using a password policy, see Managing an IAM Password Policy in the IAM User Guide.

    ", + "UpdateAssumeRolePolicy": "

    Updates the policy that grants an IAM entity permission to assume a role. This is typically referred to as the \"role trust policy\". For more information about roles, go to Using Roles to Delegate Permissions and Federate Identities.

    ", + "UpdateGroup": "

    Updates the name and/or the path of the specified IAM group.

    You should understand the implications of changing a group's path or name. For more information, see Renaming Users and Groups in the IAM User Guide.

    To change an IAM group name, the requester must have appropriate permissions on both the source object and the target object. For example, to change \"Managers\" to \"MGRs\", the entity making the request must have permission on both \"Managers\" and \"MGRs\", or must have permission on all (*). For more information about permissions, see Permissions and Policies.

    ", + "UpdateLoginProfile": "

    Changes the password for the specified IAM user.

    IAM users can change their own passwords by calling ChangePassword. For more information about modifying passwords, see Managing Passwords in the IAM User Guide.

    ", + "UpdateOpenIDConnectProviderThumbprint": "

    Replaces the existing list of server certificate thumbprints associated with an OpenID Connect (OIDC) provider resource object with a new list of thumbprints.

    The list that you pass with this action completely replaces the existing list of thumbprints. (The lists are not merged.)

    Typically, you need to update a thumbprint only when the identity provider's certificate changes, which occurs rarely. However, if the provider's certificate does change, any attempt to assume an IAM role that specifies the OIDC provider as a principal fails until the certificate thumbprint is updated.

    Because trust for the OIDC provider is ultimately derived from the provider's certificate and is validated by the thumbprint, it is a best practice to limit access to the UpdateOpenIDConnectProviderThumbprint action to highly privileged users.
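
    A sketch of the replace-not-merge semantics in Go; the provider ARN and the 40-hex-character SHA-1 thumbprint below are hypothetical:

    ```go
    _, err := svc.UpdateOpenIDConnectProviderThumbprint(&iam.UpdateOpenIDConnectProviderThumbprintInput{
        OpenIDConnectProviderArn: aws.String("arn:aws:iam::123456789012:oidc-provider/server.example.org"),
        ThumbprintList:           []*string{aws.String("0123456789abcdef0123456789abcdef01234567")},
    })
    ```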

    ", + "UpdateSAMLProvider": "

    Updates the metadata document for an existing SAML provider resource object.

    This operation requires Signature Version 4.

    ", + "UpdateSSHPublicKey": "

    Sets the status of an IAM user's SSH public key to active or inactive. SSH public keys that are inactive cannot be used for authentication. This action can be used to disable a user's SSH public key as part of a key rotation workflow.

    The SSH public key affected by this action is used only for authenticating the associated IAM user to an AWS CodeCommit repository. For more information about using SSH keys to authenticate to an AWS CodeCommit repository, see Set up AWS CodeCommit for SSH Connections in the AWS CodeCommit User Guide.

    ", + "UpdateServerCertificate": "

    Updates the name and/or the path of the specified server certificate stored in IAM.

    For more information about working with server certificates, including a list of AWS services that can use the server certificates that you manage with IAM, go to Working with Server Certificates in the IAM User Guide.

    You should understand the implications of changing a server certificate's path or name. For more information, see Renaming a Server Certificate in the IAM User Guide.

    To change a server certificate name, the requester must have appropriate permissions on both the source object and the target object. For example, to change the name from \"ProductionCert\" to \"ProdCert\", the entity making the request must have permission on \"ProductionCert\" and \"ProdCert\", or must have permission on all (*). For more information about permissions, see Access Management in the IAM User Guide.

    ", + "UpdateSigningCertificate": "

    Changes the status of the specified user signing certificate from active to disabled, or vice versa. This action can be used to disable an IAM user's signing certificate as part of a certificate rotation workflow.

    If the UserName field is not specified, the UserName is determined implicitly based on the AWS access key ID used to sign the request. Because this action works for access keys under the AWS account, you can use this action to manage root credentials even if the AWS account has no associated users.

    ", + "UpdateUser": "

    Updates the name and/or the path of the specified IAM user.

    You should understand the implications of changing an IAM user's path or name. For more information, see Renaming an IAM User and Renaming an IAM Group in the IAM User Guide.

    To change a user name, the requester must have appropriate permissions on both the source object and the target object. For example, to change Bob to Robert, the entity making the request must have permission on Bob and Robert, or must have permission on all (*). For more information about permissions, see Permissions and Policies.

    ", + "UploadSSHPublicKey": "

    Uploads an SSH public key and associates it with the specified IAM user.

    The SSH public key uploaded by this action can be used only for authenticating the associated IAM user to an AWS CodeCommit repository. For more information about using SSH keys to authenticate to an AWS CodeCommit repository, see Set up AWS CodeCommit for SSH Connections in the AWS CodeCommit User Guide.

    ", + "UploadServerCertificate": "

    Uploads a server certificate entity for the AWS account. The server certificate entity includes a public key certificate, a private key, and an optional certificate chain, which should all be PEM-encoded.

    For more information about working with server certificates, including a list of AWS services that can use the server certificates that you manage with IAM, go to Working with Server Certificates in the IAM User Guide.

    For information about the number of server certificates you can upload, see Limitations on IAM Entities and Objects in the IAM User Guide.

    Because the body of the public key certificate, private key, and the certificate chain can be large, you should use POST rather than GET when calling UploadServerCertificate. For information about setting up signatures and authorization through the API, go to Signing AWS API Requests in the AWS General Reference. For general information about using the Query API with IAM, go to Calling the API by Making HTTP Query Requests in the IAM User Guide.
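
    The Go SDK already sends this as a signed POST; a sketch of the upload, reading PEM material from disk (the file paths and certificate name are hypothetical, and ioutil is io/ioutil):

    ```go
    body, err := ioutil.ReadFile("cert.pem")
    if err != nil {
        log.Fatal(err)
    }
    key, err := ioutil.ReadFile("key.pem")
    if err != nil {
        log.Fatal(err)
    }
    _, err = svc.UploadServerCertificate(&iam.UploadServerCertificateInput{
        ServerCertificateName: aws.String("ProdCert"), // hypothetical
        CertificateBody:       aws.String(string(body)),
        PrivateKey:            aws.String(string(key)),
    })
    ```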

    ", + "UploadSigningCertificate": "

    Uploads an X.509 signing certificate and associates it with the specified IAM user. Some AWS services use X.509 signing certificates to validate requests that are signed with a corresponding private key. When you upload the certificate, its default status is Active.

    If the UserName field is not specified, the IAM user name is determined implicitly based on the AWS access key ID used to sign the request. Because this action works for access keys under the AWS account, you can use this action to manage root credentials even if the AWS account has no associated users.

    Because the body of an X.509 certificate can be large, you should use POST rather than GET when calling UploadSigningCertificate. For information about setting up signatures and authorization through the API, go to Signing AWS API Requests in the AWS General Reference. For general information about using the Query API with IAM, go to Making Query Requests in the IAM User Guide.

    " + }, + "shapes": { + "AccessKey": { + "base": "

    Contains information about an AWS access key.

    This data type is used as a response element in the CreateAccessKey and ListAccessKeys actions.

    The SecretAccessKey value is returned only in response to CreateAccessKey. You can get a secret access key only when you first create an access key; you cannot recover the secret access key later. If you lose a secret access key, you must create a new access key.

    ", + "refs": { + "CreateAccessKeyResponse$AccessKey": "

    A structure with details about the access key.

    " + } + }, + "AccessKeyLastUsed": { + "base": "

    Contains information about the last time an AWS access key was used.

    This data type is used as a response element in the GetAccessKeyLastUsed action.

    ", + "refs": { + "GetAccessKeyLastUsedResponse$AccessKeyLastUsed": "

    Contains information about the last time the access key was used.

    " + } + }, + "AccessKeyMetadata": { + "base": "

    Contains information about an AWS access key, without its secret key.

    This data type is used as a response element in the ListAccessKeys action.

    ", + "refs": { + "accessKeyMetadataListType$member": null + } + }, + "ActionNameListType": { + "base": null, + "refs": { + "SimulateCustomPolicyRequest$ActionNames": "

    A list of names of API actions to evaluate in the simulation. Each action is evaluated against each resource. Each action must include the service identifier, such as iam:CreateUser.

    ", + "SimulatePrincipalPolicyRequest$ActionNames": "

    A list of names of API actions to evaluate in the simulation. Each action is evaluated for each resource. Each action must include the service identifier, such as iam:CreateUser.

    " + } + }, + "ActionNameType": { + "base": null, + "refs": { + "ActionNameListType$member": null, + "EvaluationResult$EvalActionName": "

    The name of the API action tested on the indicated resource.

    " + } + }, + "AddClientIDToOpenIDConnectProviderRequest": { + "base": null, + "refs": { + } + }, + "AddRoleToInstanceProfileRequest": { + "base": null, + "refs": { + } + }, + "AddUserToGroupRequest": { + "base": null, + "refs": { + } + }, + "AttachGroupPolicyRequest": { + "base": null, + "refs": { + } + }, + "AttachRolePolicyRequest": { + "base": null, + "refs": { + } + }, + "AttachUserPolicyRequest": { + "base": null, + "refs": { + } + }, + "AttachedPolicy": { + "base": "

    Contains information about an attached policy.

    An attached policy is a managed policy that has been attached to a user, group, or role. This data type is used as a response element in the ListAttachedGroupPolicies, ListAttachedRolePolicies, ListAttachedUserPolicies, and GetAccountAuthorizationDetails actions.

    For more information about managed policies, refer to Managed Policies and Inline Policies in the Using IAM guide.

    ", + "refs": { + "attachedPoliciesListType$member": null + } + }, + "BootstrapDatum": { + "base": null, + "refs": { + "VirtualMFADevice$Base32StringSeed": "

    The Base32 seed, as defined in RFC 3548. The Base32StringSeed value is Base64-encoded.

    ", + "VirtualMFADevice$QRCodePNG": "

    A QR code PNG image that encodes otpauth://totp/$virtualMFADeviceName@$AccountName?secret=$Base32String where $virtualMFADeviceName is one of the create call arguments, AccountName is the user name if set (otherwise, the account ID), and Base32String is the seed in Base32 format. The Base32String value is Base64-encoded.
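
    In the Go SDK, blob fields such as QRCodePNG arrive already base64-decoded as []byte; a sketch that writes the image to disk for scanning (the device name is hypothetical, and ioutil is io/ioutil):

    ```go
    resp, err := svc.CreateVirtualMFADevice(&iam.CreateVirtualMFADeviceInput{
        VirtualMFADeviceName: aws.String("example-device"), // hypothetical
    })
    if err != nil {
        log.Fatal(err)
    }
    if err := ioutil.WriteFile("mfa-qr.png", resp.VirtualMFADevice.QRCodePNG, 0600); err != nil {
        log.Fatal(err)
    }
    ```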

    " + } + }, + "ChangePasswordRequest": { + "base": null, + "refs": { + } + }, + "ColumnNumber": { + "base": null, + "refs": { + "Position$Column": "

    The column in the line containing the specified position in the document.

    " + } + }, + "ContextEntry": { + "base": "

    Contains information about a condition context key. It includes the name of the key and specifies the value (or values, if the context key supports multiple values) to use in the simulation. This information is used when evaluating the Condition elements of the input policies.

    This data type is used as an input parameter to SimulateCustomPolicy and SimulatePrincipalPolicy.
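
    A sketch of constructing one such entry in Go, suitable for the ContextEntries field of a simulation request; the address below is a documentation value:

    ```go
    // An IP-typed context entry for aws:SourceIp.
    entry := &iam.ContextEntry{
        ContextKeyName:   aws.String("aws:SourceIp"),
        ContextKeyType:   aws.String(iam.ContextKeyTypeEnumIp), // "ip"
        ContextKeyValues: []*string{aws.String("192.0.2.10")},
    }
    ```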

    ", + "refs": { + "ContextEntryListType$member": null + } + }, + "ContextEntryListType": { + "base": null, + "refs": { + "SimulateCustomPolicyRequest$ContextEntries": "

    A list of context keys and corresponding values for the simulation to use. Whenever a context key is evaluated in one of the simulated IAM permission policies, the corresponding value is supplied.

    ", + "SimulatePrincipalPolicyRequest$ContextEntries": "

    A list of context keys and corresponding values for the simulation to use. Whenever a context key is evaluated in one of the simulated IAM permission policies, the corresponding value is supplied.

    " + } + }, + "ContextKeyNameType": { + "base": null, + "refs": { + "ContextEntry$ContextKeyName": "

    The full name of a condition context key, including the service prefix. For example, aws:SourceIp or s3:VersionId.

    ", + "ContextKeyNamesResultListType$member": null + } + }, + "ContextKeyNamesResultListType": { + "base": null, + "refs": { + "EvaluationResult$MissingContextValues": "

    A list of context keys that are required by the included input policies but that were not provided by one of the input parameters. This list is used when the resource in a simulation is \"*\", either explicitly, or when the ResourceArns parameter is blank. If you include a list of resources, then any missing context values are instead included under the ResourceSpecificResults section. To discover the context keys used by a set of policies, you can call GetContextKeysForCustomPolicy or GetContextKeysForPrincipalPolicy.

    ", + "GetContextKeysForPolicyResponse$ContextKeyNames": "

    The list of context keys that are referenced in the input policies.

    ", + "ResourceSpecificResult$MissingContextValues": "

    A list of context keys that are required by the included input policies but that were not provided by one of the input parameters. This list is used when a list of ARNs is included in the ResourceArns parameter instead of \"*\". If you do not specify individual resources, by setting ResourceArns to \"*\" or by not including the ResourceArns parameter, then any missing context values are instead included under the EvaluationResults section. To discover the context keys used by a set of policies, you can call GetContextKeysForCustomPolicy or GetContextKeysForPrincipalPolicy.

    " + } + }, + "ContextKeyTypeEnum": { + "base": null, + "refs": { + "ContextEntry$ContextKeyType": "

    The data type of the value (or values) specified in the ContextKeyValues parameter.

    " + } + }, + "ContextKeyValueListType": { + "base": null, + "refs": { + "ContextEntry$ContextKeyValues": "

    The value (or values, if the condition context key supports multiple values) to provide to the simulation for use when the key is referenced by a Condition element in an input policy.

    " + } + }, + "ContextKeyValueType": { + "base": null, + "refs": { + "ContextKeyValueListType$member": null + } + }, + "CreateAccessKeyRequest": { + "base": null, + "refs": { + } + }, + "CreateAccessKeyResponse": { + "base": "

    Contains the response to a successful CreateAccessKey request.

    ", + "refs": { + } + }, + "CreateAccountAliasRequest": { + "base": null, + "refs": { + } + }, + "CreateGroupRequest": { + "base": null, + "refs": { + } + }, + "CreateGroupResponse": { + "base": "

    Contains the response to a successful CreateGroup request.

    ", + "refs": { + } + }, + "CreateInstanceProfileRequest": { + "base": null, + "refs": { + } + }, + "CreateInstanceProfileResponse": { + "base": "

    Contains the response to a successful CreateInstanceProfile request.

    ", + "refs": { + } + }, + "CreateLoginProfileRequest": { + "base": null, + "refs": { + } + }, + "CreateLoginProfileResponse": { + "base": "

    Contains the response to a successful CreateLoginProfile request.

    ", + "refs": { + } + }, + "CreateOpenIDConnectProviderRequest": { + "base": null, + "refs": { + } + }, + "CreateOpenIDConnectProviderResponse": { + "base": "

    Contains the response to a successful CreateOpenIDConnectProvider request.

    ", + "refs": { + } + }, + "CreatePolicyRequest": { + "base": null, + "refs": { + } + }, + "CreatePolicyResponse": { + "base": "

    Contains the response to a successful CreatePolicy request.

    ", + "refs": { + } + }, + "CreatePolicyVersionRequest": { + "base": null, + "refs": { + } + }, + "CreatePolicyVersionResponse": { + "base": "

    Contains the response to a successful CreatePolicyVersion request.

    ", + "refs": { + } + }, + "CreateRoleRequest": { + "base": null, + "refs": { + } + }, + "CreateRoleResponse": { + "base": "

    Contains the response to a successful CreateRole request.

    ", + "refs": { + } + }, + "CreateSAMLProviderRequest": { + "base": null, + "refs": { + } + }, + "CreateSAMLProviderResponse": { + "base": "

    Contains the response to a successful CreateSAMLProvider request.

    ", + "refs": { + } + }, + "CreateUserRequest": { + "base": null, + "refs": { + } + }, + "CreateUserResponse": { + "base": "

    Contains the response to a successful CreateUser request.

    ", + "refs": { + } + }, + "CreateVirtualMFADeviceRequest": { + "base": null, + "refs": { + } + }, + "CreateVirtualMFADeviceResponse": { + "base": "

    Contains the response to a successful CreateVirtualMFADevice request.

    ", + "refs": { + } + }, + "CredentialReportExpiredException": { + "base": "

    The request was rejected because the most recent credential report has expired. To generate a new credential report, use GenerateCredentialReport. For more information about credential report expiration, see Getting Credential Reports in the IAM User Guide.

    ", + "refs": { + } + }, + "CredentialReportNotPresentException": { + "base": "

    The request was rejected because the credential report does not exist. To generate a credential report, use GenerateCredentialReport.

    ", + "refs": { + } + }, + "CredentialReportNotReadyException": { + "base": "

    The request was rejected because the credential report is still being generated.

    ", + "refs": { + } + }, + "DeactivateMFADeviceRequest": { + "base": null, + "refs": { + } + }, + "DeleteAccessKeyRequest": { + "base": null, + "refs": { + } + }, + "DeleteAccountAliasRequest": { + "base": null, + "refs": { + } + }, + "DeleteConflictException": { + "base": "

    The request was rejected because it attempted to delete a resource that has attached subordinate entities. The error message describes these entities.

    ", + "refs": { + } + }, + "DeleteGroupPolicyRequest": { + "base": null, + "refs": { + } + }, + "DeleteGroupRequest": { + "base": null, + "refs": { + } + }, + "DeleteInstanceProfileRequest": { + "base": null, + "refs": { + } + }, + "DeleteLoginProfileRequest": { + "base": null, + "refs": { + } + }, + "DeleteOpenIDConnectProviderRequest": { + "base": null, + "refs": { + } + }, + "DeletePolicyRequest": { + "base": null, + "refs": { + } + }, + "DeletePolicyVersionRequest": { + "base": null, + "refs": { + } + }, + "DeleteRolePolicyRequest": { + "base": null, + "refs": { + } + }, + "DeleteRoleRequest": { + "base": null, + "refs": { + } + }, + "DeleteSAMLProviderRequest": { + "base": null, + "refs": { + } + }, + "DeleteSSHPublicKeyRequest": { + "base": null, + "refs": { + } + }, + "DeleteServerCertificateRequest": { + "base": null, + "refs": { + } + }, + "DeleteSigningCertificateRequest": { + "base": null, + "refs": { + } + }, + "DeleteUserPolicyRequest": { + "base": null, + "refs": { + } + }, + "DeleteUserRequest": { + "base": null, + "refs": { + } + }, + "DeleteVirtualMFADeviceRequest": { + "base": null, + "refs": { + } + }, + "DetachGroupPolicyRequest": { + "base": null, + "refs": { + } + }, + "DetachRolePolicyRequest": { + "base": null, + "refs": { + } + }, + "DetachUserPolicyRequest": { + "base": null, + "refs": { + } + }, + "DuplicateCertificateException": { + "base": "

    The request was rejected because the same certificate is associated with an IAM user in the account.

    ", + "refs": { + } + }, + "DuplicateSSHPublicKeyException": { + "base": "

    The request was rejected because the SSH public key is already associated with the specified IAM user.

    ", + "refs": { + } + }, + "EnableMFADeviceRequest": { + "base": null, + "refs": { + } + }, + "EntityAlreadyExistsException": { + "base": "

    The request was rejected because it attempted to create a resource that already exists.

    ", + "refs": { + } + }, + "EntityTemporarilyUnmodifiableException": { + "base": "

    The request was rejected because it referenced an entity that is temporarily unmodifiable, such as a user name that was deleted and then recreated. The error indicates that the request is likely to succeed if you try again after waiting several minutes. The error message describes the entity.

    ", + "refs": { + } + }, + "EntityType": { + "base": null, + "refs": { + "ListEntitiesForPolicyRequest$EntityFilter": "

    The entity type to use for filtering the results.

    For example, when EntityFilter is Role, only the roles that are attached to the specified policy are returned. This parameter is optional. If it is not included, all attached entities (users, groups, and roles) are returned. The argument for this parameter must be one of the valid values listed below.

    ", + "entityListType$member": null + } + }, + "EvalDecisionDetailsType": { + "base": null, + "refs": { + "EvaluationResult$EvalDecisionDetails": "

    Additional details about the results of the evaluation decision. When there are both IAM policies and resource policies, this parameter explains how each set of policies contributes to the final evaluation decision. When simulating cross-account access to a resource, both the resource-based policy and the caller's IAM policy must grant access. See How IAM Roles Differ from Resource-based Policies.

    ", + "ResourceSpecificResult$EvalDecisionDetails": "

    Additional details about the results of the evaluation decision. When there are both IAM policies and resource policies, this parameter explains how each set of policies contributes to the final evaluation decision. When simulating cross-account access to a resource, both the resource-based policy and the caller's IAM policy must grant access.

    " + } + }, + "EvalDecisionSourceType": { + "base": null, + "refs": { + "EvalDecisionDetailsType$key": null + } + }, + "EvaluationResult": { + "base": "

    Contains the results of a simulation.

    This data type is used by the return parameter of SimulateCustomPolicy and SimulatePrincipalPolicy.

    ", + "refs": { + "EvaluationResultsListType$member": null + } + }, + "EvaluationResultsListType": { + "base": null, + "refs": { + "SimulatePolicyResponse$EvaluationResults": "

    The results of the simulation.

    " + } + }, + "GenerateCredentialReportResponse": { + "base": "

    Contains the response to a successful GenerateCredentialReport request.

    ", + "refs": { + } + }, + "GetAccessKeyLastUsedRequest": { + "base": null, + "refs": { + } + }, + "GetAccessKeyLastUsedResponse": { + "base": "

    Contains the response to a successful GetAccessKeyLastUsed request. It is also returned as a member of the AccessKeyMetadata structure returned by the ListAccessKeys action.

    ", + "refs": { + } + }, + "GetAccountAuthorizationDetailsRequest": { + "base": null, + "refs": { + } + }, + "GetAccountAuthorizationDetailsResponse": { + "base": "

    Contains the response to a successful GetAccountAuthorizationDetails request.

    ", + "refs": { + } + }, + "GetAccountPasswordPolicyResponse": { + "base": "

    Contains the response to a successful GetAccountPasswordPolicy request.

    ", + "refs": { + } + }, + "GetAccountSummaryResponse": { + "base": "

    Contains the response to a successful GetAccountSummary request.

    ", + "refs": { + } + }, + "GetContextKeysForCustomPolicyRequest": { + "base": null, + "refs": { + } + }, + "GetContextKeysForPolicyResponse": { + "base": "

    Contains the response to a successful GetContextKeysForPrincipalPolicy or GetContextKeysForCustomPolicy request.

    ", + "refs": { + } + }, + "GetContextKeysForPrincipalPolicyRequest": { + "base": null, + "refs": { + } + }, + "GetCredentialReportResponse": { + "base": "

    Contains the response to a successful GetCredentialReport request.

    ", + "refs": { + } + }, + "GetGroupPolicyRequest": { + "base": null, + "refs": { + } + }, + "GetGroupPolicyResponse": { + "base": "

    Contains the response to a successful GetGroupPolicy request.

    ", + "refs": { + } + }, + "GetGroupRequest": { + "base": null, + "refs": { + } + }, + "GetGroupResponse": { + "base": "

    Contains the response to a successful GetGroup request.

    ", + "refs": { + } + }, + "GetInstanceProfileRequest": { + "base": null, + "refs": { + } + }, + "GetInstanceProfileResponse": { + "base": "

    Contains the response to a successful GetInstanceProfile request.

    ", + "refs": { + } + }, + "GetLoginProfileRequest": { + "base": null, + "refs": { + } + }, + "GetLoginProfileResponse": { + "base": "

    Contains the response to a successful GetLoginProfile request.

    ", + "refs": { + } + }, + "GetOpenIDConnectProviderRequest": { + "base": null, + "refs": { + } + }, + "GetOpenIDConnectProviderResponse": { + "base": "

    Contains the response to a successful GetOpenIDConnectProvider request.

    ", + "refs": { + } + }, + "GetPolicyRequest": { + "base": null, + "refs": { + } + }, + "GetPolicyResponse": { + "base": "

    Contains the response to a successful GetPolicy request.

    ", + "refs": { + } + }, + "GetPolicyVersionRequest": { + "base": null, + "refs": { + } + }, + "GetPolicyVersionResponse": { + "base": "

    Contains the response to a successful GetPolicyVersion request.

    ", + "refs": { + } + }, + "GetRolePolicyRequest": { + "base": null, + "refs": { + } + }, + "GetRolePolicyResponse": { + "base": "

    Contains the response to a successful GetRolePolicy request.

    ", + "refs": { + } + }, + "GetRoleRequest": { + "base": null, + "refs": { + } + }, + "GetRoleResponse": { + "base": "

    Contains the response to a successful GetRole request.

    ", + "refs": { + } + }, + "GetSAMLProviderRequest": { + "base": null, + "refs": { + } + }, + "GetSAMLProviderResponse": { + "base": "

    Contains the response to a successful GetSAMLProvider request.

    ", + "refs": { + } + }, + "GetSSHPublicKeyRequest": { + "base": null, + "refs": { + } + }, + "GetSSHPublicKeyResponse": { + "base": "

    Contains the response to a successful GetSSHPublicKey request.

    ", + "refs": { + } + }, + "GetServerCertificateRequest": { + "base": null, + "refs": { + } + }, + "GetServerCertificateResponse": { + "base": "

    Contains the response to a successful GetServerCertificate request.

    ", + "refs": { + } + }, + "GetUserPolicyRequest": { + "base": null, + "refs": { + } + }, + "GetUserPolicyResponse": { + "base": "

    Contains the response to a successful GetUserPolicy request.

    ", + "refs": { + } + }, + "GetUserRequest": { + "base": null, + "refs": { + } + }, + "GetUserResponse": { + "base": "

    Contains the response to a successful GetUser request.

    ", + "refs": { + } + }, + "Group": { + "base": "

    Contains information about an IAM group entity.

    This data type is used as a response element in the following actions:

    ", + "refs": { + "CreateGroupResponse$Group": "

    A structure containing details about the new group.

    ", + "GetGroupResponse$Group": "

    A structure that contains details about the group.

    ", + "groupListType$member": null + } + }, + "GroupDetail": { + "base": "

    Contains information about an IAM group, including all of the group's policies.

    This data type is used as a response element in the GetAccountAuthorizationDetails action.

    ", + "refs": { + "groupDetailListType$member": null + } + }, + "InstanceProfile": { + "base": "

    Contains information about an instance profile.

    This data type is used as a response element in the following actions:

    ", + "refs": { + "CreateInstanceProfileResponse$InstanceProfile": "

    A structure containing details about the new instance profile.

    ", + "GetInstanceProfileResponse$InstanceProfile": "

    A structure containing details about the instance profile.

    ", + "instanceProfileListType$member": null + } + }, + "InvalidAuthenticationCodeException": { + "base": "

    The request was rejected because the authentication code was not recognized. The error message describes the specific error.

    ", + "refs": { + } + }, + "InvalidCertificateException": { + "base": "

    The request was rejected because the certificate is invalid.

    ", + "refs": { + } + }, + "InvalidInputException": { + "base": "

    The request was rejected because an invalid or out-of-range value was supplied for an input parameter.

    ", + "refs": { + } + }, + "InvalidPublicKeyException": { + "base": "

    The request was rejected because the public key is malformed or otherwise invalid.

    ", + "refs": { + } + }, + "InvalidUserTypeException": { + "base": "

    The request was rejected because the type of user for the transaction was incorrect.

    ", + "refs": { + } + }, + "KeyPairMismatchException": { + "base": "

    The request was rejected because the public key certificate and the private key do not match.

    ", + "refs": { + } + }, + "LimitExceededException": { + "base": "

    The request was rejected because it attempted to create resources beyond the current AWS account limits. The error message describes the limit exceeded.

    ", + "refs": { + } + }, + "LineNumber": { + "base": null, + "refs": { + "Position$Line": "

    The line containing the specified position in the document.

    " + } + }, + "ListAccessKeysRequest": { + "base": null, + "refs": { + } + }, + "ListAccessKeysResponse": { + "base": "

    Contains the response to a successful ListAccessKeys request.

    ", + "refs": { + } + }, + "ListAccountAliasesRequest": { + "base": null, + "refs": { + } + }, + "ListAccountAliasesResponse": { + "base": "

    Contains the response to a successful ListAccountAliases request.

    ", + "refs": { + } + }, + "ListAttachedGroupPoliciesRequest": { + "base": null, + "refs": { + } + }, + "ListAttachedGroupPoliciesResponse": { + "base": "

    Contains the response to a successful ListAttachedGroupPolicies request.

    ", + "refs": { + } + }, + "ListAttachedRolePoliciesRequest": { + "base": null, + "refs": { + } + }, + "ListAttachedRolePoliciesResponse": { + "base": "

    Contains the response to a successful ListAttachedRolePolicies request.

    ", + "refs": { + } + }, + "ListAttachedUserPoliciesRequest": { + "base": null, + "refs": { + } + }, + "ListAttachedUserPoliciesResponse": { + "base": "

    Contains the response to a successful ListAttachedUserPolicies request.

    ", + "refs": { + } + }, + "ListEntitiesForPolicyRequest": { + "base": null, + "refs": { + } + }, + "ListEntitiesForPolicyResponse": { + "base": "

    Contains the response to a successful ListEntitiesForPolicy request.

    ", + "refs": { + } + }, + "ListGroupPoliciesRequest": { + "base": null, + "refs": { + } + }, + "ListGroupPoliciesResponse": { + "base": "

    Contains the response to a successful ListGroupPolicies request.

    ", + "refs": { + } + }, + "ListGroupsForUserRequest": { + "base": null, + "refs": { + } + }, + "ListGroupsForUserResponse": { + "base": "

    Contains the response to a successful ListGroupsForUser request.

    ", + "refs": { + } + }, + "ListGroupsRequest": { + "base": null, + "refs": { + } + }, + "ListGroupsResponse": { + "base": "

    Contains the response to a successful ListGroups request.

    ", + "refs": { + } + }, + "ListInstanceProfilesForRoleRequest": { + "base": null, + "refs": { + } + }, + "ListInstanceProfilesForRoleResponse": { + "base": "

    Contains the response to a successful ListInstanceProfilesForRole request.

    ", + "refs": { + } + }, + "ListInstanceProfilesRequest": { + "base": null, + "refs": { + } + }, + "ListInstanceProfilesResponse": { + "base": "

    Contains the response to a successful ListInstanceProfiles request.

    ", + "refs": { + } + }, + "ListMFADevicesRequest": { + "base": null, + "refs": { + } + }, + "ListMFADevicesResponse": { + "base": "

    Contains the response to a successful ListMFADevices request.

    ", + "refs": { + } + }, + "ListOpenIDConnectProvidersRequest": { + "base": null, + "refs": { + } + }, + "ListOpenIDConnectProvidersResponse": { + "base": "

    Contains the response to a successful ListOpenIDConnectProviders request.

    ", + "refs": { + } + }, + "ListPoliciesRequest": { + "base": null, + "refs": { + } + }, + "ListPoliciesResponse": { + "base": "

    Contains the response to a successful ListPolicies request.

    ", + "refs": { + } + }, + "ListPolicyVersionsRequest": { + "base": null, + "refs": { + } + }, + "ListPolicyVersionsResponse": { + "base": "

    Contains the response to a successful ListPolicyVersions request.

    ", + "refs": { + } + }, + "ListRolePoliciesRequest": { + "base": null, + "refs": { + } + }, + "ListRolePoliciesResponse": { + "base": "

    Contains the response to a successful ListRolePolicies request.

    ", + "refs": { + } + }, + "ListRolesRequest": { + "base": null, + "refs": { + } + }, + "ListRolesResponse": { + "base": "

    Contains the response to a successful ListRoles request.

    ", + "refs": { + } + }, + "ListSAMLProvidersRequest": { + "base": null, + "refs": { + } + }, + "ListSAMLProvidersResponse": { + "base": "

    Contains the response to a successful ListSAMLProviders request.

    ", + "refs": { + } + }, + "ListSSHPublicKeysRequest": { + "base": null, + "refs": { + } + }, + "ListSSHPublicKeysResponse": { + "base": "

    Contains the response to a successful ListSSHPublicKeys request.

    ", + "refs": { + } + }, + "ListServerCertificatesRequest": { + "base": null, + "refs": { + } + }, + "ListServerCertificatesResponse": { + "base": "

    Contains the response to a successful ListServerCertificates request.

    ", + "refs": { + } + }, + "ListSigningCertificatesRequest": { + "base": null, + "refs": { + } + }, + "ListSigningCertificatesResponse": { + "base": "

    Contains the response to a successful ListSigningCertificates request.

    ", + "refs": { + } + }, + "ListUserPoliciesRequest": { + "base": null, + "refs": { + } + }, + "ListUserPoliciesResponse": { + "base": "

    Contains the response to a successful ListUserPolicies request.

    ", + "refs": { + } + }, + "ListUsersRequest": { + "base": null, + "refs": { + } + }, + "ListUsersResponse": { + "base": "

    Contains the response to a successful ListUsers request.

    ", + "refs": { + } + }, + "ListVirtualMFADevicesRequest": { + "base": null, + "refs": { + } + }, + "ListVirtualMFADevicesResponse": { + "base": "

    Contains the response to a successful ListVirtualMFADevices request.

    ", + "refs": { + } + }, + "LoginProfile": { + "base": "

    Contains the user name and password create date for a user.

    This data type is used as a response element in the CreateLoginProfile and GetLoginProfile actions.

    ", + "refs": { + "CreateLoginProfileResponse$LoginProfile": "

    A structure containing the user name and password create date.

    ", + "GetLoginProfileResponse$LoginProfile": "

    A structure containing the user name and password create date for the user.

    " + } + }, + "MFADevice": { + "base": "

    Contains information about an MFA device.

    This data type is used as a response element in the ListMFADevices action.

    ", + "refs": { + "mfaDeviceListType$member": null + } + }, + "MalformedCertificateException": { + "base": "

    The request was rejected because the certificate was malformed or expired. The error message describes the specific error.

    ", + "refs": { + } + }, + "MalformedPolicyDocumentException": { + "base": "

    The request was rejected because the policy document was malformed. The error message describes the specific error.

    ", + "refs": { + } + }, + "ManagedPolicyDetail": { + "base": "

    Contains information about a managed policy, including the policy's ARN, versions, and the number of principal entities (users, groups, and roles) that the policy is attached to.

    This data type is used as a response element in the GetAccountAuthorizationDetails action.

    For more information about managed policies, see Managed Policies and Inline Policies in the Using IAM guide.

    ", + "refs": { + "ManagedPolicyDetailListType$member": null + } + }, + "ManagedPolicyDetailListType": { + "base": null, + "refs": { + "GetAccountAuthorizationDetailsResponse$Policies": "

    A list containing information about managed policies.

    " + } + }, + "NoSuchEntityException": { + "base": "

    The request was rejected because it referenced an entity that does not exist. The error message describes the entity.

    ", + "refs": { + } + }, + "OpenIDConnectProviderListEntry": { + "base": "

    Contains the Amazon Resource Name (ARN) for an IAM OpenID Connect provider.

    ", + "refs": { + "OpenIDConnectProviderListType$member": null + } + }, + "OpenIDConnectProviderListType": { + "base": "

    Contains a list of IAM OpenID Connect providers.

    ", + "refs": { + "ListOpenIDConnectProvidersResponse$OpenIDConnectProviderList": "

    The list of IAM OIDC provider resource objects defined in the AWS account.

    " + } + }, + "OpenIDConnectProviderUrlType": { + "base": "

    Contains a URL that specifies the endpoint for an OpenID Connect provider.

    ", + "refs": { + "CreateOpenIDConnectProviderRequest$Url": "

    The URL of the identity provider. The URL must begin with \"https://\" and should correspond to the iss claim in the provider's OpenID Connect ID tokens. Per the OIDC standard, path components are allowed but query parameters are not. Typically the URL consists of only a host name, like \"https://server.example.org\" or \"https://example.com\".

    You cannot register the same provider multiple times in a single AWS account. If you try to submit a URL that has already been used for an OpenID Connect provider in the AWS account, you will get an error.
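    As a rough sketch of these URL rules with this SDK (assuming the usual client setup; the URL, client ID, and thumbprint below are placeholders, not values from this patch):

        // Assumes: svc := iam.New(session.Must(session.NewSession()))
        // with the aws, session, and iam packages of this SDK imported.
        out, err := svc.CreateOpenIDConnectProvider(&iam.CreateOpenIDConnectProviderInput{
            // Host-only HTTPS URL matching the provider's iss claim; no query string.
            Url:            aws.String("https://server.example.org"),
            ClientIDList:   []*string{aws.String("my-application-id")},              // placeholder
            ThumbprintList: []*string{aws.String("c3768084dfb3d2b68b7897bfEXAMPLE")}, // placeholder
        })
        if err != nil {
            log.Fatal(err) // re-registering the same URL in one account is an error, as noted above
        }
        fmt.Println(aws.StringValue(out.OpenIDConnectProviderArn))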

    ", + "GetOpenIDConnectProviderResponse$Url": "

    The URL that the IAM OIDC provider resource object is associated with. For more information, see CreateOpenIDConnectProvider.

    " + } + }, + "PasswordPolicy": { + "base": "

    Contains information about the account password policy.

    This data type is used as a response element in the GetAccountPasswordPolicy action.

    ", + "refs": { + "GetAccountPasswordPolicyResponse$PasswordPolicy": null + } + }, + "PasswordPolicyViolationException": { + "base": "

    The request was rejected because the provided password did not meet the requirements imposed by the account password policy.

    ", + "refs": { + } + }, + "Policy": { + "base": "

    Contains information about a managed policy.

    This data type is used as a response element in the CreatePolicy, GetPolicy, and ListPolicies actions.

    For more information about managed policies, refer to Managed Policies and Inline Policies in the Using IAM guide.

    ", + "refs": { + "CreatePolicyResponse$Policy": "

    A structure containing details about the new policy.

    ", + "GetPolicyResponse$Policy": "

    A structure containing details about the policy.

    ", + "policyListType$member": null + } + }, + "PolicyDetail": { + "base": "

    Contains information about an IAM policy, including the policy document.

    This data type is used as a response element in the GetAccountAuthorizationDetails action.

    ", + "refs": { + "policyDetailListType$member": null + } + }, + "PolicyEvaluationDecisionType": { + "base": null, + "refs": { + "EvalDecisionDetailsType$value": null, + "EvaluationResult$EvalDecision": "

    The result of the simulation.

    ", + "ResourceSpecificResult$EvalResourceDecision": "

    The result of the simulation of the simulated API action on the resource specified in EvalResourceName.

    " + } + }, + "PolicyEvaluationException": { + "base": "

    The request failed because a provided policy could not be successfully evaluated. An additional detail message indicates the source of the failure.

    ", + "refs": { + } + }, + "PolicyGroup": { + "base": "

    Contains information about a group that a managed policy is attached to.

    This data type is used as a response element in the ListEntitiesForPolicy action.

    For more information about managed policies, refer to Managed Policies and Inline Policies in the Using IAM guide.

    ", + "refs": { + "PolicyGroupListType$member": null + } + }, + "PolicyGroupListType": { + "base": null, + "refs": { + "ListEntitiesForPolicyResponse$PolicyGroups": "

    A list of IAM groups that the policy is attached to.

    " + } + }, + "PolicyIdentifierType": { + "base": null, + "refs": { + "Statement$SourcePolicyId": "

    The identifier of the policy that was provided as an input.

    " + } + }, + "PolicyRole": { + "base": "

    Contains information about a role that a managed policy is attached to.

    This data type is used as a response element in the ListEntitiesForPolicy action.

    For more information about managed policies, refer to Managed Policies and Inline Policies in the Using IAM guide.

    ", + "refs": { + "PolicyRoleListType$member": null + } + }, + "PolicyRoleListType": { + "base": null, + "refs": { + "ListEntitiesForPolicyResponse$PolicyRoles": "

    A list of IAM roles that the policy is attached to.

    " + } + }, + "PolicySourceType": { + "base": null, + "refs": { + "Statement$SourcePolicyType": "

    The type of the policy.

    " + } + }, + "PolicyUser": { + "base": "

    Contains information about a user that a managed policy is attached to.

    This data type is used as a response element in the ListEntitiesForPolicy action.

    For more information about managed policies, refer to Managed Policies and Inline Policies in the Using IAM guide.

    ", + "refs": { + "PolicyUserListType$member": null + } + }, + "PolicyUserListType": { + "base": null, + "refs": { + "ListEntitiesForPolicyResponse$PolicyUsers": "

    A list of IAM users that the policy is attached to.

    " + } + }, + "PolicyVersion": { + "base": "

    Contains information about a version of a managed policy.

    This data type is used as a response element in the CreatePolicyVersion, GetPolicyVersion, ListPolicyVersions, and GetAccountAuthorizationDetails actions.

    For more information about managed policies, refer to Managed Policies and Inline Policies in the Using IAM guide.

    ", + "refs": { + "CreatePolicyVersionResponse$PolicyVersion": "

    A structure containing details about the new policy version.

    ", + "GetPolicyVersionResponse$PolicyVersion": "

    A structure containing details about the policy version.

    ", + "policyDocumentVersionListType$member": null + } + }, + "Position": { + "base": "

    Contains the row and column of the location of a Statement element in a policy document.

    This data type is used as a member of the Statement type.

    ", + "refs": { + "Statement$StartPosition": "

    The row and column of the beginning of the Statement in an IAM policy.

    ", + "Statement$EndPosition": "

    The row and column of the end of a Statement in an IAM policy.

    " + } + }, + "PutGroupPolicyRequest": { + "base": null, + "refs": { + } + }, + "PutRolePolicyRequest": { + "base": null, + "refs": { + } + }, + "PutUserPolicyRequest": { + "base": null, + "refs": { + } + }, + "RemoveClientIDFromOpenIDConnectProviderRequest": { + "base": null, + "refs": { + } + }, + "RemoveRoleFromInstanceProfileRequest": { + "base": null, + "refs": { + } + }, + "RemoveUserFromGroupRequest": { + "base": null, + "refs": { + } + }, + "ReportContentType": { + "base": null, + "refs": { + "GetCredentialReportResponse$Content": "

    Contains the credential report. The report is Base64-encoded.
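    A minimal sketch of reading the report with this SDK, assuming the usual client setup; Content is a blob field, which the Go SDK decodes from its Base64 wire form, so the bytes here are already the raw CSV:

        // Assumes: svc := iam.New(session.Must(session.NewSession()))
        rep, err := svc.GetCredentialReport(&iam.GetCredentialReportInput{})
        if err != nil {
            log.Fatal(err) // fails until a GenerateCredentialReport run has completed
        }
        fmt.Println(string(rep.Content)) // CSV text of the credential report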

    " + } + }, + "ReportFormatType": { + "base": null, + "refs": { + "GetCredentialReportResponse$ReportFormat": "

    The format (MIME type) of the credential report.

    " + } + }, + "ReportStateDescriptionType": { + "base": null, + "refs": { + "GenerateCredentialReportResponse$Description": "

    Information about the credential report.

    " + } + }, + "ReportStateType": { + "base": null, + "refs": { + "GenerateCredentialReportResponse$State": "

    Information about the state of the credential report.

    " + } + }, + "ResourceHandlingOptionType": { + "base": null, + "refs": { + "SimulateCustomPolicyRequest$ResourceHandlingOption": "

    Specifies the type of simulation to run. Different APIs that support resource-based policies require different combinations of resources. By specifying the type of simulation to run, you enable the policy simulator to enforce the presence of the required resources to ensure reliable simulation results. If your simulation does not match one of the following scenarios, then you can omit this parameter. The following list shows each of the supported scenario values and the resources that you must define to run the simulation.

    Each of the EC2 scenarios requires that you specify instance, image, and security-group resources. If your scenario includes an EBS volume, then you must specify that volume as a resource. If the EC2 scenario includes VPC, then you must supply the network-interface resource. If it includes an IP subnet, then you must specify the subnet resource. For more information on the EC2 scenario options, see Supported Platforms in the AWS EC2 User Guide.

    • EC2-Classic-InstanceStore

      instance, image, security-group

    • EC2-Classic-EBS

      instance, image, security-group, volume

    • EC2-VPC-InstanceStore

      instance, image, security-group, network-interface

    • EC2-VPC-InstanceStore-Subnet

      instance, image, security-group, network-interface, subnet

    • EC2-VPC-EBS

      instance, image, security-group, network-interface, volume

    • EC2-VPC-EBS-Subnet

      instance, image, security-group, network-interface, subnet, volume

    ", + "SimulatePrincipalPolicyRequest$ResourceHandlingOption": "

    Specifies the type of simulation to run. Different APIs that support resource-based policies require different combinations of resources. By specifying the type of simulation to run, you enable the policy simulator to enforce the presence of the required resources to ensure reliable simulation results. If your simulation does not match one of the following scenarios, then you can omit this parameter. The following list shows each of the supported scenario values and the resources that you must define to run the simulation.

    Each of the EC2 scenarios requires that you specify instance, image, and security-group resources. If your scenario includes an EBS volume, then you must specify that volume as a resource. If the EC2 scenario includes VPC, then you must supply the network-interface resource. If it includes an IP subnet, then you must specify the subnet resource. For more information on the EC2 scenario options, see Supported Platforms in the AWS EC2 User Guide.

    • EC2-Classic-InstanceStore

      instance, image, security-group

    • EC2-Classic-EBS

      instance, image, security-group, volume

    • EC2-VPC-InstanceStore

      instance, image, security-group, network-interface

    • EC2-VPC-InstanceStore-Subnet

      instance, image, security-group, network-interface, subnet

    • EC2-VPC-EBS

      instance, image, security-group, network-interface, volume

    • EC2-VPC-EBS-Subnet

      instance, image, security-group, network-interface, subnet, volume
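    A hedged aws-sdk-go sketch of the EC2-VPC-EBS scenario above; policyJSON and every ARN are placeholders:

        // Assumes: svc := iam.New(session.Must(session.NewSession()))
        out, err := svc.SimulateCustomPolicy(&iam.SimulateCustomPolicyInput{
            PolicyInputList:        []*string{aws.String(policyJSON)}, // complete IAM policy document
            ActionNames:            []*string{aws.String("ec2:RunInstances")},
            ResourceHandlingOption: aws.String("EC2-VPC-EBS"),
            // EC2-VPC-EBS requires instance, image, security-group,
            // network-interface, and volume resources:
            ResourceArns: []*string{
                aws.String("arn:aws:ec2:us-east-1:123456789012:instance/*"),
                aws.String("arn:aws:ec2:us-east-1::image/ami-EXAMPLE"),
                aws.String("arn:aws:ec2:us-east-1:123456789012:security-group/*"),
                aws.String("arn:aws:ec2:us-east-1:123456789012:network-interface/*"),
                aws.String("arn:aws:ec2:us-east-1:123456789012:volume/*"),
            },
        })
        if err == nil {
            for _, r := range out.EvaluationResults {
                fmt.Println(aws.StringValue(r.EvalActionName), aws.StringValue(r.EvalDecision))
            }
        }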

    " + } + }, + "ResourceNameListType": { + "base": null, + "refs": { + "SimulateCustomPolicyRequest$ResourceArns": "

    A list of ARNs of AWS resources to include in the simulation. If this parameter is not provided, then the value defaults to * (all resources). Each API in the ActionNames parameter is evaluated for each resource in this list. The simulation determines the access result (allowed or denied) of each combination and reports it in the response.

    The simulation does not automatically retrieve policies for the specified resources. If you want to include a resource policy in the simulation, then you must include the policy as a string in the ResourcePolicy parameter.

    If you include a ResourcePolicy, then it must be applicable to all of the resources included in the simulation or you receive an invalid input error.

    For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    ", + "SimulatePrincipalPolicyRequest$ResourceArns": "

    A list of ARNs of AWS resources to include in the simulation. If this parameter is not provided, then the value defaults to * (all resources). Each API in the ActionNames parameter is evaluated for each resource in this list. The simulation determines the access result (allowed or denied) of each combination and reports it in the response.

    The simulation does not automatically retrieve policies for the specified resources. If you want to include a resource policy in the simulation, then you must include the policy as a string in the ResourcePolicy parameter.

    For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    " + } + }, + "ResourceNameType": { + "base": null, + "refs": { + "EvaluationResult$EvalResourceName": "

    The ARN of the resource that the indicated API action was tested on.

    ", + "ResourceNameListType$member": null, + "ResourceSpecificResult$EvalResourceName": "

    The name of the simulated resource, in Amazon Resource Name (ARN) format.

    ", + "SimulateCustomPolicyRequest$ResourceOwner": "

    An AWS account ID that specifies the owner of any simulated resource that does not identify its owner in the resource ARN, such as an S3 bucket or object. If ResourceOwner is specified, it is also used as the account owner of any ResourcePolicy included in the simulation. If the ResourceOwner parameter is not specified, then the owner of the resources and the resource policy defaults to the account of the identity provided in CallerArn. This parameter is required only if you specify a resource-based policy and the account that owns the resource is different from the account that owns the simulated calling user (CallerArn).

    ", + "SimulateCustomPolicyRequest$CallerArn": "

    The ARN of the IAM user that you want to use as the simulated caller of the APIs. CallerArn is required if you include a ResourcePolicy so that the policy's Principal element has a value to use in evaluating the policy.

    You can specify only the ARN of an IAM user. You cannot specify the ARN of an assumed role, federated user, or a service principal.

    ", + "SimulatePrincipalPolicyRequest$ResourceOwner": "

    An AWS account ID that specifies the owner of any simulated resource that does not identify its owner in the resource ARN, such as an S3 bucket or object. If ResourceOwner is specified, it is also used as the account owner of any ResourcePolicy included in the simulation. If the ResourceOwner parameter is not specified, then the owner of the resources and the resource policy defaults to the account of the identity provided in CallerArn. This parameter is required only if you specify a resource-based policy and the account that owns the resource is different from the account that owns the simulated calling user (CallerArn).

    ", + "SimulatePrincipalPolicyRequest$CallerArn": "

    The ARN of the IAM user that you want to specify as the simulated caller of the APIs. If you do not specify a CallerArn, it defaults to the ARN of the user that you specify in PolicySourceArn, if you specified a user. If you include both a PolicySourceArn (for example, arn:aws:iam::123456789012:user/David) and a CallerArn (for example, arn:aws:iam::123456789012:user/Bob), the result is that you simulate calling the APIs as Bob, as if Bob had David's policies.

    You can specify only the ARN of an IAM user. You cannot specify the ARN of an assumed role, federated user, or a service principal.

    CallerArn is required if you include a ResourcePolicy and the PolicySourceArn is not the ARN for an IAM user. This is required so that the resource-based policy's Principal element has a value to use in evaluating the policy.

    For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.
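    To make the Bob-and-David example above concrete, a minimal sketch (the s3:ListBucket action is an arbitrary assumption):

        // Assumes: svc := iam.New(session.Must(session.NewSession()))
        // Evaluates David's policies while treating Bob as the caller.
        out, err := svc.SimulatePrincipalPolicy(&iam.SimulatePrincipalPolicyInput{
            PolicySourceArn: aws.String("arn:aws:iam::123456789012:user/David"),
            CallerArn:       aws.String("arn:aws:iam::123456789012:user/Bob"),
            ActionNames:     []*string{aws.String("s3:ListBucket")},
        })
        if err == nil {
            for _, r := range out.EvaluationResults {
                fmt.Println(aws.StringValue(r.EvalActionName), "=>", aws.StringValue(r.EvalDecision))
            }
        }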

    " + } + }, + "ResourceSpecificResult": { + "base": "

    Contains the result of the simulation of a single API action call on a single resource.

    This data type is used by a member of the EvaluationResult data type.

    ", + "refs": { + "ResourceSpecificResultListType$member": null + } + }, + "ResourceSpecificResultListType": { + "base": null, + "refs": { + "EvaluationResult$ResourceSpecificResults": "

    The individual results of the simulation of the API action specified in EvalActionName on each resource.

    " + } + }, + "ResyncMFADeviceRequest": { + "base": null, + "refs": { + } + }, + "Role": { + "base": "

    Contains information about an IAM role.

    This data type is used as a response element in the following actions:

    ", + "refs": { + "CreateRoleResponse$Role": "

    A structure containing details about the new role.

    ", + "GetRoleResponse$Role": "

    A structure containing details about the IAM role.

    ", + "roleListType$member": null + } + }, + "RoleDetail": { + "base": "

    Contains information about an IAM role, including all of the role's policies.

    This data type is used as a response element in the GetAccountAuthorizationDetails action.

    ", + "refs": { + "roleDetailListType$member": null + } + }, + "SAMLMetadataDocumentType": { + "base": null, + "refs": { + "CreateSAMLProviderRequest$SAMLMetadataDocument": "

    An XML document generated by an identity provider (IdP) that supports SAML 2.0. The document includes the issuer's name, expiration information, and keys that can be used to validate the SAML authentication response (assertions) that are received from the IdP. You must generate the metadata document using the identity management software that is used as your organization's IdP.

    For more information, see About SAML 2.0-based Federation in the IAM User Guide.

    ", + "GetSAMLProviderResponse$SAMLMetadataDocument": "

    The XML metadata document that includes information about an identity provider.

    ", + "UpdateSAMLProviderRequest$SAMLMetadataDocument": "

    An XML document generated by an identity provider (IdP) that supports SAML 2.0. The document includes the issuer's name, expiration information, and keys that can be used to validate the SAML authentication response (assertions) that are received from the IdP. You must generate the metadata document using the identity management software that is used as your organization's IdP.

    " + } + }, + "SAMLProviderListEntry": { + "base": "

    Contains the list of SAML providers for this account.

    ", + "refs": { + "SAMLProviderListType$member": null + } + }, + "SAMLProviderListType": { + "base": null, + "refs": { + "ListSAMLProvidersResponse$SAMLProviderList": "

    The list of SAML provider resource objects defined in IAM for this AWS account.

    " + } + }, + "SAMLProviderNameType": { + "base": null, + "refs": { + "CreateSAMLProviderRequest$Name": "

    The name of the provider to create.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    " + } + }, + "SSHPublicKey": { + "base": "

    Contains information about an SSH public key.

    This data type is used as a response element in the GetSSHPublicKey and UploadSSHPublicKey actions.

    ", + "refs": { + "GetSSHPublicKeyResponse$SSHPublicKey": "

    A structure containing details about the SSH public key.

    ", + "UploadSSHPublicKeyResponse$SSHPublicKey": "

    Contains information about the SSH public key.

    " + } + }, + "SSHPublicKeyListType": { + "base": null, + "refs": { + "ListSSHPublicKeysResponse$SSHPublicKeys": "

    A list of the SSH public keys assigned to the IAM user.

    " + } + }, + "SSHPublicKeyMetadata": { + "base": "

    Contains information about an SSH public key, without the key's body or fingerprint.

    This data type is used as a response element in the ListSSHPublicKeys action.

    ", + "refs": { + "SSHPublicKeyListType$member": null + } + }, + "ServerCertificate": { + "base": "

    Contains information about a server certificate.

    This data type is used as a response element in the GetServerCertificate action.

    ", + "refs": { + "GetServerCertificateResponse$ServerCertificate": "

    A structure containing details about the server certificate.

    " + } + }, + "ServerCertificateMetadata": { + "base": "

    Contains information about a server certificate without its certificate body, certificate chain, and private key.

    This data type is used as a response element in the UploadServerCertificate and ListServerCertificates actions.

    ", + "refs": { + "ServerCertificate$ServerCertificateMetadata": "

    The meta information of the server certificate, such as its name, path, ID, and ARN.

    ", + "UploadServerCertificateResponse$ServerCertificateMetadata": "

    The meta information of the uploaded server certificate without its certificate body, certificate chain, and private key.

    ", + "serverCertificateMetadataListType$member": null + } + }, + "ServiceFailureException": { + "base": "

    The request processing has failed because of an unknown error, exception, or failure.

    ", + "refs": { + } + }, + "SetDefaultPolicyVersionRequest": { + "base": null, + "refs": { + } + }, + "SigningCertificate": { + "base": "

    Contains information about an X.509 signing certificate.

    This data type is used as a response element in the UploadSigningCertificate and ListSigningCertificates actions.

    ", + "refs": { + "UploadSigningCertificateResponse$Certificate": "

    Information about the certificate.

    ", + "certificateListType$member": null + } + }, + "SimulateCustomPolicyRequest": { + "base": null, + "refs": { + } + }, + "SimulatePolicyResponse": { + "base": "

    Contains the response to a successful SimulatePrincipalPolicy or SimulateCustomPolicy request.

    ", + "refs": { + } + }, + "SimulatePrincipalPolicyRequest": { + "base": null, + "refs": { + } + }, + "SimulationPolicyListType": { + "base": null, + "refs": { + "GetContextKeysForCustomPolicyRequest$PolicyInputList": "

    A list of policies for which you want the list of context keys referenced in those policies. Each document is specified as a string containing the complete, valid JSON text of an IAM policy.

    The regex pattern for this parameter is a string of characters consisting of any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range (\\u00FF). It also includes the special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D).

    ", + "GetContextKeysForPrincipalPolicyRequest$PolicyInputList": "

    An optional list of additional policies for which you want the list of context keys that are referenced.

    The regex pattern for this parameter is a string of characters consisting of any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range (\\u00FF). It also includes the special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D).

    ", + "SimulateCustomPolicyRequest$PolicyInputList": "

    A list of policy documents to include in the simulation. Each document is specified as a string containing the complete, valid JSON text of an IAM policy. Do not include any resource-based policies in this parameter. Any resource-based policy must be submitted with the ResourcePolicy parameter. The policies cannot be \"scope-down\" policies, such as those you could include in a call to GetFederationToken or one of the AssumeRole APIs to restrict what a user can do while using the temporary credentials.

    The regex pattern for this parameter is a string of characters consisting of any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range (\\u00FF). It also includes the special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D).

    ", + "SimulatePrincipalPolicyRequest$PolicyInputList": "

    An optional list of additional policy documents to include in the simulation. Each document is specified as a string containing the complete, valid JSON text of an IAM policy.

    The regex pattern for this parameter is a string of characters consisting of any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range (\\u00FF). It also includes the special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D).

    " + } + }, + "Statement": { + "base": "

    Contains a reference to a Statement element in a policy document that determines the result of the simulation.

    This data type is used by the MatchedStatements member of the EvaluationResult type.

    ", + "refs": { + "StatementListType$member": null + } + }, + "StatementListType": { + "base": null, + "refs": { + "EvaluationResult$MatchedStatements": "

    A list of the statements in the input policies that determine the result for this scenario. Remember that even if multiple statements allow the action on the resource, if any statement denies that action, then the explicit deny overrides any allow, and the deny statement is the only entry included in the result.

    ", + "ResourceSpecificResult$MatchedStatements": "

    A list of the statements in the input policies that determine the result for this part of the simulation. Remember that even if multiple statements allow the action on the resource, if any statement denies that action, then the explicit deny overrides any allow, and the deny statement is the only entry included in the result.

    " + } + }, + "UnrecognizedPublicKeyEncodingException": { + "base": "

    The request was rejected because the public key encoding format is unsupported or unrecognized.

    ", + "refs": { + } + }, + "UpdateAccessKeyRequest": { + "base": null, + "refs": { + } + }, + "UpdateAccountPasswordPolicyRequest": { + "base": null, + "refs": { + } + }, + "UpdateAssumeRolePolicyRequest": { + "base": null, + "refs": { + } + }, + "UpdateGroupRequest": { + "base": null, + "refs": { + } + }, + "UpdateLoginProfileRequest": { + "base": null, + "refs": { + } + }, + "UpdateOpenIDConnectProviderThumbprintRequest": { + "base": null, + "refs": { + } + }, + "UpdateSAMLProviderRequest": { + "base": null, + "refs": { + } + }, + "UpdateSAMLProviderResponse": { + "base": "

    Contains the response to a successful UpdateSAMLProvider request.

    ", + "refs": { + } + }, + "UpdateSSHPublicKeyRequest": { + "base": null, + "refs": { + } + }, + "UpdateServerCertificateRequest": { + "base": null, + "refs": { + } + }, + "UpdateSigningCertificateRequest": { + "base": null, + "refs": { + } + }, + "UpdateUserRequest": { + "base": null, + "refs": { + } + }, + "UploadSSHPublicKeyRequest": { + "base": null, + "refs": { + } + }, + "UploadSSHPublicKeyResponse": { + "base": "

    Contains the response to a successful UploadSSHPublicKey request.

    ", + "refs": { + } + }, + "UploadServerCertificateRequest": { + "base": null, + "refs": { + } + }, + "UploadServerCertificateResponse": { + "base": "

    Contains the response to a successful UploadServerCertificate request.

    ", + "refs": { + } + }, + "UploadSigningCertificateRequest": { + "base": null, + "refs": { + } + }, + "UploadSigningCertificateResponse": { + "base": "

    Contains the response to a successful UploadSigningCertificate request.

    ", + "refs": { + } + }, + "User": { + "base": "

    Contains information about an IAM user entity.

    This data type is used as a response element in the following actions:

    ", + "refs": { + "CreateUserResponse$User": "

    A structure with details about the new IAM user.

    ", + "GetUserResponse$User": "

    A structure containing details about the IAM user.

    ", + "VirtualMFADevice$User": null, + "userListType$member": null + } + }, + "UserDetail": { + "base": "

    Contains information about an IAM user, including all the user's policies and all the IAM groups the user is in.

    This data type is used as a response element in the GetAccountAuthorizationDetails action.

    ", + "refs": { + "userDetailListType$member": null + } + }, + "VirtualMFADevice": { + "base": "

    Contains information about a virtual MFA device.

    ", + "refs": { + "CreateVirtualMFADeviceResponse$VirtualMFADevice": "

    A structure containing details about the new virtual MFA device.

    ", + "virtualMFADeviceListType$member": null + } + }, + "accessKeyIdType": { + "base": null, + "refs": { + "AccessKey$AccessKeyId": "

    The ID for this access key.

    ", + "AccessKeyMetadata$AccessKeyId": "

    The ID for this access key.

    ", + "DeleteAccessKeyRequest$AccessKeyId": "

    The access key ID for the access key and secret access key that you want to delete.

    The regex pattern for this parameter is a string of characters that can consist of any uppercase or lowercase letter or digit.

    ", + "GetAccessKeyLastUsedRequest$AccessKeyId": "

    The identifier of an access key.

    The regex pattern for this parameter is a string of characters that can consist of any uppercase or lowercase letter or digit.

    ", + "UpdateAccessKeyRequest$AccessKeyId": "

    The access key ID of the secret access key you want to update.

    The regex pattern for this parameter is a string of characters that can consist of any uppercase or lowercase letter or digit.

    " + } + }, + "accessKeyMetadataListType": { + "base": "

    Contains a list of access key metadata.

    This data type is used as a response element in the ListAccessKeys action.

    ", + "refs": { + "ListAccessKeysResponse$AccessKeyMetadata": "

    A list of objects containing metadata about the access keys.

    " + } + }, + "accessKeySecretType": { + "base": null, + "refs": { + "AccessKey$SecretAccessKey": "

    The secret key used to sign requests.

    " + } + }, + "accountAliasListType": { + "base": null, + "refs": { + "ListAccountAliasesResponse$AccountAliases": "

    A list of aliases associated with the account. AWS supports only one alias per account.

    " + } + }, + "accountAliasType": { + "base": null, + "refs": { + "CreateAccountAliasRequest$AccountAlias": "

    The account alias to create.

    The regex pattern for this parameter is a string of characters consisting of lowercase letters, digits, and dashes. You cannot start or finish with a dash, nor can you have two dashes in a row.

    ", + "DeleteAccountAliasRequest$AccountAlias": "

    The name of the account alias to delete.

    The regex pattern for this parameter is a string of characters consisting of lowercase letters, digits, and dashes. You cannot start or finish with a dash, nor can you have two dashes in a row.

    ", + "accountAliasListType$member": null + } + }, + "arnType": { + "base": "

    The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources.

    For more information about ARNs, go to Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    ", + "refs": { + "AddClientIDToOpenIDConnectProviderRequest$OpenIDConnectProviderArn": "

    The Amazon Resource Name (ARN) of the IAM OpenID Connect (OIDC) provider resource to add the client ID to. You can get a list of OIDC provider ARNs by using the ListOpenIDConnectProviders action.

    ", + "AttachGroupPolicyRequest$PolicyArn": "

    The Amazon Resource Name (ARN) of the IAM policy you want to attach.

    For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    ", + "AttachRolePolicyRequest$PolicyArn": "

    The Amazon Resource Name (ARN) of the IAM policy you want to attach.

    For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    ", + "AttachUserPolicyRequest$PolicyArn": "

    The Amazon Resource Name (ARN) of the IAM policy you want to attach.

    For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    ", + "AttachedPolicy$PolicyArn": null, + "CreateOpenIDConnectProviderResponse$OpenIDConnectProviderArn": "

    The Amazon Resource Name (ARN) of the new IAM OpenID Connect provider that is created. For more information, see OpenIDConnectProviderListEntry.

    ", + "CreatePolicyVersionRequest$PolicyArn": "

    The Amazon Resource Name (ARN) of the IAM policy to which you want to add a new version.

    For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    ", + "CreateSAMLProviderResponse$SAMLProviderArn": "

    The Amazon Resource Name (ARN) of the new SAML provider resource in IAM.

    ", + "DeleteOpenIDConnectProviderRequest$OpenIDConnectProviderArn": "

    The Amazon Resource Name (ARN) of the IAM OpenID Connect provider resource object to delete. You can get a list of OpenID Connect provider resource ARNs by using the ListOpenIDConnectProviders action.

    ", + "DeletePolicyRequest$PolicyArn": "

    The Amazon Resource Name (ARN) of the IAM policy you want to delete.

    For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    ", + "DeletePolicyVersionRequest$PolicyArn": "

    The Amazon Resource Name (ARN) of the IAM policy from which you want to delete a version.

    For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    ", + "DeleteSAMLProviderRequest$SAMLProviderArn": "

    The Amazon Resource Name (ARN) of the SAML provider to delete.

    ", + "DetachGroupPolicyRequest$PolicyArn": "

    The Amazon Resource Name (ARN) of the IAM policy you want to detach.

    For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    ", + "DetachRolePolicyRequest$PolicyArn": "

    The Amazon Resource Name (ARN) of the IAM policy you want to detach.

    For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    ", + "DetachUserPolicyRequest$PolicyArn": "

    The Amazon Resource Name (ARN) of the IAM policy you want to detach.

    For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    ", + "GetContextKeysForPrincipalPolicyRequest$PolicySourceArn": "

    The ARN of a user, group, or role whose policies contain the context keys that you want listed. If you specify a user, the list includes context keys that are found in all policies attached to the user as well as to all groups that the user is a member of. If you pick a group or a role, then it includes only those context keys that are found in policies attached to that entity. Note that all parameters are shown in unencoded form here for clarity, but must be URL encoded to be included as a part of a real HTTP request.

    For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    ", + "GetOpenIDConnectProviderRequest$OpenIDConnectProviderArn": "

    The Amazon Resource Name (ARN) of the OIDC provider resource object in IAM to get information for. You can get a list of OIDC provider resource ARNs by using the ListOpenIDConnectProviders action.

    For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    ", + "GetPolicyRequest$PolicyArn": "

    The Amazon Resource Name (ARN) of the managed policy that you want information about.

    For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    ", + "GetPolicyVersionRequest$PolicyArn": "

    The Amazon Resource Name (ARN) of the managed policy that you want information about.

    For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    ", + "GetSAMLProviderRequest$SAMLProviderArn": "

    The Amazon Resource Name (ARN) of the SAML provider resource object in IAM to get information about.

    For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    ", + "Group$Arn": "

    The Amazon Resource Name (ARN) specifying the group. For more information about ARNs and how to use them in policies, see IAM Identifiers in the Using IAM guide.

    ", + "GroupDetail$Arn": null, + "InstanceProfile$Arn": "

    The Amazon Resource Name (ARN) specifying the instance profile. For more information about ARNs and how to use them in policies, see IAM Identifiers in the Using IAM guide.

    ", + "ListEntitiesForPolicyRequest$PolicyArn": "

    The Amazon Resource Name (ARN) of the IAM policy for which you want the versions.

    For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    ", + "ListPolicyVersionsRequest$PolicyArn": "

    The Amazon Resource Name (ARN) of the IAM policy for which you want the versions.

    For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    ", + "ManagedPolicyDetail$Arn": null, + "OpenIDConnectProviderListEntry$Arn": null, + "Policy$Arn": null, + "RemoveClientIDFromOpenIDConnectProviderRequest$OpenIDConnectProviderArn": "

    The Amazon Resource Name (ARN) of the IAM OIDC provider resource to remove the client ID from. You can get a list of OIDC provider ARNs by using the ListOpenIDConnectProviders action.

    For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    ", + "Role$Arn": "

    The Amazon Resource Name (ARN) specifying the role. For more information about ARNs and how to use them in policies, see IAM Identifiers in the Using IAM guide.

    ", + "RoleDetail$Arn": null, + "SAMLProviderListEntry$Arn": "

    The Amazon Resource Name (ARN) of the SAML provider.

    ", + "ServerCertificateMetadata$Arn": "

    The Amazon Resource Name (ARN) specifying the server certificate. For more information about ARNs and how to use them in policies, see IAM Identifiers in the Using IAM guide.

    ", + "SetDefaultPolicyVersionRequest$PolicyArn": "

    The Amazon Resource Name (ARN) of the IAM policy whose default version you want to set.

    For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    ", + "SimulatePrincipalPolicyRequest$PolicySourceArn": "

    The Amazon Resource Name (ARN) of a user, group, or role whose policies you want to include in the simulation. If you specify a user, group, or role, the simulation includes all policies that are associated with that entity. If you specify a user, the simulation also includes all policies that are attached to any groups the user belongs to.

    For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    ", + "UpdateOpenIDConnectProviderThumbprintRequest$OpenIDConnectProviderArn": "

    The Amazon Resource Name (ARN) of the IAM OIDC provider resource object for which you want to update the thumbprint. You can get a list of OIDC provider ARNs by using the ListOpenIDConnectProviders action.

    For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    ", + "UpdateSAMLProviderRequest$SAMLProviderArn": "

    The Amazon Resource Name (ARN) of the SAML provider to update.

    For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    ", + "UpdateSAMLProviderResponse$SAMLProviderArn": "

    The Amazon Resource Name (ARN) of the SAML provider that was updated.

    ", + "User$Arn": "

    The Amazon Resource Name (ARN) that identifies the user. For more information about ARNs and how to use ARNs in policies, see IAM Identifiers in the Using IAM guide.

    ", + "UserDetail$Arn": null + } + }, + "assignmentStatusType": { + "base": null, + "refs": { + "ListVirtualMFADevicesRequest$AssignmentStatus": "

    The status (Unassigned or Assigned) of the devices to list. If you do not specify an AssignmentStatus, the action defaults to Any, which lists both assigned and unassigned virtual MFA devices.

    " + } + }, + "attachedPoliciesListType": { + "base": null, + "refs": { + "GroupDetail$AttachedManagedPolicies": "

    A list of the managed policies attached to the group.

    ", + "ListAttachedGroupPoliciesResponse$AttachedPolicies": "

    A list of the attached policies.

    ", + "ListAttachedRolePoliciesResponse$AttachedPolicies": "

    A list of the attached policies.

    ", + "ListAttachedUserPoliciesResponse$AttachedPolicies": "

    A list of the attached policies.

    ", + "RoleDetail$AttachedManagedPolicies": "

    A list of managed policies attached to the role. These policies are the role's access (permissions) policies.

    ", + "UserDetail$AttachedManagedPolicies": "

    A list of the managed policies attached to the user.

    " + } + }, + "attachmentCountType": { + "base": null, + "refs": { + "ManagedPolicyDetail$AttachmentCount": "

    The number of principal entities (users, groups, and roles) that the policy is attached to.

    ", + "Policy$AttachmentCount": "

    The number of entities (users, groups, and roles) that the policy is attached to.

    " + } + }, + "authenticationCodeType": { + "base": null, + "refs": { + "EnableMFADeviceRequest$AuthenticationCode1": "

    An authentication code emitted by the device.

    The format for this parameter is a string of 6 digits.

    ", + "EnableMFADeviceRequest$AuthenticationCode2": "

    A subsequent authentication code emitted by the device.

    The format for this parameter is a string of 6 digits.
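    A minimal sketch of supplying the two consecutive codes, assuming the usual client setup; the user name, serial number, and codes are placeholders:

        // Assumes: svc := iam.New(session.Must(session.NewSession()))
        _, err := svc.EnableMFADevice(&iam.EnableMFADeviceInput{
            UserName:            aws.String("Alice"),
            SerialNumber:        aws.String("arn:aws:iam::123456789012:mfa/Alice"),
            AuthenticationCode1: aws.String("123456"), // first code shown by the device
            AuthenticationCode2: aws.String("789012"), // the next consecutive code
        })
        if err != nil {
            log.Fatal(err)
        }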

    ", + "ResyncMFADeviceRequest$AuthenticationCode1": "

    An authentication code emitted by the device.

    The format for this parameter is a sequence of six digits.

    ", + "ResyncMFADeviceRequest$AuthenticationCode2": "

    A subsequent authentication code emitted by the device.

    The format for this parameter is a sequence of six digits.

    " + } + }, + "booleanObjectType": { + "base": null, + "refs": { + "PasswordPolicy$HardExpiry": "

    Specifies whether IAM users are prevented from setting a new password after their password has expired.

    ", + "UpdateAccountPasswordPolicyRequest$HardExpiry": "

    Prevents IAM users from setting a new password after their password has expired.

    Default value: false

    ", + "UpdateLoginProfileRequest$PasswordResetRequired": "

    Allows this new password to be used only once by requiring the specified IAM user to set a new password on next sign-in.

    " + } + }, + "booleanType": { + "base": null, + "refs": { + "CreateLoginProfileRequest$PasswordResetRequired": "

    Specifies whether the user is required to set a new password on next sign-in.

    ", + "CreatePolicyVersionRequest$SetAsDefault": "

    Specifies whether to set this version as the policy's default version.

    When this parameter is true, the new policy version becomes the operative version; that is, the version that is in effect for the IAM users, groups, and roles that the policy is attached to.

    For more information about managed policy versions, see Versioning for Managed Policies in the IAM User Guide.
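    For example, a hedged sketch that publishes a new version and makes it the operative one in a single call; the ARN and policyJSON are placeholders:

        // Assumes: svc := iam.New(session.Must(session.NewSession()))
        out, err := svc.CreatePolicyVersion(&iam.CreatePolicyVersionInput{
            PolicyArn:      aws.String("arn:aws:iam::123456789012:policy/MyPolicy"),
            PolicyDocument: aws.String(policyJSON), // complete JSON policy text
            SetAsDefault:   aws.Bool(true),         // new version becomes the default immediately
        })
        if err == nil {
            fmt.Println(aws.StringValue(out.PolicyVersion.VersionId))
        }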

    ", + "GetAccountAuthorizationDetailsResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "GetGroupResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListAccessKeysResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListAccountAliasesResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListAttachedGroupPoliciesResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListAttachedRolePoliciesResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListAttachedUserPoliciesResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListEntitiesForPolicyResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListGroupPoliciesResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListGroupsForUserResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListGroupsResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListInstanceProfilesForRoleResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListInstanceProfilesResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListMFADevicesResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListPoliciesRequest$OnlyAttached": "

    A flag to filter the results to only the attached policies.

    When OnlyAttached is true, the returned list contains only the policies that are attached to an IAM user, group, or role. When OnlyAttached is false, or when the parameter is not included, all policies are returned.

    ", + "ListPoliciesResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListPolicyVersionsResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListRolePoliciesResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListRolesResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListSSHPublicKeysResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListServerCertificatesResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListSigningCertificatesResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListUserPoliciesResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListUsersResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.
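    The recommended IsTruncated/Marker loop, sketched here for ListUsers; the same shape applies to every paginated response in this file:

        // Assumes: svc := iam.New(session.Must(session.NewSession()))
        input := &iam.ListUsersInput{}
        for {
            page, err := svc.ListUsers(input)
            if err != nil {
                log.Fatal(err)
            }
            for _, u := range page.Users {
                fmt.Println(aws.StringValue(u.Arn))
            }
            // Check IsTruncated after every call: fewer than MaxItems
            // results can still mean more pages remain.
            if !aws.BoolValue(page.IsTruncated) {
                break
            }
            input.Marker = page.Marker
        }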

    ", + "ListVirtualMFADevicesResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "LoginProfile$PasswordResetRequired": "

    Specifies whether the user is required to set a new password on next sign-in.

    ", + "ManagedPolicyDetail$IsAttachable": "

    Specifies whether the policy can be attached to an IAM user, group, or role.

    ", + "PasswordPolicy$RequireSymbols": "

    Specifies whether to require symbols for IAM user passwords.

    ", + "PasswordPolicy$RequireNumbers": "

    Specifies whether to require numbers for IAM user passwords.

    ", + "PasswordPolicy$RequireUppercaseCharacters": "

    Specifies whether to require uppercase characters for IAM user passwords.

    ", + "PasswordPolicy$RequireLowercaseCharacters": "

    Specifies whether to require lowercase characters for IAM user passwords.

    ", + "PasswordPolicy$AllowUsersToChangePassword": "

    Specifies whether IAM users are allowed to change their own password.

    ", + "PasswordPolicy$ExpirePasswords": "

    Indicates whether passwords in the account expire. Returns true if MaxPasswordAge contains a value greater than 0. Returns false if MaxPasswordAge is 0 or not present.

    ", + "Policy$IsAttachable": "

    Specifies whether the policy can be attached to an IAM user, group, or role.

    ", + "PolicyVersion$IsDefaultVersion": "

    Specifies whether the policy version is set as the policy's default version.

    ", + "SimulatePolicyResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "UpdateAccountPasswordPolicyRequest$RequireSymbols": "

    Specifies whether IAM user passwords must contain at least one of the following non-alphanumeric characters:

    ! @ # $ % ^ & * ( ) _ + - = [ ] { } | '

    Default value: false

    ", + "UpdateAccountPasswordPolicyRequest$RequireNumbers": "

    Specifies whether IAM user passwords must contain at least one numeric character (0 to 9).

    Default value: false

    ", + "UpdateAccountPasswordPolicyRequest$RequireUppercaseCharacters": "

    Specifies whether IAM user passwords must contain at least one uppercase character from the ISO basic Latin alphabet (A to Z).

    Default value: false

    ", + "UpdateAccountPasswordPolicyRequest$RequireLowercaseCharacters": "

    Specifies whether IAM user passwords must contain at least one lowercase character from the ISO basic Latin alphabet (a to z).

    Default value: false

    ", + "UpdateAccountPasswordPolicyRequest$AllowUsersToChangePassword": "

    Allows all IAM users in your account to use the AWS Management Console to change their own passwords. For more information, see Letting IAM Users Change Their Own Passwords in the IAM User Guide.

    Default value: false

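    The UpdateAccountPasswordPolicyRequest flags above all land in a single SDK call. As a minimal sketch with the vendored aws-sdk-go IAM client (the chosen values are illustrative, not recommendations):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/iam"
    )

    func main() {
        svc := iam.New(session.Must(session.NewSession()))

        // Each field maps onto one request member documented above;
        // omitted booleans keep their documented default of false.
        _, err := svc.UpdateAccountPasswordPolicy(&iam.UpdateAccountPasswordPolicyInput{
            MinimumPasswordLength:      aws.Int64(12),
            RequireSymbols:             aws.Bool(true),
            RequireNumbers:             aws.Bool(true),
            RequireUppercaseCharacters: aws.Bool(true),
            RequireLowercaseCharacters: aws.Bool(true),
            AllowUsersToChangePassword: aws.Bool(true),
        })
        if err != nil {
            fmt.Println("UpdateAccountPasswordPolicy failed:", err)
        }
    }
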
    " + } + }, + "certificateBodyType": { + "base": null, + "refs": { + "ServerCertificate$CertificateBody": "

    The contents of the public key certificate.

    ", + "SigningCertificate$CertificateBody": "

    The contents of the signing certificate.

    ", + "UploadServerCertificateRequest$CertificateBody": "

    The contents of the public key certificate in PEM-encoded format.

    The regex pattern for this parameter is a string of characters consisting of any printable ASCII character ranging from the space character (\\u0020) through end of the ASCII character range (\\u00FF). It also includes the special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D).

    ", + "UploadSigningCertificateRequest$CertificateBody": "

    The contents of the signing certificate.

    The regex pattern for this parameter is a string of characters consisting of any printable ASCII character ranging from the space character (\\u0020) through end of the ASCII character range (\\u00FF). It also includes the special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D).

    " + } + }, + "certificateChainType": { + "base": null, + "refs": { + "ServerCertificate$CertificateChain": "

    The contents of the public key certificate chain.

    ", + "UploadServerCertificateRequest$CertificateChain": "

    The contents of the certificate chain. This is typically a concatenation of the PEM-encoded public key certificates of the chain.

    The regex pattern for this parameter is a string of characters consisting of any printable ASCII character ranging from the space character (\\u0020) through end of the ASCII character range (\\u00FF). It also includes the special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D).

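    To show how CertificateBody and CertificateChain travel together, here is a sketch of UploadServerCertificate; the PEM strings and certificate name are placeholders, not real material:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/iam"
    )

    func main() {
        svc := iam.New(session.Must(session.NewSession()))

        // Placeholder PEM data; the chain is the concatenated intermediate
        // certificates, as the CertificateChain description above notes.
        certPEM, chainPEM, keyPEM := "-----BEGIN CERTIFICATE-----...", "...", "..."

        out, err := svc.UploadServerCertificate(&iam.UploadServerCertificateInput{
            ServerCertificateName: aws.String("example-cert"),
            CertificateBody:       aws.String(certPEM),
            CertificateChain:      aws.String(chainPEM),
            PrivateKey:            aws.String(keyPEM),
        })
        if err != nil {
            fmt.Println("upload failed:", err)
            return
        }
        fmt.Println("ARN:", aws.StringValue(out.ServerCertificateMetadata.Arn))
    }
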
    " + } + }, + "certificateIdType": { + "base": null, + "refs": { + "DeleteSigningCertificateRequest$CertificateId": "

    The ID of the signing certificate to delete.

    The format of this parameter, as described by its regex pattern, is a string of characters that can consist of uppercase or lowercase letters or digits.

    ", + "SigningCertificate$CertificateId": "

    The ID for the signing certificate.

    ", + "UpdateSigningCertificateRequest$CertificateId": "

    The ID of the signing certificate you want to update.

    The regex pattern for this parameter is a string of characters that can consist of any uppercase or lowercase letter or digit.

    " + } + }, + "certificateListType": { + "base": "

    Contains a list of signing certificates.

    This data type is used as a response element in the ListSigningCertificates action.

    ", + "refs": { + "ListSigningCertificatesResponse$Certificates": "

    A list of the user's signing certificate information.

    " + } + }, + "clientIDListType": { + "base": null, + "refs": { + "CreateOpenIDConnectProviderRequest$ClientIDList": "

    A list of client IDs (also known as audiences). When a mobile or web app registers with an OpenID Connect provider, it establishes a value that identifies the application. (This is the value that's sent as the client_id parameter on OAuth requests.)

    You can register multiple client IDs with the same provider. For example, you might have multiple applications that use the same OIDC provider. You cannot register more than 100 client IDs with a single IAM OIDC provider.

    There is no defined format for a client ID. The CreateOpenIDConnectProviderRequest action accepts client IDs up to 255 characters long.

    ", + "GetOpenIDConnectProviderResponse$ClientIDList": "

    A list of client IDs (also known as audiences) that are associated with the specified IAM OIDC provider resource object. For more information, see CreateOpenIDConnectProvider.

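    A sketch of registering several client IDs at creation time, per the ClientIDList notes above; the issuer URL, app IDs, and thumbprint are placeholders:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/iam"
    )

    func main() {
        svc := iam.New(session.Must(session.NewSession()))

        // Several applications can share one provider by registering each
        // app's client_id value in ClientIDList (up to 100 per provider).
        out, err := svc.CreateOpenIDConnectProvider(&iam.CreateOpenIDConnectProviderInput{
            Url:            aws.String("https://example-issuer.invalid"),
            ClientIDList:   []*string{aws.String("app-one"), aws.String("app-two")},
            ThumbprintList: []*string{aws.String("0000000000000000000000000000000000000000")},
        })
        if err != nil {
            fmt.Println("create failed:", err)
            return
        }
        fmt.Println("provider ARN:", aws.StringValue(out.OpenIDConnectProviderArn))
    }
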
    " + } + }, + "clientIDType": { + "base": null, + "refs": { + "AddClientIDToOpenIDConnectProviderRequest$ClientID": "

    The client ID (also known as audience) to add to the IAM OpenID Connect provider resource.

    ", + "RemoveClientIDFromOpenIDConnectProviderRequest$ClientID": "

    The client ID (also known as audience) to remove from the IAM OIDC provider resource. For more information about client IDs, see CreateOpenIDConnectProvider.

    ", + "clientIDListType$member": null + } + }, + "credentialReportExpiredExceptionMessage": { + "base": null, + "refs": { + "CredentialReportExpiredException$message": null + } + }, + "credentialReportNotPresentExceptionMessage": { + "base": null, + "refs": { + "CredentialReportNotPresentException$message": null + } + }, + "credentialReportNotReadyExceptionMessage": { + "base": null, + "refs": { + "CredentialReportNotReadyException$message": null + } + }, + "dateType": { + "base": null, + "refs": { + "AccessKey$CreateDate": "

    The date when the access key was created.

    ", + "AccessKeyLastUsed$LastUsedDate": "

    The date and time, in ISO 8601 date-time format, when the access key was most recently used. This field is null when:

    • The user does not have an access key.

    • An access key exists but has never been used, at least not since IAM started tracking this information on April 22nd, 2015.

    • There is no sign-in data associated with the user.

    ", + "AccessKeyMetadata$CreateDate": "

    The date when the access key was created.

    ", + "GetCredentialReportResponse$GeneratedTime": "

    The date and time when the credential report was created, in ISO 8601 date-time format.

    ", + "GetOpenIDConnectProviderResponse$CreateDate": "

    The date and time when the IAM OIDC provider resource object was created in the AWS account.

    ", + "GetSAMLProviderResponse$CreateDate": "

    The date and time when the SAML provider was created.

    ", + "GetSAMLProviderResponse$ValidUntil": "

    The expiration date and time for the SAML provider.

    ", + "Group$CreateDate": "

    The date and time, in ISO 8601 date-time format, when the group was created.

    ", + "GroupDetail$CreateDate": "

    The date and time, in ISO 8601 date-time format, when the group was created.

    ", + "InstanceProfile$CreateDate": "

    The date when the instance profile was created.

    ", + "LoginProfile$CreateDate": "

    The date when the password for the user was created.

    ", + "MFADevice$EnableDate": "

    The date when the MFA device was enabled for the user.

    ", + "ManagedPolicyDetail$CreateDate": "

    The date and time, in ISO 8601 date-time format, when the policy was created.

    ", + "ManagedPolicyDetail$UpdateDate": "

    The date and time, in ISO 8601 date-time format, when the policy was last updated.

    When a policy has only one version, this field contains the date and time when the policy was created. When a policy has more than one version, this field contains the date and time when the most recent policy version was created.

    ", + "Policy$CreateDate": "

    The date and time, in ISO 8601 date-time format, when the policy was created.

    ", + "Policy$UpdateDate": "

    The date and time, in ISO 8601 date-time format, when the policy was last updated.

    When a policy has only one version, this field contains the date and time when the policy was created. When a policy has more than one version, this field contains the date and time when the most recent policy version was created.

    ", + "PolicyVersion$CreateDate": "

    The date and time, in ISO 8601 date-time format, when the policy version was created.

    ", + "Role$CreateDate": "

    The date and time, in ISO 8601 date-time format, when the role was created.

    ", + "RoleDetail$CreateDate": "

    The date and time, in ISO 8601 date-time format, when the role was created.

    ", + "SAMLProviderListEntry$ValidUntil": "

    The expiration date and time for the SAML provider.

    ", + "SAMLProviderListEntry$CreateDate": "

    The date and time when the SAML provider was created.

    ", + "SSHPublicKey$UploadDate": "

    The date and time, in ISO 8601 date-time format, when the SSH public key was uploaded.

    ", + "SSHPublicKeyMetadata$UploadDate": "

    The date and time, in ISO 8601 date-time format, when the SSH public key was uploaded.

    ", + "ServerCertificateMetadata$UploadDate": "

    The date when the server certificate was uploaded.

    ", + "ServerCertificateMetadata$Expiration": "

    The date on which the certificate is set to expire.

    ", + "SigningCertificate$UploadDate": "

    The date when the signing certificate was uploaded.

    ", + "User$CreateDate": "

    The date and time, in ISO 8601 date-time format, when the user was created.

    ", + "User$PasswordLastUsed": "

    The date and time, in ISO 8601 date-time format, when the user's password was last used to sign in to an AWS website. For a list of AWS websites that capture a user's last sign-in time, see the Credential Reports topic in the Using IAM guide. If a password is used more than once in a five-minute span, only the first use is returned in this field. This field is null (not present) when:

    • The user does not have a password.

    • The password exists but has never been used (at least not since IAM started tracking this information on October 20th, 2014).

    • There is no sign-in data associated with the user.

    This value is returned only in the GetUser and ListUsers actions.

    ", + "UserDetail$CreateDate": "

    The date and time, in ISO 8601 date-time format, when the user was created.

    ", + "VirtualMFADevice$EnableDate": "

    The date and time on which the virtual MFA device was enabled.

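    Several of the dateType fields above, notably AccessKeyLastUsed$LastUsedDate and User$PasswordLastUsed, can be null; a sketch of reading one of them defensively with the SDK's pointer helpers (the access key ID is a placeholder):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/iam"
    )

    func main() {
        svc := iam.New(session.Must(session.NewSession()))

        out, err := svc.GetAccessKeyLastUsed(&iam.GetAccessKeyLastUsedInput{
            AccessKeyId: aws.String("AKIAEXAMPLEKEYID"),
        })
        if err != nil {
            fmt.Println("lookup failed:", err)
            return
        }
        // LastUsedDate is nil in the cases the documentation above lists
        // (no key, never used, no sign-in data), so check before use.
        if lu := out.AccessKeyLastUsed; lu != nil && lu.LastUsedDate != nil {
            fmt.Printf("%s last used %s\n", aws.StringValue(out.UserName), lu.LastUsedDate)
        } else {
            fmt.Println("key has not been used")
        }
    }
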
    " + } + }, + "deleteConflictMessage": { + "base": null, + "refs": { + "DeleteConflictException$message": null + } + }, + "duplicateCertificateMessage": { + "base": null, + "refs": { + "DuplicateCertificateException$message": null + } + }, + "duplicateSSHPublicKeyMessage": { + "base": null, + "refs": { + "DuplicateSSHPublicKeyException$message": null + } + }, + "encodingType": { + "base": null, + "refs": { + "GetSSHPublicKeyRequest$Encoding": "

    Specifies the public key encoding format to use in the response. To retrieve the public key in ssh-rsa format, use SSH. To retrieve the public key in PEM format, use PEM.

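    A sketch of requesting the PEM form described above; the user name and key ID are placeholders, and the Encoding values are the two the documentation names:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/iam"
    )

    func main() {
        svc := iam.New(session.Must(session.NewSession()))

        // Encoding selects the response format: "SSH" for ssh-rsa, "PEM" for PEM.
        out, err := svc.GetSSHPublicKey(&iam.GetSSHPublicKeyInput{
            UserName:       aws.String("example-user"),
            SSHPublicKeyId: aws.String("APKAEXAMPLEKEYID"),
            Encoding:       aws.String("PEM"),
        })
        if err != nil {
            fmt.Println("get key failed:", err)
            return
        }
        fmt.Println(aws.StringValue(out.SSHPublicKey.SSHPublicKeyBody))
    }
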
    " + } + }, + "entityAlreadyExistsMessage": { + "base": null, + "refs": { + "EntityAlreadyExistsException$message": null + } + }, + "entityListType": { + "base": null, + "refs": { + "GetAccountAuthorizationDetailsRequest$Filter": "

    A list of entity types used to filter the results. Only the entities that match the types you specify are included in the output. Use the value LocalManagedPolicy to include customer managed policies.

    The format for this parameter is a comma-separated (if more than one) list of strings. Each string value in the list must be one of the valid values listed below.

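    A sketch of the Filter parameter just described, restricting the authorization-details dump to customer managed policies via the "LocalManagedPolicy" value the documentation names:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/iam"
    )

    func main() {
        svc := iam.New(session.Must(session.NewSession()))

        // Filter takes a list of entity-type strings; only entities of the
        // listed types are included in the output.
        out, err := svc.GetAccountAuthorizationDetails(&iam.GetAccountAuthorizationDetailsInput{
            Filter: []*string{aws.String("LocalManagedPolicy")},
        })
        if err != nil {
            fmt.Println("fetch failed:", err)
            return
        }
        fmt.Println("policies returned:", len(out.Policies))
    }
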
    " + } + }, + "entityTemporarilyUnmodifiableMessage": { + "base": null, + "refs": { + "EntityTemporarilyUnmodifiableException$message": null + } + }, + "existingUserNameType": { + "base": null, + "refs": { + "AddUserToGroupRequest$UserName": "

    The name of the user to add.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "CreateAccessKeyRequest$UserName": "

    The name of the IAM user that the new key will belong to.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "DeactivateMFADeviceRequest$UserName": "

    The name of the user whose MFA device you want to deactivate.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "DeleteAccessKeyRequest$UserName": "

    The name of the user whose access key pair you want to delete.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "DeleteSigningCertificateRequest$UserName": "

    The name of the user the signing certificate belongs to.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "DeleteUserPolicyRequest$UserName": "

    The name (friendly name, not ARN) identifying the user that the policy is embedded in.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "DeleteUserRequest$UserName": "

    The name of the user to delete.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "EnableMFADeviceRequest$UserName": "

    The name of the IAM user for whom you want to enable the MFA device.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "GetAccessKeyLastUsedResponse$UserName": "

    The name of the AWS IAM user that owns this access key.

    ", + "GetUserPolicyRequest$UserName": "

    The name of the user that the policy is associated with.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "GetUserPolicyResponse$UserName": "

    The user the policy is associated with.

    ", + "GetUserRequest$UserName": "

    The name of the user to get information about.

    This parameter is optional. If it is not included, it defaults to the user making the request. The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "ListAccessKeysRequest$UserName": "

    The name of the user.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "ListGroupsForUserRequest$UserName": "

    The name of the user to list groups for.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "ListMFADevicesRequest$UserName": "

    The name of the user whose MFA devices you want to list.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "ListSigningCertificatesRequest$UserName": "

    The name of the IAM user whose signing certificates you want to examine.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "ListUserPoliciesRequest$UserName": "

    The name of the user to list policies for.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "PutUserPolicyRequest$UserName": "

    The name of the user to associate the policy with.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "RemoveUserFromGroupRequest$UserName": "

    The name of the user to remove.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "ResyncMFADeviceRequest$UserName": "

    The name of the user whose MFA device you want to resynchronize.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "UpdateAccessKeyRequest$UserName": "

    The name of the user whose key you want to update.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "UpdateSigningCertificateRequest$UserName": "

    The name of the IAM user the signing certificate belongs to.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "UpdateUserRequest$UserName": "

    Name of the user to update. If you're changing the name of the user, this is the original user name.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "UploadSigningCertificateRequest$UserName": "

    The name of the user the signing certificate is for.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

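    Every existingUserNameType parameter above shares the same shape and character rules; as one representative sketch, adding a user to a group (both names are placeholders):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/iam"
    )

    func main() {
        svc := iam.New(session.Must(session.NewSession()))

        // Both names must match the documented pattern: upper- and lowercase
        // alphanumeric characters plus =,.@- and no spaces.
        _, err := svc.AddUserToGroup(&iam.AddUserToGroupInput{
            GroupName: aws.String("Developers"),
            UserName:  aws.String("example-user"),
        })
        if err != nil {
            fmt.Println("AddUserToGroup failed:", err)
        }
    }
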
    " + } + }, + "groupDetailListType": { + "base": null, + "refs": { + "GetAccountAuthorizationDetailsResponse$GroupDetailList": "

    A list containing information about IAM groups.

    " + } + }, + "groupListType": { + "base": "

    Contains a list of IAM groups.

    This data type is used as a response element in the ListGroups action.

    ", + "refs": { + "ListGroupsForUserResponse$Groups": "

    A list of groups.

    ", + "ListGroupsResponse$Groups": "

    A list of groups.

    " + } + }, + "groupNameListType": { + "base": null, + "refs": { + "UserDetail$GroupList": "

    A list of IAM groups that the user is in.

    " + } + }, + "groupNameType": { + "base": null, + "refs": { + "AddUserToGroupRequest$GroupName": "

    The name of the group to update.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "AttachGroupPolicyRequest$GroupName": "

    The name (friendly name, not ARN) of the group to attach the policy to.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "CreateGroupRequest$GroupName": "

    The name of the group to create. Do not include the path in this value.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "DeleteGroupPolicyRequest$GroupName": "

    The name (friendly name, not ARN) identifying the group that the policy is embedded in.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "DeleteGroupRequest$GroupName": "

    The name of the IAM group to delete.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "DetachGroupPolicyRequest$GroupName": "

    The name (friendly name, not ARN) of the IAM group to detach the policy from.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "GetGroupPolicyRequest$GroupName": "

    The name of the group the policy is associated with.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "GetGroupPolicyResponse$GroupName": "

    The group the policy is associated with.

    ", + "GetGroupRequest$GroupName": "

    The name of the group.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "Group$GroupName": "

    The friendly name that identifies the group.

    ", + "GroupDetail$GroupName": "

    The friendly name that identifies the group.

    ", + "ListAttachedGroupPoliciesRequest$GroupName": "

    The name (friendly name, not ARN) of the group to list attached policies for.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "ListGroupPoliciesRequest$GroupName": "

    The name of the group to list policies for.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "PolicyGroup$GroupName": "

    The name (friendly name, not ARN) identifying the group.

    ", + "PutGroupPolicyRequest$GroupName": "

    The name of the group to associate the policy with.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "RemoveUserFromGroupRequest$GroupName": "

    The name of the group to update.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "UpdateGroupRequest$GroupName": "

    Name of the IAM group to update. If you're changing the name of the group, this is the original name.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "UpdateGroupRequest$NewGroupName": "

    New name for the IAM group. Only include this if changing the group's name.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "groupNameListType$member": null + } + }, + "idType": { + "base": null, + "refs": { + "Group$GroupId": "

    The stable and unique string identifying the group. For more information about IDs, see IAM Identifiers in the Using IAM guide.

    ", + "GroupDetail$GroupId": "

    The stable and unique string identifying the group. For more information about IDs, see IAM Identifiers in the Using IAM guide.

    ", + "InstanceProfile$InstanceProfileId": "

    The stable and unique string identifying the instance profile. For more information about IDs, see IAM Identifiers in the Using IAM guide.

    ", + "ManagedPolicyDetail$PolicyId": "

    The stable and unique string identifying the policy.

    For more information about IDs, see IAM Identifiers in the Using IAM guide.

    ", + "Policy$PolicyId": "

    The stable and unique string identifying the policy.

    For more information about IDs, see IAM Identifiers in the Using IAM guide.

    ", + "PolicyGroup$GroupId": "

    The stable and unique string identifying the group. For more information about IDs, see IAM Identifiers in the IAM User Guide.

    ", + "PolicyRole$RoleId": "

    The stable and unique string identifying the role. For more information about IDs, see IAM Identifiers in the IAM User Guide.

    ", + "PolicyUser$UserId": "

    The stable and unique string identifying the user. For more information about IDs, see IAM Identifiers in the IAM User Guide.

    ", + "Role$RoleId": "

    The stable and unique string identifying the role. For more information about IDs, see IAM Identifiers in the Using IAM guide.

    ", + "RoleDetail$RoleId": "

    The stable and unique string identifying the role. For more information about IDs, see IAM Identifiers in the Using IAM guide.

    ", + "ServerCertificateMetadata$ServerCertificateId": "

    The stable and unique string identifying the server certificate. For more information about IDs, see IAM Identifiers in the Using IAM guide.

    ", + "User$UserId": "

    The stable and unique string identifying the user. For more information about IDs, see IAM Identifiers in the Using IAM guide.

    ", + "UserDetail$UserId": "

    The stable and unique string identifying the user. For more information about IDs, see IAM Identifiers in the Using IAM guide.

    " + } + }, + "instanceProfileListType": { + "base": "

    Contains a list of instance profiles.

    ", + "refs": { + "ListInstanceProfilesForRoleResponse$InstanceProfiles": "

    A list of instance profiles.

    ", + "ListInstanceProfilesResponse$InstanceProfiles": "

    A list of instance profiles.

    ", + "RoleDetail$InstanceProfileList": null + } + }, + "instanceProfileNameType": { + "base": null, + "refs": { + "AddRoleToInstanceProfileRequest$InstanceProfileName": "

    The name of the instance profile to update.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "CreateInstanceProfileRequest$InstanceProfileName": "

    The name of the instance profile to create.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "DeleteInstanceProfileRequest$InstanceProfileName": "

    The name of the instance profile to delete.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "GetInstanceProfileRequest$InstanceProfileName": "

    The name of the instance profile to get information about.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "InstanceProfile$InstanceProfileName": "

    The name identifying the instance profile.

    ", + "RemoveRoleFromInstanceProfileRequest$InstanceProfileName": "

    The name of the instance profile to update.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    " + } + }, + "invalidAuthenticationCodeMessage": { + "base": null, + "refs": { + "InvalidAuthenticationCodeException$message": null + } + }, + "invalidCertificateMessage": { + "base": null, + "refs": { + "InvalidCertificateException$message": null + } + }, + "invalidInputMessage": { + "base": null, + "refs": { + "InvalidInputException$message": null + } + }, + "invalidPublicKeyMessage": { + "base": null, + "refs": { + "InvalidPublicKeyException$message": null + } + }, + "invalidUserTypeMessage": { + "base": null, + "refs": { + "InvalidUserTypeException$message": null + } + }, + "keyPairMismatchMessage": { + "base": null, + "refs": { + "KeyPairMismatchException$message": null + } + }, + "limitExceededMessage": { + "base": null, + "refs": { + "LimitExceededException$message": null + } + }, + "malformedCertificateMessage": { + "base": null, + "refs": { + "MalformedCertificateException$message": null + } + }, + "malformedPolicyDocumentMessage": { + "base": null, + "refs": { + "MalformedPolicyDocumentException$message": null + } + }, + "markerType": { + "base": null, + "refs": { + "GetAccountAuthorizationDetailsRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "GetAccountAuthorizationDetailsResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "GetGroupRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "GetGroupResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListAccessKeysRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListAccessKeysResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListAccountAliasesRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListAccountAliasesResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListAttachedGroupPoliciesRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListAttachedGroupPoliciesResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListAttachedRolePoliciesRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListAttachedRolePoliciesResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListAttachedUserPoliciesRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListAttachedUserPoliciesResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListEntitiesForPolicyRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListEntitiesForPolicyResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListGroupPoliciesRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListGroupPoliciesResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListGroupsForUserRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListGroupsForUserResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListGroupsRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListGroupsResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListInstanceProfilesForRoleRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListInstanceProfilesForRoleResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListInstanceProfilesRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListInstanceProfilesResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListMFADevicesRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListMFADevicesResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListPoliciesRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListPoliciesResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListPolicyVersionsRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListPolicyVersionsResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListRolePoliciesRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListRolePoliciesResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListRolesRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListRolesResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListSSHPublicKeysRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListSSHPublicKeysResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListServerCertificatesRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListServerCertificatesResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListSigningCertificatesRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListSigningCertificatesResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListUserPoliciesRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListUserPoliciesResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListUsersRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListUsersResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListVirtualMFADevicesRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListVirtualMFADevicesResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "SimulateCustomPolicyRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "SimulatePolicyResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "SimulatePrincipalPolicyRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

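    The Marker protocol described repeatedly above is the same for every List* operation; a sketch of driving it by hand for ListUsers:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/iam"
    )

    func main() {
        svc := iam.New(session.Must(session.NewSession()))

        input := &iam.ListUsersInput{}
        for {
            out, err := svc.ListUsers(input)
            if err != nil {
                fmt.Println("ListUsers failed:", err)
                return
            }
            for _, u := range out.Users {
                fmt.Println(aws.StringValue(u.UserName))
            }
            // Per the documentation above: trust only IsTruncated, never the
            // page size, and feed the returned Marker into the next request.
            if !aws.BoolValue(out.IsTruncated) {
                break
            }
            input.Marker = out.Marker
        }
    }
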
    " + } + }, + "maxItemsType": { + "base": null, + "refs": { + "GetAccountAuthorizationDetailsRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "GetGroupRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListAccessKeysRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListAccountAliasesRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListAttachedGroupPoliciesRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListAttachedRolePoliciesRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListAttachedUserPoliciesRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListEntitiesForPolicyRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListGroupPoliciesRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListGroupsForUserRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListGroupsRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListInstanceProfilesForRoleRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListInstanceProfilesRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListMFADevicesRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListPoliciesRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListPolicyVersionsRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListRolePoliciesRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListRolesRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListSSHPublicKeysRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListServerCertificatesRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListSigningCertificatesRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListUserPoliciesRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListUsersRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListVirtualMFADevicesRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "SimulateCustomPolicyRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "SimulatePrincipalPolicyRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.
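
Every MaxItems parameter in this file follows the same contract: cap the page size, check IsTruncated, and feed Marker back in. A minimal sketch of driving that loop by hand with this SDK's iam client (ListUsers is an arbitrary choice; credentials and region are assumed to come from the environment):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iam"
)

func main() {
	svc := iam.New(session.Must(session.NewSession()))

	input := &iam.ListUsersInput{MaxItems: aws.Int64(100)}
	for {
		out, err := svc.ListUsers(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, u := range out.Users {
			fmt.Println(aws.StringValue(u.UserName))
		}
		// IsTruncated reports whether more results remain; Marker is the
		// value to send back so the service knows where to continue from.
		if !aws.BoolValue(out.IsTruncated) {
			break
		}
		input.Marker = out.Marker
	}
}
```

The generated pagination helpers (ListUsersPages and friends, driven by the paginators-1.json model added further down in this patch) wrap this same loop.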

    " + } + }, + "maxPasswordAgeType": { + "base": null, + "refs": { + "PasswordPolicy$MaxPasswordAge": "

    The number of days that an IAM user password is valid.

    ", + "UpdateAccountPasswordPolicyRequest$MaxPasswordAge": "

    The number of days that an IAM user password is valid. The default value of 0 means IAM user passwords never expire.

    Default value: 0

    " + } + }, + "mfaDeviceListType": { + "base": "

    Contains a list of MFA devices.

    This data type is used as a response element in the ListMFADevices and ListVirtualMFADevices actions.

    ", + "refs": { + "ListMFADevicesResponse$MFADevices": "

    A list of MFA devices.

    " + } + }, + "minimumPasswordLengthType": { + "base": null, + "refs": { + "PasswordPolicy$MinimumPasswordLength": "

    Minimum length to require for IAM user passwords.

    ", + "UpdateAccountPasswordPolicyRequest$MinimumPasswordLength": "

    The minimum number of characters allowed in an IAM user password.

    Default value: 6

    " + } + }, + "noSuchEntityMessage": { + "base": null, + "refs": { + "NoSuchEntityException$message": null + } + }, + "passwordPolicyViolationMessage": { + "base": null, + "refs": { + "PasswordPolicyViolationException$message": null + } + }, + "passwordReusePreventionType": { + "base": null, + "refs": { + "PasswordPolicy$PasswordReusePrevention": "

    Specifies the number of previous passwords that IAM users are prevented from reusing.

    ", + "UpdateAccountPasswordPolicyRequest$PasswordReusePrevention": "

    Specifies the number of previous passwords that IAM users are prevented from reusing. The default value of 0 means IAM users are not prevented from reusing previous passwords.

    Default value: 0
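
UpdateAccountPasswordPolicy is the operation that owns the three knobs documented here (MaxPasswordAge, MinimumPasswordLength, PasswordReusePrevention). A hedged sketch, reusing the imports from the pagination example above; the numbers are arbitrary:

```go
// Sketch only. Unset fields fall back to the defaults noted above:
// MaxPasswordAge 0 (never expire), PasswordReusePrevention 0 (no
// prevention), MinimumPasswordLength 6.
func setPasswordPolicy(svc *iam.IAM) error {
	_, err := svc.UpdateAccountPasswordPolicy(&iam.UpdateAccountPasswordPolicyInput{
		MinimumPasswordLength:   aws.Int64(14),
		MaxPasswordAge:          aws.Int64(90),
		PasswordReusePrevention: aws.Int64(5),
	})
	return err
}
```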

    " + } + }, + "passwordType": { + "base": null, + "refs": { + "ChangePasswordRequest$OldPassword": "

    The IAM user's current password.

    ", + "ChangePasswordRequest$NewPassword": "

    The new password. The new password must conform to the AWS account's password policy, if one exists.

    The regex pattern for this parameter is a string of characters consisting of almost any printable ASCII character from the space (\\u0020) through the end of the ASCII character range (\\u00FF). You can also include the tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D) characters. Although any of these characters are valid in a password, note that many tools, such as the AWS Management Console, might restrict the ability to enter certain characters because they have special meaning within that tool.

    ", + "CreateLoginProfileRequest$Password": "

    The new password for the user.

    The regex pattern for this parameter is a string of characters consisting of almost any printable ASCII character from the space (\\u0020) through the end of the ASCII character range (\\u00FF). You can also include the tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D) characters. Although any of these characters are valid in a password, note that many tools, such as the AWS Management Console, might restrict the ability to enter certain characters because they have special meaning within that tool.

    ", + "UpdateLoginProfileRequest$Password": "

    The new password for the specified IAM user.

    The regex pattern for this parameter is a string of characters consisting of any printable ASCII character ranging from the space character (\\u0020) through end of the ASCII character range (\\u00FF). It also includes the special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D). However, the format can be further restricted by the account administrator by setting a password policy on the AWS account. For more information, see UpdateAccountPasswordPolicy.

    " + } + }, + "pathPrefixType": { + "base": null, + "refs": { + "ListGroupsRequest$PathPrefix": "

    The path prefix for filtering the results. For example, the prefix /division_abc/subdivision_xyz/ gets all groups whose path starts with /division_abc/subdivision_xyz/.

    This parameter is optional. If it is not included, it defaults to a slash (/), listing all groups. The regex pattern for this parameter is a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes, containing any ASCII character from the ! (\\u0021) thru the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

    ", + "ListInstanceProfilesRequest$PathPrefix": "

    The path prefix for filtering the results. For example, the prefix /application_abc/component_xyz/ gets all instance profiles whose path starts with /application_abc/component_xyz/.

    This parameter is optional. If it is not included, it defaults to a slash (/), listing all instance profiles. The regex pattern for this parameter is a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes, containing any ASCII character from the ! (\\u0021) thru the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

    ", + "ListRolesRequest$PathPrefix": "

    The path prefix for filtering the results. For example, the prefix /application_abc/component_xyz/ gets all roles whose path starts with /application_abc/component_xyz/.

    This parameter is optional. If it is not included, it defaults to a slash (/), listing all roles. The regex pattern for this parameter is a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes, containing any ASCII character from the ! (\\u0021) thru the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

    ", + "ListServerCertificatesRequest$PathPrefix": "

    The path prefix for filtering the results. For example: /company/servercerts would get all server certificates for which the path starts with /company/servercerts.

    This parameter is optional. If it is not included, it defaults to a slash (/), listing all server certificates. The regex pattern for this parameter is a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes, containing any ASCII character from the ! (\\u0021) thru the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

    ", + "ListUsersRequest$PathPrefix": "

    The path prefix for filtering the results. For example: /division_abc/subdivision_xyz/, which would get all user names whose path starts with /division_abc/subdivision_xyz/.

    This parameter is optional. If it is not included, it defaults to a slash (/), listing all user names. The regex pattern for this parameter is a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes, containing any ASCII character from the ! (\\u0021) thru the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.
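
A sketch of path-prefix filtering through a generated paginator, assuming the imports from the pagination example above; the prefix is the example value used in these docs:

```go
// Sketch only; collects every role whose path starts with the prefix.
func rolesUnderPath(svc *iam.IAM) ([]*iam.Role, error) {
	var roles []*iam.Role
	err := svc.ListRolesPages(&iam.ListRolesInput{
		PathPrefix: aws.String("/application_abc/component_xyz/"),
	}, func(page *iam.ListRolesOutput, lastPage bool) bool {
		roles = append(roles, page.Roles...)
		return true // keep paging until IsTruncated is false
	})
	return roles, err
}
```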

    " + } + }, + "pathType": { + "base": null, + "refs": { + "CreateGroupRequest$Path": "

    The path to the group. For more information about paths, see IAM Identifiers in the IAM User Guide.

    This parameter is optional. If it is not included, it defaults to a slash (/).

    The regex pattern for this parameter is a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes, containing any ASCII character from the ! (\\u0021) thru the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

    ", + "CreateInstanceProfileRequest$Path": "

    The path to the instance profile. For more information about paths, see IAM Identifiers in the IAM User Guide.

    This parameter is optional. If it is not included, it defaults to a slash (/).

    The regex pattern for this parameter is a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes, containing any ASCII character from the ! (\\u0021) thru the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

    ", + "CreateRoleRequest$Path": "

    The path to the role. For more information about paths, see IAM Identifiers in the IAM User Guide.

    This parameter is optional. If it is not included, it defaults to a slash (/).

    The regex pattern for this parameter is a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes, containing any ASCII character from the ! (\\u0021) thru the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

    ", + "CreateUserRequest$Path": "

    The path for the user name. For more information about paths, see IAM Identifiers in the IAM User Guide.

    This parameter is optional. If it is not included, it defaults to a slash (/).

    The regex pattern for this parameter is a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes, containing any ASCII character from the ! (\\u0021) thru the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

    ", + "CreateVirtualMFADeviceRequest$Path": "

    The path for the virtual MFA device. For more information about paths, see IAM Identifiers in the IAM User Guide.

    This parameter is optional. If it is not included, it defaults to a slash (/).

    The regex pattern for this parameter is a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes, containing any ASCII character from the ! (\\u0021) thru the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

    ", + "Group$Path": "

    The path to the group. For more information about paths, see IAM Identifiers in the Using IAM guide.

    ", + "GroupDetail$Path": "

    The path to the group. For more information about paths, see IAM Identifiers in the Using IAM guide.

    ", + "InstanceProfile$Path": "

    The path to the instance profile. For more information about paths, see IAM Identifiers in the Using IAM guide.

    ", + "ListEntitiesForPolicyRequest$PathPrefix": "

    The path prefix for filtering the results. This parameter is optional. If it is not included, it defaults to a slash (/), listing all entities.

    The regex pattern for this parameter is a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes, containing any ASCII character from the ! (\\u0021) thru the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

    ", + "Role$Path": "

    The path to the role. For more information about paths, see IAM Identifiers in the Using IAM guide.

    ", + "RoleDetail$Path": "

    The path to the role. For more information about paths, see IAM Identifiers in the Using IAM guide.

    ", + "ServerCertificateMetadata$Path": "

    The path to the server certificate. For more information about paths, see IAM Identifiers in the Using IAM guide.

    ", + "UpdateGroupRequest$NewPath": "

    New path for the IAM group. Only include this if changing the group's path.

    The regex pattern for this parameter is a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes, containing any ASCII character from the ! (\\u0021) thru the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

    ", + "UpdateServerCertificateRequest$NewPath": "

    The new path for the server certificate. Include this only if you are updating the server certificate's path.

    The regex pattern for this parameter is a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes, containing any ASCII character from the ! (\\u0021) thru the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

    ", + "UpdateUserRequest$NewPath": "

    New path for the IAM user. Include this parameter only if you're changing the user's path.

    The regex pattern for this parameter is a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes, containing any ASCII character from the ! (\\u0021) thru the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

    ", + "UploadServerCertificateRequest$Path": "

    The path for the server certificate. For more information about paths, see IAM Identifiers in the IAM User Guide.

    This parameter is optional. If it is not included, it defaults to a slash (/). The regex pattern for this parameter is a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes, containing any ASCII character from the ! (\\u0021) thru the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

    If you are uploading a server certificate specifically for use with Amazon CloudFront distributions, you must specify a path using the --path option. The path must begin with /cloudfront and must include a trailing slash (for example, /cloudfront/test/).
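
A sketch of the upload call, assuming the imports from the pagination example above; the name, path, and PEM contents are placeholders (the --path option mentioned here is the CLI spelling of the same Path field):

```go
// Sketch only; certBody and keyBody are PEM-encoded file contents.
func uploadCert(svc *iam.IAM, certBody, keyBody string) error {
	_, err := svc.UploadServerCertificate(&iam.UploadServerCertificateInput{
		ServerCertificateName: aws.String("ProdCert"), // no path, no spaces
		// For CloudFront, the path must begin with /cloudfront and end
		// with a trailing slash, per the note above.
		Path:            aws.String("/cloudfront/test/"),
		CertificateBody: aws.String(certBody),
		PrivateKey:      aws.String(keyBody),
	})
	return err
}
```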

    ", + "User$Path": "

    The path to the user. For more information about paths, see IAM Identifiers in the Using IAM guide.

    ", + "UserDetail$Path": "

    The path to the user. For more information about paths, see IAM Identifiers in the Using IAM guide.

    " + } + }, + "policyDescriptionType": { + "base": null, + "refs": { + "CreatePolicyRequest$Description": "

    A friendly description of the policy.

    Typically used to store information about the permissions defined in the policy. For example, \"Grants access to production DynamoDB tables.\"

    The policy description is immutable. After a value is assigned, it cannot be changed.
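
A sketch of CreatePolicy tying the name, path, description, and document parameters together; all values are illustrative, and the imports from the pagination example above are assumed:

```go
// Sketch only; returns the created managed policy (its Arn is what the
// attach/detach and versioning operations take).
func createPolicy(svc *iam.IAM) (*iam.Policy, error) {
	doc := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"dynamodb:Query","Resource":"*"}]}`
	out, err := svc.CreatePolicy(&iam.CreatePolicyInput{
		PolicyName:     aws.String("ProdDynamoDBQuery"),
		Path:           aws.String("/division_abc/"),
		Description:    aws.String("Grants query access to production DynamoDB tables."), // immutable after creation
		PolicyDocument: aws.String(doc),
	})
	if err != nil {
		return nil, err
	}
	return out.Policy, nil
}
```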

    ", + "ManagedPolicyDetail$Description": "

    A friendly description of the policy.

    ", + "Policy$Description": "

    A friendly description of the policy.

    This element is included in the response to the GetPolicy operation. It is not included in the response to the ListPolicies operation.

    " + } + }, + "policyDetailListType": { + "base": null, + "refs": { + "GroupDetail$GroupPolicyList": "

    A list of the inline policies embedded in the group.

    ", + "RoleDetail$RolePolicyList": "

    A list of inline policies embedded in the role. These policies are the role's access (permissions) policies.

    ", + "UserDetail$UserPolicyList": "

    A list of the inline policies embedded in the user.

    " + } + }, + "policyDocumentType": { + "base": null, + "refs": { + "CreatePolicyRequest$PolicyDocument": "

    The JSON policy document that you want to use as the content for the new policy.

    The regex pattern for this parameter is a string of characters consisting of any printable ASCII character ranging from the space character (\\u0020) through end of the ASCII character range (\\u00FF). It also includes the special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D).

    ", + "CreatePolicyVersionRequest$PolicyDocument": "

    The JSON policy document that you want to use as the content for this new version of the policy.

    The regex pattern for this parameter is a string of characters consisting of any printable ASCII character ranging from the space character (\\u0020) through end of the ASCII character range (\\u00FF). It also includes the special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D).

    ", + "CreateRoleRequest$AssumeRolePolicyDocument": "

    The trust relationship policy document that grants an entity permission to assume the role.

    The regex pattern for this parameter is a string of characters consisting of any printable ASCII character ranging from the space character (\\u0020) through end of the ASCII character range (\\u00FF). It also includes the special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D).

    ", + "GetGroupPolicyResponse$PolicyDocument": "

    The policy document.

    ", + "GetRolePolicyResponse$PolicyDocument": "

    The policy document.

    ", + "GetUserPolicyResponse$PolicyDocument": "

    The policy document.

    ", + "PolicyDetail$PolicyDocument": "

    The policy document.

    ", + "PolicyVersion$Document": "

    The policy document.

    The policy document is returned in the response to the GetPolicyVersion and GetAccountAuthorizationDetails operations. It is not returned in the response to the CreatePolicyVersion or ListPolicyVersions operations.

    ", + "PutGroupPolicyRequest$PolicyDocument": "

    The policy document.

    The regex pattern for this parameter is a string of characters consisting of any printable ASCII character ranging from the space character (\\u0020) through end of the ASCII character range (\\u00FF). It also includes the special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D).

    ", + "PutRolePolicyRequest$PolicyDocument": "

    The policy document.

    The regex pattern for this parameter is a string of characters consisting of any printable ASCII character ranging from the space character (\\u0020) through end of the ASCII character range (\\u00FF). It also includes the special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D).

    ", + "PutUserPolicyRequest$PolicyDocument": "

    The policy document.

    The regex pattern for this parameter is a string of characters consisting of any printable ASCII character ranging from the space character (\\u0020) through end of the ASCII character range (\\u00FF). It also includes the special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D).

    ", + "Role$AssumeRolePolicyDocument": "

    The policy that grants an entity permission to assume the role.

    ", + "RoleDetail$AssumeRolePolicyDocument": "

    The trust policy that grants permission to assume the role.

    ", + "SimulateCustomPolicyRequest$ResourcePolicy": "

    A resource-based policy to include in the simulation provided as a string. Each resource in the simulation is treated as if it had this policy attached. You can include only one resource-based policy in a simulation.

    The regex pattern for this parameter is a string of characters consisting of any printable ASCII character ranging from the space character (\\u0020) through end of the ASCII character range (\\u00FF). It also includes the special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D).

    ", + "SimulatePrincipalPolicyRequest$ResourcePolicy": "

    A resource-based policy to include in the simulation provided as a string. Each resource in the simulation is treated as if it had this policy attached. You can include only one resource-based policy in a simulation.

    The regex pattern for this parameter is a string of characters consisting of any printable ASCII character ranging from the space character (\\u0020) through end of the ASCII character range (\\u00FF). It also includes the special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D).
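
A sketch of a custom-policy simulation, assuming the imports from the pagination example above; the policy, actions, and resource ARN are illustrative:

```go
// Sketch only; prints an allow/deny decision per simulated action.
func simulate(svc *iam.IAM) error {
	out, err := svc.SimulateCustomPolicy(&iam.SimulateCustomPolicyInput{
		PolicyInputList: aws.StringSlice([]string{
			`{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:GetObject","Resource":"*"}]}`,
		}),
		ActionNames:  aws.StringSlice([]string{"s3:GetObject", "s3:PutObject"}),
		ResourceArns: aws.StringSlice([]string{"arn:aws:s3:::example-bucket/*"}),
	})
	if err != nil {
		return err
	}
	for _, r := range out.EvaluationResults {
		fmt.Printf("%s: %s\n", aws.StringValue(r.EvalActionName), aws.StringValue(r.EvalDecision))
	}
	return nil
}
```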

    ", + "SimulationPolicyListType$member": null, + "UpdateAssumeRolePolicyRequest$PolicyDocument": "

    The policy that grants an entity permission to assume the role.

    The regex pattern for this parameter is a string of characters consisting of any printable ASCII character ranging from the space character (\\u0020) through end of the ASCII character range (\\u00FF). It also includes the special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D).

    " + } + }, + "policyDocumentVersionListType": { + "base": null, + "refs": { + "ListPolicyVersionsResponse$Versions": "

    A list of policy versions.

    For more information about managed policy versions, see Versioning for Managed Policies in the IAM User Guide.

    ", + "ManagedPolicyDetail$PolicyVersionList": "

    A list containing information about the versions of the policy.

    " + } + }, + "policyEvaluationErrorMessage": { + "base": null, + "refs": { + "PolicyEvaluationException$message": null + } + }, + "policyListType": { + "base": null, + "refs": { + "ListPoliciesResponse$Policies": "

    A list of policies.

    " + } + }, + "policyNameListType": { + "base": "

    Contains a list of policy names.

    This data type is used as a response element in the ListPolicies action.

    ", + "refs": { + "ListGroupPoliciesResponse$PolicyNames": "

    A list of policy names.

    ", + "ListRolePoliciesResponse$PolicyNames": "

    A list of policy names.

    ", + "ListUserPoliciesResponse$PolicyNames": "

    A list of policy names.

    " + } + }, + "policyNameType": { + "base": null, + "refs": { + "AttachedPolicy$PolicyName": "

    The friendly name of the attached policy.

    ", + "CreatePolicyRequest$PolicyName": "

    The friendly name of the policy.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "DeleteGroupPolicyRequest$PolicyName": "

    The name identifying the policy document to delete.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "DeleteRolePolicyRequest$PolicyName": "

    The name of the inline policy to delete from the specified IAM role.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "DeleteUserPolicyRequest$PolicyName": "

    The name identifying the policy document to delete.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "GetGroupPolicyRequest$PolicyName": "

    The name of the policy document to get.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "GetGroupPolicyResponse$PolicyName": "

    The name of the policy.

    ", + "GetRolePolicyRequest$PolicyName": "

    The name of the policy document to get.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "GetRolePolicyResponse$PolicyName": "

    The name of the policy.

    ", + "GetUserPolicyRequest$PolicyName": "

    The name of the policy document to get.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "GetUserPolicyResponse$PolicyName": "

    The name of the policy.

    ", + "ManagedPolicyDetail$PolicyName": "

    The friendly name (not ARN) identifying the policy.

    ", + "Policy$PolicyName": "

    The friendly name (not ARN) identifying the policy.

    ", + "PolicyDetail$PolicyName": "

    The name of the policy.

    ", + "PutGroupPolicyRequest$PolicyName": "

    The name of the policy document.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "PutRolePolicyRequest$PolicyName": "

    The name of the policy document.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "PutUserPolicyRequest$PolicyName": "

    The name of the policy document.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "policyNameListType$member": null + } + }, + "policyPathType": { + "base": null, + "refs": { + "CreatePolicyRequest$Path": "

    The path for the policy.

    For more information about paths, see IAM Identifiers in the IAM User Guide.

    This parameter is optional. If it is not included, it defaults to a slash (/).

    The regex pattern for this parameter is a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes, containing any ASCII character from the ! (\\u0021) thru the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

    ", + "ListAttachedGroupPoliciesRequest$PathPrefix": "

    The path prefix for filtering the results. This parameter is optional. If it is not included, it defaults to a slash (/), listing all policies.

    The regex pattern for this parameter is a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes, containing any ASCII character from the ! (\\u0021) thru the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

    ", + "ListAttachedRolePoliciesRequest$PathPrefix": "

    The path prefix for filtering the results. This parameter is optional. If it is not included, it defaults to a slash (/), listing all policies.

    The regex pattern for this parameter is a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes, containing any ASCII character from the ! (\\u0021) thru the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

    ", + "ListAttachedUserPoliciesRequest$PathPrefix": "

    The path prefix for filtering the results. This parameter is optional. If it is not included, it defaults to a slash (/), listing all policies.

    The regex pattern for this parameter is a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes, containing any ASCII character from the ! (\\u0021) thru the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

    ", + "ListPoliciesRequest$PathPrefix": "

    The path prefix for filtering the results. This parameter is optional. If it is not included, it defaults to a slash (/), listing all policies. The regex pattern for this parameter is a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes, containing any ASCII character from the ! (\\u0021) thru the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

    ", + "ManagedPolicyDetail$Path": "

    The path to the policy.

    For more information about paths, see IAM Identifiers in the Using IAM guide.

    ", + "Policy$Path": "

    The path to the policy.

    For more information about paths, see IAM Identifiers in the Using IAM guide.

    " + } + }, + "policyScopeType": { + "base": null, + "refs": { + "ListPoliciesRequest$Scope": "

    The scope to use for filtering the results.

    To list only AWS managed policies, set Scope to AWS. To list only the customer managed policies in your AWS account, set Scope to Local.

    This parameter is optional. If it is not included, or if it is set to All, all policies are returned.

    " + } + }, + "policyVersionIdType": { + "base": null, + "refs": { + "DeletePolicyVersionRequest$VersionId": "

    The policy version to delete.

    The regex pattern for this parameter is a string of characters that consists of the lowercase letter 'v' followed by one or two digits, and optionally followed by a period '.' and a string of letters and digits.

    For more information about managed policy versions, see Versioning for Managed Policies in the IAM User Guide.

    ", + "GetPolicyVersionRequest$VersionId": "

    Identifies the policy version to retrieve.

    The regex pattern for this parameter is a string of characters that consists of the lowercase letter 'v' followed by one or two digits, and optionally followed by a period '.' and a string of letters and digits.

    ", + "ManagedPolicyDetail$DefaultVersionId": "

    The identifier for the version of the policy that is set as the default (operative) version.

    For more information about policy versions, see Versioning for Managed Policies in the Using IAM guide.

    ", + "Policy$DefaultVersionId": "

    The identifier for the version of the policy that is set as the default version.

    ", + "PolicyVersion$VersionId": "

    The identifier for the policy version.

    Policy version identifiers always begin with v (always lowercase). When a policy is created, the first policy version is v1.

    ", + "SetDefaultPolicyVersionRequest$VersionId": "

    The version of the policy to set as the default (operative) version.

    For more information about managed policy versions, see Versioning for Managed Policies in the IAM User Guide.
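
A sketch of the versioning round trip these identifiers describe, assuming the imports from the pagination example above; the ARN and document are placeholders:

```go
// Sketch only; publishes the next version (v2, v3, ...) of a managed
// policy and makes it the default (operative) version in one call.
func publishNewVersion(svc *iam.IAM, policyARN, doc string) error {
	_, err := svc.CreatePolicyVersion(&iam.CreatePolicyVersionInput{
		PolicyArn:      aws.String(policyARN),
		PolicyDocument: aws.String(doc),
		SetAsDefault:   aws.Bool(true),
	})
	return err
}

// Rolling back is SetDefaultPolicyVersion with an existing id such as "v1",
// the id the first version of every policy receives.
func rollback(svc *iam.IAM, policyARN string) error {
	_, err := svc.SetDefaultPolicyVersion(&iam.SetDefaultPolicyVersionInput{
		PolicyArn: aws.String(policyARN),
		VersionId: aws.String("v1"),
	})
	return err
}
```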

    " + } + }, + "privateKeyType": { + "base": null, + "refs": { + "UploadServerCertificateRequest$PrivateKey": "

    The contents of the private key in PEM-encoded format.

    The regex pattern for this parameter is a string of characters consisting of any printable ASCII character ranging from the space character (\\u0020) through end of the ASCII character range (\\u00FF). It also includes the special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D).

    " + } + }, + "publicKeyFingerprintType": { + "base": null, + "refs": { + "SSHPublicKey$Fingerprint": "

    The MD5 message digest of the SSH public key.

    " + } + }, + "publicKeyIdType": { + "base": null, + "refs": { + "DeleteSSHPublicKeyRequest$SSHPublicKeyId": "

    The unique identifier for the SSH public key.

    The regex pattern for this parameter is a string of characters that can consist of any upper or lowercased letter or digit.

    ", + "GetSSHPublicKeyRequest$SSHPublicKeyId": "

    The unique identifier for the SSH public key.

    The regex pattern for this parameter is a string of characters that can consist of any upper or lowercased letter or digit.

    ", + "SSHPublicKey$SSHPublicKeyId": "

    The unique identifier for the SSH public key.

    ", + "SSHPublicKeyMetadata$SSHPublicKeyId": "

    The unique identifier for the SSH public key.

    ", + "UpdateSSHPublicKeyRequest$SSHPublicKeyId": "

    The unique identifier for the SSH public key.

    The regex pattern for this parameter is a string of characters that can consist of any upper or lowercased letter or digit.

    " + } + }, + "publicKeyMaterialType": { + "base": null, + "refs": { + "SSHPublicKey$SSHPublicKeyBody": "

    The SSH public key.

    ", + "UploadSSHPublicKeyRequest$SSHPublicKeyBody": "

    The SSH public key. The public key must be encoded in ssh-rsa format or PEM format.

    The regex pattern for this parameter is a string of characters consisting of any printable ASCII character ranging from the space character (\\u0020) through end of the ASCII character range (\\u00FF). It also includes the special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D).
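
A sketch of the upload call, assuming the imports from the pagination example above; keyMaterial stands in for the contents of an ssh-rsa public key file:

```go
// Sketch only; the returned id is what Get/Update/DeleteSSHPublicKey take.
func uploadKey(svc *iam.IAM, user, keyMaterial string) (string, error) {
	out, err := svc.UploadSSHPublicKey(&iam.UploadSSHPublicKeyInput{
		UserName:         aws.String(user),
		SSHPublicKeyBody: aws.String(keyMaterial),
	})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.SSHPublicKey.SSHPublicKeyId), nil
}
```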

    " + } + }, + "roleDetailListType": { + "base": null, + "refs": { + "GetAccountAuthorizationDetailsResponse$RoleDetailList": "

    A list containing information about IAM roles.

    " + } + }, + "roleListType": { + "base": "

    Contains a list of IAM roles.

    This data type is used as a response element in the ListRoles action.

    ", + "refs": { + "InstanceProfile$Roles": "

    The role associated with the instance profile.

    ", + "ListRolesResponse$Roles": "

    A list of roles.

    " + } + }, + "roleNameType": { + "base": null, + "refs": { + "AddRoleToInstanceProfileRequest$RoleName": "

    The name of the role to add.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "AttachRolePolicyRequest$RoleName": "

    The name (friendly name, not ARN) of the role to attach the policy to.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "CreateRoleRequest$RoleName": "

    The name of the role to create.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "DeleteRolePolicyRequest$RoleName": "

    The name (friendly name, not ARN) identifying the role that the policy is embedded in.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "DeleteRoleRequest$RoleName": "

    The name of the role to delete.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "DetachRolePolicyRequest$RoleName": "

    The name (friendly name, not ARN) of the IAM role to detach the policy from.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "GetRolePolicyRequest$RoleName": "

    The name of the role associated with the policy.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "GetRolePolicyResponse$RoleName": "

    The role the policy is associated with.

    ", + "GetRoleRequest$RoleName": "

    The name of the IAM role to get information about.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "ListAttachedRolePoliciesRequest$RoleName": "

    The name (friendly name, not ARN) of the role to list attached policies for.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "ListInstanceProfilesForRoleRequest$RoleName": "

    The name of the role to list instance profiles for.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "ListRolePoliciesRequest$RoleName": "

    The name of the role to list policies for.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "PolicyRole$RoleName": "

    The name (friendly name, not ARN) identifying the role.

    ", + "PutRolePolicyRequest$RoleName": "

    The name of the role to associate the policy with.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "RemoveRoleFromInstanceProfileRequest$RoleName": "

    The name of the role to remove.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "Role$RoleName": "

    The friendly name that identifies the role.

    ", + "RoleDetail$RoleName": "

    The friendly name that identifies the role.

    ", + "UpdateAssumeRolePolicyRequest$RoleName": "

    The name of the role to update with the new policy.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    " + } + }, + "serialNumberType": { + "base": null, + "refs": { + "DeactivateMFADeviceRequest$SerialNumber": "

    The serial number that uniquely identifies the MFA device. For virtual MFA devices, the serial number is the device ARN.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =/:,.@-

    ", + "DeleteVirtualMFADeviceRequest$SerialNumber": "

    The serial number that uniquely identifies the MFA device. For virtual MFA devices, the serial number is the same as the ARN.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =/:,.@-

    ", + "EnableMFADeviceRequest$SerialNumber": "

    The serial number that uniquely identifies the MFA device. For virtual MFA devices, the serial number is the device ARN.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =/:,.@-
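
A sketch of enabling a device, assuming the imports from the pagination example above; serial is a device ARN for virtual devices, and the two codes are consecutive codes read from the device:

```go
// Sketch only; ResyncMFADevice takes the same user/serial/two-code shape.
func enableMFA(svc *iam.IAM, user, serial, code1, code2 string) error {
	_, err := svc.EnableMFADevice(&iam.EnableMFADeviceInput{
		UserName:            aws.String(user),
		SerialNumber:        aws.String(serial),
		AuthenticationCode1: aws.String(code1),
		AuthenticationCode2: aws.String(code2),
	})
	return err
}
```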

    ", + "MFADevice$SerialNumber": "

    The serial number that uniquely identifies the MFA device. For virtual MFA devices, the serial number is the device ARN.

    ", + "ResyncMFADeviceRequest$SerialNumber": "

    Serial number that uniquely identifies the MFA device.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "VirtualMFADevice$SerialNumber": "

    The serial number associated with VirtualMFADevice.

    " + } + }, + "serverCertificateMetadataListType": { + "base": null, + "refs": { + "ListServerCertificatesResponse$ServerCertificateMetadataList": "

    A list of server certificates.

    " + } + }, + "serverCertificateNameType": { + "base": null, + "refs": { + "DeleteServerCertificateRequest$ServerCertificateName": "

    The name of the server certificate you want to delete.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "GetServerCertificateRequest$ServerCertificateName": "

    The name of the server certificate you want to retrieve information about.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "ServerCertificateMetadata$ServerCertificateName": "

    The name that identifies the server certificate.

    ", + "UpdateServerCertificateRequest$ServerCertificateName": "

    The name of the server certificate that you want to update.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "UpdateServerCertificateRequest$NewServerCertificateName": "

    The new name for the server certificate. Include this only if you are updating the server certificate's name. The name of the certificate cannot contain any spaces.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "UploadServerCertificateRequest$ServerCertificateName": "

    The name for the server certificate. Do not include the path in this value. The name of the certificate cannot contain any spaces.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    " + } + }, + "serviceFailureExceptionMessage": { + "base": null, + "refs": { + "ServiceFailureException$message": null + } + }, + "statusType": { + "base": null, + "refs": { + "AccessKey$Status": "

    The status of the access key. Active means the key is valid for API calls, while Inactive means it is not.

    ", + "AccessKeyMetadata$Status": "

    The status of the access key. Active means the key is valid for API calls; Inactive means it is not.

    ", + "SSHPublicKey$Status": "

    The status of the SSH public key. Active means the key can be used for authentication with an AWS CodeCommit repository. Inactive means the key cannot be used.

    ", + "SSHPublicKeyMetadata$Status": "

    The status of the SSH public key. Active means the key can be used for authentication with an AWS CodeCommit repository. Inactive means the key cannot be used.

    ", + "SigningCertificate$Status": "

    The status of the signing certificate. Active means the key is valid for API calls, while Inactive means it is not.

    ", + "UpdateAccessKeyRequest$Status": "

    The status you want to assign to the secret access key. Active means the key can be used for API calls to AWS, while Inactive means the key cannot be used.

    ", + "UpdateSSHPublicKeyRequest$Status": "

    The status to assign to the SSH public key. Active means the key can be used for authentication with an AWS CodeCommit repository. Inactive means the key cannot be used.

    ", + "UpdateSigningCertificateRequest$Status": "

    The status you want to assign to the certificate. Active means the certificate can be used for API calls to AWS, while Inactive means the certificate cannot be used.

    " + } + }, + "stringType": { + "base": null, + "refs": { + "AccessKeyLastUsed$ServiceName": "

    The name of the AWS service with which this access key was most recently used. This field is null when:

    • The user does not have an access key.

    • An access key exists but has never been used, at least not since IAM started tracking this information on April 22nd, 2015.

    • There is no sign-in data associated with the user

    ", + "AccessKeyLastUsed$Region": "

    The AWS region where this access key was most recently used. This field is null when:

    • The user does not have an access key.

    • An access key exists but has never been used, at least not since IAM started tracking this information on April 22nd, 2015.

    • There is no sign-in data associated with the user

    For more information about AWS regions, see Regions and Endpoints in the Amazon Web Services General Reference.
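
A sketch of reading these fields, assuming the imports from the pagination example above:

```go
// Sketch only; ServiceName and Region are null in the cases listed above,
// so aws.StringValue simply yields "" for them.
func lastUsed(svc *iam.IAM, accessKeyID string) error {
	out, err := svc.GetAccessKeyLastUsed(&iam.GetAccessKeyLastUsedInput{
		AccessKeyId: aws.String(accessKeyID),
	})
	if err != nil {
		return err
	}
	lu := out.AccessKeyLastUsed
	fmt.Println(aws.StringValue(lu.ServiceName), aws.StringValue(lu.Region))
	return nil
}
```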

    " + } + }, + "summaryKeyType": { + "base": null, + "refs": { + "summaryMapType$key": null + } + }, + "summaryMapType": { + "base": null, + "refs": { + "GetAccountSummaryResponse$SummaryMap": "

    A set of key value pairs containing information about IAM entity usage and IAM quotas.

    " + } + }, + "summaryValueType": { + "base": null, + "refs": { + "summaryMapType$value": null + } + }, + "thumbprintListType": { + "base": "

    Contains a list of thumbprints of identity provider server certificates.

    ", + "refs": { + "CreateOpenIDConnectProviderRequest$ThumbprintList": "

    A list of server certificate thumbprints for the OpenID Connect (OIDC) identity provider's server certificate(s). Typically this list includes only one entry. However, IAM lets you have up to five thumbprints for an OIDC provider. This lets you maintain multiple thumbprints if the identity provider is rotating certificates.

    The server certificate thumbprint is the hex-encoded SHA-1 hash value of the X.509 certificate used by the domain where the OpenID Connect provider makes its keys available. It is always a 40-character string.

    You must provide at least one thumbprint when creating an IAM OIDC provider. For example, if the OIDC provider is server.example.com and the provider stores its keys at \"https://keys.server.example.com/openid-connect\", the thumbprint string would be the hex-encoded SHA-1 hash value of the certificate used by https://keys.server.example.com.

    For more information about obtaining the OIDC provider's thumbprint, see Obtaining the Thumbprint for an OpenID Connect Provider in the IAM User Guide.
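
A sketch of creating a provider with one thumbprint, assuming the imports from the pagination example above; the URL, client ID, and thumbprint value are placeholders:

```go
// Sketch only; the thumbprint must be the 40-character hex-encoded SHA-1
// of the certificate serving the provider's keys endpoint.
func createOIDCProvider(svc *iam.IAM) (string, error) {
	out, err := svc.CreateOpenIDConnectProvider(&iam.CreateOpenIDConnectProviderInput{
		Url:            aws.String("https://server.example.com"),
		ClientIDList:   aws.StringSlice([]string{"my-application-id"}),
		ThumbprintList: aws.StringSlice([]string{"0000000000000000000000000000000000000000"}), // placeholder
	})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.OpenIDConnectProviderArn), nil
}
```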

    ", + "GetOpenIDConnectProviderResponse$ThumbprintList": "

    A list of certificate thumbprints that are associated with the specified IAM OIDC provider resource object. For more information, see CreateOpenIDConnectProvider.

    ", + "UpdateOpenIDConnectProviderThumbprintRequest$ThumbprintList": "

    A list of certificate thumbprints that are associated with the specified IAM OpenID Connect provider. For more information, see CreateOpenIDConnectProvider.

    " + } + }, + "thumbprintType": { + "base": "

    Contains a thumbprint for an identity provider's server certificate.

    The identity provider's server certificate thumbprint is the hex-encoded SHA-1 hash value of the self-signed X.509 certificate used by the domain where the OpenID Connect provider makes its keys available. It is always a 40-character string.

    ", + "refs": { + "thumbprintListType$member": null + } + }, + "unrecognizedPublicKeyEncodingMessage": { + "base": null, + "refs": { + "UnrecognizedPublicKeyEncodingException$message": null + } + }, + "userDetailListType": { + "base": null, + "refs": { + "GetAccountAuthorizationDetailsResponse$UserDetailList": "

    A list containing information about IAM users.

    " + } + }, + "userListType": { + "base": "

    Contains a list of users.

    This data type is used as a response element in the GetGroup and ListUsers actions.

    ", + "refs": { + "GetGroupResponse$Users": "

    A list of users in the group.

    ", + "ListUsersResponse$Users": "

    A list of users.

    " + } + }, + "userNameType": { + "base": null, + "refs": { + "AccessKey$UserName": "

    The name of the IAM user that the access key is associated with.

    ", + "AccessKeyMetadata$UserName": "

    The name of the IAM user that the key is associated with.

    ", + "AttachUserPolicyRequest$UserName": "

    The name (friendly name, not ARN) of the IAM user to attach the policy to.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "CreateLoginProfileRequest$UserName": "

    The name of the IAM user to create a password for. The user must already exist.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "CreateUserRequest$UserName": "

    The name of the user to create.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "DeleteLoginProfileRequest$UserName": "

    The name of the user whose password you want to delete.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "DeleteSSHPublicKeyRequest$UserName": "

    The name of the IAM user associated with the SSH public key.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "DetachUserPolicyRequest$UserName": "

    The name (friendly name, not ARN) of the IAM user to detach the policy from.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "GetLoginProfileRequest$UserName": "

    The name of the user whose login profile you want to retrieve.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "GetSSHPublicKeyRequest$UserName": "

    The name of the IAM user associated with the SSH public key.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "ListAttachedUserPoliciesRequest$UserName": "

    The name (friendly name, not ARN) of the user to list attached policies for.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "ListSSHPublicKeysRequest$UserName": "

    The name of the IAM user to list SSH public keys for. If none is specified, the UserName field is determined implicitly based on the AWS access key used to sign the request.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "LoginProfile$UserName": "

    The name of the user, which can be used for signing in to the AWS Management Console.

    ", + "MFADevice$UserName": "

    The user with whom the MFA device is associated.

    ", + "PolicyUser$UserName": "

    The name (friendly name, not ARN) identifying the user.

    ", + "SSHPublicKey$UserName": "

    The name of the IAM user associated with the SSH public key.

    ", + "SSHPublicKeyMetadata$UserName": "

    The name of the IAM user associated with the SSH public key.

    ", + "SigningCertificate$UserName": "

    The name of the user the signing certificate is associated with.

    ", + "UpdateLoginProfileRequest$UserName": "

    The name of the user whose password you want to update.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "UpdateSSHPublicKeyRequest$UserName": "

    The name of the IAM user associated with the SSH public key.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "UpdateUserRequest$NewUserName": "

    New name for the user. Include this parameter only if you're changing the user's name.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "UploadSSHPublicKeyRequest$UserName": "

    The name of the IAM user to associate the SSH public key with.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "User$UserName": "

    The friendly name identifying the user.

    ", + "UserDetail$UserName": "

    The friendly name identifying the user.

    " + } + }, + "virtualMFADeviceListType": { + "base": null, + "refs": { + "ListVirtualMFADevicesResponse$VirtualMFADevices": "

    The list of virtual MFA devices in the current account that match the AssignmentStatus value that was passed in the request.

    " + } + }, + "virtualMFADeviceName": { + "base": null, + "refs": { + "CreateVirtualMFADeviceRequest$VirtualMFADeviceName": "

    The name of the virtual MFA device. Use with path to uniquely identify a virtual MFA device.

    The regex pattern for this parameter is a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/iam/2010-05-08/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/iam/2010-05-08/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/iam/2010-05-08/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/iam/2010-05-08/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/iam/2010-05-08/paginators-1.json new file mode 100644 index 000000000..c476fe16b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/iam/2010-05-08/paginators-1.json @@ -0,0 +1,198 @@ +{ + "pagination": { + "GetAccountAuthorizationDetails": { + "input_token": "Marker", + "limit_key": "MaxItems", + "more_results": "IsTruncated", + "output_token": "Marker", + "result_key": [ + "UserDetailList", + "GroupDetailList", + "RoleDetailList", + "Policies" + ] + }, + "GetGroup": { + "input_token": "Marker", + "limit_key": "MaxItems", + "more_results": "IsTruncated", + "output_token": "Marker", + "result_key": "Users" + }, + "ListAccessKeys": { + "input_token": "Marker", + "limit_key": "MaxItems", + "more_results": "IsTruncated", + "output_token": "Marker", + "result_key": "AccessKeyMetadata" + }, + "ListAccountAliases": { + "input_token": "Marker", + "limit_key": "MaxItems", + "more_results": "IsTruncated", + "output_token": "Marker", + "result_key": "AccountAliases" + }, + "ListAttachedGroupPolicies": { + "input_token": "Marker", + "limit_key": "MaxItems", + "more_results": "IsTruncated", + "output_token": "Marker", + "result_key": "AttachedPolicies" + }, + "ListAttachedRolePolicies": { + "input_token": "Marker", + "limit_key": "MaxItems", + "more_results": "IsTruncated", + "output_token": "Marker", + "result_key": "AttachedPolicies" + }, + "ListAttachedUserPolicies": { + "input_token": "Marker", + "limit_key": "MaxItems", + "more_results": "IsTruncated", + "output_token": "Marker", + "result_key": "AttachedPolicies" + }, + "ListEntitiesForPolicy": { + "input_token": "Marker", + "limit_key": "MaxItems", + "more_results": "IsTruncated", + "output_token": "Marker", + "result_key": [ + "PolicyGroups", + "PolicyUsers", + "PolicyRoles" + ] + }, + "ListGroupPolicies": { + "input_token": "Marker", + "limit_key": "MaxItems", + "more_results": "IsTruncated", + "output_token": "Marker", + "result_key": "PolicyNames" + }, + "ListGroups": { + "input_token": "Marker", + "limit_key": "MaxItems", + "more_results": "IsTruncated", + "output_token": "Marker", + "result_key": "Groups" + }, + "ListGroupsForUser": { + "input_token": "Marker", + "limit_key": "MaxItems", + "more_results": "IsTruncated", + "output_token": "Marker", + "result_key": "Groups" + }, + "ListInstanceProfiles": { + "input_token": "Marker", + "limit_key": "MaxItems", + "more_results": "IsTruncated", + "output_token": "Marker", + "result_key": "InstanceProfiles" + }, + "ListInstanceProfilesForRole": { + "input_token": "Marker", + "limit_key": "MaxItems", + "more_results": "IsTruncated", + "output_token": "Marker", + "result_key": "InstanceProfiles" + }, + "ListMFADevices": { + "input_token": "Marker", + "limit_key": "MaxItems", + "more_results": "IsTruncated", + "output_token": "Marker", + "result_key": "MFADevices" + }, + "ListPolicies": { + "input_token": "Marker", + "limit_key": "MaxItems", + "more_results": "IsTruncated", + "output_token": "Marker", + "result_key": "Policies" + }, + "ListPolicyVersions": { + 
"input_token": "Marker", + "limit_key": "MaxItems", + "more_results": "IsTruncated", + "output_token": "Marker", + "result_key": "Versions" + }, + "ListRolePolicies": { + "input_token": "Marker", + "limit_key": "MaxItems", + "more_results": "IsTruncated", + "output_token": "Marker", + "result_key": "PolicyNames" + }, + "ListRoles": { + "input_token": "Marker", + "limit_key": "MaxItems", + "more_results": "IsTruncated", + "output_token": "Marker", + "result_key": "Roles" + }, + "ListSAMLProviders": { + "result_key": "SAMLProviderList" + }, + "ListServerCertificates": { + "input_token": "Marker", + "limit_key": "MaxItems", + "more_results": "IsTruncated", + "output_token": "Marker", + "result_key": "ServerCertificateMetadataList" + }, + "ListSigningCertificates": { + "input_token": "Marker", + "limit_key": "MaxItems", + "more_results": "IsTruncated", + "output_token": "Marker", + "result_key": "Certificates" + }, + "ListSSHPublicKeys": { + "input_token": "Marker", + "limit_key": "MaxItems", + "more_results": "IsTruncated", + "output_token": "Marker", + "result_key": "SSHPublicKeys" + }, + "ListUserPolicies": { + "input_token": "Marker", + "limit_key": "MaxItems", + "more_results": "IsTruncated", + "output_token": "Marker", + "result_key": "PolicyNames" + }, + "ListUsers": { + "input_token": "Marker", + "limit_key": "MaxItems", + "more_results": "IsTruncated", + "output_token": "Marker", + "result_key": "Users" + }, + "ListVirtualMFADevices": { + "input_token": "Marker", + "limit_key": "MaxItems", + "more_results": "IsTruncated", + "output_token": "Marker", + "result_key": "VirtualMFADevices" + }, + "SimulateCustomPolicy": { + "input_token": "Marker", + "limit_key": "MaxItems", + "more_results": "IsTruncated", + "output_token": "Marker", + "result_key": "EvaluationResults" + }, + "SimulatePrincipalPolicy": { + "input_token": "Marker", + "limit_key": "MaxItems", + "more_results": "IsTruncated", + "output_token": "Marker", + "result_key": "EvaluationResults" + } + } +} \ No newline at end of file diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/iam/2010-05-08/waiters-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/iam/2010-05-08/waiters-2.json new file mode 100644 index 000000000..ba4538269 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/iam/2010-05-08/waiters-2.json @@ -0,0 +1,39 @@ +{ + "version": 2, + "waiters": { + "InstanceProfileExists": { + "delay": 1, + "operation": "GetInstanceProfile", + "maxAttempts": 40, + "acceptors": [ + { + "expected": 200, + "matcher": "status", + "state": "success" + }, + { + "state": "retry", + "matcher": "status", + "expected": 404 + } + ] + }, + "UserExists": { + "delay": 1, + "operation": "GetUser", + "maxAttempts": 20, + "acceptors": [ + { + "state": "success", + "matcher": "status", + "expected": 200 + }, + { + "state": "retry", + "matcher": "error", + "expected": "NoSuchEntity" + } + ] + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/importexport/2010-06-01/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/importexport/2010-06-01/api-2.json new file mode 100644 index 000000000..308dd9eea --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/importexport/2010-06-01/api-2.json @@ -0,0 +1,666 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2010-06-01", + "endpointPrefix":"importexport", + "globalEndpoint":"importexport.amazonaws.com", + "serviceFullName":"AWS Import/Export", + "signatureVersion":"v2", + "xmlNamespace":"http://importexport.amazonaws.com/doc/2010-06-01/", + 
"protocol":"query" + }, + "operations":{ + "CancelJob":{ + "name":"CancelJob", + "http":{ + "method":"POST", + "requestUri":"/?Operation=CancelJob" + }, + "input":{"shape":"CancelJobInput"}, + "output":{ + "shape":"CancelJobOutput", + "resultWrapper":"CancelJobResult" + }, + "errors":[ + { + "shape":"InvalidJobIdException", + "exception":true + }, + { + "shape":"ExpiredJobIdException", + "exception":true + }, + { + "shape":"CanceledJobIdException", + "exception":true + }, + { + "shape":"UnableToCancelJobIdException", + "exception":true + }, + { + "shape":"InvalidAccessKeyIdException", + "exception":true + }, + { + "shape":"InvalidVersionException", + "exception":true + } + ] + }, + "CreateJob":{ + "name":"CreateJob", + "http":{ + "method":"POST", + "requestUri":"/?Operation=CreateJob" + }, + "input":{"shape":"CreateJobInput"}, + "output":{ + "shape":"CreateJobOutput", + "resultWrapper":"CreateJobResult" + }, + "errors":[ + { + "shape":"MissingParameterException", + "exception":true + }, + { + "shape":"InvalidParameterException", + "exception":true + }, + { + "shape":"InvalidAccessKeyIdException", + "exception":true + }, + { + "shape":"InvalidAddressException", + "exception":true + }, + { + "shape":"InvalidManifestFieldException", + "exception":true + }, + { + "shape":"MissingManifestFieldException", + "exception":true + }, + { + "shape":"NoSuchBucketException", + "exception":true + }, + { + "shape":"MissingCustomsException", + "exception":true + }, + { + "shape":"InvalidCustomsException", + "exception":true + }, + { + "shape":"InvalidFileSystemException", + "exception":true + }, + { + "shape":"MultipleRegionsException", + "exception":true + }, + { + "shape":"BucketPermissionException", + "exception":true + }, + { + "shape":"MalformedManifestException", + "exception":true + }, + { + "shape":"CreateJobQuotaExceededException", + "exception":true + }, + { + "shape":"InvalidJobIdException", + "exception":true + }, + { + "shape":"InvalidVersionException", + "exception":true + } + ] + }, + "GetShippingLabel":{ + "name":"GetShippingLabel", + "http":{ + "method":"POST", + "requestUri":"/?Operation=GetShippingLabel" + }, + "input":{"shape":"GetShippingLabelInput"}, + "output":{ + "shape":"GetShippingLabelOutput", + "resultWrapper":"GetShippingLabelResult" + }, + "errors":[ + { + "shape":"InvalidJobIdException", + "exception":true + }, + { + "shape":"ExpiredJobIdException", + "exception":true + }, + { + "shape":"CanceledJobIdException", + "exception":true + }, + { + "shape":"InvalidAccessKeyIdException", + "exception":true + }, + { + "shape":"InvalidAddressException", + "exception":true + }, + { + "shape":"InvalidVersionException", + "exception":true + }, + { + "shape":"InvalidParameterException", + "exception":true + } + ] + }, + "GetStatus":{ + "name":"GetStatus", + "http":{ + "method":"POST", + "requestUri":"/?Operation=GetStatus" + }, + "input":{"shape":"GetStatusInput"}, + "output":{ + "shape":"GetStatusOutput", + "resultWrapper":"GetStatusResult" + }, + "errors":[ + { + "shape":"InvalidJobIdException", + "exception":true + }, + { + "shape":"ExpiredJobIdException", + "exception":true + }, + { + "shape":"CanceledJobIdException", + "exception":true + }, + { + "shape":"InvalidAccessKeyIdException", + "exception":true + }, + { + "shape":"InvalidVersionException", + "exception":true + } + ] + }, + "ListJobs":{ + "name":"ListJobs", + "http":{ + "method":"POST", + "requestUri":"/?Operation=ListJobs" + }, + "input":{"shape":"ListJobsInput"}, + "output":{ + "shape":"ListJobsOutput", + 
"resultWrapper":"ListJobsResult" + }, + "errors":[ + { + "shape":"InvalidParameterException", + "exception":true + }, + { + "shape":"InvalidAccessKeyIdException", + "exception":true + }, + { + "shape":"InvalidVersionException", + "exception":true + } + ] + }, + "UpdateJob":{ + "name":"UpdateJob", + "http":{ + "method":"POST", + "requestUri":"/?Operation=UpdateJob" + }, + "input":{"shape":"UpdateJobInput"}, + "output":{ + "shape":"UpdateJobOutput", + "resultWrapper":"UpdateJobResult" + }, + "errors":[ + { + "shape":"MissingParameterException", + "exception":true + }, + { + "shape":"InvalidParameterException", + "exception":true + }, + { + "shape":"InvalidAccessKeyIdException", + "exception":true + }, + { + "shape":"InvalidAddressException", + "exception":true + }, + { + "shape":"InvalidManifestFieldException", + "exception":true + }, + { + "shape":"InvalidJobIdException", + "exception":true + }, + { + "shape":"MissingManifestFieldException", + "exception":true + }, + { + "shape":"NoSuchBucketException", + "exception":true + }, + { + "shape":"ExpiredJobIdException", + "exception":true + }, + { + "shape":"CanceledJobIdException", + "exception":true + }, + { + "shape":"MissingCustomsException", + "exception":true + }, + { + "shape":"InvalidCustomsException", + "exception":true + }, + { + "shape":"InvalidFileSystemException", + "exception":true + }, + { + "shape":"MultipleRegionsException", + "exception":true + }, + { + "shape":"BucketPermissionException", + "exception":true + }, + { + "shape":"MalformedManifestException", + "exception":true + }, + { + "shape":"UnableToUpdateJobIdException", + "exception":true + }, + { + "shape":"InvalidVersionException", + "exception":true + } + ] + } + }, + "shapes":{ + "APIVersion":{"type":"string"}, + "Artifact":{ + "type":"structure", + "members":{ + "Description":{"shape":"Description"}, + "URL":{"shape":"URL"} + } + }, + "ArtifactList":{ + "type":"list", + "member":{"shape":"Artifact"} + }, + "BucketPermissionException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "CancelJobInput":{ + "type":"structure", + "required":["JobId"], + "members":{ + "JobId":{"shape":"JobId"}, + "APIVersion":{"shape":"APIVersion"} + } + }, + "CancelJobOutput":{ + "type":"structure", + "members":{ + "Success":{"shape":"Success"} + } + }, + "CanceledJobIdException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "Carrier":{"type":"string"}, + "CreateJobInput":{ + "type":"structure", + "required":[ + "JobType", + "Manifest", + "ValidateOnly" + ], + "members":{ + "JobType":{"shape":"JobType"}, + "Manifest":{"shape":"Manifest"}, + "ManifestAddendum":{"shape":"ManifestAddendum"}, + "ValidateOnly":{"shape":"ValidateOnly"}, + "APIVersion":{"shape":"APIVersion"} + } + }, + "CreateJobOutput":{ + "type":"structure", + "members":{ + "JobId":{"shape":"JobId"}, + "JobType":{"shape":"JobType"}, + "Signature":{"shape":"Signature"}, + "SignatureFileContents":{"shape":"SignatureFileContents"}, + "WarningMessage":{"shape":"WarningMessage"}, + "ArtifactList":{"shape":"ArtifactList"} + } + }, + "CreateJobQuotaExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "CreationDate":{"type":"timestamp"}, + "CurrentManifest":{"type":"string"}, + "Description":{"type":"string"}, + "ErrorCount":{"type":"integer"}, + "ErrorMessage":{"type":"string"}, + "ExpiredJobIdException":{ + "type":"structure", + "members":{ + 
"message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "GenericString":{"type":"string"}, + "GetShippingLabelInput":{ + "type":"structure", + "required":["jobIds"], + "members":{ + "jobIds":{"shape":"JobIdList"}, + "name":{"shape":"name"}, + "company":{"shape":"company"}, + "phoneNumber":{"shape":"phoneNumber"}, + "country":{"shape":"country"}, + "stateOrProvince":{"shape":"stateOrProvince"}, + "city":{"shape":"city"}, + "postalCode":{"shape":"postalCode"}, + "street1":{"shape":"street1"}, + "street2":{"shape":"street2"}, + "street3":{"shape":"street3"}, + "APIVersion":{"shape":"APIVersion"} + } + }, + "GetShippingLabelOutput":{ + "type":"structure", + "members":{ + "ShippingLabelURL":{"shape":"GenericString"}, + "Warning":{"shape":"GenericString"} + } + }, + "GetStatusInput":{ + "type":"structure", + "required":["JobId"], + "members":{ + "JobId":{"shape":"JobId"}, + "APIVersion":{"shape":"APIVersion"} + } + }, + "GetStatusOutput":{ + "type":"structure", + "members":{ + "JobId":{"shape":"JobId"}, + "JobType":{"shape":"JobType"}, + "LocationCode":{"shape":"LocationCode"}, + "LocationMessage":{"shape":"LocationMessage"}, + "ProgressCode":{"shape":"ProgressCode"}, + "ProgressMessage":{"shape":"ProgressMessage"}, + "Carrier":{"shape":"Carrier"}, + "TrackingNumber":{"shape":"TrackingNumber"}, + "LogBucket":{"shape":"LogBucket"}, + "LogKey":{"shape":"LogKey"}, + "ErrorCount":{"shape":"ErrorCount"}, + "Signature":{"shape":"Signature"}, + "SignatureFileContents":{"shape":"Signature"}, + "CurrentManifest":{"shape":"CurrentManifest"}, + "CreationDate":{"shape":"CreationDate"}, + "ArtifactList":{"shape":"ArtifactList"} + } + }, + "InvalidAccessKeyIdException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "InvalidAddressException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "InvalidCustomsException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "InvalidFileSystemException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "InvalidJobIdException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "InvalidManifestFieldException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "InvalidParameterException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "InvalidVersionException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "IsCanceled":{"type":"boolean"}, + "IsTruncated":{"type":"boolean"}, + "Job":{ + "type":"structure", + "members":{ + "JobId":{"shape":"JobId"}, + "CreationDate":{"shape":"CreationDate"}, + "IsCanceled":{"shape":"IsCanceled"}, + "JobType":{"shape":"JobType"} + } + }, + "JobId":{"type":"string"}, + "JobIdList":{ + "type":"list", + "member":{"shape":"GenericString"} + }, + "JobType":{ + "type":"string", + "enum":[ + "Import", + "Export" + ] + }, + "JobsList":{ + "type":"list", + "member":{"shape":"Job"} + }, + "ListJobsInput":{ + "type":"structure", + "members":{ + "MaxJobs":{"shape":"MaxJobs"}, + "Marker":{"shape":"Marker"}, + "APIVersion":{"shape":"APIVersion"} + } + }, + "ListJobsOutput":{ + "type":"structure", + "members":{ + "Jobs":{"shape":"JobsList"}, + "IsTruncated":{"shape":"IsTruncated"} + } + }, + 
"LocationCode":{"type":"string"}, + "LocationMessage":{"type":"string"}, + "LogBucket":{"type":"string"}, + "LogKey":{"type":"string"}, + "MalformedManifestException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "Manifest":{"type":"string"}, + "ManifestAddendum":{"type":"string"}, + "Marker":{"type":"string"}, + "MaxJobs":{"type":"integer"}, + "MissingCustomsException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "MissingManifestFieldException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "MissingParameterException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "MultipleRegionsException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "NoSuchBucketException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "ProgressCode":{"type":"string"}, + "ProgressMessage":{"type":"string"}, + "Signature":{"type":"string"}, + "SignatureFileContents":{"type":"string"}, + "Success":{"type":"boolean"}, + "TrackingNumber":{"type":"string"}, + "URL":{"type":"string"}, + "UnableToCancelJobIdException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "UnableToUpdateJobIdException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "UpdateJobInput":{ + "type":"structure", + "required":[ + "JobId", + "Manifest", + "JobType", + "ValidateOnly" + ], + "members":{ + "JobId":{"shape":"JobId"}, + "Manifest":{"shape":"Manifest"}, + "JobType":{"shape":"JobType"}, + "ValidateOnly":{"shape":"ValidateOnly"}, + "APIVersion":{"shape":"APIVersion"} + } + }, + "UpdateJobOutput":{ + "type":"structure", + "members":{ + "Success":{"shape":"Success"}, + "WarningMessage":{"shape":"WarningMessage"}, + "ArtifactList":{"shape":"ArtifactList"} + } + }, + "ValidateOnly":{"type":"boolean"}, + "WarningMessage":{"type":"string"}, + "city":{"type":"string"}, + "company":{"type":"string"}, + "country":{"type":"string"}, + "name":{"type":"string"}, + "phoneNumber":{"type":"string"}, + "postalCode":{"type":"string"}, + "stateOrProvince":{"type":"string"}, + "street1":{"type":"string"}, + "street2":{"type":"string"}, + "street3":{"type":"string"} + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/importexport/2010-06-01/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/importexport/2010-06-01/docs-2.json new file mode 100644 index 000000000..601090e96 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/importexport/2010-06-01/docs-2.json @@ -0,0 +1,482 @@ +{ + "version": "2.0", + "operations": { + "CancelJob": "This operation cancels a specified job. Only the job owner can cancel it. The operation fails if the job has already started or is complete.", + "CreateJob": "This operation initiates the process of scheduling an upload or download of your data. You include in the request a manifest that describes the data transfer specifics. 
The response to the request includes a job ID, which you can use in other operations, a signature that you use to identify your storage device, and the address where you should ship your storage device.", + "GetShippingLabel": "This operation generates a pre-paid UPS shipping label that you will use to ship your device to AWS for processing.", + "GetStatus": "This operation returns information about a job, including where the job is in the processing pipeline, the status of the results, and the signature value associated with the job. You can only return information about jobs you own.", + "ListJobs": "This operation returns the jobs associated with the requester. AWS Import/Export lists the jobs in reverse chronological order based on the date of creation. For example if Job Test1 was created 2009Dec30 and Test2 was created 2010Feb05, the ListJobs operation would return Test2 followed by Test1.", + "UpdateJob": "You use this operation to change the parameters specified in the original manifest file by supplying a new manifest file. The manifest file attached to this request replaces the original manifest file. You can only use the operation after a CreateJob request but before the data transfer starts and you can only use it on jobs you own." + }, + "service": "AWS Import/Export Service AWS Import/Export accelerates transferring large amounts of data between the AWS cloud and portable storage devices that you mail to us. AWS Import/Export transfers data directly onto and off of your storage devices using Amazon's high-speed internal network and bypassing the Internet. For large data sets, AWS Import/Export is often faster than Internet transfer and more cost effective than upgrading your connectivity.", + "shapes": { + "APIVersion": { + "base": "Specifies the version of the client tool.", + "refs": { + "CancelJobInput$APIVersion": null, + "CreateJobInput$APIVersion": null, + "GetShippingLabelInput$APIVersion": null, + "GetStatusInput$APIVersion": null, + "ListJobsInput$APIVersion": null, + "UpdateJobInput$APIVersion": null + } + }, + "Artifact": { + "base": "A discrete item that contains the description and URL of an artifact (such as a PDF).", + "refs": { + "ArtifactList$member": null + } + }, + "ArtifactList": { + "base": "A collection of artifacts.", + "refs": { + "CreateJobOutput$ArtifactList": null, + "GetStatusOutput$ArtifactList": null, + "UpdateJobOutput$ArtifactList": null + } + }, + "BucketPermissionException": { + "base": "The account specified does not have the appropriate bucket permissions.", + "refs": { + } + }, + "CancelJobInput": { + "base": "Input structure for the CancelJob operation.", + "refs": { + } + }, + "CancelJobOutput": { + "base": "Output structure for the CancelJob operation.", + "refs": { + } + }, + "CanceledJobIdException": { + "base": "The specified job ID has been canceled and is no longer valid.", + "refs": { + } + }, + "Carrier": { + "base": "Name of the shipping company. This value is included when the LocationCode is \"Returned\".", + "refs": { + "GetStatusOutput$Carrier": null + } + }, + "CreateJobInput": { + "base": "Input structure for the CreateJob operation.", + "refs": { + } + }, + "CreateJobOutput": { + "base": "Output structure for the CreateJob operation.", + "refs": { + } + }, + "CreateJobQuotaExceededException": { + "base": "Each account can create only a certain number of jobs per day. 
If you need to create more than this, please contact awsimportexport@amazon.com to explain your particular use case.", + "refs": { + } + }, + "CreationDate": { + "base": "Timestamp of the CreateJob request in ISO8601 date format. For example \"2010-03-28T20:27:35Z\".", + "refs": { + "GetStatusOutput$CreationDate": null, + "Job$CreationDate": null + } + }, + "CurrentManifest": { + "base": "The last manifest submitted, which will be used to process the job.", + "refs": { + "GetStatusOutput$CurrentManifest": null + } + }, + "Description": { + "base": "The associated description for this object.", + "refs": { + "Artifact$Description": null + } + }, + "ErrorCount": { + "base": "Number of errors. We return this value when the ProgressCode is Success or SuccessWithErrors.", + "refs": { + "GetStatusOutput$ErrorCount": null + } + }, + "ErrorMessage": { + "base": "The human-readable description of a particular error.", + "refs": { + "BucketPermissionException$message": null, + "CanceledJobIdException$message": null, + "CreateJobQuotaExceededException$message": null, + "ExpiredJobIdException$message": null, + "InvalidAccessKeyIdException$message": null, + "InvalidAddressException$message": null, + "InvalidCustomsException$message": null, + "InvalidFileSystemException$message": null, + "InvalidJobIdException$message": null, + "InvalidManifestFieldException$message": null, + "InvalidParameterException$message": null, + "InvalidVersionException$message": null, + "MalformedManifestException$message": null, + "MissingCustomsException$message": null, + "MissingManifestFieldException$message": null, + "MissingParameterException$message": null, + "MultipleRegionsException$message": null, + "NoSuchBucketException$message": null, + "UnableToCancelJobIdException$message": null, + "UnableToUpdateJobIdException$message": null + } + }, + "ExpiredJobIdException": { + "base": "Indicates that the specified job has expired out of the system.", + "refs": { + } + }, + "GenericString": { + "base": null, + "refs": { + "GetShippingLabelOutput$ShippingLabelURL": null, + "GetShippingLabelOutput$Warning": null, + "JobIdList$member": null + } + }, + "GetShippingLabelInput": { + "base": null, + "refs": { + } + }, + "GetShippingLabelOutput": { + "base": null, + "refs": { + } + }, + "GetStatusInput": { + "base": "Input structure for the GetStatus operation.", + "refs": { + } + }, + "GetStatusOutput": { + "base": "Output structure for the GetStatus operation.", + "refs": { + } + }, + "InvalidAccessKeyIdException": { + "base": "The AWS Access Key ID specified in the request did not match the manifest's accessKeyId value. The manifest and the request authentication must use the same AWS Access Key ID.", + "refs": { + } + }, + "InvalidAddressException": { + "base": "The address specified in the manifest is invalid.", + "refs": { + } + }, + "InvalidCustomsException": { + "base": "One or more customs parameters was invalid. Please correct and resubmit.", + "refs": { + } + }, + "InvalidFileSystemException": { + "base": "File system specified in export manifest is invalid.", + "refs": { + } + }, + "InvalidJobIdException": { + "base": "The JOBID was missing, not found, or not associated with the AWS account.", + "refs": { + } + }, + "InvalidManifestFieldException": { + "base": "One or more manifest fields was invalid. 
Please correct and resubmit.", + "refs": { + } + }, + "InvalidParameterException": { + "base": "One or more parameters had an invalid value.", + "refs": { + } + }, + "InvalidVersionException": { + "base": "The client tool version is invalid.", + "refs": { + } + }, + "IsCanceled": { + "base": "Indicates whether the job was canceled.", + "refs": { + "Job$IsCanceled": null + } + }, + "IsTruncated": { + "base": "Indicates whether the list of jobs was truncated. If true, then call ListJobs again using the last JobId element as the marker.", + "refs": { + "ListJobsOutput$IsTruncated": null + } + }, + "Job": { + "base": "Representation of a job returned by the ListJobs operation.", + "refs": { + "JobsList$member": null + } + }, + "JobId": { + "base": "A unique identifier which refers to a particular job.", + "refs": { + "CancelJobInput$JobId": null, + "CreateJobOutput$JobId": null, + "GetStatusInput$JobId": null, + "GetStatusOutput$JobId": null, + "Job$JobId": null, + "UpdateJobInput$JobId": null + } + }, + "JobIdList": { + "base": null, + "refs": { + "GetShippingLabelInput$jobIds": null + } + }, + "JobType": { + "base": "Specifies whether the job to initiate is an import or export job.", + "refs": { + "CreateJobInput$JobType": null, + "CreateJobOutput$JobType": null, + "GetStatusOutput$JobType": null, + "Job$JobType": null, + "UpdateJobInput$JobType": null + } + }, + "JobsList": { + "base": "A list container for Jobs returned by the ListJobs operation.", + "refs": { + "ListJobsOutput$Jobs": null + } + }, + "ListJobsInput": { + "base": "Input structure for the ListJobs operation.", + "refs": { + } + }, + "ListJobsOutput": { + "base": "Output structure for the ListJobs operation.", + "refs": { + } + }, + "LocationCode": { + "base": "A token representing the location of the storage device, such as \"AtAWS\".", + "refs": { + "GetStatusOutput$LocationCode": null + } + }, + "LocationMessage": { + "base": "A more human readable form of the physical location of the storage device.", + "refs": { + "GetStatusOutput$LocationMessage": null + } + }, + "LogBucket": { + "base": "Amazon S3 bucket for user logs.", + "refs": { + "GetStatusOutput$LogBucket": null + } + }, + "LogKey": { + "base": "The key where the user logs were stored.", + "refs": { + "GetStatusOutput$LogKey": null + } + }, + "MalformedManifestException": { + "base": "Your manifest is not well-formed.", + "refs": { + } + }, + "Manifest": { + "base": "The UTF-8 encoded text of the manifest file.", + "refs": { + "CreateJobInput$Manifest": null, + "UpdateJobInput$Manifest": null + } + }, + "ManifestAddendum": { + "base": "For internal use only.", + "refs": { + "CreateJobInput$ManifestAddendum": null + } + }, + "Marker": { + "base": "Specifies the JOBID to start after when listing the jobs created with your account. AWS Import/Export lists your jobs in reverse chronological order. See MaxJobs.", + "refs": { + "ListJobsInput$Marker": null + } + }, + "MaxJobs": { + "base": "Sets the maximum number of jobs returned in the response. If there are additional jobs that were not returned because MaxJobs was exceeded, the response contains <IsTruncated>true</IsTruncated>. To return the additional jobs, see Marker.", + "refs": { + "ListJobsInput$MaxJobs": null + } + }, + "MissingCustomsException": { + "base": "One or more required customs parameters was missing from the manifest.", + "refs": { + } + }, + "MissingManifestFieldException": { + "base": "One or more required fields were missing from the manifest file. 
Please correct and resubmit.", + "refs": { + } + }, + "MissingParameterException": { + "base": "One or more required parameters was missing from the request.", + "refs": { + } + }, + "MultipleRegionsException": { + "base": "Your manifest file contained buckets from multiple regions. A job is restricted to buckets from one region. Please correct and resubmit.", + "refs": { + } + }, + "NoSuchBucketException": { + "base": "The specified bucket does not exist. Create the specified bucket or change the manifest's bucket, exportBucket, or logBucket field to a bucket that the account, as specified by the manifest's Access Key ID, has write permissions to.", + "refs": { + } + }, + "ProgressCode": { + "base": "A token representing the state of the job, such as \"Started\".", + "refs": { + "GetStatusOutput$ProgressCode": null + } + }, + "ProgressMessage": { + "base": "A more human readable form of the job status.", + "refs": { + "GetStatusOutput$ProgressMessage": null + } + }, + "Signature": { + "base": "An encrypted code used to authenticate the request and response, for example, \"DV+TpDfx1/TdSE9ktyK9k/bDTVI=\". Only use this value if you want to create the signature file yourself. Generally you should use the SignatureFileContents value.", + "refs": { + "CreateJobOutput$Signature": null, + "GetStatusOutput$Signature": null, + "GetStatusOutput$SignatureFileContents": null + } + }, + "SignatureFileContents": { + "base": "The actual text of the SIGNATURE file to be written to disk.", + "refs": { + "CreateJobOutput$SignatureFileContents": null + } + }, + "Success": { + "base": "Specifies whether (true) or not (false) AWS Import/Export updated your job.", + "refs": { + "CancelJobOutput$Success": null, + "UpdateJobOutput$Success": null + } + }, + "TrackingNumber": { + "base": "The shipping tracking number assigned by AWS Import/Export to the storage device when it's returned to you. 
We return this value when the LocationCode is \"Returned\".", + "refs": { + "GetStatusOutput$TrackingNumber": null + } + }, + "URL": { + "base": "The URL for a given Artifact.", + "refs": { + "Artifact$URL": null + } + }, + "UnableToCancelJobIdException": { + "base": "AWS Import/Export cannot cancel the job.", + "refs": { + } + }, + "UnableToUpdateJobIdException": { + "base": "AWS Import/Export cannot update the job.", + "refs": { + } + }, + "UpdateJobInput": { + "base": "Input structure for the UpdateJob operation.", + "refs": { + } + }, + "UpdateJobOutput": { + "base": "Output structure for the UpdateJob operation.", + "refs": { + } + }, + "ValidateOnly": { + "base": "Validate the manifest and parameter values in the request but do not actually create a job.", + "refs": { + "CreateJobInput$ValidateOnly": null, + "UpdateJobInput$ValidateOnly": null + } + }, + "WarningMessage": { + "base": "An optional message notifying you of non-fatal issues with the job, such as use of an incompatible Amazon S3 bucket name.", + "refs": { + "CreateJobOutput$WarningMessage": null, + "UpdateJobOutput$WarningMessage": null + } + }, + "city": { + "base": "Specifies the name of your city for the return address.", + "refs": { + "GetShippingLabelInput$city": null + } + }, + "company": { + "base": "Specifies the name of the company that will ship this package.", + "refs": { + "GetShippingLabelInput$company": null + } + }, + "country": { + "base": "Specifies the name of your country for the return address.", + "refs": { + "GetShippingLabelInput$country": null + } + }, + "name": { + "base": "Specifies the name of the person responsible for shipping this package.", + "refs": { + "GetShippingLabelInput$name": null + } + }, + "phoneNumber": { + "base": "Specifies the phone number of the person responsible for shipping this package.", + "refs": { + "GetShippingLabelInput$phoneNumber": null + } + }, + "postalCode": { + "base": "Specifies the postal code for the return address.", + "refs": { + "GetShippingLabelInput$postalCode": null + } + }, + "stateOrProvince": { + "base": "Specifies the name of your state or your province for the return address.", + "refs": { + "GetShippingLabelInput$stateOrProvince": null + } + }, + "street1": { + "base": "Specifies the first part of the street address for the return address, for example 1234 Main Street.", + "refs": { + "GetShippingLabelInput$street1": null + } + }, + "street2": { + "base": "Specifies the optional second part of the street address for the return address, for example Suite 100.", + "refs": { + "GetShippingLabelInput$street2": null + } + }, + "street3": { + "base": "Specifies the optional third part of the street address for the return address, for example c/o Jane Doe.", + "refs": { + "GetShippingLabelInput$street3": null + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/importexport/2010-06-01/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/importexport/2010-06-01/paginators-1.json new file mode 100644 index 000000000..702385ea6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/importexport/2010-06-01/paginators-1.json @@ -0,0 +1,11 @@ +{ + "pagination": { + "ListJobs": { + "input_token": "Marker", + "output_token": "Jobs[-1].JobId", + "more_results": "IsTruncated", + "limit_key": "MaxJobs", + "result_key": "Jobs" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/inspector/2015-08-18/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/inspector/2015-08-18/api-2.json new file mode 100644 
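The ListJobs paginator above threads the last JobId of each page back in as Marker (output_token "Jobs[-1].JobId") until IsTruncated comes back false. A hand-rolled version of that loop, assuming the same default session setup as before; the page size of 50 is arbitrary:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/importexport"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := importexport.New(sess)

	// Walk the job list in reverse chronological order, passing the
	// last page's final JobId back as Marker while IsTruncated is true.
	var marker *string
	for {
		out, err := svc.ListJobs(&importexport.ListJobsInput{
			MaxJobs: aws.Int64(50), // arbitrary page size
			Marker:  marker,
		})
		if err != nil {
			fmt.Println("list jobs:", err)
			return
		}
		for _, j := range out.Jobs {
			fmt.Println(aws.StringValue(j.JobId))
		}
		if !aws.BoolValue(out.IsTruncated) || len(out.Jobs) == 0 {
			break
		}
		marker = out.Jobs[len(out.Jobs)-1].JobId
	}
}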
index 000000000..d1a6d91b2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/inspector/2015-08-18/api-2.json @@ -0,0 +1,1426 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-08-18", + "endpointPrefix":"inspector", + "jsonVersion":"1.1", + "protocol":"json", + "serviceFullName":"Amazon Inspector", + "signatureVersion":"v4", + "targetPrefix":"InspectorService" + }, + "operations":{ + "AddAttributesToFindings":{ + "name":"AddAttributesToFindings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddAttributesToFindingsRequest"}, + "output":{"shape":"AddAttributesToFindingsResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "AttachAssessmentAndRulesPackage":{ + "name":"AttachAssessmentAndRulesPackage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachAssessmentAndRulesPackageRequest"}, + "output":{"shape":"AttachAssessmentAndRulesPackageResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "CreateApplication":{ + "name":"CreateApplication", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateApplicationRequest"}, + "output":{"shape":"CreateApplicationResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "CreateAssessment":{ + "name":"CreateAssessment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateAssessmentRequest"}, + "output":{"shape":"CreateAssessmentResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "CreateResourceGroup":{ + "name":"CreateResourceGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateResourceGroupRequest"}, + "output":{"shape":"CreateResourceGroupResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"} + ] + }, + "DeleteApplication":{ + "name":"DeleteApplication", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteApplicationRequest"}, + "output":{"shape":"DeleteApplicationResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"OperationInProgressException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "DeleteAssessment":{ + "name":"DeleteAssessment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAssessmentRequest"}, + "output":{"shape":"DeleteAssessmentResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"OperationInProgressException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "DeleteRun":{ + "name":"DeleteRun", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRunRequest"}, + "output":{"shape":"DeleteRunResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "DescribeApplication":{ + "name":"DescribeApplication", + "http":{ + 
"method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeApplicationRequest"}, + "output":{"shape":"DescribeApplicationResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "DescribeAssessment":{ + "name":"DescribeAssessment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAssessmentRequest"}, + "output":{"shape":"DescribeAssessmentResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "DescribeCrossAccountAccessRole":{ + "name":"DescribeCrossAccountAccessRole", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{"shape":"DescribeCrossAccountAccessRoleResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"AccessDeniedException"} + ] + }, + "DescribeFinding":{ + "name":"DescribeFinding", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeFindingRequest"}, + "output":{"shape":"DescribeFindingResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "DescribeResourceGroup":{ + "name":"DescribeResourceGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeResourceGroupRequest"}, + "output":{"shape":"DescribeResourceGroupResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "DescribeRulesPackage":{ + "name":"DescribeRulesPackage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRulesPackageRequest"}, + "output":{"shape":"DescribeRulesPackageResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "DescribeRun":{ + "name":"DescribeRun", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRunRequest"}, + "output":{"shape":"DescribeRunResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "DetachAssessmentAndRulesPackage":{ + "name":"DetachAssessmentAndRulesPackage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachAssessmentAndRulesPackageRequest"}, + "output":{"shape":"DetachAssessmentAndRulesPackageResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "GetAssessmentTelemetry":{ + "name":"GetAssessmentTelemetry", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetAssessmentTelemetryRequest"}, + "output":{"shape":"GetAssessmentTelemetryResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "ListApplications":{ + "name":"ListApplications", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListApplicationsRequest"}, + "output":{"shape":"ListApplicationsResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, 
+ {"shape":"AccessDeniedException"} + ] + }, + "ListAssessmentAgents":{ + "name":"ListAssessmentAgents", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAssessmentAgentsRequest"}, + "output":{"shape":"ListAssessmentAgentsResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "ListAssessments":{ + "name":"ListAssessments", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAssessmentsRequest"}, + "output":{"shape":"ListAssessmentsResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "ListAttachedAssessments":{ + "name":"ListAttachedAssessments", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAttachedAssessmentsRequest"}, + "output":{"shape":"ListAttachedAssessmentsResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "ListAttachedRulesPackages":{ + "name":"ListAttachedRulesPackages", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAttachedRulesPackagesRequest"}, + "output":{"shape":"ListAttachedRulesPackagesResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "ListFindings":{ + "name":"ListFindings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListFindingsRequest"}, + "output":{"shape":"ListFindingsResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "ListRulesPackages":{ + "name":"ListRulesPackages", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListRulesPackagesRequest"}, + "output":{"shape":"ListRulesPackagesResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"} + ] + }, + "ListRuns":{ + "name":"ListRuns", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListRunsRequest"}, + "output":{"shape":"ListRunsResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "LocalizeText":{ + "name":"LocalizeText", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"LocalizeTextRequest"}, + "output":{"shape":"LocalizeTextResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "PreviewAgentsForResourceGroup":{ + "name":"PreviewAgentsForResourceGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PreviewAgentsForResourceGroupRequest"}, + 
"output":{"shape":"PreviewAgentsForResourceGroupResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidCrossAccountRoleException"} + ] + }, + "RegisterCrossAccountAccessRole":{ + "name":"RegisterCrossAccountAccessRole", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterCrossAccountAccessRoleRequest"}, + "output":{"shape":"RegisterCrossAccountAccessRoleResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidCrossAccountRoleException"} + ] + }, + "RemoveAttributesFromFindings":{ + "name":"RemoveAttributesFromFindings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveAttributesFromFindingsRequest"}, + "output":{"shape":"RemoveAttributesFromFindingsResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "RunAssessment":{ + "name":"RunAssessment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RunAssessmentRequest"}, + "output":{"shape":"RunAssessmentResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "SetTagsForResource":{ + "name":"SetTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetTagsForResourceRequest"}, + "output":{"shape":"SetTagsForResourceResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "StartDataCollection":{ + "name":"StartDataCollection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartDataCollectionRequest"}, + "output":{"shape":"StartDataCollectionResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidCrossAccountRoleException"} + ] + }, + "StopDataCollection":{ + "name":"StopDataCollection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopDataCollectionRequest"}, + "output":{"shape":"StopDataCollectionResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "UpdateApplication":{ + "name":"UpdateApplication", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateApplicationRequest"}, + "output":{"shape":"UpdateApplicationResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "UpdateAssessment":{ + "name":"UpdateAssessment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateAssessmentRequest"}, + "output":{"shape":"UpdateAssessmentResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "AddAttributesToFindingsRequest":{ + 
"type":"structure", + "required":[ + "findingArns", + "attributes" + ], + "members":{ + "findingArns":{"shape":"ArnList"}, + "attributes":{"shape":"AttributeList"} + } + }, + "AddAttributesToFindingsResponse":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + } + }, + "Agent":{ + "type":"structure", + "members":{ + "agentId":{"shape":"AgentId"}, + "assessmentArn":{"shape":"Arn"}, + "agentHealth":{"shape":"AgentHealth"}, + "agentHealthCode":{"shape":"AgentHealthCode"}, + "agentHealthDetails":{"shape":"AgentHealthDetails"}, + "autoScalingGroup":{"shape":"AutoScalingGroup"}, + "accountId":{"shape":"AwsAccount"}, + "telemetry":{"shape":"TelemetryList"} + } + }, + "AgentHealth":{"type":"string"}, + "AgentHealthCode":{"type":"string"}, + "AgentHealthDetails":{"type":"string"}, + "AgentHealthList":{ + "type":"list", + "member":{"shape":"AgentHealth"} + }, + "AgentId":{"type":"string"}, + "AgentList":{ + "type":"list", + "member":{"shape":"Agent"} + }, + "AgentPreview":{ + "type":"structure", + "members":{ + "agentId":{"shape":"AgentId"}, + "autoScalingGroup":{"shape":"AutoScalingGroup"} + } + }, + "AgentPreviewList":{ + "type":"list", + "member":{"shape":"AgentPreview"} + }, + "AgentsFilter":{ + "type":"structure", + "members":{ + "agentHealthList":{"shape":"AgentHealthList"} + } + }, + "Application":{ + "type":"structure", + "members":{ + "applicationArn":{"shape":"Arn"}, + "applicationName":{"shape":"Name"}, + "resourceGroupArn":{"shape":"Arn"} + } + }, + "ApplicationsFilter":{ + "type":"structure", + "members":{ + "applicationNamePatterns":{"shape":"NamePatternList"} + } + }, + "Arn":{"type":"string"}, + "ArnList":{ + "type":"list", + "member":{"shape":"Arn"} + }, + "Assessment":{ + "type":"structure", + "members":{ + "assessmentArn":{"shape":"Arn"}, + "assessmentName":{"shape":"Name"}, + "applicationArn":{"shape":"Arn"}, + "assessmentState":{"shape":"AssessmentState"}, + "failureMessage":{"shape":"FailureMessage"}, + "dataCollected":{"shape":"Bool"}, + "startTime":{"shape":"Timestamp"}, + "endTime":{"shape":"Timestamp"}, + "durationInSeconds":{"shape":"Duration"}, + "userAttributesForFindings":{"shape":"AttributeList"} + } + }, + "AssessmentState":{"type":"string"}, + "AssessmentStateList":{ + "type":"list", + "member":{"shape":"AssessmentState"} + }, + "AssessmentsFilter":{ + "type":"structure", + "members":{ + "assessmentNamePatterns":{"shape":"NamePatternList"}, + "assessmentStates":{"shape":"AssessmentStateList"}, + "dataCollected":{"shape":"Bool"}, + "startTimeRange":{"shape":"TimestampRange"}, + "endTimeRange":{"shape":"TimestampRange"}, + "durationRange":{"shape":"DurationRange"} + } + }, + "AttachAssessmentAndRulesPackageRequest":{ + "type":"structure", + "required":[ + "assessmentArn", + "rulesPackageArn" + ], + "members":{ + "assessmentArn":{"shape":"Arn"}, + "rulesPackageArn":{"shape":"Arn"} + } + }, + "AttachAssessmentAndRulesPackageResponse":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + } + }, + "Attribute":{ + "type":"structure", + "members":{ + "key":{"shape":"AttributeKey"}, + "value":{"shape":"AttributeValue"} + } + }, + "AttributeKey":{"type":"string"}, + "AttributeKeyList":{ + "type":"list", + "member":{"shape":"AttributeKey"} + }, + "AttributeList":{ + "type":"list", + "member":{"shape":"Attribute"} + }, + "AttributeValue":{"type":"string"}, + "AutoScalingGroup":{"type":"string"}, + "AwsAccount":{"type":"string"}, + "Bool":{"type":"boolean"}, + "CreateApplicationRequest":{ + "type":"structure", + "required":[ + 
"applicationName", + "resourceGroupArn" + ], + "members":{ + "applicationName":{"shape":"Name"}, + "resourceGroupArn":{"shape":"Arn"} + } + }, + "CreateApplicationResponse":{ + "type":"structure", + "members":{ + "applicationArn":{"shape":"Arn"} + } + }, + "CreateAssessmentRequest":{ + "type":"structure", + "required":[ + "applicationArn", + "assessmentName", + "durationInSeconds" + ], + "members":{ + "applicationArn":{"shape":"Arn"}, + "assessmentName":{"shape":"Name"}, + "durationInSeconds":{"shape":"Duration"}, + "userAttributesForFindings":{"shape":"AttributeList"} + } + }, + "CreateAssessmentResponse":{ + "type":"structure", + "members":{ + "assessmentArn":{"shape":"Arn"} + } + }, + "CreateResourceGroupRequest":{ + "type":"structure", + "required":["resourceGroupTags"], + "members":{ + "resourceGroupTags":{"shape":"ResourceGroupTags"} + } + }, + "CreateResourceGroupResponse":{ + "type":"structure", + "members":{ + "resourceGroupArn":{"shape":"Arn"} + } + }, + "DeleteApplicationRequest":{ + "type":"structure", + "required":["applicationArn"], + "members":{ + "applicationArn":{"shape":"Arn"} + } + }, + "DeleteApplicationResponse":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + } + }, + "DeleteAssessmentRequest":{ + "type":"structure", + "required":["assessmentArn"], + "members":{ + "assessmentArn":{"shape":"Arn"} + } + }, + "DeleteAssessmentResponse":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + } + }, + "DeleteRunRequest":{ + "type":"structure", + "required":["runArn"], + "members":{ + "runArn":{"shape":"Arn"} + } + }, + "DeleteRunResponse":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + } + }, + "DescribeApplicationRequest":{ + "type":"structure", + "required":["applicationArn"], + "members":{ + "applicationArn":{"shape":"Arn"} + } + }, + "DescribeApplicationResponse":{ + "type":"structure", + "members":{ + "application":{"shape":"Application"} + } + }, + "DescribeAssessmentRequest":{ + "type":"structure", + "required":["assessmentArn"], + "members":{ + "assessmentArn":{"shape":"Arn"} + } + }, + "DescribeAssessmentResponse":{ + "type":"structure", + "members":{ + "assessment":{"shape":"Assessment"} + } + }, + "DescribeCrossAccountAccessRoleResponse":{ + "type":"structure", + "members":{ + "roleArn":{"shape":"Arn"}, + "valid":{"shape":"Bool"} + } + }, + "DescribeFindingRequest":{ + "type":"structure", + "required":["findingArn"], + "members":{ + "findingArn":{"shape":"Arn"} + } + }, + "DescribeFindingResponse":{ + "type":"structure", + "members":{ + "finding":{"shape":"Finding"} + } + }, + "DescribeResourceGroupRequest":{ + "type":"structure", + "required":["resourceGroupArn"], + "members":{ + "resourceGroupArn":{"shape":"Arn"} + } + }, + "DescribeResourceGroupResponse":{ + "type":"structure", + "members":{ + "resourceGroup":{"shape":"ResourceGroup"} + } + }, + "DescribeRulesPackageRequest":{ + "type":"structure", + "required":["rulesPackageArn"], + "members":{ + "rulesPackageArn":{"shape":"Arn"} + } + }, + "DescribeRulesPackageResponse":{ + "type":"structure", + "members":{ + "rulesPackage":{"shape":"RulesPackage"} + } + }, + "DescribeRunRequest":{ + "type":"structure", + "required":["runArn"], + "members":{ + "runArn":{"shape":"Arn"} + } + }, + "DescribeRunResponse":{ + "type":"structure", + "members":{ + "run":{"shape":"Run"} + } + }, + "DetachAssessmentAndRulesPackageRequest":{ + "type":"structure", + "required":[ + "assessmentArn", + "rulesPackageArn" + ], + "members":{ + "assessmentArn":{"shape":"Arn"}, + 
"rulesPackageArn":{"shape":"Arn"} + } + }, + "DetachAssessmentAndRulesPackageResponse":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + } + }, + "Duration":{"type":"integer"}, + "DurationRange":{ + "type":"structure", + "members":{ + "minimum":{"shape":"Duration"}, + "maximum":{"shape":"Duration"} + } + }, + "FailureMessage":{"type":"string"}, + "Finding":{ + "type":"structure", + "members":{ + "findingArn":{"shape":"Arn"}, + "runArn":{"shape":"Arn"}, + "rulesPackageArn":{"shape":"Arn"}, + "ruleName":{"shape":"Name"}, + "agentId":{"shape":"AgentId"}, + "autoScalingGroup":{"shape":"AutoScalingGroup"}, + "severity":{"shape":"Severity"}, + "finding":{"shape":"LocalizedText"}, + "description":{"shape":"LocalizedText"}, + "recommendation":{"shape":"LocalizedText"}, + "attributes":{"shape":"AttributeList"}, + "userAttributes":{"shape":"AttributeList"} + } + }, + "FindingsFilter":{ + "type":"structure", + "members":{ + "rulesPackageArns":{"shape":"ArnList"}, + "ruleNames":{"shape":"NameList"}, + "severities":{"shape":"SeverityList"}, + "attributes":{"shape":"AttributeList"}, + "userAttributes":{"shape":"AttributeList"} + } + }, + "GetAssessmentTelemetryRequest":{ + "type":"structure", + "required":["assessmentArn"], + "members":{ + "assessmentArn":{"shape":"Arn"} + } + }, + "GetAssessmentTelemetryResponse":{ + "type":"structure", + "members":{ + "telemetry":{"shape":"TelemetryList"} + } + }, + "Integer":{"type":"integer"}, + "InternalException":{ + "type":"structure", + "members":{ + }, + "exception":true, + "fault":true + }, + "InvalidCrossAccountRoleException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidInputException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ListApplicationsRequest":{ + "type":"structure", + "members":{ + "filter":{"shape":"ApplicationsFilter"}, + "nextToken":{"shape":"PaginationToken"}, + "maxResults":{"shape":"Integer"} + } + }, + "ListApplicationsResponse":{ + "type":"structure", + "members":{ + "applicationArnList":{"shape":"ArnList"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListAssessmentAgentsRequest":{ + "type":"structure", + "required":["assessmentArn"], + "members":{ + "assessmentArn":{"shape":"Arn"}, + "filter":{"shape":"AgentsFilter"}, + "nextToken":{"shape":"PaginationToken"}, + "maxResults":{"shape":"Integer"} + } + }, + "ListAssessmentAgentsResponse":{ + "type":"structure", + "members":{ + "agentList":{"shape":"AgentList"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListAssessmentsRequest":{ + "type":"structure", + "members":{ + "applicationArns":{"shape":"ArnList"}, + "filter":{"shape":"AssessmentsFilter"}, + "nextToken":{"shape":"PaginationToken"}, + "maxResults":{"shape":"Integer"} + } + }, + "ListAssessmentsResponse":{ + "type":"structure", + "members":{ + "assessmentArnList":{"shape":"ArnList"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListAttachedAssessmentsRequest":{ + "type":"structure", + "required":["rulesPackageArn"], + "members":{ + "rulesPackageArn":{"shape":"Arn"}, + "filter":{"shape":"AssessmentsFilter"}, + "nextToken":{"shape":"PaginationToken"}, + "maxResults":{"shape":"Integer"} + } + }, + "ListAttachedAssessmentsResponse":{ + "type":"structure", + "members":{ + "assessmentArnList":{"shape":"ArnList"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListAttachedRulesPackagesRequest":{ + "type":"structure", + "required":["assessmentArn"], + "members":{ + "assessmentArn":{"shape":"Arn"}, + 
"nextToken":{"shape":"PaginationToken"}, + "maxResults":{"shape":"Integer"} + } + }, + "ListAttachedRulesPackagesResponse":{ + "type":"structure", + "members":{ + "rulesPackageArnList":{"shape":"ArnList"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListFindingsRequest":{ + "type":"structure", + "members":{ + "runArns":{"shape":"ArnList"}, + "filter":{"shape":"FindingsFilter"}, + "nextToken":{"shape":"PaginationToken"}, + "maxResults":{"shape":"Integer"} + } + }, + "ListFindingsResponse":{ + "type":"structure", + "members":{ + "findingArnList":{"shape":"ArnList"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListRulesPackagesRequest":{ + "type":"structure", + "members":{ + "nextToken":{"shape":"PaginationToken"}, + "maxResults":{"shape":"Integer"} + } + }, + "ListRulesPackagesResponse":{ + "type":"structure", + "members":{ + "rulesPackageArnList":{"shape":"ArnList"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListRunsRequest":{ + "type":"structure", + "members":{ + "assessmentArns":{"shape":"ArnList"}, + "filter":{"shape":"RunsFilter"}, + "nextToken":{"shape":"PaginationToken"}, + "maxResults":{"shape":"Integer"} + } + }, + "ListRunsResponse":{ + "type":"structure", + "members":{ + "runArnList":{"shape":"ArnList"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{"shape":"Arn"} + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tagList":{"shape":"TagList"} + } + }, + "Locale":{"type":"string"}, + "LocalizeTextRequest":{ + "type":"structure", + "required":[ + "localizedTexts", + "locale" + ], + "members":{ + "localizedTexts":{"shape":"LocalizedTextList"}, + "locale":{"shape":"Locale"} + } + }, + "LocalizeTextResponse":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"}, + "results":{"shape":"TextList"} + } + }, + "LocalizedFacility":{"type":"string"}, + "LocalizedText":{ + "type":"structure", + "members":{ + "key":{"shape":"LocalizedTextKey"}, + "parameters":{"shape":"ParameterList"} + } + }, + "LocalizedTextId":{"type":"string"}, + "LocalizedTextKey":{ + "type":"structure", + "members":{ + "facility":{"shape":"LocalizedFacility"}, + "id":{"shape":"LocalizedTextId"} + } + }, + "LocalizedTextList":{ + "type":"list", + "member":{"shape":"LocalizedText"} + }, + "Long":{"type":"long"}, + "Message":{"type":"string"}, + "MessageType":{"type":"string"}, + "MessageTypeTelemetry":{ + "type":"structure", + "members":{ + "messageType":{"shape":"MessageType"}, + "count":{"shape":"Long"}, + "dataSize":{"shape":"Long"} + } + }, + "MessageTypeTelemetryList":{ + "type":"list", + "member":{"shape":"MessageTypeTelemetry"} + }, + "Name":{"type":"string"}, + "NameList":{ + "type":"list", + "member":{"shape":"Name"} + }, + "NamePattern":{"type":"string"}, + "NamePatternList":{ + "type":"list", + "member":{"shape":"NamePattern"} + }, + "NoSuchEntityException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "OperationInProgressException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "PaginationToken":{"type":"string"}, + "Parameter":{ + "type":"structure", + "members":{ + "name":{"shape":"ParameterName"}, + "value":{"shape":"ParameterValue"} + } + }, + "ParameterList":{ + "type":"list", + "member":{"shape":"Parameter"} + }, + "ParameterName":{"type":"string"}, + "ParameterValue":{"type":"string"}, + "PreviewAgentsForResourceGroupRequest":{ + "type":"structure", + 
"required":["resourceGroupArn"], + "members":{ + "resourceGroupArn":{"shape":"Arn"}, + "nextToken":{"shape":"PaginationToken"}, + "maxResults":{"shape":"Integer"} + } + }, + "PreviewAgentsForResourceGroupResponse":{ + "type":"structure", + "members":{ + "agentPreviewList":{"shape":"AgentPreviewList"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "RegisterCrossAccountAccessRoleRequest":{ + "type":"structure", + "required":["roleArn"], + "members":{ + "roleArn":{"shape":"Arn"} + } + }, + "RegisterCrossAccountAccessRoleResponse":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + } + }, + "RemoveAttributesFromFindingsRequest":{ + "type":"structure", + "required":[ + "findingArns", + "attributeKeys" + ], + "members":{ + "findingArns":{"shape":"ArnList"}, + "attributeKeys":{"shape":"AttributeKeyList"} + } + }, + "RemoveAttributesFromFindingsResponse":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + } + }, + "ResourceGroup":{ + "type":"structure", + "members":{ + "resourceGroupArn":{"shape":"Arn"}, + "resourceGroupTags":{"shape":"ResourceGroupTags"} + } + }, + "ResourceGroupTags":{"type":"string"}, + "RulesPackage":{ + "type":"structure", + "members":{ + "rulesPackageArn":{"shape":"Arn"}, + "rulesPackageName":{"shape":"Name"}, + "version":{"shape":"Version"}, + "provider":{"shape":"Name"}, + "description":{"shape":"LocalizedText"} + } + }, + "Run":{ + "type":"structure", + "members":{ + "runArn":{"shape":"Arn"}, + "runName":{"shape":"Name"}, + "assessmentArn":{"shape":"Arn"}, + "runState":{"shape":"RunState"}, + "rulesPackages":{"shape":"ArnList"}, + "creationTime":{"shape":"Timestamp"}, + "completionTime":{"shape":"Timestamp"} + } + }, + "RunAssessmentRequest":{ + "type":"structure", + "required":[ + "assessmentArn", + "runName" + ], + "members":{ + "assessmentArn":{"shape":"Arn"}, + "runName":{"shape":"Name"} + } + }, + "RunAssessmentResponse":{ + "type":"structure", + "members":{ + "runArn":{"shape":"Arn"} + } + }, + "RunState":{"type":"string"}, + "RunStateList":{ + "type":"list", + "member":{"shape":"RunState"} + }, + "RunsFilter":{ + "type":"structure", + "members":{ + "runNamePatterns":{"shape":"NamePatternList"}, + "runStates":{"shape":"RunStateList"}, + "rulesPackages":{"shape":"ArnList"}, + "creationTime":{"shape":"TimestampRange"}, + "completionTime":{"shape":"TimestampRange"} + } + }, + "SetTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{"shape":"Arn"}, + "tags":{"shape":"TagList"} + } + }, + "SetTagsForResourceResponse":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + } + }, + "Severity":{"type":"string"}, + "SeverityList":{ + "type":"list", + "member":{"shape":"Severity"} + }, + "StartDataCollectionRequest":{ + "type":"structure", + "required":["assessmentArn"], + "members":{ + "assessmentArn":{"shape":"Arn"} + } + }, + "StartDataCollectionResponse":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + } + }, + "StopDataCollectionRequest":{ + "type":"structure", + "required":["assessmentArn"], + "members":{ + "assessmentArn":{"shape":"Arn"} + } + }, + "StopDataCollectionResponse":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + } + }, + "Tag":{ + "type":"structure", + "members":{ + "Key":{"shape":"TagKey"}, + "Value":{"shape":"TagValue"} + } + }, + "TagKey":{"type":"string"}, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"} + }, + "TagValue":{"type":"string"}, + "Telemetry":{ + "type":"structure", + 
"members":{ + "status":{"shape":"TelemetryStatus"}, + "messageTypeTelemetries":{"shape":"MessageTypeTelemetryList"} + } + }, + "TelemetryList":{ + "type":"list", + "member":{"shape":"Telemetry"} + }, + "TelemetryStatus":{"type":"string"}, + "Text":{"type":"string"}, + "TextList":{ + "type":"list", + "member":{"shape":"Text"} + }, + "Timestamp":{"type":"timestamp"}, + "TimestampRange":{ + "type":"structure", + "members":{ + "minimum":{"shape":"Timestamp"}, + "maximum":{"shape":"Timestamp"} + } + }, + "UpdateApplicationRequest":{ + "type":"structure", + "required":[ + "applicationArn", + "applicationName", + "resourceGroupArn" + ], + "members":{ + "applicationArn":{"shape":"Arn"}, + "applicationName":{"shape":"Name"}, + "resourceGroupArn":{"shape":"Arn"} + } + }, + "UpdateApplicationResponse":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + } + }, + "UpdateAssessmentRequest":{ + "type":"structure", + "required":[ + "assessmentArn", + "assessmentName", + "durationInSeconds" + ], + "members":{ + "assessmentArn":{"shape":"Arn"}, + "assessmentName":{"shape":"Name"}, + "durationInSeconds":{"shape":"Duration"} + } + }, + "UpdateAssessmentResponse":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + } + }, + "Version":{"type":"string"} + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/inspector/2015-08-18/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/inspector/2015-08-18/docs-2.json new file mode 100644 index 000000000..a7ef77a7e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/inspector/2015-08-18/docs-2.json @@ -0,0 +1,1016 @@ +{ + "version": "2.0", + "service": "Amazon Inspector

    Amazon Inspector enables you to analyze the behavior of the applications you run in AWS and to identify potential security issues. For more information, see Amazon Inspector User Guide.

    ", + "operations": { + "AddAttributesToFindings": "

    Assigns attributes (key and value pair) to the findings specified by the findings' ARNs.

    ", + "AttachAssessmentAndRulesPackage": "

    Attaches the rules package specified by the rules package ARN to the assessment specified by the assessment ARN.

    ", + "CreateApplication": "

    Creates a new application using the resource group ARN generated by CreateResourceGroup. You can create up to 50 applications per AWS account. You can run up to 500 concurrent agents per AWS account. For more information, see Inspector Applications.

    ", + "CreateAssessment": "

    Creates an assessment for the application specified by the application ARN. You can create up to 500 assessments per AWS account.

    ", + "CreateResourceGroup": "

    Creates a resource group using the specified set of tags (key and value pairs) that are used to select the EC2 instances to be included in an Inspector application. The created resource group is then used to create an Inspector application.

    ", + "DeleteApplication": "

    Deletes the application specified by the application ARN.

    ", + "DeleteAssessment": "

    Deletes the assessment specified by the assessment ARN.

    ", + "DeleteRun": "

    Deletes the assessment run specified by the run ARN.

    ", + "DescribeApplication": "

    Describes the application specified by the application ARN.

    ", + "DescribeAssessment": "

    Describes the assessment specified by the assessment ARN.

    ", + "DescribeCrossAccountAccessRole": "

    Describes the IAM role that enables Inspector to access your AWS account.

    ", + "DescribeFinding": "

    Describes the finding specified by the finding ARN.

    ", + "DescribeResourceGroup": "

    Describes the resource group specified by the resource group ARN.

    ", + "DescribeRulesPackage": "

    Describes the rules package specified by the rules package ARN.

    ", + "DescribeRun": "

    Describes the assessment run specified by the run ARN.

    ", + "DetachAssessmentAndRulesPackage": "

    Detaches the rules package specified by the rules package ARN from the assessment specified by the assessment ARN.

    ", + "GetAssessmentTelemetry": "

    Returns the metadata about the telemetry (application behavioral data) for the assessment specified by the assessment ARN.

    ", + "ListApplications": "

    Lists the ARNs of the applications within this AWS account. For more information about applications, see Inspector Applications.

    ", + "ListAssessmentAgents": "

    Lists the agents of the assessment specified by the assessment ARN.

    ", + "ListAssessments": "

    Lists the assessments corresponding to applications specified by the applications' ARNs.

    ", + "ListAttachedAssessments": "

    Lists the assessments attached to the rules package specified by the rules package ARN.

    ", + "ListAttachedRulesPackages": "

    Lists the rules packages attached to the assessment specified by the assessment ARN.

    ", + "ListFindings": "

    Lists findings generated by the assessment run specified by the run ARNs.

    ", + "ListRulesPackages": "

    Lists all available Inspector rules packages.

    ", + "ListRuns": "

    Lists the assessment runs associated with the assessments specified by the assessment ARNs.

    ", + "ListTagsForResource": "

    Lists all tags associated with a resource.

    ", + "LocalizeText": "

    Translates a textual identifier into a user-readable text in a specified locale.

    ", + "PreviewAgentsForResourceGroup": "

    Previews the agents installed on the EC2 instances that are included in the application created with the specified resource group.

    ", + "RegisterCrossAccountAccessRole": "

    Register the role that Inspector uses to list your EC2 instances during the assessment.

    ", + "RemoveAttributesFromFindings": "

    Removes the entire attribute (key and value pair) from the findings specified by the finding ARNs where an attribute with the specified key exists.

    ", + "RunAssessment": "

    Starts the analysis of the application’s behavior against selected rule packages for the assessment specified by the assessment ARN.

    ", + "SetTagsForResource": "

    Sets tags (key and value pairs) to the assessment specified by the assessment ARN.

    ", + "StartDataCollection": "

    Starts data collection for the assessment specified by the assessment ARN. For this API to function properly, you must not exceed the limit of running up to 500 concurrent agents per AWS account.

    ", + "StopDataCollection": "

    Stop data collection for the assessment specified by the assessment ARN.

    ", + "UpdateApplication": "

    Updates application specified by the application ARN.

    ", + "UpdateAssessment": "

    Updates the assessment specified by the assessment ARN.

    " + }, + "shapes": { + "AccessDeniedException": { + "base": null, + "refs": { + } + }, + "AddAttributesToFindingsRequest": { + "base": null, + "refs": { + } + }, + "AddAttributesToFindingsResponse": { + "base": null, + "refs": { + } + }, + "Agent": { + "base": "

    Contains information about an Inspector agent. This data type is used as a response element in the ListAssessmentAgents action.

    ", + "refs": { + "AgentList$member": null + } + }, + "AgentHealth": { + "base": null, + "refs": { + "Agent$agentHealth": "

    The current health state of the agent. Values can be set to HEALTHY or UNHEALTHY.

    ", + "AgentHealthList$member": null + } + }, + "AgentHealthCode": { + "base": null, + "refs": { + "Agent$agentHealthCode": "

    The detailed health state of the agent. Values can be set to RUNNING, HEALTHY, UNHEALTHY, UNKNOWN, BLACKLISTED, SHUTDOWN, THROTTLED.

    " + } + }, + "AgentHealthDetails": { + "base": null, + "refs": { + "Agent$agentHealthDetails": "

    The description for the agent health code.

    " + } + }, + "AgentHealthList": { + "base": null, + "refs": { + "AgentsFilter$agentHealthList": "

    For a record to match a filter, the value specified for this data type property must be the exact match of the value of the agentHealth property of the Agent data type.

    " + } + }, + "AgentId": { + "base": null, + "refs": { + "Agent$agentId": "

    The EC2 instance ID where the agent is installed.

    ", + "AgentPreview$agentId": "

    The id of the EC2 instance where the agent is intalled.

    ", + "Finding$agentId": "

    The EC2 instance ID where the agent is installed that is used during the assessment that generates the finding.

    " + } + }, + "AgentList": { + "base": null, + "refs": { + "ListAssessmentAgentsResponse$agentList": "

    A list of ARNs specifying the agents returned by the action.

    " + } + }, + "AgentPreview": { + "base": "

    This data type is used as a response element in the PreviewAgentsForResourceGroup action.

    ", + "refs": { + "AgentPreviewList$member": null + } + }, + "AgentPreviewList": { + "base": null, + "refs": { + "PreviewAgentsForResourceGroupResponse$agentPreviewList": "

    The resulting list of agents.

    " + } + }, + "AgentsFilter": { + "base": "

    This data type is used as a response element in the ListAssessmentAgents action.

    ", + "refs": { + "ListAssessmentAgentsRequest$filter": "

    You can use this parameter to specify a subset of data to be included in the action's response.

    For a record to match a filter, all specified filter attributes must match. When multiple values are specified for a filter attribute, any of the values can match.

    " + } + }, + "Application": { + "base": "

    Contains information about an Inspector application.

    This data type is used as the response element in the DescribeApplication action.

    ", + "refs": { + "DescribeApplicationResponse$application": "

    Information about the application.

    " + } + }, + "ApplicationsFilter": { + "base": "

    This data type is used as the request parameter in the ListApplications action.

    ", + "refs": { + "ListApplicationsRequest$filter": "

    You can use this parameter to specify a subset of data to be included in the action's response.

    For a record to match a filter, all specified filter attributes must match. When multiple values are specified for a filter attribute, any of the values can match.

    " + } + }, + "Arn": { + "base": null, + "refs": { + "Agent$assessmentArn": "

    The ARN of the assessment that is associated with the agent.

    ", + "Application$applicationArn": "

    The ARN specifying the Inspector application.

    ", + "Application$resourceGroupArn": "

    The ARN specifying the resource group that is associated with the application.

    ", + "ArnList$member": null, + "Assessment$assessmentArn": "

    The ARN of the assessment.

    ", + "Assessment$applicationArn": "

    The ARN of the application that corresponds to this assessment.

    ", + "AttachAssessmentAndRulesPackageRequest$assessmentArn": "

    The ARN specifying the assessment to which you want to attach a rules package.

    ", + "AttachAssessmentAndRulesPackageRequest$rulesPackageArn": "

    The ARN specifying the rules package that you want to attach to the assessment.

    ", + "CreateApplicationRequest$resourceGroupArn": "

    The ARN specifying the resource group that is used to create the application.

    ", + "CreateApplicationResponse$applicationArn": "

    The ARN specifying the application that is created.

    ", + "CreateAssessmentRequest$applicationArn": "

    The ARN specifying the application for which you want to create an assessment.

    ", + "CreateAssessmentResponse$assessmentArn": "

    The ARN specifying the assessment that is created.

    ", + "CreateResourceGroupResponse$resourceGroupArn": "

    The ARN specifying the resource group that is created.

    ", + "DeleteApplicationRequest$applicationArn": "

    The ARN specifying the application that you want to delete.

    ", + "DeleteAssessmentRequest$assessmentArn": "

    The ARN specifying the assessment that you want to delete.

    ", + "DeleteRunRequest$runArn": "

    The ARN specifying the assessment run that you want to delete.

    ", + "DescribeApplicationRequest$applicationArn": "

    The ARN specifying the application that you want to describe.

    ", + "DescribeAssessmentRequest$assessmentArn": "

    The ARN specifying the assessment that you want to describe.

    ", + "DescribeCrossAccountAccessRoleResponse$roleArn": "

    The ARN specifying the IAM role that Inspector uses to access your AWS account.

    ", + "DescribeFindingRequest$findingArn": "

    The ARN specifying the finding that you want to describe.

    ", + "DescribeResourceGroupRequest$resourceGroupArn": "

    The ARN specifying the resource group that you want to describe.

    ", + "DescribeRulesPackageRequest$rulesPackageArn": "

    The ARN specifying the rules package that you want to describe.

    ", + "DescribeRunRequest$runArn": "

    The ARN specifying the assessment run that you want to describe.

    ", + "DetachAssessmentAndRulesPackageRequest$assessmentArn": "

    The ARN specifying the assessment from which you want to detach a rules package.

    ", + "DetachAssessmentAndRulesPackageRequest$rulesPackageArn": "

    The ARN specifying the rules package that you want to detach from the assessment.

    ", + "Finding$findingArn": "

    The ARN specifying the finding.

    ", + "Finding$runArn": "

    The ARN of the assessment run that generated the finding.

    ", + "Finding$rulesPackageArn": "

    The ARN of the rules package that is used to generate the finding.

    ", + "GetAssessmentTelemetryRequest$assessmentArn": "

    The ARN specifying the assessment the telemetry of which you want to obtain.

    ", + "ListAssessmentAgentsRequest$assessmentArn": "

    The ARN specifying the assessment whose agents you want to list.

    ", + "ListAttachedAssessmentsRequest$rulesPackageArn": "

    The ARN specifying the rules package whose assessments you want to list.

    ", + "ListAttachedRulesPackagesRequest$assessmentArn": "

    The ARN specifying the assessment whose rules packages you want to list.

    ", + "ListTagsForResourceRequest$resourceArn": "

    The ARN specifying the resource whose tags you want to list.

    ", + "PreviewAgentsForResourceGroupRequest$resourceGroupArn": "

    The ARN of the resource group that is used to create an application.

    ", + "RegisterCrossAccountAccessRoleRequest$roleArn": "The ARN of the IAM role that Inspector uses to list your EC2 instances during the assessment.", + "ResourceGroup$resourceGroupArn": "

    The ARN of the resource group.

    ", + "RulesPackage$rulesPackageArn": "

    The ARN of the rules package.

    ", + "Run$runArn": "

    The ARN of the run.

    ", + "Run$assessmentArn": "

    The ARN of the assessment that is associated with the run.

    ", + "RunAssessmentRequest$assessmentArn": "

    The ARN of the assessment that you want to run.

    ", + "RunAssessmentResponse$runArn": "

    The ARN specifying the run of the assessment.

    ", + "SetTagsForResourceRequest$resourceArn": "

    The ARN of the assessment that you want to set tags to.

    ", + "StartDataCollectionRequest$assessmentArn": "

    The ARN of the assessment for which you want to start the data collection process.

    ", + "StopDataCollectionRequest$assessmentArn": "

    The ARN of the assessment for which you want to stop the data collection process.

    ", + "UpdateApplicationRequest$applicationArn": "

    Application ARN that you want to update.

    ", + "UpdateApplicationRequest$resourceGroupArn": "

    The resource group ARN that you want to update.

    ", + "UpdateAssessmentRequest$assessmentArn": "

    Asessment ARN that you want to update.

    " + } + }, + "ArnList": { + "base": null, + "refs": { + "AddAttributesToFindingsRequest$findingArns": "

    The ARNs specifying the findings that you want to assign attributes to.

    ", + "FindingsFilter$rulesPackageArns": "

    For a record to match a filter, the value specified for this data type property must be the exact match of the value of the rulesPackageArn property of the Finding data type.

    ", + "ListApplicationsResponse$applicationArnList": "

    A list of ARNs specifying the applications returned by the action.

    ", + "ListAssessmentsRequest$applicationArns": "

    A list of ARNs specifying the applications the assessments of which you want to list.

    ", + "ListAssessmentsResponse$assessmentArnList": "

    A list of ARNs specifying the assessments returned by the action.

    ", + "ListAttachedAssessmentsResponse$assessmentArnList": "

    A list of ARNs specifying the assessments returned by the action.

    ", + "ListAttachedRulesPackagesResponse$rulesPackageArnList": "

    A list of ARNs specifying the rules packages returned by the action.

    ", + "ListFindingsRequest$runArns": "

    The ARNs of the assessment runs that generate the findings that you want to list.

    ", + "ListFindingsResponse$findingArnList": "

    A list of ARNs specifying the findings returned by the action.

    ", + "ListRulesPackagesResponse$rulesPackageArnList": "

    The list of ARNs specifying the rules packages returned by the action.

    ", + "ListRunsRequest$assessmentArns": "

    The ARNs specifying the assessments whose runs you want to list.

    ", + "ListRunsResponse$runArnList": "

    A list of ARNs specifying the assessment runs returned by the action.

    ", + "RemoveAttributesFromFindingsRequest$findingArns": "

    The ARNs specifying the findings that you want to remove attributes from.

    ", + "Run$rulesPackages": "

    Rules packages selected for the run of the assessment.

    ", + "RunsFilter$rulesPackages": "

    For a record to match a filter, the value specified for this data type property must match a list of values of the rulesPackages property of the Run data type.

    " + } + }, + "Assessment": { + "base": "

    Contains information about an Inspector assessment.

    This data type is used as the response element in the DescribeAssessment action.

    ", + "refs": { + "DescribeAssessmentResponse$assessment": "

    Information about the assessment.

    " + } + }, + "AssessmentState": { + "base": null, + "refs": { + "Assessment$assessmentState": "

    The state of the assessment. Values can be set to Created, Collecting Data, Stopping, and Completed.

    ", + "AssessmentStateList$member": null + } + }, + "AssessmentStateList": { + "base": null, + "refs": { + "AssessmentsFilter$assessmentStates": "

    For a record to match a filter, the value specified for this data type property must be the exact match of the value of the assessmentState property of the Assessment data type.

    " + } + }, + "AssessmentsFilter": { + "base": "

    This data type is used as the request parameter in the ListAssessments and ListAttachedAssessments actions.

    ", + "refs": { + "ListAssessmentsRequest$filter": "

    You can use this parameter to specify a subset of data to be included in the action's response.

    For a record to match a filter, all specified filter attributes must match. When multiple values are specified for a filter attribute, any of the values can match.

    ", + "ListAttachedAssessmentsRequest$filter": "

    You can use this parameter to specify a subset of data to be included in the action's response.

    For a record to match a filter, all specified filter attributes must match. When multiple values are specified for a filter attribute, any of the values can match.

    " + } + }, + "AttachAssessmentAndRulesPackageRequest": { + "base": null, + "refs": { + } + }, + "AttachAssessmentAndRulesPackageResponse": { + "base": null, + "refs": { + } + }, + "Attribute": { + "base": "

    This data type is used as a response element in the AddAttributesToFindings action and a request parameter in the CreateAssessment action.

    ", + "refs": { + "AttributeList$member": null + } + }, + "AttributeKey": { + "base": null, + "refs": { + "Attribute$key": "

    The attribute key.

    ", + "AttributeKeyList$member": null + } + }, + "AttributeKeyList": { + "base": null, + "refs": { + "RemoveAttributesFromFindingsRequest$attributeKeys": "

    The array of attribute keys that you want to remove from specified findings.

    " + } + }, + "AttributeList": { + "base": null, + "refs": { + "AddAttributesToFindingsRequest$attributes": "

    The array of attributes that you want to assign to specified findings.

    ", + "Assessment$userAttributesForFindings": "

    The user-defined attributes that are assigned to every generated finding.

    ", + "CreateAssessmentRequest$userAttributesForFindings": "

    The user-defined attributes that are assigned to every finding generated by running this assessment.

    ", + "Finding$attributes": "

    The system-defined attributes for the finding.

    ", + "Finding$userAttributes": "

    The user-defined attributes that are assigned to the finding.

    ", + "FindingsFilter$attributes": "

    For a record to match a filter, the value specified for this data type property must be the exact match of the value of the attributes property of the Finding data type.

    ", + "FindingsFilter$userAttributes": "

    For a record to match a filter, the value specified for this data type property must be the exact match of the value of the userAttributes property of the Finding data type.

    " + } + }, + "AttributeValue": { + "base": null, + "refs": { + "Attribute$value": "

    The value assigned to the attribute key.

    " + } + }, + "AutoScalingGroup": { + "base": null, + "refs": { + "Agent$autoScalingGroup": "

    This data type property is currently not used.

    ", + "AgentPreview$autoScalingGroup": "

    The autoscaling group for the EC2 instance where the agent is installed.

    ", + "Finding$autoScalingGroup": "

    The autoscaling group of the EC2 instance where the agent is installed that is used during the assessment that generates the finding.

    " + } + }, + "AwsAccount": { + "base": null, + "refs": { + "Agent$accountId": "

    AWS account of the EC2 instance where the agent is installed.

    " + } + }, + "Bool": { + "base": null, + "refs": { + "Assessment$dataCollected": "

    Boolean value (true or false) specifying whether the data collection process is completed.

    ", + "AssessmentsFilter$dataCollected": "

    For a record to match a filter, the value specified for this data type property must be the exact match of the value of the dataCollected property of the Assessment data type.

    ", + "DescribeCrossAccountAccessRoleResponse$valid": "

    A Boolean value that specifies whether the IAM role has the necessary policies attached to enable Inspector to access your AWS account.

    " + } + }, + "CreateApplicationRequest": { + "base": null, + "refs": { + } + }, + "CreateApplicationResponse": { + "base": null, + "refs": { + } + }, + "CreateAssessmentRequest": { + "base": null, + "refs": { + } + }, + "CreateAssessmentResponse": { + "base": null, + "refs": { + } + }, + "CreateResourceGroupRequest": { + "base": null, + "refs": { + } + }, + "CreateResourceGroupResponse": { + "base": null, + "refs": { + } + }, + "DeleteApplicationRequest": { + "base": null, + "refs": { + } + }, + "DeleteApplicationResponse": { + "base": null, + "refs": { + } + }, + "DeleteAssessmentRequest": { + "base": null, + "refs": { + } + }, + "DeleteAssessmentResponse": { + "base": null, + "refs": { + } + }, + "DeleteRunRequest": { + "base": null, + "refs": { + } + }, + "DeleteRunResponse": { + "base": null, + "refs": { + } + }, + "DescribeApplicationRequest": { + "base": null, + "refs": { + } + }, + "DescribeApplicationResponse": { + "base": null, + "refs": { + } + }, + "DescribeAssessmentRequest": { + "base": null, + "refs": { + } + }, + "DescribeAssessmentResponse": { + "base": null, + "refs": { + } + }, + "DescribeCrossAccountAccessRoleResponse": { + "base": null, + "refs": { + } + }, + "DescribeFindingRequest": { + "base": null, + "refs": { + } + }, + "DescribeFindingResponse": { + "base": null, + "refs": { + } + }, + "DescribeResourceGroupRequest": { + "base": null, + "refs": { + } + }, + "DescribeResourceGroupResponse": { + "base": null, + "refs": { + } + }, + "DescribeRulesPackageRequest": { + "base": null, + "refs": { + } + }, + "DescribeRulesPackageResponse": { + "base": null, + "refs": { + } + }, + "DescribeRunRequest": { + "base": null, + "refs": { + } + }, + "DescribeRunResponse": { + "base": null, + "refs": { + } + }, + "DetachAssessmentAndRulesPackageRequest": { + "base": null, + "refs": { + } + }, + "DetachAssessmentAndRulesPackageResponse": { + "base": null, + "refs": { + } + }, + "Duration": { + "base": null, + "refs": { + "Assessment$durationInSeconds": "

    The assessment duration in seconds. The default value is 3600 seconds (one hour). The maximum value is 86400 seconds (one day).

    ", + "CreateAssessmentRequest$durationInSeconds": "

    The duration of the assessment in seconds. The default value is 3600 seconds (one hour). The maximum value is 86400 seconds (one day).

    ", + "DurationRange$minimum": "

    The minimum value of the duration range. Must be greater than zero.

    ", + "DurationRange$maximum": "

    The maximum value of the duration range. Must be less than or equal to 604800 seconds (1 week).

    ", + "UpdateAssessmentRequest$durationInSeconds": "

    Assessment duration in seconds that you want to update. The default value is 3600 seconds (one hour). The maximum value is 86400 seconds (one day).

    " + } + }, + "DurationRange": { + "base": "

    This data type is used in the AssessmentsFilter data type.

    ", + "refs": { + "AssessmentsFilter$durationRange": "

    For a record to match a filter, the value specified for this data type property must inclusively match any value between the specified minimum and maximum values of the durationInSeconds property of the Assessment data type.

    " + } + }, + "FailureMessage": { + "base": null, + "refs": { + "Assessment$failureMessage": "

    This data type property is not currently used.

    " + } + }, + "Finding": { + "base": "

    Contains information about an Inspector finding.

    This data type is used as the response element in the DescribeFinding action.

    ", + "refs": { + "DescribeFindingResponse$finding": "

    Information about the finding.

    " + } + }, + "FindingsFilter": { + "base": "

    This data type is used as a request parameter in the ListFindings action.

    ", + "refs": { + "ListFindingsRequest$filter": "

    You can use this parameter to specify a subset of data to be included in the action's response.

    For a record to match a filter, all specified filter attributes must match. When multiple values are specified for a filter attribute, any of the values can match.

    " + } + }, + "GetAssessmentTelemetryRequest": { + "base": null, + "refs": { + } + }, + "GetAssessmentTelemetryResponse": { + "base": null, + "refs": { + } + }, + "Integer": { + "base": null, + "refs": { + "ListApplicationsRequest$maxResults": "

    You can use this parameter to indicate the maximum number of items you want in the response. The default value is 10. The maximum value is 500.

    ", + "ListAssessmentAgentsRequest$maxResults": "

    You can use this parameter to indicate the maximum number of items you want in the response. The default value is 10. The maximum value is 500.

    ", + "ListAssessmentsRequest$maxResults": "

    You can use this parameter to indicate the maximum number of items you want in the response. The default value is 10. The maximum value is 500.

    ", + "ListAttachedAssessmentsRequest$maxResults": "

    You can use this parameter to indicate the maximum number of items you want in the response. The default value is 10. The maximum value is 500.

    ", + "ListAttachedRulesPackagesRequest$maxResults": "

    You can use this parameter to indicate the maximum number of items you want in the response. The default value is 10. The maximum value is 500.

    ", + "ListFindingsRequest$maxResults": "

    You can use this parameter to indicate the maximum number of items you want in the response. The default value is 10. The maximum value is 500.

    ", + "ListRulesPackagesRequest$maxResults": "

    You can use this parameter to indicate the maximum number of items you want in the response. The default value is 10. The maximum value is 500.

    ", + "ListRunsRequest$maxResults": "

    You can use this parameter to indicate the maximum number of items you want in the response. The default value is 10. The maximum value is 500.

    ", + "PreviewAgentsForResourceGroupRequest$maxResults": "

    You can use this parameter to indicate the maximum number of items you want in the response. The default value is 10. The maximum value is 500.

    " + } + }, + "InternalException": { + "base": null, + "refs": { + } + }, + "InvalidCrossAccountRoleException": { + "base": null, + "refs": { + } + }, + "InvalidInputException": { + "base": null, + "refs": { + } + }, + "ListApplicationsRequest": { + "base": null, + "refs": { + } + }, + "ListApplicationsResponse": { + "base": null, + "refs": { + } + }, + "ListAssessmentAgentsRequest": { + "base": null, + "refs": { + } + }, + "ListAssessmentAgentsResponse": { + "base": null, + "refs": { + } + }, + "ListAssessmentsRequest": { + "base": null, + "refs": { + } + }, + "ListAssessmentsResponse": { + "base": null, + "refs": { + } + }, + "ListAttachedAssessmentsRequest": { + "base": null, + "refs": { + } + }, + "ListAttachedAssessmentsResponse": { + "base": null, + "refs": { + } + }, + "ListAttachedRulesPackagesRequest": { + "base": null, + "refs": { + } + }, + "ListAttachedRulesPackagesResponse": { + "base": null, + "refs": { + } + }, + "ListFindingsRequest": { + "base": null, + "refs": { + } + }, + "ListFindingsResponse": { + "base": null, + "refs": { + } + }, + "ListRulesPackagesRequest": { + "base": null, + "refs": { + } + }, + "ListRulesPackagesResponse": { + "base": null, + "refs": { + } + }, + "ListRunsRequest": { + "base": null, + "refs": { + } + }, + "ListRunsResponse": { + "base": null, + "refs": { + } + }, + "ListTagsForResourceRequest": { + "base": null, + "refs": { + } + }, + "ListTagsForResourceResponse": { + "base": null, + "refs": { + } + }, + "Locale": { + "base": null, + "refs": { + "LocalizeTextRequest$locale": "

    The locale that you want to translate a textual identifier into.

    " + } + }, + "LocalizeTextRequest": { + "base": null, + "refs": { + } + }, + "LocalizeTextResponse": { + "base": null, + "refs": { + } + }, + "LocalizedFacility": { + "base": null, + "refs": { + "LocalizedTextKey$facility": "

    The module response source of the text.

    " + } + }, + "LocalizedText": { + "base": "

    The textual identifier. This data type is used as the request parameter in the LocalizeText action.

    ", + "refs": { + "Finding$finding": "

    A short description that identifies the finding.

    ", + "Finding$description": "

    The description of the finding.

    ", + "Finding$recommendation": "

    The recommendation for the finding.

    ", + "LocalizedTextList$member": null, + "RulesPackage$description": "

    The description of the rules package.

    " + } + }, + "LocalizedTextId": { + "base": null, + "refs": { + "LocalizedTextKey$id": "

    Part of the module response source of the text.

    " + } + }, + "LocalizedTextKey": { + "base": "

    This data type is used in the LocalizedText data type.

    ", + "refs": { + "LocalizedText$key": "

    The facility and id properties of the LocalizedTextKey data type.

    " + } + }, + "LocalizedTextList": { + "base": null, + "refs": { + "LocalizeTextRequest$localizedTexts": "

    A list of textual identifiers.

    " + } + }, + "Long": { + "base": null, + "refs": { + "MessageTypeTelemetry$count": "

    The number of times that the behavioral data is collected by the agent during an assessment.

    ", + "MessageTypeTelemetry$dataSize": "

    The total size of the behavioral data that is collected by the agent during an assessment.

    " + } + }, + "Message": { + "base": null, + "refs": { + "AddAttributesToFindingsResponse$message": "

    Confirmation details of the action performed.

    ", + "AttachAssessmentAndRulesPackageResponse$message": "

    Confirmation details of the action performed.

    ", + "DeleteApplicationResponse$message": "

    Confirmation details of the action performed.

    ", + "DeleteAssessmentResponse$message": "

    Confirmation details of the action performed.

    ", + "DeleteRunResponse$message": "

    Confirmation details of the action performed.

    ", + "DetachAssessmentAndRulesPackageResponse$message": "

    Confirmation details of the action performed.

    ", + "LocalizeTextResponse$message": "

    Confirmation details of the action performed.

    ", + "RegisterCrossAccountAccessRoleResponse$message": "

    Confirmation details of the action performed.

    ", + "RemoveAttributesFromFindingsResponse$message": "

    Confirmation details of the action performed.

    ", + "SetTagsForResourceResponse$message": "

    Confirmation details of the action performed.

    ", + "StartDataCollectionResponse$message": "

    Confirmation details of the action performed.

    ", + "StopDataCollectionResponse$message": "

    Confirmation details of the action performed.

    ", + "UpdateApplicationResponse$message": "

    Confirmation details of the action performed.

    ", + "UpdateAssessmentResponse$message": "

    Confirmation details of the action performed.

    " + } + }, + "MessageType": { + "base": null, + "refs": { + "MessageTypeTelemetry$messageType": "

    A specific type of behavioral data that is collected by the agent.

    " + } + }, + "MessageTypeTelemetry": { + "base": "

    This data type is used in the Telemetry data type.

    This is metadata about the behavioral data collected by the Inspector agent on your EC2 instances during an assessment and passed to the Inspector service for analysis.

    ", + "refs": { + "MessageTypeTelemetryList$member": null + } + }, + "MessageTypeTelemetryList": { + "base": null, + "refs": { + "Telemetry$messageTypeTelemetries": "

    Counts of individual metrics received by Inspector from the agent.

    " + } + }, + "Name": { + "base": null, + "refs": { + "Application$applicationName": "

    The name of the Inspector application.

    ", + "Assessment$assessmentName": "

    The name of the assessment.

    ", + "CreateApplicationRequest$applicationName": "

    The user-defined name identifying the application that you want to create. The name must be unique within the AWS account.

    ", + "CreateAssessmentRequest$assessmentName": "

    The user-defined name identifying the assessment that you want to create. You can create several assessments for an application. The names of the assessments corresponding to a particular application must be unique.

    ", + "Finding$ruleName": "

    The rule name that is used to generate the finding.

    ", + "NameList$member": null, + "RulesPackage$rulesPackageName": "

    The name of the rules package.

    ", + "RulesPackage$provider": "

    The provider of the rules package.

    ", + "Run$runName": "

    The auto-generated name for the run.

    ", + "RunAssessmentRequest$runName": "

    A name specifying the run of the assessment.

    ", + "UpdateApplicationRequest$applicationName": "

    Application name that you want to update.

    ", + "UpdateAssessmentRequest$assessmentName": "

    Assessment name that you want to update.

    " + } + }, + "NameList": { + "base": null, + "refs": { + "FindingsFilter$ruleNames": "

    For a record to match a filter, the value specified for this data type property must be the exact match of the value of the ruleName property of the Finding data type.

    " + } + }, + "NamePattern": { + "base": null, + "refs": { + "NamePatternList$member": null + } + }, + "NamePatternList": { + "base": null, + "refs": { + "ApplicationsFilter$applicationNamePatterns": "

    For a record to match a filter, an explicit value or a string containing a wildcard specified for this data type property must match the value of the applicationName property of the Application data type.

    ", + "AssessmentsFilter$assessmentNamePatterns": "

    For a record to match a filter, an explicit value or a string containing a wildcard specified for this data type property must match the value of the assessmentName property of the Assessment data type.

    ", + "RunsFilter$runNamePatterns": "

    For a record to match a filter, an explicit value or a string containing a wildcard specified for this data type property must match the value of the runName property of the Run data type.

    " + } + }, + "NoSuchEntityException": { + "base": null, + "refs": { + } + }, + "OperationInProgressException": { + "base": null, + "refs": { + } + }, + "PaginationToken": { + "base": null, + "refs": { + "ListApplicationsRequest$nextToken": "

    You can use this parameter when paginating results. Set the value of this parameter to 'null' on your first call to the ListApplications action. Subsequent calls to the action fill nextToken in the request with the value of NextToken from previous response to continue listing data.

    ", + "ListApplicationsResponse$nextToken": "

    When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to 'null'.

    ", + "ListAssessmentAgentsRequest$nextToken": "

    You can use this parameter when paginating results. Set the value of this parameter to 'null' on your first call to the ListAssessmentAgents action. Subsequent calls to the action fill nextToken in the request with the value of NextToken from previous response to continue listing data.

    ", + "ListAssessmentAgentsResponse$nextToken": "

    When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to 'null'.

    ", + "ListAssessmentsRequest$nextToken": "

    You can use this parameter when paginating results. Set the value of this parameter to 'null' on your first call to the ListAssessments action. Subsequent calls to the action fill nextToken in the request with the value of NextToken from previous response to continue listing data.

    ", + "ListAssessmentsResponse$nextToken": "

    When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to 'null'.

    ", + "ListAttachedAssessmentsRequest$nextToken": "

    You can use this parameter when paginating results. Set the value of this parameter to 'null' on your first call to the ListAttachedAssessments action. Subsequent calls to the action fill nextToken in the request with the value of NextToken from previous response to continue listing data.

    ", + "ListAttachedAssessmentsResponse$nextToken": "

    When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to 'null'.

    ", + "ListAttachedRulesPackagesRequest$nextToken": "

    You can use this parameter when paginating results. Set the value of this parameter to 'null' on your first call to the ListAttachedRulesPackages action. Subsequent calls to the action fill nextToken in the request with the value of NextToken from previous response to continue listing data.

    ", + "ListAttachedRulesPackagesResponse$nextToken": "

    When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to 'null'.

    ", + "ListFindingsRequest$nextToken": "

    You can use this parameter when paginating results. Set the value of this parameter to 'null' on your first call to the ListFindings action. Subsequent calls to the action fill nextToken in the request with the value of NextToken from previous response to continue listing data.

    ", + "ListFindingsResponse$nextToken": "

    When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to 'null'.

    ", + "ListRulesPackagesRequest$nextToken": "

    You can use this parameter when paginating results. Set the value of this parameter to 'null' on your first call to the ListRulesPackages action. Subsequent calls to the action fill nextToken in the request with the value of NextToken from previous response to continue listing data.

    ", + "ListRulesPackagesResponse$nextToken": "

    When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to 'null'.

    ", + "ListRunsRequest$nextToken": "

    You can use this parameter when paginating results. Set the value of this parameter to 'null' on your first call to the ListRuns action. Subsequent calls to the action fill nextToken in the request with the value of NextToken from previous response to continue listing data.

    ", + "ListRunsResponse$nextToken": "

    When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to 'null'.

    ", + "PreviewAgentsForResourceGroupRequest$nextToken": "

    You can use this parameter when paginating results. Set the value of this parameter to 'null' on your first call to the PreviewAgentsForResourceGroup action. Subsequent calls to the action fill nextToken in the request with the value of NextToken from previous response to continue listing data.

    ", + "PreviewAgentsForResourceGroupResponse$nextToken": "

    When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to 'null'.
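The nextToken/maxResults entries above describe the standard AWS pagination contract: send no token on the first call, then echo each response's token back until it comes back null. A minimal Go sketch of that loop follows; the ListFindingsInput/ListFindingsOutput structs here are hand-written stand-ins for the generated aws-sdk-go types (the real structs carry more fields, such as filters), and the injected call function stands in for the generated client method.

```go
package inspectorexample

// Hand-written stand-ins for the generated request/response types.
type ListFindingsInput struct {
	MaxResults *int64
	NextToken  *string
}

type ListFindingsOutput struct {
	FindingArnList []*string
	NextToken      *string
}

// listAllFindings follows the documented contract: leave NextToken unset on
// the first call, then copy each response's NextToken into the next request
// until the response token is nil, meaning there is no more data to be listed.
func listAllFindings(call func(*ListFindingsInput) (*ListFindingsOutput, error)) ([]*string, error) {
	var arns []*string
	maxResults := int64(500) // documented maximum page size; the default is 10
	input := &ListFindingsInput{MaxResults: &maxResults}
	for {
		out, err := call(input)
		if err != nil {
			return nil, err
		}
		arns = append(arns, out.FindingArnList...)
		if out.NextToken == nil {
			return arns, nil
		}
		input.NextToken = out.NextToken
	}
}
```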

    " + } + }, + "Parameter": { + "base": "

    This data type is used in the LocalizedText data type.

    ", + "refs": { + "ParameterList$member": null + } + }, + "ParameterList": { + "base": null, + "refs": { + "LocalizedText$parameters": "

    Values for the dynamic elements of the string specified by the textual identifier.

    " + } + }, + "ParameterName": { + "base": null, + "refs": { + "Parameter$name": "

    The name of the variable that is being replaced.

    " + } + }, + "ParameterValue": { + "base": null, + "refs": { + "Parameter$value": "

    The value assigned to the variable that is being replaced.

    " + } + }, + "PreviewAgentsForResourceGroupRequest": { + "base": null, + "refs": { + } + }, + "PreviewAgentsForResourceGroupResponse": { + "base": null, + "refs": { + } + }, + "RegisterCrossAccountAccessRoleRequest": { + "base": null, + "refs": { + } + }, + "RegisterCrossAccountAccessRoleResponse": { + "base": null, + "refs": { + } + }, + "RemoveAttributesFromFindingsRequest": { + "base": null, + "refs": { + } + }, + "RemoveAttributesFromFindingsResponse": { + "base": null, + "refs": { + } + }, + "ResourceGroup": { + "base": "

    Contains information about a resource group. The resource group defines a set of tags that, when queried, identify the AWS resources that comprise the application.

    This data type is used as the response element in the DescribeResourceGroup action.

    ", + "refs": { + "DescribeResourceGroupResponse$resourceGroup": "

    Information about the resource group.

    " + } + }, + "ResourceGroupTags": { + "base": null, + "refs": { + "CreateResourceGroupRequest$resourceGroupTags": "

    A collection of keys and an array of possible values in JSON format.

    For example, [{ \"key1\" : [\"Value1\",\"Value2\"]},{\"Key2\": [\"Value3\"]}]

    ", + "ResourceGroup$resourceGroupTags": "

    The tags (key and value pairs) of the resource group.

    This data type property is used in the CreateResourceGroup action.

    A collection of keys and an array of possible values in JSON format.

    For example, [{ \"key1\" : [\"Value1\",\"Value2\"]},{\"Key2\": [\"Value3\"]}]
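The resourceGroupTags value documented just above is a single JSON string: an array of objects, each mapping one tag key to the list of acceptable values. A small sketch of building that string in Go, using only the example data shown in the docs:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Each element maps a tag key to the array of values that may match it,
	// mirroring the documented example.
	tags := []map[string][]string{
		{"key1": {"Value1", "Value2"}},
		{"Key2": {"Value3"}},
	}
	b, err := json.Marshal(tags)
	if err != nil {
		panic(err)
	}
	// Prints: [{"key1":["Value1","Value2"]},{"Key2":["Value3"]}]
	// This string is what CreateResourceGroup expects for resourceGroupTags.
	fmt.Println(string(b))
}
```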

    " + } + }, + "RulesPackage": { + "base": "

    Contains information about an Inspector rules package.

    This data type is used as the response element in the DescribeRulesPackage action.

    ", + "refs": { + "DescribeRulesPackageResponse$rulesPackage": "

    Information about the rules package.

    " + } + }, + "Run": { + "base": "

    A snapshot of an Inspector assessment that contains the assessment's findings.

    This data type is used as the response element in the DescribeRun action.

    ", + "refs": { + "DescribeRunResponse$run": "

    Information about the assessment run.

    " + } + }, + "RunAssessmentRequest": { + "base": null, + "refs": { + } + }, + "RunAssessmentResponse": { + "base": null, + "refs": { + } + }, + "RunState": { + "base": null, + "refs": { + "Run$runState": "

    The state of the run. Values can be set to DataCollectionComplete, EvaluatingPolicies, EvaluatingPoliciesErrorCanRetry, Completed, Failed, TombStoned.

    ", + "RunStateList$member": null + } + }, + "RunStateList": { + "base": null, + "refs": { + "RunsFilter$runStates": "

    For a record to match a filter, the value specified for this data type property must be the exact match of the value of the runState property of the Run data type.

    " + } + }, + "RunsFilter": { + "base": "

    This data type is used as the request parameter in the ListRuns action.

    ", + "refs": { + "ListRunsRequest$filter": "

    You can use this parameter to specify a subset of data to be included in the action's response.

    For a record to match a filter, all specified filter attributes must match. When multiple values are specified for a filter attribute, any of the values can match.

    " + } + }, + "SetTagsForResourceRequest": { + "base": null, + "refs": { + } + }, + "SetTagsForResourceResponse": { + "base": null, + "refs": { + } + }, + "Severity": { + "base": null, + "refs": { + "Finding$severity": "

    The finding severity. Values can be set to High, Medium, Low, and Informational.

    ", + "SeverityList$member": null + } + }, + "SeverityList": { + "base": null, + "refs": { + "FindingsFilter$severities": "

    For a record to match a filter, the value specified for this data type property must be the exact match of the value of the severity property of the Finding data type.

    " + } + }, + "StartDataCollectionRequest": { + "base": null, + "refs": { + } + }, + "StartDataCollectionResponse": { + "base": null, + "refs": { + } + }, + "StopDataCollectionRequest": { + "base": null, + "refs": { + } + }, + "StopDataCollectionResponse": { + "base": null, + "refs": { + } + }, + "Tag": { + "base": "

    A key and value pair.

    This data type is used as a request parameter in the SetTagsForResource action and a response element in the ListTagsForResource action.

    ", + "refs": { + "TagList$member": null + } + }, + "TagKey": { + "base": null, + "refs": { + "Tag$Key": "

    The tag key.

    " + } + }, + "TagList": { + "base": null, + "refs": { + "ListTagsForResourceResponse$tagList": "

    A collection of key and value pairs.

    ", + "SetTagsForResourceRequest$tags": "

    A collection of key and value pairs that you want to set to an assessment.

    " + } + }, + "TagValue": { + "base": null, + "refs": { + "Tag$Value": "

    The value assigned to a tag key.

    " + } + }, + "Telemetry": { + "base": "

    The metadata about the Inspector application data metrics collected by the agent.

    This data type is used as the response element in the GetAssessmentTelemetry action.

    ", + "refs": { + "TelemetryList$member": null + } + }, + "TelemetryList": { + "base": null, + "refs": { + "Agent$telemetry": "

    The Inspector application data metrics collected by the agent.

    ", + "GetAssessmentTelemetryResponse$telemetry": "

    Telemetry details.

    " + } + }, + "TelemetryStatus": { + "base": null, + "refs": { + "Telemetry$status": "

    The category of the individual metrics that together constitute the telemetry that Inspector received from the agent.

    " + } + }, + "Text": { + "base": null, + "refs": { + "TextList$member": null + } + }, + "TextList": { + "base": null, + "refs": { + "LocalizeTextResponse$results": "

    The resulting list of user-readable texts.

    " + } + }, + "Timestamp": { + "base": null, + "refs": { + "Assessment$startTime": "

    The assessment start time.

    ", + "Assessment$endTime": "

    The assessment end time.

    ", + "Run$creationTime": "

    Run creation time that corresponds to the data collection completion time or failure.

    ", + "Run$completionTime": "

    Run completion time that corresponds to the rules packages evaluation completion time or failure.

    ", + "TimestampRange$minimum": "

    The minimum value of the timestamp range.

    ", + "TimestampRange$maximum": "

    The maximum value of the timestamp range.

    " + } + }, + "TimestampRange": { + "base": "

    This data type is used in the AssessmentsFilter and RunsFilter data types.

    ", + "refs": { + "AssessmentsFilter$startTimeRange": "

    For a record to match a filter, the value specified for this data type property must inclusively match any value between the specified minimum and maximum values of the startTime property of the Assessment data type.

    ", + "AssessmentsFilter$endTimeRange": "

    For a record to match a filter, the value specified for this data type property must inclusively match any value between the specified minimum and maximum values of the endTime property of the Assessment data type.

    ", + "RunsFilter$creationTime": "

    For a record to match a filter, the value specified for this data type property must inclusively match any value between the specified minimum and maximum values of the creationTime property of the Run data type.

    ", + "RunsFilter$completionTime": "

    For a record to match a filter, the value specified for this data type property must inclusively match any value between the specified minimum and maximum values of the completionTime property of the Run data type.

    " + } + }, + "UpdateApplicationRequest": { + "base": null, + "refs": { + } + }, + "UpdateApplicationResponse": { + "base": null, + "refs": { + } + }, + "UpdateAssessmentRequest": { + "base": null, + "refs": { + } + }, + "UpdateAssessmentResponse": { + "base": null, + "refs": { + } + }, + "Version": { + "base": null, + "refs": { + "RulesPackage$version": "

    The version ID of the rules package.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/inspector/2015-08-18/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/inspector/2015-08-18/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/inspector/2015-08-18/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/inspector/2016-02-16/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/inspector/2016-02-16/api-2.json new file mode 100644 index 000000000..43e7b62be --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/inspector/2016-02-16/api-2.json @@ -0,0 +1,1964 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2016-02-16", + "endpointPrefix":"inspector", + "jsonVersion":"1.1", + "protocol":"json", + "serviceFullName":"Amazon Inspector", + "signatureVersion":"v4", + "targetPrefix":"InspectorService" + }, + "operations":{ + "AddAttributesToFindings":{ + "name":"AddAttributesToFindings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddAttributesToFindingsRequest"}, + "output":{"shape":"AddAttributesToFindingsResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "CreateAssessmentTarget":{ + "name":"CreateAssessmentTarget", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateAssessmentTargetRequest"}, + "output":{"shape":"CreateAssessmentTargetResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "CreateAssessmentTemplate":{ + "name":"CreateAssessmentTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateAssessmentTemplateRequest"}, + "output":{"shape":"CreateAssessmentTemplateResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "CreateResourceGroup":{ + "name":"CreateResourceGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateResourceGroupRequest"}, + "output":{"shape":"CreateResourceGroupResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"} + ] + }, + "DeleteAssessmentRun":{ + "name":"DeleteAssessmentRun", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAssessmentRunRequest"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AssessmentRunInProgressException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "DeleteAssessmentTarget":{ + "name":"DeleteAssessmentTarget", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAssessmentTargetRequest"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AssessmentRunInProgressException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "DeleteAssessmentTemplate":{ + "name":"DeleteAssessmentTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"DeleteAssessmentTemplateRequest"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AssessmentRunInProgressException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "DescribeAssessmentRuns":{ + "name":"DescribeAssessmentRuns", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAssessmentRunsRequest"}, + "output":{"shape":"DescribeAssessmentRunsResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"} + ] + }, + "DescribeAssessmentTargets":{ + "name":"DescribeAssessmentTargets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAssessmentTargetsRequest"}, + "output":{"shape":"DescribeAssessmentTargetsResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"} + ] + }, + "DescribeAssessmentTemplates":{ + "name":"DescribeAssessmentTemplates", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAssessmentTemplatesRequest"}, + "output":{"shape":"DescribeAssessmentTemplatesResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"} + ] + }, + "DescribeCrossAccountAccessRole":{ + "name":"DescribeCrossAccountAccessRole", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{"shape":"DescribeCrossAccountAccessRoleResponse"}, + "errors":[ + {"shape":"InternalException"} + ] + }, + "DescribeFindings":{ + "name":"DescribeFindings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeFindingsRequest"}, + "output":{"shape":"DescribeFindingsResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"} + ] + }, + "DescribeResourceGroups":{ + "name":"DescribeResourceGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeResourceGroupsRequest"}, + "output":{"shape":"DescribeResourceGroupsResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"} + ] + }, + "DescribeRulesPackages":{ + "name":"DescribeRulesPackages", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRulesPackagesRequest"}, + "output":{"shape":"DescribeRulesPackagesResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"} + ] + }, + "GetTelemetryMetadata":{ + "name":"GetTelemetryMetadata", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetTelemetryMetadataRequest"}, + "output":{"shape":"GetTelemetryMetadataResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "ListAssessmentRunAgents":{ + "name":"ListAssessmentRunAgents", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAssessmentRunAgentsRequest"}, + "output":{"shape":"ListAssessmentRunAgentsResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "ListAssessmentRuns":{ + "name":"ListAssessmentRuns", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAssessmentRunsRequest"}, + "output":{"shape":"ListAssessmentRunsResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + 
{"shape":"NoSuchEntityException"} + ] + }, + "ListAssessmentTargets":{ + "name":"ListAssessmentTargets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAssessmentTargetsRequest"}, + "output":{"shape":"ListAssessmentTargetsResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"} + ] + }, + "ListAssessmentTemplates":{ + "name":"ListAssessmentTemplates", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAssessmentTemplatesRequest"}, + "output":{"shape":"ListAssessmentTemplatesResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "ListEventSubscriptions":{ + "name":"ListEventSubscriptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListEventSubscriptionsRequest"}, + "output":{"shape":"ListEventSubscriptionsResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "ListFindings":{ + "name":"ListFindings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListFindingsRequest"}, + "output":{"shape":"ListFindingsResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "ListRulesPackages":{ + "name":"ListRulesPackages", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListRulesPackagesRequest"}, + "output":{"shape":"ListRulesPackagesResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"} + ] + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "PreviewAgents":{ + "name":"PreviewAgents", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PreviewAgentsRequest"}, + "output":{"shape":"PreviewAgentsResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidCrossAccountRoleException"} + ] + }, + "RegisterCrossAccountAccessRole":{ + "name":"RegisterCrossAccountAccessRole", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterCrossAccountAccessRoleRequest"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidCrossAccountRoleException"} + ] + }, + "RemoveAttributesFromFindings":{ + "name":"RemoveAttributesFromFindings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveAttributesFromFindingsRequest"}, + "output":{"shape":"RemoveAttributesFromFindingsResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "SetTagsForResource":{ + "name":"SetTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"SetTagsForResourceRequest"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "StartAssessmentRun":{ + "name":"StartAssessmentRun", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartAssessmentRunRequest"}, + "output":{"shape":"StartAssessmentRunResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidCrossAccountRoleException"}, + {"shape":"AgentsAlreadyRunningAssessmentException"} + ] + }, + "StopAssessmentRun":{ + "name":"StopAssessmentRun", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopAssessmentRunRequest"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "SubscribeToEvent":{ + "name":"SubscribeToEvent", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SubscribeToEventRequest"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "UnsubscribeFromEvent":{ + "name":"UnsubscribeFromEvent", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UnsubscribeFromEventRequest"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "UpdateAssessmentTarget":{ + "name":"UpdateAssessmentTarget", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateAssessmentTargetRequest"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + } + }, + "shapes":{ + "AccessDeniedErrorCode":{ + "type":"string", + "enum":[ + "ACCESS_DENIED_TO_ASSESSMENT_TARGET", + "ACCESS_DENIED_TO_ASSESSMENT_TEMPLATE", + "ACCESS_DENIED_TO_ASSESSMENT_RUN", + "ACCESS_DENIED_TO_FINDING", + "ACCESS_DENIED_TO_RESOURCE_GROUP", + "ACCESS_DENIED_TO_RULES_PACKAGE", + "ACCESS_DENIED_TO_SNS_TOPIC", + "ACCESS_DENIED_TO_IAM_ROLE" + ] + }, + "AccessDeniedException":{ + "type":"structure", + "required":[ + "message", + "errorCode", + "canRetry" + ], + "members":{ + "message":{"shape":"ErrorMessage"}, + "errorCode":{"shape":"AccessDeniedErrorCode"}, + "canRetry":{"shape":"Bool"} + }, + "exception":true + }, + "AddAttributesToFindingsRequest":{ + "type":"structure", + "required":[ + "findingArns", + "attributes" + ], + "members":{ + "findingArns":{"shape":"AddRemoveAttributesFindingArnList"}, + "attributes":{"shape":"UserAttributeList"} + } + }, + "AddAttributesToFindingsResponse":{ + "type":"structure", + "required":["failedItems"], + "members":{ + "failedItems":{"shape":"FailedItems"} + } + }, + "AddRemoveAttributesFindingArnList":{ + "type":"list", + "member":{"shape":"Arn"}, + "max":10, + "min":1 + }, + "AgentAlreadyRunningAssessment":{ + "type":"structure", + "required":[ + "agentId", + "assessmentRunArn" + ], + "members":{ + "agentId":{"shape":"AgentId"}, + "assessmentRunArn":{"shape":"Arn"} + } + }, + "AgentAlreadyRunningAssessmentList":{ + "type":"list", + "member":{"shape":"AgentAlreadyRunningAssessment"}, + "max":10, + "min":1 
+ }, + "AgentFilter":{ + "type":"structure", + "required":[ + "agentHealths", + "agentHealthCodes" + ], + "members":{ + "agentHealths":{"shape":"AgentHealthList"}, + "agentHealthCodes":{"shape":"AgentHealthCodeList"} + } + }, + "AgentHealth":{ + "type":"string", + "enum":[ + "HEALTHY", + "UNHEALTHY" + ] + }, + "AgentHealthCode":{ + "type":"string", + "enum":[ + "IDLE", + "RUNNING", + "SHUTDOWN", + "UNHEALTHY", + "THROTTLED", + "UNKNOWN" + ] + }, + "AgentHealthCodeList":{ + "type":"list", + "member":{"shape":"AgentHealthCode"}, + "max":10, + "min":0 + }, + "AgentHealthList":{ + "type":"list", + "member":{"shape":"AgentHealth"}, + "max":10, + "min":0 + }, + "AgentId":{ + "type":"string", + "max":128, + "min":1 + }, + "AgentIdList":{ + "type":"list", + "member":{"shape":"AgentId"}, + "max":500, + "min":0 + }, + "AgentPreview":{ + "type":"structure", + "required":["agentId"], + "members":{ + "agentId":{"shape":"AgentId"}, + "autoScalingGroup":{"shape":"AutoScalingGroup"} + } + }, + "AgentPreviewList":{ + "type":"list", + "member":{"shape":"AgentPreview"}, + "max":100, + "min":0 + }, + "AgentsAlreadyRunningAssessmentException":{ + "type":"structure", + "required":[ + "message", + "agents", + "agentsTruncated", + "canRetry" + ], + "members":{ + "message":{"shape":"ErrorMessage"}, + "agents":{"shape":"AgentAlreadyRunningAssessmentList"}, + "agentsTruncated":{"shape":"Bool"}, + "canRetry":{"shape":"Bool"} + }, + "exception":true + }, + "AmiId":{ + "type":"string", + "max":256, + "min":0 + }, + "Arn":{ + "type":"string", + "max":300, + "min":1 + }, + "AssessmentRulesPackageArnList":{ + "type":"list", + "member":{"shape":"Arn"}, + "max":50, + "min":1 + }, + "AssessmentRun":{ + "type":"structure", + "required":[ + "arn", + "name", + "assessmentTemplateArn", + "state", + "durationInSeconds", + "rulesPackageArns", + "userAttributesForFindings", + "createdAt", + "stateChangedAt", + "dataCollected", + "stateChanges", + "notifications" + ], + "members":{ + "arn":{"shape":"Arn"}, + "name":{"shape":"AssessmentRunName"}, + "assessmentTemplateArn":{"shape":"Arn"}, + "state":{"shape":"AssessmentRunState"}, + "durationInSeconds":{"shape":"AssessmentRunDuration"}, + "rulesPackageArns":{"shape":"AssessmentRulesPackageArnList"}, + "userAttributesForFindings":{"shape":"UserAttributeList"}, + "createdAt":{"shape":"Timestamp"}, + "startedAt":{"shape":"Timestamp"}, + "completedAt":{"shape":"Timestamp"}, + "stateChangedAt":{"shape":"Timestamp"}, + "dataCollected":{"shape":"Bool"}, + "stateChanges":{"shape":"AssessmentRunStateChangeList"}, + "notifications":{"shape":"AssessmentRunNotificationList"} + } + }, + "AssessmentRunAgent":{ + "type":"structure", + "required":[ + "agentId", + "assessmentRunArn", + "agentHealth", + "agentHealthCode", + "telemetryMetadata" + ], + "members":{ + "agentId":{"shape":"AgentId"}, + "assessmentRunArn":{"shape":"Arn"}, + "agentHealth":{"shape":"AgentHealth"}, + "agentHealthCode":{"shape":"AgentHealthCode"}, + "agentHealthDetails":{"shape":"Message"}, + "autoScalingGroup":{"shape":"AutoScalingGroup"}, + "telemetryMetadata":{"shape":"TelemetryMetadataList"} + } + }, + "AssessmentRunAgentList":{ + "type":"list", + "member":{"shape":"AssessmentRunAgent"}, + "max":500, + "min":0 + }, + "AssessmentRunDuration":{ + "type":"integer", + "max":86400, + "min":180 + }, + "AssessmentRunFilter":{ + "type":"structure", + "members":{ + "namePattern":{"shape":"NamePattern"}, + "states":{"shape":"AssessmentRunStateList"}, + "durationRange":{"shape":"DurationRange"}, + 
"rulesPackageArns":{"shape":"FilterRulesPackageArnList"}, + "startTimeRange":{"shape":"TimestampRange"}, + "completionTimeRange":{"shape":"TimestampRange"}, + "stateChangeTimeRange":{"shape":"TimestampRange"} + } + }, + "AssessmentRunInProgressArnList":{ + "type":"list", + "member":{"shape":"Arn"}, + "max":10, + "min":1 + }, + "AssessmentRunInProgressException":{ + "type":"structure", + "required":[ + "message", + "assessmentRunArns", + "assessmentRunArnsTruncated", + "canRetry" + ], + "members":{ + "message":{"shape":"ErrorMessage"}, + "assessmentRunArns":{"shape":"AssessmentRunInProgressArnList"}, + "assessmentRunArnsTruncated":{"shape":"Bool"}, + "canRetry":{"shape":"Bool"} + }, + "exception":true + }, + "AssessmentRunList":{ + "type":"list", + "member":{"shape":"AssessmentRun"}, + "max":10, + "min":0 + }, + "AssessmentRunName":{ + "type":"string", + "max":140, + "min":1 + }, + "AssessmentRunNotification":{ + "type":"structure", + "required":[ + "date", + "event", + "error" + ], + "members":{ + "date":{"shape":"Timestamp"}, + "event":{"shape":"InspectorEvent"}, + "message":{"shape":"Message"}, + "error":{"shape":"Bool"}, + "snsTopicArn":{"shape":"Arn"}, + "snsPublishStatusCode":{"shape":"AssessmentRunNotificationSnsStatusCode"} + } + }, + "AssessmentRunNotificationList":{ + "type":"list", + "member":{"shape":"AssessmentRunNotification"}, + "max":50, + "min":0 + }, + "AssessmentRunNotificationSnsStatusCode":{ + "type":"string", + "enum":[ + "SUCCESS", + "TOPIC_DOES_NOT_EXIST", + "ACCESS_DENIED", + "INTERNAL_ERROR" + ] + }, + "AssessmentRunState":{ + "type":"string", + "enum":[ + "CREATED", + "START_DATA_COLLECTION_PENDING", + "START_DATA_COLLECTION_IN_PROGRESS", + "COLLECTING_DATA", + "STOP_DATA_COLLECTION_PENDING", + "DATA_COLLECTED", + "EVALUATING_RULES", + "FAILED", + "COMPLETED", + "COMPLETED_WITH_ERRORS" + ] + }, + "AssessmentRunStateChange":{ + "type":"structure", + "required":[ + "stateChangedAt", + "state" + ], + "members":{ + "stateChangedAt":{"shape":"Timestamp"}, + "state":{"shape":"AssessmentRunState"} + } + }, + "AssessmentRunStateChangeList":{ + "type":"list", + "member":{"shape":"AssessmentRunStateChange"}, + "max":50, + "min":0 + }, + "AssessmentRunStateList":{ + "type":"list", + "member":{"shape":"AssessmentRunState"}, + "max":50, + "min":0 + }, + "AssessmentTarget":{ + "type":"structure", + "required":[ + "arn", + "name", + "resourceGroupArn", + "createdAt", + "updatedAt" + ], + "members":{ + "arn":{"shape":"Arn"}, + "name":{"shape":"AssessmentTargetName"}, + "resourceGroupArn":{"shape":"Arn"}, + "createdAt":{"shape":"Timestamp"}, + "updatedAt":{"shape":"Timestamp"} + } + }, + "AssessmentTargetFilter":{ + "type":"structure", + "members":{ + "assessmentTargetNamePattern":{"shape":"NamePattern"} + } + }, + "AssessmentTargetList":{ + "type":"list", + "member":{"shape":"AssessmentTarget"}, + "max":10, + "min":0 + }, + "AssessmentTargetName":{ + "type":"string", + "max":140, + "min":1 + }, + "AssessmentTemplate":{ + "type":"structure", + "required":[ + "arn", + "name", + "assessmentTargetArn", + "durationInSeconds", + "rulesPackageArns", + "userAttributesForFindings", + "createdAt" + ], + "members":{ + "arn":{"shape":"Arn"}, + "name":{"shape":"AssessmentTemplateName"}, + "assessmentTargetArn":{"shape":"Arn"}, + "durationInSeconds":{"shape":"AssessmentRunDuration"}, + "rulesPackageArns":{"shape":"AssessmentTemplateRulesPackageArnList"}, + "userAttributesForFindings":{"shape":"UserAttributeList"}, + "createdAt":{"shape":"Timestamp"} + } + }, + "AssessmentTemplateFilter":{ + 
"type":"structure", + "members":{ + "namePattern":{"shape":"NamePattern"}, + "durationRange":{"shape":"DurationRange"}, + "rulesPackageArns":{"shape":"FilterRulesPackageArnList"} + } + }, + "AssessmentTemplateList":{ + "type":"list", + "member":{"shape":"AssessmentTemplate"}, + "max":10, + "min":0 + }, + "AssessmentTemplateName":{ + "type":"string", + "max":140, + "min":1 + }, + "AssessmentTemplateRulesPackageArnList":{ + "type":"list", + "member":{"shape":"Arn"}, + "max":50, + "min":0 + }, + "AssetAttributes":{ + "type":"structure", + "required":["schemaVersion"], + "members":{ + "schemaVersion":{"shape":"NumericVersion"}, + "agentId":{"shape":"AgentId"}, + "autoScalingGroup":{"shape":"AutoScalingGroup"}, + "amiId":{"shape":"AmiId"}, + "hostname":{"shape":"Hostname"}, + "ipv4Addresses":{"shape":"Ipv4AddressList"} + } + }, + "AssetType":{ + "type":"string", + "enum":["ec2-instance"] + }, + "Attribute":{ + "type":"structure", + "required":["key"], + "members":{ + "key":{"shape":"AttributeKey"}, + "value":{"shape":"AttributeValue"} + } + }, + "AttributeKey":{ + "type":"string", + "max":128, + "min":1 + }, + "AttributeList":{ + "type":"list", + "member":{"shape":"Attribute"}, + "max":50, + "min":0 + }, + "AttributeValue":{ + "type":"string", + "max":256, + "min":1 + }, + "AutoScalingGroup":{ + "type":"string", + "max":256, + "min":1 + }, + "AutoScalingGroupList":{ + "type":"list", + "member":{"shape":"AutoScalingGroup"}, + "max":20, + "min":0 + }, + "BatchDescribeArnList":{ + "type":"list", + "member":{"shape":"Arn"}, + "max":10, + "min":1 + }, + "Bool":{"type":"boolean"}, + "CreateAssessmentTargetRequest":{ + "type":"structure", + "required":[ + "assessmentTargetName", + "resourceGroupArn" + ], + "members":{ + "assessmentTargetName":{"shape":"AssessmentTargetName"}, + "resourceGroupArn":{"shape":"Arn"} + } + }, + "CreateAssessmentTargetResponse":{ + "type":"structure", + "required":["assessmentTargetArn"], + "members":{ + "assessmentTargetArn":{"shape":"Arn"} + } + }, + "CreateAssessmentTemplateRequest":{ + "type":"structure", + "required":[ + "assessmentTargetArn", + "assessmentTemplateName", + "durationInSeconds", + "rulesPackageArns" + ], + "members":{ + "assessmentTargetArn":{"shape":"Arn"}, + "assessmentTemplateName":{"shape":"AssessmentTemplateName"}, + "durationInSeconds":{"shape":"AssessmentRunDuration"}, + "rulesPackageArns":{"shape":"AssessmentTemplateRulesPackageArnList"}, + "userAttributesForFindings":{"shape":"UserAttributeList"} + } + }, + "CreateAssessmentTemplateResponse":{ + "type":"structure", + "required":["assessmentTemplateArn"], + "members":{ + "assessmentTemplateArn":{"shape":"Arn"} + } + }, + "CreateResourceGroupRequest":{ + "type":"structure", + "required":["resourceGroupTags"], + "members":{ + "resourceGroupTags":{"shape":"ResourceGroupTags"} + } + }, + "CreateResourceGroupResponse":{ + "type":"structure", + "required":["resourceGroupArn"], + "members":{ + "resourceGroupArn":{"shape":"Arn"} + } + }, + "DeleteAssessmentRunRequest":{ + "type":"structure", + "required":["assessmentRunArn"], + "members":{ + "assessmentRunArn":{"shape":"Arn"} + } + }, + "DeleteAssessmentTargetRequest":{ + "type":"structure", + "required":["assessmentTargetArn"], + "members":{ + "assessmentTargetArn":{"shape":"Arn"} + } + }, + "DeleteAssessmentTemplateRequest":{ + "type":"structure", + "required":["assessmentTemplateArn"], + "members":{ + "assessmentTemplateArn":{"shape":"Arn"} + } + }, + "DescribeAssessmentRunsRequest":{ + "type":"structure", + "required":["assessmentRunArns"], + 
"members":{ + "assessmentRunArns":{"shape":"BatchDescribeArnList"} + } + }, + "DescribeAssessmentRunsResponse":{ + "type":"structure", + "required":[ + "assessmentRuns", + "failedItems" + ], + "members":{ + "assessmentRuns":{"shape":"AssessmentRunList"}, + "failedItems":{"shape":"FailedItems"} + } + }, + "DescribeAssessmentTargetsRequest":{ + "type":"structure", + "required":["assessmentTargetArns"], + "members":{ + "assessmentTargetArns":{"shape":"BatchDescribeArnList"} + } + }, + "DescribeAssessmentTargetsResponse":{ + "type":"structure", + "required":[ + "assessmentTargets", + "failedItems" + ], + "members":{ + "assessmentTargets":{"shape":"AssessmentTargetList"}, + "failedItems":{"shape":"FailedItems"} + } + }, + "DescribeAssessmentTemplatesRequest":{ + "type":"structure", + "required":["assessmentTemplateArns"], + "members":{ + "assessmentTemplateArns":{"shape":"BatchDescribeArnList"} + } + }, + "DescribeAssessmentTemplatesResponse":{ + "type":"structure", + "required":[ + "assessmentTemplates", + "failedItems" + ], + "members":{ + "assessmentTemplates":{"shape":"AssessmentTemplateList"}, + "failedItems":{"shape":"FailedItems"} + } + }, + "DescribeCrossAccountAccessRoleResponse":{ + "type":"structure", + "required":[ + "roleArn", + "valid", + "registeredAt" + ], + "members":{ + "roleArn":{"shape":"Arn"}, + "valid":{"shape":"Bool"}, + "registeredAt":{"shape":"Timestamp"} + } + }, + "DescribeFindingsRequest":{ + "type":"structure", + "required":["findingArns"], + "members":{ + "findingArns":{"shape":"BatchDescribeArnList"}, + "locale":{"shape":"Locale"} + } + }, + "DescribeFindingsResponse":{ + "type":"structure", + "required":[ + "findings", + "failedItems" + ], + "members":{ + "findings":{"shape":"FindingList"}, + "failedItems":{"shape":"FailedItems"} + } + }, + "DescribeResourceGroupsRequest":{ + "type":"structure", + "required":["resourceGroupArns"], + "members":{ + "resourceGroupArns":{"shape":"BatchDescribeArnList"} + } + }, + "DescribeResourceGroupsResponse":{ + "type":"structure", + "required":[ + "resourceGroups", + "failedItems" + ], + "members":{ + "resourceGroups":{"shape":"ResourceGroupList"}, + "failedItems":{"shape":"FailedItems"} + } + }, + "DescribeRulesPackagesRequest":{ + "type":"structure", + "required":["rulesPackageArns"], + "members":{ + "rulesPackageArns":{"shape":"BatchDescribeArnList"}, + "locale":{"shape":"Locale"} + } + }, + "DescribeRulesPackagesResponse":{ + "type":"structure", + "required":[ + "rulesPackages", + "failedItems" + ], + "members":{ + "rulesPackages":{"shape":"RulesPackageList"}, + "failedItems":{"shape":"FailedItems"} + } + }, + "DurationRange":{ + "type":"structure", + "members":{ + "minSeconds":{"shape":"AssessmentRunDuration"}, + "maxSeconds":{"shape":"AssessmentRunDuration"} + } + }, + "ErrorMessage":{ + "type":"string", + "max":1000, + "min":0 + }, + "EventSubscription":{ + "type":"structure", + "required":[ + "event", + "subscribedAt" + ], + "members":{ + "event":{"shape":"InspectorEvent"}, + "subscribedAt":{"shape":"Timestamp"} + } + }, + "EventSubscriptionList":{ + "type":"list", + "member":{"shape":"EventSubscription"}, + "max":50, + "min":1 + }, + "FailedItemDetails":{ + "type":"structure", + "required":[ + "failureCode", + "retryable" + ], + "members":{ + "failureCode":{"shape":"FailedItemErrorCode"}, + "retryable":{"shape":"Bool"} + } + }, + "FailedItemErrorCode":{ + "type":"string", + "enum":[ + "INVALID_ARN", + "DUPLICATE_ARN", + "ITEM_DOES_NOT_EXIST", + "ACCESS_DENIED", + "LIMIT_EXCEEDED", + "INTERNAL_ERROR" + ] + }, + 
"FailedItems":{ + "type":"map", + "key":{"shape":"Arn"}, + "value":{"shape":"FailedItemDetails"} + }, + "FilterRulesPackageArnList":{ + "type":"list", + "member":{"shape":"Arn"}, + "max":50, + "min":0 + }, + "Finding":{ + "type":"structure", + "required":[ + "arn", + "attributes", + "userAttributes", + "createdAt", + "updatedAt" + ], + "members":{ + "arn":{"shape":"Arn"}, + "schemaVersion":{"shape":"NumericVersion"}, + "service":{"shape":"ServiceName"}, + "serviceAttributes":{"shape":"InspectorServiceAttributes"}, + "assetType":{"shape":"AssetType"}, + "assetAttributes":{"shape":"AssetAttributes"}, + "id":{"shape":"FindingId"}, + "title":{"shape":"Text"}, + "description":{"shape":"Text"}, + "recommendation":{"shape":"Text"}, + "severity":{"shape":"Severity"}, + "numericSeverity":{"shape":"NumericSeverity"}, + "confidence":{"shape":"IocConfidence"}, + "indicatorOfCompromise":{"shape":"Bool"}, + "attributes":{"shape":"AttributeList"}, + "userAttributes":{"shape":"UserAttributeList"}, + "createdAt":{"shape":"Timestamp"}, + "updatedAt":{"shape":"Timestamp"} + } + }, + "FindingFilter":{ + "type":"structure", + "members":{ + "agentIds":{"shape":"AgentIdList"}, + "autoScalingGroups":{"shape":"AutoScalingGroupList"}, + "ruleNames":{"shape":"RuleNameList"}, + "severities":{"shape":"SeverityList"}, + "rulesPackageArns":{"shape":"FilterRulesPackageArnList"}, + "attributes":{"shape":"AttributeList"}, + "userAttributes":{"shape":"AttributeList"}, + "creationTimeRange":{"shape":"TimestampRange"} + } + }, + "FindingId":{ + "type":"string", + "max":128, + "min":0 + }, + "FindingList":{ + "type":"list", + "member":{"shape":"Finding"}, + "max":10, + "min":0 + }, + "GetTelemetryMetadataRequest":{ + "type":"structure", + "required":["assessmentRunArn"], + "members":{ + "assessmentRunArn":{"shape":"Arn"} + } + }, + "GetTelemetryMetadataResponse":{ + "type":"structure", + "required":["telemetryMetadata"], + "members":{ + "telemetryMetadata":{"shape":"TelemetryMetadataList"} + } + }, + "Hostname":{ + "type":"string", + "max":256, + "min":0 + }, + "InspectorEvent":{ + "type":"string", + "enum":[ + "ASSESSMENT_RUN_STARTED", + "ASSESSMENT_RUN_COMPLETED", + "ASSESSMENT_RUN_STATE_CHANGED", + "FINDING_REPORTED", + "OTHER" + ] + }, + "InspectorServiceAttributes":{ + "type":"structure", + "required":["schemaVersion"], + "members":{ + "schemaVersion":{"shape":"NumericVersion"}, + "assessmentRunArn":{"shape":"Arn"}, + "rulesPackageArn":{"shape":"Arn"} + } + }, + "InternalException":{ + "type":"structure", + "required":[ + "message", + "canRetry" + ], + "members":{ + "message":{"shape":"ErrorMessage"}, + "canRetry":{"shape":"Bool"} + }, + "exception":true, + "fault":true + }, + "InvalidCrossAccountRoleErrorCode":{ + "type":"string", + "enum":[ + "ROLE_DOES_NOT_EXIST_OR_INVALID_TRUST_RELATIONSHIP", + "ROLE_DOES_NOT_HAVE_CORRECT_POLICY" + ] + }, + "InvalidCrossAccountRoleException":{ + "type":"structure", + "required":[ + "message", + "errorCode", + "canRetry" + ], + "members":{ + "message":{"shape":"ErrorMessage"}, + "errorCode":{"shape":"InvalidCrossAccountRoleErrorCode"}, + "canRetry":{"shape":"Bool"} + }, + "exception":true + }, + "InvalidInputErrorCode":{ + "type":"string", + "enum":[ + "INVALID_ASSESSMENT_TARGET_ARN", + "INVALID_ASSESSMENT_TEMPLATE_ARN", + "INVALID_ASSESSMENT_RUN_ARN", + "INVALID_FINDING_ARN", + "INVALID_RESOURCE_GROUP_ARN", + "INVALID_RULES_PACKAGE_ARN", + "INVALID_RESOURCE_ARN", + "INVALID_SNS_TOPIC_ARN", + "INVALID_IAM_ROLE_ARN", + "INVALID_ASSESSMENT_TARGET_NAME", + 
"INVALID_ASSESSMENT_TARGET_NAME_PATTERN", + "INVALID_ASSESSMENT_TEMPLATE_NAME", + "INVALID_ASSESSMENT_TEMPLATE_NAME_PATTERN", + "INVALID_ASSESSMENT_TEMPLATE_DURATION", + "INVALID_ASSESSMENT_TEMPLATE_DURATION_RANGE", + "INVALID_ASSESSMENT_RUN_DURATION_RANGE", + "INVALID_ASSESSMENT_RUN_START_TIME_RANGE", + "INVALID_ASSESSMENT_RUN_COMPLETION_TIME_RANGE", + "INVALID_ASSESSMENT_RUN_STATE_CHANGE_TIME_RANGE", + "INVALID_ASSESSMENT_RUN_STATE", + "INVALID_TAG", + "INVALID_TAG_KEY", + "INVALID_TAG_VALUE", + "INVALID_RESOURCE_GROUP_TAG_KEY", + "INVALID_RESOURCE_GROUP_TAG_VALUE", + "INVALID_ATTRIBUTE", + "INVALID_USER_ATTRIBUTE", + "INVALID_USER_ATTRIBUTE_KEY", + "INVALID_USER_ATTRIBUTE_VALUE", + "INVALID_PAGINATION_TOKEN", + "INVALID_MAX_RESULTS", + "INVALID_AGENT_ID", + "INVALID_AUTO_SCALING_GROUP", + "INVALID_RULE_NAME", + "INVALID_SEVERITY", + "INVALID_LOCALE", + "INVALID_EVENT", + "ASSESSMENT_TARGET_NAME_ALREADY_TAKEN", + "ASSESSMENT_TEMPLATE_NAME_ALREADY_TAKEN", + "INVALID_NUMBER_OF_ASSESSMENT_TARGET_ARNS", + "INVALID_NUMBER_OF_ASSESSMENT_TEMPLATE_ARNS", + "INVALID_NUMBER_OF_ASSESSMENT_RUN_ARNS", + "INVALID_NUMBER_OF_FINDING_ARNS", + "INVALID_NUMBER_OF_RESOURCE_GROUP_ARNS", + "INVALID_NUMBER_OF_RULES_PACKAGE_ARNS", + "INVALID_NUMBER_OF_ASSESSMENT_RUN_STATES", + "INVALID_NUMBER_OF_TAGS", + "INVALID_NUMBER_OF_RESOURCE_GROUP_TAGS", + "INVALID_NUMBER_OF_ATTRIBUTES", + "INVALID_NUMBER_OF_USER_ATTRIBUTES", + "INVALID_NUMBER_OF_AGENT_IDS", + "INVALID_NUMBER_OF_AUTO_SCALING_GROUPS", + "INVALID_NUMBER_OF_RULE_NAMES", + "INVALID_NUMBER_OF_SEVERITIES" + ] + }, + "InvalidInputException":{ + "type":"structure", + "required":[ + "message", + "errorCode", + "canRetry" + ], + "members":{ + "message":{"shape":"ErrorMessage"}, + "errorCode":{"shape":"InvalidInputErrorCode"}, + "canRetry":{"shape":"Bool"} + }, + "exception":true + }, + "IocConfidence":{ + "type":"integer", + "max":10, + "min":0 + }, + "Ipv4Address":{ + "type":"string", + "max":15, + "min":7 + }, + "Ipv4AddressList":{ + "type":"list", + "member":{"shape":"Ipv4Address"}, + "max":50, + "min":0 + }, + "LimitExceededErrorCode":{ + "type":"string", + "enum":[ + "ASSESSMENT_TARGET_LIMIT_EXCEEDED", + "ASSESSMENT_TEMPLATE_LIMIT_EXCEEDED", + "ASSESSMENT_RUN_LIMIT_EXCEEDED", + "RESOURCE_GROUP_LIMIT_EXCEEDED", + "EVENT_SUBSCRIPTION_LIMIT_EXCEEDED" + ] + }, + "LimitExceededException":{ + "type":"structure", + "required":[ + "message", + "errorCode", + "canRetry" + ], + "members":{ + "message":{"shape":"ErrorMessage"}, + "errorCode":{"shape":"LimitExceededErrorCode"}, + "canRetry":{"shape":"Bool"} + }, + "exception":true + }, + "ListAssessmentRunAgentsRequest":{ + "type":"structure", + "required":["assessmentRunArn"], + "members":{ + "assessmentRunArn":{"shape":"Arn"}, + "filter":{"shape":"AgentFilter"}, + "nextToken":{"shape":"PaginationToken"}, + "maxResults":{"shape":"ListMaxResults"} + } + }, + "ListAssessmentRunAgentsResponse":{ + "type":"structure", + "required":["assessmentRunAgents"], + "members":{ + "assessmentRunAgents":{"shape":"AssessmentRunAgentList"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListAssessmentRunsRequest":{ + "type":"structure", + "members":{ + "assessmentTemplateArns":{"shape":"ListParentArnList"}, + "filter":{"shape":"AssessmentRunFilter"}, + "nextToken":{"shape":"PaginationToken"}, + "maxResults":{"shape":"ListMaxResults"} + } + }, + "ListAssessmentRunsResponse":{ + "type":"structure", + "required":["assessmentRunArns"], + "members":{ + "assessmentRunArns":{"shape":"ListReturnedArnList"}, + 
"nextToken":{"shape":"PaginationToken"} + } + }, + "ListAssessmentTargetsRequest":{ + "type":"structure", + "members":{ + "filter":{"shape":"AssessmentTargetFilter"}, + "nextToken":{"shape":"PaginationToken"}, + "maxResults":{"shape":"ListMaxResults"} + } + }, + "ListAssessmentTargetsResponse":{ + "type":"structure", + "required":["assessmentTargetArns"], + "members":{ + "assessmentTargetArns":{"shape":"ListReturnedArnList"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListAssessmentTemplatesRequest":{ + "type":"structure", + "members":{ + "assessmentTargetArns":{"shape":"ListParentArnList"}, + "filter":{"shape":"AssessmentTemplateFilter"}, + "nextToken":{"shape":"PaginationToken"}, + "maxResults":{"shape":"ListMaxResults"} + } + }, + "ListAssessmentTemplatesResponse":{ + "type":"structure", + "required":["assessmentTemplateArns"], + "members":{ + "assessmentTemplateArns":{"shape":"ListReturnedArnList"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListEventSubscriptionsMaxResults":{"type":"integer"}, + "ListEventSubscriptionsRequest":{ + "type":"structure", + "members":{ + "resourceArn":{"shape":"Arn"}, + "nextToken":{"shape":"PaginationToken"}, + "maxResults":{"shape":"ListEventSubscriptionsMaxResults"} + } + }, + "ListEventSubscriptionsResponse":{ + "type":"structure", + "required":["subscriptions"], + "members":{ + "subscriptions":{"shape":"SubscriptionList"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListFindingsRequest":{ + "type":"structure", + "members":{ + "assessmentRunArns":{"shape":"ListParentArnList"}, + "filter":{"shape":"FindingFilter"}, + "nextToken":{"shape":"PaginationToken"}, + "maxResults":{"shape":"ListMaxResults"} + } + }, + "ListFindingsResponse":{ + "type":"structure", + "required":["findingArns"], + "members":{ + "findingArns":{"shape":"ListReturnedArnList"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListMaxResults":{"type":"integer"}, + "ListParentArnList":{ + "type":"list", + "member":{"shape":"Arn"}, + "max":50, + "min":0 + }, + "ListReturnedArnList":{ + "type":"list", + "member":{"shape":"Arn"}, + "max":100, + "min":0 + }, + "ListRulesPackagesRequest":{ + "type":"structure", + "members":{ + "nextToken":{"shape":"PaginationToken"}, + "maxResults":{"shape":"ListMaxResults"} + } + }, + "ListRulesPackagesResponse":{ + "type":"structure", + "required":["rulesPackageArns"], + "members":{ + "rulesPackageArns":{"shape":"ListReturnedArnList"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{"shape":"Arn"} + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "required":["tags"], + "members":{ + "tags":{"shape":"TagList"} + } + }, + "Locale":{ + "type":"string", + "enum":["EN_US"] + }, + "Long":{"type":"long"}, + "Message":{ + "type":"string", + "max":1000, + "min":0 + }, + "MessageType":{ + "type":"string", + "max":300, + "min":1 + }, + "NamePattern":{ + "type":"string", + "max":140, + "min":1 + }, + "NoSuchEntityErrorCode":{ + "type":"string", + "enum":[ + "ASSESSMENT_TARGET_DOES_NOT_EXIST", + "ASSESSMENT_TEMPLATE_DOES_NOT_EXIST", + "ASSESSMENT_RUN_DOES_NOT_EXIST", + "FINDING_DOES_NOT_EXIST", + "RESOURCE_GROUP_DOES_NOT_EXIST", + "RULES_PACKAGE_DOES_NOT_EXIST", + "SNS_TOPIC_DOES_NOT_EXIST", + "IAM_ROLE_DOES_NOT_EXIST" + ] + }, + "NoSuchEntityException":{ + "type":"structure", + "required":[ + "message", + "errorCode", + "canRetry" + ], + "members":{ + "message":{"shape":"ErrorMessage"}, + 
"errorCode":{"shape":"NoSuchEntityErrorCode"}, + "canRetry":{"shape":"Bool"} + }, + "exception":true + }, + "NumericSeverity":{ + "type":"double", + "max":10.0, + "min":0.0 + }, + "NumericVersion":{ + "type":"integer", + "min":0 + }, + "PaginationToken":{ + "type":"string", + "max":300, + "min":1 + }, + "PreviewAgentsMaxResults":{"type":"integer"}, + "PreviewAgentsRequest":{ + "type":"structure", + "required":["previewAgentsArn"], + "members":{ + "previewAgentsArn":{"shape":"Arn"}, + "nextToken":{"shape":"PaginationToken"}, + "maxResults":{"shape":"PreviewAgentsMaxResults"} + } + }, + "PreviewAgentsResponse":{ + "type":"structure", + "required":["agentPreviews"], + "members":{ + "agentPreviews":{"shape":"AgentPreviewList"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ProviderName":{ + "type":"string", + "max":1000, + "min":0 + }, + "RegisterCrossAccountAccessRoleRequest":{ + "type":"structure", + "required":["roleArn"], + "members":{ + "roleArn":{"shape":"Arn"} + } + }, + "RemoveAttributesFromFindingsRequest":{ + "type":"structure", + "required":[ + "findingArns", + "attributeKeys" + ], + "members":{ + "findingArns":{"shape":"AddRemoveAttributesFindingArnList"}, + "attributeKeys":{"shape":"UserAttributeKeyList"} + } + }, + "RemoveAttributesFromFindingsResponse":{ + "type":"structure", + "required":["failedItems"], + "members":{ + "failedItems":{"shape":"FailedItems"} + } + }, + "ResourceGroup":{ + "type":"structure", + "required":[ + "arn", + "tags", + "createdAt" + ], + "members":{ + "arn":{"shape":"Arn"}, + "tags":{"shape":"ResourceGroupTags"}, + "createdAt":{"shape":"Timestamp"} + } + }, + "ResourceGroupList":{ + "type":"list", + "member":{"shape":"ResourceGroup"}, + "max":10, + "min":0 + }, + "ResourceGroupTag":{ + "type":"structure", + "required":["key"], + "members":{ + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"} + } + }, + "ResourceGroupTags":{ + "type":"list", + "member":{"shape":"ResourceGroupTag"}, + "max":10, + "min":1 + }, + "RuleName":{ + "type":"string", + "max":1000 + }, + "RuleNameList":{ + "type":"list", + "member":{"shape":"RuleName"}, + "max":50, + "min":0 + }, + "RulesPackage":{ + "type":"structure", + "required":[ + "arn", + "name", + "version", + "provider" + ], + "members":{ + "arn":{"shape":"Arn"}, + "name":{"shape":"RulesPackageName"}, + "version":{"shape":"Version"}, + "provider":{"shape":"ProviderName"}, + "description":{"shape":"Text"} + } + }, + "RulesPackageList":{ + "type":"list", + "member":{"shape":"RulesPackage"}, + "max":10, + "min":0 + }, + "RulesPackageName":{ + "type":"string", + "max":1000, + "min":0 + }, + "ServiceName":{ + "type":"string", + "max":128, + "min":0 + }, + "SetTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{"shape":"Arn"}, + "tags":{"shape":"TagList"} + } + }, + "Severity":{ + "type":"string", + "enum":[ + "Low", + "Medium", + "High", + "Informational", + "Undefined" + ] + }, + "SeverityList":{ + "type":"list", + "member":{"shape":"Severity"}, + "max":50, + "min":0 + }, + "StartAssessmentRunRequest":{ + "type":"structure", + "required":["assessmentTemplateArn"], + "members":{ + "assessmentTemplateArn":{"shape":"Arn"}, + "assessmentRunName":{"shape":"AssessmentRunName"} + } + }, + "StartAssessmentRunResponse":{ + "type":"structure", + "required":["assessmentRunArn"], + "members":{ + "assessmentRunArn":{"shape":"Arn"} + } + }, + "StopAssessmentRunRequest":{ + "type":"structure", + "required":["assessmentRunArn"], + "members":{ + 
"assessmentRunArn":{"shape":"Arn"} + } + }, + "SubscribeToEventRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "event", + "topicArn" + ], + "members":{ + "resourceArn":{"shape":"Arn"}, + "event":{"shape":"InspectorEvent"}, + "topicArn":{"shape":"Arn"} + } + }, + "Subscription":{ + "type":"structure", + "required":[ + "resourceArn", + "topicArn", + "eventSubscriptions" + ], + "members":{ + "resourceArn":{"shape":"Arn"}, + "topicArn":{"shape":"Arn"}, + "eventSubscriptions":{"shape":"EventSubscriptionList"} + } + }, + "SubscriptionList":{ + "type":"list", + "member":{"shape":"Subscription"}, + "max":50, + "min":0 + }, + "Tag":{ + "type":"structure", + "required":["key"], + "members":{ + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"} + } + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":10, + "min":0 + }, + "TagValue":{ + "type":"string", + "max":256, + "min":1 + }, + "TelemetryMetadata":{ + "type":"structure", + "required":[ + "messageType", + "count" + ], + "members":{ + "messageType":{"shape":"MessageType"}, + "count":{"shape":"Long"}, + "dataSize":{"shape":"Long"} + } + }, + "TelemetryMetadataList":{ + "type":"list", + "member":{"shape":"TelemetryMetadata"}, + "max":5000, + "min":0 + }, + "Text":{ + "type":"string", + "max":20000, + "min":0 + }, + "Timestamp":{"type":"timestamp"}, + "TimestampRange":{ + "type":"structure", + "members":{ + "beginDate":{"shape":"Timestamp"}, + "endDate":{"shape":"Timestamp"} + } + }, + "UnsubscribeFromEventRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "event", + "topicArn" + ], + "members":{ + "resourceArn":{"shape":"Arn"}, + "event":{"shape":"InspectorEvent"}, + "topicArn":{"shape":"Arn"} + } + }, + "UpdateAssessmentTargetRequest":{ + "type":"structure", + "required":[ + "assessmentTargetArn", + "assessmentTargetName", + "resourceGroupArn" + ], + "members":{ + "assessmentTargetArn":{"shape":"Arn"}, + "assessmentTargetName":{"shape":"AssessmentTargetName"}, + "resourceGroupArn":{"shape":"Arn"} + } + }, + "UserAttributeKeyList":{ + "type":"list", + "member":{"shape":"AttributeKey"}, + "max":10, + "min":0 + }, + "UserAttributeList":{ + "type":"list", + "member":{"shape":"Attribute"}, + "max":10, + "min":0 + }, + "Version":{ + "type":"string", + "max":1000, + "min":0 + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/inspector/2016-02-16/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/inspector/2016-02-16/docs-2.json new file mode 100644 index 000000000..2f3ec30b7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/inspector/2016-02-16/docs-2.json @@ -0,0 +1,1199 @@ +{ + "version": "2.0", + "service": "Amazon Inspector

    Amazon Inspector enables you to analyze the behavior of your AWS resources and to identify potential security issues. For more information, see Amazon Inspector User Guide.

    ", + "operations": { + "AddAttributesToFindings": "

    Assigns attributes (key and value pairs) to the findings that are specified by the ARNs of the findings.

    ", + "CreateAssessmentTarget": "

    Creates a new assessment target using the ARN of the resource group that is generated by CreateResourceGroup. You can create up to 50 assessment targets per AWS account. You can run up to 500 concurrent agents per AWS account. For more information, see Amazon Inspector Assessment Targets.

    ", + "CreateAssessmentTemplate": "

    Creates an assessment template for the assessment target that is specified by the ARN of the assessment target.

    ", + "CreateResourceGroup": "

    Creates a resource group using the specified set of tags (key and value pairs) that are used to select the EC2 instances to be included in an Amazon Inspector assessment target. The created resource group is then used to create an Amazon Inspector assessment target. For more information, see CreateAssessmentTarget.

    ", + "DeleteAssessmentRun": "

    Deletes the assessment run that is specified by the ARN of the assessment run.

    ", + "DeleteAssessmentTarget": "

    Deletes the assessment target that is specified by the ARN of the assessment target.

    ", + "DeleteAssessmentTemplate": "

    Deletes the assessment template that is specified by the ARN of the assessment template.

    ", + "DescribeAssessmentRuns": "

    Describes the assessment runs that are specified by the ARNs of the assessment runs.

    ", + "DescribeAssessmentTargets": "

    Describes the assessment targets that are specified by the ARNs of the assessment targets.

    ", + "DescribeAssessmentTemplates": "

    Describes the assessment templates that are specified by the ARNs of the assessment templates.

    ", + "DescribeCrossAccountAccessRole": "

    Describes the IAM role that enables Amazon Inspector to access your AWS account.

    ", + "DescribeFindings": "

    Describes the findings that are specified by the ARNs of the findings.

    ", + "DescribeResourceGroups": "

    Describes the resource groups that are specified by the ARNs of the resource groups.

    ", + "DescribeRulesPackages": "

    Describes the rules packages that are specified by the ARNs of the rules packages.

    ", + "GetTelemetryMetadata": "

    Returns information about the data that is collected for the specified assessment run.

    ", + "ListAssessmentRunAgents": "

    Lists the agents of the assessment runs that are specified by the ARNs of the assessment runs.

    ", + "ListAssessmentRuns": "

    Lists the assessment runs that correspond to the assessment templates that are specified by the ARNs of the assessment templates.

    ", + "ListAssessmentTargets": "

    Lists the ARNs of the assessment targets within this AWS account. For more information about assessment targets, see Amazon Inspector Assessment Targets.

    ", + "ListAssessmentTemplates": "

    Lists the assessment templates that correspond to the assessment targets that are specified by the ARNs of the assessment targets.

    ", + "ListEventSubscriptions": "

    Lists all the event subscriptions for the assessment template that is specified by the ARN of the assessment template. For more information, see SubscribeToEvent and UnsubscribeFromEvent.

    ", + "ListFindings": "

    Lists findings that are generated by the assessment runs that are specified by the ARNs of the assessment runs.

    ", + "ListRulesPackages": "

    Lists all available Amazon Inspector rules packages.

    ", + "ListTagsForResource": "

    Lists all tags associated with an assessment template.

    ", + "PreviewAgents": "

    Previews the agents installed on the EC2 instances that are part of the specified assessment target.

    ", + "RegisterCrossAccountAccessRole": "

    Registers the IAM role that Amazon Inspector uses to list your EC2 instances at the start of the assessment run or when you call the PreviewAgents action.

    ", + "RemoveAttributesFromFindings": "

    Removes entire attributes (key and value pairs) from the specified findings where an attribute with the specified key exists.

    ", + "SetTagsForResource": "

    Sets tags (key and value pairs) on the assessment template that is specified by the ARN of the assessment template.

    ", + "StartAssessmentRun": "

    Starts the assessment run specified by the ARN of the assessment template. For this API to function properly, you must not exceed the limit of 500 concurrent agents per AWS account.

    ", + "StopAssessmentRun": "

    Stops the assessment run that is specified by the ARN of the assessment run.

    ", + "SubscribeToEvent": "

    Enables the process of sending Amazon Simple Notification Service (SNS) notifications about a specified event to a specified SNS topic.

    ", + "UnsubscribeFromEvent": "

    Disables the process of sending Amazon Simple Notification Service (SNS) notifications about a specified event to a specified SNS topic.

    ", + "UpdateAssessmentTarget": "

    Updates the assessment target that is specified by the ARN of the assessment target.

    " + }, + "shapes": { + "AccessDeniedErrorCode": { + "base": null, + "refs": { + "AccessDeniedException$errorCode": "

    Code that indicates the type of error that is generated.

    " + } + }, + "AccessDeniedException": { + "base": "

    You do not have required permissions to access the requested resource.

    ", + "refs": { + } + }, + "AddAttributesToFindingsRequest": { + "base": null, + "refs": { + } + }, + "AddAttributesToFindingsResponse": { + "base": null, + "refs": { + } + }, + "AddRemoveAttributesFindingArnList": { + "base": null, + "refs": { + "AddAttributesToFindingsRequest$findingArns": "

    The ARNs that specify the findings that you want to assign attributes to.

    ", + "RemoveAttributesFromFindingsRequest$findingArns": "

    The ARNs that specify the findings that you want to remove attributes from.

    " + } + }, + "AgentAlreadyRunningAssessment": { + "base": "

    Used in the exception that is thrown if you start an assessment run for an assessment target that includes an EC2 instance that is already participating in another started assessment run.

    ", + "refs": { + "AgentAlreadyRunningAssessmentList$member": null + } + }, + "AgentAlreadyRunningAssessmentList": { + "base": null, + "refs": { + "AgentsAlreadyRunningAssessmentException$agents": "

    " + } + }, + "AgentFilter": { + "base": "

    Contains information about an Amazon Inspector agent. This data type is used as a request parameter in the ListAssessmentRunAgents action.

    ", + "refs": { + "ListAssessmentRunAgentsRequest$filter": "

    You can use this parameter to specify a subset of data to be included in the action's response.

    For a record to match a filter, all specified filter attributes must match. When multiple values are specified for a filter attribute, any of the values can match.

    " + } + }, + "AgentHealth": { + "base": null, + "refs": { + "AgentHealthList$member": null, + "AssessmentRunAgent$agentHealth": "

    The current health state of the agent.

    " + } + }, + "AgentHealthCode": { + "base": null, + "refs": { + "AgentHealthCodeList$member": null, + "AssessmentRunAgent$agentHealthCode": "

    The detailed health state of the agent.

    " + } + }, + "AgentHealthCodeList": { + "base": null, + "refs": { + "AgentFilter$agentHealthCodes": "

    The detailed health state of the agent. Values can be set to IDLE, RUNNING, SHUTDOWN, UNHEALTHY, THROTTLED, and UNKNOWN.

    " + } + }, + "AgentHealthList": { + "base": null, + "refs": { + "AgentFilter$agentHealths": "

    The current health state of the agent. Values can be set to HEALTHY or UNHEALTHY.

    " + } + }, + "AgentId": { + "base": null, + "refs": { + "AgentAlreadyRunningAssessment$agentId": "

    ID of the agent that is running on an EC2 instance that is already participating in another started assessment run.

    ", + "AgentIdList$member": null, + "AgentPreview$agentId": "

    The ID of the EC2 instance where the agent is installed.

    ", + "AssessmentRunAgent$agentId": "

    The ID of the agent that is installed on the EC2 instance.

    ", + "AssetAttributes$agentId": "

    The ID of the agent that is installed on the EC2 instance where the finding is generated.

    " + } + }, + "AgentIdList": { + "base": null, + "refs": { + "FindingFilter$agentIds": "

    For a record to match a filter, one of the values that is specified for this data type property must be the exact match of the value of the agentId property of the Finding data type.

    " + } + }, + "AgentPreview": { + "base": "

    Used as a response element in the PreviewAgents action.

    ", + "refs": { + "AgentPreviewList$member": null + } + }, + "AgentPreviewList": { + "base": null, + "refs": { + "PreviewAgentsResponse$agentPreviews": "

    The resulting list of agents.

    " + } + }, + "AgentsAlreadyRunningAssessmentException": { + "base": "

    You started an assessment run, but one of the instances is already participating in another assessment run.

    ", + "refs": { + } + }, + "AmiId": { + "base": null, + "refs": { + "AssetAttributes$amiId": "

    The ID of the Amazon Machine Image (AMI) of the EC2 instance where the finding is generated.

    " + } + }, + "Arn": { + "base": null, + "refs": { + "AddRemoveAttributesFindingArnList$member": null, + "AgentAlreadyRunningAssessment$assessmentRunArn": "

    The ARN of the assessment run that has already been started.

    ", + "AssessmentRulesPackageArnList$member": null, + "AssessmentRun$arn": "

    The ARN of the assessment run.

    ", + "AssessmentRun$assessmentTemplateArn": "

    The ARN of the assessment template that is associated with the assessment run.

    ", + "AssessmentRunAgent$assessmentRunArn": "

    The ARN of the assessment run that is associated with the agent.

    ", + "AssessmentRunInProgressArnList$member": null, + "AssessmentRunNotification$snsTopicArn": "

    The SNS topic to which the SNS notification is sent.

    ", + "AssessmentTarget$arn": "

    The ARN that specifies the Amazon Inspector assessment target.

    ", + "AssessmentTarget$resourceGroupArn": "

    The ARN that specifies the resource group that is associated with the assessment target.

    ", + "AssessmentTemplate$arn": "

    The ARN of the assessment template.

    ", + "AssessmentTemplate$assessmentTargetArn": "

    The ARN of the assessment target that corresponds to this assessment template.

    ", + "AssessmentTemplateRulesPackageArnList$member": null, + "BatchDescribeArnList$member": null, + "CreateAssessmentTargetRequest$resourceGroupArn": "

    The ARN that specifies the resource group that is used to create the assessment target.

    ", + "CreateAssessmentTargetResponse$assessmentTargetArn": "

    The ARN that specifies the assessment target that is created.

    ", + "CreateAssessmentTemplateRequest$assessmentTargetArn": "

    The ARN that specifies the assessment target for which you want to create the assessment template.

    ", + "CreateAssessmentTemplateResponse$assessmentTemplateArn": "

    The ARN that specifies the assessment template that is created.

    ", + "CreateResourceGroupResponse$resourceGroupArn": "

    The ARN that specifies the resource group that is created.

    ", + "DeleteAssessmentRunRequest$assessmentRunArn": "

    The ARN that specifies the assessment run that you want to delete.

    ", + "DeleteAssessmentTargetRequest$assessmentTargetArn": "

    The ARN that specifies the assessment target that you want to delete.

    ", + "DeleteAssessmentTemplateRequest$assessmentTemplateArn": "

    The ARN that specifies the assessment template that you want to delete.

    ", + "DescribeCrossAccountAccessRoleResponse$roleArn": "

    The ARN that specifies the IAM role that Amazon Inspector uses to access your AWS account.

    ", + "FailedItems$key": null, + "FilterRulesPackageArnList$member": null, + "Finding$arn": "

    The ARN that specifies the finding.

    ", + "GetTelemetryMetadataRequest$assessmentRunArn": "

    The ARN that specifies the assessment run that has the telemetry data that you want to obtain.

    ", + "InspectorServiceAttributes$assessmentRunArn": "

    The ARN of the assessment run during which the finding is generated.

    ", + "InspectorServiceAttributes$rulesPackageArn": "

    The ARN of the rules package that is used to generate the finding.

    ", + "ListAssessmentRunAgentsRequest$assessmentRunArn": "

    The ARN that specifies the assessment run whose agents you want to list.

    ", + "ListEventSubscriptionsRequest$resourceArn": "

    The ARN of the assessment template for which you want to list the existing event subscriptions.

    ", + "ListParentArnList$member": null, + "ListReturnedArnList$member": null, + "ListTagsForResourceRequest$resourceArn": "

    The ARN that specifies the assessment template whose tags you want to list.

    ", + "PreviewAgentsRequest$previewAgentsArn": "

    The ARN of the assessment target whose agents you want to preview.

    ", + "RegisterCrossAccountAccessRoleRequest$roleArn": "

    The ARN of the IAM role that Amazon Inspector uses to list your EC2 instances during the assessment run or when you call the PreviewAgents action.

    ", + "ResourceGroup$arn": "

    The ARN of the resource group.

    ", + "RulesPackage$arn": "

    The ARN of the rules package.

    ", + "SetTagsForResourceRequest$resourceArn": "

    The ARN of the assessment template on which you want to set tags.

    ", + "StartAssessmentRunRequest$assessmentTemplateArn": "

    The ARN of the assessment template of the assessment run that you want to start.

    ", + "StartAssessmentRunResponse$assessmentRunArn": "

    The ARN of the assessment run that has been started.

    ", + "StopAssessmentRunRequest$assessmentRunArn": "

    The ARN of the assessment run that you want to stop.

    ", + "SubscribeToEventRequest$resourceArn": "

    The ARN of the assessment template that is used during the event for which you want to receive SNS notifications.

    ", + "SubscribeToEventRequest$topicArn": "

    The ARN of the SNS topic to which the SNS notifications are sent.

    ", + "Subscription$resourceArn": "

    The ARN of the assessment template that is used during the event for which the SNS notification is sent.

    ", + "Subscription$topicArn": "

    The ARN of the Amazon Simple Notification Service (SNS) topic to which the SNS notifications are sent.

    ", + "UnsubscribeFromEventRequest$resourceArn": "

    The ARN of the assessment template that is used during the event for which you want to stop receiving SNS notifications.

    ", + "UnsubscribeFromEventRequest$topicArn": "

    The ARN of the SNS topic to which SNS notifications are sent.

    ", + "UpdateAssessmentTargetRequest$assessmentTargetArn": "

    The ARN of the assessment target that you want to update.

    ", + "UpdateAssessmentTargetRequest$resourceGroupArn": "

    The ARN of the resource group that is used to specify the new resource group to associate with the assessment target.

    " + } + }, + "AssessmentRulesPackageArnList": { + "base": null, + "refs": { + "AssessmentRun$rulesPackageArns": "

    The rules packages selected for the assessment run.

    " + } + }, + "AssessmentRun": { + "base": "

    A snapshot of an Amazon Inspector assessment run that contains the findings of the assessment run.

    Used as the response element in the DescribeAssessmentRuns action.

    ", + "refs": { + "AssessmentRunList$member": null + } + }, + "AssessmentRunAgent": { + "base": "

    Contains information about an Amazon Inspector agent. This data type is used as a response element in the ListAssessmentRunAgents action.

    ", + "refs": { + "AssessmentRunAgentList$member": null + } + }, + "AssessmentRunAgentList": { + "base": null, + "refs": { + "ListAssessmentRunAgentsResponse$assessmentRunAgents": "

    A list of ARNs that specifies the agents returned by the action.

    " + } + }, + "AssessmentRunDuration": { + "base": null, + "refs": { + "AssessmentRun$durationInSeconds": "

    The duration of the assessment run.

    ", + "AssessmentTemplate$durationInSeconds": "

    The duration in seconds specified for this assessment template. The default value is 3600 seconds (one hour). The maximum value is 86400 seconds (one day).

    ", + "CreateAssessmentTemplateRequest$durationInSeconds": "

    The duration of the assessment run in seconds. The default value is 3600 seconds (one hour).

    ", + "DurationRange$minSeconds": "

    The minimum value of the duration range. Must be greater than zero.

    ", + "DurationRange$maxSeconds": "

    The maximum value of the duration range. Must be less than or equal to 604800 seconds (1 week).

    " + } + }, + "AssessmentRunFilter": { + "base": "

    Used as the request parameter in the ListAssessmentRuns action.

    ", + "refs": { + "ListAssessmentRunsRequest$filter": "

    You can use this parameter to specify a subset of data to be included in the action's response.

    For a record to match a filter, all specified filter attributes must match. When multiple values are specified for a filter attribute, any of the values can match.

    " + } + }, + "AssessmentRunInProgressArnList": { + "base": null, + "refs": { + "AssessmentRunInProgressException$assessmentRunArns": "

    The ARNs of the assessment runs that are currently in progress.

    " + } + }, + "AssessmentRunInProgressException": { + "base": "

    You cannot perform a specified action if an assessment run is currently in progress.

    ", + "refs": { + } + }, + "AssessmentRunList": { + "base": null, + "refs": { + "DescribeAssessmentRunsResponse$assessmentRuns": "

    Information about the assessment run.

    " + } + }, + "AssessmentRunName": { + "base": null, + "refs": { + "AssessmentRun$name": "

    The auto-generated name for the assessment run.

    ", + "StartAssessmentRunRequest$assessmentRunName": "

    You can specify the name for the assessment run, or you can use the auto-generated name that is based on the assessment template name. The name must be unique for the assessment template.
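    A minimal sketch of starting a run from a template with the vendored Go SDK; the template ARN and run name are placeholders, and default credentials are assumed:

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/inspector"
        )

        func main() {
            svc := inspector.New(session.Must(session.NewSession()))

            out, err := svc.StartAssessmentRun(&inspector.StartAssessmentRunInput{
                AssessmentTemplateArn: aws.String("arn:aws:inspector:us-west-2:123456789012:target/0-x/template/0-y"), // placeholder
                // Optional: omit to accept the auto-generated name derived
                // from the template name; must be unique per template.
                AssessmentRunName: aws.String("nightly-2017-07-26"), // placeholder
            })
            if err != nil {
                log.Fatal(err) // e.g. AgentsAlreadyRunningAssessmentException
            }
            fmt.Println("started run:", aws.StringValue(out.AssessmentRunArn))
        }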

    " + } + }, + "AssessmentRunNotification": { + "base": "

    Used as one of the elements of the AssessmentRun data type.

    ", + "refs": { + "AssessmentRunNotificationList$member": null + } + }, + "AssessmentRunNotificationList": { + "base": null, + "refs": { + "AssessmentRun$notifications": "

    A list of notifications for the event subscriptions. A notification about a particular generated finding is added to this list only once.

    " + } + }, + "AssessmentRunNotificationSnsStatusCode": { + "base": null, + "refs": { + "AssessmentRunNotification$snsPublishStatusCode": "

    The status code of the SNS notification.

    " + } + }, + "AssessmentRunState": { + "base": null, + "refs": { + "AssessmentRun$state": "

    The state of the assessment run.

    ", + "AssessmentRunStateChange$state": "

    The assessment run state.

    ", + "AssessmentRunStateList$member": null + } + }, + "AssessmentRunStateChange": { + "base": "

    Used as one of the elements of the AssessmentRun data type.

    ", + "refs": { + "AssessmentRunStateChangeList$member": null + } + }, + "AssessmentRunStateChangeList": { + "base": null, + "refs": { + "AssessmentRun$stateChanges": "

    A list of the assessment run state changes.

    " + } + }, + "AssessmentRunStateList": { + "base": null, + "refs": { + "AssessmentRunFilter$states": "

    For a record to match a filter, one of the values specified for this data type property must be the exact match of the value of the assessmentRunState property of the AssessmentRun data type.

    " + } + }, + "AssessmentTarget": { + "base": "

    Contains information about an Amazon Inspector assessment target. This data type is used as the response element in the DescribeAssessmentTargets action.

    ", + "refs": { + "AssessmentTargetList$member": null + } + }, + "AssessmentTargetFilter": { + "base": "

    Used as the request parameter in the ListAssessmentTargets action.

    ", + "refs": { + "ListAssessmentTargetsRequest$filter": "

    You can use this parameter to specify a subset of data to be included in the action's response.

    For a record to match a filter, all specified filter attributes must match. When multiple values are specified for a filter attribute, any of the values can match.

    " + } + }, + "AssessmentTargetList": { + "base": null, + "refs": { + "DescribeAssessmentTargetsResponse$assessmentTargets": "

    Information about the assessment targets.

    " + } + }, + "AssessmentTargetName": { + "base": null, + "refs": { + "AssessmentTarget$name": "

    The name of the Amazon Inspector assessment target.

    ", + "CreateAssessmentTargetRequest$assessmentTargetName": "

    The user-defined name that identifies the assessment target that you want to create. The name must be unique within the AWS account.

    ", + "UpdateAssessmentTargetRequest$assessmentTargetName": "

    The name of the assessment target that you want to update.

    " + } + }, + "AssessmentTemplate": { + "base": "

    Contains information about an Amazon Inspector assessment template. This data type is used as the response element in the DescribeAssessmentTemplates action.

    ", + "refs": { + "AssessmentTemplateList$member": null + } + }, + "AssessmentTemplateFilter": { + "base": "

    Used as the request parameter in the ListAssessmentTemplates action.

    ", + "refs": { + "ListAssessmentTemplatesRequest$filter": "

    You can use this parameter to specify a subset of data to be included in the action's response.

    For a record to match a filter, all specified filter attributes must match. When multiple values are specified for a filter attribute, any of the values can match.

    " + } + }, + "AssessmentTemplateList": { + "base": null, + "refs": { + "DescribeAssessmentTemplatesResponse$assessmentTemplates": "

    Information about the assessment templates.

    " + } + }, + "AssessmentTemplateName": { + "base": null, + "refs": { + "AssessmentTemplate$name": "

    The name of the assessment template.

    ", + "CreateAssessmentTemplateRequest$assessmentTemplateName": "

    The user-defined name that identifies the assessment template that you want to create. You can create several assessment templates for an assessment target. The names of the assessment templates that correspond to a particular assessment target must be unique.

    " + } + }, + "AssessmentTemplateRulesPackageArnList": { + "base": null, + "refs": { + "AssessmentTemplate$rulesPackageArns": "

    The rules packages that are specified for this assessment template.

    ", + "CreateAssessmentTemplateRequest$rulesPackageArns": "

    The ARNs that specify the rules packages that you want to attach to the assessment template.
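    Tying the pieces of CreateAssessmentTemplateRequest together, a hedged Go sketch; all ARNs and names are placeholders, and real rules package ARNs can be discovered with ListRulesPackages:

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/inspector"
        )

        func main() {
            svc := inspector.New(session.Must(session.NewSession()))

            out, err := svc.CreateAssessmentTemplate(&inspector.CreateAssessmentTemplateInput{
                AssessmentTargetArn:    aws.String("arn:aws:inspector:us-west-2:123456789012:target/0-x"), // placeholder
                AssessmentTemplateName: aws.String("weekly-scan"),                                         // placeholder; unique per target
                DurationInSeconds:      aws.Int64(3600),                                                   // the default: one hour
                RulesPackageArns: []*string{
                    aws.String("arn:aws:inspector:us-west-2:123456789012:rulespackage/0-XXXXXXXX"), // placeholder; see ListRulesPackages
                },
                UserAttributesForFindings: []*inspector.Attribute{
                    {Key: aws.String("team"), Value: aws.String("security")}, // copied onto every finding
                },
            })
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println("created:", aws.StringValue(out.AssessmentTemplateArn))
        }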

    " + } + }, + "AssetAttributes": { + "base": "

    A collection of attributes of the host from which the finding is generated.

    ", + "refs": { + "Finding$assetAttributes": "

    A collection of attributes of the host from which the finding is generated.

    " + } + }, + "AssetType": { + "base": null, + "refs": { + "Finding$assetType": "

    The type of the host from which the finding is generated.

    " + } + }, + "Attribute": { + "base": "

    This data type is used as a request parameter in the AddAttributesToFindings and CreateAssessmentTemplate actions.

    ", + "refs": { + "AttributeList$member": null, + "UserAttributeList$member": null + } + }, + "AttributeKey": { + "base": null, + "refs": { + "Attribute$key": "

    The attribute key.

    ", + "UserAttributeKeyList$member": null + } + }, + "AttributeList": { + "base": null, + "refs": { + "Finding$attributes": "

    The system-defined attributes for the finding.

    ", + "FindingFilter$attributes": "

    For a record to match a filter, the list of values that are specified for this data type property must be contained in the list of values of the attributes property of the Finding data type.

    ", + "FindingFilter$userAttributes": "

    For a record to match a filter, the value that is specified for this data type property must be contained in the list of values of the userAttributes property of the Finding data type.

    " + } + }, + "AttributeValue": { + "base": null, + "refs": { + "Attribute$value": "

    The value assigned to the attribute key.

    " + } + }, + "AutoScalingGroup": { + "base": null, + "refs": { + "AgentPreview$autoScalingGroup": "

    The Auto Scaling group for the EC2 instance where the agent is installed.

    ", + "AssessmentRunAgent$autoScalingGroup": "

    The Auto Scaling group of the EC2 instance that is specified by the agent ID.

    ", + "AssetAttributes$autoScalingGroup": "

    The Auto Scaling group of the EC2 instance where the finding is generated.

    ", + "AutoScalingGroupList$member": null + } + }, + "AutoScalingGroupList": { + "base": null, + "refs": { + "FindingFilter$autoScalingGroups": "

    For a record to match a filter, one of the values that is specified for this data type property must be the exact match of the value of the autoScalingGroup property of the Finding data type.

    " + } + }, + "BatchDescribeArnList": { + "base": null, + "refs": { + "DescribeAssessmentRunsRequest$assessmentRunArns": "

    The ARNs that specify the assessment runs that you want to describe.

    ", + "DescribeAssessmentTargetsRequest$assessmentTargetArns": "

    The ARNs that specify the assessment targets that you want to describe.

    ", + "DescribeAssessmentTemplatesRequest$assessmentTemplateArns": "

    The ARNs that specify the assessment templates that you want to describe.

    ", + "DescribeFindingsRequest$findingArns": "

    The ARNs that specify the findings that you want to describe.

    ", + "DescribeResourceGroupsRequest$resourceGroupArns": "

    The ARNs that specify the resource groups that you want to describe.

    ", + "DescribeRulesPackagesRequest$rulesPackageArns": "

    The ARNs that specify the rules packages that you want to describe.

    " + } + }, + "Bool": { + "base": null, + "refs": { + "AccessDeniedException$canRetry": "

    You can immediately retry your request.

    ", + "AgentsAlreadyRunningAssessmentException$agentsTruncated": "

    ", + "AgentsAlreadyRunningAssessmentException$canRetry": "

    You can immediately retry your request.

    ", + "AssessmentRun$dataCollected": "

    A Boolean value (true or false) that specifies whether the process of collecting data from the agents is completed.

    ", + "AssessmentRunInProgressException$assessmentRunArnsTruncated": "

    Boolean value that indicates whether the ARN list of the assessment runs is truncated.

    ", + "AssessmentRunInProgressException$canRetry": "

    You can immediately retry your request.

    ", + "AssessmentRunNotification$error": "

    The Boolean value that specifies whether the notification represents an error.

    ", + "DescribeCrossAccountAccessRoleResponse$valid": "

    A Boolean value that specifies whether the IAM role has the necessary policies attached to enable Amazon Inspector to access your AWS account.

    ", + "FailedItemDetails$retryable": "

    Indicates whether you can immediately retry a request for this item for a specified resource.

    ", + "Finding$indicatorOfCompromise": "

    This data element is currently not used.

    ", + "InternalException$canRetry": "

    You can immediately retry your request.

    ", + "InvalidCrossAccountRoleException$canRetry": "

    You can immediately retry your request.

    ", + "InvalidInputException$canRetry": "

    You can immediately retry your request.

    ", + "LimitExceededException$canRetry": "

    You can immediately retry your request.

    ", + "NoSuchEntityException$canRetry": "

    You can immediately retry your request.

    " + } + }, + "CreateAssessmentTargetRequest": { + "base": null, + "refs": { + } + }, + "CreateAssessmentTargetResponse": { + "base": null, + "refs": { + } + }, + "CreateAssessmentTemplateRequest": { + "base": null, + "refs": { + } + }, + "CreateAssessmentTemplateResponse": { + "base": null, + "refs": { + } + }, + "CreateResourceGroupRequest": { + "base": null, + "refs": { + } + }, + "CreateResourceGroupResponse": { + "base": null, + "refs": { + } + }, + "DeleteAssessmentRunRequest": { + "base": null, + "refs": { + } + }, + "DeleteAssessmentTargetRequest": { + "base": null, + "refs": { + } + }, + "DeleteAssessmentTemplateRequest": { + "base": null, + "refs": { + } + }, + "DescribeAssessmentRunsRequest": { + "base": null, + "refs": { + } + }, + "DescribeAssessmentRunsResponse": { + "base": null, + "refs": { + } + }, + "DescribeAssessmentTargetsRequest": { + "base": null, + "refs": { + } + }, + "DescribeAssessmentTargetsResponse": { + "base": null, + "refs": { + } + }, + "DescribeAssessmentTemplatesRequest": { + "base": null, + "refs": { + } + }, + "DescribeAssessmentTemplatesResponse": { + "base": null, + "refs": { + } + }, + "DescribeCrossAccountAccessRoleResponse": { + "base": null, + "refs": { + } + }, + "DescribeFindingsRequest": { + "base": null, + "refs": { + } + }, + "DescribeFindingsResponse": { + "base": null, + "refs": { + } + }, + "DescribeResourceGroupsRequest": { + "base": null, + "refs": { + } + }, + "DescribeResourceGroupsResponse": { + "base": null, + "refs": { + } + }, + "DescribeRulesPackagesRequest": { + "base": null, + "refs": { + } + }, + "DescribeRulesPackagesResponse": { + "base": null, + "refs": { + } + }, + "DurationRange": { + "base": "

    This data type is used in the AssessmentTemplateFilter data type.

    ", + "refs": { + "AssessmentRunFilter$durationRange": "

    For a record to match a filter, the value that is specified for this data type property must inclusively match any value between the specified minimum and maximum values of the durationInSeconds property of the AssessmentRun data type.

    ", + "AssessmentTemplateFilter$durationRange": "

    For a record to match a filter, the value specified for this data type property must inclusively match any value between the specified minimum and maximum values of the durationInSeconds property of the AssessmentTemplate data type.

    " + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "AccessDeniedException$message": "

    Details of the exception error.

    ", + "AgentsAlreadyRunningAssessmentException$message": "

    Details of the exception error.

    ", + "AssessmentRunInProgressException$message": "

    Details of the exception error.

    ", + "InternalException$message": "

    Details of the exception error.

    ", + "InvalidCrossAccountRoleException$message": "

    Details of the exception error.

    ", + "InvalidInputException$message": "

    Details of the exception error.

    ", + "LimitExceededException$message": "

    Details of the exception error.

    ", + "NoSuchEntityException$message": "

    Details of the exception error.

    " + } + }, + "EventSubscription": { + "base": "

    This data type is used in the Subscription data type.

    ", + "refs": { + "EventSubscriptionList$member": null + } + }, + "EventSubscriptionList": { + "base": null, + "refs": { + "Subscription$eventSubscriptions": "

    The list of existing event subscriptions.

    " + } + }, + "FailedItemDetails": { + "base": "

    Includes details about the failed items.

    ", + "refs": { + "FailedItems$value": null + } + }, + "FailedItemErrorCode": { + "base": null, + "refs": { + "FailedItemDetails$failureCode": "

    The status code of a failed item.

    " + } + }, + "FailedItems": { + "base": null, + "refs": { + "AddAttributesToFindingsResponse$failedItems": "

    Attribute details that cannot be described. An error code is provided for each failed item.

    ", + "DescribeAssessmentRunsResponse$failedItems": "

    Assessment run details that cannot be described. An error code is provided for each failed item.

    ", + "DescribeAssessmentTargetsResponse$failedItems": "

    Assessment target details that cannot be described. An error code is provided for each failed item.

    ", + "DescribeAssessmentTemplatesResponse$failedItems": "

    Assessment template details that cannot be described. An error code is provided for each failed item.

    ", + "DescribeFindingsResponse$failedItems": "

    Finding details that cannot be described. An error code is provided for each failed item.

    ", + "DescribeResourceGroupsResponse$failedItems": "

    Resource group details that cannot be described. An error code is provided for each failed item.

    ", + "DescribeRulesPackagesResponse$failedItems": "

    Rules package details that cannot be described. An error code is provided for each failed item.

    ", + "RemoveAttributesFromFindingsResponse$failedItems": "

    Attributes details that cannot be described. An error code is provided for each failed item.

    " + } + }, + "FilterRulesPackageArnList": { + "base": null, + "refs": { + "AssessmentRunFilter$rulesPackageArns": "

    For a record to match a filter, the value that is specified for this data type property must be contained in the list of values of the rulesPackages property of the AssessmentRun data type.

    ", + "AssessmentTemplateFilter$rulesPackageArns": "

    For a record to match a filter, the values that are specified for this data type property must be contained in the list of values of the rulesPackageArns property of the AssessmentTemplate data type.

    ", + "FindingFilter$rulesPackageArns": "

    For a record to match a filter, one of the values that is specified for this data type property must be the exact match of the value of the rulesPackageArn property of the Finding data type.

    " + } + }, + "Finding": { + "base": "

    Contains information about an Amazon Inspector finding. This data type is used as the response element in the DescribeFindings action.

    ", + "refs": { + "FindingList$member": null + } + }, + "FindingFilter": { + "base": "

    This data type is used as a request parameter in the ListFindings action.

    ", + "refs": { + "ListFindingsRequest$filter": "

    You can use this parameter to specify a subset of data to be included in the action's response.

    For a record to match a filter, all specified filter attributes must match. When multiple values are specified for a filter attribute, any of the values can match.
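    Applied to findings, the same rule reads: any of the listed severities may match, but every attribute that is set on the filter must match. A hedged sketch with a placeholder run ARN; note that ListFindings returns finding ARNs, which DescribeFindings can expand into full findings.

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/inspector"
        )

        func main() {
            svc := inspector.New(session.Must(session.NewSession()))

            out, err := svc.ListFindings(&inspector.ListFindingsInput{
                AssessmentRunArns: []*string{aws.String("arn:aws:inspector:us-west-2:123456789012:target/0-x/template/0-y/run/0-z")}, // placeholder
                Filter: &inspector.FindingFilter{
                    Severities: []*string{aws.String("High"), aws.String("Medium")},
                },
                MaxResults: aws.Int64(100),
            })
            if err != nil {
                log.Fatal(err)
            }
            for _, arn := range out.FindingArns {
                fmt.Println(aws.StringValue(arn))
            }
        }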

    " + } + }, + "FindingId": { + "base": null, + "refs": { + "Finding$id": "

    The ID of the finding.

    " + } + }, + "FindingList": { + "base": null, + "refs": { + "DescribeFindingsResponse$findings": "

    Information about the finding.

    " + } + }, + "GetTelemetryMetadataRequest": { + "base": null, + "refs": { + } + }, + "GetTelemetryMetadataResponse": { + "base": null, + "refs": { + } + }, + "Hostname": { + "base": null, + "refs": { + "AssetAttributes$hostname": "

    The hostname of the EC2 instance where the finding is generated.

    " + } + }, + "InspectorEvent": { + "base": null, + "refs": { + "AssessmentRunNotification$event": "

    The event for which a notification is sent.

    ", + "EventSubscription$event": "

    The event for which Amazon Simple Notification Service (SNS) notifications are sent.

    ", + "SubscribeToEventRequest$event": "

    The event for which you want to receive SNS notifications.
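    A hedged sketch of wiring an assessment template to an SNS topic; both ARNs are placeholders, and FINDING_REPORTED is one of the InspectorEvent values:

        package main

        import (
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/inspector"
        )

        func main() {
            svc := inspector.New(session.Must(session.NewSession()))

            _, err := svc.SubscribeToEvent(&inspector.SubscribeToEventInput{
                ResourceArn: aws.String("arn:aws:inspector:us-west-2:123456789012:target/0-x/template/0-y"), // placeholder template ARN
                Event:       aws.String("FINDING_REPORTED"),                                                 // e.g. also ASSESSMENT_RUN_COMPLETED
                TopicArn:    aws.String("arn:aws:sns:us-west-2:123456789012:inspector-events"),              // placeholder topic ARN
            })
            if err != nil {
                log.Fatal(err)
            }
        }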

    ", + "UnsubscribeFromEventRequest$event": "

    The event for which you want to stop receiving SNS notifications.

    " + } + }, + "InspectorServiceAttributes": { + "base": "

    This data type is used in the Finding data type.

    ", + "refs": { + "Finding$serviceAttributes": null + } + }, + "InternalException": { + "base": "

    Internal server error.

    ", + "refs": { + } + }, + "InvalidCrossAccountRoleErrorCode": { + "base": null, + "refs": { + "InvalidCrossAccountRoleException$errorCode": "

    Code that indicates the type of error that is generated.

    " + } + }, + "InvalidCrossAccountRoleException": { + "base": "

    Amazon Inspector cannot assume the cross-account role that it needs to list your EC2 instances during the assessment run.

    ", + "refs": { + } + }, + "InvalidInputErrorCode": { + "base": null, + "refs": { + "InvalidInputException$errorCode": "

    Code that indicates the type of error that is generated.

    " + } + }, + "InvalidInputException": { + "base": "

    The request was rejected because an invalid or out-of-range value was supplied for an input parameter.

    ", + "refs": { + } + }, + "IocConfidence": { + "base": null, + "refs": { + "Finding$confidence": "

    This data element is currently not used.

    " + } + }, + "Ipv4Address": { + "base": null, + "refs": { + "Ipv4AddressList$member": null + } + }, + "Ipv4AddressList": { + "base": null, + "refs": { + "AssetAttributes$ipv4Addresses": "

    The list of IPv4 addresses of the EC2 instance where the finding is generated.

    " + } + }, + "LimitExceededErrorCode": { + "base": null, + "refs": { + "LimitExceededException$errorCode": "

    Code that indicates the type of error that is generated.

    " + } + }, + "LimitExceededException": { + "base": "

    The request was rejected because it attempted to create resources beyond the current AWS account limits. The error code describes the limit exceeded.

    ", + "refs": { + } + }, + "ListAssessmentRunAgentsRequest": { + "base": null, + "refs": { + } + }, + "ListAssessmentRunAgentsResponse": { + "base": null, + "refs": { + } + }, + "ListAssessmentRunsRequest": { + "base": null, + "refs": { + } + }, + "ListAssessmentRunsResponse": { + "base": null, + "refs": { + } + }, + "ListAssessmentTargetsRequest": { + "base": null, + "refs": { + } + }, + "ListAssessmentTargetsResponse": { + "base": null, + "refs": { + } + }, + "ListAssessmentTemplatesRequest": { + "base": null, + "refs": { + } + }, + "ListAssessmentTemplatesResponse": { + "base": null, + "refs": { + } + }, + "ListEventSubscriptionsMaxResults": { + "base": null, + "refs": { + "ListEventSubscriptionsRequest$maxResults": "

    You can use this parameter to indicate the maximum number of items you want in the response. The default value is 10. The maximum value is 500.

    " + } + }, + "ListEventSubscriptionsRequest": { + "base": null, + "refs": { + } + }, + "ListEventSubscriptionsResponse": { + "base": null, + "refs": { + } + }, + "ListFindingsRequest": { + "base": null, + "refs": { + } + }, + "ListFindingsResponse": { + "base": null, + "refs": { + } + }, + "ListMaxResults": { + "base": null, + "refs": { + "ListAssessmentRunAgentsRequest$maxResults": "

    You can use this parameter to indicate the maximum number of items that you want in the response. The default value is 10. The maximum value is 500.

    ", + "ListAssessmentRunsRequest$maxResults": "

    You can use this parameter to indicate the maximum number of items that you want in the response. The default value is 10. The maximum value is 500.

    ", + "ListAssessmentTargetsRequest$maxResults": "

    You can use this parameter to indicate the maximum number of items you want in the response. The default value is 10. The maximum value is 500.

    ", + "ListAssessmentTemplatesRequest$maxResults": "

    You can use this parameter to indicate the maximum number of items you want in the response. The default value is 10. The maximum value is 500.

    ", + "ListFindingsRequest$maxResults": "

    You can use this parameter to indicate the maximum number of items you want in the response. The default value is 10. The maximum value is 500.

    ", + "ListRulesPackagesRequest$maxResults": "

    You can use this parameter to indicate the maximum number of items you want in the response. The default value is 10. The maximum value is 500.

    " + } + }, + "ListParentArnList": { + "base": null, + "refs": { + "ListAssessmentRunsRequest$assessmentTemplateArns": "

    The ARNs that specify the assessment templates whose assessment runs you want to list.

    ", + "ListAssessmentTemplatesRequest$assessmentTargetArns": "

    A list of ARNs that specifies the assessment targets whose assessment templates you want to list.

    ", + "ListFindingsRequest$assessmentRunArns": "

    The ARNs of the assessment runs that generate the findings that you want to list.

    " + } + }, + "ListReturnedArnList": { + "base": null, + "refs": { + "ListAssessmentRunsResponse$assessmentRunArns": "

    A list of ARNs that specifies the assessment runs that are returned by the action.

    ", + "ListAssessmentTargetsResponse$assessmentTargetArns": "

    A list of ARNs that specifies the assessment targets that are returned by the action.

    ", + "ListAssessmentTemplatesResponse$assessmentTemplateArns": "

    A list of ARNs that specifies the assessment templates returned by the action.

    ", + "ListFindingsResponse$findingArns": "

    A list of ARNs that specifies the findings returned by the action.

    ", + "ListRulesPackagesResponse$rulesPackageArns": "

    The list of ARNs that specifies the rules packages returned by the action.

    " + } + }, + "ListRulesPackagesRequest": { + "base": null, + "refs": { + } + }, + "ListRulesPackagesResponse": { + "base": null, + "refs": { + } + }, + "ListTagsForResourceRequest": { + "base": null, + "refs": { + } + }, + "ListTagsForResourceResponse": { + "base": null, + "refs": { + } + }, + "Locale": { + "base": null, + "refs": { + "DescribeFindingsRequest$locale": "

    The locale into which you want to translate a finding description, recommendation, and the short description that identifies the finding.

    ", + "DescribeRulesPackagesRequest$locale": "

    The locale that you want to translate a rules package description into.

    " + } + }, + "Long": { + "base": null, + "refs": { + "TelemetryMetadata$count": "

    The count of messages that the agent sends to the Amazon Inspector service.

    ", + "TelemetryMetadata$dataSize": "

    The data size of messages that the agent sends to the Amazon Inspector service.

    " + } + }, + "Message": { + "base": null, + "refs": { + "AssessmentRunAgent$agentHealthDetails": "

    The description for the agent health code.

    ", + "AssessmentRunNotification$message": null + } + }, + "MessageType": { + "base": null, + "refs": { + "TelemetryMetadata$messageType": "

    A specific type of behavioral data that is collected by the agent.

    " + } + }, + "NamePattern": { + "base": null, + "refs": { + "AssessmentRunFilter$namePattern": "

    For a record to match a filter, an explicit value or a string containing a wildcard that is specified for this data type property must match the value of the assessmentRunName property of the AssessmentRun data type.

    ", + "AssessmentTargetFilter$assessmentTargetNamePattern": "

    For a record to match a filter, an explicit value or a string that contains a wildcard that is specified for this data type property must match the value of the assessmentTargetName property of the AssessmentTarget data type.

    ", + "AssessmentTemplateFilter$namePattern": "

    For a record to match a filter, an explicit value or a string that contains a wildcard that is specified for this data type property must match the value of the assessmentTemplateName property of the AssessmentTemplate data type.

    " + } + }, + "NoSuchEntityErrorCode": { + "base": null, + "refs": { + "NoSuchEntityException$errorCode": "

    Code that indicates the type of error that is generated.

    " + } + }, + "NoSuchEntityException": { + "base": "

    The request was rejected because it referenced an entity that does not exist. The error code describes the entity.

    ", + "refs": { + } + }, + "NumericSeverity": { + "base": null, + "refs": { + "Finding$numericSeverity": "

    The numeric value of the finding severity.

    " + } + }, + "NumericVersion": { + "base": null, + "refs": { + "AssetAttributes$schemaVersion": "

    The schema version of this data type.

    ", + "Finding$schemaVersion": "

    The schema version of this data type.

    ", + "InspectorServiceAttributes$schemaVersion": "

    The schema version of this data type.

    " + } + }, + "PaginationToken": { + "base": null, + "refs": { + "ListAssessmentRunAgentsRequest$nextToken": "

    You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the ListAssessmentRunAgents action. Subsequent calls to the action fill nextToken in the request with the value of NextToken from the previous response to continue listing data.

    ", + "ListAssessmentRunAgentsResponse$nextToken": "

    When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to null.

    ", + "ListAssessmentRunsRequest$nextToken": "

    You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the ListAssessmentRuns action. Subsequent calls to the action fill nextToken in the request with the value of NextToken from the previous response to continue listing data.

    ", + "ListAssessmentRunsResponse$nextToken": "

    When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to null.

    ", + "ListAssessmentTargetsRequest$nextToken": "

    You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the ListAssessmentTargets action. Subsequent calls to the action fill nextToken in the request with the value of NextToken from the previous response to continue listing data.

    ", + "ListAssessmentTargetsResponse$nextToken": "

    When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to null.

    ", + "ListAssessmentTemplatesRequest$nextToken": "

    You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the ListAssessmentTemplates action. Subsequent calls to the action fill nextToken in the request with the value of NextToken from the previous response to continue listing data.

    ", + "ListAssessmentTemplatesResponse$nextToken": "

    When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to null.

    ", + "ListEventSubscriptionsRequest$nextToken": "

    You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the ListEventSubscriptions action. Subsequent calls to the action fill nextToken in the request with the value of NextToken from the previous response to continue listing data.

    ", + "ListEventSubscriptionsResponse$nextToken": "

    When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to null.

    ", + "ListFindingsRequest$nextToken": "

    You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the ListFindings action. Subsequent calls to the action fill nextToken in the request with the value of NextToken from the previous response to continue listing data.

    ", + "ListFindingsResponse$nextToken": "

    When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to null.

    ", + "ListRulesPackagesRequest$nextToken": "

    You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the ListRulesPackages action. Subsequent calls to the action fill nextToken in the request with the value of NextToken from the previous response to continue listing data.

    ", + "ListRulesPackagesResponse$nextToken": "

    When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to null.

    ", + "PreviewAgentsRequest$nextToken": "

    You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the PreviewAgents action. Subsequent calls to the action fill nextToken in the request with the value of NextToken from the previous response to continue listing data.

    ", + "PreviewAgentsResponse$nextToken": "

    When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to null.
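    Every paginated action in this model shares the nextToken contract described above. A small Go loop makes it concrete; this sketch assumes default credentials and uses ListAssessmentRuns, but the same shape works for any of the List* or PreviewAgents actions:

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/inspector"
        )

        func main() {
            svc := inspector.New(session.Must(session.NewSession()))

            // Leave NextToken unset (null) on the first call.
            input := &inspector.ListAssessmentRunsInput{MaxResults: aws.Int64(500)}
            for {
                page, err := svc.ListAssessmentRuns(input)
                if err != nil {
                    log.Fatal(err)
                }
                for _, arn := range page.AssessmentRunArns {
                    fmt.Println(aws.StringValue(arn))
                }
                if page.NextToken == nil {
                    break // no more data to list
                }
                input.NextToken = page.NextToken // resume where the last page stopped
            }
        }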

    " + } + }, + "PreviewAgentsMaxResults": { + "base": null, + "refs": { + "PreviewAgentsRequest$maxResults": "

    You can use this parameter to indicate the maximum number of items you want in the response. The default value is 10. The maximum value is 500.

    " + } + }, + "PreviewAgentsRequest": { + "base": null, + "refs": { + } + }, + "PreviewAgentsResponse": { + "base": null, + "refs": { + } + }, + "ProviderName": { + "base": null, + "refs": { + "RulesPackage$provider": "

    The provider of the rules package.

    " + } + }, + "RegisterCrossAccountAccessRoleRequest": { + "base": null, + "refs": { + } + }, + "RemoveAttributesFromFindingsRequest": { + "base": null, + "refs": { + } + }, + "RemoveAttributesFromFindingsResponse": { + "base": null, + "refs": { + } + }, + "ResourceGroup": { + "base": "

    Contains information about a resource group. The resource group defines a set of tags that, when queried, identify the AWS resources that make up the assessment target. This data type is used as the response element in the DescribeResourceGroups action.

    ", + "refs": { + "ResourceGroupList$member": null + } + }, + "ResourceGroupList": { + "base": null, + "refs": { + "DescribeResourceGroupsResponse$resourceGroups": "

    Information about a resource group.

    " + } + }, + "ResourceGroupTag": { + "base": "

    This data type is used as one of the elements of the ResourceGroup data type.

    ", + "refs": { + "ResourceGroupTags$member": null + } + }, + "ResourceGroupTags": { + "base": null, + "refs": { + "CreateResourceGroupRequest$resourceGroupTags": "

    A collection of keys and an array of possible values, '[{\"key\":\"key1\",\"values\":[\"Value1\",\"Value2\"]},{\"key\":\"Key2\",\"values\":[\"Value3\"]}]'.

    For example, '[{\"key\":\"Name\",\"values\":[\"TestEC2Instance\"]}]'.
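    In this Go SDK the same tags are passed as structured ResourceGroupTag values rather than as a JSON string; a hedged sketch of the example above:

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/inspector"
        )

        func main() {
            svc := inspector.New(session.Must(session.NewSession()))

            out, err := svc.CreateResourceGroup(&inspector.CreateResourceGroupInput{
                ResourceGroupTags: []*inspector.ResourceGroupTag{
                    {Key: aws.String("Name"), Value: aws.String("TestEC2Instance")},
                },
            })
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println("resource group:", aws.StringValue(out.ResourceGroupArn))
        }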

    ", + "ResourceGroup$tags": "

    The tags (key and value pairs) of the resource group. This data type property is used in the CreateResourceGroup action.

    " + } + }, + "RuleName": { + "base": null, + "refs": { + "RuleNameList$member": null + } + }, + "RuleNameList": { + "base": null, + "refs": { + "FindingFilter$ruleNames": "

    For a record to match a filter, one of the values that is specified for this data type property must be the exact match of the value of the ruleName property of the Finding data type.

    " + } + }, + "RulesPackage": { + "base": "

    Contains information about an Amazon Inspector rules package. This data type is used as the response element in the DescribeRulesPackages action.

    ", + "refs": { + "RulesPackageList$member": null + } + }, + "RulesPackageList": { + "base": null, + "refs": { + "DescribeRulesPackagesResponse$rulesPackages": "

    Information about the rules package.

    " + } + }, + "RulesPackageName": { + "base": null, + "refs": { + "RulesPackage$name": "

    The name of the rules package.

    " + } + }, + "ServiceName": { + "base": null, + "refs": { + "Finding$service": "

    The data element is set to \"Inspector\".

    " + } + }, + "SetTagsForResourceRequest": { + "base": null, + "refs": { + } + }, + "Severity": { + "base": null, + "refs": { + "Finding$severity": "

    The finding severity. Values can be set to High, Medium, Low, and Informational.

    ", + "SeverityList$member": null + } + }, + "SeverityList": { + "base": null, + "refs": { + "FindingFilter$severities": "

    For a record to match a filter, one of the values that is specified for this data type property must be the exact match of the value of the severity property of the Finding data type.

    " + } + }, + "StartAssessmentRunRequest": { + "base": null, + "refs": { + } + }, + "StartAssessmentRunResponse": { + "base": null, + "refs": { + } + }, + "StopAssessmentRunRequest": { + "base": null, + "refs": { + } + }, + "SubscribeToEventRequest": { + "base": null, + "refs": { + } + }, + "Subscription": { + "base": "

    This data type is used as a response element in the ListEventSubscriptions action.

    ", + "refs": { + "SubscriptionList$member": null + } + }, + "SubscriptionList": { + "base": null, + "refs": { + "ListEventSubscriptionsResponse$subscriptions": "

    Details of the returned event subscriptions.

    " + } + }, + "Tag": { + "base": "

    A key and value pair. This data type is used as a request parameter in the SetTagsForResource action and a response element in the ListTagsForResource action.

    ", + "refs": { + "TagList$member": null + } + }, + "TagKey": { + "base": null, + "refs": { + "ResourceGroupTag$key": "

    A tag key.

    ", + "Tag$key": "

    A tag key.

    " + } + }, + "TagList": { + "base": null, + "refs": { + "ListTagsForResourceResponse$tags": "

    A collection of key and value pairs.

    ", + "SetTagsForResourceRequest$tags": "

    A collection of key and value pairs that you want to set to the assessment template.

    " + } + }, + "TagValue": { + "base": null, + "refs": { + "ResourceGroupTag$value": "

    The value assigned to a tag key.

    ", + "Tag$value": "

    A value assigned to a tag key.

    " + } + }, + "TelemetryMetadata": { + "base": "

    The metadata about the Amazon Inspector application data metrics collected by the agent. This data type is used as the response element in the GetTelemetryMetadata action.

    ", + "refs": { + "TelemetryMetadataList$member": null + } + }, + "TelemetryMetadataList": { + "base": null, + "refs": { + "AssessmentRunAgent$telemetryMetadata": "

    The Amazon Inspector application data metrics that are collected by the agent.

    ", + "GetTelemetryMetadataResponse$telemetryMetadata": "

    Telemetry details.

    " + } + }, + "Text": { + "base": null, + "refs": { + "Finding$title": "

    The name of the finding.

    ", + "Finding$description": "

    The description of the finding.

    ", + "Finding$recommendation": "

    The recommendation for the finding.

    ", + "RulesPackage$description": "

    The description of the rules package.

    " + } + }, + "Timestamp": { + "base": null, + "refs": { + "AssessmentRun$createdAt": "

    The time when StartAssessmentRun was called.

    ", + "AssessmentRun$startedAt": "

    The time when the assessment run started.

    ", + "AssessmentRun$completedAt": "

    The assessment run completion time, which corresponds to the time when the evaluation of the rules packages completed or failed.

    ", + "AssessmentRun$stateChangedAt": "

    The last time when the assessment run's state changed.

    ", + "AssessmentRunNotification$date": "

    The date of the notification.

    ", + "AssessmentRunStateChange$stateChangedAt": "

    The last time the assessment run state changed.

    ", + "AssessmentTarget$createdAt": "

    The time at which the assessment target is created.

    ", + "AssessmentTarget$updatedAt": "

    The time at which UpdateAssessmentTarget is called.

    ", + "AssessmentTemplate$createdAt": "

    The time at which the assessment template is created.

    ", + "DescribeCrossAccountAccessRoleResponse$registeredAt": "

    The date when the cross-account access role was registered.

    ", + "EventSubscription$subscribedAt": "

    The time at which SubscribeToEvent is called.

    ", + "Finding$createdAt": "

    The time when the finding was generated.

    ", + "Finding$updatedAt": "

    The time when AddAttributesToFindings is called.

    ", + "ResourceGroup$createdAt": "

    The time at which the resource group is created.

    ", + "TimestampRange$beginDate": "

    The minimum value of the timestamp range.

    ", + "TimestampRange$endDate": "

    The maximum value of the timestamp range.

    " + } + }, + "TimestampRange": { + "base": "

    This data type is used in the AssessmentRunFilter data type.

    ", + "refs": { + "AssessmentRunFilter$startTimeRange": "

    For a record to match a filter, the value that is specified for this data type property must inclusively match any value between the specified minimum and maximum values of the startTime property of the AssessmentRun data type.

    ", + "AssessmentRunFilter$completionTimeRange": "

    For a record to match a filter, the value that is specified for this data type property must inclusively match any value between the specified minimum and maximum values of the completedAt property of the AssessmentRun data type.

    ", + "AssessmentRunFilter$stateChangeTimeRange": "

    For a record to match a filter, the value that is specified for this data type property must match the stateChangedAt property of the AssessmentRun data type.

    ", + "FindingFilter$creationTimeRange": "

    The time range during which the finding is generated.

    " + } + }, + "UnsubscribeFromEventRequest": { + "base": null, + "refs": { + } + }, + "UpdateAssessmentTargetRequest": { + "base": null, + "refs": { + } + }, + "UserAttributeKeyList": { + "base": null, + "refs": { + "RemoveAttributesFromFindingsRequest$attributeKeys": "

    The array of attribute keys that you want to remove from specified findings.

    " + } + }, + "UserAttributeList": { + "base": null, + "refs": { + "AddAttributesToFindingsRequest$attributes": "

    The array of attributes that you want to assign to specified findings.
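    A hedged sketch of attaching a user-defined attribute to findings; the finding ARN is a placeholder, and partial failures come back in FailedItems rather than as an error:

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/inspector"
        )

        func main() {
            svc := inspector.New(session.Must(session.NewSession()))

            out, err := svc.AddAttributesToFindings(&inspector.AddAttributesToFindingsInput{
                FindingArns: []*string{aws.String("arn:aws:inspector:us-west-2:123456789012:target/0-x/template/0-y/run/0-z/finding/0-f")}, // placeholder
                Attributes: []*inspector.Attribute{
                    {Key: aws.String("reviewed-by"), Value: aws.String("security-team")}, // illustrative key/value
                },
            })
            if err != nil {
                log.Fatal(err)
            }
            // Each finding that could not be updated is keyed by ARN, with a
            // failure code and a flag that says whether a retry can succeed.
            for arn, item := range out.FailedItems {
                fmt.Println(arn, aws.StringValue(item.FailureCode), aws.BoolValue(item.Retryable))
            }
        }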

    ", + "AssessmentRun$userAttributesForFindings": "

    The user-defined attributes that are assigned to every generated finding.

    ", + "AssessmentTemplate$userAttributesForFindings": "

    The user-defined attributes that are assigned to every generated finding from the assessment run that uses this assessment template.

    ", + "CreateAssessmentTemplateRequest$userAttributesForFindings": "

    The user-defined attributes that are assigned to every finding that is generated by the assessment run that uses this assessment template.

    ", + "Finding$userAttributes": "

    The user-defined attributes that are assigned to the finding.

    " + } + }, + "Version": { + "base": null, + "refs": { + "RulesPackage$version": "

    The version ID of the rules package.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/inspector/2016-02-16/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/inspector/2016-02-16/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/inspector/2016-02-16/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/iot-data/2015-05-28/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/iot-data/2015-05-28/api-2.json new file mode 100644 index 000000000..9f6a9f4af --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/iot-data/2015-05-28/api-2.json @@ -0,0 +1,263 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-05-28", + "endpointPrefix":"data.iot", + "protocol":"rest-json", + "serviceFullName":"AWS IoT Data Plane", + "signatureVersion":"v4", + "signingName":"iotdata" + }, + "operations":{ + "DeleteThingShadow":{ + "name":"DeleteThingShadow", + "http":{ + "method":"DELETE", + "requestUri":"/things/{thingName}/shadow" + }, + "input":{"shape":"DeleteThingShadowRequest"}, + "output":{"shape":"DeleteThingShadowResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalFailureException"}, + {"shape":"MethodNotAllowedException"}, + {"shape":"UnsupportedDocumentEncodingException"} + ] + }, + "GetThingShadow":{ + "name":"GetThingShadow", + "http":{ + "method":"GET", + "requestUri":"/things/{thingName}/shadow" + }, + "input":{"shape":"GetThingShadowRequest"}, + "output":{"shape":"GetThingShadowResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalFailureException"}, + {"shape":"MethodNotAllowedException"}, + {"shape":"UnsupportedDocumentEncodingException"} + ] + }, + "Publish":{ + "name":"Publish", + "http":{ + "method":"POST", + "requestUri":"/topics/{topic}" + }, + "input":{"shape":"PublishRequest"}, + "errors":[ + {"shape":"InternalFailureException"}, + {"shape":"InvalidRequestException"}, + {"shape":"UnauthorizedException"}, + {"shape":"MethodNotAllowedException"} + ] + }, + "UpdateThingShadow":{ + "name":"UpdateThingShadow", + "http":{ + "method":"POST", + "requestUri":"/things/{thingName}/shadow" + }, + "input":{"shape":"UpdateThingShadowRequest"}, + "output":{"shape":"UpdateThingShadowResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"RequestEntityTooLargeException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalFailureException"}, + {"shape":"MethodNotAllowedException"}, + {"shape":"UnsupportedDocumentEncodingException"} + ] + } + }, + "shapes":{ + "ConflictException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "DeleteThingShadowRequest":{ + "type":"structure", + "required":["thingName"], + "members":{ + "thingName":{ + "shape":"ThingName", + "location":"uri", + "locationName":"thingName" + } + } + }, + "DeleteThingShadowResponse":{ + "type":"structure", + "required":["payload"], + "members":{ + 
"payload":{"shape":"JsonDocument"} + }, + "payload":"payload" + }, + "ErrorMessage":{"type":"string"}, + "GetThingShadowRequest":{ + "type":"structure", + "required":["thingName"], + "members":{ + "thingName":{ + "shape":"ThingName", + "location":"uri", + "locationName":"thingName" + } + } + }, + "GetThingShadowResponse":{ + "type":"structure", + "members":{ + "payload":{"shape":"JsonDocument"} + }, + "payload":"payload" + }, + "InternalFailureException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "InvalidRequestException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "JsonDocument":{"type":"blob"}, + "MethodNotAllowedException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":405}, + "exception":true + }, + "Payload":{"type":"blob"}, + "PublishRequest":{ + "type":"structure", + "required":["topic"], + "members":{ + "topic":{ + "shape":"Topic", + "location":"uri", + "locationName":"topic" + }, + "qos":{ + "shape":"Qos", + "location":"querystring", + "locationName":"qos" + }, + "payload":{"shape":"Payload"} + }, + "payload":"payload" + }, + "Qos":{ + "type":"integer", + "max":1, + "min":0 + }, + "RequestEntityTooLargeException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":413}, + "exception":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "ServiceUnavailableException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + "ThingName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[a-zA-Z0-9_-]+" + }, + "ThrottlingException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":429}, + "exception":true + }, + "Topic":{"type":"string"}, + "UnauthorizedException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":401}, + "exception":true + }, + "UnsupportedDocumentEncodingException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":415}, + "exception":true + }, + "UpdateThingShadowRequest":{ + "type":"structure", + "required":[ + "thingName", + "payload" + ], + "members":{ + "thingName":{ + "shape":"ThingName", + "location":"uri", + "locationName":"thingName" + }, + "payload":{"shape":"JsonDocument"} + }, + "payload":"payload" + }, + "UpdateThingShadowResponse":{ + "type":"structure", + "members":{ + "payload":{"shape":"JsonDocument"} + }, + "payload":"payload" + }, + "errorMessage":{"type":"string"} + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/iot-data/2015-05-28/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/iot-data/2015-05-28/docs-2.json new file mode 100644 index 000000000..09e16dbd7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/iot-data/2015-05-28/docs-2.json @@ -0,0 +1,152 @@ +{ + "version": "2.0", + "service": "AWS IoT
AWS IoT-Data enables secure, bi-directional communication between Internet-connected things (such as sensors, actuators, embedded devices, or smart appliances) and the AWS cloud. It implements a broker for applications and things to publish messages over HTTP (Publish) and retrieve, update, and delete thing shadows. A thing shadow is a persistent representation of your things and their state in the AWS cloud.", + "operations": { + "DeleteThingShadow": "Deletes the thing shadow for the specified thing. For more information, see DeleteThingShadow in the AWS IoT Developer Guide.", + "GetThingShadow": "Gets the thing shadow for the specified thing. For more information, see GetThingShadow in the AWS IoT Developer Guide.", + "Publish": "Publishes state information. For more information, see HTTP Protocol in the AWS IoT Developer Guide.", + "UpdateThingShadow": "Updates the thing shadow for the specified thing. For more information, see UpdateThingShadow in the AWS IoT Developer Guide." + }, + "shapes": { + "ConflictException": { + "base": "The specified version does not match the version of the document.", + "refs": { + } + }, + "DeleteThingShadowRequest": { + "base": "The input for the DeleteThingShadow operation.", + "refs": { + } + }, + "DeleteThingShadowResponse": { + "base": "The output from the DeleteThingShadow operation.", + "refs": { + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "ConflictException$message": "The message for the exception.", + "MethodNotAllowedException$message": "The message for the exception.", + "RequestEntityTooLargeException$message": "The message for the exception." + } + }, + "GetThingShadowRequest": { + "base": "The input for the GetThingShadow operation.", + "refs": { + } + }, + "GetThingShadowResponse": { + "base": "The output from the GetThingShadow operation.", + "refs": { + } + }, + "InternalFailureException": { + "base": "An unexpected error has occurred.", + "refs": { + } + }, + "InvalidRequestException": { + "base": "The request is not valid.", + "refs": { + } + }, + "JsonDocument": { + "base": null, + "refs": { + "DeleteThingShadowResponse$payload": "The state information, in JSON format.", + "GetThingShadowResponse$payload": "The state information, in JSON format.", + "UpdateThingShadowRequest$payload": "The state information, in JSON format.", + "UpdateThingShadowResponse$payload": "The state information, in JSON format." + } + }, + "MethodNotAllowedException": { + "base": "The specified combination of HTTP verb and URI is not supported.", + "refs": { + } + }, + "Payload": { + "base": null, + "refs": { + "PublishRequest$payload": "The state information, in JSON format." + } + }, + "PublishRequest": { + "base": "The input for the Publish operation.", + "refs": { + } + }, + "Qos": { + "base": null, + "refs": { + "PublishRequest$qos": "The Quality of Service (QoS) level." + } + }, + "RequestEntityTooLargeException": { + "base": "The payload exceeds the maximum size allowed.", + "refs": { + } + }, + "ResourceNotFoundException": { + "base": "The specified resource does not exist.", + "refs": { + } + }, + "ServiceUnavailableException": { + "base": "The service is temporarily unavailable.", + "refs": { + } + }, + "ThingName": { + "base": null, + "refs": { + "DeleteThingShadowRequest$thingName": "The name of the thing.", + "GetThingShadowRequest$thingName": "The name of the thing.", + "UpdateThingShadowRequest$thingName": "The name of the thing." + } + }, + "ThrottlingException": { + "base": "The rate exceeds the limit.", + "refs": { + } + }, + "Topic": { + "base": null, + "refs": { + "PublishRequest$topic": "The name of the MQTT topic." + } + }, + "UnauthorizedException": { + "base": "You are not authorized to perform this operation.", + "refs": { + } + }, + "UnsupportedDocumentEncodingException": { + "base": "The document encoding is not supported.", + "refs": { + } + }, + "UpdateThingShadowRequest": { + "base": "The input for the UpdateThingShadow operation.", + "refs": { + } + }, + "UpdateThingShadowResponse": { + "base": "The output from the UpdateThingShadow operation.", + "refs": { + } + }, + "errorMessage": { + "base": null, + "refs": { + "InternalFailureException$message": "The message for the exception.", + "InvalidRequestException$message": "The message for the exception.", + "ResourceNotFoundException$message": "The message for the exception.", + "ServiceUnavailableException$message": "The message for the exception.", + "ThrottlingException$message": "The message for the exception.", + "UnauthorizedException$message": "The message for the exception.", + "UnsupportedDocumentEncodingException$message": "The message for the exception." + } + } + } +}
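For orientation, the two models above (api-2.json and docs-2.json) are the inputs the SDK's code generator compiles into a data-plane client; in upstream aws-sdk-go that package is service/iotdataplane. The sketch below exercises the four operations the model defines. It is illustrative only, not part of the vendored patch: the region, thing name, topic, and shadow payloads are placeholder assumptions rather than values taken from this change.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iotdataplane"
)

func main() {
	// Placeholder region; a real caller would point the client at the
	// account's IoT data endpoint.
	sess := session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-east-1"),
	}))
	svc := iotdataplane.New(sess)

	// UpdateThingShadow maps to POST /things/{thingName}/shadow; the
	// payload is an opaque JSON blob (the JsonDocument shape above).
	if _, err := svc.UpdateThingShadow(&iotdataplane.UpdateThingShadowInput{
		ThingName: aws.String("my-thing"), // placeholder thing name
		Payload:   []byte(`{"state":{"desired":{"led":"on"}}}`),
	}); err != nil {
		log.Fatal(err)
	}

	// GetThingShadow maps to GET /things/{thingName}/shadow and returns
	// the stored shadow document as raw bytes.
	out, err := svc.GetThingShadow(&iotdataplane.GetThingShadowInput{
		ThingName: aws.String("my-thing"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("shadow document: %s\n", out.Payload)

	// Publish maps to POST /topics/{topic}; the qos querystring is
	// limited to 0 or 1 by the Qos shape.
	if _, err := svc.Publish(&iotdataplane.PublishInput{
		Topic:   aws.String("my/topic"), // placeholder topic
		Qos:     aws.Int64(0),
		Payload: []byte(`{"hello":"world"}`),
	}); err != nil {
		log.Fatal(err)
	}

	// DeleteThingShadow maps to DELETE /things/{thingName}/shadow.
	if _, err := svc.DeleteThingShadow(&iotdataplane.DeleteThingShadowInput{
		ThingName: aws.String("my-thing"),
	}); err != nil {
		log.Fatal(err)
	}
}

The payload fields are plain byte slices because the model types them as blobs, so any document encoding the service accepts passes through the client unchanged.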
    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/iot-data/2015-05-28/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/iot-data/2015-05-28/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/iot-data/2015-05-28/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/iot/2015-05-28/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/iot/2015-05-28/api-2.json new file mode 100644 index 000000000..ad5172074 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/iot/2015-05-28/api-2.json @@ -0,0 +1,3800 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-05-28", + "endpointPrefix":"iot", + "serviceFullName":"AWS IoT", + "signatureVersion":"v4", + "signingName":"execute-api", + "protocol":"rest-json" + }, + "operations":{ + "AcceptCertificateTransfer":{ + "name":"AcceptCertificateTransfer", + "http":{ + "method":"PATCH", + "requestUri":"/accept-certificate-transfer/{certificateId}" + }, + "input":{"shape":"AcceptCertificateTransferRequest"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TransferAlreadyCompletedException", + "error":{"httpStatusCode":410}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "AttachPrincipalPolicy":{ + "name":"AttachPrincipalPolicy", + "http":{ + "method":"PUT", + "requestUri":"/principal-policies/{policyName}" + }, + "input":{"shape":"AttachPrincipalPolicyRequest"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"LimitExceededException", + "error":{"httpStatusCode":410}, + "exception":true + } + ] + }, + "AttachThingPrincipal":{ + "name":"AttachThingPrincipal", + "http":{ + "method":"PUT", + "requestUri":"/things/{thingName}/principals" + }, + "input":{"shape":"AttachThingPrincipalRequest"}, + "output":{"shape":"AttachThingPrincipalResponse"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + 
"shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "CancelCertificateTransfer":{ + "name":"CancelCertificateTransfer", + "http":{ + "method":"PATCH", + "requestUri":"/cancel-certificate-transfer/{certificateId}" + }, + "input":{"shape":"CancelCertificateTransferRequest"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TransferAlreadyCompletedException", + "error":{"httpStatusCode":410}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "CreateCertificateFromCsr":{ + "name":"CreateCertificateFromCsr", + "http":{ + "method":"POST", + "requestUri":"/certificates" + }, + "input":{"shape":"CreateCertificateFromCsrRequest"}, + "output":{"shape":"CreateCertificateFromCsrResponse"}, + "errors":[ + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "CreateKeysAndCertificate":{ + "name":"CreateKeysAndCertificate", + "http":{ + "method":"POST", + "requestUri":"/keys-and-certificate" + }, + "input":{"shape":"CreateKeysAndCertificateRequest"}, + "output":{"shape":"CreateKeysAndCertificateResponse"}, + "errors":[ + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "CreatePolicy":{ + "name":"CreatePolicy", + "http":{ + "method":"POST", + "requestUri":"/policies/{policyName}" + }, + "input":{"shape":"CreatePolicyRequest"}, + "output":{"shape":"CreatePolicyResponse"}, + "errors":[ + { + "shape":"ResourceAlreadyExistsException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"MalformedPolicyException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + 
"shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "CreatePolicyVersion":{ + "name":"CreatePolicyVersion", + "http":{ + "method":"POST", + "requestUri":"/policies/{policyName}/version" + }, + "input":{"shape":"CreatePolicyVersionRequest"}, + "output":{"shape":"CreatePolicyVersionResponse"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"MalformedPolicyException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"VersionsLimitExceededException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "CreateThing":{ + "name":"CreateThing", + "http":{ + "method":"POST", + "requestUri":"/things/{thingName}" + }, + "input":{"shape":"CreateThingRequest"}, + "output":{"shape":"CreateThingResponse"}, + "errors":[ + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"ResourceAlreadyExistsException", + "error":{"httpStatusCode":409}, + "exception":true + } + ] + }, + "CreateTopicRule":{ + "name":"CreateTopicRule", + "http":{ + "method":"POST", + "requestUri":"/rules/{ruleName}" + }, + "input":{"shape":"CreateTopicRuleRequest"}, + "errors":[ + { + "shape":"SqlParseException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceAlreadyExistsException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + } + ] + }, + "DeleteCACertificate":{ + "name":"DeleteCACertificate", + "http":{ + "method":"DELETE", + "requestUri":"/cacertificate/{caCertificateId}" + }, + "input":{"shape":"DeleteCACertificateRequest"}, + "output":{"shape":"DeleteCACertificateResponse"}, + "errors":[ + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"CertificateStateException", + "error":{"httpStatusCode":406}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + 
{ + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + } + ] + }, + "DeleteCertificate":{ + "name":"DeleteCertificate", + "http":{ + "method":"DELETE", + "requestUri":"/certificates/{certificateId}" + }, + "input":{"shape":"DeleteCertificateRequest"}, + "errors":[ + { + "shape":"CertificateStateException", + "error":{"httpStatusCode":406}, + "exception":true + }, + { + "shape":"DeleteConflictException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + } + ] + }, + "DeletePolicy":{ + "name":"DeletePolicy", + "http":{ + "method":"DELETE", + "requestUri":"/policies/{policyName}" + }, + "input":{"shape":"DeletePolicyRequest"}, + "errors":[ + { + "shape":"DeleteConflictException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "DeletePolicyVersion":{ + "name":"DeletePolicyVersion", + "http":{ + "method":"DELETE", + "requestUri":"/policies/{policyName}/version/{policyVersionId}" + }, + "input":{"shape":"DeletePolicyVersionRequest"}, + "errors":[ + { + "shape":"DeleteConflictException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "DeleteRegistrationCode":{ + "name":"DeleteRegistrationCode", + "http":{ + "method":"DELETE", + "requestUri":"/registrationcode" + }, + "input":{"shape":"DeleteRegistrationCodeRequest"}, + "output":{"shape":"DeleteRegistrationCodeResponse"}, + "errors":[ + { + "shape":"ThrottlingException", + 
"error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "DeleteThing":{ + "name":"DeleteThing", + "http":{ + "method":"DELETE", + "requestUri":"/things/{thingName}" + }, + "input":{"shape":"DeleteThingRequest"}, + "output":{"shape":"DeleteThingResponse"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "DeleteTopicRule":{ + "name":"DeleteTopicRule", + "http":{ + "method":"DELETE", + "requestUri":"/rules/{ruleName}" + }, + "input":{"shape":"DeleteTopicRuleRequest"}, + "errors":[ + { + "shape":"InternalException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + } + ] + }, + "DescribeCACertificate":{ + "name":"DescribeCACertificate", + "http":{ + "method":"GET", + "requestUri":"/cacertificate/{caCertificateId}" + }, + "input":{"shape":"DescribeCACertificateRequest"}, + "output":{"shape":"DescribeCACertificateResponse"}, + "errors":[ + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + } + ] + }, + "DescribeCertificate":{ + "name":"DescribeCertificate", + "http":{ + "method":"GET", + "requestUri":"/certificates/{certificateId}" + }, + "input":{"shape":"DescribeCertificateRequest"}, + "output":{"shape":"DescribeCertificateResponse"}, + "errors":[ + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + 
"shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + } + ] + }, + "DescribeEndpoint":{ + "name":"DescribeEndpoint", + "http":{ + "method":"GET", + "requestUri":"/endpoint" + }, + "input":{"shape":"DescribeEndpointRequest"}, + "output":{"shape":"DescribeEndpointResponse"}, + "errors":[ + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "DescribeThing":{ + "name":"DescribeThing", + "http":{ + "method":"GET", + "requestUri":"/things/{thingName}" + }, + "input":{"shape":"DescribeThingRequest"}, + "output":{"shape":"DescribeThingResponse"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "DetachPrincipalPolicy":{ + "name":"DetachPrincipalPolicy", + "http":{ + "method":"DELETE", + "requestUri":"/principal-policies/{policyName}" + }, + "input":{"shape":"DetachPrincipalPolicyRequest"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "DetachThingPrincipal":{ + "name":"DetachThingPrincipal", + "http":{ + "method":"DELETE", + "requestUri":"/things/{thingName}/principals" + }, + "input":{"shape":"DetachThingPrincipalRequest"}, + "output":{"shape":"DetachThingPrincipalResponse"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "DisableTopicRule":{ + "name":"DisableTopicRule", + "http":{ + "method":"POST", + "requestUri":"/rules/{ruleName}/disable" + }, + "input":{"shape":"DisableTopicRuleRequest"}, + "errors":[ + { + 
"shape":"InternalException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + } + ] + }, + "EnableTopicRule":{ + "name":"EnableTopicRule", + "http":{ + "method":"POST", + "requestUri":"/rules/{ruleName}/enable" + }, + "input":{"shape":"EnableTopicRuleRequest"}, + "errors":[ + { + "shape":"InternalException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + } + ] + }, + "GetLoggingOptions":{ + "name":"GetLoggingOptions", + "http":{ + "method":"GET", + "requestUri":"/loggingOptions" + }, + "input":{"shape":"GetLoggingOptionsRequest"}, + "output":{"shape":"GetLoggingOptionsResponse"}, + "errors":[ + { + "shape":"InternalException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + } + ] + }, + "GetPolicy":{ + "name":"GetPolicy", + "http":{ + "method":"GET", + "requestUri":"/policies/{policyName}" + }, + "input":{"shape":"GetPolicyRequest"}, + "output":{"shape":"GetPolicyResponse"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "GetPolicyVersion":{ + "name":"GetPolicyVersion", + "http":{ + "method":"GET", + "requestUri":"/policies/{policyName}/version/{policyVersionId}" + }, + "input":{"shape":"GetPolicyVersionRequest"}, + "output":{"shape":"GetPolicyVersionResponse"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "GetRegistrationCode":{ + "name":"GetRegistrationCode", + "http":{ + "method":"GET", + "requestUri":"/registrationcode" + }, + "input":{"shape":"GetRegistrationCodeRequest"}, + 
"output":{"shape":"GetRegistrationCodeResponse"}, + "errors":[ + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "GetTopicRule":{ + "name":"GetTopicRule", + "http":{ + "method":"GET", + "requestUri":"/rules/{ruleName}" + }, + "input":{"shape":"GetTopicRuleRequest"}, + "output":{"shape":"GetTopicRuleResponse"}, + "errors":[ + { + "shape":"InternalException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + } + ] + }, + "ListCACertificates":{ + "name":"ListCACertificates", + "http":{ + "method":"GET", + "requestUri":"/cacertificates" + }, + "input":{"shape":"ListCACertificatesRequest"}, + "output":{"shape":"ListCACertificatesResponse"}, + "errors":[ + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "ListCertificates":{ + "name":"ListCertificates", + "http":{ + "method":"GET", + "requestUri":"/certificates" + }, + "input":{"shape":"ListCertificatesRequest"}, + "output":{"shape":"ListCertificatesResponse"}, + "errors":[ + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "ListCertificatesByCA":{ + "name":"ListCertificatesByCA", + "http":{ + "method":"GET", + "requestUri":"/certificates-by-ca/{caCertificateId}" + }, + "input":{"shape":"ListCertificatesByCARequest"}, + "output":{"shape":"ListCertificatesByCAResponse"}, + "errors":[ + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + 
"fault":true + } + ] + }, + "ListPolicies":{ + "name":"ListPolicies", + "http":{ + "method":"GET", + "requestUri":"/policies" + }, + "input":{"shape":"ListPoliciesRequest"}, + "output":{"shape":"ListPoliciesResponse"}, + "errors":[ + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "ListPolicyPrincipals":{ + "name":"ListPolicyPrincipals", + "http":{ + "method":"GET", + "requestUri":"/policy-principals" + }, + "input":{"shape":"ListPolicyPrincipalsRequest"}, + "output":{"shape":"ListPolicyPrincipalsResponse"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "ListPolicyVersions":{ + "name":"ListPolicyVersions", + "http":{ + "method":"GET", + "requestUri":"/policies/{policyName}/version" + }, + "input":{"shape":"ListPolicyVersionsRequest"}, + "output":{"shape":"ListPolicyVersionsResponse"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "ListPrincipalPolicies":{ + "name":"ListPrincipalPolicies", + "http":{ + "method":"GET", + "requestUri":"/principal-policies" + }, + "input":{"shape":"ListPrincipalPoliciesRequest"}, + "output":{"shape":"ListPrincipalPoliciesResponse"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "ListPrincipalThings":{ + "name":"ListPrincipalThings", + "http":{ + "method":"GET", + "requestUri":"/principals/things" + }, + 
"input":{"shape":"ListPrincipalThingsRequest"}, + "output":{"shape":"ListPrincipalThingsResponse"}, + "errors":[ + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "ListThingPrincipals":{ + "name":"ListThingPrincipals", + "http":{ + "method":"GET", + "requestUri":"/things/{thingName}/principals" + }, + "input":{"shape":"ListThingPrincipalsRequest"}, + "output":{"shape":"ListThingPrincipalsResponse"}, + "errors":[ + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "ListThings":{ + "name":"ListThings", + "http":{ + "method":"GET", + "requestUri":"/things" + }, + "input":{"shape":"ListThingsRequest"}, + "output":{"shape":"ListThingsResponse"}, + "errors":[ + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "ListTopicRules":{ + "name":"ListTopicRules", + "http":{ + "method":"GET", + "requestUri":"/rules" + }, + "input":{"shape":"ListTopicRulesRequest"}, + "output":{"shape":"ListTopicRulesResponse"}, + "errors":[ + { + "shape":"InternalException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + } + ] + }, + "RegisterCACertificate":{ + "name":"RegisterCACertificate", + "http":{ + "method":"POST", + "requestUri":"/cacertificate" + }, + "input":{"shape":"RegisterCACertificateRequest"}, + "output":{"shape":"RegisterCACertificateResponse"}, + "errors":[ + { + "shape":"ResourceAlreadyExistsException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"RegistrationCodeValidationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"CertificateValidationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{"httpStatusCode":410}, + 
"exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "RegisterCertificate":{ + "name":"RegisterCertificate", + "http":{ + "method":"POST", + "requestUri":"/certificate/register" + }, + "input":{"shape":"RegisterCertificateRequest"}, + "output":{"shape":"RegisterCertificateResponse"}, + "errors":[ + { + "shape":"ResourceAlreadyExistsException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"CertificateValidationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"CertificateStateException", + "error":{"httpStatusCode":406}, + "exception":true + }, + { + "shape":"CertificateConflictException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "RejectCertificateTransfer":{ + "name":"RejectCertificateTransfer", + "http":{ + "method":"PATCH", + "requestUri":"/reject-certificate-transfer/{certificateId}" + }, + "input":{"shape":"RejectCertificateTransferRequest"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TransferAlreadyCompletedException", + "error":{"httpStatusCode":410}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "ReplaceTopicRule":{ + "name":"ReplaceTopicRule", + "http":{ + "method":"PATCH", + "requestUri":"/rules/{ruleName}" + }, + "input":{"shape":"ReplaceTopicRuleRequest"}, + "errors":[ + { + "shape":"SqlParseException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + } + ] + }, + "SetDefaultPolicyVersion":{ + "name":"SetDefaultPolicyVersion", + "http":{ + "method":"PATCH", + "requestUri":"/policies/{policyName}/version/{policyVersionId}" + }, + "input":{"shape":"SetDefaultPolicyVersionRequest"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + 
"error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "SetLoggingOptions":{ + "name":"SetLoggingOptions", + "http":{ + "method":"POST", + "requestUri":"/loggingOptions" + }, + "input":{"shape":"SetLoggingOptionsRequest"}, + "errors":[ + { + "shape":"InternalException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + } + ] + }, + "TransferCertificate":{ + "name":"TransferCertificate", + "http":{ + "method":"PATCH", + "requestUri":"/transfer-certificate/{certificateId}" + }, + "input":{"shape":"TransferCertificateRequest"}, + "output":{"shape":"TransferCertificateResponse"}, + "errors":[ + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"CertificateStateException", + "error":{"httpStatusCode":406}, + "exception":true + }, + { + "shape":"TransferConflictException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "UpdateCACertificate":{ + "name":"UpdateCACertificate", + "http":{ + "method":"PUT", + "requestUri":"/cacertificate/{caCertificateId}" + }, + "input":{"shape":"UpdateCACertificateRequest"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "UpdateCertificate":{ + "name":"UpdateCertificate", + "http":{ + "method":"PUT", + "requestUri":"/certificates/{certificateId}" + }, + "input":{"shape":"UpdateCertificateRequest"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"CertificateStateException", + "error":{"httpStatusCode":406}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + 
"shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "UpdateThing":{ + "name":"UpdateThing", + "http":{ + "method":"PATCH", + "requestUri":"/things/{thingName}" + }, + "input":{"shape":"UpdateThingRequest"}, + "output":{"shape":"UpdateThingResponse"}, + "errors":[ + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + } + ] + } + }, + "shapes":{ + "AcceptCertificateTransferRequest":{ + "type":"structure", + "required":["certificateId"], + "members":{ + "certificateId":{ + "shape":"CertificateId", + "location":"uri", + "locationName":"certificateId" + }, + "setAsActive":{ + "shape":"SetAsActive", + "location":"querystring", + "locationName":"setAsActive" + } + } + }, + "Action":{ + "type":"structure", + "members":{ + "dynamoDB":{"shape":"DynamoDBAction"}, + "lambda":{"shape":"LambdaAction"}, + "sns":{"shape":"SnsAction"}, + "sqs":{"shape":"SqsAction"}, + "kinesis":{"shape":"KinesisAction"}, + "republish":{"shape":"RepublishAction"}, + "s3":{"shape":"S3Action"}, + "firehose":{"shape":"FirehoseAction"}, + "cloudwatchMetric":{"shape":"CloudwatchMetricAction"}, + "cloudwatchAlarm":{"shape":"CloudwatchAlarmAction"}, + "elasticsearch":{"shape":"ElasticsearchAction"} + } + }, + "ActionList":{ + "type":"list", + "member":{"shape":"Action"}, + "min":0, + "max":10 + }, + "AlarmName":{"type":"string"}, + "AscendingOrder":{"type":"boolean"}, + "AttachPrincipalPolicyRequest":{ + "type":"structure", + "required":[ + "policyName", + "principal" + ], + "members":{ + "policyName":{ + "shape":"PolicyName", + "location":"uri", + "locationName":"policyName" + }, + "principal":{ + "shape":"Principal", + "location":"header", + "locationName":"x-amzn-iot-principal" + } + } + }, + "AttachThingPrincipalRequest":{ + "type":"structure", + "required":[ + "thingName", + "principal" + ], + "members":{ + "thingName":{ + "shape":"ThingName", + "location":"uri", + "locationName":"thingName" + }, + "principal":{ + "shape":"Principal", + "location":"header", + "locationName":"x-amzn-principal" + } + } + }, + "AttachThingPrincipalResponse":{ + "type":"structure", + "members":{ + } + }, + "AttributeName":{ + "type":"string", + "max":128, + "pattern":"[a-zA-Z0-9_.,@/:#-]+" + }, + "AttributePayload":{ + "type":"structure", + "members":{ + "attributes":{"shape":"Attributes"} + } + }, + "AttributeValue":{ + "type":"string", + "max":1024, + "pattern":"[a-zA-Z0-9_.,@/:#-]+" + }, + "Attributes":{ + "type":"map", + "key":{"shape":"AttributeName"}, + "value":{"shape":"AttributeValue"} + }, + "AwsAccountId":{ + "type":"string", + "pattern":"[0-9]{12}" + }, + "AwsArn":{"type":"string"}, + 
"AwsIotSqlVersion":{"type":"string"}, + "BucketName":{"type":"string"}, + "CACertificate":{ + "type":"structure", + "members":{ + "certificateArn":{"shape":"CertificateArn"}, + "certificateId":{"shape":"CertificateId"}, + "status":{"shape":"CACertificateStatus"}, + "creationDate":{"shape":"DateType"} + } + }, + "CACertificateDescription":{ + "type":"structure", + "members":{ + "certificateArn":{"shape":"CertificateArn"}, + "certificateId":{"shape":"CertificateId"}, + "status":{"shape":"CACertificateStatus"}, + "certificatePem":{"shape":"CertificatePem"}, + "ownedBy":{"shape":"AwsAccountId"}, + "creationDate":{"shape":"DateType"} + } + }, + "CACertificateStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "INACTIVE" + ] + }, + "CACertificates":{ + "type":"list", + "member":{"shape":"CACertificate"} + }, + "CancelCertificateTransferRequest":{ + "type":"structure", + "required":["certificateId"], + "members":{ + "certificateId":{ + "shape":"CertificateId", + "location":"uri", + "locationName":"certificateId" + } + } + }, + "Certificate":{ + "type":"structure", + "members":{ + "certificateArn":{"shape":"CertificateArn"}, + "certificateId":{"shape":"CertificateId"}, + "status":{"shape":"CertificateStatus"}, + "creationDate":{"shape":"DateType"} + } + }, + "CertificateArn":{"type":"string"}, + "CertificateConflictException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "CertificateDescription":{ + "type":"structure", + "members":{ + "certificateArn":{"shape":"CertificateArn"}, + "certificateId":{"shape":"CertificateId"}, + "caCertificateId":{"shape":"CertificateId"}, + "status":{"shape":"CertificateStatus"}, + "certificatePem":{"shape":"CertificatePem"}, + "ownedBy":{"shape":"AwsAccountId"}, + "previousOwnedBy":{"shape":"AwsAccountId"}, + "creationDate":{"shape":"DateType"}, + "lastModifiedDate":{"shape":"DateType"}, + "transferData":{"shape":"TransferData"} + } + }, + "CertificateId":{ + "type":"string", + "min":64, + "max":64, + "pattern":"(0x)?[a-fA-F0-9]+" + }, + "CertificatePem":{ + "type":"string", + "min":1, + "max":65536 + }, + "CertificateSigningRequest":{ + "type":"string", + "min":1 + }, + "CertificateStateException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":406}, + "exception":true + }, + "CertificateStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "INACTIVE", + "REVOKED", + "PENDING_TRANSFER", + "REGISTER_INACTIVE" + ] + }, + "CertificateValidationException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "Certificates":{ + "type":"list", + "member":{"shape":"Certificate"} + }, + "ClientId":{"type":"string"}, + "CloudwatchAlarmAction":{ + "type":"structure", + "required":[ + "roleArn", + "alarmName", + "stateReason", + "stateValue" + ], + "members":{ + "roleArn":{"shape":"AwsArn"}, + "alarmName":{"shape":"AlarmName"}, + "stateReason":{"shape":"StateReason"}, + "stateValue":{"shape":"StateValue"} + } + }, + "CloudwatchMetricAction":{ + "type":"structure", + "required":[ + "roleArn", + "metricNamespace", + "metricName", + "metricValue", + "metricUnit" + ], + "members":{ + "roleArn":{"shape":"AwsArn"}, + "metricNamespace":{"shape":"MetricNamespace"}, + "metricName":{"shape":"MetricName"}, + "metricValue":{"shape":"MetricValue"}, + "metricUnit":{"shape":"MetricUnit"}, + "metricTimestamp":{"shape":"MetricTimestamp"} + } + }, + 
"CreateCertificateFromCsrRequest":{ + "type":"structure", + "required":["certificateSigningRequest"], + "members":{ + "certificateSigningRequest":{"shape":"CertificateSigningRequest"}, + "setAsActive":{ + "shape":"SetAsActive", + "location":"querystring", + "locationName":"setAsActive" + } + } + }, + "CreateCertificateFromCsrResponse":{ + "type":"structure", + "members":{ + "certificateArn":{"shape":"CertificateArn"}, + "certificateId":{"shape":"CertificateId"}, + "certificatePem":{"shape":"CertificatePem"} + } + }, + "CreateKeysAndCertificateRequest":{ + "type":"structure", + "members":{ + "setAsActive":{ + "shape":"SetAsActive", + "location":"querystring", + "locationName":"setAsActive" + } + } + }, + "CreateKeysAndCertificateResponse":{ + "type":"structure", + "members":{ + "certificateArn":{"shape":"CertificateArn"}, + "certificateId":{"shape":"CertificateId"}, + "certificatePem":{"shape":"CertificatePem"}, + "keyPair":{"shape":"KeyPair"} + } + }, + "CreatePolicyRequest":{ + "type":"structure", + "required":[ + "policyName", + "policyDocument" + ], + "members":{ + "policyName":{ + "shape":"PolicyName", + "location":"uri", + "locationName":"policyName" + }, + "policyDocument":{"shape":"PolicyDocument"} + } + }, + "CreatePolicyResponse":{ + "type":"structure", + "members":{ + "policyName":{"shape":"PolicyName"}, + "policyArn":{"shape":"PolicyArn"}, + "policyDocument":{"shape":"PolicyDocument"}, + "policyVersionId":{"shape":"PolicyVersionId"} + } + }, + "CreatePolicyVersionRequest":{ + "type":"structure", + "required":[ + "policyName", + "policyDocument" + ], + "members":{ + "policyName":{ + "shape":"PolicyName", + "location":"uri", + "locationName":"policyName" + }, + "policyDocument":{"shape":"PolicyDocument"}, + "setAsDefault":{ + "shape":"SetAsDefault", + "location":"querystring", + "locationName":"setAsDefault" + } + } + }, + "CreatePolicyVersionResponse":{ + "type":"structure", + "members":{ + "policyArn":{"shape":"PolicyArn"}, + "policyDocument":{"shape":"PolicyDocument"}, + "policyVersionId":{"shape":"PolicyVersionId"}, + "isDefaultVersion":{"shape":"IsDefaultVersion"} + } + }, + "CreateThingRequest":{ + "type":"structure", + "required":["thingName"], + "members":{ + "thingName":{ + "shape":"ThingName", + "location":"uri", + "locationName":"thingName" + }, + "attributePayload":{"shape":"AttributePayload"} + } + }, + "CreateThingResponse":{ + "type":"structure", + "members":{ + "thingName":{"shape":"ThingName"}, + "thingArn":{"shape":"ThingArn"} + } + }, + "CreateTopicRuleRequest":{ + "type":"structure", + "required":[ + "ruleName", + "topicRulePayload" + ], + "members":{ + "ruleName":{ + "shape":"RuleName", + "location":"uri", + "locationName":"ruleName" + }, + "topicRulePayload":{"shape":"TopicRulePayload"} + }, + "payload":"topicRulePayload" + }, + "CreatedAtDate":{"type":"timestamp"}, + "DateType":{"type":"timestamp"}, + "DeleteCACertificateRequest":{ + "type":"structure", + "required":["certificateId"], + "members":{ + "certificateId":{ + "shape":"CertificateId", + "location":"uri", + "locationName":"caCertificateId" + } + } + }, + "DeleteCACertificateResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteCertificateRequest":{ + "type":"structure", + "required":["certificateId"], + "members":{ + "certificateId":{ + "shape":"CertificateId", + "location":"uri", + "locationName":"certificateId" + } + } + }, + "DeleteConflictException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":409}, + "exception":true + 
}, + "DeletePolicyRequest":{ + "type":"structure", + "required":["policyName"], + "members":{ + "policyName":{ + "shape":"PolicyName", + "location":"uri", + "locationName":"policyName" + } + } + }, + "DeletePolicyVersionRequest":{ + "type":"structure", + "required":[ + "policyName", + "policyVersionId" + ], + "members":{ + "policyName":{ + "shape":"PolicyName", + "location":"uri", + "locationName":"policyName" + }, + "policyVersionId":{ + "shape":"PolicyVersionId", + "location":"uri", + "locationName":"policyVersionId" + } + } + }, + "DeleteRegistrationCodeRequest":{ + "type":"structure", + "members":{ + } + }, + "DeleteRegistrationCodeResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteThingRequest":{ + "type":"structure", + "required":["thingName"], + "members":{ + "thingName":{ + "shape":"ThingName", + "location":"uri", + "locationName":"thingName" + } + } + }, + "DeleteThingResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteTopicRuleRequest":{ + "type":"structure", + "members":{ + "ruleName":{ + "shape":"RuleName", + "location":"uri", + "locationName":"ruleName" + } + }, + "required":["ruleName"] + }, + "DeliveryStreamName":{"type":"string"}, + "DescribeCACertificateRequest":{ + "type":"structure", + "required":["certificateId"], + "members":{ + "certificateId":{ + "shape":"CertificateId", + "location":"uri", + "locationName":"caCertificateId" + } + } + }, + "DescribeCACertificateResponse":{ + "type":"structure", + "members":{ + "certificateDescription":{"shape":"CACertificateDescription"} + } + }, + "DescribeCertificateRequest":{ + "type":"structure", + "required":["certificateId"], + "members":{ + "certificateId":{ + "shape":"CertificateId", + "location":"uri", + "locationName":"certificateId" + } + } + }, + "DescribeCertificateResponse":{ + "type":"structure", + "members":{ + "certificateDescription":{"shape":"CertificateDescription"} + } + }, + "DescribeEndpointRequest":{ + "type":"structure", + "members":{ + } + }, + "DescribeEndpointResponse":{ + "type":"structure", + "members":{ + "endpointAddress":{"shape":"EndpointAddress"} + } + }, + "DescribeThingRequest":{ + "type":"structure", + "required":["thingName"], + "members":{ + "thingName":{ + "shape":"ThingName", + "location":"uri", + "locationName":"thingName" + } + } + }, + "DescribeThingResponse":{ + "type":"structure", + "members":{ + "defaultClientId":{"shape":"ClientId"}, + "thingName":{"shape":"ThingName"}, + "attributes":{"shape":"Attributes"} + } + }, + "Description":{"type":"string"}, + "DetachPrincipalPolicyRequest":{ + "type":"structure", + "required":[ + "policyName", + "principal" + ], + "members":{ + "policyName":{ + "shape":"PolicyName", + "location":"uri", + "locationName":"policyName" + }, + "principal":{ + "shape":"Principal", + "location":"header", + "locationName":"x-amzn-iot-principal" + } + } + }, + "DetachThingPrincipalRequest":{ + "type":"structure", + "required":[ + "thingName", + "principal" + ], + "members":{ + "thingName":{ + "shape":"ThingName", + "location":"uri", + "locationName":"thingName" + }, + "principal":{ + "shape":"Principal", + "location":"header", + "locationName":"x-amzn-principal" + } + } + }, + "DetachThingPrincipalResponse":{ + "type":"structure", + "members":{ + } + }, + "DisableTopicRuleRequest":{ + "type":"structure", + "required":["ruleName"], + "members":{ + "ruleName":{ + "shape":"RuleName", + "location":"uri", + "locationName":"ruleName" + } + } + }, + "DynamoDBAction":{ + "type":"structure", + "required":[ + "tableName", + "roleArn", + 
"hashKeyField", + "hashKeyValue" + ], + "members":{ + "tableName":{"shape":"TableName"}, + "roleArn":{"shape":"AwsArn"}, + "operation":{"shape":"DynamoOperation"}, + "hashKeyField":{"shape":"HashKeyField"}, + "hashKeyValue":{"shape":"HashKeyValue"}, + "hashKeyType":{"shape":"DynamoKeyType"}, + "rangeKeyField":{"shape":"RangeKeyField"}, + "rangeKeyValue":{"shape":"RangeKeyValue"}, + "rangeKeyType":{"shape":"DynamoKeyType"}, + "payloadField":{"shape":"PayloadField"} + } + }, + "DynamoKeyType":{ + "type":"string", + "enum":[ + "STRING", + "NUMBER" + ] + }, + "DynamoOperation":{"type":"string"}, + "ElasticsearchAction":{ + "type":"structure", + "required":[ + "roleArn", + "endpoint", + "index", + "type", + "id" + ], + "members":{ + "roleArn":{"shape":"AwsArn"}, + "endpoint":{"shape":"ElasticsearchEndpoint"}, + "index":{"shape":"ElasticsearchIndex"}, + "type":{"shape":"ElasticsearchType"}, + "id":{"shape":"ElasticsearchId"} + } + }, + "ElasticsearchEndpoint":{ + "type":"string", + "pattern":"https?://.*" + }, + "ElasticsearchId":{"type":"string"}, + "ElasticsearchIndex":{"type":"string"}, + "ElasticsearchType":{"type":"string"}, + "EnableTopicRuleRequest":{ + "type":"structure", + "required":["ruleName"], + "members":{ + "ruleName":{ + "shape":"RuleName", + "location":"uri", + "locationName":"ruleName" + } + } + }, + "EndpointAddress":{"type":"string"}, + "FirehoseAction":{ + "type":"structure", + "required":[ + "roleArn", + "deliveryStreamName" + ], + "members":{ + "roleArn":{"shape":"AwsArn"}, + "deliveryStreamName":{"shape":"DeliveryStreamName"} + } + }, + "FunctionArn":{"type":"string"}, + "GetLoggingOptionsRequest":{ + "type":"structure", + "members":{ + } + }, + "GetLoggingOptionsResponse":{ + "type":"structure", + "members":{ + "roleArn":{"shape":"AwsArn"}, + "logLevel":{"shape":"LogLevel"} + } + }, + "GetPolicyRequest":{ + "type":"structure", + "required":["policyName"], + "members":{ + "policyName":{ + "shape":"PolicyName", + "location":"uri", + "locationName":"policyName" + } + } + }, + "GetPolicyResponse":{ + "type":"structure", + "members":{ + "policyName":{"shape":"PolicyName"}, + "policyArn":{"shape":"PolicyArn"}, + "policyDocument":{"shape":"PolicyDocument"}, + "defaultVersionId":{"shape":"PolicyVersionId"} + } + }, + "GetPolicyVersionRequest":{ + "type":"structure", + "required":[ + "policyName", + "policyVersionId" + ], + "members":{ + "policyName":{ + "shape":"PolicyName", + "location":"uri", + "locationName":"policyName" + }, + "policyVersionId":{ + "shape":"PolicyVersionId", + "location":"uri", + "locationName":"policyVersionId" + } + } + }, + "GetPolicyVersionResponse":{ + "type":"structure", + "members":{ + "policyArn":{"shape":"PolicyArn"}, + "policyName":{"shape":"PolicyName"}, + "policyDocument":{"shape":"PolicyDocument"}, + "policyVersionId":{"shape":"PolicyVersionId"}, + "isDefaultVersion":{"shape":"IsDefaultVersion"} + } + }, + "GetRegistrationCodeRequest":{ + "type":"structure", + "members":{ + } + }, + "GetRegistrationCodeResponse":{ + "type":"structure", + "members":{ + "registrationCode":{"shape":"RegistrationCode"} + } + }, + "GetTopicRuleRequest":{ + "type":"structure", + "required":["ruleName"], + "members":{ + "ruleName":{ + "shape":"RuleName", + "location":"uri", + "locationName":"ruleName" + } + } + }, + "GetTopicRuleResponse":{ + "type":"structure", + "members":{ + "ruleArn":{"shape":"RuleArn"}, + "rule":{"shape":"TopicRule"} + } + }, + "HashKeyField":{"type":"string"}, + "HashKeyValue":{"type":"string"}, + "InternalException":{ + "type":"structure", + 
"members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "InternalFailureException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "InvalidRequestException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "IsDefaultVersion":{"type":"boolean"}, + "IsDisabled":{"type":"boolean"}, + "Key":{"type":"string"}, + "KeyPair":{ + "type":"structure", + "members":{ + "PublicKey":{"shape":"PublicKey"}, + "PrivateKey":{"shape":"PrivateKey"} + } + }, + "KinesisAction":{ + "type":"structure", + "required":[ + "roleArn", + "streamName" + ], + "members":{ + "roleArn":{"shape":"AwsArn"}, + "streamName":{"shape":"StreamName"}, + "partitionKey":{"shape":"PartitionKey"} + } + }, + "LambdaAction":{ + "type":"structure", + "required":["functionArn"], + "members":{ + "functionArn":{"shape":"FunctionArn"} + } + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":410}, + "exception":true + }, + "ListCACertificatesRequest":{ + "type":"structure", + "members":{ + "pageSize":{ + "shape":"PageSize", + "location":"querystring", + "locationName":"pageSize" + }, + "marker":{ + "shape":"Marker", + "location":"querystring", + "locationName":"marker" + }, + "ascendingOrder":{ + "shape":"AscendingOrder", + "location":"querystring", + "locationName":"isAscendingOrder" + } + } + }, + "ListCACertificatesResponse":{ + "type":"structure", + "members":{ + "certificates":{"shape":"CACertificates"}, + "nextMarker":{"shape":"Marker"} + } + }, + "ListCertificatesByCARequest":{ + "type":"structure", + "required":["caCertificateId"], + "members":{ + "caCertificateId":{ + "shape":"CertificateId", + "location":"uri", + "locationName":"caCertificateId" + }, + "pageSize":{ + "shape":"PageSize", + "location":"querystring", + "locationName":"pageSize" + }, + "marker":{ + "shape":"Marker", + "location":"querystring", + "locationName":"marker" + }, + "ascendingOrder":{ + "shape":"AscendingOrder", + "location":"querystring", + "locationName":"isAscendingOrder" + } + } + }, + "ListCertificatesByCAResponse":{ + "type":"structure", + "members":{ + "certificates":{"shape":"Certificates"}, + "nextMarker":{"shape":"Marker"} + } + }, + "ListCertificatesRequest":{ + "type":"structure", + "members":{ + "pageSize":{ + "shape":"PageSize", + "location":"querystring", + "locationName":"pageSize" + }, + "marker":{ + "shape":"Marker", + "location":"querystring", + "locationName":"marker" + }, + "ascendingOrder":{ + "shape":"AscendingOrder", + "location":"querystring", + "locationName":"isAscendingOrder" + } + } + }, + "ListCertificatesResponse":{ + "type":"structure", + "members":{ + "certificates":{"shape":"Certificates"}, + "nextMarker":{"shape":"Marker"} + } + }, + "ListPoliciesRequest":{ + "type":"structure", + "members":{ + "marker":{ + "shape":"Marker", + "location":"querystring", + "locationName":"marker" + }, + "pageSize":{ + "shape":"PageSize", + "location":"querystring", + "locationName":"pageSize" + }, + "ascendingOrder":{ + "shape":"AscendingOrder", + "location":"querystring", + "locationName":"isAscendingOrder" + } + } + }, + "ListPoliciesResponse":{ + "type":"structure", + "members":{ + "policies":{"shape":"Policies"}, + "nextMarker":{"shape":"Marker"} + } + }, + "ListPolicyPrincipalsRequest":{ 
+ "type":"structure", + "required":["policyName"], + "members":{ + "policyName":{ + "shape":"PolicyName", + "location":"header", + "locationName":"x-amzn-iot-policy" + }, + "marker":{ + "shape":"Marker", + "location":"querystring", + "locationName":"marker" + }, + "pageSize":{ + "shape":"PageSize", + "location":"querystring", + "locationName":"pageSize" + }, + "ascendingOrder":{ + "shape":"AscendingOrder", + "location":"querystring", + "locationName":"isAscendingOrder" + } + } + }, + "ListPolicyPrincipalsResponse":{ + "type":"structure", + "members":{ + "principals":{"shape":"Principals"}, + "nextMarker":{"shape":"Marker"} + } + }, + "ListPolicyVersionsRequest":{ + "type":"structure", + "required":["policyName"], + "members":{ + "policyName":{ + "shape":"PolicyName", + "location":"uri", + "locationName":"policyName" + } + } + }, + "ListPolicyVersionsResponse":{ + "type":"structure", + "members":{ + "policyVersions":{"shape":"PolicyVersions"} + } + }, + "ListPrincipalPoliciesRequest":{ + "type":"structure", + "required":["principal"], + "members":{ + "principal":{ + "shape":"Principal", + "location":"header", + "locationName":"x-amzn-iot-principal" + }, + "marker":{ + "shape":"Marker", + "location":"querystring", + "locationName":"marker" + }, + "pageSize":{ + "shape":"PageSize", + "location":"querystring", + "locationName":"pageSize" + }, + "ascendingOrder":{ + "shape":"AscendingOrder", + "location":"querystring", + "locationName":"isAscendingOrder" + } + } + }, + "ListPrincipalPoliciesResponse":{ + "type":"structure", + "members":{ + "policies":{"shape":"Policies"}, + "nextMarker":{"shape":"Marker"} + } + }, + "ListPrincipalThingsRequest":{ + "type":"structure", + "required":["principal"], + "members":{ + "nextToken":{ + "shape":"NextToken", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "location":"querystring", + "locationName":"maxResults" + }, + "principal":{ + "shape":"Principal", + "location":"header", + "locationName":"x-amzn-principal" + } + } + }, + "ListPrincipalThingsResponse":{ + "type":"structure", + "members":{ + "things":{"shape":"ThingNameList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListThingPrincipalsRequest":{ + "type":"structure", + "required":["thingName"], + "members":{ + "thingName":{ + "shape":"ThingName", + "location":"uri", + "locationName":"thingName" + } + } + }, + "ListThingPrincipalsResponse":{ + "type":"structure", + "members":{ + "principals":{"shape":"Principals"} + } + }, + "ListThingsRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "location":"querystring", + "locationName":"maxResults" + }, + "attributeName":{ + "shape":"AttributeName", + "location":"querystring", + "locationName":"attributeName" + }, + "attributeValue":{ + "shape":"AttributeValue", + "location":"querystring", + "locationName":"attributeValue" + } + } + }, + "ListThingsResponse":{ + "type":"structure", + "members":{ + "things":{"shape":"ThingAttributeList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListTopicRulesRequest":{ + "type":"structure", + "members":{ + "topic":{ + "shape":"Topic", + "location":"querystring", + "locationName":"topic" + }, + "maxResults":{ + "shape":"MaxResults", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "location":"querystring", + "locationName":"nextToken" + }, + "ruleDisabled":{ + 
"shape":"IsDisabled", + "location":"querystring", + "locationName":"ruleDisabled" + } + } + }, + "ListTopicRulesResponse":{ + "type":"structure", + "members":{ + "rules":{"shape":"TopicRuleList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "LogLevel":{ + "type":"string", + "enum":[ + "DEBUG", + "INFO", + "ERROR", + "WARN", + "DISABLED" + ] + }, + "LoggingOptionsPayload":{ + "type":"structure", + "required":["roleArn"], + "members":{ + "roleArn":{"shape":"AwsArn"}, + "logLevel":{"shape":"LogLevel"} + } + }, + "MalformedPolicyException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "Marker":{"type":"string"}, + "MaxResults":{ + "type":"integer", + "min":1, + "max":10000 + }, + "Message":{ + "type":"string", + "max":128 + }, + "MessageFormat":{ + "type":"string", + "enum":[ + "RAW", + "JSON" + ] + }, + "MetricName":{"type":"string"}, + "MetricNamespace":{"type":"string"}, + "MetricTimestamp":{"type":"string"}, + "MetricUnit":{"type":"string"}, + "MetricValue":{"type":"string"}, + "NextToken":{"type":"string"}, + "PageSize":{ + "type":"integer", + "min":1, + "max":250 + }, + "PartitionKey":{"type":"string"}, + "PayloadField":{"type":"string"}, + "Policies":{ + "type":"list", + "member":{"shape":"Policy"} + }, + "Policy":{ + "type":"structure", + "members":{ + "policyName":{"shape":"PolicyName"}, + "policyArn":{"shape":"PolicyArn"} + } + }, + "PolicyArn":{"type":"string"}, + "PolicyDocument":{"type":"string"}, + "PolicyName":{ + "type":"string", + "min":1, + "max":128, + "pattern":"[\\w+=,.@-]+" + }, + "PolicyVersion":{ + "type":"structure", + "members":{ + "versionId":{"shape":"PolicyVersionId"}, + "isDefaultVersion":{"shape":"IsDefaultVersion"}, + "createDate":{"shape":"DateType"} + } + }, + "PolicyVersionId":{ + "type":"string", + "pattern":"[0-9]+" + }, + "PolicyVersions":{ + "type":"list", + "member":{"shape":"PolicyVersion"} + }, + "Principal":{"type":"string"}, + "PrincipalArn":{"type":"string"}, + "Principals":{ + "type":"list", + "member":{"shape":"PrincipalArn"} + }, + "PrivateKey":{ + "type":"string", + "min":1, + "sensitive":true + }, + "PublicKey":{ + "type":"string", + "min":1 + }, + "QueueUrl":{"type":"string"}, + "RangeKeyField":{"type":"string"}, + "RangeKeyValue":{"type":"string"}, + "RegisterCACertificateRequest":{ + "type":"structure", + "required":[ + "caCertificate", + "verificationCertificate" + ], + "members":{ + "caCertificate":{"shape":"CertificatePem"}, + "verificationCertificate":{"shape":"CertificatePem"}, + "setAsActive":{ + "shape":"SetAsActive", + "location":"querystring", + "locationName":"setAsActive" + } + } + }, + "RegisterCACertificateResponse":{ + "type":"structure", + "members":{ + "certificateArn":{"shape":"CertificateArn"}, + "certificateId":{"shape":"CertificateId"} + } + }, + "RegisterCertificateRequest":{ + "type":"structure", + "required":["certificatePem"], + "members":{ + "certificatePem":{"shape":"CertificatePem"}, + "caCertificatePem":{"shape":"CertificatePem"}, + "setAsActive":{ + "shape":"SetAsActive", + "location":"querystring", + "locationName":"setAsActive" + } + } + }, + "RegisterCertificateResponse":{ + "type":"structure", + "members":{ + "certificateArn":{"shape":"CertificateArn"}, + "certificateId":{"shape":"CertificateId"} + } + }, + "RegistrationCode":{ + "type":"string", + "min":64, + "max":64, + "pattern":"(0x)?[a-fA-F0-9]+" + }, + "RegistrationCodeValidationException":{ + "type":"structure", + "members":{ + 
"message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "RejectCertificateTransferRequest":{ + "type":"structure", + "required":["certificateId"], + "members":{ + "certificateId":{ + "shape":"CertificateId", + "location":"uri", + "locationName":"certificateId" + }, + "rejectReason":{"shape":"Message"} + } + }, + "ReplaceTopicRuleRequest":{ + "type":"structure", + "required":[ + "ruleName", + "topicRulePayload" + ], + "members":{ + "ruleName":{ + "shape":"RuleName", + "location":"uri", + "locationName":"ruleName" + }, + "topicRulePayload":{"shape":"TopicRulePayload"} + }, + "payload":"topicRulePayload" + }, + "RepublishAction":{ + "type":"structure", + "required":[ + "roleArn", + "topic" + ], + "members":{ + "roleArn":{"shape":"AwsArn"}, + "topic":{"shape":"TopicPattern"} + } + }, + "ResourceAlreadyExistsException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "RuleArn":{"type":"string"}, + "RuleName":{ + "type":"string", + "min":1, + "max":128, + "pattern":"^[a-zA-Z0-9_]+$" + }, + "S3Action":{ + "type":"structure", + "required":[ + "roleArn", + "bucketName", + "key" + ], + "members":{ + "roleArn":{"shape":"AwsArn"}, + "bucketName":{"shape":"BucketName"}, + "key":{"shape":"Key"} + } + }, + "SQL":{"type":"string"}, + "ServiceUnavailableException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + "SetAsActive":{"type":"boolean"}, + "SetAsDefault":{"type":"boolean"}, + "SetDefaultPolicyVersionRequest":{ + "type":"structure", + "required":[ + "policyName", + "policyVersionId" + ], + "members":{ + "policyName":{ + "shape":"PolicyName", + "location":"uri", + "locationName":"policyName" + }, + "policyVersionId":{ + "shape":"PolicyVersionId", + "location":"uri", + "locationName":"policyVersionId" + } + } + }, + "SetLoggingOptionsRequest":{ + "type":"structure", + "required":["loggingOptionsPayload"], + "members":{ + "loggingOptionsPayload":{"shape":"LoggingOptionsPayload"} + }, + "payload":"loggingOptionsPayload" + }, + "SnsAction":{ + "type":"structure", + "required":[ + "targetArn", + "roleArn" + ], + "members":{ + "targetArn":{"shape":"AwsArn"}, + "roleArn":{"shape":"AwsArn"}, + "messageFormat":{"shape":"MessageFormat"} + } + }, + "SqlParseException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "SqsAction":{ + "type":"structure", + "required":[ + "roleArn", + "queueUrl" + ], + "members":{ + "roleArn":{"shape":"AwsArn"}, + "queueUrl":{"shape":"QueueUrl"}, + "useBase64":{"shape":"UseBase64"} + } + }, + "StateReason":{"type":"string"}, + "StateValue":{"type":"string"}, + "StreamName":{"type":"string"}, + "TableName":{"type":"string"}, + "ThingArn":{"type":"string"}, + "ThingAttribute":{ + "type":"structure", + "members":{ + "thingName":{"shape":"ThingName"}, + "attributes":{"shape":"Attributes"} + } + }, + "ThingAttributeList":{ + "type":"list", + "member":{"shape":"ThingAttribute"} + }, + "ThingName":{ + "type":"string", + "min":1, + "max":128, + "pattern":"[a-zA-Z0-9_-]+" + }, + "ThingNameList":{ + "type":"list", + "member":{"shape":"ThingName"} + }, + "ThrottlingException":{ + "type":"structure", + 
"members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":429}, + "exception":true + }, + "Topic":{"type":"string"}, + "TopicPattern":{"type":"string"}, + "TopicRule":{ + "type":"structure", + "members":{ + "ruleName":{"shape":"RuleName"}, + "sql":{"shape":"SQL"}, + "description":{"shape":"Description"}, + "createdAt":{"shape":"CreatedAtDate"}, + "actions":{"shape":"ActionList"}, + "ruleDisabled":{"shape":"IsDisabled"}, + "awsIotSqlVersion":{"shape":"AwsIotSqlVersion"} + } + }, + "TopicRuleList":{ + "type":"list", + "member":{"shape":"TopicRuleListItem"} + }, + "TopicRuleListItem":{ + "type":"structure", + "members":{ + "ruleArn":{"shape":"RuleArn"}, + "ruleName":{"shape":"RuleName"}, + "topicPattern":{"shape":"TopicPattern"}, + "createdAt":{"shape":"CreatedAtDate"}, + "ruleDisabled":{"shape":"IsDisabled"} + } + }, + "TopicRulePayload":{ + "type":"structure", + "required":[ + "sql", + "actions" + ], + "members":{ + "sql":{"shape":"SQL"}, + "description":{"shape":"Description"}, + "actions":{"shape":"ActionList"}, + "ruleDisabled":{"shape":"IsDisabled"}, + "awsIotSqlVersion":{"shape":"AwsIotSqlVersion"} + } + }, + "TransferAlreadyCompletedException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":410}, + "exception":true + }, + "TransferCertificateRequest":{ + "type":"structure", + "required":[ + "certificateId", + "targetAwsAccount" + ], + "members":{ + "certificateId":{ + "shape":"CertificateId", + "location":"uri", + "locationName":"certificateId" + }, + "targetAwsAccount":{ + "shape":"AwsAccountId", + "location":"querystring", + "locationName":"targetAwsAccount" + }, + "transferMessage":{"shape":"Message"} + } + }, + "TransferCertificateResponse":{ + "type":"structure", + "members":{ + "transferredCertificateArn":{"shape":"CertificateArn"} + } + }, + "TransferConflictException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "TransferData":{ + "type":"structure", + "members":{ + "transferMessage":{"shape":"Message"}, + "rejectReason":{"shape":"Message"}, + "transferDate":{"shape":"DateType"}, + "acceptDate":{"shape":"DateType"}, + "rejectDate":{"shape":"DateType"} + } + }, + "UnauthorizedException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":401}, + "exception":true + }, + "UpdateCACertificateRequest":{ + "type":"structure", + "required":[ + "certificateId", + "newStatus" + ], + "members":{ + "certificateId":{ + "shape":"CertificateId", + "location":"uri", + "locationName":"caCertificateId" + }, + "newStatus":{ + "shape":"CACertificateStatus", + "location":"querystring", + "locationName":"newStatus" + } + } + }, + "UpdateCertificateRequest":{ + "type":"structure", + "required":[ + "certificateId", + "newStatus" + ], + "members":{ + "certificateId":{ + "shape":"CertificateId", + "location":"uri", + "locationName":"certificateId" + }, + "newStatus":{ + "shape":"CertificateStatus", + "location":"querystring", + "locationName":"newStatus" + } + } + }, + "UpdateThingRequest":{ + "type":"structure", + "required":[ + "thingName", + "attributePayload" + ], + "members":{ + "thingName":{ + "shape":"ThingName", + "location":"uri", + "locationName":"thingName" + }, + "attributePayload":{"shape":"AttributePayload"} + } + }, + "UpdateThingResponse":{ + "type":"structure", + "members":{ + } + }, + "UseBase64":{"type":"boolean"}, + "VersionsLimitExceededException":{ 
+ "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "errorMessage":{"type":"string"} + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/iot/2015-05-28/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/iot/2015-05-28/docs-2.json new file mode 100644 index 000000000..0266957e9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/iot/2015-05-28/docs-2.json @@ -0,0 +1,1422 @@ +{ + "version": "2.0", + "operations": { + "AcceptCertificateTransfer": "

    Accepts a pending certificate transfer. The default state of the certificate is INACTIVE.

    To check for pending certificate transfers, call ListCertificates to enumerate your certificates.

    ", + "AttachPrincipalPolicy": "

    Attaches the specified policy to the specified principal (certificate or other credential).

    ", + "AttachThingPrincipal": "

    Attaches the specified principal to the specified thing.

    ", + "CancelCertificateTransfer": "

    Cancels a pending transfer for the specified certificate.

    Note: Only the transfer source account can use this operation to cancel a transfer. (Transfer destinations can use RejectCertificateTransfer instead.) After transfer, AWS IoT returns the certificate to the source account in the INACTIVE state. After the destination account has accepted the transfer, the transfer cannot be cancelled.

    After a certificate transfer is cancelled, the status of the certificate changes from PENDING_TRANSFER to INACTIVE.

    ", + "CreateCertificateFromCsr": "

    Creates an X.509 certificate using the specified certificate signing request.

    Note: Reusing the same certificate signing request (CSR) results in a distinct certificate.

    You can create multiple certificates in a batch by creating a directory, copying multiple .csr files into that directory, and then specifying that directory on the command line. The following commands show how to create a batch of certificates given a batch of CSRs.

    Assuming a set of CSRs is located inside the directory my-csr-directory:

    On Linux and OS X, the command is:

    $ ls my-csr-directory/ | xargs -I {} aws iot create-certificate-from-csr --certificate-signing-request file://my-csr-directory/{}

    This command lists all of the CSRs in my-csr-directory and pipes each CSR file name to the aws iot create-certificate-from-csr AWS CLI command to create a certificate for the corresponding CSR.

    The aws iot create-certificate-from-csr part of the command can also be run in parallel to speed up the certificate creation process:

    $ ls my-csr-directory/ | xargs -P 10 -I {} aws iot create-certificate-from-csr --certificate-signing-request file://my-csr-directory/{}

    On Windows PowerShell, the command to create certificates for all CSRs in my-csr-directory is:

    > ls -Name my-csr-directory | %{aws iot create-certificate-from-csr --certificate-signing-request file://my-csr-directory/$_}

    On a Windows command prompt, the command to create certificates for all CSRs in my-csr-directory is:

    > forfiles /p my-csr-directory /c \"cmd /c aws iot create-certificate-from-csr --certificate-signing-request file://@path\"
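
    The same operation is available through the generated Go client in this vendored SDK. A minimal sketch for a single CSR follows (the file name, region, and credential setup are illustrative assumptions, not part of the model):

    package main

    import (
        "fmt"
        "io/ioutil"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/iot"
    )

    func main() {
        // Read a PEM-encoded CSR from disk (file name is illustrative).
        csr, err := ioutil.ReadFile("device.csr")
        if err != nil {
            panic(err)
        }
        // session.Must panics on error; credentials come from the environment.
        svc := iot.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1"))))
        // Create a certificate from the CSR and activate it immediately.
        out, err := svc.CreateCertificateFromCsr(&iot.CreateCertificateFromCsrInput{
            CertificateSigningRequest: aws.String(string(csr)),
            SetAsActive:               aws.Bool(true),
        })
        if err != nil {
            panic(err)
        }
        fmt.Println(aws.StringValue(out.CertificateArn))
    }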

    ", + "CreateKeysAndCertificate": "

    Creates a 2048-bit RSA key pair and issues an X.509 certificate using the issued public key.

    Note: This is the only time AWS IoT issues the private key for this certificate, so it is important to keep it in a secure location.
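
    A hedged Go sketch of this call through the vendored SDK, persisting the one-time private key (the output path and file mode are illustrative):

    package main

    import (
        "io/ioutil"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/iot"
    )

    func main() {
        svc := iot.New(session.Must(session.NewSession()))
        out, err := svc.CreateKeysAndCertificate(&iot.CreateKeysAndCertificateInput{
            SetAsActive: aws.Bool(true),
        })
        if err != nil {
            panic(err)
        }
        // The private key is returned only once; write it out securely.
        if err := ioutil.WriteFile("private.pem",
            []byte(aws.StringValue(out.KeyPair.PrivateKey)), 0600); err != nil {
            panic(err)
        }
    }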

    ", + "CreatePolicy": "

    Creates an AWS IoT policy.

    The created policy version is the default version for the policy. This operation creates a policy version with a version identifier of 1 and sets 1 as the policy's default version.

    ", + "CreatePolicyVersion": "

    Creates a new version of the specified AWS IoT policy. To update a policy, create a new policy version. A managed policy can have up to five versions. If the policy has five versions, you must use DeletePolicyVersion to delete an existing version before you create a new one.

    Optionally, you can set the new version as the policy's default version. The default version is the operative version (that is, the version that is in effect for the certificates to which the policy is attached).

    ", + "CreateThing": "

    Creates a thing in the Thing Registry.

    ", + "CreateTopicRule": "

    Creates a rule. Creating rules is an administrator-level action. Any user who has permission to create rules will be able to access data processed by the rule.
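
    As a sketch of the request shape, the following builds a rule that archives matching messages to S3 using this SDK's generated client (the rule name, role ARN, bucket, and key template are illustrative assumptions):

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/iot"
    )

    func main() {
        svc := iot.New(session.Must(session.NewSession()))
        _, err := svc.CreateTopicRule(&iot.CreateTopicRuleInput{
            RuleName: aws.String("temperatureToS3"),
            TopicRulePayload: &iot.TopicRulePayload{
                // Rules-engine SQL selecting everything on one topic.
                Sql: aws.String("SELECT * FROM 'sensors/temperature'"),
                Actions: []*iot.Action{{
                    S3: &iot.S3Action{
                        RoleArn:    aws.String("arn:aws:iam::123456789012:role/iot-s3-role"),
                        BucketName: aws.String("my-iot-archive"),
                        Key:        aws.String("${topic()}/${timestamp()}"),
                    },
                }},
            },
        })
        if err != nil {
            panic(err)
        }
    }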

    ", + "DeleteCACertificate": "

    Deletes a registered CA certificate.

    ", + "DeleteCertificate": "

    Deletes the specified certificate.

    A certificate cannot be deleted if it has a policy attached to it or if its status is set to ACTIVE. To delete a certificate, first use the DetachPrincipalPolicy API to detach all policies. Next, use the UpdateCertificate API to set the certificate to the INACTIVE status.
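
    A minimal Go sketch of that detach-deactivate-delete sequence, assuming a certificate with a single known policy attached (the policy name and certificate identifiers are illustrative):

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/iot"
    )

    func main() {
        svc := iot.New(session.Must(session.NewSession()))
        certArn := aws.String("arn:aws:iot:us-east-1:123456789012:cert/abc123") // illustrative
        certID := aws.String("abc123")                                          // illustrative
        // 1. Detach the policy; the principal is the certificate ARN.
        if _, err := svc.DetachPrincipalPolicy(&iot.DetachPrincipalPolicyInput{
            PolicyName: aws.String("device-policy"),
            Principal:  certArn,
        }); err != nil {
            panic(err)
        }
        // 2. Deactivate, then 3. delete the certificate.
        if _, err := svc.UpdateCertificate(&iot.UpdateCertificateInput{
            CertificateId: certID,
            NewStatus:     aws.String("INACTIVE"),
        }); err != nil {
            panic(err)
        }
        if _, err := svc.DeleteCertificate(&iot.DeleteCertificateInput{
            CertificateId: certID,
        }); err != nil {
            panic(err)
        }
    }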

    ", + "DeletePolicy": "

    Deletes the specified policy.

    A policy cannot be deleted if it has non-default versions or it is attached to any certificate.

    To delete a policy, use the DeletePolicyVersion API to delete all non-default versions of the policy; use the DetachPrincipalPolicy API to detach the policy from any certificate; and then use the DeletePolicy API to delete the policy.

    When a policy is deleted using DeletePolicy, its default version is deleted with it.
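
    Sketched in Go against this SDK, the full sequence for a policy that is no longer attached to any principal might look like this (the policy name is illustrative):

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/iot"
    )

    func main() {
        svc := iot.New(session.Must(session.NewSession()))
        name := aws.String("device-policy")
        // Delete every non-default version first; DeletePolicy removes the default.
        vers, err := svc.ListPolicyVersions(&iot.ListPolicyVersionsInput{PolicyName: name})
        if err != nil {
            panic(err)
        }
        for _, v := range vers.PolicyVersions {
            if !aws.BoolValue(v.IsDefaultVersion) {
                if _, err := svc.DeletePolicyVersion(&iot.DeletePolicyVersionInput{
                    PolicyName:      name,
                    PolicyVersionId: v.VersionId,
                }); err != nil {
                    panic(err)
                }
            }
        }
        if _, err := svc.DeletePolicy(&iot.DeletePolicyInput{PolicyName: name}); err != nil {
            panic(err)
        }
    }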

    ", + "DeletePolicyVersion": "

    Deletes the specified version of the specified policy. You cannot delete the default version of a policy using this API. To delete the default version of a policy, use DeletePolicy. To find out which version of a policy is marked as the default version, use ListPolicyVersions.

    ", + "DeleteRegistrationCode": "

    Deletes a CA certificate registration code.

    ", + "DeleteThing": "

    Deletes the specified thing from the Thing Registry.

    ", + "DeleteTopicRule": "

    Deletes the specified rule.

    ", + "DescribeCACertificate": "

    Describes a registered CA certificate.

    ", + "DescribeCertificate": "

    Gets information about the specified certificate.

    ", + "DescribeEndpoint": "

    Returns a unique endpoint specific to the AWS account making the call.

    ", + "DescribeThing": "

    Gets information about the specified thing.

    ", + "DetachPrincipalPolicy": "

    Removes the specified policy from the specified certificate.

    ", + "DetachThingPrincipal": "

    Detaches the specified principal from the specified thing.

    ", + "DisableTopicRule": "

    Disables the specified rule.

    ", + "EnableTopicRule": "

    Enables the specified rule.

    ", + "GetLoggingOptions": "

    Gets the logging options.

    ", + "GetPolicy": "

    Gets information about the specified policy with the policy document of the default version.

    ", + "GetPolicyVersion": "

    Gets information about the specified policy version.

    ", + "GetRegistrationCode": "

    Gets a registration code used to register a CA certificate with AWS IoT.

    ", + "GetTopicRule": "

    Gets information about the specified rule.

    ", + "ListCACertificates": "

    Lists the CA certificates registered for your AWS account.

    The results are paginated with a default page size of 25. You can use the returned marker to retrieve additional results.

    ", + "ListCertificates": "

    Lists the certificates registered in your AWS account.

    The results are paginated with a default page size of 25. You can use the returned marker to retrieve additional results.

    ", + "ListCertificatesByCA": "

    List the device certificates signed by the specified CA certificate.

    ", + "ListPolicies": "

    Lists your policies.

    ", + "ListPolicyPrincipals": "

    Lists the principals associated with the specified policy.

    ", + "ListPolicyVersions": "

    Lists the versions of the specified policy and identifies the default version.

    ", + "ListPrincipalPolicies": "

    Lists the policies attached to the specified principal. If you use a Cognito identity, the ID must be in Amazon Cognito Identity format.

    ", + "ListPrincipalThings": "

    Lists the things associated with the specified principal.

    ", + "ListThingPrincipals": "

    Lists the principals associated with the specified thing.

    ", + "ListThings": "

    Lists your things. You can pass an AttributeName or AttributeValue to filter your things (for example, \"ListThings where AttributeName=Color and AttributeValue=Red\").
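
    The example above maps directly onto the querystring parameters in the model; a hedged Go sketch:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/iot"
    )

    func main() {
        svc := iot.New(session.Must(session.NewSession()))
        // Equivalent of "ListThings where AttributeName=Color and AttributeValue=Red".
        out, err := svc.ListThings(&iot.ListThingsInput{
            AttributeName:  aws.String("Color"),
            AttributeValue: aws.String("Red"),
        })
        if err != nil {
            panic(err)
        }
        for _, t := range out.Things {
            fmt.Println(aws.StringValue(t.ThingName))
        }
    }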

    ", + "ListTopicRules": "

    Lists the rules for the specified topic.

    ", + "RegisterCACertificate": "

    Registers a CA certificate with AWS IoT. This CA certificate can then be used to sign device certificates, which can be then registered with AWS IoT. You can register up to 10 CA certificates per AWS account that have the same subject field and public key. This enables you to have up to 10 certificate authorities sign your device certificates. If you have more than one CA certificate registered, make sure you pass the CA certificate when you register your device certificates with the RegisterCertificate API.

    ", + "RegisterCertificate": "

    Registers a device certificate with AWS IoT. If you have more than one CA certificate that has the same subject field, you must specify the CA certificate that was used to sign the device certificate being registered.

    ", + "RejectCertificateTransfer": "

    Rejects a pending certificate transfer. After AWS IoT rejects a certificate transfer, the certificate status changes from PENDING_TRANSFER to INACTIVE.

    To check for pending certificate transfers, call ListCertificates to enumerate your certificates.

    This operation can only be called by the transfer destination. After it is called, the certificate will be returned to the source's account in the INACTIVE state.

    ", + "ReplaceTopicRule": "

    Replaces the specified rule. You must specify all parameters for the new rule. Creating rules is an administrator-level action. Any user who has permission to create rules will be able to access data processed by the rule.

    ", + "SetDefaultPolicyVersion": "

    Sets the specified version of the specified policy as the policy's default (operative) version. This action affects all certificates to which the policy is attached. To list the principals the policy is attached to, use the ListPolicyPrincipals API.

    ", + "SetLoggingOptions": "

    Sets the logging options.

    ", + "TransferCertificate": "

    Transfers the specified certificate to the specified AWS account.

    You can cancel the transfer until it is acknowledged by the recipient.

    No notification is sent to the transfer destination's account. It is up to the caller to notify the transfer target.

    The certificate being transferred must not be in the ACTIVE state. You can use the UpdateCertificate API to deactivate it.

    The certificate must not have any policies attached to it. You can use the DetachPrincipalPolicy API to detach them.
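
    A sketch of the precondition-then-transfer sequence in Go (the certificate ID, target account, and message are illustrative; detaching any attached policies via DetachPrincipalPolicy is assumed already done):

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/iot"
    )

    func main() {
        svc := iot.New(session.Must(session.NewSession()))
        certID := aws.String("abc123") // illustrative
        // The certificate must leave the ACTIVE state before it can be transferred.
        if _, err := svc.UpdateCertificate(&iot.UpdateCertificateInput{
            CertificateId: certID,
            NewStatus:     aws.String("INACTIVE"),
        }); err != nil {
            panic(err)
        }
        if _, err := svc.TransferCertificate(&iot.TransferCertificateInput{
            CertificateId:    certID,
            TargetAwsAccount: aws.String("123456789012"),
            TransferMessage:  aws.String("Transferring device certificate"),
        }); err != nil {
            panic(err)
        }
    }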

    ", + "UpdateCACertificate": "

    Updates a registered CA certificate.

    ", + "UpdateCertificate": "

    Updates the status of the specified certificate. This operation is idempotent.

    Moving a certificate from the ACTIVE state (including REVOKED) will not disconnect currently connected devices, but these devices will be unable to reconnect.

    The ACTIVE state is required to authenticate devices connecting to AWS IoT using a certificate.

    ", + "UpdateThing": "

    Updates the data for a thing.

    " + }, + "service": "AWS IoT

    AWS IoT provides secure, bi-directional communication between Internet-connected things (such as sensors, actuators, embedded devices, or smart appliances) and the AWS cloud. You can discover your custom IoT-Data endpoint to communicate with, configure rules for data processing and integration with other services, organize resources associated with each thing (Thing Registry), configure logging, and create and manage policies and credentials to authenticate things.

    For more information about how AWS IoT works, see the Developer Guide.

    ", + "shapes": { + "AcceptCertificateTransferRequest": { + "base": "

    The input for the AcceptCertificateTransfer operation.

    ", + "refs": { + } + }, + "Action": { + "base": "

    Describes the actions associated with a rule.

    ", + "refs": { + "ActionList$member": null + } + }, + "ActionList": { + "base": null, + "refs": { + "TopicRule$actions": "

    The actions associated with the rule.

    ", + "TopicRulePayload$actions": "

    The actions associated with the rule.

    " + } + }, + "AlarmName": { + "base": null, + "refs": { + "CloudwatchAlarmAction$alarmName": "

    The CloudWatch alarm name.

    " + } + }, + "AscendingOrder": { + "base": null, + "refs": { + "ListCACertificatesRequest$ascendingOrder": "

    Determines the order of the results.

    ", + "ListCertificatesByCARequest$ascendingOrder": "

    Specifies the order for results. If True, the results are returned in ascending order, based on the creation date.

    ", + "ListCertificatesRequest$ascendingOrder": "

    Specifies the order for results. If True, the results are returned in ascending order, based on the creation date.

    ", + "ListPoliciesRequest$ascendingOrder": "

    Specifies the order for results. If true, the results are returned in ascending creation order.

    ", + "ListPolicyPrincipalsRequest$ascendingOrder": "

    Specifies the order for results. If true, the results are returned in ascending creation order.

    ", + "ListPrincipalPoliciesRequest$ascendingOrder": "

    Specifies the order for results. If true, results are returned in ascending creation order.

    " + } + }, + "AttachPrincipalPolicyRequest": { + "base": "

    The input for the AttachPrincipalPolicy operation.

    ", + "refs": { + } + }, + "AttachThingPrincipalRequest": { + "base": "

    The input for the AttachThingPrincipal operation.

    ", + "refs": { + } + }, + "AttachThingPrincipalResponse": { + "base": "

    The output from the AttachThingPrincipal operation.

    ", + "refs": { + } + }, + "AttributeName": { + "base": null, + "refs": { + "Attributes$key": null, + "ListThingsRequest$attributeName": "

    The attribute name.

    " + } + }, + "AttributePayload": { + "base": "

    The attribute payload, a JSON string containing up to three key-value pairs (for example, {\\\"attributes\\\":{\\\"string1\\\":\\\"string2\\\"}}).

    ", + "refs": { + "CreateThingRequest$attributePayload": "

    The attribute payload, which consists of up to 3 name/value pairs in a JSON document (for example, {\\\"attributes\\\":{\\\"string1\\\":\\\"string2\\\"}}).

    ", + "UpdateThingRequest$attributePayload": "

    The attribute payload, a JSON string containing up to three key-value pairs (for example, {\\\"attributes\\\":{\\\"string1\\\":\\\"string2\\\"}}).

    " + } + }, + "AttributeValue": { + "base": null, + "refs": { + "Attributes$value": null, + "ListThingsRequest$attributeValue": "

    The attribute value.

    " + } + }, + "Attributes": { + "base": null, + "refs": { + "AttributePayload$attributes": "

    A JSON string containing up to three key-value pairs in JSON format (for example, {\\\"attributes\\\":{\\\"string1\\\":\\\"string2\\\"}}).

    ", + "DescribeThingResponse$attributes": "

    The attributes, which are name/value pairs in JSON format (for example: {\\\"attributes\\\":{\\\"some-name1\\\":\\\"some-value1\\\",\\\"some-name2\\\":\\\"some-value2\\\",\\\"some-name3\\\":\\\"some-value3\\\"}}).

    ", + "ThingAttribute$attributes": "

    The attributes.

    " + } + }, + "AwsAccountId": { + "base": null, + "refs": { + "CACertificateDescription$ownedBy": "

    The owner of the CA certificate.

    ", + "CertificateDescription$ownedBy": "

    The ID of the AWS account that owns the certificate.

    ", + "CertificateDescription$previousOwnedBy": "

    The ID of the AWS account of the previous owner of the certificate.

    ", + "TransferCertificateRequest$targetAwsAccount": "

    The AWS account to which the certificate will be transferred.

    " + } + }, + "AwsArn": { + "base": null, + "refs": { + "CloudwatchAlarmAction$roleArn": "

    The IAM role that allows access to the CloudWatch alarm.

    ", + "CloudwatchMetricAction$roleArn": "

    The IAM role that allows access to the CloudWatch metric.

    ", + "DynamoDBAction$roleArn": "

    The ARN of the IAM role that grants access to the DynamoDB table.

    ", + "ElasticsearchAction$roleArn": "

    The IAM role ARN that has access to Elasticsearch.

    ", + "FirehoseAction$roleArn": "

    The IAM role that grants access to the Amazon Kinesis Firehose stream.

    ", + "GetLoggingOptionsResponse$roleArn": "

    The ARN of the IAM role that grants access.

    ", + "KinesisAction$roleArn": "

    The ARN of the IAM role that grants access to the Amazon Kinesis stream.

    ", + "LoggingOptionsPayload$roleArn": "

    The ARN of the IAM role that grants access.

    ", + "RepublishAction$roleArn": "

    The ARN of the IAM role that grants access.

    ", + "S3Action$roleArn": "

    The ARN of the IAM role that grants access.

    ", + "SnsAction$targetArn": "

    The ARN of the SNS topic.

    ", + "SnsAction$roleArn": "

    The ARN of the IAM role that grants access.

    ", + "SqsAction$roleArn": "

    The ARN of the IAM role that grants access.

    " + } + }, + "AwsIotSqlVersion": { + "base": null, + "refs": { + "TopicRule$awsIotSqlVersion": "

    The version of the SQL rules engine to use when evaluating the rule.

    ", + "TopicRulePayload$awsIotSqlVersion": "

    The version of the SQL rules engine to use when evaluating the rule.

    " + } + }, + "BucketName": { + "base": null, + "refs": { + "S3Action$bucketName": "

    The Amazon S3 bucket.

    " + } + }, + "CACertificate": { + "base": "

    A CA certificate.

    ", + "refs": { + "CACertificates$member": null + } + }, + "CACertificateDescription": { + "base": "

    Describes a CA certificate.

    ", + "refs": { + "DescribeCACertificateResponse$certificateDescription": "

    The CA certificate description.

    " + } + }, + "CACertificateStatus": { + "base": null, + "refs": { + "CACertificate$status": "

    The status of the CA certificate.

    The status value REGISTER_INACTIVE is deprecated and should not be used.

    ", + "CACertificateDescription$status": "

    The status of a CA certificate.

    ", + "UpdateCACertificateRequest$newStatus": "

    The updated status of the CA certificate.

    Note: The status value REGISTER_INACTIVE is deprecated and should not be used.

    " + } + }, + "CACertificates": { + "base": null, + "refs": { + "ListCACertificatesResponse$certificates": "

    The CA certificates registered in your AWS account.

    " + } + }, + "CancelCertificateTransferRequest": { + "base": "

    The input for the CancelCertificateTransfer operation.

    ", + "refs": { + } + }, + "Certificate": { + "base": "

    Information about a certificate.

    ", + "refs": { + "Certificates$member": null + } + }, + "CertificateArn": { + "base": null, + "refs": { + "CACertificate$certificateArn": "

    The ARN of the CA certificate.

    ", + "CACertificateDescription$certificateArn": "

    The CA certificate ARN.

    ", + "Certificate$certificateArn": "

    The ARN of the certificate.

    ", + "CertificateDescription$certificateArn": "

    The ARN of the certificate.

    ", + "CreateCertificateFromCsrResponse$certificateArn": "

    The Amazon Resource Name (ARN) of the certificate. You can use the ARN as a principal for policy operations.

    ", + "CreateKeysAndCertificateResponse$certificateArn": "

    The ARN of the certificate.

    ", + "RegisterCACertificateResponse$certificateArn": "

    The CA certificate ARN.

    ", + "RegisterCertificateResponse$certificateArn": "

    The certificate ARN.

    ", + "TransferCertificateResponse$transferredCertificateArn": "

    The ARN of the certificate.

    " + } + }, + "CertificateConflictException": { + "base": "

    Unable to verify the CA certificate used to sign the device certificate you are attempting to register. This happens when you have registered more than one CA certificate that has the same subject field and public key.

    ", + "refs": { + } + }, + "CertificateDescription": { + "base": "

    Describes a certificate.

    ", + "refs": { + "DescribeCertificateResponse$certificateDescription": "

    The description of the certificate.

    " + } + }, + "CertificateId": { + "base": null, + "refs": { + "AcceptCertificateTransferRequest$certificateId": "

    The ID of the certificate.

    ", + "CACertificate$certificateId": "

    The ID of the CA certificate.

    ", + "CACertificateDescription$certificateId": "

    The CA certificate ID.

    ", + "CancelCertificateTransferRequest$certificateId": "

    The ID of the certificate.

    ", + "Certificate$certificateId": "

    The ID of the certificate.

    ", + "CertificateDescription$certificateId": "

    The ID of the certificate.

    ", + "CertificateDescription$caCertificateId": "

    The certificate ID of the CA certificate used to sign this certificate.

    ", + "CreateCertificateFromCsrResponse$certificateId": "

    The ID of the certificate. Certificate management operations only take a certificateId.

    ", + "CreateKeysAndCertificateResponse$certificateId": "

    The ID of the certificate. AWS IoT issues a default subject name for the certificate (for example, AWS IoT Certificate).

    ", + "DeleteCACertificateRequest$certificateId": "

    The ID of the certificate to delete.

    ", + "DeleteCertificateRequest$certificateId": "

    The ID of the certificate.

    ", + "DescribeCACertificateRequest$certificateId": "

    The CA certificate identifier.

    ", + "DescribeCertificateRequest$certificateId": "

    The ID of the certificate.

    ", + "ListCertificatesByCARequest$caCertificateId": "

    The ID of the CA certificate. This operation will list all registered device certificates that were signed by this CA certificate.

    ", + "RegisterCACertificateResponse$certificateId": "

    The CA certificate identifier.

    ", + "RegisterCertificateResponse$certificateId": "

    The certificate identifier.

    ", + "RejectCertificateTransferRequest$certificateId": "

    The ID of the certificate.

    ", + "TransferCertificateRequest$certificateId": "

    The ID of the certificate.

    ", + "UpdateCACertificateRequest$certificateId": "

    The CA certificate identifier.

    ", + "UpdateCertificateRequest$certificateId": "

    The ID of the certificate.

    " + } + }, + "CertificatePem": { + "base": null, + "refs": { + "CACertificateDescription$certificatePem": "

    The CA certificate data, in PEM format.

    ", + "CertificateDescription$certificatePem": "

    The certificate data, in PEM format.

    ", + "CreateCertificateFromCsrResponse$certificatePem": "

    The certificate data, in PEM format.

    ", + "CreateKeysAndCertificateResponse$certificatePem": "

    The certificate data, in PEM format.

    ", + "RegisterCACertificateRequest$caCertificate": "

    The CA certificate.

    ", + "RegisterCACertificateRequest$verificationCertificate": "

    The private key verification certificate.

    ", + "RegisterCertificateRequest$certificatePem": "

    The certificate data, in PEM format.

    ", + "RegisterCertificateRequest$caCertificatePem": "

    The CA certificate used to sign the device certificate being registered.

    " + } + }, + "CertificateSigningRequest": { + "base": null, + "refs": { + "CreateCertificateFromCsrRequest$certificateSigningRequest": "

    The certificate signing request (CSR).

    " + } + }, + "CertificateStateException": { + "base": "

    The certificate operation is not allowed.

    ", + "refs": { + } + }, + "CertificateStatus": { + "base": null, + "refs": { + "Certificate$status": "

    The status of the certificate.

    The status value REGISTER_INACTIVE is deprecated and should not be used.

    ", + "CertificateDescription$status": "

    The status of the certificate.

    ", + "UpdateCertificateRequest$newStatus": "

    The new status.

    Note: Setting the status to PENDING_TRANSFER will result in an exception being thrown. PENDING_TRANSFER is a status used internally by AWS IoT. It is not intended for developer use.

    Note: The status value REGISTER_INACTIVE is deprecated and should not be used.

    " + } + }, + "CertificateValidationException": { + "base": "

    The certificate is invalid.

    ", + "refs": { + } + }, + "Certificates": { + "base": null, + "refs": { + "ListCertificatesByCAResponse$certificates": "

    The device certificates signed by the specified CA certificate.

    ", + "ListCertificatesResponse$certificates": "

    The descriptions of the certificates.

    " + } + }, + "ClientId": { + "base": null, + "refs": { + "DescribeThingResponse$defaultClientId": "

    The default client ID.

    " + } + }, + "CloudwatchAlarmAction": { + "base": "

    Describes an action that updates a CloudWatch alarm.

    ", + "refs": { + "Action$cloudwatchAlarm": "

    Change the state of a CloudWatch alarm.

    " + } + }, + "CloudwatchMetricAction": { + "base": "

    Describes an action that captures a CloudWatch metric.

    ", + "refs": { + "Action$cloudwatchMetric": "

    Capture a CloudWatch metric.

    " + } + }, + "CreateCertificateFromCsrRequest": { + "base": "

    The input for the CreateCertificateFromCsr operation.

    ", + "refs": { + } + }, + "CreateCertificateFromCsrResponse": { + "base": "

    The output from the CreateCertificateFromCsr operation.

    ", + "refs": { + } + }, + "CreateKeysAndCertificateRequest": { + "base": "

    The input for the CreateKeysAndCertificate operation.

    ", + "refs": { + } + }, + "CreateKeysAndCertificateResponse": { + "base": "

    The output of the CreateKeysAndCertificate operation.

    ", + "refs": { + } + }, + "CreatePolicyRequest": { + "base": "

    The input for the CreatePolicy operation.

    ", + "refs": { + } + }, + "CreatePolicyResponse": { + "base": "

    The output from the CreatePolicy operation.

    ", + "refs": { + } + }, + "CreatePolicyVersionRequest": { + "base": "

    The input for the CreatePolicyVersion operation.

    ", + "refs": { + } + }, + "CreatePolicyVersionResponse": { + "base": "

    The output of the CreatePolicyVersion operation.

    ", + "refs": { + } + }, + "CreateThingRequest": { + "base": "

    The input for the CreateThing operation.

    ", + "refs": { + } + }, + "CreateThingResponse": { + "base": "

    The output of the CreateThing operation.

    ", + "refs": { + } + }, + "CreateTopicRuleRequest": { + "base": "

    The input for the CreateTopicRule operation.

    ", + "refs": { + } + }, + "CreatedAtDate": { + "base": null, + "refs": { + "TopicRule$createdAt": "

    The date and time the rule was created.

    ", + "TopicRuleListItem$createdAt": "

    The date and time the rule was created.

    " + } + }, + "DateType": { + "base": null, + "refs": { + "CACertificate$creationDate": "

    The date the CA certificate was created.

    ", + "CACertificateDescription$creationDate": "

    The date the CA certificate was created.

    ", + "Certificate$creationDate": "

    The date and time the certificate was created.

    ", + "CertificateDescription$creationDate": "

    The date and time the certificate was created.

    ", + "CertificateDescription$lastModifiedDate": "

    The date and time the certificate was last modified.

    ", + "PolicyVersion$createDate": "

    The date and time the policy was created.

    ", + "TransferData$transferDate": "

    The date the transfer took place.

    ", + "TransferData$acceptDate": "

    The date the transfer was accepted.

    ", + "TransferData$rejectDate": "

    The date the transfer was rejected.

    " + } + }, + "DeleteCACertificateRequest": { + "base": "

    Input for the DeleteCACertificate operation.

    ", + "refs": { + } + }, + "DeleteCACertificateResponse": { + "base": "

    The output for the DeleteCACertificate operation.

    ", + "refs": { + } + }, + "DeleteCertificateRequest": { + "base": "

    The input for the DeleteCertificate operation.

    ", + "refs": { + } + }, + "DeleteConflictException": { + "base": "

    You can't delete the resource because it is attached to one or more resources.

    ", + "refs": { + } + }, + "DeletePolicyRequest": { + "base": "

    The input for the DeletePolicy operation.

    ", + "refs": { + } + }, + "DeletePolicyVersionRequest": { + "base": "

    The input for the DeletePolicyVersion operation.

    ", + "refs": { + } + }, + "DeleteRegistrationCodeRequest": { + "base": "

    The input for the DeleteRegistrationCode operation.

    ", + "refs": { + } + }, + "DeleteRegistrationCodeResponse": { + "base": "

    The output for the DeleteRegistrationCode operation.

    ", + "refs": { + } + }, + "DeleteThingRequest": { + "base": "

    The input for the DeleteThing operation.

    ", + "refs": { + } + }, + "DeleteThingResponse": { + "base": "

    The output of the DeleteThing operation.

    ", + "refs": { + } + }, + "DeleteTopicRuleRequest": { + "base": "

    The input for the DeleteTopicRule operation.

    ", + "refs": { + } + }, + "DeliveryStreamName": { + "base": null, + "refs": { + "FirehoseAction$deliveryStreamName": "

    The delivery stream name.

    " + } + }, + "DescribeCACertificateRequest": { + "base": "

    The input for the DescribeCACertificate operation.

    ", + "refs": { + } + }, + "DescribeCACertificateResponse": { + "base": "

    The output from the DescribeCACertificate operation.

    ", + "refs": { + } + }, + "DescribeCertificateRequest": { + "base": "

    The input for the DescribeCertificate operation.

    ", + "refs": { + } + }, + "DescribeCertificateResponse": { + "base": "

    The output of the DescribeCertificate operation.

    ", + "refs": { + } + }, + "DescribeEndpointRequest": { + "base": "

    The input for the DescribeEndpoint operation.

    ", + "refs": { + } + }, + "DescribeEndpointResponse": { + "base": "

    The output from the DescribeEndpoint operation.

    ", + "refs": { + } + }, + "DescribeThingRequest": { + "base": "

    The input for the DescribeThing operation.

    ", + "refs": { + } + }, + "DescribeThingResponse": { + "base": "

    The output from the DescribeThing operation.

    ", + "refs": { + } + }, + "Description": { + "base": null, + "refs": { + "TopicRule$description": "

    The description of the rule.

    ", + "TopicRulePayload$description": "

    The description of the rule.

    " + } + }, + "DetachPrincipalPolicyRequest": { + "base": "

    The input for the DetachPrincipalPolicy operation.

    ", + "refs": { + } + }, + "DetachThingPrincipalRequest": { + "base": "

    The input for the DetachThingPrincipal operation.

    ", + "refs": { + } + }, + "DetachThingPrincipalResponse": { + "base": "

    The output from the DetachThingPrincipal operation.

    ", + "refs": { + } + }, + "DisableTopicRuleRequest": { + "base": "

    The input for the DisableTopicRule operation.

    ", + "refs": { + } + }, + "DynamoDBAction": { + "base": "

    Describes an action to write to a DynamoDB table.

    The tableName, hashKeyField, and rangeKeyField values must match the values used when you created the table.

    The hashKeyValue and rangeKeyValue fields use a substitution template syntax. These templates provide data at runtime. The syntax is as follows: ${sql-expression}.

    You can specify any valid expression in a WHERE or SELECT clause, including JSON properties, comparisons, calculations, and functions. For example, the following field uses the third level of the topic:

    \"hashKeyValue\": \"${topic(3)}\"

    The following field uses the timestamp:

    \"rangeKeyValue\": \"${timestamp()}\"

    ", + "refs": { + "Action$dynamoDB": "

    Write to a DynamoDB table.

    " + } + }, + "DynamoKeyType": { + "base": null, + "refs": { + "DynamoDBAction$hashKeyType": "

    The hash key type. Valid values are \"STRING\" or \"NUMBER\".

    ", + "DynamoDBAction$rangeKeyType": "

    The range key type. Valid values are \"STRING\" or \"NUMBER\".

    " + } + }, + "DynamoOperation": { + "base": null, + "refs": { + "DynamoDBAction$operation": "

    The type of operation to be performed. This follows the substitution template, so it can be ${operation}, but the substitution must result in one of the following: INSERT, UPDATE, or DELETE.

    " + } + }, + "ElasticsearchAction": { + "base": "

    Describes an action that writes data to an Amazon Elasticsearch Service domain.

    ", + "refs": { + "Action$elasticsearch": "

    Write data to an Amazon Elasticsearch Service domain.

    " + } + }, + "ElasticsearchEndpoint": { + "base": null, + "refs": { + "ElasticsearchAction$endpoint": "

    The endpoint of your Elasticsearch domain.

    " + } + }, + "ElasticsearchId": { + "base": null, + "refs": { + "ElasticsearchAction$id": "

    The unique identifier for the document you are storing.

    " + } + }, + "ElasticsearchIndex": { + "base": null, + "refs": { + "ElasticsearchAction$index": "

    The Elasticsearch index where you want to store your data.

    " + } + }, + "ElasticsearchType": { + "base": null, + "refs": { + "ElasticsearchAction$type": "

    The type of document you are storing.

    " + } + }, + "EnableTopicRuleRequest": { + "base": "

    The input for the EnableTopicRule operation.

    ", + "refs": { + } + }, + "EndpointAddress": { + "base": null, + "refs": { + "DescribeEndpointResponse$endpointAddress": "

    The endpoint. The format of the endpoint is as follows: identifier.iot.region.amazonaws.com.

    " + } + }, + "FirehoseAction": { + "base": "

    Describes an action that writes data to an Amazon Kinesis Firehose stream.

    ", + "refs": { + "Action$firehose": "

    Write to an Amazon Kinesis Firehose stream.

    " + } + }, + "FunctionArn": { + "base": null, + "refs": { + "LambdaAction$functionArn": "

    The ARN of the Lambda function.

    " + } + }, + "GetLoggingOptionsRequest": { + "base": "

    The input for the GetLoggingOptions operation.

    ", + "refs": { + } + }, + "GetLoggingOptionsResponse": { + "base": "

    The output from the GetLoggingOptions operation.

    ", + "refs": { + } + }, + "GetPolicyRequest": { + "base": "

    The input for the GetPolicy operation.

    ", + "refs": { + } + }, + "GetPolicyResponse": { + "base": "

    The output from the GetPolicy operation.

    ", + "refs": { + } + }, + "GetPolicyVersionRequest": { + "base": "

    The input for the GetPolicyVersion operation.

    ", + "refs": { + } + }, + "GetPolicyVersionResponse": { + "base": "

    The output from the GetPolicyVersion operation.

    ", + "refs": { + } + }, + "GetRegistrationCodeRequest": { + "base": "

    The input to the GetRegistrationCode operation.

    ", + "refs": { + } + }, + "GetRegistrationCodeResponse": { + "base": "

    The output from the GetRegistrationCode operation.

    ", + "refs": { + } + }, + "GetTopicRuleRequest": { + "base": "

    The input for the GetTopicRule operation.

    ", + "refs": { + } + }, + "GetTopicRuleResponse": { + "base": "

    The output from the GetTopicRule operation.

    ", + "refs": { + } + }, + "HashKeyField": { + "base": null, + "refs": { + "DynamoDBAction$hashKeyField": "

    The hash key name.

    " + } + }, + "HashKeyValue": { + "base": null, + "refs": { + "DynamoDBAction$hashKeyValue": "

    The hash key value.

    " + } + }, + "InternalException": { + "base": "

    An unexpected error has occurred.

    ", + "refs": { + } + }, + "InternalFailureException": { + "base": "

    An unexpected error has occurred.

    ", + "refs": { + } + }, + "InvalidRequestException": { + "base": "

    The request is not valid.

    ", + "refs": { + } + }, + "IsDefaultVersion": { + "base": null, + "refs": { + "CreatePolicyVersionResponse$isDefaultVersion": "

    Specifies whether the policy version is the default.

    ", + "GetPolicyVersionResponse$isDefaultVersion": "

    Specifies whether the policy version is the default.

    ", + "PolicyVersion$isDefaultVersion": "

    Specifies whether the policy version is the default.

    " + } + }, + "IsDisabled": { + "base": null, + "refs": { + "ListTopicRulesRequest$ruleDisabled": "

    Specifies whether the rule is disabled.

    ", + "TopicRule$ruleDisabled": "

    Specifies whether the rule is disabled.

    ", + "TopicRuleListItem$ruleDisabled": "

    Specifies whether the rule is disabled.

    ", + "TopicRulePayload$ruleDisabled": "

    Specifies whether the rule is disabled.

    " + } + }, + "Key": { + "base": null, + "refs": { + "S3Action$key": "

    The object key.

    " + } + }, + "KeyPair": { + "base": "

    Describes a key pair.

    ", + "refs": { + "CreateKeysAndCertificateResponse$keyPair": "

    The generated key pair.

    " + } + }, + "KinesisAction": { + "base": "

    Describes an action to write data to an Amazon Kinesis stream.

    ", + "refs": { + "Action$kinesis": "

    Write data to an Amazon Kinesis stream.

    " + } + }, + "LambdaAction": { + "base": "

    Describes an action to invoke a Lambda function.

    ", + "refs": { + "Action$lambda": "

    Invoke a Lambda function.

    " + } + }, + "LimitExceededException": { + "base": "

    The number of attached entities exceeds the limit.

    ", + "refs": { + } + }, + "ListCACertificatesRequest": { + "base": "

    Input for the ListCACertificates operation.

    ", + "refs": { + } + }, + "ListCACertificatesResponse": { + "base": "

    The output from the ListCACertificates operation.

    ", + "refs": { + } + }, + "ListCertificatesByCARequest": { + "base": "

    The input to the ListCertificatesByCA operation.

    ", + "refs": { + } + }, + "ListCertificatesByCAResponse": { + "base": "

    The output of the ListCertificatesByCA operation.

    ", + "refs": { + } + }, + "ListCertificatesRequest": { + "base": "

    The input for the ListCertificates operation.

    ", + "refs": { + } + }, + "ListCertificatesResponse": { + "base": "

    The output of the ListCertificates operation.

    ", + "refs": { + } + }, + "ListPoliciesRequest": { + "base": "

    The input for the ListPolicies operation.

    ", + "refs": { + } + }, + "ListPoliciesResponse": { + "base": "

    The output from the ListPolicies operation.

    ", + "refs": { + } + }, + "ListPolicyPrincipalsRequest": { + "base": "

    The input for the ListPolicyPrincipals operation.

    ", + "refs": { + } + }, + "ListPolicyPrincipalsResponse": { + "base": "

    The output from the ListPolicyPrincipals operation.

    ", + "refs": { + } + }, + "ListPolicyVersionsRequest": { + "base": "

    The input for the ListPolicyVersions operation.

    ", + "refs": { + } + }, + "ListPolicyVersionsResponse": { + "base": "

    The output from the ListPolicyVersions operation.

    ", + "refs": { + } + }, + "ListPrincipalPoliciesRequest": { + "base": "

    The input for the ListPrincipalPolicies operation.

    ", + "refs": { + } + }, + "ListPrincipalPoliciesResponse": { + "base": "

    The output from the ListPrincipalPolicies operation.

    ", + "refs": { + } + }, + "ListPrincipalThingsRequest": { + "base": "

    The input for the ListPrincipalThings operation.

    ", + "refs": { + } + }, + "ListPrincipalThingsResponse": { + "base": "

    The output from the ListPrincipalThings operation.

    ", + "refs": { + } + }, + "ListThingPrincipalsRequest": { + "base": "

    The input for the ListThingPrincipals operation.

    ", + "refs": { + } + }, + "ListThingPrincipalsResponse": { + "base": "

    The output from the ListThingPrincipals operation.

    ", + "refs": { + } + }, + "ListThingsRequest": { + "base": "

    The input for the ListThings operation.

    ", + "refs": { + } + }, + "ListThingsResponse": { + "base": "

    The output from the ListThings operation.

    ", + "refs": { + } + }, + "ListTopicRulesRequest": { + "base": "

    The input for the ListTopicRules operation.

    ", + "refs": { + } + }, + "ListTopicRulesResponse": { + "base": "

    The output from the ListTopicRules operation.

    ", + "refs": { + } + }, + "LogLevel": { + "base": null, + "refs": { + "GetLoggingOptionsResponse$logLevel": "

    The logging level.

    ", + "LoggingOptionsPayload$logLevel": "

    The logging level.

    " + } + }, + "LoggingOptionsPayload": { + "base": "

    Describes the logging options payload.

    ", + "refs": { + "SetLoggingOptionsRequest$loggingOptionsPayload": "

    The logging options payload.

    " + } + }, + "MalformedPolicyException": { + "base": "

    The policy document is not valid.

    ", + "refs": { + } + }, + "Marker": { + "base": null, + "refs": { + "ListCACertificatesRequest$marker": "

    The marker for the next set of results.

    ", + "ListCACertificatesResponse$nextMarker": "

    The current position within the list of CA certificates.

    ", + "ListCertificatesByCARequest$marker": "

    The marker for the next set of results.

    ", + "ListCertificatesByCAResponse$nextMarker": "

    The marker for the next set of results, or null if there are no additional results.

    ", + "ListCertificatesRequest$marker": "

    The marker for the next set of results.

    ", + "ListCertificatesResponse$nextMarker": "

    The marker for the next set of results, or null if there are no additional results.

    ", + "ListPoliciesRequest$marker": "

    The marker for the next set of results.

    ", + "ListPoliciesResponse$nextMarker": "

    The marker for the next set of results, or null if there are no additional results.

    ", + "ListPolicyPrincipalsRequest$marker": "

    The marker for the next set of results.

    ", + "ListPolicyPrincipalsResponse$nextMarker": "

    The marker for the next set of results, or null if there are no additional results.

    ", + "ListPrincipalPoliciesRequest$marker": "

    The marker for the next set of results.

    ", + "ListPrincipalPoliciesResponse$nextMarker": "

    The marker for the next set of results, or null if there are no additional results.

    " + } + }, + "MaxResults": { + "base": null, + "refs": { + "ListPrincipalThingsRequest$maxResults": "

    The maximum number of principals to return.

    ", + "ListThingsRequest$maxResults": "

    The maximum number of results.

    ", + "ListTopicRulesRequest$maxResults": "

    The maximum number of results to return.

    " + } + }, + "Message": { + "base": null, + "refs": { + "RejectCertificateTransferRequest$rejectReason": "

    The reason the certificate transfer was rejected.

    ", + "TransferCertificateRequest$transferMessage": "

    The transfer message.

    ", + "TransferData$transferMessage": "

    The transfer message.

    ", + "TransferData$rejectReason": "

    The reason why the transfer was rejected.

    " + } + }, + "MessageFormat": { + "base": null, + "refs": { + "SnsAction$messageFormat": "

    The message format of the message to publish. Optional. Accepted values are \"JSON\" and \"RAW\". The default value of the attribute is \"RAW\". SNS uses this setting to determine whether the payload should be parsed and relevant platform-specific parts of the payload should be extracted. To read more about SNS message formats, refer to the official Amazon SNS documentation.
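    As a loose illustration (the target and role ARNs are placeholders, not values from this patch), an SNS action that asks AWS IoT to publish JSON-structured messages could be configured as follows:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/iot"
)

func main() {
	// With MessageFormat "JSON", SNS parses the payload and extracts the
	// platform-specific parts; the default "RAW" forwards it untouched.
	action := &iot.Action{Sns: &iot.SnsAction{
		TargetArn:     aws.String("arn:aws:sns:us-west-2:123456789012:alerts"), // placeholder
		RoleArn:       aws.String("arn:aws:iam::123456789012:role/iot-sns"),    // placeholder
		MessageFormat: aws.String("JSON"),
	}}
	fmt.Printf("%v\n", action) // attach via a TopicRulePayload's Actions list
}
```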

    " + } + }, + "MetricName": { + "base": null, + "refs": { + "CloudwatchMetricAction$metricName": "

    The CloudWatch metric name.

    " + } + }, + "MetricNamespace": { + "base": null, + "refs": { + "CloudwatchMetricAction$metricNamespace": "

    The CloudWatch metric namespace name.

    " + } + }, + "MetricTimestamp": { + "base": null, + "refs": { + "CloudwatchMetricAction$metricTimestamp": "

    An optional Unix timestamp.

    " + } + }, + "MetricUnit": { + "base": null, + "refs": { + "CloudwatchMetricAction$metricUnit": "

    The metric unit supported by CloudWatch.

    " + } + }, + "MetricValue": { + "base": null, + "refs": { + "CloudwatchMetricAction$metricValue": "

    The CloudWatch metric value.

    " + } + }, + "NextToken": { + "base": null, + "refs": { + "ListPrincipalThingsRequest$nextToken": "

    A token used to retrieve the next value.

    ", + "ListPrincipalThingsResponse$nextToken": "

    A token used to retrieve the next value.

    ", + "ListThingsRequest$nextToken": "

    The token for the next value.

    ", + "ListThingsResponse$nextToken": "

    A token used to retrieve the next value.

    ", + "ListTopicRulesRequest$nextToken": "

    A token used to retrieve the next value.

    ", + "ListTopicRulesResponse$nextToken": "

    A token used to retrieve the next value.

    " + } + }, + "PageSize": { + "base": null, + "refs": { + "ListCACertificatesRequest$pageSize": "

    The result page size.

    ", + "ListCertificatesByCARequest$pageSize": "

    The result page size.

    ", + "ListCertificatesRequest$pageSize": "

    The result page size.

    ", + "ListPoliciesRequest$pageSize": "

    The result page size.

    ", + "ListPolicyPrincipalsRequest$pageSize": "

    The result page size.

    ", + "ListPrincipalPoliciesRequest$pageSize": "

    The result page size.

    " + } + }, + "PartitionKey": { + "base": null, + "refs": { + "KinesisAction$partitionKey": "

    The partition key.

    " + } + }, + "PayloadField": { + "base": null, + "refs": { + "DynamoDBAction$payloadField": "

    The action payload. This name can be customized.

    " + } + }, + "Policies": { + "base": null, + "refs": { + "ListPoliciesResponse$policies": "

    The descriptions of the policies.

    ", + "ListPrincipalPoliciesResponse$policies": "

    The policies.

    " + } + }, + "Policy": { + "base": "

    Describes an AWS IoT policy.

    ", + "refs": { + "Policies$member": null + } + }, + "PolicyArn": { + "base": null, + "refs": { + "CreatePolicyResponse$policyArn": "

    The policy ARN.

    ", + "CreatePolicyVersionResponse$policyArn": "

    The policy ARN.

    ", + "GetPolicyResponse$policyArn": "

    The policy ARN.

    ", + "GetPolicyVersionResponse$policyArn": "

    The policy ARN.

    ", + "Policy$policyArn": "

    The policy ARN.

    " + } + }, + "PolicyDocument": { + "base": null, + "refs": { + "CreatePolicyRequest$policyDocument": "

    The JSON document that describes the policy. policyDocument must have a minimum length of 1, with a maximum length of 2048, excluding whitespace.
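    For instance, a sketch of creating a policy whose document respects these limits (the policy name and statement are illustrative):

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iot"
)

func main() {
	svc := iot.New(session.Must(session.NewSession()))

	// Well under the 2048-character limit (whitespace is not counted).
	doc := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"iot:Connect","Resource":"*"}]}`

	_, err := svc.CreatePolicy(&iot.CreatePolicyInput{
		PolicyName:     aws.String("examplePolicy"), // placeholder
		PolicyDocument: aws.String(doc),
	})
	if err != nil {
		panic(err)
	}
}
```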

    ", + "CreatePolicyResponse$policyDocument": "

    The JSON document that describes the policy.

    ", + "CreatePolicyVersionRequest$policyDocument": "

    The JSON document that describes the policy. Minimum length of 1. Maximum length of 2048, excluding whitespace.

    ", + "CreatePolicyVersionResponse$policyDocument": "

    The JSON document that describes the policy.

    ", + "GetPolicyResponse$policyDocument": "

    The JSON document that describes the policy.

    ", + "GetPolicyVersionResponse$policyDocument": "

    The JSON document that describes the policy.

    " + } + }, + "PolicyName": { + "base": null, + "refs": { + "AttachPrincipalPolicyRequest$policyName": "

    The policy name.

    ", + "CreatePolicyRequest$policyName": "

    The policy name.

    ", + "CreatePolicyResponse$policyName": "

    The policy name.

    ", + "CreatePolicyVersionRequest$policyName": "

    The policy name.

    ", + "DeletePolicyRequest$policyName": "

    The name of the policy to delete.

    ", + "DeletePolicyVersionRequest$policyName": "

    The name of the policy.

    ", + "DetachPrincipalPolicyRequest$policyName": "

    The name of the policy to detach.

    ", + "GetPolicyRequest$policyName": "

    The name of the policy.

    ", + "GetPolicyResponse$policyName": "

    The policy name.

    ", + "GetPolicyVersionRequest$policyName": "

    The name of the policy.

    ", + "GetPolicyVersionResponse$policyName": "

    The policy name.

    ", + "ListPolicyPrincipalsRequest$policyName": "

    The policy name.

    ", + "ListPolicyVersionsRequest$policyName": "

    The policy name.

    ", + "Policy$policyName": "

    The policy name.

    ", + "SetDefaultPolicyVersionRequest$policyName": "

    The policy name.

    " + } + }, + "PolicyVersion": { + "base": "

    Describes a policy version.

    ", + "refs": { + "PolicyVersions$member": null + } + }, + "PolicyVersionId": { + "base": null, + "refs": { + "CreatePolicyResponse$policyVersionId": "

    The policy version ID.

    ", + "CreatePolicyVersionResponse$policyVersionId": "

    The policy version ID.

    ", + "DeletePolicyVersionRequest$policyVersionId": "

    The policy version ID.

    ", + "GetPolicyResponse$defaultVersionId": "

    The default policy version ID.

    ", + "GetPolicyVersionRequest$policyVersionId": "

    The policy version ID.

    ", + "GetPolicyVersionResponse$policyVersionId": "

    The policy version ID.

    ", + "PolicyVersion$versionId": "

    The policy version ID.

    ", + "SetDefaultPolicyVersionRequest$policyVersionId": "

    The policy version ID.

    " + } + }, + "PolicyVersions": { + "base": null, + "refs": { + "ListPolicyVersionsResponse$policyVersions": "

    The policy versions.

    " + } + }, + "Principal": { + "base": null, + "refs": { + "AttachPrincipalPolicyRequest$principal": "

    The principal, which can be a certificate ARN (as returned from the CreateCertificate operation) or an Amazon Cognito ID.
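    A minimal sketch of attaching a policy to a certificate principal (the policy name and certificate ARN are placeholders):

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iot"
)

func main() {
	svc := iot.New(session.Must(session.NewSession()))

	// The principal may be a certificate ARN or an Amazon Cognito identity ID.
	_, err := svc.AttachPrincipalPolicy(&iot.AttachPrincipalPolicyInput{
		PolicyName: aws.String("examplePolicy"),                                       // placeholder
		Principal:  aws.String("arn:aws:iot:us-west-2:123456789012:cert/exampleCert"), // placeholder
	})
	if err != nil {
		panic(err)
	}
}
```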

    ", + "AttachThingPrincipalRequest$principal": "

    The principal (certificate or other credential).

    ", + "DetachPrincipalPolicyRequest$principal": "

    The principal.

    If the principal is a certificate, specify the certificate ARN. If the principal is an Amazon Cognito identity, specify the identity ID.

    ", + "DetachThingPrincipalRequest$principal": "

    The principal.

    If the principal is a certificate, specify the certificate ARN. If the principal is an Amazon Cognito identity, specify the identity ID.

    ", + "ListPrincipalPoliciesRequest$principal": "

    The principal.

    ", + "ListPrincipalThingsRequest$principal": "

    The principal.

    " + } + }, + "PrincipalArn": { + "base": null, + "refs": { + "Principals$member": null + } + }, + "Principals": { + "base": null, + "refs": { + "ListPolicyPrincipalsResponse$principals": "

    The descriptions of the principals.

    ", + "ListThingPrincipalsResponse$principals": "

    The principals.

    " + } + }, + "PrivateKey": { + "base": null, + "refs": { + "KeyPair$PrivateKey": "

    The private key.

    " + } + }, + "PublicKey": { + "base": null, + "refs": { + "KeyPair$PublicKey": "

    The public key.

    " + } + }, + "QueueUrl": { + "base": null, + "refs": { + "SqsAction$queueUrl": "

    The URL of the Amazon SQS queue.

    " + } + }, + "RangeKeyField": { + "base": null, + "refs": { + "DynamoDBAction$rangeKeyField": "

    The range key name.

    " + } + }, + "RangeKeyValue": { + "base": null, + "refs": { + "DynamoDBAction$rangeKeyValue": "

    The range key value.

    " + } + }, + "RegisterCACertificateRequest": { + "base": "

    The input to the RegisterCACertificate operation.

    ", + "refs": { + } + }, + "RegisterCACertificateResponse": { + "base": "

    The output from the RegisterCACertificate operation.

    ", + "refs": { + } + }, + "RegisterCertificateRequest": { + "base": "

    The input to the RegisterCertificate operation.

    ", + "refs": { + } + }, + "RegisterCertificateResponse": { + "base": "

    The output from the RegisterCertificate operation.

    ", + "refs": { + } + }, + "RegistrationCode": { + "base": null, + "refs": { + "GetRegistrationCodeResponse$registrationCode": "

    The CA certificate registration code.

    " + } + }, + "RegistrationCodeValidationException": { + "base": "

    The registration code is invalid.

    ", + "refs": { + } + }, + "RejectCertificateTransferRequest": { + "base": "

    The input for the RejectCertificateTransfer operation.

    ", + "refs": { + } + }, + "ReplaceTopicRuleRequest": { + "base": "

    The input for the ReplaceTopicRule operation.

    ", + "refs": { + } + }, + "RepublishAction": { + "base": "

    Describes an action to republish to another topic.

    ", + "refs": { + "Action$republish": "

    Publish to another MQTT topic.

    " + } + }, + "ResourceAlreadyExistsException": { + "base": "

    The resource already exists.

    ", + "refs": { + } + }, + "ResourceNotFoundException": { + "base": "

    The specified resource does not exist.

    ", + "refs": { + } + }, + "RuleArn": { + "base": null, + "refs": { + "GetTopicRuleResponse$ruleArn": "

    The rule ARN.

    ", + "TopicRuleListItem$ruleArn": "

    The rule ARN.

    " + } + }, + "RuleName": { + "base": null, + "refs": { + "CreateTopicRuleRequest$ruleName": "

    The name of the rule.

    ", + "DeleteTopicRuleRequest$ruleName": "

    The name of the rule.

    ", + "DisableTopicRuleRequest$ruleName": "

    The name of the rule to disable.

    ", + "EnableTopicRuleRequest$ruleName": "

    The name of the topic rule to enable.

    ", + "GetTopicRuleRequest$ruleName": "

    The name of the rule.

    ", + "ReplaceTopicRuleRequest$ruleName": "

    The name of the rule.

    ", + "TopicRule$ruleName": "

    The name of the rule.

    ", + "TopicRuleListItem$ruleName": "

    The name of the rule.

    " + } + }, + "S3Action": { + "base": "

    Describes an action to write data to an Amazon S3 bucket.

    ", + "refs": { + "Action$s3": "

    Write to an Amazon S3 bucket.

    " + } + }, + "SQL": { + "base": null, + "refs": { + "TopicRule$sql": "

    The SQL statement used to query the topic. When using a SQL query with multiple lines, be sure to escape the newline characters.

    ", + "TopicRulePayload$sql": "

    The SQL statement used to query the topic. For more information, see AWS IoT SQL Reference in the AWS IoT Developer Guide.

    " + } + }, + "ServiceUnavailableException": { + "base": "

    The service is temporarily unavailable.

    ", + "refs": { + } + }, + "SetAsActive": { + "base": null, + "refs": { + "AcceptCertificateTransferRequest$setAsActive": "

    Specifies whether the certificate is active.

    ", + "CreateCertificateFromCsrRequest$setAsActive": "

    Specifies whether the certificate is active.

    ", + "CreateKeysAndCertificateRequest$setAsActive": "

    Specifies whether the certificate is active.

    ", + "RegisterCACertificateRequest$setAsActive": "

    A boolean value that specifies if the CA certificate is set to active.

    ", + "RegisterCertificateRequest$setAsActive": "

    A boolean value that specifies if the certificate is set to active.

    " + } + }, + "SetAsDefault": { + "base": null, + "refs": { + "CreatePolicyVersionRequest$setAsDefault": "

    Specifies whether the policy version is set as the default. When this parameter is true, the new policy version becomes the operative version (that is, the version that is in effect for the certificates to which the policy is attached).

    " + } + }, + "SetDefaultPolicyVersionRequest": { + "base": "

    The input for the SetDefaultPolicyVersion operation.

    ", + "refs": { + } + }, + "SetLoggingOptionsRequest": { + "base": "

    The input for the SetLoggingOptions operation.

    ", + "refs": { + } + }, + "SnsAction": { + "base": "

    Describes an action to publish to an Amazon SNS topic.

    ", + "refs": { + "Action$sns": "

    Publish to an Amazon SNS topic.

    " + } + }, + "SqlParseException": { + "base": "

    The Rule-SQL expression can't be parsed correctly.

    ", + "refs": { + } + }, + "SqsAction": { + "base": "

    Describes an action to publish data to an Amazon SQS queue.

    ", + "refs": { + "Action$sqs": "

    Publish to an Amazon SQS queue.

    " + } + }, + "StateReason": { + "base": null, + "refs": { + "CloudwatchAlarmAction$stateReason": "

    The reason for the alarm change.

    " + } + }, + "StateValue": { + "base": null, + "refs": { + "CloudwatchAlarmAction$stateValue": "

    The value of the alarm state. Acceptable values are: OK, ALARM, INSUFFICIENT_DATA.

    " + } + }, + "StreamName": { + "base": null, + "refs": { + "KinesisAction$streamName": "

    The name of the Amazon Kinesis stream.

    " + } + }, + "TableName": { + "base": null, + "refs": { + "DynamoDBAction$tableName": "

    The name of the DynamoDB table.

    " + } + }, + "ThingArn": { + "base": null, + "refs": { + "CreateThingResponse$thingArn": "

    The thing ARN.

    " + } + }, + "ThingAttribute": { + "base": "

    Describes a thing attribute.

    ", + "refs": { + "ThingAttributeList$member": null + } + }, + "ThingAttributeList": { + "base": null, + "refs": { + "ListThingsResponse$things": "

    The things.

    " + } + }, + "ThingName": { + "base": null, + "refs": { + "AttachThingPrincipalRequest$thingName": "

    The name of the thing.

    ", + "CreateThingRequest$thingName": "

    The name of the thing.

    ", + "CreateThingResponse$thingName": "

    The name of the thing.

    ", + "DeleteThingRequest$thingName": "

    The thing name.

    ", + "DescribeThingRequest$thingName": "

    The name of the thing.

    ", + "DescribeThingResponse$thingName": "

    The name of the thing.

    ", + "DetachThingPrincipalRequest$thingName": "

    The name of the thing.

    ", + "ListThingPrincipalsRequest$thingName": "

    The name of the thing.

    ", + "ThingAttribute$thingName": "

    The name of the thing.

    ", + "ThingNameList$member": null, + "UpdateThingRequest$thingName": "

    The thing name.

    " + } + }, + "ThingNameList": { + "base": null, + "refs": { + "ListPrincipalThingsResponse$things": "

    The things.

    " + } + }, + "ThrottlingException": { + "base": "

    The rate exceeds the limit.

    ", + "refs": { + } + }, + "Topic": { + "base": null, + "refs": { + "ListTopicRulesRequest$topic": "

    The topic.

    " + } + }, + "TopicPattern": { + "base": null, + "refs": { + "RepublishAction$topic": "

    The name of the MQTT topic.

    ", + "TopicRuleListItem$topicPattern": "

    The pattern for the topic names that apply.

    " + } + }, + "TopicRule": { + "base": "

    Describes a rule.

    ", + "refs": { + "GetTopicRuleResponse$rule": "

    The rule.

    " + } + }, + "TopicRuleList": { + "base": null, + "refs": { + "ListTopicRulesResponse$rules": "

    The rules.

    " + } + }, + "TopicRuleListItem": { + "base": "

    Describes a rule.

    ", + "refs": { + "TopicRuleList$member": null + } + }, + "TopicRulePayload": { + "base": "

    Describes a rule.

    ", + "refs": { + "CreateTopicRuleRequest$topicRulePayload": "

    The rule payload.

    ", + "ReplaceTopicRuleRequest$topicRulePayload": "

    The rule payload.

    " + } + }, + "TransferAlreadyCompletedException": { + "base": "

    You can't revert the certificate transfer because the transfer is already complete.

    ", + "refs": { + } + }, + "TransferCertificateRequest": { + "base": "

    The input for the TransferCertificate operation.

    ", + "refs": { + } + }, + "TransferCertificateResponse": { + "base": "

    The output from the TransferCertificate operation.

    ", + "refs": { + } + }, + "TransferConflictException": { + "base": "

    You can't transfer the certificate because authorization policies are still attached.

    ", + "refs": { + } + }, + "TransferData": { + "base": "

    Data used to transfer a certificate to an AWS account.

    ", + "refs": { + "CertificateDescription$transferData": "

    The transfer data.

    " + } + }, + "UnauthorizedException": { + "base": "

    You are not authorized to perform this operation.

    ", + "refs": { + } + }, + "UpdateCACertificateRequest": { + "base": "

    The input to the UpdateCACertificate operation.

    ", + "refs": { + } + }, + "UpdateCertificateRequest": { + "base": "

    The input for the UpdateCertificate operation.

    ", + "refs": { + } + }, + "UpdateThingRequest": { + "base": "

    The input for the UpdateThing operation.

    ", + "refs": { + } + }, + "UpdateThingResponse": { + "base": "

    The output from the UpdateThing operation.

    ", + "refs": { + } + }, + "UseBase64": { + "base": null, + "refs": { + "SqsAction$useBase64": "

    Specifies whether to use Base64 encoding.

    " + } + }, + "VersionsLimitExceededException": { + "base": "

    The number of policy versions exceeds the limit.

    ", + "refs": { + } + }, + "errorMessage": { + "base": null, + "refs": { + "CertificateConflictException$message": "

    The message for the exception.

    ", + "CertificateStateException$message": "

    The message for the exception.

    ", + "CertificateValidationException$message": "

    Additional information about the exception.

    ", + "DeleteConflictException$message": "

    The message for the exception.

    ", + "InternalException$message": "

    The message for the exception.

    ", + "InternalFailureException$message": "

    The message for the exception.

    ", + "InvalidRequestException$message": "

    The message for the exception.

    ", + "LimitExceededException$message": "

    The message for the exception.

    ", + "MalformedPolicyException$message": "

    The message for the exception.

    ", + "RegistrationCodeValidationException$message": "

    Additional information about the exception.

    ", + "ResourceAlreadyExistsException$message": "

    The message for the exception.

    ", + "ResourceNotFoundException$message": "

    The message for the exception.

    ", + "ServiceUnavailableException$message": "

    The message for the exception.

    ", + "SqlParseException$message": "

    The message for the exception.

    ", + "ThrottlingException$message": "

    The message for the exception.

    ", + "TransferAlreadyCompletedException$message": "

    The message for the exception.

    ", + "TransferConflictException$message": "

    The message for the exception.

    ", + "UnauthorizedException$message": "

    The message for the exception.

    ", + "VersionsLimitExceededException$message": "

    The message for the exception.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/iot/2015-05-28/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/iot/2015-05-28/examples-1.json new file mode 100644 index 000000000..faff76894 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/iot/2015-05-28/examples-1.json @@ -0,0 +1,5 @@ +{ + "version":"1.0", + "examples":{ + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/api-2.json new file mode 100644 index 000000000..1f24a5aaf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/api-2.json @@ -0,0 +1,822 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2013-12-02", + "endpointPrefix":"kinesis", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"Kinesis", + "serviceFullName":"Amazon Kinesis", + "signatureVersion":"v4", + "targetPrefix":"Kinesis_20131202" + }, + "operations":{ + "AddTagsToStream":{ + "name":"AddTagsToStream", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddTagsToStreamInput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"LimitExceededException"} + ] + }, + "CreateStream":{ + "name":"CreateStream", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateStreamInput"}, + "errors":[ + {"shape":"ResourceInUseException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidArgumentException"} + ] + }, + "DecreaseStreamRetentionPeriod":{ + "name":"DecreaseStreamRetentionPeriod", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DecreaseStreamRetentionPeriodInput"}, + "errors":[ + {"shape":"ResourceInUseException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidArgumentException"} + ] + }, + "DeleteStream":{ + "name":"DeleteStream", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteStreamInput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"LimitExceededException"} + ] + }, + "DescribeStream":{ + "name":"DescribeStream", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeStreamInput"}, + "output":{"shape":"DescribeStreamOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"LimitExceededException"} + ] + }, + "DisableEnhancedMonitoring":{ + "name":"DisableEnhancedMonitoring", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableEnhancedMonitoringInput"}, + "output":{"shape":"EnhancedMonitoringOutput"}, + "errors":[ + {"shape":"InvalidArgumentException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "EnableEnhancedMonitoring":{ + "name":"EnableEnhancedMonitoring", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableEnhancedMonitoringInput"}, + "output":{"shape":"EnhancedMonitoringOutput"}, + "errors":[ + {"shape":"InvalidArgumentException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "GetRecords":{ + "name":"GetRecords", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetRecordsInput"}, + "output":{"shape":"GetRecordsOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + 
{"shape":"InvalidArgumentException"}, + {"shape":"ProvisionedThroughputExceededException"}, + {"shape":"ExpiredIteratorException"} + ] + }, + "GetShardIterator":{ + "name":"GetShardIterator", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetShardIteratorInput"}, + "output":{"shape":"GetShardIteratorOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"ProvisionedThroughputExceededException"} + ] + }, + "IncreaseStreamRetentionPeriod":{ + "name":"IncreaseStreamRetentionPeriod", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"IncreaseStreamRetentionPeriodInput"}, + "errors":[ + {"shape":"ResourceInUseException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidArgumentException"} + ] + }, + "ListStreams":{ + "name":"ListStreams", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListStreamsInput"}, + "output":{"shape":"ListStreamsOutput"}, + "errors":[ + {"shape":"LimitExceededException"} + ] + }, + "ListTagsForStream":{ + "name":"ListTagsForStream", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForStreamInput"}, + "output":{"shape":"ListTagsForStreamOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"LimitExceededException"} + ] + }, + "MergeShards":{ + "name":"MergeShards", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"MergeShardsInput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"LimitExceededException"} + ] + }, + "PutRecord":{ + "name":"PutRecord", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutRecordInput"}, + "output":{"shape":"PutRecordOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"ProvisionedThroughputExceededException"} + ] + }, + "PutRecords":{ + "name":"PutRecords", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutRecordsInput"}, + "output":{"shape":"PutRecordsOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"ProvisionedThroughputExceededException"} + ] + }, + "RemoveTagsFromStream":{ + "name":"RemoveTagsFromStream", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveTagsFromStreamInput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"LimitExceededException"} + ] + }, + "SplitShard":{ + "name":"SplitShard", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SplitShardInput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"LimitExceededException"} + ] + } + }, + "shapes":{ + "AddTagsToStreamInput":{ + "type":"structure", + "required":[ + "StreamName", + "Tags" + ], + "members":{ + "StreamName":{"shape":"StreamName"}, + "Tags":{"shape":"TagMap"} + } + }, + "ApproximateArrivalTimestamp":{"type":"timestamp"}, + "BooleanObject":{"type":"boolean"}, + "CreateStreamInput":{ + "type":"structure", + "required":[ + "StreamName", + "ShardCount" + ], + "members":{ + "StreamName":{"shape":"StreamName"}, + "ShardCount":{"shape":"PositiveIntegerObject"} 
+ } + }, + "Data":{ + "type":"blob", + "max":1048576, + "min":0 + }, + "DecreaseStreamRetentionPeriodInput":{ + "type":"structure", + "required":[ + "StreamName", + "RetentionPeriodHours" + ], + "members":{ + "StreamName":{"shape":"StreamName"}, + "RetentionPeriodHours":{"shape":"RetentionPeriodHours"} + } + }, + "DeleteStreamInput":{ + "type":"structure", + "required":["StreamName"], + "members":{ + "StreamName":{"shape":"StreamName"} + } + }, + "DescribeStreamInput":{ + "type":"structure", + "required":["StreamName"], + "members":{ + "StreamName":{"shape":"StreamName"}, + "Limit":{"shape":"DescribeStreamInputLimit"}, + "ExclusiveStartShardId":{"shape":"ShardId"} + } + }, + "DescribeStreamInputLimit":{ + "type":"integer", + "max":10000, + "min":1 + }, + "DescribeStreamOutput":{ + "type":"structure", + "required":["StreamDescription"], + "members":{ + "StreamDescription":{"shape":"StreamDescription"} + } + }, + "DisableEnhancedMonitoringInput":{ + "type":"structure", + "required":[ + "StreamName", + "ShardLevelMetrics" + ], + "members":{ + "StreamName":{"shape":"StreamName"}, + "ShardLevelMetrics":{"shape":"MetricsNameList"} + } + }, + "EnableEnhancedMonitoringInput":{ + "type":"structure", + "required":[ + "StreamName", + "ShardLevelMetrics" + ], + "members":{ + "StreamName":{"shape":"StreamName"}, + "ShardLevelMetrics":{"shape":"MetricsNameList"} + } + }, + "EnhancedMetrics":{ + "type":"structure", + "members":{ + "ShardLevelMetrics":{"shape":"MetricsNameList"} + } + }, + "EnhancedMonitoringList":{ + "type":"list", + "member":{"shape":"EnhancedMetrics"} + }, + "EnhancedMonitoringOutput":{ + "type":"structure", + "members":{ + "StreamName":{"shape":"StreamName"}, + "CurrentShardLevelMetrics":{"shape":"MetricsNameList"}, + "DesiredShardLevelMetrics":{"shape":"MetricsNameList"} + } + }, + "ErrorCode":{"type":"string"}, + "ErrorMessage":{"type":"string"}, + "ExpiredIteratorException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "GetRecordsInput":{ + "type":"structure", + "required":["ShardIterator"], + "members":{ + "ShardIterator":{"shape":"ShardIterator"}, + "Limit":{"shape":"GetRecordsInputLimit"} + } + }, + "GetRecordsInputLimit":{ + "type":"integer", + "max":10000, + "min":1 + }, + "GetRecordsOutput":{ + "type":"structure", + "required":["Records"], + "members":{ + "Records":{"shape":"RecordList"}, + "NextShardIterator":{"shape":"ShardIterator"}, + "MillisBehindLatest":{"shape":"MillisBehindLatest"} + } + }, + "GetShardIteratorInput":{ + "type":"structure", + "required":[ + "StreamName", + "ShardId", + "ShardIteratorType" + ], + "members":{ + "StreamName":{"shape":"StreamName"}, + "ShardId":{"shape":"ShardId"}, + "ShardIteratorType":{"shape":"ShardIteratorType"}, + "StartingSequenceNumber":{"shape":"SequenceNumber"}, + "Timestamp":{"shape":"Timestamp"} + } + }, + "GetShardIteratorOutput":{ + "type":"structure", + "members":{ + "ShardIterator":{"shape":"ShardIterator"} + } + }, + "HashKey":{ + "type":"string", + "pattern":"0|([1-9]\\d{0,38})" + }, + "HashKeyRange":{ + "type":"structure", + "required":[ + "StartingHashKey", + "EndingHashKey" + ], + "members":{ + "StartingHashKey":{"shape":"HashKey"}, + "EndingHashKey":{"shape":"HashKey"} + } + }, + "IncreaseStreamRetentionPeriodInput":{ + "type":"structure", + "required":[ + "StreamName", + "RetentionPeriodHours" + ], + "members":{ + "StreamName":{"shape":"StreamName"}, + "RetentionPeriodHours":{"shape":"RetentionPeriodHours"} + } + }, + "InvalidArgumentException":{ + 
"type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "ListStreamsInput":{ + "type":"structure", + "members":{ + "Limit":{"shape":"ListStreamsInputLimit"}, + "ExclusiveStartStreamName":{"shape":"StreamName"} + } + }, + "ListStreamsInputLimit":{ + "type":"integer", + "max":10000, + "min":1 + }, + "ListStreamsOutput":{ + "type":"structure", + "required":[ + "StreamNames", + "HasMoreStreams" + ], + "members":{ + "StreamNames":{"shape":"StreamNameList"}, + "HasMoreStreams":{"shape":"BooleanObject"} + } + }, + "ListTagsForStreamInput":{ + "type":"structure", + "required":["StreamName"], + "members":{ + "StreamName":{"shape":"StreamName"}, + "ExclusiveStartTagKey":{"shape":"TagKey"}, + "Limit":{"shape":"ListTagsForStreamInputLimit"} + } + }, + "ListTagsForStreamInputLimit":{ + "type":"integer", + "max":10, + "min":1 + }, + "ListTagsForStreamOutput":{ + "type":"structure", + "required":[ + "Tags", + "HasMoreTags" + ], + "members":{ + "Tags":{"shape":"TagList"}, + "HasMoreTags":{"shape":"BooleanObject"} + } + }, + "MergeShardsInput":{ + "type":"structure", + "required":[ + "StreamName", + "ShardToMerge", + "AdjacentShardToMerge" + ], + "members":{ + "StreamName":{"shape":"StreamName"}, + "ShardToMerge":{"shape":"ShardId"}, + "AdjacentShardToMerge":{"shape":"ShardId"} + } + }, + "MetricsName":{ + "type":"string", + "enum":[ + "IncomingBytes", + "IncomingRecords", + "OutgoingBytes", + "OutgoingRecords", + "WriteProvisionedThroughputExceeded", + "ReadProvisionedThroughputExceeded", + "IteratorAgeMilliseconds", + "ALL" + ] + }, + "MetricsNameList":{ + "type":"list", + "member":{"shape":"MetricsName"}, + "max":7, + "min":1 + }, + "MillisBehindLatest":{ + "type":"long", + "min":0 + }, + "PartitionKey":{ + "type":"string", + "max":256, + "min":1 + }, + "PositiveIntegerObject":{ + "type":"integer", + "max":100000, + "min":1 + }, + "ProvisionedThroughputExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "PutRecordInput":{ + "type":"structure", + "required":[ + "StreamName", + "Data", + "PartitionKey" + ], + "members":{ + "StreamName":{"shape":"StreamName"}, + "Data":{"shape":"Data"}, + "PartitionKey":{"shape":"PartitionKey"}, + "ExplicitHashKey":{"shape":"HashKey"}, + "SequenceNumberForOrdering":{"shape":"SequenceNumber"} + } + }, + "PutRecordOutput":{ + "type":"structure", + "required":[ + "ShardId", + "SequenceNumber" + ], + "members":{ + "ShardId":{"shape":"ShardId"}, + "SequenceNumber":{"shape":"SequenceNumber"} + } + }, + "PutRecordsInput":{ + "type":"structure", + "required":[ + "Records", + "StreamName" + ], + "members":{ + "Records":{"shape":"PutRecordsRequestEntryList"}, + "StreamName":{"shape":"StreamName"} + } + }, + "PutRecordsOutput":{ + "type":"structure", + "required":["Records"], + "members":{ + "FailedRecordCount":{"shape":"PositiveIntegerObject"}, + "Records":{"shape":"PutRecordsResultEntryList"} + } + }, + "PutRecordsRequestEntry":{ + "type":"structure", + "required":[ + "Data", + "PartitionKey" + ], + "members":{ + "Data":{"shape":"Data"}, + "ExplicitHashKey":{"shape":"HashKey"}, + "PartitionKey":{"shape":"PartitionKey"} + } + }, + "PutRecordsRequestEntryList":{ + "type":"list", + "member":{"shape":"PutRecordsRequestEntry"}, + "max":500, + "min":1 + }, + "PutRecordsResultEntry":{ + "type":"structure", + "members":{ + 
"SequenceNumber":{"shape":"SequenceNumber"}, + "ShardId":{"shape":"ShardId"}, + "ErrorCode":{"shape":"ErrorCode"}, + "ErrorMessage":{"shape":"ErrorMessage"} + } + }, + "PutRecordsResultEntryList":{ + "type":"list", + "member":{"shape":"PutRecordsResultEntry"}, + "max":500, + "min":1 + }, + "Record":{ + "type":"structure", + "required":[ + "SequenceNumber", + "Data", + "PartitionKey" + ], + "members":{ + "SequenceNumber":{"shape":"SequenceNumber"}, + "ApproximateArrivalTimestamp":{"shape":"ApproximateArrivalTimestamp"}, + "Data":{"shape":"Data"}, + "PartitionKey":{"shape":"PartitionKey"} + } + }, + "RecordList":{ + "type":"list", + "member":{"shape":"Record"} + }, + "RemoveTagsFromStreamInput":{ + "type":"structure", + "required":[ + "StreamName", + "TagKeys" + ], + "members":{ + "StreamName":{"shape":"StreamName"}, + "TagKeys":{"shape":"TagKeyList"} + } + }, + "ResourceInUseException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "RetentionPeriodHours":{ + "type":"integer", + "max":168, + "min":24 + }, + "SequenceNumber":{ + "type":"string", + "pattern":"0|([1-9]\\d{0,128})" + }, + "SequenceNumberRange":{ + "type":"structure", + "required":["StartingSequenceNumber"], + "members":{ + "StartingSequenceNumber":{"shape":"SequenceNumber"}, + "EndingSequenceNumber":{"shape":"SequenceNumber"} + } + }, + "Shard":{ + "type":"structure", + "required":[ + "ShardId", + "HashKeyRange", + "SequenceNumberRange" + ], + "members":{ + "ShardId":{"shape":"ShardId"}, + "ParentShardId":{"shape":"ShardId"}, + "AdjacentParentShardId":{"shape":"ShardId"}, + "HashKeyRange":{"shape":"HashKeyRange"}, + "SequenceNumberRange":{"shape":"SequenceNumberRange"} + } + }, + "ShardId":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[a-zA-Z0-9_.-]+" + }, + "ShardIterator":{ + "type":"string", + "max":512, + "min":1 + }, + "ShardIteratorType":{ + "type":"string", + "enum":[ + "AT_SEQUENCE_NUMBER", + "AFTER_SEQUENCE_NUMBER", + "TRIM_HORIZON", + "LATEST", + "AT_TIMESTAMP" + ] + }, + "ShardList":{ + "type":"list", + "member":{"shape":"Shard"} + }, + "SplitShardInput":{ + "type":"structure", + "required":[ + "StreamName", + "ShardToSplit", + "NewStartingHashKey" + ], + "members":{ + "StreamName":{"shape":"StreamName"}, + "ShardToSplit":{"shape":"ShardId"}, + "NewStartingHashKey":{"shape":"HashKey"} + } + }, + "StreamARN":{"type":"string"}, + "StreamDescription":{ + "type":"structure", + "required":[ + "StreamName", + "StreamARN", + "StreamStatus", + "Shards", + "HasMoreShards", + "RetentionPeriodHours", + "EnhancedMonitoring" + ], + "members":{ + "StreamName":{"shape":"StreamName"}, + "StreamARN":{"shape":"StreamARN"}, + "StreamStatus":{"shape":"StreamStatus"}, + "Shards":{"shape":"ShardList"}, + "HasMoreShards":{"shape":"BooleanObject"}, + "RetentionPeriodHours":{"shape":"RetentionPeriodHours"}, + "EnhancedMonitoring":{"shape":"EnhancedMonitoringList"} + } + }, + "StreamName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[a-zA-Z0-9_.-]+" + }, + "StreamNameList":{ + "type":"list", + "member":{"shape":"StreamName"} + }, + "StreamStatus":{ + "type":"string", + "enum":[ + "CREATING", + "DELETING", + "ACTIVE", + "UPDATING" + ] + }, + "Tag":{ + "type":"structure", + "required":["Key"], + "members":{ + "Key":{"shape":"TagKey"}, + "Value":{"shape":"TagValue"} + } + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + 
}, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":10, + "min":1 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "min":0 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":10, + "min":1 + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, + "Timestamp":{"type":"timestamp"} + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/docs-2.json new file mode 100644 index 000000000..09cf0fc16 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/docs-2.json @@ -0,0 +1,506 @@ +{ + "version": "2.0", + "service": "Amazon Kinesis Streams Service API Reference

    Amazon Kinesis Streams is a managed service that scales elastically for real-time processing of streaming big data.

    ", + "operations": { + "AddTagsToStream": "

    Adds or updates tags for the specified Amazon Kinesis stream. Each stream can have up to 10 tags.

    If tags have already been assigned to the stream, AddTagsToStream overwrites any existing tags that correspond to the specified tag keys.
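    A minimal sketch (the stream name and tags are illustrative):

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kinesis"
)

func main() {
	svc := kinesis.New(session.Must(session.NewSession()))

	// Re-sending a key that already exists overwrites its value.
	_, err := svc.AddTagsToStream(&kinesis.AddTagsToStreamInput{
		StreamName: aws.String("exampleStream"), // placeholder
		Tags: map[string]*string{
			"env":  aws.String("test"),
			"team": aws.String("data"),
		},
	})
	if err != nil {
		panic(err)
	}
}
```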

    ", + "CreateStream": "

    Creates an Amazon Kinesis stream. A stream captures and transports data records that are continuously emitted from different data sources or producers. Scale-out within a stream is explicitly supported by means of shards, which are uniquely identified groups of data records in a stream.

    You specify and control the number of shards that a stream is composed of. Each shard can support up to 5 read transactions per second, up to a maximum total data read rate of 2 MB per second, and up to 1,000 records written per second, up to a maximum total data write rate of 1 MB per second. You can add shards to a stream if the amount of data input increases, and you can remove shards if the amount of data input decreases.

    The stream name identifies the stream. The name is scoped to the AWS account used by the application. It is also scoped by region. That is, two streams in two different accounts can have the same name, and two streams in the same account, but in two different regions, can have the same name.

    CreateStream is an asynchronous operation. Upon receiving a CreateStream request, Amazon Kinesis immediately returns and sets the stream status to CREATING. After the stream is created, Amazon Kinesis sets the stream status to ACTIVE. You should perform read and write operations only on an ACTIVE stream.

    You receive a LimitExceededException when making a CreateStream request if you try to do one of the following:

    • Have more than five streams in the CREATING state at any point in time.
    • Create more shards than are authorized for your account.

    For the default shard limit for an AWS account, see Streams Limits in the Amazon Kinesis Streams Developer Guide. If you need to increase this limit, contact AWS Support.

    You can use DescribeStream to check the stream status, which is returned in StreamStatus.

    CreateStream has a limit of 5 transactions per second per account.
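    A sketch of the CREATING-to-ACTIVE flow described above, using the SDK's generated waiter to poll DescribeStream (the name and shard count are illustrative):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kinesis"
)

func main() {
	svc := kinesis.New(session.Must(session.NewSession()))
	name := aws.String("exampleStream") // placeholder

	// CreateStream returns immediately; the stream starts in CREATING.
	if _, err := svc.CreateStream(&kinesis.CreateStreamInput{
		StreamName: name,
		ShardCount: aws.Int64(2),
	}); err != nil {
		panic(err)
	}

	// The waiter polls DescribeStream until StreamStatus is ACTIVE.
	if err := svc.WaitUntilStreamExists(&kinesis.DescribeStreamInput{StreamName: name}); err != nil {
		panic(err)
	}
	fmt.Println("stream is ACTIVE")
}
```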

    ", + "DecreaseStreamRetentionPeriod": "

    Decreases the Amazon Kinesis stream's retention period, which is the length of time data records are accessible after they are added to the stream. The minimum value of a stream's retention period is 24 hours.

    This operation may result in lost data. For example, if the stream's retention period is 48 hours and is decreased to 24 hours, any data already in the stream that is older than 24 hours is inaccessible.
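    For example (the stream name is a placeholder), dropping a stream to the 24-hour minimum looks like the sketch below; IncreaseStreamRetentionPeriod takes the same input shape with a larger value:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kinesis"
)

func main() {
	svc := kinesis.New(session.Must(session.NewSession()))

	// Records older than the new 24-hour retention period become inaccessible.
	_, err := svc.DecreaseStreamRetentionPeriod(&kinesis.DecreaseStreamRetentionPeriodInput{
		StreamName:           aws.String("exampleStream"), // placeholder
		RetentionPeriodHours: aws.Int64(24),               // allowed range: 24-168 hours
	})
	if err != nil {
		panic(err)
	}
}
```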

    ", + "DeleteStream": "

    Deletes an Amazon Kinesis stream and all its shards and data. You must shut down any applications that are operating on the stream before you delete the stream. If an application attempts to operate on a deleted stream, it will receive the exception ResourceNotFoundException.

    If the stream is in the ACTIVE state, you can delete it. After a DeleteStream request, the specified stream is in the DELETING state until Amazon Kinesis completes the deletion.

    Note: Amazon Kinesis might continue to accept data read and write operations, such as PutRecord, PutRecords, and GetRecords, on a stream in the DELETING state until the stream deletion is complete.

    When you delete a stream, any shards in that stream are also deleted, and any tags are dissociated from the stream.

    You can use the DescribeStream operation to check the state of the stream, which is returned in StreamStatus.

    DeleteStream has a limit of 5 transactions per second per account.
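    A sketch of deleting a stream and polling until it is gone (the stream name is a placeholder); DescribeStream reports DELETING until the deletion completes, then returns ResourceNotFoundException:

```go
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kinesis"
)

func main() {
	svc := kinesis.New(session.Must(session.NewSession()))
	name := aws.String("exampleStream") // placeholder

	if _, err := svc.DeleteStream(&kinesis.DeleteStreamInput{StreamName: name}); err != nil {
		panic(err)
	}

	for {
		out, err := svc.DescribeStream(&kinesis.DescribeStreamInput{StreamName: name})
		if err != nil {
			if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "ResourceNotFoundException" {
				fmt.Println("stream deleted")
				return
			}
			panic(err) // unexpected error
		}
		// Still present; expect DELETING here.
		fmt.Println("status:", aws.StringValue(out.StreamDescription.StreamStatus))
		time.Sleep(5 * time.Second)
	}
}
```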

    ", + "DescribeStream": "

    Describes the specified Amazon Kinesis stream.

    The information about the stream includes its current status, its Amazon Resource Name (ARN), and an array of shard objects. For each shard object, there is information about the hash key and sequence number ranges that the shard spans, and the IDs of any earlier shards that played a role in creating the shard. A sequence number is the identifier associated with every record ingested in the stream. The sequence number is assigned when a record is put into the stream.

    You can limit the number of returned shards using the Limit parameter. The number of shards in a stream may be too large to return from a single call to DescribeStream. You can detect this by using the HasMoreShards flag in the returned output. HasMoreShards is set to true when there is more data available.

    DescribeStream is a paginated operation. If there are more shards available, you can request them using the shard ID of the last shard returned. Specify this ID in the ExclusiveStartShardId parameter in a subsequent request to DescribeStream.

    There are no guarantees about the chronological order of shards returned in DescribeStream results. If you want to process shards in chronological order, use ParentShardId to track lineage to the oldest shard.

    DescribeStream has a limit of 10 transactions per second per account.
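    The HasMoreShards/ExclusiveStartShardId pagination described above can be driven manually, as in this sketch (the stream name is a placeholder):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kinesis"
)

func main() {
	svc := kinesis.New(session.Must(session.NewSession()))

	var shards []*kinesis.Shard
	var start *string // ExclusiveStartShardId; nil on the first call
	for {
		out, err := svc.DescribeStream(&kinesis.DescribeStreamInput{
			StreamName:            aws.String("exampleStream"), // placeholder
			Limit:                 aws.Int64(100),
			ExclusiveStartShardId: start,
		})
		if err != nil {
			panic(err)
		}
		shards = append(shards, out.StreamDescription.Shards...)
		if !aws.BoolValue(out.StreamDescription.HasMoreShards) {
			break
		}
		// Resume after the last shard returned in this page.
		start = shards[len(shards)-1].ShardId
	}
	fmt.Println("total shards:", len(shards))
}
```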

    ", + "DisableEnhancedMonitoring": "

    Disables enhanced monitoring.

    ", + "EnableEnhancedMonitoring": "

    Enables enhanced Amazon Kinesis stream monitoring for shard-level metrics.

    ", + "GetRecords": "

    Gets data records from an Amazon Kinesis stream's shard.

    Specify a shard iterator using the ShardIterator parameter. The shard iterator specifies the position in the shard from which you want to start reading data records sequentially. If there are no records available in the portion of the shard that the iterator points to, GetRecords returns an empty list. Note that it might take multiple calls to get to a portion of the shard that contains records.

    You can scale by provisioning multiple shards per stream while considering service limits (for more information, see Streams Limits in the Amazon Kinesis Streams Developer Guide). Your application should have one thread per shard, each reading continuously from its stream. To read from a stream continually, call GetRecords in a loop. Use GetShardIterator to get the shard iterator to specify in the first GetRecords call. GetRecords returns a new shard iterator in NextShardIterator. Specify the shard iterator returned in NextShardIterator in subsequent calls to GetRecords. Note that if the shard has been closed, the shard iterator can't return more data and GetRecords returns null in NextShardIterator. You can terminate the loop when the shard is closed, or when the shard iterator reaches the record with the sequence number or other attribute that marks it as the last record to process.

    Each data record can be up to 1 MB in size, and each shard can read up to 2 MB per second. You can ensure that your calls don't exceed the maximum supported size or throughput by using the Limit parameter to specify the maximum number of records that GetRecords can return. Consider your average record size when determining this limit.

    The size of the data returned by GetRecords varies depending on the utilization of the shard. The maximum size of data that GetRecords can return is 10 MB. If a call returns this amount of data, subsequent calls made within the next 5 seconds throw ProvisionedThroughputExceededException. If there is insufficient provisioned throughput on the shard, subsequent calls made within the next 1 second throw ProvisionedThroughputExceededException. Note that GetRecords won't return any data when it throws an exception. For this reason, we recommend that you wait one second between calls to GetRecords; however, it's possible that the application will get exceptions for longer than 1 second.

    To detect whether the application is falling behind in processing, you can use the MillisBehindLatest response attribute. You can also monitor the stream using CloudWatch metrics and other mechanisms (see Monitoring in the Amazon Kinesis Streams Developer Guide).

    Each Amazon Kinesis record includes a value, ApproximateArrivalTimestamp, that is set when a stream successfully receives and stores a record. This is commonly referred to as a server-side timestamp, whereas a client-side timestamp is set when a data producer creates or sends the record to a stream (a data producer is any data source putting data records into a stream, for example with PutRecords). The timestamp has millisecond precision. There are no guarantees about the timestamp accuracy, or that the timestamp is always increasing. For example, records in a shard or across a stream might have timestamps that are out of order.
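
    A minimal aws-sdk-go sketch of the read loop described above (the stream name and shard ID are placeholders; the fixed one-second sleep stands in for real pacing and backoff logic):

        package main

        import (
            "fmt"
            "log"
            "time"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/kinesis"
        )

        func main() {
            svc := kinesis.New(session.Must(session.NewSession()))

            // Get the first iterator for one shard; TRIM_HORIZON starts at
            // the oldest untrimmed record in the shard.
            it, err := svc.GetShardIterator(&kinesis.GetShardIteratorInput{
                StreamName:        aws.String("my-stream"),
                ShardId:           aws.String("shardId-000000000000"),
                ShardIteratorType: aws.String(kinesis.ShardIteratorTypeTrimHorizon),
            })
            if err != nil {
                log.Fatal(err)
            }

            iterator := it.ShardIterator
            for iterator != nil { // NextShardIterator is nil once the shard is closed
                out, err := svc.GetRecords(&kinesis.GetRecordsInput{
                    ShardIterator: iterator,
                    Limit:         aws.Int64(1000),
                })
                if err != nil {
                    log.Fatal(err) // production code would back off and retry
                }
                for _, r := range out.Records {
                    fmt.Printf("seq %s: %d bytes\n", aws.StringValue(r.SequenceNumber), len(r.Data))
                }
                fmt.Printf("%d ms behind latest\n", aws.Int64Value(out.MillisBehindLatest))
                iterator = out.NextShardIterator
                time.Sleep(time.Second) // stay under the per-shard read limits
            }
        }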

    ", + "GetShardIterator": "

    Gets an Amazon Kinesis shard iterator. A shard iterator expires five minutes after it is returned to the requester.

    A shard iterator specifies the shard position from which to start reading data records sequentially. The position is specified using the sequence number of a data record in a shard. A sequence number is the identifier associated with every record ingested in the stream, and is assigned when a record is put into the stream. Each stream has one or more shards.

    You must specify the shard iterator type. For example, you can set the ShardIteratorType parameter to AT_SEQUENCE_NUMBER to read exactly from the position denoted by a specific sequence number, or to AFTER_SEQUENCE_NUMBER to read right after that sequence number; in both cases, use sequence numbers returned by earlier calls to PutRecord, PutRecords, GetRecords, or DescribeStream. Alternatively, you can specify AT_TIMESTAMP to read records from an arbitrary point in time, TRIM_HORIZON to cause ShardIterator to point to the last untrimmed record in the shard (the oldest data record in the shard), or LATEST so that you always read the most recent data in the shard.

    When you read repeatedly from a stream, use a GetShardIterator request to get the first shard iterator for use in your first GetRecords request; for subsequent reads, use the shard iterator returned by the GetRecords request in NextShardIterator. Every GetRecords request returns a new shard iterator in NextShardIterator, which you use in the ShardIterator parameter of the next GetRecords request.

    If a GetShardIterator request is made too often, you receive a ProvisionedThroughputExceededException. For more information about throughput limits, see GetRecords, and Streams Limits in the Amazon Kinesis Streams Developer Guide.

    If the shard is closed, GetShardIterator returns a valid iterator for the last sequence number of the shard. Note that a shard can be closed as a result of using SplitShard or MergeShards.

    GetShardIterator has a limit of 5 transactions per second per account per open shard.
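
    For illustration, requesting an iterator with the AT_TIMESTAMP type looks like the following sketch (the stream name, shard ID, and one-hour offset are placeholder values; the other iterator types take the same shape, minus the Timestamp field):

        package main

        import (
            "log"
            "time"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/kinesis"
        )

        func main() {
            svc := kinesis.New(session.Must(session.NewSession()))

            // Iterator positioned at (or just after) a point in time.
            out, err := svc.GetShardIterator(&kinesis.GetShardIteratorInput{
                StreamName:        aws.String("my-stream"),
                ShardId:           aws.String("shardId-000000000000"),
                ShardIteratorType: aws.String(kinesis.ShardIteratorTypeAtTimestamp),
                Timestamp:         aws.Time(time.Now().Add(-1 * time.Hour)),
            })
            if err != nil {
                log.Fatal(err)
            }
            log.Println("iterator:", aws.StringValue(out.ShardIterator))
        }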

    ", + "IncreaseStreamRetentionPeriod": "

    Increases the Amazon Kinesis stream's retention period, which is the length of time data records are accessible after they are added to the stream. The maximum value of a stream's retention period is 168 hours (7 days).

    When you choose a longer stream retention period, this operation increases the time period during which records that have not yet expired are accessible. However, it does not make previously expired data (older than the stream's previous retention period) accessible after the operation is called. For example, if a stream's retention period is set to 24 hours and is increased to 168 hours, any data that is already older than 24 hours remains inaccessible to consumer applications.
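
    A minimal sketch of raising the retention period with aws-sdk-go (the stream name is a placeholder):

        package main

        import (
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/kinesis"
        )

        func main() {
            svc := kinesis.New(session.Must(session.NewSession()))

            // Raise retention to the 168-hour maximum; the new value must
            // exceed the stream's current retention period or the call fails.
            _, err := svc.IncreaseStreamRetentionPeriod(&kinesis.IncreaseStreamRetentionPeriodInput{
                StreamName:           aws.String("my-stream"),
                RetentionPeriodHours: aws.Int64(168),
            })
            if err != nil {
                log.Fatal(err)
            }
        }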

    ", + "ListStreams": "

    Lists your Amazon Kinesis streams.

    The number of streams may be too large to return from a single call to ListStreams. You can limit the number of returned streams using the Limit parameter. If you do not specify a value for the Limit parameter, Amazon Kinesis uses the default limit, which is currently 10.

    You can detect if there are more streams available to list by using the HasMoreStreams flag from the returned output. If there are more streams available, you can request more streams by using the name of the last stream returned by the ListStreams request in the ExclusiveStartStreamName parameter in a subsequent request to ListStreams. The group of stream names returned by the subsequent request is then added to the list. You can continue this process until all the stream names have been collected in the list.

    ListStreams has a limit of 5 transactions per second per account.
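
    The same pagination idiom in aws-sdk-go, as a sketch (the Limit of 10 mirrors the default; credentials and region are assumed to come from the environment):

        package main

        import (
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/kinesis"
        )

        func main() {
            svc := kinesis.New(session.Must(session.NewSession()))

            input := &kinesis.ListStreamsInput{Limit: aws.Int64(10)}
            var names []*string
            for {
                out, err := svc.ListStreams(input)
                if err != nil {
                    log.Fatal(err)
                }
                names = append(names, out.StreamNames...)
                if !aws.BoolValue(out.HasMoreStreams) {
                    break
                }
                // Continue after the last stream name already collected.
                input.ExclusiveStartStreamName = names[len(names)-1]
            }
            for _, n := range names {
                log.Println(aws.StringValue(n))
            }
        }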

    ", + "ListTagsForStream": "

    Lists the tags for the specified Amazon Kinesis stream.

    ", + "MergeShards": "

    Merges two adjacent shards in an Amazon Kinesis stream and combines them into a single shard to reduce the stream's capacity to ingest and transport data. Two shards are considered adjacent if the union of the hash key ranges for the two shards forms a contiguous set with no gaps. For example, if you have two shards, one with a hash key range of 276...381 and the other with a hash key range of 382...454, then you could merge these two shards into a single shard that would have a hash key range of 276...454. After the merge, the single child shard receives data for all hash key values covered by the two parent shards.

    MergeShards is called when there is a need to reduce the overall capacity of a stream because of excess capacity that is not being used. You must specify the shard to be merged and the adjacent shard for a stream. For more information about merging shards, see Merge Two Shards in the Amazon Kinesis Streams Developer Guide.

    If the stream is in the ACTIVE state, you can call MergeShards. If a stream is in the CREATING, UPDATING, or DELETING state, MergeShards returns a ResourceInUseException. If the specified stream does not exist, MergeShards returns a ResourceNotFoundException.

    You can use DescribeStream to check the state of the stream, which is returned in StreamStatus.

    MergeShards is an asynchronous operation. Upon receiving a MergeShards request, Amazon Kinesis immediately returns a response and sets the StreamStatus to UPDATING. After the operation is completed, Amazon Kinesis sets the StreamStatus to ACTIVE. Read and write operations continue to work while the stream is in the UPDATING state.

    You use DescribeStream to determine the shard IDs that are specified in the MergeShards request.

    If you try to operate on too many streams in parallel using CreateStream, DeleteStream, MergeShards, or SplitShard, you will receive a LimitExceededException.

    MergeShards has a limit of 5 transactions per second per account.
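
    A sketch of a merge followed by a wait for the stream to return to ACTIVE, using the StreamExists waiter added elsewhere in this change (stream and shard IDs are placeholders; take real IDs from DescribeStream and make sure the two hash key ranges are adjacent):

        package main

        import (
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/kinesis"
        )

        func main() {
            svc := kinesis.New(session.Must(session.NewSession()))

            _, err := svc.MergeShards(&kinesis.MergeShardsInput{
                StreamName:           aws.String("my-stream"),
                ShardToMerge:         aws.String("shardId-000000000000"),
                AdjacentShardToMerge: aws.String("shardId-000000000001"),
            })
            if err != nil {
                log.Fatal(err)
            }
            // The stream is now UPDATING; block until it is ACTIVE again
            // before issuing another resharding call.
            err = svc.WaitUntilStreamExists(&kinesis.DescribeStreamInput{
                StreamName: aws.String("my-stream"),
            })
            if err != nil {
                log.Fatal(err)
            }
        }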

    ", + "PutRecord": "

    Writes a single data record into an Amazon Kinesis stream. Call PutRecord to send data into the stream for real-time ingestion and subsequent processing, one record at a time. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MB per second.

    You must specify the name of the stream that captures, stores, and transports the data; a partition key; and the data blob itself.

    The data blob can be any type of data; for example, a segment from a log file, geographic/location data, website clickstream data, and so on.

    The partition key is used by Amazon Kinesis to distribute data across shards. Amazon Kinesis segregates the data records that belong to a stream into multiple shards, using the partition key associated with each data record to determine which shard a given data record belongs to.

    Partition keys are Unicode strings, with a maximum length limit of 256 characters for each key. An MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards using the hash key ranges of the shards. You can override hashing the partition key to determine the shard by explicitly specifying a hash value using the ExplicitHashKey parameter. For more information, see Adding Data to a Stream in the Amazon Kinesis Streams Developer Guide.

    PutRecord returns the shard ID of where the data record was placed and the sequence number that was assigned to the data record.

    Sequence numbers increase over time and are specific to a shard within a stream, not across all shards within a stream. To guarantee strictly increasing ordering, write serially to a shard and use the SequenceNumberForOrdering parameter. For more information, see Adding Data to a Stream in the Amazon Kinesis Streams Developer Guide.

    If a PutRecord request cannot be processed because of insufficient provisioned throughput on the shard involved in the request, PutRecord throws ProvisionedThroughputExceededException.

    Data records are accessible for only 24 hours from the time that they are added to a stream.
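
    A minimal PutRecord sketch with the throttling case called out (the stream name, partition key, and payload are placeholders; a real producer would retry with exponential backoff instead of exiting):

        package main

        import (
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/awserr"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/kinesis"
        )

        func main() {
            svc := kinesis.New(session.Must(session.NewSession()))

            out, err := svc.PutRecord(&kinesis.PutRecordInput{
                StreamName:   aws.String("my-stream"),
                PartitionKey: aws.String("user-42"), // chooses the shard via MD5 hashing
                Data:         []byte(`{"event":"click"}`),
            })
            if aerr, ok := err.(awserr.Error); ok &&
                aerr.Code() == "ProvisionedThroughputExceededException" {
                log.Fatal("throttled; back off and retry")
            }
            if err != nil {
                log.Fatal(err)
            }
            log.Printf("shard %s, sequence %s",
                aws.StringValue(out.ShardId), aws.StringValue(out.SequenceNumber))
        }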

    ", + "PutRecords": "

    Writes multiple data records into an Amazon Kinesis stream in a single call (also referred to as a PutRecords request). Use this operation to send data into the stream for data ingestion and processing.

    Each PutRecords request can support up to 500 records. Each record in the request can be as large as 1 MB, up to a limit of 5 MB for the entire request, including partition keys. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MB per second.

    You must specify the name of the stream that captures, stores, and transports the data; and an array of request Records, with each record in the array requiring a partition key and data blob. The record size limit applies to the total size of the partition key and data blob.

    The data blob can be any type of data; for example, a segment from a log file, geographic/location data, website clickstream data, and so on.

    The partition key is used by Amazon Kinesis as input to a hash function that maps the partition key and associated data to a specific shard. An MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards. As a result of this hashing mechanism, all data records with the same partition key map to the same shard within the stream. For more information, see Adding Data to a Stream in the Amazon Kinesis Streams Developer Guide.

    Each record in the Records array may include an optional parameter, ExplicitHashKey, which overrides the partition key to shard mapping. This parameter allows a data producer to determine explicitly the shard where the record is stored. For more information, see Adding Multiple Records with PutRecords in the Amazon Kinesis Streams Developer Guide.

    The PutRecords response includes an array of response Records. Each record in the response array directly correlates with a record in the request array using natural ordering, from the top to the bottom of the request and response. The response Records array always includes the same number of records as the request array.

    The response Records array includes both successfully and unsuccessfully processed records. Amazon Kinesis attempts to process all records in each PutRecords request. A single record failure does not stop the processing of subsequent records.

    A successfully processed record includes ShardId and SequenceNumber values. The ShardId parameter identifies the shard in the stream where the record is stored. The SequenceNumber parameter is an identifier assigned to the put record, unique across all records in the stream.

    An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error and can be one of the following values: ProvisionedThroughputExceededException or InternalFailure. ErrorMessage provides more detailed information about the ProvisionedThroughputExceededException, including the account ID, stream name, and shard ID of the record that was throttled. For more information about partially successful responses, see Adding Multiple Records with PutRecords in the Amazon Kinesis Streams Developer Guide.

    By default, data records are accessible for only 24 hours from the time that they are added to an Amazon Kinesis stream. This retention period can be modified using the DecreaseStreamRetentionPeriod and IncreaseStreamRetentionPeriod operations.
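
    A sketch of a batched put that inspects the per-record results (the stream name and payloads are placeholders; only entries carrying an ErrorCode need to be retried):

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/kinesis"
        )

        func main() {
            svc := kinesis.New(session.Must(session.NewSession()))

            entries := make([]*kinesis.PutRecordsRequestEntry, 0, 3)
            for i := 0; i < 3; i++ {
                entries = append(entries, &kinesis.PutRecordsRequestEntry{
                    PartitionKey: aws.String(fmt.Sprintf("key-%d", i)),
                    Data:         []byte(fmt.Sprintf("payload-%d", i)),
                })
            }
            out, err := svc.PutRecords(&kinesis.PutRecordsInput{
                StreamName: aws.String("my-stream"),
                Records:    entries,
            })
            if err != nil {
                log.Fatal(err)
            }
            // Results correlate with the request entries by index.
            if aws.Int64Value(out.FailedRecordCount) > 0 {
                for i, r := range out.Records {
                    if r.ErrorCode != nil {
                        fmt.Printf("entry %d failed: %s: %s\n", i,
                            aws.StringValue(r.ErrorCode), aws.StringValue(r.ErrorMessage))
                    }
                }
            }
        }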

    ", + "RemoveTagsFromStream": "

    Removes tags from the specified Amazon Kinesis stream. Removed tags are deleted and cannot be recovered after this operation successfully completes.

    If you specify a tag that does not exist, it is ignored.

    ", + "SplitShard": "

    Splits a shard into two new shards in the Amazon Kinesis stream to increase the stream's capacity to ingest and transport data. SplitShard is called when there is a need to increase the overall capacity of a stream because of an expected increase in the volume of data records being ingested.

    You can also use SplitShard when a shard appears to be approaching its maximum utilization; for example, the producers sending data into the specific shard are suddenly sending more than previously anticipated. You can also call SplitShard to increase stream capacity, so that more Amazon Kinesis applications can simultaneously read data from the stream for real-time processing.

    You must specify the shard to be split and the new hash key, which is the position in the shard where the shard gets split in two. In many cases, the new hash key might simply be the average of the beginning and ending hash key, but it can be any hash key value in the range being mapped into the shard. For more information about splitting shards, see Split a Shard in the Amazon Kinesis Streams Developer Guide.

    You can use DescribeStream to determine the shard ID and hash key values for the ShardToSplit and NewStartingHashKey parameters that are specified in the SplitShard request.

    SplitShard is an asynchronous operation. Upon receiving a SplitShard request, Amazon Kinesis immediately returns a response and sets the stream status to UPDATING. After the operation is completed, Amazon Kinesis sets the stream status to ACTIVE. Read and write operations continue to work while the stream is in the UPDATING state.

    You can use DescribeStream to check the status of the stream, which is returned in StreamStatus. If the stream is in the ACTIVE state, you can call SplitShard. If the stream is in the CREATING, UPDATING, or DELETING state, SplitShard returns a ResourceInUseException.

    If the specified stream does not exist, SplitShard returns a ResourceNotFoundException. If you try to create more shards than are authorized for your account, you receive a LimitExceededException.

    For the default shard limit for an AWS account, see Streams Limits in the Amazon Kinesis Streams Developer Guide. If you need to increase this limit, contact AWS Support.

    If you try to operate on too many streams simultaneously using CreateStream, DeleteStream, MergeShards, and/or SplitShard, you receive a LimitExceededException.

    SplitShard has a limit of 5 transactions per second per account.
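
    A sketch that splits the first shard of a stream at the midpoint of its hash key range (the midpoint choice and the hard-coded shard selection are illustrative, not required; any hash key inside the range is legal):

        package main

        import (
            "log"
            "math/big"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/kinesis"
        )

        func main() {
            svc := kinesis.New(session.Must(session.NewSession()))

            ds, err := svc.DescribeStream(&kinesis.DescribeStreamInput{
                StreamName: aws.String("my-stream"),
            })
            if err != nil {
                log.Fatal(err)
            }
            shard := ds.StreamDescription.Shards[0]

            // Use the midpoint of the shard's hash key range as the new
            // starting hash key: mid = (start + end) / 2.
            start, _ := new(big.Int).SetString(aws.StringValue(shard.HashKeyRange.StartingHashKey), 10)
            end, _ := new(big.Int).SetString(aws.StringValue(shard.HashKeyRange.EndingHashKey), 10)
            mid := new(big.Int).Rsh(new(big.Int).Add(start, end), 1)

            _, err = svc.SplitShard(&kinesis.SplitShardInput{
                StreamName:         aws.String("my-stream"),
                ShardToSplit:       shard.ShardId,
                NewStartingHashKey: aws.String(mid.String()),
            })
            if err != nil {
                log.Fatal(err)
            }
        }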

    " + }, + "shapes": { + "AddTagsToStreamInput": { + "base": "

    Represents the input for AddTagsToStream.

    ", + "refs": { + } + }, + "ApproximateArrivalTimestamp": { + "base": null, + "refs": { + "Record$ApproximateArrivalTimestamp": "

    The approximate time that the record was inserted into the stream.

    " + } + }, + "BooleanObject": { + "base": null, + "refs": { + "ListStreamsOutput$HasMoreStreams": "

    If set to true, there are more streams available to list.

    ", + "ListTagsForStreamOutput$HasMoreTags": "

    If set to true, more tags are available. To request additional tags, set ExclusiveStartTagKey to the key of the last tag returned.

    ", + "StreamDescription$HasMoreShards": "

    If set to true, more shards in the stream are available to describe.

    " + } + }, + "CreateStreamInput": { + "base": "

    Represents the input for CreateStream.

    ", + "refs": { + } + }, + "Data": { + "base": null, + "refs": { + "PutRecordInput$Data": "

    The data blob to put into the record, which is base64-encoded when the blob is serialized. When the data blob (the payload before base64-encoding) is added to the partition key size, the total size must not exceed the maximum record size (1 MB).

    ", + "PutRecordsRequestEntry$Data": "

    The data blob to put into the record, which is base64-encoded when the blob is serialized. When the data blob (the payload before base64-encoding) is added to the partition key size, the total size must not exceed the maximum record size (1 MB).

    ", + "Record$Data": "

    The data blob. The data in the blob is both opaque and immutable to the Amazon Kinesis service, which does not inspect, interpret, or change the data in the blob in any way. When the data blob (the payload before base64-encoding) is added to the partition key size, the total size must not exceed the maximum record size (1 MB).

    " + } + }, + "DecreaseStreamRetentionPeriodInput": { + "base": "

    Represents the input for DecreaseStreamRetentionPeriod.

    ", + "refs": { + } + }, + "DeleteStreamInput": { + "base": "

    Represents the input for DeleteStream.

    ", + "refs": { + } + }, + "DescribeStreamInput": { + "base": "

    Represents the input for DescribeStream.

    ", + "refs": { + } + }, + "DescribeStreamInputLimit": { + "base": null, + "refs": { + "DescribeStreamInput$Limit": "

    The maximum number of shards to return.

    " + } + }, + "DescribeStreamOutput": { + "base": "

    Represents the output for DescribeStream.

    ", + "refs": { + } + }, + "DisableEnhancedMonitoringInput": { + "base": "

    Represents the input for DisableEnhancedMonitoring.

    ", + "refs": { + } + }, + "EnableEnhancedMonitoringInput": { + "base": "

    Represents the input for EnableEnhancedMonitoring.

    ", + "refs": { + } + }, + "EnhancedMetrics": { + "base": "

    Represents enhanced metrics types.

    ", + "refs": { + "EnhancedMonitoringList$member": null + } + }, + "EnhancedMonitoringList": { + "base": null, + "refs": { + "StreamDescription$EnhancedMonitoring": "

    Represents the current enhanced monitoring settings of the stream.

    " + } + }, + "EnhancedMonitoringOutput": { + "base": "

    Represents the output for EnableEnhancedMonitoring and DisableEnhancedMonitoring.

    ", + "refs": { + } + }, + "ErrorCode": { + "base": null, + "refs": { + "PutRecordsResultEntry$ErrorCode": "

    The error code for an individual record result. ErrorCodes can be either ProvisionedThroughputExceededException or InternalFailure.

    " + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "ExpiredIteratorException$message": "

    A message that provides information about the error.

    ", + "InvalidArgumentException$message": "

    A message that provides information about the error.

    ", + "LimitExceededException$message": "

    A message that provides information about the error.

    ", + "ProvisionedThroughputExceededException$message": "

    A message that provides information about the error.

    ", + "PutRecordsResultEntry$ErrorMessage": "

    The error message for an individual record result. An ErrorCode value of ProvisionedThroughputExceededException has an error message that includes the account ID, stream name, and shard ID. An ErrorCode value of InternalFailure has the error message \"Internal Service Failure\".

    ", + "ResourceInUseException$message": "

    A message that provides information about the error.

    ", + "ResourceNotFoundException$message": "

    A message that provides information about the error.

    " + } + }, + "ExpiredIteratorException": { + "base": "

    The provided iterator exceeds the maximum age allowed.

    ", + "refs": { + } + }, + "GetRecordsInput": { + "base": "

    Represents the input for GetRecords.

    ", + "refs": { + } + }, + "GetRecordsInputLimit": { + "base": null, + "refs": { + "GetRecordsInput$Limit": "

    The maximum number of records to return. Specify a value of up to 10,000. If you specify a value that is greater than 10,000, GetRecords throws InvalidArgumentException.

    " + } + }, + "GetRecordsOutput": { + "base": "

    Represents the output for GetRecords.

    ", + "refs": { + } + }, + "GetShardIteratorInput": { + "base": "

    Represents the input for GetShardIterator.

    ", + "refs": { + } + }, + "GetShardIteratorOutput": { + "base": "

    Represents the output for GetShardIterator.

    ", + "refs": { + } + }, + "HashKey": { + "base": null, + "refs": { + "HashKeyRange$StartingHashKey": "

    The starting hash key of the hash key range.

    ", + "HashKeyRange$EndingHashKey": "

    The ending hash key of the hash key range.

    ", + "PutRecordInput$ExplicitHashKey": "

    The hash value used to explicitly determine the shard the data record is assigned to by overriding the partition key hash.

    ", + "PutRecordsRequestEntry$ExplicitHashKey": "

    The hash value used to determine explicitly the shard that the data record is assigned to by overriding the partition key hash.

    ", + "SplitShardInput$NewStartingHashKey": "

    A hash key value for the starting hash key of one of the child shards created by the split. The hash key range for a given shard constitutes a set of ordered contiguous positive integers. The value for NewStartingHashKey must be in the range of hash keys being mapped into the shard. The NewStartingHashKey hash key value and all higher hash key values in the hash key range are distributed to one of the child shards. All the lower hash key values in the range are distributed to the other child shard.

    " + } + }, + "HashKeyRange": { + "base": "

    The range of possible hash key values for the shard, which is a set of ordered contiguous positive integers.

    ", + "refs": { + "Shard$HashKeyRange": "

    The range of possible hash key values for the shard, which is a set of ordered contiguous positive integers.

    " + } + }, + "IncreaseStreamRetentionPeriodInput": { + "base": "

    Represents the input for IncreaseStreamRetentionPeriod.

    ", + "refs": { + } + }, + "InvalidArgumentException": { + "base": "

    A specified parameter exceeds its restrictions, is not supported, or can't be used. For more information, see the returned message.

    ", + "refs": { + } + }, + "LimitExceededException": { + "base": "

    The requested resource exceeds the maximum number allowed, or the number of concurrent stream requests exceeds the maximum number allowed (5).

    ", + "refs": { + } + }, + "ListStreamsInput": { + "base": "

    Represents the input for ListStreams.

    ", + "refs": { + } + }, + "ListStreamsInputLimit": { + "base": null, + "refs": { + "ListStreamsInput$Limit": "

    The maximum number of streams to list.

    " + } + }, + "ListStreamsOutput": { + "base": "

    Represents the output for ListStreams.

    ", + "refs": { + } + }, + "ListTagsForStreamInput": { + "base": "

    Represents the input for ListTagsForStream.

    ", + "refs": { + } + }, + "ListTagsForStreamInputLimit": { + "base": null, + "refs": { + "ListTagsForStreamInput$Limit": "

    The number of tags to return. If this number is less than the total number of tags associated with the stream, HasMoreTags is set to true. To list additional tags, set ExclusiveStartTagKey to the last key in the response.

    " + } + }, + "ListTagsForStreamOutput": { + "base": "

    Represents the output for ListTagsForStream.

    ", + "refs": { + } + }, + "MergeShardsInput": { + "base": "

    Represents the input for MergeShards.

    ", + "refs": { + } + }, + "MetricsName": { + "base": null, + "refs": { + "MetricsNameList$member": null + } + }, + "MetricsNameList": { + "base": null, + "refs": { + "DisableEnhancedMonitoringInput$ShardLevelMetrics": "

    List of shard-level metrics to disable.

    The following are the valid shard-level metrics. The value \"ALL\" disables every metric.

    • IncomingBytes
    • IncomingRecords
    • OutgoingBytes
    • OutgoingRecords
    • WriteProvisionedThroughputExceeded
    • ReadProvisionedThroughputExceeded
    • IteratorAgeMilliseconds
    • ALL

    For more information, see Monitoring the Amazon Kinesis Streams Service with Amazon CloudWatch in the Amazon Kinesis Streams Developer Guide.

    ", + "EnableEnhancedMonitoringInput$ShardLevelMetrics": "

    List of shard-level metrics to enable.

    The following are the valid shard-level metrics. The value \"ALL\" enables every metric.

    • IncomingBytes
    • IncomingRecords
    • OutgoingBytes
    • OutgoingRecords
    • WriteProvisionedThroughputExceeded
    • ReadProvisionedThroughputExceeded
    • IteratorAgeMilliseconds
    • ALL

    For more information, see Monitoring the Amazon Kinesis Streams Service with Amazon CloudWatch in the Amazon Kinesis Streams Developer Guide.
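
    For illustration, enabling a subset of these metrics with aws-sdk-go looks like this sketch (the stream name and the chosen metrics are placeholders):

        package main

        import (
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/kinesis"
        )

        func main() {
            svc := kinesis.New(session.Must(session.NewSession()))

            out, err := svc.EnableEnhancedMonitoring(&kinesis.EnableEnhancedMonitoringInput{
                StreamName: aws.String("my-stream"),
                ShardLevelMetrics: []*string{
                    aws.String("IncomingBytes"),
                    aws.String("IteratorAgeMilliseconds"),
                },
            })
            if err != nil {
                log.Fatal(err)
            }
            // The response reports the metric sets before and after the change.
            log.Println("current:", aws.StringValueSlice(out.CurrentShardLevelMetrics))
            log.Println("desired:", aws.StringValueSlice(out.DesiredShardLevelMetrics))
        }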

    ", + "EnhancedMetrics$ShardLevelMetrics": "

    List of shard-level metrics.

    The following are the valid shard-level metrics. The value \"ALL\" enhances every metric.

    • IncomingBytes
    • IncomingRecords
    • OutgoingBytes
    • OutgoingRecords
    • WriteProvisionedThroughputExceeded
    • ReadProvisionedThroughputExceeded
    • IteratorAgeMilliseconds
    • ALL

    For more information, see Monitoring the Amazon Kinesis Streams Service with Amazon CloudWatch in the Amazon Kinesis Streams Developer Guide.

    ", + "EnhancedMonitoringOutput$CurrentShardLevelMetrics": "

    Represents the current state of the metrics that are in the enhanced state before the operation.

    ", + "EnhancedMonitoringOutput$DesiredShardLevelMetrics": "

    Represents the list of all the metrics that would be in the enhanced state after the operation.

    " + } + }, + "MillisBehindLatest": { + "base": null, + "refs": { + "GetRecordsOutput$MillisBehindLatest": "

    The number of milliseconds the GetRecords response is from the tip of the stream, indicating how far behind current time the consumer is. A value of zero indicates record processing is caught up, and there are no new records to process at this moment.

    " + } + }, + "PartitionKey": { + "base": null, + "refs": { + "PutRecordInput$PartitionKey": "

    Determines which shard in the stream the data record is assigned to. Partition keys are Unicode strings with a maximum length limit of 256 characters for each key. Amazon Kinesis uses the partition key as input to a hash function that maps the partition key and associated data to a specific shard. Specifically, an MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards. As a result of this hashing mechanism, all data records with the same partition key map to the same shard within the stream.

    ", + "PutRecordsRequestEntry$PartitionKey": "

    Determines which shard in the stream the data record is assigned to. Partition keys are Unicode strings with a maximum length limit of 256 characters for each key. Amazon Kinesis uses the partition key as input to a hash function that maps the partition key and associated data to a specific shard. Specifically, an MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards. As a result of this hashing mechanism, all data records with the same partition key map to the same shard within the stream.

    ", + "Record$PartitionKey": "

    Identifies which shard in the stream the data record is assigned to.

    " + } + }, + "PositiveIntegerObject": { + "base": null, + "refs": { + "CreateStreamInput$ShardCount": "

    The number of shards that the stream will use. The throughput of the stream is a function of the number of shards; more shards are required for greater provisioned throughput.

    ", + "PutRecordsOutput$FailedRecordCount": "

    The number of unsuccessfully processed records in a PutRecords request.

    " + } + }, + "ProvisionedThroughputExceededException": { + "base": "

    The request rate for the stream is too high, or the requested data is too large for the available throughput. Reduce the frequency or size of your requests. For more information, see Streams Limits in the Amazon Kinesis Streams Developer Guide, and Error Retries and Exponential Backoff in AWS in the AWS General Reference.

    ", + "refs": { + } + }, + "PutRecordInput": { + "base": "

    Represents the input for PutRecord.

    ", + "refs": { + } + }, + "PutRecordOutput": { + "base": "

    Represents the output for PutRecord.

    ", + "refs": { + } + }, + "PutRecordsInput": { + "base": "

    A PutRecords request.

    ", + "refs": { + } + }, + "PutRecordsOutput": { + "base": "

    PutRecords results.

    ", + "refs": { + } + }, + "PutRecordsRequestEntry": { + "base": "

    Represents the output for PutRecords.

    ", + "refs": { + "PutRecordsRequestEntryList$member": null + } + }, + "PutRecordsRequestEntryList": { + "base": null, + "refs": { + "PutRecordsInput$Records": "

    The records associated with the request.

    " + } + }, + "PutRecordsResultEntry": { + "base": "

    Represents the result of an individual record from a PutRecords request. A record that is successfully added to a stream includes SequenceNumber and ShardId in the result. A record that fails to be added to the stream includes ErrorCode and ErrorMessage in the result.

    ", + "refs": { + "PutRecordsResultEntryList$member": null + } + }, + "PutRecordsResultEntryList": { + "base": null, + "refs": { + "PutRecordsOutput$Records": "

    An array of successfully and unsuccessfully processed record results, correlated with the request by natural ordering. A record that is successfully added to a stream includes SequenceNumber and ShardId in the result. A record that fails to be added to a stream includes ErrorCode and ErrorMessage in the result.

    " + } + }, + "Record": { + "base": "

    The unit of data of the Amazon Kinesis stream, which is composed of a sequence number, a partition key, and a data blob.

    ", + "refs": { + "RecordList$member": null + } + }, + "RecordList": { + "base": null, + "refs": { + "GetRecordsOutput$Records": "

    The data records retrieved from the shard.

    " + } + }, + "RemoveTagsFromStreamInput": { + "base": "

    Represents the input for RemoveTagsFromStream.

    ", + "refs": { + } + }, + "ResourceInUseException": { + "base": "

    The resource is not available for this operation. For successful operation, the resource needs to be in the ACTIVE state.

    ", + "refs": { + } + }, + "ResourceNotFoundException": { + "base": "

    The requested resource could not be found. The stream might not be specified correctly, or it might not be in the ACTIVE state if the operation requires it.

    ", + "refs": { + } + }, + "RetentionPeriodHours": { + "base": null, + "refs": { + "DecreaseStreamRetentionPeriodInput$RetentionPeriodHours": "

    The new retention period of the stream, in hours. Must be less than the current retention period.

    ", + "IncreaseStreamRetentionPeriodInput$RetentionPeriodHours": "

    The new retention period of the stream, in hours. Must be more than the current retention period.

    ", + "StreamDescription$RetentionPeriodHours": "

    The current retention period, in hours.

    " + } + }, + "SequenceNumber": { + "base": null, + "refs": { + "GetShardIteratorInput$StartingSequenceNumber": "

    The sequence number of the data record in the shard from which to start reading. Used with shard iterator type AT_SEQUENCE_NUMBER and AFTER_SEQUENCE_NUMBER.

    ", + "PutRecordInput$SequenceNumberForOrdering": "

    Guarantees strictly increasing sequence numbers, for puts from the same client and to the same partition key. Usage: set the SequenceNumberForOrdering of record n to the sequence number of record n-1 (as returned in the result when putting record n-1). If this parameter is not set, records will be coarsely ordered based on arrival time.

    ", + "PutRecordOutput$SequenceNumber": "

    The sequence number identifier that was assigned to the put data record. The sequence number for the record is unique across all records in the stream. A sequence number is the identifier associated with every record put into the stream.

    ", + "PutRecordsResultEntry$SequenceNumber": "

    The sequence number for an individual record result.

    ", + "Record$SequenceNumber": "

    The unique identifier of the record in the stream.

    ", + "SequenceNumberRange$StartingSequenceNumber": "

    The starting sequence number for the range.

    ", + "SequenceNumberRange$EndingSequenceNumber": "

    The ending sequence number for the range. Shards that are in the OPEN state have an ending sequence number of null.

    " + } + }, + "SequenceNumberRange": { + "base": "

    The range of possible sequence numbers for the shard.

    ", + "refs": { + "Shard$SequenceNumberRange": "

    The range of possible sequence numbers for the shard.

    " + } + }, + "Shard": { + "base": "

    A uniquely identified group of data records in an Amazon Kinesis stream.

    ", + "refs": { + "ShardList$member": null + } + }, + "ShardId": { + "base": null, + "refs": { + "DescribeStreamInput$ExclusiveStartShardId": "

    The shard ID of the shard to start with.

    ", + "GetShardIteratorInput$ShardId": "

    The shard ID of the Amazon Kinesis shard to get the iterator for.

    ", + "MergeShardsInput$ShardToMerge": "

    The shard ID of the shard to combine with the adjacent shard for the merge.

    ", + "MergeShardsInput$AdjacentShardToMerge": "

    The shard ID of the adjacent shard for the merge.

    ", + "PutRecordOutput$ShardId": "

    The shard ID of the shard where the data record was placed.

    ", + "PutRecordsResultEntry$ShardId": "

    The shard ID for an individual record result.

    ", + "Shard$ShardId": "

    The unique identifier of the shard within the stream.

    ", + "Shard$ParentShardId": "

    The shard ID of the shard's parent.

    ", + "Shard$AdjacentParentShardId": "

    The shard ID of the shard adjacent to the shard's parent.

    ", + "SplitShardInput$ShardToSplit": "

    The shard ID of the shard to split.

    " + } + }, + "ShardIterator": { + "base": null, + "refs": { + "GetRecordsInput$ShardIterator": "

    The position in the shard from which you want to start sequentially reading data records. A shard iterator specifies this position using the sequence number of a data record in the shard.

    ", + "GetRecordsOutput$NextShardIterator": "

    The next position in the shard from which to start sequentially reading data records. If set to null, the shard has been closed and the requested iterator will not return any more data.

    ", + "GetShardIteratorOutput$ShardIterator": "

    The position in the shard from which to start reading data records sequentially. A shard iterator specifies this position using the sequence number of a data record in a shard.

    " + } + }, + "ShardIteratorType": { + "base": null, + "refs": { + "GetShardIteratorInput$ShardIteratorType": "

    Determines how the shard iterator is used to start reading data records from the shard.

    The following are the valid Amazon Kinesis shard iterator types:

    • AT_SEQUENCE_NUMBER - Start reading from the position denoted by a specific sequence number, provided in the value StartingSequenceNumber.
    • AFTER_SEQUENCE_NUMBER - Start reading right after the position denoted by a specific sequence number, provided in the value StartingSequenceNumber.
    • AT_TIMESTAMP - Start reading from the position denoted by a specific timestamp, provided in the value Timestamp.
    • TRIM_HORIZON - Start reading at the last untrimmed record in the shard in the system, which is the oldest data record in the shard.
    • LATEST - Start reading just after the most recent record in the shard, so that you always read the most recent data in the shard.
    " + } + }, + "ShardList": { + "base": null, + "refs": { + "StreamDescription$Shards": "

    The shards that comprise the stream.

    " + } + }, + "SplitShardInput": { + "base": "

    Represents the input for SplitShard.

    ", + "refs": { + } + }, + "StreamARN": { + "base": null, + "refs": { + "StreamDescription$StreamARN": "

    The Amazon Resource Name (ARN) for the stream being described.

    " + } + }, + "StreamDescription": { + "base": "

    Represents the output for DescribeStream.

    ", + "refs": { + "DescribeStreamOutput$StreamDescription": "

    The current status of the stream, the stream ARN, an array of shard objects that comprise the stream, and states whether there are more shards available.

    " + } + }, + "StreamName": { + "base": null, + "refs": { + "AddTagsToStreamInput$StreamName": "

    The name of the stream.

    ", + "CreateStreamInput$StreamName": "

    A name to identify the stream. The stream name is scoped to the AWS account used by the application that creates the stream. It is also scoped by region. That is, two streams in two different AWS accounts can have the same name, and two streams in the same AWS account but in two different regions can have the same name.

    ", + "DecreaseStreamRetentionPeriodInput$StreamName": "

    The name of the stream to modify.

    ", + "DeleteStreamInput$StreamName": "

    The name of the stream to delete.

    ", + "DescribeStreamInput$StreamName": "

    The name of the stream to describe.

    ", + "DisableEnhancedMonitoringInput$StreamName": "

    The name of the Amazon Kinesis stream for which to disable enhanced monitoring.

    ", + "EnableEnhancedMonitoringInput$StreamName": "

    The name of the stream for which to enable enhanced monitoring.

    ", + "EnhancedMonitoringOutput$StreamName": "

    The name of the Amazon Kinesis stream.

    ", + "GetShardIteratorInput$StreamName": "

    The name of the Amazon Kinesis stream.

    ", + "IncreaseStreamRetentionPeriodInput$StreamName": "

    The name of the stream to modify.

    ", + "ListStreamsInput$ExclusiveStartStreamName": "

    The name of the stream to start the list with.

    ", + "ListTagsForStreamInput$StreamName": "

    The name of the stream.

    ", + "MergeShardsInput$StreamName": "

    The name of the stream for the merge.

    ", + "PutRecordInput$StreamName": "

    The name of the stream to put the data record into.

    ", + "PutRecordsInput$StreamName": "

    The stream name associated with the request.

    ", + "RemoveTagsFromStreamInput$StreamName": "

    The name of the stream.

    ", + "SplitShardInput$StreamName": "

    The name of the stream for the shard split.

    ", + "StreamDescription$StreamName": "

    The name of the stream being described.

    ", + "StreamNameList$member": null + } + }, + "StreamNameList": { + "base": null, + "refs": { + "ListStreamsOutput$StreamNames": "

    The names of the streams that are associated with the AWS account making the ListStreams request.

    " + } + }, + "StreamStatus": { + "base": null, + "refs": { + "StreamDescription$StreamStatus": "

    The current status of the stream being described. The stream status is one of the following states:

    • CREATING - The stream is being created. Amazon Kinesis immediately returns and sets StreamStatus to CREATING.
    • DELETING - The stream is being deleted. The specified stream is in the DELETING state until Amazon Kinesis completes the deletion.
    • ACTIVE - The stream exists and is ready for read and write operations or deletion. You should perform read and write operations only on an ACTIVE stream.
    • UPDATING - Shards in the stream are being merged or split. Read and write operations continue to work while the stream is in the UPDATING state.
    " + } + }, + "Tag": { + "base": "

    Metadata assigned to the stream, consisting of a key-value pair.

    ", + "refs": { + "TagList$member": null + } + }, + "TagKey": { + "base": null, + "refs": { + "ListTagsForStreamInput$ExclusiveStartTagKey": "

    The key to use as the starting point for the list of tags. If this parameter is set, ListTagsForStream gets all tags that occur after ExclusiveStartTagKey.

    ", + "Tag$Key": "

    A unique identifier for the tag. Maximum length: 128 characters. Valid characters: Unicode letters, digits, white space, _ . / = + - % @

    ", + "TagKeyList$member": null, + "TagMap$key": null + } + }, + "TagKeyList": { + "base": null, + "refs": { + "RemoveTagsFromStreamInput$TagKeys": "

    A list of tag keys. Each corresponding tag is removed from the stream.

    " + } + }, + "TagList": { + "base": null, + "refs": { + "ListTagsForStreamOutput$Tags": "

    A list of tags associated with StreamName, starting with the first tag after ExclusiveStartTagKey and up to the specified Limit.

    " + } + }, + "TagMap": { + "base": null, + "refs": { + "AddTagsToStreamInput$Tags": "

    The set of key-value pairs to use to create the tags.

    " + } + }, + "TagValue": { + "base": null, + "refs": { + "Tag$Value": "

    An optional string, typically used to describe or define the tag. Maximum length: 256 characters. Valid characters: Unicode letters, digits, white space, _ . / = + - % @

    ", + "TagMap$value": null + } + }, + "Timestamp": { + "base": null, + "refs": { + "GetShardIteratorInput$Timestamp": "

    The timestamp of the data record from which to start reading. Used with shard iterator type AT_TIMESTAMP. A timestamp is the Unix epoch date with precision in milliseconds. For example, 2016-04-04T19:58:46.480-00:00 or 1459799926.480. If a record with this exact timestamp does not exist, the iterator returned is for the next (later) record. If the timestamp is older than the current trim horizon, the iterator returned is for the oldest untrimmed data record (TRIM_HORIZON).

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/paginators-1.json new file mode 100644 index 000000000..007f563b4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/paginators-1.json @@ -0,0 +1,18 @@ +{ + "pagination": { + "DescribeStream": { + "input_token": "ExclusiveStartShardId", + "limit_key": "Limit", + "more_results": "StreamDescription.HasMoreShards", + "output_token": "StreamDescription.Shards[-1].ShardId", + "result_key": "StreamDescription.Shards" + }, + "ListStreams": { + "input_token": "ExclusiveStartStreamName", + "limit_key": "Limit", + "more_results": "HasMoreStreams", + "output_token": "StreamNames[-1]", + "result_key": "StreamNames" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/waiters-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/waiters-2.json new file mode 100644 index 000000000..8e3162ffa --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/waiters-2.json @@ -0,0 +1,18 @@ +{ + "version": 2, + "waiters": { + "StreamExists": { + "delay": 10, + "operation": "DescribeStream", + "maxAttempts": 18, + "acceptors": [ + { + "expected": "ACTIVE", + "matcher": "path", + "state": "success", + "argument": "StreamDescription.StreamStatus" + } + ] + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/kms/2014-11-01/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/kms/2014-11-01/api-2.json new file mode 100644 index 000000000..e956fc6b6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/kms/2014-11-01/api-2.json @@ -0,0 +1,1209 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2014-11-01", + "endpointPrefix":"kms", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"KMS", + "serviceFullName":"AWS Key Management Service", + "signatureVersion":"v4", + "targetPrefix":"TrentService" + }, + "operations":{ + "CancelKeyDeletion":{ + "name":"CancelKeyDeletion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelKeyDeletionRequest"}, + "output":{"shape":"CancelKeyDeletionResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"InvalidArnException"}, + {"shape":"DependencyTimeoutException"}, + {"shape":"KMSInternalException"}, + {"shape":"KMSInvalidStateException"} + ] + }, + "CreateAlias":{ + "name":"CreateAlias", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateAliasRequest"}, + "errors":[ + {"shape":"DependencyTimeoutException"}, + {"shape":"AlreadyExistsException"}, + {"shape":"NotFoundException"}, + {"shape":"InvalidAliasNameException"}, + {"shape":"KMSInternalException"}, + {"shape":"LimitExceededException"}, + {"shape":"KMSInvalidStateException"} + ] + }, + "CreateGrant":{ + "name":"CreateGrant", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateGrantRequest"}, + "output":{"shape":"CreateGrantResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"DisabledException"}, + 
{"shape":"DependencyTimeoutException"}, + {"shape":"InvalidArnException"}, + {"shape":"KMSInternalException"}, + {"shape":"InvalidGrantTokenException"}, + {"shape":"LimitExceededException"}, + {"shape":"KMSInvalidStateException"} + ] + }, + "CreateKey":{ + "name":"CreateKey", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateKeyRequest"}, + "output":{"shape":"CreateKeyResponse"}, + "errors":[ + {"shape":"MalformedPolicyDocumentException"}, + {"shape":"DependencyTimeoutException"}, + {"shape":"InvalidArnException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"KMSInternalException"}, + {"shape":"LimitExceededException"} + ] + }, + "Decrypt":{ + "name":"Decrypt", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DecryptRequest"}, + "output":{"shape":"DecryptResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"DisabledException"}, + {"shape":"InvalidCiphertextException"}, + {"shape":"KeyUnavailableException"}, + {"shape":"DependencyTimeoutException"}, + {"shape":"InvalidGrantTokenException"}, + {"shape":"KMSInternalException"}, + {"shape":"KMSInvalidStateException"} + ] + }, + "DeleteAlias":{ + "name":"DeleteAlias", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAliasRequest"}, + "errors":[ + {"shape":"DependencyTimeoutException"}, + {"shape":"NotFoundException"}, + {"shape":"KMSInternalException"}, + {"shape":"KMSInvalidStateException"} + ] + }, + "DescribeKey":{ + "name":"DescribeKey", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeKeyRequest"}, + "output":{"shape":"DescribeKeyResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"InvalidArnException"}, + {"shape":"DependencyTimeoutException"}, + {"shape":"KMSInternalException"} + ] + }, + "DisableKey":{ + "name":"DisableKey", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableKeyRequest"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"InvalidArnException"}, + {"shape":"DependencyTimeoutException"}, + {"shape":"KMSInternalException"}, + {"shape":"KMSInvalidStateException"} + ] + }, + "DisableKeyRotation":{ + "name":"DisableKeyRotation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableKeyRotationRequest"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"DisabledException"}, + {"shape":"InvalidArnException"}, + {"shape":"DependencyTimeoutException"}, + {"shape":"KMSInternalException"}, + {"shape":"KMSInvalidStateException"} + ] + }, + "EnableKey":{ + "name":"EnableKey", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableKeyRequest"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"InvalidArnException"}, + {"shape":"DependencyTimeoutException"}, + {"shape":"KMSInternalException"}, + {"shape":"LimitExceededException"}, + {"shape":"KMSInvalidStateException"} + ] + }, + "EnableKeyRotation":{ + "name":"EnableKeyRotation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableKeyRotationRequest"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"DisabledException"}, + {"shape":"InvalidArnException"}, + {"shape":"DependencyTimeoutException"}, + {"shape":"KMSInternalException"}, + {"shape":"KMSInvalidStateException"} + ] + }, + "Encrypt":{ + "name":"Encrypt", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EncryptRequest"}, + "output":{"shape":"EncryptResponse"}, + "errors":[ + 
{"shape":"NotFoundException"}, + {"shape":"DisabledException"}, + {"shape":"KeyUnavailableException"}, + {"shape":"DependencyTimeoutException"}, + {"shape":"InvalidKeyUsageException"}, + {"shape":"InvalidGrantTokenException"}, + {"shape":"KMSInternalException"}, + {"shape":"KMSInvalidStateException"} + ] + }, + "GenerateDataKey":{ + "name":"GenerateDataKey", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GenerateDataKeyRequest"}, + "output":{"shape":"GenerateDataKeyResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"DisabledException"}, + {"shape":"KeyUnavailableException"}, + {"shape":"DependencyTimeoutException"}, + {"shape":"InvalidKeyUsageException"}, + {"shape":"InvalidGrantTokenException"}, + {"shape":"KMSInternalException"}, + {"shape":"KMSInvalidStateException"} + ] + }, + "GenerateDataKeyWithoutPlaintext":{ + "name":"GenerateDataKeyWithoutPlaintext", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GenerateDataKeyWithoutPlaintextRequest"}, + "output":{"shape":"GenerateDataKeyWithoutPlaintextResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"DisabledException"}, + {"shape":"KeyUnavailableException"}, + {"shape":"DependencyTimeoutException"}, + {"shape":"InvalidKeyUsageException"}, + {"shape":"InvalidGrantTokenException"}, + {"shape":"KMSInternalException"}, + {"shape":"KMSInvalidStateException"} + ] + }, + "GenerateRandom":{ + "name":"GenerateRandom", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GenerateRandomRequest"}, + "output":{"shape":"GenerateRandomResponse"}, + "errors":[ + {"shape":"DependencyTimeoutException"}, + {"shape":"KMSInternalException"} + ] + }, + "GetKeyPolicy":{ + "name":"GetKeyPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetKeyPolicyRequest"}, + "output":{"shape":"GetKeyPolicyResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"InvalidArnException"}, + {"shape":"DependencyTimeoutException"}, + {"shape":"KMSInternalException"}, + {"shape":"KMSInvalidStateException"} + ] + }, + "GetKeyRotationStatus":{ + "name":"GetKeyRotationStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetKeyRotationStatusRequest"}, + "output":{"shape":"GetKeyRotationStatusResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"InvalidArnException"}, + {"shape":"DependencyTimeoutException"}, + {"shape":"KMSInternalException"}, + {"shape":"KMSInvalidStateException"} + ] + }, + "ListAliases":{ + "name":"ListAliases", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAliasesRequest"}, + "output":{"shape":"ListAliasesResponse"}, + "errors":[ + {"shape":"DependencyTimeoutException"}, + {"shape":"InvalidMarkerException"}, + {"shape":"KMSInternalException"} + ] + }, + "ListGrants":{ + "name":"ListGrants", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListGrantsRequest"}, + "output":{"shape":"ListGrantsResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"DependencyTimeoutException"}, + {"shape":"InvalidMarkerException"}, + {"shape":"InvalidArnException"}, + {"shape":"KMSInternalException"}, + {"shape":"KMSInvalidStateException"} + ] + }, + "ListKeyPolicies":{ + "name":"ListKeyPolicies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListKeyPoliciesRequest"}, + "output":{"shape":"ListKeyPoliciesResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"InvalidArnException"}, 
+ {"shape":"DependencyTimeoutException"}, + {"shape":"KMSInternalException"}, + {"shape":"KMSInvalidStateException"} + ] + }, + "ListKeys":{ + "name":"ListKeys", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListKeysRequest"}, + "output":{"shape":"ListKeysResponse"}, + "errors":[ + {"shape":"DependencyTimeoutException"}, + {"shape":"KMSInternalException"} + ] + }, + "ListRetirableGrants":{ + "name":"ListRetirableGrants", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListRetirableGrantsRequest"}, + "output":{"shape":"ListGrantsResponse"}, + "errors":[ + {"shape":"DependencyTimeoutException"}, + {"shape":"InvalidMarkerException"}, + {"shape":"InvalidArnException"}, + {"shape":"NotFoundException"}, + {"shape":"KMSInternalException"} + ] + }, + "PutKeyPolicy":{ + "name":"PutKeyPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutKeyPolicyRequest"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"InvalidArnException"}, + {"shape":"MalformedPolicyDocumentException"}, + {"shape":"DependencyTimeoutException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"KMSInternalException"}, + {"shape":"LimitExceededException"}, + {"shape":"KMSInvalidStateException"} + ] + }, + "ReEncrypt":{ + "name":"ReEncrypt", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReEncryptRequest"}, + "output":{"shape":"ReEncryptResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"DisabledException"}, + {"shape":"InvalidCiphertextException"}, + {"shape":"KeyUnavailableException"}, + {"shape":"DependencyTimeoutException"}, + {"shape":"InvalidKeyUsageException"}, + {"shape":"InvalidGrantTokenException"}, + {"shape":"KMSInternalException"}, + {"shape":"KMSInvalidStateException"} + ] + }, + "RetireGrant":{ + "name":"RetireGrant", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RetireGrantRequest"}, + "errors":[ + {"shape":"InvalidGrantTokenException"}, + {"shape":"InvalidGrantIdException"}, + {"shape":"NotFoundException"}, + {"shape":"DependencyTimeoutException"}, + {"shape":"KMSInternalException"}, + {"shape":"KMSInvalidStateException"} + ] + }, + "RevokeGrant":{ + "name":"RevokeGrant", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RevokeGrantRequest"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"DependencyTimeoutException"}, + {"shape":"InvalidArnException"}, + {"shape":"InvalidGrantIdException"}, + {"shape":"KMSInternalException"}, + {"shape":"KMSInvalidStateException"} + ] + }, + "ScheduleKeyDeletion":{ + "name":"ScheduleKeyDeletion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ScheduleKeyDeletionRequest"}, + "output":{"shape":"ScheduleKeyDeletionResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"InvalidArnException"}, + {"shape":"DependencyTimeoutException"}, + {"shape":"KMSInternalException"}, + {"shape":"KMSInvalidStateException"} + ] + }, + "UpdateAlias":{ + "name":"UpdateAlias", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateAliasRequest"}, + "errors":[ + {"shape":"DependencyTimeoutException"}, + {"shape":"NotFoundException"}, + {"shape":"KMSInternalException"}, + {"shape":"KMSInvalidStateException"} + ] + }, + "UpdateKeyDescription":{ + "name":"UpdateKeyDescription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateKeyDescriptionRequest"}, + "errors":[ + {"shape":"NotFoundException"}, + 
{"shape":"InvalidArnException"}, + {"shape":"DependencyTimeoutException"}, + {"shape":"KMSInternalException"}, + {"shape":"KMSInvalidStateException"} + ] + } + }, + "shapes":{ + "AWSAccountIdType":{"type":"string"}, + "AliasList":{ + "type":"list", + "member":{"shape":"AliasListEntry"} + }, + "AliasListEntry":{ + "type":"structure", + "members":{ + "AliasName":{"shape":"AliasNameType"}, + "AliasArn":{"shape":"ArnType"}, + "TargetKeyId":{"shape":"KeyIdType"} + } + }, + "AliasNameType":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^[a-zA-Z0-9:/_-]+$" + }, + "AlreadyExistsException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "exception":true + }, + "ArnType":{ + "type":"string", + "max":2048, + "min":20 + }, + "BooleanType":{"type":"boolean"}, + "CancelKeyDeletionRequest":{ + "type":"structure", + "required":["KeyId"], + "members":{ + "KeyId":{"shape":"KeyIdType"} + } + }, + "CancelKeyDeletionResponse":{ + "type":"structure", + "members":{ + "KeyId":{"shape":"KeyIdType"} + } + }, + "CiphertextType":{ + "type":"blob", + "max":6144, + "min":1 + }, + "CreateAliasRequest":{ + "type":"structure", + "required":[ + "AliasName", + "TargetKeyId" + ], + "members":{ + "AliasName":{"shape":"AliasNameType"}, + "TargetKeyId":{"shape":"KeyIdType"} + } + }, + "CreateGrantRequest":{ + "type":"structure", + "required":[ + "KeyId", + "GranteePrincipal" + ], + "members":{ + "KeyId":{"shape":"KeyIdType"}, + "GranteePrincipal":{"shape":"PrincipalIdType"}, + "RetiringPrincipal":{"shape":"PrincipalIdType"}, + "Operations":{"shape":"GrantOperationList"}, + "Constraints":{"shape":"GrantConstraints"}, + "GrantTokens":{"shape":"GrantTokenList"}, + "Name":{"shape":"GrantNameType"} + } + }, + "CreateGrantResponse":{ + "type":"structure", + "members":{ + "GrantToken":{"shape":"GrantTokenType"}, + "GrantId":{"shape":"GrantIdType"} + } + }, + "CreateKeyRequest":{ + "type":"structure", + "members":{ + "Policy":{"shape":"PolicyType"}, + "Description":{"shape":"DescriptionType"}, + "KeyUsage":{"shape":"KeyUsageType"}, + "BypassPolicyLockoutSafetyCheck":{"shape":"BooleanType"} + } + }, + "CreateKeyResponse":{ + "type":"structure", + "members":{ + "KeyMetadata":{"shape":"KeyMetadata"} + } + }, + "DataKeySpec":{ + "type":"string", + "enum":[ + "AES_256", + "AES_128" + ] + }, + "DateType":{"type":"timestamp"}, + "DecryptRequest":{ + "type":"structure", + "required":["CiphertextBlob"], + "members":{ + "CiphertextBlob":{"shape":"CiphertextType"}, + "EncryptionContext":{"shape":"EncryptionContextType"}, + "GrantTokens":{"shape":"GrantTokenList"} + } + }, + "DecryptResponse":{ + "type":"structure", + "members":{ + "KeyId":{"shape":"KeyIdType"}, + "Plaintext":{"shape":"PlaintextType"} + } + }, + "DeleteAliasRequest":{ + "type":"structure", + "required":["AliasName"], + "members":{ + "AliasName":{"shape":"AliasNameType"} + } + }, + "DependencyTimeoutException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "exception":true, + "fault":true + }, + "DescribeKeyRequest":{ + "type":"structure", + "required":["KeyId"], + "members":{ + "KeyId":{"shape":"KeyIdType"}, + "GrantTokens":{"shape":"GrantTokenList"} + } + }, + "DescribeKeyResponse":{ + "type":"structure", + "members":{ + "KeyMetadata":{"shape":"KeyMetadata"} + } + }, + "DescriptionType":{ + "type":"string", + "max":8192, + "min":0 + }, + "DisableKeyRequest":{ + "type":"structure", + "required":["KeyId"], + "members":{ + "KeyId":{"shape":"KeyIdType"} + } + }, + "DisableKeyRotationRequest":{ + 
"type":"structure", + "required":["KeyId"], + "members":{ + "KeyId":{"shape":"KeyIdType"} + } + }, + "DisabledException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "exception":true + }, + "EnableKeyRequest":{ + "type":"structure", + "required":["KeyId"], + "members":{ + "KeyId":{"shape":"KeyIdType"} + } + }, + "EnableKeyRotationRequest":{ + "type":"structure", + "required":["KeyId"], + "members":{ + "KeyId":{"shape":"KeyIdType"} + } + }, + "EncryptRequest":{ + "type":"structure", + "required":[ + "KeyId", + "Plaintext" + ], + "members":{ + "KeyId":{"shape":"KeyIdType"}, + "Plaintext":{"shape":"PlaintextType"}, + "EncryptionContext":{"shape":"EncryptionContextType"}, + "GrantTokens":{"shape":"GrantTokenList"} + } + }, + "EncryptResponse":{ + "type":"structure", + "members":{ + "CiphertextBlob":{"shape":"CiphertextType"}, + "KeyId":{"shape":"KeyIdType"} + } + }, + "EncryptionContextKey":{"type":"string"}, + "EncryptionContextType":{ + "type":"map", + "key":{"shape":"EncryptionContextKey"}, + "value":{"shape":"EncryptionContextValue"} + }, + "EncryptionContextValue":{"type":"string"}, + "ErrorMessageType":{"type":"string"}, + "GenerateDataKeyRequest":{ + "type":"structure", + "required":["KeyId"], + "members":{ + "KeyId":{"shape":"KeyIdType"}, + "EncryptionContext":{"shape":"EncryptionContextType"}, + "NumberOfBytes":{"shape":"NumberOfBytesType"}, + "KeySpec":{"shape":"DataKeySpec"}, + "GrantTokens":{"shape":"GrantTokenList"} + } + }, + "GenerateDataKeyResponse":{ + "type":"structure", + "members":{ + "CiphertextBlob":{"shape":"CiphertextType"}, + "Plaintext":{"shape":"PlaintextType"}, + "KeyId":{"shape":"KeyIdType"} + } + }, + "GenerateDataKeyWithoutPlaintextRequest":{ + "type":"structure", + "required":["KeyId"], + "members":{ + "KeyId":{"shape":"KeyIdType"}, + "EncryptionContext":{"shape":"EncryptionContextType"}, + "KeySpec":{"shape":"DataKeySpec"}, + "NumberOfBytes":{"shape":"NumberOfBytesType"}, + "GrantTokens":{"shape":"GrantTokenList"} + } + }, + "GenerateDataKeyWithoutPlaintextResponse":{ + "type":"structure", + "members":{ + "CiphertextBlob":{"shape":"CiphertextType"}, + "KeyId":{"shape":"KeyIdType"} + } + }, + "GenerateRandomRequest":{ + "type":"structure", + "members":{ + "NumberOfBytes":{"shape":"NumberOfBytesType"} + } + }, + "GenerateRandomResponse":{ + "type":"structure", + "members":{ + "Plaintext":{"shape":"PlaintextType"} + } + }, + "GetKeyPolicyRequest":{ + "type":"structure", + "required":[ + "KeyId", + "PolicyName" + ], + "members":{ + "KeyId":{"shape":"KeyIdType"}, + "PolicyName":{"shape":"PolicyNameType"} + } + }, + "GetKeyPolicyResponse":{ + "type":"structure", + "members":{ + "Policy":{"shape":"PolicyType"} + } + }, + "GetKeyRotationStatusRequest":{ + "type":"structure", + "required":["KeyId"], + "members":{ + "KeyId":{"shape":"KeyIdType"} + } + }, + "GetKeyRotationStatusResponse":{ + "type":"structure", + "members":{ + "KeyRotationEnabled":{"shape":"BooleanType"} + } + }, + "GrantConstraints":{ + "type":"structure", + "members":{ + "EncryptionContextSubset":{"shape":"EncryptionContextType"}, + "EncryptionContextEquals":{"shape":"EncryptionContextType"} + } + }, + "GrantIdType":{ + "type":"string", + "max":128, + "min":1 + }, + "GrantList":{ + "type":"list", + "member":{"shape":"GrantListEntry"} + }, + "GrantListEntry":{ + "type":"structure", + "members":{ + "KeyId":{"shape":"KeyIdType"}, + "GrantId":{"shape":"GrantIdType"}, + "Name":{"shape":"GrantNameType"}, + "CreationDate":{"shape":"DateType"}, + 
"GranteePrincipal":{"shape":"PrincipalIdType"}, + "RetiringPrincipal":{"shape":"PrincipalIdType"}, + "IssuingAccount":{"shape":"PrincipalIdType"}, + "Operations":{"shape":"GrantOperationList"}, + "Constraints":{"shape":"GrantConstraints"} + } + }, + "GrantNameType":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^[a-zA-Z0-9:/_-]+$" + }, + "GrantOperation":{ + "type":"string", + "enum":[ + "Decrypt", + "Encrypt", + "GenerateDataKey", + "GenerateDataKeyWithoutPlaintext", + "ReEncryptFrom", + "ReEncryptTo", + "CreateGrant", + "RetireGrant", + "DescribeKey" + ] + }, + "GrantOperationList":{ + "type":"list", + "member":{"shape":"GrantOperation"} + }, + "GrantTokenList":{ + "type":"list", + "member":{"shape":"GrantTokenType"}, + "max":10, + "min":0 + }, + "GrantTokenType":{ + "type":"string", + "max":8192, + "min":1 + }, + "InvalidAliasNameException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "exception":true + }, + "InvalidArnException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "exception":true + }, + "InvalidCiphertextException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "exception":true + }, + "InvalidGrantIdException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "exception":true + }, + "InvalidGrantTokenException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "exception":true + }, + "InvalidKeyUsageException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "exception":true + }, + "InvalidMarkerException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "exception":true + }, + "KMSInternalException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "exception":true + }, + "KMSInvalidStateException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "exception":true + }, + "KeyIdType":{ + "type":"string", + "max":256, + "min":1 + }, + "KeyList":{ + "type":"list", + "member":{"shape":"KeyListEntry"} + }, + "KeyListEntry":{ + "type":"structure", + "members":{ + "KeyId":{"shape":"KeyIdType"}, + "KeyArn":{"shape":"ArnType"} + } + }, + "KeyMetadata":{ + "type":"structure", + "required":["KeyId"], + "members":{ + "AWSAccountId":{"shape":"AWSAccountIdType"}, + "KeyId":{"shape":"KeyIdType"}, + "Arn":{"shape":"ArnType"}, + "CreationDate":{"shape":"DateType"}, + "Enabled":{"shape":"BooleanType"}, + "Description":{"shape":"DescriptionType"}, + "KeyUsage":{"shape":"KeyUsageType"}, + "KeyState":{"shape":"KeyState"}, + "DeletionDate":{"shape":"DateType"} + } + }, + "KeyState":{ + "type":"string", + "enum":[ + "Enabled", + "Disabled", + "PendingDeletion" + ] + }, + "KeyUnavailableException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "exception":true, + "fault":true + }, + "KeyUsageType":{ + "type":"string", + "enum":["ENCRYPT_DECRYPT"] + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "exception":true + }, + "LimitType":{ + "type":"integer", + "max":1000, + "min":1 + }, + "ListAliasesRequest":{ + "type":"structure", + "members":{ + "Limit":{"shape":"LimitType"}, + "Marker":{"shape":"MarkerType"} + } + }, + "ListAliasesResponse":{ + "type":"structure", + "members":{ + "Aliases":{"shape":"AliasList"}, + "NextMarker":{"shape":"MarkerType"}, + 
"Truncated":{"shape":"BooleanType"} + } + }, + "ListGrantsRequest":{ + "type":"structure", + "required":["KeyId"], + "members":{ + "Limit":{"shape":"LimitType"}, + "Marker":{"shape":"MarkerType"}, + "KeyId":{"shape":"KeyIdType"} + } + }, + "ListGrantsResponse":{ + "type":"structure", + "members":{ + "Grants":{"shape":"GrantList"}, + "NextMarker":{"shape":"MarkerType"}, + "Truncated":{"shape":"BooleanType"} + } + }, + "ListKeyPoliciesRequest":{ + "type":"structure", + "required":["KeyId"], + "members":{ + "KeyId":{"shape":"KeyIdType"}, + "Limit":{"shape":"LimitType"}, + "Marker":{"shape":"MarkerType"} + } + }, + "ListKeyPoliciesResponse":{ + "type":"structure", + "members":{ + "PolicyNames":{"shape":"PolicyNameList"}, + "NextMarker":{"shape":"MarkerType"}, + "Truncated":{"shape":"BooleanType"} + } + }, + "ListKeysRequest":{ + "type":"structure", + "members":{ + "Limit":{"shape":"LimitType"}, + "Marker":{"shape":"MarkerType"} + } + }, + "ListKeysResponse":{ + "type":"structure", + "members":{ + "Keys":{"shape":"KeyList"}, + "NextMarker":{"shape":"MarkerType"}, + "Truncated":{"shape":"BooleanType"} + } + }, + "ListRetirableGrantsRequest":{ + "type":"structure", + "required":["RetiringPrincipal"], + "members":{ + "Limit":{"shape":"LimitType"}, + "Marker":{"shape":"MarkerType"}, + "RetiringPrincipal":{"shape":"PrincipalIdType"} + } + }, + "MalformedPolicyDocumentException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "exception":true + }, + "MarkerType":{ + "type":"string", + "max":320, + "min":1, + "pattern":"[\\u0020-\\u00FF]*" + }, + "NotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "exception":true + }, + "NumberOfBytesType":{ + "type":"integer", + "max":1024, + "min":1 + }, + "PendingWindowInDaysType":{ + "type":"integer", + "max":365, + "min":1 + }, + "PlaintextType":{ + "type":"blob", + "max":4096, + "min":1, + "sensitive":true + }, + "PolicyNameList":{ + "type":"list", + "member":{"shape":"PolicyNameType"} + }, + "PolicyNameType":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[\\w]+" + }, + "PolicyType":{ + "type":"string", + "max":131072, + "min":1, + "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u00FF]+" + }, + "PrincipalIdType":{ + "type":"string", + "max":256, + "min":1 + }, + "PutKeyPolicyRequest":{ + "type":"structure", + "required":[ + "KeyId", + "PolicyName", + "Policy" + ], + "members":{ + "KeyId":{"shape":"KeyIdType"}, + "PolicyName":{"shape":"PolicyNameType"}, + "Policy":{"shape":"PolicyType"}, + "BypassPolicyLockoutSafetyCheck":{"shape":"BooleanType"} + } + }, + "ReEncryptRequest":{ + "type":"structure", + "required":[ + "CiphertextBlob", + "DestinationKeyId" + ], + "members":{ + "CiphertextBlob":{"shape":"CiphertextType"}, + "SourceEncryptionContext":{"shape":"EncryptionContextType"}, + "DestinationKeyId":{"shape":"KeyIdType"}, + "DestinationEncryptionContext":{"shape":"EncryptionContextType"}, + "GrantTokens":{"shape":"GrantTokenList"} + } + }, + "ReEncryptResponse":{ + "type":"structure", + "members":{ + "CiphertextBlob":{"shape":"CiphertextType"}, + "SourceKeyId":{"shape":"KeyIdType"}, + "KeyId":{"shape":"KeyIdType"} + } + }, + "RetireGrantRequest":{ + "type":"structure", + "members":{ + "GrantToken":{"shape":"GrantTokenType"}, + "KeyId":{"shape":"KeyIdType"}, + "GrantId":{"shape":"GrantIdType"} + } + }, + "RevokeGrantRequest":{ + "type":"structure", + "required":[ + "KeyId", + "GrantId" + ], + "members":{ + "KeyId":{"shape":"KeyIdType"}, + 
"GrantId":{"shape":"GrantIdType"} + } + }, + "ScheduleKeyDeletionRequest":{ + "type":"structure", + "required":["KeyId"], + "members":{ + "KeyId":{"shape":"KeyIdType"}, + "PendingWindowInDays":{"shape":"PendingWindowInDaysType"} + } + }, + "ScheduleKeyDeletionResponse":{ + "type":"structure", + "members":{ + "KeyId":{"shape":"KeyIdType"}, + "DeletionDate":{"shape":"DateType"} + } + }, + "UnsupportedOperationException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "exception":true + }, + "UpdateAliasRequest":{ + "type":"structure", + "required":[ + "AliasName", + "TargetKeyId" + ], + "members":{ + "AliasName":{"shape":"AliasNameType"}, + "TargetKeyId":{"shape":"KeyIdType"} + } + }, + "UpdateKeyDescriptionRequest":{ + "type":"structure", + "required":[ + "KeyId", + "Description" + ], + "members":{ + "KeyId":{"shape":"KeyIdType"}, + "Description":{"shape":"DescriptionType"} + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/kms/2014-11-01/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/kms/2014-11-01/docs-2.json new file mode 100644 index 000000000..024d33b7d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/kms/2014-11-01/docs-2.json @@ -0,0 +1,701 @@ +{ + "version": "2.0", + "service": "AWS Key Management Service

    AWS Key Management Service (AWS KMS) is an encryption and key management web service. This guide describes the AWS KMS operations that you can call programmatically. For general information about AWS KMS, see the AWS Key Management Service Developer Guide.

    AWS provides SDKs that consist of libraries and sample code for various programming languages and platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient way to get programmatic access to AWS KMS and other AWS services. For example, the SDKs take care of tasks such as signing requests (see below), managing errors, and retrying requests automatically. For more information about the AWS SDKs, including how to download and install them, see Tools for Amazon Web Services.

    We recommend that you use the AWS SDKs to make programmatic API calls to AWS KMS.

    Clients must support TLS (Transport Layer Security) 1.0. We recommend TLS 1.2. Clients must also support cipher suites with Perfect Forward Secrecy (PFS) such as Ephemeral Diffie-Hellman (DHE) or Elliptic Curve Ephemeral Diffie-Hellman (ECDHE). Most modern systems such as Java 7 and later support these modes.

    Signing Requests

    Requests must be signed by using an access key ID and a secret access key. We strongly recommend that you do not use your AWS account (root) access key ID and secret key for everyday work with AWS KMS. Instead, use the access key ID and secret access key for an IAM user, or you can use the AWS Security Token Service to generate temporary security credentials that you can use to sign requests.

    All AWS KMS operations require Signature Version 4.
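    For a Go caller using the SDK vendored in this repository, the client performs Signature Version 4 signing automatically; you never sign requests by hand. A minimal setup sketch (the region and the credential source are assumptions; any of the SDK's credential providers will do):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	// The session picks up credentials from the environment, the shared
	// credentials file, or an EC2 instance role; the SDK then signs each
	// request with Signature Version 4 on our behalf.
	sess := session.New(&aws.Config{Region: aws.String("us-west-2")})
	svc := kms.New(sess)

	// Smoke test: list up to 10 keys visible to these credentials.
	out, err := svc.ListKeys(&kms.ListKeysInput{Limit: aws.Int64(10)})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(out.Keys), "keys visible")
}
```

    The later sketches in this guide assume a `svc *kms.KMS` client constructed this way.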

    Logging API Requests

    AWS KMS supports AWS CloudTrail, a service that logs AWS API calls and related events for your AWS account and delivers them to an Amazon S3 bucket that you specify. By using the information collected by CloudTrail, you can determine what requests were made to AWS KMS, who made the request, when it was made, and so on. To learn more about CloudTrail, including how to turn it on and find your log files, see the AWS CloudTrail User Guide.

    Additional Resources

    For more information about credentials and request signing, see the following:

    Commonly Used APIs

    Of the APIs discussed in this guide, the following are the most commonly used in applications. You will typically perform other actions, such as creating keys and assigning policies, by using the console.

    ", + "operations": { + "CancelKeyDeletion": "

    Cancels the deletion of a customer master key (CMK). When this operation is successful, the CMK is set to the Disabled state. To enable a CMK, use EnableKey.

    For more information about scheduling and canceling deletion of a CMK, see Deleting Customer Master Keys in the AWS Key Management Service Developer Guide.

    ", + "CreateAlias": "

    Creates a display name for a customer master key. An alias can be used to identify a key and should be unique. The console enforces a one-to-one mapping between the alias and a key. An alias name can contain only alphanumeric characters, forward slashes (/), underscores (_), and dashes (-). An alias must start with the word \"alias\" followed by a forward slash (alias/). An alias that begins with \"aws\" after the forward slash (alias/aws...) is reserved by Amazon Web Services (AWS).

    The alias and the key it is mapped to must be in the same AWS account and the same region.

    To map an alias to a different key, call UpdateAlias.
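    A minimal sketch of the call with the Go SDK, assuming the client from the setup sketch above; the alias name and target key ID are hypothetical placeholders:

```go
import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kms"
)

// createExampleAlias maps a hypothetical alias to a CMK. Alias names
// must start with "alias/"; the "alias/aws" prefix is reserved.
func createExampleAlias(svc *kms.KMS) error {
	_, err := svc.CreateAlias(&kms.CreateAliasInput{
		AliasName:   aws.String("alias/example-app"),
		TargetKeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
	})
	return err
}
```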

    ", + "CreateGrant": "

    Adds a grant to a key to specify who can use the key and under what conditions. Grants are alternate permission mechanisms to key policies.

    For more information about grants, see Grants in the AWS Key Management Service Developer Guide.
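    A sketch of creating a narrowly scoped grant with the Go SDK; the key ID, role ARN, and grant name below are placeholders:

```go
import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kms"
)

// grantEncryptDecrypt lets a hypothetical IAM role use a CMK for the
// Encrypt and Decrypt operations only. Supplying Name makes retries
// idempotent: a retry with identical parameters returns the original
// GrantId instead of creating a duplicate grant.
func grantEncryptDecrypt(svc *kms.KMS) (string, error) {
	out, err := svc.CreateGrant(&kms.CreateGrantInput{
		KeyId:            aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
		GranteePrincipal: aws.String("arn:aws:iam::111122223333:role/ExampleRole"),
		Operations:       []*string{aws.String("Encrypt"), aws.String("Decrypt")},
		Name:             aws.String("example-grant"),
	})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.GrantId), nil
}
```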

    ", + "CreateKey": "

    Creates a customer master key (CMK).

    You can use a CMK to encrypt small amounts of data (4 KiB or less) directly, but CMKs are more commonly used to encrypt data encryption keys (DEKs), which are used to encrypt raw data. For more information about DEKs and the difference between CMKs and DEKs, see the following:
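    A minimal CreateKey sketch with the Go SDK; the description is a placeholder, and KeyUsage could be omitted since ENCRYPT_DECRYPT is the only supported value:

```go
import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kms"
)

// createCMK creates a key intended to protect data keys and returns
// its globally unique key ID.
func createCMK(svc *kms.KMS) (string, error) {
	out, err := svc.CreateKey(&kms.CreateKeyInput{
		Description: aws.String("Master key that protects data keys"),
		KeyUsage:    aws.String("ENCRYPT_DECRYPT"), // only supported value
	})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.KeyMetadata.KeyId), nil
}
```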

    ", + "Decrypt": "

    Decrypts ciphertext. Ciphertext is plaintext that has been previously encrypted by using any of the following functions:

    • GenerateDataKey

    • GenerateDataKeyWithoutPlaintext

    • Encrypt

    Note that if a caller has been granted access permissions to all keys (through, for example, IAM user policies that grant Decrypt permission on all resources), then the caller can decrypt ciphertext that was encrypted under keys in other accounts whenever those keys grant access to the caller. To remedy this, we recommend that you do not grant Decrypt access in an IAM user policy. Instead, grant Decrypt access only in key policies. If you must grant Decrypt access in an IAM user policy, you should scope the resource to specific keys or to specific trusted accounts.
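    A sketch of the corresponding Go call. No KeyId is passed because the ciphertext blob's metadata identifies the CMK; the encryption context (the "purpose" pair here is a placeholder) must match whatever was supplied at encryption time:

```go
import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kms"
)

// decryptBlob recovers plaintext from a blob produced by Encrypt,
// GenerateDataKey, or GenerateDataKeyWithoutPlaintext.
func decryptBlob(svc *kms.KMS, ciphertext []byte) ([]byte, error) {
	out, err := svc.Decrypt(&kms.DecryptInput{
		CiphertextBlob:    ciphertext,
		EncryptionContext: map[string]*string{"purpose": aws.String("example")},
	})
	if err != nil {
		return nil, err
	}
	return out.Plaintext, nil
}
```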

    ", + "DeleteAlias": "

    Deletes the specified alias. To map an alias to a different key, call UpdateAlias.

    ", + "DescribeKey": "

    Provides detailed information about the specified customer master key.

    ", + "DisableKey": "

    Sets the state of a customer master key (CMK) to disabled, thereby preventing its use for cryptographic operations. For more information about how key state affects the use of a CMK, see How Key State Affects the Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

    ", + "DisableKeyRotation": "

    Disables rotation of the specified key.

    ", + "EnableKey": "

    Marks a key as enabled, thereby permitting its use.

    ", + "EnableKeyRotation": "

    Enables rotation of the specified customer master key.

    ", + "Encrypt": "

    Encrypts plaintext into ciphertext by using a customer master key. The Encrypt function has two primary use cases:

    • You can encrypt up to 4 KB of arbitrary data such as an RSA key, a database password, or other sensitive customer information.

    • If you are moving encrypted data from one region to another, you can use this API to encrypt in the new region the plaintext data key that was used to encrypt the data in the original region. This provides you with an encrypted copy of the data key that can be decrypted in the new region and used there to decrypt the encrypted data.

    Unless you are moving encrypted data from one region to another, you don't use this function to encrypt a generated data key within a region. You retrieve data keys already encrypted by calling the GenerateDataKey or GenerateDataKeyWithoutPlaintext function. Data keys don't need to be encrypted again by calling Encrypt.

    If you want to encrypt data locally in your application, you can use the GenerateDataKey function to return a plaintext data encryption key and a copy of the key encrypted under the customer master key (CMK) of your choosing.
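    A sketch of a direct Encrypt call in Go for the first use case; the alias and the encryption context are placeholders:

```go
import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kms"
)

// encryptSecret encrypts up to 4 KB of data directly under a CMK,
// here addressed through a hypothetical alias.
func encryptSecret(svc *kms.KMS, secret []byte) ([]byte, error) {
	out, err := svc.Encrypt(&kms.EncryptInput{
		KeyId:             aws.String("alias/example-app"),
		Plaintext:         secret,
		EncryptionContext: map[string]*string{"purpose": aws.String("example")},
	})
	if err != nil {
		return nil, err
	}
	// Safe to store; pass to Decrypt to recover the plaintext.
	return out.CiphertextBlob, nil
}
```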

    ", + "GenerateDataKey": "

    Generates a data key that you can use in your application to locally encrypt data. This call returns a plaintext version of the key in the Plaintext field of the response object and an encrypted copy of the key in the CiphertextBlob field. The key is encrypted by using the master key specified by the KeyId field. To decrypt the encrypted key, pass it to the Decrypt API.

    We recommend that you use the following pattern to locally encrypt data: call the GenerateDataKey API, use the key returned in the Plaintext response field to locally encrypt data, and then erase the plaintext data key from memory. Store the encrypted data key (contained in the CiphertextBlob field) alongside of the locally encrypted data.

    You should not call the Encrypt function to re-encrypt your data keys within a region. GenerateDataKey always returns the data key encrypted and tied to the customer master key that will be used to decrypt it. There is no need to encrypt the data key a second time.

    If you decide to use the optional EncryptionContext parameter, you must also store the context in full or at least store enough information along with the encrypted data to be able to reconstruct the context when submitting the ciphertext to the Decrypt API. It is a good practice to choose a context that you can reconstruct on the fly to better secure the ciphertext. For more information about how this parameter is used, see Encryption Context.

    To decrypt data, pass the encrypted data key to the Decrypt API. Decrypt uses the associated master key to decrypt the encrypted data key and returns it as plaintext. Use the plaintext data key to locally decrypt your data and then erase the key from memory. You must specify the encryption context, if any, that you specified when you generated the key. The encryption context is logged by CloudTrail, and you can use this log to help track the use of particular data.
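    A sketch of this envelope-encryption pattern in Go, assuming the client from the setup sketch above. The local cipher (AES-GCM) and the alias are our choices for illustration, not anything KMS prescribes:

```go
import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kms"
)

// sealLocally generates a data key, uses the plaintext copy once to
// encrypt data locally, and keeps only the encrypted copy of the key.
func sealLocally(svc *kms.KMS, data []byte) (sealed, encryptedKey []byte, err error) {
	out, err := svc.GenerateDataKey(&kms.GenerateDataKeyInput{
		KeyId:   aws.String("alias/example-app"),
		KeySpec: aws.String("AES_256"),
	})
	if err != nil {
		return nil, nil, err
	}
	// Erase the plaintext data key from memory when we are done.
	defer func() {
		for i := range out.Plaintext {
			out.Plaintext[i] = 0
		}
	}()
	block, err := aes.NewCipher(out.Plaintext)
	if err != nil {
		return nil, nil, err
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, nil, err
	}
	nonce := make([]byte, gcm.NonceSize())
	if _, err := rand.Read(nonce); err != nil {
		return nil, nil, err
	}
	// Store the sealed data alongside out.CiphertextBlob; Decrypt
	// recovers the plaintext data key later.
	return gcm.Seal(nonce, nonce, data, nil), out.CiphertextBlob, nil
}
```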

    ", + "GenerateDataKeyWithoutPlaintext": "

    Returns a data key encrypted by a customer master key without the plaintext copy of that key. Otherwise, this API functions exactly like GenerateDataKey. You can use this API to, for example, satisfy an audit requirement that an encrypted key be made available without exposing the plaintext copy of that key.

    ", + "GenerateRandom": "

    Generates an unpredictable byte string.

    ", + "GetKeyPolicy": "

    Retrieves a policy attached to the specified key.

    ", + "GetKeyRotationStatus": "

    Retrieves a Boolean value that indicates whether key rotation is enabled for the specified key.

    ", + "ListAliases": "

    Lists all of the key aliases in the account.

    ", + "ListGrants": "

    List the grants for a specified key.

    ", + "ListKeyPolicies": "

    Retrieves a list of policies attached to a key.

    ", + "ListKeys": "

    Lists the customer master keys.

    ", + "ListRetirableGrants": "

    Returns a list of all grants for which the grant's RetiringPrincipal matches the one specified.

    A typical use is to list all grants that you are able to retire. To retire a grant, use RetireGrant.

    ", + "PutKeyPolicy": "

    Attaches a key policy to the specified customer master key (CMK).

    For more information about key policies, see Key Policies in the AWS Key Management Service Developer Guide.
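    A sketch in Go, using the only policy name the service currently accepts (\"default\") and a deliberately broad placeholder policy document that keeps the key manageable through the account's IAM policies:

```go
import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kms"
)

// putDefaultPolicy attaches a key policy to a CMK; the key ID and
// account number are placeholders.
func putDefaultPolicy(svc *kms.KMS) error {
	policy := `{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Sid": "EnableIAMPolicies",
	    "Effect": "Allow",
	    "Principal": {"AWS": "arn:aws:iam::111122223333:root"},
	    "Action": "kms:*",
	    "Resource": "*"
	  }]
	}`
	_, err := svc.PutKeyPolicy(&kms.PutKeyPolicyInput{
		KeyId:      aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
		PolicyName: aws.String("default"),
		Policy:     aws.String(policy),
	})
	return err
}
```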

    ", + "ReEncrypt": "

    Encrypts data on the server side with a new customer master key without exposing the plaintext of the data on the client side. The data is first decrypted and then encrypted. This operation can also be used to change the encryption context of a ciphertext.

    Unlike other actions, ReEncrypt is authorized twice: once as ReEncryptFrom on the source key and once as ReEncryptTo on the destination key. We therefore recommend that you include the \"action\":\"kms:ReEncrypt*\" statement in your key policies to permit re-encryption from or to the key. The statement is included automatically when you authorize use of the key through the console but must be included manually when you set a policy by using the PutKeyPolicy function.
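    A minimal ReEncrypt sketch in Go; the destination alias is a placeholder, and the source key is read from the blob's metadata:

```go
import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kms"
)

// reencryptUnderNewKey moves a ciphertext blob under a different CMK
// without the plaintext ever leaving KMS.
func reencryptUnderNewKey(svc *kms.KMS, ciphertext []byte) ([]byte, error) {
	out, err := svc.ReEncrypt(&kms.ReEncryptInput{
		CiphertextBlob:   ciphertext,
		DestinationKeyId: aws.String("alias/example-app-v2"),
	})
	if err != nil {
		return nil, err
	}
	return out.CiphertextBlob, nil
}
```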

    ", + "RetireGrant": "

    Retires a grant. Retire a grant to clean up after you are finished using it. You should revoke a grant when you intend to actively deny operations that depend on it. The following are permitted to call this API:

    • The account that created the grant

    • The RetiringPrincipal, if present

    • The GranteePrincipal, if RetireGrant is an operation specified in the grant

    The grant to retire must be identified by its grant token or by a combination of the key ARN and the grant ID. A grant token is a unique variable-length base64-encoded string. A grant ID is a 64-character unique identifier of a grant. Both are returned by the CreateGrant function.
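    A sketch of the key-ARN-plus-grant-ID form in Go, reusing the example identifiers from this guide; passing only GrantToken is the other accepted combination:

```go
import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kms"
)

// retireByID retires a grant identified by key ARN and grant ID.
func retireByID(svc *kms.KMS) error {
	_, err := svc.RetireGrant(&kms.RetireGrantInput{
		KeyId:   aws.String("arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"),
		GrantId: aws.String("0123456789012345678901234567890123456789012345678901234567890123"),
	})
	return err
}
```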

    ", + "RevokeGrant": "

    Revokes a grant. You can revoke a grant to actively deny operations that depend on it.

    ", + "ScheduleKeyDeletion": "

    Schedules the deletion of a customer master key (CMK). You may provide a waiting period, specified in days, before deletion occurs. If you do not provide a waiting period, the default period of 30 days is used. When this operation is successful, the state of the CMK changes to PendingDeletion. Before the waiting period ends, you can use CancelKeyDeletion to cancel the deletion of the CMK. After the waiting period ends, AWS KMS deletes the CMK and all AWS KMS data associated with it, including all aliases that point to it.

    Deleting a CMK is a destructive and potentially dangerous operation. When a CMK is deleted, all data that was encrypted under the CMK is rendered unrecoverable. To restrict the use of a CMK without deleting it, use DisableKey.

    For more information about scheduling a CMK for deletion, see Deleting Customer Master Keys in the AWS Key Management Service Developer Guide.
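    A sketch in Go that requests a 7-day waiting period instead of the 30-day default, leaving time to call CancelKeyDeletion if the key turns out to still be needed; the key ID is a placeholder:

```go
import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kms"
)

// scheduleDeletion puts a CMK into the PendingDeletion state.
func scheduleDeletion(svc *kms.KMS) error {
	out, err := svc.ScheduleKeyDeletion(&kms.ScheduleKeyDeletionInput{
		KeyId:               aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
		PendingWindowInDays: aws.Int64(7),
	})
	if err != nil {
		return err
	}
	fmt.Println("key will be deleted after", aws.TimeValue(out.DeletionDate))
	return nil
}
```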

    ", + "UpdateAlias": "

    Updates an alias to map it to a different key.

    An alias is not a property of a key. Therefore, an alias can be mapped to and unmapped from an existing key without changing the properties of the key.

    An alias name can contain only alphanumeric characters, forward slashes (/), underscores (_), and dashes (-). An alias must start with the word \"alias\" followed by a forward slash (alias/). An alias that begins with \"aws\" after the forward slash (alias/aws...) is reserved by Amazon Web Services (AWS).

    The alias and the key it is mapped to must be in the same AWS account and the same region.

    ", + "UpdateKeyDescription": "

    Updates the description of a key.

    " + }, + "shapes": { + "AWSAccountIdType": { + "base": null, + "refs": { + "KeyMetadata$AWSAccountId": "

    The twelve-digit account ID of the AWS account that owns the key.

    " + } + }, + "AliasList": { + "base": null, + "refs": { + "ListAliasesResponse$Aliases": "

    A list of key aliases in the user's account.

    " + } + }, + "AliasListEntry": { + "base": "

    Contains information about an alias.

    ", + "refs": { + "AliasList$member": null + } + }, + "AliasNameType": { + "base": null, + "refs": { + "AliasListEntry$AliasName": "

    String that contains the alias.

    ", + "CreateAliasRequest$AliasName": "

    String that contains the display name. The name must start with the word \"alias\" followed by a forward slash (alias/). Aliases that begin with \"alias/aws\" are reserved.

    ", + "DeleteAliasRequest$AliasName": "

    The alias to be deleted. The name must start with the word \"alias\" followed by a forward slash (alias/). Aliases that begin with \"alias/aws\" are reserved.

    ", + "UpdateAliasRequest$AliasName": "

    String that contains the name of the alias to be modified. The name must start with the word \"alias\" followed by a forward slash (alias/). Aliases that begin with \"alias/aws\" are reserved.

    " + } + }, + "AlreadyExistsException": { + "base": "

    The request was rejected because it attempted to create a resource that already exists.

    ", + "refs": { + } + }, + "ArnType": { + "base": null, + "refs": { + "AliasListEntry$AliasArn": "

    String that contains the key ARN.

    ", + "KeyListEntry$KeyArn": "

    ARN of the key.

    ", + "KeyMetadata$Arn": "

    The Amazon Resource Name (ARN) of the key. For examples, see AWS Key Management Service (AWS KMS) in the Example ARNs section of the AWS General Reference.

    " + } + }, + "BooleanType": { + "base": null, + "refs": { + "CreateKeyRequest$BypassPolicyLockoutSafetyCheck": "

    A flag to indicate whether to bypass the key policy lockout safety check.

    Setting this value to true increases the likelihood that the CMK becomes unmanageable. Do not set this value to true indiscriminately.

    For more information, refer to the scenario in the Default Key Policy section in the AWS Key Management Service Developer Guide.

    Use this parameter only when you include a policy in the request and you intend to prevent the principal making the request from making a subsequent PutKeyPolicy request on the CMK.

    The default value is false.

    ", + "GetKeyRotationStatusResponse$KeyRotationEnabled": "

    A Boolean value that specifies whether key rotation is enabled.

    ", + "KeyMetadata$Enabled": "

    Specifies whether the key is enabled. When KeyState is Enabled this value is true, otherwise it is false.

    ", + "ListAliasesResponse$Truncated": "

    A flag that indicates whether there are more items in the list. If your results were truncated, you can use the Marker parameter to make a subsequent pagination request to retrieve more items in the list.

    ", + "ListGrantsResponse$Truncated": "

    A flag that indicates whether there are more items in the list. If your results were truncated, you can use the Marker parameter to make a subsequent pagination request to retrieve more items in the list.

    ", + "ListKeyPoliciesResponse$Truncated": "

    A flag that indicates whether there are more items in the list. If your results were truncated, you can use the Marker parameter to make a subsequent pagination request to retrieve more items in the list.

    ", + "ListKeysResponse$Truncated": "

    A flag that indicates whether there are more items in the list. If your results were truncated, you can use the Marker parameter to make a subsequent pagination request to retrieve more items in the list.

    ", + "PutKeyPolicyRequest$BypassPolicyLockoutSafetyCheck": "

    A flag to indicate whether to bypass the key policy lockout safety check.

    Setting this value to true increases the likelihood that the CMK becomes unmanageable. Do not set this value to true indiscriminately.

    For more information, refer to the scenario in the Default Key Policy section in the AWS Key Management Service Developer Guide.

    Use this parameter only when you intend to prevent the principal making the request from making a subsequent PutKeyPolicy request on the CMK.

    The default value is false.

    " + } + }, + "CancelKeyDeletionRequest": { + "base": null, + "refs": { + } + }, + "CancelKeyDeletionResponse": { + "base": null, + "refs": { + } + }, + "CiphertextType": { + "base": null, + "refs": { + "DecryptRequest$CiphertextBlob": "

    Ciphertext to be decrypted. The blob includes metadata.

    ", + "EncryptResponse$CiphertextBlob": "

    The encrypted plaintext. If you are using the CLI, the value is Base64 encoded. Otherwise, it is not encoded.

    ", + "GenerateDataKeyResponse$CiphertextBlob": "

    Ciphertext that contains the encrypted data key. You must store the blob and enough information to reconstruct the encryption context so that the data encrypted by using the key can later be decrypted. You must provide both the ciphertext blob and the encryption context to the Decrypt API to recover the plaintext data key and decrypt the object.

    If you are using the CLI, the value is Base64 encoded. Otherwise, it is not encoded.

    ", + "GenerateDataKeyWithoutPlaintextResponse$CiphertextBlob": "

    Ciphertext that contains the wrapped data key. You must store the blob and encryption context so that the key can be used in a future decrypt operation.

    If you are using the CLI, the value is Base64 encoded. Otherwise, it is not encoded.

    ", + "ReEncryptRequest$CiphertextBlob": "

    Ciphertext of the data to re-encrypt.

    ", + "ReEncryptResponse$CiphertextBlob": "

    The re-encrypted data. If you are using the CLI, the value is Base64 encoded. Otherwise, it is not encoded.

    " + } + }, + "CreateAliasRequest": { + "base": null, + "refs": { + } + }, + "CreateGrantRequest": { + "base": null, + "refs": { + } + }, + "CreateGrantResponse": { + "base": null, + "refs": { + } + }, + "CreateKeyRequest": { + "base": null, + "refs": { + } + }, + "CreateKeyResponse": { + "base": null, + "refs": { + } + }, + "DataKeySpec": { + "base": null, + "refs": { + "GenerateDataKeyRequest$KeySpec": "

    Value that identifies the encryption algorithm and key size to generate a data key for. Currently this can be AES_128 or AES_256.

    ", + "GenerateDataKeyWithoutPlaintextRequest$KeySpec": "

    Value that identifies the encryption algorithm and key size. Currently this can be AES_128 or AES_256.

    " + } + }, + "DateType": { + "base": null, + "refs": { + "GrantListEntry$CreationDate": "

    The date and time when the grant was created.

    ", + "KeyMetadata$CreationDate": "

    The date and time when the key was created.

    ", + "KeyMetadata$DeletionDate": "

    The date and time after which AWS KMS deletes the customer master key (CMK). This value is present only when KeyState is PendingDeletion, otherwise this value is null.

    ", + "ScheduleKeyDeletionResponse$DeletionDate": "

    The date and time after which AWS KMS deletes the customer master key (CMK).

    " + } + }, + "DecryptRequest": { + "base": null, + "refs": { + } + }, + "DecryptResponse": { + "base": null, + "refs": { + } + }, + "DeleteAliasRequest": { + "base": null, + "refs": { + } + }, + "DependencyTimeoutException": { + "base": "

    The system timed out while trying to fulfill the request. The request can be retried.

    ", + "refs": { + } + }, + "DescribeKeyRequest": { + "base": null, + "refs": { + } + }, + "DescribeKeyResponse": { + "base": null, + "refs": { + } + }, + "DescriptionType": { + "base": null, + "refs": { + "CreateKeyRequest$Description": "

    A description of the CMK.

    Use a description that helps you decide whether the CMK is appropriate for a task.

    ", + "KeyMetadata$Description": "

    The friendly description of the key.

    ", + "UpdateKeyDescriptionRequest$Description": "

    New description for the key.

    " + } + }, + "DisableKeyRequest": { + "base": null, + "refs": { + } + }, + "DisableKeyRotationRequest": { + "base": null, + "refs": { + } + }, + "DisabledException": { + "base": "

    The request was rejected because the specified key was marked as disabled.

    ", + "refs": { + } + }, + "EnableKeyRequest": { + "base": null, + "refs": { + } + }, + "EnableKeyRotationRequest": { + "base": null, + "refs": { + } + }, + "EncryptRequest": { + "base": null, + "refs": { + } + }, + "EncryptResponse": { + "base": null, + "refs": { + } + }, + "EncryptionContextKey": { + "base": null, + "refs": { + "EncryptionContextType$key": null + } + }, + "EncryptionContextType": { + "base": null, + "refs": { + "DecryptRequest$EncryptionContext": "

    The encryption context. If this was specified in the Encrypt function, it must be specified here or the decryption operation will fail. For more information, see Encryption Context.

    ", + "EncryptRequest$EncryptionContext": "

    Name/value pair that specifies the encryption context to be used for authenticated encryption. If used here, the same value must be supplied to the Decrypt API or decryption will fail. For more information, see Encryption Context.

    ", + "GenerateDataKeyRequest$EncryptionContext": "

    Name/value pair that contains additional data to be authenticated during the encryption and decryption processes that use the key. This value is logged by AWS CloudTrail to provide context around the data encrypted by the key.

    ", + "GenerateDataKeyWithoutPlaintextRequest$EncryptionContext": "

    Name/value pair that contains additional data to be authenticated during the encryption and decryption processes.

    ", + "GrantConstraints$EncryptionContextSubset": "

    Contains a list of key-value pairs, a subset of which must be present in the encryption context of a subsequent operation permitted by the grant. When a subsequent operation permitted by the grant includes an encryption context that matches this list or is a subset of this list, the grant allows the operation. Otherwise, the operation is not allowed.

    ", + "GrantConstraints$EncryptionContextEquals": "

    Contains a list of key-value pairs that must be present in the encryption context of a subsequent operation permitted by the grant. When a subsequent operation permitted by the grant includes an encryption context that matches this list, the grant allows the operation. Otherwise, the operation is not allowed.

    ", + "ReEncryptRequest$SourceEncryptionContext": "

    Encryption context used to encrypt and decrypt the data specified in the CiphertextBlob parameter.

    ", + "ReEncryptRequest$DestinationEncryptionContext": "

    Encryption context to be used when the data is re-encrypted.

    " + } + }, + "EncryptionContextValue": { + "base": null, + "refs": { + "EncryptionContextType$value": null + } + }, + "ErrorMessageType": { + "base": null, + "refs": { + "AlreadyExistsException$message": null, + "DependencyTimeoutException$message": null, + "DisabledException$message": null, + "InvalidAliasNameException$message": null, + "InvalidArnException$message": null, + "InvalidCiphertextException$message": null, + "InvalidGrantIdException$message": null, + "InvalidGrantTokenException$message": null, + "InvalidKeyUsageException$message": null, + "InvalidMarkerException$message": null, + "KMSInternalException$message": null, + "KMSInvalidStateException$message": null, + "KeyUnavailableException$message": null, + "LimitExceededException$message": null, + "MalformedPolicyDocumentException$message": null, + "NotFoundException$message": null, + "UnsupportedOperationException$message": null + } + }, + "GenerateDataKeyRequest": { + "base": null, + "refs": { + } + }, + "GenerateDataKeyResponse": { + "base": null, + "refs": { + } + }, + "GenerateDataKeyWithoutPlaintextRequest": { + "base": null, + "refs": { + } + }, + "GenerateDataKeyWithoutPlaintextResponse": { + "base": null, + "refs": { + } + }, + "GenerateRandomRequest": { + "base": null, + "refs": { + } + }, + "GenerateRandomResponse": { + "base": null, + "refs": { + } + }, + "GetKeyPolicyRequest": { + "base": null, + "refs": { + } + }, + "GetKeyPolicyResponse": { + "base": null, + "refs": { + } + }, + "GetKeyRotationStatusRequest": { + "base": null, + "refs": { + } + }, + "GetKeyRotationStatusResponse": { + "base": null, + "refs": { + } + }, + "GrantConstraints": { + "base": "

    A structure for specifying the conditions under which the operations permitted by the grant are allowed.

    You can use this structure to allow the operations permitted by the grant only when a specified encryption context is present. For more information about encryption context, see Encryption Context in the AWS Key Management Service Developer Guide.

    ", + "refs": { + "CreateGrantRequest$Constraints": "

    The conditions under which the operations permitted by the grant are allowed.

    You can use this value to allow the operations permitted by the grant only when a specified encryption context is present. For more information, see Encryption Context in the AWS Key Management Service Developer Guide.

    ", + "GrantListEntry$Constraints": "

    The conditions under which the grant's operations are allowed.

    " + } + }, + "GrantIdType": { + "base": null, + "refs": { + "CreateGrantResponse$GrantId": "

    The unique identifier for the grant.

    You can use the GrantId in a subsequent RetireGrant or RevokeGrant operation.

    ", + "GrantListEntry$GrantId": "

    The unique identifier for the grant.

    ", + "RetireGrantRequest$GrantId": "

    Unique identifier of the grant to be retired. The grant ID is returned by the CreateGrant function.

    • Grant ID Example - 0123456789012345678901234567890123456789012345678901234567890123

    ", + "RevokeGrantRequest$GrantId": "

    Identifier of the grant to be revoked.

    " + } + }, + "GrantList": { + "base": null, + "refs": { + "ListGrantsResponse$Grants": "

    A list of grants.

    " + } + }, + "GrantListEntry": { + "base": "

    Contains information about an entry in a list of grants.

    ", + "refs": { + "GrantList$member": null + } + }, + "GrantNameType": { + "base": null, + "refs": { + "CreateGrantRequest$Name": "

    A friendly name for identifying the grant. Use this value to prevent unintended creation of duplicate grants when retrying this request.

    When this value is absent, all CreateGrant requests result in a new grant with a unique GrantId even if all the supplied parameters are identical. This can result in unintended duplicates when you retry the CreateGrant request.

    When this value is present, you can retry a CreateGrant request with identical parameters; if the grant already exists, the original GrantId is returned without creating a new grant. Note that the returned grant token is unique with every CreateGrant request, even when a duplicate GrantId is returned. All grant tokens obtained in this way can be used interchangeably.

    ", + "GrantListEntry$Name": "

    The friendly name that identifies the grant. If a name was provided in the CreateGrant request, that name is returned. Otherwise this value is null.

    " + } + }, + "GrantOperation": { + "base": null, + "refs": { + "GrantOperationList$member": null + } + }, + "GrantOperationList": { + "base": null, + "refs": { + "CreateGrantRequest$Operations": "

    A list of operations that the grant permits. The list can contain any combination of one or more of the following values:

    • Decrypt

    • Encrypt

    • GenerateDataKey

    • GenerateDataKeyWithoutPlaintext

    • ReEncryptFrom

    • ReEncryptTo

    • CreateGrant

    • RetireGrant

    • DescribeKey

    ", + "GrantListEntry$Operations": "

    The list of operations permitted by the grant.

    " + } + }, + "GrantTokenList": { + "base": null, + "refs": { + "CreateGrantRequest$GrantTokens": "

    A list of grant tokens.

    For more information, see Grant Tokens in the AWS Key Management Service Developer Guide.

    ", + "DecryptRequest$GrantTokens": "

    A list of grant tokens.

    For more information, see Grant Tokens in the AWS Key Management Service Developer Guide.

    ", + "DescribeKeyRequest$GrantTokens": "

    A list of grant tokens.

    For more information, see Grant Tokens in the AWS Key Management Service Developer Guide.

    ", + "EncryptRequest$GrantTokens": "

    A list of grant tokens.

    For more information, see Grant Tokens in the AWS Key Management Service Developer Guide.

    ", + "GenerateDataKeyRequest$GrantTokens": "

    A list of grant tokens.

    For more information, see Grant Tokens in the AWS Key Management Service Developer Guide.

    ", + "GenerateDataKeyWithoutPlaintextRequest$GrantTokens": "

    A list of grant tokens.

    For more information, see Grant Tokens in the AWS Key Management Service Developer Guide.

    ", + "ReEncryptRequest$GrantTokens": "

    A list of grant tokens.

    For more information, see Grant Tokens in the AWS Key Management Service Developer Guide.

    " + } + }, + "GrantTokenType": { + "base": null, + "refs": { + "CreateGrantResponse$GrantToken": "

    The grant token.

    For more information, see Grant Tokens in the AWS Key Management Service Developer Guide.

    ", + "GrantTokenList$member": null, + "RetireGrantRequest$GrantToken": "

    Token that identifies the grant to be retired.

    " + } + }, + "InvalidAliasNameException": { + "base": "

    The request was rejected because the specified alias name is not valid.

    ", + "refs": { + } + }, + "InvalidArnException": { + "base": "

    The request was rejected because a specified ARN was not valid.

    ", + "refs": { + } + }, + "InvalidCiphertextException": { + "base": "

    The request was rejected because the specified ciphertext has been corrupted or is otherwise invalid.

    ", + "refs": { + } + }, + "InvalidGrantIdException": { + "base": "

    The request was rejected because the specified GrantId is not valid.

    ", + "refs": { + } + }, + "InvalidGrantTokenException": { + "base": "

    The request was rejected because a grant token provided as part of the request is invalid.

    ", + "refs": { + } + }, + "InvalidKeyUsageException": { + "base": "

    The request was rejected because the specified KeyUsage value is not valid. The only currently supported value is ENCRYPT_DECRYPT.

    ", + "refs": { + } + }, + "InvalidMarkerException": { + "base": "

    The request was rejected because the marker that specifies where pagination should next begin is not valid.

    ", + "refs": { + } + }, + "KMSInternalException": { + "base": "

    The request was rejected because an internal exception occurred. The request can be retried.

    ", + "refs": { + } + }, + "KMSInvalidStateException": { + "base": "

    The request was rejected because the state of the specified resource is not valid for this request.

    For more information about how key state affects the use of a customer master key (CMK), see How Key State Affects the Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

    ", + "refs": { + } + }, + "KeyIdType": { + "base": null, + "refs": { + "AliasListEntry$TargetKeyId": "

    String that contains the key identifier pointed to by the alias.

    ", + "CancelKeyDeletionRequest$KeyId": "

    The unique identifier for the customer master key (CMK) for which to cancel deletion.

    To specify this value, use the unique key ID or the Amazon Resource Name (ARN) of the CMK. Examples:

    • Unique key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

    • Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

    To obtain the unique key ID and key ARN for a given CMK, use ListKeys or DescribeKey.

    ", + "CancelKeyDeletionResponse$KeyId": "

    The unique identifier of the master key for which deletion is canceled.

    ", + "CreateAliasRequest$TargetKeyId": "

    An identifier of the key for which you are creating the alias. This value cannot be another alias but can be a globally unique identifier or a fully specified ARN to a key.

    • Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012

    • Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012

    ", + "CreateGrantRequest$KeyId": "

    The unique identifier for the customer master key (CMK) that the grant applies to.

    To specify this value, use the globally unique key ID or the Amazon Resource Name (ARN) of the key. Examples:

    • Globally unique key ID: 12345678-1234-1234-1234-123456789012

    • Key ARN: arn:aws:kms:us-west-2:123456789012:key/12345678-1234-1234-1234-123456789012

    ", + "DecryptResponse$KeyId": "

    ARN of the key used to perform the decryption. This value is returned if no errors are encountered during the operation.

    ", + "DescribeKeyRequest$KeyId": "

    A unique identifier for the customer master key. This value can be a globally unique identifier, a fully specified ARN to either an alias or a key, or an alias name prefixed by \"alias/\".

    • Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012

    • Alias ARN Example - arn:aws:kms:us-east-1:123456789012:alias/MyAliasName

    • Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012

    • Alias Name Example - alias/MyAliasName

    ", + "DisableKeyRequest$KeyId": "

    A unique identifier for the CMK.

    Use the CMK's unique identifier or its Amazon Resource Name (ARN). For example:

    • Unique ID: 1234abcd-12ab-34cd-56ef-1234567890ab

    • ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

    ", + "DisableKeyRotationRequest$KeyId": "

    A unique identifier for the customer master key. This value can be a globally unique identifier or the fully specified ARN to a key.

    • Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012

    • Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012

    ", + "EnableKeyRequest$KeyId": "

    A unique identifier for the customer master key. This value can be a globally unique identifier or the fully specified ARN to a key.

    • Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012

    • Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012

    ", + "EnableKeyRotationRequest$KeyId": "

    A unique identifier for the customer master key. This value can be a globally unique identifier or the fully specified ARN to a key.

    • Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012

    • Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012

    ", + "EncryptRequest$KeyId": "

    A unique identifier for the customer master key. This value can be a globally unique identifier, a fully specified ARN to either an alias or a key, or an alias name prefixed by \"alias/\".

    • Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012

    • Alias ARN Example - arn:aws:kms:us-east-1:123456789012:alias/MyAliasName

    • Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012

    • Alias Name Example - alias/MyAliasName

    ", + "EncryptResponse$KeyId": "

    The ID of the key used during encryption.

    ", + "GenerateDataKeyRequest$KeyId": "

    A unique identifier for the customer master key. This value can be a globally unique identifier, a fully specified ARN to either an alias or a key, or an alias name prefixed by \"alias/\".

    • Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012

    • Alias ARN Example - arn:aws:kms:us-east-1:123456789012:alias/MyAliasName

    • Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012

    • Alias Name Example - alias/MyAliasName

    ", + "GenerateDataKeyResponse$KeyId": "

    System generated unique identifier of the key to be used to decrypt the encrypted copy of the data key.

    ", + "GenerateDataKeyWithoutPlaintextRequest$KeyId": "

    A unique identifier for the customer master key. This value can be a globally unique identifier, a fully specified ARN to either an alias or a key, or an alias name prefixed by \"alias/\".

    • Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012

    • Alias ARN Example - arn:aws:kms:us-east-1:123456789012:alias/MyAliasName

    • Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012

    • Alias Name Example - alias/MyAliasName

    ", + "GenerateDataKeyWithoutPlaintextResponse$KeyId": "

    System generated unique identifier of the key to be used to decrypt the encrypted copy of the data key.

    ", + "GetKeyPolicyRequest$KeyId": "

    A unique identifier for the customer master key. This value can be a globally unique identifier or the fully specified ARN to a key.

    • Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012

    • Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012

    ", + "GetKeyRotationStatusRequest$KeyId": "

    A unique identifier for the customer master key. This value can be a globally unique identifier or the fully specified ARN to a key.

    • Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012

    • Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012

    ", + "GrantListEntry$KeyId": "

    The unique identifier for the customer master key (CMK) to which the grant applies.

    ", + "KeyListEntry$KeyId": "

    Unique identifier of the key.

    ", + "KeyMetadata$KeyId": "

    The globally unique identifier for the key.

    ", + "ListGrantsRequest$KeyId": "

    A unique identifier for the customer master key. This value can be a globally unique identifier or the fully specified ARN to a key.

    • Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012

    • Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012

    ", + "ListKeyPoliciesRequest$KeyId": "

    A unique identifier for the customer master key. This value can be a globally unique identifier, a fully specified ARN to either an alias or a key, or an alias name prefixed by \"alias/\".

    • Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012

    • Alias ARN Example - arn:aws:kms:us-east-1:123456789012:alias/MyAliasName

    • Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012

    • Alias Name Example - alias/MyAliasName

    ", + "PutKeyPolicyRequest$KeyId": "

    A unique identifier for the CMK.

    Use the CMK's unique identifier or its Amazon Resource Name (ARN). For example:

    • Unique ID: 1234abcd-12ab-34cd-56ef-1234567890ab

    • ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

    ", + "ReEncryptRequest$DestinationKeyId": "

    A unique identifier for the customer master key used to re-encrypt the data. This value can be a globally unique identifier, a fully specified ARN to either an alias or a key, or an alias name prefixed by \"alias/\".

    • Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012

    • Alias ARN Example - arn:aws:kms:us-east-1:123456789012:alias/MyAliasName

    • Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012

    • Alias Name Example - alias/MyAliasName

    ", + "ReEncryptResponse$SourceKeyId": "

    Unique identifier of the key used to originally encrypt the data.

    ", + "ReEncryptResponse$KeyId": "

    Unique identifier of the key used to re-encrypt the data.

    ", + "RetireGrantRequest$KeyId": "

    A unique identifier for the customer master key associated with the grant. This value can be a globally unique identifier or a fully specified ARN of the key.

    • Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012

    • Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012

    ", + "RevokeGrantRequest$KeyId": "

    A unique identifier for the customer master key associated with the grant. This value can be a globally unique identifier or the fully specified ARN to a key.

    • Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012

    • Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012

    ", + "ScheduleKeyDeletionRequest$KeyId": "

    The unique identifier for the customer master key (CMK) to delete.

    To specify this value, use the unique key ID or the Amazon Resource Name (ARN) of the CMK. Examples:

    • Unique key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

    • Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

    To obtain the unique key ID and key ARN for a given CMK, use ListKeys or DescribeKey.

    ", + "ScheduleKeyDeletionResponse$KeyId": "

    The unique identifier of the customer master key (CMK) for which deletion is scheduled.

    ", + "UpdateAliasRequest$TargetKeyId": "

    Unique identifier of the customer master key to be mapped to the alias. This value can be a globally unique identifier or the fully specified ARN of a key.

    • Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012

    • Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012

    You can call ListAliases to verify that the alias is mapped to the correct TargetKeyId.

    ", + "UpdateKeyDescriptionRequest$KeyId": "

    A unique identifier for the customer master key. This value can be a globally unique identifier or the fully specified ARN to a key.

    • Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012

    • Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012

    " + } + }, + "KeyList": { + "base": null, + "refs": { + "ListKeysResponse$Keys": "

    A list of keys.

    " + } + }, + "KeyListEntry": { + "base": "

    Contains information about each entry in the key list.

    ", + "refs": { + "KeyList$member": null + } + }, + "KeyMetadata": { + "base": "

    Contains metadata about a customer master key (CMK).

    This data type is used as a response element for the CreateKey and DescribeKey operations.

    ", + "refs": { + "CreateKeyResponse$KeyMetadata": "

    Metadata associated with the CMK.

    ", + "DescribeKeyResponse$KeyMetadata": "

    Metadata associated with the key.

    " + } + }, + "KeyState": { + "base": null, + "refs": { + "KeyMetadata$KeyState": "

    The state of the customer master key (CMK).

    For more information about how key state affects the use of a CMK, see How Key State Affects the Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

    " + } + }, + "KeyUnavailableException": { + "base": "

    The request was rejected because the key was not available. The request can be retried.

    ", + "refs": { + } + }, + "KeyUsageType": { + "base": null, + "refs": { + "CreateKeyRequest$KeyUsage": "

    The intended use of the CMK.

    You can use CMKs only for symmetric encryption and decryption.

    ", + "KeyMetadata$KeyUsage": "

    The cryptographic operations for which you can use the key. Currently the only allowed value is ENCRYPT_DECRYPT, which means you can use the key for the Encrypt and Decrypt operations.

    " + } + }, + "LimitExceededException": { + "base": "

    The request was rejected because a limit was exceeded. For more information, see Limits in the AWS Key Management Service Developer Guide.

    ", + "refs": { + } + }, + "LimitType": { + "base": null, + "refs": { + "ListAliasesRequest$Limit": "

    When paginating results, specify the maximum number of items to return in the response. If additional items exist beyond the number you specify, the Truncated element in the response is set to true.

    This value is optional. If you include a value, it must be between 1 and 100, inclusive. If you do not include a value, it defaults to 50.

    ", + "ListGrantsRequest$Limit": "

    When paginating results, specify the maximum number of items to return in the response. If additional items exist beyond the number you specify, the Truncated element in the response is set to true.

    This value is optional. If you include a value, it must be between 1 and 100, inclusive. If you do not include a value, it defaults to 50.

    ", + "ListKeyPoliciesRequest$Limit": "

    When paginating results, specify the maximum number of items to return in the response. If additional items exist beyond the number you specify, the Truncated element in the response is set to true.

    This value is optional. If you include a value, it must be between 1 and 1000, inclusive. If you do not include a value, it defaults to 100.

    Currently only 1 policy can be attached to a key.

    ", + "ListKeysRequest$Limit": "

    When paginating results, specify the maximum number of items to return in the response. If additional items exist beyond the number you specify, the Truncated element in the response is set to true.

    This value is optional. If you include a value, it must be between 1 and 1000, inclusive. If you do not include a value, it defaults to 100.

    ", + "ListRetirableGrantsRequest$Limit": "

    When paginating results, specify the maximum number of items to return in the response. If additional items exist beyond the number you specify, the Truncated element in the response is set to true.

    This value is optional. If you include a value, it must be between 1 and 100, inclusive. If you do not include a value, it defaults to 50.

    " + } + }, + "ListAliasesRequest": { + "base": null, + "refs": { + } + }, + "ListAliasesResponse": { + "base": null, + "refs": { + } + }, + "ListGrantsRequest": { + "base": null, + "refs": { + } + }, + "ListGrantsResponse": { + "base": null, + "refs": { + } + }, + "ListKeyPoliciesRequest": { + "base": null, + "refs": { + } + }, + "ListKeyPoliciesResponse": { + "base": null, + "refs": { + } + }, + "ListKeysRequest": { + "base": null, + "refs": { + } + }, + "ListKeysResponse": { + "base": null, + "refs": { + } + }, + "ListRetirableGrantsRequest": { + "base": null, + "refs": { + } + }, + "MalformedPolicyDocumentException": { + "base": "

    The request was rejected because the specified policy is not syntactically or semantically correct.

    ", + "refs": { + } + }, + "MarkerType": { + "base": null, + "refs": { + "ListAliasesRequest$Marker": "

    Use this parameter only when paginating results and only in a subsequent request after you receive a response with truncated results. Set it to the value of NextMarker from the response you just received.

    ", + "ListAliasesResponse$NextMarker": "

    When Truncated is true, this value is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListGrantsRequest$Marker": "

    Use this parameter only when paginating results and only in a subsequent request after you receive a response with truncated results. Set it to the value of NextMarker from the response you just received.

    ", + "ListGrantsResponse$NextMarker": "

    When Truncated is true, this value is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListKeyPoliciesRequest$Marker": "

    Use this parameter only when paginating results and only in a subsequent request after you receive a response with truncated results. Set it to the value of NextMarker from the response you just received.

    ", + "ListKeyPoliciesResponse$NextMarker": "

    When Truncated is true, this value is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListKeysRequest$Marker": "

    Use this parameter only when paginating results and only in a subsequent request after you receive a response with truncated results. Set it to the value of NextMarker from the response you just received.

    ", + "ListKeysResponse$NextMarker": "

    When Truncated is true, this value is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListRetirableGrantsRequest$Marker": "

    Use this parameter only when paginating results and only in a subsequent request after you receive a response with truncated results. Set it to the value of NextMarker from the response you just received.
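All of the KMS list operations above share this Limit/Marker/NextMarker/Truncated pagination contract. A minimal sketch of driving it by hand with the vendored Go SDK (the region is an assumption, not part of the model files):

```go
// Manual pagination over ListKeys using the fields described above.
// The same loop shape works for ListAliases, ListGrants, and ListKeyPolicies.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.New(aws.NewConfig().WithRegion("us-east-1")))
	input := &kms.ListKeysInput{Limit: aws.Int64(100)}
	for {
		page, err := svc.ListKeys(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, k := range page.Keys {
			fmt.Println(aws.StringValue(k.KeyId))
		}
		if !aws.BoolValue(page.Truncated) {
			break // no further pages
		}
		input.Marker = page.NextMarker // resume where this page ended
	}
}
```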

    " + } + }, + "NotFoundException": { + "base": "

    The request was rejected because the specified entity or resource could not be found.

    ", + "refs": { + } + }, + "NumberOfBytesType": { + "base": null, + "refs": { + "GenerateDataKeyRequest$NumberOfBytes": "

    Integer that contains the number of bytes to generate. Common values are 128, 256, 512, and 1024. 1024 is the current limit. We recommend that you use the KeySpec parameter instead.

    ", + "GenerateDataKeyWithoutPlaintextRequest$NumberOfBytes": "

    Integer that contains the number of bytes to generate. Common values are 128, 256, 512, 1024 and so on. We recommend that you use the KeySpec parameter instead.

    ", + "GenerateRandomRequest$NumberOfBytes": "

    Integer that contains the number of bytes to generate. Common values are 128, 256, 512, 1024 and so on. The current limit is 1024 bytes.

    " + } + }, + "PendingWindowInDaysType": { + "base": null, + "refs": { + "ScheduleKeyDeletionRequest$PendingWindowInDays": "

    The waiting period, specified in number of days. After the waiting period ends, AWS KMS deletes the customer master key (CMK).

    This value is optional. If you include a value, it must be between 7 and 30, inclusive. If you do not include a value, it defaults to 30.
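As a sketch (reusing the *kms.KMS client from the pagination example above; the key ID is a placeholder), the shortest allowed waiting period looks like this:

```go
// Schedule deletion with the minimum 7-day window; omitting
// PendingWindowInDays would default the window to 30 days.
out, err := svc.ScheduleKeyDeletion(&kms.ScheduleKeyDeletionInput{
	KeyId:               aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
	PendingWindowInDays: aws.Int64(7),
})
if err != nil {
	log.Fatal(err)
}
fmt.Println("deletion scheduled for:", out.DeletionDate)
```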

    " + } + }, + "PlaintextType": { + "base": null, + "refs": { + "DecryptResponse$Plaintext": "

    Decrypted plaintext data. This value may not be returned if the customer master key is not available or if you didn't have permission to use it.

    ", + "EncryptRequest$Plaintext": "

    Data to be encrypted.

    ", + "GenerateDataKeyResponse$Plaintext": "

    Plaintext that contains the data key. Use this for encryption and decryption and then remove it from memory as soon as possible.
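A hedged envelope-encryption sketch of that advice, reusing the svc client from above ("alias/example" is a placeholder alias):

```go
// Request a 256-bit data key; KeySpec is the recommended way to size it
// (see the NumberOfBytes notes above). Persist only CiphertextBlob;
// use Plaintext locally and scrub it as soon as possible.
dk, err := svc.GenerateDataKey(&kms.GenerateDataKeyInput{
	KeyId:   aws.String("alias/example"),
	KeySpec: aws.String("AES_256"),
})
if err != nil {
	log.Fatal(err)
}
// ... feed dk.Plaintext to a local symmetric cipher here ...
for i := range dk.Plaintext {
	dk.Plaintext[i] = 0 // best-effort scrub of the key material
}
```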

    ", + "GenerateRandomResponse$Plaintext": "

    Plaintext that contains the unpredictable byte string.

    " + } + }, + "PolicyNameList": { + "base": null, + "refs": { + "ListKeyPoliciesResponse$PolicyNames": "

    A list of policy names. Currently, there is only one policy and it is named \"Default\".

    " + } + }, + "PolicyNameType": { + "base": null, + "refs": { + "GetKeyPolicyRequest$PolicyName": "

    String that contains the name of the policy. Currently, this must be \"default\". Policy names can be discovered by calling ListKeyPolicies.

    ", + "PolicyNameList$member": null, + "PutKeyPolicyRequest$PolicyName": "

    The name of the key policy.

    This value must be default.

    " + } + }, + "PolicyType": { + "base": null, + "refs": { + "CreateKeyRequest$Policy": "

    The key policy to attach to the CMK.

    If you specify a key policy, it must meet the following criteria:

    • It must allow the principal making the CreateKey request to make a subsequent PutKeyPolicy request on the CMK. This reduces the likelihood that the CMK becomes unmanageable. For more information, refer to the scenario in the Default Key Policy section in the AWS Key Management Service Developer Guide.

    • The principal(s) specified in the key policy must exist and be visible to AWS KMS. When you create a new AWS principal (for example, an IAM user or role), you might need to enforce a delay before specifying the new principal in a key policy because the new principal might not immediately be visible to AWS KMS. For more information, see Changes that I make are not always immediately visible in the IAM User Guide.

    If you do not specify a policy, AWS KMS attaches a default key policy to the CMK. For more information, see Default Key Policy in the AWS Key Management Service Developer Guide.

    The policy size limit is 32 KiB (32768 bytes).
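A sketch of passing such a policy to CreateKey, again reusing svc; the account ID and statement are placeholders, and omitting Policy yields the default key policy described above:

```go
// A placeholder policy that keeps the CMK manageable by granting the
// account root full access, satisfying the first criterion above.
policy := `{
  "Version": "2012-10-17",
  "Statement": [{
    "Sid": "EnableRootAccess",
    "Effect": "Allow",
    "Principal": {"AWS": "arn:aws:iam::111122223333:root"},
    "Action": "kms:*",
    "Resource": "*"
  }]
}`
key, err := svc.CreateKey(&kms.CreateKeyInput{
	Policy:      aws.String(policy),
	Description: aws.String("example CMK"),
})
if err != nil {
	log.Fatal(err)
}
fmt.Println("created:", aws.StringValue(key.KeyMetadata.Arn))
```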

    ", + "GetKeyPolicyResponse$Policy": "

    A policy document in JSON format.

    ", + "PutKeyPolicyRequest$Policy": "

    The key policy to attach to the CMK.

    The key policy must meet the following criteria:

    • It must allow the principal making the PutKeyPolicy request to make a subsequent PutKeyPolicy request on the CMK. This reduces the likelihood that the CMK becomes unmanageable. For more information, refer to the scenario in the Default Key Policy section in the AWS Key Management Service Developer Guide.

    • The principal(s) specified in the key policy must exist and be visible to AWS KMS. When you create a new AWS principal (for example, an IAM user or role), you might need to enforce a delay before specifying the new principal in a key policy because the new principal might not immediately be visible to AWS KMS. For more information, see Changes that I make are not always immediately visible in the IAM User Guide.

    The policy size limit is 32 KiB (32768 bytes).

    " + } + }, + "PrincipalIdType": { + "base": null, + "refs": { + "CreateGrantRequest$GranteePrincipal": "

    The principal that is given permission to perform the operations that the grant permits.

    To specify the principal, use the Amazon Resource Name (ARN) of an AWS principal. Valid AWS principals include AWS accounts (root), IAM users, federated users, and assumed role users. For examples of the ARN syntax to use for specifying a principal, see AWS Identity and Access Management (IAM) in the Example ARNs section of the AWS General Reference.

    ", + "CreateGrantRequest$RetiringPrincipal": "

    The principal that is given permission to retire the grant by using the RetireGrant operation.

    To specify the principal, use the Amazon Resource Name (ARN) of an AWS principal. Valid AWS principals include AWS accounts (root), IAM users, federated users, and assumed role users. For examples of the ARN syntax to use for specifying a principal, see AWS Identity and Access Management (IAM) in the Example ARNs section of the AWS General Reference.
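For illustration, a grant tying both principals together (all ARNs are the placeholder examples from this file; svc as above):

```go
// Grant a hypothetical role Encrypt/Decrypt on a CMK, with a second
// role allowed to retire the grant later via RetireGrant.
grant, err := svc.CreateGrant(&kms.CreateGrantInput{
	KeyId:             aws.String("arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012"),
	GranteePrincipal:  aws.String("arn:aws:iam::123456789012:role/ExampleGrantee"),
	RetiringPrincipal: aws.String("arn:aws:iam::123456789012:role/ExampleRetirer"),
	Operations:        []*string{aws.String("Encrypt"), aws.String("Decrypt")},
})
if err != nil {
	log.Fatal(err)
}
fmt.Println("grant ID:", aws.StringValue(grant.GrantId))
```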

    ", + "GrantListEntry$GranteePrincipal": "

    The principal that receives the grant's permissions.

    ", + "GrantListEntry$RetiringPrincipal": "

    The principal that can retire the grant.

    ", + "GrantListEntry$IssuingAccount": "

    The AWS account under which the grant was issued.

    ", + "ListRetirableGrantsRequest$RetiringPrincipal": "

    The retiring principal for which to list grants.

    To specify the retiring principal, use the Amazon Resource Name (ARN) of an AWS principal. Valid AWS principals include AWS accounts (root), IAM users, federated users, and assumed role users. For examples of the ARN syntax for specifying a principal, see AWS Identity and Access Management (IAM) in the Example ARNs section of the Amazon Web Services General Reference.

    " + } + }, + "PutKeyPolicyRequest": { + "base": null, + "refs": { + } + }, + "ReEncryptRequest": { + "base": null, + "refs": { + } + }, + "ReEncryptResponse": { + "base": null, + "refs": { + } + }, + "RetireGrantRequest": { + "base": null, + "refs": { + } + }, + "RevokeGrantRequest": { + "base": null, + "refs": { + } + }, + "ScheduleKeyDeletionRequest": { + "base": null, + "refs": { + } + }, + "ScheduleKeyDeletionResponse": { + "base": null, + "refs": { + } + }, + "UnsupportedOperationException": { + "base": "

    The request was rejected because a specified parameter is not supported.

    ", + "refs": { + } + }, + "UpdateAliasRequest": { + "base": null, + "refs": { + } + }, + "UpdateKeyDescriptionRequest": { + "base": null, + "refs": { + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/kms/2014-11-01/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/kms/2014-11-01/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/kms/2014-11-01/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/kms/2014-11-01/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/kms/2014-11-01/paginators-1.json new file mode 100644 index 000000000..522d171f9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/kms/2014-11-01/paginators-1.json @@ -0,0 +1,32 @@ +{ + "pagination": { + "ListAliases": { + "limit_key": "Limit", + "input_token": "Marker", + "output_token": "NextMarker", + "more_results": "Truncated", + "result_key": "Aliases" + }, + "ListGrants": { + "limit_key": "Limit", + "input_token": "Marker", + "output_token": "NextMarker", + "more_results": "Truncated", + "result_key": "Grants" + }, + "ListKeyPolicies": { + "limit_key": "Limit", + "input_token": "Marker", + "output_token": "NextMarker", + "more_results": "Truncated", + "result_key": "PolicyNames" + }, + "ListKeys": { + "limit_key": "Limit", + "input_token": "Marker", + "output_token": "NextMarker", + "more_results": "Truncated", + "result_key": "Keys" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/lambda/2014-11-11/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/lambda/2014-11-11/api-2.json new file mode 100644 index 000000000..3e27914fe --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/lambda/2014-11-11/api-2.json @@ -0,0 +1,667 @@ +{ + "metadata":{ + "apiVersion":"2014-11-11", + "endpointPrefix":"lambda", + "serviceFullName":"AWS Lambda", + "signatureVersion":"v4", + "protocol":"rest-json" + }, + "operations":{ + "AddEventSource":{ + "name":"AddEventSource", + "http":{ + "method":"POST", + "requestUri":"/2014-11-13/event-source-mappings/" + }, + "input":{"shape":"AddEventSourceRequest"}, + "output":{"shape":"EventSourceConfiguration"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "DeleteFunction":{ + "name":"DeleteFunction", + "http":{ + "method":"DELETE", + "requestUri":"/2014-11-13/functions/{FunctionName}", + "responseCode":204 + }, + "input":{"shape":"DeleteFunctionRequest"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + } + ] + }, + "GetEventSource":{ + "name":"GetEventSource", + "http":{ + "method":"GET", + "requestUri":"/2014-11-13/event-source-mappings/{UUID}", + "responseCode":200 + }, + "input":{"shape":"GetEventSourceRequest"}, + "output":{"shape":"EventSourceConfiguration"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "GetFunction":{ + "name":"GetFunction", + 
"http":{ + "method":"GET", + "requestUri":"/2014-11-13/functions/{FunctionName}", + "responseCode":200 + }, + "input":{"shape":"GetFunctionRequest"}, + "output":{"shape":"GetFunctionResponse"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + } + ] + }, + "GetFunctionConfiguration":{ + "name":"GetFunctionConfiguration", + "http":{ + "method":"GET", + "requestUri":"/2014-11-13/functions/{FunctionName}/configuration", + "responseCode":200 + }, + "input":{"shape":"GetFunctionConfigurationRequest"}, + "output":{"shape":"FunctionConfiguration"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + } + ] + }, + "InvokeAsync":{ + "name":"InvokeAsync", + "http":{ + "method":"POST", + "requestUri":"/2014-11-13/functions/{FunctionName}/invoke-async/", + "responseCode":202 + }, + "input":{"shape":"InvokeAsyncRequest"}, + "output":{"shape":"InvokeAsyncResponse"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidRequestContentException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "ListEventSources":{ + "name":"ListEventSources", + "http":{ + "method":"GET", + "requestUri":"/2014-11-13/event-source-mappings/", + "responseCode":200 + }, + "input":{"shape":"ListEventSourcesRequest"}, + "output":{"shape":"ListEventSourcesResponse"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "ListFunctions":{ + "name":"ListFunctions", + "http":{ + "method":"GET", + "requestUri":"/2014-11-13/functions/", + "responseCode":200 + }, + "input":{"shape":"ListFunctionsRequest"}, + "output":{"shape":"ListFunctionsResponse"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "RemoveEventSource":{ + "name":"RemoveEventSource", + "http":{ + "method":"DELETE", + "requestUri":"/2014-11-13/event-source-mappings/{UUID}", + "responseCode":204 + }, + "input":{"shape":"RemoveEventSourceRequest"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "UpdateFunctionConfiguration":{ + "name":"UpdateFunctionConfiguration", + "http":{ + "method":"PUT", + "requestUri":"/2014-11-13/functions/{FunctionName}/configuration", + "responseCode":200 + }, + "input":{"shape":"UpdateFunctionConfigurationRequest"}, + "output":{"shape":"FunctionConfiguration"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + 
"exception":true + } + ] + }, + "UploadFunction":{ + "name":"UploadFunction", + "http":{ + "method":"PUT", + "requestUri":"/2014-11-13/functions/{FunctionName}", + "responseCode":201 + }, + "input":{"shape":"UploadFunctionRequest"}, + "output":{"shape":"FunctionConfiguration"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + } + ] + } + }, + "shapes":{ + "AddEventSourceRequest":{ + "type":"structure", + "required":[ + "EventSource", + "FunctionName", + "Role" + ], + "members":{ + "EventSource":{"shape":"String"}, + "FunctionName":{"shape":"FunctionName"}, + "Role":{"shape":"RoleArn"}, + "BatchSize":{"shape":"Integer"}, + "Parameters":{"shape":"Map"} + } + }, + "Blob":{ + "type":"blob", + "streaming":true + }, + "DeleteFunctionRequest":{ + "type":"structure", + "required":["FunctionName"], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "location":"uri", + "locationName":"FunctionName" + } + } + }, + "Description":{ + "type":"string", + "min":0, + "max":256 + }, + "EventSourceConfiguration":{ + "type":"structure", + "members":{ + "UUID":{"shape":"String"}, + "BatchSize":{"shape":"Integer"}, + "EventSource":{"shape":"String"}, + "FunctionName":{"shape":"FunctionName"}, + "Parameters":{"shape":"Map"}, + "Role":{"shape":"RoleArn"}, + "LastModified":{"shape":"Timestamp"}, + "IsActive":{"shape":"Boolean"}, + "Status":{"shape":"String"} + } + }, + "EventSourceList":{ + "type":"list", + "member":{"shape":"EventSourceConfiguration"} + }, + "FunctionArn":{ + "type":"string", + "pattern":"arn:aws:lambda:[a-z]{2}-[a-z]+-\\d{1}:\\d{12}:function:[a-zA-Z0-9-_]+(\\/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})?" 
+ }, + "FunctionCodeLocation":{ + "type":"structure", + "members":{ + "RepositoryType":{"shape":"String"}, + "Location":{"shape":"String"} + } + }, + "FunctionConfiguration":{ + "type":"structure", + "members":{ + "FunctionName":{"shape":"FunctionName"}, + "FunctionARN":{"shape":"FunctionArn"}, + "ConfigurationId":{"shape":"String"}, + "Runtime":{"shape":"Runtime"}, + "Role":{"shape":"RoleArn"}, + "Handler":{"shape":"Handler"}, + "Mode":{"shape":"Mode"}, + "CodeSize":{"shape":"Long"}, + "Description":{"shape":"Description"}, + "Timeout":{"shape":"Timeout"}, + "MemorySize":{"shape":"MemorySize"}, + "LastModified":{"shape":"Timestamp"} + } + }, + "FunctionList":{ + "type":"list", + "member":{"shape":"FunctionConfiguration"} + }, + "FunctionName":{ + "type":"string", + "min":1, + "max":64, + "pattern":"[a-zA-Z0-9-_]+" + }, + "GetEventSourceRequest":{ + "type":"structure", + "required":["UUID"], + "members":{ + "UUID":{ + "shape":"String", + "location":"uri", + "locationName":"UUID" + } + } + }, + "GetFunctionConfigurationRequest":{ + "type":"structure", + "required":["FunctionName"], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "location":"uri", + "locationName":"FunctionName" + } + } + }, + "GetFunctionRequest":{ + "type":"structure", + "required":["FunctionName"], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "location":"uri", + "locationName":"FunctionName" + } + } + }, + "GetFunctionResponse":{ + "type":"structure", + "members":{ + "Configuration":{"shape":"FunctionConfiguration"}, + "Code":{"shape":"FunctionCodeLocation"} + } + }, + "Handler":{ + "type":"string", + "pattern":"[a-zA-Z0-9./\\-_]+" + }, + "HttpStatus":{"type":"integer"}, + "Integer":{"type":"integer"}, + "InvalidParameterValueException":{ + "type":"structure", + "members":{ + "Type":{"shape":"String"}, + "message":{"shape":"String"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidRequestContentException":{ + "type":"structure", + "members":{ + "Type":{"shape":"String"}, + "message":{"shape":"String"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvokeAsyncRequest":{ + "type":"structure", + "required":[ + "FunctionName", + "InvokeArgs" + ], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "location":"uri", + "locationName":"FunctionName" + }, + "InvokeArgs":{"shape":"Blob"} + }, + "payload":"InvokeArgs" + }, + "InvokeAsyncResponse":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"HttpStatus", + "location":"statusCode" + } + } + }, + "ListEventSourcesRequest":{ + "type":"structure", + "members":{ + "EventSourceArn":{ + "shape":"String", + "location":"querystring", + "locationName":"EventSource" + }, + "FunctionName":{ + "shape":"FunctionName", + "location":"querystring", + "locationName":"FunctionName" + }, + "Marker":{ + "shape":"String", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"MaxListItems", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListEventSourcesResponse":{ + "type":"structure", + "members":{ + "NextMarker":{"shape":"String"}, + "EventSources":{"shape":"EventSourceList"} + } + }, + "ListFunctionsRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"String", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"MaxListItems", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListFunctionsResponse":{ + "type":"structure", + "members":{ + "NextMarker":{"shape":"String"}, + 
"Functions":{"shape":"FunctionList"} + } + }, + "Long":{"type":"long"}, + "Map":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "MaxListItems":{ + "type":"integer", + "min":1, + "max":10000 + }, + "MemorySize":{ + "type":"integer", + "min":128, + "max":1024 + }, + "Mode":{ + "type":"string", + "enum":["event"] + }, + "RemoveEventSourceRequest":{ + "type":"structure", + "required":["UUID"], + "members":{ + "UUID":{ + "shape":"String", + "location":"uri", + "locationName":"UUID" + } + } + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Type":{"shape":"String"}, + "Message":{"shape":"String"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "RoleArn":{ + "type":"string", + "pattern":"arn:aws:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+" + }, + "Runtime":{ + "type":"string", + "enum":["nodejs"] + }, + "ServiceException":{ + "type":"structure", + "members":{ + "Type":{"shape":"String"}, + "Message":{"shape":"String"} + }, + "error":{"httpStatusCode":500}, + "exception":true + }, + "String":{"type":"string"}, + "Timeout":{ + "type":"integer", + "min":1, + "max":60 + }, + "Timestamp":{"type":"string"}, + "UpdateFunctionConfigurationRequest":{ + "type":"structure", + "required":["FunctionName"], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "location":"uri", + "locationName":"FunctionName" + }, + "Role":{ + "shape":"RoleArn", + "location":"querystring", + "locationName":"Role" + }, + "Handler":{ + "shape":"Handler", + "location":"querystring", + "locationName":"Handler" + }, + "Description":{ + "shape":"Description", + "location":"querystring", + "locationName":"Description" + }, + "Timeout":{ + "shape":"Timeout", + "location":"querystring", + "locationName":"Timeout" + }, + "MemorySize":{ + "shape":"MemorySize", + "location":"querystring", + "locationName":"MemorySize" + } + } + }, + "UploadFunctionRequest":{ + "type":"structure", + "required":[ + "FunctionName", + "FunctionZip", + "Runtime", + "Role", + "Handler", + "Mode" + ], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "location":"uri", + "locationName":"FunctionName" + }, + "FunctionZip":{"shape":"Blob"}, + "Runtime":{ + "shape":"Runtime", + "location":"querystring", + "locationName":"Runtime" + }, + "Role":{ + "shape":"RoleArn", + "location":"querystring", + "locationName":"Role" + }, + "Handler":{ + "shape":"Handler", + "location":"querystring", + "locationName":"Handler" + }, + "Mode":{ + "shape":"Mode", + "location":"querystring", + "locationName":"Mode" + }, + "Description":{ + "shape":"Description", + "location":"querystring", + "locationName":"Description" + }, + "Timeout":{ + "shape":"Timeout", + "location":"querystring", + "locationName":"Timeout" + }, + "MemorySize":{ + "shape":"MemorySize", + "location":"querystring", + "locationName":"MemorySize" + } + }, + "payload":"FunctionZip" + }, + "Boolean":{"type":"boolean"} + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/lambda/2014-11-11/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/lambda/2014-11-11/docs-2.json new file mode 100644 index 000000000..4ef27761b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/lambda/2014-11-11/docs-2.json @@ -0,0 +1,303 @@ +{ + "operations": { + "AddEventSource": "

    Identifies a stream as an event source for an AWS Lambda function. It can be either an Amazon Kinesis stream or an Amazon DynamoDB stream. AWS Lambda invokes the specified function when records are posted to the stream.

    This is the pull model, where AWS Lambda invokes the function. For more information, go to AWS Lambda: How it Works in the AWS Lambda Developer Guide.

    This association between an Amazon Kinesis stream and an AWS Lambda function is called the event source mapping. You provide the configuration information (for example, which stream to read from and which AWS Lambda function to invoke) for the event source mapping in the request body.

    Each event source, such as a Kinesis stream, can only be associated with one AWS Lambda function. If you call AddEventSource for an event source that is already mapped to another AWS Lambda function, the existing mapping is updated to call the new function instead of the old one.

    This operation requires permission for the iam:PassRole action for the IAM role. It also requires permission for the lambda:AddEventSource action.
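The generated Go client in this vendor tree follows the newer 2015-03-31 API (also added in this change), where the same pull-model mapping is created with CreateEventSourceMapping. A hedged sketch with placeholder ARNs and names:

```go
// Create a Kinesis -> Lambda mapping with the 2015-03-31 client.
// TRIM_HORIZON mirrors the default InitialPositionInStream described
// for the older AddEventSource parameters.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lambda"
)

func main() {
	svc := lambda.New(session.New(aws.NewConfig().WithRegion("us-east-1")))
	mapping, err := svc.CreateEventSourceMapping(&lambda.CreateEventSourceMappingInput{
		EventSourceArn:   aws.String("arn:aws:kinesis:us-east-1:123456789012:stream/example"),
		FunctionName:     aws.String("my-function"),
		BatchSize:        aws.Int64(100),
		StartingPosition: aws.String("TRIM_HORIZON"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("mapping UUID:", aws.StringValue(mapping.UUID))
}
```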

    ", + "DeleteFunction": "

    Deletes the specified Lambda function code and configuration.

    This operation requires permission for the lambda:DeleteFunction action.

    ", + "GetEventSource": "

    Returns configuration information for the specified event source mapping (see AddEventSource).

    This operation requires permission for the lambda:GetEventSource action.

    ", + "GetFunction": "

    Returns the configuration information of the Lambda function and a presigned URL to the .zip file you uploaded with UploadFunction so you can download the .zip file. Note that the URL is valid for up to 10 minutes. The configuration information is the same information you provided as parameters when uploading the function.

    This operation requires permission for the lambda:GetFunction action.

    ", + "GetFunctionConfiguration": "

    Returns the configuration information of the Lambda function. This is the same information you provided as parameters when uploading the function by using UploadFunction.

    This operation requires permission for the lambda:GetFunctionConfiguration action.

    ", + "InvokeAsync": "

    Submits an invocation request to AWS Lambda. Upon receiving the request, Lambda executes the specified function asynchronously. To see the logs generated by the Lambda function execution, see the CloudWatch Logs console.

    This operation requires permission for the lambda:InvokeFunction action.
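Reusing the *lambda.Lambda client from the sketch above, an asynchronous invocation looks roughly like this (function name and payload are placeholders):

```go
// InvokeAsync posts the payload and returns once Lambda accepts it;
// a Status of 202 indicates success. bytes.NewReader satisfies the
// io.ReadSeeker that the InvokeArgs payload field requires.
resp, err := svc.InvokeAsync(&lambda.InvokeAsyncInput{
	FunctionName: aws.String("my-function"),
	InvokeArgs:   bytes.NewReader([]byte(`{"key":"value"}`)),
})
if err != nil {
	log.Fatal(err)
}
fmt.Println("status:", aws.Int64Value(resp.Status))
```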

    ", + "ListEventSources": "

    Returns a list of event source mappings you created using AddEventSource, where you identify a stream as an event source. This list does not include Amazon S3 event sources.

    For each mapping, the API returns configuration information. You can optionally specify filters to retrieve specific event source mappings.

    This operation requires permission for the lambda:ListEventSources action.

    ", + "ListFunctions": "

    Returns a list of your Lambda functions. For each function, the response includes the function configuration information. You must use GetFunction to retrieve the code for your function.

    This operation requires permission for the lambda:ListFunctions action.

    ", + "RemoveEventSource": "

    Removes an event source mapping. This means AWS Lambda will no longer invoke the function for events in the associated source.

    This operation requires permission for the lambda:RemoveEventSource action.

    ", + "UpdateFunctionConfiguration": "

    Updates the configuration parameters for the specified Lambda function by using the values provided in the request. You provide only the parameters you want to change. This operation must only be used on an existing Lambda function and cannot be used to update the function's code.

    This operation requires permission for the lambda:UpdateFunctionConfiguration action.
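A sketch of a partial update with the same client (only the listed parameters change; everything else keeps its current value):

```go
// Bump memory and timeout for a placeholder function; unspecified
// fields such as Handler and Role are left untouched by Lambda.
cfg, err := svc.UpdateFunctionConfiguration(&lambda.UpdateFunctionConfigurationInput{
	FunctionName: aws.String("my-function"),
	MemorySize:   aws.Int64(256), // must be a multiple of 64 MB
	Timeout:      aws.Int64(10),  // seconds
})
if err != nil {
	log.Fatal(err)
}
fmt.Println("last modified:", aws.StringValue(cfg.LastModified))
```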

    ", + "UploadFunction": "

    Creates a new Lambda function or updates an existing function. The function metadata is created from the request parameters, and the code for the function is provided by a .zip file in the request body. If the function name already exists, the existing Lambda function is updated with the new code and metadata.

    This operation requires permission for the lambda:UploadFunction action.
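The vendored Go client exposes the 2015-03-31 replacement for this call, CreateFunction, which takes the zip inline; a sketch with placeholder role, handler, and file path (svc as above):

```go
// Upload a placeholder zip as a new function. Under the 2014-11-11 API
// documented here, the only runtime was "nodejs".
zipBytes, err := ioutil.ReadFile("function.zip") // requires "io/ioutil"
if err != nil {
	log.Fatal(err)
}
fn, err := svc.CreateFunction(&lambda.CreateFunctionInput{
	FunctionName: aws.String("my-function"),
	Runtime:      aws.String("nodejs"),
	Role:         aws.String("arn:aws:iam::123456789012:role/ExampleExecRole"),
	Handler:      aws.String("index.handler"),
	Code:         &lambda.FunctionCode{ZipFile: zipBytes},
})
if err != nil {
	log.Fatal(err)
}
fmt.Println("created:", aws.StringValue(fn.FunctionArn))
```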

    " + }, + "service": "AWS Lambda

    Overview

    This is the AWS Lambda API Reference. The AWS Lambda Developer Guide provides additional information. For the service overview, go to What is AWS Lambda, and for information about how the service works, go to AWS Lambda: How it Works in the AWS Lambda Developer Guide.

    ", + "shapes": { + "AddEventSourceRequest": { + "base": null, + "refs": { + } + }, + "Blob": { + "base": null, + "refs": { + "InvokeAsyncRequest$InvokeArgs": "

    JSON that you want to provide to your Lambda function as input.

    ", + "UploadFunctionRequest$FunctionZip": "

    A .zip file containing your packaged source code. For more information about creating a .zip file, go to AWS Lambda: How it Works in the AWS Lambda Developer Guide.

    " + } + }, + "DeleteFunctionRequest": { + "base": null, + "refs": { + } + }, + "Description": { + "base": null, + "refs": { + "FunctionConfiguration$Description": "

    The user-provided description.

    ", + "UpdateFunctionConfigurationRequest$Description": "

    A short user-defined function description. Lambda does not use this value. Assign a meaningful description as you see fit.

    ", + "UploadFunctionRequest$Description": "

    A short, user-defined function description. Lambda does not use this value. Assign a meaningful description as you see fit.

    " + } + }, + "EventSourceConfiguration": { + "base": "

    Describes mapping between an Amazon Kinesis stream and a Lambda function.

    ", + "refs": { + "EventSourceList$member": null + } + }, + "EventSourceList": { + "base": null, + "refs": { + "ListEventSourcesResponse$EventSources": "

    An array of EventSourceConfiguration objects.

    " + } + }, + "FunctionArn": { + "base": null, + "refs": { + "FunctionConfiguration$FunctionARN": "

    The Amazon Resource Name (ARN) assigned to the function.

    " + } + }, + "FunctionCodeLocation": { + "base": "

    The object for the Lambda function location.

    ", + "refs": { + "GetFunctionResponse$Code": null + } + }, + "FunctionConfiguration": { + "base": "

    A complex type that describes function metadata.

    ", + "refs": { + "FunctionList$member": null, + "GetFunctionResponse$Configuration": null + } + }, + "FunctionList": { + "base": null, + "refs": { + "ListFunctionsResponse$Functions": "

    A list of Lambda functions.

    " + } + }, + "FunctionName": { + "base": null, + "refs": { + "AddEventSourceRequest$FunctionName": "

    The Lambda function to invoke when AWS Lambda detects an event on the stream.

    ", + "DeleteFunctionRequest$FunctionName": "

    The Lambda function to delete.

    ", + "EventSourceConfiguration$FunctionName": "

    The Lambda function to invoke when AWS Lambda detects an event on the stream.

    ", + "FunctionConfiguration$FunctionName": "

    The name of the function.

    ", + "GetFunctionConfigurationRequest$FunctionName": "

    The name of the Lambda function for which you want to retrieve the configuration information.

    ", + "GetFunctionRequest$FunctionName": "

    The Lambda function name.

    ", + "InvokeAsyncRequest$FunctionName": "

    The Lambda function name.

    ", + "ListEventSourcesRequest$FunctionName": "

    The name of the AWS Lambda function.

    ", + "UpdateFunctionConfigurationRequest$FunctionName": "

    The name of the Lambda function.

    ", + "UploadFunctionRequest$FunctionName": "

    The name you want to assign to the function you are uploading. The function names appear in the console and are returned in the ListFunctions API. Function names are used to specify functions to other AWS Lambda APIs, such as InvokeAsync.

    " + } + }, + "GetEventSourceRequest": { + "base": null, + "refs": { + } + }, + "GetFunctionConfigurationRequest": { + "base": null, + "refs": { + } + }, + "GetFunctionRequest": { + "base": null, + "refs": { + } + }, + "GetFunctionResponse": { + "base": "

    This response contains the object for the AWS Lambda function location (see API_FunctionCodeLocation).

    ", + "refs": { + } + }, + "Handler": { + "base": null, + "refs": { + "FunctionConfiguration$Handler": "

    The function Lambda calls to begin executing your function.

    ", + "UpdateFunctionConfigurationRequest$Handler": "

    The function that Lambda calls to begin executing your function. For Node.js, it is the module-name.export value in your function.

    ", + "UploadFunctionRequest$Handler": "

    The function that Lambda calls to begin execution. For Node.js, it is the module-name.export value in your function.

    " + } + }, + "HttpStatus": { + "base": null, + "refs": { + "InvokeAsyncResponse$Status": "

    It will be 202 upon success.

    " + } + }, + "Integer": { + "base": null, + "refs": { + "AddEventSourceRequest$BatchSize": "

    The largest number of records that AWS Lambda will give to your function in a single event. The default is 100 records.

    ", + "EventSourceConfiguration$BatchSize": "

    The largest number of records that AWS Lambda will POST in the invocation request to your function.

    " + } + }, + "InvalidParameterValueException": { + "base": "

    One of the parameters in the request is invalid. For example, if you provided an IAM role in the UploadFunction or UpdateFunctionConfiguration API that AWS Lambda is unable to assume, you will get this exception.

    ", + "refs": { + } + }, + "InvalidRequestContentException": { + "base": "

    The request body could not be parsed as JSON.

    ", + "refs": { + } + }, + "InvokeAsyncRequest": { + "base": null, + "refs": { + } + }, + "InvokeAsyncResponse": { + "base": "

    Upon success, it returns an empty response. Otherwise, it throws an exception.

    ", + "refs": { + } + }, + "ListEventSourcesRequest": { + "base": null, + "refs": { + } + }, + "ListEventSourcesResponse": { + "base": "

    Contains a list of event sources (see API_EventSourceConfiguration)

    ", + "refs": { + } + }, + "ListFunctionsRequest": { + "base": null, + "refs": { + } + }, + "ListFunctionsResponse": { + "base": "

    Contains a list of AWS Lambda function configurations (see API_FunctionConfiguration).

    ", + "refs": { + } + }, + "Long": { + "base": null, + "refs": { + "FunctionConfiguration$CodeSize": "

    The size, in bytes, of the function .zip file you uploaded.

    " + } + }, + "Map": { + "base": null, + "refs": { + "AddEventSourceRequest$Parameters": "

    A map (key-value pairs) defining the configuration for AWS Lambda to use when reading the event source. Currently, AWS Lambda supports only the InitialPositionInStream key. The valid values are: \"TRIM_HORIZON\" and \"LATEST\". The default value is \"TRIM_HORIZON\". For more information, go to ShardIteratorType in the Amazon Kinesis Service API Reference.

    ", + "EventSourceConfiguration$Parameters": "

    The map (key-value pairs) defining the configuration for AWS Lambda to use when reading the event source.

    " + } + }, + "MaxListItems": { + "base": null, + "refs": { + "ListEventSourcesRequest$MaxItems": "

    Optional integer. Specifies the maximum number of event sources to return in the response. This value must be greater than 0.

    ", + "ListFunctionsRequest$MaxItems": "

    Optional integer. Specifies the maximum number of AWS Lambda functions to return in the response. This parameter value must be greater than 0.

    " + } + }, + "MemorySize": { + "base": null, + "refs": { + "FunctionConfiguration$MemorySize": "

    The memory size, in MB, you configured for the function. Must be a multiple of 64 MB.

    ", + "UpdateFunctionConfigurationRequest$MemorySize": "

    The amount of memory, in MB, your Lambda function is given. Lambda uses this memory size to infer the amount of CPU allocated to your function. Your function use-case determines your CPU and memory requirements. For example, a database operation might need less memory compared to an image processing function. The default value is 128 MB. The value must be a multiple of 64 MB.

    ", + "UploadFunctionRequest$MemorySize": "

    The amount of memory, in MB, your Lambda function is given. Lambda uses this memory size to infer the amount of CPU allocated to your function. Your function use-case determines your CPU and memory requirements. For example, a database operation might need less memory than an image processing function. The default value is 128 MB. The value must be a multiple of 64 MB.

    " + } + }, + "Mode": { + "base": null, + "refs": { + "FunctionConfiguration$Mode": "

    The type of the Lambda function you uploaded.

    ", + "UploadFunctionRequest$Mode": "

    How the Lambda function will be invoked. Lambda supports only the \"event\" mode.

    " + } + }, + "RemoveEventSourceRequest": { + "base": null, + "refs": { + } + }, + "ResourceNotFoundException": { + "base": "

    The function or the event source specified in the request does not exist.

    ", + "refs": { + } + }, + "RoleArn": { + "base": null, + "refs": { + "AddEventSourceRequest$Role": "

    The ARN of the IAM role (invocation role) that AWS Lambda can assume to read from the stream and invoke the function.

    ", + "EventSourceConfiguration$Role": "

    The ARN of the IAM role (invocation role) that AWS Lambda can assume to read from the stream and invoke the function.

    ", + "FunctionConfiguration$Role": "

    The Amazon Resource Name (ARN) of the IAM role that Lambda assumes when it executes your function to access any other Amazon Web Services (AWS) resources.

    ", + "UpdateFunctionConfigurationRequest$Role": "

    The Amazon Resource Name (ARN) of the IAM role that Lambda will assume when it executes your function.

    ", + "UploadFunctionRequest$Role": "

    The Amazon Resource Name (ARN) of the IAM role that Lambda assumes when it executes your function to access any other Amazon Web Services (AWS) resources.

    " + } + }, + "Runtime": { + "base": null, + "refs": { + "FunctionConfiguration$Runtime": "

    The runtime environment for the Lambda function.

    ", + "UploadFunctionRequest$Runtime": "

    The runtime environment for the Lambda function you are uploading. Currently, Lambda supports only \"nodejs\" as the runtime.

    " + } + }, + "ServiceException": { + "base": "

    The AWS Lambda service encountered an internal error.

    ", + "refs": { + } + }, + "String": { + "base": null, + "refs": { + "AddEventSourceRequest$EventSource": "

    The Amazon Resource Name (ARN) of the Amazon Kinesis stream that is the event source. Any record added to this stream causes AWS Lambda to invoke your Lambda function. AWS Lambda POSTs the Amazon Kinesis event, containing records, to your Lambda function as JSON.

    ", + "EventSourceConfiguration$UUID": "

    The AWS Lambda assigned opaque identifier for the mapping.

    ", + "EventSourceConfiguration$EventSource": "

    The Amazon Resource Name (ARN) of the Amazon Kinesis stream that is the source of events.

    ", + "EventSourceConfiguration$Status": "

    The description of the health of the event source mapping. Valid values are: \"PENDING\", \"OK\", and \"PROBLEM:message\". Initially this status is \"PENDING\". When AWS Lambda begins processing events, it changes the status to \"OK\".

    ", + "FunctionCodeLocation$RepositoryType": "

    The repository from which you can download the function.

    ", + "FunctionCodeLocation$Location": "

    The presigned URL you can use to download the function's .zip file that you previously uploaded. The URL is valid for up to 10 minutes.

    ", + "FunctionConfiguration$ConfigurationId": "

    A Lambda-assigned unique identifier for the current function code and related configuration.

    ", + "GetEventSourceRequest$UUID": "

    The AWS Lambda assigned ID of the event source mapping.

    ", + "InvalidParameterValueException$Type": null, + "InvalidParameterValueException$message": null, + "InvalidRequestContentException$Type": null, + "InvalidRequestContentException$message": null, + "ListEventSourcesRequest$EventSourceArn": "

    The Amazon Resource Name (ARN) of the Amazon Kinesis stream.

    ", + "ListEventSourcesRequest$Marker": "

    Optional string. An opaque pagination token returned from a previous ListEventSources operation. If present, the listing continues from where the previous call left off.

    ", + "ListEventSourcesResponse$NextMarker": "

    A string, present if there are more event source mappings.

    ", + "ListFunctionsRequest$Marker": "

    Optional string. An opaque pagination token returned from a previous ListFunctions operation. If present, indicates where to continue the listing.

    ", + "ListFunctionsResponse$NextMarker": "

    A string, present if there are more functions.
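The Marker/NextMarker pair drives the same manual loop as the KMS sketch earlier; for completeness, with the lambda client from above:

```go
// Page through all functions, 50 at a time; a nil NextMarker means
// the listing is complete.
in := &lambda.ListFunctionsInput{MaxItems: aws.Int64(50)}
for {
	out, err := svc.ListFunctions(in)
	if err != nil {
		log.Fatal(err)
	}
	for _, f := range out.Functions {
		fmt.Println(aws.StringValue(f.FunctionName))
	}
	if out.NextMarker == nil {
		break
	}
	in.Marker = out.NextMarker
}
```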

    ", + "Map$key": null, + "Map$value": null, + "RemoveEventSourceRequest$UUID": "

    The event source mapping ID.

    ", + "ResourceNotFoundException$Type": null, + "ResourceNotFoundException$Message": null, + "ServiceException$Type": null, + "ServiceException$Message": null + } + }, + "Timeout": { + "base": null, + "refs": { + "FunctionConfiguration$Timeout": "

    The function execution time at which Lambda should terminate the function. Because the execution time has cost implications, we recommend you set this value based on your expected execution time. The default is 3 seconds.

    ", + "UpdateFunctionConfigurationRequest$Timeout": "

    The function execution time at which Lambda should terminate the function. Because the execution time has cost implications, we recommend you set this value based on your expected execution time. The default is 3 seconds.

    ", + "UploadFunctionRequest$Timeout": "

    The function execution time at which Lambda should terminate the function. Because the execution time has cost implications, we recommend you set this value based on your expected execution time. The default is 3 seconds.

    " + } + }, + "Timestamp": { + "base": null, + "refs": { + "EventSourceConfiguration$LastModified": "

    The UTC time string indicating the last time the event mapping was updated.

    ", + "FunctionConfiguration$LastModified": "

    The timestamp of the last time you updated the function.

    " + } + }, + "UpdateFunctionConfigurationRequest": { + "base": null, + "refs": { + } + }, + "UploadFunctionRequest": { + "base": null, + "refs": { + } + }, + "Boolean": { + "base": null, + "refs": { + "EventSourceConfiguration$IsActive": "

    Indicates whether the event source mapping is currently honored. Events are only processed if IsActive is true.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/lambda/2014-11-11/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/lambda/2014-11-11/paginators-1.json new file mode 100644 index 000000000..deaf07d38 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/lambda/2014-11-11/paginators-1.json @@ -0,0 +1,16 @@ +{ + "pagination": { + "ListEventSources": { + "input_token": "Marker", + "output_token": "NextMarker", + "limit_key": "MaxItems", + "result_key": "EventSources" + }, + "ListFunctions": { + "input_token": "Marker", + "output_token": "NextMarker", + "limit_key": "MaxItems", + "result_key": "Functions" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/lambda/2015-03-31/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/lambda/2015-03-31/api-2.json new file mode 100644 index 000000000..93c5e2e72 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/lambda/2015-03-31/api-2.json @@ -0,0 +1,1342 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-03-31", + "endpointPrefix":"lambda", + "protocol":"rest-json", + "serviceFullName":"AWS Lambda", + "signatureVersion":"v4" + }, + "operations":{ + "AddPermission":{ + "name":"AddPermission", + "http":{ + "method":"POST", + "requestUri":"/2015-03-31/functions/{FunctionName}/policy", + "responseCode":201 + }, + "input":{"shape":"AddPermissionRequest"}, + "output":{"shape":"AddPermissionResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceConflictException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"PolicyLengthExceededException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "CreateAlias":{ + "name":"CreateAlias", + "http":{ + "method":"POST", + "requestUri":"/2015-03-31/functions/{FunctionName}/aliases", + "responseCode":201 + }, + "input":{"shape":"CreateAliasRequest"}, + "output":{"shape":"AliasConfiguration"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceConflictException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "CreateEventSourceMapping":{ + "name":"CreateEventSourceMapping", + "http":{ + "method":"POST", + "requestUri":"/2015-03-31/event-source-mappings/", + "responseCode":202 + }, + "input":{"shape":"CreateEventSourceMappingRequest"}, + "output":{"shape":"EventSourceMappingConfiguration"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceConflictException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "CreateFunction":{ + "name":"CreateFunction", + "http":{ + "method":"POST", + "requestUri":"/2015-03-31/functions", + "responseCode":201 + }, + "input":{"shape":"CreateFunctionRequest"}, + "output":{"shape":"FunctionConfiguration"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceConflictException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"CodeStorageExceededException"} + ] + }, + "DeleteAlias":{ + "name":"DeleteAlias", + "http":{ + "method":"DELETE", + "requestUri":"/2015-03-31/functions/{FunctionName}/aliases/{Name}", + "responseCode":204 + }, + "input":{"shape":"DeleteAliasRequest"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"TooManyRequestsException"} + ] + }, + 
"DeleteEventSourceMapping":{ + "name":"DeleteEventSourceMapping", + "http":{ + "method":"DELETE", + "requestUri":"/2015-03-31/event-source-mappings/{UUID}", + "responseCode":202 + }, + "input":{"shape":"DeleteEventSourceMappingRequest"}, + "output":{"shape":"EventSourceMappingConfiguration"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "DeleteFunction":{ + "name":"DeleteFunction", + "http":{ + "method":"DELETE", + "requestUri":"/2015-03-31/functions/{FunctionName}", + "responseCode":204 + }, + "input":{"shape":"DeleteFunctionRequest"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceConflictException"} + ] + }, + "GetAlias":{ + "name":"GetAlias", + "http":{ + "method":"GET", + "requestUri":"/2015-03-31/functions/{FunctionName}/aliases/{Name}", + "responseCode":200 + }, + "input":{"shape":"GetAliasRequest"}, + "output":{"shape":"AliasConfiguration"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "GetEventSourceMapping":{ + "name":"GetEventSourceMapping", + "http":{ + "method":"GET", + "requestUri":"/2015-03-31/event-source-mappings/{UUID}", + "responseCode":200 + }, + "input":{"shape":"GetEventSourceMappingRequest"}, + "output":{"shape":"EventSourceMappingConfiguration"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "GetFunction":{ + "name":"GetFunction", + "http":{ + "method":"GET", + "requestUri":"/2015-03-31/functions/{FunctionName}", + "responseCode":200 + }, + "input":{"shape":"GetFunctionRequest"}, + "output":{"shape":"GetFunctionResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InvalidParameterValueException"} + ] + }, + "GetFunctionConfiguration":{ + "name":"GetFunctionConfiguration", + "http":{ + "method":"GET", + "requestUri":"/2015-03-31/functions/{FunctionName}/configuration", + "responseCode":200 + }, + "input":{"shape":"GetFunctionConfigurationRequest"}, + "output":{"shape":"FunctionConfiguration"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InvalidParameterValueException"} + ] + }, + "GetPolicy":{ + "name":"GetPolicy", + "http":{ + "method":"GET", + "requestUri":"/2015-03-31/functions/{FunctionName}/policy", + "responseCode":200 + }, + "input":{"shape":"GetPolicyRequest"}, + "output":{"shape":"GetPolicyResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InvalidParameterValueException"} + ] + }, + "Invoke":{ + "name":"Invoke", + "http":{ + "method":"POST", + "requestUri":"/2015-03-31/functions/{FunctionName}/invocations" + }, + "input":{"shape":"InvocationRequest"}, + "output":{"shape":"InvocationResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestContentException"}, + {"shape":"RequestTooLargeException"}, + {"shape":"UnsupportedMediaTypeException"}, + 
{"shape":"TooManyRequestsException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"EC2UnexpectedException"}, + {"shape":"SubnetIPAddressLimitReachedException"}, + {"shape":"ENILimitReachedException"}, + {"shape":"EC2ThrottledException"}, + {"shape":"EC2AccessDeniedException"}, + {"shape":"InvalidSubnetIDException"}, + {"shape":"InvalidSecurityGroupIDException"} + ] + }, + "InvokeAsync":{ + "name":"InvokeAsync", + "http":{ + "method":"POST", + "requestUri":"/2014-11-13/functions/{FunctionName}/invoke-async/", + "responseCode":202 + }, + "input":{"shape":"InvokeAsyncRequest"}, + "output":{"shape":"InvokeAsyncResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestContentException"} + ], + "deprecated":true + }, + "ListAliases":{ + "name":"ListAliases", + "http":{ + "method":"GET", + "requestUri":"/2015-03-31/functions/{FunctionName}/aliases", + "responseCode":200 + }, + "input":{"shape":"ListAliasesRequest"}, + "output":{"shape":"ListAliasesResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "ListEventSourceMappings":{ + "name":"ListEventSourceMappings", + "http":{ + "method":"GET", + "requestUri":"/2015-03-31/event-source-mappings/", + "responseCode":200 + }, + "input":{"shape":"ListEventSourceMappingsRequest"}, + "output":{"shape":"ListEventSourceMappingsResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "ListFunctions":{ + "name":"ListFunctions", + "http":{ + "method":"GET", + "requestUri":"/2015-03-31/functions/", + "responseCode":200 + }, + "input":{"shape":"ListFunctionsRequest"}, + "output":{"shape":"ListFunctionsResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "ListVersionsByFunction":{ + "name":"ListVersionsByFunction", + "http":{ + "method":"GET", + "requestUri":"/2015-03-31/functions/{FunctionName}/versions", + "responseCode":200 + }, + "input":{"shape":"ListVersionsByFunctionRequest"}, + "output":{"shape":"ListVersionsByFunctionResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "PublishVersion":{ + "name":"PublishVersion", + "http":{ + "method":"POST", + "requestUri":"/2015-03-31/functions/{FunctionName}/versions", + "responseCode":201 + }, + "input":{"shape":"PublishVersionRequest"}, + "output":{"shape":"FunctionConfiguration"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"CodeStorageExceededException"} + ] + }, + "RemovePermission":{ + "name":"RemovePermission", + "http":{ + "method":"DELETE", + "requestUri":"/2015-03-31/functions/{FunctionName}/policy/{StatementId}", + "responseCode":204 + }, + "input":{"shape":"RemovePermissionRequest"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "UpdateAlias":{ + "name":"UpdateAlias", + "http":{ + "method":"PUT", + "requestUri":"/2015-03-31/functions/{FunctionName}/aliases/{Name}", + "responseCode":200 + }, + 
"input":{"shape":"UpdateAliasRequest"}, + "output":{"shape":"AliasConfiguration"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "UpdateEventSourceMapping":{ + "name":"UpdateEventSourceMapping", + "http":{ + "method":"PUT", + "requestUri":"/2015-03-31/event-source-mappings/{UUID}", + "responseCode":202 + }, + "input":{"shape":"UpdateEventSourceMappingRequest"}, + "output":{"shape":"EventSourceMappingConfiguration"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ResourceConflictException"} + ] + }, + "UpdateFunctionCode":{ + "name":"UpdateFunctionCode", + "http":{ + "method":"PUT", + "requestUri":"/2015-03-31/functions/{FunctionName}/code", + "responseCode":200 + }, + "input":{"shape":"UpdateFunctionCodeRequest"}, + "output":{"shape":"FunctionConfiguration"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"CodeStorageExceededException"} + ] + }, + "UpdateFunctionConfiguration":{ + "name":"UpdateFunctionConfiguration", + "http":{ + "method":"PUT", + "requestUri":"/2015-03-31/functions/{FunctionName}/configuration", + "responseCode":200 + }, + "input":{"shape":"UpdateFunctionConfigurationRequest"}, + "output":{"shape":"FunctionConfiguration"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"TooManyRequestsException"} + ] + } + }, + "shapes":{ + "Action":{ + "type":"string", + "pattern":"(lambda:[*]|lambda:[a-zA-Z]+|[*])" + }, + "AddPermissionRequest":{ + "type":"structure", + "required":[ + "FunctionName", + "StatementId", + "Action", + "Principal" + ], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "location":"uri", + "locationName":"FunctionName" + }, + "StatementId":{"shape":"StatementId"}, + "Action":{"shape":"Action"}, + "Principal":{"shape":"Principal"}, + "SourceArn":{"shape":"Arn"}, + "SourceAccount":{"shape":"SourceOwner"}, + "EventSourceToken":{"shape":"EventSourceToken"}, + "Qualifier":{ + "shape":"Qualifier", + "location":"querystring", + "locationName":"Qualifier" + } + } + }, + "AddPermissionResponse":{ + "type":"structure", + "members":{ + "Statement":{"shape":"String"} + } + }, + "Alias":{ + "type":"string", + "max":128, + "min":1, + "pattern":"(?!^[0-9]+$)([a-zA-Z0-9-_]+)" + }, + "AliasConfiguration":{ + "type":"structure", + "members":{ + "AliasArn":{"shape":"FunctionArn"}, + "Name":{"shape":"Alias"}, + "FunctionVersion":{"shape":"Version"}, + "Description":{"shape":"Description"} + } + }, + "AliasList":{ + "type":"list", + "member":{"shape":"AliasConfiguration"} + }, + "Arn":{ + "type":"string", + "pattern":"arn:aws:([a-zA-Z0-9\\-])+:([a-z]{2}-[a-z]+-\\d{1})?:(\\d{12})?:(.*)" + }, + "BatchSize":{ + "type":"integer", + "max":10000, + "min":1 + }, + "Blob":{"type":"blob"}, + "BlobStream":{ + "type":"blob", + "streaming":true + }, + "Boolean":{"type":"boolean"}, + "CodeStorageExceededException":{ + "type":"structure", + "members":{ + "Type":{"shape":"String"}, + "message":{"shape":"String"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "CreateAliasRequest":{ + "type":"structure", + "required":[ + "FunctionName", + "Name", + "FunctionVersion" + ], + 
"members":{ + "FunctionName":{ + "shape":"FunctionName", + "location":"uri", + "locationName":"FunctionName" + }, + "Name":{"shape":"Alias"}, + "FunctionVersion":{"shape":"Version"}, + "Description":{"shape":"Description"} + } + }, + "CreateEventSourceMappingRequest":{ + "type":"structure", + "required":[ + "EventSourceArn", + "FunctionName", + "StartingPosition" + ], + "members":{ + "EventSourceArn":{"shape":"Arn"}, + "FunctionName":{"shape":"FunctionName"}, + "Enabled":{"shape":"Enabled"}, + "BatchSize":{"shape":"BatchSize"}, + "StartingPosition":{"shape":"EventSourcePosition"} + } + }, + "CreateFunctionRequest":{ + "type":"structure", + "required":[ + "FunctionName", + "Runtime", + "Role", + "Handler", + "Code" + ], + "members":{ + "FunctionName":{"shape":"FunctionName"}, + "Runtime":{"shape":"Runtime"}, + "Role":{"shape":"RoleArn"}, + "Handler":{"shape":"Handler"}, + "Code":{"shape":"FunctionCode"}, + "Description":{"shape":"Description"}, + "Timeout":{"shape":"Timeout"}, + "MemorySize":{"shape":"MemorySize"}, + "Publish":{"shape":"Boolean"}, + "VpcConfig":{"shape":"VpcConfig"} + } + }, + "Date":{"type":"timestamp"}, + "DeleteAliasRequest":{ + "type":"structure", + "required":[ + "FunctionName", + "Name" + ], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "location":"uri", + "locationName":"FunctionName" + }, + "Name":{ + "shape":"Alias", + "location":"uri", + "locationName":"Name" + } + } + }, + "DeleteEventSourceMappingRequest":{ + "type":"structure", + "required":["UUID"], + "members":{ + "UUID":{ + "shape":"String", + "location":"uri", + "locationName":"UUID" + } + } + }, + "DeleteFunctionRequest":{ + "type":"structure", + "required":["FunctionName"], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "location":"uri", + "locationName":"FunctionName" + }, + "Qualifier":{ + "shape":"Qualifier", + "location":"querystring", + "locationName":"Qualifier" + } + } + }, + "Description":{ + "type":"string", + "max":256, + "min":0 + }, + "EC2AccessDeniedException":{ + "type":"structure", + "members":{ + "Type":{"shape":"String"}, + "Message":{"shape":"String"} + }, + "error":{"httpStatusCode":502}, + "exception":true + }, + "EC2ThrottledException":{ + "type":"structure", + "members":{ + "Type":{"shape":"String"}, + "Message":{"shape":"String"} + }, + "error":{"httpStatusCode":502}, + "exception":true + }, + "EC2UnexpectedException":{ + "type":"structure", + "members":{ + "Type":{"shape":"String"}, + "Message":{"shape":"String"}, + "EC2ErrorCode":{"shape":"String"} + }, + "error":{"httpStatusCode":502}, + "exception":true + }, + "ENILimitReachedException":{ + "type":"structure", + "members":{ + "Type":{"shape":"String"}, + "Message":{"shape":"String"} + }, + "error":{"httpStatusCode":502}, + "exception":true + }, + "Enabled":{"type":"boolean"}, + "EventSourceMappingConfiguration":{ + "type":"structure", + "members":{ + "UUID":{"shape":"String"}, + "BatchSize":{"shape":"BatchSize"}, + "EventSourceArn":{"shape":"Arn"}, + "FunctionArn":{"shape":"FunctionArn"}, + "LastModified":{"shape":"Date"}, + "LastProcessingResult":{"shape":"String"}, + "State":{"shape":"String"}, + "StateTransitionReason":{"shape":"String"} + } + }, + "EventSourceMappingsList":{ + "type":"list", + "member":{"shape":"EventSourceMappingConfiguration"} + }, + "EventSourcePosition":{ + "type":"string", + "enum":[ + "TRIM_HORIZON", + "LATEST" + ] + }, + "EventSourceToken":{ + "type":"string", + "max":256, + "min":0, + "pattern":"[a-zA-Z0-9._\\-]+" + }, + "FunctionArn":{ + "type":"string", + 
"pattern":"arn:aws:lambda:[a-z]{2}-[a-z]+-\\d{1}:\\d{12}:function:[a-zA-Z0-9-_]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?" + }, + "FunctionCode":{ + "type":"structure", + "members":{ + "ZipFile":{"shape":"Blob"}, + "S3Bucket":{"shape":"S3Bucket"}, + "S3Key":{"shape":"S3Key"}, + "S3ObjectVersion":{"shape":"S3ObjectVersion"} + } + }, + "FunctionCodeLocation":{ + "type":"structure", + "members":{ + "RepositoryType":{"shape":"String"}, + "Location":{"shape":"String"} + } + }, + "FunctionConfiguration":{ + "type":"structure", + "members":{ + "FunctionName":{"shape":"FunctionName"}, + "FunctionArn":{"shape":"FunctionArn"}, + "Runtime":{"shape":"Runtime"}, + "Role":{"shape":"RoleArn"}, + "Handler":{"shape":"Handler"}, + "CodeSize":{"shape":"Long"}, + "Description":{"shape":"Description"}, + "Timeout":{"shape":"Timeout"}, + "MemorySize":{"shape":"MemorySize"}, + "LastModified":{"shape":"Timestamp"}, + "CodeSha256":{"shape":"String"}, + "Version":{"shape":"Version"}, + "VpcConfig":{"shape":"VpcConfigResponse"} + } + }, + "FunctionList":{ + "type":"list", + "member":{"shape":"FunctionConfiguration"} + }, + "FunctionName":{ + "type":"string", + "max":140, + "min":1, + "pattern":"(arn:aws:lambda:)?([a-z]{2}-[a-z]+-\\d{1}:)?(\\d{12}:)?(function:)?([a-zA-Z0-9-_]+)(:(\\$LATEST|[a-zA-Z0-9-_]+))?" + }, + "GetAliasRequest":{ + "type":"structure", + "required":[ + "FunctionName", + "Name" + ], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "location":"uri", + "locationName":"FunctionName" + }, + "Name":{ + "shape":"Alias", + "location":"uri", + "locationName":"Name" + } + } + }, + "GetEventSourceMappingRequest":{ + "type":"structure", + "required":["UUID"], + "members":{ + "UUID":{ + "shape":"String", + "location":"uri", + "locationName":"UUID" + } + } + }, + "GetFunctionConfigurationRequest":{ + "type":"structure", + "required":["FunctionName"], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "location":"uri", + "locationName":"FunctionName" + }, + "Qualifier":{ + "shape":"Qualifier", + "location":"querystring", + "locationName":"Qualifier" + } + } + }, + "GetFunctionRequest":{ + "type":"structure", + "required":["FunctionName"], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "location":"uri", + "locationName":"FunctionName" + }, + "Qualifier":{ + "shape":"Qualifier", + "location":"querystring", + "locationName":"Qualifier" + } + } + }, + "GetFunctionResponse":{ + "type":"structure", + "members":{ + "Configuration":{"shape":"FunctionConfiguration"}, + "Code":{"shape":"FunctionCodeLocation"} + } + }, + "GetPolicyRequest":{ + "type":"structure", + "required":["FunctionName"], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "location":"uri", + "locationName":"FunctionName" + }, + "Qualifier":{ + "shape":"Qualifier", + "location":"querystring", + "locationName":"Qualifier" + } + } + }, + "GetPolicyResponse":{ + "type":"structure", + "members":{ + "Policy":{"shape":"String"} + } + }, + "Handler":{ + "type":"string", + "max":128, + "pattern":"[^\\s]+" + }, + "HttpStatus":{"type":"integer"}, + "Integer":{"type":"integer"}, + "InvalidParameterValueException":{ + "type":"structure", + "members":{ + "Type":{"shape":"String"}, + "message":{"shape":"String"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidRequestContentException":{ + "type":"structure", + "members":{ + "Type":{"shape":"String"}, + "message":{"shape":"String"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidSecurityGroupIDException":{ + "type":"structure", + 
"members":{ + "Type":{"shape":"String"}, + "Message":{"shape":"String"} + }, + "error":{"httpStatusCode":502}, + "exception":true + }, + "InvalidSubnetIDException":{ + "type":"structure", + "members":{ + "Type":{"shape":"String"}, + "Message":{"shape":"String"} + }, + "error":{"httpStatusCode":502}, + "exception":true + }, + "InvocationRequest":{ + "type":"structure", + "required":["FunctionName"], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "location":"uri", + "locationName":"FunctionName" + }, + "InvocationType":{ + "shape":"InvocationType", + "location":"header", + "locationName":"X-Amz-Invocation-Type" + }, + "LogType":{ + "shape":"LogType", + "location":"header", + "locationName":"X-Amz-Log-Type" + }, + "ClientContext":{ + "shape":"String", + "location":"header", + "locationName":"X-Amz-Client-Context" + }, + "Payload":{"shape":"Blob"}, + "Qualifier":{ + "shape":"Qualifier", + "location":"querystring", + "locationName":"Qualifier" + } + }, + "payload":"Payload" + }, + "InvocationResponse":{ + "type":"structure", + "members":{ + "StatusCode":{ + "shape":"Integer", + "location":"statusCode" + }, + "FunctionError":{ + "shape":"String", + "location":"header", + "locationName":"X-Amz-Function-Error" + }, + "LogResult":{ + "shape":"String", + "location":"header", + "locationName":"X-Amz-Log-Result" + }, + "Payload":{"shape":"Blob"} + }, + "payload":"Payload" + }, + "InvocationType":{ + "type":"string", + "enum":[ + "Event", + "RequestResponse", + "DryRun" + ] + }, + "InvokeAsyncRequest":{ + "type":"structure", + "required":[ + "FunctionName", + "InvokeArgs" + ], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "location":"uri", + "locationName":"FunctionName" + }, + "InvokeArgs":{"shape":"BlobStream"} + }, + "deprecated":true, + "payload":"InvokeArgs" + }, + "InvokeAsyncResponse":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"HttpStatus", + "location":"statusCode" + } + }, + "deprecated":true + }, + "ListAliasesRequest":{ + "type":"structure", + "required":["FunctionName"], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "location":"uri", + "locationName":"FunctionName" + }, + "FunctionVersion":{ + "shape":"Version", + "location":"querystring", + "locationName":"FunctionVersion" + }, + "Marker":{ + "shape":"String", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"MaxListItems", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListAliasesResponse":{ + "type":"structure", + "members":{ + "NextMarker":{"shape":"String"}, + "Aliases":{"shape":"AliasList"} + } + }, + "ListEventSourceMappingsRequest":{ + "type":"structure", + "members":{ + "EventSourceArn":{ + "shape":"Arn", + "location":"querystring", + "locationName":"EventSourceArn" + }, + "FunctionName":{ + "shape":"FunctionName", + "location":"querystring", + "locationName":"FunctionName" + }, + "Marker":{ + "shape":"String", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"MaxListItems", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListEventSourceMappingsResponse":{ + "type":"structure", + "members":{ + "NextMarker":{"shape":"String"}, + "EventSourceMappings":{"shape":"EventSourceMappingsList"} + } + }, + "ListFunctionsRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"String", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"MaxListItems", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, 
+ "ListFunctionsResponse":{ + "type":"structure", + "members":{ + "NextMarker":{"shape":"String"}, + "Functions":{"shape":"FunctionList"} + } + }, + "ListVersionsByFunctionRequest":{ + "type":"structure", + "required":["FunctionName"], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "location":"uri", + "locationName":"FunctionName" + }, + "Marker":{ + "shape":"String", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"MaxListItems", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListVersionsByFunctionResponse":{ + "type":"structure", + "members":{ + "NextMarker":{"shape":"String"}, + "Versions":{"shape":"FunctionList"} + } + }, + "LogType":{ + "type":"string", + "enum":[ + "None", + "Tail" + ] + }, + "Long":{"type":"long"}, + "MaxListItems":{ + "type":"integer", + "max":10000, + "min":1 + }, + "MemorySize":{ + "type":"integer", + "max":1536, + "min":128 + }, + "PolicyLengthExceededException":{ + "type":"structure", + "members":{ + "Type":{"shape":"String"}, + "message":{"shape":"String"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "Principal":{ + "type":"string", + "pattern":".*" + }, + "PublishVersionRequest":{ + "type":"structure", + "required":["FunctionName"], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "location":"uri", + "locationName":"FunctionName" + }, + "CodeSha256":{"shape":"String"}, + "Description":{"shape":"Description"} + } + }, + "Qualifier":{ + "type":"string", + "max":128, + "min":1, + "pattern":"(|[a-zA-Z0-9$_-]+)" + }, + "RemovePermissionRequest":{ + "type":"structure", + "required":[ + "FunctionName", + "StatementId" + ], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "location":"uri", + "locationName":"FunctionName" + }, + "StatementId":{ + "shape":"StatementId", + "location":"uri", + "locationName":"StatementId" + }, + "Qualifier":{ + "shape":"Qualifier", + "location":"querystring", + "locationName":"Qualifier" + } + } + }, + "RequestTooLargeException":{ + "type":"structure", + "members":{ + "Type":{"shape":"String"}, + "message":{"shape":"String"} + }, + "error":{"httpStatusCode":413}, + "exception":true + }, + "ResourceConflictException":{ + "type":"structure", + "members":{ + "Type":{"shape":"String"}, + "message":{"shape":"String"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Type":{"shape":"String"}, + "Message":{"shape":"String"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "RoleArn":{ + "type":"string", + "pattern":"arn:aws:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+" + }, + "Runtime":{ + "type":"string", + "enum":[ + "nodejs", + "nodejs4.3", + "java8", + "python2.7" + ] + }, + "S3Bucket":{ + "type":"string", + "max":63, + "min":3, + "pattern":"^[0-9A-Za-z\\.\\-_]*(?AWS Lambda

    Overview

    This is the AWS Lambda API Reference. The AWS Lambda Developer Guide provides additional information. For the service overview, go to What is AWS Lambda, and for information about how the service works, go to AWS Lambda: How it Works in the AWS Lambda Developer Guide.

    ", + "operations": { + "AddPermission": "

    Adds a permission to the resource policy associated with the specified AWS Lambda function. You use resource policies to grant permissions to event sources that use the push model. In a push model, event sources (such as Amazon S3 and custom applications) invoke your Lambda function. Each permission you add to the resource policy allows an event source permission to invoke the Lambda function.

    For information about the push model, see AWS Lambda: How it Works.

    If you are using versioning, the permissions you add are specific to the Lambda function version or alias you specify in the AddPermission request via the Qualifier parameter. For more information about versioning, see AWS Lambda Function Versioning and Aliases.

    This operation requires permission for the lambda:AddPermission action.
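
    A minimal sketch of calling this operation through the vendored aws-sdk-go client follows; the region, function name, statement ID, and bucket ARN are illustrative placeholders, not values taken from this patch.

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/lambda"
    )

    func main() {
        svc := lambda.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")})))

        // Grant the Amazon S3 service principal permission to invoke the
        // function, scoped to a single bucket via SourceArn as the
        // description above recommends.
        out, err := svc.AddPermission(&lambda.AddPermissionInput{
            FunctionName: aws.String("Thumbnail"),   // placeholder
            StatementId:  aws.String("s3-invoke-1"), // placeholder
            Action:       aws.String("lambda:InvokeFunction"),
            Principal:    aws.String("s3.amazonaws.com"),
            SourceArn:    aws.String("arn:aws:s3:::example-bucket"), // placeholder
        })
        if err != nil {
            fmt.Println("AddPermission failed:", err)
            return
        }
        fmt.Println("policy statement added:", aws.StringValue(out.Statement))
    }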

    ", + "CreateAlias": "

    Creates an alias that points to the specified Lambda function version. For more information, see Introduction to AWS Lambda Aliases.

    Alias names are unique for a given function.

    This requires permission for the lambda:CreateAlias action.

    ", + "CreateEventSourceMapping": "

    Identifies a stream as an event source for a Lambda function. It can be either an Amazon Kinesis stream or an Amazon DynamoDB stream. AWS Lambda invokes the specified function when records are posted to the stream.

    This association between a stream source and a Lambda function is called the event source mapping.

    This event source mapping is relevant only in the AWS Lambda pull model, where AWS Lambda invokes the function. For more information, go to AWS Lambda: How it Works in the AWS Lambda Developer Guide.

    You provide mapping information (for example, which stream to read from and which Lambda function to invoke) in the request body.

    Each event source, such as an Amazon Kinesis or a DynamoDB stream, can be associated with multiple AWS Lambda functions. A given Lambda function can be associated with multiple AWS event sources.

    If you are using versioning, you can specify a specific function version or an alias via the function name parameter. For more information about versioning, see AWS Lambda Function Versioning and Aliases.

    This operation requires permission for the lambda:CreateEventSourceMapping action.
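
    As a hedged illustration, the sketch below maps a Kinesis stream to a function using the vendored aws-sdk-go client; the stream ARN and function name are placeholders.

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/lambda"
    )

    func main() {
        svc := lambda.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")})))

        // Pull-model mapping: AWS Lambda polls the stream and invokes the function.
        out, err := svc.CreateEventSourceMapping(&lambda.CreateEventSourceMappingInput{
            EventSourceArn:   aws.String("arn:aws:kinesis:us-west-2:123456789012:stream/example"), // placeholder
            FunctionName:     aws.String("ProcessRecords"), // placeholder
            StartingPosition: aws.String("TRIM_HORIZON"),   // or "LATEST", per the enum above
            BatchSize:        aws.Int64(100),
            Enabled:          aws.Bool(true),
        })
        if err != nil {
            fmt.Println("CreateEventSourceMapping failed:", err)
            return
        }
        fmt.Println("mapping UUID:", aws.StringValue(out.UUID), "state:", aws.StringValue(out.State))
    }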

    ", + "CreateFunction": "

    Creates a new Lambda function. The function metadata is created from the request parameters, and the code for the function is provided by a .zip file in the request body. If the function name already exists, the operation will fail. Note that the function name is case-sensitive.

    If you are using versioning, you can also publish a version of the Lambda function you are creating using the Publish parameter. For more information about versioning, see AWS Lambda Function Versioning and Aliases.

    This operation requires permission for the lambda:CreateFunction action.
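
    A minimal sketch, assuming a deployment package at function.zip and an existing execution role (both placeholders), of creating a function and publishing its first version in one call via the Publish flag:

    package main

    import (
        "fmt"
        "io/ioutil"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/lambda"
    )

    func main() {
        zipBytes, err := ioutil.ReadFile("function.zip") // placeholder path; the SDK base64-encodes it for you
        if err != nil {
            fmt.Println("read zip:", err)
            return
        }

        svc := lambda.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")})))
        out, err := svc.CreateFunction(&lambda.CreateFunctionInput{
            FunctionName: aws.String("Thumbnail"), // placeholder; names are case-sensitive
            Runtime:      aws.String("python2.7"), // one of the Runtime enum values above
            Role:         aws.String("arn:aws:iam::123456789012:role/lambda-exec"), // placeholder
            Handler:      aws.String("main.handler"),
            Code:         &lambda.FunctionCode{ZipFile: zipBytes},
            MemorySize:   aws.Int64(256), // must be a multiple of 64 MB
            Timeout:      aws.Int64(30),
            Publish:      aws.Bool(true), // create and publish a version atomically
        })
        if err != nil {
            fmt.Println("CreateFunction failed:", err)
            return
        }
        fmt.Println("created version:", aws.StringValue(out.Version))
    }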

    ", + "DeleteAlias": "

    Deletes the specified Lambda function alias. For more information, see Introduction to AWS Lambda Aliases.

    This requires permission for the lambda:DeleteAlias action.

    ", + "DeleteEventSourceMapping": "

    Removes an event source mapping. This means AWS Lambda will no longer invoke the function for events in the associated source.

    This operation requires permission for the lambda:DeleteEventSourceMapping action.

    ", + "DeleteFunction": "

    Deletes the specified Lambda function code and configuration.

    If you are using the versioning feature and you don't specify a function version in your DeleteFunction request, AWS Lambda will delete the function, including all its versions, and any aliases pointing to the function versions. To delete a specific function version, you must provide the function version via the Qualifier parameter. For information about function versioning, see AWS Lambda Function Versioning and Aliases.

    When you delete a function, the associated resource policy is also deleted. You will need to delete the event source mappings explicitly.

    This operation requires permission for the lambda:DeleteFunction action.
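
    A short sketch of deleting one published version (never $LATEST) via the Qualifier parameter; the function name and version number are placeholders:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/lambda"
    )

    func main() {
        svc := lambda.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")})))

        // With Qualifier set, only version "2" is deleted; omitting it would
        // delete the function along with all its versions and aliases.
        _, err := svc.DeleteFunction(&lambda.DeleteFunctionInput{
            FunctionName: aws.String("Thumbnail"), // placeholder
            Qualifier:    aws.String("2"),         // placeholder version
        })
        if err != nil {
            fmt.Println("DeleteFunction failed:", err)
        }
    }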

    ", + "GetAlias": "

    Returns the specified alias information such as the alias ARN, description, and function version it is pointing to. For more information, see Introduction to AWS Lambda Aliases.

    This requires permission for the lambda:GetAlias action.

    ", + "GetEventSourceMapping": "

    Returns configuration information for the specified event source mapping (see CreateEventSourceMapping).

    This operation requires permission for the lambda:GetEventSourceMapping action.

    ", + "GetFunction": "

    Returns the configuration information of the Lambda function and a presigned URL link to the .zip file you uploaded with CreateFunction so you can download the .zip file. Note that the URL is valid for up to 10 minutes. The configuration information is the same information you provided as parameters when uploading the function.

    Using the optional Qualifier parameter, you can specify a specific function version for which you want this information. If you don't specify this parameter, the API uses the unqualified function ARN, which returns information about the $LATEST version of the Lambda function. For more information, see AWS Lambda Function Versioning and Aliases.

    This operation requires permission for the lambda:GetFunction action.
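
    A sketch retrieving both the configuration and the time-limited presigned code URL; the function name and alias are placeholders:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/lambda"
    )

    func main() {
        svc := lambda.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")})))

        out, err := svc.GetFunction(&lambda.GetFunctionInput{
            FunctionName: aws.String("Thumbnail"), // placeholder
            Qualifier:    aws.String("PROD"),      // placeholder alias; omit for $LATEST
        })
        if err != nil {
            fmt.Println("GetFunction failed:", err)
            return
        }
        // Code.Location is the presigned URL, valid for up to 10 minutes.
        fmt.Println("code sha256:", aws.StringValue(out.Configuration.CodeSha256))
        fmt.Println("download URL:", aws.StringValue(out.Code.Location))
    }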

    ", + "GetFunctionConfiguration": "

    Returns the configuration information of the Lambda function. This is the same information you provided as parameters when uploading the function by using CreateFunction.

    If you are using the versioning feature, you can retrieve this information for a specific function version by using the optional Qualifier parameter and specifying the function version or alias that points to it. If you don't provide it, the API returns information about the $LATEST version of the function. For more information about versioning, see AWS Lambda Function Versioning and Aliases.

    This operation requires permission for the lambda:GetFunctionConfiguration operation.

    ", + "GetPolicy": "

    Returns the resource policy associated with the specified Lambda function.

    If you are using the versioning feature, you can get the resource policy associated with the specific Lambda function version or alias by specifying the version or alias name using the Qualifier parameter. For more information about versioning, see AWS Lambda Function Versioning and Aliases.

    For information about adding permissions, see AddPermission.

    You need permission for the lambda:GetPolicy action.

    ", + "Invoke": "

    Invokes a specific Lambda function.

    If you are using the versioning feature, you can invoke the specific function version by providing the function version or alias name that is pointing to the function version, using the Qualifier parameter in the request. If you don't provide the Qualifier parameter, the $LATEST version of the Lambda function is invoked. For information about the versioning feature, see AWS Lambda Function Versioning and Aliases.

    This operation requires permission for the lambda:InvokeFunction action.
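
    A minimal synchronous invocation sketch; the function name, alias, and JSON payload are placeholders:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/lambda"
    )

    func main() {
        svc := lambda.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")})))

        out, err := svc.Invoke(&lambda.InvokeInput{
            FunctionName:   aws.String("Thumbnail"),        // placeholder
            Qualifier:      aws.String("PROD"),             // placeholder alias; omit for $LATEST
            InvocationType: aws.String("RequestResponse"),  // or "Event" / "DryRun"
            Payload:        []byte(`{"bucket":"example"}`), // placeholder input
        })
        if err != nil {
            fmt.Println("Invoke failed:", err)
            return
        }
        if fe := aws.StringValue(out.FunctionError); fe != "" {
            fmt.Println("function error:", fe) // "Handled" or "Unhandled"
        }
        fmt.Println(aws.Int64Value(out.StatusCode), string(out.Payload))
    }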

    ", + "InvokeAsync": "This API is deprecated. We recommend you use Invoke API (see Invoke).

    Submits an invocation request to AWS Lambda. Upon receiving the request, Lambda executes the specified function asynchronously. To see the logs generated by the Lambda function execution, see the CloudWatch Logs console.

    This operation requires permission for the lambda:InvokeFunction action.

    ", + "ListAliases": "

    Returns a list of aliases created for a Lambda function. For each alias, the response includes information such as the alias ARN, description, alias name, and the function version to which it points. For more information, see Introduction to AWS Lambda Aliases.

    This requires permission for the lambda:ListAliases action.

    ", + "ListEventSourceMappings": "

    Returns a list of event source mappings you created using CreateEventSourceMapping (see CreateEventSourceMapping).

    For each mapping, the API returns configuration information. You can optionally specify filters to retrieve specific event source mappings.

    If you are using the versioning feature, you can get a list of event source mappings for a specific Lambda function version or an alias as described in the FunctionName parameter. For information about the versioning feature, see AWS Lambda Function Versioning and Aliases.

    This operation requires permission for the lambda:ListEventSourceMappings action.

    ", + "ListFunctions": "

    Returns a list of your Lambda functions. For each function, the response includes the function configuration information. You must use GetFunction to retrieve the code for your function.

    This operation requires permission for the lambda:ListFunctions action.

    If you are using the versioning feature, the response returns a list of the $LATEST versions of your functions. For information about the versioning feature, see AWS Lambda Function Versioning and Aliases.
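
    Since the response is paginated via Marker/NextMarker, the sketch below walks every page with the vendored aws-sdk-go client; the region is a placeholder, and MaxItems is kept small here only to exercise paging.

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/lambda"
    )

    func main() {
        svc := lambda.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")})))

        var marker *string
        for {
            out, err := svc.ListFunctions(&lambda.ListFunctionsInput{
                Marker:   marker,
                MaxItems: aws.Int64(50),
            })
            if err != nil {
                fmt.Println("ListFunctions failed:", err)
                return
            }
            for _, fn := range out.Functions {
                fmt.Println(aws.StringValue(fn.FunctionName), aws.StringValue(fn.FunctionArn))
            }
            if out.NextMarker == nil {
                break // no more pages
            }
            marker = out.NextMarker
        }
    }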

    ", + "ListVersionsByFunction": "

    List all versions of a function. For information about the versioning feature, see AWS Lambda Function Versioning and Aliases.

    ", + "PublishVersion": "

    Publishes a version of your function from the current snapshot of $LATEST. That is, AWS Lambda takes a snapshot of the function code and configuration information from $LATEST and publishes a new version. The code and configuration cannot be modified after publication. For information about the versioning feature, see AWS Lambda Function Versioning and Aliases.
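
    A sketch of publishing a version from the current $LATEST snapshot; the function name and description are placeholders. Supplying CodeSha256 (optional, omitted here) makes the call fail if $LATEST changed since you last read it.

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/lambda"
    )

    func main() {
        svc := lambda.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")})))

        out, err := svc.PublishVersion(&lambda.PublishVersionInput{
            FunctionName: aws.String("Thumbnail"),       // placeholder
            Description:  aws.String("release 2017-07"), // placeholder
        })
        if err != nil {
            fmt.Println("PublishVersion failed:", err)
            return
        }
        fmt.Println("published version:", aws.StringValue(out.Version))
    }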

    ", + "RemovePermission": "

    You can remove individual permissions from a resource policy associated with a Lambda function by providing the statement ID that you provided when you added the permission.

    If you are using versioning, the permissions you remove are specific to the Lambda function version or alias you specify in the AddPermission request via the Qualifier parameter. For more information about versioning, see AWS Lambda Function Versioning and Aliases.

    Note that removal of a permission will cause an active event source to lose permission to the function.

    You need permission for the lambda:RemovePermission action.

    ", + "UpdateAlias": "

    Using this API you can update the function version to which the alias points and the alias description. For more information, see Introduction to AWS Lambda Aliases.

    This requires permission for the lambda:UpdateAlias action.

    ", + "UpdateEventSourceMapping": "

    You can update an event source mapping. This is useful if you want to change the parameters of the existing mapping without losing your position in the stream. You can change which function will receive the stream records, but to change the stream itself, you must create a new mapping.

    If you are using the versioning feature, you can update the event source mapping to map to a specific Lambda function version or alias as described in the FunctionName parameter. For information about the versioning feature, see AWS Lambda Function Versioning and Aliases.

    If you disable the event source mapping, AWS Lambda stops polling. If you enable it again, it will resume polling from the time it had stopped, so you don't lose processing of any records. However, if you delete the event source mapping and create it again, it will reset.

    This operation requires permission for the lambda:UpdateEventSourceMapping action.

    ", + "UpdateFunctionCode": "

    Updates the code for the specified Lambda function. This operation must only be used on an existing Lambda function and cannot be used to update the function configuration.

    If you are using the versioning feature, note this API will always update the $LATEST version of your Lambda function. For information about the versioning feature, see AWS Lambda Function Versioning and Aliases.

    This operation requires permission for the lambda:UpdateFunctionCode action.
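
    A sketch replacing the $LATEST code from an S3 object instead of an inline zip; the bucket and key are placeholders:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/lambda"
    )

    func main() {
        svc := lambda.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")})))

        // Either ZipFile or the S3Bucket/S3Key pair identifies the new package.
        out, err := svc.UpdateFunctionCode(&lambda.UpdateFunctionCodeInput{
            FunctionName: aws.String("Thumbnail"),      // placeholder
            S3Bucket:     aws.String("example-bucket"), // placeholder
            S3Key:        aws.String("thumbnail.zip"),  // placeholder
            Publish:      aws.Bool(true),               // update and publish a version atomically
        })
        if err != nil {
            fmt.Println("UpdateFunctionCode failed:", err)
            return
        }
        fmt.Println("new code sha256:", aws.StringValue(out.CodeSha256))
    }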

    ", + "UpdateFunctionConfiguration": "

    Updates the configuration parameters for the specified Lambda function by using the values provided in the request. You provide only the parameters you want to change. This operation must only be used on an existing Lambda function and cannot be used to update the function's code.

    If you are using the versioning feature, note this API will always update the $LATEST version of your Lambda function. For information about the versioning feature, see AWS Lambda Function Versioning and Aliases.

    This operation requires permission for the lambda:UpdateFunctionConfiguration action.
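
    A sketch changing only the memory and timeout of $LATEST, leaving every other setting untouched as the description above allows; the function name is a placeholder:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/lambda"
    )

    func main() {
        svc := lambda.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")})))

        out, err := svc.UpdateFunctionConfiguration(&lambda.UpdateFunctionConfigurationInput{
            FunctionName: aws.String("Thumbnail"), // placeholder
            MemorySize:   aws.Int64(512),          // multiple of 64 MB, between 128 and 1536
            Timeout:      aws.Int64(60),
        })
        if err != nil {
            fmt.Println("UpdateFunctionConfiguration failed:", err)
            return
        }
        fmt.Println("memory now:", aws.Int64Value(out.MemorySize), "MB")
    }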

    " + }, + "shapes": { + "Action": { + "base": null, + "refs": { + "AddPermissionRequest$Action": "

    The AWS Lambda action you want to allow in this statement. Each Lambda action is a string starting with lambda: followed by the API name (see Operations). For example, lambda:CreateFunction. You can use wildcard (lambda:*) to grant permission for all AWS Lambda actions.

    " + } + }, + "AddPermissionRequest": { + "base": null, + "refs": { + } + }, + "AddPermissionResponse": { + "base": null, + "refs": { + } + }, + "Alias": { + "base": null, + "refs": { + "AliasConfiguration$Name": "

    Alias name.

    ", + "CreateAliasRequest$Name": "

    Name for the alias you are creating.

    ", + "DeleteAliasRequest$Name": "

    Name of the alias to delete.

    ", + "GetAliasRequest$Name": "

    Name of the alias for which you want to retrieve information.

    ", + "UpdateAliasRequest$Name": "

    The alias name.

    " + } + }, + "AliasConfiguration": { + "base": "

    Provides configuration information about a Lambda function version alias.

    ", + "refs": { + "AliasList$member": null + } + }, + "AliasList": { + "base": null, + "refs": { + "ListAliasesResponse$Aliases": "

    A list of aliases.

    " + } + }, + "Arn": { + "base": null, + "refs": { + "AddPermissionRequest$SourceArn": "

    This is optional; however, when granting Amazon S3 permission to invoke your function, you should specify this field with the bucket Amazon Resource Name (ARN) as its value. This ensures that only events generated from the specified bucket can invoke the function.

    If you add a permission for the Amazon S3 principal without providing the source ARN, any AWS account that creates a mapping to your function ARN can send events to invoke your Lambda function from Amazon S3.", + "CreateEventSourceMappingRequest$EventSourceArn": "

    The Amazon Resource Name (ARN) of the Amazon Kinesis or the Amazon DynamoDB stream that is the event source. Any record added to this stream could cause AWS Lambda to invoke your Lambda function, depending on the BatchSize. AWS Lambda POSTs the Amazon Kinesis event, containing records, to your Lambda function as JSON.

    ", + "EventSourceMappingConfiguration$EventSourceArn": "

    The Amazon Resource Name (ARN) of the Amazon Kinesis stream that is the source of events.

    ", + "ListEventSourceMappingsRequest$EventSourceArn": "

    The Amazon Resource Name (ARN) of the Amazon Kinesis stream.

    " + } + }, + "BatchSize": { + "base": null, + "refs": { + "CreateEventSourceMappingRequest$BatchSize": "

    The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function. Your function receives an event with all the retrieved records. The default is 100 records.

    ", + "EventSourceMappingConfiguration$BatchSize": "

    The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function. Your function receives an event with all the retrieved records.

    ", + "UpdateEventSourceMappingRequest$BatchSize": "

    The maximum number of stream records that can be sent to your Lambda function for a single invocation.

    " + } + }, + "Blob": { + "base": null, + "refs": { + "FunctionCode$ZipFile": "

    A zip file containing your deployment package. If you are using the API directly, the zip file must be base64-encoded (if you are using the AWS SDKs or the AWS CLI, the SDKs or CLI will do the encoding for you). For more information about creating a .zip file, go to Execution Permissions in the AWS Lambda Developer Guide.

    ", + "InvocationRequest$Payload": "

    JSON that you want to provide to your Lambda function as input.

    ", + "InvocationResponse$Payload": "

    It is the JSON representation of the object returned by the Lambda function. This is present only if the invocation type is RequestResponse.

    In the event of a function error, this field contains a message describing the error. For Handled errors the Lambda function will report this message. For Unhandled errors AWS Lambda reports the message.

    ", + "UpdateFunctionCodeRequest$ZipFile": "

    Base64-encoded .zip file containing your packaged source code.

    " + } + }, + "BlobStream": { + "base": null, + "refs": { + "InvokeAsyncRequest$InvokeArgs": "

    JSON that you want to provide to your Lambda function as input.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "CreateFunctionRequest$Publish": "

    This boolean parameter can be used to request AWS Lambda to create the Lambda function and publish a version as an atomic operation.

    ", + "UpdateFunctionCodeRequest$Publish": "

    This boolean parameter can be used to request AWS Lambda to update the Lambda function and publish a version as an atomic operation.

    " + } + }, + "CodeStorageExceededException": { + "base": "

    You have exceeded your maximum total code size per account. For more information, see Limits.

    ", + "refs": { + } + }, + "CreateAliasRequest": { + "base": null, + "refs": { + } + }, + "CreateEventSourceMappingRequest": { + "base": null, + "refs": { + } + }, + "CreateFunctionRequest": { + "base": null, + "refs": { + } + }, + "Date": { + "base": null, + "refs": { + "EventSourceMappingConfiguration$LastModified": "

    The UTC time string indicating the last time the event mapping was updated.

    " + } + }, + "DeleteAliasRequest": { + "base": null, + "refs": { + } + }, + "DeleteEventSourceMappingRequest": { + "base": null, + "refs": { + } + }, + "DeleteFunctionRequest": { + "base": null, + "refs": { + } + }, + "Description": { + "base": null, + "refs": { + "AliasConfiguration$Description": "

    Alias description.

    ", + "CreateAliasRequest$Description": "

    Description of the alias.

    ", + "CreateFunctionRequest$Description": "

    A short, user-defined function description. Lambda does not use this value. Assign a meaningful description as you see fit.

    ", + "FunctionConfiguration$Description": "

    The user-provided description.

    ", + "PublishVersionRequest$Description": "

    The description for the version you are publishing. If not provided, AWS Lambda copies the description from the $LATEST version.

    ", + "UpdateAliasRequest$Description": "

    You can change the description of the alias using this parameter.

    ", + "UpdateFunctionConfigurationRequest$Description": "

    A short user-defined function description. AWS Lambda does not use this value. Assign a meaningful description as you see fit.

    " + } + }, + "EC2AccessDeniedException": { + "base": null, + "refs": { + } + }, + "EC2ThrottledException": { + "base": "

    AWS Lambda was throttled by Amazon EC2 during Lambda function initialization using the execution role provided for the Lambda function.

    ", + "refs": { + } + }, + "EC2UnexpectedException": { + "base": "

    AWS Lambda received an unexpected EC2 client exception while setting up for the Lambda function.

    ", + "refs": { + } + }, + "ENILimitReachedException": { + "base": "

    AWS Lambda was not able to create an Elastic Network Interface (ENI) in the VPC, specified as part of Lambda function configuration, because the limit for network interfaces has been reached.

    ", + "refs": { + } + }, + "Enabled": { + "base": null, + "refs": { + "CreateEventSourceMappingRequest$Enabled": "

    Indicates whether AWS Lambda should begin polling the event source. By default, Enabled is true.

    ", + "UpdateEventSourceMappingRequest$Enabled": "

    Specifies whether AWS Lambda should actively poll the stream or not. If disabled, AWS Lambda will not poll the stream.

    " + } + }, + "EventSourceMappingConfiguration": { + "base": "

    Describes a mapping between an Amazon Kinesis stream and a Lambda function.

    ", + "refs": { + "EventSourceMappingsList$member": null + } + }, + "EventSourceMappingsList": { + "base": null, + "refs": { + "ListEventSourceMappingsResponse$EventSourceMappings": "

    An array of EventSourceMappingConfiguration objects.

    " + } + }, + "EventSourcePosition": { + "base": null, + "refs": { + "CreateEventSourceMappingRequest$StartingPosition": "

    The position in the stream where AWS Lambda should start reading. For more information, go to ShardIteratorType in the Amazon Kinesis API Reference.

    " + } + }, + "EventSourceToken": { + "base": null, + "refs": { + "AddPermissionRequest$EventSourceToken": null + } + }, + "FunctionArn": { + "base": null, + "refs": { + "AliasConfiguration$AliasArn": "

    Lambda function ARN that is qualified using the alias name as the suffix. For example, if you create an alias called BETA that points to a helloworld function version, the ARN is arn:aws:lambda:aws-region:acct-id:function:helloworld:BETA.

    ", + "EventSourceMappingConfiguration$FunctionArn": "

    The Lambda function to invoke when AWS Lambda detects an event on the stream.

    ", + "FunctionConfiguration$FunctionArn": "

    The Amazon Resource Name (ARN) assigned to the function.

    " + } + }, + "FunctionCode": { + "base": "

    The code for the Lambda function.

    ", + "refs": { + "CreateFunctionRequest$Code": "

    The code for the Lambda function.

    " + } + }, + "FunctionCodeLocation": { + "base": "

    The object for the Lambda function location.

    ", + "refs": { + "GetFunctionResponse$Code": null + } + }, + "FunctionConfiguration": { + "base": "

    A complex type that describes function metadata.

    ", + "refs": { + "FunctionList$member": null, + "GetFunctionResponse$Configuration": null + } + }, + "FunctionList": { + "base": null, + "refs": { + "ListFunctionsResponse$Functions": "

    A list of Lambda functions.

    ", + "ListVersionsByFunctionResponse$Versions": "

    A list of Lambda function versions.

    " + } + }, + "FunctionName": { + "base": null, + "refs": { + "AddPermissionRequest$FunctionName": "

    Name of the Lambda function whose resource policy you are updating by adding a new permission.

    You can specify a function name (for example, Thumbnail) or you can specify Amazon Resource Name (ARN) of the function (for example, arn:aws:lambda:us-west-2:account-id:function:ThumbNail). AWS Lambda also allows you to specify partial ARN (for example, account-id:Thumbnail). Note that the length constraint applies only to the ARN. If you specify only the function name, it is limited to 64 characters in length.

    ", + "CreateAliasRequest$FunctionName": "

    Name of the Lambda function for which you want to create an alias.

    ", + "CreateEventSourceMappingRequest$FunctionName": "

    The Lambda function to invoke when AWS Lambda detects an event on the stream.

    You can specify the function name (for example, Thumbnail) or you can specify Amazon Resource Name (ARN) of the function (for example, arn:aws:lambda:us-west-2:account-id:function:ThumbNail).

    If you are using versioning, you can also provide a qualified function ARN (ARN that is qualified with function version or alias name as suffix). For more information about versioning, see AWS Lambda Function Versioning and Aliases

    AWS Lambda also allows you to specify only the function name with the account ID qualifier (for example, account-id:Thumbnail).

    Note that the length constraint applies only to the ARN. If you specify only the function name, it is limited to 64 characters in length.

    ", + "CreateFunctionRequest$FunctionName": "

    The name you want to assign to the function you are uploading. The function names appear in the console and are returned in the ListFunctions API. Function names are used to specify functions to other AWS Lambda APIs, such as Invoke.

    ", + "DeleteAliasRequest$FunctionName": "

    The Lambda function name for which the alias is created. Deleting an alias does not delete the function version to which it is pointing.

    ", + "DeleteFunctionRequest$FunctionName": "

    The Lambda function to delete.

    You can specify the function name (for example, Thumbnail) or you can specify Amazon Resource Name (ARN) of the function (for example, arn:aws:lambda:us-west-2:account-id:function:ThumbNail). If you are using versioning, you can also provide a qualified function ARN (ARN that is qualified with function version or alias name as suffix). AWS Lambda also allows you to specify only the function name with the account ID qualifier (for example, account-id:Thumbnail). Note that the length constraint applies only to the ARN. If you specify only the function name, it is limited to 64 characters in length.

    ", + "FunctionConfiguration$FunctionName": "

    The name of the function.

    ", + "GetAliasRequest$FunctionName": "

    Function name for which the alias is created. An alias is a subresource that exists only in the context of an existing Lambda function so you must specify the function name.

    ", + "GetFunctionConfigurationRequest$FunctionName": "

    The name of the Lambda function for which you want to retrieve the configuration information.

    You can specify a function name (for example, Thumbnail) or you can specify Amazon Resource Name (ARN) of the function (for example, arn:aws:lambda:us-west-2:account-id:function:ThumbNail). AWS Lambda also allows you to specify a partial ARN (for example, account-id:Thumbnail). Note that the length constraint applies only to the ARN. If you specify only the function name, it is limited to 64 characters in length.

    ", + "GetFunctionRequest$FunctionName": "

    The Lambda function name.

    You can specify a function name (for example, Thumbnail) or you can specify Amazon Resource Name (ARN) of the function (for example, arn:aws:lambda:us-west-2:account-id:function:ThumbNail). AWS Lambda also allows you to specify a partial ARN (for example, account-id:Thumbnail). Note that the length constraint applies only to the ARN. If you specify only the function name, it is limited to 64 characters in length.

    ", + "GetPolicyRequest$FunctionName": "

    Function name whose resource policy you want to retrieve.

    You can specify the function name (for example, Thumbnail) or you can specify Amazon Resource Name (ARN) of the function (for example, arn:aws:lambda:us-west-2:account-id:function:ThumbNail). If you are using versioning, you can also provide a qualified function ARN (ARN that is qualified with function version or alias name as suffix). AWS Lambda also allows you to specify only the function name with the account ID qualifier (for example, account-id:Thumbnail). Note that the length constraint applies only to the ARN. If you specify only the function name, it is limited to 64 characters in length.

    ", + "InvocationRequest$FunctionName": "

    The Lambda function name.

    You can specify a function name (for example, Thumbnail) or you can specify Amazon Resource Name (ARN) of the function (for example, arn:aws:lambda:us-west-2:account-id:function:ThumbNail). AWS Lambda also allows you to specify a partial ARN (for example, account-id:Thumbnail). Note that the length constraint applies only to the ARN. If you specify only the function name, it is limited to 64 characters in length.

    ", + "InvokeAsyncRequest$FunctionName": "

    The Lambda function name.

    ", + "ListAliasesRequest$FunctionName": "

    Lambda function name for which the alias is created.

    ", + "ListEventSourceMappingsRequest$FunctionName": "

    The name of the Lambda function.

    You can specify the function name (for example, Thumbnail) or you can specify Amazon Resource Name (ARN) of the function (for example, arn:aws:lambda:us-west-2:account-id:function:ThumbNail). If you are using versioning, you can also provide a qualified function ARN (ARN that is qualified with function version or alias name as suffix). AWS Lambda also allows you to specify only the function name with the account ID qualifier (for example, account-id:Thumbnail). Note that the length constraint applies only to the ARN. If you specify only the function name, it is limited to 64 characters in length.

    ", + "ListVersionsByFunctionRequest$FunctionName": "

    Function name whose versions you want to list. You can specify a function name (for example, Thumbnail) or you can specify Amazon Resource Name (ARN) of the function (for example, arn:aws:lambda:us-west-2:account-id:function:ThumbNail). AWS Lambda also allows you to specify a partial ARN (for example, account-id:Thumbnail). Note that the length constraint applies only to the ARN. If you specify only the function name, it is limited to 64 characters in length.

    ", + "PublishVersionRequest$FunctionName": "

    The Lambda function name. You can specify a function name (for example, Thumbnail) or you can specify Amazon Resource Name (ARN) of the function (for example, arn:aws:lambda:us-west-2:account-id:function:ThumbNail). AWS Lambda also allows you to specify a partial ARN (for example, account-id:Thumbnail). Note that the length constraint applies only to the ARN. If you specify only the function name, it is limited to 64 characters in length.

    ", + "RemovePermissionRequest$FunctionName": "

    Lambda function whose resource policy you want to remove a permission from.

    You can specify a function name (for example, Thumbnail) or you can specify Amazon Resource Name (ARN) of the function (for example, arn:aws:lambda:us-west-2:account-id:function:ThumbNail). AWS Lambda also allows you to specify a partial ARN (for example, account-id:Thumbnail). Note that the length constraint applies only to the ARN. If you specify only the function name, it is limited to 64 characters in length.

    ", + "UpdateAliasRequest$FunctionName": "

    The function name for which the alias is created.

    ", + "UpdateEventSourceMappingRequest$FunctionName": "

    The Lambda function to which you want the stream records sent.

    You can specify a function name (for example, Thumbnail) or you can specify Amazon Resource Name (ARN) of the function (for example, arn:aws:lambda:us-west-2:account-id:function:ThumbNail). AWS Lambda also allows you to specify a partial ARN (for example, account-id:Thumbnail).

    If you are using versioning, you can also provide a qualified function ARN (ARN that is qualified with function version or alias name as suffix). For more information about versioning, see AWS Lambda Function Versioning and Aliases

    Note that the length constraint applies only to the ARN. If you specify only the function name, it is limited to 64 characters in length.

    ", + "UpdateFunctionCodeRequest$FunctionName": "

    The existing Lambda function name whose code you want to replace.

    You can specify a function name (for example, Thumbnail) or you can specify Amazon Resource Name (ARN) of the function (for example, arn:aws:lambda:us-west-2:account-id:function:ThumbNail). AWS Lambda also allows you to specify a partial ARN (for example, account-id:Thumbnail). Note that the length constraint applies only to the ARN. If you specify only the function name, it is limited to 64 characters in length.

    ", + "UpdateFunctionConfigurationRequest$FunctionName": "

    The name of the Lambda function.

    You can specify a function name (for example, Thumbnail) or you can specify Amazon Resource Name (ARN) of the function (for example, arn:aws:lambda:us-west-2:account-id:function:ThumbNail). AWS Lambda also allows you to specify a partial ARN (for example, account-id:Thumbnail). Note that the length constraint applies only to the ARN. If you specify only the function name, it is limited to 64 characters in length.

    " + } + }, + "GetAliasRequest": { + "base": null, + "refs": { + } + }, + "GetEventSourceMappingRequest": { + "base": null, + "refs": { + } + }, + "GetFunctionConfigurationRequest": { + "base": null, + "refs": { + } + }, + "GetFunctionRequest": { + "base": null, + "refs": { + } + }, + "GetFunctionResponse": { + "base": "

    This response contains the object for the Lambda function location (see FunctionCodeLocation).

    ", + "refs": { + } + }, + "GetPolicyRequest": { + "base": null, + "refs": { + } + }, + "GetPolicyResponse": { + "base": null, + "refs": { + } + }, + "Handler": { + "base": null, + "refs": { + "CreateFunctionRequest$Handler": "

    The function within your code that Lambda calls to begin execution. For Node.js, it is the module-name.export value in your function. For Java, it can be package.class-name::handler or package.class-name. For more information, see Lambda Function Handler (Java).

    ", + "FunctionConfiguration$Handler": "

    The function Lambda calls to begin executing your function.

    ", + "UpdateFunctionConfigurationRequest$Handler": "

    The function that Lambda calls to begin executing your function. For Node.js, it is the module-name.export value in your function.

    " + } + }, + "HttpStatus": { + "base": null, + "refs": { + "InvokeAsyncResponse$Status": "

    It will be 202 upon success.

    " + } + }, + "Integer": { + "base": null, + "refs": { + "InvocationResponse$StatusCode": "

    The HTTP status code will be in the 200 range for a successful request. For the RequestResponse invocation type this status code will be 200. For the Event invocation type this status code will be 202. For the DryRun invocation type the status code will be 204.

    " + } + }, + "InvalidParameterValueException": { + "base": "

    One of the parameters in the request is invalid. For example, if you provided an IAM role for AWS Lambda to assume in the CreateFunction or the UpdateFunctionConfiguration API that AWS Lambda is unable to assume, you will get this exception.

    ", + "refs": { + } + }, + "InvalidRequestContentException": { + "base": "

    The request body could not be parsed as JSON.

    ", + "refs": { + } + }, + "InvalidSecurityGroupIDException": { + "base": "

    The Security Group ID provided in the Lambda function VPC configuration is invalid.

    ", + "refs": { + } + }, + "InvalidSubnetIDException": { + "base": "

    The Subnet ID provided in the Lambda function VPC configuration is invalid.

    ", + "refs": { + } + }, + "InvocationRequest": { + "base": null, + "refs": { + } + }, + "InvocationResponse": { + "base": "

    Upon success, returns an empty response. Otherwise, throws an exception.

    ", + "refs": { + } + }, + "InvocationType": { + "base": null, + "refs": { + "InvocationRequest$InvocationType": "

    By default, the Invoke API assumes RequestResponse invocation type. You can optionally request asynchronous execution by specifying Event as the InvocationType. You can also use this parameter to request AWS Lambda to not execute the function but do some verification, such as if the caller is authorized to invoke the function and if the inputs are valid. You request this by specifying DryRun as the InvocationType. This is useful in a cross-account scenario when you want to verify access to a function without running it.

    " + } + }, + "InvokeAsyncRequest": { + "base": null, + "refs": { + } + }, + "InvokeAsyncResponse": { + "base": "

    Upon success, it returns an empty response. Otherwise, throws an exception.

    ", + "refs": { + } + }, + "ListAliasesRequest": { + "base": null, + "refs": { + } + }, + "ListAliasesResponse": { + "base": null, + "refs": { + } + }, + "ListEventSourceMappingsRequest": { + "base": null, + "refs": { + } + }, + "ListEventSourceMappingsResponse": { + "base": "

    Contains a list of event sources (see EventSourceMappingConfiguration).

    ", + "refs": { + } + }, + "ListFunctionsRequest": { + "base": null, + "refs": { + } + }, + "ListFunctionsResponse": { + "base": "

    Contains a list of AWS Lambda function configurations (see FunctionConfiguration).

    ", + "refs": { + } + }, + "ListVersionsByFunctionRequest": { + "base": null, + "refs": { + } + }, + "ListVersionsByFunctionResponse": { + "base": null, + "refs": { + } + }, + "LogType": { + "base": null, + "refs": { + "InvocationRequest$LogType": "

    You can set this optional parameter to Tail in the request only if you specify the InvocationType parameter with value RequestResponse. In this case, AWS Lambda returns the base64-encoded last 4 KB of log data produced by your Lambda function in the X-Amz-Log-Result header.
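
    A sketch of requesting the log tail during a synchronous invoke and decoding the base64 header value; the function name is a placeholder:

    package main

    import (
        "encoding/base64"
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/lambda"
    )

    func main() {
        svc := lambda.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")})))

        out, err := svc.Invoke(&lambda.InvokeInput{
            FunctionName:   aws.String("Thumbnail"),       // placeholder
            InvocationType: aws.String("RequestResponse"), // Tail requires RequestResponse
            LogType:        aws.String("Tail"),
        })
        if err != nil {
            fmt.Println("Invoke failed:", err)
            return
        }
        logs, err := base64.StdEncoding.DecodeString(aws.StringValue(out.LogResult))
        if err != nil {
            fmt.Println("decode LogResult:", err)
            return
        }
        fmt.Println(string(logs)) // last 4 KB of the function's log output
    }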

    " + } + }, + "Long": { + "base": null, + "refs": { + "FunctionConfiguration$CodeSize": "

    The size, in bytes, of the function .zip file you uploaded.

    " + } + }, + "MaxListItems": { + "base": null, + "refs": { + "ListAliasesRequest$MaxItems": "

    Optional integer. Specifies the maximum number of aliases to return in response. This parameter value must be greater than 0.

    ", + "ListEventSourceMappingsRequest$MaxItems": "

    Optional integer. Specifies the maximum number of event sources to return in response. This value must be greater than 0.

    ", + "ListFunctionsRequest$MaxItems": "

    Optional integer. Specifies the maximum number of AWS Lambda functions to return in response. This parameter value must be greater than 0.

    ", + "ListVersionsByFunctionRequest$MaxItems": "

    Optional integer. Specifies the maximum number of AWS Lambda function versions to return in response. This parameter value must be greater than 0.

    " + } + }, + "MemorySize": { + "base": null, + "refs": { + "CreateFunctionRequest$MemorySize": "

    The amount of memory, in MB, your Lambda function is given. Lambda uses this memory size to infer the amount of CPU and memory allocated to your function. Your function use-case determines your CPU and memory requirements. For example, a database operation might need less memory compared to an image processing function. The default value is 128 MB. The value must be a multiple of 64 MB.

    ", + "FunctionConfiguration$MemorySize": "

    The memory size, in MB, you configured for the function. Must be a multiple of 64 MB.

    ", + "UpdateFunctionConfigurationRequest$MemorySize": "

    The amount of memory, in MB, your Lambda function is given. AWS Lambda uses this memory size to infer the amount of CPU allocated to your function. Your function use-case determines your CPU and memory requirements. For example, a database operation might need less memory compared to an image processing function. The default value is 128 MB. The value must be a multiple of 64 MB.

    " + } + }, + "PolicyLengthExceededException": { + "base": "

    Lambda function access policy is limited to 20 KB.

    ", + "refs": { + } + }, + "Principal": { + "base": null, + "refs": { + "AddPermissionRequest$Principal": "

    The principal who is getting this permission. It can be the Amazon S3 service principal (s3.amazonaws.com) if you want Amazon S3 to invoke the function, an AWS account ID if you are granting cross-account permission, or any valid AWS service principal such as sns.amazonaws.com. For example, you might want to allow a custom application in another AWS account to push events to AWS Lambda by invoking your function.

    " + } + }, + "PublishVersionRequest": { + "base": null, + "refs": { + } + }, + "Qualifier": { + "base": null, + "refs": { + "AddPermissionRequest$Qualifier": "

    You can use this optional query parameter to describe a qualified ARN using a function version or an alias name. The permission will then apply to the specific qualified ARN. For example, if you specify function version 2 as the qualifier, then the permission applies only when the request is made using the qualified function ARN:

    arn:aws:lambda:aws-region:acct-id:function:function-name:2

    If you specify an alias name, for example PROD, then the permission is valid only for requests made using the alias ARN:

    arn:aws:lambda:aws-region:acct-id:function:function-name:PROD

    If the qualifier is not specified, the permission is valid only when a request is made using the unqualified function ARN:

    arn:aws:lambda:aws-region:acct-id:function:function-name
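
    A sketch of an AddPermission call combining Principal, SourceArn, SourceAccount, and Qualifier as described above; all names, ARNs, and IDs are placeholders.

```go
import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/lambda"
)

// allowS3Invoke lets Amazon S3 invoke version 2 of a hypothetical function.
func allowS3Invoke(svc *lambda.Lambda) error {
	_, err := svc.AddPermission(&lambda.AddPermissionInput{
		FunctionName:  aws.String("my-func"),
		StatementId:   aws.String("s3-invoke-1"), // unique statement identifier
		Action:        aws.String("lambda:InvokeFunction"),
		Principal:     aws.String("s3.amazonaws.com"),
		SourceArn:     aws.String("arn:aws:s3:::my-bucket"),
		SourceAccount: aws.String("123456789012"), // guards against bucket re-creation elsewhere
		Qualifier:     aws.String("2"),            // applies to ...:function:my-func:2 only
	})
	return err
}
```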

    ", + "DeleteFunctionRequest$Qualifier": "

    Using this optional parameter you can specify a function version (but not the $LATEST version) to direct AWS Lambda to delete a specific function version. If the function version has one or more aliases pointing to it, you will get an error because a version cannot be deleted while aliases still point to it. You can delete any function version but not $LATEST; that is, you cannot specify $LATEST as the value of this parameter. The $LATEST version can be deleted only when you want to delete all of the function's versions and aliases.

    You can only specify a function version, not an alias name, using this parameter. You cannot delete a function version using its alias.

    If you don't specify this parameter, AWS Lambda will delete the function, including all of its versions and aliases.

    ", + "GetFunctionConfigurationRequest$Qualifier": "

    Using this optional parameter you can specify a function version or an alias name. If you specify a function version, the API uses the qualified function ARN and returns information about the specific function version. If you specify an alias name, the API uses the alias ARN and returns information about the function version to which the alias points.

    If you don't specify this parameter, the API uses the unqualified function ARN, and returns information about the $LATEST function version.

    ", + "GetFunctionRequest$Qualifier": "

    Use this optional parameter to specify a function version or an alias name. If you specify a function version, the API uses the qualified function ARN for the request and returns information about the specific Lambda function version. If you specify an alias name, the API uses the alias ARN and returns information about the function version to which the alias points. If you don't provide this parameter, the API uses the unqualified function ARN and returns information about the $LATEST version of the Lambda function.

    ", + "GetPolicyRequest$Qualifier": "

    You can use this optional query parameter to specify a function version or an alias name, in which case this API returns all permissions associated with the specific qualified ARN. If you don't provide this parameter, the API returns the permissions that apply to the unqualified function ARN.

    ", + "InvocationRequest$Qualifier": "

    You can use this optional parameter to specify a Lambda function version or alias name. If you specify a function version, the API uses the qualified function ARN to invoke a specific Lambda function. If you specify an alias name, the API uses the alias ARN to invoke the Lambda function version to which the alias points.

    If you don't provide this parameter, then the API uses the unqualified function ARN, which results in invocation of the $LATEST version.

    ", + "RemovePermissionRequest$Qualifier": "

    You can specify this optional parameter to remove permission associated with a specific function version or function alias. If you don't specify this parameter, the API removes permission associated with the unqualified function ARN.

    " + } + }, + "RemovePermissionRequest": { + "base": null, + "refs": { + } + }, + "RequestTooLargeException": { + "base": "

    The request payload exceeded the Invoke request body JSON input limit. For more information, see Limits.

    ", + "refs": { + } + }, + "ResourceConflictException": { + "base": "

    The resource already exists.

    ", + "refs": { + } + }, + "ResourceNotFoundException": { + "base": "

    The resource (for example, a Lambda function or access policy statement) specified in the request does not exist.

    ", + "refs": { + } + }, + "RoleArn": { + "base": null, + "refs": { + "CreateFunctionRequest$Role": "

    The Amazon Resource Name (ARN) of the IAM role that Lambda assumes when it executes your function to access any other Amazon Web Services (AWS) resources. For more information, see AWS Lambda: How it Works.

    ", + "FunctionConfiguration$Role": "

    The Amazon Resource Name (ARN) of the IAM role that Lambda assumes when it executes your function to access any other Amazon Web Services (AWS) resources.

    ", + "UpdateFunctionConfigurationRequest$Role": "

    The Amazon Resource Name (ARN) of the IAM role that Lambda will assume when it executes your function.

    " + } + }, + "Runtime": { + "base": null, + "refs": { + "CreateFunctionRequest$Runtime": "

    The runtime environment for the Lambda function you are uploading.

    ", + "FunctionConfiguration$Runtime": "

    The runtime environment for the Lambda function.

    ", + "UpdateFunctionConfigurationRequest$Runtime": null + } + }, + "S3Bucket": { + "base": null, + "refs": { + "FunctionCode$S3Bucket": "

    Amazon S3 bucket name where the .zip file containing your deployment package is stored. This bucket must reside in the same AWS region where you are creating the Lambda function.

    ", + "UpdateFunctionCodeRequest$S3Bucket": "

    Amazon S3 bucket name where the .zip file containing your deployment package is stored. This bucket must reside in the same AWS region where you are creating the Lambda function.

    " + } + }, + "S3Key": { + "base": null, + "refs": { + "FunctionCode$S3Key": "

    The Amazon S3 object (the deployment package) key name you want to upload.

    ", + "UpdateFunctionCodeRequest$S3Key": "

    The Amazon S3 object (the deployment package) key name you want to upload.

    " + } + }, + "S3ObjectVersion": { + "base": null, + "refs": { + "FunctionCode$S3ObjectVersion": "

    The Amazon S3 object (the deployment package) version you want to upload.

    ", + "UpdateFunctionCodeRequest$S3ObjectVersion": "

    The Amazon S3 object (the deployment package) version you want to upload.

    " + } + }, + "SecurityGroupId": { + "base": null, + "refs": { + "SecurityGroupIds$member": null + } + }, + "SecurityGroupIds": { + "base": null, + "refs": { + "VpcConfig$SecurityGroupIds": "

    A list of one or more security group IDs in your VPC.

    ", + "VpcConfigResponse$SecurityGroupIds": "

    A list of security group IDs associated with the Lambda function.

    " + } + }, + "ServiceException": { + "base": "

    The AWS Lambda service encountered an internal error.

    ", + "refs": { + } + }, + "SourceOwner": { + "base": null, + "refs": { + "AddPermissionRequest$SourceAccount": "

    The AWS account ID (without a hyphen) of the source owner. For example, if the SourceArn identifies a bucket, then this is the bucket owner's account ID. You can use this additional condition to ensure the bucket you specify is owned by a specific account (it is possible the bucket owner deleted the bucket and some other AWS account created the bucket). You can also use this condition to specify all sources (that is, you don't specify the SourceArn) owned by a specific account.

    " + } + }, + "StatementId": { + "base": null, + "refs": { + "AddPermissionRequest$StatementId": "

    A unique statement identifier.

    ", + "RemovePermissionRequest$StatementId": "

    Statement ID of the permission to remove.

    " + } + }, + "String": { + "base": null, + "refs": { + "AddPermissionResponse$Statement": "

    The permission statement you specified in the request. The response returns it as a string, using a backslash (\"\\\") as an escape character in the JSON.

    ", + "CodeStorageExceededException$Type": null, + "CodeStorageExceededException$message": null, + "DeleteEventSourceMappingRequest$UUID": "

    The event source mapping ID.

    ", + "EC2AccessDeniedException$Type": null, + "EC2AccessDeniedException$Message": null, + "EC2ThrottledException$Type": null, + "EC2ThrottledException$Message": null, + "EC2UnexpectedException$Type": null, + "EC2UnexpectedException$Message": null, + "EC2UnexpectedException$EC2ErrorCode": null, + "ENILimitReachedException$Type": null, + "ENILimitReachedException$Message": null, + "EventSourceMappingConfiguration$UUID": "

    The AWS Lambda assigned opaque identifier for the mapping.

    ", + "EventSourceMappingConfiguration$LastProcessingResult": "

    The result of the last AWS Lambda invocation of your Lambda function.

    ", + "EventSourceMappingConfiguration$State": "

    The state of the event source mapping. It can be Creating, Enabled, Disabled, Enabling, Disabling, Updating, or Deleting.

    ", + "EventSourceMappingConfiguration$StateTransitionReason": "

    The reason the event source mapping is in its current state. It is either user-requested or an AWS Lambda-initiated state transition.

    ", + "FunctionCodeLocation$RepositoryType": "

    The repository from which you can download the function.

    ", + "FunctionCodeLocation$Location": "

    The presigned URL you can use to download the function's .zip file that you previously uploaded. The URL is valid for up to 10 minutes.

    ", + "FunctionConfiguration$CodeSha256": "

    The SHA256 hash of your function deployment package.

    ", + "GetEventSourceMappingRequest$UUID": "

    The AWS Lambda assigned ID of the event source mapping.

    ", + "GetPolicyResponse$Policy": "

    The resource policy associated with the specified function. The response returns the same as a string using a backslash (\"\\\") as an escape character in the JSON.

    ", + "InvalidParameterValueException$Type": null, + "InvalidParameterValueException$message": null, + "InvalidRequestContentException$Type": null, + "InvalidRequestContentException$message": null, + "InvalidSecurityGroupIDException$Type": null, + "InvalidSecurityGroupIDException$Message": null, + "InvalidSubnetIDException$Type": null, + "InvalidSubnetIDException$Message": null, + "InvocationRequest$ClientContext": "

    Using the ClientContext you can pass client-specific information to the Lambda function you are invoking. You can then process the client information in your Lambda function as you choose through the context variable. For an example of a ClientContext JSON, see PutEvents in the Amazon Mobile Analytics API Reference and User Guide.

    The ClientContext JSON must be base64-encoded.

    ", + "InvocationResponse$FunctionError": "

    Indicates whether an error occurred while executing the Lambda function. If an error occurred, this field will have one of two values: Handled or Unhandled. Handled errors are errors that are reported by the function, while Unhandled errors are those detected and reported by AWS Lambda. Unhandled errors include out-of-memory errors and function timeouts. For information about how to report a Handled error, see Programming Model.

    ", + "InvocationResponse$LogResult": "

    The base64-encoded logs for the Lambda function invocation. This is present only if the invocation type is RequestResponse and the logs were requested.

    ", + "ListAliasesRequest$Marker": "

    Optional string. An opaque pagination token returned from a previous ListAliases operation. If present, indicates where to continue the listing.

    ", + "ListAliasesResponse$NextMarker": "

    A string, present if there are more aliases.

    ", + "ListEventSourceMappingsRequest$Marker": "

    Optional string. An opaque pagination token returned from a previous ListEventSourceMappings operation. If present, specifies to continue the list from where the previous call left off.

    ", + "ListEventSourceMappingsResponse$NextMarker": "

    A string, present if there are more event source mappings.

    ", + "ListFunctionsRequest$Marker": "

    Optional string. An opaque pagination token returned from a previous ListFunctions operation. If present, indicates where to continue the listing.

    ", + "ListFunctionsResponse$NextMarker": "

    A string, present if there are more functions.
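
    A sketch of the Marker/NextMarker loop these fields describe (the client setup is assumed):

```go
import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/lambda"
)

// listAll pages through ListFunctions until no NextMarker is returned.
func listAll(svc *lambda.Lambda) ([]*lambda.FunctionConfiguration, error) {
	var fns []*lambda.FunctionConfiguration
	var marker *string
	for {
		out, err := svc.ListFunctions(&lambda.ListFunctionsInput{
			Marker:   marker, // nil on the first call
			MaxItems: aws.Int64(50),
		})
		if err != nil {
			return nil, err
		}
		fns = append(fns, out.Functions...)
		if out.NextMarker == nil {
			return fns, nil // no more functions
		}
		marker = out.NextMarker
	}
}
```

    The paginators-1.json file added later in this patch encodes the same Marker/NextMarker/MaxItems triple, which is what drives the SDK's generated ListFunctionsPages helper.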

    ", + "ListVersionsByFunctionRequest$Marker": "

    Optional string. An opaque pagination token returned from a previous ListVersionsByFunction operation. If present, indicates where to continue the listing.

    ", + "ListVersionsByFunctionResponse$NextMarker": "

    A string, present if there are more function versions.

    ", + "PolicyLengthExceededException$Type": null, + "PolicyLengthExceededException$message": null, + "PublishVersionRequest$CodeSha256": "

    The SHA256 hash of the deployment package you want to publish. This provides validation on the code you are publishing. If you provide this parameter, the value must match the SHA256 of the $LATEST version for the publication to succeed.

    ", + "RequestTooLargeException$Type": null, + "RequestTooLargeException$message": null, + "ResourceConflictException$Type": null, + "ResourceConflictException$message": null, + "ResourceNotFoundException$Type": null, + "ResourceNotFoundException$Message": null, + "ServiceException$Type": null, + "ServiceException$Message": null, + "SubnetIPAddressLimitReachedException$Type": null, + "SubnetIPAddressLimitReachedException$Message": null, + "TooManyRequestsException$retryAfterSeconds": "

    The number of seconds the caller should wait before retrying.

    ", + "TooManyRequestsException$Type": null, + "TooManyRequestsException$message": null, + "UnsupportedMediaTypeException$Type": null, + "UnsupportedMediaTypeException$message": null, + "UpdateEventSourceMappingRequest$UUID": "

    The event source mapping identifier.

    " + } + }, + "SubnetIPAddressLimitReachedException": { + "base": "

    AWS Lambda was not able to set up VPC access for the Lambda function because one or more configured subnets have no available IP addresses.

    ", + "refs": { + } + }, + "SubnetId": { + "base": null, + "refs": { + "SubnetIds$member": null + } + }, + "SubnetIds": { + "base": null, + "refs": { + "VpcConfig$SubnetIds": "

    A list of one or more subnet IDs in your VPC.

    ", + "VpcConfigResponse$SubnetIds": "

    A list of subnet IDs associated with the Lambda function.

    " + } + }, + "Timeout": { + "base": null, + "refs": { + "CreateFunctionRequest$Timeout": "

    The function execution time at which Lambda should terminate the function. Because the execution time has cost implications, we recommend you set this value based on your expected execution time. The default is 3 seconds.

    ", + "FunctionConfiguration$Timeout": "

    The function execution time at which Lambda should terminate the function. Because the execution time has cost implications, we recommend you set this value based on your expected execution time. The default is 3 seconds.

    ", + "UpdateFunctionConfigurationRequest$Timeout": "

    The function execution time at which AWS Lambda should terminate the function. Because the execution time has cost implications, we recommend you set this value based on your expected execution time. The default is 3 seconds.

    " + } + }, + "Timestamp": { + "base": null, + "refs": { + "FunctionConfiguration$LastModified": "

    The time stamp of the last time you updated the function.

    " + } + }, + "TooManyRequestsException": { + "base": null, + "refs": { + } + }, + "UnsupportedMediaTypeException": { + "base": "

    The content type of the Invoke request body is not JSON.

    ", + "refs": { + } + }, + "UpdateAliasRequest": { + "base": null, + "refs": { + } + }, + "UpdateEventSourceMappingRequest": { + "base": null, + "refs": { + } + }, + "UpdateFunctionCodeRequest": { + "base": null, + "refs": { + } + }, + "UpdateFunctionConfigurationRequest": { + "base": null, + "refs": { + } + }, + "Version": { + "base": null, + "refs": { + "AliasConfiguration$FunctionVersion": "

    Function version to which the alias points.

    ", + "CreateAliasRequest$FunctionVersion": "

    Lambda function version for which you are creating the alias.

    ", + "FunctionConfiguration$Version": "

    The version of the Lambda function.

    ", + "ListAliasesRequest$FunctionVersion": "

    If you specify this optional parameter, the API returns only the aliases that point to the specific Lambda function version; otherwise, the API returns all of the aliases created for the Lambda function.

    ", + "UpdateAliasRequest$FunctionVersion": "

    Using this parameter you can change the Lambda function version to which the alias points.

    " + } + }, + "VpcConfig": { + "base": "

    If your Lambda function accesses resources in a VPC, you provide this parameter identifying the list of security group IDs and subnet IDs. These must belong to the same VPC. You must provide at least one security group and one subnet ID.
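
    A sketch of attaching such a configuration with UpdateFunctionConfiguration; the function name and IDs are placeholders.

```go
import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/lambda"
)

// attachVpc supplies at least one security group and one subnet, all in the same VPC.
func attachVpc(svc *lambda.Lambda) error {
	_, err := svc.UpdateFunctionConfiguration(&lambda.UpdateFunctionConfigurationInput{
		FunctionName: aws.String("my-func"),
		VpcConfig: &lambda.VpcConfig{
			SecurityGroupIds: aws.StringSlice([]string{"sg-11111111"}),
			SubnetIds:        aws.StringSlice([]string{"subnet-22222222", "subnet-33333333"}),
		},
	})
	return err
}
```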

    ", + "refs": { + "CreateFunctionRequest$VpcConfig": "

    If your Lambda function accesses resources in a VPC, you provide this parameter identifying the list of security group IDs and subnet IDs. These must belong to the same VPC. You must provide at least one security group and one subnet ID.

    ", + "UpdateFunctionConfigurationRequest$VpcConfig": null + } + }, + "VpcConfigResponse": { + "base": "

    VPC configuration associated with your Lambda function.

    ", + "refs": { + "FunctionConfiguration$VpcConfig": "

    VPC configuration associated with your Lambda function.

    " + } + }, + "VpcId": { + "base": null, + "refs": { + "VpcConfigResponse$VpcId": "

    The VPC ID associated with your Lambda function.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/lambda/2015-03-31/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/lambda/2015-03-31/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/lambda/2015-03-31/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/lambda/2015-03-31/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/lambda/2015-03-31/paginators-1.json new file mode 100644 index 000000000..da00fb78f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/lambda/2015-03-31/paginators-1.json @@ -0,0 +1,16 @@ +{ + "pagination": { + "ListEventSourceMappings": { + "input_token": "Marker", + "output_token": "NextMarker", + "limit_key": "MaxItems", + "result_key": "EventSourceMappings" + }, + "ListFunctions": { + "input_token": "Marker", + "output_token": "NextMarker", + "limit_key": "MaxItems", + "result_key": "Functions" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/logs/2014-03-28/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/logs/2014-03-28/api-2.json new file mode 100644 index 000000000..9b08376c1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/logs/2014-03-28/api-2.json @@ -0,0 +1,1138 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2014-03-28", + "endpointPrefix":"logs", + "jsonVersion":"1.1", + "protocol":"json", + "serviceFullName":"Amazon CloudWatch Logs", + "signatureVersion":"v4", + "targetPrefix":"Logs_20140328" + }, + "operations":{ + "CancelExportTask":{ + "name":"CancelExportTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelExportTaskRequest"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidOperationException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "CreateExportTask":{ + "name":"CreateExportTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateExportTaskRequest"}, + "output":{"shape":"CreateExportTaskResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"LimitExceededException"}, + {"shape":"OperationAbortedException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceAlreadyExistsException"} + ] + }, + "CreateLogGroup":{ + "name":"CreateLogGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateLogGroupRequest"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"LimitExceededException"}, + {"shape":"OperationAbortedException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "CreateLogStream":{ + "name":"CreateLogStream", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateLogStreamRequest"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "DeleteDestination":{ + "name":"DeleteDestination", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDestinationRequest"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"OperationAbortedException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "DeleteLogGroup":{ + 
"name":"DeleteLogGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteLogGroupRequest"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"OperationAbortedException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "DeleteLogStream":{ + "name":"DeleteLogStream", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteLogStreamRequest"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"OperationAbortedException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "DeleteMetricFilter":{ + "name":"DeleteMetricFilter", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteMetricFilterRequest"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"OperationAbortedException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "DeleteRetentionPolicy":{ + "name":"DeleteRetentionPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRetentionPolicyRequest"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"OperationAbortedException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "DeleteSubscriptionFilter":{ + "name":"DeleteSubscriptionFilter", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSubscriptionFilterRequest"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"OperationAbortedException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "DescribeDestinations":{ + "name":"DescribeDestinations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDestinationsRequest"}, + "output":{"shape":"DescribeDestinationsResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "DescribeExportTasks":{ + "name":"DescribeExportTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeExportTasksRequest"}, + "output":{"shape":"DescribeExportTasksResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "DescribeLogGroups":{ + "name":"DescribeLogGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeLogGroupsRequest"}, + "output":{"shape":"DescribeLogGroupsResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "DescribeLogStreams":{ + "name":"DescribeLogStreams", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeLogStreamsRequest"}, + "output":{"shape":"DescribeLogStreamsResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "DescribeMetricFilters":{ + "name":"DescribeMetricFilters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeMetricFiltersRequest"}, + "output":{"shape":"DescribeMetricFiltersResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "DescribeSubscriptionFilters":{ + "name":"DescribeSubscriptionFilters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"DescribeSubscriptionFiltersRequest"}, + "output":{"shape":"DescribeSubscriptionFiltersResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "FilterLogEvents":{ + "name":"FilterLogEvents", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"FilterLogEventsRequest"}, + "output":{"shape":"FilterLogEventsResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "GetLogEvents":{ + "name":"GetLogEvents", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetLogEventsRequest"}, + "output":{"shape":"GetLogEventsResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "PutDestination":{ + "name":"PutDestination", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutDestinationRequest"}, + "output":{"shape":"PutDestinationResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"OperationAbortedException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "PutDestinationPolicy":{ + "name":"PutDestinationPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutDestinationPolicyRequest"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"OperationAbortedException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "PutLogEvents":{ + "name":"PutLogEvents", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutLogEventsRequest"}, + "output":{"shape":"PutLogEventsResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"InvalidSequenceTokenException"}, + {"shape":"DataAlreadyAcceptedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"OperationAbortedException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "PutMetricFilter":{ + "name":"PutMetricFilter", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutMetricFilterRequest"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"OperationAbortedException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "PutRetentionPolicy":{ + "name":"PutRetentionPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutRetentionPolicyRequest"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"OperationAbortedException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "PutSubscriptionFilter":{ + "name":"PutSubscriptionFilter", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutSubscriptionFilterRequest"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"OperationAbortedException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "TestMetricFilter":{ + "name":"TestMetricFilter", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TestMetricFilterRequest"}, + "output":{"shape":"TestMetricFilterResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ServiceUnavailableException"} + ] + } + }, + "shapes":{ + "AccessPolicy":{ + "type":"string", + "min":1 + }, + 
"Arn":{"type":"string"}, + "CancelExportTaskRequest":{ + "type":"structure", + "required":["taskId"], + "members":{ + "taskId":{"shape":"ExportTaskId"} + } + }, + "CreateExportTaskRequest":{ + "type":"structure", + "required":[ + "logGroupName", + "from", + "to", + "destination" + ], + "members":{ + "taskName":{"shape":"ExportTaskName"}, + "logGroupName":{"shape":"LogGroupName"}, + "logStreamNamePrefix":{"shape":"LogStreamName"}, + "from":{"shape":"Timestamp"}, + "to":{"shape":"Timestamp"}, + "destination":{"shape":"ExportDestinationBucket"}, + "destinationPrefix":{"shape":"ExportDestinationPrefix"} + } + }, + "CreateExportTaskResponse":{ + "type":"structure", + "members":{ + "taskId":{"shape":"ExportTaskId"} + } + }, + "CreateLogGroupRequest":{ + "type":"structure", + "required":["logGroupName"], + "members":{ + "logGroupName":{"shape":"LogGroupName"} + } + }, + "CreateLogStreamRequest":{ + "type":"structure", + "required":[ + "logGroupName", + "logStreamName" + ], + "members":{ + "logGroupName":{"shape":"LogGroupName"}, + "logStreamName":{"shape":"LogStreamName"} + } + }, + "DataAlreadyAcceptedException":{ + "type":"structure", + "members":{ + "expectedSequenceToken":{"shape":"SequenceToken"} + }, + "exception":true + }, + "Days":{"type":"integer"}, + "DeleteDestinationRequest":{ + "type":"structure", + "required":["destinationName"], + "members":{ + "destinationName":{"shape":"DestinationName"} + } + }, + "DeleteLogGroupRequest":{ + "type":"structure", + "required":["logGroupName"], + "members":{ + "logGroupName":{"shape":"LogGroupName"} + } + }, + "DeleteLogStreamRequest":{ + "type":"structure", + "required":[ + "logGroupName", + "logStreamName" + ], + "members":{ + "logGroupName":{"shape":"LogGroupName"}, + "logStreamName":{"shape":"LogStreamName"} + } + }, + "DeleteMetricFilterRequest":{ + "type":"structure", + "required":[ + "logGroupName", + "filterName" + ], + "members":{ + "logGroupName":{"shape":"LogGroupName"}, + "filterName":{"shape":"FilterName"} + } + }, + "DeleteRetentionPolicyRequest":{ + "type":"structure", + "required":["logGroupName"], + "members":{ + "logGroupName":{"shape":"LogGroupName"} + } + }, + "DeleteSubscriptionFilterRequest":{ + "type":"structure", + "required":[ + "logGroupName", + "filterName" + ], + "members":{ + "logGroupName":{"shape":"LogGroupName"}, + "filterName":{"shape":"FilterName"} + } + }, + "Descending":{"type":"boolean"}, + "DescribeDestinationsRequest":{ + "type":"structure", + "members":{ + "DestinationNamePrefix":{"shape":"DestinationName"}, + "nextToken":{"shape":"NextToken"}, + "limit":{"shape":"DescribeLimit"} + } + }, + "DescribeDestinationsResponse":{ + "type":"structure", + "members":{ + "destinations":{"shape":"Destinations"}, + "nextToken":{"shape":"NextToken"} + } + }, + "DescribeExportTasksRequest":{ + "type":"structure", + "members":{ + "taskId":{"shape":"ExportTaskId"}, + "statusCode":{"shape":"ExportTaskStatusCode"}, + "nextToken":{"shape":"NextToken"}, + "limit":{"shape":"DescribeLimit"} + } + }, + "DescribeExportTasksResponse":{ + "type":"structure", + "members":{ + "exportTasks":{"shape":"ExportTasks"}, + "nextToken":{"shape":"NextToken"} + } + }, + "DescribeLimit":{ + "type":"integer", + "max":50, + "min":1 + }, + "DescribeLogGroupsRequest":{ + "type":"structure", + "members":{ + "logGroupNamePrefix":{"shape":"LogGroupName"}, + "nextToken":{"shape":"NextToken"}, + "limit":{"shape":"DescribeLimit"} + } + }, + "DescribeLogGroupsResponse":{ + "type":"structure", + "members":{ + "logGroups":{"shape":"LogGroups"}, + 
"nextToken":{"shape":"NextToken"} + } + }, + "DescribeLogStreamsRequest":{ + "type":"structure", + "required":["logGroupName"], + "members":{ + "logGroupName":{"shape":"LogGroupName"}, + "logStreamNamePrefix":{"shape":"LogStreamName"}, + "orderBy":{"shape":"OrderBy"}, + "descending":{"shape":"Descending"}, + "nextToken":{"shape":"NextToken"}, + "limit":{"shape":"DescribeLimit"} + } + }, + "DescribeLogStreamsResponse":{ + "type":"structure", + "members":{ + "logStreams":{"shape":"LogStreams"}, + "nextToken":{"shape":"NextToken"} + } + }, + "DescribeMetricFiltersRequest":{ + "type":"structure", + "required":["logGroupName"], + "members":{ + "logGroupName":{"shape":"LogGroupName"}, + "filterNamePrefix":{"shape":"FilterName"}, + "nextToken":{"shape":"NextToken"}, + "limit":{"shape":"DescribeLimit"} + } + }, + "DescribeMetricFiltersResponse":{ + "type":"structure", + "members":{ + "metricFilters":{"shape":"MetricFilters"}, + "nextToken":{"shape":"NextToken"} + } + }, + "DescribeSubscriptionFiltersRequest":{ + "type":"structure", + "required":["logGroupName"], + "members":{ + "logGroupName":{"shape":"LogGroupName"}, + "filterNamePrefix":{"shape":"FilterName"}, + "nextToken":{"shape":"NextToken"}, + "limit":{"shape":"DescribeLimit"} + } + }, + "DescribeSubscriptionFiltersResponse":{ + "type":"structure", + "members":{ + "subscriptionFilters":{"shape":"SubscriptionFilters"}, + "nextToken":{"shape":"NextToken"} + } + }, + "Destination":{ + "type":"structure", + "members":{ + "destinationName":{"shape":"DestinationName"}, + "targetArn":{"shape":"TargetArn"}, + "roleArn":{"shape":"RoleArn"}, + "accessPolicy":{"shape":"AccessPolicy"}, + "arn":{"shape":"Arn"}, + "creationTime":{"shape":"Timestamp"} + } + }, + "DestinationArn":{ + "type":"string", + "min":1 + }, + "DestinationName":{ + "type":"string", + "max":512, + "min":1, + "pattern":"[^:*]*" + }, + "Destinations":{ + "type":"list", + "member":{"shape":"Destination"} + }, + "EventId":{"type":"string"}, + "EventMessage":{ + "type":"string", + "min":1 + }, + "EventNumber":{"type":"long"}, + "EventsLimit":{ + "type":"integer", + "max":10000, + "min":1 + }, + "ExportDestinationBucket":{ + "type":"string", + "max":512, + "min":1 + }, + "ExportDestinationPrefix":{"type":"string"}, + "ExportTask":{ + "type":"structure", + "members":{ + "taskId":{"shape":"ExportTaskId"}, + "taskName":{"shape":"ExportTaskName"}, + "logGroupName":{"shape":"LogGroupName"}, + "from":{"shape":"Timestamp"}, + "to":{"shape":"Timestamp"}, + "destination":{"shape":"ExportDestinationBucket"}, + "destinationPrefix":{"shape":"ExportDestinationPrefix"}, + "status":{"shape":"ExportTaskStatus"}, + "executionInfo":{"shape":"ExportTaskExecutionInfo"} + } + }, + "ExportTaskExecutionInfo":{ + "type":"structure", + "members":{ + "creationTime":{"shape":"Timestamp"}, + "completionTime":{"shape":"Timestamp"} + } + }, + "ExportTaskId":{ + "type":"string", + "max":512, + "min":1 + }, + "ExportTaskName":{ + "type":"string", + "max":512, + "min":1 + }, + "ExportTaskStatus":{ + "type":"structure", + "members":{ + "code":{"shape":"ExportTaskStatusCode"}, + "message":{"shape":"ExportTaskStatusMessage"} + } + }, + "ExportTaskStatusCode":{ + "type":"string", + "enum":[ + "CANCELLED", + "COMPLETED", + "FAILED", + "PENDING", + "PENDING_CANCEL", + "RUNNING" + ] + }, + "ExportTaskStatusMessage":{"type":"string"}, + "ExportTasks":{ + "type":"list", + "member":{"shape":"ExportTask"} + }, + "ExtractedValues":{ + "type":"map", + "key":{"shape":"Token"}, + "value":{"shape":"Value"} + }, + 
"FilterCount":{"type":"integer"}, + "FilterLogEventsRequest":{ + "type":"structure", + "required":["logGroupName"], + "members":{ + "logGroupName":{"shape":"LogGroupName"}, + "logStreamNames":{"shape":"InputLogStreamNames"}, + "startTime":{"shape":"Timestamp"}, + "endTime":{"shape":"Timestamp"}, + "filterPattern":{"shape":"FilterPattern"}, + "nextToken":{"shape":"NextToken"}, + "limit":{"shape":"EventsLimit"}, + "interleaved":{"shape":"Interleaved"} + } + }, + "FilterLogEventsResponse":{ + "type":"structure", + "members":{ + "events":{"shape":"FilteredLogEvents"}, + "searchedLogStreams":{"shape":"SearchedLogStreams"}, + "nextToken":{"shape":"NextToken"} + } + }, + "FilterName":{ + "type":"string", + "max":512, + "min":1, + "pattern":"[^:*]*" + }, + "FilterPattern":{ + "type":"string", + "max":1024, + "min":0 + }, + "FilteredLogEvent":{ + "type":"structure", + "members":{ + "logStreamName":{"shape":"LogStreamName"}, + "timestamp":{"shape":"Timestamp"}, + "message":{"shape":"EventMessage"}, + "ingestionTime":{"shape":"Timestamp"}, + "eventId":{"shape":"EventId"} + } + }, + "FilteredLogEvents":{ + "type":"list", + "member":{"shape":"FilteredLogEvent"} + }, + "GetLogEventsRequest":{ + "type":"structure", + "required":[ + "logGroupName", + "logStreamName" + ], + "members":{ + "logGroupName":{"shape":"LogGroupName"}, + "logStreamName":{"shape":"LogStreamName"}, + "startTime":{"shape":"Timestamp"}, + "endTime":{"shape":"Timestamp"}, + "nextToken":{"shape":"NextToken"}, + "limit":{"shape":"EventsLimit"}, + "startFromHead":{"shape":"StartFromHead"} + } + }, + "GetLogEventsResponse":{ + "type":"structure", + "members":{ + "events":{"shape":"OutputLogEvents"}, + "nextForwardToken":{"shape":"NextToken"}, + "nextBackwardToken":{"shape":"NextToken"} + } + }, + "InputLogEvent":{ + "type":"structure", + "required":[ + "timestamp", + "message" + ], + "members":{ + "timestamp":{"shape":"Timestamp"}, + "message":{"shape":"EventMessage"} + } + }, + "InputLogEvents":{ + "type":"list", + "member":{"shape":"InputLogEvent"}, + "max":10000, + "min":1 + }, + "InputLogStreamNames":{ + "type":"list", + "member":{"shape":"LogStreamName"}, + "max":100, + "min":1 + }, + "Interleaved":{"type":"boolean"}, + "InvalidOperationException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidParameterException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidSequenceTokenException":{ + "type":"structure", + "members":{ + "expectedSequenceToken":{"shape":"SequenceToken"} + }, + "exception":true + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "LogEventIndex":{"type":"integer"}, + "LogGroup":{ + "type":"structure", + "members":{ + "logGroupName":{"shape":"LogGroupName"}, + "creationTime":{"shape":"Timestamp"}, + "retentionInDays":{"shape":"Days"}, + "metricFilterCount":{"shape":"FilterCount"}, + "arn":{"shape":"Arn"}, + "storedBytes":{"shape":"StoredBytes"} + } + }, + "LogGroupName":{ + "type":"string", + "max":512, + "min":1, + "pattern":"[\\.\\-_/#A-Za-z0-9]+" + }, + "LogGroups":{ + "type":"list", + "member":{"shape":"LogGroup"} + }, + "LogStream":{ + "type":"structure", + "members":{ + "logStreamName":{"shape":"LogStreamName"}, + "creationTime":{"shape":"Timestamp"}, + "firstEventTimestamp":{"shape":"Timestamp"}, + "lastEventTimestamp":{"shape":"Timestamp"}, + "lastIngestionTime":{"shape":"Timestamp"}, + "uploadSequenceToken":{"shape":"SequenceToken"}, + "arn":{"shape":"Arn"}, + "storedBytes":{"shape":"StoredBytes"} + } + 
}, + "LogStreamName":{ + "type":"string", + "max":512, + "min":1, + "pattern":"[^:*]*" + }, + "LogStreamSearchedCompletely":{"type":"boolean"}, + "LogStreams":{ + "type":"list", + "member":{"shape":"LogStream"} + }, + "MetricFilter":{ + "type":"structure", + "members":{ + "filterName":{"shape":"FilterName"}, + "filterPattern":{"shape":"FilterPattern"}, + "metricTransformations":{"shape":"MetricTransformations"}, + "creationTime":{"shape":"Timestamp"} + } + }, + "MetricFilterMatchRecord":{ + "type":"structure", + "members":{ + "eventNumber":{"shape":"EventNumber"}, + "eventMessage":{"shape":"EventMessage"}, + "extractedValues":{"shape":"ExtractedValues"} + } + }, + "MetricFilterMatches":{ + "type":"list", + "member":{"shape":"MetricFilterMatchRecord"} + }, + "MetricFilters":{ + "type":"list", + "member":{"shape":"MetricFilter"} + }, + "MetricName":{ + "type":"string", + "max":255, + "pattern":"[^:*$]*" + }, + "MetricNamespace":{ + "type":"string", + "max":255, + "pattern":"[^:*$]*" + }, + "MetricTransformation":{ + "type":"structure", + "required":[ + "metricName", + "metricNamespace", + "metricValue" + ], + "members":{ + "metricName":{"shape":"MetricName"}, + "metricNamespace":{"shape":"MetricNamespace"}, + "metricValue":{"shape":"MetricValue"} + } + }, + "MetricTransformations":{ + "type":"list", + "member":{"shape":"MetricTransformation"}, + "max":1, + "min":1 + }, + "MetricValue":{ + "type":"string", + "max":100 + }, + "NextToken":{ + "type":"string", + "min":1 + }, + "OperationAbortedException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "OrderBy":{ + "type":"string", + "enum":[ + "LogStreamName", + "LastEventTime" + ] + }, + "OutputLogEvent":{ + "type":"structure", + "members":{ + "timestamp":{"shape":"Timestamp"}, + "message":{"shape":"EventMessage"}, + "ingestionTime":{"shape":"Timestamp"} + } + }, + "OutputLogEvents":{ + "type":"list", + "member":{"shape":"OutputLogEvent"} + }, + "PutDestinationPolicyRequest":{ + "type":"structure", + "required":[ + "destinationName", + "accessPolicy" + ], + "members":{ + "destinationName":{"shape":"DestinationName"}, + "accessPolicy":{"shape":"AccessPolicy"} + } + }, + "PutDestinationRequest":{ + "type":"structure", + "required":[ + "destinationName", + "targetArn", + "roleArn" + ], + "members":{ + "destinationName":{"shape":"DestinationName"}, + "targetArn":{"shape":"TargetArn"}, + "roleArn":{"shape":"RoleArn"} + } + }, + "PutDestinationResponse":{ + "type":"structure", + "members":{ + "destination":{"shape":"Destination"} + } + }, + "PutLogEventsRequest":{ + "type":"structure", + "required":[ + "logGroupName", + "logStreamName", + "logEvents" + ], + "members":{ + "logGroupName":{"shape":"LogGroupName"}, + "logStreamName":{"shape":"LogStreamName"}, + "logEvents":{"shape":"InputLogEvents"}, + "sequenceToken":{"shape":"SequenceToken"} + } + }, + "PutLogEventsResponse":{ + "type":"structure", + "members":{ + "nextSequenceToken":{"shape":"SequenceToken"}, + "rejectedLogEventsInfo":{"shape":"RejectedLogEventsInfo"} + } + }, + "PutMetricFilterRequest":{ + "type":"structure", + "required":[ + "logGroupName", + "filterName", + "filterPattern", + "metricTransformations" + ], + "members":{ + "logGroupName":{"shape":"LogGroupName"}, + "filterName":{"shape":"FilterName"}, + "filterPattern":{"shape":"FilterPattern"}, + "metricTransformations":{"shape":"MetricTransformations"} + } + }, + "PutRetentionPolicyRequest":{ + "type":"structure", + "required":[ + "logGroupName", + "retentionInDays" + ], + "members":{ + 
"logGroupName":{"shape":"LogGroupName"}, + "retentionInDays":{"shape":"Days"} + } + }, + "PutSubscriptionFilterRequest":{ + "type":"structure", + "required":[ + "logGroupName", + "filterName", + "filterPattern", + "destinationArn" + ], + "members":{ + "logGroupName":{"shape":"LogGroupName"}, + "filterName":{"shape":"FilterName"}, + "filterPattern":{"shape":"FilterPattern"}, + "destinationArn":{"shape":"DestinationArn"}, + "roleArn":{"shape":"RoleArn"} + } + }, + "RejectedLogEventsInfo":{ + "type":"structure", + "members":{ + "tooNewLogEventStartIndex":{"shape":"LogEventIndex"}, + "tooOldLogEventEndIndex":{"shape":"LogEventIndex"}, + "expiredLogEventEndIndex":{"shape":"LogEventIndex"} + } + }, + "ResourceAlreadyExistsException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "RoleArn":{ + "type":"string", + "min":1 + }, + "SearchedLogStream":{ + "type":"structure", + "members":{ + "logStreamName":{"shape":"LogStreamName"}, + "searchedCompletely":{"shape":"LogStreamSearchedCompletely"} + } + }, + "SearchedLogStreams":{ + "type":"list", + "member":{"shape":"SearchedLogStream"} + }, + "SequenceToken":{ + "type":"string", + "min":1 + }, + "ServiceUnavailableException":{ + "type":"structure", + "members":{ + }, + "exception":true, + "fault":true + }, + "StartFromHead":{"type":"boolean"}, + "StoredBytes":{ + "type":"long", + "min":0 + }, + "SubscriptionFilter":{ + "type":"structure", + "members":{ + "filterName":{"shape":"FilterName"}, + "logGroupName":{"shape":"LogGroupName"}, + "filterPattern":{"shape":"FilterPattern"}, + "destinationArn":{"shape":"DestinationArn"}, + "roleArn":{"shape":"RoleArn"}, + "creationTime":{"shape":"Timestamp"} + } + }, + "SubscriptionFilters":{ + "type":"list", + "member":{"shape":"SubscriptionFilter"} + }, + "TargetArn":{ + "type":"string", + "min":1 + }, + "TestEventMessages":{ + "type":"list", + "member":{"shape":"EventMessage"}, + "max":50, + "min":1 + }, + "TestMetricFilterRequest":{ + "type":"structure", + "required":[ + "filterPattern", + "logEventMessages" + ], + "members":{ + "filterPattern":{"shape":"FilterPattern"}, + "logEventMessages":{"shape":"TestEventMessages"} + } + }, + "TestMetricFilterResponse":{ + "type":"structure", + "members":{ + "matches":{"shape":"MetricFilterMatches"} + } + }, + "Timestamp":{ + "type":"long", + "min":0 + }, + "Token":{"type":"string"}, + "Value":{"type":"string"} + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/logs/2014-03-28/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/logs/2014-03-28/docs-2.json new file mode 100644 index 000000000..4b9ba501d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/logs/2014-03-28/docs-2.json @@ -0,0 +1,789 @@ +{ + "version": "2.0", + "service": "Amazon CloudWatch Logs API Reference

    You can use Amazon CloudWatch Logs to monitor, store, and access your log files from Amazon Elastic Compute Cloud (Amazon EC2) instances, Amazon CloudTrail, or other sources. You can then retrieve the associated log data from CloudWatch Logs using the Amazon CloudWatch console, the CloudWatch Logs commands in the AWS CLI, the CloudWatch Logs API, or the CloudWatch Logs SDK.

    You can use CloudWatch Logs to:

    • Monitor Logs from Amazon EC2 Instances in Real-time: You can use CloudWatch Logs to monitor applications and systems using log data. For example, CloudWatch Logs can track the number of errors that occur in your application logs and send you a notification whenever the rate of errors exceeds a threshold you specify. CloudWatch Logs uses your log data for monitoring, so no code changes are required. For example, you can monitor application logs for specific literal terms (such as \"NullReferenceException\") or count the number of occurrences of a literal term at a particular position in log data (such as \"404\" status codes in an Apache access log). When the term you are searching for is found, CloudWatch Logs reports the data to an Amazon CloudWatch metric that you specify.

    • Monitor Amazon CloudTrail Logged Events: You can create alarms in Amazon CloudWatch and receive notifications of particular API activity as captured by CloudTrail and use the notification to perform troubleshooting.

    • Archive Log Data: You can use CloudWatch Logs to store your log data in highly durable storage. You can change the log retention setting so that any log events older than this setting are automatically deleted. The CloudWatch Logs agent makes it easy to quickly send both rotated and non-rotated log data off of a host and into the log service. You can then access the raw log data when you need it.

    ", + "operations": { + "CancelExportTask": "

    Cancels an export task if it is in PENDING or RUNNING state.

    ", + "CreateExportTask": "

    Creates an ExportTask which allows you to efficiently export data from a Log Group to your Amazon S3 bucket.

    This is an asynchronous call. If all the required information is provided, this API will initiate an export task and respond with the task Id. Once started, DescribeExportTasks can be used to get the status of an export task. You can only have one active (RUNNING or PENDING) export task at a time, per account.

    You can export logs from multiple log groups or multiple time ranges to the same Amazon S3 bucket. To separate out log data for each export task, you can specify a prefix that will be used as the Amazon S3 key prefix for all exported objects.
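
    A sketch of the asynchronous flow just described: start the export, then poll DescribeExportTasks by task ID until the task leaves the PENDING/RUNNING states. The log group, bucket, and time range are placeholder values.

```go
import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

// exportAndWait returns the terminal status code of the export task.
func exportAndWait(svc *cloudwatchlogs.CloudWatchLogs) (string, error) {
	out, err := svc.CreateExportTask(&cloudwatchlogs.CreateExportTaskInput{
		LogGroupName:      aws.String("my-log-group"),
		From:              aws.Int64(1500000000000), // ms since epoch
		To:                aws.Int64(1500086400000),
		Destination:       aws.String("my-export-bucket"),
		DestinationPrefix: aws.String("exports/my-log-group"), // S3 key prefix per task
		TaskName:          aws.String("daily-export"),
	})
	if err != nil {
		return "", err
	}
	for {
		desc, err := svc.DescribeExportTasks(&cloudwatchlogs.DescribeExportTasksInput{TaskId: out.TaskId})
		if err != nil {
			return "", err
		}
		if len(desc.ExportTasks) == 0 {
			return "", fmt.Errorf("task %s not found", aws.StringValue(out.TaskId))
		}
		code := aws.StringValue(desc.ExportTasks[0].Status.Code)
		if code != "PENDING" && code != "RUNNING" {
			return code, nil // CANCELLED, COMPLETED, or FAILED
		}
		time.Sleep(10 * time.Second)
	}
}
```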

    ", + "CreateLogGroup": "

    Creates a new log group with the specified name. The name of the log group must be unique within a region for an AWS account. You can create up to 500 log groups per account.

    You must use the following guidelines when naming a log group:

    • Log group names can be between 1 and 512 characters long.
    • Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), '/' (forward slash), and '.' (period).

    ", + "CreateLogStream": "

    Creates a new log stream in the specified log group. The name of the log stream must be unique within the log group. There is no limit on the number of log streams that can exist in a log group.

    You must use the following guidelines when naming a log stream:

    • Log stream names can be between 1 and 512 characters long.
    • The ':' colon character is not allowed.

    ", + "DeleteDestination": "

    Deletes the destination with the specified name and eventually disables all the subscription filters that publish to it. This will not delete the physical resource encapsulated by the destination.

    ", + "DeleteLogGroup": "

    Deletes the log group with the specified name and permanently deletes all the archived log events associated with it.

    ", + "DeleteLogStream": "

    Deletes a log stream and permanently deletes all the archived log events associated with it.

    ", + "DeleteMetricFilter": "

    Deletes a metric filter associated with the specified log group.

    ", + "DeleteRetentionPolicy": "

    Deletes the retention policy of the specified log group. Log events do not expire if they belong to log groups without a retention policy.

    ", + "DeleteSubscriptionFilter": "

    Deletes a subscription filter associated with the specified log group.

    ", + "DescribeDestinations": "

    Returns all the destinations that are associated with the AWS account making the request. The list returned in the response is ASCII-sorted by destination name.

    By default, this operation returns up to 50 destinations. If there are more destinations to list, the response would contain a nextToken value in the response body. You can also limit the number of destinations returned in the response by specifying the limit parameter in the request.

    ", + "DescribeExportTasks": "

    Returns all the export tasks that are associated with the AWS account making the request. The export tasks can be filtered based on TaskId or TaskStatus.

    By default, this operation returns up to 50 export tasks that satisfy the specified filters. If there are more export tasks to list, the response would contain a nextToken value in the response body. You can also limit the number of export tasks returned in the response by specifying the limit parameter in the request.

    ", + "DescribeLogGroups": "

    Returns all the log groups that are associated with the AWS account making the request. The list returned in the response is ASCII-sorted by log group name.

    By default, this operation returns up to 50 log groups. If there are more log groups to list, the response would contain a nextToken value in the response body. You can also limit the number of log groups returned in the response by specifying the limit parameter in the request.

    ", + "DescribeLogStreams": "

    Returns all the log streams that are associated with the specified log group. The list returned in the response is ASCII-sorted by log stream name.

    By default, this operation returns up to 50 log streams. If there are more log streams to list, the response would contain a nextToken value in the response body. You can also limit the number of log streams returned in the response by specifying the limit parameter in the request. This operation has a limit of five transactions per second, after which transactions are throttled.

    ", + "DescribeMetricFilters": "

    Returns all the metrics filters associated with the specified log group. The list returned in the response is ASCII-sorted by filter name.

    By default, this operation returns up to 50 metric filters. If there are more metric filters to list, the response would contain a nextToken value in the response body. You can also limit the number of metric filters returned in the response by specifying the limit parameter in the request.

    ", + "DescribeSubscriptionFilters": "

    Returns all the subscription filters associated with the specified log group. The list returned in the response is ASCII-sorted by filter name.

    By default, this operation returns up to 50 subscription filters. If there are more subscription filters to list, the response would contain a nextToken value in the response body. You can also limit the number of subscription filters returned in the response by specifying the limit parameter in the request.

    ", + "FilterLogEvents": "

    Retrieves log events, optionally filtered by a filter pattern from the specified log group. You can provide an optional time range to filter the results on the event timestamp. You can limit the streams searched to an explicit list of logStreamNames.

    By default, this operation returns as many matching log events as can fit in a response size of 1MB, up to 10,000 log events, or all the events found within a time-bounded scan window. If the response includes a nextToken, then there is more data to search, and the search can be resumed with a new request providing the nextToken. The response will contain a list of searchedLogStreams that contains information about which streams were searched in the request and whether they have been searched completely or require further pagination. The limit parameter in the request can be used to specify the maximum number of events to return in a page.
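
    A sketch of that resume loop (the log group and pattern are placeholders):

```go
import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

// filterAll re-issues the request with nextToken until none is returned.
func filterAll(svc *cloudwatchlogs.CloudWatchLogs) error {
	var token *string
	for {
		out, err := svc.FilterLogEvents(&cloudwatchlogs.FilterLogEventsInput{
			LogGroupName:  aws.String("my-log-group"),
			FilterPattern: aws.String("\"NullReferenceException\""),
			Interleaved:   aws.Bool(true),
			NextToken:     token,
		})
		if err != nil {
			return err
		}
		for _, e := range out.Events {
			fmt.Println(aws.StringValue(e.Message))
		}
		if out.NextToken == nil {
			return nil // the whole range has been searched
		}
		token = out.NextToken
	}
}
```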

    ", + "GetLogEvents": "

    Retrieves log events from the specified log stream. You can provide an optional time range to filter the results on the event timestamp.

    By default, this operation returns as many log events as can fit in a response size of 1MB, up to 10,000 log events. The response will always include a nextForwardToken and a nextBackwardToken in the response body. You can use either of these tokens in subsequent GetLogEvents requests to paginate through events in the forward or backward direction. You can also limit the number of log events returned in the response by specifying the limit parameter in the request.
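
    A sketch of forward pagination. One behavior is assumed here that this summary does not spell out: when the end of the stream is reached, GetLogEvents returns the same forward token that was passed in, which is how the loop knows to stop.

```go
import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

// readForward walks a stream oldest-to-newest using nextForwardToken.
func readForward(svc *cloudwatchlogs.CloudWatchLogs) error {
	var token *string
	for {
		out, err := svc.GetLogEvents(&cloudwatchlogs.GetLogEventsInput{
			LogGroupName:  aws.String("my-log-group"),
			LogStreamName: aws.String("my-log-stream"),
			StartFromHead: aws.Bool(true),
			NextToken:     token,
		})
		if err != nil {
			return err
		}
		for _, e := range out.Events {
			fmt.Println(aws.StringValue(e.Message))
		}
		if token != nil && aws.StringValue(out.NextForwardToken) == aws.StringValue(token) {
			return nil // token unchanged: end of stream
		}
		token = out.NextForwardToken
	}
}
```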

    ", + "PutDestination": "

    Creates or updates a Destination. A destination encapsulates a physical resource (such as a Kinesis stream) and allows you to subscribe to a real-time stream of log events from a different account, ingested through PutLogEvents requests. Currently, the only supported physical resource is an Amazon Kinesis stream belonging to the same account as the destination.

    A destination controls what is written to its Amazon Kinesis stream through an access policy. By default, PutDestination does not set any access policy with the destination, which means a cross-account user will not be able to call PutSubscriptionFilter against this destination. To enable that, the destination owner must call PutDestinationPolicy after PutDestination.
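
    A sketch of that two-step setup; the stream ARN, role ARN, account IDs, and names are placeholders, and the policy document is illustrative only.

```go
import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

// createDestination creates the destination, then grants another account
// the right to attach subscription filters to it.
func createDestination(svc *cloudwatchlogs.CloudWatchLogs) error {
	_, err := svc.PutDestination(&cloudwatchlogs.PutDestinationInput{
		DestinationName: aws.String("my-destination"),
		TargetArn:       aws.String("arn:aws:kinesis:us-east-1:123456789012:stream/my-stream"),
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/cwl-to-kinesis"),
	})
	if err != nil {
		return err
	}
	policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow",
	  "Principal":{"AWS":"999999999999"},"Action":"logs:PutSubscriptionFilter",
	  "Resource":"arn:aws:logs:us-east-1:123456789012:destination:my-destination"}]}`
	_, err = svc.PutDestinationPolicy(&cloudwatchlogs.PutDestinationPolicyInput{
		DestinationName: aws.String("my-destination"),
		AccessPolicy:    aws.String(policy),
	})
	return err
}
```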

    ", + "PutDestinationPolicy": "

    Creates or updates an access policy associated with an existing Destination. An access policy is an IAM policy document that is used to authorize claims to register a subscription filter against a given destination.

    ", + "PutLogEvents": "

    Uploads a batch of log events to the specified log stream.

    Every PutLogEvents request must include the sequenceToken obtained from the response of the previous request. An upload in a newly created log stream does not require a sequenceToken. (A minimal sketch of this sequencing follows the constraints list below.)

    The batch of events must satisfy the following constraints:

    • The maximum batch size is 1,048,576 bytes, and this size is calculated as the sum of all event messages in UTF-8, plus 26 bytes for each log event.
    • None of the log events in the batch can be more than 2 hours in the future.
    • None of the log events in the batch can be older than 14 days or the retention period of the log group.
    • The log events in the batch must be in chronological order by their timestamp.
    • The maximum number of log events in a batch is 10,000.
    • A batch of log events in a single PutLogEvents request cannot span more than 24 hours. Otherwise, the PutLogEvents operation will fail.
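
    The sequencing sketch referenced above; the group and stream names are placeholders. Each call returns the token the next call must present.

```go
import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

// putBatch uploads one batch and returns the token for the next one.
// Pass token == nil for the first upload to a newly created stream.
func putBatch(svc *cloudwatchlogs.CloudWatchLogs, token *string, lines []string) (*string, error) {
	now := time.Now().UnixNano() / int64(time.Millisecond) // ms since epoch
	events := make([]*cloudwatchlogs.InputLogEvent, 0, len(lines))
	for _, l := range lines {
		events = append(events, &cloudwatchlogs.InputLogEvent{
			Timestamp: aws.Int64(now), // one timestamp keeps the batch chronological
			Message:   aws.String(l),
		})
	}
	out, err := svc.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{
		LogGroupName:  aws.String("my-log-group"),
		LogStreamName: aws.String("my-log-stream"),
		LogEvents:     events,
		SequenceToken: token,
	})
	if err != nil {
		return nil, err
	}
	return out.NextSequenceToken, nil
}
```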

    ", + "PutMetricFilter": "

    Creates or updates a metric filter and associates it with the specified log group. Metric filters allow you to configure rules to extract metric data from log events ingested through PutLogEvents requests.

    The maximum number of metric filters that can be associated with a log group is 100.
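
    A sketch with placeholder names, counting occurrences of a pattern as a CloudWatch metric:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/cloudwatchlogs"
    )

    func main() {
        svc := cloudwatchlogs.New(session.Must(session.NewSession()))
        _, err := svc.PutMetricFilter(&cloudwatchlogs.PutMetricFilterInput{
            LogGroupName:  aws.String("my-log-group"), // placeholder
            FilterName:    aws.String("error-count"),  // placeholder
            FilterPattern: aws.String("ERROR"),
            MetricTransformations: []*cloudwatchlogs.MetricTransformation{{
                MetricName:      aws.String("ErrorCount"), // placeholder
                MetricNamespace: aws.String("MyApp"),      // placeholder
                MetricValue:     aws.String("1"),          // publish 1 per match
            }},
        })
        if err != nil {
            log.Fatal(err)
        }
    }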

    ", + "PutRetentionPolicy": "

    Sets the retention of the specified log group. A retention policy allows you to configure the number of days you want to retain log events in the specified log group.
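
    For example (placeholder group name; the value must be one of the allowed retention periods listed under the Days shape below):

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/cloudwatchlogs"
    )

    func main() {
        svc := cloudwatchlogs.New(session.Must(session.NewSession()))
        _, err := svc.PutRetentionPolicy(&cloudwatchlogs.PutRetentionPolicyInput{
            LogGroupName:    aws.String("my-log-group"), // placeholder
            RetentionInDays: aws.Int64(14),              // an allowed value
        })
        if err != nil {
            log.Fatal(err)
        }
    }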

    ", + "PutSubscriptionFilter": "

    Creates or updates a subscription filter and associates it with the specified log group. Subscription filters allow you to subscribe to a real-time stream of log events ingested through PutLogEvents requests and have them delivered to a specific destination. Currently, the supported destinations are:

    • An Amazon Kinesis stream belonging to the same account as the subscription filter, for same-account delivery.
    • A logical destination (used via an ARN of Destination) belonging to a different account, for cross-account delivery.
    • An Amazon Kinesis Firehose stream belonging to the same account as the subscription filter, for same-account delivery.
    • An AWS Lambda function belonging to the same account as the subscription filter, for same-account delivery.

    Currently there can only be one subscription filter associated with a log group.
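
    A sketch of the same-account Kinesis case, with placeholder names and ARNs:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/cloudwatchlogs"
    )

    func main() {
        svc := cloudwatchlogs.New(session.Must(session.NewSession()))
        _, err := svc.PutSubscriptionFilter(&cloudwatchlogs.PutSubscriptionFilterInput{
            LogGroupName:   aws.String("my-log-group"), // placeholder
            FilterName:     aws.String("to-kinesis"),   // placeholder
            FilterPattern:  aws.String(""),             // empty pattern matches everything
            DestinationArn: aws.String("arn:aws:kinesis:us-east-1:111111111111:stream/my-stream"), // placeholder
            RoleArn:        aws.String("arn:aws:iam::111111111111:role/CWLtoKinesisRole"),         // placeholder
        })
        if err != nil {
            log.Fatal(err)
        }
    }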

    ", + "TestMetricFilter": "

    Tests the filter pattern of a metric filter against a sample of log event messages. You can use this operation to validate the correctness of a metric filter pattern.
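
    A sketch with a placeholder space-delimited pattern and sample message:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/cloudwatchlogs"
    )

    func main() {
        svc := cloudwatchlogs.New(session.Must(session.NewSession()))
        out, err := svc.TestMetricFilter(&cloudwatchlogs.TestMetricFilterInput{
            FilterPattern: aws.String("[ip, user, timestamp, request, status=404, size]"),
            LogEventMessages: []*string{
                aws.String(`127.0.0.1 frank 10/Oct/2000 "GET /index.html" 404 1534`),
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%d message(s) matched\n", len(out.Matches))
    }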

    " + }, + "shapes": { + "AccessPolicy": { + "base": null, + "refs": { + "Destination$accessPolicy": "

    An IAM policy document that governs which AWS accounts can create subscription filters against this destination.

    ", + "PutDestinationPolicyRequest$accessPolicy": "

    An IAM policy document that authorizes cross-account users to deliver their log events to the associated destination.

    " + } + }, + "Arn": { + "base": null, + "refs": { + "Destination$arn": "

    ARN of this destination.

    ", + "LogGroup$arn": null, + "LogStream$arn": null + } + }, + "CancelExportTaskRequest": { + "base": null, + "refs": { + } + }, + "CreateExportTaskRequest": { + "base": null, + "refs": { + } + }, + "CreateExportTaskResponse": { + "base": null, + "refs": { + } + }, + "CreateLogGroupRequest": { + "base": null, + "refs": { + } + }, + "CreateLogStreamRequest": { + "base": null, + "refs": { + } + }, + "DataAlreadyAcceptedException": { + "base": null, + "refs": { + } + }, + "Days": { + "base": "

    Specifies the number of days you want to retain log events in the specified log group. Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653.

    ", + "refs": { + "LogGroup$retentionInDays": null, + "PutRetentionPolicyRequest$retentionInDays": null + } + }, + "DeleteDestinationRequest": { + "base": null, + "refs": { + } + }, + "DeleteLogGroupRequest": { + "base": null, + "refs": { + } + }, + "DeleteLogStreamRequest": { + "base": null, + "refs": { + } + }, + "DeleteMetricFilterRequest": { + "base": null, + "refs": { + } + }, + "DeleteRetentionPolicyRequest": { + "base": null, + "refs": { + } + }, + "DeleteSubscriptionFilterRequest": { + "base": null, + "refs": { + } + }, + "Descending": { + "base": null, + "refs": { + "DescribeLogStreamsRequest$descending": "

    If set to true, results are returned in descending order. If you don't specify a value or set it to false, results are returned in ascending order.

    " + } + }, + "DescribeDestinationsRequest": { + "base": null, + "refs": { + } + }, + "DescribeDestinationsResponse": { + "base": null, + "refs": { + } + }, + "DescribeExportTasksRequest": { + "base": null, + "refs": { + } + }, + "DescribeExportTasksResponse": { + "base": null, + "refs": { + } + }, + "DescribeLimit": { + "base": "

    The maximum number of results to return.

    ", + "refs": { + "DescribeDestinationsRequest$limit": null, + "DescribeExportTasksRequest$limit": "

    The maximum number of items returned in the response. If you don't specify a value, the request returns up to 50 items.

    ", + "DescribeLogGroupsRequest$limit": "

    The maximum number of items returned in the response. If you don't specify a value, the request returns up to 50 items.

    ", + "DescribeLogStreamsRequest$limit": "

    The maximum number of items returned in the response. If you don't specify a value, the request returns up to 50 items.

    ", + "DescribeMetricFiltersRequest$limit": "

    The maximum number of items returned in the response. If you don't specify a value, the request returns up to 50 items.

    ", + "DescribeSubscriptionFiltersRequest$limit": null + } + }, + "DescribeLogGroupsRequest": { + "base": null, + "refs": { + } + }, + "DescribeLogGroupsResponse": { + "base": null, + "refs": { + } + }, + "DescribeLogStreamsRequest": { + "base": null, + "refs": { + } + }, + "DescribeLogStreamsResponse": { + "base": null, + "refs": { + } + }, + "DescribeMetricFiltersRequest": { + "base": null, + "refs": { + } + }, + "DescribeMetricFiltersResponse": { + "base": null, + "refs": { + } + }, + "DescribeSubscriptionFiltersRequest": { + "base": null, + "refs": { + } + }, + "DescribeSubscriptionFiltersResponse": { + "base": null, + "refs": { + } + }, + "Destination": { + "base": "

    A cross-account destination that is the recipient of subscription log events.

    ", + "refs": { + "Destinations$member": null, + "PutDestinationResponse$destination": null + } + }, + "DestinationArn": { + "base": null, + "refs": { + "PutSubscriptionFilterRequest$destinationArn": "

    The ARN of the destination to deliver matching log events to. Currently, the supported destinations are:

    • An Amazon Kinesis stream belonging to the same account as the subscription filter, for same-account delivery.
    • A logical destination (used via an ARN of Destination) belonging to a different account, for cross-account delivery.
    • An Amazon Kinesis Firehose stream belonging to the same account as the subscription filter, for same-account delivery.
    • An AWS Lambda function belonging to the same account as the subscription filter, for same-account delivery.

    ", + "SubscriptionFilter$destinationArn": null + } + }, + "DestinationName": { + "base": null, + "refs": { + "DeleteDestinationRequest$destinationName": "

    The name of destination to delete.

    ", + "DescribeDestinationsRequest$DestinationNamePrefix": "

    Will only return destinations that match the provided destinationNamePrefix. If you don't specify a value, no prefix filter is applied.

    ", + "Destination$destinationName": "

    Name of the destination.

    ", + "PutDestinationPolicyRequest$destinationName": "

    A name for an existing destination.

    ", + "PutDestinationRequest$destinationName": "

    A name for the destination.

    " + } + }, + "Destinations": { + "base": null, + "refs": { + "DescribeDestinationsResponse$destinations": null + } + }, + "EventId": { + "base": null, + "refs": { + "FilteredLogEvent$eventId": "

    A unique identifier for this event.

    " + } + }, + "EventMessage": { + "base": null, + "refs": { + "FilteredLogEvent$message": "

    The data contained in the log event.

    ", + "InputLogEvent$message": null, + "MetricFilterMatchRecord$eventMessage": null, + "OutputLogEvent$message": null, + "TestEventMessages$member": null + } + }, + "EventNumber": { + "base": null, + "refs": { + "MetricFilterMatchRecord$eventNumber": null + } + }, + "EventsLimit": { + "base": "

    The maximum number of events to return.

    ", + "refs": { + "FilterLogEventsRequest$limit": "

    The maximum number of events to return in a page of results. The default is 10,000 events.

    ", + "GetLogEventsRequest$limit": "

    The maximum number of log events returned in the response. If you don't specify a value, the request returns as many log events as can fit in a response size of 1MB, up to 10,000 log events.

    " + } + }, + "ExportDestinationBucket": { + "base": null, + "refs": { + "CreateExportTaskRequest$destination": "

    Name of the Amazon S3 bucket to which the log data will be exported.

    Note: Only buckets in the same AWS region are supported.

    ", + "ExportTask$destination": "

    Name of the Amazon S3 bucket to which the log data was exported.

    " + } + }, + "ExportDestinationPrefix": { + "base": null, + "refs": { + "CreateExportTaskRequest$destinationPrefix": "

    Prefix that will be used as the start of the Amazon S3 key for every object exported. If not specified, this defaults to 'exportedlogs'.

    ", + "ExportTask$destinationPrefix": "

    Prefix that was used as the start of the Amazon S3 key for every object exported.

    " + } + }, + "ExportTask": { + "base": "

    Represents an export task.

    ", + "refs": { + "ExportTasks$member": null + } + }, + "ExportTaskExecutionInfo": { + "base": "

    Represents the status of an export task.

    ", + "refs": { + "ExportTask$executionInfo": "

    Execution info about the export task.

    " + } + }, + "ExportTaskId": { + "base": null, + "refs": { + "CancelExportTaskRequest$taskId": "

    Id of the export task to cancel.

    ", + "CreateExportTaskResponse$taskId": "

    Id of the export task that got created.

    ", + "DescribeExportTasksRequest$taskId": "

    The export task that matches the specified task Id will be returned. This can result in zero or one export task.

    ", + "ExportTask$taskId": "

    Id of the export task.

    " + } + }, + "ExportTaskName": { + "base": null, + "refs": { + "CreateExportTaskRequest$taskName": "

    The name of the export task.

    ", + "ExportTask$taskName": "

    The name of the export task.

    " + } + }, + "ExportTaskStatus": { + "base": "

    Represents the status of an export task.

    ", + "refs": { + "ExportTask$status": "

    Status of the export task.

    " + } + }, + "ExportTaskStatusCode": { + "base": null, + "refs": { + "DescribeExportTasksRequest$statusCode": "

    All export tasks that match the specified status code will be returned. This can return zero or more export tasks.

    ", + "ExportTaskStatus$code": "

    Status code of the export task.

    " + } + }, + "ExportTaskStatusMessage": { + "base": null, + "refs": { + "ExportTaskStatus$message": "

    Status message related to the code.

    " + } + }, + "ExportTasks": { + "base": "

    A list of export tasks.

    ", + "refs": { + "DescribeExportTasksResponse$exportTasks": null + } + }, + "ExtractedValues": { + "base": null, + "refs": { + "MetricFilterMatchRecord$extractedValues": null + } + }, + "FilterCount": { + "base": "

    The number of metric filters associated with the log group.

    ", + "refs": { + "LogGroup$metricFilterCount": null + } + }, + "FilterLogEventsRequest": { + "base": null, + "refs": { + } + }, + "FilterLogEventsResponse": { + "base": null, + "refs": { + } + }, + "FilterName": { + "base": "

    A name for a metric or subscription filter.

    ", + "refs": { + "DeleteMetricFilterRequest$filterName": "

    The name of the metric filter to delete.

    ", + "DeleteSubscriptionFilterRequest$filterName": "

    The name of the subscription filter to delete.

    ", + "DescribeMetricFiltersRequest$filterNamePrefix": "

    Will only return metric filters that match the provided filterNamePrefix. If you don't specify a value, no prefix filter is applied.

    ", + "DescribeSubscriptionFiltersRequest$filterNamePrefix": "

    Will only return subscription filters that match the provided filterNamePrefix. If you don't specify a value, no prefix filter is applied.

    ", + "MetricFilter$filterName": null, + "PutMetricFilterRequest$filterName": "

    A name for the metric filter.

    ", + "PutSubscriptionFilterRequest$filterName": "

    A name for the subscription filter.

    ", + "SubscriptionFilter$filterName": null + } + }, + "FilterPattern": { + "base": "

    A symbolic description of how CloudWatch Logs should interpret the data in each log event. For example, a log event may contain timestamps, IP addresses, strings, and so on. You use the filter pattern to specify what to look for in the log event message.

    ", + "refs": { + "FilterLogEventsRequest$filterPattern": "

    A valid CloudWatch Logs filter pattern to use for filtering the response. If not provided, all the events are matched.

    ", + "MetricFilter$filterPattern": null, + "PutMetricFilterRequest$filterPattern": "

    A valid CloudWatch Logs filter pattern for extracting metric data out of ingested log events.

    ", + "PutSubscriptionFilterRequest$filterPattern": "

    A valid CloudWatch Logs filter pattern for subscribing to a filtered stream of log events.

    ", + "SubscriptionFilter$filterPattern": null, + "TestMetricFilterRequest$filterPattern": null + } + }, + "FilteredLogEvent": { + "base": "

    Represents a matched event from a FilterLogEvents request.

    ", + "refs": { + "FilteredLogEvents$member": null + } + }, + "FilteredLogEvents": { + "base": "

    A list of matched FilteredLogEvent objects returned from a FilterLogEvents request.

    ", + "refs": { + "FilterLogEventsResponse$events": "

    A list of FilteredLogEvent objects representing the matched events from the request.

    " + } + }, + "GetLogEventsRequest": { + "base": null, + "refs": { + } + }, + "GetLogEventsResponse": { + "base": null, + "refs": { + } + }, + "InputLogEvent": { + "base": "

    A log event is a record of some activity that was recorded by the application or resource being monitored. The log event record that CloudWatch Logs understands contains two properties: the timestamp of when the event occurred, and the raw event message.

    ", + "refs": { + "InputLogEvents$member": null + } + }, + "InputLogEvents": { + "base": "

    A list of log events belonging to a log stream.

    ", + "refs": { + "PutLogEventsRequest$logEvents": null + } + }, + "InputLogStreamNames": { + "base": "

    A list of log stream names.

    ", + "refs": { + "FilterLogEventsRequest$logStreamNames": "

    Optional list of log stream names within the specified log group to search. Defaults to all the log streams in the log group.

    " + } + }, + "Interleaved": { + "base": null, + "refs": { + "FilterLogEventsRequest$interleaved": "

    If provided, the API will make a best effort to provide responses that contain events from multiple log streams within the log group interleaved in a single response. If not provided, all the matched log events in the first log stream will be searched first, then those in the next log stream, and so on.

    " + } + }, + "InvalidOperationException": { + "base": "

    Returned if the operation is not valid on the specified resource.

    ", + "refs": { + } + }, + "InvalidParameterException": { + "base": "

    Returned if a parameter of the request is incorrectly specified.

    ", + "refs": { + } + }, + "InvalidSequenceTokenException": { + "base": null, + "refs": { + } + }, + "LimitExceededException": { + "base": "

    Returned if you have reached the maximum number of resources that can be created.

    ", + "refs": { + } + }, + "LogEventIndex": { + "base": null, + "refs": { + "RejectedLogEventsInfo$tooNewLogEventStartIndex": null, + "RejectedLogEventsInfo$tooOldLogEventEndIndex": null, + "RejectedLogEventsInfo$expiredLogEventEndIndex": null + } + }, + "LogGroup": { + "base": null, + "refs": { + "LogGroups$member": null + } + }, + "LogGroupName": { + "base": null, + "refs": { + "CreateExportTaskRequest$logGroupName": "

    The name of the log group to export.

    ", + "CreateLogGroupRequest$logGroupName": "

    The name of the log group to create.

    ", + "CreateLogStreamRequest$logGroupName": "

    The name of the log group under which the log stream is to be created.

    ", + "DeleteLogGroupRequest$logGroupName": "

    The name of the log group to delete.

    ", + "DeleteLogStreamRequest$logGroupName": "

    The name of the log group under which the log stream to delete belongs.

    ", + "DeleteMetricFilterRequest$logGroupName": "

    The name of the log group that is associated with the metric filter to delete.

    ", + "DeleteRetentionPolicyRequest$logGroupName": "

    The name of the log group that is associated with the retention policy to delete.

    ", + "DeleteSubscriptionFilterRequest$logGroupName": "

    The name of the log group that is associated with the subscription filter to delete.

    ", + "DescribeLogGroupsRequest$logGroupNamePrefix": "

    Will only return log groups that match the provided logGroupNamePrefix. If you don't specify a value, no prefix filter is applied.

    ", + "DescribeLogStreamsRequest$logGroupName": "

    The log group name for which log streams are to be listed.

    ", + "DescribeMetricFiltersRequest$logGroupName": "

    The log group name for which metric filters are to be listed.

    ", + "DescribeSubscriptionFiltersRequest$logGroupName": "

    The log group name for which subscription filters are to be listed.

    ", + "ExportTask$logGroupName": "

    The name of the log group from which logs data was exported.

    ", + "FilterLogEventsRequest$logGroupName": "

    The name of the log group to query.

    ", + "GetLogEventsRequest$logGroupName": "

    The name of the log group to query.

    ", + "LogGroup$logGroupName": null, + "PutLogEventsRequest$logGroupName": "

    The name of the log group to put log events to.

    ", + "PutMetricFilterRequest$logGroupName": "

    The name of the log group to associate the metric filter with.

    ", + "PutRetentionPolicyRequest$logGroupName": "

    The name of the log group to associate the retention policy with.

    ", + "PutSubscriptionFilterRequest$logGroupName": "

    The name of the log group to associate the subscription filter with.

    ", + "SubscriptionFilter$logGroupName": null + } + }, + "LogGroups": { + "base": "

    A list of log groups.

    ", + "refs": { + "DescribeLogGroupsResponse$logGroups": null + } + }, + "LogStream": { + "base": "

    A log stream is a sequence of log events from a single emitter of logs.

    ", + "refs": { + "LogStreams$member": null + } + }, + "LogStreamName": { + "base": null, + "refs": { + "CreateExportTaskRequest$logStreamNamePrefix": "

    Will only export log streams that match the provided logStreamNamePrefix. If you don't specify a value, no prefix filter is applied.

    ", + "CreateLogStreamRequest$logStreamName": "

    The name of the log stream to create.

    ", + "DeleteLogStreamRequest$logStreamName": "

    The name of the log stream to delete.

    ", + "DescribeLogStreamsRequest$logStreamNamePrefix": "

    Will only return log streams that match the provided logStreamNamePrefix. If you don't specify a value, no prefix filter is applied.

    ", + "FilteredLogEvent$logStreamName": "

    The name of the log stream this event belongs to.

    ", + "GetLogEventsRequest$logStreamName": "

    The name of the log stream to query.

    ", + "InputLogStreamNames$member": null, + "LogStream$logStreamName": null, + "PutLogEventsRequest$logStreamName": "

    The name of the log stream to put log events to.

    ", + "SearchedLogStream$logStreamName": "

    The name of the log stream.

    " + } + }, + "LogStreamSearchedCompletely": { + "base": null, + "refs": { + "SearchedLogStream$searchedCompletely": "

    Indicates whether all the events in this log stream were searched or more data exists to search by paginating further.

    " + } + }, + "LogStreams": { + "base": "

    A list of log streams.

    ", + "refs": { + "DescribeLogStreamsResponse$logStreams": null + } + }, + "MetricFilter": { + "base": "

    Metric filters can be used to express how CloudWatch Logs extracts metric observations from ingested log events and transforms them into metric data in a CloudWatch metric.

    ", + "refs": { + "MetricFilters$member": null + } + }, + "MetricFilterMatchRecord": { + "base": null, + "refs": { + "MetricFilterMatches$member": null + } + }, + "MetricFilterMatches": { + "base": null, + "refs": { + "TestMetricFilterResponse$matches": null + } + }, + "MetricFilters": { + "base": null, + "refs": { + "DescribeMetricFiltersResponse$metricFilters": null + } + }, + "MetricName": { + "base": "

    The name of the CloudWatch metric to which the monitored log information should be published. For example, you may publish to a metric called ErrorCount.

    ", + "refs": { + "MetricTransformation$metricName": null + } + }, + "MetricNamespace": { + "base": "

    The destination namespace of the new CloudWatch metric.

    ", + "refs": { + "MetricTransformation$metricNamespace": null + } + }, + "MetricTransformation": { + "base": null, + "refs": { + "MetricTransformations$member": null + } + }, + "MetricTransformations": { + "base": null, + "refs": { + "MetricFilter$metricTransformations": null, + "PutMetricFilterRequest$metricTransformations": "

    A collection of information needed to define how metric data gets emitted.

    " + } + }, + "MetricValue": { + "base": "

    What to publish to the metric. For example, if you're counting the occurrences of a particular term like \"Error\", the value will be \"1\" for each occurrence. If you're counting the bytes transferred, the published value will be the value in the log event.

    ", + "refs": { + "MetricTransformation$metricValue": null + } + }, + "NextToken": { + "base": "

    A string token used for pagination that points to the next page of results. It must be a value obtained from the response of the previous request. The token expires after 24 hours.

    ", + "refs": { + "DescribeDestinationsRequest$nextToken": null, + "DescribeDestinationsResponse$nextToken": null, + "DescribeExportTasksRequest$nextToken": "

    A string token used for pagination that points to the next page of results. It must be a value obtained from the response of the previous DescribeExportTasks request.

    ", + "DescribeExportTasksResponse$nextToken": null, + "DescribeLogGroupsRequest$nextToken": "

    A string token used for pagination that points to the next page of results. It must be a value obtained from the response of the previous DescribeLogGroups request.

    ", + "DescribeLogGroupsResponse$nextToken": null, + "DescribeLogStreamsRequest$nextToken": "

    A string token used for pagination that points to the next page of results. It must be a value obtained from the response of the previous DescribeLogStreams request.

    ", + "DescribeLogStreamsResponse$nextToken": null, + "DescribeMetricFiltersRequest$nextToken": "

    A string token used for pagination that points to the next page of results. It must be a value obtained from the response of the previous DescribeMetricFilters request.

    ", + "DescribeMetricFiltersResponse$nextToken": null, + "DescribeSubscriptionFiltersRequest$nextToken": null, + "DescribeSubscriptionFiltersResponse$nextToken": null, + "FilterLogEventsRequest$nextToken": "

    A pagination token obtained from a FilterLogEvents response to continue paginating the FilterLogEvents results. This token is omitted from the response when there are no other events to display.

    ", + "FilterLogEventsResponse$nextToken": "

    A pagination token obtained from a FilterLogEvents response to continue paginating the FilterLogEvents results. This token is omitted from the response when there are no other events to display.

    ", + "GetLogEventsRequest$nextToken": "

    A string token used for pagination that points to the next page of results. It must be a value obtained from the nextForwardToken or nextBackwardToken fields in the response of the previous GetLogEvents request.

    ", + "GetLogEventsResponse$nextForwardToken": null, + "GetLogEventsResponse$nextBackwardToken": null + } + }, + "OperationAbortedException": { + "base": "

    Returned if multiple requests to update the same resource were in conflict.

    ", + "refs": { + } + }, + "OrderBy": { + "base": null, + "refs": { + "DescribeLogStreamsRequest$orderBy": "

    Specifies what to order the returned log streams by. Valid arguments are 'LogStreamName' or 'LastEventTime'. If you don't specify a value, results are ordered by LogStreamName. If 'LastEventTime' is chosen, the request cannot also contain a logStreamNamePrefix.

    " + } + }, + "OutputLogEvent": { + "base": null, + "refs": { + "OutputLogEvents$member": null + } + }, + "OutputLogEvents": { + "base": null, + "refs": { + "GetLogEventsResponse$events": null + } + }, + "PutDestinationPolicyRequest": { + "base": null, + "refs": { + } + }, + "PutDestinationRequest": { + "base": null, + "refs": { + } + }, + "PutDestinationResponse": { + "base": null, + "refs": { + } + }, + "PutLogEventsRequest": { + "base": null, + "refs": { + } + }, + "PutLogEventsResponse": { + "base": null, + "refs": { + } + }, + "PutMetricFilterRequest": { + "base": null, + "refs": { + } + }, + "PutRetentionPolicyRequest": { + "base": null, + "refs": { + } + }, + "PutSubscriptionFilterRequest": { + "base": null, + "refs": { + } + }, + "RejectedLogEventsInfo": { + "base": null, + "refs": { + "PutLogEventsResponse$rejectedLogEventsInfo": null + } + }, + "ResourceAlreadyExistsException": { + "base": "

    Returned if the specified resource already exists.

    ", + "refs": { + } + }, + "ResourceNotFoundException": { + "base": "

    Returned if the specified resource does not exist.

    ", + "refs": { + } + }, + "RoleArn": { + "base": null, + "refs": { + "Destination$roleArn": "

    A role for impersonation, used when delivering log events to the target.

    ", + "PutDestinationRequest$roleArn": "

    The ARN of an IAM role that grants CloudWatch Logs permissions to make Amazon Kinesis PutRecord requests on the destination stream.

    ", + "PutSubscriptionFilterRequest$roleArn": "

    The ARN of an IAM role that grants CloudWatch Logs permissions to deliver ingested log events to the destination stream. You don't need to provide the ARN when you are working with a logical destination (used via an ARN of Destination) for cross-account delivery.

    ", + "SubscriptionFilter$roleArn": null + } + }, + "SearchedLogStream": { + "base": "

    An object indicating the search status of a log stream in a FilterLogEvents request.

    ", + "refs": { + "SearchedLogStreams$member": null + } + }, + "SearchedLogStreams": { + "base": "

    A list of SearchedLogStream objects indicating the search status for log streams in a FilterLogEvents request.

    ", + "refs": { + "FilterLogEventsResponse$searchedLogStreams": "

    A list of SearchedLogStream objects indicating which log streams have been searched in this request and whether each has been searched completely or still has more to be paginated.

    " + } + }, + "SequenceToken": { + "base": "

    A string token used for making PutLogEvents requests. A sequenceToken can only be used once, and PutLogEvents requests must include the sequenceToken obtained from the response of the previous request.

    ", + "refs": { + "DataAlreadyAcceptedException$expectedSequenceToken": null, + "InvalidSequenceTokenException$expectedSequenceToken": null, + "LogStream$uploadSequenceToken": null, + "PutLogEventsRequest$sequenceToken": "

    A string token that must be obtained from the response of the previous PutLogEvents request.

    ", + "PutLogEventsResponse$nextSequenceToken": null + } + }, + "ServiceUnavailableException": { + "base": "

    Returned if the service cannot complete the request.

    ", + "refs": { + } + }, + "StartFromHead": { + "base": null, + "refs": { + "GetLogEventsRequest$startFromHead": "

    If set to true, the earliest log events are returned first. The default is false (the latest log events are returned first).

    " + } + }, + "StoredBytes": { + "base": null, + "refs": { + "LogGroup$storedBytes": null, + "LogStream$storedBytes": null + } + }, + "SubscriptionFilter": { + "base": null, + "refs": { + "SubscriptionFilters$member": null + } + }, + "SubscriptionFilters": { + "base": null, + "refs": { + "DescribeSubscriptionFiltersResponse$subscriptionFilters": null + } + }, + "TargetArn": { + "base": null, + "refs": { + "Destination$targetArn": "

    ARN of the physical target where the log events will be delivered (e.g., the ARN of a Kinesis stream).

    ", + "PutDestinationRequest$targetArn": "

    The ARN of an Amazon Kinesis stream to deliver matching log events to.

    " + } + }, + "TestEventMessages": { + "base": null, + "refs": { + "TestMetricFilterRequest$logEventMessages": "

    A list of log event messages to test.

    " + } + }, + "TestMetricFilterRequest": { + "base": null, + "refs": { + } + }, + "TestMetricFilterResponse": { + "base": null, + "refs": { + } + }, + "Timestamp": { + "base": "

    A point in time expressed as the number of milliseconds since Jan 1, 1970 00:00:00 UTC.
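
    For example, in Go (a small sketch of the conversion, not SDK-specific):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // Milliseconds since Jan 1, 1970 00:00:00 UTC, as CloudWatch Logs expects.
        millis := time.Now().UnixNano() / int64(time.Millisecond)
        fmt.Println("timestamp:", millis)
        // And back to a time.Time for display.
        fmt.Println(time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond)).UTC())
    }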

    ", + "refs": { + "CreateExportTaskRequest$from": "

    A point in time expressed as the number of milliseconds since Jan 1, 1970 00:00:00 UTC. It indicates the start time of the range for the request. Events with a timestamp prior to this time will not be exported.

    ", + "CreateExportTaskRequest$to": "

    A point in time expressed as the number of milliseconds since Jan 1, 1970 00:00:00 UTC. It indicates the end time of the range for the request. Events with a timestamp later than this time will not be exported.

    ", + "Destination$creationTime": "

    A point in time expressed as the number of milliseconds since Jan 1, 1970 00:00:00 UTC specifying when this destination was created.

    ", + "ExportTask$from": "

    A point in time expressed as the number of milliseconds since Jan 1, 1970 00:00:00 UTC. Events with a timestamp prior to this time are not exported.

    ", + "ExportTask$to": "

    A point in time expressed as the number of milliseconds since Jan 1, 1970 00:00:00 UTC. Events with a timestamp later than this time are not exported.

    ", + "ExportTaskExecutionInfo$creationTime": "

    A point in time when the export task was created.

    ", + "ExportTaskExecutionInfo$completionTime": "

    A point in time when the export task completed.

    ", + "FilterLogEventsRequest$startTime": "

    A point in time expressed as the number of milliseconds since Jan 1, 1970 00:00:00 UTC. If provided, events with a timestamp prior to this time are not returned.

    ", + "FilterLogEventsRequest$endTime": "

    A point in time expressed as the number of milliseconds since Jan 1, 1970 00:00:00 UTC. If provided, events with a timestamp later than this time are not returned.

    ", + "FilteredLogEvent$timestamp": null, + "FilteredLogEvent$ingestionTime": null, + "GetLogEventsRequest$startTime": null, + "GetLogEventsRequest$endTime": null, + "InputLogEvent$timestamp": null, + "LogGroup$creationTime": null, + "LogStream$creationTime": null, + "LogStream$firstEventTimestamp": null, + "LogStream$lastEventTimestamp": null, + "LogStream$lastIngestionTime": null, + "MetricFilter$creationTime": null, + "OutputLogEvent$timestamp": null, + "OutputLogEvent$ingestionTime": null, + "SubscriptionFilter$creationTime": null + } + }, + "Token": { + "base": null, + "refs": { + "ExtractedValues$key": null + } + }, + "Value": { + "base": null, + "refs": { + "ExtractedValues$value": null + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/logs/2014-03-28/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/logs/2014-03-28/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/logs/2014-03-28/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/logs/2014-03-28/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/logs/2014-03-28/paginators-1.json new file mode 100644 index 000000000..f68932415 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/logs/2014-03-28/paginators-1.json @@ -0,0 +1,49 @@ +{ + "pagination": { + "DescribeDestinations": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "limit", + "result_key": "destinations" + }, + "DescribeLogGroups": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "limit", + "result_key": "logGroups" + }, + "DescribeLogStreams": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "limit", + "result_key": "logStreams" + }, + "DescribeMetricFilters": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "limit", + "result_key": "metricFilters" + }, + "DescribeSubscriptionFilters": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "limit", + "result_key": "subscriptionFilters" + }, + "FilterLogEvents": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "limit", + "result_key": [ + "events", + "searchedLogStreams" + ] + }, + "GetLogEvents": { + "input_token": "nextToken", + "output_token": "nextForwardToken", + "limit_key": "limit", + "result_key": "events" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/machinelearning/2014-12-12/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/machinelearning/2014-12-12/api-2.json new file mode 100644 index 000000000..c8fbcdcda --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/machinelearning/2014-12-12/api-2.json @@ -0,0 +1,1947 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2014-12-12", + "endpointPrefix":"machinelearning", + "jsonVersion":"1.1", + "serviceFullName":"Amazon Machine Learning", + "signatureVersion":"v4", + "targetPrefix":"AmazonML_20141212", + "protocol":"json" + }, + "operations":{ + "AddTags":{ + "name":"AddTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddTagsInput"}, + "output":{"shape":"AddTagsOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidTagException", + "exception":true + }, + { + "shape":"TagLimitExceededException", + "exception":true + 
}, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "CreateBatchPrediction":{ + "name":"CreateBatchPrediction", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateBatchPredictionInput"}, + "output":{"shape":"CreateBatchPredictionOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"IdempotentParameterMismatchException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "CreateDataSourceFromRDS":{ + "name":"CreateDataSourceFromRDS", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDataSourceFromRDSInput"}, + "output":{"shape":"CreateDataSourceFromRDSOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"IdempotentParameterMismatchException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "CreateDataSourceFromRedshift":{ + "name":"CreateDataSourceFromRedshift", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDataSourceFromRedshiftInput"}, + "output":{"shape":"CreateDataSourceFromRedshiftOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"IdempotentParameterMismatchException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "CreateDataSourceFromS3":{ + "name":"CreateDataSourceFromS3", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDataSourceFromS3Input"}, + "output":{"shape":"CreateDataSourceFromS3Output"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"IdempotentParameterMismatchException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "CreateEvaluation":{ + "name":"CreateEvaluation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateEvaluationInput"}, + "output":{"shape":"CreateEvaluationOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"IdempotentParameterMismatchException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "CreateMLModel":{ + "name":"CreateMLModel", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateMLModelInput"}, + "output":{"shape":"CreateMLModelOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"IdempotentParameterMismatchException", + 
"error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "CreateRealtimeEndpoint":{ + "name":"CreateRealtimeEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateRealtimeEndpointInput"}, + "output":{"shape":"CreateRealtimeEndpointOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "DeleteBatchPrediction":{ + "name":"DeleteBatchPrediction", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteBatchPredictionInput"}, + "output":{"shape":"DeleteBatchPredictionOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "DeleteDataSource":{ + "name":"DeleteDataSource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDataSourceInput"}, + "output":{"shape":"DeleteDataSourceOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "DeleteEvaluation":{ + "name":"DeleteEvaluation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteEvaluationInput"}, + "output":{"shape":"DeleteEvaluationOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "DeleteMLModel":{ + "name":"DeleteMLModel", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteMLModelInput"}, + "output":{"shape":"DeleteMLModelOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "DeleteRealtimeEndpoint":{ + "name":"DeleteRealtimeEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRealtimeEndpointInput"}, + "output":{"shape":"DeleteRealtimeEndpointOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "DeleteTags":{ + "name":"DeleteTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteTagsInput"}, + "output":{"shape":"DeleteTagsOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + 
"error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidTagException", + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "DescribeBatchPredictions":{ + "name":"DescribeBatchPredictions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeBatchPredictionsInput"}, + "output":{"shape":"DescribeBatchPredictionsOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "DescribeDataSources":{ + "name":"DescribeDataSources", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDataSourcesInput"}, + "output":{"shape":"DescribeDataSourcesOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "DescribeEvaluations":{ + "name":"DescribeEvaluations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEvaluationsInput"}, + "output":{"shape":"DescribeEvaluationsOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "DescribeMLModels":{ + "name":"DescribeMLModels", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeMLModelsInput"}, + "output":{"shape":"DescribeMLModelsOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "DescribeTags":{ + "name":"DescribeTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTagsInput"}, + "output":{"shape":"DescribeTagsOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "GetBatchPrediction":{ + "name":"GetBatchPrediction", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetBatchPredictionInput"}, + "output":{"shape":"GetBatchPredictionOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "GetDataSource":{ + "name":"GetDataSource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDataSourceInput"}, + "output":{"shape":"GetDataSourceOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + 
"exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "GetEvaluation":{ + "name":"GetEvaluation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetEvaluationInput"}, + "output":{"shape":"GetEvaluationOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "GetMLModel":{ + "name":"GetMLModel", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetMLModelInput"}, + "output":{"shape":"GetMLModelOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "Predict":{ + "name":"Predict", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PredictInput"}, + "output":{"shape":"PredictOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{"httpStatusCode":417}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"PredictorNotMountedException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "UpdateBatchPrediction":{ + "name":"UpdateBatchPrediction", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateBatchPredictionInput"}, + "output":{"shape":"UpdateBatchPredictionOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "UpdateDataSource":{ + "name":"UpdateDataSource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateDataSourceInput"}, + "output":{"shape":"UpdateDataSourceOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "UpdateEvaluation":{ + "name":"UpdateEvaluation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateEvaluationInput"}, + "output":{"shape":"UpdateEvaluationOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "UpdateMLModel":{ + "name":"UpdateMLModel", + "http":{ + 
"method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateMLModelInput"}, + "output":{"shape":"UpdateMLModelOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + } + }, + "shapes":{ + "AddTagsInput":{ + "type":"structure", + "required":[ + "Tags", + "ResourceId", + "ResourceType" + ], + "members":{ + "Tags":{"shape":"TagList"}, + "ResourceId":{"shape":"EntityId"}, + "ResourceType":{"shape":"TaggableResourceType"} + } + }, + "AddTagsOutput":{ + "type":"structure", + "members":{ + "ResourceId":{"shape":"EntityId"}, + "ResourceType":{"shape":"TaggableResourceType"} + } + }, + "Algorithm":{ + "type":"string", + "enum":["sgd"] + }, + "AwsUserArn":{ + "type":"string", + "pattern":"arn:aws:iam::[0-9]+:((user/.+)|(root))" + }, + "BatchPrediction":{ + "type":"structure", + "members":{ + "BatchPredictionId":{"shape":"EntityId"}, + "MLModelId":{"shape":"EntityId"}, + "BatchPredictionDataSourceId":{"shape":"EntityId"}, + "InputDataLocationS3":{"shape":"S3Url"}, + "CreatedByIamUser":{"shape":"AwsUserArn"}, + "CreatedAt":{"shape":"EpochTime"}, + "LastUpdatedAt":{"shape":"EpochTime"}, + "Name":{"shape":"EntityName"}, + "Status":{"shape":"EntityStatus"}, + "OutputUri":{"shape":"S3Url"}, + "Message":{"shape":"Message"} + } + }, + "BatchPredictionFilterVariable":{ + "type":"string", + "enum":[ + "CreatedAt", + "LastUpdatedAt", + "Status", + "Name", + "IAMUser", + "MLModelId", + "DataSourceId", + "DataURI" + ] + }, + "BatchPredictions":{ + "type":"list", + "member":{"shape":"BatchPrediction"} + }, + "ComparatorValue":{ + "type":"string", + "max":1024, + "pattern":".*\\S.*|^$" + }, + "ComputeStatistics":{"type":"boolean"}, + "CreateBatchPredictionInput":{ + "type":"structure", + "required":[ + "BatchPredictionId", + "MLModelId", + "BatchPredictionDataSourceId", + "OutputUri" + ], + "members":{ + "BatchPredictionId":{"shape":"EntityId"}, + "BatchPredictionName":{"shape":"EntityName"}, + "MLModelId":{"shape":"EntityId"}, + "BatchPredictionDataSourceId":{"shape":"EntityId"}, + "OutputUri":{"shape":"S3Url"} + } + }, + "CreateBatchPredictionOutput":{ + "type":"structure", + "members":{ + "BatchPredictionId":{"shape":"EntityId"} + } + }, + "CreateDataSourceFromRDSInput":{ + "type":"structure", + "required":[ + "DataSourceId", + "RDSData", + "RoleARN" + ], + "members":{ + "DataSourceId":{"shape":"EntityId"}, + "DataSourceName":{"shape":"EntityName"}, + "RDSData":{"shape":"RDSDataSpec"}, + "RoleARN":{"shape":"RoleARN"}, + "ComputeStatistics":{"shape":"ComputeStatistics"} + } + }, + "CreateDataSourceFromRDSOutput":{ + "type":"structure", + "members":{ + "DataSourceId":{"shape":"EntityId"} + } + }, + "CreateDataSourceFromRedshiftInput":{ + "type":"structure", + "required":[ + "DataSourceId", + "DataSpec", + "RoleARN" + ], + "members":{ + "DataSourceId":{"shape":"EntityId"}, + "DataSourceName":{"shape":"EntityName"}, + "DataSpec":{"shape":"RedshiftDataSpec"}, + "RoleARN":{"shape":"RoleARN"}, + "ComputeStatistics":{"shape":"ComputeStatistics"} + } + }, + "CreateDataSourceFromRedshiftOutput":{ + "type":"structure", + "members":{ + "DataSourceId":{"shape":"EntityId"} + } + }, + "CreateDataSourceFromS3Input":{ + "type":"structure", + "required":[ + "DataSourceId", + "DataSpec" + ], + "members":{ + "DataSourceId":{"shape":"EntityId"}, 
+ "DataSourceName":{"shape":"EntityName"}, + "DataSpec":{"shape":"S3DataSpec"}, + "ComputeStatistics":{"shape":"ComputeStatistics"} + } + }, + "CreateDataSourceFromS3Output":{ + "type":"structure", + "members":{ + "DataSourceId":{"shape":"EntityId"} + } + }, + "CreateEvaluationInput":{ + "type":"structure", + "required":[ + "EvaluationId", + "MLModelId", + "EvaluationDataSourceId" + ], + "members":{ + "EvaluationId":{"shape":"EntityId"}, + "EvaluationName":{"shape":"EntityName"}, + "MLModelId":{"shape":"EntityId"}, + "EvaluationDataSourceId":{"shape":"EntityId"} + } + }, + "CreateEvaluationOutput":{ + "type":"structure", + "members":{ + "EvaluationId":{"shape":"EntityId"} + } + }, + "CreateMLModelInput":{ + "type":"structure", + "required":[ + "MLModelId", + "MLModelType", + "TrainingDataSourceId" + ], + "members":{ + "MLModelId":{"shape":"EntityId"}, + "MLModelName":{"shape":"EntityName"}, + "MLModelType":{"shape":"MLModelType"}, + "Parameters":{"shape":"TrainingParameters"}, + "TrainingDataSourceId":{"shape":"EntityId"}, + "Recipe":{"shape":"Recipe"}, + "RecipeUri":{"shape":"S3Url"} + } + }, + "CreateMLModelOutput":{ + "type":"structure", + "members":{ + "MLModelId":{"shape":"EntityId"} + } + }, + "CreateRealtimeEndpointInput":{ + "type":"structure", + "required":["MLModelId"], + "members":{ + "MLModelId":{"shape":"EntityId"} + } + }, + "CreateRealtimeEndpointOutput":{ + "type":"structure", + "members":{ + "MLModelId":{"shape":"EntityId"}, + "RealtimeEndpointInfo":{"shape":"RealtimeEndpointInfo"} + } + }, + "DataRearrangement":{"type":"string"}, + "DataSchema":{ + "type":"string", + "max":131071 + }, + "DataSource":{ + "type":"structure", + "members":{ + "DataSourceId":{"shape":"EntityId"}, + "DataLocationS3":{"shape":"S3Url"}, + "DataRearrangement":{"shape":"DataRearrangement"}, + "CreatedByIamUser":{"shape":"AwsUserArn"}, + "CreatedAt":{"shape":"EpochTime"}, + "LastUpdatedAt":{"shape":"EpochTime"}, + "DataSizeInBytes":{"shape":"LongType"}, + "NumberOfFiles":{"shape":"LongType"}, + "Name":{"shape":"EntityName"}, + "Status":{"shape":"EntityStatus"}, + "Message":{"shape":"Message"}, + "RedshiftMetadata":{"shape":"RedshiftMetadata"}, + "RDSMetadata":{"shape":"RDSMetadata"}, + "RoleARN":{"shape":"RoleARN"}, + "ComputeStatistics":{"shape":"ComputeStatistics"} + } + }, + "DataSourceFilterVariable":{ + "type":"string", + "enum":[ + "CreatedAt", + "LastUpdatedAt", + "Status", + "Name", + "DataLocationS3", + "IAMUser" + ] + }, + "DataSources":{ + "type":"list", + "member":{"shape":"DataSource"} + }, + "DeleteBatchPredictionInput":{ + "type":"structure", + "required":["BatchPredictionId"], + "members":{ + "BatchPredictionId":{"shape":"EntityId"} + } + }, + "DeleteBatchPredictionOutput":{ + "type":"structure", + "members":{ + "BatchPredictionId":{"shape":"EntityId"} + } + }, + "DeleteDataSourceInput":{ + "type":"structure", + "required":["DataSourceId"], + "members":{ + "DataSourceId":{"shape":"EntityId"} + } + }, + "DeleteDataSourceOutput":{ + "type":"structure", + "members":{ + "DataSourceId":{"shape":"EntityId"} + } + }, + "DeleteEvaluationInput":{ + "type":"structure", + "required":["EvaluationId"], + "members":{ + "EvaluationId":{"shape":"EntityId"} + } + }, + "DeleteEvaluationOutput":{ + "type":"structure", + "members":{ + "EvaluationId":{"shape":"EntityId"} + } + }, + "DeleteMLModelInput":{ + "type":"structure", + "required":["MLModelId"], + "members":{ + "MLModelId":{"shape":"EntityId"} + } + }, + "DeleteMLModelOutput":{ + "type":"structure", + "members":{ + 
"MLModelId":{"shape":"EntityId"} + } + }, + "DeleteRealtimeEndpointInput":{ + "type":"structure", + "required":["MLModelId"], + "members":{ + "MLModelId":{"shape":"EntityId"} + } + }, + "DeleteRealtimeEndpointOutput":{ + "type":"structure", + "members":{ + "MLModelId":{"shape":"EntityId"}, + "RealtimeEndpointInfo":{"shape":"RealtimeEndpointInfo"} + } + }, + "DeleteTagsInput":{ + "type":"structure", + "required":[ + "TagKeys", + "ResourceId", + "ResourceType" + ], + "members":{ + "TagKeys":{"shape":"TagKeyList"}, + "ResourceId":{"shape":"EntityId"}, + "ResourceType":{"shape":"TaggableResourceType"} + } + }, + "DeleteTagsOutput":{ + "type":"structure", + "members":{ + "ResourceId":{"shape":"EntityId"}, + "ResourceType":{"shape":"TaggableResourceType"} + } + }, + "DescribeBatchPredictionsInput":{ + "type":"structure", + "members":{ + "FilterVariable":{"shape":"BatchPredictionFilterVariable"}, + "EQ":{"shape":"ComparatorValue"}, + "GT":{"shape":"ComparatorValue"}, + "LT":{"shape":"ComparatorValue"}, + "GE":{"shape":"ComparatorValue"}, + "LE":{"shape":"ComparatorValue"}, + "NE":{"shape":"ComparatorValue"}, + "Prefix":{"shape":"ComparatorValue"}, + "SortOrder":{"shape":"SortOrder"}, + "NextToken":{"shape":"StringType"}, + "Limit":{"shape":"PageLimit"} + } + }, + "DescribeBatchPredictionsOutput":{ + "type":"structure", + "members":{ + "Results":{"shape":"BatchPredictions"}, + "NextToken":{"shape":"StringType"} + } + }, + "DescribeDataSourcesInput":{ + "type":"structure", + "members":{ + "FilterVariable":{"shape":"DataSourceFilterVariable"}, + "EQ":{"shape":"ComparatorValue"}, + "GT":{"shape":"ComparatorValue"}, + "LT":{"shape":"ComparatorValue"}, + "GE":{"shape":"ComparatorValue"}, + "LE":{"shape":"ComparatorValue"}, + "NE":{"shape":"ComparatorValue"}, + "Prefix":{"shape":"ComparatorValue"}, + "SortOrder":{"shape":"SortOrder"}, + "NextToken":{"shape":"StringType"}, + "Limit":{"shape":"PageLimit"} + } + }, + "DescribeDataSourcesOutput":{ + "type":"structure", + "members":{ + "Results":{"shape":"DataSources"}, + "NextToken":{"shape":"StringType"} + } + }, + "DescribeEvaluationsInput":{ + "type":"structure", + "members":{ + "FilterVariable":{"shape":"EvaluationFilterVariable"}, + "EQ":{"shape":"ComparatorValue"}, + "GT":{"shape":"ComparatorValue"}, + "LT":{"shape":"ComparatorValue"}, + "GE":{"shape":"ComparatorValue"}, + "LE":{"shape":"ComparatorValue"}, + "NE":{"shape":"ComparatorValue"}, + "Prefix":{"shape":"ComparatorValue"}, + "SortOrder":{"shape":"SortOrder"}, + "NextToken":{"shape":"StringType"}, + "Limit":{"shape":"PageLimit"} + } + }, + "DescribeEvaluationsOutput":{ + "type":"structure", + "members":{ + "Results":{"shape":"Evaluations"}, + "NextToken":{"shape":"StringType"} + } + }, + "DescribeMLModelsInput":{ + "type":"structure", + "members":{ + "FilterVariable":{"shape":"MLModelFilterVariable"}, + "EQ":{"shape":"ComparatorValue"}, + "GT":{"shape":"ComparatorValue"}, + "LT":{"shape":"ComparatorValue"}, + "GE":{"shape":"ComparatorValue"}, + "LE":{"shape":"ComparatorValue"}, + "NE":{"shape":"ComparatorValue"}, + "Prefix":{"shape":"ComparatorValue"}, + "SortOrder":{"shape":"SortOrder"}, + "NextToken":{"shape":"StringType"}, + "Limit":{"shape":"PageLimit"} + } + }, + "DescribeMLModelsOutput":{ + "type":"structure", + "members":{ + "Results":{"shape":"MLModels"}, + "NextToken":{"shape":"StringType"} + } + }, + "DescribeTagsInput":{ + "type":"structure", + "required":[ + "ResourceId", + "ResourceType" + ], + "members":{ + "ResourceId":{"shape":"EntityId"}, + 
"ResourceType":{"shape":"TaggableResourceType"} + } + }, + "DescribeTagsOutput":{ + "type":"structure", + "members":{ + "ResourceId":{"shape":"EntityId"}, + "ResourceType":{"shape":"TaggableResourceType"}, + "Tags":{"shape":"TagList"} + } + }, + "DetailsAttributes":{ + "type":"string", + "enum":[ + "PredictiveModelType", + "Algorithm" + ] + }, + "DetailsMap":{ + "type":"map", + "key":{"shape":"DetailsAttributes"}, + "value":{"shape":"DetailsValue"} + }, + "DetailsValue":{ + "type":"string", + "min":1 + }, + "EDPPipelineId":{ + "type":"string", + "min":1, + "max":1024 + }, + "EDPResourceRole":{ + "type":"string", + "min":1, + "max":64 + }, + "EDPSecurityGroupId":{ + "type":"string", + "min":1, + "max":255 + }, + "EDPSecurityGroupIds":{ + "type":"list", + "member":{"shape":"EDPSecurityGroupId"} + }, + "EDPServiceRole":{ + "type":"string", + "min":1, + "max":64 + }, + "EDPSubnetId":{ + "type":"string", + "min":1, + "max":255 + }, + "EntityId":{ + "type":"string", + "min":1, + "max":64, + "pattern":"[a-zA-Z0-9_.-]+" + }, + "EntityName":{ + "type":"string", + "max":1024, + "pattern":".*\\S.*|^$" + }, + "EntityStatus":{ + "type":"string", + "enum":[ + "PENDING", + "INPROGRESS", + "FAILED", + "COMPLETED", + "DELETED" + ] + }, + "EpochTime":{"type":"timestamp"}, + "ErrorCode":{"type":"integer"}, + "ErrorMessage":{ + "type":"string", + "max":2048 + }, + "Evaluation":{ + "type":"structure", + "members":{ + "EvaluationId":{"shape":"EntityId"}, + "MLModelId":{"shape":"EntityId"}, + "EvaluationDataSourceId":{"shape":"EntityId"}, + "InputDataLocationS3":{"shape":"S3Url"}, + "CreatedByIamUser":{"shape":"AwsUserArn"}, + "CreatedAt":{"shape":"EpochTime"}, + "LastUpdatedAt":{"shape":"EpochTime"}, + "Name":{"shape":"EntityName"}, + "Status":{"shape":"EntityStatus"}, + "PerformanceMetrics":{"shape":"PerformanceMetrics"}, + "Message":{"shape":"Message"} + } + }, + "EvaluationFilterVariable":{ + "type":"string", + "enum":[ + "CreatedAt", + "LastUpdatedAt", + "Status", + "Name", + "IAMUser", + "MLModelId", + "DataSourceId", + "DataURI" + ] + }, + "Evaluations":{ + "type":"list", + "member":{"shape":"Evaluation"} + }, + "GetBatchPredictionInput":{ + "type":"structure", + "required":["BatchPredictionId"], + "members":{ + "BatchPredictionId":{"shape":"EntityId"} + } + }, + "GetBatchPredictionOutput":{ + "type":"structure", + "members":{ + "BatchPredictionId":{"shape":"EntityId"}, + "MLModelId":{"shape":"EntityId"}, + "BatchPredictionDataSourceId":{"shape":"EntityId"}, + "InputDataLocationS3":{"shape":"S3Url"}, + "CreatedByIamUser":{"shape":"AwsUserArn"}, + "CreatedAt":{"shape":"EpochTime"}, + "LastUpdatedAt":{"shape":"EpochTime"}, + "Name":{"shape":"EntityName"}, + "Status":{"shape":"EntityStatus"}, + "OutputUri":{"shape":"S3Url"}, + "LogUri":{"shape":"PresignedS3Url"}, + "Message":{"shape":"Message"} + } + }, + "GetDataSourceInput":{ + "type":"structure", + "required":["DataSourceId"], + "members":{ + "DataSourceId":{"shape":"EntityId"}, + "Verbose":{"shape":"Verbose"} + } + }, + "GetDataSourceOutput":{ + "type":"structure", + "members":{ + "DataSourceId":{"shape":"EntityId"}, + "DataLocationS3":{"shape":"S3Url"}, + "DataRearrangement":{"shape":"DataRearrangement"}, + "CreatedByIamUser":{"shape":"AwsUserArn"}, + "CreatedAt":{"shape":"EpochTime"}, + "LastUpdatedAt":{"shape":"EpochTime"}, + "DataSizeInBytes":{"shape":"LongType"}, + "NumberOfFiles":{"shape":"LongType"}, + "Name":{"shape":"EntityName"}, + "Status":{"shape":"EntityStatus"}, + "LogUri":{"shape":"PresignedS3Url"}, + "Message":{"shape":"Message"}, + 
"RedshiftMetadata":{"shape":"RedshiftMetadata"}, + "RDSMetadata":{"shape":"RDSMetadata"}, + "RoleARN":{"shape":"RoleARN"}, + "ComputeStatistics":{"shape":"ComputeStatistics"}, + "DataSourceSchema":{"shape":"DataSchema"} + } + }, + "GetEvaluationInput":{ + "type":"structure", + "required":["EvaluationId"], + "members":{ + "EvaluationId":{"shape":"EntityId"} + } + }, + "GetEvaluationOutput":{ + "type":"structure", + "members":{ + "EvaluationId":{"shape":"EntityId"}, + "MLModelId":{"shape":"EntityId"}, + "EvaluationDataSourceId":{"shape":"EntityId"}, + "InputDataLocationS3":{"shape":"S3Url"}, + "CreatedByIamUser":{"shape":"AwsUserArn"}, + "CreatedAt":{"shape":"EpochTime"}, + "LastUpdatedAt":{"shape":"EpochTime"}, + "Name":{"shape":"EntityName"}, + "Status":{"shape":"EntityStatus"}, + "PerformanceMetrics":{"shape":"PerformanceMetrics"}, + "LogUri":{"shape":"PresignedS3Url"}, + "Message":{"shape":"Message"} + } + }, + "GetMLModelInput":{ + "type":"structure", + "required":["MLModelId"], + "members":{ + "MLModelId":{"shape":"EntityId"}, + "Verbose":{"shape":"Verbose"} + } + }, + "GetMLModelOutput":{ + "type":"structure", + "members":{ + "MLModelId":{"shape":"EntityId"}, + "TrainingDataSourceId":{"shape":"EntityId"}, + "CreatedByIamUser":{"shape":"AwsUserArn"}, + "CreatedAt":{"shape":"EpochTime"}, + "LastUpdatedAt":{"shape":"EpochTime"}, + "Name":{"shape":"MLModelName"}, + "Status":{"shape":"EntityStatus"}, + "SizeInBytes":{"shape":"LongType"}, + "EndpointInfo":{"shape":"RealtimeEndpointInfo"}, + "TrainingParameters":{"shape":"TrainingParameters"}, + "InputDataLocationS3":{"shape":"S3Url"}, + "MLModelType":{"shape":"MLModelType"}, + "ScoreThreshold":{"shape":"ScoreThreshold"}, + "ScoreThresholdLastUpdatedAt":{"shape":"EpochTime"}, + "LogUri":{"shape":"PresignedS3Url"}, + "Message":{"shape":"Message"}, + "Recipe":{"shape":"Recipe"}, + "Schema":{"shape":"DataSchema"} + } + }, + "IdempotentParameterMismatchException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"}, + "code":{"shape":"ErrorCode"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "IntegerType":{"type":"integer"}, + "InternalServerException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"}, + "code":{"shape":"ErrorCode"} + }, + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "InvalidInputException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"}, + "code":{"shape":"ErrorCode"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidTagException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "Label":{ + "type":"string", + "min":1 + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"}, + "code":{"shape":"ErrorCode"} + }, + "error":{"httpStatusCode":417}, + "exception":true + }, + "LongType":{"type":"long"}, + "MLModel":{ + "type":"structure", + "members":{ + "MLModelId":{"shape":"EntityId"}, + "TrainingDataSourceId":{"shape":"EntityId"}, + "CreatedByIamUser":{"shape":"AwsUserArn"}, + "CreatedAt":{"shape":"EpochTime"}, + "LastUpdatedAt":{"shape":"EpochTime"}, + "Name":{"shape":"MLModelName"}, + "Status":{"shape":"EntityStatus"}, + "SizeInBytes":{"shape":"LongType"}, + "EndpointInfo":{"shape":"RealtimeEndpointInfo"}, + "TrainingParameters":{"shape":"TrainingParameters"}, + "InputDataLocationS3":{"shape":"S3Url"}, + "Algorithm":{"shape":"Algorithm"}, + "MLModelType":{"shape":"MLModelType"}, + 
"ScoreThreshold":{"shape":"ScoreThreshold"}, + "ScoreThresholdLastUpdatedAt":{"shape":"EpochTime"}, + "Message":{"shape":"Message"} + } + }, + "MLModelFilterVariable":{ + "type":"string", + "enum":[ + "CreatedAt", + "LastUpdatedAt", + "Status", + "Name", + "IAMUser", + "TrainingDataSourceId", + "RealtimeEndpointStatus", + "MLModelType", + "Algorithm", + "TrainingDataURI" + ] + }, + "MLModelName":{ + "type":"string", + "max":1024 + }, + "MLModelType":{ + "type":"string", + "enum":[ + "REGRESSION", + "BINARY", + "MULTICLASS" + ] + }, + "MLModels":{ + "type":"list", + "member":{"shape":"MLModel"} + }, + "Message":{ + "type":"string", + "max":10240 + }, + "PageLimit":{ + "type":"integer", + "min":1, + "max":100 + }, + "PerformanceMetrics":{ + "type":"structure", + "members":{ + "Properties":{"shape":"PerformanceMetricsProperties"} + } + }, + "PerformanceMetricsProperties":{ + "type":"map", + "key":{"shape":"PerformanceMetricsPropertyKey"}, + "value":{"shape":"PerformanceMetricsPropertyValue"} + }, + "PerformanceMetricsPropertyKey":{"type":"string"}, + "PerformanceMetricsPropertyValue":{"type":"string"}, + "PredictInput":{ + "type":"structure", + "required":[ + "MLModelId", + "Record", + "PredictEndpoint" + ], + "members":{ + "MLModelId":{"shape":"EntityId"}, + "Record":{"shape":"Record"}, + "PredictEndpoint":{"shape":"VipURL"} + } + }, + "PredictOutput":{ + "type":"structure", + "members":{ + "Prediction":{"shape":"Prediction"} + } + }, + "Prediction":{ + "type":"structure", + "members":{ + "predictedLabel":{"shape":"Label"}, + "predictedValue":{"shape":"floatLabel"}, + "predictedScores":{"shape":"ScoreValuePerLabelMap"}, + "details":{"shape":"DetailsMap"} + } + }, + "PredictorNotMountedException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "PresignedS3Url":{"type":"string"}, + "RDSDataSpec":{ + "type":"structure", + "required":[ + "DatabaseInformation", + "SelectSqlQuery", + "DatabaseCredentials", + "S3StagingLocation", + "ResourceRole", + "ServiceRole", + "SubnetId", + "SecurityGroupIds" + ], + "members":{ + "DatabaseInformation":{"shape":"RDSDatabase"}, + "SelectSqlQuery":{"shape":"RDSSelectSqlQuery"}, + "DatabaseCredentials":{"shape":"RDSDatabaseCredentials"}, + "S3StagingLocation":{"shape":"S3Url"}, + "DataRearrangement":{"shape":"DataRearrangement"}, + "DataSchema":{"shape":"DataSchema"}, + "DataSchemaUri":{"shape":"S3Url"}, + "ResourceRole":{"shape":"EDPResourceRole"}, + "ServiceRole":{"shape":"EDPServiceRole"}, + "SubnetId":{"shape":"EDPSubnetId"}, + "SecurityGroupIds":{"shape":"EDPSecurityGroupIds"} + } + }, + "RDSDatabase":{ + "type":"structure", + "required":[ + "InstanceIdentifier", + "DatabaseName" + ], + "members":{ + "InstanceIdentifier":{"shape":"RDSInstanceIdentifier"}, + "DatabaseName":{"shape":"RDSDatabaseName"} + } + }, + "RDSDatabaseCredentials":{ + "type":"structure", + "required":[ + "Username", + "Password" + ], + "members":{ + "Username":{"shape":"RDSDatabaseUsername"}, + "Password":{"shape":"RDSDatabasePassword"} + } + }, + "RDSDatabaseName":{ + "type":"string", + "min":1, + "max":64 + }, + "RDSDatabasePassword":{ + "type":"string", + "min":8, + "max":128 + }, + "RDSDatabaseUsername":{ + "type":"string", + "min":1, + "max":128 + }, + "RDSInstanceIdentifier":{ + "type":"string", + "min":1, + "max":63, + "pattern":"[a-z0-9-]+" + }, + "RDSMetadata":{ + "type":"structure", + "members":{ + "Database":{"shape":"RDSDatabase"}, + "DatabaseUserName":{"shape":"RDSDatabaseUsername"}, 
+ "SelectSqlQuery":{"shape":"RDSSelectSqlQuery"}, + "ResourceRole":{"shape":"EDPResourceRole"}, + "ServiceRole":{"shape":"EDPServiceRole"}, + "DataPipelineId":{"shape":"EDPPipelineId"} + } + }, + "RDSSelectSqlQuery":{ + "type":"string", + "min":1, + "max":16777216 + }, + "RealtimeEndpointInfo":{ + "type":"structure", + "members":{ + "PeakRequestsPerSecond":{"shape":"IntegerType"}, + "CreatedAt":{"shape":"EpochTime"}, + "EndpointUrl":{"shape":"VipURL"}, + "EndpointStatus":{"shape":"RealtimeEndpointStatus"} + } + }, + "RealtimeEndpointStatus":{ + "type":"string", + "enum":[ + "NONE", + "READY", + "UPDATING", + "FAILED" + ] + }, + "Recipe":{ + "type":"string", + "max":131071 + }, + "Record":{ + "type":"map", + "key":{"shape":"VariableName"}, + "value":{"shape":"VariableValue"} + }, + "RedshiftClusterIdentifier":{ + "type":"string", + "min":1, + "max":63, + "pattern":"[a-z0-9-]+" + }, + "RedshiftDataSpec":{ + "type":"structure", + "required":[ + "DatabaseInformation", + "SelectSqlQuery", + "DatabaseCredentials", + "S3StagingLocation" + ], + "members":{ + "DatabaseInformation":{"shape":"RedshiftDatabase"}, + "SelectSqlQuery":{"shape":"RedshiftSelectSqlQuery"}, + "DatabaseCredentials":{"shape":"RedshiftDatabaseCredentials"}, + "S3StagingLocation":{"shape":"S3Url"}, + "DataRearrangement":{"shape":"DataRearrangement"}, + "DataSchema":{"shape":"DataSchema"}, + "DataSchemaUri":{"shape":"S3Url"} + } + }, + "RedshiftDatabase":{ + "type":"structure", + "required":[ + "DatabaseName", + "ClusterIdentifier" + ], + "members":{ + "DatabaseName":{"shape":"RedshiftDatabaseName"}, + "ClusterIdentifier":{"shape":"RedshiftClusterIdentifier"} + } + }, + "RedshiftDatabaseCredentials":{ + "type":"structure", + "required":[ + "Username", + "Password" + ], + "members":{ + "Username":{"shape":"RedshiftDatabaseUsername"}, + "Password":{"shape":"RedshiftDatabasePassword"} + } + }, + "RedshiftDatabaseName":{ + "type":"string", + "min":1, + "max":64, + "pattern":"[a-z0-9]+" + }, + "RedshiftDatabasePassword":{ + "type":"string", + "min":8, + "max":64 + }, + "RedshiftDatabaseUsername":{ + "type":"string", + "min":1, + "max":128 + }, + "RedshiftMetadata":{ + "type":"structure", + "members":{ + "RedshiftDatabase":{"shape":"RedshiftDatabase"}, + "DatabaseUserName":{"shape":"RedshiftDatabaseUsername"}, + "SelectSqlQuery":{"shape":"RedshiftSelectSqlQuery"} + } + }, + "RedshiftSelectSqlQuery":{ + "type":"string", + "min":1, + "max":16777216 + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"}, + "code":{"shape":"ErrorCode"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "RoleARN":{ + "type":"string", + "min":1, + "max":110 + }, + "S3DataSpec":{ + "type":"structure", + "required":["DataLocationS3"], + "members":{ + "DataLocationS3":{"shape":"S3Url"}, + "DataRearrangement":{"shape":"DataRearrangement"}, + "DataSchema":{"shape":"DataSchema"}, + "DataSchemaLocationS3":{"shape":"S3Url"} + } + }, + "S3Url":{ + "type":"string", + "max":2048, + "pattern":"s3://([^/]+)(/.*)?" 
+ }, + "ScoreThreshold":{"type":"float"}, + "ScoreValue":{"type":"float"}, + "ScoreValuePerLabelMap":{ + "type":"map", + "key":{"shape":"Label"}, + "value":{"shape":"ScoreValue"} + }, + "SortOrder":{ + "type":"string", + "enum":[ + "asc", + "dsc" + ] + }, + "StringType":{"type":"string"}, + "Tag":{ + "type":"structure", + "members":{ + "Key":{"shape":"TagKey"}, + "Value":{"shape":"TagValue"} + } + }, + "TagKey":{ + "type":"string", + "min":1, + "max":128, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"} + }, + "TagLimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"} + }, + "TagValue":{ + "type":"string", + "min":0, + "max":256, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, + "TaggableResourceType":{ + "type":"string", + "enum":[ + "BatchPrediction", + "DataSource", + "Evaluation", + "MLModel" + ] + }, + "TrainingParameters":{ + "type":"map", + "key":{"shape":"StringType"}, + "value":{"shape":"StringType"} + }, + "UpdateBatchPredictionInput":{ + "type":"structure", + "required":[ + "BatchPredictionId", + "BatchPredictionName" + ], + "members":{ + "BatchPredictionId":{"shape":"EntityId"}, + "BatchPredictionName":{"shape":"EntityName"} + } + }, + "UpdateBatchPredictionOutput":{ + "type":"structure", + "members":{ + "BatchPredictionId":{"shape":"EntityId"} + } + }, + "UpdateDataSourceInput":{ + "type":"structure", + "required":[ + "DataSourceId", + "DataSourceName" + ], + "members":{ + "DataSourceId":{"shape":"EntityId"}, + "DataSourceName":{"shape":"EntityName"} + } + }, + "UpdateDataSourceOutput":{ + "type":"structure", + "members":{ + "DataSourceId":{"shape":"EntityId"} + } + }, + "UpdateEvaluationInput":{ + "type":"structure", + "required":[ + "EvaluationId", + "EvaluationName" + ], + "members":{ + "EvaluationId":{"shape":"EntityId"}, + "EvaluationName":{"shape":"EntityName"} + } + }, + "UpdateEvaluationOutput":{ + "type":"structure", + "members":{ + "EvaluationId":{"shape":"EntityId"} + } + }, + "UpdateMLModelInput":{ + "type":"structure", + "required":["MLModelId"], + "members":{ + "MLModelId":{"shape":"EntityId"}, + "MLModelName":{"shape":"EntityName"}, + "ScoreThreshold":{"shape":"ScoreThreshold"} + } + }, + "UpdateMLModelOutput":{ + "type":"structure", + "members":{ + "MLModelId":{"shape":"EntityId"} + } + }, + "VariableName":{"type":"string"}, + "VariableValue":{"type":"string"}, + "Verbose":{"type":"boolean"}, + "VipURL":{ + "type":"string", + "max":2048, + "pattern":"https://[a-zA-Z0-9-.]*\\.amazon(aws)?\\.com[/]?" + }, + "floatLabel":{"type":"float"} + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/machinelearning/2014-12-12/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/machinelearning/2014-12-12/docs-2.json new file mode 100644 index 000000000..7f7035e7b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/machinelearning/2014-12-12/docs-2.json @@ -0,0 +1,1128 @@ +{ + "version": "2.0", + "operations": { + "AddTags": "

    Adds one or more tags to an object, up to a limit of 10. Each tag consists of a key and an optional value. If you add a tag using a key that is already associated with the ML object, AddTags updates the tag's value.
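
    A minimal Go sketch of this call against the machinelearning client generated from these models; the resource ID and the tag key/value are hypothetical:

      package main

      import (
          "fmt"
          "log"

          "github.com/aws/aws-sdk-go/aws"
          "github.com/aws/aws-sdk-go/aws/session"
          "github.com/aws/aws-sdk-go/service/machinelearning"
      )

      func main() {
          svc := machinelearning.New(session.Must(session.NewSession(
              &aws.Config{Region: aws.String("us-east-1")})))

          // Tag a hypothetical MLModel; re-using an existing key
          // overwrites that tag's value, as described above.
          out, err := svc.AddTags(&machinelearning.AddTagsInput{
              ResourceId:   aws.String("ml-example-model-id"), // hypothetical ID
              ResourceType: aws.String("MLModel"),
              Tags: []*machinelearning.Tag{
                  {Key: aws.String("team"), Value: aws.String("forecasting")},
              },
          })
          if err != nil {
              log.Fatal(err)
          }
          fmt.Println("tagged:", aws.StringValue(out.ResourceId))
      }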

    ", + "CreateBatchPrediction": "

    Generates predictions for a group of observations. The observations to process exist in one or more data files referenced by a DataSource. This operation creates a new BatchPrediction, and uses an MLModel and the data files referenced by the DataSource as information sources.

    CreateBatchPrediction is an asynchronous operation. In response to CreateBatchPrediction, Amazon Machine Learning (Amazon ML) immediately returns and sets the BatchPrediction status to PENDING. After the BatchPrediction completes, Amazon ML sets the status to COMPLETED.

    You can poll for status updates by using the GetBatchPrediction operation and checking the Status parameter of the result. After the COMPLETED status appears, the results are available in the location specified by the OutputUri parameter.
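
    A minimal Go sketch of the submit-then-poll pattern described above; every ID and the S3 output location are hypothetical, and the poll interval is arbitrary:

      package main

      import (
          "fmt"
          "log"
          "time"

          "github.com/aws/aws-sdk-go/aws"
          "github.com/aws/aws-sdk-go/aws/session"
          "github.com/aws/aws-sdk-go/service/machinelearning"
      )

      func main() {
          svc := machinelearning.New(session.Must(session.NewSession(
              &aws.Config{Region: aws.String("us-east-1")})))

          _, err := svc.CreateBatchPrediction(&machinelearning.CreateBatchPredictionInput{
              BatchPredictionId:           aws.String("bp-2014-09-09-HolidayGiftMailer"),
              MLModelId:                   aws.String("ml-example-model-id"),
              BatchPredictionDataSourceId: aws.String("ds-example-unlabeled-id"),
              OutputUri:                   aws.String("s3://example-bucket/batch-output/"),
          })
          if err != nil {
              log.Fatal(err)
          }

          // CreateBatchPrediction returns immediately with status PENDING;
          // poll GetBatchPrediction until Amazon ML reports a terminal status.
          for {
              out, err := svc.GetBatchPrediction(&machinelearning.GetBatchPredictionInput{
                  BatchPredictionId: aws.String("bp-2014-09-09-HolidayGiftMailer"),
              })
              if err != nil {
                  log.Fatal(err)
              }
              status := aws.StringValue(out.Status)
              fmt.Println("status:", status)
              if status == "COMPLETED" || status == "FAILED" {
                  fmt.Println("results at:", aws.StringValue(out.OutputUri))
                  break
              }
              time.Sleep(30 * time.Second)
          }
      }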

    ", + "CreateDataSourceFromRDS": "

    Creates a DataSource object from an Amazon Relational Database Service (Amazon RDS). A DataSource references data that can be used to perform CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations.

    CreateDataSourceFromRDS is an asynchronous operation. In response to CreateDataSourceFromRDS, Amazon Machine Learning (Amazon ML) immediately returns and sets the DataSource status to PENDING. After the DataSource is created and ready for use, Amazon ML sets the Status parameter to COMPLETED. DataSource in the COMPLETED or PENDING state can be used only to perform CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations.

    If Amazon ML cannot accept the input source, it sets the Status parameter to FAILED and includes an error message in the Message attribute of the GetDataSource operation response.

    ", + "CreateDataSourceFromRedshift": "

    Creates a DataSource from a database hosted on an Amazon Redshift cluster. A DataSource references data that can be used to perform either CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations.

    CreateDataSourceFromRedshift is an asynchronous operation. In response to CreateDataSourceFromRedshift, Amazon Machine Learning (Amazon ML) immediately returns and sets the DataSource status to PENDING. After the DataSource is created and ready for use, Amazon ML sets the Status parameter to COMPLETED. DataSource in COMPLETED or PENDING states can be used to perform only CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations.

    If Amazon ML can't accept the input source, it sets the Status parameter to FAILED and includes an error message in the Message attribute of the GetDataSource operation response.

    The observations should be contained in the database hosted on an Amazon Redshift cluster and should be specified by a SelectSqlQuery query. Amazon ML executes an Unload command in Amazon Redshift to transfer the result set of the SelectSqlQuery query to S3StagingLocation.

    After the DataSource has been created, it's ready for use in evaluations and batch predictions. If you plan to use the DataSource to train an MLModel, the DataSource also requires a recipe. A recipe describes how each input variable will be used in training an MLModel. Will the variable be included or excluded from training? Will the variable be manipulated, for example, combined with another variable or split apart into word combinations? The recipe provides answers to these questions.

    You can't change an existing datasource, but you can copy and modify the settings from an existing Amazon Redshift datasource to create a new datasource. To do so, call GetDataSource for an existing datasource and copy the values to a CreateDataSource call. Change the settings that you want to change and make sure that all required fields have the appropriate values.
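
    A Go sketch of the call, assuming a hypothetical cluster, database, credentials, IAM role, and S3 locations; the DataSchemaUri points at a schema file as described above:

      package main

      import (
          "log"

          "github.com/aws/aws-sdk-go/aws"
          "github.com/aws/aws-sdk-go/aws/session"
          "github.com/aws/aws-sdk-go/service/machinelearning"
      )

      func main() {
          svc := machinelearning.New(session.Must(session.NewSession(
              &aws.Config{Region: aws.String("us-east-1")})))

          _, err := svc.CreateDataSourceFromRedshift(&machinelearning.CreateDataSourceFromRedshiftInput{
              DataSourceId:   aws.String("ds-redshift-observations"),
              DataSourceName: aws.String("Redshift observations"),
              RoleARN:        aws.String("arn:aws:iam::123456789012:role/example-ml-role"),
              // Required if this DataSource will be used for MLModel training.
              ComputeStatistics: aws.Bool(true),
              DataSpec: &machinelearning.RedshiftDataSpec{
                  DatabaseInformation: &machinelearning.RedshiftDatabase{
                      ClusterIdentifier: aws.String("example-cluster"),
                      DatabaseName:      aws.String("analytics"),
                  },
                  DatabaseCredentials: &machinelearning.RedshiftDatabaseCredentials{
                      Username: aws.String("ml_reader"),
                      Password: aws.String("example-password"),
                  },
                  // Amazon ML unloads the result of this query into S3StagingLocation.
                  SelectSqlQuery:    aws.String("SELECT * FROM observations"),
                  S3StagingLocation: aws.String("s3://example-bucket/staging/"),
                  DataSchemaUri:     aws.String("s3://example-bucket/observations.schema"),
              },
          })
          if err != nil {
              log.Fatal(err)
          }
      }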

    ", + "CreateDataSourceFromS3": "

    Creates a DataSource object. A DataSource references data that can be used to perform CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations.

    CreateDataSourceFromS3 is an asynchronous operation. In response to CreateDataSourceFromS3, Amazon Machine Learning (Amazon ML) immediately returns and sets the DataSource status to PENDING. After the DataSource has been created and is ready for use, Amazon ML sets the Status parameter to COMPLETED. DataSource in the COMPLETED or PENDING state can be used to perform only CreateMLModel, CreateEvaluation or CreateBatchPrediction operations.

    If Amazon ML can't accept the input source, it sets the Status parameter to FAILED and includes an error message in the Message attribute of the GetDataSource operation response.

    The observation data used in a DataSource should be ready to use; that is, it should have a consistent structure, and missing data values should be kept to a minimum. The observation data must reside in one or more .csv files in an Amazon Simple Storage Service (Amazon S3) location, along with a schema that describes the data items by name and type. The same schema must be used for all of the data files referenced by the DataSource.

    After the DataSource has been created, it's ready to use in evaluations and batch predictions. If you plan to use the DataSource to train an MLModel, the DataSource also needs a recipe. A recipe describes how each input variable will be used in training an MLModel. Will the variable be included or excluded from training? Will the variable be manipulated, for example, combined with another variable or split apart into word combinations? The recipe provides answers to these questions.
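
    A minimal Go sketch, assuming hypothetical bucket paths and IDs; ComputeStatistics is set to true so the DataSource can later be used to train an MLModel:

      package main

      import (
          "log"

          "github.com/aws/aws-sdk-go/aws"
          "github.com/aws/aws-sdk-go/aws/session"
          "github.com/aws/aws-sdk-go/service/machinelearning"
      )

      func main() {
          svc := machinelearning.New(session.Must(session.NewSession(
              &aws.Config{Region: aws.String("us-east-1")})))

          // Bucket, schema location, and the DataSource ID are hypothetical.
          _, err := svc.CreateDataSourceFromS3(&machinelearning.CreateDataSourceFromS3Input{
              DataSourceId:      aws.String("ds-example-training-id"),
              DataSourceName:    aws.String("Training observations"),
              ComputeStatistics: aws.Bool(true),
              DataSpec: &machinelearning.S3DataSpec{
                  DataLocationS3:       aws.String("s3://example-bucket/observations.csv"),
                  DataSchemaLocationS3: aws.String("s3://example-bucket/observations.schema"),
              },
          })
          if err != nil {
              log.Fatal(err)
          }
      }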

    ", + "CreateEvaluation": "

    Creates a new Evaluation of an MLModel. An MLModel is evaluated on a set of observations associated with a DataSource. Like a DataSource for an MLModel, the DataSource for an Evaluation contains values for the Target Variable. The Evaluation compares the predicted result for each observation to the actual outcome and provides a summary so that you know how effectively the MLModel performs on the test data. Evaluation generates a relevant performance metric, such as BinaryAUC, RegressionRMSE, or MulticlassAvgFScore, based on the corresponding MLModelType: BINARY, REGRESSION, or MULTICLASS.

    CreateEvaluation is an asynchronous operation. In response to CreateEvaluation, Amazon Machine Learning (Amazon ML) immediately returns and sets the evaluation status to PENDING. After the Evaluation is created and ready for use, Amazon ML sets the status to COMPLETED.

    You can use the GetEvaluation operation to check progress of the evaluation during the creation operation.
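
    A Go sketch that creates an evaluation and, once it completes, reads the performance metric back through GetEvaluation; the IDs are hypothetical:

      package main

      import (
          "fmt"
          "log"

          "github.com/aws/aws-sdk-go/aws"
          "github.com/aws/aws-sdk-go/aws/session"
          "github.com/aws/aws-sdk-go/service/machinelearning"
      )

      func main() {
          svc := machinelearning.New(session.Must(session.NewSession(
              &aws.Config{Region: aws.String("us-east-1")})))

          // The DataSource should hold labeled holdout data not used in training.
          _, err := svc.CreateEvaluation(&machinelearning.CreateEvaluationInput{
              EvaluationId:           aws.String("ev-example-id"),
              EvaluationName:         aws.String("Holdout evaluation"),
              MLModelId:              aws.String("ml-example-model-id"),
              EvaluationDataSourceId: aws.String("ds-example-holdout-id"),
          })
          if err != nil {
              log.Fatal(err)
          }

          // Once the status reaches COMPLETED, GetEvaluation exposes the
          // metric (for example BinaryAUC) under PerformanceMetrics.
          out, err := svc.GetEvaluation(&machinelearning.GetEvaluationInput{
              EvaluationId: aws.String("ev-example-id"),
          })
          if err != nil {
              log.Fatal(err)
          }
          if out.PerformanceMetrics != nil {
              for k, v := range out.PerformanceMetrics.Properties {
                  fmt.Println(k, "=", aws.StringValue(v))
              }
          }
      }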

    ", + "CreateMLModel": "

    Creates a new MLModel using the DataSource and the recipe as information sources.

    An MLModel is nearly immutable. Users can update only the MLModelName and the ScoreThreshold in an MLModel without creating a new MLModel.

    CreateMLModel is an asynchronous operation. In response to CreateMLModel, Amazon Machine Learning (Amazon ML) immediately returns and sets the MLModel status to PENDING. After the MLModel has been created and is ready for use, Amazon ML sets the status to COMPLETED.

    You can use the GetMLModel operation to check the progress of the MLModel during the creation operation.

    CreateMLModel requires a DataSource with computed statistics, which can be created by setting ComputeStatistics to true in the CreateDataSourceFromRDS, CreateDataSourceFromS3, or CreateDataSourceFromRedshift operations.
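
    A minimal Go sketch; the IDs are hypothetical and the single training parameter shown is illustrative only:

      package main

      import (
          "log"

          "github.com/aws/aws-sdk-go/aws"
          "github.com/aws/aws-sdk-go/aws/session"
          "github.com/aws/aws-sdk-go/service/machinelearning"
      )

      func main() {
          svc := machinelearning.New(session.Must(session.NewSession(
              &aws.Config{Region: aws.String("us-east-1")})))

          // The training DataSource must have been created with
          // ComputeStatistics set to true, as noted above.
          _, err := svc.CreateMLModel(&machinelearning.CreateMLModelInput{
              MLModelId:            aws.String("ml-example-model-id"),
              MLModelName:          aws.String("Gift mailer response model"),
              MLModelType:          aws.String("BINARY"),
              TrainingDataSourceId: aws.String("ds-example-training-id"),
              Parameters: map[string]*string{
                  "sgd.maxPasses": aws.String("10"), // illustrative parameter
              },
          })
          if err != nil {
              log.Fatal(err)
          }
      }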

    ", + "CreateRealtimeEndpoint": "

    Creates a real-time endpoint for the MLModel. The endpoint contains the URI of the MLModel; that is, the location to send real-time prediction requests for the specified MLModel.
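
    A minimal Go sketch; the model ID is hypothetical:

      package main

      import (
          "fmt"
          "log"

          "github.com/aws/aws-sdk-go/aws"
          "github.com/aws/aws-sdk-go/aws/session"
          "github.com/aws/aws-sdk-go/service/machinelearning"
      )

      func main() {
          svc := machinelearning.New(session.Must(session.NewSession(
              &aws.Config{Region: aws.String("us-east-1")})))

          out, err := svc.CreateRealtimeEndpoint(&machinelearning.CreateRealtimeEndpointInput{
              MLModelId: aws.String("ml-example-model-id"), // hypothetical ID
          })
          if err != nil {
              log.Fatal(err)
          }
          // The endpoint passes through UPDATING before it is READY; only a
          // READY endpoint accepts Predict requests at EndpointUrl.
          info := out.RealtimeEndpointInfo
          fmt.Println(aws.StringValue(info.EndpointUrl), aws.StringValue(info.EndpointStatus))
      }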

    ", + "DeleteBatchPrediction": "

    Assigns the DELETED status to a BatchPrediction, rendering it unusable.

    After using the DeleteBatchPrediction operation, you can use the GetBatchPrediction operation to verify that the status of the BatchPrediction changed to DELETED.

    Caution: The result of the DeleteBatchPrediction operation is irreversible.

    ", + "DeleteDataSource": "

    Assigns the DELETED status to a DataSource, rendering it unusable.

    After using the DeleteDataSource operation, you can use the GetDataSource operation to verify that the status of the DataSource changed to DELETED.

    Caution: The results of the DeleteDataSource operation are irreversible.

    ", + "DeleteEvaluation": "

    Assigns the DELETED status to an Evaluation, rendering it unusable.

    After invoking the DeleteEvaluation operation, you can use the GetEvaluation operation to verify that the status of the Evaluation changed to DELETED.

    Caution: The results of the DeleteEvaluation operation are irreversible.

    ", + "DeleteMLModel": "

    Assigns the DELETED status to an MLModel, rendering it unusable.

    After using the DeleteMLModel operation, you can use the GetMLModel operation to verify that the status of the MLModel changed to DELETED.

    Caution: The result of the DeleteMLModel operation is irreversible.

    ", + "DeleteRealtimeEndpoint": "

    Deletes a real-time endpoint of an MLModel.

    ", + "DeleteTags": "

    Deletes the specified tags associated with an ML object. After this operation is complete, you can't recover deleted tags.

    If you specify a tag that doesn't exist, Amazon ML ignores it.

    ", + "DescribeBatchPredictions": "

    Returns a list of BatchPrediction objects that match the search criteria in the request.
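
    A Go sketch combining FilterVariable, Prefix, SortOrder, and Limit; the filter values are hypothetical:

      package main

      import (
          "fmt"
          "log"

          "github.com/aws/aws-sdk-go/aws"
          "github.com/aws/aws-sdk-go/aws/session"
          "github.com/aws/aws-sdk-go/service/machinelearning"
      )

      func main() {
          svc := machinelearning.New(session.Must(session.NewSession(
              &aws.Config{Region: aws.String("us-east-1")})))

          // List batch predictions whose Name starts with "2014-09",
          // sorted ascending, at most 25 per page.
          out, err := svc.DescribeBatchPredictions(&machinelearning.DescribeBatchPredictionsInput{
              FilterVariable: aws.String("Name"),
              Prefix:         aws.String("2014-09"),
              SortOrder:      aws.String("asc"),
              Limit:          aws.Int64(25),
          })
          if err != nil {
              log.Fatal(err)
          }
          for _, bp := range out.Results {
              fmt.Println(aws.StringValue(bp.BatchPredictionId), aws.StringValue(bp.Status))
          }
          // A non-nil out.NextToken means more matches remain; pass it back
          // in the next request to page through them.
      }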

    ", + "DescribeDataSources": "

    Returns a list of DataSource objects that match the search criteria in the request.

    ", + "DescribeEvaluations": "

    Returns a list of Evaluation objects that match the search criteria in the request.

    ", + "DescribeMLModels": "

    Returns a list of MLModel objects that match the search criteria in the request.

    ", + "DescribeTags": "

    Describes one or more of the tags for your Amazon ML object.

    ", + "GetBatchPrediction": "

    Returns a BatchPrediction that includes detailed metadata, status, and data file information for a Batch Prediction request.

    ", + "GetDataSource": "

    Returns a DataSource that includes metadata and data file information, as well as the current status of the DataSource.

    GetDataSource provides results in normal or verbose format. The verbose format adds the schema description and the list of files pointed to by the DataSource to the normal format.

    ", + "GetEvaluation": "

    Returns an Evaluation that includes metadata as well as the current status of the Evaluation.

    ", + "GetMLModel": "

    Returns an MLModel that includes detailed metadata, data source information, and the current status of the MLModel.

    GetMLModel provides results in normal or verbose format.

    ", + "Predict": "

    Generates a prediction for the observation using the specified ML Model.

    Note

    Not all response parameters will be populated. Whether a response parameter is populated depends on the type of model requested.
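
    A minimal Go sketch; the endpoint URL, model ID, and record fields are hypothetical, and PredictEndpoint should be the EndpointUrl returned by CreateRealtimeEndpoint:

      package main

      import (
          "fmt"
          "log"

          "github.com/aws/aws-sdk-go/aws"
          "github.com/aws/aws-sdk-go/aws/session"
          "github.com/aws/aws-sdk-go/service/machinelearning"
      )

      func main() {
          svc := machinelearning.New(session.Must(session.NewSession(
              &aws.Config{Region: aws.String("us-east-1")})))

          out, err := svc.Predict(&machinelearning.PredictInput{
              MLModelId:       aws.String("ml-example-model-id"),
              PredictEndpoint: aws.String("https://realtime.machinelearning.us-east-1.amazonaws.com"),
              Record: map[string]*string{
                  "F1": aws.String("example text"), // hypothetical field names
                  "F2": aws.String("42"),
              },
          })
          if err != nil {
              log.Fatal(err)
          }
          p := out.Prediction
          // A BINARY model populates PredictedLabel (and PredictedScores);
          // a REGRESSION model populates PredictedValue instead.
          fmt.Println(aws.StringValue(p.PredictedLabel), aws.Float64Value(p.PredictedValue))
      }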

    ", + "UpdateBatchPrediction": "

    Updates the BatchPredictionName of a BatchPrediction.

    You can use the GetBatchPrediction operation to view the contents of the updated data element.

    ", + "UpdateDataSource": "

    Updates the DataSourceName of a DataSource.

    You can use the GetDataSource operation to view the contents of the updated data element.

    ", + "UpdateEvaluation": "

    Updates the EvaluationName of an Evaluation.

    You can use the GetEvaluation operation to view the contents of the updated data element.

    ", + "UpdateMLModel": "

    Updates the MLModelName and the ScoreThreshold of an MLModel.

    You can use the GetMLModel operation to view the contents of the updated data element.
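
    A minimal Go sketch; the ID, name, and threshold are hypothetical:

      package main

      import (
          "log"

          "github.com/aws/aws-sdk-go/aws"
          "github.com/aws/aws-sdk-go/aws/session"
          "github.com/aws/aws-sdk-go/service/machinelearning"
      )

      func main() {
          svc := machinelearning.New(session.Must(session.NewSession(
              &aws.Config{Region: aws.String("us-east-1")})))

          // Rename a hypothetical BINARY model and tighten its cut-off:
          // raising ScoreThreshold makes positive predictions stricter.
          _, err := svc.UpdateMLModel(&machinelearning.UpdateMLModelInput{
              MLModelId:      aws.String("ml-example-model-id"),
              MLModelName:    aws.String("Gift mailer response model v2"),
              ScoreThreshold: aws.Float64(0.75),
          })
          if err != nil {
              log.Fatal(err)
          }
      }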

    " + }, + "service": "Definition of the public APIs exposed by Amazon Machine Learning", + "shapes": { + "AddTagsInput": { + "base": null, + "refs": { + } + }, + "AddTagsOutput": { + "base": "

    Amazon ML returns the following elements.

    ", + "refs": { + } + }, + "Algorithm": { + "base": "

    The function used to train an MLModel. Training choices supported by Amazon ML include the following:

    • SGD - Stochastic Gradient Descent.
    • RandomForest - Random forest of decision trees.
    ", + "refs": { + "MLModel$Algorithm": "

    The algorithm used to train the MLModel. The following algorithm is supported:

    • SGD -- Stochastic gradient descent. The goal of SGD is to minimize the gradient of the loss function.
    " + } + }, + "AwsUserArn": { + "base": "

    An Amazon Web Services (AWS) user account identifier. The account identifier can be an AWS root account or an AWS Identity and Access Management (IAM) user.

    ", + "refs": { + "BatchPrediction$CreatedByIamUser": "

    The AWS user account that invoked the BatchPrediction. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.

    ", + "DataSource$CreatedByIamUser": "

    The AWS user account from which the DataSource was created. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.

    ", + "Evaluation$CreatedByIamUser": "

    The AWS user account that invoked the evaluation. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.

    ", + "GetBatchPredictionOutput$CreatedByIamUser": "

    The AWS user account that invoked the BatchPrediction. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.

    ", + "GetDataSourceOutput$CreatedByIamUser": "

    The AWS user account from which the DataSource was created. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.

    ", + "GetEvaluationOutput$CreatedByIamUser": "

    The AWS user account that invoked the evaluation. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.

    ", + "GetMLModelOutput$CreatedByIamUser": "

    The AWS user account from which the MLModel was created. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.

    ", + "MLModel$CreatedByIamUser": "

    The AWS user account from which the MLModel was created. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.

    " + } + }, + "BatchPrediction": { + "base": "

    Represents the output of a GetBatchPrediction operation.

    The content consists of the detailed metadata, the status, and the data file information of a Batch Prediction.

    ", + "refs": { + "BatchPredictions$member": null + } + }, + "BatchPredictionFilterVariable": { + "base": "

    A list of the variables to use in searching or filtering BatchPrediction.

    • CreatedAt - Sets the search criteria to BatchPrediction creation date.
    • Status - Sets the search criteria to BatchPrediction status.
    • Name - Sets the search criteria to the contents of BatchPrediction Name.
    • IAMUser - Sets the search criteria to the user account that invoked the BatchPrediction creation.
    • MLModelId - Sets the search criteria to the MLModel used in the BatchPrediction.
    • DataSourceId - Sets the search criteria to the DataSource used in the BatchPrediction.
    • DataURI - Sets the search criteria to the data file(s) used in the BatchPrediction. The URL can identify either a file or an Amazon Simple Storage Service (Amazon S3) bucket or directory.
    ", + "refs": { + "DescribeBatchPredictionsInput$FilterVariable": "

    Use one of the following variables to filter a list of BatchPrediction:

    • CreatedAt - Sets the search criteria to the BatchPrediction creation date.
    • Status - Sets the search criteria to the BatchPrediction status.
    • Name - Sets the search criteria to the contents of the BatchPrediction Name.
    • IAMUser - Sets the search criteria to the user account that invoked the BatchPrediction creation.
    • MLModelId - Sets the search criteria to the MLModel used in the BatchPrediction.
    • DataSourceId - Sets the search criteria to the DataSource used in the BatchPrediction.
    • DataURI - Sets the search criteria to the data file(s) used in the BatchPrediction. The URL can identify either a file or an Amazon Simple Storage Service (Amazon S3) bucket or directory.
    " + } + }, + "BatchPredictions": { + "base": null, + "refs": { + "DescribeBatchPredictionsOutput$Results": "

    A list of BatchPrediction objects that meet the search criteria.

    " + } + }, + "ComparatorValue": { + "base": "

    The value specified in a filtering condition. The ComparatorValue becomes the reference value when matching or evaluating data values in filtering and searching functions.

    ", + "refs": { + "DescribeBatchPredictionsInput$EQ": "

    The equal to operator. The BatchPrediction results will have FilterVariable values that exactly match the value specified with EQ.

    ", + "DescribeBatchPredictionsInput$GT": "

    The greater than operator. The BatchPrediction results will have FilterVariable values that are greater than the value specified with GT.

    ", + "DescribeBatchPredictionsInput$LT": "

    The less than operator. The BatchPrediction results will have FilterVariable values that are less than the value specified with LT.

    ", + "DescribeBatchPredictionsInput$GE": "

    The greater than or equal to operator. The BatchPrediction results will have FilterVariable values that are greater than or equal to the value specified with GE.

    ", + "DescribeBatchPredictionsInput$LE": "

    The less than or equal to operator. The BatchPrediction results will have FilterVariable values that are less than or equal to the value specified with LE.

    ", + "DescribeBatchPredictionsInput$NE": "

    The not equal to operator. The BatchPrediction results will have FilterVariable values not equal to the value specified with NE.

    ", + "DescribeBatchPredictionsInput$Prefix": "

    A string that is found at the beginning of a variable, such as Name or Id.

    For example, a Batch Prediction operation could have the Name 2014-09-09-HolidayGiftMailer. To search for this BatchPrediction, select Name for the FilterVariable and any of the following strings for the Prefix:

    • 2014-09

    • 2014-09-09

    • 2014-09-09-Holiday

    ", + "DescribeDataSourcesInput$EQ": "

    The equal to operator. The DataSource results will have FilterVariable values that exactly match the value specified with EQ.

    ", + "DescribeDataSourcesInput$GT": "

    The greater than operator. The DataSource results will have FilterVariable values that are greater than the value specified with GT.

    ", + "DescribeDataSourcesInput$LT": "

    The less than operator. The DataSource results will have FilterVariable values that are less than the value specified with LT.

    ", + "DescribeDataSourcesInput$GE": "

    The greater than or equal to operator. The DataSource results will have FilterVariable values that are greater than or equal to the value specified with GE.

    ", + "DescribeDataSourcesInput$LE": "

    The less than or equal to operator. The DataSource results will have FilterVariable values that are less than or equal to the value specified with LE.

    ", + "DescribeDataSourcesInput$NE": "

    The not equal to operator. The DataSource results will have FilterVariable values not equal to the value specified with NE.

    ", + "DescribeDataSourcesInput$Prefix": "

    A string that is found at the beginning of a variable, such as Name or Id.

    For example, a DataSource could have the Name 2014-09-09-HolidayGiftMailer. To search for this DataSource, select Name for the FilterVariable and any of the following strings for the Prefix:

    • 2014-09

    • 2014-09-09

    • 2014-09-09-Holiday

    ", + "DescribeEvaluationsInput$EQ": "

    The equal to operator. The Evaluation results will have FilterVariable values that exactly match the value specified with EQ.

    ", + "DescribeEvaluationsInput$GT": "

    The greater than operator. The Evaluation results will have FilterVariable values that are greater than the value specified with GT.

    ", + "DescribeEvaluationsInput$LT": "

    The less than operator. The Evaluation results will have FilterVariable values that are less than the value specified with LT.

    ", + "DescribeEvaluationsInput$GE": "

    The greater than or equal to operator. The Evaluation results will have FilterVariable values that are greater than or equal to the value specified with GE.

    ", + "DescribeEvaluationsInput$LE": "

    The less than or equal to operator. The Evaluation results will have FilterVariable values that are less than or equal to the value specified with LE.

    ", + "DescribeEvaluationsInput$NE": "

    The not equal to operator. The Evaluation results will have FilterVariable values not equal to the value specified with NE.

    ", + "DescribeEvaluationsInput$Prefix": "

    A string that is found at the beginning of a variable, such as Name or Id.

    For example, an Evaluation could have the Name 2014-09-09-HolidayGiftMailer. To search for this Evaluation, select Name for the FilterVariable and any of the following strings for the Prefix:

    • 2014-09

    • 2014-09-09

    • 2014-09-09-Holiday

    ", + "DescribeMLModelsInput$EQ": "

    The equal to operator. The MLModel results will have FilterVariable values that exactly match the value specified with EQ.

    ", + "DescribeMLModelsInput$GT": "

    The greater than operator. The MLModel results will have FilterVariable values that are greater than the value specified with GT.

    ", + "DescribeMLModelsInput$LT": "

    The less than operator. The MLModel results will have FilterVariable values that are less than the value specified with LT.

    ", + "DescribeMLModelsInput$GE": "

    The greater than or equal to operator. The MLModel results will have FilterVariable values that are greater than or equal to the value specified with GE.

    ", + "DescribeMLModelsInput$LE": "

    The less than or equal to operator. The MLModel results will have FilterVariable values that are less than or equal to the value specified with LE.

    ", + "DescribeMLModelsInput$NE": "

    The not equal to operator. The MLModel results will have FilterVariable values not equal to the value specified with NE.

    ", + "DescribeMLModelsInput$Prefix": "

    A string that is found at the beginning of a variable, such as Name or Id.

    For example, an MLModel could have the Name 2014-09-09-HolidayGiftMailer. To search for this MLModel, select Name for the FilterVariable and any of the following strings for the Prefix:

    • 2014-09

    • 2014-09-09

    • 2014-09-09-Holiday

    " + } + }, + "ComputeStatistics": { + "base": null, + "refs": { + "CreateDataSourceFromRDSInput$ComputeStatistics": "

    The compute statistics for a DataSource. The statistics are generated from the observation data referenced by a DataSource. Amazon ML uses the statistics internally during MLModel training. This parameter must be set to true if the DataSource needs to be used for MLModel training.

    ", + "CreateDataSourceFromRedshiftInput$ComputeStatistics": "

    The compute statistics for a DataSource. The statistics are generated from the observation data referenced by a DataSource. Amazon ML uses the statistics internally during MLModel training. This parameter must be set to true if the DataSource needs to be used for MLModel training.

    ", + "CreateDataSourceFromS3Input$ComputeStatistics": "

    The compute statistics for a DataSource. The statistics are generated from the observation data referenced by a DataSource. Amazon ML uses the statistics internally during MLModel training. This parameter must be set to true if the DataSource needs to be used for MLModel training.

    ", + "DataSource$ComputeStatistics": "

    The parameter is true if statistics need to be generated from the observation data.

    ", + "GetDataSourceOutput$ComputeStatistics": "

    The parameter is true if statistics need to be generated from the observation data.

    " + } + }, + "CreateBatchPredictionInput": { + "base": null, + "refs": { + } + }, + "CreateBatchPredictionOutput": { + "base": "

    Represents the output of a CreateBatchPrediction operation, and is an acknowledgement that Amazon ML received the request.

    The CreateBatchPrediction operation is asynchronous. You can poll for status updates by using the GetBatchPrediction operation and checking the Status parameter of the result.

    ", + "refs": { + } + }, + "CreateDataSourceFromRDSInput": { + "base": null, + "refs": { + } + }, + "CreateDataSourceFromRDSOutput": { + "base": "

    Represents the output of a CreateDataSourceFromRDS operation, and is an acknowledgement that Amazon ML received the request.

    The CreateDataSourceFromRDS operation is asynchronous. You can poll for updates by using the GetDataSource operation and checking the Status parameter. You can inspect the Message when Status shows up as FAILED. You can also check the progress of the copy operation by going to the DataPipeline console and looking up the pipeline using the pipelineId from the describe call.

    ", + "refs": { + } + }, + "CreateDataSourceFromRedshiftInput": { + "base": null, + "refs": { + } + }, + "CreateDataSourceFromRedshiftOutput": { + "base": "

    Represents the output of a CreateDataSourceFromRedshift operation, and is an acknowledgement that Amazon ML received the request.

    The CreateDataSourceFromRedshift operation is asynchronous. You can poll for updates by using the GetDataSource operation and checking the Status parameter.

    ", + "refs": { + } + }, + "CreateDataSourceFromS3Input": { + "base": null, + "refs": { + } + }, + "CreateDataSourceFromS3Output": { + "base": "

    Represents the output of a CreateDataSourceFromS3 operation, and is an acknowledgement that Amazon ML received the request.

    The CreateDataSourceFromS3 operation is asynchronous. You can poll for updates by using the GetDataSource operation and checking the Status parameter.

    ", + "refs": { + } + }, + "CreateEvaluationInput": { + "base": null, + "refs": { + } + }, + "CreateEvaluationOutput": { + "base": "

    Represents the output of a CreateEvaluation operation, and is an acknowledgement that Amazon ML received the request.

    The CreateEvaluation operation is asynchronous. You can poll for status updates by using the GetEvaluation operation and checking the Status parameter.

    ", + "refs": { + } + }, + "CreateMLModelInput": { + "base": null, + "refs": { + } + }, + "CreateMLModelOutput": { + "base": "

    Represents the output of a CreateMLModel operation, and is an acknowledgement that Amazon ML received the request.

    The CreateMLModel operation is asynchronous. You can poll for status updates by using the GetMLModel operation and checking the Status parameter.

    ", + "refs": { + } + }, + "CreateRealtimeEndpointInput": { + "base": null, + "refs": { + } + }, + "CreateRealtimeEndpointOutput": { + "base": "

    Represents the output of a CreateRealtimeEndpoint operation.

    The result contains the MLModelId and the endpoint information for the MLModel.

    The endpoint information includes the URI of the MLModel; that is, the location to send online prediction requests for the specified MLModel.

    ", + "refs": { + } + }, + "DataRearrangement": { + "base": null, + "refs": { + "DataSource$DataRearrangement": "

    A JSON string that represents the splitting and rearrangement requirement used when this DataSource was created.

    ", + "GetDataSourceOutput$DataRearrangement": "

    A JSON string that represents the splitting and rearrangement requirement used when this DataSource was created.

    ", + "RDSDataSpec$DataRearrangement": "

    A JSON string that represents the splitting and rearrangement processing to be applied to a DataSource. If the DataRearrangement parameter is not provided, all of the input data is used to create the Datasource.

    There are multiple parameters that control what data is used to create a datasource:

    • percentBegin

      Use percentBegin to indicate the beginning of the range of the data used to create the Datasource. If you do not include percentBegin and percentEnd, Amazon ML includes all of the data when creating the datasource.

    • percentEnd

      Use percentEnd to indicate the end of the range of the data used to create the Datasource. If you do not include percentBegin and percentEnd, Amazon ML includes all of the data when creating the datasource.

    • complement

      The complement parameter instructs Amazon ML to use the data that is not included in the range of percentBegin to percentEnd to create a datasource. The complement parameter is useful if you need to create complementary datasources for training and evaluation. To create a complementary datasource, use the same values for percentBegin and percentEnd, along with the complement parameter.

      For example, the following two datasources do not share any data, and can be used to train and evaluate a model. The first datasource has 25 percent of the data, and the second one has 75 percent of the data.

      Datasource for evaluation: {\"splitting\":{\"percentBegin\":0, \"percentEnd\":25}}

      Datasource for training: {\"splitting\":{\"percentBegin\":0, \"percentEnd\":25, \"complement\":\"true\"}}

    • strategy

      To change how Amazon ML splits the data for a datasource, use the strategy parameter.

      The default value for the strategy parameter is sequential, meaning that Amazon ML takes all of the data records between the percentBegin and percentEnd parameters for the datasource, in the order that the records appear in the input data.

      The following two DataRearrangement lines are examples of sequentially ordered training and evaluation datasources:

      Datasource for evaluation: {\"splitting\":{\"percentBegin\":70, \"percentEnd\":100, \"strategy\":\"sequential\"}}

      Datasource for training: {\"splitting\":{\"percentBegin\":70, \"percentEnd\":100, \"strategy\":\"sequential\", \"complement\":\"true\"}}

      To randomly split the input data into the proportions indicated by the percentBegin and percentEnd parameters, set the strategy parameter to random and provide a string that is used as the seed value for the random data splitting (for example, you can use the S3 path to your data as the random seed string). If you choose the random split strategy, Amazon ML assigns each row of data a pseudo-random number between 0 and 100, and then selects the rows that have an assigned number between percentBegin and percentEnd. Pseudo-random numbers are assigned using both the input seed string value and the byte offset as a seed, so changing the data results in a different split. Any existing ordering is preserved. The random splitting strategy ensures that variables in the training and evaluation data are distributed similarly. It is useful in the cases where the input data may have an implicit sort order, which would otherwise result in training and evaluation datasources containing non-similar data records.

      The following two DataRearrangement lines are examples of non-sequentially ordered training and evaluation datasources:

      Datasource for evaluation: {\"splitting\":{\"percentBegin\":70, \"percentEnd\":100, \"strategy\":\"random\", \"randomSeed\":\"s3://my_s3_path/bucket/file.csv\"}}

      Datasource for training: {\"splitting\":{\"percentBegin\":70, \"percentEnd\":100, \"strategy\":\"random\", \"randomSeed\":\"s3://my_s3_path/bucket/file.csv\", \"complement\":\"true\"}}

    ", + "RedshiftDataSpec$DataRearrangement": "

    A JSON string that represents the splitting and rearrangement processing to be applied to a DataSource. If the DataRearrangement parameter is not provided, all of the input data is used to create the Datasource.

    There are multiple parameters that control what data is used to create a datasource:

    • percentBegin

      Use percentBegin to indicate the beginning of the range of the data used to create the Datasource. If you do not include percentBegin and percentEnd, Amazon ML includes all of the data when creating the datasource.

    • percentEnd

      Use percentEnd to indicate the end of the range of the data used to create the Datasource. If you do not include percentBegin and percentEnd, Amazon ML includes all of the data when creating the datasource.

    • complement

      The complement parameter instructs Amazon ML to use the data that is not included in the range of percentBegin to percentEnd to create a datasource. The complement parameter is useful if you need to create complementary datasources for training and evaluation. To create a complementary datasource, use the same values for percentBegin and percentEnd, along with the complement parameter.

      For example, the following two datasources do not share any data, and can be used to train and evaluate a model. The first datasource has 25 percent of the data, and the second one has 75 percent of the data.

      Datasource for evaluation: {\"splitting\":{\"percentBegin\":0, \"percentEnd\":25}}

      Datasource for training: {\"splitting\":{\"percentBegin\":0, \"percentEnd\":25, \"complement\":\"true\"}}

    • strategy

      To change how Amazon ML splits the data for a datasource, use the strategy parameter.

      The default value for the strategy parameter is sequential, meaning that Amazon ML takes all of the data records between the percentBegin and percentEnd parameters for the datasource, in the order that the records appear in the input data.

      The following two DataRearrangement lines are examples of sequentially ordered training and evaluation datasources:

      Datasource for evaluation: {\"splitting\":{\"percentBegin\":70, \"percentEnd\":100, \"strategy\":\"sequential\"}}

      Datasource for training: {\"splitting\":{\"percentBegin\":70, \"percentEnd\":100, \"strategy\":\"sequential\", \"complement\":\"true\"}}

      To randomly split the input data into the proportions indicated by the percentBegin and percentEnd parameters, set the strategy parameter to random and provide a string that is used as the seed value for the random data splitting (for example, you can use the S3 path to your data as the random seed string). If you choose the random split strategy, Amazon ML assigns each row of data a pseudo-random number between 0 and 100, and then selects the rows that have an assigned number between percentBegin and percentEnd. Pseudo-random numbers are assigned using both the input seed string value and the byte offset as a seed, so changing the data results in a different split. Any existing ordering is preserved. The random splitting strategy ensures that variables in the training and evaluation data are distributed similarly. It is useful in the cases where the input data may have an implicit sort order, which would otherwise result in training and evaluation datasources containing non-similar data records.

      The following two DataRearrangement lines are examples of non-sequentially ordered training and evaluation datasources:

      Datasource for evaluation: {\"splitting\":{\"percentBegin\":70, \"percentEnd\":100, \"strategy\":\"random\", \"randomSeed\":\"s3://my_s3_path/bucket/file.csv\"}}

      Datasource for training: {\"splitting\":{\"percentBegin\":70, \"percentEnd\":100, \"strategy\":\"random\", \"randomSeed\":\"s3://my_s3_path/bucket/file.csv\", \"complement\":\"true\"}}

    ", + "S3DataSpec$DataRearrangement": "

    A JSON string that represents the splitting and rearrangement processing to be applied to a DataSource. If the DataRearrangement parameter is not provided, all of the input data is used to create the Datasource.

    There are multiple parameters that control what data is used to create a datasource:

    • percentBegin

      Use percentBegin to indicate the beginning of the range of the data used to create the Datasource. If you do not include percentBegin and percentEnd, Amazon ML includes all of the data when creating the datasource.

    • percentEnd

      Use percentEnd to indicate the end of the range of the data used to create the Datasource. If you do not include percentBegin and percentEnd, Amazon ML includes all of the data when creating the datasource.

    • complement

      The complement parameter instructs Amazon ML to use the data that is not included in the range of percentBegin to percentEnd to create a datasource. The complement parameter is useful if you need to create complementary datasources for training and evaluation. To create a complementary datasource, use the same values for percentBegin and percentEnd, along with the complement parameter.

      For example, the following two datasources do not share any data, and can be used to train and evaluate a model. The first datasource has 25 percent of the data, and the second one has 75 percent of the data.

      Datasource for evaluation: {\"splitting\":{\"percentBegin\":0, \"percentEnd\":25}}

      Datasource for training: {\"splitting\":{\"percentBegin\":0, \"percentEnd\":25, \"complement\":\"true\"}}

    • strategy

      To change how Amazon ML splits the data for a datasource, use the strategy parameter.

      The default value for the strategy parameter is sequential, meaning that Amazon ML takes all of the data records between the percentBegin and percentEnd parameters for the datasource, in the order that the records appear in the input data.

      The following two DataRearrangement lines are examples of sequentially ordered training and evaluation datasources:

      Datasource for evaluation: {\"splitting\":{\"percentBegin\":70, \"percentEnd\":100, \"strategy\":\"sequential\"}}

      Datasource for training: {\"splitting\":{\"percentBegin\":70, \"percentEnd\":100, \"strategy\":\"sequential\", \"complement\":\"true\"}}

      To randomly split the input data into the proportions indicated by the percentBegin and percentEnd parameters, set the strategy parameter to random and provide a string that is used as the seed value for the random data splitting (for example, you can use the S3 path to your data as the random seed string). If you choose the random split strategy, Amazon ML assigns each row of data a pseudo-random number between 0 and 100, and then selects the rows that have an assigned number between percentBegin and percentEnd. Pseudo-random numbers are assigned using both the input seed string value and the byte offset as a seed, so changing the data results in a different split. Any existing ordering is preserved. The random splitting strategy ensures that variables in the training and evaluation data are distributed similarly. It is useful in the cases where the input data may have an implicit sort order, which would otherwise result in training and evaluation datasources containing non-similar data records.

      The following two DataRearrangement lines are examples of non-sequentially ordered training and evaluation datasources:

      Datasource for evaluation: {\"splitting\":{\"percentBegin\":70, \"percentEnd\":100, \"strategy\":\"random\", \"randomSeed\":\"s3://my_s3_path/bucket/file.csv\"}}

      Datasource for training: {\"splitting\":{\"percentBegin\":70, \"percentEnd\":100, \"strategy\":\"random\", \"randomSeed\":\"s3://my_s3_path/bucket/file.csv\", \"complement\":\"true\"}}
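
    Tying the splitting parameters above together, a Go sketch that creates complementary 70/30 training and evaluation datasources from one S3 file; bucket paths and IDs are hypothetical:

      package main

      import (
          "fmt"
          "log"

          "github.com/aws/aws-sdk-go/aws"
          "github.com/aws/aws-sdk-go/aws/session"
          "github.com/aws/aws-sdk-go/service/machinelearning"
      )

      func main() {
          svc := machinelearning.New(session.Must(session.NewSession(
              &aws.Config{Region: aws.String("us-east-1")})))

          // Complementary 70/30 random split over one input file; the S3
          // path of the data doubles as the random seed, as suggested above.
          seed := "s3://example-bucket/observations.csv"
          train := fmt.Sprintf(`{"splitting":{"percentBegin":0,"percentEnd":70,"strategy":"random","randomSeed":%q}}`, seed)
          eval := fmt.Sprintf(`{"splitting":{"percentBegin":0,"percentEnd":70,"strategy":"random","randomSeed":%q,"complement":"true"}}`, seed)

          for id, rearrangement := range map[string]string{
              "ds-train-70": train,
              "ds-eval-30":  eval,
          } {
              _, err := svc.CreateDataSourceFromS3(&machinelearning.CreateDataSourceFromS3Input{
                  DataSourceId:      aws.String(id),
                  ComputeStatistics: aws.Bool(true),
                  DataSpec: &machinelearning.S3DataSpec{
                      DataLocationS3:       aws.String(seed),
                      DataSchemaLocationS3: aws.String("s3://example-bucket/observations.schema"),
                      DataRearrangement:    aws.String(rearrangement),
                  },
              })
              if err != nil {
                  log.Fatal(err)
              }
          }
      }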

    " + } + }, + "DataSchema": { + "base": "

    The schema of a DataSource. The DataSchema defines the structure of the observation data in the data file(s) referenced in the DataSource. The DataSource schema is expressed in JSON format.

    DataSchema is not required if you specify a DataSchemaUri.

    { \"version\": \"1.0\", \"recordAnnotationFieldName\": \"F1\", \"recordWeightFieldName\": \"F2\", \"targetFieldName\": \"F3\", \"dataFormat\": \"CSV\", \"dataFileContainsHeader\": true, \"variables\": [ { \"fieldName\": \"F1\", \"fieldType\": \"TEXT\" }, { \"fieldName\": \"F2\", \"fieldType\": \"NUMERIC\" }, { \"fieldName\": \"F3\", \"fieldType\": \"CATEGORICAL\" }, { \"fieldName\": \"F4\", \"fieldType\": \"NUMERIC\" }, { \"fieldName\": \"F5\", \"fieldType\": \"CATEGORICAL\" }, { \"fieldName\": \"F6\", \"fieldType\": \"TEXT\" }, { \"fieldName\": \"F7\", \"fieldType\": \"WEIGHTED_INT_SEQUENCE\" }, { \"fieldName\": \"F8\", \"fieldType\": \"WEIGHTED_STRING_SEQUENCE\" } ], \"excludedVariableNames\": [ \"F6\" ] }

    ", + "refs": { + "GetDataSourceOutput$DataSourceSchema": "

    The schema used by all of the data files of this DataSource.

    Note

    This parameter is provided as part of the verbose format.

    ", + "GetMLModelOutput$Schema": "

    The schema used by all of the data files referenced by the DataSource.

    Note

    This parameter is provided as part of the verbose format.

    ", + "RDSDataSpec$DataSchema": "

    A JSON string that represents the schema for an Amazon RDS DataSource. The DataSchema defines the structure of the observation data in the data file(s) referenced in the DataSource.

    A DataSchema is not required if you specify a DataSchemaUri.

    Define your DataSchema as a series of key-value pairs. attributes and excludedVariableNames have an array of key-value pairs for their value. Use the following format to define your DataSchema.

    { \"version\": \"1.0\",

    \"recordAnnotationFieldName\": \"F1\",

    \"recordWeightFieldName\": \"F2\",

    \"targetFieldName\": \"F3\",

    \"dataFormat\": \"CSV\",

    \"dataFileContainsHeader\": true,

    \"attributes\": [

    { \"fieldName\": \"F1\", \"fieldType\": \"TEXT\" }, { \"fieldName\": \"F2\", \"fieldType\": \"NUMERIC\" }, { \"fieldName\": \"F3\", \"fieldType\": \"CATEGORICAL\" }, { \"fieldName\": \"F4\", \"fieldType\": \"NUMERIC\" }, { \"fieldName\": \"F5\", \"fieldType\": \"CATEGORICAL\" }, { \"fieldName\": \"F6\", \"fieldType\": \"TEXT\" }, { \"fieldName\": \"F7\", \"fieldType\": \"WEIGHTED_INT_SEQUENCE\" }, { \"fieldName\": \"F8\", \"fieldType\": \"WEIGHTED_STRING_SEQUENCE\" } ],

    \"excludedVariableNames\": [ \"F6\" ] }

    ", + "RedshiftDataSpec$DataSchema": "

    A JSON string that represents the schema for an Amazon Redshift DataSource. The DataSchema defines the structure of the observation data in the data file(s) referenced in the DataSource.

    A DataSchema is not required if you specify a DataSchemaUri.

    Define your DataSchema as a series of key-value pairs. attributes and excludedVariableNames have an array of key-value pairs for their value. Use the following format to define your DataSchema.

    { \"version\": \"1.0\",

    \"recordAnnotationFieldName\": \"F1\",

    \"recordWeightFieldName\": \"F2\",

    \"targetFieldName\": \"F3\",

    \"dataFormat\": \"CSV\",

    \"dataFileContainsHeader\": true,

    \"attributes\": [

    { \"fieldName\": \"F1\", \"fieldType\": \"TEXT\" }, { \"fieldName\": \"F2\", \"fieldType\": \"NUMERIC\" }, { \"fieldName\": \"F3\", \"fieldType\": \"CATEGORICAL\" }, { \"fieldName\": \"F4\", \"fieldType\": \"NUMERIC\" }, { \"fieldName\": \"F5\", \"fieldType\": \"CATEGORICAL\" }, { \"fieldName\": \"F6\", \"fieldType\": \"TEXT\" }, { \"fieldName\": \"F7\", \"fieldType\": \"WEIGHTED_INT_SEQUENCE\" }, { \"fieldName\": \"F8\", \"fieldType\": \"WEIGHTED_STRING_SEQUENCE\" } ],

    \"excludedVariableNames\": [ \"F6\" ] }

    ", + "S3DataSpec$DataSchema": "

    A JSON string that represents the schema for an Amazon S3 DataSource. The DataSchema defines the structure of the observation data in the data file(s) referenced in the DataSource.

    You must provide either the DataSchema or the DataSchemaLocationS3.

    Define your DataSchema as a series of key-value pairs. attributes and excludedVariableNames have an array of key-value pairs for their value. Use the following format to define your DataSchema.

    { \"version\": \"1.0\",

    \"recordAnnotationFieldName\": \"F1\",

    \"recordWeightFieldName\": \"F2\",

    \"targetFieldName\": \"F3\",

    \"dataFormat\": \"CSV\",

    \"dataFileContainsHeader\": true,

    \"attributes\": [

    { \"fieldName\": \"F1\", \"fieldType\": \"TEXT\" }, { \"fieldName\": \"F2\", \"fieldType\": \"NUMERIC\" }, { \"fieldName\": \"F3\", \"fieldType\": \"CATEGORICAL\" }, { \"fieldName\": \"F4\", \"fieldType\": \"NUMERIC\" }, { \"fieldName\": \"F5\", \"fieldType\": \"CATEGORICAL\" }, { \"fieldName\": \"F6\", \"fieldType\": \"TEXT\" }, { \"fieldName\": \"F7\", \"fieldType\": \"WEIGHTED_INT_SEQUENCE\" }, { \"fieldName\": \"F8\", \"fieldType\": \"WEIGHTED_STRING_SEQUENCE\" } ],

    \"excludedVariableNames\": [ \"F6\" ] }

    " + } + }, + "DataSource": { + "base": "

    Represents the output of the GetDataSource operation.

    The content consists of the detailed metadata and data file information and the current status of the DataSource.

    ", + "refs": { + "DataSources$member": null + } + }, + "DataSourceFilterVariable": { + "base": "

    A list of the variables to use in searching or filtering DataSource.

    • CreatedAt - Sets the search criteria to DataSource creation date.
    • Status - Sets the search criteria to DataSource status.
    • Name - Sets the search criteria to the contents of DataSource Name.
    • DataUri - Sets the search criteria to the URI of data files used to create the DataSource. The URI can identify either a file or an Amazon Simple Storage Service (Amazon S3) bucket or directory.
    • IAMUser - Sets the search criteria to the user account that invoked the DataSource creation.
    Note

    The variable names should match the variable names in the DataSource.

    ", + "refs": { + "DescribeDataSourcesInput$FilterVariable": "

    Use one of the following variables to filter a list of DataSource:

    • CreatedAt - Sets the search criteria to DataSource creation dates.
    • Status - Sets the search criteria to DataSource statuses.
    • Name - Sets the search criteria to the contents of DataSource Name.
    • DataUri - Sets the search criteria to the URI of data files used to create the DataSource. The URI can identify either a file or an Amazon Simple Storage Service (Amazon S3) bucket or directory.
    • IAMUser - Sets the search criteria to the user account that invoked the DataSource creation.
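
    For example, filtering by Name with the machinelearning client vendored in this SDK might look like the sketch below; the session setup, the "prod-" prefix, and the limit are illustrative, and Prefix as the matching operator is an assumption:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/machinelearning"
)

func main() {
	svc := machinelearning.New(session.New())

	// Filter on Name: return datasources whose names begin with "prod-".
	out, err := svc.DescribeDataSources(&machinelearning.DescribeDataSourcesInput{
		FilterVariable: aws.String("Name"), // one of the variables listed above
		Prefix:         aws.String("prod-"),
		Limit:          aws.Int64(25),
	})
	if err != nil {
		fmt.Println("DescribeDataSources failed:", err)
		return
	}
	for _, ds := range out.Results {
		fmt.Println(aws.StringValue(ds.DataSourceId), aws.StringValue(ds.Name))
	}
}
```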
    " + } + }, + "DataSources": { + "base": null, + "refs": { + "DescribeDataSourcesOutput$Results": "

    A list of DataSource that meet the search criteria.

    " + } + }, + "DeleteBatchPredictionInput": { + "base": null, + "refs": { + } + }, + "DeleteBatchPredictionOutput": { + "base": "

    Represents the output of a DeleteBatchPrediction operation.

    You can use the GetBatchPrediction operation and check the value of the Status parameter to see whether a BatchPrediction is marked as DELETED.

    ", + "refs": { + } + }, + "DeleteDataSourceInput": { + "base": null, + "refs": { + } + }, + "DeleteDataSourceOutput": { + "base": "

    Represents the output of a DeleteDataSource operation.

    ", + "refs": { + } + }, + "DeleteEvaluationInput": { + "base": null, + "refs": { + } + }, + "DeleteEvaluationOutput": { + "base": "

    Represents the output of a DeleteEvaluation operation. The output indicates that Amazon Machine Learning (Amazon ML) received the request.

    You can use the GetEvaluation operation and check the value of the Status parameter to see whether an Evaluation is marked as DELETED.

    ", + "refs": { + } + }, + "DeleteMLModelInput": { + "base": null, + "refs": { + } + }, + "DeleteMLModelOutput": { + "base": "

    Represents the output of a DeleteMLModel operation.

    You can use the GetMLModel operation and check the value of the Status parameter to see whether an MLModel is marked as DELETED.

    ", + "refs": { + } + }, + "DeleteRealtimeEndpointInput": { + "base": null, + "refs": { + } + }, + "DeleteRealtimeEndpointOutput": { + "base": "

    Represents the output of a DeleteRealtimeEndpoint operation.

    The result contains the MLModelId and the endpoint information for the MLModel.

    ", + "refs": { + } + }, + "DeleteTagsInput": { + "base": null, + "refs": { + } + }, + "DeleteTagsOutput": { + "base": "

    Amazon ML returns the following elements.

    ", + "refs": { + } + }, + "DescribeBatchPredictionsInput": { + "base": null, + "refs": { + } + }, + "DescribeBatchPredictionsOutput": { + "base": "

    Represents the output of a DescribeBatchPredictions operation. The content is essentially a list of BatchPredictions.

    ", + "refs": { + } + }, + "DescribeDataSourcesInput": { + "base": null, + "refs": { + } + }, + "DescribeDataSourcesOutput": { + "base": "

    Represents the query results from a DescribeDataSources operation. The content is essentially a list of DataSource.

    ", + "refs": { + } + }, + "DescribeEvaluationsInput": { + "base": null, + "refs": { + } + }, + "DescribeEvaluationsOutput": { + "base": "

    Represents the query results from a DescribeEvaluations operation. The content is essentially a list of Evaluation.

    ", + "refs": { + } + }, + "DescribeMLModelsInput": { + "base": null, + "refs": { + } + }, + "DescribeMLModelsOutput": { + "base": "

    Represents the output of a DescribeMLModels operation. The content is essentially a list of MLModel.

    ", + "refs": { + } + }, + "DescribeTagsInput": { + "base": null, + "refs": { + } + }, + "DescribeTagsOutput": { + "base": "

    Amazon ML returns the following elements.

    ", + "refs": { + } + }, + "DetailsAttributes": { + "base": "Contains the key values of DetailsMap: PredictiveModelType - Indicates the type of the MLModel. Algorithm - Indicates the algorithm that was used for the MLModel.", + "refs": { + "DetailsMap$key": null + } + }, + "DetailsMap": { + "base": "Provides any additional details regarding the prediction.", + "refs": { + "Prediction$details": null + } + }, + "DetailsValue": { + "base": null, + "refs": { + "DetailsMap$value": null + } + }, + "EDPPipelineId": { + "base": null, + "refs": { + "RDSMetadata$DataPipelineId": "

    The ID of the Data Pipeline instance that is used to copy data from Amazon RDS to Amazon S3. You can use the ID to find details about the instance in the Data Pipeline console.

    " + } + }, + "EDPResourceRole": { + "base": null, + "refs": { + "RDSDataSpec$ResourceRole": "

    The role (DataPipelineDefaultResourceRole) assumed by an Amazon Elastic Compute Cloud (Amazon EC2) instance to carry out the copy task from Amazon RDS to Amazon S3. For more information, see Role templates for data pipelines.

    ", + "RDSMetadata$ResourceRole": "

    The role (DataPipelineDefaultResourceRole) assumed by an Amazon EC2 instance to carry out the copy task from Amazon RDS to Amazon S3. For more information, see Role templates for data pipelines.

    " + } + }, + "EDPSecurityGroupId": { + "base": null, + "refs": { + "EDPSecurityGroupIds$member": null + } + }, + "EDPSecurityGroupIds": { + "base": null, + "refs": { + "RDSDataSpec$SecurityGroupIds": "

    The security group IDs to be used to access a VPC-based RDS DB instance. Ensure that there are appropriate ingress rules set up to allow access to the RDS DB instance. This attribute is used by Data Pipeline to carry out the copy task from Amazon RDS to Amazon S3.

    " + } + }, + "EDPServiceRole": { + "base": null, + "refs": { + "RDSDataSpec$ServiceRole": "

    The role (DataPipelineDefaultRole) assumed by the AWS Data Pipeline service to monitor the progress of the copy task from Amazon RDS to Amazon S3. For more information, see Role templates for data pipelines.

    ", + "RDSMetadata$ServiceRole": "

    The role (DataPipelineDefaultRole) assumed by the Data Pipeline service to monitor the progress of the copy task from Amazon RDS to Amazon S3. For more information, see Role templates for data pipelines.

    " + } + }, + "EDPSubnetId": { + "base": null, + "refs": { + "RDSDataSpec$SubnetId": "

    The subnet ID to be used to access a VPC-based RDS DB instance. This attribute is used by Data Pipeline to carry out the copy task from Amazon RDS to Amazon S3.

    " + } + }, + "EntityId": { + "base": null, + "refs": { + "AddTagsInput$ResourceId": "

    The ID of the ML object to tag. For example, exampleModelId.

    ", + "AddTagsOutput$ResourceId": "

    The ID of the ML object that was tagged.

    ", + "BatchPrediction$BatchPredictionId": "

    The ID assigned to the BatchPrediction at creation. This value should be identical to the value of the BatchPredictionID in the request.

    ", + "BatchPrediction$MLModelId": "

    The ID of the MLModel that generated predictions for the BatchPrediction request.

    ", + "BatchPrediction$BatchPredictionDataSourceId": "

    The ID of the DataSource that points to the group of observations to predict.

    ", + "CreateBatchPredictionInput$BatchPredictionId": "

    A user-supplied ID that uniquely identifies the BatchPrediction.

    ", + "CreateBatchPredictionInput$MLModelId": "

    The ID of the MLModel that will generate predictions for the group of observations.

    ", + "CreateBatchPredictionInput$BatchPredictionDataSourceId": "

    The ID of the DataSource that points to the group of observations to predict.

    ", + "CreateBatchPredictionOutput$BatchPredictionId": "

    A user-supplied ID that uniquely identifies the BatchPrediction. This value is identical to the value of the BatchPredictionId in the request.

    ", + "CreateDataSourceFromRDSInput$DataSourceId": "

    A user-supplied ID that uniquely identifies the DataSource. Typically, an Amazon Resource Name (ARN) becomes the ID for a DataSource.

    ", + "CreateDataSourceFromRDSOutput$DataSourceId": "

    A user-supplied ID that uniquely identifies the datasource. This value should be identical to the value of the DataSourceID in the request.

    ", + "CreateDataSourceFromRedshiftInput$DataSourceId": "

    A user-supplied ID that uniquely identifies the DataSource.

    ", + "CreateDataSourceFromRedshiftOutput$DataSourceId": "

    A user-supplied ID that uniquely identifies the datasource. This value should be identical to the value of the DataSourceID in the request.

    ", + "CreateDataSourceFromS3Input$DataSourceId": "

    A user-supplied identifier that uniquely identifies the DataSource.

    ", + "CreateDataSourceFromS3Output$DataSourceId": "

    A user-supplied ID that uniquely identifies the DataSource. This value should be identical to the value of the DataSourceID in the request.

    ", + "CreateEvaluationInput$EvaluationId": "

    A user-supplied ID that uniquely identifies the Evaluation.

    ", + "CreateEvaluationInput$MLModelId": "

    The ID of the MLModel to evaluate.

    The schema used in creating the MLModel must match the schema of the DataSource used in the Evaluation.

    ", + "CreateEvaluationInput$EvaluationDataSourceId": "

    The ID of the DataSource for the evaluation. The schema of the DataSource must match the schema used to create the MLModel.

    ", + "CreateEvaluationOutput$EvaluationId": "

    The user-supplied ID that uniquely identifies the Evaluation. This value should be identical to the value of the EvaluationId in the request.

    ", + "CreateMLModelInput$MLModelId": "

    A user-supplied ID that uniquely identifies the MLModel.

    ", + "CreateMLModelInput$TrainingDataSourceId": "

    The DataSource that points to the training data.

    ", + "CreateMLModelOutput$MLModelId": "

    A user-supplied ID that uniquely identifies the MLModel. This value should be identical to the value of the MLModelId in the request.

    ", + "CreateRealtimeEndpointInput$MLModelId": "

    The ID assigned to the MLModel during creation.

    ", + "CreateRealtimeEndpointOutput$MLModelId": "

    A user-supplied ID that uniquely identifies the MLModel. This value should be identical to the value of the MLModelId in the request.

    ", + "DataSource$DataSourceId": "

    The ID that is assigned to the DataSource during creation.

    ", + "DeleteBatchPredictionInput$BatchPredictionId": "

    A user-supplied ID that uniquely identifies the BatchPrediction.

    ", + "DeleteBatchPredictionOutput$BatchPredictionId": "

    A user-supplied ID that uniquely identifies the BatchPrediction. This value should be identical to the value of the BatchPredictionID in the request.

    ", + "DeleteDataSourceInput$DataSourceId": "

    A user-supplied ID that uniquely identifies the DataSource.

    ", + "DeleteDataSourceOutput$DataSourceId": "

    A user-supplied ID that uniquely identifies the DataSource. This value should be identical to the value of the DataSourceID in the request.

    ", + "DeleteEvaluationInput$EvaluationId": "

    A user-supplied ID that uniquely identifies the Evaluation to delete.

    ", + "DeleteEvaluationOutput$EvaluationId": "

    A user-supplied ID that uniquely identifies the Evaluation. This value should be identical to the value of the EvaluationId in the request.

    ", + "DeleteMLModelInput$MLModelId": "

    A user-supplied ID that uniquely identifies the MLModel.

    ", + "DeleteMLModelOutput$MLModelId": "

    A user-supplied ID that uniquely identifies the MLModel. This value should be identical to the value of the MLModelID in the request.

    ", + "DeleteRealtimeEndpointInput$MLModelId": "

    The ID assigned to the MLModel during creation.

    ", + "DeleteRealtimeEndpointOutput$MLModelId": "

    A user-supplied ID that uniquely identifies the MLModel. This value should be identical to the value of the MLModelId in the request.

    ", + "DeleteTagsInput$ResourceId": "

    The ID of the tagged ML object. For example, exampleModelId.

    ", + "DeleteTagsOutput$ResourceId": "

    The ID of the ML object from which tags were deleted.

    ", + "DescribeTagsInput$ResourceId": "

    The ID of the ML object. For example, exampleModelId.

    ", + "DescribeTagsOutput$ResourceId": "

    The ID of the tagged ML object.

    ", + "Evaluation$EvaluationId": "

    The ID that is assigned to the Evaluation at creation.

    ", + "Evaluation$MLModelId": "

    The ID of the MLModel that is the focus of the evaluation.

    ", + "Evaluation$EvaluationDataSourceId": "

    The ID of the DataSource that is used to evaluate the MLModel.

    ", + "GetBatchPredictionInput$BatchPredictionId": "

    An ID assigned to the BatchPrediction at creation.

    ", + "GetBatchPredictionOutput$BatchPredictionId": "

    An ID assigned to the BatchPrediction at creation. This value should be identical to the value of the BatchPredictionID in the request.

    ", + "GetBatchPredictionOutput$MLModelId": "

    The ID of the MLModel that generated predictions for the BatchPrediction request.

    ", + "GetBatchPredictionOutput$BatchPredictionDataSourceId": "

    The ID of the DataSource that was used to create the BatchPrediction.

    ", + "GetDataSourceInput$DataSourceId": "

    The ID assigned to the DataSource at creation.

    ", + "GetDataSourceOutput$DataSourceId": "

    The ID assigned to the DataSource at creation. This value should be identical to the value of the DataSourceId in the request.

    ", + "GetEvaluationInput$EvaluationId": "

    The ID of the Evaluation to retrieve. The evaluation of each MLModel is recorded and cataloged. The ID provides the means to access the information.

    ", + "GetEvaluationOutput$EvaluationId": "

    The evaluation ID, which is the same as the EvaluationId in the request.

    ", + "GetEvaluationOutput$MLModelId": "

    The ID of the MLModel that was the focus of the evaluation.

    ", + "GetEvaluationOutput$EvaluationDataSourceId": "

    The DataSource used for this evaluation.

    ", + "GetMLModelInput$MLModelId": "

    The ID assigned to the MLModel at creation.

    ", + "GetMLModelOutput$MLModelId": "

    The MLModel ID, which is the same as the MLModelId in the request.

    ", + "GetMLModelOutput$TrainingDataSourceId": "

    The ID of the training DataSource.

    ", + "MLModel$MLModelId": "

    The ID assigned to the MLModel at creation.

    ", + "MLModel$TrainingDataSourceId": "

    The ID of the training DataSource. The CreateMLModel operation uses the TrainingDataSourceId.

    ", + "PredictInput$MLModelId": "

    A unique identifier of the MLModel.

    ", + "UpdateBatchPredictionInput$BatchPredictionId": "

    The ID assigned to the BatchPrediction during creation.

    ", + "UpdateBatchPredictionOutput$BatchPredictionId": "

    The ID assigned to the BatchPrediction during creation. This value should be identical to the value of the BatchPredictionId in the request.

    ", + "UpdateDataSourceInput$DataSourceId": "

    The ID assigned to the DataSource during creation.

    ", + "UpdateDataSourceOutput$DataSourceId": "

    The ID assigned to the DataSource during creation. This value should be identical to the value of the DataSourceID in the request.

    ", + "UpdateEvaluationInput$EvaluationId": "

    The ID assigned to the Evaluation during creation.

    ", + "UpdateEvaluationOutput$EvaluationId": "

    The ID assigned to the Evaluation during creation. This value should be identical to the value of the Evaluation in the request.

    ", + "UpdateMLModelInput$MLModelId": "

    The ID assigned to the MLModel during creation.

    ", + "UpdateMLModelOutput$MLModelId": "

    The ID assigned to the MLModel during creation. This value should be identical to the value of the MLModelID in the request.

    " + } + }, + "EntityName": { + "base": "

    A user-supplied name or description of the Amazon ML resource.

    ", + "refs": { + "BatchPrediction$Name": "

    A user-supplied name or description of the BatchPrediction.

    ", + "CreateBatchPredictionInput$BatchPredictionName": "

    A user-supplied name or description of the BatchPrediction. BatchPredictionName can only use the UTF-8 character set.

    ", + "CreateDataSourceFromRDSInput$DataSourceName": "

    A user-supplied name or description of the DataSource.

    ", + "CreateDataSourceFromRedshiftInput$DataSourceName": "

    A user-supplied name or description of the DataSource.

    ", + "CreateDataSourceFromS3Input$DataSourceName": "

    A user-supplied name or description of the DataSource.

    ", + "CreateEvaluationInput$EvaluationName": "

    A user-supplied name or description of the Evaluation.

    ", + "CreateMLModelInput$MLModelName": "

    A user-supplied name or description of the MLModel.

    ", + "DataSource$Name": "

    A user-supplied name or description of the DataSource.

    ", + "Evaluation$Name": "

    A user-supplied name or description of the Evaluation.

    ", + "GetBatchPredictionOutput$Name": "

    A user-supplied name or description of the BatchPrediction.

    ", + "GetDataSourceOutput$Name": "

    A user-supplied name or description of the DataSource.

    ", + "GetEvaluationOutput$Name": "

    A user-supplied name or description of the Evaluation.

    ", + "UpdateBatchPredictionInput$BatchPredictionName": "

    A new user-supplied name or description of the BatchPrediction.

    ", + "UpdateDataSourceInput$DataSourceName": "

    A new user-supplied name or description of the DataSource that will replace the current description.

    ", + "UpdateEvaluationInput$EvaluationName": "

    A new user-supplied name or description of the Evaluation that will replace the current content.

    ", + "UpdateMLModelInput$MLModelName": "

    A user-supplied name or description of the MLModel.

    " + } + }, + "EntityStatus": { + "base": "

    Object status with the following possible values:

    • PENDING
    • INPROGRESS
    • FAILED
    • COMPLETED
    • DELETED
    ", + "refs": { + "BatchPrediction$Status": "

    The status of the BatchPrediction. This element can have one of the following values:

    • PENDING - Amazon Machine Learning (Amazon ML) submitted a request to generate predictions for a batch of observations.
    • INPROGRESS - The process is underway.
    • FAILED - The request to perform a batch prediction did not run to completion. It is not usable.
    • COMPLETED - The batch prediction process completed successfully.
    • DELETED - The BatchPrediction is marked as deleted. It is not usable.
    ", + "DataSource$Status": "

    The current status of the DataSource. This element can have one of the following values:

    • PENDING - Amazon Machine Learning (Amazon ML) submitted a request to create a DataSource.
    • INPROGRESS - The creation process is underway.
    • FAILED - The request to create a DataSource did not run to completion. It is not usable.
    • COMPLETED - The creation process completed successfully.
    • DELETED - The DataSource is marked as deleted. It is not usable.
    ", + "Evaluation$Status": "

    The status of the evaluation. This element can have one of the following values:

    • PENDING - Amazon Machine Learning (Amazon ML) submitted a request to evaluate an MLModel.
    • INPROGRESS - The evaluation is underway.
    • FAILED - The request to evaluate an MLModel did not run to completion. It is not usable.
    • COMPLETED - The evaluation process completed successfully.
    • DELETED - The Evaluation is marked as deleted. It is not usable.
    ", + "GetBatchPredictionOutput$Status": "

    The status of the BatchPrediction, which can be one of the following values:

    • PENDING - Amazon Machine Learning (Amazon ML) submitted a request to generate batch predictions.
    • INPROGRESS - The batch predictions are in progress.
    • FAILED - The request to perform a batch prediction did not run to completion. It is not usable.
    • COMPLETED - The batch prediction process completed successfully.
    • DELETED - The BatchPrediction is marked as deleted. It is not usable.
    ", + "GetDataSourceOutput$Status": "

    The current status of the DataSource. This element can have one of the following values:

    • PENDING - Amazon ML submitted a request to create a DataSource.
    • INPROGRESS - The creation process is underway.
    • FAILED - The request to create a DataSource did not run to completion. It is not usable.
    • COMPLETED - The creation process completed successfully.
    • DELETED - The DataSource is marked as deleted. It is not usable.
    ", + "GetEvaluationOutput$Status": "

    The status of the evaluation. This element can have one of the following values:

    • PENDING - Amazon Machine Learning (Amazon ML) submitted a request to evaluate an MLModel.
    • INPROGRESS - The evaluation is underway.
    • FAILED - The request to evaluate an MLModel did not run to completion. It is not usable.
    • COMPLETED - The evaluation process completed successfully.
    • DELETED - The Evaluation is marked as deleted. It is not usable.
    ", + "GetMLModelOutput$Status": "

    The current status of the MLModel. This element can have one of the following values:

    • PENDING - Amazon Machine Learning (Amazon ML) submitted a request to describe an MLModel.
    • INPROGRESS - The request is processing.
    • FAILED - The request did not run to completion. The ML model isn't usable.
    • COMPLETED - The request completed successfully.
    • DELETED - The MLModel is marked as deleted. It isn't usable.
    ", + "MLModel$Status": "

    The current status of an MLModel. This element can have one of the following values:

    • PENDING - Amazon Machine Learning (Amazon ML) submitted a request to create an MLModel.
    • INPROGRESS - The creation process is underway.
    • FAILED - The request to create an MLModel didn't run to completion. The model isn't usable.
    • COMPLETED - The creation process completed successfully.
    • DELETED - The MLModel is marked as deleted. It isn't usable.
    " + } + }, + "EpochTime": { + "base": "

    A timestamp represented in epoch time.
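
    In this Go SDK these fields surface as *time.Time values; a raw epoch value converts with the standard library. Sketch only, with an arbitrary epoch value:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Convert an epoch-seconds value to a wall-clock time.
	createdAt := time.Unix(1500000000, 0).UTC()
	fmt.Println(createdAt.Format(time.RFC3339)) // 2017-07-14T02:40:00Z
}
```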

    ", + "refs": { + "BatchPrediction$CreatedAt": "

    The time that the BatchPrediction was created. The time is expressed in epoch time.

    ", + "BatchPrediction$LastUpdatedAt": "

    The time of the most recent edit to the BatchPrediction. The time is expressed in epoch time.

    ", + "DataSource$CreatedAt": "

    The time that the DataSource was created. The time is expressed in epoch time.

    ", + "DataSource$LastUpdatedAt": "

    The time of the most recent edit to the DataSource. The time is expressed in epoch time.

    ", + "Evaluation$CreatedAt": "

    The time that the Evaluation was created. The time is expressed in epoch time.

    ", + "Evaluation$LastUpdatedAt": "

    The time of the most recent edit to the Evaluation. The time is expressed in epoch time.

    ", + "GetBatchPredictionOutput$CreatedAt": "

    The time when the BatchPrediction was created. The time is expressed in epoch time.

    ", + "GetBatchPredictionOutput$LastUpdatedAt": "

    The time of the most recent edit to the BatchPrediction. The time is expressed in epoch time.

    ", + "GetDataSourceOutput$CreatedAt": "

    The time that the DataSource was created. The time is expressed in epoch time.

    ", + "GetDataSourceOutput$LastUpdatedAt": "

    The time of the most recent edit to the DataSource. The time is expressed in epoch time.

    ", + "GetEvaluationOutput$CreatedAt": "

    The time that the Evaluation was created. The time is expressed in epoch time.

    ", + "GetEvaluationOutput$LastUpdatedAt": "

    The time of the most recent edit to the Evaluation. The time is expressed in epoch time.

    ", + "GetMLModelOutput$CreatedAt": "

    The time that the MLModel was created. The time is expressed in epoch time.

    ", + "GetMLModelOutput$LastUpdatedAt": "

    The time of the most recent edit to the MLModel. The time is expressed in epoch time.

    ", + "GetMLModelOutput$ScoreThresholdLastUpdatedAt": "

    The time of the most recent edit to the ScoreThreshold. The time is expressed in epoch time.

    ", + "MLModel$CreatedAt": "

    The time that the MLModel was created. The time is expressed in epoch time.

    ", + "MLModel$LastUpdatedAt": "

    The time of the most recent edit to the MLModel. The time is expressed in epoch time.

    ", + "MLModel$ScoreThresholdLastUpdatedAt": "

    The time of the most recent edit to the ScoreThreshold. The time is expressed in epoch time.

    ", + "RealtimeEndpointInfo$CreatedAt": "

    The time that the request to create the real-time endpoint for the MLModel was received. The time is expressed in epoch time.

    " + } + }, + "ErrorCode": { + "base": null, + "refs": { + "IdempotentParameterMismatchException$code": null, + "InternalServerException$code": null, + "InvalidInputException$code": null, + "LimitExceededException$code": null, + "ResourceNotFoundException$code": null + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "IdempotentParameterMismatchException$message": null, + "InternalServerException$message": null, + "InvalidInputException$message": null, + "InvalidTagException$message": null, + "LimitExceededException$message": null, + "PredictorNotMountedException$message": null, + "ResourceNotFoundException$message": null, + "TagLimitExceededException$message": null + } + }, + "Evaluation": { + "base": "

    Represents the output of GetEvaluation operation.

    The content consists of the detailed metadata and data file information and the current status of the Evaluation.

    ", + "refs": { + "Evaluations$member": null + } + }, + "EvaluationFilterVariable": { + "base": "

    A list of the variables to use in searching or filtering Evaluation.

    • CreatedAt - Sets the search criteria to Evaluation creation date.
    • Status - Sets the search criteria to Evaluation status.
    • Name - Sets the search criteria to the contents of Evaluation Name.
    • IAMUser - Sets the search criteria to the user account that invoked an evaluation.
    • MLModelId - Sets the search criteria to the Predictor that was evaluated.
    • DataSourceId - Sets the search criteria to the DataSource used in evaluation.
    • DataUri - Sets the search criteria to the data file(s) used in evaluation. The URL can identify either a file or an Amazon Simple Storage Service (Amazon S3) bucket or directory.
    ", + "refs": { + "DescribeEvaluationsInput$FilterVariable": "

    Use one of the following variables to filter a list of Evaluation objects:

    • CreatedAt - Sets the search criteria to the Evaluation creation date.
    • Status - Sets the search criteria to the Evaluation status.
    • Name - Sets the search criteria to the contents of Evaluation Name.
    • IAMUser - Sets the search criteria to the user account that invoked an Evaluation.
    • MLModelId - Sets the search criteria to the MLModel that was evaluated.
    • DataSourceId - Sets the search criteria to the DataSource used in Evaluation.
    • DataUri - Sets the search criteria to the data file(s) used in Evaluation. The URL can identify either a file or an Amazon Simple Storage Service (Amazon S3) bucket or directory.
    " + } + }, + "Evaluations": { + "base": null, + "refs": { + "DescribeEvaluationsOutput$Results": "

    A list of Evaluation that meet the search criteria.

    " + } + }, + "GetBatchPredictionInput": { + "base": null, + "refs": { + } + }, + "GetBatchPredictionOutput": { + "base": "

    Represents the output of a GetBatchPrediction operation and describes a BatchPrediction.

    ", + "refs": { + } + }, + "GetDataSourceInput": { + "base": null, + "refs": { + } + }, + "GetDataSourceOutput": { + "base": "

    Represents the output of a GetDataSource operation and describes a DataSource.

    ", + "refs": { + } + }, + "GetEvaluationInput": { + "base": null, + "refs": { + } + }, + "GetEvaluationOutput": { + "base": "

    Represents the output of a GetEvaluation operation and describes an Evaluation.

    ", + "refs": { + } + }, + "GetMLModelInput": { + "base": null, + "refs": { + } + }, + "GetMLModelOutput": { + "base": "

    Represents the output of a GetMLModel operation, and provides detailed information about a MLModel.

    ", + "refs": { + } + }, + "IdempotentParameterMismatchException": { + "base": "

    A second request to use or change an object was not allowed. This can result from retrying a request using a parameter that was not present in the original request.

    ", + "refs": { + } + }, + "IntegerType": { + "base": "

    Integer type that is a 32-bit signed number.

    ", + "refs": { + "RealtimeEndpointInfo$PeakRequestsPerSecond": "

    The maximum processing rate for the real-time endpoint for MLModel, measured in incoming requests per second.

    " + } + }, + "InternalServerException": { + "base": "

    An error on the server occurred when trying to process a request.

    ", + "refs": { + } + }, + "InvalidInputException": { + "base": "

    An error on the client occurred. Typically, the cause is an invalid input value.

    ", + "refs": { + } + }, + "InvalidTagException": { + "base": null, + "refs": { + } + }, + "Label": { + "base": null, + "refs": { + "Prediction$predictedLabel": "

    The prediction label for either a BINARY or MULTICLASS MLModel.

    ", + "ScoreValuePerLabelMap$key": null + } + }, + "LimitExceededException": { + "base": "

    The subscriber exceeded the maximum number of operations. This exception can occur when listing objects such as DataSource.

    ", + "refs": { + } + }, + "LongType": { + "base": "

    Long integer type that is a 64-bit signed number.

    ", + "refs": { + "DataSource$DataSizeInBytes": "

    The total size of observations in the data files that the DataSource references.

    ", + "DataSource$NumberOfFiles": "

    The number of data files referenced by the DataSource.

    ", + "GetDataSourceOutput$DataSizeInBytes": "

    The total size of observations in the data files.

    ", + "GetDataSourceOutput$NumberOfFiles": "

    The number of data files referenced by the DataSource.

    ", + "GetMLModelOutput$SizeInBytes": null, + "MLModel$SizeInBytes": null + } + }, + "MLModel": { + "base": "

    Represents the output of a GetMLModel operation.

    The content consists of the detailed metadata and the current status of the MLModel.

    ", + "refs": { + "MLModels$member": null + } + }, + "MLModelFilterVariable": { + "base": null, + "refs": { + "DescribeMLModelsInput$FilterVariable": "

    Use one of the following variables to filter a list of MLModel:

    • CreatedAt - Sets the search criteria to MLModel creation date.
    • Status - Sets the search criteria to MLModel status.
    • Name - Sets the search criteria to the contents of MLModel Name.
    • IAMUser - Sets the search criteria to the user account that invoked the MLModel creation.
    • TrainingDataSourceId - Sets the search criteria to the DataSource used to train one or more MLModel.
    • RealtimeEndpointStatus - Sets the search criteria to the MLModel real-time endpoint status.
    • MLModelType - Sets the search criteria to MLModel type: binary, regression, or multi-class.
    • Algorithm - Sets the search criteria to the algorithm that the MLModel uses.
    • TrainingDataURI - Sets the search criteria to the data file(s) used in training an MLModel. The URL can identify either a file or an Amazon Simple Storage Service (Amazon S3) bucket or directory.
    " + } + }, + "MLModelName": { + "base": null, + "refs": { + "GetMLModelOutput$Name": "

    A user-supplied name or description of the MLModel.

    ", + "MLModel$Name": "

    A user-supplied name or description of the MLModel.

    " + } + }, + "MLModelType": { + "base": null, + "refs": { + "CreateMLModelInput$MLModelType": "

    The category of supervised learning that this MLModel will address. Choose from the following types:

    • Choose REGRESSION if the MLModel will be used to predict a numeric value.
    • Choose BINARY if the MLModel result has two possible values.
    • Choose MULTICLASS if the MLModel result has a limited number of values.

    For more information, see the Amazon Machine Learning Developer Guide.

    ", + "GetMLModelOutput$MLModelType": "

    Identifies the MLModel category. The following are the available types:

    • REGRESSION -- Produces a numeric result. For example, \"What price should a house be listed at?\"
    • BINARY -- Produces one of two possible results. For example, \"Is this an e-commerce website?\"
    • MULTICLASS -- Produces one of several possible results. For example, \"Is this a HIGH, LOW or MEDIUM risk trade?\"
    ", + "MLModel$MLModelType": "

    Identifies the MLModel category. The following are the available types:

    • REGRESSION - Produces a numeric result. For example, \"What price should a house be listed at?\"
    • BINARY - Produces one of two possible results. For example, \"Is this a child-friendly web site?\".
    • MULTICLASS - Produces one of several possible results. For example, \"Is this a HIGH-, LOW-, or MEDIUM-risk trade?\".
    " + } + }, + "MLModels": { + "base": null, + "refs": { + "DescribeMLModelsOutput$Results": "

    A list of MLModel that meet the search criteria.

    " + } + }, + "Message": { + "base": "

    Description of the most recent details about an object.

    ", + "refs": { + "BatchPrediction$Message": "

    A description of the most recent details about processing the batch prediction request.

    ", + "DataSource$Message": "

    A description of the most recent details about creating the DataSource.

    ", + "Evaluation$Message": "

    A description of the most recent details about evaluating the MLModel.

    ", + "GetBatchPredictionOutput$Message": "

    A description of the most recent details about processing the batch prediction request.

    ", + "GetDataSourceOutput$Message": "

    The user-supplied description of the most recent details about creating the DataSource.

    ", + "GetEvaluationOutput$Message": "

    A description of the most recent details about evaluating the MLModel.

    ", + "GetMLModelOutput$Message": "

    A description of the most recent details about accessing the MLModel.

    ", + "MLModel$Message": "

    A description of the most recent details about accessing the MLModel.

    " + } + }, + "PageLimit": { + "base": null, + "refs": { + "DescribeBatchPredictionsInput$Limit": "

    The number of pages of information to include in the result. The range of acceptable values is 1 through 100. The default value is 100.

    ", + "DescribeDataSourcesInput$Limit": "

    The maximum number of DataSource to include in the result.

    ", + "DescribeEvaluationsInput$Limit": "

    The maximum number of Evaluation to include in the result.

    ", + "DescribeMLModelsInput$Limit": "

    The number of pages of information to include in the result. The range of acceptable values is 1 through 100. The default value is 100.

    " + } + }, + "PerformanceMetrics": { + "base": "

    Measurements of how well the MLModel performed on known observations. One of the following metrics is returned, based on the type of the MLModel:

    • BinaryAUC: The binary MLModel uses the Area Under the Curve (AUC) technique to measure performance.

    • RegressionRMSE: The regression MLModel uses the Root Mean Square Error (RMSE) technique to measure performance. RMSE measures the difference between predicted and actual values for a single variable.

    • MulticlassAvgFScore: The multiclass MLModel uses the F1 score technique to measure performance.

    For more information about performance metrics, please see the Amazon Machine Learning Developer Guide.
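
    A sketch of reading the metric back, assuming the machinelearning client vendored in this SDK; the evaluation ID is a placeholder, and Properties as a string map is how this SDK models PerformanceMetricsProperties:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/machinelearning"
)

func main() {
	svc := machinelearning.New(session.New())

	out, err := svc.GetEvaluation(&machinelearning.GetEvaluationInput{
		EvaluationId: aws.String("my-evaluation-id"), // illustrative ID
	})
	if err != nil {
		fmt.Println("GetEvaluation failed:", err)
		return
	}
	if pm := out.PerformanceMetrics; pm != nil {
		// Properties is keyed by metric name, e.g. "BinaryAUC".
		for name, value := range pm.Properties {
			fmt.Println(name, aws.StringValue(value))
		}
	}
}
```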

    ", + "refs": { + "Evaluation$PerformanceMetrics": "

    Measurements of how well the MLModel performed, using observations referenced by the DataSource. One of the following metrics is returned, based on the type of the MLModel:

    • BinaryAUC: A binary MLModel uses the Area Under the Curve (AUC) technique to measure performance.

    • RegressionRMSE: A regression MLModel uses the Root Mean Square Error (RMSE) technique to measure performance. RMSE measures the difference between predicted and actual values for a single variable.

    • MulticlassAvgFScore: A multiclass MLModel uses the F1 score technique to measure performance.

    For more information about performance metrics, please see the Amazon Machine Learning Developer Guide.

    ", + "GetEvaluationOutput$PerformanceMetrics": "

    Measurements of how well the MLModel performed using observations referenced by the DataSource. One of the following metrics is returned, based on the type of the MLModel:

    • BinaryAUC: A binary MLModel uses the Area Under the Curve (AUC) technique to measure performance.

    • RegressionRMSE: A regression MLModel uses the Root Mean Square Error (RMSE) technique to measure performance. RMSE measures the difference between predicted and actual values for a single variable.

    • MulticlassAvgFScore: A multiclass MLModel uses the F1 score technique to measure performance.

    For more information about performance metrics, please see the Amazon Machine Learning Developer Guide.

    " + } + }, + "PerformanceMetricsProperties": { + "base": null, + "refs": { + "PerformanceMetrics$Properties": null + } + }, + "PerformanceMetricsPropertyKey": { + "base": null, + "refs": { + "PerformanceMetricsProperties$key": null + } + }, + "PerformanceMetricsPropertyValue": { + "base": null, + "refs": { + "PerformanceMetricsProperties$value": null + } + }, + "PredictInput": { + "base": null, + "refs": { + } + }, + "PredictOutput": { + "base": null, + "refs": { + } + }, + "Prediction": { + "base": "

    The output from a Predict operation:

    • Details - Contains the following attributes: DetailsAttributes.PREDICTIVE_MODEL_TYPE - REGRESSION | BINARY | MULTICLASS; DetailsAttributes.ALGORITHM - SGD

    • PredictedLabel - Present for either a BINARY or MULTICLASS MLModel request.

    • PredictedScores - Contains the raw classification score corresponding to each label.

    • PredictedValue - Present for a REGRESSION MLModel request.
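
    A sketch of a real-time Predict call reading the fields above, assuming the machinelearning client vendored in this SDK; the model ID, endpoint URL, and record values are all placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/machinelearning"
)

func main() {
	svc := machinelearning.New(session.New())

	out, err := svc.Predict(&machinelearning.PredictInput{
		MLModelId:       aws.String("my-model-id"),
		PredictEndpoint: aws.String("https://realtime.machinelearning.us-east-1.amazonaws.com"),
		Record: map[string]*string{
			"F1": aws.String("some text"),
			"F2": aws.String("42"),
		},
	})
	if err != nil {
		fmt.Println("Predict failed:", err)
		return
	}
	p := out.Prediction
	fmt.Println("label:", aws.StringValue(p.PredictedLabel))  // BINARY / MULTICLASS
	fmt.Println("value:", aws.Float64Value(p.PredictedValue)) // REGRESSION
}
```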

    ", + "refs": { + "PredictOutput$Prediction": null + } + }, + "PredictorNotMountedException": { + "base": "

    The exception is thrown when a predict request is made to an unmounted MLModel.

    ", + "refs": { + } + }, + "PresignedS3Url": { + "base": null, + "refs": { + "GetBatchPredictionOutput$LogUri": "

    A link to the file that contains logs of the CreateBatchPrediction operation.

    ", + "GetDataSourceOutput$LogUri": "

    A link to the file containing logs of CreateDataSourceFrom* operations.

    ", + "GetEvaluationOutput$LogUri": "

    A link to the file that contains logs of the CreateEvaluation operation.

    ", + "GetMLModelOutput$LogUri": "

    A link to the file that contains logs of the CreateMLModel operation.

    " + } + }, + "RDSDataSpec": { + "base": "

    The data specification of an Amazon Relational Database Service (Amazon RDS) DataSource.

    ", + "refs": { + "CreateDataSourceFromRDSInput$RDSData": "

    The data specification of an Amazon RDS DataSource:

    • DatabaseInformation -

      • DatabaseName - The name of the Amazon RDS database.
      • InstanceIdentifier - A unique identifier for the Amazon RDS database instance.

    • DatabaseCredentials - AWS Identity and Access Management (IAM) credentials that are used to connect to the Amazon RDS database.

    • ResourceRole - A role (DataPipelineDefaultResourceRole) assumed by an EC2 instance to carry out the copy task from Amazon RDS to Amazon Simple Storage Service (Amazon S3). For more information, see Role templates for data pipelines.

    • ServiceRole - A role (DataPipelineDefaultRole) assumed by the AWS Data Pipeline service to monitor the progress of the copy task from Amazon RDS to Amazon S3. For more information, see Role templates for data pipelines.

    • SecurityInfo - The security information to use to access an RDS DB instance. You need to set up appropriate ingress rules for the security entity IDs provided to allow access to the Amazon RDS instance. Specify a [SubnetId, SecurityGroupIds] pair for a VPC-based RDS DB instance.

    • SelectSqlQuery - A query that is used to retrieve the observation data for the Datasource.

    • S3StagingLocation - The Amazon S3 location for staging Amazon RDS data. The data retrieved from Amazon RDS using SelectSqlQuery is stored in this location.

    • DataSchemaUri - The Amazon S3 location of the DataSchema.

    • DataSchema - A JSON string representing the schema. This is not required if DataSchemaUri is specified.

    • DataRearrangement - A JSON string that represents the splitting and rearrangement requirements for the Datasource.


      Sample - \"{\\\"splitting\\\":{\\\"percentBegin\\\":10,\\\"percentEnd\\\":60}}\"
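
    A sketch of the corresponding Go struct from this SDK, populated with the bullet items above; every identifier, role, credential, and location is a placeholder:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/machinelearning"
)

func main() {
	spec := &machinelearning.RDSDataSpec{
		DatabaseInformation: &machinelearning.RDSDatabase{
			DatabaseName:       aws.String("mydb"),
			InstanceIdentifier: aws.String("my-rds-instance"),
		},
		DatabaseCredentials: &machinelearning.RDSDatabaseCredentials{
			Username: aws.String("ml_user"),
			Password: aws.String("example-password"),
		},
		ResourceRole:      aws.String("DataPipelineDefaultResourceRole"),
		ServiceRole:       aws.String("DataPipelineDefaultRole"),
		SubnetId:          aws.String("subnet-0123abcd"),
		SecurityGroupIds:  []*string{aws.String("sg-0123abcd")},
		SelectSqlQuery:    aws.String("SELECT * FROM observations"),
		S3StagingLocation: aws.String("s3://my-staging-bucket/"),
		DataSchemaUri:     aws.String("s3://my-bucket/schema.json"),
		DataRearrangement: aws.String(`{"splitting":{"percentBegin":10,"percentEnd":60}}`),
	}
	fmt.Println(spec) // pass as the RDSData field of CreateDataSourceFromRDSInput
}
```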

    " + } + }, + "RDSDatabase": { + "base": "

    The database details of an Amazon RDS database.

    ", + "refs": { + "RDSDataSpec$DatabaseInformation": "

    Describes the DatabaseName and InstanceIdentifier of an Amazon RDS database.

    ", + "RDSMetadata$Database": "

    The database details required to connect to an Amazon RDS database.

    " + } + }, + "RDSDatabaseCredentials": { + "base": "

    The database credentials to connect to a database on an RDS DB instance.

    ", + "refs": { + "RDSDataSpec$DatabaseCredentials": "

    The AWS Identity and Access Management (IAM) credentials that are used to connect to the Amazon RDS database.

    " + } + }, + "RDSDatabaseName": { + "base": "

    The name of a database hosted on an RDS DB instance.

    ", + "refs": { + "RDSDatabase$DatabaseName": null + } + }, + "RDSDatabasePassword": { + "base": "

    The password to be used by Amazon ML to connect to a database on an RDS DB instance. The password should have sufficient permissions to execute the RDSSelectSqlQuery query.

    ", + "refs": { + "RDSDatabaseCredentials$Password": null + } + }, + "RDSDatabaseUsername": { + "base": "

    The username to be used by Amazon ML to connect to a database on an Amazon RDS instance. The username should have sufficient permissions to execute an RDSSelectSqlQuery query.

    ", + "refs": { + "RDSDatabaseCredentials$Username": null, + "RDSMetadata$DatabaseUserName": null + } + }, + "RDSInstanceIdentifier": { + "base": "Identifier of RDS DB Instances.", + "refs": { + "RDSDatabase$InstanceIdentifier": "

    The ID of an RDS DB instance.

    " + } + }, + "RDSMetadata": { + "base": "

    The datasource details that are specific to Amazon RDS.

    ", + "refs": { + "DataSource$RDSMetadata": null, + "GetDataSourceOutput$RDSMetadata": null + } + }, + "RDSSelectSqlQuery": { + "base": "

    The SQL query to be executed against the Amazon RDS database. The SQL query should be valid for the Amazon RDS type being used.

    ", + "refs": { + "RDSDataSpec$SelectSqlQuery": "

    The query that is used to retrieve the observation data for the DataSource.

    ", + "RDSMetadata$SelectSqlQuery": "

    The SQL query that is supplied during CreateDataSourceFromRDS. Returns only if Verbose is true in GetDataSourceInput.

    " + } + }, + "RealtimeEndpointInfo": { + "base": "

    Describes the real-time endpoint information for an MLModel.

    ", + "refs": { + "CreateRealtimeEndpointOutput$RealtimeEndpointInfo": "

    The endpoint information of the MLModel.

    ", + "DeleteRealtimeEndpointOutput$RealtimeEndpointInfo": "

    The endpoint information of the MLModel.

    ", + "GetMLModelOutput$EndpointInfo": "

    The current endpoint of the MLModel.

    ", + "MLModel$EndpointInfo": "

    The current endpoint of the MLModel.

    " + } + }, + "RealtimeEndpointStatus": { + "base": null, + "refs": { + "RealtimeEndpointInfo$EndpointStatus": "

    The current status of the real-time endpoint for the MLModel. This element can have one of the following values:

    • NONE - Endpoint does not exist or was previously deleted.
    • READY - Endpoint is ready to be used for real-time predictions.
    • UPDATING - Updating/creating the endpoint.
    " + } + }, + "Recipe": { + "base": null, + "refs": { + "CreateMLModelInput$Recipe": "

    The data recipe for creating the MLModel. You must specify either the recipe or its URI. If you don't specify a recipe or its URI, Amazon ML creates a default.

    ", + "GetMLModelOutput$Recipe": "

    The recipe to use when training the MLModel. The Recipe provides detailed information about the observation data to use during training, and manipulations to perform on the observation data during training.

    Note

    This parameter is provided as part of the verbose format.

    " + } + }, + "Record": { + "base": "

    A map of variable name-value pairs that represent an observation.

    ", + "refs": { + "PredictInput$Record": null + } + }, + "RedshiftClusterIdentifier": { + "base": "

    The ID of an Amazon Redshift cluster.

    ", + "refs": { + "RedshiftDatabase$ClusterIdentifier": null + } + }, + "RedshiftDataSpec": { + "base": "

    Describes the data specification of an Amazon Redshift DataSource.

    ", + "refs": { + "CreateDataSourceFromRedshiftInput$DataSpec": "

    The data specification of an Amazon Redshift DataSource:

    • DatabaseInformation -

      • DatabaseName - The name of the Amazon Redshift database.
      • ClusterIdentifier - The unique ID for the Amazon Redshift cluster.

    • DatabaseCredentials - The AWS Identity and Access Management (IAM) credentials that are used to connect to the Amazon Redshift database.

    • SelectSqlQuery - The query that is used to retrieve the observation data for the Datasource.

    • S3StagingLocation - The Amazon Simple Storage Service (Amazon S3) location for staging Amazon Redshift data. The data retrieved from Amazon Redshift using the SelectSqlQuery query is stored in this location.

    • DataSchemaUri - The Amazon S3 location of the DataSchema.

    • DataSchema - A JSON string representing the schema. This is not required if DataSchemaUri is specified.

    • DataRearrangement - A JSON string that represents the splitting and rearrangement requirements for the DataSource.

      Sample - \"{\\\"splitting\\\":{\\\"percentBegin\\\":10,\\\"percentEnd\\\":60}}\"

    " + } + }, + "RedshiftDatabase": { + "base": "

    Describes the database details required to connect to an Amazon Redshift database.

    ", + "refs": { + "RedshiftDataSpec$DatabaseInformation": "

    Describes the DatabaseName and ClusterIdentifier for an Amazon Redshift DataSource.

    ", + "RedshiftMetadata$RedshiftDatabase": null + } + }, + "RedshiftDatabaseCredentials": { + "base": "

    Describes the database credentials for connecting to a database on an Amazon Redshift cluster.

    ", + "refs": { + "RedshiftDataSpec$DatabaseCredentials": "

    Describes AWS Identity and Access Management (IAM) credentials that are used to connect to the Amazon Redshift database.

    " + } + }, + "RedshiftDatabaseName": { + "base": "

    The name of a database hosted on an Amazon Redshift cluster.

    ", + "refs": { + "RedshiftDatabase$DatabaseName": null + } + }, + "RedshiftDatabasePassword": { + "base": "

    A password to be used by Amazon ML to connect to a database on an Amazon Redshift cluster. The password should have sufficient permissions to execute a RedshiftSelectSqlQuery query. The password should be valid for an Amazon Redshift USER.

    ", + "refs": { + "RedshiftDatabaseCredentials$Password": null + } + }, + "RedshiftDatabaseUsername": { + "base": "

    A username to be used by Amazon Machine Learning (Amazon ML) to connect to a database on an Amazon Redshift cluster. The username should have sufficient permissions to execute the RedshiftSelectSqlQuery query. The username should be valid for an Amazon Redshift USER.

    ", + "refs": { + "RedshiftDatabaseCredentials$Username": null, + "RedshiftMetadata$DatabaseUserName": null + } + }, + "RedshiftMetadata": { + "base": "

    Describes the DataSource details specific to Amazon Redshift.

    ", + "refs": { + "DataSource$RedshiftMetadata": null, + "GetDataSourceOutput$RedshiftMetadata": null + } + }, + "RedshiftSelectSqlQuery": { + "base": "

    Describes the SQL query to execute on the Amazon Redshift database. The SQL query should be valid for an Amazon Redshift SELECT.

    ", + "refs": { + "RedshiftDataSpec$SelectSqlQuery": "

    Describes the SQL Query to execute on an Amazon Redshift database for an Amazon Redshift DataSource.

    ", + "RedshiftMetadata$SelectSqlQuery": "

    The SQL query that is specified during CreateDataSourceFromRedshift. Returns only if Verbose is true in GetDataSourceInput.

    " + } + }, + "ResourceNotFoundException": { + "base": "

    A specified resource cannot be located.

    ", + "refs": { + } + }, + "RoleARN": { + "base": "

    The Amazon Resource Name (ARN) of an AWS IAM Role, such as the following: arn:aws:iam::account:role/rolename.

    ", + "refs": { + "CreateDataSourceFromRDSInput$RoleARN": "

    The role that Amazon ML assumes on behalf of the user to create and activate a data pipeline in the user's account and copy data using the SelectSqlQuery query from Amazon RDS to Amazon S3.

    ", + "CreateDataSourceFromRedshiftInput$RoleARN": "

    A fully specified role Amazon Resource Name (ARN). Amazon ML assumes the role on behalf of the user to create the following:

    • A security group to allow Amazon ML to execute the SelectSqlQuery query on an Amazon Redshift cluster

    • An Amazon S3 bucket policy to grant Amazon ML read/write permissions on the S3StagingLocation

    ", + "DataSource$RoleARN": null, + "GetDataSourceOutput$RoleARN": null + } + }, + "S3DataSpec": { + "base": "

    Describes the data specification of a DataSource.

    ", + "refs": { + "CreateDataSourceFromS3Input$DataSpec": "

    The data specification of a DataSource:

    • DataLocationS3 - The Amazon S3 location of the observation data.

    • DataSchemaLocationS3 - The Amazon S3 location of the DataSchema.

    • DataSchema - A JSON string representing the schema. This is not required if DataSchemaUri is specified.

    • DataRearrangement - A JSON string that represents the splitting and rearrangement requirements for the Datasource.

      Sample - \"{\\\"splitting\\\":{\\\"percentBegin\\\":10,\\\"percentEnd\\\":60}}\"
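
    A sketch of wiring the pieces above into a CreateDataSourceFromS3 call, assuming the machinelearning client vendored in this SDK; IDs, names, and S3 locations are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/machinelearning"
)

func main() {
	svc := machinelearning.New(session.New())

	out, err := svc.CreateDataSourceFromS3(&machinelearning.CreateDataSourceFromS3Input{
		DataSourceId:      aws.String("exampleDataSourceId"),
		DataSourceName:    aws.String("example S3 datasource"),
		ComputeStatistics: aws.Bool(true),
		DataSpec: &machinelearning.S3DataSpec{
			DataLocationS3:       aws.String("s3://my-bucket/data.csv"),
			DataSchemaLocationS3: aws.String("s3://my-bucket/schema.json"),
			DataRearrangement:    aws.String(`{"splitting":{"percentBegin":10,"percentEnd":60}}`),
		},
	})
	if err != nil {
		fmt.Println("CreateDataSourceFromS3 failed:", err)
		return
	}
	fmt.Println("created:", aws.StringValue(out.DataSourceId))
}
```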

    " + } + }, + "S3Url": { + "base": "

    A reference to a file or bucket on Amazon Simple Storage Service (Amazon S3).

    ", + "refs": { + "BatchPrediction$InputDataLocationS3": "

    The location of the data file or directory in Amazon Simple Storage Service (Amazon S3).

    ", + "BatchPrediction$OutputUri": "

    The location of an Amazon S3 bucket or directory to receive the operation results. The following substrings are not allowed in the s3 key portion of the outputURI field: ':', '//', '/./', '/../'.

    ", + "CreateBatchPredictionInput$OutputUri": "

    The location of an Amazon Simple Storage Service (Amazon S3) bucket or directory to store the batch prediction results. The following substrings are not allowed in the S3 key portion of the OutputUri field: ':', '//', '/./', '/../'.

    Amazon ML needs permissions to store and retrieve the logs on your behalf. For information about how to set permissions, see the Amazon Machine Learning Developer Guide.

    ", + "CreateMLModelInput$RecipeUri": "

    The Amazon Simple Storage Service (Amazon S3) location and file name that contains the MLModel recipe. You can specify either the recipe or its URI. If you specify neither, Amazon ML creates a default.

    ", + "DataSource$DataLocationS3": "

    The location and name of the data in Amazon Simple Storage Service (Amazon S3) that is used by a DataSource.

    ", + "Evaluation$InputDataLocationS3": "

    The location and name of the data in Amazon Simple Storage Service (Amazon S3) that is used in the evaluation.

    ", + "GetBatchPredictionOutput$InputDataLocationS3": "

    The location of the data file or directory in Amazon Simple Storage Service (Amazon S3).

    ", + "GetBatchPredictionOutput$OutputUri": "

    The location of an Amazon S3 bucket or directory to receive the operation results.

    ", + "GetDataSourceOutput$DataLocationS3": "

    The location of the data file or directory in Amazon Simple Storage Service (Amazon S3).

    ", + "GetEvaluationOutput$InputDataLocationS3": "

    The location of the data file or directory in Amazon Simple Storage Service (Amazon S3).

    ", + "GetMLModelOutput$InputDataLocationS3": "

    The location of the data file or directory in Amazon Simple Storage Service (Amazon S3).

    ", + "MLModel$InputDataLocationS3": "

    The location of the data file or directory in Amazon Simple Storage Service (Amazon S3).

    ", + "RDSDataSpec$S3StagingLocation": "

    The Amazon S3 location for staging Amazon RDS data. The data retrieved from Amazon RDS using SelectSqlQuery is stored in this location.

    ", + "RDSDataSpec$DataSchemaUri": "

    The Amazon S3 location of the DataSchema.

    ", + "RedshiftDataSpec$S3StagingLocation": "

    Describes an Amazon S3 location to store the result set of the SelectSqlQuery query.

    ", + "RedshiftDataSpec$DataSchemaUri": "

    Describes the schema location for an Amazon Redshift DataSource.

    ", + "S3DataSpec$DataLocationS3": "

    The location of the data file(s) used by a DataSource. The URI specifies a data file or an Amazon Simple Storage Service (Amazon S3) directory or bucket containing data files.

    ", + "S3DataSpec$DataSchemaLocationS3": "

    Describes the schema location in Amazon S3. You must provide either the DataSchema or the DataSchemaLocationS3.

    " + } + }, + "ScoreThreshold": { + "base": null, + "refs": { + "GetMLModelOutput$ScoreThreshold": "

    The scoring threshold is used in binary classification MLModels. It marks the boundary between a positive prediction and a negative prediction.

    Output values greater than or equal to the threshold receive a positive result from the MLModel, such as true. Output values less than the threshold receive a negative response from the MLModel, such as false.

    ", + "MLModel$ScoreThreshold": null, + "UpdateMLModelInput$ScoreThreshold": "

    The ScoreThreshold used in binary classification MLModels, which marks the boundary between a positive prediction and a negative prediction.

    Output values greater than or equal to the ScoreThreshold receive a positive result from the MLModel, such as true. Output values less than the ScoreThreshold receive a negative response from the MLModel, such as false.

    " + } + }, + "ScoreValue": { + "base": null, + "refs": { + "ScoreValuePerLabelMap$value": null + } + }, + "ScoreValuePerLabelMap": { + "base": "Provides the raw classification score corresponding to each label.", + "refs": { + "Prediction$predictedScores": null + } + }, + "SortOrder": { + "base": "

    The sort order specified in a listing condition. Possible values include the following:

    • asc - Present the information in ascending order (from A-Z).
    • dsc - Present the information in descending order (from Z-A).
    ", + "refs": { + "DescribeBatchPredictionsInput$SortOrder": "

    A two-value parameter that determines the sequence of the resulting list of BatchPredictions.

    • asc - Arranges the list in ascending order (A-Z, 0-9).
    • dsc - Arranges the list in descending order (Z-A, 9-0).

    Results are sorted by FilterVariable.

    ", + "DescribeDataSourcesInput$SortOrder": "

    A two-value parameter that determines the sequence of the resulting list of DataSources.

    • asc - Arranges the list in ascending order (A-Z, 0-9).
    • dsc - Arranges the list in descending order (Z-A, 9-0).

    Results are sorted by FilterVariable.

    ", + "DescribeEvaluationsInput$SortOrder": "

    A two-value parameter that determines the sequence of the resulting list of Evaluations.

    • asc - Arranges the list in ascending order (A-Z, 0-9).
    • dsc - Arranges the list in descending order (Z-A, 9-0).

    Results are sorted by FilterVariable.

    ", + "DescribeMLModelsInput$SortOrder": "

    A two-value parameter that determines the sequence of the resulting list of MLModels.

    • asc - Arranges the list in ascending order (A-Z, 0-9).
    • dsc - Arranges the list in descending order (Z-A, 9-0).

    Results are sorted by FilterVariable.

    " + } + }, + "StringType": { + "base": "

    String type.

    ", + "refs": { + "DescribeBatchPredictionsInput$NextToken": "

    The ID of the page in the paginated results.

    ", + "DescribeBatchPredictionsOutput$NextToken": "

    The ID of the next page in the paginated results that indicates at least one more page follows.

    ", + "DescribeDataSourcesInput$NextToken": "

    The ID of the page in the paginated results.

    ", + "DescribeDataSourcesOutput$NextToken": "

    The ID of the next page in the paginated results that indicates at least one more page follows.

    ", + "DescribeEvaluationsInput$NextToken": "

    The ID of the page in the paginated results.

    ", + "DescribeEvaluationsOutput$NextToken": "

    The ID of the next page in the paginated results that indicates at least one more page follows.

    ", + "DescribeMLModelsInput$NextToken": "

    The ID of the page in the paginated results.

    ", + "DescribeMLModelsOutput$NextToken": "

    The ID of the next page in the paginated results that indicates at least one more page follows.

    ", + "TrainingParameters$key": null, + "TrainingParameters$value": null + } + }, + "Tag": { + "base": "

    A custom key-value pair associated with an ML object, such as an ML model.

    ", + "refs": { + "TagList$member": null + } + }, + "TagKey": { + "base": null, + "refs": { + "Tag$Key": "

    A unique identifier for the tag. Valid characters include Unicode letters, digits, white space, _, ., /, =, +, -, %, and @.

    ", + "TagKeyList$member": null + } + }, + "TagKeyList": { + "base": null, + "refs": { + "DeleteTagsInput$TagKeys": "

    One or more tags to delete.

    " + } + }, + "TagLimitExceededException": { + "base": null, + "refs": { + } + }, + "TagList": { + "base": null, + "refs": { + "AddTagsInput$Tags": "

    The key-value pairs to use to create tags. If you specify a key without specifying a value, Amazon ML creates a tag with the specified key and a value of null.

    ", + "DescribeTagsOutput$Tags": "

    A list of tags associated with the ML object.

    " + } + }, + "TagValue": { + "base": null, + "refs": { + "Tag$Value": "

    An optional string, typically used to describe or define the tag. Valid characters include Unicode letters, digits, white space, _, ., /, =, +, -, %, and @.

    " + } + }, + "TaggableResourceType": { + "base": null, + "refs": { + "AddTagsInput$ResourceType": "

    The type of the ML object to tag.

    ", + "AddTagsOutput$ResourceType": "

    The type of the ML object that was tagged.

    ", + "DeleteTagsInput$ResourceType": "

    The type of the tagged ML object.

    ", + "DeleteTagsOutput$ResourceType": "

    The type of the ML object from which tags were deleted.

    ", + "DescribeTagsInput$ResourceType": "

    The type of the ML object.

    ", + "DescribeTagsOutput$ResourceType": "

    The type of the tagged ML object.

    " + } + }, + "TrainingParameters": { + "base": null, + "refs": { + "CreateMLModelInput$Parameters": "

    A list of the training parameters in the MLModel. The list is implemented as a map of key-value pairs.

    The following is the current set of training parameters:

    • sgd.maxMLModelSizeInBytes - The maximum allowed size of the model. Depending on the input data, the size of the model might affect its performance.

      The value is an integer that ranges from 100000 to 2147483648. The default value is 33554432.

    • sgd.maxPasses - The number of times that the training process traverses the observations to build the MLModel. The value is an integer that ranges from 1 to 10000. The default value is 10.

    • sgd.shuffleType - Whether Amazon ML shuffles the training data. Shuffling the data improves a model's ability to find the optimal solution for a variety of data types. The valid values are auto and none. The default value is none. We strongly recommend that you shuffle your data.

    • sgd.l1RegularizationAmount - The coefficient regularization L1 norm. It controls overfitting the data by penalizing large coefficients. This tends to drive coefficients to zero, resulting in a sparse feature set. If you use this parameter, start by specifying a small value, such as 1.0E-08.

      The value is a double that ranges from 0 to MAX_DOUBLE. The default is to not use L1 normalization. This parameter can't be used when L2 is specified. Use this parameter sparingly.

    • sgd.l2RegularizationAmount - The coefficient regularization L2 norm. It controls overfitting the data by penalizing large coefficients. This tends to drive coefficients to small, nonzero values. If you use this parameter, start by specifying a small value, such as 1.0E-08.

      The value is a double that ranges from 0 to MAX_DOUBLE. The default is to not use L2 normalization. This parameter can't be used when L1 is specified. Use this parameter sparingly.

    ", + "GetMLModelOutput$TrainingParameters": "

    A list of the training parameters in the MLModel. The list is implemented as a map of key-value pairs.

    The following is the current set of training parameters:

    • sgd.maxMLModelSizeInBytes - The maximum allowed size of the model. Depending on the input data, the size of the model might affect its performance.

      The value is an integer that ranges from 100000 to 2147483648. The default value is 33554432.

    • sgd.maxPasses - The number of times that the training process traverses the observations to build the MLModel. The value is an integer that ranges from 1 to 10000. The default value is 10.

    • sgd.shuffleType - Whether Amazon ML shuffles the training data. Shuffling data improves a model's ability to find the optimal solution for a variety of data types. The valid values are auto and none. The default value is none. We strongly recommend that you shuffle your data.

    • sgd.l1RegularizationAmount - The coefficient regularization L1 norm. It controls overfitting the data by penalizing large coefficients. This tends to drive coefficients to zero, resulting in a sparse feature set. If you use this parameter, start by specifying a small value, such as 1.0E-08.

      The value is a double that ranges from 0 to MAX_DOUBLE. The default is to not use L1 normalization. This parameter can't be used when L2 is specified. Use this parameter sparingly.

    • sgd.l2RegularizationAmount - The coefficient regularization L2 norm. It controls overfitting the data by penalizing large coefficients. This tends to drive coefficients to small, nonzero values. If you use this parameter, start by specifying a small value, such as 1.0E-08.

      The value is a double that ranges from 0 to MAX_DOUBLE. The default is to not use L2 normalization. This parameter can't be used when L1 is specified. Use this parameter sparingly.

    ", + "MLModel$TrainingParameters": "

    A list of the training parameters in the MLModel. The list is implemented as a map of key-value pairs.

    The following is the current set of training parameters:

    • sgd.maxMLModelSizeInBytes - The maximum allowed size of the model. Depending on the input data, the size of the model might affect its performance.

      The value is an integer that ranges from 100000 to 2147483648. The default value is 33554432.

    • sgd.maxPasses - The number of times that the training process traverses the observations to build the MLModel. The value is an integer that ranges from 1 to 10000. The default value is 10.

    • sgd.shuffleType - Whether Amazon ML shuffles the training data. Shuffling the data improves a model's ability to find the optimal solution for a variety of data types. The valid values are auto and none. The default value is none.

    • sgd.l1RegularizationAmount - The coefficient regularization L1 norm, which controls overfitting the data by penalizing large coefficients. This parameter tends to drive coefficients to zero, resulting in a sparse feature set. If you use this parameter, start by specifying a small value, such as 1.0E-08.

      The value is a double that ranges from 0 to MAX_DOUBLE. The default is to not use L1 normalization. This parameter can't be used when L2 is specified. Use this parameter sparingly.

    • sgd.l2RegularizationAmount - The coefficient regularization L2 norm, which controls overfitting the data by penalizing large coefficients. This tends to drive coefficients to small, nonzero values. If you use this parameter, start by specifying a small value, such as 1.0E-08.

      The value is a double that ranges from 0 to MAX_DOUBLE. The default is to not use L2 normalization. This parameter can't be used when L1 is specified. Use this parameter sparingly.

    " + } + }, + "UpdateBatchPredictionInput": { + "base": null, + "refs": { + } + }, + "UpdateBatchPredictionOutput": { + "base": "

    Represents the output of an UpdateBatchPrediction operation.

    You can see the updated content by using the GetBatchPrediction operation.

    ", + "refs": { + } + }, + "UpdateDataSourceInput": { + "base": null, + "refs": { + } + }, + "UpdateDataSourceOutput": { + "base": "

    Represents the output of an UpdateDataSource operation.

    You can see the updated content by using the GetDataSource operation.

    ", + "refs": { + } + }, + "UpdateEvaluationInput": { + "base": null, + "refs": { + } + }, + "UpdateEvaluationOutput": { + "base": "

    Represents the output of an UpdateEvaluation operation.

    You can see the updated content by using the GetEvaluation operation.

    ", + "refs": { + } + }, + "UpdateMLModelInput": { + "base": null, + "refs": { + } + }, + "UpdateMLModelOutput": { + "base": "

    Represents the output of an UpdateMLModel operation.

    You can see the updated content by using the GetMLModel operation.

    ", + "refs": { + } + }, + "VariableName": { + "base": "

    The name of a variable. Currently it's used to specify the name of the target value, label, weight, and tags.

    ", + "refs": { + "Record$key": null + } + }, + "VariableValue": { + "base": "

    The value of a variable. Currently it's used to specify values of the target value, weights, and tag variables and for filtering variable values.

    ", + "refs": { + "Record$value": null + } + }, + "Verbose": { + "base": "

    Specifies whether a describe operation should return exhaustive or abbreviated information.

    ", + "refs": { + "GetDataSourceInput$Verbose": "

    Specifies whether the GetDataSource operation should return DataSourceSchema.

    If true, DataSourceSchema is returned.

    If false, DataSourceSchema is not returned.

    ", + "GetMLModelInput$Verbose": "

    Specifies whether the GetMLModel operation should return Recipe.

    If true, Recipe is returned.

    If false, Recipe is not returned.

    " + } + }, + "VipURL": { + "base": null, + "refs": { + "PredictInput$PredictEndpoint": null, + "RealtimeEndpointInfo$EndpointUrl": "

    The URI that specifies where to send real-time prediction requests for the MLModel.

    Note

    The application must wait until the real-time endpoint is ready before using this URI.

    " + } + }, + "floatLabel": { + "base": null, + "refs": { + "Prediction$predictedValue": "The prediction value for REGRESSION MLModel." + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/machinelearning/2014-12-12/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/machinelearning/2014-12-12/paginators-1.json new file mode 100644 index 000000000..c13ce65af --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/machinelearning/2014-12-12/paginators-1.json @@ -0,0 +1,28 @@ +{ + "pagination": { + "DescribeBatchPredictions": { + "limit_key": "Limit", + "output_token": "NextToken", + "input_token": "NextToken", + "result_key": "Results" + }, + "DescribeDataSources": { + "limit_key": "Limit", + "output_token": "NextToken", + "input_token": "NextToken", + "result_key": "Results" + }, + "DescribeEvaluations": { + "limit_key": "Limit", + "output_token": "NextToken", + "input_token": "NextToken", + "result_key": "Results" + }, + "DescribeMLModels": { + "limit_key": "Limit", + "output_token": "NextToken", + "input_token": "NextToken", + "result_key": "Results" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/machinelearning/2014-12-12/waiters-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/machinelearning/2014-12-12/waiters-2.json new file mode 100644 index 000000000..da6b1c951 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/machinelearning/2014-12-12/waiters-2.json @@ -0,0 +1,81 @@ +{ + "version": 2, + "waiters": { + "DataSourceAvailable": { + "delay": 30, + "operation": "DescribeDataSources", + "maxAttempts": 60, + "acceptors": [ + { + "expected": "COMPLETED", + "matcher": "pathAll", + "state": "success", + "argument": "Results[].Status" + }, + { + "expected": "FAILED", + "matcher": "pathAny", + "state": "failure", + "argument": "Results[].Status" + } + ] + }, + "MLModelAvailable": { + "delay": 30, + "operation": "DescribeMLModels", + "maxAttempts": 60, + "acceptors": [ + { + "expected": "COMPLETED", + "matcher": "pathAll", + "state": "success", + "argument": "Results[].Status" + }, + { + "expected": "FAILED", + "matcher": "pathAny", + "state": "failure", + "argument": "Results[].Status" + } + ] + }, + "EvaluationAvailable": { + "delay": 30, + "operation": "DescribeEvaluations", + "maxAttempts": 60, + "acceptors": [ + { + "expected": "COMPLETED", + "matcher": "pathAll", + "state": "success", + "argument": "Results[].Status" + }, + { + "expected": "FAILED", + "matcher": "pathAny", + "state": "failure", + "argument": "Results[].Status" + } + ] + }, + "BatchPredictionAvailable": { + "delay": 30, + "operation": "DescribeBatchPredictions", + "maxAttempts": 60, + "acceptors": [ + { + "expected": "COMPLETED", + "matcher": "pathAll", + "state": "success", + "argument": "Results[].Status" + }, + { + "expected": "FAILED", + "matcher": "pathAny", + "state": "failure", + "argument": "Results[].Status" + } + ] + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/marketplacecommerceanalytics/2015-07-01/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/marketplacecommerceanalytics/2015-07-01/api-2.json new file mode 100644 index 000000000..fa343e101 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/marketplacecommerceanalytics/2015-07-01/api-2.json @@ -0,0 +1,120 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-07-01", + "endpointPrefix":"marketplacecommerceanalytics", + "jsonVersion":"1.1", + "protocol":"json", + "serviceFullName":"AWS Marketplace Commerce Analytics", + 
"signatureVersion":"v4", + "signingName":"marketplacecommerceanalytics", + "targetPrefix":"MarketplaceCommerceAnalytics20150701" + }, + "operations":{ + "GenerateDataSet":{ + "name":"GenerateDataSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GenerateDataSetRequest"}, + "output":{"shape":"GenerateDataSetResult"}, + "errors":[ + {"shape":"MarketplaceCommerceAnalyticsException"} + ] + } + }, + "shapes":{ + "CustomerDefinedValues":{ + "type":"map", + "key":{"shape":"OptionalKey"}, + "value":{"shape":"OptionalValue"}, + "max":5, + "min":1 + }, + "DataSetPublicationDate":{"type":"timestamp"}, + "DataSetRequestId":{"type":"string"}, + "DataSetType":{ + "type":"string", + "enum":[ + "customer_subscriber_hourly_monthly_subscriptions", + "customer_subscriber_annual_subscriptions", + "daily_business_usage_by_instance_type", + "daily_business_fees", + "daily_business_free_trial_conversions", + "daily_business_new_instances", + "daily_business_new_product_subscribers", + "daily_business_canceled_product_subscribers", + "monthly_revenue_billing_and_revenue_data", + "monthly_revenue_annual_subscriptions", + "disbursed_amount_by_product", + "disbursed_amount_by_product_with_uncollected_funds", + "disbursed_amount_by_customer_geo", + "disbursed_amount_by_age_of_uncollected_funds", + "disbursed_amount_by_age_of_disbursed_funds", + "customer_profile_by_industry", + "customer_profile_by_revenue", + "customer_profile_by_geography" + ], + "max":255, + "min":1 + }, + "DestinationS3BucketName":{ + "type":"string", + "min":1 + }, + "DestinationS3Prefix":{"type":"string"}, + "ExceptionMessage":{"type":"string"}, + "GenerateDataSetRequest":{ + "type":"structure", + "required":[ + "dataSetType", + "dataSetPublicationDate", + "roleNameArn", + "destinationS3BucketName", + "snsTopicArn" + ], + "members":{ + "dataSetType":{"shape":"DataSetType"}, + "dataSetPublicationDate":{"shape":"DataSetPublicationDate"}, + "roleNameArn":{"shape":"RoleNameArn"}, + "destinationS3BucketName":{"shape":"DestinationS3BucketName"}, + "destinationS3Prefix":{"shape":"DestinationS3Prefix"}, + "snsTopicArn":{"shape":"SnsTopicArn"}, + "customerDefinedValues":{"shape":"CustomerDefinedValues"} + } + }, + "GenerateDataSetResult":{ + "type":"structure", + "members":{ + "dataSetRequestId":{"shape":"DataSetRequestId"} + } + }, + "MarketplaceCommerceAnalyticsException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true, + "fault":true + }, + "OptionalKey":{ + "type":"string", + "max":255, + "min":1 + }, + "OptionalValue":{ + "type":"string", + "max":255, + "min":1 + }, + "RoleNameArn":{ + "type":"string", + "min":1 + }, + "SnsTopicArn":{ + "type":"string", + "min":1 + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/marketplacecommerceanalytics/2015-07-01/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/marketplacecommerceanalytics/2015-07-01/docs-2.json new file mode 100644 index 000000000..8d17a391f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/marketplacecommerceanalytics/2015-07-01/docs-2.json @@ -0,0 +1,90 @@ +{ + "version": "2.0", + "service": "Provides AWS Marketplace business intelligence data on-demand.", + "operations": { + "GenerateDataSet": "Given a data set type and data set publication date, asynchronously publishes the requested data set to the specified S3 bucket and notifies the specified SNS topic once the data is available. 
Returns a unique request identifier that can be used to correlate requests with notifications from the SNS topic. Data sets will be published in comma-separated values (CSV) format with the file name {data_set_type}_YYYY-MM-DD.csv. If a file with the same name already exists (e.g. if the same data set is requested twice), the original file will be overwritten by the new file. Requires a Role with an attached permissions policy providing Allow permissions for the following actions: s3:PutObject, s3:GetBucketLocation, sns:GetTopicAttributes, sns:Publish, iam:GetRolePolicy." + }, + "shapes": { + "CustomerDefinedValues": { + "base": null, + "refs": { + "GenerateDataSetRequest$customerDefinedValues": "(Optional) Key-value pairs which will be returned, unmodified, in the Amazon SNS notification message and the data set metadata file. These key-value pairs can be used to correlated responses with tracking information from other systems." + } + }, + "DataSetPublicationDate": { + "base": null, + "refs": { + "GenerateDataSetRequest$dataSetPublicationDate": "The date a data set was published. For daily data sets, provide a date with day-level granularity for the desired day. For weekly data sets, provide a date with day-level granularity within the desired week (the day value will be ignored). For monthly data sets, provide a date with month-level granularity for the desired month (the day value will be ignored)." + } + }, + "DataSetRequestId": { + "base": null, + "refs": { + "GenerateDataSetResult$dataSetRequestId": "A unique identifier representing a specific request to the GenerateDataSet operation. This identifier can be used to correlate a request with notifications from the SNS topic." + } + }, + "DataSetType": { + "base": null, + "refs": { + "GenerateDataSetRequest$dataSetType": "

    The desired data set type.

    • customer_subscriber_hourly_monthly_subscriptions - Available daily by 5:00 PM Pacific Time since 2014-07-21.
    • customer_subscriber_annual_subscriptions - Available daily by 5:00 PM Pacific Time since 2014-07-21.
    • daily_business_usage_by_instance_type - Available daily by 5:00 PM Pacific Time since 2015-01-26.
    • daily_business_fees - Available daily by 5:00 PM Pacific Time since 2015-01-26.
    • daily_business_free_trial_conversions - Available daily by 5:00 PM Pacific Time since 2015-01-26.
    • daily_business_new_instances - Available daily by 5:00 PM Pacific Time since 2015-01-26.
    • daily_business_new_product_subscribers - Available daily by 5:00 PM Pacific Time since 2015-01-26.
    • daily_business_canceled_product_subscribers - Available daily by 5:00 PM Pacific Time since 2015-01-26.
    • monthly_revenue_billing_and_revenue_data - Available monthly on the 4th day of the month by 5:00 PM Pacific Time since 2015-02.
    • monthly_revenue_annual_subscriptions - Available monthly on the 4th day of the month by 5:00 PM Pacific Time since 2015-02.
    • disbursed_amount_by_product - Available every 30 days by 5:00 PM Pacific Time since 2015-01-26.
    • disbursed_amount_by_product_with_uncollected_funds - This data set is only available from 2012-04-19 until 2015-01-25. After 2015-01-25, this data set was split into three data sets: disbursed_amount_by_product, disbursed_amount_by_age_of_uncollected_funds, and disbursed_amount_by_age_of_disbursed_funds.
    • disbursed_amount_by_customer_geo - Available every 30 days by 5:00 PM Pacific Time since 2012-04-19.
    • disbursed_amount_by_age_of_uncollected_funds - Available every 30 days by 5:00 PM Pacific Time since 2015-01-26.
    • disbursed_amount_by_age_of_disbursed_funds - Available every 30 days by 5:00 PM Pacific Time since 2015-01-26.
    • customer_profile_by_industry - Available daily by 5:00 PM Pacific Time since 2015-10-01.
    • customer_profile_by_revenue - Available daily by 5:00 PM Pacific Time since 2015-10-01.
    • customer_profile_by_geography - Available daily by 5:00 PM Pacific Time since 2015-10-01.

    " + } + }, + "DestinationS3BucketName": { + "base": null, + "refs": { + "GenerateDataSetRequest$destinationS3BucketName": "The name (friendly name, not ARN) of the destination S3 bucket." + } + }, + "DestinationS3Prefix": { + "base": null, + "refs": { + "GenerateDataSetRequest$destinationS3Prefix": "(Optional) The desired S3 prefix for the published data set, similar to a directory path in standard file systems. For example, if given the bucket name \"mybucket\" and the prefix \"myprefix/mydatasets\", the output file \"outputfile\" would be published to \"s3://mybucket/myprefix/mydatasets/outputfile\". If the prefix directory structure does not exist, it will be created. If no prefix is provided, the data set will be published to the S3 bucket root." + } + }, + "ExceptionMessage": { + "base": null, + "refs": { + "MarketplaceCommerceAnalyticsException$message": null + } + }, + "GenerateDataSetRequest": { + "base": "Container for the parameters to the GenerateDataSet operation.", + "refs": { + } + }, + "GenerateDataSetResult": { + "base": "Container for the result of the GenerateDataSet operation.", + "refs": { + } + }, + "MarketplaceCommerceAnalyticsException": { + "base": "This exception is thrown when an internal service error occurs.", + "refs": { + } + }, + "OptionalKey": { + "base": null, + "refs": { + "CustomerDefinedValues$key": null + } + }, + "OptionalValue": { + "base": null, + "refs": { + "CustomerDefinedValues$value": null + } + }, + "RoleNameArn": { + "base": null, + "refs": { + "GenerateDataSetRequest$roleNameArn": "The Amazon Resource Name (ARN) of the Role with an attached permissions policy to interact with the provided AWS services." + } + }, + "SnsTopicArn": { + "base": null, + "refs": { + "GenerateDataSetRequest$snsTopicArn": "Amazon Resource Name (ARN) for the SNS Topic that will be notified when the data set has been published or if an error has occurred." 
+ } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/marketplacecommerceanalytics/2015-07-01/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/marketplacecommerceanalytics/2015-07-01/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/marketplacecommerceanalytics/2015-07-01/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/meteringmarketplace/2016-01-14/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/meteringmarketplace/2016-01-14/api-2.json new file mode 100644 index 000000000..e389a233e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/meteringmarketplace/2016-01-14/api-2.json @@ -0,0 +1,127 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2016-01-14", + "endpointPrefix":"metering.marketplace", + "jsonVersion":"1.1", + "protocol":"json", + "serviceFullName":"AWSMarketplace Metering", + "signatureVersion":"v4", + "signingName":"aws-marketplace", + "targetPrefix":"AWSMPMeteringService" + }, + "operations":{ + "MeterUsage":{ + "name":"MeterUsage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"MeterUsageRequest"}, + "output":{"shape":"MeterUsageResult"}, + "errors":[ + {"shape":"InternalServiceErrorException"}, + {"shape":"InvalidProductCodeException"}, + {"shape":"InvalidUsageDimensionException"}, + {"shape":"InvalidEndpointRegionException"}, + {"shape":"TimestampOutOfBoundsException"}, + {"shape":"DuplicateRequestException"}, + {"shape":"ThrottlingException"} + ] + } + }, + "shapes":{ + "Boolean":{"type":"boolean"}, + "DuplicateRequestException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "exception":true + }, + "InternalServiceErrorException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "exception":true, + "fault":true + }, + "InvalidEndpointRegionException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "exception":true + }, + "InvalidProductCodeException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "exception":true + }, + "InvalidUsageDimensionException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "exception":true + }, + "MeterUsageRequest":{ + "type":"structure", + "required":[ + "ProductCode", + "Timestamp", + "UsageDimension", + "UsageQuantity", + "DryRun" + ], + "members":{ + "ProductCode":{"shape":"ProductCode"}, + "Timestamp":{"shape":"Timestamp"}, + "UsageDimension":{"shape":"UsageDimension"}, + "UsageQuantity":{"shape":"UsageQuantity"}, + "DryRun":{"shape":"Boolean"} + } + }, + "MeterUsageResult":{ + "type":"structure", + "members":{ + "MeteringRecordId":{"shape":"String"} + } + }, + "ProductCode":{ + "type":"string", + "max":255, + "min":1 + }, + "String":{"type":"string"}, + "ThrottlingException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "exception":true + }, + "Timestamp":{"type":"timestamp"}, + "TimestampOutOfBoundsException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "exception":true + }, + "UsageDimension":{ + "type":"string", + "max":255, + "min":1 + }, + "UsageQuantity":{ + "type":"integer", + "max":10000, + "min":0 + }, + "errorMessage":{"type":"string"} + } +} diff --git 
a/vendor/github.com/aws/aws-sdk-go/models/apis/meteringmarketplace/2016-01-14/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/meteringmarketplace/2016-01-14/docs-2.json new file mode 100644 index 000000000..c71903432 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/meteringmarketplace/2016-01-14/docs-2.json @@ -0,0 +1,102 @@ +{ + "version": "2.0", + "service": "AWS Marketplace Metering Service

    This reference provides descriptions of the low-level AWS Marketplace Metering Service API.

    AWS Marketplace sellers can use this API to submit usage data for custom usage dimensions.

    Submitting Metering Records

    • MeterUsage - Submits the metering record for a Marketplace product.

    ", + "operations": { + "MeterUsage": "

    API to emit metering records. For identical requests, the API is idempotent. It simply returns the metering record ID.

    " + }, + "shapes": { + "Boolean": { + "base": null, + "refs": { + "MeterUsageRequest$DryRun": "

    Checks whether you have the permissions required for the action, but does not make the request. If you have the permissions, the request returns DryRunOperation; otherwise, it returns UnauthorizedException.

    " + } + }, + "DuplicateRequestException": { + "base": "

    A metering record has already been emitted by the same EC2 instance for the given {usageDimension, timestamp} with a different usageQuantity.

    ", + "refs": { + } + }, + "InternalServiceErrorException": { + "base": "

    An internal error has occurred. Retry your request. If the problem persists, post a message with details on the AWS forums.

    ", + "refs": { + } + }, + "InvalidEndpointRegionException": { + "base": "

    The endpoint being called is in a region different from your EC2 instance. The region of the Metering service endpoint and the region of the EC2 instance must match.

    ", + "refs": { + } + }, + "InvalidProductCodeException": { + "base": "

    The product code passed does not match the product code used for publishing the product.

    ", + "refs": { + } + }, + "InvalidUsageDimensionException": { + "base": "

    The usage dimension does not match one of the UsageDimensions associated with products.

    ", + "refs": { + } + }, + "MeterUsageRequest": { + "base": null, + "refs": { + } + }, + "MeterUsageResult": { + "base": null, + "refs": { + } + }, + "ProductCode": { + "base": null, + "refs": { + "MeterUsageRequest$ProductCode": "

    Product code is used to uniquely identify a product in AWS Marketplace. The product code should be the same as the one used during the publishing of a new product.

    " + } + }, + "String": { + "base": null, + "refs": { + "MeterUsageResult$MeteringRecordId": null + } + }, + "ThrottlingException": { + "base": "

    The calls to the MeterUsage API are throttled.

    ", + "refs": { + } + }, + "Timestamp": { + "base": null, + "refs": { + "MeterUsageRequest$Timestamp": "

    Timestamp of the hour, recorded in UTC. The seconds and milliseconds portions of the timestamp will be ignored.

    " + } + }, + "TimestampOutOfBoundsException": { + "base": "

    The timestamp value passed in the meterUsage() call is out of the allowed range.

    ", + "refs": { + } + }, + "UsageDimension": { + "base": null, + "refs": { + "MeterUsageRequest$UsageDimension": "

    It will be one of the 'fcp dimension name' values provided during the publishing of the product.

    " + } + }, + "UsageQuantity": { + "base": null, + "refs": { + "MeterUsageRequest$UsageQuantity": "

    Consumption value for the hour.

    " + } + }, + "errorMessage": { + "base": null, + "refs": { + "DuplicateRequestException$message": null, + "InternalServiceErrorException$message": null, + "InvalidEndpointRegionException$message": null, + "InvalidProductCodeException$message": null, + "InvalidUsageDimensionException$message": null, + "ThrottlingException$message": null, + "TimestampOutOfBoundsException$message": null + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/meteringmarketplace/2016-01-14/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/meteringmarketplace/2016-01-14/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/meteringmarketplace/2016-01-14/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/mobileanalytics/2014-06-05/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/mobileanalytics/2014-06-05/api-2.json new file mode 100644 index 000000000..c593da2a9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/mobileanalytics/2014-06-05/api-2.json @@ -0,0 +1,119 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2014-06-05", + "endpointPrefix":"mobileanalytics", + "serviceFullName":"Amazon Mobile Analytics", + "signatureVersion":"v4", + "protocol":"rest-json" + }, + "operations":{ + "PutEvents":{ + "name":"PutEvents", + "http":{ + "method":"POST", + "requestUri":"/2014-06-05/events", + "responseCode":202 + }, + "input":{"shape":"PutEventsInput"}, + "errors":[ + { + "shape":"BadRequestException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + } + }, + "shapes":{ + "BadRequestException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "Double":{"type":"double"}, + "Event":{ + "type":"structure", + "required":[ + "eventType", + "timestamp" + ], + "members":{ + "eventType":{"shape":"String50Chars"}, + "timestamp":{"shape":"ISO8601Timestamp"}, + "session":{"shape":"Session"}, + "version":{"shape":"String10Chars"}, + "attributes":{"shape":"MapOfStringToString"}, + "metrics":{"shape":"MapOfStringToNumber"} + } + }, + "EventListDefinition":{ + "type":"list", + "member":{"shape":"Event"} + }, + "ISO8601Timestamp":{"type":"string"}, + "Long":{"type":"long"}, + "MapOfStringToNumber":{ + "type":"map", + "key":{"shape":"String50Chars"}, + "value":{"shape":"Double"}, + "min":0, + "max":50 + }, + "MapOfStringToString":{ + "type":"map", + "key":{"shape":"String50Chars"}, + "value":{"shape":"String0to1000Chars"}, + "min":0, + "max":50 + }, + "PutEventsInput":{ + "type":"structure", + "required":[ + "events", + "clientContext" + ], + "members":{ + "events":{"shape":"EventListDefinition"}, + "clientContext":{ + "shape":"String", + "location":"header", + "locationName":"x-amz-Client-Context" + }, + "clientContextEncoding":{ + "shape":"String", + "location":"header", + "locationName":"x-amz-Client-Context-Encoding" + } + } + }, + "Session":{ + "type":"structure", + "members":{ + "id":{"shape":"String50Chars"}, + "duration":{"shape":"Long"}, + "startTimestamp":{"shape":"ISO8601Timestamp"}, + "stopTimestamp":{"shape":"ISO8601Timestamp"} + } + }, + "String":{"type":"string"}, + "String0to1000Chars":{ + "type":"string", + "min":0, + "max":1000 + }, + "String10Chars":{ + "type":"string", + "min":1, + "max":10 + }, + "String50Chars":{ + "type":"string", + "min":1, + "max":50 + } + } +} diff --git 
a/vendor/github.com/aws/aws-sdk-go/models/apis/mobileanalytics/2014-06-05/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/mobileanalytics/2014-06-05/docs-2.json new file mode 100644 index 000000000..838e32f7e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/mobileanalytics/2014-06-05/docs-2.json @@ -0,0 +1,98 @@ +{ + "version": "2.0", + "operations": { + "PutEvents": "

    The PutEvents operation records one or more events. You can have up to 1,500 unique custom events per app, any combination of up to 40 attributes and metrics per custom event, and any number of attribute or metric values.

    " + }, + "service": "

    Amazon Mobile Analytics is a service for collecting, visualizing, and understanding app usage data at scale.

    ", + "shapes": { + "BadRequestException": { + "base": "

    An exception object returned when a request fails.

    ", + "refs": { + } + }, + "Double": { + "base": null, + "refs": { + "MapOfStringToNumber$value": null + } + }, + "Event": { + "base": "

    A JSON object representing a batch of unique event occurrences in your app.

    ", + "refs": { + "EventListDefinition$member": null + } + }, + "EventListDefinition": { + "base": null, + "refs": { + "PutEventsInput$events": "

    An array of Event JSON objects.

    " + } + }, + "ISO8601Timestamp": { + "base": null, + "refs": { + "Event$timestamp": "

    The time the event occurred in ISO 8601 standard date time format. For example, 2014-06-30T19:07:47.885Z

    ", + "Session$startTimestamp": "

    The time the session started in ISO 8601 standard date time format. For example, 2014-06-30T19:07:47.885Z

    ", + "Session$stopTimestamp": "

    The time the session terminated in ISO 8601 standard date time format. For example, 2014-06-30T19:07:47.885Z

    " + } + }, + "Long": { + "base": null, + "refs": { + "Session$duration": "

    The duration of the session.

    " + } + }, + "MapOfStringToNumber": { + "base": null, + "refs": { + "Event$metrics": "

    A collection of key-value pairs that gives additional, measurable context to the event. The key-value pairs are specified by the developer.

    This collection can be empty or the metrics object can be omitted.

    " + } + }, + "MapOfStringToString": { + "base": null, + "refs": { + "Event$attributes": "

    A collection of key-value pairs that give additional context to the event. The key-value pairs are specified by the developer.

    This collection can be empty or the attribute object can be omitted.

    " + } + }, + "PutEventsInput": { + "base": "

    A container for the data needed for a PutEvents operation.

    ", + "refs": { + } + }, + "Session": { + "base": "

    Describes the session. Session information is required on ALL events.

    ", + "refs": { + "Event$session": "

    The session the event occurred within.

    " + } + }, + "String": { + "base": null, + "refs": { + "BadRequestException$message": "

    A text description associated with the BadRequestException object.

    ", + "PutEventsInput$clientContext": "

    The client context including the client ID, app title, app version and package name.

    ", + "PutEventsInput$clientContextEncoding": "

    The encoding used for the client context.

    " + } + }, + "String0to1000Chars": { + "base": null, + "refs": { + "MapOfStringToString$value": null + } + }, + "String10Chars": { + "base": null, + "refs": { + "Event$version": "

    The version of the event.

    " + } + }, + "String50Chars": { + "base": null, + "refs": { + "Event$eventType": "

    A name signifying an event that occurred in your app. This is used for grouping and aggregating like events together for reporting purposes.

    ", + "MapOfStringToNumber$key": null, + "MapOfStringToString$key": null, + "Session$id": "

    A unique identifier for the session.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/monitoring/2010-08-01/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/monitoring/2010-08-01/api-2.json new file mode 100644 index 000000000..cbc0f069e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/monitoring/2010-08-01/api-2.json @@ -0,0 +1,790 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2010-08-01", + "endpointPrefix":"monitoring", + "protocol":"query", + "serviceAbbreviation":"CloudWatch", + "serviceFullName":"Amazon CloudWatch", + "signatureVersion":"v4", + "xmlNamespace":"http://monitoring.amazonaws.com/doc/2010-08-01/" + }, + "operations":{ + "DeleteAlarms":{ + "name":"DeleteAlarms", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAlarmsInput"}, + "errors":[ + {"shape":"ResourceNotFound"} + ] + }, + "DescribeAlarmHistory":{ + "name":"DescribeAlarmHistory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAlarmHistoryInput"}, + "output":{ + "shape":"DescribeAlarmHistoryOutput", + "resultWrapper":"DescribeAlarmHistoryResult" + }, + "errors":[ + {"shape":"InvalidNextToken"} + ] + }, + "DescribeAlarms":{ + "name":"DescribeAlarms", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAlarmsInput"}, + "output":{ + "shape":"DescribeAlarmsOutput", + "resultWrapper":"DescribeAlarmsResult" + }, + "errors":[ + {"shape":"InvalidNextToken"} + ] + }, + "DescribeAlarmsForMetric":{ + "name":"DescribeAlarmsForMetric", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAlarmsForMetricInput"}, + "output":{ + "shape":"DescribeAlarmsForMetricOutput", + "resultWrapper":"DescribeAlarmsForMetricResult" + } + }, + "DisableAlarmActions":{ + "name":"DisableAlarmActions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableAlarmActionsInput"} + }, + "EnableAlarmActions":{ + "name":"EnableAlarmActions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableAlarmActionsInput"} + }, + "GetMetricStatistics":{ + "name":"GetMetricStatistics", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetMetricStatisticsInput"}, + "output":{ + "shape":"GetMetricStatisticsOutput", + "resultWrapper":"GetMetricStatisticsResult" + }, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingRequiredParameterException"}, + {"shape":"InvalidParameterCombinationException"}, + {"shape":"InternalServiceFault"} + ] + }, + "ListMetrics":{ + "name":"ListMetrics", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListMetricsInput"}, + "output":{ + "shape":"ListMetricsOutput", + "resultWrapper":"ListMetricsResult" + }, + "errors":[ + {"shape":"InternalServiceFault"}, + {"shape":"InvalidParameterValueException"} + ] + }, + "PutMetricAlarm":{ + "name":"PutMetricAlarm", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutMetricAlarmInput"}, + "errors":[ + {"shape":"LimitExceededFault"} + ] + }, + "PutMetricData":{ + "name":"PutMetricData", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutMetricDataInput"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingRequiredParameterException"}, + {"shape":"InvalidParameterCombinationException"}, + {"shape":"InternalServiceFault"} + ] + }, + "SetAlarmState":{ + "name":"SetAlarmState", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"SetAlarmStateInput"}, + "errors":[ + {"shape":"ResourceNotFound"}, + {"shape":"InvalidFormatFault"} + ] + } + }, + "shapes":{ + "ActionPrefix":{ + "type":"string", + "max":1024, + "min":1 + }, + "ActionsEnabled":{"type":"boolean"}, + "AlarmArn":{ + "type":"string", + "max":1600, + "min":1 + }, + "AlarmDescription":{ + "type":"string", + "max":1024, + "min":0 + }, + "AlarmHistoryItem":{ + "type":"structure", + "members":{ + "AlarmName":{"shape":"AlarmName"}, + "Timestamp":{"shape":"Timestamp"}, + "HistoryItemType":{"shape":"HistoryItemType"}, + "HistorySummary":{"shape":"HistorySummary"}, + "HistoryData":{"shape":"HistoryData"} + } + }, + "AlarmHistoryItems":{ + "type":"list", + "member":{"shape":"AlarmHistoryItem"} + }, + "AlarmName":{ + "type":"string", + "max":255, + "min":1 + }, + "AlarmNamePrefix":{ + "type":"string", + "max":255, + "min":1 + }, + "AlarmNames":{ + "type":"list", + "member":{"shape":"AlarmName"}, + "max":100 + }, + "AwsQueryErrorMessage":{"type":"string"}, + "ComparisonOperator":{ + "type":"string", + "enum":[ + "GreaterThanOrEqualToThreshold", + "GreaterThanThreshold", + "LessThanThreshold", + "LessThanOrEqualToThreshold" + ] + }, + "Datapoint":{ + "type":"structure", + "members":{ + "Timestamp":{"shape":"Timestamp"}, + "SampleCount":{"shape":"DatapointValue"}, + "Average":{"shape":"DatapointValue"}, + "Sum":{"shape":"DatapointValue"}, + "Minimum":{"shape":"DatapointValue"}, + "Maximum":{"shape":"DatapointValue"}, + "Unit":{"shape":"StandardUnit"} + }, + "xmlOrder":[ + "Timestamp", + "SampleCount", + "Average", + "Sum", + "Minimum", + "Maximum", + "Unit" + ] + }, + "DatapointValue":{"type":"double"}, + "Datapoints":{ + "type":"list", + "member":{"shape":"Datapoint"} + }, + "DeleteAlarmsInput":{ + "type":"structure", + "required":["AlarmNames"], + "members":{ + "AlarmNames":{"shape":"AlarmNames"} + } + }, + "DescribeAlarmHistoryInput":{ + "type":"structure", + "members":{ + "AlarmName":{"shape":"AlarmName"}, + "HistoryItemType":{"shape":"HistoryItemType"}, + "StartDate":{"shape":"Timestamp"}, + "EndDate":{"shape":"Timestamp"}, + "MaxRecords":{"shape":"MaxRecords"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeAlarmHistoryOutput":{ + "type":"structure", + "members":{ + "AlarmHistoryItems":{"shape":"AlarmHistoryItems"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeAlarmsForMetricInput":{ + "type":"structure", + "required":[ + "MetricName", + "Namespace" + ], + "members":{ + "MetricName":{"shape":"MetricName"}, + "Namespace":{"shape":"Namespace"}, + "Statistic":{"shape":"Statistic"}, + "Dimensions":{"shape":"Dimensions"}, + "Period":{"shape":"Period"}, + "Unit":{"shape":"StandardUnit"} + } + }, + "DescribeAlarmsForMetricOutput":{ + "type":"structure", + "members":{ + "MetricAlarms":{"shape":"MetricAlarms"} + } + }, + "DescribeAlarmsInput":{ + "type":"structure", + "members":{ + "AlarmNames":{"shape":"AlarmNames"}, + "AlarmNamePrefix":{"shape":"AlarmNamePrefix"}, + "StateValue":{"shape":"StateValue"}, + "ActionPrefix":{"shape":"ActionPrefix"}, + "MaxRecords":{"shape":"MaxRecords"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeAlarmsOutput":{ + "type":"structure", + "members":{ + "MetricAlarms":{"shape":"MetricAlarms"}, + "NextToken":{"shape":"NextToken"} + } + }, + "Dimension":{ + "type":"structure", + "required":[ + "Name", + "Value" + ], + "members":{ + "Name":{"shape":"DimensionName"}, + "Value":{"shape":"DimensionValue"} + }, + "xmlOrder":[ + "Name", + "Value" + ] + }, + "DimensionFilter":{ + "type":"structure", + 
"required":["Name"], + "members":{ + "Name":{"shape":"DimensionName"}, + "Value":{"shape":"DimensionValue"} + } + }, + "DimensionFilters":{ + "type":"list", + "member":{"shape":"DimensionFilter"}, + "max":10 + }, + "DimensionName":{ + "type":"string", + "max":255, + "min":1 + }, + "DimensionValue":{ + "type":"string", + "max":255, + "min":1 + }, + "Dimensions":{ + "type":"list", + "member":{"shape":"Dimension"}, + "max":10 + }, + "DisableAlarmActionsInput":{ + "type":"structure", + "required":["AlarmNames"], + "members":{ + "AlarmNames":{"shape":"AlarmNames"} + } + }, + "EnableAlarmActionsInput":{ + "type":"structure", + "required":["AlarmNames"], + "members":{ + "AlarmNames":{"shape":"AlarmNames"} + } + }, + "ErrorMessage":{ + "type":"string", + "max":255, + "min":1 + }, + "EvaluationPeriods":{ + "type":"integer", + "min":1 + }, + "FaultDescription":{"type":"string"}, + "GetMetricStatisticsInput":{ + "type":"structure", + "required":[ + "Namespace", + "MetricName", + "StartTime", + "EndTime", + "Period", + "Statistics" + ], + "members":{ + "Namespace":{"shape":"Namespace"}, + "MetricName":{"shape":"MetricName"}, + "Dimensions":{"shape":"Dimensions"}, + "StartTime":{"shape":"Timestamp"}, + "EndTime":{"shape":"Timestamp"}, + "Period":{"shape":"Period"}, + "Statistics":{"shape":"Statistics"}, + "Unit":{"shape":"StandardUnit"} + } + }, + "GetMetricStatisticsOutput":{ + "type":"structure", + "members":{ + "Label":{"shape":"MetricLabel"}, + "Datapoints":{"shape":"Datapoints"} + } + }, + "HistoryData":{ + "type":"string", + "max":4095, + "min":1 + }, + "HistoryItemType":{ + "type":"string", + "enum":[ + "ConfigurationUpdate", + "StateUpdate", + "Action" + ] + }, + "HistorySummary":{ + "type":"string", + "max":255, + "min":1 + }, + "InternalServiceFault":{ + "type":"structure", + "members":{ + "Message":{"shape":"FaultDescription"} + }, + "error":{ + "code":"InternalServiceError", + "httpStatusCode":500 + }, + "exception":true, + "xmlOrder":["Message"] + }, + "InvalidFormatFault":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{ + "code":"InvalidFormat", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidNextToken":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{ + "code":"InvalidNextToken", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidParameterCombinationException":{ + "type":"structure", + "members":{ + "message":{"shape":"AwsQueryErrorMessage"} + }, + "error":{ + "code":"InvalidParameterCombination", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidParameterValueException":{ + "type":"structure", + "members":{ + "message":{"shape":"AwsQueryErrorMessage"} + }, + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "LimitExceededFault":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{ + "code":"LimitExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ListMetricsInput":{ + "type":"structure", + "members":{ + "Namespace":{"shape":"Namespace"}, + "MetricName":{"shape":"MetricName"}, + "Dimensions":{"shape":"DimensionFilters"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListMetricsOutput":{ + "type":"structure", + "members":{ + "Metrics":{"shape":"Metrics"}, + "NextToken":{"shape":"NextToken"} + }, + "xmlOrder":[ + "Metrics", + "NextToken" + ] + }, + 
"MaxRecords":{ + "type":"integer", + "max":100, + "min":1 + }, + "Metric":{ + "type":"structure", + "members":{ + "Namespace":{"shape":"Namespace"}, + "MetricName":{"shape":"MetricName"}, + "Dimensions":{"shape":"Dimensions"} + }, + "xmlOrder":[ + "Namespace", + "MetricName", + "Dimensions" + ] + }, + "MetricAlarm":{ + "type":"structure", + "members":{ + "AlarmName":{"shape":"AlarmName"}, + "AlarmArn":{"shape":"AlarmArn"}, + "AlarmDescription":{"shape":"AlarmDescription"}, + "AlarmConfigurationUpdatedTimestamp":{"shape":"Timestamp"}, + "ActionsEnabled":{"shape":"ActionsEnabled"}, + "OKActions":{"shape":"ResourceList"}, + "AlarmActions":{"shape":"ResourceList"}, + "InsufficientDataActions":{"shape":"ResourceList"}, + "StateValue":{"shape":"StateValue"}, + "StateReason":{"shape":"StateReason"}, + "StateReasonData":{"shape":"StateReasonData"}, + "StateUpdatedTimestamp":{"shape":"Timestamp"}, + "MetricName":{"shape":"MetricName"}, + "Namespace":{"shape":"Namespace"}, + "Statistic":{"shape":"Statistic"}, + "Dimensions":{"shape":"Dimensions"}, + "Period":{"shape":"Period"}, + "Unit":{"shape":"StandardUnit"}, + "EvaluationPeriods":{"shape":"EvaluationPeriods"}, + "Threshold":{"shape":"Threshold"}, + "ComparisonOperator":{"shape":"ComparisonOperator"} + }, + "xmlOrder":[ + "AlarmName", + "AlarmArn", + "AlarmDescription", + "AlarmConfigurationUpdatedTimestamp", + "ActionsEnabled", + "OKActions", + "AlarmActions", + "InsufficientDataActions", + "StateValue", + "StateReason", + "StateReasonData", + "StateUpdatedTimestamp", + "MetricName", + "Namespace", + "Statistic", + "Dimensions", + "Period", + "Unit", + "EvaluationPeriods", + "Threshold", + "ComparisonOperator" + ] + }, + "MetricAlarms":{ + "type":"list", + "member":{"shape":"MetricAlarm"} + }, + "MetricData":{ + "type":"list", + "member":{"shape":"MetricDatum"} + }, + "MetricDatum":{ + "type":"structure", + "required":["MetricName"], + "members":{ + "MetricName":{"shape":"MetricName"}, + "Dimensions":{"shape":"Dimensions"}, + "Timestamp":{"shape":"Timestamp"}, + "Value":{"shape":"DatapointValue"}, + "StatisticValues":{"shape":"StatisticSet"}, + "Unit":{"shape":"StandardUnit"} + } + }, + "MetricLabel":{"type":"string"}, + "MetricName":{ + "type":"string", + "max":255, + "min":1 + }, + "Metrics":{ + "type":"list", + "member":{"shape":"Metric"} + }, + "MissingRequiredParameterException":{ + "type":"structure", + "members":{ + "message":{"shape":"AwsQueryErrorMessage"} + }, + "error":{ + "code":"MissingParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Namespace":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[^:].*" + }, + "NextToken":{ + "type":"string", + "max":1024, + "min":0 + }, + "Period":{ + "type":"integer", + "min":60 + }, + "PutMetricAlarmInput":{ + "type":"structure", + "required":[ + "AlarmName", + "MetricName", + "Namespace", + "Statistic", + "Period", + "EvaluationPeriods", + "Threshold", + "ComparisonOperator" + ], + "members":{ + "AlarmName":{"shape":"AlarmName"}, + "AlarmDescription":{"shape":"AlarmDescription"}, + "ActionsEnabled":{"shape":"ActionsEnabled"}, + "OKActions":{"shape":"ResourceList"}, + "AlarmActions":{"shape":"ResourceList"}, + "InsufficientDataActions":{"shape":"ResourceList"}, + "MetricName":{"shape":"MetricName"}, + "Namespace":{"shape":"Namespace"}, + "Statistic":{"shape":"Statistic"}, + "Dimensions":{"shape":"Dimensions"}, + "Period":{"shape":"Period"}, + "Unit":{"shape":"StandardUnit"}, + "EvaluationPeriods":{"shape":"EvaluationPeriods"}, + 
"Threshold":{"shape":"Threshold"}, + "ComparisonOperator":{"shape":"ComparisonOperator"} + } + }, + "PutMetricDataInput":{ + "type":"structure", + "required":[ + "Namespace", + "MetricData" + ], + "members":{ + "Namespace":{"shape":"Namespace"}, + "MetricData":{"shape":"MetricData"} + } + }, + "ResourceList":{ + "type":"list", + "member":{"shape":"ResourceName"}, + "max":5 + }, + "ResourceName":{ + "type":"string", + "max":1024, + "min":1 + }, + "ResourceNotFound":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SetAlarmStateInput":{ + "type":"structure", + "required":[ + "AlarmName", + "StateValue", + "StateReason" + ], + "members":{ + "AlarmName":{"shape":"AlarmName"}, + "StateValue":{"shape":"StateValue"}, + "StateReason":{"shape":"StateReason"}, + "StateReasonData":{"shape":"StateReasonData"} + } + }, + "StandardUnit":{ + "type":"string", + "enum":[ + "Seconds", + "Microseconds", + "Milliseconds", + "Bytes", + "Kilobytes", + "Megabytes", + "Gigabytes", + "Terabytes", + "Bits", + "Kilobits", + "Megabits", + "Gigabits", + "Terabits", + "Percent", + "Count", + "Bytes/Second", + "Kilobytes/Second", + "Megabytes/Second", + "Gigabytes/Second", + "Terabytes/Second", + "Bits/Second", + "Kilobits/Second", + "Megabits/Second", + "Gigabits/Second", + "Terabits/Second", + "Count/Second", + "None" + ] + }, + "StateReason":{ + "type":"string", + "max":1023, + "min":0 + }, + "StateReasonData":{ + "type":"string", + "max":4000, + "min":0 + }, + "StateValue":{ + "type":"string", + "enum":[ + "OK", + "ALARM", + "INSUFFICIENT_DATA" + ] + }, + "Statistic":{ + "type":"string", + "enum":[ + "SampleCount", + "Average", + "Sum", + "Minimum", + "Maximum" + ] + }, + "StatisticSet":{ + "type":"structure", + "required":[ + "SampleCount", + "Sum", + "Minimum", + "Maximum" + ], + "members":{ + "SampleCount":{"shape":"DatapointValue"}, + "Sum":{"shape":"DatapointValue"}, + "Minimum":{"shape":"DatapointValue"}, + "Maximum":{"shape":"DatapointValue"} + } + }, + "Statistics":{ + "type":"list", + "member":{"shape":"Statistic"}, + "max":5, + "min":1 + }, + "Threshold":{"type":"double"}, + "Timestamp":{"type":"timestamp"} + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/monitoring/2010-08-01/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/monitoring/2010-08-01/docs-2.json new file mode 100644 index 000000000..5f8d03b84 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/monitoring/2010-08-01/docs-2.json @@ -0,0 +1,515 @@ +{ + "version": "2.0", + "service": "

    Amazon CloudWatch monitors your Amazon Web Services (AWS) resources and the applications you run on AWS in real time. You can use CloudWatch to collect and track metrics, which are the variables you want to measure for your resources and applications.

    CloudWatch alarms send notifications or automatically make changes to the resources you are monitoring based on rules that you define. For example, you can monitor the CPU usage and disk reads and writes of your Amazon Elastic Compute Cloud (Amazon EC2) instances and then use this data to determine whether you should launch additional instances to handle increased load. You can also use this data to stop under-used instances to save money.

    In addition to monitoring the built-in metrics that come with AWS, you can monitor your own custom metrics. With CloudWatch, you gain system-wide visibility into resource utilization, application performance, and operational health.

    ", + "operations": { + "DeleteAlarms": "

    Deletes all specified alarms. In the event of an error, no alarms are deleted.

    ", + "DescribeAlarmHistory": "

    Retrieves history for the specified alarm. Filter alarms by date range or item type. If an alarm name is not specified, Amazon CloudWatch returns histories for all of the owner's alarms.

    Amazon CloudWatch retains the history of an alarm for two weeks, whether or not you delete the alarm. ", + "DescribeAlarms": "

    Retrieves alarms with the specified names. If no name is specified, all alarms for the user are returned. Alarms can be retrieved by using only a prefix for the alarm name, the alarm state, or a prefix for any action.

    ", + "DescribeAlarmsForMetric": "

    Retrieves all alarms for a single metric. Specify a statistic, period, or unit to filter the set of alarms further.

    ", + "DisableAlarmActions": "

    Disables actions for the specified alarms. When an alarm's actions are disabled, the alarm's state may change, but none of the alarm's actions will execute.

    ", + "EnableAlarmActions": "

    Enables actions for the specified alarms.

    ", + "GetMetricStatistics": "

    Gets statistics for the specified metric.

    The maximum number of data points that can be queried is 50,850, whereas the maximum number of data points returned from a single GetMetricStatistics request is 1,440. If you make a request that generates more than 1,440 data points, Amazon CloudWatch returns an error. In such a case, you can alter the request by narrowing the specified time range or increasing the specified period. Alternatively, you can make multiple requests across adjacent time ranges. GetMetricStatistics does not return the data in chronological order.

    Amazon CloudWatch aggregates data points based on the length of the period that you specify. For example, if you request statistics with a one-minute granularity, Amazon CloudWatch aggregates data points with time stamps that fall within the same one-minute period. In such a case, the data points queried can greatly outnumber the data points returned.

    The following examples show request sizes that stay within the 50,850 data point query maximum when you call GetMetricStatistics on Amazon EC2 instances with detailed (one-minute) monitoring enabled:

    • Statistics for up to 400 instances for a span of one hour
    • Statistics for up to 35 instances over a span of 24 hours
    • Statistics for up to 2 instances over a span of 2 weeks

    For information about the namespace, metric names, and dimensions that other Amazon Web Services products use to send metrics to CloudWatch, go to Amazon CloudWatch Metrics, Namespaces, and Dimensions Reference in the Amazon CloudWatch Developer Guide.
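    As a rough illustration of the request shape this model defines (Namespace, MetricName, StartTime, EndTime, Period, and Statistics are the required members), here is a minimal sketch using the vendored aws-sdk-go CloudWatch client. The region, instance ID, and one-hour window are assumptions for the example, not part of the model.

    ```go
    package main

    import (
    	"fmt"
    	"log"
    	"time"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/cloudwatch"
    )

    func main() {
    	// Session from the default credential chain; the region is an assumption.
    	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
    	svc := cloudwatch.New(sess)

    	end := time.Now()
    	start := end.Add(-1 * time.Hour) // a one-hour window stays far below the 50,850 data point cap

    	out, err := svc.GetMetricStatistics(&cloudwatch.GetMetricStatisticsInput{
    		Namespace:  aws.String("AWS/EC2"),
    		MetricName: aws.String("CPUUtilization"),
    		Dimensions: []*cloudwatch.Dimension{{
    			Name:  aws.String("InstanceId"),
    			Value: aws.String("i-0123456789abcdef0"), // hypothetical instance ID
    		}},
    		StartTime:  aws.Time(start),
    		EndTime:    aws.Time(end),
    		Period:     aws.Int64(60), // seconds; must be at least 60 and a multiple of 60
    		Statistics: []*string{aws.String(cloudwatch.StatisticAverage)},
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Datapoints are not returned in chronological order; sort before charting.
    	for _, dp := range out.Datapoints {
    		fmt.Println(dp.Timestamp.Format(time.RFC3339), aws.Float64Value(dp.Average))
    	}
    }
    ```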

    ", + "ListMetrics": "

    Returns a list of valid metrics stored for the AWS account owner. Returned metrics can be used with GetMetricStatistics to obtain statistical data for a given metric.

    Up to 500 results are returned for any one call. To retrieve further results, use returned NextToken values with subsequent ListMetrics operations. If you create a metric with the PutMetricData action, allow up to fifteen minutes for the metric to appear in calls to the ListMetrics action. Statistics about the metric, however, are available sooner using GetMetricStatistics. ", + "PutMetricAlarm": "

    Creates or updates an alarm and associates it with the specified Amazon CloudWatch metric. Optionally, this operation can associate one or more Amazon Simple Notification Service resources with the alarm.

    When this operation creates an alarm, the alarm state is immediately set to INSUFFICIENT_DATA. The alarm is evaluated and its StateValue is set appropriately. Any actions associated with the StateValue are then executed.

    When updating an existing alarm, its StateValue is left unchanged. If you are using an AWS Identity and Access Management (IAM) account to create or modify an alarm, you must have the following Amazon EC2 permissions:
    • ec2:DescribeInstanceStatus and ec2:DescribeInstances for all alarms on Amazon EC2 instance status metrics.
    • ec2:StopInstances for alarms with stop actions.
    • ec2:TerminateInstances for alarms with terminate actions.
    • ec2:DescribeInstanceRecoveryAttribute and ec2:RecoverInstances for alarms with recover actions.

    If you have read/write permissions for Amazon CloudWatch but not for Amazon EC2, you can still create an alarm, but the stop or terminate actions won't be performed on the Amazon EC2 instance. However, if you are later granted permission to use the associated Amazon EC2 APIs, the alarm actions you created earlier will be performed. For more information about IAM permissions, see Permissions and Policies in Using IAM.

    If you are using an IAM role (e.g., an Amazon EC2 instance profile), you cannot stop or terminate the instance using alarm actions. However, you can still see the alarm state and perform any other actions such as Amazon SNS notifications or Auto Scaling policies.

    If you are using temporary security credentials granted using the AWS Security Token Service (AWS STS), you cannot stop or terminate an Amazon EC2 instance using alarm actions.
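    A sketch of the minimal PutMetricAlarm call implied by the required members listed in PutMetricAlarmInput above, again using the vendored aws-sdk-go client. The alarm name, instance ID, and 80% threshold are illustrative assumptions, and svc is a *cloudwatch.CloudWatch built as in the earlier sketch.

    ```go
    package main

    import (
    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/service/cloudwatch"
    )

    // createHighCPUAlarm sets only the members the model marks required, plus a
    // dimension. All names and numbers here are illustrative.
    func createHighCPUAlarm(svc *cloudwatch.CloudWatch) error {
    	_, err := svc.PutMetricAlarm(&cloudwatch.PutMetricAlarmInput{
    		AlarmName:  aws.String("high-cpu-i-0123456789abcdef0"), // must be unique per account
    		Namespace:  aws.String("AWS/EC2"),
    		MetricName: aws.String("CPUUtilization"),
    		Dimensions: []*cloudwatch.Dimension{
    			{Name: aws.String("InstanceId"), Value: aws.String("i-0123456789abcdef0")},
    		},
    		Statistic:          aws.String(cloudwatch.StatisticAverage),
    		Period:             aws.Int64(60), // seconds; minimum 60, multiple of 60
    		EvaluationPeriods:  aws.Int64(5),  // five consecutive one-minute periods
    		Threshold:          aws.Float64(80),
    		ComparisonOperator: aws.String(cloudwatch.ComparisonOperatorGreaterThanThreshold),
    		// AlarmActions accepts up to five ARNs, e.g. an SNS topic or the
    		// arn:aws:automate:... values listed later in this file; omitted here.
    	})
    	return err
    }
    ```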

    ", + "PutMetricData": "

    Publishes metric data points to Amazon CloudWatch. Amazon CloudWatch associates the data points with the specified metric. If the specified metric does not exist, Amazon CloudWatch creates the metric. When Amazon CloudWatch creates a metric, it can take up to fifteen minutes for the metric to appear in calls to the ListMetrics action.

    Each PutMetricData request is limited to 8 KB for HTTP GET requests and 40 KB for HTTP POST requests.

    Although the Value parameter accepts numbers of type Double, Amazon CloudWatch rejects values that are either too small or too large. Values must be in the range of 8.515920e-109 to 1.174271e+108 (Base 10) or 2e-360 to 2e360 (Base 2). In addition, special values (e.g., NaN, +Infinity, -Infinity) are not supported.

    Data that is timestamped 24 hours or more in the past may take in excess of 48 hours to become available from submission time using GetMetricStatistics.
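    A minimal sketch of publishing one raw value for a hypothetical custom metric with the vendored aws-sdk-go client; the namespace, metric name, and value are assumptions, and svc is a *cloudwatch.CloudWatch as in the earlier sketch.

    ```go
    package main

    import (
    	"time"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/service/cloudwatch"
    )

    // putOneDatum publishes a single raw value for a hypothetical custom metric.
    func putOneDatum(svc *cloudwatch.CloudWatch) error {
    	_, err := svc.PutMetricData(&cloudwatch.PutMetricDataInput{
    		Namespace: aws.String("MyApp"), // custom namespaces must not begin with "AWS/"
    		MetricData: []*cloudwatch.MetricDatum{{
    			MetricName: aws.String("PageLoadTime"),
    			Timestamp:  aws.Time(time.Now()), // defaults to the receipt time if omitted
    			Value:      aws.Float64(412),
    			Unit:       aws.String(cloudwatch.StandardUnitMilliseconds),
    		}},
    	})
    	return err
    }
    ```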

    ", + "SetAlarmState": "

    Temporarily sets the state of an alarm. When the updated StateValue differs from the previous value, the action configured for the appropriate state is invoked. For example, if your alarm is configured to send an Amazon SNS message when an alarm is triggered, temporarily changing the alarm's state to ALARM will send an Amazon SNS message. This is not a permanent change. The next periodic alarm check (in about a minute) will set the alarm to its actual state. Because the alarm state change happens very quickly, it is typically only visible in the alarm's History tab in the Amazon CloudWatch console or through DescribeAlarmHistory.
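    Since the forced state only survives until the next evaluation, this operation is mainly useful for testing alarm actions. A short sketch under the same assumptions as the earlier examples:

    ```go
    package main

    import (
    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/service/cloudwatch"
    )

    // forceAlarmState pushes an alarm into ALARM to exercise its actions; the
    // next periodic evaluation (about a minute later) restores the real state.
    func forceAlarmState(svc *cloudwatch.CloudWatch, name string) error {
    	_, err := svc.SetAlarmState(&cloudwatch.SetAlarmStateInput{
    		AlarmName:   aws.String(name),
    		StateValue:  aws.String(cloudwatch.StateValueAlarm), // "OK" and "INSUFFICIENT_DATA" are the other values
    		StateReason: aws.String("manual test of alarm actions"),
    	})
    	return err
    }
    ```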

    " + }, + "shapes": { + "ActionPrefix": { + "base": null, + "refs": { + "DescribeAlarmsInput$ActionPrefix": "

    The action name prefix.

    " + } + }, + "ActionsEnabled": { + "base": null, + "refs": { + "MetricAlarm$ActionsEnabled": "

    Indicates whether actions should be executed during any changes to the alarm's state.

    ", + "PutMetricAlarmInput$ActionsEnabled": "

    Indicates whether or not actions should be executed during any changes to the alarm's state.

    " + } + }, + "AlarmArn": { + "base": null, + "refs": { + "MetricAlarm$AlarmArn": "

    The Amazon Resource Name (ARN) of the alarm.

    " + } + }, + "AlarmDescription": { + "base": null, + "refs": { + "MetricAlarm$AlarmDescription": "

    The description for the alarm.

    ", + "PutMetricAlarmInput$AlarmDescription": "

    The description for the alarm.

    " + } + }, + "AlarmHistoryItem": { + "base": "

    The AlarmHistoryItem data type contains descriptive information about the history of a specific alarm. If you call DescribeAlarmHistory, Amazon CloudWatch returns this data type as part of the DescribeAlarmHistoryResult data type.

    ", + "refs": { + "AlarmHistoryItems$member": null + } + }, + "AlarmHistoryItems": { + "base": null, + "refs": { + "DescribeAlarmHistoryOutput$AlarmHistoryItems": "

    A list of alarm histories in JSON format.

    " + } + }, + "AlarmName": { + "base": null, + "refs": { + "AlarmHistoryItem$AlarmName": "

    The descriptive name for the alarm.

    ", + "AlarmNames$member": null, + "DescribeAlarmHistoryInput$AlarmName": "

    The name of the alarm.

    ", + "MetricAlarm$AlarmName": "

    The name of the alarm.

    ", + "PutMetricAlarmInput$AlarmName": "

    The descriptive name for the alarm. This name must be unique within the user's AWS account.

    ", + "SetAlarmStateInput$AlarmName": "

    The descriptive name for the alarm. This name must be unique within the user's AWS account. The maximum length is 255 characters.

    " + } + }, + "AlarmNamePrefix": { + "base": null, + "refs": { + "DescribeAlarmsInput$AlarmNamePrefix": "

    The alarm name prefix. AlarmNames cannot be specified if this parameter is specified.

    " + } + }, + "AlarmNames": { + "base": null, + "refs": { + "DeleteAlarmsInput$AlarmNames": "

    A list of alarms to be deleted.

    ", + "DescribeAlarmsInput$AlarmNames": "

    A list of alarm names to retrieve information for.

    ", + "DisableAlarmActionsInput$AlarmNames": "

    The names of the alarms to disable actions for.

    ", + "EnableAlarmActionsInput$AlarmNames": "

    The names of the alarms to enable actions for.

    " + } + }, + "AwsQueryErrorMessage": { + "base": null, + "refs": { + "InvalidParameterCombinationException$message": "

    ", + "InvalidParameterValueException$message": "

    ", + "MissingRequiredParameterException$message": "

    " + } + }, + "ComparisonOperator": { + "base": null, + "refs": { + "MetricAlarm$ComparisonOperator": "

    The arithmetic operation to use when comparing the specified Statistic and Threshold. The specified Statistic value is used as the first operand.

    ", + "PutMetricAlarmInput$ComparisonOperator": "

    The arithmetic operation to use when comparing the specified Statistic and Threshold. The specified Statistic value is used as the first operand.

    " + } + }, + "Datapoint": { + "base": "

    The Datapoint data type encapsulates the statistical data that Amazon CloudWatch computes from metric data.

    ", + "refs": { + "Datapoints$member": null + } + }, + "DatapointValue": { + "base": null, + "refs": { + "Datapoint$SampleCount": "

    The number of metric values that contributed to the aggregate value of this datapoint.

    ", + "Datapoint$Average": "

    The average of metric values that correspond to the datapoint.

    ", + "Datapoint$Sum": "

    The sum of metric values used for the datapoint.

    ", + "Datapoint$Minimum": "

    The minimum metric value used for the datapoint.

    ", + "Datapoint$Maximum": "

    The maximum metric value used for the datapoint.

    ", + "MetricDatum$Value": "

    The value for the metric.

    Although the Value parameter accepts numbers of type Double, Amazon CloudWatch rejects values that are either too small or too large. Values must be in the range of 8.515920e-109 to 1.174271e+108 (Base 10) or 2e-360 to 2e360 (Base 2). In addition, special values (e.g., NaN, +Infinity, -Infinity) are not supported. ", + "StatisticSet$SampleCount": "

    The number of samples used for the statistic set.

    ", + "StatisticSet$Sum": "

    The sum of values for the sample set.

    ", + "StatisticSet$Minimum": "

    The minimum value of the sample set.

    ", + "StatisticSet$Maximum": "

    The maximum value of the sample set.

    " + } + }, + "Datapoints": { + "base": null, + "refs": { + "GetMetricStatisticsOutput$Datapoints": "

    The datapoints for the specified metric.

    " + } + }, + "DeleteAlarmsInput": { + "base": null, + "refs": { + } + }, + "DescribeAlarmHistoryInput": { + "base": null, + "refs": { + } + }, + "DescribeAlarmHistoryOutput": { + "base": "

    The output for the DescribeAlarmHistory action.

    ", + "refs": { + } + }, + "DescribeAlarmsForMetricInput": { + "base": null, + "refs": { + } + }, + "DescribeAlarmsForMetricOutput": { + "base": "

    The output for the DescribeAlarmsForMetric action.

    ", + "refs": { + } + }, + "DescribeAlarmsInput": { + "base": null, + "refs": { + } + }, + "DescribeAlarmsOutput": { + "base": "

    The output for the DescribeAlarms action.

    ", + "refs": { + } + }, + "Dimension": { + "base": "

    The Dimension data type further expands on the identity of a metric using a Name, Value pair.

    For examples that use one or more dimensions, see PutMetricData.

    ", + "refs": { + "Dimensions$member": null + } + }, + "DimensionFilter": { + "base": "

    The DimensionFilter data type is used to filter ListMetrics results.

    ", + "refs": { + "DimensionFilters$member": null + } + }, + "DimensionFilters": { + "base": null, + "refs": { + "ListMetricsInput$Dimensions": "

    A list of dimensions to filter against.

    " + } + }, + "DimensionName": { + "base": null, + "refs": { + "Dimension$Name": "

    The name of the dimension.

    ", + "DimensionFilter$Name": "

    The dimension name to be matched.

    " + } + }, + "DimensionValue": { + "base": null, + "refs": { + "Dimension$Value": "

    The value representing the dimension measurement.

    ", + "DimensionFilter$Value": "

    The value of the dimension to be matched.

    Specifying a Name without specifying a Value returns all values associated with that Name. " + } + }, + "Dimensions": { + "base": null, + "refs": { + "DescribeAlarmsForMetricInput$Dimensions": "

    The list of dimensions associated with the metric. If the metric has any associated dimensions, you must specify them in order for DescribeAlarmsForMetric to succeed.

    ", + "GetMetricStatisticsInput$Dimensions": "

    A list of dimensions describing qualities of the metric.

    ", + "Metric$Dimensions": "

    A list of dimensions associated with the metric.

    ", + "MetricAlarm$Dimensions": "

    The list of dimensions associated with the alarm's associated metric.

    ", + "MetricDatum$Dimensions": "

    A list of dimensions associated with the metric. Note that when using the Dimensions value in a query, you must append .member.N to it (e.g., Dimensions.member.N).

    ", + "PutMetricAlarmInput$Dimensions": "

    The dimensions for the alarm's associated metric.

    " + } + }, + "DisableAlarmActionsInput": { + "base": "

    ", + "refs": { + } + }, + "EnableAlarmActionsInput": { + "base": null, + "refs": { + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "InvalidFormatFault$message": "

    ", + "InvalidNextToken$message": "

    ", + "LimitExceededFault$message": "

    ", + "ResourceNotFound$message": "

    " + } + }, + "EvaluationPeriods": { + "base": null, + "refs": { + "MetricAlarm$EvaluationPeriods": "

    The number of periods over which data is compared to the specified threshold.

    ", + "PutMetricAlarmInput$EvaluationPeriods": "

    The number of periods over which data is compared to the specified threshold.

    " + } + }, + "FaultDescription": { + "base": null, + "refs": { + "InternalServiceFault$Message": "

    " + } + }, + "GetMetricStatisticsInput": { + "base": null, + "refs": { + } + }, + "GetMetricStatisticsOutput": { + "base": "

    The output for the GetMetricStatistics action.

    ", + "refs": { + } + }, + "HistoryData": { + "base": null, + "refs": { + "AlarmHistoryItem$HistoryData": "

    Machine-readable data about the alarm in JSON format.

    " + } + }, + "HistoryItemType": { + "base": null, + "refs": { + "AlarmHistoryItem$HistoryItemType": "

    The type of alarm history item.

    ", + "DescribeAlarmHistoryInput$HistoryItemType": "

    The type of alarm histories to retrieve.

    " + } + }, + "HistorySummary": { + "base": null, + "refs": { + "AlarmHistoryItem$HistorySummary": "

    A human-readable summary of the alarm history.

    " + } + }, + "InternalServiceFault": { + "base": "

    Indicates that the request processing has failed due to some unknown error, exception, or failure.

    ", + "refs": { + } + }, + "InvalidFormatFault": { + "base": "

    Data was not syntactically valid JSON.

    ", + "refs": { + } + }, + "InvalidNextToken": { + "base": "

    The next token specified is invalid.

    ", + "refs": { + } + }, + "InvalidParameterCombinationException": { + "base": "

    Parameters that must not be used together were used together.

    ", + "refs": { + } + }, + "InvalidParameterValueException": { + "base": "

    Bad or out-of-range value was supplied for the input parameter.

    ", + "refs": { + } + }, + "LimitExceededFault": { + "base": "

    The quota for alarms for this customer has already been reached.

    ", + "refs": { + } + }, + "ListMetricsInput": { + "base": null, + "refs": { + } + }, + "ListMetricsOutput": { + "base": "

    The output for the ListMetrics action.

    ", + "refs": { + } + }, + "MaxRecords": { + "base": null, + "refs": { + "DescribeAlarmHistoryInput$MaxRecords": "

    The maximum number of alarm history records to retrieve.

    ", + "DescribeAlarmsInput$MaxRecords": "

    The maximum number of alarm descriptions to retrieve.

    " + } + }, + "Metric": { + "base": "

    The Metric data type contains information about a specific metric. If you call ListMetrics, Amazon CloudWatch returns information contained by this data type.

    The example in the Examples section publishes two metrics named buffers and latency. Both metrics are in the examples namespace. Both metrics have two dimensions, InstanceID and InstanceType.

    ", + "refs": { + "Metrics$member": null + } + }, + "MetricAlarm": { + "base": "

    The MetricAlarm data type represents an alarm. You can use PutMetricAlarm to create or update an alarm.

    ", + "refs": { + "MetricAlarms$member": null + } + }, + "MetricAlarms": { + "base": null, + "refs": { + "DescribeAlarmsForMetricOutput$MetricAlarms": "

    A list of information for each alarm with the specified metric.

    ", + "DescribeAlarmsOutput$MetricAlarms": "

    A list of information for the specified alarms.

    " + } + }, + "MetricData": { + "base": null, + "refs": { + "PutMetricDataInput$MetricData": "

    A list of data describing the metric.

    " + } + }, + "MetricDatum": { + "base": "

    The MetricDatum data type encapsulates the information sent with PutMetricData to either create a new metric or add new values to be aggregated into an existing metric.

    ", + "refs": { + "MetricData$member": null + } + }, + "MetricLabel": { + "base": null, + "refs": { + "GetMetricStatisticsOutput$Label": "

    A label describing the specified metric.

    " + } + }, + "MetricName": { + "base": null, + "refs": { + "DescribeAlarmsForMetricInput$MetricName": "

    The name of the metric.

    ", + "GetMetricStatisticsInput$MetricName": "

    The name of the metric, with or without spaces.

    ", + "ListMetricsInput$MetricName": "

    The name of the metric to filter against.

    ", + "Metric$MetricName": "

    The name of the metric.

    ", + "MetricAlarm$MetricName": "

    The name of the alarm's metric.

    ", + "MetricDatum$MetricName": "

    The name of the metric.

    ", + "PutMetricAlarmInput$MetricName": "

    The name for the alarm's associated metric.

    " + } + }, + "Metrics": { + "base": null, + "refs": { + "ListMetricsOutput$Metrics": "

    A list of metrics used to generate statistics for an AWS account.

    " + } + }, + "MissingRequiredParameterException": { + "base": "

    An input parameter that is mandatory for processing the request is not supplied.

    ", + "refs": { + } + }, + "Namespace": { + "base": null, + "refs": { + "DescribeAlarmsForMetricInput$Namespace": "

    The namespace of the metric.

    ", + "GetMetricStatisticsInput$Namespace": "

    The namespace of the metric, with or without spaces.

    ", + "ListMetricsInput$Namespace": "

    The namespace to filter against.

    ", + "Metric$Namespace": "

    The namespace of the metric.

    ", + "MetricAlarm$Namespace": "

    The namespace of the alarm's associated metric.

    ", + "PutMetricAlarmInput$Namespace": "

    The namespace for the alarm's associated metric.

    ", + "PutMetricDataInput$Namespace": "

    The namespace for the metric data.

    You cannot specify a namespace that begins with \"AWS/\". Namespaces that begin with \"AWS/\" are reserved for other Amazon Web Services products that send metrics to Amazon CloudWatch. " + } + }, + "NextToken": { + "base": null, + "refs": { + "DescribeAlarmHistoryInput$NextToken": "

    The token returned by a previous call to indicate that there is more data available.

    ", + "DescribeAlarmHistoryOutput$NextToken": "

    A string that marks the start of the next batch of returned results.

    ", + "DescribeAlarmsInput$NextToken": "

    The token returned by a previous call to indicate that there is more data available.

    ", + "DescribeAlarmsOutput$NextToken": "

    A string that marks the start of the next batch of returned results.

    ", + "ListMetricsInput$NextToken": "

    The token returned by a previous call to indicate that there is more data available.

    ", + "ListMetricsOutput$NextToken": "

    A string that marks the start of the next batch of returned results.
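    The NextToken round trip described in these members can be driven by hand, as in the sketch below (the SDK's generated ListMetricsPages paginator, configured by the paginators-1.json added later in this patch, wraps the same loop). The DimensionFilter names only a dimension Name, so every value of InstanceId matches; the namespace is an assumption.

    ```go
    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/service/cloudwatch"
    )

    // listAllMetrics follows NextToken until it comes back nil, printing each
    // metric as it arrives. svc is a *cloudwatch.CloudWatch as in earlier sketches.
    func listAllMetrics(svc *cloudwatch.CloudWatch) error {
    	input := &cloudwatch.ListMetricsInput{
    		Namespace: aws.String("AWS/EC2"), // assumption: EC2 metrics
    		Dimensions: []*cloudwatch.DimensionFilter{
    			{Name: aws.String("InstanceId")}, // no Value: match every value of this dimension
    		},
    	}
    	for {
    		out, err := svc.ListMetrics(input)
    		if err != nil {
    			return err
    		}
    		for _, m := range out.Metrics {
    			fmt.Println(aws.StringValue(m.Namespace), aws.StringValue(m.MetricName))
    		}
    		if out.NextToken == nil {
    			break // no more data available
    		}
    		input.NextToken = out.NextToken // hand the marker back on the next call
    	}
    	return nil
    }
    ```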

    " + } + }, + "Period": { + "base": null, + "refs": { + "DescribeAlarmsForMetricInput$Period": "

    The period in seconds over which the statistic is applied.

    ", + "GetMetricStatisticsInput$Period": "

    The granularity, in seconds, of the returned datapoints. Period must be at least 60 seconds and must be a multiple of 60. The default value is 60.

    ", + "MetricAlarm$Period": "

    The period in seconds over which the statistic is applied.

    ", + "PutMetricAlarmInput$Period": "

    The period in seconds over which the specified statistic is applied.

    " + } + }, + "PutMetricAlarmInput": { + "base": null, + "refs": { + } + }, + "PutMetricDataInput": { + "base": null, + "refs": { + } + }, + "ResourceList": { + "base": null, + "refs": { + "MetricAlarm$OKActions": "

    The list of actions to execute when this alarm transitions into an OK state from any other state. Each action is specified as an Amazon Resource Name (ARN).

    ", + "MetricAlarm$AlarmActions": "

    The list of actions to execute when this alarm transitions into an ALARM state from any other state. Each action is specified as an Amazon Resource Name (ARN).

    ", + "MetricAlarm$InsufficientDataActions": "

    The list of actions to execute when this alarm transitions into an INSUFFICIENT_DATA state from any other state. Each action is specified as an Amazon Resource Name (ARN).

    The current WSDL lists this attribute as UnknownActions.", + "PutMetricAlarmInput$OKActions": "

    The list of actions to execute when this alarm transitions into an OK state from any other state. Each action is specified as an Amazon Resource Name (ARN).

    Valid Values: arn:aws:automate:region (e.g., us-east-1):ec2:stop | arn:aws:automate:region (e.g., us-east-1):ec2:terminate | arn:aws:automate:region (e.g., us-east-1):ec2:recover

    Valid Values (for use with IAM roles): arn:aws:swf:us-east-1:{customer-account}:action/actions/AWS_EC2.InstanceId.Stop/1.0 | arn:aws:swf:us-east-1:{customer-account}:action/actions/AWS_EC2.InstanceId.Terminate/1.0 | arn:aws:swf:us-east-1:{customer-account}:action/actions/AWS_EC2.InstanceId.Reboot/1.0

    Note: You must create at least one stop, terminate, or reboot alarm using the Amazon EC2 or CloudWatch console to create the EC2ActionsAccess IAM role for the first time. After this IAM role is created, you can create stop, terminate, or reboot alarms using the CLI.

    ", + "PutMetricAlarmInput$AlarmActions": "

    The list of actions to execute when this alarm transitions into an ALARM state from any other state. Each action is specified as an Amazon Resource Name (ARN).

    Valid Values: arn:aws:automate:region (e.g., us-east-1):ec2:stop | arn:aws:automate:region (e.g., us-east-1):ec2:terminate | arn:aws:automate:region (e.g., us-east-1):ec2:recover

    Valid Values (for use with IAM roles): arn:aws:swf:us-east-1:{customer-account}:action/actions/AWS_EC2.InstanceId.Stop/1.0 | arn:aws:swf:us-east-1:{customer-account}:action/actions/AWS_EC2.InstanceId.Terminate/1.0 | arn:aws:swf:us-east-1:{customer-account}:action/actions/AWS_EC2.InstanceId.Reboot/1.0

    Note: You must create at least one stop, terminate, or reboot alarm using the Amazon EC2 or CloudWatch console to create the EC2ActionsAccess IAM role for the first time. After this IAM role is created, you can create stop, terminate, or reboot alarms using the CLI.

    ", + "PutMetricAlarmInput$InsufficientDataActions": "

    The list of actions to execute when this alarm transitions into an INSUFFICIENT_DATA state from any other state. Each action is specified as an Amazon Resource Name (ARN).

    Valid Values: arn:aws:automate:region (e.g., us-east-1):ec2:stop | arn:aws:automate:region (e.g., us-east-1):ec2:terminate | arn:aws:automate:region (e.g., us-east-1):ec2:recover

    Valid Values (for use with IAM roles): arn:aws:swf:us-east-1:{customer-account}:action/actions/AWS_EC2.InstanceId.Stop/1.0 | arn:aws:swf:us-east-1:{customer-account}:action/actions/AWS_EC2.InstanceId.Terminate/1.0 | arn:aws:swf:us-east-1:{customer-account}:action/actions/AWS_EC2.InstanceId.Reboot/1.0

    Note: You must create at least one stop, terminate, or reboot alarm using the Amazon EC2 or CloudWatch console to create the EC2ActionsAccess IAM role for the first time. After this IAM role is created, you can create stop, terminate, or reboot alarms using the CLI.

    " + } + }, + "ResourceName": { + "base": null, + "refs": { + "ResourceList$member": null + } + }, + "ResourceNotFound": { + "base": "

    The named resource does not exist.

    ", + "refs": { + } + }, + "SetAlarmStateInput": { + "base": null, + "refs": { + } + }, + "StandardUnit": { + "base": null, + "refs": { + "Datapoint$Unit": "

    The standard unit used for the datapoint.

    ", + "DescribeAlarmsForMetricInput$Unit": "

    The unit for the metric.

    ", + "GetMetricStatisticsInput$Unit": "

    The unit for the metric.

    ", + "MetricAlarm$Unit": "

    The unit of the alarm's associated metric.

    ", + "MetricDatum$Unit": "

    The unit of the metric.

    ", + "PutMetricAlarmInput$Unit": "

    The statistic's unit of measure. For example, the units for the Amazon EC2 NetworkIn metric are Bytes because NetworkIn tracks the number of bytes that an instance receives on all network interfaces. You can also specify a unit when you create a custom metric. Units help provide conceptual meaning to your data. Metric data points that specify a unit of measure, such as Percent, are aggregated separately.

    Note: If you specify a unit, you must use a unit that is appropriate for the metric. Otherwise, this can cause an Amazon CloudWatch alarm to get stuck in the INSUFFICIENT_DATA state.

    " + } + }, + "StateReason": { + "base": null, + "refs": { + "MetricAlarm$StateReason": "

    A human-readable explanation for the alarm's state.

    ", + "SetAlarmStateInput$StateReason": "

    The reason that this alarm is set to this specific state (in human-readable text format).

    " + } + }, + "StateReasonData": { + "base": null, + "refs": { + "MetricAlarm$StateReasonData": "

    An explanation for the alarm's state, in machine-readable JSON format.

    ", + "SetAlarmStateInput$StateReasonData": "

    The reason that this alarm is set to this specific state (in machine-readable JSON format).

    " + } + }, + "StateValue": { + "base": null, + "refs": { + "DescribeAlarmsInput$StateValue": "

    The state value to be used in matching alarms.

    ", + "MetricAlarm$StateValue": "

    The state value for the alarm.

    ", + "SetAlarmStateInput$StateValue": "

    The value of the state.

    " + } + }, + "Statistic": { + "base": null, + "refs": { + "DescribeAlarmsForMetricInput$Statistic": "

    The statistic for the metric.

    ", + "MetricAlarm$Statistic": "

    The statistic to apply to the alarm's associated metric.

    ", + "PutMetricAlarmInput$Statistic": "

    The statistic to apply to the alarm's associated metric.

    ", + "Statistics$member": null + } + }, + "StatisticSet": { + "base": "

    The StatisticSet data type describes the StatisticValues component of MetricDatum, and represents a set of statistics that describes a specific metric.

    ", + "refs": { + "MetricDatum$StatisticValues": "

    A set of statistical values describing the metric.
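    Because StatisticValues replaces Value when you publish pre-aggregated data, a short sketch may help; per the StatisticSet shape in the api-2.json above, all four members are required. The metric name and numbers are illustrative, and svc is a *cloudwatch.CloudWatch as before.

    ```go
    package main

    import (
    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/service/cloudwatch"
    )

    // publishAggregate sends one pre-aggregated sample set instead of raw values,
    // which keeps the request small when many samples share a period.
    func publishAggregate(svc *cloudwatch.CloudWatch) error {
    	_, err := svc.PutMetricData(&cloudwatch.PutMetricDataInput{
    		Namespace: aws.String("MyApp"), // must not begin with "AWS/"
    		MetricData: []*cloudwatch.MetricDatum{{
    			MetricName: aws.String("RequestLatency"),
    			StatisticValues: &cloudwatch.StatisticSet{ // all four members required
    				SampleCount: aws.Float64(250),
    				Sum:         aws.Float64(103000),
    				Minimum:     aws.Float64(3),
    				Maximum:     aws.Float64(2900),
    			},
    			Unit: aws.String(cloudwatch.StandardUnitMilliseconds),
    		}},
    	})
    	return err
    }
    ```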

    " + } + }, + "Statistics": { + "base": null, + "refs": { + "GetMetricStatisticsInput$Statistics": "

    The metric statistics to return. For information about specific statistics returned by GetMetricStatistics, see Statistics in the Amazon CloudWatch Developer Guide.

    " + } + }, + "Threshold": { + "base": null, + "refs": { + "MetricAlarm$Threshold": "

    The value against which the specified statistic is compared.

    ", + "PutMetricAlarmInput$Threshold": "

    The value against which the specified statistic is compared.

    " + } + }, + "Timestamp": { + "base": null, + "refs": { + "AlarmHistoryItem$Timestamp": "

    The time stamp for the alarm history item.

    ", + "Datapoint$Timestamp": "

    The time stamp used for the datapoint.

    ", + "DescribeAlarmHistoryInput$StartDate": "

    The starting date to retrieve alarm history.

    ", + "DescribeAlarmHistoryInput$EndDate": "

    The ending date to retrieve alarm history.

    ", + "GetMetricStatisticsInput$StartTime": "

    The time stamp to use for determining the first datapoint to return. The value specified is inclusive; results include datapoints with the time stamp specified. The time stamp must be in ISO 8601 UTC format (e.g., 2014-09-03T23:00:00Z).

    The specified start time is rounded down to the nearest value. Datapoints are returned for start times up to two weeks in the past. Specified start times that are more than two weeks in the past will not return datapoints for metrics that are older than two weeks.

    Data that is timestamped 24 hours or more in the past may take in excess of 48 hours to become available from submission time using GetMetricStatistics.

    ", + "GetMetricStatisticsInput$EndTime": "

    The time stamp to use for determining the last datapoint to return. The value specified is exclusive; results will include datapoints up to the time stamp specified. The time stamp must be in ISO 8601 UTC format (e.g., 2014-09-03T23:00:00Z).

    ", + "MetricAlarm$AlarmConfigurationUpdatedTimestamp": "

    The time stamp of the last update to the alarm configuration.

    ", + "MetricAlarm$StateUpdatedTimestamp": "

    The time stamp of the last update to the alarm's state.

    ", + "MetricDatum$Timestamp": "

    The time stamp used for the metric, in ISO 8601 Coordinated Universal Time (UTC) format. If not specified, the default value is set to the time the metric data was received.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/monitoring/2010-08-01/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/monitoring/2010-08-01/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/monitoring/2010-08-01/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/monitoring/2010-08-01/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/monitoring/2010-08-01/paginators-1.json new file mode 100644 index 000000000..7bee28c28 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/monitoring/2010-08-01/paginators-1.json @@ -0,0 +1,24 @@ +{ + "pagination": { + "DescribeAlarmHistory": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxRecords", + "result_key": "AlarmHistoryItems" + }, + "DescribeAlarms": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxRecords", + "result_key": "MetricAlarms" + }, + "DescribeAlarmsForMetric": { + "result_key": "MetricAlarms" + }, + "ListMetrics": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "Metrics" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/monitoring/2010-08-01/waiters-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/monitoring/2010-08-01/waiters-2.json new file mode 100644 index 000000000..cb0cf0bfe --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/monitoring/2010-08-01/waiters-2.json @@ -0,0 +1,18 @@ +{ + "version": 2, + "waiters": { + "AlarmExists": { + "delay": 5, + "maxAttempts": 40, + "operation": "DescribeAlarms", + "acceptors": [ + { + "matcher": "path", + "expected": true, + "argument": "length(MetricAlarms[]) > `0`", + "state": "success" + } + ] + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/opsworks/2013-02-18/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/opsworks/2013-02-18/api-2.json new file mode 100644 index 000000000..931d1680e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/opsworks/2013-02-18/api-2.json @@ -0,0 +1,2606 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2013-02-18", + "endpointPrefix":"opsworks", + "jsonVersion":"1.1", + "protocol":"json", + "serviceFullName":"AWS OpsWorks", + "signatureVersion":"v4", + "targetPrefix":"OpsWorks_20130218" + }, + "operations":{ + "AssignInstance":{ + "name":"AssignInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssignInstanceRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "AssignVolume":{ + "name":"AssignVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssignVolumeRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "AssociateElasticIp":{ + "name":"AssociateElasticIp", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateElasticIpRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "AttachElasticLoadBalancer":{ + "name":"AttachElasticLoadBalancer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachElasticLoadBalancerRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "CloneStack":{ + "name":"CloneStack", + 
"http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CloneStackRequest"}, + "output":{"shape":"CloneStackResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "CreateApp":{ + "name":"CreateApp", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateAppRequest"}, + "output":{"shape":"CreateAppResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "CreateDeployment":{ + "name":"CreateDeployment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDeploymentRequest"}, + "output":{"shape":"CreateDeploymentResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "CreateInstance":{ + "name":"CreateInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateInstanceRequest"}, + "output":{"shape":"CreateInstanceResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "CreateLayer":{ + "name":"CreateLayer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateLayerRequest"}, + "output":{"shape":"CreateLayerResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "CreateStack":{ + "name":"CreateStack", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateStackRequest"}, + "output":{"shape":"CreateStackResult"}, + "errors":[ + {"shape":"ValidationException"} + ] + }, + "CreateUserProfile":{ + "name":"CreateUserProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateUserProfileRequest"}, + "output":{"shape":"CreateUserProfileResult"}, + "errors":[ + {"shape":"ValidationException"} + ] + }, + "DeleteApp":{ + "name":"DeleteApp", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAppRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DeleteInstance":{ + "name":"DeleteInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteInstanceRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DeleteLayer":{ + "name":"DeleteLayer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteLayerRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DeleteStack":{ + "name":"DeleteStack", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteStackRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DeleteUserProfile":{ + "name":"DeleteUserProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteUserProfileRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DeregisterEcsCluster":{ + "name":"DeregisterEcsCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeregisterEcsClusterRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DeregisterElasticIp":{ + "name":"DeregisterElasticIp", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeregisterElasticIpRequest"}, + "errors":[ + {"shape":"ValidationException"}, + 
{"shape":"ResourceNotFoundException"} + ] + }, + "DeregisterInstance":{ + "name":"DeregisterInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeregisterInstanceRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DeregisterRdsDbInstance":{ + "name":"DeregisterRdsDbInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeregisterRdsDbInstanceRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DeregisterVolume":{ + "name":"DeregisterVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeregisterVolumeRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeAgentVersions":{ + "name":"DescribeAgentVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAgentVersionsRequest"}, + "output":{"shape":"DescribeAgentVersionsResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeApps":{ + "name":"DescribeApps", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAppsRequest"}, + "output":{"shape":"DescribeAppsResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeCommands":{ + "name":"DescribeCommands", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeCommandsRequest"}, + "output":{"shape":"DescribeCommandsResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeDeployments":{ + "name":"DescribeDeployments", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDeploymentsRequest"}, + "output":{"shape":"DescribeDeploymentsResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeEcsClusters":{ + "name":"DescribeEcsClusters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEcsClustersRequest"}, + "output":{"shape":"DescribeEcsClustersResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeElasticIps":{ + "name":"DescribeElasticIps", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeElasticIpsRequest"}, + "output":{"shape":"DescribeElasticIpsResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeElasticLoadBalancers":{ + "name":"DescribeElasticLoadBalancers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeElasticLoadBalancersRequest"}, + "output":{"shape":"DescribeElasticLoadBalancersResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeInstances":{ + "name":"DescribeInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeInstancesRequest"}, + "output":{"shape":"DescribeInstancesResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeLayers":{ + "name":"DescribeLayers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeLayersRequest"}, + "output":{"shape":"DescribeLayersResult"}, + "errors":[ + {"shape":"ValidationException"}, + 
{"shape":"ResourceNotFoundException"} + ] + }, + "DescribeLoadBasedAutoScaling":{ + "name":"DescribeLoadBasedAutoScaling", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeLoadBasedAutoScalingRequest"}, + "output":{"shape":"DescribeLoadBasedAutoScalingResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeMyUserProfile":{ + "name":"DescribeMyUserProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{"shape":"DescribeMyUserProfileResult"} + }, + "DescribePermissions":{ + "name":"DescribePermissions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribePermissionsRequest"}, + "output":{"shape":"DescribePermissionsResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeRaidArrays":{ + "name":"DescribeRaidArrays", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRaidArraysRequest"}, + "output":{"shape":"DescribeRaidArraysResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeRdsDbInstances":{ + "name":"DescribeRdsDbInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRdsDbInstancesRequest"}, + "output":{"shape":"DescribeRdsDbInstancesResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeServiceErrors":{ + "name":"DescribeServiceErrors", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeServiceErrorsRequest"}, + "output":{"shape":"DescribeServiceErrorsResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeStackProvisioningParameters":{ + "name":"DescribeStackProvisioningParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeStackProvisioningParametersRequest"}, + "output":{"shape":"DescribeStackProvisioningParametersResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeStackSummary":{ + "name":"DescribeStackSummary", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeStackSummaryRequest"}, + "output":{"shape":"DescribeStackSummaryResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeStacks":{ + "name":"DescribeStacks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeStacksRequest"}, + "output":{"shape":"DescribeStacksResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeTimeBasedAutoScaling":{ + "name":"DescribeTimeBasedAutoScaling", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTimeBasedAutoScalingRequest"}, + "output":{"shape":"DescribeTimeBasedAutoScalingResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeUserProfiles":{ + "name":"DescribeUserProfiles", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeUserProfilesRequest"}, + "output":{"shape":"DescribeUserProfilesResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeVolumes":{ + "name":"DescribeVolumes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"DescribeVolumesRequest"}, + "output":{"shape":"DescribeVolumesResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DetachElasticLoadBalancer":{ + "name":"DetachElasticLoadBalancer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachElasticLoadBalancerRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"} + ] + }, + "DisassociateElasticIp":{ + "name":"DisassociateElasticIp", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisassociateElasticIpRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "GetHostnameSuggestion":{ + "name":"GetHostnameSuggestion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetHostnameSuggestionRequest"}, + "output":{"shape":"GetHostnameSuggestionResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "GrantAccess":{ + "name":"GrantAccess", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GrantAccessRequest"}, + "output":{"shape":"GrantAccessResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "RebootInstance":{ + "name":"RebootInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RebootInstanceRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "RegisterEcsCluster":{ + "name":"RegisterEcsCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterEcsClusterRequest"}, + "output":{"shape":"RegisterEcsClusterResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "RegisterElasticIp":{ + "name":"RegisterElasticIp", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterElasticIpRequest"}, + "output":{"shape":"RegisterElasticIpResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "RegisterInstance":{ + "name":"RegisterInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterInstanceRequest"}, + "output":{"shape":"RegisterInstanceResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "RegisterRdsDbInstance":{ + "name":"RegisterRdsDbInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterRdsDbInstanceRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "RegisterVolume":{ + "name":"RegisterVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterVolumeRequest"}, + "output":{"shape":"RegisterVolumeResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "SetLoadBasedAutoScaling":{ + "name":"SetLoadBasedAutoScaling", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetLoadBasedAutoScalingRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "SetPermission":{ + "name":"SetPermission", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetPermissionRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "SetTimeBasedAutoScaling":{ + 
"name":"SetTimeBasedAutoScaling", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetTimeBasedAutoScalingRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "StartInstance":{ + "name":"StartInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartInstanceRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "StartStack":{ + "name":"StartStack", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartStackRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "StopInstance":{ + "name":"StopInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopInstanceRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "StopStack":{ + "name":"StopStack", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopStackRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "UnassignInstance":{ + "name":"UnassignInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UnassignInstanceRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "UnassignVolume":{ + "name":"UnassignVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UnassignVolumeRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "UpdateApp":{ + "name":"UpdateApp", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateAppRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "UpdateElasticIp":{ + "name":"UpdateElasticIp", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateElasticIpRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "UpdateInstance":{ + "name":"UpdateInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateInstanceRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "UpdateLayer":{ + "name":"UpdateLayer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateLayerRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "UpdateMyUserProfile":{ + "name":"UpdateMyUserProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateMyUserProfileRequest"}, + "errors":[ + {"shape":"ValidationException"} + ] + }, + "UpdateRdsDbInstance":{ + "name":"UpdateRdsDbInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateRdsDbInstanceRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "UpdateStack":{ + "name":"UpdateStack", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateStackRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "UpdateUserProfile":{ + "name":"UpdateUserProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateUserProfileRequest"}, + "errors":[ + {"shape":"ValidationException"}, + 
{"shape":"ResourceNotFoundException"} + ] + }, + "UpdateVolume":{ + "name":"UpdateVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateVolumeRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + } + }, + "shapes":{ + "AgentVersion":{ + "type":"structure", + "members":{ + "Version":{"shape":"String"}, + "ConfigurationManager":{"shape":"StackConfigurationManager"} + } + }, + "AgentVersions":{ + "type":"list", + "member":{"shape":"AgentVersion"} + }, + "App":{ + "type":"structure", + "members":{ + "AppId":{"shape":"String"}, + "StackId":{"shape":"String"}, + "Shortname":{"shape":"String"}, + "Name":{"shape":"String"}, + "Description":{"shape":"String"}, + "DataSources":{"shape":"DataSources"}, + "Type":{"shape":"AppType"}, + "AppSource":{"shape":"Source"}, + "Domains":{"shape":"Strings"}, + "EnableSsl":{"shape":"Boolean"}, + "SslConfiguration":{"shape":"SslConfiguration"}, + "Attributes":{"shape":"AppAttributes"}, + "CreatedAt":{"shape":"String"}, + "Environment":{"shape":"EnvironmentVariables"} + } + }, + "AppAttributes":{ + "type":"map", + "key":{"shape":"AppAttributesKeys"}, + "value":{"shape":"String"} + }, + "AppAttributesKeys":{ + "type":"string", + "enum":[ + "DocumentRoot", + "RailsEnv", + "AutoBundleOnDeploy", + "AwsFlowRubySettings" + ] + }, + "AppType":{ + "type":"string", + "enum":[ + "aws-flow-ruby", + "java", + "rails", + "php", + "nodejs", + "static", + "other" + ] + }, + "Apps":{ + "type":"list", + "member":{"shape":"App"} + }, + "Architecture":{ + "type":"string", + "enum":[ + "x86_64", + "i386" + ] + }, + "AssignInstanceRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "LayerIds" + ], + "members":{ + "InstanceId":{"shape":"String"}, + "LayerIds":{"shape":"Strings"} + } + }, + "AssignVolumeRequest":{ + "type":"structure", + "required":["VolumeId"], + "members":{ + "VolumeId":{"shape":"String"}, + "InstanceId":{"shape":"String"} + } + }, + "AssociateElasticIpRequest":{ + "type":"structure", + "required":["ElasticIp"], + "members":{ + "ElasticIp":{"shape":"String"}, + "InstanceId":{"shape":"String"} + } + }, + "AttachElasticLoadBalancerRequest":{ + "type":"structure", + "required":[ + "ElasticLoadBalancerName", + "LayerId" + ], + "members":{ + "ElasticLoadBalancerName":{"shape":"String"}, + "LayerId":{"shape":"String"} + } + }, + "AutoScalingThresholds":{ + "type":"structure", + "members":{ + "InstanceCount":{"shape":"Integer"}, + "ThresholdsWaitTime":{"shape":"Minute"}, + "IgnoreMetricsTime":{"shape":"Minute"}, + "CpuThreshold":{"shape":"Double"}, + "MemoryThreshold":{"shape":"Double"}, + "LoadThreshold":{"shape":"Double"}, + "Alarms":{"shape":"Strings"} + } + }, + "AutoScalingType":{ + "type":"string", + "enum":[ + "load", + "timer" + ] + }, + "BlockDeviceMapping":{ + "type":"structure", + "members":{ + "DeviceName":{"shape":"String"}, + "NoDevice":{"shape":"String"}, + "VirtualName":{"shape":"String"}, + "Ebs":{"shape":"EbsBlockDevice"} + } + }, + "BlockDeviceMappings":{ + "type":"list", + "member":{"shape":"BlockDeviceMapping"} + }, + "Boolean":{ + "type":"boolean", + "box":true + }, + "ChefConfiguration":{ + "type":"structure", + "members":{ + "ManageBerkshelf":{"shape":"Boolean"}, + "BerkshelfVersion":{"shape":"String"} + } + }, + "CloneStackRequest":{ + "type":"structure", + "required":[ + "SourceStackId", + "ServiceRoleArn" + ], + "members":{ + "SourceStackId":{"shape":"String"}, + "Name":{"shape":"String"}, + "Region":{"shape":"String"}, + "VpcId":{"shape":"String"}, 
+ "Attributes":{"shape":"StackAttributes"}, + "ServiceRoleArn":{"shape":"String"}, + "DefaultInstanceProfileArn":{"shape":"String"}, + "DefaultOs":{"shape":"String"}, + "HostnameTheme":{"shape":"String"}, + "DefaultAvailabilityZone":{"shape":"String"}, + "DefaultSubnetId":{"shape":"String"}, + "CustomJson":{"shape":"String"}, + "ConfigurationManager":{"shape":"StackConfigurationManager"}, + "ChefConfiguration":{"shape":"ChefConfiguration"}, + "UseCustomCookbooks":{"shape":"Boolean"}, + "UseOpsworksSecurityGroups":{"shape":"Boolean"}, + "CustomCookbooksSource":{"shape":"Source"}, + "DefaultSshKeyName":{"shape":"String"}, + "ClonePermissions":{"shape":"Boolean"}, + "CloneAppIds":{"shape":"Strings"}, + "DefaultRootDeviceType":{"shape":"RootDeviceType"}, + "AgentVersion":{"shape":"String"} + } + }, + "CloneStackResult":{ + "type":"structure", + "members":{ + "StackId":{"shape":"String"} + } + }, + "Command":{ + "type":"structure", + "members":{ + "CommandId":{"shape":"String"}, + "InstanceId":{"shape":"String"}, + "DeploymentId":{"shape":"String"}, + "CreatedAt":{"shape":"DateTime"}, + "AcknowledgedAt":{"shape":"DateTime"}, + "CompletedAt":{"shape":"DateTime"}, + "Status":{"shape":"String"}, + "ExitCode":{"shape":"Integer"}, + "LogUrl":{"shape":"String"}, + "Type":{"shape":"String"} + } + }, + "Commands":{ + "type":"list", + "member":{"shape":"Command"} + }, + "CreateAppRequest":{ + "type":"structure", + "required":[ + "StackId", + "Name", + "Type" + ], + "members":{ + "StackId":{"shape":"String"}, + "Shortname":{"shape":"String"}, + "Name":{"shape":"String"}, + "Description":{"shape":"String"}, + "DataSources":{"shape":"DataSources"}, + "Type":{"shape":"AppType"}, + "AppSource":{"shape":"Source"}, + "Domains":{"shape":"Strings"}, + "EnableSsl":{"shape":"Boolean"}, + "SslConfiguration":{"shape":"SslConfiguration"}, + "Attributes":{"shape":"AppAttributes"}, + "Environment":{"shape":"EnvironmentVariables"} + } + }, + "CreateAppResult":{ + "type":"structure", + "members":{ + "AppId":{"shape":"String"} + } + }, + "CreateDeploymentRequest":{ + "type":"structure", + "required":[ + "StackId", + "Command" + ], + "members":{ + "StackId":{"shape":"String"}, + "AppId":{"shape":"String"}, + "InstanceIds":{"shape":"Strings"}, + "LayerIds":{"shape":"Strings"}, + "Command":{"shape":"DeploymentCommand"}, + "Comment":{"shape":"String"}, + "CustomJson":{"shape":"String"} + } + }, + "CreateDeploymentResult":{ + "type":"structure", + "members":{ + "DeploymentId":{"shape":"String"} + } + }, + "CreateInstanceRequest":{ + "type":"structure", + "required":[ + "StackId", + "LayerIds", + "InstanceType" + ], + "members":{ + "StackId":{"shape":"String"}, + "LayerIds":{"shape":"Strings"}, + "InstanceType":{"shape":"String"}, + "AutoScalingType":{"shape":"AutoScalingType"}, + "Hostname":{"shape":"String"}, + "Os":{"shape":"String"}, + "AmiId":{"shape":"String"}, + "SshKeyName":{"shape":"String"}, + "AvailabilityZone":{"shape":"String"}, + "VirtualizationType":{"shape":"String"}, + "SubnetId":{"shape":"String"}, + "Architecture":{"shape":"Architecture"}, + "RootDeviceType":{"shape":"RootDeviceType"}, + "BlockDeviceMappings":{"shape":"BlockDeviceMappings"}, + "InstallUpdatesOnBoot":{"shape":"Boolean"}, + "EbsOptimized":{"shape":"Boolean"}, + "AgentVersion":{"shape":"String"}, + "Tenancy":{"shape":"String"} + } + }, + "CreateInstanceResult":{ + "type":"structure", + "members":{ + "InstanceId":{"shape":"String"} + } + }, + "CreateLayerRequest":{ + "type":"structure", + "required":[ + "StackId", + "Type", + "Name", + 
"Shortname" + ], + "members":{ + "StackId":{"shape":"String"}, + "Type":{"shape":"LayerType"}, + "Name":{"shape":"String"}, + "Shortname":{"shape":"String"}, + "Attributes":{"shape":"LayerAttributes"}, + "CustomInstanceProfileArn":{"shape":"String"}, + "CustomJson":{"shape":"String"}, + "CustomSecurityGroupIds":{"shape":"Strings"}, + "Packages":{"shape":"Strings"}, + "VolumeConfigurations":{"shape":"VolumeConfigurations"}, + "EnableAutoHealing":{"shape":"Boolean"}, + "AutoAssignElasticIps":{"shape":"Boolean"}, + "AutoAssignPublicIps":{"shape":"Boolean"}, + "CustomRecipes":{"shape":"Recipes"}, + "InstallUpdatesOnBoot":{"shape":"Boolean"}, + "UseEbsOptimizedInstances":{"shape":"Boolean"}, + "LifecycleEventConfiguration":{"shape":"LifecycleEventConfiguration"} + } + }, + "CreateLayerResult":{ + "type":"structure", + "members":{ + "LayerId":{"shape":"String"} + } + }, + "CreateStackRequest":{ + "type":"structure", + "required":[ + "Name", + "Region", + "ServiceRoleArn", + "DefaultInstanceProfileArn" + ], + "members":{ + "Name":{"shape":"String"}, + "Region":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "Attributes":{"shape":"StackAttributes"}, + "ServiceRoleArn":{"shape":"String"}, + "DefaultInstanceProfileArn":{"shape":"String"}, + "DefaultOs":{"shape":"String"}, + "HostnameTheme":{"shape":"String"}, + "DefaultAvailabilityZone":{"shape":"String"}, + "DefaultSubnetId":{"shape":"String"}, + "CustomJson":{"shape":"String"}, + "ConfigurationManager":{"shape":"StackConfigurationManager"}, + "ChefConfiguration":{"shape":"ChefConfiguration"}, + "UseCustomCookbooks":{"shape":"Boolean"}, + "UseOpsworksSecurityGroups":{"shape":"Boolean"}, + "CustomCookbooksSource":{"shape":"Source"}, + "DefaultSshKeyName":{"shape":"String"}, + "DefaultRootDeviceType":{"shape":"RootDeviceType"}, + "AgentVersion":{"shape":"String"} + } + }, + "CreateStackResult":{ + "type":"structure", + "members":{ + "StackId":{"shape":"String"} + } + }, + "CreateUserProfileRequest":{ + "type":"structure", + "required":["IamUserArn"], + "members":{ + "IamUserArn":{"shape":"String"}, + "SshUsername":{"shape":"String"}, + "SshPublicKey":{"shape":"String"}, + "AllowSelfManagement":{"shape":"Boolean"} + } + }, + "CreateUserProfileResult":{ + "type":"structure", + "members":{ + "IamUserArn":{"shape":"String"} + } + }, + "DailyAutoScalingSchedule":{ + "type":"map", + "key":{"shape":"Hour"}, + "value":{"shape":"Switch"} + }, + "DataSource":{ + "type":"structure", + "members":{ + "Type":{"shape":"String"}, + "Arn":{"shape":"String"}, + "DatabaseName":{"shape":"String"} + } + }, + "DataSources":{ + "type":"list", + "member":{"shape":"DataSource"} + }, + "DateTime":{"type":"string"}, + "DeleteAppRequest":{ + "type":"structure", + "required":["AppId"], + "members":{ + "AppId":{"shape":"String"} + } + }, + "DeleteInstanceRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{"shape":"String"}, + "DeleteElasticIp":{"shape":"Boolean"}, + "DeleteVolumes":{"shape":"Boolean"} + } + }, + "DeleteLayerRequest":{ + "type":"structure", + "required":["LayerId"], + "members":{ + "LayerId":{"shape":"String"} + } + }, + "DeleteStackRequest":{ + "type":"structure", + "required":["StackId"], + "members":{ + "StackId":{"shape":"String"} + } + }, + "DeleteUserProfileRequest":{ + "type":"structure", + "required":["IamUserArn"], + "members":{ + "IamUserArn":{"shape":"String"} + } + }, + "Deployment":{ + "type":"structure", + "members":{ + "DeploymentId":{"shape":"String"}, + "StackId":{"shape":"String"}, + 
"AppId":{"shape":"String"}, + "CreatedAt":{"shape":"DateTime"}, + "CompletedAt":{"shape":"DateTime"}, + "Duration":{"shape":"Integer"}, + "IamUserArn":{"shape":"String"}, + "Comment":{"shape":"String"}, + "Command":{"shape":"DeploymentCommand"}, + "Status":{"shape":"String"}, + "CustomJson":{"shape":"String"}, + "InstanceIds":{"shape":"Strings"} + } + }, + "DeploymentCommand":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"DeploymentCommandName"}, + "Args":{"shape":"DeploymentCommandArgs"} + } + }, + "DeploymentCommandArgs":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"Strings"} + }, + "DeploymentCommandName":{ + "type":"string", + "enum":[ + "install_dependencies", + "update_dependencies", + "update_custom_cookbooks", + "execute_recipes", + "configure", + "setup", + "deploy", + "rollback", + "start", + "stop", + "restart", + "undeploy" + ] + }, + "Deployments":{ + "type":"list", + "member":{"shape":"Deployment"} + }, + "DeregisterEcsClusterRequest":{ + "type":"structure", + "required":["EcsClusterArn"], + "members":{ + "EcsClusterArn":{"shape":"String"} + } + }, + "DeregisterElasticIpRequest":{ + "type":"structure", + "required":["ElasticIp"], + "members":{ + "ElasticIp":{"shape":"String"} + } + }, + "DeregisterInstanceRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{"shape":"String"} + } + }, + "DeregisterRdsDbInstanceRequest":{ + "type":"structure", + "required":["RdsDbInstanceArn"], + "members":{ + "RdsDbInstanceArn":{"shape":"String"} + } + }, + "DeregisterVolumeRequest":{ + "type":"structure", + "required":["VolumeId"], + "members":{ + "VolumeId":{"shape":"String"} + } + }, + "DescribeAgentVersionsRequest":{ + "type":"structure", + "members":{ + "StackId":{"shape":"String"}, + "ConfigurationManager":{"shape":"StackConfigurationManager"} + } + }, + "DescribeAgentVersionsResult":{ + "type":"structure", + "members":{ + "AgentVersions":{"shape":"AgentVersions"} + } + }, + "DescribeAppsRequest":{ + "type":"structure", + "members":{ + "StackId":{"shape":"String"}, + "AppIds":{"shape":"Strings"} + } + }, + "DescribeAppsResult":{ + "type":"structure", + "members":{ + "Apps":{"shape":"Apps"} + } + }, + "DescribeCommandsRequest":{ + "type":"structure", + "members":{ + "DeploymentId":{"shape":"String"}, + "InstanceId":{"shape":"String"}, + "CommandIds":{"shape":"Strings"} + } + }, + "DescribeCommandsResult":{ + "type":"structure", + "members":{ + "Commands":{"shape":"Commands"} + } + }, + "DescribeDeploymentsRequest":{ + "type":"structure", + "members":{ + "StackId":{"shape":"String"}, + "AppId":{"shape":"String"}, + "DeploymentIds":{"shape":"Strings"} + } + }, + "DescribeDeploymentsResult":{ + "type":"structure", + "members":{ + "Deployments":{"shape":"Deployments"} + } + }, + "DescribeEcsClustersRequest":{ + "type":"structure", + "members":{ + "EcsClusterArns":{"shape":"Strings"}, + "StackId":{"shape":"String"}, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"Integer"} + } + }, + "DescribeEcsClustersResult":{ + "type":"structure", + "members":{ + "EcsClusters":{"shape":"EcsClusters"}, + "NextToken":{"shape":"String"} + } + }, + "DescribeElasticIpsRequest":{ + "type":"structure", + "members":{ + "InstanceId":{"shape":"String"}, + "StackId":{"shape":"String"}, + "Ips":{"shape":"Strings"} + } + }, + "DescribeElasticIpsResult":{ + "type":"structure", + "members":{ + "ElasticIps":{"shape":"ElasticIps"} + } + }, + "DescribeElasticLoadBalancersRequest":{ + "type":"structure", + "members":{ + 
"StackId":{"shape":"String"}, + "LayerIds":{"shape":"Strings"} + } + }, + "DescribeElasticLoadBalancersResult":{ + "type":"structure", + "members":{ + "ElasticLoadBalancers":{"shape":"ElasticLoadBalancers"} + } + }, + "DescribeInstancesRequest":{ + "type":"structure", + "members":{ + "StackId":{"shape":"String"}, + "LayerId":{"shape":"String"}, + "InstanceIds":{"shape":"Strings"} + } + }, + "DescribeInstancesResult":{ + "type":"structure", + "members":{ + "Instances":{"shape":"Instances"} + } + }, + "DescribeLayersRequest":{ + "type":"structure", + "members":{ + "StackId":{"shape":"String"}, + "LayerIds":{"shape":"Strings"} + } + }, + "DescribeLayersResult":{ + "type":"structure", + "members":{ + "Layers":{"shape":"Layers"} + } + }, + "DescribeLoadBasedAutoScalingRequest":{ + "type":"structure", + "required":["LayerIds"], + "members":{ + "LayerIds":{"shape":"Strings"} + } + }, + "DescribeLoadBasedAutoScalingResult":{ + "type":"structure", + "members":{ + "LoadBasedAutoScalingConfigurations":{"shape":"LoadBasedAutoScalingConfigurations"} + } + }, + "DescribeMyUserProfileResult":{ + "type":"structure", + "members":{ + "UserProfile":{"shape":"SelfUserProfile"} + } + }, + "DescribePermissionsRequest":{ + "type":"structure", + "members":{ + "IamUserArn":{"shape":"String"}, + "StackId":{"shape":"String"} + } + }, + "DescribePermissionsResult":{ + "type":"structure", + "members":{ + "Permissions":{"shape":"Permissions"} + } + }, + "DescribeRaidArraysRequest":{ + "type":"structure", + "members":{ + "InstanceId":{"shape":"String"}, + "StackId":{"shape":"String"}, + "RaidArrayIds":{"shape":"Strings"} + } + }, + "DescribeRaidArraysResult":{ + "type":"structure", + "members":{ + "RaidArrays":{"shape":"RaidArrays"} + } + }, + "DescribeRdsDbInstancesRequest":{ + "type":"structure", + "required":["StackId"], + "members":{ + "StackId":{"shape":"String"}, + "RdsDbInstanceArns":{"shape":"Strings"} + } + }, + "DescribeRdsDbInstancesResult":{ + "type":"structure", + "members":{ + "RdsDbInstances":{"shape":"RdsDbInstances"} + } + }, + "DescribeServiceErrorsRequest":{ + "type":"structure", + "members":{ + "StackId":{"shape":"String"}, + "InstanceId":{"shape":"String"}, + "ServiceErrorIds":{"shape":"Strings"} + } + }, + "DescribeServiceErrorsResult":{ + "type":"structure", + "members":{ + "ServiceErrors":{"shape":"ServiceErrors"} + } + }, + "DescribeStackProvisioningParametersRequest":{ + "type":"structure", + "required":["StackId"], + "members":{ + "StackId":{"shape":"String"} + } + }, + "DescribeStackProvisioningParametersResult":{ + "type":"structure", + "members":{ + "AgentInstallerUrl":{"shape":"String"}, + "Parameters":{"shape":"Parameters"} + } + }, + "DescribeStackSummaryRequest":{ + "type":"structure", + "required":["StackId"], + "members":{ + "StackId":{"shape":"String"} + } + }, + "DescribeStackSummaryResult":{ + "type":"structure", + "members":{ + "StackSummary":{"shape":"StackSummary"} + } + }, + "DescribeStacksRequest":{ + "type":"structure", + "members":{ + "StackIds":{"shape":"Strings"} + } + }, + "DescribeStacksResult":{ + "type":"structure", + "members":{ + "Stacks":{"shape":"Stacks"} + } + }, + "DescribeTimeBasedAutoScalingRequest":{ + "type":"structure", + "required":["InstanceIds"], + "members":{ + "InstanceIds":{"shape":"Strings"} + } + }, + "DescribeTimeBasedAutoScalingResult":{ + "type":"structure", + "members":{ + "TimeBasedAutoScalingConfigurations":{"shape":"TimeBasedAutoScalingConfigurations"} + } + }, + "DescribeUserProfilesRequest":{ + "type":"structure", + "members":{ + 
"IamUserArns":{"shape":"Strings"} + } + }, + "DescribeUserProfilesResult":{ + "type":"structure", + "members":{ + "UserProfiles":{"shape":"UserProfiles"} + } + }, + "DescribeVolumesRequest":{ + "type":"structure", + "members":{ + "InstanceId":{"shape":"String"}, + "StackId":{"shape":"String"}, + "RaidArrayId":{"shape":"String"}, + "VolumeIds":{"shape":"Strings"} + } + }, + "DescribeVolumesResult":{ + "type":"structure", + "members":{ + "Volumes":{"shape":"Volumes"} + } + }, + "DetachElasticLoadBalancerRequest":{ + "type":"structure", + "required":[ + "ElasticLoadBalancerName", + "LayerId" + ], + "members":{ + "ElasticLoadBalancerName":{"shape":"String"}, + "LayerId":{"shape":"String"} + } + }, + "DisassociateElasticIpRequest":{ + "type":"structure", + "required":["ElasticIp"], + "members":{ + "ElasticIp":{"shape":"String"} + } + }, + "Double":{ + "type":"double", + "box":true + }, + "EbsBlockDevice":{ + "type":"structure", + "members":{ + "SnapshotId":{"shape":"String"}, + "Iops":{"shape":"Integer"}, + "VolumeSize":{"shape":"Integer"}, + "VolumeType":{"shape":"VolumeType"}, + "DeleteOnTermination":{"shape":"Boolean"} + } + }, + "EcsCluster":{ + "type":"structure", + "members":{ + "EcsClusterArn":{"shape":"String"}, + "EcsClusterName":{"shape":"String"}, + "StackId":{"shape":"String"}, + "RegisteredAt":{"shape":"DateTime"} + } + }, + "EcsClusters":{ + "type":"list", + "member":{"shape":"EcsCluster"} + }, + "ElasticIp":{ + "type":"structure", + "members":{ + "Ip":{"shape":"String"}, + "Name":{"shape":"String"}, + "Domain":{"shape":"String"}, + "Region":{"shape":"String"}, + "InstanceId":{"shape":"String"} + } + }, + "ElasticIps":{ + "type":"list", + "member":{"shape":"ElasticIp"} + }, + "ElasticLoadBalancer":{ + "type":"structure", + "members":{ + "ElasticLoadBalancerName":{"shape":"String"}, + "Region":{"shape":"String"}, + "DnsName":{"shape":"String"}, + "StackId":{"shape":"String"}, + "LayerId":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "AvailabilityZones":{"shape":"Strings"}, + "SubnetIds":{"shape":"Strings"}, + "Ec2InstanceIds":{"shape":"Strings"} + } + }, + "ElasticLoadBalancers":{ + "type":"list", + "member":{"shape":"ElasticLoadBalancer"} + }, + "EnvironmentVariable":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{"shape":"String"}, + "Value":{"shape":"String"}, + "Secure":{"shape":"Boolean"} + } + }, + "EnvironmentVariables":{ + "type":"list", + "member":{"shape":"EnvironmentVariable"} + }, + "GetHostnameSuggestionRequest":{ + "type":"structure", + "required":["LayerId"], + "members":{ + "LayerId":{"shape":"String"} + } + }, + "GetHostnameSuggestionResult":{ + "type":"structure", + "members":{ + "LayerId":{"shape":"String"}, + "Hostname":{"shape":"String"} + } + }, + "GrantAccessRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{"shape":"String"}, + "ValidForInMinutes":{"shape":"ValidForInMinutes"} + } + }, + "GrantAccessResult":{ + "type":"structure", + "members":{ + "TemporaryCredential":{"shape":"TemporaryCredential"} + } + }, + "Hour":{"type":"string"}, + "Instance":{ + "type":"structure", + "members":{ + "AgentVersion":{"shape":"String"}, + "AmiId":{"shape":"String"}, + "Architecture":{"shape":"Architecture"}, + "AutoScalingType":{"shape":"AutoScalingType"}, + "AvailabilityZone":{"shape":"String"}, + "BlockDeviceMappings":{"shape":"BlockDeviceMappings"}, + "CreatedAt":{"shape":"DateTime"}, + "EbsOptimized":{"shape":"Boolean"}, + "Ec2InstanceId":{"shape":"String"}, + 
"EcsClusterArn":{"shape":"String"}, + "EcsContainerInstanceArn":{"shape":"String"}, + "ElasticIp":{"shape":"String"}, + "Hostname":{"shape":"String"}, + "InfrastructureClass":{"shape":"String"}, + "InstallUpdatesOnBoot":{"shape":"Boolean"}, + "InstanceId":{"shape":"String"}, + "InstanceProfileArn":{"shape":"String"}, + "InstanceType":{"shape":"String"}, + "LastServiceErrorId":{"shape":"String"}, + "LayerIds":{"shape":"Strings"}, + "Os":{"shape":"String"}, + "Platform":{"shape":"String"}, + "PrivateDns":{"shape":"String"}, + "PrivateIp":{"shape":"String"}, + "PublicDns":{"shape":"String"}, + "PublicIp":{"shape":"String"}, + "RegisteredBy":{"shape":"String"}, + "ReportedAgentVersion":{"shape":"String"}, + "ReportedOs":{"shape":"ReportedOs"}, + "RootDeviceType":{"shape":"RootDeviceType"}, + "RootDeviceVolumeId":{"shape":"String"}, + "SecurityGroupIds":{"shape":"Strings"}, + "SshHostDsaKeyFingerprint":{"shape":"String"}, + "SshHostRsaKeyFingerprint":{"shape":"String"}, + "SshKeyName":{"shape":"String"}, + "StackId":{"shape":"String"}, + "Status":{"shape":"String"}, + "SubnetId":{"shape":"String"}, + "Tenancy":{"shape":"String"}, + "VirtualizationType":{"shape":"VirtualizationType"} + } + }, + "InstanceIdentity":{ + "type":"structure", + "members":{ + "Document":{"shape":"String"}, + "Signature":{"shape":"String"} + } + }, + "Instances":{ + "type":"list", + "member":{"shape":"Instance"} + }, + "InstancesCount":{ + "type":"structure", + "members":{ + "Assigning":{"shape":"Integer"}, + "Booting":{"shape":"Integer"}, + "ConnectionLost":{"shape":"Integer"}, + "Deregistering":{"shape":"Integer"}, + "Online":{"shape":"Integer"}, + "Pending":{"shape":"Integer"}, + "Rebooting":{"shape":"Integer"}, + "Registered":{"shape":"Integer"}, + "Registering":{"shape":"Integer"}, + "Requested":{"shape":"Integer"}, + "RunningSetup":{"shape":"Integer"}, + "SetupFailed":{"shape":"Integer"}, + "ShuttingDown":{"shape":"Integer"}, + "StartFailed":{"shape":"Integer"}, + "Stopped":{"shape":"Integer"}, + "Stopping":{"shape":"Integer"}, + "Terminated":{"shape":"Integer"}, + "Terminating":{"shape":"Integer"}, + "Unassigning":{"shape":"Integer"} + } + }, + "Integer":{ + "type":"integer", + "box":true + }, + "Layer":{ + "type":"structure", + "members":{ + "StackId":{"shape":"String"}, + "LayerId":{"shape":"String"}, + "Type":{"shape":"LayerType"}, + "Name":{"shape":"String"}, + "Shortname":{"shape":"String"}, + "Attributes":{"shape":"LayerAttributes"}, + "CustomInstanceProfileArn":{"shape":"String"}, + "CustomJson":{"shape":"String"}, + "CustomSecurityGroupIds":{"shape":"Strings"}, + "DefaultSecurityGroupNames":{"shape":"Strings"}, + "Packages":{"shape":"Strings"}, + "VolumeConfigurations":{"shape":"VolumeConfigurations"}, + "EnableAutoHealing":{"shape":"Boolean"}, + "AutoAssignElasticIps":{"shape":"Boolean"}, + "AutoAssignPublicIps":{"shape":"Boolean"}, + "DefaultRecipes":{"shape":"Recipes"}, + "CustomRecipes":{"shape":"Recipes"}, + "CreatedAt":{"shape":"DateTime"}, + "InstallUpdatesOnBoot":{"shape":"Boolean"}, + "UseEbsOptimizedInstances":{"shape":"Boolean"}, + "LifecycleEventConfiguration":{"shape":"LifecycleEventConfiguration"} + } + }, + "LayerAttributes":{ + "type":"map", + "key":{"shape":"LayerAttributesKeys"}, + "value":{"shape":"String"} + }, + "LayerAttributesKeys":{ + "type":"string", + "enum":[ + "EcsClusterArn", + "EnableHaproxyStats", + "HaproxyStatsUrl", + "HaproxyStatsUser", + "HaproxyStatsPassword", + "HaproxyHealthCheckUrl", + "HaproxyHealthCheckMethod", + "MysqlRootPassword", + 
"MysqlRootPasswordUbiquitous", + "GangliaUrl", + "GangliaUser", + "GangliaPassword", + "MemcachedMemory", + "NodejsVersion", + "RubyVersion", + "RubygemsVersion", + "ManageBundler", + "BundlerVersion", + "RailsStack", + "PassengerVersion", + "Jvm", + "JvmVersion", + "JvmOptions", + "JavaAppServer", + "JavaAppServerVersion" + ] + }, + "LayerType":{ + "type":"string", + "enum":[ + "aws-flow-ruby", + "ecs-cluster", + "java-app", + "lb", + "web", + "php-app", + "rails-app", + "nodejs-app", + "memcached", + "db-master", + "monitoring-master", + "custom" + ] + }, + "Layers":{ + "type":"list", + "member":{"shape":"Layer"} + }, + "LifecycleEventConfiguration":{ + "type":"structure", + "members":{ + "Shutdown":{"shape":"ShutdownEventConfiguration"} + } + }, + "LoadBasedAutoScalingConfiguration":{ + "type":"structure", + "members":{ + "LayerId":{"shape":"String"}, + "Enable":{"shape":"Boolean"}, + "UpScaling":{"shape":"AutoScalingThresholds"}, + "DownScaling":{"shape":"AutoScalingThresholds"} + } + }, + "LoadBasedAutoScalingConfigurations":{ + "type":"list", + "member":{"shape":"LoadBasedAutoScalingConfiguration"} + }, + "Minute":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "Parameters":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "Permission":{ + "type":"structure", + "members":{ + "StackId":{"shape":"String"}, + "IamUserArn":{"shape":"String"}, + "AllowSsh":{"shape":"Boolean"}, + "AllowSudo":{"shape":"Boolean"}, + "Level":{"shape":"String"} + } + }, + "Permissions":{ + "type":"list", + "member":{"shape":"Permission"} + }, + "RaidArray":{ + "type":"structure", + "members":{ + "RaidArrayId":{"shape":"String"}, + "InstanceId":{"shape":"String"}, + "Name":{"shape":"String"}, + "RaidLevel":{"shape":"Integer"}, + "NumberOfDisks":{"shape":"Integer"}, + "Size":{"shape":"Integer"}, + "Device":{"shape":"String"}, + "MountPoint":{"shape":"String"}, + "AvailabilityZone":{"shape":"String"}, + "CreatedAt":{"shape":"DateTime"}, + "StackId":{"shape":"String"}, + "VolumeType":{"shape":"String"}, + "Iops":{"shape":"Integer"} + } + }, + "RaidArrays":{ + "type":"list", + "member":{"shape":"RaidArray"} + }, + "RdsDbInstance":{ + "type":"structure", + "members":{ + "RdsDbInstanceArn":{"shape":"String"}, + "DbInstanceIdentifier":{"shape":"String"}, + "DbUser":{"shape":"String"}, + "DbPassword":{"shape":"String"}, + "Region":{"shape":"String"}, + "Address":{"shape":"String"}, + "Engine":{"shape":"String"}, + "StackId":{"shape":"String"}, + "MissingOnRds":{"shape":"Boolean"} + } + }, + "RdsDbInstances":{ + "type":"list", + "member":{"shape":"RdsDbInstance"} + }, + "RebootInstanceRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{"shape":"String"} + } + }, + "Recipes":{ + "type":"structure", + "members":{ + "Setup":{"shape":"Strings"}, + "Configure":{"shape":"Strings"}, + "Deploy":{"shape":"Strings"}, + "Undeploy":{"shape":"Strings"}, + "Shutdown":{"shape":"Strings"} + } + }, + "RegisterEcsClusterRequest":{ + "type":"structure", + "required":[ + "EcsClusterArn", + "StackId" + ], + "members":{ + "EcsClusterArn":{"shape":"String"}, + "StackId":{"shape":"String"} + } + }, + "RegisterEcsClusterResult":{ + "type":"structure", + "members":{ + "EcsClusterArn":{"shape":"String"} + } + }, + "RegisterElasticIpRequest":{ + "type":"structure", + "required":[ + "ElasticIp", + "StackId" + ], + "members":{ + "ElasticIp":{"shape":"String"}, + "StackId":{"shape":"String"} + } + }, + "RegisterElasticIpResult":{ + "type":"structure", + 
"members":{ + "ElasticIp":{"shape":"String"} + } + }, + "RegisterInstanceRequest":{ + "type":"structure", + "required":["StackId"], + "members":{ + "StackId":{"shape":"String"}, + "Hostname":{"shape":"String"}, + "PublicIp":{"shape":"String"}, + "PrivateIp":{"shape":"String"}, + "RsaPublicKey":{"shape":"String"}, + "RsaPublicKeyFingerprint":{"shape":"String"}, + "InstanceIdentity":{"shape":"InstanceIdentity"} + } + }, + "RegisterInstanceResult":{ + "type":"structure", + "members":{ + "InstanceId":{"shape":"String"} + } + }, + "RegisterRdsDbInstanceRequest":{ + "type":"structure", + "required":[ + "StackId", + "RdsDbInstanceArn", + "DbUser", + "DbPassword" + ], + "members":{ + "StackId":{"shape":"String"}, + "RdsDbInstanceArn":{"shape":"String"}, + "DbUser":{"shape":"String"}, + "DbPassword":{"shape":"String"} + } + }, + "RegisterVolumeRequest":{ + "type":"structure", + "required":["StackId"], + "members":{ + "Ec2VolumeId":{"shape":"String"}, + "StackId":{"shape":"String"} + } + }, + "RegisterVolumeResult":{ + "type":"structure", + "members":{ + "VolumeId":{"shape":"String"} + } + }, + "ReportedOs":{ + "type":"structure", + "members":{ + "Family":{"shape":"String"}, + "Name":{"shape":"String"}, + "Version":{"shape":"String"} + } + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "RootDeviceType":{ + "type":"string", + "enum":[ + "ebs", + "instance-store" + ] + }, + "SelfUserProfile":{ + "type":"structure", + "members":{ + "IamUserArn":{"shape":"String"}, + "Name":{"shape":"String"}, + "SshUsername":{"shape":"String"}, + "SshPublicKey":{"shape":"String"} + } + }, + "ServiceError":{ + "type":"structure", + "members":{ + "ServiceErrorId":{"shape":"String"}, + "StackId":{"shape":"String"}, + "InstanceId":{"shape":"String"}, + "Type":{"shape":"String"}, + "Message":{"shape":"String"}, + "CreatedAt":{"shape":"DateTime"} + } + }, + "ServiceErrors":{ + "type":"list", + "member":{"shape":"ServiceError"} + }, + "SetLoadBasedAutoScalingRequest":{ + "type":"structure", + "required":["LayerId"], + "members":{ + "LayerId":{"shape":"String"}, + "Enable":{"shape":"Boolean"}, + "UpScaling":{"shape":"AutoScalingThresholds"}, + "DownScaling":{"shape":"AutoScalingThresholds"} + } + }, + "SetPermissionRequest":{ + "type":"structure", + "required":[ + "StackId", + "IamUserArn" + ], + "members":{ + "StackId":{"shape":"String"}, + "IamUserArn":{"shape":"String"}, + "AllowSsh":{"shape":"Boolean"}, + "AllowSudo":{"shape":"Boolean"}, + "Level":{"shape":"String"} + } + }, + "SetTimeBasedAutoScalingRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{"shape":"String"}, + "AutoScalingSchedule":{"shape":"WeeklyAutoScalingSchedule"} + } + }, + "ShutdownEventConfiguration":{ + "type":"structure", + "members":{ + "ExecutionTimeout":{"shape":"Integer"}, + "DelayUntilElbConnectionsDrained":{"shape":"Boolean"} + } + }, + "Source":{ + "type":"structure", + "members":{ + "Type":{"shape":"SourceType"}, + "Url":{"shape":"String"}, + "Username":{"shape":"String"}, + "Password":{"shape":"String"}, + "SshKey":{"shape":"String"}, + "Revision":{"shape":"String"} + } + }, + "SourceType":{ + "type":"string", + "enum":[ + "git", + "svn", + "archive", + "s3" + ] + }, + "SslConfiguration":{ + "type":"structure", + "required":[ + "Certificate", + "PrivateKey" + ], + "members":{ + "Certificate":{"shape":"String"}, + "PrivateKey":{"shape":"String"}, + "Chain":{"shape":"String"} + } + }, + "Stack":{ + "type":"structure", + 
"members":{ + "StackId":{"shape":"String"}, + "Name":{"shape":"String"}, + "Arn":{"shape":"String"}, + "Region":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "Attributes":{"shape":"StackAttributes"}, + "ServiceRoleArn":{"shape":"String"}, + "DefaultInstanceProfileArn":{"shape":"String"}, + "DefaultOs":{"shape":"String"}, + "HostnameTheme":{"shape":"String"}, + "DefaultAvailabilityZone":{"shape":"String"}, + "DefaultSubnetId":{"shape":"String"}, + "CustomJson":{"shape":"String"}, + "ConfigurationManager":{"shape":"StackConfigurationManager"}, + "ChefConfiguration":{"shape":"ChefConfiguration"}, + "UseCustomCookbooks":{"shape":"Boolean"}, + "UseOpsworksSecurityGroups":{"shape":"Boolean"}, + "CustomCookbooksSource":{"shape":"Source"}, + "DefaultSshKeyName":{"shape":"String"}, + "CreatedAt":{"shape":"DateTime"}, + "DefaultRootDeviceType":{"shape":"RootDeviceType"}, + "AgentVersion":{"shape":"String"} + } + }, + "StackAttributes":{ + "type":"map", + "key":{"shape":"StackAttributesKeys"}, + "value":{"shape":"String"} + }, + "StackAttributesKeys":{ + "type":"string", + "enum":["Color"] + }, + "StackConfigurationManager":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "Version":{"shape":"String"} + } + }, + "StackSummary":{ + "type":"structure", + "members":{ + "StackId":{"shape":"String"}, + "Name":{"shape":"String"}, + "Arn":{"shape":"String"}, + "LayersCount":{"shape":"Integer"}, + "AppsCount":{"shape":"Integer"}, + "InstancesCount":{"shape":"InstancesCount"} + } + }, + "Stacks":{ + "type":"list", + "member":{"shape":"Stack"} + }, + "StartInstanceRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{"shape":"String"} + } + }, + "StartStackRequest":{ + "type":"structure", + "required":["StackId"], + "members":{ + "StackId":{"shape":"String"} + } + }, + "StopInstanceRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{"shape":"String"} + } + }, + "StopStackRequest":{ + "type":"structure", + "required":["StackId"], + "members":{ + "StackId":{"shape":"String"} + } + }, + "String":{"type":"string"}, + "Strings":{ + "type":"list", + "member":{"shape":"String"} + }, + "Switch":{"type":"string"}, + "TemporaryCredential":{ + "type":"structure", + "members":{ + "Username":{"shape":"String"}, + "Password":{"shape":"String"}, + "ValidForInMinutes":{"shape":"Integer"}, + "InstanceId":{"shape":"String"} + } + }, + "TimeBasedAutoScalingConfiguration":{ + "type":"structure", + "members":{ + "InstanceId":{"shape":"String"}, + "AutoScalingSchedule":{"shape":"WeeklyAutoScalingSchedule"} + } + }, + "TimeBasedAutoScalingConfigurations":{ + "type":"list", + "member":{"shape":"TimeBasedAutoScalingConfiguration"} + }, + "UnassignInstanceRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{"shape":"String"} + } + }, + "UnassignVolumeRequest":{ + "type":"structure", + "required":["VolumeId"], + "members":{ + "VolumeId":{"shape":"String"} + } + }, + "UpdateAppRequest":{ + "type":"structure", + "required":["AppId"], + "members":{ + "AppId":{"shape":"String"}, + "Name":{"shape":"String"}, + "Description":{"shape":"String"}, + "DataSources":{"shape":"DataSources"}, + "Type":{"shape":"AppType"}, + "AppSource":{"shape":"Source"}, + "Domains":{"shape":"Strings"}, + "EnableSsl":{"shape":"Boolean"}, + "SslConfiguration":{"shape":"SslConfiguration"}, + "Attributes":{"shape":"AppAttributes"}, + "Environment":{"shape":"EnvironmentVariables"} + } + }, + "UpdateElasticIpRequest":{ + 
"type":"structure", + "required":["ElasticIp"], + "members":{ + "ElasticIp":{"shape":"String"}, + "Name":{"shape":"String"} + } + }, + "UpdateInstanceRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{"shape":"String"}, + "LayerIds":{"shape":"Strings"}, + "InstanceType":{"shape":"String"}, + "AutoScalingType":{"shape":"AutoScalingType"}, + "Hostname":{"shape":"String"}, + "Os":{"shape":"String"}, + "AmiId":{"shape":"String"}, + "SshKeyName":{"shape":"String"}, + "Architecture":{"shape":"Architecture"}, + "InstallUpdatesOnBoot":{"shape":"Boolean"}, + "EbsOptimized":{"shape":"Boolean"}, + "AgentVersion":{"shape":"String"} + } + }, + "UpdateLayerRequest":{ + "type":"structure", + "required":["LayerId"], + "members":{ + "LayerId":{"shape":"String"}, + "Name":{"shape":"String"}, + "Shortname":{"shape":"String"}, + "Attributes":{"shape":"LayerAttributes"}, + "CustomInstanceProfileArn":{"shape":"String"}, + "CustomJson":{"shape":"String"}, + "CustomSecurityGroupIds":{"shape":"Strings"}, + "Packages":{"shape":"Strings"}, + "VolumeConfigurations":{"shape":"VolumeConfigurations"}, + "EnableAutoHealing":{"shape":"Boolean"}, + "AutoAssignElasticIps":{"shape":"Boolean"}, + "AutoAssignPublicIps":{"shape":"Boolean"}, + "CustomRecipes":{"shape":"Recipes"}, + "InstallUpdatesOnBoot":{"shape":"Boolean"}, + "UseEbsOptimizedInstances":{"shape":"Boolean"}, + "LifecycleEventConfiguration":{"shape":"LifecycleEventConfiguration"} + } + }, + "UpdateMyUserProfileRequest":{ + "type":"structure", + "members":{ + "SshPublicKey":{"shape":"String"} + } + }, + "UpdateRdsDbInstanceRequest":{ + "type":"structure", + "required":["RdsDbInstanceArn"], + "members":{ + "RdsDbInstanceArn":{"shape":"String"}, + "DbUser":{"shape":"String"}, + "DbPassword":{"shape":"String"} + } + }, + "UpdateStackRequest":{ + "type":"structure", + "required":["StackId"], + "members":{ + "StackId":{"shape":"String"}, + "Name":{"shape":"String"}, + "Attributes":{"shape":"StackAttributes"}, + "ServiceRoleArn":{"shape":"String"}, + "DefaultInstanceProfileArn":{"shape":"String"}, + "DefaultOs":{"shape":"String"}, + "HostnameTheme":{"shape":"String"}, + "DefaultAvailabilityZone":{"shape":"String"}, + "DefaultSubnetId":{"shape":"String"}, + "CustomJson":{"shape":"String"}, + "ConfigurationManager":{"shape":"StackConfigurationManager"}, + "ChefConfiguration":{"shape":"ChefConfiguration"}, + "UseCustomCookbooks":{"shape":"Boolean"}, + "CustomCookbooksSource":{"shape":"Source"}, + "DefaultSshKeyName":{"shape":"String"}, + "DefaultRootDeviceType":{"shape":"RootDeviceType"}, + "UseOpsworksSecurityGroups":{"shape":"Boolean"}, + "AgentVersion":{"shape":"String"} + } + }, + "UpdateUserProfileRequest":{ + "type":"structure", + "required":["IamUserArn"], + "members":{ + "IamUserArn":{"shape":"String"}, + "SshUsername":{"shape":"String"}, + "SshPublicKey":{"shape":"String"}, + "AllowSelfManagement":{"shape":"Boolean"} + } + }, + "UpdateVolumeRequest":{ + "type":"structure", + "required":["VolumeId"], + "members":{ + "VolumeId":{"shape":"String"}, + "Name":{"shape":"String"}, + "MountPoint":{"shape":"String"} + } + }, + "UserProfile":{ + "type":"structure", + "members":{ + "IamUserArn":{"shape":"String"}, + "Name":{"shape":"String"}, + "SshUsername":{"shape":"String"}, + "SshPublicKey":{"shape":"String"}, + "AllowSelfManagement":{"shape":"Boolean"} + } + }, + "UserProfiles":{ + "type":"list", + "member":{"shape":"UserProfile"} + }, + "ValidForInMinutes":{ + "type":"integer", + "box":true, + "max":1440, + "min":60 + }, + 
"ValidationException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "VirtualizationType":{ + "type":"string", + "enum":[ + "paravirtual", + "hvm" + ] + }, + "Volume":{ + "type":"structure", + "members":{ + "VolumeId":{"shape":"String"}, + "Ec2VolumeId":{"shape":"String"}, + "Name":{"shape":"String"}, + "RaidArrayId":{"shape":"String"}, + "InstanceId":{"shape":"String"}, + "Status":{"shape":"String"}, + "Size":{"shape":"Integer"}, + "Device":{"shape":"String"}, + "MountPoint":{"shape":"String"}, + "Region":{"shape":"String"}, + "AvailabilityZone":{"shape":"String"}, + "VolumeType":{"shape":"String"}, + "Iops":{"shape":"Integer"} + } + }, + "VolumeConfiguration":{ + "type":"structure", + "required":[ + "MountPoint", + "NumberOfDisks", + "Size" + ], + "members":{ + "MountPoint":{"shape":"String"}, + "RaidLevel":{"shape":"Integer"}, + "NumberOfDisks":{"shape":"Integer"}, + "Size":{"shape":"Integer"}, + "VolumeType":{"shape":"String"}, + "Iops":{"shape":"Integer"} + } + }, + "VolumeConfigurations":{ + "type":"list", + "member":{"shape":"VolumeConfiguration"} + }, + "VolumeType":{ + "type":"string", + "enum":[ + "gp2", + "io1", + "standard" + ] + }, + "Volumes":{ + "type":"list", + "member":{"shape":"Volume"} + }, + "WeeklyAutoScalingSchedule":{ + "type":"structure", + "members":{ + "Monday":{"shape":"DailyAutoScalingSchedule"}, + "Tuesday":{"shape":"DailyAutoScalingSchedule"}, + "Wednesday":{"shape":"DailyAutoScalingSchedule"}, + "Thursday":{"shape":"DailyAutoScalingSchedule"}, + "Friday":{"shape":"DailyAutoScalingSchedule"}, + "Saturday":{"shape":"DailyAutoScalingSchedule"}, + "Sunday":{"shape":"DailyAutoScalingSchedule"} + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/opsworks/2013-02-18/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/opsworks/2013-02-18/docs-2.json new file mode 100644 index 000000000..1e74540b2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/opsworks/2013-02-18/docs-2.json @@ -0,0 +1,1688 @@ +{ + "version": "2.0", + "service": "AWS OpsWorks

    Welcome to the AWS OpsWorks API Reference. This guide provides descriptions, syntax, and usage examples for AWS OpsWorks actions and data types, including common parameters and error codes.

    AWS OpsWorks is an application management service that provides an integrated experience for overseeing the complete application lifecycle. For information about this product, go to the AWS OpsWorks details page.

    SDKs and CLI

    The most common way to use the AWS OpsWorks API is by using the AWS Command Line Interface (CLI) or by using one of the AWS SDKs to implement applications in your preferred language. For more information, see the documentation for the AWS CLI or for the AWS SDK for your language of choice.

    Endpoints

    AWS OpsWorks supports two endpoints, opsworks.us-east-1.amazonaws.com and opsworks.ap-south-1.amazonaws.com (both HTTPS). You must connect to one of those two endpoints. You can then use the API to direct AWS OpsWorks to create stacks in any AWS region. Stacks created in all regions except ap-south-1 are connected to the us-east-1 regional endpoint; stacks created in ap-south-1 are associated with the ap-south-1 regional endpoint, and can be accessed or managed only through that endpoint.

    Chef Versions

    When you call CreateStack, CloneStack, or UpdateStack, we recommend that you use the ConfigurationManager parameter to specify the Chef version. The recommended and default value for Linux stacks is currently 12. Windows stacks use Chef 12.2. For more information, see Chef Versions.

    You can specify Chef 12, 11.10, or 11.4 for your Linux stack. We recommend migrating your existing Linux stacks to Chef 12 as soon as possible.
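
    Because this model ships as part of a Go SDK vendor update, a minimal client-setup sketch may help orient readers. It assumes the vendored aws-sdk-go packages; the region value is illustrative, and credentials are resolved from the SDK's default provider chain.

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/opsworks"
    )

    func main() {
        // API calls go to a regional endpoint; the stacks themselves can
        // be created in any region (see Endpoints above).
        sess := session.New(&aws.Config{Region: aws.String("us-east-1")})
        svc := opsworks.New(sess)

        // List all stacks visible to the caller.
        out, err := svc.DescribeStacks(&opsworks.DescribeStacksInput{})
        if err != nil {
            log.Fatal(err)
        }
        for _, s := range out.Stacks {
            fmt.Println(aws.StringValue(s.StackId), aws.StringValue(s.Name))
        }
    }

    The later sketches in this section reuse the svc client created here.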

    ", + "operations": { + "AssignInstance": "

    Assigns a registered instance to a layer.

    • You can assign registered on-premises instances to any layer type.

    • You can assign registered Amazon EC2 instances only to custom layers.

    • You cannot use this action with instances that were created with AWS OpsWorks.

    Required Permissions: To use this action, an AWS Identity and Access Management (IAM) user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "AssignVolume": "

    Assigns one of the stack's registered Amazon EBS volumes to a specified instance. The volume must first be registered with the stack by calling RegisterVolume. After you register the volume, you must call UpdateVolume to specify a mount point before calling AssignVolume. For more information, see Resource Management.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
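
    The register-then-mount-then-assign sequence described above maps to three calls. A minimal sketch, assuming the svc client from the setup example; stackID, ec2VolumeID, instanceID, and the mount point are placeholders:

    // assignVolume follows the required sequence: RegisterVolume, then
    // UpdateVolume to set a mount point, then AssignVolume.
    func assignVolume(svc *opsworks.OpsWorks, stackID, ec2VolumeID, instanceID string) error {
        reg, err := svc.RegisterVolume(&opsworks.RegisterVolumeInput{
            StackId:     aws.String(stackID),
            Ec2VolumeId: aws.String(ec2VolumeID),
        })
        if err != nil {
            return err
        }
        if _, err := svc.UpdateVolume(&opsworks.UpdateVolumeInput{
            VolumeId:   reg.VolumeId,
            MountPoint: aws.String("/mnt/data"), // illustrative mount point
        }); err != nil {
            return err
        }
        _, err = svc.AssignVolume(&opsworks.AssignVolumeInput{
            VolumeId:   reg.VolumeId,
            InstanceId: aws.String(instanceID),
        })
        return err
    }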

    ", + "AssociateElasticIp": "

    Associates one of the stack's registered Elastic IP addresses with a specified instance. The address must first be registered with the stack by calling RegisterElasticIp. For more information, see Resource Management.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
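
    The same register-first pattern applies to Elastic IP addresses. A sketch, assuming the svc client from the setup example and placeholder identifiers:

    // associateIP registers an Elastic IP address with a stack and then
    // associates it with an instance, per the sequence described above.
    func associateIP(svc *opsworks.OpsWorks, stackID, elasticIP, instanceID string) error {
        if _, err := svc.RegisterElasticIp(&opsworks.RegisterElasticIpInput{
            ElasticIp: aws.String(elasticIP),
            StackId:   aws.String(stackID),
        }); err != nil {
            return err
        }
        _, err := svc.AssociateElasticIp(&opsworks.AssociateElasticIpInput{
            ElasticIp:  aws.String(elasticIP),
            InstanceId: aws.String(instanceID),
        })
        return err
    }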

    ", + "AttachElasticLoadBalancer": "

    Attaches an Elastic Load Balancing load balancer to a specified layer. For more information, see Elastic Load Balancing.

    You must create the Elastic Load Balancing instance separately, by using the Elastic Load Balancing console, API, or CLI. For more information, see the Elastic Load Balancing Developer Guide.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "CloneStack": "

    Creates a clone of a specified stack. For more information, see Clone a Stack. By default, all parameters are set to the values used by the parent stack.

    Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
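
    Per the CloneStackRequest shape above, only SourceStackId and ServiceRoleArn are required; everything else defaults to the parent stack's values. A sketch, assuming the svc client from the setup example:

    // cloneStack copies an existing stack and returns the new stack's ID.
    func cloneStack(svc *opsworks.OpsWorks, sourceStackID, serviceRoleARN string) (string, error) {
        out, err := svc.CloneStack(&opsworks.CloneStackInput{
            SourceStackId:  aws.String(sourceStackID),
            ServiceRoleArn: aws.String(serviceRoleARN),
        })
        if err != nil {
            return "", err
        }
        return aws.StringValue(out.StackId), nil
    }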

    ", + "CreateApp": "

    Creates an app for a specified stack. For more information, see Creating Apps.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
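
    A sketch of creating an app from a Git source, assuming the svc client from the setup example; the app name, type, and repository URL are placeholders (Type must be one of the AppType enum values in the model above):

    // createApp registers an app with a stack; StackId, Name, and Type
    // are the required members per the api-2.json model.
    func createApp(svc *opsworks.OpsWorks, stackID, repoURL string) (string, error) {
        out, err := svc.CreateApp(&opsworks.CreateAppInput{
            StackId: aws.String(stackID),
            Name:    aws.String("my-app"),  // placeholder name
            Type:    aws.String("nodejs"),  // one of the AppType enum values
            AppSource: &opsworks.Source{
                Type: aws.String("git"),
                Url:  aws.String(repoURL),
            },
        })
        if err != nil {
            return "", err
        }
        return aws.StringValue(out.AppId), nil
    }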

    ", + "CreateDeployment": "

    Runs deployment or stack commands. For more information, see Deploying Apps and Run Stack Commands.

    Required Permissions: To use this action, an IAM user must have a Deploy or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
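
    A sketch of issuing the deploy command for an app, assuming the svc client from the setup example. Other DeploymentCommandName values (setup, configure, execute_recipes, and so on) appear in the enum in the model above:

    // deployApp runs the "deploy" command for an app and returns the
    // deployment ID for later inspection via DescribeDeployments.
    func deployApp(svc *opsworks.OpsWorks, stackID, appID string) (string, error) {
        out, err := svc.CreateDeployment(&opsworks.CreateDeploymentInput{
            StackId: aws.String(stackID),
            AppId:   aws.String(appID),
            Command: &opsworks.DeploymentCommand{
                Name: aws.String("deploy"),
            },
        })
        if err != nil {
            return "", err
        }
        return aws.StringValue(out.DeploymentId), nil
    }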

    ", + "CreateInstance": "

    Creates an instance in a specified stack. For more information, see Adding an Instance to a Layer.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
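
    Per the CreateInstanceRequest shape, StackId, LayerIds, and InstanceType are required. A sketch, assuming the svc client from the setup example; the instance type is illustrative:

    // createInstance adds an instance to a single layer and returns its ID.
    func createInstance(svc *opsworks.OpsWorks, stackID, layerID string) (string, error) {
        out, err := svc.CreateInstance(&opsworks.CreateInstanceInput{
            StackId:      aws.String(stackID),
            LayerIds:     []*string{aws.String(layerID)},
            InstanceType: aws.String("t2.medium"), // illustrative instance type
        })
        if err != nil {
            return "", err
        }
        return aws.StringValue(out.InstanceId), nil
    }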

    ", + "CreateLayer": "

    Creates a layer. For more information, see How to Create a Layer.

    You should use CreateLayer for noncustom layer types such as PHP App Server only if the stack does not have an existing layer of that type. A stack can have at most one instance of each noncustom layer; if you attempt to create a second instance, CreateLayer fails. A stack can have an arbitrary number of custom layers, so you can call CreateLayer as many times as you like for that layer type.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
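
    A sketch of creating a custom layer, the one LayerType a stack may hold any number of, assuming the svc client from the setup example; the name and shortname are placeholders:

    // createCustomLayer creates a "custom" layer; StackId, Type, Name,
    // and Shortname are the required members per the model above.
    func createCustomLayer(svc *opsworks.OpsWorks, stackID string) (string, error) {
        out, err := svc.CreateLayer(&opsworks.CreateLayerInput{
            StackId:   aws.String(stackID),
            Type:      aws.String("custom"),
            Name:      aws.String("Workers"),
            Shortname: aws.String("workers"),
        })
        if err != nil {
            return "", err
        }
        return aws.StringValue(out.LayerId), nil
    }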

    ", + "CreateStack": "

    Creates a new stack. For more information, see Create a New Stack.

    Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "CreateUserProfile": "

    Creates a new user profile.

    Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DeleteApp": "

    Deletes a specified app.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DeleteInstance": "

    Deletes a specified instance, which terminates the associated Amazon EC2 instance. You must stop an instance before you can delete it.

    For more information, see Deleting Instances.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DeleteLayer": "

    Deletes a specified layer. You must first stop and then delete all associated instances or unassign registered instances. For more information, see How to Delete a Layer.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DeleteStack": "

    Deletes a specified stack. You must first delete all instances, layers, and apps or deregister registered instances. For more information, see Shut Down a Stack.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DeleteUserProfile": "

    Deletes a user profile.

    Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DeregisterEcsCluster": "

    Deregisters a specified Amazon ECS cluster from a stack. For more information, see Resource Management.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html.

    ", + "DeregisterElasticIp": "

    Deregisters a specified Elastic IP address. The address can then be registered by another stack. For more information, see Resource Management.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DeregisterInstance": "

    Deregisters a registered Amazon EC2 or on-premises instance. This action removes the instance from the stack and returns it to your control. It cannot be used with instances that were created with AWS OpsWorks.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DeregisterRdsDbInstance": "

    Deregisters an Amazon RDS instance.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DeregisterVolume": "

    Deregisters an Amazon EBS volume. The volume can then be registered by another stack. For more information, see Resource Management.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DescribeAgentVersions": "

    Describes the available AWS OpsWorks agent versions. You must specify a stack ID or a configuration manager. DescribeAgentVersions returns a list of available agent versions for the specified stack or configuration manager.

    ", + "DescribeApps": "

    Requests a description of a specified set of apps.

    You must specify at least one of the parameters.

    Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DescribeCommands": "

    Describes the results of specified commands.

    You must specify at least one of the parameters.

    Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DescribeDeployments": "

    Requests a description of a specified set of deployments.

    You must specify at least one of the parameters.

    Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DescribeEcsClusters": "

    Describes Amazon ECS clusters that are registered with a stack. If you specify only a stack ID, you can use the MaxResults and NextToken parameters to paginate the response. However, AWS OpsWorks currently supports only one cluster per layer, so the result set has a maximum of one element.

    Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack or an attached policy that explicitly grants permission. For more information on user permissions, see Managing User Permissions.
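
    A sketch of the NextToken pagination loop described above, assuming the svc client from the setup example. With only a stack ID the result set currently holds at most one cluster, but the loop shows the general pattern:

    // listEcsClusters pages through DescribeEcsClusters, accumulating
    // cluster ARNs until NextToken comes back nil.
    func listEcsClusters(svc *opsworks.OpsWorks, stackID string) ([]string, error) {
        var arns []string
        input := &opsworks.DescribeEcsClustersInput{
            StackId:    aws.String(stackID),
            MaxResults: aws.Int64(1),
        }
        for {
            out, err := svc.DescribeEcsClusters(input)
            if err != nil {
                return nil, err
            }
            for _, c := range out.EcsClusters {
                arns = append(arns, aws.StringValue(c.EcsClusterArn))
            }
            if out.NextToken == nil {
                break
            }
            input.NextToken = out.NextToken
        }
        return arns, nil
    }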

    ", + "DescribeElasticIps": "

    Describes Elastic IP addresses.

    You must specify at least one of the parameters.

    Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DescribeElasticLoadBalancers": "

    Describes a stack's Elastic Load Balancing instances.

    You must specify at least one of the parameters.

    Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DescribeInstances": "

    Requests a description of a set of instances.

    You must specify at least one of the parameters.

    Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DescribeLayers": "

    Requests a description of one or more layers in a specified stack.

    You must specify at least one of the parameters.

    Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DescribeLoadBasedAutoScaling": "

    Describes load-based auto scaling configurations for specified layers.

    You must specify at least one of the parameters.

    Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DescribeMyUserProfile": "

    Describes a user's SSH information.

    Required Permissions: To use this action, an IAM user must have self-management enabled or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DescribePermissions": "

    Describes the permissions for a specified stack.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DescribeRaidArrays": "

    Describes an instance's RAID arrays.

    You must specify at least one of the parameters.

    Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DescribeRdsDbInstances": "

    Describes Amazon RDS instances.

    Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DescribeServiceErrors": "

    Describes AWS OpsWorks service errors.

    Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DescribeStackProvisioningParameters": "

    Requests a description of a stack's provisioning parameters.

    Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DescribeStackSummary": "

    Describes the number of layers and apps in a specified stack, and the number of instances in each state, such as running_setup or online.

    Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DescribeStacks": "

    Requests a description of one or more stacks.

    Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DescribeTimeBasedAutoScaling": "

    Describes time-based auto scaling configurations for specified instances.

    You must specify at least one of the parameters.

    Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DescribeUserProfiles": "

    Describes specified users.

    Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DescribeVolumes": "

    Describes an instance's Amazon EBS volumes.

    You must specify at least one of the parameters.

    Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DetachElasticLoadBalancer": "

    Detaches a specified Elastic Load Balancing instance from its layer.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DisassociateElasticIp": "

    Disassociates an Elastic IP address from its instance. The address remains registered with the stack. For more information, see Resource Management.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "GetHostnameSuggestion": "

    Gets a generated host name for the specified layer, based on the current host name theme.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "GrantAccess": "

    This action can be used only with Windows stacks.

    Grants RDP access to a Windows instance for a specified time period.
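    For illustration only, here is a minimal Go sketch of this call using the vendored aws-sdk-go opsworks client; the instance ID is a placeholder and error handling is pared down:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/opsworks"
    )

    func main() {
        svc := opsworks.New(session.Must(session.NewSession()))
        out, err := svc.GrantAccess(&opsworks.GrantAccessInput{
            InstanceId:        aws.String("your-instance-id"), // placeholder
            ValidForInMinutes: aws.Int64(60),
        })
        if err != nil {
            log.Fatal(err)
        }
        // The result carries a TemporaryCredential with the RDP user name,
        // password, and how long the grant remains valid.
        cred := out.TemporaryCredential
        log.Printf("user %s, valid for %d minutes",
            aws.StringValue(cred.Username),
            aws.Int64Value(cred.ValidForInMinutes))
    }

    Later sketches in this section assume the same imports and an *opsworks.OpsWorks client named svc created as above.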

    ", + "RebootInstance": "

    Reboots a specified instance. For more information, see Starting, Stopping, and Rebooting Instances.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "RegisterEcsCluster": "

    Registers a specified Amazon ECS cluster with a stack. You can register only one cluster with a stack. A cluster can be registered with only one stack. For more information, see Resource Management.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
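    As a sketch (client setup as in the GrantAccess example above; the cluster ARN and stack ID are placeholders), registration is a single call:

    func registerCluster(svc *opsworks.OpsWorks) error {
        out, err := svc.RegisterEcsCluster(&opsworks.RegisterEcsClusterInput{
            EcsClusterArn: aws.String("arn:aws:ecs:us-west-2:111111111111:cluster/example"), // placeholder
            StackId:       aws.String("your-stack-id"),                                     // placeholder
        })
        if err != nil {
            return err
        }
        // On success the service echoes back the registered cluster's ARN.
        log.Println(aws.StringValue(out.EcsClusterArn))
        return nil
    }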

    ", + "RegisterElasticIp": "

    Registers an Elastic IP address with a specified stack. An address can be registered with only one stack at a time. If the address is already registered, you must first deregister it by calling DeregisterElasticIp. For more information, see Resource Management.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "RegisterInstance": "

    Registers instances with a specified stack that were created outside of AWS OpsWorks.

    We do not recommend using this action to register instances. The complete registration operation has two primary steps: installing the AWS OpsWorks agent on the instance and registering the instance with the stack. RegisterInstance handles only the second step. You should instead use the AWS CLI register command, which performs the entire registration operation. For more information, see Registering an Instance with an AWS OpsWorks Stack.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "RegisterRdsDbInstance": "

    Registers an Amazon RDS instance with a stack.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "RegisterVolume": "

    Registers an Amazon EBS volume with a specified stack. A volume can be registered with only one stack at a time. If the volume is already registered, you must first deregister it by calling DeregisterVolume. For more information, see Resource Management.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "SetLoadBasedAutoScaling": "

    Specify the load-based auto scaling configuration for a specified layer. For more information, see Managing Load with Time-based and Load-based Instances.

    To use load-based auto scaling, you must create a set of load-based auto scaling instances. Load-based auto scaling operates only on the instances from that set, so you must ensure that you have created enough instances to handle the maximum anticipated load.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
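    A minimal sketch of configuring up- and downscaling thresholds for a layer (the threshold values are illustrative and the layer ID is a placeholder):

    func enableLoadBasedScaling(svc *opsworks.OpsWorks) error {
        _, err := svc.SetLoadBasedAutoScaling(&opsworks.SetLoadBasedAutoScalingInput{
            LayerId: aws.String("your-layer-id"), // placeholder
            Enable:  aws.Bool(true),
            UpScaling: &opsworks.AutoScalingThresholds{
                InstanceCount:      aws.Int64(2),    // add two instances...
                CpuThreshold:       aws.Float64(80), // ...when CPU exceeds 80%
                ThresholdsWaitTime: aws.Int64(5),    // ...for five minutes
            },
            DownScaling: &opsworks.AutoScalingThresholds{
                InstanceCount:     aws.Int64(1),     // remove one instance...
                CpuThreshold:      aws.Float64(30),  // ...when CPU falls below 30%
                IgnoreMetricsTime: aws.Int64(10),    // then ignore metrics for ten minutes
            },
        })
        return err
    }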

    ", + "SetPermission": "

    Specifies a user's permissions. For more information, see Security and Permissions.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.
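    A sketch of granting a user stack access with SSH but not sudo; the stack ID and IAM ARN are placeholders, and the lowercase "manage" level string is an assumption based on the documented permission levels:

    func setManagePermission(svc *opsworks.OpsWorks) error {
        _, err := svc.SetPermission(&opsworks.SetPermissionInput{
            StackId:    aws.String("your-stack-id"),                          // placeholder
            IamUserArn: aws.String("arn:aws:iam::111111111111:user/example"), // placeholder
            Level:      aws.String("manage"),                                 // assumed level string
            AllowSsh:   aws.Bool(true),
            AllowSudo:  aws.Bool(false),
        })
        return err
    }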

    ", + "SetTimeBasedAutoScaling": "

    Specify the time-based auto scaling configuration for a specified instance. For more information, see Managing Load with Time-based and Load-based Instances.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "StartInstance": "

    Starts a specified instance. For more information, see Starting, Stopping, and Rebooting Instances.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "StartStack": "

    Starts a stack's instances.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "StopInstance": "

    Stops a specified instance. When you stop a standard instance, the data disappears and must be reinstalled when you restart the instance. You can stop an Amazon EBS-backed instance without losing data. For more information, see Starting, Stopping, and Rebooting Instances.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "StopStack": "

    Stops a specified stack.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "UnassignInstance": "

    Unassigns a registered instance from all of its layers. The instance remains in the stack as an unassigned instance and can be assigned to another layer, as needed. You cannot use this action with instances that were created with AWS OpsWorks.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "UnassignVolume": "

    Unassigns an assigned Amazon EBS volume. The volume remains registered with the stack. For more information, see Resource Management.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "UpdateApp": "

    Updates a specified app.

    Required Permissions: To use this action, an IAM user must have a Deploy or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "UpdateElasticIp": "

    Updates a registered Elastic IP address's name. For more information, see Resource Management.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "UpdateInstance": "

    Updates a specified instance.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "UpdateLayer": "

    Updates a specified layer.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "UpdateMyUserProfile": "

    Updates a user's SSH public key.

    Required Permissions: To use this action, an IAM user must have self-management enabled or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "UpdateRdsDbInstance": "

    Updates an Amazon RDS instance.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "UpdateStack": "

    Updates a specified stack.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "UpdateUserProfile": "

    Updates a specified user profile.

    Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "UpdateVolume": "

    Updates an Amazon EBS volume's name or mount point. For more information, see Resource Management.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    " + }, + "shapes": { + "AgentVersion": { + "base": "

    Describes an agent version.

    ", + "refs": { + "AgentVersions$member": null + } + }, + "AgentVersions": { + "base": null, + "refs": { + "DescribeAgentVersionsResult$AgentVersions": "

    The agent versions for the specified stack or configuration manager. Note that this value is the complete version number, not the abbreviated number used by the console.

    " + } + }, + "App": { + "base": "

    A description of the app.

    ", + "refs": { + "Apps$member": null + } + }, + "AppAttributes": { + "base": null, + "refs": { + "App$Attributes": "

    The app attributes.

    ", + "CreateAppRequest$Attributes": "

    One or more user-defined key/value pairs to be added to the app attributes.

    ", + "UpdateAppRequest$Attributes": "

    One or more user-defined key/value pairs to be added to the app attributes.

    " + } + }, + "AppAttributesKeys": { + "base": null, + "refs": { + "AppAttributes$key": null + } + }, + "AppType": { + "base": null, + "refs": { + "App$Type": "

    The app type.

    ", + "CreateAppRequest$Type": "

    The app type. Each supported type is associated with a particular layer. For example, PHP applications are associated with a PHP layer. AWS OpsWorks deploys an application to those instances that are members of the corresponding layer. If your app isn't one of the standard types, or you prefer to implement your own Deploy recipes, specify other.

    ", + "UpdateAppRequest$Type": "

    The app type.

    " + } + }, + "Apps": { + "base": null, + "refs": { + "DescribeAppsResult$Apps": "

    An array of App objects that describe the specified apps.

    " + } + }, + "Architecture": { + "base": null, + "refs": { + "CreateInstanceRequest$Architecture": "

    The instance architecture. The default option is x86_64. Instance types do not necessarily support both architectures. For a list of the architectures that are supported by the different instance types, see Instance Families and Types.

    ", + "Instance$Architecture": "

    The instance architecture: \"i386\" or \"x86_64\".

    ", + "UpdateInstanceRequest$Architecture": "

    The instance architecture. Instance types do not necessarily support both architectures. For a list of the architectures that are supported by the different instance types, see Instance Families and Types.

    " + } + }, + "AssignInstanceRequest": { + "base": null, + "refs": { + } + }, + "AssignVolumeRequest": { + "base": null, + "refs": { + } + }, + "AssociateElasticIpRequest": { + "base": null, + "refs": { + } + }, + "AttachElasticLoadBalancerRequest": { + "base": null, + "refs": { + } + }, + "AutoScalingThresholds": { + "base": "

    Describes a load-based auto scaling upscaling or downscaling threshold configuration, which specifies when AWS OpsWorks starts or stops load-based instances.

    ", + "refs": { + "LoadBasedAutoScalingConfiguration$UpScaling": "

    An AutoScalingThresholds object that describes the upscaling configuration, which defines how and when AWS OpsWorks increases the number of instances.

    ", + "LoadBasedAutoScalingConfiguration$DownScaling": "

    An AutoScalingThresholds object that describes the downscaling configuration, which defines how and when AWS OpsWorks reduces the number of instances.

    ", + "SetLoadBasedAutoScalingRequest$UpScaling": "

    An AutoScalingThresholds object with the upscaling threshold configuration. If the load exceeds these thresholds for a specified amount of time, AWS OpsWorks starts a specified number of instances.

    ", + "SetLoadBasedAutoScalingRequest$DownScaling": "

    An AutoScalingThresholds object with the downscaling threshold configuration. If the load falls below these thresholds for a specified amount of time, AWS OpsWorks stops a specified number of instances.

    " + } + }, + "AutoScalingType": { + "base": null, + "refs": { + "CreateInstanceRequest$AutoScalingType": "

    For load-based or time-based instances, the type. Windows stacks can use only time-based instances.

    ", + "Instance$AutoScalingType": "

    For load-based or time-based instances, the type.

    ", + "UpdateInstanceRequest$AutoScalingType": "

    For load-based or time-based instances, the type. Windows stacks can use only time-based instances.

    " + } + }, + "BlockDeviceMapping": { + "base": "

    Describes a block device mapping. This data type maps directly to the Amazon EC2 BlockDeviceMapping data type.

    ", + "refs": { + "BlockDeviceMappings$member": null + } + }, + "BlockDeviceMappings": { + "base": null, + "refs": { + "CreateInstanceRequest$BlockDeviceMappings": "

    An array of BlockDeviceMapping objects that specify the instance's block devices. For more information, see Block Device Mapping. Note that block device mappings are not supported for custom AMIs.
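    For example, a hedged sketch of launching an instance with an extra 100 GiB gp2 volume (the stack ID, layer ID, and device name are placeholders):

    func createInstanceWithEbs(svc *opsworks.OpsWorks) error {
        _, err := svc.CreateInstance(&opsworks.CreateInstanceInput{
            StackId:      aws.String("your-stack-id"), // placeholder
            LayerIds:     []*string{aws.String("your-layer-id")},
            InstanceType: aws.String("t2.medium"),
            BlockDeviceMappings: []*opsworks.BlockDeviceMapping{{
                DeviceName: aws.String("/dev/sdb"),
                Ebs: &opsworks.EbsBlockDevice{
                    VolumeSize:          aws.Int64(100), // GiB
                    VolumeType:          aws.String("gp2"),
                    DeleteOnTermination: aws.Bool(true),
                },
            }},
        })
        return err
    }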

    ", + "Instance$BlockDeviceMappings": "

    An array of BlockDeviceMapping objects that specify the instance's block device mappings.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "App$EnableSsl": "

    Whether to enable SSL for the app.

    ", + "ChefConfiguration$ManageBerkshelf": "

    Whether to enable Berkshelf.

    ", + "CloneStackRequest$UseCustomCookbooks": "

    Whether to use custom cookbooks.

    ", + "CloneStackRequest$UseOpsworksSecurityGroups": "

    Whether to associate the AWS OpsWorks built-in security groups with the stack's layers.

    AWS OpsWorks provides a standard set of built-in security groups, one for each layer, which are associated with layers by default. With UseOpsworksSecurityGroups you can instead provide your own custom security groups. UseOpsworksSecurityGroups has the following settings:

    • True - AWS OpsWorks automatically associates the appropriate built-in security group with each layer (default setting). You can associate additional security groups with a layer after you create it, but you cannot delete the built-in security group.

    • False - AWS OpsWorks does not associate built-in security groups with layers. You must create appropriate Amazon Elastic Compute Cloud (Amazon EC2) security groups and associate a security group with each layer that you create. However, you can still manually associate a built-in security group with a layer on creation; custom security groups are required only for those layers that need custom settings.

    For more information, see Create a New Stack.

    ", + "CloneStackRequest$ClonePermissions": "

    Whether to clone the source stack's permissions.

    ", + "CreateAppRequest$EnableSsl": "

    Whether to enable SSL for the app.

    ", + "CreateInstanceRequest$InstallUpdatesOnBoot": "

    Whether to install operating system and package updates when the instance boots. The default value is true. To control when updates are installed, set this value to false. You must then update your instances manually by using CreateDeployment to run the update_dependencies stack command or by manually running yum (Amazon Linux) or apt-get (Ubuntu) on the instances.

    We strongly recommend using the default value of true to ensure that your instances have the latest security updates.

    ", + "CreateInstanceRequest$EbsOptimized": "

    Whether to create an Amazon EBS-optimized instance.

    ", + "CreateLayerRequest$EnableAutoHealing": "

    Whether to disable auto healing for the layer.

    ", + "CreateLayerRequest$AutoAssignElasticIps": "

    Whether to automatically assign an Elastic IP address to the layer's instances. For more information, see How to Edit a Layer.

    ", + "CreateLayerRequest$AutoAssignPublicIps": "

    For stacks that are running in a VPC, whether to automatically assign a public IP address to the layer's instances. For more information, see How to Edit a Layer.

    ", + "CreateLayerRequest$InstallUpdatesOnBoot": "

    Whether to install operating system and package updates when the instance boots. The default value is true. To control when updates are installed, set this value to false. You must then update your instances manually by using CreateDeployment to run the update_dependencies stack command or by manually running yum (Amazon Linux) or apt-get (Ubuntu) on the instances.

    To ensure that your instances have the latest security updates, we strongly recommend using the default value of true.

    ", + "CreateLayerRequest$UseEbsOptimizedInstances": "

    Whether to use Amazon EBS-optimized instances.

    ", + "CreateStackRequest$UseCustomCookbooks": "

    Whether the stack uses custom cookbooks.

    ", + "CreateStackRequest$UseOpsworksSecurityGroups": "

    Whether to associate the AWS OpsWorks built-in security groups with the stack's layers.

    AWS OpsWorks provides a standard set of built-in security groups, one for each layer, which are associated with layers by default. With UseOpsworksSecurityGroups you can instead provide your own custom security groups. UseOpsworksSecurityGroups has the following settings:

    • True - AWS OpsWorks automatically associates the appropriate built-in security group with each layer (default setting). You can associate additional security groups with a layer after you create it, but you cannot delete the built-in security group.

    • False - AWS OpsWorks does not associate built-in security groups with layers. You must create appropriate EC2 security groups and associate a security group with each layer that you create. However, you can still manually associate a built-in security group with a layer on creation; custom security groups are required only for those layers that need custom settings.

    For more information, see Create a New Stack.
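    A sketch of creating a stack that opts out of the built-in security groups; both ARNs are placeholders you would replace with a real service-role and instance-profile ARN:

    func createStackCustomSgs(svc *opsworks.OpsWorks) error {
        out, err := svc.CreateStack(&opsworks.CreateStackInput{
            Name:                      aws.String("example-stack"),
            Region:                    aws.String("us-west-2"),
            ServiceRoleArn:            aws.String("arn:aws:iam::111111111111:role/opsworks-service-role"),         // placeholder
            DefaultInstanceProfileArn: aws.String("arn:aws:iam::111111111111:instance-profile/opsworks-ec2-role"), // placeholder
            UseOpsworksSecurityGroups: aws.Bool(false), // provide your own EC2 security groups per layer
        })
        if err != nil {
            return err
        }
        log.Println(aws.StringValue(out.StackId))
        return nil
    }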

    ", + "CreateUserProfileRequest$AllowSelfManagement": "

    Whether users can specify their own SSH public key through the My Settings page. For more information, see Setting an IAM User's Public SSH Key.

    ", + "DeleteInstanceRequest$DeleteElasticIp": "

    Whether to delete the instance Elastic IP address.

    ", + "DeleteInstanceRequest$DeleteVolumes": "

    Whether to delete the instance's Amazon EBS volumes.

    ", + "EbsBlockDevice$DeleteOnTermination": "

    Whether the volume is deleted on instance termination.

    ", + "EnvironmentVariable$Secure": "

    (Optional) Whether the variable's value will be returned by the DescribeApps action. To conceal an environment variable's value, set Secure to true. DescribeApps then returns *****FILTERED***** instead of the actual value. The default value for Secure is false.
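    To make the Secure flag concrete, a sketch that updates an app with one plain and one concealed variable (the app ID and values are placeholders):

    func setAppEnvironment(svc *opsworks.OpsWorks) error {
        _, err := svc.UpdateApp(&opsworks.UpdateAppInput{
            AppId: aws.String("your-app-id"), // placeholder
            Environment: []*opsworks.EnvironmentVariable{
                {Key: aws.String("RAILS_ENV"), Value: aws.String("production")},
                {
                    Key:    aws.String("DB_PASSWORD"),
                    Value:  aws.String("placeholder-secret"),
                    Secure: aws.Bool(true), // DescribeApps will show *****FILTERED*****
                },
            },
        })
        return err
    }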

    ", + "Instance$EbsOptimized": "

    Whether this is an Amazon EBS-optimized instance.

    ", + "Instance$InstallUpdatesOnBoot": "

    Whether to install operating system and package updates when the instance boots. The default value is true. If this value is set to false, you must then update your instances manually by using CreateDeployment to run the update_dependencies stack command or by manually running yum (Amazon Linux) or apt-get (Ubuntu) on the instances.

    We strongly recommend using the default value of true, to ensure that your instances have the latest security updates.

    ", + "Layer$EnableAutoHealing": "

    Whether auto healing is disabled for the layer.

    ", + "Layer$AutoAssignElasticIps": "

    Whether to automatically assign an Elastic IP address to the layer's instances. For more information, see How to Edit a Layer.

    ", + "Layer$AutoAssignPublicIps": "

    For stacks that are running in a VPC, whether to automatically assign a public IP address to the layer's instances. For more information, see How to Edit a Layer.

    ", + "Layer$InstallUpdatesOnBoot": "

    Whether to install operating system and package updates when the instance boots. The default value is true. If this value is set to false, you must then update your instances manually by using CreateDeployment to run the update_dependencies stack command or manually running yum (Amazon Linux) or apt-get (Ubuntu) on the instances.

    We strongly recommend using the default value of true, to ensure that your instances have the latest security updates.

    ", + "Layer$UseEbsOptimizedInstances": "

    Whether the layer uses Amazon EBS-optimized instances.

    ", + "LoadBasedAutoScalingConfiguration$Enable": "

    Whether load-based auto scaling is enabled for the layer.

    ", + "Permission$AllowSsh": "

    Whether the user can use SSH.

    ", + "Permission$AllowSudo": "

    Whether the user can use sudo.

    ", + "RdsDbInstance$MissingOnRds": "

    Set to true if AWS OpsWorks was unable to discover the Amazon RDS instance. AWS OpsWorks attempts to discover the instance only once. If this value is set to true, you must deregister the instance and then register it again.

    ", + "SetLoadBasedAutoScalingRequest$Enable": "

    Enables load-based auto scaling for the layer.

    ", + "SetPermissionRequest$AllowSsh": "

    The user is allowed to use SSH to communicate with the instance.

    ", + "SetPermissionRequest$AllowSudo": "

    The user is allowed to use sudo to elevate privileges.

    ", + "ShutdownEventConfiguration$DelayUntilElbConnectionsDrained": "

    Whether to enable Elastic Load Balancing connection draining. For more information, see Connection Draining.

    ", + "Stack$UseCustomCookbooks": "

    Whether the stack uses custom cookbooks.

    ", + "Stack$UseOpsworksSecurityGroups": "

    Whether the stack automatically associates the AWS OpsWorks built-in security groups with the stack's layers.

    ", + "UpdateAppRequest$EnableSsl": "

    Whether SSL is enabled for the app.

    ", + "UpdateInstanceRequest$InstallUpdatesOnBoot": "

    Whether to install operating system and package updates when the instance boots. The default value is true. To control when updates are installed, set this value to false. You must then update your instances manually by using CreateDeployment to run the update_dependencies stack command or by manually running yum (Amazon Linux) or apt-get (Ubuntu) on the instances.

    We strongly recommend using the default value of true, to ensure that your instances have the latest security updates.

    ", + "UpdateInstanceRequest$EbsOptimized": "

    This property cannot be updated.

    ", + "UpdateLayerRequest$EnableAutoHealing": "

    Whether to disable auto healing for the layer.

    ", + "UpdateLayerRequest$AutoAssignElasticIps": "

    Whether to automatically assign an Elastic IP address to the layer's instances. For more information, see How to Edit a Layer.

    ", + "UpdateLayerRequest$AutoAssignPublicIps": "

    For stacks that are running in a VPC, whether to automatically assign a public IP address to the layer's instances. For more information, see How to Edit a Layer.

    ", + "UpdateLayerRequest$InstallUpdatesOnBoot": "

    Whether to install operating system and package updates when the instance boots. The default value is true. To control when updates are installed, set this value to false. You must then update your instances manually by using CreateDeployment to run the update_dependencies stack command or manually running yum (Amazon Linux) or apt-get (Ubuntu) on the instances.

    We strongly recommend using the default value of true, to ensure that your instances have the latest security updates.

    ", + "UpdateLayerRequest$UseEbsOptimizedInstances": "

    Whether to use Amazon EBS-optimized instances.

    ", + "UpdateStackRequest$UseCustomCookbooks": "

    Whether the stack uses custom cookbooks.

    ", + "UpdateStackRequest$UseOpsworksSecurityGroups": "

    Whether to associate the AWS OpsWorks built-in security groups with the stack's layers.

    AWS OpsWorks provides a standard set of built-in security groups, one for each layer, which are associated with layers by default. UseOpsworksSecurityGroups allows you to provide your own custom security groups instead of using the built-in groups. UseOpsworksSecurityGroups has the following settings:

    • True - AWS OpsWorks automatically associates the appropriate built-in security group with each layer (default setting). You can associate additional security groups with a layer after you create it, but you cannot delete the built-in security group.

    • False - AWS OpsWorks does not associate built-in security groups with layers. You must create appropriate EC2 security groups and associate a security group with each layer that you create. However, you can still manually associate a built-in security group with a layer on creation; custom security groups are required only for those layers that need custom settings.

    For more information, see Create a New Stack.

    ", + "UpdateUserProfileRequest$AllowSelfManagement": "

    Whether users can specify their own SSH public key through the My Settings page. For more information, see Managing User Permissions.

    ", + "UserProfile$AllowSelfManagement": "

    Whether users can specify their own SSH public key through the My Settings page. For more information, see Managing User Permissions.

    " + } + }, + "ChefConfiguration": { + "base": "

    Describes the Chef configuration.

    ", + "refs": { + "CloneStackRequest$ChefConfiguration": "

    A ChefConfiguration object that specifies whether to enable Berkshelf and the Berkshelf version on Chef 11.10 stacks. For more information, see Create a New Stack.

    ", + "CreateStackRequest$ChefConfiguration": "

    A ChefConfiguration object that specifies whether to enable Berkshelf and the Berkshelf version on Chef 11.10 stacks. For more information, see Create a New Stack.

    ", + "Stack$ChefConfiguration": "

    A ChefConfiguration object that specifies whether to enable Berkshelf and the Berkshelf version. For more information, see Create a New Stack.

    ", + "UpdateStackRequest$ChefConfiguration": "

    A ChefConfiguration object that specifies whether to enable Berkshelf and the Berkshelf version on Chef 11.10 stacks. For more information, see Create a New Stack.

    " + } + }, + "CloneStackRequest": { + "base": null, + "refs": { + } + }, + "CloneStackResult": { + "base": "

    Contains the response to a CloneStack request.

    ", + "refs": { + } + }, + "Command": { + "base": "

    Describes a command.

    ", + "refs": { + "Commands$member": null + } + }, + "Commands": { + "base": null, + "refs": { + "DescribeCommandsResult$Commands": "

    An array of Command objects that describe each of the specified commands.

    " + } + }, + "CreateAppRequest": { + "base": null, + "refs": { + } + }, + "CreateAppResult": { + "base": "

    Contains the response to a CreateApp request.

    ", + "refs": { + } + }, + "CreateDeploymentRequest": { + "base": null, + "refs": { + } + }, + "CreateDeploymentResult": { + "base": "

    Contains the response to a CreateDeployment request.

    ", + "refs": { + } + }, + "CreateInstanceRequest": { + "base": null, + "refs": { + } + }, + "CreateInstanceResult": { + "base": "

    Contains the response to a CreateInstance request.

    ", + "refs": { + } + }, + "CreateLayerRequest": { + "base": null, + "refs": { + } + }, + "CreateLayerResult": { + "base": "

    Contains the response to a CreateLayer request.

    ", + "refs": { + } + }, + "CreateStackRequest": { + "base": null, + "refs": { + } + }, + "CreateStackResult": { + "base": "

    Contains the response to a CreateStack request.

    ", + "refs": { + } + }, + "CreateUserProfileRequest": { + "base": null, + "refs": { + } + }, + "CreateUserProfileResult": { + "base": "

    Contains the response to a CreateUserProfile request.

    ", + "refs": { + } + }, + "DailyAutoScalingSchedule": { + "base": null, + "refs": { + "WeeklyAutoScalingSchedule$Monday": "

    The schedule for Monday.

    ", + "WeeklyAutoScalingSchedule$Tuesday": "

    The schedule for Tuesday.

    ", + "WeeklyAutoScalingSchedule$Wednesday": "

    The schedule for Wednesday.

    ", + "WeeklyAutoScalingSchedule$Thursday": "

    The schedule for Thursday.

    ", + "WeeklyAutoScalingSchedule$Friday": "

    The schedule for Friday.

    ", + "WeeklyAutoScalingSchedule$Saturday": "

    The schedule for Saturday.

    ", + "WeeklyAutoScalingSchedule$Sunday": "

    The schedule for Sunday.
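    As a sketch, each daily schedule maps hour strings ("0" through "23") to "on"; hours left out of the map are treated as off. Here business hours are enabled on Mondays and Fridays (the instance ID is a placeholder; this sketch also needs the standard strconv import):

    func setOfficeHoursScaling(svc *opsworks.OpsWorks) error {
        onHours := map[string]*string{}
        for h := 9; h <= 17; h++ {
            onHours[strconv.Itoa(h)] = aws.String("on")
        }
        _, err := svc.SetTimeBasedAutoScaling(&opsworks.SetTimeBasedAutoScalingInput{
            InstanceId: aws.String("your-instance-id"), // placeholder
            AutoScalingSchedule: &opsworks.WeeklyAutoScalingSchedule{
                Monday: onHours,
                Friday: onHours,
            },
        })
        return err
    }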

    " + } + }, + "DataSource": { + "base": "

    Describes an app's data source.

    ", + "refs": { + "DataSources$member": null + } + }, + "DataSources": { + "base": null, + "refs": { + "App$DataSources": "

    The app's data sources.

    ", + "CreateAppRequest$DataSources": "

    The app's data sources.

    ", + "UpdateAppRequest$DataSources": "

    The app's data sources.

    " + } + }, + "DateTime": { + "base": null, + "refs": { + "Command$CreatedAt": "

    Date and time when the command was run.

    ", + "Command$AcknowledgedAt": "

    Date and time when the command was acknowledged.

    ", + "Command$CompletedAt": "

    Date when the command completed.

    ", + "Deployment$CreatedAt": "

    Date when the deployment was created.

    ", + "Deployment$CompletedAt": "

    Date when the deployment completed.

    ", + "EcsCluster$RegisteredAt": "

    The time and date that the cluster was registered with the stack.

    ", + "Instance$CreatedAt": "

    The time that the instance was created.

    ", + "Layer$CreatedAt": "

    Date when the layer was created.

    ", + "RaidArray$CreatedAt": "

    When the RAID array was created.

    ", + "ServiceError$CreatedAt": "

    When the error occurred.

    ", + "Stack$CreatedAt": "

    The date when the stack was created.

    " + } + }, + "DeleteAppRequest": { + "base": null, + "refs": { + } + }, + "DeleteInstanceRequest": { + "base": null, + "refs": { + } + }, + "DeleteLayerRequest": { + "base": null, + "refs": { + } + }, + "DeleteStackRequest": { + "base": null, + "refs": { + } + }, + "DeleteUserProfileRequest": { + "base": null, + "refs": { + } + }, + "Deployment": { + "base": "

    Describes a deployment of a stack or app.

    ", + "refs": { + "Deployments$member": null + } + }, + "DeploymentCommand": { + "base": "

    Used to specify a stack or deployment command.

    ", + "refs": { + "CreateDeploymentRequest$Command": "

    A DeploymentCommand object that specifies the deployment command and any associated arguments.

    ", + "Deployment$Command": null + } + }, + "DeploymentCommandArgs": { + "base": null, + "refs": { + "DeploymentCommand$Args": "

    The arguments of those commands that take arguments. It should be set to a JSON object with the following format:

    {\"arg_name1\" : [\"value1\", \"value2\", ...], \"arg_name2\" : [\"value1\", \"value2\", ...], ...}

    The update_dependencies command takes two arguments:

    • upgrade_os_to - Specifies the desired Amazon Linux version for instances whose OS you want to upgrade, such as Amazon Linux 2014.09. You must also set the allow_reboot argument to true.

    • allow_reboot - Specifies whether to allow AWS OpsWorks to reboot the instances if necessary, after installing the updates. This argument can be set to either true or false. The default value is false.

    For example, to upgrade an instance to Amazon Linux 2014.09, set Args to the following.

    { \"upgrade_os_to\":[\"Amazon Linux 2014.09\"], \"allow_reboot\":[\"true\"] }

    " + } + }, + "DeploymentCommandName": { + "base": null, + "refs": { + "DeploymentCommand$Name": "

    Specifies the operation. You can specify only one command.

    For stacks, the following commands are available:

    • execute_recipes: Execute one or more recipes. To specify the recipes, set an Args parameter named recipes to the list of recipes to be executed. For example, to execute phpapp::appsetup, set Args to {\"recipes\":[\"phpapp::appsetup\"]}.

    • install_dependencies: Install the stack's dependencies.

    • update_custom_cookbooks: Update the stack's custom cookbooks.

    • update_dependencies: Update the stack's dependencies.

    The update_dependencies and install_dependencies commands are supported only for Linux instances. You can run the commands successfully on Windows instances, but they do nothing.

    For apps, the following commands are available:

    • deploy: Deploy an app. Ruby on Rails apps have an optional Args parameter named migrate. Set Args to {\"migrate\":[\"true\"]} to migrate the database. The default setting is {\"migrate\":[\"false\"]}.

    • rollback: Roll the app back to the previous version. When you update an app, AWS OpsWorks stores the previous version, up to a maximum of five versions. You can use this command to roll an app back as many as four versions.

    • start: Start the app's web or application server.

    • stop: Stop the app's web or application server.

    • restart: Restart the app's web or application server.

    • undeploy: Undeploy the app.
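    For instance, running the execute_recipes example above (phpapp::appsetup) through CreateDeployment looks roughly like this (the stack ID is a placeholder):

    func runRecipe(svc *opsworks.OpsWorks) error {
        out, err := svc.CreateDeployment(&opsworks.CreateDeploymentInput{
            StackId: aws.String("your-stack-id"), // placeholder
            Command: &opsworks.DeploymentCommand{
                Name: aws.String("execute_recipes"),
                Args: map[string][]*string{
                    "recipes": {aws.String("phpapp::appsetup")},
                },
            },
        })
        if err != nil {
            return err
        }
        log.Println(aws.StringValue(out.DeploymentId))
        return nil
    }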

    " + } + }, + "Deployments": { + "base": null, + "refs": { + "DescribeDeploymentsResult$Deployments": "

    An array of Deployment objects that describe the deployments.

    " + } + }, + "DeregisterEcsClusterRequest": { + "base": null, + "refs": { + } + }, + "DeregisterElasticIpRequest": { + "base": null, + "refs": { + } + }, + "DeregisterInstanceRequest": { + "base": null, + "refs": { + } + }, + "DeregisterRdsDbInstanceRequest": { + "base": null, + "refs": { + } + }, + "DeregisterVolumeRequest": { + "base": null, + "refs": { + } + }, + "DescribeAgentVersionsRequest": { + "base": null, + "refs": { + } + }, + "DescribeAgentVersionsResult": { + "base": "

    Contains the response to a DescribeAgentVersions request.

    ", + "refs": { + } + }, + "DescribeAppsRequest": { + "base": null, + "refs": { + } + }, + "DescribeAppsResult": { + "base": "

    Contains the response to a DescribeApps request.

    ", + "refs": { + } + }, + "DescribeCommandsRequest": { + "base": null, + "refs": { + } + }, + "DescribeCommandsResult": { + "base": "

    Contains the response to a DescribeCommands request.

    ", + "refs": { + } + }, + "DescribeDeploymentsRequest": { + "base": null, + "refs": { + } + }, + "DescribeDeploymentsResult": { + "base": "

    Contains the response to a DescribeDeployments request.

    ", + "refs": { + } + }, + "DescribeEcsClustersRequest": { + "base": null, + "refs": { + } + }, + "DescribeEcsClustersResult": { + "base": "

    Contains the response to a DescribeEcsClusters request.

    ", + "refs": { + } + }, + "DescribeElasticIpsRequest": { + "base": null, + "refs": { + } + }, + "DescribeElasticIpsResult": { + "base": "

    Contains the response to a DescribeElasticIps request.

    ", + "refs": { + } + }, + "DescribeElasticLoadBalancersRequest": { + "base": null, + "refs": { + } + }, + "DescribeElasticLoadBalancersResult": { + "base": "

    Contains the response to a DescribeElasticLoadBalancers request.

    ", + "refs": { + } + }, + "DescribeInstancesRequest": { + "base": null, + "refs": { + } + }, + "DescribeInstancesResult": { + "base": "

    Contains the response to a DescribeInstances request.

    ", + "refs": { + } + }, + "DescribeLayersRequest": { + "base": null, + "refs": { + } + }, + "DescribeLayersResult": { + "base": "

    Contains the response to a DescribeLayers request.

    ", + "refs": { + } + }, + "DescribeLoadBasedAutoScalingRequest": { + "base": null, + "refs": { + } + }, + "DescribeLoadBasedAutoScalingResult": { + "base": "

    Contains the response to a DescribeLoadBasedAutoScaling request.

    ", + "refs": { + } + }, + "DescribeMyUserProfileResult": { + "base": "

    Contains the response to a DescribeMyUserProfile request.

    ", + "refs": { + } + }, + "DescribePermissionsRequest": { + "base": null, + "refs": { + } + }, + "DescribePermissionsResult": { + "base": "

    Contains the response to a DescribePermissions request.

    ", + "refs": { + } + }, + "DescribeRaidArraysRequest": { + "base": null, + "refs": { + } + }, + "DescribeRaidArraysResult": { + "base": "

    Contains the response to a DescribeRaidArrays request.

    ", + "refs": { + } + }, + "DescribeRdsDbInstancesRequest": { + "base": null, + "refs": { + } + }, + "DescribeRdsDbInstancesResult": { + "base": "

    Contains the response to a DescribeRdsDbInstances request.

    ", + "refs": { + } + }, + "DescribeServiceErrorsRequest": { + "base": null, + "refs": { + } + }, + "DescribeServiceErrorsResult": { + "base": "

    Contains the response to a DescribeServiceErrors request.

    ", + "refs": { + } + }, + "DescribeStackProvisioningParametersRequest": { + "base": null, + "refs": { + } + }, + "DescribeStackProvisioningParametersResult": { + "base": "

    Contains the response to a DescribeStackProvisioningParameters request.

    ", + "refs": { + } + }, + "DescribeStackSummaryRequest": { + "base": null, + "refs": { + } + }, + "DescribeStackSummaryResult": { + "base": "

    Contains the response to a DescribeStackSummary request.

    ", + "refs": { + } + }, + "DescribeStacksRequest": { + "base": null, + "refs": { + } + }, + "DescribeStacksResult": { + "base": "

    Contains the response to a DescribeStacks request.

    ", + "refs": { + } + }, + "DescribeTimeBasedAutoScalingRequest": { + "base": null, + "refs": { + } + }, + "DescribeTimeBasedAutoScalingResult": { + "base": "

    Contains the response to a DescribeTimeBasedAutoScaling request.

    ", + "refs": { + } + }, + "DescribeUserProfilesRequest": { + "base": null, + "refs": { + } + }, + "DescribeUserProfilesResult": { + "base": "

    Contains the response to a DescribeUserProfiles request.

    ", + "refs": { + } + }, + "DescribeVolumesRequest": { + "base": null, + "refs": { + } + }, + "DescribeVolumesResult": { + "base": "

    Contains the response to a DescribeVolumes request.

    ", + "refs": { + } + }, + "DetachElasticLoadBalancerRequest": { + "base": null, + "refs": { + } + }, + "DisassociateElasticIpRequest": { + "base": null, + "refs": { + } + }, + "Double": { + "base": null, + "refs": { + "AutoScalingThresholds$CpuThreshold": "

    The CPU utilization threshold, as a percent of the available CPU. A value of -1 disables the threshold.

    ", + "AutoScalingThresholds$MemoryThreshold": "

    The memory utilization threshold, as a percent of the available memory. A value of -1 disables the threshold.

    ", + "AutoScalingThresholds$LoadThreshold": "

    The load threshold. A value of -1 disables the threshold. For more information about how load is computed, see Load (computing).

    " + } + }, + "EbsBlockDevice": { + "base": "

    Describes an Amazon EBS volume. This data type maps directly to the Amazon EC2 EbsBlockDevice data type.

    ", + "refs": { + "BlockDeviceMapping$Ebs": "

    An EbsBlockDevice that defines how to configure an Amazon EBS volume when the instance is launched.

    " + } + }, + "EcsCluster": { + "base": "

    Describes a registered Amazon ECS cluster.

    ", + "refs": { + "EcsClusters$member": null + } + }, + "EcsClusters": { + "base": null, + "refs": { + "DescribeEcsClustersResult$EcsClusters": "

    A list of EcsCluster objects containing the cluster descriptions.

    " + } + }, + "ElasticIp": { + "base": "

    Describes an Elastic IP address.

    ", + "refs": { + "ElasticIps$member": null + } + }, + "ElasticIps": { + "base": null, + "refs": { + "DescribeElasticIpsResult$ElasticIps": "

    An ElasticIps object that describes the specified Elastic IP addresses.

    " + } + }, + "ElasticLoadBalancer": { + "base": "

    Describes an Elastic Load Balancing instance.

    ", + "refs": { + "ElasticLoadBalancers$member": null + } + }, + "ElasticLoadBalancers": { + "base": null, + "refs": { + "DescribeElasticLoadBalancersResult$ElasticLoadBalancers": "

    A list of ElasticLoadBalancer objects that describe the specified Elastic Load Balancing instances.

    " + } + }, + "EnvironmentVariable": { + "base": "

    Represents an app's environment variable.

    ", + "refs": { + "EnvironmentVariables$member": null + } + }, + "EnvironmentVariables": { + "base": null, + "refs": { + "App$Environment": "

    An array of EnvironmentVariable objects that specify environment variables to be associated with the app. After you deploy the app, these variables are defined on the associated app server instances. For more information, see Environment Variables.

    There is no specific limit on the number of environment variables. However, the size of the associated data structure - which includes the variable names, values, and protected flag values - cannot exceed 10 KB (10240 Bytes). This limit should accommodate most if not all use cases, but if you do exceed it, the API will raise an exception with the message \"Environment: is too large (maximum is 10KB).\"

    ", + "CreateAppRequest$Environment": "

    An array of EnvironmentVariable objects that specify environment variables to be associated with the app. After you deploy the app, these variables are defined on the associated app server instance. For more information, see Environment Variables.

    There is no specific limit on the number of environment variables. However, the size of the associated data structure - which includes the variables' names, values, and protected flag values - cannot exceed 10 KB (10240 Bytes). This limit should accommodate most if not all use cases. Exceeding it will cause an exception with the message, \"Environment: is too large (maximum is 10KB).\"

    This parameter is supported only by Chef 11.10 stacks. If you have specified one or more environment variables, you cannot modify the stack's Chef version.

    ", + "UpdateAppRequest$Environment": "

    An array of EnvironmentVariable objects that specify environment variables to be associated with the app. After you deploy the app, these variables are defined on the associated app server instances. For more information, see Environment Variables.

    There is no specific limit on the number of environment variables. However, the size of the associated data structure - which includes the variables' names, values, and protected flag values - cannot exceed 10 KB (10240 Bytes). This limit should accommodate most if not all use cases. Exceeding it will cause an exception with the message, \"Environment: is too large (maximum is 10KB).\"

    This parameter is supported only by Chef 11.10 stacks. If you have specified one or more environment variables, you cannot modify the stack's Chef version.

    " + } + }, + "GetHostnameSuggestionRequest": { + "base": null, + "refs": { + } + }, + "GetHostnameSuggestionResult": { + "base": "

    Contains the response to a GetHostnameSuggestion request.

    ", + "refs": { + } + }, + "GrantAccessRequest": { + "base": null, + "refs": { + } + }, + "GrantAccessResult": { + "base": "

    Contains the response to a GrantAccess request.

    ", + "refs": { + } + }, + "Hour": { + "base": null, + "refs": { + "DailyAutoScalingSchedule$key": null + } + }, + "Instance": { + "base": "

    Describes an instance.

    ", + "refs": { + "Instances$member": null + } + }, + "InstanceIdentity": { + "base": "

    Contains a description of an Amazon EC2 instance from the Amazon EC2 metadata service. For more information, see Instance Metadata and User Data.

    ", + "refs": { + "RegisterInstanceRequest$InstanceIdentity": "

    An InstanceIdentity object that contains the instance's identity.

    " + } + }, + "Instances": { + "base": null, + "refs": { + "DescribeInstancesResult$Instances": "

    An array of Instance objects that describe the instances.

    " + } + }, + "InstancesCount": { + "base": "

    Describes how many instances a stack has for each status.

    ", + "refs": { + "StackSummary$InstancesCount": "

    An InstancesCount object with the number of instances in each status.

    " + } + }, + "Integer": { + "base": null, + "refs": { + "AutoScalingThresholds$InstanceCount": "

    The number of instances to add or remove when the load exceeds a threshold.

    ", + "Command$ExitCode": "

    The command exit code.

    ", + "Deployment$Duration": "

    The deployment duration.

    ", + "DescribeEcsClustersRequest$MaxResults": "

    To receive a paginated response, use this parameter to specify the maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.
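    A sketch of consuming the paginated result with the SDK's generated pages helper, which threads NextToken for you (the stack ID is a placeholder):

    func listClusters(svc *opsworks.OpsWorks) error {
        return svc.DescribeEcsClustersPages(&opsworks.DescribeEcsClustersInput{
            StackId:    aws.String("your-stack-id"), // placeholder
            MaxResults: aws.Int64(10),
        }, func(page *opsworks.DescribeEcsClustersOutput, lastPage bool) bool {
            for _, c := range page.EcsClusters {
                log.Println(aws.StringValue(c.EcsClusterArn))
            }
            return true // keep fetching until lastPage
        })
    }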

    ", + "EbsBlockDevice$Iops": "

    The number of I/O operations per second (IOPS) that the volume supports. For more information, see EbsBlockDevice.

    ", + "EbsBlockDevice$VolumeSize": "

    The volume size, in GiB. For more information, see EbsBlockDevice.

    ", + "InstancesCount$Assigning": "

    The number of instances in the Assigning state.

    ", + "InstancesCount$Booting": "

    The number of instances with booting status.

    ", + "InstancesCount$ConnectionLost": "

    The number of instances with connection_lost status.

    ", + "InstancesCount$Deregistering": "

    The number of instances in the Deregistering state.

    ", + "InstancesCount$Online": "

    The number of instances with online status.

    ", + "InstancesCount$Pending": "

    The number of instances with pending status.

    ", + "InstancesCount$Rebooting": "

    The number of instances with rebooting status.

    ", + "InstancesCount$Registered": "

    The number of instances in the Registered state.

    ", + "InstancesCount$Registering": "

    The number of instances in the Registering state.

    ", + "InstancesCount$Requested": "

    The number of instances with requested status.

    ", + "InstancesCount$RunningSetup": "

    The number of instances with running_setup status.

    ", + "InstancesCount$SetupFailed": "

    The number of instances with setup_failed status.

    ", + "InstancesCount$ShuttingDown": "

    The number of instances with shutting_down status.

    ", + "InstancesCount$StartFailed": "

    The number of instances with start_failed status.

    ", + "InstancesCount$Stopped": "

    The number of instances with stopped status.

    ", + "InstancesCount$Stopping": "

    The number of instances with stopping status.

    ", + "InstancesCount$Terminated": "

    The number of instances with terminated status.

    ", + "InstancesCount$Terminating": "

    The number of instances with terminating status.

    ", + "InstancesCount$Unassigning": "

    The number of instances in the Unassigning state.

    ", + "RaidArray$RaidLevel": "

    The RAID level.

    ", + "RaidArray$NumberOfDisks": "

    The number of disks in the array.

    ", + "RaidArray$Size": "

    The array's size.

    ", + "RaidArray$Iops": "

    For PIOPS volumes, the IOPS per disk.

    ", + "ShutdownEventConfiguration$ExecutionTimeout": "

    The time, in seconds, that AWS OpsWorks will wait after triggering a Shutdown event before shutting down an instance.

    ", + "StackSummary$LayersCount": "

    The number of layers.

    ", + "StackSummary$AppsCount": "

    The number of apps.

    ", + "TemporaryCredential$ValidForInMinutes": "

    The length of time (in minutes) that the grant is valid. When the grant expires, at the end of this period, the user will no longer be able to use the credentials to log in. If they are logged in at the time, they will be automatically logged out.

    ", + "Volume$Size": "

    The volume size.

    ", + "Volume$Iops": "

    For PIOPS volumes, the IOPS per disk.

    ", + "VolumeConfiguration$RaidLevel": "

    The volume RAID level.

    ", + "VolumeConfiguration$NumberOfDisks": "

    The number of disks in the volume.

    ", + "VolumeConfiguration$Size": "

    The volume size.

    ", + "VolumeConfiguration$Iops": "

    For PIOPS volumes, the IOPS per disk.

    " + } + }, + "Layer": { + "base": "

    Describes a layer.

    ", + "refs": { + "Layers$member": null + } + }, + "LayerAttributes": { + "base": null, + "refs": { + "CreateLayerRequest$Attributes": "

    One or more user-defined key/value pairs to be added to the layer attributes.

    To create a cluster layer, set the EcsClusterArn attribute to the cluster's ARN.

    ", + "Layer$Attributes": "

    The layer attributes.

    For the HaproxyStatsPassword, MysqlRootPassword, and GangliaPassword attributes, AWS OpsWorks returns *****FILTERED***** instead of the actual value.

    For an ECS Cluster layer, AWS OpsWorks sets the EcsClusterArn attribute to the cluster's ARN.

    ", + "UpdateLayerRequest$Attributes": "

    One or more user-defined key/value pairs to be added to the layer attributes.

    " + } + }, + "LayerAttributesKeys": { + "base": null, + "refs": { + "LayerAttributes$key": null + } + }, + "LayerType": { + "base": null, + "refs": { + "CreateLayerRequest$Type": "

    The layer type. A stack cannot have more than one built-in layer of the same type. It can have any number of custom layers. Built-in layers are not available in Chef 12 stacks.

    ", + "Layer$Type": "

    The layer type.

    " + } + }, + "Layers": { + "base": null, + "refs": { + "DescribeLayersResult$Layers": "

    An array of Layer objects that describe the layers.

    " + } + }, + "LifecycleEventConfiguration": { + "base": "

    Specifies the lifecycle event configuration.

    ", + "refs": { + "CreateLayerRequest$LifecycleEventConfiguration": "

    A LifeCycleEventConfiguration object that you can use to configure the Shutdown event to specify an execution timeout and enable or disable Elastic Load Balancer connection draining.

    ", + "Layer$LifecycleEventConfiguration": "

    A LifeCycleEventConfiguration object that specifies the Shutdown event configuration.

    ", + "UpdateLayerRequest$LifecycleEventConfiguration": "

    " + } + }, + "LoadBasedAutoScalingConfiguration": { + "base": "

    Describes a layer's load-based auto scaling configuration.

    ", + "refs": { + "LoadBasedAutoScalingConfigurations$member": null + } + }, + "LoadBasedAutoScalingConfigurations": { + "base": null, + "refs": { + "DescribeLoadBasedAutoScalingResult$LoadBasedAutoScalingConfigurations": "

    An array of LoadBasedAutoScalingConfiguration objects that describe each layer's configuration.

    " + } + }, + "Minute": { + "base": null, + "refs": { + "AutoScalingThresholds$ThresholdsWaitTime": "

    The amount of time, in minutes, that the load must exceed a threshold before more instances are added or removed.

    ", + "AutoScalingThresholds$IgnoreMetricsTime": "

    The amount of time (in minutes) after a scaling event occurs that AWS OpsWorks should ignore metrics and suppress additional scaling events. For example, AWS OpsWorks adds new instances following an upscaling event but the instances won't start reducing the load until they have been booted and configured. There is no point in raising additional scaling events during that operation, which typically takes several minutes. IgnoreMetricsTime allows you to direct AWS OpsWorks to suppress scaling events long enough to get the new instances online.

    " + } + }, + "Parameters": { + "base": null, + "refs": { + "DescribeStackProvisioningParametersResult$Parameters": "

    An embedded object that contains the provisioning parameters.

    " + } + }, + "Permission": { + "base": "

    Describes stack or user permissions.

    ", + "refs": { + "Permissions$member": null + } + }, + "Permissions": { + "base": null, + "refs": { + "DescribePermissionsResult$Permissions": "

    An array of Permission objects that describe the stack permissions.

    • If the request object contains only a stack ID, the array contains a Permission object with permissions for each of the stack IAM ARNs.

    • If the request object contains only an IAM ARN, the array contains a Permission object with permissions for each of the user's stack IDs.

    • If the request contains a stack ID and an IAM ARN, the array contains a single Permission object with permissions for the specified stack and IAM ARN.
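    The third case above, sketched with the Go client (both identifiers are placeholders):

    func showPermission(svc *opsworks.OpsWorks) error {
        out, err := svc.DescribePermissions(&opsworks.DescribePermissionsInput{
            StackId:    aws.String("your-stack-id"),                          // placeholder
            IamUserArn: aws.String("arn:aws:iam::111111111111:user/example"), // placeholder
        })
        if err != nil {
            return err
        }
        for _, p := range out.Permissions {
            log.Printf("level=%s ssh=%t sudo=%t",
                aws.StringValue(p.Level),
                aws.BoolValue(p.AllowSsh),
                aws.BoolValue(p.AllowSudo))
        }
        return nil
    }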

    " + } + }, + "RaidArray": { + "base": "

    Describes an instance's RAID array.

    ", + "refs": { + "RaidArrays$member": null + } + }, + "RaidArrays": { + "base": null, + "refs": { + "DescribeRaidArraysResult$RaidArrays": "

    A RaidArrays object that describes the specified RAID arrays.

    " + } + }, + "RdsDbInstance": { + "base": "

    Describes an Amazon RDS instance.

    ", + "refs": { + "RdsDbInstances$member": null + } + }, + "RdsDbInstances": { + "base": null, + "refs": { + "DescribeRdsDbInstancesResult$RdsDbInstances": "

    An array of RdsDbInstance objects that describe the instances.

    " + } + }, + "RebootInstanceRequest": { + "base": null, + "refs": { + } + }, + "Recipes": { + "base": "

    AWS OpsWorks supports five lifecycle events: setup, configuration, deploy, undeploy, and shutdown. For each layer, AWS OpsWorks runs a set of standard recipes for each event. In addition, you can provide custom recipes for any or all layers and events. AWS OpsWorks runs custom event recipes after the standard recipes. LayerCustomRecipes specifies the custom recipes for a particular layer to be run in response to each of the five events.

    To specify a recipe, use the cookbook's directory name in the repository followed by two colons and the recipe name, which is the recipe's file name without the .rb extension. For example: phpapp2::dbsetup specifies the dbsetup.rb recipe in the repository's phpapp2 folder.
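    Using that naming convention, a sketch that attaches custom Setup and Deploy recipes to an existing layer (the layer ID is a placeholder):

    func setCustomRecipes(svc *opsworks.OpsWorks) error {
        _, err := svc.UpdateLayer(&opsworks.UpdateLayerInput{
            LayerId: aws.String("your-layer-id"), // placeholder
            CustomRecipes: &opsworks.Recipes{
                Setup:  []*string{aws.String("phpapp2::dbsetup")},
                Deploy: []*string{aws.String("phpapp2::appsetup")},
            },
        })
        return err
    }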

    ", + "refs": { + "CreateLayerRequest$CustomRecipes": "

    A LayerCustomRecipes object that specifies the layer custom recipes.

    ", + "Layer$DefaultRecipes": null, + "Layer$CustomRecipes": "

    A LayerCustomRecipes object that specifies the layer's custom recipes.

    ", + "UpdateLayerRequest$CustomRecipes": "

    A LayerCustomRecipes object that specifies the layer's custom recipes.

    " + } + }, + "RegisterEcsClusterRequest": { + "base": null, + "refs": { + } + }, + "RegisterEcsClusterResult": { + "base": "

    Contains the response to a RegisterEcsCluster request.

    ", + "refs": { + } + }, + "RegisterElasticIpRequest": { + "base": null, + "refs": { + } + }, + "RegisterElasticIpResult": { + "base": "

    Contains the response to a RegisterElasticIp request.

    ", + "refs": { + } + }, + "RegisterInstanceRequest": { + "base": null, + "refs": { + } + }, + "RegisterInstanceResult": { + "base": "

    Contains the response to a RegisterInstance request.

    ", + "refs": { + } + }, + "RegisterRdsDbInstanceRequest": { + "base": null, + "refs": { + } + }, + "RegisterVolumeRequest": { + "base": null, + "refs": { + } + }, + "RegisterVolumeResult": { + "base": "

    Contains the response to a RegisterVolume request.

    ", + "refs": { + } + }, + "ReportedOs": { + "base": "

    A registered instance's reported operating system.

    ", + "refs": { + "Instance$ReportedOs": "

    For registered instances, the reported operating system.

    " + } + }, + "ResourceNotFoundException": { + "base": "

    Indicates that a resource was not found.

    ", + "refs": { + } + }, + "RootDeviceType": { + "base": null, + "refs": { + "CloneStackRequest$DefaultRootDeviceType": "

    The default root device type. This value is used by default for all instances in the cloned stack, but you can override it when you create an instance. For more information, see Storage for the Root Device.

    ", + "CreateInstanceRequest$RootDeviceType": "

    The instance root device type. For more information, see Storage for the Root Device.

    ", + "CreateStackRequest$DefaultRootDeviceType": "

    The default root device type. This value is the default for all instances in the stack, but you can override it when you create an instance. The default option is instance-store. For more information, see Storage for the Root Device.

    ", + "Instance$RootDeviceType": "

    The instance's root device type. For more information, see Storage for the Root Device.

    ", + "Stack$DefaultRootDeviceType": "

    The default root device type. This value is used by default for all instances in the stack, but you can override it when you create an instance. For more information, see Storage for the Root Device.

    ", + "UpdateStackRequest$DefaultRootDeviceType": "

    The default root device type. This value is used by default for all instances in the stack, but you can override it when you create an instance. For more information, see Storage for the Root Device.

    " + } + }, + "SelfUserProfile": { + "base": "

    Describes a user's SSH information.

    ", + "refs": { + "DescribeMyUserProfileResult$UserProfile": "

    A UserProfile object that describes the user's SSH information.

    " + } + }, + "ServiceError": { + "base": "

    Describes an AWS OpsWorks service error.

    ", + "refs": { + "ServiceErrors$member": null + } + }, + "ServiceErrors": { + "base": null, + "refs": { + "DescribeServiceErrorsResult$ServiceErrors": "

    An array of ServiceError objects that describe the specified service errors.

    " + } + }, + "SetLoadBasedAutoScalingRequest": { + "base": null, + "refs": { + } + }, + "SetPermissionRequest": { + "base": null, + "refs": { + } + }, + "SetTimeBasedAutoScalingRequest": { + "base": null, + "refs": { + } + }, + "ShutdownEventConfiguration": { + "base": "

    The Shutdown event configuration.

    ", + "refs": { + "LifecycleEventConfiguration$Shutdown": "

    A ShutdownEventConfiguration object that specifies the Shutdown event configuration.

    " + } + }, + "Source": { + "base": "

    Contains the information required to retrieve an app or cookbook from a repository. For more information, see Creating Apps or Custom Recipes and Cookbooks.
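
A hedged sketch of a Source in use, creating an app from a public Git repository with the vendored aws-sdk-go opsworks client; the stack ID and repository details are illustrative:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/opsworks"
)

func main() {
	svc := opsworks.New(session.Must(session.NewSession(
		&aws.Config{Region: aws.String("us-east-1")})))

	// Repository URL and revision are placeholders.
	_, err := svc.CreateApp(&opsworks.CreateAppInput{
		StackId: aws.String("stack-id"),
		Name:    aws.String("simplephpapp"),
		Type:    aws.String(opsworks.AppTypePhp),
		AppSource: &opsworks.Source{
			Type:     aws.String(opsworks.SourceTypeGit),
			Url:      aws.String("git://example.com/my-php-app.git"),
			Revision: aws.String("version1"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```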

    ", + "refs": { + "App$AppSource": "

    A Source object that describes the app repository.

    ", + "CloneStackRequest$CustomCookbooksSource": null, + "CreateAppRequest$AppSource": "

    A Source object that specifies the app repository.

    ", + "CreateStackRequest$CustomCookbooksSource": null, + "Stack$CustomCookbooksSource": null, + "UpdateAppRequest$AppSource": "

    A Source object that specifies the app repository.

    ", + "UpdateStackRequest$CustomCookbooksSource": null + } + }, + "SourceType": { + "base": null, + "refs": { + "Source$Type": "

    The repository type.

    " + } + }, + "SslConfiguration": { + "base": "

    Describes an app's SSL configuration.

    ", + "refs": { + "App$SslConfiguration": "

    An SslConfiguration object with the SSL configuration.

    ", + "CreateAppRequest$SslConfiguration": "

    An SslConfiguration object with the SSL configuration.

    ", + "UpdateAppRequest$SslConfiguration": "

    An SslConfiguration object with the SSL configuration.

    " + } + }, + "Stack": { + "base": "

    Describes a stack.

    ", + "refs": { + "Stacks$member": null + } + }, + "StackAttributes": { + "base": null, + "refs": { + "CloneStackRequest$Attributes": "

    A list of stack attributes and values as key/value pairs to be added to the cloned stack.

    ", + "CreateStackRequest$Attributes": "

    One or more user-defined key-value pairs to be added to the stack attributes.

    ", + "Stack$Attributes": "

    The stack's attributes.

    ", + "UpdateStackRequest$Attributes": "

    One or more user-defined key-value pairs to be added to the stack attributes.

    " + } + }, + "StackAttributesKeys": { + "base": null, + "refs": { + "StackAttributes$key": null + } + }, + "StackConfigurationManager": { + "base": "

    Describes the configuration manager.

    ", + "refs": { + "AgentVersion$ConfigurationManager": "

    The configuration manager.

    ", + "CloneStackRequest$ConfigurationManager": "

    The configuration manager. When you clone a stack, we recommend that you use the configuration manager to specify the Chef version: 12, 11.10, or 11.4 for Linux stacks, or 12.2 for Windows stacks. The default value for Linux stacks is currently 12.

    ", + "CreateStackRequest$ConfigurationManager": "

    The configuration manager. When you create a stack, we recommend that you use the configuration manager to specify the Chef version: 12, 11.10, or 11.4 for Linux stacks, or 12.2 for Windows stacks. The default value for Linux stacks is currently 11.4.

    ", + "DescribeAgentVersionsRequest$ConfigurationManager": "

    The configuration manager.

    ", + "Stack$ConfigurationManager": "

    The configuration manager.

    ", + "UpdateStackRequest$ConfigurationManager": "

    The configuration manager. When you update a stack, we recommend that you use the configuration manager to specify the Chef version: 12, 11.10, or 11.4 for Linux stacks, or 12.2 for Windows stacks. The default value for Linux stacks is currently 11.4.

    " + } + }, + "StackSummary": { + "base": "

    Summarizes the number of layers, instances, and apps in a stack.

    ", + "refs": { + "DescribeStackSummaryResult$StackSummary": "

    A StackSummary object that contains the results.

    " + } + }, + "Stacks": { + "base": null, + "refs": { + "DescribeStacksResult$Stacks": "

    An array of Stack objects that describe the stacks.

    " + } + }, + "StartInstanceRequest": { + "base": null, + "refs": { + } + }, + "StartStackRequest": { + "base": null, + "refs": { + } + }, + "StopInstanceRequest": { + "base": null, + "refs": { + } + }, + "StopStackRequest": { + "base": null, + "refs": { + } + }, + "String": { + "base": null, + "refs": { + "AgentVersion$Version": "

    The agent version.

    ", + "App$AppId": "

    The app ID.

    ", + "App$StackId": "

    The app stack ID.

    ", + "App$Shortname": "

    The app's short name.

    ", + "App$Name": "

    The app name.

    ", + "App$Description": "

    A description of the app.

    ", + "App$CreatedAt": "

    When the app was created.

    ", + "AppAttributes$value": null, + "AssignInstanceRequest$InstanceId": "

    The instance ID.

    ", + "AssignVolumeRequest$VolumeId": "

    The volume ID.

    ", + "AssignVolumeRequest$InstanceId": "

    The instance ID.

    ", + "AssociateElasticIpRequest$ElasticIp": "

    The Elastic IP address.

    ", + "AssociateElasticIpRequest$InstanceId": "

    The instance ID.

    ", + "AttachElasticLoadBalancerRequest$ElasticLoadBalancerName": "

    The Elastic Load Balancing instance's name.

    ", + "AttachElasticLoadBalancerRequest$LayerId": "

    The ID of the layer that the Elastic Load Balancing instance is to be attached to.

    ", + "BlockDeviceMapping$DeviceName": "

    The device name that is exposed to the instance, such as /dev/sdh. For the root device, you can use the explicit device name or you can set this parameter to ROOT_DEVICE and AWS OpsWorks will provide the correct device name.
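
A minimal sketch of a ROOT_DEVICE mapping passed to CreateInstance, assuming the vendored aws-sdk-go opsworks client; the IDs and volume sizing are placeholders:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/opsworks"
)

func main() {
	svc := opsworks.New(session.Must(session.NewSession(
		&aws.Config{Region: aws.String("us-east-1")})))

	// ROOT_DEVICE lets AWS OpsWorks substitute the correct root device name.
	_, err := svc.CreateInstance(&opsworks.CreateInstanceInput{
		StackId:      aws.String("stack-id"),
		LayerIds:     []*string{aws.String("layer-id")},
		InstanceType: aws.String("t2.micro"),
		BlockDeviceMappings: []*opsworks.BlockDeviceMapping{{
			DeviceName: aws.String("ROOT_DEVICE"),
			Ebs: &opsworks.EbsBlockDevice{
				VolumeSize:          aws.Int64(50),
				VolumeType:          aws.String(opsworks.VolumeTypeGp2),
				DeleteOnTermination: aws.Bool(true),
			},
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```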

    ", + "BlockDeviceMapping$NoDevice": "

    Suppresses the specified device included in the AMI's block device mapping.

    ", + "BlockDeviceMapping$VirtualName": "

    The virtual device name. For more information, see BlockDeviceMapping.

    ", + "ChefConfiguration$BerkshelfVersion": "

    The Berkshelf version.

    ", + "CloneStackRequest$SourceStackId": "

    The source stack ID.

    ", + "CloneStackRequest$Name": "

    The cloned stack name.

    ", + "CloneStackRequest$Region": "

    The cloned stack AWS region, such as \"us-east-1\". For more information about AWS regions, see Regions and Endpoints.

    ", + "CloneStackRequest$VpcId": "

    The ID of the VPC that the cloned stack is to be launched into. It must be in the specified region. All instances are launched into this VPC, and you cannot change the ID later.

    • If your account supports EC2 Classic, the default value is no VPC.

    • If your account does not support EC2 Classic, the default value is the default VPC for the specified region.

    If the VPC ID corresponds to a default VPC and you have specified either the DefaultAvailabilityZone or the DefaultSubnetId parameter only, AWS OpsWorks infers the value of the other parameter. If you specify neither parameter, AWS OpsWorks sets these parameters to the first valid Availability Zone for the specified region and the corresponding default VPC subnet ID, respectively.

    If you specify a nondefault VPC ID, note the following:

    • It must belong to a VPC in your account that is in the specified region.

    • You must specify a value for DefaultSubnetId.

    For more information on how to use AWS OpsWorks with a VPC, see Running a Stack in a VPC. For more information on default VPC and EC2 Classic, see Supported Platforms.

    ", + "CloneStackRequest$ServiceRoleArn": "

    The stack AWS Identity and Access Management (IAM) role, which allows AWS OpsWorks to work with AWS resources on your behalf. You must set this parameter to the Amazon Resource Name (ARN) for an existing IAM role. If you create a stack by using the AWS OpsWorks console, it creates the role for you. You can obtain an existing stack's IAM ARN programmatically by calling DescribePermissions. For more information about IAM ARNs, see Using Identifiers.

    You must set this parameter to a valid service role ARN or the action will fail; there is no default value. You can specify the source stack's service role ARN, if you prefer, but you must do so explicitly.

    ", + "CloneStackRequest$DefaultInstanceProfileArn": "

    The Amazon Resource Name (ARN) of an IAM profile that is the default profile for all of the stack's EC2 instances. For more information about IAM ARNs, see Using Identifiers.

    ", + "CloneStackRequest$DefaultOs": "

    The stack's operating system, which must be set to one of the following.

    • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

    • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

    • CentOS 7

    • Red Hat Enterprise Linux 7

    • Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

    • A custom AMI: Custom. You specify the custom AMI you want to use when you create instances. For more information on how to use custom AMIs with OpsWorks, see Using Custom AMIs.

    The default option is the parent stack's operating system. For more information on the supported operating systems, see AWS OpsWorks Operating Systems.

    You can specify a different Linux operating system for the cloned stack, but you cannot change from Linux to Windows or Windows to Linux.

    ", + "CloneStackRequest$HostnameTheme": "

    The stack's host name theme, with spaces replaced by underscores. The theme is used to generate host names for the stack's instances. By default, HostnameTheme is set to Layer_Dependent, which creates host names by appending integers to the layer's short name. The other themes are:

    • Baked_Goods

    • Clouds

    • Europe_Cities

    • Fruits

    • Greek_Deities

    • Legendary_creatures_from_Japan

    • Planets_and_Moons

    • Roman_Deities

    • Scottish_Islands

    • US_Cities

    • Wild_Cats

    To obtain a generated host name, call GetHostNameSuggestion, which returns a host name based on the current theme.
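
In the Go SDK this operation is exposed as GetHostnameSuggestion. A minimal sketch with a placeholder layer ID; the returned name follows the stack's current HostnameTheme:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/opsworks"
)

func main() {
	svc := opsworks.New(session.Must(session.NewSession(
		&aws.Config{Region: aws.String("us-east-1")})))

	out, err := svc.GetHostnameSuggestion(&opsworks.GetHostnameSuggestionInput{
		LayerId: aws.String("layer-id"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.Hostname))
}
```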

    ", + "CloneStackRequest$DefaultAvailabilityZone": "

    The cloned stack's default Availability Zone, which must be in the specified region. For more information, see Regions and Endpoints. If you also specify a value for DefaultSubnetId, the subnet must be in the same zone. For more information, see the VpcId parameter description.

    ", + "CloneStackRequest$DefaultSubnetId": "

    The stack's default VPC subnet ID. This parameter is required if you specify a value for the VpcId parameter. All instances are launched into this subnet unless you specify otherwise when you create the instance. If you also specify a value for DefaultAvailabilityZone, the subnet must be in that zone. For information on default values and when this parameter is required, see the VpcId parameter description.

    ", + "CloneStackRequest$CustomJson": "

    A string that contains user-defined, custom JSON. It is used to override the corresponding default stack configuration JSON values. The string should be in the following format and must escape characters such as '\"':

    \"{\\\"key1\\\": \\\"value1\\\", \\\"key2\\\": \\\"value2\\\",...}\"

    For more information on custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes.
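
Rather than hand-escaping the string, marshaling a map produces the required quoting automatically. A minimal sketch against CloneStack; the source stack ID and service role ARN are placeholders:

```go
package main

import (
	"encoding/json"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/opsworks"
)

func main() {
	// json.Marshal handles the escaping rules described above.
	custom, err := json.Marshal(map[string]string{
		"key1": "value1",
		"key2": "value2",
	})
	if err != nil {
		log.Fatal(err)
	}

	svc := opsworks.New(session.Must(session.NewSession(
		&aws.Config{Region: aws.String("us-east-1")})))
	_, err = svc.CloneStack(&opsworks.CloneStackInput{
		SourceStackId:  aws.String("source-stack-id"),
		ServiceRoleArn: aws.String("arn:aws:iam::123456789012:role/aws-opsworks-service-role"),
		CustomJson:     aws.String(string(custom)),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```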

    ", + "CloneStackRequest$DefaultSshKeyName": "

    A default Amazon EC2 key pair name. The default value is none. If you specify a key pair name, AWS OpsWorks installs the public key on the instance and you can use the private key with an SSH client to log in to the instance. For more information, see Using SSH to Communicate with an Instance and Managing SSH Access. You can override this setting by specifying a different key pair, or no key pair, when you create an instance.

    ", + "CloneStackRequest$AgentVersion": "

    The default AWS OpsWorks agent version. You have the following options:

    • Auto-update - Set this parameter to LATEST. AWS OpsWorks automatically installs new agent versions on the stack's instances as soon as they are available.

    • Fixed version - Set this parameter to your preferred agent version. To update the agent version, you must edit the stack configuration and specify a new version. AWS OpsWorks then automatically installs that version on the stack's instances.

    The default setting is LATEST. To specify an agent version, you must use the complete version number, not the abbreviated number shown on the console. For a list of available agent version numbers, call DescribeAgentVersions.

    You can also specify an agent version when you create or update an instance, which overrides the stack's default setting.

    ", + "CloneStackResult$StackId": "

    The cloned stack ID.

    ", + "Command$CommandId": "

    The command ID.

    ", + "Command$InstanceId": "

    The ID of the instance where the command was executed.

    ", + "Command$DeploymentId": "

    The command deployment ID.

    ", + "Command$Status": "

    The command status:

    • failed

    • successful

    • skipped

    • pending

    ", + "Command$LogUrl": "

    The URL of the command log.

    ", + "Command$Type": "

    The command type:

    • deploy

    • rollback

    • start

    • stop

    • restart

    • undeploy

    • update_dependencies

    • install_dependencies

    • update_custom_cookbooks

    • execute_recipes

    ", + "CreateAppRequest$StackId": "

    The stack ID.

    ", + "CreateAppRequest$Shortname": "

    The app's short name.

    ", + "CreateAppRequest$Name": "

    The app name.

    ", + "CreateAppRequest$Description": "

    A description of the app.

    ", + "CreateAppResult$AppId": "

    The app ID.

    ", + "CreateDeploymentRequest$StackId": "

    The stack ID.

    ", + "CreateDeploymentRequest$AppId": "

    The app ID. This parameter is required for app deployments, but not for other deployment commands.

    ", + "CreateDeploymentRequest$Comment": "

    A user-defined comment.

    ", + "CreateDeploymentRequest$CustomJson": "

    A string that contains user-defined, custom JSON. It is used to override the corresponding default stack configuration JSON values. The string should be in the following format and must escape characters such as '\"':

    \"{\\\"key1\\\": \\\"value1\\\", \\\"key2\\\": \\\"value2\\\",...}\"

    For more information on custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes.

    ", + "CreateDeploymentResult$DeploymentId": "

    The deployment ID, which can be used with other requests to identify the deployment.

    ", + "CreateInstanceRequest$StackId": "

    The stack ID.

    ", + "CreateInstanceRequest$InstanceType": "

    The instance type, such as t2.micro. For a list of supported instance types, open the stack in the console, choose Instances, and choose + Instance. The Size list contains the currently supported types. For more information, see Instance Families and Types. The parameter values that you use to specify the various types are in the API Name column of the Available Instance Types table.

    ", + "CreateInstanceRequest$Hostname": "

    The instance host name.

    ", + "CreateInstanceRequest$Os": "

    The instance's operating system, which must be set to one of the following.

    • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

    • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

    • CentOS 7

    • Red Hat Enterprise Linux 7

    • A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

    • A custom AMI: Custom.

    For more information on the supported operating systems, see AWS OpsWorks Operating Systems.

    The default option is the current Amazon Linux version. If you set this parameter to Custom, you must use the CreateInstance action's AmiId parameter to specify the custom AMI that you want to use. Block device mappings are not supported if the value is Custom. For more information on the supported operating systems, see Operating Systems. For more information on how to use custom AMIs with AWS OpsWorks, see Using Custom AMIs.

    ", + "CreateInstanceRequest$AmiId": "

    A custom AMI ID to be used to create the instance. The AMI should be based on one of the supported operating systems. For more information, see Using Custom AMIs.

    If you specify a custom AMI, you must set Os to Custom.

    ", + "CreateInstanceRequest$SshKeyName": "

    The instance's Amazon EC2 key-pair name.

    ", + "CreateInstanceRequest$AvailabilityZone": "

    The instance Availability Zone. For more information, see Regions and Endpoints.

    ", + "CreateInstanceRequest$VirtualizationType": "

    The instance's virtualization type, paravirtual or hvm.

    ", + "CreateInstanceRequest$SubnetId": "

    The ID of the instance's subnet. If the stack is running in a VPC, you can use this parameter to override the stack's default subnet ID value and direct AWS OpsWorks to launch the instance in a different subnet.

    ", + "CreateInstanceRequest$AgentVersion": "

    The default AWS OpsWorks agent version. You have the following options:

    • INHERIT - Use the stack's default agent version setting.

    • version_number - Use the specified agent version. This value overrides the stack's default setting. To update the agent version, edit the instance configuration and specify a new version. AWS OpsWorks then automatically installs that version on the instance.

    The default setting is INHERIT. To specify an agent version, you must use the complete version number, not the abbreviated number shown on the console. For a list of available agent version numbers, call DescribeAgentVersions.

    ", + "CreateInstanceRequest$Tenancy": "

    The instance's tenancy option. The default option is no tenancy, or if the instance is running in a VPC, inherit tenancy settings from the VPC. The following are valid values for this parameter: dedicated, default, or host. Because there are costs associated with changes in tenancy options, we recommend that you research tenancy options before choosing them for your instances. For more information about dedicated hosts, see Dedicated Hosts Overview and Amazon EC2 Dedicated Hosts. For more information about dedicated instances, see Dedicated Instances and Amazon EC2 Dedicated Instances.

    ", + "CreateInstanceResult$InstanceId": "

    The instance ID.

    ", + "CreateLayerRequest$StackId": "

    The layer stack ID.

    ", + "CreateLayerRequest$Name": "

    The layer name, which is used by the console.

    ", + "CreateLayerRequest$Shortname": "

    For custom layers only, use this parameter to specify the layer's short name, which is used internally by AWS OpsWorks and by Chef recipes. The short name is also used as the name for the directory where your app files are installed. It can have a maximum of 200 characters, which are limited to the alphanumeric characters, '-', '_', and '.'.

    The built-in layers' short names are defined by AWS OpsWorks. For more information, see the Layer Reference.

    ", + "CreateLayerRequest$CustomInstanceProfileArn": "

    The ARN of an IAM profile to be used for the layer's EC2 instances. For more information about IAM ARNs, see Using Identifiers.

    ", + "CreateLayerRequest$CustomJson": "

    A JSON-formatted string containing custom stack configuration and deployment attributes to be installed on the layer's instances. For more information, see Using Custom JSON. This feature is supported as of version 1.7.42 of the AWS CLI.

    ", + "CreateLayerResult$LayerId": "

    The layer ID.

    ", + "CreateStackRequest$Name": "

    The stack name.

    ", + "CreateStackRequest$Region": "

    The stack's AWS region, such as \"us-east-1\". For more information about Amazon regions, see Regions and Endpoints.

    ", + "CreateStackRequest$VpcId": "

    The ID of the VPC that the stack is to be launched into. The VPC must be in the stack's region. All instances are launched into this VPC. You cannot change the ID later.

    • If your account supports EC2-Classic, the default value is no VPC.

    • If your account does not support EC2-Classic, the default value is the default VPC for the specified region.

    If the VPC ID corresponds to a default VPC and you have specified either the DefaultAvailabilityZone or the DefaultSubnetId parameter only, AWS OpsWorks infers the value of the other parameter. If you specify neither parameter, AWS OpsWorks sets these parameters to the first valid Availability Zone for the specified region and the corresponding default VPC subnet ID, respectively.

    If you specify a nondefault VPC ID, note the following:

    • It must belong to a VPC in your account that is in the specified region.

    • You must specify a value for DefaultSubnetId.

    For more information on how to use AWS OpsWorks with a VPC, see Running a Stack in a VPC. For more information on default VPC and EC2-Classic, see Supported Platforms.

    ", + "CreateStackRequest$ServiceRoleArn": "

    The stack's AWS Identity and Access Management (IAM) role, which allows AWS OpsWorks to work with AWS resources on your behalf. You must set this parameter to the Amazon Resource Name (ARN) for an existing IAM role. For more information about IAM ARNs, see Using Identifiers.

    ", + "CreateStackRequest$DefaultInstanceProfileArn": "

    The Amazon Resource Name (ARN) of an IAM profile that is the default profile for all of the stack's EC2 instances. For more information about IAM ARNs, see Using Identifiers.

    ", + "CreateStackRequest$DefaultOs": "

    The stack's default operating system, which is installed on every instance unless you specify a different operating system when you create the instance. You can specify one of the following.

    • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

    • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

    • CentOS 7

    • Red Hat Enterprise Linux 7

    • A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

    • A custom AMI: Custom. You specify the custom AMI you want to use when you create instances. For more information, see Using Custom AMIs.

    The default option is the current Amazon Linux version. For more information on the supported operating systems, see AWS OpsWorks Operating Systems.

    ", + "CreateStackRequest$HostnameTheme": "

    The stack's host name theme, with spaces replaced by underscores. The theme is used to generate host names for the stack's instances. By default, HostnameTheme is set to Layer_Dependent, which creates host names by appending integers to the layer's short name. The other themes are:

    • Baked_Goods

    • Clouds

    • Europe_Cities

    • Fruits

    • Greek_Deities

    • Legendary_creatures_from_Japan

    • Planets_and_Moons

    • Roman_Deities

    • Scottish_Islands

    • US_Cities

    • Wild_Cats

    To obtain a generated host name, call GetHostNameSuggestion, which returns a host name based on the current theme.

    ", + "CreateStackRequest$DefaultAvailabilityZone": "

    The stack's default Availability Zone, which must be in the specified region. For more information, see Regions and Endpoints. If you also specify a value for DefaultSubnetId, the subnet must be in the same zone. For more information, see the VpcId parameter description.

    ", + "CreateStackRequest$DefaultSubnetId": "

    The stack's default VPC subnet ID. This parameter is required if you specify a value for the VpcId parameter. All instances are launched into this subnet unless you specify otherwise when you create the instance. If you also specify a value for DefaultAvailabilityZone, the subnet must be in that zone. For information on default values and when this parameter is required, see the VpcId parameter description.

    ", + "CreateStackRequest$CustomJson": "

    A string that contains user-defined, custom JSON. It can be used to override the corresponding default stack configuration attribute values or to pass data to recipes. The string should be in the following format and must escape characters such as '\"':

    \"{\\\"key1\\\": \\\"value1\\\", \\\"key2\\\": \\\"value2\\\",...}\"

    For more information on custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes.

    ", + "CreateStackRequest$DefaultSshKeyName": "

    A default Amazon EC2 key pair name. The default value is none. If you specify a key pair name, AWS OpsWorks installs the public key on the instance and you can use the private key with an SSH client to log in to the instance. For more information, see Using SSH to Communicate with an Instance and Managing SSH Access. You can override this setting by specifying a different key pair, or no key pair, when you create an instance.

    ", + "CreateStackRequest$AgentVersion": "

    The default AWS OpsWorks agent version. You have the following options:

    • Auto-update - Set this parameter to LATEST. AWS OpsWorks automatically installs new agent versions on the stack's instances as soon as they are available.

    • Fixed version - Set this parameter to your preferred agent version. To update the agent version, you must edit the stack configuration and specify a new version. AWS OpsWorks then automatically installs that version on the stack's instances.

    The default setting is the most recent release of the agent. To specify an agent version, you must use the complete version number, not the abbreviated number shown on the console. For a list of available agent version numbers, call DescribeAgentVersions.

    You can also specify an agent version when you create or update an instance, which overrides the stack's default setting.

    ", + "CreateStackResult$StackId": "

    The stack ID, which is an opaque string that you use to identify the stack when performing actions such as DescribeStacks.

    ", + "CreateUserProfileRequest$IamUserArn": "

    The user's IAM ARN.

    ", + "CreateUserProfileRequest$SshUsername": "

    The user's SSH user name. The allowable characters are [a-z], [A-Z], [0-9], '-', and '_'. If the specified name includes other punctuation marks, AWS OpsWorks removes them. For example, my.name will be changed to myname. If you do not specify an SSH user name, AWS OpsWorks generates one from the IAM user name.

    ", + "CreateUserProfileRequest$SshPublicKey": "

    The user's public SSH key.

    ", + "CreateUserProfileResult$IamUserArn": "

    The user's IAM ARN.

    ", + "DataSource$Type": "

    The data source's type, AutoSelectOpsworksMysqlInstance, OpsworksMysqlInstance, or RdsDbInstance.

    ", + "DataSource$Arn": "

    The data source's ARN.

    ", + "DataSource$DatabaseName": "

    The database name.

    ", + "DeleteAppRequest$AppId": "

    The app ID.

    ", + "DeleteInstanceRequest$InstanceId": "

    The instance ID.

    ", + "DeleteLayerRequest$LayerId": "

    The layer ID.

    ", + "DeleteStackRequest$StackId": "

    The stack ID.

    ", + "DeleteUserProfileRequest$IamUserArn": "

    The user's IAM ARN.

    ", + "Deployment$DeploymentId": "

    The deployment ID.

    ", + "Deployment$StackId": "

    The stack ID.

    ", + "Deployment$AppId": "

    The app ID.

    ", + "Deployment$IamUserArn": "

    The user's IAM ARN.

    ", + "Deployment$Comment": "

    A user-defined comment.

    ", + "Deployment$Status": "

    The deployment status:

    • running

    • successful

    • failed

    ", + "Deployment$CustomJson": "

    A string that contains user-defined custom JSON. It can be used to override the corresponding default stack configuration attribute values for the stack or to pass data to recipes. The string should be in the following format and must escape characters such as '\"':

    \"{\\\"key1\\\": \\\"value1\\\", \\\"key2\\\": \\\"value2\\\",...}\"

    For more information on custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes.

    ", + "DeploymentCommandArgs$key": null, + "DeregisterEcsClusterRequest$EcsClusterArn": "

    The cluster's ARN.

    ", + "DeregisterElasticIpRequest$ElasticIp": "

    The Elastic IP address.

    ", + "DeregisterInstanceRequest$InstanceId": "

    The instance ID.

    ", + "DeregisterRdsDbInstanceRequest$RdsDbInstanceArn": "

    The Amazon RDS instance's ARN.

    ", + "DeregisterVolumeRequest$VolumeId": "

    The AWS OpsWorks volume ID, which is the GUID that AWS OpsWorks assigned to the instance when you registered the volume with the stack, not the Amazon EC2 volume ID.

    ", + "DescribeAgentVersionsRequest$StackId": "

    The stack ID.

    ", + "DescribeAppsRequest$StackId": "

    The app stack ID. If you use this parameter, DescribeApps returns a description of the apps in the specified stack.

    ", + "DescribeCommandsRequest$DeploymentId": "

    The deployment ID. If you include this parameter, DescribeCommands returns a description of the commands associated with the specified deployment.

    ", + "DescribeCommandsRequest$InstanceId": "

    The instance ID. If you include this parameter, DescribeCommands returns a description of the commands associated with the specified instance.

    ", + "DescribeDeploymentsRequest$StackId": "

    The stack ID. If you include this parameter, DescribeDeployments returns a description of the commands associated with the specified stack.

    ", + "DescribeDeploymentsRequest$AppId": "

    The app ID. If you include this parameter, DescribeDeployments returns a description of the commands associated with the specified app.

    ", + "DescribeEcsClustersRequest$StackId": "

    A stack ID. DescribeEcsClusters returns a description of the cluster that is registered with the stack.

    ", + "DescribeEcsClustersRequest$NextToken": "

    If the previous paginated request did not return all of the remaining results, the response object's NextToken parameter value is set to a token. To retrieve the next set of results, call DescribeEcsClusters again and assign that token to the request object's NextToken parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null.

    ", + "DescribeEcsClustersResult$NextToken": "

    If a paginated request does not return all of the remaining results, this parameter is set to a token that you can assign to the request object's NextToken parameter to retrieve the next set of results. If the previous paginated request returned all of the remaining results, this parameter is set to null.
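
A minimal sketch of this token loop for DescribeEcsClusters (the stack ID is a placeholder); the SDK also generates a DescribeEcsClustersPages helper that wraps the same loop:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/opsworks"
)

func main() {
	svc := opsworks.New(session.Must(session.NewSession(
		&aws.Config{Region: aws.String("us-east-1")})))

	input := &opsworks.DescribeEcsClustersInput{
		StackId:    aws.String("stack-id"),
		MaxResults: aws.Int64(10),
	}
	for {
		out, err := svc.DescribeEcsClusters(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, c := range out.EcsClusters {
			fmt.Println(aws.StringValue(c.EcsClusterName))
		}
		if out.NextToken == nil {
			break // all results retrieved
		}
		input.NextToken = out.NextToken // request the next page
	}
}
```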

    ", + "DescribeElasticIpsRequest$InstanceId": "

    The instance ID. If you include this parameter, DescribeElasticIps returns a description of the Elastic IP addresses associated with the specified instance.

    ", + "DescribeElasticIpsRequest$StackId": "

    A stack ID. If you include this parameter, DescribeElasticIps returns a description of the Elastic IP addresses that are registered with the specified stack.

    ", + "DescribeElasticLoadBalancersRequest$StackId": "

    A stack ID. The action describes the stack's Elastic Load Balancing instances.

    ", + "DescribeInstancesRequest$StackId": "

    A stack ID. If you use this parameter, DescribeInstances returns descriptions of the instances associated with the specified stack.

    ", + "DescribeInstancesRequest$LayerId": "

    A layer ID. If you use this parameter, DescribeInstances returns descriptions of the instances associated with the specified layer.

    ", + "DescribeLayersRequest$StackId": "

    The stack ID.

    ", + "DescribePermissionsRequest$IamUserArn": "

    The user's IAM ARN. For more information about IAM ARNs, see Using Identifiers.

    ", + "DescribePermissionsRequest$StackId": "

    The stack ID.

    ", + "DescribeRaidArraysRequest$InstanceId": "

    The instance ID. If you use this parameter, DescribeRaidArrays returns descriptions of the RAID arrays associated with the specified instance.

    ", + "DescribeRaidArraysRequest$StackId": "

    The stack ID.

    ", + "DescribeRdsDbInstancesRequest$StackId": "

    The stack ID that the instances are registered with. The operation returns descriptions of all registered Amazon RDS instances.

    ", + "DescribeServiceErrorsRequest$StackId": "

    The stack ID. If you use this parameter, DescribeServiceErrors returns descriptions of the errors associated with the specified stack.

    ", + "DescribeServiceErrorsRequest$InstanceId": "

    The instance ID. If you use this parameter, DescribeServiceErrors returns descriptions of the errors associated with the specified instance.

    ", + "DescribeStackProvisioningParametersRequest$StackId": "

    The stack ID.

    ", + "DescribeStackProvisioningParametersResult$AgentInstallerUrl": "

    The AWS OpsWorks agent installer's URL.

    ", + "DescribeStackSummaryRequest$StackId": "

    The stack ID.

    ", + "DescribeVolumesRequest$InstanceId": "

    The instance ID. If you use this parameter, DescribeVolumes returns descriptions of the volumes associated with the specified instance.

    ", + "DescribeVolumesRequest$StackId": "

    A stack ID. The action describes the stack's registered Amazon EBS volumes.

    ", + "DescribeVolumesRequest$RaidArrayId": "

    The RAID array ID. If you use this parameter, DescribeVolumes returns descriptions of the volumes associated with the specified RAID array.

    ", + "DetachElasticLoadBalancerRequest$ElasticLoadBalancerName": "

    The Elastic Load Balancing instance's name.

    ", + "DetachElasticLoadBalancerRequest$LayerId": "

    The ID of the layer that the Elastic Load Balancing instance is attached to.

    ", + "DisassociateElasticIpRequest$ElasticIp": "

    The Elastic IP address.

    ", + "EbsBlockDevice$SnapshotId": "

    The snapshot ID.

    ", + "EcsCluster$EcsClusterArn": "

    The cluster's ARN.

    ", + "EcsCluster$EcsClusterName": "

    The cluster name.

    ", + "EcsCluster$StackId": "

    The stack ID.

    ", + "ElasticIp$Ip": "

    The IP address.

    ", + "ElasticIp$Name": "

    The name.

    ", + "ElasticIp$Domain": "

    The domain.

    ", + "ElasticIp$Region": "

    The AWS region. For more information, see Regions and Endpoints.

    ", + "ElasticIp$InstanceId": "

    The ID of the instance that the address is attached to.

    ", + "ElasticLoadBalancer$ElasticLoadBalancerName": "

    The Elastic Load Balancing instance's name.

    ", + "ElasticLoadBalancer$Region": "

    The instance's AWS region.

    ", + "ElasticLoadBalancer$DnsName": "

    The instance's public DNS name.

    ", + "ElasticLoadBalancer$StackId": "

    The ID of the stack that the instance is associated with.

    ", + "ElasticLoadBalancer$LayerId": "

    The ID of the layer that the instance is attached to.

    ", + "ElasticLoadBalancer$VpcId": "

    The VPC ID.

    ", + "EnvironmentVariable$Key": "

    (Required) The environment variable's name, which can consist of up to 64 characters and must be specified. The name can contain upper- and lowercase letters, numbers, and underscores (_), but it must start with a letter or underscore.

    ", + "EnvironmentVariable$Value": "

    (Optional) The environment variable's value, which can be left empty. If you specify a value, it can contain up to 256 characters, which must all be printable.
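
A minimal sketch setting app environment variables within these limits, assuming a placeholder app ID; the Secure flag hides a value in subsequent responses:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/opsworks"
)

func main() {
	svc := opsworks.New(session.Must(session.NewSession(
		&aws.Config{Region: aws.String("us-east-1")})))

	_, err := svc.UpdateApp(&opsworks.UpdateAppInput{
		AppId: aws.String("app-id"), // placeholder
		Environment: []*opsworks.EnvironmentVariable{
			{Key: aws.String("APP_ENV"), Value: aws.String("production")},
			{Key: aws.String("DB_PASSWORD"), Value: aws.String("secret"),
				Secure: aws.Bool(true)}, // returned as *****FILTERED*****
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```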

    ", + "GetHostnameSuggestionRequest$LayerId": "

    The layer ID.

    ", + "GetHostnameSuggestionResult$LayerId": "

    The layer ID.

    ", + "GetHostnameSuggestionResult$Hostname": "

    The generated host name.

    ", + "GrantAccessRequest$InstanceId": "

    The instance's AWS OpsWorks ID.

    ", + "Instance$AgentVersion": "

    The agent version. This parameter is set to INHERIT if the instance inherits the default stack setting or to a version number for a fixed agent version.

    ", + "Instance$AmiId": "

    A custom AMI ID to be used to create the instance. For more information, see Instances.

    ", + "Instance$AvailabilityZone": "

    The instance Availability Zone. For more information, see Regions and Endpoints.

    ", + "Instance$Ec2InstanceId": "

    The ID of the associated Amazon EC2 instance.

    ", + "Instance$EcsClusterArn": "

    For container instances, the Amazon ECS cluster's ARN.

    ", + "Instance$EcsContainerInstanceArn": "

    For container instances, the instance's ARN.

    ", + "Instance$ElasticIp": "

    The instance's Elastic IP address.

    ", + "Instance$Hostname": "

    The instance host name.

    ", + "Instance$InfrastructureClass": "

    For registered instances, the infrastructure class: ec2 or on-premises.

    ", + "Instance$InstanceId": "

    The instance ID.

    ", + "Instance$InstanceProfileArn": "

    The ARN of the instance's IAM profile. For more information about IAM ARNs, see Using Identifiers.

    ", + "Instance$InstanceType": "

    The instance type, such as t2.micro.

    ", + "Instance$LastServiceErrorId": "

    The ID of the last service error. For more information, call DescribeServiceErrors.

    ", + "Instance$Os": "

    The instance's operating system.

    ", + "Instance$Platform": "

    The instance's platform.

    ", + "Instance$PrivateDns": "

    The instance's private DNS name.

    ", + "Instance$PrivateIp": "

    The instance's private IP address.

    ", + "Instance$PublicDns": "

    The instance public DNS name.

    ", + "Instance$PublicIp": "

    The instance public IP address.

    ", + "Instance$RegisteredBy": "

    For registered instances, who performed the registration.

    ", + "Instance$ReportedAgentVersion": "

    The instance's reported AWS OpsWorks agent version.

    ", + "Instance$RootDeviceVolumeId": "

    The root device volume ID.

    ", + "Instance$SshHostDsaKeyFingerprint": "

    The SSH key's Digital Signature Algorithm (DSA) fingerprint.

    ", + "Instance$SshHostRsaKeyFingerprint": "

    The SSH key's RSA fingerprint.

    ", + "Instance$SshKeyName": "

    The instance's Amazon EC2 key-pair name.

    ", + "Instance$StackId": "

    The stack ID.

    ", + "Instance$Status": "

    The instance status:

    • booting

    • connection_lost

    • online

    • pending

    • rebooting

    • requested

    • running_setup

    • setup_failed

    • shutting_down

    • start_failed

    • stop_failed

    • stopped

    • stopping

    • terminated

    • terminating

    ", + "Instance$SubnetId": "

    The instance's subnet ID; applicable only if the stack is running in a VPC.

    ", + "Instance$Tenancy": "

    The instance's tenancy option, such as dedicated or host.

    ", + "InstanceIdentity$Document": "

    A JSON document that contains the metadata.

    ", + "InstanceIdentity$Signature": "

    A signature that can be used to verify the document's accuracy and authenticity.

    ", + "Layer$StackId": "

    The layer stack ID.

    ", + "Layer$LayerId": "

    The layer ID.

    ", + "Layer$Name": "

    The layer name.

    ", + "Layer$Shortname": "

    The layer short name.

    ", + "Layer$CustomInstanceProfileArn": "

    The ARN of the default IAM profile to be used for the layer's EC2 instances. For more information about IAM ARNs, see Using Identifiers.

    ", + "Layer$CustomJson": "

    A JSON formatted string containing the layer's custom stack configuration and deployment attributes.

    ", + "LayerAttributes$value": null, + "LoadBasedAutoScalingConfiguration$LayerId": "

    The layer ID.

    ", + "Parameters$key": null, + "Parameters$value": null, + "Permission$StackId": "

    A stack ID.

    ", + "Permission$IamUserArn": "

    The Amazon Resource Name (ARN) for an AWS Identity and Access Management (IAM) role. For more information about IAM ARNs, see Using Identifiers.

    ", + "Permission$Level": "

    The user's permission level, which must be one of the following:

    • deny

    • show

    • deploy

    • manage

    • iam_only

    For more information on the permissions associated with these levels, see Managing User Permissions.

    ", + "RaidArray$RaidArrayId": "

    The array ID.

    ", + "RaidArray$InstanceId": "

    The instance ID.

    ", + "RaidArray$Name": "

    The array name.

    ", + "RaidArray$Device": "

    The array's Linux device. For example, /dev/mdadm0.

    ", + "RaidArray$MountPoint": "

    The array's mount point.

    ", + "RaidArray$AvailabilityZone": "

    The array's Availability Zone. For more information, see Regions and Endpoints.

    ", + "RaidArray$StackId": "

    The stack ID.

    ", + "RaidArray$VolumeType": "

    The volume type, standard or PIOPS.

    ", + "RdsDbInstance$RdsDbInstanceArn": "

    The instance's ARN.

    ", + "RdsDbInstance$DbInstanceIdentifier": "

    The DB instance identifier.

    ", + "RdsDbInstance$DbUser": "

    The master user name.

    ", + "RdsDbInstance$DbPassword": "

    AWS OpsWorks returns *****FILTERED***** instead of the actual value.

    ", + "RdsDbInstance$Region": "

    The instance's AWS region.

    ", + "RdsDbInstance$Address": "

    The instance's address.

    ", + "RdsDbInstance$Engine": "

    The instance's database engine.

    ", + "RdsDbInstance$StackId": "

    The ID of the stack that the instance is registered with.

    ", + "RebootInstanceRequest$InstanceId": "

    The instance ID.

    ", + "RegisterEcsClusterRequest$EcsClusterArn": "

    The cluster's ARN.

    ", + "RegisterEcsClusterRequest$StackId": "

    The stack ID.

    ", + "RegisterEcsClusterResult$EcsClusterArn": "

    The cluster's ARN.

    ", + "RegisterElasticIpRequest$ElasticIp": "

    The Elastic IP address.

    ", + "RegisterElasticIpRequest$StackId": "

    The stack ID.

    ", + "RegisterElasticIpResult$ElasticIp": "

    The Elastic IP address.

    ", + "RegisterInstanceRequest$StackId": "

    The ID of the stack that the instance is to be registered with.

    ", + "RegisterInstanceRequest$Hostname": "

    The instance's hostname.

    ", + "RegisterInstanceRequest$PublicIp": "

    The instance's public IP address.

    ", + "RegisterInstanceRequest$PrivateIp": "

    The instance's private IP address.

    ", + "RegisterInstanceRequest$RsaPublicKey": "

    The instance's public RSA key. This key is used to encrypt communication between the instance and the service.

    ", + "RegisterInstanceRequest$RsaPublicKeyFingerprint": "

    The instance's public RSA key fingerprint.

    ", + "RegisterInstanceResult$InstanceId": "

    The registered instance's AWS OpsWorks ID.
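
A hedged sketch of a registration call with the vendored SDK; in practice registration is usually driven by the AWS CLI from the instance itself, and the stack ID, addresses, and key path below are placeholders:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/opsworks"
)

func main() {
	svc := opsworks.New(session.Must(session.NewSession(
		&aws.Config{Region: aws.String("us-east-1")})))

	// The public RSA key is used to encrypt traffic with the service.
	key, err := ioutil.ReadFile("/etc/ssh/ssh_host_rsa_key.pub")
	if err != nil {
		log.Fatal(err)
	}
	out, err := svc.RegisterInstance(&opsworks.RegisterInstanceInput{
		StackId:      aws.String("stack-id"),
		Hostname:     aws.String("onprem-web1"),
		PublicIp:     aws.String("203.0.113.10"),
		PrivateIp:    aws.String("10.0.0.10"),
		RsaPublicKey: aws.String(string(key)),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("OpsWorks ID:", aws.StringValue(out.InstanceId))
}
```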

    ", + "RegisterRdsDbInstanceRequest$StackId": "

    The stack ID.

    ", + "RegisterRdsDbInstanceRequest$RdsDbInstanceArn": "

    The Amazon RDS instance's ARN.

    ", + "RegisterRdsDbInstanceRequest$DbUser": "

    The database's master user name.

    ", + "RegisterRdsDbInstanceRequest$DbPassword": "

    The database password.

    ", + "RegisterVolumeRequest$Ec2VolumeId": "

    The Amazon EBS volume ID.

    ", + "RegisterVolumeRequest$StackId": "

    The stack ID.

    ", + "RegisterVolumeResult$VolumeId": "

    The volume ID.

    ", + "ReportedOs$Family": "

    The operating system family.

    ", + "ReportedOs$Name": "

    The operating system name.

    ", + "ReportedOs$Version": "

    The operating system version.

    ", + "ResourceNotFoundException$message": "

    The exception message.

    ", + "SelfUserProfile$IamUserArn": "

    The user's IAM ARN.

    ", + "SelfUserProfile$Name": "

    The user's name.

    ", + "SelfUserProfile$SshUsername": "

    The user's SSH user name.

    ", + "SelfUserProfile$SshPublicKey": "

    The user's SSH public key.

    ", + "ServiceError$ServiceErrorId": "

    The error ID.

    ", + "ServiceError$StackId": "

    The stack ID.

    ", + "ServiceError$InstanceId": "

    The instance ID.

    ", + "ServiceError$Type": "

    The error type.

    ", + "ServiceError$Message": "

    A message that describes the error.

    ", + "SetLoadBasedAutoScalingRequest$LayerId": "

    The layer ID.

    ", + "SetPermissionRequest$StackId": "

    The stack ID.

    ", + "SetPermissionRequest$IamUserArn": "

    The user's IAM ARN.

    ", + "SetPermissionRequest$Level": "

    The user's permission level, which must be set to one of the following strings. You cannot set your own permissions level.

    • deny

    • show

    • deploy

    • manage

    • iam_only

    For more information on the permissions associated with these levels, see Managing User Permissions.
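
A minimal sketch granting the deploy level, assuming placeholder stack and user ARNs:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/opsworks"
)

func main() {
	svc := opsworks.New(session.Must(session.NewSession(
		&aws.Config{Region: aws.String("us-east-1")})))

	_, err := svc.SetPermission(&opsworks.SetPermissionInput{
		StackId:    aws.String("stack-id"),
		IamUserArn: aws.String("arn:aws:iam::123456789012:user/alice"),
		Level:      aws.String("deploy"),
		AllowSsh:   aws.Bool(true),
		AllowSudo:  aws.Bool(false),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```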

    ", + "SetTimeBasedAutoScalingRequest$InstanceId": "

    The instance ID.

    ", + "Source$Url": "

    The source URL.

    ", + "Source$Username": "

    This parameter depends on the repository type.

    • For Amazon S3 bundles, set Username to the appropriate IAM access key ID.

    • For HTTP bundles, Git repositories, and Subversion repositories, set Username to the user name.

    ", + "Source$Password": "

    When included in a request, the parameter depends on the repository type.

    • For Amazon S3 bundles, set Password to the appropriate IAM secret access key.

    • For HTTP bundles and Subversion repositories, set Password to the password.

    For more information on how to safely handle IAM credentials, see http://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html.

    In responses, AWS OpsWorks returns *****FILTERED***** instead of the actual value.

    ", + "Source$SshKey": "

    In requests, the repository's SSH key.

    In responses, AWS OpsWorks returns *****FILTERED***** instead of the actual value.

    ", + "Source$Revision": "

    The application's version. AWS OpsWorks enables you to easily deploy new versions of an application. One of the simplest approaches is to have branches or revisions in your repository that represent different versions that can potentially be deployed.

    ", + "SslConfiguration$Certificate": "

    The contents of the certificate's domain.crt file.

    ", + "SslConfiguration$PrivateKey": "

    The private key; the contents of the certificate's domain.key file.

    ", + "SslConfiguration$Chain": "

    Optional. Can be used to specify an intermediate certificate authority key or client authentication.
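
A minimal sketch wiring the three fields into UpdateApp; the file names and the app ID are placeholders:

```go
package main

import (
	"io/ioutil"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/opsworks"
)

// mustRead loads a PEM file or aborts; paths are placeholders.
func mustRead(path string) string {
	b, err := ioutil.ReadFile(path)
	if err != nil {
		log.Fatal(err)
	}
	return string(b)
}

func main() {
	svc := opsworks.New(session.Must(session.NewSession(
		&aws.Config{Region: aws.String("us-east-1")})))

	_, err := svc.UpdateApp(&opsworks.UpdateAppInput{
		AppId:     aws.String("app-id"),
		EnableSsl: aws.Bool(true),
		SslConfiguration: &opsworks.SslConfiguration{
			Certificate: aws.String(mustRead("domain.crt")),
			PrivateKey:  aws.String(mustRead("domain.key")),
			Chain:       aws.String(mustRead("chain.crt")), // optional CA chain
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```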

    ", + "Stack$StackId": "

    The stack ID.

    ", + "Stack$Name": "

    The stack name.

    ", + "Stack$Arn": "

    The stack's ARN.

    ", + "Stack$Region": "

    The stack AWS region, such as \"us-east-1\". For more information about AWS regions, see Regions and Endpoints.

    ", + "Stack$VpcId": "

    The VPC ID; applicable only if the stack is running in a VPC.

    ", + "Stack$ServiceRoleArn": "

    The stack AWS Identity and Access Management (IAM) role.

    ", + "Stack$DefaultInstanceProfileArn": "

    The ARN of an IAM profile that is the default profile for all of the stack's EC2 instances. For more information about IAM ARNs, see Using Identifiers.

    ", + "Stack$DefaultOs": "

    The stack's default operating system.

    ", + "Stack$HostnameTheme": "

    The stack host name theme, with spaces replaced by underscores.

    ", + "Stack$DefaultAvailabilityZone": "

    The stack's default Availability Zone. For more information, see Regions and Endpoints.

    ", + "Stack$DefaultSubnetId": "

    The default subnet ID; applicable only if the stack is running in a VPC.

    ", + "Stack$CustomJson": "

    A JSON object that contains user-defined attributes to be added to the stack configuration and deployment attributes. You can use custom JSON to override the corresponding default stack configuration attribute values or to pass data to recipes. The string should be in the following format and must escape characters such as '\"':

    \"{\\\"key1\\\": \\\"value1\\\", \\\"key2\\\": \\\"value2\\\",...}\"

    For more information on custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes.

    ", + "Stack$DefaultSshKeyName": "

    A default Amazon EC2 key pair for the stack's instances. You can override this value when you create or update an instance.

    ", + "Stack$AgentVersion": "

    The agent version. This parameter is set to LATEST for auto-update, or to a version number for a fixed agent version.

    ", + "StackAttributes$value": null, + "StackConfigurationManager$Name": "

    The name. This parameter must be set to \"Chef\".

    ", + "StackConfigurationManager$Version": "

    The Chef version. This parameter must be set to 12, 11.10, or 11.4 for Linux stacks, and to 12.2 for Windows stacks. The default value for Linux stacks is 11.4.
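
A minimal sketch pinning a Linux stack to Chef 12 via UpdateStack; the stack ID is a placeholder:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/opsworks"
)

func main() {
	svc := opsworks.New(session.Must(session.NewSession(
		&aws.Config{Region: aws.String("us-east-1")})))

	_, err := svc.UpdateStack(&opsworks.UpdateStackInput{
		StackId: aws.String("stack-id"),
		ConfigurationManager: &opsworks.StackConfigurationManager{
			Name:    aws.String("Chef"),
			Version: aws.String("12"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```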

    ", + "StackSummary$StackId": "

    The stack ID.

    ", + "StackSummary$Name": "

    The stack name.

    ", + "StackSummary$Arn": "

    The stack's ARN.

    ", + "StartInstanceRequest$InstanceId": "

    The instance ID.

    ", + "StartStackRequest$StackId": "

    The stack ID.

    ", + "StopInstanceRequest$InstanceId": "

    The instance ID.

    ", + "StopStackRequest$StackId": "

    The stack ID.

    ", + "Strings$member": null, + "TemporaryCredential$Username": "

    The user name.

    ", + "TemporaryCredential$Password": "

    The password.

    ", + "TemporaryCredential$InstanceId": "

    The instance's AWS OpsWorks ID.

    ", + "TimeBasedAutoScalingConfiguration$InstanceId": "

    The instance ID.

    ", + "UnassignInstanceRequest$InstanceId": "

    The instance ID.

    ", + "UnassignVolumeRequest$VolumeId": "

    The volume ID.

    ", + "UpdateAppRequest$AppId": "

    The app ID.

    ", + "UpdateAppRequest$Name": "

    The app name.

    ", + "UpdateAppRequest$Description": "

    A description of the app.

    ", + "UpdateElasticIpRequest$ElasticIp": "

    The address.

    ", + "UpdateElasticIpRequest$Name": "

    The new name.

    ", + "UpdateInstanceRequest$InstanceId": "

    The instance ID.

    ", + "UpdateInstanceRequest$InstanceType": "

    The instance type, such as t2.micro. For a list of supported instance types, open the stack in the console, choose Instances, and choose + Instance. The Size list contains the currently supported types. For more information, see Instance Families and Types. The parameter values that you use to specify the various types are in the API Name column of the Available Instance Types table.

    ", + "UpdateInstanceRequest$Hostname": "

    The instance host name.

    ", + "UpdateInstanceRequest$Os": "

    The instance's operating system, which must be set to one of the following.

    • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

    • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

    • CentOS 7

    • Red Hat Enterprise Linux 7

    • A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

    • A custom AMI: Custom.

    For more information on the supported operating systems, see AWS OpsWorks Operating Systems.

    The default option is the current Amazon Linux version. If you set this parameter to Custom, you must use the AmiId parameter to specify the custom AMI that you want to use. For more information on the supported operating systems, see Operating Systems. For more information on how to use custom AMIs with OpsWorks, see Using Custom AMIs.

    You can specify a different Linux operating system for the updated instance, but you cannot change from Linux to Windows or Windows to Linux.

    ", + "UpdateInstanceRequest$AmiId": "

    A custom AMI ID to be used to create the instance. The AMI must be based on one of the supported operating systems. For more information, see Instances.

    If you specify a custom AMI, you must set Os to Custom.

    ", + "UpdateInstanceRequest$SshKeyName": "

    The instance's Amazon EC2 key name.

    ", + "UpdateInstanceRequest$AgentVersion": "

    The default AWS OpsWorks agent version. You have the following options:

    • INHERIT - Use the stack's default agent version setting.

    • version_number - Use the specified agent version. This value overrides the stack's default setting. To update the agent version, you must edit the instance configuration and specify a new version. AWS OpsWorks then automatically installs that version on the instance.

    The default setting is INHERIT. To specify an agent version, you must use the complete version number, not the abbreviated number shown on the console. For a list of available agent version numbers, call DescribeAgentVersions.

    ", + "UpdateLayerRequest$LayerId": "

    The layer ID.

    ", + "UpdateLayerRequest$Name": "

    The layer name, which is used by the console.

    ", + "UpdateLayerRequest$Shortname": "

    For custom layers only, use this parameter to specify the layer's short name, which is used internally by AWS OpsWorks and by Chef. The short name is also used as the name for the directory where your app files are installed. It can have a maximum of 200 characters and must be in the following format: /\\A[a-z0-9\\-\\_\\.]+\\Z/.

    The built-in layers' short names are defined by AWS OpsWorks. For more information, see the Layer Reference

    ", + "UpdateLayerRequest$CustomInstanceProfileArn": "

    The ARN of an IAM profile to be used for all of the layer's EC2 instances. For more information about IAM ARNs, see Using Identifiers.

    ", + "UpdateLayerRequest$CustomJson": "

    A JSON-formatted string containing custom stack configuration and deployment attributes to be installed on the layer's instances. For more information, see Using Custom JSON.

    ", + "UpdateMyUserProfileRequest$SshPublicKey": "

    The user's SSH public key.

    ", + "UpdateRdsDbInstanceRequest$RdsDbInstanceArn": "

    The Amazon RDS instance's ARN.

    ", + "UpdateRdsDbInstanceRequest$DbUser": "

    The master user name.

    ", + "UpdateRdsDbInstanceRequest$DbPassword": "

    The database password.

    ", + "UpdateStackRequest$StackId": "

    The stack ID.

    ", + "UpdateStackRequest$Name": "

    The stack's new name.

    ", + "UpdateStackRequest$ServiceRoleArn": "

    Do not use this parameter. You cannot update a stack's service role.

    ", + "UpdateStackRequest$DefaultInstanceProfileArn": "

    The ARN of an IAM profile that is the default profile for all of the stack's EC2 instances. For more information about IAM ARNs, see Using Identifiers.

    ", + "UpdateStackRequest$DefaultOs": "

    The stack's operating system, which must be set to one of the following:

    • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

    • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

    • CentOS 7

    • Red Hat Enterprise Linux 7

    • A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

    • A custom AMI: Custom. You specify the custom AMI you want to use when you create instances. For more information on how to use custom AMIs with OpsWorks, see Using Custom AMIs.

    The default option is the stack's current operating system. For more information on the supported operating systems, see AWS OpsWorks Operating Systems.

    ", + "UpdateStackRequest$HostnameTheme": "

    The stack's new host name theme, with spaces replaced by underscores. The theme is used to generate host names for the stack's instances. By default, HostnameTheme is set to Layer_Dependent, which creates host names by appending integers to the layer's short name. The other themes are:

    • Baked_Goods

    • Clouds

    • Europe_Cities

    • Fruits

    • Greek_Deities

    • Legendary_creatures_from_Japan

    • Planets_and_Moons

    • Roman_Deities

    • Scottish_Islands

    • US_Cities

    • Wild_Cats

    To obtain a generated host name, call GetHostNameSuggestion, which returns a host name based on the current theme.
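
    A minimal sketch of that call, assuming an existing svc client as before:

        package main

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/opsworks"
        )

        // suggestHostname asks OpsWorks for the next host name the layer's
        // stack would generate under its current HostnameTheme.
        func suggestHostname(svc *opsworks.OpsWorks, layerID string) (string, error) {
            out, err := svc.GetHostNameSuggestion(&opsworks.GetHostNameSuggestionInput{
                LayerId: aws.String(layerID),
            })
            if err != nil {
                return "", err
            }
            return aws.StringValue(out.Hostname), nil
        }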

    ", + "UpdateStackRequest$DefaultAvailabilityZone": "

    The stack's default Availability Zone, which must be in the stack's region. For more information, see Regions and Endpoints. If you also specify a value for DefaultSubnetId, the subnet must be in the same zone. For more information, see CreateStack.

    ", + "UpdateStackRequest$DefaultSubnetId": "

    The stack's default VPC subnet ID. This parameter is required if you specify a value for the VpcId parameter. All instances are launched into this subnet unless you specify otherwise when you create the instance. If you also specify a value for DefaultAvailabilityZone, the subnet must be in that zone. For information on default values and when this parameter is required, see the VpcId parameter description.

    ", + "UpdateStackRequest$CustomJson": "

    A string that contains user-defined, custom JSON. It can be used to override the corresponding default stack configuration JSON values or to pass data to recipes. The string should be in the following format and must escape characters such as '\"':

    \"{\\\"key1\\\": \\\"value1\\\", \\\"key2\\\": \\\"value2\\\",...}\"

    For more information on custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes.
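
    The escaping shown above applies to hand-written request bodies; when building the request in Go, marshaling a map yields a correctly quoted string. A sketch with illustrative keys, assuming an existing svc client:

        package main

        import (
            "encoding/json"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/opsworks"
        )

        // setCustomJSON overrides stack configuration attributes with custom
        // JSON; json.Marshal handles the quoting the raw format requires.
        func setCustomJSON(svc *opsworks.OpsWorks, stackID string) error {
            raw, err := json.Marshal(map[string]string{
                "key1": "value1", // illustrative keys and values
                "key2": "value2",
            })
            if err != nil {
                return err
            }
            _, err = svc.UpdateStack(&opsworks.UpdateStackInput{
                StackId:    aws.String(stackID),
                CustomJson: aws.String(string(raw)),
            })
            return err
        }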

    ", + "UpdateStackRequest$DefaultSshKeyName": "

    A default Amazon EC2 key-pair name. The default value is none. If you specify a key-pair name, AWS OpsWorks installs the public key on the instance and you can use the private key with an SSH client to log in to the instance. For more information, see Using SSH to Communicate with an Instance and Managing SSH Access. You can override this setting by specifying a different key pair, or no key pair, when you create an instance.

    ", + "UpdateStackRequest$AgentVersion": "

    The default AWS OpsWorks agent version. You have the following options:

    • Auto-update - Set this parameter to LATEST. AWS OpsWorks automatically installs new agent versions on the stack's instances as soon as they are available.

    • Fixed version - Set this parameter to your preferred agent version. To update the agent version, you must edit the stack configuration and specify a new version. AWS OpsWorks then automatically installs that version on the stack's instances.

    The default setting is LATEST. To specify an agent version, you must use the complete version number, not the abbreviated number shown on the console. For a list of available agent version numbers, call DescribeAgentVersions.

    You can also specify an agent version when you create or update an instance, which overrides the stack's default setting.

    ", + "UpdateUserProfileRequest$IamUserArn": "

    The user IAM ARN.

    ", + "UpdateUserProfileRequest$SshUsername": "

    The user's SSH user name. The allowable characters are [a-z], [A-Z], [0-9], '-', and '_'. If the specified name includes other punctuation marks, AWS OpsWorks removes them. For example, my.name will be changed to myname. If you do not specify an SSH user name, AWS OpsWorks generates one from the IAM user name.

    ", + "UpdateUserProfileRequest$SshPublicKey": "

    The user's new SSH public key.

    ", + "UpdateVolumeRequest$VolumeId": "

    The volume ID.

    ", + "UpdateVolumeRequest$Name": "

    The new name.

    ", + "UpdateVolumeRequest$MountPoint": "

    The new mount point.

    ", + "UserProfile$IamUserArn": "

    The user's IAM ARN.

    ", + "UserProfile$Name": "

    The user's name.

    ", + "UserProfile$SshUsername": "

    The user's SSH user name.

    ", + "UserProfile$SshPublicKey": "

    The user's SSH public key.

    ", + "ValidationException$message": "

    The exception message.

    ", + "Volume$VolumeId": "

    The volume ID.

    ", + "Volume$Ec2VolumeId": "

    The Amazon EC2 volume ID.

    ", + "Volume$Name": "

    The volume name.

    ", + "Volume$RaidArrayId": "

    The RAID array ID.

    ", + "Volume$InstanceId": "

    The instance ID.

    ", + "Volume$Status": "

    The value returned by DescribeVolumes.

    ", + "Volume$Device": "

    The device name.

    ", + "Volume$MountPoint": "

    The volume mount point. For example, \"/mnt/disk1\".

    ", + "Volume$Region": "

    The AWS region. For more information about AWS regions, see Regions and Endpoints.

    ", + "Volume$AvailabilityZone": "

    The volume Availability Zone. For more information, see Regions and Endpoints.

    ", + "Volume$VolumeType": "

    The volume type, standard or PIOPS.

    ", + "VolumeConfiguration$MountPoint": "

    The volume mount point. For example \"/dev/sdh\".

    ", + "VolumeConfiguration$VolumeType": "

    The volume type (see the sketch after this list):

    • standard - Magnetic

    • io1 - Provisioned IOPS (SSD)

    • gp2 - General Purpose (SSD)
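
    A sketch of one of these types in use: a custom layer whose instances get a single gp2 volume, again assuming an existing svc client; names and sizes are illustrative:

        package main

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/opsworks"
        )

        // addDataLayer creates a custom layer whose instances get one 100 GiB
        // General Purpose (SSD) volume mounted at /mnt/data. Swap in "io1"
        // (with an Iops value) or "standard" for the other documented types.
        func addDataLayer(svc *opsworks.OpsWorks, stackID string) error {
            _, err := svc.CreateLayer(&opsworks.CreateLayerInput{
                StackId:   aws.String(stackID),
                Type:      aws.String("custom"),
                Name:      aws.String("data-layer"),
                Shortname: aws.String("data"),
                VolumeConfigurations: []*opsworks.VolumeConfiguration{{
                    MountPoint:    aws.String("/mnt/data"),
                    NumberOfDisks: aws.Int64(1),
                    Size:          aws.Int64(100),
                    VolumeType:    aws.String("gp2"),
                }},
            })
            return err
        }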

    " + } + }, + "Strings": { + "base": null, + "refs": { + "App$Domains": "

    The app vhost settings, with multiple domains separated by commas. For example: 'www.example.com, example.com'

    ", + "AssignInstanceRequest$LayerIds": "

    The layer ID, which must correspond to a custom layer. You cannot assign a registered instance to a built-in layer.

    ", + "AutoScalingThresholds$Alarms": "

    Custom CloudWatch auto scaling alarms, to be used as thresholds. This parameter takes a list of up to five alarm names, which are case sensitive and must be in the same region as the stack.

    To use custom alarms, you must update your service role to allow cloudwatch:DescribeAlarms. You can either have AWS OpsWorks update the role for you when you first use this feature or you can edit the role manually. For more information, see Allowing AWS OpsWorks to Act on Your Behalf.
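
    A sketch of wiring a custom alarm into load-based scaling, assuming an existing svc client and that the service-role permission above is already in place; the alarm name is a placeholder:

        package main

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/opsworks"
        )

        // enableAlarmScaling turns on load-based scaling for a layer and adds
        // one instance whenever the named CloudWatch alarm fires. Up to five
        // alarm names are accepted; they must be in the stack's region.
        func enableAlarmScaling(svc *opsworks.OpsWorks, layerID, alarmName string) error {
            _, err := svc.SetLoadBasedAutoScaling(&opsworks.SetLoadBasedAutoScalingInput{
                LayerId: aws.String(layerID),
                Enable:  aws.Bool(true),
                UpScaling: &opsworks.AutoScalingThresholds{
                    Alarms:        []*string{aws.String(alarmName)},
                    InstanceCount: aws.Int64(1),
                },
            })
            return err
        }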

    ", + "CloneStackRequest$CloneAppIds": "

    A list of source stack app IDs to be included in the cloned stack.

    ", + "CreateAppRequest$Domains": "

    The app virtual host settings, with multiple domains separated by commas. For example: 'www.example.com, example.com'

    ", + "CreateDeploymentRequest$InstanceIds": "

    The instance IDs for the deployment targets.

    ", + "CreateDeploymentRequest$LayerIds": "

    The layer IDs for the deployment targets.

    ", + "CreateInstanceRequest$LayerIds": "

    An array that contains the instance's layer IDs.

    ", + "CreateLayerRequest$CustomSecurityGroupIds": "

    An array containing the layer custom security group IDs.

    ", + "CreateLayerRequest$Packages": "

    An array of Package objects that describes the layer packages.

    ", + "Deployment$InstanceIds": "

    The IDs of the target instances.

    ", + "DeploymentCommandArgs$value": null, + "DescribeAppsRequest$AppIds": "

    An array of app IDs for the apps to be described. If you use this parameter, DescribeApps returns a description of the specified apps. Otherwise, it returns a description of every app.

    ", + "DescribeCommandsRequest$CommandIds": "

    An array of command IDs. If you include this parameter, DescribeCommands returns a description of the specified commands. Otherwise, it returns a description of every command.

    ", + "DescribeDeploymentsRequest$DeploymentIds": "

    An array of deployment IDs to be described. If you include this parameter, DescribeDeployments returns a description of the specified deployments. Otherwise, it returns a description of every deployment.

    ", + "DescribeEcsClustersRequest$EcsClusterArns": "

    A list of ARNs, one for each cluster to be described.

    ", + "DescribeElasticIpsRequest$Ips": "

    An array of Elastic IP addresses to be described. If you include this parameter, DescribeElasticIps returns a description of the specified Elastic IP addresses. Otherwise, it returns a description of every Elastic IP address.

    ", + "DescribeElasticLoadBalancersRequest$LayerIds": "

    A list of layer IDs. The action describes the Elastic Load Balancing instances for the specified layers.

    ", + "DescribeInstancesRequest$InstanceIds": "

    An array of instance IDs to be described. If you use this parameter, DescribeInstances returns a description of the specified instances. Otherwise, it returns a description of every instance.

    ", + "DescribeLayersRequest$LayerIds": "

    An array of layer IDs that specify the layers to be described. If you omit this parameter, DescribeLayers returns a description of every layer in the specified stack.

    ", + "DescribeLoadBasedAutoScalingRequest$LayerIds": "

    An array of layer IDs.

    ", + "DescribeRaidArraysRequest$RaidArrayIds": "

    An array of RAID array IDs. If you use this parameter, DescribeRaidArrays returns descriptions of the specified arrays. Otherwise, it returns a description of every array.

    ", + "DescribeRdsDbInstancesRequest$RdsDbInstanceArns": "

    An array containing the ARNs of the instances to be described.

    ", + "DescribeServiceErrorsRequest$ServiceErrorIds": "

    An array of service error IDs. If you use this parameter, DescribeServiceErrors returns descriptions of the specified errors. Otherwise, it returns a description of every error.

    ", + "DescribeStacksRequest$StackIds": "

    An array of stack IDs that specify the stacks to be described. If you omit this parameter, DescribeStacks returns a description of every stack.

    ", + "DescribeTimeBasedAutoScalingRequest$InstanceIds": "

    An array of instance IDs.

    ", + "DescribeUserProfilesRequest$IamUserArns": "

    An array of IAM user ARNs that identify the users to be described.

    ", + "DescribeVolumesRequest$VolumeIds": "

    An array of volume IDs. If you use this parameter, DescribeVolumes returns descriptions of the specified volumes. Otherwise, it returns a description of every volume.

    ", + "ElasticLoadBalancer$AvailabilityZones": "

    A list of Availability Zones.

    ", + "ElasticLoadBalancer$SubnetIds": "

    A list of subnet IDs, if the stack is running in a VPC.

    ", + "ElasticLoadBalancer$Ec2InstanceIds": "

    A list of the EC2 instances that the Elastic Load Balancing instance is managing traffic for.

    ", + "Instance$LayerIds": "

    An array containing the instance layer IDs.

    ", + "Instance$SecurityGroupIds": "

    An array containing the instance security group IDs.

    ", + "Layer$CustomSecurityGroupIds": "

    An array containing the layer's custom security group IDs.

    ", + "Layer$DefaultSecurityGroupNames": "

    An array containing the layer's security group names.

    ", + "Layer$Packages": "

    An array of Package objects that describe the layer's packages.

    ", + "Recipes$Setup": "

    An array of custom recipe names to be run following a setup event.

    ", + "Recipes$Configure": "

    An array of custom recipe names to be run following a configure event.

    ", + "Recipes$Deploy": "

    An array of custom recipe names to be run following a deploy event.

    ", + "Recipes$Undeploy": "

    An array of custom recipe names to be run following an undeploy event.

    ", + "Recipes$Shutdown": "

    An array of custom recipe names to be run following a shutdown event.

    ", + "UpdateAppRequest$Domains": "

    The app's virtual host settings, with multiple domains separated by commas. For example: 'www.example.com, example.com'

    ", + "UpdateInstanceRequest$LayerIds": "

    The instance's layer IDs.

    ", + "UpdateLayerRequest$CustomSecurityGroupIds": "

    An array containing the layer's custom security group IDs.

    ", + "UpdateLayerRequest$Packages": "

    An array of Package objects that describe the layer's packages.

    " + } + }, + "Switch": { + "base": null, + "refs": { + "DailyAutoScalingSchedule$value": null + } + }, + "TemporaryCredential": { + "base": "

    Contains the data needed by RDP clients such as the Microsoft Remote Desktop Connection to log in to the instance.

    ", + "refs": { + "GrantAccessResult$TemporaryCredential": "

    A TemporaryCredential object that contains the data needed to log in to the instance by RDP clients, such as the Microsoft Remote Desktop Connection.

    " + } + }, + "TimeBasedAutoScalingConfiguration": { + "base": "

    Describes an instance's time-based auto scaling configuration.

    ", + "refs": { + "TimeBasedAutoScalingConfigurations$member": null + } + }, + "TimeBasedAutoScalingConfigurations": { + "base": null, + "refs": { + "DescribeTimeBasedAutoScalingResult$TimeBasedAutoScalingConfigurations": "

    An array of TimeBasedAutoScalingConfiguration objects that describe the configuration for the specified instances.

    " + } + }, + "UnassignInstanceRequest": { + "base": null, + "refs": { + } + }, + "UnassignVolumeRequest": { + "base": null, + "refs": { + } + }, + "UpdateAppRequest": { + "base": null, + "refs": { + } + }, + "UpdateElasticIpRequest": { + "base": null, + "refs": { + } + }, + "UpdateInstanceRequest": { + "base": null, + "refs": { + } + }, + "UpdateLayerRequest": { + "base": null, + "refs": { + } + }, + "UpdateMyUserProfileRequest": { + "base": null, + "refs": { + } + }, + "UpdateRdsDbInstanceRequest": { + "base": null, + "refs": { + } + }, + "UpdateStackRequest": { + "base": null, + "refs": { + } + }, + "UpdateUserProfileRequest": { + "base": null, + "refs": { + } + }, + "UpdateVolumeRequest": { + "base": null, + "refs": { + } + }, + "UserProfile": { + "base": "

    Describes a user's SSH information.

    ", + "refs": { + "UserProfiles$member": null + } + }, + "UserProfiles": { + "base": null, + "refs": { + "DescribeUserProfilesResult$UserProfiles": "

    A Users object that describes the specified users.

    " + } + }, + "ValidForInMinutes": { + "base": null, + "refs": { + "GrantAccessRequest$ValidForInMinutes": "

    The length of time (in minutes) that the grant is valid. When the grant expires at the end of this period, the user will no longer be able to use the credentials to log in. If the user is logged in at the time, he or she will automatically be logged out.
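
    A sketch of requesting such a grant for an instance, assuming an existing svc client; the sixty-minute lifetime is an example value:

        package main

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/opsworks"
        )

        // temporaryRDPLogin asks OpsWorks for a short-lived RDP credential on
        // an instance; the user is logged out when the grant expires.
        func temporaryRDPLogin(svc *opsworks.OpsWorks, instanceID string) (*opsworks.TemporaryCredential, error) {
            out, err := svc.GrantAccess(&opsworks.GrantAccessInput{
                InstanceId:        aws.String(instanceID),
                ValidForInMinutes: aws.Int64(60), // example lifetime
            })
            if err != nil {
                return nil, err
            }
            return out.TemporaryCredential, nil
        }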

    " + } + }, + "ValidationException": { + "base": "

    Indicates that a request was not valid.

    ", + "refs": { + } + }, + "VirtualizationType": { + "base": null, + "refs": { + "Instance$VirtualizationType": "

    The instance's virtualization type: paravirtual or hvm.

    " + } + }, + "Volume": { + "base": "

    Describes an instance's Amazon EBS volume.

    ", + "refs": { + "Volumes$member": null + } + }, + "VolumeConfiguration": { + "base": "

    Describes an Amazon EBS volume configuration.

    ", + "refs": { + "VolumeConfigurations$member": null + } + }, + "VolumeConfigurations": { + "base": null, + "refs": { + "CreateLayerRequest$VolumeConfigurations": "

    A VolumeConfigurations object that describes the layer's Amazon EBS volumes.

    ", + "Layer$VolumeConfigurations": "

    A VolumeConfigurations object that describes the layer's Amazon EBS volumes.

    ", + "UpdateLayerRequest$VolumeConfigurations": "

    A VolumeConfigurations object that describes the layer's Amazon EBS volumes.

    " + } + }, + "VolumeType": { + "base": null, + "refs": { + "EbsBlockDevice$VolumeType": "

    The volume type. gp2 for General Purpose (SSD) volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic volumes.

    " + } + }, + "Volumes": { + "base": null, + "refs": { + "DescribeVolumesResult$Volumes": "

    An array of Volume objects that describe the specified volumes.

    " + } + }, + "WeeklyAutoScalingSchedule": { + "base": "

    Describes a time-based instance's auto scaling schedule. The schedule consists of a set of key-value pairs.

    • The key is the time period (a UTC hour) and must be an integer from 0 - 23.

    • The value indicates whether the instance should be online or offline for the specified period, and must be set to \"on\" or \"off\".

    The default setting for all time periods is off, so you use the following parameters primarily to specify the online periods. You don't have to explicitly specify offline periods unless you want to change an online period to an offline period.

    The following example specifies that the instance should be online for four hours, from UTC 1200 - 1600. It will be off for the remainder of the day.

    { \"12\":\"on\", \"13\":\"on\", \"14\":\"on\", \"15\":\"on\" }

    ", + "refs": { + "SetTimeBasedAutoScalingRequest$AutoScalingSchedule": "

    An AutoScalingSchedule with the instance schedule.

    ", + "TimeBasedAutoScalingConfiguration$AutoScalingSchedule": "

    A WeeklyAutoScalingSchedule object with the instance schedule.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/opsworks/2013-02-18/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/opsworks/2013-02-18/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/opsworks/2013-02-18/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/opsworks/2013-02-18/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/opsworks/2013-02-18/paginators-1.json new file mode 100644 index 000000000..3825e7db0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/opsworks/2013-02-18/paginators-1.json @@ -0,0 +1,55 @@ +{ + "pagination": { + "DescribeApps": { + "result_key": "Apps" + }, + "DescribeCommands": { + "result_key": "Commands" + }, + "DescribeDeployments": { + "result_key": "Deployments" + }, + "DescribeEcsClusters": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "EcsClusters" + }, + "DescribeElasticIps": { + "result_key": "ElasticIps" + }, + "DescribeElasticLoadBalancers": { + "result_key": "ElasticLoadBalancers" + }, + "DescribeInstances": { + "result_key": "Instances" + }, + "DescribeLayers": { + "result_key": "Layers" + }, + "DescribeLoadBasedAutoScaling": { + "result_key": "LoadBasedAutoScalingConfigurations" + }, + "DescribePermissions": { + "result_key": "Permissions" + }, + "DescribeRaidArrays": { + "result_key": "RaidArrays" + }, + "DescribeServiceErrors": { + "result_key": "ServiceErrors" + }, + "DescribeStacks": { + "result_key": "Stacks" + }, + "DescribeTimeBasedAutoScaling": { + "result_key": "TimeBasedAutoScalingConfigurations" + }, + "DescribeUserProfiles": { + "result_key": "UserProfiles" + }, + "DescribeVolumes": { + "result_key": "Volumes" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/opsworks/2013-02-18/waiters-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/opsworks/2013-02-18/waiters-2.json new file mode 100644 index 000000000..8daa084ff --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/opsworks/2013-02-18/waiters-2.json @@ -0,0 +1,295 @@ +{ + "version": 2, + "waiters": { + "AppExists": { + "delay": 1, + "operation": "DescribeApps", + "maxAttempts": 40, + "acceptors": [ + { + "expected": 200, + "matcher": "status", + "state": "success" + }, + { + "matcher": "status", + "expected": 400, + "state": "failure" + } + ] + }, + "DeploymentSuccessful": { + "delay": 15, + "operation": "DescribeDeployments", + "maxAttempts": 40, + "description": "Wait until a deployment has completed successfully", + "acceptors": [ + { + "expected": "successful", + "matcher": "pathAll", + "state": "success", + "argument": "Deployments[].Status" + }, + { + "expected": "failed", + "matcher": "pathAny", + "state": "failure", + "argument": "Deployments[].Status" + } + ] + }, + "InstanceOnline": { + "delay": 15, + "operation": "DescribeInstances", + "maxAttempts": 40, + "description": "Wait until OpsWorks instance is online.", + "acceptors": [ + { + "expected": "online", + "matcher": "pathAll", + "state": "success", + "argument": "Instances[].Status" + }, + { + "expected": "setup_failed", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "shutting_down", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "start_failed", + "matcher": "pathAny", + "state": 
"failure", + "argument": "Instances[].Status" + }, + { + "expected": "stopped", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "stopping", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "terminating", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "terminated", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "stop_failed", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + } + ] + }, + "InstanceRegistered": { + "delay": 15, + "operation": "DescribeInstances", + "maxAttempts": 40, + "description": "Wait until OpsWorks instance is registered.", + "acceptors": [ + { + "expected": "registered", + "matcher": "pathAll", + "state": "success", + "argument": "Instances[].Status" + }, + { + "expected": "setup_failed", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "shutting_down", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "stopped", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "stopping", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "terminating", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "terminated", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "stop_failed", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + } + ] + }, + "InstanceStopped": { + "delay": 15, + "operation": "DescribeInstances", + "maxAttempts": 40, + "description": "Wait until OpsWorks instance is stopped.", + "acceptors": [ + { + "expected": "stopped", + "matcher": "pathAll", + "state": "success", + "argument": "Instances[].Status" + }, + { + "expected": "booting", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "online", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "pending", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "rebooting", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "requested", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "running_setup", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "setup_failed", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "start_failed", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "stop_failed", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + } + ] + }, + "InstanceTerminated": { + "delay": 15, + "operation": "DescribeInstances", + "maxAttempts": 40, + "description": "Wait until OpsWorks instance is terminated.", + "acceptors": [ + { + "expected": "terminated", + "matcher": "pathAll", + "state": "success", + "argument": "Instances[].Status" + }, + { + "expected": "ResourceNotFoundException", + "matcher": "error", + "state": "success" + }, + { + "expected": "booting", 
+ "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "online", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "pending", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "rebooting", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "requested", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "running_setup", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "setup_failed", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "start_failed", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + } + ] + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-01-10/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-01-10/api-2.json new file mode 100644 index 000000000..3868a93c7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-01-10/api-2.json @@ -0,0 +1,2901 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2013-01-10", + "endpointPrefix":"rds", + "protocol":"query", + "serviceAbbreviation":"Amazon RDS", + "serviceFullName":"Amazon Relational Database Service", + "signatureVersion":"v4", + "xmlNamespace":"http://rds.amazonaws.com/doc/2013-01-10/" + }, + "operations":{ + "AddSourceIdentifierToSubscription":{ + "name":"AddSourceIdentifierToSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddSourceIdentifierToSubscriptionMessage"}, + "output":{ + "shape":"AddSourceIdentifierToSubscriptionResult", + "resultWrapper":"AddSourceIdentifierToSubscriptionResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"SourceNotFoundFault"} + ] + }, + "AddTagsToResource":{ + "name":"AddTagsToResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddTagsToResourceMessage"}, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "AuthorizeDBSecurityGroupIngress":{ + "name":"AuthorizeDBSecurityGroupIngress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AuthorizeDBSecurityGroupIngressMessage"}, + "output":{ + "shape":"AuthorizeDBSecurityGroupIngressResult", + "resultWrapper":"AuthorizeDBSecurityGroupIngressResult" + }, + "errors":[ + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"InvalidDBSecurityGroupStateFault"}, + {"shape":"AuthorizationAlreadyExistsFault"}, + {"shape":"AuthorizationQuotaExceededFault"} + ] + }, + "CopyDBSnapshot":{ + "name":"CopyDBSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CopyDBSnapshotMessage"}, + "output":{ + "shape":"CopyDBSnapshotResult", + "resultWrapper":"CopyDBSnapshotResult" + }, + "errors":[ + {"shape":"DBSnapshotAlreadyExistsFault"}, + {"shape":"DBSnapshotNotFoundFault"}, + {"shape":"InvalidDBSnapshotStateFault"}, + {"shape":"SnapshotQuotaExceededFault"} + ] + }, + "CreateDBInstance":{ + "name":"CreateDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBInstanceMessage"}, + "output":{ + "shape":"CreateDBInstanceResult", + "resultWrapper":"CreateDBInstanceResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + 
{"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"} + ] + }, + "CreateDBInstanceReadReplica":{ + "name":"CreateDBInstanceReadReplica", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBInstanceReadReplicaMessage"}, + "output":{ + "shape":"CreateDBInstanceReadReplicaResult", + "resultWrapper":"CreateDBInstanceReadReplicaResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"} + ] + }, + "CreateDBParameterGroup":{ + "name":"CreateDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBParameterGroupMessage"}, + "output":{ + "shape":"CreateDBParameterGroupResult", + "resultWrapper":"CreateDBParameterGroupResult" + }, + "errors":[ + {"shape":"DBParameterGroupQuotaExceededFault"}, + {"shape":"DBParameterGroupAlreadyExistsFault"} + ] + }, + "CreateDBSecurityGroup":{ + "name":"CreateDBSecurityGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBSecurityGroupMessage"}, + "output":{ + "shape":"CreateDBSecurityGroupResult", + "resultWrapper":"CreateDBSecurityGroupResult" + }, + "errors":[ + {"shape":"DBSecurityGroupAlreadyExistsFault"}, + {"shape":"DBSecurityGroupQuotaExceededFault"}, + {"shape":"DBSecurityGroupNotSupportedFault"} + ] + }, + "CreateDBSnapshot":{ + "name":"CreateDBSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBSnapshotMessage"}, + "output":{ + "shape":"CreateDBSnapshotResult", + "resultWrapper":"CreateDBSnapshotResult" + }, + "errors":[ + {"shape":"DBSnapshotAlreadyExistsFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"SnapshotQuotaExceededFault"} + ] + }, + "CreateDBSubnetGroup":{ + "name":"CreateDBSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBSubnetGroupMessage"}, + "output":{ + "shape":"CreateDBSubnetGroupResult", + "resultWrapper":"CreateDBSubnetGroupResult" + }, + "errors":[ + {"shape":"DBSubnetGroupAlreadyExistsFault"}, + {"shape":"DBSubnetGroupQuotaExceededFault"}, + {"shape":"DBSubnetQuotaExceededFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"} + ] + }, + "CreateEventSubscription":{ + "name":"CreateEventSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateEventSubscriptionMessage"}, + "output":{ + "shape":"CreateEventSubscriptionResult", + "resultWrapper":"CreateEventSubscriptionResult" + }, + "errors":[ + {"shape":"EventSubscriptionQuotaExceededFault"}, + 
{"shape":"SubscriptionAlreadyExistFault"}, + {"shape":"SNSInvalidTopicFault"}, + {"shape":"SNSNoAuthorizationFault"}, + {"shape":"SNSTopicArnNotFoundFault"}, + {"shape":"SubscriptionCategoryNotFoundFault"}, + {"shape":"SourceNotFoundFault"} + ] + }, + "CreateOptionGroup":{ + "name":"CreateOptionGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateOptionGroupMessage"}, + "output":{ + "shape":"CreateOptionGroupResult", + "resultWrapper":"CreateOptionGroupResult" + }, + "errors":[ + {"shape":"OptionGroupAlreadyExistsFault"}, + {"shape":"OptionGroupQuotaExceededFault"} + ] + }, + "DeleteDBInstance":{ + "name":"DeleteDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBInstanceMessage"}, + "output":{ + "shape":"DeleteDBInstanceResult", + "resultWrapper":"DeleteDBInstanceResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBSnapshotAlreadyExistsFault"}, + {"shape":"SnapshotQuotaExceededFault"} + ] + }, + "DeleteDBParameterGroup":{ + "name":"DeleteDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBParameterGroupMessage"}, + "errors":[ + {"shape":"InvalidDBParameterGroupStateFault"}, + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "DeleteDBSecurityGroup":{ + "name":"DeleteDBSecurityGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBSecurityGroupMessage"}, + "errors":[ + {"shape":"InvalidDBSecurityGroupStateFault"}, + {"shape":"DBSecurityGroupNotFoundFault"} + ] + }, + "DeleteDBSnapshot":{ + "name":"DeleteDBSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBSnapshotMessage"}, + "output":{ + "shape":"DeleteDBSnapshotResult", + "resultWrapper":"DeleteDBSnapshotResult" + }, + "errors":[ + {"shape":"InvalidDBSnapshotStateFault"}, + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "DeleteDBSubnetGroup":{ + "name":"DeleteDBSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBSubnetGroupMessage"}, + "errors":[ + {"shape":"InvalidDBSubnetGroupStateFault"}, + {"shape":"InvalidDBSubnetStateFault"}, + {"shape":"DBSubnetGroupNotFoundFault"} + ] + }, + "DeleteEventSubscription":{ + "name":"DeleteEventSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteEventSubscriptionMessage"}, + "output":{ + "shape":"DeleteEventSubscriptionResult", + "resultWrapper":"DeleteEventSubscriptionResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"InvalidEventSubscriptionStateFault"} + ] + }, + "DeleteOptionGroup":{ + "name":"DeleteOptionGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteOptionGroupMessage"}, + "errors":[ + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"InvalidOptionGroupStateFault"} + ] + }, + "DescribeDBEngineVersions":{ + "name":"DescribeDBEngineVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBEngineVersionsMessage"}, + "output":{ + "shape":"DBEngineVersionMessage", + "resultWrapper":"DescribeDBEngineVersionsResult" + } + }, + "DescribeDBInstances":{ + "name":"DescribeDBInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBInstancesMessage"}, + "output":{ + "shape":"DBInstanceMessage", + "resultWrapper":"DescribeDBInstancesResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"} + ] + }, + 
"DescribeDBParameterGroups":{ + "name":"DescribeDBParameterGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBParameterGroupsMessage"}, + "output":{ + "shape":"DBParameterGroupsMessage", + "resultWrapper":"DescribeDBParameterGroupsResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "DescribeDBParameters":{ + "name":"DescribeDBParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBParametersMessage"}, + "output":{ + "shape":"DBParameterGroupDetails", + "resultWrapper":"DescribeDBParametersResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "DescribeDBSecurityGroups":{ + "name":"DescribeDBSecurityGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBSecurityGroupsMessage"}, + "output":{ + "shape":"DBSecurityGroupMessage", + "resultWrapper":"DescribeDBSecurityGroupsResult" + }, + "errors":[ + {"shape":"DBSecurityGroupNotFoundFault"} + ] + }, + "DescribeDBSnapshots":{ + "name":"DescribeDBSnapshots", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBSnapshotsMessage"}, + "output":{ + "shape":"DBSnapshotMessage", + "resultWrapper":"DescribeDBSnapshotsResult" + }, + "errors":[ + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "DescribeDBSubnetGroups":{ + "name":"DescribeDBSubnetGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBSubnetGroupsMessage"}, + "output":{ + "shape":"DBSubnetGroupMessage", + "resultWrapper":"DescribeDBSubnetGroupsResult" + }, + "errors":[ + {"shape":"DBSubnetGroupNotFoundFault"} + ] + }, + "DescribeEngineDefaultParameters":{ + "name":"DescribeEngineDefaultParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEngineDefaultParametersMessage"}, + "output":{ + "shape":"DescribeEngineDefaultParametersResult", + "resultWrapper":"DescribeEngineDefaultParametersResult" + } + }, + "DescribeEventCategories":{ + "name":"DescribeEventCategories", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventCategoriesMessage"}, + "output":{ + "shape":"EventCategoriesMessage", + "resultWrapper":"DescribeEventCategoriesResult" + } + }, + "DescribeEventSubscriptions":{ + "name":"DescribeEventSubscriptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventSubscriptionsMessage"}, + "output":{ + "shape":"EventSubscriptionsMessage", + "resultWrapper":"DescribeEventSubscriptionsResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"} + ] + }, + "DescribeEvents":{ + "name":"DescribeEvents", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventsMessage"}, + "output":{ + "shape":"EventsMessage", + "resultWrapper":"DescribeEventsResult" + } + }, + "DescribeOptionGroupOptions":{ + "name":"DescribeOptionGroupOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOptionGroupOptionsMessage"}, + "output":{ + "shape":"OptionGroupOptionsMessage", + "resultWrapper":"DescribeOptionGroupOptionsResult" + } + }, + "DescribeOptionGroups":{ + "name":"DescribeOptionGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOptionGroupsMessage"}, + "output":{ + "shape":"OptionGroups", + "resultWrapper":"DescribeOptionGroupsResult" + }, + "errors":[ + {"shape":"OptionGroupNotFoundFault"} + ] + }, + "DescribeOrderableDBInstanceOptions":{ + 
"name":"DescribeOrderableDBInstanceOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOrderableDBInstanceOptionsMessage"}, + "output":{ + "shape":"OrderableDBInstanceOptionsMessage", + "resultWrapper":"DescribeOrderableDBInstanceOptionsResult" + } + }, + "DescribeReservedDBInstances":{ + "name":"DescribeReservedDBInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedDBInstancesMessage"}, + "output":{ + "shape":"ReservedDBInstanceMessage", + "resultWrapper":"DescribeReservedDBInstancesResult" + }, + "errors":[ + {"shape":"ReservedDBInstanceNotFoundFault"} + ] + }, + "DescribeReservedDBInstancesOfferings":{ + "name":"DescribeReservedDBInstancesOfferings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedDBInstancesOfferingsMessage"}, + "output":{ + "shape":"ReservedDBInstancesOfferingMessage", + "resultWrapper":"DescribeReservedDBInstancesOfferingsResult" + }, + "errors":[ + {"shape":"ReservedDBInstancesOfferingNotFoundFault"} + ] + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceMessage"}, + "output":{ + "shape":"TagListMessage", + "resultWrapper":"ListTagsForResourceResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "ModifyDBInstance":{ + "name":"ModifyDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBInstanceMessage"}, + "output":{ + "shape":"ModifyDBInstanceResult", + "resultWrapper":"ModifyDBInstanceResult" + }, + "errors":[ + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"InvalidDBSecurityGroupStateFault"}, + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"DBUpgradeDependencyFailureFault"} + ] + }, + "ModifyDBParameterGroup":{ + "name":"ModifyDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBParameterGroupMessage"}, + "output":{ + "shape":"DBParameterGroupNameMessage", + "resultWrapper":"ModifyDBParameterGroupResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"InvalidDBParameterGroupStateFault"} + ] + }, + "ModifyDBSubnetGroup":{ + "name":"ModifyDBSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBSubnetGroupMessage"}, + "output":{ + "shape":"ModifyDBSubnetGroupResult", + "resultWrapper":"ModifyDBSubnetGroupResult" + }, + "errors":[ + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetQuotaExceededFault"}, + {"shape":"SubnetAlreadyInUse"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"} + ] + }, + "ModifyEventSubscription":{ + "name":"ModifyEventSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyEventSubscriptionMessage"}, + "output":{ + "shape":"ModifyEventSubscriptionResult", + "resultWrapper":"ModifyEventSubscriptionResult" + }, + "errors":[ + {"shape":"EventSubscriptionQuotaExceededFault"}, + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"SNSInvalidTopicFault"}, + 
{"shape":"SNSNoAuthorizationFault"}, + {"shape":"SNSTopicArnNotFoundFault"}, + {"shape":"SubscriptionCategoryNotFoundFault"} + ] + }, + "ModifyOptionGroup":{ + "name":"ModifyOptionGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyOptionGroupMessage"}, + "output":{ + "shape":"ModifyOptionGroupResult", + "resultWrapper":"ModifyOptionGroupResult" + }, + "errors":[ + {"shape":"InvalidOptionGroupStateFault"}, + {"shape":"OptionGroupNotFoundFault"} + ] + }, + "PromoteReadReplica":{ + "name":"PromoteReadReplica", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PromoteReadReplicaMessage"}, + "output":{ + "shape":"PromoteReadReplicaResult", + "resultWrapper":"PromoteReadReplicaResult" + }, + "errors":[ + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBInstanceNotFoundFault"} + ] + }, + "PurchaseReservedDBInstancesOffering":{ + "name":"PurchaseReservedDBInstancesOffering", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PurchaseReservedDBInstancesOfferingMessage"}, + "output":{ + "shape":"PurchaseReservedDBInstancesOfferingResult", + "resultWrapper":"PurchaseReservedDBInstancesOfferingResult" + }, + "errors":[ + {"shape":"ReservedDBInstancesOfferingNotFoundFault"}, + {"shape":"ReservedDBInstanceAlreadyExistsFault"}, + {"shape":"ReservedDBInstanceQuotaExceededFault"} + ] + }, + "RebootDBInstance":{ + "name":"RebootDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RebootDBInstanceMessage"}, + "output":{ + "shape":"RebootDBInstanceResult", + "resultWrapper":"RebootDBInstanceResult" + }, + "errors":[ + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBInstanceNotFoundFault"} + ] + }, + "RemoveSourceIdentifierFromSubscription":{ + "name":"RemoveSourceIdentifierFromSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveSourceIdentifierFromSubscriptionMessage"}, + "output":{ + "shape":"RemoveSourceIdentifierFromSubscriptionResult", + "resultWrapper":"RemoveSourceIdentifierFromSubscriptionResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"SourceNotFoundFault"} + ] + }, + "RemoveTagsFromResource":{ + "name":"RemoveTagsFromResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveTagsFromResourceMessage"}, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "ResetDBParameterGroup":{ + "name":"ResetDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetDBParameterGroupMessage"}, + "output":{ + "shape":"DBParameterGroupNameMessage", + "resultWrapper":"ResetDBParameterGroupResult" + }, + "errors":[ + {"shape":"InvalidDBParameterGroupStateFault"}, + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "RestoreDBInstanceFromDBSnapshot":{ + "name":"RestoreDBInstanceFromDBSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreDBInstanceFromDBSnapshotMessage"}, + "output":{ + "shape":"RestoreDBInstanceFromDBSnapshotResult", + "resultWrapper":"RestoreDBInstanceFromDBSnapshotResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"DBSnapshotNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"InvalidDBSnapshotStateFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"InvalidRestoreFault"}, + 
{"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"} + ] + }, + "RestoreDBInstanceToPointInTime":{ + "name":"RestoreDBInstanceToPointInTime", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreDBInstanceToPointInTimeMessage"}, + "output":{ + "shape":"RestoreDBInstanceToPointInTimeResult", + "resultWrapper":"RestoreDBInstanceToPointInTimeResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"PointInTimeRestoreNotEnabledFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"InvalidRestoreFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"} + ] + }, + "RevokeDBSecurityGroupIngress":{ + "name":"RevokeDBSecurityGroupIngress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RevokeDBSecurityGroupIngressMessage"}, + "output":{ + "shape":"RevokeDBSecurityGroupIngressResult", + "resultWrapper":"RevokeDBSecurityGroupIngressResult" + }, + "errors":[ + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"AuthorizationNotFoundFault"}, + {"shape":"InvalidDBSecurityGroupStateFault"} + ] + } + }, + "shapes":{ + "AddSourceIdentifierToSubscriptionMessage":{ + "type":"structure", + "required":[ + "SubscriptionName", + "SourceIdentifier" + ], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SourceIdentifier":{"shape":"String"} + } + }, + "AddSourceIdentifierToSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "AddTagsToResourceMessage":{ + "type":"structure", + "required":[ + "ResourceName", + "Tags" + ], + "members":{ + "ResourceName":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "ApplyMethod":{ + "type":"string", + "enum":[ + "immediate", + "pending-reboot" + ] + }, + "AuthorizationAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AuthorizationAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "AuthorizationNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AuthorizationNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "AuthorizationQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AuthorizationQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "AuthorizeDBSecurityGroupIngressMessage":{ + "type":"structure", + "required":["DBSecurityGroupName"], + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "CIDRIP":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupId":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"} + } + }, + "AuthorizeDBSecurityGroupIngressResult":{ + "type":"structure", + "members":{ + "DBSecurityGroup":{"shape":"DBSecurityGroup"} + } + }, + "AvailabilityZone":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "ProvisionedIopsCapable":{"shape":"Boolean"} + }, + "wrapper":true + }, + 
"AvailabilityZoneList":{ + "type":"list", + "member":{ + "shape":"AvailabilityZone", + "locationName":"AvailabilityZone" + } + }, + "Boolean":{"type":"boolean"}, + "BooleanOptional":{"type":"boolean"}, + "CharacterSet":{ + "type":"structure", + "members":{ + "CharacterSetName":{"shape":"String"}, + "CharacterSetDescription":{"shape":"String"} + } + }, + "CopyDBSnapshotMessage":{ + "type":"structure", + "required":[ + "SourceDBSnapshotIdentifier", + "TargetDBSnapshotIdentifier" + ], + "members":{ + "SourceDBSnapshotIdentifier":{"shape":"String"}, + "TargetDBSnapshotIdentifier":{"shape":"String"} + } + }, + "CopyDBSnapshotResult":{ + "type":"structure", + "members":{ + "DBSnapshot":{"shape":"DBSnapshot"} + } + }, + "CreateDBInstanceMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "AllocatedStorage", + "DBInstanceClass", + "Engine", + "MasterUsername", + "MasterUserPassword" + ], + "members":{ + "DBName":{"shape":"String"}, + "DBInstanceIdentifier":{"shape":"String"}, + "AllocatedStorage":{"shape":"IntegerOptional"}, + "DBInstanceClass":{"shape":"String"}, + "Engine":{"shape":"String"}, + "MasterUsername":{"shape":"String"}, + "MasterUserPassword":{"shape":"String"}, + "DBSecurityGroups":{"shape":"DBSecurityGroupNameList"}, + "VpcSecurityGroupIds":{"shape":"VpcSecurityGroupIdList"}, + "AvailabilityZone":{"shape":"String"}, + "DBSubnetGroupName":{"shape":"String"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "DBParameterGroupName":{"shape":"String"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "PreferredBackupWindow":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "EngineVersion":{"shape":"String"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "LicenseModel":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "CharacterSetName":{"shape":"String"}, + "PubliclyAccessible":{"shape":"BooleanOptional"} + } + }, + "CreateDBInstanceReadReplicaMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "SourceDBInstanceIdentifier" + ], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "SourceDBInstanceIdentifier":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "AvailabilityZone":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "PubliclyAccessible":{"shape":"BooleanOptional"} + } + }, + "CreateDBInstanceReadReplicaResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "CreateDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "CreateDBParameterGroupMessage":{ + "type":"structure", + "required":[ + "DBParameterGroupName", + "DBParameterGroupFamily", + "Description" + ], + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "Description":{"shape":"String"} + } + }, + "CreateDBParameterGroupResult":{ + "type":"structure", + "members":{ + "DBParameterGroup":{"shape":"DBParameterGroup"} + } + }, + "CreateDBSecurityGroupMessage":{ + "type":"structure", + "required":[ + "DBSecurityGroupName", + "DBSecurityGroupDescription" + ], + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "DBSecurityGroupDescription":{"shape":"String"} + } + }, + "CreateDBSecurityGroupResult":{ + "type":"structure", + "members":{ + 
"DBSecurityGroup":{"shape":"DBSecurityGroup"} + } + }, + "CreateDBSnapshotMessage":{ + "type":"structure", + "required":[ + "DBSnapshotIdentifier", + "DBInstanceIdentifier" + ], + "members":{ + "DBSnapshotIdentifier":{"shape":"String"}, + "DBInstanceIdentifier":{"shape":"String"} + } + }, + "CreateDBSnapshotResult":{ + "type":"structure", + "members":{ + "DBSnapshot":{"shape":"DBSnapshot"} + } + }, + "CreateDBSubnetGroupMessage":{ + "type":"structure", + "required":[ + "DBSubnetGroupName", + "DBSubnetGroupDescription", + "SubnetIds" + ], + "members":{ + "DBSubnetGroupName":{"shape":"String"}, + "DBSubnetGroupDescription":{"shape":"String"}, + "SubnetIds":{"shape":"SubnetIdentifierList"} + } + }, + "CreateDBSubnetGroupResult":{ + "type":"structure", + "members":{ + "DBSubnetGroup":{"shape":"DBSubnetGroup"} + } + }, + "CreateEventSubscriptionMessage":{ + "type":"structure", + "required":[ + "SubscriptionName", + "SnsTopicArn" + ], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SnsTopicArn":{"shape":"String"}, + "SourceType":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "SourceIds":{"shape":"SourceIdsList"}, + "Enabled":{"shape":"BooleanOptional"} + } + }, + "CreateEventSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "CreateOptionGroupMessage":{ + "type":"structure", + "required":[ + "OptionGroupName", + "EngineName", + "MajorEngineVersion", + "OptionGroupDescription" + ], + "members":{ + "OptionGroupName":{"shape":"String"}, + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"}, + "OptionGroupDescription":{"shape":"String"} + } + }, + "CreateOptionGroupResult":{ + "type":"structure", + "members":{ + "OptionGroup":{"shape":"OptionGroup"} + } + }, + "DBEngineVersion":{ + "type":"structure", + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "DBEngineDescription":{"shape":"String"}, + "DBEngineVersionDescription":{"shape":"String"}, + "DefaultCharacterSet":{"shape":"CharacterSet"}, + "SupportedCharacterSets":{"shape":"SupportedCharacterSetsList"} + } + }, + "DBEngineVersionList":{ + "type":"list", + "member":{ + "shape":"DBEngineVersion", + "locationName":"DBEngineVersion" + } + }, + "DBEngineVersionMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBEngineVersions":{"shape":"DBEngineVersionList"} + } + }, + "DBInstance":{ + "type":"structure", + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Engine":{"shape":"String"}, + "DBInstanceStatus":{"shape":"String"}, + "MasterUsername":{"shape":"String"}, + "DBName":{"shape":"String"}, + "Endpoint":{"shape":"Endpoint"}, + "AllocatedStorage":{"shape":"Integer"}, + "InstanceCreateTime":{"shape":"TStamp"}, + "PreferredBackupWindow":{"shape":"String"}, + "BackupRetentionPeriod":{"shape":"Integer"}, + "DBSecurityGroups":{"shape":"DBSecurityGroupMembershipList"}, + "VpcSecurityGroups":{"shape":"VpcSecurityGroupMembershipList"}, + "DBParameterGroups":{"shape":"DBParameterGroupStatusList"}, + "AvailabilityZone":{"shape":"String"}, + "DBSubnetGroup":{"shape":"DBSubnetGroup"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "PendingModifiedValues":{"shape":"PendingModifiedValues"}, + "LatestRestorableTime":{"shape":"TStamp"}, + "MultiAZ":{"shape":"Boolean"}, + "EngineVersion":{"shape":"String"}, + "AutoMinorVersionUpgrade":{"shape":"Boolean"}, + 
"ReadReplicaSourceDBInstanceIdentifier":{"shape":"String"}, + "ReadReplicaDBInstanceIdentifiers":{"shape":"ReadReplicaDBInstanceIdentifierList"}, + "LicenseModel":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupMembership":{"shape":"OptionGroupMembership"}, + "CharacterSetName":{"shape":"String"}, + "SecondaryAvailabilityZone":{"shape":"String"}, + "PubliclyAccessible":{"shape":"Boolean"} + }, + "wrapper":true + }, + "DBInstanceAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBInstanceAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBInstanceList":{ + "type":"list", + "member":{ + "shape":"DBInstance", + "locationName":"DBInstance" + } + }, + "DBInstanceMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBInstances":{"shape":"DBInstanceList"} + } + }, + "DBInstanceNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBInstanceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroup":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "Description":{"shape":"String"} + }, + "wrapper":true + }, + "DBParameterGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBParameterGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroupDetails":{ + "type":"structure", + "members":{ + "Parameters":{"shape":"ParametersList"}, + "Marker":{"shape":"String"} + } + }, + "DBParameterGroupList":{ + "type":"list", + "member":{ + "shape":"DBParameterGroup", + "locationName":"DBParameterGroup" + } + }, + "DBParameterGroupNameMessage":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{"shape":"String"} + } + }, + "DBParameterGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBParameterGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBParameterGroupQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroupStatus":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "ParameterApplyStatus":{"shape":"String"} + } + }, + "DBParameterGroupStatusList":{ + "type":"list", + "member":{ + "shape":"DBParameterGroupStatus", + "locationName":"DBParameterGroup" + } + }, + "DBParameterGroupsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBParameterGroups":{"shape":"DBParameterGroupList"} + } + }, + "DBSecurityGroup":{ + "type":"structure", + "members":{ + "OwnerId":{"shape":"String"}, + "DBSecurityGroupName":{"shape":"String"}, + "DBSecurityGroupDescription":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "EC2SecurityGroups":{"shape":"EC2SecurityGroupList"}, + "IPRanges":{"shape":"IPRangeList"} + }, + "wrapper":true + }, + "DBSecurityGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSecurityGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroupMembership":{ + "type":"structure", + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "DBSecurityGroupMembershipList":{ + "type":"list", + "member":{ + 
"shape":"DBSecurityGroupMembership", + "locationName":"DBSecurityGroup" + } + }, + "DBSecurityGroupMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBSecurityGroups":{"shape":"DBSecurityGroups"} + } + }, + "DBSecurityGroupNameList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"DBSecurityGroupName" + } + }, + "DBSecurityGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSecurityGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroupNotSupportedFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSecurityGroupNotSupported", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"QuotaExceeded.DBSecurityGroup", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroups":{ + "type":"list", + "member":{ + "shape":"DBSecurityGroup", + "locationName":"DBSecurityGroup" + } + }, + "DBSnapshot":{ + "type":"structure", + "members":{ + "DBSnapshotIdentifier":{"shape":"String"}, + "DBInstanceIdentifier":{"shape":"String"}, + "SnapshotCreateTime":{"shape":"TStamp"}, + "Engine":{"shape":"String"}, + "AllocatedStorage":{"shape":"Integer"}, + "Status":{"shape":"String"}, + "Port":{"shape":"Integer"}, + "AvailabilityZone":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "InstanceCreateTime":{"shape":"TStamp"}, + "MasterUsername":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "LicenseModel":{"shape":"String"}, + "SnapshotType":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"} + }, + "wrapper":true + }, + "DBSnapshotAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSnapshotAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSnapshotList":{ + "type":"list", + "member":{ + "shape":"DBSnapshot", + "locationName":"DBSnapshot" + } + }, + "DBSnapshotMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBSnapshots":{"shape":"DBSnapshotList"} + } + }, + "DBSnapshotNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSnapshotNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroup":{ + "type":"structure", + "members":{ + "DBSubnetGroupName":{"shape":"String"}, + "DBSubnetGroupDescription":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "SubnetGroupStatus":{"shape":"String"}, + "Subnets":{"shape":"SubnetList"} + }, + "wrapper":true + }, + "DBSubnetGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupDoesNotCoverEnoughAZs":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupDoesNotCoverEnoughAZs", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBSubnetGroups":{"shape":"DBSubnetGroups"} + } + }, + "DBSubnetGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + 
"code":"DBSubnetGroupQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroups":{ + "type":"list", + "member":{ + "shape":"DBSubnetGroup", + "locationName":"DBSubnetGroup" + } + }, + "DBSubnetQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBUpgradeDependencyFailureFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBUpgradeDependencyFailure", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DeleteDBInstanceMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "SkipFinalSnapshot":{"shape":"Boolean"}, + "FinalDBSnapshotIdentifier":{"shape":"String"} + } + }, + "DeleteDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "DeleteDBParameterGroupMessage":{ + "type":"structure", + "required":["DBParameterGroupName"], + "members":{ + "DBParameterGroupName":{"shape":"String"} + } + }, + "DeleteDBSecurityGroupMessage":{ + "type":"structure", + "required":["DBSecurityGroupName"], + "members":{ + "DBSecurityGroupName":{"shape":"String"} + } + }, + "DeleteDBSnapshotMessage":{ + "type":"structure", + "required":["DBSnapshotIdentifier"], + "members":{ + "DBSnapshotIdentifier":{"shape":"String"} + } + }, + "DeleteDBSnapshotResult":{ + "type":"structure", + "members":{ + "DBSnapshot":{"shape":"DBSnapshot"} + } + }, + "DeleteDBSubnetGroupMessage":{ + "type":"structure", + "required":["DBSubnetGroupName"], + "members":{ + "DBSubnetGroupName":{"shape":"String"} + } + }, + "DeleteEventSubscriptionMessage":{ + "type":"structure", + "required":["SubscriptionName"], + "members":{ + "SubscriptionName":{"shape":"String"} + } + }, + "DeleteEventSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "DeleteOptionGroupMessage":{ + "type":"structure", + "required":["OptionGroupName"], + "members":{ + "OptionGroupName":{"shape":"String"} + } + }, + "DescribeDBEngineVersionsMessage":{ + "type":"structure", + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"}, + "DefaultOnly":{"shape":"Boolean"}, + "ListSupportedCharacterSets":{"shape":"BooleanOptional"} + } + }, + "DescribeDBInstancesMessage":{ + "type":"structure", + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBParameterGroupsMessage":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBParametersMessage":{ + "type":"structure", + "required":["DBParameterGroupName"], + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "Source":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBSecurityGroupsMessage":{ + "type":"structure", + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBSnapshotsMessage":{ + "type":"structure", + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + 
"DBSnapshotIdentifier":{"shape":"String"}, + "SnapshotType":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBSubnetGroupsMessage":{ + "type":"structure", + "members":{ + "DBSubnetGroupName":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeEngineDefaultParametersMessage":{ + "type":"structure", + "required":["DBParameterGroupFamily"], + "members":{ + "DBParameterGroupFamily":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeEngineDefaultParametersResult":{ + "type":"structure", + "members":{ + "EngineDefaults":{"shape":"EngineDefaults"} + } + }, + "DescribeEventCategoriesMessage":{ + "type":"structure", + "members":{ + "SourceType":{"shape":"String"} + } + }, + "DescribeEventSubscriptionsMessage":{ + "type":"structure", + "members":{ + "SubscriptionName":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeEventsMessage":{ + "type":"structure", + "members":{ + "SourceIdentifier":{"shape":"String"}, + "SourceType":{"shape":"SourceType"}, + "StartTime":{"shape":"TStamp"}, + "EndTime":{"shape":"TStamp"}, + "Duration":{"shape":"IntegerOptional"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeOptionGroupOptionsMessage":{ + "type":"structure", + "required":["EngineName"], + "members":{ + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeOptionGroupsMessage":{ + "type":"structure", + "members":{ + "OptionGroupName":{"shape":"String"}, + "Marker":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"} + } + }, + "DescribeOrderableDBInstanceOptionsMessage":{ + "type":"structure", + "required":["Engine"], + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "LicenseModel":{"shape":"String"}, + "Vpc":{"shape":"BooleanOptional"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeReservedDBInstancesMessage":{ + "type":"structure", + "members":{ + "ReservedDBInstanceId":{"shape":"String"}, + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Duration":{"shape":"String"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeReservedDBInstancesOfferingsMessage":{ + "type":"structure", + "members":{ + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Duration":{"shape":"String"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "Double":{"type":"double"}, + "EC2SecurityGroup":{ + "type":"structure", + "members":{ + "Status":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupId":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"} + } + }, + "EC2SecurityGroupList":{ + "type":"list", + "member":{ 
+ "shape":"EC2SecurityGroup", + "locationName":"EC2SecurityGroup" + } + }, + "Endpoint":{ + "type":"structure", + "members":{ + "Address":{"shape":"String"}, + "Port":{"shape":"Integer"} + } + }, + "EngineDefaults":{ + "type":"structure", + "members":{ + "DBParameterGroupFamily":{"shape":"String"}, + "Marker":{"shape":"String"}, + "Parameters":{"shape":"ParametersList"} + }, + "wrapper":true + }, + "Event":{ + "type":"structure", + "members":{ + "SourceIdentifier":{"shape":"String"}, + "SourceType":{"shape":"SourceType"}, + "Message":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "Date":{"shape":"TStamp"} + } + }, + "EventCategoriesList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"EventCategory" + } + }, + "EventCategoriesMap":{ + "type":"structure", + "members":{ + "SourceType":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"} + }, + "wrapper":true + }, + "EventCategoriesMapList":{ + "type":"list", + "member":{ + "shape":"EventCategoriesMap", + "locationName":"EventCategoriesMap" + } + }, + "EventCategoriesMessage":{ + "type":"structure", + "members":{ + "EventCategoriesMapList":{"shape":"EventCategoriesMapList"} + } + }, + "EventList":{ + "type":"list", + "member":{ + "shape":"Event", + "locationName":"Event" + } + }, + "EventSubscription":{ + "type":"structure", + "members":{ + "Id":{"shape":"String"}, + "CustomerAwsId":{"shape":"String"}, + "CustSubscriptionId":{"shape":"String"}, + "SnsTopicArn":{"shape":"String"}, + "Status":{"shape":"String"}, + "SubscriptionCreationTime":{"shape":"String"}, + "SourceType":{"shape":"String"}, + "SourceIdsList":{"shape":"SourceIdsList"}, + "EventCategoriesList":{"shape":"EventCategoriesList"}, + "Enabled":{"shape":"Boolean"} + }, + "wrapper":true + }, + "EventSubscriptionQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"EventSubscriptionQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "EventSubscriptionsList":{ + "type":"list", + "member":{ + "shape":"EventSubscription", + "locationName":"EventSubscription" + } + }, + "EventSubscriptionsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "EventSubscriptionsList":{"shape":"EventSubscriptionsList"} + } + }, + "EventsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "Events":{"shape":"EventList"} + } + }, + "IPRange":{ + "type":"structure", + "members":{ + "Status":{"shape":"String"}, + "CIDRIP":{"shape":"String"} + } + }, + "IPRangeList":{ + "type":"list", + "member":{ + "shape":"IPRange", + "locationName":"IPRange" + } + }, + "InstanceQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InstanceQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InsufficientDBInstanceCapacityFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InsufficientDBInstanceCapacity", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Integer":{"type":"integer"}, + "IntegerOptional":{"type":"integer"}, + "InvalidDBInstanceStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBInstanceState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBParameterGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBParameterGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + 
"InvalidDBSecurityGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSecurityGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSnapshotStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSnapshotState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSubnetGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSubnetGroupStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSubnetStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSubnetStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidEventSubscriptionStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidEventSubscriptionState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidOptionGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidOptionGroupStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidRestoreFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidRestoreFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidSubnet":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidSubnet", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidVPCNetworkStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidVPCNetworkStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "KeyList":{ + "type":"list", + "member":{"shape":"String"} + }, + "ListTagsForResourceMessage":{ + "type":"structure", + "required":["ResourceName"], + "members":{ + "ResourceName":{"shape":"String"} + } + }, + "ModifyDBInstanceMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "AllocatedStorage":{"shape":"IntegerOptional"}, + "DBInstanceClass":{"shape":"String"}, + "DBSecurityGroups":{"shape":"DBSecurityGroupNameList"}, + "VpcSecurityGroupIds":{"shape":"VpcSecurityGroupIdList"}, + "ApplyImmediately":{"shape":"Boolean"}, + "MasterUserPassword":{"shape":"String"}, + "DBParameterGroupName":{"shape":"String"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "PreferredBackupWindow":{"shape":"String"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "EngineVersion":{"shape":"String"}, + "AllowMajorVersionUpgrade":{"shape":"Boolean"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "NewDBInstanceIdentifier":{"shape":"String"} + } + }, + "ModifyDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "ModifyDBParameterGroupMessage":{ + "type":"structure", + "required":[ + "DBParameterGroupName", + "Parameters" + ], + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "Parameters":{"shape":"ParametersList"} + } + }, + "ModifyDBSubnetGroupMessage":{ + "type":"structure", + "required":[ + "DBSubnetGroupName", + "SubnetIds" + ], + "members":{ + "DBSubnetGroupName":{"shape":"String"}, + "DBSubnetGroupDescription":{"shape":"String"}, + "SubnetIds":{"shape":"SubnetIdentifierList"} + 
} + }, + "ModifyDBSubnetGroupResult":{ + "type":"structure", + "members":{ + "DBSubnetGroup":{"shape":"DBSubnetGroup"} + } + }, + "ModifyEventSubscriptionMessage":{ + "type":"structure", + "required":["SubscriptionName"], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SnsTopicArn":{"shape":"String"}, + "SourceType":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "Enabled":{"shape":"BooleanOptional"} + } + }, + "ModifyEventSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "ModifyOptionGroupMessage":{ + "type":"structure", + "required":["OptionGroupName"], + "members":{ + "OptionGroupName":{"shape":"String"}, + "OptionsToInclude":{"shape":"OptionConfigurationList"}, + "OptionsToRemove":{"shape":"OptionNamesList"}, + "ApplyImmediately":{"shape":"Boolean"} + } + }, + "ModifyOptionGroupResult":{ + "type":"structure", + "members":{ + "OptionGroup":{"shape":"OptionGroup"} + } + }, + "Option":{ + "type":"structure", + "members":{ + "OptionName":{"shape":"String"}, + "OptionDescription":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "DBSecurityGroupMemberships":{"shape":"DBSecurityGroupMembershipList"}, + "VpcSecurityGroupMemberships":{"shape":"VpcSecurityGroupMembershipList"} + } + }, + "OptionConfiguration":{ + "type":"structure", + "required":["OptionName"], + "members":{ + "OptionName":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "DBSecurityGroupMemberships":{"shape":"DBSecurityGroupNameList"}, + "VpcSecurityGroupMemberships":{"shape":"VpcSecurityGroupIdList"} + } + }, + "OptionConfigurationList":{ + "type":"list", + "member":{ + "shape":"OptionConfiguration", + "locationName":"OptionConfiguration" + } + }, + "OptionGroup":{ + "type":"structure", + "members":{ + "OptionGroupName":{"shape":"String"}, + "OptionGroupDescription":{"shape":"String"}, + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"}, + "Options":{"shape":"OptionsList"}, + "AllowsVpcAndNonVpcInstanceMemberships":{"shape":"Boolean"}, + "VpcId":{"shape":"String"} + }, + "wrapper":true + }, + "OptionGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"OptionGroupAlreadyExistsFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "OptionGroupMembership":{ + "type":"structure", + "members":{ + "OptionGroupName":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "OptionGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"OptionGroupNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "OptionGroupOption":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "Description":{"shape":"String"}, + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"}, + "MinimumRequiredMinorEngineVersion":{"shape":"String"}, + "PortRequired":{"shape":"Boolean"}, + "DefaultPort":{"shape":"IntegerOptional"}, + "OptionsDependedOn":{"shape":"OptionsDependedOn"} + } + }, + "OptionGroupOptionsList":{ + "type":"list", + "member":{ + "shape":"OptionGroupOption", + "locationName":"OptionGroupOption" + } + }, + "OptionGroupOptionsMessage":{ + "type":"structure", + "members":{ + "OptionGroupOptions":{"shape":"OptionGroupOptionsList"}, + "Marker":{"shape":"String"} + } + }, + "OptionGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"OptionGroupQuotaExceededFault", + "httpStatusCode":400, + 
"senderFault":true + }, + "exception":true + }, + "OptionGroups":{ + "type":"structure", + "members":{ + "OptionGroupsList":{"shape":"OptionGroupsList"}, + "Marker":{"shape":"String"} + } + }, + "OptionGroupsList":{ + "type":"list", + "member":{ + "shape":"OptionGroup", + "locationName":"OptionGroup" + } + }, + "OptionNamesList":{ + "type":"list", + "member":{"shape":"String"} + }, + "OptionsDependedOn":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"OptionName" + } + }, + "OptionsList":{ + "type":"list", + "member":{ + "shape":"Option", + "locationName":"Option" + } + }, + "OrderableDBInstanceOption":{ + "type":"structure", + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "LicenseModel":{"shape":"String"}, + "AvailabilityZones":{"shape":"AvailabilityZoneList"}, + "MultiAZCapable":{"shape":"Boolean"}, + "ReadReplicaCapable":{"shape":"Boolean"}, + "Vpc":{"shape":"Boolean"} + }, + "wrapper":true + }, + "OrderableDBInstanceOptionsList":{ + "type":"list", + "member":{ + "shape":"OrderableDBInstanceOption", + "locationName":"OrderableDBInstanceOption" + } + }, + "OrderableDBInstanceOptionsMessage":{ + "type":"structure", + "members":{ + "OrderableDBInstanceOptions":{"shape":"OrderableDBInstanceOptionsList"}, + "Marker":{"shape":"String"} + } + }, + "Parameter":{ + "type":"structure", + "members":{ + "ParameterName":{"shape":"String"}, + "ParameterValue":{"shape":"String"}, + "Description":{"shape":"String"}, + "Source":{"shape":"String"}, + "ApplyType":{"shape":"String"}, + "DataType":{"shape":"String"}, + "AllowedValues":{"shape":"String"}, + "IsModifiable":{"shape":"Boolean"}, + "MinimumEngineVersion":{"shape":"String"}, + "ApplyMethod":{"shape":"ApplyMethod"} + } + }, + "ParametersList":{ + "type":"list", + "member":{ + "shape":"Parameter", + "locationName":"Parameter" + } + }, + "PendingModifiedValues":{ + "type":"structure", + "members":{ + "DBInstanceClass":{"shape":"String"}, + "AllocatedStorage":{"shape":"IntegerOptional"}, + "MasterUserPassword":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "EngineVersion":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "DBInstanceIdentifier":{"shape":"String"} + } + }, + "PointInTimeRestoreNotEnabledFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"PointInTimeRestoreNotEnabled", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "PromoteReadReplicaMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "PreferredBackupWindow":{"shape":"String"} + } + }, + "PromoteReadReplicaResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "ProvisionedIopsNotAvailableInAZFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ProvisionedIopsNotAvailableInAZFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "PurchaseReservedDBInstancesOfferingMessage":{ + "type":"structure", + "required":["ReservedDBInstancesOfferingId"], + "members":{ + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "ReservedDBInstanceId":{"shape":"String"}, + "DBInstanceCount":{"shape":"IntegerOptional"} + } + }, + "PurchaseReservedDBInstancesOfferingResult":{ + "type":"structure", + "members":{ + 
"ReservedDBInstance":{"shape":"ReservedDBInstance"} + } + }, + "ReadReplicaDBInstanceIdentifierList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ReadReplicaDBInstanceIdentifier" + } + }, + "RebootDBInstanceMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "ForceFailover":{"shape":"BooleanOptional"} + } + }, + "RebootDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "RecurringCharge":{ + "type":"structure", + "members":{ + "RecurringChargeAmount":{"shape":"Double"}, + "RecurringChargeFrequency":{"shape":"String"} + }, + "wrapper":true + }, + "RecurringChargeList":{ + "type":"list", + "member":{ + "shape":"RecurringCharge", + "locationName":"RecurringCharge" + } + }, + "RemoveSourceIdentifierFromSubscriptionMessage":{ + "type":"structure", + "required":[ + "SubscriptionName", + "SourceIdentifier" + ], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SourceIdentifier":{"shape":"String"} + } + }, + "RemoveSourceIdentifierFromSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "RemoveTagsFromResourceMessage":{ + "type":"structure", + "required":[ + "ResourceName", + "TagKeys" + ], + "members":{ + "ResourceName":{"shape":"String"}, + "TagKeys":{"shape":"KeyList"} + } + }, + "ReservedDBInstance":{ + "type":"structure", + "members":{ + "ReservedDBInstanceId":{"shape":"String"}, + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "StartTime":{"shape":"TStamp"}, + "Duration":{"shape":"Integer"}, + "FixedPrice":{"shape":"Double"}, + "UsagePrice":{"shape":"Double"}, + "CurrencyCode":{"shape":"String"}, + "DBInstanceCount":{"shape":"Integer"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MultiAZ":{"shape":"Boolean"}, + "State":{"shape":"String"}, + "RecurringCharges":{"shape":"RecurringChargeList"} + }, + "wrapper":true + }, + "ReservedDBInstanceAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedDBInstanceAlreadyExists", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ReservedDBInstanceList":{ + "type":"list", + "member":{ + "shape":"ReservedDBInstance", + "locationName":"ReservedDBInstance" + } + }, + "ReservedDBInstanceMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ReservedDBInstances":{"shape":"ReservedDBInstanceList"} + } + }, + "ReservedDBInstanceNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedDBInstanceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ReservedDBInstanceQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedDBInstanceQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ReservedDBInstancesOffering":{ + "type":"structure", + "members":{ + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Duration":{"shape":"Integer"}, + "FixedPrice":{"shape":"Double"}, + "UsagePrice":{"shape":"Double"}, + "CurrencyCode":{"shape":"String"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MultiAZ":{"shape":"Boolean"}, + "RecurringCharges":{"shape":"RecurringChargeList"} + }, + "wrapper":true + }, + "ReservedDBInstancesOfferingList":{ + 
"type":"list", + "member":{ + "shape":"ReservedDBInstancesOffering", + "locationName":"ReservedDBInstancesOffering" + } + }, + "ReservedDBInstancesOfferingMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ReservedDBInstancesOfferings":{"shape":"ReservedDBInstancesOfferingList"} + } + }, + "ReservedDBInstancesOfferingNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedDBInstancesOfferingNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ResetDBParameterGroupMessage":{ + "type":"structure", + "required":["DBParameterGroupName"], + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "ResetAllParameters":{"shape":"Boolean"}, + "Parameters":{"shape":"ParametersList"} + } + }, + "RestoreDBInstanceFromDBSnapshotMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "DBSnapshotIdentifier" + ], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "DBSnapshotIdentifier":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "AvailabilityZone":{"shape":"String"}, + "DBSubnetGroupName":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "PubliclyAccessible":{"shape":"BooleanOptional"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "LicenseModel":{"shape":"String"}, + "DBName":{"shape":"String"}, + "Engine":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"} + } + }, + "RestoreDBInstanceFromDBSnapshotResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "RestoreDBInstanceToPointInTimeMessage":{ + "type":"structure", + "required":[ + "SourceDBInstanceIdentifier", + "TargetDBInstanceIdentifier" + ], + "members":{ + "SourceDBInstanceIdentifier":{"shape":"String"}, + "TargetDBInstanceIdentifier":{"shape":"String"}, + "RestoreTime":{"shape":"TStamp"}, + "UseLatestRestorableTime":{"shape":"Boolean"}, + "DBInstanceClass":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "AvailabilityZone":{"shape":"String"}, + "DBSubnetGroupName":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "PubliclyAccessible":{"shape":"BooleanOptional"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "LicenseModel":{"shape":"String"}, + "DBName":{"shape":"String"}, + "Engine":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"} + } + }, + "RestoreDBInstanceToPointInTimeResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "RevokeDBSecurityGroupIngressMessage":{ + "type":"structure", + "required":["DBSecurityGroupName"], + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "CIDRIP":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupId":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"} + } + }, + "RevokeDBSecurityGroupIngressResult":{ + "type":"structure", + "members":{ + "DBSecurityGroup":{"shape":"DBSecurityGroup"} + } + }, + "SNSInvalidTopicFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SNSInvalidTopic", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SNSNoAuthorizationFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SNSNoAuthorization", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SNSTopicArnNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + 
"code":"SNSTopicArnNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SnapshotQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SnapshotQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SourceIdsList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SourceId" + } + }, + "SourceNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SourceType":{ + "type":"string", + "enum":[ + "db-instance", + "db-parameter-group", + "db-security-group", + "db-snapshot" + ] + }, + "StorageQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"StorageQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "String":{"type":"string"}, + "Subnet":{ + "type":"structure", + "members":{ + "SubnetIdentifier":{"shape":"String"}, + "SubnetAvailabilityZone":{"shape":"AvailabilityZone"}, + "SubnetStatus":{"shape":"String"} + } + }, + "SubnetAlreadyInUse":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubnetAlreadyInUse", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SubnetIdentifierList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SubnetIdentifier" + } + }, + "SubnetList":{ + "type":"list", + "member":{ + "shape":"Subnet", + "locationName":"Subnet" + } + }, + "SubscriptionAlreadyExistFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionAlreadyExist", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SubscriptionCategoryNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionCategoryNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SubscriptionNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SupportedCharacterSetsList":{ + "type":"list", + "member":{ + "shape":"CharacterSet", + "locationName":"CharacterSet" + } + }, + "TStamp":{"type":"timestamp"}, + "Tag":{ + "type":"structure", + "members":{ + "Key":{"shape":"String"}, + "Value":{"shape":"String"} + } + }, + "TagList":{ + "type":"list", + "member":{ + "shape":"Tag", + "locationName":"Tag" + } + }, + "TagListMessage":{ + "type":"structure", + "members":{ + "TagList":{"shape":"TagList"} + } + }, + "VpcSecurityGroupIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"VpcSecurityGroupId" + } + }, + "VpcSecurityGroupMembership":{ + "type":"structure", + "members":{ + "VpcSecurityGroupId":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "VpcSecurityGroupMembershipList":{ + "type":"list", + "member":{ + "shape":"VpcSecurityGroupMembership", + "locationName":"VpcSecurityGroupMembership" + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-01-10/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-01-10/docs-2.json new file mode 100644 index 000000000..b955e246d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-01-10/docs-2.json @@ -0,0 +1,1681 @@ +{ + "version": "2.0", + "service": "Amazon Relational Database Service

Amazon Relational Database Service (Amazon RDS) is a web service that makes it easier to set up, operate, and scale a relational database in the cloud. It provides cost-efficient, resizable capacity for an industry-standard relational database and manages common database administration tasks, freeing up developers to focus on what makes their applications and businesses unique. Amazon RDS gives you access to the capabilities of a familiar MySQL or Oracle database server. This means the code, applications, and tools you already use today with your existing MySQL or Oracle databases work with Amazon RDS without modification. Amazon RDS automatically backs up your database and maintains the database software that powers your DB Instance. Amazon RDS is flexible: you can scale your database instance's compute resources and storage capacity to meet your application's demand. As with all Amazon Web Services, there are no up-front investments, and you pay only for the resources you use. This is the Amazon RDS API Reference. It contains a comprehensive description of all Amazon RDS Query APIs and data types. Note that this API is asynchronous and some actions may require polling to determine when an action has been applied. See the parameter description to determine if a change is applied immediately, on the next instance reboot, or during the maintenance window. To get started with Amazon RDS, go to the Amazon RDS Getting Started Guide. For more information on Amazon RDS concepts and usage scenarios, go to the Amazon RDS User Guide.",
+  "operations": {
+    "AddSourceIdentifierToSubscription": "Adds a source identifier to an existing RDS event notification subscription.",
    ", + "AddTagsToResource": "

    Adds metadata tags to a DB Instance. These tags can also be used with cost allocation reporting to track cost associated with a DB Instance.

    For an overview on tagging DB Instances, see DB Instance Tags.

    ", + "AuthorizeDBSecurityGroupIngress": "

    Enables ingress to a DBSecurityGroup using one of two forms of authorization. First, EC2 or VPC Security Groups can be added to the DBSecurityGroup if the application using the database is running on EC2 or VPC instances. Second, IP ranges are available if the application accessing your database is running on the Internet. Required parameters for this API are one of CIDR range, EC2SecurityGroupId for VPC, or (EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId for non-VPC).

    You cannot authorize ingress from an EC2 security group in one Region to an Amazon RDS DB Instance in another. You cannot authorize ingress from a VPC security group in one VPC to an Amazon RDS DB Instance in another.

    For an overview of CIDR ranges, go to the Wikipedia Tutorial.
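The accepted parameter combinations above are easiest to see in code. A minimal Go sketch of the CIDR-range form (editorial, not part of the vendored file; it assumes the vendored aws-sdk-go v1 surface, and the group name and CIDR are placeholders):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	svc := rds.New(session.Must(session.NewSession()))
	// CIDR-range authorization; for the EC2/VPC form you would instead set
	// EC2SecurityGroupId (VPC), or EC2SecurityGroupOwnerId plus
	// EC2SecurityGroupName/EC2SecurityGroupId, as described above.
	out, err := svc.AuthorizeDBSecurityGroupIngress(&rds.AuthorizeDBSecurityGroupIngressInput{
		DBSecurityGroupName: aws.String("mygroup"),        // placeholder
		CIDRIP:              aws.String("203.0.113.0/24"), // placeholder
	})
	if err != nil {
		fmt.Println("authorize failed:", err)
		return
	}
	fmt.Println(out.DBSecurityGroup)
}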

    ", + "CopyDBSnapshot": "

    Copies the specified DBSnapshot. The source DBSnapshot must be in the \"available\" state.

    ", + "CreateDBInstance": "

    Creates a new DB instance.
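For orientation, a minimal Go sketch of this call (editorial, not part of the vendored file). The six input members below are exactly the ones the api-2.json above marks as required for CreateDBInstanceMessage; all values are placeholders:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	svc := rds.New(session.Must(session.NewSession()))
	out, err := svc.CreateDBInstance(&rds.CreateDBInstanceInput{
		DBInstanceIdentifier: aws.String("mydbinstance"), // placeholder
		AllocatedStorage:     aws.Int64(20),
		DBInstanceClass:      aws.String("db.m1.small"),
		Engine:               aws.String("mysql"),
		MasterUsername:       aws.String("admin"),
		MasterUserPassword:   aws.String("replace-me"),
	})
	if err != nil {
		fmt.Println("create failed:", err)
		return
	}
	fmt.Println(out.DBInstance)
}

Since the API is asynchronous, the returned DBInstance will initially report a "creating" status and must be polled via DescribeDBInstances.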

    ", + "CreateDBInstanceReadReplica": "

    Creates a DB Instance that acts as a Read Replica of a source DB Instance.

    All Read Replica DB Instances are created as Single-AZ deployments with backups disabled. All other DB Instance attributes (including DB Security Groups and DB Parameter Groups) are inherited from the source DB Instance, except as specified below.

    The source DB Instance must have backup retention enabled.

    ", + "CreateDBParameterGroup": "

    Creates a new DB Parameter Group.

    A DB Parameter Group is initially created with the default parameters for the database engine used by the DB Instance. To provide custom values for any of the parameters, you must modify the group after creating it using ModifyDBParameterGroup. Once you've created a DB Parameter Group, you need to associate it with your DB Instance using ModifyDBInstance. When you associate a new DB Parameter Group with a running DB Instance, you need to reboot the DB Instance for the new DB Parameter Group and associated settings to take effect.

    ", + "CreateDBSecurityGroup": "

    Creates a new DB Security Group. DB Security Groups control access to a DB Instance.

    ", + "CreateDBSnapshot": "

    Creates a DBSnapshot. The source DBInstance must be in \"available\" state.

    ", + "CreateDBSubnetGroup": "

    Creates a new DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the region.

    ", + "CreateEventSubscription": "

    Creates an RDS event notification subscription. This action requires a topic ARN (Amazon Resource Name) created by either the RDS console, the SNS console, or the SNS API. To obtain an ARN with SNS, you must create a topic in Amazon SNS and subscribe to the topic. The ARN is displayed in the SNS console.

    You can specify the type of source (SourceType) you want to be notified of, provide a list of RDS sources (SourceIds) that triggers the events, and provide a list of event categories (EventCategories) for events you want to be notified of. For example, you can specify SourceType = db-instance, SourceIds = mydbinstance1, mydbinstance2 and EventCategories = Availability, Backup.

    If you specify both the SourceType and SourceIds, such as SourceType = db-instance and SourceIdentifier = myDBInstance1, you will be notified of all the db-instance events for the specified source. If you specify a SourceType but do not specify a SourceIdentifier, you will receive notice of the events for that source type for all your RDS sources. If you do not specify either the SourceType nor the SourceIdentifier, you will be notified of events generated from all RDS sources belonging to your customer account.
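A minimal Go sketch mirroring the example values in the doc string above (editorial, not part of the vendored file; the subscription name and topic ARN are placeholders):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	svc := rds.New(session.Must(session.NewSession()))
	out, err := svc.CreateEventSubscription(&rds.CreateEventSubscriptionInput{
		SubscriptionName: aws.String("mysubscription"),                               // placeholder
		SnsTopicArn:      aws.String("arn:aws:sns:us-east-1:123456789012:mytopic"), // placeholder ARN
		SourceType:       aws.String("db-instance"),
		SourceIds:        []*string{aws.String("mydbinstance1"), aws.String("mydbinstance2")},
		EventCategories:  []*string{aws.String("availability"), aws.String("backup")},
		Enabled:          aws.Bool(true),
	})
	if err != nil {
		fmt.Println("create subscription failed:", err)
		return
	}
	fmt.Println(out.EventSubscription)
}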

    ", + "CreateOptionGroup": "

    Creates a new Option Group.

    ", + "DeleteDBInstance": "

    The DeleteDBInstance API deletes a previously provisioned RDS instance. A successful response from the web service indicates the request was received correctly. If a final DBSnapshot is requested the status of the RDS instance will be \"deleting\" until the DBSnapshot is created. DescribeDBInstance is used to monitor the status of this operation. This cannot be canceled or reverted once submitted.
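A minimal Go sketch of the final-snapshot path described above (editorial, not part of the vendored file; identifiers are placeholders). DeleteDBInstanceMessage in the api-2.json above carries exactly these three members:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	svc := rds.New(session.Must(session.NewSession()))
	out, err := svc.DeleteDBInstance(&rds.DeleteDBInstanceInput{
		DBInstanceIdentifier:      aws.String("mydbinstance"),       // placeholder
		SkipFinalSnapshot:         aws.Bool(false),                  // take a final snapshot
		FinalDBSnapshotIdentifier: aws.String("mydbinstance-final"), // required when SkipFinalSnapshot is false
	})
	if err != nil {
		fmt.Println("delete failed:", err)
		return
	}
	// Status stays "deleting" until the final snapshot completes.
	fmt.Println(aws.StringValue(out.DBInstance.DBInstanceStatus))
}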

    ", + "DeleteDBParameterGroup": "

    Deletes a specified DBParameterGroup. The DBParameterGroup cannot be associated with any RDS instances to be deleted.

    The specified DB Parameter Group cannot be associated with any DB Instances. ", + "DeleteDBSecurityGroup": "

    Deletes a DB Security Group.

    The specified DB Security Group must not be associated with any DB Instances.", + "DeleteDBSnapshot": "

    Deletes a DBSnapshot.

    The DBSnapshot must be in the available state to be deleted.", + "DeleteDBSubnetGroup": "

    Deletes a DB subnet group.

    The specified database subnet group must not be associated with any DB instances.", + "DeleteEventSubscription": "

    Deletes an RDS event notification subscription.

    ", + "DeleteOptionGroup": "

    Deletes an existing Option Group.

    ", + "DescribeDBEngineVersions": "

    Returns a list of the available DB engines.

    ", + "DescribeDBInstances": "

    Returns information about provisioned RDS instances. This API supports pagination.
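Pagination here is marker-based: each response carries a Marker when more pages exist, which is fed back into the next request. A minimal Go sketch of the loop (editorial, not part of the vendored file; MaxRecords of 20 is just an illustrative page size):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	svc := rds.New(session.Must(session.NewSession()))
	input := &rds.DescribeDBInstancesInput{MaxRecords: aws.Int64(20)}
	for {
		out, err := svc.DescribeDBInstances(input)
		if err != nil {
			fmt.Println("describe failed:", err)
			return
		}
		for _, db := range out.DBInstances {
			fmt.Println(aws.StringValue(db.DBInstanceIdentifier))
		}
		if out.Marker == nil { // no more pages
			break
		}
		input.Marker = out.Marker // request the next page
	}
}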

    ", + "DescribeDBParameterGroups": "

    Returns a list of DBParameterGroup descriptions. If a DBParameterGroupName is specified, the list will contain only the description of the specified DBParameterGroup.

    ", + "DescribeDBParameters": "

    Returns the detailed parameter list for a particular DBParameterGroup.

    ", + "DescribeDBSecurityGroups": "

    Returns a list of DBSecurityGroup descriptions. If a DBSecurityGroupName is specified, the list will contain only the descriptions of the specified DBSecurityGroup.

    For an overview of CIDR ranges, go to the Wikipedia Tutorial.

    ", + "DescribeDBSnapshots": "

    Returns information about DBSnapshots. This API supports pagination.

    ", + "DescribeDBSubnetGroups": "

    Returns a list of DBSubnetGroup descriptions. If a DBSubnetGroupName is specified, the list will contain only the descriptions of the specified DBSubnetGroup.

    For an overview of CIDR ranges, go to the Wikipedia Tutorial.

    ", + "DescribeEngineDefaultParameters": "

    Returns the default engine and system parameter information for the specified database engine.

    ", + "DescribeEventCategories": "

    Displays a list of categories for all event source types, or, if specified, for a specified source type. You can see a list of the event categories and source types in the Events topic in the Amazon RDS User Guide.

    ", + "DescribeEventSubscriptions": "

    Lists all the subscription descriptions for a customer account. The description for a subscription includes SubscriptionName, SNSTopicARN, CustomerID, SourceType, SourceID, CreationTime, and Status.

    If you specify a SubscriptionName, lists the description for that subscription.

    ", + "DescribeEvents": "

    Returns events related to DB instances, DB security groups, DB Snapshots, and DB parameter groups for the past 14 days. Events specific to a particular DB Iinstance, DB security group, DB Snapshot, or DB parameter group can be obtained by providing the source identifier as a parameter. By default, the past hour of events are returned.

    You can see a list of event categories and source types in the Events topic in the Amazon RDS User Guide.

    ", + "DescribeOptionGroupOptions": "

    Describes all available options.

    ", + "DescribeOptionGroups": "

    Describes the available option groups.

    ", + "DescribeOrderableDBInstanceOptions": "

    Returns a list of orderable DB Instance options for the specified engine.

    ", + "DescribeReservedDBInstances": "

    Returns information about reserved DB Instances for this account, or about a specified reserved DB Instance.

    ", + "DescribeReservedDBInstancesOfferings": "

    Lists available reserved DB Instance offerings.

    ", + "ListTagsForResource": "

    Lists all tags on a DB Instance.

    For an overview on tagging DB Instances, see DB Instance Tags.

    ", + "ModifyDBInstance": "

    Modify settings for a DB Instance. You can change one or more database configuration parameters by specifying these parameters and the new values in the request.

    ", + "ModifyDBParameterGroup": "

    Modifies the parameters of a DBParameterGroup. To modify more than one parameter submit a list of the following: ParameterName, ParameterValue, and ApplyMethod. A maximum of 20 parameters can be modified in a single request.

    The apply-immediate method can be used only for dynamic parameters; the pending-reboot method can be used with MySQL and Oracle DB Instances for either dynamic or static parameters. For Microsoft SQL Server DB Instances, the pending-reboot method can be used only for static parameters.
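A minimal Go sketch of the dynamic-versus-static distinction above (editorial, not part of the vendored file; the group name and the two MySQL parameter names/values are illustrative placeholders). The ApplyMethod strings match the ApplyMethod enum in this model:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	svc := rds.New(session.Must(session.NewSession()))
	out, err := svc.ModifyDBParameterGroup(&rds.ModifyDBParameterGroupInput{
		DBParameterGroupName: aws.String("mygroup"), // placeholder
		Parameters: []*rds.Parameter{
			{
				// Dynamic parameter: may be applied immediately.
				ParameterName:  aws.String("max_connections"),
				ParameterValue: aws.String("250"),
				ApplyMethod:    aws.String("immediate"),
			},
			{
				// Static parameter: takes effect on the next reboot.
				ParameterName:  aws.String("innodb_buffer_pool_size"),
				ParameterValue: aws.String("134217728"),
				ApplyMethod:    aws.String("pending-reboot"),
			},
		},
	})
	if err != nil {
		fmt.Println("modify failed:", err)
		return
	}
	fmt.Println(aws.StringValue(out.DBParameterGroupName))
}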

    ", + "ModifyDBSubnetGroup": "

    Modifies an existing DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the region.

    ", + "ModifyEventSubscription": "

    Modifies an existing RDS event notification subscription. Note that you cannot modify the source identifiers using this call; to change source identifiers for a subscription, use the AddSourceIdentifierToSubscription and RemoveSourceIdentifierFromSubscription calls.

    You can see a list of the event categories for a given SourceType in the Events topic in the Amazon RDS User Guide or by using the DescribeEventCategories action.

    ", + "ModifyOptionGroup": "

    Modifies an existing Option Group.

    ", + "PromoteReadReplica": "

    Promotes a Read Replica DB Instance to a standalone DB Instance.

    ", + "PurchaseReservedDBInstancesOffering": "

    Purchases a reserved DB Instance offering.

    ", + "RebootDBInstance": "

    Reboots a previously provisioned RDS instance. This API results in the application of modified DBParameterGroup parameters with ApplyStatus of pending-reboot to the RDS instance. This action is taken as soon as possible, and results in a momentary outage to the RDS instance during which the RDS instance status is set to rebooting. If the RDS instance is configured for MultiAZ, it is possible that the reboot will be conducted through a failover. A DBInstance event is created when the reboot is completed.

    ", + "RemoveSourceIdentifierFromSubscription": "

    Removes a source identifier from an existing RDS event notification subscription.

    ", + "RemoveTagsFromResource": "

    Removes metadata tags from a DB Instance.

    For an overview on tagging DB Instances, see DB Instance Tags.

    ", + "ResetDBParameterGroup": "

    Modifies the parameters of a DBParameterGroup to the engine/system default value. To reset specific parameters submit a list of the following: ParameterName and ApplyMethod. To reset the entire DBParameterGroup specify the DBParameterGroup name and ResetAllParameters parameters. When resetting the entire group, dynamic parameters are updated immediately and static parameters are set to pending-reboot to take effect on the next DB instance restart or RebootDBInstance request.

    ", + "RestoreDBInstanceFromDBSnapshot": "

    Creates a new DB Instance from a DB snapshot. The target database is created from the source database restore point with the same configuration as the original source database, except that the new RDS instance is created with the default security group.

    ", + "RestoreDBInstanceToPointInTime": "

    Restores a DB Instance to an arbitrary point-in-time. Users can restore to any point in time before the latestRestorableTime for up to backupRetentionPeriod days. The target database is created from the source database with the same configuration as the original database except that the DB instance is created with the default DB security group.
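A minimal Go sketch of this restore (editorial, not part of the vendored file; identifiers are placeholders). Per the shape refs later in this file, UseLatestRestorableTime and RestoreTime are mutually exclusive:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	svc := rds.New(session.Must(session.NewSession()))
	out, err := svc.RestoreDBInstanceToPointInTime(&rds.RestoreDBInstanceToPointInTimeInput{
		SourceDBInstanceIdentifier: aws.String("mydbinstance"),          // placeholder
		TargetDBInstanceIdentifier: aws.String("mydbinstance-restored"), // placeholder
		// Restore to the newest restorable time; alternatively set
		// RestoreTime to a specific *time.Time instead of this flag.
		UseLatestRestorableTime: aws.Bool(true),
	})
	if err != nil {
		fmt.Println("restore failed:", err)
		return
	}
	fmt.Println(out.DBInstance)
}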

    ", + "RevokeDBSecurityGroupIngress": "

    Revokes ingress from a DBSecurityGroup for previously authorized IP ranges or EC2 or VPC Security Groups. Required parameters for this API are one of CIDRIP, EC2SecurityGroupId for VPC, or (EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId).

    " + }, + "shapes": { + "AddSourceIdentifierToSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "AddSourceIdentifierToSubscriptionResult": { + "base": null, + "refs": { + } + }, + "AddTagsToResourceMessage": { + "base": "

    ", + "refs": { + } + }, + "ApplyMethod": { + "base": null, + "refs": { + "Parameter$ApplyMethod": "

    Indicates when to apply parameter updates.

    " + } + }, + "AuthorizationAlreadyExistsFault": { + "base": "

    The specified CIDRIP or EC2 security group is already authorized for the specified DB security group.

    ", + "refs": { + } + }, + "AuthorizationNotFoundFault": { + "base": "

    Specified CIDRIP or EC2 security group is not authorized for the specified DB security group.

    RDS may not also be authorized via IAM to perform necessary actions on your behalf.

    ", + "refs": { + } + }, + "AuthorizationQuotaExceededFault": { + "base": "

    DB security group authorization quota has been reached.

    ", + "refs": { + } + }, + "AuthorizeDBSecurityGroupIngressMessage": { + "base": "

    ", + "refs": { + } + }, + "AuthorizeDBSecurityGroupIngressResult": { + "base": null, + "refs": { + } + }, + "AvailabilityZone": { + "base": "

    Contains Availability Zone information.

    This data type is used as an element in the following data type:

    ", + "refs": { + "AvailabilityZoneList$member": null, + "Subnet$SubnetAvailabilityZone": null + } + }, + "AvailabilityZoneList": { + "base": null, + "refs": { + "OrderableDBInstanceOption$AvailabilityZones": "

    A list of availability zones for the orderable DB Instance.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "AvailabilityZone$ProvisionedIopsCapable": "

    True indicates the availability zone is capable of provisioned IOPs.

    ", + "DBInstance$MultiAZ": "

    Specifies if the DB Instance is a Multi-AZ deployment.

    ", + "DBInstance$AutoMinorVersionUpgrade": "

    Indicates that minor version patches are applied automatically.

    ", + "DBInstance$PubliclyAccessible": null, + "DeleteDBInstanceMessage$SkipFinalSnapshot": "

    Determines whether a final DB Snapshot is created before the DB Instance is deleted. If true is specified, no DBSnapshot is created. If false is specified, a DB Snapshot is created before the DB Instance is deleted.

    The FinalDBSnapshotIdentifier parameter must be specified if SkipFinalSnapshot is false.

    Default: false

    ", + "DescribeDBEngineVersionsMessage$DefaultOnly": "

    Indicates that only the default version of the specified engine or engine and major version combination is returned.

    ", + "EventSubscription$Enabled": "

    A Boolean value indicating if the subscription is enabled. True indicates the subscription is enabled.

    ", + "ModifyDBInstanceMessage$ApplyImmediately": "

    Specifies whether or not the modifications in this request and any pending modifications are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the DB Instance.

    If this parameter is passed as false, changes to the DB Instance are applied on the next call to RebootDBInstance, the next maintenance reboot, or the next failure reboot, whichever occurs first. See each parameter to determine when a change is applied.

    Default: false
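The ApplyImmediately semantics above are worth a concrete illustration. A minimal Go sketch (editorial, not part of the vendored file; the instance identifier and target class are placeholders):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	svc := rds.New(session.Must(session.NewSession()))
	out, err := svc.ModifyDBInstance(&rds.ModifyDBInstanceInput{
		DBInstanceIdentifier: aws.String("mydbinstance"), // placeholder
		DBInstanceClass:      aws.String("db.m1.large"),  // placeholder target class
		ApplyImmediately:     aws.Bool(true),             // don't wait for the maintenance window
	})
	if err != nil {
		fmt.Println("modify failed:", err)
		return
	}
	// With ApplyImmediately false, the change would sit here until the
	// next reboot or maintenance window.
	fmt.Println(out.DBInstance.PendingModifiedValues)
}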

    ", + "ModifyDBInstanceMessage$AllowMajorVersionUpgrade": "

    Indicates that major version upgrades are allowed. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible.

    Constraints: This parameter must be set to true when specifying a value for the EngineVersion parameter that is a different major version than the DB Instance's current version.

    ", + "ModifyOptionGroupMessage$ApplyImmediately": "

    Indicates whether the changes should be applied immediately, or during the next maintenance window for each instance associated with the Option Group.

    ", + "OptionGroup$AllowsVpcAndNonVpcInstanceMemberships": "

    Indicates whether this option group can be applied to both VPC and non-VPC instances. The value 'true' indicates the option group can be applied to both VPC and non-VPC instances.

    ", + "OptionGroupOption$PortRequired": "

    Specifies whether the option requires a port.

    ", + "OrderableDBInstanceOption$MultiAZCapable": "

    Indicates whether this orderable DB Instance is multi-AZ capable.

    ", + "OrderableDBInstanceOption$ReadReplicaCapable": "

    Indicates whether this orderable DB Instance can have a read replica.

    ", + "OrderableDBInstanceOption$Vpc": "

    Indicates whether this is a VPC orderable DB Instance.

    ", + "Parameter$IsModifiable": "

    Indicates whether (true) or not (false) the parameter can be modified. Some parameters have security or operational implications that prevent them from being changed.

    ", + "ReservedDBInstance$MultiAZ": "

    Indicates if the reservation applies to Multi-AZ deployments.

    ", + "ReservedDBInstancesOffering$MultiAZ": "

    Indicates if the offering applies to Multi-AZ deployments.

    ", + "ResetDBParameterGroupMessage$ResetAllParameters": "

    Specifies whether (true) or not (false) to reset all parameters in the DB Parameter Group to default values.

    Default: true

    ", + "RestoreDBInstanceToPointInTimeMessage$UseLatestRestorableTime": "

    Specifies whether (true) or not (false) the DB Instance is restored from the latest backup time.

    Default: false

    Constraints: Cannot be specified if RestoreTime parameter is provided.
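
    A short sketch of the mutual exclusion described here, using the vendored Go client (placeholder identifiers):

        package example

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/rds"
        )

        // restoreLatest restores to the most recent restorable time; because
        // UseLatestRestorableTime is true, RestoreTime must not be set.
        func restoreLatest(svc *rds.RDS) error {
            _, err := svc.RestoreDBInstanceToPointInTime(&rds.RestoreDBInstanceToPointInTimeInput{
                SourceDBInstanceIdentifier: aws.String("mydbinstance"),          // placeholder
                TargetDBInstanceIdentifier: aws.String("mydbinstance-restored"), // placeholder
                UseLatestRestorableTime:    aws.Bool(true),
            })
            return err
        }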

    " + } + }, + "BooleanOptional": { + "base": null, + "refs": { + "CreateDBInstanceMessage$MultiAZ": "

    Specifies if the DB Instance is a Multi-AZ deployment. You cannot set the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    ", + "CreateDBInstanceMessage$AutoMinorVersionUpgrade": "

    Indicates that minor engine upgrades will be applied automatically to the DB Instance during the maintenance window.

    Default: true

    ", + "CreateDBInstanceMessage$PubliclyAccessible": null, + "CreateDBInstanceReadReplicaMessage$AutoMinorVersionUpgrade": "

    Indicates that minor engine upgrades will be applied automatically to the Read Replica during the maintenance window.

    Default: Inherits from the source DB Instance

    ", + "CreateDBInstanceReadReplicaMessage$PubliclyAccessible": null, + "CreateEventSubscriptionMessage$Enabled": "

    A Boolean value; set to true to activate the subscription, set to false to create the subscription but not activate it.

    ", + "DescribeDBEngineVersionsMessage$ListSupportedCharacterSets": "

    If this parameter is specified, and if the requested engine supports the CharacterSetName parameter for CreateDBInstance, the response includes a list of supported character sets for each engine version.

    ", + "DescribeOrderableDBInstanceOptionsMessage$Vpc": "

    The VPC filter value. Specify this parameter to show only the available VPC or non-VPC offerings.

    ", + "DescribeReservedDBInstancesMessage$MultiAZ": "

    The Multi-AZ filter value. Specify this parameter to show only those reservations matching the specified Multi-AZ parameter.

    ", + "DescribeReservedDBInstancesOfferingsMessage$MultiAZ": "

    The Multi-AZ filter value. Specify this parameter to show only the available offerings matching the specified Multi-AZ parameter.

    ", + "ModifyDBInstanceMessage$MultiAZ": "

    Specifies if the DB Instance is a Multi-AZ deployment. Changing this parameter does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

    Constraints: Cannot be specified if the DB Instance is a read replica.

    ", + "ModifyDBInstanceMessage$AutoMinorVersionUpgrade": "

    Indicates that minor version upgrades will be applied automatically to the DB Instance during the maintenance window. Changing this parameter does not result in an outage except in the following case and the change is asynchronously applied as soon as possible. An outage will result if this parameter is set to true during the maintenance window, and a newer minor version is available, and RDS has enabled auto patching for that engine version.

    ", + "ModifyEventSubscriptionMessage$Enabled": "

    A Boolean value; set to true to activate the subscription.

    ", + "PendingModifiedValues$MultiAZ": "

    Indicates that the Single-AZ DB Instance is to change to a Multi-AZ deployment.

    ", + "RebootDBInstanceMessage$ForceFailover": "

    When true, the reboot will be conducted through a MultiAZ failover.

    Constraint: You cannot specify true if the instance is not configured for MultiAZ.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$MultiAZ": "

    Specifies if the DB Instance is a Multi-AZ deployment.

    Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$PubliclyAccessible": null, + "RestoreDBInstanceFromDBSnapshotMessage$AutoMinorVersionUpgrade": "

    Indicates that minor version upgrades will be applied automatically to the DB Instance during the maintenance window.

    ", + "RestoreDBInstanceToPointInTimeMessage$MultiAZ": "

    Specifies if the DB Instance is a Multi-AZ deployment.

    Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    ", + "RestoreDBInstanceToPointInTimeMessage$PubliclyAccessible": null, + "RestoreDBInstanceToPointInTimeMessage$AutoMinorVersionUpgrade": "

    Indicates that minor version upgrades will be applied automatically to the DB Instance during the maintenance window.

    " + } + }, + "CharacterSet": { + "base": "

    This data type is used as a response element in the action DescribeDBEngineVersions.

    ", + "refs": { + "DBEngineVersion$DefaultCharacterSet": "

    The default character set for new instances of this engine version, if the CharacterSetName parameter of the CreateDBInstance API is not specified.

    ", + "SupportedCharacterSetsList$member": null + } + }, + "CopyDBSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "CopyDBSnapshotResult": { + "base": null, + "refs": { + } + }, + "CreateDBInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBInstanceReadReplicaMessage": { + "base": null, + "refs": { + } + }, + "CreateDBInstanceReadReplicaResult": { + "base": null, + "refs": { + } + }, + "CreateDBInstanceResult": { + "base": null, + "refs": { + } + }, + "CreateDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBParameterGroupResult": { + "base": null, + "refs": { + } + }, + "CreateDBSecurityGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBSecurityGroupResult": { + "base": null, + "refs": { + } + }, + "CreateDBSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBSnapshotResult": { + "base": null, + "refs": { + } + }, + "CreateDBSubnetGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBSubnetGroupResult": { + "base": null, + "refs": { + } + }, + "CreateEventSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateEventSubscriptionResult": { + "base": null, + "refs": { + } + }, + "CreateOptionGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateOptionGroupResult": { + "base": null, + "refs": { + } + }, + "DBEngineVersion": { + "base": "

    This data type is used as a response element in the action DescribeDBEngineVersions.

    ", + "refs": { + "DBEngineVersionList$member": null + } + }, + "DBEngineVersionList": { + "base": null, + "refs": { + "DBEngineVersionMessage$DBEngineVersions": "

    A list of DBEngineVersion elements.

    " + } + }, + "DBEngineVersionMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBEngineVersions action.

    ", + "refs": { + } + }, + "DBInstance": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBInstances action.

    ", + "refs": { + "CreateDBInstanceReadReplicaResult$DBInstance": null, + "CreateDBInstanceResult$DBInstance": null, + "DBInstanceList$member": null, + "DeleteDBInstanceResult$DBInstance": null, + "ModifyDBInstanceResult$DBInstance": null, + "PromoteReadReplicaResult$DBInstance": null, + "RebootDBInstanceResult$DBInstance": null, + "RestoreDBInstanceFromDBSnapshotResult$DBInstance": null, + "RestoreDBInstanceToPointInTimeResult$DBInstance": null + } + }, + "DBInstanceAlreadyExistsFault": { + "base": "

    User already has a DB instance with the given identifier.

    ", + "refs": { + } + }, + "DBInstanceList": { + "base": null, + "refs": { + "DBInstanceMessage$DBInstances": "

    A list of DBInstance instances.

    " + } + }, + "DBInstanceMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBInstances action.

    ", + "refs": { + } + }, + "DBInstanceNotFoundFault": { + "base": "

    DBInstanceIdentifier does not refer to an existing DB instance.

    ", + "refs": { + } + }, + "DBParameterGroup": { + "base": "

    Contains the result of a successful invocation of the CreateDBParameterGroup action.

    This data type is used as a request parameter in the DeleteDBParameterGroup action, and as a response element in the DescribeDBParameterGroups action.

    ", + "refs": { + "CreateDBParameterGroupResult$DBParameterGroup": null, + "DBParameterGroupList$member": null + } + }, + "DBParameterGroupAlreadyExistsFault": { + "base": "

    A DB parameter group with the same name exists.

    ", + "refs": { + } + }, + "DBParameterGroupDetails": { + "base": "

    Contains the result of a successful invocation of the DescribeDBParameters action.

    ", + "refs": { + } + }, + "DBParameterGroupList": { + "base": null, + "refs": { + "DBParameterGroupsMessage$DBParameterGroups": "

    A list of DBParameterGroup instances.

    " + } + }, + "DBParameterGroupNameMessage": { + "base": "

    Contains the result of a successful invocation of the ModifyDBParameterGroup or ResetDBParameterGroup action.

    ", + "refs": { + } + }, + "DBParameterGroupNotFoundFault": { + "base": "

    DBParameterGroupName does not refer to an existing DB parameter group.

    ", + "refs": { + } + }, + "DBParameterGroupQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB parameter groups.

    ", + "refs": { + } + }, + "DBParameterGroupStatus": { + "base": "

    The status of the DB Parameter Group.

    This data type is used as a response element in the following actions:

    ", + "refs": { + "DBParameterGroupStatusList$member": null + } + }, + "DBParameterGroupStatusList": { + "base": null, + "refs": { + "DBInstance$DBParameterGroups": "

    Provides the list of DB Parameter Groups applied to this DB Instance.

    " + } + }, + "DBParameterGroupsMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBParameterGroups action.

    ", + "refs": { + } + }, + "DBSecurityGroup": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBSecurityGroups action.

    ", + "refs": { + "AuthorizeDBSecurityGroupIngressResult$DBSecurityGroup": null, + "CreateDBSecurityGroupResult$DBSecurityGroup": null, + "DBSecurityGroups$member": null, + "RevokeDBSecurityGroupIngressResult$DBSecurityGroup": null + } + }, + "DBSecurityGroupAlreadyExistsFault": { + "base": "

    A DB security group with the name specified in DBSecurityGroupName already exists.

    ", + "refs": { + } + }, + "DBSecurityGroupMembership": { + "base": "

    This data type is used as a response element in the following actions:

    ", + "refs": { + "DBSecurityGroupMembershipList$member": null + } + }, + "DBSecurityGroupMembershipList": { + "base": null, + "refs": { + "DBInstance$DBSecurityGroups": "

    Provides a list of DB Security Group elements containing only DBSecurityGroup.Name and DBSecurityGroup.Status subelements.

    ", + "Option$DBSecurityGroupMemberships": "

    If the Option requires access to a port, then this DB Security Group allows access to the port.

    " + } + }, + "DBSecurityGroupMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBSecurityGroups action.

    ", + "refs": { + } + }, + "DBSecurityGroupNameList": { + "base": null, + "refs": { + "CreateDBInstanceMessage$DBSecurityGroups": "

    A list of DB Security Groups to associate with this DB Instance.

    Default: The default DB Security Group for the database engine.

    ", + "ModifyDBInstanceMessage$DBSecurityGroups": "

    A list of DB Security Groups to authorize on this DB Instance. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "OptionConfiguration$DBSecurityGroupMemberships": "

    A list of DBSecurityGroupMembership name strings used for this option.

    " + } + }, + "DBSecurityGroupNotFoundFault": { + "base": "

    DBSecurityGroupName does not refer to an existing DB security group.

    ", + "refs": { + } + }, + "DBSecurityGroupNotSupportedFault": { + "base": "

    A DB security group is not allowed for this action.

    ", + "refs": { + } + }, + "DBSecurityGroupQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB security groups.

    ", + "refs": { + } + }, + "DBSecurityGroups": { + "base": null, + "refs": { + "DBSecurityGroupMessage$DBSecurityGroups": "

    A list of DBSecurityGroup instances.

    " + } + }, + "DBSnapshot": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBSnapshots action.

    ", + "refs": { + "CopyDBSnapshotResult$DBSnapshot": null, + "CreateDBSnapshotResult$DBSnapshot": null, + "DBSnapshotList$member": null, + "DeleteDBSnapshotResult$DBSnapshot": null + } + }, + "DBSnapshotAlreadyExistsFault": { + "base": "

    DBSnapshotIdentifier is already used by an existing snapshot.

    ", + "refs": { + } + }, + "DBSnapshotList": { + "base": null, + "refs": { + "DBSnapshotMessage$DBSnapshots": "

    A list of DBSnapshot instances.

    " + } + }, + "DBSnapshotMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBSnapshots action.

    ", + "refs": { + } + }, + "DBSnapshotNotFoundFault": { + "base": "

    DBSnapshotIdentifier does not refer to an existing DB snapshot.

    ", + "refs": { + } + }, + "DBSubnetGroup": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBSubnetGroups action.

    ", + "refs": { + "CreateDBSubnetGroupResult$DBSubnetGroup": null, + "DBInstance$DBSubnetGroup": "

    Provides information on the subnet group associated with the DB instance, including the name, description, and subnets in the subnet group.

    ", + "DBSubnetGroups$member": null, + "ModifyDBSubnetGroupResult$DBSubnetGroup": null + } + }, + "DBSubnetGroupAlreadyExistsFault": { + "base": "

    DBSubnetGroupName is already used by an existing DB subnet group.

    ", + "refs": { + } + }, + "DBSubnetGroupDoesNotCoverEnoughAZs": { + "base": "

    Subnets in the DB subnet group should cover at least two Availability Zones unless there is only one Availability Zone.

    ", + "refs": { + } + }, + "DBSubnetGroupMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBSubnetGroups action.

    ", + "refs": { + } + }, + "DBSubnetGroupNotFoundFault": { + "base": "

    DBSubnetGroupName does not refer to an existing DB subnet group.

    ", + "refs": { + } + }, + "DBSubnetGroupQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB subnet groups.

    ", + "refs": { + } + }, + "DBSubnetGroups": { + "base": null, + "refs": { + "DBSubnetGroupMessage$DBSubnetGroups": "

    A list of DBSubnetGroup instances.

    " + } + }, + "DBSubnetQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of subnets in a DB subnet group.

    ", + "refs": { + } + }, + "DBUpgradeDependencyFailureFault": { + "base": "

    The DB upgrade failed because a resource the DB depends on could not be modified.

    ", + "refs": { + } + }, + "DeleteDBInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBInstanceResult": { + "base": null, + "refs": { + } + }, + "DeleteDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBSecurityGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBSnapshotResult": { + "base": null, + "refs": { + } + }, + "DeleteDBSubnetGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteEventSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteEventSubscriptionResult": { + "base": null, + "refs": { + } + }, + "DeleteOptionGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBEngineVersionsMessage": { + "base": null, + "refs": { + } + }, + "DescribeDBInstancesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBParameterGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBParametersMessage": { + "base": null, + "refs": { + } + }, + "DescribeDBSecurityGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBSnapshotsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBSubnetGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEngineDefaultParametersMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEngineDefaultParametersResult": { + "base": null, + "refs": { + } + }, + "DescribeEventCategoriesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEventSubscriptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEventsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeOptionGroupOptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeOptionGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeOrderableDBInstanceOptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeReservedDBInstancesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeReservedDBInstancesOfferingsMessage": { + "base": "

    ", + "refs": { + } + }, + "Double": { + "base": null, + "refs": { + "RecurringCharge$RecurringChargeAmount": "

    The amount of the recurring charge.

    ", + "ReservedDBInstance$FixedPrice": "

    The fixed price charged for this reserved DB Instance.

    ", + "ReservedDBInstance$UsagePrice": "

    The hourly price charged for this reserved DB Instance.

    ", + "ReservedDBInstancesOffering$FixedPrice": "

    The fixed price charged for this offering.

    ", + "ReservedDBInstancesOffering$UsagePrice": "

    The hourly price charged for this offering.

    " + } + }, + "EC2SecurityGroup": { + "base": "

    This data type is used as a response element in the following actions:

    ", + "refs": { + "EC2SecurityGroupList$member": null + } + }, + "EC2SecurityGroupList": { + "base": null, + "refs": { + "DBSecurityGroup$EC2SecurityGroups": "

    Contains a list of EC2SecurityGroup elements.

    " + } + }, + "Endpoint": { + "base": "

    This data type is used as a response element in the following actions:

    ", + "refs": { + "DBInstance$Endpoint": "

    Specifies the connection endpoint.

    " + } + }, + "EngineDefaults": { + "base": "

    Contains the result of a successful invocation of the DescribeEngineDefaultParameters action.

    ", + "refs": { + "DescribeEngineDefaultParametersResult$EngineDefaults": null + } + }, + "Event": { + "base": "

    This data type is used as a response element in the DescribeEvents action.

    ", + "refs": { + "EventList$member": null + } + }, + "EventCategoriesList": { + "base": null, + "refs": { + "CreateEventSubscriptionMessage$EventCategories": "

    A list of event categories for a SourceType that you want to subscribe to. You can see a list of the categories for a given SourceType in the Events topic in the Amazon RDS User Guide or by using the DescribeEventCategories action.

    ", + "DescribeEventsMessage$EventCategories": "

    A list of event categories that trigger notifications for an event notification subscription.

    ", + "Event$EventCategories": "

    Specifies the category for the event.

    ", + "EventCategoriesMap$EventCategories": "

    The event categories for the specified source type.

    ", + "EventSubscription$EventCategoriesList": "

    A list of event categories for the RDS event notification subscription.

    ", + "ModifyEventSubscriptionMessage$EventCategories": "

    A list of event categories for a SourceType that you want to subscribe to. You can see a list of the categories for a given SourceType in the Events topic in the Amazon RDS User Guide or by using the DescribeEventCategories action.

    " + } + }, + "EventCategoriesMap": { + "base": "

    Contains the results of a successful invocation of the DescribeEventCategories action.

    ", + "refs": { + "EventCategoriesMapList$member": null + } + }, + "EventCategoriesMapList": { + "base": null, + "refs": { + "EventCategoriesMessage$EventCategoriesMapList": "

    A list of EventCategoriesMap data types.

    " + } + }, + "EventCategoriesMessage": { + "base": "

    Data returned from the DescribeEventCategories action.

    ", + "refs": { + } + }, + "EventList": { + "base": null, + "refs": { + "EventsMessage$Events": "

    A list of Event instances.

    " + } + }, + "EventSubscription": { + "base": "

    Contains the results of a successful invocation of the DescribeEventSubscriptions action.

    ", + "refs": { + "AddSourceIdentifierToSubscriptionResult$EventSubscription": null, + "CreateEventSubscriptionResult$EventSubscription": null, + "DeleteEventSubscriptionResult$EventSubscription": null, + "EventSubscriptionsList$member": null, + "ModifyEventSubscriptionResult$EventSubscription": null, + "RemoveSourceIdentifierFromSubscriptionResult$EventSubscription": null + } + }, + "EventSubscriptionQuotaExceededFault": { + "base": "

    You have reached the maximum number of event subscriptions.

    ", + "refs": { + } + }, + "EventSubscriptionsList": { + "base": null, + "refs": { + "EventSubscriptionsMessage$EventSubscriptionsList": "

    A list of EventSubscriptions data types.

    " + } + }, + "EventSubscriptionsMessage": { + "base": "

    Data returned by the DescribeEventSubscriptions action.

    ", + "refs": { + } + }, + "EventsMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeEvents action.

    ", + "refs": { + } + }, + "IPRange": { + "base": "

    This data type is used as a response element in the DescribeDBSecurityGroups action.

    ", + "refs": { + "IPRangeList$member": null + } + }, + "IPRangeList": { + "base": null, + "refs": { + "DBSecurityGroup$IPRanges": "

    Contains a list of IPRange elements.

    " + } + }, + "InstanceQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB instances.

    ", + "refs": { + } + }, + "InsufficientDBInstanceCapacityFault": { + "base": "

    Specified DB instance class is not available in the specified Availability Zone.

    ", + "refs": { + } + }, + "Integer": { + "base": null, + "refs": { + "DBInstance$AllocatedStorage": "

    Specifies the allocated storage size specified in gigabytes.

    ", + "DBInstance$BackupRetentionPeriod": "

    Specifies the number of days for which automatic DB Snapshots are retained.

    ", + "DBSnapshot$AllocatedStorage": "

    Specifies the allocated storage size in gigabytes (GB).

    ", + "DBSnapshot$Port": "

    Specifies the port that the database engine was listening on at the time of the snapshot.

    ", + "Endpoint$Port": "

    Specifies the port that the database engine is listening on.

    ", + "ReservedDBInstance$Duration": "

    The duration of the reservation in seconds.

    ", + "ReservedDBInstance$DBInstanceCount": "

    The number of reserved DB Instances.

    ", + "ReservedDBInstancesOffering$Duration": "

    The duration of the offering in seconds.

    " + } + }, + "IntegerOptional": { + "base": null, + "refs": { + "CreateDBInstanceMessage$AllocatedStorage": "

    The amount of storage (in gigabytes) to be initially allocated for the database instance.

    MySQL

    Constraints: Must be an integer from 5 to 1024.

    Type: Integer

    Oracle

    Constraints: Must be an integer from 10 to 1024.

    SQL Server

    Constraints: Must be an integer from 200 to 1024 (Standard Edition and Enterprise Edition) or from 30 to 1024 (Express Edition and Web Edition)

    ", + "CreateDBInstanceMessage$BackupRetentionPeriod": "

    The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

    Default: 1

    Constraints:

    • Must be a value from 0 to 8
    • Cannot be set to 0 if the DB Instance is a master instance with read replicas
    ", + "CreateDBInstanceMessage$Port": "

    The port number on which the database accepts connections.

    MySQL

    Default: 3306

    Valid Values: 1150-65535

    Type: Integer

    Oracle

    Default: 1521

    Valid Values: 1150-65535

    SQL Server

    Default: 1433

    Valid Values: 1150-65535 except for 1434 and 3389.

    ", + "CreateDBInstanceMessage$Iops": "

    The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB Instance.

    Constraints: Must be an integer greater than 1000.

    ", + "CreateDBInstanceReadReplicaMessage$Port": "

    The port number that the DB Instance uses for connections.

    Default: Inherits from the source DB Instance

    Valid Values: 1150-65535

    ", + "CreateDBInstanceReadReplicaMessage$Iops": "

    The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB Instance.

    ", + "DBInstance$Iops": "

    Specifies the Provisioned IOPS (I/O operations per second) value.

    ", + "DBSnapshot$Iops": "

    Specifies the Provisioned IOPS (I/O operations per second) value of the DB Instance at the time of the snapshot.

    ", + "DescribeDBEngineVersionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so that the following results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeDBInstancesMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100
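
    The MaxRecords/marker contract repeated throughout these Describe* calls amounts to a simple pagination loop; a sketch against the vendored Go client (DescribeDBInstances shown, but the same shape applies to the other Describe operations):

        package example

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/rds"
        )

        // listAll pages through DescribeDBInstances: each response carries a
        // Marker whenever more records than MaxRecords remain.
        func listAll(svc *rds.RDS) ([]*rds.DBInstance, error) {
            var all []*rds.DBInstance
            input := &rds.DescribeDBInstancesInput{MaxRecords: aws.Int64(20)} // minimum allowed value
            for {
                out, err := svc.DescribeDBInstances(input)
                if err != nil {
                    return nil, err
                }
                all = append(all, out.DBInstances...)
                if out.Marker == nil {
                    return all, nil // no more pages
                }
                input.Marker = out.Marker
            }
        }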

    ", + "DescribeDBParameterGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeDBParametersMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeDBSecurityGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeDBSnapshotsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeDBSubnetGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeEngineDefaultParametersMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeEventSubscriptionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeEventsMessage$Duration": "

    The number of minutes to retrieve events for.

    Default: 60

    ", + "DescribeEventsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeOptionGroupOptionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeOptionGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeOrderableDBInstanceOptionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeReservedDBInstancesMessage$MaxRecords": "

    The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so that the following results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeReservedDBInstancesOfferingsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so that the following results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "ModifyDBInstanceMessage$AllocatedStorage": "

    The new storage capacity of the RDS instance. Changing this parameter does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

    MySQL

    Default: Uses existing setting

    Valid Values: 5-1024

    Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

    Type: Integer

    Oracle

    Default: Uses existing setting

    Valid Values: 10-1024

    Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

    SQL Server

    Cannot be modified.

    ", + "ModifyDBInstanceMessage$BackupRetentionPeriod": "

    The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

    Changing this parameter can result in an outage if you change from 0 to a non-zero value or from a non-zero value to 0. These changes are applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. If you change the parameter from one non-zero value to another non-zero value, the change is asynchronously applied as soon as possible.

    Default: Uses existing setting

    Constraints:

    • Must be a value from 0 to 8
    • Cannot be set to 0 if the DB Instance is a master instance with read replicas or if the DB Instance is a read replica
    ", + "ModifyDBInstanceMessage$Iops": "

    The new Provisioned IOPS (I/O operations per second) value for the RDS instance. Changing this parameter does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

    Default: Uses existing setting

    Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

    Type: Integer

    ", + "Option$Port": "

    If required, the port configured for this option to use.

    ", + "OptionConfiguration$Port": "

    The optional port for the option.

    ", + "OptionGroupOption$DefaultPort": "

    If the option requires a port, specifies the default port for the option.

    ", + "PendingModifiedValues$AllocatedStorage": "

    Contains the new AllocatedStorage size for the DB Instance that will be applied or is in progress.

    ", + "PendingModifiedValues$Port": "

    Specifies the pending port for the DB Instance.

    ", + "PendingModifiedValues$BackupRetentionPeriod": "

    Specifies the pending number of days for which automated backups are retained.

    ", + "PendingModifiedValues$Iops": "

    Specifies the new Provisioned IOPS value for the DB Instance that will be applied or is being applied.

    ", + "PromoteReadReplicaMessage$BackupRetentionPeriod": "

    The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

    Default: 1

    Constraints:

    • Must be a value from 0 to 8
    ", + "PurchaseReservedDBInstancesOfferingMessage$DBInstanceCount": "

    The number of instances to reserve.

    Default: 1

    ", + "RestoreDBInstanceFromDBSnapshotMessage$Port": "

    The port number on which the database accepts connections.

    Default: The same port as the original DB Instance

    Constraints: Value must be 1150-65535

    ", + "RestoreDBInstanceFromDBSnapshotMessage$Iops": "

    The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB Instance.

    Constraints: Must be an integer greater than 1000.

    ", + "RestoreDBInstanceToPointInTimeMessage$Port": "

    The port number on which the database accepts connections.

    Constraints: Value must be 1150-65535

    Default: The same port as the original DB Instance.

    ", + "RestoreDBInstanceToPointInTimeMessage$Iops": "

    The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB Instance.

    Constraints: Must be an integer greater than 1000.

    " + } + }, + "InvalidDBInstanceStateFault": { + "base": "

    The specified DB instance is not in the available state.

    ", + "refs": { + } + }, + "InvalidDBParameterGroupStateFault": { + "base": "

    The DB parameter group cannot be deleted because it is in use.

    ", + "refs": { + } + }, + "InvalidDBSecurityGroupStateFault": { + "base": "

    The state of the DB security group does not allow deletion.

    ", + "refs": { + } + }, + "InvalidDBSnapshotStateFault": { + "base": "

    The state of the DB snapshot does not allow deletion.

    ", + "refs": { + } + }, + "InvalidDBSubnetGroupStateFault": { + "base": "

    The DB subnet group cannot be deleted because it is in use.

    ", + "refs": { + } + }, + "InvalidDBSubnetStateFault": { + "base": "

    The DB subnet is not in the available state.

    ", + "refs": { + } + }, + "InvalidEventSubscriptionStateFault": { + "base": "

    This error can occur if someone else is modifying a subscription. You should retry the action.

    ", + "refs": { + } + }, + "InvalidOptionGroupStateFault": { + "base": "

    The option group is not in the available state.

    ", + "refs": { + } + }, + "InvalidRestoreFault": { + "base": "

    Cannot restore from a VPC backup to a non-VPC DB instance.

    ", + "refs": { + } + }, + "InvalidSubnet": { + "base": "

    The requested subnet is invalid, or multiple subnets were requested that are not all in a common VPC.

    ", + "refs": { + } + }, + "InvalidVPCNetworkStateFault": { + "base": "

    The DB subnet group does not cover all Availability Zones after it is created because of changes that were made.

    ", + "refs": { + } + }, + "KeyList": { + "base": null, + "refs": { + "RemoveTagsFromResourceMessage$TagKeys": "

    The tag key (name) of the tag to be removed.

    " + } + }, + "ListTagsForResourceMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyDBInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyDBInstanceResult": { + "base": null, + "refs": { + } + }, + "ModifyDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyDBSubnetGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyDBSubnetGroupResult": { + "base": null, + "refs": { + } + }, + "ModifyEventSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyEventSubscriptionResult": { + "base": null, + "refs": { + } + }, + "ModifyOptionGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyOptionGroupResult": { + "base": null, + "refs": { + } + }, + "Option": { + "base": "

    Option details.

    ", + "refs": { + "OptionsList$member": null + } + }, + "OptionConfiguration": { + "base": "

    A list of all available options.

    ", + "refs": { + "OptionConfigurationList$member": null + } + }, + "OptionConfigurationList": { + "base": null, + "refs": { + "ModifyOptionGroupMessage$OptionsToInclude": "

    Options in this list are added to the Option Group or, if already present, the specified configuration is used to update the existing configuration.

    " + } + }, + "OptionGroup": { + "base": "

    ", + "refs": { + "CreateOptionGroupResult$OptionGroup": null, + "ModifyOptionGroupResult$OptionGroup": null, + "OptionGroupsList$member": null + } + }, + "OptionGroupAlreadyExistsFault": { + "base": "

    The option group you are trying to create already exists.

    ", + "refs": { + } + }, + "OptionGroupMembership": { + "base": null, + "refs": { + "DBInstance$OptionGroupMembership": "

    Specifies the name and status of the option group that this instance belongs to.

    " + } + }, + "OptionGroupNotFoundFault": { + "base": "

    The specified option group could not be found.

    ", + "refs": { + } + }, + "OptionGroupOption": { + "base": "

    Available option.

    ", + "refs": { + "OptionGroupOptionsList$member": null + } + }, + "OptionGroupOptionsList": { + "base": "

    List of available options.

    ", + "refs": { + "OptionGroupOptionsMessage$OptionGroupOptions": null + } + }, + "OptionGroupOptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "OptionGroupQuotaExceededFault": { + "base": "

    The quota of 20 option groups was exceeded for this AWS account.

    ", + "refs": { + } + }, + "OptionGroups": { + "base": "

    List of option groups.

    ", + "refs": { + } + }, + "OptionGroupsList": { + "base": null, + "refs": { + "OptionGroups$OptionGroupsList": "

    List of option groups.

    " + } + }, + "OptionNamesList": { + "base": null, + "refs": { + "ModifyOptionGroupMessage$OptionsToRemove": "

    Options in this list are removed from the Option Group.

    " + } + }, + "OptionsDependedOn": { + "base": null, + "refs": { + "OptionGroupOption$OptionsDependedOn": "

    List of all options that are prerequisites for this option.

    " + } + }, + "OptionsList": { + "base": null, + "refs": { + "OptionGroup$Options": "

    Indicates what options are available in the option group.

    " + } + }, + "OrderableDBInstanceOption": { + "base": "

    Contains a list of available options for a DB Instance.

    This data type is used as a response element in the DescribeOrderableDBInstanceOptions action.

    ", + "refs": { + "OrderableDBInstanceOptionsList$member": null + } + }, + "OrderableDBInstanceOptionsList": { + "base": null, + "refs": { + "OrderableDBInstanceOptionsMessage$OrderableDBInstanceOptions": "

    An OrderableDBInstanceOption structure containing information about orderable options for the DB Instance.

    " + } + }, + "OrderableDBInstanceOptionsMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeOrderableDBInstanceOptions action.

    ", + "refs": { + } + }, + "Parameter": { + "base": "

    This data type is used as a request parameter in the ModifyDBParameterGroup and ResetDBParameterGroup actions.

    This data type is used as a response element in the DescribeEngineDefaultParameters and DescribeDBParameters actions.

    ", + "refs": { + "ParametersList$member": null + } + }, + "ParametersList": { + "base": null, + "refs": { + "DBParameterGroupDetails$Parameters": "

    A list of Parameter instances.

    ", + "EngineDefaults$Parameters": "

    Contains a list of engine default parameters.

    ", + "ModifyDBParameterGroupMessage$Parameters": "

    An array of parameter names, values, and the apply method for the parameter update. At least one parameter name, value, and apply method must be supplied; subsequent arguments are optional. A maximum of 20 parameters may be modified in a single request.

    Valid Values (for the application method): immediate | pending-reboot

    You can use the immediate value with dynamic parameters only. You can use the pending-reboot value for both dynamic and static parameters, and changes are applied when DB Instance reboots. ", + "ResetDBParameterGroupMessage$Parameters": "

    An array of parameter names, values, and the apply method for the parameter update. At least one parameter name, value, and apply method must be supplied; subsequent arguments are optional. A maximum of 20 parameters may be modified in a single request.

    MySQL

    Valid Values (for Apply method): immediate | pending-reboot

    You can use the immediate value with dynamic parameters only. You can use the pending-reboot value for both dynamic and static parameters, and changes are applied when DB Instance reboots.

    Oracle

    Valid Values (for Apply method): pending-reboot
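
    A sketch of the apply-method rules above for ModifyDBParameterGroup, using the vendored Go client (the parameter group name is a placeholder; the parameter names are common MySQL examples of a dynamic and a static parameter):

        package example

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/rds"
        )

        // tuneParams submits one dynamic and one static parameter; only the
        // dynamic one may use "immediate", the static one needs "pending-reboot".
        func tuneParams(svc *rds.RDS) error {
            _, err := svc.ModifyDBParameterGroup(&rds.ModifyDBParameterGroupInput{
                DBParameterGroupName: aws.String("mydbparametergroup"), // placeholder
                Parameters: []*rds.Parameter{
                    {
                        ParameterName:  aws.String("max_connections"), // dynamic
                        ParameterValue: aws.String("250"),
                        ApplyMethod:    aws.String("immediate"),
                    },
                    {
                        ParameterName:  aws.String("innodb_buffer_pool_size"), // static
                        ParameterValue: aws.String("134217728"),
                        ApplyMethod:    aws.String("pending-reboot"),
                    },
                },
            })
            return err
        }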

    " + } + }, + "PendingModifiedValues": { + "base": "

    This data type is used as a response element in the ModifyDBInstance action.

    ", + "refs": { + "DBInstance$PendingModifiedValues": "

    Specifies that changes to the DB Instance are pending. This element is only included when changes are pending. Specific changes are identified by subelements.

    " + } + }, + "PointInTimeRestoreNotEnabledFault": { + "base": "

    SourceDBInstanceIdentifier refers to a DB instance with BackupRetentionPeriod equal to 0.

    ", + "refs": { + } + }, + "PromoteReadReplicaMessage": { + "base": "

    ", + "refs": { + } + }, + "PromoteReadReplicaResult": { + "base": null, + "refs": { + } + }, + "ProvisionedIopsNotAvailableInAZFault": { + "base": "

    Provisioned IOPS not available in the specified Availability Zone.

    ", + "refs": { + } + }, + "PurchaseReservedDBInstancesOfferingMessage": { + "base": "

    ", + "refs": { + } + }, + "PurchaseReservedDBInstancesOfferingResult": { + "base": null, + "refs": { + } + }, + "ReadReplicaDBInstanceIdentifierList": { + "base": null, + "refs": { + "DBInstance$ReadReplicaDBInstanceIdentifiers": "

    Contains one or more identifiers of the Read Replicas associated with this DB Instance.

    " + } + }, + "RebootDBInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "RebootDBInstanceResult": { + "base": null, + "refs": { + } + }, + "RecurringCharge": { + "base": "

    This data type is used as a response element in the DescribeReservedDBInstances and DescribeReservedDBInstancesOfferings actions.

    ", + "refs": { + "RecurringChargeList$member": null + } + }, + "RecurringChargeList": { + "base": null, + "refs": { + "ReservedDBInstance$RecurringCharges": "

    The recurring price charged to run this reserved DB Instance.

    ", + "ReservedDBInstancesOffering$RecurringCharges": "

    The recurring price charged to run this reserved DB Instance.

    " + } + }, + "RemoveSourceIdentifierFromSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "RemoveSourceIdentifierFromSubscriptionResult": { + "base": null, + "refs": { + } + }, + "RemoveTagsFromResourceMessage": { + "base": "

    ", + "refs": { + } + }, + "ReservedDBInstance": { + "base": "

    This data type is used as a response element in the DescribeReservedDBInstances and PurchaseReservedDBInstancesOffering actions.

    ", + "refs": { + "PurchaseReservedDBInstancesOfferingResult$ReservedDBInstance": null, + "ReservedDBInstanceList$member": null + } + }, + "ReservedDBInstanceAlreadyExistsFault": { + "base": "

    User already has a reservation with the given identifier.

    ", + "refs": { + } + }, + "ReservedDBInstanceList": { + "base": null, + "refs": { + "ReservedDBInstanceMessage$ReservedDBInstances": "

    A list of reserved DB Instances.

    " + } + }, + "ReservedDBInstanceMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeReservedDBInstances action.

    ", + "refs": { + } + }, + "ReservedDBInstanceNotFoundFault": { + "base": "

    The specified reserved DB Instance was not found.

    ", + "refs": { + } + }, + "ReservedDBInstanceQuotaExceededFault": { + "base": "

    Request would exceed the user's DB Instance quota.

    ", + "refs": { + } + }, + "ReservedDBInstancesOffering": { + "base": "

    This data type is used as a response element in the DescribeReservedDBInstancesOfferings action.

    ", + "refs": { + "ReservedDBInstancesOfferingList$member": null + } + }, + "ReservedDBInstancesOfferingList": { + "base": null, + "refs": { + "ReservedDBInstancesOfferingMessage$ReservedDBInstancesOfferings": "

    A list of reserved DB Instance offerings.

    " + } + }, + "ReservedDBInstancesOfferingMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeReservedDBInstancesOfferings action.

    ", + "refs": { + } + }, + "ReservedDBInstancesOfferingNotFoundFault": { + "base": "

    Specified offering does not exist.

    ", + "refs": { + } + }, + "ResetDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "RestoreDBInstanceFromDBSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "RestoreDBInstanceFromDBSnapshotResult": { + "base": null, + "refs": { + } + }, + "RestoreDBInstanceToPointInTimeMessage": { + "base": "

    ", + "refs": { + } + }, + "RestoreDBInstanceToPointInTimeResult": { + "base": null, + "refs": { + } + }, + "RevokeDBSecurityGroupIngressMessage": { + "base": "

    ", + "refs": { + } + }, + "RevokeDBSecurityGroupIngressResult": { + "base": null, + "refs": { + } + }, + "SNSInvalidTopicFault": { + "base": "

    SNS has responded that there is a problem with the SNS topic specified.

    ", + "refs": { + } + }, + "SNSNoAuthorizationFault": { + "base": "

    You do not have permission to publish to the SNS topic ARN.

    ", + "refs": { + } + }, + "SNSTopicArnNotFoundFault": { + "base": "

    The SNS topic ARN does not exist.

    ", + "refs": { + } + }, + "SnapshotQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB snapshots.

    ", + "refs": { + } + }, + "SourceIdsList": { + "base": null, + "refs": { + "CreateEventSubscriptionMessage$SourceIds": "

    The list of identifiers of the event sources for which events will be returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it cannot end with a hyphen or contain two consecutive hyphens.

    Constraints:

    • If SourceIds are supplied, SourceType must also be provided.
    • If the source type is a DB instance, then a DBInstanceIdentifier must be supplied.
    • If the source type is a DB security group, a DBSecurityGroupName must be supplied.
    • If the source type is a DB parameter group, a DBParameterGroupName must be supplied.
    • If the source type is a DB Snapshot, a DBSnapshotIdentifier must be supplied.
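
    The constraints just listed pair SourceIds with SourceType; a sketch using the vendored Go client (subscription name, topic ARN, and instance identifier are placeholders):

        package example

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/rds"
        )

        // subscribe supplies SourceIds, so SourceType must be set too; with
        // source type db-instance the ids are DB instance identifiers.
        func subscribe(svc *rds.RDS) error {
            _, err := svc.CreateEventSubscription(&rds.CreateEventSubscriptionInput{
                SubscriptionName: aws.String("my-subscription"),                             // placeholder
                SnsTopicArn:      aws.String("arn:aws:sns:us-east-1:123456789012:my-topic"), // placeholder
                SourceType:       aws.String("db-instance"),
                SourceIds:        []*string{aws.String("mydbinstance")}, // placeholder
                Enabled:          aws.Bool(true),
            })
            return err
        }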
    ", + "EventSubscription$SourceIdsList": "

    A list of source Ids for the RDS event notification subscription.

    " + } + }, + "SourceNotFoundFault": { + "base": "

    The requested source could not be found.

    ", + "refs": { + } + }, + "SourceType": { + "base": null, + "refs": { + "DescribeEventsMessage$SourceType": "

    The event source to retrieve events for. If no value is specified, all events are returned.

    ", + "Event$SourceType": "

    Specifies the source type for this event.

    " + } + }, + "StorageQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed amount of storage available across all DB instances.

    ", + "refs": { + } + }, + "String": { + "base": null, + "refs": { + "AddSourceIdentifierToSubscriptionMessage$SubscriptionName": "

    The name of the RDS event notification subscription you want to add a source identifier to.

    ", + "AddSourceIdentifierToSubscriptionMessage$SourceIdentifier": "

    The identifier of the event source to be added. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it cannot end with a hyphen or contain two consecutive hyphens.

    Constraints:

    • If the source type is a DB instance, then a DBInstanceIdentifier must be supplied.
    • If the source type is a DB security group, a DBSecurityGroupName must be supplied.
    • If the source type is a DB parameter group, a DBParameterGroupName must be supplied.
    • If the source type is a DB Snapshot, a DBSnapshotIdentifier must be supplied.
    ", + "AddTagsToResourceMessage$ResourceName": "

    The DB Instance the tags will be added to.

    ", + "AuthorizeDBSecurityGroupIngressMessage$DBSecurityGroupName": "

    The name of the DB Security Group to add authorization to.

    ", + "AuthorizeDBSecurityGroupIngressMessage$CIDRIP": "

    The IP range to authorize.

    ", + "AuthorizeDBSecurityGroupIngressMessage$EC2SecurityGroupName": "

    Name of the EC2 Security Group to authorize. For VPC DB Security Groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "AuthorizeDBSecurityGroupIngressMessage$EC2SecurityGroupId": "

    Id of the EC2 Security Group to authorize. For VPC DB Security Groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "AuthorizeDBSecurityGroupIngressMessage$EC2SecurityGroupOwnerId": "

    AWS Account Number of the owner of the EC2 Security Group specified in the EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable value. For VPC DB Security Groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.
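
    A sketch of the simpler CIDR form of this call from the vendored Go client (group name and IP range are placeholders; for the EC2 security group form, set the EC2SecurityGroup* fields described above instead of CIDRIP):

        package example

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/rds"
        )

        // openCIDR authorizes ingress for an IP range on a DB Security Group.
        func openCIDR(svc *rds.RDS) error {
            _, err := svc.AuthorizeDBSecurityGroupIngress(&rds.AuthorizeDBSecurityGroupIngressInput{
                DBSecurityGroupName: aws.String("mydbsecuritygroup"), // placeholder
                CIDRIP:              aws.String("203.0.113.0/24"),    // placeholder range
            })
            return err
        }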

    ", + "AvailabilityZone$Name": "

    The name of the availability zone.

    ", + "CharacterSet$CharacterSetName": "

    The name of the character set.

    ", + "CharacterSet$CharacterSetDescription": "

    The description of the character set.

    ", + "CopyDBSnapshotMessage$SourceDBSnapshotIdentifier": "

    The identifier for the source DB snapshot.

    Constraints:

    • Must be the identifier for a valid system snapshot in the \"available\" state.

    Example: rds:mydb-2012-04-02-00-01

    ", + "CopyDBSnapshotMessage$TargetDBSnapshotIdentifier": "

    The identifier for the copied snapshot.

    Constraints:

    • Cannot be null, empty, or blank
    • Must contain from 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-db-snapshot
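
    Putting the two identifiers together, a sketch using the vendored Go client (the source value reuses the automated-snapshot example above; the target follows the constraints just listed):

        package example

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/rds"
        )

        // copySnapshot copies an automated snapshot (rds: prefix) to a manual
        // snapshot with a user-chosen name.
        func copySnapshot(svc *rds.RDS) error {
            _, err := svc.CopyDBSnapshot(&rds.CopyDBSnapshotInput{
                SourceDBSnapshotIdentifier: aws.String("rds:mydb-2012-04-02-00-01"),
                TargetDBSnapshotIdentifier: aws.String("my-db-snapshot"),
            })
            return err
        }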

    ", + "CreateDBInstanceMessage$DBName": "

    The meaning of this parameter differs according to the database engine you use.

    MySQL

    The name of the database to create when the DB Instance is created. If this parameter is not specified, no database is created in the DB Instance.

    Constraints:

    • Must contain 1 to 64 alphanumeric characters
    • Cannot be a word reserved by the specified database engine

    Type: String

    Oracle

    The Oracle System ID (SID) of the created DB Instance.

    Default: ORCL

    Constraints:

    • Cannot be longer than 8 characters

    SQL Server

    Not applicable. Must be null.

    ", + "CreateDBInstanceMessage$DBInstanceIdentifier": "

    The DB Instance identifier. This parameter is stored as a lowercase string.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens (1 to 15 for SQL Server).
    • First character must be a letter.
    • Cannot end with a hyphen or contain two consecutive hyphens.

    Example: mydbinstance

    ", + "CreateDBInstanceMessage$DBInstanceClass": "

    The compute and memory capacity of the DB Instance.

    Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge

    ", + "CreateDBInstanceMessage$Engine": "

    The name of the database engine to be used for this instance.

    Valid Values: MySQL | oracle-se1 | oracle-se | oracle-ee | sqlserver-ee | sqlserver-se | sqlserver-ex | sqlserver-web

    ", + "CreateDBInstanceMessage$MasterUsername": "

    The name of the master user for the client DB Instance.

    MySQL

    Constraints:

    • Must be 1 to 16 alphanumeric characters.
    • First character must be a letter.
    • Cannot be a reserved word for the chosen database engine.

    Type: String

    Oracle

    Constraints:

    • Must be 1 to 30 alphanumeric characters.
    • First character must be a letter.
    • Cannot be a reserved word for the chosen database engine.

    SQL Server

    Constraints:

    • Must be 1 to 128 alphanumeric characters.
    • First character must be a letter.
    • Cannot be a reserved word for the chosen database engine.
    ", + "CreateDBInstanceMessage$MasterUserPassword": "

    The password for the master database user. Can be any printable ASCII character except \"/\", \"\\\", or \"@\".

    Type: String

    MySQL

    Constraints: Must contain from 8 to 41 alphanumeric characters.

    Oracle

    Constraints: Must contain from 8 to 30 alphanumeric characters.

    SQL Server

    Constraints: Must contain from 8 to 128 alphanumeric characters.
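
    Taken together, the String parameters above fit into a single CreateDBInstance call; a sketch for MySQL from the vendored Go client (every literal is a placeholder chosen to satisfy the stated constraints):

        package example

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/rds"
        )

        // createMySQL creates a minimal MySQL DB Instance.
        func createMySQL(svc *rds.RDS) error {
            _, err := svc.CreateDBInstance(&rds.CreateDBInstanceInput{
                DBName:               aws.String("mydb"), // 1-64 alphanumeric characters
                DBInstanceIdentifier: aws.String("mydbinstance"),
                DBInstanceClass:      aws.String("db.m1.small"),
                Engine:               aws.String("MySQL"),
                MasterUsername:       aws.String("master"),     // 1-16 characters for MySQL
                MasterUserPassword:   aws.String("secret99pw"), // 8-41 characters for MySQL
                AllocatedStorage:     aws.Int64(5),             // minimum for MySQL
            })
            return err
        }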

    ", + "CreateDBInstanceMessage$AvailabilityZone": "

    The EC2 Availability Zone that the database instance will be created in.

    Default: A random, system-chosen Availability Zone in the endpoint's region.

    Example: us-east-1d

    Constraint: The AvailabilityZone parameter cannot be specified if the MultiAZ parameter is set to true. The specified Availability Zone must be in the same region as the current endpoint.

    ", + "CreateDBInstanceMessage$DBSubnetGroupName": "

    A DB Subnet Group to associate with this DB Instance.

    If there is no DB Subnet Group, then it is a non-VPC DB instance.

    ", + "CreateDBInstanceMessage$PreferredMaintenanceWindow": "

    The weekly time range (in UTC) during which system maintenance can occur.

    Format: ddd:hh24:mi-ddd:hh24:mi

    Default: A 30-minute window selected at random from an 8-hour block of time per region, occurring on a random day of the week. The following list shows the time blocks for each region from which the default maintenance windows are assigned.

    • US-East (Northern Virginia) Region: 03:00-11:00 UTC
    • US-West (Northern California) Region: 06:00-14:00 UTC
    • EU (Ireland) Region: 22:00-06:00 UTC
    • Asia Pacific (Singapore) Region: 14:00-22:00 UTC
    • Asia Pacific (Tokyo) Region: 17:00-03:00 UTC

    Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun

    Constraints: Minimum 30-minute window.

    ", + "CreateDBInstanceMessage$DBParameterGroupName": "

    The name of the DB Parameter Group to associate with this DB instance. If this argument is omitted, the default DBParameterGroup for the specified engine will be used.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "CreateDBInstanceMessage$PreferredBackupWindow": "

    The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter.

    Default: A 30-minute window selected at random from an 8-hour block of time per region. The following list shows the time blocks for each region from which the default backup windows are assigned.

    • US-East (Northern Virginia) Region: 03:00-11:00 UTC
    • US-West (Northern California) Region: 06:00-14:00 UTC
    • EU (Ireland) Region: 22:00-06:00 UTC
    • Asia Pacific (Singapore) Region: 14:00-22:00 UTC
    • Asia Pacific (Tokyo) Region: 17:00-03:00 UTC

    Constraints: Must be in the format hh24:mi-hh24:mi. Times should be Universal Time Coordinated (UTC). Must not conflict with the preferred maintenance window. Must be at least 30 minutes.

    ", + "CreateDBInstanceMessage$EngineVersion": "

    The version number of the database engine to use.

    MySQL

    Example: 5.1.42

    Type: String

    Oracle

    Example: 11.2.0.2.v2

    Type: String

    SQL Server

    Example: 10.50.2789.0.v1

    ", + "CreateDBInstanceMessage$LicenseModel": "

    License model information for this DB Instance.

    Valid values: license-included | bring-your-own-license | general-public-license

    ", + "CreateDBInstanceMessage$OptionGroupName": "

    Indicates that the DB Instance should be associated with the specified option group.

    ", + "CreateDBInstanceMessage$CharacterSetName": "

    For supported engines, indicates that the DB Instance should be associated with the specified CharacterSet.
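
    Taken together, the CreateDBInstanceMessage fields above map onto a single CreateDBInstance call. A minimal sketch, assuming a MySQL instance and placeholder names and credentials; AllocatedStorage is required by the API but not documented in this block, so the 5 GiB here is only illustrative:

        package main

        import (
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/rds"
        )

        func main() {
            svc := rds.New(session.Must(session.NewSession()))

            // Identifier, class, engine, and credentials follow the
            // constraints documented above; every value is a placeholder.
            _, err := svc.CreateDBInstance(&rds.CreateDBInstanceInput{
                DBInstanceIdentifier: aws.String("mydbinstance"),
                DBInstanceClass:      aws.String("db.m1.small"),
                Engine:               aws.String("MySQL"),
                DBName:               aws.String("mydb"),
                MasterUsername:       aws.String("masteruser"),
                MasterUserPassword:   aws.String("examplepass123"),
                AllocatedStorage:     aws.Int64(5), // required; not covered above
            })
            if err != nil {
                log.Fatal(err)
            }
        }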

    ", + "CreateDBInstanceReadReplicaMessage$DBInstanceIdentifier": "

    The DB Instance identifier of the Read Replica. This is the unique key that identifies a DB Instance. This parameter is stored as a lowercase string.

    ", + "CreateDBInstanceReadReplicaMessage$SourceDBInstanceIdentifier": "

    The identifier of the DB Instance that will act as the source for the Read Replica. Each DB Instance can have up to five Read Replicas.

    Constraints: Must be the identifier of an existing DB Instance that is not already a Read Replica DB Instance.

    ", + "CreateDBInstanceReadReplicaMessage$DBInstanceClass": "

    The compute and memory capacity of the Read Replica.

    Valid Values: db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge

    Default: Inherits from the source DB Instance.

    ", + "CreateDBInstanceReadReplicaMessage$AvailabilityZone": "

    The Amazon EC2 Availability Zone that the Read Replica will be created in.

    Default: A random, system-chosen Availability Zone in the endpoint's region.

    Example: us-east-1d

    ", + "CreateDBInstanceReadReplicaMessage$OptionGroupName": "

    The option group the DB instance will be associated with. If omitted, the default Option Group for the engine specified will be used.
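
    A short sketch of the corresponding CreateDBInstanceReadReplica call, with hypothetical identifiers; DBInstanceClass and OptionGroupName are omitted so they inherit from the source as documented above:

        package main

        import (
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/rds"
        )

        func main() {
            svc := rds.New(session.Must(session.NewSession()))

            // The source must not already be a Read Replica, and each
            // source supports at most five replicas.
            _, err := svc.CreateDBInstanceReadReplica(&rds.CreateDBInstanceReadReplicaInput{
                DBInstanceIdentifier:       aws.String("mydbinstance-replica1"),
                SourceDBInstanceIdentifier: aws.String("mydbinstance"),
                AvailabilityZone:           aws.String("us-east-1d"),
            })
            if err != nil {
                log.Fatal(err)
            }
        }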

    ", + "CreateDBParameterGroupMessage$DBParameterGroupName": "

    The name of the DB Parameter Group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    This value is stored as a lower-case string.", + "CreateDBParameterGroupMessage$DBParameterGroupFamily": "

    The DB Parameter Group Family name. A DB Parameter Group can be associated with one and only one DB Parameter Group Family, and can be applied only to a DB Instance running a database engine and engine version compatible with that DB Parameter Group Family.

    ", + "CreateDBParameterGroupMessage$Description": "

    The description for the DB Parameter Group.

    ", + "CreateDBSecurityGroupMessage$DBSecurityGroupName": "

    The name for the DB Security Group. This value is stored as a lowercase string.

    Constraints: Must contain no more than 255 alphanumeric characters or hyphens. Must not be \"Default\".

    Example: mysecuritygroup

    ", + "CreateDBSecurityGroupMessage$DBSecurityGroupDescription": "

    The description for the DB Security Group.

    ", + "CreateDBSnapshotMessage$DBSnapshotIdentifier": "

    The identifier for the DB Snapshot.

    Constraints:

    • Cannot be null, empty, or blank
    • Must contain from 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-snapshot-id

    ", + "CreateDBSnapshotMessage$DBInstanceIdentifier": "

    The DB Instance identifier. This is the unique key that identifies a DB Instance. This parameter isn't case sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
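
    These two identifiers are the only required inputs to CreateDBSnapshot; a minimal sketch with placeholder names:

        package main

        import (
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/rds"
        )

        func main() {
            svc := rds.New(session.Must(session.NewSession()))

            // Snapshot name follows the constraints listed above.
            _, err := svc.CreateDBSnapshot(&rds.CreateDBSnapshotInput{
                DBSnapshotIdentifier: aws.String("my-snapshot-id"),
                DBInstanceIdentifier: aws.String("mydbinstance"),
            })
            if err != nil {
                log.Fatal(err)
            }
        }
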
    ", + "CreateDBSubnetGroupMessage$DBSubnetGroupName": "

    The name for the DB Subnet Group. This value is stored as a lowercase string.

    Constraints: Must contain no more than 255 alphanumeric characters or hyphens. Must not be \"Default\".

    Example: mySubnetgroup

    ", + "CreateDBSubnetGroupMessage$DBSubnetGroupDescription": "

    The description for the DB Subnet Group.

    ", + "CreateEventSubscriptionMessage$SubscriptionName": "

    The name of the subscription.

    Constraints: The name must be less than 255 characters.

    ", + "CreateEventSubscriptionMessage$SnsTopicArn": "

    The Amazon Resource Name (ARN) of the SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it.

    ", + "CreateEventSubscriptionMessage$SourceType": "

    The type of source that will be generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. If this value is not specified, all events are returned.

    Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot
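
    A sketch of a matching CreateEventSubscription call; the subscription name is a placeholder, and the SNS topic ARN is invented for illustration and must already exist:

        package main

        import (
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/rds"
        )

        func main() {
            svc := rds.New(session.Must(session.NewSession()))

            // Omitting SourceType would subscribe to all source types.
            _, err := svc.CreateEventSubscription(&rds.CreateEventSubscriptionInput{
                SubscriptionName: aws.String("my-db-events"),
                SnsTopicArn:      aws.String("arn:aws:sns:us-east-1:123456789012:my-topic"),
                SourceType:       aws.String("db-instance"),
            })
            if err != nil {
                log.Fatal(err)
            }
        }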

    ", + "CreateOptionGroupMessage$OptionGroupName": "

    Specifies the name of the option group to be created.

    Constraints:

    • Must be 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: myOptiongroup

    ", + "CreateOptionGroupMessage$EngineName": "

    Specifies the name of the engine that this option group should be associated with.

    ", + "CreateOptionGroupMessage$MajorEngineVersion": "

    Specifies the major version of the engine that this option group should be associated with.

    ", + "CreateOptionGroupMessage$OptionGroupDescription": "

    The description of the option group.

    ", + "DBEngineVersion$Engine": "

    The name of the database engine.

    ", + "DBEngineVersion$EngineVersion": "

    The version number of the database engine.

    ", + "DBEngineVersion$DBParameterGroupFamily": "

    The name of the DBParameterGroupFamily for the database engine.

    ", + "DBEngineVersion$DBEngineDescription": "

    The description of the database engine.

    ", + "DBEngineVersion$DBEngineVersionDescription": "

    The description of the database engine version.

    ", + "DBEngineVersionMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
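
    The Marker contract is the same across the Describe* operations in this file: feed the returned marker back into the next request until none comes back. A sketch using DescribeDBEngineVersions (the MaxRecords value is just an example):

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/rds"
        )

        func main() {
            svc := rds.New(session.Must(session.NewSession()))
            input := &rds.DescribeDBEngineVersionsInput{MaxRecords: aws.Int64(20)}
            for {
                page, err := svc.DescribeDBEngineVersions(input)
                if err != nil {
                    log.Fatal(err)
                }
                for _, v := range page.DBEngineVersions {
                    fmt.Println(aws.StringValue(v.Engine), aws.StringValue(v.EngineVersion))
                }
                if page.Marker == nil {
                    break // no records beyond this point
                }
                // Resume after the last record of the previous page.
                input.Marker = page.Marker
            }
        }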

    ", + "DBInstance$DBInstanceIdentifier": "

    Contains a user-supplied database identifier. This is the unique key that identifies a DB Instance.

    ", + "DBInstance$DBInstanceClass": "

    Contains the name of the compute and memory capacity class of the DB Instance.

    ", + "DBInstance$Engine": "

    Provides the name of the database engine to be used for this DB Instance.

    ", + "DBInstance$DBInstanceStatus": "

    Specifies the current state of this database.

    ", + "DBInstance$MasterUsername": "

    Contains the master username for the DB Instance.

    ", + "DBInstance$DBName": "

    The meaning of this parameter differs according to the database engine you use.

    MySQL

    Contains the name of the initial database of this instance that was provided at create time, if one was specified when the DB Instance was created. This same name is returned for the life of the DB Instance.

    Type: String

    Oracle

    Contains the Oracle System ID (SID) of the created DB Instance.

    ", + "DBInstance$PreferredBackupWindow": "

    Specifies the daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod.

    ", + "DBInstance$AvailabilityZone": "

    Specifies the name of the Availability Zone the DB Instance is located in.

    ", + "DBInstance$PreferredMaintenanceWindow": "

    Specifies the weekly time range (in UTC) during which system maintenance can occur.

    ", + "DBInstance$EngineVersion": "

    Indicates the database engine version.

    ", + "DBInstance$ReadReplicaSourceDBInstanceIdentifier": "

    Contains the identifier of the source DB Instance if this DB Instance is a Read Replica.

    ", + "DBInstance$LicenseModel": "

    License model information for this DB Instance.

    ", + "DBInstance$CharacterSetName": "

    If present, specifies the name of the character set that this instance is associated with.

    ", + "DBInstance$SecondaryAvailabilityZone": "

    If present, specifies the name of the secondary Availability Zone for a DB instance with multi-AZ support.

    ", + "DBInstanceMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBParameterGroup$DBParameterGroupName": "

    Provides the name of the DB Parameter Group.

    ", + "DBParameterGroup$DBParameterGroupFamily": "

    Provides the name of the DB Parameter Group Family that this DB Parameter Group is compatible with.

    ", + "DBParameterGroup$Description": "

    Provides the customer-specified description for this DB Parameter Group.

    ", + "DBParameterGroupDetails$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBParameterGroupNameMessage$DBParameterGroupName": "

    The name of the DB Parameter Group.

    ", + "DBParameterGroupStatus$DBParameterGroupName": "

    The name of the DB Parameter Group.

    ", + "DBParameterGroupStatus$ParameterApplyStatus": "

    The status of parameter updates.

    ", + "DBParameterGroupsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBSecurityGroup$OwnerId": "

    Provides the AWS ID of the owner of a specific DB Security Group.

    ", + "DBSecurityGroup$DBSecurityGroupName": "

    Specifies the name of the DB Security Group.

    ", + "DBSecurityGroup$DBSecurityGroupDescription": "

    Provides the description of the DB Security Group.

    ", + "DBSecurityGroup$VpcId": "

    Provides the VpcId of the DB Security Group.

    ", + "DBSecurityGroupMembership$DBSecurityGroupName": "

    The name of the DB Security Group.

    ", + "DBSecurityGroupMembership$Status": "

    The status of the DB Security Group.

    ", + "DBSecurityGroupMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBSecurityGroupNameList$member": null, + "DBSnapshot$DBSnapshotIdentifier": "

    Specifies the identifier for the DB Snapshot.

    ", + "DBSnapshot$DBInstanceIdentifier": "

    Specifies the DBInstanceIdentifier of the DB Instance this DB Snapshot was created from.

    ", + "DBSnapshot$Engine": "

    Specifies the name of the database engine.

    ", + "DBSnapshot$Status": "

    Specifies the status of this DB Snapshot.

    ", + "DBSnapshot$AvailabilityZone": "

    Specifies the name of the Availability Zone the DB Instance was located in at the time of the DB Snapshot.

    ", + "DBSnapshot$VpcId": "

    Provides the Vpc Id associated with the DB Snapshot.

    ", + "DBSnapshot$MasterUsername": "

    Provides the master username for the DB Instance.

    ", + "DBSnapshot$EngineVersion": "

    Specifies the version of the database engine.

    ", + "DBSnapshot$LicenseModel": "

    License model information for the restored DB Instance.

    ", + "DBSnapshot$SnapshotType": "

    Provides the type of the DB Snapshot.

    ", + "DBSnapshotMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBSubnetGroup$DBSubnetGroupName": "

    Specifies the name of the DB Subnet Group.

    ", + "DBSubnetGroup$DBSubnetGroupDescription": "

    Provides the description of the DB Subnet Group.

    ", + "DBSubnetGroup$VpcId": "

    Provides the VpcId of the DB Subnet Group.

    ", + "DBSubnetGroup$SubnetGroupStatus": "

    Provides the status of the DB Subnet Group.

    ", + "DBSubnetGroupMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DeleteDBInstanceMessage$DBInstanceIdentifier": "

    The DB Instance identifier for the DB Instance to be deleted. This parameter isn't case sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DeleteDBInstanceMessage$FinalDBSnapshotIdentifier": "

    The DBSnapshotIdentifier of the new DBSnapshot created when SkipFinalSnapshot is set to false.

    Specifying this parameter and also setting the SkipFinalSnapshot parameter to true results in an error.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
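
    To make the SkipFinalSnapshot interaction concrete, here is a sketch that deletes an instance while keeping a final snapshot; setting SkipFinalSnapshot to true together with this identifier would be the error case described above. Names are placeholders:

        package main

        import (
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/rds"
        )

        func main() {
            svc := rds.New(session.Must(session.NewSession()))

            // SkipFinalSnapshot=false means a final snapshot identifier
            // is expected before the instance is deleted.
            _, err := svc.DeleteDBInstance(&rds.DeleteDBInstanceInput{
                DBInstanceIdentifier:      aws.String("mydbinstance"),
                SkipFinalSnapshot:         aws.Bool(false),
                FinalDBSnapshotIdentifier: aws.String("mydbinstance-final"),
            })
            if err != nil {
                log.Fatal(err)
            }
        }
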
    ", + "DeleteDBParameterGroupMessage$DBParameterGroupName": "

    The name of the DB Parameter Group.

    Constraints:

    • Must be the name of an existing DB Parameter Group
    • You cannot delete a default DB Parameter Group
    • Cannot be associated with any DB Instances
    ", + "DeleteDBSecurityGroupMessage$DBSecurityGroupName": "

    The name of the DB Security Group to delete.

    You cannot delete the default DB Security Group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DeleteDBSnapshotMessage$DBSnapshotIdentifier": "

    The DBSnapshot identifier.

    Constraints: Must be the name of an existing DB Snapshot in the available state.

    ", + "DeleteDBSubnetGroupMessage$DBSubnetGroupName": "

    The name of the database subnet group to delete.

    You cannot delete the default subnet group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DeleteEventSubscriptionMessage$SubscriptionName": "

    The name of the RDS event notification subscription you want to delete.

    ", + "DeleteOptionGroupMessage$OptionGroupName": "

    The name of the option group to be deleted.

    You cannot delete default Option Groups.", + "DescribeDBEngineVersionsMessage$Engine": "

    The database engine to return.

    ", + "DescribeDBEngineVersionsMessage$EngineVersion": "

    The database engine version to return.

    Example: 5.1.49

    ", + "DescribeDBEngineVersionsMessage$DBParameterGroupFamily": "

    The name of a specific DB Parameter Group family to return details for.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBEngineVersionsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBInstancesMessage$DBInstanceIdentifier": "

    The user-supplied instance identifier. If this parameter is specified, information from only the specific DB Instance is returned. This parameter isn't case sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBInstancesMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBInstances request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBParameterGroupsMessage$DBParameterGroupName": "

    The name of a specific DB Parameter Group to return details for.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBParameterGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBParameterGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBParametersMessage$DBParameterGroupName": "

    The name of a specific DB Parameter Group to return details for.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBParametersMessage$Source": "

    The parameter types to return.

    Default: All parameter types returned

    Valid Values: user | system | engine-default

    ", + "DescribeDBParametersMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBSecurityGroupsMessage$DBSecurityGroupName": "

    The name of the DB Security Group to return details for.

    ", + "DescribeDBSecurityGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBSecurityGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBSnapshotsMessage$DBInstanceIdentifier": "

    A DB Instance Identifier to retrieve the list of DB Snapshots for. Cannot be used in conjunction with DBSnapshotIdentifier. This parameter isn't case sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBSnapshotsMessage$DBSnapshotIdentifier": "

    A specific DB Snapshot Identifier to describe. Cannot be used in conjunction with DBInstanceIdentifier. This value is stored as a lowercase string.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    • If this is the identifier of an automated snapshot, the SnapshotType parameter must also be specified.
    ", + "DescribeDBSnapshotsMessage$SnapshotType": "

    An optional snapshot type for which snapshots will be returned. If not specified, the returned results will include snapshots of all types.

    ", + "DescribeDBSnapshotsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBSnapshots request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBSubnetGroupsMessage$DBSubnetGroupName": "

    The name of the DB Subnet Group to return details for.

    ", + "DescribeDBSubnetGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBSubnetGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeEngineDefaultParametersMessage$DBParameterGroupFamily": "

    The name of the DB Parameter Group Family.

    ", + "DescribeEngineDefaultParametersMessage$Marker": "

    An optional pagination token provided by a previous DescribeEngineDefaultParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeEventCategoriesMessage$SourceType": "

    The type of source that will be generating the events.

    Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot

    ", + "DescribeEventSubscriptionsMessage$SubscriptionName": "

    The name of the RDS event notification subscription you want to describe.

    ", + "DescribeEventSubscriptionsMessage$Marker": "

    An optional pagination token provided by a previous DescribeEventSubscriptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeEventsMessage$SourceIdentifier": "

    The identifier of the event source for which events will be returned. If not specified, then all sources are included in the response.

    Constraints:

    • If SourceIdentifier is supplied, SourceType must also be provided.
    • If the source type is DBInstance, then a DBInstanceIdentifier must be supplied.
    • If the source type is DBSecurityGroup, a DBSecurityGroupName must be supplied.
    • If the source type is DBParameterGroup, a DBParameterGroupName must be supplied.
    • If the source type is DBSnapshot, a DBSnapshotIdentifier must be supplied.
    • Cannot end with a hyphen or contain two consecutive hyphens.
    ", + "DescribeEventsMessage$Marker": "

    An optional pagination token provided by a previous DescribeEvents request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeOptionGroupOptionsMessage$EngineName": "

    The name of the DB engine for which to describe the available options.

    ", + "DescribeOptionGroupOptionsMessage$MajorEngineVersion": "

    If specified, filters the results to include only options for the specified major engine version.

    ", + "DescribeOptionGroupOptionsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeOptionGroupsMessage$OptionGroupName": "

    The name of the option group to describe. Cannot be supplied together with EngineName or MajorEngineVersion.

    ", + "DescribeOptionGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeOptionGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeOptionGroupsMessage$EngineName": "

    Filters the list of option groups to only include groups associated with a specific database engine.

    ", + "DescribeOptionGroupsMessage$MajorEngineVersion": "

    Filters the list of option groups to only include groups associated with a specific database engine version. If specified, then EngineName must also be specified.

    ", + "DescribeOrderableDBInstanceOptionsMessage$Engine": "

    The name of the engine to retrieve DB Instance options for.

    ", + "DescribeOrderableDBInstanceOptionsMessage$EngineVersion": "

    The engine version filter value. Specify this parameter to show only the available offerings matching the specified engine version.

    ", + "DescribeOrderableDBInstanceOptionsMessage$DBInstanceClass": "

    The DB Instance class filter value. Specify this parameter to show only the available offerings matching the specified DB Instance class.

    ", + "DescribeOrderableDBInstanceOptionsMessage$LicenseModel": "

    The license model filter value. Specify this parameter to show only the available offerings matching the specified license model.

    ", + "DescribeOrderableDBInstanceOptionsMessage$Marker": "

    An optional pagination token provided by a previous DescribeOrderableDBInstanceOptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeReservedDBInstancesMessage$ReservedDBInstanceId": "

    The reserved DB Instance identifier filter value. Specify this parameter to show only the reservation that matches the specified reservation ID.

    ", + "DescribeReservedDBInstancesMessage$ReservedDBInstancesOfferingId": "

    The offering identifier filter value. Specify this parameter to show only purchased reservations matching the specified offering identifier.

    ", + "DescribeReservedDBInstancesMessage$DBInstanceClass": "

    The DB Instance class filter value. Specify this parameter to show only those reservations matching the specified DB Instances class.

    ", + "DescribeReservedDBInstancesMessage$Duration": "

    The duration filter value, specified in years or seconds. Specify this parameter to show only reservations for this duration.

    Valid Values: 1 | 3 | 31536000 | 94608000

    ", + "DescribeReservedDBInstancesMessage$ProductDescription": "

    The product description filter value. Specify this parameter to show only those reservations matching the specified product description.

    ", + "DescribeReservedDBInstancesMessage$OfferingType": "

    The offering type filter value. Specify this parameter to show only the available offerings matching the specified offering type.

    Valid Values: \"Light Utilization\" | \"Medium Utilization\" | \"Heavy Utilization\"

    ", + "DescribeReservedDBInstancesMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeReservedDBInstancesOfferingsMessage$ReservedDBInstancesOfferingId": "

    The offering identifier filter value. Specify this parameter to show only the available offering that matches the specified reservation identifier.

    Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706

    ", + "DescribeReservedDBInstancesOfferingsMessage$DBInstanceClass": "

    The DB Instance class filter value. Specify this parameter to show only the available offerings matching the specified DB Instance class.

    ", + "DescribeReservedDBInstancesOfferingsMessage$Duration": "

    Duration filter value, specified in years or seconds. Specify this parameter to show only reservations for this duration.

    Valid Values: 1 | 3 | 31536000 | 94608000

    ", + "DescribeReservedDBInstancesOfferingsMessage$ProductDescription": "

    Product description filter value. Specify this parameter to show only the available offerings matching the specified product description.

    ", + "DescribeReservedDBInstancesOfferingsMessage$OfferingType": "

    The offering type filter value. Specify this parameter to show only the available offerings matching the specified offering type.

    Valid Values: \"Light Utilization\" | \"Medium Utilization\" | \"Heavy Utilization\"

    ", + "DescribeReservedDBInstancesOfferingsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "EC2SecurityGroup$Status": "

    Provides the status of the EC2 security group. Status can be \"authorizing\", \"authorized\", \"revoking\", or \"revoked\".

    ", + "EC2SecurityGroup$EC2SecurityGroupName": "

    Specifies the name of the EC2 Security Group.

    ", + "EC2SecurityGroup$EC2SecurityGroupId": "

    Specifies the id of the EC2 Security Group.

    ", + "EC2SecurityGroup$EC2SecurityGroupOwnerId": "

    Specifies the AWS ID of the owner of the EC2 security group specified in the EC2SecurityGroupName field.

    ", + "Endpoint$Address": "

    Specifies the DNS address of the DB Instance.

    ", + "EngineDefaults$DBParameterGroupFamily": "

    Specifies the name of the DB Parameter Group Family which the engine default parameters apply to.

    ", + "EngineDefaults$Marker": "

    An optional pagination token provided by a previous EngineDefaults request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "Event$SourceIdentifier": "

    Provides the identifier for the source of the event.

    ", + "Event$Message": "

    Provides the text of this event.

    ", + "EventCategoriesList$member": null, + "EventCategoriesMap$SourceType": "

    The source type that the returned categories belong to.

    ", + "EventSubscription$Id": "

    Not used.

    ", + "EventSubscription$CustomerAwsId": "

    The AWS customer account associated with the RDS event notification subscription.

    ", + "EventSubscription$CustSubscriptionId": "

    The RDS event notification subscription Id.

    ", + "EventSubscription$SnsTopicArn": "

    The topic ARN of the RDS event notification subscription.

    ", + "EventSubscription$Status": "

    The status of the RDS event notification subscription.

    Constraints:

    Can be one of the following: creating | modifying | deleting | active | no-permission | topic-not-exist

    The status \"no-permission\" indicates that RDS no longer has permission to post to the SNS topic. The status \"topic-not-exist\" indicates that the topic was deleted after the subscription was created.

    ", + "EventSubscription$SubscriptionCreationTime": "

    The time the RDS event notification subscription was created.

    ", + "EventSubscription$SourceType": "

    The source type for the RDS event notification subscription.

    ", + "EventSubscriptionsMessage$Marker": "

    An optional pagination token provided by a previous DescribeEventSubscriptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "EventsMessage$Marker": "

    An optional pagination token provided by a previous Events request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "IPRange$Status": "

    Specifies the status of the IP range. Status can be \"authorizing\", \"authorized\", \"revoking\", or \"revoked\".

    ", + "IPRange$CIDRIP": "

    Specifies the IP range.

    ", + "KeyList$member": null, + "ListTagsForResourceMessage$ResourceName": "

    The DB Instance with tags to be listed.

    ", + "ModifyDBInstanceMessage$DBInstanceIdentifier": "

    The DB Instance identifier. This value is stored as a lowercase string.

    Constraints:

    • Must be the identifier for an existing DB Instance
    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "ModifyDBInstanceMessage$DBInstanceClass": "

    The new compute and memory capacity of the DB Instance. To determine the instance classes that are available for a particular DB engine, use the DescribeOrderableDBInstanceOptions action.

    Passing a value for this parameter causes an outage during the change and is applied during the next maintenance window, unless the ApplyImmediately parameter is specified as true for this request.

    Default: Uses existing setting

    Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge

    ", + "ModifyDBInstanceMessage$MasterUserPassword": "

    The new password for the DB Instance master user. It can contain any printable ASCII character except \"/\", \"\\\", or \"@\".

    Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the MasterUserPassword element exists in the PendingModifiedValues element of the operation response.

    Default: Uses existing setting

    Constraints: Must be 8 to 41 alphanumeric characters (MySQL), 8 to 30 alphanumeric characters (Oracle), or 8 to 128 alphanumeric characters (SQL Server).

    Amazon RDS API actions never return the password, so this action provides a way to regain access to a master instance user if the password is lost. ", + "ModifyDBInstanceMessage$DBParameterGroupName": "

    The name of the DB Parameter Group to apply to this DB Instance. Changing this parameter does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

    Default: Uses existing setting

    Constraints: The DB Parameter Group must be in the same DB Parameter Group family as this DB Instance.

    ", + "ModifyDBInstanceMessage$PreferredBackupWindow": "

    The daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible.

    Constraints:

    • Must be in the format hh24:mi-hh24:mi
    • Times should be Universal Time Coordinated (UTC)
    • Must not conflict with the preferred maintenance window
    • Must be at least 30 minutes
    ", + "ModifyDBInstanceMessage$PreferredMaintenanceWindow": "

    The weekly time range (in UTC) during which system maintenance can occur, which may result in an outage. Changing this parameter does not result in an outage, except in the following situation, and the change is asynchronously applied as soon as possible. If there are pending actions that cause a reboot, and the maintenance window is changed to include the current time, then changing this parameter will cause a reboot of the DB Instance. If moving this window to the current time, there must be at least 30 minutes between the current time and end of the window to ensure pending changes are applied.

    Default: Uses existing setting

    Format: ddd:hh24:mi-ddd:hh24:mi

    Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun

    Constraints: Must be at least 30 minutes

    ", + "ModifyDBInstanceMessage$EngineVersion": "

    The version number of the database engine to upgrade to. Changing this parameter results in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

    For major version upgrades, if a nondefault DB Parameter Group is currently in use, a new DB Parameter Group in the DB Parameter Group Family for the new engine version must be specified. The new DB Parameter Group can be the default for that DB Parameter Group Family.

    Example: 5.1.42

    ", + "ModifyDBInstanceMessage$OptionGroupName": "

    Indicates that the DB Instance should be associated with the specified option group. Changing this parameter does not result in an outage except in the following case and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. If the parameter change results in an option group that enables OEM, this change can cause a brief (sub-second) period during which new connections are rejected but existing connections are not interrupted.

    ", + "ModifyDBInstanceMessage$NewDBInstanceIdentifier": "

    The new DB Instance identifier for the DB Instance when renaming a DB Instance. This value is stored as a lowercase string.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
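
    A sketch combining two of the modifications above: scaling the instance class (an outage-causing change) and applying it immediately rather than waiting for the maintenance window. Identifiers and class are placeholders:

        package main

        import (
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/rds"
        )

        func main() {
            svc := rds.New(session.Must(session.NewSession()))

            // ApplyImmediately=true trades the maintenance-window delay
            // for an immediate outage, per the notes above.
            _, err := svc.ModifyDBInstance(&rds.ModifyDBInstanceInput{
                DBInstanceIdentifier: aws.String("mydbinstance"),
                DBInstanceClass:      aws.String("db.m1.large"),
                ApplyImmediately:     aws.Bool(true),
            })
            if err != nil {
                log.Fatal(err)
            }
        }
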
    ", + "ModifyDBParameterGroupMessage$DBParameterGroupName": "

    The name of the DB Parameter Group.

    Constraints:

    • Must be the name of an existing DB Parameter Group
    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "ModifyDBSubnetGroupMessage$DBSubnetGroupName": "

    The name for the DB Subnet Group. This value is stored as a lowercase string.

    Constraints: Must contain no more than 255 alphanumeric characters or hyphens. Must not be \"Default\".

    Example: mySubnetgroup

    ", + "ModifyDBSubnetGroupMessage$DBSubnetGroupDescription": "

    The description for the DB Subnet Group.

    ", + "ModifyEventSubscriptionMessage$SubscriptionName": "

    The name of the RDS event notification subscription.

    ", + "ModifyEventSubscriptionMessage$SnsTopicArn": "

    The Amazon Resource Name (ARN) of the SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it.

    ", + "ModifyEventSubscriptionMessage$SourceType": "

    The type of source that will be generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. If this value is not specified, all events are returned.

    Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot

    ", + "ModifyOptionGroupMessage$OptionGroupName": "

    The name of the option group to be modified.

    ", + "Option$OptionName": "

    The name of the option.

    ", + "Option$OptionDescription": "

    The description of the option.

    ", + "OptionConfiguration$OptionName": "

    The name of the option to include in the group.

    ", + "OptionGroup$OptionGroupName": "

    Specifies the name of the option group.

    ", + "OptionGroup$OptionGroupDescription": "

    Provides the description of the option group.

    ", + "OptionGroup$EngineName": "

    Engine name that this option group can be applied to.

    ", + "OptionGroup$MajorEngineVersion": "

    Indicates the major engine version associated with this option group.

    ", + "OptionGroup$VpcId": "

    If AllowsVpcAndNonVpcInstanceMemberships is 'false', this field is blank. If AllowsVpcAndNonVpcInstanceMemberships is 'true' and this field is blank, then this option group can be applied to both VPC and non-VPC instances. If this field contains a value, then this option group can only be applied to instances that are in the VPC indicated by this field.

    ", + "OptionGroupMembership$OptionGroupName": "

    The name of the option group that the instance belongs to.

    ", + "OptionGroupMembership$Status": "

    The status of the DB Instance's option group membership (e.g. in-sync, pending, pending-maintenance, applying).

    ", + "OptionGroupOption$Name": "

    The name of the option.

    ", + "OptionGroupOption$Description": "

    The description of the option.

    ", + "OptionGroupOption$EngineName": "

    Engine name that this option can be applied to.

    ", + "OptionGroupOption$MajorEngineVersion": "

    Indicates the major engine version that the option is available for.

    ", + "OptionGroupOption$MinimumRequiredMinorEngineVersion": "

    The minimum required engine version for the option to be applied.

    ", + "OptionGroupOptionsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "OptionGroups$Marker": null, + "OptionNamesList$member": null, + "OptionsDependedOn$member": null, + "OrderableDBInstanceOption$Engine": "

    The engine type of the orderable DB Instance.

    ", + "OrderableDBInstanceOption$EngineVersion": "

    The engine version of the orderable DB Instance.

    ", + "OrderableDBInstanceOption$DBInstanceClass": "

    The DB Instance Class for the orderable DB Instance.

    ", + "OrderableDBInstanceOption$LicenseModel": "

    The license model for the orderable DB Instance.

    ", + "OrderableDBInstanceOptionsMessage$Marker": "

    An optional pagination token provided by a previous OrderableDBInstanceOptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "Parameter$ParameterName": "

    Specifies the name of the parameter.

    ", + "Parameter$ParameterValue": "

    Specifies the value of the parameter.

    ", + "Parameter$Description": "

    Provides a description of the parameter.

    ", + "Parameter$Source": "

    Indicates the source of the parameter value.

    ", + "Parameter$ApplyType": "

    Specifies the engine specific parameters type.

    ", + "Parameter$DataType": "

    Specifies the valid data type for the parameter.

    ", + "Parameter$AllowedValues": "

    Specifies the valid range of values for the parameter.

    ", + "Parameter$MinimumEngineVersion": "

    The earliest engine version to which the parameter can apply.

    ", + "PendingModifiedValues$DBInstanceClass": "

    Contains the new DBInstanceClass for the DB Instance that will be applied or is in progress.

    ", + "PendingModifiedValues$MasterUserPassword": "

    Contains the pending or in-progress change of the master credentials for the DB Instance.

    ", + "PendingModifiedValues$EngineVersion": "

    Indicates the database engine version.

    ", + "PendingModifiedValues$DBInstanceIdentifier": "

    Contains the new DBInstanceIdentifier for the DB Instance that will be applied or is in progress.

    ", + "PromoteReadReplicaMessage$DBInstanceIdentifier": "

    The DB Instance identifier. This value is stored as a lowercase string.

    Constraints:

    • Must be the identifier for an existing Read Replica DB Instance
    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: mydbinstance

    ", + "PromoteReadReplicaMessage$PreferredBackupWindow": "

    The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter.

    Default: A 30-minute window selected at random from an 8-hour block of time per region. The following list shows the time blocks for each region from which the default backup windows are assigned.

    • US-East (Northern Virginia) Region: 03:00-11:00 UTC
    • US-West (Northern California) Region: 06:00-14:00 UTC
    • EU (Ireland) Region: 22:00-06:00 UTC
    • Asia Pacific (Singapore) Region: 14:00-22:00 UTC
    • Asia Pacific (Tokyo) Region: 17:00-03:00 UTC

    Constraints: Must be in the format hh24:mi-hh24:mi. Times should be Universal Time Coordinated (UTC). Must not conflict with the preferred maintenance window. Must be at least 30 minutes.

    ", + "PurchaseReservedDBInstancesOfferingMessage$ReservedDBInstancesOfferingId": "

    The ID of the Reserved DB Instance offering to purchase.

    Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706

    ", + "PurchaseReservedDBInstancesOfferingMessage$ReservedDBInstanceId": "

    Customer-specified identifier to track this reservation.

    Example: myreservationID
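
    A sketch of the purchase call built from these two fields; the offering ID is the documented example value, the reservation ID is a placeholder, and DBInstanceCount is left at its default of 1:

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/rds"
        )

        func main() {
            svc := rds.New(session.Must(session.NewSession()))

            out, err := svc.PurchaseReservedDBInstancesOffering(
                &rds.PurchaseReservedDBInstancesOfferingInput{
                    ReservedDBInstancesOfferingId: aws.String("438012d3-4052-4cc7-b2e3-8d3372e0e706"),
                    ReservedDBInstanceId:          aws.String("myreservationID"),
                })
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println(aws.StringValue(out.ReservedDBInstance.State))
        }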

    ", + "ReadReplicaDBInstanceIdentifierList$member": null, + "RebootDBInstanceMessage$DBInstanceIdentifier": "

    The DB Instance identifier. This parameter is stored as a lowercase string.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RecurringCharge$RecurringChargeFrequency": "

    The frequency of the recurring charge.

    ", + "RemoveSourceIdentifierFromSubscriptionMessage$SubscriptionName": "

    The name of the RDS event notification subscription you want to remove a source identifier from.

    ", + "RemoveSourceIdentifierFromSubscriptionMessage$SourceIdentifier": "

    The source identifier to be removed from the subscription, such as the DB instance identifier for a DB instance or the name of a security group.

    ", + "RemoveTagsFromResourceMessage$ResourceName": "

    The DB Instance the tags will be removed from.

    ", + "ReservedDBInstance$ReservedDBInstanceId": "

    The unique identifier for the reservation.

    ", + "ReservedDBInstance$ReservedDBInstancesOfferingId": "

    The offering identifier.

    ", + "ReservedDBInstance$DBInstanceClass": "

    The DB instance class for the reserved DB Instance.

    ", + "ReservedDBInstance$CurrencyCode": "

    The currency code for the reserved DB Instance.

    ", + "ReservedDBInstance$ProductDescription": "

    The description of the reserved DB Instance.

    ", + "ReservedDBInstance$OfferingType": "

    The offering type of this reserved DB Instance.

    ", + "ReservedDBInstance$State": "

    The state of the reserved DB Instance.

    ", + "ReservedDBInstanceMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "ReservedDBInstancesOffering$ReservedDBInstancesOfferingId": "

    The offering identifier.

    ", + "ReservedDBInstancesOffering$DBInstanceClass": "

    The DB instance class for the reserved DB Instance.

    ", + "ReservedDBInstancesOffering$CurrencyCode": "

    The currency code for the reserved DB Instance offering.

    ", + "ReservedDBInstancesOffering$ProductDescription": "

    The database engine used by the offering.

    ", + "ReservedDBInstancesOffering$OfferingType": "

    The offering type.

    ", + "ReservedDBInstancesOfferingMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "ResetDBParameterGroupMessage$DBParameterGroupName": "

    The name of the DB Parameter Group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBInstanceIdentifier": "

    The name of the DB Instance to create from the DB Snapshot. This parameter isn't case sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBSnapshotIdentifier": "

    The identifier for the DB Snapshot to restore from.

    Constraints:

    • Must contain from 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-snapshot-id

    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBInstanceClass": "

    The compute and memory capacity of the Amazon RDS DB instance.

    Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge

    ", + "RestoreDBInstanceFromDBSnapshotMessage$AvailabilityZone": "

    The EC2 Availability Zone that the database instance will be created in.

    Default: A random, system-chosen Availability Zone.

    Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    Example: us-east-1a

    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBSubnetGroupName": "

    The DB Subnet Group name to use for the new instance.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$LicenseModel": "

    License model information for the restored DB Instance.

    Default: Same as source.

    Valid values: license-included | bring-your-own-license | general-public-license

    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBName": "

    The database name for the restored DB Instance.

    This parameter doesn't apply to the MySQL engine.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$Engine": "

    The database engine to use for the new instance.

    Default: The same as source

    Constraint: Must be compatible with the engine of the source

    Example: oracle-ee
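
    A sketch of the restore call; only the two identifiers are required, and the omitted class, engine, and license model fall back to the source snapshot's values as documented above. Names are placeholders:

        package main

        import (
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/rds"
        )

        func main() {
            svc := rds.New(session.Must(session.NewSession()))

            // Class, engine, AZ, and license default to the source's
            // values when omitted.
            _, err := svc.RestoreDBInstanceFromDBSnapshot(
                &rds.RestoreDBInstanceFromDBSnapshotInput{
                    DBInstanceIdentifier: aws.String("mydbinstance-restored"),
                    DBSnapshotIdentifier: aws.String("my-snapshot-id"),
                })
            if err != nil {
                log.Fatal(err)
            }
        }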

    ", + "RestoreDBInstanceFromDBSnapshotMessage$OptionGroupName": null, + "RestoreDBInstanceToPointInTimeMessage$SourceDBInstanceIdentifier": "

    The identifier of the source DB Instance from which to restore.

    Constraints:

    • Must be the identifier of an existing database instance
    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RestoreDBInstanceToPointInTimeMessage$TargetDBInstanceIdentifier": "

    The name of the new database instance to be created.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RestoreDBInstanceToPointInTimeMessage$DBInstanceClass": "

    The compute and memory capacity of the Amazon RDS DB instance.

    Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge

    Default: The same DBInstanceClass as the original DB Instance.

    ", + "RestoreDBInstanceToPointInTimeMessage$AvailabilityZone": "

    The EC2 Availability Zone that the database instance will be created in.

    Default: A random, system-chosen Availability Zone.

    Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    Example: us-east-1a

    ", + "RestoreDBInstanceToPointInTimeMessage$DBSubnetGroupName": "

    The DB subnet group name to use for the new instance.

    ", + "RestoreDBInstanceToPointInTimeMessage$LicenseModel": "

    License model information for the restored DB Instance.

    Default: Same as source.

    Valid values: license-included | bring-your-own-license | general-public-license

    ", + "RestoreDBInstanceToPointInTimeMessage$DBName": "

    The database name for the restored DB Instance.

    This parameter is not used for the MySQL engine.

    ", + "RestoreDBInstanceToPointInTimeMessage$Engine": "

    The database engine to use for the new instance.

    Default: The same as source

    Constraint: Must be compatible with the engine of the source

    Example: oracle-ee

    ", + "RestoreDBInstanceToPointInTimeMessage$OptionGroupName": null, + "RevokeDBSecurityGroupIngressMessage$DBSecurityGroupName": "

    The name of the DB Security Group to revoke ingress from.

    ", + "RevokeDBSecurityGroupIngressMessage$CIDRIP": "

    The IP range to revoke access from. Must be a valid CIDR range. If CIDRIP is specified, EC2SecurityGroupName, EC2SecurityGroupId and EC2SecurityGroupOwnerId cannot be provided.

    ", + "RevokeDBSecurityGroupIngressMessage$EC2SecurityGroupName": "

    The name of the EC2 Security Group to revoke access from. For VPC DB Security Groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "RevokeDBSecurityGroupIngressMessage$EC2SecurityGroupId": "

    The id of the EC2 Security Group to revoke access from. For VPC DB Security Groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "RevokeDBSecurityGroupIngressMessage$EC2SecurityGroupOwnerId": "

    The AWS Account Number of the owner of the EC2 security group specified in the EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable value. For VPC DB Security Groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.
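
    Since CIDRIP and the EC2 security group fields are mutually exclusive, a revocation takes one of two shapes. A sketch of the plain IP-range variant, with a placeholder group name and range:

        package main

        import (
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/rds"
        )

        func main() {
            svc := rds.New(session.Must(session.NewSession()))

            // When CIDRIP is set, none of the EC2SecurityGroup* fields
            // may be provided, per the constraints above.
            _, err := svc.RevokeDBSecurityGroupIngress(
                &rds.RevokeDBSecurityGroupIngressInput{
                    DBSecurityGroupName: aws.String("mysecuritygroup"),
                    CIDRIP:              aws.String("203.0.113.0/24"),
                })
            if err != nil {
                log.Fatal(err)
            }
        }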

    ", + "SourceIdsList$member": null, + "Subnet$SubnetIdentifier": "

    Specifies the identifier of the subnet.

    ", + "Subnet$SubnetStatus": "

    Specifies the status of the subnet.

    ", + "SubnetIdentifierList$member": null, + "Tag$Key": "

    A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and cannot be prefixed with \"aws:\". The string may contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").

    ", + "Tag$Value": "

    A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and cannot be prefixed with \"aws:\". The string may contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").

    ", + "VpcSecurityGroupIdList$member": null, + "VpcSecurityGroupMembership$VpcSecurityGroupId": "

    The name of the VPC security group.

    ", + "VpcSecurityGroupMembership$Status": "

    The status of the VPC Security Group.

    " + } + }, + "Subnet": { + "base": "

    This data type is used as a response element in the DescribeDBSubnetGroups action.

    ", + "refs": { + "SubnetList$member": null + } + }, + "SubnetAlreadyInUse": { + "base": "

    The DB subnet is already in use in the Availability Zone.

    ", + "refs": { + } + }, + "SubnetIdentifierList": { + "base": null, + "refs": { + "CreateDBSubnetGroupMessage$SubnetIds": "

    The EC2 Subnet IDs for the DB Subnet Group.

    ", + "ModifyDBSubnetGroupMessage$SubnetIds": "

    The EC2 Subnet IDs for the DB Subnet Group.

    " + } + }, + "SubnetList": { + "base": null, + "refs": { + "DBSubnetGroup$Subnets": "

    Contains a list of Subnet elements.

    " + } + }, + "SubscriptionAlreadyExistFault": { + "base": "

    The supplied subscription name already exists.

    ", + "refs": { + } + }, + "SubscriptionCategoryNotFoundFault": { + "base": "

    The supplied category does not exist.

    ", + "refs": { + } + }, + "SubscriptionNotFoundFault": { + "base": "

    The subscription name does not exist.

    ", + "refs": { + } + }, + "SupportedCharacterSetsList": { + "base": null, + "refs": { + "DBEngineVersion$SupportedCharacterSets": "

    A list of the character sets supported by this engine for the CharacterSetName parameter of the CreateDBInstance API.

    " + } + }, + "TStamp": { + "base": null, + "refs": { + "DBInstance$InstanceCreateTime": "

    Provides the date and time the DB Instance was created.

    ", + "DBInstance$LatestRestorableTime": "

    Specifies the latest time to which a database can be restored with point-in-time restore.

    ", + "DBSnapshot$SnapshotCreateTime": "

    Provides the time (UTC) when the snapshot was taken.

    ", + "DBSnapshot$InstanceCreateTime": "

    Specifies the time (UTC) when the snapshot was taken.

    ", + "DescribeEventsMessage$StartTime": "

    The beginning of the time interval to retrieve events for, specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.

    Example: 2009-07-08T18:00Z

    ", + "DescribeEventsMessage$EndTime": "

    The end of the time interval for which to retrieve events, specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.

    Example: 2009-07-08T18:00Z
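
    In the generated Go client these two fields are plain time.Time values and the SDK takes care of the ISO 8601 serialization; a minimal sketch (the 24-hour window is an arbitrary example):

    package rdsexamples

    import (
        "time"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/rds"
    )

    // recentEvents fetches events from an explicit StartTime/EndTime window.
    func recentEvents(svc *rds.RDS) (*rds.DescribeEventsOutput, error) {
        end := time.Now().UTC()
        return svc.DescribeEvents(&rds.DescribeEventsInput{
            StartTime: aws.Time(end.Add(-24 * time.Hour)),
            EndTime:   aws.Time(end),
        })
    }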

    ", + "Event$Date": "

    Specifies the date and time of the event.

    ", + "ReservedDBInstance$StartTime": "

    The time the reservation started.

    ", + "RestoreDBInstanceToPointInTimeMessage$RestoreTime": "

    The date and time to restore from.

    Valid Values: Value must be a UTC time

    Constraints:

    • Must be before the latest restorable time for the DB Instance
    • Cannot be specified if UseLatestRestorableTime parameter is true

    Example: 2009-09-07T23:45:00Z
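
    A minimal sketch of the corresponding call in the generated Go client (the instance identifiers are placeholders); note that RestoreTime and UseLatestRestorableTime are mutually exclusive, as the constraints above state:

    package rdsexamples

    import (
        "time"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/rds"
    )

    // restoreAt restores a new instance from a source at an explicit UTC time.
    func restoreAt(svc *rds.RDS, when time.Time) error {
        _, err := svc.RestoreDBInstanceToPointInTime(&rds.RestoreDBInstanceToPointInTimeInput{
            SourceDBInstanceIdentifier: aws.String("mysourcedb"),   // placeholder
            TargetDBInstanceIdentifier: aws.String("myrestoreddb"), // placeholder
            RestoreTime:                aws.Time(when.UTC()),
            // UseLatestRestorableTime: aws.Bool(true), // use instead of RestoreTime
        })
        return err
    }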

    " + } + }, + "Tag": { + "base": "

    Metadata assigned to a DB Instance consisting of a key-value pair.

    ", + "refs": { + "TagList$member": null + } + }, + "TagList": { + "base": "

    A list of tags.

    ", + "refs": { + "AddTagsToResourceMessage$Tags": "

    The tags to be assigned to the DB Instance.

    ", + "TagListMessage$TagList": "

    List of tags returned by the ListTagsForResource operation.
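
    A short sketch of the round trip through the generated Go client (the ARN argument is a placeholder; RDS tagging addresses resources by ARN):

    package rdsexamples

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/rds"
    )

    // tagAndList attaches one tag to a resource and reads the list back.
    func tagAndList(svc *rds.RDS, arn string) (*rds.ListTagsForResourceOutput, error) {
        _, err := svc.AddTagsToResource(&rds.AddTagsToResourceInput{
            ResourceName: aws.String(arn),
            Tags: []*rds.Tag{{
                Key:   aws.String("environment"), // placeholder key/value
                Value: aws.String("staging"),
            }},
        })
        if err != nil {
            return nil, err
        }
        return svc.ListTagsForResource(&rds.ListTagsForResourceInput{
            ResourceName: aws.String(arn),
        })
    }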

    " + } + }, + "TagListMessage": { + "base": "

    ", + "refs": { + } + }, + "VpcSecurityGroupIdList": { + "base": null, + "refs": { + "CreateDBInstanceMessage$VpcSecurityGroupIds": "

    A list of EC2 VPC Security Groups to associate with this DB Instance.

    Default: The default EC2 VPC Security Group for the DB Subnet group's VPC.

    ", + "ModifyDBInstanceMessage$VpcSecurityGroupIds": "

    A list of EC2 VPC Security Groups to authorize on this DB Instance. This change is asynchronously applied as soon as possible.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
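
    A minimal sketch of the asynchronous security-group swap described above (the instance and group identifiers are placeholders):

    package rdsexamples

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/rds"
    )

    // swapSecurityGroups replaces the instance's VPC security group list;
    // the change is applied asynchronously, per the doc string above.
    func swapSecurityGroups(svc *rds.RDS) error {
        _, err := svc.ModifyDBInstance(&rds.ModifyDBInstanceInput{
            DBInstanceIdentifier: aws.String("mydbinstance"), // placeholder
            VpcSecurityGroupIds:  aws.StringSlice([]string{"sg-0123456789abcdef0"}), // placeholder
        })
        return err
    }
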
    ", + "OptionConfiguration$VpcSecurityGroupMemberships": "

    A list of VpcSecurityGroupMembership name strings used for this option.

    " + } + }, + "VpcSecurityGroupMembership": { + "base": "

    This data type is used as a response element for queries on VPC security group membership.

    ", + "refs": { + "VpcSecurityGroupMembershipList$member": null + } + }, + "VpcSecurityGroupMembershipList": { + "base": null, + "refs": { + "DBInstance$VpcSecurityGroups": "

    Provides a list of the VPC security group elements that the DB Instance belongs to.

    ", + "Option$VpcSecurityGroupMemberships": "

    If the Option requires access to a port, then this VPC Security Group allows access to the port.
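
    One note on the Marker/MaxRecords scheme that the new paginators-1.json below declares for the Describe* operations: the generated Go client turns each entry into a *Pages helper that feeds the output_token back into the input_token until the last page. A minimal sketch:

    package rdsexamples

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/rds"
    )

    // listAllInstances walks every page of DescribeDBInstances; MaxRecords is
    // the declared limit_key, and Marker is the paging token the SDK manages.
    func listAllInstances(svc *rds.RDS) error {
        return svc.DescribeDBInstancesPages(&rds.DescribeDBInstancesInput{
            MaxRecords: aws.Int64(100),
        }, func(page *rds.DescribeDBInstancesOutput, lastPage bool) bool {
            for _, db := range page.DBInstances {
                fmt.Println(aws.StringValue(db.DBInstanceIdentifier))
            }
            return true // continue to the next page
        })
    }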

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-01-10/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-01-10/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-01-10/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-01-10/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-01-10/paginators-1.json new file mode 100644 index 000000000..e70d762e0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-01-10/paginators-1.json @@ -0,0 +1,97 @@ +{ + "pagination": { + "DescribeDBEngineVersions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBEngineVersions" + }, + "DescribeDBInstances": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBInstances" + }, + "DescribeDBParameterGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBParameterGroups" + }, + "DescribeDBParameters": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "Parameters" + }, + "DescribeDBSecurityGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBSecurityGroups" + }, + "DescribeDBSnapshots": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBSnapshots" + }, + "DescribeDBSubnetGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBSubnetGroups" + }, + "DescribeEngineDefaultParameters": { + "input_token": "Marker", + "output_token": "EngineDefaults.Marker", + "limit_key": "MaxRecords", + "result_key": "EngineDefaults.Parameters" + }, + "DescribeEventSubscriptions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "EventSubscriptionsList" + }, + "DescribeEvents": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "Events" + }, + "DescribeOptionGroupOptions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "OptionGroupOptions" + }, + "DescribeOptionGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "OptionGroupsList" + }, + "DescribeOrderableDBInstanceOptions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "OrderableDBInstanceOptions" + }, + "DescribeReservedDBInstances": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "ReservedDBInstances" + }, + "DescribeReservedDBInstancesOfferings": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "ReservedDBInstancesOfferings" + }, + "ListTagsForResource": { + "result_key": "TagList" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-02-12/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-02-12/api-2.json new file mode 100644 index 000000000..50bb7e4ce --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-02-12/api-2.json @@ -0,0 +1,3057 @@ +{ + "version":"2.0", + "metadata":{ + 
"apiVersion":"2013-02-12", + "endpointPrefix":"rds", + "protocol":"query", + "serviceAbbreviation":"Amazon RDS", + "serviceFullName":"Amazon Relational Database Service", + "signatureVersion":"v4", + "xmlNamespace":"http://rds.amazonaws.com/doc/2013-02-12/" + }, + "operations":{ + "AddSourceIdentifierToSubscription":{ + "name":"AddSourceIdentifierToSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddSourceIdentifierToSubscriptionMessage"}, + "output":{ + "shape":"AddSourceIdentifierToSubscriptionResult", + "resultWrapper":"AddSourceIdentifierToSubscriptionResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"SourceNotFoundFault"} + ] + }, + "AddTagsToResource":{ + "name":"AddTagsToResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddTagsToResourceMessage"}, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "AuthorizeDBSecurityGroupIngress":{ + "name":"AuthorizeDBSecurityGroupIngress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AuthorizeDBSecurityGroupIngressMessage"}, + "output":{ + "shape":"AuthorizeDBSecurityGroupIngressResult", + "resultWrapper":"AuthorizeDBSecurityGroupIngressResult" + }, + "errors":[ + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"InvalidDBSecurityGroupStateFault"}, + {"shape":"AuthorizationAlreadyExistsFault"}, + {"shape":"AuthorizationQuotaExceededFault"} + ] + }, + "CopyDBSnapshot":{ + "name":"CopyDBSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CopyDBSnapshotMessage"}, + "output":{ + "shape":"CopyDBSnapshotResult", + "resultWrapper":"CopyDBSnapshotResult" + }, + "errors":[ + {"shape":"DBSnapshotAlreadyExistsFault"}, + {"shape":"DBSnapshotNotFoundFault"}, + {"shape":"InvalidDBSnapshotStateFault"}, + {"shape":"SnapshotQuotaExceededFault"} + ] + }, + "CreateDBInstance":{ + "name":"CreateDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBInstanceMessage"}, + "output":{ + "shape":"CreateDBInstanceResult", + "resultWrapper":"CreateDBInstanceResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"} + ] + }, + "CreateDBInstanceReadReplica":{ + "name":"CreateDBInstanceReadReplica", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBInstanceReadReplicaMessage"}, + "output":{ + "shape":"CreateDBInstanceReadReplicaResult", + "resultWrapper":"CreateDBInstanceReadReplicaResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"}, + {"shape":"InvalidVPCNetworkStateFault"}, + 
{"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"} + ] + }, + "CreateDBParameterGroup":{ + "name":"CreateDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBParameterGroupMessage"}, + "output":{ + "shape":"CreateDBParameterGroupResult", + "resultWrapper":"CreateDBParameterGroupResult" + }, + "errors":[ + {"shape":"DBParameterGroupQuotaExceededFault"}, + {"shape":"DBParameterGroupAlreadyExistsFault"} + ] + }, + "CreateDBSecurityGroup":{ + "name":"CreateDBSecurityGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBSecurityGroupMessage"}, + "output":{ + "shape":"CreateDBSecurityGroupResult", + "resultWrapper":"CreateDBSecurityGroupResult" + }, + "errors":[ + {"shape":"DBSecurityGroupAlreadyExistsFault"}, + {"shape":"DBSecurityGroupQuotaExceededFault"}, + {"shape":"DBSecurityGroupNotSupportedFault"} + ] + }, + "CreateDBSnapshot":{ + "name":"CreateDBSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBSnapshotMessage"}, + "output":{ + "shape":"CreateDBSnapshotResult", + "resultWrapper":"CreateDBSnapshotResult" + }, + "errors":[ + {"shape":"DBSnapshotAlreadyExistsFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"SnapshotQuotaExceededFault"} + ] + }, + "CreateDBSubnetGroup":{ + "name":"CreateDBSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBSubnetGroupMessage"}, + "output":{ + "shape":"CreateDBSubnetGroupResult", + "resultWrapper":"CreateDBSubnetGroupResult" + }, + "errors":[ + {"shape":"DBSubnetGroupAlreadyExistsFault"}, + {"shape":"DBSubnetGroupQuotaExceededFault"}, + {"shape":"DBSubnetQuotaExceededFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"} + ] + }, + "CreateEventSubscription":{ + "name":"CreateEventSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateEventSubscriptionMessage"}, + "output":{ + "shape":"CreateEventSubscriptionResult", + "resultWrapper":"CreateEventSubscriptionResult" + }, + "errors":[ + {"shape":"EventSubscriptionQuotaExceededFault"}, + {"shape":"SubscriptionAlreadyExistFault"}, + {"shape":"SNSInvalidTopicFault"}, + {"shape":"SNSNoAuthorizationFault"}, + {"shape":"SNSTopicArnNotFoundFault"}, + {"shape":"SubscriptionCategoryNotFoundFault"}, + {"shape":"SourceNotFoundFault"} + ] + }, + "CreateOptionGroup":{ + "name":"CreateOptionGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateOptionGroupMessage"}, + "output":{ + "shape":"CreateOptionGroupResult", + "resultWrapper":"CreateOptionGroupResult" + }, + "errors":[ + {"shape":"OptionGroupAlreadyExistsFault"}, + {"shape":"OptionGroupQuotaExceededFault"} + ] + }, + "DeleteDBInstance":{ + "name":"DeleteDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBInstanceMessage"}, + "output":{ + "shape":"DeleteDBInstanceResult", + "resultWrapper":"DeleteDBInstanceResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBSnapshotAlreadyExistsFault"}, + {"shape":"SnapshotQuotaExceededFault"} + ] + }, + "DeleteDBParameterGroup":{ + "name":"DeleteDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBParameterGroupMessage"}, + "errors":[ + {"shape":"InvalidDBParameterGroupStateFault"}, + 
{"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "DeleteDBSecurityGroup":{ + "name":"DeleteDBSecurityGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBSecurityGroupMessage"}, + "errors":[ + {"shape":"InvalidDBSecurityGroupStateFault"}, + {"shape":"DBSecurityGroupNotFoundFault"} + ] + }, + "DeleteDBSnapshot":{ + "name":"DeleteDBSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBSnapshotMessage"}, + "output":{ + "shape":"DeleteDBSnapshotResult", + "resultWrapper":"DeleteDBSnapshotResult" + }, + "errors":[ + {"shape":"InvalidDBSnapshotStateFault"}, + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "DeleteDBSubnetGroup":{ + "name":"DeleteDBSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBSubnetGroupMessage"}, + "errors":[ + {"shape":"InvalidDBSubnetGroupStateFault"}, + {"shape":"InvalidDBSubnetStateFault"}, + {"shape":"DBSubnetGroupNotFoundFault"} + ] + }, + "DeleteEventSubscription":{ + "name":"DeleteEventSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteEventSubscriptionMessage"}, + "output":{ + "shape":"DeleteEventSubscriptionResult", + "resultWrapper":"DeleteEventSubscriptionResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"InvalidEventSubscriptionStateFault"} + ] + }, + "DeleteOptionGroup":{ + "name":"DeleteOptionGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteOptionGroupMessage"}, + "errors":[ + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"InvalidOptionGroupStateFault"} + ] + }, + "DescribeDBEngineVersions":{ + "name":"DescribeDBEngineVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBEngineVersionsMessage"}, + "output":{ + "shape":"DBEngineVersionMessage", + "resultWrapper":"DescribeDBEngineVersionsResult" + } + }, + "DescribeDBInstances":{ + "name":"DescribeDBInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBInstancesMessage"}, + "output":{ + "shape":"DBInstanceMessage", + "resultWrapper":"DescribeDBInstancesResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"} + ] + }, + "DescribeDBLogFiles":{ + "name":"DescribeDBLogFiles", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBLogFilesMessage"}, + "output":{ + "shape":"DescribeDBLogFilesResponse", + "resultWrapper":"DescribeDBLogFilesResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"} + ] + }, + "DescribeDBParameterGroups":{ + "name":"DescribeDBParameterGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBParameterGroupsMessage"}, + "output":{ + "shape":"DBParameterGroupsMessage", + "resultWrapper":"DescribeDBParameterGroupsResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "DescribeDBParameters":{ + "name":"DescribeDBParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBParametersMessage"}, + "output":{ + "shape":"DBParameterGroupDetails", + "resultWrapper":"DescribeDBParametersResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "DescribeDBSecurityGroups":{ + "name":"DescribeDBSecurityGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBSecurityGroupsMessage"}, + "output":{ + "shape":"DBSecurityGroupMessage", + "resultWrapper":"DescribeDBSecurityGroupsResult" + }, + 
"errors":[ + {"shape":"DBSecurityGroupNotFoundFault"} + ] + }, + "DescribeDBSnapshots":{ + "name":"DescribeDBSnapshots", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBSnapshotsMessage"}, + "output":{ + "shape":"DBSnapshotMessage", + "resultWrapper":"DescribeDBSnapshotsResult" + }, + "errors":[ + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "DescribeDBSubnetGroups":{ + "name":"DescribeDBSubnetGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBSubnetGroupsMessage"}, + "output":{ + "shape":"DBSubnetGroupMessage", + "resultWrapper":"DescribeDBSubnetGroupsResult" + }, + "errors":[ + {"shape":"DBSubnetGroupNotFoundFault"} + ] + }, + "DescribeEngineDefaultParameters":{ + "name":"DescribeEngineDefaultParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEngineDefaultParametersMessage"}, + "output":{ + "shape":"DescribeEngineDefaultParametersResult", + "resultWrapper":"DescribeEngineDefaultParametersResult" + } + }, + "DescribeEventCategories":{ + "name":"DescribeEventCategories", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventCategoriesMessage"}, + "output":{ + "shape":"EventCategoriesMessage", + "resultWrapper":"DescribeEventCategoriesResult" + } + }, + "DescribeEventSubscriptions":{ + "name":"DescribeEventSubscriptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventSubscriptionsMessage"}, + "output":{ + "shape":"EventSubscriptionsMessage", + "resultWrapper":"DescribeEventSubscriptionsResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"} + ] + }, + "DescribeEvents":{ + "name":"DescribeEvents", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventsMessage"}, + "output":{ + "shape":"EventsMessage", + "resultWrapper":"DescribeEventsResult" + } + }, + "DescribeOptionGroupOptions":{ + "name":"DescribeOptionGroupOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOptionGroupOptionsMessage"}, + "output":{ + "shape":"OptionGroupOptionsMessage", + "resultWrapper":"DescribeOptionGroupOptionsResult" + } + }, + "DescribeOptionGroups":{ + "name":"DescribeOptionGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOptionGroupsMessage"}, + "output":{ + "shape":"OptionGroups", + "resultWrapper":"DescribeOptionGroupsResult" + }, + "errors":[ + {"shape":"OptionGroupNotFoundFault"} + ] + }, + "DescribeOrderableDBInstanceOptions":{ + "name":"DescribeOrderableDBInstanceOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOrderableDBInstanceOptionsMessage"}, + "output":{ + "shape":"OrderableDBInstanceOptionsMessage", + "resultWrapper":"DescribeOrderableDBInstanceOptionsResult" + } + }, + "DescribeReservedDBInstances":{ + "name":"DescribeReservedDBInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedDBInstancesMessage"}, + "output":{ + "shape":"ReservedDBInstanceMessage", + "resultWrapper":"DescribeReservedDBInstancesResult" + }, + "errors":[ + {"shape":"ReservedDBInstanceNotFoundFault"} + ] + }, + "DescribeReservedDBInstancesOfferings":{ + "name":"DescribeReservedDBInstancesOfferings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedDBInstancesOfferingsMessage"}, + "output":{ + "shape":"ReservedDBInstancesOfferingMessage", + 
"resultWrapper":"DescribeReservedDBInstancesOfferingsResult" + }, + "errors":[ + {"shape":"ReservedDBInstancesOfferingNotFoundFault"} + ] + }, + "DownloadDBLogFilePortion":{ + "name":"DownloadDBLogFilePortion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DownloadDBLogFilePortionMessage"}, + "output":{ + "shape":"DownloadDBLogFilePortionDetails", + "resultWrapper":"DownloadDBLogFilePortionResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBLogFileNotFoundFault"} + ] + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceMessage"}, + "output":{ + "shape":"TagListMessage", + "resultWrapper":"ListTagsForResourceResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "ModifyDBInstance":{ + "name":"ModifyDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBInstanceMessage"}, + "output":{ + "shape":"ModifyDBInstanceResult", + "resultWrapper":"ModifyDBInstanceResult" + }, + "errors":[ + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"InvalidDBSecurityGroupStateFault"}, + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"DBUpgradeDependencyFailureFault"} + ] + }, + "ModifyDBParameterGroup":{ + "name":"ModifyDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBParameterGroupMessage"}, + "output":{ + "shape":"DBParameterGroupNameMessage", + "resultWrapper":"ModifyDBParameterGroupResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"InvalidDBParameterGroupStateFault"} + ] + }, + "ModifyDBSubnetGroup":{ + "name":"ModifyDBSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBSubnetGroupMessage"}, + "output":{ + "shape":"ModifyDBSubnetGroupResult", + "resultWrapper":"ModifyDBSubnetGroupResult" + }, + "errors":[ + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetQuotaExceededFault"}, + {"shape":"SubnetAlreadyInUse"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"} + ] + }, + "ModifyEventSubscription":{ + "name":"ModifyEventSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyEventSubscriptionMessage"}, + "output":{ + "shape":"ModifyEventSubscriptionResult", + "resultWrapper":"ModifyEventSubscriptionResult" + }, + "errors":[ + {"shape":"EventSubscriptionQuotaExceededFault"}, + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"SNSInvalidTopicFault"}, + {"shape":"SNSNoAuthorizationFault"}, + {"shape":"SNSTopicArnNotFoundFault"}, + {"shape":"SubscriptionCategoryNotFoundFault"} + ] + }, + "ModifyOptionGroup":{ + "name":"ModifyOptionGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyOptionGroupMessage"}, + "output":{ + "shape":"ModifyOptionGroupResult", + "resultWrapper":"ModifyOptionGroupResult" + }, + "errors":[ + {"shape":"InvalidOptionGroupStateFault"}, + {"shape":"OptionGroupNotFoundFault"} + ] + }, + "PromoteReadReplica":{ + "name":"PromoteReadReplica", + 
"http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PromoteReadReplicaMessage"}, + "output":{ + "shape":"PromoteReadReplicaResult", + "resultWrapper":"PromoteReadReplicaResult" + }, + "errors":[ + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBInstanceNotFoundFault"} + ] + }, + "PurchaseReservedDBInstancesOffering":{ + "name":"PurchaseReservedDBInstancesOffering", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PurchaseReservedDBInstancesOfferingMessage"}, + "output":{ + "shape":"PurchaseReservedDBInstancesOfferingResult", + "resultWrapper":"PurchaseReservedDBInstancesOfferingResult" + }, + "errors":[ + {"shape":"ReservedDBInstancesOfferingNotFoundFault"}, + {"shape":"ReservedDBInstanceAlreadyExistsFault"}, + {"shape":"ReservedDBInstanceQuotaExceededFault"} + ] + }, + "RebootDBInstance":{ + "name":"RebootDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RebootDBInstanceMessage"}, + "output":{ + "shape":"RebootDBInstanceResult", + "resultWrapper":"RebootDBInstanceResult" + }, + "errors":[ + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBInstanceNotFoundFault"} + ] + }, + "RemoveSourceIdentifierFromSubscription":{ + "name":"RemoveSourceIdentifierFromSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveSourceIdentifierFromSubscriptionMessage"}, + "output":{ + "shape":"RemoveSourceIdentifierFromSubscriptionResult", + "resultWrapper":"RemoveSourceIdentifierFromSubscriptionResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"SourceNotFoundFault"} + ] + }, + "RemoveTagsFromResource":{ + "name":"RemoveTagsFromResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveTagsFromResourceMessage"}, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "ResetDBParameterGroup":{ + "name":"ResetDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetDBParameterGroupMessage"}, + "output":{ + "shape":"DBParameterGroupNameMessage", + "resultWrapper":"ResetDBParameterGroupResult" + }, + "errors":[ + {"shape":"InvalidDBParameterGroupStateFault"}, + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "RestoreDBInstanceFromDBSnapshot":{ + "name":"RestoreDBInstanceFromDBSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreDBInstanceFromDBSnapshotMessage"}, + "output":{ + "shape":"RestoreDBInstanceFromDBSnapshotResult", + "resultWrapper":"RestoreDBInstanceFromDBSnapshotResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"DBSnapshotNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"InvalidDBSnapshotStateFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"InvalidRestoreFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"} + ] + }, + "RestoreDBInstanceToPointInTime":{ + "name":"RestoreDBInstanceToPointInTime", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreDBInstanceToPointInTimeMessage"}, + "output":{ + "shape":"RestoreDBInstanceToPointInTimeResult", + "resultWrapper":"RestoreDBInstanceToPointInTimeResult" + }, + "errors":[ + 
{"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"PointInTimeRestoreNotEnabledFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"InvalidRestoreFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"} + ] + }, + "RevokeDBSecurityGroupIngress":{ + "name":"RevokeDBSecurityGroupIngress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RevokeDBSecurityGroupIngressMessage"}, + "output":{ + "shape":"RevokeDBSecurityGroupIngressResult", + "resultWrapper":"RevokeDBSecurityGroupIngressResult" + }, + "errors":[ + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"AuthorizationNotFoundFault"}, + {"shape":"InvalidDBSecurityGroupStateFault"} + ] + } + }, + "shapes":{ + "AddSourceIdentifierToSubscriptionMessage":{ + "type":"structure", + "required":[ + "SubscriptionName", + "SourceIdentifier" + ], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SourceIdentifier":{"shape":"String"} + } + }, + "AddSourceIdentifierToSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "AddTagsToResourceMessage":{ + "type":"structure", + "required":[ + "ResourceName", + "Tags" + ], + "members":{ + "ResourceName":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "ApplyMethod":{ + "type":"string", + "enum":[ + "immediate", + "pending-reboot" + ] + }, + "AuthorizationAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AuthorizationAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "AuthorizationNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AuthorizationNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "AuthorizationQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AuthorizationQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "AuthorizeDBSecurityGroupIngressMessage":{ + "type":"structure", + "required":["DBSecurityGroupName"], + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "CIDRIP":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupId":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"} + } + }, + "AuthorizeDBSecurityGroupIngressResult":{ + "type":"structure", + "members":{ + "DBSecurityGroup":{"shape":"DBSecurityGroup"} + } + }, + "AvailabilityZone":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "ProvisionedIopsCapable":{"shape":"Boolean"} + }, + "wrapper":true + }, + "AvailabilityZoneList":{ + "type":"list", + "member":{ + "shape":"AvailabilityZone", + "locationName":"AvailabilityZone" + } + }, + "Boolean":{"type":"boolean"}, + "BooleanOptional":{"type":"boolean"}, + "CharacterSet":{ + "type":"structure", + "members":{ + "CharacterSetName":{"shape":"String"}, + "CharacterSetDescription":{"shape":"String"} + } + }, + "CopyDBSnapshotMessage":{ + "type":"structure", + "required":[ + "SourceDBSnapshotIdentifier", + "TargetDBSnapshotIdentifier" + ], + "members":{ + "SourceDBSnapshotIdentifier":{"shape":"String"}, + 
"TargetDBSnapshotIdentifier":{"shape":"String"} + } + }, + "CopyDBSnapshotResult":{ + "type":"structure", + "members":{ + "DBSnapshot":{"shape":"DBSnapshot"} + } + }, + "CreateDBInstanceMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "AllocatedStorage", + "DBInstanceClass", + "Engine", + "MasterUsername", + "MasterUserPassword" + ], + "members":{ + "DBName":{"shape":"String"}, + "DBInstanceIdentifier":{"shape":"String"}, + "AllocatedStorage":{"shape":"IntegerOptional"}, + "DBInstanceClass":{"shape":"String"}, + "Engine":{"shape":"String"}, + "MasterUsername":{"shape":"String"}, + "MasterUserPassword":{"shape":"String"}, + "DBSecurityGroups":{"shape":"DBSecurityGroupNameList"}, + "VpcSecurityGroupIds":{"shape":"VpcSecurityGroupIdList"}, + "AvailabilityZone":{"shape":"String"}, + "DBSubnetGroupName":{"shape":"String"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "DBParameterGroupName":{"shape":"String"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "PreferredBackupWindow":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "EngineVersion":{"shape":"String"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "LicenseModel":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "CharacterSetName":{"shape":"String"}, + "PubliclyAccessible":{"shape":"BooleanOptional"} + } + }, + "CreateDBInstanceReadReplicaMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "SourceDBInstanceIdentifier" + ], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "SourceDBInstanceIdentifier":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "AvailabilityZone":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "PubliclyAccessible":{"shape":"BooleanOptional"} + } + }, + "CreateDBInstanceReadReplicaResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "CreateDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "CreateDBParameterGroupMessage":{ + "type":"structure", + "required":[ + "DBParameterGroupName", + "DBParameterGroupFamily", + "Description" + ], + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "Description":{"shape":"String"} + } + }, + "CreateDBParameterGroupResult":{ + "type":"structure", + "members":{ + "DBParameterGroup":{"shape":"DBParameterGroup"} + } + }, + "CreateDBSecurityGroupMessage":{ + "type":"structure", + "required":[ + "DBSecurityGroupName", + "DBSecurityGroupDescription" + ], + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "DBSecurityGroupDescription":{"shape":"String"} + } + }, + "CreateDBSecurityGroupResult":{ + "type":"structure", + "members":{ + "DBSecurityGroup":{"shape":"DBSecurityGroup"} + } + }, + "CreateDBSnapshotMessage":{ + "type":"structure", + "required":[ + "DBSnapshotIdentifier", + "DBInstanceIdentifier" + ], + "members":{ + "DBSnapshotIdentifier":{"shape":"String"}, + "DBInstanceIdentifier":{"shape":"String"} + } + }, + "CreateDBSnapshotResult":{ + "type":"structure", + "members":{ + "DBSnapshot":{"shape":"DBSnapshot"} + } + }, + "CreateDBSubnetGroupMessage":{ + "type":"structure", + "required":[ + "DBSubnetGroupName", + "DBSubnetGroupDescription", + "SubnetIds" + ], + "members":{ + 
"DBSubnetGroupName":{"shape":"String"}, + "DBSubnetGroupDescription":{"shape":"String"}, + "SubnetIds":{"shape":"SubnetIdentifierList"} + } + }, + "CreateDBSubnetGroupResult":{ + "type":"structure", + "members":{ + "DBSubnetGroup":{"shape":"DBSubnetGroup"} + } + }, + "CreateEventSubscriptionMessage":{ + "type":"structure", + "required":[ + "SubscriptionName", + "SnsTopicArn" + ], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SnsTopicArn":{"shape":"String"}, + "SourceType":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "SourceIds":{"shape":"SourceIdsList"}, + "Enabled":{"shape":"BooleanOptional"} + } + }, + "CreateEventSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "CreateOptionGroupMessage":{ + "type":"structure", + "required":[ + "OptionGroupName", + "EngineName", + "MajorEngineVersion", + "OptionGroupDescription" + ], + "members":{ + "OptionGroupName":{"shape":"String"}, + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"}, + "OptionGroupDescription":{"shape":"String"} + } + }, + "CreateOptionGroupResult":{ + "type":"structure", + "members":{ + "OptionGroup":{"shape":"OptionGroup"} + } + }, + "DBEngineVersion":{ + "type":"structure", + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "DBEngineDescription":{"shape":"String"}, + "DBEngineVersionDescription":{"shape":"String"}, + "DefaultCharacterSet":{"shape":"CharacterSet"}, + "SupportedCharacterSets":{"shape":"SupportedCharacterSetsList"} + } + }, + "DBEngineVersionList":{ + "type":"list", + "member":{ + "shape":"DBEngineVersion", + "locationName":"DBEngineVersion" + } + }, + "DBEngineVersionMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBEngineVersions":{"shape":"DBEngineVersionList"} + } + }, + "DBInstance":{ + "type":"structure", + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Engine":{"shape":"String"}, + "DBInstanceStatus":{"shape":"String"}, + "MasterUsername":{"shape":"String"}, + "DBName":{"shape":"String"}, + "Endpoint":{"shape":"Endpoint"}, + "AllocatedStorage":{"shape":"Integer"}, + "InstanceCreateTime":{"shape":"TStamp"}, + "PreferredBackupWindow":{"shape":"String"}, + "BackupRetentionPeriod":{"shape":"Integer"}, + "DBSecurityGroups":{"shape":"DBSecurityGroupMembershipList"}, + "VpcSecurityGroups":{"shape":"VpcSecurityGroupMembershipList"}, + "DBParameterGroups":{"shape":"DBParameterGroupStatusList"}, + "AvailabilityZone":{"shape":"String"}, + "DBSubnetGroup":{"shape":"DBSubnetGroup"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "PendingModifiedValues":{"shape":"PendingModifiedValues"}, + "LatestRestorableTime":{"shape":"TStamp"}, + "MultiAZ":{"shape":"Boolean"}, + "EngineVersion":{"shape":"String"}, + "AutoMinorVersionUpgrade":{"shape":"Boolean"}, + "ReadReplicaSourceDBInstanceIdentifier":{"shape":"String"}, + "ReadReplicaDBInstanceIdentifiers":{"shape":"ReadReplicaDBInstanceIdentifierList"}, + "LicenseModel":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupMemberships":{"shape":"OptionGroupMembershipList"}, + "CharacterSetName":{"shape":"String"}, + "SecondaryAvailabilityZone":{"shape":"String"}, + "PubliclyAccessible":{"shape":"Boolean"} + }, + "wrapper":true + }, + "DBInstanceAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBInstanceAlreadyExists", + 
"httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBInstanceList":{ + "type":"list", + "member":{ + "shape":"DBInstance", + "locationName":"DBInstance" + } + }, + "DBInstanceMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBInstances":{"shape":"DBInstanceList"} + } + }, + "DBInstanceNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBInstanceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBLogFileNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBLogFileNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroup":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "Description":{"shape":"String"} + }, + "wrapper":true + }, + "DBParameterGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBParameterGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroupDetails":{ + "type":"structure", + "members":{ + "Parameters":{"shape":"ParametersList"}, + "Marker":{"shape":"String"} + } + }, + "DBParameterGroupList":{ + "type":"list", + "member":{ + "shape":"DBParameterGroup", + "locationName":"DBParameterGroup" + } + }, + "DBParameterGroupNameMessage":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{"shape":"String"} + } + }, + "DBParameterGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBParameterGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBParameterGroupQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroupStatus":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "ParameterApplyStatus":{"shape":"String"} + } + }, + "DBParameterGroupStatusList":{ + "type":"list", + "member":{ + "shape":"DBParameterGroupStatus", + "locationName":"DBParameterGroup" + } + }, + "DBParameterGroupsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBParameterGroups":{"shape":"DBParameterGroupList"} + } + }, + "DBSecurityGroup":{ + "type":"structure", + "members":{ + "OwnerId":{"shape":"String"}, + "DBSecurityGroupName":{"shape":"String"}, + "DBSecurityGroupDescription":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "EC2SecurityGroups":{"shape":"EC2SecurityGroupList"}, + "IPRanges":{"shape":"IPRangeList"} + }, + "wrapper":true + }, + "DBSecurityGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSecurityGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroupMembership":{ + "type":"structure", + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "DBSecurityGroupMembershipList":{ + "type":"list", + "member":{ + "shape":"DBSecurityGroupMembership", + "locationName":"DBSecurityGroup" + } + }, + "DBSecurityGroupMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBSecurityGroups":{"shape":"DBSecurityGroups"} + } + }, + "DBSecurityGroupNameList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"DBSecurityGroupName" + } + }, + 
"DBSecurityGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSecurityGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroupNotSupportedFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSecurityGroupNotSupported", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"QuotaExceeded.DBSecurityGroup", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroups":{ + "type":"list", + "member":{ + "shape":"DBSecurityGroup", + "locationName":"DBSecurityGroup" + } + }, + "DBSnapshot":{ + "type":"structure", + "members":{ + "DBSnapshotIdentifier":{"shape":"String"}, + "DBInstanceIdentifier":{"shape":"String"}, + "SnapshotCreateTime":{"shape":"TStamp"}, + "Engine":{"shape":"String"}, + "AllocatedStorage":{"shape":"Integer"}, + "Status":{"shape":"String"}, + "Port":{"shape":"Integer"}, + "AvailabilityZone":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "InstanceCreateTime":{"shape":"TStamp"}, + "MasterUsername":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "LicenseModel":{"shape":"String"}, + "SnapshotType":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"} + }, + "wrapper":true + }, + "DBSnapshotAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSnapshotAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSnapshotList":{ + "type":"list", + "member":{ + "shape":"DBSnapshot", + "locationName":"DBSnapshot" + } + }, + "DBSnapshotMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBSnapshots":{"shape":"DBSnapshotList"} + } + }, + "DBSnapshotNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSnapshotNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroup":{ + "type":"structure", + "members":{ + "DBSubnetGroupName":{"shape":"String"}, + "DBSubnetGroupDescription":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "SubnetGroupStatus":{"shape":"String"}, + "Subnets":{"shape":"SubnetList"} + }, + "wrapper":true + }, + "DBSubnetGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupDoesNotCoverEnoughAZs":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupDoesNotCoverEnoughAZs", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBSubnetGroups":{"shape":"DBSubnetGroups"} + } + }, + "DBSubnetGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroups":{ + "type":"list", + "member":{ + "shape":"DBSubnetGroup", + "locationName":"DBSubnetGroup" + } + }, + "DBSubnetQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + 
"code":"DBSubnetQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBUpgradeDependencyFailureFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBUpgradeDependencyFailure", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DeleteDBInstanceMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "SkipFinalSnapshot":{"shape":"Boolean"}, + "FinalDBSnapshotIdentifier":{"shape":"String"} + } + }, + "DeleteDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "DeleteDBParameterGroupMessage":{ + "type":"structure", + "required":["DBParameterGroupName"], + "members":{ + "DBParameterGroupName":{"shape":"String"} + } + }, + "DeleteDBSecurityGroupMessage":{ + "type":"structure", + "required":["DBSecurityGroupName"], + "members":{ + "DBSecurityGroupName":{"shape":"String"} + } + }, + "DeleteDBSnapshotMessage":{ + "type":"structure", + "required":["DBSnapshotIdentifier"], + "members":{ + "DBSnapshotIdentifier":{"shape":"String"} + } + }, + "DeleteDBSnapshotResult":{ + "type":"structure", + "members":{ + "DBSnapshot":{"shape":"DBSnapshot"} + } + }, + "DeleteDBSubnetGroupMessage":{ + "type":"structure", + "required":["DBSubnetGroupName"], + "members":{ + "DBSubnetGroupName":{"shape":"String"} + } + }, + "DeleteEventSubscriptionMessage":{ + "type":"structure", + "required":["SubscriptionName"], + "members":{ + "SubscriptionName":{"shape":"String"} + } + }, + "DeleteEventSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "DeleteOptionGroupMessage":{ + "type":"structure", + "required":["OptionGroupName"], + "members":{ + "OptionGroupName":{"shape":"String"} + } + }, + "DescribeDBEngineVersionsMessage":{ + "type":"structure", + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"}, + "DefaultOnly":{"shape":"Boolean"}, + "ListSupportedCharacterSets":{"shape":"BooleanOptional"} + } + }, + "DescribeDBInstancesMessage":{ + "type":"structure", + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBLogFilesDetails":{ + "type":"structure", + "members":{ + "LogFileName":{"shape":"String"}, + "LastWritten":{"shape":"Long"}, + "Size":{"shape":"Long"} + } + }, + "DescribeDBLogFilesList":{ + "type":"list", + "member":{ + "shape":"DescribeDBLogFilesDetails", + "locationName":"DescribeDBLogFilesDetails" + } + }, + "DescribeDBLogFilesMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "FilenameContains":{"shape":"String"}, + "FileLastWritten":{"shape":"Long"}, + "FileSize":{"shape":"Long"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBLogFilesResponse":{ + "type":"structure", + "members":{ + "DescribeDBLogFiles":{"shape":"DescribeDBLogFilesList"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBParameterGroupsMessage":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBParametersMessage":{ + "type":"structure", + 
"required":["DBParameterGroupName"], + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "Source":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBSecurityGroupsMessage":{ + "type":"structure", + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBSnapshotsMessage":{ + "type":"structure", + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "DBSnapshotIdentifier":{"shape":"String"}, + "SnapshotType":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBSubnetGroupsMessage":{ + "type":"structure", + "members":{ + "DBSubnetGroupName":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeEngineDefaultParametersMessage":{ + "type":"structure", + "required":["DBParameterGroupFamily"], + "members":{ + "DBParameterGroupFamily":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeEngineDefaultParametersResult":{ + "type":"structure", + "members":{ + "EngineDefaults":{"shape":"EngineDefaults"} + } + }, + "DescribeEventCategoriesMessage":{ + "type":"structure", + "members":{ + "SourceType":{"shape":"String"} + } + }, + "DescribeEventSubscriptionsMessage":{ + "type":"structure", + "members":{ + "SubscriptionName":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeEventsMessage":{ + "type":"structure", + "members":{ + "SourceIdentifier":{"shape":"String"}, + "SourceType":{"shape":"SourceType"}, + "StartTime":{"shape":"TStamp"}, + "EndTime":{"shape":"TStamp"}, + "Duration":{"shape":"IntegerOptional"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeOptionGroupOptionsMessage":{ + "type":"structure", + "required":["EngineName"], + "members":{ + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeOptionGroupsMessage":{ + "type":"structure", + "members":{ + "OptionGroupName":{"shape":"String"}, + "Marker":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"} + } + }, + "DescribeOrderableDBInstanceOptionsMessage":{ + "type":"structure", + "required":["Engine"], + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "LicenseModel":{"shape":"String"}, + "Vpc":{"shape":"BooleanOptional"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeReservedDBInstancesMessage":{ + "type":"structure", + "members":{ + "ReservedDBInstanceId":{"shape":"String"}, + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Duration":{"shape":"String"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeReservedDBInstancesOfferingsMessage":{ + "type":"structure", + "members":{ + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + 
"Duration":{"shape":"String"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "Double":{"type":"double"}, + "DownloadDBLogFilePortionDetails":{ + "type":"structure", + "members":{ + "LogFileData":{"shape":"String"}, + "Marker":{"shape":"String"}, + "AdditionalDataPending":{"shape":"Boolean"} + } + }, + "DownloadDBLogFilePortionMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "LogFileName" + ], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "LogFileName":{"shape":"String"}, + "Marker":{"shape":"String"}, + "NumberOfLines":{"shape":"Integer"} + } + }, + "EC2SecurityGroup":{ + "type":"structure", + "members":{ + "Status":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupId":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"} + } + }, + "EC2SecurityGroupList":{ + "type":"list", + "member":{ + "shape":"EC2SecurityGroup", + "locationName":"EC2SecurityGroup" + } + }, + "Endpoint":{ + "type":"structure", + "members":{ + "Address":{"shape":"String"}, + "Port":{"shape":"Integer"} + } + }, + "EngineDefaults":{ + "type":"structure", + "members":{ + "DBParameterGroupFamily":{"shape":"String"}, + "Marker":{"shape":"String"}, + "Parameters":{"shape":"ParametersList"} + }, + "wrapper":true + }, + "Event":{ + "type":"structure", + "members":{ + "SourceIdentifier":{"shape":"String"}, + "SourceType":{"shape":"SourceType"}, + "Message":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "Date":{"shape":"TStamp"} + } + }, + "EventCategoriesList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"EventCategory" + } + }, + "EventCategoriesMap":{ + "type":"structure", + "members":{ + "SourceType":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"} + }, + "wrapper":true + }, + "EventCategoriesMapList":{ + "type":"list", + "member":{ + "shape":"EventCategoriesMap", + "locationName":"EventCategoriesMap" + } + }, + "EventCategoriesMessage":{ + "type":"structure", + "members":{ + "EventCategoriesMapList":{"shape":"EventCategoriesMapList"} + } + }, + "EventList":{ + "type":"list", + "member":{ + "shape":"Event", + "locationName":"Event" + } + }, + "EventSubscription":{ + "type":"structure", + "members":{ + "CustomerAwsId":{"shape":"String"}, + "CustSubscriptionId":{"shape":"String"}, + "SnsTopicArn":{"shape":"String"}, + "Status":{"shape":"String"}, + "SubscriptionCreationTime":{"shape":"String"}, + "SourceType":{"shape":"String"}, + "SourceIdsList":{"shape":"SourceIdsList"}, + "EventCategoriesList":{"shape":"EventCategoriesList"}, + "Enabled":{"shape":"Boolean"} + }, + "wrapper":true + }, + "EventSubscriptionQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"EventSubscriptionQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "EventSubscriptionsList":{ + "type":"list", + "member":{ + "shape":"EventSubscription", + "locationName":"EventSubscription" + } + }, + "EventSubscriptionsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "EventSubscriptionsList":{"shape":"EventSubscriptionsList"} + } + }, + "EventsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "Events":{"shape":"EventList"} + } + }, + "IPRange":{ + "type":"structure", + "members":{ + "Status":{"shape":"String"}, + 
"CIDRIP":{"shape":"String"} + } + }, + "IPRangeList":{ + "type":"list", + "member":{ + "shape":"IPRange", + "locationName":"IPRange" + } + }, + "InstanceQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InstanceQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InsufficientDBInstanceCapacityFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InsufficientDBInstanceCapacity", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Integer":{"type":"integer"}, + "IntegerOptional":{"type":"integer"}, + "InvalidDBInstanceStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBInstanceState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBParameterGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBParameterGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSecurityGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSecurityGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSnapshotStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSnapshotState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSubnetGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSubnetGroupStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSubnetStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSubnetStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidEventSubscriptionStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidEventSubscriptionState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidOptionGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidOptionGroupStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidRestoreFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidRestoreFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidSubnet":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidSubnet", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidVPCNetworkStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidVPCNetworkStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "KeyList":{ + "type":"list", + "member":{"shape":"String"} + }, + "ListTagsForResourceMessage":{ + "type":"structure", + "required":["ResourceName"], + "members":{ + "ResourceName":{"shape":"String"} + } + }, + "Long":{"type":"long"}, + "ModifyDBInstanceMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "AllocatedStorage":{"shape":"IntegerOptional"}, + "DBInstanceClass":{"shape":"String"}, + "DBSecurityGroups":{"shape":"DBSecurityGroupNameList"}, + "VpcSecurityGroupIds":{"shape":"VpcSecurityGroupIdList"}, + "ApplyImmediately":{"shape":"Boolean"}, + "MasterUserPassword":{"shape":"String"}, + "DBParameterGroupName":{"shape":"String"}, + 
"BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "PreferredBackupWindow":{"shape":"String"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "EngineVersion":{"shape":"String"}, + "AllowMajorVersionUpgrade":{"shape":"Boolean"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "NewDBInstanceIdentifier":{"shape":"String"} + } + }, + "ModifyDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "ModifyDBParameterGroupMessage":{ + "type":"structure", + "required":[ + "DBParameterGroupName", + "Parameters" + ], + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "Parameters":{"shape":"ParametersList"} + } + }, + "ModifyDBSubnetGroupMessage":{ + "type":"structure", + "required":[ + "DBSubnetGroupName", + "SubnetIds" + ], + "members":{ + "DBSubnetGroupName":{"shape":"String"}, + "DBSubnetGroupDescription":{"shape":"String"}, + "SubnetIds":{"shape":"SubnetIdentifierList"} + } + }, + "ModifyDBSubnetGroupResult":{ + "type":"structure", + "members":{ + "DBSubnetGroup":{"shape":"DBSubnetGroup"} + } + }, + "ModifyEventSubscriptionMessage":{ + "type":"structure", + "required":["SubscriptionName"], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SnsTopicArn":{"shape":"String"}, + "SourceType":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "Enabled":{"shape":"BooleanOptional"} + } + }, + "ModifyEventSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "ModifyOptionGroupMessage":{ + "type":"structure", + "required":["OptionGroupName"], + "members":{ + "OptionGroupName":{"shape":"String"}, + "OptionsToInclude":{"shape":"OptionConfigurationList"}, + "OptionsToRemove":{"shape":"OptionNamesList"}, + "ApplyImmediately":{"shape":"Boolean"} + } + }, + "ModifyOptionGroupResult":{ + "type":"structure", + "members":{ + "OptionGroup":{"shape":"OptionGroup"} + } + }, + "Option":{ + "type":"structure", + "members":{ + "OptionName":{"shape":"String"}, + "OptionDescription":{"shape":"String"}, + "Persistent":{"shape":"Boolean"}, + "Port":{"shape":"IntegerOptional"}, + "OptionSettings":{"shape":"OptionSettingConfigurationList"}, + "DBSecurityGroupMemberships":{"shape":"DBSecurityGroupMembershipList"}, + "VpcSecurityGroupMemberships":{"shape":"VpcSecurityGroupMembershipList"} + } + }, + "OptionConfiguration":{ + "type":"structure", + "required":["OptionName"], + "members":{ + "OptionName":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "DBSecurityGroupMemberships":{"shape":"DBSecurityGroupNameList"}, + "VpcSecurityGroupMemberships":{"shape":"VpcSecurityGroupIdList"}, + "OptionSettings":{"shape":"OptionSettingsList"} + } + }, + "OptionConfigurationList":{ + "type":"list", + "member":{ + "shape":"OptionConfiguration", + "locationName":"OptionConfiguration" + } + }, + "OptionGroup":{ + "type":"structure", + "members":{ + "OptionGroupName":{"shape":"String"}, + "OptionGroupDescription":{"shape":"String"}, + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"}, + "Options":{"shape":"OptionsList"}, + "AllowsVpcAndNonVpcInstanceMemberships":{"shape":"Boolean"}, + "VpcId":{"shape":"String"} + }, + "wrapper":true + }, + "OptionGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"OptionGroupAlreadyExistsFault", + "httpStatusCode":400, + "senderFault":true + }, + 
"exception":true + }, + "OptionGroupMembership":{ + "type":"structure", + "members":{ + "OptionGroupName":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "OptionGroupMembershipList":{ + "type":"list", + "member":{ + "shape":"OptionGroupMembership", + "locationName":"OptionGroupMembership" + } + }, + "OptionGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"OptionGroupNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "OptionGroupOption":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "Description":{"shape":"String"}, + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"}, + "MinimumRequiredMinorEngineVersion":{"shape":"String"}, + "PortRequired":{"shape":"Boolean"}, + "DefaultPort":{"shape":"IntegerOptional"}, + "OptionsDependedOn":{"shape":"OptionsDependedOn"}, + "Persistent":{"shape":"Boolean"}, + "OptionGroupOptionSettings":{"shape":"OptionGroupOptionSettingsList"} + } + }, + "OptionGroupOptionSetting":{ + "type":"structure", + "members":{ + "SettingName":{"shape":"String"}, + "SettingDescription":{"shape":"String"}, + "DefaultValue":{"shape":"String"}, + "ApplyType":{"shape":"String"}, + "AllowedValues":{"shape":"String"}, + "IsModifiable":{"shape":"Boolean"} + } + }, + "OptionGroupOptionSettingsList":{ + "type":"list", + "member":{ + "shape":"OptionGroupOptionSetting", + "locationName":"OptionGroupOptionSetting" + } + }, + "OptionGroupOptionsList":{ + "type":"list", + "member":{ + "shape":"OptionGroupOption", + "locationName":"OptionGroupOption" + } + }, + "OptionGroupOptionsMessage":{ + "type":"structure", + "members":{ + "OptionGroupOptions":{"shape":"OptionGroupOptionsList"}, + "Marker":{"shape":"String"} + } + }, + "OptionGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"OptionGroupQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "OptionGroups":{ + "type":"structure", + "members":{ + "OptionGroupsList":{"shape":"OptionGroupsList"}, + "Marker":{"shape":"String"} + } + }, + "OptionGroupsList":{ + "type":"list", + "member":{ + "shape":"OptionGroup", + "locationName":"OptionGroup" + } + }, + "OptionNamesList":{ + "type":"list", + "member":{"shape":"String"} + }, + "OptionSetting":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "Value":{"shape":"String"}, + "DefaultValue":{"shape":"String"}, + "Description":{"shape":"String"}, + "ApplyType":{"shape":"String"}, + "DataType":{"shape":"String"}, + "AllowedValues":{"shape":"String"}, + "IsModifiable":{"shape":"Boolean"}, + "IsCollection":{"shape":"Boolean"} + } + }, + "OptionSettingConfigurationList":{ + "type":"list", + "member":{ + "shape":"OptionSetting", + "locationName":"OptionSetting" + } + }, + "OptionSettingsList":{ + "type":"list", + "member":{ + "shape":"OptionSetting", + "locationName":"OptionSetting" + } + }, + "OptionsDependedOn":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"OptionName" + } + }, + "OptionsList":{ + "type":"list", + "member":{ + "shape":"Option", + "locationName":"Option" + } + }, + "OrderableDBInstanceOption":{ + "type":"structure", + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "LicenseModel":{"shape":"String"}, + "AvailabilityZones":{"shape":"AvailabilityZoneList"}, + "MultiAZCapable":{"shape":"Boolean"}, + "ReadReplicaCapable":{"shape":"Boolean"}, + 
"Vpc":{"shape":"Boolean"} + }, + "wrapper":true + }, + "OrderableDBInstanceOptionsList":{ + "type":"list", + "member":{ + "shape":"OrderableDBInstanceOption", + "locationName":"OrderableDBInstanceOption" + } + }, + "OrderableDBInstanceOptionsMessage":{ + "type":"structure", + "members":{ + "OrderableDBInstanceOptions":{"shape":"OrderableDBInstanceOptionsList"}, + "Marker":{"shape":"String"} + } + }, + "Parameter":{ + "type":"structure", + "members":{ + "ParameterName":{"shape":"String"}, + "ParameterValue":{"shape":"String"}, + "Description":{"shape":"String"}, + "Source":{"shape":"String"}, + "ApplyType":{"shape":"String"}, + "DataType":{"shape":"String"}, + "AllowedValues":{"shape":"String"}, + "IsModifiable":{"shape":"Boolean"}, + "MinimumEngineVersion":{"shape":"String"}, + "ApplyMethod":{"shape":"ApplyMethod"} + } + }, + "ParametersList":{ + "type":"list", + "member":{ + "shape":"Parameter", + "locationName":"Parameter" + } + }, + "PendingModifiedValues":{ + "type":"structure", + "members":{ + "DBInstanceClass":{"shape":"String"}, + "AllocatedStorage":{"shape":"IntegerOptional"}, + "MasterUserPassword":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "EngineVersion":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "DBInstanceIdentifier":{"shape":"String"} + } + }, + "PointInTimeRestoreNotEnabledFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"PointInTimeRestoreNotEnabled", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "PromoteReadReplicaMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "PreferredBackupWindow":{"shape":"String"} + } + }, + "PromoteReadReplicaResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "ProvisionedIopsNotAvailableInAZFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ProvisionedIopsNotAvailableInAZFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "PurchaseReservedDBInstancesOfferingMessage":{ + "type":"structure", + "required":["ReservedDBInstancesOfferingId"], + "members":{ + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "ReservedDBInstanceId":{"shape":"String"}, + "DBInstanceCount":{"shape":"IntegerOptional"} + } + }, + "PurchaseReservedDBInstancesOfferingResult":{ + "type":"structure", + "members":{ + "ReservedDBInstance":{"shape":"ReservedDBInstance"} + } + }, + "ReadReplicaDBInstanceIdentifierList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ReadReplicaDBInstanceIdentifier" + } + }, + "RebootDBInstanceMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "ForceFailover":{"shape":"BooleanOptional"} + } + }, + "RebootDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "RecurringCharge":{ + "type":"structure", + "members":{ + "RecurringChargeAmount":{"shape":"Double"}, + "RecurringChargeFrequency":{"shape":"String"} + }, + "wrapper":true + }, + "RecurringChargeList":{ + "type":"list", + "member":{ + "shape":"RecurringCharge", + "locationName":"RecurringCharge" + } + }, + "RemoveSourceIdentifierFromSubscriptionMessage":{ + "type":"structure", + "required":[ + "SubscriptionName", + "SourceIdentifier" 
+ ], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SourceIdentifier":{"shape":"String"} + } + }, + "RemoveSourceIdentifierFromSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "RemoveTagsFromResourceMessage":{ + "type":"structure", + "required":[ + "ResourceName", + "TagKeys" + ], + "members":{ + "ResourceName":{"shape":"String"}, + "TagKeys":{"shape":"KeyList"} + } + }, + "ReservedDBInstance":{ + "type":"structure", + "members":{ + "ReservedDBInstanceId":{"shape":"String"}, + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "StartTime":{"shape":"TStamp"}, + "Duration":{"shape":"Integer"}, + "FixedPrice":{"shape":"Double"}, + "UsagePrice":{"shape":"Double"}, + "CurrencyCode":{"shape":"String"}, + "DBInstanceCount":{"shape":"Integer"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MultiAZ":{"shape":"Boolean"}, + "State":{"shape":"String"}, + "RecurringCharges":{"shape":"RecurringChargeList"} + }, + "wrapper":true + }, + "ReservedDBInstanceAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedDBInstanceAlreadyExists", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ReservedDBInstanceList":{ + "type":"list", + "member":{ + "shape":"ReservedDBInstance", + "locationName":"ReservedDBInstance" + } + }, + "ReservedDBInstanceMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ReservedDBInstances":{"shape":"ReservedDBInstanceList"} + } + }, + "ReservedDBInstanceNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedDBInstanceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ReservedDBInstanceQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedDBInstanceQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ReservedDBInstancesOffering":{ + "type":"structure", + "members":{ + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Duration":{"shape":"Integer"}, + "FixedPrice":{"shape":"Double"}, + "UsagePrice":{"shape":"Double"}, + "CurrencyCode":{"shape":"String"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MultiAZ":{"shape":"Boolean"}, + "RecurringCharges":{"shape":"RecurringChargeList"} + }, + "wrapper":true + }, + "ReservedDBInstancesOfferingList":{ + "type":"list", + "member":{ + "shape":"ReservedDBInstancesOffering", + "locationName":"ReservedDBInstancesOffering" + } + }, + "ReservedDBInstancesOfferingMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ReservedDBInstancesOfferings":{"shape":"ReservedDBInstancesOfferingList"} + } + }, + "ReservedDBInstancesOfferingNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedDBInstancesOfferingNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ResetDBParameterGroupMessage":{ + "type":"structure", + "required":["DBParameterGroupName"], + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "ResetAllParameters":{"shape":"Boolean"}, + "Parameters":{"shape":"ParametersList"} + } + }, + "RestoreDBInstanceFromDBSnapshotMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "DBSnapshotIdentifier" + ], + "members":{ + 
"DBInstanceIdentifier":{"shape":"String"}, + "DBSnapshotIdentifier":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "AvailabilityZone":{"shape":"String"}, + "DBSubnetGroupName":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "PubliclyAccessible":{"shape":"BooleanOptional"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "LicenseModel":{"shape":"String"}, + "DBName":{"shape":"String"}, + "Engine":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"} + } + }, + "RestoreDBInstanceFromDBSnapshotResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "RestoreDBInstanceToPointInTimeMessage":{ + "type":"structure", + "required":[ + "SourceDBInstanceIdentifier", + "TargetDBInstanceIdentifier" + ], + "members":{ + "SourceDBInstanceIdentifier":{"shape":"String"}, + "TargetDBInstanceIdentifier":{"shape":"String"}, + "RestoreTime":{"shape":"TStamp"}, + "UseLatestRestorableTime":{"shape":"Boolean"}, + "DBInstanceClass":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "AvailabilityZone":{"shape":"String"}, + "DBSubnetGroupName":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "PubliclyAccessible":{"shape":"BooleanOptional"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "LicenseModel":{"shape":"String"}, + "DBName":{"shape":"String"}, + "Engine":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"} + } + }, + "RestoreDBInstanceToPointInTimeResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "RevokeDBSecurityGroupIngressMessage":{ + "type":"structure", + "required":["DBSecurityGroupName"], + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "CIDRIP":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupId":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"} + } + }, + "RevokeDBSecurityGroupIngressResult":{ + "type":"structure", + "members":{ + "DBSecurityGroup":{"shape":"DBSecurityGroup"} + } + }, + "SNSInvalidTopicFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SNSInvalidTopic", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SNSNoAuthorizationFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SNSNoAuthorization", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SNSTopicArnNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SNSTopicArnNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SnapshotQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SnapshotQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SourceIdsList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SourceId" + } + }, + "SourceNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SourceType":{ + "type":"string", + "enum":[ + "db-instance", + "db-parameter-group", + "db-security-group", + "db-snapshot" + ] + }, + "StorageQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"StorageQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "String":{"type":"string"}, + 
"Subnet":{ + "type":"structure", + "members":{ + "SubnetIdentifier":{"shape":"String"}, + "SubnetAvailabilityZone":{"shape":"AvailabilityZone"}, + "SubnetStatus":{"shape":"String"} + } + }, + "SubnetAlreadyInUse":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubnetAlreadyInUse", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SubnetIdentifierList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SubnetIdentifier" + } + }, + "SubnetList":{ + "type":"list", + "member":{ + "shape":"Subnet", + "locationName":"Subnet" + } + }, + "SubscriptionAlreadyExistFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionAlreadyExist", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SubscriptionCategoryNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionCategoryNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SubscriptionNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SupportedCharacterSetsList":{ + "type":"list", + "member":{ + "shape":"CharacterSet", + "locationName":"CharacterSet" + } + }, + "TStamp":{"type":"timestamp"}, + "Tag":{ + "type":"structure", + "members":{ + "Key":{"shape":"String"}, + "Value":{"shape":"String"} + } + }, + "TagList":{ + "type":"list", + "member":{ + "shape":"Tag", + "locationName":"Tag" + } + }, + "TagListMessage":{ + "type":"structure", + "members":{ + "TagList":{"shape":"TagList"} + } + }, + "VpcSecurityGroupIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"VpcSecurityGroupId" + } + }, + "VpcSecurityGroupMembership":{ + "type":"structure", + "members":{ + "VpcSecurityGroupId":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "VpcSecurityGroupMembershipList":{ + "type":"list", + "member":{ + "shape":"VpcSecurityGroupMembership", + "locationName":"VpcSecurityGroupMembership" + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-02-12/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-02-12/docs-2.json new file mode 100644 index 000000000..31661545e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-02-12/docs-2.json @@ -0,0 +1,1796 @@ +{ + "version": "2.0", + "service": "Amazon Relational Database Service

    Amazon Relational Database Service (Amazon RDS) is a web service that makes it easier to set up, operate, and scale a relational database in the cloud. It provides cost-efficient, resizable capacity for an industry-standard relational database and manages common database administration tasks, freeing up developers to focus on what makes their applications and businesses unique.

    Amazon RDS gives you access to the capabilities of a familiar MySQL or Oracle database server. This means the code, applications, and tools you already use today with your existing MySQL or Oracle databases work with Amazon RDS without modification. Amazon RDS automatically backs up your database and maintains the database software that powers your DB Instance. Amazon RDS is flexible: you can scale your database instance's compute resources and storage capacity to meet your application's demand. As with all Amazon Web Services, there are no up-front investments, and you pay only for the resources you use.

    This is the Amazon RDS API Reference. It contains a comprehensive description of all Amazon RDS Query APIs and data types. Note that this API is asynchronous, and some actions may require polling to determine when an action has been applied. See the parameter description to determine whether a change is applied immediately, on the next instance reboot, or during the maintenance window. For more information on Amazon RDS concepts and usage scenarios, see the Amazon RDS User Guide.
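
    [Editor's sketch, not part of the vendored model: the short Go examples added through this file illustrate how the client that aws-sdk-go generates from models like this one is typically driven. All identifiers (region, instance names, group names, ARNs) are placeholder assumptions, and the vendored rds package tracks a newer API revision than this 2013-02-12 model, so treat these as sketches rather than authoritative usage.] A minimal client setup plus the polling pattern this paragraph describes:

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/rds"
        )

        func main() {
            // Credentials come from the default chain (env vars, shared
            // config, or an instance role); the region is a placeholder.
            sess := session.Must(session.NewSession(&aws.Config{
                Region: aws.String("us-east-1"),
            }))
            svc := rds.New(sess)

            // Because the API is asynchronous, poll DescribeDBInstances to
            // observe status transitions such as "creating" or "rebooting".
            out, err := svc.DescribeDBInstances(&rds.DescribeDBInstancesInput{
                DBInstanceIdentifier: aws.String("mydbinstance"), // placeholder
            })
            if err != nil {
                log.Fatal(err)
            }
            for _, db := range out.DBInstances {
                fmt.Println(aws.StringValue(db.DBInstanceIdentifier), aws.StringValue(db.DBInstanceStatus))
            }
        }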

    ", + "operations": { + "AddSourceIdentifierToSubscription": "

    Adds a source identifier to an existing RDS event notification subscription.

    ", + "AddTagsToResource": "

    Adds metadata tags to a DB Instance. These tags can also be used with cost allocation reporting to track cost associated with a DB Instance.

    ", + "AuthorizeDBSecurityGroupIngress": "

    Enables ingress to a DBSecurityGroup using one of two forms of authorization. First, EC2 or VPC Security Groups can be added to the DBSecurityGroup if the application using the database is running on EC2 or VPC instances. Second, IP ranges are available if the application accessing your database is running on the Internet. The required parameters for this API are one of CIDR range, EC2SecurityGroupId for VPC, or (EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId) for non-VPC.

    You cannot authorize ingress from an EC2 security group in one Region to an Amazon RDS DB Instance in another. You cannot authorize ingress from a VPC security group in one VPC to an Amazon RDS DB Instance in another.

    For an overview of CIDR ranges, go to the Wikipedia Tutorial.
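
    [Editor's sketch, assuming the svc client from the setup example above; the group name and CIDR range are placeholders.] The non-EC2 form of authorization, opening an IP range:

        func allowOfficeCIDR(svc *rds.RDS) error {
            // Authorize a CIDR range; for the EC2/VPC form, pass the
            // EC2SecurityGroup* fields instead of CIDRIP.
            _, err := svc.AuthorizeDBSecurityGroupIngress(&rds.AuthorizeDBSecurityGroupIngressInput{
                DBSecurityGroupName: aws.String("mydbsecuritygroup"),
                CIDRIP:              aws.String("203.0.113.0/24"),
            })
            return err
        }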

    ", + "CopyDBSnapshot": "

    Copies the specified DBSnapshot. The source DBSnapshot must be in the \"available\" state.

    ", + "CreateDBInstance": "

    Creates a new DB instance.

    ", + "CreateDBInstanceReadReplica": "

    Creates a DB Instance that acts as a Read Replica of a source DB Instance.

    All Read Replica DB Instances are created as Single-AZ deployments with backups disabled. All other DB Instance attributes (including DB Security Groups and DB Parameter Groups) are inherited from the source DB Instance, except as specified below.

    The source DB Instance must have backup retention enabled.

    ", + "CreateDBParameterGroup": "

    Creates a new DB Parameter Group.

    A DB Parameter Group is initially created with the default parameters for the database engine used by the DB Instance. To provide custom values for any of the parameters, you must modify the group after creating it using ModifyDBParameterGroup. Once you've created a DB Parameter Group, you need to associate it with your DB Instance using ModifyDBInstance. When you associate a new DB Parameter Group with a running DB Instance, you need to reboot the DB Instance for the new DB Parameter Group and associated settings to take effect.
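
    [Editor's sketch, assuming the svc client from the setup example; names and the parameter group family are placeholders.] The create-then-associate workflow just described:

        func newParameterGroup(svc *rds.RDS) error {
            // Create the group with engine defaults for the mysql5.6 family.
            if _, err := svc.CreateDBParameterGroup(&rds.CreateDBParameterGroupInput{
                DBParameterGroupName:   aws.String("myparamgroup"),
                DBParameterGroupFamily: aws.String("mysql5.6"),
                Description:            aws.String("custom settings"),
            }); err != nil {
                return err
            }
            // Associate it with an instance; a reboot is still required
            // before the new group's settings take effect.
            _, err := svc.ModifyDBInstance(&rds.ModifyDBInstanceInput{
                DBInstanceIdentifier: aws.String("mydbinstance"),
                DBParameterGroupName: aws.String("myparamgroup"),
            })
            return err
        }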

    ", + "CreateDBSecurityGroup": "

    Creates a new DB Security Group. DB Security Groups control access to a DB Instance.

    ", + "CreateDBSnapshot": "

    Creates a DBSnapshot. The source DBInstance must be in the \"available\" state.

    ", + "CreateDBSubnetGroup": "

    Creates a new DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the region.

    ", + "CreateEventSubscription": "

    Creates an RDS event notification subscription. This action requires a topic ARN (Amazon Resource Name) created by either the RDS console, the SNS console, or the SNS API. To obtain an ARN with SNS, you must create a topic in Amazon SNS and subscribe to the topic. The ARN is displayed in the SNS console.

    You can specify the type of source (SourceType) you want to be notified of, provide a list of RDS sources (SourceIds) that trigger the events, and provide a list of event categories (EventCategories) for events you want to be notified of. For example, you can specify SourceType = db-instance, SourceIds = mydbinstance1, mydbinstance2 and EventCategories = Availability, Backup.

    If you specify both the SourceType and SourceIds, such as SourceType = db-instance and SourceIdentifier = myDBInstance1, you will be notified of all the db-instance events for the specified source. If you specify a SourceType but do not specify a SourceIdentifier, you will receive notice of the events for that source type for all your RDS sources. If you specify neither the SourceType nor the SourceIdentifier, you will be notified of events generated from all RDS sources belonging to your customer account.
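
    [Editor's sketch, assuming the svc client from the setup example and an existing SNS topic; every name and the ARN are placeholders.] The SourceType/SourceIds/EventCategories combination from the example above:

        func subscribe(svc *rds.RDS) error {
            _, err := svc.CreateEventSubscription(&rds.CreateEventSubscriptionInput{
                SubscriptionName: aws.String("mysubscription"),
                SnsTopicArn:      aws.String("arn:aws:sns:us-east-1:123456789012:mytopic"),
                SourceType:       aws.String("db-instance"),
                SourceIds:        []*string{aws.String("mydbinstance1"), aws.String("mydbinstance2")},
                EventCategories:  []*string{aws.String("availability"), aws.String("backup")},
                Enabled:          aws.Bool(true),
            })
            return err
        }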

    ", + "CreateOptionGroup": "

    Creates a new Option Group. You can create up to 20 option groups.

    ", + "DeleteDBInstance": "

    The DeleteDBInstance action deletes a previously provisioned DB instance. A successful response from the web service indicates the request was received correctly. When you delete a DB instance, all automated backups for that instance are deleted and cannot be recovered. Manual DB Snapshots of the DB instance to be deleted are not deleted.

    If a final DBSnapshot is requested, the status of the RDS instance will be \"deleting\" until the DBSnapshot is created. The DescribeDBInstances action can be used to monitor the status of this operation. The operation cannot be canceled or reverted once submitted.

    ", + "DeleteDBParameterGroup": "

    Deletes a specified DBParameterGroup. The DBParameterGroup to be deleted cannot be associated with any RDS instances.

    The specified DB Parameter Group cannot be associated with any DB Instances. ", + "DeleteDBSecurityGroup": "

    Deletes a DB Security Group.

    The specified DB Security Group must not be associated with any DB Instances.", + "DeleteDBSnapshot": "

    Deletes a DBSnapshot.

    The DBSnapshot must be in the available state to be deleted.", + "DeleteDBSubnetGroup": "

    Deletes a DB subnet group.

    The specified database subnet group must not be associated with any DB instances.", + "DeleteEventSubscription": "

    Deletes an RDS event notification subscription.

    ", + "DeleteOptionGroup": "

    Deletes an existing Option Group.

    ", + "DescribeDBEngineVersions": "

    Returns a list of the available DB engines.

    ", + "DescribeDBInstances": "

    Returns information about provisioned RDS instances. This API supports pagination.

    ", + "DescribeDBLogFiles": "

    Returns a list of DB log files for the DB instance.

    ", + "DescribeDBParameterGroups": "

    Returns a list of DBParameterGroup descriptions. If a DBParameterGroupName is specified, the list will contain only the description of the specified DBParameterGroup.

    ", + "DescribeDBParameters": "

    Returns the detailed parameter list for a particular DBParameterGroup.

    ", + "DescribeDBSecurityGroups": "

    Returns a list of DBSecurityGroup descriptions. If a DBSecurityGroupName is specified, the list will contain only the descriptions of the specified DBSecurityGroup.

    For an overview of CIDR ranges, go to the Wikipedia Tutorial.

    ", + "DescribeDBSnapshots": "

    Returns information about DBSnapshots. This API supports pagination.

    ", + "DescribeDBSubnetGroups": "

    Returns a list of DBSubnetGroup descriptions. If a DBSubnetGroupName is specified, the list will contain only the descriptions of the specified DBSubnetGroup.

    For an overview of CIDR ranges, go to the Wikipedia Tutorial.

    ", + "DescribeEngineDefaultParameters": "

    Returns the default engine and system parameter information for the specified database engine.

    ", + "DescribeEventCategories": "

    Displays a list of categories for all event source types, or, if specified, for a specified source type. You can see a list of the event categories and source types in the Events topic in the Amazon RDS User Guide.

    ", + "DescribeEventSubscriptions": "

    Lists all the subscription descriptions for a customer account. The description for a subscription includes SubscriptionName, SNSTopicARN, CustomerID, SourceType, SourceID, CreationTime, and Status.

    If you specify a SubscriptionName, lists the description for that subscription.

    ", + "DescribeEvents": "

    Returns events related to DB Instances, DB Security Groups, DB Snapshots, and DB Parameter Groups for the past 14 days. Events specific to a particular DB Instance, DB Security Group, DB Snapshot, or DB Parameter Group can be obtained by providing the name as a parameter. By default, the past hour of events is returned.
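
    [Editor's sketch, assuming the svc client from the setup example.] Widening the default one-hour window to a full day for one instance:

        func recentEvents(svc *rds.RDS) ([]*rds.Event, error) {
            out, err := svc.DescribeEvents(&rds.DescribeEventsInput{
                SourceType:       aws.String("db-instance"),
                SourceIdentifier: aws.String("mydbinstance"), // placeholder
                Duration:         aws.Int64(1440),            // minutes, i.e. 24 hours
            })
            if err != nil {
                return nil, err
            }
            return out.Events, nil
        }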

    ", + "DescribeOptionGroupOptions": "

    Describes all available options.

    ", + "DescribeOptionGroups": "

    Describes the available option groups.

    ", + "DescribeOrderableDBInstanceOptions": "

    Returns a list of orderable DB Instance options for the specified engine.

    ", + "DescribeReservedDBInstances": "

    Returns information about reserved DB Instances for this account, or about a specified reserved DB Instance.

    ", + "DescribeReservedDBInstancesOfferings": "

    Lists available reserved DB Instance offerings.

    ", + "DownloadDBLogFilePortion": "

    Downloads all or a portion of the specified log file.
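
    [Editor's sketch, assuming the svc client from the setup example.] Paging through a log file with Marker until AdditionalDataPending reports no more data:

        func fetchLog(svc *rds.RDS, instance, file string) (string, error) {
            var buf bytes.Buffer // from the standard "bytes" package
            var marker *string
            for {
                out, err := svc.DownloadDBLogFilePortion(&rds.DownloadDBLogFilePortionInput{
                    DBInstanceIdentifier: aws.String(instance),
                    LogFileName:          aws.String(file),
                    Marker:               marker, // nil on the first call
                })
                if err != nil {
                    return "", err
                }
                buf.WriteString(aws.StringValue(out.LogFileData))
                if !aws.BoolValue(out.AdditionalDataPending) {
                    return buf.String(), nil
                }
                marker = out.Marker
            }
        }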

    ", + "ListTagsForResource": "

    Lists all tags on a DB Instance.

    ", + "ModifyDBInstance": "

    Modifies settings for a DB Instance. You can change one or more database configuration parameters by specifying these parameters and the new values in the request.

    ", + "ModifyDBParameterGroup": "

    Modifies the parameters of a DBParameterGroup. To modify more than one parameter, submit a list of the following: ParameterName, ParameterValue, and ApplyMethod. A maximum of 20 parameters can be modified in a single request.

    The apply-immediate method can be used only for dynamic parameters; the pending-reboot method can be used with MySQL and Oracle DB Instances for either dynamic or static parameters. For Microsoft SQL Server DB Instances, the pending-reboot method can be used only for static parameters.
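
    [Editor's sketch, assuming the svc client from the setup example; parameter names and values are placeholders for a MySQL instance.] One dynamic and one static parameter, with ApplyMethod chosen per the rules above:

        func tuneParameters(svc *rds.RDS) error {
            _, err := svc.ModifyDBParameterGroup(&rds.ModifyDBParameterGroupInput{
                DBParameterGroupName: aws.String("myparamgroup"),
                Parameters: []*rds.Parameter{
                    {
                        ParameterName:  aws.String("max_connections"), // dynamic: may apply immediately
                        ParameterValue: aws.String("250"),
                        ApplyMethod:    aws.String("immediate"),
                    },
                    {
                        ParameterName:  aws.String("innodb_buffer_pool_size"), // static: needs a reboot
                        ParameterValue: aws.String("134217728"),
                        ApplyMethod:    aws.String("pending-reboot"),
                    },
                },
            })
            return err
        }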

    ", + "ModifyDBSubnetGroup": "

    Modifies an existing DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the region.

    ", + "ModifyEventSubscription": "

    Modifies an existing RDS event notification subscription. Note that you cannot modify the source identifiers using this call; to change source identifiers for a subscription, use the AddSourceIdentifierToSubscription and RemoveSourceIdentifierFromSubscription calls.

    You can see a list of the event categories for a given SourceType in the Events topic in the Amazon RDS User Guide or by using the DescribeEventCategories action.

    ", + "ModifyOptionGroup": "

    Modifies an existing Option Group.

    ", + "PromoteReadReplica": "

    Promotes a Read Replica DB Instance to a standalone DB Instance.

    ", + "PurchaseReservedDBInstancesOffering": "

    Purchases a reserved DB Instance offering.

    ", + "RebootDBInstance": "

    Reboots a previously provisioned RDS instance. This API results in the application of modified DBParameterGroup parameters with ApplyStatus of pending-reboot to the RDS instance. This action is taken as soon as possible, and results in a momentary outage to the RDS instance during which the RDS instance status is set to rebooting. If the RDS instance is configured for MultiAZ, it is possible that the reboot will be conducted through a failover. A DBInstance event is created when the reboot is completed.
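
    [Editor's sketch, assuming the svc client from the setup example.] Forcing the reboot through a Multi-AZ failover:

        func rebootWithFailover(svc *rds.RDS) error {
            _, err := svc.RebootDBInstance(&rds.RebootDBInstanceInput{
                DBInstanceIdentifier: aws.String("mydbinstance"), // placeholder
                ForceFailover:        aws.Bool(true),             // valid only for Multi-AZ instances
            })
            return err
        }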

    ", + "RemoveSourceIdentifierFromSubscription": "

    Removes a source identifier from an existing RDS event notification subscription.

    ", + "RemoveTagsFromResource": "

    Removes metadata tags from a DB Instance.

    ", + "ResetDBParameterGroup": "

    Resets the parameters of a DBParameterGroup to the engine/system default values. To reset specific parameters, submit a list of the following: ParameterName and ApplyMethod. To reset the entire DBParameterGroup, specify the DBParameterGroup name and the ResetAllParameters parameter. When resetting the entire group, dynamic parameters are updated immediately and static parameters are set to pending-reboot to take effect on the next DB instance restart or RebootDBInstance request.

    ", + "RestoreDBInstanceFromDBSnapshot": "

    Creates a new DB Instance from a DB snapshot. The target database is created from the source database restore point with the same configuration as the original source database, except that the new RDS instance is created with the default security group.

    ", + "RestoreDBInstanceToPointInTime": "

    Restores a DB Instance to an arbitrary point-in-time. Users can restore to any point in time before the latestRestorableTime for up to backupRetentionPeriod days. The target database is created from the source database with the same configuration as the original database except that the DB instance is created with the default DB security group.
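
    [Editor's sketch, assuming the svc client from the setup example; identifiers are placeholders.] Restoring to the latest restorable time; pass RestoreTime instead for an explicit point:

        func restoreLatest(svc *rds.RDS) error {
            _, err := svc.RestoreDBInstanceToPointInTime(&rds.RestoreDBInstanceToPointInTimeInput{
                SourceDBInstanceIdentifier: aws.String("mydbinstance"),
                TargetDBInstanceIdentifier: aws.String("mydbinstance-restored"),
                UseLatestRestorableTime:    aws.Bool(true),
            })
            return err
        }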

    ", + "RevokeDBSecurityGroupIngress": "

    Revokes ingress from a DBSecurityGroup for previously authorized IP ranges or EC2 or VPC Security Groups. Required parameters for this API are one of CIDRIP, EC2SecurityGroupId for VPC, or (EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId).

    " + }, + "shapes": { + "AddSourceIdentifierToSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "AddSourceIdentifierToSubscriptionResult": { + "base": null, + "refs": { + } + }, + "AddTagsToResourceMessage": { + "base": "

    ", + "refs": { + } + }, + "ApplyMethod": { + "base": null, + "refs": { + "Parameter$ApplyMethod": "

    Indicates when to apply parameter updates.

    " + } + }, + "AuthorizationAlreadyExistsFault": { + "base": "

    The specified CIDRIP or EC2 security group is already authorized for the specified DB security group.

    ", + "refs": { + } + }, + "AuthorizationNotFoundFault": { + "base": "

    Specified CIDRIP or EC2 security group is not authorized for the specified DB security group.

    RDS may also not be authorized via IAM to perform the necessary actions on your behalf.

    ", + "refs": { + } + }, + "AuthorizationQuotaExceededFault": { + "base": "

    DB security group authorization quota has been reached.

    ", + "refs": { + } + }, + "AuthorizeDBSecurityGroupIngressMessage": { + "base": "

    ", + "refs": { + } + }, + "AuthorizeDBSecurityGroupIngressResult": { + "base": null, + "refs": { + } + }, + "AvailabilityZone": { + "base": "

    Contains Availability Zone information.

    This data type is used as an element in the following data type:

    ", + "refs": { + "AvailabilityZoneList$member": null, + "Subnet$SubnetAvailabilityZone": null + } + }, + "AvailabilityZoneList": { + "base": null, + "refs": { + "OrderableDBInstanceOption$AvailabilityZones": "

    A list of availability zones for the orderable DB Instance.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "AvailabilityZone$ProvisionedIopsCapable": "

    True indicates the availability zone is capable of provisioned IOPs.

    ", + "DBInstance$MultiAZ": "

    Specifies if the DB Instance is a Multi-AZ deployment.

    ", + "DBInstance$AutoMinorVersionUpgrade": "

    Indicates that minor version patches are applied automatically.

    ", + "DBInstance$PubliclyAccessible": "

    Specifies the accessibility options for the DB Instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

    Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

    • Default VPC:true
    • VPC:false

    If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private.

    ", + "DeleteDBInstanceMessage$SkipFinalSnapshot": "

    Determines whether a final DB Snapshot is created before the DB Instance is deleted. If true is specified, no DBSnapshot is created. If false is specified, a DB Snapshot is created before the DB Instance is deleted.

    The FinalDBSnapshotIdentifier parameter must be specified if SkipFinalSnapshot is false.

    Default: false

    ", + "DescribeDBEngineVersionsMessage$DefaultOnly": "

    Indicates that only the default version of the specified engine or engine and major version combination is returned.

    ", + "DownloadDBLogFilePortionDetails$AdditionalDataPending": "

    Boolean value that, if true, indicates there is more data to be downloaded.

    ", + "EventSubscription$Enabled": "

    A Boolean value indicating if the subscription is enabled. True indicates the subscription is enabled.

    ", + "ModifyDBInstanceMessage$ApplyImmediately": "

    Specifies whether or not the modifications in this request and any pending modifications are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the DB Instance.

    If this parameter is passed as false, changes to the DB Instance are applied on the next call to RebootDBInstance, the next maintenance reboot, or the next failure reboot, whichever occurs first. See each parameter to determine when a change is applied.

    Default: false
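
    [Editor's sketch, assuming the svc client from the setup example.] Leaving ApplyImmediately false so a storage change waits for the next maintenance window:

        func growStorageLater(svc *rds.RDS) error {
            _, err := svc.ModifyDBInstance(&rds.ModifyDBInstanceInput{
                DBInstanceIdentifier: aws.String("mydbinstance"), // placeholder
                AllocatedStorage:     aws.Int64(100),             // GiB
                ApplyImmediately:     aws.Bool(false),
            })
            return err
        }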

    ", + "ModifyDBInstanceMessage$AllowMajorVersionUpgrade": "

    Indicates that major version upgrades are allowed. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible.

    Constraints: This parameter must be set to true when specifying a value for the EngineVersion parameter that is a different major version than the DB Instance's current version.

    ", + "ModifyOptionGroupMessage$ApplyImmediately": "

    Indicates whether the changes should be applied immediately, or during the next maintenance window for each instance associated with the Option Group.

    ", + "Option$Persistent": "

    Indicates whether this option is persistent.

    ", + "OptionGroup$AllowsVpcAndNonVpcInstanceMemberships": "

    Indicates whether this option group can be applied to both VPC and non-VPC instances. The value 'true' indicates the option group can be applied to both VPC and non-VPC instances.

    ", + "OptionGroupOption$PortRequired": "

    Specifies whether the option requires a port.

    ", + "OptionGroupOption$Persistent": "

    Specifies whether the option is persistent in an option group.

    ", + "OptionGroupOptionSetting$IsModifiable": "

    Boolean value where true indicates that this option group option can be changed from the default value.

    ", + "OptionSetting$IsModifiable": "

    A Boolean value that, when true, indicates the option setting can be modified from the default.

    ", + "OptionSetting$IsCollection": "

    Indicates if the option setting is part of a collection.

    ", + "OrderableDBInstanceOption$MultiAZCapable": "

    Indicates whether this orderable DB Instance is multi-AZ capable.

    ", + "OrderableDBInstanceOption$ReadReplicaCapable": "

    Indicates whether this orderable DB Instance can have a read replica.

    ", + "OrderableDBInstanceOption$Vpc": "

    Indicates whether this is a VPC orderable DB Instance.

    ", + "Parameter$IsModifiable": "

    Indicates whether (true) or not (false) the parameter can be modified. Some parameters have security or operational implications that prevent them from being changed.

    ", + "ReservedDBInstance$MultiAZ": "

    Indicates if the reservation applies to Multi-AZ deployments.

    ", + "ReservedDBInstancesOffering$MultiAZ": "

    Indicates if the offering applies to Multi-AZ deployments.

    ", + "ResetDBParameterGroupMessage$ResetAllParameters": "

    Specifies whether (true) or not (false) to reset all parameters in the DB Parameter Group to default values.

    Default: true

    ", + "RestoreDBInstanceToPointInTimeMessage$UseLatestRestorableTime": "

    Specifies whether (true) or not (false) the DB Instance is restored from the latest backup time.

    Default: false

    Constraints: Cannot be specified if RestoreTime parameter is provided.

    " + } + }, + "BooleanOptional": { + "base": null, + "refs": { + "CreateDBInstanceMessage$MultiAZ": "

    Specifies if the DB Instance is a Multi-AZ deployment. You cannot set the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    ", + "CreateDBInstanceMessage$AutoMinorVersionUpgrade": "

    Indicates that minor engine upgrades will be applied automatically to the DB Instance during the maintenance window.

    Default: true

    ", + "CreateDBInstanceMessage$PubliclyAccessible": "

    Specifies the accessibility options for the DB Instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

    Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

    • Default VPC:true
    • VPC:false

    If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private.

    ", + "CreateDBInstanceReadReplicaMessage$AutoMinorVersionUpgrade": "

    Indicates that minor engine upgrades will be applied automatically to the Read Replica during the maintenance window.

    Default: Inherits from the source DB Instance

    ", + "CreateDBInstanceReadReplicaMessage$PubliclyAccessible": "

    Specifies the accessibility options for the DB Instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

    Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

    • Default VPC:true
    • VPC:false

    If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private.

    ", + "CreateEventSubscriptionMessage$Enabled": "

    A Boolean value; set to true to activate the subscription, set to false to create the subscription but not activate it.

    ", + "DescribeDBEngineVersionsMessage$ListSupportedCharacterSets": "

    If this parameter is specified, and if the requested engine supports the CharacterSetName parameter for CreateDBInstance, the response includes a list of supported character sets for each engine version.

    ", + "DescribeOrderableDBInstanceOptionsMessage$Vpc": "

    The VPC filter value. Specify this parameter to show only the available VPC or non-VPC offerings.

    ", + "DescribeReservedDBInstancesMessage$MultiAZ": "

    The Multi-AZ filter value. Specify this parameter to show only those reservations matching the specified Multi-AZ parameter.

    ", + "DescribeReservedDBInstancesOfferingsMessage$MultiAZ": "

    The Multi-AZ filter value. Specify this parameter to show only the available offerings matching the specified Multi-AZ parameter.

    ", + "ModifyDBInstanceMessage$MultiAZ": "

    Specifies if the DB Instance is a Multi-AZ deployment. Changing this parameter does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

    Constraints: Cannot be specified if the DB Instance is a read replica.

    ", + "ModifyDBInstanceMessage$AutoMinorVersionUpgrade": "

    Indicates that minor version upgrades will be applied automatically to the DB Instance during the maintenance window. Changing this parameter does not result in an outage except in the following case, and the change is asynchronously applied as soon as possible: an outage will result if this parameter is set to true during the maintenance window, a newer minor version is available, and RDS has enabled auto patching for that engine version.

    ", + "ModifyEventSubscriptionMessage$Enabled": "

    A Boolean value; set to true to activate the subscription.

    ", + "PendingModifiedValues$MultiAZ": "

    Indicates that the Single-AZ DB Instance is to change to a Multi-AZ deployment.

    ", + "RebootDBInstanceMessage$ForceFailover": "

    When true, the reboot will be conducted through a MultiAZ failover.

    Constraint: You cannot specify true if the instance is not configured for MultiAZ.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$MultiAZ": "

    Specifies if the DB Instance is a Multi-AZ deployment.

    Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$PubliclyAccessible": "

    Specifies the accessibility options for the DB Instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

    Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

    • Default VPC:true
    • VPC:false

    If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$AutoMinorVersionUpgrade": "

    Indicates that minor version upgrades will be applied automatically to the DB Instance during the maintenance window.

    ", + "RestoreDBInstanceToPointInTimeMessage$MultiAZ": "

    Specifies if the DB Instance is a Multi-AZ deployment.

    Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    ", + "RestoreDBInstanceToPointInTimeMessage$PubliclyAccessible": "

    Specifies the accessibility options for the DB Instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

    Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

    • Default VPC:true
    • VPC:false

    If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private.

    ", + "RestoreDBInstanceToPointInTimeMessage$AutoMinorVersionUpgrade": "

    Indicates that minor version upgrades will be applied automatically to the DB Instance during the maintenance window.

    " + } + }, + "CharacterSet": { + "base": "

    This data type is used as a response element in the action DescribeDBEngineVersions.

    ", + "refs": { + "DBEngineVersion$DefaultCharacterSet": "

    The default character set for new instances of this engine version, if the CharacterSetName parameter of the CreateDBInstance API is not specified.

    ", + "SupportedCharacterSetsList$member": null + } + }, + "CopyDBSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "CopyDBSnapshotResult": { + "base": null, + "refs": { + } + }, + "CreateDBInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBInstanceReadReplicaMessage": { + "base": null, + "refs": { + } + }, + "CreateDBInstanceReadReplicaResult": { + "base": null, + "refs": { + } + }, + "CreateDBInstanceResult": { + "base": null, + "refs": { + } + }, + "CreateDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBParameterGroupResult": { + "base": null, + "refs": { + } + }, + "CreateDBSecurityGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBSecurityGroupResult": { + "base": null, + "refs": { + } + }, + "CreateDBSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBSnapshotResult": { + "base": null, + "refs": { + } + }, + "CreateDBSubnetGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBSubnetGroupResult": { + "base": null, + "refs": { + } + }, + "CreateEventSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateEventSubscriptionResult": { + "base": null, + "refs": { + } + }, + "CreateOptionGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateOptionGroupResult": { + "base": null, + "refs": { + } + }, + "DBEngineVersion": { + "base": "

    This data type is used as a response element in the action DescribeDBEngineVersions.

    ", + "refs": { + "DBEngineVersionList$member": null + } + }, + "DBEngineVersionList": { + "base": null, + "refs": { + "DBEngineVersionMessage$DBEngineVersions": "

    A list of DBEngineVersion elements.

    " + } + }, + "DBEngineVersionMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBEngineVersions action.

    ", + "refs": { + } + }, + "DBInstance": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBInstances action.

    ", + "refs": { + "CreateDBInstanceReadReplicaResult$DBInstance": null, + "CreateDBInstanceResult$DBInstance": null, + "DBInstanceList$member": null, + "DeleteDBInstanceResult$DBInstance": null, + "ModifyDBInstanceResult$DBInstance": null, + "PromoteReadReplicaResult$DBInstance": null, + "RebootDBInstanceResult$DBInstance": null, + "RestoreDBInstanceFromDBSnapshotResult$DBInstance": null, + "RestoreDBInstanceToPointInTimeResult$DBInstance": null + } + }, + "DBInstanceAlreadyExistsFault": { + "base": "

    User already has a DB instance with the given identifier.

    ", + "refs": { + } + }, + "DBInstanceList": { + "base": null, + "refs": { + "DBInstanceMessage$DBInstances": "

    A list of DBInstance instances.

    " + } + }, + "DBInstanceMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBInstances action.

    ", + "refs": { + } + }, + "DBInstanceNotFoundFault": { + "base": "

    DBInstanceIdentifier does not refer to an existing DB instance.

    ", + "refs": { + } + }, + "DBLogFileNotFoundFault": { + "base": "

    LogFileName does not refer to an existing DB log file.

    ", + "refs": { + } + }, + "DBParameterGroup": { + "base": "

    Contains the result of a successful invocation of the CreateDBParameterGroup action.

    This data type is used as a request parameter in the DeleteDBParameterGroup action, and as a response element in the DescribeDBParameterGroups action.

    ", + "refs": { + "CreateDBParameterGroupResult$DBParameterGroup": null, + "DBParameterGroupList$member": null + } + }, + "DBParameterGroupAlreadyExistsFault": { + "base": "

    A DB parameter group with the same name exists.

    ", + "refs": { + } + }, + "DBParameterGroupDetails": { + "base": "

    Contains the result of a successful invocation of the DescribeDBParameters action.

    ", + "refs": { + } + }, + "DBParameterGroupList": { + "base": null, + "refs": { + "DBParameterGroupsMessage$DBParameterGroups": "

    A list of DBParameterGroup instances.

    " + } + }, + "DBParameterGroupNameMessage": { + "base": "

    Contains the result of a successful invocation of the ModifyDBParameterGroup or ResetDBParameterGroup action.

    ", + "refs": { + } + }, + "DBParameterGroupNotFoundFault": { + "base": "

    DBParameterGroupName does not refer to an existing DB parameter group.

    ", + "refs": { + } + }, + "DBParameterGroupQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB parameter groups.

    ", + "refs": { + } + }, + "DBParameterGroupStatus": { + "base": "

    The status of the DB Parameter Group.

    This data type is used as a response element in the following actions:

    ", + "refs": { + "DBParameterGroupStatusList$member": null + } + }, + "DBParameterGroupStatusList": { + "base": null, + "refs": { + "DBInstance$DBParameterGroups": "

    Provides the list of DB Parameter Groups applied to this DB Instance.

    " + } + }, + "DBParameterGroupsMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBParameterGroups action.

    ", + "refs": { + } + }, + "DBSecurityGroup": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBSecurityGroups action.

    ", + "refs": { + "AuthorizeDBSecurityGroupIngressResult$DBSecurityGroup": null, + "CreateDBSecurityGroupResult$DBSecurityGroup": null, + "DBSecurityGroups$member": null, + "RevokeDBSecurityGroupIngressResult$DBSecurityGroup": null + } + }, + "DBSecurityGroupAlreadyExistsFault": { + "base": "

    A DB security group with the name specified in DBSecurityGroupName already exists.

    ", + "refs": { + } + }, + "DBSecurityGroupMembership": { + "base": "

    This data type is used as a response element in the following actions:

    ", + "refs": { + "DBSecurityGroupMembershipList$member": null + } + }, + "DBSecurityGroupMembershipList": { + "base": null, + "refs": { + "DBInstance$DBSecurityGroups": "

    Provides a list of DB Security Group elements containing only DBSecurityGroup.Name and DBSecurityGroup.Status subelements.

    ", + "Option$DBSecurityGroupMemberships": "

    If the option requires access to a port, then this DB Security Group allows access to the port.

    " + } + }, + "DBSecurityGroupMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBSecurityGroups action.

    ", + "refs": { + } + }, + "DBSecurityGroupNameList": { + "base": null, + "refs": { + "CreateDBInstanceMessage$DBSecurityGroups": "

    A list of DB Security Groups to associate with this DB Instance.

    Default: The default DB Security Group for the database engine.

    ", + "ModifyDBInstanceMessage$DBSecurityGroups": "

    A list of DB Security Groups to authorize on this DB Instance. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "OptionConfiguration$DBSecurityGroupMemberships": "

    A list of DBSecurityGroupMembership name strings used for this option.

    " + } + }, + "DBSecurityGroupNotFoundFault": { + "base": "

    DBSecurityGroupName does not refer to an existing DB security group.

    ", + "refs": { + } + }, + "DBSecurityGroupNotSupportedFault": { + "base": "

    A DB security group is not allowed for this action.

    ", + "refs": { + } + }, + "DBSecurityGroupQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB security groups.

    ", + "refs": { + } + }, + "DBSecurityGroups": { + "base": null, + "refs": { + "DBSecurityGroupMessage$DBSecurityGroups": "

    A list of DBSecurityGroup instances.

    " + } + }, + "DBSnapshot": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBSnapshots action.

    ", + "refs": { + "CopyDBSnapshotResult$DBSnapshot": null, + "CreateDBSnapshotResult$DBSnapshot": null, + "DBSnapshotList$member": null, + "DeleteDBSnapshotResult$DBSnapshot": null + } + }, + "DBSnapshotAlreadyExistsFault": { + "base": "

    DBSnapshotIdentifier is already used by an existing snapshot.

    ", + "refs": { + } + }, + "DBSnapshotList": { + "base": null, + "refs": { + "DBSnapshotMessage$DBSnapshots": "

    A list of DBSnapshot instances.

    " + } + }, + "DBSnapshotMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBSnapshots action.

    ", + "refs": { + } + }, + "DBSnapshotNotFoundFault": { + "base": "

    DBSnapshotIdentifier does not refer to an existing DB snapshot.

    ", + "refs": { + } + }, + "DBSubnetGroup": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBSubnetGroups action.

    ", + "refs": { + "CreateDBSubnetGroupResult$DBSubnetGroup": null, + "DBInstance$DBSubnetGroup": "

    Provides information about the subnet group associated with the DB instance, including the name, description, and subnets in the subnet group.

    ", + "DBSubnetGroups$member": null, + "ModifyDBSubnetGroupResult$DBSubnetGroup": null + } + }, + "DBSubnetGroupAlreadyExistsFault": { + "base": "

    DBSubnetGroupName is already used by an existing DB subnet group.

    ", + "refs": { + } + }, + "DBSubnetGroupDoesNotCoverEnoughAZs": { + "base": "

    Subnets in the DB subnet group should cover at least two Availability Zones unless there is only one Availability Zone.

    ", + "refs": { + } + }, + "DBSubnetGroupMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBSubnetGroups action.

    ", + "refs": { + } + }, + "DBSubnetGroupNotFoundFault": { + "base": "

    DBSubnetGroupName does not refer to an existing DB subnet group.

    ", + "refs": { + } + }, + "DBSubnetGroupQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB subnet groups.

    ", + "refs": { + } + }, + "DBSubnetGroups": { + "base": null, + "refs": { + "DBSubnetGroupMessage$DBSubnetGroups": "

    A list of DBSubnetGroup instances.

    " + } + }, + "DBSubnetQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of subnets in a DB subnet group.

    ", + "refs": { + } + }, + "DBUpgradeDependencyFailureFault": { + "base": "

    The DB upgrade failed because a resource the DB depends on could not be modified.

    ", + "refs": { + } + }, + "DeleteDBInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBInstanceResult": { + "base": null, + "refs": { + } + }, + "DeleteDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBSecurityGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBSnapshotResult": { + "base": null, + "refs": { + } + }, + "DeleteDBSubnetGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteEventSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteEventSubscriptionResult": { + "base": null, + "refs": { + } + }, + "DeleteOptionGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBEngineVersionsMessage": { + "base": null, + "refs": { + } + }, + "DescribeDBInstancesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBLogFilesDetails": { + "base": "

    This data type is used as a response element to DescribeDBLogFiles.

    ", + "refs": { + "DescribeDBLogFilesList$member": null + } + }, + "DescribeDBLogFilesList": { + "base": null, + "refs": { + "DescribeDBLogFilesResponse$DescribeDBLogFiles": "

    The DB log files returned.

    " + } + }, + "DescribeDBLogFilesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBLogFilesResponse": { + "base": "

    The response from a call to DescribeDBLogFiles.

    ", + "refs": { + } + }, + "DescribeDBParameterGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBParametersMessage": { + "base": null, + "refs": { + } + }, + "DescribeDBSecurityGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBSnapshotsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBSubnetGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEngineDefaultParametersMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEngineDefaultParametersResult": { + "base": null, + "refs": { + } + }, + "DescribeEventCategoriesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEventSubscriptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEventsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeOptionGroupOptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeOptionGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeOrderableDBInstanceOptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeReservedDBInstancesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeReservedDBInstancesOfferingsMessage": { + "base": "

    ", + "refs": { + } + }, + "Double": { + "base": null, + "refs": { + "RecurringCharge$RecurringChargeAmount": "

    The amount of the recurring charge.

    ", + "ReservedDBInstance$FixedPrice": "

    The fixed price charged for this reserved DB Instance.

    ", + "ReservedDBInstance$UsagePrice": "

    The hourly price charged for this reserved DB Instance.

    ", + "ReservedDBInstancesOffering$FixedPrice": "

    The fixed price charged for this offering.

    ", + "ReservedDBInstancesOffering$UsagePrice": "

    The hourly price charged for this offering.

    " + } + }, + "DownloadDBLogFilePortionDetails": { + "base": "

    This data type is used as a response element to DownloadDBLogFilePortion.

    ", + "refs": { + } + }, + "DownloadDBLogFilePortionMessage": { + "base": "

    ", + "refs": { + } + }, + "EC2SecurityGroup": { + "base": "

    This data type is used as a response element in the following actions:

    ", + "refs": { + "EC2SecurityGroupList$member": null + } + }, + "EC2SecurityGroupList": { + "base": null, + "refs": { + "DBSecurityGroup$EC2SecurityGroups": "

    Contains a list of EC2SecurityGroup elements.

    " + } + }, + "Endpoint": { + "base": "

    This data type is used as a response element in the following actions:

    ", + "refs": { + "DBInstance$Endpoint": "

    Specifies the connection endpoint.

    " + } + }, + "EngineDefaults": { + "base": "

    Contains the result of a successful invocation of the DescribeEngineDefaultParameters action.

    ", + "refs": { + "DescribeEngineDefaultParametersResult$EngineDefaults": null + } + }, + "Event": { + "base": "

    This data type is used as a response element in the DescribeEvents action.

    ", + "refs": { + "EventList$member": null + } + }, + "EventCategoriesList": { + "base": null, + "refs": { + "CreateEventSubscriptionMessage$EventCategories": "

    A list of event categories for a SourceType that you want to subscribe to. You can see a list of the categories for a given SourceType in the Events topic in the Amazon RDS User Guide or by using the DescribeEventCategories action.

    ", + "DescribeEventsMessage$EventCategories": "

    A list of event categories that trigger notifications for an event notification subscription.

    ", + "Event$EventCategories": "

    Specifies the category for the event.

    ", + "EventCategoriesMap$EventCategories": "

    The event categories for the specified source type.

    ", + "EventSubscription$EventCategoriesList": "

    A list of event categories for the RDS event notification subscription.

    ", + "ModifyEventSubscriptionMessage$EventCategories": "

    A list of event categories for a SourceType that you want to subscribe to. You can see a list of the categories for a given SourceType in the Events topic in the Amazon RDS User Guide or by using the DescribeEventCategories action.

    " + } + }, + "EventCategoriesMap": { + "base": "

    Contains the results of a successful invocation of the DescribeEventCategories action.

    ", + "refs": { + "EventCategoriesMapList$member": null + } + }, + "EventCategoriesMapList": { + "base": null, + "refs": { + "EventCategoriesMessage$EventCategoriesMapList": "

    A list of EventCategoriesMap data types.

    " + } + }, + "EventCategoriesMessage": { + "base": "

    Data returned from the DescribeEventCategories action.

    ", + "refs": { + } + }, + "EventList": { + "base": null, + "refs": { + "EventsMessage$Events": "

    A list of Event instances.

    " + } + }, + "EventSubscription": { + "base": "

    Contains the results of a successful invocation of the DescribeEventSubscriptions action.

    ", + "refs": { + "AddSourceIdentifierToSubscriptionResult$EventSubscription": null, + "CreateEventSubscriptionResult$EventSubscription": null, + "DeleteEventSubscriptionResult$EventSubscription": null, + "EventSubscriptionsList$member": null, + "ModifyEventSubscriptionResult$EventSubscription": null, + "RemoveSourceIdentifierFromSubscriptionResult$EventSubscription": null + } + }, + "EventSubscriptionQuotaExceededFault": { + "base": "

    You have reached the maximum number of event subscriptions.

    ", + "refs": { + } + }, + "EventSubscriptionsList": { + "base": null, + "refs": { + "EventSubscriptionsMessage$EventSubscriptionsList": "

    A list of EventSubscriptions data types.

    " + } + }, + "EventSubscriptionsMessage": { + "base": "

    Data returned by the DescribeEventSubscriptions action.

    ", + "refs": { + } + }, + "EventsMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeEvents action.

    ", + "refs": { + } + }, + "IPRange": { + "base": "

    This data type is used as a response element in the DescribeDBSecurityGroups action.

    ", + "refs": { + "IPRangeList$member": null + } + }, + "IPRangeList": { + "base": null, + "refs": { + "DBSecurityGroup$IPRanges": "

    Contains a list of IPRange elements.

    " + } + }, + "InstanceQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB instances.

    ", + "refs": { + } + }, + "InsufficientDBInstanceCapacityFault": { + "base": "

    Specified DB instance class is not available in the specified Availability Zone.

    ", + "refs": { + } + }, + "Integer": { + "base": null, + "refs": { + "DBInstance$AllocatedStorage": "

    Specifies the allocated storage size in gigabytes.

    ", + "DBInstance$BackupRetentionPeriod": "

    Specifies the number of days for which automatic DB Snapshots are retained.

    ", + "DBSnapshot$AllocatedStorage": "

    Specifies the allocated storage size in gigabytes (GB).

    ", + "DBSnapshot$Port": "

    Specifies the port that the database engine was listening on at the time of the snapshot.

    ", + "DownloadDBLogFilePortionMessage$NumberOfLines": "

    The number of lines to download.

    ", + "Endpoint$Port": "

    Specifies the port that the database engine is listening on.

    ", + "ReservedDBInstance$Duration": "

    The duration of the reservation in seconds.

    ", + "ReservedDBInstance$DBInstanceCount": "

    The number of reserved DB Instances.

    ", + "ReservedDBInstancesOffering$Duration": "

    The duration of the offering in seconds.

    " + } + }, + "IntegerOptional": { + "base": null, + "refs": { + "CreateDBInstanceMessage$AllocatedStorage": "

    The amount of storage (in gigabytes) to be initially allocated for the database instance.

    MySQL

    Constraints: Must be an integer from 5 to 1024.

    Type: Integer

    Oracle

    Constraints: Must be an integer from 10 to 1024.

    SQL Server

    Constraints: Must be an integer from 200 to 1024 (Standard Edition and Enterprise Edition) or from 30 to 1024 (Express Edition and Web Edition)

    ", + "CreateDBInstanceMessage$BackupRetentionPeriod": "

    The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

    Default: 1

    Constraints:

    • Must be a value from 0 to 8
    • Cannot be set to 0 if the DB Instance is a master instance with read replicas
    ", + "CreateDBInstanceMessage$Port": "

    The port number on which the database accepts connections.

    MySQL

    Default: 3306

    Valid Values: 1150-65535

    Type: Integer

    Oracle

    Default: 1521

    Valid Values: 1150-65535

    SQL Server

    Default: 1433

    Valid Values: 1150-65535 except for 1434 and 3389.

    ", + "CreateDBInstanceMessage$Iops": "

    The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB Instance.

    Constraints: Must be an integer greater than 1000.

    ", + "CreateDBInstanceReadReplicaMessage$Port": "

    The port number that the DB Instance uses for connections.

    Default: Inherits from the source DB Instance

    Valid Values: 1150-65535

    ", + "CreateDBInstanceReadReplicaMessage$Iops": "

    The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB Instance.

    ", + "DBInstance$Iops": "

    Specifies the Provisioned IOPS (I/O operations per second) value.

    ", + "DBSnapshot$Iops": "

    Specifies the Provisioned IOPS (I/O operations per second) value of the DB Instance at the time of the snapshot.

    ", + "DescribeDBEngineVersionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so that the following results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeDBInstancesMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100
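
    To make the MaxRecords/marker contract concrete, here is a minimal paging sketch added for illustration (not part of the vendored model), assuming the aws-sdk-go rds client; the region and identifiers are placeholders:

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/rds"
        )

        func main() {
            // Region is illustrative; credentials come from the default chain.
            svc := rds.New(session.Must(session.NewSession(
                aws.NewConfig().WithRegion("us-east-1"))))

            in := &rds.DescribeDBInstancesInput{MaxRecords: aws.Int64(20)} // minimum allowed page size
            for {
                out, err := svc.DescribeDBInstances(in)
                if err != nil {
                    log.Fatal(err)
                }
                for _, db := range out.DBInstances {
                    fmt.Println(aws.StringValue(db.DBInstanceIdentifier))
                }
                if out.Marker == nil { // an absent marker means this was the last page
                    break
                }
                in.Marker = out.Marker // pass the marker back to fetch the next page
            }
        }

    The same MaxRecords/Marker loop applies to every Describe* operation documented in this file.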

    ", + "DescribeDBLogFilesMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    ", + "DescribeDBParameterGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeDBParametersMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeDBSecurityGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeDBSnapshotsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeDBSubnetGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeEngineDefaultParametersMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeEventSubscriptionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeEventsMessage$Duration": "

    The number of minutes to retrieve events for.

    Default: 60
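
    A sketch of the Duration default, reusing the svc client from the paging example above (identifiers are illustrative); this asks for the last two hours of events for one instance:

        out, err := svc.DescribeEvents(&rds.DescribeEventsInput{
            SourceType:       aws.String("db-instance"),
            SourceIdentifier: aws.String("mydbinstance"),
            Duration:         aws.Int64(120), // minutes; omitting this falls back to the default of 60
        })
        if err != nil {
            log.Fatal(err)
        }
        for _, e := range out.Events {
            fmt.Println(aws.TimeValue(e.Date), aws.StringValue(e.Message))
        }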

    ", + "DescribeEventsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeOptionGroupOptionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeOptionGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeOrderableDBInstanceOptionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeReservedDBInstancesMessage$MaxRecords": "

    The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so that the following results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeReservedDBInstancesOfferingsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so that the following results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "ModifyDBInstanceMessage$AllocatedStorage": "

    The new storage capacity of the RDS instance. Changing this parameter does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

    MySQL

    Default: Uses existing setting

    Valid Values: 5-1024

    Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

    Type: Integer

    Oracle

    Default: Uses existing setting

    Valid Values: 10-1024

    Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

    SQL Server

    Cannot be modified.

    If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance will be available for use, but may experience performance degradation. While the migration takes place, nightly backups for the instance will be suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a read replica for the instance, and creating a DB snapshot of the instance.
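
    A sketch of the 10% rule and the ApplyImmediately interaction, reusing the svc client from the paging example above (the sizes are assumptions):

        // Assume the instance currently has 100 GB allocated: the new value must
        // be at least 10% larger, or RDS rounds it up on your behalf.
        _, err := svc.ModifyDBInstance(&rds.ModifyDBInstanceInput{
            DBInstanceIdentifier: aws.String("mydbinstance"),
            AllocatedStorage:     aws.Int64(110),
            ApplyImmediately:     aws.Bool(true), // otherwise applied at the next maintenance window
        })
        if err != nil {
            log.Fatal(err)
        }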

    ", + "ModifyDBInstanceMessage$BackupRetentionPeriod": "

    The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

    Changing this parameter can result in an outage if you change from 0 to a non-zero value or from a non-zero value to 0. These changes are applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. If you change the parameter from one non-zero value to another non-zero value, the change is asynchronously applied as soon as possible.

    Default: Uses existing setting

    Constraints:

    • Must be a value from 0 to 8
    • Cannot be set to 0 if the DB Instance is a master instance with read replicas or if the DB Instance is a read replica
    ", + "ModifyDBInstanceMessage$Iops": "

    The new Provisioned IOPS (I/O operations per second) value for the RDS instance. Changing this parameter does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

    Default: Uses existing setting

    Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

    Type: Integer

    If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance will be available for use, but may experience performance degradation. While the migration takes place, nightly backups for the instance will be suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a read replica for the instance, and creating a DB snapshot of the instance.

    ", + "Option$Port": "

    If required, the port that this option is configured to use.

    ", + "OptionConfiguration$Port": "

    The optional port for the option.

    ", + "OptionGroupOption$DefaultPort": "

    If the option requires a port, specifies the default port for the option.

    ", + "PendingModifiedValues$AllocatedStorage": "

    Contains the new AllocatedStorage size for the DB Instance that will be applied or is in progress.

    ", + "PendingModifiedValues$Port": "

    Specifies the pending port for the DB Instance.

    ", + "PendingModifiedValues$BackupRetentionPeriod": "

    Specifies the pending number of days for which automated backups are retained.

    ", + "PendingModifiedValues$Iops": "

    Specifies the new Provisioned IOPS value for the DB Instance that will be applied or is being applied.

    ", + "PromoteReadReplicaMessage$BackupRetentionPeriod": "

    The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

    Default: 1

    Constraints:

    • Must be a value from 0 to 8
    ", + "PurchaseReservedDBInstancesOfferingMessage$DBInstanceCount": "

    The number of instances to reserve.

    Default: 1
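
    A sketch of purchasing an offering with an explicit instance count, reusing the svc client from the paging example above (the offering ID is a placeholder):

        out, err := svc.PurchaseReservedDBInstancesOffering(&rds.PurchaseReservedDBInstancesOfferingInput{
            ReservedDBInstancesOfferingId: aws.String("438012d3-4052-4cc7-b2e3-8d3372e0e706"), // placeholder ID
            DBInstanceCount:               aws.Int64(2), // omit to reserve the default of 1
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(aws.StringValue(out.ReservedDBInstance.ReservedDBInstanceId))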

    ", + "RestoreDBInstanceFromDBSnapshotMessage$Port": "

    The port number on which the database accepts connections.

    Default: The same port as the original DB Instance

    Constraints: Value must be 1150-65535

    ", + "RestoreDBInstanceFromDBSnapshotMessage$Iops": "

    Specifies the amount of provisioned IOPS for the DB Instance, expressed in I/O operations per second. If this parameter is not specified, the IOPS value will be taken from the backup. If this parameter is set to 0, the new instance will be converted to a non-PIOPS instance, which will take additional time, though your DB instance will be available for connections before the conversion starts.

    Constraints: Must be an integer greater than 1000.
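
    A sketch combining the Port and Iops constraints above, reusing the svc client from the paging example (identifiers are illustrative):

        _, err := svc.RestoreDBInstanceFromDBSnapshot(&rds.RestoreDBInstanceFromDBSnapshotInput{
            DBInstanceIdentifier: aws.String("mydbinstance-restored"),
            DBSnapshotIdentifier: aws.String("my-snapshot-id"),
            Port:                 aws.Int64(3306), // must fall in 1150-65535
            Iops:                 aws.Int64(2000), // integer greater than 1000; omit to inherit the snapshot's value
        })
        if err != nil {
            log.Fatal(err)
        }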

    ", + "RestoreDBInstanceToPointInTimeMessage$Port": "

    The port number on which the database accepts connections.

    Constraints: Value must be 1150-65535

    Default: The same port as the original DB Instance.

    ", + "RestoreDBInstanceToPointInTimeMessage$Iops": "

    The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB Instance.

    Constraints: Must be an integer greater than 1000.

    " + } + }, + "InvalidDBInstanceStateFault": { + "base": "

    The specified DB instance is not in the available state.

    ", + "refs": { + } + }, + "InvalidDBParameterGroupStateFault": { + "base": "

    The DB parameter group cannot be deleted because it is in use.

    ", + "refs": { + } + }, + "InvalidDBSecurityGroupStateFault": { + "base": "

    The state of the DB security group does not allow deletion.

    ", + "refs": { + } + }, + "InvalidDBSnapshotStateFault": { + "base": "

    The state of the DB snapshot does not allow deletion.

    ", + "refs": { + } + }, + "InvalidDBSubnetGroupStateFault": { + "base": "

    The DB subnet group cannot be deleted because it is in use.

    ", + "refs": { + } + }, + "InvalidDBSubnetStateFault": { + "base": "

    The DB subnet is not in the available state.

    ", + "refs": { + } + }, + "InvalidEventSubscriptionStateFault": { + "base": "

    This error can occur if someone else is modifying a subscription. You should retry the action.

    ", + "refs": { + } + }, + "InvalidOptionGroupStateFault": { + "base": "

    The option group is not in the available state.

    ", + "refs": { + } + }, + "InvalidRestoreFault": { + "base": "

    Cannot restore from a VPC backup to a non-VPC DB instance.

    ", + "refs": { + } + }, + "InvalidSubnet": { + "base": "

    The requested subnet is invalid, or multiple subnets were requested that are not all in a common VPC.

    ", + "refs": { + } + }, + "InvalidVPCNetworkStateFault": { + "base": "

    The DB subnet group does not cover all Availability Zones after it is created because of changes that were made.

    ", + "refs": { + } + }, + "KeyList": { + "base": null, + "refs": { + "RemoveTagsFromResourceMessage$TagKeys": "

    The tag key (name) of the tag to be removed.

    " + } + }, + "ListTagsForResourceMessage": { + "base": "

    ", + "refs": { + } + }, + "Long": { + "base": null, + "refs": { + "DescribeDBLogFilesDetails$LastWritten": "

    The date and time that the last log entry was written.

    ", + "DescribeDBLogFilesDetails$Size": "

    The size, in bytes, of the log file for the specified DB instance.

    ", + "DescribeDBLogFilesMessage$FileLastWritten": "

    Filters the available log files for files written since the specified date.

    ", + "DescribeDBLogFilesMessage$FileSize": "

    Filters the available log files for files larger than the specified size.
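
    Putting the file filters and the line-count parameter together, a sketch reusing the svc client from the paging example above (names are illustrative):

        files, err := svc.DescribeDBLogFiles(&rds.DescribeDBLogFilesInput{
            DBInstanceIdentifier: aws.String("mydbinstance"),
            FileSize:             aws.Int64(1024), // only files larger than 1 KiB
        })
        if err != nil {
            log.Fatal(err)
        }
        for _, f := range files.DescribeDBLogFiles {
            portion, err := svc.DownloadDBLogFilePortion(&rds.DownloadDBLogFilePortionInput{
                DBInstanceIdentifier: aws.String("mydbinstance"),
                LogFileName:          f.LogFileName,
                NumberOfLines:        aws.Int64(100), // request up to 100 lines per file
            })
            if err != nil {
                log.Fatal(err)
            }
            fmt.Print(aws.StringValue(portion.LogFileData))
        }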

    " + } + }, + "ModifyDBInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyDBInstanceResult": { + "base": null, + "refs": { + } + }, + "ModifyDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyDBSubnetGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyDBSubnetGroupResult": { + "base": null, + "refs": { + } + }, + "ModifyEventSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyEventSubscriptionResult": { + "base": null, + "refs": { + } + }, + "ModifyOptionGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyOptionGroupResult": { + "base": null, + "refs": { + } + }, + "Option": { + "base": "

    Option details.

    ", + "refs": { + "OptionsList$member": null + } + }, + "OptionConfiguration": { + "base": "

    A list of all available options.

    ", + "refs": { + "OptionConfigurationList$member": null + } + }, + "OptionConfigurationList": { + "base": null, + "refs": { + "ModifyOptionGroupMessage$OptionsToInclude": "

    Options in this list are added to the Option Group or, if already present, the specified configuration is used to update the existing configuration.

    " + } + }, + "OptionGroup": { + "base": "

    ", + "refs": { + "CreateOptionGroupResult$OptionGroup": null, + "ModifyOptionGroupResult$OptionGroup": null, + "OptionGroupsList$member": null + } + }, + "OptionGroupAlreadyExistsFault": { + "base": "

    The option group you are trying to create already exists.

    ", + "refs": { + } + }, + "OptionGroupMembership": { + "base": "

    Provides information on the option groups the DB instance is a member of.

    ", + "refs": { + "OptionGroupMembershipList$member": null + } + }, + "OptionGroupMembershipList": { + "base": null, + "refs": { + "DBInstance$OptionGroupMemberships": "

    Provides the list of option group memberships for this DB Instance.

    " + } + }, + "OptionGroupNotFoundFault": { + "base": "

    The specified option group could not be found.

    ", + "refs": { + } + }, + "OptionGroupOption": { + "base": "

    Available option.

    ", + "refs": { + "OptionGroupOptionsList$member": null + } + }, + "OptionGroupOptionSetting": { + "base": "

    Option Group option settings are used to display settings available for each option with their default values and other information. These values are used with the DescribeOptionGroupOptions action.

    ", + "refs": { + "OptionGroupOptionSettingsList$member": null + } + }, + "OptionGroupOptionSettingsList": { + "base": null, + "refs": { + "OptionGroupOption$OptionGroupOptionSettings": "

    Specifies the option settings that are available (and the default value) for each option in an option group.

    " + } + }, + "OptionGroupOptionsList": { + "base": "

    List of available option group options.

    ", + "refs": { + "OptionGroupOptionsMessage$OptionGroupOptions": null + } + }, + "OptionGroupOptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "OptionGroupQuotaExceededFault": { + "base": "

    The quota of 20 option groups was exceeded for this AWS account.

    ", + "refs": { + } + }, + "OptionGroups": { + "base": "

    List of option groups.

    ", + "refs": { + } + }, + "OptionGroupsList": { + "base": null, + "refs": { + "OptionGroups$OptionGroupsList": "

    List of option groups.

    " + } + }, + "OptionNamesList": { + "base": null, + "refs": { + "ModifyOptionGroupMessage$OptionsToRemove": "

    Options in this list are removed from the Option Group.

    " + } + }, + "OptionSetting": { + "base": "

    Option settings are the actual settings being applied or configured for that option. It is used when you modify an option group or describe option groups. For example, the NATIVE_NETWORK_ENCRYPTION option has a setting called SQLNET.ENCRYPTION_SERVER that can have several different values.

    ", + "refs": { + "OptionSettingConfigurationList$member": null, + "OptionSettingsList$member": null + } + }, + "OptionSettingConfigurationList": { + "base": null, + "refs": { + "Option$OptionSettings": "

    The option settings for this option.

    " + } + }, + "OptionSettingsList": { + "base": null, + "refs": { + "OptionConfiguration$OptionSettings": "

    The option settings to include in an option group.

    " + } + }, + "OptionsDependedOn": { + "base": null, + "refs": { + "OptionGroupOption$OptionsDependedOn": "

    List of all options that are prerequisites for this option.

    " + } + }, + "OptionsList": { + "base": null, + "refs": { + "OptionGroup$Options": "

    Indicates what options are available in the option group.

    " + } + }, + "OrderableDBInstanceOption": { + "base": "

    Contains a list of available options for a DB Instance.

    This data type is used as a response element in the DescribeOrderableDBInstanceOptions action.

    ", + "refs": { + "OrderableDBInstanceOptionsList$member": null + } + }, + "OrderableDBInstanceOptionsList": { + "base": null, + "refs": { + "OrderableDBInstanceOptionsMessage$OrderableDBInstanceOptions": "

    An OrderableDBInstanceOption structure containing information about orderable options for the DB Instance.

    " + } + }, + "OrderableDBInstanceOptionsMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeOrderableDBInstanceOptions action.

    ", + "refs": { + } + }, + "Parameter": { + "base": "

    This data type is used as a request parameter in the ModifyDBParameterGroup and ResetDBParameterGroup actions.

    This data type is used as a response element in the DescribeEngineDefaultParameters and DescribeDBParameters actions.

    ", + "refs": { + "ParametersList$member": null + } + }, + "ParametersList": { + "base": null, + "refs": { + "DBParameterGroupDetails$Parameters": "

    A list of Parameter instances.

    ", + "EngineDefaults$Parameters": "

    Contains a list of engine default parameters.

    ", + "ModifyDBParameterGroupMessage$Parameters": "

    An array of parameter names, values, and the apply method for the parameter update. At least one parameter name, value, and apply method must be supplied; subsequent arguments are optional. A maximum of 20 parameters may be modified in a single request.

    Valid Values (for the application method): immediate | pending-reboot

    You can use the immediate value with dynamic parameters only. You can use the pending-reboot value for both dynamic and static parameters, and changes are applied when DB Instance reboots. ", + "ResetDBParameterGroupMessage$Parameters": "

    An array of parameter names, values, and the apply method for the parameter update. At least one parameter name, value, and apply method must be supplied; subsequent arguments are optional. A maximum of 20 parameters may be modified in a single request.

    MySQL

    Valid Values (for Apply method): immediate | pending-reboot

    You can use the immediate value with dynamic parameters only. You can use the pending-reboot value for both dynamic and static parameters, and changes are applied when DB Instance reboots.

    Oracle

    Valid Values (for Apply method): pending-reboot
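
    A sketch of the apply-method rules above, pairing a dynamic parameter with immediate and a static one with pending-reboot; it reuses the svc client from the paging example, and the parameter names assume a MySQL engine:

        _, err := svc.ModifyDBParameterGroup(&rds.ModifyDBParameterGroupInput{
            DBParameterGroupName: aws.String("mydbparametergroup"),
            Parameters: []*rds.Parameter{
                {
                    ParameterName:  aws.String("max_connections"), // dynamic parameter
                    ParameterValue: aws.String("250"),
                    ApplyMethod:    aws.String("immediate"),
                },
                {
                    ParameterName:  aws.String("innodb_buffer_pool_size"), // static parameter
                    ParameterValue: aws.String("134217728"),
                    ApplyMethod:    aws.String("pending-reboot"), // applied when the DB Instance reboots
                },
            },
        })
        if err != nil {
            log.Fatal(err)
        }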

    " + } + }, + "PendingModifiedValues": { + "base": "

    This data type is used as a response element in the ModifyDBInstance action.

    ", + "refs": { + "DBInstance$PendingModifiedValues": "

    Specifies that changes to the DB Instance are pending. This element is only included when changes are pending. Specific changes are identified by subelements.

    " + } + }, + "PointInTimeRestoreNotEnabledFault": { + "base": "

    SourceDBInstanceIdentifier refers to a DB instance with BackupRetentionPeriod equal to 0.

    ", + "refs": { + } + }, + "PromoteReadReplicaMessage": { + "base": "

    ", + "refs": { + } + }, + "PromoteReadReplicaResult": { + "base": null, + "refs": { + } + }, + "ProvisionedIopsNotAvailableInAZFault": { + "base": "

    Provisioned IOPS are not available in the specified Availability Zone.

    ", + "refs": { + } + }, + "PurchaseReservedDBInstancesOfferingMessage": { + "base": "

    ", + "refs": { + } + }, + "PurchaseReservedDBInstancesOfferingResult": { + "base": null, + "refs": { + } + }, + "ReadReplicaDBInstanceIdentifierList": { + "base": null, + "refs": { + "DBInstance$ReadReplicaDBInstanceIdentifiers": "

    Contains one or more identifiers of the Read Replicas associated with this DB Instance.

    " + } + }, + "RebootDBInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "RebootDBInstanceResult": { + "base": null, + "refs": { + } + }, + "RecurringCharge": { + "base": "

    This data type is used as a response element in the DescribeReservedDBInstances and DescribeReservedDBInstancesOfferings actions.

    ", + "refs": { + "RecurringChargeList$member": null + } + }, + "RecurringChargeList": { + "base": null, + "refs": { + "ReservedDBInstance$RecurringCharges": "

    The recurring price charged to run this reserved DB Instance.

    ", + "ReservedDBInstancesOffering$RecurringCharges": "

    The recurring price charged to run this reserved DB Instance.

    " + } + }, + "RemoveSourceIdentifierFromSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "RemoveSourceIdentifierFromSubscriptionResult": { + "base": null, + "refs": { + } + }, + "RemoveTagsFromResourceMessage": { + "base": "

    ", + "refs": { + } + }, + "ReservedDBInstance": { + "base": "

    This data type is used as a response element in the DescribeReservedDBInstances and PurchaseReservedDBInstancesOffering actions.

    ", + "refs": { + "PurchaseReservedDBInstancesOfferingResult$ReservedDBInstance": null, + "ReservedDBInstanceList$member": null + } + }, + "ReservedDBInstanceAlreadyExistsFault": { + "base": "

    User already has a reservation with the given identifier.

    ", + "refs": { + } + }, + "ReservedDBInstanceList": { + "base": null, + "refs": { + "ReservedDBInstanceMessage$ReservedDBInstances": "

    A list of reserved DB Instances.

    " + } + }, + "ReservedDBInstanceMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeReservedDBInstances action.

    ", + "refs": { + } + }, + "ReservedDBInstanceNotFoundFault": { + "base": "

    The specified reserved DB Instance was not found.

    ", + "refs": { + } + }, + "ReservedDBInstanceQuotaExceededFault": { + "base": "

    Request would exceed the user's DB Instance quota.

    ", + "refs": { + } + }, + "ReservedDBInstancesOffering": { + "base": "

    This data type is used as a response element in the DescribeReservedDBInstancesOfferings action.

    ", + "refs": { + "ReservedDBInstancesOfferingList$member": null + } + }, + "ReservedDBInstancesOfferingList": { + "base": null, + "refs": { + "ReservedDBInstancesOfferingMessage$ReservedDBInstancesOfferings": "

    A list of reserved DB Instance offerings.

    " + } + }, + "ReservedDBInstancesOfferingMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeReservedDBInstancesOfferings action.

    ", + "refs": { + } + }, + "ReservedDBInstancesOfferingNotFoundFault": { + "base": "

    Specified offering does not exist.

    ", + "refs": { + } + }, + "ResetDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "RestoreDBInstanceFromDBSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "RestoreDBInstanceFromDBSnapshotResult": { + "base": null, + "refs": { + } + }, + "RestoreDBInstanceToPointInTimeMessage": { + "base": "

    ", + "refs": { + } + }, + "RestoreDBInstanceToPointInTimeResult": { + "base": null, + "refs": { + } + }, + "RevokeDBSecurityGroupIngressMessage": { + "base": "

    ", + "refs": { + } + }, + "RevokeDBSecurityGroupIngressResult": { + "base": null, + "refs": { + } + }, + "SNSInvalidTopicFault": { + "base": "

    SNS has responded that there is a problem with the SNS topic specified.

    ", + "refs": { + } + }, + "SNSNoAuthorizationFault": { + "base": "

    You do not have permission to publish to the SNS topic ARN.

    ", + "refs": { + } + }, + "SNSTopicArnNotFoundFault": { + "base": "

    The SNS topic ARN does not exist.

    ", + "refs": { + } + }, + "SnapshotQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB snapshots.

    ", + "refs": { + } + }, + "SourceIdsList": { + "base": null, + "refs": { + "CreateEventSubscriptionMessage$SourceIds": "

    The list of identifiers of the event sources for which events will be returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it cannot end with a hyphen or contain two consecutive hyphens.

    Constraints:

    • If SourceIds are supplied, SourceType must also be provided.
    • If the source type is a DB instance, then a DBInstanceIdentifier must be supplied.
    • If the source type is a DB security group, a DBSecurityGroupName must be supplied.
    • If the source type is a DB parameter group, a DBParameterGroupName must be supplied.
    • If the source type is a DB Snapshot, a DBSnapshotIdentifier must be supplied.
    ", + "EventSubscription$SourceIdsList": "

    A list of source Ids for the RDS event notification subscription.

    " + } + }, + "SourceNotFoundFault": { + "base": "

    The requested source could not be found.

    ", + "refs": { + } + }, + "SourceType": { + "base": null, + "refs": { + "DescribeEventsMessage$SourceType": "

    The event source to retrieve events for. If no value is specified, all events are returned.

    ", + "Event$SourceType": "

    Specifies the source type for this event.

    " + } + }, + "StorageQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed amount of storage available across all DB instances.

    ", + "refs": { + } + }, + "String": { + "base": null, + "refs": { + "AddSourceIdentifierToSubscriptionMessage$SubscriptionName": "

    The name of the RDS event notification subscription you want to add a source identifier to.

    ", + "AddSourceIdentifierToSubscriptionMessage$SourceIdentifier": "

    The identifier of the event source to be added. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it cannot end with a hyphen or contain two consecutive hyphens.

    Constraints:

    • If the source type is a DB instance, then a DBInstanceIdentifier must be supplied.
    • If the source type is a DB security group, a DBSecurityGroupName must be supplied.
    • If the source type is a DB parameter group, a DBParameterGroupName must be supplied.
    • If the source type is a DB Snapshot, a DBSnapshotIdentifier must be supplied.
    ", + "AddTagsToResourceMessage$ResourceName": "

    The DB Instance the tags will be added to. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).
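
    A sketch of tagging via the ARN form described above, reusing the svc client from the paging example (the account number and names are placeholders):

        _, err := svc.AddTagsToResource(&rds.AddTagsToResourceInput{
            // ARN shape for a DB Instance: arn:aws:rds:<region>:<account>:db:<identifier>
            ResourceName: aws.String("arn:aws:rds:us-east-1:123456789012:db:mydbinstance"),
            Tags: []*rds.Tag{
                {Key: aws.String("env"), Value: aws.String("staging")},
            },
        })
        if err != nil {
            log.Fatal(err)
        }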

    ", + "AuthorizeDBSecurityGroupIngressMessage$DBSecurityGroupName": "

    The name of the DB Security Group to add authorization to.

    ", + "AuthorizeDBSecurityGroupIngressMessage$CIDRIP": "

    The IP range to authorize.

    ", + "AuthorizeDBSecurityGroupIngressMessage$EC2SecurityGroupName": "

    Name of the EC2 Security Group to authorize. For VPC DB Security Groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "AuthorizeDBSecurityGroupIngressMessage$EC2SecurityGroupId": "

    Id of the EC2 Security Group to authorize. For VPC DB Security Groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "AuthorizeDBSecurityGroupIngressMessage$EC2SecurityGroupOwnerId": "

    AWS Account Number of the owner of the EC2 Security Group specified in the EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable value. For VPC DB Security Groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.
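
    A sketch of the two authorization modes above: either a CIDR range, or an EC2 security group plus its owner. It reuses the svc client from the paging example; the values are placeholders:

        _, err := svc.AuthorizeDBSecurityGroupIngress(&rds.AuthorizeDBSecurityGroupIngressInput{
            DBSecurityGroupName: aws.String("mysecuritygroup"),
            CIDRIP:              aws.String("203.0.113.0/24"), // IP-range mode
            // EC2 group mode instead: set EC2SecurityGroupName (or EC2SecurityGroupId
            // for VPC) together with EC2SecurityGroupOwnerId, and leave CIDRIP unset.
        })
        if err != nil {
            log.Fatal(err)
        }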

    ", + "AvailabilityZone$Name": "

    The name of the availability zone.

    ", + "CharacterSet$CharacterSetName": "

    The name of the character set.

    ", + "CharacterSet$CharacterSetDescription": "

    The description of the character set.

    ", + "CopyDBSnapshotMessage$SourceDBSnapshotIdentifier": "

    The identifier for the source DB snapshot.

    Constraints:

    • Must be the identifier for a valid system snapshot in the \"available\" state.

    Example: rds:mydb-2012-04-02-00-01

    ", + "CopyDBSnapshotMessage$TargetDBSnapshotIdentifier": "

    The identifier for the copied snapshot.

    Constraints:

    • Cannot be null, empty, or blank
    • Must contain from 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-db-snapshot
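
    A sketch of copying the system snapshot from the example above to a user snapshot, reusing the svc client from the paging example:

        _, err := svc.CopyDBSnapshot(&rds.CopyDBSnapshotInput{
            SourceDBSnapshotIdentifier: aws.String("rds:mydb-2012-04-02-00-01"), // automated snapshot in the "available" state
            TargetDBSnapshotIdentifier: aws.String("my-db-snapshot"),
        })
        if err != nil {
            log.Fatal(err)
        }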

    ", + "CreateDBInstanceMessage$DBName": "

    The meaning of this parameter differs according to the database engine you use.

    MySQL

    The name of the database to create when the DB Instance is created. If this parameter is not specified, no database is created in the DB Instance.

    Constraints:

    • Must contain 1 to 64 alphanumeric characters
    • Cannot be a word reserved by the specified database engine

    Type: String

    Oracle

    The Oracle System ID (SID) of the created DB Instance.

    Default: ORCL

    Constraints:

    • Cannot be longer than 8 characters

    SQL Server

    Not applicable. Must be null.

    ", + "CreateDBInstanceMessage$DBInstanceIdentifier": "

    The DB Instance identifier. This parameter is stored as a lowercase string.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens (1 to 15 for SQL Server).
    • First character must be a letter.
    • Cannot end with a hyphen or contain two consecutive hyphens.

    Example: mydbinstance

    ", + "CreateDBInstanceMessage$DBInstanceClass": "

    The compute and memory capacity of the DB Instance.

    Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge

    ", + "CreateDBInstanceMessage$Engine": "

    The name of the database engine to be used for this instance.

    Valid Values: MySQL | oracle-se1 | oracle-se | oracle-ee | sqlserver-ee | sqlserver-se | sqlserver-ex | sqlserver-web

    ", + "CreateDBInstanceMessage$MasterUsername": "

    The name of master user for the client DB Instance.

    MySQL

    Constraints:

    • Must be 1 to 16 alphanumeric characters.
    • First character must be a letter.
    • Cannot be a reserved word for the chosen database engine.

    Type: String

    Oracle

    Constraints:

    • Must be 1 to 30 alphanumeric characters.
    • First character must be a letter.
    • Cannot be a reserved word for the chosen database engine.

    SQL Server

    Constraints:

    • Must be 1 to 128 alphanumeric characters.
    • First character must be a letter.
    • Cannot be a reserved word for the chosen database engine.
    ", + "CreateDBInstanceMessage$MasterUserPassword": "

    The password for the master database user. Can be any printable ASCII character except \"/\", \"\\\", or \"@\".

    Type: String

    MySQL

    Constraints: Must contain from 8 to 41 alphanumeric characters.

    Oracle

    Constraints: Must contain from 8 to 30 alphanumeric characters.

    SQL Server

    Constraints: Must contain from 8 to 128 alphanumeric characters.
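
    A sketch tying the CreateDBInstance fields documented so far together for a MySQL instance, reusing the svc client from the paging example (the password is a placeholder):

        _, err := svc.CreateDBInstance(&rds.CreateDBInstanceInput{
            DBInstanceIdentifier: aws.String("mydbinstance"),
            DBInstanceClass:      aws.String("db.m1.small"),
            Engine:               aws.String("MySQL"),
            AllocatedStorage:     aws.Int64(5), // MySQL minimum: 5 GB
            MasterUsername:       aws.String("masteruser"),
            MasterUserPassword:   aws.String("replace-with-a-real-password"),
        })
        if err != nil {
            log.Fatal(err)
        }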

    ", + "CreateDBInstanceMessage$AvailabilityZone": "

    The EC2 Availability Zone that the database instance will be created in.

    Default: A random, system-chosen Availability Zone in the endpoint's region.

    Example: us-east-1d

    Constraint: The AvailabilityZone parameter cannot be specified if the MultiAZ parameter is set to true. The specified Availability Zone must be in the same region as the current endpoint.

    ", + "CreateDBInstanceMessage$DBSubnetGroupName": "

    A DB Subnet Group to associate with this DB Instance.

    If there is no DB Subnet Group, then it is a non-VPC DB instance.

    ", + "CreateDBInstanceMessage$PreferredMaintenanceWindow": "

    The weekly time range (in UTC) during which system maintenance can occur.

    Format: ddd:hh24:mi-ddd:hh24:mi

    Default: A 30-minute window selected at random from an 8-hour block of time per region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

    Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun

    Constraints: Minimum 30-minute window.

    ", + "CreateDBInstanceMessage$DBParameterGroupName": "

    The name of the DB Parameter Group to associate with this DB instance. If this argument is omitted, the default DBParameterGroup for the specified engine will be used.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "CreateDBInstanceMessage$PreferredBackupWindow": "

    The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter.

    Default: A 30-minute window selected at random from an 8-hour block of time per region. The following list shows the time blocks for each region from which the default backup windows are assigned.

    • US-East (Northern Virginia) Region: 03:00-11:00 UTC
    • US-West (Northern California) Region: 06:00-14:00 UTC
    • EU (Ireland) Region: 22:00-06:00 UTC
    • Asia Pacific (Singapore) Region: 14:00-22:00 UTC
    • Asia Pacific (Tokyo) Region: 17:00-03:00 UTC

    Constraints: Must be in the format hh24:mi-hh24:mi. Times should be Universal Time Coordinated (UTC). Must not conflict with the preferred maintenance window. Must be at least 30 minutes.

    ", + "CreateDBInstanceMessage$EngineVersion": "

    The version number of the database engine to use.

    MySQL

    Example: 5.1.42

    Type: String

    Oracle

    Example: 11.2.0.2.v2

    Type: String

    SQL Server

    Example: 10.50.2789.0.v1

    ", + "CreateDBInstanceMessage$LicenseModel": "

    License model information for this DB Instance.

    Valid values: license-included | bring-your-own-license | general-public-license

    ", + "CreateDBInstanceMessage$OptionGroupName": "

    Indicates that the DB Instance should be associated with the specified option group.

    Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance.

    ", + "CreateDBInstanceMessage$CharacterSetName": "

    For supported engines, indicates that the DB Instance should be associated with the specified CharacterSet.

    ", + "CreateDBInstanceReadReplicaMessage$DBInstanceIdentifier": "

    The DB Instance identifier of the Read Replica. This is the unique key that identifies a DB Instance. This parameter is stored as a lowercase string.

    ", + "CreateDBInstanceReadReplicaMessage$SourceDBInstanceIdentifier": "

    The identifier of the DB Instance that will act as the source for the Read Replica. Each DB Instance can have up to five Read Replicas.

    Constraints: Must be the identifier of an existing DB Instance that is not already a Read Replica DB Instance.

    ", + "CreateDBInstanceReadReplicaMessage$DBInstanceClass": "

    The compute and memory capacity of the Read Replica.

    Valid Values: db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge

    Default: Inherits from the source DB Instance.

    ", + "CreateDBInstanceReadReplicaMessage$AvailabilityZone": "

    The Amazon EC2 Availability Zone that the Read Replica will be created in.

    Default: A random, system-chosen Availability Zone in the endpoint's region.

    Example: us-east-1d

    ", + "CreateDBInstanceReadReplicaMessage$OptionGroupName": "

    The option group the DB instance will be associated with. If omitted, the default Option Group for the engine specified will be used.

    ", + "CreateDBParameterGroupMessage$DBParameterGroupName": "

    The name of the DB Parameter Group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    This value is stored as a lower-case string.", + "CreateDBParameterGroupMessage$DBParameterGroupFamily": "

    The DB Parameter Group Family name. A DB Parameter Group can be associated with one and only one DB Parameter Group Family, and can be applied only to a DB Instance running a database engine and engine version compatible with that DB Parameter Group Family.

    ", + "CreateDBParameterGroupMessage$Description": "

    The description for the DB Parameter Group.

    ", + "CreateDBSecurityGroupMessage$DBSecurityGroupName": "

    The name for the DB Security Group. This value is stored as a lowercase string.

    Constraints: Must contain no more than 255 alphanumeric characters or hyphens. Must not be \"Default\".

    Example: mysecuritygroup

    ", + "CreateDBSecurityGroupMessage$DBSecurityGroupDescription": "

    The description for the DB Security Group.

    ", + "CreateDBSnapshotMessage$DBSnapshotIdentifier": "

    The identifier for the DB Snapshot.

    Constraints:

    • Cannot be null, empty, or blank
    • Must contain from 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-snapshot-id

    ", + "CreateDBSnapshotMessage$DBInstanceIdentifier": "

    The DB Instance identifier. This is the unique key that identifies a DB Instance. This parameter isn't case sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "CreateDBSubnetGroupMessage$DBSubnetGroupName": "

    The name for the DB Subnet Group. This value is stored as a lowercase string.

    Constraints: Must contain no more than 255 alphanumeric characters or hyphens. Must not be \"Default\".

    Example: mySubnetgroup

    ", + "CreateDBSubnetGroupMessage$DBSubnetGroupDescription": "

    The description for the DB Subnet Group.

    ", + "CreateEventSubscriptionMessage$SubscriptionName": "

    The name of the subscription.

    Constraints: The name must be less than 255 characters.

    ", + "CreateEventSubscriptionMessage$SnsTopicArn": "

    The Amazon Resource Name (ARN) of the SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it.

    ", + "CreateEventSubscriptionMessage$SourceType": "

    The type of source that will be generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. If this value is not specified, all events are returned.

    Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot
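
    A sketch of subscribing to instance events on an existing SNS topic, reusing the svc client from the paging example (the topic ARN is a placeholder):

        _, err := svc.CreateEventSubscription(&rds.CreateEventSubscriptionInput{
            SubscriptionName: aws.String("my-db-events"),
            SnsTopicArn:      aws.String("arn:aws:sns:us-east-1:123456789012:my-topic"),
            SourceType:       aws.String("db-instance"), // required because SourceIds are given
            SourceIds:        []*string{aws.String("mydbinstance")},
        })
        if err != nil {
            log.Fatal(err)
        }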

    ", + "CreateOptionGroupMessage$OptionGroupName": "

    Specifies the name of the option group to be created.

    Constraints:

    • Must be 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: myOptiongroup

    ", + "CreateOptionGroupMessage$EngineName": "

    Specifies the name of the engine that this option group should be associated with.

    ", + "CreateOptionGroupMessage$MajorEngineVersion": "

    Specifies the major version of the engine that this option group should be associated with.

    ", + "CreateOptionGroupMessage$OptionGroupDescription": "

    The description of the option group.

    ", + "DBEngineVersion$Engine": "

    The name of the database engine.

    ", + "DBEngineVersion$EngineVersion": "

    The version number of the database engine.

    ", + "DBEngineVersion$DBParameterGroupFamily": "

    The name of the DBParameterGroupFamily for the database engine.

    ", + "DBEngineVersion$DBEngineDescription": "

    The description of the database engine.

    ", + "DBEngineVersion$DBEngineVersionDescription": "

    The description of the database engine version.

    ", + "DBEngineVersionMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBInstance$DBInstanceIdentifier": "

    Contains a user-supplied database identifier. This is the unique key that identifies a DB Instance.

    ", + "DBInstance$DBInstanceClass": "

    Contains the name of the compute and memory capacity class of the DB Instance.

    ", + "DBInstance$Engine": "

    Provides the name of the database engine to be used for this DB Instance.

    ", + "DBInstance$DBInstanceStatus": "

    Specifies the current state of this database.

    ", + "DBInstance$MasterUsername": "

    Contains the master username for the DB Instance.

    ", + "DBInstance$DBName": "

    The meaning of this parameter differs according to the database engine you use.

    MySQL

    Contains the name of the initial database of this instance that was provided at create time, if one was specified when the DB Instance was created. This same name is returned for the life of the DB Instance.

    Type: String

    Oracle

    Contains the Oracle System ID (SID) of the created DB Instance.

    ", + "DBInstance$PreferredBackupWindow": "

    Specifies the daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod.

    ", + "DBInstance$AvailabilityZone": "

    Specifies the name of the Availability Zone the DB Instance is located in.

    ", + "DBInstance$PreferredMaintenanceWindow": "

    Specifies the weekly time range (in UTC) during which system maintenance can occur.

    ", + "DBInstance$EngineVersion": "

    Indicates the database engine version.

    ", + "DBInstance$ReadReplicaSourceDBInstanceIdentifier": "

    Contains the identifier of the source DB Instance if this DB Instance is a Read Replica.

    ", + "DBInstance$LicenseModel": "

    License model information for this DB Instance.

    ", + "DBInstance$CharacterSetName": "

    If present, specifies the name of the character set that this instance is associated with.

    ", + "DBInstance$SecondaryAvailabilityZone": "

    If present, specifies the name of the secondary Availability Zone for a DB instance with multi-AZ support.

    ", + "DBInstanceMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBParameterGroup$DBParameterGroupName": "

    Provides the name of the DB Parameter Group.

    ", + "DBParameterGroup$DBParameterGroupFamily": "

    Provides the name of the DB Parameter Group Family that this DB Parameter Group is compatible with.

    ", + "DBParameterGroup$Description": "

    Provides the customer-specified description for this DB Parameter Group.

    ", + "DBParameterGroupDetails$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBParameterGroupNameMessage$DBParameterGroupName": "

    The name of the DB Parameter Group.

    ", + "DBParameterGroupStatus$DBParameterGroupName": "

    The name of the DB Parameter Group.

    ", + "DBParameterGroupStatus$ParameterApplyStatus": "

    The status of parameter updates.

    ", + "DBParameterGroupsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBSecurityGroup$OwnerId": "

    Provides the AWS ID of the owner of a specific DB Security Group.

    ", + "DBSecurityGroup$DBSecurityGroupName": "

    Specifies the name of the DB Security Group.

    ", + "DBSecurityGroup$DBSecurityGroupDescription": "

    Provides the description of the DB Security Group.

    ", + "DBSecurityGroup$VpcId": "

    Provides the VpcId of the DB Security Group.

    ", + "DBSecurityGroupMembership$DBSecurityGroupName": "

    The name of the DB Security Group.

    ", + "DBSecurityGroupMembership$Status": "

    The status of the DB Security Group.

    ", + "DBSecurityGroupMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBSecurityGroupNameList$member": null, + "DBSnapshot$DBSnapshotIdentifier": "

    Specifies the identifier for the DB Snapshot.

    ", + "DBSnapshot$DBInstanceIdentifier": "

    Specifies the DBInstanceIdentifier of the DB Instance this DB Snapshot was created from.

    ", + "DBSnapshot$Engine": "

    Specifies the name of the database engine.

    ", + "DBSnapshot$Status": "

    Specifies the status of this DB Snapshot.

    ", + "DBSnapshot$AvailabilityZone": "

    Specifies the name of the Availability Zone the DB Instance was located in at the time of the DB Snapshot.

    ", + "DBSnapshot$VpcId": "

    Provides the Vpc Id associated with the DB Snapshot.

    ", + "DBSnapshot$MasterUsername": "

    Provides the master username for the DB Snapshot.

    ", + "DBSnapshot$EngineVersion": "

    Specifies the version of the database engine.

    ", + "DBSnapshot$LicenseModel": "

    License model information for the restored DB Instance.

    ", + "DBSnapshot$SnapshotType": "

    Provides the type of the DB Snapshot.

    ", + "DBSnapshot$OptionGroupName": "

    Provides the option group name for the DB Snapshot.

    ", + "DBSnapshotMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBSubnetGroup$DBSubnetGroupName": "

    Specifies the name of the DB Subnet Group.

    ", + "DBSubnetGroup$DBSubnetGroupDescription": "

    Provides the description of the DB Subnet Group.

    ", + "DBSubnetGroup$VpcId": "

    Provides the VpcId of the DB Subnet Group.

    ", + "DBSubnetGroup$SubnetGroupStatus": "

    Provides the status of the DB Subnet Group.

    ", + "DBSubnetGroupMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DeleteDBInstanceMessage$DBInstanceIdentifier": "

    The DB Instance identifier for the DB Instance to be deleted. This parameter isn't case sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DeleteDBInstanceMessage$FinalDBSnapshotIdentifier": "

    The DBSnapshotIdentifier of the new DBSnapshot created when SkipFinalSnapshot is set to false.

    Specifying this parameter and also setting the SkipFinalSnapshot parameter to true results in an error.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DeleteDBParameterGroupMessage$DBParameterGroupName": "

    The name of the DB Parameter Group.

    Constraints:

    • Must be the name of an existing DB Parameter Group
    • You cannot delete a default DB Parameter Group
    • Cannot be associated with any DB Instances
    ", + "DeleteDBSecurityGroupMessage$DBSecurityGroupName": "

    The name of the DB Security Group to delete.

    You cannot delete the default DB Security Group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DeleteDBSnapshotMessage$DBSnapshotIdentifier": "

    The DBSnapshot identifier.

    Constraints: Must be the name of an existing DB Snapshot in the available state.

    ", + "DeleteDBSubnetGroupMessage$DBSubnetGroupName": "

    The name of the database subnet group to delete.

    You cannot delete the default subnet group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DeleteEventSubscriptionMessage$SubscriptionName": "

    The name of the RDS event notification subscription you want to delete.

    ", + "DeleteOptionGroupMessage$OptionGroupName": "

    The name of the option group to be deleted.

    You cannot delete default Option Groups.", + "DescribeDBEngineVersionsMessage$Engine": "

    The database engine to return.

    ", + "DescribeDBEngineVersionsMessage$EngineVersion": "

    The database engine version to return.

    Example: 5.1.49

    ", + "DescribeDBEngineVersionsMessage$DBParameterGroupFamily": "

    The name of a specific DB Parameter Group family to return details for.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBEngineVersionsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBInstancesMessage$DBInstanceIdentifier": "

    The user-supplied instance identifier. If this parameter is specified, information from only the specific DB Instance is returned. This parameter isn't case sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBInstancesMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBInstances request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBLogFilesDetails$LogFileName": "

    The name of the log file for the specified DB instance.

    ", + "DescribeDBLogFilesMessage$DBInstanceIdentifier": "

    The customer-assigned name of the DB Instance that contains the log files you want to list.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBLogFilesMessage$FilenameContains": "

    Filters the available log files for log file names that contain the specified string.

    ", + "DescribeDBLogFilesMessage$Marker": "

    The pagination token provided in the previous request. If this parameter is specified, the response includes only records beyond the marker, up to MaxRecords.

    ", + "DescribeDBLogFilesResponse$Marker": "

    An optional paging token.

    ", + "DescribeDBParameterGroupsMessage$DBParameterGroupName": "

    The name of a specific DB Parameter Group to return details for.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBParameterGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBParameterGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBParametersMessage$DBParameterGroupName": "

    The name of a specific DB Parameter Group to return details for.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBParametersMessage$Source": "

    The parameter types to return.

    Default: All parameter types returned

    Valid Values: user | system | engine-default

    ", + "DescribeDBParametersMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBSecurityGroupsMessage$DBSecurityGroupName": "

    The name of the DB Security Group to return details for.

    ", + "DescribeDBSecurityGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBSecurityGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBSnapshotsMessage$DBInstanceIdentifier": "

    A DB Instance Identifier to retrieve the list of DB Snapshots for. Cannot be used in conjunction with DBSnapshotIdentifier. This parameter isn't case sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBSnapshotsMessage$DBSnapshotIdentifier": "

    A specific DB Snapshot Identifier to describe. Cannot be used in conjunction with DBInstanceIdentifier. This value is stored as a lowercase string.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    • If this is the identifier of an automated snapshot, the SnapshotType parameter must also be specified.
    ", + "DescribeDBSnapshotsMessage$SnapshotType": "

    An optional snapshot type for which snapshots will be returned. If not specified, the returned results will include snapshots of all types.

    ", + "DescribeDBSnapshotsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBSnapshots request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBSubnetGroupsMessage$DBSubnetGroupName": "

    The name of the DB Subnet Group to return details for.

    ", + "DescribeDBSubnetGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBSubnetGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeEngineDefaultParametersMessage$DBParameterGroupFamily": "

    The name of the DB Parameter Group Family.

    ", + "DescribeEngineDefaultParametersMessage$Marker": "

    An optional pagination token provided by a previous DescribeEngineDefaultParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeEventCategoriesMessage$SourceType": "

    The type of source that will be generating the events.

    Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot

    ", + "DescribeEventSubscriptionsMessage$SubscriptionName": "

    The name of the RDS event notification subscription you want to describe.

    ", + "DescribeEventSubscriptionsMessage$Marker": "

    An optional pagination token provided by a previous DescribeEventSubscriptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeEventsMessage$SourceIdentifier": "

    The identifier of the event source for which events will be returned. If not specified, then all sources are included in the response.

    Constraints:

    • If SourceIdentifier is supplied, SourceType must also be provided.
    • If the source type is DBInstance, then a DBInstanceIdentifier must be supplied.
    • If the source type is DBSecurityGroup, a DBSecurityGroupName must be supplied.
    • If the source type is DBParameterGroup, a DBParameterGroupName must be supplied.
    • If the source type is DBSnapshot, a DBSnapshotIdentifier must be supplied.
    • Cannot end with a hyphen or contain two consecutive hyphens.
    ", + "DescribeEventsMessage$Marker": "

    An optional pagination token provided by a previous DescribeEvents request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeOptionGroupOptionsMessage$EngineName": "

    A required parameter. Options available for the given engine name will be described.

    ", + "DescribeOptionGroupOptionsMessage$MajorEngineVersion": "

    If specified, filters the results to include only options for the specified major engine version.

    ", + "DescribeOptionGroupOptionsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeOptionGroupsMessage$OptionGroupName": "

    The name of the option group to describe. Cannot be supplied together with EngineName or MajorEngineVersion.

    ", + "DescribeOptionGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeOptionGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeOptionGroupsMessage$EngineName": "

    Filters the list of option groups to only include groups associated with a specific database engine.

    ", + "DescribeOptionGroupsMessage$MajorEngineVersion": "

    Filters the list of option groups to only include groups associated with a specific database engine version. If specified, then EngineName must also be specified.

    ", + "DescribeOrderableDBInstanceOptionsMessage$Engine": "

    The name of the engine to retrieve DB Instance options for.

    ", + "DescribeOrderableDBInstanceOptionsMessage$EngineVersion": "

    The engine version filter value. Specify this parameter to show only the available offerings matching the specified engine version.

    ", + "DescribeOrderableDBInstanceOptionsMessage$DBInstanceClass": "

    The DB Instance class filter value. Specify this parameter to show only the available offerings matching the specified DB Instance class.

    ", + "DescribeOrderableDBInstanceOptionsMessage$LicenseModel": "

    The license model filter value. Specify this parameter to show only the available offerings matching the specified license model.

    ", + "DescribeOrderableDBInstanceOptionsMessage$Marker": "

    An optional pagination token provided by a previous DescribeOrderableDBInstanceOptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeReservedDBInstancesMessage$ReservedDBInstanceId": "

    The reserved DB Instance identifier filter value. Specify this parameter to show only the reservation that matches the specified reservation ID.

    ", + "DescribeReservedDBInstancesMessage$ReservedDBInstancesOfferingId": "

    The offering identifier filter value. Specify this parameter to show only purchased reservations matching the specified offering identifier.

    ", + "DescribeReservedDBInstancesMessage$DBInstanceClass": "

    The DB Instance class filter value. Specify this parameter to show only those reservations matching the specified DB Instance class.

    ", + "DescribeReservedDBInstancesMessage$Duration": "

    The duration filter value, specified in years or seconds. Specify this parameter to show only reservations for this duration.

    Valid Values: 1 | 3 | 31536000 | 94608000

    ", + "DescribeReservedDBInstancesMessage$ProductDescription": "

    The product description filter value. Specify this parameter to show only those reservations matching the specified product description.

    ", + "DescribeReservedDBInstancesMessage$OfferingType": "

    The offering type filter value. Specify this parameter to show only the available offerings matching the specified offering type.

    Valid Values: \"Light Utilization\" | \"Medium Utilization\" | \"Heavy Utilization\"

    ", + "DescribeReservedDBInstancesMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeReservedDBInstancesOfferingsMessage$ReservedDBInstancesOfferingId": "

    The offering identifier filter value. Specify this parameter to show only the available offering that matches the specified reservation identifier.

    Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706

    ", + "DescribeReservedDBInstancesOfferingsMessage$DBInstanceClass": "

    The DB Instance class filter value. Specify this parameter to show only the available offerings matching the specified DB Instance class.

    ", + "DescribeReservedDBInstancesOfferingsMessage$Duration": "

    Duration filter value, specified in years or seconds. Specify this parameter to show only reservations for this duration.

    Valid Values: 1 | 3 | 31536000 | 94608000

    ", + "DescribeReservedDBInstancesOfferingsMessage$ProductDescription": "

    Product description filter value. Specify this parameter to show only the available offerings matching the specified product description.

    ", + "DescribeReservedDBInstancesOfferingsMessage$OfferingType": "

    The offering type filter value. Specify this parameter to show only the available offerings matching the specified offering type.

    Valid Values: \"Light Utilization\" | \"Medium Utilization\" | \"Heavy Utilization\"

    ", + "DescribeReservedDBInstancesOfferingsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DownloadDBLogFilePortionDetails$LogFileData": "

    Entries from the specified log file.

    ", + "DownloadDBLogFilePortionDetails$Marker": "

    An optional pagination token provided by a previous DownloadDBLogFilePortion request.

    ", + "DownloadDBLogFilePortionMessage$DBInstanceIdentifier": "

    The customer-assigned name of the DB Instance that contains the log files you want to list.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DownloadDBLogFilePortionMessage$LogFileName": "

    The name of the log file to be downloaded.

    ", + "DownloadDBLogFilePortionMessage$Marker": "

    The pagination token provided in the previous request. If this parameter is specified, the response includes only records beyond the marker, up to MaxRecords.

    ", + "EC2SecurityGroup$Status": "

    Provides the status of the EC2 security group. Status can be \"authorizing\", \"authorized\", \"revoking\", and \"revoked\".

    ", + "EC2SecurityGroup$EC2SecurityGroupName": "

    Specifies the name of the EC2 Security Group.

    ", + "EC2SecurityGroup$EC2SecurityGroupId": "

    Specifies the id of the EC2 Security Group.

    ", + "EC2SecurityGroup$EC2SecurityGroupOwnerId": "

    Specifies the AWS ID of the owner of the EC2 security group specified in the EC2SecurityGroupName field.

    ", + "Endpoint$Address": "

    Specifies the DNS address of the DB Instance.

    ", + "EngineDefaults$DBParameterGroupFamily": "

    Specifies the name of the DB Parameter Group Family which the engine default parameters apply to.

    ", + "EngineDefaults$Marker": "

    An optional pagination token provided by a previous EngineDefaults request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "Event$SourceIdentifier": "

    Provides the identifier for the source of the event.

    ", + "Event$Message": "

    Provides the text of this event.

    ", + "EventCategoriesList$member": null, + "EventCategoriesMap$SourceType": "

    The source type that the returned categories belong to.

    ", + "EventSubscription$CustomerAwsId": "

    The AWS customer account associated with the RDS event notification subscription.

    ", + "EventSubscription$CustSubscriptionId": "

    The RDS event notification subscription Id.

    ", + "EventSubscription$SnsTopicArn": "

    The topic ARN of the RDS event notification subscription.

    ", + "EventSubscription$Status": "

    The status of the RDS event notification subscription.

    Constraints:

    Can be one of the following: creating | modifying | deleting | active | no-permission | topic-not-exist

    The status \"no-permission\" indicates that RDS no longer has permission to post to the SNS topic. The status \"topic-not-exist\" indicates that the topic was deleted after the subscription was created.

    ", + "EventSubscription$SubscriptionCreationTime": "

    The time the RDS event notification subscription was created.

    ", + "EventSubscription$SourceType": "

    The source type for the RDS event notification subscription.

    ", + "EventSubscriptionsMessage$Marker": "

    An optional pagination token provided by a previous DescribeEventSubscriptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "EventsMessage$Marker": "

    An optional pagination token provided by a previous Events request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "IPRange$Status": "

    Specifies the status of the IP range. Status can be \"authorizing\", \"authorized\", \"revoking\", and \"revoked\".

    ", + "IPRange$CIDRIP": "

    Specifies the IP range.

    ", + "KeyList$member": null, + "ListTagsForResourceMessage$ResourceName": "

    The DB Instance with tags to be listed. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

    ", + "ModifyDBInstanceMessage$DBInstanceIdentifier": "

    The DB Instance identifier. This value is stored as a lowercase string.

    Constraints:

    • Must be the identifier for an existing DB Instance
    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "ModifyDBInstanceMessage$DBInstanceClass": "

    The new compute and memory capacity of the DB Instance. To determine the instance classes that are available for a particular DB engine, use the DescribeOrderableDBInstanceOptions action.

    Passing a value for this parameter causes an outage during the change and is applied during the next maintenance window, unless the ApplyImmediately parameter is specified as true for this request.

    Default: Uses existing setting

    Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge

    ", + "ModifyDBInstanceMessage$MasterUserPassword": "

    The new password for the DB Instance master user. Can be any printable ASCII character except \"/\", \"\\\", or \"@\".

    Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the MasterUserPassword element exists in the PendingModifiedValues element of the operation response.

    Default: Uses existing setting

    Constraints: Must be 8 to 41 alphanumeric characters (MySQL), 8 to 30 alphanumeric characters (Oracle), or 8 to 128 alphanumeric characters (SQL Server).

    Amazon RDS API actions never return the password, so this action provides a way to regain access to a master instance user if the password is lost. ", + "ModifyDBInstanceMessage$DBParameterGroupName": "

    The name of the DB Parameter Group to apply to this DB Instance. Changing this parameter does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

    Default: Uses existing setting

    Constraints: The DB Parameter Group must be in the same DB Parameter Group family as this DB Instance.

    ", + "ModifyDBInstanceMessage$PreferredBackupWindow": "

    The daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible.

    Constraints:

    • Must be in the format hh24:mi-hh24:mi
    • Times should be Universal Time Coordinated (UTC)
    • Must not conflict with the preferred maintenance window
    • Must be at least 30 minutes
    ", + "ModifyDBInstanceMessage$PreferredMaintenanceWindow": "

    The weekly time range (in UTC) during which system maintenance can occur, which may result in an outage. Changing this parameter does not result in an outage, except in the following situation, and the change is asynchronously applied as soon as possible. If there are pending actions that cause a reboot, and the maintenance window is changed to include the current time, then changing this parameter will cause a reboot of the DB Instance. If moving this window to the current time, there must be at least 30 minutes between the current time and end of the window to ensure pending changes are applied.

    Default: Uses existing setting

    Format: ddd:hh24:mi-ddd:hh24:mi

    Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun

    Constraints: Must be at least 30 minutes

    ", + "ModifyDBInstanceMessage$EngineVersion": "

    The version number of the database engine to upgrade to. Changing this parameter results in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

    For major version upgrades, if a nondefault DB Parameter Group is currently in use, a new DB Parameter Group in the DB Parameter Group Family for the new engine version must be specified. The new DB Parameter Group can be the default for that DB Parameter Group Family.

    Example: 5.1.42

    ", + "ModifyDBInstanceMessage$OptionGroupName": "

    Indicates that the DB Instance should be associated with the specified option group. Changing this parameter does not result in an outage except in the following case and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. If the parameter change results in an option group that enables OEM, this change can cause a brief (sub-second) period during which new connections are rejected but existing connections are not interrupted.

    Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with that instance.

    ", + "ModifyDBInstanceMessage$NewDBInstanceIdentifier": "

    The new DB Instance identifier for the DB Instance when renaming a DB Instance. This value is stored as a lowercase string.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "ModifyDBParameterGroupMessage$DBParameterGroupName": "

    The name of the DB Parameter Group.

    Constraints:

    • Must be the name of an existing DB Parameter Group
    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "ModifyDBSubnetGroupMessage$DBSubnetGroupName": "

    The name for the DB Subnet Group. This value is stored as a lowercase string.

    Constraints: Must contain no more than 255 alphanumeric characters or hyphens. Must not be \"Default\".

    Example: mySubnetgroup

    ", + "ModifyDBSubnetGroupMessage$DBSubnetGroupDescription": "

    The description for the DB Subnet Group.

    ", + "ModifyEventSubscriptionMessage$SubscriptionName": "

    The name of the RDS event notification subscription.

    ", + "ModifyEventSubscriptionMessage$SnsTopicArn": "

    The Amazon Resource Name (ARN) of the SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it.

    ", + "ModifyEventSubscriptionMessage$SourceType": "

    The type of source that will be generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. If this value is not specified, all events are returned.

    Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot

    ", + "ModifyOptionGroupMessage$OptionGroupName": "

    The name of the option group to be modified.

    Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with that instance.

    ", + "Option$OptionName": "

    The name of the option.

    ", + "Option$OptionDescription": "

    The description of the option.

    ", + "OptionConfiguration$OptionName": "

    The configuration of options to include in a group.

    ", + "OptionGroup$OptionGroupName": "

    Specifies the name of the option group.

    ", + "OptionGroup$OptionGroupDescription": "

    Provides the description of the option group.

    ", + "OptionGroup$EngineName": "

    Engine name that this option group can be applied to.

    ", + "OptionGroup$MajorEngineVersion": "

    Indicates the major engine version associated with this option group.

    ", + "OptionGroup$VpcId": "

    If AllowsVpcAndNonVpcInstanceMemberships is 'false', this field is blank. If AllowsVpcAndNonVpcInstanceMemberships is 'true' and this field is blank, then this option group can be applied to both VPC and non-VPC instances. If this field contains a value, then this option group can only be applied to instances that are in the VPC indicated by this field.

    ", + "OptionGroupMembership$OptionGroupName": "

    The name of the option group that the instance belongs to.

    ", + "OptionGroupMembership$Status": "

    The status of the DB Instance's option group membership (e.g. in-sync, pending, pending-maintenance, applying).

    ", + "OptionGroupOption$Name": "

    The name of the option.

    ", + "OptionGroupOption$Description": "

    The description of the option.

    ", + "OptionGroupOption$EngineName": "

    Engine name that this option can be applied to.

    ", + "OptionGroupOption$MajorEngineVersion": "

    Indicates the major engine version that the option is available for.

    ", + "OptionGroupOption$MinimumRequiredMinorEngineVersion": "

    The minimum required engine version for the option to be applied.

    ", + "OptionGroupOptionSetting$SettingName": "

    The name of the option group option.

    ", + "OptionGroupOptionSetting$SettingDescription": "

    The description of the option group option.

    ", + "OptionGroupOptionSetting$DefaultValue": "

    The default value for the option group option.

    ", + "OptionGroupOptionSetting$ApplyType": "

    The DB engine specific parameter type for the option group option.

    ", + "OptionGroupOptionSetting$AllowedValues": "

    Indicates the acceptable values for the option group option.

    ", + "OptionGroupOptionsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "OptionGroups$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "OptionNamesList$member": null, + "OptionSetting$Name": "

    The name of the option that has settings that you can set.

    ", + "OptionSetting$Value": "

    The current value of the option setting.

    ", + "OptionSetting$DefaultValue": "

    The default value of the option setting.

    ", + "OptionSetting$Description": "

    The description of the option setting.

    ", + "OptionSetting$ApplyType": "

    The DB engine specific parameter type.

    ", + "OptionSetting$DataType": "

    The data type of the option setting.

    ", + "OptionSetting$AllowedValues": "

    The allowed values of the option setting.

    ", + "OptionsDependedOn$member": null, + "OrderableDBInstanceOption$Engine": "

    The engine type of the orderable DB Instance.

    ", + "OrderableDBInstanceOption$EngineVersion": "

    The engine version of the orderable DB Instance.

    ", + "OrderableDBInstanceOption$DBInstanceClass": "

    The DB Instance class for the orderable DB Instance.

    ", + "OrderableDBInstanceOption$LicenseModel": "

    The license model for the orderable DB Instance.

    ", + "OrderableDBInstanceOptionsMessage$Marker": "

    An optional pagination token provided by a previous OrderableDBInstanceOptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "Parameter$ParameterName": "

    Specifies the name of the parameter.

    ", + "Parameter$ParameterValue": "

    Specifies the value of the parameter.

    ", + "Parameter$Description": "

    Provides a description of the parameter.

    ", + "Parameter$Source": "

    Indicates the source of the parameter value.

    ", + "Parameter$ApplyType": "

    Specifies the engine-specific parameter type.

    ", + "Parameter$DataType": "

    Specifies the valid data type for the parameter.

    ", + "Parameter$AllowedValues": "

    Specifies the valid range of values for the parameter.

    ", + "Parameter$MinimumEngineVersion": "

    The earliest engine version to which the parameter can apply.

    ", + "PendingModifiedValues$DBInstanceClass": "

    Contains the new DBInstanceClass for the DB Instance that will be applied or is in progress.

    ", + "PendingModifiedValues$MasterUserPassword": "

    Contains the pending or in-progress change of the master credentials for the DB Instance.

    ", + "PendingModifiedValues$EngineVersion": "

    Indicates the database engine version.

    ", + "PendingModifiedValues$DBInstanceIdentifier": "

    Contains the new DBInstanceIdentifier for the DB Instance that will be applied or is in progress.

    ", + "PromoteReadReplicaMessage$DBInstanceIdentifier": "

    The DB Instance identifier. This value is stored as a lowercase string.

    Constraints:

    • Must be the identifier for an existing Read Replica DB Instance
    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: mydbinstance

    ", + "PromoteReadReplicaMessage$PreferredBackupWindow": "

    The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter.

    Default: A 30-minute window selected at random from an 8-hour block of time per region. The following list shows the time blocks for each region from which the default backup windows are assigned.

    • US-East (Northern Virginia) Region: 03:00-11:00 UTC
    • US-West (Northern California) Region: 06:00-14:00 UTC
    • EU (Ireland) Region: 22:00-06:00 UTC
    • Asia Pacific (Singapore) Region: 14:00-22:00 UTC
    • Asia Pacific (Tokyo) Region: 17:00-03:00 UTC

    Constraints: Must be in the format hh24:mi-hh24:mi. Times should be Universal Time Coordinated (UTC). Must not conflict with the preferred maintenance window. Must be at least 30 minutes.

    ", + "PurchaseReservedDBInstancesOfferingMessage$ReservedDBInstancesOfferingId": "

    The ID of the Reserved DB Instance offering to purchase.

    Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706

    ", + "PurchaseReservedDBInstancesOfferingMessage$ReservedDBInstanceId": "

    Customer-specified identifier to track this reservation.

    Example: myreservationID

    ", + "ReadReplicaDBInstanceIdentifierList$member": null, + "RebootDBInstanceMessage$DBInstanceIdentifier": "

    The DB Instance identifier. This parameter is stored as a lowercase string.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RecurringCharge$RecurringChargeFrequency": "

    The frequency of the recurring charge.

    ", + "RemoveSourceIdentifierFromSubscriptionMessage$SubscriptionName": "

    The name of the RDS event notification subscription you want to remove a source identifier from.

    ", + "RemoveSourceIdentifierFromSubscriptionMessage$SourceIdentifier": "

    The source identifier to be removed from the subscription, such as the DB instance identifier for a DB instance or the name of a security group.

    ", + "RemoveTagsFromResourceMessage$ResourceName": "

    The DB Instance the tags will be removed from. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

    ", + "ReservedDBInstance$ReservedDBInstanceId": "

    The unique identifier for the reservation.

    ", + "ReservedDBInstance$ReservedDBInstancesOfferingId": "

    The offering identifier.

    ", + "ReservedDBInstance$DBInstanceClass": "

    The DB instance class for the reserved DB Instance.

    ", + "ReservedDBInstance$CurrencyCode": "

    The currency code for the reserved DB Instance.

    ", + "ReservedDBInstance$ProductDescription": "

    The description of the reserved DB Instance.

    ", + "ReservedDBInstance$OfferingType": "

    The offering type of this reserved DB Instance.

    ", + "ReservedDBInstance$State": "

    The state of the reserved DB Instance.

    ", + "ReservedDBInstanceMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "ReservedDBInstancesOffering$ReservedDBInstancesOfferingId": "

    The offering identifier.

    ", + "ReservedDBInstancesOffering$DBInstanceClass": "

    The DB instance class for the reserved DB Instance.

    ", + "ReservedDBInstancesOffering$CurrencyCode": "

    The currency code for the reserved DB Instance offering.

    ", + "ReservedDBInstancesOffering$ProductDescription": "

    The database engine used by the offering.

    ", + "ReservedDBInstancesOffering$OfferingType": "

    The offering type.

    ", + "ReservedDBInstancesOfferingMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "ResetDBParameterGroupMessage$DBParameterGroupName": "

    The name of the DB Parameter Group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBInstanceIdentifier": "

    Name of the DB Instance to create from the DB Snapshot. This parameter isn't case sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBSnapshotIdentifier": "

    The identifier for the DB Snapshot to restore from.

    Constraints:

    • Must contain from 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-snapshot-id

    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBInstanceClass": "

    The compute and memory capacity of the Amazon RDS DB instance.

    Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge

    ", + "RestoreDBInstanceFromDBSnapshotMessage$AvailabilityZone": "

    The EC2 Availability Zone that the database instance will be created in.

    Default: A random, system-chosen Availability Zone.

    Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    Example: us-east-1a

    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBSubnetGroupName": "

    The DB Subnet Group name to use for the new instance.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$LicenseModel": "

    License model information for the restored DB Instance.

    Default: Same as source.

    Valid values: license-included | bring-your-own-license | general-public-license

    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBName": "

    The database name for the restored DB Instance.

    This parameter doesn't apply to the MySQL engine.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$Engine": "

    The database engine to use for the new instance.

    Default: The same as source

    Constraint: Must be compatible with the engine of the source

    Example: oracle-ee

    ", + "RestoreDBInstanceFromDBSnapshotMessage$OptionGroupName": "

    The name of the option group to be used for the restored DB instance.

    Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with that instance.

    ", + "RestoreDBInstanceToPointInTimeMessage$SourceDBInstanceIdentifier": "

    The identifier of the source DB Instance from which to restore.

    Constraints:

    • Must be the identifier of an existing database instance
    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RestoreDBInstanceToPointInTimeMessage$TargetDBInstanceIdentifier": "

    The name of the new database instance to be created.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RestoreDBInstanceToPointInTimeMessage$DBInstanceClass": "

    The compute and memory capacity of the Amazon RDS DB instance.

    Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge

    Default: The same DBInstanceClass as the original DB Instance.

    ", + "RestoreDBInstanceToPointInTimeMessage$AvailabilityZone": "

    The EC2 Availability Zone that the database instance will be created in.

    Default: A random, system-chosen Availability Zone.

    Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    Example: us-east-1a

    ", + "RestoreDBInstanceToPointInTimeMessage$DBSubnetGroupName": "

    The DB subnet group name to use for the new instance.

    ", + "RestoreDBInstanceToPointInTimeMessage$LicenseModel": "

    License model information for the restored DB Instance.

    Default: Same as source.

    Valid values: license-included | bring-your-own-license | general-public-license

    ", + "RestoreDBInstanceToPointInTimeMessage$DBName": "

    The database name for the restored DB Instance.

    This parameter is not used for the MySQL engine.

    ", + "RestoreDBInstanceToPointInTimeMessage$Engine": "

    The database engine to use for the new instance.

    Default: The same as source

    Constraint: Must be compatible with the engine of the source

    Example: oracle-ee

    ", + "RestoreDBInstanceToPointInTimeMessage$OptionGroupName": "

    The name of the option group to be used for the restored DB instance.

    Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with that instance.

    ", + "RevokeDBSecurityGroupIngressMessage$DBSecurityGroupName": "

    The name of the DB Security Group to revoke ingress from.

    ", + "RevokeDBSecurityGroupIngressMessage$CIDRIP": "

    The IP range to revoke access from. Must be a valid CIDR range. If CIDRIP is specified, EC2SecurityGroupName, EC2SecurityGroupId and EC2SecurityGroupOwnerId cannot be provided.

    ", + "RevokeDBSecurityGroupIngressMessage$EC2SecurityGroupName": "

    The name of the EC2 Security Group to revoke access from. For VPC DB Security Groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "RevokeDBSecurityGroupIngressMessage$EC2SecurityGroupId": "

    The id of the EC2 Security Group to revoke access from. For VPC DB Security Groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "RevokeDBSecurityGroupIngressMessage$EC2SecurityGroupOwnerId": "

    The AWS Account Number of the owner of the EC2 security group specified in the EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable value. For VPC DB Security Groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "SourceIdsList$member": null, + "Subnet$SubnetIdentifier": "

    Specifies the identifier of the subnet.

    ", + "Subnet$SubnetStatus": "

    Specifies the status of the subnet.

    ", + "SubnetIdentifierList$member": null, + "Tag$Key": "

    A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and cannot be prefixed with \"aws:\". The string may contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").

    ", + "Tag$Value": "

    A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and cannot be prefixed with \"aws:\". The string may contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").

    ", + "VpcSecurityGroupIdList$member": null, + "VpcSecurityGroupMembership$VpcSecurityGroupId": "

    The name of the VPC security group.

    ", + "VpcSecurityGroupMembership$Status": "

    The status of the VPC Security Group.

    " + } + }, + "Subnet": { + "base": "

    This data type is used as a response element in the DescribeDBSubnetGroups action.

    ", + "refs": { + "SubnetList$member": null + } + }, + "SubnetAlreadyInUse": { + "base": "

    The DB subnet is already in use in the Availability Zone.

    ", + "refs": { + } + }, + "SubnetIdentifierList": { + "base": null, + "refs": { + "CreateDBSubnetGroupMessage$SubnetIds": "

    The EC2 Subnet IDs for the DB Subnet Group.

    ", + "ModifyDBSubnetGroupMessage$SubnetIds": "

    The EC2 Subnet IDs for the DB Subnet Group.

    " + } + }, + "SubnetList": { + "base": null, + "refs": { + "DBSubnetGroup$Subnets": "

    Contains a list of Subnet elements.

    " + } + }, + "SubscriptionAlreadyExistFault": { + "base": "

    The supplied subscription name already exists.

    ", + "refs": { + } + }, + "SubscriptionCategoryNotFoundFault": { + "base": "

    The supplied category does not exist.

    ", + "refs": { + } + }, + "SubscriptionNotFoundFault": { + "base": "

    The subscription name does not exist.

    ", + "refs": { + } + }, + "SupportedCharacterSetsList": { + "base": null, + "refs": { + "DBEngineVersion$SupportedCharacterSets": "

    A list of the character sets supported by this engine for the CharacterSetName parameter of the CreateDBInstance API.

    " + } + }, + "TStamp": { + "base": null, + "refs": { + "DBInstance$InstanceCreateTime": "

    Provides the date and time the DB Instance was created.

    ", + "DBInstance$LatestRestorableTime": "

    Specifies the latest time to which a database can be restored with point-in-time restore.

    ", + "DBSnapshot$SnapshotCreateTime": "

    Provides the time (UTC) when the snapshot was taken.

    ", + "DBSnapshot$InstanceCreateTime": "

    Specifies the time (UTC) when the DB Instance, from which the snapshot was taken, was created.

    ", + "DescribeEventsMessage$StartTime": "

    The beginning of the time interval to retrieve events for, specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.

    Example: 2009-07-08T18:00Z

    ", + "DescribeEventsMessage$EndTime": "

    The end of the time interval for which to retrieve events, specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.

    Example: 2009-07-08T18:00Z

    ", + "Event$Date": "

    Specifies the date and time of the event.

    ", + "ReservedDBInstance$StartTime": "

    The time the reservation started.

    ", + "RestoreDBInstanceToPointInTimeMessage$RestoreTime": "

    The date and time to restore from.

    Valid Values: Value must be a UTC time

    Constraints:

    • Must be before the latest restorable time for the DB Instance
    • Cannot be specified if UseLatestRestorableTime parameter is true

    Example: 2009-09-07T23:45:00Z

    " + } + }, + "Tag": { + "base": "

    Metadata assigned to a DB Instance consisting of a key-value pair.

    ", + "refs": { + "TagList$member": null + } + }, + "TagList": { + "base": "

    A list of tags.

    ", + "refs": { + "AddTagsToResourceMessage$Tags": "

    The tags to be assigned to the DB Instance.

    ", + "TagListMessage$TagList": "

    List of tags returned by the ListTagsForResource operation.

    " + } + }, + "TagListMessage": { + "base": "

    ", + "refs": { + } + }, + "VpcSecurityGroupIdList": { + "base": null, + "refs": { + "CreateDBInstanceMessage$VpcSecurityGroupIds": "

    A list of EC2 VPC Security Groups to associate with this DB Instance.

    Default: The default EC2 VPC Security Group for the DB Subnet group's VPC.

    ", + "ModifyDBInstanceMessage$VpcSecurityGroupIds": "

    A list of EC2 VPC Security Groups to authorize on this DB Instance. This change is asynchronously applied as soon as possible.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "OptionConfiguration$VpcSecurityGroupMemberships": "

    A list of VpcSecurityGroupMembership name strings used for this option.

    " + } + }, + "VpcSecurityGroupMembership": { + "base": "

    This data type is used as a response element for queries on VPC security group membership.

    ", + "refs": { + "VpcSecurityGroupMembershipList$member": null + } + }, + "VpcSecurityGroupMembershipList": { + "base": null, + "refs": { + "DBInstance$VpcSecurityGroups": "

    Provides a list of VPC security group elements that the DB Instance belongs to.

    ", + "Option$VpcSecurityGroupMemberships": "

    If the option requires access to a port, then this VPC Security Group allows access to the port.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-02-12/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-02-12/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-02-12/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-02-12/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-02-12/paginators-1.json new file mode 100644 index 000000000..662845c12 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-02-12/paginators-1.json @@ -0,0 +1,110 @@ +{ + "pagination": { + "DescribeDBEngineVersions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBEngineVersions" + }, + "DescribeDBInstances": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBInstances" + }, + "DescribeDBLogFiles": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DescribeDBLogFiles" + }, + "DescribeDBParameterGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBParameterGroups" + }, + "DescribeDBParameters": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "Parameters" + }, + "DescribeDBSecurityGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBSecurityGroups" + }, + "DescribeDBSnapshots": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBSnapshots" + }, + "DescribeDBSubnetGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBSubnetGroups" + }, + "DescribeEngineDefaultParameters": { + "input_token": "Marker", + "output_token": "EngineDefaults.Marker", + "limit_key": "MaxRecords", + "result_key": "EngineDefaults.Parameters" + }, + "DescribeEventSubscriptions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "EventSubscriptionsList" + }, + "DescribeEvents": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "Events" + }, + "DescribeOptionGroupOptions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "OptionGroupOptions" + }, + "DescribeOptionGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "OptionGroupsList" + }, + "DescribeOrderableDBInstanceOptions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "OrderableDBInstanceOptions" + }, + "DescribeReservedDBInstances": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "ReservedDBInstances" + }, + "DescribeReservedDBInstancesOfferings": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "ReservedDBInstancesOfferings" + }, + "DownloadDBLogFilePortion": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "NumberOfLines", + "more_results": "AdditionalDataPending", + "result_key": "LogFileData" + }, + "ListTagsForResource": { + "result_key": "TagList" + } + } +} diff --git 
a/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-09-09/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-09-09/api-2.json new file mode 100644 index 000000000..264239184 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-09-09/api-2.json @@ -0,0 +1,3158 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2013-09-09", + "endpointPrefix":"rds", + "protocol":"query", + "serviceAbbreviation":"Amazon RDS", + "serviceFullName":"Amazon Relational Database Service", + "signatureVersion":"v4", + "xmlNamespace":"http://rds.amazonaws.com/doc/2013-09-09/" + }, + "operations":{ + "AddSourceIdentifierToSubscription":{ + "name":"AddSourceIdentifierToSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddSourceIdentifierToSubscriptionMessage"}, + "output":{ + "shape":"AddSourceIdentifierToSubscriptionResult", + "resultWrapper":"AddSourceIdentifierToSubscriptionResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"SourceNotFoundFault"} + ] + }, + "AddTagsToResource":{ + "name":"AddTagsToResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddTagsToResourceMessage"}, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "AuthorizeDBSecurityGroupIngress":{ + "name":"AuthorizeDBSecurityGroupIngress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AuthorizeDBSecurityGroupIngressMessage"}, + "output":{ + "shape":"AuthorizeDBSecurityGroupIngressResult", + "resultWrapper":"AuthorizeDBSecurityGroupIngressResult" + }, + "errors":[ + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"InvalidDBSecurityGroupStateFault"}, + {"shape":"AuthorizationAlreadyExistsFault"}, + {"shape":"AuthorizationQuotaExceededFault"} + ] + }, + "CopyDBSnapshot":{ + "name":"CopyDBSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CopyDBSnapshotMessage"}, + "output":{ + "shape":"CopyDBSnapshotResult", + "resultWrapper":"CopyDBSnapshotResult" + }, + "errors":[ + {"shape":"DBSnapshotAlreadyExistsFault"}, + {"shape":"DBSnapshotNotFoundFault"}, + {"shape":"InvalidDBSnapshotStateFault"}, + {"shape":"SnapshotQuotaExceededFault"} + ] + }, + "CreateDBInstance":{ + "name":"CreateDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBInstanceMessage"}, + "output":{ + "shape":"CreateDBInstanceResult", + "resultWrapper":"CreateDBInstanceResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"} + ] + }, + "CreateDBInstanceReadReplica":{ + "name":"CreateDBInstanceReadReplica", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBInstanceReadReplicaMessage"}, + "output":{ + "shape":"CreateDBInstanceReadReplicaResult", + "resultWrapper":"CreateDBInstanceReadReplicaResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"DBParameterGroupNotFoundFault"}, + 
{"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"DBSubnetGroupNotAllowedFault"}, + {"shape":"InvalidDBSubnetGroupFault"} + ] + }, + "CreateDBParameterGroup":{ + "name":"CreateDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBParameterGroupMessage"}, + "output":{ + "shape":"CreateDBParameterGroupResult", + "resultWrapper":"CreateDBParameterGroupResult" + }, + "errors":[ + {"shape":"DBParameterGroupQuotaExceededFault"}, + {"shape":"DBParameterGroupAlreadyExistsFault"} + ] + }, + "CreateDBSecurityGroup":{ + "name":"CreateDBSecurityGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBSecurityGroupMessage"}, + "output":{ + "shape":"CreateDBSecurityGroupResult", + "resultWrapper":"CreateDBSecurityGroupResult" + }, + "errors":[ + {"shape":"DBSecurityGroupAlreadyExistsFault"}, + {"shape":"DBSecurityGroupQuotaExceededFault"}, + {"shape":"DBSecurityGroupNotSupportedFault"} + ] + }, + "CreateDBSnapshot":{ + "name":"CreateDBSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBSnapshotMessage"}, + "output":{ + "shape":"CreateDBSnapshotResult", + "resultWrapper":"CreateDBSnapshotResult" + }, + "errors":[ + {"shape":"DBSnapshotAlreadyExistsFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"SnapshotQuotaExceededFault"} + ] + }, + "CreateDBSubnetGroup":{ + "name":"CreateDBSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBSubnetGroupMessage"}, + "output":{ + "shape":"CreateDBSubnetGroupResult", + "resultWrapper":"CreateDBSubnetGroupResult" + }, + "errors":[ + {"shape":"DBSubnetGroupAlreadyExistsFault"}, + {"shape":"DBSubnetGroupQuotaExceededFault"}, + {"shape":"DBSubnetQuotaExceededFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"} + ] + }, + "CreateEventSubscription":{ + "name":"CreateEventSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateEventSubscriptionMessage"}, + "output":{ + "shape":"CreateEventSubscriptionResult", + "resultWrapper":"CreateEventSubscriptionResult" + }, + "errors":[ + {"shape":"EventSubscriptionQuotaExceededFault"}, + {"shape":"SubscriptionAlreadyExistFault"}, + {"shape":"SNSInvalidTopicFault"}, + {"shape":"SNSNoAuthorizationFault"}, + {"shape":"SNSTopicArnNotFoundFault"}, + {"shape":"SubscriptionCategoryNotFoundFault"}, + {"shape":"SourceNotFoundFault"} + ] + }, + "CreateOptionGroup":{ + "name":"CreateOptionGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateOptionGroupMessage"}, + "output":{ + "shape":"CreateOptionGroupResult", + "resultWrapper":"CreateOptionGroupResult" + }, + "errors":[ + {"shape":"OptionGroupAlreadyExistsFault"}, + {"shape":"OptionGroupQuotaExceededFault"} + ] + }, + "DeleteDBInstance":{ + "name":"DeleteDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBInstanceMessage"}, + "output":{ + "shape":"DeleteDBInstanceResult", + "resultWrapper":"DeleteDBInstanceResult" + 
}, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBSnapshotAlreadyExistsFault"}, + {"shape":"SnapshotQuotaExceededFault"} + ] + }, + "DeleteDBParameterGroup":{ + "name":"DeleteDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBParameterGroupMessage"}, + "errors":[ + {"shape":"InvalidDBParameterGroupStateFault"}, + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "DeleteDBSecurityGroup":{ + "name":"DeleteDBSecurityGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBSecurityGroupMessage"}, + "errors":[ + {"shape":"InvalidDBSecurityGroupStateFault"}, + {"shape":"DBSecurityGroupNotFoundFault"} + ] + }, + "DeleteDBSnapshot":{ + "name":"DeleteDBSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBSnapshotMessage"}, + "output":{ + "shape":"DeleteDBSnapshotResult", + "resultWrapper":"DeleteDBSnapshotResult" + }, + "errors":[ + {"shape":"InvalidDBSnapshotStateFault"}, + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "DeleteDBSubnetGroup":{ + "name":"DeleteDBSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBSubnetGroupMessage"}, + "errors":[ + {"shape":"InvalidDBSubnetGroupStateFault"}, + {"shape":"InvalidDBSubnetStateFault"}, + {"shape":"DBSubnetGroupNotFoundFault"} + ] + }, + "DeleteEventSubscription":{ + "name":"DeleteEventSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteEventSubscriptionMessage"}, + "output":{ + "shape":"DeleteEventSubscriptionResult", + "resultWrapper":"DeleteEventSubscriptionResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"InvalidEventSubscriptionStateFault"} + ] + }, + "DeleteOptionGroup":{ + "name":"DeleteOptionGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteOptionGroupMessage"}, + "errors":[ + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"InvalidOptionGroupStateFault"} + ] + }, + "DescribeDBEngineVersions":{ + "name":"DescribeDBEngineVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBEngineVersionsMessage"}, + "output":{ + "shape":"DBEngineVersionMessage", + "resultWrapper":"DescribeDBEngineVersionsResult" + } + }, + "DescribeDBInstances":{ + "name":"DescribeDBInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBInstancesMessage"}, + "output":{ + "shape":"DBInstanceMessage", + "resultWrapper":"DescribeDBInstancesResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"} + ] + }, + "DescribeDBLogFiles":{ + "name":"DescribeDBLogFiles", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBLogFilesMessage"}, + "output":{ + "shape":"DescribeDBLogFilesResponse", + "resultWrapper":"DescribeDBLogFilesResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"} + ] + }, + "DescribeDBParameterGroups":{ + "name":"DescribeDBParameterGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBParameterGroupsMessage"}, + "output":{ + "shape":"DBParameterGroupsMessage", + "resultWrapper":"DescribeDBParameterGroupsResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "DescribeDBParameters":{ + "name":"DescribeDBParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBParametersMessage"}, + "output":{ + 
"shape":"DBParameterGroupDetails", + "resultWrapper":"DescribeDBParametersResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "DescribeDBSecurityGroups":{ + "name":"DescribeDBSecurityGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBSecurityGroupsMessage"}, + "output":{ + "shape":"DBSecurityGroupMessage", + "resultWrapper":"DescribeDBSecurityGroupsResult" + }, + "errors":[ + {"shape":"DBSecurityGroupNotFoundFault"} + ] + }, + "DescribeDBSnapshots":{ + "name":"DescribeDBSnapshots", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBSnapshotsMessage"}, + "output":{ + "shape":"DBSnapshotMessage", + "resultWrapper":"DescribeDBSnapshotsResult" + }, + "errors":[ + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "DescribeDBSubnetGroups":{ + "name":"DescribeDBSubnetGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBSubnetGroupsMessage"}, + "output":{ + "shape":"DBSubnetGroupMessage", + "resultWrapper":"DescribeDBSubnetGroupsResult" + }, + "errors":[ + {"shape":"DBSubnetGroupNotFoundFault"} + ] + }, + "DescribeEngineDefaultParameters":{ + "name":"DescribeEngineDefaultParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEngineDefaultParametersMessage"}, + "output":{ + "shape":"DescribeEngineDefaultParametersResult", + "resultWrapper":"DescribeEngineDefaultParametersResult" + } + }, + "DescribeEventCategories":{ + "name":"DescribeEventCategories", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventCategoriesMessage"}, + "output":{ + "shape":"EventCategoriesMessage", + "resultWrapper":"DescribeEventCategoriesResult" + } + }, + "DescribeEventSubscriptions":{ + "name":"DescribeEventSubscriptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventSubscriptionsMessage"}, + "output":{ + "shape":"EventSubscriptionsMessage", + "resultWrapper":"DescribeEventSubscriptionsResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"} + ] + }, + "DescribeEvents":{ + "name":"DescribeEvents", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventsMessage"}, + "output":{ + "shape":"EventsMessage", + "resultWrapper":"DescribeEventsResult" + } + }, + "DescribeOptionGroupOptions":{ + "name":"DescribeOptionGroupOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOptionGroupOptionsMessage"}, + "output":{ + "shape":"OptionGroupOptionsMessage", + "resultWrapper":"DescribeOptionGroupOptionsResult" + } + }, + "DescribeOptionGroups":{ + "name":"DescribeOptionGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOptionGroupsMessage"}, + "output":{ + "shape":"OptionGroups", + "resultWrapper":"DescribeOptionGroupsResult" + }, + "errors":[ + {"shape":"OptionGroupNotFoundFault"} + ] + }, + "DescribeOrderableDBInstanceOptions":{ + "name":"DescribeOrderableDBInstanceOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOrderableDBInstanceOptionsMessage"}, + "output":{ + "shape":"OrderableDBInstanceOptionsMessage", + "resultWrapper":"DescribeOrderableDBInstanceOptionsResult" + } + }, + "DescribeReservedDBInstances":{ + "name":"DescribeReservedDBInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedDBInstancesMessage"}, + "output":{ + "shape":"ReservedDBInstanceMessage", + 
"resultWrapper":"DescribeReservedDBInstancesResult" + }, + "errors":[ + {"shape":"ReservedDBInstanceNotFoundFault"} + ] + }, + "DescribeReservedDBInstancesOfferings":{ + "name":"DescribeReservedDBInstancesOfferings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedDBInstancesOfferingsMessage"}, + "output":{ + "shape":"ReservedDBInstancesOfferingMessage", + "resultWrapper":"DescribeReservedDBInstancesOfferingsResult" + }, + "errors":[ + {"shape":"ReservedDBInstancesOfferingNotFoundFault"} + ] + }, + "DownloadDBLogFilePortion":{ + "name":"DownloadDBLogFilePortion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DownloadDBLogFilePortionMessage"}, + "output":{ + "shape":"DownloadDBLogFilePortionDetails", + "resultWrapper":"DownloadDBLogFilePortionResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBLogFileNotFoundFault"} + ] + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceMessage"}, + "output":{ + "shape":"TagListMessage", + "resultWrapper":"ListTagsForResourceResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "ModifyDBInstance":{ + "name":"ModifyDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBInstanceMessage"}, + "output":{ + "shape":"ModifyDBInstanceResult", + "resultWrapper":"ModifyDBInstanceResult" + }, + "errors":[ + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"InvalidDBSecurityGroupStateFault"}, + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"DBUpgradeDependencyFailureFault"} + ] + }, + "ModifyDBParameterGroup":{ + "name":"ModifyDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBParameterGroupMessage"}, + "output":{ + "shape":"DBParameterGroupNameMessage", + "resultWrapper":"ModifyDBParameterGroupResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"InvalidDBParameterGroupStateFault"} + ] + }, + "ModifyDBSubnetGroup":{ + "name":"ModifyDBSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBSubnetGroupMessage"}, + "output":{ + "shape":"ModifyDBSubnetGroupResult", + "resultWrapper":"ModifyDBSubnetGroupResult" + }, + "errors":[ + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetQuotaExceededFault"}, + {"shape":"SubnetAlreadyInUse"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"} + ] + }, + "ModifyEventSubscription":{ + "name":"ModifyEventSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyEventSubscriptionMessage"}, + "output":{ + "shape":"ModifyEventSubscriptionResult", + "resultWrapper":"ModifyEventSubscriptionResult" + }, + "errors":[ + {"shape":"EventSubscriptionQuotaExceededFault"}, + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"SNSInvalidTopicFault"}, + {"shape":"SNSNoAuthorizationFault"}, + {"shape":"SNSTopicArnNotFoundFault"}, + {"shape":"SubscriptionCategoryNotFoundFault"} + ] + }, + 
"ModifyOptionGroup":{ + "name":"ModifyOptionGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyOptionGroupMessage"}, + "output":{ + "shape":"ModifyOptionGroupResult", + "resultWrapper":"ModifyOptionGroupResult" + }, + "errors":[ + {"shape":"InvalidOptionGroupStateFault"}, + {"shape":"OptionGroupNotFoundFault"} + ] + }, + "PromoteReadReplica":{ + "name":"PromoteReadReplica", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PromoteReadReplicaMessage"}, + "output":{ + "shape":"PromoteReadReplicaResult", + "resultWrapper":"PromoteReadReplicaResult" + }, + "errors":[ + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBInstanceNotFoundFault"} + ] + }, + "PurchaseReservedDBInstancesOffering":{ + "name":"PurchaseReservedDBInstancesOffering", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PurchaseReservedDBInstancesOfferingMessage"}, + "output":{ + "shape":"PurchaseReservedDBInstancesOfferingResult", + "resultWrapper":"PurchaseReservedDBInstancesOfferingResult" + }, + "errors":[ + {"shape":"ReservedDBInstancesOfferingNotFoundFault"}, + {"shape":"ReservedDBInstanceAlreadyExistsFault"}, + {"shape":"ReservedDBInstanceQuotaExceededFault"} + ] + }, + "RebootDBInstance":{ + "name":"RebootDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RebootDBInstanceMessage"}, + "output":{ + "shape":"RebootDBInstanceResult", + "resultWrapper":"RebootDBInstanceResult" + }, + "errors":[ + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBInstanceNotFoundFault"} + ] + }, + "RemoveSourceIdentifierFromSubscription":{ + "name":"RemoveSourceIdentifierFromSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveSourceIdentifierFromSubscriptionMessage"}, + "output":{ + "shape":"RemoveSourceIdentifierFromSubscriptionResult", + "resultWrapper":"RemoveSourceIdentifierFromSubscriptionResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"SourceNotFoundFault"} + ] + }, + "RemoveTagsFromResource":{ + "name":"RemoveTagsFromResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveTagsFromResourceMessage"}, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "ResetDBParameterGroup":{ + "name":"ResetDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetDBParameterGroupMessage"}, + "output":{ + "shape":"DBParameterGroupNameMessage", + "resultWrapper":"ResetDBParameterGroupResult" + }, + "errors":[ + {"shape":"InvalidDBParameterGroupStateFault"}, + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "RestoreDBInstanceFromDBSnapshot":{ + "name":"RestoreDBInstanceFromDBSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreDBInstanceFromDBSnapshotMessage"}, + "output":{ + "shape":"RestoreDBInstanceFromDBSnapshotResult", + "resultWrapper":"RestoreDBInstanceFromDBSnapshotResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"DBSnapshotNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"InvalidDBSnapshotStateFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"InvalidRestoreFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"}, + 
{"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"} + ] + }, + "RestoreDBInstanceToPointInTime":{ + "name":"RestoreDBInstanceToPointInTime", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreDBInstanceToPointInTimeMessage"}, + "output":{ + "shape":"RestoreDBInstanceToPointInTimeResult", + "resultWrapper":"RestoreDBInstanceToPointInTimeResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"PointInTimeRestoreNotEnabledFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"InvalidRestoreFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"} + ] + }, + "RevokeDBSecurityGroupIngress":{ + "name":"RevokeDBSecurityGroupIngress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RevokeDBSecurityGroupIngressMessage"}, + "output":{ + "shape":"RevokeDBSecurityGroupIngressResult", + "resultWrapper":"RevokeDBSecurityGroupIngressResult" + }, + "errors":[ + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"AuthorizationNotFoundFault"}, + {"shape":"InvalidDBSecurityGroupStateFault"} + ] + } + }, + "shapes":{ + "AddSourceIdentifierToSubscriptionMessage":{ + "type":"structure", + "required":[ + "SubscriptionName", + "SourceIdentifier" + ], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SourceIdentifier":{"shape":"String"} + } + }, + "AddSourceIdentifierToSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "AddTagsToResourceMessage":{ + "type":"structure", + "required":[ + "ResourceName", + "Tags" + ], + "members":{ + "ResourceName":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "ApplyMethod":{ + "type":"string", + "enum":[ + "immediate", + "pending-reboot" + ] + }, + "AuthorizationAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AuthorizationAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "AuthorizationNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AuthorizationNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "AuthorizationQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AuthorizationQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "AuthorizeDBSecurityGroupIngressMessage":{ + "type":"structure", + "required":["DBSecurityGroupName"], + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "CIDRIP":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupId":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"} + } + }, + "AuthorizeDBSecurityGroupIngressResult":{ + "type":"structure", + "members":{ + "DBSecurityGroup":{"shape":"DBSecurityGroup"} + } + }, + "AvailabilityZone":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "ProvisionedIopsCapable":{"shape":"Boolean"} + }, + "wrapper":true + }, + "AvailabilityZoneList":{ + "type":"list", + "member":{ + "shape":"AvailabilityZone", + "locationName":"AvailabilityZone" + } + }, + 
"Boolean":{"type":"boolean"}, + "BooleanOptional":{"type":"boolean"}, + "CharacterSet":{ + "type":"structure", + "members":{ + "CharacterSetName":{"shape":"String"}, + "CharacterSetDescription":{"shape":"String"} + } + }, + "CopyDBSnapshotMessage":{ + "type":"structure", + "required":[ + "SourceDBSnapshotIdentifier", + "TargetDBSnapshotIdentifier" + ], + "members":{ + "SourceDBSnapshotIdentifier":{"shape":"String"}, + "TargetDBSnapshotIdentifier":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CopyDBSnapshotResult":{ + "type":"structure", + "members":{ + "DBSnapshot":{"shape":"DBSnapshot"} + } + }, + "CreateDBInstanceMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "AllocatedStorage", + "DBInstanceClass", + "Engine", + "MasterUsername", + "MasterUserPassword" + ], + "members":{ + "DBName":{"shape":"String"}, + "DBInstanceIdentifier":{"shape":"String"}, + "AllocatedStorage":{"shape":"IntegerOptional"}, + "DBInstanceClass":{"shape":"String"}, + "Engine":{"shape":"String"}, + "MasterUsername":{"shape":"String"}, + "MasterUserPassword":{"shape":"String"}, + "DBSecurityGroups":{"shape":"DBSecurityGroupNameList"}, + "VpcSecurityGroupIds":{"shape":"VpcSecurityGroupIdList"}, + "AvailabilityZone":{"shape":"String"}, + "DBSubnetGroupName":{"shape":"String"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "DBParameterGroupName":{"shape":"String"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "PreferredBackupWindow":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "EngineVersion":{"shape":"String"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "LicenseModel":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "CharacterSetName":{"shape":"String"}, + "PubliclyAccessible":{"shape":"BooleanOptional"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateDBInstanceReadReplicaMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "SourceDBInstanceIdentifier" + ], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "SourceDBInstanceIdentifier":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "AvailabilityZone":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "PubliclyAccessible":{"shape":"BooleanOptional"}, + "Tags":{"shape":"TagList"}, + "DBSubnetGroupName":{"shape":"String"} + } + }, + "CreateDBInstanceReadReplicaResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "CreateDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "CreateDBParameterGroupMessage":{ + "type":"structure", + "required":[ + "DBParameterGroupName", + "DBParameterGroupFamily", + "Description" + ], + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "Description":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateDBParameterGroupResult":{ + "type":"structure", + "members":{ + "DBParameterGroup":{"shape":"DBParameterGroup"} + } + }, + "CreateDBSecurityGroupMessage":{ + "type":"structure", + "required":[ + "DBSecurityGroupName", + "DBSecurityGroupDescription" + ], + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "DBSecurityGroupDescription":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + 
"CreateDBSecurityGroupResult":{ + "type":"structure", + "members":{ + "DBSecurityGroup":{"shape":"DBSecurityGroup"} + } + }, + "CreateDBSnapshotMessage":{ + "type":"structure", + "required":[ + "DBSnapshotIdentifier", + "DBInstanceIdentifier" + ], + "members":{ + "DBSnapshotIdentifier":{"shape":"String"}, + "DBInstanceIdentifier":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateDBSnapshotResult":{ + "type":"structure", + "members":{ + "DBSnapshot":{"shape":"DBSnapshot"} + } + }, + "CreateDBSubnetGroupMessage":{ + "type":"structure", + "required":[ + "DBSubnetGroupName", + "DBSubnetGroupDescription", + "SubnetIds" + ], + "members":{ + "DBSubnetGroupName":{"shape":"String"}, + "DBSubnetGroupDescription":{"shape":"String"}, + "SubnetIds":{"shape":"SubnetIdentifierList"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateDBSubnetGroupResult":{ + "type":"structure", + "members":{ + "DBSubnetGroup":{"shape":"DBSubnetGroup"} + } + }, + "CreateEventSubscriptionMessage":{ + "type":"structure", + "required":[ + "SubscriptionName", + "SnsTopicArn" + ], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SnsTopicArn":{"shape":"String"}, + "SourceType":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "SourceIds":{"shape":"SourceIdsList"}, + "Enabled":{"shape":"BooleanOptional"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateEventSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "CreateOptionGroupMessage":{ + "type":"structure", + "required":[ + "OptionGroupName", + "EngineName", + "MajorEngineVersion", + "OptionGroupDescription" + ], + "members":{ + "OptionGroupName":{"shape":"String"}, + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"}, + "OptionGroupDescription":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateOptionGroupResult":{ + "type":"structure", + "members":{ + "OptionGroup":{"shape":"OptionGroup"} + } + }, + "DBEngineVersion":{ + "type":"structure", + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "DBEngineDescription":{"shape":"String"}, + "DBEngineVersionDescription":{"shape":"String"}, + "DefaultCharacterSet":{"shape":"CharacterSet"}, + "SupportedCharacterSets":{"shape":"SupportedCharacterSetsList"} + } + }, + "DBEngineVersionList":{ + "type":"list", + "member":{ + "shape":"DBEngineVersion", + "locationName":"DBEngineVersion" + } + }, + "DBEngineVersionMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBEngineVersions":{"shape":"DBEngineVersionList"} + } + }, + "DBInstance":{ + "type":"structure", + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Engine":{"shape":"String"}, + "DBInstanceStatus":{"shape":"String"}, + "MasterUsername":{"shape":"String"}, + "DBName":{"shape":"String"}, + "Endpoint":{"shape":"Endpoint"}, + "AllocatedStorage":{"shape":"Integer"}, + "InstanceCreateTime":{"shape":"TStamp"}, + "PreferredBackupWindow":{"shape":"String"}, + "BackupRetentionPeriod":{"shape":"Integer"}, + "DBSecurityGroups":{"shape":"DBSecurityGroupMembershipList"}, + "VpcSecurityGroups":{"shape":"VpcSecurityGroupMembershipList"}, + "DBParameterGroups":{"shape":"DBParameterGroupStatusList"}, + "AvailabilityZone":{"shape":"String"}, + "DBSubnetGroup":{"shape":"DBSubnetGroup"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + 
"PendingModifiedValues":{"shape":"PendingModifiedValues"}, + "LatestRestorableTime":{"shape":"TStamp"}, + "MultiAZ":{"shape":"Boolean"}, + "EngineVersion":{"shape":"String"}, + "AutoMinorVersionUpgrade":{"shape":"Boolean"}, + "ReadReplicaSourceDBInstanceIdentifier":{"shape":"String"}, + "ReadReplicaDBInstanceIdentifiers":{"shape":"ReadReplicaDBInstanceIdentifierList"}, + "LicenseModel":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupMemberships":{"shape":"OptionGroupMembershipList"}, + "CharacterSetName":{"shape":"String"}, + "SecondaryAvailabilityZone":{"shape":"String"}, + "PubliclyAccessible":{"shape":"Boolean"}, + "StatusInfos":{"shape":"DBInstanceStatusInfoList"} + }, + "wrapper":true + }, + "DBInstanceAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBInstanceAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBInstanceList":{ + "type":"list", + "member":{ + "shape":"DBInstance", + "locationName":"DBInstance" + } + }, + "DBInstanceMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBInstances":{"shape":"DBInstanceList"} + } + }, + "DBInstanceNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBInstanceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBInstanceStatusInfo":{ + "type":"structure", + "members":{ + "StatusType":{"shape":"String"}, + "Normal":{"shape":"Boolean"}, + "Status":{"shape":"String"}, + "Message":{"shape":"String"} + } + }, + "DBInstanceStatusInfoList":{ + "type":"list", + "member":{ + "shape":"DBInstanceStatusInfo", + "locationName":"DBInstanceStatusInfo" + } + }, + "DBLogFileNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBLogFileNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroup":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "Description":{"shape":"String"} + }, + "wrapper":true + }, + "DBParameterGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBParameterGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroupDetails":{ + "type":"structure", + "members":{ + "Parameters":{"shape":"ParametersList"}, + "Marker":{"shape":"String"} + } + }, + "DBParameterGroupList":{ + "type":"list", + "member":{ + "shape":"DBParameterGroup", + "locationName":"DBParameterGroup" + } + }, + "DBParameterGroupNameMessage":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{"shape":"String"} + } + }, + "DBParameterGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBParameterGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBParameterGroupQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroupStatus":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "ParameterApplyStatus":{"shape":"String"} + } + }, + "DBParameterGroupStatusList":{ + "type":"list", + "member":{ + "shape":"DBParameterGroupStatus", + "locationName":"DBParameterGroup" + } + }, + "DBParameterGroupsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + 
"DBParameterGroups":{"shape":"DBParameterGroupList"} + } + }, + "DBSecurityGroup":{ + "type":"structure", + "members":{ + "OwnerId":{"shape":"String"}, + "DBSecurityGroupName":{"shape":"String"}, + "DBSecurityGroupDescription":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "EC2SecurityGroups":{"shape":"EC2SecurityGroupList"}, + "IPRanges":{"shape":"IPRangeList"} + }, + "wrapper":true + }, + "DBSecurityGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSecurityGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroupMembership":{ + "type":"structure", + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "DBSecurityGroupMembershipList":{ + "type":"list", + "member":{ + "shape":"DBSecurityGroupMembership", + "locationName":"DBSecurityGroup" + } + }, + "DBSecurityGroupMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBSecurityGroups":{"shape":"DBSecurityGroups"} + } + }, + "DBSecurityGroupNameList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"DBSecurityGroupName" + } + }, + "DBSecurityGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSecurityGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroupNotSupportedFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSecurityGroupNotSupported", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"QuotaExceeded.DBSecurityGroup", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroups":{ + "type":"list", + "member":{ + "shape":"DBSecurityGroup", + "locationName":"DBSecurityGroup" + } + }, + "DBSnapshot":{ + "type":"structure", + "members":{ + "DBSnapshotIdentifier":{"shape":"String"}, + "DBInstanceIdentifier":{"shape":"String"}, + "SnapshotCreateTime":{"shape":"TStamp"}, + "Engine":{"shape":"String"}, + "AllocatedStorage":{"shape":"Integer"}, + "Status":{"shape":"String"}, + "Port":{"shape":"Integer"}, + "AvailabilityZone":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "InstanceCreateTime":{"shape":"TStamp"}, + "MasterUsername":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "LicenseModel":{"shape":"String"}, + "SnapshotType":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "PercentProgress":{"shape":"Integer"}, + "SourceRegion":{"shape":"String"} + }, + "wrapper":true + }, + "DBSnapshotAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSnapshotAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSnapshotList":{ + "type":"list", + "member":{ + "shape":"DBSnapshot", + "locationName":"DBSnapshot" + } + }, + "DBSnapshotMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBSnapshots":{"shape":"DBSnapshotList"} + } + }, + "DBSnapshotNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSnapshotNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroup":{ + "type":"structure", + "members":{ + "DBSubnetGroupName":{"shape":"String"}, + "DBSubnetGroupDescription":{"shape":"String"}, + "VpcId":{"shape":"String"}, + 
"SubnetGroupStatus":{"shape":"String"}, + "Subnets":{"shape":"SubnetList"} + }, + "wrapper":true + }, + "DBSubnetGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupDoesNotCoverEnoughAZs":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupDoesNotCoverEnoughAZs", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBSubnetGroups":{"shape":"DBSubnetGroups"} + } + }, + "DBSubnetGroupNotAllowedFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupNotAllowedFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroups":{ + "type":"list", + "member":{ + "shape":"DBSubnetGroup", + "locationName":"DBSubnetGroup" + } + }, + "DBSubnetQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBUpgradeDependencyFailureFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBUpgradeDependencyFailure", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DeleteDBInstanceMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "SkipFinalSnapshot":{"shape":"Boolean"}, + "FinalDBSnapshotIdentifier":{"shape":"String"} + } + }, + "DeleteDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "DeleteDBParameterGroupMessage":{ + "type":"structure", + "required":["DBParameterGroupName"], + "members":{ + "DBParameterGroupName":{"shape":"String"} + } + }, + "DeleteDBSecurityGroupMessage":{ + "type":"structure", + "required":["DBSecurityGroupName"], + "members":{ + "DBSecurityGroupName":{"shape":"String"} + } + }, + "DeleteDBSnapshotMessage":{ + "type":"structure", + "required":["DBSnapshotIdentifier"], + "members":{ + "DBSnapshotIdentifier":{"shape":"String"} + } + }, + "DeleteDBSnapshotResult":{ + "type":"structure", + "members":{ + "DBSnapshot":{"shape":"DBSnapshot"} + } + }, + "DeleteDBSubnetGroupMessage":{ + "type":"structure", + "required":["DBSubnetGroupName"], + "members":{ + "DBSubnetGroupName":{"shape":"String"} + } + }, + "DeleteEventSubscriptionMessage":{ + "type":"structure", + "required":["SubscriptionName"], + "members":{ + "SubscriptionName":{"shape":"String"} + } + }, + "DeleteEventSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "DeleteOptionGroupMessage":{ + "type":"structure", + "required":["OptionGroupName"], + "members":{ + "OptionGroupName":{"shape":"String"} + } + }, + "DescribeDBEngineVersionsMessage":{ + "type":"structure", + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + 
"Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"}, + "DefaultOnly":{"shape":"Boolean"}, + "ListSupportedCharacterSets":{"shape":"BooleanOptional"} + } + }, + "DescribeDBInstancesMessage":{ + "type":"structure", + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBLogFilesDetails":{ + "type":"structure", + "members":{ + "LogFileName":{"shape":"String"}, + "LastWritten":{"shape":"Long"}, + "Size":{"shape":"Long"} + } + }, + "DescribeDBLogFilesList":{ + "type":"list", + "member":{ + "shape":"DescribeDBLogFilesDetails", + "locationName":"DescribeDBLogFilesDetails" + } + }, + "DescribeDBLogFilesMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "FilenameContains":{"shape":"String"}, + "FileLastWritten":{"shape":"Long"}, + "FileSize":{"shape":"Long"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBLogFilesResponse":{ + "type":"structure", + "members":{ + "DescribeDBLogFiles":{"shape":"DescribeDBLogFilesList"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBParameterGroupsMessage":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBParametersMessage":{ + "type":"structure", + "required":["DBParameterGroupName"], + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "Source":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBSecurityGroupsMessage":{ + "type":"structure", + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBSnapshotsMessage":{ + "type":"structure", + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "DBSnapshotIdentifier":{"shape":"String"}, + "SnapshotType":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBSubnetGroupsMessage":{ + "type":"structure", + "members":{ + "DBSubnetGroupName":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeEngineDefaultParametersMessage":{ + "type":"structure", + "required":["DBParameterGroupFamily"], + "members":{ + "DBParameterGroupFamily":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeEngineDefaultParametersResult":{ + "type":"structure", + "members":{ + "EngineDefaults":{"shape":"EngineDefaults"} + } + }, + "DescribeEventCategoriesMessage":{ + "type":"structure", + "members":{ + "SourceType":{"shape":"String"}, + "Filters":{"shape":"FilterList"} + } + }, + "DescribeEventSubscriptionsMessage":{ + "type":"structure", + "members":{ + "SubscriptionName":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeEventsMessage":{ + "type":"structure", + "members":{ + 
"SourceIdentifier":{"shape":"String"}, + "SourceType":{"shape":"SourceType"}, + "StartTime":{"shape":"TStamp"}, + "EndTime":{"shape":"TStamp"}, + "Duration":{"shape":"IntegerOptional"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeOptionGroupOptionsMessage":{ + "type":"structure", + "required":["EngineName"], + "members":{ + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeOptionGroupsMessage":{ + "type":"structure", + "members":{ + "OptionGroupName":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "Marker":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"} + } + }, + "DescribeOrderableDBInstanceOptionsMessage":{ + "type":"structure", + "required":["Engine"], + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "LicenseModel":{"shape":"String"}, + "Vpc":{"shape":"BooleanOptional"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeReservedDBInstancesMessage":{ + "type":"structure", + "members":{ + "ReservedDBInstanceId":{"shape":"String"}, + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Duration":{"shape":"String"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeReservedDBInstancesOfferingsMessage":{ + "type":"structure", + "members":{ + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Duration":{"shape":"String"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "Double":{"type":"double"}, + "DownloadDBLogFilePortionDetails":{ + "type":"structure", + "members":{ + "LogFileData":{"shape":"String"}, + "Marker":{"shape":"String"}, + "AdditionalDataPending":{"shape":"Boolean"} + } + }, + "DownloadDBLogFilePortionMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "LogFileName" + ], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "LogFileName":{"shape":"String"}, + "Marker":{"shape":"String"}, + "NumberOfLines":{"shape":"Integer"} + } + }, + "EC2SecurityGroup":{ + "type":"structure", + "members":{ + "Status":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupId":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"} + } + }, + "EC2SecurityGroupList":{ + "type":"list", + "member":{ + "shape":"EC2SecurityGroup", + "locationName":"EC2SecurityGroup" + } + }, + "Endpoint":{ + "type":"structure", + "members":{ + "Address":{"shape":"String"}, + "Port":{"shape":"Integer"} + } + }, + "EngineDefaults":{ + "type":"structure", + "members":{ + "DBParameterGroupFamily":{"shape":"String"}, + "Marker":{"shape":"String"}, + "Parameters":{"shape":"ParametersList"} + }, + "wrapper":true + }, + "Event":{ + 
"type":"structure", + "members":{ + "SourceIdentifier":{"shape":"String"}, + "SourceType":{"shape":"SourceType"}, + "Message":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "Date":{"shape":"TStamp"} + } + }, + "EventCategoriesList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"EventCategory" + } + }, + "EventCategoriesMap":{ + "type":"structure", + "members":{ + "SourceType":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"} + }, + "wrapper":true + }, + "EventCategoriesMapList":{ + "type":"list", + "member":{ + "shape":"EventCategoriesMap", + "locationName":"EventCategoriesMap" + } + }, + "EventCategoriesMessage":{ + "type":"structure", + "members":{ + "EventCategoriesMapList":{"shape":"EventCategoriesMapList"} + } + }, + "EventList":{ + "type":"list", + "member":{ + "shape":"Event", + "locationName":"Event" + } + }, + "EventSubscription":{ + "type":"structure", + "members":{ + "CustomerAwsId":{"shape":"String"}, + "CustSubscriptionId":{"shape":"String"}, + "SnsTopicArn":{"shape":"String"}, + "Status":{"shape":"String"}, + "SubscriptionCreationTime":{"shape":"String"}, + "SourceType":{"shape":"String"}, + "SourceIdsList":{"shape":"SourceIdsList"}, + "EventCategoriesList":{"shape":"EventCategoriesList"}, + "Enabled":{"shape":"Boolean"} + }, + "wrapper":true + }, + "EventSubscriptionQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"EventSubscriptionQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "EventSubscriptionsList":{ + "type":"list", + "member":{ + "shape":"EventSubscription", + "locationName":"EventSubscription" + } + }, + "EventSubscriptionsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "EventSubscriptionsList":{"shape":"EventSubscriptionsList"} + } + }, + "EventsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "Events":{"shape":"EventList"} + } + }, + "Filter":{ + "type":"structure", + "required":[ + "Name", + "Values" + ], + "members":{ + "Name":{"shape":"String"}, + "Values":{"shape":"FilterValueList"} + } + }, + "FilterList":{ + "type":"list", + "member":{ + "shape":"Filter", + "locationName":"Filter" + } + }, + "FilterValueList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"Value" + } + }, + "IPRange":{ + "type":"structure", + "members":{ + "Status":{"shape":"String"}, + "CIDRIP":{"shape":"String"} + } + }, + "IPRangeList":{ + "type":"list", + "member":{ + "shape":"IPRange", + "locationName":"IPRange" + } + }, + "InstanceQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InstanceQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InsufficientDBInstanceCapacityFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InsufficientDBInstanceCapacity", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Integer":{"type":"integer"}, + "IntegerOptional":{"type":"integer"}, + "InvalidDBInstanceStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBInstanceState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBParameterGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBParameterGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSecurityGroupStateFault":{ + "type":"structure", + 
"members":{ + }, + "error":{ + "code":"InvalidDBSecurityGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSnapshotStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSnapshotState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSubnetGroupFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSubnetGroupFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSubnetGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSubnetGroupStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSubnetStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSubnetStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidEventSubscriptionStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidEventSubscriptionState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidOptionGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidOptionGroupStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidRestoreFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidRestoreFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidSubnet":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidSubnet", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidVPCNetworkStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidVPCNetworkStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "KeyList":{ + "type":"list", + "member":{"shape":"String"} + }, + "ListTagsForResourceMessage":{ + "type":"structure", + "required":["ResourceName"], + "members":{ + "ResourceName":{"shape":"String"}, + "Filters":{"shape":"FilterList"} + } + }, + "Long":{"type":"long"}, + "ModifyDBInstanceMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "AllocatedStorage":{"shape":"IntegerOptional"}, + "DBInstanceClass":{"shape":"String"}, + "DBSecurityGroups":{"shape":"DBSecurityGroupNameList"}, + "VpcSecurityGroupIds":{"shape":"VpcSecurityGroupIdList"}, + "ApplyImmediately":{"shape":"Boolean"}, + "MasterUserPassword":{"shape":"String"}, + "DBParameterGroupName":{"shape":"String"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "PreferredBackupWindow":{"shape":"String"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "EngineVersion":{"shape":"String"}, + "AllowMajorVersionUpgrade":{"shape":"Boolean"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "NewDBInstanceIdentifier":{"shape":"String"} + } + }, + "ModifyDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "ModifyDBParameterGroupMessage":{ + "type":"structure", + "required":[ + "DBParameterGroupName", + "Parameters" + ], + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "Parameters":{"shape":"ParametersList"} + } + }, + "ModifyDBSubnetGroupMessage":{ + "type":"structure", + 
"required":[ + "DBSubnetGroupName", + "SubnetIds" + ], + "members":{ + "DBSubnetGroupName":{"shape":"String"}, + "DBSubnetGroupDescription":{"shape":"String"}, + "SubnetIds":{"shape":"SubnetIdentifierList"} + } + }, + "ModifyDBSubnetGroupResult":{ + "type":"structure", + "members":{ + "DBSubnetGroup":{"shape":"DBSubnetGroup"} + } + }, + "ModifyEventSubscriptionMessage":{ + "type":"structure", + "required":["SubscriptionName"], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SnsTopicArn":{"shape":"String"}, + "SourceType":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "Enabled":{"shape":"BooleanOptional"} + } + }, + "ModifyEventSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "ModifyOptionGroupMessage":{ + "type":"structure", + "required":["OptionGroupName"], + "members":{ + "OptionGroupName":{"shape":"String"}, + "OptionsToInclude":{"shape":"OptionConfigurationList"}, + "OptionsToRemove":{"shape":"OptionNamesList"}, + "ApplyImmediately":{"shape":"Boolean"} + } + }, + "ModifyOptionGroupResult":{ + "type":"structure", + "members":{ + "OptionGroup":{"shape":"OptionGroup"} + } + }, + "Option":{ + "type":"structure", + "members":{ + "OptionName":{"shape":"String"}, + "OptionDescription":{"shape":"String"}, + "Persistent":{"shape":"Boolean"}, + "Permanent":{"shape":"Boolean"}, + "Port":{"shape":"IntegerOptional"}, + "OptionSettings":{"shape":"OptionSettingConfigurationList"}, + "DBSecurityGroupMemberships":{"shape":"DBSecurityGroupMembershipList"}, + "VpcSecurityGroupMemberships":{"shape":"VpcSecurityGroupMembershipList"} + } + }, + "OptionConfiguration":{ + "type":"structure", + "required":["OptionName"], + "members":{ + "OptionName":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "DBSecurityGroupMemberships":{"shape":"DBSecurityGroupNameList"}, + "VpcSecurityGroupMemberships":{"shape":"VpcSecurityGroupIdList"}, + "OptionSettings":{"shape":"OptionSettingsList"} + } + }, + "OptionConfigurationList":{ + "type":"list", + "member":{ + "shape":"OptionConfiguration", + "locationName":"OptionConfiguration" + } + }, + "OptionGroup":{ + "type":"structure", + "members":{ + "OptionGroupName":{"shape":"String"}, + "OptionGroupDescription":{"shape":"String"}, + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"}, + "Options":{"shape":"OptionsList"}, + "AllowsVpcAndNonVpcInstanceMemberships":{"shape":"Boolean"}, + "VpcId":{"shape":"String"} + }, + "wrapper":true + }, + "OptionGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"OptionGroupAlreadyExistsFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "OptionGroupMembership":{ + "type":"structure", + "members":{ + "OptionGroupName":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "OptionGroupMembershipList":{ + "type":"list", + "member":{ + "shape":"OptionGroupMembership", + "locationName":"OptionGroupMembership" + } + }, + "OptionGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"OptionGroupNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "OptionGroupOption":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "Description":{"shape":"String"}, + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"}, + "MinimumRequiredMinorEngineVersion":{"shape":"String"}, + "PortRequired":{"shape":"Boolean"}, + 
"DefaultPort":{"shape":"IntegerOptional"}, + "OptionsDependedOn":{"shape":"OptionsDependedOn"}, + "Persistent":{"shape":"Boolean"}, + "Permanent":{"shape":"Boolean"}, + "OptionGroupOptionSettings":{"shape":"OptionGroupOptionSettingsList"} + } + }, + "OptionGroupOptionSetting":{ + "type":"structure", + "members":{ + "SettingName":{"shape":"String"}, + "SettingDescription":{"shape":"String"}, + "DefaultValue":{"shape":"String"}, + "ApplyType":{"shape":"String"}, + "AllowedValues":{"shape":"String"}, + "IsModifiable":{"shape":"Boolean"} + } + }, + "OptionGroupOptionSettingsList":{ + "type":"list", + "member":{ + "shape":"OptionGroupOptionSetting", + "locationName":"OptionGroupOptionSetting" + } + }, + "OptionGroupOptionsList":{ + "type":"list", + "member":{ + "shape":"OptionGroupOption", + "locationName":"OptionGroupOption" + } + }, + "OptionGroupOptionsMessage":{ + "type":"structure", + "members":{ + "OptionGroupOptions":{"shape":"OptionGroupOptionsList"}, + "Marker":{"shape":"String"} + } + }, + "OptionGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"OptionGroupQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "OptionGroups":{ + "type":"structure", + "members":{ + "OptionGroupsList":{"shape":"OptionGroupsList"}, + "Marker":{"shape":"String"} + } + }, + "OptionGroupsList":{ + "type":"list", + "member":{ + "shape":"OptionGroup", + "locationName":"OptionGroup" + } + }, + "OptionNamesList":{ + "type":"list", + "member":{"shape":"String"} + }, + "OptionSetting":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "Value":{"shape":"String"}, + "DefaultValue":{"shape":"String"}, + "Description":{"shape":"String"}, + "ApplyType":{"shape":"String"}, + "DataType":{"shape":"String"}, + "AllowedValues":{"shape":"String"}, + "IsModifiable":{"shape":"Boolean"}, + "IsCollection":{"shape":"Boolean"} + } + }, + "OptionSettingConfigurationList":{ + "type":"list", + "member":{ + "shape":"OptionSetting", + "locationName":"OptionSetting" + } + }, + "OptionSettingsList":{ + "type":"list", + "member":{ + "shape":"OptionSetting", + "locationName":"OptionSetting" + } + }, + "OptionsDependedOn":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"OptionName" + } + }, + "OptionsList":{ + "type":"list", + "member":{ + "shape":"Option", + "locationName":"Option" + } + }, + "OrderableDBInstanceOption":{ + "type":"structure", + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "LicenseModel":{"shape":"String"}, + "AvailabilityZones":{"shape":"AvailabilityZoneList"}, + "MultiAZCapable":{"shape":"Boolean"}, + "ReadReplicaCapable":{"shape":"Boolean"}, + "Vpc":{"shape":"Boolean"} + }, + "wrapper":true + }, + "OrderableDBInstanceOptionsList":{ + "type":"list", + "member":{ + "shape":"OrderableDBInstanceOption", + "locationName":"OrderableDBInstanceOption" + } + }, + "OrderableDBInstanceOptionsMessage":{ + "type":"structure", + "members":{ + "OrderableDBInstanceOptions":{"shape":"OrderableDBInstanceOptionsList"}, + "Marker":{"shape":"String"} + } + }, + "Parameter":{ + "type":"structure", + "members":{ + "ParameterName":{"shape":"String"}, + "ParameterValue":{"shape":"String"}, + "Description":{"shape":"String"}, + "Source":{"shape":"String"}, + "ApplyType":{"shape":"String"}, + "DataType":{"shape":"String"}, + "AllowedValues":{"shape":"String"}, + "IsModifiable":{"shape":"Boolean"}, + "MinimumEngineVersion":{"shape":"String"}, + 
"ApplyMethod":{"shape":"ApplyMethod"} + } + }, + "ParametersList":{ + "type":"list", + "member":{ + "shape":"Parameter", + "locationName":"Parameter" + } + }, + "PendingModifiedValues":{ + "type":"structure", + "members":{ + "DBInstanceClass":{"shape":"String"}, + "AllocatedStorage":{"shape":"IntegerOptional"}, + "MasterUserPassword":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "EngineVersion":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "DBInstanceIdentifier":{"shape":"String"} + } + }, + "PointInTimeRestoreNotEnabledFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"PointInTimeRestoreNotEnabled", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "PromoteReadReplicaMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "PreferredBackupWindow":{"shape":"String"} + } + }, + "PromoteReadReplicaResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "ProvisionedIopsNotAvailableInAZFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ProvisionedIopsNotAvailableInAZFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "PurchaseReservedDBInstancesOfferingMessage":{ + "type":"structure", + "required":["ReservedDBInstancesOfferingId"], + "members":{ + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "ReservedDBInstanceId":{"shape":"String"}, + "DBInstanceCount":{"shape":"IntegerOptional"}, + "Tags":{"shape":"TagList"} + } + }, + "PurchaseReservedDBInstancesOfferingResult":{ + "type":"structure", + "members":{ + "ReservedDBInstance":{"shape":"ReservedDBInstance"} + } + }, + "ReadReplicaDBInstanceIdentifierList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ReadReplicaDBInstanceIdentifier" + } + }, + "RebootDBInstanceMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "ForceFailover":{"shape":"BooleanOptional"} + } + }, + "RebootDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "RecurringCharge":{ + "type":"structure", + "members":{ + "RecurringChargeAmount":{"shape":"Double"}, + "RecurringChargeFrequency":{"shape":"String"} + }, + "wrapper":true + }, + "RecurringChargeList":{ + "type":"list", + "member":{ + "shape":"RecurringCharge", + "locationName":"RecurringCharge" + } + }, + "RemoveSourceIdentifierFromSubscriptionMessage":{ + "type":"structure", + "required":[ + "SubscriptionName", + "SourceIdentifier" + ], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SourceIdentifier":{"shape":"String"} + } + }, + "RemoveSourceIdentifierFromSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "RemoveTagsFromResourceMessage":{ + "type":"structure", + "required":[ + "ResourceName", + "TagKeys" + ], + "members":{ + "ResourceName":{"shape":"String"}, + "TagKeys":{"shape":"KeyList"} + } + }, + "ReservedDBInstance":{ + "type":"structure", + "members":{ + "ReservedDBInstanceId":{"shape":"String"}, + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "StartTime":{"shape":"TStamp"}, + "Duration":{"shape":"Integer"}, + 
"FixedPrice":{"shape":"Double"}, + "UsagePrice":{"shape":"Double"}, + "CurrencyCode":{"shape":"String"}, + "DBInstanceCount":{"shape":"Integer"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MultiAZ":{"shape":"Boolean"}, + "State":{"shape":"String"}, + "RecurringCharges":{"shape":"RecurringChargeList"} + }, + "wrapper":true + }, + "ReservedDBInstanceAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedDBInstanceAlreadyExists", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ReservedDBInstanceList":{ + "type":"list", + "member":{ + "shape":"ReservedDBInstance", + "locationName":"ReservedDBInstance" + } + }, + "ReservedDBInstanceMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ReservedDBInstances":{"shape":"ReservedDBInstanceList"} + } + }, + "ReservedDBInstanceNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedDBInstanceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ReservedDBInstanceQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedDBInstanceQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ReservedDBInstancesOffering":{ + "type":"structure", + "members":{ + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Duration":{"shape":"Integer"}, + "FixedPrice":{"shape":"Double"}, + "UsagePrice":{"shape":"Double"}, + "CurrencyCode":{"shape":"String"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MultiAZ":{"shape":"Boolean"}, + "RecurringCharges":{"shape":"RecurringChargeList"} + }, + "wrapper":true + }, + "ReservedDBInstancesOfferingList":{ + "type":"list", + "member":{ + "shape":"ReservedDBInstancesOffering", + "locationName":"ReservedDBInstancesOffering" + } + }, + "ReservedDBInstancesOfferingMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ReservedDBInstancesOfferings":{"shape":"ReservedDBInstancesOfferingList"} + } + }, + "ReservedDBInstancesOfferingNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedDBInstancesOfferingNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ResetDBParameterGroupMessage":{ + "type":"structure", + "required":["DBParameterGroupName"], + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "ResetAllParameters":{"shape":"Boolean"}, + "Parameters":{"shape":"ParametersList"} + } + }, + "RestoreDBInstanceFromDBSnapshotMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "DBSnapshotIdentifier" + ], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "DBSnapshotIdentifier":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "AvailabilityZone":{"shape":"String"}, + "DBSubnetGroupName":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "PubliclyAccessible":{"shape":"BooleanOptional"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "LicenseModel":{"shape":"String"}, + "DBName":{"shape":"String"}, + "Engine":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "RestoreDBInstanceFromDBSnapshotResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + 
"RestoreDBInstanceToPointInTimeMessage":{ + "type":"structure", + "required":[ + "SourceDBInstanceIdentifier", + "TargetDBInstanceIdentifier" + ], + "members":{ + "SourceDBInstanceIdentifier":{"shape":"String"}, + "TargetDBInstanceIdentifier":{"shape":"String"}, + "RestoreTime":{"shape":"TStamp"}, + "UseLatestRestorableTime":{"shape":"Boolean"}, + "DBInstanceClass":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "AvailabilityZone":{"shape":"String"}, + "DBSubnetGroupName":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "PubliclyAccessible":{"shape":"BooleanOptional"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "LicenseModel":{"shape":"String"}, + "DBName":{"shape":"String"}, + "Engine":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "RestoreDBInstanceToPointInTimeResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "RevokeDBSecurityGroupIngressMessage":{ + "type":"structure", + "required":["DBSecurityGroupName"], + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "CIDRIP":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupId":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"} + } + }, + "RevokeDBSecurityGroupIngressResult":{ + "type":"structure", + "members":{ + "DBSecurityGroup":{"shape":"DBSecurityGroup"} + } + }, + "SNSInvalidTopicFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SNSInvalidTopic", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SNSNoAuthorizationFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SNSNoAuthorization", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SNSTopicArnNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SNSTopicArnNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SnapshotQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SnapshotQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SourceIdsList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SourceId" + } + }, + "SourceNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SourceType":{ + "type":"string", + "enum":[ + "db-instance", + "db-parameter-group", + "db-security-group", + "db-snapshot" + ] + }, + "StorageQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"StorageQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "String":{"type":"string"}, + "Subnet":{ + "type":"structure", + "members":{ + "SubnetIdentifier":{"shape":"String"}, + "SubnetAvailabilityZone":{"shape":"AvailabilityZone"}, + "SubnetStatus":{"shape":"String"} + } + }, + "SubnetAlreadyInUse":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubnetAlreadyInUse", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SubnetIdentifierList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SubnetIdentifier" + } + }, + "SubnetList":{ + "type":"list", + "member":{ + "shape":"Subnet", + "locationName":"Subnet" + } + }, + "SubscriptionAlreadyExistFault":{ + "type":"structure", + "members":{ + }, + 
"error":{ + "code":"SubscriptionAlreadyExist", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SubscriptionCategoryNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionCategoryNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SubscriptionNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SupportedCharacterSetsList":{ + "type":"list", + "member":{ + "shape":"CharacterSet", + "locationName":"CharacterSet" + } + }, + "TStamp":{"type":"timestamp"}, + "Tag":{ + "type":"structure", + "members":{ + "Key":{"shape":"String"}, + "Value":{"shape":"String"} + } + }, + "TagList":{ + "type":"list", + "member":{ + "shape":"Tag", + "locationName":"Tag" + } + }, + "TagListMessage":{ + "type":"structure", + "members":{ + "TagList":{"shape":"TagList"} + } + }, + "VpcSecurityGroupIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"VpcSecurityGroupId" + } + }, + "VpcSecurityGroupMembership":{ + "type":"structure", + "members":{ + "VpcSecurityGroupId":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "VpcSecurityGroupMembershipList":{ + "type":"list", + "member":{ + "shape":"VpcSecurityGroupMembership", + "locationName":"VpcSecurityGroupMembership" + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-09-09/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-09-09/docs-2.json new file mode 100644 index 000000000..f0a22a6eb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-09-09/docs-2.json @@ -0,0 +1,1876 @@ +{ + "version": "2.0", + "service": "Amazon Relational Database Service

    Amazon Relational Database Service (Amazon RDS) is a web service that makes it easier to set up, operate, and scale a relational database in the cloud. It provides cost-efficient, resizable capacity for an industry-standard relational database and manages common database administration tasks, freeing up developers to focus on what makes their applications and businesses unique.

    Amazon RDS gives you access to the capabilities of a MySQL, PostgreSQL, Microsoft SQL Server, or Oracle database server. This means the code, applications, and tools you already use today with your existing databases work with Amazon RDS without modification. Amazon RDS automatically backs up your database and maintains the database software that powers your DB instance. Amazon RDS is flexible: you can scale your database instance's compute resources and storage capacity to meet your application's demand. As with all Amazon Web Services, there are no up-front investments, and you pay only for the resources you use.

    This is an interface reference for Amazon RDS. It contains documentation for a programming or command line interface you can use to manage Amazon RDS. Note that Amazon RDS is asynchronous, which means that some interfaces may require techniques such as polling or callback functions to determine when a command has been applied. In this reference, the parameter descriptions indicate whether a command is applied immediately, on the next instance reboot, or during the maintenance window. For a summary of the Amazon RDS interfaces, go to Available RDS Interfaces.

    ", + "operations": { + "AddSourceIdentifierToSubscription": "

    Adds a source identifier to an existing RDS event notification subscription.

    ", + "AddTagsToResource": "

    Adds metadata tags to an Amazon RDS resource. These tags can also be used with cost allocation reporting to track cost associated with Amazon RDS resources, or used in Condition statement in IAM policy for Amazon RDS.

    For an overview on tagging Amazon RDS resources, see Tagging Amazon RDS Resources.
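    Editor's illustrative sketch (not part of the vendored model): tagging an RDS resource through this SDK's Go rds package. The ARN, tag values, and region handling are assumptions, not taken from the patch.

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/rds"
        )

        func main() {
            // Credentials and region are assumed to come from the environment.
            svc := rds.New(session.Must(session.NewSession()))
            // Hypothetical resource ARN; these tags then show up in cost
            // allocation reports and can be matched in IAM policy conditions.
            _, err := svc.AddTagsToResource(&rds.AddTagsToResourceInput{
                ResourceName: aws.String("arn:aws:rds:us-east-1:123456789012:db:mydbinstance"),
                Tags: []*rds.Tag{
                    {Key: aws.String("env"), Value: aws.String("staging")},
                },
            })
            if err != nil {
                fmt.Println("AddTagsToResource failed:", err)
            }
        }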

    ", + "AuthorizeDBSecurityGroupIngress": "

    Enables ingress to a DBSecurityGroup using one of two forms of authorization. First, EC2 or VPC security groups can be added to the DBSecurityGroup if the application using the database is running on EC2 or VPC instances. Second, IP ranges are available if the application accessing your database is running on the Internet. Required parameters for this API are one of CIDR range, EC2SecurityGroupId for VPC, or (EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId for non-VPC).

    You cannot authorize ingress from an EC2 security group in one Region to an Amazon RDS DB instance in another. You cannot authorize ingress from a VPC security group in one VPC to an Amazon RDS DB instance in another.

    For an overview of CIDR ranges, go to the Wikipedia Tutorial.
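    A hedged sketch of the CIDR form of this call, reusing the imports and svc client from the AddTagsToResource sketch above; the group name and range are made up.

        // authorizeCIDR opens a DB security group to a single CIDR range,
        // the form used when the caller is on the Internet rather than EC2/VPC.
        func authorizeCIDR(svc *rds.RDS) error {
            _, err := svc.AuthorizeDBSecurityGroupIngress(&rds.AuthorizeDBSecurityGroupIngressInput{
                DBSecurityGroupName: aws.String("mydbsecuritygroup"), // illustrative name
                CIDRIP:              aws.String("203.0.113.0/24"),    // alternative: the EC2 security group parameters
            })
            return err
        }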

    ", + "CopyDBSnapshot": "

    Copies the specified DBSnapshot. The source DBSnapshot must be in the \"available\" state.

    ", + "CreateDBInstance": "

    Creates a new DB instance.

    ", + "CreateDBInstanceReadReplica": "

    Creates a DB instance that acts as a read replica of a source DB instance.

    All read replica DB instances are created as Single-AZ deployments with backups disabled. All other DB instance attributes (including DB security groups and DB parameter groups) are inherited from the source DB instance, except as specified below.

    The source DB instance must have backup retention enabled.
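    Editor's sketch of the minimal call, assuming the same imports and svc client as above; both identifiers are invented.

        // createReplica creates a Single-AZ read replica; everything else is
        // inherited from the source, which must have backup retention enabled.
        func createReplica(svc *rds.RDS) error {
            _, err := svc.CreateDBInstanceReadReplica(&rds.CreateDBInstanceReadReplicaInput{
                DBInstanceIdentifier:       aws.String("mydbinstance-replica"),
                SourceDBInstanceIdentifier: aws.String("mydbinstance"),
            })
            return err
        }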

    ", + "CreateDBParameterGroup": "

    Creates a new DB parameter group.

    A DB parameter group is initially created with the default parameters for the database engine used by the DB instance. To provide custom values for any of the parameters, you must modify the group after creating it using ModifyDBParameterGroup. Once you've created a DB parameter group, you need to associate it with your DB instance using ModifyDBInstance. When you associate a new DB parameter group with a running DB instance, you need to reboot the DB instance for the new DB parameter group and associated settings to take effect.

    After you create a DB parameter group, you should wait at least 5 minutes before creating your first DB instance that uses that DB parameter group as the default parameter group. This allows Amazon RDS to fully complete the create action before the parameter group is used as the default for a new DB instance. This is especially important for parameters that are critical when creating the default database for a DB instance, such as the character set for the default database defined by the character_set_database parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBParameters command to verify that your DB parameter group has been created or modified.
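    A sketch of the create step of the workflow just described (group name, family, and description are illustrative; client setup as in the first sketch). Modify the group and attach it with ModifyDBInstance afterwards.

        // createParamGroup creates an empty custom group; it starts with the
        // engine defaults until ModifyDBParameterGroup is called on it.
        func createParamGroup(svc *rds.RDS) error {
            _, err := svc.CreateDBParameterGroup(&rds.CreateDBParameterGroupInput{
                DBParameterGroupName:   aws.String("my-mysql-params"),
                DBParameterGroupFamily: aws.String("mysql5.6"),
                Description:            aws.String("custom parameters"),
            })
            return err
        }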

    ", + "CreateDBSecurityGroup": "

    Creates a new DB security group. DB security groups control access to a DB instance.

    ", + "CreateDBSnapshot": "

    Creates a DBSnapshot. The source DBInstance must be in \"available\" state.

    ", + "CreateDBSubnetGroup": "

    Creates a new DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the region.

    ", + "CreateEventSubscription": "

    Creates an RDS event notification subscription. This action requires a topic ARN (Amazon Resource Name) created by either the RDS console, the SNS console, or the SNS API. To obtain an ARN with SNS, you must create a topic in Amazon SNS and subscribe to the topic. The ARN is displayed in the SNS console.

    You can specify the type of source (SourceType) you want to be notified of, provide a list of RDS sources (SourceIds) that triggers the events, and provide a list of event categories (EventCategories) for events you want to be notified of. For example, you can specify SourceType = db-instance, SourceIds = mydbinstance1, mydbinstance2 and EventCategories = Availability, Backup.

    If you specify both the SourceType and SourceIds, such as SourceType = db-instance and SourceIdentifier = myDBInstance1, you will be notified of all the db-instance events for the specified source. If you specify a SourceType but do not specify a SourceIdentifier, you will receive notice of the events for that source type for all your RDS sources. If you do not specify either the SourceType or the SourceIdentifier, you will be notified of events generated from all RDS sources belonging to your customer account.
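    A sketch of the SourceType + SourceIds + EventCategories combination described above, with invented names and the same client setup as the first sketch.

        // subscribe notifies an SNS topic of availability/backup events
        // for two specific instances; the topic ARN is hypothetical.
        func subscribe(svc *rds.RDS) error {
            _, err := svc.CreateEventSubscription(&rds.CreateEventSubscriptionInput{
                SubscriptionName: aws.String("my-sub"),
                SnsTopicArn:      aws.String("arn:aws:sns:us-east-1:123456789012:my-topic"),
                SourceType:       aws.String("db-instance"),
                SourceIds:        []*string{aws.String("mydbinstance1"), aws.String("mydbinstance2")},
                EventCategories:  []*string{aws.String("availability"), aws.String("backup")},
            })
            return err
        }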

    ", + "CreateOptionGroup": "

    Creates a new option group. You can create up to 20 option groups.

    ", + "DeleteDBInstance": "

    The DeleteDBInstance action deletes a previously provisioned DB instance. A successful response from the web service indicates the request was received correctly. When you delete a DB instance, all automated backups for that instance are deleted and cannot be recovered. Manual DB snapshots of the DB instance to be deleted are not deleted.

    If a final DB snapshot is requested, the status of the RDS instance will be \"deleting\" until the DB snapshot is created. The API action DescribeDBInstances is used to monitor the status of this operation. The action cannot be canceled or reverted once submitted.
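    Sketch of a delete that keeps a final snapshot (identifiers invented; client as in the first sketch).

        // deleteWithSnapshot deletes an instance but first takes a snapshot;
        // FinalDBSnapshotIdentifier is required because SkipFinalSnapshot is false.
        func deleteWithSnapshot(svc *rds.RDS) error {
            _, err := svc.DeleteDBInstance(&rds.DeleteDBInstanceInput{
                DBInstanceIdentifier:      aws.String("mydbinstance"),
                SkipFinalSnapshot:         aws.Bool(false),
                FinalDBSnapshotIdentifier: aws.String("mydbinstance-final"),
            })
            return err
        }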

    ", + "DeleteDBParameterGroup": "

    Deletes a specified DBParameterGroup. The DBParameterGroup to be deleted cannot be associated with any RDS instances.

    The specified DB parameter group cannot be associated with any DB instances. ", + "DeleteDBSecurityGroup": "

    Deletes a DB security group.

    The specified DB security group must not be associated with any DB instances.", + "DeleteDBSnapshot": "

    Deletes a DBSnapshot. If the snapshot is being copied, the copy operation is terminated.

    The DBSnapshot must be in the available state to be deleted.", + "DeleteDBSubnetGroup": "

    Deletes a DB subnet group.

    The specified database subnet group must not be associated with any DB instances.", + "DeleteEventSubscription": "

    Deletes an RDS event notification subscription.

    ", + "DeleteOptionGroup": "

    Deletes an existing option group.

    ", + "DescribeDBEngineVersions": "

    Returns a list of the available DB engines.

    ", + "DescribeDBInstances": "

    Returns information about provisioned RDS instances. This API supports pagination.

    ", + "DescribeDBLogFiles": "

    Returns a list of DB log files for the DB instance.

    ", + "DescribeDBParameterGroups": "

    Returns a list of DBParameterGroup descriptions. If a DBParameterGroupName is specified, the list will contain only the description of the specified DB parameter group.

    ", + "DescribeDBParameters": "

    Returns the detailed parameter list for a particular DB parameter group.

    ", + "DescribeDBSecurityGroups": "

    Returns a list of DBSecurityGroup descriptions. If a DBSecurityGroupName is specified, the list will contain only the descriptions of the specified DB security group.

    ", + "DescribeDBSnapshots": "

    Returns information about DB snapshots. This API supports pagination.

    ", + "DescribeDBSubnetGroups": "

    Returns a list of DBSubnetGroup descriptions. If a DBSubnetGroupName is specified, the list will contain only the descriptions of the specified DBSubnetGroup.

    For an overview of CIDR ranges, go to the Wikipedia Tutorial.

    ", + "DescribeEngineDefaultParameters": "

    Returns the default engine and system parameter information for the specified database engine.

    ", + "DescribeEventCategories": "

    Displays a list of categories for all event source types, or, if a source type is specified, for that source type. You can see a list of the event categories and source types in the Events topic in the Amazon RDS User Guide.

    ", + "DescribeEventSubscriptions": "

    Lists all the subscription descriptions for a customer account. The description for a subscription includes SubscriptionName, SNSTopicARN, CustomerID, SourceType, SourceID, CreationTime, and Status.

    If you specify a SubscriptionName, lists the description for that subscription.

    ", + "DescribeEvents": "

    Returns events related to DB instances, DB security groups, DB snapshots, and DB parameter groups for the past 14 days. Events specific to a particular DB instance, DB security group, database snapshot, or DB parameter group can be obtained by providing the name as a parameter. By default, the past hour of events is returned.
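    Sketch widening the window to the last 24 hours for one instance (identifier invented; client and imports as in the first sketch).

        // recentEvents prints the past day of events for a single instance;
        // Duration is in minutes, and the default without it is one hour.
        func recentEvents(svc *rds.RDS) error {
            out, err := svc.DescribeEvents(&rds.DescribeEventsInput{
                SourceIdentifier: aws.String("mydbinstance"),
                SourceType:       aws.String("db-instance"),
                Duration:         aws.Int64(24 * 60),
            })
            if err != nil {
                return err
            }
            for _, e := range out.Events {
                fmt.Println(aws.TimeValue(e.Date), aws.StringValue(e.Message))
            }
            return nil
        }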

    ", + "DescribeOptionGroupOptions": "

    Describes all available options.

    ", + "DescribeOptionGroups": "

    Describes the available option groups.

    ", + "DescribeOrderableDBInstanceOptions": "

    Returns a list of orderable DB instance options for the specified engine.

    ", + "DescribeReservedDBInstances": "

    Returns information about reserved DB instances for this account, or about a specified reserved DB instance.

    ", + "DescribeReservedDBInstancesOfferings": "

    Lists available reserved DB instance offerings.

    ", + "DownloadDBLogFilePortion": "

    Downloads all or a portion of the specified log file.

    ", + "ListTagsForResource": "

    Lists all tags on an Amazon RDS resource.

    For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS Resources.

    ", + "ModifyDBInstance": "

    Modifies settings for a DB instance. You can change one or more database configuration parameters by specifying these parameters and the new values in the request.
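    Sketch of an immediate instance class change (identifier and class illustrative; client as in the first sketch). See the ApplyImmediately notes under the shapes below for when changes are deferred to the maintenance window.

        // resize changes the instance class right away instead of waiting
        // for the preferred maintenance window.
        func resize(svc *rds.RDS) error {
            _, err := svc.ModifyDBInstance(&rds.ModifyDBInstanceInput{
                DBInstanceIdentifier: aws.String("mydbinstance"),
                DBInstanceClass:      aws.String("db.m1.large"),
                ApplyImmediately:     aws.Bool(true),
            })
            return err
        }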

    ", + "ModifyDBParameterGroup": "

    Modifies the parameters of a DB parameter group. To modify more than one parameter, submit a list of the following: ParameterName, ParameterValue, and ApplyMethod. A maximum of 20 parameters can be modified in a single request.

    The apply-immediate method can be used only for dynamic parameters; the pending-reboot method can be used with MySQL, PostgreSQL, and Oracle DB instances for either dynamic or static parameters. For Microsoft SQL Server DB instances, the pending-reboot method can be used only for static parameters.

    After you modify a DB parameter group, you should wait at least 5 minutes before creating your first DB instance that uses that DB parameter group as the default parameter group. This allows Amazon RDS to fully complete the modify action before the parameter group is used as the default for a new DB instance. This is especially important for parameters that are critical when creating the default database for a DB instance, such as the character set for the default database defined by the character_set_database parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBParameters command to verify that your DB parameter group has been created or modified.
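    Sketch changing one dynamic parameter immediately (names and value invented; client as in the first sketch).

        // setParam submits a single ParameterName/ParameterValue/ApplyMethod
        // triple; up to 20 parameters may be sent per request.
        func setParam(svc *rds.RDS) error {
            _, err := svc.ModifyDBParameterGroup(&rds.ModifyDBParameterGroupInput{
                DBParameterGroupName: aws.String("my-mysql-params"),
                Parameters: []*rds.Parameter{{
                    ParameterName:  aws.String("max_connections"),
                    ParameterValue: aws.String("250"),
                    ApplyMethod:    aws.String("immediate"), // "pending-reboot" for static parameters
                }},
            })
            return err
        }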

    ", + "ModifyDBSubnetGroup": "

    Modifies an existing DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the region.

    ", + "ModifyEventSubscription": "

    Modifies an existing RDS event notification subscription. Note that you cannot modify the source identifiers using this call; to change source identifiers for a subscription, use the AddSourceIdentifierToSubscription and RemoveSourceIdentifierFromSubscription calls.

    You can see a list of the event categories for a given SourceType in the Events topic in the Amazon RDS User Guide or by using the DescribeEventCategories action.

    ", + "ModifyOptionGroup": "

    Modifies an existing option group.

    ", + "PromoteReadReplica": "

    Promotes a read replica DB instance to a standalone DB instance.

    We recommend that you enable automated backups on your read replica before promoting the read replica. This ensures that no backup is taken during the promotion process. Once the instance is promoted to a primary instance, backups are taken based on your backup settings.
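    Sketch of a promotion that also sets backup retention on the promoted instance (identifier invented; client as in the first sketch).

        // promote turns a read replica into a standalone instance and keeps
        // 7 days of automated backups on it afterwards.
        func promote(svc *rds.RDS) error {
            _, err := svc.PromoteReadReplica(&rds.PromoteReadReplicaInput{
                DBInstanceIdentifier:  aws.String("mydbinstance-replica"),
                BackupRetentionPeriod: aws.Int64(7),
            })
            return err
        }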

    ", + "PurchaseReservedDBInstancesOffering": "

    Purchases a reserved DB instance offering.

    ", + "RebootDBInstance": "

    Rebooting a DB instance restarts the database engine service. A reboot also applies to the DB instance any pending modifications to the associated DB parameter group. Rebooting a DB instance results in a momentary outage of the instance, during which the DB instance status is set to rebooting. If the RDS instance is configured for MultiAZ, it is possible that the reboot will be conducted through a failover. An Amazon RDS event is created when the reboot is completed.

    If your DB instance is deployed in multiple Availability Zones, you can force a failover from one AZ to the other during the reboot. You might force a failover to test the availability of your DB instance deployment or to restore operations to the original AZ after a failover occurs.

    The time required to reboot is a function of the specific database engine's crash recovery process. To improve the reboot time, we recommend that you reduce database activities as much as possible during the reboot process to reduce rollback activity for in-transit transactions.
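    Sketch of a reboot forced through a Multi-AZ failover (identifier invented; client as in the first sketch).

        // rebootWithFailover tests availability by failing over during the
        // reboot; valid only when the instance is configured for MultiAZ.
        func rebootWithFailover(svc *rds.RDS) error {
            _, err := svc.RebootDBInstance(&rds.RebootDBInstanceInput{
                DBInstanceIdentifier: aws.String("mydbinstance"),
                ForceFailover:        aws.Bool(true),
            })
            return err
        }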

    ", + "RemoveSourceIdentifierFromSubscription": "

    Removes a source identifier from an existing RDS event notification subscription.

    ", + "RemoveTagsFromResource": "

    Removes metadata tags from an Amazon RDS resource.

    For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS Resources.

    ", + "ResetDBParameterGroup": "

    Resets the parameters of a DB parameter group to the engine/system default values. To reset specific parameters, submit a list of the following: ParameterName and ApplyMethod. To reset the entire DB parameter group, specify the DBParameterGroup name and ResetAllParameters parameters. When resetting the entire group, dynamic parameters are updated immediately and static parameters are set to pending-reboot to take effect on the next DB instance restart or RebootDBInstance request.
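    Sketch resetting a whole group to engine defaults (group name invented; client as in the first sketch).

        // resetAll resets every parameter in the group; static parameters
        // move to pending-reboot and apply on the next restart.
        func resetAll(svc *rds.RDS) error {
            _, err := svc.ResetDBParameterGroup(&rds.ResetDBParameterGroupInput{
                DBParameterGroupName: aws.String("my-mysql-params"),
                ResetAllParameters:   aws.Bool(true),
            })
            return err
        }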

    ", + "RestoreDBInstanceFromDBSnapshot": "

    Creates a new DB instance from a DB snapshot. The target database is created from the source database restore point with the same configuration as the original source database, except that the new RDS instance is created with the default security group.

    ", + "RestoreDBInstanceToPointInTime": "

    Restores a DB instance to an arbitrary point in time. Users can restore to any point in time before the latestRestorableTime for up to backupRetentionPeriod days. The target database is created from the source database with the same configuration as the original database except that the DB instance is created with the default DB security group.
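    Sketch restoring to the latest restorable time (identifiers invented; client as in the first sketch). To pick an explicit point instead, set RestoreTime (a *time.Time) rather than UseLatestRestorableTime, but not both.

        // restoreLatest clones the source into a new instance at the most
        // recent restorable moment.
        func restoreLatest(svc *rds.RDS) error {
            _, err := svc.RestoreDBInstanceToPointInTime(&rds.RestoreDBInstanceToPointInTimeInput{
                SourceDBInstanceIdentifier: aws.String("mydbinstance"),
                TargetDBInstanceIdentifier: aws.String("mydbinstance-restored"),
                UseLatestRestorableTime:    aws.Bool(true),
            })
            return err
        }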

    ", + "RevokeDBSecurityGroupIngress": "

    Revokes ingress from a DBSecurityGroup for previously authorized IP ranges or EC2 or VPC Security Groups. Required parameters for this API are one of CIDRIP, EC2SecurityGroupId for VPC, or (EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId).

    " + }, + "shapes": { + "AddSourceIdentifierToSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "AddSourceIdentifierToSubscriptionResult": { + "base": null, + "refs": { + } + }, + "AddTagsToResourceMessage": { + "base": "

    ", + "refs": { + } + }, + "ApplyMethod": { + "base": null, + "refs": { + "Parameter$ApplyMethod": "

    Indicates when to apply parameter updates.

    " + } + }, + "AuthorizationAlreadyExistsFault": { + "base": "

    The specified CIDRIP or EC2 security group is already authorized for the specified DB security group.

    ", + "refs": { + } + }, + "AuthorizationNotFoundFault": { + "base": "

    Specified CIDRIP or EC2 security group is not authorized for the specified DB security group.

    Or, RDS may not be authorized via IAM to perform the necessary actions on your behalf.

    ", + "refs": { + } + }, + "AuthorizationQuotaExceededFault": { + "base": "

    DB security group authorization quota has been reached.

    ", + "refs": { + } + }, + "AuthorizeDBSecurityGroupIngressMessage": { + "base": "

    ", + "refs": { + } + }, + "AuthorizeDBSecurityGroupIngressResult": { + "base": null, + "refs": { + } + }, + "AvailabilityZone": { + "base": "

    Contains Availability Zone information.

    This data type is used as an element in the following data type:

    ", + "refs": { + "AvailabilityZoneList$member": null, + "Subnet$SubnetAvailabilityZone": null + } + }, + "AvailabilityZoneList": { + "base": null, + "refs": { + "OrderableDBInstanceOption$AvailabilityZones": "

    A list of availability zones for the orderable DB instance.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "AvailabilityZone$ProvisionedIopsCapable": "

    True indicates the availability zone is capable of provisioned IOPs.

    ", + "DBInstance$MultiAZ": "

    Specifies if the DB instance is a Multi-AZ deployment.

    ", + "DBInstance$AutoMinorVersionUpgrade": "

    Indicates that minor version patches are applied automatically.

    ", + "DBInstance$PubliclyAccessible": "

    Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

    Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

    • Default VPC: true
    • VPC: false

    If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private.

    ", + "DBInstanceStatusInfo$Normal": "

    Boolean value that is true if the instance is operating normally, or false if the instance is in an error state.

    ", + "DeleteDBInstanceMessage$SkipFinalSnapshot": "

    Determines whether a final DB snapshot is created before the DB instance is deleted. If true is specified, no DBSnapshot is created. If false is specified, a DB snapshot is created before the DB instance is deleted.

    Specify true when deleting a read replica.

    The FinalDBSnapshotIdentifier parameter must be specified if SkipFinalSnapshot is false.

    Default: false

    ", + "DescribeDBEngineVersionsMessage$DefaultOnly": "

    Indicates that only the default version of the specified engine or engine and major version combination is returned.

    ", + "DownloadDBLogFilePortionDetails$AdditionalDataPending": "

    Boolean value that, if true, indicates there is more data to be downloaded.

    ", + "EventSubscription$Enabled": "

    A Boolean value indicating if the subscription is enabled. True indicates the subscription is enabled.

    ", + "ModifyDBInstanceMessage$ApplyImmediately": "

    Specifies whether the modifications in this request and any pending modifications are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the DB instance.

    If this parameter is set to false, changes to the DB instance are applied during the next maintenance window. Some parameter changes can cause an outage and will be applied on the next call to RebootDBInstance, or the next failure reboot. Review the table of parameters in Modifying a DB Instance and Using the Apply Immediately Parameter to see the impact that setting ApplyImmediately to true or false has for each modified parameter and to determine when the changes will be applied.

    Default: false

    ", + "ModifyDBInstanceMessage$AllowMajorVersionUpgrade": "

    Indicates that major version upgrades are allowed. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible.

    Constraints: This parameter must be set to true when specifying a value for the EngineVersion parameter that is a different major version than the DB instance's current version.

    ", + "ModifyOptionGroupMessage$ApplyImmediately": "

    Indicates whether the changes should be applied immediately, or during the next maintenance window for each instance associated with the option group.

    ", + "Option$Persistent": "

    Indicate if this option is persistent.

    ", + "Option$Permanent": "

    Indicate if this option is permanent.

    ", + "OptionGroup$AllowsVpcAndNonVpcInstanceMemberships": "

    Indicates whether this option group can be applied to both VPC and non-VPC instances. The value 'true' indicates the option group can be applied to both VPC and non-VPC instances.

    ", + "OptionGroupOption$PortRequired": "

    Specifies whether the option requires a port.

    ", + "OptionGroupOption$Persistent": "

    A persistent option cannot be removed from the option group once the option group is used, but this option can be removed from the db instance while modifying the related data and assigning another option group without this option.

    ", + "OptionGroupOption$Permanent": "

    A permanent option cannot be removed from the option group once the option group is used, and it cannot be removed from the db instance after assigning an option group with this permanent option.

    ", + "OptionGroupOptionSetting$IsModifiable": "

    Boolean value where true indicates that this option group option can be changed from the default value.

    ", + "OptionSetting$IsModifiable": "

    A Boolean value that, when true, indicates the option setting can be modified from the default.

    ", + "OptionSetting$IsCollection": "

    Indicates if the option setting is part of a collection.

    ", + "OrderableDBInstanceOption$MultiAZCapable": "

    Indicates whether this orderable DB instance is multi-AZ capable.

    ", + "OrderableDBInstanceOption$ReadReplicaCapable": "

    Indicates whether this orderable DB instance can have a read replica.

    ", + "OrderableDBInstanceOption$Vpc": "

    Indicates whether this is a VPC orderable DB instance.

    ", + "Parameter$IsModifiable": "

    Indicates whether (true) or not (false) the parameter can be modified. Some parameters have security or operational implications that prevent them from being changed.

    ", + "ReservedDBInstance$MultiAZ": "

    Indicates if the reservation applies to Multi-AZ deployments.

    ", + "ReservedDBInstancesOffering$MultiAZ": "

    Indicates if the offering applies to Multi-AZ deployments.

    ", + "ResetDBParameterGroupMessage$ResetAllParameters": "

    Specifies whether (true) or not (false) to reset all parameters in the DB parameter group to default values.

    Default: true

    ", + "RestoreDBInstanceToPointInTimeMessage$UseLatestRestorableTime": "

    Specifies whether (true) or not (false) the DB instance is restored from the latest backup time.

    Default: false

    Constraints: Cannot be specified if RestoreTime parameter is provided.

    " + } + }, + "BooleanOptional": { + "base": null, + "refs": { + "CreateDBInstanceMessage$MultiAZ": "

    Specifies if the DB instance is a Multi-AZ deployment. You cannot set the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    ", + "CreateDBInstanceMessage$AutoMinorVersionUpgrade": "

    Indicates that minor engine upgrades will be applied automatically to the DB instance during the maintenance window.

    Default: true

    ", + "CreateDBInstanceMessage$PubliclyAccessible": "

    Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

    Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

    • Default VPC: true
    • VPC: false

    If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private.

    ", + "CreateDBInstanceReadReplicaMessage$AutoMinorVersionUpgrade": "

    Indicates that minor engine upgrades will be applied automatically to the read replica during the maintenance window.

    Default: Inherits from the source DB instance

    ", + "CreateDBInstanceReadReplicaMessage$PubliclyAccessible": "

    Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

    Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

    • Default VPC: true
    • VPC: false

    If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private.

    ", + "CreateEventSubscriptionMessage$Enabled": "

    A Boolean value; set to true to activate the subscription, set to false to create the subscription but not activate it.

    ", + "DescribeDBEngineVersionsMessage$ListSupportedCharacterSets": "

    If this parameter is specified, and if the requested engine supports the CharacterSetName parameter for CreateDBInstance, the response includes a list of supported character sets for each engine version.

    ", + "DescribeOrderableDBInstanceOptionsMessage$Vpc": "

    The VPC filter value. Specify this parameter to show only the available VPC or non-VPC offerings.

    ", + "DescribeReservedDBInstancesMessage$MultiAZ": "

    The Multi-AZ filter value. Specify this parameter to show only those reservations matching the specified Multi-AZ parameter.

    ", + "DescribeReservedDBInstancesOfferingsMessage$MultiAZ": "

    The Multi-AZ filter value. Specify this parameter to show only the available offerings matching the specified Multi-AZ parameter.

    ", + "ModifyDBInstanceMessage$MultiAZ": "

    Specifies if the DB instance is a Multi-AZ deployment. Changing this parameter does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

    Constraints: Cannot be specified if the DB instance is a read replica.

    ", + "ModifyDBInstanceMessage$AutoMinorVersionUpgrade": "

    Indicates that minor version upgrades will be applied automatically to the DB instance during the maintenance window. Changing this parameter does not result in an outage except in the following case and the change is asynchronously applied as soon as possible. An outage will result if this parameter is set to true during the maintenance window, and a newer minor version is available, and RDS has enabled auto patching for that engine version.

    ", + "ModifyEventSubscriptionMessage$Enabled": "

    A Boolean value; set to true to activate the subscription.

    ", + "PendingModifiedValues$MultiAZ": "

    Indicates that the Single-AZ DB instance is to change to a Multi-AZ deployment.

    ", + "RebootDBInstanceMessage$ForceFailover": "

    When true, the reboot will be conducted through a MultiAZ failover.

    Constraint: You cannot specify true if the instance is not configured for MultiAZ.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$MultiAZ": "

    Specifies if the DB instance is a Multi-AZ deployment.

    Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$PubliclyAccessible": "

    Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

    Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

    • Default VPC: true
    • VPC: false

    If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$AutoMinorVersionUpgrade": "

    Indicates that minor version upgrades will be applied automatically to the DB instance during the maintenance window.

    ", + "RestoreDBInstanceToPointInTimeMessage$MultiAZ": "

    Specifies if the DB instance is a Multi-AZ deployment.

    Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    ", + "RestoreDBInstanceToPointInTimeMessage$PubliclyAccessible": "

    Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

    Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

    • Default VPC: true
    • VPC: false

    If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private.

    ", + "RestoreDBInstanceToPointInTimeMessage$AutoMinorVersionUpgrade": "

    Indicates that minor version upgrades will be applied automatically to the DB instance during the maintenance window.

    " + } + }, + "CharacterSet": { + "base": "

    This data type is used as a response element in the action DescribeDBEngineVersions.

    ", + "refs": { + "DBEngineVersion$DefaultCharacterSet": "

    The default character set for new instances of this engine version, if the CharacterSetName parameter of the CreateDBInstance API is not specified.

    ", + "SupportedCharacterSetsList$member": null + } + }, + "CopyDBSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "CopyDBSnapshotResult": { + "base": null, + "refs": { + } + }, + "CreateDBInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBInstanceReadReplicaMessage": { + "base": null, + "refs": { + } + }, + "CreateDBInstanceReadReplicaResult": { + "base": null, + "refs": { + } + }, + "CreateDBInstanceResult": { + "base": null, + "refs": { + } + }, + "CreateDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBParameterGroupResult": { + "base": null, + "refs": { + } + }, + "CreateDBSecurityGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBSecurityGroupResult": { + "base": null, + "refs": { + } + }, + "CreateDBSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBSnapshotResult": { + "base": null, + "refs": { + } + }, + "CreateDBSubnetGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBSubnetGroupResult": { + "base": null, + "refs": { + } + }, + "CreateEventSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateEventSubscriptionResult": { + "base": null, + "refs": { + } + }, + "CreateOptionGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateOptionGroupResult": { + "base": null, + "refs": { + } + }, + "DBEngineVersion": { + "base": "

    This data type is used as a response element in the action DescribeDBEngineVersions.

    ", + "refs": { + "DBEngineVersionList$member": null + } + }, + "DBEngineVersionList": { + "base": null, + "refs": { + "DBEngineVersionMessage$DBEngineVersions": "

    A list of DBEngineVersion elements.

    " + } + }, + "DBEngineVersionMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBEngineVersions action.

    ", + "refs": { + } + }, + "DBInstance": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBInstances action.

    ", + "refs": { + "CreateDBInstanceReadReplicaResult$DBInstance": null, + "CreateDBInstanceResult$DBInstance": null, + "DBInstanceList$member": null, + "DeleteDBInstanceResult$DBInstance": null, + "ModifyDBInstanceResult$DBInstance": null, + "PromoteReadReplicaResult$DBInstance": null, + "RebootDBInstanceResult$DBInstance": null, + "RestoreDBInstanceFromDBSnapshotResult$DBInstance": null, + "RestoreDBInstanceToPointInTimeResult$DBInstance": null + } + }, + "DBInstanceAlreadyExistsFault": { + "base": "

    User already has a DB instance with the given identifier.

    ", + "refs": { + } + }, + "DBInstanceList": { + "base": null, + "refs": { + "DBInstanceMessage$DBInstances": "

    A list of DBInstance instances.

    " + } + }, + "DBInstanceMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBInstances action.

    ", + "refs": { + } + }, + "DBInstanceNotFoundFault": { + "base": "

    DBInstanceIdentifier does not refer to an existing DB instance.

    ", + "refs": { + } + }, + "DBInstanceStatusInfo": { + "base": "

    Provides a list of status information for a DB instance.

    ", + "refs": { + "DBInstanceStatusInfoList$member": null + } + }, + "DBInstanceStatusInfoList": { + "base": null, + "refs": { + "DBInstance$StatusInfos": "

    The status of a read replica. If the instance is not a read replica, this will be blank.

    " + } + }, + "DBLogFileNotFoundFault": { + "base": "

    LogFileName does not refer to an existing DB log file.

    ", + "refs": { + } + }, + "DBParameterGroup": { + "base": "

    Contains the result of a successful invocation of the CreateDBParameterGroup action.

    This data type is used as a request parameter in the DeleteDBParameterGroup action, and as a response element in the DescribeDBParameterGroups action.

    ", + "refs": { + "CreateDBParameterGroupResult$DBParameterGroup": null, + "DBParameterGroupList$member": null + } + }, + "DBParameterGroupAlreadyExistsFault": { + "base": "

    A DB parameter group with the same name exists.

    ", + "refs": { + } + }, + "DBParameterGroupDetails": { + "base": "

    Contains the result of a successful invocation of the DescribeDBParameters action.

    ", + "refs": { + } + }, + "DBParameterGroupList": { + "base": null, + "refs": { + "DBParameterGroupsMessage$DBParameterGroups": "

    A list of DBParameterGroup instances.

    " + } + }, + "DBParameterGroupNameMessage": { + "base": "

    Contains the result of a successful invocation of the ModifyDBParameterGroup or ResetDBParameterGroup action.

    ", + "refs": { + } + }, + "DBParameterGroupNotFoundFault": { + "base": "

    DBParameterGroupName does not refer to an existing DB parameter group.

    ", + "refs": { + } + }, + "DBParameterGroupQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB parameter groups.

    ", + "refs": { + } + }, + "DBParameterGroupStatus": { + "base": "

    The status of the DB parameter group.

    This data type is used as a response element in the following actions:

    ", + "refs": { + "DBParameterGroupStatusList$member": null + } + }, + "DBParameterGroupStatusList": { + "base": null, + "refs": { + "DBInstance$DBParameterGroups": "

    Provides the list of DB parameter groups applied to this DB instance.

    " + } + }, + "DBParameterGroupsMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBParameterGroups action.

    ", + "refs": { + } + }, + "DBSecurityGroup": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBSecurityGroups action.

    ", + "refs": { + "AuthorizeDBSecurityGroupIngressResult$DBSecurityGroup": null, + "CreateDBSecurityGroupResult$DBSecurityGroup": null, + "DBSecurityGroups$member": null, + "RevokeDBSecurityGroupIngressResult$DBSecurityGroup": null + } + }, + "DBSecurityGroupAlreadyExistsFault": { + "base": "

    A DB security group with the name specified in DBSecurityGroupName already exists.

    ", + "refs": { + } + }, + "DBSecurityGroupMembership": { + "base": "

    This data type is used as a response element in the following actions:

    ", + "refs": { + "DBSecurityGroupMembershipList$member": null + } + }, + "DBSecurityGroupMembershipList": { + "base": null, + "refs": { + "DBInstance$DBSecurityGroups": "

    Provides List of DB security group elements containing only DBSecurityGroup.Name and DBSecurityGroup.Status subelements.

    ", + "Option$DBSecurityGroupMemberships": "

    If the option requires access to a port, then this DB security group allows access to the port.

    " + } + }, + "DBSecurityGroupMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBSecurityGroups action.

    ", + "refs": { + } + }, + "DBSecurityGroupNameList": { + "base": null, + "refs": { + "CreateDBInstanceMessage$DBSecurityGroups": "

    A list of DB security groups to associate with this DB instance.

    Default: The default DB security group for the database engine.

    ", + "ModifyDBInstanceMessage$DBSecurityGroups": "

    A list of DB security groups to authorize on this DB instance. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "OptionConfiguration$DBSecurityGroupMemberships": "

    A list of DBSecurityGroupMemebrship name strings used for this option.

    " + } + }, + "DBSecurityGroupNotFoundFault": { + "base": "

    DBSecurityGroupName does not refer to an existing DB security group.

    ", + "refs": { + } + }, + "DBSecurityGroupNotSupportedFault": { + "base": "

    A DB security group is not allowed for this action.

    ", + "refs": { + } + }, + "DBSecurityGroupQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB security groups.

    ", + "refs": { + } + }, + "DBSecurityGroups": { + "base": null, + "refs": { + "DBSecurityGroupMessage$DBSecurityGroups": "

    A list of DBSecurityGroup instances.

    " + } + }, + "DBSnapshot": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBSnapshots action.

    ", + "refs": { + "CopyDBSnapshotResult$DBSnapshot": null, + "CreateDBSnapshotResult$DBSnapshot": null, + "DBSnapshotList$member": null, + "DeleteDBSnapshotResult$DBSnapshot": null + } + }, + "DBSnapshotAlreadyExistsFault": { + "base": "

    DBSnapshotIdentifier is already used by an existing snapshot.

    ", + "refs": { + } + }, + "DBSnapshotList": { + "base": null, + "refs": { + "DBSnapshotMessage$DBSnapshots": "

    A list of DBSnapshot instances.

    " + } + }, + "DBSnapshotMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBSnapshots action.

    ", + "refs": { + } + }, + "DBSnapshotNotFoundFault": { + "base": "

    DBSnapshotIdentifier does not refer to an existing DB snapshot.

    ", + "refs": { + } + }, + "DBSubnetGroup": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBSubnetGroups action.

    ", + "refs": { + "CreateDBSubnetGroupResult$DBSubnetGroup": null, + "DBInstance$DBSubnetGroup": "

    Specifies information on the subnet group associated with the DB instance, including the name, description, and subnets in the subnet group.

    ", + "DBSubnetGroups$member": null, + "ModifyDBSubnetGroupResult$DBSubnetGroup": null + } + }, + "DBSubnetGroupAlreadyExistsFault": { + "base": "

    DBSubnetGroupName is already used by an existing DB subnet group.

    ", + "refs": { + } + }, + "DBSubnetGroupDoesNotCoverEnoughAZs": { + "base": "

    Subnets in the DB subnet group should cover at least two Availability Zones unless there is only one Availability Zone.

    ", + "refs": { + } + }, + "DBSubnetGroupMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBSubnetGroups action.

    ", + "refs": { + } + }, + "DBSubnetGroupNotAllowedFault": { + "base": "

    Indicates that the DBSubnetGroup should not be specified while creating read replicas that lie in the same region as the source instance.

    ", + "refs": { + } + }, + "DBSubnetGroupNotFoundFault": { + "base": "

    DBSubnetGroupName does not refer to an existing DB subnet group.

    ", + "refs": { + } + }, + "DBSubnetGroupQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB subnet groups.

    ", + "refs": { + } + }, + "DBSubnetGroups": { + "base": null, + "refs": { + "DBSubnetGroupMessage$DBSubnetGroups": "

    A list of DBSubnetGroup instances.

    " + } + }, + "DBSubnetQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of subnets in a DB subnet groups.

    ", + "refs": { + } + }, + "DBUpgradeDependencyFailureFault": { + "base": "

    The DB upgrade failed because a resource the DB depends on could not be modified.

    ", + "refs": { + } + }, + "DeleteDBInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBInstanceResult": { + "base": null, + "refs": { + } + }, + "DeleteDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBSecurityGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBSnapshotResult": { + "base": null, + "refs": { + } + }, + "DeleteDBSubnetGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteEventSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteEventSubscriptionResult": { + "base": null, + "refs": { + } + }, + "DeleteOptionGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBEngineVersionsMessage": { + "base": null, + "refs": { + } + }, + "DescribeDBInstancesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBLogFilesDetails": { + "base": "

    This data type is used as a response element to DescribeDBLogFiles.

    ", + "refs": { + "DescribeDBLogFilesList$member": null + } + }, + "DescribeDBLogFilesList": { + "base": null, + "refs": { + "DescribeDBLogFilesResponse$DescribeDBLogFiles": "

    The DB log files returned.

    " + } + }, + "DescribeDBLogFilesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBLogFilesResponse": { + "base": "

    The response from a call to DescribeDBLogFiles.

    ", + "refs": { + } + }, + "DescribeDBParameterGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBParametersMessage": { + "base": null, + "refs": { + } + }, + "DescribeDBSecurityGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBSnapshotsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBSubnetGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEngineDefaultParametersMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEngineDefaultParametersResult": { + "base": null, + "refs": { + } + }, + "DescribeEventCategoriesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEventSubscriptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEventsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeOptionGroupOptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeOptionGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeOrderableDBInstanceOptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeReservedDBInstancesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeReservedDBInstancesOfferingsMessage": { + "base": "

    ", + "refs": { + } + }, + "Double": { + "base": null, + "refs": { + "RecurringCharge$RecurringChargeAmount": "

    The amount of the recurring charge.

    ", + "ReservedDBInstance$FixedPrice": "

    The fixed price charged for this reserved DB instance.

    ", + "ReservedDBInstance$UsagePrice": "

    The hourly price charged for this reserved DB instance.

    ", + "ReservedDBInstancesOffering$FixedPrice": "

    The fixed price charged for this offering.

    ", + "ReservedDBInstancesOffering$UsagePrice": "

    The hourly price charged for this offering.

    " + } + }, + "DownloadDBLogFilePortionDetails": { + "base": "

    This data type is used as a response element to DownloadDBLogFilePortion.

    ", + "refs": { + } + }, + "DownloadDBLogFilePortionMessage": { + "base": "

    ", + "refs": { + } + }, + "EC2SecurityGroup": { + "base": "

    This data type is used as a response element in the following actions:

    ", + "refs": { + "EC2SecurityGroupList$member": null + } + }, + "EC2SecurityGroupList": { + "base": null, + "refs": { + "DBSecurityGroup$EC2SecurityGroups": "

    Contains a list of EC2SecurityGroup elements.

    " + } + }, + "Endpoint": { + "base": "

    This data type is used as a response element in the following actions:

    ", + "refs": { + "DBInstance$Endpoint": "

    Specifies the connection endpoint.

    " + } + }, + "EngineDefaults": { + "base": "

    Contains the result of a successful invocation of the DescribeEngineDefaultParameters action.

    ", + "refs": { + "DescribeEngineDefaultParametersResult$EngineDefaults": null + } + }, + "Event": { + "base": "

    This data type is used as a response element in the DescribeEvents action.

    ", + "refs": { + "EventList$member": null + } + }, + "EventCategoriesList": { + "base": null, + "refs": { + "CreateEventSubscriptionMessage$EventCategories": "

    A list of event categories for a SourceType that you want to subscribe to. You can see a list of the categories for a given SourceType in the Events topic in the Amazon RDS User Guide or by using the DescribeEventCategories action.

    ", + "DescribeEventsMessage$EventCategories": "

    A list of event categories that trigger notifications for a event notification subscription.

    ", + "Event$EventCategories": "

    Specifies the category for the event.

    ", + "EventCategoriesMap$EventCategories": "

    The event categories for the specified source type

    ", + "EventSubscription$EventCategoriesList": "

    A list of event categories for the RDS event notification subscription.

    ", + "ModifyEventSubscriptionMessage$EventCategories": "

    A list of event categories for a SourceType that you want to subscribe to. You can see a list of the categories for a given SourceType in the Events topic in the Amazon RDS User Guide or by using the DescribeEventCategories action.

    " + } + }, + "EventCategoriesMap": { + "base": "
+    "EventCategoriesMap": {
+      "base": "Contains the results of a successful invocation of the DescribeEventCategories action.",
+      "refs": {
+        "EventCategoriesMapList$member": null
+      }
+    },
+    "EventCategoriesMapList": {
+      "base": null,
+      "refs": {
+        "EventCategoriesMessage$EventCategoriesMapList": "A list of EventCategoriesMap data types."
+      }
+    },
+    "EventCategoriesMessage": {
+      "base": "Data returned from the DescribeEventCategories action.",
+      "refs": {}
+    },
+    "EventList": {
+      "base": null,
+      "refs": {
+        "EventsMessage$Events": "A list of Event instances."
+      }
+    },
+    "EventSubscription": {
+      "base": "Contains the results of a successful invocation of the DescribeEventSubscriptions action.",
+      "refs": {
+        "AddSourceIdentifierToSubscriptionResult$EventSubscription": null,
+        "CreateEventSubscriptionResult$EventSubscription": null,
+        "DeleteEventSubscriptionResult$EventSubscription": null,
+        "EventSubscriptionsList$member": null,
+        "ModifyEventSubscriptionResult$EventSubscription": null,
+        "RemoveSourceIdentifierFromSubscriptionResult$EventSubscription": null
+      }
+    },
+    "EventSubscriptionQuotaExceededFault": {
+      "base": "You have reached the maximum number of event subscriptions.",
+      "refs": {}
+    },
+    "EventSubscriptionsList": {
+      "base": null,
+      "refs": {
+        "EventSubscriptionsMessage$EventSubscriptionsList": "A list of EventSubscriptions data types."
+      }
+    },
+    "EventSubscriptionsMessage": {
+      "base": "Data returned by the DescribeEventSubscriptions action.",
+      "refs": {}
+    },
+    "EventsMessage": {
+      "base": "Contains the result of a successful invocation of the DescribeEvents action.",
+      "refs": {}
+    },
+    "Filter": {
+      "base": null,
+      "refs": {
+        "FilterList$member": null
+      }
+    },
+    "FilterList": {
+      "base": null,
+      "refs": {
+        "DescribeDBEngineVersionsMessage$Filters": "Not currently supported.",
+        "DescribeDBInstancesMessage$Filters": "This parameter is not currently supported.",
+        "DescribeDBLogFilesMessage$Filters": "This parameter is not currently supported.",
+        "DescribeDBParameterGroupsMessage$Filters": "This parameter is not currently supported.",
+        "DescribeDBParametersMessage$Filters": "This parameter is not currently supported.",
+        "DescribeDBSecurityGroupsMessage$Filters": "This parameter is not currently supported.",
+        "DescribeDBSnapshotsMessage$Filters": "This parameter is not currently supported.",
+        "DescribeDBSubnetGroupsMessage$Filters": "This parameter is not currently supported.",
+        "DescribeEngineDefaultParametersMessage$Filters": "Not currently supported.",
+        "DescribeEventCategoriesMessage$Filters": "This parameter is not currently supported.",
+        "DescribeEventSubscriptionsMessage$Filters": "This parameter is not currently supported.",
+        "DescribeEventsMessage$Filters": "This parameter is not currently supported.",
+        "DescribeOptionGroupOptionsMessage$Filters": "This parameter is not currently supported.",
+        "DescribeOptionGroupsMessage$Filters": "This parameter is not currently supported.",
+        "DescribeOrderableDBInstanceOptionsMessage$Filters": "This parameter is not currently supported.",
+        "DescribeReservedDBInstancesMessage$Filters": "This parameter is not currently supported.",
+        "DescribeReservedDBInstancesOfferingsMessage$Filters": "This parameter is not currently supported.",
+        "ListTagsForResourceMessage$Filters": "This parameter is not currently supported."
+      }
+    },
+    "FilterValueList": {
+      "base": null,
+      "refs": {
+        "Filter$Values": "This parameter is not currently supported."
+      }
+    },
+    "IPRange": {
+      "base": "This data type is used as a response element in the DescribeDBSecurityGroups action.",
+      "refs": {
+        "IPRangeList$member": null
+      }
+    },
+    "IPRangeList": {
+      "base": null,
+      "refs": {
+        "DBSecurityGroup$IPRanges": "Contains a list of IPRange elements."
+      }
+    },
+    "InstanceQuotaExceededFault": {
+      "base": "Request would result in user exceeding the allowed number of DB instances.",
+      "refs": {}
+    },
+    "InsufficientDBInstanceCapacityFault": {
+      "base": "Specified DB instance class is not available in the specified Availability Zone.",
+      "refs": {}
+    },
+    "Integer": {
+      "base": null,
+      "refs": {
+        "DBInstance$AllocatedStorage": "Specifies the allocated storage size specified in gigabytes.",
+        "DBInstance$BackupRetentionPeriod": "Specifies the number of days for which automatic DB snapshots are retained.",
+        "DBSnapshot$AllocatedStorage": "Specifies the allocated storage size in gigabytes (GB).",
+        "DBSnapshot$Port": "Specifies the port that the database engine was listening on at the time of the snapshot.",
+        "DBSnapshot$PercentProgress": "The percentage of the estimated data that has been transferred.",
+        "DownloadDBLogFilePortionMessage$NumberOfLines": "The number of lines to download. If the NumberOfLines parameter is specified, the block of lines returned can be from the beginning or the end of the log file, depending on the value of the Marker parameter. • If neither Marker nor NumberOfLines is specified, the entire log file is returned. • If NumberOfLines is specified and Marker is not specified, then the most recent lines from the end of the log file are returned. • If Marker is specified as \"0\", then the specified number of lines from the beginning of the log file are returned. • You can download the log file in blocks of lines by specifying the size of the block using the NumberOfLines parameter, and by specifying a value of \"0\" for the Marker parameter in your first request. Include the Marker value returned in the response as the Marker value for the next request, continuing until the AdditionalDataPending response element returns false.",
+        "Endpoint$Port": "Specifies the port that the database engine is listening on.",
+        "ReservedDBInstance$Duration": "The duration of the reservation in seconds.",
+        "ReservedDBInstance$DBInstanceCount": "The number of reserved DB instances.",
+        "ReservedDBInstancesOffering$Duration": "The duration of the offering in seconds."
+      }
+    },
+    "IntegerOptional": {
+      "base": null,
+      "refs": {
+        "CreateDBInstanceMessage$AllocatedStorage": "The amount of storage (in gigabytes) to be initially allocated for the database instance. Type: Integer. MySQL constraints: Must be an integer from 5 to 3072. PostgreSQL constraints: Must be an integer from 5 to 3072. Oracle constraints: Must be an integer from 10 to 3072. SQL Server constraints: Must be an integer from 200 to 1024 (Standard Edition and Enterprise Edition) or from 30 to 1024 (Express Edition and Web Edition).",
+        "CreateDBInstanceMessage$BackupRetentionPeriod": "The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups. Default: 1. Constraints: • Must be a value from 0 to 35 • Cannot be set to 0 if the DB instance is a source to read replicas",
+        "CreateDBInstanceMessage$Port": "The port number on which the database accepts connections. Type: Integer. MySQL default: 3306. PostgreSQL default: 5432. Oracle default: 1521. SQL Server default: 1433; valid values 1150-65535 except for 1434, 3389, 47001, 49152, and 49152 through 49156. Valid values for all other engines: 1150-65535.",
+        "CreateDBInstanceMessage$Iops": "The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance. Constraints: To use PIOPS, this value must be an integer greater than 1000.",
+        "CreateDBInstanceReadReplicaMessage$Port": "The port number that the DB instance uses for connections. Default: Inherits from the source DB instance. Valid values: 1150-65535.",
+        "CreateDBInstanceReadReplicaMessage$Iops": "The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance.",
+        "DBInstance$Iops": "Specifies the Provisioned IOPS (I/O operations per second) value.",
+        "DBSnapshot$Iops": "Specifies the Provisioned IOPS (I/O operations per second) value of the DB instance at the time of the snapshot.",
+        "DescribeDBEngineVersionsMessage$MaxRecords": "The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so that the following results can be retrieved. Default: 100. Constraints: minimum 20, maximum 100.",
+        "DescribeDBInstancesMessage$MaxRecords": "The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. Default: 100. Constraints: minimum 20, maximum 100.",
+        "DescribeDBLogFilesMessage$MaxRecords": "The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.",
+        "DescribeDBParameterGroupsMessage$MaxRecords": "The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. Default: 100. Constraints: minimum 20, maximum 100.",
+        "DescribeDBParametersMessage$MaxRecords": "The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. Default: 100. Constraints: minimum 20, maximum 100.",
+        "DescribeDBSecurityGroupsMessage$MaxRecords": "The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. Default: 100. Constraints: minimum 20, maximum 100.",
+        "DescribeDBSnapshotsMessage$MaxRecords": "The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. Default: 100. Constraints: minimum 20, maximum 100.",
+        "DescribeDBSubnetGroupsMessage$MaxRecords": "The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. Default: 100. Constraints: minimum 20, maximum 100.",
+        "DescribeEngineDefaultParametersMessage$MaxRecords": "The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. Default: 100. Constraints: minimum 20, maximum 100.",
+        "DescribeEventSubscriptionsMessage$MaxRecords": "The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. Default: 100. Constraints: minimum 20, maximum 100.",
+        "DescribeEventsMessage$Duration": "The number of minutes to retrieve events for. Default: 60.",
+        "DescribeEventsMessage$MaxRecords": "The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. Default: 100. Constraints: minimum 20, maximum 100.",
+        "DescribeOptionGroupOptionsMessage$MaxRecords": "The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. Default: 100. Constraints: minimum 20, maximum 100.",
+        "DescribeOptionGroupsMessage$MaxRecords": "The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. Default: 100. Constraints: minimum 20, maximum 100.",
+        "DescribeOrderableDBInstanceOptionsMessage$MaxRecords": "The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. Default: 100. Constraints: minimum 20, maximum 100.",
+        "DescribeReservedDBInstancesMessage$MaxRecords": "The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so that the following results can be retrieved. Default: 100. Constraints: minimum 20, maximum 100.",
+        "DescribeReservedDBInstancesOfferingsMessage$MaxRecords": "The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so that the following results can be retrieved. Default: 100. Constraints: minimum 20, maximum 100.",
+        "ModifyDBInstanceMessage$AllocatedStorage": "The new storage capacity of the RDS instance. Changing this parameter does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. Type: Integer. MySQL and PostgreSQL: Default: Uses existing setting. Valid values: 5-3072. Oracle: Default: Uses existing setting. Valid values: 10-3072. Constraints: Value supplied must be at least 10% greater than the current value; values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value. SQL Server: Cannot be modified. If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance will be available for use, but might experience performance degradation. While the migration takes place, nightly backups for the instance will be suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a read replica for the instance, and creating a DB snapshot of the instance.",
+        "ModifyDBInstanceMessage$BackupRetentionPeriod": "The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups. Changing this parameter can result in an outage if you change from 0 to a non-zero value or from a non-zero value to 0. These changes are applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. If you change the parameter from one non-zero value to another non-zero value, the change is asynchronously applied as soon as possible. Default: Uses existing setting. Constraints: • Must be a value from 0 to 35 • Can be specified for a read replica only if the source is running MySQL 5.6 • Cannot be set to 0 if the DB instance is a source to read replicas",
+        "ModifyDBInstanceMessage$Iops": "The new Provisioned IOPS (I/O operations per second) value for the RDS instance. Changing this parameter does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. Type: Integer. Default: Uses existing setting. Constraints: Value supplied must be at least 10% greater than the current value; values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value. If you are migrating from Provisioned IOPS to standard storage, set this value to 0. Setting the IOPS value for the SQL Server database engine is not supported. If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance will be available for use, but might experience performance degradation. While the migration takes place, nightly backups for the instance will be suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a read replica for the instance, and creating a DB snapshot of the instance.",
+        "Option$Port": "If required, the port configured for this option to use.",
+        "OptionConfiguration$Port": "The optional port for the option.",
+        "OptionGroupOption$DefaultPort": "If the option requires a port, specifies the default port for the option.",
+        "PendingModifiedValues$AllocatedStorage": "Contains the new AllocatedStorage size for the DB instance that will be applied or is in progress.",
+        "PendingModifiedValues$Port": "Specifies the pending port for the DB instance.",
+        "PendingModifiedValues$BackupRetentionPeriod": "Specifies the pending number of days for which automated backups are retained.",
+        "PendingModifiedValues$Iops": "Specifies the new Provisioned IOPS value for the DB instance that will be applied or is being applied.",
+        "PromoteReadReplicaMessage$BackupRetentionPeriod": "The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups. Default: 1. Constraints: • Must be a value from 0 to 8",
+        "PurchaseReservedDBInstancesOfferingMessage$DBInstanceCount": "The number of instances to reserve. Default: 1.",
+        "RestoreDBInstanceFromDBSnapshotMessage$Port": "The port number on which the database accepts connections. Default: The same port as the original DB instance. Constraints: Value must be 1150-65535.",
+        "RestoreDBInstanceFromDBSnapshotMessage$Iops": "Specifies the amount of provisioned IOPS for the DB instance, expressed in I/O operations per second. If this parameter is not specified, the IOPS value will be taken from the backup. If this parameter is set to 0, the new instance will be converted to a non-PIOPS instance, which will take additional time, though your DB instance will be available for connections before the conversion starts. Constraints: Must be an integer greater than 1000. Setting the IOPS value for the SQL Server database engine is not supported.",
+        "RestoreDBInstanceToPointInTimeMessage$Port": "The port number on which the database accepts connections. Constraints: Value must be 1150-65535. Default: The same port as the original DB instance.",
+        "RestoreDBInstanceToPointInTimeMessage$Iops": "The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance. Constraints: Must be an integer greater than 1000. Setting the IOPS value for the SQL Server database engine is not supported."
+      }
+    },
+    "InvalidDBInstanceStateFault": {
+      "base": "The specified DB instance is not in the available state.",
+      "refs": {}
+    },
+    "InvalidDBParameterGroupStateFault": {
+      "base": "The DB parameter group cannot be deleted because it is in use.",
+      "refs": {}
+    },
+    "InvalidDBSecurityGroupStateFault": {
+      "base": "The state of the DB security group does not allow deletion.",
+      "refs": {}
+    },
+    "InvalidDBSnapshotStateFault": {
+      "base": "The state of the DB snapshot does not allow deletion.",
+      "refs": {}
+    },
+    "InvalidDBSubnetGroupFault": {
+      "base": "Indicates the DBSubnetGroup does not belong to the same VPC as that of an existing cross-region read replica of the same source instance.",
+      "refs": {}
+    },
+    "InvalidDBSubnetGroupStateFault": {
+      "base": "The DB subnet group cannot be deleted because it is in use.",
+      "refs": {}
+    },
+    "InvalidDBSubnetStateFault": {
+      "base": "The DB subnet is not in the available state.",
+      "refs": {}
+    },
+    "InvalidEventSubscriptionStateFault": {
+      "base": "This error can occur if someone else is modifying a subscription. You should retry the action.",
+      "refs": {}
+    },
+    "InvalidOptionGroupStateFault": {
+      "base": "The option group is not in the available state.",
+      "refs": {}
+    },
+    "InvalidRestoreFault": {
+      "base": "Cannot restore from a VPC backup to a non-VPC DB instance.",
+      "refs": {}
+    },
+    "InvalidSubnet": {
+      "base": "The requested subnet is invalid, or multiple subnets were requested that are not all in a common VPC.",
+      "refs": {}
+    },
+    "InvalidVPCNetworkStateFault": {
+      "base": "The DB subnet group no longer covers all Availability Zones after it was created, because of changes that were made by the user.",
+      "refs": {}
+    },
+    "KeyList": {
+      "base": null,
+      "refs": {
+        "RemoveTagsFromResourceMessage$TagKeys": "The tag key (name) of the tag to be removed."
+      }
+    },
+    "ListTagsForResourceMessage": {
+      "base": "",
+      "refs": {}
+    },
+    "Long": {
+      "base": null,
+      "refs": {
+        "DescribeDBLogFilesDetails$LastWritten": "A POSIX timestamp when the last log entry was written.",
+        "DescribeDBLogFilesDetails$Size": "The size, in bytes, of the log file for the specified DB instance.",
+        "DescribeDBLogFilesMessage$FileLastWritten": "Filters the available log files for files written since the specified date, in POSIX timestamp format.",
+        "DescribeDBLogFilesMessage$FileSize": "Filters the available log files for files larger than the specified size."
+      }
+    },
+    "ModifyDBInstanceMessage": {
+      "base": "",
+      "refs": {}
+    },
+    "ModifyDBInstanceResult": {
+      "base": null,
+      "refs": {}
+    },
+    "ModifyDBParameterGroupMessage": {
+      "base": "",
+      "refs": {}
+    },
+    "ModifyDBSubnetGroupMessage": {
+      "base": "",
+      "refs": {}
+    },
+    "ModifyDBSubnetGroupResult": {
+      "base": null,
+      "refs": {}
+    },
+    "ModifyEventSubscriptionMessage": {
+      "base": "",
+      "refs": {}
+    },
+    "ModifyEventSubscriptionResult": {
+      "base": null,
+      "refs": {}
+    },
+    "ModifyOptionGroupMessage": {
+      "base": "",
+      "refs": {}
+    },
+    "ModifyOptionGroupResult": {
+      "base": null,
+      "refs": {}
+    },
+    "Option": {
+      "base": "Option details.",
+      "refs": {
+        "OptionsList$member": null
+      }
+    },
+    "OptionConfiguration": {
+      "base": "A list of all available options.",
+      "refs": {
+        "OptionConfigurationList$member": null
+      }
+    },
+    "OptionConfigurationList": {
+      "base": null,
+      "refs": {
+        "ModifyOptionGroupMessage$OptionsToInclude": "Options in this list are added to the option group or, if already present, the specified configuration is used to update the existing configuration."
+      }
+    },
+    "OptionGroup": {
+      "base": "",
+      "refs": {
+        "CreateOptionGroupResult$OptionGroup": null,
+        "ModifyOptionGroupResult$OptionGroup": null,
+        "OptionGroupsList$member": null
+      }
+    },
+    "OptionGroupAlreadyExistsFault": {
+      "base": "The option group you are trying to create already exists.",
+      "refs": {}
+    },
+    "OptionGroupMembership": {
+      "base": "Provides information on the option groups the DB instance is a member of.",
+      "refs": {
+        "OptionGroupMembershipList$member": null
+      }
+    },
+    "OptionGroupMembershipList": {
+      "base": null,
+      "refs": {
+        "DBInstance$OptionGroupMemberships": "Provides the list of option group memberships for this DB instance."
+      }
+    },
+    "OptionGroupNotFoundFault": {
+      "base": "The specified option group could not be found.",
+      "refs": {}
+    },
+    "OptionGroupOption": {
+      "base": "Available option.",
+      "refs": {
+        "OptionGroupOptionsList$member": null
+      }
+    },
+    "OptionGroupOptionSetting": {
+      "base": "Option group option settings are used to display settings available for each option with their default values and other information. These values are used with the DescribeOptionGroupOptions action.",
+      "refs": {
+        "OptionGroupOptionSettingsList$member": null
+      }
+    },
+    "OptionGroupOptionSettingsList": {
+      "base": null,
+      "refs": {
+        "OptionGroupOption$OptionGroupOptionSettings": "Specifies the option settings that are available (and the default value) for each option in an option group."
+      }
+    },
+    "OptionGroupOptionsList": {
+      "base": "List of available option group options.",
+      "refs": {
+        "OptionGroupOptionsMessage$OptionGroupOptions": null
+      }
+    },
+    "OptionGroupOptionsMessage": {
+      "base": "",
+      "refs": {}
+    },
+    "OptionGroupQuotaExceededFault": {
+      "base": "The quota of 20 option groups was exceeded for this AWS account.",
+      "refs": {}
+    },
+    "OptionGroups": {
+      "base": "List of option groups.",
+      "refs": {}
+    },
+    "OptionGroupsList": {
+      "base": null,
+      "refs": {
+        "OptionGroups$OptionGroupsList": "List of option groups."
+      }
+    },
+    "OptionNamesList": {
+      "base": null,
+      "refs": {
+        "ModifyOptionGroupMessage$OptionsToRemove": "Options in this list are removed from the option group."
+      }
+    },
+    "OptionSetting": {
+      "base": "Option settings are the actual settings being applied or configured for that option. It is used when you modify an option group or describe option groups. For example, the NATIVE_NETWORK_ENCRYPTION option has a setting called SQLNET.ENCRYPTION_SERVER that can have several different values.",
+      "refs": {
+        "OptionSettingConfigurationList$member": null,
+        "OptionSettingsList$member": null
+      }
+    },
+    "OptionSettingConfigurationList": {
+      "base": null,
+      "refs": {
+        "Option$OptionSettings": "The option settings for this option."
+      }
+    },
+    "OptionSettingsList": {
+      "base": null,
+      "refs": {
+        "OptionConfiguration$OptionSettings": "The option settings to include in an option group."
+      }
+    },
+    "OptionsDependedOn": {
+      "base": null,
+      "refs": {
+        "OptionGroupOption$OptionsDependedOn": "List of all options that are prerequisites for this option."
+      }
+    },
+    "OptionsList": {
+      "base": null,
+      "refs": {
+        "OptionGroup$Options": "Indicates what options are available in the option group."
+      }
+    },
+    "OrderableDBInstanceOption": {
+      "base": "Contains a list of available options for a DB instance. This data type is used as a response element in the DescribeOrderableDBInstanceOptions action.",
+      "refs": {
+        "OrderableDBInstanceOptionsList$member": null
+      }
+    },
+    "OrderableDBInstanceOptionsList": {
+      "base": null,
+      "refs": {
+        "OrderableDBInstanceOptionsMessage$OrderableDBInstanceOptions": "An OrderableDBInstanceOption structure containing information about orderable options for the DB instance."
+      }
+    },
+    "OrderableDBInstanceOptionsMessage": {
+      "base": "Contains the result of a successful invocation of the DescribeOrderableDBInstanceOptions action.",
+      "refs": {}
+    },
+    "Parameter": {
+      "base": "This data type is used as a request parameter in the ModifyDBParameterGroup and ResetDBParameterGroup actions. This data type is used as a response element in the DescribeEngineDefaultParameters and DescribeDBParameters actions.",
+      "refs": {
+        "ParametersList$member": null
+      }
+    },
+    "ParametersList": {
+      "base": null,
+      "refs": {
+        "DBParameterGroupDetails$Parameters": "A list of Parameter values.",
+        "EngineDefaults$Parameters": "Contains a list of engine default parameters.",
+        "ModifyDBParameterGroupMessage$Parameters": "An array of parameter names, values, and the apply method for the parameter update. At least one parameter name, value, and apply method must be supplied; subsequent arguments are optional. A maximum of 20 parameters can be modified in a single request. Valid values (for the apply method): immediate | pending-reboot. You can use the immediate value with dynamic parameters only. You can use the pending-reboot value for both dynamic and static parameters; those changes are applied when the DB instance reboots.",
+        "ResetDBParameterGroupMessage$Parameters": "An array of parameter names, values, and the apply method for the parameter update. At least one parameter name, value, and apply method must be supplied; subsequent arguments are optional. A maximum of 20 parameters can be modified in a single request. MySQL valid values (for the apply method): immediate | pending-reboot. You can use the immediate value with dynamic parameters only. You can use the pending-reboot value for both dynamic and static parameters; those changes are applied when the DB instance reboots. Oracle valid values (for the apply method): pending-reboot."
+      }
+    },
+    "PendingModifiedValues": {
+      "base": "This data type is used as a response element in the ModifyDBInstance action.",
+      "refs": {
+        "DBInstance$PendingModifiedValues": "Specifies that changes to the DB instance are pending. This element is only included when changes are pending. Specific changes are identified by subelements."
+      }
+    },
+    "PointInTimeRestoreNotEnabledFault": {
+      "base": "SourceDBInstanceIdentifier refers to a DB instance with BackupRetentionPeriod equal to 0.",
+      "refs": {}
+    },
+    "PromoteReadReplicaMessage": {
+      "base": "",
+      "refs": {}
+    },
+    "PromoteReadReplicaResult": {
+      "base": null,
+      "refs": {}
+    },
+    "ProvisionedIopsNotAvailableInAZFault": {
+      "base": "Provisioned IOPS are not available in the specified Availability Zone.",
+      "refs": {}
+    },
+    "PurchaseReservedDBInstancesOfferingMessage": {
+      "base": "",
+      "refs": {}
+    },
+    "PurchaseReservedDBInstancesOfferingResult": {
+      "base": null,
+      "refs": {}
+    },
+    "ReadReplicaDBInstanceIdentifierList": {
+      "base": null,
+      "refs": {
+        "DBInstance$ReadReplicaDBInstanceIdentifiers": "Contains one or more identifiers of the read replicas associated with this DB instance."
+      }
+    },
+    "RebootDBInstanceMessage": {
+      "base": "",
+      "refs": {}
+    },
+    "RebootDBInstanceResult": {
+      "base": null,
+      "refs": {}
+    },
+    "RecurringCharge": {
+      "base": "This data type is used as a response element in the DescribeReservedDBInstances and DescribeReservedDBInstancesOfferings actions.",
+      "refs": {
+        "RecurringChargeList$member": null
+      }
+    },
+    "RecurringChargeList": {
+      "base": null,
+      "refs": {
+        "ReservedDBInstance$RecurringCharges": "The recurring price charged to run this reserved DB instance.",
+        "ReservedDBInstancesOffering$RecurringCharges": "The recurring price charged to run this reserved DB instance."
+      }
+    },
+    "RemoveSourceIdentifierFromSubscriptionMessage": {
+      "base": "",
+      "refs": {}
+    },
+    "RemoveSourceIdentifierFromSubscriptionResult": {
+      "base": null,
+      "refs": {}
+    },
+    "RemoveTagsFromResourceMessage": {
+      "base": "",
+      "refs": {}
+    },
+    "ReservedDBInstance": {
+      "base": "This data type is used as a response element in the DescribeReservedDBInstances and PurchaseReservedDBInstancesOffering actions.",
+      "refs": {
+        "PurchaseReservedDBInstancesOfferingResult$ReservedDBInstance": null,
+        "ReservedDBInstanceList$member": null
+      }
+    },
+    "ReservedDBInstanceAlreadyExistsFault": {
+      "base": "User already has a reservation with the given identifier.",
+      "refs": {}
+    },
+    "ReservedDBInstanceList": {
+      "base": null,
+      "refs": {
+        "ReservedDBInstanceMessage$ReservedDBInstances": "A list of reserved DB instances."
+      }
+    },
+    "ReservedDBInstanceMessage": {
+      "base": "Contains the result of a successful invocation of the DescribeReservedDBInstances action.",
+      "refs": {}
+    },
+    "ReservedDBInstanceNotFoundFault": {
+      "base": "The specified reserved DB instance could not be found.",
+      "refs": {}
+    },
+    "ReservedDBInstanceQuotaExceededFault": {
+      "base": "Request would exceed the user's DB instance quota.",
+      "refs": {}
+    },
+    "ReservedDBInstancesOffering": {
+      "base": "This data type is used as a response element in the DescribeReservedDBInstancesOfferings action.",
+      "refs": {
+        "ReservedDBInstancesOfferingList$member": null
+      }
+    },
+    "ReservedDBInstancesOfferingList": {
+      "base": null,
+      "refs": {
+        "ReservedDBInstancesOfferingMessage$ReservedDBInstancesOfferings": "A list of reserved DB instance offerings."
+      }
+    },
+    "ReservedDBInstancesOfferingMessage": {
+      "base": "Contains the result of a successful invocation of the DescribeReservedDBInstancesOfferings action.",
+      "refs": {}
+    },
+    "ReservedDBInstancesOfferingNotFoundFault": {
+      "base": "Specified offering does not exist.",
+      "refs": {}
+    },
+    "ResetDBParameterGroupMessage": {
+      "base": "",
+      "refs": {}
+    },
+    "RestoreDBInstanceFromDBSnapshotMessage": {
+      "base": "",
+      "refs": {}
+    },
+    "RestoreDBInstanceFromDBSnapshotResult": {
+      "base": null,
+      "refs": {}
+    },
+    "RestoreDBInstanceToPointInTimeMessage": {
+      "base": "",
+      "refs": {}
+    },
+    "RestoreDBInstanceToPointInTimeResult": {
+      "base": null,
+      "refs": {}
+    },
+    "RevokeDBSecurityGroupIngressMessage": {
+      "base": "",
+      "refs": {}
+    },
+    "RevokeDBSecurityGroupIngressResult": {
+      "base": null,
+      "refs": {}
+    },
+    "SNSInvalidTopicFault": {
+      "base": "SNS has responded that there is a problem with the SNS topic specified.",
+      "refs": {}
+    },
+    "SNSNoAuthorizationFault": {
+      "base": "You do not have permission to publish to the SNS topic ARN.",
+      "refs": {}
+    },
+    "SNSTopicArnNotFoundFault": {
+      "base": "The SNS topic ARN does not exist.",
+      "refs": {}
+    },
+    "SnapshotQuotaExceededFault": {
+      "base": "Request would result in user exceeding the allowed number of DB snapshots.",
+      "refs": {}
+    },
+    "SourceIdsList": {
+      "base": null,
+      "refs": {
+        "CreateEventSubscriptionMessage$SourceIds": "The list of identifiers of the event sources for which events will be returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it cannot end with a hyphen or contain two consecutive hyphens. Constraints: • If SourceIds are supplied, SourceType must also be provided. • If the source type is a DB instance, then a DBInstanceIdentifier must be supplied. • If the source type is a DB security group, a DBSecurityGroupName must be supplied. • If the source type is a DB parameter group, a DBParameterGroupName must be supplied. • If the source type is a DB snapshot, a DBSnapshotIdentifier must be supplied.",
+        "EventSubscription$SourceIdsList": "A list of source IDs for the RDS event notification subscription."
+      }
+    },

    The requested source could not be found.

    ", + "refs": { + } + }, + "SourceType": { + "base": null, + "refs": { + "DescribeEventsMessage$SourceType": "

    The event source to retrieve events for. If no value is specified, all events are returned.

    ", + "Event$SourceType": "

    Specifies the source type for this event.

    " + } + }, + "StorageQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed amount of storage available across all DB instances.

    ", + "refs": { + } + }, + "String": { + "base": null, + "refs": { + "AddSourceIdentifierToSubscriptionMessage$SubscriptionName": "

    The name of the RDS event notification subscription you want to add a source identifier to.

    ", + "AddSourceIdentifierToSubscriptionMessage$SourceIdentifier": "

    The identifier of the event source to be added. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it cannot end with a hyphen or contain two consecutive hyphens.

    Constraints:

    • If the source type is a DB instance, then a DBInstanceIdentifier must be supplied.
    • If the source type is a DB security group, a DBSecurityGroupName must be supplied.
    • If the source type is a DB parameter group, a DBParameterGroupName must be supplied.
    • If the source type is a DB snapshot, a DBSnapshotIdentifier must be supplied.
    ", + "AddTagsToResourceMessage$ResourceName": "

    The Amazon RDS resource the tags will be added to. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

    ", + "AuthorizeDBSecurityGroupIngressMessage$DBSecurityGroupName": "

    The name of the DB security group to add authorization to.

    ", + "AuthorizeDBSecurityGroupIngressMessage$CIDRIP": "

    The IP range to authorize.

    ", + "AuthorizeDBSecurityGroupIngressMessage$EC2SecurityGroupName": "

    Name of the EC2 security group to authorize. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "AuthorizeDBSecurityGroupIngressMessage$EC2SecurityGroupId": "

    Id of the EC2 security group to authorize. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "AuthorizeDBSecurityGroupIngressMessage$EC2SecurityGroupOwnerId": "

    AWS Account Number of the owner of the EC2 security group specified in the EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable value. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "AvailabilityZone$Name": "

    The name of the availability zone.

    ", + "CharacterSet$CharacterSetName": "

    The name of the character set.

    ", + "CharacterSet$CharacterSetDescription": "

    The description of the character set.

    ", + "CopyDBSnapshotMessage$SourceDBSnapshotIdentifier": "

    The identifier for the source DB snapshot.

    Constraints:

    • Must specify a valid system snapshot in the \"available\" state.
    • If the source snapshot is in the same region as the copy, specify a valid DB snapshot identifier.
    • If the source snapshot is in a different region than the copy, specify a valid DB snapshot ARN. For more information, go to Copying a DB Snapshot.

    Example: rds:mydb-2012-04-02-00-01

    Example: arn:aws:rds:rr-regn-1:123456789012:snapshot:mysql-instance1-snapshot-20130805

    ", + "CopyDBSnapshotMessage$TargetDBSnapshotIdentifier": "

    The identifier for the copied snapshot.

    Constraints:

    • Cannot be null, empty, or blank
    • Must contain from 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-db-snapshot

    ", + "CreateDBInstanceMessage$DBName": "

    The meaning of this parameter differs according to the database engine you use.

    Type: String

    MySQL

    The name of the database to create when the DB instance is created. If this parameter is not specified, no database is created in the DB instance.

    Constraints:

    • Must contain 1 to 64 alphanumeric characters
    • Cannot be a word reserved by the specified database engine

    PostgreSQL

    The name of the database to create when the DB instance is created. If this parameter is not specified, no database is created in the DB instance.

    Constraints:

    • Must contain 1 to 63 alphanumeric characters
    • Must begin with a letter or an underscore. Subsequent characters can be letters, underscores, or digits (0-9).
    • Cannot be a word reserved by the specified database engine

    Oracle

    The Oracle System ID (SID) of the created DB instance.

    Default: ORCL

    Constraints:

    • Cannot be longer than 8 characters

    SQL Server

    Not applicable. Must be null.

    ", + "CreateDBInstanceMessage$DBInstanceIdentifier": "

    The DB instance identifier. This parameter is stored as a lowercase string.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens (1 to 15 for SQL Server).
    • First character must be a letter.
    • Cannot end with a hyphen or contain two consecutive hyphens.

    Example: mydbinstance

    ", + "CreateDBInstanceMessage$DBInstanceClass": "

    The compute and memory capacity of the DB instance.

    Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge

    ", + "CreateDBInstanceMessage$Engine": "

    The name of the database engine to be used for this instance.

    Valid Values: MySQL | oracle-se1 | oracle-se | oracle-ee | sqlserver-ee | sqlserver-se | sqlserver-ex | sqlserver-web | postgres

    ", + "CreateDBInstanceMessage$MasterUsername": "

    The name of master user for the client DB instance.

    MySQL

    Constraints:

    • Must be 1 to 16 alphanumeric characters.
    • First character must be a letter.
    • Cannot be a reserved word for the chosen database engine.

    Type: String

    Oracle

    Constraints:

    • Must be 1 to 30 alphanumeric characters.
    • First character must be a letter.
    • Cannot be a reserved word for the chosen database engine.

    SQL Server

    Constraints:

    • Must be 1 to 128 alphanumeric characters.
    • First character must be a letter.
    • Cannot be a reserved word for the chosen database engine.
    ", + "CreateDBInstanceMessage$MasterUserPassword": "

    The password for the master database user. Can be any printable ASCII character except \"/\", \"\"\", or \"@\".

    Type: String

    MySQL

    Constraints: Must contain from 8 to 41 characters.

    Oracle

    Constraints: Must contain from 8 to 30 characters.

    SQL Server

    Constraints: Must contain from 8 to 128 characters.
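
    Illustration only: a sketch tying the CreateDBInstanceMessage fields above together. svc is an *rds.RDS client built as in the CopyDBSnapshot sketch earlier, and all values are examples that satisfy the constraints listed above.

        // createInstance sketches CreateDBInstance for a MySQL instance.
        func createInstance(svc *rds.RDS) error {
            _, err := svc.CreateDBInstance(&rds.CreateDBInstanceInput{
                DBInstanceIdentifier: aws.String("mydbinstance"),    // 1-63 chars, starts with a letter
                DBInstanceClass:      aws.String("db.m3.medium"),    // one of the Valid Values above
                Engine:               aws.String("MySQL"),
                DBName:               aws.String("mydb"),            // MySQL: 1 to 64 alphanumerics
                MasterUsername:       aws.String("admin"),           // MySQL: 1 to 16 alphanumerics
                MasterUserPassword:   aws.String("examplepassword"), // MySQL: 8 to 41 characters
                AllocatedStorage:     aws.Int64(5),                  // assumption: storage size in GB
            })
            return err
        }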

    ", + "CreateDBInstanceMessage$AvailabilityZone": "

    The EC2 Availability Zone that the database instance will be created in.

    Default: A random, system-chosen Availability Zone in the endpoint's region.

    Example: us-east-1d

    Constraint: The AvailabilityZone parameter cannot be specified if the MultiAZ parameter is set to true. The specified Availability Zone must be in the same region as the current endpoint.

    ", + "CreateDBInstanceMessage$DBSubnetGroupName": "

    A DB subnet group to associate with this DB instance.

    If there is no DB subnet group, then it is a non-VPC DB instance.

    ", + "CreateDBInstanceMessage$PreferredMaintenanceWindow": "

    The weekly time range (in UTC) during which system maintenance can occur.

    Format: ddd:hh24:mi-ddd:hh24:mi

    Default: A 30-minute window selected at random from an 8-hour block of time per region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

    Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun

    Constraints: Minimum 30-minute window.

    ", + "CreateDBInstanceMessage$DBParameterGroupName": "

    The name of the DB parameter group to associate with this DB instance. If this argument is omitted, the default DBParameterGroup for the specified engine will be used.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "CreateDBInstanceMessage$PreferredBackupWindow": "

    The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter.

    Default: A 30-minute window selected at random from an 8-hour block of time per region. See the Amazon RDS User Guide for the time blocks for each region from which the default backup windows are assigned.

    Constraints: Must be in the format hh24:mi-hh24:mi. Times should be Universal Time Coordinated (UTC). Must not conflict with the preferred maintenance window. Must be at least 30 minutes.

    ", + "CreateDBInstanceMessage$EngineVersion": "

    The version number of the database engine to use.

    MySQL

    Example: 5.1.42

    Type: String

    PostgreSQL

    Example: 9.3

    Type: String

    Oracle

    Example: 11.2.0.2.v2

    Type: String

    SQL Server

    Example: 10.50.2789.0.v1

    ", + "CreateDBInstanceMessage$LicenseModel": "

    License model information for this DB instance.

    Valid values: license-included | bring-your-own-license | general-public-license

    ", + "CreateDBInstanceMessage$OptionGroupName": "

    Indicates that the DB instance should be associated with the specified option group.

    Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance.

    ", + "CreateDBInstanceMessage$CharacterSetName": "

    For supported engines, indicates that the DB instance should be associated with the specified CharacterSet.

    ", + "CreateDBInstanceReadReplicaMessage$DBInstanceIdentifier": "

    The DB instance identifier of the read replica. This is the unique key that identifies a DB instance. This parameter is stored as a lowercase string.

    ", + "CreateDBInstanceReadReplicaMessage$SourceDBInstanceIdentifier": "

    The identifier of the DB instance that will act as the source for the read replica. Each DB instance can have up to five read replicas.

    Constraints:

    • Must be the identifier of an existing DB instance.
    • Can specify a DB instance that is a read replica only if the source is running MySQL 5.6.
    • The specified DB instance must have automatic backups enabled, with a backup retention period greater than 0.
    • If the source DB instance is in the same region as the read replica, specify a valid DB instance identifier.
    • If the source DB instance is in a different region than the read replica, specify a valid DB instance ARN. For more information, go to Constructing an Amazon RDS Amazon Resource Name (ARN).
    ", + "CreateDBInstanceReadReplicaMessage$DBInstanceClass": "

    The compute and memory capacity of the read replica.

    Valid Values: db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge

    Default: Inherits from the source DB instance.

    ", + "CreateDBInstanceReadReplicaMessage$AvailabilityZone": "

    The Amazon EC2 Availability Zone that the read replica will be created in.

    Default: A random, system-chosen Availability Zone in the endpoint's region.

    Example: us-east-1d
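
    Illustration only: a sketch of CreateDBInstanceReadReplica with the fields documented above; svc and all identifiers are assumptions, and the source instance must satisfy the backup-retention constraint listed earlier.

        // createReplica sketches creating a read replica of an existing instance.
        func createReplica(svc *rds.RDS) error {
            _, err := svc.CreateDBInstanceReadReplica(&rds.CreateDBInstanceReadReplicaInput{
                DBInstanceIdentifier:       aws.String("mydbinstance-replica"),
                SourceDBInstanceIdentifier: aws.String("mydbinstance"), // must have automated backups enabled
                AvailabilityZone:           aws.String("us-east-1d"),   // optional; defaults to a random AZ
            })
            return err
        }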

    ", + "CreateDBInstanceReadReplicaMessage$OptionGroupName": "

    The option group the DB instance will be associated with. If omitted, the default option group for the engine specified will be used.

    ", + "CreateDBInstanceReadReplicaMessage$DBSubnetGroupName": "

    Specifies a DB subnet group for the DB instance. The new DB instance will be created in the VPC associated with the DB subnet group. If no DB subnet group is specified, then the new DB instance is not created in a VPC.

    Constraints:

    • Can only be specified if the source DB instance identifier specifies a DB instance in another region.
    • The specified DB subnet group must be in the same region in which the operation is running.
    • All read replicas in one region that are created from the same source DB instance must either:
      • Specify DB subnet groups from the same VPC. All these read replicas will be created in the same VPC.
      • Not specify a DB subnet group. All these read replicas will be created outside of any VPC.
    ", + "CreateDBParameterGroupMessage$DBParameterGroupName": "

    The name of the DB parameter group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    This value is stored as a lowercase string.", + "CreateDBParameterGroupMessage$DBParameterGroupFamily": "

    The DB parameter group family name. A DB parameter group can be associated with one and only one DB parameter group family, and can be applied only to a DB instance running a database engine and engine version compatible with that DB parameter group family.

    ", + "CreateDBParameterGroupMessage$Description": "

    The description for the DB parameter group.

    ", + "CreateDBSecurityGroupMessage$DBSecurityGroupName": "

    The name for the DB security group. This value is stored as a lowercase string.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    • Must not be \"Default\"
    • May not contain spaces

    Example: mysecuritygroup

    ", + "CreateDBSecurityGroupMessage$DBSecurityGroupDescription": "

    The description for the DB security group.

    ", + "CreateDBSnapshotMessage$DBSnapshotIdentifier": "

    The identifier for the DB snapshot.

    Constraints:

    • Cannot be null, empty, or blank
    • Must contain from 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-snapshot-id
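
    Illustration only: a sketch of CreateDBSnapshot using the example identifiers from the constraints above; svc is an *rds.RDS client as in the earlier sketches.

        // snapshotInstance sketches taking a manual snapshot of a DB instance.
        func snapshotInstance(svc *rds.RDS) error {
            _, err := svc.CreateDBSnapshot(&rds.CreateDBSnapshotInput{
                DBSnapshotIdentifier: aws.String("my-snapshot-id"),
                DBInstanceIdentifier: aws.String("mydbinstance"),
            })
            return err
        }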

    ", + "CreateDBSnapshotMessage$DBInstanceIdentifier": "

    The DB instance identifier. This is the unique key that identifies a DB instance.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "CreateDBSubnetGroupMessage$DBSubnetGroupName": "

    The name for the DB subnet group. This value is stored as a lowercase string.

    Constraints: Must contain no more than 255 alphanumeric characters or hyphens. Must not be \"Default\".

    Example: mySubnetgroup

    ", + "CreateDBSubnetGroupMessage$DBSubnetGroupDescription": "

    The description for the DB subnet group.

    ", + "CreateEventSubscriptionMessage$SubscriptionName": "

    The name of the subscription.

    Constraints: The name must be less than 255 characters.

    ", + "CreateEventSubscriptionMessage$SnsTopicArn": "

    The Amazon Resource Name (ARN) of the SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it.

    ", + "CreateEventSubscriptionMessage$SourceType": "

    The type of source that will be generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. If this value is not specified, all events are returned.

    Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot
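
    Illustration only: a sketch of CreateEventSubscription with the fields above; the topic ARN is a placeholder and must be an existing SNS topic.

        // subscribe sketches an event subscription scoped to DB instance events.
        func subscribe(svc *rds.RDS) error {
            _, err := svc.CreateEventSubscription(&rds.CreateEventSubscriptionInput{
                SubscriptionName: aws.String("my-subscription"),
                SnsTopicArn:      aws.String("arn:aws:sns:us-east-1:123456789012:my-topic"),
                SourceType:       aws.String("db-instance"), // omit to receive events from all source types
            })
            return err
        }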

    ", + "CreateOptionGroupMessage$OptionGroupName": "

    Specifies the name of the option group to be created.

    Constraints:

    • Must be 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: myoptiongroup
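
    Illustration only: a sketch of CreateOptionGroup combining the name, engine, version, and description fields documented here; the Oracle engine and version are example values.

        // createOptionGroup sketches creating an option group for oracle-ee 11.2.
        func createOptionGroup(svc *rds.RDS) error {
            _, err := svc.CreateOptionGroup(&rds.CreateOptionGroupInput{
                OptionGroupName:        aws.String("myoptiongroup"),
                EngineName:             aws.String("oracle-ee"),
                MajorEngineVersion:     aws.String("11.2"),
                OptionGroupDescription: aws.String("example option group"),
            })
            return err
        }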

    ", + "CreateOptionGroupMessage$EngineName": "

    Specifies the name of the engine that this option group should be associated with.

    ", + "CreateOptionGroupMessage$MajorEngineVersion": "

    Specifies the major version of the engine that this option group should be associated with.

    ", + "CreateOptionGroupMessage$OptionGroupDescription": "

    The description of the option group.

    ", + "DBEngineVersion$Engine": "

    The name of the database engine.

    ", + "DBEngineVersion$EngineVersion": "

    The version number of the database engine.

    ", + "DBEngineVersion$DBParameterGroupFamily": "

    The name of the DB parameter group family for the database engine.

    ", + "DBEngineVersion$DBEngineDescription": "

    The description of the database engine.

    ", + "DBEngineVersion$DBEngineVersionDescription": "

    The description of the database engine version.

    ", + "DBEngineVersionMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBInstance$DBInstanceIdentifier": "

    Contains a user-supplied database identifier. This is the unique key that identifies a DB instance.

    ", + "DBInstance$DBInstanceClass": "

    Contains the name of the compute and memory capacity class of the DB instance.

    ", + "DBInstance$Engine": "

    Provides the name of the database engine to be used for this DB instance.

    ", + "DBInstance$DBInstanceStatus": "

    Specifies the current state of this database.

    ", + "DBInstance$MasterUsername": "

    Contains the master username for the DB instance.

    ", + "DBInstance$DBName": "

    The meaning of this parameter differs according to the database engine you use. For example, this value returns only MySQL information when returning values from CreateDBInstanceReadReplica since read replicas are only supported for MySQL.

    MySQL

    Contains the name of the initial database of this instance that was provided at create time, if one was specified when the DB instance was created. This same name is returned for the life of the DB instance.

    Type: String

    Oracle

    Contains the Oracle System ID (SID) of the created DB instance. Not shown when the returned parameters do not apply to an Oracle DB instance.

    ", + "DBInstance$PreferredBackupWindow": "

    Specifies the daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod.

    ", + "DBInstance$AvailabilityZone": "

    Specifies the name of the Availability Zone the DB instance is located in.

    ", + "DBInstance$PreferredMaintenanceWindow": "

    Specifies the weekly time range (in UTC) during which system maintenance can occur.

    ", + "DBInstance$EngineVersion": "

    Indicates the database engine version.

    ", + "DBInstance$ReadReplicaSourceDBInstanceIdentifier": "

    Contains the identifier of the source DB instance if this DB instance is a read replica.

    ", + "DBInstance$LicenseModel": "

    License model information for this DB instance.

    ", + "DBInstance$CharacterSetName": "

    If present, specifies the name of the character set that this instance is associated with.

    ", + "DBInstance$SecondaryAvailabilityZone": "

    If present, specifies the name of the secondary Availability Zone for a DB instance with multi-AZ support.

    ", + "DBInstanceMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBInstanceStatusInfo$StatusType": "

    This value is currently \"read replication.\"

    ", + "DBInstanceStatusInfo$Status": "

    Status of the DB instance. For a StatusType of read replica, the values can be replicating, error, stopped, or terminated.

    ", + "DBInstanceStatusInfo$Message": "

    Details of the error if there is an error for the instance. If the instance is not in an error state, this value is blank.

    ", + "DBParameterGroup$DBParameterGroupName": "

    Provides the name of the DB parameter group.

    ", + "DBParameterGroup$DBParameterGroupFamily": "

    Provides the name of the DB parameter group family that this DB parameter group is compatible with.

    ", + "DBParameterGroup$Description": "

    Provides the customer-specified description for this DB parameter group.

    ", + "DBParameterGroupDetails$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBParameterGroupNameMessage$DBParameterGroupName": "

    The name of the DB parameter group.

    ", + "DBParameterGroupStatus$DBParameterGroupName": "

    The name of the DB parameter group.

    ", + "DBParameterGroupStatus$ParameterApplyStatus": "

    The status of parameter updates.

    ", + "DBParameterGroupsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBSecurityGroup$OwnerId": "

    Provides the AWS ID of the owner of a specific DB security group.

    ", + "DBSecurityGroup$DBSecurityGroupName": "

    Specifies the name of the DB security group.

    ", + "DBSecurityGroup$DBSecurityGroupDescription": "

    Provides the description of the DB security group.

    ", + "DBSecurityGroup$VpcId": "

    Provides the VpcId of the DB security group.

    ", + "DBSecurityGroupMembership$DBSecurityGroupName": "

    The name of the DB security group.

    ", + "DBSecurityGroupMembership$Status": "

    The status of the DB security group.

    ", + "DBSecurityGroupMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBSecurityGroupNameList$member": null, + "DBSnapshot$DBSnapshotIdentifier": "

    Specifies the identifier for the DB snapshot.

    ", + "DBSnapshot$DBInstanceIdentifier": "

    Specifies the DB instance identifier of the DB instance this DB snapshot was created from.

    ", + "DBSnapshot$Engine": "

    Specifies the name of the database engine.

    ", + "DBSnapshot$Status": "

    Specifies the status of this DB snapshot.

    ", + "DBSnapshot$AvailabilityZone": "

    Specifies the name of the Availability Zone the DB instance was located in at the time of the DB snapshot.

    ", + "DBSnapshot$VpcId": "

    Provides the VpcId associated with the DB snapshot.

    ", + "DBSnapshot$MasterUsername": "

    Provides the master username for the DB snapshot.

    ", + "DBSnapshot$EngineVersion": "

    Specifies the version of the database engine.

    ", + "DBSnapshot$LicenseModel": "

    License model information for the restored DB instance.

    ", + "DBSnapshot$SnapshotType": "

    Provides the type of the DB snapshot.

    ", + "DBSnapshot$OptionGroupName": "

    Provides the option group name for the DB snapshot.

    ", + "DBSnapshot$SourceRegion": "

    The region that the DB snapshot was created in or copied from.

    ", + "DBSnapshotMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBSubnetGroup$DBSubnetGroupName": "

    Specifies the name of the DB subnet group.

    ", + "DBSubnetGroup$DBSubnetGroupDescription": "

    Provides the description of the DB subnet group.

    ", + "DBSubnetGroup$VpcId": "

    Provides the VpcId of the DB subnet group.

    ", + "DBSubnetGroup$SubnetGroupStatus": "

    Provides the status of the DB subnet group.

    ", + "DBSubnetGroupMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DeleteDBInstanceMessage$DBInstanceIdentifier": "

    The DB instance identifier for the DB instance to be deleted. This parameter isn't case sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DeleteDBInstanceMessage$FinalDBSnapshotIdentifier": "

    The DBSnapshotIdentifier of the new DBSnapshot created when SkipFinalSnapshot is set to false.

    Specifying this parameter and also setting the SkipFinalSnapshot parameter to true results in an error.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    • Cannot be specified when deleting a read replica.
    ", + "DeleteDBParameterGroupMessage$DBParameterGroupName": "

    The name of the DB parameter group.

    Constraints:

    • Must be the name of an existing DB parameter group
    • You cannot delete a default DB parameter group
    • Cannot be associated with any DB instances
    ", + "DeleteDBSecurityGroupMessage$DBSecurityGroupName": "

    The name of the DB security group to delete.

    You cannot delete the default DB security group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    • Must not be \"Default\"
    • May not contain spaces
    ", + "DeleteDBSnapshotMessage$DBSnapshotIdentifier": "

    The DBSnapshot identifier.

    Constraints: Must be the name of an existing DB snapshot in the available state.

    ", + "DeleteDBSubnetGroupMessage$DBSubnetGroupName": "

    The name of the database subnet group to delete.

    You cannot delete the default subnet group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DeleteEventSubscriptionMessage$SubscriptionName": "

    The name of the RDS event notification subscription you want to delete.

    ", + "DeleteOptionGroupMessage$OptionGroupName": "

    The name of the option group to be deleted.

    You cannot delete default option groups.", + "DescribeDBEngineVersionsMessage$Engine": "

    The database engine to return.

    ", + "DescribeDBEngineVersionsMessage$EngineVersion": "

    The database engine version to return.

    Example: 5.1.49

    ", + "DescribeDBEngineVersionsMessage$DBParameterGroupFamily": "

    The name of a specific DB parameter group family to return details for.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBEngineVersionsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBInstancesMessage$DBInstanceIdentifier": "

    The user-supplied instance identifier. If this parameter is specified, information from only the specific DB instance is returned. This parameter isn't case sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBInstancesMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBInstances request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
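
    Illustration only: the Marker/MaxRecords contract described here is shared by most Describe* operations. A sketch of driving it by hand (the generated DescribeDBInstancesPages helper wraps the same loop); the page size of 20 is arbitrary.

        // listAllInstances walks DescribeDBInstances page by page via Marker.
        func listAllInstances(svc *rds.RDS) error {
            input := &rds.DescribeDBInstancesInput{MaxRecords: aws.Int64(20)}
            for {
                page, err := svc.DescribeDBInstances(input)
                if err != nil {
                    return err
                }
                for _, db := range page.DBInstances {
                    fmt.Println(aws.StringValue(db.DBInstanceIdentifier))
                }
                if page.Marker == nil { // no records beyond this page
                    return nil
                }
                input.Marker = page.Marker // resume after the last record returned
            }
        }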

    ", + "DescribeDBLogFilesDetails$LogFileName": "

    The name of the log file for the specified DB instance.

    ", + "DescribeDBLogFilesMessage$DBInstanceIdentifier": "

    The customer-assigned name of the DB instance that contains the log files you want to list.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBLogFilesMessage$FilenameContains": "

    Filters the available log files for log file names that contain the specified string.

    ", + "DescribeDBLogFilesMessage$Marker": "

    The pagination token provided in the previous request. If this parameter is specified, the response includes only records beyond the marker, up to MaxRecords.

    ", + "DescribeDBLogFilesResponse$Marker": "

    A pagination token that can be used in a subsequent DescribeDBLogFiles request.

    ", + "DescribeDBParameterGroupsMessage$DBParameterGroupName": "

    The name of a specific DB parameter group to return details for.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBParameterGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBParameterGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBParametersMessage$DBParameterGroupName": "

    The name of a specific DB parameter group to return details for.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBParametersMessage$Source": "

    The parameter types to return.

    Default: All parameter types returned

    Valid Values: user | system | engine-default

    ", + "DescribeDBParametersMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBSecurityGroupsMessage$DBSecurityGroupName": "

    The name of the DB security group to return details for.

    ", + "DescribeDBSecurityGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBSecurityGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBSnapshotsMessage$DBInstanceIdentifier": "

    A DB instance identifier to retrieve the list of DB snapshots for. Cannot be used in conjunction with DBSnapshotIdentifier. This parameter is not case sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBSnapshotsMessage$DBSnapshotIdentifier": "

    A specific DB snapshot identifier to describe. Cannot be used in conjunction with DBInstanceIdentifier. This value is stored as a lowercase string.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    • If this is the identifier of an automated snapshot, the SnapshotType parameter must also be specified.
    ", + "DescribeDBSnapshotsMessage$SnapshotType": "

    The type of snapshots that will be returned. Values can be "automated" or "manual." If not specified, the returned results will include all snapshot types.

    ", + "DescribeDBSnapshotsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBSnapshots request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBSubnetGroupsMessage$DBSubnetGroupName": "

    The name of the DB subnet group to return details for.

    ", + "DescribeDBSubnetGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBSubnetGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeEngineDefaultParametersMessage$DBParameterGroupFamily": "

    The name of the DB parameter group family.

    ", + "DescribeEngineDefaultParametersMessage$Marker": "

    An optional pagination token provided by a previous DescribeEngineDefaultParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeEventCategoriesMessage$SourceType": "

    The type of source that will be generating the events.

    Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot

    ", + "DescribeEventSubscriptionsMessage$SubscriptionName": "

    The name of the RDS event notification subscription you want to describe.

    ", + "DescribeEventSubscriptionsMessage$Marker": "

    An optional pagination token provided by a previous DescribeEventSubscriptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeEventsMessage$SourceIdentifier": "

    The identifier of the event source for which events will be returned. If not specified, then all sources are included in the response.

    Constraints:

    • If SourceIdentifier is supplied, SourceType must also be provided.
    • If the source type is DBInstance, then a DBInstanceIdentifier must be supplied.
    • If the source type is DBSecurityGroup, a DBSecurityGroupName must be supplied.
    • If the source type is DBParameterGroup, a DBParameterGroupName must be supplied.
    • If the source type is DBSnapshot, a DBSnapshotIdentifier must be supplied.
    • Cannot end with a hyphen or contain two consecutive hyphens.
    ", + "DescribeEventsMessage$Marker": "

    An optional pagination token provided by a previous DescribeEvents request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeOptionGroupOptionsMessage$EngineName": "

    A required parameter. Options available for the given Engine name will be described.

    ", + "DescribeOptionGroupOptionsMessage$MajorEngineVersion": "

    If specified, filters the results to include only options for the specified major engine version.

    ", + "DescribeOptionGroupOptionsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeOptionGroupsMessage$OptionGroupName": "

    The name of the option group to describe. Cannot be supplied together with EngineName or MajorEngineVersion.

    ", + "DescribeOptionGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeOptionGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeOptionGroupsMessage$EngineName": "

    Filters the list of option groups to only include groups associated with a specific database engine.

    ", + "DescribeOptionGroupsMessage$MajorEngineVersion": "

    Filters the list of option groups to only include groups associated with a specific database engine version. If specified, then EngineName must also be specified.

    ", + "DescribeOrderableDBInstanceOptionsMessage$Engine": "

    The name of the engine to retrieve DB instance options for.

    ", + "DescribeOrderableDBInstanceOptionsMessage$EngineVersion": "

    The engine version filter value. Specify this parameter to show only the available offerings matching the specified engine version.

    ", + "DescribeOrderableDBInstanceOptionsMessage$DBInstanceClass": "

    The DB instance class filter value. Specify this parameter to show only the available offerings matching the specified DB instance class.

    ", + "DescribeOrderableDBInstanceOptionsMessage$LicenseModel": "

    The license model filter value. Specify this parameter to show only the available offerings matching the specified license model.

    ", + "DescribeOrderableDBInstanceOptionsMessage$Marker": "

    An optional pagination token provided by a previous DescribeOrderableDBInstanceOptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeReservedDBInstancesMessage$ReservedDBInstanceId": "

    The reserved DB instance identifier filter value. Specify this parameter to show only the reservation that matches the specified reservation ID.

    ", + "DescribeReservedDBInstancesMessage$ReservedDBInstancesOfferingId": "

    The offering identifier filter value. Specify this parameter to show only purchased reservations matching the specified offering identifier.

    ", + "DescribeReservedDBInstancesMessage$DBInstanceClass": "

    The DB instance class filter value. Specify this parameter to show only those reservations matching the specified DB instances class.

    ", + "DescribeReservedDBInstancesMessage$Duration": "

    The duration filter value, specified in years or seconds. Specify this parameter to show only reservations for this duration.

    Valid Values: 1 | 3 | 31536000 | 94608000

    ", + "DescribeReservedDBInstancesMessage$ProductDescription": "

    The product description filter value. Specify this parameter to show only those reservations matching the specified product description.

    ", + "DescribeReservedDBInstancesMessage$OfferingType": "

    The offering type filter value. Specify this parameter to show only the available offerings matching the specified offering type.

    Valid Values: \"Light Utilization\" | \"Medium Utilization\" | \"Heavy Utilization\"

    ", + "DescribeReservedDBInstancesMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeReservedDBInstancesOfferingsMessage$ReservedDBInstancesOfferingId": "

    The offering identifier filter value. Specify this parameter to show only the available offering that matches the specified reservation identifier.

    Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706

    ", + "DescribeReservedDBInstancesOfferingsMessage$DBInstanceClass": "

    The DB instance class filter value. Specify this parameter to show only the available offerings matching the specified DB instance class.

    ", + "DescribeReservedDBInstancesOfferingsMessage$Duration": "

    Duration filter value, specified in years or seconds. Specify this parameter to show only reservations for this duration.

    Valid Values: 1 | 3 | 31536000 | 94608000

    ", + "DescribeReservedDBInstancesOfferingsMessage$ProductDescription": "

    Product description filter value. Specify this parameter to show only the available offerings matching the specified product description.

    ", + "DescribeReservedDBInstancesOfferingsMessage$OfferingType": "

    The offering type filter value. Specify this parameter to show only the available offerings matching the specified offering type.

    Valid Values: \"Light Utilization\" | \"Medium Utilization\" | \"Heavy Utilization\"

    ", + "DescribeReservedDBInstancesOfferingsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DownloadDBLogFilePortionDetails$LogFileData": "

    Entries from the specified log file.

    ", + "DownloadDBLogFilePortionDetails$Marker": "

    A pagination token that can be used in a subsequent DownloadDBLogFilePortion request.

    ", + "DownloadDBLogFilePortionMessage$DBInstanceIdentifier": "

    The customer-assigned name of the DB instance that contains the log files you want to list.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DownloadDBLogFilePortionMessage$LogFileName": "

    The name of the log file to be downloaded.

    ", + "DownloadDBLogFilePortionMessage$Marker": "

    The pagination token provided in the previous request or "0". If the Marker parameter is specified, the response includes only records beyond the marker until the end of the file or up to NumberOfLines.
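
    Illustration only: a sketch of the DownloadDBLogFilePortion contract described here, starting at "0" and continuing while AdditionalDataPending is true. The log file name is an assumption; real names come from DescribeDBLogFiles.

        // tailLogFile downloads an entire log file portion by portion.
        func tailLogFile(svc *rds.RDS) error {
            input := &rds.DownloadDBLogFilePortionInput{
                DBInstanceIdentifier: aws.String("mydbinstance"),
                LogFileName:          aws.String("error/mysql-error.log"), // assumed file name
                Marker:               aws.String("0"),                     // start at the beginning
            }
            for {
                out, err := svc.DownloadDBLogFilePortion(input)
                if err != nil {
                    return err
                }
                fmt.Print(aws.StringValue(out.LogFileData))
                if !aws.BoolValue(out.AdditionalDataPending) {
                    return nil
                }
                input.Marker = out.Marker // continue from where this portion ended
            }
        }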

    ", + "EC2SecurityGroup$Status": "

    Provides the status of the EC2 security group. Status can be \"authorizing\", \"authorized\", \"revoking\", and \"revoked\".

    ", + "EC2SecurityGroup$EC2SecurityGroupName": "

    Specifies the name of the EC2 security group.

    ", + "EC2SecurityGroup$EC2SecurityGroupId": "

    Specifies the ID of the EC2 security group.

    ", + "EC2SecurityGroup$EC2SecurityGroupOwnerId": "

    Specifies the AWS ID of the owner of the EC2 security group specified in the EC2SecurityGroupName field.

    ", + "Endpoint$Address": "

    Specifies the DNS address of the DB instance.

    ", + "EngineDefaults$DBParameterGroupFamily": "

    Specifies the name of the DB parameter group family which the engine default parameters apply to.

    ", + "EngineDefaults$Marker": "

    An optional pagination token provided by a previous EngineDefaults request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "Event$SourceIdentifier": "

    Provides the identifier for the source of the event.

    ", + "Event$Message": "

    Provides the text of this event.

    ", + "EventCategoriesList$member": null, + "EventCategoriesMap$SourceType": "

    The source type that the returned categories belong to.

    ", + "EventSubscription$CustomerAwsId": "

    The AWS customer account associated with the RDS event notification subscription.

    ", + "EventSubscription$CustSubscriptionId": "

    The RDS event notification subscription Id.

    ", + "EventSubscription$SnsTopicArn": "

    The topic ARN of the RDS event notification subscription.

    ", + "EventSubscription$Status": "

    The status of the RDS event notification subscription.

    Constraints:

    Can be one of the following: creating | modifying | deleting | active | no-permission | topic-not-exist

    The status \"no-permission\" indicates that RDS no longer has permission to post to the SNS topic. The status \"topic-not-exist\" indicates that the topic was deleted after the subscription was created.

    ", + "EventSubscription$SubscriptionCreationTime": "

    The time the RDS event notification subscription was created.

    ", + "EventSubscription$SourceType": "

    The source type for the RDS event notification subscription.

    ", + "EventSubscriptionsMessage$Marker": "

    An optional pagination token provided by a previous DescribeEventSubscriptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "EventsMessage$Marker": "

    An optional pagination token provided by a previous Events request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "Filter$Name": "

    This parameter is not currently supported.

    ", + "FilterValueList$member": null, + "IPRange$Status": "

    Specifies the status of the IP range. Status can be \"authorizing\", \"authorized\", \"revoking\", and \"revoked\".

    ", + "IPRange$CIDRIP": "

    Specifies the IP range.

    ", + "KeyList$member": null, + "ListTagsForResourceMessage$ResourceName": "

    The Amazon RDS resource with tags to be listed. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

    ", + "ModifyDBInstanceMessage$DBInstanceIdentifier": "

    The DB instance identifier. This value is stored as a lowercase string.

    Constraints:

    • Must be the identifier for an existing DB instance
    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "ModifyDBInstanceMessage$DBInstanceClass": "

    The new compute and memory capacity of the DB instance. To determine the instance classes that are available for a particular DB engine, use the DescribeOrderableDBInstanceOptions action.

    Passing a value for this parameter causes an outage during the change and is applied during the next maintenance window, unless the ApplyImmediately parameter is specified as true for this request.

    Default: Uses existing setting

    Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge
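
    Illustration only: a sketch of the DBInstanceClass change described above. Setting ApplyImmediately to true trades the maintenance-window delay for an immediate outage; svc and the identifiers are assumptions.

        // resizeInstance sketches changing the instance class right away.
        func resizeInstance(svc *rds.RDS) error {
            _, err := svc.ModifyDBInstance(&rds.ModifyDBInstanceInput{
                DBInstanceIdentifier: aws.String("mydbinstance"),
                DBInstanceClass:      aws.String("db.m3.large"),
                ApplyImmediately:     aws.Bool(true), // causes an outage during the change
            })
            return err
        }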

    ", + "ModifyDBInstanceMessage$MasterUserPassword": "

    The new password for the DB instance master user. Can be any printable ASCII character except \"/\", \"\"\", or \"@\".

    Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the MasterUserPassword element exists in the PendingModifiedValues element of the operation response.

    Default: Uses existing setting

    Constraints: Must be 8 to 41 alphanumeric characters (MySQL), 8 to 30 alphanumeric characters (Oracle), or 8 to 128 alphanumeric characters (SQL Server).

    Amazon RDS API actions never return the password, so this action provides a way to regain access to a master instance user if the password is lost. ", + "ModifyDBInstanceMessage$DBParameterGroupName": "

    The name of the DB parameter group to apply to the DB instance. Changing this setting does not result in an outage. The parameter group name itself is changed immediately, but the actual parameter changes are not applied until you reboot the instance without failover. The DB instance will NOT be rebooted automatically and the parameter changes will NOT be applied during the next maintenance window.

    Default: Uses existing setting

    Constraints: The DB parameter group must be in the same DB parameter group family as the DB instance.

    ", + "ModifyDBInstanceMessage$PreferredBackupWindow": "

    The daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible.

    Constraints:

    • Must be in the format hh24:mi-hh24:mi
    • Times should be Universal Time Coordinated (UTC)
    • Must not conflict with the preferred maintenance window
    • Must be at least 30 minutes
    ", + "ModifyDBInstanceMessage$PreferredMaintenanceWindow": "

    The weekly time range (in UTC) during which system maintenance can occur, which may result in an outage. Changing this parameter does not result in an outage, except in the following situation, and the change is asynchronously applied as soon as possible. If there are pending actions that cause a reboot, and the maintenance window is changed to include the current time, then changing this parameter will cause a reboot of the DB instance. If moving this window to the current time, there must be at least 30 minutes between the current time and the end of the window to ensure pending changes are applied.

    Default: Uses existing setting

    Format: ddd:hh24:mi-ddd:hh24:mi

    Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun

    Constraints: Must be at least 30 minutes

    ", + "ModifyDBInstanceMessage$EngineVersion": "

    The version number of the database engine to upgrade to. Changing this parameter results in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

    For major version upgrades, if a non-default DB parameter group is currently in use, a new DB parameter group in the DB parameter group family for the new engine version must be specified. The new DB parameter group can be the default for that DB parameter group family.

    Example: 5.1.42

    ", + "ModifyDBInstanceMessage$OptionGroupName": "

    Indicates that the DB instance should be associated with the specified option group. Changing this parameter does not result in an outage except in the following case and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. If the parameter change results in an option group that enables OEM, this change can cause a brief (sub-second) period during which new connections are rejected but existing connections are not interrupted.

    Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance.

    ", + "ModifyDBInstanceMessage$NewDBInstanceIdentifier": "

    The new DB instance identifier for the DB instance when renaming a DB instance. When you change the DB instance identifier, an instance reboot will occur immediately if you set Apply Immediately to true, or will occur during the next maintenance window if Apply Immediately is set to false. This value is stored as a lowercase string.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "ModifyDBParameterGroupMessage$DBParameterGroupName": "

    The name of the DB parameter group.

    Constraints:

    • Must be the name of an existing DB parameter group
    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "ModifyDBSubnetGroupMessage$DBSubnetGroupName": "

    The name for the DB subnet group. This value is stored as a lowercase string.

    Constraints: Must contain no more than 255 alphanumeric characters or hyphens. Must not be \"Default\".

    Example: mySubnetgroup

    ", + "ModifyDBSubnetGroupMessage$DBSubnetGroupDescription": "

    The description for the DB subnet group.

    ", + "ModifyEventSubscriptionMessage$SubscriptionName": "

    The name of the RDS event notification subscription.

    ", + "ModifyEventSubscriptionMessage$SnsTopicArn": "

    The Amazon Resource Name (ARN) of the SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it.

    ", + "ModifyEventSubscriptionMessage$SourceType": "

    The type of source that will be generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. If this value is not specified, all events are returned.

    Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot

    ", + "ModifyOptionGroupMessage$OptionGroupName": "

    The name of the option group to be modified.

    Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance.

    ", + "Option$OptionName": "

    The name of the option.

    ", + "Option$OptionDescription": "

    The description of the option.

    ", + "OptionConfiguration$OptionName": "

    The configuration of options to include in a group.

    ", + "OptionGroup$OptionGroupName": "

    Specifies the name of the option group.

    ", + "OptionGroup$OptionGroupDescription": "

    Provides the description of the option group.

    ", + "OptionGroup$EngineName": "

    Engine name that this option group can be applied to.

    ", + "OptionGroup$MajorEngineVersion": "

    Indicates the major engine version associated with this option group.

    ", + "OptionGroup$VpcId": "

    If AllowsVpcAndNonVpcInstanceMemberships is 'false', this field is blank. If AllowsVpcAndNonVpcInstanceMemberships is 'true' and this field is blank, then this option group can be applied to both VPC and non-VPC instances. If this field contains a value, then this option group can only be applied to instances that are in the VPC indicated by this field.

    ", + "OptionGroupMembership$OptionGroupName": "

    The name of the option group that the instance belongs to.

    ", + "OptionGroupMembership$Status": "

    The status of the DB instance's option group membership (e.g. in-sync, pending, pending-maintenance, applying).

    ", + "OptionGroupOption$Name": "

    The name of the option.

    ", + "OptionGroupOption$Description": "

    The description of the option.

    ", + "OptionGroupOption$EngineName": "

    Engine name that this option can be applied to.

    ", + "OptionGroupOption$MajorEngineVersion": "

    Indicates the major engine version that the option is available for.

    ", + "OptionGroupOption$MinimumRequiredMinorEngineVersion": "

    The minimum required engine version for the option to be applied.

    ", + "OptionGroupOptionSetting$SettingName": "

    The name of the option group option.

    ", + "OptionGroupOptionSetting$SettingDescription": "

    The description of the option group option.

    ", + "OptionGroupOptionSetting$DefaultValue": "

    The default value for the option group option.

    ", + "OptionGroupOptionSetting$ApplyType": "

    The DB engine specific parameter type for the option group option.

    ", + "OptionGroupOptionSetting$AllowedValues": "

    Indicates the acceptable values for the option group option.

    ", + "OptionGroupOptionsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "OptionGroups$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "OptionNamesList$member": null, + "OptionSetting$Name": "

    The name of the option that has settings that you can set.

    ", + "OptionSetting$Value": "

    The current value of the option setting.

    ", + "OptionSetting$DefaultValue": "

    The default value of the option setting.

    ", + "OptionSetting$Description": "

    The description of the option setting.

    ", + "OptionSetting$ApplyType": "

    The DB engine specific parameter type.

    ", + "OptionSetting$DataType": "

    The data type of the option setting.

    ", + "OptionSetting$AllowedValues": "

    The allowed values of the option setting.

    ", + "OptionsDependedOn$member": null, + "OrderableDBInstanceOption$Engine": "

    The engine type of the orderable DB instance.

    ", + "OrderableDBInstanceOption$EngineVersion": "

    The engine version of the orderable DB instance.

    ", + "OrderableDBInstanceOption$DBInstanceClass": "

    The DB instance class for the orderable DB instance.

    ", + "OrderableDBInstanceOption$LicenseModel": "

    The license model for the orderable DB instance.

    ", + "OrderableDBInstanceOptionsMessage$Marker": "

    An optional pagination token provided by a previous OrderableDBInstanceOptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "Parameter$ParameterName": "

    Specifies the name of the parameter.

    ", + "Parameter$ParameterValue": "

    Specifies the value of the parameter.

    ", + "Parameter$Description": "

    Provides a description of the parameter.

    ", + "Parameter$Source": "

    Indicates the source of the parameter value.

    ", + "Parameter$ApplyType": "

    Specifies the engine-specific parameter type.

    ", + "Parameter$DataType": "

    Specifies the valid data type for the parameter.

    ", + "Parameter$AllowedValues": "

    Specifies the valid range of values for the parameter.

    ", + "Parameter$MinimumEngineVersion": "

    The earliest engine version to which the parameter can apply.

    ", + "PendingModifiedValues$DBInstanceClass": "

    Contains the new DBInstanceClass for the DB instance that will be applied or is in progress.

    ", + "PendingModifiedValues$MasterUserPassword": "

    Contains the pending or in-progress change of the master credentials for the DB instance.

    ", + "PendingModifiedValues$EngineVersion": "

    Indicates the database engine version.

    ", + "PendingModifiedValues$DBInstanceIdentifier": "

    Contains the new DBInstanceIdentifier for the DB instance that will be applied or is in progress.

    ", + "PromoteReadReplicaMessage$DBInstanceIdentifier": "

    The DB instance identifier. This value is stored as a lowercase string.

    Constraints:

    • Must be the identifier for an existing read replica DB instance
    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: mydbinstance

    ", + "PromoteReadReplicaMessage$PreferredBackupWindow": "

    The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter.

    Default: A 30-minute window selected at random from an 8-hour block of time per region. See the Amazon RDS User Guide for the time blocks for each region from which the default backup windows are assigned.

    Constraints: Must be in the format hh24:mi-hh24:mi. Times should be Universal Time Coordinated (UTC). Must not conflict with the preferred maintenance window. Must be at least 30 minutes.

    ", + "PurchaseReservedDBInstancesOfferingMessage$ReservedDBInstancesOfferingId": "

    The ID of the Reserved DB instance offering to purchase.

    Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706

    ", + "PurchaseReservedDBInstancesOfferingMessage$ReservedDBInstanceId": "

    Customer-specified identifier to track this reservation.

    Example: myreservationID
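
    Illustration only: a sketch of PurchaseReservedDBInstancesOffering with the example identifiers above; if DBInstanceCount is omitted it defaults to 1.

        // purchaseReservation sketches purchasing a single reserved instance.
        func purchaseReservation(svc *rds.RDS) error {
            _, err := svc.PurchaseReservedDBInstancesOffering(&rds.PurchaseReservedDBInstancesOfferingInput{
                ReservedDBInstancesOfferingId: aws.String("438012d3-4052-4cc7-b2e3-8d3372e0e706"),
                ReservedDBInstanceId:          aws.String("myreservationID"),
            })
            return err
        }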

    ", + "ReadReplicaDBInstanceIdentifierList$member": null, + "RebootDBInstanceMessage$DBInstanceIdentifier": "

    The DB instance identifier. This parameter is stored as a lowercase string.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RecurringCharge$RecurringChargeFrequency": "

    The frequency of the recurring charge.

    ", + "RemoveSourceIdentifierFromSubscriptionMessage$SubscriptionName": "

    The name of the RDS event notification subscription you want to remove a source identifier from.

    ", + "RemoveSourceIdentifierFromSubscriptionMessage$SourceIdentifier": "

    The source identifier to be removed from the subscription, such as the DB instance identifier for a DB instance or the name of a security group.

    ", + "RemoveTagsFromResourceMessage$ResourceName": "

    The Amazon RDS resource the tags will be removed from. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

    ", + "ReservedDBInstance$ReservedDBInstanceId": "

    The unique identifier for the reservation.

    ", + "ReservedDBInstance$ReservedDBInstancesOfferingId": "

    The offering identifier.

    ", + "ReservedDBInstance$DBInstanceClass": "

    The DB instance class for the reserved DB instance.

    ", + "ReservedDBInstance$CurrencyCode": "

    The currency code for the reserved DB instance.

    ", + "ReservedDBInstance$ProductDescription": "

    The description of the reserved DB instance.

    ", + "ReservedDBInstance$OfferingType": "

    The offering type of this reserved DB instance.

    ", + "ReservedDBInstance$State": "

    The state of the reserved DB instance.

    ", + "ReservedDBInstanceMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "ReservedDBInstancesOffering$ReservedDBInstancesOfferingId": "

    The offering identifier.

    ", + "ReservedDBInstancesOffering$DBInstanceClass": "

    The DB instance class for the reserved DB instance.

    ", + "ReservedDBInstancesOffering$CurrencyCode": "

    The currency code for the reserved DB instance offering.

    ", + "ReservedDBInstancesOffering$ProductDescription": "

    The database engine used by the offering.

    ", + "ReservedDBInstancesOffering$OfferingType": "

    The offering type.

    ", + "ReservedDBInstancesOfferingMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "ResetDBParameterGroupMessage$DBParameterGroupName": "

    The name of the DB parameter group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBInstanceIdentifier": "

    Name of the DB instance to create from the DB snapshot. This parameter isn't case sensitive.

    Constraints:

    • Must contain from 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-snapshot-id

    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBSnapshotIdentifier": "

    The identifier for the DB snapshot to restore from.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBInstanceClass": "

    The compute and memory capacity of the Amazon RDS DB instance.

    Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge

    ", + "RestoreDBInstanceFromDBSnapshotMessage$AvailabilityZone": "

    The EC2 Availability Zone that the database instance will be created in.

    Default: A random, system-chosen Availability Zone.

    Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    Example: us-east-1a
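    A minimal sketch of a restore call honoring the constraint above, pinning an explicit zone only while MultiAZ is false; the identifiers and region are placeholders:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/rds"
    )

    func main() {
        sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
        svc := rds.New(sess)

        // AvailabilityZone may only be set when MultiAZ is false (or omitted);
        // with MultiAZ true the service chooses the zones itself.
        _, err := svc.RestoreDBInstanceFromDBSnapshot(&rds.RestoreDBInstanceFromDBSnapshotInput{
            DBInstanceIdentifier: aws.String("mydbinstance-restored"), // placeholder
            DBSnapshotIdentifier: aws.String("my-snapshot-id"),        // placeholder
            AvailabilityZone:     aws.String("us-east-1a"),
            MultiAZ:              aws.Bool(false),
        })
        if err != nil {
            log.Fatal(err)
        }
    }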

    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBSubnetGroupName": "

    The DB subnet group name to use for the new instance.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$LicenseModel": "

    License model information for the restored DB instance.

    Default: Same as source.

    Valid values: license-included | bring-your-own-license | general-public-license

    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBName": "

    The database name for the restored DB instance.

    This parameter doesn't apply to the MySQL engine.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$Engine": "

    The database engine to use for the new instance.

    Default: The same as source

    Constraint: Must be compatible with the engine of the source

    Example: oracle-ee

    ", + "RestoreDBInstanceFromDBSnapshotMessage$OptionGroupName": "

    The name of the option group to be used for the restored DB instance.

    Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with one.

    ", + "RestoreDBInstanceToPointInTimeMessage$SourceDBInstanceIdentifier": "

    The identifier of the source DB instance from which to restore.

    Constraints:

    • Must be the identifier of an existing database instance
    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RestoreDBInstanceToPointInTimeMessage$TargetDBInstanceIdentifier": "

    The name of the new database instance to be created.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RestoreDBInstanceToPointInTimeMessage$DBInstanceClass": "

    The compute and memory capacity of the Amazon RDS DB instance.

    Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge

    Default: The same DBInstanceClass as the original DB instance.

    ", + "RestoreDBInstanceToPointInTimeMessage$AvailabilityZone": "

    The EC2 Availability Zone that the database instance will be created in.

    Default: A random, system-chosen Availability Zone.

    Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    Example: us-east-1a

    ", + "RestoreDBInstanceToPointInTimeMessage$DBSubnetGroupName": "

    The DB subnet group name to use for the new instance.

    ", + "RestoreDBInstanceToPointInTimeMessage$LicenseModel": "

    License model information for the restored DB instance.

    Default: Same as source.

    Valid values: license-included | bring-your-own-license | general-public-license

    ", + "RestoreDBInstanceToPointInTimeMessage$DBName": "

    The database name for the restored DB instance.

    This parameter is not used for the MySQL engine.

    ", + "RestoreDBInstanceToPointInTimeMessage$Engine": "

    The database engine to use for the new instance.

    Default: The same as source

    Constraint: Must be compatible with the engine of the source

    Example: oracle-ee

    ", + "RestoreDBInstanceToPointInTimeMessage$OptionGroupName": "

    The name of the option group to be used for the restored DB instance.

    Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with one.

    ", + "RevokeDBSecurityGroupIngressMessage$DBSecurityGroupName": "

    The name of the DB security group to revoke ingress from.

    ", + "RevokeDBSecurityGroupIngressMessage$CIDRIP": "

    The IP range to revoke access from. Must be a valid CIDR range. If CIDRIP is specified, EC2SecurityGroupName, EC2SecurityGroupId and EC2SecurityGroupOwnerId cannot be provided.
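    A sketch of the CIDR branch of this rule: because CIDRIP is set, none of the EC2SecurityGroup* parameters appear. The group name, CIDR range, and region are placeholders:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/rds"
    )

    func main() {
        sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
        svc := rds.New(sess)

        // CIDRIP and the EC2SecurityGroup* parameters are mutually exclusive.
        _, err := svc.RevokeDBSecurityGroupIngress(&rds.RevokeDBSecurityGroupIngressInput{
            DBSecurityGroupName: aws.String("mydbsecuritygroup"), // placeholder
            CIDRIP:              aws.String("203.0.113.0/24"),    // placeholder range
        })
        if err != nil {
            log.Fatal(err)
        }
    }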

    ", + "RevokeDBSecurityGroupIngressMessage$EC2SecurityGroupName": "

    The name of the EC2 security group to revoke access from. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "RevokeDBSecurityGroupIngressMessage$EC2SecurityGroupId": "

    The ID of the EC2 security group to revoke access from. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "RevokeDBSecurityGroupIngressMessage$EC2SecurityGroupOwnerId": "

    The AWS Account Number of the owner of the EC2 security group specified in the EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable value. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "SourceIdsList$member": null, + "Subnet$SubnetIdentifier": "

    Specifies the identifier of the subnet.

    ", + "Subnet$SubnetStatus": "

    Specifies the status of the subnet.

    ", + "SubnetIdentifierList$member": null, + "Tag$Key": "

    A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and cannot be prefixed with \"aws:\" or \"rds:\". The string may contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").

    ", + "Tag$Value": "

    A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and cannot be prefixed with \"aws:\" or \"rds:\". The string may contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").
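    The key and value rules above translate mechanically to Go; a sketch of client-side validation, assuming the documented Java character classes map to Go's \p{L}, \p{Z}, and \p{N}:

    package main

    import (
        "fmt"
        "regexp"
        "strings"
        "unicode/utf8"
    )

    // tagPattern is the Go spelling of the documented Java regex
    // "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$".
    var tagPattern = regexp.MustCompile(`^[\p{L}\p{Z}\p{N}_.:/=+\-]*$`)

    // validTag checks an RDS tag key or value against the documented rules;
    // maxLen is 128 for keys and 256 for values.
    func validTag(s string, maxLen int) bool {
        n := utf8.RuneCountInString(s)
        if n < 1 || n > maxLen {
            return false
        }
        if strings.HasPrefix(s, "aws:") || strings.HasPrefix(s, "rds:") {
            return false // reserved prefixes
        }
        return tagPattern.MatchString(s)
    }

    func main() {
        fmt.Println(validTag("environment", 128))  // true
        fmt.Println(validTag("rds:internal", 128)) // false: reserved prefix
    }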

    ", + "VpcSecurityGroupIdList$member": null, + "VpcSecurityGroupMembership$VpcSecurityGroupId": "

    The name of the VPC security group.

    ", + "VpcSecurityGroupMembership$Status": "

    The status of the VPC security group.

    " + } + }, + "Subnet": { + "base": "

    This data type is used as a response element in the DescribeDBSubnetGroups action.

    ", + "refs": { + "SubnetList$member": null + } + }, + "SubnetAlreadyInUse": { + "base": "

    The DB subnet is already in use in the Availability Zone.

    ", + "refs": { + } + }, + "SubnetIdentifierList": { + "base": null, + "refs": { + "CreateDBSubnetGroupMessage$SubnetIds": "

    The EC2 Subnet IDs for the DB subnet group.
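    A sketch of creating a group from two subnet IDs (all identifiers are placeholders); subnets covering fewer than two Availability Zones trigger the DBSubnetGroupDoesNotCoverEnoughAZs fault defined in this model:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/rds"
    )

    func main() {
        sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
        svc := rds.New(sess)

        // The subnets must span at least two Availability Zones in the VPC.
        _, err := svc.CreateDBSubnetGroup(&rds.CreateDBSubnetGroupInput{
            DBSubnetGroupName:        aws.String("mydbsubnetgroup"), // placeholder
            DBSubnetGroupDescription: aws.String("example subnet group"),
            SubnetIds: aws.StringSlice([]string{
                "subnet-0a1b2c3d", "subnet-4e5f6a7b", // placeholder subnet IDs
            }),
        })
        if err != nil {
            log.Fatal(err)
        }
    }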

    ", + "ModifyDBSubnetGroupMessage$SubnetIds": "

    The EC2 subnet IDs for the DB subnet group.

    " + } + }, + "SubnetList": { + "base": null, + "refs": { + "DBSubnetGroup$Subnets": "

    Contains a list of Subnet elements.

    " + } + }, + "SubscriptionAlreadyExistFault": { + "base": "

    The supplied subscription name already exists.

    ", + "refs": { + } + }, + "SubscriptionCategoryNotFoundFault": { + "base": "

    The supplied category does not exist.

    ", + "refs": { + } + }, + "SubscriptionNotFoundFault": { + "base": "

    The subscription name does not exist.

    ", + "refs": { + } + }, + "SupportedCharacterSetsList": { + "base": null, + "refs": { + "DBEngineVersion$SupportedCharacterSets": "

    A list of the character sets supported by this engine for the CharacterSetName parameter of the CreateDBInstance API.

    " + } + }, + "TStamp": { + "base": null, + "refs": { + "DBInstance$InstanceCreateTime": "

    Provides the date and time the DB instance was created.

    ", + "DBInstance$LatestRestorableTime": "

    Specifies the latest time to which a database can be restored with point-in-time restore.

    ", + "DBSnapshot$SnapshotCreateTime": "

    Provides the time (UTC) when the snapshot was taken.

    ", + "DBSnapshot$InstanceCreateTime": "

    Specifies the time (UTC) when the DB instance, from which the snapshot was taken, was created.

    ", + "DescribeEventsMessage$StartTime": "

    The beginning of the time interval to retrieve events for, specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.

    Example: 2009-07-08T18:00Z

    ", + "DescribeEventsMessage$EndTime": "

    The end of the time interval for which to retrieve events, specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.

    Example: 2009-07-08T18:00Z
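    A sketch querying a 24-hour event window; the SDK marshals the time.Time fields to the ISO 8601 form shown in the examples above, so no manual formatting is needed (region is a placeholder):

    package main

    import (
        "fmt"
        "log"
        "time"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/rds"
    )

    func main() {
        sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
        svc := rds.New(sess)

        end := time.Now().UTC()
        start := end.Add(-24 * time.Hour) // events from the last day

        out, err := svc.DescribeEvents(&rds.DescribeEventsInput{
            StartTime: aws.Time(start),
            EndTime:   aws.Time(end),
        })
        if err != nil {
            log.Fatal(err)
        }
        for _, ev := range out.Events {
            fmt.Printf("%s %s\n", aws.TimeValue(ev.Date).Format(time.RFC3339), aws.StringValue(ev.Message))
        }
    }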

    ", + "Event$Date": "

    Specifies the date and time of the event.

    ", + "ReservedDBInstance$StartTime": "

    The time the reservation started.

    ", + "RestoreDBInstanceToPointInTimeMessage$RestoreTime": "

    The date and time to restore from (see the sketch after this entry's example).

    Valid Values: Value must be a UTC time

    Constraints:

    • Must be before the latest restorable time for the DB instance
    • Cannot be specified if UseLatestRestorableTime parameter is true

    Example: 2009-09-07T23:45:00Z
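    A sketch of the documented RestoreTime usage: a UTC timestamp, with UseLatestRestorableTime deliberately left unset since the two are mutually exclusive. Identifiers and region are placeholders:

    package main

    import (
        "log"
        "time"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/rds"
    )

    func main() {
        sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
        svc := rds.New(sess)

        restoreAt, err := time.Parse(time.RFC3339, "2009-09-07T23:45:00Z") // must be UTC
        if err != nil {
            log.Fatal(err)
        }

        // RestoreTime and UseLatestRestorableTime are mutually exclusive:
        // set exactly one of them.
        _, err = svc.RestoreDBInstanceToPointInTime(&rds.RestoreDBInstanceToPointInTimeInput{
            SourceDBInstanceIdentifier: aws.String("mydbinstance"),      // placeholder
            TargetDBInstanceIdentifier: aws.String("mydbinstance-pitr"), // placeholder
            RestoreTime:                aws.Time(restoreAt),
        })
        if err != nil {
            log.Fatal(err)
        }
    }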

    " + } + }, + "Tag": { + "base": "

    Metadata assigned to an Amazon RDS resource consisting of a key-value pair.

    ", + "refs": { + "TagList$member": null + } + }, + "TagList": { + "base": "

    A list of tags.

    ", + "refs": { + "AddTagsToResourceMessage$Tags": "

    The tags to be assigned to the Amazon RDS resource.

    ", + "CopyDBSnapshotMessage$Tags": null, + "CreateDBInstanceMessage$Tags": null, + "CreateDBInstanceReadReplicaMessage$Tags": null, + "CreateDBParameterGroupMessage$Tags": null, + "CreateDBSecurityGroupMessage$Tags": null, + "CreateDBSnapshotMessage$Tags": null, + "CreateDBSubnetGroupMessage$Tags": null, + "CreateEventSubscriptionMessage$Tags": null, + "CreateOptionGroupMessage$Tags": null, + "PurchaseReservedDBInstancesOfferingMessage$Tags": null, + "RestoreDBInstanceFromDBSnapshotMessage$Tags": null, + "RestoreDBInstanceToPointInTimeMessage$Tags": null, + "TagListMessage$TagList": "

    List of tags returned by the ListTagsForResource operation.

    " + } + }, + "TagListMessage": { + "base": "

    ", + "refs": { + } + }, + "VpcSecurityGroupIdList": { + "base": null, + "refs": { + "CreateDBInstanceMessage$VpcSecurityGroupIds": "

    A list of EC2 VPC security groups to associate with this DB instance.

    Default: The default EC2 VPC security group for the DB subnet group's VPC.

    ", + "ModifyDBInstanceMessage$VpcSecurityGroupIds": "

    A list of EC2 VPC security groups to authorize on this DB instance. This change is asynchronously applied as soon as possible (see the sketch after the constraints below).

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
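    A sketch of the asynchronous group swap described above; the instance name, security group ID, and region are placeholders:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/rds"
    )

    func main() {
        sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
        svc := rds.New(sess)

        // The supplied list replaces the instance's current VPC security
        // groups; the change is applied asynchronously by the service.
        _, err := svc.ModifyDBInstance(&rds.ModifyDBInstanceInput{
            DBInstanceIdentifier: aws.String("mydbinstance"),                          // placeholder
            VpcSecurityGroupIds:  aws.StringSlice([]string{"sg-0123456789abcdef0"}), // placeholder
        })
        if err != nil {
            log.Fatal(err)
        }
    }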
    ", + "OptionConfiguration$VpcSecurityGroupMemberships": "

    A list of VpcSecurityGroupMembership name strings used for this option.

    " + } + }, + "VpcSecurityGroupMembership": { + "base": "

    This data type is used as a response element for queries on VPC security group membership.

    ", + "refs": { + "VpcSecurityGroupMembershipList$member": null + } + }, + "VpcSecurityGroupMembershipList": { + "base": null, + "refs": { + "DBInstance$VpcSecurityGroups": "

    Provides a list of the VPC security group elements that the DB instance belongs to.

    ", + "Option$VpcSecurityGroupMemberships": "

    If the option requires access to a port, then this VPC security group allows access to the port.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-09-09/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-09-09/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-09-09/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-09-09/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-09-09/paginators-1.json new file mode 100644 index 000000000..662845c12 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-09-09/paginators-1.json @@ -0,0 +1,110 @@ +{ + "pagination": { + "DescribeDBEngineVersions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBEngineVersions" + }, + "DescribeDBInstances": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBInstances" + }, + "DescribeDBLogFiles": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DescribeDBLogFiles" + }, + "DescribeDBParameterGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBParameterGroups" + }, + "DescribeDBParameters": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "Parameters" + }, + "DescribeDBSecurityGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBSecurityGroups" + }, + "DescribeDBSnapshots": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBSnapshots" + }, + "DescribeDBSubnetGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBSubnetGroups" + }, + "DescribeEngineDefaultParameters": { + "input_token": "Marker", + "output_token": "EngineDefaults.Marker", + "limit_key": "MaxRecords", + "result_key": "EngineDefaults.Parameters" + }, + "DescribeEventSubscriptions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "EventSubscriptionsList" + }, + "DescribeEvents": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "Events" + }, + "DescribeOptionGroupOptions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "OptionGroupOptions" + }, + "DescribeOptionGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "OptionGroupsList" + }, + "DescribeOrderableDBInstanceOptions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "OrderableDBInstanceOptions" + }, + "DescribeReservedDBInstances": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "ReservedDBInstances" + }, + "DescribeReservedDBInstancesOfferings": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "ReservedDBInstancesOfferings" + }, + "DownloadDBLogFilePortion": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "NumberOfLines", + "more_results": "AdditionalDataPending", + "result_key": "LogFileData" + }, + "ListTagsForResource": { + "result_key": "TagList" + } + } +} diff --git 
a/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-09-09/waiters-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-09-09/waiters-2.json new file mode 100644 index 000000000..b01500797 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2013-09-09/waiters-2.json @@ -0,0 +1,97 @@ +{ + "version": 2, + "waiters": { + "DBInstanceAvailable": { + "delay": 30, + "operation": "DescribeDBInstances", + "maxAttempts": 60, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "deleted", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "deleting", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "failed", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "incompatible-restore", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "incompatible-parameters", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "incompatible-parameters", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "incompatible-restore", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + } + ] + }, + "DBInstanceDeleted": { + "delay": 30, + "operation": "DescribeDBInstances", + "maxAttempts": 60, + "acceptors": [ + { + "expected": "deleted", + "matcher": "pathAll", + "state": "success", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "creating", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "modifying", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "rebooting", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "resetting-master-credentials", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + } + ] + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2014-09-01/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2014-09-01/api-2.json new file mode 100644 index 000000000..45271a77a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2014-09-01/api-2.json @@ -0,0 +1,3271 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2014-09-01", + "endpointPrefix":"rds", + "protocol":"query", + "serviceAbbreviation":"Amazon RDS", + "serviceFullName":"Amazon Relational Database Service", + "signatureVersion":"v4", + "xmlNamespace":"http://rds.amazonaws.com/doc/2014-09-01/" + }, + "operations":{ + "AddSourceIdentifierToSubscription":{ + "name":"AddSourceIdentifierToSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddSourceIdentifierToSubscriptionMessage"}, + "output":{ + "shape":"AddSourceIdentifierToSubscriptionResult", + "resultWrapper":"AddSourceIdentifierToSubscriptionResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"SourceNotFoundFault"} + ] + }, + "AddTagsToResource":{ + "name":"AddTagsToResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"AddTagsToResourceMessage"}, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "AuthorizeDBSecurityGroupIngress":{ + "name":"AuthorizeDBSecurityGroupIngress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AuthorizeDBSecurityGroupIngressMessage"}, + "output":{ + "shape":"AuthorizeDBSecurityGroupIngressResult", + "resultWrapper":"AuthorizeDBSecurityGroupIngressResult" + }, + "errors":[ + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"InvalidDBSecurityGroupStateFault"}, + {"shape":"AuthorizationAlreadyExistsFault"}, + {"shape":"AuthorizationQuotaExceededFault"} + ] + }, + "CopyDBParameterGroup":{ + "name":"CopyDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CopyDBParameterGroupMessage"}, + "output":{ + "shape":"CopyDBParameterGroupResult", + "resultWrapper":"CopyDBParameterGroupResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"DBParameterGroupAlreadyExistsFault"}, + {"shape":"DBParameterGroupQuotaExceededFault"} + ] + }, + "CopyDBSnapshot":{ + "name":"CopyDBSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CopyDBSnapshotMessage"}, + "output":{ + "shape":"CopyDBSnapshotResult", + "resultWrapper":"CopyDBSnapshotResult" + }, + "errors":[ + {"shape":"DBSnapshotAlreadyExistsFault"}, + {"shape":"DBSnapshotNotFoundFault"}, + {"shape":"InvalidDBSnapshotStateFault"}, + {"shape":"SnapshotQuotaExceededFault"} + ] + }, + "CopyOptionGroup":{ + "name":"CopyOptionGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CopyOptionGroupMessage"}, + "output":{ + "shape":"CopyOptionGroupResult", + "resultWrapper":"CopyOptionGroupResult" + }, + "errors":[ + {"shape":"OptionGroupAlreadyExistsFault"}, + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"OptionGroupQuotaExceededFault"} + ] + }, + "CreateDBInstance":{ + "name":"CreateDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBInstanceMessage"}, + "output":{ + "shape":"CreateDBInstanceResult", + "resultWrapper":"CreateDBInstanceResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"StorageTypeNotSupportedFault"}, + {"shape":"AuthorizationNotFoundFault"} + ] + }, + "CreateDBInstanceReadReplica":{ + "name":"CreateDBInstanceReadReplica", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBInstanceReadReplicaMessage"}, + "output":{ + "shape":"CreateDBInstanceReadReplicaResult", + "resultWrapper":"CreateDBInstanceReadReplicaResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + 
{"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"DBSubnetGroupNotAllowedFault"}, + {"shape":"InvalidDBSubnetGroupFault"}, + {"shape":"StorageTypeNotSupportedFault"} + ] + }, + "CreateDBParameterGroup":{ + "name":"CreateDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBParameterGroupMessage"}, + "output":{ + "shape":"CreateDBParameterGroupResult", + "resultWrapper":"CreateDBParameterGroupResult" + }, + "errors":[ + {"shape":"DBParameterGroupQuotaExceededFault"}, + {"shape":"DBParameterGroupAlreadyExistsFault"} + ] + }, + "CreateDBSecurityGroup":{ + "name":"CreateDBSecurityGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBSecurityGroupMessage"}, + "output":{ + "shape":"CreateDBSecurityGroupResult", + "resultWrapper":"CreateDBSecurityGroupResult" + }, + "errors":[ + {"shape":"DBSecurityGroupAlreadyExistsFault"}, + {"shape":"DBSecurityGroupQuotaExceededFault"}, + {"shape":"DBSecurityGroupNotSupportedFault"} + ] + }, + "CreateDBSnapshot":{ + "name":"CreateDBSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBSnapshotMessage"}, + "output":{ + "shape":"CreateDBSnapshotResult", + "resultWrapper":"CreateDBSnapshotResult" + }, + "errors":[ + {"shape":"DBSnapshotAlreadyExistsFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"SnapshotQuotaExceededFault"} + ] + }, + "CreateDBSubnetGroup":{ + "name":"CreateDBSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBSubnetGroupMessage"}, + "output":{ + "shape":"CreateDBSubnetGroupResult", + "resultWrapper":"CreateDBSubnetGroupResult" + }, + "errors":[ + {"shape":"DBSubnetGroupAlreadyExistsFault"}, + {"shape":"DBSubnetGroupQuotaExceededFault"}, + {"shape":"DBSubnetQuotaExceededFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"} + ] + }, + "CreateEventSubscription":{ + "name":"CreateEventSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateEventSubscriptionMessage"}, + "output":{ + "shape":"CreateEventSubscriptionResult", + "resultWrapper":"CreateEventSubscriptionResult" + }, + "errors":[ + {"shape":"EventSubscriptionQuotaExceededFault"}, + {"shape":"SubscriptionAlreadyExistFault"}, + {"shape":"SNSInvalidTopicFault"}, + {"shape":"SNSNoAuthorizationFault"}, + {"shape":"SNSTopicArnNotFoundFault"}, + {"shape":"SubscriptionCategoryNotFoundFault"}, + {"shape":"SourceNotFoundFault"} + ] + }, + "CreateOptionGroup":{ + "name":"CreateOptionGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateOptionGroupMessage"}, + "output":{ + "shape":"CreateOptionGroupResult", + "resultWrapper":"CreateOptionGroupResult" + }, + "errors":[ + {"shape":"OptionGroupAlreadyExistsFault"}, + {"shape":"OptionGroupQuotaExceededFault"} + ] + }, + "DeleteDBInstance":{ + "name":"DeleteDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBInstanceMessage"}, + "output":{ + "shape":"DeleteDBInstanceResult", + "resultWrapper":"DeleteDBInstanceResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBSnapshotAlreadyExistsFault"}, + {"shape":"SnapshotQuotaExceededFault"} + ] + }, + 
"DeleteDBParameterGroup":{ + "name":"DeleteDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBParameterGroupMessage"}, + "errors":[ + {"shape":"InvalidDBParameterGroupStateFault"}, + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "DeleteDBSecurityGroup":{ + "name":"DeleteDBSecurityGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBSecurityGroupMessage"}, + "errors":[ + {"shape":"InvalidDBSecurityGroupStateFault"}, + {"shape":"DBSecurityGroupNotFoundFault"} + ] + }, + "DeleteDBSnapshot":{ + "name":"DeleteDBSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBSnapshotMessage"}, + "output":{ + "shape":"DeleteDBSnapshotResult", + "resultWrapper":"DeleteDBSnapshotResult" + }, + "errors":[ + {"shape":"InvalidDBSnapshotStateFault"}, + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "DeleteDBSubnetGroup":{ + "name":"DeleteDBSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBSubnetGroupMessage"}, + "errors":[ + {"shape":"InvalidDBSubnetGroupStateFault"}, + {"shape":"InvalidDBSubnetStateFault"}, + {"shape":"DBSubnetGroupNotFoundFault"} + ] + }, + "DeleteEventSubscription":{ + "name":"DeleteEventSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteEventSubscriptionMessage"}, + "output":{ + "shape":"DeleteEventSubscriptionResult", + "resultWrapper":"DeleteEventSubscriptionResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"InvalidEventSubscriptionStateFault"} + ] + }, + "DeleteOptionGroup":{ + "name":"DeleteOptionGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteOptionGroupMessage"}, + "errors":[ + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"InvalidOptionGroupStateFault"} + ] + }, + "DescribeDBEngineVersions":{ + "name":"DescribeDBEngineVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBEngineVersionsMessage"}, + "output":{ + "shape":"DBEngineVersionMessage", + "resultWrapper":"DescribeDBEngineVersionsResult" + } + }, + "DescribeDBInstances":{ + "name":"DescribeDBInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBInstancesMessage"}, + "output":{ + "shape":"DBInstanceMessage", + "resultWrapper":"DescribeDBInstancesResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"} + ] + }, + "DescribeDBLogFiles":{ + "name":"DescribeDBLogFiles", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBLogFilesMessage"}, + "output":{ + "shape":"DescribeDBLogFilesResponse", + "resultWrapper":"DescribeDBLogFilesResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"} + ] + }, + "DescribeDBParameterGroups":{ + "name":"DescribeDBParameterGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBParameterGroupsMessage"}, + "output":{ + "shape":"DBParameterGroupsMessage", + "resultWrapper":"DescribeDBParameterGroupsResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "DescribeDBParameters":{ + "name":"DescribeDBParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBParametersMessage"}, + "output":{ + "shape":"DBParameterGroupDetails", + "resultWrapper":"DescribeDBParametersResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "DescribeDBSecurityGroups":{ + 
"name":"DescribeDBSecurityGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBSecurityGroupsMessage"}, + "output":{ + "shape":"DBSecurityGroupMessage", + "resultWrapper":"DescribeDBSecurityGroupsResult" + }, + "errors":[ + {"shape":"DBSecurityGroupNotFoundFault"} + ] + }, + "DescribeDBSnapshots":{ + "name":"DescribeDBSnapshots", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBSnapshotsMessage"}, + "output":{ + "shape":"DBSnapshotMessage", + "resultWrapper":"DescribeDBSnapshotsResult" + }, + "errors":[ + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "DescribeDBSubnetGroups":{ + "name":"DescribeDBSubnetGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBSubnetGroupsMessage"}, + "output":{ + "shape":"DBSubnetGroupMessage", + "resultWrapper":"DescribeDBSubnetGroupsResult" + }, + "errors":[ + {"shape":"DBSubnetGroupNotFoundFault"} + ] + }, + "DescribeEngineDefaultParameters":{ + "name":"DescribeEngineDefaultParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEngineDefaultParametersMessage"}, + "output":{ + "shape":"DescribeEngineDefaultParametersResult", + "resultWrapper":"DescribeEngineDefaultParametersResult" + } + }, + "DescribeEventCategories":{ + "name":"DescribeEventCategories", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventCategoriesMessage"}, + "output":{ + "shape":"EventCategoriesMessage", + "resultWrapper":"DescribeEventCategoriesResult" + } + }, + "DescribeEventSubscriptions":{ + "name":"DescribeEventSubscriptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventSubscriptionsMessage"}, + "output":{ + "shape":"EventSubscriptionsMessage", + "resultWrapper":"DescribeEventSubscriptionsResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"} + ] + }, + "DescribeEvents":{ + "name":"DescribeEvents", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventsMessage"}, + "output":{ + "shape":"EventsMessage", + "resultWrapper":"DescribeEventsResult" + } + }, + "DescribeOptionGroupOptions":{ + "name":"DescribeOptionGroupOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOptionGroupOptionsMessage"}, + "output":{ + "shape":"OptionGroupOptionsMessage", + "resultWrapper":"DescribeOptionGroupOptionsResult" + } + }, + "DescribeOptionGroups":{ + "name":"DescribeOptionGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOptionGroupsMessage"}, + "output":{ + "shape":"OptionGroups", + "resultWrapper":"DescribeOptionGroupsResult" + }, + "errors":[ + {"shape":"OptionGroupNotFoundFault"} + ] + }, + "DescribeOrderableDBInstanceOptions":{ + "name":"DescribeOrderableDBInstanceOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOrderableDBInstanceOptionsMessage"}, + "output":{ + "shape":"OrderableDBInstanceOptionsMessage", + "resultWrapper":"DescribeOrderableDBInstanceOptionsResult" + } + }, + "DescribeReservedDBInstances":{ + "name":"DescribeReservedDBInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedDBInstancesMessage"}, + "output":{ + "shape":"ReservedDBInstanceMessage", + "resultWrapper":"DescribeReservedDBInstancesResult" + }, + "errors":[ + {"shape":"ReservedDBInstanceNotFoundFault"} + ] + }, + "DescribeReservedDBInstancesOfferings":{ + 
"name":"DescribeReservedDBInstancesOfferings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedDBInstancesOfferingsMessage"}, + "output":{ + "shape":"ReservedDBInstancesOfferingMessage", + "resultWrapper":"DescribeReservedDBInstancesOfferingsResult" + }, + "errors":[ + {"shape":"ReservedDBInstancesOfferingNotFoundFault"} + ] + }, + "DownloadDBLogFilePortion":{ + "name":"DownloadDBLogFilePortion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DownloadDBLogFilePortionMessage"}, + "output":{ + "shape":"DownloadDBLogFilePortionDetails", + "resultWrapper":"DownloadDBLogFilePortionResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBLogFileNotFoundFault"} + ] + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceMessage"}, + "output":{ + "shape":"TagListMessage", + "resultWrapper":"ListTagsForResourceResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "ModifyDBInstance":{ + "name":"ModifyDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBInstanceMessage"}, + "output":{ + "shape":"ModifyDBInstanceResult", + "resultWrapper":"ModifyDBInstanceResult" + }, + "errors":[ + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"InvalidDBSecurityGroupStateFault"}, + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"DBUpgradeDependencyFailureFault"}, + {"shape":"StorageTypeNotSupportedFault"}, + {"shape":"AuthorizationNotFoundFault"} + ] + }, + "ModifyDBParameterGroup":{ + "name":"ModifyDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBParameterGroupMessage"}, + "output":{ + "shape":"DBParameterGroupNameMessage", + "resultWrapper":"ModifyDBParameterGroupResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"InvalidDBParameterGroupStateFault"} + ] + }, + "ModifyDBSubnetGroup":{ + "name":"ModifyDBSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBSubnetGroupMessage"}, + "output":{ + "shape":"ModifyDBSubnetGroupResult", + "resultWrapper":"ModifyDBSubnetGroupResult" + }, + "errors":[ + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetQuotaExceededFault"}, + {"shape":"SubnetAlreadyInUse"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"} + ] + }, + "ModifyEventSubscription":{ + "name":"ModifyEventSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyEventSubscriptionMessage"}, + "output":{ + "shape":"ModifyEventSubscriptionResult", + "resultWrapper":"ModifyEventSubscriptionResult" + }, + "errors":[ + {"shape":"EventSubscriptionQuotaExceededFault"}, + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"SNSInvalidTopicFault"}, + {"shape":"SNSNoAuthorizationFault"}, + {"shape":"SNSTopicArnNotFoundFault"}, + {"shape":"SubscriptionCategoryNotFoundFault"} + ] + }, + "ModifyOptionGroup":{ + "name":"ModifyOptionGroup", + "http":{ + "method":"POST", + "requestUri":"/" + 
}, + "input":{"shape":"ModifyOptionGroupMessage"}, + "output":{ + "shape":"ModifyOptionGroupResult", + "resultWrapper":"ModifyOptionGroupResult" + }, + "errors":[ + {"shape":"InvalidOptionGroupStateFault"}, + {"shape":"OptionGroupNotFoundFault"} + ] + }, + "PromoteReadReplica":{ + "name":"PromoteReadReplica", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PromoteReadReplicaMessage"}, + "output":{ + "shape":"PromoteReadReplicaResult", + "resultWrapper":"PromoteReadReplicaResult" + }, + "errors":[ + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBInstanceNotFoundFault"} + ] + }, + "PurchaseReservedDBInstancesOffering":{ + "name":"PurchaseReservedDBInstancesOffering", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PurchaseReservedDBInstancesOfferingMessage"}, + "output":{ + "shape":"PurchaseReservedDBInstancesOfferingResult", + "resultWrapper":"PurchaseReservedDBInstancesOfferingResult" + }, + "errors":[ + {"shape":"ReservedDBInstancesOfferingNotFoundFault"}, + {"shape":"ReservedDBInstanceAlreadyExistsFault"}, + {"shape":"ReservedDBInstanceQuotaExceededFault"} + ] + }, + "RebootDBInstance":{ + "name":"RebootDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RebootDBInstanceMessage"}, + "output":{ + "shape":"RebootDBInstanceResult", + "resultWrapper":"RebootDBInstanceResult" + }, + "errors":[ + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBInstanceNotFoundFault"} + ] + }, + "RemoveSourceIdentifierFromSubscription":{ + "name":"RemoveSourceIdentifierFromSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveSourceIdentifierFromSubscriptionMessage"}, + "output":{ + "shape":"RemoveSourceIdentifierFromSubscriptionResult", + "resultWrapper":"RemoveSourceIdentifierFromSubscriptionResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"SourceNotFoundFault"} + ] + }, + "RemoveTagsFromResource":{ + "name":"RemoveTagsFromResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveTagsFromResourceMessage"}, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "ResetDBParameterGroup":{ + "name":"ResetDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetDBParameterGroupMessage"}, + "output":{ + "shape":"DBParameterGroupNameMessage", + "resultWrapper":"ResetDBParameterGroupResult" + }, + "errors":[ + {"shape":"InvalidDBParameterGroupStateFault"}, + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "RestoreDBInstanceFromDBSnapshot":{ + "name":"RestoreDBInstanceFromDBSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreDBInstanceFromDBSnapshotMessage"}, + "output":{ + "shape":"RestoreDBInstanceFromDBSnapshotResult", + "resultWrapper":"RestoreDBInstanceFromDBSnapshotResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"DBSnapshotNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"InvalidDBSnapshotStateFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"InvalidRestoreFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"StorageTypeNotSupportedFault"}, + 
{"shape":"AuthorizationNotFoundFault"} + ] + }, + "RestoreDBInstanceToPointInTime":{ + "name":"RestoreDBInstanceToPointInTime", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreDBInstanceToPointInTimeMessage"}, + "output":{ + "shape":"RestoreDBInstanceToPointInTimeResult", + "resultWrapper":"RestoreDBInstanceToPointInTimeResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"PointInTimeRestoreNotEnabledFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"InvalidRestoreFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"StorageTypeNotSupportedFault"}, + {"shape":"AuthorizationNotFoundFault"} + ] + }, + "RevokeDBSecurityGroupIngress":{ + "name":"RevokeDBSecurityGroupIngress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RevokeDBSecurityGroupIngressMessage"}, + "output":{ + "shape":"RevokeDBSecurityGroupIngressResult", + "resultWrapper":"RevokeDBSecurityGroupIngressResult" + }, + "errors":[ + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"AuthorizationNotFoundFault"}, + {"shape":"InvalidDBSecurityGroupStateFault"} + ] + } + }, + "shapes":{ + "AddSourceIdentifierToSubscriptionMessage":{ + "type":"structure", + "required":[ + "SubscriptionName", + "SourceIdentifier" + ], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SourceIdentifier":{"shape":"String"} + } + }, + "AddSourceIdentifierToSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "AddTagsToResourceMessage":{ + "type":"structure", + "required":[ + "ResourceName", + "Tags" + ], + "members":{ + "ResourceName":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "ApplyMethod":{ + "type":"string", + "enum":[ + "immediate", + "pending-reboot" + ] + }, + "AuthorizationAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AuthorizationAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "AuthorizationNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AuthorizationNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "AuthorizationQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AuthorizationQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "AuthorizeDBSecurityGroupIngressMessage":{ + "type":"structure", + "required":["DBSecurityGroupName"], + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "CIDRIP":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupId":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"} + } + }, + "AuthorizeDBSecurityGroupIngressResult":{ + "type":"structure", + "members":{ + "DBSecurityGroup":{"shape":"DBSecurityGroup"} + } + }, + "AvailabilityZone":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"} + }, + "wrapper":true + }, + "AvailabilityZoneList":{ + "type":"list", + "member":{ + "shape":"AvailabilityZone", + "locationName":"AvailabilityZone" + } + }, + 
"Boolean":{"type":"boolean"}, + "BooleanOptional":{"type":"boolean"}, + "CharacterSet":{ + "type":"structure", + "members":{ + "CharacterSetName":{"shape":"String"}, + "CharacterSetDescription":{"shape":"String"} + } + }, + "CopyDBParameterGroupMessage":{ + "type":"structure", + "required":[ + "SourceDBParameterGroupIdentifier", + "TargetDBParameterGroupIdentifier", + "TargetDBParameterGroupDescription" + ], + "members":{ + "SourceDBParameterGroupIdentifier":{"shape":"String"}, + "TargetDBParameterGroupIdentifier":{"shape":"String"}, + "TargetDBParameterGroupDescription":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CopyDBParameterGroupResult":{ + "type":"structure", + "members":{ + "DBParameterGroup":{"shape":"DBParameterGroup"} + } + }, + "CopyDBSnapshotMessage":{ + "type":"structure", + "required":[ + "SourceDBSnapshotIdentifier", + "TargetDBSnapshotIdentifier" + ], + "members":{ + "SourceDBSnapshotIdentifier":{"shape":"String"}, + "TargetDBSnapshotIdentifier":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CopyDBSnapshotResult":{ + "type":"structure", + "members":{ + "DBSnapshot":{"shape":"DBSnapshot"} + } + }, + "CopyOptionGroupMessage":{ + "type":"structure", + "required":[ + "SourceOptionGroupIdentifier", + "TargetOptionGroupIdentifier", + "TargetOptionGroupDescription" + ], + "members":{ + "SourceOptionGroupIdentifier":{"shape":"String"}, + "TargetOptionGroupIdentifier":{"shape":"String"}, + "TargetOptionGroupDescription":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CopyOptionGroupResult":{ + "type":"structure", + "members":{ + "OptionGroup":{"shape":"OptionGroup"} + } + }, + "CreateDBInstanceMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "AllocatedStorage", + "DBInstanceClass", + "Engine", + "MasterUsername", + "MasterUserPassword" + ], + "members":{ + "DBName":{"shape":"String"}, + "DBInstanceIdentifier":{"shape":"String"}, + "AllocatedStorage":{"shape":"IntegerOptional"}, + "DBInstanceClass":{"shape":"String"}, + "Engine":{"shape":"String"}, + "MasterUsername":{"shape":"String"}, + "MasterUserPassword":{"shape":"String"}, + "DBSecurityGroups":{"shape":"DBSecurityGroupNameList"}, + "VpcSecurityGroupIds":{"shape":"VpcSecurityGroupIdList"}, + "AvailabilityZone":{"shape":"String"}, + "DBSubnetGroupName":{"shape":"String"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "DBParameterGroupName":{"shape":"String"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "PreferredBackupWindow":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "EngineVersion":{"shape":"String"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "LicenseModel":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "CharacterSetName":{"shape":"String"}, + "PubliclyAccessible":{"shape":"BooleanOptional"}, + "Tags":{"shape":"TagList"}, + "StorageType":{"shape":"String"}, + "TdeCredentialArn":{"shape":"String"}, + "TdeCredentialPassword":{"shape":"String"} + } + }, + "CreateDBInstanceReadReplicaMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "SourceDBInstanceIdentifier" + ], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "SourceDBInstanceIdentifier":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "AvailabilityZone":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "Iops":{"shape":"IntegerOptional"}, + 
"OptionGroupName":{"shape":"String"}, + "PubliclyAccessible":{"shape":"BooleanOptional"}, + "Tags":{"shape":"TagList"}, + "DBSubnetGroupName":{"shape":"String"}, + "StorageType":{"shape":"String"} + } + }, + "CreateDBInstanceReadReplicaResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "CreateDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "CreateDBParameterGroupMessage":{ + "type":"structure", + "required":[ + "DBParameterGroupName", + "DBParameterGroupFamily", + "Description" + ], + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "Description":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateDBParameterGroupResult":{ + "type":"structure", + "members":{ + "DBParameterGroup":{"shape":"DBParameterGroup"} + } + }, + "CreateDBSecurityGroupMessage":{ + "type":"structure", + "required":[ + "DBSecurityGroupName", + "DBSecurityGroupDescription" + ], + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "DBSecurityGroupDescription":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateDBSecurityGroupResult":{ + "type":"structure", + "members":{ + "DBSecurityGroup":{"shape":"DBSecurityGroup"} + } + }, + "CreateDBSnapshotMessage":{ + "type":"structure", + "required":[ + "DBSnapshotIdentifier", + "DBInstanceIdentifier" + ], + "members":{ + "DBSnapshotIdentifier":{"shape":"String"}, + "DBInstanceIdentifier":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateDBSnapshotResult":{ + "type":"structure", + "members":{ + "DBSnapshot":{"shape":"DBSnapshot"} + } + }, + "CreateDBSubnetGroupMessage":{ + "type":"structure", + "required":[ + "DBSubnetGroupName", + "DBSubnetGroupDescription", + "SubnetIds" + ], + "members":{ + "DBSubnetGroupName":{"shape":"String"}, + "DBSubnetGroupDescription":{"shape":"String"}, + "SubnetIds":{"shape":"SubnetIdentifierList"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateDBSubnetGroupResult":{ + "type":"structure", + "members":{ + "DBSubnetGroup":{"shape":"DBSubnetGroup"} + } + }, + "CreateEventSubscriptionMessage":{ + "type":"structure", + "required":[ + "SubscriptionName", + "SnsTopicArn" + ], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SnsTopicArn":{"shape":"String"}, + "SourceType":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "SourceIds":{"shape":"SourceIdsList"}, + "Enabled":{"shape":"BooleanOptional"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateEventSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "CreateOptionGroupMessage":{ + "type":"structure", + "required":[ + "OptionGroupName", + "EngineName", + "MajorEngineVersion", + "OptionGroupDescription" + ], + "members":{ + "OptionGroupName":{"shape":"String"}, + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"}, + "OptionGroupDescription":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateOptionGroupResult":{ + "type":"structure", + "members":{ + "OptionGroup":{"shape":"OptionGroup"} + } + }, + "DBEngineVersion":{ + "type":"structure", + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "DBEngineDescription":{"shape":"String"}, + "DBEngineVersionDescription":{"shape":"String"}, + "DefaultCharacterSet":{"shape":"CharacterSet"}, + 
"SupportedCharacterSets":{"shape":"SupportedCharacterSetsList"} + } + }, + "DBEngineVersionList":{ + "type":"list", + "member":{ + "shape":"DBEngineVersion", + "locationName":"DBEngineVersion" + } + }, + "DBEngineVersionMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBEngineVersions":{"shape":"DBEngineVersionList"} + } + }, + "DBInstance":{ + "type":"structure", + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Engine":{"shape":"String"}, + "DBInstanceStatus":{"shape":"String"}, + "MasterUsername":{"shape":"String"}, + "DBName":{"shape":"String"}, + "Endpoint":{"shape":"Endpoint"}, + "AllocatedStorage":{"shape":"Integer"}, + "InstanceCreateTime":{"shape":"TStamp"}, + "PreferredBackupWindow":{"shape":"String"}, + "BackupRetentionPeriod":{"shape":"Integer"}, + "DBSecurityGroups":{"shape":"DBSecurityGroupMembershipList"}, + "VpcSecurityGroups":{"shape":"VpcSecurityGroupMembershipList"}, + "DBParameterGroups":{"shape":"DBParameterGroupStatusList"}, + "AvailabilityZone":{"shape":"String"}, + "DBSubnetGroup":{"shape":"DBSubnetGroup"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "PendingModifiedValues":{"shape":"PendingModifiedValues"}, + "LatestRestorableTime":{"shape":"TStamp"}, + "MultiAZ":{"shape":"Boolean"}, + "EngineVersion":{"shape":"String"}, + "AutoMinorVersionUpgrade":{"shape":"Boolean"}, + "ReadReplicaSourceDBInstanceIdentifier":{"shape":"String"}, + "ReadReplicaDBInstanceIdentifiers":{"shape":"ReadReplicaDBInstanceIdentifierList"}, + "LicenseModel":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupMemberships":{"shape":"OptionGroupMembershipList"}, + "CharacterSetName":{"shape":"String"}, + "SecondaryAvailabilityZone":{"shape":"String"}, + "PubliclyAccessible":{"shape":"Boolean"}, + "StatusInfos":{"shape":"DBInstanceStatusInfoList"}, + "StorageType":{"shape":"String"}, + "TdeCredentialArn":{"shape":"String"} + }, + "wrapper":true + }, + "DBInstanceAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBInstanceAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBInstanceList":{ + "type":"list", + "member":{ + "shape":"DBInstance", + "locationName":"DBInstance" + } + }, + "DBInstanceMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBInstances":{"shape":"DBInstanceList"} + } + }, + "DBInstanceNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBInstanceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBInstanceStatusInfo":{ + "type":"structure", + "members":{ + "StatusType":{"shape":"String"}, + "Normal":{"shape":"Boolean"}, + "Status":{"shape":"String"}, + "Message":{"shape":"String"} + } + }, + "DBInstanceStatusInfoList":{ + "type":"list", + "member":{ + "shape":"DBInstanceStatusInfo", + "locationName":"DBInstanceStatusInfo" + } + }, + "DBLogFileNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBLogFileNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroup":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "Description":{"shape":"String"} + }, + "wrapper":true + }, + "DBParameterGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBParameterGroupAlreadyExists", + "httpStatusCode":400, + 
"senderFault":true + }, + "exception":true + }, + "DBParameterGroupDetails":{ + "type":"structure", + "members":{ + "Parameters":{"shape":"ParametersList"}, + "Marker":{"shape":"String"} + } + }, + "DBParameterGroupList":{ + "type":"list", + "member":{ + "shape":"DBParameterGroup", + "locationName":"DBParameterGroup" + } + }, + "DBParameterGroupNameMessage":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{"shape":"String"} + } + }, + "DBParameterGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBParameterGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBParameterGroupQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroupStatus":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "ParameterApplyStatus":{"shape":"String"} + } + }, + "DBParameterGroupStatusList":{ + "type":"list", + "member":{ + "shape":"DBParameterGroupStatus", + "locationName":"DBParameterGroup" + } + }, + "DBParameterGroupsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBParameterGroups":{"shape":"DBParameterGroupList"} + } + }, + "DBSecurityGroup":{ + "type":"structure", + "members":{ + "OwnerId":{"shape":"String"}, + "DBSecurityGroupName":{"shape":"String"}, + "DBSecurityGroupDescription":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "EC2SecurityGroups":{"shape":"EC2SecurityGroupList"}, + "IPRanges":{"shape":"IPRangeList"} + }, + "wrapper":true + }, + "DBSecurityGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSecurityGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroupMembership":{ + "type":"structure", + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "DBSecurityGroupMembershipList":{ + "type":"list", + "member":{ + "shape":"DBSecurityGroupMembership", + "locationName":"DBSecurityGroup" + } + }, + "DBSecurityGroupMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBSecurityGroups":{"shape":"DBSecurityGroups"} + } + }, + "DBSecurityGroupNameList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"DBSecurityGroupName" + } + }, + "DBSecurityGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSecurityGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroupNotSupportedFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSecurityGroupNotSupported", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"QuotaExceeded.DBSecurityGroup", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroups":{ + "type":"list", + "member":{ + "shape":"DBSecurityGroup", + "locationName":"DBSecurityGroup" + } + }, + "DBSnapshot":{ + "type":"structure", + "members":{ + "DBSnapshotIdentifier":{"shape":"String"}, + "DBInstanceIdentifier":{"shape":"String"}, + "SnapshotCreateTime":{"shape":"TStamp"}, + "Engine":{"shape":"String"}, + "AllocatedStorage":{"shape":"Integer"}, + "Status":{"shape":"String"}, + "Port":{"shape":"Integer"}, + 
"AvailabilityZone":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "InstanceCreateTime":{"shape":"TStamp"}, + "MasterUsername":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "LicenseModel":{"shape":"String"}, + "SnapshotType":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "PercentProgress":{"shape":"Integer"}, + "SourceRegion":{"shape":"String"}, + "StorageType":{"shape":"String"}, + "TdeCredentialArn":{"shape":"String"} + }, + "wrapper":true + }, + "DBSnapshotAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSnapshotAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSnapshotList":{ + "type":"list", + "member":{ + "shape":"DBSnapshot", + "locationName":"DBSnapshot" + } + }, + "DBSnapshotMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBSnapshots":{"shape":"DBSnapshotList"} + } + }, + "DBSnapshotNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSnapshotNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroup":{ + "type":"structure", + "members":{ + "DBSubnetGroupName":{"shape":"String"}, + "DBSubnetGroupDescription":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "SubnetGroupStatus":{"shape":"String"}, + "Subnets":{"shape":"SubnetList"} + }, + "wrapper":true + }, + "DBSubnetGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupDoesNotCoverEnoughAZs":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupDoesNotCoverEnoughAZs", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBSubnetGroups":{"shape":"DBSubnetGroups"} + } + }, + "DBSubnetGroupNotAllowedFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupNotAllowedFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroups":{ + "type":"list", + "member":{ + "shape":"DBSubnetGroup", + "locationName":"DBSubnetGroup" + } + }, + "DBSubnetQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBUpgradeDependencyFailureFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBUpgradeDependencyFailure", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DeleteDBInstanceMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "SkipFinalSnapshot":{"shape":"Boolean"}, + "FinalDBSnapshotIdentifier":{"shape":"String"} + } + }, + "DeleteDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "DeleteDBParameterGroupMessage":{ + 
"type":"structure", + "required":["DBParameterGroupName"], + "members":{ + "DBParameterGroupName":{"shape":"String"} + } + }, + "DeleteDBSecurityGroupMessage":{ + "type":"structure", + "required":["DBSecurityGroupName"], + "members":{ + "DBSecurityGroupName":{"shape":"String"} + } + }, + "DeleteDBSnapshotMessage":{ + "type":"structure", + "required":["DBSnapshotIdentifier"], + "members":{ + "DBSnapshotIdentifier":{"shape":"String"} + } + }, + "DeleteDBSnapshotResult":{ + "type":"structure", + "members":{ + "DBSnapshot":{"shape":"DBSnapshot"} + } + }, + "DeleteDBSubnetGroupMessage":{ + "type":"structure", + "required":["DBSubnetGroupName"], + "members":{ + "DBSubnetGroupName":{"shape":"String"} + } + }, + "DeleteEventSubscriptionMessage":{ + "type":"structure", + "required":["SubscriptionName"], + "members":{ + "SubscriptionName":{"shape":"String"} + } + }, + "DeleteEventSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "DeleteOptionGroupMessage":{ + "type":"structure", + "required":["OptionGroupName"], + "members":{ + "OptionGroupName":{"shape":"String"} + } + }, + "DescribeDBEngineVersionsMessage":{ + "type":"structure", + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"}, + "DefaultOnly":{"shape":"Boolean"}, + "ListSupportedCharacterSets":{"shape":"BooleanOptional"} + } + }, + "DescribeDBInstancesMessage":{ + "type":"structure", + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBLogFilesDetails":{ + "type":"structure", + "members":{ + "LogFileName":{"shape":"String"}, + "LastWritten":{"shape":"Long"}, + "Size":{"shape":"Long"} + } + }, + "DescribeDBLogFilesList":{ + "type":"list", + "member":{ + "shape":"DescribeDBLogFilesDetails", + "locationName":"DescribeDBLogFilesDetails" + } + }, + "DescribeDBLogFilesMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "FilenameContains":{"shape":"String"}, + "FileLastWritten":{"shape":"Long"}, + "FileSize":{"shape":"Long"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBLogFilesResponse":{ + "type":"structure", + "members":{ + "DescribeDBLogFiles":{"shape":"DescribeDBLogFilesList"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBParameterGroupsMessage":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBParametersMessage":{ + "type":"structure", + "required":["DBParameterGroupName"], + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "Source":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBSecurityGroupsMessage":{ + "type":"structure", + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBSnapshotsMessage":{ + "type":"structure", + "members":{ + 
"DBInstanceIdentifier":{"shape":"String"}, + "DBSnapshotIdentifier":{"shape":"String"}, + "SnapshotType":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBSubnetGroupsMessage":{ + "type":"structure", + "members":{ + "DBSubnetGroupName":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeEngineDefaultParametersMessage":{ + "type":"structure", + "required":["DBParameterGroupFamily"], + "members":{ + "DBParameterGroupFamily":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeEngineDefaultParametersResult":{ + "type":"structure", + "members":{ + "EngineDefaults":{"shape":"EngineDefaults"} + } + }, + "DescribeEventCategoriesMessage":{ + "type":"structure", + "members":{ + "SourceType":{"shape":"String"}, + "Filters":{"shape":"FilterList"} + } + }, + "DescribeEventSubscriptionsMessage":{ + "type":"structure", + "members":{ + "SubscriptionName":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeEventsMessage":{ + "type":"structure", + "members":{ + "SourceIdentifier":{"shape":"String"}, + "SourceType":{"shape":"SourceType"}, + "StartTime":{"shape":"TStamp"}, + "EndTime":{"shape":"TStamp"}, + "Duration":{"shape":"IntegerOptional"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeOptionGroupOptionsMessage":{ + "type":"structure", + "required":["EngineName"], + "members":{ + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeOptionGroupsMessage":{ + "type":"structure", + "members":{ + "OptionGroupName":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "Marker":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"} + } + }, + "DescribeOrderableDBInstanceOptionsMessage":{ + "type":"structure", + "required":["Engine"], + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "LicenseModel":{"shape":"String"}, + "Vpc":{"shape":"BooleanOptional"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeReservedDBInstancesMessage":{ + "type":"structure", + "members":{ + "ReservedDBInstanceId":{"shape":"String"}, + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Duration":{"shape":"String"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeReservedDBInstancesOfferingsMessage":{ + "type":"structure", + "members":{ + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Duration":{"shape":"String"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, 
+ "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "Double":{"type":"double"}, + "DownloadDBLogFilePortionDetails":{ + "type":"structure", + "members":{ + "LogFileData":{"shape":"String"}, + "Marker":{"shape":"String"}, + "AdditionalDataPending":{"shape":"Boolean"} + } + }, + "DownloadDBLogFilePortionMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "LogFileName" + ], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "LogFileName":{"shape":"String"}, + "Marker":{"shape":"String"}, + "NumberOfLines":{"shape":"Integer"} + } + }, + "EC2SecurityGroup":{ + "type":"structure", + "members":{ + "Status":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupId":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"} + } + }, + "EC2SecurityGroupList":{ + "type":"list", + "member":{ + "shape":"EC2SecurityGroup", + "locationName":"EC2SecurityGroup" + } + }, + "Endpoint":{ + "type":"structure", + "members":{ + "Address":{"shape":"String"}, + "Port":{"shape":"Integer"} + } + }, + "EngineDefaults":{ + "type":"structure", + "members":{ + "DBParameterGroupFamily":{"shape":"String"}, + "Marker":{"shape":"String"}, + "Parameters":{"shape":"ParametersList"} + }, + "wrapper":true + }, + "Event":{ + "type":"structure", + "members":{ + "SourceIdentifier":{"shape":"String"}, + "SourceType":{"shape":"SourceType"}, + "Message":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "Date":{"shape":"TStamp"} + } + }, + "EventCategoriesList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"EventCategory" + } + }, + "EventCategoriesMap":{ + "type":"structure", + "members":{ + "SourceType":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"} + }, + "wrapper":true + }, + "EventCategoriesMapList":{ + "type":"list", + "member":{ + "shape":"EventCategoriesMap", + "locationName":"EventCategoriesMap" + } + }, + "EventCategoriesMessage":{ + "type":"structure", + "members":{ + "EventCategoriesMapList":{"shape":"EventCategoriesMapList"} + } + }, + "EventList":{ + "type":"list", + "member":{ + "shape":"Event", + "locationName":"Event" + } + }, + "EventSubscription":{ + "type":"structure", + "members":{ + "CustomerAwsId":{"shape":"String"}, + "CustSubscriptionId":{"shape":"String"}, + "SnsTopicArn":{"shape":"String"}, + "Status":{"shape":"String"}, + "SubscriptionCreationTime":{"shape":"String"}, + "SourceType":{"shape":"String"}, + "SourceIdsList":{"shape":"SourceIdsList"}, + "EventCategoriesList":{"shape":"EventCategoriesList"}, + "Enabled":{"shape":"Boolean"} + }, + "wrapper":true + }, + "EventSubscriptionQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"EventSubscriptionQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "EventSubscriptionsList":{ + "type":"list", + "member":{ + "shape":"EventSubscription", + "locationName":"EventSubscription" + } + }, + "EventSubscriptionsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "EventSubscriptionsList":{"shape":"EventSubscriptionsList"} + } + }, + "EventsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "Events":{"shape":"EventList"} + } + }, + "Filter":{ + "type":"structure", + "required":[ + "Name", + "Values" + ], + "members":{ + "Name":{"shape":"String"}, + "Values":{"shape":"FilterValueList"} + } + }, + "FilterList":{ + "type":"list", + 
"member":{ + "shape":"Filter", + "locationName":"Filter" + } + }, + "FilterValueList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"Value" + } + }, + "IPRange":{ + "type":"structure", + "members":{ + "Status":{"shape":"String"}, + "CIDRIP":{"shape":"String"} + } + }, + "IPRangeList":{ + "type":"list", + "member":{ + "shape":"IPRange", + "locationName":"IPRange" + } + }, + "InstanceQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InstanceQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InsufficientDBInstanceCapacityFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InsufficientDBInstanceCapacity", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Integer":{"type":"integer"}, + "IntegerOptional":{"type":"integer"}, + "InvalidDBInstanceStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBInstanceState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBParameterGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBParameterGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSecurityGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSecurityGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSnapshotStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSnapshotState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSubnetGroupFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSubnetGroupFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSubnetGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSubnetGroupStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSubnetStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSubnetStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidEventSubscriptionStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidEventSubscriptionState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidOptionGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidOptionGroupStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidRestoreFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidRestoreFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidSubnet":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidSubnet", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidVPCNetworkStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidVPCNetworkStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "KeyList":{ + "type":"list", + "member":{"shape":"String"} + }, + "ListTagsForResourceMessage":{ + "type":"structure", + "required":["ResourceName"], + "members":{ + "ResourceName":{"shape":"String"}, + "Filters":{"shape":"FilterList"} + } + }, + "Long":{"type":"long"}, + 
"ModifyDBInstanceMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "AllocatedStorage":{"shape":"IntegerOptional"}, + "DBInstanceClass":{"shape":"String"}, + "DBSecurityGroups":{"shape":"DBSecurityGroupNameList"}, + "VpcSecurityGroupIds":{"shape":"VpcSecurityGroupIdList"}, + "ApplyImmediately":{"shape":"Boolean"}, + "MasterUserPassword":{"shape":"String"}, + "DBParameterGroupName":{"shape":"String"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "PreferredBackupWindow":{"shape":"String"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "EngineVersion":{"shape":"String"}, + "AllowMajorVersionUpgrade":{"shape":"Boolean"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "NewDBInstanceIdentifier":{"shape":"String"}, + "StorageType":{"shape":"String"}, + "TdeCredentialArn":{"shape":"String"}, + "TdeCredentialPassword":{"shape":"String"} + } + }, + "ModifyDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "ModifyDBParameterGroupMessage":{ + "type":"structure", + "required":[ + "DBParameterGroupName", + "Parameters" + ], + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "Parameters":{"shape":"ParametersList"} + } + }, + "ModifyDBSubnetGroupMessage":{ + "type":"structure", + "required":[ + "DBSubnetGroupName", + "SubnetIds" + ], + "members":{ + "DBSubnetGroupName":{"shape":"String"}, + "DBSubnetGroupDescription":{"shape":"String"}, + "SubnetIds":{"shape":"SubnetIdentifierList"} + } + }, + "ModifyDBSubnetGroupResult":{ + "type":"structure", + "members":{ + "DBSubnetGroup":{"shape":"DBSubnetGroup"} + } + }, + "ModifyEventSubscriptionMessage":{ + "type":"structure", + "required":["SubscriptionName"], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SnsTopicArn":{"shape":"String"}, + "SourceType":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "Enabled":{"shape":"BooleanOptional"} + } + }, + "ModifyEventSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "ModifyOptionGroupMessage":{ + "type":"structure", + "required":["OptionGroupName"], + "members":{ + "OptionGroupName":{"shape":"String"}, + "OptionsToInclude":{"shape":"OptionConfigurationList"}, + "OptionsToRemove":{"shape":"OptionNamesList"}, + "ApplyImmediately":{"shape":"Boolean"} + } + }, + "ModifyOptionGroupResult":{ + "type":"structure", + "members":{ + "OptionGroup":{"shape":"OptionGroup"} + } + }, + "Option":{ + "type":"structure", + "members":{ + "OptionName":{"shape":"String"}, + "OptionDescription":{"shape":"String"}, + "Persistent":{"shape":"Boolean"}, + "Permanent":{"shape":"Boolean"}, + "Port":{"shape":"IntegerOptional"}, + "OptionSettings":{"shape":"OptionSettingConfigurationList"}, + "DBSecurityGroupMemberships":{"shape":"DBSecurityGroupMembershipList"}, + "VpcSecurityGroupMemberships":{"shape":"VpcSecurityGroupMembershipList"} + } + }, + "OptionConfiguration":{ + "type":"structure", + "required":["OptionName"], + "members":{ + "OptionName":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "DBSecurityGroupMemberships":{"shape":"DBSecurityGroupNameList"}, + "VpcSecurityGroupMemberships":{"shape":"VpcSecurityGroupIdList"}, + "OptionSettings":{"shape":"OptionSettingsList"} + } + }, + "OptionConfigurationList":{ + "type":"list", + 
"member":{ + "shape":"OptionConfiguration", + "locationName":"OptionConfiguration" + } + }, + "OptionGroup":{ + "type":"structure", + "members":{ + "OptionGroupName":{"shape":"String"}, + "OptionGroupDescription":{"shape":"String"}, + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"}, + "Options":{"shape":"OptionsList"}, + "AllowsVpcAndNonVpcInstanceMemberships":{"shape":"Boolean"}, + "VpcId":{"shape":"String"} + }, + "wrapper":true + }, + "OptionGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"OptionGroupAlreadyExistsFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "OptionGroupMembership":{ + "type":"structure", + "members":{ + "OptionGroupName":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "OptionGroupMembershipList":{ + "type":"list", + "member":{ + "shape":"OptionGroupMembership", + "locationName":"OptionGroupMembership" + } + }, + "OptionGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"OptionGroupNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "OptionGroupOption":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "Description":{"shape":"String"}, + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"}, + "MinimumRequiredMinorEngineVersion":{"shape":"String"}, + "PortRequired":{"shape":"Boolean"}, + "DefaultPort":{"shape":"IntegerOptional"}, + "OptionsDependedOn":{"shape":"OptionsDependedOn"}, + "Persistent":{"shape":"Boolean"}, + "Permanent":{"shape":"Boolean"}, + "OptionGroupOptionSettings":{"shape":"OptionGroupOptionSettingsList"} + } + }, + "OptionGroupOptionSetting":{ + "type":"structure", + "members":{ + "SettingName":{"shape":"String"}, + "SettingDescription":{"shape":"String"}, + "DefaultValue":{"shape":"String"}, + "ApplyType":{"shape":"String"}, + "AllowedValues":{"shape":"String"}, + "IsModifiable":{"shape":"Boolean"} + } + }, + "OptionGroupOptionSettingsList":{ + "type":"list", + "member":{ + "shape":"OptionGroupOptionSetting", + "locationName":"OptionGroupOptionSetting" + } + }, + "OptionGroupOptionsList":{ + "type":"list", + "member":{ + "shape":"OptionGroupOption", + "locationName":"OptionGroupOption" + } + }, + "OptionGroupOptionsMessage":{ + "type":"structure", + "members":{ + "OptionGroupOptions":{"shape":"OptionGroupOptionsList"}, + "Marker":{"shape":"String"} + } + }, + "OptionGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"OptionGroupQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "OptionGroups":{ + "type":"structure", + "members":{ + "OptionGroupsList":{"shape":"OptionGroupsList"}, + "Marker":{"shape":"String"} + } + }, + "OptionGroupsList":{ + "type":"list", + "member":{ + "shape":"OptionGroup", + "locationName":"OptionGroup" + } + }, + "OptionNamesList":{ + "type":"list", + "member":{"shape":"String"} + }, + "OptionSetting":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "Value":{"shape":"String"}, + "DefaultValue":{"shape":"String"}, + "Description":{"shape":"String"}, + "ApplyType":{"shape":"String"}, + "DataType":{"shape":"String"}, + "AllowedValues":{"shape":"String"}, + "IsModifiable":{"shape":"Boolean"}, + "IsCollection":{"shape":"Boolean"} + } + }, + "OptionSettingConfigurationList":{ + "type":"list", + "member":{ + "shape":"OptionSetting", + "locationName":"OptionSetting" + } + }, + "OptionSettingsList":{ + 
"type":"list", + "member":{ + "shape":"OptionSetting", + "locationName":"OptionSetting" + } + }, + "OptionsDependedOn":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"OptionName" + } + }, + "OptionsList":{ + "type":"list", + "member":{ + "shape":"Option", + "locationName":"Option" + } + }, + "OrderableDBInstanceOption":{ + "type":"structure", + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "LicenseModel":{"shape":"String"}, + "AvailabilityZones":{"shape":"AvailabilityZoneList"}, + "MultiAZCapable":{"shape":"Boolean"}, + "ReadReplicaCapable":{"shape":"Boolean"}, + "Vpc":{"shape":"Boolean"}, + "StorageType":{"shape":"String"}, + "SupportsIops":{"shape":"Boolean"} + }, + "wrapper":true + }, + "OrderableDBInstanceOptionsList":{ + "type":"list", + "member":{ + "shape":"OrderableDBInstanceOption", + "locationName":"OrderableDBInstanceOption" + } + }, + "OrderableDBInstanceOptionsMessage":{ + "type":"structure", + "members":{ + "OrderableDBInstanceOptions":{"shape":"OrderableDBInstanceOptionsList"}, + "Marker":{"shape":"String"} + } + }, + "Parameter":{ + "type":"structure", + "members":{ + "ParameterName":{"shape":"String"}, + "ParameterValue":{"shape":"String"}, + "Description":{"shape":"String"}, + "Source":{"shape":"String"}, + "ApplyType":{"shape":"String"}, + "DataType":{"shape":"String"}, + "AllowedValues":{"shape":"String"}, + "IsModifiable":{"shape":"Boolean"}, + "MinimumEngineVersion":{"shape":"String"}, + "ApplyMethod":{"shape":"ApplyMethod"} + } + }, + "ParametersList":{ + "type":"list", + "member":{ + "shape":"Parameter", + "locationName":"Parameter" + } + }, + "PendingModifiedValues":{ + "type":"structure", + "members":{ + "DBInstanceClass":{"shape":"String"}, + "AllocatedStorage":{"shape":"IntegerOptional"}, + "MasterUserPassword":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "EngineVersion":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "DBInstanceIdentifier":{"shape":"String"}, + "StorageType":{"shape":"String"} + } + }, + "PointInTimeRestoreNotEnabledFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"PointInTimeRestoreNotEnabled", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "PromoteReadReplicaMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "PreferredBackupWindow":{"shape":"String"} + } + }, + "PromoteReadReplicaResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "ProvisionedIopsNotAvailableInAZFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ProvisionedIopsNotAvailableInAZFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "PurchaseReservedDBInstancesOfferingMessage":{ + "type":"structure", + "required":["ReservedDBInstancesOfferingId"], + "members":{ + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "ReservedDBInstanceId":{"shape":"String"}, + "DBInstanceCount":{"shape":"IntegerOptional"}, + "Tags":{"shape":"TagList"} + } + }, + "PurchaseReservedDBInstancesOfferingResult":{ + "type":"structure", + "members":{ + "ReservedDBInstance":{"shape":"ReservedDBInstance"} + } + }, + "ReadReplicaDBInstanceIdentifierList":{ + "type":"list", + "member":{ + "shape":"String", + 
"locationName":"ReadReplicaDBInstanceIdentifier" + } + }, + "RebootDBInstanceMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "ForceFailover":{"shape":"BooleanOptional"} + } + }, + "RebootDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "RecurringCharge":{ + "type":"structure", + "members":{ + "RecurringChargeAmount":{"shape":"Double"}, + "RecurringChargeFrequency":{"shape":"String"} + }, + "wrapper":true + }, + "RecurringChargeList":{ + "type":"list", + "member":{ + "shape":"RecurringCharge", + "locationName":"RecurringCharge" + } + }, + "RemoveSourceIdentifierFromSubscriptionMessage":{ + "type":"structure", + "required":[ + "SubscriptionName", + "SourceIdentifier" + ], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SourceIdentifier":{"shape":"String"} + } + }, + "RemoveSourceIdentifierFromSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "RemoveTagsFromResourceMessage":{ + "type":"structure", + "required":[ + "ResourceName", + "TagKeys" + ], + "members":{ + "ResourceName":{"shape":"String"}, + "TagKeys":{"shape":"KeyList"} + } + }, + "ReservedDBInstance":{ + "type":"structure", + "members":{ + "ReservedDBInstanceId":{"shape":"String"}, + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "StartTime":{"shape":"TStamp"}, + "Duration":{"shape":"Integer"}, + "FixedPrice":{"shape":"Double"}, + "UsagePrice":{"shape":"Double"}, + "CurrencyCode":{"shape":"String"}, + "DBInstanceCount":{"shape":"Integer"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MultiAZ":{"shape":"Boolean"}, + "State":{"shape":"String"}, + "RecurringCharges":{"shape":"RecurringChargeList"} + }, + "wrapper":true + }, + "ReservedDBInstanceAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedDBInstanceAlreadyExists", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ReservedDBInstanceList":{ + "type":"list", + "member":{ + "shape":"ReservedDBInstance", + "locationName":"ReservedDBInstance" + } + }, + "ReservedDBInstanceMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ReservedDBInstances":{"shape":"ReservedDBInstanceList"} + } + }, + "ReservedDBInstanceNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedDBInstanceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ReservedDBInstanceQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedDBInstanceQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ReservedDBInstancesOffering":{ + "type":"structure", + "members":{ + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Duration":{"shape":"Integer"}, + "FixedPrice":{"shape":"Double"}, + "UsagePrice":{"shape":"Double"}, + "CurrencyCode":{"shape":"String"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MultiAZ":{"shape":"Boolean"}, + "RecurringCharges":{"shape":"RecurringChargeList"} + }, + "wrapper":true + }, + "ReservedDBInstancesOfferingList":{ + "type":"list", + "member":{ + "shape":"ReservedDBInstancesOffering", + "locationName":"ReservedDBInstancesOffering" + } + }, + "ReservedDBInstancesOfferingMessage":{ + 
"type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ReservedDBInstancesOfferings":{"shape":"ReservedDBInstancesOfferingList"} + } + }, + "ReservedDBInstancesOfferingNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedDBInstancesOfferingNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ResetDBParameterGroupMessage":{ + "type":"structure", + "required":["DBParameterGroupName"], + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "ResetAllParameters":{"shape":"Boolean"}, + "Parameters":{"shape":"ParametersList"} + } + }, + "RestoreDBInstanceFromDBSnapshotMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "DBSnapshotIdentifier" + ], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "DBSnapshotIdentifier":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "AvailabilityZone":{"shape":"String"}, + "DBSubnetGroupName":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "PubliclyAccessible":{"shape":"BooleanOptional"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "LicenseModel":{"shape":"String"}, + "DBName":{"shape":"String"}, + "Engine":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "Tags":{"shape":"TagList"}, + "StorageType":{"shape":"String"}, + "TdeCredentialArn":{"shape":"String"}, + "TdeCredentialPassword":{"shape":"String"} + } + }, + "RestoreDBInstanceFromDBSnapshotResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "RestoreDBInstanceToPointInTimeMessage":{ + "type":"structure", + "required":[ + "SourceDBInstanceIdentifier", + "TargetDBInstanceIdentifier" + ], + "members":{ + "SourceDBInstanceIdentifier":{"shape":"String"}, + "TargetDBInstanceIdentifier":{"shape":"String"}, + "RestoreTime":{"shape":"TStamp"}, + "UseLatestRestorableTime":{"shape":"Boolean"}, + "DBInstanceClass":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "AvailabilityZone":{"shape":"String"}, + "DBSubnetGroupName":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "PubliclyAccessible":{"shape":"BooleanOptional"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "LicenseModel":{"shape":"String"}, + "DBName":{"shape":"String"}, + "Engine":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "Tags":{"shape":"TagList"}, + "StorageType":{"shape":"String"}, + "TdeCredentialArn":{"shape":"String"}, + "TdeCredentialPassword":{"shape":"String"} + } + }, + "RestoreDBInstanceToPointInTimeResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "RevokeDBSecurityGroupIngressMessage":{ + "type":"structure", + "required":["DBSecurityGroupName"], + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "CIDRIP":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupId":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"} + } + }, + "RevokeDBSecurityGroupIngressResult":{ + "type":"structure", + "members":{ + "DBSecurityGroup":{"shape":"DBSecurityGroup"} + } + }, + "SNSInvalidTopicFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SNSInvalidTopic", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SNSNoAuthorizationFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SNSNoAuthorization", + "httpStatusCode":400, 
+ "senderFault":true + }, + "exception":true + }, + "SNSTopicArnNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SNSTopicArnNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SnapshotQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SnapshotQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SourceIdsList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SourceId" + } + }, + "SourceNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SourceType":{ + "type":"string", + "enum":[ + "db-instance", + "db-parameter-group", + "db-security-group", + "db-snapshot" + ] + }, + "StorageQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"StorageQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "StorageTypeNotSupportedFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"StorageTypeNotSupported", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "String":{"type":"string"}, + "Subnet":{ + "type":"structure", + "members":{ + "SubnetIdentifier":{"shape":"String"}, + "SubnetAvailabilityZone":{"shape":"AvailabilityZone"}, + "SubnetStatus":{"shape":"String"} + } + }, + "SubnetAlreadyInUse":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubnetAlreadyInUse", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SubnetIdentifierList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SubnetIdentifier" + } + }, + "SubnetList":{ + "type":"list", + "member":{ + "shape":"Subnet", + "locationName":"Subnet" + } + }, + "SubscriptionAlreadyExistFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionAlreadyExist", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SubscriptionCategoryNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionCategoryNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SubscriptionNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SupportedCharacterSetsList":{ + "type":"list", + "member":{ + "shape":"CharacterSet", + "locationName":"CharacterSet" + } + }, + "TStamp":{"type":"timestamp"}, + "Tag":{ + "type":"structure", + "members":{ + "Key":{"shape":"String"}, + "Value":{"shape":"String"} + } + }, + "TagList":{ + "type":"list", + "member":{ + "shape":"Tag", + "locationName":"Tag" + } + }, + "TagListMessage":{ + "type":"structure", + "members":{ + "TagList":{"shape":"TagList"} + } + }, + "VpcSecurityGroupIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"VpcSecurityGroupId" + } + }, + "VpcSecurityGroupMembership":{ + "type":"structure", + "members":{ + "VpcSecurityGroupId":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "VpcSecurityGroupMembershipList":{ + "type":"list", + "member":{ + "shape":"VpcSecurityGroupMembership", + "locationName":"VpcSecurityGroupMembership" + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2014-09-01/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2014-09-01/docs-2.json new file 
mode 100644 index 000000000..7c321267f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2014-09-01/docs-2.json @@ -0,0 +1,1932 @@
+{
+  "version": "2.0",
+  "service": "Amazon Relational Database Service (Amazon RDS) is a web service that makes it easier to set up, operate, and scale a relational database in the cloud. It provides cost-efficient, resizable capacity for an industry-standard relational database and manages common database administration tasks, freeing up developers to focus on what makes their applications and businesses unique. Amazon RDS gives you access to the capabilities of a MySQL, PostgreSQL, Microsoft SQL Server, or Oracle database server. This means the code, applications, and tools you already use today with your existing databases work with Amazon RDS without modification. Amazon RDS automatically backs up your database and maintains the database software that powers your DB instance. Amazon RDS is flexible: you can scale your database instance's compute resources and storage capacity to meet your application's demand. As with all Amazon Web Services, there are no up-front investments, and you pay only for the resources you use. This is an interface reference for Amazon RDS. It contains documentation for a programming or command line interface you can use to manage Amazon RDS. Note that Amazon RDS is asynchronous, which means that some interfaces may require techniques such as polling or callback functions to determine when a command has been applied. In this reference, the parameter descriptions indicate whether a command is applied immediately, on the next instance reboot, or during the maintenance window. For a summary of the Amazon RDS interfaces, go to Available RDS Interfaces.",
+  "operations": {
+    "AddSourceIdentifierToSubscription": "Adds a source identifier to an existing RDS event notification subscription.",
    ", + "AddTagsToResource": "

    Adds metadata tags to an Amazon RDS resource. These tags can also be used with cost allocation reporting to track cost associated with Amazon RDS resources, or used in Condition statement in IAM policy for Amazon RDS.

    For an overview on tagging Amazon RDS resources, see Tagging Amazon RDS Resources.

    ", + "AuthorizeDBSecurityGroupIngress": "

+    "AuthorizeDBSecurityGroupIngress": "Enables ingress to a DBSecurityGroup using one of two forms of authorization. First, EC2 or VPC security groups can be added to the DBSecurityGroup if the application using the database is running on EC2 or VPC instances. Second, IP ranges are available if the application accessing your database is running on the Internet. The required parameters for this API are one of: a CIDR range, EC2SecurityGroupId for VPC, or (EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId for non-VPC). You cannot authorize ingress from an EC2 security group in one Region to an Amazon RDS DB instance in another. You cannot authorize ingress from a VPC security group in one VPC to an Amazon RDS DB instance in another. For an overview of CIDR ranges, go to the Wikipedia Tutorial.",
    ", + "CopyDBParameterGroup": "

    Copies the specified DB parameter group.

    ", + "CopyDBSnapshot": "

    Copies the specified DBSnapshot. The source DBSnapshot must be in the \"available\" state.

    ", + "CopyOptionGroup": "

    Copies the specified option group.

    ", + "CreateDBInstance": "

    Creates a new DB instance.

    ", + "CreateDBInstanceReadReplica": "

+    "CreateDBInstanceReadReplica": "Creates a DB instance that acts as a Read Replica of a source DB instance. All Read Replica DB instances are created as Single-AZ deployments with backups disabled. All other DB instance attributes (including DB security groups and DB parameter groups) are inherited from the source DB instance, except as specified below. The source DB instance must have backup retention enabled.",
    ", + "CreateDBParameterGroup": "

    Creates a new DB parameter group.

    A DB parameter group is initially created with the default parameters for the database engine used by the DB instance. To provide custom values for any of the parameters, you must modify the group after creating it using ModifyDBParameterGroup. Once you've created a DB parameter group, you need to associate it with your DB instance using ModifyDBInstance. When you associate a new DB parameter group with a running DB instance, you need to reboot the DB instance without failover for the new DB parameter group and associated settings to take effect.

    After you create a DB parameter group, you should wait at least 5 minutes before creating your first DB instance that uses that DB parameter group as the default parameter group. This allows Amazon RDS to fully complete the create action before the parameter group is used as the default for a new DB instance. This is especially important for parameters that are critical when creating the default database for a DB instance, such as the character set for the default database defined by the character_set_database parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBParameters command to verify that your DB parameter group has been created or modified.

    ", + "CreateDBSecurityGroup": "

    Creates a new DB security group. DB security groups control access to a DB instance.

    ", + "CreateDBSnapshot": "

    Creates a DBSnapshot. The source DBInstance must be in \"available\" state.

    ", + "CreateDBSubnetGroup": "

    Creates a new DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the region.

    ", + "CreateEventSubscription": "

    Creates an RDS event notification subscription. This action requires a topic ARN (Amazon Resource Name) created by either the RDS console, the SNS console, or the SNS API. To obtain an ARN with SNS, you must create a topic in Amazon SNS and subscribe to the topic. The ARN is displayed in the SNS console.

    You can specify the type of source (SourceType) you want to be notified of, provide a list of RDS sources (SourceIds) that triggers the events, and provide a list of event categories (EventCategories) for events you want to be notified of. For example, you can specify SourceType = db-instance, SourceIds = mydbinstance1, mydbinstance2 and EventCategories = Availability, Backup.

    If you specify both the SourceType and SourceIds, such as SourceType = db-instance and SourceIdentifier = myDBInstance1, you will be notified of all the db-instance events for the specified source. If you specify a SourceType but do not specify a SourceIdentifier, you will receive notice of the events for that source type for all your RDS sources. If you do not specify either the SourceType nor the SourceIdentifier, you will be notified of events generated from all RDS sources belonging to your customer account.

    ", + "CreateOptionGroup": "

+    "CreateOptionGroup": "Creates a new option group. You can create up to 20 option groups.",
    ", + "DeleteDBInstance": "

    The DeleteDBInstance action deletes a previously provisioned DB instance. A successful response from the web service indicates the request was received correctly. When you delete a DB instance, all automated backups for that instance are deleted and cannot be recovered. Manual DB snapshots of the DB instance to be deleted are not deleted.

    If a final DB snapshot is requested the status of the RDS instance will be \"deleting\" until the DB snapshot is created. The API action DescribeDBInstance is used to monitor the status of this operation. The action cannot be canceled or reverted once submitted.

    ", + "DeleteDBParameterGroup": "

+    "DeleteDBParameterGroup": "Deletes a specified DBParameterGroup. The DBParameterGroup to be deleted cannot be associated with any DB instances.",
+    "DeleteDBSecurityGroup": "Deletes a DB security group. The specified DB security group must not be associated with any DB instances.",
+    "DeleteDBSnapshot": "Deletes a DBSnapshot. If the snapshot is being copied, the copy operation is terminated. The DBSnapshot must be in the available state to be deleted.",
+    "DeleteDBSubnetGroup": "Deletes a DB subnet group. The specified database subnet group must not be associated with any DB instances.",
+    "DeleteEventSubscription": "Deletes an RDS event notification subscription.",
    ", + "DeleteOptionGroup": "

    Deletes an existing option group.

    ", + "DescribeDBEngineVersions": "

    Returns a list of the available DB engines.

    ", + "DescribeDBInstances": "

    Returns information about provisioned RDS instances. This API supports pagination.

    ", + "DescribeDBLogFiles": "

+    "DescribeDBLogFiles": "Returns a list of DB log files for the DB instance.",
+    "DescribeDBParameterGroups": "Returns a list of DBParameterGroup descriptions. If a DBParameterGroupName is specified, the list will contain only the description of the specified DB parameter group.",
+    "DescribeDBParameters": "Returns the detailed parameter list for a particular DB parameter group.",
+    "DescribeDBSecurityGroups": "Returns a list of DBSecurityGroup descriptions. If a DBSecurityGroupName is specified, the list will contain only the descriptions of the specified DB security group.",
+    "DescribeDBSnapshots": "Returns information about DB snapshots. This API supports pagination.",
+    "DescribeDBSubnetGroups": "Returns a list of DBSubnetGroup descriptions. If a DBSubnetGroupName is specified, the list will contain only the descriptions of the specified DBSubnetGroup. For an overview of CIDR ranges, go to the Wikipedia Tutorial.",
    ", + "DescribeEngineDefaultParameters": "

    Returns the default engine and system parameter information for the specified database engine.

    ", + "DescribeEventCategories": "

    Displays a list of categories for all event source types, or, if specified, for a specified source type. You can see a list of the event categories and source types in the Events topic in the Amazon RDS User Guide.

    ", + "DescribeEventSubscriptions": "

    Lists all the subscription descriptions for a customer account. The description for a subscription includes SubscriptionName, SNSTopicARN, CustomerID, SourceType, SourceID, CreationTime, and Status.

    If you specify a SubscriptionName, lists the description for that subscription.

    ", + "DescribeEvents": "

    Returns events related to DB instances, DB security groups, DB snapshots, and DB parameter groups for the past 14 days. Events specific to a particular DB instance, DB security group, database snapshot, or DB parameter group can be obtained by providing the name as a parameter. By default, the past hour of events are returned.

    ", + "DescribeOptionGroupOptions": "

    Describes all available options.

    ", + "DescribeOptionGroups": "

    Describes the available option groups.

    ", + "DescribeOrderableDBInstanceOptions": "

    Returns a list of orderable DB instance options for the specified engine.

    ", + "DescribeReservedDBInstances": "

    Returns information about reserved DB instances for this account, or about a specified reserved DB instance.

    ", + "DescribeReservedDBInstancesOfferings": "

    Lists available reserved DB instance offerings.

    ", + "DownloadDBLogFilePortion": "

    Downloads all or a portion of the specified log file.

    ", + "ListTagsForResource": "

+    "ListTagsForResource": "Lists all tags on an Amazon RDS resource. For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS Resources.",
+    "ModifyDBInstance": "Modifies settings for a DB instance. You can change one or more database configuration parameters by specifying these parameters and the new values in the request.",
    ", + "ModifyDBParameterGroup": "

    Modifies the parameters of a DB parameter group. To modify more than one parameter, submit a list of the following: ParameterName, ParameterValue, and ApplyMethod. A maximum of 20 parameters can be modified in a single request.

    Changes to dynamic parameters are applied immediately. Changes to static parameters require a reboot without failover to the DB instance associated with the parameter group before the change can take effect.

    After you modify a DB parameter group, you should wait at least 5 minutes before creating your first DB instance that uses that DB parameter group as the default parameter group. This allows Amazon RDS to fully complete the modify action before the parameter group is used as the default for a new DB instance. This is especially important for parameters that are critical when creating the default database for a DB instance, such as the character set for the default database defined by the character_set_database parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBParameters command to verify that your DB parameter group has been created or modified.

    ", + "ModifyDBSubnetGroup": "

+    "ModifyDBSubnetGroup": "Modifies an existing DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the region.",
+    "ModifyEventSubscription": "Modifies an existing RDS event notification subscription. Note that you cannot modify the source identifiers using this call; to change source identifiers for a subscription, use the AddSourceIdentifierToSubscription and RemoveSourceIdentifierFromSubscription calls. You can see a list of the event categories for a given SourceType in the Events topic in the Amazon RDS User Guide or by using the DescribeEventCategories action.",
+    "ModifyOptionGroup": "Modifies an existing option group.",
    ", + "PromoteReadReplica": "

    Promotes a Read Replica DB instance to a standalone DB instance.

    We recommend that you enable automated backups on your Read Replica before promoting the Read Replica. This ensures that no backup is taken during the promotion process. Once the instance is promoted to a primary instance, backups are taken based on your backup settings.

    ", + "PurchaseReservedDBInstancesOffering": "

    Purchases a reserved DB instance offering.

    ", + "RebootDBInstance": "

    Rebooting a DB instance restarts the database engine service. A reboot also applies to the DB instance any modifications to the associated DB parameter group that were pending. Rebooting a DB instance results in a momentary outage of the instance, during which the DB instance status is set to rebooting. If the RDS instance is configured for MultiAZ, it is possible that the reboot will be conducted through a failover. An Amazon RDS event is created when the reboot is completed.

    If your DB instance is deployed in multiple Availability Zones, you can force a failover from one AZ to the other during the reboot. You might force a failover to test the availability of your DB instance deployment or to restore operations to the original AZ after a failover occurs.

    The time required to reboot is a function of the specific database engine's crash recovery process. To improve the reboot time, we recommend that you reduce database activities as much as possible during the reboot process to reduce rollback activity for in-transit transactions.

    ", + "RemoveSourceIdentifierFromSubscription": "

+    "RemoveSourceIdentifierFromSubscription": "Removes a source identifier from an existing RDS event notification subscription.",
+    "RemoveTagsFromResource": "Removes metadata tags from an Amazon RDS resource. For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS Resources.",
+    "ResetDBParameterGroup": "Modifies the parameters of a DB parameter group to the engine/system default value. To reset specific parameters, submit a list of the following: ParameterName and ApplyMethod. To reset the entire DB parameter group, specify the DBParameterGroup name and ResetAllParameters parameters. When resetting the entire group, dynamic parameters are updated immediately and static parameters are set to pending-reboot to take effect on the next DB instance restart or RebootDBInstance request.",
    ", + "RestoreDBInstanceFromDBSnapshot": "

    Creates a new DB instance from a DB snapshot. The target database is created from the source database restore point with the same configuration as the original source database, except that the new RDS instance is created with the default security group.

    If your intent is to replace your original DB instance with the new, restored DB instance, then rename your original DB instance before you call the RestoreDBInstanceFromDBSnapshot action. RDS does not allow two DB instances with the same name. Once you have renamed your original DB instance with a different identifier, then you can pass the original name of the DB instance as the DBInstanceIdentifier in the call to the RestoreDBInstanceFromDBSnapshot action. The result is that you will replace the original DB instance with the DB instance created from the snapshot.

    ", + "RestoreDBInstanceToPointInTime": "

    Restores a DB instance to an arbitrary point-in-time. Users can restore to any point in time before the LatestRestorableTime for up to BackupRetentionPeriod days. The target database is created from the source database with the same configuration as the original database except that the DB instance is created with the default DB security group.

    ", + "RevokeDBSecurityGroupIngress": "

+    "RevokeDBSecurityGroupIngress": "Revokes ingress from a DBSecurityGroup for previously authorized IP ranges or EC2 or VPC security groups. The required parameters for this API are one of: CIDRIP, EC2SecurityGroupId for VPC, or (EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId).",
    " + }, + "shapes": { + "AddSourceIdentifierToSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "AddSourceIdentifierToSubscriptionResult": { + "base": null, + "refs": { + } + }, + "AddTagsToResourceMessage": { + "base": "

    ", + "refs": { + } + }, + "ApplyMethod": { + "base": null, + "refs": { + "Parameter$ApplyMethod": "

    Indicates when to apply parameter updates.

    " + } + }, + "AuthorizationAlreadyExistsFault": { + "base": "

    The specified CIDRIP or EC2 security group is already authorized for the specified DB security group.

    ", + "refs": { + } + }, + "AuthorizationNotFoundFault": { + "base": "

    Specified CIDRIP or EC2 security group is not authorized for the specified DB security group.

    RDS also may not be authorized via IAM to perform the necessary actions on your behalf.

    ", + "refs": { + } + }, + "AuthorizationQuotaExceededFault": { + "base": "

    DB security group authorization quota has been reached.

    ", + "refs": { + } + }, + "AuthorizeDBSecurityGroupIngressMessage": { + "base": "

    ", + "refs": { + } + }, + "AuthorizeDBSecurityGroupIngressResult": { + "base": null, + "refs": { + } + }, + "AvailabilityZone": { + "base": "

    Contains Availability Zone information.

    This data type is used as an element in the following data type:

    ", + "refs": { + "AvailabilityZoneList$member": null, + "Subnet$SubnetAvailabilityZone": null + } + }, + "AvailabilityZoneList": { + "base": null, + "refs": { + "OrderableDBInstanceOption$AvailabilityZones": "

    A list of availability zones for the orderable DB instance.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "DBInstance$MultiAZ": "

    Specifies if the DB instance is a Multi-AZ deployment.

    ", + "DBInstance$AutoMinorVersionUpgrade": "

    Indicates that minor version patches are applied automatically.

    ", + "DBInstance$PubliclyAccessible": "

    Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

    Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

    • Default VPC: true
    • VPC: false

    If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private.

    ", + "DBInstanceStatusInfo$Normal": "

    Boolean value that is true if the instance is operating normally, or false if the instance is in an error state.

    ", + "DeleteDBInstanceMessage$SkipFinalSnapshot": "

    Determines whether a final DB snapshot is created before the DB instance is deleted. If true is specified, no DBSnapshot is created. If false is specified, a DB snapshot is created before the DB instance is deleted.

    Specify true when deleting a Read Replica.

    The FinalDBSnapshotIdentifier parameter must be specified if SkipFinalSnapshot is false.

    Default: false
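
    A sketch of a deletion that keeps a final snapshot, using aws-sdk-go (identifiers are placeholders):

        package main

        import (
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/rds"
        )

        func main() {
            svc := rds.New(session.Must(session.NewSession()))
            // SkipFinalSnapshot is false, so FinalDBSnapshotIdentifier is required.
            _, err := svc.DeleteDBInstance(&rds.DeleteDBInstanceInput{
                DBInstanceIdentifier:      aws.String("mydb"),
                SkipFinalSnapshot:         aws.Bool(false),
                FinalDBSnapshotIdentifier: aws.String("mydb-final-snapshot"),
            })
            if err != nil {
                log.Fatal(err)
            }
        }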

    ", + "DescribeDBEngineVersionsMessage$DefaultOnly": "

    Indicates that only the default version of the specified engine or engine and major version combination is returned.

    ", + "DownloadDBLogFilePortionDetails$AdditionalDataPending": "

    Boolean value that, if true, indicates there is more data to be downloaded.

    ", + "EventSubscription$Enabled": "

    A Boolean value indicating if the subscription is enabled. True indicates the subscription is enabled.

    ", + "ModifyDBInstanceMessage$ApplyImmediately": "

    Specifies whether the modifications in this request and any pending modifications are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the DB instance.

    If this parameter is set to false, changes to the DB instance are applied during the next maintenance window. Some parameter changes can cause an outage and will be applied on the next call to RebootDBInstance, or the next failure reboot. Review the table of parameters in Modifying a DB Instance and Using the Apply Immediately Parameter to see the impact that setting ApplyImmediately to true or false has for each modified parameter and to determine when the changes will be applied.

    Default: false
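
    For instance, an instance-class change applied immediately rather than in the maintenance window, as a sketch with aws-sdk-go (identifier and class are placeholders):

        package main

        import (
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/rds"
        )

        func main() {
            svc := rds.New(session.Must(session.NewSession()))
            // ApplyImmediately forces the change now; this can cause an outage.
            _, err := svc.ModifyDBInstance(&rds.ModifyDBInstanceInput{
                DBInstanceIdentifier: aws.String("mydb"),
                DBInstanceClass:      aws.String("db.m4.large"),
                ApplyImmediately:     aws.Bool(true),
            })
            if err != nil {
                log.Fatal(err)
            }
        }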

    ", + "ModifyDBInstanceMessage$AllowMajorVersionUpgrade": "

    Indicates that major version upgrades are allowed. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible.

    Constraints: This parameter must be set to true when specifying a value for the EngineVersion parameter that is a different major version than the DB instance's current version.

    ", + "ModifyOptionGroupMessage$ApplyImmediately": "

    Indicates whether the changes should be applied immediately, or during the next maintenance window for each instance associated with the option group.

    ", + "Option$Persistent": "

    Indicate if this option is persistent.

    ", + "Option$Permanent": "

    Indicate if this option is permanent.

    ", + "OptionGroup$AllowsVpcAndNonVpcInstanceMemberships": "

    Indicates whether this option group can be applied to both VPC and non-VPC instances. The value true indicates the option group can be applied to both VPC and non-VPC instances.

    ", + "OptionGroupOption$PortRequired": "

    Specifies whether the option requires a port.

    ", + "OptionGroupOption$Persistent": "

    A persistent option cannot be removed from the option group while the option group is in use, but the option can be removed from the DB instance by modifying the instance to use another option group that does not include this option.

    ", + "OptionGroupOption$Permanent": "

    A permanent option cannot be removed from the option group once the option group is used, and it cannot be removed from the DB instance after an option group with this permanent option has been assigned.

    ", + "OptionGroupOptionSetting$IsModifiable": "

    Boolean value where true indicates that this option group option can be changed from the default value.

    ", + "OptionSetting$IsModifiable": "

    A Boolean value that, when true, indicates the option setting can be modified from the default.

    ", + "OptionSetting$IsCollection": "

    Indicates if the option setting is part of a collection.

    ", + "OrderableDBInstanceOption$MultiAZCapable": "

    Indicates whether this orderable DB instance is multi-AZ capable.

    ", + "OrderableDBInstanceOption$ReadReplicaCapable": "

    Indicates whether this orderable DB instance can have a Read Replica.

    ", + "OrderableDBInstanceOption$Vpc": "

    Indicates whether this is a VPC orderable DB instance.

    ", + "OrderableDBInstanceOption$SupportsIops": "

    Indicates whether this orderable DB instance supports provisioned IOPS.

    ", + "Parameter$IsModifiable": "

    Indicates whether (true) or not (false) the parameter can be modified. Some parameters have security or operational implications that prevent them from being changed.

    ", + "ReservedDBInstance$MultiAZ": "

    Indicates if the reservation applies to Multi-AZ deployments.

    ", + "ReservedDBInstancesOffering$MultiAZ": "

    Indicates if the offering applies to Multi-AZ deployments.

    ", + "ResetDBParameterGroupMessage$ResetAllParameters": "

    Specifies whether (true) or not (false) to reset all parameters in the DB parameter group to default values.

    Default: true

    ", + "RestoreDBInstanceToPointInTimeMessage$UseLatestRestorableTime": "

    Specifies whether (true) or not (false) the DB instance is restored from the latest backup time.

    Default: false

    Constraints: Cannot be specified if RestoreTime parameter is provided.

    " + } + }, + "BooleanOptional": { + "base": null, + "refs": { + "CreateDBInstanceMessage$MultiAZ": "

    Specifies if the DB instance is a Multi-AZ deployment. You cannot set the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    ", + "CreateDBInstanceMessage$AutoMinorVersionUpgrade": "

    Indicates that minor engine upgrades will be applied automatically to the DB instance during the maintenance window.

    Default: true

    ", + "CreateDBInstanceMessage$PubliclyAccessible": "

    Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

    Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

    • Default VPC: true
    • VPC: false

    If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private.
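
    Because the default depends on the VPC context, it can be safer to set PubliclyAccessible explicitly. A minimal CreateDBInstance sketch with aws-sdk-go (all identifiers and the password are placeholders):

        package main

        import (
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/rds"
        )

        func main() {
            svc := rds.New(session.Must(session.NewSession()))
            // PubliclyAccessible is set explicitly rather than relying on the
            // VPC-dependent default described above.
            _, err := svc.CreateDBInstance(&rds.CreateDBInstanceInput{
                DBInstanceIdentifier: aws.String("mydb"),
                DBInstanceClass:      aws.String("db.t2.micro"),
                Engine:               aws.String("mysql"),
                AllocatedStorage:     aws.Int64(20),
                MasterUsername:       aws.String("admin"),
                MasterUserPassword:   aws.String("replace-me"),
                MultiAZ:              aws.Bool(false),
                PubliclyAccessible:   aws.Bool(false),
            })
            if err != nil {
                log.Fatal(err)
            }
        }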

    ", + "CreateDBInstanceReadReplicaMessage$AutoMinorVersionUpgrade": "

    Indicates that minor engine upgrades will be applied automatically to the Read Replica during the maintenance window.

    Default: Inherits from the source DB instance

    ", + "CreateDBInstanceReadReplicaMessage$PubliclyAccessible": "

    Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

    Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

    • Default VPC: true
    • VPC: false

    If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private.

    ", + "CreateEventSubscriptionMessage$Enabled": "

    A Boolean value; set to true to activate the subscription, set to false to create the subscription but not activate it.

    ", + "DescribeDBEngineVersionsMessage$ListSupportedCharacterSets": "

    If this parameter is specified, and if the requested engine supports the CharacterSetName parameter for CreateDBInstance, the response includes a list of supported character sets for each engine version.

    ", + "DescribeOrderableDBInstanceOptionsMessage$Vpc": "

    The VPC filter value. Specify this parameter to show only the available VPC or non-VPC offerings.

    ", + "DescribeReservedDBInstancesMessage$MultiAZ": "

    The Multi-AZ filter value. Specify this parameter to show only those reservations matching the specified Multi-AZ parameter.

    ", + "DescribeReservedDBInstancesOfferingsMessage$MultiAZ": "

    The Multi-AZ filter value. Specify this parameter to show only the available offerings matching the specified Multi-AZ parameter.

    ", + "ModifyDBInstanceMessage$MultiAZ": "

    Specifies if the DB instance is a Multi-AZ deployment. Changing this parameter does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

    Constraints: Cannot be specified if the DB instance is a Read Replica.

    ", + "ModifyDBInstanceMessage$AutoMinorVersionUpgrade": "

    Indicates that minor version upgrades will be applied automatically to the DB instance during the maintenance window. Changing this parameter does not result in an outage except in the following case, and the change is asynchronously applied as soon as possible: an outage will result if this parameter is set to true during the maintenance window, a newer minor version is available, and RDS has enabled auto patching for that engine version.

    ", + "ModifyEventSubscriptionMessage$Enabled": "

    A Boolean value; set to true to activate the subscription.

    ", + "PendingModifiedValues$MultiAZ": "

    Indicates that the Single-AZ DB instance is to change to a Multi-AZ deployment.

    ", + "RebootDBInstanceMessage$ForceFailover": "

    When true, the reboot will be conducted through a Multi-AZ failover.

    Constraint: You cannot specify true if the instance is not configured for Multi-AZ.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$MultiAZ": "

    Specifies if the DB instance is a Multi-AZ deployment.

    Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$PubliclyAccessible": "

    Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

    Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

    • Default VPC: true
    • VPC: false

    If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$AutoMinorVersionUpgrade": "

    Indicates that minor version upgrades will be applied automatically to the DB instance during the maintenance window.

    ", + "RestoreDBInstanceToPointInTimeMessage$MultiAZ": "

    Specifies if the DB instance is a Multi-AZ deployment.

    Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    ", + "RestoreDBInstanceToPointInTimeMessage$PubliclyAccessible": "

    Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

    Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

    • Default VPC: true
    • VPC: false

    If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private.

    ", + "RestoreDBInstanceToPointInTimeMessage$AutoMinorVersionUpgrade": "

    Indicates that minor version upgrades will be applied automatically to the DB instance during the maintenance window.

    " + } + }, + "CharacterSet": { + "base": "

    This data type is used as a response element in the action DescribeDBEngineVersions.

    ", + "refs": { + "DBEngineVersion$DefaultCharacterSet": "

    The default character set for new instances of this engine version, if the CharacterSetName parameter of the CreateDBInstance API is not specified.

    ", + "SupportedCharacterSetsList$member": null + } + }, + "CopyDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CopyDBParameterGroupResult": { + "base": null, + "refs": { + } + }, + "CopyDBSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "CopyDBSnapshotResult": { + "base": null, + "refs": { + } + }, + "CopyOptionGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CopyOptionGroupResult": { + "base": null, + "refs": { + } + }, + "CreateDBInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBInstanceReadReplicaMessage": { + "base": null, + "refs": { + } + }, + "CreateDBInstanceReadReplicaResult": { + "base": null, + "refs": { + } + }, + "CreateDBInstanceResult": { + "base": null, + "refs": { + } + }, + "CreateDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBParameterGroupResult": { + "base": null, + "refs": { + } + }, + "CreateDBSecurityGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBSecurityGroupResult": { + "base": null, + "refs": { + } + }, + "CreateDBSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBSnapshotResult": { + "base": null, + "refs": { + } + }, + "CreateDBSubnetGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBSubnetGroupResult": { + "base": null, + "refs": { + } + }, + "CreateEventSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateEventSubscriptionResult": { + "base": null, + "refs": { + } + }, + "CreateOptionGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateOptionGroupResult": { + "base": null, + "refs": { + } + }, + "DBEngineVersion": { + "base": "

    This data type is used as a response element in the action DescribeDBEngineVersions.

    ", + "refs": { + "DBEngineVersionList$member": null + } + }, + "DBEngineVersionList": { + "base": null, + "refs": { + "DBEngineVersionMessage$DBEngineVersions": "

    A list of DBEngineVersion elements.

    " + } + }, + "DBEngineVersionMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBEngineVersions action.

    ", + "refs": { + } + }, + "DBInstance": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBInstances action.

    ", + "refs": { + "CreateDBInstanceReadReplicaResult$DBInstance": null, + "CreateDBInstanceResult$DBInstance": null, + "DBInstanceList$member": null, + "DeleteDBInstanceResult$DBInstance": null, + "ModifyDBInstanceResult$DBInstance": null, + "PromoteReadReplicaResult$DBInstance": null, + "RebootDBInstanceResult$DBInstance": null, + "RestoreDBInstanceFromDBSnapshotResult$DBInstance": null, + "RestoreDBInstanceToPointInTimeResult$DBInstance": null + } + }, + "DBInstanceAlreadyExistsFault": { + "base": "

    User already has a DB instance with the given identifier.

    ", + "refs": { + } + }, + "DBInstanceList": { + "base": null, + "refs": { + "DBInstanceMessage$DBInstances": "

    A list of DBInstance instances.

    " + } + }, + "DBInstanceMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBInstances action.

    ", + "refs": { + } + }, + "DBInstanceNotFoundFault": { + "base": "

    DBInstanceIdentifier does not refer to an existing DB instance.

    ", + "refs": { + } + }, + "DBInstanceStatusInfo": { + "base": "

    Provides a list of status information for a DB instance.

    ", + "refs": { + "DBInstanceStatusInfoList$member": null + } + }, + "DBInstanceStatusInfoList": { + "base": null, + "refs": { + "DBInstance$StatusInfos": "

    The status of a Read Replica. If the instance is not a Read Replica, this will be blank.

    " + } + }, + "DBLogFileNotFoundFault": { + "base": "

    LogFileName does not refer to an existing DB log file.

    ", + "refs": { + } + }, + "DBParameterGroup": { + "base": "

    Contains the result of a successful invocation of the CreateDBParameterGroup action.

    This data type is used as a request parameter in the DeleteDBParameterGroup action, and as a response element in the DescribeDBParameterGroups action.

    ", + "refs": { + "CopyDBParameterGroupResult$DBParameterGroup": null, + "CreateDBParameterGroupResult$DBParameterGroup": null, + "DBParameterGroupList$member": null + } + }, + "DBParameterGroupAlreadyExistsFault": { + "base": "

    A DB parameter group with the same name exists.

    ", + "refs": { + } + }, + "DBParameterGroupDetails": { + "base": "

    Contains the result of a successful invocation of the DescribeDBParameters action.

    ", + "refs": { + } + }, + "DBParameterGroupList": { + "base": null, + "refs": { + "DBParameterGroupsMessage$DBParameterGroups": "

    A list of DBParameterGroup instances.

    " + } + }, + "DBParameterGroupNameMessage": { + "base": "

    Contains the result of a successful invocation of the ModifyDBParameterGroup or ResetDBParameterGroup action.

    ", + "refs": { + } + }, + "DBParameterGroupNotFoundFault": { + "base": "

    DBParameterGroupName does not refer to an existing DB parameter group.

    ", + "refs": { + } + }, + "DBParameterGroupQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB parameter groups.

    ", + "refs": { + } + }, + "DBParameterGroupStatus": { + "base": "

    The status of the DB parameter group.

    This data type is used as a response element in the following actions:

    ", + "refs": { + "DBParameterGroupStatusList$member": null + } + }, + "DBParameterGroupStatusList": { + "base": null, + "refs": { + "DBInstance$DBParameterGroups": "

    Provides the list of DB parameter groups applied to this DB instance.

    " + } + }, + "DBParameterGroupsMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBParameterGroups action.

    ", + "refs": { + } + }, + "DBSecurityGroup": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBSecurityGroups action.

    ", + "refs": { + "AuthorizeDBSecurityGroupIngressResult$DBSecurityGroup": null, + "CreateDBSecurityGroupResult$DBSecurityGroup": null, + "DBSecurityGroups$member": null, + "RevokeDBSecurityGroupIngressResult$DBSecurityGroup": null + } + }, + "DBSecurityGroupAlreadyExistsFault": { + "base": "

    A DB security group with the name specified in DBSecurityGroupName already exists.

    ", + "refs": { + } + }, + "DBSecurityGroupMembership": { + "base": "

    This data type is used as a response element in the following actions:

    ", + "refs": { + "DBSecurityGroupMembershipList$member": null + } + }, + "DBSecurityGroupMembershipList": { + "base": null, + "refs": { + "DBInstance$DBSecurityGroups": "

    Provides a list of DB security group elements containing only DBSecurityGroup.Name and DBSecurityGroup.Status subelements.

    ", + "Option$DBSecurityGroupMemberships": "

    If the option requires access to a port, then this DB security group allows access to the port.

    " + } + }, + "DBSecurityGroupMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBSecurityGroups action.

    ", + "refs": { + } + }, + "DBSecurityGroupNameList": { + "base": null, + "refs": { + "CreateDBInstanceMessage$DBSecurityGroups": "

    A list of DB security groups to associate with this DB instance.

    Default: The default DB security group for the database engine.

    ", + "ModifyDBInstanceMessage$DBSecurityGroups": "

    A list of DB security groups to authorize on this DB instance. Changing this setting does not result in an outage and the change is asynchronously applied as soon as possible.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "OptionConfiguration$DBSecurityGroupMemberships": "

    A list of DBSecurityGroupMembership name strings used for this option.

    " + } + }, + "DBSecurityGroupNotFoundFault": { + "base": "

    DBSecurityGroupName does not refer to an existing DB security group.

    ", + "refs": { + } + }, + "DBSecurityGroupNotSupportedFault": { + "base": "

    A DB security group is not allowed for this action.

    ", + "refs": { + } + }, + "DBSecurityGroupQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB security groups.

    ", + "refs": { + } + }, + "DBSecurityGroups": { + "base": null, + "refs": { + "DBSecurityGroupMessage$DBSecurityGroups": "

    A list of DBSecurityGroup instances.

    " + } + }, + "DBSnapshot": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBSnapshots action.

    ", + "refs": { + "CopyDBSnapshotResult$DBSnapshot": null, + "CreateDBSnapshotResult$DBSnapshot": null, + "DBSnapshotList$member": null, + "DeleteDBSnapshotResult$DBSnapshot": null + } + }, + "DBSnapshotAlreadyExistsFault": { + "base": "

    DBSnapshotIdentifier is already used by an existing snapshot.

    ", + "refs": { + } + }, + "DBSnapshotList": { + "base": null, + "refs": { + "DBSnapshotMessage$DBSnapshots": "

    A list of DBSnapshot instances.

    " + } + }, + "DBSnapshotMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBSnapshots action.

    ", + "refs": { + } + }, + "DBSnapshotNotFoundFault": { + "base": "

    DBSnapshotIdentifier does not refer to an existing DB snapshot.

    ", + "refs": { + } + }, + "DBSubnetGroup": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBSubnetGroups action.

    ", + "refs": { + "CreateDBSubnetGroupResult$DBSubnetGroup": null, + "DBInstance$DBSubnetGroup": "

    Specifies information on the subnet group associated with the DB instance, including the name, description, and subnets in the subnet group.

    ", + "DBSubnetGroups$member": null, + "ModifyDBSubnetGroupResult$DBSubnetGroup": null + } + }, + "DBSubnetGroupAlreadyExistsFault": { + "base": "

    DBSubnetGroupName is already used by an existing DB subnet group.

    ", + "refs": { + } + }, + "DBSubnetGroupDoesNotCoverEnoughAZs": { + "base": "

    Subnets in the DB subnet group should cover at least two Availability Zones unless there is only one Availability Zone.

    ", + "refs": { + } + }, + "DBSubnetGroupMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBSubnetGroups action.

    ", + "refs": { + } + }, + "DBSubnetGroupNotAllowedFault": { + "base": "

    Indicates that the DBSubnetGroup should not be specified while creating read replicas that lie in the same region as the source instance.

    ", + "refs": { + } + }, + "DBSubnetGroupNotFoundFault": { + "base": "

    DBSubnetGroupName does not refer to an existing DB subnet group.

    ", + "refs": { + } + }, + "DBSubnetGroupQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB subnet groups.

    ", + "refs": { + } + }, + "DBSubnetGroups": { + "base": null, + "refs": { + "DBSubnetGroupMessage$DBSubnetGroups": "

    A list of DBSubnetGroup instances.

    " + } + }, + "DBSubnetQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of subnets in a DB subnet group.

    ", + "refs": { + } + }, + "DBUpgradeDependencyFailureFault": { + "base": "

    The DB upgrade failed because a resource the DB depends on could not be modified.

    ", + "refs": { + } + }, + "DeleteDBInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBInstanceResult": { + "base": null, + "refs": { + } + }, + "DeleteDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBSecurityGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBSnapshotResult": { + "base": null, + "refs": { + } + }, + "DeleteDBSubnetGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteEventSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteEventSubscriptionResult": { + "base": null, + "refs": { + } + }, + "DeleteOptionGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBEngineVersionsMessage": { + "base": null, + "refs": { + } + }, + "DescribeDBInstancesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBLogFilesDetails": { + "base": "

    This data type is used as a response element to DescribeDBLogFiles.

    ", + "refs": { + "DescribeDBLogFilesList$member": null + } + }, + "DescribeDBLogFilesList": { + "base": null, + "refs": { + "DescribeDBLogFilesResponse$DescribeDBLogFiles": "

    The DB log files returned.

    " + } + }, + "DescribeDBLogFilesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBLogFilesResponse": { + "base": "

    The response from a call to DescribeDBLogFiles.

    ", + "refs": { + } + }, + "DescribeDBParameterGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBParametersMessage": { + "base": null, + "refs": { + } + }, + "DescribeDBSecurityGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBSnapshotsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBSubnetGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEngineDefaultParametersMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEngineDefaultParametersResult": { + "base": null, + "refs": { + } + }, + "DescribeEventCategoriesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEventSubscriptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEventsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeOptionGroupOptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeOptionGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeOrderableDBInstanceOptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeReservedDBInstancesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeReservedDBInstancesOfferingsMessage": { + "base": "

    ", + "refs": { + } + }, + "Double": { + "base": null, + "refs": { + "RecurringCharge$RecurringChargeAmount": "

    The amount of the recurring charge.

    ", + "ReservedDBInstance$FixedPrice": "

    The fixed price charged for this reserved DB instance.

    ", + "ReservedDBInstance$UsagePrice": "

    The hourly price charged for this reserved DB instance.

    ", + "ReservedDBInstancesOffering$FixedPrice": "

    The fixed price charged for this offering.

    ", + "ReservedDBInstancesOffering$UsagePrice": "

    The hourly price charged for this offering.

    " + } + }, + "DownloadDBLogFilePortionDetails": { + "base": "

    This data type is used as a response element to DownloadDBLogFilePortion.

    ", + "refs": { + } + }, + "DownloadDBLogFilePortionMessage": { + "base": "

    ", + "refs": { + } + }, + "EC2SecurityGroup": { + "base": "

    This data type is used as a response element in the following actions:

    ", + "refs": { + "EC2SecurityGroupList$member": null + } + }, + "EC2SecurityGroupList": { + "base": null, + "refs": { + "DBSecurityGroup$EC2SecurityGroups": "

    Contains a list of EC2SecurityGroup elements.

    " + } + }, + "Endpoint": { + "base": "

    This data type is used as a response element in the following actions:

    ", + "refs": { + "DBInstance$Endpoint": "

    Specifies the connection endpoint.

    " + } + }, + "EngineDefaults": { + "base": "

    Contains the result of a successful invocation of the DescribeEngineDefaultParameters action.

    ", + "refs": { + "DescribeEngineDefaultParametersResult$EngineDefaults": null + } + }, + "Event": { + "base": "

    This data type is used as a response element in the DescribeEvents action.

    ", + "refs": { + "EventList$member": null + } + }, + "EventCategoriesList": { + "base": null, + "refs": { + "CreateEventSubscriptionMessage$EventCategories": "

    A list of event categories for a SourceType that you want to subscribe to. You can see a list of the categories for a given SourceType in the Events topic in the Amazon RDS User Guide or by using the DescribeEventCategories action.

    ", + "DescribeEventsMessage$EventCategories": "

    A list of event categories that trigger notifications for an event notification subscription.

    ", + "Event$EventCategories": "

    Specifies the category for the event.

    ", + "EventCategoriesMap$EventCategories": "

    The event categories for the specified source type.

    ", + "EventSubscription$EventCategoriesList": "

    A list of event categories for the RDS event notification subscription.

    ", + "ModifyEventSubscriptionMessage$EventCategories": "

    A list of event categories for a SourceType that you want to subscribe to. You can see a list of the categories for a given SourceType in the Events topic in the Amazon RDS User Guide or by using the DescribeEventCategories action.

    " + } + }, + "EventCategoriesMap": { + "base": "

    Contains the results of a successful invocation of the DescribeEventCategories action.

    ", + "refs": { + "EventCategoriesMapList$member": null + } + }, + "EventCategoriesMapList": { + "base": null, + "refs": { + "EventCategoriesMessage$EventCategoriesMapList": "

    A list of EventCategoriesMap data types.

    " + } + }, + "EventCategoriesMessage": { + "base": "

    Data returned from the DescribeEventCategories action.

    ", + "refs": { + } + }, + "EventList": { + "base": null, + "refs": { + "EventsMessage$Events": "

    A list of Event instances.

    " + } + }, + "EventSubscription": { + "base": "

    Contains the results of a successful invocation of the DescribeEventSubscriptions action.

    ", + "refs": { + "AddSourceIdentifierToSubscriptionResult$EventSubscription": null, + "CreateEventSubscriptionResult$EventSubscription": null, + "DeleteEventSubscriptionResult$EventSubscription": null, + "EventSubscriptionsList$member": null, + "ModifyEventSubscriptionResult$EventSubscription": null, + "RemoveSourceIdentifierFromSubscriptionResult$EventSubscription": null + } + }, + "EventSubscriptionQuotaExceededFault": { + "base": "

    You have reached the maximum number of event subscriptions.

    ", + "refs": { + } + }, + "EventSubscriptionsList": { + "base": null, + "refs": { + "EventSubscriptionsMessage$EventSubscriptionsList": "

    A list of EventSubscriptions data types.

    " + } + }, + "EventSubscriptionsMessage": { + "base": "

    Data returned by the DescribeEventSubscriptions action.

    ", + "refs": { + } + }, + "EventsMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeEvents action.

    ", + "refs": { + } + }, + "Filter": { + "base": null, + "refs": { + "FilterList$member": null + } + }, + "FilterList": { + "base": null, + "refs": { + "DescribeDBEngineVersionsMessage$Filters": "

    Not currently supported.

    ", + "DescribeDBInstancesMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeDBLogFilesMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeDBParameterGroupsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeDBParametersMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeDBSecurityGroupsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeDBSnapshotsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeDBSubnetGroupsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeEngineDefaultParametersMessage$Filters": "

    Not currently supported.

    ", + "DescribeEventCategoriesMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeEventSubscriptionsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeEventsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeOptionGroupOptionsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeOptionGroupsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeOrderableDBInstanceOptionsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeReservedDBInstancesMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeReservedDBInstancesOfferingsMessage$Filters": "

    This parameter is not currently supported.

    ", + "ListTagsForResourceMessage$Filters": "

    This parameter is not currently supported.

    " + } + }, + "FilterValueList": { + "base": null, + "refs": { + "Filter$Values": "

    This parameter is not currently supported.

    " + } + }, + "IPRange": { + "base": "

    This data type is used as a response element in the DescribeDBSecurityGroups action.

    ", + "refs": { + "IPRangeList$member": null + } + }, + "IPRangeList": { + "base": null, + "refs": { + "DBSecurityGroup$IPRanges": "

    Contains a list of IPRange elements.

    " + } + }, + "InstanceQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB instances.

    ", + "refs": { + } + }, + "InsufficientDBInstanceCapacityFault": { + "base": "

    Specified DB instance class is not available in the specified Availability Zone.

    ", + "refs": { + } + }, + "Integer": { + "base": null, + "refs": { + "DBInstance$AllocatedStorage": "

    Specifies the allocated storage size specified in gigabytes.

    ", + "DBInstance$BackupRetentionPeriod": "

    Specifies the number of days for which automatic DB snapshots are retained.

    ", + "DBSnapshot$AllocatedStorage": "

    Specifies the allocated storage size in gigabytes (GB).

    ", + "DBSnapshot$Port": "

    Specifies the port that the database engine was listening on at the time of the snapshot.

    ", + "DBSnapshot$PercentProgress": "

    The percentage of the estimated data that has been transferred.

    ", + "DownloadDBLogFilePortionMessage$NumberOfLines": "

    The number of lines to download.

    If the NumberOfLines parameter is specified, then the block of lines returned can be from the beginning or the end of the log file, depending on the value of the Marker parameter.

    • If neither Marker nor NumberOfLines is specified, the entire log file is returned.

    • If NumberOfLines is specified and Marker is not specified, then the most recent lines from the end of the log file are returned.

    • If Marker is specified as \"0\", then the specified number of lines from the beginning of the log file are returned.

    • You can download the log file in blocks of lines by specifying the size of the block using the NumberOfLines parameter, and by specifying a value of \"0\" for the Marker parameter in your first request. Include the Marker value returned in the response as the Marker value for the next request, continuing until the AdditionalDataPending response element returns false, as in the sketch below.
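
    A minimal block-download loop with aws-sdk-go (instance identifier and log file name are placeholders):

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/rds"
        )

        func main() {
            svc := rds.New(session.Must(session.NewSession()))
            marker := aws.String("0") // "0" starts at the beginning of the file
            for {
                out, err := svc.DownloadDBLogFilePortion(&rds.DownloadDBLogFilePortionInput{
                    DBInstanceIdentifier: aws.String("mydb"),
                    LogFileName:          aws.String("error/mysql-error.log"),
                    Marker:               marker,
                    NumberOfLines:        aws.Int64(1000),
                })
                if err != nil {
                    log.Fatal(err)
                }
                fmt.Print(aws.StringValue(out.LogFileData))
                if !aws.BoolValue(out.AdditionalDataPending) {
                    break // no more blocks to fetch
                }
                marker = out.Marker // resume from where this block ended
            }
        }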

    ", + "Endpoint$Port": "

    Specifies the port that the database engine is listening on.

    ", + "ReservedDBInstance$Duration": "

    The duration of the reservation in seconds.

    ", + "ReservedDBInstance$DBInstanceCount": "

    The number of reserved DB instances.

    ", + "ReservedDBInstancesOffering$Duration": "

    The duration of the offering in seconds.

    " + } + }, + "IntegerOptional": { + "base": null, + "refs": { + "CreateDBInstanceMessage$AllocatedStorage": "

    The amount of storage (in gigabytes) to be initially allocated for the database instance.

    Type: Integer

    MySQL

    Constraints: Must be an integer from 5 to 3072.

    PostgreSQL

    Constraints: Must be an integer from 5 to 3072.

    Oracle

    Constraints: Must be an integer from 10 to 3072.

    SQL Server

    Constraints: Must be an integer from 200 to 1024 (Standard Edition and Enterprise Edition) or from 20 to 1024 (Express Edition and Web Edition)

    ", + "CreateDBInstanceMessage$BackupRetentionPeriod": "

    The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

    Default: 1

    Constraints:

    • Must be a value from 0 to 35
    • Cannot be set to 0 if the DB instance is a source to Read Replicas
    ", + "CreateDBInstanceMessage$Port": "

    The port number on which the database accepts connections.

    MySQL

    Default: 3306

    Valid Values: 1150-65535

    Type: Integer

    PostgreSQL

    Default: 5432

    Valid Values: 1150-65535

    Type: Integer

    Oracle

    Default: 1521

    Valid Values: 1150-65535

    SQL Server

    Default: 1433

    Valid Values: 1150-65535 except for 1434, 3389, 47001, and 49152 through 49156.

    ", + "CreateDBInstanceMessage$Iops": "

    The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance.

    Constraints: To use PIOPS, this value must be an integer greater than 1000.

    ", + "CreateDBInstanceReadReplicaMessage$Port": "

    The port number that the DB instance uses for connections.

    Default: Inherits from the source DB instance

    Valid Values: 1150-65535

    ", + "CreateDBInstanceReadReplicaMessage$Iops": "

    The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance.

    ", + "DBInstance$Iops": "

    Specifies the Provisioned IOPS (I/O operations per second) value.

    ", + "DBSnapshot$Iops": "

    Specifies the Provisioned IOPS (I/O operations per second) value of the DB instance at the time of the snapshot.

    ", + "DescribeDBEngineVersionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so that the following results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100
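
    The SDK's generated Pages helper follows the marker automatically. A sketch with aws-sdk-go (engine name is a placeholder):

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/rds"
        )

        func main() {
            svc := rds.New(session.Must(session.NewSession()))
            // Fetch up to MaxRecords per request until no marker is returned.
            err := svc.DescribeDBEngineVersionsPages(&rds.DescribeDBEngineVersionsInput{
                Engine:     aws.String("mysql"),
                MaxRecords: aws.Int64(20),
            }, func(page *rds.DescribeDBEngineVersionsOutput, lastPage bool) bool {
                for _, v := range page.DBEngineVersions {
                    fmt.Println(aws.StringValue(v.EngineVersion))
                }
                return true // continue paging
            })
            if err != nil {
                log.Fatal(err)
            }
        }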

    ", + "DescribeDBInstancesMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeDBLogFilesMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    ", + "DescribeDBParameterGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeDBParametersMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeDBSecurityGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeDBSnapshotsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeDBSubnetGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeEngineDefaultParametersMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeEventSubscriptionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeEventsMessage$Duration": "

    The number of minutes to retrieve events for.

    Default: 60

    ", + "DescribeEventsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeOptionGroupOptionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeOptionGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeOrderableDBInstanceOptionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeReservedDBInstancesMessage$MaxRecords": "

    The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so that the following results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeReservedDBInstancesOfferingsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so that the following results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "ModifyDBInstanceMessage$AllocatedStorage": "

    The new storage capacity of the RDS instance. Changing this setting does not result in an outage and the change is applied during the next maintenance window unless ApplyImmediately is set to true for this request.

    MySQL

    Default: Uses existing setting

    Valid Values: 5-3072

    Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

    Type: Integer

    PostgreSQL

    Default: Uses existing setting

    Valid Values: 5-3072

    Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

    Type: Integer

    Oracle

    Default: Uses existing setting

    Valid Values: 10-3072

    Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

    SQL Server

    Cannot be modified.

    If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance will be available for use, but may experience performance degradation. While the migration takes place, nightly backups for the instance will be suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a read replica for the instance, and creating a DB snapshot of the instance.

    ", + "ModifyDBInstanceMessage$BackupRetentionPeriod": "

    The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

    Changing this parameter can result in an outage if you change from 0 to a non-zero value or from a non-zero value to 0. These changes are applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. If you change the parameter from one non-zero value to another non-zero value, the change is asynchronously applied as soon as possible.

    Default: Uses existing setting

    Constraints:

    • Must be a value from 0 to 35
    • Can be specified for a MySQL Read Replica only if the source is running MySQL 5.6
    • Can be specified for a PostgreSQL Read Replica only if the source is running PostgreSQL 9.3.5
    • Cannot be set to 0 if the DB instance is a source to Read Replicas
    ", + "ModifyDBInstanceMessage$Iops": "

    The new Provisioned IOPS (I/O operations per second) value for the RDS instance. Changing this setting does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

    Default: Uses existing setting

    Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value. If you are migrating from Provisioned IOPS to standard storage, set this value to 0. The DB instance will require a reboot for the change in storage type to take effect.

    SQL Server

    Setting the IOPS value for the SQL Server database engine is not supported.

    Type: Integer

    If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance will be available for use, but may experience performance degradation. While the migration takes place, nightly backups for the instance will be suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a read replica for the instance, and creating a DB snapshot of the instance.

    ", + "Option$Port": "

    If required, the port configured for this option to use.

    ", + "OptionConfiguration$Port": "

    The optional port for the option.

    ", + "OptionGroupOption$DefaultPort": "

    If the option requires a port, specifies the default port for the option.

    ", + "PendingModifiedValues$AllocatedStorage": "

    Contains the new AllocatedStorage size for the DB instance that will be applied or is in progress.

    ", + "PendingModifiedValues$Port": "

    Specifies the pending port for the DB instance.

    ", + "PendingModifiedValues$BackupRetentionPeriod": "

    Specifies the pending number of days for which automated backups are retained.

    ", + "PendingModifiedValues$Iops": "

    Specifies the new Provisioned IOPS value for the DB instance that will be applied or is being applied.

    ", + "PromoteReadReplicaMessage$BackupRetentionPeriod": "

    The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

    Default: 1

    Constraints:

    • Must be a value from 0 to 8
    ", + "PurchaseReservedDBInstancesOfferingMessage$DBInstanceCount": "

    The number of instances to reserve.

    Default: 1

    ", + "RestoreDBInstanceFromDBSnapshotMessage$Port": "

    The port number on which the database accepts connections.

    Default: The same port as the original DB instance

    Constraints: Value must be 1150-65535

    ", + "RestoreDBInstanceFromDBSnapshotMessage$Iops": "

    Specifies the amount of provisioned IOPS for the DB instance, expressed in I/O operations per second. If this parameter is not specified, the IOPS value will be taken from the backup. If this parameter is set to 0, the new instance will be converted to a non-PIOPS instance, which will take additional time, though your DB instance will be available for connections before the conversion starts.

    Constraints: Must be an integer greater than 1000.

    SQL Server

    Setting the IOPS value for the SQL Server database engine is not supported.

    ", + "RestoreDBInstanceToPointInTimeMessage$Port": "

    The port number on which the database accepts connections.

    Constraints: Value must be 1150-65535

    Default: The same port as the original DB instance.

    ", + "RestoreDBInstanceToPointInTimeMessage$Iops": "

    The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance.

    Constraints: Must be an integer greater than 1000.

    SQL Server

    Setting the IOPS value for the SQL Server database engine is not supported.

    " + } + }, + "InvalidDBInstanceStateFault": { + "base": "

    The specified DB instance is not in the available state.

    ", + "refs": { + } + }, + "InvalidDBParameterGroupStateFault": { + "base": "

    The DB parameter group cannot be deleted because it is in use.

    ", + "refs": { + } + }, + "InvalidDBSecurityGroupStateFault": { + "base": "

    The state of the DB security group does not allow deletion.

    ", + "refs": { + } + }, + "InvalidDBSnapshotStateFault": { + "base": "

    The state of the DB snapshot does not allow deletion.

    ", + "refs": { + } + }, + "InvalidDBSubnetGroupFault": { + "base": "

    Indicates the DBSubnetGroup does not belong to the same VPC as that of an existing cross region read replica of the same source instance.

    ", + "refs": { + } + }, + "InvalidDBSubnetGroupStateFault": { + "base": "

    The DB subnet group cannot be deleted because it is in use.

    ", + "refs": { + } + }, + "InvalidDBSubnetStateFault": { + "base": "

    The DB subnet is not in the available state.

    ", + "refs": { + } + }, + "InvalidEventSubscriptionStateFault": { + "base": "

    This error can occur if someone else is modifying a subscription. You should retry the action.

    ", + "refs": { + } + }, + "InvalidOptionGroupStateFault": { + "base": "

    The option group is not in the available state.

    ", + "refs": { + } + }, + "InvalidRestoreFault": { + "base": "

    Cannot restore from a VPC backup to a non-VPC DB instance.

    ", + "refs": { + } + }, + "InvalidSubnet": { + "base": "

    The requested subnet is invalid, or multiple subnets were requested that are not all in a common VPC.

    ", + "refs": { + } + }, + "InvalidVPCNetworkStateFault": { + "base": "

    The DB subnet group does not cover all Availability Zones after it is created, because of changes that were made.

    ", + "refs": { + } + }, + "KeyList": { + "base": null, + "refs": { + "RemoveTagsFromResourceMessage$TagKeys": "

    The tag key (name) of the tag to be removed.

    " + } + }, + "ListTagsForResourceMessage": { + "base": "

    ", + "refs": { + } + }, + "Long": { + "base": null, + "refs": { + "DescribeDBLogFilesDetails$LastWritten": "

    A POSIX timestamp when the last log entry was written.

    ", + "DescribeDBLogFilesDetails$Size": "

    The size, in bytes, of the log file for the specified DB instance.

    ", + "DescribeDBLogFilesMessage$FileLastWritten": "

    Filters the available log files for files written since the specified date, in POSIX timestamp format.

    ", + "DescribeDBLogFilesMessage$FileSize": "

    Filters the available log files for files larger than the specified size.

    " + } + }, + "ModifyDBInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyDBInstanceResult": { + "base": null, + "refs": { + } + }, + "ModifyDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyDBSubnetGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyDBSubnetGroupResult": { + "base": null, + "refs": { + } + }, + "ModifyEventSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyEventSubscriptionResult": { + "base": null, + "refs": { + } + }, + "ModifyOptionGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyOptionGroupResult": { + "base": null, + "refs": { + } + }, + "Option": { + "base": "

    Option details.

    ", + "refs": { + "OptionsList$member": null + } + }, + "OptionConfiguration": { + "base": "

    A list of all available options.

    ", + "refs": { + "OptionConfigurationList$member": null + } + }, + "OptionConfigurationList": { + "base": null, + "refs": { + "ModifyOptionGroupMessage$OptionsToInclude": "

    Options in this list are added to the option group or, if already present, the specified configuration is used to update the existing configuration.

    " + } + }, + "OptionGroup": { + "base": "

    ", + "refs": { + "CopyOptionGroupResult$OptionGroup": null, + "CreateOptionGroupResult$OptionGroup": null, + "ModifyOptionGroupResult$OptionGroup": null, + "OptionGroupsList$member": null + } + }, + "OptionGroupAlreadyExistsFault": { + "base": "

    The option group you are trying to create already exists.

    ", + "refs": { + } + }, + "OptionGroupMembership": { + "base": "

    Provides information on the option groups the DB instance is a member of.

    ", + "refs": { + "OptionGroupMembershipList$member": null + } + }, + "OptionGroupMembershipList": { + "base": null, + "refs": { + "DBInstance$OptionGroupMemberships": "

    Provides the list of option group memberships for this DB instance.

    " + } + }, + "OptionGroupNotFoundFault": { + "base": "

    The specified option group could not be found.

    ", + "refs": { + } + }, + "OptionGroupOption": { + "base": "

    Available option.

    ", + "refs": { + "OptionGroupOptionsList$member": null + } + }, + "OptionGroupOptionSetting": { + "base": "

    Option group option settings are used to display settings available for each option with their default values and other information. These values are used with the DescribeOptionGroupOptions action.

    ", + "refs": { + "OptionGroupOptionSettingsList$member": null + } + }, + "OptionGroupOptionSettingsList": { + "base": null, + "refs": { + "OptionGroupOption$OptionGroupOptionSettings": "

    Specifies the option settings that are available (and the default value) for each option in an option group.

    " + } + }, + "OptionGroupOptionsList": { + "base": "

    List of available option group options.

    ", + "refs": { + "OptionGroupOptionsMessage$OptionGroupOptions": null + } + }, + "OptionGroupOptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "OptionGroupQuotaExceededFault": { + "base": "

    The quota of 20 option groups was exceeded for this AWS account.

    ", + "refs": { + } + }, + "OptionGroups": { + "base": "

    List of option groups.

    ", + "refs": { + } + }, + "OptionGroupsList": { + "base": null, + "refs": { + "OptionGroups$OptionGroupsList": "

    List of option groups.

    " + } + }, + "OptionNamesList": { + "base": null, + "refs": { + "ModifyOptionGroupMessage$OptionsToRemove": "

    Options in this list are removed from the option group.

    " + } + }, + "OptionSetting": { + "base": "

    Option settings are the actual settings being applied or configured for that option. It is used when you modify an option group or describe option groups. For example, the NATIVE_NETWORK_ENCRYPTION option has a setting called SQLNET.ENCRYPTION_SERVER that can have several different values.

    ", + "refs": { + "OptionSettingConfigurationList$member": null, + "OptionSettingsList$member": null + } + }, + "OptionSettingConfigurationList": { + "base": null, + "refs": { + "Option$OptionSettings": "

    The option settings for this option.

    " + } + }, + "OptionSettingsList": { + "base": null, + "refs": { + "OptionConfiguration$OptionSettings": "

    The option settings to include in an option group.

    " + } + }, + "OptionsDependedOn": { + "base": null, + "refs": { + "OptionGroupOption$OptionsDependedOn": "

    List of all options that are prerequisites for this option.

    " + } + }, + "OptionsList": { + "base": null, + "refs": { + "OptionGroup$Options": "

    Indicates what options are available in the option group.

    " + } + }, + "OrderableDBInstanceOption": { + "base": "

    Contains a list of available options for a DB instance.

    This data type is used as a response element in the DescribeOrderableDBInstanceOptions action.

    ", + "refs": { + "OrderableDBInstanceOptionsList$member": null + } + }, + "OrderableDBInstanceOptionsList": { + "base": null, + "refs": { + "OrderableDBInstanceOptionsMessage$OrderableDBInstanceOptions": "

    An OrderableDBInstanceOption structure containing information about orderable options for the DB instance.

    " + } + }, + "OrderableDBInstanceOptionsMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeOrderableDBInstanceOptions action.

    ", + "refs": { + } + }, + "Parameter": { + "base": "

    This data type is used as a request parameter in the ModifyDBParameterGroup and ResetDBParameterGroup actions.

    This data type is used as a response element in the DescribeEngineDefaultParameters and DescribeDBParameters actions.

    ", + "refs": { + "ParametersList$member": null + } + }, + "ParametersList": { + "base": null, + "refs": { + "DBParameterGroupDetails$Parameters": "

    A list of Parameter values.

    ", + "EngineDefaults$Parameters": "

    Contains a list of engine default parameters.

    ", + "ModifyDBParameterGroupMessage$Parameters": "

    An array of parameter names, values, and the apply method for the parameter update. At least one parameter name, value, and apply method must be supplied; subsequent arguments are optional. A maximum of 20 parameters may be modified in a single request.

    Valid Values (for the application method): immediate | pending-reboot

    You can use the immediate value with dynamic parameters only. You can use the pending-reboot value for both dynamic and static parameters, and changes are applied when you reboot the DB instance without failover. ", + "ResetDBParameterGroupMessage$Parameters": "

    An array of parameter names, values, and the apply method for the parameter update. At least one parameter name, value, and apply method must be supplied; subsequent arguments are optional. A maximum of 20 parameters may be modified in a single request.

    MySQL

    Valid Values (for Apply method): immediate | pending-reboot

    You can use the immediate value with dynamic parameters only. You can use the pending-reboot value for both dynamic and static parameters, and changes are applied when DB instance reboots.

    Oracle

    Valid Values (for Apply method): pending-reboot

    " + } + }, + "PendingModifiedValues": { + "base": "

    This data type is used as a response element in the ModifyDBInstance action.

    ", + "refs": { + "DBInstance$PendingModifiedValues": "

    Specifies that changes to the DB instance are pending. This element is only included when changes are pending. Specific changes are identified by subelements.

    " + } + }, + "PointInTimeRestoreNotEnabledFault": { + "base": "

    SourceDBInstanceIdentifier refers to a DB instance with BackupRetentionPeriod equal to 0.

    ", + "refs": { + } + }, + "PromoteReadReplicaMessage": { + "base": "

    ", + "refs": { + } + }, + "PromoteReadReplicaResult": { + "base": null, + "refs": { + } + }, + "ProvisionedIopsNotAvailableInAZFault": { + "base": "

    Provisioned IOPS not available in the specified Availability Zone.

    ", + "refs": { + } + }, + "PurchaseReservedDBInstancesOfferingMessage": { + "base": "

    ", + "refs": { + } + }, + "PurchaseReservedDBInstancesOfferingResult": { + "base": null, + "refs": { + } + }, + "ReadReplicaDBInstanceIdentifierList": { + "base": null, + "refs": { + "DBInstance$ReadReplicaDBInstanceIdentifiers": "

    Contains one or more identifiers of the Read Replicas associated with this DB instance.

    " + } + }, + "RebootDBInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "RebootDBInstanceResult": { + "base": null, + "refs": { + } + }, + "RecurringCharge": { + "base": "

    This data type is used as a response element in the DescribeReservedDBInstances and DescribeReservedDBInstancesOfferings actions.

    ", + "refs": { + "RecurringChargeList$member": null + } + }, + "RecurringChargeList": { + "base": null, + "refs": { + "ReservedDBInstance$RecurringCharges": "

    The recurring price charged to run this reserved DB instance.

    ", + "ReservedDBInstancesOffering$RecurringCharges": "

    The recurring price charged to run this reserved DB instance.

    " + } + }, + "RemoveSourceIdentifierFromSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "RemoveSourceIdentifierFromSubscriptionResult": { + "base": null, + "refs": { + } + }, + "RemoveTagsFromResourceMessage": { + "base": "

    ", + "refs": { + } + }, + "ReservedDBInstance": { + "base": "

    This data type is used as a response element in the DescribeReservedDBInstances and PurchaseReservedDBInstancesOffering actions.

    ", + "refs": { + "PurchaseReservedDBInstancesOfferingResult$ReservedDBInstance": null, + "ReservedDBInstanceList$member": null + } + }, + "ReservedDBInstanceAlreadyExistsFault": { + "base": "

    User already has a reservation with the given identifier.

    ", + "refs": { + } + }, + "ReservedDBInstanceList": { + "base": null, + "refs": { + "ReservedDBInstanceMessage$ReservedDBInstances": "

    A list of reserved DB instances.

    " + } + }, + "ReservedDBInstanceMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeReservedDBInstances action.

    ", + "refs": { + } + }, + "ReservedDBInstanceNotFoundFault": { + "base": "

    The specified reserved DB instance was not found.

    ", + "refs": { + } + }, + "ReservedDBInstanceQuotaExceededFault": { + "base": "

    Request would exceed the user's DB Instance quota.

    ", + "refs": { + } + }, + "ReservedDBInstancesOffering": { + "base": "

    This data type is used as a response element in the DescribeReservedDBInstancesOfferings action.

    ", + "refs": { + "ReservedDBInstancesOfferingList$member": null + } + }, + "ReservedDBInstancesOfferingList": { + "base": null, + "refs": { + "ReservedDBInstancesOfferingMessage$ReservedDBInstancesOfferings": "

    A list of reserved DB instance offerings.

    " + } + }, + "ReservedDBInstancesOfferingMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeReservedDBInstancesOfferings action.

    ", + "refs": { + } + }, + "ReservedDBInstancesOfferingNotFoundFault": { + "base": "

    Specified offering does not exist.

    ", + "refs": { + } + }, + "ResetDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "RestoreDBInstanceFromDBSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "RestoreDBInstanceFromDBSnapshotResult": { + "base": null, + "refs": { + } + }, + "RestoreDBInstanceToPointInTimeMessage": { + "base": "

    ", + "refs": { + } + }, + "RestoreDBInstanceToPointInTimeResult": { + "base": null, + "refs": { + } + }, + "RevokeDBSecurityGroupIngressMessage": { + "base": "

    ", + "refs": { + } + }, + "RevokeDBSecurityGroupIngressResult": { + "base": null, + "refs": { + } + }, + "SNSInvalidTopicFault": { + "base": "

    SNS has responded that there is a problem with the SNS topic specified.

    ", + "refs": { + } + }, + "SNSNoAuthorizationFault": { + "base": "

    You do not have permission to publish to the SNS topic ARN.

    ", + "refs": { + } + }, + "SNSTopicArnNotFoundFault": { + "base": "

    The SNS topic ARN does not exist.

    ", + "refs": { + } + }, + "SnapshotQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB snapshots.

    ", + "refs": { + } + }, + "SourceIdsList": { + "base": null, + "refs": { + "CreateEventSubscriptionMessage$SourceIds": "

    The list of identifiers of the event sources for which events will be returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it cannot end with a hyphen or contain two consecutive hyphens.

    Constraints:

    • If SourceIds are supplied, SourceType must also be provided.
    • If the source type is a DB instance, then a DBInstanceIdentifier must be supplied.
    • If the source type is a DB security group, a DBSecurityGroupName must be supplied.
    • If the source type is a DB parameter group, a DBParameterGroupName must be supplied.
    • If the source type is a DB snapshot, a DBSnapshotIdentifier must be supplied.
    ", + "EventSubscription$SourceIdsList": "

    A list of source IDs for the RDS event notification subscription.

    " + } + }, + "SourceNotFoundFault": { + "base": "

    The requested source could not be found.

    ", + "refs": { + } + }, + "SourceType": { + "base": null, + "refs": { + "DescribeEventsMessage$SourceType": "

    The event source to retrieve events for. If no value is specified, all events are returned.

    ", + "Event$SourceType": "

    Specifies the source type for this event.

    " + } + }, + "StorageQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed amount of storage available across all DB instances.

    ", + "refs": { + } + }, + "StorageTypeNotSupportedFault": { + "base": "

    The specified StorageType cannot be associated with the DB instance.

    ", + "refs": { + } + }, + "String": { + "base": null, + "refs": { + "AddSourceIdentifierToSubscriptionMessage$SubscriptionName": "

    The name of the RDS event notification subscription you want to add a source identifier to.

    ", + "AddSourceIdentifierToSubscriptionMessage$SourceIdentifier": "

    The identifier of the event source to be added. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it cannot end with a hyphen or contain two consecutive hyphens.

    Constraints:

    • If the source type is a DB instance, then a DBInstanceIdentifier must be supplied.
    • If the source type is a DB security group, a DBSecurityGroupName must be supplied.
    • If the source type is a DB parameter group, a DBParameterGroupName must be supplied.
    • If the source type is a DB snapshot, a DBSnapshotIdentifier must be supplied.
    ", + "AddTagsToResourceMessage$ResourceName": "

    The Amazon RDS resource the tags will be added to. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

    ", + "AuthorizeDBSecurityGroupIngressMessage$DBSecurityGroupName": "

    The name of the DB security group to add authorization to.

    ", + "AuthorizeDBSecurityGroupIngressMessage$CIDRIP": "

    The IP range to authorize.

    ", + "AuthorizeDBSecurityGroupIngressMessage$EC2SecurityGroupName": "

    The name of the EC2 security group to authorize. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "AuthorizeDBSecurityGroupIngressMessage$EC2SecurityGroupId": "

    The ID of the EC2 security group to authorize. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "AuthorizeDBSecurityGroupIngressMessage$EC2SecurityGroupOwnerId": "

    AWS Account Number of the owner of the EC2 security group specified in the EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable value. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "AvailabilityZone$Name": "

    The name of the Availability Zone.

    ", + "CharacterSet$CharacterSetName": "

    The name of the character set.

    ", + "CharacterSet$CharacterSetDescription": "

    The description of the character set.

    ", + "CopyDBParameterGroupMessage$SourceDBParameterGroupIdentifier": "

    The identifier or ARN for the source DB parameter group.

    Constraints:

    • Must specify a valid DB parameter group.
    • If the source DB parameter group is in the same region as the copy, specify a valid DB parameter group identifier, for example my-db-param-group, or a valid ARN.
    • If the source DB parameter group is in a different region than the copy, specify a valid DB parameter group ARN, for example arn:aws:rds:us-west-2:123456789012:pg:special-parameters.
    ", + "CopyDBParameterGroupMessage$TargetDBParameterGroupIdentifier": "

    The identifier for the copied DB parameter group.

    Constraints:

    • Cannot be null, empty, or blank
    • Must contain from 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-db-parameter-group

    ", + "CopyDBParameterGroupMessage$TargetDBParameterGroupDescription": "

    A description for the copied DB parameter group.

    ", + "CopyDBSnapshotMessage$SourceDBSnapshotIdentifier": "

    The identifier for the source DB snapshot.

    Constraints:

    • Must specify a valid system snapshot in the \"available\" state.
    • If the source snapshot is in the same region as the copy, specify a valid DB snapshot identifier.
    • If the source snapshot is in a different region than the copy, specify a valid DB snapshot ARN. For more information, go to Copying a DB Snapshot.

    Example: rds:mydb-2012-04-02-00-01

    Example: arn:aws:rds:rr-regn-1:123456789012:snapshot:mysql-instance1-snapshot-20130805

    ", + "CopyDBSnapshotMessage$TargetDBSnapshotIdentifier": "

    The identifier for the copied snapshot.

    Constraints:

    • Cannot be null, empty, or blank
    • Must contain from 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-db-snapshot

    ", + "CopyOptionGroupMessage$SourceOptionGroupIdentifier": "

    The identifier or ARN for the source option group.

    Constraints:

    • Must specify a valid option group.
    • If the source option group is in the same region as the copy, specify a valid option group identifier, for example my-option-group, or a valid ARN.
    • If the source option group is in a different region than the copy, specify a valid option group ARN, for example arn:aws:rds:us-west-2:123456789012:og:special-options.
    ", + "CopyOptionGroupMessage$TargetOptionGroupIdentifier": "

    The identifier for the copied option group.

    Constraints:

    • Cannot be null, empty, or blank
    • Must contain from 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-option-group

    ", + "CopyOptionGroupMessage$TargetOptionGroupDescription": "

    The description for the copied option group.

    ", + "CreateDBInstanceMessage$DBName": "

    The meaning of this parameter differs according to the database engine you use.

    Type: String

    MySQL

    The name of the database to create when the DB instance is created. If this parameter is not specified, no database is created in the DB instance.

    Constraints:

    • Must contain 1 to 64 alphanumeric characters
    • Cannot be a word reserved by the specified database engine

    PostgreSQL

    The name of the database to create when the DB instance is created. If this parameter is not specified, no database is created in the DB instance.

    Constraints:

    • Must contain 1 to 63 alphanumeric characters
    • Must begin with a letter or an underscore. Subsequent characters can be letters, underscores, or digits (0-9).
    • Cannot be a word reserved by the specified database engine

    Oracle

    The Oracle System ID (SID) of the created DB instance.

    Default: ORCL

    Constraints:

    • Cannot be longer than 8 characters

    SQL Server

    Not applicable. Must be null.

    ", + "CreateDBInstanceMessage$DBInstanceIdentifier": "

    The DB instance identifier. This parameter is stored as a lowercase string.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens (1 to 15 for SQL Server).
    • First character must be a letter.
    • Cannot end with a hyphen or contain two consecutive hyphens.

    Example: mydbinstance

    ", + "CreateDBInstanceMessage$DBInstanceClass": "

    The compute and memory capacity of the DB instance.

    Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge | db.t2.micro | db.t2.small | db.t2.medium

    ", + "CreateDBInstanceMessage$Engine": "

    The name of the database engine to be used for this instance.

    Valid Values: MySQL | oracle-se1 | oracle-se | oracle-ee | sqlserver-ee | sqlserver-se | sqlserver-ex | sqlserver-web | postgres

    Not every database engine is available for every AWS region.

    ", + "CreateDBInstanceMessage$MasterUsername": "

    The name of the master user for the client DB instance.

    MySQL

    Constraints:

    • Must be 1 to 16 alphanumeric characters.
    • First character must be a letter.
    • Cannot be a reserved word for the chosen database engine.

    Type: String

    Oracle

    Constraints:

    • Must be 1 to 30 alphanumeric characters.
    • First character must be a letter.
    • Cannot be a reserved word for the chosen database engine.

    SQL Server

    Constraints:

    • Must be 1 to 128 alphanumeric characters.
    • First character must be a letter.
    • Cannot be a reserved word for the chosen database engine.
    ", + "CreateDBInstanceMessage$MasterUserPassword": "

    The password for the master database user. Can be any printable ASCII character except \"/\", \"\"\", or \"@\".

    Type: String

    MySQL

    Constraints: Must contain from 8 to 41 characters.

    Oracle

    Constraints: Must contain from 8 to 30 characters.

    SQL Server

    Constraints: Must contain from 8 to 128 characters.

    ", + "CreateDBInstanceMessage$AvailabilityZone": "

    The EC2 Availability Zone that the database instance will be created in. For information on regions and Availability Zones, see Regions and Availability Zones.

    Default: A random, system-chosen Availability Zone in the endpoint's region.

    Example: us-east-1d

    Constraint: The AvailabilityZone parameter cannot be specified if the MultiAZ parameter is set to true. The specified Availability Zone must be in the same region as the current endpoint.

    ", + "CreateDBInstanceMessage$DBSubnetGroupName": "

    A DB subnet group to associate with this DB instance.

    If there is no DB subnet group, then it is a non-VPC DB instance.

    ", + "CreateDBInstanceMessage$PreferredMaintenanceWindow": "

    The weekly time range (in UTC) during which system maintenance can occur. For more information, see DB Instance Maintenance.

    Format: ddd:hh24:mi-ddd:hh24:mi

    Default: A 30-minute window selected at random from an 8-hour block of time per region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

    Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun

    Constraints: Minimum 30-minute window.

    ", + "CreateDBInstanceMessage$DBParameterGroupName": "

    The name of the DB parameter group to associate with this DB instance. If this argument is omitted, the default DBParameterGroup for the specified engine will be used.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "CreateDBInstanceMessage$PreferredBackupWindow": "

    The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter. For more information, see DB Instance Backups.

    Default: A 30-minute window selected at random from an 8-hour block of time per region. See the Amazon RDS User Guide for the time blocks for each region from which the default backup windows are assigned.

    Constraints: Must be in the format hh24:mi-hh24:mi. Times should be Universal Time Coordinated (UTC). Must not conflict with the preferred maintenance window. Must be at least 30 minutes.

    ", + "CreateDBInstanceMessage$EngineVersion": "

    The version number of the database engine to use.

    The following are the database engines and major and minor versions that are available with Amazon RDS. Not every database engine is available for every AWS region.

    MySQL

    • Version 5.1: 5.1.45 | 5.1.49 | 5.1.50 | 5.1.57 | 5.1.61 | 5.1.62 | 5.1.63 | 5.1.69 | 5.1.71 | 5.1.73
    • Version 5.5: 5.5.12 | 5.5.20 | 5.5.23 | 5.5.25a | 5.5.27 | 5.5.31 | 5.5.33 | 5.5.37 | 5.5.38 | 5.5.8
    • Version 5.6: 5.6.12 | 5.6.13 | 5.6.17 | 5.6.19 | 5.6.21

    Oracle Database Enterprise Edition (oracle-ee)

    • Version 11.2: 11.2.0.2.v3 | 11.2.0.2.v4 | 11.2.0.2.v5 | 11.2.0.2.v6 | 11.2.0.2.v7 | 11.2.0.3.v1 | 11.2.0.4.v1

    Oracle Database Standard Edition (oracle-se)

    • Version 11.2: 11.2.0.2.v3 | 11.2.0.2.v4 | 11.2.0.2.v5 | 11.2.0.2.v6 | 11.2.0.2.v7 | 11.2.0.3.v1 | 11.2.0.4.v1

    Oracle Database Standard Edition One (oracle-se1)

    • Version 11.2: 11.2.0.2.v3 | 11.2.0.2.v4 | 11.2.0.2.v5 | 11.2.0.2.v6 | 11.2.0.2.v7 | 11.2.0.3.v1 | 11.2.0.4.v1

    PostgreSQL

    • Version 9.3: 9.3.1 | 9.3.2 | 9.3.3

    Microsoft SQL Server Enterprise Edition (sqlserver-ee)

    • Version 10.5: 10.50.2789.0.v1
    • Version 11.0: 11.00.2100.60.v1

    Microsoft SQL Server Express Edition (sqlserver-ex)

    • Version 10.5: 10.50.2789.0.v1
    • Version 11.0: 11.00.2100.60.v1

    Microsoft SQL Server Standard Edition (sqlserver-se)

    • Version 10.5: 10.50.2789.0.v1
    • Version 11.0: 11.00.2100.60.v1

    Microsoft SQL Server Web Edition (sqlserver-web)

    • Version 10.5: 10.50.2789.0.v1
    • Version 11.0: 11.00.2100.60.v1
    ", + "CreateDBInstanceMessage$LicenseModel": "

    License model information for this DB instance.

    Valid values: license-included | bring-your-own-license | general-public-license

    ", + "CreateDBInstanceMessage$OptionGroupName": "

    Indicates that the DB instance should be associated with the specified option group.

    Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance.

    ", + "CreateDBInstanceMessage$CharacterSetName": "

    For supported engines, indicates that the DB instance should be associated with the specified CharacterSet.

    ", + "CreateDBInstanceMessage$StorageType": "

    Specifies the storage type to be associated with the DB instance.

    Valid values: standard | gp2 | io1

    If you specify io1, you must also include a value for the Iops parameter.

    Default: io1 if the Iops parameter is specified; otherwise standard

    ", + "CreateDBInstanceMessage$TdeCredentialArn": "

    The ARN from the Key Store with which to associate the instance for TDE encryption.

    ", + "CreateDBInstanceMessage$TdeCredentialPassword": "

    The password for the given ARN from the Key Store in order to access the device.

    ", + "CreateDBInstanceReadReplicaMessage$DBInstanceIdentifier": "

    The DB instance identifier of the Read Replica. This is the unique key that identifies a DB instance. This parameter is stored as a lowercase string.

    ", + "CreateDBInstanceReadReplicaMessage$SourceDBInstanceIdentifier": "

    The identifier of the DB instance that will act as the source for the Read Replica. Each DB instance can have up to five Read Replicas.

    Constraints:

    • Must be the identifier of an existing DB instance.
    • Can specify a DB instance that is a MySQL Read Replica only if the source is running MySQL 5.6.
    • Can specify a DB instance that is a PostgreSQL Read Replica only if the source is running PostgreSQL 9.3.5.
    • The specified DB instance must have automatic backups enabled; that is, its backup retention period must be greater than 0.
    • If the source DB instance is in the same region as the Read Replica, specify a valid DB instance identifier.
    • If the source DB instance is in a different region than the Read Replica, specify a valid DB instance ARN. For more information, go to Constructing an Amazon RDS Amazon Resource Name (ARN).
    ", + "CreateDBInstanceReadReplicaMessage$DBInstanceClass": "

    The compute and memory capacity of the Read Replica.

    Valid Values: db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge | db.t2.micro | db.t2.small | db.t2.medium

    Default: Inherits from the source DB instance.

    ", + "CreateDBInstanceReadReplicaMessage$AvailabilityZone": "

    The Amazon EC2 Availability Zone that the Read Replica will be created in.

    Default: A random, system-chosen Availability Zone in the endpoint's region.

    Example: us-east-1d

    ", + "CreateDBInstanceReadReplicaMessage$OptionGroupName": "

    The option group the DB instance will be associated with. If omitted, the default option group for the engine specified will be used.

    ", + "CreateDBInstanceReadReplicaMessage$DBSubnetGroupName": "

    Specifies a DB subnet group for the DB instance. The new DB instance will be created in the VPC associated with the DB subnet group. If no DB subnet group is specified, then the new DB instance is not created in a VPC.

    Constraints:

    • Can only be specified if the source DB instance identifier specifies a DB instance in another region.
    • The specified DB subnet group must be in the same region in which the operation is running.
    • All Read Replicas in one region that are created from the same source DB instance must either:
      • Specify DB subnet groups from the same VPC. All these Read Replicas will be created in the same VPC.
      • Not specify a DB subnet group. All these Read Replicas will be created outside of any VPC.
    ", + "CreateDBInstanceReadReplicaMessage$StorageType": "

    Specifies the storage type to be associated with the DB instance Read Replica.

    Valid values: standard | gp2 | io1

    If you specify io1, you must also include a value for the Iops parameter.

    Default: io1 if the Iops parameter is specified; otherwise standard

    ", + "CreateDBParameterGroupMessage$DBParameterGroupName": "

    The name of the DB parameter group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    This value is stored as a lowercase string.", + "CreateDBParameterGroupMessage$DBParameterGroupFamily": "

    The DB parameter group family name. A DB parameter group can be associated with one and only one DB parameter group family, and can be applied only to a DB instance running a database engine and engine version compatible with that DB parameter group family.

    ", + "CreateDBParameterGroupMessage$Description": "

    The description for the DB parameter group.

    ", + "CreateDBSecurityGroupMessage$DBSecurityGroupName": "

    The name for the DB security group. This value is stored as a lowercase string.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    • Must not be \"Default\"
    • May not contain spaces

    Example: mysecuritygroup

    ", + "CreateDBSecurityGroupMessage$DBSecurityGroupDescription": "

    The description for the DB security group.

    ", + "CreateDBSnapshotMessage$DBSnapshotIdentifier": "

    The identifier for the DB snapshot.

    Constraints:

    • Cannot be null, empty, or blank
    • Must contain from 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-snapshot-id

    ", + "CreateDBSnapshotMessage$DBInstanceIdentifier": "

    The DB instance identifier. This is the unique key that identifies a DB instance.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "CreateDBSubnetGroupMessage$DBSubnetGroupName": "

    The name for the DB subnet group. This value is stored as a lowercase string.

    Constraints: Must contain no more than 255 alphanumeric characters or hyphens. Must not be \"Default\".

    Example: mySubnetgroup

    ", + "CreateDBSubnetGroupMessage$DBSubnetGroupDescription": "

    The description for the DB subnet group.

    ", + "CreateEventSubscriptionMessage$SubscriptionName": "

    The name of the subscription.

    Constraints: The name must be less than 255 characters.

    ", + "CreateEventSubscriptionMessage$SnsTopicArn": "

    The Amazon Resource Name (ARN) of the SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it.

    ", + "CreateEventSubscriptionMessage$SourceType": "

    The type of source that will be generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. If this value is not specified, all events are returned.

    Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot

    ", + "CreateOptionGroupMessage$OptionGroupName": "

    Specifies the name of the option group to be created.

    Constraints:

    • Must be 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: myoptiongroup

    ", + "CreateOptionGroupMessage$EngineName": "

    Specifies the name of the engine that this option group should be associated with.

    ", + "CreateOptionGroupMessage$MajorEngineVersion": "

    Specifies the major version of the engine that this option group should be associated with.

    ", + "CreateOptionGroupMessage$OptionGroupDescription": "

    The description of the option group.

    ", + "DBEngineVersion$Engine": "

    The name of the database engine.

    ", + "DBEngineVersion$EngineVersion": "

    The version number of the database engine.

    ", + "DBEngineVersion$DBParameterGroupFamily": "

    The name of the DB parameter group family for the database engine.

    ", + "DBEngineVersion$DBEngineDescription": "

    The description of the database engine.

    ", + "DBEngineVersion$DBEngineVersionDescription": "

    The description of the database engine version.

    ", + "DBEngineVersionMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBInstance$DBInstanceIdentifier": "

    Contains a user-supplied database identifier. This is the unique key that identifies a DB instance.

    ", + "DBInstance$DBInstanceClass": "

    Contains the name of the compute and memory capacity class of the DB instance.

    ", + "DBInstance$Engine": "

    Provides the name of the database engine to be used for this DB instance.

    ", + "DBInstance$DBInstanceStatus": "

    Specifies the current state of this database.

    ", + "DBInstance$MasterUsername": "

    Contains the master username for the DB instance.

    ", + "DBInstance$DBName": "

    The meaning of this parameter differs according to the database engine you use. For example, this value returns either MySQL or PostgreSQL information when returning values from CreateDBInstanceReadReplica since Read Replicas are only supported for MySQL and PostgreSQL.

    MySQL, SQL Server, PostgreSQL

    Contains the name of the initial database of this instance that was provided at create time, if one was specified when the DB instance was created. This same name is returned for the life of the DB instance.

    Type: String

    Oracle

    Contains the Oracle System ID (SID) of the created DB instance. Not shown when the returned parameters do not apply to an Oracle DB instance.

    ", + "DBInstance$PreferredBackupWindow": "

    Specifies the daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod.

    ", + "DBInstance$AvailabilityZone": "

    Specifies the name of the Availability Zone the DB instance is located in.

    ", + "DBInstance$PreferredMaintenanceWindow": "

    Specifies the weekly time range (in UTC) during which system maintenance can occur.

    ", + "DBInstance$EngineVersion": "

    Indicates the database engine version.

    ", + "DBInstance$ReadReplicaSourceDBInstanceIdentifier": "

    Contains the identifier of the source DB instance if this DB instance is a Read Replica.

    ", + "DBInstance$LicenseModel": "

    License model information for this DB instance.

    ", + "DBInstance$CharacterSetName": "

    If present, specifies the name of the character set that this instance is associated with.

    ", + "DBInstance$SecondaryAvailabilityZone": "

    If present, specifies the name of the secondary Availability Zone for a DB instance with multi-AZ support.

    ", + "DBInstance$StorageType": "

    Specifies the storage type associated with the DB instance.

    ", + "DBInstance$TdeCredentialArn": "

    The ARN from the Key Store with which the instance is associated for TDE encryption.

    ", + "DBInstanceMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBInstanceStatusInfo$StatusType": "

    This value is currently \"read replication.\"

    ", + "DBInstanceStatusInfo$Status": "

    Status of the DB instance. For a StatusType of read replica, the values can be replicating, error, stopped, or terminated.

    ", + "DBInstanceStatusInfo$Message": "

    Details of the error if there is an error for the instance. If the instance is not in an error state, this value is blank.

    ", + "DBParameterGroup$DBParameterGroupName": "

    Provides the name of the DB parameter group.

    ", + "DBParameterGroup$DBParameterGroupFamily": "

    Provides the name of the DB parameter group family that this DB parameter group is compatible with.

    ", + "DBParameterGroup$Description": "

    Provides the customer-specified description for this DB parameter group.

    ", + "DBParameterGroupDetails$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBParameterGroupNameMessage$DBParameterGroupName": "

    The name of the DB parameter group.

    ", + "DBParameterGroupStatus$DBParameterGroupName": "

    The name of the DB parameter group.

    ", + "DBParameterGroupStatus$ParameterApplyStatus": "

    The status of parameter updates.

    ", + "DBParameterGroupsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBSecurityGroup$OwnerId": "

    Provides the AWS ID of the owner of a specific DB security group.

    ", + "DBSecurityGroup$DBSecurityGroupName": "

    Specifies the name of the DB security group.

    ", + "DBSecurityGroup$DBSecurityGroupDescription": "

    Provides the description of the DB security group.

    ", + "DBSecurityGroup$VpcId": "

    Provides the VpcId of the DB security group.

    ", + "DBSecurityGroupMembership$DBSecurityGroupName": "

    The name of the DB security group.

    ", + "DBSecurityGroupMembership$Status": "

    The status of the DB security group.

    ", + "DBSecurityGroupMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBSecurityGroupNameList$member": null, + "DBSnapshot$DBSnapshotIdentifier": "

    Specifies the identifier for the DB snapshot.

    ", + "DBSnapshot$DBInstanceIdentifier": "

    Specifies the DB instance identifier of the DB instance this DB snapshot was created from.

    ", + "DBSnapshot$Engine": "

    Specifies the name of the database engine.

    ", + "DBSnapshot$Status": "

    Specifies the status of this DB snapshot.

    ", + "DBSnapshot$AvailabilityZone": "

    Specifies the name of the Availability Zone the DB instance was located in at the time of the DB snapshot.

    ", + "DBSnapshot$VpcId": "

    Provides the VPC ID associated with the DB snapshot.

    ", + "DBSnapshot$MasterUsername": "

    Provides the master username for the DB snapshot.

    ", + "DBSnapshot$EngineVersion": "

    Specifies the version of the database engine.

    ", + "DBSnapshot$LicenseModel": "

    License model information for the restored DB instance.

    ", + "DBSnapshot$SnapshotType": "

    Provides the type of the DB snapshot.

    ", + "DBSnapshot$OptionGroupName": "

    Provides the option group name for the DB snapshot.

    ", + "DBSnapshot$SourceRegion": "

    The region that the DB snapshot was created in or copied from.

    ", + "DBSnapshot$StorageType": "

    Specifies the storage type associated with the DB snapshot.

    ", + "DBSnapshot$TdeCredentialArn": "

    The ARN from the Key Store with which to associate the instance for TDE encryption.

    ", + "DBSnapshotMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBSubnetGroup$DBSubnetGroupName": "

    Specifies the name of the DB subnet group.

    ", + "DBSubnetGroup$DBSubnetGroupDescription": "

    Provides the description of the DB subnet group.

    ", + "DBSubnetGroup$VpcId": "

    Provides the VpcId of the DB subnet group.

    ", + "DBSubnetGroup$SubnetGroupStatus": "

    Provides the status of the DB subnet group.

    ", + "DBSubnetGroupMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DeleteDBInstanceMessage$DBInstanceIdentifier": "

    The DB instance identifier for the DB instance to be deleted. This parameter isn't case sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DeleteDBInstanceMessage$FinalDBSnapshotIdentifier": "

    The DBSnapshotIdentifier of the new DBSnapshot created when SkipFinalSnapshot is set to false.

    Specifying this parameter and also setting the SkipFinalSnapshot parameter to true results in an error.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    • Cannot be specified when deleting a Read Replica.
    ", + "DeleteDBParameterGroupMessage$DBParameterGroupName": "

    The name of the DB parameter group.

    Constraints:

    • Must be the name of an existing DB parameter group
    • You cannot delete a default DB parameter group
    • Cannot be associated with any DB instances
    ", + "DeleteDBSecurityGroupMessage$DBSecurityGroupName": "

    The name of the DB security group to delete.

    You cannot delete the default DB security group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    • Must not be \"Default\"
    • May not contain spaces
    ", + "DeleteDBSnapshotMessage$DBSnapshotIdentifier": "

    The DBSnapshot identifier.

    Constraints: Must be the name of an existing DB snapshot in the available state.

    ", + "DeleteDBSubnetGroupMessage$DBSubnetGroupName": "

    The name of the database subnet group to delete.

    You cannot delete the default subnet group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DeleteEventSubscriptionMessage$SubscriptionName": "

    The name of the RDS event notification subscription you want to delete.

    ", + "DeleteOptionGroupMessage$OptionGroupName": "

    The name of the option group to be deleted.

    You cannot delete default option groups.", + "DescribeDBEngineVersionsMessage$Engine": "

    The database engine to return.

    ", + "DescribeDBEngineVersionsMessage$EngineVersion": "

    The database engine version to return.

    Example: 5.1.49

    ", + "DescribeDBEngineVersionsMessage$DBParameterGroupFamily": "

    The name of a specific DB parameter group family to return details for.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBEngineVersionsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBInstancesMessage$DBInstanceIdentifier": "

    The user-supplied instance identifier. If this parameter is specified, information from only the specific DB instance is returned. This parameter isn't case sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBInstancesMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBInstances request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBLogFilesDetails$LogFileName": "

    The name of the log file for the specified DB instance.

    ", + "DescribeDBLogFilesMessage$DBInstanceIdentifier": "

    The customer-assigned name of the DB instance that contains the log files you want to list.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBLogFilesMessage$FilenameContains": "

    Filters the available log files for log file names that contain the specified string.

    ", + "DescribeDBLogFilesMessage$Marker": "

    The pagination token provided in the previous request. If this parameter is specified the response includes only records beyond the marker, up to MaxRecords.

    ", + "DescribeDBLogFilesResponse$Marker": "

    A pagination token that can be used in a subsequent DescribeDBLogFiles request.

    ", + "DescribeDBParameterGroupsMessage$DBParameterGroupName": "

    The name of a specific DB parameter group to return details for.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBParameterGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBParameterGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBParametersMessage$DBParameterGroupName": "

    The name of a specific DB parameter group to return details for.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBParametersMessage$Source": "

    The parameter types to return.

    Default: All parameter types returned

    Valid Values: user | system | engine-default

    ", + "DescribeDBParametersMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBSecurityGroupsMessage$DBSecurityGroupName": "

    The name of the DB security group to return details for.

    ", + "DescribeDBSecurityGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBSecurityGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBSnapshotsMessage$DBInstanceIdentifier": "

    A DB instance identifier to retrieve the list of DB snapshots for. Cannot be used in conjunction with DBSnapshotIdentifier. This parameter is not case sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBSnapshotsMessage$DBSnapshotIdentifier": "

    A specific DB snapshot identifier to describe. Cannot be used in conjunction with DBInstanceIdentifier. This value is stored as a lowercase string.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    • If this is the identifier of an automated snapshot, the SnapshotType parameter must also be specified.
    ", + "DescribeDBSnapshotsMessage$SnapshotType": "

    The type of snapshots that will be returned. Values can be \"automated\" or \"manual.\" If not specified, the returned results will include all snapshot types.

    ", + "DescribeDBSnapshotsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBSnapshots request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBSubnetGroupsMessage$DBSubnetGroupName": "

    The name of the DB subnet group to return details for.

    ", + "DescribeDBSubnetGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBSubnetGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeEngineDefaultParametersMessage$DBParameterGroupFamily": "

    The name of the DB parameter group family.

    ", + "DescribeEngineDefaultParametersMessage$Marker": "

    An optional pagination token provided by a previous DescribeEngineDefaultParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeEventCategoriesMessage$SourceType": "

    The type of source that will be generating the events.

    Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot

    ", + "DescribeEventSubscriptionsMessage$SubscriptionName": "

    The name of the RDS event notification subscription you want to describe.

    ", + "DescribeEventSubscriptionsMessage$Marker": "

    An optional pagination token provided by a previous DescribeEventSubscriptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeEventsMessage$SourceIdentifier": "

    The identifier of the event source for which events will be returned. If not specified, then all sources are included in the response.

    Constraints:

    • If SourceIdentifier is supplied, SourceType must also be provided.
    • If the source type is DBInstance, then a DBInstanceIdentifier must be supplied.
    • If the source type is DBSecurityGroup, a DBSecurityGroupName must be supplied.
    • If the source type is DBParameterGroup, a DBParameterGroupName must be supplied.
    • If the source type is DBSnapshot, a DBSnapshotIdentifier must be supplied.
    • Cannot end with a hyphen or contain two consecutive hyphens.
    ", + "DescribeEventsMessage$Marker": "

    An optional pagination token provided by a previous DescribeEvents request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeOptionGroupOptionsMessage$EngineName": "

    A required parameter. Options available for the given Engine name will be described.

    ", + "DescribeOptionGroupOptionsMessage$MajorEngineVersion": "

    If specified, filters the results to include only options for the specified major engine version.

    ", + "DescribeOptionGroupOptionsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeOptionGroupsMessage$OptionGroupName": "

    The name of the option group to describe. Cannot be supplied together with EngineName or MajorEngineVersion.

    ", + "DescribeOptionGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeOptionGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeOptionGroupsMessage$EngineName": "

    Filters the list of option groups to only include groups associated with a specific database engine.

    ", + "DescribeOptionGroupsMessage$MajorEngineVersion": "

    Filters the list of option groups to only include groups associated with a specific database engine version. If specified, then EngineName must also be specified.

    ", + "DescribeOrderableDBInstanceOptionsMessage$Engine": "

    The name of the engine to retrieve DB instance options for.

    ", + "DescribeOrderableDBInstanceOptionsMessage$EngineVersion": "

    The engine version filter value. Specify this parameter to show only the available offerings matching the specified engine version.

    ", + "DescribeOrderableDBInstanceOptionsMessage$DBInstanceClass": "

    The DB instance class filter value. Specify this parameter to show only the available offerings matching the specified DB instance class.

    ", + "DescribeOrderableDBInstanceOptionsMessage$LicenseModel": "

    The license model filter value. Specify this parameter to show only the available offerings matching the specified license model.

    ", + "DescribeOrderableDBInstanceOptionsMessage$Marker": "

    An optional pagination token provided by a previous DescribeOrderableDBInstanceOptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
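
Putting the Engine, EngineVersion, DBInstanceClass, and LicenseModel filters together, a sketch that lists orderable instance classes; all filter values are illustrative, not guaranteed offerings:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/rds"
)

// listOrderableOptions prints the instance classes orderable for an engine.
func listOrderableOptions(svc *rds.RDS) error {
	out, err := svc.DescribeOrderableDBInstanceOptions(&rds.DescribeOrderableDBInstanceOptionsInput{
		Engine:       aws.String("mysql"),                  // required
		LicenseModel: aws.String("general-public-license"), // optional filter
	})
	if err != nil {
		return err
	}
	for _, o := range out.OrderableDBInstanceOptions {
		fmt.Printf("%s %s %s\n",
			aws.StringValue(o.Engine),
			aws.StringValue(o.EngineVersion),
			aws.StringValue(o.DBInstanceClass))
	}
	return nil
}
```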

    ", + "DescribeReservedDBInstancesMessage$ReservedDBInstanceId": "

    The reserved DB instance identifier filter value. Specify this parameter to show only the reservation that matches the specified reservation ID.

    ", + "DescribeReservedDBInstancesMessage$ReservedDBInstancesOfferingId": "

    The offering identifier filter value. Specify this parameter to show only purchased reservations matching the specified offering identifier.

    ", + "DescribeReservedDBInstancesMessage$DBInstanceClass": "

    The DB instance class filter value. Specify this parameter to show only those reservations matching the specified DB instance class.

    ", + "DescribeReservedDBInstancesMessage$Duration": "

    The duration filter value, specified in years or seconds. Specify this parameter to show only reservations for this duration.

    Valid Values: 1 | 3 | 31536000 | 94608000

    ", + "DescribeReservedDBInstancesMessage$ProductDescription": "

    The product description filter value. Specify this parameter to show only those reservations matching the specified product description.

    ", + "DescribeReservedDBInstancesMessage$OfferingType": "

    The offering type filter value. Specify this parameter to show only the available offerings matching the specified offering type.

    Valid Values: \"Light Utilization\" | \"Medium Utilization\" | \"Heavy Utilization\"

    ", + "DescribeReservedDBInstancesMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeReservedDBInstancesOfferingsMessage$ReservedDBInstancesOfferingId": "

    The offering identifier filter value. Specify this parameter to show only the available offering that matches the specified reservation identifier.

    Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706

    ", + "DescribeReservedDBInstancesOfferingsMessage$DBInstanceClass": "

    The DB instance class filter value. Specify this parameter to show only the available offerings matching the specified DB instance class.

    ", + "DescribeReservedDBInstancesOfferingsMessage$Duration": "

    Duration filter value, specified in years or seconds. Specify this parameter to show only reservations for this duration.

    Valid Values: 1 | 3 | 31536000 | 94608000

    ", + "DescribeReservedDBInstancesOfferingsMessage$ProductDescription": "

    Product description filter value. Specify this parameter to show only the available offerings matching the specified product description.

    ", + "DescribeReservedDBInstancesOfferingsMessage$OfferingType": "

    The offering type filter value. Specify this parameter to show only the available offerings matching the specified offering type.

    Valid Values: \"Light Utilization\" | \"Medium Utilization\" | \"Heavy Utilization\"

    ", + "DescribeReservedDBInstancesOfferingsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DownloadDBLogFilePortionDetails$LogFileData": "

    Entries from the specified log file.

    ", + "DownloadDBLogFilePortionDetails$Marker": "

    A pagination token that can be used in a subsequent DownloadDBLogFilePortion request.

    ", + "DownloadDBLogFilePortionMessage$DBInstanceIdentifier": "

    The customer-assigned name of the DB instance that contains the log files you want to list.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DownloadDBLogFilePortionMessage$LogFileName": "

    The name of the log file to be downloaded.

    ", + "DownloadDBLogFilePortionMessage$Marker": "

    The pagination token provided in the previous request or \"0\". If the Marker parameter is specified, the response includes only records beyond the marker, until the end of the file or up to NumberOfLines.
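
This Marker-of-\"0\" convention lends itself to a download loop; the AdditionalDataPending field on the response signals whether another portion remains. A sketch, with the instance and log file names assumed:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/rds"
)

// downloadFullLog fetches an entire log file portion by portion.
func downloadFullLog(svc *rds.RDS) error {
	input := &rds.DownloadDBLogFilePortionInput{
		DBInstanceIdentifier: aws.String("mydbinstance"),          // hypothetical
		LogFileName:          aws.String("error/mysql-error.log"), // hypothetical
		Marker:               aws.String("0"),                     // start at the beginning
	}
	for {
		out, err := svc.DownloadDBLogFilePortion(input)
		if err != nil {
			return err
		}
		fmt.Print(aws.StringValue(out.LogFileData))
		if !aws.BoolValue(out.AdditionalDataPending) {
			return nil
		}
		input.Marker = out.Marker // continue from where this portion ended
	}
}
```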

    ", + "EC2SecurityGroup$Status": "

    Provides the status of the EC2 security group. Status can be \"authorizing\", \"authorized\", \"revoking\", and \"revoked\".

    ", + "EC2SecurityGroup$EC2SecurityGroupName": "

    Specifies the name of the EC2 security group.

    ", + "EC2SecurityGroup$EC2SecurityGroupId": "

    Specifies the ID of the EC2 security group.

    ", + "EC2SecurityGroup$EC2SecurityGroupOwnerId": "

    Specifies the AWS ID of the owner of the EC2 security group specified in the EC2SecurityGroupName field.

    ", + "Endpoint$Address": "

    Specifies the DNS address of the DB instance.

    ", + "EngineDefaults$DBParameterGroupFamily": "

    Specifies the name of the DB parameter group family which the engine default parameters apply to.

    ", + "EngineDefaults$Marker": "

    An optional pagination token provided by a previous EngineDefaults request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "Event$SourceIdentifier": "

    Provides the identifier for the source of the event.

    ", + "Event$Message": "

    Provides the text of this event.

    ", + "EventCategoriesList$member": null, + "EventCategoriesMap$SourceType": "

    The source type that the returned categories belong to.

    ", + "EventSubscription$CustomerAwsId": "

    The AWS customer account associated with the RDS event notification subscription.

    ", + "EventSubscription$CustSubscriptionId": "

    The RDS event notification subscription Id.

    ", + "EventSubscription$SnsTopicArn": "

    The topic ARN of the RDS event notification subscription.

    ", + "EventSubscription$Status": "

    The status of the RDS event notification subscription.

    Constraints:

    Can be one of the following: creating | modifying | deleting | active | no-permission | topic-not-exist

    The status \"no-permission\" indicates that RDS no longer has permission to post to the SNS topic. The status \"topic-not-exist\" indicates that the topic was deleted after the subscription was created.

    ", + "EventSubscription$SubscriptionCreationTime": "

    The time the RDS event notification subscription was created.

    ", + "EventSubscription$SourceType": "

    The source type for the RDS event notification subscription.

    ", + "EventSubscriptionsMessage$Marker": "

    An optional pagination token provided by a previous DescribeEventSubscriptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "EventsMessage$Marker": "

    An optional pagination token provided by a previous DescribeEvents request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "Filter$Name": "

    This parameter is not currently supported.

    ", + "FilterValueList$member": null, + "IPRange$Status": "

    Specifies the status of the IP range. Status can be \"authorizing\", \"authorized\", \"revoking\", and \"revoked\".

    ", + "IPRange$CIDRIP": "

    Specifies the IP range.

    ", + "KeyList$member": null, + "ListTagsForResourceMessage$ResourceName": "

    The Amazon RDS resource with tags to be listed. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).
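
Constructing the ARN is the only non-obvious step here. A sketch; the region, account ID, and instance name below are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/rds"
)

// listInstanceTags prints the tags attached to a DB instance.
func listInstanceTags(svc *rds.RDS) error {
	arn := "arn:aws:rds:us-east-1:123456789012:db:mydbinstance" // placeholder ARN
	out, err := svc.ListTagsForResource(&rds.ListTagsForResourceInput{
		ResourceName: aws.String(arn),
	})
	if err != nil {
		return err
	}
	for _, t := range out.TagList {
		fmt.Printf("%s=%s\n", aws.StringValue(t.Key), aws.StringValue(t.Value))
	}
	return nil
}
```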

    ", + "ModifyDBInstanceMessage$DBInstanceIdentifier": "

    The DB instance identifier. This value is stored as a lowercase string.

    Constraints:

    • Must be the identifier for an existing DB instance
    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "ModifyDBInstanceMessage$DBInstanceClass": "

    The new compute and memory capacity of the DB instance. To determine the instance classes that are available for a particular DB engine, use the DescribeOrderableDBInstanceOptions action.

    Passing a value for this setting causes an outage during the change and is applied during the next maintenance window, unless ApplyImmediately is specified as true for this request.

    Default: Uses existing setting

    Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge | db.t2.micro | db.t2.small | db.t2.medium
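
Because the class change causes an outage and is deferred to the maintenance window by default, ApplyImmediately is the flag that matters. A sketch with illustrative identifiers:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/rds"
)

// resizeInstance requests a new instance class. Without ApplyImmediately the
// change waits for the next maintenance window; with it, the outage happens now.
func resizeInstance(svc *rds.RDS) error {
	_, err := svc.ModifyDBInstance(&rds.ModifyDBInstanceInput{
		DBInstanceIdentifier: aws.String("mydbinstance"), // hypothetical
		DBInstanceClass:      aws.String("db.m3.large"),
		ApplyImmediately:     aws.Bool(true),
	})
	return err
}
```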

    ", + "ModifyDBInstanceMessage$MasterUserPassword": "

    The new password for the DB instance master user. Can be any printable ASCII character except \"/\", \"\"\", or \"@\".

    Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the MasterUserPassword element exists in the PendingModifiedValues element of the operation response.

    Default: Uses existing setting

    Constraints: Must be 8 to 41 alphanumeric characters (MySQL), 8 to 30 alphanumeric characters (Oracle), or 8 to 128 alphanumeric characters (SQL Server).

    Amazon RDS API actions never return the password, so this action provides a way to regain access to a master instance user if the password is lost. This includes restoring privileges that may have been accidentally revoked. ", + "ModifyDBInstanceMessage$DBParameterGroupName": "

    The name of the DB parameter group to apply to the DB instance. Changing this setting does not result in an outage. The parameter group name itself is changed immediately, but the actual parameter changes are not applied until you reboot the instance without failover. The DB instance will NOT be rebooted automatically, and the parameter changes will NOT be applied during the next maintenance window.

    Default: Uses existing setting

    Constraints: The DB parameter group must be in the same DB parameter group family as this DB instance.

    ", + "ModifyDBInstanceMessage$PreferredBackupWindow": "

    The daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible.

    Constraints:

    • Must be in the format hh24:mi-hh24:mi
    • Times should be Universal Time Coordinated (UTC)
    • Must not conflict with the preferred maintenance window
    • Must be at least 30 minutes
    ", + "ModifyDBInstanceMessage$PreferredMaintenanceWindow": "

    The weekly time range (in UTC) during which system maintenance can occur, which may result in an outage. Changing this parameter does not result in an outage, except in the following situation, and the change is asynchronously applied as soon as possible. If there are pending actions that cause a reboot, and the maintenance window is changed to include the current time, then changing this parameter will cause a reboot of the DB instance. If moving this window to the current time, there must be at least 30 minutes between the current time and the end of the window to ensure pending changes are applied.

    Default: Uses existing setting

    Format: ddd:hh24:mi-ddd:hh24:mi

    Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun

    Constraints: Must be at least 30 minutes

    ", + "ModifyDBInstanceMessage$EngineVersion": "

    The version number of the database engine to upgrade to. Changing this parameter results in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

    For major version upgrades, if a non-default DB parameter group is currently in use, a new DB parameter group in the DB parameter group family for the new engine version must be specified. The new DB parameter group can be the default for that DB parameter group family.

    For a list of valid engine versions, see CreateDBInstance.

    ", + "ModifyDBInstanceMessage$OptionGroupName": "

    Indicates that the DB instance should be associated with the specified option group. Changing this parameter does not result in an outage except in the following case and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. If the parameter change results in an option group that enables OEM, this change can cause a brief (sub-second) period during which new connections are rejected but existing connections are not interrupted.

    Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance.

    ", + "ModifyDBInstanceMessage$NewDBInstanceIdentifier": "

    The new DB instance identifier for the DB instance when renaming a DB instance. When you change the DB instance identifier, an instance reboot will occur immediately if you set Apply Immediately to true, or will occur during the next maintenance window if Apply Immediately is set to false. This value is stored as a lowercase string.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "ModifyDBInstanceMessage$StorageType": "

    Specifies the storage type to be associated with the DB instance.

    Valid values: standard | gp2 | io1

    If you specify io1, you must also include a value for the Iops parameter.

    Default: io1 if the Iops parameter is specified; otherwise standard

    ", + "ModifyDBInstanceMessage$TdeCredentialArn": "

    The ARN from the Key Store with which to associate the instance for TDE encryption.

    ", + "ModifyDBInstanceMessage$TdeCredentialPassword": "

    The password for the given ARN from the Key Store in order to access the device.

    ", + "ModifyDBParameterGroupMessage$DBParameterGroupName": "

    The name of the DB parameter group.

    Constraints:

    • Must be the name of an existing DB parameter group
    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "ModifyDBSubnetGroupMessage$DBSubnetGroupName": "

    The name for the DB subnet group. This value is stored as a lowercase string.

    Constraints: Must contain no more than 255 alphanumeric characters or hyphens. Must not be \"Default\".

    Example: mySubnetgroup

    ", + "ModifyDBSubnetGroupMessage$DBSubnetGroupDescription": "

    The description for the DB subnet group.

    ", + "ModifyEventSubscriptionMessage$SubscriptionName": "

    The name of the RDS event notification subscription.

    ", + "ModifyEventSubscriptionMessage$SnsTopicArn": "

    The Amazon Resource Name (ARN) of the SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it.

    ", + "ModifyEventSubscriptionMessage$SourceType": "

    The type of source that will be generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. If this value is not specified, all events are returned.

    Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot
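
A sketch of narrowing an existing subscription to DB instance events only; the subscription name is a placeholder:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/rds"
)

// scopeSubscriptionToInstances restricts an event subscription to
// db-instance source events.
func scopeSubscriptionToInstances(svc *rds.RDS) error {
	_, err := svc.ModifyEventSubscription(&rds.ModifyEventSubscriptionInput{
		SubscriptionName: aws.String("my-rds-subscription"), // hypothetical
		SourceType:       aws.String("db-instance"),
	})
	return err
}
```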

    ", + "ModifyOptionGroupMessage$OptionGroupName": "

    The name of the option group to be modified.

    Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance.

    ", + "Option$OptionName": "

    The name of the option.

    ", + "Option$OptionDescription": "

    The description of the option.

    ", + "OptionConfiguration$OptionName": "

    The name of the option to include in the group.

    ", + "OptionGroup$OptionGroupName": "

    Specifies the name of the option group.

    ", + "OptionGroup$OptionGroupDescription": "

    Provides a description of the option group.

    ", + "OptionGroup$EngineName": "

    Engine name that this option group can be applied to.

    ", + "OptionGroup$MajorEngineVersion": "

    Indicates the major engine version associated with this option group.

    ", + "OptionGroup$VpcId": "

    If AllowsVpcAndNonVpcInstanceMemberships is false, this field is blank. If AllowsVpcAndNonVpcInstanceMemberships is true and this field is blank, then this option group can be applied to both VPC and non-VPC instances. If this field contains a value, then this option group can only be applied to instances that are in the VPC indicated by this field.

    ", + "OptionGroupMembership$OptionGroupName": "

    The name of the option group that the instance belongs to.

    ", + "OptionGroupMembership$Status": "

    The status of the DB instance's option group membership (e.g. in-sync, pending, pending-maintenance, applying).

    ", + "OptionGroupOption$Name": "

    The name of the option.

    ", + "OptionGroupOption$Description": "

    The description of the option.

    ", + "OptionGroupOption$EngineName": "

    The name of the engine that this option can be applied to.

    ", + "OptionGroupOption$MajorEngineVersion": "

    Indicates the major engine version that the option is available for.

    ", + "OptionGroupOption$MinimumRequiredMinorEngineVersion": "

    The minimum required engine version for the option to be applied.

    ", + "OptionGroupOptionSetting$SettingName": "

    The name of the option group option.

    ", + "OptionGroupOptionSetting$SettingDescription": "

    The description of the option group option.

    ", + "OptionGroupOptionSetting$DefaultValue": "

    The default value for the option group option.

    ", + "OptionGroupOptionSetting$ApplyType": "

    The DB engine specific parameter type for the option group option.

    ", + "OptionGroupOptionSetting$AllowedValues": "

    Indicates the acceptable values for the option group option.

    ", + "OptionGroupOptionsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "OptionGroups$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "OptionNamesList$member": null, + "OptionSetting$Name": "

    The name of the option that has settings that you can set.

    ", + "OptionSetting$Value": "

    The current value of the option setting.

    ", + "OptionSetting$DefaultValue": "

    The default value of the option setting.

    ", + "OptionSetting$Description": "

    The description of the option setting.

    ", + "OptionSetting$ApplyType": "

    The DB engine specific parameter type.

    ", + "OptionSetting$DataType": "

    The data type of the option setting.

    ", + "OptionSetting$AllowedValues": "

    The allowed values of the option setting.

    ", + "OptionsDependedOn$member": null, + "OrderableDBInstanceOption$Engine": "

    The engine type of the orderable DB instance.

    ", + "OrderableDBInstanceOption$EngineVersion": "

    The engine version of the orderable DB instance.

    ", + "OrderableDBInstanceOption$DBInstanceClass": "

    The DB instance class for the orderable DB instance.

    ", + "OrderableDBInstanceOption$LicenseModel": "

    The license model for the orderable DB instance.

    ", + "OrderableDBInstanceOption$StorageType": "

    The storage type for this orderable DB instance.

    ", + "OrderableDBInstanceOptionsMessage$Marker": "

    An optional pagination token provided by a previous DescribeOrderableDBInstanceOptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "Parameter$ParameterName": "

    Specifies the name of the parameter.

    ", + "Parameter$ParameterValue": "

    Specifies the value of the parameter.

    ", + "Parameter$Description": "

    Provides a description of the parameter.

    ", + "Parameter$Source": "

    Indicates the source of the parameter value.

    ", + "Parameter$ApplyType": "

    Specifies the engine-specific parameter type.

    ", + "Parameter$DataType": "

    Specifies the valid data type for the parameter.

    ", + "Parameter$AllowedValues": "

    Specifies the valid range of values for the parameter.

    ", + "Parameter$MinimumEngineVersion": "

    The earliest engine version to which the parameter can apply.

    ", + "PendingModifiedValues$DBInstanceClass": "

    Contains the new DBInstanceClass for the DB instance that will be applied or is in progress.

    ", + "PendingModifiedValues$MasterUserPassword": "

    Contains the pending or in-progress change of the master credentials for the DB instance.

    ", + "PendingModifiedValues$EngineVersion": "

    Indicates the database engine version.

    ", + "PendingModifiedValues$DBInstanceIdentifier": "

    Contains the new DBInstanceIdentifier for the DB instance that will be applied or is in progress.

    ", + "PendingModifiedValues$StorageType": "

    Specifies the storage type to be associated with the DB instance.

    ", + "PromoteReadReplicaMessage$DBInstanceIdentifier": "

    The DB instance identifier. This value is stored as a lowercase string.

    Constraints:

    • Must be the identifier for an existing Read Replica DB instance
    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: mydbinstance

    ", + "PromoteReadReplicaMessage$PreferredBackupWindow": "

    The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter.

    Default: A 30-minute window selected at random from an 8-hour block of time per region. See the Amazon RDS User Guide for the time blocks for each region from which the default backup windows are assigned.

    Constraints: Must be in the format hh24:mi-hh24:mi. Times should be Universal Time Coordinated (UTC). Must not conflict with the preferred maintenance window. Must be at least 30 minutes.

    ", + "PurchaseReservedDBInstancesOfferingMessage$ReservedDBInstancesOfferingId": "

    The ID of the Reserved DB instance offering to purchase.

    Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706

    ", + "PurchaseReservedDBInstancesOfferingMessage$ReservedDBInstanceId": "

    Customer-specified identifier to track this reservation.

    Example: myreservationID
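
A sketch of the purchase call, reusing the example identifiers from the documentation above:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/rds"
)

// purchaseReservation buys a reserved DB instance offering and returns the
// resulting reservation; identifiers are the documentation examples.
func purchaseReservation(svc *rds.RDS) (*rds.ReservedDBInstance, error) {
	out, err := svc.PurchaseReservedDBInstancesOffering(&rds.PurchaseReservedDBInstancesOfferingInput{
		ReservedDBInstancesOfferingId: aws.String("438012d3-4052-4cc7-b2e3-8d3372e0e706"),
		ReservedDBInstanceId:          aws.String("myreservationID"),
	})
	if err != nil {
		return nil, err
	}
	return out.ReservedDBInstance, nil
}
```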

    ", + "ReadReplicaDBInstanceIdentifierList$member": null, + "RebootDBInstanceMessage$DBInstanceIdentifier": "

    The DB instance identifier. This parameter is stored as a lowercase string.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RecurringCharge$RecurringChargeFrequency": "

    The frequency of the recurring charge.

    ", + "RemoveSourceIdentifierFromSubscriptionMessage$SubscriptionName": "

    The name of the RDS event notification subscription you want to remove a source identifier from.

    ", + "RemoveSourceIdentifierFromSubscriptionMessage$SourceIdentifier": "

    The source identifier to be removed from the subscription, such as the DB instance identifier for a DB instance or the name of a security group.

    ", + "RemoveTagsFromResourceMessage$ResourceName": "

    The Amazon RDS resource the tags will be removed from. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

    ", + "ReservedDBInstance$ReservedDBInstanceId": "

    The unique identifier for the reservation.

    ", + "ReservedDBInstance$ReservedDBInstancesOfferingId": "

    The offering identifier.

    ", + "ReservedDBInstance$DBInstanceClass": "

    The DB instance class for the reserved DB instance.

    ", + "ReservedDBInstance$CurrencyCode": "

    The currency code for the reserved DB instance.

    ", + "ReservedDBInstance$ProductDescription": "

    The description of the reserved DB instance.

    ", + "ReservedDBInstance$OfferingType": "

    The offering type of this reserved DB instance.

    ", + "ReservedDBInstance$State": "

    The state of the reserved DB instance.

    ", + "ReservedDBInstanceMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "ReservedDBInstancesOffering$ReservedDBInstancesOfferingId": "

    The offering identifier.

    ", + "ReservedDBInstancesOffering$DBInstanceClass": "

    The DB instance class for the reserved DB instance.

    ", + "ReservedDBInstancesOffering$CurrencyCode": "

    The currency code for the reserved DB instance offering.

    ", + "ReservedDBInstancesOffering$ProductDescription": "

    The database engine used by the offering.

    ", + "ReservedDBInstancesOffering$OfferingType": "

    The offering type.

    ", + "ReservedDBInstancesOfferingMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "ResetDBParameterGroupMessage$DBParameterGroupName": "

    The name of the DB parameter group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBInstanceIdentifier": "

    Name of the DB instance to create from the DB snapshot. This parameter isn't case sensitive.

    Constraints:

    • Must contain from 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-snapshot-id

    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBSnapshotIdentifier": "

    The identifier for the DB snapshot to restore from.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBInstanceClass": "

    The compute and memory capacity of the Amazon RDS DB instance.

    Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge | db.t2.micro | db.t2.small | db.t2.medium

    ", + "RestoreDBInstanceFromDBSnapshotMessage$AvailabilityZone": "

    The EC2 Availability Zone that the database instance will be created in.

    Default: A random, system-chosen Availability Zone.

    Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    Example: us-east-1a

    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBSubnetGroupName": "

    The DB subnet group name to use for the new instance.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$LicenseModel": "

    License model information for the restored DB instance.

    Default: Same as source.

    Valid values: license-included | bring-your-own-license | general-public-license

    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBName": "

    The database name for the restored DB instance.

    This parameter doesn't apply to the MySQL engine.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$Engine": "

    The database engine to use for the new instance.

    Default: The same as source

    Constraint: Must be compatible with the engine of the source

    Valid Values: MySQL | oracle-se1 | oracle-se | oracle-ee | sqlserver-ee | sqlserver-se | sqlserver-ex | sqlserver-web | postgres

    ", + "RestoreDBInstanceFromDBSnapshotMessage$OptionGroupName": "

    The name of the option group to be used for the restored DB instance.

    Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$StorageType": "

    Specifies the storage type to be associated with the DB instance.

    Valid values: standard | gp2 | io1

    If you specify io1, you must also include a value for the Iops parameter.

    Default: io1 if the Iops parameter is specified; otherwise standard

    ", + "RestoreDBInstanceFromDBSnapshotMessage$TdeCredentialArn": "

    The ARN from the Key Store with which to associate the instance for TDE encryption.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$TdeCredentialPassword": "

    The password for the given ARN from the Key Store in order to access the device.

    ", + "RestoreDBInstanceToPointInTimeMessage$SourceDBInstanceIdentifier": "

    The identifier of the source DB instance from which to restore.

    Constraints:

    • Must be the identifier of an existing database instance
    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RestoreDBInstanceToPointInTimeMessage$TargetDBInstanceIdentifier": "

    The name of the new database instance to be created.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RestoreDBInstanceToPointInTimeMessage$DBInstanceClass": "

    The compute and memory capacity of the Amazon RDS DB instance.

    Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge | db.t2.micro | db.t2.small | db.t2.medium

    Default: The same DBInstanceClass as the original DB instance.

    ", + "RestoreDBInstanceToPointInTimeMessage$AvailabilityZone": "

    The EC2 Availability Zone that the database instance will be created in.

    Default: A random, system-chosen Availability Zone.

    Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    Example: us-east-1a

    ", + "RestoreDBInstanceToPointInTimeMessage$DBSubnetGroupName": "

    The DB subnet group name to use for the new instance.

    ", + "RestoreDBInstanceToPointInTimeMessage$LicenseModel": "

    License model information for the restored DB instance.

    Default: Same as source.

    Valid values: license-included | bring-your-own-license | general-public-license

    ", + "RestoreDBInstanceToPointInTimeMessage$DBName": "

    The database name for the restored DB instance.

    This parameter is not used for the MySQL engine.

    ", + "RestoreDBInstanceToPointInTimeMessage$Engine": "

    The database engine to use for the new instance.

    Default: The same as source

    Constraint: Must be compatible with the engine of the source

    Valid Values: MySQL | oracle-se1 | oracle-se | oracle-ee | sqlserver-ee | sqlserver-se | sqlserver-ex | sqlserver-web | postgres

    ", + "RestoreDBInstanceToPointInTimeMessage$OptionGroupName": "

    The name of the option group to be used for the restored DB instance.

    Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance.

    ", + "RestoreDBInstanceToPointInTimeMessage$StorageType": "

    Specifies the storage type to be associated with the DB instance.

    Valid values: standard | gp2 | io1

    If you specify io1, you must also include a value for the Iops parameter.

    Default: io1 if the Iops parameter is specified; otherwise standard

    ", + "RestoreDBInstanceToPointInTimeMessage$TdeCredentialArn": "

    The ARN from the Key Store with which to associate the instance for TDE encryption.

    ", + "RestoreDBInstanceToPointInTimeMessage$TdeCredentialPassword": "

    The password for the given ARN from the Key Store in order to access the device.

    ", + "RevokeDBSecurityGroupIngressMessage$DBSecurityGroupName": "

    The name of the DB security group to revoke ingress from.

    ", + "RevokeDBSecurityGroupIngressMessage$CIDRIP": "

    The IP range to revoke access from. Must be a valid CIDR range. If CIDRIP is specified, EC2SecurityGroupName, EC2SecurityGroupId and EC2SecurityGroupOwnerId cannot be provided.

    ", + "RevokeDBSecurityGroupIngressMessage$EC2SecurityGroupName": "

    The name of the EC2 security group to revoke access from. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "RevokeDBSecurityGroupIngressMessage$EC2SecurityGroupId": "

    The ID of the EC2 security group to revoke access from. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "RevokeDBSecurityGroupIngressMessage$EC2SecurityGroupOwnerId": "

    The AWS Account Number of the owner of the EC2 security group specified in the EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable value. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.
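
For the common non-VPC, CIDR-based case, only the group name and CIDRIP are supplied; the EC2SecurityGroup* fields must be omitted when CIDRIP is set. A sketch with placeholder values:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/rds"
)

// revokeCIDR removes a CIDR-based ingress rule from a DB security group.
func revokeCIDR(svc *rds.RDS) error {
	_, err := svc.RevokeDBSecurityGroupIngress(&rds.RevokeDBSecurityGroupIngressInput{
		DBSecurityGroupName: aws.String("mydbsecuritygroup"), // hypothetical
		CIDRIP:              aws.String("203.0.113.0/24"),    // placeholder range
	})
	return err
}
```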

    ", + "SourceIdsList$member": null, + "Subnet$SubnetIdentifier": "

    Specifies the identifier of the subnet.

    ", + "Subnet$SubnetStatus": "

    Specifies the status of the subnet.

    ", + "SubnetIdentifierList$member": null, + "Tag$Key": "

    A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and cannot be prefixed with \"aws:\" or \"rds:\". The string may contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").

    ", + "Tag$Value": "

    A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and cannot be prefixed with \"aws:\" or \"rds:\". The string may contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").
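
A sketch of attaching a tag that satisfies the key/value rules above (no \"aws:\" or \"rds:\" prefix, allowed character set); the ARN is a placeholder:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/rds"
)

// tagInstance adds a single key/value tag to an RDS resource.
func tagInstance(svc *rds.RDS) error {
	_, err := svc.AddTagsToResource(&rds.AddTagsToResourceInput{
		ResourceName: aws.String("arn:aws:rds:us-east-1:123456789012:db:mydbinstance"), // placeholder ARN
		Tags: []*rds.Tag{
			{Key: aws.String("environment"), Value: aws.String("staging")},
		},
	})
	return err
}
```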

    ", + "VpcSecurityGroupIdList$member": null, + "VpcSecurityGroupMembership$VpcSecurityGroupId": "

    The ID of the VPC security group.

    ", + "VpcSecurityGroupMembership$Status": "

    The status of the VPC security group.

    " + } + }, + "Subnet": { + "base": "

    This data type is used as a response element in the DescribeDBSubnetGroups action.

    ", + "refs": { + "SubnetList$member": null + } + }, + "SubnetAlreadyInUse": { + "base": "

    The DB subnet is already in use in the Availability Zone.

    ", + "refs": { + } + }, + "SubnetIdentifierList": { + "base": null, + "refs": { + "CreateDBSubnetGroupMessage$SubnetIds": "

    The EC2 Subnet IDs for the DB subnet group.

    ", + "ModifyDBSubnetGroupMessage$SubnetIds": "

    The EC2 subnet IDs for the DB subnet group.

    " + } + }, + "SubnetList": { + "base": null, + "refs": { + "DBSubnetGroup$Subnets": "

    Contains a list of Subnet elements.

    " + } + }, + "SubscriptionAlreadyExistFault": { + "base": "

    The supplied subscription name already exists.

    ", + "refs": { + } + }, + "SubscriptionCategoryNotFoundFault": { + "base": "

    The supplied category does not exist.

    ", + "refs": { + } + }, + "SubscriptionNotFoundFault": { + "base": "

    The subscription name does not exist.

    ", + "refs": { + } + }, + "SupportedCharacterSetsList": { + "base": null, + "refs": { + "DBEngineVersion$SupportedCharacterSets": "

    A list of the character sets supported by this engine for the CharacterSetName parameter of the CreateDBInstance API.

    " + } + }, + "TStamp": { + "base": null, + "refs": { + "DBInstance$InstanceCreateTime": "

    Provides the date and time the DB instance was created.

    ", + "DBInstance$LatestRestorableTime": "

    Specifies the latest time to which a database can be restored with point-in-time restore.

    ", + "DBSnapshot$SnapshotCreateTime": "

    Provides the time (UTC) when the snapshot was taken.

    ", + "DBSnapshot$InstanceCreateTime": "

    Specifies the time (UTC) when the snapshot was taken.

    ", + "DescribeEventsMessage$StartTime": "

    The beginning of the time interval to retrieve events for, specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.

    Example: 2009-07-08T18:00Z

    ", + "DescribeEventsMessage$EndTime": "

    The end of the time interval for which to retrieve events, specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.

    Example: 2009-07-08T18:00Z

    ", + "Event$Date": "

    Specifies the date and time of the event.

    ", + "ReservedDBInstance$StartTime": "

    The time the reservation started.

    ", + "RestoreDBInstanceToPointInTimeMessage$RestoreTime": "

    The date and time to restore from.

    Valid Values: Value must be a UTC time

    Constraints:

    • Must be before the latest restorable time for the DB instance
    • Cannot be specified if UseLatestRestorableTime parameter is true

    Example: 2009-09-07T23:45:00Z
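
A sketch of a point-in-time restore honoring these constraints; RestoreTime and UseLatestRestorableTime are mutually exclusive, and the identifiers are placeholders:

```go
package main

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/rds"
)

// restoreToPoint creates a new instance restored to a fixed UTC time.
func restoreToPoint(svc *rds.RDS) error {
	when, err := time.Parse(time.RFC3339, "2009-09-07T23:45:00Z")
	if err != nil {
		return err
	}
	_, err = svc.RestoreDBInstanceToPointInTime(&rds.RestoreDBInstanceToPointInTimeInput{
		SourceDBInstanceIdentifier: aws.String("mydbinstance"),          // hypothetical
		TargetDBInstanceIdentifier: aws.String("mydbinstance-restored"), // hypothetical
		RestoreTime:                aws.Time(when),
		// UseLatestRestorableTime: aws.Bool(true), // alternative to RestoreTime
	})
	return err
}
```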

    " + } + }, + "Tag": { + "base": "

    Metadata assigned to an Amazon RDS resource consisting of a key-value pair.

    ", + "refs": { + "TagList$member": null + } + }, + "TagList": { + "base": "

    A list of tags.

    ", + "refs": { + "AddTagsToResourceMessage$Tags": "

    The tags to be assigned to the Amazon RDS resource.

    ", + "CopyDBParameterGroupMessage$Tags": null, + "CopyDBSnapshotMessage$Tags": null, + "CopyOptionGroupMessage$Tags": null, + "CreateDBInstanceMessage$Tags": null, + "CreateDBInstanceReadReplicaMessage$Tags": null, + "CreateDBParameterGroupMessage$Tags": null, + "CreateDBSecurityGroupMessage$Tags": null, + "CreateDBSnapshotMessage$Tags": null, + "CreateDBSubnetGroupMessage$Tags": null, + "CreateEventSubscriptionMessage$Tags": null, + "CreateOptionGroupMessage$Tags": null, + "PurchaseReservedDBInstancesOfferingMessage$Tags": null, + "RestoreDBInstanceFromDBSnapshotMessage$Tags": null, + "RestoreDBInstanceToPointInTimeMessage$Tags": null, + "TagListMessage$TagList": "

    List of tags returned by the ListTagsForResource operation.

    " + } + }, + "TagListMessage": { + "base": "

    ", + "refs": { + } + }, + "VpcSecurityGroupIdList": { + "base": null, + "refs": { + "CreateDBInstanceMessage$VpcSecurityGroupIds": "

    A list of EC2 VPC security groups to associate with this DB instance.

    Default: The default EC2 VPC security group for the DB subnet group's VPC.

    ", + "ModifyDBInstanceMessage$VpcSecurityGroupIds": "

    A list of EC2 VPC security groups to authorize on this DB instance. This change is asynchronously applied as soon as possible.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "OptionConfiguration$VpcSecurityGroupMemberships": "

    A list of VpcSecurityGroupMembership name strings used for this option.

    " + } + }, + "VpcSecurityGroupMembership": { + "base": "

    This data type is used as a response element for queries on VPC security group membership.

    ", + "refs": { + "VpcSecurityGroupMembershipList$member": null + } + }, + "VpcSecurityGroupMembershipList": { + "base": null, + "refs": { + "DBInstance$VpcSecurityGroups": "

    Provides a list of VPC security group elements that the DB instance belongs to.

    ", + "Option$VpcSecurityGroupMemberships": "

    If the option requires access to a port, then this VPC security group allows access to the port.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2014-09-01/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2014-09-01/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2014-09-01/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/api-2.json new file mode 100644 index 000000000..ba34a1b94 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/api-2.json @@ -0,0 +1,4720 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2014-10-31", + "endpointPrefix":"rds", + "protocol":"query", + "serviceAbbreviation":"Amazon RDS", + "serviceFullName":"Amazon Relational Database Service", + "signatureVersion":"v4", + "xmlNamespace":"http://rds.amazonaws.com/doc/2014-10-31/" + }, + "operations":{ + "AddSourceIdentifierToSubscription":{ + "name":"AddSourceIdentifierToSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddSourceIdentifierToSubscriptionMessage"}, + "output":{ + "shape":"AddSourceIdentifierToSubscriptionResult", + "resultWrapper":"AddSourceIdentifierToSubscriptionResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"SourceNotFoundFault"} + ] + }, + "AddTagsToResource":{ + "name":"AddTagsToResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddTagsToResourceMessage"}, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "ApplyPendingMaintenanceAction":{ + "name":"ApplyPendingMaintenanceAction", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ApplyPendingMaintenanceActionMessage"}, + "output":{ + "shape":"ApplyPendingMaintenanceActionResult", + "resultWrapper":"ApplyPendingMaintenanceActionResult" + }, + "errors":[ + {"shape":"ResourceNotFoundFault"} + ] + }, + "AuthorizeDBSecurityGroupIngress":{ + "name":"AuthorizeDBSecurityGroupIngress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AuthorizeDBSecurityGroupIngressMessage"}, + "output":{ + "shape":"AuthorizeDBSecurityGroupIngressResult", + "resultWrapper":"AuthorizeDBSecurityGroupIngressResult" + }, + "errors":[ + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"InvalidDBSecurityGroupStateFault"}, + {"shape":"AuthorizationAlreadyExistsFault"}, + {"shape":"AuthorizationQuotaExceededFault"} + ] + }, + "CopyDBClusterSnapshot":{ + "name":"CopyDBClusterSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CopyDBClusterSnapshotMessage"}, + "output":{ + "shape":"CopyDBClusterSnapshotResult", + "resultWrapper":"CopyDBClusterSnapshotResult" + }, + "errors":[ + {"shape":"DBClusterSnapshotAlreadyExistsFault"}, + {"shape":"DBClusterSnapshotNotFoundFault"}, + {"shape":"InvalidDBClusterStateFault"} + ] + }, + "CopyDBParameterGroup":{ + "name":"CopyDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CopyDBParameterGroupMessage"}, + "output":{ + "shape":"CopyDBParameterGroupResult", + "resultWrapper":"CopyDBParameterGroupResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"DBParameterGroupAlreadyExistsFault"}, + {"shape":"DBParameterGroupQuotaExceededFault"} + ] + }, + "CopyDBSnapshot":{ + "name":"CopyDBSnapshot", + "http":{ + 
"method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CopyDBSnapshotMessage"}, + "output":{ + "shape":"CopyDBSnapshotResult", + "resultWrapper":"CopyDBSnapshotResult" + }, + "errors":[ + {"shape":"DBSnapshotAlreadyExistsFault"}, + {"shape":"DBSnapshotNotFoundFault"}, + {"shape":"InvalidDBSnapshotStateFault"}, + {"shape":"SnapshotQuotaExceededFault"}, + {"shape":"KMSKeyNotAccessibleFault"} + ] + }, + "CopyOptionGroup":{ + "name":"CopyOptionGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CopyOptionGroupMessage"}, + "output":{ + "shape":"CopyOptionGroupResult", + "resultWrapper":"CopyOptionGroupResult" + }, + "errors":[ + {"shape":"OptionGroupAlreadyExistsFault"}, + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"OptionGroupQuotaExceededFault"} + ] + }, + "CreateDBCluster":{ + "name":"CreateDBCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBClusterMessage"}, + "output":{ + "shape":"CreateDBClusterResult", + "resultWrapper":"CreateDBClusterResult" + }, + "errors":[ + {"shape":"DBClusterAlreadyExistsFault"}, + {"shape":"InsufficientStorageClusterCapacityFault"}, + {"shape":"DBClusterQuotaExceededFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"InvalidDBClusterStateFault"}, + {"shape":"InvalidDBSubnetGroupStateFault"}, + {"shape":"InvalidSubnet"}, + {"shape":"DBClusterParameterGroupNotFoundFault"}, + {"shape":"KMSKeyNotAccessibleFault"}, + {"shape":"DBClusterNotFoundFault"} + ] + }, + "CreateDBClusterParameterGroup":{ + "name":"CreateDBClusterParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBClusterParameterGroupMessage"}, + "output":{ + "shape":"CreateDBClusterParameterGroupResult", + "resultWrapper":"CreateDBClusterParameterGroupResult" + }, + "errors":[ + {"shape":"DBParameterGroupQuotaExceededFault"}, + {"shape":"DBParameterGroupAlreadyExistsFault"} + ] + }, + "CreateDBClusterSnapshot":{ + "name":"CreateDBClusterSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBClusterSnapshotMessage"}, + "output":{ + "shape":"CreateDBClusterSnapshotResult", + "resultWrapper":"CreateDBClusterSnapshotResult" + }, + "errors":[ + {"shape":"DBClusterSnapshotAlreadyExistsFault"}, + {"shape":"InvalidDBClusterStateFault"}, + {"shape":"DBClusterNotFoundFault"}, + {"shape":"SnapshotQuotaExceededFault"}, + {"shape":"InvalidDBClusterSnapshotStateFault"} + ] + }, + "CreateDBInstance":{ + "name":"CreateDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBInstanceMessage"}, + "output":{ + "shape":"CreateDBInstanceResult", + "resultWrapper":"CreateDBInstanceResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidDBClusterStateFault"}, + {"shape":"InvalidSubnet"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"DBClusterNotFoundFault"}, + {"shape":"StorageTypeNotSupportedFault"}, + {"shape":"AuthorizationNotFoundFault"}, + {"shape":"KMSKeyNotAccessibleFault"}, + 
{"shape":"DomainNotFoundFault"} + ] + }, + "CreateDBInstanceReadReplica":{ + "name":"CreateDBInstanceReadReplica", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBInstanceReadReplicaMessage"}, + "output":{ + "shape":"CreateDBInstanceReadReplicaResult", + "resultWrapper":"CreateDBInstanceReadReplicaResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"DBSubnetGroupNotAllowedFault"}, + {"shape":"InvalidDBSubnetGroupFault"}, + {"shape":"StorageTypeNotSupportedFault"}, + {"shape":"KMSKeyNotAccessibleFault"} + ] + }, + "CreateDBParameterGroup":{ + "name":"CreateDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBParameterGroupMessage"}, + "output":{ + "shape":"CreateDBParameterGroupResult", + "resultWrapper":"CreateDBParameterGroupResult" + }, + "errors":[ + {"shape":"DBParameterGroupQuotaExceededFault"}, + {"shape":"DBParameterGroupAlreadyExistsFault"} + ] + }, + "CreateDBSecurityGroup":{ + "name":"CreateDBSecurityGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBSecurityGroupMessage"}, + "output":{ + "shape":"CreateDBSecurityGroupResult", + "resultWrapper":"CreateDBSecurityGroupResult" + }, + "errors":[ + {"shape":"DBSecurityGroupAlreadyExistsFault"}, + {"shape":"DBSecurityGroupQuotaExceededFault"}, + {"shape":"DBSecurityGroupNotSupportedFault"} + ] + }, + "CreateDBSnapshot":{ + "name":"CreateDBSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBSnapshotMessage"}, + "output":{ + "shape":"CreateDBSnapshotResult", + "resultWrapper":"CreateDBSnapshotResult" + }, + "errors":[ + {"shape":"DBSnapshotAlreadyExistsFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"SnapshotQuotaExceededFault"} + ] + }, + "CreateDBSubnetGroup":{ + "name":"CreateDBSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBSubnetGroupMessage"}, + "output":{ + "shape":"CreateDBSubnetGroupResult", + "resultWrapper":"CreateDBSubnetGroupResult" + }, + "errors":[ + {"shape":"DBSubnetGroupAlreadyExistsFault"}, + {"shape":"DBSubnetGroupQuotaExceededFault"}, + {"shape":"DBSubnetQuotaExceededFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"} + ] + }, + "CreateEventSubscription":{ + "name":"CreateEventSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateEventSubscriptionMessage"}, + "output":{ + "shape":"CreateEventSubscriptionResult", + "resultWrapper":"CreateEventSubscriptionResult" + }, + "errors":[ + {"shape":"EventSubscriptionQuotaExceededFault"}, + {"shape":"SubscriptionAlreadyExistFault"}, + {"shape":"SNSInvalidTopicFault"}, + {"shape":"SNSNoAuthorizationFault"}, + {"shape":"SNSTopicArnNotFoundFault"}, + {"shape":"SubscriptionCategoryNotFoundFault"}, + {"shape":"SourceNotFoundFault"} + ] + }, + "CreateOptionGroup":{ + 
"name":"CreateOptionGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateOptionGroupMessage"}, + "output":{ + "shape":"CreateOptionGroupResult", + "resultWrapper":"CreateOptionGroupResult" + }, + "errors":[ + {"shape":"OptionGroupAlreadyExistsFault"}, + {"shape":"OptionGroupQuotaExceededFault"} + ] + }, + "DeleteDBCluster":{ + "name":"DeleteDBCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBClusterMessage"}, + "output":{ + "shape":"DeleteDBClusterResult", + "resultWrapper":"DeleteDBClusterResult" + }, + "errors":[ + {"shape":"DBClusterNotFoundFault"}, + {"shape":"InvalidDBClusterStateFault"}, + {"shape":"DBClusterSnapshotAlreadyExistsFault"}, + {"shape":"SnapshotQuotaExceededFault"}, + {"shape":"InvalidDBClusterSnapshotStateFault"} + ] + }, + "DeleteDBClusterParameterGroup":{ + "name":"DeleteDBClusterParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBClusterParameterGroupMessage"}, + "errors":[ + {"shape":"InvalidDBParameterGroupStateFault"}, + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "DeleteDBClusterSnapshot":{ + "name":"DeleteDBClusterSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBClusterSnapshotMessage"}, + "output":{ + "shape":"DeleteDBClusterSnapshotResult", + "resultWrapper":"DeleteDBClusterSnapshotResult" + }, + "errors":[ + {"shape":"InvalidDBClusterSnapshotStateFault"}, + {"shape":"DBClusterSnapshotNotFoundFault"} + ] + }, + "DeleteDBInstance":{ + "name":"DeleteDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBInstanceMessage"}, + "output":{ + "shape":"DeleteDBInstanceResult", + "resultWrapper":"DeleteDBInstanceResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBSnapshotAlreadyExistsFault"}, + {"shape":"SnapshotQuotaExceededFault"}, + {"shape":"InvalidDBClusterStateFault"} + ] + }, + "DeleteDBParameterGroup":{ + "name":"DeleteDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBParameterGroupMessage"}, + "errors":[ + {"shape":"InvalidDBParameterGroupStateFault"}, + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "DeleteDBSecurityGroup":{ + "name":"DeleteDBSecurityGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBSecurityGroupMessage"}, + "errors":[ + {"shape":"InvalidDBSecurityGroupStateFault"}, + {"shape":"DBSecurityGroupNotFoundFault"} + ] + }, + "DeleteDBSnapshot":{ + "name":"DeleteDBSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBSnapshotMessage"}, + "output":{ + "shape":"DeleteDBSnapshotResult", + "resultWrapper":"DeleteDBSnapshotResult" + }, + "errors":[ + {"shape":"InvalidDBSnapshotStateFault"}, + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "DeleteDBSubnetGroup":{ + "name":"DeleteDBSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBSubnetGroupMessage"}, + "errors":[ + {"shape":"InvalidDBSubnetGroupStateFault"}, + {"shape":"InvalidDBSubnetStateFault"}, + {"shape":"DBSubnetGroupNotFoundFault"} + ] + }, + "DeleteEventSubscription":{ + "name":"DeleteEventSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteEventSubscriptionMessage"}, + "output":{ + "shape":"DeleteEventSubscriptionResult", + "resultWrapper":"DeleteEventSubscriptionResult" + }, + "errors":[ + 
{"shape":"SubscriptionNotFoundFault"}, + {"shape":"InvalidEventSubscriptionStateFault"} + ] + }, + "DeleteOptionGroup":{ + "name":"DeleteOptionGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteOptionGroupMessage"}, + "errors":[ + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"InvalidOptionGroupStateFault"} + ] + }, + "DescribeAccountAttributes":{ + "name":"DescribeAccountAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAccountAttributesMessage"}, + "output":{ + "shape":"AccountAttributesMessage", + "resultWrapper":"DescribeAccountAttributesResult" + } + }, + "DescribeCertificates":{ + "name":"DescribeCertificates", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeCertificatesMessage"}, + "output":{ + "shape":"CertificateMessage", + "resultWrapper":"DescribeCertificatesResult" + }, + "errors":[ + {"shape":"CertificateNotFoundFault"} + ] + }, + "DescribeDBClusterParameterGroups":{ + "name":"DescribeDBClusterParameterGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBClusterParameterGroupsMessage"}, + "output":{ + "shape":"DBClusterParameterGroupsMessage", + "resultWrapper":"DescribeDBClusterParameterGroupsResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "DescribeDBClusterParameters":{ + "name":"DescribeDBClusterParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBClusterParametersMessage"}, + "output":{ + "shape":"DBClusterParameterGroupDetails", + "resultWrapper":"DescribeDBClusterParametersResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "DescribeDBClusterSnapshotAttributes":{ + "name":"DescribeDBClusterSnapshotAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBClusterSnapshotAttributesMessage"}, + "output":{ + "shape":"DescribeDBClusterSnapshotAttributesResult", + "resultWrapper":"DescribeDBClusterSnapshotAttributesResult" + }, + "errors":[ + {"shape":"DBClusterSnapshotNotFoundFault"} + ] + }, + "DescribeDBClusterSnapshots":{ + "name":"DescribeDBClusterSnapshots", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBClusterSnapshotsMessage"}, + "output":{ + "shape":"DBClusterSnapshotMessage", + "resultWrapper":"DescribeDBClusterSnapshotsResult" + }, + "errors":[ + {"shape":"DBClusterSnapshotNotFoundFault"} + ] + }, + "DescribeDBClusters":{ + "name":"DescribeDBClusters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBClustersMessage"}, + "output":{ + "shape":"DBClusterMessage", + "resultWrapper":"DescribeDBClustersResult" + }, + "errors":[ + {"shape":"DBClusterNotFoundFault"} + ] + }, + "DescribeDBEngineVersions":{ + "name":"DescribeDBEngineVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBEngineVersionsMessage"}, + "output":{ + "shape":"DBEngineVersionMessage", + "resultWrapper":"DescribeDBEngineVersionsResult" + } + }, + "DescribeDBInstances":{ + "name":"DescribeDBInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBInstancesMessage"}, + "output":{ + "shape":"DBInstanceMessage", + "resultWrapper":"DescribeDBInstancesResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"} + ] + }, + "DescribeDBLogFiles":{ + "name":"DescribeDBLogFiles", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"DescribeDBLogFilesMessage"}, + "output":{ + "shape":"DescribeDBLogFilesResponse", + "resultWrapper":"DescribeDBLogFilesResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"} + ] + }, + "DescribeDBParameterGroups":{ + "name":"DescribeDBParameterGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBParameterGroupsMessage"}, + "output":{ + "shape":"DBParameterGroupsMessage", + "resultWrapper":"DescribeDBParameterGroupsResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "DescribeDBParameters":{ + "name":"DescribeDBParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBParametersMessage"}, + "output":{ + "shape":"DBParameterGroupDetails", + "resultWrapper":"DescribeDBParametersResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "DescribeDBSecurityGroups":{ + "name":"DescribeDBSecurityGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBSecurityGroupsMessage"}, + "output":{ + "shape":"DBSecurityGroupMessage", + "resultWrapper":"DescribeDBSecurityGroupsResult" + }, + "errors":[ + {"shape":"DBSecurityGroupNotFoundFault"} + ] + }, + "DescribeDBSnapshotAttributes":{ + "name":"DescribeDBSnapshotAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBSnapshotAttributesMessage"}, + "output":{ + "shape":"DescribeDBSnapshotAttributesResult", + "resultWrapper":"DescribeDBSnapshotAttributesResult" + }, + "errors":[ + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "DescribeDBSnapshots":{ + "name":"DescribeDBSnapshots", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBSnapshotsMessage"}, + "output":{ + "shape":"DBSnapshotMessage", + "resultWrapper":"DescribeDBSnapshotsResult" + }, + "errors":[ + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "DescribeDBSubnetGroups":{ + "name":"DescribeDBSubnetGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBSubnetGroupsMessage"}, + "output":{ + "shape":"DBSubnetGroupMessage", + "resultWrapper":"DescribeDBSubnetGroupsResult" + }, + "errors":[ + {"shape":"DBSubnetGroupNotFoundFault"} + ] + }, + "DescribeEngineDefaultClusterParameters":{ + "name":"DescribeEngineDefaultClusterParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEngineDefaultClusterParametersMessage"}, + "output":{ + "shape":"DescribeEngineDefaultClusterParametersResult", + "resultWrapper":"DescribeEngineDefaultClusterParametersResult" + } + }, + "DescribeEngineDefaultParameters":{ + "name":"DescribeEngineDefaultParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEngineDefaultParametersMessage"}, + "output":{ + "shape":"DescribeEngineDefaultParametersResult", + "resultWrapper":"DescribeEngineDefaultParametersResult" + } + }, + "DescribeEventCategories":{ + "name":"DescribeEventCategories", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventCategoriesMessage"}, + "output":{ + "shape":"EventCategoriesMessage", + "resultWrapper":"DescribeEventCategoriesResult" + } + }, + "DescribeEventSubscriptions":{ + "name":"DescribeEventSubscriptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventSubscriptionsMessage"}, + "output":{ + "shape":"EventSubscriptionsMessage", + "resultWrapper":"DescribeEventSubscriptionsResult" + }, + "errors":[ + 
{"shape":"SubscriptionNotFoundFault"} + ] + }, + "DescribeEvents":{ + "name":"DescribeEvents", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventsMessage"}, + "output":{ + "shape":"EventsMessage", + "resultWrapper":"DescribeEventsResult" + } + }, + "DescribeOptionGroupOptions":{ + "name":"DescribeOptionGroupOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOptionGroupOptionsMessage"}, + "output":{ + "shape":"OptionGroupOptionsMessage", + "resultWrapper":"DescribeOptionGroupOptionsResult" + } + }, + "DescribeOptionGroups":{ + "name":"DescribeOptionGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOptionGroupsMessage"}, + "output":{ + "shape":"OptionGroups", + "resultWrapper":"DescribeOptionGroupsResult" + }, + "errors":[ + {"shape":"OptionGroupNotFoundFault"} + ] + }, + "DescribeOrderableDBInstanceOptions":{ + "name":"DescribeOrderableDBInstanceOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOrderableDBInstanceOptionsMessage"}, + "output":{ + "shape":"OrderableDBInstanceOptionsMessage", + "resultWrapper":"DescribeOrderableDBInstanceOptionsResult" + } + }, + "DescribePendingMaintenanceActions":{ + "name":"DescribePendingMaintenanceActions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribePendingMaintenanceActionsMessage"}, + "output":{ + "shape":"PendingMaintenanceActionsMessage", + "resultWrapper":"DescribePendingMaintenanceActionsResult" + }, + "errors":[ + {"shape":"ResourceNotFoundFault"} + ] + }, + "DescribeReservedDBInstances":{ + "name":"DescribeReservedDBInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedDBInstancesMessage"}, + "output":{ + "shape":"ReservedDBInstanceMessage", + "resultWrapper":"DescribeReservedDBInstancesResult" + }, + "errors":[ + {"shape":"ReservedDBInstanceNotFoundFault"} + ] + }, + "DescribeReservedDBInstancesOfferings":{ + "name":"DescribeReservedDBInstancesOfferings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedDBInstancesOfferingsMessage"}, + "output":{ + "shape":"ReservedDBInstancesOfferingMessage", + "resultWrapper":"DescribeReservedDBInstancesOfferingsResult" + }, + "errors":[ + {"shape":"ReservedDBInstancesOfferingNotFoundFault"} + ] + }, + "DownloadDBLogFilePortion":{ + "name":"DownloadDBLogFilePortion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DownloadDBLogFilePortionMessage"}, + "output":{ + "shape":"DownloadDBLogFilePortionDetails", + "resultWrapper":"DownloadDBLogFilePortionResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBLogFileNotFoundFault"} + ] + }, + "FailoverDBCluster":{ + "name":"FailoverDBCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"FailoverDBClusterMessage"}, + "output":{ + "shape":"FailoverDBClusterResult", + "resultWrapper":"FailoverDBClusterResult" + }, + "errors":[ + {"shape":"DBClusterNotFoundFault"}, + {"shape":"InvalidDBClusterStateFault"} + ] + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceMessage"}, + "output":{ + "shape":"TagListMessage", + "resultWrapper":"ListTagsForResourceResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "ModifyDBCluster":{ + "name":"ModifyDBCluster", + 
"http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBClusterMessage"}, + "output":{ + "shape":"ModifyDBClusterResult", + "resultWrapper":"ModifyDBClusterResult" + }, + "errors":[ + {"shape":"DBClusterNotFoundFault"}, + {"shape":"InvalidDBClusterStateFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"InvalidDBSubnetGroupStateFault"}, + {"shape":"InvalidSubnet"}, + {"shape":"DBClusterParameterGroupNotFoundFault"}, + {"shape":"InvalidDBSecurityGroupStateFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBClusterAlreadyExistsFault"} + ] + }, + "ModifyDBClusterParameterGroup":{ + "name":"ModifyDBClusterParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBClusterParameterGroupMessage"}, + "output":{ + "shape":"DBClusterParameterGroupNameMessage", + "resultWrapper":"ModifyDBClusterParameterGroupResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"InvalidDBParameterGroupStateFault"} + ] + }, + "ModifyDBClusterSnapshotAttribute":{ + "name":"ModifyDBClusterSnapshotAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBClusterSnapshotAttributeMessage"}, + "output":{ + "shape":"ModifyDBClusterSnapshotAttributeResult", + "resultWrapper":"ModifyDBClusterSnapshotAttributeResult" + }, + "errors":[ + {"shape":"DBClusterSnapshotNotFoundFault"}, + {"shape":"InvalidDBClusterSnapshotStateFault"}, + {"shape":"SharedSnapshotQuotaExceededFault"} + ] + }, + "ModifyDBInstance":{ + "name":"ModifyDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBInstanceMessage"}, + "output":{ + "shape":"ModifyDBInstanceResult", + "resultWrapper":"ModifyDBInstanceResult" + }, + "errors":[ + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"InvalidDBSecurityGroupStateFault"}, + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"DBUpgradeDependencyFailureFault"}, + {"shape":"StorageTypeNotSupportedFault"}, + {"shape":"AuthorizationNotFoundFault"}, + {"shape":"CertificateNotFoundFault"}, + {"shape":"DomainNotFoundFault"} + ] + }, + "ModifyDBParameterGroup":{ + "name":"ModifyDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBParameterGroupMessage"}, + "output":{ + "shape":"DBParameterGroupNameMessage", + "resultWrapper":"ModifyDBParameterGroupResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"InvalidDBParameterGroupStateFault"} + ] + }, + "ModifyDBSnapshotAttribute":{ + "name":"ModifyDBSnapshotAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBSnapshotAttributeMessage"}, + "output":{ + "shape":"ModifyDBSnapshotAttributeResult", + "resultWrapper":"ModifyDBSnapshotAttributeResult" + }, + "errors":[ + {"shape":"DBSnapshotNotFoundFault"}, + {"shape":"InvalidDBSnapshotStateFault"}, + {"shape":"SharedSnapshotQuotaExceededFault"} + ] + }, + "ModifyDBSubnetGroup":{ + "name":"ModifyDBSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"ModifyDBSubnetGroupMessage"}, + "output":{ + "shape":"ModifyDBSubnetGroupResult", + "resultWrapper":"ModifyDBSubnetGroupResult" + }, + "errors":[ + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetQuotaExceededFault"}, + {"shape":"SubnetAlreadyInUse"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"} + ] + }, + "ModifyEventSubscription":{ + "name":"ModifyEventSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyEventSubscriptionMessage"}, + "output":{ + "shape":"ModifyEventSubscriptionResult", + "resultWrapper":"ModifyEventSubscriptionResult" + }, + "errors":[ + {"shape":"EventSubscriptionQuotaExceededFault"}, + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"SNSInvalidTopicFault"}, + {"shape":"SNSNoAuthorizationFault"}, + {"shape":"SNSTopicArnNotFoundFault"}, + {"shape":"SubscriptionCategoryNotFoundFault"} + ] + }, + "ModifyOptionGroup":{ + "name":"ModifyOptionGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyOptionGroupMessage"}, + "output":{ + "shape":"ModifyOptionGroupResult", + "resultWrapper":"ModifyOptionGroupResult" + }, + "errors":[ + {"shape":"InvalidOptionGroupStateFault"}, + {"shape":"OptionGroupNotFoundFault"} + ] + }, + "PromoteReadReplica":{ + "name":"PromoteReadReplica", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PromoteReadReplicaMessage"}, + "output":{ + "shape":"PromoteReadReplicaResult", + "resultWrapper":"PromoteReadReplicaResult" + }, + "errors":[ + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBInstanceNotFoundFault"} + ] + }, + "PromoteReadReplicaDBCluster":{ + "name":"PromoteReadReplicaDBCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PromoteReadReplicaDBClusterMessage"}, + "output":{ + "shape":"PromoteReadReplicaDBClusterResult", + "resultWrapper":"PromoteReadReplicaDBClusterResult" + }, + "errors":[ + {"shape":"DBClusterNotFoundFault"}, + {"shape":"InvalidDBClusterStateFault"} + ] + }, + "PurchaseReservedDBInstancesOffering":{ + "name":"PurchaseReservedDBInstancesOffering", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PurchaseReservedDBInstancesOfferingMessage"}, + "output":{ + "shape":"PurchaseReservedDBInstancesOfferingResult", + "resultWrapper":"PurchaseReservedDBInstancesOfferingResult" + }, + "errors":[ + {"shape":"ReservedDBInstancesOfferingNotFoundFault"}, + {"shape":"ReservedDBInstanceAlreadyExistsFault"}, + {"shape":"ReservedDBInstanceQuotaExceededFault"} + ] + }, + "RebootDBInstance":{ + "name":"RebootDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RebootDBInstanceMessage"}, + "output":{ + "shape":"RebootDBInstanceResult", + "resultWrapper":"RebootDBInstanceResult" + }, + "errors":[ + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBInstanceNotFoundFault"} + ] + }, + "RemoveSourceIdentifierFromSubscription":{ + "name":"RemoveSourceIdentifierFromSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveSourceIdentifierFromSubscriptionMessage"}, + "output":{ + "shape":"RemoveSourceIdentifierFromSubscriptionResult", + "resultWrapper":"RemoveSourceIdentifierFromSubscriptionResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"SourceNotFoundFault"} + ] + }, + "RemoveTagsFromResource":{ + "name":"RemoveTagsFromResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"RemoveTagsFromResourceMessage"}, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "ResetDBClusterParameterGroup":{ + "name":"ResetDBClusterParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetDBClusterParameterGroupMessage"}, + "output":{ + "shape":"DBClusterParameterGroupNameMessage", + "resultWrapper":"ResetDBClusterParameterGroupResult" + }, + "errors":[ + {"shape":"InvalidDBParameterGroupStateFault"}, + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "ResetDBParameterGroup":{ + "name":"ResetDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetDBParameterGroupMessage"}, + "output":{ + "shape":"DBParameterGroupNameMessage", + "resultWrapper":"ResetDBParameterGroupResult" + }, + "errors":[ + {"shape":"InvalidDBParameterGroupStateFault"}, + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "RestoreDBClusterFromSnapshot":{ + "name":"RestoreDBClusterFromSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreDBClusterFromSnapshotMessage"}, + "output":{ + "shape":"RestoreDBClusterFromSnapshotResult", + "resultWrapper":"RestoreDBClusterFromSnapshotResult" + }, + "errors":[ + {"shape":"DBClusterAlreadyExistsFault"}, + {"shape":"DBClusterQuotaExceededFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSnapshotNotFoundFault"}, + {"shape":"DBClusterSnapshotNotFoundFault"}, + {"shape":"InsufficientDBClusterCapacityFault"}, + {"shape":"InsufficientStorageClusterCapacityFault"}, + {"shape":"InvalidDBSnapshotStateFault"}, + {"shape":"InvalidDBClusterSnapshotStateFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"InvalidRestoreFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"InvalidSubnet"}, + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"KMSKeyNotAccessibleFault"} + ] + }, + "RestoreDBClusterToPointInTime":{ + "name":"RestoreDBClusterToPointInTime", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreDBClusterToPointInTimeMessage"}, + "output":{ + "shape":"RestoreDBClusterToPointInTimeResult", + "resultWrapper":"RestoreDBClusterToPointInTimeResult" + }, + "errors":[ + {"shape":"DBClusterAlreadyExistsFault"}, + {"shape":"DBClusterQuotaExceededFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBClusterNotFoundFault"}, + {"shape":"DBClusterSnapshotNotFoundFault"}, + {"shape":"InsufficientDBClusterCapacityFault"}, + {"shape":"InvalidDBSnapshotStateFault"}, + {"shape":"InvalidDBClusterSnapshotStateFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"InvalidRestoreFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"InvalidSubnet"}, + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"KMSKeyNotAccessibleFault"} + ] + }, + "RestoreDBInstanceFromDBSnapshot":{ + "name":"RestoreDBInstanceFromDBSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreDBInstanceFromDBSnapshotMessage"}, + "output":{ + "shape":"RestoreDBInstanceFromDBSnapshotResult", + "resultWrapper":"RestoreDBInstanceFromDBSnapshotResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"DBSnapshotNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + 
{"shape":"InvalidDBSnapshotStateFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"InvalidRestoreFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"StorageTypeNotSupportedFault"}, + {"shape":"AuthorizationNotFoundFault"}, + {"shape":"KMSKeyNotAccessibleFault"}, + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"DomainNotFoundFault"} + ] + }, + "RestoreDBInstanceToPointInTime":{ + "name":"RestoreDBInstanceToPointInTime", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreDBInstanceToPointInTimeMessage"}, + "output":{ + "shape":"RestoreDBInstanceToPointInTimeResult", + "resultWrapper":"RestoreDBInstanceToPointInTimeResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"PointInTimeRestoreNotEnabledFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"InvalidRestoreFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"StorageTypeNotSupportedFault"}, + {"shape":"AuthorizationNotFoundFault"}, + {"shape":"KMSKeyNotAccessibleFault"}, + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"DomainNotFoundFault"} + ] + }, + "RevokeDBSecurityGroupIngress":{ + "name":"RevokeDBSecurityGroupIngress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RevokeDBSecurityGroupIngressMessage"}, + "output":{ + "shape":"RevokeDBSecurityGroupIngressResult", + "resultWrapper":"RevokeDBSecurityGroupIngressResult" + }, + "errors":[ + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"AuthorizationNotFoundFault"}, + {"shape":"InvalidDBSecurityGroupStateFault"} + ] + } + }, + "shapes":{ + "AccountAttributesMessage":{ + "type":"structure", + "members":{ + "AccountQuotas":{"shape":"AccountQuotaList"} + } + }, + "AccountQuota":{ + "type":"structure", + "members":{ + "AccountQuotaName":{"shape":"String"}, + "Used":{"shape":"Long"}, + "Max":{"shape":"Long"} + }, + "wrapper":true + }, + "AccountQuotaList":{ + "type":"list", + "member":{ + "shape":"AccountQuota", + "locationName":"AccountQuota" + } + }, + "AddSourceIdentifierToSubscriptionMessage":{ + "type":"structure", + "required":[ + "SubscriptionName", + "SourceIdentifier" + ], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SourceIdentifier":{"shape":"String"} + } + }, + "AddSourceIdentifierToSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "AddTagsToResourceMessage":{ + "type":"structure", + "required":[ + "ResourceName", + "Tags" + ], + "members":{ + "ResourceName":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "ApplyMethod":{ + "type":"string", + "enum":[ + "immediate", + "pending-reboot" + ] + }, + "ApplyPendingMaintenanceActionMessage":{ + "type":"structure", + "required":[ + "ResourceIdentifier", + "ApplyAction", + "OptInType" + ], + "members":{ + "ResourceIdentifier":{"shape":"String"}, + "ApplyAction":{"shape":"String"}, + 
"OptInType":{"shape":"String"} + } + }, + "ApplyPendingMaintenanceActionResult":{ + "type":"structure", + "members":{ + "ResourcePendingMaintenanceActions":{"shape":"ResourcePendingMaintenanceActions"} + } + }, + "AttributeValueList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"AttributeValue" + } + }, + "AuthorizationAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AuthorizationAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "AuthorizationNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AuthorizationNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "AuthorizationQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AuthorizationQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "AuthorizeDBSecurityGroupIngressMessage":{ + "type":"structure", + "required":["DBSecurityGroupName"], + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "CIDRIP":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupId":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"} + } + }, + "AuthorizeDBSecurityGroupIngressResult":{ + "type":"structure", + "members":{ + "DBSecurityGroup":{"shape":"DBSecurityGroup"} + } + }, + "AvailabilityZone":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"} + }, + "wrapper":true + }, + "AvailabilityZoneList":{ + "type":"list", + "member":{ + "shape":"AvailabilityZone", + "locationName":"AvailabilityZone" + } + }, + "AvailabilityZones":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"AvailabilityZone" + } + }, + "Boolean":{"type":"boolean"}, + "BooleanOptional":{"type":"boolean"}, + "Certificate":{ + "type":"structure", + "members":{ + "CertificateIdentifier":{"shape":"String"}, + "CertificateType":{"shape":"String"}, + "Thumbprint":{"shape":"String"}, + "ValidFrom":{"shape":"TStamp"}, + "ValidTill":{"shape":"TStamp"} + }, + "wrapper":true + }, + "CertificateList":{ + "type":"list", + "member":{ + "shape":"Certificate", + "locationName":"Certificate" + } + }, + "CertificateMessage":{ + "type":"structure", + "members":{ + "Certificates":{"shape":"CertificateList"}, + "Marker":{"shape":"String"} + } + }, + "CertificateNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"CertificateNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "CharacterSet":{ + "type":"structure", + "members":{ + "CharacterSetName":{"shape":"String"}, + "CharacterSetDescription":{"shape":"String"} + } + }, + "CopyDBClusterSnapshotMessage":{ + "type":"structure", + "required":[ + "SourceDBClusterSnapshotIdentifier", + "TargetDBClusterSnapshotIdentifier" + ], + "members":{ + "SourceDBClusterSnapshotIdentifier":{"shape":"String"}, + "TargetDBClusterSnapshotIdentifier":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CopyDBClusterSnapshotResult":{ + "type":"structure", + "members":{ + "DBClusterSnapshot":{"shape":"DBClusterSnapshot"} + } + }, + "CopyDBParameterGroupMessage":{ + "type":"structure", + "required":[ + "SourceDBParameterGroupIdentifier", + "TargetDBParameterGroupIdentifier", + "TargetDBParameterGroupDescription" + ], + "members":{ + "SourceDBParameterGroupIdentifier":{"shape":"String"}, + "TargetDBParameterGroupIdentifier":{"shape":"String"}, + "TargetDBParameterGroupDescription":{"shape":"String"}, + 
"Tags":{"shape":"TagList"} + } + }, + "CopyDBParameterGroupResult":{ + "type":"structure", + "members":{ + "DBParameterGroup":{"shape":"DBParameterGroup"} + } + }, + "CopyDBSnapshotMessage":{ + "type":"structure", + "required":[ + "SourceDBSnapshotIdentifier", + "TargetDBSnapshotIdentifier" + ], + "members":{ + "SourceDBSnapshotIdentifier":{"shape":"String"}, + "TargetDBSnapshotIdentifier":{"shape":"String"}, + "KmsKeyId":{"shape":"String"}, + "Tags":{"shape":"TagList"}, + "CopyTags":{"shape":"BooleanOptional"} + } + }, + "CopyDBSnapshotResult":{ + "type":"structure", + "members":{ + "DBSnapshot":{"shape":"DBSnapshot"} + } + }, + "CopyOptionGroupMessage":{ + "type":"structure", + "required":[ + "SourceOptionGroupIdentifier", + "TargetOptionGroupIdentifier", + "TargetOptionGroupDescription" + ], + "members":{ + "SourceOptionGroupIdentifier":{"shape":"String"}, + "TargetOptionGroupIdentifier":{"shape":"String"}, + "TargetOptionGroupDescription":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CopyOptionGroupResult":{ + "type":"structure", + "members":{ + "OptionGroup":{"shape":"OptionGroup"} + } + }, + "CreateDBClusterMessage":{ + "type":"structure", + "required":[ + "DBClusterIdentifier", + "Engine", + "MasterUsername", + "MasterUserPassword" + ], + "members":{ + "AvailabilityZones":{"shape":"AvailabilityZones"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "CharacterSetName":{"shape":"String"}, + "DatabaseName":{"shape":"String"}, + "DBClusterIdentifier":{"shape":"String"}, + "DBClusterParameterGroupName":{"shape":"String"}, + "VpcSecurityGroupIds":{"shape":"VpcSecurityGroupIdList"}, + "DBSubnetGroupName":{"shape":"String"}, + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "MasterUsername":{"shape":"String"}, + "MasterUserPassword":{"shape":"String"}, + "OptionGroupName":{"shape":"String"}, + "PreferredBackupWindow":{"shape":"String"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "ReplicationSourceIdentifier":{"shape":"String"}, + "Tags":{"shape":"TagList"}, + "StorageEncrypted":{"shape":"BooleanOptional"}, + "KmsKeyId":{"shape":"String"} + } + }, + "CreateDBClusterParameterGroupMessage":{ + "type":"structure", + "required":[ + "DBClusterParameterGroupName", + "DBParameterGroupFamily", + "Description" + ], + "members":{ + "DBClusterParameterGroupName":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "Description":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateDBClusterParameterGroupResult":{ + "type":"structure", + "members":{ + "DBClusterParameterGroup":{"shape":"DBClusterParameterGroup"} + } + }, + "CreateDBClusterResult":{ + "type":"structure", + "members":{ + "DBCluster":{"shape":"DBCluster"} + } + }, + "CreateDBClusterSnapshotMessage":{ + "type":"structure", + "required":[ + "DBClusterSnapshotIdentifier", + "DBClusterIdentifier" + ], + "members":{ + "DBClusterSnapshotIdentifier":{"shape":"String"}, + "DBClusterIdentifier":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateDBClusterSnapshotResult":{ + "type":"structure", + "members":{ + "DBClusterSnapshot":{"shape":"DBClusterSnapshot"} + } + }, + "CreateDBInstanceMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "DBInstanceClass", + "Engine" + ], + "members":{ + "DBName":{"shape":"String"}, + "DBInstanceIdentifier":{"shape":"String"}, + "AllocatedStorage":{"shape":"IntegerOptional"}, + "DBInstanceClass":{"shape":"String"}, + "Engine":{"shape":"String"}, + 
"MasterUsername":{"shape":"String"}, + "MasterUserPassword":{"shape":"String"}, + "DBSecurityGroups":{"shape":"DBSecurityGroupNameList"}, + "VpcSecurityGroupIds":{"shape":"VpcSecurityGroupIdList"}, + "AvailabilityZone":{"shape":"String"}, + "DBSubnetGroupName":{"shape":"String"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "DBParameterGroupName":{"shape":"String"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "PreferredBackupWindow":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "EngineVersion":{"shape":"String"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "LicenseModel":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "CharacterSetName":{"shape":"String"}, + "PubliclyAccessible":{"shape":"BooleanOptional"}, + "Tags":{"shape":"TagList"}, + "DBClusterIdentifier":{"shape":"String"}, + "StorageType":{"shape":"String"}, + "TdeCredentialArn":{"shape":"String"}, + "TdeCredentialPassword":{"shape":"String"}, + "StorageEncrypted":{"shape":"BooleanOptional"}, + "KmsKeyId":{"shape":"String"}, + "Domain":{"shape":"String"}, + "CopyTagsToSnapshot":{"shape":"BooleanOptional"}, + "MonitoringInterval":{"shape":"IntegerOptional"}, + "MonitoringRoleArn":{"shape":"String"}, + "DomainIAMRoleName":{"shape":"String"}, + "PromotionTier":{"shape":"IntegerOptional"} + } + }, + "CreateDBInstanceReadReplicaMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "SourceDBInstanceIdentifier" + ], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "SourceDBInstanceIdentifier":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "AvailabilityZone":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "PubliclyAccessible":{"shape":"BooleanOptional"}, + "Tags":{"shape":"TagList"}, + "DBSubnetGroupName":{"shape":"String"}, + "StorageType":{"shape":"String"}, + "CopyTagsToSnapshot":{"shape":"BooleanOptional"}, + "MonitoringInterval":{"shape":"IntegerOptional"}, + "MonitoringRoleArn":{"shape":"String"} + } + }, + "CreateDBInstanceReadReplicaResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "CreateDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "CreateDBParameterGroupMessage":{ + "type":"structure", + "required":[ + "DBParameterGroupName", + "DBParameterGroupFamily", + "Description" + ], + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "Description":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateDBParameterGroupResult":{ + "type":"structure", + "members":{ + "DBParameterGroup":{"shape":"DBParameterGroup"} + } + }, + "CreateDBSecurityGroupMessage":{ + "type":"structure", + "required":[ + "DBSecurityGroupName", + "DBSecurityGroupDescription" + ], + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "DBSecurityGroupDescription":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateDBSecurityGroupResult":{ + "type":"structure", + "members":{ + "DBSecurityGroup":{"shape":"DBSecurityGroup"} + } + }, + "CreateDBSnapshotMessage":{ + "type":"structure", + "required":[ + "DBSnapshotIdentifier", + "DBInstanceIdentifier" + ], + "members":{ + "DBSnapshotIdentifier":{"shape":"String"}, + 
"DBInstanceIdentifier":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateDBSnapshotResult":{ + "type":"structure", + "members":{ + "DBSnapshot":{"shape":"DBSnapshot"} + } + }, + "CreateDBSubnetGroupMessage":{ + "type":"structure", + "required":[ + "DBSubnetGroupName", + "DBSubnetGroupDescription", + "SubnetIds" + ], + "members":{ + "DBSubnetGroupName":{"shape":"String"}, + "DBSubnetGroupDescription":{"shape":"String"}, + "SubnetIds":{"shape":"SubnetIdentifierList"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateDBSubnetGroupResult":{ + "type":"structure", + "members":{ + "DBSubnetGroup":{"shape":"DBSubnetGroup"} + } + }, + "CreateEventSubscriptionMessage":{ + "type":"structure", + "required":[ + "SubscriptionName", + "SnsTopicArn" + ], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SnsTopicArn":{"shape":"String"}, + "SourceType":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "SourceIds":{"shape":"SourceIdsList"}, + "Enabled":{"shape":"BooleanOptional"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateEventSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "CreateOptionGroupMessage":{ + "type":"structure", + "required":[ + "OptionGroupName", + "EngineName", + "MajorEngineVersion", + "OptionGroupDescription" + ], + "members":{ + "OptionGroupName":{"shape":"String"}, + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"}, + "OptionGroupDescription":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateOptionGroupResult":{ + "type":"structure", + "members":{ + "OptionGroup":{"shape":"OptionGroup"} + } + }, + "DBCluster":{ + "type":"structure", + "members":{ + "AllocatedStorage":{"shape":"IntegerOptional"}, + "AvailabilityZones":{"shape":"AvailabilityZones"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "CharacterSetName":{"shape":"String"}, + "DatabaseName":{"shape":"String"}, + "DBClusterIdentifier":{"shape":"String"}, + "DBClusterParameterGroup":{"shape":"String"}, + "DBSubnetGroup":{"shape":"String"}, + "Status":{"shape":"String"}, + "PercentProgress":{"shape":"String"}, + "EarliestRestorableTime":{"shape":"TStamp"}, + "Endpoint":{"shape":"String"}, + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "LatestRestorableTime":{"shape":"TStamp"}, + "Port":{"shape":"IntegerOptional"}, + "MasterUsername":{"shape":"String"}, + "DBClusterOptionGroupMemberships":{"shape":"DBClusterOptionGroupMemberships"}, + "PreferredBackupWindow":{"shape":"String"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "ReplicationSourceIdentifier":{"shape":"String"}, + "ReadReplicaIdentifiers":{"shape":"ReadReplicaIdentifierList"}, + "DBClusterMembers":{"shape":"DBClusterMemberList"}, + "VpcSecurityGroups":{"shape":"VpcSecurityGroupMembershipList"}, + "HostedZoneId":{"shape":"String"}, + "StorageEncrypted":{"shape":"Boolean"}, + "KmsKeyId":{"shape":"String"}, + "DbClusterResourceId":{"shape":"String"} + }, + "wrapper":true + }, + "DBClusterAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBClusterAlreadyExistsFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBClusterList":{ + "type":"list", + "member":{ + "shape":"DBCluster", + "locationName":"DBCluster" + } + }, + "DBClusterMember":{ + "type":"structure", + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "IsClusterWriter":{"shape":"Boolean"}, + 
"DBClusterParameterGroupStatus":{"shape":"String"}, + "PromotionTier":{"shape":"IntegerOptional"} + }, + "wrapper":true + }, + "DBClusterMemberList":{ + "type":"list", + "member":{ + "shape":"DBClusterMember", + "locationName":"DBClusterMember" + } + }, + "DBClusterMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBClusters":{"shape":"DBClusterList"} + } + }, + "DBClusterNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBClusterNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBClusterOptionGroupMemberships":{ + "type":"list", + "member":{ + "shape":"DBClusterOptionGroupStatus", + "locationName":"DBClusterOptionGroup" + } + }, + "DBClusterOptionGroupStatus":{ + "type":"structure", + "members":{ + "DBClusterOptionGroupName":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "DBClusterParameterGroup":{ + "type":"structure", + "members":{ + "DBClusterParameterGroupName":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "Description":{"shape":"String"} + }, + "wrapper":true + }, + "DBClusterParameterGroupDetails":{ + "type":"structure", + "members":{ + "Parameters":{"shape":"ParametersList"}, + "Marker":{"shape":"String"} + } + }, + "DBClusterParameterGroupList":{ + "type":"list", + "member":{ + "shape":"DBClusterParameterGroup", + "locationName":"DBClusterParameterGroup" + } + }, + "DBClusterParameterGroupNameMessage":{ + "type":"structure", + "members":{ + "DBClusterParameterGroupName":{"shape":"String"} + } + }, + "DBClusterParameterGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBClusterParameterGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBClusterParameterGroupsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBClusterParameterGroups":{"shape":"DBClusterParameterGroupList"} + } + }, + "DBClusterQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBClusterQuotaExceededFault", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "DBClusterSnapshot":{ + "type":"structure", + "members":{ + "AvailabilityZones":{"shape":"AvailabilityZones"}, + "DBClusterSnapshotIdentifier":{"shape":"String"}, + "DBClusterIdentifier":{"shape":"String"}, + "SnapshotCreateTime":{"shape":"TStamp"}, + "Engine":{"shape":"String"}, + "AllocatedStorage":{"shape":"Integer"}, + "Status":{"shape":"String"}, + "Port":{"shape":"Integer"}, + "VpcId":{"shape":"String"}, + "ClusterCreateTime":{"shape":"TStamp"}, + "MasterUsername":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "LicenseModel":{"shape":"String"}, + "SnapshotType":{"shape":"String"}, + "PercentProgress":{"shape":"Integer"}, + "StorageEncrypted":{"shape":"Boolean"}, + "KmsKeyId":{"shape":"String"} + }, + "wrapper":true + }, + "DBClusterSnapshotAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBClusterSnapshotAlreadyExistsFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBClusterSnapshotAttribute":{ + "type":"structure", + "members":{ + "AttributeName":{"shape":"String"}, + "AttributeValues":{"shape":"AttributeValueList"} + } + }, + "DBClusterSnapshotAttributeList":{ + "type":"list", + "member":{ + "shape":"DBClusterSnapshotAttribute", + "locationName":"DBClusterSnapshotAttribute" + } + }, + "DBClusterSnapshotAttributesResult":{ + "type":"structure", + "members":{ + 
"DBClusterSnapshotIdentifier":{"shape":"String"}, + "DBClusterSnapshotAttributes":{"shape":"DBClusterSnapshotAttributeList"} + }, + "wrapper":true + }, + "DBClusterSnapshotList":{ + "type":"list", + "member":{ + "shape":"DBClusterSnapshot", + "locationName":"DBClusterSnapshot" + } + }, + "DBClusterSnapshotMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBClusterSnapshots":{"shape":"DBClusterSnapshotList"} + } + }, + "DBClusterSnapshotNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBClusterSnapshotNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBEngineVersion":{ + "type":"structure", + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "DBEngineDescription":{"shape":"String"}, + "DBEngineVersionDescription":{"shape":"String"}, + "DefaultCharacterSet":{"shape":"CharacterSet"}, + "SupportedCharacterSets":{"shape":"SupportedCharacterSetsList"}, + "ValidUpgradeTarget":{"shape":"ValidUpgradeTargetList"} + } + }, + "DBEngineVersionList":{ + "type":"list", + "member":{ + "shape":"DBEngineVersion", + "locationName":"DBEngineVersion" + } + }, + "DBEngineVersionMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBEngineVersions":{"shape":"DBEngineVersionList"} + } + }, + "DBInstance":{ + "type":"structure", + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Engine":{"shape":"String"}, + "DBInstanceStatus":{"shape":"String"}, + "MasterUsername":{"shape":"String"}, + "DBName":{"shape":"String"}, + "Endpoint":{"shape":"Endpoint"}, + "AllocatedStorage":{"shape":"Integer"}, + "InstanceCreateTime":{"shape":"TStamp"}, + "PreferredBackupWindow":{"shape":"String"}, + "BackupRetentionPeriod":{"shape":"Integer"}, + "DBSecurityGroups":{"shape":"DBSecurityGroupMembershipList"}, + "VpcSecurityGroups":{"shape":"VpcSecurityGroupMembershipList"}, + "DBParameterGroups":{"shape":"DBParameterGroupStatusList"}, + "AvailabilityZone":{"shape":"String"}, + "DBSubnetGroup":{"shape":"DBSubnetGroup"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "PendingModifiedValues":{"shape":"PendingModifiedValues"}, + "LatestRestorableTime":{"shape":"TStamp"}, + "MultiAZ":{"shape":"Boolean"}, + "EngineVersion":{"shape":"String"}, + "AutoMinorVersionUpgrade":{"shape":"Boolean"}, + "ReadReplicaSourceDBInstanceIdentifier":{"shape":"String"}, + "ReadReplicaDBInstanceIdentifiers":{"shape":"ReadReplicaDBInstanceIdentifierList"}, + "LicenseModel":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupMemberships":{"shape":"OptionGroupMembershipList"}, + "CharacterSetName":{"shape":"String"}, + "SecondaryAvailabilityZone":{"shape":"String"}, + "PubliclyAccessible":{"shape":"Boolean"}, + "StatusInfos":{"shape":"DBInstanceStatusInfoList"}, + "StorageType":{"shape":"String"}, + "TdeCredentialArn":{"shape":"String"}, + "DbInstancePort":{"shape":"Integer"}, + "DBClusterIdentifier":{"shape":"String"}, + "StorageEncrypted":{"shape":"Boolean"}, + "KmsKeyId":{"shape":"String"}, + "DbiResourceId":{"shape":"String"}, + "CACertificateIdentifier":{"shape":"String"}, + "DomainMemberships":{"shape":"DomainMembershipList"}, + "CopyTagsToSnapshot":{"shape":"Boolean"}, + "MonitoringInterval":{"shape":"IntegerOptional"}, + "EnhancedMonitoringResourceArn":{"shape":"String"}, + "MonitoringRoleArn":{"shape":"String"}, + "PromotionTier":{"shape":"IntegerOptional"} + }, + 
"wrapper":true + }, + "DBInstanceAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBInstanceAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBInstanceList":{ + "type":"list", + "member":{ + "shape":"DBInstance", + "locationName":"DBInstance" + } + }, + "DBInstanceMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBInstances":{"shape":"DBInstanceList"} + } + }, + "DBInstanceNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBInstanceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBInstanceStatusInfo":{ + "type":"structure", + "members":{ + "StatusType":{"shape":"String"}, + "Normal":{"shape":"Boolean"}, + "Status":{"shape":"String"}, + "Message":{"shape":"String"} + } + }, + "DBInstanceStatusInfoList":{ + "type":"list", + "member":{ + "shape":"DBInstanceStatusInfo", + "locationName":"DBInstanceStatusInfo" + } + }, + "DBLogFileNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBLogFileNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroup":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "Description":{"shape":"String"} + }, + "wrapper":true + }, + "DBParameterGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBParameterGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroupDetails":{ + "type":"structure", + "members":{ + "Parameters":{"shape":"ParametersList"}, + "Marker":{"shape":"String"} + } + }, + "DBParameterGroupList":{ + "type":"list", + "member":{ + "shape":"DBParameterGroup", + "locationName":"DBParameterGroup" + } + }, + "DBParameterGroupNameMessage":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{"shape":"String"} + } + }, + "DBParameterGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBParameterGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBParameterGroupQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroupStatus":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "ParameterApplyStatus":{"shape":"String"} + } + }, + "DBParameterGroupStatusList":{ + "type":"list", + "member":{ + "shape":"DBParameterGroupStatus", + "locationName":"DBParameterGroup" + } + }, + "DBParameterGroupsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBParameterGroups":{"shape":"DBParameterGroupList"} + } + }, + "DBSecurityGroup":{ + "type":"structure", + "members":{ + "OwnerId":{"shape":"String"}, + "DBSecurityGroupName":{"shape":"String"}, + "DBSecurityGroupDescription":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "EC2SecurityGroups":{"shape":"EC2SecurityGroupList"}, + "IPRanges":{"shape":"IPRangeList"} + }, + "wrapper":true + }, + "DBSecurityGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSecurityGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroupMembership":{ + "type":"structure", + "members":{ + 
"DBSecurityGroupName":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "DBSecurityGroupMembershipList":{ + "type":"list", + "member":{ + "shape":"DBSecurityGroupMembership", + "locationName":"DBSecurityGroup" + } + }, + "DBSecurityGroupMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBSecurityGroups":{"shape":"DBSecurityGroups"} + } + }, + "DBSecurityGroupNameList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"DBSecurityGroupName" + } + }, + "DBSecurityGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSecurityGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroupNotSupportedFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSecurityGroupNotSupported", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"QuotaExceeded.DBSecurityGroup", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroups":{ + "type":"list", + "member":{ + "shape":"DBSecurityGroup", + "locationName":"DBSecurityGroup" + } + }, + "DBSnapshot":{ + "type":"structure", + "members":{ + "DBSnapshotIdentifier":{"shape":"String"}, + "DBInstanceIdentifier":{"shape":"String"}, + "SnapshotCreateTime":{"shape":"TStamp"}, + "Engine":{"shape":"String"}, + "AllocatedStorage":{"shape":"Integer"}, + "Status":{"shape":"String"}, + "Port":{"shape":"Integer"}, + "AvailabilityZone":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "InstanceCreateTime":{"shape":"TStamp"}, + "MasterUsername":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "LicenseModel":{"shape":"String"}, + "SnapshotType":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "PercentProgress":{"shape":"Integer"}, + "SourceRegion":{"shape":"String"}, + "SourceDBSnapshotIdentifier":{"shape":"String"}, + "StorageType":{"shape":"String"}, + "TdeCredentialArn":{"shape":"String"}, + "Encrypted":{"shape":"Boolean"}, + "KmsKeyId":{"shape":"String"} + }, + "wrapper":true + }, + "DBSnapshotAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSnapshotAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSnapshotAttribute":{ + "type":"structure", + "members":{ + "AttributeName":{"shape":"String"}, + "AttributeValues":{"shape":"AttributeValueList"} + }, + "wrapper":true + }, + "DBSnapshotAttributeList":{ + "type":"list", + "member":{ + "shape":"DBSnapshotAttribute", + "locationName":"DBSnapshotAttribute" + } + }, + "DBSnapshotAttributesResult":{ + "type":"structure", + "members":{ + "DBSnapshotIdentifier":{"shape":"String"}, + "DBSnapshotAttributes":{"shape":"DBSnapshotAttributeList"} + }, + "wrapper":true + }, + "DBSnapshotList":{ + "type":"list", + "member":{ + "shape":"DBSnapshot", + "locationName":"DBSnapshot" + } + }, + "DBSnapshotMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBSnapshots":{"shape":"DBSnapshotList"} + } + }, + "DBSnapshotNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSnapshotNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroup":{ + "type":"structure", + "members":{ + "DBSubnetGroupName":{"shape":"String"}, + "DBSubnetGroupDescription":{"shape":"String"}, + 
"VpcId":{"shape":"String"}, + "SubnetGroupStatus":{"shape":"String"}, + "Subnets":{"shape":"SubnetList"} + }, + "wrapper":true + }, + "DBSubnetGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupDoesNotCoverEnoughAZs":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupDoesNotCoverEnoughAZs", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBSubnetGroups":{"shape":"DBSubnetGroups"} + } + }, + "DBSubnetGroupNotAllowedFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupNotAllowedFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroups":{ + "type":"list", + "member":{ + "shape":"DBSubnetGroup", + "locationName":"DBSubnetGroup" + } + }, + "DBSubnetQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBUpgradeDependencyFailureFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBUpgradeDependencyFailure", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DeleteDBClusterMessage":{ + "type":"structure", + "required":["DBClusterIdentifier"], + "members":{ + "DBClusterIdentifier":{"shape":"String"}, + "SkipFinalSnapshot":{"shape":"Boolean"}, + "FinalDBSnapshotIdentifier":{"shape":"String"} + } + }, + "DeleteDBClusterParameterGroupMessage":{ + "type":"structure", + "required":["DBClusterParameterGroupName"], + "members":{ + "DBClusterParameterGroupName":{"shape":"String"} + } + }, + "DeleteDBClusterResult":{ + "type":"structure", + "members":{ + "DBCluster":{"shape":"DBCluster"} + } + }, + "DeleteDBClusterSnapshotMessage":{ + "type":"structure", + "required":["DBClusterSnapshotIdentifier"], + "members":{ + "DBClusterSnapshotIdentifier":{"shape":"String"} + } + }, + "DeleteDBClusterSnapshotResult":{ + "type":"structure", + "members":{ + "DBClusterSnapshot":{"shape":"DBClusterSnapshot"} + } + }, + "DeleteDBInstanceMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "SkipFinalSnapshot":{"shape":"Boolean"}, + "FinalDBSnapshotIdentifier":{"shape":"String"} + } + }, + "DeleteDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "DeleteDBParameterGroupMessage":{ + "type":"structure", + "required":["DBParameterGroupName"], + "members":{ + "DBParameterGroupName":{"shape":"String"} + } + }, + "DeleteDBSecurityGroupMessage":{ + "type":"structure", + "required":["DBSecurityGroupName"], + "members":{ + "DBSecurityGroupName":{"shape":"String"} + } + }, + "DeleteDBSnapshotMessage":{ + "type":"structure", + "required":["DBSnapshotIdentifier"], + "members":{ + "DBSnapshotIdentifier":{"shape":"String"} + } + }, + 
"DeleteDBSnapshotResult":{ + "type":"structure", + "members":{ + "DBSnapshot":{"shape":"DBSnapshot"} + } + }, + "DeleteDBSubnetGroupMessage":{ + "type":"structure", + "required":["DBSubnetGroupName"], + "members":{ + "DBSubnetGroupName":{"shape":"String"} + } + }, + "DeleteEventSubscriptionMessage":{ + "type":"structure", + "required":["SubscriptionName"], + "members":{ + "SubscriptionName":{"shape":"String"} + } + }, + "DeleteEventSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "DeleteOptionGroupMessage":{ + "type":"structure", + "required":["OptionGroupName"], + "members":{ + "OptionGroupName":{"shape":"String"} + } + }, + "DescribeAccountAttributesMessage":{ + "type":"structure", + "members":{ + } + }, + "DescribeCertificatesMessage":{ + "type":"structure", + "members":{ + "CertificateIdentifier":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBClusterParameterGroupsMessage":{ + "type":"structure", + "members":{ + "DBClusterParameterGroupName":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBClusterParametersMessage":{ + "type":"structure", + "required":["DBClusterParameterGroupName"], + "members":{ + "DBClusterParameterGroupName":{"shape":"String"}, + "Source":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBClusterSnapshotAttributesMessage":{ + "type":"structure", + "required":["DBClusterSnapshotIdentifier"], + "members":{ + "DBClusterSnapshotIdentifier":{"shape":"String"} + } + }, + "DescribeDBClusterSnapshotAttributesResult":{ + "type":"structure", + "members":{ + "DBClusterSnapshotAttributesResult":{"shape":"DBClusterSnapshotAttributesResult"} + } + }, + "DescribeDBClusterSnapshotsMessage":{ + "type":"structure", + "members":{ + "DBClusterIdentifier":{"shape":"String"}, + "DBClusterSnapshotIdentifier":{"shape":"String"}, + "SnapshotType":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"}, + "IncludeShared":{"shape":"Boolean"}, + "IncludePublic":{"shape":"Boolean"} + } + }, + "DescribeDBClustersMessage":{ + "type":"structure", + "members":{ + "DBClusterIdentifier":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBEngineVersionsMessage":{ + "type":"structure", + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"}, + "DefaultOnly":{"shape":"Boolean"}, + "ListSupportedCharacterSets":{"shape":"BooleanOptional"} + } + }, + "DescribeDBInstancesMessage":{ + "type":"structure", + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBLogFilesDetails":{ + "type":"structure", + "members":{ + "LogFileName":{"shape":"String"}, + "LastWritten":{"shape":"Long"}, + "Size":{"shape":"Long"} + } + }, + "DescribeDBLogFilesList":{ + "type":"list", + "member":{ + "shape":"DescribeDBLogFilesDetails", + 
"locationName":"DescribeDBLogFilesDetails" + } + }, + "DescribeDBLogFilesMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "FilenameContains":{"shape":"String"}, + "FileLastWritten":{"shape":"Long"}, + "FileSize":{"shape":"Long"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBLogFilesResponse":{ + "type":"structure", + "members":{ + "DescribeDBLogFiles":{"shape":"DescribeDBLogFilesList"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBParameterGroupsMessage":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBParametersMessage":{ + "type":"structure", + "required":["DBParameterGroupName"], + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "Source":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBSecurityGroupsMessage":{ + "type":"structure", + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBSnapshotAttributesMessage":{ + "type":"structure", + "required":["DBSnapshotIdentifier"], + "members":{ + "DBSnapshotIdentifier":{"shape":"String"} + } + }, + "DescribeDBSnapshotAttributesResult":{ + "type":"structure", + "members":{ + "DBSnapshotAttributesResult":{"shape":"DBSnapshotAttributesResult"} + } + }, + "DescribeDBSnapshotsMessage":{ + "type":"structure", + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "DBSnapshotIdentifier":{"shape":"String"}, + "SnapshotType":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"}, + "IncludeShared":{"shape":"Boolean"}, + "IncludePublic":{"shape":"Boolean"} + } + }, + "DescribeDBSubnetGroupsMessage":{ + "type":"structure", + "members":{ + "DBSubnetGroupName":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeEngineDefaultClusterParametersMessage":{ + "type":"structure", + "required":["DBParameterGroupFamily"], + "members":{ + "DBParameterGroupFamily":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeEngineDefaultClusterParametersResult":{ + "type":"structure", + "members":{ + "EngineDefaults":{"shape":"EngineDefaults"} + } + }, + "DescribeEngineDefaultParametersMessage":{ + "type":"structure", + "required":["DBParameterGroupFamily"], + "members":{ + "DBParameterGroupFamily":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeEngineDefaultParametersResult":{ + "type":"structure", + "members":{ + "EngineDefaults":{"shape":"EngineDefaults"} + } + }, + "DescribeEventCategoriesMessage":{ + "type":"structure", + "members":{ + "SourceType":{"shape":"String"}, + "Filters":{"shape":"FilterList"} + } + }, + "DescribeEventSubscriptionsMessage":{ + "type":"structure", + "members":{ + "SubscriptionName":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + 
"Marker":{"shape":"String"} + } + }, + "DescribeEventsMessage":{ + "type":"structure", + "members":{ + "SourceIdentifier":{"shape":"String"}, + "SourceType":{"shape":"SourceType"}, + "StartTime":{"shape":"TStamp"}, + "EndTime":{"shape":"TStamp"}, + "Duration":{"shape":"IntegerOptional"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeOptionGroupOptionsMessage":{ + "type":"structure", + "required":["EngineName"], + "members":{ + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeOptionGroupsMessage":{ + "type":"structure", + "members":{ + "OptionGroupName":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "Marker":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"} + } + }, + "DescribeOrderableDBInstanceOptionsMessage":{ + "type":"structure", + "required":["Engine"], + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "LicenseModel":{"shape":"String"}, + "Vpc":{"shape":"BooleanOptional"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribePendingMaintenanceActionsMessage":{ + "type":"structure", + "members":{ + "ResourceIdentifier":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "Marker":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"} + } + }, + "DescribeReservedDBInstancesMessage":{ + "type":"structure", + "members":{ + "ReservedDBInstanceId":{"shape":"String"}, + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Duration":{"shape":"String"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeReservedDBInstancesOfferingsMessage":{ + "type":"structure", + "members":{ + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Duration":{"shape":"String"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DomainMembership":{ + "type":"structure", + "members":{ + "Domain":{"shape":"String"}, + "Status":{"shape":"String"}, + "FQDN":{"shape":"String"}, + "IAMRoleName":{"shape":"String"} + } + }, + "DomainMembershipList":{ + "type":"list", + "member":{ + "shape":"DomainMembership", + "locationName":"DomainMembership" + } + }, + "DomainNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DomainNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "Double":{"type":"double"}, + "DownloadDBLogFilePortionDetails":{ + "type":"structure", + "members":{ + "LogFileData":{"shape":"String"}, + "Marker":{"shape":"String"}, + "AdditionalDataPending":{"shape":"Boolean"} + } + }, + "DownloadDBLogFilePortionMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "LogFileName" + ], + "members":{ + 
"DBInstanceIdentifier":{"shape":"String"}, + "LogFileName":{"shape":"String"}, + "Marker":{"shape":"String"}, + "NumberOfLines":{"shape":"Integer"} + } + }, + "EC2SecurityGroup":{ + "type":"structure", + "members":{ + "Status":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupId":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"} + } + }, + "EC2SecurityGroupList":{ + "type":"list", + "member":{ + "shape":"EC2SecurityGroup", + "locationName":"EC2SecurityGroup" + } + }, + "Endpoint":{ + "type":"structure", + "members":{ + "Address":{"shape":"String"}, + "Port":{"shape":"Integer"}, + "HostedZoneId":{"shape":"String"} + } + }, + "EngineDefaults":{ + "type":"structure", + "members":{ + "DBParameterGroupFamily":{"shape":"String"}, + "Marker":{"shape":"String"}, + "Parameters":{"shape":"ParametersList"} + }, + "wrapper":true + }, + "Event":{ + "type":"structure", + "members":{ + "SourceIdentifier":{"shape":"String"}, + "SourceType":{"shape":"SourceType"}, + "Message":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "Date":{"shape":"TStamp"} + } + }, + "EventCategoriesList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"EventCategory" + } + }, + "EventCategoriesMap":{ + "type":"structure", + "members":{ + "SourceType":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"} + }, + "wrapper":true + }, + "EventCategoriesMapList":{ + "type":"list", + "member":{ + "shape":"EventCategoriesMap", + "locationName":"EventCategoriesMap" + } + }, + "EventCategoriesMessage":{ + "type":"structure", + "members":{ + "EventCategoriesMapList":{"shape":"EventCategoriesMapList"} + } + }, + "EventList":{ + "type":"list", + "member":{ + "shape":"Event", + "locationName":"Event" + } + }, + "EventSubscription":{ + "type":"structure", + "members":{ + "CustomerAwsId":{"shape":"String"}, + "CustSubscriptionId":{"shape":"String"}, + "SnsTopicArn":{"shape":"String"}, + "Status":{"shape":"String"}, + "SubscriptionCreationTime":{"shape":"String"}, + "SourceType":{"shape":"String"}, + "SourceIdsList":{"shape":"SourceIdsList"}, + "EventCategoriesList":{"shape":"EventCategoriesList"}, + "Enabled":{"shape":"Boolean"} + }, + "wrapper":true + }, + "EventSubscriptionQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"EventSubscriptionQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "EventSubscriptionsList":{ + "type":"list", + "member":{ + "shape":"EventSubscription", + "locationName":"EventSubscription" + } + }, + "EventSubscriptionsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "EventSubscriptionsList":{"shape":"EventSubscriptionsList"} + } + }, + "EventsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "Events":{"shape":"EventList"} + } + }, + "FailoverDBClusterMessage":{ + "type":"structure", + "members":{ + "DBClusterIdentifier":{"shape":"String"} + } + }, + "FailoverDBClusterResult":{ + "type":"structure", + "members":{ + "DBCluster":{"shape":"DBCluster"} + } + }, + "Filter":{ + "type":"structure", + "required":[ + "Name", + "Values" + ], + "members":{ + "Name":{"shape":"String"}, + "Values":{"shape":"FilterValueList"} + } + }, + "FilterList":{ + "type":"list", + "member":{ + "shape":"Filter", + "locationName":"Filter" + } + }, + "FilterValueList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"Value" + } + }, + "IPRange":{ + "type":"structure", + 
"members":{ + "Status":{"shape":"String"}, + "CIDRIP":{"shape":"String"} + } + }, + "IPRangeList":{ + "type":"list", + "member":{ + "shape":"IPRange", + "locationName":"IPRange" + } + }, + "InstanceQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InstanceQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InsufficientDBClusterCapacityFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InsufficientDBClusterCapacityFault", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "InsufficientDBInstanceCapacityFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InsufficientDBInstanceCapacity", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InsufficientStorageClusterCapacityFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InsufficientStorageClusterCapacity", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Integer":{"type":"integer"}, + "IntegerOptional":{"type":"integer"}, + "InvalidDBClusterSnapshotStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBClusterSnapshotStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBClusterStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBClusterStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBInstanceStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBInstanceState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBParameterGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBParameterGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSecurityGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSecurityGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSnapshotStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSnapshotState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSubnetGroupFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSubnetGroupFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSubnetGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSubnetGroupStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSubnetStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSubnetStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidEventSubscriptionStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidEventSubscriptionState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidOptionGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidOptionGroupStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidRestoreFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidRestoreFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidSubnet":{ + 
"type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidSubnet", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidVPCNetworkStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidVPCNetworkStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "KMSKeyNotAccessibleFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"KMSKeyNotAccessibleFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "KeyList":{ + "type":"list", + "member":{"shape":"String"} + }, + "ListTagsForResourceMessage":{ + "type":"structure", + "required":["ResourceName"], + "members":{ + "ResourceName":{"shape":"String"}, + "Filters":{"shape":"FilterList"} + } + }, + "Long":{"type":"long"}, + "ModifyDBClusterMessage":{ + "type":"structure", + "required":["DBClusterIdentifier"], + "members":{ + "DBClusterIdentifier":{"shape":"String"}, + "NewDBClusterIdentifier":{"shape":"String"}, + "ApplyImmediately":{"shape":"Boolean"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "DBClusterParameterGroupName":{"shape":"String"}, + "VpcSecurityGroupIds":{"shape":"VpcSecurityGroupIdList"}, + "Port":{"shape":"IntegerOptional"}, + "MasterUserPassword":{"shape":"String"}, + "OptionGroupName":{"shape":"String"}, + "PreferredBackupWindow":{"shape":"String"}, + "PreferredMaintenanceWindow":{"shape":"String"} + } + }, + "ModifyDBClusterParameterGroupMessage":{ + "type":"structure", + "required":[ + "DBClusterParameterGroupName", + "Parameters" + ], + "members":{ + "DBClusterParameterGroupName":{"shape":"String"}, + "Parameters":{"shape":"ParametersList"} + } + }, + "ModifyDBClusterResult":{ + "type":"structure", + "members":{ + "DBCluster":{"shape":"DBCluster"} + } + }, + "ModifyDBClusterSnapshotAttributeMessage":{ + "type":"structure", + "required":[ + "DBClusterSnapshotIdentifier", + "AttributeName" + ], + "members":{ + "DBClusterSnapshotIdentifier":{"shape":"String"}, + "AttributeName":{"shape":"String"}, + "ValuesToAdd":{"shape":"AttributeValueList"}, + "ValuesToRemove":{"shape":"AttributeValueList"} + } + }, + "ModifyDBClusterSnapshotAttributeResult":{ + "type":"structure", + "members":{ + "DBClusterSnapshotAttributesResult":{"shape":"DBClusterSnapshotAttributesResult"} + } + }, + "ModifyDBInstanceMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "AllocatedStorage":{"shape":"IntegerOptional"}, + "DBInstanceClass":{"shape":"String"}, + "DBSecurityGroups":{"shape":"DBSecurityGroupNameList"}, + "VpcSecurityGroupIds":{"shape":"VpcSecurityGroupIdList"}, + "ApplyImmediately":{"shape":"Boolean"}, + "MasterUserPassword":{"shape":"String"}, + "DBParameterGroupName":{"shape":"String"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "PreferredBackupWindow":{"shape":"String"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "EngineVersion":{"shape":"String"}, + "AllowMajorVersionUpgrade":{"shape":"Boolean"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "NewDBInstanceIdentifier":{"shape":"String"}, + "StorageType":{"shape":"String"}, + "TdeCredentialArn":{"shape":"String"}, + "TdeCredentialPassword":{"shape":"String"}, + "CACertificateIdentifier":{"shape":"String"}, + "Domain":{"shape":"String"}, + "CopyTagsToSnapshot":{"shape":"BooleanOptional"}, + 
"MonitoringInterval":{"shape":"IntegerOptional"}, + "DBPortNumber":{"shape":"IntegerOptional"}, + "PubliclyAccessible":{"shape":"BooleanOptional"}, + "MonitoringRoleArn":{"shape":"String"}, + "DomainIAMRoleName":{"shape":"String"}, + "PromotionTier":{"shape":"IntegerOptional"} + } + }, + "ModifyDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "ModifyDBParameterGroupMessage":{ + "type":"structure", + "required":[ + "DBParameterGroupName", + "Parameters" + ], + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "Parameters":{"shape":"ParametersList"} + } + }, + "ModifyDBSnapshotAttributeMessage":{ + "type":"structure", + "required":[ + "DBSnapshotIdentifier", + "AttributeName" + ], + "members":{ + "DBSnapshotIdentifier":{"shape":"String"}, + "AttributeName":{"shape":"String"}, + "ValuesToAdd":{"shape":"AttributeValueList"}, + "ValuesToRemove":{"shape":"AttributeValueList"} + } + }, + "ModifyDBSnapshotAttributeResult":{ + "type":"structure", + "members":{ + "DBSnapshotAttributesResult":{"shape":"DBSnapshotAttributesResult"} + } + }, + "ModifyDBSubnetGroupMessage":{ + "type":"structure", + "required":[ + "DBSubnetGroupName", + "SubnetIds" + ], + "members":{ + "DBSubnetGroupName":{"shape":"String"}, + "DBSubnetGroupDescription":{"shape":"String"}, + "SubnetIds":{"shape":"SubnetIdentifierList"} + } + }, + "ModifyDBSubnetGroupResult":{ + "type":"structure", + "members":{ + "DBSubnetGroup":{"shape":"DBSubnetGroup"} + } + }, + "ModifyEventSubscriptionMessage":{ + "type":"structure", + "required":["SubscriptionName"], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SnsTopicArn":{"shape":"String"}, + "SourceType":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "Enabled":{"shape":"BooleanOptional"} + } + }, + "ModifyEventSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "ModifyOptionGroupMessage":{ + "type":"structure", + "required":["OptionGroupName"], + "members":{ + "OptionGroupName":{"shape":"String"}, + "OptionsToInclude":{"shape":"OptionConfigurationList"}, + "OptionsToRemove":{"shape":"OptionNamesList"}, + "ApplyImmediately":{"shape":"Boolean"} + } + }, + "ModifyOptionGroupResult":{ + "type":"structure", + "members":{ + "OptionGroup":{"shape":"OptionGroup"} + } + }, + "Option":{ + "type":"structure", + "members":{ + "OptionName":{"shape":"String"}, + "OptionDescription":{"shape":"String"}, + "Persistent":{"shape":"Boolean"}, + "Permanent":{"shape":"Boolean"}, + "Port":{"shape":"IntegerOptional"}, + "OptionSettings":{"shape":"OptionSettingConfigurationList"}, + "DBSecurityGroupMemberships":{"shape":"DBSecurityGroupMembershipList"}, + "VpcSecurityGroupMemberships":{"shape":"VpcSecurityGroupMembershipList"} + } + }, + "OptionConfiguration":{ + "type":"structure", + "required":["OptionName"], + "members":{ + "OptionName":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "DBSecurityGroupMemberships":{"shape":"DBSecurityGroupNameList"}, + "VpcSecurityGroupMemberships":{"shape":"VpcSecurityGroupIdList"}, + "OptionSettings":{"shape":"OptionSettingsList"} + } + }, + "OptionConfigurationList":{ + "type":"list", + "member":{ + "shape":"OptionConfiguration", + "locationName":"OptionConfiguration" + } + }, + "OptionGroup":{ + "type":"structure", + "members":{ + "OptionGroupName":{"shape":"String"}, + "OptionGroupDescription":{"shape":"String"}, + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"}, + 
"Options":{"shape":"OptionsList"}, + "AllowsVpcAndNonVpcInstanceMemberships":{"shape":"Boolean"}, + "VpcId":{"shape":"String"} + }, + "wrapper":true + }, + "OptionGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"OptionGroupAlreadyExistsFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "OptionGroupMembership":{ + "type":"structure", + "members":{ + "OptionGroupName":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "OptionGroupMembershipList":{ + "type":"list", + "member":{ + "shape":"OptionGroupMembership", + "locationName":"OptionGroupMembership" + } + }, + "OptionGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"OptionGroupNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "OptionGroupOption":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "Description":{"shape":"String"}, + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"}, + "MinimumRequiredMinorEngineVersion":{"shape":"String"}, + "PortRequired":{"shape":"Boolean"}, + "DefaultPort":{"shape":"IntegerOptional"}, + "OptionsDependedOn":{"shape":"OptionsDependedOn"}, + "Persistent":{"shape":"Boolean"}, + "Permanent":{"shape":"Boolean"}, + "OptionGroupOptionSettings":{"shape":"OptionGroupOptionSettingsList"} + } + }, + "OptionGroupOptionSetting":{ + "type":"structure", + "members":{ + "SettingName":{"shape":"String"}, + "SettingDescription":{"shape":"String"}, + "DefaultValue":{"shape":"String"}, + "ApplyType":{"shape":"String"}, + "AllowedValues":{"shape":"String"}, + "IsModifiable":{"shape":"Boolean"} + } + }, + "OptionGroupOptionSettingsList":{ + "type":"list", + "member":{ + "shape":"OptionGroupOptionSetting", + "locationName":"OptionGroupOptionSetting" + } + }, + "OptionGroupOptionsList":{ + "type":"list", + "member":{ + "shape":"OptionGroupOption", + "locationName":"OptionGroupOption" + } + }, + "OptionGroupOptionsMessage":{ + "type":"structure", + "members":{ + "OptionGroupOptions":{"shape":"OptionGroupOptionsList"}, + "Marker":{"shape":"String"} + } + }, + "OptionGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"OptionGroupQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "OptionGroups":{ + "type":"structure", + "members":{ + "OptionGroupsList":{"shape":"OptionGroupsList"}, + "Marker":{"shape":"String"} + } + }, + "OptionGroupsList":{ + "type":"list", + "member":{ + "shape":"OptionGroup", + "locationName":"OptionGroup" + } + }, + "OptionNamesList":{ + "type":"list", + "member":{"shape":"String"} + }, + "OptionSetting":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "Value":{"shape":"String"}, + "DefaultValue":{"shape":"String"}, + "Description":{"shape":"String"}, + "ApplyType":{"shape":"String"}, + "DataType":{"shape":"String"}, + "AllowedValues":{"shape":"String"}, + "IsModifiable":{"shape":"Boolean"}, + "IsCollection":{"shape":"Boolean"} + } + }, + "OptionSettingConfigurationList":{ + "type":"list", + "member":{ + "shape":"OptionSetting", + "locationName":"OptionSetting" + } + }, + "OptionSettingsList":{ + "type":"list", + "member":{ + "shape":"OptionSetting", + "locationName":"OptionSetting" + } + }, + "OptionsDependedOn":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"OptionName" + } + }, + "OptionsList":{ + "type":"list", + "member":{ + "shape":"Option", + "locationName":"Option" + } + }, + 
"OrderableDBInstanceOption":{ + "type":"structure", + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "LicenseModel":{"shape":"String"}, + "AvailabilityZones":{"shape":"AvailabilityZoneList"}, + "MultiAZCapable":{"shape":"Boolean"}, + "ReadReplicaCapable":{"shape":"Boolean"}, + "Vpc":{"shape":"Boolean"}, + "SupportsStorageEncryption":{"shape":"Boolean"}, + "StorageType":{"shape":"String"}, + "SupportsIops":{"shape":"Boolean"}, + "SupportsEnhancedMonitoring":{"shape":"Boolean"} + }, + "wrapper":true + }, + "OrderableDBInstanceOptionsList":{ + "type":"list", + "member":{ + "shape":"OrderableDBInstanceOption", + "locationName":"OrderableDBInstanceOption" + } + }, + "OrderableDBInstanceOptionsMessage":{ + "type":"structure", + "members":{ + "OrderableDBInstanceOptions":{"shape":"OrderableDBInstanceOptionsList"}, + "Marker":{"shape":"String"} + } + }, + "Parameter":{ + "type":"structure", + "members":{ + "ParameterName":{"shape":"String"}, + "ParameterValue":{"shape":"String"}, + "Description":{"shape":"String"}, + "Source":{"shape":"String"}, + "ApplyType":{"shape":"String"}, + "DataType":{"shape":"String"}, + "AllowedValues":{"shape":"String"}, + "IsModifiable":{"shape":"Boolean"}, + "MinimumEngineVersion":{"shape":"String"}, + "ApplyMethod":{"shape":"ApplyMethod"} + } + }, + "ParametersList":{ + "type":"list", + "member":{ + "shape":"Parameter", + "locationName":"Parameter" + } + }, + "PendingMaintenanceAction":{ + "type":"structure", + "members":{ + "Action":{"shape":"String"}, + "AutoAppliedAfterDate":{"shape":"TStamp"}, + "ForcedApplyDate":{"shape":"TStamp"}, + "OptInStatus":{"shape":"String"}, + "CurrentApplyDate":{"shape":"TStamp"}, + "Description":{"shape":"String"} + } + }, + "PendingMaintenanceActionDetails":{ + "type":"list", + "member":{ + "shape":"PendingMaintenanceAction", + "locationName":"PendingMaintenanceAction" + } + }, + "PendingMaintenanceActions":{ + "type":"list", + "member":{ + "shape":"ResourcePendingMaintenanceActions", + "locationName":"ResourcePendingMaintenanceActions" + } + }, + "PendingMaintenanceActionsMessage":{ + "type":"structure", + "members":{ + "PendingMaintenanceActions":{"shape":"PendingMaintenanceActions"}, + "Marker":{"shape":"String"} + } + }, + "PendingModifiedValues":{ + "type":"structure", + "members":{ + "DBInstanceClass":{"shape":"String"}, + "AllocatedStorage":{"shape":"IntegerOptional"}, + "MasterUserPassword":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "EngineVersion":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "DBInstanceIdentifier":{"shape":"String"}, + "StorageType":{"shape":"String"}, + "CACertificateIdentifier":{"shape":"String"} + } + }, + "PointInTimeRestoreNotEnabledFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"PointInTimeRestoreNotEnabled", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "PromoteReadReplicaDBClusterMessage":{ + "type":"structure", + "required":["DBClusterIdentifier"], + "members":{ + "DBClusterIdentifier":{"shape":"String"} + } + }, + "PromoteReadReplicaDBClusterResult":{ + "type":"structure", + "members":{ + "DBCluster":{"shape":"DBCluster"} + } + }, + "PromoteReadReplicaMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + 
"PreferredBackupWindow":{"shape":"String"} + } + }, + "PromoteReadReplicaResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "ProvisionedIopsNotAvailableInAZFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ProvisionedIopsNotAvailableInAZFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "PurchaseReservedDBInstancesOfferingMessage":{ + "type":"structure", + "required":["ReservedDBInstancesOfferingId"], + "members":{ + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "ReservedDBInstanceId":{"shape":"String"}, + "DBInstanceCount":{"shape":"IntegerOptional"}, + "Tags":{"shape":"TagList"} + } + }, + "PurchaseReservedDBInstancesOfferingResult":{ + "type":"structure", + "members":{ + "ReservedDBInstance":{"shape":"ReservedDBInstance"} + } + }, + "ReadReplicaDBInstanceIdentifierList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ReadReplicaDBInstanceIdentifier" + } + }, + "ReadReplicaIdentifierList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ReadReplicaIdentifier" + } + }, + "RebootDBInstanceMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "ForceFailover":{"shape":"BooleanOptional"} + } + }, + "RebootDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "RecurringCharge":{ + "type":"structure", + "members":{ + "RecurringChargeAmount":{"shape":"Double"}, + "RecurringChargeFrequency":{"shape":"String"} + }, + "wrapper":true + }, + "RecurringChargeList":{ + "type":"list", + "member":{ + "shape":"RecurringCharge", + "locationName":"RecurringCharge" + } + }, + "RemoveSourceIdentifierFromSubscriptionMessage":{ + "type":"structure", + "required":[ + "SubscriptionName", + "SourceIdentifier" + ], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SourceIdentifier":{"shape":"String"} + } + }, + "RemoveSourceIdentifierFromSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "RemoveTagsFromResourceMessage":{ + "type":"structure", + "required":[ + "ResourceName", + "TagKeys" + ], + "members":{ + "ResourceName":{"shape":"String"}, + "TagKeys":{"shape":"KeyList"} + } + }, + "ReservedDBInstance":{ + "type":"structure", + "members":{ + "ReservedDBInstanceId":{"shape":"String"}, + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "StartTime":{"shape":"TStamp"}, + "Duration":{"shape":"Integer"}, + "FixedPrice":{"shape":"Double"}, + "UsagePrice":{"shape":"Double"}, + "CurrencyCode":{"shape":"String"}, + "DBInstanceCount":{"shape":"Integer"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MultiAZ":{"shape":"Boolean"}, + "State":{"shape":"String"}, + "RecurringCharges":{"shape":"RecurringChargeList"} + }, + "wrapper":true + }, + "ReservedDBInstanceAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedDBInstanceAlreadyExists", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ReservedDBInstanceList":{ + "type":"list", + "member":{ + "shape":"ReservedDBInstance", + "locationName":"ReservedDBInstance" + } + }, + "ReservedDBInstanceMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ReservedDBInstances":{"shape":"ReservedDBInstanceList"} + } + }, + "ReservedDBInstanceNotFoundFault":{ + 
"type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedDBInstanceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ReservedDBInstanceQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedDBInstanceQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ReservedDBInstancesOffering":{ + "type":"structure", + "members":{ + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Duration":{"shape":"Integer"}, + "FixedPrice":{"shape":"Double"}, + "UsagePrice":{"shape":"Double"}, + "CurrencyCode":{"shape":"String"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MultiAZ":{"shape":"Boolean"}, + "RecurringCharges":{"shape":"RecurringChargeList"} + }, + "wrapper":true + }, + "ReservedDBInstancesOfferingList":{ + "type":"list", + "member":{ + "shape":"ReservedDBInstancesOffering", + "locationName":"ReservedDBInstancesOffering" + } + }, + "ReservedDBInstancesOfferingMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ReservedDBInstancesOfferings":{"shape":"ReservedDBInstancesOfferingList"} + } + }, + "ReservedDBInstancesOfferingNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedDBInstancesOfferingNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ResetDBClusterParameterGroupMessage":{ + "type":"structure", + "required":["DBClusterParameterGroupName"], + "members":{ + "DBClusterParameterGroupName":{"shape":"String"}, + "ResetAllParameters":{"shape":"Boolean"}, + "Parameters":{"shape":"ParametersList"} + } + }, + "ResetDBParameterGroupMessage":{ + "type":"structure", + "required":["DBParameterGroupName"], + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "ResetAllParameters":{"shape":"Boolean"}, + "Parameters":{"shape":"ParametersList"} + } + }, + "ResourceNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ResourceNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ResourcePendingMaintenanceActions":{ + "type":"structure", + "members":{ + "ResourceIdentifier":{"shape":"String"}, + "PendingMaintenanceActionDetails":{"shape":"PendingMaintenanceActionDetails"} + }, + "wrapper":true + }, + "RestoreDBClusterFromSnapshotMessage":{ + "type":"structure", + "required":[ + "DBClusterIdentifier", + "SnapshotIdentifier", + "Engine" + ], + "members":{ + "AvailabilityZones":{"shape":"AvailabilityZones"}, + "DBClusterIdentifier":{"shape":"String"}, + "SnapshotIdentifier":{"shape":"String"}, + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "DBSubnetGroupName":{"shape":"String"}, + "DatabaseName":{"shape":"String"}, + "OptionGroupName":{"shape":"String"}, + "VpcSecurityGroupIds":{"shape":"VpcSecurityGroupIdList"}, + "Tags":{"shape":"TagList"}, + "KmsKeyId":{"shape":"String"} + } + }, + "RestoreDBClusterFromSnapshotResult":{ + "type":"structure", + "members":{ + "DBCluster":{"shape":"DBCluster"} + } + }, + "RestoreDBClusterToPointInTimeMessage":{ + "type":"structure", + "required":[ + "DBClusterIdentifier", + "SourceDBClusterIdentifier" + ], + "members":{ + "DBClusterIdentifier":{"shape":"String"}, + "SourceDBClusterIdentifier":{"shape":"String"}, + "RestoreToTime":{"shape":"TStamp"}, + "UseLatestRestorableTime":{"shape":"Boolean"}, + "Port":{"shape":"IntegerOptional"}, + 
"DBSubnetGroupName":{"shape":"String"}, + "OptionGroupName":{"shape":"String"}, + "VpcSecurityGroupIds":{"shape":"VpcSecurityGroupIdList"}, + "Tags":{"shape":"TagList"}, + "KmsKeyId":{"shape":"String"} + } + }, + "RestoreDBClusterToPointInTimeResult":{ + "type":"structure", + "members":{ + "DBCluster":{"shape":"DBCluster"} + } + }, + "RestoreDBInstanceFromDBSnapshotMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "DBSnapshotIdentifier" + ], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "DBSnapshotIdentifier":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "AvailabilityZone":{"shape":"String"}, + "DBSubnetGroupName":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "PubliclyAccessible":{"shape":"BooleanOptional"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "LicenseModel":{"shape":"String"}, + "DBName":{"shape":"String"}, + "Engine":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "Tags":{"shape":"TagList"}, + "StorageType":{"shape":"String"}, + "TdeCredentialArn":{"shape":"String"}, + "TdeCredentialPassword":{"shape":"String"}, + "Domain":{"shape":"String"}, + "CopyTagsToSnapshot":{"shape":"BooleanOptional"}, + "DomainIAMRoleName":{"shape":"String"} + } + }, + "RestoreDBInstanceFromDBSnapshotResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "RestoreDBInstanceToPointInTimeMessage":{ + "type":"structure", + "required":[ + "SourceDBInstanceIdentifier", + "TargetDBInstanceIdentifier" + ], + "members":{ + "SourceDBInstanceIdentifier":{"shape":"String"}, + "TargetDBInstanceIdentifier":{"shape":"String"}, + "RestoreTime":{"shape":"TStamp"}, + "UseLatestRestorableTime":{"shape":"Boolean"}, + "DBInstanceClass":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "AvailabilityZone":{"shape":"String"}, + "DBSubnetGroupName":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "PubliclyAccessible":{"shape":"BooleanOptional"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "LicenseModel":{"shape":"String"}, + "DBName":{"shape":"String"}, + "Engine":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "CopyTagsToSnapshot":{"shape":"BooleanOptional"}, + "Tags":{"shape":"TagList"}, + "StorageType":{"shape":"String"}, + "TdeCredentialArn":{"shape":"String"}, + "TdeCredentialPassword":{"shape":"String"}, + "Domain":{"shape":"String"}, + "DomainIAMRoleName":{"shape":"String"} + } + }, + "RestoreDBInstanceToPointInTimeResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "RevokeDBSecurityGroupIngressMessage":{ + "type":"structure", + "required":["DBSecurityGroupName"], + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "CIDRIP":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupId":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"} + } + }, + "RevokeDBSecurityGroupIngressResult":{ + "type":"structure", + "members":{ + "DBSecurityGroup":{"shape":"DBSecurityGroup"} + } + }, + "SNSInvalidTopicFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SNSInvalidTopic", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SNSNoAuthorizationFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SNSNoAuthorization", + "httpStatusCode":400, + "senderFault":true + }, + 
"exception":true + }, + "SNSTopicArnNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SNSTopicArnNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SharedSnapshotQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SharedSnapshotQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SnapshotQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SnapshotQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SourceIdsList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SourceId" + } + }, + "SourceNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SourceType":{ + "type":"string", + "enum":[ + "db-instance", + "db-parameter-group", + "db-security-group", + "db-snapshot", + "db-cluster", + "db-cluster-snapshot" + ] + }, + "StorageQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"StorageQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "StorageTypeNotSupportedFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"StorageTypeNotSupported", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "String":{"type":"string"}, + "Subnet":{ + "type":"structure", + "members":{ + "SubnetIdentifier":{"shape":"String"}, + "SubnetAvailabilityZone":{"shape":"AvailabilityZone"}, + "SubnetStatus":{"shape":"String"} + } + }, + "SubnetAlreadyInUse":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubnetAlreadyInUse", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SubnetIdentifierList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SubnetIdentifier" + } + }, + "SubnetList":{ + "type":"list", + "member":{ + "shape":"Subnet", + "locationName":"Subnet" + } + }, + "SubscriptionAlreadyExistFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionAlreadyExist", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SubscriptionCategoryNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionCategoryNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SubscriptionNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SupportedCharacterSetsList":{ + "type":"list", + "member":{ + "shape":"CharacterSet", + "locationName":"CharacterSet" + } + }, + "TStamp":{"type":"timestamp"}, + "Tag":{ + "type":"structure", + "members":{ + "Key":{"shape":"String"}, + "Value":{"shape":"String"} + } + }, + "TagList":{ + "type":"list", + "member":{ + "shape":"Tag", + "locationName":"Tag" + } + }, + "TagListMessage":{ + "type":"structure", + "members":{ + "TagList":{"shape":"TagList"} + } + }, + "UpgradeTarget":{ + "type":"structure", + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "Description":{"shape":"String"}, + "AutoUpgrade":{"shape":"Boolean"}, + "IsMajorVersionUpgrade":{"shape":"Boolean"} + } + }, + "ValidUpgradeTargetList":{ + "type":"list", + "member":{ + "shape":"UpgradeTarget", + "locationName":"UpgradeTarget" + } + }, + 
"VpcSecurityGroupIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"VpcSecurityGroupId" + } + }, + "VpcSecurityGroupMembership":{ + "type":"structure", + "members":{ + "VpcSecurityGroupId":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "VpcSecurityGroupMembershipList":{ + "type":"list", + "member":{ + "shape":"VpcSecurityGroupMembership", + "locationName":"VpcSecurityGroupMembership" + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/docs-2.json new file mode 100644 index 000000000..11b4827c3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/docs-2.json @@ -0,0 +1,2755 @@ +{ + "version": "2.0", + "service": "Amazon Relational Database Service

    Amazon Relational Database Service (Amazon RDS) is a web service that makes it easier to set up, operate, and scale a relational database in the cloud. It provides cost-efficient, resizable capacity for an industry-standard relational database and manages common database administration tasks, freeing up developers to focus on what makes their applications and businesses unique.

    Amazon RDS gives you access to the capabilities of a MySQL, MariaDB, PostgreSQL, Microsoft SQL Server, Oracle, or Amazon Aurora database server. These capabilities mean that the code, applications, and tools you already use today with your existing databases work with Amazon RDS without modification. Amazon RDS automatically backs up your database and maintains the database software that powers your DB instance. Amazon RDS is flexible: you can scale your database instance's compute resources and storage capacity to meet your application's demand. As with all Amazon Web Services, there are no up-front investments, and you pay only for the resources you use.

    This interface reference for Amazon RDS contains documentation for a programming or command line interface you can use to manage Amazon RDS. Note that Amazon RDS is asynchronous, which means that some interfaces might require techniques such as polling or callback functions to determine when a command has been applied. In this reference, the parameter descriptions indicate whether a command is applied immediately, on the next instance reboot, or during the maintenance window. The reference structure is as follows, and related topics from the user guide are listed below.

    Amazon RDS API Reference

    Amazon RDS User Guide

    ", + "operations": { + "AddSourceIdentifierToSubscription": "

    Adds a source identifier to an existing RDS event notification subscription.

    ", + "AddTagsToResource": "

    Adds metadata tags to an Amazon RDS resource. These tags can also be used with cost allocation reporting to track cost associated with Amazon RDS resources, or used in a Condition statement in an IAM policy for Amazon RDS.

    For an overview on tagging Amazon RDS resources, see Tagging Amazon RDS Resources.
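
    To make the tagging model concrete, here is a minimal aws-sdk-go sketch (not part of the vendored files) of this operation. The region, account ID, instance name, and tag values are all placeholders.

        package main

        import (
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/rds"
        )

        func main() {
            // Region is an assumption; credentials come from the SDK's default provider chain.
            svc := rds.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

            // ResourceName is the ARN of the RDS resource to tag (placeholder values).
            _, err := svc.AddTagsToResource(&rds.AddTagsToResourceInput{
                ResourceName: aws.String("arn:aws:rds:us-east-1:123456789012:db:mydbinstance"),
                Tags: []*rds.Tag{
                    {Key: aws.String("CostCenter"), Value: aws.String("research")},
                },
            })
            if err != nil {
                log.Fatal(err)
            }
        }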

    ", + "ApplyPendingMaintenanceAction": "

    Applies a pending maintenance action to a resource (for example, to a DB instance).

    ", + "AuthorizeDBSecurityGroupIngress": "

    Enables ingress to a DBSecurityGroup using one of two forms of authorization. First, EC2 or VPC security groups can be added to the DBSecurityGroup if the application using the database is running on EC2 or VPC instances. Second, IP ranges are available if the application accessing your database is running on the Internet. This API requires one of the following: a CIDR range; an EC2SecurityGroupId (for VPC); or an EC2SecurityGroupOwnerId together with either EC2SecurityGroupName or EC2SecurityGroupId (for non-VPC).

    You cannot authorize ingress from an EC2 security group in one region to an Amazon RDS DB instance in another. You cannot authorize ingress from a VPC security group in one VPC to an Amazon RDS DB instance in another.

    For an overview of CIDR ranges, go to the Wikipedia Tutorial.
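
    A minimal sketch of the CIDR form of this call via aws-sdk-go; the group name and CIDR block are placeholders. The EC2-security-group form would instead set EC2SecurityGroupOwnerId plus either EC2SecurityGroupName or EC2SecurityGroupId.

        package main

        import (
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/rds"
        )

        func main() {
            svc := rds.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

            // CIDR form of the authorization; all values are placeholders.
            _, err := svc.AuthorizeDBSecurityGroupIngress(&rds.AuthorizeDBSecurityGroupIngressInput{
                DBSecurityGroupName: aws.String("mydbsecuritygroup"),
                CIDRIP:              aws.String("203.0.113.0/24"),
            })
            if err != nil {
                log.Fatal(err)
            }
        }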

    ", + "CopyDBClusterSnapshot": "

    Creates a snapshot of a DB cluster. For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

    ", + "CopyDBParameterGroup": "

    Copies the specified DB parameter group.

    ", + "CopyDBSnapshot": "

    Copies the specified DB snapshot. The source DB snapshot must be in the \"available\" state.

    If you are copying from a shared manual DB snapshot, the SourceDBSnapshotIdentifier must be the ARN of the shared DB snapshot.

    ", + "CopyOptionGroup": "

    Copies the specified option group.

    ", + "CreateDBCluster": "

    Creates a new Amazon Aurora DB cluster.

    You can use the ReplicationSourceIdentifier parameter to create the DB cluster as a Read Replica of another DB cluster.

    For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

    ", + "CreateDBClusterParameterGroup": "

    Creates a new DB cluster parameter group.

    Parameters in a DB cluster parameter group apply to all of the instances in a DB cluster.

    A DB cluster parameter group is initially created with the default parameters for the database engine used by instances in the DB cluster. To provide custom values for any of the parameters, you must modify the group after creating it using ModifyDBClusterParameterGroup. Once you've created a DB cluster parameter group, you need to associate it with your DB cluster using ModifyDBCluster. When you associate a new DB cluster parameter group with a running DB cluster, you need to reboot the DB instances in the DB cluster without failover for the new DB cluster parameter group and associated settings to take effect.

    After you create a DB cluster parameter group, you should wait at least 5 minutes before creating your first DB cluster that uses that DB cluster parameter group as the default parameter group. This allows Amazon RDS to fully complete the create action before the DB cluster parameter group is used as the default for a new DB cluster. This is especially important for parameters that are critical when creating the default database for a DB cluster, such as the character set for the default database defined by the character_set_database parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBClusterParameters command to verify that your DB cluster parameter group has been created or modified.

    For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.
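
    A minimal sketch of the first step of the create-then-modify workflow described above, using aws-sdk-go; the group name, family, and description are placeholders. Setting values with ModifyDBClusterParameterGroup is sketched later in this file.

        package main

        import (
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/rds"
        )

        func main() {
            svc := rds.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

            // The group starts out with the engine defaults for this family.
            _, err := svc.CreateDBClusterParameterGroup(&rds.CreateDBClusterParameterGroupInput{
                DBClusterParameterGroupName: aws.String("my-aurora-params"),
                DBParameterGroupFamily:      aws.String("aurora5.6"),
                Description:                 aws.String("custom cluster settings"),
            })
            if err != nil {
                log.Fatal(err)
            }
        }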

    ", + "CreateDBClusterSnapshot": "

    Creates a snapshot of a DB cluster. For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

    ", + "CreateDBInstance": "

    Creates a new DB instance.

    ", + "CreateDBInstanceReadReplica": "

    Creates a DB instance for a DB instance running MySQL, MariaDB, or PostgreSQL that acts as a Read Replica of a source DB instance.

    All Read Replica DB instances are created as Single-AZ deployments with backups disabled. All other DB instance attributes (including DB security groups and DB parameter groups) are inherited from the source DB instance, except as specified below.

    The source DB instance must have backup retention enabled.

    ", + "CreateDBParameterGroup": "

    Creates a new DB parameter group.

    A DB parameter group is initially created with the default parameters for the database engine used by the DB instance. To provide custom values for any of the parameters, you must modify the group after creating it using ModifyDBParameterGroup. Once you've created a DB parameter group, you need to associate it with your DB instance using ModifyDBInstance. When you associate a new DB parameter group with a running DB instance, you need to reboot the DB instance without failover for the new DB parameter group and associated settings to take effect.

    After you create a DB parameter group, you should wait at least 5 minutes before creating your first DB instance that uses that DB parameter group as the default parameter group. This allows Amazon RDS to fully complete the create action before the parameter group is used as the default for a new DB instance. This is especially important for parameters that are critical when creating the default database for a DB instance, such as the character set for the default database defined by the character_set_database parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBParameters command to verify that your DB parameter group has been created or modified.

    ", + "CreateDBSecurityGroup": "

    Creates a new DB security group. DB security groups control access to a DB instance.

    ", + "CreateDBSnapshot": "

    Creates a DBSnapshot. The source DBInstance must be in the \"available\" state.

    ", + "CreateDBSubnetGroup": "

    Creates a new DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the region.

    ", + "CreateEventSubscription": "

    Creates an RDS event notification subscription. This action requires a topic ARN (Amazon Resource Name) created by either the RDS console, the SNS console, or the SNS API. To obtain an ARN with SNS, you must create a topic in Amazon SNS and subscribe to the topic. The ARN is displayed in the SNS console.

    You can specify the type of source (SourceType) you want to be notified of, provide a list of RDS sources (SourceIds) that triggers the events, and provide a list of event categories (EventCategories) for events you want to be notified of. For example, you can specify SourceType = db-instance, SourceIds = mydbinstance1, mydbinstance2 and EventCategories = Availability, Backup.

    If you specify both the SourceType and SourceIds, such as SourceType = db-instance and SourceIdentifier = myDBInstance1, you will be notified of all the db-instance events for the specified source. If you specify a SourceType but do not specify a SourceIdentifier, you will receive notice of the events for that source type for all your RDS sources. If you specify neither the SourceType nor the SourceIdentifier, you will be notified of events generated from all RDS sources belonging to your customer account.
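
    A minimal aws-sdk-go sketch of the SourceType/SourceIds/EventCategories combination described above; the subscription name, topic ARN, and source identifiers are placeholders, and the lowercase category spellings are an assumption (DescribeEventCategories lists the valid values).

        package main

        import (
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/rds"
        )

        func main() {
            svc := rds.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

            _, err := svc.CreateEventSubscription(&rds.CreateEventSubscriptionInput{
                SubscriptionName: aws.String("my-db-events"),
                SnsTopicArn:      aws.String("arn:aws:sns:us-east-1:123456789012:my-topic"),
                SourceType:       aws.String("db-instance"),
                SourceIds:        []*string{aws.String("mydbinstance1"), aws.String("mydbinstance2")},
                EventCategories:  []*string{aws.String("availability"), aws.String("backup")},
                Enabled:          aws.Bool(true),
            })
            if err != nil {
                log.Fatal(err)
            }
        }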

    ", + "CreateOptionGroup": "

    Creates a new option group. You can create up to 20 option groups.

    ", + "DeleteDBCluster": "

    The DeleteDBCluster action deletes a previously provisioned DB cluster. When you delete a DB cluster, all automated backups for that DB cluster are deleted and cannot be recovered. Manual DB cluster snapshots of the specified DB cluster are not deleted.

    For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

    ", + "DeleteDBClusterParameterGroup": "

    Deletes a specified DB cluster parameter group. The DB cluster parameter group to be deleted cannot be associated with any DB clusters.

    For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

    ", + "DeleteDBClusterSnapshot": "

    Deletes a DB cluster snapshot. If the snapshot is being copied, the copy operation is terminated.

    The DB cluster snapshot must be in the available state to be deleted.

    For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

    ", + "DeleteDBInstance": "

    The DeleteDBInstance action deletes a previously provisioned DB instance. When you delete a DB instance, all automated backups for that instance are deleted and cannot be recovered. Manual DB snapshots of the DB instance to be deleted by DeleteDBInstance are not deleted.

    If you request a final DB snapshot, the status of the Amazon RDS DB instance is deleting until the DB snapshot is created. The API action DescribeDBInstances is used to monitor the status of this operation. The action cannot be canceled or reverted once submitted.

    Note that when a DB instance is in a failure state and has a status of failed, incompatible-restore, or incompatible-network, you can only delete it when the SkipFinalSnapshot parameter is set to true.

    If the specified DB instance is part of an Amazon Aurora DB cluster, you cannot delete the DB instance if the following are true:

    • The DB cluster is a Read Replica of another Amazon Aurora DB cluster.

    • The DB instance is the only instance in the DB cluster.

    To delete a DB instance in this case, first call the PromoteReadReplicaDBCluster API action to promote the DB cluster so it's no longer a Read Replica. After the promotion completes, then call the DeleteDBInstance API action to delete the final instance in the DB cluster.
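
    A minimal sketch of the final-snapshot path described above, using aws-sdk-go; the instance and snapshot identifiers are placeholders.

        package main

        import (
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/rds"
        )

        func main() {
            svc := rds.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

            // With SkipFinalSnapshot set to false, FinalDBSnapshotIdentifier is
            // required; the instance reports a "deleting" status (via
            // DescribeDBInstances) until the snapshot completes.
            _, err := svc.DeleteDBInstance(&rds.DeleteDBInstanceInput{
                DBInstanceIdentifier:      aws.String("mydbinstance"),
                SkipFinalSnapshot:         aws.Bool(false),
                FinalDBSnapshotIdentifier: aws.String("mydbinstance-final"),
            })
            if err != nil {
                log.Fatal(err)
            }
        }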

    ", + "DeleteDBParameterGroup": "

    Deletes a specified DBParameterGroup. The DBParameterGroup to be deleted cannot be associated with any DB instances.

    ", + "DeleteDBSecurityGroup": "

    Deletes a DB security group.

    The specified DB security group must not be associated with any DB instances.

    ", + "DeleteDBSnapshot": "

    Deletes a DBSnapshot. If the snapshot is being copied, the copy operation is terminated.

    The DBSnapshot must be in the available state to be deleted.

    ", + "DeleteDBSubnetGroup": "

    Deletes a DB subnet group.

    The specified database subnet group must not be associated with any DB instances.

    ", + "DeleteEventSubscription": "

    Deletes an RDS event notification subscription.

    ", + "DeleteOptionGroup": "

    Deletes an existing option group.

    ", + "DescribeAccountAttributes": "

    Lists all of the attributes for a customer account. The attributes include Amazon RDS quotas for the account, such as the number of DB instances allowed. The description for a quota includes the quota name, current usage toward that quota, and the quota's maximum value.

    This command does not take any parameters.

    ", + "DescribeCertificates": "

    Lists the set of CA certificates provided by Amazon RDS for this AWS account.

    ", + "DescribeDBClusterParameterGroups": "

    Returns a list of DBClusterParameterGroup descriptions. If a DBClusterParameterGroupName parameter is specified, the list will contain only the description of the specified DB cluster parameter group.

    For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

    ", + "DescribeDBClusterParameters": "

    Returns the detailed parameter list for a particular DB cluster parameter group.

    For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

    ", + "DescribeDBClusterSnapshotAttributes": "

    Returns a list of DB cluster snapshot attribute names and values for a manual DB cluster snapshot.

    When sharing snapshots with other AWS accounts, DescribeDBClusterSnapshotAttributes returns the restore attribute and a list of IDs for the AWS accounts that are authorized to copy or restore the manual DB cluster snapshot. If all is included in the list of values for the restore attribute, then the manual DB cluster snapshot is public and can be copied or restored by all AWS accounts.

    To add or remove access for an AWS account to copy or restore a manual DB cluster snapshot, or to make the manual DB cluster snapshot public or private, use the ModifyDBClusterSnapshotAttribute API action.

    ", + "DescribeDBClusterSnapshots": "

    Returns information about DB cluster snapshots. This API action supports pagination.

    For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

    ", + "DescribeDBClusters": "

    Returns information about provisioned Aurora DB clusters. This API supports pagination.

    For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

    ", + "DescribeDBEngineVersions": "

    Returns a list of the available DB engines.

    ", + "DescribeDBInstances": "

    Returns information about provisioned RDS instances. This API supports pagination.
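
    Pagination here is driven by the MaxRecords and Marker members of the message shape; the SDK's generated Pages helper hides the Marker handling. A minimal aws-sdk-go sketch:

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/rds"
        )

        func main() {
            svc := rds.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

            err := svc.DescribeDBInstancesPages(&rds.DescribeDBInstancesInput{},
                func(page *rds.DescribeDBInstancesOutput, lastPage bool) bool {
                    for _, db := range page.DBInstances {
                        fmt.Println(aws.StringValue(db.DBInstanceIdentifier), aws.StringValue(db.DBInstanceStatus))
                    }
                    return true // keep requesting pages until lastPage
                })
            if err != nil {
                log.Fatal(err)
            }
        }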

    ", + "DescribeDBLogFiles": "

    Returns a list of DB log files for the DB instance.

    ", + "DescribeDBParameterGroups": "

    Returns a list of DBParameterGroup descriptions. If a DBParameterGroupName is specified, the list will contain only the description of the specified DB parameter group.

    ", + "DescribeDBParameters": "

    Returns the detailed parameter list for a particular DB parameter group.

    ", + "DescribeDBSecurityGroups": "

    Returns a list of DBSecurityGroup descriptions. If a DBSecurityGroupName is specified, the list will contain only the description of the specified DB security group.

    ", + "DescribeDBSnapshotAttributes": "

    Returns a list of DB snapshot attribute names and values for a manual DB snapshot.

    When sharing snapshots with other AWS accounts, DescribeDBSnapshotAttributes returns the restore attribute and a list of IDs for the AWS accounts that are authorized to copy or restore the manual DB snapshot. If all is included in the list of values for the restore attribute, then the manual DB snapshot is public and can be copied or restored by all AWS accounts.

    To add or remove access for an AWS account to copy or restore a manual DB snapshot, or to make the manual DB snapshot public or private, use the ModifyDBSnapshotAttribute API action.
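
    A minimal aws-sdk-go sketch of granting another account the restore attribute, as described above; the snapshot identifier and account ID are placeholders.

        package main

        import (
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/rds"
        )

        func main() {
            svc := rds.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

            // Grant account 123456789012 (placeholder) permission to copy or
            // restore the manual snapshot; the value "all" would make it public.
            _, err := svc.ModifyDBSnapshotAttribute(&rds.ModifyDBSnapshotAttributeInput{
                DBSnapshotIdentifier: aws.String("mydbsnapshot"),
                AttributeName:        aws.String("restore"),
                ValuesToAdd:          []*string{aws.String("123456789012")},
            })
            if err != nil {
                log.Fatal(err)
            }
        }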

    ", + "DescribeDBSnapshots": "

    Returns information about DB snapshots. This API action supports pagination.

    ", + "DescribeDBSubnetGroups": "

    Returns a list of DBSubnetGroup descriptions. If a DBSubnetGroupName is specified, the list will contain only the descriptions of the specified DBSubnetGroup.

    For an overview of CIDR ranges, go to the Wikipedia Tutorial.

    ", + "DescribeEngineDefaultClusterParameters": "

    Returns the default engine and system parameter information for the cluster database engine.

    For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

    ", + "DescribeEngineDefaultParameters": "

    Returns the default engine and system parameter information for the specified database engine.

    ", + "DescribeEventCategories": "

    Displays a list of categories for all event source types, or, if a source type is specified, for that source type. You can see a list of the event categories and source types in the Events topic in the Amazon RDS User Guide.

    ", + "DescribeEventSubscriptions": "

    Lists all the subscription descriptions for a customer account. The description for a subscription includes SubscriptionName, SNSTopicARN, CustomerID, SourceType, SourceID, CreationTime, and Status.

    If you specify a SubscriptionName, lists the description for that subscription.

    ", + "DescribeEvents": "

    Returns events related to DB instances, DB security groups, DB snapshots, and DB parameter groups for the past 14 days. Events specific to a particular DB instance, DB security group, DB snapshot, or DB parameter group can be obtained by providing the name as a parameter. By default, events from the past hour are returned.
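
    For illustration, a sketch that fetches the last 24 hours of events for one instance, assuming the imports and svc client from the earlier aws-sdk-go sketch; the identifier is a placeholder and Duration is in minutes:

        func recentEvents(svc *rds.RDS) error {
            out, err := svc.DescribeEvents(&rds.DescribeEventsInput{
                SourceIdentifier: aws.String("my-db-instance"), // placeholder
                SourceType:       aws.String("db-instance"),
                Duration:         aws.Int64(1440), // minutes; omit to get the default past hour
            })
            if err != nil {
                return err
            }
            for _, ev := range out.Events {
                fmt.Println(aws.TimeValue(ev.Date), aws.StringValue(ev.Message))
            }
            return nil
        }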

    ", + "DescribeOptionGroupOptions": "

    Describes all available options.

    ", + "DescribeOptionGroups": "

    Describes the available option groups.

    ", + "DescribeOrderableDBInstanceOptions": "

    Returns a list of orderable DB instance options for the specified engine.

    ", + "DescribePendingMaintenanceActions": "

    Returns a list of resources (for example, DB instances) that have at least one pending maintenance action.

    ", + "DescribeReservedDBInstances": "

    Returns information about reserved DB instances for this account, or about a specified reserved DB instance.

    ", + "DescribeReservedDBInstancesOfferings": "

    Lists available reserved DB instance offerings.

    ", + "DownloadDBLogFilePortion": "

    Downloads all or a portion of the specified log file, up to 1 MB in size.

    ", + "FailoverDBCluster": "

    Forces a failover for a DB cluster.

    A failover for a DB cluster promotes one of the read-only instances in the DB cluster to the master DB instance (the cluster writer) and deletes the current primary instance.

    Amazon Aurora will automatically fail over to a read-only instance, if one exists, when the primary instance fails. You can force a failover when you want to simulate a failure of a DB instance for testing. Because each instance in a DB cluster has its own endpoint address, you will need to clean up and re-establish any existing connections that use those endpoint addresses when the failover is complete.

    For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.
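
    A minimal sketch of forcing a failover to a chosen reader, reusing the svc client from the first aws-sdk-go sketch; both identifiers are placeholders, and TargetDBInstanceIdentifier may be omitted to let Aurora pick a replica:

        func forceFailover(svc *rds.RDS) error {
            _, err := svc.FailoverDBCluster(&rds.FailoverDBClusterInput{
                DBClusterIdentifier:        aws.String("my-aurora-cluster"),
                TargetDBInstanceIdentifier: aws.String("my-aurora-replica-1"),
            })
            return err
        }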

    ", + "ListTagsForResource": "

    Lists all tags on an Amazon RDS resource.

    For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS Resources.

    ", + "ModifyDBCluster": "

    Modify a setting for an Amazon Aurora DB cluster. You can change one or more database configuration parameters by specifying these parameters and the new values in the request. For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

    ", + "ModifyDBClusterParameterGroup": "

    Modifies the parameters of a DB cluster parameter group. To modify more than one parameter, submit a list of the following: ParameterName, ParameterValue, and ApplyMethod. A maximum of 20 parameters can be modified in a single request.

    For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

    Changes to dynamic parameters are applied immediately. Changes to static parameters require a reboot without failover to the DB cluster associated with the parameter group before the change can take effect.

    After you create a DB cluster parameter group, you should wait at least 5 minutes before creating your first DB cluster that uses that DB cluster parameter group as the default parameter group. This allows Amazon RDS to fully complete the create action before the parameter group is used as the default for a new DB cluster. This is especially important for parameters that are critical when creating the default database for a DB cluster, such as the character set for the default database defined by the character_set_database parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBClusterParameters command to verify that your DB cluster parameter group has been created or modified.
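
    A sketch of the modify-then-verify flow described above, assuming the svc client from the first sketch; the group and parameter names are placeholders, and whether "immediate" is allowed depends on the parameter being dynamic:

        func tuneClusterGroup(svc *rds.RDS) error {
            // At most 20 parameters can be modified per request.
            _, err := svc.ModifyDBClusterParameterGroup(&rds.ModifyDBClusterParameterGroupInput{
                DBClusterParameterGroupName: aws.String("my-cluster-params"),
                Parameters: []*rds.Parameter{{
                    ParameterName:  aws.String("my_dynamic_parameter"), // placeholder
                    ParameterValue: aws.String("new-value"),            // placeholder
                    ApplyMethod:    aws.String("immediate"),            // "pending-reboot" for static parameters
                }},
            })
            if err != nil {
                return err
            }
            // Verify the group's current parameter values.
            out, err := svc.DescribeDBClusterParameters(&rds.DescribeDBClusterParametersInput{
                DBClusterParameterGroupName: aws.String("my-cluster-params"),
            })
            if err != nil {
                return err
            }
            for _, p := range out.Parameters {
                fmt.Println(aws.StringValue(p.ParameterName), aws.StringValue(p.ParameterValue))
            }
            return nil
        }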

    ", + "ModifyDBClusterSnapshotAttribute": "

    Adds an attribute and values to, or removes an attribute and values from, a manual DB cluster snapshot.

    To share a manual DB cluster snapshot with other AWS accounts, specify restore as the AttributeName and use the ValuesToAdd parameter to add a list of IDs of the AWS accounts that are authorized to restore the manual DB cluster snapshot. Use the value all to make the manual DB cluster snapshot public, which means that it can be copied or restored by all AWS accounts. Do not add the all value for any manual DB cluster snapshots that contain private information that you don't want available to all AWS accounts.

    To view which AWS accounts have access to copy or restore a manual DB cluster snapshot, or whether a manual DB cluster snapshot is public or private, use the DescribeDBClusterSnapshotAttributes API action.

    If a manual DB cluster snapshot is encrypted, it cannot be shared.
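
    A sketch of authorizing a single account, assuming the svc client from the first sketch; the snapshot identifier and account ID are placeholders:

        func shareClusterSnapshot(svc *rds.RDS) error {
            _, err := svc.ModifyDBClusterSnapshotAttribute(&rds.ModifyDBClusterSnapshotAttributeInput{
                DBClusterSnapshotIdentifier: aws.String("my-cluster-snapshot"),
                AttributeName:               aws.String("restore"),
                ValuesToAdd:                 []*string{aws.String("123456789012")}, // target account ID
            })
            return err
        }

    Passing ValuesToRemove instead revokes access, and adding the value all would make the snapshot public.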

    ", + "ModifyDBInstance": "

    Modify settings for a DB instance. You can change one or more database configuration parameters by specifying these parameters and the new values in the request.

    ", + "ModifyDBParameterGroup": "

    Modifies the parameters of a DB parameter group. To modify more than one parameter, submit a list of the following: ParameterName, ParameterValue, and ApplyMethod. A maximum of 20 parameters can be modified in a single request.

    Changes to dynamic parameters are applied immediately. Changes to static parameters require a reboot without failover to the DB instance associated with the parameter group before the change can take effect.

    After you modify a DB parameter group, you should wait at least 5 minutes before creating your first DB instance that uses that DB parameter group as the default parameter group. This allows Amazon RDS to fully complete the modify action before the parameter group is used as the default for a new DB instance. This is especially important for parameters that are critical when creating the default database for a DB instance, such as the character set for the default database defined by the character_set_database parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBParameters command to verify that your DB parameter group has been created or modified.

    ", + "ModifyDBSnapshotAttribute": "

    Adds an attribute and values to, or removes an attribute and values from, a manual DB snapshot.

    To share a manual DB snapshot with other AWS accounts, specify restore as the AttributeName and use the ValuesToAdd parameter to add a list of IDs of the AWS accounts that are authorized to restore the manual DB snapshot. Use the value all to make the manual DB snapshot public, which means it can be copied or restored by all AWS accounts. Do not add the all value for any manual DB snapshots that contain private information that you don't want available to all AWS accounts.

    To view which AWS accounts have access to copy or restore a manual DB snapshot, or whether a manual DB snapshot is public or private, use the DescribeDBSnapshotAttributes API action.

    If the manual DB snapshot is encrypted, it cannot be shared.

    ", + "ModifyDBSubnetGroup": "

    Modifies an existing DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the region.

    ", + "ModifyEventSubscription": "

    Modifies an existing RDS event notification subscription. Note that you cannot modify the source identifiers using this call; to change source identifiers for a subscription, use the AddSourceIdentifierToSubscription and RemoveSourceIdentifierFromSubscription calls.

    You can see a list of the event categories for a given SourceType in the Events topic in the Amazon RDS User Guide or by using the DescribeEventCategories action.

    ", + "ModifyOptionGroup": "

    Modifies an existing option group.

    ", + "PromoteReadReplica": "

    Promotes a Read Replica DB instance to a standalone DB instance.

    We recommend that you enable automated backups on your Read Replica before promoting the Read Replica. This ensures that no backup is taken during the promotion process. Once the instance is promoted to a primary instance, backups are taken based on your backup settings.
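
    A sketch that turns on automated backups as part of the promotion, assuming the svc client from the first sketch; the identifier and retention period are placeholders:

        func promoteReplica(svc *rds.RDS) error {
            _, err := svc.PromoteReadReplica(&rds.PromoteReadReplicaInput{
                DBInstanceIdentifier:  aws.String("my-read-replica"),
                BackupRetentionPeriod: aws.Int64(7), // days of automated backups after promotion
            })
            return err
        }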

    ", + "PromoteReadReplicaDBCluster": "

    Promotes a Read Replica DB cluster to a standalone DB cluster.

    ", + "PurchaseReservedDBInstancesOffering": "

    Purchases a reserved DB instance offering.

    ", + "RebootDBInstance": "

    Rebooting a DB instance restarts the database engine service. A reboot also applies any pending modifications to the associated DB parameter group. Rebooting a DB instance results in a momentary outage of the instance, during which the DB instance status is set to rebooting. If the RDS instance is configured for MultiAZ, it is possible that the reboot will be conducted through a failover. An Amazon RDS event is created when the reboot is completed.

    If your DB instance is deployed in multiple Availability Zones, you can force a failover from one AZ to the other during the reboot. You might force a failover to test the availability of your DB instance deployment or to restore operations to the original AZ after a failover occurs.

    The time required to reboot is a function of the specific database engine's crash recovery process. To improve the reboot time, we recommend that you reduce database activities as much as possible during the reboot process to reduce rollback activity for in-transit transactions.
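
    A sketch of a reboot that exercises a Multi-AZ failover, assuming the svc client from the first sketch; the identifier is a placeholder:

        func rebootWithFailover(svc *rds.RDS) error {
            _, err := svc.RebootDBInstance(&rds.RebootDBInstanceInput{
                DBInstanceIdentifier: aws.String("my-db-instance"),
                ForceFailover:        aws.Bool(true), // valid only for Multi-AZ instances
            })
            return err
        }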

    ", + "RemoveSourceIdentifierFromSubscription": "

    Removes a source identifier from an existing RDS event notification subscription.

    ", + "RemoveTagsFromResource": "

    Removes metadata tags from an Amazon RDS resource.

    For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS Resources.

    ", + "ResetDBClusterParameterGroup": "

    Modifies the parameters of a DB cluster parameter group to the default value. To reset specific parameters, submit a list of the following: ParameterName and ApplyMethod. To reset the entire DB cluster parameter group, specify the DBClusterParameterGroupName and ResetAllParameters parameters.

    When resetting the entire group, dynamic parameters are updated immediately and static parameters are set to pending-reboot to take effect on the next DB instance restart or RebootDBInstance request. You must call RebootDBInstance for every DB instance in your DB cluster that you want the updated static parameter to apply to.

    For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

    ", + "ResetDBParameterGroup": "

    Modifies the parameters of a DB parameter group to the engine/system default value. To reset specific parameters, submit a list of the following: ParameterName and ApplyMethod. To reset the entire DB parameter group, specify the DBParameterGroup name and ResetAllParameters parameters. When resetting the entire group, dynamic parameters are updated immediately and static parameters are set to pending-reboot to take effect on the next DB instance restart or RebootDBInstance request.

    ", + "RestoreDBClusterFromSnapshot": "

    Creates a new DB cluster from a DB cluster snapshot. The target DB cluster is created from the source DB cluster restore point with the same configuration as the original source DB cluster, except that the new DB cluster is created with the default security group.

    For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

    ", + "RestoreDBClusterToPointInTime": "

    Restores a DB cluster to an arbitrary point in time. Users can restore to any point in time before LatestRestorableTime for up to BackupRetentionPeriod days. The target DB cluster is created from the source DB cluster with the same configuration as the original DB cluster, except that the new DB cluster is created with the default DB security group.

    For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.
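
    A sketch restoring to the latest restorable time, assuming the svc client from the first sketch; identifiers are placeholders, and RestoreToTime can be set instead of UseLatestRestorableTime to pick an explicit point in time:

        func restoreClusterToLatest(svc *rds.RDS) error {
            _, err := svc.RestoreDBClusterToPointInTime(&rds.RestoreDBClusterToPointInTimeInput{
                DBClusterIdentifier:       aws.String("my-restored-cluster"),
                SourceDBClusterIdentifier: aws.String("my-source-cluster"),
                UseLatestRestorableTime:   aws.Bool(true),
            })
            return err
        }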

    ", + "RestoreDBInstanceFromDBSnapshot": "

    Creates a new DB instance from a DB snapshot. The target database is created from the source database restore point with most of the original configuration, but with the default security group and the default DB parameter group. By default, the new DB instance is created as a single-AZ deployment except when the instance is a SQL Server instance that has an option group that is associated with mirroring; in this case, the instance becomes a mirrored AZ deployment and not a single-AZ deployment.

    If your intent is to replace your original DB instance with the new, restored DB instance, then rename your original DB instance before you call the RestoreDBInstanceFromDBSnapshot action. RDS does not allow two DB instances with the same name. Once you have renamed your original DB instance with a different identifier, then you can pass the original name of the DB instance as the DBInstanceIdentifier in the call to the RestoreDBInstanceFromDBSnapshot action. The result is that you will replace the original DB instance with the DB instance created from the snapshot.

    If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier must be the ARN of the shared DB snapshot.
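
    A sketch of the rename-then-restore flow described above, assuming the svc client from the first sketch; identifiers are placeholders, and in practice the rename must have fully completed before the restore call reuses the original name:

        func replaceFromSnapshot(svc *rds.RDS) error {
            // Step 1: move the existing instance out of the way.
            _, err := svc.ModifyDBInstance(&rds.ModifyDBInstanceInput{
                DBInstanceIdentifier:    aws.String("my-db"),
                NewDBInstanceIdentifier: aws.String("my-db-old"),
                ApplyImmediately:        aws.Bool(true),
            })
            if err != nil {
                return err
            }
            // Step 2: restore the snapshot under the original name
            // (only after the rename has finished).
            _, err = svc.RestoreDBInstanceFromDBSnapshot(&rds.RestoreDBInstanceFromDBSnapshotInput{
                DBInstanceIdentifier: aws.String("my-db"),
                DBSnapshotIdentifier: aws.String("my-db-snapshot"),
            })
            return err
        }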

    ", + "RestoreDBInstanceToPointInTime": "

    Restores a DB instance to an arbitrary point in time. You can restore to any point in time before the time identified by the LatestRestorableTime property. You can restore to a point up to the number of days specified by the BackupRetentionPeriod property.

    The target database is created with most of the original configuration, but in a system-selected availability zone, with the default security group, the default subnet group, and the default DB parameter group. By default, the new DB instance is created as a single-AZ deployment except when the instance is a SQL Server instance that has an option group that is associated with mirroring; in this case, the instance becomes a mirrored deployment and not a single-AZ deployment.

    ", + "RevokeDBSecurityGroupIngress": "

    Revokes ingress from a DBSecurityGroup for previously authorized IP ranges or EC2 or VPC Security Groups. Required parameters for this API are one of CIDRIP, EC2SecurityGroupId for VPC, or (EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId).

    " + }, + "shapes": { + "AccountAttributesMessage": { + "base": "

    Data returned by the DescribeAccountAttributes action.

    ", + "refs": { + } + }, + "AccountQuota": { + "base": "

    Describes a quota for an AWS account, for example, the number of DB instances allowed.

    ", + "refs": { + "AccountQuotaList$member": null + } + }, + "AccountQuotaList": { + "base": null, + "refs": { + "AccountAttributesMessage$AccountQuotas": "

    A list of AccountQuota objects. Within this list, each quota has a name, a count of usage toward the quota maximum, and a maximum value for the quota.

    " + } + }, + "AddSourceIdentifierToSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "AddSourceIdentifierToSubscriptionResult": { + "base": null, + "refs": { + } + }, + "AddTagsToResourceMessage": { + "base": "

    ", + "refs": { + } + }, + "ApplyMethod": { + "base": null, + "refs": { + "Parameter$ApplyMethod": "

    Indicates when to apply parameter updates.

    " + } + }, + "ApplyPendingMaintenanceActionMessage": { + "base": "

    ", + "refs": { + } + }, + "ApplyPendingMaintenanceActionResult": { + "base": null, + "refs": { + } + }, + "AttributeValueList": { + "base": null, + "refs": { + "DBClusterSnapshotAttribute$AttributeValues": "

    The value(s) for the manual DB cluster snapshot attribute.

    If the AttributeName field is set to restore, then this element returns a list of IDs of the AWS accounts that are authorized to copy or restore the manual DB cluster snapshot. If a value of all is in the list, then the manual DB cluster snapshot is public and available for any AWS account to copy or restore.

    ", + "DBSnapshotAttribute$AttributeValues": "

    The value or values for the manual DB snapshot attribute.

    If the AttributeName field is set to restore, then this element returns a list of IDs of the AWS accounts that are authorized to copy or restore the manual DB snapshot. If a value of all is in the list, then the manual DB snapshot is public and available for any AWS account to copy or restore.

    ", + "ModifyDBClusterSnapshotAttributeMessage$ValuesToAdd": "

    A list of DB cluster snapshot attributes to add to the attribute specified by AttributeName.

    To authorize other AWS accounts to copy or restore a manual DB cluster snapshot, set this list to include one or more AWS account IDs, or all to make the manual DB cluster snapshot restorable by any AWS account. Do not add the all value for any manual DB cluster snapshots that contain private information that you don't want available to all AWS accounts.

    ", + "ModifyDBClusterSnapshotAttributeMessage$ValuesToRemove": "

    A list of DB cluster snapshot attributes to remove from the attribute specified by AttributeName.

    To remove authorization for other AWS accounts to copy or restore a manual DB cluster snapshot, set this list to include one or more AWS account identifiers, or all to remove authorization for any AWS account to copy or restore the DB cluster snapshot. If you specify all, an AWS account whose account ID is explicitly added to the restore attribute can still copy or restore a manual DB cluster snapshot.

    ", + "ModifyDBSnapshotAttributeMessage$ValuesToAdd": "

    A list of DB snapshot attributes to add to the attribute specified by AttributeName.

    To authorize other AWS accounts to copy or restore a manual snapshot, set this list to include one or more AWS account IDs, or all to make the manual DB snapshot restorable by any AWS account. Do not add the all value for any manual DB snapshots that contain private information that you don't want available to all AWS accounts.

    ", + "ModifyDBSnapshotAttributeMessage$ValuesToRemove": "

    A list of DB snapshot attributes to remove from the attribute specified by AttributeName.

    To remove authorization for other AWS accounts to copy or restore a manual snapshot, set this list to include one or more AWS account identifiers, or all to remove authorization for any AWS account to copy or restore the DB snapshot. If you specify all, an AWS account whose account ID is explicitly added to the restore attribute can still copy or restore the manual DB snapshot.

    " + } + }, + "AuthorizationAlreadyExistsFault": { + "base": "

    The specified CIDRIP or EC2 security group is already authorized for the specified DB security group.

    ", + "refs": { + } + }, + "AuthorizationNotFoundFault": { + "base": "

    Specified CIDRIP or EC2 security group is not authorized for the specified DB security group.

    RDS also may not be authorized via IAM to perform necessary actions on your behalf.

    ", + "refs": { + } + }, + "AuthorizationQuotaExceededFault": { + "base": "

    DB security group authorization quota has been reached.

    ", + "refs": { + } + }, + "AuthorizeDBSecurityGroupIngressMessage": { + "base": "

    ", + "refs": { + } + }, + "AuthorizeDBSecurityGroupIngressResult": { + "base": null, + "refs": { + } + }, + "AvailabilityZone": { + "base": "

    Contains Availability Zone information.

    This data type is used as an element in the following data type:

    ", + "refs": { + "AvailabilityZoneList$member": null, + "Subnet$SubnetAvailabilityZone": null + } + }, + "AvailabilityZoneList": { + "base": null, + "refs": { + "OrderableDBInstanceOption$AvailabilityZones": "

    A list of Availability Zones for the orderable DB instance.

    " + } + }, + "AvailabilityZones": { + "base": null, + "refs": { + "CreateDBClusterMessage$AvailabilityZones": "

    A list of EC2 Availability Zones that instances in the DB cluster can be created in. For information on regions and Availability Zones, see Regions and Availability Zones.

    ", + "DBCluster$AvailabilityZones": "

    Provides the list of EC2 Availability Zones that instances in the DB cluster can be created in.

    ", + "DBClusterSnapshot$AvailabilityZones": "

    Provides the list of EC2 Availability Zones that instances in the DB cluster snapshot can be restored in.

    ", + "RestoreDBClusterFromSnapshotMessage$AvailabilityZones": "

    Provides the list of EC2 Availability Zones that instances in the restored DB cluster can be created in.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "DBCluster$StorageEncrypted": "

    Specifies whether the DB cluster is encrypted.

    ", + "DBClusterMember$IsClusterWriter": "

    Value that is true if the cluster member is the primary instance for the DB cluster and false otherwise.

    ", + "DBClusterSnapshot$StorageEncrypted": "

    Specifies whether the DB cluster snapshot is encrypted.

    ", + "DBInstance$MultiAZ": "

    Specifies if the DB instance is a Multi-AZ deployment.

    ", + "DBInstance$AutoMinorVersionUpgrade": "

    Indicates that minor version patches are applied automatically.

    ", + "DBInstance$PubliclyAccessible": "

    Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

    Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

    • Default VPC: true

    • VPC: false

    If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private.

    ", + "DBInstance$StorageEncrypted": "

    Specifies whether the DB instance is encrypted.

    ", + "DBInstance$CopyTagsToSnapshot": "

    Specifies whether tags are copied from the DB instance to snapshots of the DB instance.

    ", + "DBInstanceStatusInfo$Normal": "

    Boolean value that is true if the instance is operating normally, or false if the instance is in an error state.

    ", + "DBSnapshot$Encrypted": "

    Specifies whether the DB snapshot is encrypted.

    ", + "DeleteDBClusterMessage$SkipFinalSnapshot": "

    Determines whether a final DB cluster snapshot is created before the DB cluster is deleted. If true is specified, no DB cluster snapshot is created. If false is specified, a DB cluster snapshot is created before the DB cluster is deleted.

    You must specify a FinalDBSnapshotIdentifier parameter if SkipFinalSnapshot is false.

    Default: false

    ", + "DeleteDBInstanceMessage$SkipFinalSnapshot": "

    Determines whether a final DB snapshot is created before the DB instance is deleted. If true is specified, no DBSnapshot is created. If false is specified, a DB snapshot is created before the DB instance is deleted.

    Note that when a DB instance is in a failure state and has a status of 'failed', 'incompatible-restore', or 'incompatible-network', it can only be deleted when the SkipFinalSnapshot parameter is set to \"true\".

    Specify true when deleting a Read Replica.

    The FinalDBSnapshotIdentifier parameter must be specified if SkipFinalSnapshot is false.

    Default: false

    ", + "DescribeDBClusterSnapshotsMessage$IncludeShared": "

    Set this value to true to include shared manual DB cluster snapshots from other AWS accounts that this AWS account has been given permission to copy or restore, otherwise set this value to false. The default is false.

    You can give an AWS account permission to restore a manual DB cluster snapshot from another AWS account by using the ModifyDBClusterSnapshotAttribute API action.
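
    A sketch listing shared cluster snapshots alongside your own, assuming the svc client from the first aws-sdk-go sketch:

        func listSharedClusterSnapshots(svc *rds.RDS) error {
            out, err := svc.DescribeDBClusterSnapshots(&rds.DescribeDBClusterSnapshotsInput{
                IncludeShared: aws.Bool(true),
            })
            if err != nil {
                return err
            }
            for _, s := range out.DBClusterSnapshots {
                fmt.Println(aws.StringValue(s.DBClusterSnapshotIdentifier), aws.StringValue(s.SnapshotType))
            }
            return nil
        }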

    ", + "DescribeDBClusterSnapshotsMessage$IncludePublic": "

    Set this value to true to include manual DB cluster snapshots that are public and can be copied or restored by any AWS account, otherwise set this value to false. The default is false.

    You can share a manual DB cluster snapshot as public by using the ModifyDBClusterSnapshotAttribute API action.

    ", + "DescribeDBEngineVersionsMessage$DefaultOnly": "

    Indicates that only the default version of the specified engine or engine and major version combination is returned.

    ", + "DescribeDBSnapshotsMessage$IncludeShared": "

    Set this value to true to include shared manual DB snapshots from other AWS accounts that this AWS account has been given permission to copy or restore, otherwise set this value to false. The default is false.

    You can give an AWS account permission to restore a manual DB snapshot from another AWS account by using the ModifyDBSnapshotAttribute API action.

    ", + "DescribeDBSnapshotsMessage$IncludePublic": "

    Set this value to true to include manual DB snapshots that are public and can be copied or restored by any AWS account, otherwise set this value to false. The default is false.

    You can share a manual DB snapshot as public by using the ModifyDBSnapshotAttribute API.

    ", + "DownloadDBLogFilePortionDetails$AdditionalDataPending": "

    Boolean value that, if true, indicates there is more data to be downloaded.

    ", + "EventSubscription$Enabled": "

    A Boolean value indicating if the subscription is enabled. True indicates the subscription is enabled.

    ", + "ModifyDBClusterMessage$ApplyImmediately": "

    A value that specifies whether the modifications in this request and any pending modifications are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the DB cluster. If this parameter is set to false, changes to the DB cluster are applied during the next maintenance window.

    The ApplyImmediately parameter only affects the NewDBClusterIdentifier and MasterUserPassword values. If you set the ApplyImmediately parameter value to false, then changes to the NewDBClusterIdentifier and MasterUserPassword values are applied during the next maintenance window. All other changes are applied immediately, regardless of the value of the ApplyImmediately parameter.

    Default: false

    ", + "ModifyDBInstanceMessage$ApplyImmediately": "

    Specifies whether the modifications in this request and any pending modifications are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the DB instance.

    If this parameter is set to false, changes to the DB instance are applied during the next maintenance window. Some parameter changes can cause an outage and will be applied on the next call to RebootDBInstance, or the next failure reboot. Review the table of parameters in Modifying a DB Instance and Using the Apply Immediately Parameter to see the impact that setting ApplyImmediately to true or false has for each modified parameter and to determine when the changes will be applied.

    Default: false
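
    A sketch applying an instance class change outside the maintenance window, assuming the svc client from the first sketch; the identifier and class are placeholders:

        func resizeNow(svc *rds.RDS) error {
            _, err := svc.ModifyDBInstance(&rds.ModifyDBInstanceInput{
                DBInstanceIdentifier: aws.String("my-db-instance"),
                DBInstanceClass:      aws.String("db.r3.large"),
                ApplyImmediately:     aws.Bool(true), // apply now rather than at the next window
            })
            return err
        }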

    ", + "ModifyDBInstanceMessage$AllowMajorVersionUpgrade": "

    Indicates that major version upgrades are allowed. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible.

    Constraints: This parameter must be set to true when specifying a value for the EngineVersion parameter that is a different major version than the DB instance's current version.

    ", + "ModifyOptionGroupMessage$ApplyImmediately": "

    Indicates whether the changes should be applied immediately, or during the next maintenance window for each instance associated with the option group.

    ", + "Option$Persistent": "

    Indicate if this option is persistent.

    ", + "Option$Permanent": "

    Indicate if this option is permanent.

    ", + "OptionGroup$AllowsVpcAndNonVpcInstanceMemberships": "

    Indicates whether this option group can be applied to both VPC and non-VPC instances. The value true indicates the option group can be applied to both VPC and non-VPC instances.

    ", + "OptionGroupOption$PortRequired": "

    Specifies whether the option requires a port.

    ", + "OptionGroupOption$Persistent": "

    A persistent option cannot be removed from the option group once the option group is used, but this option can be removed from the db instance while modifying the related data and assigning another option group without this option.

    ", + "OptionGroupOption$Permanent": "

    A permanent option cannot be removed from the option group once the option group is used, and it cannot be removed from the db instance after assigning an option group with this permanent option.

    ", + "OptionGroupOptionSetting$IsModifiable": "

    Boolean value where true indicates that this option group option can be changed from the default value.

    ", + "OptionSetting$IsModifiable": "

    A Boolean value that, when true, indicates the option setting can be modified from the default.

    ", + "OptionSetting$IsCollection": "

    Indicates if the option setting is part of a collection.

    ", + "OrderableDBInstanceOption$MultiAZCapable": "

    Indicates whether this orderable DB instance is multi-AZ capable.

    ", + "OrderableDBInstanceOption$ReadReplicaCapable": "

    Indicates whether this orderable DB instance can have a Read Replica.

    ", + "OrderableDBInstanceOption$Vpc": "

    Indicates whether this is a VPC orderable DB instance.

    ", + "OrderableDBInstanceOption$SupportsStorageEncryption": "

    Indicates whether this orderable DB instance supports encrypted storage.

    ", + "OrderableDBInstanceOption$SupportsIops": "

    Indicates whether this orderable DB instance supports provisioned IOPS.

    ", + "OrderableDBInstanceOption$SupportsEnhancedMonitoring": "

    Indicates whether the DB instance supports enhanced monitoring at intervals from 1 to 60 seconds.

    ", + "Parameter$IsModifiable": "

    Indicates whether (true) or not (false) the parameter can be modified. Some parameters have security or operational implications that prevent them from being changed.

    ", + "ReservedDBInstance$MultiAZ": "

    Indicates if the reservation applies to Multi-AZ deployments.

    ", + "ReservedDBInstancesOffering$MultiAZ": "

    Indicates if the offering applies to Multi-AZ deployments.

    ", + "ResetDBClusterParameterGroupMessage$ResetAllParameters": "

    A value that is set to true to reset all parameters in the DB cluster parameter group to their default values, and false otherwise. You cannot use this parameter if there is a list of parameter names specified for the Parameters parameter.

    ", + "ResetDBParameterGroupMessage$ResetAllParameters": "

    Specifies whether (true) or not (false) to reset all parameters in the DB parameter group to default values.

    Default: true

    ", + "RestoreDBClusterToPointInTimeMessage$UseLatestRestorableTime": "

    A value that is set to true to restore the DB cluster to the latest restorable backup time, and false otherwise.

    Default: false

    Constraints: Cannot be specified if RestoreToTime parameter is provided.

    ", + "RestoreDBInstanceToPointInTimeMessage$UseLatestRestorableTime": "

    Specifies whether (true) or not (false) the DB instance is restored from the latest backup time.

    Default: false

    Constraints: Cannot be specified if RestoreTime parameter is provided.

    ", + "UpgradeTarget$AutoUpgrade": "

    A value that indicates whether the target version will be applied to any source DB instances that have AutoMinorVersionUpgrade set to true.

    ", + "UpgradeTarget$IsMajorVersionUpgrade": "

    A value that indicates whether a database engine will be upgraded to a major version.

    " + } + }, + "BooleanOptional": { + "base": null, + "refs": { + "CopyDBSnapshotMessage$CopyTags": "

    True to copy all tags from the source DB snapshot to the target DB snapshot; otherwise false. The default is false.

    ", + "CreateDBClusterMessage$StorageEncrypted": "

    Specifies whether the DB cluster is encrypted.

    ", + "CreateDBInstanceMessage$MultiAZ": "

    Specifies if the DB instance is a Multi-AZ deployment. You cannot set the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    ", + "CreateDBInstanceMessage$AutoMinorVersionUpgrade": "

    Indicates that minor engine upgrades will be applied automatically to the DB instance during the maintenance window.

    Default: true

    ", + "CreateDBInstanceMessage$PubliclyAccessible": "

    Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

    Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

    • Default VPC: true

    • VPC: false

    If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private.

    ", + "CreateDBInstanceMessage$StorageEncrypted": "

    Specifies whether the DB instance is encrypted.

    Default: false

    ", + "CreateDBInstanceMessage$CopyTagsToSnapshot": "

    True to copy all tags from the DB instance to snapshots of the DB instance; otherwise false. The default is false.

    ", + "CreateDBInstanceReadReplicaMessage$AutoMinorVersionUpgrade": "

    Indicates that minor engine upgrades will be applied automatically to the Read Replica during the maintenance window.

    Default: Inherits from the source DB instance

    ", + "CreateDBInstanceReadReplicaMessage$PubliclyAccessible": "

    Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

    Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

    • Default VPC: true

    • VPC: false

    If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private.

    ", + "CreateDBInstanceReadReplicaMessage$CopyTagsToSnapshot": "

    True to copy all tags from the Read Replica to snapshots of the Read Replica; otherwise false. The default is false.

    ", + "CreateEventSubscriptionMessage$Enabled": "

    A Boolean value; set to true to activate the subscription, set to false to create the subscription but not activate it.

    ", + "DescribeDBEngineVersionsMessage$ListSupportedCharacterSets": "

    If this parameter is specified, and if the requested engine supports the CharacterSetName parameter for CreateDBInstance, the response includes a list of supported character sets for each engine version.

    ", + "DescribeOrderableDBInstanceOptionsMessage$Vpc": "

    The VPC filter value. Specify this parameter to show only the available VPC or non-VPC offerings.

    ", + "DescribeReservedDBInstancesMessage$MultiAZ": "

    The Multi-AZ filter value. Specify this parameter to show only those reservations matching the specified Multi-AZ parameter.

    ", + "DescribeReservedDBInstancesOfferingsMessage$MultiAZ": "

    The Multi-AZ filter value. Specify this parameter to show only the available offerings matching the specified Multi-AZ parameter.

    ", + "ModifyDBInstanceMessage$MultiAZ": "

    Specifies if the DB instance is a Multi-AZ deployment. Changing this parameter does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

    Constraints: Cannot be specified if the DB instance is a Read Replica.

    ", + "ModifyDBInstanceMessage$AutoMinorVersionUpgrade": "

    Indicates that minor version upgrades will be applied automatically to the DB instance during the maintenance window. Changing this parameter does not result in an outage except in the following case, and the change is asynchronously applied as soon as possible. An outage will result if this parameter is set to true during the maintenance window, a newer minor version is available, and RDS has enabled auto patching for that engine version.

    ", + "ModifyDBInstanceMessage$CopyTagsToSnapshot": "

    True to copy all tags from the DB instance to snapshots of the DB instance; otherwise false. The default is false.

    ", + "ModifyDBInstanceMessage$PubliclyAccessible": "

    Boolean value that indicates if the DB instance has a publicly resolvable DNS name. Set to True to make the DB instance Internet-facing with a publicly resolvable DNS name, which resolves to a public IP address. Set to False to make the DB instance internal with a DNS name that resolves to a private IP address.

    PubliclyAccessible only applies to DB instances in a VPC. The DB instance must be part of a public subnet and PubliclyAccessible must be true in order for it to be publicly accessible.

    Changes to the PubliclyAccessible parameter are applied immediately regardless of the value of the ApplyImmediately parameter.

    Default: false

    ", + "ModifyEventSubscriptionMessage$Enabled": "

    A Boolean value; set to true to activate the subscription.

    ", + "PendingModifiedValues$MultiAZ": "

    Indicates that the Single-AZ DB instance is to change to a Multi-AZ deployment.

    ", + "RebootDBInstanceMessage$ForceFailover": "

    When true, the reboot will be conducted through a MultiAZ failover.

    Constraint: You cannot specify true if the instance is not configured for MultiAZ.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$MultiAZ": "

    Specifies if the DB instance is a Multi-AZ deployment.

    Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$PubliclyAccessible": "

    Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

    Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

    • Default VPC: true

    • VPC: false

    If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$AutoMinorVersionUpgrade": "

    Indicates that minor version upgrades will be applied automatically to the DB instance during the maintenance window.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$CopyTagsToSnapshot": "

    True to copy all tags from the restored DB instance to snapshots of the DB instance; otherwise false. The default is false.

    ", + "RestoreDBInstanceToPointInTimeMessage$MultiAZ": "

    Specifies if the DB instance is a Multi-AZ deployment.

    Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    ", + "RestoreDBInstanceToPointInTimeMessage$PubliclyAccessible": "

    Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

    Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

    • Default VPC: true

    • VPC: false

    If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private.

    ", + "RestoreDBInstanceToPointInTimeMessage$AutoMinorVersionUpgrade": "

    Indicates that minor version upgrades will be applied automatically to the DB instance during the maintenance window.

    ", + "RestoreDBInstanceToPointInTimeMessage$CopyTagsToSnapshot": "

    True to copy all tags from the restored DB instance to snapshots of the DB instance; otherwise false. The default is false.

    " + } + }, + "Certificate": { + "base": "

    A CA certificate for an AWS account.

    ", + "refs": { + "CertificateList$member": null + } + }, + "CertificateList": { + "base": null, + "refs": { + "CertificateMessage$Certificates": "

    The list of Certificate objects for the AWS account.

    " + } + }, + "CertificateMessage": { + "base": "

    Data returned by the DescribeCertificates action.

    ", + "refs": { + } + }, + "CertificateNotFoundFault": { + "base": "

    CertificateIdentifier does not refer to an existing certificate.

    ", + "refs": { + } + }, + "CharacterSet": { + "base": "

    This data type is used as a response element in the action DescribeDBEngineVersions.

    ", + "refs": { + "DBEngineVersion$DefaultCharacterSet": "

    The default character set for new instances of this engine version, if the CharacterSetName parameter of the CreateDBInstance API is not specified.

    ", + "SupportedCharacterSetsList$member": null + } + }, + "CopyDBClusterSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "CopyDBClusterSnapshotResult": { + "base": null, + "refs": { + } + }, + "CopyDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CopyDBParameterGroupResult": { + "base": null, + "refs": { + } + }, + "CopyDBSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "CopyDBSnapshotResult": { + "base": null, + "refs": { + } + }, + "CopyOptionGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CopyOptionGroupResult": { + "base": null, + "refs": { + } + }, + "CreateDBClusterMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBClusterParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBClusterParameterGroupResult": { + "base": null, + "refs": { + } + }, + "CreateDBClusterResult": { + "base": null, + "refs": { + } + }, + "CreateDBClusterSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBClusterSnapshotResult": { + "base": null, + "refs": { + } + }, + "CreateDBInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBInstanceReadReplicaMessage": { + "base": null, + "refs": { + } + }, + "CreateDBInstanceReadReplicaResult": { + "base": null, + "refs": { + } + }, + "CreateDBInstanceResult": { + "base": null, + "refs": { + } + }, + "CreateDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBParameterGroupResult": { + "base": null, + "refs": { + } + }, + "CreateDBSecurityGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBSecurityGroupResult": { + "base": null, + "refs": { + } + }, + "CreateDBSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBSnapshotResult": { + "base": null, + "refs": { + } + }, + "CreateDBSubnetGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBSubnetGroupResult": { + "base": null, + "refs": { + } + }, + "CreateEventSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateEventSubscriptionResult": { + "base": null, + "refs": { + } + }, + "CreateOptionGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateOptionGroupResult": { + "base": null, + "refs": { + } + }, + "DBCluster": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBClusters action.

    ", + "refs": { + "CreateDBClusterResult$DBCluster": null, + "DBClusterList$member": null, + "DeleteDBClusterResult$DBCluster": null, + "FailoverDBClusterResult$DBCluster": null, + "ModifyDBClusterResult$DBCluster": null, + "PromoteReadReplicaDBClusterResult$DBCluster": null, + "RestoreDBClusterFromSnapshotResult$DBCluster": null, + "RestoreDBClusterToPointInTimeResult$DBCluster": null + } + }, + "DBClusterAlreadyExistsFault": { + "base": "

    User already has a DB cluster with the given identifier.

    ", + "refs": { + } + }, + "DBClusterList": { + "base": null, + "refs": { + "DBClusterMessage$DBClusters": "

    Contains a list of DB clusters for the user.

    " + } + }, + "DBClusterMember": { + "base": "

    Contains information about an instance that is part of a DB cluster.

    ", + "refs": { + "DBClusterMemberList$member": null + } + }, + "DBClusterMemberList": { + "base": null, + "refs": { + "DBCluster$DBClusterMembers": "

    Provides the list of instances that make up the DB cluster.

    " + } + }, + "DBClusterMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBClusters action.

    ", + "refs": { + } + }, + "DBClusterNotFoundFault": { + "base": "

    DBClusterIdentifier does not refer to an existing DB cluster.

    ", + "refs": { + } + }, + "DBClusterOptionGroupMemberships": { + "base": null, + "refs": { + "DBCluster$DBClusterOptionGroupMemberships": "

    Provides the list of option group memberships for this DB cluster.

    " + } + }, + "DBClusterOptionGroupStatus": { + "base": "

    Contains status information for a DB cluster option group.

    ", + "refs": { + "DBClusterOptionGroupMemberships$member": null + } + }, + "DBClusterParameterGroup": { + "base": "

    Contains the result of a successful invocation of the CreateDBClusterParameterGroup action.

    This data type is used as a request parameter in the DeleteDBClusterParameterGroup action, and as a response element in the DescribeDBClusterParameterGroups action.

    ", + "refs": { + "CreateDBClusterParameterGroupResult$DBClusterParameterGroup": null, + "DBClusterParameterGroupList$member": null + } + }, + "DBClusterParameterGroupDetails": { + "base": "

    Provides details about a DB cluster parameter group including the parameters in the DB cluster parameter group.

    ", + "refs": { + } + }, + "DBClusterParameterGroupList": { + "base": null, + "refs": { + "DBClusterParameterGroupsMessage$DBClusterParameterGroups": "

    A list of DB cluster parameter groups.

    " + } + }, + "DBClusterParameterGroupNameMessage": { + "base": "

    ", + "refs": { + } + }, + "DBClusterParameterGroupNotFoundFault": { + "base": "

    DBClusterParameterGroupName does not refer to an existing DB cluster parameter group.

    ", + "refs": { + } + }, + "DBClusterParameterGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DBClusterQuotaExceededFault": { + "base": "

    User attempted to create a new DB cluster and the user has already reached the maximum allowed DB cluster quota.

    ", + "refs": { + } + }, + "DBClusterSnapshot": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBClusterSnapshots action.

    ", + "refs": { + "CopyDBClusterSnapshotResult$DBClusterSnapshot": null, + "CreateDBClusterSnapshotResult$DBClusterSnapshot": null, + "DBClusterSnapshotList$member": null, + "DeleteDBClusterSnapshotResult$DBClusterSnapshot": null + } + }, + "DBClusterSnapshotAlreadyExistsFault": { + "base": "

    User already has a DB cluster snapshot with the given identifier.

    ", + "refs": { + } + }, + "DBClusterSnapshotAttribute": { + "base": "

    Contains the name and values of a manual DB cluster snapshot attribute.

    Manual DB cluster snapshot attributes are used to authorize other AWS accounts to restore a manual DB cluster snapshot. For more information, see the ModifyDBClusterSnapshotAttribute API action.

    ", + "refs": { + "DBClusterSnapshotAttributeList$member": null + } + }, + "DBClusterSnapshotAttributeList": { + "base": null, + "refs": { + "DBClusterSnapshotAttributesResult$DBClusterSnapshotAttributes": "

    The list of attributes and values for the manual DB cluster snapshot.

    " + } + }, + "DBClusterSnapshotAttributesResult": { + "base": "

    Contains the results of a successful call to the DescribeDBClusterSnapshotAttributes API action.

    Manual DB cluster snapshot attributes are used to authorize other AWS accounts to copy or restore a manual DB cluster snapshot. For more information, see the ModifyDBClusterSnapshotAttribute API action.

    ", + "refs": { + "DescribeDBClusterSnapshotAttributesResult$DBClusterSnapshotAttributesResult": null, + "ModifyDBClusterSnapshotAttributeResult$DBClusterSnapshotAttributesResult": null + } + }, + "DBClusterSnapshotList": { + "base": null, + "refs": { + "DBClusterSnapshotMessage$DBClusterSnapshots": "

    Provides a list of DB cluster snapshots for the user.

    " + } + }, + "DBClusterSnapshotMessage": { + "base": "

    Provides a list of DB cluster snapshots for the user as the result of a call to the DescribeDBClusterSnapshots action.

    ", + "refs": { + } + }, + "DBClusterSnapshotNotFoundFault": { + "base": "

    DBClusterSnapshotIdentifier does not refer to an existing DB cluster snapshot.

    ", + "refs": { + } + }, + "DBEngineVersion": { + "base": "

    This data type is used as a response element in the action DescribeDBEngineVersions.

    ", + "refs": { + "DBEngineVersionList$member": null + } + }, + "DBEngineVersionList": { + "base": null, + "refs": { + "DBEngineVersionMessage$DBEngineVersions": "

    A list of DBEngineVersion elements.

    " + } + }, + "DBEngineVersionMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBEngineVersions action.

    ", + "refs": { + } + }, + "DBInstance": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBInstances action.

    ", + "refs": { + "CreateDBInstanceReadReplicaResult$DBInstance": null, + "CreateDBInstanceResult$DBInstance": null, + "DBInstanceList$member": null, + "DeleteDBInstanceResult$DBInstance": null, + "ModifyDBInstanceResult$DBInstance": null, + "PromoteReadReplicaResult$DBInstance": null, + "RebootDBInstanceResult$DBInstance": null, + "RestoreDBInstanceFromDBSnapshotResult$DBInstance": null, + "RestoreDBInstanceToPointInTimeResult$DBInstance": null + } + }, + "DBInstanceAlreadyExistsFault": { + "base": "

    User already has a DB instance with the given identifier.

    ", + "refs": { + } + }, + "DBInstanceList": { + "base": null, + "refs": { + "DBInstanceMessage$DBInstances": "

    A list of DBInstance instances.

    " + } + }, + "DBInstanceMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBInstances action.

    ", + "refs": { + } + }, + "DBInstanceNotFoundFault": { + "base": "

    DBInstanceIdentifier does not refer to an existing DB instance.

    ", + "refs": { + } + }, + "DBInstanceStatusInfo": { + "base": "

    Provides a list of status information for a DB instance.

    ", + "refs": { + "DBInstanceStatusInfoList$member": null + } + }, + "DBInstanceStatusInfoList": { + "base": null, + "refs": { + "DBInstance$StatusInfos": "

    The status of a Read Replica. If the instance is not a Read Replica, this will be blank.

    " + } + }, + "DBLogFileNotFoundFault": { + "base": "

    LogFileName does not refer to an existing DB log file.

    ", + "refs": { + } + }, + "DBParameterGroup": { + "base": "

    Contains the result of a successful invocation of the CreateDBParameterGroup action.

    This data type is used as a request parameter in the DeleteDBParameterGroup action, and as a response element in the DescribeDBParameterGroups action.

    ", + "refs": { + "CopyDBParameterGroupResult$DBParameterGroup": null, + "CreateDBParameterGroupResult$DBParameterGroup": null, + "DBParameterGroupList$member": null + } + }, + "DBParameterGroupAlreadyExistsFault": { + "base": "

    A DB parameter group with the same name exists.

    ", + "refs": { + } + }, + "DBParameterGroupDetails": { + "base": "

    Contains the result of a successful invocation of the DescribeDBParameters action.

    ", + "refs": { + } + }, + "DBParameterGroupList": { + "base": null, + "refs": { + "DBParameterGroupsMessage$DBParameterGroups": "

    A list of DBParameterGroup instances.

    " + } + }, + "DBParameterGroupNameMessage": { + "base": "

    Contains the result of a successful invocation of the ModifyDBParameterGroup or ResetDBParameterGroup action.

    ", + "refs": { + } + }, + "DBParameterGroupNotFoundFault": { + "base": "

    DBParameterGroupName does not refer to an existing DB parameter group.

    ", + "refs": { + } + }, + "DBParameterGroupQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB parameter groups.

    ", + "refs": { + } + }, + "DBParameterGroupStatus": { + "base": "

    The status of the DB parameter group.

    This data type is used as a response element in the following actions:

    ", + "refs": { + "DBParameterGroupStatusList$member": null + } + }, + "DBParameterGroupStatusList": { + "base": null, + "refs": { + "DBInstance$DBParameterGroups": "

    Provides the list of DB parameter groups applied to this DB instance.

    " + } + }, + "DBParameterGroupsMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBParameterGroups action.

    ", + "refs": { + } + }, + "DBSecurityGroup": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBSecurityGroups action.

    ", + "refs": { + "AuthorizeDBSecurityGroupIngressResult$DBSecurityGroup": null, + "CreateDBSecurityGroupResult$DBSecurityGroup": null, + "DBSecurityGroups$member": null, + "RevokeDBSecurityGroupIngressResult$DBSecurityGroup": null + } + }, + "DBSecurityGroupAlreadyExistsFault": { + "base": "

    A DB security group with the name specified in DBSecurityGroupName already exists.

    ", + "refs": { + } + }, + "DBSecurityGroupMembership": { + "base": "

    This data type is used as a response element in the following actions:

    ", + "refs": { + "DBSecurityGroupMembershipList$member": null + } + }, + "DBSecurityGroupMembershipList": { + "base": null, + "refs": { + "DBInstance$DBSecurityGroups": "

    Provides a list of DB security group elements containing only DBSecurityGroup.Name and DBSecurityGroup.Status subelements.

    ", + "Option$DBSecurityGroupMemberships": "

    If the option requires access to a port, then this DB security group allows access to the port.

    " + } + }, + "DBSecurityGroupMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBSecurityGroups action.

    ", + "refs": { + } + }, + "DBSecurityGroupNameList": { + "base": null, + "refs": { + "CreateDBInstanceMessage$DBSecurityGroups": "

    A list of DB security groups to associate with this DB instance.

    Default: The default DB security group for the database engine.

    ", + "ModifyDBInstanceMessage$DBSecurityGroups": "

    A list of DB security groups to authorize on this DB instance. Changing this setting does not result in an outage and the change is asynchronously applied as soon as possible.

    Constraints:

    • Must be 1 to 255 alphanumeric characters

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    ", + "OptionConfiguration$DBSecurityGroupMemberships": "

    A list of DBSecurityGroupMembership name strings used for this option.

    " + } + }, + "DBSecurityGroupNotFoundFault": { + "base": "

    DBSecurityGroupName does not refer to an existing DB security group.

    ", + "refs": { + } + }, + "DBSecurityGroupNotSupportedFault": { + "base": "

    A DB security group is not allowed for this action.

    ", + "refs": { + } + }, + "DBSecurityGroupQuotaExceededFault": { + "base": "

    The request would result in the user exceeding the allowed number of DB security groups.

    ", + "refs": { + } + }, + "DBSecurityGroups": { + "base": null, + "refs": { + "DBSecurityGroupMessage$DBSecurityGroups": "

    A list of DBSecurityGroup instances.

    " + } + }, + "DBSnapshot": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBSnapshots action.

    ", + "refs": { + "CopyDBSnapshotResult$DBSnapshot": null, + "CreateDBSnapshotResult$DBSnapshot": null, + "DBSnapshotList$member": null, + "DeleteDBSnapshotResult$DBSnapshot": null + } + }, + "DBSnapshotAlreadyExistsFault": { + "base": "

    DBSnapshotIdentifier is already used by an existing snapshot.

    ", + "refs": { + } + }, + "DBSnapshotAttribute": { + "base": "

    Contains the name and values of a manual DB snapshot attribute.

    Manual DB snapshot attributes are used to authorize other AWS accounts to restore a manual DB snapshot. For more information, see the ModifyDBSnapshotAttribute API.

    ", + "refs": { + "DBSnapshotAttributeList$member": null + } + }, + "DBSnapshotAttributeList": { + "base": null, + "refs": { + "DBSnapshotAttributesResult$DBSnapshotAttributes": "

    The list of attributes and values for the manual DB snapshot.

    " + } + }, + "DBSnapshotAttributesResult": { + "base": "

    Contains the results of a successful call to the DescribeDBSnapshotAttributes API action.

    Manual DB snapshot attributes are used to authorize other AWS accounts to copy or restore a manual DB snapshot. For more information, see the ModifyDBSnapshotAttribute API action.
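
    A minimal Go sketch of this sharing flow, assuming the vendored aws-sdk-go rds client; the snapshot identifier and account ID below are hypothetical:

package main

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/rds"
)

func main() {
    svc := rds.New(session.Must(session.NewSession()))

    // Authorize another AWS account to copy or restore the manual snapshot
    // by adding its account ID to the "restore" attribute.
    _, err := svc.ModifyDBSnapshotAttribute(&rds.ModifyDBSnapshotAttributeInput{
        DBSnapshotIdentifier: aws.String("my-manual-snapshot"),      // hypothetical
        AttributeName:        aws.String("restore"),
        ValuesToAdd:          []*string{aws.String("123456789012")}, // hypothetical account
    })
    if err != nil {
        panic(err)
    }
}
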

    ", + "refs": { + "DescribeDBSnapshotAttributesResult$DBSnapshotAttributesResult": null, + "ModifyDBSnapshotAttributeResult$DBSnapshotAttributesResult": null + } + }, + "DBSnapshotList": { + "base": null, + "refs": { + "DBSnapshotMessage$DBSnapshots": "

    A list of DBSnapshot instances.

    " + } + }, + "DBSnapshotMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBSnapshots action.

    ", + "refs": { + } + }, + "DBSnapshotNotFoundFault": { + "base": "

    DBSnapshotIdentifier does not refer to an existing DB snapshot.

    ", + "refs": { + } + }, + "DBSubnetGroup": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBSubnetGroups action.

    ", + "refs": { + "CreateDBSubnetGroupResult$DBSubnetGroup": null, + "DBInstance$DBSubnetGroup": "

    Specifies information on the subnet group associated with the DB instance, including the name, description, and subnets in the subnet group.

    ", + "DBSubnetGroups$member": null, + "ModifyDBSubnetGroupResult$DBSubnetGroup": null + } + }, + "DBSubnetGroupAlreadyExistsFault": { + "base": "

    DBSubnetGroupName is already used by an existing DB subnet group.

    ", + "refs": { + } + }, + "DBSubnetGroupDoesNotCoverEnoughAZs": { + "base": "

    Subnets in the DB subnet group should cover at least two Availability Zones unless there is only one Availability Zone.

    ", + "refs": { + } + }, + "DBSubnetGroupMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBSubnetGroups action.

    ", + "refs": { + } + }, + "DBSubnetGroupNotAllowedFault": { + "base": "

    Indicates that the DBSubnetGroup must not be specified when creating read replicas in the same region as the source instance.

    ", + "refs": { + } + }, + "DBSubnetGroupNotFoundFault": { + "base": "

    DBSubnetGroupName does not refer to an existing DB subnet group.

    ", + "refs": { + } + }, + "DBSubnetGroupQuotaExceededFault": { + "base": "

    The request would result in the user exceeding the allowed number of DB subnet groups.

    ", + "refs": { + } + }, + "DBSubnetGroups": { + "base": null, + "refs": { + "DBSubnetGroupMessage$DBSubnetGroups": "

    A list of DBSubnetGroup instances.

    " + } + }, + "DBSubnetQuotaExceededFault": { + "base": "

    The request would result in the user exceeding the allowed number of subnets in a DB subnet group.

    ", + "refs": { + } + }, + "DBUpgradeDependencyFailureFault": { + "base": "

    The DB upgrade failed because a resource the DB depends on could not be modified.

    ", + "refs": { + } + }, + "DeleteDBClusterMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBClusterParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBClusterResult": { + "base": null, + "refs": { + } + }, + "DeleteDBClusterSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBClusterSnapshotResult": { + "base": null, + "refs": { + } + }, + "DeleteDBInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBInstanceResult": { + "base": null, + "refs": { + } + }, + "DeleteDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBSecurityGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBSnapshotResult": { + "base": null, + "refs": { + } + }, + "DeleteDBSubnetGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteEventSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteEventSubscriptionResult": { + "base": null, + "refs": { + } + }, + "DeleteOptionGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeAccountAttributesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeCertificatesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBClusterParameterGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBClusterParametersMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBClusterSnapshotAttributesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBClusterSnapshotAttributesResult": { + "base": null, + "refs": { + } + }, + "DescribeDBClusterSnapshotsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBClustersMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBEngineVersionsMessage": { + "base": null, + "refs": { + } + }, + "DescribeDBInstancesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBLogFilesDetails": { + "base": "

    This data type is used as a response element to DescribeDBLogFiles.

    ", + "refs": { + "DescribeDBLogFilesList$member": null + } + }, + "DescribeDBLogFilesList": { + "base": null, + "refs": { + "DescribeDBLogFilesResponse$DescribeDBLogFiles": "

    The DB log files returned.

    " + } + }, + "DescribeDBLogFilesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBLogFilesResponse": { + "base": "

    The response from a call to DescribeDBLogFiles.

    ", + "refs": { + } + }, + "DescribeDBParameterGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBParametersMessage": { + "base": null, + "refs": { + } + }, + "DescribeDBSecurityGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBSnapshotAttributesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBSnapshotAttributesResult": { + "base": null, + "refs": { + } + }, + "DescribeDBSnapshotsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBSubnetGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEngineDefaultClusterParametersMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEngineDefaultClusterParametersResult": { + "base": null, + "refs": { + } + }, + "DescribeEngineDefaultParametersMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEngineDefaultParametersResult": { + "base": null, + "refs": { + } + }, + "DescribeEventCategoriesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEventSubscriptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEventsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeOptionGroupOptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeOptionGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeOrderableDBInstanceOptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribePendingMaintenanceActionsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeReservedDBInstancesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeReservedDBInstancesOfferingsMessage": { + "base": "

    ", + "refs": { + } + }, + "DomainMembership": { + "base": "

    An Active Directory Domain membership record associated with the DB instance.

    ", + "refs": { + "DomainMembershipList$member": null + } + }, + "DomainMembershipList": { + "base": "

    List of Active Directory Domain membership records associated with a DB instance.

    ", + "refs": { + "DBInstance$DomainMemberships": "

    The Active Directory Domain membership records associated with the DB instance.

    " + } + }, + "DomainNotFoundFault": { + "base": "

    Domain does not refer to an existing Active Directory Domain.

    ", + "refs": { + } + }, + "Double": { + "base": null, + "refs": { + "RecurringCharge$RecurringChargeAmount": "

    The amount of the recurring charge.

    ", + "ReservedDBInstance$FixedPrice": "

    The fixed price charged for this reserved DB instance.

    ", + "ReservedDBInstance$UsagePrice": "

    The hourly price charged for this reserved DB instance.

    ", + "ReservedDBInstancesOffering$FixedPrice": "

    The fixed price charged for this offering.

    ", + "ReservedDBInstancesOffering$UsagePrice": "

    The hourly price charged for this offering.

    " + } + }, + "DownloadDBLogFilePortionDetails": { + "base": "

    This data type is used as a response element to DownloadDBLogFilePortion.

    ", + "refs": { + } + }, + "DownloadDBLogFilePortionMessage": { + "base": "

    ", + "refs": { + } + }, + "EC2SecurityGroup": { + "base": "

    This data type is used as a response element in the following actions:

    ", + "refs": { + "EC2SecurityGroupList$member": null + } + }, + "EC2SecurityGroupList": { + "base": null, + "refs": { + "DBSecurityGroup$EC2SecurityGroups": "

    Contains a list of EC2SecurityGroup elements.

    " + } + }, + "Endpoint": { + "base": "

    This data type is used as a response element in the following actions:

    ", + "refs": { + "DBInstance$Endpoint": "

    Specifies the connection endpoint.

    " + } + }, + "EngineDefaults": { + "base": "

    Contains the result of a successful invocation of the DescribeEngineDefaultParameters action.

    ", + "refs": { + "DescribeEngineDefaultClusterParametersResult$EngineDefaults": null, + "DescribeEngineDefaultParametersResult$EngineDefaults": null + } + }, + "Event": { + "base": "

    This data type is used as a response element in the DescribeEvents action.

    ", + "refs": { + "EventList$member": null + } + }, + "EventCategoriesList": { + "base": null, + "refs": { + "CreateEventSubscriptionMessage$EventCategories": "

    A list of event categories for a SourceType that you want to subscribe to. You can see a list of the categories for a given SourceType in the Events topic in the Amazon RDS User Guide or by using the DescribeEventCategories action.
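
    A minimal Go sketch of this flow, assuming the vendored aws-sdk-go: list the categories for a source type with DescribeEventCategories, then subscribe to a subset (the subscription name and SNS topic ARN are hypothetical):

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/rds"
)

func main() {
    svc := rds.New(session.Must(session.NewSession()))

    // Discover the categories available for db-instance events.
    cats, err := svc.DescribeEventCategories(&rds.DescribeEventCategoriesInput{
        SourceType: aws.String("db-instance"),
    })
    if err != nil {
        panic(err)
    }
    fmt.Println(cats.EventCategoriesMapList)

    // Subscribe to a subset of those categories.
    _, err = svc.CreateEventSubscription(&rds.CreateEventSubscriptionInput{
        SubscriptionName: aws.String("my-subscription"), // hypothetical
        SnsTopicArn:      aws.String("arn:aws:sns:us-east-1:123456789012:my-topic"), // hypothetical
        SourceType:       aws.String("db-instance"),
        EventCategories:  []*string{aws.String("failure"), aws.String("backup")},
    })
    if err != nil {
        panic(err)
    }
}
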

    ", + "DescribeEventsMessage$EventCategories": "

    A list of event categories that trigger notifications for an event notification subscription.

    ", + "Event$EventCategories": "

    Specifies the category for the event.

    ", + "EventCategoriesMap$EventCategories": "

    The event categories for the specified source type.

    ", + "EventSubscription$EventCategoriesList": "

    A list of event categories for the RDS event notification subscription.

    ", + "ModifyEventSubscriptionMessage$EventCategories": "

    A list of event categories for a SourceType that you want to subscribe to. You can see a list of the categories for a given SourceType in the Events topic in the Amazon RDS User Guide or by using the DescribeEventCategories action.

    " + } + }, + "EventCategoriesMap": { + "base": "

    Contains the results of a successful invocation of the DescribeEventCategories action.

    ", + "refs": { + "EventCategoriesMapList$member": null + } + }, + "EventCategoriesMapList": { + "base": null, + "refs": { + "EventCategoriesMessage$EventCategoriesMapList": "

    A list of EventCategoriesMap data types.

    " + } + }, + "EventCategoriesMessage": { + "base": "

    Data returned from the DescribeEventCategories action.

    ", + "refs": { + } + }, + "EventList": { + "base": null, + "refs": { + "EventsMessage$Events": "

    A list of Event instances.

    " + } + }, + "EventSubscription": { + "base": "

    Contains the results of a successful invocation of the DescribeEventSubscriptions action.

    ", + "refs": { + "AddSourceIdentifierToSubscriptionResult$EventSubscription": null, + "CreateEventSubscriptionResult$EventSubscription": null, + "DeleteEventSubscriptionResult$EventSubscription": null, + "EventSubscriptionsList$member": null, + "ModifyEventSubscriptionResult$EventSubscription": null, + "RemoveSourceIdentifierFromSubscriptionResult$EventSubscription": null + } + }, + "EventSubscriptionQuotaExceededFault": { + "base": "

    You have reached the maximum number of event subscriptions.

    ", + "refs": { + } + }, + "EventSubscriptionsList": { + "base": null, + "refs": { + "EventSubscriptionsMessage$EventSubscriptionsList": "

    A list of EventSubscriptions data types.

    " + } + }, + "EventSubscriptionsMessage": { + "base": "

    Data returned by the DescribeEventSubscriptions action.

    ", + "refs": { + } + }, + "EventsMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeEvents action.

    ", + "refs": { + } + }, + "FailoverDBClusterMessage": { + "base": "

    ", + "refs": { + } + }, + "FailoverDBClusterResult": { + "base": null, + "refs": { + } + }, + "Filter": { + "base": "

    This type is not currently supported.

    ", + "refs": { + "FilterList$member": null + } + }, + "FilterList": { + "base": null, + "refs": { + "DescribeCertificatesMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeDBClusterParameterGroupsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeDBClusterParametersMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeDBClusterSnapshotsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeDBClustersMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeDBEngineVersionsMessage$Filters": "

    Not currently supported.

    ", + "DescribeDBInstancesMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeDBLogFilesMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeDBParameterGroupsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeDBParametersMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeDBSecurityGroupsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeDBSnapshotsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeDBSubnetGroupsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeEngineDefaultClusterParametersMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeEngineDefaultParametersMessage$Filters": "

    Not currently supported.

    ", + "DescribeEventCategoriesMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeEventSubscriptionsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeEventsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeOptionGroupOptionsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeOptionGroupsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeOrderableDBInstanceOptionsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribePendingMaintenanceActionsMessage$Filters": "

    A filter that specifies one or more resources to return pending maintenance actions for.

    Supported filters:

    • db-instance-id - Accepts DB instance identifiers and DB instance Amazon Resource Names (ARNs). The results list will only include pending maintenance actions for the DB instances identified by these ARNs.
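
    A minimal Go sketch of this filter, assuming the vendored aws-sdk-go (the ARN is hypothetical):

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/rds"
)

func main() {
    svc := rds.New(session.Must(session.NewSession()))

    // Restrict the results to pending maintenance actions for one instance.
    out, err := svc.DescribePendingMaintenanceActions(&rds.DescribePendingMaintenanceActionsInput{
        Filters: []*rds.Filter{{
            Name:   aws.String("db-instance-id"),
            Values: []*string{aws.String("arn:aws:rds:us-east-1:123456789012:db:my-db-instance")}, // hypothetical
        }},
    })
    if err != nil {
        panic(err)
    }
    for _, r := range out.PendingMaintenanceActions {
        fmt.Println(aws.StringValue(r.ResourceIdentifier))
    }
}
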

    ", + "DescribeReservedDBInstancesMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeReservedDBInstancesOfferingsMessage$Filters": "

    This parameter is not currently supported.

    ", + "ListTagsForResourceMessage$Filters": "

    This parameter is not currently supported.

    " + } + }, + "FilterValueList": { + "base": null, + "refs": { + "Filter$Values": "

    This parameter is not currently supported.

    " + } + }, + "IPRange": { + "base": "

    This data type is used as a response element in the DescribeDBSecurityGroups action.

    ", + "refs": { + "IPRangeList$member": null + } + }, + "IPRangeList": { + "base": null, + "refs": { + "DBSecurityGroup$IPRanges": "

    Contains a list of IPRange elements.

    " + } + }, + "InstanceQuotaExceededFault": { + "base": "

    The request would result in the user exceeding the allowed number of DB instances.

    ", + "refs": { + } + }, + "InsufficientDBClusterCapacityFault": { + "base": "

    The DB cluster does not have enough capacity for the current operation.

    ", + "refs": { + } + }, + "InsufficientDBInstanceCapacityFault": { + "base": "

    The specified DB instance class is not available in the specified Availability Zone.

    ", + "refs": { + } + }, + "InsufficientStorageClusterCapacityFault": { + "base": "

    There is insufficient storage available for the current action. You may be able to resolve this error by updating your subnet group to use different Availability Zones that have more storage available.

    ", + "refs": { + } + }, + "Integer": { + "base": null, + "refs": { + "DBClusterSnapshot$AllocatedStorage": "

    Specifies the allocated storage size in gigabytes (GB).

    ", + "DBClusterSnapshot$Port": "

    Specifies the port that the DB cluster was listening on at the time of the snapshot.

    ", + "DBClusterSnapshot$PercentProgress": "

    Specifies the percentage of the estimated data that has been transferred.

    ", + "DBInstance$AllocatedStorage": "

    Specifies the allocated storage size, in gigabytes.

    ", + "DBInstance$BackupRetentionPeriod": "

    Specifies the number of days for which automatic DB snapshots are retained.

    ", + "DBInstance$DbInstancePort": "

    Specifies the port that the DB instance listens on. If the DB instance is part of a DB cluster, this can be a different port than the DB cluster port.

    ", + "DBSnapshot$AllocatedStorage": "

    Specifies the allocated storage size in gigabytes (GB).

    ", + "DBSnapshot$Port": "

    Specifies the port that the database engine was listening on at the time of the snapshot.

    ", + "DBSnapshot$PercentProgress": "

    The percentage of the estimated data that has been transferred.

    ", + "DownloadDBLogFilePortionMessage$NumberOfLines": "

    The number of lines to download. If the number of lines specified results in a file over 1 MB in size, the file will be truncated at 1 MB in size.

    If the NumberOfLines parameter is specified, then the block of lines returned can be from the beginning or the end of the log file, depending on the value of the Marker parameter.

    • If neither Marker nor NumberOfLines is specified, the entire log file is returned, up to a maximum of 10000 lines, starting with the most recent log entries first.

    • If NumberOfLines is specified and Marker is not specified, then the most recent lines from the end of the log file are returned.

    • If Marker is specified as \"0\", then the specified number of lines from the beginning of the log file are returned.

    • You can download the log file in blocks of lines by specifying the size of the block using the NumberOfLines parameter, and by specifying a value of \"0\" for the Marker parameter in your first request. Include the Marker value returned in the response as the Marker value for the next request, continuing until the AdditionalDataPending response element returns false.
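
    A minimal Go sketch of this block-download loop, assuming the vendored aws-sdk-go (the instance and log file names are hypothetical):

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/rds"
)

func main() {
    svc := rds.New(session.Must(session.NewSession()))

    input := &rds.DownloadDBLogFilePortionInput{
        DBInstanceIdentifier: aws.String("my-db-instance"),          // hypothetical
        LogFileName:          aws.String("error/mysql-error.log"),   // hypothetical
        Marker:               aws.String("0"), // "0" starts from the beginning of the file
        NumberOfLines:        aws.Int64(500),  // block size per request
    }
    for {
        out, err := svc.DownloadDBLogFilePortion(input)
        if err != nil {
            panic(err)
        }
        fmt.Print(aws.StringValue(out.LogFileData))
        // Continue until AdditionalDataPending returns false.
        if !aws.BoolValue(out.AdditionalDataPending) {
            break
        }
        input.Marker = out.Marker
    }
}
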

    ", + "Endpoint$Port": "

    Specifies the port that the database engine is listening on.

    ", + "ReservedDBInstance$Duration": "

    The duration of the reservation in seconds.

    ", + "ReservedDBInstance$DBInstanceCount": "

    The number of reserved DB instances.

    ", + "ReservedDBInstancesOffering$Duration": "

    The duration of the offering in seconds.

    " + } + }, + "IntegerOptional": { + "base": null, + "refs": { + "CreateDBClusterMessage$BackupRetentionPeriod": "

    The number of days for which automated backups are retained. You must specify a minimum value of 1.

    Default: 1

    Constraints:

    • Must be a value from 1 to 35

    ", + "CreateDBClusterMessage$Port": "

    The port number on which the instances in the DB cluster accept connections.

    Default: 3306
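
    A minimal Go sketch of cluster creation with these retention and port values, assuming the vendored aws-sdk-go (identifiers and credentials are hypothetical):

package main

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/rds"
)

func main() {
    svc := rds.New(session.Must(session.NewSession()))

    _, err := svc.CreateDBCluster(&rds.CreateDBClusterInput{
        DBClusterIdentifier:   aws.String("my-aurora-cluster"), // hypothetical
        Engine:                aws.String("aurora"),
        MasterUsername:        aws.String("admin"),      // hypothetical
        MasterUserPassword:    aws.String("replace-me"), // hypothetical
        BackupRetentionPeriod: aws.Int64(7),             // must be 1 to 35
        Port:                  aws.Int64(3306),          // Aurora default
    })
    if err != nil {
        panic(err)
    }
}
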

    ", + "CreateDBInstanceMessage$AllocatedStorage": "

    The amount of storage (in gigabytes) to be initially allocated for the database instance.

    Type: Integer

    MySQL

    Constraints: Must be an integer from 5 to 6144.

    MariaDB

    Constraints: Must be an integer from 5 to 6144.

    PostgreSQL

    Constraints: Must be an integer from 5 to 6144.

    Oracle

    Constraints: Must be an integer from 10 to 6144.

    SQL Server

    Constraints: Must be an integer from 200 to 4096 (Standard Edition and Enterprise Edition) or from 20 to 4096 (Express Edition and Web Edition).

    ", + "CreateDBInstanceMessage$BackupRetentionPeriod": "

    The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

    Default: 1

    Constraints:

    • Must be a value from 0 to 35

    • Cannot be set to 0 if the DB instance is a source to Read Replicas

    ", + "CreateDBInstanceMessage$Port": "

    The port number on which the database accepts connections.

    MySQL

    Default: 3306

    Valid Values: 1150-65535

    Type: Integer

    MariaDB

    Default: 3306

    Valid Values: 1150-65535

    Type: Integer

    PostgreSQL

    Default: 5432

    Valid Values: 1150-65535

    Type: Integer

    Oracle

    Default: 1521

    Valid Values: 1150-65535

    SQL Server

    Default: 1433

    Valid Values: 1150-65535 except for 1434, 3389, 47001, and 49152 through 49156.

    Amazon Aurora

    Default: 3306

    Valid Values: 1150-65535

    Type: Integer
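
    A minimal Go sketch of instance creation using the MySQL values from the constraints above, assuming the vendored aws-sdk-go (identifiers and credentials are hypothetical):

package main

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/rds"
)

func main() {
    svc := rds.New(session.Must(session.NewSession()))

    _, err := svc.CreateDBInstance(&rds.CreateDBInstanceInput{
        DBInstanceIdentifier:  aws.String("my-mysql-instance"), // hypothetical
        DBInstanceClass:       aws.String("db.t2.micro"),
        Engine:                aws.String("mysql"),
        AllocatedStorage:      aws.Int64(20),   // MySQL: integer from 5 to 6144
        BackupRetentionPeriod: aws.Int64(7),    // 0 to 35; 0 disables automated backups
        Port:                  aws.Int64(3306), // MySQL default
        MonitoringInterval:    aws.Int64(0),    // 0 disables Enhanced Monitoring
        MasterUsername:        aws.String("admin"),      // hypothetical
        MasterUserPassword:    aws.String("replace-me"), // hypothetical
    })
    if err != nil {
        panic(err)
    }
}
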

    ", + "CreateDBInstanceMessage$Iops": "

    The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance.

    Constraints: Must be between 3 and 10 times the storage amount for the DB instance, and must be an integer multiple of 1000. For example, if the size of your DB instance is 500 GB, then your Iops value can be 2000, 3000, 4000, or 5000.

    ", + "CreateDBInstanceMessage$MonitoringInterval": "

    The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collecting Enhanced Monitoring metrics, specify 0. The default is 0.

    If MonitoringRoleArn is specified, then you must also set MonitoringInterval to a value other than 0.

    Valid Values: 0, 1, 5, 10, 15, 30, 60

    ", + "CreateDBInstanceMessage$PromotionTier": "

    A value that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see Fault Tolerance for an Aurora DB Cluster.

    Default: 1

    Valid Values: 0 - 15

    ", + "CreateDBInstanceReadReplicaMessage$Port": "

    The port number that the DB instance uses for connections.

    Default: Inherits from the source DB instance

    Valid Values: 1150-65535

    ", + "CreateDBInstanceReadReplicaMessage$Iops": "

    The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance.

    ", + "CreateDBInstanceReadReplicaMessage$MonitoringInterval": "

    The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the Read Replica. To disable collecting Enhanced Monitoring metrics, specify 0. The default is 0.

    If MonitoringRoleArn is specified, then you must also set MonitoringInterval to a value other than 0.

    Valid Values: 0, 1, 5, 10, 15, 30, 60

    ", + "DBCluster$AllocatedStorage": "

    Specifies the allocated storage size in gigabytes (GB).

    ", + "DBCluster$BackupRetentionPeriod": "

    Specifies the number of days for which automatic DB snapshots are retained.

    ", + "DBCluster$Port": "

    Specifies the port that the database engine is listening on.

    ", + "DBClusterMember$PromotionTier": "

    A value that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see Fault Tolerance for an Aurora DB Cluster.

    ", + "DBInstance$Iops": "

    Specifies the Provisioned IOPS (I/O operations per second) value.

    ", + "DBInstance$MonitoringInterval": "

    The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance.

    ", + "DBInstance$PromotionTier": "

    A value that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see Fault Tolerance for an Aurora DB Cluster.

    ", + "DBSnapshot$Iops": "

    Specifies the Provisioned IOPS (I/O operations per second) value of the DB instance at the time of the snapshot.

    ", + "DescribeCertificatesMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.
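
    A minimal Go sketch of this Marker/MaxRecords pagination, assuming the vendored aws-sdk-go; the same loop shape applies to the other Describe* actions documented below:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/rds"
)

func main() {
    svc := rds.New(session.Must(session.NewSession()))

    input := &rds.DescribeCertificatesInput{
        MaxRecords: aws.Int64(20), // minimum 20, maximum 100
    }
    for {
        out, err := svc.DescribeCertificates(input)
        if err != nil {
            panic(err)
        }
        for _, cert := range out.Certificates {
            fmt.Println(aws.StringValue(cert.CertificateIdentifier))
        }
        // The marker is absent once all records have been returned.
        if out.Marker == nil {
            break
        }
        input.Marker = out.Marker
    }
}
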

    ", + "DescribeDBClusterParameterGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeDBClusterParametersMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeDBClusterSnapshotsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeDBClustersMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeDBEngineVersionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so that the following results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeDBInstancesMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeDBLogFilesMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    ", + "DescribeDBParameterGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeDBParametersMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeDBSecurityGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeDBSnapshotsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeDBSubnetGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeEngineDefaultClusterParametersMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeEngineDefaultParametersMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeEventSubscriptionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeEventsMessage$Duration": "

    The number of minutes to retrieve events for.

    Default: 60

    ", + "DescribeEventsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeOptionGroupOptionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeOptionGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeOrderableDBInstanceOptionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribePendingMaintenanceActionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeReservedDBInstancesMessage$MaxRecords": "

    The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so that the following results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeReservedDBInstancesOfferingsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so that the following results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "ModifyDBClusterMessage$BackupRetentionPeriod": "

    The number of days for which automated backups are retained. You must specify a minimum value of 1.

    Default: 1

    Constraints:

    • Must be a value from 1 to 35

    ", + "ModifyDBClusterMessage$Port": "

    The port number on which the DB cluster accepts connections.

    Constraints: Value must be 1150-65535

    Default: The same port as the original DB cluster.

    ", + "ModifyDBInstanceMessage$AllocatedStorage": "

    The new storage capacity of the RDS instance. Changing this setting does not result in an outage and the change is applied during the next maintenance window unless ApplyImmediately is set to true for this request.

    MySQL

    Default: Uses existing setting

    Valid Values: 5-6144

    Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

    Type: Integer

    MariaDB

    Default: Uses existing setting

    Valid Values: 5-6144

    Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

    Type: Integer

    PostgreSQL

    Default: Uses existing setting

    Valid Values: 5-6144

    Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

    Type: Integer

    Oracle

    Default: Uses existing setting

    Valid Values: 10-6144

    Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

    SQL Server

    Cannot be modified.

    If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance will be available for use, but might experience performance degradation. While the migration takes place, nightly backups for the instance will be suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a Read Replica for the instance, and creating a DB snapshot of the instance.
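
    A minimal Go sketch of such a modification, assuming the vendored aws-sdk-go: grow allocated storage by at least 10% and adjust backup retention in one call, applying the change immediately rather than in the next maintenance window (identifiers and sizes are hypothetical):

package main

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/rds"
)

func main() {
    svc := rds.New(session.Must(session.NewSession()))

    _, err := svc.ModifyDBInstance(&rds.ModifyDBInstanceInput{
        DBInstanceIdentifier:  aws.String("my-db-instance"), // hypothetical
        AllocatedStorage:      aws.Int64(110),               // at least 10% above a current 100 GB
        BackupRetentionPeriod: aws.Int64(14),                // 0 to 35
        ApplyImmediately:      aws.Bool(true),               // otherwise applied in the maintenance window
    })
    if err != nil {
        panic(err)
    }
}
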

    ", + "ModifyDBInstanceMessage$BackupRetentionPeriod": "

    The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

    Changing this parameter can result in an outage if you change from 0 to a non-zero value or from a non-zero value to 0. These changes are applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. If you change the parameter from one non-zero value to another non-zero value, the change is asynchronously applied as soon as possible.

    Default: Uses existing setting

    Constraints:

    • Must be a value from 0 to 35

    • Can be specified for a MySQL Read Replica only if the source is running MySQL 5.6

    • Can be specified for a PostgreSQL Read Replica only if the source is running PostgreSQL 9.3.5

    • Cannot be set to 0 if the DB instance is a source to Read Replicas

    ", + "ModifyDBInstanceMessage$Iops": "

    The new Provisioned IOPS (I/O operations per second) value for the RDS instance. Changing this setting does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

    Default: Uses existing setting

    Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value. If you are migrating from Provisioned IOPS to standard storage, set this value to 0. The DB instance will require a reboot for the change in storage type to take effect.

    SQL Server

    Setting the IOPS value for the SQL Server database engine is not supported.

    Type: Integer

    If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance will be available for use, but might experience performance degradation. While the migration takes place, nightly backups for the instance will be suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a Read Replica for the instance, and creating a DB snapshot of the instance.

    ", + "ModifyDBInstanceMessage$MonitoringInterval": "

    The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collecting Enhanced Monitoring metrics, specify 0. The default is 0.

    If MonitoringRoleArn is specified, then you must also set MonitoringInterval to a value other than 0.

    Valid Values: 0, 1, 5, 10, 15, 30, 60

    ", + "ModifyDBInstanceMessage$DBPortNumber": "

    The port number on which the database accepts connections.

    The value of the DBPortNumber parameter must not match any of the port values specified for options in the option group for the DB instance.

    Your database will restart when you change the DBPortNumber value regardless of the value of the ApplyImmediately parameter.

    MySQL

    Default: 3306

    Valid Values: 1150-65535

    MariaDB

    Default: 3306

    Valid Values: 1150-65535

    PostgreSQL

    Default: 5432

    Valid Values: 1150-65535

    Type: Integer

    Oracle

    Default: 1521

    Valid Values: 1150-65535

    SQL Server

    Default: 1433

    Valid Values: 1150-65535 except for 1434, 3389, 47001, and 49152 through 49156.

    Amazon Aurora

    Default: 3306

    Valid Values: 1150-65535

    ", + "ModifyDBInstanceMessage$PromotionTier": "

    A value that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see Fault Tolerance for an Aurora DB Cluster.

    Default: 1

    Valid Values: 0 - 15

    ", + "Option$Port": "

    If the option requires a port, the port it is configured to use.

    ", + "OptionConfiguration$Port": "

    The optional port for the option.

    ", + "OptionGroupOption$DefaultPort": "

    If the option requires a port, specifies the default port for the option.

    ", + "PendingModifiedValues$AllocatedStorage": "

    Contains the new AllocatedStorage size for the DB instance that will be applied or is in progress.

    ", + "PendingModifiedValues$Port": "

    Specifies the pending port for the DB instance.

    ", + "PendingModifiedValues$BackupRetentionPeriod": "

    Specifies the pending number of days for which automated backups are retained.

    ", + "PendingModifiedValues$Iops": "

    Specifies the new Provisioned IOPS value for the DB instance that will be applied or is being applied.

    ", + "PromoteReadReplicaMessage$BackupRetentionPeriod": "

    The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

    Default: 1

    Constraints:

    • Must be a value from 0 to 8

    ", + "PurchaseReservedDBInstancesOfferingMessage$DBInstanceCount": "

    The number of instances to reserve.

    Default: 1

    ", + "RestoreDBClusterFromSnapshotMessage$Port": "

    The port number on which the new DB cluster accepts connections.

    Constraints: Value must be 1150-65535

    Default: The same port as the original DB cluster.

    ", + "RestoreDBClusterToPointInTimeMessage$Port": "

    The port number on which the new DB cluster accepts connections.

    Constraints: Value must be 1150-65535

    Default: The same port as the original DB cluster.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$Port": "

    The port number on which the database accepts connections.

    Default: The same port as the original DB instance

    Constraints: Value must be 1150-65535

    ", + "RestoreDBInstanceFromDBSnapshotMessage$Iops": "

    Specifies the amount of provisioned IOPS for the DB instance, expressed in I/O operations per second. If this parameter is not specified, the IOPS value will be taken from the backup. If this parameter is set to 0, the new instance will be converted to a non-PIOPS instance, which will take additional time, though your DB instance will be available for connections before the conversion starts.

    Constraints: Must be an integer greater than 1000.

    SQL Server

    Setting the IOPS value for the SQL Server database engine is not supported.
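
    A minimal Go sketch of a restore that converts to a non-PIOPS instance by passing an Iops of 0, per the description above; identifiers are hypothetical:

package main

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/rds"
)

func main() {
    svc := rds.New(session.Must(session.NewSession()))

    _, err := svc.RestoreDBInstanceFromDBSnapshot(&rds.RestoreDBInstanceFromDBSnapshotInput{
        DBInstanceIdentifier: aws.String("my-restored-instance"), // hypothetical
        DBSnapshotIdentifier: aws.String("my-manual-snapshot"),   // hypothetical
        Iops:                 aws.Int64(0), // 0 converts the new instance to non-PIOPS storage
    })
    if err != nil {
        panic(err)
    }
}
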

    ", + "RestoreDBInstanceToPointInTimeMessage$Port": "

    The port number on which the database accepts connections.

    Constraints: Value must be 1150-65535

    Default: The same port as the original DB instance.

    ", + "RestoreDBInstanceToPointInTimeMessage$Iops": "

    The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance.

    Constraints: Must be an integer greater than 1000.

    SQL Server

    Setting the IOPS value for the SQL Server database engine is not supported.

    " + } + }, + "InvalidDBClusterSnapshotStateFault": { + "base": "

    The supplied value is not a valid DB cluster snapshot state.

    ", + "refs": { + } + }, + "InvalidDBClusterStateFault": { + "base": "

    The DB cluster is not in a valid state.

    ", + "refs": { + } + }, + "InvalidDBInstanceStateFault": { + "base": "

    The specified DB instance is not in the available state.

    ", + "refs": { + } + }, + "InvalidDBParameterGroupStateFault": { + "base": "

    The DB parameter group cannot be deleted because it is in use.

    ", + "refs": { + } + }, + "InvalidDBSecurityGroupStateFault": { + "base": "

    The state of the DB security group does not allow deletion.

    ", + "refs": { + } + }, + "InvalidDBSnapshotStateFault": { + "base": "

    The state of the DB snapshot does not allow deletion.

    ", + "refs": { + } + }, + "InvalidDBSubnetGroupFault": { + "base": "

    Indicates that the DBSubnetGroup does not belong to the same VPC as an existing cross-region read replica of the same source instance.

    ", + "refs": { + } + }, + "InvalidDBSubnetGroupStateFault": { + "base": "

    The DB subnet group cannot be deleted because it is in use.

    ", + "refs": { + } + }, + "InvalidDBSubnetStateFault": { + "base": "

    The DB subnet is not in the available state.

    ", + "refs": { + } + }, + "InvalidEventSubscriptionStateFault": { + "base": "

    This error can occur if someone else is modifying a subscription. You should retry the action.

    ", + "refs": { + } + }, + "InvalidOptionGroupStateFault": { + "base": "

    The option group is not in the available state.

    ", + "refs": { + } + }, + "InvalidRestoreFault": { + "base": "

    Cannot restore from a VPC backup to a non-VPC DB instance.

    ", + "refs": { + } + }, + "InvalidSubnet": { + "base": "

    The requested subnet is invalid, or multiple subnets were requested that are not all in a common VPC.

    ", + "refs": { + } + }, + "InvalidVPCNetworkStateFault": { + "base": "

    The DB subnet group does not cover all Availability Zones after it was created because of changes that were made.

    ", + "refs": { + } + }, + "KMSKeyNotAccessibleFault": { + "base": "

    An error occurred while accessing the KMS key.

    ", + "refs": { + } + }, + "KeyList": { + "base": null, + "refs": { + "RemoveTagsFromResourceMessage$TagKeys": "

    The tag key (name) of the tag to be removed.

    " + } + }, + "ListTagsForResourceMessage": { + "base": "

    ", + "refs": { + } + }, + "Long": { + "base": null, + "refs": { + "AccountQuota$Used": "

    The amount currently used toward the quota maximum.

    ", + "AccountQuota$Max": "

    The maximum allowed value for the quota.

    ", + "DescribeDBLogFilesDetails$LastWritten": "

    A POSIX timestamp when the last log entry was written.

    ", + "DescribeDBLogFilesDetails$Size": "

    The size, in bytes, of the log file for the specified DB instance.

    ", + "DescribeDBLogFilesMessage$FileLastWritten": "

    Filters the available log files for files written since the specified date, in POSIX timestamp format with milliseconds.

    ", + "DescribeDBLogFilesMessage$FileSize": "

    Filters the available log files for files larger than the specified size.
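
    A minimal Go sketch using both filters, assuming the vendored aws-sdk-go (the instance name and filter values are hypothetical):

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/rds"
)

func main() {
    svc := rds.New(session.Must(session.NewSession()))

    out, err := svc.DescribeDBLogFiles(&rds.DescribeDBLogFilesInput{
        DBInstanceIdentifier: aws.String("my-db-instance"), // hypothetical
        FileLastWritten:      aws.Int64(1500000000000),     // POSIX timestamp in milliseconds
        FileSize:             aws.Int64(1024),              // bytes
    })
    if err != nil {
        panic(err)
    }
    for _, f := range out.DescribeDBLogFiles {
        fmt.Println(aws.StringValue(f.LogFileName), aws.Int64Value(f.Size))
    }
}
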

    " + } + }, + "ModifyDBClusterMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyDBClusterParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyDBClusterResult": { + "base": null, + "refs": { + } + }, + "ModifyDBClusterSnapshotAttributeMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyDBClusterSnapshotAttributeResult": { + "base": null, + "refs": { + } + }, + "ModifyDBInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyDBInstanceResult": { + "base": null, + "refs": { + } + }, + "ModifyDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyDBSnapshotAttributeMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyDBSnapshotAttributeResult": { + "base": null, + "refs": { + } + }, + "ModifyDBSubnetGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyDBSubnetGroupResult": { + "base": null, + "refs": { + } + }, + "ModifyEventSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyEventSubscriptionResult": { + "base": null, + "refs": { + } + }, + "ModifyOptionGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyOptionGroupResult": { + "base": null, + "refs": { + } + }, + "Option": { + "base": "

    Option details.

    ", + "refs": { + "OptionsList$member": null + } + }, + "OptionConfiguration": { + "base": "

    A list of all available options.

    ", + "refs": { + "OptionConfigurationList$member": null + } + }, + "OptionConfigurationList": { + "base": null, + "refs": { + "ModifyOptionGroupMessage$OptionsToInclude": "

    Options in this list are added to the option group or, if already present, the specified configuration is used to update the existing configuration.

    " + } + }, + "OptionGroup": { + "base": "

    ", + "refs": { + "CopyOptionGroupResult$OptionGroup": null, + "CreateOptionGroupResult$OptionGroup": null, + "ModifyOptionGroupResult$OptionGroup": null, + "OptionGroupsList$member": null + } + }, + "OptionGroupAlreadyExistsFault": { + "base": "

    The option group you are trying to create already exists.

    ", + "refs": { + } + }, + "OptionGroupMembership": { + "base": "

    Provides information on the option groups the DB instance is a member of.

    ", + "refs": { + "OptionGroupMembershipList$member": null + } + }, + "OptionGroupMembershipList": { + "base": null, + "refs": { + "DBInstance$OptionGroupMemberships": "

    Provides the list of option group memberships for this DB instance.

    " + } + }, + "OptionGroupNotFoundFault": { + "base": "

    The specified option group could not be found.

    ", + "refs": { + } + }, + "OptionGroupOption": { + "base": "

    Available option.

    ", + "refs": { + "OptionGroupOptionsList$member": null + } + }, + "OptionGroupOptionSetting": { + "base": "

    Option group option settings are used to display settings available for each option with their default values and other information. These values are used with the DescribeOptionGroupOptions action.

    ", + "refs": { + "OptionGroupOptionSettingsList$member": null + } + }, + "OptionGroupOptionSettingsList": { + "base": null, + "refs": { + "OptionGroupOption$OptionGroupOptionSettings": "

    Specifies the option settings that are available (and the default value) for each option in an option group.

    " + } + }, + "OptionGroupOptionsList": { + "base": "

    List of available option group options.

    ", + "refs": { + "OptionGroupOptionsMessage$OptionGroupOptions": null + } + }, + "OptionGroupOptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "OptionGroupQuotaExceededFault": { + "base": "

    The quota of 20 option groups was exceeded for this AWS account.

    ", + "refs": { + } + }, + "OptionGroups": { + "base": "

    List of option groups.

    ", + "refs": { + } + }, + "OptionGroupsList": { + "base": null, + "refs": { + "OptionGroups$OptionGroupsList": "

    List of option groups.

    " + } + }, + "OptionNamesList": { + "base": null, + "refs": { + "ModifyOptionGroupMessage$OptionsToRemove": "

    Options in this list are removed from the option group.

    " + } + }, + "OptionSetting": { + "base": "

    Option settings are the actual settings being applied or configured for an option. They are used when you modify an option group or describe option groups. For example, the NATIVE_NETWORK_ENCRYPTION option has a setting called SQLNET.ENCRYPTION_SERVER that can have several different values.
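
    A minimal Go sketch that sets the SQLNET.ENCRYPTION_SERVER setting mentioned above when adding NATIVE_NETWORK_ENCRYPTION to an Oracle option group, assuming the vendored aws-sdk-go (the option group name is hypothetical):

package main

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/rds"
)

func main() {
    svc := rds.New(session.Must(session.NewSession()))

    _, err := svc.ModifyOptionGroup(&rds.ModifyOptionGroupInput{
        OptionGroupName:  aws.String("my-oracle-options"), // hypothetical
        ApplyImmediately: aws.Bool(true),
        OptionsToInclude: []*rds.OptionConfiguration{{
            OptionName: aws.String("NATIVE_NETWORK_ENCRYPTION"),
            OptionSettings: []*rds.OptionSetting{{
                Name:  aws.String("SQLNET.ENCRYPTION_SERVER"),
                Value: aws.String("REQUIRED"),
            }},
        }},
    })
    if err != nil {
        panic(err)
    }
}
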

    ", + "refs": { + "OptionSettingConfigurationList$member": null, + "OptionSettingsList$member": null + } + }, + "OptionSettingConfigurationList": { + "base": null, + "refs": { + "Option$OptionSettings": "

    The option settings for this option.

    " + } + }, + "OptionSettingsList": { + "base": null, + "refs": { + "OptionConfiguration$OptionSettings": "

    The option settings to include in an option group.

    " + } + }, + "OptionsDependedOn": { + "base": null, + "refs": { + "OptionGroupOption$OptionsDependedOn": "

    List of all options that are prerequisites for this option.

    " + } + }, + "OptionsList": { + "base": null, + "refs": { + "OptionGroup$Options": "

    Indicates what options are available in the option group.

    " + } + }, + "OrderableDBInstanceOption": { + "base": "

    Contains a list of available options for a DB instance.

    This data type is used as a response element in the DescribeOrderableDBInstanceOptions action.

    ", + "refs": { + "OrderableDBInstanceOptionsList$member": null + } + }, + "OrderableDBInstanceOptionsList": { + "base": null, + "refs": { + "OrderableDBInstanceOptionsMessage$OrderableDBInstanceOptions": "

    An OrderableDBInstanceOption structure containing information about orderable options for the DB instance.

    " + } + }, + "OrderableDBInstanceOptionsMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeOrderableDBInstanceOptions action.

    ", + "refs": { + } + }, + "Parameter": { + "base": "

    This data type is used as a request parameter in the ModifyDBParameterGroup and ResetDBParameterGroup actions.

    This data type is used as a response element in the DescribeEngineDefaultParameters and DescribeDBParameters actions.

    ", + "refs": { + "ParametersList$member": null + } + }, + "ParametersList": { + "base": null, + "refs": { + "DBClusterParameterGroupDetails$Parameters": "

    Provides a list of parameters for the DB cluster parameter group.

    ", + "DBParameterGroupDetails$Parameters": "

    A list of Parameter values.

    ", + "EngineDefaults$Parameters": "

    Contains a list of engine default parameters.

    ", + "ModifyDBClusterParameterGroupMessage$Parameters": "

    A list of parameters in the DB cluster parameter group to modify.

    ", + "ModifyDBParameterGroupMessage$Parameters": "

    An array of parameter names, values, and the apply method for the parameter update. At least one parameter name, value, and apply method must be supplied; subsequent arguments are optional. A maximum of 20 parameters can be modified in a single request.

    Valid Values (for the application method): immediate | pending-reboot

    You can use the immediate value with dynamic parameters only. You can use the pending-reboot value for both dynamic and static parameters, and changes are applied when you reboot the DB instance without failover.
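
    A minimal Go sketch of both apply methods in one request, assuming the vendored aws-sdk-go; the parameter group name is hypothetical, and the two parameters are illustrative examples of a dynamic and a static MySQL parameter:

package main

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/rds"
)

func main() {
    svc := rds.New(session.Must(session.NewSession()))

    _, err := svc.ModifyDBParameterGroup(&rds.ModifyDBParameterGroupInput{
        DBParameterGroupName: aws.String("my-mysql-params"), // hypothetical
        Parameters: []*rds.Parameter{
            {
                ParameterName:  aws.String("max_connections"),
                ParameterValue: aws.String("250"),
                ApplyMethod:    aws.String("immediate"), // dynamic parameter
            },
            {
                ParameterName:  aws.String("innodb_buffer_pool_size"),
                ParameterValue: aws.String("134217728"),
                ApplyMethod:    aws.String("pending-reboot"), // static parameter
            },
        },
    })
    if err != nil {
        panic(err)
    }
}
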

    ", + "ResetDBClusterParameterGroupMessage$Parameters": "

    A list of parameter names in the DB cluster parameter group to reset to the default values. You cannot use this parameter if the ResetAllParameters parameter is set to true.

    ", + "ResetDBParameterGroupMessage$Parameters": "

    An array of parameter names, values, and the apply method for the parameter update. At least one parameter name, value, and apply method must be supplied; subsequent arguments are optional. A maximum of 20 parameters can be modified in a single request.

    MySQL

    Valid Values (for Apply method): immediate | pending-reboot

    You can use the immediate value with dynamic parameters only. You can use the pending-reboot value for both dynamic and static parameters, and changes are applied when the DB instance reboots.

    MariaDB

    Valid Values (for Apply method): immediate | pending-reboot

    You can use the immediate value with dynamic parameters only. You can use the pending-reboot value for both dynamic and static parameters, and changes are applied when DB instance reboots.

    Oracle

    Valid Values (for Apply method): pending-reboot

    " + } + }, + "PendingMaintenanceAction": { + "base": "

    Provides information about a pending maintenance action for a resource.

    ", + "refs": { + "PendingMaintenanceActionDetails$member": null + } + }, + "PendingMaintenanceActionDetails": { + "base": null, + "refs": { + "ResourcePendingMaintenanceActions$PendingMaintenanceActionDetails": "

    A list that provides details about the pending maintenance actions for the resource.

    " + } + }, + "PendingMaintenanceActions": { + "base": null, + "refs": { + "PendingMaintenanceActionsMessage$PendingMaintenanceActions": "

    A list of the pending maintenance actions for the resource.

    " + } + }, + "PendingMaintenanceActionsMessage": { + "base": "

    Data returned from the DescribePendingMaintenanceActions action.

    ", + "refs": { + } + }, + "PendingModifiedValues": { + "base": "

    This data type is used as a response element in the ModifyDBInstance action.

    ", + "refs": { + "DBInstance$PendingModifiedValues": "

    Specifies that changes to the DB instance are pending. This element is only included when changes are pending. Specific changes are identified by subelements.

    " + } + }, + "PointInTimeRestoreNotEnabledFault": { + "base": "

    SourceDBInstanceIdentifier refers to a DB instance with BackupRetentionPeriod equal to 0.

    ", + "refs": { + } + }, + "PromoteReadReplicaDBClusterMessage": { + "base": "

    ", + "refs": { + } + }, + "PromoteReadReplicaDBClusterResult": { + "base": null, + "refs": { + } + }, + "PromoteReadReplicaMessage": { + "base": "

    ", + "refs": { + } + }, + "PromoteReadReplicaResult": { + "base": null, + "refs": { + } + }, + "ProvisionedIopsNotAvailableInAZFault": { + "base": "

    Provisioned IOPS not available in the specified Availability Zone.

    ", + "refs": { + } + }, + "PurchaseReservedDBInstancesOfferingMessage": { + "base": "

    ", + "refs": { + } + }, + "PurchaseReservedDBInstancesOfferingResult": { + "base": null, + "refs": { + } + }, + "ReadReplicaDBInstanceIdentifierList": { + "base": null, + "refs": { + "DBInstance$ReadReplicaDBInstanceIdentifiers": "

    Contains one or more identifiers of the Read Replicas associated with this DB instance.

    " + } + }, + "ReadReplicaIdentifierList": { + "base": null, + "refs": { + "DBCluster$ReadReplicaIdentifiers": "

    Contains one or more identifiers of the Read Replicas associated with this DB cluster.

    " + } + }, + "RebootDBInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "RebootDBInstanceResult": { + "base": null, + "refs": { + } + }, + "RecurringCharge": { + "base": "

    This data type is used as a response element in the DescribeReservedDBInstances and DescribeReservedDBInstancesOfferings actions.

    ", + "refs": { + "RecurringChargeList$member": null + } + }, + "RecurringChargeList": { + "base": null, + "refs": { + "ReservedDBInstance$RecurringCharges": "

    The recurring price charged to run this reserved DB instance.

    ", + "ReservedDBInstancesOffering$RecurringCharges": "

    The recurring price charged to run this reserved DB instance.

    " + } + }, + "RemoveSourceIdentifierFromSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "RemoveSourceIdentifierFromSubscriptionResult": { + "base": null, + "refs": { + } + }, + "RemoveTagsFromResourceMessage": { + "base": "

    ", + "refs": { + } + }, + "ReservedDBInstance": { + "base": "

    This data type is used as a response element in the DescribeReservedDBInstances and PurchaseReservedDBInstancesOffering actions.

    ", + "refs": { + "PurchaseReservedDBInstancesOfferingResult$ReservedDBInstance": null, + "ReservedDBInstanceList$member": null + } + }, + "ReservedDBInstanceAlreadyExistsFault": { + "base": "

    User already has a reservation with the given identifier.

    ", + "refs": { + } + }, + "ReservedDBInstanceList": { + "base": null, + "refs": { + "ReservedDBInstanceMessage$ReservedDBInstances": "

    A list of reserved DB instances.

    " + } + }, + "ReservedDBInstanceMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeReservedDBInstances action.

    ", + "refs": { + } + }, + "ReservedDBInstanceNotFoundFault": { + "base": "

    The specified reserved DB Instance was not found.

    ", + "refs": { + } + }, + "ReservedDBInstanceQuotaExceededFault": { + "base": "

    Request would exceed the user's DB Instance quota.

    ", + "refs": { + } + }, + "ReservedDBInstancesOffering": { + "base": "

    This data type is used as a response element in the DescribeReservedDBInstancesOfferings action.

    ", + "refs": { + "ReservedDBInstancesOfferingList$member": null + } + }, + "ReservedDBInstancesOfferingList": { + "base": null, + "refs": { + "ReservedDBInstancesOfferingMessage$ReservedDBInstancesOfferings": "

    A list of reserved DB instance offerings.

    " + } + }, + "ReservedDBInstancesOfferingMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeReservedDBInstancesOfferings action.

    ", + "refs": { + } + }, + "ReservedDBInstancesOfferingNotFoundFault": { + "base": "

    Specified offering does not exist.

    ", + "refs": { + } + }, + "ResetDBClusterParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "ResetDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "ResourceNotFoundFault": { + "base": "

    The specified resource ID was not found.

    ", + "refs": { + } + }, + "ResourcePendingMaintenanceActions": { + "base": "

    Describes the pending maintenance actions for a resource.

    ", + "refs": { + "ApplyPendingMaintenanceActionResult$ResourcePendingMaintenanceActions": null, + "PendingMaintenanceActions$member": null + } + }, + "RestoreDBClusterFromSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "RestoreDBClusterFromSnapshotResult": { + "base": null, + "refs": { + } + }, + "RestoreDBClusterToPointInTimeMessage": { + "base": "

    ", + "refs": { + } + }, + "RestoreDBClusterToPointInTimeResult": { + "base": null, + "refs": { + } + }, + "RestoreDBInstanceFromDBSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "RestoreDBInstanceFromDBSnapshotResult": { + "base": null, + "refs": { + } + }, + "RestoreDBInstanceToPointInTimeMessage": { + "base": "

    ", + "refs": { + } + }, + "RestoreDBInstanceToPointInTimeResult": { + "base": null, + "refs": { + } + }, + "RevokeDBSecurityGroupIngressMessage": { + "base": "

    ", + "refs": { + } + }, + "RevokeDBSecurityGroupIngressResult": { + "base": null, + "refs": { + } + }, + "SNSInvalidTopicFault": { + "base": "

    SNS has responded that there is a problem with the SNS topic specified.

    ", + "refs": { + } + }, + "SNSNoAuthorizationFault": { + "base": "

    You do not have permission to publish to the SNS topic ARN.

    ", + "refs": { + } + }, + "SNSTopicArnNotFoundFault": { + "base": "

    The SNS topic ARN does not exist.

    ", + "refs": { + } + }, + "SharedSnapshotQuotaExceededFault": { + "base": "

    You have exceeded the maximum number of accounts that you can share a manual DB snapshot with.

    ", + "refs": { + } + }, + "SnapshotQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB snapshots.

    ", + "refs": { + } + }, + "SourceIdsList": { + "base": null, + "refs": { + "CreateEventSubscriptionMessage$SourceIds": "

    The list of identifiers of the event sources for which events will be returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it cannot end with a hyphen or contain two consecutive hyphens.

    Constraints:

    • If SourceIds are supplied, SourceType must also be provided.

    • If the source type is a DB instance, then a DBInstanceIdentifier must be supplied.

    • If the source type is a DB security group, a DBSecurityGroupName must be supplied.

    • If the source type is a DB parameter group, a DBParameterGroupName must be supplied.

    • If the source type is a DB snapshot, a DBSnapshotIdentifier must be supplied.

    ", + "EventSubscription$SourceIdsList": "

    A list of source IDs for the RDS event notification subscription.

    " + } + }, + "SourceNotFoundFault": { + "base": "

    The requested source could not be found.

    ", + "refs": { + } + }, + "SourceType": { + "base": null, + "refs": { + "DescribeEventsMessage$SourceType": "

    The event source to retrieve events for. If no value is specified, all events are returned.

    ", + "Event$SourceType": "

    Specifies the source type for this event.

    " + } + }, + "StorageQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed amount of storage available across all DB instances.

    ", + "refs": { + } + }, + "StorageTypeNotSupportedFault": { + "base": "

    StorageType specified cannot be associated with the DB Instance.

    ", + "refs": { + } + }, + "String": { + "base": null, + "refs": { + "AccountQuota$AccountQuotaName": "

    The name of the Amazon RDS quota for this AWS account.

    ", + "AddSourceIdentifierToSubscriptionMessage$SubscriptionName": "

    The name of the RDS event notification subscription you want to add a source identifier to.

    ", + "AddSourceIdentifierToSubscriptionMessage$SourceIdentifier": "

    The identifier of the event source to be added. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it cannot end with a hyphen or contain two consecutive hyphens.

    Constraints:

    • If the source type is a DB instance, then a DBInstanceIdentifier must be supplied.

    • If the source type is a DB security group, a DBSecurityGroupName must be supplied.

    • If the source type is a DB parameter group, a DBParameterGroupName must be supplied.

    • If the source type is a DB snapshot, a DBSnapshotIdentifier must be supplied.

    ", + "AddTagsToResourceMessage$ResourceName": "

    The Amazon RDS resource the tags will be added to. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

    ", + "ApplyPendingMaintenanceActionMessage$ResourceIdentifier": "

    The RDS Amazon Resource Name (ARN) of the resource that the pending maintenance action applies to. For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

    ", + "ApplyPendingMaintenanceActionMessage$ApplyAction": "

    The pending maintenance action to apply to this resource.

    Valid values: system-update, db-upgrade

    ", + "ApplyPendingMaintenanceActionMessage$OptInType": "

    A value that specifies the type of opt-in request, or undoes an opt-in request. An opt-in request of type immediate cannot be undone.

    Valid values:

    • immediate - Apply the maintenance action immediately.

    • next-maintenance - Apply the maintenance action during the next maintenance window for the resource.

    • undo-opt-in - Cancel any existing next-maintenance opt-in requests.

    ", + "AttributeValueList$member": null, + "AuthorizeDBSecurityGroupIngressMessage$DBSecurityGroupName": "

    The name of the DB security group to add authorization to.

    ", + "AuthorizeDBSecurityGroupIngressMessage$CIDRIP": "

    The IP range to authorize.

    ", + "AuthorizeDBSecurityGroupIngressMessage$EC2SecurityGroupName": "

    Name of the EC2 security group to authorize. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "AuthorizeDBSecurityGroupIngressMessage$EC2SecurityGroupId": "

    Id of the EC2 security group to authorize. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "AuthorizeDBSecurityGroupIngressMessage$EC2SecurityGroupOwnerId": "

    AWS account number of the owner of the EC2 security group specified in the EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable value. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "AvailabilityZone$Name": "

    The name of the availability zone.

    ", + "AvailabilityZones$member": null, + "Certificate$CertificateIdentifier": "

    The unique key that identifies a certificate.

    ", + "Certificate$CertificateType": "

    The type of the certificate.

    ", + "Certificate$Thumbprint": "

    The thumbprint of the certificate.

    ", + "CertificateMessage$Marker": "

    An optional pagination token provided by a previous DescribeCertificates request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "CharacterSet$CharacterSetName": "

    The name of the character set.

    ", + "CharacterSet$CharacterSetDescription": "

    The description of the character set.

    ", + "CopyDBClusterSnapshotMessage$SourceDBClusterSnapshotIdentifier": "

    The identifier of the DB cluster snapshot to copy. This parameter is not case-sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens.

    • First character must be a letter.

    • Cannot end with a hyphen or contain two consecutive hyphens.

    Example: my-cluster-snapshot1

    ", + "CopyDBClusterSnapshotMessage$TargetDBClusterSnapshotIdentifier": "

    The identifier of the new DB cluster snapshot to create from the source DB cluster snapshot. This parameter is not case-sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens.

    • First character must be a letter.

    • Cannot end with a hyphen or contain two consecutive hyphens.

    Example: my-cluster-snapshot2

    ", + "CopyDBParameterGroupMessage$SourceDBParameterGroupIdentifier": "

    The identifier or ARN for the source DB parameter group. For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

    Constraints:

    • Must specify a valid DB parameter group.

    • If the source DB parameter group is in the same region as the copy, specify a valid DB parameter group identifier, for example my-db-param-group, or a valid ARN.

    • If the source DB parameter group is in a different region than the copy, specify a valid DB parameter group ARN, for example arn:aws:rds:us-west-2:123456789012:pg:special-parameters.

    ", + "CopyDBParameterGroupMessage$TargetDBParameterGroupIdentifier": "

    The identifier for the copied DB parameter group.

    Constraints:

    • Cannot be null, empty, or blank

    • Must contain from 1 to 255 alphanumeric characters or hyphens

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-db-parameter-group

    ", + "CopyDBParameterGroupMessage$TargetDBParameterGroupDescription": "

    A description for the copied DB parameter group.

    ", + "CopyDBSnapshotMessage$SourceDBSnapshotIdentifier": "

    The identifier for the source DB snapshot.

    If you are copying from a shared manual DB snapshot, this must be the ARN of the shared DB snapshot.

    Constraints:

    • Must specify a valid system snapshot in the \"available\" state.

    • If the source snapshot is in the same region as the copy, specify a valid DB snapshot identifier.

    • If the source snapshot is in a different region than the copy, specify a valid DB snapshot ARN. For more information, go to Copying a DB Snapshot.

    Example: rds:mydb-2012-04-02-00-01

    Example: arn:aws:rds:rr-regn-1:123456789012:snapshot:mysql-instance1-snapshot-20130805

    ", + "CopyDBSnapshotMessage$TargetDBSnapshotIdentifier": "

    The identifier for the copied snapshot.

    Constraints:

    • Cannot be null, empty, or blank

    • Must contain from 1 to 255 alphanumeric characters or hyphens

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-db-snapshot

    ", + "CopyDBSnapshotMessage$KmsKeyId": "

    The AWS Key Management Service (AWS KMS) key identifier for an encrypted DB snapshot. The KMS key identifier is the Amazon Resource Name (ARN) or the KMS key alias for the KMS encryption key.

    If you copy an unencrypted DB snapshot and specify a value for the KmsKeyId parameter, Amazon RDS encrypts the target DB snapshot using the specified KMS encryption key.

    If you copy an encrypted DB snapshot from your AWS account, you can specify a value for KmsKeyId to encrypt the copy with a new KMS encryption key. If you don't specify a value for KmsKeyId then the copy of the DB snapshot is encrypted with the same KMS key as the source DB snapshot.

    If you copy an encrypted DB snapshot that is shared from another AWS account, then you must specify a value for KmsKeyId.

    ", + "CopyOptionGroupMessage$SourceOptionGroupIdentifier": "

    The identifier or ARN for the source option group. For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

    Constraints:

    • Must specify a valid option group.

    • If the source option group is in the same region as the copy, specify a valid option group identifier, for example my-option-group, or a valid ARN.

    • If the source option group is in a different region than the copy, specify a valid option group ARN, for example arn:aws:rds:us-west-2:123456789012:og:special-options.

    ", + "CopyOptionGroupMessage$TargetOptionGroupIdentifier": "

    The identifier for the copied option group.

    Constraints:

    • Cannot be null, empty, or blank

    • Must contain from 1 to 255 alphanumeric characters or hyphens

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-option-group

    ", + "CopyOptionGroupMessage$TargetOptionGroupDescription": "

    The description for the copied option group.

    ", + "CreateDBClusterMessage$CharacterSetName": "

    A value that indicates that the DB cluster should be associated with the specified CharacterSet.

    ", + "CreateDBClusterMessage$DatabaseName": "

    The name for your database of up to 8 alphanumeric characters. If you do not provide a name, Amazon RDS will not create a database in the DB cluster you are creating.

    ", + "CreateDBClusterMessage$DBClusterIdentifier": "

    The DB cluster identifier. This parameter is stored as a lowercase string.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens.

    • First character must be a letter.

    • Cannot end with a hyphen or contain two consecutive hyphens.

    Example: my-cluster1

    ", + "CreateDBClusterMessage$DBClusterParameterGroupName": "

    The name of the DB cluster parameter group to associate with this DB cluster. If this argument is omitted, default.aurora5.6 for the specified engine will be used.

    Constraints:

    • Must be 1 to 255 alphanumeric characters

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    ", + "CreateDBClusterMessage$DBSubnetGroupName": "

    A DB subnet group to associate with this DB cluster.

    Constraints: Must contain no more than 255 alphanumeric characters, periods, underscores, spaces, or hyphens. Must not be default.

    Example: mySubnetgroup

    ", + "CreateDBClusterMessage$Engine": "

    The name of the database engine to be used for this DB cluster.

    Valid Values: aurora

    ", + "CreateDBClusterMessage$EngineVersion": "

    The version number of the database engine to use.

    Aurora

    Example: 5.6.10a

    ", + "CreateDBClusterMessage$MasterUsername": "

    The name of the master user for the client DB cluster.

    Constraints:

    • Must be 1 to 16 alphanumeric characters.

    • First character must be a letter.

    • Cannot be a reserved word for the chosen database engine.

    ", + "CreateDBClusterMessage$MasterUserPassword": "

    The password for the master database user. This password can contain any printable ASCII character except \"/\", \"\"\", or \"@\".

    Constraints: Must contain from 8 to 41 characters.

    ", + "CreateDBClusterMessage$OptionGroupName": "

    A value that indicates that the DB cluster should be associated with the specified option group.

    Permanent options cannot be removed from an option group. The option group cannot be removed from a DB cluster once it is associated with a DB cluster.

    ", + "CreateDBClusterMessage$PreferredBackupWindow": "

    The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter.

    Default: A 30-minute window selected at random from an 8-hour block of time per region. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

    Constraints:

    • Must be in the format hh24:mi-hh24:mi.

    • Times should be in Universal Coordinated Time (UTC).

    • Must not conflict with the preferred maintenance window.

    • Must be at least 30 minutes.

    ", + "CreateDBClusterMessage$PreferredMaintenanceWindow": "

    The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

    Format: ddd:hh24:mi-ddd:hh24:mi

    Default: A 30-minute window selected at random from an 8-hour block of time per region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

    Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun

    Constraints: Minimum 30-minute window.

    ", + "CreateDBClusterMessage$ReplicationSourceIdentifier": "

    The Amazon Resource Name (ARN) of the source DB cluster if this DB cluster is created as a Read Replica.

    ", + "CreateDBClusterMessage$KmsKeyId": "

    The KMS key identifier for an encrypted DB cluster.

    The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are creating a DB cluster with the same AWS account that owns the KMS encryption key used to encrypt the new DB cluster, then you can use the KMS key alias instead of the ARN for the KMS encryption key.

    If the StorageEncrypted parameter is true, and you do not specify a value for the KmsKeyId parameter, then Amazon RDS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region.

    ", + "CreateDBClusterParameterGroupMessage$DBClusterParameterGroupName": "

    The name of the DB cluster parameter group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    This value is stored as a lowercase string.

    ", + "CreateDBClusterParameterGroupMessage$DBParameterGroupFamily": "

    The DB cluster parameter group family name. A DB cluster parameter group can be associated with one and only one DB cluster parameter group family, and can be applied only to a DB cluster running a database engine and engine version compatible with that DB cluster parameter group family.

    ", + "CreateDBClusterParameterGroupMessage$Description": "

    The description for the DB cluster parameter group.

    ", + "CreateDBClusterSnapshotMessage$DBClusterSnapshotIdentifier": "

    The identifier of the DB cluster snapshot. This parameter is stored as a lowercase string.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens.

    • First character must be a letter.

    • Cannot end with a hyphen or contain two consecutive hyphens.

    Example: my-cluster1-snapshot1

    ", + "CreateDBClusterSnapshotMessage$DBClusterIdentifier": "

    The identifier of the DB cluster to create a snapshot for. This parameter is not case-sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens.

    • First character must be a letter.

    • Cannot end with a hyphen or contain two consecutive hyphens.

    Example: my-cluster1

    ", + "CreateDBInstanceMessage$DBName": "

    The meaning of this parameter differs according to the database engine you use.

    Type: String

    MySQL

    The name of the database to create when the DB instance is created. If this parameter is not specified, no database is created in the DB instance.

    Constraints:

    • Must contain 1 to 64 alphanumeric characters

    • Cannot be a word reserved by the specified database engine

    MariaDB

    The name of the database to create when the DB instance is created. If this parameter is not specified, no database is created in the DB instance.

    Constraints:

    • Must contain 1 to 64 alphanumeric characters

    • Cannot be a word reserved by the specified database engine

    PostgreSQL

    The name of the database to create when the DB instance is created. If this parameter is not specified, the default \"postgres\" database is created in the DB instance.

    Constraints:

    • Must contain 1 to 63 alphanumeric characters

    • Must begin with a letter or an underscore. Subsequent characters can be letters, underscores, or digits (0-9).

    • Cannot be a word reserved by the specified database engine

    Oracle

    The Oracle System ID (SID) of the created DB instance.

    Default: ORCL

    Constraints:

    • Cannot be longer than 8 characters

    SQL Server

    Not applicable. Must be null.

    Amazon Aurora

    The name of the database to create when the primary instance of the DB cluster is created. If this parameter is not specified, no database is created in the DB instance.

    Constraints:

    • Must contain 1 to 64 alphanumeric characters

    • Cannot be a word reserved by the specified database engine

    ", + "CreateDBInstanceMessage$DBInstanceIdentifier": "

    The DB instance identifier. This parameter is stored as a lowercase string.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens (1 to 15 for SQL Server).

    • First character must be a letter.

    • Cannot end with a hyphen or contain two consecutive hyphens.

    Example: mydbinstance

    ", + "CreateDBInstanceMessage$DBInstanceClass": "

    The compute and memory capacity of the DB instance.

    Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.m4.large | db.m4.xlarge | db.m4.2xlarge | db.m4.4xlarge | db.m4.10xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge | db.t2.micro | db.t2.small | db.t2.medium | db.t2.large

    ", + "CreateDBInstanceMessage$Engine": "

    The name of the database engine to be used for this instance.

    Valid Values: MySQL | mariadb | oracle-se1 | oracle-se | oracle-ee | sqlserver-ee | sqlserver-se | sqlserver-ex | sqlserver-web | postgres | aurora

    Not every database engine is available for every AWS region.

    ", + "CreateDBInstanceMessage$MasterUsername": "

    The name of the master user for the client DB instance.

    MySQL

    Constraints:

    • Must be 1 to 16 alphanumeric characters.

    • First character must be a letter.

    • Cannot be a reserved word for the chosen database engine.

    MariaDB

    Constraints:

    • Must be 1 to 16 alphanumeric characters.

    • Cannot be a reserved word for the chosen database engine.

    Type: String

    Oracle

    Constraints:

    • Must be 1 to 30 alphanumeric characters.

    • First character must be a letter.

    • Cannot be a reserved word for the chosen database engine.

    SQL Server

    Constraints:

    • Must be 1 to 128 alphanumeric characters.

    • First character must be a letter.

    • Cannot be a reserved word for the chosen database engine.

    PostgreSQL

    Constraints:

    • Must be 1 to 63 alphanumeric characters.

    • First character must be a letter.

    • Cannot be a reserved word for the chosen database engine.

    ", + "CreateDBInstanceMessage$MasterUserPassword": "

    The password for the master database user. Can be any printable ASCII character except \"/\", \"\"\", or \"@\".

    Type: String

    MySQL

    Constraints: Must contain from 8 to 41 characters.

    MariaDB

    Constraints: Must contain from 8 to 41 characters.

    Oracle

    Constraints: Must contain from 8 to 30 characters.

    SQL Server

    Constraints: Must contain from 8 to 128 characters.

    PostgreSQL

    Constraints: Must contain from 8 to 128 characters.

    Amazon Aurora

    Constraints: Must contain from 8 to 41 characters.

    ", + "CreateDBInstanceMessage$AvailabilityZone": "

    The EC2 Availability Zone that the database instance will be created in. For information on regions and Availability Zones, see Regions and Availability Zones.

    Default: A random, system-chosen Availability Zone in the endpoint's region.

    Example: us-east-1d

    Constraint: The AvailabilityZone parameter cannot be specified if the MultiAZ parameter is set to true. The specified Availability Zone must be in the same region as the current endpoint.

    ", + "CreateDBInstanceMessage$DBSubnetGroupName": "

    A DB subnet group to associate with this DB instance.

    If there is no DB subnet group, then it is a non-VPC DB instance.

    ", + "CreateDBInstanceMessage$PreferredMaintenanceWindow": "

    The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC). For more information, see DB Instance Maintenance.

    Format: ddd:hh24:mi-ddd:hh24:mi

    Default: A 30-minute window selected at random from an 8-hour block of time per region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

    Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun

    Constraints: Minimum 30-minute window.

    ", + "CreateDBInstanceMessage$DBParameterGroupName": "

    The name of the DB parameter group to associate with this DB instance. If this argument is omitted, the default DBParameterGroup for the specified engine will be used.

    Constraints:

    • Must be 1 to 255 alphanumeric characters

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    ", + "CreateDBInstanceMessage$PreferredBackupWindow": "

    The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter. For more information, see DB Instance Backups.

    Default: A 30-minute window selected at random from an 8-hour block of time per region. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

    Constraints:

    • Must be in the format hh24:mi-hh24:mi.

    • Times should be in Universal Coordinated Time (UTC).

    • Must not conflict with the preferred maintenance window.

    • Must be at least 30 minutes.

    ", + "CreateDBInstanceMessage$EngineVersion": "

    The version number of the database engine to use.

    The following are the database engines and major and minor versions that are available with Amazon RDS. Not every database engine is available for every AWS region.

    Amazon Aurora

    • Version 5.6 (only available in AWS regions ap-northeast-1, ap-northeast-2, ap-southeast-2, eu-west-1, us-east-1, us-west-2): 5.6.10a

    MariaDB

    • Version 10.0 (available in all AWS regions): 10.0.17 | 10.0.24

    Microsoft SQL Server Enterprise Edition (sqlserver-ee)

    • Version 11.00 (available in all AWS regions): 11.00.2100.60.v1 | 11.00.5058.0.v1 | 11.00.6020.0.v1

    • Version 10.50 (available in all AWS regions): 10.50.2789.0.v1 | 10.50.6000.34.v1 | 10.50.6529.0.v1

    Microsoft SQL Server Express Edition (sqlserver-ex)

    • Version 12.00 (available in all AWS regions): 12.00.4422.0.v1

    • Version 11.00 (available in all AWS regions): 11.00.2100.60.v1 | 11.00.5058.0.v1 | 11.00.6020.0.v1

    • Version 10.50 (available in all AWS regions): 10.50.2789.0.v1 | 10.50.6000.34.v1 | 10.50.6529.0.v1

    Microsoft SQL Server Standard Edition (sqlserver-se)

    • Version 12.00 (available in all AWS regions): 12.00.4422.0.v1

    • Version 11.00 (available in all AWS regions): 11.00.2100.60.v1 | 11.00.5058.0.v1 | 11.00.6020.0.v1

    • Version 10.50 (available in all AWS regions): 10.50.2789.0.v1 | 10.50.6000.34.v1 | 10.50.6529.0.v1

    Microsoft SQL Server Web Edition (sqlserver-web)

    • Version 12.00 (available in all AWS regions): 12.00.4422.0.v1

    • Version 11.00 (available in all AWS regions): 11.00.2100.60.v1 | 11.00.5058.0.v1 | 11.00.6020.0.v1

    • Version 10.50 (available in all AWS regions): 10.50.2789.0.v1 | 10.50.6000.34.v1 | 10.50.6529.0.v1

    MySQL

    • Version 5.7 (available in all AWS regions): 5.7.10 | 5.7.11

    • Version 5.6 (available in all AWS regions except ap-northeast-2): 5.6.19a | 5.6.19b | 5.6.21 | 5.6.21b | 5.6.22

    • Version 5.6 (available in all AWS regions): 5.6.23 | 5.6.27 | 5.6.29

    • Version 5.5 (available in all AWS regions except eu-central-1, ap-northeast-2): 5.5.40 | 5.5.40a

    • Version 5.5 (available in all AWS regions except ap-northeast-2): 5.5.40b | 5.5.41

    • Version 5.5 (available in all AWS regions): 5.5.42 | 5.5.46

    • Version 5.1 (available in all AWS regions except eu-central-1, ap-northeast-2): 5.1.73a | 5.1.73b

    Oracle Database Enterprise Edition (oracle-ee)

    • Version 12.1 (available in all AWS regions except ap-northeast-2): 12.1.0.1.v1 | 12.1.0.1.v2

    • Version 12.1 (available in all AWS regions except ap-northeast-2, us-gov-west-1): 12.1.0.1.v3 | 12.1.0.1.v4

    • Version 12.1 (available in all AWS regions): 12.1.0.2.v1

    • Version 12.1 (available in all AWS regions except us-gov-west-1): 12.1.0.2.v2 | 12.1.0.2.v3

    • Version 11.2 (available in all AWS regions except eu-central-1, ap-northeast-2): 11.2.0.2.v3 | 11.2.0.2.v4 | 11.2.0.2.v5 | 11.2.0.2.v6 | 11.2.0.2.v7

    • Version 11.2 (available in all AWS regions except ap-northeast-2): 11.2.0.3.v1 | 11.2.0.3.v2 | 11.2.0.3.v3

    • Version 11.2 (available in all AWS regions except ap-northeast-2, us-gov-west-1): 11.2.0.3.v4

    • Version 11.2 (available in all AWS regions): 11.2.0.4.v1 | 11.2.0.4.v3 | 11.2.0.4.v4

    • Version 11.2 (available in all AWS regions except us-gov-west-1): 11.2.0.4.v5 | 11.2.0.4.v6 | 11.2.0.4.v7

    Oracle Database Standard Edition (oracle-se)

    • Version 12.1 (available in all AWS regions except ap-northeast-2): 12.1.0.1.v1 | 12.1.0.1.v2

    • Version 12.1 (available in all AWS regions except ap-northeast-2, us-gov-west-1): 12.1.0.1.v3 | 12.1.0.1.v4

    • Version 11.2 (available in all AWS regions except eu-central-1, ap-northeast-2): 11.2.0.2.v3 | 11.2.0.2.v4 | 11.2.0.2.v5 | 11.2.0.2.v6 | 11.2.0.2.v7

    • Version 11.2 (available in all AWS regions except ap-northeast-2): 11.2.0.3.v1 | 11.2.0.3.v2 | 11.2.0.3.v3

    • Version 11.2 (available in all AWS regions except ap-northeast-2, us-gov-west-1): 11.2.0.3.v4

    • Version 11.2 (available in all AWS regions): 11.2.0.4.v1 | 11.2.0.4.v3 | 11.2.0.4.v4

    • Version 11.2 (available in all AWS regions except us-gov-west-1): 11.2.0.4.v5 | 11.2.0.4.v6 | 11.2.0.4.v7

    Oracle Database Standard Edition One (oracle-se1)

    • Version 12.1 (available in all AWS regions except ap-northeast-2): 12.1.0.1.v1 | 12.1.0.1.v2

    • Version 12.1 (available in all AWS regions except ap-northeast-2, us-gov-west-1): 12.1.0.1.v3 | 12.1.0.1.v4

    • Version 11.2 (available in all AWS regions except eu-central-1, ap-northeast-2): 11.2.0.2.v3 | 11.2.0.2.v4 | 11.2.0.2.v5 | 11.2.0.2.v6 | 11.2.0.2.v7

    • Version 11.2 (available in all AWS regions except ap-northeast-2): 11.2.0.3.v1 | 11.2.0.3.v2 | 11.2.0.3.v3

    • Version 11.2 (available in all AWS regions except ap-northeast-2, us-gov-west-1): 11.2.0.3.v4

    • Version 11.2 (available in all AWS regions): 11.2.0.4.v1 | 11.2.0.4.v3 | 11.2.0.4.v4

    • Version 11.2 (available in all AWS regions except us-gov-west-1): 11.2.0.4.v5 | 11.2.0.4.v6 | 11.2.0.4.v7

    Oracle Database Standard Edition Two (oracle-se2)

    • Version 12.1 (available in all AWS regions except us-gov-west-1): 12.1.0.2.v2 | 12.1.0.2.v3

    PostgreSQL

    • Version 9.5 (available in all AWS regions except us-gov-west-1): 9.5.2

    • Version 9.4 (available in all AWS regions): 9.4.1 | 9.4.4 | 9.4.5

    • Version 9.4 (available in all AWS regions except us-gov-west-1): 9.4.7

    • Version 9.3 (available in all AWS regions except eu-central-1, ap-northeast-2): 9.3.1 | 9.3.2

    • Version 9.3 (available in all AWS regions except ap-northeast-2): 9.3.10 | 9.3.3 | 9.3.5 | 9.3.6 | 9.3.9

    • Version 9.3 (available in all AWS regions except ap-northeast-2, us-gov-west-1): 9.3.12

    ", + "CreateDBInstanceMessage$LicenseModel": "

    License model information for this DB instance.

    Valid values: license-included | bring-your-own-license | general-public-license

    ", + "CreateDBInstanceMessage$OptionGroupName": "

    Indicates that the DB instance should be associated with the specified option group.

    Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance.

    ", + "CreateDBInstanceMessage$CharacterSetName": "

    For supported engines, indicates that the DB instance should be associated with the specified CharacterSet.

    ", + "CreateDBInstanceMessage$DBClusterIdentifier": "

    The identifier of the DB cluster that the instance will belong to.

    For information on creating a DB cluster, see CreateDBCluster.

    Type: String

    ", + "CreateDBInstanceMessage$StorageType": "

    Specifies the storage type to be associated with the DB instance.

    Valid values: standard | gp2 | io1

    If you specify io1, you must also include a value for the Iops parameter.

    Default: io1 if the Iops parameter is specified; otherwise standard

    ", + "CreateDBInstanceMessage$TdeCredentialArn": "

    The ARN from the Key Store with which to associate the instance for TDE encryption.

    ", + "CreateDBInstanceMessage$TdeCredentialPassword": "

    The password for the given ARN from the Key Store in order to access the device.

    ", + "CreateDBInstanceMessage$KmsKeyId": "

    The KMS key identifier for an encrypted DB instance.

    The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are creating a DB instance with the same AWS account that owns the KMS encryption key used to encrypt the new DB instance, then you can use the KMS key alias instead of the ARN for the KMS encryption key.

    If the StorageEncrypted parameter is true, and you do not specify a value for the KmsKeyId parameter, then Amazon RDS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region.

    ", + "CreateDBInstanceMessage$Domain": "

    Specify the Active Directory Domain to create the instance in.

    ", + "CreateDBInstanceMessage$MonitoringRoleArn": "

    The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to CloudWatch Logs. For example, arn:aws:iam::123456789012:role/emaccess. For information on creating a monitoring role, go to To create an IAM role for Amazon RDS Enhanced Monitoring.

    If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn value.

    ", + "CreateDBInstanceMessage$DomainIAMRoleName": "

    Specify the name of the IAM role to be used when making API calls to the Directory Service.

    ", + "CreateDBInstanceReadReplicaMessage$DBInstanceIdentifier": "

    The DB instance identifier of the Read Replica. This identifier is the unique key that identifies a DB instance. This parameter is stored as a lowercase string.

    ", + "CreateDBInstanceReadReplicaMessage$SourceDBInstanceIdentifier": "

    The identifier of the DB instance that will act as the source for the Read Replica. Each DB instance can have up to five Read Replicas.

    Constraints:

    • Must be the identifier of an existing MySQL, MariaDB, or PostgreSQL DB instance.

    • Can specify a DB instance that is a MySQL Read Replica only if the source is running MySQL 5.6.

    • Can specify a DB instance that is a PostgreSQL Read Replica only if the source is running PostgreSQL 9.3.5.

    • The specified DB instance must have automatic backups enabled, and its backup retention period must be greater than 0.

    • If the source DB instance is in the same region as the Read Replica, specify a valid DB instance identifier.

    • If the source DB instance is in a different region than the Read Replica, specify a valid DB instance ARN. For more information, go to Constructing an Amazon RDS Amazon Resource Name (ARN).

    ", + "CreateDBInstanceReadReplicaMessage$DBInstanceClass": "

    The compute and memory capacity of the Read Replica.

    Valid Values: db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.m4.large | db.m4.xlarge | db.m4.2xlarge | db.m4.4xlarge | db.m4.10xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge | db.t2.micro | db.t2.small | db.t2.medium | db.t2.large

    Default: Inherits from the source DB instance.

    ", + "CreateDBInstanceReadReplicaMessage$AvailabilityZone": "

    The Amazon EC2 Availability Zone that the Read Replica will be created in.

    Default: A random, system-chosen Availability Zone in the endpoint's region.

    Example: us-east-1d

    ", + "CreateDBInstanceReadReplicaMessage$OptionGroupName": "

    The option group the DB instance will be associated with. If omitted, the default option group for the engine specified will be used.

    ", + "CreateDBInstanceReadReplicaMessage$DBSubnetGroupName": "

    Specifies a DB subnet group for the DB instance. The new DB instance will be created in the VPC associated with the DB subnet group. If no DB subnet group is specified, then the new DB instance is not created in a VPC.

    Constraints:

    • Can only be specified if the source DB instance identifier specifies a DB instance in another region.

    • The specified DB subnet group must be in the same region in which the operation is running.

    • All Read Replicas in one region that are created from the same source DB instance must either:

      • Specify DB subnet groups from the same VPC. All these Read Replicas will be created in the same VPC.

      • Not specify a DB subnet group. All these Read Replicas will be created outside of any VPC.

    Constraints: Must contain no more than 255 alphanumeric characters, periods, underscores, spaces, or hyphens. Must not be default.

    Example: mySubnetgroup

    ", + "CreateDBInstanceReadReplicaMessage$StorageType": "

    Specifies the storage type to be associated with the Read Replica.

    Valid values: standard | gp2 | io1

    If you specify io1, you must also include a value for the Iops parameter.

    Default: io1 if the Iops parameter is specified; otherwise standard

    ", + "CreateDBInstanceReadReplicaMessage$MonitoringRoleArn": "

    The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to CloudWatch Logs. For example, arn:aws:iam::123456789012:role/emaccess. For information on creating a monitoring role, go to To create an IAM role for Amazon RDS Enhanced Monitoring.

    If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn value.

    ", + "CreateDBParameterGroupMessage$DBParameterGroupName": "

    The name of the DB parameter group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    This value is stored as a lowercase string.

    ", + "CreateDBParameterGroupMessage$DBParameterGroupFamily": "

    The DB parameter group family name. A DB parameter group can be associated with one and only one DB parameter group family, and can be applied only to a DB instance running a database engine and engine version compatible with that DB parameter group family.

    ", + "CreateDBParameterGroupMessage$Description": "

    The description for the DB parameter group.

    ", + "CreateDBSecurityGroupMessage$DBSecurityGroupName": "

    The name for the DB security group. This value is stored as a lowercase string.

    Constraints:

    • Must be 1 to 255 alphanumeric characters

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    • Must not be \"Default\"

    • Cannot contain spaces

    Example: mysecuritygroup

    ", + "CreateDBSecurityGroupMessage$DBSecurityGroupDescription": "

    The description for the DB security group.

    ", + "CreateDBSnapshotMessage$DBSnapshotIdentifier": "

    The identifier for the DB snapshot.

    Constraints:

    • Cannot be null, empty, or blank

    • Must contain from 1 to 255 alphanumeric characters or hyphens

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-snapshot-id

    ", + "CreateDBSnapshotMessage$DBInstanceIdentifier": "

    The DB instance identifier. This is the unique key that identifies a DB instance.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    ", + "CreateDBSubnetGroupMessage$DBSubnetGroupName": "

    The name for the DB subnet group. This value is stored as a lowercase string.

    Constraints: Must contain no more than 255 alphanumeric characters. Cannot contain periods, underscores, spaces, or hyphens. Must not be default.

    Example: mySubnetgroup

    ", + "CreateDBSubnetGroupMessage$DBSubnetGroupDescription": "

    The description for the DB subnet group.

    ", + "CreateEventSubscriptionMessage$SubscriptionName": "

    The name of the subscription.

    Constraints: The name must be less than 255 characters.

    ", + "CreateEventSubscriptionMessage$SnsTopicArn": "

    The Amazon Resource Name (ARN) of the SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it.

    ", + "CreateEventSubscriptionMessage$SourceType": "

    The type of source that will be generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. If this value is not specified, all events are returned.

    Valid values: db-instance | db-cluster | db-parameter-group | db-security-group | db-snapshot | db-cluster-snapshot

    ", + "CreateOptionGroupMessage$OptionGroupName": "

    Specifies the name of the option group to be created.

    Constraints:

    • Must be 1 to 255 alphanumeric characters or hyphens

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: myoptiongroup

    ", + "CreateOptionGroupMessage$EngineName": "

    Specifies the name of the engine that this option group should be associated with.

    ", + "CreateOptionGroupMessage$MajorEngineVersion": "

    Specifies the major version of the engine that this option group should be associated with.

    ", + "CreateOptionGroupMessage$OptionGroupDescription": "

    The description of the option group.

    ", + "DBCluster$CharacterSetName": "

    If present, specifies the name of the character set that this cluster is associated with.

    ", + "DBCluster$DatabaseName": "

    Contains the name of the initial database of this DB cluster that was provided at create time, if one was specified when the DB cluster was created. This same name is returned for the life of the DB cluster.

    ", + "DBCluster$DBClusterIdentifier": "

    Contains a user-supplied DB cluster identifier. This identifier is the unique key that identifies a DB cluster.

    ", + "DBCluster$DBClusterParameterGroup": "

    Specifies the name of the DB cluster parameter group for the DB cluster.

    ", + "DBCluster$DBSubnetGroup": "

    Specifies information on the subnet group associated with the DB cluster, including the name, description, and subnets in the subnet group.

    ", + "DBCluster$Status": "

    Specifies the current state of this DB cluster.

    ", + "DBCluster$PercentProgress": "

    Specifies the progress of the operation as a percentage.

    ", + "DBCluster$Endpoint": "

    Specifies the connection endpoint for the primary instance of the DB cluster.

    ", + "DBCluster$Engine": "

    Provides the name of the database engine to be used for this DB cluster.

    ", + "DBCluster$EngineVersion": "

    Indicates the database engine version.

    ", + "DBCluster$MasterUsername": "

    Contains the master username for the DB cluster.

    ", + "DBCluster$PreferredBackupWindow": "

    Specifies the daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod.

    ", + "DBCluster$PreferredMaintenanceWindow": "

    Specifies the weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

    ", + "DBCluster$ReplicationSourceIdentifier": "

    Contains the identifier of the source DB cluster if this DB cluster is a Read Replica.

    ", + "DBCluster$HostedZoneId": "

    Specifies the ID that Amazon Route 53 assigns when you create a hosted zone.

    ", + "DBCluster$KmsKeyId": "

    If StorageEncrypted is true, the KMS key identifier for the encrypted DB cluster.

    ", + "DBCluster$DbClusterResourceId": "

    The region-unique, immutable identifier for the DB cluster. This identifier is found in AWS CloudTrail log entries whenever the KMS key for the DB cluster is accessed.

    ", + "DBClusterMember$DBInstanceIdentifier": "

    Specifies the instance identifier for this member of the DB cluster.

    ", + "DBClusterMember$DBClusterParameterGroupStatus": "

    Specifies the status of the DB cluster parameter group for this member of the DB cluster.

    ", + "DBClusterMessage$Marker": "

    A pagination token that can be used in a subsequent DescribeDBClusters request.

    ", + "DBClusterOptionGroupStatus$DBClusterOptionGroupName": "

    Specifies the name of the DB cluster option group.

    ", + "DBClusterOptionGroupStatus$Status": "

    Specifies the status of the DB cluster option group.

    ", + "DBClusterParameterGroup$DBClusterParameterGroupName": "

    Provides the name of the DB cluster parameter group.

    ", + "DBClusterParameterGroup$DBParameterGroupFamily": "

    Provides the name of the DB parameter group family that this DB cluster parameter group is compatible with.

    ", + "DBClusterParameterGroup$Description": "

    Provides the customer-specified description for this DB cluster parameter group.

    ", + "DBClusterParameterGroupDetails$Marker": "

    An optional pagination token provided by a previous DescribeDBClusterParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBClusterParameterGroupNameMessage$DBClusterParameterGroupName": "

    The name of the DB cluster parameter group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    This value is stored as a lowercase string.

    ", + "DBClusterParameterGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBClusterParameterGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBClusterSnapshot$DBClusterSnapshotIdentifier": "

    Specifies the identifier for the DB cluster snapshot.

    ", + "DBClusterSnapshot$DBClusterIdentifier": "

    Specifies the DB cluster identifier of the DB cluster that this DB cluster snapshot was created from.

    ", + "DBClusterSnapshot$Engine": "

    Specifies the name of the database engine.

    ", + "DBClusterSnapshot$Status": "

    Specifies the status of this DB cluster snapshot.

    ", + "DBClusterSnapshot$VpcId": "

    Provides the VPC ID associated with the DB cluster snapshot.

    ", + "DBClusterSnapshot$MasterUsername": "

    Provides the master username for the DB cluster snapshot.

    ", + "DBClusterSnapshot$EngineVersion": "

    Provides the version of the database engine for this DB cluster snapshot.

    ", + "DBClusterSnapshot$LicenseModel": "

    Provides the license model information for this DB cluster snapshot.

    ", + "DBClusterSnapshot$SnapshotType": "

    Provides the type of the DB cluster snapshot.

    ", + "DBClusterSnapshot$KmsKeyId": "

    If StorageEncrypted is true, the KMS key identifier for the encrypted DB cluster snapshot.

    ", + "DBClusterSnapshotAttribute$AttributeName": "

    The name of the manual DB cluster snapshot attribute.

    The attribute named restore refers to the list of AWS accounts that have permission to copy or restore the manual DB cluster snapshot. For more information, see the ModifyDBClusterSnapshotAttribute API action.

    ", + "DBClusterSnapshotAttributesResult$DBClusterSnapshotIdentifier": "

    The identifier of the manual DB cluster snapshot that the attributes apply to.

    ", + "DBClusterSnapshotMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBClusterSnapshots request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBEngineVersion$Engine": "

    The name of the database engine.

    ", + "DBEngineVersion$EngineVersion": "

    The version number of the database engine.

    ", + "DBEngineVersion$DBParameterGroupFamily": "

    The name of the DB parameter group family for the database engine.

    ", + "DBEngineVersion$DBEngineDescription": "

    The description of the database engine.

    ", + "DBEngineVersion$DBEngineVersionDescription": "

    The description of the database engine version.

    ", + "DBEngineVersionMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBInstance$DBInstanceIdentifier": "

    Contains a user-supplied database identifier. This identifier is the unique key that identifies a DB instance.

    ", + "DBInstance$DBInstanceClass": "

    Contains the name of the compute and memory capacity class of the DB instance.

    ", + "DBInstance$Engine": "

    Provides the name of the database engine to be used for this DB instance.

    ", + "DBInstance$DBInstanceStatus": "

    Specifies the current state of this database.

    ", + "DBInstance$MasterUsername": "

    Contains the master username for the DB instance.

    ", + "DBInstance$DBName": "

    The meaning of this parameter differs according to the database engine you use. For example, this value returns MySQL, MariaDB, or PostgreSQL information when returning values from CreateDBInstanceReadReplica since Read Replicas are only supported for these engines.

    MySQL, MariaDB, SQL Server, PostgreSQL, Amazon Aurora

    Contains the name of the initial database of this instance that was provided at create time, if one was specified when the DB instance was created. This same name is returned for the life of the DB instance.

    Type: String

    Oracle

    Contains the Oracle System ID (SID) of the created DB instance. Not shown when the returned parameters do not apply to an Oracle DB instance.

    ", + "DBInstance$PreferredBackupWindow": "

    Specifies the daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod.

    ", + "DBInstance$AvailabilityZone": "

    Specifies the name of the Availability Zone the DB instance is located in.

    ", + "DBInstance$PreferredMaintenanceWindow": "

    Specifies the weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

    ", + "DBInstance$EngineVersion": "

    Indicates the database engine version.

    ", + "DBInstance$ReadReplicaSourceDBInstanceIdentifier": "

    Contains the identifier of the source DB instance if this DB instance is a Read Replica.

    ", + "DBInstance$LicenseModel": "

    License model information for this DB instance.

    ", + "DBInstance$CharacterSetName": "

    If present, specifies the name of the character set that this instance is associated with.

    ", + "DBInstance$SecondaryAvailabilityZone": "

    If present, specifies the name of the secondary Availability Zone for a DB instance with multi-AZ support.

    ", + "DBInstance$StorageType": "

    Specifies the storage type associated with the DB instance.

    ", + "DBInstance$TdeCredentialArn": "

    The ARN from the Key Store with which the instance is associated for TDE encryption.

    ", + "DBInstance$DBClusterIdentifier": "

    If the DB instance is a member of a DB cluster, contains the name of the DB cluster that the DB instance is a member of.

    ", + "DBInstance$KmsKeyId": "

    If StorageEncrypted is true, the KMS key identifier for the encrypted DB instance.

    ", + "DBInstance$DbiResourceId": "

    The region-unique, immutable identifier for the DB instance. This identifier is found in AWS CloudTrail log entries whenever the KMS key for the DB instance is accessed.

    ", + "DBInstance$CACertificateIdentifier": "

    The identifier of the CA certificate for this DB instance.

    ", + "DBInstance$EnhancedMonitoringResourceArn": "

    The Amazon Resource Name (ARN) of the Amazon CloudWatch Logs log stream that receives the Enhanced Monitoring metrics data for the DB instance.

    ", + "DBInstance$MonitoringRoleArn": "

    The ARN for the IAM role that permits RDS to send Enhanced Monitoring metrics to CloudWatch Logs.

    ", + "DBInstanceMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBInstanceStatusInfo$StatusType": "

    This value is currently \"read replication.\"

    ", + "DBInstanceStatusInfo$Status": "

    Status of the DB instance. For a StatusType of read replica, the values can be replicating, error, stopped, or terminated.

    ", + "DBInstanceStatusInfo$Message": "

    Details of the error if there is an error for the instance. If the instance is not in an error state, this value is blank.

    ", + "DBParameterGroup$DBParameterGroupName": "

    Provides the name of the DB parameter group.

    ", + "DBParameterGroup$DBParameterGroupFamily": "

    Provides the name of the DB parameter group family that this DB parameter group is compatible with.

    ", + "DBParameterGroup$Description": "

    Provides the customer-specified description for this DB parameter group.

    ", + "DBParameterGroupDetails$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBParameterGroupNameMessage$DBParameterGroupName": "

    Provides the name of the DB parameter group.

    ", + "DBParameterGroupStatus$DBParameterGroupName": "

    The name of the DB parameter group.

    ", + "DBParameterGroupStatus$ParameterApplyStatus": "

    The status of parameter updates.

    ", + "DBParameterGroupsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBSecurityGroup$OwnerId": "

    Provides the AWS ID of the owner of a specific DB security group.

    ", + "DBSecurityGroup$DBSecurityGroupName": "

    Specifies the name of the DB security group.

    ", + "DBSecurityGroup$DBSecurityGroupDescription": "

    Provides the description of the DB security group.

    ", + "DBSecurityGroup$VpcId": "

    Provides the VpcId of the DB security group.

    ", + "DBSecurityGroupMembership$DBSecurityGroupName": "

    The name of the DB security group.

    ", + "DBSecurityGroupMembership$Status": "

    The status of the DB security group.

    ", + "DBSecurityGroupMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBSecurityGroupNameList$member": null, + "DBSnapshot$DBSnapshotIdentifier": "

    Specifies the identifier for the DB snapshot.

    ", + "DBSnapshot$DBInstanceIdentifier": "

    Specifies the DB instance identifier of the DB instance this DB snapshot was created from.

    ", + "DBSnapshot$Engine": "

    Specifies the name of the database engine.

    ", + "DBSnapshot$Status": "

    Specifies the status of this DB snapshot.

    ", + "DBSnapshot$AvailabilityZone": "

    Specifies the name of the Availability Zone the DB instance was located in at the time of the DB snapshot.

    ", + "DBSnapshot$VpcId": "

    Provides the VPC ID associated with the DB snapshot.

    ", + "DBSnapshot$MasterUsername": "

    Provides the master username for the DB snapshot.

    ", + "DBSnapshot$EngineVersion": "

    Specifies the version of the database engine.

    ", + "DBSnapshot$LicenseModel": "

    License model information for the restored DB instance.

    ", + "DBSnapshot$SnapshotType": "

    Provides the type of the DB snapshot.

    ", + "DBSnapshot$OptionGroupName": "

    Provides the option group name for the DB snapshot.

    ", + "DBSnapshot$SourceRegion": "

    The region that the DB snapshot was created in or copied from.

    ", + "DBSnapshot$SourceDBSnapshotIdentifier": "

    The Amazon Resource Name (ARN) of the DB snapshot that this DB snapshot was copied from. This value is set only for cross-customer or cross-region copies.

    ", + "DBSnapshot$StorageType": "

    Specifies the storage type associated with the DB snapshot.

    ", + "DBSnapshot$TdeCredentialArn": "

    The ARN from the Key Store with which to associate the instance for TDE encryption.

    ", + "DBSnapshot$KmsKeyId": "

    If Encrypted is true, the KMS key identifier for the encrypted DB snapshot.

    ", + "DBSnapshotAttribute$AttributeName": "

    The name of the manual DB snapshot attribute.

    The attribute named restore refers to the list of AWS accounts that have permission to copy or restore the manual DB snapshot. For more information, see the ModifyDBSnapshotAttribute API action.

    ", + "DBSnapshotAttributesResult$DBSnapshotIdentifier": "

    The identifier of the manual DB snapshot that the attributes apply to.

    ", + "DBSnapshotMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBSubnetGroup$DBSubnetGroupName": "

    The name of the DB subnet group.

    ", + "DBSubnetGroup$DBSubnetGroupDescription": "

    Provides the description of the DB subnet group.

    ", + "DBSubnetGroup$VpcId": "

    Provides the VpcId of the DB subnet group.

    ", + "DBSubnetGroup$SubnetGroupStatus": "

    Provides the status of the DB subnet group.

    ", + "DBSubnetGroupMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DeleteDBClusterMessage$DBClusterIdentifier": "

    The DB cluster identifier for the DB cluster to be deleted. This parameter isn't case-sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    ", + "DeleteDBClusterMessage$FinalDBSnapshotIdentifier": "

    The DB cluster snapshot identifier of the new DB cluster snapshot created when SkipFinalSnapshot is set to false.

    Specifying this parameter and also setting the SkipFinalSnapshot parameter to true results in an error (see the sketch after the constraints below).

    Constraints:

    • Must be 1 to 255 alphanumeric characters

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens
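
    A minimal aws-sdk-go sketch of the SkipFinalSnapshot/FinalDBSnapshotIdentifier interaction described above (the region and the "my-cluster" identifiers are illustrative, not taken from this changeset; DeleteDBInstance follows the same pattern with its own input type):

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/rds"
        )

        func main() {
            // Client construction; the later sketches in this section reuse this svc.
            svc := rds.New(session.Must(session.NewSession(&aws.Config{
                Region: aws.String("us-east-1"), // illustrative region
            })))

            // A final snapshot is taken only while SkipFinalSnapshot is false;
            // supplying FinalDBSnapshotIdentifier together with
            // SkipFinalSnapshot=true returns an error.
            out, err := svc.DeleteDBCluster(&rds.DeleteDBClusterInput{
                DBClusterIdentifier:       aws.String("my-cluster"),
                FinalDBSnapshotIdentifier: aws.String("my-cluster-final"),
                SkipFinalSnapshot:         aws.Bool(false),
            })
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println(aws.StringValue(out.DBCluster.Status))
        }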

    ", + "DeleteDBClusterParameterGroupMessage$DBClusterParameterGroupName": "

    The name of the DB cluster parameter group.

    Constraints:

    • Must be the name of an existing DB cluster parameter group.

    • You cannot delete a default DB cluster parameter group.

    • Cannot be associated with any DB clusters.

    ", + "DeleteDBClusterSnapshotMessage$DBClusterSnapshotIdentifier": "

    The identifier of the DB cluster snapshot to delete.

    Constraints: Must be the name of an existing DB cluster snapshot in the available state.

    ", + "DeleteDBInstanceMessage$DBInstanceIdentifier": "

    The DB instance identifier for the DB instance to be deleted. This parameter isn't case-sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    ", + "DeleteDBInstanceMessage$FinalDBSnapshotIdentifier": "

    The DBSnapshotIdentifier of the new DBSnapshot created when SkipFinalSnapshot is set to false.

    Specifying this parameter and also setting the SkipFinalSnapshot parameter to true results in an error.

    Constraints:

    • Must be 1 to 255 alphanumeric characters

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    • Cannot be specified when deleting a Read Replica.

    ", + "DeleteDBParameterGroupMessage$DBParameterGroupName": "

    The name of the DB parameter group.

    Constraints:

    • Must be the name of an existing DB parameter group

    • You cannot delete a default DB parameter group

    • Cannot be associated with any DB instances

    ", + "DeleteDBSecurityGroupMessage$DBSecurityGroupName": "

    The name of the DB security group to delete.

    You cannot delete the default DB security group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    • Must not be \"Default\"

    • Cannot contain spaces

    ", + "DeleteDBSnapshotMessage$DBSnapshotIdentifier": "

    The DBSnapshot identifier.

    Constraints: Must be the name of an existing DB snapshot in the available state.

    ", + "DeleteDBSubnetGroupMessage$DBSubnetGroupName": "

    The name of the database subnet group to delete.

    You cannot delete the default subnet group.

    Constraints:

    Must contain no more than 255 alphanumeric characters, periods, underscores, spaces, or hyphens. Must not be default.

    Example: mySubnetgroup

    ", + "DeleteEventSubscriptionMessage$SubscriptionName": "

    The name of the RDS event notification subscription you want to delete.

    ", + "DeleteOptionGroupMessage$OptionGroupName": "

    The name of the option group to be deleted.

    You cannot delete default option groups.

    ", + "DescribeCertificatesMessage$CertificateIdentifier": "

    The user-supplied certificate identifier. If this parameter is specified, information for only the identified certificate is returned. This parameter isn't case-sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    ", + "DescribeCertificatesMessage$Marker": "

    An optional pagination token provided by a previous DescribeCertificates request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBClusterParameterGroupsMessage$DBClusterParameterGroupName": "

    The name of a specific DB cluster parameter group to return details for.

    Constraints:

    • Must be 1 to 255 alphanumeric characters

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    ", + "DescribeDBClusterParameterGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBClusterParameterGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBClusterParametersMessage$DBClusterParameterGroupName": "

    The name of a specific DB cluster parameter group to return parameter details for.

    Constraints:

    • Must be 1 to 255 alphanumeric characters

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    ", + "DescribeDBClusterParametersMessage$Source": "

    A value that restricts the results to parameters from a specific source. Parameter sources can be engine, service, or customer.

    ", + "DescribeDBClusterParametersMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBClusterParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBClusterSnapshotAttributesMessage$DBClusterSnapshotIdentifier": "

    The identifier for the DB cluster snapshot to describe the attributes for.

    ", + "DescribeDBClusterSnapshotsMessage$DBClusterIdentifier": "

    The ID of the DB cluster to retrieve the list of DB cluster snapshots for. This parameter cannot be used in conjunction with the DBClusterSnapshotIdentifier parameter. This parameter is not case-sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    ", + "DescribeDBClusterSnapshotsMessage$DBClusterSnapshotIdentifier": "

    A specific DB cluster snapshot identifier to describe. This parameter cannot be used in conjunction with the DBClusterIdentifier parameter. This value is stored as a lowercase string.

    Constraints:

    • Must be 1 to 255 alphanumeric characters

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    • If this identifier is for an automated snapshot, the SnapshotType parameter must also be specified.

    ", + "DescribeDBClusterSnapshotsMessage$SnapshotType": "

    The type of DB cluster snapshots to be returned. You can specify one of the following values:

    • automated - Return all DB cluster snapshots that have been automatically taken by Amazon RDS for my AWS account.

    • manual - Return all DB cluster snapshots that have been taken by my AWS account.

    • shared - Return all manual DB cluster snapshots that have been shared to my AWS account.

    • public - Return all DB cluster snapshots that have been marked as public.

    If you don't specify a SnapshotType value, then both automated and manual DB cluster snapshots are returned. You can include shared DB cluster snapshots with these results by setting the IncludeShared parameter to true. You can include public DB cluster snapshots with these results by setting the IncludePublic parameter to true.

    The IncludeShared and IncludePublic parameters don't apply for SnapshotType values of manual or automated. The IncludePublic parameter doesn't apply when SnapshotType is set to shared. The IncludeShared parameter doesn't apply when SnapshotType is set to public.
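
    A short sketch of the SnapshotType/IncludeShared interaction (reusing the svc client from the DeleteDBCluster sketch earlier in this section):

        // With no SnapshotType set, automated and manual snapshots are
        // returned; IncludeShared adds manual snapshots shared with this account.
        out, err := svc.DescribeDBClusterSnapshots(&rds.DescribeDBClusterSnapshotsInput{
            IncludeShared: aws.Bool(true),
        })
        if err != nil {
            log.Fatal(err)
        }
        for _, s := range out.DBClusterSnapshots {
            fmt.Println(aws.StringValue(s.DBClusterSnapshotIdentifier),
                aws.StringValue(s.SnapshotType))
        }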

    ", + "DescribeDBClusterSnapshotsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBClusterSnapshots request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBClustersMessage$DBClusterIdentifier": "

    The user-supplied DB cluster identifier. If this parameter is specified, information from only the specific DB cluster is returned. This parameter isn't case-sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    ", + "DescribeDBClustersMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBClusters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBEngineVersionsMessage$Engine": "

    The database engine to return.

    ", + "DescribeDBEngineVersionsMessage$EngineVersion": "

    The database engine version to return.

    Example: 5.1.49

    ", + "DescribeDBEngineVersionsMessage$DBParameterGroupFamily": "

    The name of a specific DB parameter group family to return details for.

    Constraints:

    • Must be 1 to 255 alphanumeric characters

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    ", + "DescribeDBEngineVersionsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBInstancesMessage$DBInstanceIdentifier": "

    The user-supplied instance identifier. If this parameter is specified, information from only the specific DB instance is returned. This parameter isn't case-sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    ", + "DescribeDBInstancesMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBInstances request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
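
    The Marker/MaxRecords contract is the same across the Describe* operations; a minimal paging loop in Go (svc as in the first sketch) might look like:

        input := &rds.DescribeDBInstancesInput{MaxRecords: aws.Int64(20)}
        for {
            page, err := svc.DescribeDBInstances(input)
            if err != nil {
                log.Fatal(err)
            }
            for _, db := range page.DBInstances {
                fmt.Println(aws.StringValue(db.DBInstanceIdentifier))
            }
            if page.Marker == nil { // no records beyond this page
                break
            }
            input.Marker = page.Marker // resume after the last record returned
        }

    The SDK's generated DescribeDBInstancesPages helper wraps this same loop.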

    ", + "DescribeDBLogFilesDetails$LogFileName": "

    The name of the log file for the specified DB instance.

    ", + "DescribeDBLogFilesMessage$DBInstanceIdentifier": "

    The customer-assigned name of the DB instance that contains the log files you want to list.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    ", + "DescribeDBLogFilesMessage$FilenameContains": "

    Filters the available log files for log file names that contain the specified string.

    ", + "DescribeDBLogFilesMessage$Marker": "

    The pagination token provided in the previous request. If this parameter is specified, the response includes only records beyond the marker, up to MaxRecords.

    ", + "DescribeDBLogFilesResponse$Marker": "

    A pagination token that can be used in a subsequent DescribeDBLogFiles request.

    ", + "DescribeDBParameterGroupsMessage$DBParameterGroupName": "

    The name of a specific DB parameter group to return details for.

    Constraints:

    • Must be 1 to 255 alphanumeric characters

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    ", + "DescribeDBParameterGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBParameterGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBParametersMessage$DBParameterGroupName": "

    The name of a specific DB parameter group to return details for.

    Constraints:

    • Must be 1 to 255 alphanumeric characters

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    ", + "DescribeDBParametersMessage$Source": "

    The parameter types to return.

    Default: All parameter types returned

    Valid Values: user | system | engine-default

    ", + "DescribeDBParametersMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBSecurityGroupsMessage$DBSecurityGroupName": "

    The name of the DB security group to return details for.

    ", + "DescribeDBSecurityGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBSecurityGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBSnapshotAttributesMessage$DBSnapshotIdentifier": "

    The identifier for the DB snapshot to describe the attributes for.

    ", + "DescribeDBSnapshotsMessage$DBInstanceIdentifier": "

    The ID of the DB instance to retrieve the list of DB snapshots for. This parameter cannot be used in conjunction with DBSnapshotIdentifier. This parameter is not case-sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    ", + "DescribeDBSnapshotsMessage$DBSnapshotIdentifier": "

    A specific DB snapshot identifier to describe. This parameter cannot be used in conjunction with DBInstanceIdentifier. This value is stored as a lowercase string.

    Constraints:

    • Must be 1 to 255 alphanumeric characters.

    • First character must be a letter.

    • Cannot end with a hyphen or contain two consecutive hyphens.

    • If this identifier is for an automated snapshot, the SnapshotType parameter must also be specified.

    ", + "DescribeDBSnapshotsMessage$SnapshotType": "

    The type of snapshots to be returned. You can specify one of the following values:

    • automated - Return all DB snapshots that have been automatically taken by Amazon RDS for my AWS account.

    • manual - Return all DB snapshots that have been taken by my AWS account.

    • shared - Return all manual DB snapshots that have been shared to my AWS account.

    • public - Return all DB snapshots that have been marked as public.

    If you don't specify a SnapshotType value, then both automated and manual snapshots are returned. You can include shared snapshots with these results by setting the IncludeShared parameter to true. You can include public snapshots with these results by setting the IncludePublic parameter to true.

    The IncludeShared and IncludePublic parameters don't apply for SnapshotType values of manual or automated. The IncludePublic parameter doesn't apply when SnapshotType is set to shared. The IncludeShared parameter doesn't apply when SnapshotType is set to public.

    ", + "DescribeDBSnapshotsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBSnapshots request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBSubnetGroupsMessage$DBSubnetGroupName": "

    The name of the DB subnet group to return details for.

    ", + "DescribeDBSubnetGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBSubnetGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeEngineDefaultClusterParametersMessage$DBParameterGroupFamily": "

    The name of the DB cluster parameter group family to return engine parameter information for.

    ", + "DescribeEngineDefaultClusterParametersMessage$Marker": "

    An optional pagination token provided by a previous DescribeEngineDefaultClusterParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeEngineDefaultParametersMessage$DBParameterGroupFamily": "

    The name of the DB parameter group family.

    ", + "DescribeEngineDefaultParametersMessage$Marker": "

    An optional pagination token provided by a previous DescribeEngineDefaultParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeEventCategoriesMessage$SourceType": "

    The type of source that will be generating the events.

    Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot

    ", + "DescribeEventSubscriptionsMessage$SubscriptionName": "

    The name of the RDS event notification subscription you want to describe.

    ", + "DescribeEventSubscriptionsMessage$Marker": "

    An optional pagination token provided by a previous DescribeEventSubscriptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeEventsMessage$SourceIdentifier": "

    The identifier of the event source for which events will be returned. If not specified, then all sources are included in the response.

    Constraints:

    • If SourceIdentifier is supplied, SourceType must also be provided.

    • If the source type is DBInstance, then a DBInstanceIdentifier must be supplied.

    • If the source type is DBSecurityGroup, a DBSecurityGroupName must be supplied.

    • If the source type is DBParameterGroup, a DBParameterGroupName must be supplied.

    • If the source type is DBSnapshot, a DBSnapshotIdentifier must be supplied.

    • Cannot end with a hyphen or contain two consecutive hyphens.
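
    A sketch of the SourceIdentifier/SourceType pairing described by these constraints (svc as before; the instance name is hypothetical):

        out, err := svc.DescribeEvents(&rds.DescribeEventsInput{
            SourceType:       aws.String("db-instance"),
            SourceIdentifier: aws.String("mydbinstance"), // requires SourceType above
            Duration:         aws.Int64(1440),            // minutes of history to return
        })
        if err != nil {
            log.Fatal(err)
        }
        for _, e := range out.Events {
            fmt.Println(aws.StringValue(e.Message))
        }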

    ", + "DescribeEventsMessage$Marker": "

    An optional pagination token provided by a previous DescribeEvents request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeOptionGroupOptionsMessage$EngineName": "

    A required parameter. Options available for the given engine name are described.

    ", + "DescribeOptionGroupOptionsMessage$MajorEngineVersion": "

    If specified, filters the results to include only options for the specified major engine version.

    ", + "DescribeOptionGroupOptionsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeOptionGroupsMessage$OptionGroupName": "

    The name of the option group to describe. Cannot be supplied together with EngineName or MajorEngineVersion.

    ", + "DescribeOptionGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeOptionGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeOptionGroupsMessage$EngineName": "

    Filters the list of option groups to only include groups associated with a specific database engine.

    ", + "DescribeOptionGroupsMessage$MajorEngineVersion": "

    Filters the list of option groups to only include groups associated with a specific database engine version. If specified, then EngineName must also be specified.

    ", + "DescribeOrderableDBInstanceOptionsMessage$Engine": "

    The name of the engine to retrieve DB instance options for.

    ", + "DescribeOrderableDBInstanceOptionsMessage$EngineVersion": "

    The engine version filter value. Specify this parameter to show only the available offerings matching the specified engine version.

    ", + "DescribeOrderableDBInstanceOptionsMessage$DBInstanceClass": "

    The DB instance class filter value. Specify this parameter to show only the available offerings matching the specified DB instance class.

    ", + "DescribeOrderableDBInstanceOptionsMessage$LicenseModel": "

    The license model filter value. Specify this parameter to show only the available offerings matching the specified license model.

    ", + "DescribeOrderableDBInstanceOptionsMessage$Marker": "

    An optional pagination token provided by a previous DescribeOrderableDBInstanceOptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribePendingMaintenanceActionsMessage$ResourceIdentifier": "

    The ARN of a resource to return pending maintenance actions for.

    ", + "DescribePendingMaintenanceActionsMessage$Marker": "

    An optional pagination token provided by a previous DescribePendingMaintenanceActions request. If this parameter is specified, the response includes only records beyond the marker, up to a number of records specified by MaxRecords.

    ", + "DescribeReservedDBInstancesMessage$ReservedDBInstanceId": "

    The reserved DB instance identifier filter value. Specify this parameter to show only the reservation that matches the specified reservation ID.

    ", + "DescribeReservedDBInstancesMessage$ReservedDBInstancesOfferingId": "

    The offering identifier filter value. Specify this parameter to show only purchased reservations matching the specified offering identifier.

    ", + "DescribeReservedDBInstancesMessage$DBInstanceClass": "

    The DB instance class filter value. Specify this parameter to show only those reservations matching the specified DB instance class.

    ", + "DescribeReservedDBInstancesMessage$Duration": "

    The duration filter value, specified in years or seconds. Specify this parameter to show only reservations for this duration.

    Valid Values: 1 | 3 | 31536000 | 94608000

    ", + "DescribeReservedDBInstancesMessage$ProductDescription": "

    The product description filter value. Specify this parameter to show only those reservations matching the specified product description.

    ", + "DescribeReservedDBInstancesMessage$OfferingType": "

    The offering type filter value. Specify this parameter to show only the available offerings matching the specified offering type.

    Valid Values: \"Partial Upfront\" | \"All Upfront\" | \"No Upfront\"

    ", + "DescribeReservedDBInstancesMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeReservedDBInstancesOfferingsMessage$ReservedDBInstancesOfferingId": "

    The offering identifier filter value. Specify this parameter to show only the available offering that matches the specified reservation identifier.

    Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706

    ", + "DescribeReservedDBInstancesOfferingsMessage$DBInstanceClass": "

    The DB instance class filter value. Specify this parameter to show only the available offerings matching the specified DB instance class.

    ", + "DescribeReservedDBInstancesOfferingsMessage$Duration": "

    Duration filter value, specified in years or seconds. Specify this parameter to show only reservations for this duration.

    Valid Values: 1 | 3 | 31536000 | 94608000

    ", + "DescribeReservedDBInstancesOfferingsMessage$ProductDescription": "

    Product description filter value. Specify this parameter to show only the available offerings matching the specified product description.

    ", + "DescribeReservedDBInstancesOfferingsMessage$OfferingType": "

    The offering type filter value. Specify this parameter to show only the available offerings matching the specified offering type.

    Valid Values: \"Partial Upfront\" | \"All Upfront\" | \"No Upfront\"

    ", + "DescribeReservedDBInstancesOfferingsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DomainMembership$Domain": "

    The identifier of the Active Directory Domain.

    ", + "DomainMembership$Status": "

    The status of the DB instance's Active Directory Domain membership, such as joined, pending-join, or failed.

    ", + "DomainMembership$FQDN": "

    The fully qualified domain name of the Active Directory Domain.

    ", + "DomainMembership$IAMRoleName": "

    The name of the IAM role to be used when making API calls to the Directory Service.

    ", + "DownloadDBLogFilePortionDetails$LogFileData": "

    Entries from the specified log file.

    ", + "DownloadDBLogFilePortionDetails$Marker": "

    A pagination token that can be used in a subsequent DownloadDBLogFilePortion request.

    ", + "DownloadDBLogFilePortionMessage$DBInstanceIdentifier": "

    The customer-assigned name of the DB instance that contains the log files you want to list.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    ", + "DownloadDBLogFilePortionMessage$LogFileName": "

    The name of the log file to be downloaded.

    ", + "DownloadDBLogFilePortionMessage$Marker": "

    The pagination token provided in the previous request or \"0\". If the Marker parameter is specified, the response includes only records beyond the marker until the end of the file or up to NumberOfLines.
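
    A sketch of reading a complete log file with this marker scheme (svc as before; instance and file names are hypothetical):

        input := &rds.DownloadDBLogFilePortionInput{
            DBInstanceIdentifier: aws.String("mydbinstance"),
            LogFileName:          aws.String("error/mysql-error.log"),
            Marker:               aws.String("0"), // start at the head of the file
        }
        for {
            out, err := svc.DownloadDBLogFilePortion(input)
            if err != nil {
                log.Fatal(err)
            }
            fmt.Print(aws.StringValue(out.LogFileData))
            if !aws.BoolValue(out.AdditionalDataPending) {
                break // the whole file has been returned
            }
            input.Marker = out.Marker
        }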

    ", + "EC2SecurityGroup$Status": "

    Provides the status of the EC2 security group. Status can be \"authorizing\", \"authorized\", \"revoking\", and \"revoked\".

    ", + "EC2SecurityGroup$EC2SecurityGroupName": "

    Specifies the name of the EC2 security group.

    ", + "EC2SecurityGroup$EC2SecurityGroupId": "

    Specifies the ID of the EC2 security group.

    ", + "EC2SecurityGroup$EC2SecurityGroupOwnerId": "

    Specifies the AWS ID of the owner of the EC2 security group specified in the EC2SecurityGroupName field.

    ", + "Endpoint$Address": "

    Specifies the DNS address of the DB instance.

    ", + "Endpoint$HostedZoneId": "

    Specifies the ID that Amazon Route 53 assigns when you create a hosted zone.

    ", + "EngineDefaults$DBParameterGroupFamily": "

    Specifies the name of the DB parameter group family that the engine default parameters apply to.

    ", + "EngineDefaults$Marker": "

    An optional pagination token provided by a previous EngineDefaults request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "Event$SourceIdentifier": "

    Provides the identifier for the source of the event.

    ", + "Event$Message": "

    Provides the text of this event.

    ", + "EventCategoriesList$member": null, + "EventCategoriesMap$SourceType": "

    The source type that the returned categories belong to.

    ", + "EventSubscription$CustomerAwsId": "

    The AWS customer account associated with the RDS event notification subscription.

    ", + "EventSubscription$CustSubscriptionId": "

    The RDS event notification subscription Id.

    ", + "EventSubscription$SnsTopicArn": "

    The topic ARN of the RDS event notification subscription.

    ", + "EventSubscription$Status": "

    The status of the RDS event notification subscription.

    Constraints:

    Can be one of the following: creating | modifying | deleting | active | no-permission | topic-not-exist

    The status \"no-permission\" indicates that RDS no longer has permission to post to the SNS topic. The status \"topic-not-exist\" indicates that the topic was deleted after the subscription was created.

    ", + "EventSubscription$SubscriptionCreationTime": "

    The time the RDS event notification subscription was created.

    ", + "EventSubscription$SourceType": "

    The source type for the RDS event notification subscription.

    ", + "EventSubscriptionsMessage$Marker": "

    An optional pagination token provided by a previous DescribeEventSubscriptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "EventsMessage$Marker": "

    An optional pagination token provided by a previous DescribeEvents request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "FailoverDBClusterMessage$DBClusterIdentifier": "

    A DB cluster identifier to force a failover for. This parameter is not case-sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    ", + "Filter$Name": "

    This parameter is not currently supported.

    ", + "FilterValueList$member": null, + "IPRange$Status": "

    Specifies the status of the IP range. Status can be \"authorizing\", \"authorized\", \"revoking\", and \"revoked\".

    ", + "IPRange$CIDRIP": "

    Specifies the IP range.

    ", + "KeyList$member": null, + "ListTagsForResourceMessage$ResourceName": "

    The Amazon RDS resource with tags to be listed. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).
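
    A sketch with a fully constructed ARN (svc as before; region, account, and instance name are hypothetical):

        out, err := svc.ListTagsForResource(&rds.ListTagsForResourceInput{
            // DB instance ARNs take the form arn:aws:rds:<region>:<account>:db:<id>
            ResourceName: aws.String("arn:aws:rds:us-east-1:123456789012:db:mydbinstance"),
        })
        if err != nil {
            log.Fatal(err)
        }
        for _, t := range out.TagList {
            fmt.Printf("%s=%s\n", aws.StringValue(t.Key), aws.StringValue(t.Value))
        }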

    ", + "ModifyDBClusterMessage$DBClusterIdentifier": "

    The DB cluster identifier for the cluster being modified. This parameter is not case-sensitive.

    Constraints:

    • Must be the identifier for an existing DB cluster.

    • Must contain from 1 to 63 alphanumeric characters or hyphens.

    • First character must be a letter.

    • Cannot end with a hyphen or contain two consecutive hyphens.

    ", + "ModifyDBClusterMessage$NewDBClusterIdentifier": "

    The new DB cluster identifier for the DB cluster when renaming a DB cluster. This value is stored as a lowercase string.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-cluster2

    ", + "ModifyDBClusterMessage$DBClusterParameterGroupName": "

    The name of the DB cluster parameter group to use for the DB cluster.

    ", + "ModifyDBClusterMessage$MasterUserPassword": "

    The new password for the master database user. This password can contain any printable ASCII character except \"/\", \"\"\", or \"@\".

    Constraints: Must contain from 8 to 41 characters.

    ", + "ModifyDBClusterMessage$OptionGroupName": "

    A value that indicates that the DB cluster should be associated with the specified option group. Changing this parameter does not result in an outage except in the following case, and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. If the parameter change results in an option group that enables OEM, this change can cause a brief (sub-second) period during which new connections are rejected but existing connections are not interrupted.

    Permanent options cannot be removed from an option group. The option group cannot be removed from a DB cluster once it is associated with a DB cluster.

    ", + "ModifyDBClusterMessage$PreferredBackupWindow": "

    The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter.

    Default: A 30-minute window selected at random from an 8-hour block of time per region. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

    Constraints:

    • Must be in the format hh24:mi-hh24:mi.

    • Times should be in Universal Coordinated Time (UTC).

    • Must not conflict with the preferred maintenance window.

    • Must be at least 30 minutes.

    ", + "ModifyDBClusterMessage$PreferredMaintenanceWindow": "

    The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

    Format: ddd:hh24:mi-ddd:hh24:mi

    Default: A 30-minute window selected at random from an 8-hour block of time per region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

    Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun

    Constraints: Minimum 30-minute window.

    ", + "ModifyDBClusterParameterGroupMessage$DBClusterParameterGroupName": "

    The name of the DB cluster parameter group to modify.

    ", + "ModifyDBClusterSnapshotAttributeMessage$DBClusterSnapshotIdentifier": "

    The identifier for the DB cluster snapshot to modify the attributes for.

    ", + "ModifyDBClusterSnapshotAttributeMessage$AttributeName": "

    The name of the DB cluster snapshot attribute to modify.

    To manage authorization for other AWS accounts to copy or restore a manual DB cluster snapshot, set this value to restore.
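
    A sketch of sharing a manual cluster snapshot via the restore attribute (svc as before; the snapshot identifier and account ID are hypothetical):

        _, err := svc.ModifyDBClusterSnapshotAttribute(&rds.ModifyDBClusterSnapshotAttributeInput{
            DBClusterSnapshotIdentifier: aws.String("my-cluster-snapshot"),
            AttributeName:               aws.String("restore"),
            // Accounts listed here may copy or restore the snapshot.
            ValuesToAdd: []*string{aws.String("123456789012")},
        })
        if err != nil {
            log.Fatal(err)
        }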

    ", + "ModifyDBInstanceMessage$DBInstanceIdentifier": "

    The DB instance identifier. This value is stored as a lowercase string.

    Constraints:

    • Must be the identifier for an existing DB instance

    • Must contain from 1 to 63 alphanumeric characters or hyphens

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    ", + "ModifyDBInstanceMessage$DBInstanceClass": "

    The new compute and memory capacity of the DB instance. To determine the instance classes that are available for a particular DB engine, use the DescribeOrderableDBInstanceOptions action.

    Passing a value for this setting causes an outage during the change and is applied during the next maintenance window, unless ApplyImmediately is specified as true for this request.

    Default: Uses existing setting

    Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.m4.large | db.m4.xlarge | db.m4.2xlarge | db.m4.4xlarge | db.m4.10xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge | db.t2.micro | db.t2.small | db.t2.medium | db.t2.large

    ", + "ModifyDBInstanceMessage$MasterUserPassword": "

    The new password for the DB instance master user. Can be any printable ASCII character except \"/\", \"\"\", or \"@\".

    Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the MasterUserPassword element exists in the PendingModifiedValues element of the operation response.

    Default: Uses existing setting

    Constraints: Must be 8 to 41 alphanumeric characters (MySQL, MariaDB, and Amazon Aurora), 8 to 30 alphanumeric characters (Oracle), or 8 to 128 alphanumeric characters (SQL Server).

    Amazon RDS API actions never return the password, so this action provides a way to regain access to a primary instance user if the password is lost. This includes restoring privileges that might have been accidentally revoked.

    ", + "ModifyDBInstanceMessage$DBParameterGroupName": "

    The name of the DB parameter group to apply to the DB instance. Changing this setting does not result in an outage. The parameter group name itself is changed immediately, but the actual parameter changes are not applied until you reboot the instance without failover. The DB instance will NOT be rebooted automatically, and the parameter changes will NOT be applied during the next maintenance window.

    Default: Uses existing setting

    Constraints: The DB parameter group must be in the same DB parameter group family as this DB instance.

    ", + "ModifyDBInstanceMessage$PreferredBackupWindow": "

    The daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod parameter. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible.

    Constraints:

    • Must be in the format hh24:mi-hh24:mi

    • Times should be in Universal Coordinated Time (UTC)

    • Must not conflict with the preferred maintenance window

    • Must be at least 30 minutes

    ", + "ModifyDBInstanceMessage$PreferredMaintenanceWindow": "

    The weekly time range (in UTC) during which system maintenance can occur, which might result in an outage. Changing this parameter does not result in an outage, except in the following situation, and the change is asynchronously applied as soon as possible. If there are pending actions that cause a reboot, and the maintenance window is changed to include the current time, then changing this parameter will cause a reboot of the DB instance. If moving this window to the current time, there must be at least 30 minutes between the current time and end of the window to ensure pending changes are applied.

    Default: Uses existing setting

    Format: ddd:hh24:mi-ddd:hh24:mi

    Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun

    Constraints: Must be at least 30 minutes
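
    A sketch showing both window formats together (svc as before; the identifier and times are hypothetical):

        _, err := svc.ModifyDBInstance(&rds.ModifyDBInstanceInput{
            DBInstanceIdentifier:       aws.String("mydbinstance"),
            PreferredBackupWindow:      aws.String("04:00-04:30"),         // hh24:mi-hh24:mi, UTC
            PreferredMaintenanceWindow: aws.String("sun:05:00-sun:05:30"), // ddd:hh24:mi-ddd:hh24:mi
        })
        if err != nil {
            log.Fatal(err)
        }

    Per the constraints above, each window must be at least 30 minutes and the backup window must not conflict with the maintenance window.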

    ", + "ModifyDBInstanceMessage$EngineVersion": "

    The version number of the database engine to upgrade to. Changing this parameter results in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

    For major version upgrades, if a non-default DB parameter group is currently in use, a new DB parameter group in the DB parameter group family for the new engine version must be specified. The new DB parameter group can be the default for that DB parameter group family.

    For a list of valid engine versions, see CreateDBInstance.

    ", + "ModifyDBInstanceMessage$OptionGroupName": "

    Indicates that the DB instance should be associated with the specified option group. Changing this parameter does not result in an outage except in the following case and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. If the parameter change results in an option group that enables OEM, this change can cause a brief (sub-second) period during which new connections are rejected but existing connections are not interrupted.

    Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance.

    ", + "ModifyDBInstanceMessage$NewDBInstanceIdentifier": "

    The new DB instance identifier for the DB instance when renaming a DB instance. When you change the DB instance identifier, an instance reboot occurs immediately if you set Apply Immediately to true, or during the next maintenance window if you set Apply Immediately to false. This value is stored as a lowercase string.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    ", + "ModifyDBInstanceMessage$StorageType": "

    Specifies the storage type to be associated with the DB instance.

    Valid values: standard | gp2 | io1

    If you specify io1, you must also include a value for the Iops parameter.

    Default: io1 if the Iops parameter is specified; otherwise standard
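
    A sketch of the io1/Iops pairing (svc as before; identifier and values are hypothetical):

        _, err := svc.ModifyDBInstance(&rds.ModifyDBInstanceInput{
            DBInstanceIdentifier: aws.String("mydbinstance"),
            StorageType:          aws.String("io1"),
            Iops:                 aws.Int64(1000), // required whenever StorageType is io1
            ApplyImmediately:     aws.Bool(true),
        })
        if err != nil {
            log.Fatal(err)
        }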

    ", + "ModifyDBInstanceMessage$TdeCredentialArn": "

    The ARN from the Key Store with which to associate the instance for TDE encryption.

    ", + "ModifyDBInstanceMessage$TdeCredentialPassword": "

    The password for the given ARN from the Key Store in order to access the device.

    ", + "ModifyDBInstanceMessage$CACertificateIdentifier": "

    Indicates the certificate that needs to be associated with the instance.

    ", + "ModifyDBInstanceMessage$Domain": "

    Specify the Active Directory Domain to move the instance to.

    The specified Active Directory Domain must be created prior to this operation. Currently, only a SQL Server instance can be created in an Active Directory Domain.

    ", + "ModifyDBInstanceMessage$MonitoringRoleArn": "

    The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to CloudWatch Logs. For example, arn:aws:iam::123456789012:role/emaccess. For information on creating a monitoring role, go to To create an IAM role for Amazon RDS Enhanced Monitoring.

    If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn value.
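
    A sketch of the MonitoringInterval/MonitoringRoleArn pairing (svc as before; the role ARN is hypothetical):

        _, err := svc.ModifyDBInstance(&rds.ModifyDBInstanceInput{
            DBInstanceIdentifier: aws.String("mydbinstance"),
            MonitoringInterval:   aws.Int64(60), // seconds; non-zero requires a role ARN
            MonitoringRoleArn:    aws.String("arn:aws:iam::123456789012:role/emaccess"),
        })
        if err != nil {
            log.Fatal(err)
        }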

    ", + "ModifyDBInstanceMessage$DomainIAMRoleName": "

    Specify the name of the IAM role to be used when making API calls to the Directory Service.

    ", + "ModifyDBParameterGroupMessage$DBParameterGroupName": "

    The name of the DB parameter group.

    Constraints:

    • Must be the name of an existing DB parameter group

    • Must be 1 to 255 alphanumeric characters

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    ", + "ModifyDBSnapshotAttributeMessage$DBSnapshotIdentifier": "

    The identifier for the DB snapshot to modify the attributes for.

    ", + "ModifyDBSnapshotAttributeMessage$AttributeName": "

    The name of the DB snapshot attribute to modify.

    To manage authorization for other AWS accounts to copy or restore a manual DB snapshot, set this value to restore.

    ", + "ModifyDBSubnetGroupMessage$DBSubnetGroupName": "

    The name for the DB subnet group. This value is stored as a lowercase string.

    Constraints: Must contain no more than 255 alphanumeric characters, periods, underscores, spaces, or hyphens. Must not be default.

    Example: mySubnetgroup

    ", + "ModifyDBSubnetGroupMessage$DBSubnetGroupDescription": "

    The description for the DB subnet group.

    ", + "ModifyEventSubscriptionMessage$SubscriptionName": "

    The name of the RDS event notification subscription.

    ", + "ModifyEventSubscriptionMessage$SnsTopicArn": "

    The Amazon Resource Name (ARN) of the SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it.

    ", + "ModifyEventSubscriptionMessage$SourceType": "

    The type of source that will be generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. If this value is not specified, all events are returned.

    Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot

    ", + "ModifyOptionGroupMessage$OptionGroupName": "

    The name of the option group to be modified.

    Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance.

    ", + "Option$OptionName": "

    The name of the option.

    ", + "Option$OptionDescription": "

    The description of the option.

    ", + "OptionConfiguration$OptionName": "

    The name of the option to include in the option group.

    ", + "OptionGroup$OptionGroupName": "

    Specifies the name of the option group.

    ", + "OptionGroup$OptionGroupDescription": "

    Provides a description of the option group.

    ", + "OptionGroup$EngineName": "

    Indicates the name of the engine that this option group can be applied to.

    ", + "OptionGroup$MajorEngineVersion": "

    Indicates the major engine version associated with this option group.

    ", + "OptionGroup$VpcId": "

    If AllowsVpcAndNonVpcInstanceMemberships is false, this field is blank. If AllowsVpcAndNonVpcInstanceMemberships is true and this field is blank, then this option group can be applied to both VPC and non-VPC instances. If this field contains a value, then this option group can only be applied to instances that are in the VPC indicated by this field.

    ", + "OptionGroupMembership$OptionGroupName": "

    The name of the option group that the instance belongs to.

    ", + "OptionGroupMembership$Status": "

    The status of the DB instance's option group membership. Valid values are: in-sync, pending-apply, pending-removal, pending-maintenance-apply, pending-maintenance-removal, applying, removing, and failed.

    ", + "OptionGroupOption$Name": "

    The name of the option.

    ", + "OptionGroupOption$Description": "

    The description of the option.

    ", + "OptionGroupOption$EngineName": "

    The name of the engine that this option can be applied to.

    ", + "OptionGroupOption$MajorEngineVersion": "

    Indicates the major engine version that the option is available for.

    ", + "OptionGroupOption$MinimumRequiredMinorEngineVersion": "

    The minimum required engine version for the option to be applied.

    ", + "OptionGroupOptionSetting$SettingName": "

    The name of the option group option.

    ", + "OptionGroupOptionSetting$SettingDescription": "

    The description of the option group option.

    ", + "OptionGroupOptionSetting$DefaultValue": "

    The default value for the option group option.

    ", + "OptionGroupOptionSetting$ApplyType": "

    The DB engine specific parameter type for the option group option.

    ", + "OptionGroupOptionSetting$AllowedValues": "

    Indicates the acceptable values for the option group option.

    ", + "OptionGroupOptionsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "OptionGroups$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "OptionNamesList$member": null, + "OptionSetting$Name": "

    The name of the option that has settings that you can set.

    ", + "OptionSetting$Value": "

    The current value of the option setting.

    ", + "OptionSetting$DefaultValue": "

    The default value of the option setting.

    ", + "OptionSetting$Description": "

    The description of the option setting.

    ", + "OptionSetting$ApplyType": "

    The DB engine specific parameter type.

    ", + "OptionSetting$DataType": "

    The data type of the option setting.

    ", + "OptionSetting$AllowedValues": "

    The allowed values of the option setting.

    ", + "OptionsDependedOn$member": null, + "OrderableDBInstanceOption$Engine": "

    The engine type of the orderable DB instance.

    ", + "OrderableDBInstanceOption$EngineVersion": "

    The engine version of the orderable DB instance.

    ", + "OrderableDBInstanceOption$DBInstanceClass": "

    The DB instance class for the orderable DB instance.

    ", + "OrderableDBInstanceOption$LicenseModel": "

    The license model for the orderable DB instance.

    ", + "OrderableDBInstanceOption$StorageType": "

    Indicates the storage type for this orderable DB instance.

    ", + "OrderableDBInstanceOptionsMessage$Marker": "

    An optional pagination token provided by a previous DescribeOrderableDBInstanceOptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "Parameter$ParameterName": "

    Specifies the name of the parameter.

    ", + "Parameter$ParameterValue": "

    Specifies the value of the parameter.

    ", + "Parameter$Description": "

    Provides a description of the parameter.

    ", + "Parameter$Source": "

    Indicates the source of the parameter value.

    ", + "Parameter$ApplyType": "

    Specifies the engine-specific parameter type.

    ", + "Parameter$DataType": "

    Specifies the valid data type for the parameter.

    ", + "Parameter$AllowedValues": "

    Specifies the valid range of values for the parameter.

    ", + "Parameter$MinimumEngineVersion": "

    The earliest engine version to which the parameter can apply.

    ", + "PendingMaintenanceAction$Action": "

    The type of pending maintenance action that is available for the resource.

    ", + "PendingMaintenanceAction$OptInStatus": "

    Indicates the type of opt-in request that has been received for the resource.

    ", + "PendingMaintenanceAction$Description": "

    A description providing more detail about the maintenance action.

    ", + "PendingMaintenanceActionsMessage$Marker": "

    An optional pagination token provided by a previous DescribePendingMaintenanceActions request. If this parameter is specified, the response includes only records beyond the marker, up to a number of records specified by MaxRecords.

    ", + "PendingModifiedValues$DBInstanceClass": "

    Contains the new DBInstanceClass for the DB instance that will be applied or is in progress.

    ", + "PendingModifiedValues$MasterUserPassword": "

    Contains the pending or in-progress change of the master credentials for the DB instance.

    ", + "PendingModifiedValues$EngineVersion": "

    Indicates the database engine version.

    ", + "PendingModifiedValues$DBInstanceIdentifier": "

    Contains the new DBInstanceIdentifier for the DB instance that will be applied or is in progress.

    ", + "PendingModifiedValues$StorageType": "

    Specifies the storage type to be associated with the DB instance.

    ", + "PendingModifiedValues$CACertificateIdentifier": "

    Specifies the identifier of the CA certificate for the DB instance.

    ", + "PromoteReadReplicaDBClusterMessage$DBClusterIdentifier": "

    The identifier of the DB cluster Read Replica to promote. This parameter is not case-sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens.

    • First character must be a letter.

    • Cannot end with a hyphen or contain two consecutive hyphens.

    Example: my-cluster-replica1

    ", + "PromoteReadReplicaMessage$DBInstanceIdentifier": "

    The DB instance identifier. This value is stored as a lowercase string.

    Constraints:

    • Must be the identifier for an existing Read Replica DB instance

    • Must contain from 1 to 63 alphanumeric characters or hyphens

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: mydbinstance

    ", + "PromoteReadReplicaMessage$PreferredBackupWindow": "

    The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter.

    Default: A 30-minute window selected at random from an 8-hour block of time per region. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

    Constraints:

    • Must be in the format hh24:mi-hh24:mi.

    • Times should be in Universal Coordinated Time (UTC).

    • Must not conflict with the preferred maintenance window.

    • Must be at least 30 minutes.
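
    Because the window format above is easy to get wrong, here is a small self-contained Go sketch (not part of the SDK) that checks the hh24:mi-hh24:mi shape and the 30-minute minimum; the remaining constraint, no conflict with the preferred maintenance window, depends on other instance settings and is deliberately out of scope:

    package main

    import (
        "fmt"
        "regexp"
        "time"
    )

    // validBackupWindow checks the documented hh24:mi-hh24:mi shape and the
    // 30-minute minimum. It does not model the maintenance-window conflict rule.
    func validBackupWindow(w string) bool {
        re := regexp.MustCompile(`^([01][0-9]|2[0-3]):[0-5][0-9]-([01][0-9]|2[0-3]):[0-5][0-9]$`)
        if !re.MatchString(w) {
            return false
        }
        start, _ := time.Parse("15:04", w[:5])
        end, _ := time.Parse("15:04", w[6:])
        d := end.Sub(start)
        if d <= 0 {
            d += 24 * time.Hour // treat the window as wrapping past midnight (UTC)
        }
        return d >= 30*time.Minute
    }

    func main() {
        fmt.Println(validBackupWindow("03:00-03:30")) // true
        fmt.Println(validBackupWindow("03:00-03:15")) // false: under 30 minutes
    }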

    ", + "PurchaseReservedDBInstancesOfferingMessage$ReservedDBInstancesOfferingId": "

    The ID of the Reserved DB instance offering to purchase.

    Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706

    ", + "PurchaseReservedDBInstancesOfferingMessage$ReservedDBInstanceId": "

    Customer-specified identifier to track this reservation.

    Example: myreservationID

    ", + "ReadReplicaDBInstanceIdentifierList$member": null, + "ReadReplicaIdentifierList$member": null, + "RebootDBInstanceMessage$DBInstanceIdentifier": "

    The DB instance identifier. This parameter is stored as a lowercase string.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    ", + "RecurringCharge$RecurringChargeFrequency": "

    The frequency of the recurring charge.

    ", + "RemoveSourceIdentifierFromSubscriptionMessage$SubscriptionName": "

    The name of the RDS event notification subscription you want to remove a source identifier from.

    ", + "RemoveSourceIdentifierFromSubscriptionMessage$SourceIdentifier": "

    The source identifier to be removed from the subscription, such as the DB instance identifier for a DB instance or the name of a security group.

    ", + "RemoveTagsFromResourceMessage$ResourceName": "

    The Amazon RDS resource the tags will be removed from. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

    ", + "ReservedDBInstance$ReservedDBInstanceId": "

    The unique identifier for the reservation.

    ", + "ReservedDBInstance$ReservedDBInstancesOfferingId": "

    The offering identifier.

    ", + "ReservedDBInstance$DBInstanceClass": "

    The DB instance class for the reserved DB instance.

    ", + "ReservedDBInstance$CurrencyCode": "

    The currency code for the reserved DB instance.

    ", + "ReservedDBInstance$ProductDescription": "

    The description of the reserved DB instance.

    ", + "ReservedDBInstance$OfferingType": "

    The offering type of this reserved DB instance.

    ", + "ReservedDBInstance$State": "

    The state of the reserved DB instance.

    ", + "ReservedDBInstanceMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "ReservedDBInstancesOffering$ReservedDBInstancesOfferingId": "

    The offering identifier.

    ", + "ReservedDBInstancesOffering$DBInstanceClass": "

    The DB instance class for the reserved DB instance.

    ", + "ReservedDBInstancesOffering$CurrencyCode": "

    The currency code for the reserved DB instance offering.

    ", + "ReservedDBInstancesOffering$ProductDescription": "

    The database engine used by the offering.

    ", + "ReservedDBInstancesOffering$OfferingType": "

    The offering type.

    ", + "ReservedDBInstancesOfferingMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "ResetDBClusterParameterGroupMessage$DBClusterParameterGroupName": "

    The name of the DB cluster parameter group to reset.

    ", + "ResetDBParameterGroupMessage$DBParameterGroupName": "

    The name of the DB parameter group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    ", + "ResourcePendingMaintenanceActions$ResourceIdentifier": "

    The ARN of the resource that has pending maintenance actions.

    ", + "RestoreDBClusterFromSnapshotMessage$DBClusterIdentifier": "

    The name of the DB cluster to create from the DB cluster snapshot. This parameter isn't case-sensitive.

    Constraints:

    • Must contain from 1 to 255 alphanumeric characters or hyphens

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-snapshot-id

    ", + "RestoreDBClusterFromSnapshotMessage$SnapshotIdentifier": "

    The identifier for the DB cluster snapshot to restore from.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    ", + "RestoreDBClusterFromSnapshotMessage$Engine": "

    The database engine to use for the new DB cluster.

    Default: The same as source

    Constraint: Must be compatible with the engine of the source

    ", + "RestoreDBClusterFromSnapshotMessage$EngineVersion": "

    The version of the database engine to use for the new DB cluster.

    ", + "RestoreDBClusterFromSnapshotMessage$DBSubnetGroupName": "

    The name of the DB subnet group to use for the new DB cluster.

    Constraints: Must contain no more than 255 alphanumeric characters, periods, underscores, spaces, or hyphens. Must not be default.

    Example: mySubnetgroup

    ", + "RestoreDBClusterFromSnapshotMessage$DatabaseName": "

    The database name for the restored DB cluster.

    ", + "RestoreDBClusterFromSnapshotMessage$OptionGroupName": "

    The name of the option group to use for the restored DB cluster.

    ", + "RestoreDBClusterFromSnapshotMessage$KmsKeyId": "

    The KMS key identifier to use when restoring an encrypted DB cluster from an encrypted DB cluster snapshot.

    The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are restoring a DB cluster with the same AWS account that owns the KMS encryption key used to encrypt the new DB cluster, then you can use the KMS key alias instead of the ARN for the KMS encryption key.

    If you do not specify a value for the KmsKeyId parameter, then the following will occur:

    • If the DB cluster snapshot is encrypted, then the restored DB cluster is encrypted using the KMS key that was used to encrypt the DB cluster snapshot.

    • If the DB cluster snapshot is not encrypted, then the restored DB cluster is not encrypted.

    If SnapshotIdentifier refers to a DB cluster snapshot that is not encrypted, and you specify a value for the KmsKeyId parameter, then the restore request is rejected.
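
    The KmsKeyId rules above reduce to: omit the parameter to inherit the snapshot's encryption state, and pass a key only when the snapshot is itself encrypted. A hedged aws-sdk-go sketch of the encrypted-restore case; every identifier and the key ARN below is a placeholder:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/rds"
    )

    func main() {
        svc := rds.New(session.Must(session.NewSession()))

        // Restoring an encrypted snapshot: KmsKeyId selects the key for the
        // new cluster. Omitting it reuses the key that encrypted the snapshot;
        // setting it for an unencrypted snapshot fails, per the rules above.
        out, err := svc.RestoreDBClusterFromSnapshot(&rds.RestoreDBClusterFromSnapshotInput{
            DBClusterIdentifier: aws.String("my-restored-cluster"), // placeholder
            SnapshotIdentifier:  aws.String("my-cluster-snapshot"), // placeholder
            Engine:              aws.String("aurora"),
            KmsKeyId:            aws.String("arn:aws:kms:us-east-1:123456789012:key/EXAMPLE"),
        })
        if err != nil {
            fmt.Println("restore failed:", err)
            return
        }
        fmt.Println("status:", aws.StringValue(out.DBCluster.Status))
    }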

    ", + "RestoreDBClusterToPointInTimeMessage$DBClusterIdentifier": "

    The name of the new DB cluster to be created.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    ", + "RestoreDBClusterToPointInTimeMessage$SourceDBClusterIdentifier": "

    The identifier of the source DB cluster from which to restore.

    Constraints:

    • Must be the identifier of an existing DB cluster

    • Must contain from 1 to 63 alphanumeric characters or hyphens

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    ", + "RestoreDBClusterToPointInTimeMessage$DBSubnetGroupName": "

    The DB subnet group name to use for the new DB cluster.

    Constraints: Must contain no more than 255 alphanumeric characters, periods, underscores, spaces, or hyphens. Must not be default.

    Example: mySubnetgroup

    ", + "RestoreDBClusterToPointInTimeMessage$OptionGroupName": "

    The name of the option group for the new DB cluster.

    ", + "RestoreDBClusterToPointInTimeMessage$KmsKeyId": "

    The KMS key identifier to use when restoring an encrypted DB cluster from an encrypted DB cluster.

    The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are restoring a DB cluster with the same AWS account that owns the KMS encryption key used to encrypt the new DB cluster, then you can use the KMS key alias instead of the ARN for the KMS encryption key.

    You can restore to a new DB cluster and encrypt the new DB cluster with a KMS key that is different than the KMS key used to encrypt the source DB cluster. The new DB cluster will be encrypted with the KMS key identified by the KmsKeyId parameter.

    If you do not specify a value for the KmsKeyId parameter, then the following will occur:

    • If the DB cluster is encrypted, then the restored DB cluster is encrypted using the KMS key that was used to encrypt the source DB cluster.

    • If the DB cluster is not encrypted, then the restored DB cluster is not encrypted.

    If DBClusterIdentifier refers to a DB cluster that is not encrypted and you specify a value for the KmsKeyId parameter, then the restore request is rejected.
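
    The point-in-time variant differs from the snapshot restore sketched earlier in one respect the text calls out explicitly: the new cluster may be encrypted under a KMS key different from the source cluster's. A sketch under the same placeholder assumptions as the previous example:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/rds"
    )

    func main() {
        svc := rds.New(session.Must(session.NewSession()))

        // The text explicitly allows a KMS key different from the source
        // cluster's; the new cluster is encrypted under the key named here.
        // Identifiers and the ARN are placeholders.
        out, err := svc.RestoreDBClusterToPointInTime(&rds.RestoreDBClusterToPointInTimeInput{
            DBClusterIdentifier:       aws.String("my-pitr-cluster"),
            SourceDBClusterIdentifier: aws.String("my-source-cluster"),
            UseLatestRestorableTime:   aws.Bool(true),
            KmsKeyId:                  aws.String("arn:aws:kms:us-east-1:123456789012:key/OTHER"),
        })
        if err != nil {
            fmt.Println("restore failed:", err)
            return
        }
        fmt.Println(aws.StringValue(out.DBCluster.DBClusterIdentifier))
    }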

    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBInstanceIdentifier": "

    Name of the DB instance to create from the DB snapshot. This parameter isn't case-sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens (1 to 15 for SQL Server)

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-snapshot-id

    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBSnapshotIdentifier": "

    The identifier for the DB snapshot to restore from.

    Constraints:

    • Must contain from 1 to 255 alphanumeric characters or hyphens

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier must be the ARN of the shared DB snapshot.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBInstanceClass": "

    The compute and memory capacity of the Amazon RDS DB instance.

    Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.m4.large | db.m4.xlarge | db.m4.2xlarge | db.m4.4xlarge | db.m4.10xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge | db.t2.micro | db.t2.small | db.t2.medium | db.t2.large

    ", + "RestoreDBInstanceFromDBSnapshotMessage$AvailabilityZone": "

    The EC2 Availability Zone that the database instance will be created in.

    Default: A random, system-chosen Availability Zone.

    Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    Example: us-east-1a

    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBSubnetGroupName": "

    The DB subnet group name to use for the new instance.

    Constraints: Must contain no more than 255 alphanumeric characters, periods, underscores, spaces, or hyphens. Must not be default.

    Example: mySubnetgroup

    ", + "RestoreDBInstanceFromDBSnapshotMessage$LicenseModel": "

    License model information for the restored DB instance.

    Default: Same as source.

    Valid values: license-included | bring-your-own-license | general-public-license

    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBName": "

    The database name for the restored DB instance.

    This parameter doesn't apply to the MySQL or MariaDB engines.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$Engine": "

    The database engine to use for the new instance.

    Default: The same as source

    Constraint: Must be compatible with the engine of the source

    Valid Values: MySQL | mariadb | oracle-se1 | oracle-se | oracle-ee | sqlserver-ee | sqlserver-se | sqlserver-ex | sqlserver-web | postgres | aurora

    ", + "RestoreDBInstanceFromDBSnapshotMessage$OptionGroupName": "

    The name of the option group to be used for the restored DB instance.

    Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance after it is associated with one.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$StorageType": "

    Specifies the storage type to be associated with the DB instance.

    Valid values: standard | gp2 | io1

    If you specify io1, you must also include a value for the Iops parameter.

    Default: io1 if the Iops parameter is specified; otherwise standard
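
    The storage-type default above is mechanical enough to state as code. A small Go helper that mirrors the documented rule (a restatement of the prose, not logic taken from the SDK; it does not enforce the separate requirement that io1 be accompanied by Iops):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
    )

    // effectiveStorageType mirrors the documented default: an explicit
    // StorageType wins; otherwise io1 is implied when Iops is set and
    // standard when it is not.
    func effectiveStorageType(storageType *string, iops *int64) string {
        if storageType != nil {
            return aws.StringValue(storageType)
        }
        if iops != nil {
            return "io1"
        }
        return "standard"
    }

    func main() {
        fmt.Println(effectiveStorageType(nil, aws.Int64(1000)))   // io1
        fmt.Println(effectiveStorageType(nil, nil))               // standard
        fmt.Println(effectiveStorageType(aws.String("gp2"), nil)) // gp2
    }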

    ", + "RestoreDBInstanceFromDBSnapshotMessage$TdeCredentialArn": "

    The ARN from the Key Store with which to associate the instance for TDE encryption.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$TdeCredentialPassword": "

    The password for the given ARN from the Key Store in order to access the device.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$Domain": "

    Specify the Active Directory Domain to restore the instance in.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$DomainIAMRoleName": "

    Specify the name of the IAM role to be used when making API calls to the Directory Service.

    ", + "RestoreDBInstanceToPointInTimeMessage$SourceDBInstanceIdentifier": "

    The identifier of the source DB instance from which to restore.

    Constraints:

    • Must be the identifier of an existing database instance

    • Must contain from 1 to 63 alphanumeric characters or hyphens

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    ", + "RestoreDBInstanceToPointInTimeMessage$TargetDBInstanceIdentifier": "

    The name of the new database instance to be created.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    ", + "RestoreDBInstanceToPointInTimeMessage$DBInstanceClass": "

    The compute and memory capacity of the Amazon RDS DB instance.

    Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.m4.large | db.m4.xlarge | db.m4.2xlarge | db.m4.4xlarge | db.m4.10xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge | db.t2.micro | db.t2.small | db.t2.medium | db.t2.large

    Default: The same DBInstanceClass as the original DB instance.

    ", + "RestoreDBInstanceToPointInTimeMessage$AvailabilityZone": "

    The EC2 Availability Zone that the database instance will be created in.

    Default: A random, system-chosen Availability Zone.

    Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    Example: us-east-1a

    ", + "RestoreDBInstanceToPointInTimeMessage$DBSubnetGroupName": "

    The DB subnet group name to use for the new instance.

    Constraints: Must contain no more than 255 alphanumeric characters, periods, underscores, spaces, or hyphens. Must not be default.

    Example: mySubnetgroup

    ", + "RestoreDBInstanceToPointInTimeMessage$LicenseModel": "

    License model information for the restored DB instance.

    Default: Same as source.

    Valid values: license-included | bring-your-own-license | general-public-license

    ", + "RestoreDBInstanceToPointInTimeMessage$DBName": "

    The database name for the restored DB instance.

    This parameter is not used for the MySQL or MariaDB engines.

    ", + "RestoreDBInstanceToPointInTimeMessage$Engine": "

    The database engine to use for the new instance.

    Default: The same as source

    Constraint: Must be compatible with the engine of the source

    Valid Values: MySQL | mariadb | oracle-se1 | oracle-se | oracle-ee | sqlserver-ee | sqlserver-se | sqlserver-ex | sqlserver-web | postgres | aurora

    ", + "RestoreDBInstanceToPointInTimeMessage$OptionGroupName": "

    The name of the option group to be used for the restored DB instance.

    Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance after it is associated with one.

    ", + "RestoreDBInstanceToPointInTimeMessage$StorageType": "

    Specifies the storage type to be associated with the DB instance.

    Valid values: standard | gp2 | io1

    If you specify io1, you must also include a value for the Iops parameter.

    Default: io1 if the Iops parameter is specified; otherwise standard

    ", + "RestoreDBInstanceToPointInTimeMessage$TdeCredentialArn": "

    The ARN from the Key Store with which to associate the instance for TDE encryption.

    ", + "RestoreDBInstanceToPointInTimeMessage$TdeCredentialPassword": "

    The password for the given ARN from the Key Store in order to access the device.

    ", + "RestoreDBInstanceToPointInTimeMessage$Domain": "

    Specify the Active Directory Domain to restore the instance in.

    ", + "RestoreDBInstanceToPointInTimeMessage$DomainIAMRoleName": "

    Specify the name of the IAM role to be used when making API calls to the Directory Service.

    ", + "RevokeDBSecurityGroupIngressMessage$DBSecurityGroupName": "

    The name of the DB security group to revoke ingress from.

    ", + "RevokeDBSecurityGroupIngressMessage$CIDRIP": "

    The IP range to revoke access from. Must be a valid CIDR range. If CIDRIP is specified, EC2SecurityGroupName, EC2SecurityGroupId and EC2SecurityGroupOwnerId cannot be provided.

    ", + "RevokeDBSecurityGroupIngressMessage$EC2SecurityGroupName": "

    The name of the EC2 security group to revoke access from. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "RevokeDBSecurityGroupIngressMessage$EC2SecurityGroupId": "

    The ID of the EC2 security group to revoke access from. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "RevokeDBSecurityGroupIngressMessage$EC2SecurityGroupOwnerId": "

    The AWS Account Number of the owner of the EC2 security group specified in the EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable value. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "SourceIdsList$member": null, + "Subnet$SubnetIdentifier": "

    Specifies the identifier of the subnet.

    ", + "Subnet$SubnetStatus": "

    Specifies the status of the subnet.

    ", + "SubnetIdentifierList$member": null, + "Tag$Key": "

    A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and cannot be prefixed with \"aws:\" or \"rds:\". The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").

    ", + "Tag$Value": "

    A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and cannot be prefixed with \"aws:\" or \"rds:\". The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").
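
    Both tag entries above share one character-set rule, given as a Java regex; Go's regexp package understands the same Unicode classes, so the documented constraints can be checked client-side. A sketch of the rule as written (the helper name is illustrative, not from the SDK):

    package main

    import (
        "fmt"
        "regexp"
        "strings"
        "unicode/utf8"
    )

    // tagCharset is the shared character set, translated directly from the
    // Java regex quoted above; Go's RE2 accepts the same \p{...} classes.
    var tagCharset = regexp.MustCompile(`^[\p{L}\p{Z}\p{N}_.:/=+\-]*$`)

    // validTagKey applies the documented key rules: 1 to 128 characters, no
    // "aws:" or "rds:" prefix, and the shared character set.
    func validTagKey(k string) bool {
        n := utf8.RuneCountInString(k)
        if n < 1 || n > 128 {
            return false
        }
        if strings.HasPrefix(k, "aws:") || strings.HasPrefix(k, "rds:") {
            return false
        }
        return tagCharset.MatchString(k)
    }

    func main() {
        fmt.Println(validTagKey("environment"))  // true
        fmt.Println(validTagKey("aws:reserved")) // false: reserved prefix
    }

    The value check is analogous, with a 256-character cap and the same prefix and character-set rules.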

    ", + "UpgradeTarget$Engine": "

    The name of the upgrade target database engine.

    ", + "UpgradeTarget$EngineVersion": "

    The version number of the upgrade target database engine.

    ", + "UpgradeTarget$Description": "

    The version of the database engine that a DB instance can be upgraded to.

    ", + "VpcSecurityGroupIdList$member": null, + "VpcSecurityGroupMembership$VpcSecurityGroupId": "

    The name of the VPC security group.

    ", + "VpcSecurityGroupMembership$Status": "

    The status of the VPC security group.

    " + } + }, + "Subnet": { + "base": "

    This data type is used as a response element in the DescribeDBSubnetGroups action.

    ", + "refs": { + "SubnetList$member": null + } + }, + "SubnetAlreadyInUse": { + "base": "

    The DB subnet is already in use in the Availability Zone.

    ", + "refs": { + } + }, + "SubnetIdentifierList": { + "base": null, + "refs": { + "CreateDBSubnetGroupMessage$SubnetIds": "

    The EC2 Subnet IDs for the DB subnet group.

    ", + "ModifyDBSubnetGroupMessage$SubnetIds": "

    The EC2 subnet IDs for the DB subnet group.

    " + } + }, + "SubnetList": { + "base": null, + "refs": { + "DBSubnetGroup$Subnets": "

    Contains a list of Subnet elements.

    " + } + }, + "SubscriptionAlreadyExistFault": { + "base": "

    The supplied subscription name already exists.

    ", + "refs": { + } + }, + "SubscriptionCategoryNotFoundFault": { + "base": "

    The supplied category does not exist.

    ", + "refs": { + } + }, + "SubscriptionNotFoundFault": { + "base": "

    The subscription name does not exist.

    ", + "refs": { + } + }, + "SupportedCharacterSetsList": { + "base": null, + "refs": { + "DBEngineVersion$SupportedCharacterSets": "

    A list of the character sets supported by this engine for the CharacterSetName parameter of the CreateDBInstance API.

    " + } + }, + "TStamp": { + "base": null, + "refs": { + "Certificate$ValidFrom": "

    The starting date from which the certificate is valid.

    ", + "Certificate$ValidTill": "

    The final date on which the certificate is valid.

    ", + "DBCluster$EarliestRestorableTime": "

    Specifies the earliest time to which a database can be restored with point-in-time restore.

    ", + "DBCluster$LatestRestorableTime": "

    Specifies the latest time to which a database can be restored with point-in-time restore.

    ", + "DBClusterSnapshot$SnapshotCreateTime": "

    Provides the time when the snapshot was taken, in Universal Coordinated Time (UTC).

    ", + "DBClusterSnapshot$ClusterCreateTime": "

    Specifies the time when the DB cluster was created, in Universal Coordinated Time (UTC).

    ", + "DBInstance$InstanceCreateTime": "

    Provides the date and time the DB instance was created.

    ", + "DBInstance$LatestRestorableTime": "

    Specifies the latest time to which a database can be restored with point-in-time restore.

    ", + "DBSnapshot$SnapshotCreateTime": "

    Provides the time when the snapshot was taken, in Universal Coordinated Time (UTC).

    ", + "DBSnapshot$InstanceCreateTime": "

    Specifies the time when the DB instance, from which the snapshot was taken, was created, in Universal Coordinated Time (UTC).

    ", + "DescribeEventsMessage$StartTime": "

    The beginning of the time interval to retrieve events for, specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.

    Example: 2009-07-08T18:00Z

    ", + "DescribeEventsMessage$EndTime": "

    The end of the time interval for which to retrieve events, specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.

    Example: 2009-07-08T18:00Z

    ", + "Event$Date": "

    Specifies the date and time of the event.

    ", + "PendingMaintenanceAction$AutoAppliedAfterDate": "

    The date of the maintenance window when the action will be applied. The maintenance action will be applied to the resource during its first maintenance window after this date. If this date is specified, any next-maintenance opt-in requests are ignored.

    ", + "PendingMaintenanceAction$ForcedApplyDate": "

    The date when the maintenance action will be automatically applied. The maintenance action will be applied to the resource on this date regardless of the maintenance window for the resource. If this date is specified, any immediate opt-in requests are ignored.

    ", + "PendingMaintenanceAction$CurrentApplyDate": "

    The effective date when the pending maintenance action will be applied to the resource. This date takes into account opt-in requests received from the ApplyPendingMaintenanceAction API, the AutoAppliedAfterDate, and the ForcedApplyDate. This value is blank if an opt-in request has not been received and nothing has been specified as AutoAppliedAfterDate or ForcedApplyDate.

    ", + "ReservedDBInstance$StartTime": "

    The time the reservation started.

    ", + "RestoreDBClusterToPointInTimeMessage$RestoreToTime": "

    The date and time to restore the DB cluster to.

    Valid Values: Value must be a time in Universal Coordinated Time (UTC) format

    Constraints:

    • Must be before the latest restorable time for the DB instance

    • Cannot be specified if UseLatestRestorableTime parameter is true

    Example: 2015-03-07T23:45:00Z

    ", + "RestoreDBInstanceToPointInTimeMessage$RestoreTime": "

    The date and time to restore from.

    Valid Values: Value must be a time in Universal Coordinated Time (UTC) format

    Constraints:

    • Must be before the latest restorable time for the DB instance

    • Cannot be specified if UseLatestRestorableTime parameter is true

    Example: 2009-09-07T23:45:00Z

    " + } + }, + "Tag": { + "base": "

    Metadata assigned to an Amazon RDS resource consisting of a key-value pair.

    ", + "refs": { + "TagList$member": null + } + }, + "TagList": { + "base": "

    A list of tags.

    ", + "refs": { + "AddTagsToResourceMessage$Tags": "

    The tags to be assigned to the Amazon RDS resource.

    ", + "CopyDBClusterSnapshotMessage$Tags": null, + "CopyDBParameterGroupMessage$Tags": null, + "CopyDBSnapshotMessage$Tags": null, + "CopyOptionGroupMessage$Tags": null, + "CreateDBClusterMessage$Tags": null, + "CreateDBClusterParameterGroupMessage$Tags": null, + "CreateDBClusterSnapshotMessage$Tags": "

    The tags to be assigned to the DB cluster snapshot.

    ", + "CreateDBInstanceMessage$Tags": null, + "CreateDBInstanceReadReplicaMessage$Tags": null, + "CreateDBParameterGroupMessage$Tags": null, + "CreateDBSecurityGroupMessage$Tags": null, + "CreateDBSnapshotMessage$Tags": null, + "CreateDBSubnetGroupMessage$Tags": null, + "CreateEventSubscriptionMessage$Tags": null, + "CreateOptionGroupMessage$Tags": null, + "PurchaseReservedDBInstancesOfferingMessage$Tags": null, + "RestoreDBClusterFromSnapshotMessage$Tags": "

    The tags to be assigned to the restored DB cluster.

    ", + "RestoreDBClusterToPointInTimeMessage$Tags": null, + "RestoreDBInstanceFromDBSnapshotMessage$Tags": null, + "RestoreDBInstanceToPointInTimeMessage$Tags": null, + "TagListMessage$TagList": "

    List of tags returned by the ListTagsForResource operation.

    " + } + }, + "TagListMessage": { + "base": "

    ", + "refs": { + } + }, + "UpgradeTarget": { + "base": "

    The version of the database engine that a DB instance can be upgraded to.

    ", + "refs": { + "ValidUpgradeTargetList$member": null + } + }, + "ValidUpgradeTargetList": { + "base": null, + "refs": { + "DBEngineVersion$ValidUpgradeTarget": "

    A list of engine versions that this database engine version can be upgraded to.

    " + } + }, + "VpcSecurityGroupIdList": { + "base": null, + "refs": { + "CreateDBClusterMessage$VpcSecurityGroupIds": "

    A list of EC2 VPC security groups to associate with this DB cluster.

    ", + "CreateDBInstanceMessage$VpcSecurityGroupIds": "

    A list of EC2 VPC security groups to associate with this DB instance.

    Default: The default EC2 VPC security group for the DB subnet group's VPC.

    ", + "ModifyDBClusterMessage$VpcSecurityGroupIds": "

    A list of VPC security groups that the DB cluster will belong to.

    ", + "ModifyDBInstanceMessage$VpcSecurityGroupIds": "

    A list of EC2 VPC security groups to authorize on this DB instance. This change is asynchronously applied as soon as possible.

    Constraints:

    • Must be 1 to 255 alphanumeric characters

    • First character must be a letter

    • Cannot end with a hyphen or contain two consecutive hyphens

    ", + "OptionConfiguration$VpcSecurityGroupMemberships": "

    A list of VpcSecurityGroupMembership name strings used for this option.

    ", + "RestoreDBClusterFromSnapshotMessage$VpcSecurityGroupIds": "

    A list of VPC security groups that the new DB cluster will belong to.

    ", + "RestoreDBClusterToPointInTimeMessage$VpcSecurityGroupIds": "

    A list of VPC security groups that the new DB cluster belongs to.

    " + } + }, + "VpcSecurityGroupMembership": { + "base": "

    This data type is used as a response element for queries on VPC security group membership.

    ", + "refs": { + "VpcSecurityGroupMembershipList$member": null + } + }, + "VpcSecurityGroupMembershipList": { + "base": null, + "refs": { + "DBCluster$VpcSecurityGroups": "

    Provides a list of VPC security groups that the DB cluster belongs to.

    ", + "DBInstance$VpcSecurityGroups": "

    Provides a list of VPC security group elements that the DB instance belongs to.

    ", + "Option$VpcSecurityGroupMemberships": "

    If the option requires access to a port, then this VPC security group allows access to the port.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/paginators-1.json new file mode 100644 index 000000000..662845c12 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/paginators-1.json @@ -0,0 +1,110 @@ +{ + "pagination": { + "DescribeDBEngineVersions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBEngineVersions" + }, + "DescribeDBInstances": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBInstances" + }, + "DescribeDBLogFiles": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DescribeDBLogFiles" + }, + "DescribeDBParameterGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBParameterGroups" + }, + "DescribeDBParameters": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "Parameters" + }, + "DescribeDBSecurityGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBSecurityGroups" + }, + "DescribeDBSnapshots": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBSnapshots" + }, + "DescribeDBSubnetGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBSubnetGroups" + }, + "DescribeEngineDefaultParameters": { + "input_token": "Marker", + "output_token": "EngineDefaults.Marker", + "limit_key": "MaxRecords", + "result_key": "EngineDefaults.Parameters" + }, + "DescribeEventSubscriptions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "EventSubscriptionsList" + }, + "DescribeEvents": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "Events" + }, + "DescribeOptionGroupOptions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "OptionGroupOptions" + }, + "DescribeOptionGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "OptionGroupsList" + }, + "DescribeOrderableDBInstanceOptions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "OrderableDBInstanceOptions" + }, + "DescribeReservedDBInstances": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "ReservedDBInstances" + }, + "DescribeReservedDBInstancesOfferings": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "ReservedDBInstancesOfferings" + }, + "DownloadDBLogFilePortion": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "NumberOfLines", + "more_results": "AdditionalDataPending", + "result_key": "LogFileData" + }, + "ListTagsForResource": { + "result_key": "TagList" + } + } +} diff --git 
a/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/waiters-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/waiters-2.json new file mode 100644 index 000000000..4f403ac05 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/waiters-2.json @@ -0,0 +1,102 @@ +{ + "version": 2, + "waiters": { + "DBInstanceAvailable": { + "delay": 30, + "operation": "DescribeDBInstances", + "maxAttempts": 60, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "deleted", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "deleting", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "failed", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "incompatible-restore", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "incompatible-parameters", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "incompatible-parameters", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "incompatible-restore", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + } + ] + }, + "DBInstanceDeleted": { + "delay": 30, + "operation": "DescribeDBInstances", + "maxAttempts": 60, + "acceptors": [ + { + "expected": "deleted", + "matcher": "pathAll", + "state": "success", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "DBInstanceNotFound", + "matcher": "error", + "state": "success" + }, + { + "expected": "creating", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "modifying", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "rebooting", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "resetting-master-credentials", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + } + ] + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/redshift/2012-12-01/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/redshift/2012-12-01/api-2.json new file mode 100644 index 000000000..c8771991d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/redshift/2012-12-01/api-2.json @@ -0,0 +1,3768 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2012-12-01", + "endpointPrefix":"redshift", + "protocol":"query", + "serviceFullName":"Amazon Redshift", + "signatureVersion":"v4", + "xmlNamespace":"http://redshift.amazonaws.com/doc/2012-12-01/" + }, + "operations":{ + "AuthorizeClusterSecurityGroupIngress":{ + "name":"AuthorizeClusterSecurityGroupIngress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AuthorizeClusterSecurityGroupIngressMessage"}, + "output":{ + "shape":"AuthorizeClusterSecurityGroupIngressResult", + "resultWrapper":"AuthorizeClusterSecurityGroupIngressResult" + }, + "errors":[ + {"shape":"ClusterSecurityGroupNotFoundFault"}, + {"shape":"InvalidClusterSecurityGroupStateFault"}, + {"shape":"AuthorizationAlreadyExistsFault"}, + 
{"shape":"AuthorizationQuotaExceededFault"} + ] + }, + "AuthorizeSnapshotAccess":{ + "name":"AuthorizeSnapshotAccess", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AuthorizeSnapshotAccessMessage"}, + "output":{ + "shape":"AuthorizeSnapshotAccessResult", + "resultWrapper":"AuthorizeSnapshotAccessResult" + }, + "errors":[ + {"shape":"ClusterSnapshotNotFoundFault"}, + {"shape":"AuthorizationAlreadyExistsFault"}, + {"shape":"AuthorizationQuotaExceededFault"}, + {"shape":"DependentServiceRequestThrottlingFault"}, + {"shape":"InvalidClusterSnapshotStateFault"}, + {"shape":"LimitExceededFault"} + ] + }, + "CopyClusterSnapshot":{ + "name":"CopyClusterSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CopyClusterSnapshotMessage"}, + "output":{ + "shape":"CopyClusterSnapshotResult", + "resultWrapper":"CopyClusterSnapshotResult" + }, + "errors":[ + {"shape":"ClusterSnapshotAlreadyExistsFault"}, + {"shape":"ClusterSnapshotNotFoundFault"}, + {"shape":"InvalidClusterSnapshotStateFault"}, + {"shape":"ClusterSnapshotQuotaExceededFault"} + ] + }, + "CreateCluster":{ + "name":"CreateCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateClusterMessage"}, + "output":{ + "shape":"CreateClusterResult", + "resultWrapper":"CreateClusterResult" + }, + "errors":[ + {"shape":"ClusterAlreadyExistsFault"}, + {"shape":"InsufficientClusterCapacityFault"}, + {"shape":"ClusterParameterGroupNotFoundFault"}, + {"shape":"ClusterSecurityGroupNotFoundFault"}, + {"shape":"ClusterQuotaExceededFault"}, + {"shape":"NumberOfNodesQuotaExceededFault"}, + {"shape":"NumberOfNodesPerClusterLimitExceededFault"}, + {"shape":"ClusterSubnetGroupNotFoundFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"InvalidClusterSubnetGroupStateFault"}, + {"shape":"InvalidSubnet"}, + {"shape":"UnauthorizedOperation"}, + {"shape":"HsmClientCertificateNotFoundFault"}, + {"shape":"HsmConfigurationNotFoundFault"}, + {"shape":"InvalidElasticIpFault"}, + {"shape":"TagLimitExceededFault"}, + {"shape":"InvalidTagFault"}, + {"shape":"LimitExceededFault"}, + {"shape":"DependentServiceRequestThrottlingFault"} + ] + }, + "CreateClusterParameterGroup":{ + "name":"CreateClusterParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateClusterParameterGroupMessage"}, + "output":{ + "shape":"CreateClusterParameterGroupResult", + "resultWrapper":"CreateClusterParameterGroupResult" + }, + "errors":[ + {"shape":"ClusterParameterGroupQuotaExceededFault"}, + {"shape":"ClusterParameterGroupAlreadyExistsFault"}, + {"shape":"TagLimitExceededFault"}, + {"shape":"InvalidTagFault"} + ] + }, + "CreateClusterSecurityGroup":{ + "name":"CreateClusterSecurityGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateClusterSecurityGroupMessage"}, + "output":{ + "shape":"CreateClusterSecurityGroupResult", + "resultWrapper":"CreateClusterSecurityGroupResult" + }, + "errors":[ + {"shape":"ClusterSecurityGroupAlreadyExistsFault"}, + {"shape":"ClusterSecurityGroupQuotaExceededFault"}, + {"shape":"TagLimitExceededFault"}, + {"shape":"InvalidTagFault"} + ] + }, + "CreateClusterSnapshot":{ + "name":"CreateClusterSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateClusterSnapshotMessage"}, + "output":{ + "shape":"CreateClusterSnapshotResult", + "resultWrapper":"CreateClusterSnapshotResult" + }, + "errors":[ + {"shape":"ClusterSnapshotAlreadyExistsFault"}, + 
{"shape":"InvalidClusterStateFault"}, + {"shape":"ClusterNotFoundFault"}, + {"shape":"ClusterSnapshotQuotaExceededFault"}, + {"shape":"TagLimitExceededFault"}, + {"shape":"InvalidTagFault"} + ] + }, + "CreateClusterSubnetGroup":{ + "name":"CreateClusterSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateClusterSubnetGroupMessage"}, + "output":{ + "shape":"CreateClusterSubnetGroupResult", + "resultWrapper":"CreateClusterSubnetGroupResult" + }, + "errors":[ + {"shape":"ClusterSubnetGroupAlreadyExistsFault"}, + {"shape":"ClusterSubnetGroupQuotaExceededFault"}, + {"shape":"ClusterSubnetQuotaExceededFault"}, + {"shape":"InvalidSubnet"}, + {"shape":"UnauthorizedOperation"}, + {"shape":"TagLimitExceededFault"}, + {"shape":"InvalidTagFault"}, + {"shape":"DependentServiceRequestThrottlingFault"} + ] + }, + "CreateEventSubscription":{ + "name":"CreateEventSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateEventSubscriptionMessage"}, + "output":{ + "shape":"CreateEventSubscriptionResult", + "resultWrapper":"CreateEventSubscriptionResult" + }, + "errors":[ + {"shape":"EventSubscriptionQuotaExceededFault"}, + {"shape":"SubscriptionAlreadyExistFault"}, + {"shape":"SNSInvalidTopicFault"}, + {"shape":"SNSNoAuthorizationFault"}, + {"shape":"SNSTopicArnNotFoundFault"}, + {"shape":"SubscriptionEventIdNotFoundFault"}, + {"shape":"SubscriptionCategoryNotFoundFault"}, + {"shape":"SubscriptionSeverityNotFoundFault"}, + {"shape":"SourceNotFoundFault"}, + {"shape":"TagLimitExceededFault"}, + {"shape":"InvalidTagFault"} + ] + }, + "CreateHsmClientCertificate":{ + "name":"CreateHsmClientCertificate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateHsmClientCertificateMessage"}, + "output":{ + "shape":"CreateHsmClientCertificateResult", + "resultWrapper":"CreateHsmClientCertificateResult" + }, + "errors":[ + {"shape":"HsmClientCertificateAlreadyExistsFault"}, + {"shape":"HsmClientCertificateQuotaExceededFault"}, + {"shape":"TagLimitExceededFault"}, + {"shape":"InvalidTagFault"} + ] + }, + "CreateHsmConfiguration":{ + "name":"CreateHsmConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateHsmConfigurationMessage"}, + "output":{ + "shape":"CreateHsmConfigurationResult", + "resultWrapper":"CreateHsmConfigurationResult" + }, + "errors":[ + {"shape":"HsmConfigurationAlreadyExistsFault"}, + {"shape":"HsmConfigurationQuotaExceededFault"}, + {"shape":"TagLimitExceededFault"}, + {"shape":"InvalidTagFault"} + ] + }, + "CreateSnapshotCopyGrant":{ + "name":"CreateSnapshotCopyGrant", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSnapshotCopyGrantMessage"}, + "output":{ + "shape":"CreateSnapshotCopyGrantResult", + "resultWrapper":"CreateSnapshotCopyGrantResult" + }, + "errors":[ + {"shape":"SnapshotCopyGrantAlreadyExistsFault"}, + {"shape":"SnapshotCopyGrantQuotaExceededFault"}, + {"shape":"LimitExceededFault"}, + {"shape":"TagLimitExceededFault"}, + {"shape":"InvalidTagFault"}, + {"shape":"DependentServiceRequestThrottlingFault"} + ] + }, + "CreateTags":{ + "name":"CreateTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateTagsMessage"}, + "errors":[ + {"shape":"TagLimitExceededFault"}, + {"shape":"ResourceNotFoundFault"}, + {"shape":"InvalidTagFault"} + ] + }, + "DeleteCluster":{ + "name":"DeleteCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteClusterMessage"}, + 
"output":{ + "shape":"DeleteClusterResult", + "resultWrapper":"DeleteClusterResult" + }, + "errors":[ + {"shape":"ClusterNotFoundFault"}, + {"shape":"InvalidClusterStateFault"}, + {"shape":"ClusterSnapshotAlreadyExistsFault"}, + {"shape":"ClusterSnapshotQuotaExceededFault"} + ] + }, + "DeleteClusterParameterGroup":{ + "name":"DeleteClusterParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteClusterParameterGroupMessage"}, + "errors":[ + {"shape":"InvalidClusterParameterGroupStateFault"}, + {"shape":"ClusterParameterGroupNotFoundFault"} + ] + }, + "DeleteClusterSecurityGroup":{ + "name":"DeleteClusterSecurityGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteClusterSecurityGroupMessage"}, + "errors":[ + {"shape":"InvalidClusterSecurityGroupStateFault"}, + {"shape":"ClusterSecurityGroupNotFoundFault"} + ] + }, + "DeleteClusterSnapshot":{ + "name":"DeleteClusterSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteClusterSnapshotMessage"}, + "output":{ + "shape":"DeleteClusterSnapshotResult", + "resultWrapper":"DeleteClusterSnapshotResult" + }, + "errors":[ + {"shape":"InvalidClusterSnapshotStateFault"}, + {"shape":"ClusterSnapshotNotFoundFault"} + ] + }, + "DeleteClusterSubnetGroup":{ + "name":"DeleteClusterSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteClusterSubnetGroupMessage"}, + "errors":[ + {"shape":"InvalidClusterSubnetGroupStateFault"}, + {"shape":"InvalidClusterSubnetStateFault"}, + {"shape":"ClusterSubnetGroupNotFoundFault"} + ] + }, + "DeleteEventSubscription":{ + "name":"DeleteEventSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteEventSubscriptionMessage"}, + "errors":[ + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"InvalidSubscriptionStateFault"} + ] + }, + "DeleteHsmClientCertificate":{ + "name":"DeleteHsmClientCertificate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteHsmClientCertificateMessage"}, + "errors":[ + {"shape":"InvalidHsmClientCertificateStateFault"}, + {"shape":"HsmClientCertificateNotFoundFault"} + ] + }, + "DeleteHsmConfiguration":{ + "name":"DeleteHsmConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteHsmConfigurationMessage"}, + "errors":[ + {"shape":"InvalidHsmConfigurationStateFault"}, + {"shape":"HsmConfigurationNotFoundFault"} + ] + }, + "DeleteSnapshotCopyGrant":{ + "name":"DeleteSnapshotCopyGrant", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSnapshotCopyGrantMessage"}, + "errors":[ + {"shape":"InvalidSnapshotCopyGrantStateFault"}, + {"shape":"SnapshotCopyGrantNotFoundFault"} + ] + }, + "DeleteTags":{ + "name":"DeleteTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteTagsMessage"}, + "errors":[ + {"shape":"ResourceNotFoundFault"}, + {"shape":"InvalidTagFault"} + ] + }, + "DescribeClusterParameterGroups":{ + "name":"DescribeClusterParameterGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeClusterParameterGroupsMessage"}, + "output":{ + "shape":"ClusterParameterGroupsMessage", + "resultWrapper":"DescribeClusterParameterGroupsResult" + }, + "errors":[ + {"shape":"ClusterParameterGroupNotFoundFault"}, + {"shape":"InvalidTagFault"} + ] + }, + "DescribeClusterParameters":{ + "name":"DescribeClusterParameters", + "http":{ + "method":"POST", + "requestUri":"/" + 
}, + "input":{"shape":"DescribeClusterParametersMessage"}, + "output":{ + "shape":"ClusterParameterGroupDetails", + "resultWrapper":"DescribeClusterParametersResult" + }, + "errors":[ + {"shape":"ClusterParameterGroupNotFoundFault"} + ] + }, + "DescribeClusterSecurityGroups":{ + "name":"DescribeClusterSecurityGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeClusterSecurityGroupsMessage"}, + "output":{ + "shape":"ClusterSecurityGroupMessage", + "resultWrapper":"DescribeClusterSecurityGroupsResult" + }, + "errors":[ + {"shape":"ClusterSecurityGroupNotFoundFault"}, + {"shape":"InvalidTagFault"} + ] + }, + "DescribeClusterSnapshots":{ + "name":"DescribeClusterSnapshots", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeClusterSnapshotsMessage"}, + "output":{ + "shape":"SnapshotMessage", + "resultWrapper":"DescribeClusterSnapshotsResult" + }, + "errors":[ + {"shape":"ClusterSnapshotNotFoundFault"}, + {"shape":"InvalidTagFault"} + ] + }, + "DescribeClusterSubnetGroups":{ + "name":"DescribeClusterSubnetGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeClusterSubnetGroupsMessage"}, + "output":{ + "shape":"ClusterSubnetGroupMessage", + "resultWrapper":"DescribeClusterSubnetGroupsResult" + }, + "errors":[ + {"shape":"ClusterSubnetGroupNotFoundFault"}, + {"shape":"InvalidTagFault"} + ] + }, + "DescribeClusterVersions":{ + "name":"DescribeClusterVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeClusterVersionsMessage"}, + "output":{ + "shape":"ClusterVersionsMessage", + "resultWrapper":"DescribeClusterVersionsResult" + } + }, + "DescribeClusters":{ + "name":"DescribeClusters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeClustersMessage"}, + "output":{ + "shape":"ClustersMessage", + "resultWrapper":"DescribeClustersResult" + }, + "errors":[ + {"shape":"ClusterNotFoundFault"}, + {"shape":"InvalidTagFault"} + ] + }, + "DescribeDefaultClusterParameters":{ + "name":"DescribeDefaultClusterParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDefaultClusterParametersMessage"}, + "output":{ + "shape":"DescribeDefaultClusterParametersResult", + "resultWrapper":"DescribeDefaultClusterParametersResult" + } + }, + "DescribeEventCategories":{ + "name":"DescribeEventCategories", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventCategoriesMessage"}, + "output":{ + "shape":"EventCategoriesMessage", + "resultWrapper":"DescribeEventCategoriesResult" + } + }, + "DescribeEventSubscriptions":{ + "name":"DescribeEventSubscriptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventSubscriptionsMessage"}, + "output":{ + "shape":"EventSubscriptionsMessage", + "resultWrapper":"DescribeEventSubscriptionsResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"} + ] + }, + "DescribeEvents":{ + "name":"DescribeEvents", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventsMessage"}, + "output":{ + "shape":"EventsMessage", + "resultWrapper":"DescribeEventsResult" + } + }, + "DescribeHsmClientCertificates":{ + "name":"DescribeHsmClientCertificates", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeHsmClientCertificatesMessage"}, + "output":{ + "shape":"HsmClientCertificateMessage", + "resultWrapper":"DescribeHsmClientCertificatesResult" + }, + 
"errors":[ + {"shape":"HsmClientCertificateNotFoundFault"}, + {"shape":"InvalidTagFault"} + ] + }, + "DescribeHsmConfigurations":{ + "name":"DescribeHsmConfigurations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeHsmConfigurationsMessage"}, + "output":{ + "shape":"HsmConfigurationMessage", + "resultWrapper":"DescribeHsmConfigurationsResult" + }, + "errors":[ + {"shape":"HsmConfigurationNotFoundFault"}, + {"shape":"InvalidTagFault"} + ] + }, + "DescribeLoggingStatus":{ + "name":"DescribeLoggingStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeLoggingStatusMessage"}, + "output":{ + "shape":"LoggingStatus", + "resultWrapper":"DescribeLoggingStatusResult" + }, + "errors":[ + {"shape":"ClusterNotFoundFault"} + ] + }, + "DescribeOrderableClusterOptions":{ + "name":"DescribeOrderableClusterOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOrderableClusterOptionsMessage"}, + "output":{ + "shape":"OrderableClusterOptionsMessage", + "resultWrapper":"DescribeOrderableClusterOptionsResult" + } + }, + "DescribeReservedNodeOfferings":{ + "name":"DescribeReservedNodeOfferings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedNodeOfferingsMessage"}, + "output":{ + "shape":"ReservedNodeOfferingsMessage", + "resultWrapper":"DescribeReservedNodeOfferingsResult" + }, + "errors":[ + {"shape":"ReservedNodeOfferingNotFoundFault"}, + {"shape":"UnsupportedOperationFault"} + ] + }, + "DescribeReservedNodes":{ + "name":"DescribeReservedNodes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedNodesMessage"}, + "output":{ + "shape":"ReservedNodesMessage", + "resultWrapper":"DescribeReservedNodesResult" + }, + "errors":[ + {"shape":"ReservedNodeNotFoundFault"} + ] + }, + "DescribeResize":{ + "name":"DescribeResize", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeResizeMessage"}, + "output":{ + "shape":"ResizeProgressMessage", + "resultWrapper":"DescribeResizeResult" + }, + "errors":[ + {"shape":"ClusterNotFoundFault"}, + {"shape":"ResizeNotFoundFault"} + ] + }, + "DescribeSnapshotCopyGrants":{ + "name":"DescribeSnapshotCopyGrants", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSnapshotCopyGrantsMessage"}, + "output":{ + "shape":"SnapshotCopyGrantMessage", + "resultWrapper":"DescribeSnapshotCopyGrantsResult" + }, + "errors":[ + {"shape":"SnapshotCopyGrantNotFoundFault"}, + {"shape":"InvalidTagFault"} + ] + }, + "DescribeTableRestoreStatus":{ + "name":"DescribeTableRestoreStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTableRestoreStatusMessage"}, + "output":{ + "shape":"TableRestoreStatusMessage", + "resultWrapper":"DescribeTableRestoreStatusResult" + }, + "errors":[ + {"shape":"TableRestoreNotFoundFault"}, + {"shape":"ClusterNotFoundFault"} + ] + }, + "DescribeTags":{ + "name":"DescribeTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTagsMessage"}, + "output":{ + "shape":"TaggedResourceListMessage", + "resultWrapper":"DescribeTagsResult" + }, + "errors":[ + {"shape":"ResourceNotFoundFault"}, + {"shape":"InvalidTagFault"} + ] + }, + "DisableLogging":{ + "name":"DisableLogging", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableLoggingMessage"}, + "output":{ + "shape":"LoggingStatus", + "resultWrapper":"DisableLoggingResult" + }, + 
"errors":[ + {"shape":"ClusterNotFoundFault"} + ] + }, + "DisableSnapshotCopy":{ + "name":"DisableSnapshotCopy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableSnapshotCopyMessage"}, + "output":{ + "shape":"DisableSnapshotCopyResult", + "resultWrapper":"DisableSnapshotCopyResult" + }, + "errors":[ + {"shape":"ClusterNotFoundFault"}, + {"shape":"SnapshotCopyAlreadyDisabledFault"}, + {"shape":"InvalidClusterStateFault"}, + {"shape":"UnauthorizedOperation"} + ] + }, + "EnableLogging":{ + "name":"EnableLogging", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableLoggingMessage"}, + "output":{ + "shape":"LoggingStatus", + "resultWrapper":"EnableLoggingResult" + }, + "errors":[ + {"shape":"ClusterNotFoundFault"}, + {"shape":"BucketNotFoundFault"}, + {"shape":"InsufficientS3BucketPolicyFault"}, + {"shape":"InvalidS3KeyPrefixFault"}, + {"shape":"InvalidS3BucketNameFault"} + ] + }, + "EnableSnapshotCopy":{ + "name":"EnableSnapshotCopy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableSnapshotCopyMessage"}, + "output":{ + "shape":"EnableSnapshotCopyResult", + "resultWrapper":"EnableSnapshotCopyResult" + }, + "errors":[ + {"shape":"IncompatibleOrderableOptions"}, + {"shape":"InvalidClusterStateFault"}, + {"shape":"ClusterNotFoundFault"}, + {"shape":"CopyToRegionDisabledFault"}, + {"shape":"SnapshotCopyAlreadyEnabledFault"}, + {"shape":"UnknownSnapshotCopyRegionFault"}, + {"shape":"UnauthorizedOperation"}, + {"shape":"SnapshotCopyGrantNotFoundFault"}, + {"shape":"LimitExceededFault"}, + {"shape":"DependentServiceRequestThrottlingFault"} + ] + }, + "ModifyCluster":{ + "name":"ModifyCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyClusterMessage"}, + "output":{ + "shape":"ModifyClusterResult", + "resultWrapper":"ModifyClusterResult" + }, + "errors":[ + {"shape":"InvalidClusterStateFault"}, + {"shape":"InvalidClusterSecurityGroupStateFault"}, + {"shape":"ClusterNotFoundFault"}, + {"shape":"NumberOfNodesQuotaExceededFault"}, + {"shape":"ClusterSecurityGroupNotFoundFault"}, + {"shape":"ClusterParameterGroupNotFoundFault"}, + {"shape":"InsufficientClusterCapacityFault"}, + {"shape":"UnsupportedOptionFault"}, + {"shape":"UnauthorizedOperation"}, + {"shape":"HsmClientCertificateNotFoundFault"}, + {"shape":"HsmConfigurationNotFoundFault"}, + {"shape":"ClusterAlreadyExistsFault"}, + {"shape":"LimitExceededFault"}, + {"shape":"DependentServiceRequestThrottlingFault"}, + {"shape":"InvalidElasticIpFault"} + ] + }, + "ModifyClusterIamRoles":{ + "name":"ModifyClusterIamRoles", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyClusterIamRolesMessage"}, + "output":{ + "shape":"ModifyClusterIamRolesResult", + "resultWrapper":"ModifyClusterIamRolesResult" + }, + "errors":[ + {"shape":"InvalidClusterStateFault"}, + {"shape":"ClusterNotFoundFault"} + ] + }, + "ModifyClusterParameterGroup":{ + "name":"ModifyClusterParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyClusterParameterGroupMessage"}, + "output":{ + "shape":"ClusterParameterGroupNameMessage", + "resultWrapper":"ModifyClusterParameterGroupResult" + }, + "errors":[ + {"shape":"ClusterParameterGroupNotFoundFault"}, + {"shape":"InvalidClusterParameterGroupStateFault"} + ] + }, + "ModifyClusterSubnetGroup":{ + "name":"ModifyClusterSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyClusterSubnetGroupMessage"}, + 
"output":{ + "shape":"ModifyClusterSubnetGroupResult", + "resultWrapper":"ModifyClusterSubnetGroupResult" + }, + "errors":[ + {"shape":"ClusterSubnetGroupNotFoundFault"}, + {"shape":"ClusterSubnetQuotaExceededFault"}, + {"shape":"SubnetAlreadyInUse"}, + {"shape":"InvalidSubnet"}, + {"shape":"UnauthorizedOperation"}, + {"shape":"DependentServiceRequestThrottlingFault"} + ] + }, + "ModifyEventSubscription":{ + "name":"ModifyEventSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyEventSubscriptionMessage"}, + "output":{ + "shape":"ModifyEventSubscriptionResult", + "resultWrapper":"ModifyEventSubscriptionResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"SNSInvalidTopicFault"}, + {"shape":"SNSNoAuthorizationFault"}, + {"shape":"SNSTopicArnNotFoundFault"}, + {"shape":"SubscriptionEventIdNotFoundFault"}, + {"shape":"SubscriptionCategoryNotFoundFault"}, + {"shape":"SubscriptionSeverityNotFoundFault"}, + {"shape":"SourceNotFoundFault"}, + {"shape":"InvalidSubscriptionStateFault"} + ] + }, + "ModifySnapshotCopyRetentionPeriod":{ + "name":"ModifySnapshotCopyRetentionPeriod", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifySnapshotCopyRetentionPeriodMessage"}, + "output":{ + "shape":"ModifySnapshotCopyRetentionPeriodResult", + "resultWrapper":"ModifySnapshotCopyRetentionPeriodResult" + }, + "errors":[ + {"shape":"ClusterNotFoundFault"}, + {"shape":"SnapshotCopyDisabledFault"}, + {"shape":"UnauthorizedOperation"}, + {"shape":"InvalidClusterStateFault"} + ] + }, + "PurchaseReservedNodeOffering":{ + "name":"PurchaseReservedNodeOffering", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PurchaseReservedNodeOfferingMessage"}, + "output":{ + "shape":"PurchaseReservedNodeOfferingResult", + "resultWrapper":"PurchaseReservedNodeOfferingResult" + }, + "errors":[ + {"shape":"ReservedNodeOfferingNotFoundFault"}, + {"shape":"ReservedNodeAlreadyExistsFault"}, + {"shape":"ReservedNodeQuotaExceededFault"}, + {"shape":"UnsupportedOperationFault"} + ] + }, + "RebootCluster":{ + "name":"RebootCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RebootClusterMessage"}, + "output":{ + "shape":"RebootClusterResult", + "resultWrapper":"RebootClusterResult" + }, + "errors":[ + {"shape":"InvalidClusterStateFault"}, + {"shape":"ClusterNotFoundFault"} + ] + }, + "ResetClusterParameterGroup":{ + "name":"ResetClusterParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetClusterParameterGroupMessage"}, + "output":{ + "shape":"ClusterParameterGroupNameMessage", + "resultWrapper":"ResetClusterParameterGroupResult" + }, + "errors":[ + {"shape":"InvalidClusterParameterGroupStateFault"}, + {"shape":"ClusterParameterGroupNotFoundFault"} + ] + }, + "RestoreFromClusterSnapshot":{ + "name":"RestoreFromClusterSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreFromClusterSnapshotMessage"}, + "output":{ + "shape":"RestoreFromClusterSnapshotResult", + "resultWrapper":"RestoreFromClusterSnapshotResult" + }, + "errors":[ + {"shape":"AccessToSnapshotDeniedFault"}, + {"shape":"ClusterAlreadyExistsFault"}, + {"shape":"ClusterSnapshotNotFoundFault"}, + {"shape":"ClusterQuotaExceededFault"}, + {"shape":"InsufficientClusterCapacityFault"}, + {"shape":"InvalidClusterSnapshotStateFault"}, + {"shape":"InvalidRestoreFault"}, + {"shape":"NumberOfNodesQuotaExceededFault"}, + 
{"shape":"NumberOfNodesPerClusterLimitExceededFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"InvalidClusterSubnetGroupStateFault"}, + {"shape":"InvalidSubnet"}, + {"shape":"ClusterSubnetGroupNotFoundFault"}, + {"shape":"UnauthorizedOperation"}, + {"shape":"HsmClientCertificateNotFoundFault"}, + {"shape":"HsmConfigurationNotFoundFault"}, + {"shape":"InvalidElasticIpFault"}, + {"shape":"ClusterParameterGroupNotFoundFault"}, + {"shape":"ClusterSecurityGroupNotFoundFault"}, + {"shape":"LimitExceededFault"}, + {"shape":"DependentServiceRequestThrottlingFault"} + ] + }, + "RestoreTableFromClusterSnapshot":{ + "name":"RestoreTableFromClusterSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreTableFromClusterSnapshotMessage"}, + "output":{ + "shape":"RestoreTableFromClusterSnapshotResult", + "resultWrapper":"RestoreTableFromClusterSnapshotResult" + }, + "errors":[ + {"shape":"ClusterSnapshotNotFoundFault"}, + {"shape":"InProgressTableRestoreQuotaExceededFault"}, + {"shape":"InvalidClusterSnapshotStateFault"}, + {"shape":"InvalidTableRestoreArgumentFault"}, + {"shape":"ClusterNotFoundFault"}, + {"shape":"InvalidClusterStateFault"}, + {"shape":"UnsupportedOperationFault"} + ] + }, + "RevokeClusterSecurityGroupIngress":{ + "name":"RevokeClusterSecurityGroupIngress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RevokeClusterSecurityGroupIngressMessage"}, + "output":{ + "shape":"RevokeClusterSecurityGroupIngressResult", + "resultWrapper":"RevokeClusterSecurityGroupIngressResult" + }, + "errors":[ + {"shape":"ClusterSecurityGroupNotFoundFault"}, + {"shape":"AuthorizationNotFoundFault"}, + {"shape":"InvalidClusterSecurityGroupStateFault"} + ] + }, + "RevokeSnapshotAccess":{ + "name":"RevokeSnapshotAccess", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RevokeSnapshotAccessMessage"}, + "output":{ + "shape":"RevokeSnapshotAccessResult", + "resultWrapper":"RevokeSnapshotAccessResult" + }, + "errors":[ + {"shape":"AccessToSnapshotDeniedFault"}, + {"shape":"AuthorizationNotFoundFault"}, + {"shape":"ClusterSnapshotNotFoundFault"} + ] + }, + "RotateEncryptionKey":{ + "name":"RotateEncryptionKey", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RotateEncryptionKeyMessage"}, + "output":{ + "shape":"RotateEncryptionKeyResult", + "resultWrapper":"RotateEncryptionKeyResult" + }, + "errors":[ + {"shape":"ClusterNotFoundFault"}, + {"shape":"InvalidClusterStateFault"}, + {"shape":"DependentServiceRequestThrottlingFault"} + ] + } + }, + "shapes":{ + "AccessToSnapshotDeniedFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AccessToSnapshotDenied", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "AccountWithRestoreAccess":{ + "type":"structure", + "members":{ + "AccountId":{"shape":"String"} + } + }, + "AccountsWithRestoreAccessList":{ + "type":"list", + "member":{ + "shape":"AccountWithRestoreAccess", + "locationName":"AccountWithRestoreAccess" + } + }, + "AuthorizationAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AuthorizationAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "AuthorizationNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AuthorizationNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "AuthorizationQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + 
"code":"AuthorizationQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "AuthorizeClusterSecurityGroupIngressMessage":{ + "type":"structure", + "required":["ClusterSecurityGroupName"], + "members":{ + "ClusterSecurityGroupName":{"shape":"String"}, + "CIDRIP":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"} + } + }, + "AuthorizeClusterSecurityGroupIngressResult":{ + "type":"structure", + "members":{ + "ClusterSecurityGroup":{"shape":"ClusterSecurityGroup"} + } + }, + "AuthorizeSnapshotAccessMessage":{ + "type":"structure", + "required":[ + "SnapshotIdentifier", + "AccountWithRestoreAccess" + ], + "members":{ + "SnapshotIdentifier":{"shape":"String"}, + "SnapshotClusterIdentifier":{"shape":"String"}, + "AccountWithRestoreAccess":{"shape":"String"} + } + }, + "AuthorizeSnapshotAccessResult":{ + "type":"structure", + "members":{ + "Snapshot":{"shape":"Snapshot"} + } + }, + "AvailabilityZone":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"} + }, + "wrapper":true + }, + "AvailabilityZoneList":{ + "type":"list", + "member":{ + "shape":"AvailabilityZone", + "locationName":"AvailabilityZone" + } + }, + "Boolean":{"type":"boolean"}, + "BooleanOptional":{"type":"boolean"}, + "BucketNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"BucketNotFoundFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Cluster":{ + "type":"structure", + "members":{ + "ClusterIdentifier":{"shape":"String"}, + "NodeType":{"shape":"String"}, + "ClusterStatus":{"shape":"String"}, + "ModifyStatus":{"shape":"String"}, + "MasterUsername":{"shape":"String"}, + "DBName":{"shape":"String"}, + "Endpoint":{"shape":"Endpoint"}, + "ClusterCreateTime":{"shape":"TStamp"}, + "AutomatedSnapshotRetentionPeriod":{"shape":"Integer"}, + "ClusterSecurityGroups":{"shape":"ClusterSecurityGroupMembershipList"}, + "VpcSecurityGroups":{"shape":"VpcSecurityGroupMembershipList"}, + "ClusterParameterGroups":{"shape":"ClusterParameterGroupStatusList"}, + "ClusterSubnetGroupName":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "AvailabilityZone":{"shape":"String"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "PendingModifiedValues":{"shape":"PendingModifiedValues"}, + "ClusterVersion":{"shape":"String"}, + "AllowVersionUpgrade":{"shape":"Boolean"}, + "NumberOfNodes":{"shape":"Integer"}, + "PubliclyAccessible":{"shape":"Boolean"}, + "Encrypted":{"shape":"Boolean"}, + "RestoreStatus":{"shape":"RestoreStatus"}, + "HsmStatus":{"shape":"HsmStatus"}, + "ClusterSnapshotCopyStatus":{"shape":"ClusterSnapshotCopyStatus"}, + "ClusterPublicKey":{"shape":"String"}, + "ClusterNodes":{"shape":"ClusterNodesList"}, + "ElasticIpStatus":{"shape":"ElasticIpStatus"}, + "ClusterRevisionNumber":{"shape":"String"}, + "Tags":{"shape":"TagList"}, + "KmsKeyId":{"shape":"String"}, + "IamRoles":{"shape":"ClusterIamRoleList"} + }, + "wrapper":true + }, + "ClusterAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ClusterAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ClusterIamRole":{ + "type":"structure", + "members":{ + "IamRoleArn":{"shape":"String"}, + "ApplyStatus":{"shape":"String"} + } + }, + "ClusterIamRoleList":{ + "type":"list", + "member":{ + "shape":"ClusterIamRole", + "locationName":"ClusterIamRole" + } + }, + "ClusterList":{ + "type":"list", + "member":{ + "shape":"Cluster", + "locationName":"Cluster" 
+ } + }, + "ClusterNode":{ + "type":"structure", + "members":{ + "NodeRole":{"shape":"String"}, + "PrivateIPAddress":{"shape":"String"}, + "PublicIPAddress":{"shape":"String"} + } + }, + "ClusterNodesList":{ + "type":"list", + "member":{"shape":"ClusterNode"} + }, + "ClusterNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ClusterNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ClusterParameterGroup":{ + "type":"structure", + "members":{ + "ParameterGroupName":{"shape":"String"}, + "ParameterGroupFamily":{"shape":"String"}, + "Description":{"shape":"String"}, + "Tags":{"shape":"TagList"} + }, + "wrapper":true + }, + "ClusterParameterGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ClusterParameterGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ClusterParameterGroupDetails":{ + "type":"structure", + "members":{ + "Parameters":{"shape":"ParametersList"}, + "Marker":{"shape":"String"} + } + }, + "ClusterParameterGroupNameMessage":{ + "type":"structure", + "members":{ + "ParameterGroupName":{"shape":"String"}, + "ParameterGroupStatus":{"shape":"String"} + } + }, + "ClusterParameterGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ClusterParameterGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ClusterParameterGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ClusterParameterGroupQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ClusterParameterGroupStatus":{ + "type":"structure", + "members":{ + "ParameterGroupName":{"shape":"String"}, + "ParameterApplyStatus":{"shape":"String"}, + "ClusterParameterStatusList":{"shape":"ClusterParameterStatusList"} + } + }, + "ClusterParameterGroupStatusList":{ + "type":"list", + "member":{ + "shape":"ClusterParameterGroupStatus", + "locationName":"ClusterParameterGroup" + } + }, + "ClusterParameterGroupsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ParameterGroups":{"shape":"ParameterGroupList"} + } + }, + "ClusterParameterStatus":{ + "type":"structure", + "members":{ + "ParameterName":{"shape":"String"}, + "ParameterApplyStatus":{"shape":"String"}, + "ParameterApplyErrorDescription":{"shape":"String"} + } + }, + "ClusterParameterStatusList":{ + "type":"list", + "member":{"shape":"ClusterParameterStatus"} + }, + "ClusterQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ClusterQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ClusterSecurityGroup":{ + "type":"structure", + "members":{ + "ClusterSecurityGroupName":{"shape":"String"}, + "Description":{"shape":"String"}, + "EC2SecurityGroups":{"shape":"EC2SecurityGroupList"}, + "IPRanges":{"shape":"IPRangeList"}, + "Tags":{"shape":"TagList"} + }, + "wrapper":true + }, + "ClusterSecurityGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ClusterSecurityGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ClusterSecurityGroupMembership":{ + "type":"structure", + "members":{ + "ClusterSecurityGroupName":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "ClusterSecurityGroupMembershipList":{ + "type":"list", + "member":{ + "shape":"ClusterSecurityGroupMembership", + 
"locationName":"ClusterSecurityGroup" + } + }, + "ClusterSecurityGroupMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ClusterSecurityGroups":{"shape":"ClusterSecurityGroups"} + } + }, + "ClusterSecurityGroupNameList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ClusterSecurityGroupName" + } + }, + "ClusterSecurityGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ClusterSecurityGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ClusterSecurityGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"QuotaExceeded.ClusterSecurityGroup", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ClusterSecurityGroups":{ + "type":"list", + "member":{ + "shape":"ClusterSecurityGroup", + "locationName":"ClusterSecurityGroup" + } + }, + "ClusterSnapshotAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ClusterSnapshotAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ClusterSnapshotCopyStatus":{ + "type":"structure", + "members":{ + "DestinationRegion":{"shape":"String"}, + "RetentionPeriod":{"shape":"Long"}, + "SnapshotCopyGrantName":{"shape":"String"} + } + }, + "ClusterSnapshotNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ClusterSnapshotNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ClusterSnapshotQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ClusterSnapshotQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ClusterSubnetGroup":{ + "type":"structure", + "members":{ + "ClusterSubnetGroupName":{"shape":"String"}, + "Description":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "SubnetGroupStatus":{"shape":"String"}, + "Subnets":{"shape":"SubnetList"}, + "Tags":{"shape":"TagList"} + }, + "wrapper":true + }, + "ClusterSubnetGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ClusterSubnetGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ClusterSubnetGroupMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ClusterSubnetGroups":{"shape":"ClusterSubnetGroups"} + } + }, + "ClusterSubnetGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ClusterSubnetGroupNotFoundFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ClusterSubnetGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ClusterSubnetGroupQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ClusterSubnetGroups":{ + "type":"list", + "member":{ + "shape":"ClusterSubnetGroup", + "locationName":"ClusterSubnetGroup" + } + }, + "ClusterSubnetQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ClusterSubnetQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ClusterVersion":{ + "type":"structure", + "members":{ + "ClusterVersion":{"shape":"String"}, + "ClusterParameterGroupFamily":{"shape":"String"}, + "Description":{"shape":"String"} + } + }, + "ClusterVersionList":{ + "type":"list", + "member":{ + "shape":"ClusterVersion", + "locationName":"ClusterVersion" + } + }, + "ClusterVersionsMessage":{ + 
"type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ClusterVersions":{"shape":"ClusterVersionList"} + } + }, + "ClustersMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "Clusters":{"shape":"ClusterList"} + } + }, + "CopyClusterSnapshotMessage":{ + "type":"structure", + "required":[ + "SourceSnapshotIdentifier", + "TargetSnapshotIdentifier" + ], + "members":{ + "SourceSnapshotIdentifier":{"shape":"String"}, + "SourceSnapshotClusterIdentifier":{"shape":"String"}, + "TargetSnapshotIdentifier":{"shape":"String"} + } + }, + "CopyClusterSnapshotResult":{ + "type":"structure", + "members":{ + "Snapshot":{"shape":"Snapshot"} + } + }, + "CopyToRegionDisabledFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"CopyToRegionDisabledFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "CreateClusterMessage":{ + "type":"structure", + "required":[ + "ClusterIdentifier", + "NodeType", + "MasterUsername", + "MasterUserPassword" + ], + "members":{ + "DBName":{"shape":"String"}, + "ClusterIdentifier":{"shape":"String"}, + "ClusterType":{"shape":"String"}, + "NodeType":{"shape":"String"}, + "MasterUsername":{"shape":"String"}, + "MasterUserPassword":{"shape":"String"}, + "ClusterSecurityGroups":{"shape":"ClusterSecurityGroupNameList"}, + "VpcSecurityGroupIds":{"shape":"VpcSecurityGroupIdList"}, + "ClusterSubnetGroupName":{"shape":"String"}, + "AvailabilityZone":{"shape":"String"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "ClusterParameterGroupName":{"shape":"String"}, + "AutomatedSnapshotRetentionPeriod":{"shape":"IntegerOptional"}, + "Port":{"shape":"IntegerOptional"}, + "ClusterVersion":{"shape":"String"}, + "AllowVersionUpgrade":{"shape":"BooleanOptional"}, + "NumberOfNodes":{"shape":"IntegerOptional"}, + "PubliclyAccessible":{"shape":"BooleanOptional"}, + "Encrypted":{"shape":"BooleanOptional"}, + "HsmClientCertificateIdentifier":{"shape":"String"}, + "HsmConfigurationIdentifier":{"shape":"String"}, + "ElasticIp":{"shape":"String"}, + "Tags":{"shape":"TagList"}, + "KmsKeyId":{"shape":"String"}, + "AdditionalInfo":{"shape":"String"}, + "IamRoles":{"shape":"IamRoleArnList"} + } + }, + "CreateClusterParameterGroupMessage":{ + "type":"structure", + "required":[ + "ParameterGroupName", + "ParameterGroupFamily", + "Description" + ], + "members":{ + "ParameterGroupName":{"shape":"String"}, + "ParameterGroupFamily":{"shape":"String"}, + "Description":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateClusterParameterGroupResult":{ + "type":"structure", + "members":{ + "ClusterParameterGroup":{"shape":"ClusterParameterGroup"} + } + }, + "CreateClusterResult":{ + "type":"structure", + "members":{ + "Cluster":{"shape":"Cluster"} + } + }, + "CreateClusterSecurityGroupMessage":{ + "type":"structure", + "required":[ + "ClusterSecurityGroupName", + "Description" + ], + "members":{ + "ClusterSecurityGroupName":{"shape":"String"}, + "Description":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateClusterSecurityGroupResult":{ + "type":"structure", + "members":{ + "ClusterSecurityGroup":{"shape":"ClusterSecurityGroup"} + } + }, + "CreateClusterSnapshotMessage":{ + "type":"structure", + "required":[ + "SnapshotIdentifier", + "ClusterIdentifier" + ], + "members":{ + "SnapshotIdentifier":{"shape":"String"}, + "ClusterIdentifier":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateClusterSnapshotResult":{ + "type":"structure", + "members":{ + "Snapshot":{"shape":"Snapshot"} 
+ } + }, + "CreateClusterSubnetGroupMessage":{ + "type":"structure", + "required":[ + "ClusterSubnetGroupName", + "Description", + "SubnetIds" + ], + "members":{ + "ClusterSubnetGroupName":{"shape":"String"}, + "Description":{"shape":"String"}, + "SubnetIds":{"shape":"SubnetIdentifierList"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateClusterSubnetGroupResult":{ + "type":"structure", + "members":{ + "ClusterSubnetGroup":{"shape":"ClusterSubnetGroup"} + } + }, + "CreateEventSubscriptionMessage":{ + "type":"structure", + "required":[ + "SubscriptionName", + "SnsTopicArn" + ], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SnsTopicArn":{"shape":"String"}, + "SourceType":{"shape":"String"}, + "SourceIds":{"shape":"SourceIdsList"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "Severity":{"shape":"String"}, + "Enabled":{"shape":"BooleanOptional"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateEventSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "CreateHsmClientCertificateMessage":{ + "type":"structure", + "required":["HsmClientCertificateIdentifier"], + "members":{ + "HsmClientCertificateIdentifier":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateHsmClientCertificateResult":{ + "type":"structure", + "members":{ + "HsmClientCertificate":{"shape":"HsmClientCertificate"} + } + }, + "CreateHsmConfigurationMessage":{ + "type":"structure", + "required":[ + "HsmConfigurationIdentifier", + "Description", + "HsmIpAddress", + "HsmPartitionName", + "HsmPartitionPassword", + "HsmServerPublicCertificate" + ], + "members":{ + "HsmConfigurationIdentifier":{"shape":"String"}, + "Description":{"shape":"String"}, + "HsmIpAddress":{"shape":"String"}, + "HsmPartitionName":{"shape":"String"}, + "HsmPartitionPassword":{"shape":"String"}, + "HsmServerPublicCertificate":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateHsmConfigurationResult":{ + "type":"structure", + "members":{ + "HsmConfiguration":{"shape":"HsmConfiguration"} + } + }, + "CreateSnapshotCopyGrantMessage":{ + "type":"structure", + "required":["SnapshotCopyGrantName"], + "members":{ + "SnapshotCopyGrantName":{"shape":"String"}, + "KmsKeyId":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateSnapshotCopyGrantResult":{ + "type":"structure", + "members":{ + "SnapshotCopyGrant":{"shape":"SnapshotCopyGrant"} + } + }, + "CreateTagsMessage":{ + "type":"structure", + "required":[ + "ResourceName", + "Tags" + ], + "members":{ + "ResourceName":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "DefaultClusterParameters":{ + "type":"structure", + "members":{ + "ParameterGroupFamily":{"shape":"String"}, + "Marker":{"shape":"String"}, + "Parameters":{"shape":"ParametersList"} + }, + "wrapper":true + }, + "DeleteClusterMessage":{ + "type":"structure", + "required":["ClusterIdentifier"], + "members":{ + "ClusterIdentifier":{"shape":"String"}, + "SkipFinalClusterSnapshot":{"shape":"Boolean"}, + "FinalClusterSnapshotIdentifier":{"shape":"String"} + } + }, + "DeleteClusterParameterGroupMessage":{ + "type":"structure", + "required":["ParameterGroupName"], + "members":{ + "ParameterGroupName":{"shape":"String"} + } + }, + "DeleteClusterResult":{ + "type":"structure", + "members":{ + "Cluster":{"shape":"Cluster"} + } + }, + "DeleteClusterSecurityGroupMessage":{ + "type":"structure", + "required":["ClusterSecurityGroupName"], + "members":{ + "ClusterSecurityGroupName":{"shape":"String"} + } + }, + 
"DeleteClusterSnapshotMessage":{ + "type":"structure", + "required":["SnapshotIdentifier"], + "members":{ + "SnapshotIdentifier":{"shape":"String"}, + "SnapshotClusterIdentifier":{"shape":"String"} + } + }, + "DeleteClusterSnapshotResult":{ + "type":"structure", + "members":{ + "Snapshot":{"shape":"Snapshot"} + } + }, + "DeleteClusterSubnetGroupMessage":{ + "type":"structure", + "required":["ClusterSubnetGroupName"], + "members":{ + "ClusterSubnetGroupName":{"shape":"String"} + } + }, + "DeleteEventSubscriptionMessage":{ + "type":"structure", + "required":["SubscriptionName"], + "members":{ + "SubscriptionName":{"shape":"String"} + } + }, + "DeleteHsmClientCertificateMessage":{ + "type":"structure", + "required":["HsmClientCertificateIdentifier"], + "members":{ + "HsmClientCertificateIdentifier":{"shape":"String"} + } + }, + "DeleteHsmConfigurationMessage":{ + "type":"structure", + "required":["HsmConfigurationIdentifier"], + "members":{ + "HsmConfigurationIdentifier":{"shape":"String"} + } + }, + "DeleteSnapshotCopyGrantMessage":{ + "type":"structure", + "required":["SnapshotCopyGrantName"], + "members":{ + "SnapshotCopyGrantName":{"shape":"String"} + } + }, + "DeleteTagsMessage":{ + "type":"structure", + "required":[ + "ResourceName", + "TagKeys" + ], + "members":{ + "ResourceName":{"shape":"String"}, + "TagKeys":{"shape":"TagKeyList"} + } + }, + "DependentServiceRequestThrottlingFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DependentServiceRequestThrottlingFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DescribeClusterParameterGroupsMessage":{ + "type":"structure", + "members":{ + "ParameterGroupName":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"}, + "TagKeys":{"shape":"TagKeyList"}, + "TagValues":{"shape":"TagValueList"} + } + }, + "DescribeClusterParametersMessage":{ + "type":"structure", + "required":["ParameterGroupName"], + "members":{ + "ParameterGroupName":{"shape":"String"}, + "Source":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeClusterSecurityGroupsMessage":{ + "type":"structure", + "members":{ + "ClusterSecurityGroupName":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"}, + "TagKeys":{"shape":"TagKeyList"}, + "TagValues":{"shape":"TagValueList"} + } + }, + "DescribeClusterSnapshotsMessage":{ + "type":"structure", + "members":{ + "ClusterIdentifier":{"shape":"String"}, + "SnapshotIdentifier":{"shape":"String"}, + "SnapshotType":{"shape":"String"}, + "StartTime":{"shape":"TStamp"}, + "EndTime":{"shape":"TStamp"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"}, + "OwnerAccount":{"shape":"String"}, + "TagKeys":{"shape":"TagKeyList"}, + "TagValues":{"shape":"TagValueList"} + } + }, + "DescribeClusterSubnetGroupsMessage":{ + "type":"structure", + "members":{ + "ClusterSubnetGroupName":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"}, + "TagKeys":{"shape":"TagKeyList"}, + "TagValues":{"shape":"TagValueList"} + } + }, + "DescribeClusterVersionsMessage":{ + "type":"structure", + "members":{ + "ClusterVersion":{"shape":"String"}, + "ClusterParameterGroupFamily":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeClustersMessage":{ + "type":"structure", + "members":{ + "ClusterIdentifier":{"shape":"String"}, + 
"MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"}, + "TagKeys":{"shape":"TagKeyList"}, + "TagValues":{"shape":"TagValueList"} + } + }, + "DescribeDefaultClusterParametersMessage":{ + "type":"structure", + "required":["ParameterGroupFamily"], + "members":{ + "ParameterGroupFamily":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDefaultClusterParametersResult":{ + "type":"structure", + "members":{ + "DefaultClusterParameters":{"shape":"DefaultClusterParameters"} + } + }, + "DescribeEventCategoriesMessage":{ + "type":"structure", + "members":{ + "SourceType":{"shape":"String"} + } + }, + "DescribeEventSubscriptionsMessage":{ + "type":"structure", + "members":{ + "SubscriptionName":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeEventsMessage":{ + "type":"structure", + "members":{ + "SourceIdentifier":{"shape":"String"}, + "SourceType":{"shape":"SourceType"}, + "StartTime":{"shape":"TStamp"}, + "EndTime":{"shape":"TStamp"}, + "Duration":{"shape":"IntegerOptional"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeHsmClientCertificatesMessage":{ + "type":"structure", + "members":{ + "HsmClientCertificateIdentifier":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"}, + "TagKeys":{"shape":"TagKeyList"}, + "TagValues":{"shape":"TagValueList"} + } + }, + "DescribeHsmConfigurationsMessage":{ + "type":"structure", + "members":{ + "HsmConfigurationIdentifier":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"}, + "TagKeys":{"shape":"TagKeyList"}, + "TagValues":{"shape":"TagValueList"} + } + }, + "DescribeLoggingStatusMessage":{ + "type":"structure", + "required":["ClusterIdentifier"], + "members":{ + "ClusterIdentifier":{"shape":"String"} + } + }, + "DescribeOrderableClusterOptionsMessage":{ + "type":"structure", + "members":{ + "ClusterVersion":{"shape":"String"}, + "NodeType":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeReservedNodeOfferingsMessage":{ + "type":"structure", + "members":{ + "ReservedNodeOfferingId":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeReservedNodesMessage":{ + "type":"structure", + "members":{ + "ReservedNodeId":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeResizeMessage":{ + "type":"structure", + "required":["ClusterIdentifier"], + "members":{ + "ClusterIdentifier":{"shape":"String"} + } + }, + "DescribeSnapshotCopyGrantsMessage":{ + "type":"structure", + "members":{ + "SnapshotCopyGrantName":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"}, + "TagKeys":{"shape":"TagKeyList"}, + "TagValues":{"shape":"TagValueList"} + } + }, + "DescribeTableRestoreStatusMessage":{ + "type":"structure", + "members":{ + "ClusterIdentifier":{"shape":"String"}, + "TableRestoreRequestId":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeTagsMessage":{ + "type":"structure", + "members":{ + "ResourceName":{"shape":"String"}, + "ResourceType":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"}, + "TagKeys":{"shape":"TagKeyList"}, + "TagValues":{"shape":"TagValueList"} + } 
+ }, + "DisableLoggingMessage":{ + "type":"structure", + "required":["ClusterIdentifier"], + "members":{ + "ClusterIdentifier":{"shape":"String"} + } + }, + "DisableSnapshotCopyMessage":{ + "type":"structure", + "required":["ClusterIdentifier"], + "members":{ + "ClusterIdentifier":{"shape":"String"} + } + }, + "DisableSnapshotCopyResult":{ + "type":"structure", + "members":{ + "Cluster":{"shape":"Cluster"} + } + }, + "Double":{"type":"double"}, + "DoubleOptional":{"type":"double"}, + "EC2SecurityGroup":{ + "type":"structure", + "members":{ + "Status":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "EC2SecurityGroupList":{ + "type":"list", + "member":{ + "shape":"EC2SecurityGroup", + "locationName":"EC2SecurityGroup" + } + }, + "ElasticIpStatus":{ + "type":"structure", + "members":{ + "ElasticIp":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "EnableLoggingMessage":{ + "type":"structure", + "required":[ + "ClusterIdentifier", + "BucketName" + ], + "members":{ + "ClusterIdentifier":{"shape":"String"}, + "BucketName":{"shape":"String"}, + "S3KeyPrefix":{"shape":"String"} + } + }, + "EnableSnapshotCopyMessage":{ + "type":"structure", + "required":[ + "ClusterIdentifier", + "DestinationRegion" + ], + "members":{ + "ClusterIdentifier":{"shape":"String"}, + "DestinationRegion":{"shape":"String"}, + "RetentionPeriod":{"shape":"IntegerOptional"}, + "SnapshotCopyGrantName":{"shape":"String"} + } + }, + "EnableSnapshotCopyResult":{ + "type":"structure", + "members":{ + "Cluster":{"shape":"Cluster"} + } + }, + "Endpoint":{ + "type":"structure", + "members":{ + "Address":{"shape":"String"}, + "Port":{"shape":"Integer"} + } + }, + "Event":{ + "type":"structure", + "members":{ + "SourceIdentifier":{"shape":"String"}, + "SourceType":{"shape":"SourceType"}, + "Message":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "Severity":{"shape":"String"}, + "Date":{"shape":"TStamp"}, + "EventId":{"shape":"String"} + } + }, + "EventCategoriesList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"EventCategory" + } + }, + "EventCategoriesMap":{ + "type":"structure", + "members":{ + "SourceType":{"shape":"String"}, + "Events":{"shape":"EventInfoMapList"} + }, + "wrapper":true + }, + "EventCategoriesMapList":{ + "type":"list", + "member":{ + "shape":"EventCategoriesMap", + "locationName":"EventCategoriesMap" + } + }, + "EventCategoriesMessage":{ + "type":"structure", + "members":{ + "EventCategoriesMapList":{"shape":"EventCategoriesMapList"} + } + }, + "EventInfoMap":{ + "type":"structure", + "members":{ + "EventId":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "EventDescription":{"shape":"String"}, + "Severity":{"shape":"String"} + }, + "wrapper":true + }, + "EventInfoMapList":{ + "type":"list", + "member":{ + "shape":"EventInfoMap", + "locationName":"EventInfoMap" + } + }, + "EventList":{ + "type":"list", + "member":{ + "shape":"Event", + "locationName":"Event" + } + }, + "EventSubscription":{ + "type":"structure", + "members":{ + "CustomerAwsId":{"shape":"String"}, + "CustSubscriptionId":{"shape":"String"}, + "SnsTopicArn":{"shape":"String"}, + "Status":{"shape":"String"}, + "SubscriptionCreationTime":{"shape":"TStamp"}, + "SourceType":{"shape":"String"}, + "SourceIdsList":{"shape":"SourceIdsList"}, + "EventCategoriesList":{"shape":"EventCategoriesList"}, + "Severity":{"shape":"String"}, + "Enabled":{"shape":"Boolean"}, + 
"Tags":{"shape":"TagList"} + }, + "wrapper":true + }, + "EventSubscriptionQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"EventSubscriptionQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "EventSubscriptionsList":{ + "type":"list", + "member":{ + "shape":"EventSubscription", + "locationName":"EventSubscription" + } + }, + "EventSubscriptionsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "EventSubscriptionsList":{"shape":"EventSubscriptionsList"} + } + }, + "EventsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "Events":{"shape":"EventList"} + } + }, + "HsmClientCertificate":{ + "type":"structure", + "members":{ + "HsmClientCertificateIdentifier":{"shape":"String"}, + "HsmClientCertificatePublicKey":{"shape":"String"}, + "Tags":{"shape":"TagList"} + }, + "wrapper":true + }, + "HsmClientCertificateAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"HsmClientCertificateAlreadyExistsFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "HsmClientCertificateList":{ + "type":"list", + "member":{ + "shape":"HsmClientCertificate", + "locationName":"HsmClientCertificate" + } + }, + "HsmClientCertificateMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "HsmClientCertificates":{"shape":"HsmClientCertificateList"} + } + }, + "HsmClientCertificateNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"HsmClientCertificateNotFoundFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "HsmClientCertificateQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"HsmClientCertificateQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "HsmConfiguration":{ + "type":"structure", + "members":{ + "HsmConfigurationIdentifier":{"shape":"String"}, + "Description":{"shape":"String"}, + "HsmIpAddress":{"shape":"String"}, + "HsmPartitionName":{"shape":"String"}, + "Tags":{"shape":"TagList"} + }, + "wrapper":true + }, + "HsmConfigurationAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"HsmConfigurationAlreadyExistsFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "HsmConfigurationList":{ + "type":"list", + "member":{ + "shape":"HsmConfiguration", + "locationName":"HsmConfiguration" + } + }, + "HsmConfigurationMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "HsmConfigurations":{"shape":"HsmConfigurationList"} + } + }, + "HsmConfigurationNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"HsmConfigurationNotFoundFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "HsmConfigurationQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"HsmConfigurationQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "HsmStatus":{ + "type":"structure", + "members":{ + "HsmClientCertificateIdentifier":{"shape":"String"}, + "HsmConfigurationIdentifier":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "IPRange":{ + "type":"structure", + "members":{ + "Status":{"shape":"String"}, + "CIDRIP":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "IPRangeList":{ + "type":"list", + "member":{ + "shape":"IPRange", + 
"locationName":"IPRange" + } + }, + "IamRoleArnList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"IamRoleArn" + } + }, + "ImportTablesCompleted":{ + "type":"list", + "member":{"shape":"String"} + }, + "ImportTablesInProgress":{ + "type":"list", + "member":{"shape":"String"} + }, + "ImportTablesNotStarted":{ + "type":"list", + "member":{"shape":"String"} + }, + "InProgressTableRestoreQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InProgressTableRestoreQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "IncompatibleOrderableOptions":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"IncompatibleOrderableOptions", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InsufficientClusterCapacityFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InsufficientClusterCapacity", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InsufficientS3BucketPolicyFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InsufficientS3BucketPolicyFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Integer":{"type":"integer"}, + "IntegerOptional":{"type":"integer"}, + "InvalidClusterParameterGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidClusterParameterGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidClusterSecurityGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidClusterSecurityGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidClusterSnapshotStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidClusterSnapshotState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidClusterStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidClusterState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidClusterSubnetGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidClusterSubnetGroupStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidClusterSubnetStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidClusterSubnetStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidElasticIpFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidElasticIpFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidHsmClientCertificateStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidHsmClientCertificateStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidHsmConfigurationStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidHsmConfigurationStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidRestoreFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidRestore", + "httpStatusCode":406, + "senderFault":true + }, + "exception":true + }, + "InvalidS3BucketNameFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidS3BucketNameFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, 
+ "InvalidS3KeyPrefixFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidS3KeyPrefixFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidSnapshotCopyGrantStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidSnapshotCopyGrantStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidSubnet":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidSubnet", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidSubscriptionStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidSubscriptionStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidTableRestoreArgumentFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidTableRestoreArgument", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidTagFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidTagFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidVPCNetworkStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidVPCNetworkStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "LimitExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"LimitExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "LoggingStatus":{ + "type":"structure", + "members":{ + "LoggingEnabled":{"shape":"Boolean"}, + "BucketName":{"shape":"String"}, + "S3KeyPrefix":{"shape":"String"}, + "LastSuccessfulDeliveryTime":{"shape":"TStamp"}, + "LastFailureTime":{"shape":"TStamp"}, + "LastFailureMessage":{"shape":"String"} + } + }, + "Long":{"type":"long"}, + "LongOptional":{"type":"long"}, + "ModifyClusterIamRolesMessage":{ + "type":"structure", + "required":["ClusterIdentifier"], + "members":{ + "ClusterIdentifier":{"shape":"String"}, + "AddIamRoles":{"shape":"IamRoleArnList"}, + "RemoveIamRoles":{"shape":"IamRoleArnList"} + } + }, + "ModifyClusterIamRolesResult":{ + "type":"structure", + "members":{ + "Cluster":{"shape":"Cluster"} + } + }, + "ModifyClusterMessage":{ + "type":"structure", + "required":["ClusterIdentifier"], + "members":{ + "ClusterIdentifier":{"shape":"String"}, + "ClusterType":{"shape":"String"}, + "NodeType":{"shape":"String"}, + "NumberOfNodes":{"shape":"IntegerOptional"}, + "ClusterSecurityGroups":{"shape":"ClusterSecurityGroupNameList"}, + "VpcSecurityGroupIds":{"shape":"VpcSecurityGroupIdList"}, + "MasterUserPassword":{"shape":"String"}, + "ClusterParameterGroupName":{"shape":"String"}, + "AutomatedSnapshotRetentionPeriod":{"shape":"IntegerOptional"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "ClusterVersion":{"shape":"String"}, + "AllowVersionUpgrade":{"shape":"BooleanOptional"}, + "HsmClientCertificateIdentifier":{"shape":"String"}, + "HsmConfigurationIdentifier":{"shape":"String"}, + "NewClusterIdentifier":{"shape":"String"}, + "PubliclyAccessible":{"shape":"BooleanOptional"}, + "ElasticIp":{"shape":"String"} + } + }, + "ModifyClusterParameterGroupMessage":{ + "type":"structure", + "required":[ + "ParameterGroupName", + "Parameters" + ], + "members":{ + "ParameterGroupName":{"shape":"String"}, + "Parameters":{"shape":"ParametersList"} + } + }, + "ModifyClusterResult":{ + "type":"structure", + "members":{ + "Cluster":{"shape":"Cluster"} + } + }, + 
"ModifyClusterSubnetGroupMessage":{ + "type":"structure", + "required":[ + "ClusterSubnetGroupName", + "SubnetIds" + ], + "members":{ + "ClusterSubnetGroupName":{"shape":"String"}, + "Description":{"shape":"String"}, + "SubnetIds":{"shape":"SubnetIdentifierList"} + } + }, + "ModifyClusterSubnetGroupResult":{ + "type":"structure", + "members":{ + "ClusterSubnetGroup":{"shape":"ClusterSubnetGroup"} + } + }, + "ModifyEventSubscriptionMessage":{ + "type":"structure", + "required":["SubscriptionName"], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SnsTopicArn":{"shape":"String"}, + "SourceType":{"shape":"String"}, + "SourceIds":{"shape":"SourceIdsList"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "Severity":{"shape":"String"}, + "Enabled":{"shape":"BooleanOptional"} + } + }, + "ModifyEventSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "ModifySnapshotCopyRetentionPeriodMessage":{ + "type":"structure", + "required":[ + "ClusterIdentifier", + "RetentionPeriod" + ], + "members":{ + "ClusterIdentifier":{"shape":"String"}, + "RetentionPeriod":{"shape":"Integer"} + } + }, + "ModifySnapshotCopyRetentionPeriodResult":{ + "type":"structure", + "members":{ + "Cluster":{"shape":"Cluster"} + } + }, + "NumberOfNodesPerClusterLimitExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"NumberOfNodesPerClusterLimitExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "NumberOfNodesQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"NumberOfNodesQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "OrderableClusterOption":{ + "type":"structure", + "members":{ + "ClusterVersion":{"shape":"String"}, + "ClusterType":{"shape":"String"}, + "NodeType":{"shape":"String"}, + "AvailabilityZones":{"shape":"AvailabilityZoneList"} + }, + "wrapper":true + }, + "OrderableClusterOptionsList":{ + "type":"list", + "member":{ + "shape":"OrderableClusterOption", + "locationName":"OrderableClusterOption" + } + }, + "OrderableClusterOptionsMessage":{ + "type":"structure", + "members":{ + "OrderableClusterOptions":{"shape":"OrderableClusterOptionsList"}, + "Marker":{"shape":"String"} + } + }, + "Parameter":{ + "type":"structure", + "members":{ + "ParameterName":{"shape":"String"}, + "ParameterValue":{"shape":"String"}, + "Description":{"shape":"String"}, + "Source":{"shape":"String"}, + "DataType":{"shape":"String"}, + "AllowedValues":{"shape":"String"}, + "ApplyType":{"shape":"ParameterApplyType"}, + "IsModifiable":{"shape":"Boolean"}, + "MinimumEngineVersion":{"shape":"String"} + } + }, + "ParameterApplyType":{ + "type":"string", + "enum":[ + "static", + "dynamic" + ] + }, + "ParameterGroupList":{ + "type":"list", + "member":{ + "shape":"ClusterParameterGroup", + "locationName":"ClusterParameterGroup" + } + }, + "ParametersList":{ + "type":"list", + "member":{ + "shape":"Parameter", + "locationName":"Parameter" + } + }, + "PendingModifiedValues":{ + "type":"structure", + "members":{ + "MasterUserPassword":{"shape":"String"}, + "NodeType":{"shape":"String"}, + "NumberOfNodes":{"shape":"IntegerOptional"}, + "ClusterType":{"shape":"String"}, + "ClusterVersion":{"shape":"String"}, + "AutomatedSnapshotRetentionPeriod":{"shape":"IntegerOptional"}, + "ClusterIdentifier":{"shape":"String"}, + "PubliclyAccessible":{"shape":"BooleanOptional"} + } + }, + "PurchaseReservedNodeOfferingMessage":{ + "type":"structure", + 
"required":["ReservedNodeOfferingId"], + "members":{ + "ReservedNodeOfferingId":{"shape":"String"}, + "NodeCount":{"shape":"IntegerOptional"} + } + }, + "PurchaseReservedNodeOfferingResult":{ + "type":"structure", + "members":{ + "ReservedNode":{"shape":"ReservedNode"} + } + }, + "RebootClusterMessage":{ + "type":"structure", + "required":["ClusterIdentifier"], + "members":{ + "ClusterIdentifier":{"shape":"String"} + } + }, + "RebootClusterResult":{ + "type":"structure", + "members":{ + "Cluster":{"shape":"Cluster"} + } + }, + "RecurringCharge":{ + "type":"structure", + "members":{ + "RecurringChargeAmount":{"shape":"Double"}, + "RecurringChargeFrequency":{"shape":"String"} + }, + "wrapper":true + }, + "RecurringChargeList":{ + "type":"list", + "member":{ + "shape":"RecurringCharge", + "locationName":"RecurringCharge" + } + }, + "ReservedNode":{ + "type":"structure", + "members":{ + "ReservedNodeId":{"shape":"String"}, + "ReservedNodeOfferingId":{"shape":"String"}, + "NodeType":{"shape":"String"}, + "StartTime":{"shape":"TStamp"}, + "Duration":{"shape":"Integer"}, + "FixedPrice":{"shape":"Double"}, + "UsagePrice":{"shape":"Double"}, + "CurrencyCode":{"shape":"String"}, + "NodeCount":{"shape":"Integer"}, + "State":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "RecurringCharges":{"shape":"RecurringChargeList"} + }, + "wrapper":true + }, + "ReservedNodeAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedNodeAlreadyExists", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ReservedNodeList":{ + "type":"list", + "member":{ + "shape":"ReservedNode", + "locationName":"ReservedNode" + } + }, + "ReservedNodeNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedNodeNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ReservedNodeOffering":{ + "type":"structure", + "members":{ + "ReservedNodeOfferingId":{"shape":"String"}, + "NodeType":{"shape":"String"}, + "Duration":{"shape":"Integer"}, + "FixedPrice":{"shape":"Double"}, + "UsagePrice":{"shape":"Double"}, + "CurrencyCode":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "RecurringCharges":{"shape":"RecurringChargeList"} + }, + "wrapper":true + }, + "ReservedNodeOfferingList":{ + "type":"list", + "member":{ + "shape":"ReservedNodeOffering", + "locationName":"ReservedNodeOffering" + } + }, + "ReservedNodeOfferingNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedNodeOfferingNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ReservedNodeOfferingsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ReservedNodeOfferings":{"shape":"ReservedNodeOfferingList"} + } + }, + "ReservedNodeQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedNodeQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ReservedNodesMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ReservedNodes":{"shape":"ReservedNodeList"} + } + }, + "ResetClusterParameterGroupMessage":{ + "type":"structure", + "required":["ParameterGroupName"], + "members":{ + "ParameterGroupName":{"shape":"String"}, + "ResetAllParameters":{"shape":"Boolean"}, + "Parameters":{"shape":"ParametersList"} + } + }, + "ResizeNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ResizeNotFound", + "httpStatusCode":404, + 
"senderFault":true + }, + "exception":true + }, + "ResizeProgressMessage":{ + "type":"structure", + "members":{ + "TargetNodeType":{"shape":"String"}, + "TargetNumberOfNodes":{"shape":"IntegerOptional"}, + "TargetClusterType":{"shape":"String"}, + "Status":{"shape":"String"}, + "ImportTablesCompleted":{"shape":"ImportTablesCompleted"}, + "ImportTablesInProgress":{"shape":"ImportTablesInProgress"}, + "ImportTablesNotStarted":{"shape":"ImportTablesNotStarted"}, + "AvgResizeRateInMegaBytesPerSecond":{"shape":"DoubleOptional"}, + "TotalResizeDataInMegaBytes":{"shape":"LongOptional"}, + "ProgressInMegaBytes":{"shape":"LongOptional"}, + "ElapsedTimeInSeconds":{"shape":"LongOptional"}, + "EstimatedTimeToCompletionInSeconds":{"shape":"LongOptional"} + } + }, + "ResourceNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ResourceNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "RestorableNodeTypeList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"NodeType" + } + }, + "RestoreFromClusterSnapshotMessage":{ + "type":"structure", + "required":[ + "ClusterIdentifier", + "SnapshotIdentifier" + ], + "members":{ + "ClusterIdentifier":{"shape":"String"}, + "SnapshotIdentifier":{"shape":"String"}, + "SnapshotClusterIdentifier":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "AvailabilityZone":{"shape":"String"}, + "AllowVersionUpgrade":{"shape":"BooleanOptional"}, + "ClusterSubnetGroupName":{"shape":"String"}, + "PubliclyAccessible":{"shape":"BooleanOptional"}, + "OwnerAccount":{"shape":"String"}, + "HsmClientCertificateIdentifier":{"shape":"String"}, + "HsmConfigurationIdentifier":{"shape":"String"}, + "ElasticIp":{"shape":"String"}, + "ClusterParameterGroupName":{"shape":"String"}, + "ClusterSecurityGroups":{"shape":"ClusterSecurityGroupNameList"}, + "VpcSecurityGroupIds":{"shape":"VpcSecurityGroupIdList"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "AutomatedSnapshotRetentionPeriod":{"shape":"IntegerOptional"}, + "KmsKeyId":{"shape":"String"}, + "NodeType":{"shape":"String"}, + "AdditionalInfo":{"shape":"String"}, + "IamRoles":{"shape":"IamRoleArnList"} + } + }, + "RestoreFromClusterSnapshotResult":{ + "type":"structure", + "members":{ + "Cluster":{"shape":"Cluster"} + } + }, + "RestoreStatus":{ + "type":"structure", + "members":{ + "Status":{"shape":"String"}, + "CurrentRestoreRateInMegaBytesPerSecond":{"shape":"Double"}, + "SnapshotSizeInMegaBytes":{"shape":"Long"}, + "ProgressInMegaBytes":{"shape":"Long"}, + "ElapsedTimeInSeconds":{"shape":"Long"}, + "EstimatedTimeToCompletionInSeconds":{"shape":"Long"} + } + }, + "RestoreTableFromClusterSnapshotMessage":{ + "type":"structure", + "required":[ + "ClusterIdentifier", + "SnapshotIdentifier", + "SourceDatabaseName", + "SourceTableName", + "NewTableName" + ], + "members":{ + "ClusterIdentifier":{"shape":"String"}, + "SnapshotIdentifier":{"shape":"String"}, + "SourceDatabaseName":{"shape":"String"}, + "SourceSchemaName":{"shape":"String"}, + "SourceTableName":{"shape":"String"}, + "TargetDatabaseName":{"shape":"String"}, + "TargetSchemaName":{"shape":"String"}, + "NewTableName":{"shape":"String"} + } + }, + "RestoreTableFromClusterSnapshotResult":{ + "type":"structure", + "members":{ + "TableRestoreStatus":{"shape":"TableRestoreStatus"} + } + }, + "RevokeClusterSecurityGroupIngressMessage":{ + "type":"structure", + "required":["ClusterSecurityGroupName"], + "members":{ + "ClusterSecurityGroupName":{"shape":"String"}, + 
"CIDRIP":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"} + } + }, + "RevokeClusterSecurityGroupIngressResult":{ + "type":"structure", + "members":{ + "ClusterSecurityGroup":{"shape":"ClusterSecurityGroup"} + } + }, + "RevokeSnapshotAccessMessage":{ + "type":"structure", + "required":[ + "SnapshotIdentifier", + "AccountWithRestoreAccess" + ], + "members":{ + "SnapshotIdentifier":{"shape":"String"}, + "SnapshotClusterIdentifier":{"shape":"String"}, + "AccountWithRestoreAccess":{"shape":"String"} + } + }, + "RevokeSnapshotAccessResult":{ + "type":"structure", + "members":{ + "Snapshot":{"shape":"Snapshot"} + } + }, + "RotateEncryptionKeyMessage":{ + "type":"structure", + "required":["ClusterIdentifier"], + "members":{ + "ClusterIdentifier":{"shape":"String"} + } + }, + "RotateEncryptionKeyResult":{ + "type":"structure", + "members":{ + "Cluster":{"shape":"Cluster"} + } + }, + "SNSInvalidTopicFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SNSInvalidTopic", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SNSNoAuthorizationFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SNSNoAuthorization", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SNSTopicArnNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SNSTopicArnNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "Snapshot":{ + "type":"structure", + "members":{ + "SnapshotIdentifier":{"shape":"String"}, + "ClusterIdentifier":{"shape":"String"}, + "SnapshotCreateTime":{"shape":"TStamp"}, + "Status":{"shape":"String"}, + "Port":{"shape":"Integer"}, + "AvailabilityZone":{"shape":"String"}, + "ClusterCreateTime":{"shape":"TStamp"}, + "MasterUsername":{"shape":"String"}, + "ClusterVersion":{"shape":"String"}, + "SnapshotType":{"shape":"String"}, + "NodeType":{"shape":"String"}, + "NumberOfNodes":{"shape":"Integer"}, + "DBName":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "Encrypted":{"shape":"Boolean"}, + "KmsKeyId":{"shape":"String"}, + "EncryptedWithHSM":{"shape":"Boolean"}, + "AccountsWithRestoreAccess":{"shape":"AccountsWithRestoreAccessList"}, + "OwnerAccount":{"shape":"String"}, + "TotalBackupSizeInMegaBytes":{"shape":"Double"}, + "ActualIncrementalBackupSizeInMegaBytes":{"shape":"Double"}, + "BackupProgressInMegaBytes":{"shape":"Double"}, + "CurrentBackupRateInMegaBytesPerSecond":{"shape":"Double"}, + "EstimatedSecondsToCompletion":{"shape":"Long"}, + "ElapsedTimeInSeconds":{"shape":"Long"}, + "SourceRegion":{"shape":"String"}, + "Tags":{"shape":"TagList"}, + "RestorableNodeTypes":{"shape":"RestorableNodeTypeList"} + }, + "wrapper":true + }, + "SnapshotCopyAlreadyDisabledFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SnapshotCopyAlreadyDisabledFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SnapshotCopyAlreadyEnabledFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SnapshotCopyAlreadyEnabledFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SnapshotCopyDisabledFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SnapshotCopyDisabledFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SnapshotCopyGrant":{ + "type":"structure", + "members":{ + "SnapshotCopyGrantName":{"shape":"String"}, + "KmsKeyId":{"shape":"String"}, + 
"Tags":{"shape":"TagList"} + }, + "wrapper":true + }, + "SnapshotCopyGrantAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SnapshotCopyGrantAlreadyExistsFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SnapshotCopyGrantList":{ + "type":"list", + "member":{ + "shape":"SnapshotCopyGrant", + "locationName":"SnapshotCopyGrant" + } + }, + "SnapshotCopyGrantMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "SnapshotCopyGrants":{"shape":"SnapshotCopyGrantList"} + } + }, + "SnapshotCopyGrantNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SnapshotCopyGrantNotFoundFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SnapshotCopyGrantQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SnapshotCopyGrantQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SnapshotList":{ + "type":"list", + "member":{ + "shape":"Snapshot", + "locationName":"Snapshot" + } + }, + "SnapshotMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "Snapshots":{"shape":"SnapshotList"} + } + }, + "SourceIdsList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SourceId" + } + }, + "SourceNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SourceType":{ + "type":"string", + "enum":[ + "cluster", + "cluster-parameter-group", + "cluster-security-group", + "cluster-snapshot" + ] + }, + "String":{"type":"string"}, + "Subnet":{ + "type":"structure", + "members":{ + "SubnetIdentifier":{"shape":"String"}, + "SubnetAvailabilityZone":{"shape":"AvailabilityZone"}, + "SubnetStatus":{"shape":"String"} + } + }, + "SubnetAlreadyInUse":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubnetAlreadyInUse", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SubnetIdentifierList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SubnetIdentifier" + } + }, + "SubnetList":{ + "type":"list", + "member":{ + "shape":"Subnet", + "locationName":"Subnet" + } + }, + "SubscriptionAlreadyExistFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionAlreadyExist", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SubscriptionCategoryNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionCategoryNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SubscriptionEventIdNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionEventIdNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SubscriptionNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SubscriptionSeverityNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionSeverityNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "TStamp":{"type":"timestamp"}, + "TableRestoreNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"TableRestoreNotFoundFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + 
"TableRestoreStatus":{ + "type":"structure", + "members":{ + "TableRestoreRequestId":{"shape":"String"}, + "Status":{"shape":"TableRestoreStatusType"}, + "Message":{"shape":"String"}, + "RequestTime":{"shape":"TStamp"}, + "ProgressInMegaBytes":{"shape":"LongOptional"}, + "TotalDataInMegaBytes":{"shape":"LongOptional"}, + "ClusterIdentifier":{"shape":"String"}, + "SnapshotIdentifier":{"shape":"String"}, + "SourceDatabaseName":{"shape":"String"}, + "SourceSchemaName":{"shape":"String"}, + "SourceTableName":{"shape":"String"}, + "TargetDatabaseName":{"shape":"String"}, + "TargetSchemaName":{"shape":"String"}, + "NewTableName":{"shape":"String"} + }, + "wrapper":true + }, + "TableRestoreStatusList":{ + "type":"list", + "member":{ + "shape":"TableRestoreStatus", + "locationName":"TableRestoreStatus" + } + }, + "TableRestoreStatusMessage":{ + "type":"structure", + "members":{ + "TableRestoreStatusDetails":{"shape":"TableRestoreStatusList"}, + "Marker":{"shape":"String"} + } + }, + "TableRestoreStatusType":{ + "type":"string", + "enum":[ + "PENDING", + "IN_PROGRESS", + "SUCCEEDED", + "FAILED", + "CANCELED" + ] + }, + "Tag":{ + "type":"structure", + "members":{ + "Key":{"shape":"String"}, + "Value":{"shape":"String"} + } + }, + "TagKeyList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"TagKey" + } + }, + "TagLimitExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"TagLimitExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "TagList":{ + "type":"list", + "member":{ + "shape":"Tag", + "locationName":"Tag" + } + }, + "TagValueList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"TagValue" + } + }, + "TaggedResource":{ + "type":"structure", + "members":{ + "Tag":{"shape":"Tag"}, + "ResourceName":{"shape":"String"}, + "ResourceType":{"shape":"String"} + } + }, + "TaggedResourceList":{ + "type":"list", + "member":{ + "shape":"TaggedResource", + "locationName":"TaggedResource" + } + }, + "TaggedResourceListMessage":{ + "type":"structure", + "members":{ + "TaggedResources":{"shape":"TaggedResourceList"}, + "Marker":{"shape":"String"} + } + }, + "UnauthorizedOperation":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"UnauthorizedOperation", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "UnknownSnapshotCopyRegionFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"UnknownSnapshotCopyRegionFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "UnsupportedOperationFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"UnsupportedOperation", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "UnsupportedOptionFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"UnsupportedOptionFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "VpcSecurityGroupIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"VpcSecurityGroupId" + } + }, + "VpcSecurityGroupMembership":{ + "type":"structure", + "members":{ + "VpcSecurityGroupId":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "VpcSecurityGroupMembershipList":{ + "type":"list", + "member":{ + "shape":"VpcSecurityGroupMembership", + "locationName":"VpcSecurityGroup" + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/redshift/2012-12-01/docs-2.json 
b/vendor/github.com/aws/aws-sdk-go/models/apis/redshift/2012-12-01/docs-2.json new file mode 100644 index 000000000..221b5c124 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/redshift/2012-12-01/docs-2.json @@ -0,0 +1,2135 @@ +{ + "version": "2.0", + "service": "Amazon Redshift Overview

    This is an interface reference for Amazon Redshift. It contains documentation for one of the programming or command line interfaces you can use to manage Amazon Redshift clusters. Note that Amazon Redshift is asynchronous, which means that some interfaces may require techniques, such as polling or asynchronous callback handlers, to determine when a command has been applied. In this reference, the parameter descriptions indicate whether a change is applied immediately, on the next instance reboot, or during the next maintenance window. For a summary of the Amazon Redshift cluster management interfaces, go to Using the Amazon Redshift Management Interfaces.

    Amazon Redshift manages all the work of setting up, operating, and scaling a data warehouse: provisioning capacity, monitoring and backing up the cluster, and applying patches and upgrades to the Amazon Redshift engine. You can focus on using your data to acquire new insights for your business and customers.

    If you are a first-time user of Amazon Redshift, we recommend that you begin by reading the Amazon Redshift Getting Started Guide.

    If you are a database developer, the Amazon Redshift Database Developer Guide explains how to design, build, query, and maintain the databases that make up your data warehouse.

    ", + "operations": { + "AuthorizeClusterSecurityGroupIngress": "

    Adds an inbound (ingress) rule to an Amazon Redshift security group. Depending on whether the application accessing your cluster is running on the Internet or an Amazon EC2 instance, you can authorize inbound access to either a Classless Interdomain Routing (CIDR)/Internet Protocol (IP) range or to an Amazon EC2 security group. You can add as many as 20 ingress rules to an Amazon Redshift security group.

    If you authorize access to an Amazon EC2 security group, specify EC2SecurityGroupName and EC2SecurityGroupOwnerId. The Amazon EC2 security group and Amazon Redshift cluster must be in the same AWS region.

    If you authorize access to a CIDR/IP address range, specify CIDRIP. For an overview of CIDR blocks, see the Wikipedia article on Classless Inter-Domain Routing.

    You must also associate the security group with a cluster so that clients running on these IP addresses or the EC2 instance are authorized to connect to the cluster. For information about managing security groups, go to Working with Security Groups in the Amazon Redshift Cluster Management Guide.
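    A minimal Go sketch of this call against the vendored redshift client follows; the region, security group name, and CIDR block are hypothetical placeholders, not values taken from this patch:

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/redshift"
        )

        func main() {
            // Credentials come from the SDK's usual config chain; region is an assumption.
            svc := redshift.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

            // Authorize a CIDR/IP range; for an EC2 security group, set
            // EC2SecurityGroupName and EC2SecurityGroupOwnerId instead.
            out, err := svc.AuthorizeClusterSecurityGroupIngress(&redshift.AuthorizeClusterSecurityGroupIngressInput{
                ClusterSecurityGroupName: aws.String("my-security-group"), // hypothetical
                CIDRIP:                   aws.String("192.0.2.0/24"),      // hypothetical
            })
            if err != nil {
                fmt.Println("authorize failed:", err)
                return
            }
            fmt.Println(out.ClusterSecurityGroup)
        }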

    ", + "AuthorizeSnapshotAccess": "

    Authorizes the specified AWS customer account to restore the specified snapshot.

    For more information about working with snapshots, go to Amazon Redshift Snapshots in the Amazon Redshift Cluster Management Guide.

    ", + "CopyClusterSnapshot": "

    Copies the specified automated cluster snapshot to a new manual cluster snapshot. The source must be an automated snapshot and it must be in the available state.

    When you delete a cluster, Amazon Redshift deletes any automated snapshots of the cluster. Also, when the retention period of the snapshot expires, Amazon Redshift automatically deletes it. If you want to keep an automated snapshot for a longer period, you can make a manual copy of the snapshot. Manual snapshots are retained until you delete them.

    For more information about working with snapshots, go to Amazon Redshift Snapshots in the Amazon Redshift Cluster Management Guide.

    ", + "CreateCluster": "

    Creates a new cluster. To create a cluster in a virtual private cloud (VPC), you must provide a cluster subnet group name. If you don't provide a cluster subnet group name or the cluster security group parameter, Amazon Redshift creates a non-VPC cluster and associates the default cluster security group with it. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.
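    Continuing the Go sketch above (same imports and svc client), a hedged fragment; all identifiers are assumptions:

        _, err := svc.CreateCluster(&redshift.CreateClusterInput{
            ClusterIdentifier:      aws.String("my-cluster-1"), // hypothetical
            NodeType:               aws.String("dc1.large"),
            MasterUsername:         aws.String("adminuser"),
            MasterUserPassword:     aws.String("ExamplePassw0rd"),
            NumberOfNodes:          aws.Int64(2),
            ClusterSubnetGroupName: aws.String("my-subnet-group"), // omit to create a non-VPC cluster
        })
        if err != nil {
            fmt.Println("create failed:", err)
        }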

    ", + "CreateClusterParameterGroup": "

    Creates an Amazon Redshift parameter group.

    Creating parameter groups is independent of creating clusters. You can associate a cluster with a parameter group when you create the cluster. You can also associate an existing cluster with a parameter group after the cluster is created by using ModifyCluster.

    Parameters in the parameter group define specific behavior that applies to the databases you create on the cluster. For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide.

    ", + "CreateClusterSecurityGroup": "

    Creates a new Amazon Redshift security group. You use security groups to control access to non-VPC clusters.

    For information about managing security groups, go to Amazon Redshift Cluster Security Groups in the Amazon Redshift Cluster Management Guide.

    ", + "CreateClusterSnapshot": "

    Creates a manual snapshot of the specified cluster. The cluster must be in the available state.

    For more information about working with snapshots, go to Amazon Redshift Snapshots in the Amazon Redshift Cluster Management Guide.

    ", + "CreateClusterSubnetGroup": "

    Creates a new Amazon Redshift subnet group. You must provide a list of one or more subnets in your existing Amazon Virtual Private Cloud (Amazon VPC) when creating an Amazon Redshift subnet group.

    For information about subnet groups, go to Amazon Redshift Cluster Subnet Groups in the Amazon Redshift Cluster Management Guide.

    ", + "CreateEventSubscription": "

    Creates an Amazon Redshift event notification subscription. This action requires an ARN (Amazon Resource Name) of an Amazon SNS topic created by either the Amazon Redshift console, the Amazon SNS console, or the Amazon SNS API. To obtain an ARN with Amazon SNS, you must create a topic in Amazon SNS and subscribe to the topic. The ARN is displayed in the SNS console.

    You can specify the source type, and lists of Amazon Redshift source IDs, event categories, and event severities. Notifications will be sent for all events you want that match those criteria. For example, you can specify source type = cluster, source IDs = my-cluster-1 and my-cluster-2, event categories = Availability, Backup, and severity = ERROR. The subscription will only send notifications for those ERROR events in the Availability and Backup categories for the specified clusters.

    If you specify both the source type and source IDs, such as source type = cluster and source identifier = my-cluster-1, notifications will be sent for all the cluster events for my-cluster-1. If you specify a source type but do not specify a source identifier, you will receive notice of the events for the objects of that type in your AWS account. If you do not specify either the SourceType or the SourceIdentifier, you will be notified of events generated from all Amazon Redshift sources belonging to your AWS account. You must specify a source type if you specify a source ID.
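    A hedged Go fragment (same svc client as the sketch above) subscribing to ERROR-severity Availability and Backup events for the two example clusters; the SNS topic ARN is a placeholder:

        _, err := svc.CreateEventSubscription(&redshift.CreateEventSubscriptionInput{
            SubscriptionName: aws.String("my-subscription"),
            SnsTopicArn:      aws.String("arn:aws:sns:us-east-1:123456789012:my-topic"), // placeholder
            SourceType:       aws.String("cluster"),
            SourceIds:        []*string{aws.String("my-cluster-1"), aws.String("my-cluster-2")},
            EventCategories:  []*string{aws.String("Availability"), aws.String("Backup")},
            Severity:         aws.String("ERROR"),
            Enabled:          aws.Bool(true),
        })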

    ", + "CreateHsmClientCertificate": "

    Creates an HSM client certificate that an Amazon Redshift cluster will use to connect to the client's HSM in order to store and retrieve the keys used to encrypt the cluster databases.

    The command returns a public key, which you must store in the HSM. In addition to creating the HSM certificate, you must create an Amazon Redshift HSM configuration that provides a cluster the information needed to store and use encryption keys in the HSM. For more information, go to Hardware Security Modules in the Amazon Redshift Cluster Management Guide.

    ", + "CreateHsmConfiguration": "

    Creates an HSM configuration that contains the information required by an Amazon Redshift cluster to store and use database encryption keys in a Hardware Security Module (HSM). After creating the HSM configuration, you can specify it as a parameter when creating a cluster. The cluster will then store its encryption keys in the HSM.

    In addition to creating an HSM configuration, you must also create an HSM client certificate. For more information, go to Hardware Security Modules in the Amazon Redshift Cluster Management Guide.

    ", + "CreateSnapshotCopyGrant": "

    Creates a snapshot copy grant that permits Amazon Redshift to use a customer master key (CMK) from AWS Key Management Service (AWS KMS) to encrypt copied snapshots in a destination region.

    For more information about managing snapshot copy grants, go to Amazon Redshift Database Encryption in the Amazon Redshift Cluster Management Guide.

    ", + "CreateTags": "

    Adds one or more tags to a specified resource.

    A resource can have up to 10 tags. If you try to create more than 10 tags for a resource, you will receive an error and the attempt will fail.

    If you specify a key that already exists for the resource, the value for that key will be updated with the new value.
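    A hedged Go fragment tagging a cluster by ARN (same svc client as above); the ARN and tag values are placeholders. Re-sending an existing key overwrites its value, per the description above:

        _, err := svc.CreateTags(&redshift.CreateTagsInput{
            ResourceName: aws.String("arn:aws:redshift:us-east-1:123456789012:cluster:my-cluster-1"), // placeholder
            Tags: []*redshift.Tag{
                {Key: aws.String("owner"), Value: aws.String("admin")},
                {Key: aws.String("environment"), Value: aws.String("test")},
            },
        })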

    ", + "DeleteCluster": "

    Deletes a previously provisioned cluster. A successful response from the web service indicates that the request was received correctly. Use DescribeClusters to monitor the status of the deletion. The delete operation cannot be canceled or reverted once submitted. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

    If you want to shut down the cluster and retain it for future use, set SkipFinalClusterSnapshot to false and specify a name for FinalClusterSnapshotIdentifier. You can later restore this snapshot to resume using the cluster. If a final cluster snapshot is requested, the status of the cluster will be \"final-snapshot\" while the snapshot is being taken, then it's \"deleting\" once Amazon Redshift begins deleting the cluster.

    For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.
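    A hedged Go fragment (same svc client as above) deleting a cluster while retaining a final snapshot so it can be restored later; identifiers are assumptions:

        _, err := svc.DeleteCluster(&redshift.DeleteClusterInput{
            ClusterIdentifier:              aws.String("my-cluster-1"),
            SkipFinalClusterSnapshot:       aws.Bool(false),
            FinalClusterSnapshotIdentifier: aws.String("my-cluster-1-final"),
        })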

    ", + "DeleteClusterParameterGroup": "

    Deletes a specified Amazon Redshift parameter group. You cannot delete a parameter group if it is associated with a cluster.

    ", + "DeleteClusterSecurityGroup": "

    Deletes an Amazon Redshift security group.

    You cannot delete a security group that is associated with any clusters. You cannot delete the default security group.

    For information about managing security groups, go to Amazon Redshift Cluster Security Groups in the Amazon Redshift Cluster Management Guide.

    ", + "DeleteClusterSnapshot": "

    Deletes the specified manual snapshot. The snapshot must be in the available state, with no other users authorized to access the snapshot.

    Unlike automated snapshots, manual snapshots are retained even after you delete your cluster. Amazon Redshift does not delete your manual snapshots. You must delete manual snapshots explicitly to avoid getting charged. If other accounts are authorized to access the snapshot, you must revoke all of the authorizations before you can delete the snapshot.

    ", + "DeleteClusterSubnetGroup": "

    Deletes the specified cluster subnet group.

    ", + "DeleteEventSubscription": "

    Deletes an Amazon Redshift event notification subscription.

    ", + "DeleteHsmClientCertificate": "

    Deletes the specified HSM client certificate.

    ", + "DeleteHsmConfiguration": "

    Deletes the specified Amazon Redshift HSM configuration.

    ", + "DeleteSnapshotCopyGrant": "

    Deletes the specified snapshot copy grant.

    ", + "DeleteTags": "

    Deletes a tag or tags from a resource. You must provide the ARN of the resource from which you want to delete the tag or tags.

    ", + "DescribeClusterParameterGroups": "

    Returns a list of Amazon Redshift parameter groups, including parameter groups you created and the default parameter group. For each parameter group, the response includes the parameter group name, description, and parameter group family name. You can optionally specify a name to retrieve the description of a specific parameter group.

    For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide.

    If you specify both tag keys and tag values in the same request, Amazon Redshift returns all parameter groups that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all parameter groups that have any combination of those values are returned.

    If both tag keys and values are omitted from the request, parameter groups are returned regardless of whether they have tag keys or values associated with them.

    ", + "DescribeClusterParameters": "

    Returns a detailed list of parameters contained within the specified Amazon Redshift parameter group. For each parameter the response includes information such as parameter name, description, data type, value, whether the parameter value is modifiable, and so on.

    You can specify a source filter to retrieve parameters of only a specific type. For example, to retrieve parameters that were modified by a user action such as from ModifyClusterParameterGroup, you can specify source equal to user.

    For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide.

    ", + "DescribeClusterSecurityGroups": "

    Returns information about Amazon Redshift security groups. If the name of a security group is specified, the response will contain information about only that security group.

    For information about managing security groups, go to Amazon Redshift Cluster Security Groups in the Amazon Redshift Cluster Management Guide.

    If you specify both tag keys and tag values in the same request, Amazon Redshift returns all security groups that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all security groups that have any combination of those values are returned.

    If both tag keys and values are omitted from the request, security groups are returned regardless of whether they have tag keys or values associated with them.

    ", + "DescribeClusterSnapshots": "

    Returns one or more snapshot objects, which contain metadata about your cluster snapshots. By default, this operation returns information about all snapshots of all clusters that are owned by your AWS customer account. No information is returned for snapshots owned by inactive AWS customer accounts.

    If you specify both tag keys and tag values in the same request, Amazon Redshift returns all snapshots that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all snapshots that have any combination of those values are returned. Only snapshots that you own are returned in the response; shared snapshots are not returned with the tag key and tag value request parameters.

    If both tag keys and values are omitted from the request, snapshots are returned regardless of whether they have tag keys or values associated with them.

    ", + "DescribeClusterSubnetGroups": "

    Returns one or more cluster subnet group objects, which contain metadata about your cluster subnet groups. By default, this operation returns information about all cluster subnet groups that are defined in your AWS account.

    If you specify both tag keys and tag values in the same request, Amazon Redshift returns all subnet groups that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all subnet groups that have any combination of those values are returned.

    If both tag keys and values are omitted from the request, subnet groups are returned regardless of whether they have tag keys or values associated with them.

    ", + "DescribeClusterVersions": "

    Returns descriptions of the available Amazon Redshift cluster versions. You can call this operation even before creating any clusters to learn more about the Amazon Redshift versions. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

    ", + "DescribeClusters": "

    Returns properties of provisioned clusters including general cluster properties, cluster database properties, maintenance and backup properties, and security and access properties. This operation supports pagination. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

    If you specify both tag keys and tag values in the same request, Amazon Redshift returns all clusters that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all clusters that have any combination of those values are returned.

    If both tag keys and values are omitted from the request, clusters are returned regardless of whether they have tag keys or values associated with them.

    ", + "DescribeDefaultClusterParameters": "

    Returns a list of parameter settings for the specified parameter group family.

    For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide.

    ", + "DescribeEventCategories": "

    Displays a list of event categories for all event source types, or for a specified source type. For a list of the event categories and source types, go to Amazon Redshift Event Notifications.

    ", + "DescribeEventSubscriptions": "

    Lists descriptions of all the Amazon Redshift event notification subscriptions for a customer account. If you specify a subscription name, lists the description for that subscription.

    ", + "DescribeEvents": "

    Returns events related to clusters, security groups, snapshots, and parameter groups for the past 14 days. Events specific to a particular cluster, security group, snapshot or parameter group can be obtained by providing the name as a parameter. By default, the past hour of events are returned.

    ", + "DescribeHsmClientCertificates": "

    Returns information about the specified HSM client certificate. If no certificate ID is specified, returns information about all the HSM certificates owned by your AWS customer account.

    If you specify both tag keys and tag values in the same request, Amazon Redshift returns all HSM client certificates that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all HSM client certificates that have any combination of those values are returned.

    If both tag keys and values are omitted from the request, HSM client certificates are returned regardless of whether they have tag keys or values associated with them.

    ", + "DescribeHsmConfigurations": "

    Returns information about the specified Amazon Redshift HSM configuration. If no configuration ID is specified, returns information about all the HSM configurations owned by your AWS customer account.

    If you specify both tag keys and tag values in the same request, Amazon Redshift returns all HSM connections that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all HSM connections that have any combination of those values are returned.

    If both tag keys and values are omitted from the request, HSM connections are returned regardless of whether they have tag keys or values associated with them.

    ", + "DescribeLoggingStatus": "

    Describes whether information, such as queries and connection attempts, is being logged for the specified Amazon Redshift cluster.

    ", + "DescribeOrderableClusterOptions": "

    Returns a list of orderable cluster options. Before you create a new cluster you can use this operation to find what options are available, such as the EC2 Availability Zones (AZ) in the specific AWS region that you can specify, and the node types you can request. The node types differ by available storage, memory, CPU, and price. With the cost involved, you might want to obtain a list of cluster options in the specific region and specify values when creating a cluster. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

    ", + "DescribeReservedNodeOfferings": "

    Returns a list of the available reserved node offerings by Amazon Redshift with their descriptions, including the node type, the fixed and recurring costs of reserving the node, and the duration the node will be reserved for you. These descriptions help you determine which reserved node offering you want to purchase. You then use the unique offering ID in your call to PurchaseReservedNodeOffering to reserve one or more nodes for your Amazon Redshift cluster.

    For more information about reserved node offerings, go to Purchasing Reserved Nodes in the Amazon Redshift Cluster Management Guide.

    ", + "DescribeReservedNodes": "

    Returns the descriptions of the reserved nodes.

    ", + "DescribeResize": "

    Returns information about the last resize operation for the specified cluster. If no resize operation has ever been initiated for the specified cluster, an HTTP 404 error is returned. If a resize operation was initiated and completed, the status of the resize remains as SUCCEEDED until the next resize.

    A resize operation can be requested using ModifyCluster and specifying a different number or type of nodes for the cluster.

    ", + "DescribeSnapshotCopyGrants": "

    Returns a list of snapshot copy grants owned by the AWS account in the destination region.

    For more information about managing snapshot copy grants, go to Amazon Redshift Database Encryption in the Amazon Redshift Cluster Management Guide.

    ", + "DescribeTableRestoreStatus": "

    Lists the status of one or more table restore requests made using the RestoreTableFromClusterSnapshot API action. If you don't specify a value for the TableRestoreRequestId parameter, then DescribeTableRestoreStatus returns the status of all table restore requests ordered by the date and time of the request in ascending order. Otherwise DescribeTableRestoreStatus returns the status of the table specified by TableRestoreRequestId.

    ", + "DescribeTags": "

    Returns a list of tags. You can return tags from a specific resource by specifying an ARN, or you can return all tags for a given type of resource, such as clusters, snapshots, and so on.

    The following are limitations for DescribeTags:

    • You cannot specify an ARN and a resource-type value together in the same request.
    • You cannot use the MaxRecords and Marker parameters together with the ARN parameter.
    • The MaxRecords parameter can be a range from 10 to 50 results to return in a request.

    If you specify both tag keys and tag values in the same request, Amazon Redshift returns all resources that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all resources that have any combination of those values are returned.

    If both tag keys and values are omitted from the request, resources are returned regardless of whether they have tag keys or values associated with them.
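    A hedged Go fragment (same svc client as above) listing tags for all clusters that carry either example tag key; note the ARN and resource-type restrictions listed above:

        out, err := svc.DescribeTags(&redshift.DescribeTagsInput{
            ResourceType: aws.String("cluster"),
            TagKeys:      []*string{aws.String("owner"), aws.String("environment")},
            MaxRecords:   aws.Int64(50), // 10-50, per the limits above
        })
        if err == nil {
            for _, tr := range out.TaggedResources {
                // Fields are pointers and may be nil in practice; a sketch only.
                fmt.Println(*tr.ResourceName, *tr.Tag.Key, *tr.Tag.Value)
            }
        }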

    ", + "DisableLogging": "

    Stops logging information, such as queries and connection attempts, for the specified Amazon Redshift cluster.

    ", + "DisableSnapshotCopy": "

    Disables the automatic copying of snapshots from one region to another region for a specified cluster.

    If your cluster and its snapshots are encrypted using a customer master key (CMK) from AWS KMS, use DeleteSnapshotCopyGrant to delete the grant that grants Amazon Redshift permission to the CMK in the destination region.

    ", + "EnableLogging": "

    Starts logging information, such as queries and connection attempts, for the specified Amazon Redshift cluster.

    ", + "EnableSnapshotCopy": "

    Enables the automatic copy of snapshots from one region to another region for a specified cluster.

    ", + "ModifyCluster": "

    Modifies the settings for a cluster. For example, you can add another security or parameter group, update the preferred maintenance window, or change the master user password. Resetting a cluster password or modifying the security groups associated with a cluster does not require a reboot. However, modifying a parameter group requires a reboot for parameters to take effect. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

    You can also change node type and the number of nodes to scale up or down the cluster. When resizing a cluster, you must specify both the number of nodes and the node type even if one of the parameters does not change.
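    A hedged resize fragment (same svc client as above); both the node type and the node count are supplied, as required, even though only the count changes here:

        _, err := svc.ModifyCluster(&redshift.ModifyClusterInput{
            ClusterIdentifier: aws.String("my-cluster-1"),
            NodeType:          aws.String("dc1.large"),
            NumberOfNodes:     aws.Int64(4),
        })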

    ", + "ModifyClusterIamRoles": "

    Modifies the list of AWS Identity and Access Management (IAM) roles that can be used by the cluster to access other AWS services.

    A cluster can have up to 10 IAM roles associated at any time.

    ", + "ModifyClusterParameterGroup": "

    Modifies the parameters of a parameter group.

    For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide.

    ", + "ModifyClusterSubnetGroup": "

    Modifies a cluster subnet group to include the specified list of VPC subnets. The operation replaces the existing list of subnets with the new list of subnets.

    ", + "ModifyEventSubscription": "

    Modifies an existing Amazon Redshift event notification subscription.

    ", + "ModifySnapshotCopyRetentionPeriod": "

    Modifies the number of days to retain automated snapshots in the destination region after they are copied from the source region.

    ", + "PurchaseReservedNodeOffering": "

    Allows you to purchase reserved nodes. Amazon Redshift offers a predefined set of reserved node offerings. You can purchase one or more of the offerings. You can call the DescribeReservedNodeOfferings API to obtain the available reserved node offerings. You can call this API by providing a specific reserved node offering and the number of nodes you want to reserve.

    For more information about reserved node offerings, go to Purchasing Reserved Nodes in the Amazon Redshift Cluster Management Guide.
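    A hedged Go fragment (same svc client as above); the offering ID would come from a prior DescribeReservedNodeOfferings call and is a placeholder here:

        _, err := svc.PurchaseReservedNodeOffering(&redshift.PurchaseReservedNodeOfferingInput{
            ReservedNodeOfferingId: aws.String("offering-id-from-describe-call"), // placeholder
            NodeCount:              aws.Int64(2),
        })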

    ", + "RebootCluster": "

    Reboots a cluster. This action is taken as soon as possible. It results in a momentary outage to the cluster, during which the cluster status is set to rebooting. A cluster event is created when the reboot is completed. Any pending cluster modifications (see ModifyCluster) are applied at this reboot. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide

    ", + "ResetClusterParameterGroup": "

    Sets one or more parameters of the specified parameter group to their default values and sets the source values of the parameters to \"engine-default\". To reset the entire parameter group specify the ResetAllParameters parameter. For parameter changes to take effect you must reboot any associated clusters.

    ", + "RestoreFromClusterSnapshot": "

    Creates a new cluster from a snapshot. By default, Amazon Redshift creates the resulting cluster with the same configuration as the original cluster from which the snapshot was created, except that the new cluster is created with the default cluster security and parameter groups. After Amazon Redshift creates the cluster, you can use the ModifyCluster API to associate a different security group and different parameter group with the restored cluster. If you are using a DS node type, you can also choose to change to another DS node type of the same size during restore.

    If you restore a cluster into a VPC, you must provide a cluster subnet group where you want the cluster restored.

    For more information about working with snapshots, go to Amazon Redshift Snapshots in the Amazon Redshift Cluster Management Guide.
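    A hedged Go fragment (same svc client as above) restoring into a VPC, so a subnet group is supplied; only the two identifiers are required by the model earlier in this file:

        _, err := svc.RestoreFromClusterSnapshot(&redshift.RestoreFromClusterSnapshotInput{
            ClusterIdentifier:      aws.String("my-restored-cluster"),
            SnapshotIdentifier:     aws.String("my-cluster-1-final"),
            ClusterSubnetGroupName: aws.String("my-subnet-group"), // needed only when restoring into a VPC
        })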

    ", + "RestoreTableFromClusterSnapshot": "

    Creates a new table from a table in an Amazon Redshift cluster snapshot. You must create the new table within the Amazon Redshift cluster that the snapshot was taken from.

    You cannot use RestoreTableFromClusterSnapshot to restore a table with the same name as an existing table in an Amazon Redshift cluster. That is, you cannot overwrite an existing table in a cluster with a restored table. If you want to replace your original table with a new, restored table, then rename or drop your original table before you call RestoreTableFromClusterSnapshot. When you have renamed your original table, you can pass the original name of the table as the NewTableName parameter value in the call to RestoreTableFromClusterSnapshot. This way, you can replace the original table with the table created from the snapshot.
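    A hedged Go fragment (same svc client as above) restoring a table under a new name, mirroring the required members listed in the model earlier in this file; identifiers are assumptions:

        _, err := svc.RestoreTableFromClusterSnapshot(&redshift.RestoreTableFromClusterSnapshotInput{
            ClusterIdentifier:  aws.String("my-cluster-1"),
            SnapshotIdentifier: aws.String("my-snapshot"),
            SourceDatabaseName: aws.String("mydb"),
            SourceTableName:    aws.String("orders"),
            NewTableName:       aws.String("orders_restored"),
        })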

    ", + "RevokeClusterSecurityGroupIngress": "

    Revokes an ingress rule in an Amazon Redshift security group for a previously authorized IP range or Amazon EC2 security group. To add an ingress rule, see AuthorizeClusterSecurityGroupIngress. For information about managing security groups, go to Amazon Redshift Cluster Security Groups in the Amazon Redshift Cluster Management Guide.

    ", + "RevokeSnapshotAccess": "

    Removes the ability of the specified AWS customer account to restore the specified snapshot. If the account is currently restoring the snapshot, the restore will run to completion.

    For more information about working with snapshots, go to Amazon Redshift Snapshots in the Amazon Redshift Cluster Management Guide.

    ", + "RotateEncryptionKey": "

    Rotates the encryption keys for a cluster.

    " + }, + "shapes": { + "AccessToSnapshotDeniedFault": { + "base": "

    The owner of the specified snapshot has not authorized your account to access the snapshot.

    ", + "refs": { + } + }, + "AccountWithRestoreAccess": { + "base": "

    Describes an AWS customer account authorized to restore a snapshot.

    ", + "refs": { + "AccountsWithRestoreAccessList$member": null + } + }, + "AccountsWithRestoreAccessList": { + "base": null, + "refs": { + "Snapshot$AccountsWithRestoreAccess": "

    A list of the AWS customer accounts authorized to restore the snapshot. Returns null if no accounts are authorized. Visible only to the snapshot owner.

    " + } + }, + "AuthorizationAlreadyExistsFault": { + "base": "

    The specified CIDR block or EC2 security group is already authorized for the specified cluster security group.

    ", + "refs": { + } + }, + "AuthorizationNotFoundFault": { + "base": "

    The specified CIDR IP range or EC2 security group is not authorized for the specified cluster security group.

    ", + "refs": { + } + }, + "AuthorizationQuotaExceededFault": { + "base": "

    The authorization quota for the cluster security group has been reached.

    ", + "refs": { + } + }, + "AuthorizeClusterSecurityGroupIngressMessage": { + "base": "

    ", + "refs": { + } + }, + "AuthorizeClusterSecurityGroupIngressResult": { + "base": null, + "refs": { + } + }, + "AuthorizeSnapshotAccessMessage": { + "base": "

    ", + "refs": { + } + }, + "AuthorizeSnapshotAccessResult": { + "base": null, + "refs": { + } + }, + "AvailabilityZone": { + "base": "

    Describes an availability zone.

    ", + "refs": { + "AvailabilityZoneList$member": null, + "Subnet$SubnetAvailabilityZone": null + } + }, + "AvailabilityZoneList": { + "base": null, + "refs": { + "OrderableClusterOption$AvailabilityZones": "

    A list of availability zones for the orderable cluster.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "Cluster$AllowVersionUpgrade": "

    If true, major version upgrades will be applied automatically to the cluster during the maintenance window.

    ", + "Cluster$PubliclyAccessible": "

    If true, the cluster can be accessed from a public network.

    ", + "Cluster$Encrypted": "

    If true, data in the cluster is encrypted at rest.

    ", + "DeleteClusterMessage$SkipFinalClusterSnapshot": "

    Determines whether a final snapshot of the cluster is created before Amazon Redshift deletes the cluster. If true, a final cluster snapshot is not created. If false, a final cluster snapshot is created before the cluster is deleted.

    The FinalClusterSnapshotIdentifier parameter must be specified if SkipFinalClusterSnapshot is false.

    Default: false

    ", + "EventSubscription$Enabled": "

    A Boolean value indicating whether the subscription is enabled. true indicates the subscription is enabled.

    ", + "LoggingStatus$LoggingEnabled": "

    true if logging is on, false if logging is off.

    ", + "Parameter$IsModifiable": "

    If true, the parameter can be modified. Some parameters have security or operational implications that prevent them from being changed.

    ", + "ResetClusterParameterGroupMessage$ResetAllParameters": "

    If true, all parameters in the specified parameter group will be reset to their default values.

    Default: true

    ", + "Snapshot$Encrypted": "

    If true, the data in the snapshot is encrypted at rest.

    ", + "Snapshot$EncryptedWithHSM": "

    A boolean that indicates whether the snapshot data is encrypted using the HSM keys of the source cluster. true indicates that the data is encrypted using HSM keys.

    " + } + }, + "BooleanOptional": { + "base": null, + "refs": { + "CreateClusterMessage$AllowVersionUpgrade": "

    If true, major version upgrades can be applied during the maintenance window to the Amazon Redshift engine that is running on the cluster.

    When a new major version of the Amazon Redshift engine is released, you can request that the service automatically apply upgrades during the maintenance window to the Amazon Redshift engine that is running on your cluster.

    Default: true

    ", + "CreateClusterMessage$PubliclyAccessible": "

    If true, the cluster can be accessed from a public network.

    ", + "CreateClusterMessage$Encrypted": "

    If true, the data in the cluster is encrypted at rest.

    Default: false

    ", + "CreateEventSubscriptionMessage$Enabled": "

    A Boolean value; set to true to activate the subscription, and set to false to create the subscription but not activate it.

    ", + "ModifyClusterMessage$AllowVersionUpgrade": "

    If true, major version upgrades will be applied automatically to the cluster during the maintenance window.

    Default: false

    ", + "ModifyClusterMessage$PubliclyAccessible": "

    If true, the cluster can be accessed from a public network. Only clusters in VPCs can be set to be publicly available.

    ", + "ModifyEventSubscriptionMessage$Enabled": "

    A Boolean value indicating whether the subscription is enabled. true indicates the subscription is enabled.

    ", + "PendingModifiedValues$PubliclyAccessible": "

    The pending or in-progress change of the ability to connect to the cluster from the public network.

    ", + "RestoreFromClusterSnapshotMessage$AllowVersionUpgrade": "

    If true, major version upgrades can be applied during the maintenance window to the Amazon Redshift engine that is running on the cluster.

    Default: true

    ", + "RestoreFromClusterSnapshotMessage$PubliclyAccessible": "

    If true, the cluster can be accessed from a public network.

    " + } + }, + "BucketNotFoundFault": { + "base": "

    Could not find the specified S3 bucket.

    ", + "refs": { + } + }, + "Cluster": { + "base": "

    Describes a cluster.

    ", + "refs": { + "ClusterList$member": null, + "CreateClusterResult$Cluster": null, + "DeleteClusterResult$Cluster": null, + "DisableSnapshotCopyResult$Cluster": null, + "EnableSnapshotCopyResult$Cluster": null, + "ModifyClusterIamRolesResult$Cluster": null, + "ModifyClusterResult$Cluster": null, + "ModifySnapshotCopyRetentionPeriodResult$Cluster": null, + "RebootClusterResult$Cluster": null, + "RestoreFromClusterSnapshotResult$Cluster": null, + "RotateEncryptionKeyResult$Cluster": null + } + }, + "ClusterAlreadyExistsFault": { + "base": "

    The account already has a cluster with the given identifier.

    ", + "refs": { + } + }, + "ClusterIamRole": { + "base": "

    An AWS Identity and Access Management (IAM) role that can be used by the associated Amazon Redshift cluster to access other AWS services.

    ", + "refs": { + "ClusterIamRoleList$member": null + } + }, + "ClusterIamRoleList": { + "base": null, + "refs": { + "Cluster$IamRoles": "

    A list of AWS Identity and Access Management (IAM) roles that can be used by the cluster to access other AWS services.

    " + } + }, + "ClusterList": { + "base": null, + "refs": { + "ClustersMessage$Clusters": "

    A list of Cluster objects, where each object describes one cluster.

    " + } + }, + "ClusterNode": { + "base": "

    The identifier of a node in a cluster.

    ", + "refs": { + "ClusterNodesList$member": null + } + }, + "ClusterNodesList": { + "base": null, + "refs": { + "Cluster$ClusterNodes": "

    The nodes in a cluster.

    " + } + }, + "ClusterNotFoundFault": { + "base": "

    The ClusterIdentifier parameter does not refer to an existing cluster.

    ", + "refs": { + } + }, + "ClusterParameterGroup": { + "base": "

    Describes a parameter group.

    ", + "refs": { + "CreateClusterParameterGroupResult$ClusterParameterGroup": null, + "ParameterGroupList$member": null + } + }, + "ClusterParameterGroupAlreadyExistsFault": { + "base": "

    A cluster parameter group with the same name already exists.

    ", + "refs": { + } + }, + "ClusterParameterGroupDetails": { + "base": "

    Contains the output from the DescribeClusterParameters action.

    ", + "refs": { + } + }, + "ClusterParameterGroupNameMessage": { + "base": "

    ", + "refs": { + } + }, + "ClusterParameterGroupNotFoundFault": { + "base": "

    The parameter group name does not refer to an existing parameter group.

    ", + "refs": { + } + }, + "ClusterParameterGroupQuotaExceededFault": { + "base": "

    The request would result in the user exceeding the allowed number of cluster parameter groups. For information about increasing your quota, go to Limits in Amazon Redshift in the Amazon Redshift Cluster Management Guide.

    ", + "refs": { + } + }, + "ClusterParameterGroupStatus": { + "base": "

    Describes the status of a parameter group.

    ", + "refs": { + "ClusterParameterGroupStatusList$member": null + } + }, + "ClusterParameterGroupStatusList": { + "base": null, + "refs": { + "Cluster$ClusterParameterGroups": "

    The list of cluster parameter groups that are associated with this cluster. Each parameter group in the list is returned with its status.

    " + } + }, + "ClusterParameterGroupsMessage": { + "base": "

    Contains the output from the DescribeClusterParameterGroups action.

    ", + "refs": { + } + }, + "ClusterParameterStatus": { + "base": "

    Describes the status of a parameter group.

    ", + "refs": { + "ClusterParameterStatusList$member": null + } + }, + "ClusterParameterStatusList": { + "base": null, + "refs": { + "ClusterParameterGroupStatus$ClusterParameterStatusList": "

    The list of parameter statuses.

    For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide.

    " + } + }, + "ClusterQuotaExceededFault": { + "base": "

    The request would exceed the allowed number of cluster instances for this account. For information about increasing your quota, go to Limits in Amazon Redshift in the Amazon Redshift Cluster Management Guide.

    ", + "refs": { + } + }, + "ClusterSecurityGroup": { + "base": "

    Describes a security group.

    ", + "refs": { + "AuthorizeClusterSecurityGroupIngressResult$ClusterSecurityGroup": null, + "ClusterSecurityGroups$member": null, + "CreateClusterSecurityGroupResult$ClusterSecurityGroup": null, + "RevokeClusterSecurityGroupIngressResult$ClusterSecurityGroup": null + } + }, + "ClusterSecurityGroupAlreadyExistsFault": { + "base": "

    A cluster security group with the same name already exists.

    ", + "refs": { + } + }, + "ClusterSecurityGroupMembership": { + "base": "

    Describes a cluster security group.

    ", + "refs": { + "ClusterSecurityGroupMembershipList$member": null + } + }, + "ClusterSecurityGroupMembershipList": { + "base": null, + "refs": { + "Cluster$ClusterSecurityGroups": "

    A list of cluster security groups that are associated with the cluster. Each security group is represented by an element that contains ClusterSecurityGroup.Name and ClusterSecurityGroup.Status subelements.

    Cluster security groups are used when the cluster is not created in a VPC. Clusters that are created in a VPC use VPC security groups, which are listed by the VpcSecurityGroups parameter.

    " + } + }, + "ClusterSecurityGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "ClusterSecurityGroupNameList": { + "base": null, + "refs": { + "CreateClusterMessage$ClusterSecurityGroups": "

    A list of security groups to be associated with this cluster.

    Default: The default cluster security group for Amazon Redshift.

    ", + "ModifyClusterMessage$ClusterSecurityGroups": "

    A list of cluster security groups to be authorized on this cluster. This change is asynchronously applied as soon as possible.

    Security groups currently associated with the cluster, and not in the list of groups to apply, will be revoked from the cluster.

    Constraints:

    • Must be 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RestoreFromClusterSnapshotMessage$ClusterSecurityGroups": "

    A list of security groups to be associated with this cluster.

    Default: The default cluster security group for Amazon Redshift.

    Cluster security groups only apply to clusters outside of VPCs.

    " + } + }, + "ClusterSecurityGroupNotFoundFault": { + "base": "

    The cluster security group name does not refer to an existing cluster security group.

    ", + "refs": { + } + }, + "ClusterSecurityGroupQuotaExceededFault": { + "base": "

    The request would result in the user exceeding the allowed number of cluster security groups. For information about increasing your quota, go to Limits in Amazon Redshift in the Amazon Redshift Cluster Management Guide.

    ", + "refs": { + } + }, + "ClusterSecurityGroups": { + "base": null, + "refs": { + "ClusterSecurityGroupMessage$ClusterSecurityGroups": "

    A list of ClusterSecurityGroup instances.

    " + } + }, + "ClusterSnapshotAlreadyExistsFault": { + "base": "

    The value specified as a snapshot identifier is already used by an existing snapshot.

    ", + "refs": { + } + }, + "ClusterSnapshotCopyStatus": { + "base": "

    Returns the destination region and retention period that are configured for cross-region snapshot copy.

    ", + "refs": { + "Cluster$ClusterSnapshotCopyStatus": "

    Returns the destination region and retention period that are configured for cross-region snapshot copy.

    " + } + }, + "ClusterSnapshotNotFoundFault": { + "base": "

    The snapshot identifier does not refer to an existing cluster snapshot.

    ", + "refs": { + } + }, + "ClusterSnapshotQuotaExceededFault": { + "base": "

    The request would result in the user exceeding the allowed number of cluster snapshots.

    ", + "refs": { + } + }, + "ClusterSubnetGroup": { + "base": "

    Describes a subnet group.

    ", + "refs": { + "ClusterSubnetGroups$member": null, + "CreateClusterSubnetGroupResult$ClusterSubnetGroup": null, + "ModifyClusterSubnetGroupResult$ClusterSubnetGroup": null + } + }, + "ClusterSubnetGroupAlreadyExistsFault": { + "base": "

    A ClusterSubnetGroupName is already used by an existing cluster subnet group.

    ", + "refs": { + } + }, + "ClusterSubnetGroupMessage": { + "base": "

    Contains the output from the DescribeClusterSubnetGroups action.

    ", + "refs": { + } + }, + "ClusterSubnetGroupNotFoundFault": { + "base": "

    The cluster subnet group name does not refer to an existing cluster subnet group.

    ", + "refs": { + } + }, + "ClusterSubnetGroupQuotaExceededFault": { + "base": "

    The request would result in the user exceeding the allowed number of cluster subnet groups. For information about increasing your quota, go to Limits in Amazon Redshift in the Amazon Redshift Cluster Management Guide.

    ", + "refs": { + } + }, + "ClusterSubnetGroups": { + "base": null, + "refs": { + "ClusterSubnetGroupMessage$ClusterSubnetGroups": "

    A list of ClusterSubnetGroup instances.

    " + } + }, + "ClusterSubnetQuotaExceededFault": { + "base": "

    The request would result in the user exceeding the allowed number of subnets in a cluster subnet group. For information about increasing your quota, go to Limits in Amazon Redshift in the Amazon Redshift Cluster Management Guide.

    ", + "refs": { + } + }, + "ClusterVersion": { + "base": "

    Describes a cluster version, including the parameter group family and description of the version.

    ", + "refs": { + "ClusterVersionList$member": null + } + }, + "ClusterVersionList": { + "base": null, + "refs": { + "ClusterVersionsMessage$ClusterVersions": "

    A list of Version elements.

    " + } + }, + "ClusterVersionsMessage": { + "base": "

    Contains the output from the DescribeClusterVersions action.

    ", + "refs": { + } + }, + "ClustersMessage": { + "base": "

    Contains the output from the DescribeClusters action.

    ", + "refs": { + } + }, + "CopyClusterSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "CopyClusterSnapshotResult": { + "base": null, + "refs": { + } + }, + "CopyToRegionDisabledFault": { + "base": "

    Cross-region snapshot copy was temporarily disabled. Try your request again.

    ", + "refs": { + } + }, + "CreateClusterMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateClusterParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateClusterParameterGroupResult": { + "base": null, + "refs": { + } + }, + "CreateClusterResult": { + "base": null, + "refs": { + } + }, + "CreateClusterSecurityGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateClusterSecurityGroupResult": { + "base": null, + "refs": { + } + }, + "CreateClusterSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateClusterSnapshotResult": { + "base": null, + "refs": { + } + }, + "CreateClusterSubnetGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateClusterSubnetGroupResult": { + "base": null, + "refs": { + } + }, + "CreateEventSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateEventSubscriptionResult": { + "base": null, + "refs": { + } + }, + "CreateHsmClientCertificateMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateHsmClientCertificateResult": { + "base": null, + "refs": { + } + }, + "CreateHsmConfigurationMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateHsmConfigurationResult": { + "base": null, + "refs": { + } + }, + "CreateSnapshotCopyGrantMessage": { + "base": "

    The result of the CreateSnapshotCopyGrant action.

    ", + "refs": { + } + }, + "CreateSnapshotCopyGrantResult": { + "base": null, + "refs": { + } + }, + "CreateTagsMessage": { + "base": "

    Contains the output from the CreateTags action.

    ", + "refs": { + } + }, + "DefaultClusterParameters": { + "base": "

    Describes the default cluster parameters for a parameter group family.

    ", + "refs": { + "DescribeDefaultClusterParametersResult$DefaultClusterParameters": null + } + }, + "DeleteClusterMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteClusterParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteClusterResult": { + "base": null, + "refs": { + } + }, + "DeleteClusterSecurityGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteClusterSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteClusterSnapshotResult": { + "base": null, + "refs": { + } + }, + "DeleteClusterSubnetGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteEventSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteHsmClientCertificateMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteHsmConfigurationMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteSnapshotCopyGrantMessage": { + "base": "

    The result of the DeleteSnapshotCopyGrant action.

    ", + "refs": { + } + }, + "DeleteTagsMessage": { + "base": "

    Contains the output from the DeleteTags action.

    ", + "refs": { + } + }, + "DependentServiceRequestThrottlingFault": { + "base": "

    The request cannot be completed because a dependent service is throttling requests made by Amazon Redshift on your behalf. Wait and retry the request.

    ", + "refs": { + } + }, + "DescribeClusterParameterGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeClusterParametersMessage": { + "base": null, + "refs": { + } + }, + "DescribeClusterSecurityGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeClusterSnapshotsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeClusterSubnetGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeClusterVersionsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeClustersMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDefaultClusterParametersMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDefaultClusterParametersResult": { + "base": null, + "refs": { + } + }, + "DescribeEventCategoriesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEventSubscriptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEventsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeHsmClientCertificatesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeHsmConfigurationsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeLoggingStatusMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeOrderableClusterOptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeReservedNodeOfferingsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeReservedNodesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeResizeMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeSnapshotCopyGrantsMessage": { + "base": "

    The result of the DescribeSnapshotCopyGrants action.

    ", + "refs": { + } + }, + "DescribeTableRestoreStatusMessage": { + "base": null, + "refs": { + } + }, + "DescribeTagsMessage": { + "base": "

    ", + "refs": { + } + }, + "DisableLoggingMessage": { + "base": "

    ", + "refs": { + } + }, + "DisableSnapshotCopyMessage": { + "base": "

    ", + "refs": { + } + }, + "DisableSnapshotCopyResult": { + "base": null, + "refs": { + } + }, + "Double": { + "base": null, + "refs": { + "RecurringCharge$RecurringChargeAmount": "

+    "Double": {
+      "base": null,
+      "refs": {
+        "RecurringCharge$RecurringChargeAmount": "The amount charged per the period of time specified by the recurring charge frequency.",
+        "ReservedNode$FixedPrice": "The fixed cost Amazon Redshift charges you for this reserved node.",
+        "ReservedNode$UsagePrice": "The hourly rate Amazon Redshift charges you for this reserved node.",
+        "ReservedNodeOffering$FixedPrice": "The upfront fixed charge you will pay to purchase the specific reserved node offering.",
+        "ReservedNodeOffering$UsagePrice": "The rate you are charged for each hour the cluster that is using the offering is running.",
+        "RestoreStatus$CurrentRestoreRateInMegaBytesPerSecond": "The number of megabytes per second being transferred from the backup storage. Returns the average rate for a completed backup.",
+        "Snapshot$TotalBackupSizeInMegaBytes": "The size of the complete set of backup data that would be used to restore the cluster.",
+        "Snapshot$ActualIncrementalBackupSizeInMegaBytes": "The size of the incremental backup.",
+        "Snapshot$BackupProgressInMegaBytes": "The number of megabytes that have been transferred to the snapshot backup.",
+        "Snapshot$CurrentBackupRateInMegaBytesPerSecond": "The number of megabytes per second being transferred to the snapshot backup. Returns 0 for a completed backup."
+      }
+    },
+    "DoubleOptional": {
+      "base": null,
+      "refs": {
+        "ResizeProgressMessage$AvgResizeRateInMegaBytesPerSecond": "The average rate of the resize operation over the last few minutes, measured in megabytes per second. After the resize operation completes, this value shows the average rate of the entire resize operation."
+      }
+    },
+    "EC2SecurityGroup": {
+      "base": "Describes an Amazon EC2 security group.",
+      "refs": { "EC2SecurityGroupList$member": null }
+    },
+    "EC2SecurityGroupList": {
+      "base": null,
+      "refs": {
+        "ClusterSecurityGroup$EC2SecurityGroups": "A list of EC2 security groups that are permitted to access clusters associated with this cluster security group."
+      }
+    },
+    "ElasticIpStatus": {
+      "base": "Describes the status of the elastic IP (EIP) address.",
+      "refs": { "Cluster$ElasticIpStatus": "The status of the elastic IP (EIP) address." }
+    },
+    "EnableLoggingMessage": { "base": "", "refs": { } },
+    "EnableSnapshotCopyMessage": { "base": "", "refs": { } },
+    "EnableSnapshotCopyResult": { "base": null, "refs": { } },
+    "Endpoint": {
+      "base": "Describes a connection endpoint.",
+      "refs": { "Cluster$Endpoint": "The connection endpoint." }
+    },
+    "Event": {
+      "base": "Describes an event.",
+      "refs": { "EventList$member": null }
+    },
+    "EventCategoriesList": {
+      "base": null,
+      "refs": {
+        "CreateEventSubscriptionMessage$EventCategories": "Specifies the Amazon Redshift event categories to be published by the event notification subscription. Values: Configuration, Management, Monitoring, Security",
+        "Event$EventCategories": "A list of the event categories. Values: Configuration, Management, Monitoring, Security",
+        "EventInfoMap$EventCategories": "The category of an Amazon Redshift event.",
+        "EventSubscription$EventCategoriesList": "The list of Amazon Redshift event categories specified in the event notification subscription. Values: Configuration, Management, Monitoring, Security",
+        "ModifyEventSubscriptionMessage$EventCategories": "Specifies the Amazon Redshift event categories to be published by the event notification subscription. Values: Configuration, Management, Monitoring, Security"
+      }
+    },

+    "EventCategoriesMap": {
+      "base": "Describes event categories.",
+      "refs": { "EventCategoriesMapList$member": null }
+    },
+    "EventCategoriesMapList": {
+      "base": null,
+      "refs": { "EventCategoriesMessage$EventCategoriesMapList": "A list of event categories descriptions." }
+    },
+    "EventCategoriesMessage": { "base": "", "refs": { } },
+    "EventInfoMap": {
+      "base": "Describes event information.",
+      "refs": { "EventInfoMapList$member": null }
+    },
+    "EventInfoMapList": {
+      "base": null,
+      "refs": { "EventCategoriesMap$Events": "The events in the event category." }
+    },
+    "EventList": {
+      "base": null,
+      "refs": { "EventsMessage$Events": "A list of Event instances." }
+    },
+    "EventSubscription": {
+      "base": "Describes event subscriptions.",
+      "refs": {
+        "CreateEventSubscriptionResult$EventSubscription": null,
+        "EventSubscriptionsList$member": null,
+        "ModifyEventSubscriptionResult$EventSubscription": null
+      }
+    },
+    "EventSubscriptionQuotaExceededFault": { "base": "The request would exceed the allowed number of event subscriptions for this account. For information about increasing your quota, go to Limits in Amazon Redshift in the Amazon Redshift Cluster Management Guide.", "refs": { } },
+    "EventSubscriptionsList": {
+      "base": null,
+      "refs": { "EventSubscriptionsMessage$EventSubscriptionsList": "A list of event subscriptions." }
+    },
+    "EventSubscriptionsMessage": { "base": "", "refs": { } },
+    "EventsMessage": { "base": "", "refs": { } },
+    "HsmClientCertificate": {
+      "base": "Returns information about an HSM client certificate. The certificate is stored in a secure Hardware Storage Module (HSM), and used by the Amazon Redshift cluster to encrypt data files.",
+      "refs": {
+        "CreateHsmClientCertificateResult$HsmClientCertificate": null,
+        "HsmClientCertificateList$member": null
+      }
+    },
+    "HsmClientCertificateAlreadyExistsFault": { "base": "There is already an existing Amazon Redshift HSM client certificate with the specified identifier.", "refs": { } },
+    "HsmClientCertificateList": {
+      "base": null,
+      "refs": { "HsmClientCertificateMessage$HsmClientCertificates": "A list of the identifiers for one or more HSM client certificates used by Amazon Redshift clusters to store and retrieve database encryption keys in an HSM." }
+    },
+    "HsmClientCertificateMessage": { "base": "", "refs": { } },
+    "HsmClientCertificateNotFoundFault": { "base": "There is no Amazon Redshift HSM client certificate with the specified identifier.", "refs": { } },
+    "HsmClientCertificateQuotaExceededFault": { "base": "The quota for HSM client certificates has been reached. For information about increasing your quota, go to Limits in Amazon Redshift in the Amazon Redshift Cluster Management Guide.", "refs": { } },
+    "HsmConfiguration": {
+      "base": "Returns information about an HSM configuration, which is an object that describes to Amazon Redshift clusters the information they require to connect to an HSM where they can store database encryption keys.",
+      "refs": {
+        "CreateHsmConfigurationResult$HsmConfiguration": null,
+        "HsmConfigurationList$member": null
+      }
+    },
+    "HsmConfigurationAlreadyExistsFault": { "base": "There is already an existing Amazon Redshift HSM configuration with the specified identifier.", "refs": { } },
+    "HsmConfigurationList": {
+      "base": null,
+      "refs": { "HsmConfigurationMessage$HsmConfigurations": "A list of HsmConfiguration objects." }
+    },
+    "HsmConfigurationMessage": { "base": "", "refs": { } },
+    "HsmConfigurationNotFoundFault": { "base": "There is no Amazon Redshift HSM configuration with the specified identifier.", "refs": { } },
+    "HsmConfigurationQuotaExceededFault": { "base": "The quota for HSM configurations has been reached. For information about increasing your quota, go to Limits in Amazon Redshift in the Amazon Redshift Cluster Management Guide.", "refs": { } },
+    "HsmStatus": {
+      "base": "Describes the status of changes to HSM settings.",
+      "refs": { "Cluster$HsmStatus": "Reports whether the Amazon Redshift cluster has finished applying any HSM settings changes specified in a modify cluster command. Values: active, applying" }
+    },
+    "IPRange": {
+      "base": "Describes an IP range used in a security group.",
+      "refs": { "IPRangeList$member": null }
+    },
+    "IPRangeList": {
+      "base": null,
+      "refs": { "ClusterSecurityGroup$IPRanges": "A list of IP ranges (CIDR blocks) that are permitted to access clusters associated with this cluster security group." }
+    },
+    "IamRoleArnList": {
+      "base": null,
+      "refs": {
+        "CreateClusterMessage$IamRoles": "A list of AWS Identity and Access Management (IAM) roles that can be used by the cluster to access other AWS services. You must supply the IAM roles in their Amazon Resource Name (ARN) format. You can supply up to 10 IAM roles in a single request. A cluster can have up to 10 IAM roles associated at any time.",
+        "ModifyClusterIamRolesMessage$AddIamRoles": "Zero or more IAM roles (in their ARN format) to associate with the cluster. You can associate up to 10 IAM roles with a single cluster in a single request.",
+        "ModifyClusterIamRolesMessage$RemoveIamRoles": "Zero or more IAM roles (in their ARN format) to disassociate from the cluster. You can disassociate up to 10 IAM roles from a single cluster in a single request.",
+        "RestoreFromClusterSnapshotMessage$IamRoles": "A list of AWS Identity and Access Management (IAM) roles that can be used by the cluster to access other AWS services. You must supply the IAM roles in their Amazon Resource Name (ARN) format. You can supply up to 10 IAM roles in a single request. A cluster can have up to 10 IAM roles associated at any time."
+      }
+    },

+    "ImportTablesCompleted": {
+      "base": null,
+      "refs": { "ResizeProgressMessage$ImportTablesCompleted": "The names of tables that have been completely imported. Valid Values: List of table names." }
+    },
+    "ImportTablesInProgress": {
+      "base": null,
+      "refs": { "ResizeProgressMessage$ImportTablesInProgress": "The names of tables that are currently being imported. Valid Values: List of table names." }
+    },
+    "ImportTablesNotStarted": {
+      "base": null,
+      "refs": { "ResizeProgressMessage$ImportTablesNotStarted": "The names of tables that have not yet been imported. Valid Values: List of table names." }
+    },
+    "InProgressTableRestoreQuotaExceededFault": { "base": "You have exceeded the allowed number of table restore requests. Wait for your current table restore requests to complete before making a new request.", "refs": { } },
+    "IncompatibleOrderableOptions": { "base": "The specified options are incompatible.", "refs": { } },
+    "InsufficientClusterCapacityFault": { "base": "The number of nodes specified exceeds the allotted capacity of the cluster.", "refs": { } },
+    "InsufficientS3BucketPolicyFault": { "base": "The cluster does not have read bucket or put object permissions on the S3 bucket specified when enabling logging.", "refs": { } },

+    "Integer": {
+      "base": null,
+      "refs": {
+        "Cluster$AutomatedSnapshotRetentionPeriod": "The number of days that automatic cluster snapshots are retained.",
+        "Cluster$NumberOfNodes": "The number of compute nodes in the cluster.",
+        "Endpoint$Port": "The port that the database engine is listening on.",
+        "ModifySnapshotCopyRetentionPeriodMessage$RetentionPeriod": "The number of days to retain automated snapshots in the destination region after they are copied from the source region. If you decrease the retention period for automated snapshots that are copied to a destination region, Amazon Redshift will delete any existing automated snapshots that were copied to the destination region and that fall outside of the new retention period. Constraints: Must be at least 1 and no more than 35.",
+        "ReservedNode$Duration": "The duration of the node reservation in seconds.",
+        "ReservedNode$NodeCount": "The number of reserved compute nodes.",
+        "ReservedNodeOffering$Duration": "The duration, in seconds, for which the offering will reserve the node.",
+        "Snapshot$Port": "The port that the cluster is listening on.",
+        "Snapshot$NumberOfNodes": "The number of nodes in the cluster."
+      }
+    },

+    "IntegerOptional": {
+      "base": null,
+      "refs": {
+        "CreateClusterMessage$AutomatedSnapshotRetentionPeriod": "The number of days that automated snapshots are retained. If the value is 0, automated snapshots are disabled. Even if automated snapshots are disabled, you can still create manual snapshots when you want with CreateClusterSnapshot. Default: 1. Constraints: Must be a value from 0 to 35.",
+        "CreateClusterMessage$Port": "The port number on which the cluster accepts incoming connections. The cluster is accessible only via the JDBC and ODBC connection strings. Part of the connection string requires the port on which the cluster will listen for incoming connections. Default: 5439. Valid Values: 1150-65535.",
+        "CreateClusterMessage$NumberOfNodes": "The number of compute nodes in the cluster. This parameter is required when the ClusterType parameter is specified as multi-node. For information about determining how many nodes you need, go to Working with Clusters in the Amazon Redshift Cluster Management Guide. If you don't specify this parameter, you get a single-node cluster. When requesting a multi-node cluster, you must specify the number of nodes that you want in the cluster. Default: 1. Constraints: Value must be at least 1 and no more than 100.",
+        "DescribeClusterParameterGroupsMessage$MaxRecords": "The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value. Default: 100. Constraints: minimum 20, maximum 100.",
+        "DescribeClusterParametersMessage$MaxRecords": "The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value. Default: 100. Constraints: minimum 20, maximum 100.",
+        "DescribeClusterSecurityGroupsMessage$MaxRecords": "The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value. Default: 100. Constraints: minimum 20, maximum 100.",
+        "DescribeClusterSnapshotsMessage$MaxRecords": "The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value. Default: 100. Constraints: minimum 20, maximum 100.",
+        "DescribeClusterSubnetGroupsMessage$MaxRecords": "The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value. Default: 100. Constraints: minimum 20, maximum 100.",
+        "DescribeClusterVersionsMessage$MaxRecords": "The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value. Default: 100. Constraints: minimum 20, maximum 100.",
+        "DescribeClustersMessage$MaxRecords": "The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value. Default: 100. Constraints: minimum 20, maximum 100.",
+        "DescribeDefaultClusterParametersMessage$MaxRecords": "The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value. Default: 100. Constraints: minimum 20, maximum 100.",
+        "DescribeEventSubscriptionsMessage$MaxRecords": "The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value. Default: 100. Constraints: minimum 20, maximum 100.",
+        "DescribeEventsMessage$Duration": "The number of minutes prior to the time of the request for which to retrieve events. For example, if the request is sent at 18:00 and you specify a duration of 60, then only events which have occurred after 17:00 will be returned. Default: 60.",
+        "DescribeEventsMessage$MaxRecords": "The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value. Default: 100. Constraints: minimum 20, maximum 100.",
+        "DescribeHsmClientCertificatesMessage$MaxRecords": "The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value. Default: 100. Constraints: minimum 20, maximum 100.",
+        "DescribeHsmConfigurationsMessage$MaxRecords": "The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value. Default: 100. Constraints: minimum 20, maximum 100.",
+        "DescribeOrderableClusterOptionsMessage$MaxRecords": "The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value. Default: 100. Constraints: minimum 20, maximum 100.",
+        "DescribeReservedNodeOfferingsMessage$MaxRecords": "The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value. Default: 100. Constraints: minimum 20, maximum 100.",
+        "DescribeReservedNodesMessage$MaxRecords": "The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value. Default: 100. Constraints: minimum 20, maximum 100.",
+        "DescribeSnapshotCopyGrantsMessage$MaxRecords": "The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value. Default: 100. Constraints: minimum 20, maximum 100.",
+        "DescribeTableRestoreStatusMessage$MaxRecords": "The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.",
+        "DescribeTagsMessage$MaxRecords": "The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.",
+        "EnableSnapshotCopyMessage$RetentionPeriod": "The number of days to retain automated snapshots in the destination region after they are copied from the source region. Default: 7. Constraints: Must be at least 1 and no more than 35.",
+        "ModifyClusterMessage$NumberOfNodes": "The new number of nodes of the cluster. If you specify a new number of nodes, you must also specify the node type parameter. When you submit your request to resize a cluster, Amazon Redshift sets access permissions for the cluster to read-only. After Amazon Redshift provisions a new cluster according to your resize requirements, there will be a temporary outage while the old cluster is deleted and your connection is switched to the new cluster. When the new connection is complete, the original access permissions for the cluster are restored. You can use DescribeResize to track the progress of the resize request. Valid Values: Integer greater than 0.",
+        "ModifyClusterMessage$AutomatedSnapshotRetentionPeriod": "The number of days that automated snapshots are retained. If the value is 0, automated snapshots are disabled. Even if automated snapshots are disabled, you can still create manual snapshots when you want with CreateClusterSnapshot. If you decrease the automated snapshot retention period from its current value, existing automated snapshots that fall outside of the new retention period will be immediately deleted. Default: Uses existing setting. Constraints: Must be a value from 0 to 35.",
+        "PendingModifiedValues$NumberOfNodes": "The pending or in-progress change of the number of nodes in the cluster.",
+        "PendingModifiedValues$AutomatedSnapshotRetentionPeriod": "The pending or in-progress change of the automated snapshot retention period.",
+        "PurchaseReservedNodeOfferingMessage$NodeCount": "The number of reserved nodes that you want to purchase. Default: 1.",
+        "ResizeProgressMessage$TargetNumberOfNodes": "The number of nodes that the cluster will have after the resize operation is complete.",
+        "RestoreFromClusterSnapshotMessage$Port": "The port number on which the cluster accepts connections. Default: The same port as the original cluster. Constraints: Must be between 1115 and 65535.",
+        "RestoreFromClusterSnapshotMessage$AutomatedSnapshotRetentionPeriod": "The number of days that automated snapshots are retained. If the value is 0, automated snapshots are disabled. Even if automated snapshots are disabled, you can still create manual snapshots when you want with CreateClusterSnapshot. Default: The value selected for the cluster from which the snapshot was taken. Constraints: Must be a value from 0 to 35."
+      }
+    },

+    "InvalidClusterParameterGroupStateFault": { "base": "The cluster parameter group action cannot be completed because another task is in progress that involves the parameter group. Wait a few moments and try the operation again.", "refs": { } },
+    "InvalidClusterSecurityGroupStateFault": { "base": "The state of the cluster security group is not available.", "refs": { } },
+    "InvalidClusterSnapshotStateFault": { "base": "The specified cluster snapshot is not in the available state, or other accounts are authorized to access the snapshot.", "refs": { } },
+    "InvalidClusterStateFault": { "base": "The specified cluster is not in the available state.", "refs": { } },
+    "InvalidClusterSubnetGroupStateFault": { "base": "The cluster subnet group cannot be deleted because it is in use.", "refs": { } },
+    "InvalidClusterSubnetStateFault": { "base": "The state of the subnet is invalid.", "refs": { } },
+    "InvalidElasticIpFault": { "base": "The Elastic IP (EIP) is invalid or cannot be found.", "refs": { } },
+    "InvalidHsmClientCertificateStateFault": { "base": "The specified HSM client certificate is not in the available state, or it is still in use by one or more Amazon Redshift clusters.", "refs": { } },
+    "InvalidHsmConfigurationStateFault": { "base": "The specified HSM configuration is not in the available state, or it is still in use by one or more Amazon Redshift clusters.", "refs": { } },
+    "InvalidRestoreFault": { "base": "The restore is invalid.", "refs": { } },
+    "InvalidS3BucketNameFault": { "base": "The S3 bucket name is invalid. For more information about naming rules, go to Bucket Restrictions and Limitations in the Amazon Simple Storage Service (S3) Developer Guide.", "refs": { } },
+    "InvalidS3KeyPrefixFault": { "base": "The string specified for the logging S3 key prefix does not comply with the documented constraints.", "refs": { } },
+    "InvalidSnapshotCopyGrantStateFault": { "base": "The snapshot copy grant can't be deleted because it is used by one or more clusters.", "refs": { } },
+    "InvalidSubnet": { "base": "The requested subnet is not valid, or not all of the subnets are in the same VPC.", "refs": { } },
+    "InvalidSubscriptionStateFault": { "base": "The subscription request is invalid because it is a duplicate request. This subscription request is already in progress.", "refs": { } },
+    "InvalidTableRestoreArgumentFault": { "base": "The value specified for the sourceDatabaseName, sourceSchemaName, or sourceTableName parameter, or a combination of these, doesn't exist in the snapshot.", "refs": { } },
+    "InvalidTagFault": { "base": "The tag is invalid.", "refs": { } },
+    "InvalidVPCNetworkStateFault": { "base": "The cluster subnet group does not cover all Availability Zones.", "refs": { } },
+    "LimitExceededFault": { "base": "The encryption key has exceeded its grant limit in AWS KMS.", "refs": { } },
+    "LoggingStatus": { "base": "Describes the status of logging for a cluster.", "refs": { } },
+    "Long": {
+      "base": null,
+      "refs": {
+        "ClusterSnapshotCopyStatus$RetentionPeriod": "The number of days that automated snapshots are retained in the destination region after they are copied from a source region.",
+        "RestoreStatus$SnapshotSizeInMegaBytes": "The size of the set of snapshot data used to restore the cluster.",
+        "RestoreStatus$ProgressInMegaBytes": "The number of megabytes that have been transferred from snapshot storage.",
+        "RestoreStatus$ElapsedTimeInSeconds": "The amount of time an in-progress restore has been running, or the amount of time it took a completed restore to finish.",
+        "RestoreStatus$EstimatedTimeToCompletionInSeconds": "The estimate of the time remaining before the restore will complete. Returns 0 for a completed restore.",
+        "Snapshot$EstimatedSecondsToCompletion": "The estimate of the time remaining before the snapshot backup will complete. Returns 0 for a completed backup.",
+        "Snapshot$ElapsedTimeInSeconds": "The amount of time an in-progress snapshot backup has been running, or the amount of time it took a completed backup to finish."
+      }
+    },
+    "LongOptional": {
+      "base": null,
+      "refs": {
+        "ResizeProgressMessage$TotalResizeDataInMegaBytes": "The estimated total amount of data, in megabytes, on the cluster before the resize operation began.",
+        "ResizeProgressMessage$ProgressInMegaBytes": "While the resize operation is in progress, this value shows the current amount of data, in megabytes, that has been processed so far. When the resize operation is complete, this value shows the total amount of data, in megabytes, on the cluster, which may be more or less than TotalResizeDataInMegaBytes (the estimated total amount of data before resize).",
+        "ResizeProgressMessage$ElapsedTimeInSeconds": "The amount of seconds that have elapsed since the resize operation began. After the resize operation completes, this value shows the total actual time, in seconds, for the resize operation.",
+        "ResizeProgressMessage$EstimatedTimeToCompletionInSeconds": "The estimated time remaining, in seconds, until the resize operation is complete. This value is calculated based on the average resize rate and the estimated amount of data remaining to be processed. Once the resize operation is complete, this value will be 0.",
+        "TableRestoreStatus$ProgressInMegaBytes": "The amount of data restored to the new table so far, in megabytes (MB).",
+        "TableRestoreStatus$TotalDataInMegaBytes": "The total amount of data to restore to the new table, in megabytes (MB)."
+      }
+    },

    ", + "refs": { + } + }, + "ModifyClusterIamRolesResult": { + "base": null, + "refs": { + } + }, + "ModifyClusterMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyClusterParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyClusterResult": { + "base": null, + "refs": { + } + }, + "ModifyClusterSubnetGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyClusterSubnetGroupResult": { + "base": null, + "refs": { + } + }, + "ModifyEventSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyEventSubscriptionResult": { + "base": null, + "refs": { + } + }, + "ModifySnapshotCopyRetentionPeriodMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifySnapshotCopyRetentionPeriodResult": { + "base": null, + "refs": { + } + }, + "NumberOfNodesPerClusterLimitExceededFault": { + "base": "

    The operation would exceed the number of nodes allowed for a cluster.

    ", + "refs": { + } + }, + "NumberOfNodesQuotaExceededFault": { + "base": "

    The operation would exceed the number of nodes allotted to the account. For information about increasing your quota, go to Limits in Amazon Redshift in the Amazon Redshift Cluster Management Guide.

    ", + "refs": { + } + }, + "OrderableClusterOption": { + "base": "

    Describes an orderable cluster option.

    ", + "refs": { + "OrderableClusterOptionsList$member": null + } + }, + "OrderableClusterOptionsList": { + "base": null, + "refs": { + "OrderableClusterOptionsMessage$OrderableClusterOptions": "

    An OrderableClusterOption structure containing information about orderable options for the cluster.

    " + } + }, + "OrderableClusterOptionsMessage": { + "base": "

    Contains the output from the DescribeOrderableClusterOptions action.

    ", + "refs": { + } + }, + "Parameter": { + "base": "

    Describes a parameter in a cluster parameter group.

    ", + "refs": { + "ParametersList$member": null + } + }, + "ParameterApplyType": { + "base": null, + "refs": { + "Parameter$ApplyType": "

    Specifies how to apply the WLM configuration parameter. Some properties can be applied dynamically, while other properties require that any associated clusters be rebooted for the configuration changes to be applied. For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide.

    " + } + }, + "ParameterGroupList": { + "base": null, + "refs": { + "ClusterParameterGroupsMessage$ParameterGroups": "

    A list of ClusterParameterGroup instances. Each instance describes one cluster parameter group.

    " + } + }, + "ParametersList": { + "base": null, + "refs": { + "ClusterParameterGroupDetails$Parameters": "

    A list of Parameter instances. Each instance lists the parameters of one cluster parameter group.

    ", + "DefaultClusterParameters$Parameters": "

    The list of cluster default parameters.

    ", + "ModifyClusterParameterGroupMessage$Parameters": "

    An array of parameters to be modified. A maximum of 20 parameters can be modified in a single request.

    For each parameter to be modified, you must supply at least the parameter name and parameter value; other name-value pairs of the parameter are optional.

    For the workload management (WLM) configuration, you must supply all the name-value pairs in the wlm_json_configuration parameter.

    ", + "ResetClusterParameterGroupMessage$Parameters": "

    An array of names of parameters to be reset. If ResetAllParameters option is not used, then at least one parameter name must be supplied.

    Constraints: A maximum of 20 parameters can be reset in a single request.

    " + } + }, + "PendingModifiedValues": { + "base": "

+    "PendingModifiedValues": {
+      "base": "Describes cluster attributes that are in a pending state. A change to one or more of the attributes was requested and is in progress or will be applied.",
+      "refs": { "Cluster$PendingModifiedValues": "If present, changes to the cluster are pending. Specific pending changes are identified by subelements." }
+    },
+    "PurchaseReservedNodeOfferingMessage": { "base": "", "refs": { } },
+    "PurchaseReservedNodeOfferingResult": { "base": null, "refs": { } },
+    "RebootClusterMessage": { "base": "", "refs": { } },
+    "RebootClusterResult": { "base": null, "refs": { } },
+    "RecurringCharge": {
+      "base": "Describes a recurring charge.",
+      "refs": { "RecurringChargeList$member": null }
+    },
+    "RecurringChargeList": {
+      "base": null,
+      "refs": {
+        "ReservedNode$RecurringCharges": "The recurring charges for the reserved node.",
+        "ReservedNodeOffering$RecurringCharges": "The charge to your account regardless of whether you are creating any clusters using the node offering. Recurring charges are only in effect for heavy-utilization reserved nodes."
+      }
+    },
+    "ReservedNode": {
+      "base": "Describes a reserved node. You can call the DescribeReservedNodeOfferings API to obtain the available reserved node offerings.",
+      "refs": {
+        "PurchaseReservedNodeOfferingResult$ReservedNode": null,
+        "ReservedNodeList$member": null
+      }
+    },
+    "ReservedNodeAlreadyExistsFault": { "base": "User already has a reservation with the given identifier.", "refs": { } },
+    "ReservedNodeList": {
+      "base": null,
+      "refs": { "ReservedNodesMessage$ReservedNodes": "The list of ReservedNode objects." }
+    },
+    "ReservedNodeNotFoundFault": { "base": "The specified reserved compute node was not found.", "refs": { } },
+    "ReservedNodeOffering": {
+      "base": "Describes a reserved node offering.",
+      "refs": { "ReservedNodeOfferingList$member": null }
+    },
+    "ReservedNodeOfferingList": {
+      "base": null,
+      "refs": { "ReservedNodeOfferingsMessage$ReservedNodeOfferings": "A list of ReservedNodeOffering objects." }
+    },
+    "ReservedNodeOfferingNotFoundFault": { "base": "Specified offering does not exist.", "refs": { } },
+    "ReservedNodeOfferingsMessage": { "base": "", "refs": { } },
+    "ReservedNodeQuotaExceededFault": { "base": "Request would exceed the user's compute node quota. For information about increasing your quota, go to Limits in Amazon Redshift in the Amazon Redshift Cluster Management Guide.", "refs": { } },
+    "ReservedNodesMessage": { "base": "", "refs": { } },
+    "ResetClusterParameterGroupMessage": { "base": "", "refs": { } },
+    "ResizeNotFoundFault": { "base": "A resize operation for the specified cluster is not found.", "refs": { } },
+    "ResizeProgressMessage": { "base": "Describes the result of a cluster resize operation.", "refs": { } },
+    "ResourceNotFoundFault": { "base": "The resource could not be found.", "refs": { } },
+    "RestorableNodeTypeList": {
+      "base": null,
+      "refs": { "Snapshot$RestorableNodeTypes": "The list of node types that this cluster snapshot is able to restore into." }
+    },
+    "RestoreFromClusterSnapshotMessage": { "base": "", "refs": { } },
+    "RestoreFromClusterSnapshotResult": { "base": null, "refs": { } },
+    "RestoreStatus": {
+      "base": "Describes the status of a cluster restore action. Returns null if the cluster was not created by restoring a snapshot.",
+      "refs": { "Cluster$RestoreStatus": "Describes the status of a cluster restore action. Returns null if the cluster was not created by restoring a snapshot." }
+    },
+    "RestoreTableFromClusterSnapshotMessage": { "base": null, "refs": { } },
+    "RestoreTableFromClusterSnapshotResult": { "base": null, "refs": { } },
+    "RevokeClusterSecurityGroupIngressMessage": { "base": "", "refs": { } },
+    "RevokeClusterSecurityGroupIngressResult": { "base": null, "refs": { } },
+    "RevokeSnapshotAccessMessage": { "base": "", "refs": { } },
+    "RevokeSnapshotAccessResult": { "base": null, "refs": { } },
+    "RotateEncryptionKeyMessage": { "base": "", "refs": { } },
+    "RotateEncryptionKeyResult": { "base": null, "refs": { } },
+    "SNSInvalidTopicFault": { "base": "Amazon SNS has responded that there is a problem with the specified Amazon SNS topic.", "refs": { } },
+    "SNSNoAuthorizationFault": { "base": "You do not have permission to publish to the specified Amazon SNS topic.", "refs": { } },
+    "SNSTopicArnNotFoundFault": { "base": "An Amazon SNS topic with the specified Amazon Resource Name (ARN) does not exist.", "refs": { } },
+    "Snapshot": {
+      "base": "Describes a snapshot.",
+      "refs": {
+        "AuthorizeSnapshotAccessResult$Snapshot": null,
+        "CopyClusterSnapshotResult$Snapshot": null,
+        "CreateClusterSnapshotResult$Snapshot": null,
+        "DeleteClusterSnapshotResult$Snapshot": null,
+        "RevokeSnapshotAccessResult$Snapshot": null,
+        "SnapshotList$member": null
+      }
+    },
+    "SnapshotCopyAlreadyDisabledFault": { "base": "The cluster already has cross-region snapshot copy disabled.", "refs": { } },
+    "SnapshotCopyAlreadyEnabledFault": { "base": "The cluster already has cross-region snapshot copy enabled.", "refs": { } },
+    "SnapshotCopyDisabledFault": { "base": "Cross-region snapshot copy was temporarily disabled. Try your request again.", "refs": { } },
+    "SnapshotCopyGrant": {
+      "base": "The snapshot copy grant that grants Amazon Redshift permission to encrypt copied snapshots with the specified customer master key (CMK) from AWS KMS in the destination region. For more information about managing snapshot copy grants, go to Amazon Redshift Database Encryption in the Amazon Redshift Cluster Management Guide.",
+      "refs": {
+        "CreateSnapshotCopyGrantResult$SnapshotCopyGrant": null,
+        "SnapshotCopyGrantList$member": null
+      }
+    },
+    "SnapshotCopyGrantAlreadyExistsFault": { "base": "The snapshot copy grant can't be created because a grant with the same name already exists.", "refs": { } },
+    "SnapshotCopyGrantList": {
+      "base": null,
+      "refs": { "SnapshotCopyGrantMessage$SnapshotCopyGrants": "The list of SnapshotCopyGrant objects." }
+    },
+    "SnapshotCopyGrantMessage": { "base": "", "refs": { } },
+    "SnapshotCopyGrantNotFoundFault": { "base": "The specified snapshot copy grant can't be found. Make sure that the name is typed correctly and that the grant exists in the destination region.", "refs": { } },
+    "SnapshotCopyGrantQuotaExceededFault": { "base": "The AWS account has exceeded the maximum number of snapshot copy grants in this region.", "refs": { } },
+    "SnapshotList": {
+      "base": null,
+      "refs": { "SnapshotMessage$Snapshots": "A list of Snapshot instances." }
+    },
+    "SnapshotMessage": { "base": "Contains the output from the DescribeClusterSnapshots action.", "refs": { } },
+    "SourceIdsList": {
+      "base": null,
+      "refs": {
+        "CreateEventSubscriptionMessage$SourceIds": "A list of one or more identifiers of Amazon Redshift source objects. All of the objects must be of the same type as was specified in the source type parameter. The event subscription will return only events generated by the specified objects. If not specified, then events are returned for all objects within the source type specified. Example: my-cluster-1, my-cluster-2. Example: my-snapshot-20131010.",
+        "EventSubscription$SourceIdsList": "A list of the sources that publish events to the Amazon Redshift event notification subscription.",
+        "ModifyEventSubscriptionMessage$SourceIds": "A list of one or more identifiers of Amazon Redshift source objects. All of the objects must be of the same type as was specified in the source type parameter. The event subscription will return only events generated by the specified objects. If not specified, then events are returned for all objects within the source type specified. Example: my-cluster-1, my-cluster-2. Example: my-snapshot-20131010."
+      }
+    },
+    "SourceNotFoundFault": { "base": "The specified Amazon Redshift event source could not be found.", "refs": { } },
+    "SourceType": {
+      "base": null,
+      "refs": {
+        "DescribeEventsMessage$SourceType": "The event source to retrieve events for. If no value is specified, all events are returned. Constraints: If SourceType is supplied, SourceIdentifier must also be provided. • Specify cluster when SourceIdentifier is a cluster identifier. • Specify cluster-security-group when SourceIdentifier is a cluster security group name. • Specify cluster-parameter-group when SourceIdentifier is a cluster parameter group name. • Specify cluster-snapshot when SourceIdentifier is a cluster snapshot identifier.",
+        "Event$SourceType": "The source type for this event."
+      }
+    },

    The identifier of an AWS customer account authorized to restore a snapshot.

    ", + "AuthorizeClusterSecurityGroupIngressMessage$ClusterSecurityGroupName": "

    The name of the security group to which the ingress rule is added.

    ", + "AuthorizeClusterSecurityGroupIngressMessage$CIDRIP": "

    The IP range to be added the Amazon Redshift security group.

    ", + "AuthorizeClusterSecurityGroupIngressMessage$EC2SecurityGroupName": "

    The EC2 security group to be added the Amazon Redshift security group.

    ", + "AuthorizeClusterSecurityGroupIngressMessage$EC2SecurityGroupOwnerId": "

    The AWS account number of the owner of the security group specified by the EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable value.

    Example: 111122223333

    ", + "AuthorizeSnapshotAccessMessage$SnapshotIdentifier": "

    The identifier of the snapshot the account is authorized to restore.

    ", + "AuthorizeSnapshotAccessMessage$SnapshotClusterIdentifier": "

    The identifier of the cluster the snapshot was created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.

    ", + "AuthorizeSnapshotAccessMessage$AccountWithRestoreAccess": "

    The identifier of the AWS customer account authorized to restore the specified snapshot.

    ", + "AvailabilityZone$Name": "

    The name of the availability zone.

    ", + "Cluster$ClusterIdentifier": "

    The unique identifier of the cluster.

    ", + "Cluster$NodeType": "

    The node type for the nodes in the cluster.

    ", + "Cluster$ClusterStatus": "

    The current state of the cluster. Possible values are:

    • available
    • creating
    • deleting
    • final-snapshot
    • hardware-failure
    • incompatible-hsm
    • incompatible-network
    • incompatible-parameters
    • incompatible-restore
    • modifying
    • rebooting
    • renaming
    • resizing
    • rotating-keys
    • storage-full
    • updating-hsm

    ", + "Cluster$ModifyStatus": "

    The status of a modify operation, if any, initiated for the cluster.

    ", + "Cluster$MasterUsername": "

    The master user name for the cluster. This name is used to connect to the database that is specified in DBName.

    ", + "Cluster$DBName": "

    The name of the initial database that was created when the cluster was created. This same name is returned for the life of the cluster. If an initial database was not specified, a database named \"dev\" was created by default.

    ", + "Cluster$ClusterSubnetGroupName": "

    The name of the subnet group that is associated with the cluster. This parameter is valid only when the cluster is in a VPC.

    ", + "Cluster$VpcId": "

    The identifier of the VPC the cluster is in, if the cluster is in a VPC.

    ", + "Cluster$AvailabilityZone": "

    The name of the Availability Zone in which the cluster is located.

    ", + "Cluster$PreferredMaintenanceWindow": "

    The weekly time range (in UTC) during which system maintenance can occur.

    ", + "Cluster$ClusterVersion": "

    The version ID of the Amazon Redshift engine that is running on the cluster.

    ", + "Cluster$ClusterPublicKey": "

    The public key for the cluster.

    ", + "Cluster$ClusterRevisionNumber": "

    The specific revision number of the database in the cluster.

    ", + "Cluster$KmsKeyId": "

    The AWS Key Management Service (KMS) key ID of the encryption key used to encrypt data in the cluster.

    ", + "ClusterIamRole$IamRoleArn": "

    The Amazon Resource Name (ARN) of the IAM role. For example, arn:aws:iam::123456789012:role/RedshiftCopyUnload.

    ", + "ClusterIamRole$ApplyStatus": "

    Describes the status of the IAM role's association with an Amazon Redshift cluster.

    The following are possible statuses and descriptions.

    • in-sync: The role is available for use by the cluster.
    • adding: The role is in the process of being associated with the cluster.
    • removing: The role is in the process of being disassociated with the cluster.

    ", + "ClusterNode$NodeRole": "

    Whether the node is a leader node or a compute node.

    ", + "ClusterNode$PrivateIPAddress": "

    The private IP address of a node within a cluster.

    ", + "ClusterNode$PublicIPAddress": "

    The public IP address of a node within a cluster.

    ", + "ClusterParameterGroup$ParameterGroupName": "

    The name of the cluster parameter group.

    ", + "ClusterParameterGroup$ParameterGroupFamily": "

    The name of the cluster parameter group family that this cluster parameter group is compatible with.

    ", + "ClusterParameterGroup$Description": "

    The description of the parameter group.

    ", + "ClusterParameterGroupDetails$Marker": "

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

    ", + "ClusterParameterGroupNameMessage$ParameterGroupName": "

    The name of the cluster parameter group.

    ", + "ClusterParameterGroupNameMessage$ParameterGroupStatus": "

    The status of the parameter group. For example, if you made a change to a parameter group name-value pair, then the change could be pending a reboot of an associated cluster.

    ", + "ClusterParameterGroupStatus$ParameterGroupName": "

    The name of the cluster parameter group.

    ", + "ClusterParameterGroupStatus$ParameterApplyStatus": "

    The status of parameter updates.

    ", + "ClusterParameterGroupsMessage$Marker": "

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

    ", + "ClusterParameterStatus$ParameterName": "

    The name of the parameter.

    ", + "ClusterParameterStatus$ParameterApplyStatus": "

    The status of the parameter that indicates whether the parameter is in sync with the database, waiting for a cluster reboot, or encountered an error when being applied.

    The following are possible statuses and descriptions.

    • in-sync: The parameter value is in sync with the database.
    • pending-reboot: The parameter value will be applied after the cluster reboots.
    • applying: The parameter value is being applied to the database.
    • invalid-parameter: Cannot apply the parameter value because it has an invalid value or syntax.
    • apply-deferred: The parameter contains static property changes. The changes are deferred until the cluster reboots.
    • apply-error: Cannot connect to the cluster. The parameter change will be applied after the cluster reboots.
    • unknown-error: Cannot apply the parameter change right now. The change will be applied after the cluster reboots.

    ", + "ClusterParameterStatus$ParameterApplyErrorDescription": "

    The error that prevented the parameter from being applied to the database.

    ", + "ClusterSecurityGroup$ClusterSecurityGroupName": "

    The name of the cluster security group to which the operation was applied.

    ", + "ClusterSecurityGroup$Description": "

    A description of the security group.

    ", + "ClusterSecurityGroupMembership$ClusterSecurityGroupName": "

    The name of the cluster security group.

    ", + "ClusterSecurityGroupMembership$Status": "

    The status of the cluster security group.

    ", + "ClusterSecurityGroupMessage$Marker": "

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

    ", + "ClusterSecurityGroupNameList$member": null, + "ClusterSnapshotCopyStatus$DestinationRegion": "

    The destination region that snapshots are automatically copied to when cross-region snapshot copy is enabled.

    ", + "ClusterSnapshotCopyStatus$SnapshotCopyGrantName": "

    The name of the snapshot copy grant.

    ", + "ClusterSubnetGroup$ClusterSubnetGroupName": "

    The name of the cluster subnet group.

    ", + "ClusterSubnetGroup$Description": "

    The description of the cluster subnet group.

    ", + "ClusterSubnetGroup$VpcId": "

    The VPC ID of the cluster subnet group.

    ", + "ClusterSubnetGroup$SubnetGroupStatus": "

    The status of the cluster subnet group. Possible values are Complete, Incomplete and Invalid.

    ", + "ClusterSubnetGroupMessage$Marker": "

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

    ", + "ClusterVersion$ClusterVersion": "

    The version number used by the cluster.

    ", + "ClusterVersion$ClusterParameterGroupFamily": "

    The name of the cluster parameter group family for the cluster.

    ", + "ClusterVersion$Description": "

    The description of the cluster version.

    ", + "ClusterVersionsMessage$Marker": "

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

    ", + "ClustersMessage$Marker": "

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

    ", + "CopyClusterSnapshotMessage$SourceSnapshotIdentifier": "

    The identifier for the source snapshot.

    Constraints:

    • Must be the identifier for a valid automated snapshot whose state is available.
    ", + "CopyClusterSnapshotMessage$SourceSnapshotClusterIdentifier": "

    The identifier of the cluster the source snapshot was created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.

    Constraints:

    • Must be the identifier for a valid cluster.
    ", + "CopyClusterSnapshotMessage$TargetSnapshotIdentifier": "

    The identifier given to the new manual snapshot.

    Constraints:

    • Cannot be null, empty, or blank.
    • Must contain from 1 to 255 alphanumeric characters or hyphens.
    • First character must be a letter.
    • Cannot end with a hyphen or contain two consecutive hyphens.
    • Must be unique for the AWS account that is making the request.
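    Taken together, these three snapshot fields map onto a single CopyClusterSnapshot call. A minimal sketch, assuming the imports and initialized *redshift.Redshift client svc from the first example; the snapshot and cluster identifiers are hypothetical:

        func copySnapshot(svc *redshift.Redshift) error {
            out, err := svc.CopyClusterSnapshot(&redshift.CopyClusterSnapshotInput{
                SourceSnapshotIdentifier: aws.String("rs:examplecluster-2017-07-26-03-08"),
                // Only needed when the IAM policy names a specific cluster in the
                // snapshot resource element.
                SourceSnapshotClusterIdentifier: aws.String("examplecluster"),
                TargetSnapshotIdentifier:        aws.String("my-copied-snapshot"),
            })
            if err != nil {
                return err
            }
            fmt.Println(aws.StringValue(out.Snapshot.Status))
            return nil
        }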
    ", + "CreateClusterMessage$DBName": "

    The name of the first database to be created when the cluster is created.

    To create additional databases after the cluster is created, connect to the cluster with a SQL client and use SQL commands to create a database. For more information, go to Create a Database in the Amazon Redshift Database Developer Guide.

    Default: dev

    Constraints:

    • Must contain 1 to 64 alphanumeric characters.
    • Must contain only lowercase letters.
    • Cannot be a word that is reserved by the service. A list of reserved words can be found in Reserved Words in the Amazon Redshift Database Developer Guide.
    ", + "CreateClusterMessage$ClusterIdentifier": "

    A unique identifier for the cluster. You use this identifier to refer to the cluster for any subsequent cluster operations such as deleting or modifying. The identifier also appears in the Amazon Redshift console.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens.
    • Alphabetic characters must be lowercase.
    • First character must be a letter.
    • Cannot end with a hyphen or contain two consecutive hyphens.
    • Must be unique for all clusters within an AWS account.

    Example: myexamplecluster

    ", + "CreateClusterMessage$ClusterType": "

    The type of the cluster. When cluster type is specified as

    • single-node, the NumberOfNodes parameter is not required.
    • multi-node, the NumberOfNodes parameter is required.

    Valid Values: multi-node | single-node

    Default: multi-node

    ", + "CreateClusterMessage$NodeType": "

    The node type to be provisioned for the cluster. For information about node types, go to Working with Clusters in the Amazon Redshift Cluster Management Guide.

    Valid Values: ds1.xlarge | ds1.8xlarge | ds2.xlarge | ds2.8xlarge | dc1.large | dc1.8xlarge.

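    The cluster-creation fields above (plus the master credentials described just below) come together in one CreateCluster request. A hedged sketch, reusing the svc client from the first example; every identifier and credential here is a placeholder:

        func createCluster(svc *redshift.Redshift) error {
            _, err := svc.CreateCluster(&redshift.CreateClusterInput{
                ClusterIdentifier:  aws.String("myexamplecluster"),
                ClusterType:        aws.String("multi-node"),
                NumberOfNodes:      aws.Int64(2), // required because ClusterType is multi-node
                NodeType:           aws.String("dc1.large"),
                MasterUsername:     aws.String("masteruser"),
                MasterUserPassword: aws.String("ExamplePassw0rd"), // placeholder; see constraints below
                DBName:             aws.String("dev"),
            })
            return err
        }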
    ", + "CreateClusterMessage$MasterUsername": "

    The user name associated with the master user account for the cluster that is being created.

    Constraints:

    • Must be 1 to 128 alphanumeric characters.
    • First character must be a letter.
    • Cannot be a reserved word. A list of reserved words can be found in Reserved Words in the Amazon Redshift Database Developer Guide.
    ", + "CreateClusterMessage$MasterUserPassword": "

    The password associated with the master user account for the cluster that is being created.

    Constraints:

    • Must be between 8 and 64 characters in length.
    • Must contain at least one uppercase letter.
    • Must contain at least one lowercase letter.
    • Must contain one number.
    • Can be any printable ASCII character (ASCII code 33 to 126) except ' (single quote), \" (double quote), \\, /, @, or space.
    ", + "CreateClusterMessage$ClusterSubnetGroupName": "

    The name of a cluster subnet group to be associated with this cluster.

    If this parameter is not provided, the resulting cluster will be deployed outside a virtual private cloud (VPC).

    ", + "CreateClusterMessage$AvailabilityZone": "

    The EC2 Availability Zone (AZ) in which you want Amazon Redshift to provision the cluster. For example, if you have several EC2 instances running in a specific Availability Zone, then you might want the cluster to be provisioned in the same zone in order to decrease network latency.

    Default: A random, system-chosen Availability Zone in the region that is specified by the endpoint.

    Example: us-east-1d

    Constraint: The specified Availability Zone must be in the same region as the current endpoint.

    ", + "CreateClusterMessage$PreferredMaintenanceWindow": "

    The weekly time range (in UTC) during which automated cluster maintenance can occur.

    Format: ddd:hh24:mi-ddd:hh24:mi

    Default: A 30-minute window selected at random from an 8-hour block of time per region, occurring on a random day of the week. For more information about the time blocks for each region, see Maintenance Windows in Amazon Redshift Cluster Management Guide.

    Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun

    Constraints: Minimum 30-minute window.

    ", + "CreateClusterMessage$ClusterParameterGroupName": "

    The name of the parameter group to be associated with this cluster.

    Default: The default Amazon Redshift cluster parameter group. For information about the default parameter group, go to Working with Amazon Redshift Parameter Groups.

    Constraints:

    • Must be 1 to 255 alphanumeric characters or hyphens.
    • First character must be a letter.
    • Cannot end with a hyphen or contain two consecutive hyphens.
    ", + "CreateClusterMessage$ClusterVersion": "

    The version of the Amazon Redshift engine software that you want to deploy on the cluster.

    The version selected runs on all the nodes in the cluster.

    Constraints: Only version 1.0 is currently available.

    Example: 1.0

    ", + "CreateClusterMessage$HsmClientCertificateIdentifier": "

    Specifies the name of the HSM client certificate the Amazon Redshift cluster uses to retrieve the data encryption keys stored in an HSM.

    ", + "CreateClusterMessage$HsmConfigurationIdentifier": "

    Specifies the name of the HSM configuration that contains the information the Amazon Redshift cluster can use to retrieve and store keys in an HSM.

    ", + "CreateClusterMessage$ElasticIp": "

    The Elastic IP (EIP) address for the cluster.

    Constraints: The cluster must be provisioned in EC2-VPC and publicly-accessible through an Internet gateway. For more information about provisioning clusters in EC2-VPC, go to Supported Platforms to Launch Your Cluster in the Amazon Redshift Cluster Management Guide.

    ", + "CreateClusterMessage$KmsKeyId": "

    The AWS Key Management Service (KMS) key ID of the encryption key that you want to use to encrypt data in the cluster.

    ", + "CreateClusterMessage$AdditionalInfo": "

    Reserved.

    ", + "CreateClusterParameterGroupMessage$ParameterGroupName": "

    The name of the cluster parameter group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter.
    • Cannot end with a hyphen or contain two consecutive hyphens.
    • Must be unique within your AWS account.
    This value is stored as a lower-case string.", + "CreateClusterParameterGroupMessage$ParameterGroupFamily": "

    The Amazon Redshift engine version to which the cluster parameter group applies. The cluster engine version determines the set of parameters.

    To get a list of valid parameter group family names, you can call DescribeClusterParameterGroups. By default, Amazon Redshift returns a list of all the parameter groups that are owned by your AWS account, including the default parameter groups for each Amazon Redshift engine version. The parameter group family names associated with the default parameter groups provide you the valid values. For example, a valid family name is \"redshift-1.0\".

    ", + "CreateClusterParameterGroupMessage$Description": "

    A description of the parameter group.

    ", + "CreateClusterSecurityGroupMessage$ClusterSecurityGroupName": "

    The name for the security group. Amazon Redshift stores the value as a lowercase string.

    Constraints:

    • Must contain no more than 255 alphanumeric characters or hyphens.
    • Must not be \"Default\".
    • Must be unique for all security groups that are created by your AWS account.

    Example: examplesecuritygroup

    ", + "CreateClusterSecurityGroupMessage$Description": "

    A description for the security group.

    ", + "CreateClusterSnapshotMessage$SnapshotIdentifier": "

    A unique identifier for the snapshot that you are requesting. This identifier must be unique for all snapshots within the AWS account.

    Constraints:

    • Cannot be null, empty, or blank
    • Must contain from 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-snapshot-id

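    For example, the two identifiers above are the only required inputs to CreateClusterSnapshot. A minimal sketch under the same client assumption as the first example:

        func snapshotCluster(svc *redshift.Redshift) error {
            _, err := svc.CreateClusterSnapshot(&redshift.CreateClusterSnapshotInput{
                SnapshotIdentifier: aws.String("my-snapshot-id"),
                ClusterIdentifier:  aws.String("examplecluster"),
            })
            return err
        }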
    ", + "CreateClusterSnapshotMessage$ClusterIdentifier": "

    The cluster identifier for which you want a snapshot.

    ", + "CreateClusterSubnetGroupMessage$ClusterSubnetGroupName": "

    The name for the subnet group. Amazon Redshift stores the value as a lowercase string.

    Constraints:

    • Must contain no more than 255 alphanumeric characters or hyphens.
    • Must not be \"Default\".
    • Must be unique for all subnet groups that are created by your AWS account.

    Example: examplesubnetgroup

    ", + "CreateClusterSubnetGroupMessage$Description": "

    A description for the subnet group.

    ", + "CreateEventSubscriptionMessage$SubscriptionName": "

    The name of the event subscription to be created.

    Constraints:

    • Cannot be null, empty, or blank.
    • Must contain from 1 to 255 alphanumeric characters or hyphens.
    • First character must be a letter.
    • Cannot end with a hyphen or contain two consecutive hyphens.
    ", + "CreateEventSubscriptionMessage$SnsTopicArn": "

    The Amazon Resource Name (ARN) of the Amazon SNS topic used to transmit the event notifications. The ARN is created by Amazon SNS when you create a topic and subscribe to it.

    ", + "CreateEventSubscriptionMessage$SourceType": "

    The type of source that will be generating the events. For example, if you want to be notified of events generated by a cluster, you would set this parameter to cluster. If this value is not specified, events are returned for all Amazon Redshift objects in your AWS account. You must specify a source type in order to specify source IDs.

    Valid values: cluster, cluster-parameter-group, cluster-security-group, and cluster-snapshot.

    ", + "CreateEventSubscriptionMessage$Severity": "

    Specifies the Amazon Redshift event severity to be published by the event notification subscription.

    Values: ERROR, INFO

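    These subscription fields combine as follows; a sketch with a hypothetical topic ARN and source ID, again assuming the svc client from the first example:

        func subscribe(svc *redshift.Redshift) error {
            _, err := svc.CreateEventSubscription(&redshift.CreateEventSubscriptionInput{
                SubscriptionName: aws.String("my-cluster-events"),
                SnsTopicArn:      aws.String("arn:aws:sns:us-east-1:123456789012:redshift-events"), // hypothetical
                SourceType:       aws.String("cluster"), // required once SourceIds are given
                SourceIds:        []*string{aws.String("examplecluster")},
                Severity:         aws.String("ERROR"),
                Enabled:          aws.Bool(true),
            })
            return err
        }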
    ", + "CreateHsmClientCertificateMessage$HsmClientCertificateIdentifier": "

    The identifier to be assigned to the new HSM client certificate that the cluster will use to connect to the HSM to use the database encryption keys.

    ", + "CreateHsmConfigurationMessage$HsmConfigurationIdentifier": "

    The identifier to be assigned to the new Amazon Redshift HSM configuration.

    ", + "CreateHsmConfigurationMessage$Description": "

    A text description of the HSM configuration to be created.

    ", + "CreateHsmConfigurationMessage$HsmIpAddress": "

    The IP address that the Amazon Redshift cluster must use to access the HSM.

    ", + "CreateHsmConfigurationMessage$HsmPartitionName": "

    The name of the partition in the HSM where the Amazon Redshift clusters will store their database encryption keys.

    ", + "CreateHsmConfigurationMessage$HsmPartitionPassword": "

    The password required to access the HSM partition.

    ", + "CreateHsmConfigurationMessage$HsmServerPublicCertificate": "

    The HSM's public certificate file. When using Cloud HSM, the file name is server.pem.

    ", + "CreateSnapshotCopyGrantMessage$SnapshotCopyGrantName": "

    The name of the snapshot copy grant. This name must be unique in the region for the AWS account.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens.
    • Alphabetic characters must be lowercase.
    • First character must be a letter.
    • Cannot end with a hyphen or contain two consecutive hyphens.
    • Must be unique for all clusters within an AWS account.

    ", + "CreateSnapshotCopyGrantMessage$KmsKeyId": "

    The unique identifier of the customer master key (CMK) to which to grant Amazon Redshift permission. If no key is specified, the default key is used.

    ", + "CreateTagsMessage$ResourceName": "

    The Amazon Resource Name (ARN) to which you want to add the tag or tags. For example, arn:aws:redshift:us-east-1:123456789:cluster:t1.

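    Concretely, tagging a cluster by ARN looks like this (same client assumption; the account ID and tag are placeholders):

        func tagCluster(svc *redshift.Redshift) error {
            _, err := svc.CreateTags(&redshift.CreateTagsInput{
                ResourceName: aws.String("arn:aws:redshift:us-east-1:123456789:cluster:t1"),
                Tags: []*redshift.Tag{
                    {Key: aws.String("environment"), Value: aws.String("test")},
                },
            })
            return err
        }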
    ", + "DefaultClusterParameters$ParameterGroupFamily": "

    The name of the cluster parameter group family to which the engine default parameters apply.

    ", + "DefaultClusterParameters$Marker": "

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

    ", + "DeleteClusterMessage$ClusterIdentifier": "

    The identifier of the cluster to be deleted.

    Constraints:

    • Must contain lowercase characters.
    • Must contain from 1 to 63 alphanumeric characters or hyphens.
    • First character must be a letter.
    • Cannot end with a hyphen or contain two consecutive hyphens.
    ", + "DeleteClusterMessage$FinalClusterSnapshotIdentifier": "

    The identifier of the final snapshot that is to be created immediately before deleting the cluster. If this parameter is provided, SkipFinalClusterSnapshot must be false.

    Constraints:

    • Must be 1 to 255 alphanumeric characters.
    • First character must be a letter.
    • Cannot end with a hyphen or contain two consecutive hyphens.
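    The coupling between these two fields is easiest to see in code: a final snapshot identifier is only legal when SkipFinalClusterSnapshot is false. A sketch under the same assumptions as the first example:

        func deleteWithFinalSnapshot(svc *redshift.Redshift) error {
            _, err := svc.DeleteCluster(&redshift.DeleteClusterInput{
                ClusterIdentifier:              aws.String("examplecluster"),
                SkipFinalClusterSnapshot:       aws.Bool(false),
                FinalClusterSnapshotIdentifier: aws.String("examplecluster-final"),
            })
            return err
        }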
    ", + "DeleteClusterParameterGroupMessage$ParameterGroupName": "

    The name of the parameter group to be deleted.

    Constraints:

    • Must be the name of an existing cluster parameter group.
    • Cannot delete a default cluster parameter group.
    ", + "DeleteClusterSecurityGroupMessage$ClusterSecurityGroupName": "

    The name of the cluster security group to be deleted.

    ", + "DeleteClusterSnapshotMessage$SnapshotIdentifier": "

    The unique identifier of the manual snapshot to be deleted.

    Constraints: Must be the name of an existing snapshot that is in the available state.

    ", + "DeleteClusterSnapshotMessage$SnapshotClusterIdentifier": "

    The unique identifier of the cluster the snapshot was created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.

    Constraints: Must be the name of a valid cluster.

    ", + "DeleteClusterSubnetGroupMessage$ClusterSubnetGroupName": "

    The name of the cluster subnet group to be deleted.

    ", + "DeleteEventSubscriptionMessage$SubscriptionName": "

    The name of the Amazon Redshift event notification subscription to be deleted.

    ", + "DeleteHsmClientCertificateMessage$HsmClientCertificateIdentifier": "

    The identifier of the HSM client certificate to be deleted.

    ", + "DeleteHsmConfigurationMessage$HsmConfigurationIdentifier": "

    The identifier of the Amazon Redshift HSM configuration to be deleted.

    ", + "DeleteSnapshotCopyGrantMessage$SnapshotCopyGrantName": "

    The name of the snapshot copy grant to delete.

    ", + "DeleteTagsMessage$ResourceName": "

    The Amazon Resource Name (ARN) from which you want to remove the tag or tags. For example, arn:aws:redshift:us-east-1:123456789:cluster:t1.

    ", + "DescribeClusterParameterGroupsMessage$ParameterGroupName": "

    The name of a specific parameter group for which to return details. By default, details about all parameter groups and the default parameter group are returned.

    ", + "DescribeClusterParameterGroupsMessage$Marker": "

    An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterParameterGroups request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

    ", + "DescribeClusterParametersMessage$ParameterGroupName": "

    The name of a cluster parameter group for which to return details.

    ", + "DescribeClusterParametersMessage$Source": "

    The parameter types to return. Specify user to show parameters that are different from the default. Similarly, specify engine-default to show parameters that are the same as the default parameter group.

    Default: All parameter types returned.

    Valid Values: user | engine-default

    ", + "DescribeClusterParametersMessage$Marker": "

    An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterParameters request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

    ", + "DescribeClusterSecurityGroupsMessage$ClusterSecurityGroupName": "

    The name of a cluster security group for which you are requesting details. You can specify either the Marker parameter or a ClusterSecurityGroupName parameter, but not both.

    Example: securitygroup1

    ", + "DescribeClusterSecurityGroupsMessage$Marker": "

    An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterSecurityGroups request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

    Constraints: You can specify either the ClusterSecurityGroupName parameter or the Marker parameter, but not both.

    ", + "DescribeClusterSnapshotsMessage$ClusterIdentifier": "

    The identifier of the cluster for which information about snapshots is requested.

    ", + "DescribeClusterSnapshotsMessage$SnapshotIdentifier": "

    The snapshot identifier of the snapshot about which to return information.

    ", + "DescribeClusterSnapshotsMessage$SnapshotType": "

    The type of snapshots for which you are requesting information. By default, snapshots of all types are returned.

    Valid Values: automated | manual

    ", + "DescribeClusterSnapshotsMessage$Marker": "

    An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterSnapshots request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

    ", + "DescribeClusterSnapshotsMessage$OwnerAccount": "

    The AWS customer account used to create or copy the snapshot. Use this field to filter the results to snapshots owned by a particular account. To describe snapshots you own, either specify your AWS customer account, or do not specify the parameter.

    ", + "DescribeClusterSubnetGroupsMessage$ClusterSubnetGroupName": "

    The name of the cluster subnet group for which information is requested.

    ", + "DescribeClusterSubnetGroupsMessage$Marker": "

    An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterSubnetGroups request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

    ", + "DescribeClusterVersionsMessage$ClusterVersion": "

    The specific cluster version to return.

    Example: 1.0

    ", + "DescribeClusterVersionsMessage$ClusterParameterGroupFamily": "

    The name of a specific cluster parameter group family to return details for.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeClusterVersionsMessage$Marker": "

    An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterVersions request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

    ", + "DescribeClustersMessage$ClusterIdentifier": "

    The unique identifier of a cluster whose properties you are requesting. This parameter is case sensitive.

    The default is that all clusters defined for an account are returned.

    ", + "DescribeClustersMessage$Marker": "

    An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusters request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

    Constraints: You can specify either the ClusterIdentifier parameter or the Marker parameter, but not both.

    ", + "DescribeDefaultClusterParametersMessage$ParameterGroupFamily": "

    The name of the cluster parameter group family.

    ", + "DescribeDefaultClusterParametersMessage$Marker": "

    An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeDefaultClusterParameters request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

    ", + "DescribeEventCategoriesMessage$SourceType": "

    The source type, such as cluster or parameter group, to which the described event categories apply.

    Valid values: cluster, cluster-snapshot, cluster-parameter-group, and cluster-security-group.

    ", + "DescribeEventSubscriptionsMessage$SubscriptionName": "

    The name of the Amazon Redshift event notification subscription to be described.

    ", + "DescribeEventSubscriptionsMessage$Marker": "

    An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeEventSubscriptions request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

    ", + "DescribeEventsMessage$SourceIdentifier": "

    The identifier of the event source for which events will be returned. If this parameter is not specified, then all sources are included in the response.

    Constraints:

    If SourceIdentifier is supplied, SourceType must also be provided.

    • Specify a cluster identifier when SourceType is cluster.
    • Specify a cluster security group name when SourceType is cluster-security-group.
    • Specify a cluster parameter group name when SourceType is cluster-parameter-group.
    • Specify a cluster snapshot identifier when SourceType is cluster-snapshot.
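    In other words, SourceIdentifier and SourceType travel as a pair. A sketch filtering events to a single cluster (same client assumption as the first example):

        func clusterEvents(svc *redshift.Redshift) error {
            out, err := svc.DescribeEvents(&redshift.DescribeEventsInput{
                SourceIdentifier: aws.String("examplecluster"),
                SourceType:       aws.String("cluster"), // must accompany SourceIdentifier
            })
            if err != nil {
                return err
            }
            for _, e := range out.Events {
                fmt.Println(aws.StringValue(e.Severity), aws.StringValue(e.Message))
            }
            return nil
        }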
    ", + "DescribeEventsMessage$Marker": "

    An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeEvents request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

    ", + "DescribeHsmClientCertificatesMessage$HsmClientCertificateIdentifier": "

    The identifier of a specific HSM client certificate for which you want information. If no identifier is specified, information is returned for all HSM client certificates owned by your AWS customer account.

    ", + "DescribeHsmClientCertificatesMessage$Marker": "

    An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeHsmClientCertificates request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

    ", + "DescribeHsmConfigurationsMessage$HsmConfigurationIdentifier": "

    The identifier of a specific Amazon Redshift HSM configuration to be described. If no identifier is specified, information is returned for all HSM configurations owned by your AWS customer account.

    ", + "DescribeHsmConfigurationsMessage$Marker": "

    An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeHsmConfigurations request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

    ", + "DescribeLoggingStatusMessage$ClusterIdentifier": "

    The identifier of the cluster from which to get the logging status.

    Example: examplecluster

    ", + "DescribeOrderableClusterOptionsMessage$ClusterVersion": "

    The version filter value. Specify this parameter to show only the available offerings matching the specified version.

    Default: All versions.

    Constraints: Must be one of the versions returned from DescribeClusterVersions.

    ", + "DescribeOrderableClusterOptionsMessage$NodeType": "

    The node type filter value. Specify this parameter to show only the available offerings matching the specified node type.

    ", + "DescribeOrderableClusterOptionsMessage$Marker": "

    An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeOrderableClusterOptions request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

    ", + "DescribeReservedNodeOfferingsMessage$ReservedNodeOfferingId": "

    The unique identifier for the offering.

    ", + "DescribeReservedNodeOfferingsMessage$Marker": "

    An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeReservedNodeOfferings request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

    ", + "DescribeReservedNodesMessage$ReservedNodeId": "

    Identifier for the node reservation.

    ", + "DescribeReservedNodesMessage$Marker": "

    An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeReservedNodes request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

    ", + "DescribeResizeMessage$ClusterIdentifier": "

    The unique identifier of a cluster whose resize progress you are requesting. This parameter is case-sensitive.

    By default, resize operations for all clusters defined for an AWS account are returned.

    ", + "DescribeSnapshotCopyGrantsMessage$SnapshotCopyGrantName": "

    The name of the snapshot copy grant.

    ", + "DescribeSnapshotCopyGrantsMessage$Marker": "

    An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeSnapshotCopyGrant request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

    Constraints: You can specify either the SnapshotCopyGrantName parameter or the Marker parameter, but not both.

    ", + "DescribeTableRestoreStatusMessage$ClusterIdentifier": "

    The Amazon Redshift cluster that the table is being restored to.

    ", + "DescribeTableRestoreStatusMessage$TableRestoreRequestId": "

    The identifier of the table restore request to return status for. If you don't specify a TableRestoreRequestId value, then DescribeTableRestoreStatus returns the status of all in-progress table restore requests.

    ", + "DescribeTableRestoreStatusMessage$Marker": "

    An optional pagination token provided by a previous DescribeTableRestoreStatus request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by the MaxRecords parameter.

    ", + "DescribeTagsMessage$ResourceName": "

    The Amazon Resource Name (ARN) for which you want to describe the tag or tags. For example, arn:aws:redshift:us-east-1:123456789:cluster:t1.

    ", + "DescribeTagsMessage$ResourceType": "

    The type of resource with which you want to view tags. Valid resource types are:

    • Cluster
    • CIDR/IP
    • EC2 security group
    • Snapshot
    • Cluster security group
    • Subnet group
    • HSM connection
    • HSM certificate
    • Parameter group
    • Snapshot copy grant

    For more information about Amazon Redshift resource types and constructing ARNs, go to Constructing an Amazon Redshift Amazon Resource Name (ARN) in the Amazon Redshift Cluster Management Guide.

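    For instance, listing all tags attached to clusters only (same assumptions as above):

        func listClusterTags(svc *redshift.Redshift) error {
            out, err := svc.DescribeTags(&redshift.DescribeTagsInput{
                ResourceType: aws.String("cluster"),
            })
            if err != nil {
                return err
            }
            for _, tr := range out.TaggedResources {
                fmt.Println(aws.StringValue(tr.ResourceName), aws.StringValue(tr.Tag.Key))
            }
            return nil
        }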
    ", + "DescribeTagsMessage$Marker": "

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the marker parameter and retrying the command. If the marker field is empty, all response records have been retrieved for the request.

    ", + "DisableLoggingMessage$ClusterIdentifier": "

    The identifier of the cluster on which logging is to be stopped.

    Example: examplecluster

    ", + "DisableSnapshotCopyMessage$ClusterIdentifier": "

    The unique identifier of the source cluster for which you want to disable copying of snapshots to a destination region.

    Constraints: Must be the valid name of an existing cluster that has cross-region snapshot copy enabled.

    ", + "EC2SecurityGroup$Status": "

    The status of the EC2 security group.

    ", + "EC2SecurityGroup$EC2SecurityGroupName": "

    The name of the EC2 Security Group.

    ", + "EC2SecurityGroup$EC2SecurityGroupOwnerId": "

    The AWS ID of the owner of the EC2 security group specified in the EC2SecurityGroupName field.

    ", + "ElasticIpStatus$ElasticIp": "

    The elastic IP (EIP) address for the cluster.

    ", + "ElasticIpStatus$Status": "

    The status of the elastic IP (EIP) address.

    ", + "EnableLoggingMessage$ClusterIdentifier": "

    The identifier of the cluster on which logging is to be started.

    Example: examplecluster

    ", + "EnableLoggingMessage$BucketName": "

    The name of an existing S3 bucket where the log files are to be stored.

    Constraints:

    • Must be in the same region as the cluster
    • The cluster must have read bucket and put object permissions
    ", + "EnableLoggingMessage$S3KeyPrefix": "

    The prefix applied to the log file names.

    Constraints:

    • Cannot exceed 512 characters
    • Cannot contain spaces ( ), double quotes (\"), single quotes ('), a backslash (\\), or control characters. The hexadecimal codes for invalid characters are:
      • x00 to x20
      • x22
      • x27
      • x5c
      • x7f or larger
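    Putting the three logging fields together, a hedged sketch (the bucket name and prefix are placeholders; the bucket must already grant the cluster the permissions noted above):

        func enableAuditLogging(svc *redshift.Redshift) error {
            out, err := svc.EnableLogging(&redshift.EnableLoggingInput{
                ClusterIdentifier: aws.String("examplecluster"),
                BucketName:        aws.String("my-redshift-logs"), // hypothetical bucket
                S3KeyPrefix:       aws.String("audit/"),
            })
            if err != nil {
                return err
            }
            fmt.Println(aws.BoolValue(out.LoggingEnabled))
            return nil
        }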
    ", + "EnableSnapshotCopyMessage$ClusterIdentifier": "

    The unique identifier of the source cluster to copy snapshots from.

    Constraints: Must be the valid name of an existing cluster that does not already have cross-region snapshot copy enabled.

    ", + "EnableSnapshotCopyMessage$DestinationRegion": "

    The destination region that you want to copy snapshots to.

    Constraints: Must be the name of a valid region. For more information, see Regions and Endpoints in the Amazon Web Services General Reference.

    ", + "EnableSnapshotCopyMessage$SnapshotCopyGrantName": "

    The name of the snapshot copy grant to use when snapshots of an AWS KMS-encrypted cluster are copied to the destination region.

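    For a KMS-encrypted cluster the grant and the copy enablement go together; a sketch with hypothetical names, under the same client assumption as the first example:

        func enableCrossRegionCopy(svc *redshift.Redshift) error {
            if _, err := svc.CreateSnapshotCopyGrant(&redshift.CreateSnapshotCopyGrantInput{
                SnapshotCopyGrantName: aws.String("my-copy-grant"),
                // KmsKeyId omitted: the default key is used.
            }); err != nil {
                return err
            }
            _, err := svc.EnableSnapshotCopy(&redshift.EnableSnapshotCopyInput{
                ClusterIdentifier:     aws.String("examplecluster"),
                DestinationRegion:     aws.String("us-west-2"),
                SnapshotCopyGrantName: aws.String("my-copy-grant"),
            })
            return err
        }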
    ", + "Endpoint$Address": "

    The DNS address of the cluster.

    ", + "Event$SourceIdentifier": "

    The identifier for the source of the event.

    ", + "Event$Message": "

    The text of this event.

    ", + "Event$Severity": "

    The severity of the event.

    Values: ERROR, INFO

    ", + "Event$EventId": "

    The identifier of the event.

    ", + "EventCategoriesList$member": null, + "EventCategoriesMap$SourceType": "

    The source type, such as cluster or cluster-snapshot, that the returned categories belong to.

    ", + "EventInfoMap$EventId": "

    The identifier of an Amazon Redshift event.

    ", + "EventInfoMap$EventDescription": "

    The description of an Amazon Redshift event.

    ", + "EventInfoMap$Severity": "

    The severity of the event.

    Values: ERROR, INFO

    ", + "EventSubscription$CustomerAwsId": "

    The AWS customer account associated with the Amazon Redshift event notification subscription.

    ", + "EventSubscription$CustSubscriptionId": "

    The name of the Amazon Redshift event notification subscription.

    ", + "EventSubscription$SnsTopicArn": "

    The Amazon Resource Name (ARN) of the Amazon SNS topic used by the event notification subscription.

    ", + "EventSubscription$Status": "

    The status of the Amazon Redshift event notification subscription.

    Constraints:

    • Can be one of the following: active | no-permission | topic-not-exist
    • The status \"no-permission\" indicates that Amazon Redshift no longer has permission to post to the Amazon SNS topic. The status \"topic-not-exist\" indicates that the topic was deleted after the subscription was created.
    ", + "EventSubscription$SourceType": "

    The source type of the events returned by the Amazon Redshift event notification subscription, such as cluster or cluster-snapshot.

    ", + "EventSubscription$Severity": "

    The event severity specified in the Amazon Redshift event notification subscription.

    Values: ERROR, INFO

    ", + "EventSubscriptionsMessage$Marker": "

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

    ", + "EventsMessage$Marker": "

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

    ", + "HsmClientCertificate$HsmClientCertificateIdentifier": "

    The identifier of the HSM client certificate.

    ", + "HsmClientCertificate$HsmClientCertificatePublicKey": "

    The public key that the Amazon Redshift cluster will use to connect to the HSM. You must register the public key in the HSM.

    ", + "HsmClientCertificateMessage$Marker": "

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

    ", + "HsmConfiguration$HsmConfigurationIdentifier": "

    The name of the Amazon Redshift HSM configuration.

    ", + "HsmConfiguration$Description": "

    A text description of the HSM configuration.

    ", + "HsmConfiguration$HsmIpAddress": "

    The IP address that the Amazon Redshift cluster must use to access the HSM.

    ", + "HsmConfiguration$HsmPartitionName": "

    The name of the partition in the HSM where the Amazon Redshift clusters will store their database encryption keys.

    ", + "HsmConfigurationMessage$Marker": "

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

    ", + "HsmStatus$HsmClientCertificateIdentifier": "

    Specifies the name of the HSM client certificate the Amazon Redshift cluster uses to retrieve the data encryption keys stored in an HSM.

    ", + "HsmStatus$HsmConfigurationIdentifier": "

    Specifies the name of the HSM configuration that contains the information the Amazon Redshift cluster can use to retrieve and store keys in an HSM.

    ", + "HsmStatus$Status": "

    Reports whether the Amazon Redshift cluster has finished applying any HSM settings changes specified in a modify cluster command.

    Values: active, applying

    ", + "IPRange$Status": "

    The status of the IP range, for example, \"authorized\".

    ", + "IPRange$CIDRIP": "

    The IP range in Classless Inter-Domain Routing (CIDR) notation.

    ", + "IamRoleArnList$member": null, + "ImportTablesCompleted$member": null, + "ImportTablesInProgress$member": null, + "ImportTablesNotStarted$member": null, + "LoggingStatus$BucketName": "

    The name of the S3 bucket where the log files are stored.

    ", + "LoggingStatus$S3KeyPrefix": "

    The prefix applied to the log file names.

    ", + "LoggingStatus$LastFailureMessage": "

    The message indicating that logs failed to be delivered.

    ", + "ModifyClusterIamRolesMessage$ClusterIdentifier": "

    The unique identifier of the cluster for which you want to associate or disassociate IAM roles.

    ", + "ModifyClusterMessage$ClusterIdentifier": "

    The unique identifier of the cluster to be modified.

    Example: examplecluster

    ", + "ModifyClusterMessage$ClusterType": "

    The new cluster type.

    When you submit your cluster resize request, your existing cluster goes into a read-only mode. After Amazon Redshift provisions a new cluster based on your resize requirements, there will be an outage for a period while the old cluster is deleted and your connection is switched to the new cluster. You can use DescribeResize to track the progress of the resize request.

    Valid Values: multi-node | single-node

    ", + "ModifyClusterMessage$NodeType": "

    The new node type of the cluster. If you specify a new node type, you must also specify the number of nodes parameter.

    When you submit your request to resize a cluster, Amazon Redshift sets access permissions for the cluster to read-only. After Amazon Redshift provisions a new cluster according to your resize requirements, there will be a temporary outage while the old cluster is deleted and your connection is switched to the new cluster. When the new connection is complete, the original access permissions for the cluster are restored. You can use DescribeResize to track the progress of the resize request.

    Valid Values: ds1.xlarge | ds1.8xlarge | ds2.xlarge | ds2.8xlarge | dc1.large | dc1.8xlarge.

    ", + "ModifyClusterMessage$MasterUserPassword": "

    The new password for the cluster master user. This change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the MasterUserPassword element exists in the PendingModifiedValues element of the operation response. Operations never return the password, so this operation provides a way to regain access to the master user account for a cluster if the password is lost.

    Default: Uses existing setting.

    Constraints:

    • Must be between 8 and 64 characters in length.
    • Must contain at least one uppercase letter.
    • Must contain at least one lowercase letter.
    • Must contain one number.
    • Can be any printable ASCII character (ASCII code 33 to 126) except ' (single quote), \" (double quote), \\, /, @, or space.
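    Since operations never return the password, a lost master password is recovered by simply setting a new one, for example (same assumptions as the first sketch; the password is a placeholder):

        func resetMasterPassword(svc *redshift.Redshift) error {
            _, err := svc.ModifyCluster(&redshift.ModifyClusterInput{
                ClusterIdentifier:  aws.String("examplecluster"),
                MasterUserPassword: aws.String("NewPassw0rdExample"), // applied asynchronously
            })
            return err
        }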
    ", + "ModifyClusterMessage$ClusterParameterGroupName": "

    The name of the cluster parameter group to apply to this cluster. This change is applied only after the cluster is rebooted. To reboot a cluster use RebootCluster.

    Default: Uses existing setting.

    Constraints: The cluster parameter group must be in the same parameter group family that matches the cluster version.

    ", + "ModifyClusterMessage$PreferredMaintenanceWindow": "

    The weekly time range (in UTC) during which system maintenance can occur, if necessary. If system maintenance is necessary during the window, it may result in an outage.

    This maintenance window change is made immediately. If the new maintenance window indicates the current time, there must be at least 120 minutes between the current time and the end of the window in order to ensure that pending changes are applied.

    Default: Uses existing setting.

    Format: ddd:hh24:mi-ddd:hh24:mi, for example wed:07:30-wed:08:00.

    Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun

    Constraints: Must be at least 30 minutes.

    ", + "ModifyClusterMessage$ClusterVersion": "

    The new version number of the Amazon Redshift engine to upgrade to.

    For major version upgrades, if a non-default cluster parameter group is currently in use, a new cluster parameter group in the cluster parameter group family for the new version must be specified. The new cluster parameter group can be the default for that cluster parameter group family. For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide.

    Example: 1.0

    ", + "ModifyClusterMessage$HsmClientCertificateIdentifier": "

    Specifies the name of the HSM client certificate the Amazon Redshift cluster uses to retrieve the data encryption keys stored in an HSM.

    ", + "ModifyClusterMessage$HsmConfigurationIdentifier": "

    Specifies the name of the HSM configuration that contains the information the Amazon Redshift cluster can use to retrieve and store keys in an HSM.

    ", + "ModifyClusterMessage$NewClusterIdentifier": "

    The new identifier for the cluster.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens.
    • Alphabetic characters must be lowercase.
    • First character must be a letter.
    • Cannot end with a hyphen or contain two consecutive hyphens.
    • Must be unique for all clusters within an AWS account.

    Example: examplecluster

    ", + "ModifyClusterMessage$ElasticIp": "

    The Elastic IP (EIP) address for the cluster.

    Constraints: The cluster must be provisioned in EC2-VPC and publicly-accessible through an Internet gateway. For more information about provisioning clusters in EC2-VPC, go to Supported Platforms to Launch Your Cluster in the Amazon Redshift Cluster Management Guide.

    ", + "ModifyClusterParameterGroupMessage$ParameterGroupName": "

    The name of the parameter group to be modified.

    ", + "ModifyClusterSubnetGroupMessage$ClusterSubnetGroupName": "

    The name of the subnet group to be modified.

    ", + "ModifyClusterSubnetGroupMessage$Description": "

    A text description of the subnet group to be modified.

    ", + "ModifyEventSubscriptionMessage$SubscriptionName": "

    The name of the modified Amazon Redshift event notification subscription.

    ", + "ModifyEventSubscriptionMessage$SnsTopicArn": "

    The Amazon Resource Name (ARN) of the SNS topic to be used by the event notification subscription.

    ", + "ModifyEventSubscriptionMessage$SourceType": "

    The type of source that will be generating the events. For example, if you want to be notified of events generated by a cluster, you would set this parameter to cluster. If this value is not specified, events are returned for all Amazon Redshift objects in your AWS account. You must specify a source type in order to specify source IDs.

    Valid values: cluster, cluster-parameter-group, cluster-security-group, and cluster-snapshot.

    ", + "ModifyEventSubscriptionMessage$Severity": "

    Specifies the Amazon Redshift event severity to be published by the event notification subscription.

    Values: ERROR, INFO

    ", + "ModifySnapshotCopyRetentionPeriodMessage$ClusterIdentifier": "

    The unique identifier of the cluster for which you want to change the retention period for automated snapshots that are copied to a destination region.

    Constraints: Must be the valid name of an existing cluster that has cross-region snapshot copy enabled.

    ", + "OrderableClusterOption$ClusterVersion": "

    The version of the orderable cluster.

    ", + "OrderableClusterOption$ClusterType": "

    The cluster type, for example multi-node.

    ", + "OrderableClusterOption$NodeType": "

    The node type for the orderable cluster.

    ", + "OrderableClusterOptionsMessage$Marker": "

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

    ", + "Parameter$ParameterName": "

    The name of the parameter.

    ", + "Parameter$ParameterValue": "

    The value of the parameter.

    ", + "Parameter$Description": "

    A description of the parameter.

    ", + "Parameter$Source": "

    The source of the parameter value, such as \"engine-default\" or \"user\".

    ", + "Parameter$DataType": "

    The data type of the parameter.

    ", + "Parameter$AllowedValues": "

    The valid range of values for the parameter.

    ", + "Parameter$MinimumEngineVersion": "

    The earliest engine version to which the parameter can apply.

    ", + "PendingModifiedValues$MasterUserPassword": "

    The pending or in-progress change of the master user password for the cluster.

    ", + "PendingModifiedValues$NodeType": "

    The pending or in-progress change of the cluster's node type.

    ", + "PendingModifiedValues$ClusterType": "

    The pending or in-progress change of the cluster type.

    ", + "PendingModifiedValues$ClusterVersion": "

    The pending or in-progress change of the service version.

    ", + "PendingModifiedValues$ClusterIdentifier": "

    The pending or in-progress change of the new identifier for the cluster.

    ", + "PurchaseReservedNodeOfferingMessage$ReservedNodeOfferingId": "

    The unique identifier of the reserved node offering you want to purchase.

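    A typical flow is to list the offerings described earlier and then purchase by identifier; a sketch under the same assumptions as the first example:

        func purchaseFirstOffering(svc *redshift.Redshift) error {
            offers, err := svc.DescribeReservedNodeOfferings(&redshift.DescribeReservedNodeOfferingsInput{})
            if err != nil || len(offers.ReservedNodeOfferings) == 0 {
                return err
            }
            _, err = svc.PurchaseReservedNodeOffering(&redshift.PurchaseReservedNodeOfferingInput{
                ReservedNodeOfferingId: offers.ReservedNodeOfferings[0].ReservedNodeOfferingId,
                NodeCount:              aws.Int64(1),
            })
            return err
        }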
    ", + "RebootClusterMessage$ClusterIdentifier": "

    The cluster identifier.

    ", + "RecurringCharge$RecurringChargeFrequency": "

    The frequency at which the recurring charge amount is applied.

    ", + "ReservedNode$ReservedNodeId": "

    The unique identifier for the reservation.

    ", + "ReservedNode$ReservedNodeOfferingId": "

    The identifier for the reserved node offering.

    ", + "ReservedNode$NodeType": "

    The node type of the reserved node.

    ", + "ReservedNode$CurrencyCode": "

    The currency code for the reserved cluster.

    ", + "ReservedNode$State": "

    The state of the reserved compute node.

    Possible Values:

    • pending-payment: This reserved node has recently been purchased, and the sale has been approved, but payment has not yet been confirmed.
    • active: This reserved node is owned by the caller and is available for use.
    • payment-failed: Payment failed for the purchase attempt.
    ", + "ReservedNode$OfferingType": "

    The anticipated utilization of the reserved node, as defined in the reserved node offering.

    ", + "ReservedNodeOffering$ReservedNodeOfferingId": "

    The offering identifier.

    ", + "ReservedNodeOffering$NodeType": "

    The node type offered by the reserved node offering.

    ", + "ReservedNodeOffering$CurrencyCode": "

    The currency code for the compute nodes offering.

    ", + "ReservedNodeOffering$OfferingType": "

    The anticipated utilization of the reserved node, as defined in the reserved node offering.

    ", + "ReservedNodeOfferingsMessage$Marker": "

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

    ", + "ReservedNodesMessage$Marker": "

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

    ", + "ResetClusterParameterGroupMessage$ParameterGroupName": "

    The name of the cluster parameter group to be reset.

    ", + "ResizeProgressMessage$TargetNodeType": "

    The node type that the cluster will have after the resize operation is complete.

    ", + "ResizeProgressMessage$TargetClusterType": "

    The cluster type after the resize operation is complete.

    Valid Values: multi-node | single-node

    ", + "ResizeProgressMessage$Status": "

    The status of the resize operation.

    Valid Values: NONE | IN_PROGRESS | FAILED | SUCCEEDED

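    These are the fields a resize-progress poll reads back; a minimal sketch, same client assumption:

        func resizeStatus(svc *redshift.Redshift) error {
            out, err := svc.DescribeResize(&redshift.DescribeResizeInput{
                ClusterIdentifier: aws.String("examplecluster"),
            })
            if err != nil {
                return err
            }
            fmt.Println(aws.StringValue(out.Status), "->", aws.StringValue(out.TargetNodeType))
            return nil
        }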
    ", + "RestorableNodeTypeList$member": null, + "RestoreFromClusterSnapshotMessage$ClusterIdentifier": "

    The identifier of the cluster that will be created from restoring the snapshot.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens.
    • Alphabetic characters must be lowercase.
    • First character must be a letter.
    • Cannot end with a hyphen or contain two consecutive hyphens.
    • Must be unique for all clusters within an AWS account.

    ", + "RestoreFromClusterSnapshotMessage$SnapshotIdentifier": "

    The name of the snapshot from which to create the new cluster. This parameter isn't case sensitive.

    Example: my-snapshot-id

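    The two identifiers above are the core of a restore request; the optional fields described below refine placement and configuration. A sketch with placeholder names, same client assumption as the first example:

        func restoreCluster(svc *redshift.Redshift) error {
            _, err := svc.RestoreFromClusterSnapshot(&redshift.RestoreFromClusterSnapshotInput{
                ClusterIdentifier:  aws.String("restored-examplecluster"),
                SnapshotIdentifier: aws.String("my-snapshot-id"),
            })
            return err
        }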
    ", + "RestoreFromClusterSnapshotMessage$SnapshotClusterIdentifier": "

    The name of the cluster the source snapshot was created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.

    ", + "RestoreFromClusterSnapshotMessage$AvailabilityZone": "

    The Amazon EC2 Availability Zone in which to restore the cluster.

    Default: A random, system-chosen Availability Zone.

    Example: us-east-1a

    ", + "RestoreFromClusterSnapshotMessage$ClusterSubnetGroupName": "

    The name of the subnet group where you want the cluster restored.

    A snapshot of a cluster in a VPC can be restored only in a VPC. Therefore, you must provide the subnet group name where you want the cluster restored.

    ", + "RestoreFromClusterSnapshotMessage$OwnerAccount": "

    The AWS customer account used to create or copy the snapshot. Required if you are restoring a snapshot you do not own, optional if you own the snapshot.

    ", + "RestoreFromClusterSnapshotMessage$HsmClientCertificateIdentifier": "

    Specifies the name of the HSM client certificate the Amazon Redshift cluster uses to retrieve the data encryption keys stored in an HSM.

    ", + "RestoreFromClusterSnapshotMessage$HsmConfigurationIdentifier": "

    Specifies the name of the HSM configuration that contains the information the Amazon Redshift cluster can use to retrieve and store keys in an HSM.

    ", + "RestoreFromClusterSnapshotMessage$ElasticIp": "

    The elastic IP (EIP) address for the cluster.

    ", + "RestoreFromClusterSnapshotMessage$ClusterParameterGroupName": "

    The name of the parameter group to be associated with this cluster.

    Default: The default Amazon Redshift cluster parameter group. For information about the default parameter group, go to Working with Amazon Redshift Parameter Groups.

    Constraints:

    • Must be 1 to 255 alphanumeric characters or hyphens.
    • First character must be a letter.
    • Cannot end with a hyphen or contain two consecutive hyphens.
    ", + "RestoreFromClusterSnapshotMessage$PreferredMaintenanceWindow": "

    The weekly time range (in UTC) during which automated cluster maintenance can occur.

    Format: ddd:hh24:mi-ddd:hh24:mi

    Default: The value selected for the cluster from which the snapshot was taken. For more information about the time blocks for each region, see Maintenance Windows in Amazon Redshift Cluster Management Guide.

    Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun

    Constraints: Minimum 30-minute window.

    ", + "RestoreFromClusterSnapshotMessage$KmsKeyId": "

    The AWS Key Management Service (KMS) key ID of the encryption key that you want to use to encrypt data in the cluster that you restore from a shared snapshot.

    ", + "RestoreFromClusterSnapshotMessage$NodeType": "

    The node type that the restored cluster will be provisioned with.

    Default: The node type of the cluster from which the snapshot was taken. You can modify this if you are using any DS node type. In that case, you can choose to restore into another DS node type of the same size. For example, you can restore ds1.8xlarge into ds2.8xlarge, or ds2.xlarge into ds1.xlarge. If you have a DC instance type, you must restore into that same instance type and size. In other words, you can only restore a dc1.large instance type into another dc1.large instance type. For more information about node types, see About Clusters and Nodes in the Amazon Redshift Cluster Management Guide.

    ", + "RestoreFromClusterSnapshotMessage$AdditionalInfo": "

    Reserved.

    ", + "RestoreStatus$Status": "

    The status of the restore action. Returns starting, restoring, completed, or failed.

    ", + "RestoreTableFromClusterSnapshotMessage$ClusterIdentifier": "

    The identifier of the Amazon Redshift cluster to restore the table to.

    ", + "RestoreTableFromClusterSnapshotMessage$SnapshotIdentifier": "

    The identifier of the snapshot to restore the table from. This snapshot must have been created from the Amazon Redshift cluster specified by the ClusterIdentifier parameter.

    ", + "RestoreTableFromClusterSnapshotMessage$SourceDatabaseName": "

    The name of the source database that contains the table to restore from.

    ", + "RestoreTableFromClusterSnapshotMessage$SourceSchemaName": "

    The name of the source schema that contains the table to restore from. If you do not specify a SourceSchemaName value, the default is public.

    ", + "RestoreTableFromClusterSnapshotMessage$SourceTableName": "

    The name of the source table to restore from.

    ", + "RestoreTableFromClusterSnapshotMessage$TargetDatabaseName": "

    The name of the database to restore the table to.

    ", + "RestoreTableFromClusterSnapshotMessage$TargetSchemaName": "

    The name of the schema to restore the table to.

    ", + "RestoreTableFromClusterSnapshotMessage$NewTableName": "

    The name of the table to create as a result of the current request.

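    All eight fields above form one RestoreTableFromClusterSnapshot request; a sketch with hypothetical database, schema, and table names, same client assumption:

        func restoreTable(svc *redshift.Redshift) error {
            out, err := svc.RestoreTableFromClusterSnapshot(&redshift.RestoreTableFromClusterSnapshotInput{
                ClusterIdentifier:  aws.String("examplecluster"),
                SnapshotIdentifier: aws.String("my-snapshot-id"),
                SourceDatabaseName: aws.String("dev"),
                SourceSchemaName:   aws.String("public"), // the default when omitted
                SourceTableName:    aws.String("orders"),
                TargetDatabaseName: aws.String("dev"),
                TargetSchemaName:   aws.String("public"),
                NewTableName:       aws.String("orders_restored"),
            })
            if err != nil {
                return err
            }
            fmt.Println(aws.StringValue(out.TableRestoreStatus.Status))
            return nil
        }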
    ", + "RevokeClusterSecurityGroupIngressMessage$ClusterSecurityGroupName": "

    The name of the security group from which to revoke the ingress rule.

    ", + "RevokeClusterSecurityGroupIngressMessage$CIDRIP": "

    The IP range for which to revoke access. This range must be a valid Classless Inter-Domain Routing (CIDR) block of IP addresses. If CIDRIP is specified, EC2SecurityGroupName and EC2SecurityGroupOwnerId cannot be provided.

    ", + "RevokeClusterSecurityGroupIngressMessage$EC2SecurityGroupName": "

    The name of the EC2 Security Group whose access is to be revoked. If EC2SecurityGroupName is specified, EC2SecurityGroupOwnerId must also be provided and CIDRIP cannot be provided.

    ", + "RevokeClusterSecurityGroupIngressMessage$EC2SecurityGroupOwnerId": "

    The AWS account number of the owner of the security group specified in the EC2SecurityGroupName parameter. The AWS access key ID is not an acceptable value. If EC2SecurityGroupOwnerId is specified, EC2SecurityGroupName must also be provided and CIDRIP cannot be provided.

    Example: 111122223333

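    The mutual-exclusion rules above mean a revoke request names either a CIDR block or an EC2 security group plus its owner, never both. A sketch of both shapes, with hypothetical group names:

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/redshift"
        )

        func main() {
            svc := redshift.New(session.Must(session.NewSession()))

            // Variant 1: revoke a CIDR rule; the EC2 security group fields must
            // be left unset.
            _, err := svc.RevokeClusterSecurityGroupIngress(&redshift.RevokeClusterSecurityGroupIngressInput{
                ClusterSecurityGroupName: aws.String("my-security-group"),
                CIDRIP:                   aws.String("192.0.2.0/24"),
            })
            fmt.Println(err)

            // Variant 2: revoke an EC2 security group rule; the owner ID is the
            // AWS account number (never an access key ID) and CIDRIP is omitted.
            _, err = svc.RevokeClusterSecurityGroupIngress(&redshift.RevokeClusterSecurityGroupIngressInput{
                ClusterSecurityGroupName: aws.String("my-security-group"),
                EC2SecurityGroupName:     aws.String("my-ec2-group"),
                EC2SecurityGroupOwnerId:  aws.String("111122223333"),
            })
            fmt.Println(err)
        }
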
    ", + "RevokeSnapshotAccessMessage$SnapshotIdentifier": "

    The identifier of the snapshot that the account can no longer access.

    ", + "RevokeSnapshotAccessMessage$SnapshotClusterIdentifier": "

    The identifier of the cluster the snapshot was created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.

    ", + "RevokeSnapshotAccessMessage$AccountWithRestoreAccess": "

    The identifier of the AWS customer account that can no longer restore the specified snapshot.

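    A minimal sketch of the corresponding RevokeSnapshotAccess call; the snapshot identifier and account number are hypothetical, and SnapshotClusterIdentifier is only needed in the IAM-policy case described above:

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/redshift"
        )

        func main() {
            svc := redshift.New(session.Must(session.NewSession()))

            // SnapshotClusterIdentifier would be added here only when an IAM
            // policy scopes the snapshot resource to a specific cluster name.
            _, err := svc.RevokeSnapshotAccess(&redshift.RevokeSnapshotAccessInput{
                SnapshotIdentifier:       aws.String("my-snapshot"),
                AccountWithRestoreAccess: aws.String("444455556666"),
            })
            if err != nil {
                fmt.Println(err)
            }
        }
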
    ", + "RotateEncryptionKeyMessage$ClusterIdentifier": "

    The unique identifier of the cluster that you want to rotate the encryption keys for.

    Constraints: Must be the name of a valid cluster that has encryption enabled.

    ", + "Snapshot$SnapshotIdentifier": "

    The snapshot identifier that is provided in the request.

    ", + "Snapshot$ClusterIdentifier": "

    The identifier of the cluster for which the snapshot was taken.

    ", + "Snapshot$Status": "

    The snapshot status. The value of the status depends on the API operation used.

    ", + "Snapshot$AvailabilityZone": "

    The Availability Zone in which the cluster was created.

    ", + "Snapshot$MasterUsername": "

    The master user name for the cluster.

    ", + "Snapshot$ClusterVersion": "

    The version ID of the Amazon Redshift engine that is running on the cluster.

    ", + "Snapshot$SnapshotType": "

    The snapshot type. Snapshots created using CreateClusterSnapshot and CopyClusterSnapshot will be of type \"manual\".

    ", + "Snapshot$NodeType": "

    The node type of the nodes in the cluster.

    ", + "Snapshot$DBName": "

    The name of the database that was created when the cluster was created.

    ", + "Snapshot$VpcId": "

    The VPC identifier of the cluster if the snapshot is from a cluster in a VPC. Otherwise, this field is not in the output.

    ", + "Snapshot$KmsKeyId": "

    The AWS Key Management Service (KMS) key ID of the encryption key that was used to encrypt data in the cluster from which the snapshot was taken.

    ", + "Snapshot$OwnerAccount": "

    For manual snapshots, the AWS customer account used to create or copy the snapshot. For automatic snapshots, the owner of the cluster. The owner can perform all snapshot actions, such as sharing a manual snapshot.

    ", + "Snapshot$SourceRegion": "

    The source region from which the snapshot was copied.

    ", + "SnapshotCopyGrant$SnapshotCopyGrantName": "

    The name of the snapshot copy grant.

    ", + "SnapshotCopyGrant$KmsKeyId": "

    The unique identifier of the customer master key (CMK) in AWS KMS to which Amazon Redshift is granted permission.

    ", + "SnapshotCopyGrantMessage$Marker": "

    An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeSnapshotCopyGrants request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

    Constraints: You can specify either the SnapshotCopyGrantName parameter or the Marker parameter, but not both.

    ", + "SnapshotMessage$Marker": "

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

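    This Marker contract is shared by the Describe* operations: request up to MaxRecords, then resubmit the returned marker until it comes back empty. A sketch of the loop against DescribeClusterSnapshots (the SDK's generated *Pages helpers wrap the same logic):

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/redshift"
        )

        func main() {
            svc := redshift.New(session.Must(session.NewSession()))

            input := &redshift.DescribeClusterSnapshotsInput{
                MaxRecords: aws.Int64(20),
            }
            for {
                out, err := svc.DescribeClusterSnapshots(input)
                if err != nil {
                    fmt.Println(err)
                    return
                }
                for _, s := range out.Snapshots {
                    fmt.Println(aws.StringValue(s.SnapshotIdentifier))
                }
                // An empty Marker means every record has been retrieved.
                if aws.StringValue(out.Marker) == "" {
                    break
                }
                input.Marker = out.Marker
            }
        }
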
    ", + "SourceIdsList$member": null, + "Subnet$SubnetIdentifier": "

    The identifier of the subnet.

    ", + "Subnet$SubnetStatus": "

    The status of the subnet.

    ", + "SubnetIdentifierList$member": null, + "TableRestoreStatus$TableRestoreRequestId": "

    The unique identifier for the table restore request.

    ", + "TableRestoreStatus$Message": "

    A description of the status of the table restore request. Status values include SUCCEEDED, FAILED, CANCELED, PENDING, and IN_PROGRESS.

    ", + "TableRestoreStatus$ClusterIdentifier": "

    The identifier of the Amazon Redshift cluster that the table is being restored to.

    ", + "TableRestoreStatus$SnapshotIdentifier": "

    The identifier of the snapshot that the table is being restored from.

    ", + "TableRestoreStatus$SourceDatabaseName": "

    The name of the source database that contains the table being restored.

    ", + "TableRestoreStatus$SourceSchemaName": "

    The name of the source schema that contains the table being restored.

    ", + "TableRestoreStatus$SourceTableName": "

    The name of the source table being restored.

    ", + "TableRestoreStatus$TargetDatabaseName": "

    The name of the database to restore the table to.

    ", + "TableRestoreStatus$TargetSchemaName": "

    The name of the schema to restore the table to.

    ", + "TableRestoreStatus$NewTableName": "

    The name of the table to create as a result of the table restore request.

    ", + "TableRestoreStatusMessage$Marker": "

    A pagination token that can be used in a subsequent DescribeTableRestoreStatus request.

    ", + "Tag$Key": "

    The key, or name, for the resource tag.

    ", + "Tag$Value": "

    The value for the resource tag.

    ", + "TagKeyList$member": null, + "TagValueList$member": null, + "TaggedResource$ResourceName": "

    The Amazon Resource Name (ARN) with which the tag is associated. For example, arn:aws:redshift:us-east-1:123456789:cluster:t1.

    ", + "TaggedResource$ResourceType": "

    The type of resource with which the tag is associated. Valid resource types are:

    • Cluster
    • CIDR/IP
    • EC2 security group
    • Snapshot
    • Cluster security group
    • Subnet group
    • HSM connection
    • HSM certificate
    • Parameter group

    For more information about Amazon Redshift resource types and constructing ARNs, go to Constructing an Amazon Redshift Amazon Resource Name (ARN) in the Amazon Redshift Cluster Management Guide.

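    These resource types and the ARN form above are what DescribeTags filters on. A sketch that lists the tags for one resource, reusing the hypothetical cluster ARN from the example above:

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/redshift"
        )

        func main() {
            svc := redshift.New(session.Must(session.NewSession()))

            // ResourceName takes the ARN form shown above; ResourceType could be
            // used instead to list tags across one of the resource types listed.
            out, err := svc.DescribeTags(&redshift.DescribeTagsInput{
                ResourceName: aws.String("arn:aws:redshift:us-east-1:123456789:cluster:t1"),
            })
            if err != nil {
                fmt.Println(err)
                return
            }
            for _, tr := range out.TaggedResources {
                fmt.Println(aws.StringValue(tr.Tag.Key), "=", aws.StringValue(tr.Tag.Value))
            }
        }
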
    ", + "TaggedResourceListMessage$Marker": "

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

    ", + "VpcSecurityGroupIdList$member": null, + "VpcSecurityGroupMembership$VpcSecurityGroupId": "

    The identifier of the VPC security group.

    ", + "VpcSecurityGroupMembership$Status": "

    The status of the VPC security group.

    " + } + }, + "Subnet": { + "base": "

    Describes a subnet.

    ", + "refs": { + "SubnetList$member": null + } + }, + "SubnetAlreadyInUse": { + "base": "

    A specified subnet is already in use by another cluster.

    ", + "refs": { + } + }, + "SubnetIdentifierList": { + "base": null, + "refs": { + "CreateClusterSubnetGroupMessage$SubnetIds": "

    An array of VPC subnet IDs. A maximum of 20 subnets can be modified in a single request.

    ", + "ModifyClusterSubnetGroupMessage$SubnetIds": "

    An array of VPC subnet IDs. A maximum of 20 subnets can be modified in a single request.

    " + } + }, + "SubnetList": { + "base": null, + "refs": { + "ClusterSubnetGroup$Subnets": "

    A list of the VPC Subnet elements.

    " + } + }, + "SubscriptionAlreadyExistFault": { + "base": "

    There is already an existing event notification subscription with the specified name.

    ", + "refs": { + } + }, + "SubscriptionCategoryNotFoundFault": { + "base": "

    The value specified for the event category was not one of the allowed values, or it specified a category that does not apply to the specified source type. The allowed values are Configuration, Management, Monitoring, and Security.

    ", + "refs": { + } + }, + "SubscriptionEventIdNotFoundFault": { + "base": "

    An Amazon Redshift event with the specified event ID does not exist.

    ", + "refs": { + } + }, + "SubscriptionNotFoundFault": { + "base": "

    An Amazon Redshift event notification subscription with the specified name does not exist.

    ", + "refs": { + } + }, + "SubscriptionSeverityNotFoundFault": { + "base": "

    The value specified for the event severity was not one of the allowed values, or it specified a severity that does not apply to the specified source type. The allowed values are ERROR and INFO.

    ", + "refs": { + } + }, + "TStamp": { + "base": null, + "refs": { + "Cluster$ClusterCreateTime": "

    The date and time that the cluster was created.

    ", + "DescribeClusterSnapshotsMessage$StartTime": "

    A value that requests only snapshots created at or after the specified time. The time value is specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.

    Example: 2012-07-16T18:00:00Z

    ", + "DescribeClusterSnapshotsMessage$EndTime": "

    A time value that requests only snapshots created at or before the specified time. The time value is specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.

    Example: 2012-07-16T18:00:00Z

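    Through the Go SDK these ISO 8601 values are supplied as time.Time and serialized for you; a sketch that bounds DescribeClusterSnapshots to a one-day window (the window itself is hypothetical):

        package main

        import (
            "fmt"
            "time"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/redshift"
        )

        func main() {
            svc := redshift.New(session.Must(session.NewSession()))

            // time.Time values are serialized to ISO 8601 (e.g.
            // 2012-07-16T18:00:00Z) on the wire.
            start, _ := time.Parse(time.RFC3339, "2012-07-16T18:00:00Z")
            out, err := svc.DescribeClusterSnapshots(&redshift.DescribeClusterSnapshotsInput{
                StartTime: aws.Time(start),
                EndTime:   aws.Time(start.Add(24 * time.Hour)),
            })
            if err != nil {
                fmt.Println(err)
                return
            }
            fmt.Println(len(out.Snapshots), "snapshots in window")
        }
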
    ", + "DescribeEventsMessage$StartTime": "

    The beginning of the time interval to retrieve events for, specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.

    Example: 2009-07-08T18:00Z

    ", + "DescribeEventsMessage$EndTime": "

    The end of the time interval for which to retrieve events, specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.

    Example: 2009-07-08T18:00Z

    ", + "Event$Date": "

    The date and time of the event.

    ", + "EventSubscription$SubscriptionCreationTime": "

    The date and time the Amazon Redshift event notification subscription was created.

    ", + "LoggingStatus$LastSuccessfulDeliveryTime": "

    The last time that logs were delivered.

    ", + "LoggingStatus$LastFailureTime": "

    The last time when logs failed to be delivered.

    ", + "ReservedNode$StartTime": "

    The time the reservation started. You purchase a reserved node offering for a duration. This is the start time of that duration.

    ", + "Snapshot$SnapshotCreateTime": "

    The time (UTC) when Amazon Redshift began the snapshot. A snapshot contains a copy of the cluster data as of this exact time.

    ", + "Snapshot$ClusterCreateTime": "

    The time (UTC) when the cluster was originally created.

    ", + "TableRestoreStatus$RequestTime": "

    The time that the table restore request was made, in Coordinated Universal Time (UTC).

    " + } + }, + "TableRestoreNotFoundFault": { + "base": "

    The specified TableRestoreRequestId value was not found.

    ", + "refs": { + } + }, + "TableRestoreStatus": { + "base": "

    Describes the status of a RestoreTableFromClusterSnapshot operation.

    ", + "refs": { + "RestoreTableFromClusterSnapshotResult$TableRestoreStatus": null, + "TableRestoreStatusList$member": null + } + }, + "TableRestoreStatusList": { + "base": null, + "refs": { + "TableRestoreStatusMessage$TableRestoreStatusDetails": "

    A list of status details for one or more table restore requests.

    " + } + }, + "TableRestoreStatusMessage": { + "base": null, + "refs": { + } + }, + "TableRestoreStatusType": { + "base": null, + "refs": { + "TableRestoreStatus$Status": "

    A value that describes the current state of the table restore request.

    Valid Values: SUCCEEDED, FAILED, CANCELED, PENDING, IN_PROGRESS

    " + } + }, + "Tag": { + "base": "

    A tag consisting of a name/value pair for a resource.

    ", + "refs": { + "TagList$member": null, + "TaggedResource$Tag": "

    The tag for the resource.

    " + } + }, + "TagKeyList": { + "base": null, + "refs": { + "DeleteTagsMessage$TagKeys": "

    The tag key or keys that you want to delete.

    ", + "DescribeClusterParameterGroupsMessage$TagKeys": "

    A tag key or keys for which you want to return all matching cluster parameter groups that are associated with the specified key or keys. For example, suppose that you have parameter groups that are tagged with keys called owner and environment. If you specify both of these tag keys in the request, Amazon Redshift returns a response with the parameter groups that have either or both of these tag keys associated with them.

    ", + "DescribeClusterSecurityGroupsMessage$TagKeys": "

    A tag key or keys for which you want to return all matching cluster security groups that are associated with the specified key or keys. For example, suppose that you have security groups that are tagged with keys called owner and environment. If you specify both of these tag keys in the request, Amazon Redshift returns a response with the security groups that have either or both of these tag keys associated with them.

    ", + "DescribeClusterSnapshotsMessage$TagKeys": "

    A tag key or keys for which you want to return all matching cluster snapshots that are associated with the specified key or keys. For example, suppose that you have snapshots that are tagged with keys called owner and environment. If you specify both of these tag keys in the request, Amazon Redshift returns a response with the snapshots that have either or both of these tag keys associated with them.

    ", + "DescribeClusterSubnetGroupsMessage$TagKeys": "

    A tag key or keys for which you want to return all matching cluster subnet groups that are associated with the specified key or keys. For example, suppose that you have subnet groups that are tagged with keys called owner and environment. If you specify both of these tag keys in the request, Amazon Redshift returns a response with the subnet groups that have either or both of these tag keys associated with them.

    ", + "DescribeClustersMessage$TagKeys": "

    A tag key or keys for which you want to return all matching clusters that are associated with the specified key or keys. For example, suppose that you have clusters that are tagged with keys called owner and environment. If you specify both of these tag keys in the request, Amazon Redshift returns a response with the clusters that have either or both of these tag keys associated with them.

    ", + "DescribeHsmClientCertificatesMessage$TagKeys": "

    A tag key or keys for which you want to return all matching HSM client certificates that are associated with the specified key or keys. For example, suppose that you have HSM client certificates that are tagged with keys called owner and environment. If you specify both of these tag keys in the request, Amazon Redshift returns a response with the HSM client certificates that have either or both of these tag keys associated with them.

    ", + "DescribeHsmConfigurationsMessage$TagKeys": "

    A tag key or keys for which you want to return all matching HSM configurations that are associated with the specified key or keys. For example, suppose that you have HSM configurations that are tagged with keys called owner and environment. If you specify both of these tag keys in the request, Amazon Redshift returns a response with the HSM configurations that have either or both of these tag keys associated with them.

    ", + "DescribeSnapshotCopyGrantsMessage$TagKeys": "

    A tag key or keys for which you want to return all matching resources that are associated with the specified key or keys. For example, suppose that you have resources tagged with keys called owner and environment. If you specify both of these tag keys in the request, Amazon Redshift returns a response with all resources that have either or both of these tag keys associated with them.

    ", + "DescribeTagsMessage$TagKeys": "

    A tag key or keys for which you want to return all matching resources that are associated with the specified key or keys. For example, suppose that you have resources tagged with keys called owner and environment. If you specify both of these tag keys in the request, Amazon Redshift returns a response with all resources that have either or both of these tag keys associated with them.

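    The either/or matching described in each entry above works the same wherever TagKeys (and, below, TagValues) appear. A sketch against DescribeClusters using the owner and environment keys from the examples:

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/redshift"
        )

        func main() {
            svc := redshift.New(session.Must(session.NewSession()))

            // Returns clusters carrying either or both of these tag keys.
            out, err := svc.DescribeClusters(&redshift.DescribeClustersInput{
                TagKeys: []*string{aws.String("owner"), aws.String("environment")},
            })
            if err != nil {
                fmt.Println(err)
                return
            }
            for _, c := range out.Clusters {
                fmt.Println(aws.StringValue(c.ClusterIdentifier))
            }
        }
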
    " + } + }, + "TagLimitExceededFault": { + "base": "

    The request exceeds the limit of 10 tags for the resource.

    ", + "refs": { + } + }, + "TagList": { + "base": null, + "refs": { + "Cluster$Tags": "

    The list of tags for the cluster.

    ", + "ClusterParameterGroup$Tags": "

    The list of tags for the cluster parameter group.

    ", + "ClusterSecurityGroup$Tags": "

    The list of tags for the cluster security group.

    ", + "ClusterSubnetGroup$Tags": "

    The list of tags for the cluster subnet group.

    ", + "CreateClusterMessage$Tags": "

    A list of tag instances.

    ", + "CreateClusterParameterGroupMessage$Tags": "

    A list of tag instances.

    ", + "CreateClusterSecurityGroupMessage$Tags": "

    A list of tag instances.

    ", + "CreateClusterSnapshotMessage$Tags": "

    A list of tag instances.

    ", + "CreateClusterSubnetGroupMessage$Tags": "

    A list of tag instances.

    ", + "CreateEventSubscriptionMessage$Tags": "

    A list of tag instances.

    ", + "CreateHsmClientCertificateMessage$Tags": "

    A list of tag instances.

    ", + "CreateHsmConfigurationMessage$Tags": "

    A list of tag instances.

    ", + "CreateSnapshotCopyGrantMessage$Tags": "

    A list of tag instances.

    ", + "CreateTagsMessage$Tags": "

    One or more name/value pairs to add as tags to the specified resource. Each tag name is passed in with the parameter Key and the corresponding value is passed in with the parameter Value. The Key and Value parameters are separated by a comma (,). Separate multiple tags with a space. For example, --tags \"Key\"=\"owner\",\"Value\"=\"admin\" \"Key\"=\"environment\",\"Value\"=\"test\" \"Key\"=\"version\",\"Value\"=\"1.0\".

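    The --tags syntax above is the CLI rendering of this list; in the Go SDK the same name/value pairs are passed as Tag structures. A sketch, reusing the hypothetical cluster ARN from earlier:

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/redshift"
        )

        func main() {
            svc := redshift.New(session.Must(session.NewSession()))

            // Equivalent of:
            //   --tags "Key"="owner","Value"="admin" "Key"="environment","Value"="test"
            // Note the 10-tag-per-resource limit mentioned above.
            _, err := svc.CreateTags(&redshift.CreateTagsInput{
                ResourceName: aws.String("arn:aws:redshift:us-east-1:123456789:cluster:t1"),
                Tags: []*redshift.Tag{
                    {Key: aws.String("owner"), Value: aws.String("admin")},
                    {Key: aws.String("environment"), Value: aws.String("test")},
                },
            })
            if err != nil {
                fmt.Println(err)
            }
        }
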
    ", + "EC2SecurityGroup$Tags": "

    The list of tags for the EC2 security group.

    ", + "EventSubscription$Tags": "

    The list of tags for the event subscription.

    ", + "HsmClientCertificate$Tags": "

    The list of tags for the HSM client certificate.

    ", + "HsmConfiguration$Tags": "

    The list of tags for the HSM configuration.

    ", + "IPRange$Tags": "

    The list of tags for the IP range.

    ", + "Snapshot$Tags": "

    The list of tags for the cluster snapshot.

    ", + "SnapshotCopyGrant$Tags": "

    A list of tag instances.

    " + } + }, + "TagValueList": { + "base": null, + "refs": { + "DescribeClusterParameterGroupsMessage$TagValues": "

    A tag value or values for which you want to return all matching cluster parameter groups that are associated with the specified tag value or values. For example, suppose that you have parameter groups that are tagged with values called admin and test. If you specify both of these tag values in the request, Amazon Redshift returns a response with the parameter groups that have either or both of these tag values associated with them.

    ", + "DescribeClusterSecurityGroupsMessage$TagValues": "

    A tag value or values for which you want to return all matching cluster security groups that are associated with the specified tag value or values. For example, suppose that you have security groups that are tagged with values called admin and test. If you specify both of these tag values in the request, Amazon Redshift returns a response with the security groups that have either or both of these tag values associated with them.

    ", + "DescribeClusterSnapshotsMessage$TagValues": "

    A tag value or values for which you want to return all matching cluster snapshots that are associated with the specified tag value or values. For example, suppose that you have snapshots that are tagged with values called admin and test. If you specify both of these tag values in the request, Amazon Redshift returns a response with the snapshots that have either or both of these tag values associated with them.

    ", + "DescribeClusterSubnetGroupsMessage$TagValues": "

    A tag value or values for which you want to return all matching cluster subnet groups that are associated with the specified tag value or values. For example, suppose that you have subnet groups that are tagged with values called admin and test. If you specify both of these tag values in the request, Amazon Redshift returns a response with the subnet groups that have either or both of these tag values associated with them.

    ", + "DescribeClustersMessage$TagValues": "

    A tag value or values for which you want to return all matching clusters that are associated with the specified tag value or values. For example, suppose that you have clusters that are tagged with values called admin and test. If you specify both of these tag values in the request, Amazon Redshift returns a response with the clusters that have either or both of these tag values associated with them.

    ", + "DescribeHsmClientCertificatesMessage$TagValues": "

    A tag value or values for which you want to return all matching HSM client certificates that are associated with the specified tag value or values. For example, suppose that you have HSM client certificates that are tagged with values called admin and test. If you specify both of these tag values in the request, Amazon Redshift returns a response with the HSM client certificates that have either or both of these tag values associated with them.

    ", + "DescribeHsmConfigurationsMessage$TagValues": "

    A tag value or values for which you want to return all matching HSM configurations that are associated with the specified tag value or values. For example, suppose that you have HSM configurations that are tagged with values called admin and test. If you specify both of these tag values in the request, Amazon Redshift returns a response with the HSM configurations that have either or both of these tag values associated with them.

    ", + "DescribeSnapshotCopyGrantsMessage$TagValues": "

    A tag value or values for which you want to return all matching resources that are associated with the specified value or values. For example, suppose that you have resources tagged with values called admin and test. If you specify both of these tag values in the request, Amazon Redshift returns a response with all resources that have either or both of these tag values associated with them.

    ", + "DescribeTagsMessage$TagValues": "

    A tag value or values for which you want to return all matching resources that are associated with the specified value or values. For example, suppose that you have resources tagged with values called admin and test. If you specify both of these tag values in the request, Amazon Redshift returns a response with all resources that have either or both of these tag values associated with them.

    " + } + }, + "TaggedResource": { + "base": "

    A tag and its associated resource.

    ", + "refs": { + "TaggedResourceList$member": null + } + }, + "TaggedResourceList": { + "base": null, + "refs": { + "TaggedResourceListMessage$TaggedResources": "

    A list of tags with their associated resources.

    " + } + }, + "TaggedResourceListMessage": { + "base": "

    ", + "refs": { + } + }, + "UnauthorizedOperation": { + "base": "

    Your account is not authorized to perform the requested operation.

    ", + "refs": { + } + }, + "UnknownSnapshotCopyRegionFault": { + "base": "

    The specified region is incorrect or does not exist.

    ", + "refs": { + } + }, + "UnsupportedOperationFault": { + "base": "

    The requested operation isn't supported.

    ", + "refs": { + } + }, + "UnsupportedOptionFault": { + "base": "

    A request option was specified that is not supported.

    ", + "refs": { + } + }, + "VpcSecurityGroupIdList": { + "base": null, + "refs": { + "CreateClusterMessage$VpcSecurityGroupIds": "

    A list of Virtual Private Cloud (VPC) security groups to be associated with the cluster.

    Default: The default VPC security group is associated with the cluster.

    ", + "ModifyClusterMessage$VpcSecurityGroupIds": "

    A list of virtual private cloud (VPC) security groups to be associated with the cluster.

    ", + "RestoreFromClusterSnapshotMessage$VpcSecurityGroupIds": "

    A list of Virtual Private Cloud (VPC) security groups to be associated with the cluster.

    Default: The default VPC security group is associated with the cluster.

    VPC security groups only apply to clusters in VPCs.

    " + } + }, + "VpcSecurityGroupMembership": { + "base": "

    Describes the members of a VPC security group.

    ", + "refs": { + "VpcSecurityGroupMembershipList$member": null + } + }, + "VpcSecurityGroupMembershipList": { + "base": null, + "refs": { + "Cluster$VpcSecurityGroups": "

    A list of Virtual Private Cloud (VPC) security groups that are associated with the cluster. This parameter is returned only if the cluster is in a VPC.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/redshift/2012-12-01/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/redshift/2012-12-01/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/redshift/2012-12-01/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/redshift/2012-12-01/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/redshift/2012-12-01/paginators-1.json new file mode 100644 index 000000000..03027de35 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/redshift/2012-12-01/paginators-1.json @@ -0,0 +1,94 @@ +{ + "pagination": { + "DescribeClusterParameterGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "ParameterGroups" + }, + "DescribeClusterParameters": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "Parameters" + }, + "DescribeClusterSecurityGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "ClusterSecurityGroups" + }, + "DescribeClusterSnapshots": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "Snapshots" + }, + "DescribeClusterSubnetGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "ClusterSubnetGroups" + }, + "DescribeClusterVersions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "ClusterVersions" + }, + "DescribeClusters": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "Clusters" + }, + "DescribeDefaultClusterParameters": { + "input_token": "Marker", + "output_token": "DefaultClusterParameters.Marker", + "limit_key": "MaxRecords", + "result_key": "DefaultClusterParameters.Parameters" + }, + "DescribeEventSubscriptions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "EventSubscriptionsList" + }, + "DescribeEvents": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "Events" + }, + "DescribeHsmClientCertificates": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "HsmClientCertificates" + }, + "DescribeHsmConfigurations": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "HsmConfigurations" + }, + "DescribeOrderableClusterOptions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "OrderableClusterOptions" + }, + "DescribeReservedNodeOfferings": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "ReservedNodeOfferings" + }, + "DescribeReservedNodes": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "ReservedNodes" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/redshift/2012-12-01/waiters-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/redshift/2012-12-01/waiters-2.json new file mode 100644 index 000000000..3efd9e5b2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/redshift/2012-12-01/waiters-2.json @@ -0,0 +1,97 @@ +{ + "version": 2, + 
"waiters": { + "ClusterAvailable": { + "delay": 60, + "operation": "DescribeClusters", + "maxAttempts": 30, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "Clusters[].ClusterStatus" + }, + { + "expected": "deleting", + "matcher": "pathAny", + "state": "failure", + "argument": "Clusters[].ClusterStatus" + }, + { + "expected": "ClusterNotFound", + "matcher": "error", + "state": "retry" + } + ] + }, + "ClusterDeleted": { + "delay": 60, + "operation": "DescribeClusters", + "maxAttempts": 30, + "acceptors": [ + { + "expected": "ClusterNotFound", + "matcher": "error", + "state": "success" + }, + { + "expected": "creating", + "matcher": "pathAny", + "state": "failure", + "argument": "Clusters[].ClusterStatus" + }, + { + "expected": "pathAny", + "matcher": "pathList", + "state": "failure", + "argument": "Clusters[].ClusterStatus" + } + ] + }, + "ClusterRestored": { + "operation": "DescribeClusters", + "maxAttempts": 30, + "delay": 60, + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "argument": "Clusters[].RestoreStatus.Status", + "expected": "completed" + }, + { + "state": "failure", + "matcher": "pathAny", + "argument": "Clusters[].ClusterStatus", + "expected": "deleting" + } + ] + }, + "SnapshotAvailable": { + "delay": 15, + "operation": "DescribeClusterSnapshots", + "maxAttempts": 20, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "Snapshots[].Status" + }, + { + "expected": "failed", + "matcher": "pathAny", + "state": "failure", + "argument": "Snapshots[].Status" + }, + { + "expected": "deleted", + "matcher": "pathAny", + "state": "failure", + "argument": "Snapshots[].Status" + } + ] + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/route53/2013-04-01/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/route53/2013-04-01/api-2.json new file mode 100644 index 000000000..0d6707610 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/route53/2013-04-01/api-2.json @@ -0,0 +1,3245 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2013-04-01", + "endpointPrefix":"route53", + "globalEndpoint":"route53.amazonaws.com", + "protocol":"rest-xml", + "serviceAbbreviation":"Route 53", + "serviceFullName":"Amazon Route 53", + "signatureVersion":"v4" + }, + "operations":{ + "AssociateVPCWithHostedZone":{ + "name":"AssociateVPCWithHostedZone", + "http":{ + "method":"POST", + "requestUri":"/2013-04-01/hostedzone/{Id}/associatevpc" + }, + "input":{ + "shape":"AssociateVPCWithHostedZoneRequest", + "locationName":"AssociateVPCWithHostedZoneRequest", + "xmlNamespace":{"uri":"https://route53.amazonaws.com/doc/2013-04-01/"} + }, + "output":{"shape":"AssociateVPCWithHostedZoneResponse"}, + "errors":[ + {"shape":"NoSuchHostedZone"}, + {"shape":"InvalidVPCId"}, + {"shape":"InvalidInput"}, + {"shape":"PublicZoneVPCAssociation"}, + {"shape":"ConflictingDomainExists"}, + {"shape":"LimitsExceeded"} + ] + }, + "ChangeResourceRecordSets":{ + "name":"ChangeResourceRecordSets", + "http":{ + "method":"POST", + "requestUri":"/2013-04-01/hostedzone/{Id}/rrset/" + }, + "input":{ + "shape":"ChangeResourceRecordSetsRequest", + "locationName":"ChangeResourceRecordSetsRequest", + "xmlNamespace":{"uri":"https://route53.amazonaws.com/doc/2013-04-01/"} + }, + "output":{"shape":"ChangeResourceRecordSetsResponse"}, + "errors":[ + {"shape":"NoSuchHostedZone"}, + {"shape":"NoSuchHealthCheck"}, + {"shape":"InvalidChangeBatch"}, + {"shape":"InvalidInput"}, + 
{"shape":"PriorRequestNotComplete"} + ] + }, + "ChangeTagsForResource":{ + "name":"ChangeTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/2013-04-01/tags/{ResourceType}/{ResourceId}" + }, + "input":{ + "shape":"ChangeTagsForResourceRequest", + "locationName":"ChangeTagsForResourceRequest", + "xmlNamespace":{"uri":"https://route53.amazonaws.com/doc/2013-04-01/"} + }, + "output":{"shape":"ChangeTagsForResourceResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"NoSuchHealthCheck"}, + {"shape":"NoSuchHostedZone"}, + {"shape":"PriorRequestNotComplete"}, + {"shape":"ThrottlingException"} + ] + }, + "CreateHealthCheck":{ + "name":"CreateHealthCheck", + "http":{ + "method":"POST", + "requestUri":"/2013-04-01/healthcheck", + "responseCode":201 + }, + "input":{ + "shape":"CreateHealthCheckRequest", + "locationName":"CreateHealthCheckRequest", + "xmlNamespace":{"uri":"https://route53.amazonaws.com/doc/2013-04-01/"} + }, + "output":{"shape":"CreateHealthCheckResponse"}, + "errors":[ + {"shape":"TooManyHealthChecks"}, + {"shape":"HealthCheckAlreadyExists"}, + {"shape":"InvalidInput"} + ] + }, + "CreateHostedZone":{ + "name":"CreateHostedZone", + "http":{ + "method":"POST", + "requestUri":"/2013-04-01/hostedzone", + "responseCode":201 + }, + "input":{ + "shape":"CreateHostedZoneRequest", + "locationName":"CreateHostedZoneRequest", + "xmlNamespace":{"uri":"https://route53.amazonaws.com/doc/2013-04-01/"} + }, + "output":{"shape":"CreateHostedZoneResponse"}, + "errors":[ + {"shape":"InvalidDomainName"}, + {"shape":"HostedZoneAlreadyExists"}, + {"shape":"TooManyHostedZones"}, + {"shape":"InvalidVPCId"}, + {"shape":"InvalidInput"}, + {"shape":"DelegationSetNotAvailable"}, + {"shape":"ConflictingDomainExists"}, + {"shape":"NoSuchDelegationSet"}, + {"shape":"DelegationSetNotReusable"} + ] + }, + "CreateReusableDelegationSet":{ + "name":"CreateReusableDelegationSet", + "http":{ + "method":"POST", + "requestUri":"/2013-04-01/delegationset", + "responseCode":201 + }, + "input":{ + "shape":"CreateReusableDelegationSetRequest", + "locationName":"CreateReusableDelegationSetRequest", + "xmlNamespace":{"uri":"https://route53.amazonaws.com/doc/2013-04-01/"} + }, + "output":{"shape":"CreateReusableDelegationSetResponse"}, + "errors":[ + {"shape":"DelegationSetAlreadyCreated"}, + {"shape":"LimitsExceeded"}, + {"shape":"HostedZoneNotFound"}, + {"shape":"InvalidArgument"}, + {"shape":"InvalidInput"}, + {"shape":"DelegationSetNotAvailable"}, + {"shape":"DelegationSetAlreadyReusable"} + ] + }, + "CreateTrafficPolicy":{ + "name":"CreateTrafficPolicy", + "http":{ + "method":"POST", + "requestUri":"/2013-04-01/trafficpolicy", + "responseCode":201 + }, + "input":{ + "shape":"CreateTrafficPolicyRequest", + "locationName":"CreateTrafficPolicyRequest", + "xmlNamespace":{"uri":"https://route53.amazonaws.com/doc/2013-04-01/"} + }, + "output":{"shape":"CreateTrafficPolicyResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"TooManyTrafficPolicies"}, + {"shape":"TrafficPolicyAlreadyExists"}, + {"shape":"InvalidTrafficPolicyDocument"} + ] + }, + "CreateTrafficPolicyInstance":{ + "name":"CreateTrafficPolicyInstance", + "http":{ + "method":"POST", + "requestUri":"/2013-04-01/trafficpolicyinstance", + "responseCode":201 + }, + "input":{ + "shape":"CreateTrafficPolicyInstanceRequest", + "locationName":"CreateTrafficPolicyInstanceRequest", + "xmlNamespace":{"uri":"https://route53.amazonaws.com/doc/2013-04-01/"} + }, + "output":{"shape":"CreateTrafficPolicyInstanceResponse"}, + "errors":[ + 
{"shape":"NoSuchHostedZone"}, + {"shape":"InvalidInput"}, + {"shape":"TooManyTrafficPolicyInstances"}, + {"shape":"NoSuchTrafficPolicy"}, + {"shape":"TrafficPolicyInstanceAlreadyExists"} + ] + }, + "CreateTrafficPolicyVersion":{ + "name":"CreateTrafficPolicyVersion", + "http":{ + "method":"POST", + "requestUri":"/2013-04-01/trafficpolicy/{Id}", + "responseCode":201 + }, + "input":{ + "shape":"CreateTrafficPolicyVersionRequest", + "locationName":"CreateTrafficPolicyVersionRequest", + "xmlNamespace":{"uri":"https://route53.amazonaws.com/doc/2013-04-01/"} + }, + "output":{"shape":"CreateTrafficPolicyVersionResponse"}, + "errors":[ + {"shape":"NoSuchTrafficPolicy"}, + {"shape":"InvalidInput"}, + {"shape":"ConcurrentModification"}, + {"shape":"InvalidTrafficPolicyDocument"} + ] + }, + "DeleteHealthCheck":{ + "name":"DeleteHealthCheck", + "http":{ + "method":"DELETE", + "requestUri":"/2013-04-01/healthcheck/{HealthCheckId}" + }, + "input":{"shape":"DeleteHealthCheckRequest"}, + "output":{"shape":"DeleteHealthCheckResponse"}, + "errors":[ + {"shape":"NoSuchHealthCheck"}, + {"shape":"HealthCheckInUse"}, + {"shape":"InvalidInput"} + ] + }, + "DeleteHostedZone":{ + "name":"DeleteHostedZone", + "http":{ + "method":"DELETE", + "requestUri":"/2013-04-01/hostedzone/{Id}" + }, + "input":{"shape":"DeleteHostedZoneRequest"}, + "output":{"shape":"DeleteHostedZoneResponse"}, + "errors":[ + {"shape":"NoSuchHostedZone"}, + {"shape":"HostedZoneNotEmpty"}, + {"shape":"PriorRequestNotComplete"}, + {"shape":"InvalidInput"}, + {"shape":"InvalidDomainName"} + ] + }, + "DeleteReusableDelegationSet":{ + "name":"DeleteReusableDelegationSet", + "http":{ + "method":"DELETE", + "requestUri":"/2013-04-01/delegationset/{Id}" + }, + "input":{"shape":"DeleteReusableDelegationSetRequest"}, + "output":{"shape":"DeleteReusableDelegationSetResponse"}, + "errors":[ + {"shape":"NoSuchDelegationSet"}, + {"shape":"DelegationSetInUse"}, + {"shape":"DelegationSetNotReusable"}, + {"shape":"InvalidInput"} + ] + }, + "DeleteTrafficPolicy":{ + "name":"DeleteTrafficPolicy", + "http":{ + "method":"DELETE", + "requestUri":"/2013-04-01/trafficpolicy/{Id}/{Version}" + }, + "input":{"shape":"DeleteTrafficPolicyRequest"}, + "output":{"shape":"DeleteTrafficPolicyResponse"}, + "errors":[ + {"shape":"NoSuchTrafficPolicy"}, + {"shape":"InvalidInput"}, + {"shape":"TrafficPolicyInUse"}, + {"shape":"ConcurrentModification"} + ] + }, + "DeleteTrafficPolicyInstance":{ + "name":"DeleteTrafficPolicyInstance", + "http":{ + "method":"DELETE", + "requestUri":"/2013-04-01/trafficpolicyinstance/{Id}" + }, + "input":{"shape":"DeleteTrafficPolicyInstanceRequest"}, + "output":{"shape":"DeleteTrafficPolicyInstanceResponse"}, + "errors":[ + {"shape":"NoSuchTrafficPolicyInstance"}, + {"shape":"InvalidInput"}, + {"shape":"PriorRequestNotComplete"} + ] + }, + "DisassociateVPCFromHostedZone":{ + "name":"DisassociateVPCFromHostedZone", + "http":{ + "method":"POST", + "requestUri":"/2013-04-01/hostedzone/{Id}/disassociatevpc" + }, + "input":{ + "shape":"DisassociateVPCFromHostedZoneRequest", + "locationName":"DisassociateVPCFromHostedZoneRequest", + "xmlNamespace":{"uri":"https://route53.amazonaws.com/doc/2013-04-01/"} + }, + "output":{"shape":"DisassociateVPCFromHostedZoneResponse"}, + "errors":[ + {"shape":"NoSuchHostedZone"}, + {"shape":"InvalidVPCId"}, + {"shape":"VPCAssociationNotFound"}, + {"shape":"LastVPCAssociation"}, + {"shape":"InvalidInput"} + ] + }, + "GetChange":{ + "name":"GetChange", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/change/{Id}" + }, 
+ "input":{"shape":"GetChangeRequest"}, + "output":{"shape":"GetChangeResponse"}, + "errors":[ + {"shape":"NoSuchChange"}, + {"shape":"InvalidInput"} + ] + }, + "GetChangeDetails":{ + "name":"GetChangeDetails", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/changedetails/{Id}" + }, + "input":{"shape":"GetChangeDetailsRequest"}, + "output":{"shape":"GetChangeDetailsResponse"}, + "errors":[ + {"shape":"NoSuchChange"}, + {"shape":"InvalidInput"} + ], + "deprecated":true + }, + "GetCheckerIpRanges":{ + "name":"GetCheckerIpRanges", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/checkeripranges" + }, + "input":{"shape":"GetCheckerIpRangesRequest"}, + "output":{"shape":"GetCheckerIpRangesResponse"} + }, + "GetGeoLocation":{ + "name":"GetGeoLocation", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/geolocation" + }, + "input":{"shape":"GetGeoLocationRequest"}, + "output":{"shape":"GetGeoLocationResponse"}, + "errors":[ + {"shape":"NoSuchGeoLocation"}, + {"shape":"InvalidInput"} + ] + }, + "GetHealthCheck":{ + "name":"GetHealthCheck", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/healthcheck/{HealthCheckId}" + }, + "input":{"shape":"GetHealthCheckRequest"}, + "output":{"shape":"GetHealthCheckResponse"}, + "errors":[ + {"shape":"NoSuchHealthCheck"}, + {"shape":"InvalidInput"}, + {"shape":"IncompatibleVersion"} + ] + }, + "GetHealthCheckCount":{ + "name":"GetHealthCheckCount", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/healthcheckcount" + }, + "input":{"shape":"GetHealthCheckCountRequest"}, + "output":{"shape":"GetHealthCheckCountResponse"} + }, + "GetHealthCheckLastFailureReason":{ + "name":"GetHealthCheckLastFailureReason", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/healthcheck/{HealthCheckId}/lastfailurereason" + }, + "input":{"shape":"GetHealthCheckLastFailureReasonRequest"}, + "output":{"shape":"GetHealthCheckLastFailureReasonResponse"}, + "errors":[ + {"shape":"NoSuchHealthCheck"}, + {"shape":"InvalidInput"} + ] + }, + "GetHealthCheckStatus":{ + "name":"GetHealthCheckStatus", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/healthcheck/{HealthCheckId}/status" + }, + "input":{"shape":"GetHealthCheckStatusRequest"}, + "output":{"shape":"GetHealthCheckStatusResponse"}, + "errors":[ + {"shape":"NoSuchHealthCheck"}, + {"shape":"InvalidInput"} + ] + }, + "GetHostedZone":{ + "name":"GetHostedZone", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/hostedzone/{Id}" + }, + "input":{"shape":"GetHostedZoneRequest"}, + "output":{"shape":"GetHostedZoneResponse"}, + "errors":[ + {"shape":"NoSuchHostedZone"}, + {"shape":"InvalidInput"} + ] + }, + "GetHostedZoneCount":{ + "name":"GetHostedZoneCount", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/hostedzonecount" + }, + "input":{"shape":"GetHostedZoneCountRequest"}, + "output":{"shape":"GetHostedZoneCountResponse"}, + "errors":[ + {"shape":"InvalidInput"} + ] + }, + "GetReusableDelegationSet":{ + "name":"GetReusableDelegationSet", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/delegationset/{Id}" + }, + "input":{"shape":"GetReusableDelegationSetRequest"}, + "output":{"shape":"GetReusableDelegationSetResponse"}, + "errors":[ + {"shape":"NoSuchDelegationSet"}, + {"shape":"DelegationSetNotReusable"}, + {"shape":"InvalidInput"} + ] + }, + "GetTrafficPolicy":{ + "name":"GetTrafficPolicy", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/trafficpolicy/{Id}/{Version}" + }, + "input":{"shape":"GetTrafficPolicyRequest"}, + 
"output":{"shape":"GetTrafficPolicyResponse"}, + "errors":[ + {"shape":"NoSuchTrafficPolicy"}, + {"shape":"InvalidInput"} + ] + }, + "GetTrafficPolicyInstance":{ + "name":"GetTrafficPolicyInstance", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/trafficpolicyinstance/{Id}" + }, + "input":{"shape":"GetTrafficPolicyInstanceRequest"}, + "output":{"shape":"GetTrafficPolicyInstanceResponse"}, + "errors":[ + {"shape":"NoSuchTrafficPolicyInstance"}, + {"shape":"InvalidInput"} + ] + }, + "GetTrafficPolicyInstanceCount":{ + "name":"GetTrafficPolicyInstanceCount", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/trafficpolicyinstancecount" + }, + "input":{"shape":"GetTrafficPolicyInstanceCountRequest"}, + "output":{"shape":"GetTrafficPolicyInstanceCountResponse"} + }, + "ListChangeBatchesByHostedZone":{ + "name":"ListChangeBatchesByHostedZone", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/hostedzone/{Id}/changes" + }, + "input":{"shape":"ListChangeBatchesByHostedZoneRequest"}, + "output":{"shape":"ListChangeBatchesByHostedZoneResponse"}, + "errors":[ + {"shape":"NoSuchHostedZone"}, + {"shape":"InvalidInput"} + ], + "deprecated":true + }, + "ListChangeBatchesByRRSet":{ + "name":"ListChangeBatchesByRRSet", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/hostedzone/{Id}/rrsChanges" + }, + "input":{"shape":"ListChangeBatchesByRRSetRequest"}, + "output":{"shape":"ListChangeBatchesByRRSetResponse"}, + "errors":[ + {"shape":"NoSuchHostedZone"}, + {"shape":"InvalidInput"} + ], + "deprecated":true + }, + "ListGeoLocations":{ + "name":"ListGeoLocations", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/geolocations" + }, + "input":{"shape":"ListGeoLocationsRequest"}, + "output":{"shape":"ListGeoLocationsResponse"}, + "errors":[ + {"shape":"InvalidInput"} + ] + }, + "ListHealthChecks":{ + "name":"ListHealthChecks", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/healthcheck" + }, + "input":{"shape":"ListHealthChecksRequest"}, + "output":{"shape":"ListHealthChecksResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"IncompatibleVersion"} + ] + }, + "ListHostedZones":{ + "name":"ListHostedZones", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/hostedzone" + }, + "input":{"shape":"ListHostedZonesRequest"}, + "output":{"shape":"ListHostedZonesResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"NoSuchDelegationSet"}, + {"shape":"DelegationSetNotReusable"} + ] + }, + "ListHostedZonesByName":{ + "name":"ListHostedZonesByName", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/hostedzonesbyname" + }, + "input":{"shape":"ListHostedZonesByNameRequest"}, + "output":{"shape":"ListHostedZonesByNameResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"InvalidDomainName"} + ] + }, + "ListResourceRecordSets":{ + "name":"ListResourceRecordSets", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/hostedzone/{Id}/rrset" + }, + "input":{"shape":"ListResourceRecordSetsRequest"}, + "output":{"shape":"ListResourceRecordSetsResponse"}, + "errors":[ + {"shape":"NoSuchHostedZone"}, + {"shape":"InvalidInput"} + ] + }, + "ListReusableDelegationSets":{ + "name":"ListReusableDelegationSets", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/delegationset" + }, + "input":{"shape":"ListReusableDelegationSetsRequest"}, + "output":{"shape":"ListReusableDelegationSetsResponse"}, + "errors":[ + {"shape":"InvalidInput"} + ] + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + 
"requestUri":"/2013-04-01/tags/{ResourceType}/{ResourceId}" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"NoSuchHealthCheck"}, + {"shape":"NoSuchHostedZone"}, + {"shape":"PriorRequestNotComplete"}, + {"shape":"ThrottlingException"} + ] + }, + "ListTagsForResources":{ + "name":"ListTagsForResources", + "http":{ + "method":"POST", + "requestUri":"/2013-04-01/tags/{ResourceType}" + }, + "input":{ + "shape":"ListTagsForResourcesRequest", + "locationName":"ListTagsForResourcesRequest", + "xmlNamespace":{"uri":"https://route53.amazonaws.com/doc/2013-04-01/"} + }, + "output":{"shape":"ListTagsForResourcesResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"NoSuchHealthCheck"}, + {"shape":"NoSuchHostedZone"}, + {"shape":"PriorRequestNotComplete"}, + {"shape":"ThrottlingException"} + ] + }, + "ListTrafficPolicies":{ + "name":"ListTrafficPolicies", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/trafficpolicies" + }, + "input":{"shape":"ListTrafficPoliciesRequest"}, + "output":{"shape":"ListTrafficPoliciesResponse"}, + "errors":[ + {"shape":"InvalidInput"} + ] + }, + "ListTrafficPolicyInstances":{ + "name":"ListTrafficPolicyInstances", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/trafficpolicyinstances" + }, + "input":{"shape":"ListTrafficPolicyInstancesRequest"}, + "output":{"shape":"ListTrafficPolicyInstancesResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"NoSuchTrafficPolicyInstance"} + ] + }, + "ListTrafficPolicyInstancesByHostedZone":{ + "name":"ListTrafficPolicyInstancesByHostedZone", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/trafficpolicyinstances/hostedzone" + }, + "input":{"shape":"ListTrafficPolicyInstancesByHostedZoneRequest"}, + "output":{"shape":"ListTrafficPolicyInstancesByHostedZoneResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"NoSuchTrafficPolicyInstance"}, + {"shape":"NoSuchHostedZone"} + ] + }, + "ListTrafficPolicyInstancesByPolicy":{ + "name":"ListTrafficPolicyInstancesByPolicy", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/trafficpolicyinstances/trafficpolicy" + }, + "input":{"shape":"ListTrafficPolicyInstancesByPolicyRequest"}, + "output":{"shape":"ListTrafficPolicyInstancesByPolicyResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"NoSuchTrafficPolicyInstance"}, + {"shape":"NoSuchTrafficPolicy"} + ] + }, + "ListTrafficPolicyVersions":{ + "name":"ListTrafficPolicyVersions", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/trafficpolicies/{Id}/versions" + }, + "input":{"shape":"ListTrafficPolicyVersionsRequest"}, + "output":{"shape":"ListTrafficPolicyVersionsResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"NoSuchTrafficPolicy"} + ] + }, + "UpdateHealthCheck":{ + "name":"UpdateHealthCheck", + "http":{ + "method":"POST", + "requestUri":"/2013-04-01/healthcheck/{HealthCheckId}" + }, + "input":{ + "shape":"UpdateHealthCheckRequest", + "locationName":"UpdateHealthCheckRequest", + "xmlNamespace":{"uri":"https://route53.amazonaws.com/doc/2013-04-01/"} + }, + "output":{"shape":"UpdateHealthCheckResponse"}, + "errors":[ + {"shape":"NoSuchHealthCheck"}, + {"shape":"InvalidInput"}, + {"shape":"HealthCheckVersionMismatch"} + ] + }, + "UpdateHostedZoneComment":{ + "name":"UpdateHostedZoneComment", + "http":{ + "method":"POST", + "requestUri":"/2013-04-01/hostedzone/{Id}" + }, + "input":{ + "shape":"UpdateHostedZoneCommentRequest", + 
"locationName":"UpdateHostedZoneCommentRequest", + "xmlNamespace":{"uri":"https://route53.amazonaws.com/doc/2013-04-01/"} + }, + "output":{"shape":"UpdateHostedZoneCommentResponse"}, + "errors":[ + {"shape":"NoSuchHostedZone"}, + {"shape":"InvalidInput"} + ] + }, + "UpdateTrafficPolicyComment":{ + "name":"UpdateTrafficPolicyComment", + "http":{ + "method":"POST", + "requestUri":"/2013-04-01/trafficpolicy/{Id}/{Version}" + }, + "input":{ + "shape":"UpdateTrafficPolicyCommentRequest", + "locationName":"UpdateTrafficPolicyCommentRequest", + "xmlNamespace":{"uri":"https://route53.amazonaws.com/doc/2013-04-01/"} + }, + "output":{"shape":"UpdateTrafficPolicyCommentResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"NoSuchTrafficPolicy"}, + {"shape":"ConcurrentModification"} + ] + }, + "UpdateTrafficPolicyInstance":{ + "name":"UpdateTrafficPolicyInstance", + "http":{ + "method":"POST", + "requestUri":"/2013-04-01/trafficpolicyinstance/{Id}" + }, + "input":{ + "shape":"UpdateTrafficPolicyInstanceRequest", + "locationName":"UpdateTrafficPolicyInstanceRequest", + "xmlNamespace":{"uri":"https://route53.amazonaws.com/doc/2013-04-01/"} + }, + "output":{"shape":"UpdateTrafficPolicyInstanceResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"NoSuchTrafficPolicy"}, + {"shape":"NoSuchTrafficPolicyInstance"}, + {"shape":"PriorRequestNotComplete"}, + {"shape":"ConflictingTypes"} + ] + } + }, + "shapes":{ + "AWSAccountID":{"type":"string"}, + "AlarmIdentifier":{ + "type":"structure", + "required":[ + "Region", + "Name" + ], + "members":{ + "Region":{"shape":"CloudWatchRegion"}, + "Name":{"shape":"AlarmName"} + } + }, + "AlarmName":{ + "type":"string", + "max":256, + "min":1 + }, + "AliasHealthEnabled":{"type":"boolean"}, + "AliasTarget":{ + "type":"structure", + "required":[ + "HostedZoneId", + "DNSName", + "EvaluateTargetHealth" + ], + "members":{ + "HostedZoneId":{"shape":"ResourceId"}, + "DNSName":{"shape":"DNSName"}, + "EvaluateTargetHealth":{"shape":"AliasHealthEnabled"} + } + }, + "AssociateVPCComment":{"type":"string"}, + "AssociateVPCWithHostedZoneRequest":{ + "type":"structure", + "required":[ + "HostedZoneId", + "VPC" + ], + "members":{ + "HostedZoneId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"Id" + }, + "VPC":{"shape":"VPC"}, + "Comment":{"shape":"AssociateVPCComment"} + } + }, + "AssociateVPCWithHostedZoneResponse":{ + "type":"structure", + "required":["ChangeInfo"], + "members":{ + "ChangeInfo":{"shape":"ChangeInfo"} + } + }, + "Change":{ + "type":"structure", + "required":[ + "Action", + "ResourceRecordSet" + ], + "members":{ + "Action":{"shape":"ChangeAction"}, + "ResourceRecordSet":{"shape":"ResourceRecordSet"} + } + }, + "ChangeAction":{ + "type":"string", + "enum":[ + "CREATE", + "DELETE", + "UPSERT" + ] + }, + "ChangeBatch":{ + "type":"structure", + "required":["Changes"], + "members":{ + "Comment":{"shape":"ResourceDescription"}, + "Changes":{"shape":"Changes"} + } + }, + "ChangeBatchRecord":{ + "type":"structure", + "required":[ + "Id", + "Status" + ], + "members":{ + "Id":{"shape":"ResourceId"}, + "SubmittedAt":{"shape":"TimeStamp"}, + "Status":{"shape":"ChangeStatus"}, + "Comment":{"shape":"ResourceDescription"}, + "Submitter":{"shape":"AWSAccountID"}, + "Changes":{"shape":"Changes"} + }, + "deprecated":true + }, + "ChangeBatchRecords":{ + "type":"list", + "member":{ + "shape":"ChangeBatchRecord", + "locationName":"ChangeBatchRecord" + }, + "deprecated":true, + "min":1 + }, + "ChangeInfo":{ + "type":"structure", + "required":[ + "Id", + 
"Status", + "SubmittedAt" + ], + "members":{ + "Id":{"shape":"ResourceId"}, + "Status":{"shape":"ChangeStatus"}, + "SubmittedAt":{"shape":"TimeStamp"}, + "Comment":{"shape":"ResourceDescription"} + } + }, + "ChangeResourceRecordSetsRequest":{ + "type":"structure", + "required":[ + "HostedZoneId", + "ChangeBatch" + ], + "members":{ + "HostedZoneId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"Id" + }, + "ChangeBatch":{"shape":"ChangeBatch"} + } + }, + "ChangeResourceRecordSetsResponse":{ + "type":"structure", + "required":["ChangeInfo"], + "members":{ + "ChangeInfo":{"shape":"ChangeInfo"} + } + }, + "ChangeStatus":{ + "type":"string", + "enum":[ + "PENDING", + "INSYNC" + ] + }, + "ChangeTagsForResourceRequest":{ + "type":"structure", + "required":[ + "ResourceType", + "ResourceId" + ], + "members":{ + "ResourceType":{ + "shape":"TagResourceType", + "location":"uri", + "locationName":"ResourceType" + }, + "ResourceId":{ + "shape":"TagResourceId", + "location":"uri", + "locationName":"ResourceId" + }, + "AddTags":{"shape":"TagList"}, + "RemoveTagKeys":{"shape":"TagKeyList"} + } + }, + "ChangeTagsForResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "Changes":{ + "type":"list", + "member":{ + "shape":"Change", + "locationName":"Change" + }, + "min":1 + }, + "CheckerIpRanges":{ + "type":"list", + "member":{"shape":"IPAddressCidr"} + }, + "ChildHealthCheckList":{ + "type":"list", + "member":{ + "shape":"HealthCheckId", + "locationName":"ChildHealthCheck" + }, + "max":256 + }, + "CloudWatchAlarmConfiguration":{ + "type":"structure", + "required":[ + "EvaluationPeriods", + "Threshold", + "ComparisonOperator", + "Period", + "MetricName", + "Namespace", + "Statistic" + ], + "members":{ + "EvaluationPeriods":{"shape":"EvaluationPeriods"}, + "Threshold":{"shape":"Threshold"}, + "ComparisonOperator":{"shape":"ComparisonOperator"}, + "Period":{"shape":"Period"}, + "MetricName":{"shape":"MetricName"}, + "Namespace":{"shape":"Namespace"}, + "Statistic":{"shape":"Statistic"}, + "Dimensions":{"shape":"DimensionList"} + } + }, + "CloudWatchRegion":{ + "type":"string", + "enum":[ + "us-east-1", + "us-west-1", + "us-west-2", + "eu-central-1", + "eu-west-1", + "ap-southeast-1", + "ap-southeast-2", + "ap-northeast-1", + "ap-northeast-2", + "sa-east-1" + ], + "max":64, + "min":1 + }, + "ComparisonOperator":{ + "type":"string", + "enum":[ + "GreaterThanOrEqualToThreshold", + "GreaterThanThreshold", + "LessThanThreshold", + "LessThanOrEqualToThreshold" + ] + }, + "ConcurrentModification":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "ConflictingDomainExists":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "ConflictingTypes":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "CreateHealthCheckRequest":{ + "type":"structure", + "required":[ + "CallerReference", + "HealthCheckConfig" + ], + "members":{ + "CallerReference":{"shape":"HealthCheckNonce"}, + "HealthCheckConfig":{"shape":"HealthCheckConfig"} + } + }, + "CreateHealthCheckResponse":{ + "type":"structure", + "required":[ + "HealthCheck", + "Location" + ], + "members":{ + "HealthCheck":{"shape":"HealthCheck"}, + "Location":{ + "shape":"ResourceURI", + "location":"header", + "locationName":"Location" + } + } + }, + "CreateHostedZoneRequest":{ + "type":"structure", + "required":[ + "Name", + 
"CallerReference" + ], + "members":{ + "Name":{"shape":"DNSName"}, + "VPC":{"shape":"VPC"}, + "CallerReference":{"shape":"Nonce"}, + "HostedZoneConfig":{"shape":"HostedZoneConfig"}, + "DelegationSetId":{"shape":"ResourceId"} + } + }, + "CreateHostedZoneResponse":{ + "type":"structure", + "required":[ + "HostedZone", + "ChangeInfo", + "DelegationSet", + "Location" + ], + "members":{ + "HostedZone":{"shape":"HostedZone"}, + "ChangeInfo":{"shape":"ChangeInfo"}, + "DelegationSet":{"shape":"DelegationSet"}, + "VPC":{"shape":"VPC"}, + "Location":{ + "shape":"ResourceURI", + "location":"header", + "locationName":"Location" + } + } + }, + "CreateReusableDelegationSetRequest":{ + "type":"structure", + "required":["CallerReference"], + "members":{ + "CallerReference":{"shape":"Nonce"}, + "HostedZoneId":{"shape":"ResourceId"} + } + }, + "CreateReusableDelegationSetResponse":{ + "type":"structure", + "required":[ + "DelegationSet", + "Location" + ], + "members":{ + "DelegationSet":{"shape":"DelegationSet"}, + "Location":{ + "shape":"ResourceURI", + "location":"header", + "locationName":"Location" + } + } + }, + "CreateTrafficPolicyInstanceRequest":{ + "type":"structure", + "required":[ + "HostedZoneId", + "Name", + "TTL", + "TrafficPolicyId", + "TrafficPolicyVersion" + ], + "members":{ + "HostedZoneId":{"shape":"ResourceId"}, + "Name":{"shape":"DNSName"}, + "TTL":{"shape":"TTL"}, + "TrafficPolicyId":{"shape":"TrafficPolicyId"}, + "TrafficPolicyVersion":{"shape":"TrafficPolicyVersion"} + } + }, + "CreateTrafficPolicyInstanceResponse":{ + "type":"structure", + "required":[ + "TrafficPolicyInstance", + "Location" + ], + "members":{ + "TrafficPolicyInstance":{"shape":"TrafficPolicyInstance"}, + "Location":{ + "shape":"ResourceURI", + "location":"header", + "locationName":"Location" + } + } + }, + "CreateTrafficPolicyRequest":{ + "type":"structure", + "required":[ + "Name", + "Document" + ], + "members":{ + "Name":{"shape":"TrafficPolicyName"}, + "Document":{"shape":"TrafficPolicyDocument"}, + "Comment":{"shape":"TrafficPolicyComment"} + } + }, + "CreateTrafficPolicyResponse":{ + "type":"structure", + "required":[ + "TrafficPolicy", + "Location" + ], + "members":{ + "TrafficPolicy":{"shape":"TrafficPolicy"}, + "Location":{ + "shape":"ResourceURI", + "location":"header", + "locationName":"Location" + } + } + }, + "CreateTrafficPolicyVersionRequest":{ + "type":"structure", + "required":[ + "Id", + "Document" + ], + "members":{ + "Id":{ + "shape":"TrafficPolicyId", + "location":"uri", + "locationName":"Id" + }, + "Document":{"shape":"TrafficPolicyDocument"}, + "Comment":{"shape":"TrafficPolicyComment"} + } + }, + "CreateTrafficPolicyVersionResponse":{ + "type":"structure", + "required":[ + "TrafficPolicy", + "Location" + ], + "members":{ + "TrafficPolicy":{"shape":"TrafficPolicy"}, + "Location":{ + "shape":"ResourceURI", + "location":"header", + "locationName":"Location" + } + } + }, + "DNSName":{ + "type":"string", + "max":1024 + }, + "Date":{ + "type":"string", + "deprecated":true, + "max":256 + }, + "DelegationSet":{ + "type":"structure", + "required":["NameServers"], + "members":{ + "Id":{"shape":"ResourceId"}, + "CallerReference":{"shape":"Nonce"}, + "NameServers":{"shape":"DelegationSetNameServers"} + } + }, + "DelegationSetAlreadyCreated":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "DelegationSetAlreadyReusable":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "DelegationSetInUse":{ + 
"type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "DelegationSetNameServers":{ + "type":"list", + "member":{ + "shape":"DNSName", + "locationName":"NameServer" + }, + "min":1 + }, + "DelegationSetNotAvailable":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "DelegationSetNotReusable":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "DelegationSets":{ + "type":"list", + "member":{ + "shape":"DelegationSet", + "locationName":"DelegationSet" + } + }, + "DeleteHealthCheckRequest":{ + "type":"structure", + "required":["HealthCheckId"], + "members":{ + "HealthCheckId":{ + "shape":"HealthCheckId", + "location":"uri", + "locationName":"HealthCheckId" + } + } + }, + "DeleteHealthCheckResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteHostedZoneRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"Id" + } + } + }, + "DeleteHostedZoneResponse":{ + "type":"structure", + "required":["ChangeInfo"], + "members":{ + "ChangeInfo":{"shape":"ChangeInfo"} + } + }, + "DeleteReusableDelegationSetRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"Id" + } + } + }, + "DeleteReusableDelegationSetResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteTrafficPolicyInstanceRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"TrafficPolicyInstanceId", + "location":"uri", + "locationName":"Id" + } + } + }, + "DeleteTrafficPolicyInstanceResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteTrafficPolicyRequest":{ + "type":"structure", + "required":[ + "Id", + "Version" + ], + "members":{ + "Id":{ + "shape":"TrafficPolicyId", + "location":"uri", + "locationName":"Id" + }, + "Version":{ + "shape":"TrafficPolicyVersion", + "location":"uri", + "locationName":"Version" + } + } + }, + "DeleteTrafficPolicyResponse":{ + "type":"structure", + "members":{ + } + }, + "Dimension":{ + "type":"structure", + "required":[ + "Name", + "Value" + ], + "members":{ + "Name":{"shape":"DimensionField"}, + "Value":{"shape":"DimensionField"} + } + }, + "DimensionField":{ + "type":"string", + "max":255, + "min":1 + }, + "DimensionList":{ + "type":"list", + "member":{ + "shape":"Dimension", + "locationName":"Dimension" + }, + "max":10 + }, + "DisassociateVPCComment":{"type":"string"}, + "DisassociateVPCFromHostedZoneRequest":{ + "type":"structure", + "required":[ + "HostedZoneId", + "VPC" + ], + "members":{ + "HostedZoneId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"Id" + }, + "VPC":{"shape":"VPC"}, + "Comment":{"shape":"DisassociateVPCComment"} + } + }, + "DisassociateVPCFromHostedZoneResponse":{ + "type":"structure", + "required":["ChangeInfo"], + "members":{ + "ChangeInfo":{"shape":"ChangeInfo"} + } + }, + "EnableSNI":{"type":"boolean"}, + "ErrorMessage":{"type":"string"}, + "ErrorMessages":{ + "type":"list", + "member":{ + "shape":"ErrorMessage", + "locationName":"Message" + } + }, + "EvaluationPeriods":{ + "type":"integer", + "min":1 + }, + "FailureThreshold":{ + "type":"integer", + "max":10, + "min":1 + }, + "FullyQualifiedDomainName":{ + "type":"string", + "max":255 + }, + "GeoLocation":{ + "type":"structure", + "members":{ + "ContinentCode":{"shape":"GeoLocationContinentCode"}, + 
"CountryCode":{"shape":"GeoLocationCountryCode"}, + "SubdivisionCode":{"shape":"GeoLocationSubdivisionCode"} + } + }, + "GeoLocationContinentCode":{ + "type":"string", + "max":2, + "min":2 + }, + "GeoLocationContinentName":{ + "type":"string", + "max":32, + "min":1 + }, + "GeoLocationCountryCode":{ + "type":"string", + "max":2, + "min":1 + }, + "GeoLocationCountryName":{ + "type":"string", + "max":64, + "min":1 + }, + "GeoLocationDetails":{ + "type":"structure", + "members":{ + "ContinentCode":{"shape":"GeoLocationContinentCode"}, + "ContinentName":{"shape":"GeoLocationContinentName"}, + "CountryCode":{"shape":"GeoLocationCountryCode"}, + "CountryName":{"shape":"GeoLocationCountryName"}, + "SubdivisionCode":{"shape":"GeoLocationSubdivisionCode"}, + "SubdivisionName":{"shape":"GeoLocationSubdivisionName"} + } + }, + "GeoLocationDetailsList":{ + "type":"list", + "member":{ + "shape":"GeoLocationDetails", + "locationName":"GeoLocationDetails" + } + }, + "GeoLocationSubdivisionCode":{ + "type":"string", + "max":3, + "min":1 + }, + "GeoLocationSubdivisionName":{ + "type":"string", + "max":64, + "min":1 + }, + "GetChangeDetailsRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"Id" + } + }, + "deprecated":true + }, + "GetChangeDetailsResponse":{ + "type":"structure", + "required":["ChangeBatchRecord"], + "members":{ + "ChangeBatchRecord":{"shape":"ChangeBatchRecord"} + }, + "deprecated":true + }, + "GetChangeRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetChangeResponse":{ + "type":"structure", + "required":["ChangeInfo"], + "members":{ + "ChangeInfo":{"shape":"ChangeInfo"} + } + }, + "GetCheckerIpRangesRequest":{ + "type":"structure", + "members":{ + } + }, + "GetCheckerIpRangesResponse":{ + "type":"structure", + "required":["CheckerIpRanges"], + "members":{ + "CheckerIpRanges":{"shape":"CheckerIpRanges"} + } + }, + "GetGeoLocationRequest":{ + "type":"structure", + "members":{ + "ContinentCode":{ + "shape":"GeoLocationContinentCode", + "location":"querystring", + "locationName":"continentcode" + }, + "CountryCode":{ + "shape":"GeoLocationCountryCode", + "location":"querystring", + "locationName":"countrycode" + }, + "SubdivisionCode":{ + "shape":"GeoLocationSubdivisionCode", + "location":"querystring", + "locationName":"subdivisioncode" + } + } + }, + "GetGeoLocationResponse":{ + "type":"structure", + "required":["GeoLocationDetails"], + "members":{ + "GeoLocationDetails":{"shape":"GeoLocationDetails"} + } + }, + "GetHealthCheckCountRequest":{ + "type":"structure", + "members":{ + } + }, + "GetHealthCheckCountResponse":{ + "type":"structure", + "required":["HealthCheckCount"], + "members":{ + "HealthCheckCount":{"shape":"HealthCheckCount"} + } + }, + "GetHealthCheckLastFailureReasonRequest":{ + "type":"structure", + "required":["HealthCheckId"], + "members":{ + "HealthCheckId":{ + "shape":"HealthCheckId", + "location":"uri", + "locationName":"HealthCheckId" + } + } + }, + "GetHealthCheckLastFailureReasonResponse":{ + "type":"structure", + "required":["HealthCheckObservations"], + "members":{ + "HealthCheckObservations":{"shape":"HealthCheckObservations"} + } + }, + "GetHealthCheckRequest":{ + "type":"structure", + "required":["HealthCheckId"], + "members":{ + "HealthCheckId":{ + "shape":"HealthCheckId", + "location":"uri", + "locationName":"HealthCheckId" + } + } + }, + "GetHealthCheckResponse":{ 
+ "type":"structure", + "required":["HealthCheck"], + "members":{ + "HealthCheck":{"shape":"HealthCheck"} + } + }, + "GetHealthCheckStatusRequest":{ + "type":"structure", + "required":["HealthCheckId"], + "members":{ + "HealthCheckId":{ + "shape":"HealthCheckId", + "location":"uri", + "locationName":"HealthCheckId" + } + } + }, + "GetHealthCheckStatusResponse":{ + "type":"structure", + "required":["HealthCheckObservations"], + "members":{ + "HealthCheckObservations":{"shape":"HealthCheckObservations"} + } + }, + "GetHostedZoneCountRequest":{ + "type":"structure", + "members":{ + } + }, + "GetHostedZoneCountResponse":{ + "type":"structure", + "required":["HostedZoneCount"], + "members":{ + "HostedZoneCount":{"shape":"HostedZoneCount"} + } + }, + "GetHostedZoneRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetHostedZoneResponse":{ + "type":"structure", + "required":["HostedZone"], + "members":{ + "HostedZone":{"shape":"HostedZone"}, + "DelegationSet":{"shape":"DelegationSet"}, + "VPCs":{"shape":"VPCs"} + } + }, + "GetReusableDelegationSetRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetReusableDelegationSetResponse":{ + "type":"structure", + "required":["DelegationSet"], + "members":{ + "DelegationSet":{"shape":"DelegationSet"} + } + }, + "GetTrafficPolicyInstanceCountRequest":{ + "type":"structure", + "members":{ + } + }, + "GetTrafficPolicyInstanceCountResponse":{ + "type":"structure", + "required":["TrafficPolicyInstanceCount"], + "members":{ + "TrafficPolicyInstanceCount":{"shape":"TrafficPolicyInstanceCount"} + } + }, + "GetTrafficPolicyInstanceRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"TrafficPolicyInstanceId", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetTrafficPolicyInstanceResponse":{ + "type":"structure", + "required":["TrafficPolicyInstance"], + "members":{ + "TrafficPolicyInstance":{"shape":"TrafficPolicyInstance"} + } + }, + "GetTrafficPolicyRequest":{ + "type":"structure", + "required":[ + "Id", + "Version" + ], + "members":{ + "Id":{ + "shape":"TrafficPolicyId", + "location":"uri", + "locationName":"Id" + }, + "Version":{ + "shape":"TrafficPolicyVersion", + "location":"uri", + "locationName":"Version" + } + } + }, + "GetTrafficPolicyResponse":{ + "type":"structure", + "required":["TrafficPolicy"], + "members":{ + "TrafficPolicy":{"shape":"TrafficPolicy"} + } + }, + "HealthCheck":{ + "type":"structure", + "required":[ + "Id", + "CallerReference", + "HealthCheckConfig", + "HealthCheckVersion" + ], + "members":{ + "Id":{"shape":"HealthCheckId"}, + "CallerReference":{"shape":"HealthCheckNonce"}, + "HealthCheckConfig":{"shape":"HealthCheckConfig"}, + "HealthCheckVersion":{"shape":"HealthCheckVersion"}, + "CloudWatchAlarmConfiguration":{"shape":"CloudWatchAlarmConfiguration"} + } + }, + "HealthCheckAlreadyExists":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "HealthCheckConfig":{ + "type":"structure", + "required":["Type"], + "members":{ + "IPAddress":{"shape":"IPAddress"}, + "Port":{"shape":"Port"}, + "Type":{"shape":"HealthCheckType"}, + "ResourcePath":{"shape":"ResourcePath"}, + "FullyQualifiedDomainName":{"shape":"FullyQualifiedDomainName"}, + "SearchString":{"shape":"SearchString"}, + 
"RequestInterval":{"shape":"RequestInterval"}, + "FailureThreshold":{"shape":"FailureThreshold"}, + "MeasureLatency":{"shape":"MeasureLatency"}, + "Inverted":{"shape":"Inverted"}, + "HealthThreshold":{"shape":"HealthThreshold"}, + "ChildHealthChecks":{"shape":"ChildHealthCheckList"}, + "EnableSNI":{"shape":"EnableSNI"}, + "Regions":{"shape":"HealthCheckRegionList"}, + "AlarmIdentifier":{"shape":"AlarmIdentifier"}, + "InsufficientDataHealthStatus":{"shape":"InsufficientDataHealthStatus"} + } + }, + "HealthCheckCount":{"type":"long"}, + "HealthCheckId":{ + "type":"string", + "max":64 + }, + "HealthCheckInUse":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "HealthCheckNonce":{ + "type":"string", + "max":64, + "min":1 + }, + "HealthCheckObservation":{ + "type":"structure", + "members":{ + "Region":{"shape":"HealthCheckRegion"}, + "IPAddress":{"shape":"IPAddress"}, + "StatusReport":{"shape":"StatusReport"} + } + }, + "HealthCheckObservations":{ + "type":"list", + "member":{ + "shape":"HealthCheckObservation", + "locationName":"HealthCheckObservation" + } + }, + "HealthCheckRegion":{ + "type":"string", + "enum":[ + "us-east-1", + "us-west-1", + "us-west-2", + "eu-west-1", + "ap-southeast-1", + "ap-southeast-2", + "ap-northeast-1", + "sa-east-1" + ], + "max":64, + "min":1 + }, + "HealthCheckRegionList":{ + "type":"list", + "member":{ + "shape":"HealthCheckRegion", + "locationName":"Region" + }, + "max":64, + "min":1 + }, + "HealthCheckType":{ + "type":"string", + "enum":[ + "HTTP", + "HTTPS", + "HTTP_STR_MATCH", + "HTTPS_STR_MATCH", + "TCP", + "CALCULATED", + "CLOUDWATCH_METRIC" + ] + }, + "HealthCheckVersion":{ + "type":"long", + "min":1 + }, + "HealthCheckVersionMismatch":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "HealthChecks":{ + "type":"list", + "member":{ + "shape":"HealthCheck", + "locationName":"HealthCheck" + } + }, + "HealthThreshold":{ + "type":"integer", + "max":256, + "min":0 + }, + "HostedZone":{ + "type":"structure", + "required":[ + "Id", + "Name", + "CallerReference" + ], + "members":{ + "Id":{"shape":"ResourceId"}, + "Name":{"shape":"DNSName"}, + "CallerReference":{"shape":"Nonce"}, + "Config":{"shape":"HostedZoneConfig"}, + "ResourceRecordSetCount":{"shape":"HostedZoneRRSetCount"} + } + }, + "HostedZoneAlreadyExists":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "HostedZoneConfig":{ + "type":"structure", + "members":{ + "Comment":{"shape":"ResourceDescription"}, + "PrivateZone":{"shape":"IsPrivateZone"} + } + }, + "HostedZoneCount":{"type":"long"}, + "HostedZoneNotEmpty":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "HostedZoneNotFound":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "HostedZoneRRSetCount":{"type":"long"}, + "HostedZones":{ + "type":"list", + "member":{ + "shape":"HostedZone", + "locationName":"HostedZone" + } + }, + "IPAddress":{ + "type":"string", + "max":15, + "pattern":"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$" + }, + "IPAddressCidr":{"type":"string"}, + "IncompatibleVersion":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + 
"error":{"httpStatusCode":400}, + "exception":true + }, + "InsufficientDataHealthStatus":{ + "type":"string", + "enum":[ + "Healthy", + "Unhealthy", + "LastKnownStatus" + ] + }, + "InvalidArgument":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "InvalidChangeBatch":{ + "type":"structure", + "members":{ + "messages":{"shape":"ErrorMessages"} + }, + "exception":true + }, + "InvalidDomainName":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidInput":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidTrafficPolicyDocument":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidVPCId":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "Inverted":{"type":"boolean"}, + "IsPrivateZone":{"type":"boolean"}, + "LastVPCAssociation":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "LimitsExceeded":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "ListChangeBatchesByHostedZoneRequest":{ + "type":"structure", + "required":[ + "HostedZoneId", + "StartDate", + "EndDate" + ], + "members":{ + "HostedZoneId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"Id" + }, + "StartDate":{ + "shape":"Date", + "location":"querystring", + "locationName":"startDate" + }, + "EndDate":{ + "shape":"Date", + "location":"querystring", + "locationName":"endDate" + }, + "MaxItems":{ + "shape":"PageMaxItems", + "location":"querystring", + "locationName":"maxItems" + }, + "Marker":{ + "shape":"PageMarker", + "location":"querystring", + "locationName":"marker" + } + }, + "deprecated":true + }, + "ListChangeBatchesByHostedZoneResponse":{ + "type":"structure", + "required":[ + "MaxItems", + "Marker", + "ChangeBatchRecords" + ], + "members":{ + "MaxItems":{"shape":"PageMaxItems"}, + "Marker":{"shape":"PageMarker"}, + "IsTruncated":{"shape":"PageTruncated"}, + "ChangeBatchRecords":{"shape":"ChangeBatchRecords"}, + "NextMarker":{"shape":"PageMarker"} + }, + "deprecated":true + }, + "ListChangeBatchesByRRSetRequest":{ + "type":"structure", + "required":[ + "HostedZoneId", + "Name", + "Type", + "StartDate", + "EndDate" + ], + "members":{ + "HostedZoneId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"Id" + }, + "Name":{ + "shape":"DNSName", + "location":"querystring", + "locationName":"rrSet_name" + }, + "Type":{ + "shape":"RRType", + "location":"querystring", + "locationName":"type" + }, + "SetIdentifier":{ + "shape":"ResourceRecordSetIdentifier", + "location":"querystring", + "locationName":"identifier" + }, + "StartDate":{ + "shape":"Date", + "location":"querystring", + "locationName":"startDate" + }, + "EndDate":{ + "shape":"Date", + "location":"querystring", + "locationName":"endDate" + }, + "MaxItems":{ + "shape":"PageMaxItems", + "location":"querystring", + "locationName":"maxItems" + }, + "Marker":{ + "shape":"PageMarker", + "location":"querystring", + "locationName":"marker" + } + }, + "deprecated":true + }, + "ListChangeBatchesByRRSetResponse":{ + "type":"structure", + "required":[ + "MaxItems", + "Marker", + 
"ChangeBatchRecords" + ], + "members":{ + "MaxItems":{"shape":"PageMaxItems"}, + "Marker":{"shape":"PageMarker"}, + "IsTruncated":{"shape":"PageTruncated"}, + "ChangeBatchRecords":{"shape":"ChangeBatchRecords"}, + "NextMarker":{"shape":"PageMarker"} + }, + "deprecated":true + }, + "ListGeoLocationsRequest":{ + "type":"structure", + "members":{ + "StartContinentCode":{ + "shape":"GeoLocationContinentCode", + "location":"querystring", + "locationName":"startcontinentcode" + }, + "StartCountryCode":{ + "shape":"GeoLocationCountryCode", + "location":"querystring", + "locationName":"startcountrycode" + }, + "StartSubdivisionCode":{ + "shape":"GeoLocationSubdivisionCode", + "location":"querystring", + "locationName":"startsubdivisioncode" + }, + "MaxItems":{ + "shape":"PageMaxItems", + "location":"querystring", + "locationName":"maxitems" + } + } + }, + "ListGeoLocationsResponse":{ + "type":"structure", + "required":[ + "GeoLocationDetailsList", + "IsTruncated", + "MaxItems" + ], + "members":{ + "GeoLocationDetailsList":{"shape":"GeoLocationDetailsList"}, + "IsTruncated":{"shape":"PageTruncated"}, + "NextContinentCode":{"shape":"GeoLocationContinentCode"}, + "NextCountryCode":{"shape":"GeoLocationCountryCode"}, + "NextSubdivisionCode":{"shape":"GeoLocationSubdivisionCode"}, + "MaxItems":{"shape":"PageMaxItems"} + } + }, + "ListHealthChecksRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"PageMarker", + "location":"querystring", + "locationName":"marker" + }, + "MaxItems":{ + "shape":"PageMaxItems", + "location":"querystring", + "locationName":"maxitems" + } + } + }, + "ListHealthChecksResponse":{ + "type":"structure", + "required":[ + "HealthChecks", + "Marker", + "IsTruncated", + "MaxItems" + ], + "members":{ + "HealthChecks":{"shape":"HealthChecks"}, + "Marker":{"shape":"PageMarker"}, + "IsTruncated":{"shape":"PageTruncated"}, + "NextMarker":{"shape":"PageMarker"}, + "MaxItems":{"shape":"PageMaxItems"} + } + }, + "ListHostedZonesByNameRequest":{ + "type":"structure", + "members":{ + "DNSName":{ + "shape":"DNSName", + "location":"querystring", + "locationName":"dnsname" + }, + "HostedZoneId":{ + "shape":"ResourceId", + "location":"querystring", + "locationName":"hostedzoneid" + }, + "MaxItems":{ + "shape":"PageMaxItems", + "location":"querystring", + "locationName":"maxitems" + } + } + }, + "ListHostedZonesByNameResponse":{ + "type":"structure", + "required":[ + "HostedZones", + "IsTruncated", + "MaxItems" + ], + "members":{ + "HostedZones":{"shape":"HostedZones"}, + "DNSName":{"shape":"DNSName"}, + "HostedZoneId":{"shape":"ResourceId"}, + "IsTruncated":{"shape":"PageTruncated"}, + "NextDNSName":{"shape":"DNSName"}, + "NextHostedZoneId":{"shape":"ResourceId"}, + "MaxItems":{"shape":"PageMaxItems"} + } + }, + "ListHostedZonesRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"PageMarker", + "location":"querystring", + "locationName":"marker" + }, + "MaxItems":{ + "shape":"PageMaxItems", + "location":"querystring", + "locationName":"maxitems" + }, + "DelegationSetId":{ + "shape":"ResourceId", + "location":"querystring", + "locationName":"delegationsetid" + } + } + }, + "ListHostedZonesResponse":{ + "type":"structure", + "required":[ + "HostedZones", + "Marker", + "IsTruncated", + "MaxItems" + ], + "members":{ + "HostedZones":{"shape":"HostedZones"}, + "Marker":{"shape":"PageMarker"}, + "IsTruncated":{"shape":"PageTruncated"}, + "NextMarker":{"shape":"PageMarker"}, + "MaxItems":{"shape":"PageMaxItems"} + } + }, + "ListResourceRecordSetsRequest":{ + 
"type":"structure", + "required":["HostedZoneId"], + "members":{ + "HostedZoneId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"Id" + }, + "StartRecordName":{ + "shape":"DNSName", + "location":"querystring", + "locationName":"name" + }, + "StartRecordType":{ + "shape":"RRType", + "location":"querystring", + "locationName":"type" + }, + "StartRecordIdentifier":{ + "shape":"ResourceRecordSetIdentifier", + "location":"querystring", + "locationName":"identifier" + }, + "MaxItems":{ + "shape":"PageMaxItems", + "location":"querystring", + "locationName":"maxitems" + } + } + }, + "ListResourceRecordSetsResponse":{ + "type":"structure", + "required":[ + "ResourceRecordSets", + "IsTruncated", + "MaxItems" + ], + "members":{ + "ResourceRecordSets":{"shape":"ResourceRecordSets"}, + "IsTruncated":{"shape":"PageTruncated"}, + "NextRecordName":{"shape":"DNSName"}, + "NextRecordType":{"shape":"RRType"}, + "NextRecordIdentifier":{"shape":"ResourceRecordSetIdentifier"}, + "MaxItems":{"shape":"PageMaxItems"} + } + }, + "ListReusableDelegationSetsRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"PageMarker", + "location":"querystring", + "locationName":"marker" + }, + "MaxItems":{ + "shape":"PageMaxItems", + "location":"querystring", + "locationName":"maxitems" + } + } + }, + "ListReusableDelegationSetsResponse":{ + "type":"structure", + "required":[ + "DelegationSets", + "Marker", + "IsTruncated", + "MaxItems" + ], + "members":{ + "DelegationSets":{"shape":"DelegationSets"}, + "Marker":{"shape":"PageMarker"}, + "IsTruncated":{"shape":"PageTruncated"}, + "NextMarker":{"shape":"PageMarker"}, + "MaxItems":{"shape":"PageMaxItems"} + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":[ + "ResourceType", + "ResourceId" + ], + "members":{ + "ResourceType":{ + "shape":"TagResourceType", + "location":"uri", + "locationName":"ResourceType" + }, + "ResourceId":{ + "shape":"TagResourceId", + "location":"uri", + "locationName":"ResourceId" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "required":["ResourceTagSet"], + "members":{ + "ResourceTagSet":{"shape":"ResourceTagSet"} + } + }, + "ListTagsForResourcesRequest":{ + "type":"structure", + "required":[ + "ResourceType", + "ResourceIds" + ], + "members":{ + "ResourceType":{ + "shape":"TagResourceType", + "location":"uri", + "locationName":"ResourceType" + }, + "ResourceIds":{"shape":"TagResourceIdList"} + } + }, + "ListTagsForResourcesResponse":{ + "type":"structure", + "required":["ResourceTagSets"], + "members":{ + "ResourceTagSets":{"shape":"ResourceTagSetList"} + } + }, + "ListTrafficPoliciesRequest":{ + "type":"structure", + "members":{ + "TrafficPolicyIdMarker":{ + "shape":"TrafficPolicyId", + "location":"querystring", + "locationName":"trafficpolicyid" + }, + "MaxItems":{ + "shape":"PageMaxItems", + "location":"querystring", + "locationName":"maxitems" + } + } + }, + "ListTrafficPoliciesResponse":{ + "type":"structure", + "required":[ + "TrafficPolicySummaries", + "IsTruncated", + "TrafficPolicyIdMarker", + "MaxItems" + ], + "members":{ + "TrafficPolicySummaries":{"shape":"TrafficPolicySummaries"}, + "IsTruncated":{"shape":"PageTruncated"}, + "TrafficPolicyIdMarker":{"shape":"TrafficPolicyId"}, + "MaxItems":{"shape":"PageMaxItems"} + } + }, + "ListTrafficPolicyInstancesByHostedZoneRequest":{ + "type":"structure", + "required":["HostedZoneId"], + "members":{ + "HostedZoneId":{ + "shape":"ResourceId", + "location":"querystring", + "locationName":"id" + }, + 
"TrafficPolicyInstanceNameMarker":{ + "shape":"DNSName", + "location":"querystring", + "locationName":"trafficpolicyinstancename" + }, + "TrafficPolicyInstanceTypeMarker":{ + "shape":"RRType", + "location":"querystring", + "locationName":"trafficpolicyinstancetype" + }, + "MaxItems":{ + "shape":"PageMaxItems", + "location":"querystring", + "locationName":"maxitems" + } + } + }, + "ListTrafficPolicyInstancesByHostedZoneResponse":{ + "type":"structure", + "required":[ + "TrafficPolicyInstances", + "IsTruncated", + "MaxItems" + ], + "members":{ + "TrafficPolicyInstances":{"shape":"TrafficPolicyInstances"}, + "TrafficPolicyInstanceNameMarker":{"shape":"DNSName"}, + "TrafficPolicyInstanceTypeMarker":{"shape":"RRType"}, + "IsTruncated":{"shape":"PageTruncated"}, + "MaxItems":{"shape":"PageMaxItems"} + } + }, + "ListTrafficPolicyInstancesByPolicyRequest":{ + "type":"structure", + "required":[ + "TrafficPolicyId", + "TrafficPolicyVersion" + ], + "members":{ + "TrafficPolicyId":{ + "shape":"TrafficPolicyId", + "location":"querystring", + "locationName":"id" + }, + "TrafficPolicyVersion":{ + "shape":"TrafficPolicyVersion", + "location":"querystring", + "locationName":"version" + }, + "HostedZoneIdMarker":{ + "shape":"ResourceId", + "location":"querystring", + "locationName":"hostedzoneid" + }, + "TrafficPolicyInstanceNameMarker":{ + "shape":"DNSName", + "location":"querystring", + "locationName":"trafficpolicyinstancename" + }, + "TrafficPolicyInstanceTypeMarker":{ + "shape":"RRType", + "location":"querystring", + "locationName":"trafficpolicyinstancetype" + }, + "MaxItems":{ + "shape":"PageMaxItems", + "location":"querystring", + "locationName":"maxitems" + } + } + }, + "ListTrafficPolicyInstancesByPolicyResponse":{ + "type":"structure", + "required":[ + "TrafficPolicyInstances", + "IsTruncated", + "MaxItems" + ], + "members":{ + "TrafficPolicyInstances":{"shape":"TrafficPolicyInstances"}, + "HostedZoneIdMarker":{"shape":"ResourceId"}, + "TrafficPolicyInstanceNameMarker":{"shape":"DNSName"}, + "TrafficPolicyInstanceTypeMarker":{"shape":"RRType"}, + "IsTruncated":{"shape":"PageTruncated"}, + "MaxItems":{"shape":"PageMaxItems"} + } + }, + "ListTrafficPolicyInstancesRequest":{ + "type":"structure", + "members":{ + "HostedZoneIdMarker":{ + "shape":"ResourceId", + "location":"querystring", + "locationName":"hostedzoneid" + }, + "TrafficPolicyInstanceNameMarker":{ + "shape":"DNSName", + "location":"querystring", + "locationName":"trafficpolicyinstancename" + }, + "TrafficPolicyInstanceTypeMarker":{ + "shape":"RRType", + "location":"querystring", + "locationName":"trafficpolicyinstancetype" + }, + "MaxItems":{ + "shape":"PageMaxItems", + "location":"querystring", + "locationName":"maxitems" + } + } + }, + "ListTrafficPolicyInstancesResponse":{ + "type":"structure", + "required":[ + "TrafficPolicyInstances", + "IsTruncated", + "MaxItems" + ], + "members":{ + "TrafficPolicyInstances":{"shape":"TrafficPolicyInstances"}, + "HostedZoneIdMarker":{"shape":"ResourceId"}, + "TrafficPolicyInstanceNameMarker":{"shape":"DNSName"}, + "TrafficPolicyInstanceTypeMarker":{"shape":"RRType"}, + "IsTruncated":{"shape":"PageTruncated"}, + "MaxItems":{"shape":"PageMaxItems"} + } + }, + "ListTrafficPolicyVersionsRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"TrafficPolicyId", + "location":"uri", + "locationName":"Id" + }, + "TrafficPolicyVersionMarker":{ + "shape":"TrafficPolicyVersionMarker", + "location":"querystring", + "locationName":"trafficpolicyversion" + }, + "MaxItems":{ + 
"shape":"PageMaxItems", + "location":"querystring", + "locationName":"maxitems" + } + } + }, + "ListTrafficPolicyVersionsResponse":{ + "type":"structure", + "required":[ + "TrafficPolicies", + "IsTruncated", + "TrafficPolicyVersionMarker", + "MaxItems" + ], + "members":{ + "TrafficPolicies":{"shape":"TrafficPolicies"}, + "IsTruncated":{"shape":"PageTruncated"}, + "TrafficPolicyVersionMarker":{"shape":"TrafficPolicyVersionMarker"}, + "MaxItems":{"shape":"PageMaxItems"} + } + }, + "MeasureLatency":{"type":"boolean"}, + "Message":{ + "type":"string", + "max":1024 + }, + "MetricName":{ + "type":"string", + "max":255, + "min":1 + }, + "Namespace":{ + "type":"string", + "max":255, + "min":1 + }, + "NoSuchChange":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchDelegationSet":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "NoSuchGeoLocation":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchHealthCheck":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchHostedZone":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchTrafficPolicy":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchTrafficPolicyInstance":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "Nonce":{ + "type":"string", + "max":128, + "min":1 + }, + "PageMarker":{ + "type":"string", + "max":64 + }, + "PageMaxItems":{"type":"string"}, + "PageTruncated":{"type":"boolean"}, + "Period":{ + "type":"integer", + "min":60 + }, + "Port":{ + "type":"integer", + "max":65535, + "min":1 + }, + "PriorRequestNotComplete":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "PublicZoneVPCAssociation":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "RData":{ + "type":"string", + "max":4000 + }, + "RRType":{ + "type":"string", + "enum":[ + "SOA", + "A", + "TXT", + "NS", + "CNAME", + "MX", + "PTR", + "SRV", + "SPF", + "AAAA" + ] + }, + "RequestInterval":{ + "type":"integer", + "max":30, + "min":10 + }, + "ResourceDescription":{ + "type":"string", + "max":256 + }, + "ResourceId":{ + "type":"string", + "max":32 + }, + "ResourcePath":{ + "type":"string", + "max":255 + }, + "ResourceRecord":{ + "type":"structure", + "required":["Value"], + "members":{ + "Value":{"shape":"RData"} + } + }, + "ResourceRecordSet":{ + "type":"structure", + "required":[ + "Name", + "Type" + ], + "members":{ + "Name":{"shape":"DNSName"}, + "Type":{"shape":"RRType"}, + "SetIdentifier":{"shape":"ResourceRecordSetIdentifier"}, + "Weight":{"shape":"ResourceRecordSetWeight"}, + "Region":{"shape":"ResourceRecordSetRegion"}, + "GeoLocation":{"shape":"GeoLocation"}, + "Failover":{"shape":"ResourceRecordSetFailover"}, + "TTL":{"shape":"TTL"}, + "ResourceRecords":{"shape":"ResourceRecords"}, + "AliasTarget":{"shape":"AliasTarget"}, + "HealthCheckId":{"shape":"HealthCheckId"}, + 
"TrafficPolicyInstanceId":{"shape":"TrafficPolicyInstanceId"} + } + }, + "ResourceRecordSetFailover":{ + "type":"string", + "enum":[ + "PRIMARY", + "SECONDARY" + ] + }, + "ResourceRecordSetIdentifier":{ + "type":"string", + "max":128, + "min":1 + }, + "ResourceRecordSetRegion":{ + "type":"string", + "enum":[ + "us-east-1", + "us-west-1", + "us-west-2", + "eu-west-1", + "eu-central-1", + "ap-southeast-1", + "ap-southeast-2", + "ap-northeast-1", + "ap-northeast-2", + "sa-east-1", + "cn-north-1", + "ap-south-1" + ], + "max":64, + "min":1 + }, + "ResourceRecordSetWeight":{ + "type":"long", + "max":255, + "min":0 + }, + "ResourceRecordSets":{ + "type":"list", + "member":{ + "shape":"ResourceRecordSet", + "locationName":"ResourceRecordSet" + } + }, + "ResourceRecords":{ + "type":"list", + "member":{ + "shape":"ResourceRecord", + "locationName":"ResourceRecord" + }, + "min":1 + }, + "ResourceTagSet":{ + "type":"structure", + "members":{ + "ResourceType":{"shape":"TagResourceType"}, + "ResourceId":{"shape":"TagResourceId"}, + "Tags":{"shape":"TagList"} + } + }, + "ResourceTagSetList":{ + "type":"list", + "member":{ + "shape":"ResourceTagSet", + "locationName":"ResourceTagSet" + } + }, + "ResourceURI":{ + "type":"string", + "max":1024 + }, + "SearchString":{ + "type":"string", + "max":255 + }, + "Statistic":{ + "type":"string", + "enum":[ + "Average", + "Sum", + "SampleCount", + "Maximum", + "Minimum" + ] + }, + "Status":{"type":"string"}, + "StatusReport":{ + "type":"structure", + "members":{ + "Status":{"shape":"Status"}, + "CheckedTime":{"shape":"TimeStamp"} + } + }, + "TTL":{ + "type":"long", + "max":2147483647, + "min":0 + }, + "Tag":{ + "type":"structure", + "members":{ + "Key":{"shape":"TagKey"}, + "Value":{"shape":"TagValue"} + } + }, + "TagKey":{ + "type":"string", + "max":128 + }, + "TagKeyList":{ + "type":"list", + "member":{ + "shape":"TagKey", + "locationName":"Key" + }, + "max":10, + "min":1 + }, + "TagList":{ + "type":"list", + "member":{ + "shape":"Tag", + "locationName":"Tag" + }, + "max":10, + "min":1 + }, + "TagResourceId":{ + "type":"string", + "max":64 + }, + "TagResourceIdList":{ + "type":"list", + "member":{ + "shape":"TagResourceId", + "locationName":"ResourceId" + }, + "max":10, + "min":1 + }, + "TagResourceType":{ + "type":"string", + "enum":[ + "healthcheck", + "hostedzone" + ] + }, + "TagValue":{ + "type":"string", + "max":256 + }, + "Threshold":{"type":"double"}, + "ThrottlingException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TimeStamp":{"type":"timestamp"}, + "TooManyHealthChecks":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "TooManyHostedZones":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyTrafficPolicies":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyTrafficPolicyInstances":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TrafficPolicies":{ + "type":"list", + "member":{ + "shape":"TrafficPolicy", + "locationName":"TrafficPolicy" + } + }, + "TrafficPolicy":{ + "type":"structure", + "required":[ + "Id", + "Version", + "Name", + "Type", + "Document" + ], + "members":{ + "Id":{"shape":"TrafficPolicyId"}, + 
"Version":{"shape":"TrafficPolicyVersion"}, + "Name":{"shape":"TrafficPolicyName"}, + "Type":{"shape":"RRType"}, + "Document":{"shape":"TrafficPolicyDocument"}, + "Comment":{"shape":"TrafficPolicyComment"} + } + }, + "TrafficPolicyAlreadyExists":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "TrafficPolicyComment":{ + "type":"string", + "max":1024 + }, + "TrafficPolicyDocument":{ + "type":"string", + "max":102400 + }, + "TrafficPolicyId":{ + "type":"string", + "max":36 + }, + "TrafficPolicyInUse":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TrafficPolicyInstance":{ + "type":"structure", + "required":[ + "Id", + "HostedZoneId", + "Name", + "TTL", + "State", + "Message", + "TrafficPolicyId", + "TrafficPolicyVersion", + "TrafficPolicyType" + ], + "members":{ + "Id":{"shape":"TrafficPolicyInstanceId"}, + "HostedZoneId":{"shape":"ResourceId"}, + "Name":{"shape":"DNSName"}, + "TTL":{"shape":"TTL"}, + "State":{"shape":"TrafficPolicyInstanceState"}, + "Message":{"shape":"Message"}, + "TrafficPolicyId":{"shape":"TrafficPolicyId"}, + "TrafficPolicyVersion":{"shape":"TrafficPolicyVersion"}, + "TrafficPolicyType":{"shape":"RRType"} + } + }, + "TrafficPolicyInstanceAlreadyExists":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "TrafficPolicyInstanceCount":{"type":"integer"}, + "TrafficPolicyInstanceId":{ + "type":"string", + "max":36 + }, + "TrafficPolicyInstanceState":{"type":"string"}, + "TrafficPolicyInstances":{ + "type":"list", + "member":{ + "shape":"TrafficPolicyInstance", + "locationName":"TrafficPolicyInstance" + } + }, + "TrafficPolicyName":{ + "type":"string", + "max":512 + }, + "TrafficPolicySummaries":{ + "type":"list", + "member":{ + "shape":"TrafficPolicySummary", + "locationName":"TrafficPolicySummary" + } + }, + "TrafficPolicySummary":{ + "type":"structure", + "required":[ + "Id", + "Name", + "Type", + "LatestVersion", + "TrafficPolicyCount" + ], + "members":{ + "Id":{"shape":"TrafficPolicyId"}, + "Name":{"shape":"TrafficPolicyName"}, + "Type":{"shape":"RRType"}, + "LatestVersion":{"shape":"TrafficPolicyVersion"}, + "TrafficPolicyCount":{"shape":"TrafficPolicyVersion"} + } + }, + "TrafficPolicyVersion":{ + "type":"integer", + "max":1000, + "min":1 + }, + "TrafficPolicyVersionMarker":{ + "type":"string", + "max":4 + }, + "UpdateHealthCheckRequest":{ + "type":"structure", + "required":["HealthCheckId"], + "members":{ + "HealthCheckId":{ + "shape":"HealthCheckId", + "location":"uri", + "locationName":"HealthCheckId" + }, + "HealthCheckVersion":{"shape":"HealthCheckVersion"}, + "IPAddress":{"shape":"IPAddress"}, + "Port":{"shape":"Port"}, + "ResourcePath":{"shape":"ResourcePath"}, + "FullyQualifiedDomainName":{"shape":"FullyQualifiedDomainName"}, + "SearchString":{"shape":"SearchString"}, + "FailureThreshold":{"shape":"FailureThreshold"}, + "Inverted":{"shape":"Inverted"}, + "HealthThreshold":{"shape":"HealthThreshold"}, + "ChildHealthChecks":{"shape":"ChildHealthCheckList"}, + "EnableSNI":{"shape":"EnableSNI"}, + "Regions":{"shape":"HealthCheckRegionList"}, + "AlarmIdentifier":{"shape":"AlarmIdentifier"}, + "InsufficientDataHealthStatus":{"shape":"InsufficientDataHealthStatus"} + } + }, + "UpdateHealthCheckResponse":{ + "type":"structure", + "required":["HealthCheck"], + "members":{ + 
"HealthCheck":{"shape":"HealthCheck"} + } + }, + "UpdateHostedZoneCommentRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"Id" + }, + "Comment":{"shape":"ResourceDescription"} + } + }, + "UpdateHostedZoneCommentResponse":{ + "type":"structure", + "required":["HostedZone"], + "members":{ + "HostedZone":{"shape":"HostedZone"} + } + }, + "UpdateTrafficPolicyCommentRequest":{ + "type":"structure", + "required":[ + "Id", + "Version", + "Comment" + ], + "members":{ + "Id":{ + "shape":"TrafficPolicyId", + "location":"uri", + "locationName":"Id" + }, + "Version":{ + "shape":"TrafficPolicyVersion", + "location":"uri", + "locationName":"Version" + }, + "Comment":{"shape":"TrafficPolicyComment"} + } + }, + "UpdateTrafficPolicyCommentResponse":{ + "type":"structure", + "required":["TrafficPolicy"], + "members":{ + "TrafficPolicy":{"shape":"TrafficPolicy"} + } + }, + "UpdateTrafficPolicyInstanceRequest":{ + "type":"structure", + "required":[ + "Id", + "TTL", + "TrafficPolicyId", + "TrafficPolicyVersion" + ], + "members":{ + "Id":{ + "shape":"TrafficPolicyInstanceId", + "location":"uri", + "locationName":"Id" + }, + "TTL":{"shape":"TTL"}, + "TrafficPolicyId":{"shape":"TrafficPolicyId"}, + "TrafficPolicyVersion":{"shape":"TrafficPolicyVersion"} + } + }, + "UpdateTrafficPolicyInstanceResponse":{ + "type":"structure", + "required":["TrafficPolicyInstance"], + "members":{ + "TrafficPolicyInstance":{"shape":"TrafficPolicyInstance"} + } + }, + "VPC":{ + "type":"structure", + "members":{ + "VPCRegion":{"shape":"VPCRegion"}, + "VPCId":{"shape":"VPCId"} + } + }, + "VPCAssociationNotFound":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "VPCId":{ + "type":"string", + "max":1024 + }, + "VPCRegion":{ + "type":"string", + "enum":[ + "us-east-1", + "us-west-1", + "us-west-2", + "eu-west-1", + "eu-central-1", + "ap-southeast-1", + "ap-southeast-2", + "ap-south-1", + "ap-northeast-1", + "ap-northeast-2", + "sa-east-1", + "cn-north-1" + ], + "max":64, + "min":1 + }, + "VPCs":{ + "type":"list", + "member":{ + "shape":"VPC", + "locationName":"VPC" + }, + "min":1 + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/route53/2013-04-01/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/route53/2013-04-01/docs-2.json new file mode 100644 index 000000000..536f1b1b9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/route53/2013-04-01/docs-2.json @@ -0,0 +1,1794 @@ +{ + "version": "2.0", + "service": null, + "operations": { + "AssociateVPCWithHostedZone": "

    This action associates a VPC with a hosted zone.

    To associate a VPC with a hosted zone, send a POST request to the /Route 53 API version/hostedzone/hosted zone ID/associatevpc resource. The request body must include a document with an AssociateVPCWithHostedZoneRequest element. The response returns the AssociateVPCWithHostedZoneResponse element, which contains a ChangeInfo element that you can use to track the progress of the request. See the GetChange operation for how to track the progress of your change.
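
    A minimal sketch of this call with aws-sdk-go (the SDK this patch vendors); the hosted zone ID, VPC ID, and region below are placeholders, and error handling is reduced to log.Fatal:

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/route53"
        )

        func main() {
            svc := route53.New(session.Must(session.NewSession()))

            // Associate a VPC with a private hosted zone; the IDs are hypothetical.
            out, err := svc.AssociateVPCWithHostedZone(&route53.AssociateVPCWithHostedZoneInput{
                HostedZoneId: aws.String("Z1EXAMPLE"),
                VPC: &route53.VPC{
                    VPCId:     aws.String("vpc-0example"),
                    VPCRegion: aws.String("us-east-1"),
                },
            })
            if err != nil {
                log.Fatal(err)
            }
            // The returned ChangeInfo Id can be polled with GetChange.
            fmt.Println(aws.StringValue(out.ChangeInfo.Id), aws.StringValue(out.ChangeInfo.Status))
        }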

    ", + "ChangeResourceRecordSets": "

    Use this action to create or change your authoritative DNS information. To use this action, send a POST request to the /Route 53 API version/hostedzone/hosted zone ID/rrset resource. The request body must include a document with a ChangeResourceRecordSetsRequest element.

    Changes are a list of change items and are considered transactional. For more information on transactional changes, also known as change batches, see POST ChangeResourceRecordSets in the Amazon Route 53 API Reference.

    Due to the nature of transactional changes, you cannot delete the same resource record set more than once in a single change batch. If you attempt to do so, Amazon Route 53 returns an InvalidChangeBatch error.

    In response to a ChangeResourceRecordSets request, your DNS data is changed on all Amazon Route 53 DNS servers. Initially, the status of a change is PENDING. This means the change has not yet propagated to all the authoritative Amazon Route 53 DNS servers. When the change is propagated to all hosts, the change returns a status of INSYNC.

    Note the following limitations on a ChangeResourceRecordSets request:

    • A request cannot contain more than 100 Change elements.
    • A request cannot contain more than 1000 ResourceRecord elements.
    • The sum of the number of characters (including spaces) in all Value elements in a request cannot exceed 32,000 characters.
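
    As a hedged illustration, a single UPSERT through aws-sdk-go looks roughly like this; the zone ID, record name, and address are placeholders:

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/route53"
        )

        func main() {
            svc := route53.New(session.Must(session.NewSession()))

            // UPSERT one A record inside a one-item change batch.
            out, err := svc.ChangeResourceRecordSets(&route53.ChangeResourceRecordSetsInput{
                HostedZoneId: aws.String("Z1EXAMPLE"),
                ChangeBatch: &route53.ChangeBatch{
                    Comment: aws.String("example change batch"),
                    Changes: []*route53.Change{{
                        Action: aws.String("UPSERT"),
                        ResourceRecordSet: &route53.ResourceRecordSet{
                            Name: aws.String("www.example.com."),
                            Type: aws.String("A"),
                            TTL:  aws.Int64(300),
                            ResourceRecords: []*route53.ResourceRecord{
                                {Value: aws.String("192.0.2.44")},
                            },
                        },
                    }},
                },
            })
            if err != nil {
                log.Fatal(err)
            }
            // Status starts as PENDING and moves to INSYNC once propagated.
            fmt.Println(aws.StringValue(out.ChangeInfo.Status))
        }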
    ", + "ChangeTagsForResource": null, + "CreateHealthCheck": "

    This action creates a new health check.

    To create a new health check, send a POST request to the /Route 53 API version/healthcheck resource. The request body must include a document with a CreateHealthCheckRequest element. The response returns the CreateHealthCheckResponse element that contains metadata about the health check.
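
    A sketch of the same operation via aws-sdk-go; the endpoint values are placeholders, and the interval/threshold bounds noted in the comments come from the api-2.json model above:

        package main

        import (
            "fmt"
            "log"
            "time"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/route53"
        )

        func main() {
            svc := route53.New(session.Must(session.NewSession()))

            // CallerReference must be unique per health check; a timestamp works for a demo.
            out, err := svc.CreateHealthCheck(&route53.CreateHealthCheckInput{
                CallerReference: aws.String(time.Now().Format(time.RFC3339Nano)),
                HealthCheckConfig: &route53.HealthCheckConfig{
                    Type:                     aws.String("HTTP"),
                    FullyQualifiedDomainName: aws.String("www.example.com"),
                    Port:                     aws.Int64(80),
                    ResourcePath:             aws.String("/health"),
                    RequestInterval:          aws.Int64(30), // model allows 10-30 seconds
                    FailureThreshold:         aws.Int64(3),  // model allows 1-10
                },
            })
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println(aws.StringValue(out.HealthCheck.Id))
        }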

    ", + "CreateHostedZone": "

    This action creates a new hosted zone.

    To create a new hosted zone, send a POST request to the /Route 53 API version/hostedzone resource. The request body must include a document with a CreateHostedZoneRequest element. The response returns the CreateHostedZoneResponse element that contains metadata about the hosted zone.

    Amazon Route 53 automatically creates a default SOA record and four NS records for the zone. The NS records in the hosted zone are the name servers you give your registrar to delegate your domain to. For more information about SOA and NS records, see NS and SOA Records that Amazon Route 53 Creates for a Hosted Zone in the Amazon Route 53 Developer Guide.

    When you create a zone, its initial status is PENDING. This means that it is not yet available on all DNS servers. The status of the zone changes to INSYNC when the NS and SOA records are available on all Amazon Route 53 DNS servers.

    When you create a hosted zone using a reusable delegation set, you can specify the optional DelegationSetId, and Amazon Route 53 assigns that set's four NS records to the zone instead of allotting a new one.
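
    A minimal aws-sdk-go sketch that creates a public zone and prints the assigned delegation set; the domain name is a placeholder:

        package main

        import (
            "fmt"
            "log"
            "time"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/route53"
        )

        func main() {
            svc := route53.New(session.Must(session.NewSession()))

            out, err := svc.CreateHostedZone(&route53.CreateHostedZoneInput{
                Name:            aws.String("example.com."),
                CallerReference: aws.String(time.Now().Format(time.RFC3339Nano)),
                HostedZoneConfig: &route53.HostedZoneConfig{
                    Comment: aws.String("created via aws-sdk-go"),
                },
            })
            if err != nil {
                log.Fatal(err)
            }
            // These four name servers are what you hand to your registrar.
            for _, ns := range out.DelegationSet.NameServers {
                fmt.Println(aws.StringValue(ns))
            }
        }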

    ", + "CreateReusableDelegationSet": "

    This action creates a reusable delegation set.

    To create a new reusable delegation set, send a POST request to the /Route 53 API version/delegationset resource. The request body must include a document with a CreateReusableDelegationSetRequest element. The response returns the CreateReusableDelegationSetResponse element that contains metadata about the delegation set.

    If the optional parameter HostedZoneId is specified, it marks the delegation set associated with that particular hosted zone as reusable.

    ", + "CreateTrafficPolicy": "

    Creates a traffic policy, which you use to create multiple DNS resource record sets for one domain name (such as example.com) or one subdomain name (such as www.example.com).

    To create a traffic policy, send a POST request to the /Route 53 API version/trafficpolicy resource. The request body must include a document with a CreateTrafficPolicyRequest element. The response includes the CreateTrafficPolicyResponse element, which contains information about the new traffic policy.

    ", + "CreateTrafficPolicyInstance": "

    Creates resource record sets in a specified hosted zone based on the settings in a specified traffic policy version. In addition, CreateTrafficPolicyInstance associates the resource record sets with a specified domain name (such as example.com) or subdomain name (such as www.example.com). Amazon Route 53 responds to DNS queries for the domain or subdomain name by using the resource record sets that CreateTrafficPolicyInstance created.

    To create a traffic policy instance, send a POST request to the /Route 53 API version/trafficpolicyinstance resource. The request body must include a document with a CreateTrafficPolicyInstanceRequest element. The response returns the CreateTrafficPolicyInstanceResponse element, which contains information about the traffic policy instance.
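
    A hedged aws-sdk-go sketch; it assumes you already own a traffic policy, so the zone ID, policy ID, and version below are placeholders:

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/route53"
        )

        func main() {
            svc := route53.New(session.Must(session.NewSession()))

            out, err := svc.CreateTrafficPolicyInstance(&route53.CreateTrafficPolicyInstanceInput{
                HostedZoneId:         aws.String("Z1EXAMPLE"),
                Name:                 aws.String("www.example.com."),
                TTL:                  aws.Int64(60),
                TrafficPolicyId:      aws.String("11111111-2222-3333-example"),
                TrafficPolicyVersion: aws.Int64(1),
            })
            if err != nil {
                log.Fatal(err)
            }
            // State reflects record-set creation; poll GetTrafficPolicyInstance for updates.
            fmt.Println(aws.StringValue(out.TrafficPolicyInstance.State))
        }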

    ", + "CreateTrafficPolicyVersion": "

    Creates a new version of an existing traffic policy. When you create a new version of a traffic policy, you specify the ID of the traffic policy that you want to update and a JSON-formatted document that describes the new version.

    You use traffic policies to create multiple DNS resource record sets for one domain name (such as example.com) or one subdomain name (such as www.example.com).

    To create a new version, send a POST request to the /Route 53 API version/trafficpolicy/ resource. The request body includes a document with a CreateTrafficPolicyVersionRequest element. The response returns the CreateTrafficPolicyVersionResponse element, which contains information about the new version of the traffic policy.

    ", + "DeleteHealthCheck": "

    This action deletes a health check. To delete a health check, send a DELETE request to the /Route 53 API version/healthcheck/health check ID resource.

    You can delete a health check only if there are no resource record sets associated with this health check. If resource record sets are associated with this health check, you must disassociate them before you can delete your health check. If you try to delete a health check that is associated with resource record sets, Amazon Route 53 will deny your request with a HealthCheckInUse error. For information about disassociating the records from your health check, see ChangeResourceRecordSets.", + "DeleteHostedZone": "

    This action deletes a hosted zone. To delete a hosted zone, send a DELETE request to the /Route 53 API version/hostedzone/hosted zone ID resource.
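
    A sketch of the delete call in aws-sdk-go; the zone ID is a placeholder, and the HostedZoneNotEmpty check anticipates the constraint described below (the code string comes from the service model above):

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/awserr"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/route53"
        )

        func main() {
            svc := route53.New(session.Must(session.NewSession()))

            out, err := svc.DeleteHostedZone(&route53.DeleteHostedZoneInput{
                Id: aws.String("Z1EXAMPLE"), // placeholder zone ID
            })
            if err != nil {
                // "HostedZoneNotEmpty" is the error code defined in the model above.
                if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "HostedZoneNotEmpty" {
                    log.Fatal("delete the non-SOA/NS record sets first")
                }
                log.Fatal(err)
            }
            fmt.Println(aws.StringValue(out.ChangeInfo.Status))
        }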

    You can delete a hosted zone only if there are no resource record sets other than the default SOA record and NS resource record sets. If your hosted zone contains other resource record sets, you must delete them before you can delete your hosted zone. If you try to delete a hosted zone that contains other resource record sets, Amazon Route 53 will deny your request with a HostedZoneNotEmpty error. For information about deleting records from your hosted zone, see ChangeResourceRecordSets.", + "DeleteReusableDelegationSet": "

    This action deletes a reusable delegation set. To delete a reusable delegation set, send a DELETE request to the /Route 53 API version/delegationset/delegation set ID resource.

    You can delete a reusable delegation set only if there are no associated hosted zones. If your reusable delegation set contains associated hosted zones, you must delete them before you can delete your reusable delegation set. If you try to delete a reusable delegation set that contains associated hosted zones, Amazon Route 53 will deny your request with a DelegationSetInUse error.", + "DeleteTrafficPolicy": "

    Deletes a traffic policy. To delete a traffic policy, send a DELETE request to the /Route 53 API version/trafficpolicy resource.

    ", + "DeleteTrafficPolicyInstance": "

    Deletes a traffic policy instance and all of the resource record sets that Amazon Route 53 created when you created the instance.

    To delete a traffic policy instance, send a DELETE request to the /Route 53 API version/trafficpolicy/traffic policy instance ID resource.

    When you delete a traffic policy instance, Amazon Route 53 also deletes all of the resource record sets that were created when you created the traffic policy instance.", + "DisassociateVPCFromHostedZone": "

    This action disassociates a VPC from a hosted zone.

    To disassociate a VPC from a hosted zone, send a POST request to the /Route 53 API version/hostedzone/hosted zone ID/disassociatevpc resource. The request body must include a document with a DisassociateVPCFromHostedZoneRequest element. The response returns the DisassociateVPCFromHostedZoneResponse element, which contains a ChangeInfo element that you can use to track the progress of the request. See the GetChange operation for how to track the progress of your change.

    ", + "GetChange": "

    This action returns the current status of a change batch request. The status is one of the following values:

    - PENDING indicates that the changes in this request have not replicated to all Amazon Route 53 DNS servers. This is the initial status of all change batch requests.

    - INSYNC indicates that the changes have replicated to all Amazon Route 53 DNS servers.
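
    A polling sketch in aws-sdk-go; the change ID is a placeholder (use the ChangeInfo.Id returned by a change request) and the 10-second interval is arbitrary:

        package main

        import (
            "fmt"
            "log"
            "time"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/route53"
        )

        func main() {
            svc := route53.New(session.Must(session.NewSession()))

            // Poll the change until it reaches INSYNC.
            for {
                out, err := svc.GetChange(&route53.GetChangeInput{Id: aws.String("C2EXAMPLE")})
                if err != nil {
                    log.Fatal(err)
                }
                if aws.StringValue(out.ChangeInfo.Status) == "INSYNC" {
                    fmt.Println("change has propagated")
                    return
                }
                time.Sleep(10 * time.Second)
            }
        }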

    ", + "GetChangeDetails": "

    This action returns the status and changes of a change batch request.

    ", + "GetCheckerIpRanges": "

    To retrieve a list of the IP ranges used by Amazon Route 53 health checkers to check the health of your resources, send a GET request to the /Route 53 API version/checkeripranges resource. You can use these IP addresses to configure router and firewall rules to allow health checkers to check the health of your resources.
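
    A short aws-sdk-go sketch that prints each returned CIDR block, e.g. for building firewall rules:

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/route53"
        )

        func main() {
            svc := route53.New(session.Must(session.NewSession()))

            out, err := svc.GetCheckerIpRanges(&route53.GetCheckerIpRangesInput{})
            if err != nil {
                log.Fatal(err)
            }
            // Each entry is a CIDR range used by Route 53 health checkers.
            for _, cidr := range out.CheckerIpRanges {
                fmt.Println(aws.StringValue(cidr))
            }
        }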

    ", + "GetGeoLocation": "

    To retrieve a single geo location, send a GET request to the /Route 53 API version/geolocation resource with one of these options: continentcode | countrycode | countrycode and subdivisioncode.

    ", + "GetHealthCheck": "

    To retrieve the health check, send a GET request to the /Route 53 API version/healthcheck/health check ID resource.

    ", + "GetHealthCheckCount": "

    To retrieve a count of all your health checks, send a GET request to the /Route 53 API version/healthcheckcount resource.

    ", + "GetHealthCheckLastFailureReason": "

    If you want to learn why a health check is currently failing or why it failed most recently (if at all), you can get the failure reason for the most recent failure. Send a GET request to the /Route 53 API version/healthcheck/health check ID/lastfailurereason resource.

    ", + "GetHealthCheckStatus": "

    To retrieve the health check status, send a GET request to the /Route 53 API version/healthcheck/health check ID/status resource. You can use this call to get a health check's current status.

    ", + "GetHostedZone": "

    To retrieve the delegation set for a hosted zone, send a GET request to the /Route 53 API version/hostedzone/hosted zone ID resource. The delegation set is the four Amazon Route 53 name servers that were assigned to the hosted zone when you created it.

    ", + "GetHostedZoneCount": "

    To retrieve a count of all your hosted zones, send a GET request to the /Route 53 API version/hostedzonecount resource.

    ", + "GetReusableDelegationSet": "

    To retrieve the reusable delegation set, send a GET request to the /Route 53 API version/delegationset/delegation set ID resource.

    ", + "GetTrafficPolicy": "

    Gets information about a specific traffic policy version. To get the information, send a GET request to the /Route 53 API version/trafficpolicy resource.

    ", + "GetTrafficPolicyInstance": "

    Gets information about a specified traffic policy instance.

    To get information about the traffic policy instance, send a GET request to the /Route 53 API version/trafficpolicyinstance resource.

    After you submit a CreateTrafficPolicyInstance or an UpdateTrafficPolicyInstance request, there's a brief delay while Amazon Route 53 creates the resource record sets that are specified in the traffic policy definition. For more information, see the State response element.", + "GetTrafficPolicyInstanceCount": "

    Gets the number of traffic policy instances that are associated with the current AWS account.

    To get the number of traffic policy instances, send a GET request to the /Route 53 API version/trafficpolicyinstancecount resource.

    ", + "ListChangeBatchesByHostedZone": "

    This action gets the list of ChangeBatches in a given time period for a given hosted zone.

    ", + "ListChangeBatchesByRRSet": "

    This action gets the list of ChangeBatches in a given time period for a given hosted zone and RRSet.

    ", + "ListGeoLocations": "

    To retrieve a list of supported geo locations, send a GET request to the /Route 53 API version/geolocations resource. The response to this request includes a GeoLocationDetailsList element with zero, one, or multiple GeoLocationDetails child elements. The list is sorted by country code, then subdivision code, with continents at the end of the list.

    By default, the list of geo locations is displayed on a single page. You can control the length of the page that is displayed by using the MaxItems parameter. If the list is truncated, IsTruncated will be set to true and a combination of NextContinentCode, NextCountryCode, NextSubdivisionCode will be populated. You can pass these as parameters to StartContinentCode, StartCountryCode, StartSubdivisionCode to control the geo location that the list begins with.

    ", + "ListHealthChecks": "

    To retrieve a list of your health checks, send a GET request to the /Route 53 API version/healthcheck resource. The response to this request includes a HealthChecks element with zero, one, or multiple HealthCheck child elements. By default, the list of health checks is displayed on a single page. You can control the length of the page that is displayed by using the MaxItems parameter. You can use the Marker parameter to control the health check that the list begins with.

    Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to a value greater than 100, Amazon Route 53 returns only the first 100.", + "ListHostedZones": "

    To retrieve a list of your hosted zones, send a GET request to the /Route 53 API version/hostedzone resource. The response to this request includes a HostedZones element with zero, one, or multiple HostedZone child elements. By default, the list of hosted zones is displayed on a single page. You can control the length of the page that is displayed by using the MaxItems parameter. You can use the Marker parameter to control the hosted zone that the list begins with.
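
    A sketch of this Marker-based pagination with the vendored aws-sdk-go route53 client (the SDK also generates a ListHostedZonesPages helper that hides the loop):

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/route53"
        )

        func main() {
            svc := route53.New(session.Must(session.NewSession()))

            input := &route53.ListHostedZonesInput{MaxItems: aws.String("100")}
            for {
                out, err := svc.ListHostedZones(input)
                if err != nil {
                    panic(err)
                }
                for _, zone := range out.HostedZones {
                    fmt.Println(aws.StringValue(zone.Name))
                }
                if !aws.BoolValue(out.IsTruncated) {
                    break
                }
                input.Marker = out.NextMarker // begin the next page at this hosted zone
            }
        }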

    Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to a value greater than 100, Amazon Route 53 returns only the first 100.", + "ListHostedZonesByName": "

    To retrieve a list of your hosted zones in lexicographic order, send a GET request to the /Route 53 API version/hostedzonesbyname resource. The response to this request includes a HostedZones element with zero or more HostedZone child elements lexicographically ordered by DNS name. By default, the list of hosted zones is displayed on a single page. You can control the length of the page that is displayed by using the MaxItems parameter. You can use the DNSName and HostedZoneId parameters to control the hosted zone that the list begins with.

    Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to a value greater than 100, Amazon Route 53 returns only the first 100.", + "ListResourceRecordSets": "

    List the resource record sets in a specified hosted zone. Send a GET request to the 2013-04-01/hostedzone/hosted zone ID/rrset resource.

    ListResourceRecordSets returns up to 100 resource record sets at a time in ASCII order, beginning at a position specified by the name and type elements. The action sorts results first by DNS name with the labels reversed, for example:

    com.example.www.

    Note the trailing dot, which can change the sort order in some circumstances. When multiple records have the same DNS name, the action sorts results by the record type.

    You can use the name and type elements to adjust the beginning position of the list of resource record sets returned:

    • If you do not specify Name or Type: The results begin with the first resource record set that the hosted zone contains.
    • If you specify Name but not Type: The results begin with the first resource record set in the list whose name is greater than or equal to Name.
    • If you specify Type but not Name: Amazon Route 53 returns the InvalidInput error.
    • If you specify both Name and Type: The results begin with the first resource record set in the list whose name is greater than or equal to Name, and whose type is greater than or equal to Type.

    This action returns the most current version of the records. This includes records that are PENDING, and that are not yet available on all Amazon Route 53 DNS servers.

    To ensure that you get an accurate listing of the resource record sets for a hosted zone at a point in time, do not submit a ChangeResourceRecordSets request while you are paging through the results of a ListResourceRecordSets request. If you do, some pages may display results without the latest changes while other pages display results with the latest changes.
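
    A paging sketch for this operation with the vendored aws-sdk-go route53 client; the hosted zone ID is a placeholder, and the record-identifier marker only matters for record sets that use SetIdentifier:

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/route53"
        )

        func main() {
            svc := route53.New(session.Must(session.NewSession()))

            input := &route53.ListResourceRecordSetsInput{
                HostedZoneId: aws.String("Z1EXAMPLE"), // placeholder
                MaxItems:     aws.String("100"),
            }
            for {
                out, err := svc.ListResourceRecordSets(input)
                if err != nil {
                    panic(err)
                }
                for _, rrset := range out.ResourceRecordSets {
                    fmt.Println(aws.StringValue(rrset.Name), aws.StringValue(rrset.Type))
                }
                if !aws.BoolValue(out.IsTruncated) {
                    break
                }
                // Resume at the position reported by the truncated response.
                input.StartRecordName = out.NextRecordName
                input.StartRecordType = out.NextRecordType
                input.StartRecordIdentifier = out.NextRecordIdentifier
            }
        }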

    ", + "ListReusableDelegationSets": "

    To retrieve a list of your reusable delegation sets, send a GET request to the /Route 53 API version/delegationset resource. The response to this request includes a DelegationSets element with zero, one, or multiple DelegationSet child elements. By default, the list of delegation sets is displayed on a single page. You can control the length of the page that is displayed by using the MaxItems parameter. You can use the Marker parameter to control the delegation set that the list begins with.

    Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to a value greater than 100, Amazon Route 53 returns only the first 100.", + "ListTagsForResource": null, + "ListTagsForResources": null, + "ListTrafficPolicies": "

    Gets information about the latest version for every traffic policy that is associated with the current AWS account. To get the information, send a GET request to the /Route 53 API version/trafficpolicy resource.

    Amazon Route 53 returns a maximum of 100 items in each response. If you have a lot of traffic policies, you can use the maxitems parameter to list them in groups of up to 100.

    The response includes three values that help you navigate from one group of maxitems traffic policies to the next:

    • IsTruncated

      If the value of IsTruncated in the response is true, there are more traffic policies associated with the current AWS account.

      If IsTruncated is false, this response includes the last traffic policy that is associated with the current account.

    • TrafficPolicyIdMarker

      If IsTruncated is true, TrafficPolicyIdMarker is the ID of the first traffic policy in the next group of MaxItems traffic policies. If you want to list more traffic policies, make another call to ListTrafficPolicies, and specify the value of the TrafficPolicyIdMarker element from the response in the TrafficPolicyIdMarker request parameter.

      If IsTruncated is false, the TrafficPolicyIdMarker element is omitted from the response.

    • MaxItems

      The value that you specified for the MaxItems parameter in the request that produced the current response.

    ", + "ListTrafficPolicyInstances": "

    Gets information about the traffic policy instances that you created by using the current AWS account.

    After you submit an UpdateTrafficPolicyInstance request, there's a brief delay while Amazon Route 53 creates the resource record sets that are specified in the traffic policy definition. For more information, see the State response element.

    To get information about the traffic policy instances that are associated with the current AWS account, send a GET request to the /Route 53 API version/trafficpolicyinstance resource.

    Amazon Route 53 returns a maximum of 100 items in each response. If you have a lot of traffic policy instances, you can use the MaxItems parameter to list them in groups of up to 100.

    The response includes five values that help you navigate from one group of MaxItems traffic policy instances to the next:

    • IsTruncated

      If the value of IsTruncated in the response is true, there are more traffic policy instances associated with the current AWS account.

      If IsTruncated is false, this response includes the last traffic policy instance that is associated with the current account.

    • MaxItems

      The value that you specified for the MaxItems parameter in the request that produced the current response.

    • HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker

      If IsTruncated is true, these three values in the response represent the first traffic policy instance in the next group of MaxItems traffic policy instances. To list more traffic policy instances, make another call to ListTrafficPolicyInstances, and specify these values in the corresponding request parameters.

      If IsTruncated is false, all three elements are omitted from the response.
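
    The three markers travel together, as in this sketch with the vendored aws-sdk-go route53 client:

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/route53"
        )

        func main() {
            svc := route53.New(session.Must(session.NewSession()))

            input := &route53.ListTrafficPolicyInstancesInput{MaxItems: aws.String("100")}
            for {
                out, err := svc.ListTrafficPolicyInstances(input)
                if err != nil {
                    panic(err)
                }
                for _, inst := range out.TrafficPolicyInstances {
                    fmt.Println(aws.StringValue(inst.Name))
                }
                if !aws.BoolValue(out.IsTruncated) {
                    break // the markers are omitted; nothing to carry forward
                }
                input.HostedZoneIdMarker = out.HostedZoneIdMarker
                input.TrafficPolicyInstanceNameMarker = out.TrafficPolicyInstanceNameMarker
                input.TrafficPolicyInstanceTypeMarker = out.TrafficPolicyInstanceTypeMarker
            }
        }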

    ", + "ListTrafficPolicyInstancesByHostedZone": "

    Gets information about the traffic policy instances that you created in a specified hosted zone.

    After you submit an UpdateTrafficPolicyInstance request, there's a brief delay while Amazon Route 53 creates the resource record sets that are specified in the traffic policy definition. For more information, see the State response element.

    To get information about the traffic policy instances that you created in a specified hosted zone, send a GET request to the /Route 53 API version/trafficpolicyinstance resource and include the ID of the hosted zone.

    Amazon Route 53 returns a maximum of 100 items in each response. If you have a lot of traffic policy instances, you can use the MaxItems parameter to list them in groups of up to 100.

    The response includes four values that help you navigate from one group of MaxItems traffic policy instances to the next:

    • IsTruncated

      If the value of IsTruncated in the response is true, there are more traffic policy instances associated with the current AWS account.

      If IsTruncated is false, this response includes the last traffic policy instance that is associated with the current account.

    • MaxItems

      The value that you specified for the MaxItems parameter in the request that produced the current response.

    • TrafficPolicyInstanceNameMarker and TrafficPolicyInstanceTypeMarker

      If IsTruncated is true, these two values in the response represent the first traffic policy instance in the next group of MaxItems traffic policy instances. To list more traffic policy instances, make another call to ListTrafficPolicyInstancesByHostedZone, and specify these values in the corresponding request parameters.

      If IsTruncated is false, both elements are omitted from the response.

    ", + "ListTrafficPolicyInstancesByPolicy": "

    Gets information about the traffic policy instances that you created by using a specific traffic policy version.

    After you submit a CreateTrafficPolicyInstance or an UpdateTrafficPolicyInstance request, there's a brief delay while Amazon Route 53 creates the resource record sets that are specified in the traffic policy definition. For more information, see the State response element.

    To get information about the traffic policy instances that you created by using a specific traffic policy version, send a GET request to the /Route 53 API version/trafficpolicyinstance resource and include the ID and version of the traffic policy.

    Amazon Route 53 returns a maximum of 100 items in each response. If you have a lot of traffic policy instances, you can use the MaxItems parameter to list them in groups of up to 100.

    The response includes five values that help you navigate from one group of MaxItems traffic policy instances to the next:

    • IsTruncated

      If the value of IsTruncated in the response is true, there are more traffic policy instances associated with the specified traffic policy.

      If IsTruncated is false, this response includes the last traffic policy instance that is associated with the specified traffic policy.

    • MaxItems

      The value that you specified for the MaxItems parameter in the request that produced the current response.

    • HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker

      If IsTruncated is true, these values in the response represent the first traffic policy instance in the next group of MaxItems traffic policy instances. To list more traffic policy instances, make another call to ListTrafficPolicyInstancesByPolicy, and specify these values in the corresponding request parameters.

      If IsTruncated is false, all three elements are omitted from the response.

    ", + "ListTrafficPolicyVersions": "

    Gets information about all of the versions for a specified traffic policy. ListTrafficPolicyVersions lists only versions that have not been deleted.

    Amazon Route 53 returns a maximum of 100 items in each response. If you have a lot of traffic policies, you can use the maxitems parameter to list them in groups of up to 100.

    The response includes three values that help you navigate from one group of maxitems traffic policies to the next:

    • IsTruncated

      If the value of IsTruncated in the response is true, there are more traffic policy versions associated with the specified traffic policy.

      If IsTruncated is false, this response includes the last traffic policy version that is associated with the specified traffic policy.

    • TrafficPolicyVersionMarker

      The ID of the next traffic policy version that is associated with the current AWS account. If you want to list more traffic policies, make another call to ListTrafficPolicyVersions, and specify the value of the TrafficPolicyVersionMarker element in the TrafficPolicyVersionMarker request parameter.

      If IsTruncated is false, Amazon Route 53 omits the TrafficPolicyVersionMarker element from the response.

    • MaxItems

      The value that you specified for the MaxItems parameter in the request that produced the current response.

    ", + "UpdateHealthCheck": "

    This action updates an existing health check.

    To update a health check, send a POST request to the /Route 53 API version/healthcheck/health check ID resource. The request body must include a document with an UpdateHealthCheckRequest element. The response returns an UpdateHealthCheckResponse element, which contains metadata about the health check.

    ", + "UpdateHostedZoneComment": "

    To update the hosted zone comment, send a POST request to the /Route 53 API version/hostedzone/hosted zone ID resource. The request body must include a document with an UpdateHostedZoneCommentRequest element. The response to this request includes the modified HostedZone element.

    The comment can have a maximum length of 256 characters.", + "UpdateTrafficPolicyComment": "

    Updates the comment for a specified traffic policy version.

    To update the comment, send a POST request to the /Route 53 API version/trafficpolicy/ resource.

    The request body must include a document with an UpdateTrafficPolicyCommentRequest element.

    ", + "UpdateTrafficPolicyInstance": "

    Updates the resource record sets in a specified hosted zone that were created based on the settings in a specified traffic policy version.

    The DNS type of the resource record sets that you're updating must match the DNS type in the JSON document that is associated with the traffic policy version that you're using to update the traffic policy instance.

    When you update a traffic policy instance, Amazon Route 53 continues to respond to DNS queries for the root resource record set name (such as example.com) while it replaces one group of resource record sets with another. Amazon Route 53 performs the following operations:

    1. Amazon Route 53 creates a new group of resource record sets based on the specified traffic policy. This is true regardless of how substantial the differences are between the existing resource record sets and the new resource record sets.
    2. When all of the new resource record sets have been created, Amazon Route 53 starts to respond to DNS queries for the root resource record set name (such as example.com) by using the new resource record sets.
    3. Amazon Route 53 deletes the old group of resource record sets that are associated with the root resource record set name.

    To update a traffic policy instance, send a POST request to the /Route 53 API version/trafficpolicyinstance/traffic policy instance ID resource. The request body must include a document with an UpdateTrafficPolicyInstanceRequest element.
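
    A sketch of the equivalent call in the vendored aws-sdk-go route53 client; the instance ID, policy ID, and version are placeholders:

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/route53"
        )

        func main() {
            svc := route53.New(session.Must(session.NewSession()))

            // Point an existing instance at version 2 of its traffic policy.
            out, err := svc.UpdateTrafficPolicyInstance(&route53.UpdateTrafficPolicyInstanceInput{
                Id:                   aws.String("ti-example"), // traffic policy instance ID (placeholder)
                TTL:                  aws.Int64(60),
                TrafficPolicyId:      aws.String("tp-example"), // placeholder
                TrafficPolicyVersion: aws.Int64(2),
            })
            if err != nil {
                panic(err)
            }
            fmt.Println(aws.StringValue(out.TrafficPolicyInstance.State))
        }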

    " + }, + "shapes": { + "AWSAccountID": { + "base": null, + "refs": { + "ChangeBatchRecord$Submitter": "

    The AWS account ID attached to the changes.

    " + } + }, + "AlarmIdentifier": { + "base": "

    A complex type that contains information to uniquely identify the CloudWatch alarm that you're associating with a Route 53 health check.

    ", + "refs": { + "HealthCheckConfig$AlarmIdentifier": "

    A complex type that contains information to uniquely identify the CloudWatch alarm that you're associating with a Route 53 health check.

    ", + "UpdateHealthCheckRequest$AlarmIdentifier": null + } + }, + "AlarmName": { + "base": null, + "refs": { + "AlarmIdentifier$Name": "

    The name of the CloudWatch alarm.

    " + } + }, + "AliasHealthEnabled": { + "base": null, + "refs": { + "AliasTarget$EvaluateTargetHealth": "

    Alias resource record sets only: If you set the value of EvaluateTargetHealth to true for the resource record set or sets in an alias, weighted alias, latency alias, or failover alias resource record set, and if you specify a value for HealthCheckId for every resource record set that is referenced by these alias resource record sets, the alias resource record sets inherit the health of the referenced resource record sets.

    In this configuration, when Amazon Route 53 receives a DNS query for an alias resource record set:

    1. Amazon Route 53 looks at the resource record sets that are referenced by the alias resource record sets to determine which health checks they're using.
    2. Amazon Route 53 checks the current status of each health check. (Amazon Route 53 periodically checks the health of the endpoint that is specified in a health check; it doesn't perform the health check when the DNS query arrives.)
    3. Based on the status of the health checks, Amazon Route 53 determines which resource record sets are healthy. Unhealthy resource record sets are immediately removed from consideration. In addition, if all of the resource record sets that are referenced by an alias resource record set are unhealthy, that alias resource record set also is immediately removed from consideration.
    4. Based on the configuration of the alias resource record sets (weighted alias or latency alias, for example) and the configuration of the resource record sets that they reference, Amazon Route 53 chooses a resource record set from the healthy resource record sets, and responds to the query.

    Note the following:

    • You cannot set EvaluateTargetHealth to true when the alias target is a CloudFront distribution.
    • If the AWS resource that you specify in AliasTarget is a resource record set or a group of resource record sets (for example, a group of weighted resource record sets), but it is not another alias resource record set, we recommend that you associate a health check with all of the resource record sets in the alias target.
    • If you specify an ELB load balancer in AliasTarget, Elastic Load Balancing routes queries only to the healthy Amazon EC2 instances that are registered with the load balancer. If no Amazon EC2 instances are healthy or if the load balancer itself is unhealthy, and if EvaluateTargetHealth is true for the corresponding alias resource record set, Amazon Route 53 routes queries to other resources.
    • When you create a load balancer, you configure settings for Elastic Load Balancing health checks; they're not Amazon Route 53 health checks, but they perform a similar function. Do not create Amazon Route 53 health checks for the Amazon EC2 instances that you register with an ELB load balancer. For more information, see How Health Checks Work in More Complex Amazon Route 53 Configurations in the Amazon Route 53 Developer Guide.

    We recommend that you set EvaluateTargetHealth to true only when you have enough idle capacity to handle the failure of one or more endpoints.

    For more information and examples, see Amazon Route 53 Health Checks and DNS Failover in the Amazon Route 53 Developer Guide.
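
    For instance, a sketch of an alias resource record set with EvaluateTargetHealth enabled, built with the vendored aws-sdk-go route53 types; the ELB DNS name and both zone IDs are placeholders, and the record is submitted through ChangeResourceRecordSets like any other record set:

        package main

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/route53"
        )

        func main() {
            svc := route53.New(session.Must(session.NewSession()))

            _, err := svc.ChangeResourceRecordSets(&route53.ChangeResourceRecordSetsInput{
                HostedZoneId: aws.String("Z1EXAMPLE"), // placeholder
                ChangeBatch: &route53.ChangeBatch{
                    Changes: []*route53.Change{{
                        Action: aws.String(route53.ChangeActionUpsert),
                        ResourceRecordSet: &route53.ResourceRecordSet{
                            Name: aws.String("example.com."),
                            Type: aws.String(route53.RRTypeA),
                            AliasTarget: &route53.AliasTarget{
                                DNSName:              aws.String("my-lb-123.us-east-1.elb.amazonaws.com."), // placeholder
                                HostedZoneId:         aws.String("Z2EXAMPLE"),                              // the load balancer's zone ID (placeholder)
                                EvaluateTargetHealth: aws.Bool(true),
                            },
                        },
                    }},
                },
            })
            if err != nil {
                panic(err)
            }
        }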

    " + } + }, + "AliasTarget": { + "base": "

    Alias resource record sets only: Information about the CloudFront distribution, ELB load balancer, Amazon S3 bucket, or Amazon Route 53 resource record set to which you are routing traffic.

    If you're creating resource record sets for a private hosted zone, note the following:

    • You can create alias resource record sets only for Amazon Route 53 resource record sets in the same private hosted zone. Creating alias resource record sets for CloudFront distributions, ELB load balancers, and Amazon S3 buckets is not supported.
    • You can't create alias resource record sets for failover, geolocation, or latency resource record sets in a private hosted zone.
    ", + "refs": { + "ResourceRecordSet$AliasTarget": "

    Alias resource record sets only: Information about the AWS resource to which you are redirecting traffic.

    " + } + }, + "AssociateVPCComment": { + "base": null, + "refs": { + "AssociateVPCWithHostedZoneRequest$Comment": "

    Optional: Any comments you want to include about an AssociateVPCWithHostedZoneRequest.

    " + } + }, + "AssociateVPCWithHostedZoneRequest": { + "base": "

    A complex type that contains information about the request to associate a VPC with a hosted zone.

    ", + "refs": { + } + }, + "AssociateVPCWithHostedZoneResponse": { + "base": "

    A complex type containing the response information for the request.

    ", + "refs": { + } + }, + "Change": { + "base": "

    A complex type that contains the information for each change in a change batch request.

    ", + "refs": { + "Changes$member": null + } + }, + "ChangeAction": { + "base": null, + "refs": { + "Change$Action": "

    The action to perform:

    • CREATE: Creates a resource record set that has the specified values.
    • DELETE: Deletes an existing resource record set that has the specified values for Name, Type, SetIdentifier (for latency, weighted, geolocation, and failover resource record sets), and TTL (except alias resource record sets, for which the TTL is determined by the AWS resource that you're routing DNS queries to).
    • UPSERT: If a resource record set does not already exist, Amazon Route 53 creates it. If a resource record set does exist, Amazon Route 53 updates it with the values in the request. Amazon Route 53 can update an existing resource record set only when all of the following values match: Name, Type, and SetIdentifier (for weighted, latency, geolocation, and failover resource record sets).
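
    A sketch of an UPSERT through the vendored aws-sdk-go route53 client; the hosted zone ID, record name, and IP address are placeholders:

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/route53"
        )

        func main() {
            svc := route53.New(session.Must(session.NewSession()))

            // UPSERT: create the record set if absent, otherwise update it in place.
            out, err := svc.ChangeResourceRecordSets(&route53.ChangeResourceRecordSetsInput{
                HostedZoneId: aws.String("Z1EXAMPLE"), // placeholder
                ChangeBatch: &route53.ChangeBatch{
                    Comment: aws.String("upsert www A record"),
                    Changes: []*route53.Change{{
                        Action: aws.String(route53.ChangeActionUpsert),
                        ResourceRecordSet: &route53.ResourceRecordSet{
                            Name: aws.String("www.example.com."),
                            Type: aws.String(route53.RRTypeA),
                            TTL:  aws.Int64(300),
                            ResourceRecords: []*route53.ResourceRecord{
                                {Value: aws.String("192.0.2.44")},
                            },
                        },
                    }},
                },
            })
            if err != nil {
                panic(err)
            }
            fmt.Println(aws.StringValue(out.ChangeInfo.Id)) // track with GetChange
        }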
    " + } + }, + "ChangeBatch": { + "base": "

    A complex type that contains an optional comment and the changes that you want to make with a change batch request.

    ", + "refs": { + "ChangeResourceRecordSetsRequest$ChangeBatch": "

    A complex type that contains an optional comment and the Changes element.

    " + } + }, + "ChangeBatchRecord": { + "base": "

    A complex type that lists the changes and information for a ChangeBatch.

    ", + "refs": { + "ChangeBatchRecords$member": null, + "GetChangeDetailsResponse$ChangeBatchRecord": "

    A complex type that contains information about the specified change batch, including the change batch ID, the status of the change, and the contained changes.

    " + } + }, + "ChangeBatchRecords": { + "base": null, + "refs": { + "ListChangeBatchesByHostedZoneResponse$ChangeBatchRecords": "

    The change batches within the given hosted zone and time period.

    ", + "ListChangeBatchesByRRSetResponse$ChangeBatchRecords": "

    The change batches within the given hosted zone and time period.

    " + } + }, + "ChangeInfo": { + "base": "

    A complex type that describes change information about changes made to your hosted zone.

    This element contains an ID that you use when performing a GetChange action to get detailed information about the change.

    ", + "refs": { + "AssociateVPCWithHostedZoneResponse$ChangeInfo": "

    A complex type that contains the ID, the status, and the date and time of your AssociateVPCWithHostedZoneRequest.

    ", + "ChangeResourceRecordSetsResponse$ChangeInfo": "

    A complex type that contains information about changes made to your hosted zone.

    This element contains an ID that you use when performing a GetChange action to get detailed information about the change.

    ", + "CreateHostedZoneResponse$ChangeInfo": "

    A complex type that contains information about the request to create a hosted zone. This includes an ID that you use when you call the GetChange action to get the current status of the change request.

    ", + "DeleteHostedZoneResponse$ChangeInfo": "

    A complex type that contains the ID, the status, and the date and time of your delete request.

    ", + "DisassociateVPCFromHostedZoneResponse$ChangeInfo": "

    A complex type that contains the ID, the status, and the date and time of your DisassociateVPCFromHostedZoneRequest.

    ", + "GetChangeResponse$ChangeInfo": "

    A complex type that contains information about the specified change batch, including the change batch ID, the status of the change, and the date and time of the request.

    " + } + }, + "ChangeResourceRecordSetsRequest": { + "base": "

    A complex type that contains a change batch.

    ", + "refs": { + } + }, + "ChangeResourceRecordSetsResponse": { + "base": "

    A complex type containing the response for the request.

    ", + "refs": { + } + }, + "ChangeStatus": { + "base": null, + "refs": { + "ChangeBatchRecord$Status": "

    The current state of the request. PENDING indicates that this request has not yet been applied to all Amazon Route 53 DNS servers.

    Valid Values: PENDING | INSYNC

    ", + "ChangeInfo$Status": "

    The current state of the request. PENDING indicates that this request has not yet been applied to all Amazon Route 53 DNS servers.

    Valid Values: PENDING | INSYNC

    " + } + }, + "ChangeTagsForResourceRequest": { + "base": "

    A complex type containing information about a request to add, change, or delete the tags that are associated with a resource.

    ", + "refs": { + } + }, + "ChangeTagsForResourceResponse": { + "base": "

    Empty response for the request.

    ", + "refs": { + } + }, + "Changes": { + "base": null, + "refs": { + "ChangeBatch$Changes": "

    A complex type that contains one Change element for each resource record set that you want to create or delete.

    ", + "ChangeBatchRecord$Changes": "

    A list of changes made in the ChangeBatch.

    " + } + }, + "CheckerIpRanges": { + "base": null, + "refs": { + "GetCheckerIpRangesResponse$CheckerIpRanges": "

    A complex type that contains a sorted list of IP ranges in CIDR format for Amazon Route 53 health checkers.

    " + } + }, + "ChildHealthCheckList": { + "base": null, + "refs": { + "HealthCheckConfig$ChildHealthChecks": "

    For a specified parent health check, a list of HealthCheckId values for the associated child health checks.

    ", + "UpdateHealthCheckRequest$ChildHealthChecks": "

    For a specified parent health check, a list of HealthCheckId values for the associated child health checks.

    Specify this value only if you want to change it.

    " + } + }, + "CloudWatchAlarmConfiguration": { + "base": "

    For CLOUDWATCH_METRIC health checks, a complex type that contains information about the CloudWatch alarm that you're associating with the health check.

    ", + "refs": { + "HealthCheck$CloudWatchAlarmConfiguration": "

    For CLOUDWATCH_METRIC health checks, a complex type that contains information about the CloudWatch alarm that you're associating with the health check.

    " + } + }, + "CloudWatchRegion": { + "base": null, + "refs": { + "AlarmIdentifier$Region": "

    The CloudWatch region that the CloudWatch alarm was created in.

    " + } + }, + "ComparisonOperator": { + "base": null, + "refs": { + "CloudWatchAlarmConfiguration$ComparisonOperator": "

    The arithmetic operation to use when comparing the specified Statistic and Threshold.

    Valid Values: GreaterThanOrEqualToThreshold | GreaterThanThreshold | LessThanThreshold | LessThanOrEqualToThreshold

    " + } + }, + "ConcurrentModification": { + "base": "

    Another user submitted a request to update the object at the same time that you did. Retry the request.

    ", + "refs": { + } + }, + "ConflictingDomainExists": { + "base": null, + "refs": { + } + }, + "ConflictingTypes": { + "base": "

    You tried to update a traffic policy instance by using a traffic policy version that has a different DNS type than the current type for the instance. You specified the type in the JSON document in the CreateTrafficPolicy or CreateTrafficPolicyVersion request.

    ", + "refs": { + } + }, + "CreateHealthCheckRequest": { + "base": "

    A complex type that contains information about the request to create a health check.

    ", + "refs": { + } + }, + "CreateHealthCheckResponse": { + "base": "

    A complex type containing the response information for the new health check.

    ", + "refs": { + } + }, + "CreateHostedZoneRequest": { + "base": "

    A complex type that contains information about the request to create a hosted zone.

    ", + "refs": { + } + }, + "CreateHostedZoneResponse": { + "base": "

    A complex type containing the response information for the new hosted zone.

    ", + "refs": { + } + }, + "CreateReusableDelegationSetRequest": { + "base": null, + "refs": { + } + }, + "CreateReusableDelegationSetResponse": { + "base": null, + "refs": { + } + }, + "CreateTrafficPolicyInstanceRequest": { + "base": "

    A complex type that contains information about the resource record sets that you want to create based on a specified traffic policy.

    ", + "refs": { + } + }, + "CreateTrafficPolicyInstanceResponse": { + "base": "

    A complex type that contains the response information for the CreateTrafficPolicyInstance request.

    ", + "refs": { + } + }, + "CreateTrafficPolicyRequest": { + "base": "

    A complex type that contains information about the traffic policy that you want to create.

    ", + "refs": { + } + }, + "CreateTrafficPolicyResponse": { + "base": "

    A complex type that contains the response information for the CreateTrafficPolicy request.

    ", + "refs": { + } + }, + "CreateTrafficPolicyVersionRequest": { + "base": "

    A complex type that contains information about the traffic policy for which you want to create a new version.

    ", + "refs": { + } + }, + "CreateTrafficPolicyVersionResponse": { + "base": "

    A complex type that contains the response information for the CreateTrafficPolicyVersion request.

    ", + "refs": { + } + }, + "DNSName": { + "base": null, + "refs": { + "AliasTarget$DNSName": "

    Alias resource record sets only: The external DNS name associated with the AWS Resource. The value that you specify depends on where you want to route queries:

    • A CloudFront distribution: Specify the domain name that CloudFront assigned when you created your distribution. Your CloudFront distribution must include an alternate domain name that matches the name of the resource record set. For example, if the name of the resource record set is acme.example.com, your CloudFront distribution must include acme.example.com as one of the alternate domain names. For more information, see Using Alternate Domain Names (CNAMEs) in the Amazon CloudFront Developer Guide.
    • An ELB load balancer: Specify the DNS name associated with the load balancer. You can get the DNS name by using the AWS Management Console, the ELB API, or the AWS CLI. Use the same method to get values for HostedZoneId and DNSName. If you get one value from the console and the other value from the API or the CLI, creating the resource record set will fail.
    • An Elastic Beanstalk environment: Specify the CNAME attribute for the environment. (The environment must have a regionalized domain name.)
    • An Amazon S3 bucket that is configured as a static website: Specify the domain name of the Amazon S3 website endpoint in which you created the bucket; for example, s3-website-us-east-1.amazonaws.com. For more information about valid values, see the table Amazon Simple Storage Service (S3) Website Endpoints in the Amazon Web Services General Reference. For more information about using Amazon S3 buckets for websites, see Hosting a Static Website on Amazon S3 in the Amazon Simple Storage Service Developer Guide.
    • Another Amazon Route 53 resource record set: Specify the value of the Name element for a resource record set in the current hosted zone.
    ", + "CreateHostedZoneRequest$Name": "

    The name of the domain. This must be a fully-specified domain, for example, www.example.com. The trailing dot is optional; Amazon Route 53 assumes that the domain name is fully qualified. This means that Amazon Route 53 treats www.example.com (without a trailing dot) and www.example.com. (with a trailing dot) as identical.

    This is the name you have registered with your DNS registrar. You should ask your registrar to change the authoritative name servers for your domain to the set of NameServers elements returned in DelegationSet.
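
    A creation sketch with the vendored aws-sdk-go route53 client that prints the NameServers to hand to your registrar; the domain is a placeholder, and CallerReference must be unique per request:

        package main

        import (
            "fmt"
            "time"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/route53"
        )

        func main() {
            svc := route53.New(session.Must(session.NewSession()))

            out, err := svc.CreateHostedZone(&route53.CreateHostedZoneInput{
                Name:            aws.String("example.com"), // trailing dot optional
                CallerReference: aws.String(time.Now().Format(time.RFC3339Nano)),
            })
            if err != nil {
                panic(err)
            }
            for _, ns := range out.DelegationSet.NameServers {
                fmt.Println(aws.StringValue(ns)) // configure these at your DNS registrar
            }
        }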

    ", + "CreateTrafficPolicyInstanceRequest$Name": "

    The domain name (such as example.com) or subdomain name (such as www.example.com) for which Amazon Route 53 responds to DNS queries by using the resource record sets that Amazon Route 53 creates for this traffic policy instance.

    ", + "DelegationSetNameServers$member": null, + "HostedZone$Name": "

    The name of the domain. This must be a fully-specified domain, for example, www.example.com. The trailing dot is optional; Amazon Route 53 assumes that the domain name is fully qualified. This means that Amazon Route 53 treats www.example.com (without a trailing dot) and www.example.com. (with a trailing dot) as identical.

    This is the name you have registered with your DNS registrar. You should ask your registrar to change the authoritative name servers for your domain to the set of NameServers elements returned in DelegationSet.

    ", + "ListChangeBatchesByRRSetRequest$Name": "

    The name of the RRSet that you want to see changes for.

    ", + "ListHostedZonesByNameRequest$DNSName": "

    The first name in the lexicographic ordering of domain names that you want the ListHostedZonesByNameRequest request to list.

    If the request returned more than one page of results, submit another request and specify the value of NextDNSName and NextHostedZoneId from the last response in the DNSName and HostedZoneId parameters to get the next page of results.

    ", + "ListHostedZonesByNameResponse$DNSName": "

    The DNSName value sent in the request.

    ", + "ListHostedZonesByNameResponse$NextDNSName": "

    If the value of IsTruncated in the ListHostedZonesByNameResponse is true, there are more hosted zones associated with the current AWS account. To get the next page of results, make another request to ListHostedZonesByName. Specify the value of NextDNSName in the DNSName parameter. Specify NextHostedZoneId in the HostedZoneId parameter.

    ", + "ListResourceRecordSetsRequest$StartRecordName": "

    The first name in the lexicographic ordering of domain names that you want the ListResourceRecordSets request to list.

    ", + "ListResourceRecordSetsResponse$NextRecordName": "

    If the results were truncated, the name of the next record in the list. This element is present only if IsTruncated is true.

    ", + "ListTrafficPolicyInstancesByHostedZoneRequest$TrafficPolicyInstanceNameMarker": "

    For the first request to ListTrafficPolicyInstancesByHostedZone, omit this value.

    If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceNameMarker is the name of the first traffic policy instance in the next group of MaxItems traffic policy instances.

    If the value of IsTruncated in the previous response was false, there are no more traffic policy instances to get for this hosted zone.

    If the value of IsTruncated in the previous response was false, omit this value.

    ", + "ListTrafficPolicyInstancesByHostedZoneResponse$TrafficPolicyInstanceNameMarker": "

    If IsTruncated is true, TrafficPolicyInstanceNameMarker is the name of the first traffic policy instance in the next group of MaxItems traffic policy instances.

    ", + "ListTrafficPolicyInstancesByPolicyRequest$TrafficPolicyInstanceNameMarker": "

    For the first request to ListTrafficPolicyInstancesByPolicy, omit this value.

    If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceNameMarker is the name of the first traffic policy instance in the next group of MaxItems traffic policy instances.

    If the value of IsTruncated in the previous response was false, there are no more traffic policy instances to get for this hosted zone.

    If the value of IsTruncated in the previous response was false, omit this value.

    ", + "ListTrafficPolicyInstancesByPolicyResponse$TrafficPolicyInstanceNameMarker": "

    If IsTruncated is true, TrafficPolicyInstanceNameMarker is the name of the first traffic policy instance in the next group of MaxItems traffic policy instances.

    ", + "ListTrafficPolicyInstancesRequest$TrafficPolicyInstanceNameMarker": "

    For the first request to ListTrafficPolicyInstances, omit this value.

    If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceNameMarker is the name of the first traffic policy instance in the next group of MaxItems traffic policy instances.

    If the value of IsTruncated in the previous response was false, there are no more traffic policy instances to get.

    ", + "ListTrafficPolicyInstancesResponse$TrafficPolicyInstanceNameMarker": "

    If IsTruncated is true, TrafficPolicyInstanceNameMarker is the name of the first traffic policy instance in the next group of MaxItems traffic policy instances.

    ", + "ResourceRecordSet$Name": "

    The name of the domain you want to perform the action on.

    Enter a fully qualified domain name, for example, www.example.com. You can optionally include a trailing dot. If you omit the trailing dot, Amazon Route 53 still assumes that the domain name that you specify is fully qualified. This means that Amazon Route 53 treats www.example.com (without a trailing dot) and www.example.com. (with a trailing dot) as identical.

    For information about how to specify characters other than a-z, 0-9, and - (hyphen) and how to specify internationalized domain names, see DNS Domain Name Format in the Amazon Route 53 Developer Guide.

    You can use an asterisk (*) character in the name. DNS treats the * character either as a wildcard or as the * character (ASCII 42), depending on where it appears in the name. For more information, see Using an Asterisk (*) in the Names of Hosted Zones and Resource Record Sets in the Amazon Route 53 Developer Guide.

    You can't use the * wildcard for resource record sets that have a type of NS.", + "TrafficPolicyInstance$Name": null + } + }, + "Date": { + "base": null, + "refs": { + "ListChangeBatchesByHostedZoneRequest$StartDate": "

    The start of the time period you want to see changes for.

    ", + "ListChangeBatchesByHostedZoneRequest$EndDate": "

    The end of the time period you want to see changes for.

    ", + "ListChangeBatchesByRRSetRequest$StartDate": "

    The start of the time period you want to see changes for.

    ", + "ListChangeBatchesByRRSetRequest$EndDate": "

    The end of the time period you want to see changes for.

    " + } + }, + "DelegationSet": { + "base": "

    A complex type that contains name server information.

    ", + "refs": { + "CreateHostedZoneResponse$DelegationSet": "

    A complex type that contains name server information.

    ", + "CreateReusableDelegationSetResponse$DelegationSet": "

    A complex type that contains name server information.

    ", + "DelegationSets$member": null, + "GetHostedZoneResponse$DelegationSet": "

    A complex type that contains information about the name servers for the specified hosted zone.

    ", + "GetReusableDelegationSetResponse$DelegationSet": "

    A complex type that contains the information about the nameservers for the specified delegation set ID.

    " + } + }, + "DelegationSetAlreadyCreated": { + "base": "

    A delegation set with the same owner and caller reference combination has already been created.

    ", + "refs": { + } + }, + "DelegationSetAlreadyReusable": { + "base": "

    The specified delegation set has already been marked as reusable.

    ", + "refs": { + } + }, + "DelegationSetInUse": { + "base": "

    The specified delegation set contains associated hosted zones, which must be deleted before the reusable delegation set can be deleted.

    ", + "refs": { + } + }, + "DelegationSetNameServers": { + "base": null, + "refs": { + "DelegationSet$NameServers": "

    A complex type that contains the authoritative name servers for the hosted zone. Use the method provided by your domain registrar to add an NS record to your domain for each NameServer that is assigned to your hosted zone.

    " + } + }, + "DelegationSetNotAvailable": { + "base": "

    Amazon Route 53 allows some duplicate domain names, but there is a maximum number of duplicate names. This error indicates that you have reached that maximum. If you want to create another hosted zone with the same name and Amazon Route 53 generates this error, you can request an increase to the limit on the Contact Us page.

    ", + "refs": { + } + }, + "DelegationSetNotReusable": { + "base": "

    The specified delegation set has not been marked as reusable.

    ", + "refs": { + } + }, + "DelegationSets": { + "base": null, + "refs": { + "ListReusableDelegationSetsResponse$DelegationSets": "

    A complex type that contains information about the reusable delegation sets associated with the current AWS account.

    " + } + }, + "DeleteHealthCheckRequest": { + "base": "

    A complex type containing the request information for deleting a health check.

    ", + "refs": { + } + }, + "DeleteHealthCheckResponse": { + "base": "

    Empty response for the request.

    ", + "refs": { + } + }, + "DeleteHostedZoneRequest": { + "base": "

    A complex type that contains information about the hosted zone that you want to delete.

    ", + "refs": { + } + }, + "DeleteHostedZoneResponse": { + "base": "

    A complex type containing the response information for the request.

    ", + "refs": { + } + }, + "DeleteReusableDelegationSetRequest": { + "base": "

    A complex type containing the information for the delete request.

    ", + "refs": { + } + }, + "DeleteReusableDelegationSetResponse": { + "base": "

    Empty response for the request.

    ", + "refs": { + } + }, + "DeleteTrafficPolicyInstanceRequest": { + "base": "

    A complex type that contains information about the traffic policy instance that you want to delete.

    ", + "refs": { + } + }, + "DeleteTrafficPolicyInstanceResponse": { + "base": "

    An empty element.

    ", + "refs": { + } + }, + "DeleteTrafficPolicyRequest": { + "base": "

    A request to delete a specified traffic policy version.

    ", + "refs": { + } + }, + "DeleteTrafficPolicyResponse": { + "base": "

    An empty element.

    ", + "refs": { + } + }, + "Dimension": { + "base": "

    The name and value of a dimension for a CloudWatch metric.

    ", + "refs": { + "DimensionList$member": null + } + }, + "DimensionField": { + "base": null, + "refs": { + "Dimension$Name": "

    The name of the dimension.

    ", + "Dimension$Value": "

    The value of the dimension.

    " + } + }, + "DimensionList": { + "base": null, + "refs": { + "CloudWatchAlarmConfiguration$Dimensions": "

    A list of Dimension elements for the CloudWatch metric that is associated with the CloudWatch alarm. For information about the metrics and dimensions that CloudWatch supports, see Amazon CloudWatch Namespaces, Dimensions, and Metrics Reference.

    " + } + }, + "DisassociateVPCComment": { + "base": null, + "refs": { + "DisassociateVPCFromHostedZoneRequest$Comment": "

    Optional: Any comments you want to include about a DisassociateVPCFromHostedZoneRequest.

    " + } + }, + "DisassociateVPCFromHostedZoneRequest": { + "base": "

    A complex type that contains information about the request to disassociate a VPC from a hosted zone.

    ", + "refs": { + } + }, + "DisassociateVPCFromHostedZoneResponse": { + "base": "

    A complex type containing the response information for the request.

    ", + "refs": { + } + }, + "EnableSNI": { + "base": null, + "refs": { + "HealthCheckConfig$EnableSNI": "

    Specify whether you want Amazon Route 53 to send the value of FullyQualifiedDomainName to the endpoint in the client_hello message during TLS negotiation. If you don't specify a value for EnableSNI, Amazon Route 53 defaults to true when Type is HTTPS or HTTPS_STR_MATCH and defaults to false when Type is any other value.

    ", + "UpdateHealthCheckRequest$EnableSNI": "

    Specify whether you want Amazon Route 53 to send the value of FullyQualifiedDomainName to the endpoint in the client_hello message during TLS negotiation. If you don't specify a value for EnableSNI, Amazon Route 53 defaults to true when Type is HTTPS or HTTPS_STR_MATCH and defaults to false when Type is any other value.

    Specify this value only if you want to change it.

    " + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "ConcurrentModification$message": "

    Descriptive message for the error response.

    ", + "ConflictingDomainExists$message": null, + "ConflictingTypes$message": "

    Descriptive message for the error response.

    ", + "DelegationSetAlreadyCreated$message": "

    Descriptive message for the error response.

    ", + "DelegationSetAlreadyReusable$message": "

    Descriptive message for the error response.

    ", + "DelegationSetInUse$message": "

    Descriptive message for the error response.

    ", + "DelegationSetNotAvailable$message": "

    Descriptive message for the error response.

    ", + "DelegationSetNotReusable$message": "

    Descriptive message for the error response.

    ", + "ErrorMessages$member": null, + "HealthCheckAlreadyExists$message": "

    Descriptive message for the error response.

    ", + "HealthCheckInUse$message": "

    Descriptive message for the error response.

    ", + "HealthCheckVersionMismatch$message": null, + "HostedZoneAlreadyExists$message": "

    Descriptive message for the error response.

    ", + "HostedZoneNotEmpty$message": "

    Descriptive message for the error response.

    ", + "HostedZoneNotFound$message": "

    Descriptive message for the error response.

    ", + "IncompatibleVersion$message": null, + "InvalidArgument$message": "

    Descriptive message for the error response.

    ", + "InvalidDomainName$message": "

    Descriptive message for the error response.

    ", + "InvalidInput$message": "

    Descriptive message for the error response.

    ", + "InvalidTrafficPolicyDocument$message": "

    Descriptive message for the error response.

    ", + "InvalidVPCId$message": "

    Descriptive message for the error response.

    ", + "LastVPCAssociation$message": "

    Descriptive message for the error response.

    ", + "LimitsExceeded$message": "

    Descriptive message for the error response.

    ", + "NoSuchChange$message": null, + "NoSuchDelegationSet$message": "

    Descriptive message for the error response.

    ", + "NoSuchGeoLocation$message": "

    Descriptive message for the error response.

    ", + "NoSuchHealthCheck$message": "

    Descriptive message for the error response.

    ", + "NoSuchHostedZone$message": null, + "NoSuchTrafficPolicy$message": "

    Descriptive message for the error response.

    ", + "NoSuchTrafficPolicyInstance$message": "

    Descriptive message for the error response.

    ", + "PriorRequestNotComplete$message": null, + "PublicZoneVPCAssociation$message": "

    Descriptive message for the error response.

    ", + "ThrottlingException$message": null, + "TooManyHealthChecks$message": null, + "TooManyHostedZones$message": "

    Descriptive message for the error response.

    ", + "TooManyTrafficPolicies$message": "

    Descriptive message for the error response.

    ", + "TooManyTrafficPolicyInstances$message": "

    Descriptive message for the error response.

    ", + "TrafficPolicyAlreadyExists$message": "

    Descriptive message for the error response.

    ", + "TrafficPolicyInUse$message": "

    Descriptive message for the error response.

    ", + "TrafficPolicyInstanceAlreadyExists$message": "

    Descriptive message for the error response.

    ", + "VPCAssociationNotFound$message": "

    Descriptive message for the error response.

    " + } + }, + "ErrorMessages": { + "base": null, + "refs": { + "InvalidChangeBatch$messages": "

    Descriptive message for the error response.

    " + } + }, + "EvaluationPeriods": { + "base": null, + "refs": { + "CloudWatchAlarmConfiguration$EvaluationPeriods": "

    The number of periods over which data is compared to the specified threshold.

    " + } + }, + "FailureThreshold": { + "base": null, + "refs": { + "HealthCheckConfig$FailureThreshold": "

    The number of consecutive health checks that an endpoint must pass or fail for Amazon Route 53 to change the current status of the endpoint from unhealthy to healthy or vice versa.

    Valid values are integers between 1 and 10. For more information, see \"How Amazon Route 53 Determines Whether an Endpoint Is Healthy\" in the Amazon Route 53 Developer Guide.

    ", + "UpdateHealthCheckRequest$FailureThreshold": "

    The number of consecutive health checks that an endpoint must pass or fail for Amazon Route 53 to change the current status of the endpoint from unhealthy to healthy or vice versa.

    Valid values are integers between 1 and 10. For more information, see \"How Amazon Route 53 Determines Whether an Endpoint Is Healthy\" in the Amazon Route 53 Developer Guide.

    Specify this value only if you want to change it.

    " + } + }, + "FullyQualifiedDomainName": { + "base": null, + "refs": { + "HealthCheckConfig$FullyQualifiedDomainName": "

    Fully qualified domain name of the instance to be health checked.

    ", + "UpdateHealthCheckRequest$FullyQualifiedDomainName": "

    Fully qualified domain name of the instance to be health checked.

    Specify this value only if you want to change it.

    " + } + }, + "GeoLocation": { + "base": "

    A complex type that contains information about a geo location.

    ", + "refs": { + "ResourceRecordSet$GeoLocation": "

    Geo location resource record sets only: A complex type that lets you control how Amazon Route 53 responds to DNS queries based on the geographic origin of the query. For example, if you want all queries from Africa to be routed to a web server with an IP address of 192.0.2.111, create a resource record set with a Type of A and a ContinentCode of AF.

    You can create geolocation and geolocation alias resource record sets only in public hosted zones.

    If you create separate resource record sets for overlapping geographic regions (for example, one resource record set for a continent and one for a country on the same continent), priority goes to the smallest geographic region. This allows you to route most queries for a continent to one resource and to route queries for a country on that continent to a different resource.

    You cannot create two geolocation resource record sets that specify the same geographic location.

    The value * in the CountryCode element matches all geographic locations that aren't specified in other geolocation resource record sets that have the same values for the Name and Type elements.

    Geolocation works by mapping IP addresses to locations. However, some IP addresses aren't mapped to geographic locations, so even if you create geolocation resource record sets that cover all seven continents, Amazon Route 53 will receive some DNS queries from locations that it can't identify. We recommend that you create a resource record set for which the value of CountryCode is *, which handles both queries that come from locations for which you haven't created geolocation resource record sets and queries from IP addresses that aren't mapped to a location. If you don't create a * resource record set, Amazon Route 53 returns a \"no answer\" response for queries from those locations.

    You cannot create non-geolocation resource record sets that have the same values for the Name and Type elements as geolocation resource record sets.
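
    As a sketch with the vendored aws-sdk-go route53 types, a continent record plus the recommended CountryCode * default record look like this (the zone ID, names, and addresses are placeholders); the Changes slice is submitted through ChangeResourceRecordSets as in the UPSERT example above:

        package main

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/route53"
        )

        // geoRecord builds one geolocation A record in a group that shares Name and Type.
        func geoRecord(setID string, loc *route53.GeoLocation, addr string) *route53.Change {
            return &route53.Change{
                Action: aws.String(route53.ChangeActionCreate),
                ResourceRecordSet: &route53.ResourceRecordSet{
                    Name:            aws.String("www.example.com."), // placeholder
                    Type:            aws.String(route53.RRTypeA),
                    SetIdentifier:   aws.String(setID), // distinguishes members of the group
                    GeoLocation:     loc,
                    TTL:             aws.Int64(60),
                    ResourceRecords: []*route53.ResourceRecord{{Value: aws.String(addr)}},
                },
            }
        }

        func main() {
            svc := route53.New(session.Must(session.NewSession()))

            _, err := svc.ChangeResourceRecordSets(&route53.ChangeResourceRecordSetsInput{
                HostedZoneId: aws.String("Z1EXAMPLE"), // placeholder
                ChangeBatch: &route53.ChangeBatch{
                    Changes: []*route53.Change{
                        geoRecord("africa", &route53.GeoLocation{ContinentCode: aws.String("AF")}, "192.0.2.111"),
                        geoRecord("default", &route53.GeoLocation{CountryCode: aws.String("*")}, "192.0.2.222"),
                    },
                },
            })
            if err != nil {
                panic(err)
            }
        }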

    " + } + }, + "GeoLocationContinentCode": { + "base": null, + "refs": { + "GeoLocation$ContinentCode": "

    The code for a continent geo location. Note: only continent locations have a continent code.

    Valid values: AF | AN | AS | EU | OC | NA | SA

    Constraint: Specifying ContinentCode with either CountryCode or SubdivisionCode returns an InvalidInput error.

    ", + "GeoLocationDetails$ContinentCode": "

    The code for a continent geo location. Note: only continent locations have a continent code.

    ", + "GetGeoLocationRequest$ContinentCode": "

    The code for a continent geo location. Note: only continent locations have a continent code.

    Valid values: AF | AN | AS | EU | OC | NA | SA

    Constraint: Specifying ContinentCode with either CountryCode or SubdivisionCode returns an InvalidInput error.

    ", + "ListGeoLocationsRequest$StartContinentCode": "

    The first continent code in the lexicographic ordering of geo locations that you want the ListGeoLocations request to list. For non-continent geo locations, this should be null.

    Valid values: AF | AN | AS | EU | OC | NA | SA

    Constraint: Specifying ContinentCode with either CountryCode or SubdivisionCode returns an InvalidInput error.

    ", + "ListGeoLocationsResponse$NextContinentCode": "

    If the results were truncated, the continent code of the next geo location in the list. This element is present only if IsTruncated is true and the next geo location to list is a continent location.

    " + } + }, + "GeoLocationContinentName": { + "base": null, + "refs": { + "GeoLocationDetails$ContinentName": "

    The name of the continent. This element is only present if ContinentCode is also present.

    " + } + }, + "GeoLocationCountryCode": { + "base": null, + "refs": { + "GeoLocation$CountryCode": "

    The code for a country geo location. The default location uses '*' for the country code and will match all locations that are not matched by a geo location.

    The default geo location uses a * for the country code. All other country codes follow the ISO 3166 two-character code.

    ", + "GeoLocationDetails$CountryCode": "

    The code for a country geo location. The default location uses '*' for the country code and will match all locations that are not matched by a geo location.

    The default geo location uses a * for the country code. All other country codes follow the ISO 3166 two-character code.

    ", + "GetGeoLocationRequest$CountryCode": "

    The code for a country geo location. The default location uses '*' for the country code and will match all locations that are not matched by a geo location.

    The default geo location uses a * for the country code. All other country codes follow the ISO 3166 two-character code.

    ", + "ListGeoLocationsRequest$StartCountryCode": "

    The first country code in the lexicographic ordering of geo locations that you want the ListGeoLocations request to list.

    The default geo location uses a * for the country code. All other country codes follow the ISO 3166 two-character code.

    ", + "ListGeoLocationsResponse$NextCountryCode": "

    If the results were truncated, the country code of the next geo location in the list. This element is present only if IsTruncated is true and the next geo location to list is not a continent location.

    " + } + }, + "GeoLocationCountryName": { + "base": null, + "refs": { + "GeoLocationDetails$CountryName": "

    The name of the country. This element is only present if CountryCode is also present.

    " + } + }, + "GeoLocationDetails": { + "base": "

    A complex type that contains information about a GeoLocation.

    ", + "refs": { + "GeoLocationDetailsList$member": null, + "GetGeoLocationResponse$GeoLocationDetails": "

    A complex type that contains the information about the specified geo location.

    " + } + }, + "GeoLocationDetailsList": { + "base": null, + "refs": { + "ListGeoLocationsResponse$GeoLocationDetailsList": "

    A complex type that contains information about the geo locations that are returned by the request.

    " + } + }, + "GeoLocationSubdivisionCode": { + "base": null, + "refs": { + "GeoLocation$SubdivisionCode": "

    The code for a country's subdivision (e.g., a province of Canada). A subdivision code is only valid with the appropriate country code.

    Constraint: Specifying SubdivisionCode without CountryCode returns an InvalidInput error.

    ", + "GeoLocationDetails$SubdivisionCode": "

    The code for a country's subdivision (e.g., a province of Canada). A subdivision code is only valid with the appropriate country code.

    ", + "GetGeoLocationRequest$SubdivisionCode": "

    The code for a country's subdivision (e.g., a province of Canada). A subdivision code is only valid with the appropriate country code.

    Constraint: Specifying SubdivisionCode without CountryCode returns an InvalidInput error.

    ", + "ListGeoLocationsRequest$StartSubdivisionCode": "

    The first subdivision code in the lexicographic ordering of geo locations that you want the ListGeoLocations request to list.

    Constraint: Specifying SubdivisionCode without CountryCode returns an InvalidInput error.

    ", + "ListGeoLocationsResponse$NextSubdivisionCode": "

    If the results were truncated, the subdivision code of the next geo location in the list. This element is present only if IsTruncated is true and the next geo location has a subdivision.
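
    A sketch of walking the full list with the vendored Go SDK, feeding the three Next* values from each response back into the Start* parameters of the follow-up request (the MaxItems of 100 is an arbitrary choice here):

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/route53"
        )

        func main() {
            svc := route53.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

            in := &route53.ListGeoLocationsInput{MaxItems: aws.String("100")}
            for {
                out, err := svc.ListGeoLocations(in)
                if err != nil {
                    log.Fatal(err)
                }
                for _, d := range out.GeoLocationDetailsList {
                    fmt.Println(aws.StringValue(d.CountryCode), aws.StringValue(d.SubdivisionCode))
                }
                if !aws.BoolValue(out.IsTruncated) {
                    break
                }
                // All three Next* values together identify the next geo
                // location; whichever are nil simply stay nil.
                in.StartContinentCode = out.NextContinentCode
                in.StartCountryCode = out.NextCountryCode
                in.StartSubdivisionCode = out.NextSubdivisionCode
            }
        }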

    " + } + }, + "GeoLocationSubdivisionName": { + "base": null, + "refs": { + "GeoLocationDetails$SubdivisionName": "

    The name of the subdivision. This element is only present if SubdivisionCode is also present.

    " + } + }, + "GetChangeDetailsRequest": { + "base": "

    The input for a GetChangeDetails request.

    ", + "refs": { + } + }, + "GetChangeDetailsResponse": { + "base": "

    A complex type that contains the ChangeBatchRecord element.

    ", + "refs": { + } + }, + "GetChangeRequest": { + "base": "

    The input for a GetChange request.

    ", + "refs": { + } + }, + "GetChangeResponse": { + "base": "

    A complex type that contains the ChangeInfo element.

    ", + "refs": { + } + }, + "GetCheckerIpRangesRequest": { + "base": "

    Empty request.

    ", + "refs": { + } + }, + "GetCheckerIpRangesResponse": { + "base": "

    A complex type that contains the CheckerIpRanges element.

    ", + "refs": { + } + }, + "GetGeoLocationRequest": { + "base": "

    A complex type that contains information about the request to get a geo location.

    ", + "refs": { + } + }, + "GetGeoLocationResponse": { + "base": "

    A complex type containing information about the specified geo location.

    ", + "refs": { + } + }, + "GetHealthCheckCountRequest": { + "base": "

    To retrieve a count of all your health checks, send a GET request to the /Route 53 API version/healthcheckcount resource.

    ", + "refs": { + } + }, + "GetHealthCheckCountResponse": { + "base": "

    A complex type that contains the count of health checks associated with the current AWS account.

    ", + "refs": { + } + }, + "GetHealthCheckLastFailureReasonRequest": { + "base": "

    A complex type that contains information about the request to get the most recent failure reason for a health check.

    ", + "refs": { + } + }, + "GetHealthCheckLastFailureReasonResponse": { + "base": "

    A complex type that contains information about the most recent failure for the specified health check.

    ", + "refs": { + } + }, + "GetHealthCheckRequest": { + "base": "

    A complex type that contains information about the request to get a health check.

    ", + "refs": { + } + }, + "GetHealthCheckResponse": { + "base": "

    A complex type containing information about the specified health check.

    ", + "refs": { + } + }, + "GetHealthCheckStatusRequest": { + "base": "

    A complex type that contains information about the request to get health check status for a health check.

    ", + "refs": { + } + }, + "GetHealthCheckStatusResponse": { + "base": "

    A complex type that contains information about the status of the specified health check.

    ", + "refs": { + } + }, + "GetHostedZoneCountRequest": { + "base": "

    To retrieve a count of all your hosted zones, send a GET request to the /Route 53 API version/hostedzonecount resource.

    ", + "refs": { + } + }, + "GetHostedZoneCountResponse": { + "base": "

    A complex type that contains the count of hosted zones associated with the current AWS account.

    ", + "refs": { + } + }, + "GetHostedZoneRequest": { + "base": "

    The input for a GetHostedZone request.

    ", + "refs": { + } + }, + "GetHostedZoneResponse": { + "base": "

    A complex type containing information about the specified hosted zone.

    ", + "refs": { + } + }, + "GetReusableDelegationSetRequest": { + "base": "

    The input for a GetReusableDelegationSet request.

    ", + "refs": { + } + }, + "GetReusableDelegationSetResponse": { + "base": "

    A complex type containing information about the specified reusable delegation set.

    ", + "refs": { + } + }, + "GetTrafficPolicyInstanceCountRequest": { + "base": "

    To retrieve a count of all your traffic policy instances, send a GET request to the /Route 53 API version/trafficpolicyinstancecount resource.

    ", + "refs": { + } + }, + "GetTrafficPolicyInstanceCountResponse": { + "base": "

    A complex type that contains information about the number of traffic policy instances that are associated with the current AWS account.

    ", + "refs": { + } + }, + "GetTrafficPolicyInstanceRequest": { + "base": "

    Gets information about a specified traffic policy instance.

    To get information about a traffic policy instance, send a GET request to the /Route 53 API version/trafficpolicyinstance/Id resource.

    ", + "refs": { + } + }, + "GetTrafficPolicyInstanceResponse": { + "base": "

    A complex type that contains information about the resource record sets that Amazon Route 53 created based on a specified traffic policy.

    ", + "refs": { + } + }, + "GetTrafficPolicyRequest": { + "base": "

    Gets information about a specific traffic policy version. To get the information, send a GET request to the /Route 53 API version/trafficpolicy resource, and specify the ID and the version of the traffic policy.

    ", + "refs": { + } + }, + "GetTrafficPolicyResponse": { + "base": "

    A complex type that contains the response information for the request.

    ", + "refs": { + } + }, + "HealthCheck": { + "base": "

    A complex type that contains identifying information about the health check.

    ", + "refs": { + "CreateHealthCheckResponse$HealthCheck": "

    A complex type that contains identifying information about the health check.

    ", + "GetHealthCheckResponse$HealthCheck": "

    A complex type that contains the information about the specified health check.

    ", + "HealthChecks$member": null, + "UpdateHealthCheckResponse$HealthCheck": null + } + }, + "HealthCheckAlreadyExists": { + "base": "

    The health check you are trying to create already exists. Amazon Route 53 returns this error when a health check has already been created with the specified CallerReference.

    ", + "refs": { + } + }, + "HealthCheckConfig": { + "base": "

    A complex type that contains the health check configuration.

    ", + "refs": { + "CreateHealthCheckRequest$HealthCheckConfig": "

    A complex type that contains health check configuration.

    ", + "HealthCheck$HealthCheckConfig": "

    A complex type that contains the health check configuration.

    " + } + }, + "HealthCheckCount": { + "base": null, + "refs": { + "GetHealthCheckCountResponse$HealthCheckCount": "

    The number of health checks associated with the current AWS account.

    " + } + }, + "HealthCheckId": { + "base": null, + "refs": { + "ChildHealthCheckList$member": null, + "DeleteHealthCheckRequest$HealthCheckId": "

    The ID of the health check to delete.

    ", + "GetHealthCheckLastFailureReasonRequest$HealthCheckId": "

    The ID of the health check for which you want to retrieve the reason for the most recent failure.

    ", + "GetHealthCheckRequest$HealthCheckId": "

    The ID of the health check to retrieve.

    ", + "GetHealthCheckStatusRequest$HealthCheckId": "

    If you want Amazon Route 53 to return this resource record set in response to a DNS query only when a health check is passing, include the HealthCheckId element and specify the ID of the applicable health check.

    Amazon Route 53 determines whether a resource record set is healthy by periodically sending a request to the endpoint that is specified in the health check. If that endpoint returns an HTTP status code of 2xx or 3xx, the endpoint is healthy. If the endpoint returns an HTTP status code of 400 or greater, or if the endpoint doesn't respond for a certain amount of time, Amazon Route 53 considers the endpoint unhealthy and also considers the resource record set unhealthy.

    The HealthCheckId element is only useful when Amazon Route 53 is choosing between two or more resource record sets to respond to a DNS query, and you want Amazon Route 53 to base the choice in part on the status of a health check. Configuring health checks only makes sense in the following configurations:

    • You're checking the health of the resource record sets in a weighted, latency, geolocation, or failover resource record set, and you specify health check IDs for all of the resource record sets. If the health check for one resource record set specifies an endpoint that is not healthy, Amazon Route 53 stops responding to queries using the value for that resource record set.
    • You set EvaluateTargetHealth to true for the resource record sets in an alias, weighted alias, latency alias, geolocation alias, or failover alias resource record set, and you specify health check IDs for all of the resource record sets that are referenced by the alias resource record sets. For more information about this configuration, see EvaluateTargetHealth.

      Amazon Route 53 doesn't check the health of the endpoint specified in the resource record set, for example, the endpoint specified by the IP address in the Value element. When you add a HealthCheckId element to a resource record set, Amazon Route 53 checks the health of the endpoint that you specified in the health check.

    For geolocation resource record sets, if an endpoint is unhealthy, Amazon Route 53 looks for a resource record set for the larger, associated geographic region. For example, suppose you have resource record sets for a state in the United States, for the United States, for North America, and for all locations. If the endpoint for the state resource record set is unhealthy, Amazon Route 53 checks the resource record sets for the United States, for North America, and for all locations (a resource record set for which the value of CountryCode is *), in that order, until it finds a resource record set for which the endpoint is healthy.

    If your health checks specify the endpoint only by domain name, we recommend that you create a separate health check for each endpoint. For example, create a health check for each HTTP server that is serving content for www.example.com. For the value of FullyQualifiedDomainName, specify the domain name of the server (such as us-east-1-www.example.com), not the name of the resource record sets (example.com).

    In this configuration, if you create a health check for which the value of FullyQualifiedDomainName matches the name of the resource record sets and then associate the health check with those resource record sets, health check results will be unpredictable.", + "HealthCheck$Id": "

    The ID of the specified health check.

    ", + "ResourceRecordSet$HealthCheckId": "

    Health check resource record sets only, not required for alias resource record sets: An identifier that is used to identify the health check associated with the resource record set.
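
    For illustration, a hedged sketch of a PRIMARY/SECONDARY failover pair with health checks attached via HealthCheckId; the zone ID, record name, addresses, and health check IDs are placeholders:

        package main

        import (
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/route53"
        )

        func main() {
            svc := route53.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

            // rrset builds one half of the failover pair.
            rrset := func(id, failover, value, healthCheckID string) *route53.Change {
                return &route53.Change{
                    Action: aws.String("UPSERT"),
                    ResourceRecordSet: &route53.ResourceRecordSet{
                        Name:            aws.String("www.example.com"),
                        Type:            aws.String("A"),
                        SetIdentifier:   aws.String(id),
                        Failover:        aws.String(failover),
                        HealthCheckId:   aws.String(healthCheckID),
                        TTL:             aws.Int64(60),
                        ResourceRecords: []*route53.ResourceRecord{{Value: aws.String(value)}},
                    },
                }
            }

            _, err := svc.ChangeResourceRecordSets(&route53.ChangeResourceRecordSetsInput{
                HostedZoneId: aws.String("Z1EXAMPLE"), // placeholder
                ChangeBatch: &route53.ChangeBatch{
                    Changes: []*route53.Change{
                        rrset("primary", "PRIMARY", "192.0.2.1", "hc-primary-id"),
                        rrset("secondary", "SECONDARY", "192.0.2.2", "hc-secondary-id"),
                    },
                },
            })
            if err != nil {
                log.Fatal(err)
            }
        }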

    ", + "UpdateHealthCheckRequest$HealthCheckId": "

    The ID of the health check to update.

    " + } + }, + "HealthCheckInUse": { + "base": "

    There are resource records associated with this health check. Before you can delete the health check, you must disassociate it from the resource record sets.

    ", + "refs": { + } + }, + "HealthCheckNonce": { + "base": null, + "refs": { + "CreateHealthCheckRequest$CallerReference": "

    A unique string that identifies the request and that allows failed CreateHealthCheck requests to be retried without the risk of executing the operation twice. You must use a unique CallerReference string every time you create a health check. CallerReference can be any unique string; you might choose to use a string that identifies your project.

    Valid characters are any Unicode code points that are legal in an XML 1.0 document. The UTF-8 encoding of the value must be less than 128 bytes.
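
    A minimal sketch of creating an HTTP health check with the vendored Go SDK; the CallerReference string and endpoint details are placeholders you would choose per project:

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/route53"
        )

        func main() {
            svc := route53.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

            out, err := svc.CreateHealthCheck(&route53.CreateHealthCheckInput{
                // A unique string per health check; reusing it with a different
                // configuration triggers HealthCheckAlreadyExists.
                CallerReference: aws.String("my-project-www-2017-07-26"),
                HealthCheckConfig: &route53.HealthCheckConfig{
                    Type:             aws.String("HTTP"),
                    IPAddress:        aws.String("192.0.2.1"), // placeholder
                    Port:             aws.Int64(80),
                    ResourcePath:     aws.String("/health"),
                    RequestInterval:  aws.Int64(30),
                    FailureThreshold: aws.Int64(3),
                },
            })
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println("health check ID:", aws.StringValue(out.HealthCheck.Id))
        }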

    ", + "HealthCheck$CallerReference": "

    A unique string that identifies the request to create the health check.

    " + } + }, + "HealthCheckObservation": { + "base": "

    A complex type that contains the IP address of an Amazon Route 53 health checker and the reason for the health check status.

    ", + "refs": { + "HealthCheckObservations$member": null + } + }, + "HealthCheckObservations": { + "base": null, + "refs": { + "GetHealthCheckLastFailureReasonResponse$HealthCheckObservations": "

    A list that contains one HealthCheckObservation element for each Amazon Route 53 health checker.

    ", + "GetHealthCheckStatusResponse$HealthCheckObservations": "

    A list that contains one HealthCheckObservation element for each Amazon Route 53 health checker.

    " + } + }, + "HealthCheckRegion": { + "base": "

    An Amazon EC2 region that you want Amazon Route 53 to use to perform health checks.

    ", + "refs": { + "HealthCheckObservation$Region": "

    The HealthCheckRegion of the Amazon Route 53 health checker that performed this health check.

    ", + "HealthCheckRegionList$member": null + } + }, + "HealthCheckRegionList": { + "base": null, + "refs": { + "HealthCheckConfig$Regions": "

    A list of HealthCheckRegion values that you want Amazon Route 53 to use to perform health checks for the specified endpoint. You must specify at least three regions.

    ", + "UpdateHealthCheckRequest$Regions": "

    A list of HealthCheckRegion values that specify the Amazon EC2 regions that you want Amazon Route 53 to use to perform health checks. You must specify at least three regions.

    When you remove a region from the list, Amazon Route 53 will briefly continue to check your endpoint from that region.

    Specify this value only if you want to change it.

    " + } + }, + "HealthCheckType": { + "base": null, + "refs": { + "HealthCheckConfig$Type": "

    The type of health check to be performed. Currently supported types are TCP, HTTP, HTTPS, HTTP_STR_MATCH, HTTPS_STR_MATCH, CALCULATED, and CLOUDWATCH_METRIC.

    " + } + }, + "HealthCheckVersion": { + "base": null, + "refs": { + "HealthCheck$HealthCheckVersion": "

    The version of the health check. You can optionally pass this value in a call to UpdateHealthCheck to prevent overwriting another change to the health check.

    ", + "UpdateHealthCheckRequest$HealthCheckVersion": "

    Optional. When you specify a health check version, Amazon Route 53 compares this value with the current value in the health check, which prevents you from updating the health check when the versions don't match. Using HealthCheckVersion lets you prevent overwriting another change to the health check.
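
    A sketch of the optimistic-locking pattern this enables with the vendored Go SDK: read the current HealthCheckVersion, then pass it back with the update. The health check ID is a placeholder:

        package main

        import (
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/route53"
        )

        func main() {
            svc := route53.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))
            id := aws.String("abcdef11-2222-3333-4444-555555fedcba") // placeholder

            // Read the current version first...
            got, err := svc.GetHealthCheck(&route53.GetHealthCheckInput{HealthCheckId: id})
            if err != nil {
                log.Fatal(err)
            }

            // ...then pass it back. If someone else updated the health check
            // in between, Route 53 rejects this update with
            // HealthCheckVersionMismatch instead of silently overwriting.
            _, err = svc.UpdateHealthCheck(&route53.UpdateHealthCheckInput{
                HealthCheckId:      id,
                HealthCheckVersion: got.HealthCheck.HealthCheckVersion,
                FailureThreshold:   aws.Int64(2),
            })
            if err != nil {
                log.Fatal(err)
            }
        }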

    " + } + }, + "HealthCheckVersionMismatch": { + "base": null, + "refs": { + } + }, + "HealthChecks": { + "base": null, + "refs": { + "ListHealthChecksResponse$HealthChecks": "

    A complex type that contains information about the health checks associated with the current AWS account.

    " + } + }, + "HealthThreshold": { + "base": null, + "refs": { + "HealthCheckConfig$HealthThreshold": "

    The minimum number of child health checks that must be healthy for Amazon Route 53 to consider the parent health check to be healthy. Valid values are integers between 0 and 256, inclusive.

    ", + "UpdateHealthCheckRequest$HealthThreshold": "

    The minimum number of child health checks that must be healthy for Amazon Route 53 to consider the parent health check to be healthy. Valid values are integers between 0 and 256, inclusive.

    Specify this value only if you want to change it.
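
    A hedged sketch of a CALCULATED parent health check built from three child checks (the IDs are placeholders), healthy while at least two children are healthy:

        package main

        import (
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/route53"
        )

        func main() {
            svc := route53.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

            // The parent is healthy while at least HealthThreshold (2) of the
            // ChildHealthChecks are healthy.
            _, err := svc.CreateHealthCheck(&route53.CreateHealthCheckInput{
                CallerReference: aws.String("calculated-parent-2017-07-26"),
                HealthCheckConfig: &route53.HealthCheckConfig{
                    Type: aws.String("CALCULATED"),
                    ChildHealthChecks: []*string{
                        aws.String("child-id-1"), // placeholders
                        aws.String("child-id-2"),
                        aws.String("child-id-3"),
                    },
                    HealthThreshold: aws.Int64(2),
                },
            })
            if err != nil {
                log.Fatal(err)
            }
        }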

    " + } + }, + "HostedZone": { + "base": "

    A complex type that contains information about the specified hosted zone.

    ", + "refs": { + "CreateHostedZoneResponse$HostedZone": "

    A complex type that contains identifying information about the hosted zone.

    ", + "GetHostedZoneResponse$HostedZone": "

    A complex type that contains the information about the specified hosted zone.

    ", + "HostedZones$member": null, + "UpdateHostedZoneCommentResponse$HostedZone": null + } + }, + "HostedZoneAlreadyExists": { + "base": "

    The hosted zone you are trying to create already exists. Amazon Route 53 returns this error when a hosted zone has already been created with the specified CallerReference.

    ", + "refs": { + } + }, + "HostedZoneConfig": { + "base": "

    A complex type that contains an optional comment about your hosted zone. If you don't want to specify a comment, you can omit the HostedZoneConfig and Comment elements from the XML document.

    ", + "refs": { + "CreateHostedZoneRequest$HostedZoneConfig": "

    A complex type that contains an optional comment about your hosted zone.

    ", + "HostedZone$Config": "

    A complex type that contains the Comment element.

    " + } + }, + "HostedZoneCount": { + "base": null, + "refs": { + "GetHostedZoneCountResponse$HostedZoneCount": "

    The number of hosted zones associated with the current AWS account.

    " + } + }, + "HostedZoneNotEmpty": { + "base": "

    The hosted zone contains resource record sets in addition to the default NS and SOA resource record sets. Before you can delete the hosted zone, you must delete the additional resource record sets.

    ", + "refs": { + } + }, + "HostedZoneNotFound": { + "base": "

    The specified HostedZone cannot be found.

    ", + "refs": { + } + }, + "HostedZoneRRSetCount": { + "base": null, + "refs": { + "HostedZone$ResourceRecordSetCount": "

    Total number of resource record sets in the hosted zone.

    " + } + }, + "HostedZones": { + "base": null, + "refs": { + "ListHostedZonesByNameResponse$HostedZones": "

    A complex type that contains information about the hosted zones associated with the current AWS account.

    ", + "ListHostedZonesResponse$HostedZones": "

    A complex type that contains information about the hosted zones associated with the current AWS account.

    " + } + }, + "IPAddress": { + "base": null, + "refs": { + "HealthCheckConfig$IPAddress": "

    The IP address of the instance being checked.

    ", + "HealthCheckObservation$IPAddress": "

    The IP address of the Amazon Route 53 health checker that performed this health check.

    ", + "UpdateHealthCheckRequest$IPAddress": "

    The IP address of the resource that you want to check.

    Specify this value only if you want to change it.

    " + } + }, + "IPAddressCidr": { + "base": null, + "refs": { + "CheckerIpRanges$member": null + } + }, + "IncompatibleVersion": { + "base": "

    The resource you are trying to access is unsupported on this Amazon Route 53 endpoint. Please consider using a newer endpoint or a tool that supports this resource.

    ", + "refs": { + } + }, + "InsufficientDataHealthStatus": { + "base": null, + "refs": { + "HealthCheckConfig$InsufficientDataHealthStatus": "

    The status of the health check when CloudWatch has insufficient data about the state of the associated alarm. Valid values are Healthy, Unhealthy, and LastKnownStatus.

    ", + "UpdateHealthCheckRequest$InsufficientDataHealthStatus": null + } + }, + "InvalidArgument": { + "base": "

    At least one of the specified arguments is invalid.

    ", + "refs": { + } + }, + "InvalidChangeBatch": { + "base": "

    This error contains a list of one or more error messages. Each error message indicates one error in the change batch.

    ", + "refs": { + } + }, + "InvalidDomainName": { + "base": "

    This error indicates that the specified domain name is not valid.

    ", + "refs": { + } + }, + "InvalidInput": { + "base": "

    Some value specified in the request is invalid or the XML document is malformed.

    ", + "refs": { + } + }, + "InvalidTrafficPolicyDocument": { + "base": "

    The format of the traffic policy document that you specified in the Document element is invalid.

    ", + "refs": { + } + }, + "InvalidVPCId": { + "base": "

    The VPC that you specified for the hosted zone you are trying to create does not belong to you. Amazon Route 53 returns this error when the VPC specified by VPCId does not belong to you.

    ", + "refs": { + } + }, + "Inverted": { + "base": null, + "refs": { + "HealthCheckConfig$Inverted": "

    A boolean value that indicates whether the status of the health check should be inverted. For example, if a health check is healthy but Inverted is True, then Amazon Route 53 considers the health check to be unhealthy.

    ", + "UpdateHealthCheckRequest$Inverted": "

    A boolean value that indicates whether the status of the health check should be inverted. For example, if a health check is healthy but Inverted is True, then Amazon Route 53 considers the health check to be unhealthy.

    Specify this value only if you want to change it.

    " + } + }, + "IsPrivateZone": { + "base": null, + "refs": { + "HostedZoneConfig$PrivateZone": "

    GetHostedZone and ListHostedZones responses: A Boolean value that indicates whether a hosted zone is private.

    CreateHostedZone requests: When you're creating a private hosted zone (when you specify values for VPCId and VPCRegion), you can optionally specify true for PrivateZone.
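
    A minimal sketch of creating such a private hosted zone with the vendored Go SDK; the domain name, VPC ID, and CallerReference are placeholders:

        package main

        import (
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/route53"
        )

        func main() {
            svc := route53.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

            _, err := svc.CreateHostedZone(&route53.CreateHostedZoneInput{
                Name:            aws.String("internal.example.com"),
                CallerReference: aws.String("DNSMigration_01"),
                // Supplying VPCId and VPCRegion makes this a private zone;
                // PrivateZone may then optionally be set to true as well.
                VPC: &route53.VPC{
                    VPCId:     aws.String("vpc-1a2b3c4d"), // placeholder
                    VPCRegion: aws.String("us-east-1"),
                },
                HostedZoneConfig: &route53.HostedZoneConfig{
                    Comment:     aws.String("private zone for internal services"),
                    PrivateZone: aws.Bool(true),
                },
            })
            if err != nil {
                log.Fatal(err)
            }
        }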

    " + } + }, + "LastVPCAssociation": { + "base": "

    The VPC you are trying to disassociate from the hosted zone is the last VPC that is associated with the hosted zone. Amazon Route 53 currently doesn't support disassociating the last VPC from a hosted zone.

    ", + "refs": { + } + }, + "LimitsExceeded": { + "base": "

    The limits specified for a resource have been exceeded.

    ", + "refs": { + } + }, + "ListChangeBatchesByHostedZoneRequest": { + "base": "

    The input for a ListChangeBatchesByHostedZone request.

    ", + "refs": { + } + }, + "ListChangeBatchesByHostedZoneResponse": { + "base": "

    The output for a ListChangeBatchesByHostedZone request.

    ", + "refs": { + } + }, + "ListChangeBatchesByRRSetRequest": { + "base": "

    The input for a ListChangeBatchesByRRSet request.

    ", + "refs": { + } + }, + "ListChangeBatchesByRRSetResponse": { + "base": "

    The output for a ListChangeBatchesByRRSet request.

    ", + "refs": { + } + }, + "ListGeoLocationsRequest": { + "base": "

    The input for a ListGeoLocations request.

    ", + "refs": { + } + }, + "ListGeoLocationsResponse": { + "base": "

    A complex type that contains information about the geo locations that are returned by the request and information about the response.

    ", + "refs": { + } + }, + "ListHealthChecksRequest": { + "base": "

    To retrieve a list of your health checks, send a GET request to the /Route 53 API version/healthcheck resource. The response to this request includes a HealthChecks element with zero or more HealthCheck child elements. By default, the list of health checks is displayed on a single page. You can control the length of the page that is displayed by using the MaxItems parameter. You can use the Marker parameter to control the health check that the list begins with.

    Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to a value greater than 100, Amazon Route 53 returns only the first 100.", + "refs": { + } + }, + "ListHealthChecksResponse": { + "base": "

    A complex type that contains the response for the request.

    ", + "refs": { + } + }, + "ListHostedZonesByNameRequest": { + "base": "

    To retrieve a list of your hosted zones in lexicographic order, send a GET request to the /Route 53 API version/hostedzonesbyname resource. The response to this request includes a HostedZones element with zero or more HostedZone child elements lexicographically ordered by DNS name. By default, the list of hosted zones is displayed on a single page. You can control the length of the page that is displayed by using the MaxItems parameter. You can use the DNSName and HostedZoneId parameters to control the hosted zone that the list begins with.
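
    A sketch of paging through zones in name order with the vendored Go SDK, resuming from NextDNSName and NextHostedZoneId as described above (the MaxItems of 50 is arbitrary):

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/route53"
        )

        func main() {
            svc := route53.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

            in := &route53.ListHostedZonesByNameInput{MaxItems: aws.String("50")}
            for {
                out, err := svc.ListHostedZonesByName(in)
                if err != nil {
                    log.Fatal(err)
                }
                for _, z := range out.HostedZones {
                    fmt.Println(aws.StringValue(z.Name))
                }
                if !aws.BoolValue(out.IsTruncated) {
                    break
                }
                // Resume with both values; the DNS name alone is ambiguous
                // when several zones share the same name.
                in.DNSName = out.NextDNSName
                in.HostedZoneId = out.NextHostedZoneId
            }
        }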

    ", + "refs": { + } + }, + "ListHostedZonesByNameResponse": { + "base": "

    A complex type that contains the response for the request.

    ", + "refs": { + } + }, + "ListHostedZonesRequest": { + "base": "

    To retrieve a list of your hosted zones, send a GET request to the /Route 53 API version/hostedzone resource. The response to this request includes a HostedZones element with zero or more HostedZone child elements. By default, the list of hosted zones is displayed on a single page. You can control the length of the page that is displayed by using the MaxItems parameter. You can use the Marker parameter to control the hosted zone that the list begins with.

    Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to a value greater than 100, Amazon Route 53 returns only the first 100.", + "refs": { + } + }, + "ListHostedZonesResponse": { + "base": "

    A complex type that contains the response for the request.

    ", + "refs": { + } + }, + "ListResourceRecordSetsRequest": { + "base": "

    The input for a ListResourceRecordSets request.

    ", + "refs": { + } + }, + "ListResourceRecordSetsResponse": { + "base": "

    A complex type that contains information about the resource record sets that are returned by the request and information about the response.

    ", + "refs": { + } + }, + "ListReusableDelegationSetsRequest": { + "base": "

    To retrieve a list of your reusable delegation sets, send a GET request to the /Route 53 API version/delegationset resource. The response to this request includes a DelegationSets element with zero or more DelegationSet child elements. By default, the list of reusable delegation sets is displayed on a single page. You can control the length of the page that is displayed by using the MaxItems parameter. You can use the Marker parameter to control the delegation set that the list begins with.

    Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to a value greater than 100, Amazon Route 53 returns only the first 100.", + "refs": { + } + }, + "ListReusableDelegationSetsResponse": { + "base": "

    A complex type that contains the response for the request.

    ", + "refs": { + } + }, + "ListTagsForResourceRequest": { + "base": "

    A complex type containing information about a request for a list of the tags that are associated with an individual resource.

    ", + "refs": { + } + }, + "ListTagsForResourceResponse": { + "base": "

    A complex type containing tags for the specified resource.

    ", + "refs": { + } + }, + "ListTagsForResourcesRequest": { + "base": "

    A complex type containing information about a request for a list of the tags that are associated with up to 10 specified resources.

    ", + "refs": { + } + }, + "ListTagsForResourcesResponse": { + "base": "

    A complex type containing tags for the specified resources.

    ", + "refs": { + } + }, + "ListTrafficPoliciesRequest": { + "base": "

    A complex type that contains the information about the request to list the traffic policies that are associated with the current AWS account.

    ", + "refs": { + } + }, + "ListTrafficPoliciesResponse": { + "base": "

    A complex type that contains the response information for the request.

    ", + "refs": { + } + }, + "ListTrafficPolicyInstancesByHostedZoneRequest": { + "base": "

    A request for the traffic policy instances that you created in a specified hosted zone.

    ", + "refs": { + } + }, + "ListTrafficPolicyInstancesByHostedZoneResponse": { + "base": "

    A complex type that contains the response information for the request.

    ", + "refs": { + } + }, + "ListTrafficPolicyInstancesByPolicyRequest": { + "base": "

    A complex type that contains the information about the request to list your traffic policy instances.

    ", + "refs": { + } + }, + "ListTrafficPolicyInstancesByPolicyResponse": { + "base": "

    A complex type that contains the response information for the request.

    ", + "refs": { + } + }, + "ListTrafficPolicyInstancesRequest": { + "base": "

    A complex type that contains the information about the request to list your traffic policy instances.

    ", + "refs": { + } + }, + "ListTrafficPolicyInstancesResponse": { + "base": "

    A complex type that contains the response information for the request.

    ", + "refs": { + } + }, + "ListTrafficPolicyVersionsRequest": { + "base": "

    A complex type that contains the information about the request to list your traffic policies.

    ", + "refs": { + } + }, + "ListTrafficPolicyVersionsResponse": { + "base": "

    A complex type that contains the response information for the request.

    ", + "refs": { + } + }, + "MeasureLatency": { + "base": null, + "refs": { + "HealthCheckConfig$MeasureLatency": "

    A Boolean value that indicates whether you want Amazon Route 53 to measure the latency between health checkers in multiple AWS regions and your endpoint and to display CloudWatch latency graphs in the Amazon Route 53 console.

    " + } + }, + "Message": { + "base": null, + "refs": { + "TrafficPolicyInstance$Message": null + } + }, + "MetricName": { + "base": null, + "refs": { + "CloudWatchAlarmConfiguration$MetricName": "

    The name of the CloudWatch metric that is associated with the CloudWatch alarm.

    " + } + }, + "Namespace": { + "base": null, + "refs": { + "CloudWatchAlarmConfiguration$Namespace": "

    The namespace of the CloudWatch metric that is associated with the CloudWatch alarm.

    " + } + }, + "NoSuchChange": { + "base": null, + "refs": { + } + }, + "NoSuchDelegationSet": { + "base": "

    The specified delegation set does not exist.

    ", + "refs": { + } + }, + "NoSuchGeoLocation": { + "base": "

    The geo location you are trying to get does not exist.

    ", + "refs": { + } + }, + "NoSuchHealthCheck": { + "base": "

    The health check you are trying to get or delete does not exist.

    ", + "refs": { + } + }, + "NoSuchHostedZone": { + "base": null, + "refs": { + } + }, + "NoSuchTrafficPolicy": { + "base": "

    No traffic policy exists with the specified ID.

    ", + "refs": { + } + }, + "NoSuchTrafficPolicyInstance": { + "base": "

    No traffic policy instance exists with the specified ID.

    ", + "refs": { + } + }, + "Nonce": { + "base": null, + "refs": { + "CreateHostedZoneRequest$CallerReference": "

    A unique string that identifies the request and that allows failed CreateHostedZone requests to be retried without the risk of executing the operation twice. You must use a unique CallerReference string every time you create a hosted zone. CallerReference can be any unique string; you might choose to use a string that identifies your project, such as DNSMigration_01.

    Valid characters are any Unicode code points that are legal in an XML 1.0 document. The UTF-8 encoding of the value must be less than 128 bytes.

    ", + "CreateReusableDelegationSetRequest$CallerReference": "

    A unique string that identifies the request and that allows failed CreateReusableDelegationSet requests to be retried without the risk of executing the operation twice. You must use a unique CallerReference string every time you create a reusable delegation set. CallerReference can be any unique string; you might choose to use a string that identifies your project, such as DNSMigration_01.

    Valid characters are any Unicode code points that are legal in an XML 1.0 document. The UTF-8 encoding of the value must be less than 128 bytes.

    ", + "DelegationSet$CallerReference": null, + "HostedZone$CallerReference": "

    A unique string that identifies the request to create the hosted zone.

    " + } + }, + "PageMarker": { + "base": null, + "refs": { + "ListChangeBatchesByHostedZoneRequest$Marker": "

    The page marker.

    ", + "ListChangeBatchesByHostedZoneResponse$Marker": "

    The page marker.

    ", + "ListChangeBatchesByHostedZoneResponse$NextMarker": "

    The next page marker.

    ", + "ListChangeBatchesByRRSetRequest$Marker": "

    The page marker.

    ", + "ListChangeBatchesByRRSetResponse$Marker": "

    The page marker.

    ", + "ListChangeBatchesByRRSetResponse$NextMarker": "

    The next page marker.

    ", + "ListHealthChecksRequest$Marker": "

    If the request returned more than one page of results, submit another request and specify the value of NextMarker from the last response in the marker parameter to get the next page of results.

    ", + "ListHealthChecksResponse$Marker": "

    If the request returned more than one page of results, submit another request and specify the value of NextMarker from the last response in the marker parameter to get the next page of results.

    ", + "ListHealthChecksResponse$NextMarker": "

    Indicates where to continue listing health checks. If IsTruncated is true, make another request to ListHealthChecks and include the value of the NextMarker element in the Marker element to get the next page of results.
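
    A sketch of that loop with the vendored Go SDK: each response's NextMarker becomes the next request's Marker until IsTruncated is false:

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/route53"
        )

        func main() {
            svc := route53.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

            in := &route53.ListHealthChecksInput{MaxItems: aws.String("100")}
            for {
                out, err := svc.ListHealthChecks(in)
                if err != nil {
                    log.Fatal(err)
                }
                for _, hc := range out.HealthChecks {
                    fmt.Println(aws.StringValue(hc.Id))
                }
                if !aws.BoolValue(out.IsTruncated) {
                    break
                }
                // NextMarker of this page becomes Marker of the next request.
                in.Marker = out.NextMarker
            }
        }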

    ", + "ListHostedZonesRequest$Marker": "

    If the request returned more than one page of results, submit another request and specify the value of NextMarker from the last response in the marker parameter to get the next page of results.

    ", + "ListHostedZonesResponse$Marker": "

    If the request returned more than one page of results, submit another request and specify the value of NextMarker from the last response in the marker parameter to get the next page of results.

    ", + "ListHostedZonesResponse$NextMarker": "

    Indicates where to continue listing hosted zones. If IsTruncated is true, make another request to ListHostedZones and include the value of the NextMarker element in the Marker element to get the next page of results.

    ", + "ListReusableDelegationSetsRequest$Marker": "

    If the request returned more than one page of results, submit another request and specify the value of NextMarker from the last response in the marker parameter to get the next page of results.

    ", + "ListReusableDelegationSetsResponse$Marker": "

    If the request returned more than one page of results, submit another request and specify the value of NextMarker from the last response in the marker parameter to get the next page of results.

    ", + "ListReusableDelegationSetsResponse$NextMarker": "

    Indicates where to continue listing reusable delegation sets. If IsTruncated is true, make another request to ListReusableDelegationSets and include the value of the NextMarker element in the Marker element of the previous response to get the next page of results.

    " + } + }, + "PageMaxItems": { + "base": null, + "refs": { + "ListChangeBatchesByHostedZoneRequest$MaxItems": "

    The maximum number of items on a page.

    ", + "ListChangeBatchesByHostedZoneResponse$MaxItems": "

    The maximum number of items on a page.

    ", + "ListChangeBatchesByRRSetRequest$MaxItems": "

    The maximum number of items on a page.

    ", + "ListChangeBatchesByRRSetResponse$MaxItems": "

    The maximum number of items on a page.

    ", + "ListGeoLocationsRequest$MaxItems": "

    The maximum number of geo locations you want in the response body.

    ", + "ListGeoLocationsResponse$MaxItems": "

    The maximum number of records you requested. The maximum value of MaxItems is 100.

    ", + "ListHealthChecksRequest$MaxItems": "

    Specify the maximum number of health checks to return per page of results.

    ", + "ListHealthChecksResponse$MaxItems": "

    The maximum number of health checks to be included in the response body. If the number of health checks associated with this AWS account exceeds MaxItems, the value of IsTruncated in the response is true. Call ListHealthChecks again and specify the value of NextMarker from the last response in the Marker element of the next request to get the next page of results.

    ", + "ListHostedZonesByNameRequest$MaxItems": "

    Specify the maximum number of hosted zones to return per page of results.

    ", + "ListHostedZonesByNameResponse$MaxItems": "

    The maximum number of hosted zones to be included in the response body. If the number of hosted zones associated with this AWS account exceeds MaxItems, the value of IsTruncated in the ListHostedZonesByNameResponse is true. Call ListHostedZonesByName again and specify the value of NextDNSName and NextHostedZoneId elements from the previous response to get the next page of results.

    ", + "ListHostedZonesRequest$MaxItems": "

    Specify the maximum number of hosted zones to return per page of results.

    ", + "ListHostedZonesResponse$MaxItems": "

    The maximum number of hosted zones to be included in the response body. If the number of hosted zones associated with this AWS account exceeds MaxItems, the value of IsTruncated in the response is true. Call ListHostedZones again and specify the value of NextMarker in the Marker parameter to get the next page of results.

    ", + "ListResourceRecordSetsRequest$MaxItems": "

    The maximum number of records you want in the response body.

    ", + "ListResourceRecordSetsResponse$MaxItems": "

    The maximum number of records you requested. The maximum value of MaxItems is 100.

    ", + "ListReusableDelegationSetsRequest$MaxItems": "

    Specify the maximum number of reusable delegation sets to return per page of results.

    ", + "ListReusableDelegationSetsResponse$MaxItems": "

    The maximum number of reusable delegation sets to be included in the response body. If the number of reusable delegation sets associated with this AWS account exceeds MaxItems, the value of IsTruncated in the response is true. To get the next page of results, call ListReusableDelegationSets again and specify the value of NextMarker from the previous response in the Marker element of the request.

    ", + "ListTrafficPoliciesRequest$MaxItems": "

    The maximum number of traffic policies to be included in the response body for this request. If you have more than MaxItems traffic policies, the value of the IsTruncated element in the response is true, and the value of the TrafficPolicyIdMarker element is the ID of the first traffic policy in the next group of MaxItems traffic policies.

    ", + "ListTrafficPoliciesResponse$MaxItems": "

    The value that you specified for the MaxItems parameter in the call to ListTrafficPolicies that produced the current response.

    ", + "ListTrafficPolicyInstancesByHostedZoneRequest$MaxItems": "

    The maximum number of traffic policy instances to be included in the response body for this request. If you have more than MaxItems traffic policy instances, the value of the IsTruncated element in the response is true, and the values of HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker represent the first traffic policy instance in the next group of MaxItems traffic policy instances.

    ", + "ListTrafficPolicyInstancesByHostedZoneResponse$MaxItems": "

    The value that you specified for the MaxItems parameter in the call to ListTrafficPolicyInstancesByHostedZone that produced the current response.

    ", + "ListTrafficPolicyInstancesByPolicyRequest$MaxItems": "

    The maximum number of traffic policy instances to be included in the response body for this request. If you have more than MaxItems traffic policy instances, the value of the IsTruncated element in the response is true, and the values of HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker represent the first traffic policy instance in the next group of MaxItems traffic policy instances.

    ", + "ListTrafficPolicyInstancesByPolicyResponse$MaxItems": "

    The value that you specified for the MaxItems parameter in the call to ListTrafficPolicyInstancesByPolicy that produced the current response.

    ", + "ListTrafficPolicyInstancesRequest$MaxItems": "

    The maximum number of traffic policy instances to be included in the response body for this request. If you have more than MaxItems traffic policy instances, the value of the IsTruncated element in the response is true, and the values of HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker represent the first traffic policy instance in the next group of MaxItems traffic policy instances.

    ", + "ListTrafficPolicyInstancesResponse$MaxItems": "

    The value that you specified for the MaxItems parameter in the call to ListTrafficPolicyInstances that produced the current response.

    ", + "ListTrafficPolicyVersionsRequest$MaxItems": "

    The maximum number of traffic policy versions that you want Amazon Route 53 to include in the response body for this request. If the specified traffic policy has more than MaxItems versions, the value of the IsTruncated element in the response is true, and the value of the TrafficPolicyVersionMarker element is the ID of the first version in the next group of MaxItems traffic policy versions.

    ", + "ListTrafficPolicyVersionsResponse$MaxItems": "

    The value that you specified for the maxitems parameter in the call to ListTrafficPolicyVersions that produced the current response.

    " + } + }, + "PageTruncated": { + "base": null, + "refs": { + "ListChangeBatchesByHostedZoneResponse$IsTruncated": "

    A flag that indicates if there are more change batches to list.

    ", + "ListChangeBatchesByRRSetResponse$IsTruncated": "

    A flag that indicates if there are more change batches to list.

    ", + "ListGeoLocationsResponse$IsTruncated": "

    A flag that indicates whether there are more geo locations to be listed. If your results were truncated, you can make a follow-up request for the next page of results by using the values included in the NextContinentCode, NextCountryCode, and NextSubdivisionCode elements.

    Valid Values: true | false

    ", + "ListHealthChecksResponse$IsTruncated": "

    A flag indicating whether there are more health checks to be listed. If your results were truncated, you can make a follow-up request for the next page of results by using the Marker element.

    Valid Values: true | false

    ", + "ListHostedZonesByNameResponse$IsTruncated": "

    A flag indicating whether there are more hosted zones to be listed. If your results were truncated, you can make a follow-up request for the next page of results by using the NextDNSName and NextHostedZoneId elements.

    Valid Values: true | false

    ", + "ListHostedZonesResponse$IsTruncated": "

    A flag indicating whether there are more hosted zones to be listed. If your results were truncated, you can make a follow-up request for the next page of results by using the Marker element.

    Valid Values: true | false

    ", + "ListResourceRecordSetsResponse$IsTruncated": "

    A flag that indicates whether there are more resource record sets to be listed. If your results were truncated, you can make a follow-up request for the next page of results by using the NextRecordName element.

    Valid Values: true | false

    ", + "ListReusableDelegationSetsResponse$IsTruncated": "

    A flag indicating whether there are more reusable delegation sets to be listed. If your results were truncated, you can make a follow-up request for the next page of results by using the Marker element.

    Valid Values: true | false

    ", + "ListTrafficPoliciesResponse$IsTruncated": "

    A flag that indicates whether there are more traffic policies to be listed. If the response was truncated, you can get the next group of MaxItems traffic policies by calling ListTrafficPolicies again and specifying the value of the TrafficPolicyIdMarker element in the TrafficPolicyIdMarker request parameter.

    Valid Values: true | false

    ", + "ListTrafficPolicyInstancesByHostedZoneResponse$IsTruncated": "

    A flag that indicates whether there are more traffic policy instances to be listed. If the response was truncated, you can get the next group of MaxItems traffic policy instances by calling ListTrafficPolicyInstancesByHostedZone again and specifying the values of the HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker elements in the corresponding request parameters.

    Valid Values: true | false

    ", + "ListTrafficPolicyInstancesByPolicyResponse$IsTruncated": "

    A flag that indicates whether there are more traffic policy instances to be listed. If the response was truncated, you can get the next group of MaxItems traffic policy instances by calling ListTrafficPolicyInstancesByPolicy again and specifying the values of the HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker elements in the corresponding request parameters.

    Valid Values: true | false

    ", + "ListTrafficPolicyInstancesResponse$IsTruncated": "

    A flag that indicates whether there are more traffic policy instances to be listed. If the response was truncated, you can get the next group of MaxItems traffic policy instances by calling ListTrafficPolicyInstances again and specifying the values of the HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker elements in the corresponding request parameters.

    Valid Values: true | false

    ", + "ListTrafficPolicyVersionsResponse$IsTruncated": "

    A flag that indicates whether there are more traffic policies to be listed. If the response was truncated, you can get the next group of maxitems traffic policies by calling ListTrafficPolicyVersions again and specifying the value of the NextMarker element in the marker parameter.

    Valid Values: true | false

    " + } + }, + "Period": { + "base": null, + "refs": { + "CloudWatchAlarmConfiguration$Period": "

    An integer that represents the period in seconds over which the statistic is applied.

    " + } + }, + "Port": { + "base": null, + "refs": { + "HealthCheckConfig$Port": "

    The port on which Amazon Route 53 opens a connection to the instance to perform health checks. For HTTP and HTTP_STR_MATCH, this defaults to 80 if the port is not specified. For HTTPS and HTTPS_STR_MATCH, this defaults to 443 if the port is not specified.

    ", + "UpdateHealthCheckRequest$Port": "

    The port on which you want Amazon Route 53 to open a connection to perform health checks.

    Specify this value only if you want to change it.

    " + } + }, + "PriorRequestNotComplete": { + "base": "

    The request was rejected because Amazon Route 53 was still processing a prior request.

    ", + "refs": { + } + }, + "PublicZoneVPCAssociation": { + "base": "

    The hosted zone you are trying to associate a VPC with doesn't have any VPC association. Amazon Route 53 currently doesn't support associating a VPC with a public hosted zone.

    ", + "refs": { + } + }, + "RData": { + "base": null, + "refs": { + "ResourceRecord$Value": "

    The current or new DNS record value, not to exceed 4,000 characters. In the case of a DELETE action, if the current value does not match the actual value, an error is returned. For descriptions about how to format Value for different record types, see Supported DNS Resource Record Types in the Amazon Route 53 Developer Guide.

    You can specify more than one value for all record types except CNAME and SOA.

    " + } + }, + "RRType": { + "base": null, + "refs": { + "ListChangeBatchesByRRSetRequest$Type": "

    The type of the RRSet that you want to see changes for.

    ", + "ListResourceRecordSetsRequest$StartRecordType": "

    The DNS type at which to begin the listing of resource record sets.

    Valid values: A | AAAA | CNAME | MX | NS | PTR | SOA | SPF | SRV | TXT

    Values for Weighted Resource Record Sets: A | AAAA | CNAME | TXT

    Values for Regional Resource Record Sets: A | AAAA | CNAME | TXT

    Values for Alias Resource Record Sets: A | AAAA

    Constraint: Specifying type without specifying name returns an InvalidInput error.
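
    A minimal sketch of starting the listing at a given name and type with the vendored Go SDK; the zone ID and record name are placeholders:

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/route53"
        )

        func main() {
            svc := route53.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

            // StartRecordType is only valid together with StartRecordName;
            // type without name returns an InvalidInput error.
            out, err := svc.ListResourceRecordSets(&route53.ListResourceRecordSetsInput{
                HostedZoneId:    aws.String("Z1EXAMPLE"), // placeholder
                StartRecordName: aws.String("www.example.com"),
                StartRecordType: aws.String("CNAME"),
                MaxItems:        aws.String("100"),
            })
            if err != nil {
                log.Fatal(err)
            }
            for _, rr := range out.ResourceRecordSets {
                fmt.Println(aws.StringValue(rr.Name), aws.StringValue(rr.Type))
            }
        }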

    ", + "ListResourceRecordSetsResponse$NextRecordType": "

    If the results were truncated, the type of the next record in the list. This element is present only if IsTruncated is true.

    ", + "ListTrafficPolicyInstancesByHostedZoneRequest$TrafficPolicyInstanceTypeMarker": "

    For the first request to ListTrafficPolicyInstancesByHostedZone, omit this value.

    If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceTypeMarker is the DNS type of the first traffic policy instance in the next group of MaxItems traffic policy instances.

    If the value of IsTruncated in the previous response was false, there are no more traffic policy instances to get for this hosted zone.

    ", + "ListTrafficPolicyInstancesByHostedZoneResponse$TrafficPolicyInstanceTypeMarker": "

    If IsTruncated is true, TrafficPolicyInstanceTypeMarker is the DNS type of the resource record sets that are associated with the first traffic policy instance in the next group of MaxItems traffic policy instances.

    ", + "ListTrafficPolicyInstancesByPolicyRequest$TrafficPolicyInstanceTypeMarker": "

    For the first request to ListTrafficPolicyInstancesByPolicy, omit this value.

    If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceTypeMarker is the DNS type of the first traffic policy instance in the next group of MaxItems traffic policy instances.

    If the value of IsTruncated in the previous response was false, there are no more traffic policy instances to get for this hosted zone.

    ", + "ListTrafficPolicyInstancesByPolicyResponse$TrafficPolicyInstanceTypeMarker": "

    If IsTruncated is true, TrafficPolicyInstanceTypeMarker is the DNS type of the resource record sets that are associated with the first traffic policy instance in the next group of MaxItems traffic policy instances.

    ", + "ListTrafficPolicyInstancesRequest$TrafficPolicyInstanceTypeMarker": "

    For the first request to ListTrafficPolicyInstances, omit this value.

    If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceTypeMarker is the DNS type of the first traffic policy instance in the next group of MaxItems traffic policy instances.

    If the value of IsTruncated in the previous response was false, there are no more traffic policy instances to get.
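
    A sketch of paging through all traffic policy instances with the vendored Go SDK, carrying the three markers forward together:

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/route53"
        )

        func main() {
            svc := route53.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

            in := &route53.ListTrafficPolicyInstancesInput{MaxItems: aws.String("100")}
            for {
                out, err := svc.ListTrafficPolicyInstances(in)
                if err != nil {
                    log.Fatal(err)
                }
                for _, ti := range out.TrafficPolicyInstances {
                    fmt.Println(aws.StringValue(ti.Id))
                }
                if !aws.BoolValue(out.IsTruncated) {
                    break
                }
                // All three markers together identify the first instance of
                // the next group of MaxItems instances.
                in.HostedZoneIdMarker = out.HostedZoneIdMarker
                in.TrafficPolicyInstanceNameMarker = out.TrafficPolicyInstanceNameMarker
                in.TrafficPolicyInstanceTypeMarker = out.TrafficPolicyInstanceTypeMarker
            }
        }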

    ", + "ListTrafficPolicyInstancesResponse$TrafficPolicyInstanceTypeMarker": "

    If IsTruncated is true, TrafficPolicyInstanceTypeMarker is the DNS type of the resource record sets that are associated with the first traffic policy instance in the next group of MaxItems traffic policy instances.

    ", + "ResourceRecordSet$Type": "

    The DNS record type. For information about different record types and how data is encoded for them, see Supported DNS Resource Record Types in the Amazon Route 53 Developer Guide.

    Valid values for basic resource record sets: A | AAAA | CNAME | MX | NS | PTR | SOA | SPF | SRV | TXT

    Values for weighted, latency, geolocation, and failover resource record sets: A | AAAA | CNAME | MX | PTR | SPF | SRV | TXT. When creating a group of weighted, latency, geolocation, or failover resource record sets, specify the same value for all of the resource record sets in the group.

    SPF records were formerly used to verify the identity of the sender of email messages. However, we no longer recommend that you create resource record sets for which the value of Type is SPF. RFC 7208, Sender Policy Framework (SPF) for Authorizing Use of Domains in Email, Version 1, has been updated to say, "...[I]ts existence and mechanism defined in [RFC4408] have led to some interoperability issues. Accordingly, its use is no longer appropriate for SPF version 1; implementations are not to use it." In RFC 7208, see section 14.1, The SPF DNS Record Type.

    Values for alias resource record sets:

    • CloudFront distributions: A
    • ELB load balancers: A | AAAA
    • Amazon S3 buckets: A
    • Another resource record set in this hosted zone: Specify the type of the resource record set for which you're creating the alias. Specify any value except NS or SOA.
    ", + "TrafficPolicy$Type": null, + "TrafficPolicyInstance$TrafficPolicyType": null, + "TrafficPolicySummary$Type": null + } + }, + "RequestInterval": { + "base": null, + "refs": { + "HealthCheckConfig$RequestInterval": "

    The number of seconds between the time that Amazon Route 53 gets a response from your endpoint and the time that it sends the next health-check request.

    Each Amazon Route 53 health checker makes requests at this interval. Valid values are 10 and 30. The default value is 30.

    " + } + }, + "ResourceDescription": { + "base": null, + "refs": { + "ChangeBatch$Comment": "

    Optional: Any comments you want to include about a change batch request.

    ", + "ChangeBatchRecord$Comment": "

    A comment that you specified when you submitted the change batch request, if any.

    ", + "ChangeInfo$Comment": "

    A comment that you specified when you submitted the change batch request, if any.

    ", + "HostedZoneConfig$Comment": "

    An optional comment about your hosted zone. If you don't want to specify a comment, you can omit the HostedZoneConfig and Comment elements from the XML document.

    ", + "UpdateHostedZoneCommentRequest$Comment": "

    A comment about your hosted zone.

    " + } + }, + "ResourceId": { + "base": null, + "refs": { + "AliasTarget$HostedZoneId": "

    Alias resource record sets only: The value you use depends on where you want to route queries:

    • A CloudFront distribution: Specify Z2FDTNDATAQYW2.
    • An ELB load balancer: Specify the value of the hosted zone ID for the load balancer. You can get the hosted zone ID by using the AWS Management Console, the ELB API, or the AWS CLI. Use the same method to get values for HostedZoneId and DNSName. If you get one value from the console and the other value from the API or the CLI, creating the resource record set will fail.
    • An Amazon S3 bucket that is configured as a static website: Specify the hosted zone ID for the Amazon S3 website endpoint in which you created the bucket. For more information about valid values, see the table Amazon Simple Storage Service (S3) Website Endpoints in the Amazon Web Services General Reference.
    • Another Amazon Route 53 resource record set in your hosted zone: Specify the hosted zone ID of your hosted zone. (An alias resource record set cannot reference a resource record set in a different hosted zone.)
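
    As a rough illustration of the CloudFront case above, the alias record set below uses the fixed Z2FDTNDATAQYW2 hosted zone ID; the distribution domain name is a placeholder. It would be submitted with the same ChangeResourceRecordSets call shown earlier:

        package example

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/route53"
        )

        // Alias A record for a CloudFront distribution. Note there is no TTL
        // and no ResourceRecords element: Route 53 resolves the alias target.
        var cloudFrontAlias = &route53.ResourceRecordSet{
            Name: aws.String("example.com."),
            Type: aws.String("A"), // CloudFront aliases are type A
            AliasTarget: &route53.AliasTarget{
                HostedZoneId:         aws.String("Z2FDTNDATAQYW2"),            // fixed ID for CloudFront
                DNSName:              aws.String("d123example.cloudfront.net."), // placeholder
                EvaluateTargetHealth: aws.Bool(false),
            },
        }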
    ", + "AssociateVPCWithHostedZoneRequest$HostedZoneId": "

    The ID of the hosted zone you want to associate your VPC with.

    Note that you cannot associate a VPC with a hosted zone that doesn't have an existing VPC association.

    ", + "ChangeBatchRecord$Id": "

    The ID of the request. Use this ID to track when the change has completed across all Amazon Route 53 DNS servers.

    ", + "ChangeInfo$Id": "

    The ID of the request. Use this ID to track when the change has completed across all Amazon Route 53 DNS servers.
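
    Because this patch also vendors a ResourceRecordSetsChanged waiter for Route 53 (see the waiters-2.json added below), a Go caller can block on this ID until the change reaches INSYNC; a sketch with a placeholder change ID:

        package main

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/route53"
        )

        func main() {
            svc := route53.New(session.Must(session.NewSession()))

            // The ID returned in ChangeInfo.Id by ChangeResourceRecordSets
            // (placeholder value). Per the waiter definition in this patch,
            // GetChange is polled every 30 seconds, up to 60 attempts, until
            // ChangeInfo.Status is INSYNC.
            err := svc.WaitUntilResourceRecordSetsChanged(&route53.GetChangeInput{
                Id: aws.String("C2EXAMPLE"),
            })
            if err != nil {
                panic(err)
            }
        }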

    ", + "ChangeResourceRecordSetsRequest$HostedZoneId": "

    The ID of the hosted zone that contains the resource record sets that you want to change.

    ", + "CreateHostedZoneRequest$DelegationSetId": "

    The ID of the reusable delegation set whose NS records you want to assign to the new hosted zone.

    ", + "CreateReusableDelegationSetRequest$HostedZoneId": "

    The ID of the hosted zone whose delegation set you want to mark as reusable. This is an optional parameter.

    ", + "CreateTrafficPolicyInstanceRequest$HostedZoneId": "

    The ID of the hosted zone in which you want Amazon Route 53 to create resource record sets by using the configuration in a traffic policy.

    ", + "DelegationSet$Id": null, + "DeleteHostedZoneRequest$Id": "

    The ID of the hosted zone you want to delete.

    ", + "DeleteReusableDelegationSetRequest$Id": "

    The ID of the reusable delegation set you want to delete.

    ", + "DisassociateVPCFromHostedZoneRequest$HostedZoneId": "

    The ID of the hosted zone you want to disassociate your VPC from.

    Note that you cannot disassociate the last VPC from a hosted zone.

    ", + "GetChangeDetailsRequest$Id": "

    The ID of the change batch request. The value that you specify here is the value that ChangeResourceRecordSets returned in the Id element when you submitted the request.

    ", + "GetChangeRequest$Id": "

    The ID of the change batch request. The value that you specify here is the value that ChangeResourceRecordSets returned in the Id element when you submitted the request.

    ", + "GetHostedZoneRequest$Id": "

    The ID of the hosted zone for which you want to get a list of the name servers in the delegation set.

    ", + "GetReusableDelegationSetRequest$Id": "

    The ID of the reusable delegation set for which you want to get a list of the name servers.

    ", + "HostedZone$Id": "

    The ID of the specified hosted zone.

    ", + "ListChangeBatchesByHostedZoneRequest$HostedZoneId": "

    The ID of the hosted zone that you want to see changes for.

    ", + "ListChangeBatchesByRRSetRequest$HostedZoneId": "

    The ID of the hosted zone that you want to see changes for.

    ", + "ListHostedZonesByNameRequest$HostedZoneId": "

    If the request returned more than one page of results, submit another request and specify the value of NextDNSName and NextHostedZoneId from the last response in the DNSName and HostedZoneId parameters to get the next page of results.

    ", + "ListHostedZonesByNameResponse$HostedZoneId": "

    The HostedZoneId value sent in the request.

    ", + "ListHostedZonesByNameResponse$NextHostedZoneId": "

    If the value of IsTruncated in the ListHostedZonesByNameResponse is true, there are more hosted zones associated with the current AWS account. To get the next page of results, make another request to ListHostedZonesByName. Specify the value of NextDNSName in the DNSName parameter. Specify NextHostedZoneId in the HostedZoneId parameter.
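
    A sketch of the two-token pagination loop this describes, via the vendored Go SDK (zone names are printed purely for illustration):

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/route53"
        )

        func main() {
            svc := route53.New(session.Must(session.NewSession()))

            in := &route53.ListHostedZonesByNameInput{MaxItems: aws.String("100")}
            for {
                out, err := svc.ListHostedZonesByName(in)
                if err != nil {
                    panic(err)
                }
                for _, z := range out.HostedZones {
                    fmt.Println(aws.StringValue(z.Name))
                }
                if !aws.BoolValue(out.IsTruncated) {
                    break
                }
                // Both continuation values must be carried to the next request.
                in.DNSName = out.NextDNSName
                in.HostedZoneId = out.NextHostedZoneId
            }
        }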

    ", + "ListHostedZonesRequest$DelegationSetId": null, + "ListResourceRecordSetsRequest$HostedZoneId": "

    The ID of the hosted zone that contains the resource record sets that you want to get.

    ", + "ListTrafficPolicyInstancesByHostedZoneRequest$HostedZoneId": "

    The ID of the hosted zone for which you want to list traffic policy instances.

    ", + "ListTrafficPolicyInstancesByPolicyRequest$HostedZoneIdMarker": "

    For the first request to ListTrafficPolicyInstancesByPolicy, omit this value.

    If the value of IsTruncated in the previous response was true, HostedZoneIdMarker is the ID of the hosted zone for the first traffic policy instance in the next group of MaxItems traffic policy instances.

    If the value of IsTruncated in the previous response was false, there are no more traffic policy instances to get for this hosted zone.

    If the value of IsTruncated in the previous response was false, omit this value.

    ", + "ListTrafficPolicyInstancesByPolicyResponse$HostedZoneIdMarker": "

    If IsTruncated is true, HostedZoneIdMarker is the ID of the hosted zone of the first traffic policy instance in the next group of MaxItems traffic policy instances.

    ", + "ListTrafficPolicyInstancesRequest$HostedZoneIdMarker": "

    For the first request to ListTrafficPolicyInstances, omit this value.

    If the value of IsTruncated in the previous response was true, you have more traffic policy instances. To get the next group of MaxItems traffic policy instances, submit another ListTrafficPolicyInstances request. For the value of HostedZoneIdMarker, specify the value of HostedZoneIdMarker from the previous response, which is the hosted zone ID of the first traffic policy instance in the next group of MaxItems traffic policy instances.

    If the value of IsTruncated in the previous response was false, there are no more traffic policy instances to get.
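
    The same marker discipline in Go, assuming the vendored SDK; note that the hosted zone ID, instance name, and instance type markers travel between pages together:

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/route53"
        )

        func main() {
            svc := route53.New(session.Must(session.NewSession()))

            in := &route53.ListTrafficPolicyInstancesInput{}
            for {
                out, err := svc.ListTrafficPolicyInstances(in)
                if err != nil {
                    panic(err)
                }
                for _, ti := range out.TrafficPolicyInstances {
                    fmt.Println(aws.StringValue(ti.Id))
                }
                if !aws.BoolValue(out.IsTruncated) {
                    break
                }
                // Carry all three markers forward for the next page.
                in.HostedZoneIdMarker = out.HostedZoneIdMarker
                in.TrafficPolicyInstanceNameMarker = out.TrafficPolicyInstanceNameMarker
                in.TrafficPolicyInstanceTypeMarker = out.TrafficPolicyInstanceTypeMarker
            }
        }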

    ", + "ListTrafficPolicyInstancesResponse$HostedZoneIdMarker": "

    If IsTruncated is true, HostedZoneIdMarker is the ID of the hosted zone of the first traffic policy instance in the next group of MaxItems traffic policy instances.

    ", + "TrafficPolicyInstance$HostedZoneId": null, + "UpdateHostedZoneCommentRequest$Id": "

    The ID of the hosted zone you want to update.

    " + } + }, + "ResourcePath": { + "base": null, + "refs": { + "HealthCheckConfig$ResourcePath": "

    The path to ping on the instance to check its health. Required for HTTP, HTTPS, HTTP_STR_MATCH, and HTTPS_STR_MATCH health checks. The HTTP request is issued to the instance on the given port and path.

    ", + "UpdateHealthCheckRequest$ResourcePath": "

    The path that you want Amazon Route 53 to request when performing health checks. The path can be any value for which your endpoint will return an HTTP status code of 2xx or 3xx when the endpoint is healthy, for example, the file /docs/route53-health-check.html.

    Specify this value only if you want to change it.

    " + } + }, + "ResourceRecord": { + "base": "

    A complex type that contains the value of the Value element for the current resource record set.

    ", + "refs": { + "ResourceRecords$member": null + } + }, + "ResourceRecordSet": { + "base": "

    A complex type that contains information about the current resource record set.

    ", + "refs": { + "Change$ResourceRecordSet": "

    Information about the resource record set to create or delete.

    ", + "ResourceRecordSets$member": null + } + }, + "ResourceRecordSetFailover": { + "base": null, + "refs": { + "ResourceRecordSet$Failover": "

    Failover resource record sets only: To configure failover, you add the Failover element to two resource record sets. For one resource record set, you specify PRIMARY as the value for Failover; for the other resource record set, you specify SECONDARY. In addition, you include the HealthCheckId element and specify the health check that you want Amazon Route 53 to perform for each resource record set.

    You can create failover and failover alias resource record sets only in public hosted zones.

    Except where noted, the following failover behaviors assume that you have included the HealthCheckId element in both resource record sets:

    • When the primary resource record set is healthy, Amazon Route 53 responds to DNS queries with the applicable value from the primary resource record set regardless of the health of the secondary resource record set.
    • When the primary resource record set is unhealthy and the secondary resource record set is healthy, Amazon Route 53 responds to DNS queries with the applicable value from the secondary resource record set.
    • When the secondary resource record set is unhealthy, Amazon Route 53 responds to DNS queries with the applicable value from the primary resource record set regardless of the health of the primary resource record set.
    • If you omit the HealthCheckId element for the secondary resource record set, and if the primary resource record set is unhealthy, Amazon Route 53 always responds to DNS queries with the applicable value from the secondary resource record set. This is true regardless of the health of the associated endpoint.

    You cannot create non-failover resource record sets that have the same values for the Name and Type elements as failover resource record sets.

    For failover alias resource record sets, you must also include the EvaluateTargetHealth element and set the value to true.

    For more information about configuring failover for Amazon Route 53, see Amazon Route 53 Health Checks and DNS Failover in the Amazon Route 53 Developer Guide.

    Valid values: PRIMARY | SECONDARY
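
    A minimal sketch of a PRIMARY/SECONDARY pair via the vendored Go SDK; the zone ID, health check IDs, and addresses are placeholders:

        package main

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/route53"
        )

        func failoverSet(role, healthCheckID, value string) *route53.Change {
            return &route53.Change{
                Action: aws.String("CREATE"),
                ResourceRecordSet: &route53.ResourceRecordSet{
                    Name:          aws.String("www.example.com."),
                    Type:          aws.String("A"),
                    SetIdentifier: aws.String(role),          // unique per name/type combination
                    Failover:      aws.String(role),          // PRIMARY or SECONDARY
                    HealthCheckId: aws.String(healthCheckID), // placeholder health check ID
                    TTL:           aws.Int64(60),
                    ResourceRecords: []*route53.ResourceRecord{
                        {Value: aws.String(value)},
                    },
                },
            }
        }

        func main() {
            svc := route53.New(session.Must(session.NewSession()))
            _, err := svc.ChangeResourceRecordSets(&route53.ChangeResourceRecordSetsInput{
                HostedZoneId: aws.String("Z1EXAMPLE"),
                ChangeBatch: &route53.ChangeBatch{
                    Changes: []*route53.Change{
                        failoverSet("PRIMARY", "hc-primary-id", "192.0.2.10"),
                        failoverSet("SECONDARY", "hc-secondary-id", "192.0.2.20"),
                    },
                },
            })
            if err != nil {
                panic(err)
            }
        }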

    " + } + }, + "ResourceRecordSetIdentifier": { + "base": null, + "refs": { + "ListChangeBatchesByRRSetRequest$SetIdentifier": "

    The identifier of the RRSet that you want to see changes for.

    ", + "ListResourceRecordSetsRequest$StartRecordIdentifier": "

    Weighted resource record sets only: If results were truncated for a given DNS name and type, specify the value of NextRecordIdentifier from the previous response to get the next resource record set that has the current DNS name and type.

    ", + "ListResourceRecordSetsResponse$NextRecordIdentifier": "

    Weighted resource record sets only: If results were truncated for a given DNS name and type, the value of SetIdentifier for the next resource record set that has the current DNS name and type.

    ", + "ResourceRecordSet$SetIdentifier": "

    Weighted, Latency, Geo, and Failover resource record sets only: An identifier that differentiates among multiple resource record sets that have the same combination of DNS name and type. The value of SetIdentifier must be unique for each resource record set that has the same combination of DNS name and type.

    " + } + }, + "ResourceRecordSetRegion": { + "base": null, + "refs": { + "ResourceRecordSet$Region": "

    Latency-based resource record sets only: The Amazon EC2 region where the resource that is specified in this resource record set resides. The resource typically is an AWS resource, such as an Amazon EC2 instance or an ELB load balancer, and is referred to by an IP address or a DNS domain name, depending on the record type.

    You can create latency and latency alias resource record sets only in public hosted zones.

    When Amazon Route 53 receives a DNS query for a domain name and type for which you have created latency resource record sets, Amazon Route 53 selects the latency resource record set that has the lowest latency between the end user and the associated Amazon EC2 region. Amazon Route 53 then returns the value that is associated with the selected resource record set.

    Note the following:

    • You can only specify one ResourceRecord per latency resource record set.
    • You can only create one latency resource record set for each Amazon EC2 region.
    • You are not required to create latency resource record sets for all Amazon EC2 regions. Amazon Route 53 will choose the region with the best latency from among the regions for which you create latency resource record sets.
    • You cannot create non-latency resource record sets that have the same values for the Name and Type elements as latency resource record sets.
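
    The latency case differs from the failover sketch above only in the distinguishing fields; a minimal sketch of one record set per EC2 region (names and addresses are placeholders):

        package example

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/route53"
        )

        // One latency record set per region, same Name and Type; Route 53
        // answers from whichever region has the lowest latency to the user.
        var latencySets = []*route53.ResourceRecordSet{
            {
                Name:            aws.String("api.example.com."),
                Type:            aws.String("A"),
                SetIdentifier:   aws.String("us-west-2"),
                Region:          aws.String("us-west-2"),
                TTL:             aws.Int64(60),
                ResourceRecords: []*route53.ResourceRecord{{Value: aws.String("192.0.2.10")}},
            },
            {
                Name:            aws.String("api.example.com."),
                Type:            aws.String("A"),
                SetIdentifier:   aws.String("eu-west-1"),
                Region:          aws.String("eu-west-1"),
                TTL:             aws.Int64(60),
                ResourceRecords: []*route53.ResourceRecord{{Value: aws.String("192.0.2.20")}},
            },
        }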
    " + } + }, + "ResourceRecordSetWeight": { + "base": null, + "refs": { + "ResourceRecordSet$Weight": "

    Weighted resource record sets only: Among resource record sets that have the same combination of DNS name and type, a value that determines the proportion of DNS queries that Amazon Route 53 responds to using the current resource record set. Amazon Route 53 calculates the sum of the weights for the resource record sets that have the same combination of DNS name and type. Amazon Route 53 then responds to queries based on the ratio of a resource's weight to the total. Note the following:

    • You must specify a value for the Weight element for every weighted resource record set.
    • You can only specify one ResourceRecord per weighted resource record set.
    • You cannot create latency, failover, or geolocation resource record sets that have the same values for the Name and Type elements as weighted resource record sets.
    • You can create a maximum of 100 weighted resource record sets that have the same values for the Name and Type elements.
    • For weighted (but not weighted alias) resource record sets, if you set Weight to 0 for a resource record set, Amazon Route 53 never responds to queries with the applicable value for that resource record set. However, if you set Weight to 0 for all resource record sets that have the same combination of DNS name and type, traffic is routed to all resources with equal probability.

      The effect of setting Weight to 0 is different when you associate health checks with weighted resource record sets. For more information, see Options for Configuring Amazon Route 53 Active-Active and Active-Passive Failover in the Amazon Route 53 Developer Guide.
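
    A sketch of the arithmetic: with the weights below, the first set answers roughly 3/(3+1) = 75% of queries and the second the remaining 25%. Values are placeholders, and the sets would be submitted with ChangeResourceRecordSets as above:

        package example

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/route53"
        )

        var weightedSets = []*route53.ResourceRecordSet{
            {
                Name:            aws.String("www.example.com."),
                Type:            aws.String("A"),
                SetIdentifier:   aws.String("blue"),
                Weight:          aws.Int64(3), // 3/(3+1) = 75% of responses
                TTL:             aws.Int64(60),
                ResourceRecords: []*route53.ResourceRecord{{Value: aws.String("192.0.2.10")}},
            },
            {
                Name:            aws.String("www.example.com."),
                Type:            aws.String("A"),
                SetIdentifier:   aws.String("green"),
                Weight:          aws.Int64(1), // the remaining 25%
                TTL:             aws.Int64(60),
                ResourceRecords: []*route53.ResourceRecord{{Value: aws.String("192.0.2.20")}},
            },
        }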

    " + } + }, + "ResourceRecordSets": { + "base": null, + "refs": { + "ListResourceRecordSetsResponse$ResourceRecordSets": "

    A complex type that contains information about the resource record sets that are returned by the request.

    " + } + }, + "ResourceRecords": { + "base": null, + "refs": { + "ResourceRecordSet$ResourceRecords": "

    A complex type that contains the resource records for the current resource record set.

    " + } + }, + "ResourceTagSet": { + "base": "

    A complex type containing a resource and its associated tags.

    ", + "refs": { + "ListTagsForResourceResponse$ResourceTagSet": "

    A ResourceTagSet containing tags associated with the specified resource.

    ", + "ResourceTagSetList$member": null + } + }, + "ResourceTagSetList": { + "base": null, + "refs": { + "ListTagsForResourcesResponse$ResourceTagSets": "

    A list of ResourceTagSets containing tags associated with the specified resources.

    " + } + }, + "ResourceURI": { + "base": null, + "refs": { + "CreateHealthCheckResponse$Location": "

    The unique URL representing the new health check.

    ", + "CreateHostedZoneResponse$Location": "

    The unique URL representing the new hosted zone.

    ", + "CreateReusableDelegationSetResponse$Location": "

    The unique URL representing the new reusable delegation set.

    ", + "CreateTrafficPolicyInstanceResponse$Location": "

    A unique URL that represents a new traffic policy instance.

    ", + "CreateTrafficPolicyResponse$Location": null, + "CreateTrafficPolicyVersionResponse$Location": null + } + }, + "SearchString": { + "base": null, + "refs": { + "HealthCheckConfig$SearchString": "

    A string to search for in the body of a health check response. Required for HTTP_STR_MATCH and HTTPS_STR_MATCH health checks. Amazon Route 53 considers case when searching for SearchString in the response body.
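
    A sketch of a string-matching health check via the vendored Go SDK; the endpoint, path, and search string are placeholders:

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/route53"
        )

        func main() {
            svc := route53.New(session.Must(session.NewSession()))

            out, err := svc.CreateHealthCheck(&route53.CreateHealthCheckInput{
                CallerReference: aws.String("hc-2017-07-26-01"), // any unique string
                HealthCheckConfig: &route53.HealthCheckConfig{
                    Type:                     aws.String("HTTP_STR_MATCH"),
                    FullyQualifiedDomainName: aws.String("example.com"),
                    Port:                     aws.Int64(80),
                    ResourcePath:             aws.String("/docs/route53-health-check.html"),
                    SearchString:             aws.String("OK"), // matched case-sensitively
                    RequestInterval:          aws.Int64(30),    // 10 or 30 seconds
                },
            })
            if err != nil {
                panic(err)
            }
            fmt.Println(aws.StringValue(out.HealthCheck.Id))
        }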

    ", + "UpdateHealthCheckRequest$SearchString": "

    If the value of Type is HTTP_STR_MATCH or HTTPS_STR_MATCH, the string that you want Amazon Route 53 to search for in the response body from the specified resource. If the string appears in the response body, Amazon Route 53 considers the resource healthy. Amazon Route 53 considers case when searching for SearchString in the response body.

    Specify this value only if you want to change it.

    " + } + }, + "Statistic": { + "base": null, + "refs": { + "CloudWatchAlarmConfiguration$Statistic": "

    The statistic to apply to the CloudWatch metric that is associated with the CloudWatch alarm.

    Valid values: SampleCount | Average | Sum | Minimum | Maximum

    " + } + }, + "Status": { + "base": null, + "refs": { + "StatusReport$Status": "

    The observed health check status.

    " + } + }, + "StatusReport": { + "base": "

    A complex type that contains information about the health check status for the current observation.

    ", + "refs": { + "HealthCheckObservation$StatusReport": "

    A complex type that contains information about the health check status for the current observation.

    " + } + }, + "TTL": { + "base": null, + "refs": { + "CreateTrafficPolicyInstanceRequest$TTL": "

    The TTL that you want Amazon Route 53 to assign to all of the resource record sets that it creates in the specified hosted zone.

    ", + "ResourceRecordSet$TTL": "

    The cache time to live for the current resource record set. Note the following:

    • If you're creating a non-alias resource record set, TTL is required.
    • If you're creating an alias resource record set, omit TTL. Amazon Route 53 uses the value of TTL for the alias target.
    • If you're associating this resource record set with a health check (if you're adding a HealthCheckId element), we recommend that you specify a TTL of 60 seconds or less so clients respond quickly to changes in health status.
    • All of the resource record sets in a group of weighted, latency, geolocation, or failover resource record sets must have the same value for TTL.
    • If a group of weighted resource record sets includes one or more weighted alias resource record sets for which the alias target is an ELB load balancer, we recommend that you specify a TTL of 60 seconds for all of the non-alias weighted resource record sets that have the same name and type. Values other than 60 seconds (the TTL for load balancers) will change the effect of the values that you specify for Weight.
    ", + "TrafficPolicyInstance$TTL": null, + "UpdateTrafficPolicyInstanceRequest$TTL": "

    The TTL that you want Amazon Route 53 to assign to all of the updated resource record sets.

    " + } + }, + "Tag": { + "base": "

    A single tag containing a key and value.

    ", + "refs": { + "TagList$member": null + } + }, + "TagKey": { + "base": null, + "refs": { + "Tag$Key": "

    The key for a Tag.

    ", + "TagKeyList$member": null + } + }, + "TagKeyList": { + "base": null, + "refs": { + "ChangeTagsForResourceRequest$RemoveTagKeys": "

    A list of Tag keys that you want to remove from the specified resource.

    " + } + }, + "TagList": { + "base": null, + "refs": { + "ChangeTagsForResourceRequest$AddTags": "

    A complex type that contains a list of Tag elements. Each Tag element identifies a tag that you want to add or update for the specified resource.

    ", + "ResourceTagSet$Tags": "

    The tags associated with the specified resource.

    " + } + }, + "TagResourceId": { + "base": null, + "refs": { + "ChangeTagsForResourceRequest$ResourceId": "

    The ID of the resource for which you want to add, change, or delete tags.

    ", + "ListTagsForResourceRequest$ResourceId": "

    The ID of the resource for which you want to retrieve tags.

    ", + "ResourceTagSet$ResourceId": "

    The ID for the specified resource.

    ", + "TagResourceIdList$member": null + } + }, + "TagResourceIdList": { + "base": null, + "refs": { + "ListTagsForResourcesRequest$ResourceIds": "

    A complex type that contains the ResourceId element for each resource for which you want to get a list of tags.

    " + } + }, + "TagResourceType": { + "base": null, + "refs": { + "ChangeTagsForResourceRequest$ResourceType": "

    The type of the resource.

    - The resource type for health checks is healthcheck.

    - The resource type for hosted zones is hostedzone.
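
    A sketch of a combined add/remove tagging call against a hosted zone; the resource ID and tag names are placeholders:

        package main

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/route53"
        )

        func main() {
            svc := route53.New(session.Must(session.NewSession()))

            _, err := svc.ChangeTagsForResource(&route53.ChangeTagsForResourceInput{
                ResourceType: aws.String("hostedzone"), // or "healthcheck"
                ResourceId:   aws.String("Z1EXAMPLE"),
                AddTags: []*route53.Tag{
                    {Key: aws.String("Environment"), Value: aws.String("staging")},
                },
                RemoveTagKeys: []*string{aws.String("Owner")},
            })
            if err != nil {
                panic(err)
            }
        }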

    ", + "ListTagsForResourceRequest$ResourceType": "

    The type of the resource.

    - The resource type for health checks is healthcheck.

    - The resource type for hosted zones is hostedzone.

    ", + "ListTagsForResourcesRequest$ResourceType": "

    The type of the resources.

    - The resource type for health checks is healthcheck.

    - The resource type for hosted zones is hostedzone.

    ", + "ResourceTagSet$ResourceType": "

    The type of the resource.

    - The resource type for health checks is healthcheck.

    - The resource type for hosted zones is hostedzone.

    " + } + }, + "TagValue": { + "base": null, + "refs": { + "Tag$Value": "

    The value for a Tag.

    " + } + }, + "Threshold": { + "base": null, + "refs": { + "CloudWatchAlarmConfiguration$Threshold": "

    The value that the metric is compared with to determine the state of the alarm. For example, if you want the health check to fail if the average TCP connection time is greater than 500 milliseconds for more than 60 seconds, the threshold is 500.

    " + } + }, + "ThrottlingException": { + "base": null, + "refs": { + } + }, + "TimeStamp": { + "base": null, + "refs": { + "ChangeBatchRecord$SubmittedAt": "

    The date and time the change was submitted, in the format YYYY-MM-DDThh:mm:ssZ, as specified in the ISO 8601 standard (for example, 2009-11-19T19:37:58Z). The Z after the time indicates that the time is listed in Coordinated Universal Time (UTC).

    ", + "ChangeInfo$SubmittedAt": "

    The date and time the change was submitted, in the format YYYY-MM-DDThh:mm:ssZ, as specified in the ISO 8601 standard (for example, 2009-11-19T19:37:58Z). The Z after the time indicates that the time is listed in Coordinated Universal Time (UTC).

    ", + "StatusReport$CheckedTime": "

    The date and time the health check status was observed, in the format YYYY-MM-DDThh:mm:ssZ, as specified in the ISO 8601 standard (for example, 2009-11-19T19:37:58Z). The Z after the time indicates that the time is listed in Coordinated Universal Time (UTC).

    " + } + }, + "TooManyHealthChecks": { + "base": null, + "refs": { + } + }, + "TooManyHostedZones": { + "base": "

    This error indicates that you've reached the maximum number of hosted zones that can be created for the current AWS account. You can request an increase to the limit on the Contact Us page.

    ", + "refs": { + } + }, + "TooManyTrafficPolicies": { + "base": "

    You've created the maximum number of traffic policies that can be created for the current AWS account. You can request an increase to the limit on the Contact Us page.

    ", + "refs": { + } + }, + "TooManyTrafficPolicyInstances": { + "base": "

    You've created the maximum number of traffic policy instances that can be created for the current AWS account. You can request an increase to the limit on the Contact Us page.

    ", + "refs": { + } + }, + "TrafficPolicies": { + "base": null, + "refs": { + "ListTrafficPolicyVersionsResponse$TrafficPolicies": "

    A list that contains one TrafficPolicy element for each traffic policy version that is associated with the specified traffic policy.

    " + } + }, + "TrafficPolicy": { + "base": null, + "refs": { + "CreateTrafficPolicyResponse$TrafficPolicy": "

    A complex type that contains settings for the new traffic policy.

    ", + "CreateTrafficPolicyVersionResponse$TrafficPolicy": "

    A complex type that contains settings for the new version of the traffic policy.

    ", + "GetTrafficPolicyResponse$TrafficPolicy": "

    A complex type that contains settings for the specified traffic policy.

    ", + "TrafficPolicies$member": null, + "UpdateTrafficPolicyCommentResponse$TrafficPolicy": "

    A complex type that contains settings for the specified traffic policy.

    " + } + }, + "TrafficPolicyAlreadyExists": { + "base": "

    A traffic policy that has the same value for Name already exists.

    ", + "refs": { + } + }, + "TrafficPolicyComment": { + "base": null, + "refs": { + "CreateTrafficPolicyRequest$Comment": "

    Any comments that you want to include about the traffic policy.

    ", + "CreateTrafficPolicyVersionRequest$Comment": "

    Any comments that you want to include about the new traffic policy version.

    ", + "TrafficPolicy$Comment": null, + "UpdateTrafficPolicyCommentRequest$Comment": "

    The new comment for the specified traffic policy and version.

    " + } + }, + "TrafficPolicyDocument": { + "base": null, + "refs": { + "CreateTrafficPolicyRequest$Document": "

    The definition of this traffic policy in JSON format. For more information, see Traffic Policy Document Format in the Amazon Route 53 API Reference.

    ", + "CreateTrafficPolicyVersionRequest$Document": "

    The definition of a new traffic policy version, in JSON format. You must specify the full definition of the new traffic policy. You cannot specify just the differences between the new version and a previous version. For more information, see Traffic Policy Document Format in the Amazon Route 53 API Reference.
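
    A sketch that loads a document written to the Traffic Policy Document Format and creates a policy from it; the file name and policy name are placeholders:

        package main

        import (
            "fmt"
            "io/ioutil"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/route53"
        )

        func main() {
            // JSON written to the Traffic Policy Document Format (placeholder file).
            doc, err := ioutil.ReadFile("policy.json")
            if err != nil {
                panic(err)
            }

            svc := route53.New(session.Must(session.NewSession()))
            out, err := svc.CreateTrafficPolicy(&route53.CreateTrafficPolicyInput{
                Name:     aws.String("example-policy"),
                Comment:  aws.String("initial version"),
                Document: aws.String(string(doc)),
            })
            if err != nil {
                panic(err)
            }
            fmt.Println(aws.StringValue(out.TrafficPolicy.Id),
                aws.Int64Value(out.TrafficPolicy.Version))
        }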

    ", + "TrafficPolicy$Document": null + } + }, + "TrafficPolicyId": { + "base": null, + "refs": { + "CreateTrafficPolicyInstanceRequest$TrafficPolicyId": "

    The ID of the traffic policy that you want to use to create resource record sets in the specified hosted zone.

    ", + "CreateTrafficPolicyVersionRequest$Id": "

    The ID of the traffic policy for which you want to create a new version.

    ", + "DeleteTrafficPolicyRequest$Id": "

    The ID of the traffic policy that you want to delete.

    ", + "GetTrafficPolicyRequest$Id": "

    The ID of the traffic policy that you want to get information about.

    ", + "ListTrafficPoliciesRequest$TrafficPolicyIdMarker": "

    For your first request to ListTrafficPolicies, do not include the TrafficPolicyIdMarker parameter.

    If you have more traffic policies than the value of MaxItems, ListTrafficPolicies returns only the first MaxItems traffic policies. To get the next group of MaxItems policies, submit another request to ListTrafficPolicies. For the value of TrafficPolicyIdMarker, specify the value of the TrafficPolicyIdMarker element that was returned in the previous response.

    Policies are listed in the order in which they were created.

    ", + "ListTrafficPoliciesResponse$TrafficPolicyIdMarker": "

    If the value of IsTruncated is true, TrafficPolicyIdMarker is the ID of the first traffic policy in the next group of MaxItems traffic policies.

    ", + "ListTrafficPolicyInstancesByPolicyRequest$TrafficPolicyId": "

    The ID of the traffic policy for which you want to list traffic policy instances.

    ", + "ListTrafficPolicyVersionsRequest$Id": "

    Specify the value of Id of the traffic policy for which you want to list all versions.

    ", + "TrafficPolicy$Id": null, + "TrafficPolicyInstance$TrafficPolicyId": null, + "TrafficPolicySummary$Id": null, + "UpdateTrafficPolicyCommentRequest$Id": "

    The value of Id for the traffic policy for which you want to update the comment.

    ", + "UpdateTrafficPolicyInstanceRequest$TrafficPolicyId": "

    The ID of the traffic policy that you want Amazon Route 53 to use to update resource record sets for the specified traffic policy instance.

    " + } + }, + "TrafficPolicyInUse": { + "base": "

    One or more traffic policy instances were created by using the specified traffic policy.

    ", + "refs": { + } + }, + "TrafficPolicyInstance": { + "base": null, + "refs": { + "CreateTrafficPolicyInstanceResponse$TrafficPolicyInstance": "

    A complex type that contains settings for the new traffic policy instance.

    ", + "GetTrafficPolicyInstanceResponse$TrafficPolicyInstance": "

    A complex type that contains settings for the traffic policy instance.

    ", + "TrafficPolicyInstances$member": null, + "UpdateTrafficPolicyInstanceResponse$TrafficPolicyInstance": "

    A complex type that contains settings for the updated traffic policy instance.

    " + } + }, + "TrafficPolicyInstanceAlreadyExists": { + "base": "

    A traffic policy instance with the specified ID already exists.

    ", + "refs": { + } + }, + "TrafficPolicyInstanceCount": { + "base": null, + "refs": { + "GetTrafficPolicyInstanceCountResponse$TrafficPolicyInstanceCount": "

    The number of traffic policy instances that are associated with the current AWS account.

    " + } + }, + "TrafficPolicyInstanceId": { + "base": null, + "refs": { + "DeleteTrafficPolicyInstanceRequest$Id": "

    The ID of the traffic policy instance that you want to delete.

    When you delete a traffic policy instance, Amazon Route 53 also deletes all of the resource record sets that were created when you created the traffic policy instance.", + "GetTrafficPolicyInstanceRequest$Id": "

    The ID of the traffic policy instance that you want to get information about.

    ", + "ResourceRecordSet$TrafficPolicyInstanceId": null, + "TrafficPolicyInstance$Id": null, + "UpdateTrafficPolicyInstanceRequest$Id": "

    The ID of the traffic policy instance that you want to update.

    " + } + }, + "TrafficPolicyInstanceState": { + "base": null, + "refs": { + "TrafficPolicyInstance$State": null + } + }, + "TrafficPolicyInstances": { + "base": null, + "refs": { + "ListTrafficPolicyInstancesByHostedZoneResponse$TrafficPolicyInstances": "

    A list that contains one TrafficPolicyInstance element for each traffic policy instance that matches the elements in the request.

    ", + "ListTrafficPolicyInstancesByPolicyResponse$TrafficPolicyInstances": "

    A list that contains one TrafficPolicyInstance element for each traffic policy instance that matches the elements in the request.

    ", + "ListTrafficPolicyInstancesResponse$TrafficPolicyInstances": "

    A list that contains one TrafficPolicyInstance element for each traffic policy instance that matches the elements in the request.

    " + } + }, + "TrafficPolicyName": { + "base": null, + "refs": { + "CreateTrafficPolicyRequest$Name": "

    The name of the traffic policy.

    ", + "TrafficPolicy$Name": null, + "TrafficPolicySummary$Name": null + } + }, + "TrafficPolicySummaries": { + "base": null, + "refs": { + "ListTrafficPoliciesResponse$TrafficPolicySummaries": "

    A list that contains one TrafficPolicySummary element for each traffic policy that was created by the current AWS account.

    " + } + }, + "TrafficPolicySummary": { + "base": null, + "refs": { + "TrafficPolicySummaries$member": null + } + }, + "TrafficPolicyVersion": { + "base": null, + "refs": { + "CreateTrafficPolicyInstanceRequest$TrafficPolicyVersion": "

    The version of the traffic policy that you want to use to create resource record sets in the specified hosted zone.
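
    A sketch of creating an instance from a specific policy version; the IDs are placeholders, and the TTL applies to every record set the instance creates:

        package main

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/route53"
        )

        func main() {
            svc := route53.New(session.Must(session.NewSession()))

            _, err := svc.CreateTrafficPolicyInstance(&route53.CreateTrafficPolicyInstanceInput{
                HostedZoneId:         aws.String("Z1EXAMPLE"),
                Name:                 aws.String("www.example.com."),
                TTL:                  aws.Int64(60),
                TrafficPolicyId:      aws.String("abcdef11-2222-3333-4444-555555fedcba"), // placeholder
                TrafficPolicyVersion: aws.Int64(1),
            })
            if err != nil {
                panic(err)
            }
        }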

    ", + "DeleteTrafficPolicyRequest$Version": "

    The version number of the traffic policy that you want to delete.

    ", + "GetTrafficPolicyRequest$Version": "

    The version number of the traffic policy that you want to get information about.

    ", + "ListTrafficPolicyInstancesByPolicyRequest$TrafficPolicyVersion": "

    The version of the traffic policy for which you want to list traffic policy instances. The version must be associated with the traffic policy that is specified by TrafficPolicyId.

    ", + "TrafficPolicy$Version": null, + "TrafficPolicyInstance$TrafficPolicyVersion": null, + "TrafficPolicySummary$LatestVersion": null, + "TrafficPolicySummary$TrafficPolicyCount": null, + "UpdateTrafficPolicyCommentRequest$Version": "

    The value of Version for the traffic policy for which you want to update the comment.

    ", + "UpdateTrafficPolicyInstanceRequest$TrafficPolicyVersion": "

    The version of the traffic policy that you want Amazon Route 53 to use to update resource record sets for the specified traffic policy instance.

    " + } + }, + "TrafficPolicyVersionMarker": { + "base": null, + "refs": { + "ListTrafficPolicyVersionsRequest$TrafficPolicyVersionMarker": "

    For your first request to ListTrafficPolicyVersions, do not include the TrafficPolicyVersionMarker parameter.

    If you have more traffic policy versions than the value of MaxItems, ListTrafficPolicyVersions returns only the first group of MaxItems versions. To get the next group of MaxItems traffic policy versions, submit another request to ListTrafficPolicyVersions. For the value of TrafficPolicyVersionMarker, specify the value of the TrafficPolicyVersionMarker element that was returned in the previous response.

    Traffic policy versions are listed in sequential order.

    ", + "ListTrafficPolicyVersionsResponse$TrafficPolicyVersionMarker": "

    If IsTruncated is true, the value of TrafficPolicyVersionMarker identifies the first traffic policy in the next group of MaxItems traffic policies. Call ListTrafficPolicyVersions again and specify the value of TrafficPolicyVersionMarker in the TrafficPolicyVersionMarker request parameter.

    This element is present only if IsTruncated is true.

    " + } + }, + "UpdateHealthCheckRequest": { + "base": "

    A complex type that contains information about the request to update a health check.

    ", + "refs": { + } + }, + "UpdateHealthCheckResponse": { + "base": null, + "refs": { + } + }, + "UpdateHostedZoneCommentRequest": { + "base": "

    A complex type that contains information about the request to update a hosted zone comment.

    ", + "refs": { + } + }, + "UpdateHostedZoneCommentResponse": { + "base": "

    A complex type containing information about the specified hosted zone after the update.

    ", + "refs": { + } + }, + "UpdateTrafficPolicyCommentRequest": { + "base": "

    A complex type that contains information about the traffic policy for which you want to update the comment.

    ", + "refs": { + } + }, + "UpdateTrafficPolicyCommentResponse": { + "base": "

    A complex type that contains the response information for the traffic policy.

    ", + "refs": { + } + }, + "UpdateTrafficPolicyInstanceRequest": { + "base": "

    A complex type that contains information about the resource record sets that you want to update based on a specified traffic policy instance.

    ", + "refs": { + } + }, + "UpdateTrafficPolicyInstanceResponse": { + "base": "

    A complex type that contains information about the resource record sets that Amazon Route 53 created based on a specified traffic policy.

    ", + "refs": { + } + }, + "VPC": { + "base": null, + "refs": { + "AssociateVPCWithHostedZoneRequest$VPC": "

    The VPC that you want your hosted zone to be associated with.
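
    A sketch of the association call; per the note above for AssociateVPCWithHostedZoneRequest, the hosted zone needs an existing VPC association, and the IDs here are placeholders:

        package main

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/route53"
        )

        func main() {
            svc := route53.New(session.Must(session.NewSession()))

            _, err := svc.AssociateVPCWithHostedZone(&route53.AssociateVPCWithHostedZoneInput{
                HostedZoneId: aws.String("Z1EXAMPLE"),
                VPC: &route53.VPC{
                    VPCId:     aws.String("vpc-1a2b3c4d"),
                    VPCRegion: aws.String("us-east-1"),
                },
                Comment: aws.String("associate a second VPC"),
            })
            if err != nil {
                panic(err)
            }
        }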

    ", + "CreateHostedZoneRequest$VPC": "

    The VPC that you want your hosted zone to be associated with. By providing this parameter, your newly created hosted zone cannot be resolved anywhere other than the given VPC.

    ", + "CreateHostedZoneResponse$VPC": null, + "DisassociateVPCFromHostedZoneRequest$VPC": "

    The VPC that you want your hosted zone to be disassociated from.

    ", + "VPCs$member": null + } + }, + "VPCAssociationNotFound": { + "base": "

    The VPC you specified is not currently associated with the hosted zone.

    ", + "refs": { + } + }, + "VPCId": { + "base": "

    A VPC ID.

    ", + "refs": { + "VPC$VPCId": null + } + }, + "VPCRegion": { + "base": null, + "refs": { + "VPC$VPCRegion": null + } + }, + "VPCs": { + "base": "

    A list of VPCs.

    ", + "refs": { + "GetHostedZoneResponse$VPCs": "

    A complex type that contains information about VPCs associated with the specified hosted zone.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/route53/2013-04-01/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/route53/2013-04-01/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/route53/2013-04-01/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/route53/2013-04-01/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/route53/2013-04-01/paginators-1.json new file mode 100644 index 000000000..d472f47a2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/route53/2013-04-01/paginators-1.json @@ -0,0 +1,33 @@ +{ + "pagination": { + "ListHealthChecks": { + "input_token": "Marker", + "output_token": "NextMarker", + "more_results": "IsTruncated", + "limit_key": "MaxItems", + "result_key": "HealthChecks" + }, + "ListHostedZones": { + "input_token": "Marker", + "output_token": "NextMarker", + "more_results": "IsTruncated", + "limit_key": "MaxItems", + "result_key": "HostedZones" + }, + "ListResourceRecordSets": { + "more_results": "IsTruncated", + "limit_key": "MaxItems", + "result_key": "ResourceRecordSets", + "input_token": [ + "StartRecordName", + "StartRecordType", + "StartRecordIdentifier" + ], + "output_token": [ + "NextRecordName", + "NextRecordType", + "NextRecordIdentifier" + ] + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/route53/2013-04-01/waiters-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/route53/2013-04-01/waiters-2.json new file mode 100644 index 000000000..94aad399e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/route53/2013-04-01/waiters-2.json @@ -0,0 +1,18 @@ +{ + "version": 2, + "waiters": { + "ResourceRecordSetsChanged": { + "delay": 30, + "maxAttempts": 60, + "operation": "GetChange", + "acceptors": [ + { + "matcher": "path", + "expected": "INSYNC", + "argument": "ChangeInfo.Status", + "state": "success" + } + ] + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/route53domains/2014-05-15/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/route53domains/2014-05-15/api-2.json new file mode 100644 index 000000000..dc41413ee --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/route53domains/2014-05-15/api-2.json @@ -0,0 +1,1197 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2014-05-15", + "endpointPrefix":"route53domains", + "jsonVersion":"1.1", + "protocol":"json", + "serviceFullName":"Amazon Route 53 Domains", + "signatureVersion":"v4", + "targetPrefix":"Route53Domains_v20140515" + }, + "operations":{ + "CheckDomainAvailability":{ + "name":"CheckDomainAvailability", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CheckDomainAvailabilityRequest"}, + "output":{"shape":"CheckDomainAvailabilityResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"UnsupportedTLD"} + ] + }, + "DeleteTagsForDomain":{ + "name":"DeleteTagsForDomain", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteTagsForDomainRequest"}, + "output":{"shape":"DeleteTagsForDomainResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"OperationLimitExceeded"}, + {"shape":"UnsupportedTLD"} + ] + }, + "DisableDomainAutoRenew":{ + "name":"DisableDomainAutoRenew", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableDomainAutoRenewRequest"}, + 
"output":{"shape":"DisableDomainAutoRenewResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"UnsupportedTLD"} + ] + }, + "DisableDomainTransferLock":{ + "name":"DisableDomainTransferLock", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableDomainTransferLockRequest"}, + "output":{"shape":"DisableDomainTransferLockResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"DuplicateRequest"}, + {"shape":"TLDRulesViolation"}, + {"shape":"OperationLimitExceeded"}, + {"shape":"UnsupportedTLD"} + ] + }, + "EnableDomainAutoRenew":{ + "name":"EnableDomainAutoRenew", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableDomainAutoRenewRequest"}, + "output":{"shape":"EnableDomainAutoRenewResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"UnsupportedTLD"}, + {"shape":"TLDRulesViolation"} + ] + }, + "EnableDomainTransferLock":{ + "name":"EnableDomainTransferLock", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableDomainTransferLockRequest"}, + "output":{"shape":"EnableDomainTransferLockResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"DuplicateRequest"}, + {"shape":"TLDRulesViolation"}, + {"shape":"OperationLimitExceeded"}, + {"shape":"UnsupportedTLD"} + ] + }, + "GetContactReachabilityStatus":{ + "name":"GetContactReachabilityStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetContactReachabilityStatusRequest"}, + "output":{"shape":"GetContactReachabilityStatusResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"OperationLimitExceeded"}, + {"shape":"UnsupportedTLD"} + ] + }, + "GetDomainDetail":{ + "name":"GetDomainDetail", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDomainDetailRequest"}, + "output":{"shape":"GetDomainDetailResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"UnsupportedTLD"} + ] + }, + "GetOperationDetail":{ + "name":"GetOperationDetail", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetOperationDetailRequest"}, + "output":{"shape":"GetOperationDetailResponse"}, + "errors":[ + {"shape":"InvalidInput"} + ] + }, + "ListDomains":{ + "name":"ListDomains", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDomainsRequest"}, + "output":{"shape":"ListDomainsResponse"}, + "errors":[ + {"shape":"InvalidInput"} + ] + }, + "ListOperations":{ + "name":"ListOperations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListOperationsRequest"}, + "output":{"shape":"ListOperationsResponse"}, + "errors":[ + {"shape":"InvalidInput"} + ] + }, + "ListTagsForDomain":{ + "name":"ListTagsForDomain", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForDomainRequest"}, + "output":{"shape":"ListTagsForDomainResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"OperationLimitExceeded"}, + {"shape":"UnsupportedTLD"} + ] + }, + "RegisterDomain":{ + "name":"RegisterDomain", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterDomainRequest"}, + "output":{"shape":"RegisterDomainResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"UnsupportedTLD"}, + {"shape":"DuplicateRequest"}, + {"shape":"TLDRulesViolation"}, + {"shape":"DomainLimitExceeded"}, + {"shape":"OperationLimitExceeded"} + ] + }, + "ResendContactReachabilityEmail":{ + "name":"ResendContactReachabilityEmail", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"ResendContactReachabilityEmailRequest"}, + "output":{"shape":"ResendContactReachabilityEmailResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"OperationLimitExceeded"}, + {"shape":"UnsupportedTLD"} + ] + }, + "RetrieveDomainAuthCode":{ + "name":"RetrieveDomainAuthCode", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RetrieveDomainAuthCodeRequest"}, + "output":{"shape":"RetrieveDomainAuthCodeResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"UnsupportedTLD"} + ] + }, + "TransferDomain":{ + "name":"TransferDomain", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TransferDomainRequest"}, + "output":{"shape":"TransferDomainResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"UnsupportedTLD"}, + {"shape":"DuplicateRequest"}, + {"shape":"TLDRulesViolation"}, + {"shape":"DomainLimitExceeded"}, + {"shape":"OperationLimitExceeded"} + ] + }, + "UpdateDomainContact":{ + "name":"UpdateDomainContact", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateDomainContactRequest"}, + "output":{"shape":"UpdateDomainContactResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"DuplicateRequest"}, + {"shape":"TLDRulesViolation"}, + {"shape":"OperationLimitExceeded"}, + {"shape":"UnsupportedTLD"} + ] + }, + "UpdateDomainContactPrivacy":{ + "name":"UpdateDomainContactPrivacy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateDomainContactPrivacyRequest"}, + "output":{"shape":"UpdateDomainContactPrivacyResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"DuplicateRequest"}, + {"shape":"TLDRulesViolation"}, + {"shape":"OperationLimitExceeded"}, + {"shape":"UnsupportedTLD"} + ] + }, + "UpdateDomainNameservers":{ + "name":"UpdateDomainNameservers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateDomainNameserversRequest"}, + "output":{"shape":"UpdateDomainNameserversResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"DuplicateRequest"}, + {"shape":"TLDRulesViolation"}, + {"shape":"OperationLimitExceeded"}, + {"shape":"UnsupportedTLD"} + ] + }, + "UpdateTagsForDomain":{ + "name":"UpdateTagsForDomain", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateTagsForDomainRequest"}, + "output":{"shape":"UpdateTagsForDomainResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"OperationLimitExceeded"}, + {"shape":"UnsupportedTLD"} + ] + } + }, + "shapes":{ + "AddressLine":{ + "type":"string", + "max":255 + }, + "Boolean":{"type":"boolean"}, + "CheckDomainAvailabilityRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"}, + "IdnLangCode":{"shape":"LangCode"} + } + }, + "CheckDomainAvailabilityResponse":{ + "type":"structure", + "required":["Availability"], + "members":{ + "Availability":{"shape":"DomainAvailability"} + } + }, + "City":{ + "type":"string", + "max":255 + }, + "ContactDetail":{ + "type":"structure", + "members":{ + "FirstName":{"shape":"ContactName"}, + "LastName":{"shape":"ContactName"}, + "ContactType":{"shape":"ContactType"}, + "OrganizationName":{"shape":"ContactName"}, + "AddressLine1":{"shape":"AddressLine"}, + "AddressLine2":{"shape":"AddressLine"}, + "City":{"shape":"City"}, + "State":{"shape":"State"}, + "CountryCode":{"shape":"CountryCode"}, + "ZipCode":{"shape":"ZipCode"}, + "PhoneNumber":{"shape":"ContactNumber"}, + "Email":{"shape":"Email"}, + 
"Fax":{"shape":"ContactNumber"}, + "ExtraParams":{"shape":"ExtraParamList"} + }, + "sensitive":true + }, + "ContactName":{ + "type":"string", + "max":255 + }, + "ContactNumber":{ + "type":"string", + "max":30 + }, + "ContactType":{ + "type":"string", + "enum":[ + "PERSON", + "COMPANY", + "ASSOCIATION", + "PUBLIC_BODY", + "RESELLER" + ] + }, + "CountryCode":{ + "type":"string", + "enum":[ + "AD", + "AE", + "AF", + "AG", + "AI", + "AL", + "AM", + "AN", + "AO", + "AQ", + "AR", + "AS", + "AT", + "AU", + "AW", + "AZ", + "BA", + "BB", + "BD", + "BE", + "BF", + "BG", + "BH", + "BI", + "BJ", + "BL", + "BM", + "BN", + "BO", + "BR", + "BS", + "BT", + "BW", + "BY", + "BZ", + "CA", + "CC", + "CD", + "CF", + "CG", + "CH", + "CI", + "CK", + "CL", + "CM", + "CN", + "CO", + "CR", + "CU", + "CV", + "CX", + "CY", + "CZ", + "DE", + "DJ", + "DK", + "DM", + "DO", + "DZ", + "EC", + "EE", + "EG", + "ER", + "ES", + "ET", + "FI", + "FJ", + "FK", + "FM", + "FO", + "FR", + "GA", + "GB", + "GD", + "GE", + "GH", + "GI", + "GL", + "GM", + "GN", + "GQ", + "GR", + "GT", + "GU", + "GW", + "GY", + "HK", + "HN", + "HR", + "HT", + "HU", + "ID", + "IE", + "IL", + "IM", + "IN", + "IQ", + "IR", + "IS", + "IT", + "JM", + "JO", + "JP", + "KE", + "KG", + "KH", + "KI", + "KM", + "KN", + "KP", + "KR", + "KW", + "KY", + "KZ", + "LA", + "LB", + "LC", + "LI", + "LK", + "LR", + "LS", + "LT", + "LU", + "LV", + "LY", + "MA", + "MC", + "MD", + "ME", + "MF", + "MG", + "MH", + "MK", + "ML", + "MM", + "MN", + "MO", + "MP", + "MR", + "MS", + "MT", + "MU", + "MV", + "MW", + "MX", + "MY", + "MZ", + "NA", + "NC", + "NE", + "NG", + "NI", + "NL", + "NO", + "NP", + "NR", + "NU", + "NZ", + "OM", + "PA", + "PE", + "PF", + "PG", + "PH", + "PK", + "PL", + "PM", + "PN", + "PR", + "PT", + "PW", + "PY", + "QA", + "RO", + "RS", + "RU", + "RW", + "SA", + "SB", + "SC", + "SD", + "SE", + "SG", + "SH", + "SI", + "SK", + "SL", + "SM", + "SN", + "SO", + "SR", + "ST", + "SV", + "SY", + "SZ", + "TC", + "TD", + "TG", + "TH", + "TJ", + "TK", + "TL", + "TM", + "TN", + "TO", + "TR", + "TT", + "TV", + "TW", + "TZ", + "UA", + "UG", + "US", + "UY", + "UZ", + "VA", + "VC", + "VE", + "VG", + "VI", + "VN", + "VU", + "WF", + "WS", + "YE", + "YT", + "ZA", + "ZM", + "ZW" + ] + }, + "DNSSec":{"type":"string"}, + "DeleteTagsForDomainRequest":{ + "type":"structure", + "required":[ + "DomainName", + "TagsToDelete" + ], + "members":{ + "DomainName":{"shape":"DomainName"}, + "TagsToDelete":{"shape":"TagKeyList"} + } + }, + "DeleteTagsForDomainResponse":{ + "type":"structure", + "members":{ + } + }, + "DisableDomainAutoRenewRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"} + } + }, + "DisableDomainAutoRenewResponse":{ + "type":"structure", + "members":{ + } + }, + "DisableDomainTransferLockRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"} + } + }, + "DisableDomainTransferLockResponse":{ + "type":"structure", + "required":["OperationId"], + "members":{ + "OperationId":{"shape":"OperationId"} + } + }, + "DomainAuthCode":{ + "type":"string", + "max":1024, + "sensitive":true + }, + "DomainAvailability":{ + "type":"string", + "enum":[ + "AVAILABLE", + "AVAILABLE_RESERVED", + "AVAILABLE_PREORDER", + "UNAVAILABLE", + "UNAVAILABLE_PREMIUM", + "UNAVAILABLE_RESTRICTED", + "RESERVED", + "DONT_KNOW" + ] + }, + "DomainLimitExceeded":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "DomainName":{ + "type":"string", + 
"max":255, + "pattern":"[a-zA-Z0-9_\\-.]*" + }, + "DomainStatus":{"type":"string"}, + "DomainStatusList":{ + "type":"list", + "member":{"shape":"DomainStatus"} + }, + "DomainSummary":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"}, + "AutoRenew":{"shape":"Boolean"}, + "TransferLock":{"shape":"Boolean"}, + "Expiry":{"shape":"Timestamp"} + } + }, + "DomainSummaryList":{ + "type":"list", + "member":{"shape":"DomainSummary"} + }, + "DuplicateRequest":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "DurationInYears":{ + "type":"integer", + "max":10, + "min":1 + }, + "Email":{ + "type":"string", + "max":254 + }, + "EnableDomainAutoRenewRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"} + } + }, + "EnableDomainAutoRenewResponse":{ + "type":"structure", + "members":{ + } + }, + "EnableDomainTransferLockRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"} + } + }, + "EnableDomainTransferLockResponse":{ + "type":"structure", + "required":["OperationId"], + "members":{ + "OperationId":{"shape":"OperationId"} + } + }, + "ErrorMessage":{"type":"string"}, + "ExtraParam":{ + "type":"structure", + "required":[ + "Name", + "Value" + ], + "members":{ + "Name":{"shape":"ExtraParamName"}, + "Value":{"shape":"ExtraParamValue"} + } + }, + "ExtraParamList":{ + "type":"list", + "member":{"shape":"ExtraParam"} + }, + "ExtraParamName":{ + "type":"string", + "enum":[ + "DUNS_NUMBER", + "BRAND_NUMBER", + "BIRTH_DEPARTMENT", + "BIRTH_DATE_IN_YYYY_MM_DD", + "BIRTH_COUNTRY", + "BIRTH_CITY", + "DOCUMENT_NUMBER", + "AU_ID_NUMBER", + "AU_ID_TYPE", + "CA_LEGAL_TYPE", + "CA_BUSINESS_ENTITY_TYPE", + "ES_IDENTIFICATION", + "ES_IDENTIFICATION_TYPE", + "ES_LEGAL_FORM", + "FI_BUSINESS_NUMBER", + "FI_ID_NUMBER", + "IT_PIN", + "RU_PASSPORT_DATA", + "SE_ID_NUMBER", + "SG_ID_NUMBER", + "VAT_NUMBER" + ] + }, + "ExtraParamValue":{ + "type":"string", + "max":2048 + }, + "FIAuthKey":{"type":"string"}, + "GetContactReachabilityStatusRequest":{ + "type":"structure", + "members":{ + "domainName":{"shape":"DomainName"} + } + }, + "GetContactReachabilityStatusResponse":{ + "type":"structure", + "members":{ + "domainName":{"shape":"DomainName"}, + "status":{"shape":"ReachabilityStatus"} + } + }, + "GetDomainDetailRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"} + } + }, + "GetDomainDetailResponse":{ + "type":"structure", + "required":[ + "DomainName", + "Nameservers", + "AdminContact", + "RegistrantContact", + "TechContact" + ], + "members":{ + "DomainName":{"shape":"DomainName"}, + "Nameservers":{"shape":"NameserverList"}, + "AutoRenew":{"shape":"Boolean"}, + "AdminContact":{"shape":"ContactDetail"}, + "RegistrantContact":{"shape":"ContactDetail"}, + "TechContact":{"shape":"ContactDetail"}, + "AdminPrivacy":{"shape":"Boolean"}, + "RegistrantPrivacy":{"shape":"Boolean"}, + "TechPrivacy":{"shape":"Boolean"}, + "RegistrarName":{"shape":"RegistrarName"}, + "WhoIsServer":{"shape":"RegistrarWhoIsServer"}, + "RegistrarUrl":{"shape":"RegistrarUrl"}, + "AbuseContactEmail":{"shape":"Email"}, + "AbuseContactPhone":{"shape":"ContactNumber"}, + "RegistryDomainId":{"shape":"RegistryDomainId"}, + "CreationDate":{"shape":"Timestamp"}, + "UpdatedDate":{"shape":"Timestamp"}, + "ExpirationDate":{"shape":"Timestamp"}, + "Reseller":{"shape":"Reseller"}, + 
"DnsSec":{"shape":"DNSSec"}, + "StatusList":{"shape":"DomainStatusList"} + } + }, + "GetOperationDetailRequest":{ + "type":"structure", + "required":["OperationId"], + "members":{ + "OperationId":{"shape":"OperationId"} + } + }, + "GetOperationDetailResponse":{ + "type":"structure", + "members":{ + "OperationId":{"shape":"OperationId"}, + "Status":{"shape":"OperationStatus"}, + "Message":{"shape":"ErrorMessage"}, + "DomainName":{"shape":"DomainName"}, + "Type":{"shape":"OperationType"}, + "SubmittedDate":{"shape":"Timestamp"} + } + }, + "GlueIp":{ + "type":"string", + "max":45 + }, + "GlueIpList":{ + "type":"list", + "member":{"shape":"GlueIp"} + }, + "HostName":{ + "type":"string", + "max":255, + "pattern":"[a-zA-Z0-9_\\-.]*" + }, + "InvalidInput":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "LangCode":{ + "type":"string", + "max":3 + }, + "ListDomainsRequest":{ + "type":"structure", + "members":{ + "Marker":{"shape":"PageMarker"}, + "MaxItems":{"shape":"PageMaxItems"} + } + }, + "ListDomainsResponse":{ + "type":"structure", + "required":["Domains"], + "members":{ + "Domains":{"shape":"DomainSummaryList"}, + "NextPageMarker":{"shape":"PageMarker"} + } + }, + "ListOperationsRequest":{ + "type":"structure", + "members":{ + "Marker":{"shape":"PageMarker"}, + "MaxItems":{"shape":"PageMaxItems"} + } + }, + "ListOperationsResponse":{ + "type":"structure", + "required":["Operations"], + "members":{ + "Operations":{"shape":"OperationSummaryList"}, + "NextPageMarker":{"shape":"PageMarker"} + } + }, + "ListTagsForDomainRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"} + } + }, + "ListTagsForDomainResponse":{ + "type":"structure", + "required":["TagList"], + "members":{ + "TagList":{"shape":"TagList"} + } + }, + "Nameserver":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"HostName"}, + "GlueIps":{"shape":"GlueIpList"} + } + }, + "NameserverList":{ + "type":"list", + "member":{"shape":"Nameserver"} + }, + "OperationId":{ + "type":"string", + "max":255 + }, + "OperationLimitExceeded":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "OperationStatus":{ + "type":"string", + "enum":[ + "SUBMITTED", + "IN_PROGRESS", + "ERROR", + "SUCCESSFUL", + "FAILED" + ] + }, + "OperationSummary":{ + "type":"structure", + "required":[ + "OperationId", + "Status", + "Type", + "SubmittedDate" + ], + "members":{ + "OperationId":{"shape":"OperationId"}, + "Status":{"shape":"OperationStatus"}, + "Type":{"shape":"OperationType"}, + "SubmittedDate":{"shape":"Timestamp"} + } + }, + "OperationSummaryList":{ + "type":"list", + "member":{"shape":"OperationSummary"} + }, + "OperationType":{ + "type":"string", + "enum":[ + "REGISTER_DOMAIN", + "DELETE_DOMAIN", + "TRANSFER_IN_DOMAIN", + "UPDATE_DOMAIN_CONTACT", + "UPDATE_NAMESERVER", + "CHANGE_PRIVACY_PROTECTION", + "DOMAIN_LOCK" + ] + }, + "PageMarker":{ + "type":"string", + "max":4096 + }, + "PageMaxItems":{ + "type":"integer", + "max":100 + }, + "ReachabilityStatus":{ + "type":"string", + "enum":[ + "PENDING", + "DONE", + "EXPIRED" + ] + }, + "RegisterDomainRequest":{ + "type":"structure", + "required":[ + "DomainName", + "DurationInYears", + "AdminContact", + "RegistrantContact", + "TechContact" + ], + "members":{ + "DomainName":{"shape":"DomainName"}, + "IdnLangCode":{"shape":"LangCode"}, + "DurationInYears":{"shape":"DurationInYears"}, + 
"AutoRenew":{"shape":"Boolean"}, + "AdminContact":{"shape":"ContactDetail"}, + "RegistrantContact":{"shape":"ContactDetail"}, + "TechContact":{"shape":"ContactDetail"}, + "PrivacyProtectAdminContact":{"shape":"Boolean"}, + "PrivacyProtectRegistrantContact":{"shape":"Boolean"}, + "PrivacyProtectTechContact":{"shape":"Boolean"} + } + }, + "RegisterDomainResponse":{ + "type":"structure", + "required":["OperationId"], + "members":{ + "OperationId":{"shape":"OperationId"} + } + }, + "RegistrarName":{"type":"string"}, + "RegistrarUrl":{"type":"string"}, + "RegistrarWhoIsServer":{"type":"string"}, + "RegistryDomainId":{"type":"string"}, + "Reseller":{"type":"string"}, + "ResendContactReachabilityEmailRequest":{ + "type":"structure", + "members":{ + "domainName":{"shape":"DomainName"} + } + }, + "ResendContactReachabilityEmailResponse":{ + "type":"structure", + "members":{ + "domainName":{"shape":"DomainName"}, + "emailAddress":{"shape":"Email"}, + "isAlreadyVerified":{"shape":"Boolean"} + } + }, + "RetrieveDomainAuthCodeRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"} + } + }, + "RetrieveDomainAuthCodeResponse":{ + "type":"structure", + "required":["AuthCode"], + "members":{ + "AuthCode":{"shape":"DomainAuthCode"} + } + }, + "State":{ + "type":"string", + "max":255 + }, + "TLDRulesViolation":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "Tag":{ + "type":"structure", + "members":{ + "Key":{"shape":"TagKey"}, + "Value":{"shape":"TagValue"} + } + }, + "TagKey":{"type":"string"}, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"} + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"} + }, + "TagValue":{"type":"string"}, + "Timestamp":{"type":"timestamp"}, + "TransferDomainRequest":{ + "type":"structure", + "required":[ + "DomainName", + "DurationInYears", + "AdminContact", + "RegistrantContact", + "TechContact" + ], + "members":{ + "DomainName":{"shape":"DomainName"}, + "IdnLangCode":{"shape":"LangCode"}, + "DurationInYears":{"shape":"DurationInYears"}, + "Nameservers":{"shape":"NameserverList"}, + "AuthCode":{"shape":"DomainAuthCode"}, + "AutoRenew":{"shape":"Boolean"}, + "AdminContact":{"shape":"ContactDetail"}, + "RegistrantContact":{"shape":"ContactDetail"}, + "TechContact":{"shape":"ContactDetail"}, + "PrivacyProtectAdminContact":{"shape":"Boolean"}, + "PrivacyProtectRegistrantContact":{"shape":"Boolean"}, + "PrivacyProtectTechContact":{"shape":"Boolean"} + } + }, + "TransferDomainResponse":{ + "type":"structure", + "required":["OperationId"], + "members":{ + "OperationId":{"shape":"OperationId"} + } + }, + "UnsupportedTLD":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "UpdateDomainContactPrivacyRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"}, + "AdminPrivacy":{"shape":"Boolean"}, + "RegistrantPrivacy":{"shape":"Boolean"}, + "TechPrivacy":{"shape":"Boolean"} + } + }, + "UpdateDomainContactPrivacyResponse":{ + "type":"structure", + "required":["OperationId"], + "members":{ + "OperationId":{"shape":"OperationId"} + } + }, + "UpdateDomainContactRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"}, + "AdminContact":{"shape":"ContactDetail"}, + "RegistrantContact":{"shape":"ContactDetail"}, + "TechContact":{"shape":"ContactDetail"} + } + }, + "UpdateDomainContactResponse":{ 
+ "type":"structure", + "required":["OperationId"], + "members":{ + "OperationId":{"shape":"OperationId"} + } + }, + "UpdateDomainNameserversRequest":{ + "type":"structure", + "required":[ + "DomainName", + "Nameservers" + ], + "members":{ + "DomainName":{"shape":"DomainName"}, + "FIAuthKey":{"shape":"FIAuthKey"}, + "Nameservers":{"shape":"NameserverList"} + } + }, + "UpdateDomainNameserversResponse":{ + "type":"structure", + "required":["OperationId"], + "members":{ + "OperationId":{"shape":"OperationId"} + } + }, + "UpdateTagsForDomainRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"}, + "TagsToUpdate":{"shape":"TagList"} + } + }, + "UpdateTagsForDomainResponse":{ + "type":"structure", + "members":{ + } + }, + "ZipCode":{ + "type":"string", + "max":255 + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/route53domains/2014-05-15/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/route53domains/2014-05-15/docs-2.json new file mode 100644 index 000000000..2d5206b2e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/route53domains/2014-05-15/docs-2.json @@ -0,0 +1,654 @@ +{ + "version": "2.0", + "service": null, + "operations": { + "CheckDomainAvailability": "

    This operation checks the availability of one domain name. Note that if the availability status of a domain is pending, you must submit another request to determine the availability of the domain name.
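    A minimal sketch of issuing this check through the Go client that this patch vendors (github.com/aws/aws-sdk-go/service/route53domains); the credentials setup, the us-east-1 region, and example.com are assumptions, not part of the model:

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/route53domains"
)

func main() {
    // Route 53 Domains is served from the us-east-1 endpoint.
    sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
    svc := route53domains.New(sess)

    out, err := svc.CheckDomainAvailability(&route53domains.CheckDomainAvailabilityInput{
        DomainName: aws.String("example.com"),
    })
    if err != nil {
        log.Fatal(err)
    }
    // A pending answer is not definitive; resubmit the check in that case.
    fmt.Println(aws.StringValue(out.Availability))
}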

    ", + "DeleteTagsForDomain": "

    This operation deletes the specified tags for a domain.

    All tag operations are eventually consistent; subsequent operations may not immediately represent all issued operations.

    ", + "DisableDomainAutoRenew": "

    This operation disables automatic renewal of domain registration for the specified domain.

    Caution! Amazon Route 53 doesn't have a manual renewal process, so if you disable automatic renewal, registration for the domain will not be renewed when the expiration date passes, and you will lose control of the domain name.", + "DisableDomainTransferLock": "

    This operation removes the transfer lock on the domain (specifically the clientTransferProhibited status) to allow domain transfers. We recommend you refrain from performing this action unless you intend to transfer the domain to a different registrar. Successful submission returns an operation ID that you can use to track the progress and completion of the action. If the request is not completed successfully, the domain registrant will be notified by email.

    ", + "EnableDomainAutoRenew": "

    This operation configures Amazon Route 53 to automatically renew the specified domain before the domain registration expires. The cost of renewing your domain registration is billed to your AWS account.

    The period during which you can renew a domain name varies by TLD. For a list of TLDs and their renewal policies, see \"Renewal, restoration, and deletion times\" on the website for our registrar partner, Gandi. Route 53 requires that you renew before the end of the renewal period that is listed on the Gandi website so we can complete processing before the deadline.
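    As a hedged sketch against the generated client (example.com is illustrative), enabling auto-renew is a single call whose response shape carries no members:

package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/route53domains"
)

func main() {
    svc := route53domains.New(session.Must(session.NewSession(
        &aws.Config{Region: aws.String("us-east-1")})))

    // EnableDomainAutoRenewResponse has no fields, so only the error matters.
    if _, err := svc.EnableDomainAutoRenew(&route53domains.EnableDomainAutoRenewInput{
        DomainName: aws.String("example.com"),
    }); err != nil {
        log.Fatal(err)
    }
}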

    ", + "EnableDomainTransferLock": "

    This operation sets the transfer lock on the domain (specifically the clientTransferProhibited status) to prevent domain transfers. Successful submission returns an operation ID that you can use to track the progress and completion of the action. If the request is not completed successfully, the domain registrant will be notified by email.

    ", + "GetContactReachabilityStatus": "

    For operations that require confirmation that the email address for the registrant contact is valid, such as registering a new domain, this operation returns information about whether the registrant contact has responded.

    If you want us to resend the email, use the ResendContactReachabilityEmail operation.
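    A rough sketch of checking the reachability status and resending the email when the confirmation window has lapsed; the domain name is an assumption, and the status values come from the ReachabilityStatus enum defined in this model:

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/route53domains"
)

func main() {
    svc := route53domains.New(session.Must(session.NewSession(
        &aws.Config{Region: aws.String("us-east-1")})))

    out, err := svc.GetContactReachabilityStatus(&route53domains.GetContactReachabilityStatusInput{
        DomainName: aws.String("example.com"),
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("reachability:", aws.StringValue(out.Status))

    // EXPIRED means the time limit passed before the registrant responded,
    // so resending the confirmation email is the usual next step.
    if aws.StringValue(out.Status) == "EXPIRED" {
        if _, err := svc.ResendContactReachabilityEmail(&route53domains.ResendContactReachabilityEmailInput{
            DomainName: aws.String("example.com"),
        }); err != nil {
            log.Fatal(err)
        }
    }
}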

    ", + "GetDomainDetail": "

    This operation returns detailed information about the domain. The domain's contact information is also returned as part of the output.

    ", + "GetOperationDetail": "

    This operation returns the current status of an operation that is not completed.
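    Since RegisterDomain, TransferDomain, and the update operations all return an operation ID, a small polling helper is a natural companion. The sketch below assumes a 30-second poll interval and an ID obtained from a prior call; the status strings are the OperationStatus enum values from this model:

package main

import (
    "fmt"
    "log"
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/route53domains"
)

// waitForOperation polls GetOperationDetail until the operation leaves the
// SUBMITTED and IN_PROGRESS states.
func waitForOperation(svc *route53domains.Route53Domains, id string) error {
    for {
        out, err := svc.GetOperationDetail(&route53domains.GetOperationDetailInput{
            OperationId: aws.String(id),
        })
        if err != nil {
            return err
        }
        switch aws.StringValue(out.Status) {
        case "SUCCESSFUL":
            return nil
        case "ERROR", "FAILED":
            return fmt.Errorf("operation %s: %s", id, aws.StringValue(out.Message))
        }
        time.Sleep(30 * time.Second) // SUBMITTED or IN_PROGRESS; poll again
    }
}

func main() {
    svc := route53domains.New(session.Must(session.NewSession(
        &aws.Config{Region: aws.String("us-east-1")})))
    // The operation ID below is a placeholder for a value returned elsewhere.
    if err := waitForOperation(svc, "operation-id-from-a-prior-call"); err != nil {
        log.Fatal(err)
    }
}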

    ", + "ListDomains": "

    This operation returns all the domain names registered with Amazon Route 53 for the current AWS account.
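    The paginators-1.json file added later in this patch wires ListDomains to the SDK's page iterator (Marker in, NextPageMarker out), so the generated client exposes ListDomainsPages. A rough usage sketch, assuming configured credentials:

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/route53domains"
)

func main() {
    svc := route53domains.New(session.Must(session.NewSession(
        &aws.Config{Region: aws.String("us-east-1")})))

    // ListDomainsPages follows NextPageMarker automatically.
    err := svc.ListDomainsPages(&route53domains.ListDomainsInput{},
        func(page *route53domains.ListDomainsOutput, lastPage bool) bool {
            for _, d := range page.Domains {
                fmt.Printf("%s (auto-renew: %t)\n",
                    aws.StringValue(d.DomainName), aws.BoolValue(d.AutoRenew))
            }
            return true // keep paging
        })
    if err != nil {
        log.Fatal(err)
    }
}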

    ", + "ListOperations": "

    This operation returns the operation IDs of operations that are not yet complete.

    ", + "ListTagsForDomain": "

    This operation returns all of the tags that are associated with the specified domain.

    All tag operations are eventually consistent; subsequent operations may not immediately represent all issued operations.

    ", + "RegisterDomain": "

    This operation registers a domain. Domains are registered by the AWS registrar partner, Gandi. For some top-level domains (TLDs), this operation requires extra parameters.

    When you register a domain, Amazon Route 53 does the following:

    • Creates an Amazon Route 53 hosted zone that has the same name as the domain. Amazon Route 53 assigns four name servers to your hosted zone and automatically updates your domain registration with the names of these name servers.
    • Enables autorenew, so your domain registration will renew automatically each year. We'll notify you in advance of the renewal date so you can choose whether to renew the registration.
    • Optionally enables privacy protection, so WHOIS queries return contact information for our registrar partner, Gandi, instead of the information you entered for registrant, admin, and tech contacts.
    • If registration is successful, returns an operation ID that you can use to track the progress and completion of the action. If the request is not completed successfully, the domain registrant is notified by email.
    • Charges your AWS account an amount based on the top-level domain. For more information, see Amazon Route 53 Pricing. A minimal request sketch follows this list.
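    The sketch below builds the required contacts and registers a domain through the generated client; every concrete value (names, address, phone, example.com) is an illustrative assumption, and the operation ID it prints feeds GetOperationDetail:

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/route53domains"
)

func main() {
    svc := route53domains.New(session.Must(session.NewSession(
        &aws.Config{Region: aws.String("us-east-1")})))

    contact := &route53domains.ContactDetail{
        ContactType:  aws.String("PERSON"),
        FirstName:    aws.String("Jane"),
        LastName:     aws.String("Doe"),
        Email:        aws.String("jane@example.com"),
        PhoneNumber:  aws.String("+1.1234567890"), // "+[dialing code].[number]" format
        AddressLine1: aws.String("123 Any Street"),
        City:         aws.String("Anytown"),
        State:        aws.String("WA"),
        CountryCode:  aws.String("US"),
        ZipCode:      aws.String("98101"),
    }

    out, err := svc.RegisterDomain(&route53domains.RegisterDomainInput{
        DomainName:        aws.String("example.com"),
        DurationInYears:   aws.Int64(1), // 1-10 years, depending on the TLD
        AdminContact:      contact,
        RegistrantContact: contact,
        TechContact:       contact,
        // AutoRenew and the PrivacyProtect* flags default to true when omitted.
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("operation:", aws.StringValue(out.OperationId))
}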
    ", + "ResendContactReachabilityEmail": "

    For operations that require confirmation that the email address for the registrant contact is valid, such as registering a new domain, this operation resends the confirmation email to the current email address for the registrant contact.

    ", + "RetrieveDomainAuthCode": "

    This operation returns the AuthCode for the domain. To transfer a domain to another registrar, you provide this value to the new registrar.

    ", + "TransferDomain": "

    This operation transfers a domain from another registrar to Amazon Route 53. When the transfer is complete, the domain is registered with the AWS registrar partner, Gandi.

    For transfer requirements, a detailed procedure, and information about viewing the status of a domain transfer, see Transferring Registration for a Domain to Amazon Route 53 in the Amazon Route 53 Developer Guide.

    If the registrar for your domain is also the DNS service provider for the domain, we highly recommend that you consider transferring your DNS service to Amazon Route 53 or to another DNS service provider before you transfer your registration. Some registrars provide free DNS service when you purchase a domain registration. When you transfer the registration, the previous registrar will not renew your domain registration and could end your DNS service at any time.

    Caution! If the registrar for your domain is also the DNS service provider for the domain and you don't transfer DNS service to another provider, your website, email, and the web applications associated with the domain might become unavailable.

    If the transfer is successful, this method returns an operation ID that you can use to track the progress and completion of the action. If the transfer doesn't complete successfully, the domain registrant will be notified by email.
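    A hedged transfer sketch; the auth code placeholder stands in for the value obtained from the current registrar, and the abbreviated contact would be filled in exactly as in the RegisterDomain sketch above:

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/route53domains"
)

func main() {
    svc := route53domains.New(session.Must(session.NewSession(
        &aws.Config{Region: aws.String("us-east-1")})))

    // Fill in full contact details as in the RegisterDomain sketch.
    contact := &route53domains.ContactDetail{
        ContactType: aws.String("PERSON"),
        FirstName:   aws.String("Jane"),
        LastName:    aws.String("Doe"),
        Email:       aws.String("jane@example.com"),
        PhoneNumber: aws.String("+1.1234567890"),
    }

    out, err := svc.TransferDomain(&route53domains.TransferDomainInput{
        DomainName:      aws.String("example.com"),
        DurationInYears: aws.Int64(1),
        // Placeholder: the losing registrar supplies the real auth code.
        AuthCode:          aws.String("current-registrar-auth-code"),
        AdminContact:      contact,
        RegistrantContact: contact,
        TechContact:       contact,
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("operation:", aws.StringValue(out.OperationId))
}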

    ", + "UpdateDomainContact": "

    This operation updates the contact information for a particular domain. Information for at least one contact (registrant, administrator, or technical) must be supplied for update.

    If the update is successful, this method returns an operation ID that you can use to track the progress and completion of the action. If the request is not completed successfully, the domain registrant will be notified by email.

    ", + "UpdateDomainContactPrivacy": "

    This operation updates the specified domain contact's privacy setting. When the privacy option is enabled, personal information such as postal or email address is hidden from the results of a public WHOIS query. The privacy services are provided by the AWS registrar, Gandi. For more information, see the Gandi privacy features.

    This operation only affects the privacy of the specified contact type (registrant, administrator, or tech). Successful acceptance returns an operation ID that you can use with GetOperationDetail to track the progress and completion of the action. If the request is not completed successfully, the domain registrant will be notified by email.
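    A minimal sketch of enabling WHOIS privacy for all three contact types in one call (example.com assumed); per the request shape, an omitted flag leaves that contact's setting unchanged:

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/route53domains"
)

func main() {
    svc := route53domains.New(session.Must(session.NewSession(
        &aws.Config{Region: aws.String("us-east-1")})))

    out, err := svc.UpdateDomainContactPrivacy(&route53domains.UpdateDomainContactPrivacyInput{
        DomainName:        aws.String("example.com"),
        AdminPrivacy:      aws.Bool(true),
        RegistrantPrivacy: aws.Bool(true),
        TechPrivacy:       aws.Bool(true),
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("track with GetOperationDetail:", aws.StringValue(out.OperationId))
}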

    ", + "UpdateDomainNameservers": "

    This operation replaces the current set of name servers for the domain with the specified set of name servers. If you use Amazon Route 53 as your DNS service, specify the four name servers in the delegation set for the hosted zone for the domain.

    If successful, this operation returns an operation ID that you can use to track the progress and completion of the action. If the request is not completed successfully, the domain registrant will be notified by email.

    ", + "UpdateTagsForDomain": "

    This operation adds or updates tags for a specified domain.

    All tag operations are eventually consistent; subsequent operations may not immediately represent all issued operations.

    " + }, + "shapes": { + "AddressLine": { + "base": null, + "refs": { + "ContactDetail$AddressLine1": "

    First line of the contact's address.

    Type: String

    Default: None

    Constraints: Maximum 255 characters.

    Parents: RegistrantContact, AdminContact, TechContact

    Required: Yes

    ", + "ContactDetail$AddressLine2": "

    Second line of contact's address, if any.

    Type: String

    Default: None

    Constraints: Maximum 255 characters.

    Parents: RegistrantContact, AdminContact, TechContact

    Required: No

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "DomainSummary$AutoRenew": "

    Indicates whether the domain is automatically renewed upon expiration.

    Type: Boolean

    Valid values: True | False

    ", + "DomainSummary$TransferLock": "

    Indicates whether a domain is locked from unauthorized transfer to another party.

    Type: Boolean

    Valid values: True | False

    ", + "GetDomainDetailResponse$AutoRenew": "

    Specifies whether the domain registration is set to renew automatically.

    Type: Boolean

    ", + "GetDomainDetailResponse$AdminPrivacy": "

    Specifies whether contact information for the admin contact is concealed from WHOIS queries. If the value is true, WHOIS (\"who is\") queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.

    Type: Boolean

    ", + "GetDomainDetailResponse$RegistrantPrivacy": "

    Specifies whether contact information for the registrant contact is concealed from WHOIS queries. If the value is true, WHOIS (\"who is\") queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.

    Type: Boolean

    ", + "GetDomainDetailResponse$TechPrivacy": "

    Specifies whether contact information for the tech contact is concealed from WHOIS queries. If the value is true, WHOIS (\"who is\") queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.

    Type: Boolean

    ", + "RegisterDomainRequest$AutoRenew": "

    Indicates whether the domain will be automatically renewed (true) or not (false). Autorenewal only takes effect after the account is charged.

    Type: Boolean

    Valid values: true | false

    Default: true

    Required: No

    ", + "RegisterDomainRequest$PrivacyProtectAdminContact": "

    Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.

    Type: Boolean

    Default: true

    Valid values: true | false

    Required: No

    ", + "RegisterDomainRequest$PrivacyProtectRegistrantContact": "

    Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.

    Type: Boolean

    Default: true

    Valid values: true | false

    Required: No

    ", + "RegisterDomainRequest$PrivacyProtectTechContact": "

    Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.

    Type: Boolean

    Default: true

    Valid values: true | false

    Required: No

    ", + "ResendContactReachabilityEmailResponse$isAlreadyVerified": "

    True if the email address for the registrant contact has already been verified, and false otherwise. If the email address has already been verified, we don't send another confirmation email.

    ", + "TransferDomainRequest$AutoRenew": "

    Indicates whether the domain will be automatically renewed (true) or not (false). Autorenewal only takes effect after the account is charged.

    Type: Boolean

    Valid values: true | false

    Default: true

    Required: No

    ", + "TransferDomainRequest$PrivacyProtectAdminContact": "

    Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.

    Type: Boolean

    Default: true

    Valid values: true | false

    Required: No

    ", + "TransferDomainRequest$PrivacyProtectRegistrantContact": "

    Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.

    Type: Boolean

    Default: true

    Valid values: true | false

    Required: No

    ", + "TransferDomainRequest$PrivacyProtectTechContact": "

    Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.

    Type: Boolean

    Default: true

    Valid values: true | false

    Required: No

    ", + "UpdateDomainContactPrivacyRequest$AdminPrivacy": "

    Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.

    Type: Boolean

    Default: None

    Valid values: true | false

    Required: No

    ", + "UpdateDomainContactPrivacyRequest$RegistrantPrivacy": "

    Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.

    Type: Boolean

    Default: None

    Valid values: true | false

    Required: No

    ", + "UpdateDomainContactPrivacyRequest$TechPrivacy": "

    Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.

    Type: Boolean

    Default: None

    Valid values: true | false

    Required: No

    " + } + }, + "CheckDomainAvailabilityRequest": { + "base": "

    The CheckDomainAvailability request contains the following elements.

    ", + "refs": { + } + }, + "CheckDomainAvailabilityResponse": { + "base": "

    The CheckDomainAvailability response includes the following elements.

    ", + "refs": { + } + }, + "City": { + "base": null, + "refs": { + "ContactDetail$City": "

    The city of the contact's address.

    Type: String

    Default: None

    Constraints: Maximum 255 characters.

    Parents: RegistrantContact, AdminContact, TechContact

    Required: Yes

    " + } + }, + "ContactDetail": { + "base": "

    ContactDetail includes the following elements.

    ", + "refs": { + "GetDomainDetailResponse$AdminContact": "

    Provides details about the domain administrative contact.

    Type: Complex

    Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, Email, Fax, ExtraParams

    ", + "GetDomainDetailResponse$RegistrantContact": "

    Provides details about the domain registrant.

    Type: Complex

    Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, Email, Fax, ExtraParams

    ", + "GetDomainDetailResponse$TechContact": "

    Provides details about the domain technical contact.

    Type: Complex

    Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, Email, Fax, ExtraParams

    ", + "RegisterDomainRequest$AdminContact": "

    Provides detailed contact information.

    Type: Complex

    Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, Email, Fax, ExtraParams

    Required: Yes

    ", + "RegisterDomainRequest$RegistrantContact": "

    Provides detailed contact information.

    Type: Complex

    Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, Email, Fax, ExtraParams

    Required: Yes

    ", + "RegisterDomainRequest$TechContact": "

    Provides detailed contact information.

    Type: Complex

    Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, Email, Fax, ExtraParams

    Required: Yes

    ", + "TransferDomainRequest$AdminContact": "

    Provides detailed contact information.

    Type: Complex

    Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, Email, Fax, ExtraParams

    Required: Yes

    ", + "TransferDomainRequest$RegistrantContact": "

    Provides detailed contact information.

    Type: Complex

    Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, Email, Fax, ExtraParams

    Required: Yes

    ", + "TransferDomainRequest$TechContact": "

    Provides detailed contact information.

    Type: Complex

    Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, Email, Fax, ExtraParams

    Required: Yes

    ", + "UpdateDomainContactRequest$AdminContact": "

    Provides detailed contact information.

    Type: Complex

    Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, Email, Fax, ExtraParams

    Required: Yes

    ", + "UpdateDomainContactRequest$RegistrantContact": "

    Provides detailed contact information.

    Type: Complex

    Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, Email, Fax, ExtraParams

    Required: Yes

    ", + "UpdateDomainContactRequest$TechContact": "

    Provides detailed contact information.

    Type: Complex

    Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, Email, Fax, ExtraParams

    Required: Yes

    " + } + }, + "ContactName": { + "base": null, + "refs": { + "ContactDetail$FirstName": "

    First name of contact.

    Type: String

    Default: None

    Constraints: Maximum 255 characters.

    Parents: RegistrantContact, AdminContact, TechContact

    Required: Yes

    ", + "ContactDetail$LastName": "

    Last name of contact.

    Type: String

    Default: None

    Constraints: Maximum 255 characters.

    Parents: RegistrantContact, AdminContact, TechContact

    Required: Yes

    ", + "ContactDetail$OrganizationName": "

    Name of the organization for contact types other than PERSON.

    Type: String

    Default: None

    Constraints: Maximum 255 characters. Contact type must not be PERSON.

    Parents: RegistrantContact, AdminContact, TechContact

    Required: No

    " + } + }, + "ContactNumber": { + "base": null, + "refs": { + "ContactDetail$PhoneNumber": "

    The phone number of the contact.

    Type: String

    Default: None

    Constraints: Phone number must be specified in the format \"+[country dialing code].[number including any area code]\". For example, a US phone number might appear as \"+1.1234567890\".

    Parents: RegistrantContact, AdminContact, TechContact

    Required: Yes

    ", + "ContactDetail$Fax": "

    Fax number of the contact.

    Type: String

    Default: None

    Constraints: Phone number must be specified in the format \"+[country dialing code].[number including any area code]\". For example, a US phone number might appear as \"+1.1234567890\".

    Parents: RegistrantContact, AdminContact, TechContact

    Required: No

    ", + "GetDomainDetailResponse$AbuseContactPhone": "

    Phone number for reporting abuse.

    Type: String

    " + } + }, + "ContactType": { + "base": null, + "refs": { + "ContactDetail$ContactType": "

    Indicates whether the contact is a person, company, association, or public organization. If you choose an option other than PERSON, you must enter an organization name, and you can't enable privacy protection for the contact.

    Type: String

    Default: None

    Constraints: Maximum 255 characters.

    Valid values: PERSON | COMPANY | ASSOCIATION | PUBLIC_BODY

    Parents: RegistrantContact, AdminContact, TechContact

    Required: Yes

    " + } + }, + "CountryCode": { + "base": null, + "refs": { + "ContactDetail$CountryCode": "

    Code for the country of the contact's address.

    Type: String

    Default: None

    Constraints: Maximum 255 characters.

    Parents: RegistrantContact, AdminContact, TechContact

    Required: Yes

    " + } + }, + "DNSSec": { + "base": null, + "refs": { + "GetDomainDetailResponse$DnsSec": "

    Reserved for future use.

    " + } + }, + "DeleteTagsForDomainRequest": { + "base": "

    The DeleteTagsForDomainRequest includes the following elements.

    ", + "refs": { + } + }, + "DeleteTagsForDomainResponse": { + "base": null, + "refs": { + } + }, + "DisableDomainAutoRenewRequest": { + "base": null, + "refs": { + } + }, + "DisableDomainAutoRenewResponse": { + "base": null, + "refs": { + } + }, + "DisableDomainTransferLockRequest": { + "base": "

    The DisableDomainTransferLock request includes the following element.

    ", + "refs": { + } + }, + "DisableDomainTransferLockResponse": { + "base": "

    The DisableDomainTransferLock response includes the following element.

    ", + "refs": { + } + }, + "DomainAuthCode": { + "base": null, + "refs": { + "RetrieveDomainAuthCodeResponse$AuthCode": "

    The authorization code for the domain.

    Type: String

    ", + "TransferDomainRequest$AuthCode": "

    The authorization code for the domain. You get this value from the current registrar.

    Type: String

    Required: Yes

    " + } + }, + "DomainAvailability": { + "base": null, + "refs": { + "CheckDomainAvailabilityResponse$Availability": "

    Whether the domain name is available for registering.

    You can only register domains designated as AVAILABLE.

    Type: String

    Valid values:

    • AVAILABLE – The domain name is available.
    • AVAILABLE_RESERVED – The domain name is reserved under specific conditions.
    • AVAILABLE_PREORDER – The domain name is available and can be preordered.
    • UNAVAILABLE – The domain name is not available.
    • UNAVAILABLE_PREMIUM – The domain name is not available.
    • UNAVAILABLE_RESTRICTED – The domain name is forbidden.
    • RESERVED – The domain name has been reserved for another person or organization.
    • DONT_KNOW – The TLD registry didn't reply with a definitive answer about whether the domain name is available. Amazon Route 53 can return this response for a variety of reasons, for example, the registry is performing maintenance. Try again later.
    " + } + }, + "DomainLimitExceeded": { + "base": "

    The number of domains has exceeded the allowed threshold for the account.

    ", + "refs": { + } + }, + "DomainName": { + "base": null, + "refs": { + "CheckDomainAvailabilityRequest$DomainName": "

    The name of a domain.

    Type: String

    Default: None

    Constraints: The domain name can contain only the letters a through z, the numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not supported.

    Required: Yes

    ", + "DeleteTagsForDomainRequest$DomainName": "

    The domain for which you want to delete one or more tags.

    The name of a domain.

    Type: String

    Default: None

    Constraints: The domain name can contain only the letters a through z, the numbers 0 through 9, and hyphen (-). Hyphens are allowed only when they're surrounded by letters, numbers, or other hyphens. You can't specify a hyphen at the beginning or end of a label. To specify an Internationalized Domain Name, you must convert the name to Punycode.

    Required: Yes

    ", + "DisableDomainAutoRenewRequest$DomainName": null, + "DisableDomainTransferLockRequest$DomainName": "

    The name of a domain.

    Type: String

    Default: None

    Constraints: The domain name can contain only the letters a through z, the numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not supported.

    Required: Yes

    ", + "DomainSummary$DomainName": "

    The name of a domain.

    Type: String

    ", + "EnableDomainAutoRenewRequest$DomainName": null, + "EnableDomainTransferLockRequest$DomainName": "

    The name of a domain.

    Type: String

    Default: None

    Constraints: The domain name can contain only the letters a through z, the numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not supported.

    Required: Yes

    ", + "GetContactReachabilityStatusRequest$domainName": "

    The name of the domain for which you want to know whether the registrant contact has confirmed that the email address is valid.

    Type: String

    Default: None

    Required: Yes

    ", + "GetContactReachabilityStatusResponse$domainName": "

    The domain name for which you requested the reachability status.

    ", + "GetDomainDetailRequest$DomainName": "

    The name of a domain.

    Type: String

    Default: None

    Constraints: The domain name can contain only the letters a through z, the numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not supported.

    Required: Yes

    ", + "GetDomainDetailResponse$DomainName": "

    The name of a domain.

    Type: String

    ", + "GetOperationDetailResponse$DomainName": "

    The name of a domain.

    Type: String

    ", + "ListTagsForDomainRequest$DomainName": "

    The domain for which you want to get a list of tags.

    ", + "RegisterDomainRequest$DomainName": "

    The name of a domain.

    Type: String

    Default: None

    Constraints: The domain name can contain only the letters a through z, the numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not supported.

    Required: Yes

    ", + "ResendContactReachabilityEmailRequest$domainName": "

    The name of the domain for which you want Amazon Route 53 to resend a confirmation email to the registrant contact.

    Type: String

    Default: None

    Required: Yes

    ", + "ResendContactReachabilityEmailResponse$domainName": "

    The domain name for which you requested a confirmation email.

    ", + "RetrieveDomainAuthCodeRequest$DomainName": "

    The name of a domain.

    Type: String

    Default: None

    Constraints: The domain name can contain only the letters a through z, the numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not supported.

    Required: Yes

    ", + "TransferDomainRequest$DomainName": "

    The name of a domain.

    Type: String

    Default: None

    Constraints: The domain name can contain only the letters a through z, the numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not supported.

    Required: Yes

    ", + "UpdateDomainContactPrivacyRequest$DomainName": "

    The name of a domain.

    Type: String

    Default: None

    Constraints: The domain name can contain only the letters a through z, the numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not supported.

    Required: Yes

    ", + "UpdateDomainContactRequest$DomainName": "

    The name of a domain.

    Type: String

    Default: None

    Constraints: The domain name can contain only the letters a through z, the numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not supported.

    Required: Yes

    ", + "UpdateDomainNameserversRequest$DomainName": "

    The name of a domain.

    Type: String

    Default: None

    Constraints: The domain name can contain only the letters a through z, the numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not supported.

    Required: Yes

    ", + "UpdateTagsForDomainRequest$DomainName": "

    The domain for which you want to add or update tags.

    The name of a domain.

    Type: String

    Default: None

    Constraints: The domain name can contain only the letters a through z, the numbers 0 through 9, and hyphen (-). Hyphens are allowed only when they're surrounded by letters, numbers, or other hyphens. You can't specify a hyphen at the beginning or end of a label. To specify an Internationalized Domain Name, you must convert the name to Punycode.

    Required: Yes

    " + } + }, + "DomainStatus": { + "base": null, + "refs": { + "DomainStatusList$member": null + } + }, + "DomainStatusList": { + "base": null, + "refs": { + "GetDomainDetailResponse$StatusList": "

    An array of domain name status codes, also known as Extensible Provisioning Protocol (EPP) status codes.

    ICANN, the organization that maintains a central database of domain names, has developed a set of domain name status codes that tell you the status of a variety of operations on a domain name, for example, registering a domain name, transferring a domain name to another registrar, renewing the registration for a domain name, and so on. All registrars use this same set of status codes.

    For a current list of domain name status codes and an explanation of what each code means, go to the ICANN website and search for epp status codes. (Search on the ICANN website; web searches sometimes return an old version of the document.)

    Type: Array of String

    " + } + }, + "DomainSummary": { + "base": null, + "refs": { + "DomainSummaryList$member": null + } + }, + "DomainSummaryList": { + "base": null, + "refs": { + "ListDomainsResponse$Domains": "

    A summary of domains.

    Type: Complex type containing a list of domain summaries.

    Children: AutoRenew, DomainName, Expiry, TransferLock

    " + } + }, + "DuplicateRequest": { + "base": "

    The request is already in progress for the domain.

    ", + "refs": { + } + }, + "DurationInYears": { + "base": null, + "refs": { + "RegisterDomainRequest$DurationInYears": "

    The number of years the domain will be registered. Domains are registered for a minimum of one year. The maximum period depends on the top-level domain.

    Type: Integer

    Default: 1

    Valid values: Integer from 1 to 10

    Required: Yes

    ", + "TransferDomainRequest$DurationInYears": "

    The number of years the domain will be registered. Domains are registered for a minimum of one year. The maximum period depends on the top-level domain.

    Type: Integer

    Default: 1

    Valid values: Integer from 1 to 10

    Required: Yes

    " + } + }, + "Email": { + "base": null, + "refs": { + "ContactDetail$Email": "

    Email address of the contact.

    Type: String

    Default: None

    Constraints: Maximum 254 characters.

    Parents: RegistrantContact, AdminContact, TechContact

    Required: Yes

    ", + "GetDomainDetailResponse$AbuseContactEmail": "

    Email address to contact to report incorrect contact information for a domain, to report that the domain is being used to send spam, to report that someone is cybersquatting on a domain name, or to report some other type of abuse.

    Type: String

    ", + "ResendContactReachabilityEmailResponse$emailAddress": "

    The email address for the registrant contact at the time that we sent the verification email.

    " + } + }, + "EnableDomainAutoRenewRequest": { + "base": null, + "refs": { + } + }, + "EnableDomainAutoRenewResponse": { + "base": null, + "refs": { + } + }, + "EnableDomainTransferLockRequest": { + "base": "

    The EnableDomainTransferLock request includes the following element.

    ", + "refs": { + } + }, + "EnableDomainTransferLockResponse": { + "base": "

    The EnableDomainTransferLock response includes the following elements.

    ", + "refs": { + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "DomainLimitExceeded$message": null, + "DuplicateRequest$message": null, + "GetOperationDetailResponse$Message": "

    Detailed information on the status including possible errors.

    Type: String

    ", + "InvalidInput$message": null, + "OperationLimitExceeded$message": null, + "TLDRulesViolation$message": null, + "UnsupportedTLD$message": null + } + }, + "ExtraParam": { + "base": "

    ExtraParam includes the following elements.

    ", + "refs": { + "ExtraParamList$member": null + } + }, + "ExtraParamList": { + "base": null, + "refs": { + "ContactDetail$ExtraParams": "

    A list of name-value pairs for parameters required by certain top-level domains.

    Type: Complex

    Default: None

    Parents: RegistrantContact, AdminContact, TechContact

    Children: Name, Value

    Required: No

    " + } + }, + "ExtraParamName": { + "base": null, + "refs": { + "ExtraParam$Name": "

    Name of the additional parameter required by the top-level domain.

    Type: String

    Default: None

    Valid values: DUNS_NUMBER | BRAND_NUMBER | BIRTH_DEPARTMENT | BIRTH_DATE_IN_YYYY_MM_DD | BIRTH_COUNTRY | BIRTH_CITY | DOCUMENT_NUMBER | AU_ID_NUMBER | AU_ID_TYPE | CA_LEGAL_TYPE | CA_BUSINESS_ENTITY_TYPE |ES_IDENTIFICATION | ES_IDENTIFICATION_TYPE | ES_LEGAL_FORM | FI_BUSINESS_NUMBER | FI_ID_NUMBER | IT_PIN | RU_PASSPORT_DATA | SE_ID_NUMBER | SG_ID_NUMBER | VAT_NUMBER

    Parent: ExtraParams

    Required: Yes

    " + } + }, + "ExtraParamValue": { + "base": null, + "refs": { + "ExtraParam$Value": "

    Values corresponding to the additional parameter names required by some top-level domains.

    Type: String

    Default: None

    Constraints: Maximum 2048 characters.

    Parent: ExtraParams

    Required: Yes

    " + } + }, + "FIAuthKey": { + "base": null, + "refs": { + "UpdateDomainNameserversRequest$FIAuthKey": "

    The authorization key for .fi domains.

    " + } + }, + "GetContactReachabilityStatusRequest": { + "base": null, + "refs": { + } + }, + "GetContactReachabilityStatusResponse": { + "base": null, + "refs": { + } + }, + "GetDomainDetailRequest": { + "base": "

    The GetDomainDetail request includes the following element.

    ", + "refs": { + } + }, + "GetDomainDetailResponse": { + "base": "

    The GetDomainDetail response includes the following elements.

    ", + "refs": { + } + }, + "GetOperationDetailRequest": { + "base": "

    The GetOperationDetail request includes the following element.

    ", + "refs": { + } + }, + "GetOperationDetailResponse": { + "base": "

    The GetOperationDetail response includes the following elements.

    ", + "refs": { + } + }, + "GlueIp": { + "base": null, + "refs": { + "GlueIpList$member": null + } + }, + "GlueIpList": { + "base": null, + "refs": { + "Nameserver$GlueIps": "

    Glue IP address of a name server entry. Glue IP addresses are required only when the name of the name server is a subdomain of the domain. For example, if your domain is example.com and the name server for the domain is ns.example.com, you need to specify the IP address for ns.example.com.

    Type: List of IP addresses.

    Constraints: The list can contain only one IPv4 and one IPv6 address.

    Parent: Nameservers
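    The glue-record rule above is easiest to see in a request. In this hedged sketch (all host names and the IP are illustrative), the in-zone name server carries a glue IP while the out-of-zone one does not:

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/route53domains"
)

func main() {
    svc := route53domains.New(session.Must(session.NewSession(
        &aws.Config{Region: aws.String("us-east-1")})))

    out, err := svc.UpdateDomainNameservers(&route53domains.UpdateDomainNameserversInput{
        DomainName: aws.String("example.com"),
        Nameservers: []*route53domains.Nameserver{
            // ns.example.com is a subdomain of example.com, so a glue IP is required.
            {Name: aws.String("ns.example.com"), GlueIps: []*string{aws.String("192.0.2.44")}},
            // An out-of-zone name server needs no glue record.
            {Name: aws.String("ns1.example.net")},
        },
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("operation:", aws.StringValue(out.OperationId))
}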

    " + } + }, + "HostName": { + "base": null, + "refs": { + "Nameserver$Name": "

    The fully qualified host name of the name server.

    Type: String

    Constraint: Maximum 255 characters.

    Parent: Nameservers

    " + } + }, + "InvalidInput": { + "base": "

    The requested item is not acceptable. For example, an OperationId might refer to the ID of an operation that is already completed, or a domain name might not be a valid domain name or might not belong to the requester account.

    ", + "refs": { + } + }, + "LangCode": { + "base": null, + "refs": { + "CheckDomainAvailabilityRequest$IdnLangCode": "

    Reserved for future use.

    ", + "RegisterDomainRequest$IdnLangCode": "

    Reserved for future use.

    ", + "TransferDomainRequest$IdnLangCode": "

    Reserved for future use.

    " + } + }, + "ListDomainsRequest": { + "base": "

    The ListDomains request includes the following elements.

    ", + "refs": { + } + }, + "ListDomainsResponse": { + "base": "

    The ListDomains response includes the following elements.

    ", + "refs": { + } + }, + "ListOperationsRequest": { + "base": "

    The ListOperations request includes the following elements.

    ", + "refs": { + } + }, + "ListOperationsResponse": { + "base": "

    The ListOperations response includes the following elements.

    ", + "refs": { + } + }, + "ListTagsForDomainRequest": { + "base": "

    The ListTagsForDomainRequest includes the following elements.

    ", + "refs": { + } + }, + "ListTagsForDomainResponse": { + "base": "

    The ListTagsForDomain response includes the following elements.

    ", + "refs": { + } + }, + "Nameserver": { + "base": "

    Nameserver includes the following elements.

    ", + "refs": { + "NameserverList$member": null + } + }, + "NameserverList": { + "base": null, + "refs": { + "GetDomainDetailResponse$Nameservers": "

    The name servers of the domain.

    Type: Complex

    ", + "TransferDomainRequest$Nameservers": "

    Contains details for the host and glue IP addresses.

    Type: Complex

    Children: GlueIps, Name

    Required: No

    ", + "UpdateDomainNameserversRequest$Nameservers": "

    A list of new name servers for the domain.

    Type: Complex

    Children: Name, GlueIps

    Required: Yes

    " + } + }, + "OperationId": { + "base": null, + "refs": { + "DisableDomainTransferLockResponse$OperationId": "

    Identifier for tracking the progress of the request. To use this ID to query the operation status, use GetOperationDetail.

    Type: String

    Default: None

    Constraints: Maximum 255 characters.

    ", + "EnableDomainTransferLockResponse$OperationId": "

    Identifier for tracking the progress of the request. To use this ID to query the operation status, use GetOperationDetail.

    Type: String

    Default: None

    Constraints: Maximum 255 characters.

    ", + "GetOperationDetailRequest$OperationId": "

    The identifier for the operation for which you want to get the status. Amazon Route 53 returned the identifier in the response to the original request.

    Type: String

    Default: None

    Required: Yes

    ", + "GetOperationDetailResponse$OperationId": "

    The identifier for the operation.

    Type: String

    ", + "OperationSummary$OperationId": "

    Identifier returned to track the requested action.

    Type: String

    ", + "RegisterDomainResponse$OperationId": "

    Identifier for tracking the progress of the request. To use this ID to query the operation status, use GetOperationDetail.

    Type: String

    Default: None

    Constraints: Maximum 255 characters.

    ", + "TransferDomainResponse$OperationId": "

    Identifier for tracking the progress of the request. To use this ID to query the operation status, use GetOperationDetail.

    Type: String

    Default: None

    Constraints: Maximum 255 characters.

    ", + "UpdateDomainContactPrivacyResponse$OperationId": "

    Identifier for tracking the progress of the request. To use this ID to query the operation status, use GetOperationDetail.

    Type: String

    Default: None

    Constraints: Maximum 255 characters.

    ", + "UpdateDomainContactResponse$OperationId": "

    Identifier for tracking the progress of the request. To use this ID to query the operation status, use GetOperationDetail.

    Type: String

    Default: None

    Constraints: Maximum 255 characters.

    ", + "UpdateDomainNameserversResponse$OperationId": "

    Identifier for tracking the progress of the request. To use this ID to query the operation status, use GetOperationDetail.

    Type: String

    Default: None

    Constraints: Maximum 255 characters.

    " + } + }, + "OperationLimitExceeded": { + "base": "

    The number of operations or jobs running exceeded the allowed threshold for the account.

    ", + "refs": { + } + }, + "OperationStatus": { + "base": null, + "refs": { + "GetOperationDetailResponse$Status": "

    The current status of the requested operation in the system.

    Type: String

    ", + "OperationSummary$Status": "

    The current status of the requested operation in the system.

    Type: String

    " + } + }, + "OperationSummary": { + "base": "

    OperationSummary includes the following elements.

    ", + "refs": { + "OperationSummaryList$member": null + } + }, + "OperationSummaryList": { + "base": null, + "refs": { + "ListOperationsResponse$Operations": "

    Lists summaries of the operations.

    Type: Complex type containing a list of operation summaries

    Children: OperationId, Status, SubmittedDate, Type

    " + } + }, + "OperationType": { + "base": null, + "refs": { + "GetOperationDetailResponse$Type": "

    The type of operation that was requested.

    Type: String

    ", + "OperationSummary$Type": "

    Type of the action requested.

    Type: String

    Valid values: REGISTER_DOMAIN | DELETE_DOMAIN | TRANSFER_IN_DOMAIN | UPDATE_DOMAIN_CONTACT | UPDATE_NAMESERVER | CHANGE_PRIVACY_PROTECTION | DOMAIN_LOCK

    " + } + }, + "PageMarker": { + "base": null, + "refs": { + "ListDomainsRequest$Marker": "

    For an initial request for a list of domains, omit this element. If the number of domains that are associated with the current AWS account is greater than the value that you specified for MaxItems, you can use Marker to return additional domains. Get the value of NextPageMarker from the previous response, and submit another request that includes the value of NextPageMarker in the Marker element.

    Type: String

    Default: None

    Constraints: The marker must match the value specified in the previous request.

    Required: No
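    As an alternative to the ListDomainsPages helper shown earlier, the Marker and NextPageMarker handshake described above can be driven by hand; a minimal sketch, with the page size of 20 as an assumption:

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/route53domains"
)

func main() {
    svc := route53domains.New(session.Must(session.NewSession(
        &aws.Config{Region: aws.String("us-east-1")})))

    input := &route53domains.ListDomainsInput{MaxItems: aws.Int64(20)}
    for {
        out, err := svc.ListDomains(input)
        if err != nil {
            log.Fatal(err)
        }
        for _, d := range out.Domains {
            fmt.Println(aws.StringValue(d.DomainName))
        }
        if out.NextPageMarker == nil {
            break // no more pages
        }
        input.Marker = out.NextPageMarker // feed NextPageMarker back as Marker
    }
}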

    ", + "ListDomainsResponse$NextPageMarker": "

    If there are more domains than you specified for MaxItems in the request, submit another request and include the value of NextPageMarker in the value of Marker.

    Type: String

    Parent: Operations

    ", + "ListOperationsRequest$Marker": "

    For an initial request for a list of operations, omit this element. If the number of operations that are not yet complete is greater than the value that you specified for MaxItems, you can use Marker to return additional operations. Get the value of NextPageMarker from the previous response, and submit another request that includes the value of NextPageMarker in the Marker element.

    Type: String

    Default: None

    Required: No

    ", + "ListOperationsResponse$NextPageMarker": "

    If there are more operations than you specified for MaxItems in the request, submit another request and include the value of NextPageMarker in the value of Marker.

    Type: String

    Parent: Operations

    " + } + }, + "PageMaxItems": { + "base": null, + "refs": { + "ListDomainsRequest$MaxItems": "

    Number of domains to be returned.

    Type: Integer

    Default: 20

    Constraints: A value between 1 and 100.

    Required: No

    ", + "ListOperationsRequest$MaxItems": "

    Number of operations to be returned.

    Type: Integer

    Default: 20

    Constraints: A value between 1 and 100.

    Required: No

    " + } + }, + "ReachabilityStatus": { + "base": null, + "refs": { + "GetContactReachabilityStatusResponse$status": "

    Whether the registrant contact has responded. PENDING indicates that we sent the confirmation email and haven't received a response yet, DONE indicates that we sent the email and got confirmation from the registrant contact, and EXPIRED indicates that the time limit expired before the registrant contact responded.

    Type: String

    Valid values: PENDING, DONE, EXPIRED

    " + } + }, + "RegisterDomainRequest": { + "base": "

    The RegisterDomain request includes the following elements.

    ", + "refs": { + } + }, + "RegisterDomainResponse": { + "base": "

    The RegisterDomain response includes the following element.

    ", + "refs": { + } + }, + "RegistrarName": { + "base": null, + "refs": { + "GetDomainDetailResponse$RegistrarName": "

    Name of the registrar of the domain as identified in the registry. Amazon Route 53 domains are registered by registrar Gandi. The value is \"GANDI SAS\".

    Type: String

    " + } + }, + "RegistrarUrl": { + "base": null, + "refs": { + "GetDomainDetailResponse$RegistrarUrl": "

    Web address of the registrar.

    Type: String

    " + } + }, + "RegistrarWhoIsServer": { + "base": null, + "refs": { + "GetDomainDetailResponse$WhoIsServer": "

    The fully qualified name of the WHOIS server that can answer the WHOIS query for the domain.

    Type: String

    " + } + }, + "RegistryDomainId": { + "base": null, + "refs": { + "GetDomainDetailResponse$RegistryDomainId": "

    Reserved for future use.

    " + } + }, + "Reseller": { + "base": null, + "refs": { + "GetDomainDetailResponse$Reseller": "

    Reseller of the domain. Domains registered or transferred using Amazon Route 53 will have \"Amazon\" as the reseller.

    Type: String

    " + } + }, + "ResendContactReachabilityEmailRequest": { + "base": null, + "refs": { + } + }, + "ResendContactReachabilityEmailResponse": { + "base": null, + "refs": { + } + }, + "RetrieveDomainAuthCodeRequest": { + "base": "

    The RetrieveDomainAuthCode request includes the following element.

    ", + "refs": { + } + }, + "RetrieveDomainAuthCodeResponse": { + "base": "

    The RetrieveDomainAuthCode response includes the following element.

    ", + "refs": { + } + }, + "State": { + "base": null, + "refs": { + "ContactDetail$State": "

    The state or province of the contact's address.

    Type: String

    Default: None

    Constraints: Maximum 255 characters.

    Parents: RegistrantContact, AdminContact, TechContact

    Required: No

    " + } + }, + "TLDRulesViolation": { + "base": "

    The top-level domain does not support this operation.

    ", + "refs": { + } + }, + "Tag": { + "base": "

    Each tag includes the following elements.

    ", + "refs": { + "TagList$member": null + } + }, + "TagKey": { + "base": null, + "refs": { + "Tag$Key": "

    The key (name) of a tag.

    Type: String

    Default: None

    Valid values: A-Z, a-z, 0-9, space, \".:/=+\\-@\"

    Constraints: Each key can be 1-128 characters long.

    Required: Yes

    ", + "TagKeyList$member": null + } + }, + "TagKeyList": { + "base": null, + "refs": { + "DeleteTagsForDomainRequest$TagsToDelete": "

    A list of tag keys to delete.

    Type: A list that contains the keys of the tags that you want to delete.

    Default: None

    Required: No

    " + } + }, + "TagList": { + "base": null, + "refs": { + "ListTagsForDomainResponse$TagList": "

    A list of the tags that are associated with the specified domain.

    Type: A complex type containing a list of tags

    Each tag includes the following elements.

    • Key

      The key (name) of a tag.

      Type: String

    • Value

      The value of a tag.

      Type: String

    ", + "UpdateTagsForDomainRequest$TagsToUpdate": "

    A list of the tag keys and values that you want to add or update. If you specify a key that already exists, the corresponding value will be replaced.

    Type: A complex type containing a list of tags

    Default: None

    Required: No


    Each tag includes the following elements:

    • Key

      The key (name) of a tag.

      Type: String

      Default: None

      Valid values: Unicode characters including alphanumeric, space, and \".:/=+\\-@\"

      Constraints: Each key can be 1-128 characters long.

      Required: Yes

    • Value

      The value of a tag.

      Type: String

      Default: None

      Valid values: Unicode characters including alphanumeric, space, and \".:/=+\\-@\"

      Constraints: Each value can be 0-256 characters long.

      Required: Yes
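    A minimal sketch of the upsert behavior described above (the domain name and the tag are illustrative): specifying an existing key replaces its value, and a new key adds a tag.

package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/route53domains"
)

func main() {
    svc := route53domains.New(session.Must(session.NewSession(
        &aws.Config{Region: aws.String("us-east-1")})))

    // Adds the tag if the key is new; replaces the value if the key exists.
    if _, err := svc.UpdateTagsForDomain(&route53domains.UpdateTagsForDomainInput{
        DomainName: aws.String("example.com"),
        TagsToUpdate: []*route53domains.Tag{
            {Key: aws.String("Environment"), Value: aws.String("production")},
        },
    }); err != nil {
        log.Fatal(err)
    }
}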

    " + } + }, + "TagValue": { + "base": null, + "refs": { + "Tag$Value": "

    The value of a tag.

    Type: String

    Default: None

    Valid values: A-Z, a-z, 0-9, space, \".:/=+\\-@\"

    Constraints: Each value can be 0-256 characters long.

    Required: Yes

    " + } + }, + "Timestamp": { + "base": null, + "refs": { + "DomainSummary$Expiry": "

    Expiration date of the domain in Coordinated Universal Time (UTC).

    Type: Long

    ", + "GetDomainDetailResponse$CreationDate": "

    The date when the domain was created as found in the response to a WHOIS query. The date format is Unix time.

    ", + "GetDomainDetailResponse$UpdatedDate": "

    The last updated date of the domain as found in the response to a WHOIS query. The date format is Unix time.

    ", + "GetDomainDetailResponse$ExpirationDate": "

    The date when the registration for the domain is set to expire. The date format is Unix time.

    ", + "GetOperationDetailResponse$SubmittedDate": "

    The date when the request was submitted.

    ", + "OperationSummary$SubmittedDate": "

    The date when the request was submitted.

    " + } + }, + "TransferDomainRequest": { + "base": "

    The TransferDomain request includes the following elements.

    ", + "refs": { + } + }, + "TransferDomainResponse": { + "base": "

    The TransferDomain response includes the following element.

    ", + "refs": { + } + }, + "UnsupportedTLD": { + "base": "

    Amazon Route 53 does not support this top-level domain.

    ", + "refs": { + } + }, + "UpdateDomainContactPrivacyRequest": { + "base": "

    The UpdateDomainContactPrivacy request includes the following elements.

    ", + "refs": { + } + }, + "UpdateDomainContactPrivacyResponse": { + "base": "

    The UpdateDomainContactPrivacy response includes the following element.

    ", + "refs": { + } + }, + "UpdateDomainContactRequest": { + "base": "

    The UpdateDomainContact request includes the following elements.

    ", + "refs": { + } + }, + "UpdateDomainContactResponse": { + "base": "

    The UpdateDomainContact response includes the following element.

    ", + "refs": { + } + }, + "UpdateDomainNameserversRequest": { + "base": "

    The UpdateDomainNameservers request includes the following elements.

    ", + "refs": { + } + }, + "UpdateDomainNameserversResponse": { + "base": "

    The UpdateDomainNameservers response includes the following element.

    ", + "refs": { + } + }, + "UpdateTagsForDomainRequest": { + "base": "

    The UpdateTagsForDomainRequest includes the following elements.

    ", + "refs": { + } + }, + "UpdateTagsForDomainResponse": { + "base": null, + "refs": { + } + }, + "ZipCode": { + "base": null, + "refs": { + "ContactDetail$ZipCode": "

    The zip or postal code of the contact's address.

    Type: String

    Default: None

    Constraints: Maximum 255 characters.

    Parents: RegistrantContact, AdminContact, TechContact

    Required: No

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/route53domains/2014-05-15/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/route53domains/2014-05-15/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/route53domains/2014-05-15/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/route53domains/2014-05-15/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/route53domains/2014-05-15/paginators-1.json new file mode 100644 index 000000000..8d1a73ab2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/route53domains/2014-05-15/paginators-1.json @@ -0,0 +1,17 @@ +{ + "version": "1.0", + "pagination": { + "ListDomains": { + "limit_key": "MaxItems", + "input_token": "Marker", + "output_token": "NextPageMarker", + "result_key": "Domains" + }, + "ListOperations": { + "limit_key": "MaxItems", + "input_token": "Marker", + "output_token": "NextPageMarker", + "result_key": "Operations" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/s3/2006-03-01/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/s3/2006-03-01/api-2.json new file mode 100644 index 000000000..310dd8c57 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/s3/2006-03-01/api-2.json @@ -0,0 +1,4517 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2006-03-01", + "checksumFormat":"md5", + "endpointPrefix":"s3", + "globalEndpoint":"s3.amazonaws.com", + "protocol":"rest-xml", + "serviceAbbreviation":"Amazon S3", + "serviceFullName":"Amazon Simple Storage Service", + "signatureVersion":"s3", + "timestampFormat":"rfc822" + }, + "operations":{ + "AbortMultipartUpload":{ + "name":"AbortMultipartUpload", + "http":{ + "method":"DELETE", + "requestUri":"/{Bucket}/{Key+}" + }, + "input":{"shape":"AbortMultipartUploadRequest"}, + "output":{"shape":"AbortMultipartUploadOutput"}, + "errors":[ + {"shape":"NoSuchUpload"} + ], + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadAbort.html" + }, + "CompleteMultipartUpload":{ + "name":"CompleteMultipartUpload", + "http":{ + "method":"POST", + "requestUri":"/{Bucket}/{Key+}" + }, + "input":{"shape":"CompleteMultipartUploadRequest"}, + "output":{"shape":"CompleteMultipartUploadOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadComplete.html" + }, + "CopyObject":{ + "name":"CopyObject", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}/{Key+}" + }, + "input":{"shape":"CopyObjectRequest"}, + "output":{"shape":"CopyObjectOutput"}, + "errors":[ + {"shape":"ObjectNotInActiveTierError"} + ], + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectCOPY.html", + "alias":"PutObjectCopy" + }, + "CreateBucket":{ + "name":"CreateBucket", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}" + }, + "input":{"shape":"CreateBucketRequest"}, + "output":{"shape":"CreateBucketOutput"}, + "errors":[ + {"shape":"BucketAlreadyExists"}, + {"shape":"BucketAlreadyOwnedByYou"} + ], + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUT.html", + "alias":"PutBucket" + }, + "CreateMultipartUpload":{ + "name":"CreateMultipartUpload", + "http":{ + "method":"POST", + "requestUri":"/{Bucket}/{Key+}?uploads" + }, + "input":{"shape":"CreateMultipartUploadRequest"}, + "output":{"shape":"CreateMultipartUploadOutput"}, + 
"documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadInitiate.html", + "alias":"InitiateMultipartUpload" + }, + "DeleteBucket":{ + "name":"DeleteBucket", + "http":{ + "method":"DELETE", + "requestUri":"/{Bucket}" + }, + "input":{"shape":"DeleteBucketRequest"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETE.html" + }, + "DeleteBucketCors":{ + "name":"DeleteBucketCors", + "http":{ + "method":"DELETE", + "requestUri":"/{Bucket}?cors" + }, + "input":{"shape":"DeleteBucketCorsRequest"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEcors.html" + }, + "DeleteBucketLifecycle":{ + "name":"DeleteBucketLifecycle", + "http":{ + "method":"DELETE", + "requestUri":"/{Bucket}?lifecycle" + }, + "input":{"shape":"DeleteBucketLifecycleRequest"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETElifecycle.html" + }, + "DeleteBucketPolicy":{ + "name":"DeleteBucketPolicy", + "http":{ + "method":"DELETE", + "requestUri":"/{Bucket}?policy" + }, + "input":{"shape":"DeleteBucketPolicyRequest"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEpolicy.html" + }, + "DeleteBucketReplication":{ + "name":"DeleteBucketReplication", + "http":{ + "method":"DELETE", + "requestUri":"/{Bucket}?replication" + }, + "input":{"shape":"DeleteBucketReplicationRequest"} + }, + "DeleteBucketTagging":{ + "name":"DeleteBucketTagging", + "http":{ + "method":"DELETE", + "requestUri":"/{Bucket}?tagging" + }, + "input":{"shape":"DeleteBucketTaggingRequest"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEtagging.html" + }, + "DeleteBucketWebsite":{ + "name":"DeleteBucketWebsite", + "http":{ + "method":"DELETE", + "requestUri":"/{Bucket}?website" + }, + "input":{"shape":"DeleteBucketWebsiteRequest"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEwebsite.html" + }, + "DeleteObject":{ + "name":"DeleteObject", + "http":{ + "method":"DELETE", + "requestUri":"/{Bucket}/{Key+}" + }, + "input":{"shape":"DeleteObjectRequest"}, + "output":{"shape":"DeleteObjectOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectDELETE.html" + }, + "DeleteObjects":{ + "name":"DeleteObjects", + "http":{ + "method":"POST", + "requestUri":"/{Bucket}?delete" + }, + "input":{"shape":"DeleteObjectsRequest"}, + "output":{"shape":"DeleteObjectsOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/multiobjectdeleteapi.html", + "alias":"DeleteMultipleObjects" + }, + "GetBucketAccelerateConfiguration":{ + "name":"GetBucketAccelerateConfiguration", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?accelerate" + }, + "input":{"shape":"GetBucketAccelerateConfigurationRequest"}, + "output":{"shape":"GetBucketAccelerateConfigurationOutput"} + }, + "GetBucketAcl":{ + "name":"GetBucketAcl", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?acl" + }, + "input":{"shape":"GetBucketAclRequest"}, + "output":{"shape":"GetBucketAclOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETacl.html" + }, + "GetBucketCors":{ + "name":"GetBucketCors", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?cors" + }, + "input":{"shape":"GetBucketCorsRequest"}, + "output":{"shape":"GetBucketCorsOutput"}, + 
"documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETcors.html" + }, + "GetBucketLifecycle":{ + "name":"GetBucketLifecycle", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?lifecycle" + }, + "input":{"shape":"GetBucketLifecycleRequest"}, + "output":{"shape":"GetBucketLifecycleOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETlifecycle.html", + "deprecated":true + }, + "GetBucketLifecycleConfiguration":{ + "name":"GetBucketLifecycleConfiguration", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?lifecycle" + }, + "input":{"shape":"GetBucketLifecycleConfigurationRequest"}, + "output":{"shape":"GetBucketLifecycleConfigurationOutput"} + }, + "GetBucketLocation":{ + "name":"GetBucketLocation", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?location" + }, + "input":{"shape":"GetBucketLocationRequest"}, + "output":{"shape":"GetBucketLocationOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETlocation.html" + }, + "GetBucketLogging":{ + "name":"GetBucketLogging", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?logging" + }, + "input":{"shape":"GetBucketLoggingRequest"}, + "output":{"shape":"GetBucketLoggingOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETlogging.html" + }, + "GetBucketNotification":{ + "name":"GetBucketNotification", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?notification" + }, + "input":{"shape":"GetBucketNotificationConfigurationRequest"}, + "output":{"shape":"NotificationConfigurationDeprecated"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETnotification.html", + "deprecated":true + }, + "GetBucketNotificationConfiguration":{ + "name":"GetBucketNotificationConfiguration", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?notification" + }, + "input":{"shape":"GetBucketNotificationConfigurationRequest"}, + "output":{"shape":"NotificationConfiguration"} + }, + "GetBucketPolicy":{ + "name":"GetBucketPolicy", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?policy" + }, + "input":{"shape":"GetBucketPolicyRequest"}, + "output":{"shape":"GetBucketPolicyOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETpolicy.html" + }, + "GetBucketReplication":{ + "name":"GetBucketReplication", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?replication" + }, + "input":{"shape":"GetBucketReplicationRequest"}, + "output":{"shape":"GetBucketReplicationOutput"} + }, + "GetBucketRequestPayment":{ + "name":"GetBucketRequestPayment", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?requestPayment" + }, + "input":{"shape":"GetBucketRequestPaymentRequest"}, + "output":{"shape":"GetBucketRequestPaymentOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTrequestPaymentGET.html" + }, + "GetBucketTagging":{ + "name":"GetBucketTagging", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?tagging" + }, + "input":{"shape":"GetBucketTaggingRequest"}, + "output":{"shape":"GetBucketTaggingOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETtagging.html" + }, + "GetBucketVersioning":{ + "name":"GetBucketVersioning", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?versioning" + }, + "input":{"shape":"GetBucketVersioningRequest"}, + "output":{"shape":"GetBucketVersioningOutput"}, + 
"documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETversioningStatus.html" + }, + "GetBucketWebsite":{ + "name":"GetBucketWebsite", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?website" + }, + "input":{"shape":"GetBucketWebsiteRequest"}, + "output":{"shape":"GetBucketWebsiteOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETwebsite.html" + }, + "GetObject":{ + "name":"GetObject", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}/{Key+}" + }, + "input":{"shape":"GetObjectRequest"}, + "output":{"shape":"GetObjectOutput"}, + "errors":[ + {"shape":"NoSuchKey"} + ], + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGET.html" + }, + "GetObjectAcl":{ + "name":"GetObjectAcl", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}/{Key+}?acl" + }, + "input":{"shape":"GetObjectAclRequest"}, + "output":{"shape":"GetObjectAclOutput"}, + "errors":[ + {"shape":"NoSuchKey"} + ], + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGETacl.html" + }, + "GetObjectTorrent":{ + "name":"GetObjectTorrent", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}/{Key+}?torrent" + }, + "input":{"shape":"GetObjectTorrentRequest"}, + "output":{"shape":"GetObjectTorrentOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGETtorrent.html" + }, + "HeadBucket":{ + "name":"HeadBucket", + "http":{ + "method":"HEAD", + "requestUri":"/{Bucket}" + }, + "input":{"shape":"HeadBucketRequest"}, + "errors":[ + {"shape":"NoSuchBucket"} + ], + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketHEAD.html" + }, + "HeadObject":{ + "name":"HeadObject", + "http":{ + "method":"HEAD", + "requestUri":"/{Bucket}/{Key+}" + }, + "input":{"shape":"HeadObjectRequest"}, + "output":{"shape":"HeadObjectOutput"}, + "errors":[ + {"shape":"NoSuchKey"} + ], + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectHEAD.html" + }, + "ListBuckets":{ + "name":"ListBuckets", + "http":{ + "method":"GET", + "requestUri":"/" + }, + "output":{"shape":"ListBucketsOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTServiceGET.html", + "alias":"GetService" + }, + "ListMultipartUploads":{ + "name":"ListMultipartUploads", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?uploads" + }, + "input":{"shape":"ListMultipartUploadsRequest"}, + "output":{"shape":"ListMultipartUploadsOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadListMPUpload.html" + }, + "ListObjectVersions":{ + "name":"ListObjectVersions", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?versions" + }, + "input":{"shape":"ListObjectVersionsRequest"}, + "output":{"shape":"ListObjectVersionsOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETVersion.html", + "alias":"GetBucketObjectVersions" + }, + "ListObjects":{ + "name":"ListObjects", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}" + }, + "input":{"shape":"ListObjectsRequest"}, + "output":{"shape":"ListObjectsOutput"}, + "errors":[ + {"shape":"NoSuchBucket"} + ], + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGET.html", + "alias":"GetBucket" + }, + "ListObjectsV2":{ + "name":"ListObjectsV2", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?list-type=2" + }, + 
"input":{"shape":"ListObjectsV2Request"}, + "output":{"shape":"ListObjectsV2Output"}, + "errors":[ + {"shape":"NoSuchBucket"} + ] + }, + "ListParts":{ + "name":"ListParts", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}/{Key+}" + }, + "input":{"shape":"ListPartsRequest"}, + "output":{"shape":"ListPartsOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadListParts.html" + }, + "PutBucketAccelerateConfiguration":{ + "name":"PutBucketAccelerateConfiguration", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}?accelerate" + }, + "input":{"shape":"PutBucketAccelerateConfigurationRequest"} + }, + "PutBucketAcl":{ + "name":"PutBucketAcl", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}?acl" + }, + "input":{"shape":"PutBucketAclRequest"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTacl.html" + }, + "PutBucketCors":{ + "name":"PutBucketCors", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}?cors" + }, + "input":{"shape":"PutBucketCorsRequest"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTcors.html" + }, + "PutBucketLifecycle":{ + "name":"PutBucketLifecycle", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}?lifecycle" + }, + "input":{"shape":"PutBucketLifecycleRequest"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html", + "deprecated":true + }, + "PutBucketLifecycleConfiguration":{ + "name":"PutBucketLifecycleConfiguration", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}?lifecycle" + }, + "input":{"shape":"PutBucketLifecycleConfigurationRequest"} + }, + "PutBucketLogging":{ + "name":"PutBucketLogging", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}?logging" + }, + "input":{"shape":"PutBucketLoggingRequest"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTlogging.html" + }, + "PutBucketNotification":{ + "name":"PutBucketNotification", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}?notification" + }, + "input":{"shape":"PutBucketNotificationRequest"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTnotification.html", + "deprecated":true + }, + "PutBucketNotificationConfiguration":{ + "name":"PutBucketNotificationConfiguration", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}?notification" + }, + "input":{"shape":"PutBucketNotificationConfigurationRequest"} + }, + "PutBucketPolicy":{ + "name":"PutBucketPolicy", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}?policy" + }, + "input":{"shape":"PutBucketPolicyRequest"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTpolicy.html" + }, + "PutBucketReplication":{ + "name":"PutBucketReplication", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}?replication" + }, + "input":{"shape":"PutBucketReplicationRequest"} + }, + "PutBucketRequestPayment":{ + "name":"PutBucketRequestPayment", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}?requestPayment" + }, + "input":{"shape":"PutBucketRequestPaymentRequest"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTrequestPaymentPUT.html" + }, + "PutBucketTagging":{ + "name":"PutBucketTagging", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}?tagging" + }, + "input":{"shape":"PutBucketTaggingRequest"}, + 
"documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTtagging.html" + }, + "PutBucketVersioning":{ + "name":"PutBucketVersioning", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}?versioning" + }, + "input":{"shape":"PutBucketVersioningRequest"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html" + }, + "PutBucketWebsite":{ + "name":"PutBucketWebsite", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}?website" + }, + "input":{"shape":"PutBucketWebsiteRequest"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTwebsite.html" + }, + "PutObject":{ + "name":"PutObject", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}/{Key+}" + }, + "input":{"shape":"PutObjectRequest"}, + "output":{"shape":"PutObjectOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectPUT.html" + }, + "PutObjectAcl":{ + "name":"PutObjectAcl", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}/{Key+}?acl" + }, + "input":{"shape":"PutObjectAclRequest"}, + "output":{"shape":"PutObjectAclOutput"}, + "errors":[ + {"shape":"NoSuchKey"} + ], + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectPUTacl.html" + }, + "RestoreObject":{ + "name":"RestoreObject", + "http":{ + "method":"POST", + "requestUri":"/{Bucket}/{Key+}?restore" + }, + "input":{"shape":"RestoreObjectRequest"}, + "output":{"shape":"RestoreObjectOutput"}, + "errors":[ + {"shape":"ObjectAlreadyInActiveTierError"} + ], + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectRestore.html", + "alias":"PostObjectRestore" + }, + "UploadPart":{ + "name":"UploadPart", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}/{Key+}" + }, + "input":{"shape":"UploadPartRequest"}, + "output":{"shape":"UploadPartOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadUploadPart.html" + }, + "UploadPartCopy":{ + "name":"UploadPartCopy", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}/{Key+}" + }, + "input":{"shape":"UploadPartCopyRequest"}, + "output":{"shape":"UploadPartCopyOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html" + } + }, + "shapes":{ + "AbortDate":{"type":"timestamp"}, + "AbortIncompleteMultipartUpload":{ + "type":"structure", + "members":{ + "DaysAfterInitiation":{"shape":"DaysAfterInitiation"} + } + }, + "AbortMultipartUploadOutput":{ + "type":"structure", + "members":{ + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" + } + } + }, + "AbortMultipartUploadRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Key", + "UploadId" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "Key":{ + "shape":"ObjectKey", + "location":"uri", + "locationName":"Key" + }, + "UploadId":{ + "shape":"MultipartUploadId", + "location":"querystring", + "locationName":"uploadId" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" + } + } + }, + "AbortRuleId":{"type":"string"}, + "AccelerateConfiguration":{ + "type":"structure", + "members":{ + "Status":{"shape":"BucketAccelerateStatus"} + } + }, + "AcceptRanges":{"type":"string"}, + "AccessControlPolicy":{ + "type":"structure", + "members":{ + "Grants":{ + "shape":"Grants", + 
"locationName":"AccessControlList" + }, + "Owner":{"shape":"Owner"} + } + }, + "AllowedHeader":{"type":"string"}, + "AllowedHeaders":{ + "type":"list", + "member":{"shape":"AllowedHeader"}, + "flattened":true + }, + "AllowedMethod":{"type":"string"}, + "AllowedMethods":{ + "type":"list", + "member":{"shape":"AllowedMethod"}, + "flattened":true + }, + "AllowedOrigin":{"type":"string"}, + "AllowedOrigins":{ + "type":"list", + "member":{"shape":"AllowedOrigin"}, + "flattened":true + }, + "Body":{"type":"blob"}, + "Bucket":{ + "type":"structure", + "members":{ + "Name":{"shape":"BucketName"}, + "CreationDate":{"shape":"CreationDate"} + } + }, + "BucketAccelerateStatus":{ + "type":"string", + "enum":[ + "Enabled", + "Suspended" + ] + }, + "BucketAlreadyExists":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "BucketAlreadyOwnedByYou":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "BucketCannedACL":{ + "type":"string", + "enum":[ + "private", + "public-read", + "public-read-write", + "authenticated-read" + ] + }, + "BucketLifecycleConfiguration":{ + "type":"structure", + "required":["Rules"], + "members":{ + "Rules":{ + "shape":"LifecycleRules", + "locationName":"Rule" + } + } + }, + "BucketLocationConstraint":{ + "type":"string", + "enum":[ + "EU", + "eu-west-1", + "us-west-1", + "us-west-2", + "ap-south-1", + "ap-southeast-1", + "ap-southeast-2", + "ap-northeast-1", + "sa-east-1", + "cn-north-1", + "eu-central-1" + ] + }, + "BucketLoggingStatus":{ + "type":"structure", + "members":{ + "LoggingEnabled":{"shape":"LoggingEnabled"} + } + }, + "BucketLogsPermission":{ + "type":"string", + "enum":[ + "FULL_CONTROL", + "READ", + "WRITE" + ] + }, + "BucketName":{"type":"string"}, + "BucketVersioningStatus":{ + "type":"string", + "enum":[ + "Enabled", + "Suspended" + ] + }, + "Buckets":{ + "type":"list", + "member":{ + "shape":"Bucket", + "locationName":"Bucket" + } + }, + "CORSConfiguration":{ + "type":"structure", + "required":["CORSRules"], + "members":{ + "CORSRules":{ + "shape":"CORSRules", + "locationName":"CORSRule" + } + } + }, + "CORSRule":{ + "type":"structure", + "required":[ + "AllowedMethods", + "AllowedOrigins" + ], + "members":{ + "AllowedHeaders":{ + "shape":"AllowedHeaders", + "locationName":"AllowedHeader" + }, + "AllowedMethods":{ + "shape":"AllowedMethods", + "locationName":"AllowedMethod" + }, + "AllowedOrigins":{ + "shape":"AllowedOrigins", + "locationName":"AllowedOrigin" + }, + "ExposeHeaders":{ + "shape":"ExposeHeaders", + "locationName":"ExposeHeader" + }, + "MaxAgeSeconds":{"shape":"MaxAgeSeconds"} + } + }, + "CORSRules":{ + "type":"list", + "member":{"shape":"CORSRule"}, + "flattened":true + }, + "CacheControl":{"type":"string"}, + "CloudFunction":{"type":"string"}, + "CloudFunctionConfiguration":{ + "type":"structure", + "members":{ + "Id":{"shape":"NotificationId"}, + "Event":{ + "shape":"Event", + "deprecated":true + }, + "Events":{ + "shape":"EventList", + "locationName":"Event" + }, + "CloudFunction":{"shape":"CloudFunction"}, + "InvocationRole":{"shape":"CloudFunctionInvocationRole"} + } + }, + "CloudFunctionInvocationRole":{"type":"string"}, + "Code":{"type":"string"}, + "CommonPrefix":{ + "type":"structure", + "members":{ + "Prefix":{"shape":"Prefix"} + } + }, + "CommonPrefixList":{ + "type":"list", + "member":{"shape":"CommonPrefix"}, + "flattened":true + }, + "CompleteMultipartUploadOutput":{ + "type":"structure", + "members":{ + "Location":{"shape":"Location"}, + "Bucket":{"shape":"BucketName"}, + 
"Key":{"shape":"ObjectKey"}, + "Expiration":{ + "shape":"Expiration", + "location":"header", + "locationName":"x-amz-expiration" + }, + "ETag":{"shape":"ETag"}, + "ServerSideEncryption":{ + "shape":"ServerSideEncryption", + "location":"header", + "locationName":"x-amz-server-side-encryption" + }, + "VersionId":{ + "shape":"ObjectVersionId", + "location":"header", + "locationName":"x-amz-version-id" + }, + "SSEKMSKeyId":{ + "shape":"SSEKMSKeyId", + "location":"header", + "locationName":"x-amz-server-side-encryption-aws-kms-key-id" + }, + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" + } + } + }, + "CompleteMultipartUploadRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Key", + "UploadId" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "Key":{ + "shape":"ObjectKey", + "location":"uri", + "locationName":"Key" + }, + "MultipartUpload":{ + "shape":"CompletedMultipartUpload", + "locationName":"CompleteMultipartUpload", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + }, + "UploadId":{ + "shape":"MultipartUploadId", + "location":"querystring", + "locationName":"uploadId" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" + } + }, + "payload":"MultipartUpload" + }, + "CompletedMultipartUpload":{ + "type":"structure", + "members":{ + "Parts":{ + "shape":"CompletedPartList", + "locationName":"Part" + } + } + }, + "CompletedPart":{ + "type":"structure", + "members":{ + "ETag":{"shape":"ETag"}, + "PartNumber":{"shape":"PartNumber"} + } + }, + "CompletedPartList":{ + "type":"list", + "member":{"shape":"CompletedPart"}, + "flattened":true + }, + "Condition":{ + "type":"structure", + "members":{ + "HttpErrorCodeReturnedEquals":{"shape":"HttpErrorCodeReturnedEquals"}, + "KeyPrefixEquals":{"shape":"KeyPrefixEquals"} + } + }, + "ContentDisposition":{"type":"string"}, + "ContentEncoding":{"type":"string"}, + "ContentLanguage":{"type":"string"}, + "ContentLength":{"type":"long"}, + "ContentMD5":{"type":"string"}, + "ContentRange":{"type":"string"}, + "ContentType":{"type":"string"}, + "CopyObjectOutput":{ + "type":"structure", + "members":{ + "CopyObjectResult":{"shape":"CopyObjectResult"}, + "Expiration":{ + "shape":"Expiration", + "location":"header", + "locationName":"x-amz-expiration" + }, + "CopySourceVersionId":{ + "shape":"CopySourceVersionId", + "location":"header", + "locationName":"x-amz-copy-source-version-id" + }, + "VersionId":{ + "shape":"ObjectVersionId", + "location":"header", + "locationName":"x-amz-version-id" + }, + "ServerSideEncryption":{ + "shape":"ServerSideEncryption", + "location":"header", + "locationName":"x-amz-server-side-encryption" + }, + "SSECustomerAlgorithm":{ + "shape":"SSECustomerAlgorithm", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-algorithm" + }, + "SSECustomerKeyMD5":{ + "shape":"SSECustomerKeyMD5", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key-MD5" + }, + "SSEKMSKeyId":{ + "shape":"SSEKMSKeyId", + "location":"header", + "locationName":"x-amz-server-side-encryption-aws-kms-key-id" + }, + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" + } + }, + "payload":"CopyObjectResult" + }, + "CopyObjectRequest":{ + "type":"structure", + "required":[ + "Bucket", + "CopySource", + "Key" + ], + "members":{ + "ACL":{ + 
"shape":"ObjectCannedACL", + "location":"header", + "locationName":"x-amz-acl" + }, + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "CacheControl":{ + "shape":"CacheControl", + "location":"header", + "locationName":"Cache-Control" + }, + "ContentDisposition":{ + "shape":"ContentDisposition", + "location":"header", + "locationName":"Content-Disposition" + }, + "ContentEncoding":{ + "shape":"ContentEncoding", + "location":"header", + "locationName":"Content-Encoding" + }, + "ContentLanguage":{ + "shape":"ContentLanguage", + "location":"header", + "locationName":"Content-Language" + }, + "ContentType":{ + "shape":"ContentType", + "location":"header", + "locationName":"Content-Type" + }, + "CopySource":{ + "shape":"CopySource", + "location":"header", + "locationName":"x-amz-copy-source" + }, + "CopySourceIfMatch":{ + "shape":"CopySourceIfMatch", + "location":"header", + "locationName":"x-amz-copy-source-if-match" + }, + "CopySourceIfModifiedSince":{ + "shape":"CopySourceIfModifiedSince", + "location":"header", + "locationName":"x-amz-copy-source-if-modified-since" + }, + "CopySourceIfNoneMatch":{ + "shape":"CopySourceIfNoneMatch", + "location":"header", + "locationName":"x-amz-copy-source-if-none-match" + }, + "CopySourceIfUnmodifiedSince":{ + "shape":"CopySourceIfUnmodifiedSince", + "location":"header", + "locationName":"x-amz-copy-source-if-unmodified-since" + }, + "Expires":{ + "shape":"Expires", + "location":"header", + "locationName":"Expires" + }, + "GrantFullControl":{ + "shape":"GrantFullControl", + "location":"header", + "locationName":"x-amz-grant-full-control" + }, + "GrantRead":{ + "shape":"GrantRead", + "location":"header", + "locationName":"x-amz-grant-read" + }, + "GrantReadACP":{ + "shape":"GrantReadACP", + "location":"header", + "locationName":"x-amz-grant-read-acp" + }, + "GrantWriteACP":{ + "shape":"GrantWriteACP", + "location":"header", + "locationName":"x-amz-grant-write-acp" + }, + "Key":{ + "shape":"ObjectKey", + "location":"uri", + "locationName":"Key" + }, + "Metadata":{ + "shape":"Metadata", + "location":"headers", + "locationName":"x-amz-meta-" + }, + "MetadataDirective":{ + "shape":"MetadataDirective", + "location":"header", + "locationName":"x-amz-metadata-directive" + }, + "ServerSideEncryption":{ + "shape":"ServerSideEncryption", + "location":"header", + "locationName":"x-amz-server-side-encryption" + }, + "StorageClass":{ + "shape":"StorageClass", + "location":"header", + "locationName":"x-amz-storage-class" + }, + "WebsiteRedirectLocation":{ + "shape":"WebsiteRedirectLocation", + "location":"header", + "locationName":"x-amz-website-redirect-location" + }, + "SSECustomerAlgorithm":{ + "shape":"SSECustomerAlgorithm", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-algorithm" + }, + "SSECustomerKey":{ + "shape":"SSECustomerKey", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key" + }, + "SSECustomerKeyMD5":{ + "shape":"SSECustomerKeyMD5", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key-MD5" + }, + "SSEKMSKeyId":{ + "shape":"SSEKMSKeyId", + "location":"header", + "locationName":"x-amz-server-side-encryption-aws-kms-key-id" + }, + "CopySourceSSECustomerAlgorithm":{ + "shape":"CopySourceSSECustomerAlgorithm", + "location":"header", + "locationName":"x-amz-copy-source-server-side-encryption-customer-algorithm" + }, + "CopySourceSSECustomerKey":{ + "shape":"CopySourceSSECustomerKey", + "location":"header", + 
"locationName":"x-amz-copy-source-server-side-encryption-customer-key" + }, + "CopySourceSSECustomerKeyMD5":{ + "shape":"CopySourceSSECustomerKeyMD5", + "location":"header", + "locationName":"x-amz-copy-source-server-side-encryption-customer-key-MD5" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" + } + } + }, + "CopyObjectResult":{ + "type":"structure", + "members":{ + "ETag":{"shape":"ETag"}, + "LastModified":{"shape":"LastModified"} + } + }, + "CopyPartResult":{ + "type":"structure", + "members":{ + "ETag":{"shape":"ETag"}, + "LastModified":{"shape":"LastModified"} + } + }, + "CopySource":{ + "type":"string", + "pattern":"\\/.+\\/.+" + }, + "CopySourceIfMatch":{"type":"string"}, + "CopySourceIfModifiedSince":{"type":"timestamp"}, + "CopySourceIfNoneMatch":{"type":"string"}, + "CopySourceIfUnmodifiedSince":{"type":"timestamp"}, + "CopySourceRange":{"type":"string"}, + "CopySourceSSECustomerAlgorithm":{"type":"string"}, + "CopySourceSSECustomerKey":{ + "type":"string", + "sensitive":true + }, + "CopySourceSSECustomerKeyMD5":{"type":"string"}, + "CopySourceVersionId":{"type":"string"}, + "CreateBucketConfiguration":{ + "type":"structure", + "members":{ + "LocationConstraint":{"shape":"BucketLocationConstraint"} + } + }, + "CreateBucketOutput":{ + "type":"structure", + "members":{ + "Location":{ + "shape":"Location", + "location":"header", + "locationName":"Location" + } + } + }, + "CreateBucketRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "ACL":{ + "shape":"BucketCannedACL", + "location":"header", + "locationName":"x-amz-acl" + }, + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "CreateBucketConfiguration":{ + "shape":"CreateBucketConfiguration", + "locationName":"CreateBucketConfiguration", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + }, + "GrantFullControl":{ + "shape":"GrantFullControl", + "location":"header", + "locationName":"x-amz-grant-full-control" + }, + "GrantRead":{ + "shape":"GrantRead", + "location":"header", + "locationName":"x-amz-grant-read" + }, + "GrantReadACP":{ + "shape":"GrantReadACP", + "location":"header", + "locationName":"x-amz-grant-read-acp" + }, + "GrantWrite":{ + "shape":"GrantWrite", + "location":"header", + "locationName":"x-amz-grant-write" + }, + "GrantWriteACP":{ + "shape":"GrantWriteACP", + "location":"header", + "locationName":"x-amz-grant-write-acp" + } + }, + "payload":"CreateBucketConfiguration" + }, + "CreateMultipartUploadOutput":{ + "type":"structure", + "members":{ + "AbortDate":{ + "shape":"AbortDate", + "location":"header", + "locationName":"x-amz-abort-date" + }, + "AbortRuleId":{ + "shape":"AbortRuleId", + "location":"header", + "locationName":"x-amz-abort-rule-id" + }, + "Bucket":{ + "shape":"BucketName", + "locationName":"Bucket" + }, + "Key":{"shape":"ObjectKey"}, + "UploadId":{"shape":"MultipartUploadId"}, + "ServerSideEncryption":{ + "shape":"ServerSideEncryption", + "location":"header", + "locationName":"x-amz-server-side-encryption" + }, + "SSECustomerAlgorithm":{ + "shape":"SSECustomerAlgorithm", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-algorithm" + }, + "SSECustomerKeyMD5":{ + "shape":"SSECustomerKeyMD5", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key-MD5" + }, + "SSEKMSKeyId":{ + "shape":"SSEKMSKeyId", + "location":"header", + "locationName":"x-amz-server-side-encryption-aws-kms-key-id" + }, + 
"RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" + } + } + }, + "CreateMultipartUploadRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Key" + ], + "members":{ + "ACL":{ + "shape":"ObjectCannedACL", + "location":"header", + "locationName":"x-amz-acl" + }, + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "CacheControl":{ + "shape":"CacheControl", + "location":"header", + "locationName":"Cache-Control" + }, + "ContentDisposition":{ + "shape":"ContentDisposition", + "location":"header", + "locationName":"Content-Disposition" + }, + "ContentEncoding":{ + "shape":"ContentEncoding", + "location":"header", + "locationName":"Content-Encoding" + }, + "ContentLanguage":{ + "shape":"ContentLanguage", + "location":"header", + "locationName":"Content-Language" + }, + "ContentType":{ + "shape":"ContentType", + "location":"header", + "locationName":"Content-Type" + }, + "Expires":{ + "shape":"Expires", + "location":"header", + "locationName":"Expires" + }, + "GrantFullControl":{ + "shape":"GrantFullControl", + "location":"header", + "locationName":"x-amz-grant-full-control" + }, + "GrantRead":{ + "shape":"GrantRead", + "location":"header", + "locationName":"x-amz-grant-read" + }, + "GrantReadACP":{ + "shape":"GrantReadACP", + "location":"header", + "locationName":"x-amz-grant-read-acp" + }, + "GrantWriteACP":{ + "shape":"GrantWriteACP", + "location":"header", + "locationName":"x-amz-grant-write-acp" + }, + "Key":{ + "shape":"ObjectKey", + "location":"uri", + "locationName":"Key" + }, + "Metadata":{ + "shape":"Metadata", + "location":"headers", + "locationName":"x-amz-meta-" + }, + "ServerSideEncryption":{ + "shape":"ServerSideEncryption", + "location":"header", + "locationName":"x-amz-server-side-encryption" + }, + "StorageClass":{ + "shape":"StorageClass", + "location":"header", + "locationName":"x-amz-storage-class" + }, + "WebsiteRedirectLocation":{ + "shape":"WebsiteRedirectLocation", + "location":"header", + "locationName":"x-amz-website-redirect-location" + }, + "SSECustomerAlgorithm":{ + "shape":"SSECustomerAlgorithm", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-algorithm" + }, + "SSECustomerKey":{ + "shape":"SSECustomerKey", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key" + }, + "SSECustomerKeyMD5":{ + "shape":"SSECustomerKeyMD5", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key-MD5" + }, + "SSEKMSKeyId":{ + "shape":"SSEKMSKeyId", + "location":"header", + "locationName":"x-amz-server-side-encryption-aws-kms-key-id" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" + } + } + }, + "CreationDate":{"type":"timestamp"}, + "Date":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, + "Days":{"type":"integer"}, + "DaysAfterInitiation":{"type":"integer"}, + "Delete":{ + "type":"structure", + "required":["Objects"], + "members":{ + "Objects":{ + "shape":"ObjectIdentifierList", + "locationName":"Object" + }, + "Quiet":{"shape":"Quiet"} + } + }, + "DeleteBucketCorsRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + "DeleteBucketLifecycleRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + 
"DeleteBucketPolicyRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + "DeleteBucketReplicationRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + "DeleteBucketRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + "DeleteBucketTaggingRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + "DeleteBucketWebsiteRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + "DeleteMarker":{"type":"boolean"}, + "DeleteMarkerEntry":{ + "type":"structure", + "members":{ + "Owner":{"shape":"Owner"}, + "Key":{"shape":"ObjectKey"}, + "VersionId":{"shape":"ObjectVersionId"}, + "IsLatest":{"shape":"IsLatest"}, + "LastModified":{"shape":"LastModified"} + } + }, + "DeleteMarkerVersionId":{"type":"string"}, + "DeleteMarkers":{ + "type":"list", + "member":{"shape":"DeleteMarkerEntry"}, + "flattened":true + }, + "DeleteObjectOutput":{ + "type":"structure", + "members":{ + "DeleteMarker":{ + "shape":"DeleteMarker", + "location":"header", + "locationName":"x-amz-delete-marker" + }, + "VersionId":{ + "shape":"ObjectVersionId", + "location":"header", + "locationName":"x-amz-version-id" + }, + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" + } + } + }, + "DeleteObjectRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Key" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "Key":{ + "shape":"ObjectKey", + "location":"uri", + "locationName":"Key" + }, + "MFA":{ + "shape":"MFA", + "location":"header", + "locationName":"x-amz-mfa" + }, + "VersionId":{ + "shape":"ObjectVersionId", + "location":"querystring", + "locationName":"versionId" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" + } + } + }, + "DeleteObjectsOutput":{ + "type":"structure", + "members":{ + "Deleted":{"shape":"DeletedObjects"}, + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" + }, + "Errors":{ + "shape":"Errors", + "locationName":"Error" + } + } + }, + "DeleteObjectsRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Delete" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "Delete":{ + "shape":"Delete", + "locationName":"Delete", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + }, + "MFA":{ + "shape":"MFA", + "location":"header", + "locationName":"x-amz-mfa" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" + } + }, + "payload":"Delete" + }, + "DeletedObject":{ + "type":"structure", + "members":{ + "Key":{"shape":"ObjectKey"}, + "VersionId":{"shape":"ObjectVersionId"}, + "DeleteMarker":{"shape":"DeleteMarker"}, + "DeleteMarkerVersionId":{"shape":"DeleteMarkerVersionId"} + } + }, + "DeletedObjects":{ + "type":"list", + "member":{"shape":"DeletedObject"}, + "flattened":true + }, + 
"Delimiter":{"type":"string"}, + "Destination":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{"shape":"BucketName"}, + "StorageClass":{"shape":"StorageClass"} + } + }, + "DisplayName":{"type":"string"}, + "ETag":{"type":"string"}, + "EmailAddress":{"type":"string"}, + "EncodingType":{ + "type":"string", + "enum":["url"] + }, + "Error":{ + "type":"structure", + "members":{ + "Key":{"shape":"ObjectKey"}, + "VersionId":{"shape":"ObjectVersionId"}, + "Code":{"shape":"Code"}, + "Message":{"shape":"Message"} + } + }, + "ErrorDocument":{ + "type":"structure", + "required":["Key"], + "members":{ + "Key":{"shape":"ObjectKey"} + } + }, + "Errors":{ + "type":"list", + "member":{"shape":"Error"}, + "flattened":true + }, + "Event":{ + "type":"string", + "enum":[ + "s3:ReducedRedundancyLostObject", + "s3:ObjectCreated:*", + "s3:ObjectCreated:Put", + "s3:ObjectCreated:Post", + "s3:ObjectCreated:Copy", + "s3:ObjectCreated:CompleteMultipartUpload", + "s3:ObjectRemoved:*", + "s3:ObjectRemoved:Delete", + "s3:ObjectRemoved:DeleteMarkerCreated" + ] + }, + "EventList":{ + "type":"list", + "member":{"shape":"Event"}, + "flattened":true + }, + "Expiration":{"type":"string"}, + "ExpirationStatus":{ + "type":"string", + "enum":[ + "Enabled", + "Disabled" + ] + }, + "ExpiredObjectDeleteMarker":{"type":"boolean"}, + "Expires":{"type":"timestamp"}, + "ExposeHeader":{"type":"string"}, + "ExposeHeaders":{ + "type":"list", + "member":{"shape":"ExposeHeader"}, + "flattened":true + }, + "FetchOwner":{"type":"boolean"}, + "FilterRule":{ + "type":"structure", + "members":{ + "Name":{"shape":"FilterRuleName"}, + "Value":{"shape":"FilterRuleValue"} + } + }, + "FilterRuleList":{ + "type":"list", + "member":{"shape":"FilterRule"}, + "flattened":true + }, + "FilterRuleName":{ + "type":"string", + "enum":[ + "prefix", + "suffix" + ] + }, + "FilterRuleValue":{"type":"string"}, + "GetBucketAccelerateConfigurationOutput":{ + "type":"structure", + "members":{ + "Status":{"shape":"BucketAccelerateStatus"} + } + }, + "GetBucketAccelerateConfigurationRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + "GetBucketAclOutput":{ + "type":"structure", + "members":{ + "Owner":{"shape":"Owner"}, + "Grants":{ + "shape":"Grants", + "locationName":"AccessControlList" + } + } + }, + "GetBucketAclRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + "GetBucketCorsOutput":{ + "type":"structure", + "members":{ + "CORSRules":{ + "shape":"CORSRules", + "locationName":"CORSRule" + } + } + }, + "GetBucketCorsRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + "GetBucketLifecycleConfigurationOutput":{ + "type":"structure", + "members":{ + "Rules":{ + "shape":"LifecycleRules", + "locationName":"Rule" + } + } + }, + "GetBucketLifecycleConfigurationRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + "GetBucketLifecycleOutput":{ + "type":"structure", + "members":{ + "Rules":{ + "shape":"Rules", + "locationName":"Rule" + } + } + }, + "GetBucketLifecycleRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + 
"location":"uri", + "locationName":"Bucket" + } + } + }, + "GetBucketLocationOutput":{ + "type":"structure", + "members":{ + "LocationConstraint":{"shape":"BucketLocationConstraint"} + } + }, + "GetBucketLocationRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + "GetBucketLoggingOutput":{ + "type":"structure", + "members":{ + "LoggingEnabled":{"shape":"LoggingEnabled"} + } + }, + "GetBucketLoggingRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + "GetBucketNotificationConfigurationRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + "GetBucketPolicyOutput":{ + "type":"structure", + "members":{ + "Policy":{"shape":"Policy"} + }, + "payload":"Policy" + }, + "GetBucketPolicyRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + "GetBucketReplicationOutput":{ + "type":"structure", + "members":{ + "ReplicationConfiguration":{"shape":"ReplicationConfiguration"} + }, + "payload":"ReplicationConfiguration" + }, + "GetBucketReplicationRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + "GetBucketRequestPaymentOutput":{ + "type":"structure", + "members":{ + "Payer":{"shape":"Payer"} + } + }, + "GetBucketRequestPaymentRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + "GetBucketTaggingOutput":{ + "type":"structure", + "required":["TagSet"], + "members":{ + "TagSet":{"shape":"TagSet"} + } + }, + "GetBucketTaggingRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + "GetBucketVersioningOutput":{ + "type":"structure", + "members":{ + "Status":{"shape":"BucketVersioningStatus"}, + "MFADelete":{ + "shape":"MFADeleteStatus", + "locationName":"MfaDelete" + } + } + }, + "GetBucketVersioningRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + "GetBucketWebsiteOutput":{ + "type":"structure", + "members":{ + "RedirectAllRequestsTo":{"shape":"RedirectAllRequestsTo"}, + "IndexDocument":{"shape":"IndexDocument"}, + "ErrorDocument":{"shape":"ErrorDocument"}, + "RoutingRules":{"shape":"RoutingRules"} + } + }, + "GetBucketWebsiteRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + "GetObjectAclOutput":{ + "type":"structure", + "members":{ + "Owner":{"shape":"Owner"}, + "Grants":{ + "shape":"Grants", + "locationName":"AccessControlList" + }, + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" + } + } + }, + "GetObjectAclRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Key" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "Key":{ + 
"shape":"ObjectKey", + "location":"uri", + "locationName":"Key" + }, + "VersionId":{ + "shape":"ObjectVersionId", + "location":"querystring", + "locationName":"versionId" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" + } + } + }, + "GetObjectOutput":{ + "type":"structure", + "members":{ + "Body":{ + "shape":"Body", + "streaming":true + }, + "DeleteMarker":{ + "shape":"DeleteMarker", + "location":"header", + "locationName":"x-amz-delete-marker" + }, + "AcceptRanges":{ + "shape":"AcceptRanges", + "location":"header", + "locationName":"accept-ranges" + }, + "Expiration":{ + "shape":"Expiration", + "location":"header", + "locationName":"x-amz-expiration" + }, + "Restore":{ + "shape":"Restore", + "location":"header", + "locationName":"x-amz-restore" + }, + "LastModified":{ + "shape":"LastModified", + "location":"header", + "locationName":"Last-Modified" + }, + "ContentLength":{ + "shape":"ContentLength", + "location":"header", + "locationName":"Content-Length" + }, + "ETag":{ + "shape":"ETag", + "location":"header", + "locationName":"ETag" + }, + "MissingMeta":{ + "shape":"MissingMeta", + "location":"header", + "locationName":"x-amz-missing-meta" + }, + "VersionId":{ + "shape":"ObjectVersionId", + "location":"header", + "locationName":"x-amz-version-id" + }, + "CacheControl":{ + "shape":"CacheControl", + "location":"header", + "locationName":"Cache-Control" + }, + "ContentDisposition":{ + "shape":"ContentDisposition", + "location":"header", + "locationName":"Content-Disposition" + }, + "ContentEncoding":{ + "shape":"ContentEncoding", + "location":"header", + "locationName":"Content-Encoding" + }, + "ContentLanguage":{ + "shape":"ContentLanguage", + "location":"header", + "locationName":"Content-Language" + }, + "ContentRange":{ + "shape":"ContentRange", + "location":"header", + "locationName":"Content-Range" + }, + "ContentType":{ + "shape":"ContentType", + "location":"header", + "locationName":"Content-Type" + }, + "Expires":{ + "shape":"Expires", + "location":"header", + "locationName":"Expires" + }, + "WebsiteRedirectLocation":{ + "shape":"WebsiteRedirectLocation", + "location":"header", + "locationName":"x-amz-website-redirect-location" + }, + "ServerSideEncryption":{ + "shape":"ServerSideEncryption", + "location":"header", + "locationName":"x-amz-server-side-encryption" + }, + "Metadata":{ + "shape":"Metadata", + "location":"headers", + "locationName":"x-amz-meta-" + }, + "SSECustomerAlgorithm":{ + "shape":"SSECustomerAlgorithm", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-algorithm" + }, + "SSECustomerKeyMD5":{ + "shape":"SSECustomerKeyMD5", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key-MD5" + }, + "SSEKMSKeyId":{ + "shape":"SSEKMSKeyId", + "location":"header", + "locationName":"x-amz-server-side-encryption-aws-kms-key-id" + }, + "StorageClass":{ + "shape":"StorageClass", + "location":"header", + "locationName":"x-amz-storage-class" + }, + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" + }, + "ReplicationStatus":{ + "shape":"ReplicationStatus", + "location":"header", + "locationName":"x-amz-replication-status" + } + }, + "payload":"Body" + }, + "GetObjectRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Key" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "IfMatch":{ + "shape":"IfMatch", + "location":"header", + 
"locationName":"If-Match" + }, + "IfModifiedSince":{ + "shape":"IfModifiedSince", + "location":"header", + "locationName":"If-Modified-Since" + }, + "IfNoneMatch":{ + "shape":"IfNoneMatch", + "location":"header", + "locationName":"If-None-Match" + }, + "IfUnmodifiedSince":{ + "shape":"IfUnmodifiedSince", + "location":"header", + "locationName":"If-Unmodified-Since" + }, + "Key":{ + "shape":"ObjectKey", + "location":"uri", + "locationName":"Key" + }, + "Range":{ + "shape":"Range", + "location":"header", + "locationName":"Range" + }, + "ResponseCacheControl":{ + "shape":"ResponseCacheControl", + "location":"querystring", + "locationName":"response-cache-control" + }, + "ResponseContentDisposition":{ + "shape":"ResponseContentDisposition", + "location":"querystring", + "locationName":"response-content-disposition" + }, + "ResponseContentEncoding":{ + "shape":"ResponseContentEncoding", + "location":"querystring", + "locationName":"response-content-encoding" + }, + "ResponseContentLanguage":{ + "shape":"ResponseContentLanguage", + "location":"querystring", + "locationName":"response-content-language" + }, + "ResponseContentType":{ + "shape":"ResponseContentType", + "location":"querystring", + "locationName":"response-content-type" + }, + "ResponseExpires":{ + "shape":"ResponseExpires", + "location":"querystring", + "locationName":"response-expires" + }, + "VersionId":{ + "shape":"ObjectVersionId", + "location":"querystring", + "locationName":"versionId" + }, + "SSECustomerAlgorithm":{ + "shape":"SSECustomerAlgorithm", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-algorithm" + }, + "SSECustomerKey":{ + "shape":"SSECustomerKey", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key" + }, + "SSECustomerKeyMD5":{ + "shape":"SSECustomerKeyMD5", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key-MD5" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" + } + } + }, + "GetObjectTorrentOutput":{ + "type":"structure", + "members":{ + "Body":{ + "shape":"Body", + "streaming":true + }, + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" + } + }, + "payload":"Body" + }, + "GetObjectTorrentRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Key" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "Key":{ + "shape":"ObjectKey", + "location":"uri", + "locationName":"Key" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" + } + } + }, + "Grant":{ + "type":"structure", + "members":{ + "Grantee":{"shape":"Grantee"}, + "Permission":{"shape":"Permission"} + } + }, + "GrantFullControl":{"type":"string"}, + "GrantRead":{"type":"string"}, + "GrantReadACP":{"type":"string"}, + "GrantWrite":{"type":"string"}, + "GrantWriteACP":{"type":"string"}, + "Grantee":{ + "type":"structure", + "required":["Type"], + "members":{ + "DisplayName":{"shape":"DisplayName"}, + "EmailAddress":{"shape":"EmailAddress"}, + "ID":{"shape":"ID"}, + "Type":{ + "shape":"Type", + "locationName":"xsi:type", + "xmlAttribute":true + }, + "URI":{"shape":"URI"} + }, + "xmlNamespace":{ + "prefix":"xsi", + "uri":"http://www.w3.org/2001/XMLSchema-instance" + } + }, + "Grants":{ + "type":"list", + "member":{ + "shape":"Grant", + "locationName":"Grant" + } + }, + "HeadBucketRequest":{ + "type":"structure", + 
"required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + "HeadObjectOutput":{ + "type":"structure", + "members":{ + "DeleteMarker":{ + "shape":"DeleteMarker", + "location":"header", + "locationName":"x-amz-delete-marker" + }, + "AcceptRanges":{ + "shape":"AcceptRanges", + "location":"header", + "locationName":"accept-ranges" + }, + "Expiration":{ + "shape":"Expiration", + "location":"header", + "locationName":"x-amz-expiration" + }, + "Restore":{ + "shape":"Restore", + "location":"header", + "locationName":"x-amz-restore" + }, + "LastModified":{ + "shape":"LastModified", + "location":"header", + "locationName":"Last-Modified" + }, + "ContentLength":{ + "shape":"ContentLength", + "location":"header", + "locationName":"Content-Length" + }, + "ETag":{ + "shape":"ETag", + "location":"header", + "locationName":"ETag" + }, + "MissingMeta":{ + "shape":"MissingMeta", + "location":"header", + "locationName":"x-amz-missing-meta" + }, + "VersionId":{ + "shape":"ObjectVersionId", + "location":"header", + "locationName":"x-amz-version-id" + }, + "CacheControl":{ + "shape":"CacheControl", + "location":"header", + "locationName":"Cache-Control" + }, + "ContentDisposition":{ + "shape":"ContentDisposition", + "location":"header", + "locationName":"Content-Disposition" + }, + "ContentEncoding":{ + "shape":"ContentEncoding", + "location":"header", + "locationName":"Content-Encoding" + }, + "ContentLanguage":{ + "shape":"ContentLanguage", + "location":"header", + "locationName":"Content-Language" + }, + "ContentType":{ + "shape":"ContentType", + "location":"header", + "locationName":"Content-Type" + }, + "Expires":{ + "shape":"Expires", + "location":"header", + "locationName":"Expires" + }, + "WebsiteRedirectLocation":{ + "shape":"WebsiteRedirectLocation", + "location":"header", + "locationName":"x-amz-website-redirect-location" + }, + "ServerSideEncryption":{ + "shape":"ServerSideEncryption", + "location":"header", + "locationName":"x-amz-server-side-encryption" + }, + "Metadata":{ + "shape":"Metadata", + "location":"headers", + "locationName":"x-amz-meta-" + }, + "SSECustomerAlgorithm":{ + "shape":"SSECustomerAlgorithm", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-algorithm" + }, + "SSECustomerKeyMD5":{ + "shape":"SSECustomerKeyMD5", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key-MD5" + }, + "SSEKMSKeyId":{ + "shape":"SSEKMSKeyId", + "location":"header", + "locationName":"x-amz-server-side-encryption-aws-kms-key-id" + }, + "StorageClass":{ + "shape":"StorageClass", + "location":"header", + "locationName":"x-amz-storage-class" + }, + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" + }, + "ReplicationStatus":{ + "shape":"ReplicationStatus", + "location":"header", + "locationName":"x-amz-replication-status" + } + } + }, + "HeadObjectRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Key" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "IfMatch":{ + "shape":"IfMatch", + "location":"header", + "locationName":"If-Match" + }, + "IfModifiedSince":{ + "shape":"IfModifiedSince", + "location":"header", + "locationName":"If-Modified-Since" + }, + "IfNoneMatch":{ + "shape":"IfNoneMatch", + "location":"header", + "locationName":"If-None-Match" + }, + "IfUnmodifiedSince":{ + "shape":"IfUnmodifiedSince", + "location":"header", + 
"locationName":"If-Unmodified-Since" + }, + "Key":{ + "shape":"ObjectKey", + "location":"uri", + "locationName":"Key" + }, + "Range":{ + "shape":"Range", + "location":"header", + "locationName":"Range" + }, + "VersionId":{ + "shape":"ObjectVersionId", + "location":"querystring", + "locationName":"versionId" + }, + "SSECustomerAlgorithm":{ + "shape":"SSECustomerAlgorithm", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-algorithm" + }, + "SSECustomerKey":{ + "shape":"SSECustomerKey", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key" + }, + "SSECustomerKeyMD5":{ + "shape":"SSECustomerKeyMD5", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key-MD5" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" + } + } + }, + "HostName":{"type":"string"}, + "HttpErrorCodeReturnedEquals":{"type":"string"}, + "HttpRedirectCode":{"type":"string"}, + "ID":{"type":"string"}, + "IfMatch":{"type":"string"}, + "IfModifiedSince":{"type":"timestamp"}, + "IfNoneMatch":{"type":"string"}, + "IfUnmodifiedSince":{"type":"timestamp"}, + "IndexDocument":{ + "type":"structure", + "required":["Suffix"], + "members":{ + "Suffix":{"shape":"Suffix"} + } + }, + "Initiated":{"type":"timestamp"}, + "Initiator":{ + "type":"structure", + "members":{ + "ID":{"shape":"ID"}, + "DisplayName":{"shape":"DisplayName"} + } + }, + "IsLatest":{"type":"boolean"}, + "IsTruncated":{"type":"boolean"}, + "KeyCount":{"type":"integer"}, + "KeyMarker":{"type":"string"}, + "KeyPrefixEquals":{"type":"string"}, + "LambdaFunctionArn":{"type":"string"}, + "LambdaFunctionConfiguration":{ + "type":"structure", + "required":[ + "LambdaFunctionArn", + "Events" + ], + "members":{ + "Id":{"shape":"NotificationId"}, + "LambdaFunctionArn":{ + "shape":"LambdaFunctionArn", + "locationName":"CloudFunction" + }, + "Events":{ + "shape":"EventList", + "locationName":"Event" + }, + "Filter":{"shape":"NotificationConfigurationFilter"} + } + }, + "LambdaFunctionConfigurationList":{ + "type":"list", + "member":{"shape":"LambdaFunctionConfiguration"}, + "flattened":true + }, + "LastModified":{"type":"timestamp"}, + "LifecycleConfiguration":{ + "type":"structure", + "required":["Rules"], + "members":{ + "Rules":{ + "shape":"Rules", + "locationName":"Rule" + } + } + }, + "LifecycleExpiration":{ + "type":"structure", + "members":{ + "Date":{"shape":"Date"}, + "Days":{"shape":"Days"}, + "ExpiredObjectDeleteMarker":{"shape":"ExpiredObjectDeleteMarker"} + } + }, + "LifecycleRule":{ + "type":"structure", + "required":[ + "Prefix", + "Status" + ], + "members":{ + "Expiration":{"shape":"LifecycleExpiration"}, + "ID":{"shape":"ID"}, + "Prefix":{"shape":"Prefix"}, + "Status":{"shape":"ExpirationStatus"}, + "Transitions":{ + "shape":"TransitionList", + "locationName":"Transition" + }, + "NoncurrentVersionTransitions":{ + "shape":"NoncurrentVersionTransitionList", + "locationName":"NoncurrentVersionTransition" + }, + "NoncurrentVersionExpiration":{"shape":"NoncurrentVersionExpiration"}, + "AbortIncompleteMultipartUpload":{"shape":"AbortIncompleteMultipartUpload"} + } + }, + "LifecycleRules":{ + "type":"list", + "member":{"shape":"LifecycleRule"}, + "flattened":true + }, + "ListBucketsOutput":{ + "type":"structure", + "members":{ + "Buckets":{"shape":"Buckets"}, + "Owner":{"shape":"Owner"} + } + }, + "ListMultipartUploadsOutput":{ + "type":"structure", + "members":{ + "Bucket":{"shape":"BucketName"}, + 
"KeyMarker":{"shape":"KeyMarker"}, + "UploadIdMarker":{"shape":"UploadIdMarker"}, + "NextKeyMarker":{"shape":"NextKeyMarker"}, + "Prefix":{"shape":"Prefix"}, + "Delimiter":{"shape":"Delimiter"}, + "NextUploadIdMarker":{"shape":"NextUploadIdMarker"}, + "MaxUploads":{"shape":"MaxUploads"}, + "IsTruncated":{"shape":"IsTruncated"}, + "Uploads":{ + "shape":"MultipartUploadList", + "locationName":"Upload" + }, + "CommonPrefixes":{"shape":"CommonPrefixList"}, + "EncodingType":{"shape":"EncodingType"} + } + }, + "ListMultipartUploadsRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "Delimiter":{ + "shape":"Delimiter", + "location":"querystring", + "locationName":"delimiter" + }, + "EncodingType":{ + "shape":"EncodingType", + "location":"querystring", + "locationName":"encoding-type" + }, + "KeyMarker":{ + "shape":"KeyMarker", + "location":"querystring", + "locationName":"key-marker" + }, + "MaxUploads":{ + "shape":"MaxUploads", + "location":"querystring", + "locationName":"max-uploads" + }, + "Prefix":{ + "shape":"Prefix", + "location":"querystring", + "locationName":"prefix" + }, + "UploadIdMarker":{ + "shape":"UploadIdMarker", + "location":"querystring", + "locationName":"upload-id-marker" + } + } + }, + "ListObjectVersionsOutput":{ + "type":"structure", + "members":{ + "IsTruncated":{"shape":"IsTruncated"}, + "KeyMarker":{"shape":"KeyMarker"}, + "VersionIdMarker":{"shape":"VersionIdMarker"}, + "NextKeyMarker":{"shape":"NextKeyMarker"}, + "NextVersionIdMarker":{"shape":"NextVersionIdMarker"}, + "Versions":{ + "shape":"ObjectVersionList", + "locationName":"Version" + }, + "DeleteMarkers":{ + "shape":"DeleteMarkers", + "locationName":"DeleteMarker" + }, + "Name":{"shape":"BucketName"}, + "Prefix":{"shape":"Prefix"}, + "Delimiter":{"shape":"Delimiter"}, + "MaxKeys":{"shape":"MaxKeys"}, + "CommonPrefixes":{"shape":"CommonPrefixList"}, + "EncodingType":{"shape":"EncodingType"} + } + }, + "ListObjectVersionsRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "Delimiter":{ + "shape":"Delimiter", + "location":"querystring", + "locationName":"delimiter" + }, + "EncodingType":{ + "shape":"EncodingType", + "location":"querystring", + "locationName":"encoding-type" + }, + "KeyMarker":{ + "shape":"KeyMarker", + "location":"querystring", + "locationName":"key-marker" + }, + "MaxKeys":{ + "shape":"MaxKeys", + "location":"querystring", + "locationName":"max-keys" + }, + "Prefix":{ + "shape":"Prefix", + "location":"querystring", + "locationName":"prefix" + }, + "VersionIdMarker":{ + "shape":"VersionIdMarker", + "location":"querystring", + "locationName":"version-id-marker" + } + } + }, + "ListObjectsOutput":{ + "type":"structure", + "members":{ + "IsTruncated":{"shape":"IsTruncated"}, + "Marker":{"shape":"Marker"}, + "NextMarker":{"shape":"NextMarker"}, + "Contents":{"shape":"ObjectList"}, + "Name":{"shape":"BucketName"}, + "Prefix":{"shape":"Prefix"}, + "Delimiter":{"shape":"Delimiter"}, + "MaxKeys":{"shape":"MaxKeys"}, + "CommonPrefixes":{"shape":"CommonPrefixList"}, + "EncodingType":{"shape":"EncodingType"} + } + }, + "ListObjectsRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "Delimiter":{ + "shape":"Delimiter", + "location":"querystring", + "locationName":"delimiter" + }, + 
"EncodingType":{ + "shape":"EncodingType", + "location":"querystring", + "locationName":"encoding-type" + }, + "Marker":{ + "shape":"Marker", + "location":"querystring", + "locationName":"marker" + }, + "MaxKeys":{ + "shape":"MaxKeys", + "location":"querystring", + "locationName":"max-keys" + }, + "Prefix":{ + "shape":"Prefix", + "location":"querystring", + "locationName":"prefix" + } + } + }, + "ListObjectsV2Output":{ + "type":"structure", + "members":{ + "IsTruncated":{"shape":"IsTruncated"}, + "Contents":{"shape":"ObjectList"}, + "Name":{"shape":"BucketName"}, + "Prefix":{"shape":"Prefix"}, + "Delimiter":{"shape":"Delimiter"}, + "MaxKeys":{"shape":"MaxKeys"}, + "CommonPrefixes":{"shape":"CommonPrefixList"}, + "EncodingType":{"shape":"EncodingType"}, + "KeyCount":{"shape":"KeyCount"}, + "ContinuationToken":{"shape":"Token"}, + "NextContinuationToken":{"shape":"NextToken"}, + "StartAfter":{"shape":"StartAfter"} + } + }, + "ListObjectsV2Request":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "Delimiter":{ + "shape":"Delimiter", + "location":"querystring", + "locationName":"delimiter" + }, + "EncodingType":{ + "shape":"EncodingType", + "location":"querystring", + "locationName":"encoding-type" + }, + "MaxKeys":{ + "shape":"MaxKeys", + "location":"querystring", + "locationName":"max-keys" + }, + "Prefix":{ + "shape":"Prefix", + "location":"querystring", + "locationName":"prefix" + }, + "ContinuationToken":{ + "shape":"Token", + "location":"querystring", + "locationName":"continuation-token" + }, + "FetchOwner":{ + "shape":"FetchOwner", + "location":"querystring", + "locationName":"fetch-owner" + }, + "StartAfter":{ + "shape":"StartAfter", + "location":"querystring", + "locationName":"start-after" + } + } + }, + "ListPartsOutput":{ + "type":"structure", + "members":{ + "AbortDate":{ + "shape":"AbortDate", + "location":"header", + "locationName":"x-amz-abort-date" + }, + "AbortRuleId":{ + "shape":"AbortRuleId", + "location":"header", + "locationName":"x-amz-abort-rule-id" + }, + "Bucket":{"shape":"BucketName"}, + "Key":{"shape":"ObjectKey"}, + "UploadId":{"shape":"MultipartUploadId"}, + "PartNumberMarker":{"shape":"PartNumberMarker"}, + "NextPartNumberMarker":{"shape":"NextPartNumberMarker"}, + "MaxParts":{"shape":"MaxParts"}, + "IsTruncated":{"shape":"IsTruncated"}, + "Parts":{ + "shape":"Parts", + "locationName":"Part" + }, + "Initiator":{"shape":"Initiator"}, + "Owner":{"shape":"Owner"}, + "StorageClass":{"shape":"StorageClass"}, + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" + } + } + }, + "ListPartsRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Key", + "UploadId" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "Key":{ + "shape":"ObjectKey", + "location":"uri", + "locationName":"Key" + }, + "MaxParts":{ + "shape":"MaxParts", + "location":"querystring", + "locationName":"max-parts" + }, + "PartNumberMarker":{ + "shape":"PartNumberMarker", + "location":"querystring", + "locationName":"part-number-marker" + }, + "UploadId":{ + "shape":"MultipartUploadId", + "location":"querystring", + "locationName":"uploadId" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" + } + } + }, + "Location":{"type":"string"}, + "LoggingEnabled":{ + "type":"structure", + "members":{ + 
"TargetBucket":{"shape":"TargetBucket"}, + "TargetGrants":{"shape":"TargetGrants"}, + "TargetPrefix":{"shape":"TargetPrefix"} + } + }, + "MFA":{"type":"string"}, + "MFADelete":{ + "type":"string", + "enum":[ + "Enabled", + "Disabled" + ] + }, + "MFADeleteStatus":{ + "type":"string", + "enum":[ + "Enabled", + "Disabled" + ] + }, + "Marker":{"type":"string"}, + "MaxAgeSeconds":{"type":"integer"}, + "MaxKeys":{"type":"integer"}, + "MaxParts":{"type":"integer"}, + "MaxUploads":{"type":"integer"}, + "Message":{"type":"string"}, + "Metadata":{ + "type":"map", + "key":{"shape":"MetadataKey"}, + "value":{"shape":"MetadataValue"} + }, + "MetadataDirective":{ + "type":"string", + "enum":[ + "COPY", + "REPLACE" + ] + }, + "MetadataKey":{"type":"string"}, + "MetadataValue":{"type":"string"}, + "MissingMeta":{"type":"integer"}, + "MultipartUpload":{ + "type":"structure", + "members":{ + "UploadId":{"shape":"MultipartUploadId"}, + "Key":{"shape":"ObjectKey"}, + "Initiated":{"shape":"Initiated"}, + "StorageClass":{"shape":"StorageClass"}, + "Owner":{"shape":"Owner"}, + "Initiator":{"shape":"Initiator"} + } + }, + "MultipartUploadId":{"type":"string"}, + "MultipartUploadList":{ + "type":"list", + "member":{"shape":"MultipartUpload"}, + "flattened":true + }, + "NextKeyMarker":{"type":"string"}, + "NextMarker":{"type":"string"}, + "NextPartNumberMarker":{"type":"integer"}, + "NextToken":{"type":"string"}, + "NextUploadIdMarker":{"type":"string"}, + "NextVersionIdMarker":{"type":"string"}, + "NoSuchBucket":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "NoSuchKey":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "NoSuchUpload":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "NoncurrentVersionExpiration":{ + "type":"structure", + "members":{ + "NoncurrentDays":{"shape":"Days"} + } + }, + "NoncurrentVersionTransition":{ + "type":"structure", + "members":{ + "NoncurrentDays":{"shape":"Days"}, + "StorageClass":{"shape":"TransitionStorageClass"} + } + }, + "NoncurrentVersionTransitionList":{ + "type":"list", + "member":{"shape":"NoncurrentVersionTransition"}, + "flattened":true + }, + "NotificationConfiguration":{ + "type":"structure", + "members":{ + "TopicConfigurations":{ + "shape":"TopicConfigurationList", + "locationName":"TopicConfiguration" + }, + "QueueConfigurations":{ + "shape":"QueueConfigurationList", + "locationName":"QueueConfiguration" + }, + "LambdaFunctionConfigurations":{ + "shape":"LambdaFunctionConfigurationList", + "locationName":"CloudFunctionConfiguration" + } + } + }, + "NotificationConfigurationDeprecated":{ + "type":"structure", + "members":{ + "TopicConfiguration":{"shape":"TopicConfigurationDeprecated"}, + "QueueConfiguration":{"shape":"QueueConfigurationDeprecated"}, + "CloudFunctionConfiguration":{"shape":"CloudFunctionConfiguration"} + } + }, + "NotificationConfigurationFilter":{ + "type":"structure", + "members":{ + "Key":{ + "shape":"S3KeyFilter", + "locationName":"S3Key" + } + } + }, + "NotificationId":{"type":"string"}, + "Object":{ + "type":"structure", + "members":{ + "Key":{"shape":"ObjectKey"}, + "LastModified":{"shape":"LastModified"}, + "ETag":{"shape":"ETag"}, + "Size":{"shape":"Size"}, + "StorageClass":{"shape":"ObjectStorageClass"}, + "Owner":{"shape":"Owner"} + } + }, + "ObjectAlreadyInActiveTierError":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ObjectCannedACL":{ + "type":"string", + "enum":[ + "private", + "public-read", + "public-read-write", + "authenticated-read", + 
"aws-exec-read", + "bucket-owner-read", + "bucket-owner-full-control" + ] + }, + "ObjectIdentifier":{ + "type":"structure", + "required":["Key"], + "members":{ + "Key":{"shape":"ObjectKey"}, + "VersionId":{"shape":"ObjectVersionId"} + } + }, + "ObjectIdentifierList":{ + "type":"list", + "member":{"shape":"ObjectIdentifier"}, + "flattened":true + }, + "ObjectKey":{ + "type":"string", + "min":1 + }, + "ObjectList":{ + "type":"list", + "member":{"shape":"Object"}, + "flattened":true + }, + "ObjectNotInActiveTierError":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ObjectStorageClass":{ + "type":"string", + "enum":[ + "STANDARD", + "REDUCED_REDUNDANCY", + "GLACIER" + ] + }, + "ObjectVersion":{ + "type":"structure", + "members":{ + "ETag":{"shape":"ETag"}, + "Size":{"shape":"Size"}, + "StorageClass":{"shape":"ObjectVersionStorageClass"}, + "Key":{"shape":"ObjectKey"}, + "VersionId":{"shape":"ObjectVersionId"}, + "IsLatest":{"shape":"IsLatest"}, + "LastModified":{"shape":"LastModified"}, + "Owner":{"shape":"Owner"} + } + }, + "ObjectVersionId":{"type":"string"}, + "ObjectVersionList":{ + "type":"list", + "member":{"shape":"ObjectVersion"}, + "flattened":true + }, + "ObjectVersionStorageClass":{ + "type":"string", + "enum":["STANDARD"] + }, + "Owner":{ + "type":"structure", + "members":{ + "DisplayName":{"shape":"DisplayName"}, + "ID":{"shape":"ID"} + } + }, + "Part":{ + "type":"structure", + "members":{ + "PartNumber":{"shape":"PartNumber"}, + "LastModified":{"shape":"LastModified"}, + "ETag":{"shape":"ETag"}, + "Size":{"shape":"Size"} + } + }, + "PartNumber":{"type":"integer"}, + "PartNumberMarker":{"type":"integer"}, + "Parts":{ + "type":"list", + "member":{"shape":"Part"}, + "flattened":true + }, + "Payer":{ + "type":"string", + "enum":[ + "Requester", + "BucketOwner" + ] + }, + "Permission":{ + "type":"string", + "enum":[ + "FULL_CONTROL", + "WRITE", + "WRITE_ACP", + "READ", + "READ_ACP" + ] + }, + "Policy":{"type":"string"}, + "Prefix":{"type":"string"}, + "Protocol":{ + "type":"string", + "enum":[ + "http", + "https" + ] + }, + "PutBucketAccelerateConfigurationRequest":{ + "type":"structure", + "required":[ + "Bucket", + "AccelerateConfiguration" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "AccelerateConfiguration":{ + "shape":"AccelerateConfiguration", + "locationName":"AccelerateConfiguration", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + } + }, + "payload":"AccelerateConfiguration" + }, + "PutBucketAclRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "ACL":{ + "shape":"BucketCannedACL", + "location":"header", + "locationName":"x-amz-acl" + }, + "AccessControlPolicy":{ + "shape":"AccessControlPolicy", + "locationName":"AccessControlPolicy", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + }, + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "ContentMD5":{ + "shape":"ContentMD5", + "location":"header", + "locationName":"Content-MD5" + }, + "GrantFullControl":{ + "shape":"GrantFullControl", + "location":"header", + "locationName":"x-amz-grant-full-control" + }, + "GrantRead":{ + "shape":"GrantRead", + "location":"header", + "locationName":"x-amz-grant-read" + }, + "GrantReadACP":{ + "shape":"GrantReadACP", + "location":"header", + "locationName":"x-amz-grant-read-acp" + }, + "GrantWrite":{ + "shape":"GrantWrite", + "location":"header", + "locationName":"x-amz-grant-write" + }, + "GrantWriteACP":{ 
+ "shape":"GrantWriteACP", + "location":"header", + "locationName":"x-amz-grant-write-acp" + } + }, + "payload":"AccessControlPolicy" + }, + "PutBucketCorsRequest":{ + "type":"structure", + "required":[ + "Bucket", + "CORSConfiguration" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "CORSConfiguration":{ + "shape":"CORSConfiguration", + "locationName":"CORSConfiguration", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + }, + "ContentMD5":{ + "shape":"ContentMD5", + "location":"header", + "locationName":"Content-MD5" + } + }, + "payload":"CORSConfiguration" + }, + "PutBucketLifecycleConfigurationRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "LifecycleConfiguration":{ + "shape":"BucketLifecycleConfiguration", + "locationName":"LifecycleConfiguration", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + } + }, + "payload":"LifecycleConfiguration" + }, + "PutBucketLifecycleRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "ContentMD5":{ + "shape":"ContentMD5", + "location":"header", + "locationName":"Content-MD5" + }, + "LifecycleConfiguration":{ + "shape":"LifecycleConfiguration", + "locationName":"LifecycleConfiguration", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + } + }, + "payload":"LifecycleConfiguration" + }, + "PutBucketLoggingRequest":{ + "type":"structure", + "required":[ + "Bucket", + "BucketLoggingStatus" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "BucketLoggingStatus":{ + "shape":"BucketLoggingStatus", + "locationName":"BucketLoggingStatus", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + }, + "ContentMD5":{ + "shape":"ContentMD5", + "location":"header", + "locationName":"Content-MD5" + } + }, + "payload":"BucketLoggingStatus" + }, + "PutBucketNotificationConfigurationRequest":{ + "type":"structure", + "required":[ + "Bucket", + "NotificationConfiguration" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "NotificationConfiguration":{ + "shape":"NotificationConfiguration", + "locationName":"NotificationConfiguration", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + } + }, + "payload":"NotificationConfiguration" + }, + "PutBucketNotificationRequest":{ + "type":"structure", + "required":[ + "Bucket", + "NotificationConfiguration" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "ContentMD5":{ + "shape":"ContentMD5", + "location":"header", + "locationName":"Content-MD5" + }, + "NotificationConfiguration":{ + "shape":"NotificationConfigurationDeprecated", + "locationName":"NotificationConfiguration", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + } + }, + "payload":"NotificationConfiguration" + }, + "PutBucketPolicyRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Policy" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "ContentMD5":{ + "shape":"ContentMD5", + "location":"header", + "locationName":"Content-MD5" + }, + "Policy":{"shape":"Policy"} + }, + "payload":"Policy" + }, + "PutBucketReplicationRequest":{ + 
"type":"structure", + "required":[ + "Bucket", + "ReplicationConfiguration" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "ContentMD5":{ + "shape":"ContentMD5", + "location":"header", + "locationName":"Content-MD5" + }, + "ReplicationConfiguration":{ + "shape":"ReplicationConfiguration", + "locationName":"ReplicationConfiguration", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + } + }, + "payload":"ReplicationConfiguration" + }, + "PutBucketRequestPaymentRequest":{ + "type":"structure", + "required":[ + "Bucket", + "RequestPaymentConfiguration" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "ContentMD5":{ + "shape":"ContentMD5", + "location":"header", + "locationName":"Content-MD5" + }, + "RequestPaymentConfiguration":{ + "shape":"RequestPaymentConfiguration", + "locationName":"RequestPaymentConfiguration", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + } + }, + "payload":"RequestPaymentConfiguration" + }, + "PutBucketTaggingRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Tagging" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "ContentMD5":{ + "shape":"ContentMD5", + "location":"header", + "locationName":"Content-MD5" + }, + "Tagging":{ + "shape":"Tagging", + "locationName":"Tagging", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + } + }, + "payload":"Tagging" + }, + "PutBucketVersioningRequest":{ + "type":"structure", + "required":[ + "Bucket", + "VersioningConfiguration" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "ContentMD5":{ + "shape":"ContentMD5", + "location":"header", + "locationName":"Content-MD5" + }, + "MFA":{ + "shape":"MFA", + "location":"header", + "locationName":"x-amz-mfa" + }, + "VersioningConfiguration":{ + "shape":"VersioningConfiguration", + "locationName":"VersioningConfiguration", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + } + }, + "payload":"VersioningConfiguration" + }, + "PutBucketWebsiteRequest":{ + "type":"structure", + "required":[ + "Bucket", + "WebsiteConfiguration" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "ContentMD5":{ + "shape":"ContentMD5", + "location":"header", + "locationName":"Content-MD5" + }, + "WebsiteConfiguration":{ + "shape":"WebsiteConfiguration", + "locationName":"WebsiteConfiguration", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + } + }, + "payload":"WebsiteConfiguration" + }, + "PutObjectAclOutput":{ + "type":"structure", + "members":{ + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" + } + } + }, + "PutObjectAclRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Key" + ], + "members":{ + "ACL":{ + "shape":"ObjectCannedACL", + "location":"header", + "locationName":"x-amz-acl" + }, + "AccessControlPolicy":{ + "shape":"AccessControlPolicy", + "locationName":"AccessControlPolicy", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + }, + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "ContentMD5":{ + "shape":"ContentMD5", + "location":"header", + "locationName":"Content-MD5" + }, + "GrantFullControl":{ + "shape":"GrantFullControl", + "location":"header", + 
"locationName":"x-amz-grant-full-control" + }, + "GrantRead":{ + "shape":"GrantRead", + "location":"header", + "locationName":"x-amz-grant-read" + }, + "GrantReadACP":{ + "shape":"GrantReadACP", + "location":"header", + "locationName":"x-amz-grant-read-acp" + }, + "GrantWrite":{ + "shape":"GrantWrite", + "location":"header", + "locationName":"x-amz-grant-write" + }, + "GrantWriteACP":{ + "shape":"GrantWriteACP", + "location":"header", + "locationName":"x-amz-grant-write-acp" + }, + "Key":{ + "shape":"ObjectKey", + "location":"uri", + "locationName":"Key" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" + }, + "VersionId":{ + "shape":"ObjectVersionId", + "location":"querystring", + "locationName":"versionId" + } + }, + "payload":"AccessControlPolicy" + }, + "PutObjectOutput":{ + "type":"structure", + "members":{ + "Expiration":{ + "shape":"Expiration", + "location":"header", + "locationName":"x-amz-expiration" + }, + "ETag":{ + "shape":"ETag", + "location":"header", + "locationName":"ETag" + }, + "ServerSideEncryption":{ + "shape":"ServerSideEncryption", + "location":"header", + "locationName":"x-amz-server-side-encryption" + }, + "VersionId":{ + "shape":"ObjectVersionId", + "location":"header", + "locationName":"x-amz-version-id" + }, + "SSECustomerAlgorithm":{ + "shape":"SSECustomerAlgorithm", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-algorithm" + }, + "SSECustomerKeyMD5":{ + "shape":"SSECustomerKeyMD5", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key-MD5" + }, + "SSEKMSKeyId":{ + "shape":"SSEKMSKeyId", + "location":"header", + "locationName":"x-amz-server-side-encryption-aws-kms-key-id" + }, + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" + } + } + }, + "PutObjectRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Key" + ], + "members":{ + "ACL":{ + "shape":"ObjectCannedACL", + "location":"header", + "locationName":"x-amz-acl" + }, + "Body":{ + "shape":"Body", + "streaming":true + }, + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "CacheControl":{ + "shape":"CacheControl", + "location":"header", + "locationName":"Cache-Control" + }, + "ContentDisposition":{ + "shape":"ContentDisposition", + "location":"header", + "locationName":"Content-Disposition" + }, + "ContentEncoding":{ + "shape":"ContentEncoding", + "location":"header", + "locationName":"Content-Encoding" + }, + "ContentLanguage":{ + "shape":"ContentLanguage", + "location":"header", + "locationName":"Content-Language" + }, + "ContentLength":{ + "shape":"ContentLength", + "location":"header", + "locationName":"Content-Length" + }, + "ContentMD5":{ + "shape":"ContentMD5", + "location":"header", + "locationName":"Content-MD5" + }, + "ContentType":{ + "shape":"ContentType", + "location":"header", + "locationName":"Content-Type" + }, + "Expires":{ + "shape":"Expires", + "location":"header", + "locationName":"Expires" + }, + "GrantFullControl":{ + "shape":"GrantFullControl", + "location":"header", + "locationName":"x-amz-grant-full-control" + }, + "GrantRead":{ + "shape":"GrantRead", + "location":"header", + "locationName":"x-amz-grant-read" + }, + "GrantReadACP":{ + "shape":"GrantReadACP", + "location":"header", + "locationName":"x-amz-grant-read-acp" + }, + "GrantWriteACP":{ + "shape":"GrantWriteACP", + "location":"header", + "locationName":"x-amz-grant-write-acp" + }, + "Key":{ + 
"shape":"ObjectKey", + "location":"uri", + "locationName":"Key" + }, + "Metadata":{ + "shape":"Metadata", + "location":"headers", + "locationName":"x-amz-meta-" + }, + "ServerSideEncryption":{ + "shape":"ServerSideEncryption", + "location":"header", + "locationName":"x-amz-server-side-encryption" + }, + "StorageClass":{ + "shape":"StorageClass", + "location":"header", + "locationName":"x-amz-storage-class" + }, + "WebsiteRedirectLocation":{ + "shape":"WebsiteRedirectLocation", + "location":"header", + "locationName":"x-amz-website-redirect-location" + }, + "SSECustomerAlgorithm":{ + "shape":"SSECustomerAlgorithm", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-algorithm" + }, + "SSECustomerKey":{ + "shape":"SSECustomerKey", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key" + }, + "SSECustomerKeyMD5":{ + "shape":"SSECustomerKeyMD5", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key-MD5" + }, + "SSEKMSKeyId":{ + "shape":"SSEKMSKeyId", + "location":"header", + "locationName":"x-amz-server-side-encryption-aws-kms-key-id" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" + } + }, + "payload":"Body" + }, + "QueueArn":{"type":"string"}, + "QueueConfiguration":{ + "type":"structure", + "required":[ + "QueueArn", + "Events" + ], + "members":{ + "Id":{"shape":"NotificationId"}, + "QueueArn":{ + "shape":"QueueArn", + "locationName":"Queue" + }, + "Events":{ + "shape":"EventList", + "locationName":"Event" + }, + "Filter":{"shape":"NotificationConfigurationFilter"} + } + }, + "QueueConfigurationDeprecated":{ + "type":"structure", + "members":{ + "Id":{"shape":"NotificationId"}, + "Event":{ + "shape":"Event", + "deprecated":true + }, + "Events":{ + "shape":"EventList", + "locationName":"Event" + }, + "Queue":{"shape":"QueueArn"} + } + }, + "QueueConfigurationList":{ + "type":"list", + "member":{"shape":"QueueConfiguration"}, + "flattened":true + }, + "Quiet":{"type":"boolean"}, + "Range":{"type":"string"}, + "Redirect":{ + "type":"structure", + "members":{ + "HostName":{"shape":"HostName"}, + "HttpRedirectCode":{"shape":"HttpRedirectCode"}, + "Protocol":{"shape":"Protocol"}, + "ReplaceKeyPrefixWith":{"shape":"ReplaceKeyPrefixWith"}, + "ReplaceKeyWith":{"shape":"ReplaceKeyWith"} + } + }, + "RedirectAllRequestsTo":{ + "type":"structure", + "required":["HostName"], + "members":{ + "HostName":{"shape":"HostName"}, + "Protocol":{"shape":"Protocol"} + } + }, + "ReplaceKeyPrefixWith":{"type":"string"}, + "ReplaceKeyWith":{"type":"string"}, + "ReplicationConfiguration":{ + "type":"structure", + "required":[ + "Role", + "Rules" + ], + "members":{ + "Role":{"shape":"Role"}, + "Rules":{ + "shape":"ReplicationRules", + "locationName":"Rule" + } + } + }, + "ReplicationRule":{ + "type":"structure", + "required":[ + "Prefix", + "Status", + "Destination" + ], + "members":{ + "ID":{"shape":"ID"}, + "Prefix":{"shape":"Prefix"}, + "Status":{"shape":"ReplicationRuleStatus"}, + "Destination":{"shape":"Destination"} + } + }, + "ReplicationRuleStatus":{ + "type":"string", + "enum":[ + "Enabled", + "Disabled" + ] + }, + "ReplicationRules":{ + "type":"list", + "member":{"shape":"ReplicationRule"}, + "flattened":true + }, + "ReplicationStatus":{ + "type":"string", + "enum":[ + "COMPLETE", + "PENDING", + "FAILED", + "REPLICA" + ] + }, + "RequestCharged":{ + "type":"string", + "enum":["requester"] + }, + "RequestPayer":{ + "type":"string", + "enum":["requester"] + }, + 
"RequestPaymentConfiguration":{ + "type":"structure", + "required":["Payer"], + "members":{ + "Payer":{"shape":"Payer"} + } + }, + "ResponseCacheControl":{"type":"string"}, + "ResponseContentDisposition":{"type":"string"}, + "ResponseContentEncoding":{"type":"string"}, + "ResponseContentLanguage":{"type":"string"}, + "ResponseContentType":{"type":"string"}, + "ResponseExpires":{"type":"timestamp"}, + "Restore":{"type":"string"}, + "RestoreObjectOutput":{ + "type":"structure", + "members":{ + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" + } + } + }, + "RestoreObjectRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Key" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "Key":{ + "shape":"ObjectKey", + "location":"uri", + "locationName":"Key" + }, + "VersionId":{ + "shape":"ObjectVersionId", + "location":"querystring", + "locationName":"versionId" + }, + "RestoreRequest":{ + "shape":"RestoreRequest", + "locationName":"RestoreRequest", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" + } + }, + "payload":"RestoreRequest" + }, + "RestoreRequest":{ + "type":"structure", + "required":["Days"], + "members":{ + "Days":{"shape":"Days"} + } + }, + "Role":{"type":"string"}, + "RoutingRule":{ + "type":"structure", + "required":["Redirect"], + "members":{ + "Condition":{"shape":"Condition"}, + "Redirect":{"shape":"Redirect"} + } + }, + "RoutingRules":{ + "type":"list", + "member":{ + "shape":"RoutingRule", + "locationName":"RoutingRule" + } + }, + "Rule":{ + "type":"structure", + "required":[ + "Prefix", + "Status" + ], + "members":{ + "Expiration":{"shape":"LifecycleExpiration"}, + "ID":{"shape":"ID"}, + "Prefix":{"shape":"Prefix"}, + "Status":{"shape":"ExpirationStatus"}, + "Transition":{"shape":"Transition"}, + "NoncurrentVersionTransition":{"shape":"NoncurrentVersionTransition"}, + "NoncurrentVersionExpiration":{"shape":"NoncurrentVersionExpiration"}, + "AbortIncompleteMultipartUpload":{"shape":"AbortIncompleteMultipartUpload"} + } + }, + "Rules":{ + "type":"list", + "member":{"shape":"Rule"}, + "flattened":true + }, + "S3KeyFilter":{ + "type":"structure", + "members":{ + "FilterRules":{ + "shape":"FilterRuleList", + "locationName":"FilterRule" + } + } + }, + "SSECustomerAlgorithm":{"type":"string"}, + "SSECustomerKey":{ + "type":"string", + "sensitive":true + }, + "SSECustomerKeyMD5":{"type":"string"}, + "SSEKMSKeyId":{ + "type":"string", + "sensitive":true + }, + "ServerSideEncryption":{ + "type":"string", + "enum":[ + "AES256", + "aws:kms" + ] + }, + "Size":{"type":"integer"}, + "StartAfter":{"type":"string"}, + "StorageClass":{ + "type":"string", + "enum":[ + "STANDARD", + "REDUCED_REDUNDANCY", + "STANDARD_IA" + ] + }, + "Suffix":{"type":"string"}, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{"shape":"ObjectKey"}, + "Value":{"shape":"Value"} + } + }, + "TagSet":{ + "type":"list", + "member":{ + "shape":"Tag", + "locationName":"Tag" + } + }, + "Tagging":{ + "type":"structure", + "required":["TagSet"], + "members":{ + "TagSet":{"shape":"TagSet"} + } + }, + "TargetBucket":{"type":"string"}, + "TargetGrant":{ + "type":"structure", + "members":{ + "Grantee":{"shape":"Grantee"}, + "Permission":{"shape":"BucketLogsPermission"} + } + }, + "TargetGrants":{ + "type":"list", + "member":{ + 
"shape":"TargetGrant", + "locationName":"Grant" + } + }, + "TargetPrefix":{"type":"string"}, + "Token":{"type":"string"}, + "TopicArn":{"type":"string"}, + "TopicConfiguration":{ + "type":"structure", + "required":[ + "TopicArn", + "Events" + ], + "members":{ + "Id":{"shape":"NotificationId"}, + "TopicArn":{ + "shape":"TopicArn", + "locationName":"Topic" + }, + "Events":{ + "shape":"EventList", + "locationName":"Event" + }, + "Filter":{"shape":"NotificationConfigurationFilter"} + } + }, + "TopicConfigurationDeprecated":{ + "type":"structure", + "members":{ + "Id":{"shape":"NotificationId"}, + "Events":{ + "shape":"EventList", + "locationName":"Event" + }, + "Event":{ + "shape":"Event", + "deprecated":true + }, + "Topic":{"shape":"TopicArn"} + } + }, + "TopicConfigurationList":{ + "type":"list", + "member":{"shape":"TopicConfiguration"}, + "flattened":true + }, + "Transition":{ + "type":"structure", + "members":{ + "Date":{"shape":"Date"}, + "Days":{"shape":"Days"}, + "StorageClass":{"shape":"TransitionStorageClass"} + } + }, + "TransitionList":{ + "type":"list", + "member":{"shape":"Transition"}, + "flattened":true + }, + "TransitionStorageClass":{ + "type":"string", + "enum":[ + "GLACIER", + "STANDARD_IA" + ] + }, + "Type":{ + "type":"string", + "enum":[ + "CanonicalUser", + "AmazonCustomerByEmail", + "Group" + ] + }, + "URI":{"type":"string"}, + "UploadIdMarker":{"type":"string"}, + "UploadPartCopyOutput":{ + "type":"structure", + "members":{ + "CopySourceVersionId":{ + "shape":"CopySourceVersionId", + "location":"header", + "locationName":"x-amz-copy-source-version-id" + }, + "CopyPartResult":{"shape":"CopyPartResult"}, + "ServerSideEncryption":{ + "shape":"ServerSideEncryption", + "location":"header", + "locationName":"x-amz-server-side-encryption" + }, + "SSECustomerAlgorithm":{ + "shape":"SSECustomerAlgorithm", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-algorithm" + }, + "SSECustomerKeyMD5":{ + "shape":"SSECustomerKeyMD5", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key-MD5" + }, + "SSEKMSKeyId":{ + "shape":"SSEKMSKeyId", + "location":"header", + "locationName":"x-amz-server-side-encryption-aws-kms-key-id" + }, + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" + } + }, + "payload":"CopyPartResult" + }, + "UploadPartCopyRequest":{ + "type":"structure", + "required":[ + "Bucket", + "CopySource", + "Key", + "PartNumber", + "UploadId" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "CopySource":{ + "shape":"CopySource", + "location":"header", + "locationName":"x-amz-copy-source" + }, + "CopySourceIfMatch":{ + "shape":"CopySourceIfMatch", + "location":"header", + "locationName":"x-amz-copy-source-if-match" + }, + "CopySourceIfModifiedSince":{ + "shape":"CopySourceIfModifiedSince", + "location":"header", + "locationName":"x-amz-copy-source-if-modified-since" + }, + "CopySourceIfNoneMatch":{ + "shape":"CopySourceIfNoneMatch", + "location":"header", + "locationName":"x-amz-copy-source-if-none-match" + }, + "CopySourceIfUnmodifiedSince":{ + "shape":"CopySourceIfUnmodifiedSince", + "location":"header", + "locationName":"x-amz-copy-source-if-unmodified-since" + }, + "CopySourceRange":{ + "shape":"CopySourceRange", + "location":"header", + "locationName":"x-amz-copy-source-range" + }, + "Key":{ + "shape":"ObjectKey", + "location":"uri", + "locationName":"Key" + }, + "PartNumber":{ + "shape":"PartNumber", 
+ "location":"querystring", + "locationName":"partNumber" + }, + "UploadId":{ + "shape":"MultipartUploadId", + "location":"querystring", + "locationName":"uploadId" + }, + "SSECustomerAlgorithm":{ + "shape":"SSECustomerAlgorithm", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-algorithm" + }, + "SSECustomerKey":{ + "shape":"SSECustomerKey", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key" + }, + "SSECustomerKeyMD5":{ + "shape":"SSECustomerKeyMD5", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key-MD5" + }, + "CopySourceSSECustomerAlgorithm":{ + "shape":"CopySourceSSECustomerAlgorithm", + "location":"header", + "locationName":"x-amz-copy-source-server-side-encryption-customer-algorithm" + }, + "CopySourceSSECustomerKey":{ + "shape":"CopySourceSSECustomerKey", + "location":"header", + "locationName":"x-amz-copy-source-server-side-encryption-customer-key" + }, + "CopySourceSSECustomerKeyMD5":{ + "shape":"CopySourceSSECustomerKeyMD5", + "location":"header", + "locationName":"x-amz-copy-source-server-side-encryption-customer-key-MD5" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" + } + } + }, + "UploadPartOutput":{ + "type":"structure", + "members":{ + "ServerSideEncryption":{ + "shape":"ServerSideEncryption", + "location":"header", + "locationName":"x-amz-server-side-encryption" + }, + "ETag":{ + "shape":"ETag", + "location":"header", + "locationName":"ETag" + }, + "SSECustomerAlgorithm":{ + "shape":"SSECustomerAlgorithm", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-algorithm" + }, + "SSECustomerKeyMD5":{ + "shape":"SSECustomerKeyMD5", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key-MD5" + }, + "SSEKMSKeyId":{ + "shape":"SSEKMSKeyId", + "location":"header", + "locationName":"x-amz-server-side-encryption-aws-kms-key-id" + }, + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" + } + } + }, + "UploadPartRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Key", + "PartNumber", + "UploadId" + ], + "members":{ + "Body":{ + "shape":"Body", + "streaming":true + }, + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "ContentLength":{ + "shape":"ContentLength", + "location":"header", + "locationName":"Content-Length" + }, + "ContentMD5":{ + "shape":"ContentMD5", + "location":"header", + "locationName":"Content-MD5" + }, + "Key":{ + "shape":"ObjectKey", + "location":"uri", + "locationName":"Key" + }, + "PartNumber":{ + "shape":"PartNumber", + "location":"querystring", + "locationName":"partNumber" + }, + "UploadId":{ + "shape":"MultipartUploadId", + "location":"querystring", + "locationName":"uploadId" + }, + "SSECustomerAlgorithm":{ + "shape":"SSECustomerAlgorithm", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-algorithm" + }, + "SSECustomerKey":{ + "shape":"SSECustomerKey", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key" + }, + "SSECustomerKeyMD5":{ + "shape":"SSECustomerKeyMD5", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key-MD5" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" + } + }, + "payload":"Body" + }, + "Value":{"type":"string"}, + "VersionIdMarker":{"type":"string"}, + 
"VersioningConfiguration":{ + "type":"structure", + "members":{ + "MFADelete":{ + "shape":"MFADelete", + "locationName":"MfaDelete" + }, + "Status":{"shape":"BucketVersioningStatus"} + } + }, + "WebsiteConfiguration":{ + "type":"structure", + "members":{ + "ErrorDocument":{"shape":"ErrorDocument"}, + "IndexDocument":{"shape":"IndexDocument"}, + "RedirectAllRequestsTo":{"shape":"RedirectAllRequestsTo"}, + "RoutingRules":{"shape":"RoutingRules"} + } + }, + "WebsiteRedirectLocation":{"type":"string"} + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/s3/2006-03-01/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/s3/2006-03-01/docs-2.json new file mode 100644 index 000000000..5aa6c5cdb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/s3/2006-03-01/docs-2.json @@ -0,0 +1,2445 @@ +{ + "version": "2.0", + "service": null, + "operations": { + "AbortMultipartUpload": "
Aborts a multipart upload. To verify that all parts have been removed, so you don't get charged for the part storage, you should call the List Parts operation and ensure the parts list is empty.
    ", + "CompleteMultipartUpload": "Completes a multipart upload by assembling previously uploaded parts.", + "CopyObject": "Creates a copy of an object that is already stored in Amazon S3.", + "CreateBucket": "Creates a new bucket.", + "CreateMultipartUpload": "
Initiates a multipart upload and returns an upload ID. Note: After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage.
    ", + "DeleteBucket": "Deletes the bucket. All objects (including all object versions and Delete Markers) in the bucket must be deleted before the bucket itself can be deleted.", + "DeleteBucketCors": "Deletes the cors configuration information set for the bucket.", + "DeleteBucketLifecycle": "Deletes the lifecycle configuration from the bucket.", + "DeleteBucketPolicy": "Deletes the policy from the bucket.", + "DeleteBucketReplication": "Deletes the replication configuration from the bucket.", + "DeleteBucketTagging": "Deletes the tags from the bucket.", + "DeleteBucketWebsite": "This operation removes the website configuration from the bucket.", + "DeleteObject": "Removes the null version (if there is one) of an object and inserts a delete marker, which becomes the latest version of the object. If there isn't a null version, Amazon S3 does not remove any objects.", + "DeleteObjects": "This operation enables you to delete multiple objects from a bucket using a single HTTP request. You may specify up to 1000 keys.", + "GetBucketAccelerateConfiguration": "Returns the accelerate configuration of a bucket.", + "GetBucketAcl": "Gets the access control policy for the bucket.", + "GetBucketCors": "Returns the cors configuration for the bucket.", + "GetBucketLifecycle": "Deprecated, see the GetBucketLifecycleConfiguration operation.", + "GetBucketLifecycleConfiguration": "Returns the lifecycle configuration information set on the bucket.", + "GetBucketLocation": "Returns the region the bucket resides in.", + "GetBucketLogging": "Returns the logging status of a bucket and the permissions users have to view and modify that status. To use GET, you must be the bucket owner.", + "GetBucketNotification": "Deprecated, see the GetBucketNotificationConfiguration operation.", + "GetBucketNotificationConfiguration": "Returns the notification configuration of a bucket.", + "GetBucketPolicy": "Returns the policy of a specified bucket.", + "GetBucketReplication": "Deprecated, see the GetBucketReplicationConfiguration operation.", + "GetBucketRequestPayment": "Returns the request payment configuration of a bucket.", + "GetBucketTagging": "Returns the tag set associated with the bucket.", + "GetBucketVersioning": "Returns the versioning state of a bucket.", + "GetBucketWebsite": "Returns the website configuration for a bucket.", + "GetObject": "Retrieves objects from Amazon S3.", + "GetObjectAcl": "Returns the access control list (ACL) of an object.", + "GetObjectTorrent": "Return torrent files from a bucket.", + "HeadBucket": "This operation is useful to determine if a bucket exists and you have permission to access it.", + "HeadObject": "The HEAD operation retrieves metadata from an object without returning the object itself. This operation is useful if you're only interested in an object's metadata. To use HEAD, you must have READ access to the object.", + "ListBuckets": "Returns a list of all buckets owned by the authenticated sender of the request.", + "ListMultipartUploads": "This operation lists in-progress multipart uploads.", + "ListObjectVersions": "Returns metadata about all of the versions of objects in a bucket.", + "ListObjects": "Returns some or all (up to 1000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket.", + "ListObjectsV2": "Returns some or all (up to 1000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. 
Note: ListObjectsV2 is the revised List Objects API and we recommend you use this revised API for new application development.", + "ListParts": "Lists the parts that have been uploaded for a specific multipart upload.", + "PutBucketAccelerateConfiguration": "Sets the accelerate configuration of an existing bucket.", + "PutBucketAcl": "Sets the permissions on a bucket using access control lists (ACL).", + "PutBucketCors": "Sets the cors configuration for a bucket.", + "PutBucketLifecycle": "Deprecated, see the PutBucketLifecycleConfiguration operation.", + "PutBucketLifecycleConfiguration": "Sets lifecycle configuration for your bucket. If a lifecycle configuration exists, it replaces it.", + "PutBucketLogging": "Set the logging parameters for a bucket and to specify permissions for who can view and modify the logging parameters. To set the logging status of a bucket, you must be the bucket owner.", + "PutBucketNotification": "Deprecated, see the PutBucketNotificationConfiguraiton operation.", + "PutBucketNotificationConfiguration": "Enables notifications of specified events for a bucket.", + "PutBucketPolicy": "Replaces a policy on a bucket. If the bucket already has a policy, the one in this request completely replaces it.", + "PutBucketReplication": "Creates a new replication configuration (or replaces an existing one, if present).", + "PutBucketRequestPayment": "Sets the request payment configuration for a bucket. By default, the bucket owner pays for downloads from the bucket. This configuration parameter enables the bucket owner (only) to specify that the person requesting the download will be charged for the download. Documentation on requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html", + "PutBucketTagging": "Sets the tags for a bucket.", + "PutBucketVersioning": "Sets the versioning state of an existing bucket. To set the versioning state, you must be the bucket owner.", + "PutBucketWebsite": "Set the website configuration for a bucket.", + "PutObject": "Adds an object to a bucket.", + "PutObjectAcl": "uses the acl subresource to set the access control list (ACL) permissions for an object that already exists in a bucket", + "RestoreObject": "Restores an archived copy of an object back into Amazon S3", + "UploadPart": "
Uploads a part in a multipart upload. Note: After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage.
    ", + "UploadPartCopy": "Uploads a part by copying data from an existing object as data source." + }, + "shapes": { + "AbortDate": { + "base": null, + "refs": { + "CreateMultipartUploadOutput$AbortDate": "Date when multipart upload will become eligible for abort operation by lifecycle.", + "ListPartsOutput$AbortDate": "Date when multipart upload will become eligible for abort operation by lifecycle." + } + }, + "AbortIncompleteMultipartUpload": { + "base": "Specifies the days since the initiation of an Incomplete Multipart Upload that Lifecycle will wait before permanently removing all parts of the upload.", + "refs": { + "LifecycleRule$AbortIncompleteMultipartUpload": null, + "Rule$AbortIncompleteMultipartUpload": null + } + }, + "AbortMultipartUploadOutput": { + "base": null, + "refs": { + } + }, + "AbortMultipartUploadRequest": { + "base": null, + "refs": { + } + }, + "AbortRuleId": { + "base": null, + "refs": { + "CreateMultipartUploadOutput$AbortRuleId": "Id of the lifecycle rule that makes a multipart upload eligible for abort operation.", + "ListPartsOutput$AbortRuleId": "Id of the lifecycle rule that makes a multipart upload eligible for abort operation." + } + }, + "AccelerateConfiguration": { + "base": null, + "refs": { + "PutBucketAccelerateConfigurationRequest$AccelerateConfiguration": "Specifies the Accelerate Configuration you want to set for the bucket." + } + }, + "AcceptRanges": { + "base": null, + "refs": { + "GetObjectOutput$AcceptRanges": null, + "HeadObjectOutput$AcceptRanges": null + } + }, + "AccessControlPolicy": { + "base": null, + "refs": { + "PutBucketAclRequest$AccessControlPolicy": null, + "PutObjectAclRequest$AccessControlPolicy": null + } + }, + "AllowedHeader": { + "base": null, + "refs": { + "AllowedHeaders$member": null + } + }, + "AllowedHeaders": { + "base": null, + "refs": { + "CORSRule$AllowedHeaders": "Specifies which headers are allowed in a pre-flight OPTIONS request." + } + }, + "AllowedMethod": { + "base": null, + "refs": { + "AllowedMethods$member": null + } + }, + "AllowedMethods": { + "base": null, + "refs": { + "CORSRule$AllowedMethods": "Identifies HTTP methods that the domain/origin specified in the rule is allowed to execute." + } + }, + "AllowedOrigin": { + "base": null, + "refs": { + "AllowedOrigins$member": null + } + }, + "AllowedOrigins": { + "base": null, + "refs": { + "CORSRule$AllowedOrigins": "One or more origins you want customers to be able to access the bucket from." + } + }, + "Body": { + "base": null, + "refs": { + "GetObjectOutput$Body": "Object data.", + "GetObjectTorrentOutput$Body": null, + "PutObjectRequest$Body": "Object data.", + "UploadPartRequest$Body": "Object data." + } + }, + "Bucket": { + "base": null, + "refs": { + "Buckets$member": null + } + }, + "BucketAccelerateStatus": { + "base": null, + "refs": { + "AccelerateConfiguration$Status": "The accelerate configuration of the bucket.", + "GetBucketAccelerateConfigurationOutput$Status": "The accelerate configuration of the bucket." + } + }, + "BucketAlreadyExists": { + "base": "The requested bucket name is not available. The bucket namespace is shared by all users of the system. Please select a different name and try again.", + "refs": { + } + }, + "BucketAlreadyOwnedByYou": { + "base": null, + "refs": { + } + }, + "BucketCannedACL": { + "base": null, + "refs": { + "CreateBucketRequest$ACL": "The canned ACL to apply to the bucket.", + "PutBucketAclRequest$ACL": "The canned ACL to apply to the bucket." 
+ } + }, + "BucketLifecycleConfiguration": { + "base": null, + "refs": { + "PutBucketLifecycleConfigurationRequest$LifecycleConfiguration": null + } + }, + "BucketLocationConstraint": { + "base": null, + "refs": { + "CreateBucketConfiguration$LocationConstraint": "Specifies the region where the bucket will be created. If you don't specify a region, the bucket will be created in US Standard.", + "GetBucketLocationOutput$LocationConstraint": null + } + }, + "BucketLoggingStatus": { + "base": null, + "refs": { + "PutBucketLoggingRequest$BucketLoggingStatus": null + } + }, + "BucketLogsPermission": { + "base": null, + "refs": { + "TargetGrant$Permission": "Logging permissions assigned to the Grantee for the bucket." + } + }, + "BucketName": { + "base": null, + "refs": { + "AbortMultipartUploadRequest$Bucket": null, + "Bucket$Name": "The name of the bucket.", + "CompleteMultipartUploadOutput$Bucket": null, + "CompleteMultipartUploadRequest$Bucket": null, + "CopyObjectRequest$Bucket": null, + "CreateBucketRequest$Bucket": null, + "CreateMultipartUploadOutput$Bucket": "Name of the bucket to which the multipart upload was initiated.", + "CreateMultipartUploadRequest$Bucket": null, + "DeleteBucketCorsRequest$Bucket": null, + "DeleteBucketLifecycleRequest$Bucket": null, + "DeleteBucketPolicyRequest$Bucket": null, + "DeleteBucketReplicationRequest$Bucket": null, + "DeleteBucketRequest$Bucket": null, + "DeleteBucketTaggingRequest$Bucket": null, + "DeleteBucketWebsiteRequest$Bucket": null, + "DeleteObjectRequest$Bucket": null, + "DeleteObjectsRequest$Bucket": null, + "Destination$Bucket": "Amazon resource name (ARN) of the bucket where you want Amazon S3 to store replicas of the object identified by the rule.", + "GetBucketAccelerateConfigurationRequest$Bucket": "Name of the bucket for which the accelerate configuration is retrieved.", + "GetBucketAclRequest$Bucket": null, + "GetBucketCorsRequest$Bucket": null, + "GetBucketLifecycleConfigurationRequest$Bucket": null, + "GetBucketLifecycleRequest$Bucket": null, + "GetBucketLocationRequest$Bucket": null, + "GetBucketLoggingRequest$Bucket": null, + "GetBucketNotificationConfigurationRequest$Bucket": "Name of the bucket to get the notification configuration for.", + "GetBucketPolicyRequest$Bucket": null, + "GetBucketReplicationRequest$Bucket": null, + "GetBucketRequestPaymentRequest$Bucket": null, + "GetBucketTaggingRequest$Bucket": null, + "GetBucketVersioningRequest$Bucket": null, + "GetBucketWebsiteRequest$Bucket": null, + "GetObjectAclRequest$Bucket": null, + "GetObjectRequest$Bucket": null, + "GetObjectTorrentRequest$Bucket": null, + "HeadBucketRequest$Bucket": null, + "HeadObjectRequest$Bucket": null, + "ListMultipartUploadsOutput$Bucket": "Name of the bucket to which the multipart upload was initiated.", + "ListMultipartUploadsRequest$Bucket": null, + "ListObjectVersionsOutput$Name": null, + "ListObjectVersionsRequest$Bucket": null, + "ListObjectsOutput$Name": null, + "ListObjectsRequest$Bucket": null, + "ListObjectsV2Output$Name": "Name of the bucket to list.", + "ListObjectsV2Request$Bucket": "Name of the bucket to list.", + "ListPartsOutput$Bucket": "Name of the bucket to which the multipart upload was initiated.", + "ListPartsRequest$Bucket": null, + "PutBucketAccelerateConfigurationRequest$Bucket": "Name of the bucket for which the accelerate configuration is set.", + "PutBucketAclRequest$Bucket": null, + "PutBucketCorsRequest$Bucket": null, + "PutBucketLifecycleConfigurationRequest$Bucket": null, + "PutBucketLifecycleRequest$Bucket": null, + 
"PutBucketLoggingRequest$Bucket": null, + "PutBucketNotificationConfigurationRequest$Bucket": null, + "PutBucketNotificationRequest$Bucket": null, + "PutBucketPolicyRequest$Bucket": null, + "PutBucketReplicationRequest$Bucket": null, + "PutBucketRequestPaymentRequest$Bucket": null, + "PutBucketTaggingRequest$Bucket": null, + "PutBucketVersioningRequest$Bucket": null, + "PutBucketWebsiteRequest$Bucket": null, + "PutObjectAclRequest$Bucket": null, + "PutObjectRequest$Bucket": "Name of the bucket to which the PUT operation was initiated.", + "RestoreObjectRequest$Bucket": null, + "UploadPartCopyRequest$Bucket": null, + "UploadPartRequest$Bucket": "Name of the bucket to which the multipart upload was initiated." + } + }, + "BucketVersioningStatus": { + "base": null, + "refs": { + "GetBucketVersioningOutput$Status": "The versioning state of the bucket.", + "VersioningConfiguration$Status": "The versioning state of the bucket." + } + }, + "Buckets": { + "base": null, + "refs": { + "ListBucketsOutput$Buckets": null + } + }, + "CORSConfiguration": { + "base": null, + "refs": { + "PutBucketCorsRequest$CORSConfiguration": null + } + }, + "CORSRule": { + "base": null, + "refs": { + "CORSRules$member": null + } + }, + "CORSRules": { + "base": null, + "refs": { + "CORSConfiguration$CORSRules": null, + "GetBucketCorsOutput$CORSRules": null + } + }, + "CacheControl": { + "base": null, + "refs": { + "CopyObjectRequest$CacheControl": "Specifies caching behavior along the request/reply chain.", + "CreateMultipartUploadRequest$CacheControl": "Specifies caching behavior along the request/reply chain.", + "GetObjectOutput$CacheControl": "Specifies caching behavior along the request/reply chain.", + "HeadObjectOutput$CacheControl": "Specifies caching behavior along the request/reply chain.", + "PutObjectRequest$CacheControl": "Specifies caching behavior along the request/reply chain." + } + }, + "CloudFunction": { + "base": null, + "refs": { + "CloudFunctionConfiguration$CloudFunction": null + } + }, + "CloudFunctionConfiguration": { + "base": null, + "refs": { + "NotificationConfigurationDeprecated$CloudFunctionConfiguration": null + } + }, + "CloudFunctionInvocationRole": { + "base": null, + "refs": { + "CloudFunctionConfiguration$InvocationRole": null + } + }, + "Code": { + "base": null, + "refs": { + "Error$Code": null + } + }, + "CommonPrefix": { + "base": null, + "refs": { + "CommonPrefixList$member": null + } + }, + "CommonPrefixList": { + "base": null, + "refs": { + "ListMultipartUploadsOutput$CommonPrefixes": null, + "ListObjectVersionsOutput$CommonPrefixes": null, + "ListObjectsOutput$CommonPrefixes": null, + "ListObjectsV2Output$CommonPrefixes": "CommonPrefixes contains all (if there are any) keys between Prefix and the next occurrence of the string specified by delimiter" + } + }, + "CompleteMultipartUploadOutput": { + "base": null, + "refs": { + } + }, + "CompleteMultipartUploadRequest": { + "base": null, + "refs": { + } + }, + "CompletedMultipartUpload": { + "base": null, + "refs": { + "CompleteMultipartUploadRequest$MultipartUpload": null + } + }, + "CompletedPart": { + "base": null, + "refs": { + "CompletedPartList$member": null + } + }, + "CompletedPartList": { + "base": null, + "refs": { + "CompletedMultipartUpload$Parts": null + } + }, + "Condition": { + "base": null, + "refs": { + "RoutingRule$Condition": "A container for describing a condition that must be met for the specified redirect to apply. For example, 1. If request is for pages in the /docs folder, redirect to the /documents folder. 
2. If request results in HTTP error 4xx, redirect request to another host where you might process the error." + } + }, + "ContentDisposition": { + "base": null, + "refs": { + "CopyObjectRequest$ContentDisposition": "Specifies presentational information for the object.", + "CreateMultipartUploadRequest$ContentDisposition": "Specifies presentational information for the object.", + "GetObjectOutput$ContentDisposition": "Specifies presentational information for the object.", + "HeadObjectOutput$ContentDisposition": "Specifies presentational information for the object.", + "PutObjectRequest$ContentDisposition": "Specifies presentational information for the object." + } + }, + "ContentEncoding": { + "base": null, + "refs": { + "CopyObjectRequest$ContentEncoding": "Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.", + "CreateMultipartUploadRequest$ContentEncoding": "Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.", + "GetObjectOutput$ContentEncoding": "Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.", + "HeadObjectOutput$ContentEncoding": "Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.", + "PutObjectRequest$ContentEncoding": "Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field." + } + }, + "ContentLanguage": { + "base": null, + "refs": { + "CopyObjectRequest$ContentLanguage": "The language the content is in.", + "CreateMultipartUploadRequest$ContentLanguage": "The language the content is in.", + "GetObjectOutput$ContentLanguage": "The language the content is in.", + "HeadObjectOutput$ContentLanguage": "The language the content is in.", + "PutObjectRequest$ContentLanguage": "The language the content is in." + } + }, + "ContentLength": { + "base": null, + "refs": { + "GetObjectOutput$ContentLength": "Size of the body in bytes.", + "HeadObjectOutput$ContentLength": "Size of the body in bytes.", + "PutObjectRequest$ContentLength": "Size of the body in bytes. This parameter is useful when the size of the body cannot be determined automatically.", + "UploadPartRequest$ContentLength": "Size of the body in bytes. This parameter is useful when the size of the body cannot be determined automatically." 
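The CacheControl, ContentDisposition, ContentEncoding, ContentLanguage, ContentLength, and ContentType shapes above map directly onto the HTTP headers of the same names; they are supplied once on the PUT and stored with the object. A minimal aws-sdk-go sketch (bucket name, key, region, and payload are placeholders):

    package main

    import (
        "strings"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
        svc := s3.New(sess)

        // Each field below becomes the matching HTTP header on the PUT and is
        // stored with the object, so later GETs replay it.
        _, err := svc.PutObject(&s3.PutObjectInput{
            Bucket:             aws.String("example-bucket"), // placeholder
            Key:                aws.String("data/hello.json"),
            Body:               strings.NewReader(`{"hello":"world"}`),
            ContentType:        aws.String("application/json"),
            CacheControl:       aws.String("max-age=300"),
            ContentDisposition: aws.String(`attachment; filename="hello.json"`),
            ContentLanguage:    aws.String("en"),
        })
        if err != nil {
            panic(err)
        }
    }

ContentLength usually does not need to be set by hand: the SDK derives it from the seekable Body. Per the docs above, it matters only when the size cannot be determined automatically.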
+ } + }, + "ContentMD5": { + "base": null, + "refs": { + "PutBucketAclRequest$ContentMD5": null, + "PutBucketCorsRequest$ContentMD5": null, + "PutBucketLifecycleRequest$ContentMD5": null, + "PutBucketLoggingRequest$ContentMD5": null, + "PutBucketNotificationRequest$ContentMD5": null, + "PutBucketPolicyRequest$ContentMD5": null, + "PutBucketReplicationRequest$ContentMD5": null, + "PutBucketRequestPaymentRequest$ContentMD5": null, + "PutBucketTaggingRequest$ContentMD5": null, + "PutBucketVersioningRequest$ContentMD5": null, + "PutBucketWebsiteRequest$ContentMD5": null, + "PutObjectAclRequest$ContentMD5": null, + "PutObjectRequest$ContentMD5": "The base64-encoded 128-bit MD5 digest of the part data.", + "UploadPartRequest$ContentMD5": "The base64-encoded 128-bit MD5 digest of the part data." + } + }, + "ContentRange": { + "base": null, + "refs": { + "GetObjectOutput$ContentRange": "The portion of the object returned in the response." + } + }, + "ContentType": { + "base": null, + "refs": { + "CopyObjectRequest$ContentType": "A standard MIME type describing the format of the object data.", + "CreateMultipartUploadRequest$ContentType": "A standard MIME type describing the format of the object data.", + "GetObjectOutput$ContentType": "A standard MIME type describing the format of the object data.", + "HeadObjectOutput$ContentType": "A standard MIME type describing the format of the object data.", + "PutObjectRequest$ContentType": "A standard MIME type describing the format of the object data." + } + }, + "CopyObjectOutput": { + "base": null, + "refs": { + } + }, + "CopyObjectRequest": { + "base": null, + "refs": { + } + }, + "CopyObjectResult": { + "base": null, + "refs": { + "CopyObjectOutput$CopyObjectResult": null + } + }, + "CopyPartResult": { + "base": null, + "refs": { + "UploadPartCopyOutput$CopyPartResult": null + } + }, + "CopySource": { + "base": null, + "refs": { + "CopyObjectRequest$CopySource": "The name of the source bucket and key name of the source object, separated by a slash (/). Must be URL-encoded.", + "UploadPartCopyRequest$CopySource": "The name of the source bucket and key name of the source object, separated by a slash (/). Must be URL-encoded." + } + }, + "CopySourceIfMatch": { + "base": null, + "refs": { + "CopyObjectRequest$CopySourceIfMatch": "Copies the object if its entity tag (ETag) matches the specified tag.", + "UploadPartCopyRequest$CopySourceIfMatch": "Copies the object if its entity tag (ETag) matches the specified tag." + } + }, + "CopySourceIfModifiedSince": { + "base": null, + "refs": { + "CopyObjectRequest$CopySourceIfModifiedSince": "Copies the object if it has been modified since the specified time.", + "UploadPartCopyRequest$CopySourceIfModifiedSince": "Copies the object if it has been modified since the specified time." + } + }, + "CopySourceIfNoneMatch": { + "base": null, + "refs": { + "CopyObjectRequest$CopySourceIfNoneMatch": "Copies the object if its entity tag (ETag) is different than the specified ETag.", + "UploadPartCopyRequest$CopySourceIfNoneMatch": "Copies the object if its entity tag (ETag) is different than the specified ETag." + } + }, + "CopySourceIfUnmodifiedSince": { + "base": null, + "refs": { + "CopyObjectRequest$CopySourceIfUnmodifiedSince": "Copies the object if it hasn't been modified since the specified time.", + "UploadPartCopyRequest$CopySourceIfUnmodifiedSince": "Copies the object if it hasn't been modified since the specified time." 
+ } + }, + "CopySourceRange": { + "base": null, + "refs": { + "UploadPartCopyRequest$CopySourceRange": "The range of bytes to copy from the source object. The range value must use the form bytes=first-last, where the first and last are the zero-based byte offsets to copy. For example, bytes=0-9 indicates that you want to copy the first ten bytes of the source. You can copy a range only if the source object is greater than 5 GB." + } + }, + "CopySourceSSECustomerAlgorithm": { + "base": null, + "refs": { + "CopyObjectRequest$CopySourceSSECustomerAlgorithm": "Specifies the algorithm to use when decrypting the source object (e.g., AES256).", + "UploadPartCopyRequest$CopySourceSSECustomerAlgorithm": "Specifies the algorithm to use when decrypting the source object (e.g., AES256)." + } + }, + "CopySourceSSECustomerKey": { + "base": null, + "refs": { + "CopyObjectRequest$CopySourceSSECustomerKey": "Specifies the customer-provided encryption key for Amazon S3 to use to decrypt the source object. The encryption key provided in this header must be one that was used when the source object was created.", + "UploadPartCopyRequest$CopySourceSSECustomerKey": "Specifies the customer-provided encryption key for Amazon S3 to use to decrypt the source object. The encryption key provided in this header must be one that was used when the source object was created." + } + }, + "CopySourceSSECustomerKeyMD5": { + "base": null, + "refs": { + "CopyObjectRequest$CopySourceSSECustomerKeyMD5": "Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.", + "UploadPartCopyRequest$CopySourceSSECustomerKeyMD5": "Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error." + } + }, + "CopySourceVersionId": { + "base": null, + "refs": { + "CopyObjectOutput$CopySourceVersionId": null, + "UploadPartCopyOutput$CopySourceVersionId": "The version of the source object that was copied, if you have enabled versioning on the source bucket." + } + }, + "CreateBucketConfiguration": { + "base": null, + "refs": { + "CreateBucketRequest$CreateBucketConfiguration": null + } + }, + "CreateBucketOutput": { + "base": null, + "refs": { + } + }, + "CreateBucketRequest": { + "base": null, + "refs": { + } + }, + "CreateMultipartUploadOutput": { + "base": null, + "refs": { + } + }, + "CreateMultipartUploadRequest": { + "base": null, + "refs": { + } + }, + "CreationDate": { + "base": null, + "refs": { + "Bucket$CreationDate": "Date the bucket was created." + } + }, + "Date": { + "base": null, + "refs": { + "LifecycleExpiration$Date": "Indicates at what date the object is to be moved or deleted. Should be in GMT ISO 8601 Format.", + "Transition$Date": "Indicates at what date the object is to be moved or deleted. Should be in GMT ISO 8601 Format." + } + }, + "Days": { + "base": null, + "refs": { + "LifecycleExpiration$Days": "Indicates the lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer.", + "NoncurrentVersionExpiration$NoncurrentDays": "Specifies the number of days an object is noncurrent before Amazon S3 can perform the associated action. 
For information about the noncurrent days calculations, see How Amazon S3 Calculates When an Object Became Noncurrent in the Amazon Simple Storage Service Developer Guide.", + "NoncurrentVersionTransition$NoncurrentDays": "Specifies the number of days an object is noncurrent before Amazon S3 can perform the associated action. For information about the noncurrent days calculations, see How Amazon S3 Calculates When an Object Became Noncurrent in the Amazon Simple Storage Service Developer Guide.", + "RestoreRequest$Days": "Lifetime of the active copy in days", + "Transition$Days": "Indicates the lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer." + } + }, + "DaysAfterInitiation": { + "base": null, + "refs": { + "AbortIncompleteMultipartUpload$DaysAfterInitiation": "Indicates the number of days that must pass since initiation for Lifecycle to abort an Incomplete Multipart Upload." + } + }, + "Delete": { + "base": null, + "refs": { + "DeleteObjectsRequest$Delete": null + } + }, + "DeleteBucketCorsRequest": { + "base": null, + "refs": { + } + }, + "DeleteBucketLifecycleRequest": { + "base": null, + "refs": { + } + }, + "DeleteBucketPolicyRequest": { + "base": null, + "refs": { + } + }, + "DeleteBucketReplicationRequest": { + "base": null, + "refs": { + } + }, + "DeleteBucketRequest": { + "base": null, + "refs": { + } + }, + "DeleteBucketTaggingRequest": { + "base": null, + "refs": { + } + }, + "DeleteBucketWebsiteRequest": { + "base": null, + "refs": { + } + }, + "DeleteMarker": { + "base": null, + "refs": { + "DeleteObjectOutput$DeleteMarker": "Specifies whether the versioned object that was permanently deleted was (true) or was not (false) a delete marker.", + "DeletedObject$DeleteMarker": null, + "GetObjectOutput$DeleteMarker": "Specifies whether the object retrieved was (true) or was not (false) a Delete Marker. If false, this response header does not appear in the response.", + "HeadObjectOutput$DeleteMarker": "Specifies whether the object retrieved was (true) or was not (false) a Delete Marker. If false, this response header does not appear in the response." + } + }, + "DeleteMarkerEntry": { + "base": null, + "refs": { + "DeleteMarkers$member": null + } + }, + "DeleteMarkerVersionId": { + "base": null, + "refs": { + "DeletedObject$DeleteMarkerVersionId": null + } + }, + "DeleteMarkers": { + "base": null, + "refs": { + "ListObjectVersionsOutput$DeleteMarkers": null + } + }, + "DeleteObjectOutput": { + "base": null, + "refs": { + } + }, + "DeleteObjectRequest": { + "base": null, + "refs": { + } + }, + "DeleteObjectsOutput": { + "base": null, + "refs": { + } + }, + "DeleteObjectsRequest": { + "base": null, + "refs": { + } + }, + "DeletedObject": { + "base": null, + "refs": { + "DeletedObjects$member": null + } + }, + "DeletedObjects": { + "base": null, + "refs": { + "DeleteObjectsOutput$Deleted": null + } + }, + "Delimiter": { + "base": null, + "refs": { + "ListMultipartUploadsOutput$Delimiter": null, + "ListMultipartUploadsRequest$Delimiter": "Character you use to group keys.", + "ListObjectVersionsOutput$Delimiter": null, + "ListObjectVersionsRequest$Delimiter": "A delimiter is a character you use to group keys.", + "ListObjectsOutput$Delimiter": null, + "ListObjectsRequest$Delimiter": "A delimiter is a character you use to group keys.", + "ListObjectsV2Output$Delimiter": "A delimiter is a character you use to group keys.", + "ListObjectsV2Request$Delimiter": "A delimiter is a character you use to group keys." 
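Prefix and Delimiter together let a flat keyspace be browsed like a directory tree: keys sharing the prefix are returned, and anything deeper than the next delimiter is rolled up into CommonPrefixes. A short ListObjectsV2 sketch (bucket and prefix are placeholders; `svc` is an *s3.S3 client as above):

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func listTopLevel(svc *s3.S3) error {
        out, err := svc.ListObjectsV2(&s3.ListObjectsV2Input{
            Bucket:    aws.String("example-bucket"), // placeholder
            Prefix:    aws.String("photos/"),
            Delimiter: aws.String("/"),
        })
        if err != nil {
            return err
        }
        // Keys that share a sub-prefix up to the next "/" are grouped here.
        for _, cp := range out.CommonPrefixes {
            fmt.Println("folder:", aws.StringValue(cp.Prefix))
        }
        for _, obj := range out.Contents {
            fmt.Println("object:", aws.StringValue(obj.Key))
        }
        return nil
    }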
+ } + }, + "Destination": { + "base": null, + "refs": { + "ReplicationRule$Destination": null + } + }, + "DisplayName": { + "base": null, + "refs": { + "Grantee$DisplayName": "Screen name of the grantee.", + "Initiator$DisplayName": "Name of the Principal.", + "Owner$DisplayName": null + } + }, + "ETag": { + "base": null, + "refs": { + "CompleteMultipartUploadOutput$ETag": "Entity tag of the object.", + "CompletedPart$ETag": "Entity tag returned when the part was uploaded.", + "CopyObjectResult$ETag": null, + "CopyPartResult$ETag": "Entity tag of the object.", + "GetObjectOutput$ETag": "An ETag is an opaque identifier assigned by a web server to a specific version of a resource found at a URL", + "HeadObjectOutput$ETag": "An ETag is an opaque identifier assigned by a web server to a specific version of a resource found at a URL", + "Object$ETag": null, + "ObjectVersion$ETag": null, + "Part$ETag": "Entity tag returned when the part was uploaded.", + "PutObjectOutput$ETag": "Entity tag for the uploaded object.", + "UploadPartOutput$ETag": "Entity tag for the uploaded object." + } + }, + "EmailAddress": { + "base": null, + "refs": { + "Grantee$EmailAddress": "Email address of the grantee." + } + }, + "EncodingType": { + "base": "Requests Amazon S3 to encode the object keys in the response and specifies the encoding method to use. An object key may contain any Unicode character; however, XML 1.0 parser cannot parse some characters, such as characters with an ASCII value from 0 to 10. For characters that are not supported in XML 1.0, you can add this parameter to request that Amazon S3 encode the keys in the response.", + "refs": { + "ListMultipartUploadsOutput$EncodingType": "Encoding type used by Amazon S3 to encode object keys in the response.", + "ListMultipartUploadsRequest$EncodingType": null, + "ListObjectVersionsOutput$EncodingType": "Encoding type used by Amazon S3 to encode object keys in the response.", + "ListObjectVersionsRequest$EncodingType": null, + "ListObjectsOutput$EncodingType": "Encoding type used by Amazon S3 to encode object keys in the response.", + "ListObjectsRequest$EncodingType": null, + "ListObjectsV2Output$EncodingType": "Encoding type used by Amazon S3 to encode object keys in the response.", + "ListObjectsV2Request$EncodingType": "Encoding type used by Amazon S3 to encode object keys in the response." + } + }, + "Error": { + "base": null, + "refs": { + "Errors$member": null + } + }, + "ErrorDocument": { + "base": null, + "refs": { + "GetBucketWebsiteOutput$ErrorDocument": null, + "WebsiteConfiguration$ErrorDocument": null + } + }, + "Errors": { + "base": null, + "refs": { + "DeleteObjectsOutput$Errors": null + } + }, + "Event": { + "base": "Bucket event for which to send notifications.", + "refs": { + "CloudFunctionConfiguration$Event": null, + "EventList$member": null, + "QueueConfigurationDeprecated$Event": null, + "TopicConfigurationDeprecated$Event": "Bucket event for which to send notifications." + } + }, + "EventList": { + "base": null, + "refs": { + "CloudFunctionConfiguration$Events": null, + "LambdaFunctionConfiguration$Events": null, + "QueueConfiguration$Events": null, + "QueueConfigurationDeprecated$Events": null, + "TopicConfiguration$Events": null, + "TopicConfigurationDeprecated$Events": null + } + }, + "Expiration": { + "base": null, + "refs": { + "CompleteMultipartUploadOutput$Expiration": "If the object expiration is configured, this will contain the expiration date (expiry-date) and rule ID (rule-id). 
The value of rule-id is URL encoded.", + "CopyObjectOutput$Expiration": "If the object expiration is configured, the response includes this header.", + "GetObjectOutput$Expiration": "If the object expiration is configured (see PUT Bucket lifecycle), the response includes this header. It includes the expiry-date and rule-id key value pairs providing object expiration information. The value of the rule-id is URL encoded.", + "HeadObjectOutput$Expiration": "If the object expiration is configured (see PUT Bucket lifecycle), the response includes this header. It includes the expiry-date and rule-id key value pairs providing object expiration information. The value of the rule-id is URL encoded.", + "PutObjectOutput$Expiration": "If the object expiration is configured, this will contain the expiration date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded." + } + }, + "ExpirationStatus": { + "base": null, + "refs": { + "LifecycleRule$Status": "If 'Enabled', the rule is currently being applied. If 'Disabled', the rule is not currently being applied.", + "Rule$Status": "If 'Enabled', the rule is currently being applied. If 'Disabled', the rule is not currently being applied." + } + }, + "ExpiredObjectDeleteMarker": { + "base": null, + "refs": { + "LifecycleExpiration$ExpiredObjectDeleteMarker": "Indicates whether Amazon S3 will remove a delete marker with no noncurrent versions. If set to true, the delete marker will be expired; if set to false the policy takes no action. This cannot be specified with Days or Date in a Lifecycle Expiration Policy." + } + }, + "Expires": { + "base": null, + "refs": { + "CopyObjectRequest$Expires": "The date and time at which the object is no longer cacheable.", + "CreateMultipartUploadRequest$Expires": "The date and time at which the object is no longer cacheable.", + "GetObjectOutput$Expires": "The date and time at which the object is no longer cacheable.", + "HeadObjectOutput$Expires": "The date and time at which the object is no longer cacheable.", + "PutObjectRequest$Expires": "The date and time at which the object is no longer cacheable." + } + }, + "ExposeHeader": { + "base": null, + "refs": { + "ExposeHeaders$member": null + } + }, + "ExposeHeaders": { + "base": null, + "refs": { + "CORSRule$ExposeHeaders": "One or more headers in the response that you want customers to be able to access from their applications (for example, from a JavaScript XMLHttpRequest object)." + } + }, + "FetchOwner": { + "base": null, + "refs": { + "ListObjectsV2Request$FetchOwner": "The owner field is not present in ListObjectsV2 results by default. If you want the owner field returned with each key in the result, set the FetchOwner field to true." + } + }, + "FilterRule": { + "base": "Container for a key-value pair that defines the criteria for the filter rule.", + "refs": { + "FilterRuleList$member": null + } + }, + "FilterRuleList": { + "base": "A list of containers for key-value pairs that define the criteria for the filter rule.", + "refs": { + "S3KeyFilter$FilterRules": null + } + }, + "FilterRuleName": { + "base": null, + "refs": { + "FilterRule$Name": "Object key name prefix or suffix identifying one or more objects to which the filtering rule applies. Maximum prefix length can be up to 1,024 characters. Overlapping prefixes and suffixes are not supported. For more information, go to Configuring Event Notifications in the Amazon Simple Storage Service Developer Guide."
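FilterRule pairs ("prefix" or "suffix" plus a value) scope a notification configuration to a subset of keys. A sketch wiring one prefix rule and one suffix rule into a Lambda notification (bucket and function ARN are placeholders; in this SDK the S3KeyFilter shape surfaces as s3.KeyFilter):

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func configureNotifications(svc *s3.S3) error {
        _, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
            Bucket: aws.String("example-bucket"), // placeholder
            NotificationConfiguration: &s3.NotificationConfiguration{
                LambdaFunctionConfigurations: []*s3.LambdaFunctionConfiguration{{
                    LambdaFunctionArn: aws.String("arn:aws:lambda:us-east-1:111122223333:function:example-fn"),
                    Events:            []*string{aws.String("s3:ObjectCreated:*")},
                    Filter: &s3.NotificationConfigurationFilter{
                        Key: &s3.KeyFilter{
                            // Fire only for JPEGs under images/.
                            FilterRules: []*s3.FilterRule{
                                {Name: aws.String("prefix"), Value: aws.String("images/")},
                                {Name: aws.String("suffix"), Value: aws.String(".jpg")},
                            },
                        },
                    },
                }},
            },
        })
        return err
    }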
+ } + }, + "FilterRuleValue": { + "base": null, + "refs": { + "FilterRule$Value": null + } + }, + "GetBucketAccelerateConfigurationOutput": { + "base": null, + "refs": { + } + }, + "GetBucketAccelerateConfigurationRequest": { + "base": null, + "refs": { + } + }, + "GetBucketAclOutput": { + "base": null, + "refs": { + } + }, + "GetBucketAclRequest": { + "base": null, + "refs": { + } + }, + "GetBucketCorsOutput": { + "base": null, + "refs": { + } + }, + "GetBucketCorsRequest": { + "base": null, + "refs": { + } + }, + "GetBucketLifecycleConfigurationOutput": { + "base": null, + "refs": { + } + }, + "GetBucketLifecycleConfigurationRequest": { + "base": null, + "refs": { + } + }, + "GetBucketLifecycleOutput": { + "base": null, + "refs": { + } + }, + "GetBucketLifecycleRequest": { + "base": null, + "refs": { + } + }, + "GetBucketLocationOutput": { + "base": null, + "refs": { + } + }, + "GetBucketLocationRequest": { + "base": null, + "refs": { + } + }, + "GetBucketLoggingOutput": { + "base": null, + "refs": { + } + }, + "GetBucketLoggingRequest": { + "base": null, + "refs": { + } + }, + "GetBucketNotificationConfigurationRequest": { + "base": null, + "refs": { + } + }, + "GetBucketPolicyOutput": { + "base": null, + "refs": { + } + }, + "GetBucketPolicyRequest": { + "base": null, + "refs": { + } + }, + "GetBucketReplicationOutput": { + "base": null, + "refs": { + } + }, + "GetBucketReplicationRequest": { + "base": null, + "refs": { + } + }, + "GetBucketRequestPaymentOutput": { + "base": null, + "refs": { + } + }, + "GetBucketRequestPaymentRequest": { + "base": null, + "refs": { + } + }, + "GetBucketTaggingOutput": { + "base": null, + "refs": { + } + }, + "GetBucketTaggingRequest": { + "base": null, + "refs": { + } + }, + "GetBucketVersioningOutput": { + "base": null, + "refs": { + } + }, + "GetBucketVersioningRequest": { + "base": null, + "refs": { + } + }, + "GetBucketWebsiteOutput": { + "base": null, + "refs": { + } + }, + "GetBucketWebsiteRequest": { + "base": null, + "refs": { + } + }, + "GetObjectAclOutput": { + "base": null, + "refs": { + } + }, + "GetObjectAclRequest": { + "base": null, + "refs": { + } + }, + "GetObjectOutput": { + "base": null, + "refs": { + } + }, + "GetObjectRequest": { + "base": null, + "refs": { + } + }, + "GetObjectTorrentOutput": { + "base": null, + "refs": { + } + }, + "GetObjectTorrentRequest": { + "base": null, + "refs": { + } + }, + "Grant": { + "base": null, + "refs": { + "Grants$member": null + } + }, + "GrantFullControl": { + "base": null, + "refs": { + "CopyObjectRequest$GrantFullControl": "Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.", + "CreateBucketRequest$GrantFullControl": "Allows grantee the read, write, read ACP, and write ACP permissions on the bucket.", + "CreateMultipartUploadRequest$GrantFullControl": "Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.", + "PutBucketAclRequest$GrantFullControl": "Allows grantee the read, write, read ACP, and write ACP permissions on the bucket.", + "PutObjectAclRequest$GrantFullControl": "Allows grantee the read, write, read ACP, and write ACP permissions on the bucket.", + "PutObjectRequest$GrantFullControl": "Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object." 
+ } + }, + "GrantRead": { + "base": null, + "refs": { + "CopyObjectRequest$GrantRead": "Allows grantee to read the object data and its metadata.", + "CreateBucketRequest$GrantRead": "Allows grantee to list the objects in the bucket.", + "CreateMultipartUploadRequest$GrantRead": "Allows grantee to read the object data and its metadata.", + "PutBucketAclRequest$GrantRead": "Allows grantee to list the objects in the bucket.", + "PutObjectAclRequest$GrantRead": "Allows grantee to list the objects in the bucket.", + "PutObjectRequest$GrantRead": "Allows grantee to read the object data and its metadata." + } + }, + "GrantReadACP": { + "base": null, + "refs": { + "CopyObjectRequest$GrantReadACP": "Allows grantee to read the object ACL.", + "CreateBucketRequest$GrantReadACP": "Allows grantee to read the bucket ACL.", + "CreateMultipartUploadRequest$GrantReadACP": "Allows grantee to read the object ACL.", + "PutBucketAclRequest$GrantReadACP": "Allows grantee to read the bucket ACL.", + "PutObjectAclRequest$GrantReadACP": "Allows grantee to read the bucket ACL.", + "PutObjectRequest$GrantReadACP": "Allows grantee to read the object ACL." + } + }, + "GrantWrite": { + "base": null, + "refs": { + "CreateBucketRequest$GrantWrite": "Allows grantee to create, overwrite, and delete any object in the bucket.", + "PutBucketAclRequest$GrantWrite": "Allows grantee to create, overwrite, and delete any object in the bucket.", + "PutObjectAclRequest$GrantWrite": "Allows grantee to create, overwrite, and delete any object in the bucket." + } + }, + "GrantWriteACP": { + "base": null, + "refs": { + "CopyObjectRequest$GrantWriteACP": "Allows grantee to write the ACL for the applicable object.", + "CreateBucketRequest$GrantWriteACP": "Allows grantee to write the ACL for the applicable bucket.", + "CreateMultipartUploadRequest$GrantWriteACP": "Allows grantee to write the ACL for the applicable object.", + "PutBucketAclRequest$GrantWriteACP": "Allows grantee to write the ACL for the applicable bucket.", + "PutObjectAclRequest$GrantWriteACP": "Allows grantee to write the ACL for the applicable bucket.", + "PutObjectRequest$GrantWriteACP": "Allows grantee to write the ACL for the applicable object." + } + }, + "Grantee": { + "base": null, + "refs": { + "Grant$Grantee": null, + "TargetGrant$Grantee": null + } + }, + "Grants": { + "base": null, + "refs": { + "AccessControlPolicy$Grants": "A list of grants.", + "GetBucketAclOutput$Grants": "A list of grants.", + "GetObjectAclOutput$Grants": "A list of grants." + } + }, + "HeadBucketRequest": { + "base": null, + "refs": { + } + }, + "HeadObjectOutput": { + "base": null, + "refs": { + } + }, + "HeadObjectRequest": { + "base": null, + "refs": { + } + }, + "HostName": { + "base": null, + "refs": { + "Redirect$HostName": "The host name to use in the redirect request.", + "RedirectAllRequestsTo$HostName": "Name of the host where requests will be redirected." + } + }, + "HttpErrorCodeReturnedEquals": { + "base": null, + "refs": { + "Condition$HttpErrorCodeReturnedEquals": "The HTTP error code when the redirect is applied. In the event of an error, if the error code equals this value, then the specified redirect is applied. Required when parent element Condition is specified and sibling KeyPrefixEquals is not specified. If both are specified, then both must be true for the redirect to be applied." + } + }, + "HttpRedirectCode": { + "base": null, + "refs": { + "Redirect$HttpRedirectCode": "The HTTP redirect code to use on the response. Not required if one of the siblings is present." 
+ } + }, + "ID": { + "base": null, + "refs": { + "Grantee$ID": "The canonical user ID of the grantee.", + "Initiator$ID": "If the principal is an AWS account, it provides the Canonical User ID. If the principal is an IAM User, it provides a user ARN value.", + "LifecycleRule$ID": "Unique identifier for the rule. The value cannot be longer than 255 characters.", + "Owner$ID": null, + "ReplicationRule$ID": "Unique identifier for the rule. The value cannot be longer than 255 characters.", + "Rule$ID": "Unique identifier for the rule. The value cannot be longer than 255 characters." + } + }, + "IfMatch": { + "base": null, + "refs": { + "GetObjectRequest$IfMatch": "Return the object only if its entity tag (ETag) is the same as the one specified, otherwise return a 412 (precondition failed).", + "HeadObjectRequest$IfMatch": "Return the object only if its entity tag (ETag) is the same as the one specified, otherwise return a 412 (precondition failed)." + } + }, + "IfModifiedSince": { + "base": null, + "refs": { + "GetObjectRequest$IfModifiedSince": "Return the object only if it has been modified since the specified time, otherwise return a 304 (not modified).", + "HeadObjectRequest$IfModifiedSince": "Return the object only if it has been modified since the specified time, otherwise return a 304 (not modified)." + } + }, + "IfNoneMatch": { + "base": null, + "refs": { + "GetObjectRequest$IfNoneMatch": "Return the object only if its entity tag (ETag) is different from the one specified, otherwise return a 304 (not modified).", + "HeadObjectRequest$IfNoneMatch": "Return the object only if its entity tag (ETag) is different from the one specified, otherwise return a 304 (not modified)." + } + }, + "IfUnmodifiedSince": { + "base": null, + "refs": { + "GetObjectRequest$IfUnmodifiedSince": "Return the object only if it has not been modified since the specified time, otherwise return a 412 (precondition failed).", + "HeadObjectRequest$IfUnmodifiedSince": "Return the object only if it has not been modified since the specified time, otherwise return a 412 (precondition failed)." + } + }, + "IndexDocument": { + "base": null, + "refs": { + "GetBucketWebsiteOutput$IndexDocument": null, + "WebsiteConfiguration$IndexDocument": null + } + }, + "Initiated": { + "base": null, + "refs": { + "MultipartUpload$Initiated": "Date and time at which the multipart upload was initiated." + } + }, + "Initiator": { + "base": null, + "refs": { + "ListPartsOutput$Initiator": "Identifies who initiated the multipart upload.", + "MultipartUpload$Initiator": "Identifies who initiated the multipart upload." + } + }, + "IsLatest": { + "base": null, + "refs": { + "DeleteMarkerEntry$IsLatest": "Specifies whether the object is (true) or is not (false) the latest version of an object.", + "ObjectVersion$IsLatest": "Specifies whether the object is (true) or is not (false) the latest version of an object." + } + }, + "IsTruncated": { + "base": null, + "refs": { + "ListMultipartUploadsOutput$IsTruncated": "Indicates whether the returned list of multipart uploads is truncated. A value of true indicates that the list was truncated. The list can be truncated if the number of multipart uploads exceeds the limit allowed or specified by max uploads.", + "ListObjectVersionsOutput$IsTruncated": "A flag that indicates whether or not Amazon S3 returned all of the results that satisfied the search criteria. 
If your results were truncated, you can make a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker response parameters as a starting place in another request to return the rest of the results.", + "ListObjectsOutput$IsTruncated": "A flag that indicates whether or not Amazon S3 returned all of the results that satisfied the search criteria.", + "ListObjectsV2Output$IsTruncated": "A flag that indicates whether or not Amazon S3 returned all of the results that satisfied the search criteria.", + "ListPartsOutput$IsTruncated": "Indicates whether the returned list of parts is truncated." + } + }, + "KeyCount": { + "base": null, + "refs": { + "ListObjectsV2Output$KeyCount": "KeyCount is the number of keys returned with this request. KeyCount will always be less than or equal to the MaxKeys field. For example, if you ask for 50 keys, your result will include 50 keys or fewer." + } + }, + "KeyMarker": { + "base": null, + "refs": { + "ListMultipartUploadsOutput$KeyMarker": "The key at or after which the listing began.", + "ListMultipartUploadsRequest$KeyMarker": "Together with upload-id-marker, this parameter specifies the multipart upload after which listing should begin.", + "ListObjectVersionsOutput$KeyMarker": "Marks the last Key returned in a truncated response.", + "ListObjectVersionsRequest$KeyMarker": "Specifies the key to start with when listing objects in a bucket." + } + }, + "KeyPrefixEquals": { + "base": null, + "refs": { + "Condition$KeyPrefixEquals": "The object key name prefix when the redirect is applied. For example, to redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html. To redirect requests for all pages with the prefix docs/, the key prefix will be docs/, which identifies all objects in the docs/ folder. Required when the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals is not specified. If both conditions are specified, both must be true for the redirect to be applied." + } + }, + "LambdaFunctionArn": { + "base": null, + "refs": { + "LambdaFunctionConfiguration$LambdaFunctionArn": "Lambda cloud function ARN that Amazon S3 can invoke when it detects events of the specified type." + } + }, + "LambdaFunctionConfiguration": { + "base": "Container for specifying the AWS Lambda notification configuration.", + "refs": { + "LambdaFunctionConfigurationList$member": null + } + }, + "LambdaFunctionConfigurationList": { + "base": null, + "refs": { + "NotificationConfiguration$LambdaFunctionConfigurations": null + } + }, + "LastModified": { + "base": null, + "refs": { + "CopyObjectResult$LastModified": null, + "CopyPartResult$LastModified": "Date and time at which the object was uploaded.", + "DeleteMarkerEntry$LastModified": "Date and time the object was last modified.", + "GetObjectOutput$LastModified": "Last modified date of the object.", + "HeadObjectOutput$LastModified": "Last modified date of the object.", + "Object$LastModified": null, + "ObjectVersion$LastModified": "Date and time the object was last modified.", + "Part$LastModified": "Date and time at which the part was uploaded."
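The If* request shapes above push cache validation to the server: S3 compares the supplied ETag or timestamp and answers 304 or 412 instead of shipping the body. A sketch of an ETag-based conditional GET (bucket and key are placeholders):

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/awserr"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    // fetchIfChanged re-downloads the object only when its ETag no longer
    // matches the cached one; a match comes back as HTTP 304.
    func fetchIfChanged(svc *s3.S3, cachedETag string) (*s3.GetObjectOutput, error) {
        out, err := svc.GetObject(&s3.GetObjectInput{
            Bucket:      aws.String("example-bucket"), // placeholder
            Key:         aws.String("data/config.json"),
            IfNoneMatch: aws.String(cachedETag),
        })
        if reqErr, ok := err.(awserr.RequestFailure); ok && reqErr.StatusCode() == 304 {
            return nil, nil // not modified; keep using the cached copy
        }
        return out, err
    }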
+ } + }, + "LifecycleConfiguration": { + "base": null, + "refs": { + "PutBucketLifecycleRequest$LifecycleConfiguration": null + } + }, + "LifecycleExpiration": { + "base": null, + "refs": { + "LifecycleRule$Expiration": null, + "Rule$Expiration": null + } + }, + "LifecycleRule": { + "base": null, + "refs": { + "LifecycleRules$member": null + } + }, + "LifecycleRules": { + "base": null, + "refs": { + "BucketLifecycleConfiguration$Rules": null, + "GetBucketLifecycleConfigurationOutput$Rules": null + } + }, + "ListBucketsOutput": { + "base": null, + "refs": { + } + }, + "ListMultipartUploadsOutput": { + "base": null, + "refs": { + } + }, + "ListMultipartUploadsRequest": { + "base": null, + "refs": { + } + }, + "ListObjectVersionsOutput": { + "base": null, + "refs": { + } + }, + "ListObjectVersionsRequest": { + "base": null, + "refs": { + } + }, + "ListObjectsOutput": { + "base": null, + "refs": { + } + }, + "ListObjectsRequest": { + "base": null, + "refs": { + } + }, + "ListObjectsV2Output": { + "base": null, + "refs": { + } + }, + "ListObjectsV2Request": { + "base": null, + "refs": { + } + }, + "ListPartsOutput": { + "base": null, + "refs": { + } + }, + "ListPartsRequest": { + "base": null, + "refs": { + } + }, + "Location": { + "base": null, + "refs": { + "CompleteMultipartUploadOutput$Location": null, + "CreateBucketOutput$Location": null + } + }, + "LoggingEnabled": { + "base": null, + "refs": { + "BucketLoggingStatus$LoggingEnabled": null, + "GetBucketLoggingOutput$LoggingEnabled": null + } + }, + "MFA": { + "base": null, + "refs": { + "DeleteObjectRequest$MFA": "The concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device.", + "DeleteObjectsRequest$MFA": "The concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device.", + "PutBucketVersioningRequest$MFA": "The concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device." + } + }, + "MFADelete": { + "base": null, + "refs": { + "VersioningConfiguration$MFADelete": "Specifies whether MFA delete is enabled in the bucket versioning configuration. This element is only returned if the bucket has been configured with MFA delete. If the bucket has never been so configured, this element is not returned." + } + }, + "MFADeleteStatus": { + "base": null, + "refs": { + "GetBucketVersioningOutput$MFADelete": "Specifies whether MFA delete is enabled in the bucket versioning configuration. This element is only returned if the bucket has been configured with MFA delete. If the bucket has never been so configured, this element is not returned." + } + }, + "Marker": { + "base": null, + "refs": { + "ListObjectsOutput$Marker": null, + "ListObjectsRequest$Marker": "Specifies the key to start with when listing objects in a bucket." + } + }, + "MaxAgeSeconds": { + "base": null, + "refs": { + "CORSRule$MaxAgeSeconds": "The time in seconds that your browser is to cache the preflight response for the specified resource." + } + }, + "MaxKeys": { + "base": null, + "refs": { + "ListObjectVersionsOutput$MaxKeys": null, + "ListObjectVersionsRequest$MaxKeys": "Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more.", + "ListObjectsOutput$MaxKeys": null, + "ListObjectsRequest$MaxKeys": "Sets the maximum number of keys returned in the response. 
The response might contain fewer keys but will never contain more.", + "ListObjectsV2Output$MaxKeys": "Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more.", + "ListObjectsV2Request$MaxKeys": "Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more." + } + }, + "MaxParts": { + "base": null, + "refs": { + "ListPartsOutput$MaxParts": "Maximum number of parts that were allowed in the response.", + "ListPartsRequest$MaxParts": "Sets the maximum number of parts to return." + } + }, + "MaxUploads": { + "base": null, + "refs": { + "ListMultipartUploadsOutput$MaxUploads": "Maximum number of multipart uploads that could have been included in the response.", + "ListMultipartUploadsRequest$MaxUploads": "Sets the maximum number of multipart uploads, from 1 to 1,000, to return in the response body. 1,000 is the maximum number of uploads that can be returned in a response." + } + }, + "Message": { + "base": null, + "refs": { + "Error$Message": null + } + }, + "Metadata": { + "base": null, + "refs": { + "CopyObjectRequest$Metadata": "A map of metadata to store with the object in S3.", + "CreateMultipartUploadRequest$Metadata": "A map of metadata to store with the object in S3.", + "GetObjectOutput$Metadata": "A map of metadata to store with the object in S3.", + "HeadObjectOutput$Metadata": "A map of metadata to store with the object in S3.", + "PutObjectRequest$Metadata": "A map of metadata to store with the object in S3." + } + }, + "MetadataDirective": { + "base": null, + "refs": { + "CopyObjectRequest$MetadataDirective": "Specifies whether the metadata is copied from the source object or replaced with metadata provided in the request." + } + }, + "MetadataKey": { + "base": null, + "refs": { + "Metadata$key": null + } + }, + "MetadataValue": { + "base": null, + "refs": { + "Metadata$value": null + } + }, + "MissingMeta": { + "base": null, + "refs": { + "GetObjectOutput$MissingMeta": "This is set to the number of metadata entries not returned in x-amz-meta headers. This can happen if you create metadata using an API like SOAP that supports more flexible metadata than the REST API. For example, using SOAP, you can create metadata whose values are not legal HTTP headers.", + "HeadObjectOutput$MissingMeta": "This is set to the number of metadata entries not returned in x-amz-meta headers. This can happen if you create metadata using an API like SOAP that supports more flexible metadata than the REST API. For example, using SOAP, you can create metadata whose values are not legal HTTP headers." + } + }, + "MultipartUpload": { + "base": null, + "refs": { + "MultipartUploadList$member": null + } + }, + "MultipartUploadId": { + "base": null, + "refs": { + "AbortMultipartUploadRequest$UploadId": null, + "CompleteMultipartUploadRequest$UploadId": null, + "CreateMultipartUploadOutput$UploadId": "ID for the initiated multipart upload.", + "ListPartsOutput$UploadId": "Upload ID identifying the multipart upload whose parts are being listed.", + "ListPartsRequest$UploadId": "Upload ID identifying the multipart upload whose parts are being listed.", + "MultipartUpload$UploadId": "Upload ID that identifies the multipart upload.", + "UploadPartCopyRequest$UploadId": "Upload ID identifying the multipart upload whose part is being copied.", + "UploadPartRequest$UploadId": "Upload ID identifying the multipart upload whose part is being uploaded." 
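UploadId is the thread that ties a multipart upload together: CreateMultipartUpload issues it, every UploadPart cites it, and CompleteMultipartUpload closes it out with the collected part numbers and ETags. A compact sketch of the whole flow (error handling kept minimal; every part except the last must be at least 5 MB):

    import (
        "bytes"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func multipartPut(svc *s3.S3, bucket, key string, parts [][]byte) error {
        create, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
            Bucket: aws.String(bucket),
            Key:    aws.String(key),
        })
        if err != nil {
            return err
        }
        var done []*s3.CompletedPart
        for i, data := range parts {
            num := int64(i + 1) // part numbers run from 1 to 10,000
            up, err := svc.UploadPart(&s3.UploadPartInput{
                Bucket:     aws.String(bucket),
                Key:        aws.String(key),
                UploadId:   create.UploadId,
                PartNumber: aws.Int64(num),
                Body:       bytes.NewReader(data),
            })
            if err != nil {
                return err
            }
            done = append(done, &s3.CompletedPart{ETag: up.ETag, PartNumber: aws.Int64(num)})
        }
        // The completed-part list pairs each part number with its ETag.
        _, err = svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
            Bucket:          aws.String(bucket),
            Key:             aws.String(key),
            UploadId:        create.UploadId,
            MultipartUpload: &s3.CompletedMultipartUpload{Parts: done},
        })
        return err
    }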
+ } + }, + "MultipartUploadList": { + "base": null, + "refs": { + "ListMultipartUploadsOutput$Uploads": null + } + }, + "NextKeyMarker": { + "base": null, + "refs": { + "ListMultipartUploadsOutput$NextKeyMarker": "When a list is truncated, this element specifies the value that should be used for the key-marker request parameter in a subsequent request.", + "ListObjectVersionsOutput$NextKeyMarker": "Use this value for the key marker request parameter in a subsequent request." + } + }, + "NextMarker": { + "base": null, + "refs": { + "ListObjectsOutput$NextMarker": "When response is truncated (the IsTruncated element value in the response is true), you can use the key name in this field as marker in the subsequent request to get next set of objects. Amazon S3 lists objects in alphabetical order Note: This element is returned only if you have delimiter request parameter specified. If response does not include the NextMaker and it is truncated, you can use the value of the last Key in the response as the marker in the subsequent request to get the next set of object keys." + } + }, + "NextPartNumberMarker": { + "base": null, + "refs": { + "ListPartsOutput$NextPartNumberMarker": "When a list is truncated, this element specifies the last part in the list, as well as the value to use for the part-number-marker request parameter in a subsequent request." + } + }, + "NextToken": { + "base": null, + "refs": { + "ListObjectsV2Output$NextContinuationToken": "NextContinuationToken is sent when isTruncated is true which means there are more keys in the bucket that can be listed. The next list requests to Amazon S3 can be continued with this NextContinuationToken. NextContinuationToken is obfuscated and is not a real key" + } + }, + "NextUploadIdMarker": { + "base": null, + "refs": { + "ListMultipartUploadsOutput$NextUploadIdMarker": "When a list is truncated, this element specifies the value that should be used for the upload-id-marker request parameter in a subsequent request." + } + }, + "NextVersionIdMarker": { + "base": null, + "refs": { + "ListObjectVersionsOutput$NextVersionIdMarker": "Use this value for the next version id marker parameter in a subsequent request." + } + }, + "NoSuchBucket": { + "base": "The specified bucket does not exist.", + "refs": { + } + }, + "NoSuchKey": { + "base": "The specified key does not exist.", + "refs": { + } + }, + "NoSuchUpload": { + "base": "The specified multipart upload does not exist.", + "refs": { + } + }, + "NoncurrentVersionExpiration": { + "base": "Specifies when noncurrent object versions expire. Upon expiration, Amazon S3 permanently deletes the noncurrent object versions. You set this lifecycle configuration action on a bucket that has versioning enabled (or suspended) to request that Amazon S3 delete noncurrent object versions at a specific period in the object's lifetime.", + "refs": { + "LifecycleRule$NoncurrentVersionExpiration": null, + "Rule$NoncurrentVersionExpiration": null + } + }, + "NoncurrentVersionTransition": { + "base": "Container for the transition rule that describes when noncurrent objects transition to the STANDARD_IA or GLACIER storage class. 
If your bucket is versioning-enabled (or versioning is suspended), you can set this action to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA or GLACIER storage class at a specific period in the object's lifetime.", + "refs": { + "NoncurrentVersionTransitionList$member": null, + "Rule$NoncurrentVersionTransition": null + } + }, + "NoncurrentVersionTransitionList": { + "base": null, + "refs": { + "LifecycleRule$NoncurrentVersionTransitions": null + } + }, + "NotificationConfiguration": { + "base": "Container for specifying the notification configuration of the bucket. If this element is empty, notifications are turned off on the bucket.", + "refs": { + "PutBucketNotificationConfigurationRequest$NotificationConfiguration": null + } + }, + "NotificationConfigurationDeprecated": { + "base": null, + "refs": { + "PutBucketNotificationRequest$NotificationConfiguration": null + } + }, + "NotificationConfigurationFilter": { + "base": "Container for object key name filtering rules. For information about key name filtering, go to Configuring Event Notifications in the Amazon Simple Storage Service Developer Guide.", + "refs": { + "LambdaFunctionConfiguration$Filter": null, + "QueueConfiguration$Filter": null, + "TopicConfiguration$Filter": null + } + }, + "NotificationId": { + "base": "Optional unique identifier for configurations in a notification configuration. If you don't provide one, Amazon S3 will assign an ID.", + "refs": { + "CloudFunctionConfiguration$Id": null, + "LambdaFunctionConfiguration$Id": null, + "QueueConfiguration$Id": null, + "QueueConfigurationDeprecated$Id": null, + "TopicConfiguration$Id": null, + "TopicConfigurationDeprecated$Id": null + } + }, + "Object": { + "base": null, + "refs": { + "ObjectList$member": null + } + }, + "ObjectAlreadyInActiveTierError": { + "base": "This operation is not allowed against this storage tier", + "refs": { + } + }, + "ObjectCannedACL": { + "base": null, + "refs": { + "CopyObjectRequest$ACL": "The canned ACL to apply to the object.", + "CreateMultipartUploadRequest$ACL": "The canned ACL to apply to the object.", + "PutObjectAclRequest$ACL": "The canned ACL to apply to the object.", + "PutObjectRequest$ACL": "The canned ACL to apply to the object." 
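The NoncurrentVersionTransition and NoncurrentVersionExpiration shapes above only make sense on versioned buckets; together they age old object versions into cheaper storage and eventually delete them. A sketch of one such lifecycle rule (bucket, prefix, and day counts are illustrative):

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func applyLifecycle(svc *s3.S3) error {
        _, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
            Bucket: aws.String("example-bucket"), // placeholder; versioning must be enabled
            LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
                Rules: []*s3.LifecycleRule{{
                    ID:     aws.String("tidy-old-versions"),
                    Prefix: aws.String("logs/"),
                    Status: aws.String("Enabled"),
                    // After 30 days noncurrent, move old versions to Glacier...
                    NoncurrentVersionTransitions: []*s3.NoncurrentVersionTransition{{
                        NoncurrentDays: aws.Int64(30),
                        StorageClass:   aws.String("GLACIER"),
                    }},
                    // ...and delete them for good after a year.
                    NoncurrentVersionExpiration: &s3.NoncurrentVersionExpiration{
                        NoncurrentDays: aws.Int64(365),
                    },
                }},
            },
        })
        return err
    }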
+ } + }, + "ObjectIdentifier": { + "base": null, + "refs": { + "ObjectIdentifierList$member": null + } + }, + "ObjectIdentifierList": { + "base": null, + "refs": { + "Delete$Objects": null + } + }, + "ObjectKey": { + "base": null, + "refs": { + "AbortMultipartUploadRequest$Key": null, + "CompleteMultipartUploadOutput$Key": null, + "CompleteMultipartUploadRequest$Key": null, + "CopyObjectRequest$Key": null, + "CreateMultipartUploadOutput$Key": "Object key for which the multipart upload was initiated.", + "CreateMultipartUploadRequest$Key": null, + "DeleteMarkerEntry$Key": "The object key.", + "DeleteObjectRequest$Key": null, + "DeletedObject$Key": null, + "Error$Key": null, + "ErrorDocument$Key": "The object key name to use when a 4XX class error occurs.", + "GetObjectAclRequest$Key": null, + "GetObjectRequest$Key": null, + "GetObjectTorrentRequest$Key": null, + "HeadObjectRequest$Key": null, + "ListPartsOutput$Key": "Object key for which the multipart upload was initiated.", + "ListPartsRequest$Key": null, + "MultipartUpload$Key": "Key of the object for which the multipart upload was initiated.", + "Object$Key": null, + "ObjectIdentifier$Key": "Key name of the object to delete.", + "ObjectVersion$Key": "The object key.", + "PutObjectAclRequest$Key": null, + "PutObjectRequest$Key": "Object key for which the PUT operation was initiated.", + "RestoreObjectRequest$Key": null, + "Tag$Key": "Name of the tag.", + "UploadPartCopyRequest$Key": null, + "UploadPartRequest$Key": "Object key for which the multipart upload was initiated." + } + }, + "ObjectList": { + "base": null, + "refs": { + "ListObjectsOutput$Contents": null, + "ListObjectsV2Output$Contents": "Metadata about each object returned." + } + }, + "ObjectNotInActiveTierError": { + "base": "The source object of the COPY operation is not in the active tier and is only stored in Amazon Glacier.", + "refs": { + } + }, + "ObjectStorageClass": { + "base": null, + "refs": { + "Object$StorageClass": "The class of storage used to store the object." 
+ } + }, + "ObjectVersion": { + "base": null, + "refs": { + "ObjectVersionList$member": null + } + }, + "ObjectVersionId": { + "base": null, + "refs": { + "CompleteMultipartUploadOutput$VersionId": "Version of the object.", + "CopyObjectOutput$VersionId": "Version ID of the newly created copy.", + "DeleteMarkerEntry$VersionId": "Version ID of an object.", + "DeleteObjectOutput$VersionId": "Returns the version ID of the delete marker created as a result of the DELETE operation.", + "DeleteObjectRequest$VersionId": "VersionId used to reference a specific version of the object.", + "DeletedObject$VersionId": null, + "Error$VersionId": null, + "GetObjectAclRequest$VersionId": "VersionId used to reference a specific version of the object.", + "GetObjectOutput$VersionId": "Version of the object.", + "GetObjectRequest$VersionId": "VersionId used to reference a specific version of the object.", + "HeadObjectOutput$VersionId": "Version of the object.", + "HeadObjectRequest$VersionId": "VersionId used to reference a specific version of the object.", + "ObjectIdentifier$VersionId": "VersionId for the specific version of the object to delete.", + "ObjectVersion$VersionId": "Version ID of an object.", + "PutObjectAclRequest$VersionId": "VersionId used to reference a specific version of the object.", + "PutObjectOutput$VersionId": "Version of the object.", + "RestoreObjectRequest$VersionId": null + } + }, + "ObjectVersionList": { + "base": null, + "refs": { + "ListObjectVersionsOutput$Versions": null + } + }, + "ObjectVersionStorageClass": { + "base": null, + "refs": { + "ObjectVersion$StorageClass": "The class of storage used to store the object." + } + }, + "Owner": { + "base": null, + "refs": { + "AccessControlPolicy$Owner": null, + "DeleteMarkerEntry$Owner": null, + "GetBucketAclOutput$Owner": null, + "GetObjectAclOutput$Owner": null, + "ListBucketsOutput$Owner": null, + "ListPartsOutput$Owner": null, + "MultipartUpload$Owner": null, + "Object$Owner": null, + "ObjectVersion$Owner": null + } + }, + "Part": { + "base": null, + "refs": { + "Parts$member": null + } + }, + "PartNumber": { + "base": null, + "refs": { + "CompletedPart$PartNumber": "Part number that identifies the part. This is a positive integer between 1 and 10,000.", + "Part$PartNumber": "Part number identifying the part. This is a positive integer between 1 and 10,000.", + "UploadPartCopyRequest$PartNumber": "Part number of part being copied. This is a positive integer between 1 and 10,000.", + "UploadPartRequest$PartNumber": "Part number of part being uploaded. This is a positive integer between 1 and 10,000." + } + }, + "PartNumberMarker": { + "base": null, + "refs": { + "ListPartsOutput$PartNumberMarker": "Part number after which listing begins.", + "ListPartsRequest$PartNumberMarker": "Specifies the part after which listing should begin. Only parts with higher part numbers will be listed." + } + }, + "Parts": { + "base": null, + "refs": { + "ListPartsOutput$Parts": null + } + }, + "Payer": { + "base": null, + "refs": { + "GetBucketRequestPaymentOutput$Payer": "Specifies who pays for the download and request fees.", + "RequestPaymentConfiguration$Payer": "Specifies who pays for the download and request fees." + } + }, + "Permission": { + "base": null, + "refs": { + "Grant$Permission": "Specifies the permission given to the grantee." + } + }, + "Policy": { + "base": null, + "refs": { + "GetBucketPolicyOutput$Policy": "The bucket policy as a JSON document.", + "PutBucketPolicyRequest$Policy": "The bucket policy as a JSON document." 
+ } + }, + "Prefix": { + "base": null, + "refs": { + "CommonPrefix$Prefix": null, + "LifecycleRule$Prefix": "Prefix identifying one or more objects to which the rule applies.", + "ListMultipartUploadsOutput$Prefix": "When a prefix is provided in the request, this field contains the specified prefix. The result contains only keys starting with the specified prefix.", + "ListMultipartUploadsRequest$Prefix": "Lists in-progress uploads only for those keys that begin with the specified prefix.", + "ListObjectVersionsOutput$Prefix": null, + "ListObjectVersionsRequest$Prefix": "Limits the response to keys that begin with the specified prefix.", + "ListObjectsOutput$Prefix": null, + "ListObjectsRequest$Prefix": "Limits the response to keys that begin with the specified prefix.", + "ListObjectsV2Output$Prefix": "Limits the response to keys that begin with the specified prefix.", + "ListObjectsV2Request$Prefix": "Limits the response to keys that begin with the specified prefix.", + "ReplicationRule$Prefix": "Object keyname prefix identifying one or more objects to which the rule applies. Maximum prefix length can be up to 1,024 characters. Overlapping prefixes are not supported.", + "Rule$Prefix": "Prefix identifying one or more objects to which the rule applies." + } + }, + "Protocol": { + "base": null, + "refs": { + "Redirect$Protocol": "Protocol to use (http, https) when redirecting requests. The default is the protocol that is used in the original request.", + "RedirectAllRequestsTo$Protocol": "Protocol to use (http, https) when redirecting requests. The default is the protocol that is used in the original request." + } + }, + "PutBucketAccelerateConfigurationRequest": { + "base": null, + "refs": { + } + }, + "PutBucketAclRequest": { + "base": null, + "refs": { + } + }, + "PutBucketCorsRequest": { + "base": null, + "refs": { + } + }, + "PutBucketLifecycleConfigurationRequest": { + "base": null, + "refs": { + } + }, + "PutBucketLifecycleRequest": { + "base": null, + "refs": { + } + }, + "PutBucketLoggingRequest": { + "base": null, + "refs": { + } + }, + "PutBucketNotificationConfigurationRequest": { + "base": null, + "refs": { + } + }, + "PutBucketNotificationRequest": { + "base": null, + "refs": { + } + }, + "PutBucketPolicyRequest": { + "base": null, + "refs": { + } + }, + "PutBucketReplicationRequest": { + "base": null, + "refs": { + } + }, + "PutBucketRequestPaymentRequest": { + "base": null, + "refs": { + } + }, + "PutBucketTaggingRequest": { + "base": null, + "refs": { + } + }, + "PutBucketVersioningRequest": { + "base": null, + "refs": { + } + }, + "PutBucketWebsiteRequest": { + "base": null, + "refs": { + } + }, + "PutObjectAclOutput": { + "base": null, + "refs": { + } + }, + "PutObjectAclRequest": { + "base": null, + "refs": { + } + }, + "PutObjectOutput": { + "base": null, + "refs": { + } + }, + "PutObjectRequest": { + "base": null, + "refs": { + } + }, + "QueueArn": { + "base": null, + "refs": { + "QueueConfiguration$QueueArn": "Amazon SQS queue ARN to which Amazon S3 will publish a message when it detects events of specified type.", + "QueueConfigurationDeprecated$Queue": null + } + }, + "QueueConfiguration": { + "base": "Container for specifying an configuration when you want Amazon S3 to publish events to an Amazon Simple Queue Service (Amazon SQS) queue.", + "refs": { + "QueueConfigurationList$member": null + } + }, + "QueueConfigurationDeprecated": { + "base": null, + "refs": { + "NotificationConfigurationDeprecated$QueueConfiguration": null + } + }, + "QueueConfigurationList": { 
+ "base": null, + "refs": { + "NotificationConfiguration$QueueConfigurations": null + } + }, + "Quiet": { + "base": null, + "refs": { + "Delete$Quiet": "Element to enable quiet mode for the request. When you add this element, you must set its value to true." + } + }, + "Range": { + "base": null, + "refs": { + "GetObjectRequest$Range": "Downloads the specified range bytes of an object. For more information about the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.", + "HeadObjectRequest$Range": "Downloads the specified range bytes of an object. For more information about the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35." + } + }, + "Redirect": { + "base": null, + "refs": { + "RoutingRule$Redirect": "Container for redirect information. You can redirect requests to another host, to another page, or with another protocol. In the event of an error, you can can specify a different error code to return." + } + }, + "RedirectAllRequestsTo": { + "base": null, + "refs": { + "GetBucketWebsiteOutput$RedirectAllRequestsTo": null, + "WebsiteConfiguration$RedirectAllRequestsTo": null + } + }, + "ReplaceKeyPrefixWith": { + "base": null, + "refs": { + "Redirect$ReplaceKeyPrefixWith": "The object key prefix to use in the redirect request. For example, to redirect requests for all pages with prefix docs/ (objects in the docs/ folder) to documents/, you can set a condition block with KeyPrefixEquals set to docs/ and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required if one of the siblings is present. Can be present only if ReplaceKeyWith is not provided." + } + }, + "ReplaceKeyWith": { + "base": null, + "refs": { + "Redirect$ReplaceKeyWith": "The specific object key to use in the redirect request. For example, redirect request to error.html. Not required if one of the sibling is present. Can be present only if ReplaceKeyPrefixWith is not provided." + } + }, + "ReplicationConfiguration": { + "base": "Container for replication rules. You can add as many as 1,000 rules. Total replication configuration size can be up to 2 MB.", + "refs": { + "GetBucketReplicationOutput$ReplicationConfiguration": null, + "PutBucketReplicationRequest$ReplicationConfiguration": null + } + }, + "ReplicationRule": { + "base": null, + "refs": { + "ReplicationRules$member": null + } + }, + "ReplicationRuleStatus": { + "base": null, + "refs": { + "ReplicationRule$Status": "The rule is ignored if status is not Enabled." + } + }, + "ReplicationRules": { + "base": null, + "refs": { + "ReplicationConfiguration$Rules": "Container for information about a particular replication rule. Replication configuration must have at least one rule and can contain up to 1,000 rules." 
+ } + }, + "ReplicationStatus": { + "base": null, + "refs": { + "GetObjectOutput$ReplicationStatus": null, + "HeadObjectOutput$ReplicationStatus": null + } + }, + "RequestCharged": { + "base": "If present, indicates that the requester was successfully charged for the request.", + "refs": { + "AbortMultipartUploadOutput$RequestCharged": null, + "CompleteMultipartUploadOutput$RequestCharged": null, + "CopyObjectOutput$RequestCharged": null, + "CreateMultipartUploadOutput$RequestCharged": null, + "DeleteObjectOutput$RequestCharged": null, + "DeleteObjectsOutput$RequestCharged": null, + "GetObjectAclOutput$RequestCharged": null, + "GetObjectOutput$RequestCharged": null, + "GetObjectTorrentOutput$RequestCharged": null, + "HeadObjectOutput$RequestCharged": null, + "ListPartsOutput$RequestCharged": null, + "PutObjectAclOutput$RequestCharged": null, + "PutObjectOutput$RequestCharged": null, + "RestoreObjectOutput$RequestCharged": null, + "UploadPartCopyOutput$RequestCharged": null, + "UploadPartOutput$RequestCharged": null + } + }, + "RequestPayer": { + "base": "Confirms that the requester knows that she or he will be charged for the request. Bucket owners need not specify this parameter in their requests. Documentation on downloading objects from requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html", + "refs": { + "AbortMultipartUploadRequest$RequestPayer": null, + "CompleteMultipartUploadRequest$RequestPayer": null, + "CopyObjectRequest$RequestPayer": null, + "CreateMultipartUploadRequest$RequestPayer": null, + "DeleteObjectRequest$RequestPayer": null, + "DeleteObjectsRequest$RequestPayer": null, + "GetObjectAclRequest$RequestPayer": null, + "GetObjectRequest$RequestPayer": null, + "GetObjectTorrentRequest$RequestPayer": null, + "HeadObjectRequest$RequestPayer": null, + "ListPartsRequest$RequestPayer": null, + "PutObjectAclRequest$RequestPayer": null, + "PutObjectRequest$RequestPayer": null, + "RestoreObjectRequest$RequestPayer": null, + "UploadPartCopyRequest$RequestPayer": null, + "UploadPartRequest$RequestPayer": null + } + }, + "RequestPaymentConfiguration": { + "base": null, + "refs": { + "PutBucketRequestPaymentRequest$RequestPaymentConfiguration": null + } + }, + "ResponseCacheControl": { + "base": null, + "refs": { + "GetObjectRequest$ResponseCacheControl": "Sets the Cache-Control header of the response." + } + }, + "ResponseContentDisposition": { + "base": null, + "refs": { + "GetObjectRequest$ResponseContentDisposition": "Sets the Content-Disposition header of the response" + } + }, + "ResponseContentEncoding": { + "base": null, + "refs": { + "GetObjectRequest$ResponseContentEncoding": "Sets the Content-Encoding header of the response." + } + }, + "ResponseContentLanguage": { + "base": null, + "refs": { + "GetObjectRequest$ResponseContentLanguage": "Sets the Content-Language header of the response." + } + }, + "ResponseContentType": { + "base": null, + "refs": { + "GetObjectRequest$ResponseContentType": "Sets the Content-Type header of the response." + } + }, + "ResponseExpires": { + "base": null, + "refs": { + "GetObjectRequest$ResponseExpires": "Sets the Expires header of the response." + } + }, + "Restore": { + "base": null, + "refs": { + "GetObjectOutput$Restore": "Provides information about object restoration operation and expiration time of the restored object copy.", + "HeadObjectOutput$Restore": "Provides information about object restoration operation and expiration time of the restored object copy." 
+ } + }, + "RestoreObjectOutput": { + "base": null, + "refs": { + } + }, + "RestoreObjectRequest": { + "base": null, + "refs": { + } + }, + "RestoreRequest": { + "base": null, + "refs": { + "RestoreObjectRequest$RestoreRequest": null + } + }, + "Role": { + "base": null, + "refs": { + "ReplicationConfiguration$Role": "Amazon Resource Name (ARN) of an IAM role for Amazon S3 to assume when replicating the objects." + } + }, + "RoutingRule": { + "base": null, + "refs": { + "RoutingRules$member": null + } + }, + "RoutingRules": { + "base": null, + "refs": { + "GetBucketWebsiteOutput$RoutingRules": null, + "WebsiteConfiguration$RoutingRules": null + } + }, + "Rule": { + "base": null, + "refs": { + "Rules$member": null + } + }, + "Rules": { + "base": null, + "refs": { + "GetBucketLifecycleOutput$Rules": null, + "LifecycleConfiguration$Rules": null + } + }, + "S3KeyFilter": { + "base": "Container for object key name prefix and suffix filtering rules.", + "refs": { + "NotificationConfigurationFilter$Key": null + } + }, + "SSECustomerAlgorithm": { + "base": null, + "refs": { + "CopyObjectOutput$SSECustomerAlgorithm": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.", + "CopyObjectRequest$SSECustomerAlgorithm": "Specifies the algorithm to use to when encrypting the object (e.g., AES256).", + "CreateMultipartUploadOutput$SSECustomerAlgorithm": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.", + "CreateMultipartUploadRequest$SSECustomerAlgorithm": "Specifies the algorithm to use to when encrypting the object (e.g., AES256).", + "GetObjectOutput$SSECustomerAlgorithm": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.", + "GetObjectRequest$SSECustomerAlgorithm": "Specifies the algorithm to use to when encrypting the object (e.g., AES256).", + "HeadObjectOutput$SSECustomerAlgorithm": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.", + "HeadObjectRequest$SSECustomerAlgorithm": "Specifies the algorithm to use to when encrypting the object (e.g., AES256).", + "PutObjectOutput$SSECustomerAlgorithm": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.", + "PutObjectRequest$SSECustomerAlgorithm": "Specifies the algorithm to use to when encrypting the object (e.g., AES256).", + "UploadPartCopyOutput$SSECustomerAlgorithm": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.", + "UploadPartCopyRequest$SSECustomerAlgorithm": "Specifies the algorithm to use to when encrypting the object (e.g., AES256).", + "UploadPartOutput$SSECustomerAlgorithm": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.", + "UploadPartRequest$SSECustomerAlgorithm": "Specifies the algorithm to use to when encrypting the object (e.g., AES256)." 
+ } + }, + "SSECustomerKey": { + "base": null, + "refs": { + "CopyObjectRequest$SSECustomerKey": "Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header.", + "CreateMultipartUploadRequest$SSECustomerKey": "Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header.", + "GetObjectRequest$SSECustomerKey": "Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header.", + "HeadObjectRequest$SSECustomerKey": "Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header.", + "PutObjectRequest$SSECustomerKey": "Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header.", + "UploadPartCopyRequest$SSECustomerKey": "Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header. This must be the same encryption key specified in the initiate multipart upload request.", + "UploadPartRequest$SSECustomerKey": "Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header. This must be the same encryption key specified in the initiate multipart upload request." + } + }, + "SSECustomerKeyMD5": { + "base": null, + "refs": { + "CopyObjectOutput$SSECustomerKeyMD5": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.", + "CopyObjectRequest$SSECustomerKeyMD5": "Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. 
Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.", + "CreateMultipartUploadOutput$SSECustomerKeyMD5": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.", + "CreateMultipartUploadRequest$SSECustomerKeyMD5": "Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.", + "GetObjectOutput$SSECustomerKeyMD5": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.", + "GetObjectRequest$SSECustomerKeyMD5": "Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.", + "HeadObjectOutput$SSECustomerKeyMD5": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.", + "HeadObjectRequest$SSECustomerKeyMD5": "Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.", + "PutObjectOutput$SSECustomerKeyMD5": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.", + "PutObjectRequest$SSECustomerKeyMD5": "Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.", + "UploadPartCopyOutput$SSECustomerKeyMD5": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.", + "UploadPartCopyRequest$SSECustomerKeyMD5": "Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.", + "UploadPartOutput$SSECustomerKeyMD5": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.", + "UploadPartRequest$SSECustomerKeyMD5": "Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error." 
+ } + }, + "SSEKMSKeyId": { + "base": null, + "refs": { + "CompleteMultipartUploadOutput$SSEKMSKeyId": "If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.", + "CopyObjectOutput$SSEKMSKeyId": "If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.", + "CopyObjectRequest$SSEKMSKeyId": "Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. Documentation on configuring any of the officially supported AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version", + "CreateMultipartUploadOutput$SSEKMSKeyId": "If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.", + "CreateMultipartUploadRequest$SSEKMSKeyId": "Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. Documentation on configuring any of the officially supported AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version", + "GetObjectOutput$SSEKMSKeyId": "If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.", + "HeadObjectOutput$SSEKMSKeyId": "If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.", + "PutObjectOutput$SSEKMSKeyId": "If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.", + "PutObjectRequest$SSEKMSKeyId": "Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. Documentation on configuring any of the officially supported AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version", + "UploadPartCopyOutput$SSEKMSKeyId": "If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.", + "UploadPartOutput$SSEKMSKeyId": "If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object." 
+ } + }, + "ServerSideEncryption": { + "base": null, + "refs": { + "CompleteMultipartUploadOutput$ServerSideEncryption": "The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).", + "CopyObjectOutput$ServerSideEncryption": "The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).", + "CopyObjectRequest$ServerSideEncryption": "The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).", + "CreateMultipartUploadOutput$ServerSideEncryption": "The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).", + "CreateMultipartUploadRequest$ServerSideEncryption": "The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).", + "GetObjectOutput$ServerSideEncryption": "The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).", + "HeadObjectOutput$ServerSideEncryption": "The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).", + "PutObjectOutput$ServerSideEncryption": "The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).", + "PutObjectRequest$ServerSideEncryption": "The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).", + "UploadPartCopyOutput$ServerSideEncryption": "The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).", + "UploadPartOutput$ServerSideEncryption": "The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms)." + } + }, + "Size": { + "base": null, + "refs": { + "Object$Size": null, + "ObjectVersion$Size": "Size in bytes of the object.", + "Part$Size": "Size of the uploaded part data." + } + }, + "StartAfter": { + "base": null, + "refs": { + "ListObjectsV2Output$StartAfter": "StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts listing after this specified key. StartAfter can be any key in the bucket", + "ListObjectsV2Request$StartAfter": "StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts listing after this specified key. StartAfter can be any key in the bucket" + } + }, + "StorageClass": { + "base": null, + "refs": { + "CopyObjectRequest$StorageClass": "The type of storage to use for the object. Defaults to 'STANDARD'.", + "CreateMultipartUploadRequest$StorageClass": "The type of storage to use for the object. Defaults to 'STANDARD'.", + "Destination$StorageClass": "The class of storage used to store the object.", + "GetObjectOutput$StorageClass": null, + "HeadObjectOutput$StorageClass": null, + "ListPartsOutput$StorageClass": "The class of storage used to store the object.", + "MultipartUpload$StorageClass": "The class of storage used to store the object.", + "PutObjectRequest$StorageClass": "The type of storage to use for the object. Defaults to 'STANDARD'." + } + }, + "Suffix": { + "base": null, + "refs": { + "IndexDocument$Suffix": "A suffix that is appended to a request that is for a directory on the website endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/ the data that is returned will be for the object with the key name images/index.html) The suffix must not be empty and must not include a slash character." 
+ } + }, + "Tag": { + "base": null, + "refs": { + "TagSet$member": null + } + }, + "TagSet": { + "base": null, + "refs": { + "GetBucketTaggingOutput$TagSet": null, + "Tagging$TagSet": null + } + }, + "Tagging": { + "base": null, + "refs": { + "PutBucketTaggingRequest$Tagging": null + } + }, + "TargetBucket": { + "base": null, + "refs": { + "LoggingEnabled$TargetBucket": "Specifies the bucket where you want Amazon S3 to store server access logs. You can have your logs delivered to any bucket that you own, including the same bucket that is being logged. You can also configure multiple buckets to deliver their logs to the same target bucket. In this case you should choose a different TargetPrefix for each source bucket so that the delivered log files can be distinguished by key." + } + }, + "TargetGrant": { + "base": null, + "refs": { + "TargetGrants$member": null + } + }, + "TargetGrants": { + "base": null, + "refs": { + "LoggingEnabled$TargetGrants": null + } + }, + "TargetPrefix": { + "base": null, + "refs": { + "LoggingEnabled$TargetPrefix": "This element lets you specify a prefix for the keys that the log files will be stored under." + } + }, + "Token": { + "base": null, + "refs": { + "ListObjectsV2Output$ContinuationToken": "ContinuationToken indicates Amazon S3 that the list is being continued on this bucket with a token. ContinuationToken is obfuscated and is not a real key", + "ListObjectsV2Request$ContinuationToken": "ContinuationToken indicates Amazon S3 that the list is being continued on this bucket with a token. ContinuationToken is obfuscated and is not a real key" + } + }, + "TopicArn": { + "base": null, + "refs": { + "TopicConfiguration$TopicArn": "Amazon SNS topic ARN to which Amazon S3 will publish a message when it detects events of specified type.", + "TopicConfigurationDeprecated$Topic": "Amazon SNS topic to which Amazon S3 will publish a message to report the specified events for the bucket." + } + }, + "TopicConfiguration": { + "base": "Container for specifying the configuration when you want Amazon S3 to publish events to an Amazon Simple Notification Service (Amazon SNS) topic.", + "refs": { + "TopicConfigurationList$member": null + } + }, + "TopicConfigurationDeprecated": { + "base": null, + "refs": { + "NotificationConfigurationDeprecated$TopicConfiguration": null + } + }, + "TopicConfigurationList": { + "base": null, + "refs": { + "NotificationConfiguration$TopicConfigurations": null + } + }, + "Transition": { + "base": null, + "refs": { + "Rule$Transition": null, + "TransitionList$member": null + } + }, + "TransitionList": { + "base": null, + "refs": { + "LifecycleRule$Transitions": null + } + }, + "TransitionStorageClass": { + "base": null, + "refs": { + "NoncurrentVersionTransition$StorageClass": "The class of storage used to store the object.", + "Transition$StorageClass": "The class of storage used to store the object." + } + }, + "Type": { + "base": null, + "refs": { + "Grantee$Type": "Type of grantee" + } + }, + "URI": { + "base": null, + "refs": { + "Grantee$URI": "URI of the grantee group." + } + }, + "UploadIdMarker": { + "base": null, + "refs": { + "ListMultipartUploadsOutput$UploadIdMarker": "Upload ID after which listing began.", + "ListMultipartUploadsRequest$UploadIdMarker": "Together with key-marker, specifies the multipart upload after which listing should begin. If key-marker is not specified, the upload-id-marker parameter is ignored." 
+ } + }, + "UploadPartCopyOutput": { + "base": null, + "refs": { + } + }, + "UploadPartCopyRequest": { + "base": null, + "refs": { + } + }, + "UploadPartOutput": { + "base": null, + "refs": { + } + }, + "UploadPartRequest": { + "base": null, + "refs": { + } + }, + "Value": { + "base": null, + "refs": { + "Tag$Value": "Value of the tag." + } + }, + "VersionIdMarker": { + "base": null, + "refs": { + "ListObjectVersionsOutput$VersionIdMarker": null, + "ListObjectVersionsRequest$VersionIdMarker": "Specifies the object version you want to start listing from." + } + }, + "VersioningConfiguration": { + "base": null, + "refs": { + "PutBucketVersioningRequest$VersioningConfiguration": null + } + }, + "WebsiteConfiguration": { + "base": null, + "refs": { + "PutBucketWebsiteRequest$WebsiteConfiguration": null + } + }, + "WebsiteRedirectLocation": { + "base": null, + "refs": { + "CopyObjectRequest$WebsiteRedirectLocation": "If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata.", + "CreateMultipartUploadRequest$WebsiteRedirectLocation": "If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata.", + "GetObjectOutput$WebsiteRedirectLocation": "If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata.", + "HeadObjectOutput$WebsiteRedirectLocation": "If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata.", + "PutObjectRequest$WebsiteRedirectLocation": "If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata." 
+ } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/s3/2006-03-01/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/s3/2006-03-01/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/s3/2006-03-01/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/s3/2006-03-01/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/s3/2006-03-01/paginators-1.json new file mode 100644 index 000000000..349892004 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/s3/2006-03-01/paginators-1.json @@ -0,0 +1,66 @@ +{ + "pagination": { + "ListBuckets": { + "result_key": "Buckets" + }, + "ListMultipartUploads": { + "limit_key": "MaxUploads", + "more_results": "IsTruncated", + "output_token": [ + "NextKeyMarker", + "NextUploadIdMarker" + ], + "input_token": [ + "KeyMarker", + "UploadIdMarker" + ], + "result_key": [ + "Uploads", + "CommonPrefixes" + ] + }, + "ListObjectVersions": { + "more_results": "IsTruncated", + "limit_key": "MaxKeys", + "output_token": [ + "NextKeyMarker", + "NextVersionIdMarker" + ], + "input_token": [ + "KeyMarker", + "VersionIdMarker" + ], + "result_key": [ + "Versions", + "DeleteMarkers", + "CommonPrefixes" + ] + }, + "ListObjects": { + "more_results": "IsTruncated", + "limit_key": "MaxKeys", + "output_token": "NextMarker || Contents[-1].Key", + "input_token": "Marker", + "result_key": [ + "Contents", + "CommonPrefixes" + ] + }, + "ListObjectsV2": { + "limit_key": "MaxKeys", + "output_token": "NextContinuationToken", + "input_token": "ContinuationToken", + "result_key": [ + "Contents", + "CommonPrefixes" + ] + }, + "ListParts": { + "more_results": "IsTruncated", + "limit_key": "MaxParts", + "output_token": "NextPartNumberMarker", + "input_token": "PartNumberMarker", + "result_key": "Parts" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/s3/2006-03-01/waiters-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/s3/2006-03-01/waiters-2.json new file mode 100644 index 000000000..b508a8f5b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/s3/2006-03-01/waiters-2.json @@ -0,0 +1,73 @@ +{ + "version": 2, + "waiters": { + "BucketExists": { + "delay": 5, + "operation": "HeadBucket", + "maxAttempts": 20, + "acceptors": [ + { + "expected": 200, + "matcher": "status", + "state": "success" + }, + { + "expected": 301, + "matcher": "status", + "state": "success" + }, + { + "expected": 403, + "matcher": "status", + "state": "success" + }, + { + "expected": 404, + "matcher": "status", + "state": "retry" + } + ] + }, + "BucketNotExists": { + "delay": 5, + "operation": "HeadBucket", + "maxAttempts": 20, + "acceptors": [ + { + "expected": 404, + "matcher": "status", + "state": "success" + } + ] + }, + "ObjectExists": { + "delay": 5, + "operation": "HeadObject", + "maxAttempts": 20, + "acceptors": [ + { + "expected": 200, + "matcher": "status", + "state": "success" + }, + { + "expected": 404, + "matcher": "status", + "state": "retry" + } + ] + }, + "ObjectNotExists": { + "delay": 5, + "operation": "HeadObject", + "maxAttempts": 20, + "acceptors": [ + { + "expected": 404, + "matcher": "status", + "state": "success" + } + ] + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/sdb/2009-04-15/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/sdb/2009-04-15/api-2.json new file mode 100644 index 000000000..3eb686d6d --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go/models/apis/sdb/2009-04-15/api-2.json @@ -0,0 +1,971 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2009-04-15", + "endpointPrefix":"sdb", + "serviceFullName":"Amazon SimpleDB", + "signatureVersion":"v2", + "xmlNamespace":"http://sdb.amazonaws.com/doc/2009-04-15/", + "protocol":"query" + }, + "operations":{ + "BatchDeleteAttributes":{ + "name":"BatchDeleteAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchDeleteAttributesRequest"} + }, + "BatchPutAttributes":{ + "name":"BatchPutAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchPutAttributesRequest"}, + "errors":[ + { + "shape":"DuplicateItemName", + "error":{ + "code":"DuplicateItemName", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterValue", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"MissingParameter", + "error":{ + "code":"MissingParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NoSuchDomain", + "error":{ + "code":"NoSuchDomain", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NumberItemAttributesExceeded", + "error":{ + "code":"NumberItemAttributesExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NumberDomainAttributesExceeded", + "error":{ + "code":"NumberDomainAttributesExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NumberDomainBytesExceeded", + "error":{ + "code":"NumberDomainBytesExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NumberSubmittedItemsExceeded", + "error":{ + "code":"NumberSubmittedItemsExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NumberSubmittedAttributesExceeded", + "error":{ + "code":"NumberSubmittedAttributesExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "CreateDomain":{ + "name":"CreateDomain", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDomainRequest"}, + "errors":[ + { + "shape":"InvalidParameterValue", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"MissingParameter", + "error":{ + "code":"MissingParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NumberDomainsExceeded", + "error":{ + "code":"NumberDomainsExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeleteAttributes":{ + "name":"DeleteAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAttributesRequest"}, + "errors":[ + { + "shape":"InvalidParameterValue", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"MissingParameter", + "error":{ + "code":"MissingParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NoSuchDomain", + "error":{ + "code":"NoSuchDomain", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"AttributeDoesNotExist", + "error":{ + "code":"AttributeDoesNotExist", + "httpStatusCode":404, + "senderFault":true + 
}, + "exception":true + } + ] + }, + "DeleteDomain":{ + "name":"DeleteDomain", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDomainRequest"}, + "errors":[ + { + "shape":"MissingParameter", + "error":{ + "code":"MissingParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DomainMetadata":{ + "name":"DomainMetadata", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DomainMetadataRequest"}, + "output":{ + "shape":"DomainMetadataResult", + "resultWrapper":"DomainMetadataResult" + }, + "errors":[ + { + "shape":"MissingParameter", + "error":{ + "code":"MissingParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NoSuchDomain", + "error":{ + "code":"NoSuchDomain", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "GetAttributes":{ + "name":"GetAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetAttributesRequest"}, + "output":{ + "shape":"GetAttributesResult", + "resultWrapper":"GetAttributesResult" + }, + "errors":[ + { + "shape":"InvalidParameterValue", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"MissingParameter", + "error":{ + "code":"MissingParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NoSuchDomain", + "error":{ + "code":"NoSuchDomain", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "ListDomains":{ + "name":"ListDomains", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDomainsRequest"}, + "output":{ + "shape":"ListDomainsResult", + "resultWrapper":"ListDomainsResult" + }, + "errors":[ + { + "shape":"InvalidParameterValue", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidNextToken", + "error":{ + "code":"InvalidNextToken", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "PutAttributes":{ + "name":"PutAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutAttributesRequest"}, + "errors":[ + { + "shape":"InvalidParameterValue", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"MissingParameter", + "error":{ + "code":"MissingParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NoSuchDomain", + "error":{ + "code":"NoSuchDomain", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NumberDomainAttributesExceeded", + "error":{ + "code":"NumberDomainAttributesExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NumberDomainBytesExceeded", + "error":{ + "code":"NumberDomainBytesExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NumberItemAttributesExceeded", + "error":{ + "code":"NumberItemAttributesExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"AttributeDoesNotExist", + "error":{ + "code":"AttributeDoesNotExist", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + } + ] + }, + "Select":{ + "name":"Select", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"SelectRequest"}, + "output":{ + "shape":"SelectResult", + "resultWrapper":"SelectResult" + }, + "errors":[ + { + "shape":"InvalidParameterValue", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidNextToken", + "error":{ + "code":"InvalidNextToken", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidNumberPredicates", + "error":{ + "code":"InvalidNumberPredicates", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidNumberValueTests", + "error":{ + "code":"InvalidNumberValueTests", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidQueryExpression", + "error":{ + "code":"InvalidQueryExpression", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"MissingParameter", + "error":{ + "code":"MissingParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NoSuchDomain", + "error":{ + "code":"NoSuchDomain", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"RequestTimeout", + "error":{ + "code":"RequestTimeout", + "httpStatusCode":408, + "senderFault":true + }, + "exception":true + }, + { + "shape":"TooManyRequestedAttributes", + "error":{ + "code":"TooManyRequestedAttributes", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + } + }, + "shapes":{ + "Attribute":{ + "type":"structure", + "required":[ + "Name", + "Value" + ], + "members":{ + "Name":{"shape":"String"}, + "AlternateNameEncoding":{"shape":"String"}, + "Value":{"shape":"String"}, + "AlternateValueEncoding":{"shape":"String"} + } + }, + "AttributeDoesNotExist":{ + "type":"structure", + "members":{ + "BoxUsage":{"shape":"Float"} + }, + "error":{ + "code":"AttributeDoesNotExist", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "AttributeList":{ + "type":"list", + "member":{ + "shape":"Attribute", + "locationName":"Attribute" + }, + "flattened":true + }, + "AttributeNameList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"AttributeName" + }, + "flattened":true + }, + "BatchDeleteAttributesRequest":{ + "type":"structure", + "required":[ + "DomainName", + "Items" + ], + "members":{ + "DomainName":{"shape":"String"}, + "Items":{"shape":"DeletableItemList"} + } + }, + "BatchPutAttributesRequest":{ + "type":"structure", + "required":[ + "DomainName", + "Items" + ], + "members":{ + "DomainName":{"shape":"String"}, + "Items":{"shape":"ReplaceableItemList"} + } + }, + "Boolean":{"type":"boolean"}, + "CreateDomainRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"String"} + } + }, + "DeletableAttribute":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"String"}, + "Value":{"shape":"String"} + } + }, + "DeletableAttributeList":{ + "type":"list", + "member":{ + "shape":"DeletableAttribute", + "locationName":"Attribute" + }, + "flattened":true + }, + "DeletableItem":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"String", + "locationName":"ItemName" + }, + "Attributes":{"shape":"DeletableAttributeList"} + } + }, + "DeletableItemList":{ + "type":"list", + "member":{ + "shape":"DeletableItem", + "locationName":"Item" + }, + "flattened":true + }, + "DeleteAttributesRequest":{ + "type":"structure", + 
"required":[ + "DomainName", + "ItemName" + ], + "members":{ + "DomainName":{"shape":"String"}, + "ItemName":{"shape":"String"}, + "Attributes":{"shape":"DeletableAttributeList"}, + "Expected":{"shape":"UpdateCondition"} + } + }, + "DeleteDomainRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"String"} + } + }, + "DomainMetadataRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"String"} + } + }, + "DomainMetadataResult":{ + "type":"structure", + "members":{ + "ItemCount":{"shape":"Integer"}, + "ItemNamesSizeBytes":{"shape":"Long"}, + "AttributeNameCount":{"shape":"Integer"}, + "AttributeNamesSizeBytes":{"shape":"Long"}, + "AttributeValueCount":{"shape":"Integer"}, + "AttributeValuesSizeBytes":{"shape":"Long"}, + "Timestamp":{"shape":"Integer"} + } + }, + "DomainNameList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"DomainName" + }, + "flattened":true + }, + "DuplicateItemName":{ + "type":"structure", + "members":{ + "BoxUsage":{"shape":"Float"} + }, + "error":{ + "code":"DuplicateItemName", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Float":{"type":"float"}, + "GetAttributesRequest":{ + "type":"structure", + "required":[ + "DomainName", + "ItemName" + ], + "members":{ + "DomainName":{"shape":"String"}, + "ItemName":{"shape":"String"}, + "AttributeNames":{"shape":"AttributeNameList"}, + "ConsistentRead":{"shape":"Boolean"} + } + }, + "GetAttributesResult":{ + "type":"structure", + "members":{ + "Attributes":{"shape":"AttributeList"} + } + }, + "Integer":{"type":"integer"}, + "InvalidNextToken":{ + "type":"structure", + "members":{ + "BoxUsage":{"shape":"Float"} + }, + "error":{ + "code":"InvalidNextToken", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidNumberPredicates":{ + "type":"structure", + "members":{ + "BoxUsage":{"shape":"Float"} + }, + "error":{ + "code":"InvalidNumberPredicates", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidNumberValueTests":{ + "type":"structure", + "members":{ + "BoxUsage":{"shape":"Float"} + }, + "error":{ + "code":"InvalidNumberValueTests", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidParameterValue":{ + "type":"structure", + "members":{ + "BoxUsage":{"shape":"Float"} + }, + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidQueryExpression":{ + "type":"structure", + "members":{ + "BoxUsage":{"shape":"Float"} + }, + "error":{ + "code":"InvalidQueryExpression", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Item":{ + "type":"structure", + "required":[ + "Name", + "Attributes" + ], + "members":{ + "Name":{"shape":"String"}, + "AlternateNameEncoding":{"shape":"String"}, + "Attributes":{"shape":"AttributeList"} + } + }, + "ItemList":{ + "type":"list", + "member":{ + "shape":"Item", + "locationName":"Item" + }, + "flattened":true + }, + "ListDomainsRequest":{ + "type":"structure", + "members":{ + "MaxNumberOfDomains":{"shape":"Integer"}, + "NextToken":{"shape":"String"} + } + }, + "ListDomainsResult":{ + "type":"structure", + "members":{ + "DomainNames":{"shape":"DomainNameList"}, + "NextToken":{"shape":"String"} + } + }, + "Long":{"type":"long"}, + "MissingParameter":{ + "type":"structure", + "members":{ + "BoxUsage":{"shape":"Float"} + }, + "error":{ + "code":"MissingParameter", + 
"httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "NoSuchDomain":{ + "type":"structure", + "members":{ + "BoxUsage":{"shape":"Float"} + }, + "error":{ + "code":"NoSuchDomain", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "NumberDomainAttributesExceeded":{ + "type":"structure", + "members":{ + "BoxUsage":{"shape":"Float"} + }, + "error":{ + "code":"NumberDomainAttributesExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "NumberDomainBytesExceeded":{ + "type":"structure", + "members":{ + "BoxUsage":{"shape":"Float"} + }, + "error":{ + "code":"NumberDomainBytesExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "NumberDomainsExceeded":{ + "type":"structure", + "members":{ + "BoxUsage":{"shape":"Float"} + }, + "error":{ + "code":"NumberDomainsExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "NumberItemAttributesExceeded":{ + "type":"structure", + "members":{ + "BoxUsage":{"shape":"Float"} + }, + "error":{ + "code":"NumberItemAttributesExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "NumberSubmittedAttributesExceeded":{ + "type":"structure", + "members":{ + "BoxUsage":{"shape":"Float"} + }, + "error":{ + "code":"NumberSubmittedAttributesExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "NumberSubmittedItemsExceeded":{ + "type":"structure", + "members":{ + "BoxUsage":{"shape":"Float"} + }, + "error":{ + "code":"NumberSubmittedItemsExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "PutAttributesRequest":{ + "type":"structure", + "required":[ + "DomainName", + "ItemName", + "Attributes" + ], + "members":{ + "DomainName":{"shape":"String"}, + "ItemName":{"shape":"String"}, + "Attributes":{"shape":"ReplaceableAttributeList"}, + "Expected":{"shape":"UpdateCondition"} + } + }, + "ReplaceableAttribute":{ + "type":"structure", + "required":[ + "Name", + "Value" + ], + "members":{ + "Name":{"shape":"String"}, + "Value":{"shape":"String"}, + "Replace":{"shape":"Boolean"} + } + }, + "ReplaceableAttributeList":{ + "type":"list", + "member":{ + "shape":"ReplaceableAttribute", + "locationName":"Attribute" + }, + "flattened":true + }, + "ReplaceableItem":{ + "type":"structure", + "required":[ + "Name", + "Attributes" + ], + "members":{ + "Name":{ + "shape":"String", + "locationName":"ItemName" + }, + "Attributes":{"shape":"ReplaceableAttributeList"} + } + }, + "ReplaceableItemList":{ + "type":"list", + "member":{ + "shape":"ReplaceableItem", + "locationName":"Item" + }, + "flattened":true + }, + "RequestTimeout":{ + "type":"structure", + "members":{ + "BoxUsage":{"shape":"Float"} + }, + "error":{ + "code":"RequestTimeout", + "httpStatusCode":408, + "senderFault":true + }, + "exception":true + }, + "SelectRequest":{ + "type":"structure", + "required":["SelectExpression"], + "members":{ + "SelectExpression":{"shape":"String"}, + "NextToken":{"shape":"String"}, + "ConsistentRead":{"shape":"Boolean"} + } + }, + "SelectResult":{ + "type":"structure", + "members":{ + "Items":{"shape":"ItemList"}, + "NextToken":{"shape":"String"} + } + }, + "String":{"type":"string"}, + "TooManyRequestedAttributes":{ + "type":"structure", + "members":{ + "BoxUsage":{"shape":"Float"} + }, + "error":{ + "code":"TooManyRequestedAttributes", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "UpdateCondition":{ + "type":"structure", + 
"members":{ + "Name":{"shape":"String"}, + "Value":{"shape":"String"}, + "Exists":{"shape":"Boolean"} + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/sdb/2009-04-15/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/sdb/2009-04-15/docs-2.json new file mode 100644 index 000000000..b4baed838 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/sdb/2009-04-15/docs-2.json @@ -0,0 +1,353 @@ +{ + "version": "2.0", + "operations": { + "BatchDeleteAttributes": "

    Performs multiple DeleteAttributes operations in a single call, which reduces round trips and latencies. This enables Amazon SimpleDB to optimize requests, which generally yields better throughput.

    If you specify BatchDeleteAttributes without attributes or values, all the attributes for the item are deleted.

    BatchDeleteAttributes is an idempotent operation; running it multiple times on the same item or attribute doesn't result in an error.

    The BatchDeleteAttributes operation succeeds or fails in its entirety. There are no partial deletes. You can execute multiple BatchDeleteAttributes operations and other operations in parallel. However, large numbers of concurrent BatchDeleteAttributes calls can result in Service Unavailable (503) responses.

    This operation is vulnerable to exceeding the maximum URL size when making a REST request using the HTTP GET method.

    This operation does not support conditions using Expected.X.Name, Expected.X.Value, or Expected.X.Exists.

    The following limitations are enforced for this operation:

    • 1 MB request size
    • 25 item limit per BatchDeleteAttributes operation

    ", + "BatchPutAttributes": "

    The BatchPutAttributes operation creates or replaces attributes within one or more items. By using this operation, the client can perform multiple PutAttributes operations with a single call. This helps yield savings in round trips and latencies, enabling Amazon SimpleDB to optimize requests and generally produce better throughput.

    The client may specify the item name with the Item.X.ItemName parameter. The client may specify new attributes using a combination of the Item.X.Attribute.Y.Name and Item.X.Attribute.Y.Value parameters. The client may specify the first attribute for the first item using the parameters Item.0.Attribute.0.Name and Item.0.Attribute.0.Value, the second attribute for the first item using the parameters Item.0.Attribute.1.Name and Item.0.Attribute.1.Value, and so on.

    Attributes are uniquely identified within an item by their name/value combination. For example, a single item can have the attributes { \"first_name\", \"first_value\" } and { \"first_name\", \"second_value\" }. However, it cannot have two attribute instances where both the Item.X.Attribute.Y.Name and Item.X.Attribute.Y.Value are the same.

    Optionally, the requester can supply the Replace parameter for each individual value. Setting this value to true will cause the new attribute values to replace the existing attribute values. For example, if an item I has the attributes { 'a', '1' }, { 'b', '2'} and { 'b', '3' } and the requester does a BatchPutAttributes of {'I', 'b', '4' } with the Replace parameter set to true, the final attributes of the item will be { 'a', '1' } and { 'b', '4' }, replacing the previous values of the 'b' attribute with the new value.

    You cannot specify an empty string as an item or as an attribute name. The BatchPutAttributes operation succeeds or fails in its entirety. There are no partial puts. This operation is vulnerable to exceeding the maximum URL size when making a REST request using the HTTP GET method. This operation does not support conditions using Expected.X.Name, Expected.X.Value, or Expected.X.Exists.

    You can execute multiple BatchPutAttributes operations and other operations in parallel. However, large numbers of concurrent BatchPutAttributes calls can result in Service Unavailable (503) responses.

    The following limitations are enforced for this operation:

    • 256 attribute name-value pairs per item
    • 1 MB request size
    • 1 billion attributes per domain
    • 10 GB of total user data storage per domain
    • 25 item limit per BatchPutAttributes operation
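    A minimal Go sketch of a batched write with Replace semantics is shown below; the domain name and item values are hypothetical, and the generated service/simpledb client from this SDK is assumed.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/simpledb"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	sdb := simpledb.New(sess)

	// Write one item in a batch; Replace:true overwrites any existing values
	// of attribute 'b' instead of adding another value alongside them.
	_, err := sdb.BatchPutAttributes(&simpledb.BatchPutAttributesInput{
		DomainName: aws.String("mydomain"), // hypothetical domain
		Items: []*simpledb.ReplaceableItem{
			{
				Name: aws.String("I"),
				Attributes: []*simpledb.ReplaceableAttribute{
					{Name: aws.String("b"), Value: aws.String("4"), Replace: aws.Bool(true)},
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}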

    ", + "CreateDomain": "

    The CreateDomain operation creates a new domain. The domain name should be unique among the domains associated with the Access Key ID provided in the request. The CreateDomain operation may take 10 or more seconds to complete.

    CreateDomain is an idempotent operation; running it multiple times using the same domain name will not result in an error response.

    The client can create up to 100 domains per account.

    If the client requires additional domains, go to http://aws.amazon.com/contact-us/simpledb-limit-request/.

    ", + "DeleteAttributes": "

    Deletes one or more attributes associated with an item. If all attributes of the item are deleted, the item is deleted.

    If DeleteAttributes is called without any attributes or values specified, all the attributes for the item are deleted.

    DeleteAttributes is an idempotent operation; running it multiple times on the same item or attribute does not result in an error response.

    Because Amazon SimpleDB makes multiple copies of item data and uses an eventual consistency update model, performing a GetAttributes or Select operation (read) immediately after a DeleteAttributes or PutAttributes operation (write) might not return updated item data.

    ", + "DeleteDomain": "

    The DeleteDomain operation deletes a domain. Any items (and their attributes) in the domain are deleted as well. The DeleteDomain operation might take 10 or more seconds to complete.

    Running DeleteDomain on a domain that does not exist or running the function multiple times using the same domain name will not result in an error response. ", + "DomainMetadata": "

    Returns information about the domain, including when the domain was created, the number of items and attributes in the domain, and the size of the attribute names and values.
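    For illustration, a minimal Go sketch reading this metadata follows; the domain name is hypothetical and the generated service/simpledb client from this SDK is assumed.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/simpledb"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	sdb := simpledb.New(sess)

	// Fetch item/attribute counts and sizes for a single domain.
	out, err := sdb.DomainMetadata(&simpledb.DomainMetadataInput{
		DomainName: aws.String("mydomain"), // hypothetical domain
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("items=%d attribute names=%d\n",
		aws.Int64Value(out.ItemCount), aws.Int64Value(out.AttributeNameCount))
}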

    ", + "GetAttributes": "

    Returns all of the attributes associated with the specified item. Optionally, the attributes returned can be limited to one or more attributes by specifying an attribute name parameter.

    If the item does not exist on the replica that was accessed for this operation, an empty set is returned. The system does not return an error as it cannot guarantee the item does not exist on other replicas.

    If GetAttributes is called without being passed any attribute names, all the attributes for the item are returned. ", + "ListDomains": "

    The ListDomains operation lists all domains associated with the Access Key ID. It returns domain names up to the limit set by MaxNumberOfDomains. A NextToken is returned if there are more than MaxNumberOfDomains domains. Calling ListDomains successive times with the NextToken provided by the previous call returns up to MaxNumberOfDomains more domain names with each call, as in the sketch below.
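    A minimal Go sketch of that NextToken loop, assuming the generated service/simpledb client and environment-supplied credentials:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/simpledb"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	sdb := simpledb.New(sess)

	// Page through all domains, up to 100 names per call, following
	// NextToken until the service stops returning one.
	input := &simpledb.ListDomainsInput{MaxNumberOfDomains: aws.Int64(100)}
	for {
		out, err := sdb.ListDomains(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, name := range out.DomainNames {
			fmt.Println(aws.StringValue(name))
		}
		if out.NextToken == nil {
			break
		}
		input.NextToken = out.NextToken
	}
}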

    ", + "PutAttributes": "

    The PutAttributes operation creates or replaces attributes in an item. The client may specify new attributes using a combination of the Attribute.X.Name and Attribute.X.Value parameters. The client specifies the first attribute by the parameters Attribute.0.Name and Attribute.0.Value, the second attribute by the parameters Attribute.1.Name and Attribute.1.Value, and so on.

    Attributes are uniquely identified in an item by their name/value combination. For example, a single item can have the attributes { \"first_name\", \"first_value\" } and { \"first_name\", \"second_value\" }. However, it cannot have two attribute instances where both the Attribute.X.Name and Attribute.X.Value are the same.

    Optionally, the requester can supply the Replace parameter for each individual attribute. Setting this value to true causes the new attribute value to replace the existing attribute value(s). For example, if an item has the attributes { 'a', '1' }, { 'b', '2'} and { 'b', '3' } and the requester calls PutAttributes using the attributes { 'b', '4' } with the Replace parameter set to true, the final attributes of the item are changed to { 'a', '1' } and { 'b', '4' }, which replaces the previous values of the 'b' attribute with the new value.

    Using PutAttributes to replace attribute values that do not exist will not result in an error response.

    You cannot specify an empty string as an attribute name.

    Because Amazon SimpleDB makes multiple copies of client data and uses an eventual consistency update model, a GetAttributes or Select operation (read) performed immediately after a PutAttributes or DeleteAttributes operation (write) might not return the updated data.

    The following limitations are enforced for this operation:

    • 256 total attribute name-value pairs per item
    • One billion attributes per domain
    • 10 GB of total user data storage per domain
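    A minimal Go sketch of the { 'b', '4' } Replace example above, assuming the generated service/simpledb client and a hypothetical domain and item:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/simpledb"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	sdb := simpledb.New(sess)

	// With Replace:true, { 'b', '4' } replaces both existing 'b' values,
	// leaving the item with { 'a', '1' } and { 'b', '4' }.
	_, err := sdb.PutAttributes(&simpledb.PutAttributesInput{
		DomainName: aws.String("mydomain"), // hypothetical domain
		ItemName:   aws.String("item-1"),   // hypothetical item
		Attributes: []*simpledb.ReplaceableAttribute{
			{Name: aws.String("b"), Value: aws.String("4"), Replace: aws.Bool(true)},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}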

    ", + "Select": "

    The Select operation returns a set of attributes for ItemNames that match the select expression. Select is similar to the standard SQL SELECT statement.

    The total size of the response cannot exceed 1 MB. Amazon SimpleDB automatically adjusts the number of items returned per page to enforce this limit. For example, if the client asks to retrieve 2500 items, but each individual item is 10 kB in size, the system returns 100 items and an appropriate NextToken so the client can access the next page of results.

    For information on how to construct select expressions, see Using Select to Create Amazon SimpleDB Queries in the Developer Guide.
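    A minimal Go sketch that follows NextToken across those 1 MB response pages, assuming the generated service/simpledb client and a hypothetical domain and query:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/simpledb"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	sdb := simpledb.New(sess)

	// Re-issue the Select with the returned NextToken until the full
	// result set has been read.
	input := &simpledb.SelectInput{
		SelectExpression: aws.String("select * from mydomain where b = '4'"), // hypothetical query
		ConsistentRead:   aws.Bool(true),
	}
	for {
		out, err := sdb.Select(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, item := range out.Items {
			fmt.Println(aws.StringValue(item.Name), len(item.Attributes))
		}
		if out.NextToken == nil {
			break
		}
		input.NextToken = out.NextToken
	}
}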

    " + }, + "service": "Amazon SimpleDB is a web service providing the core database functions of data indexing and querying in the cloud. By offloading the time and effort associated with building and operating a web-scale database, SimpleDB provides developers the freedom to focus on application development.

    A traditional, clustered relational database requires a sizable upfront capital outlay, is complex to design, and often requires extensive and repetitive database administration. Amazon SimpleDB is dramatically simpler, requiring no schema, automatically indexing your data and providing a simple API for storage and access. This approach eliminates the administrative burden of data modeling, index maintenance, and performance tuning. Developers gain access to this functionality within Amazon's proven computing environment, are able to scale instantly, and pay only for what they use.

    Visit http://aws.amazon.com/simpledb/ for more information.

    ", + "shapes": { + "Attribute": { + "base": "

    ", + "refs": { + "AttributeList$member": null + } + }, + "AttributeDoesNotExist": { + "base": "

    The specified attribute does not exist.

    ", + "refs": { + } + }, + "AttributeList": { + "base": null, + "refs": { + "GetAttributesResult$Attributes": "The list of attributes returned by the operation.", + "Item$Attributes": "A list of attributes." + } + }, + "AttributeNameList": { + "base": null, + "refs": { + "GetAttributesRequest$AttributeNames": "The names of the attributes." + } + }, + "BatchDeleteAttributesRequest": { + "base": null, + "refs": { + } + }, + "BatchPutAttributesRequest": { + "base": null, + "refs": { + } + }, + "Boolean": { + "base": null, + "refs": { + "GetAttributesRequest$ConsistentRead": "Determines whether or not strong consistency should be enforced when data is read from SimpleDB. If true, any data previously written to SimpleDB will be returned. Otherwise, results will be consistent eventually, and the client may not see data that was written immediately before your read.", + "ReplaceableAttribute$Replace": "A flag specifying whether or not to replace the attribute/value pair or to add a new attribute/value pair. The default setting is false.", + "SelectRequest$ConsistentRead": "Determines whether or not strong consistency should be enforced when data is read from SimpleDB. If true, any data previously written to SimpleDB will be returned. Otherwise, results will be consistent eventually, and the client may not see data that was written immediately before your read.", + "UpdateCondition$Exists": "

    A value specifying whether or not the specified attribute must exist with the specified value in order for the update condition to be satisfied. Specify true if the attribute must exist for the update condition to be satisfied. Specify false if the attribute should not exist in order for the update condition to be satisfied.

    " + } + }, + "CreateDomainRequest": { + "base": null, + "refs": { + } + }, + "DeletableAttribute": { + "base": "

    ", + "refs": { + "DeletableAttributeList$member": null + } + }, + "DeletableAttributeList": { + "base": null, + "refs": { + "DeletableItem$Attributes": null, + "DeleteAttributesRequest$Attributes": "A list of Attributes. Similar to columns on a spreadsheet, attributes represent categories of data that can be assigned to items." + } + }, + "DeletableItem": { + "base": null, + "refs": { + "DeletableItemList$member": null + } + }, + "DeletableItemList": { + "base": null, + "refs": { + "BatchDeleteAttributesRequest$Items": "A list of items on which to perform the operation." + } + }, + "DeleteAttributesRequest": { + "base": null, + "refs": { + } + }, + "DeleteDomainRequest": { + "base": null, + "refs": { + } + }, + "DomainMetadataRequest": { + "base": null, + "refs": { + } + }, + "DomainMetadataResult": { + "base": null, + "refs": { + } + }, + "DomainNameList": { + "base": null, + "refs": { + "ListDomainsResult$DomainNames": "A list of domain names that match the expression." + } + }, + "DuplicateItemName": { + "base": "

    The item name was specified more than once.

    ", + "refs": { + } + }, + "Float": { + "base": null, + "refs": { + "AttributeDoesNotExist$BoxUsage": null, + "DuplicateItemName$BoxUsage": null, + "InvalidNextToken$BoxUsage": null, + "InvalidNumberPredicates$BoxUsage": null, + "InvalidNumberValueTests$BoxUsage": null, + "InvalidParameterValue$BoxUsage": null, + "InvalidQueryExpression$BoxUsage": null, + "MissingParameter$BoxUsage": null, + "NoSuchDomain$BoxUsage": null, + "NumberDomainAttributesExceeded$BoxUsage": null, + "NumberDomainBytesExceeded$BoxUsage": null, + "NumberDomainsExceeded$BoxUsage": null, + "NumberItemAttributesExceeded$BoxUsage": null, + "NumberSubmittedAttributesExceeded$BoxUsage": null, + "NumberSubmittedItemsExceeded$BoxUsage": null, + "RequestTimeout$BoxUsage": null, + "TooManyRequestedAttributes$BoxUsage": null + } + }, + "GetAttributesRequest": { + "base": null, + "refs": { + } + }, + "GetAttributesResult": { + "base": null, + "refs": { + } + }, + "Integer": { + "base": null, + "refs": { + "DomainMetadataResult$ItemCount": "The number of all items in the domain.", + "DomainMetadataResult$AttributeNameCount": "The number of unique attribute names in the domain.", + "DomainMetadataResult$AttributeValueCount": "The number of all attribute name/value pairs in the domain.", + "DomainMetadataResult$Timestamp": "The data and time when metadata was calculated, in Epoch (UNIX) seconds.", + "ListDomainsRequest$MaxNumberOfDomains": "The maximum number of domain names you want returned. The range is 1 to 100. The default setting is 100." + } + }, + "InvalidNextToken": { + "base": "

    The specified NextToken is not valid.

    ", + "refs": { + } + }, + "InvalidNumberPredicates": { + "base": "

    Too many predicates exist in the query expression.

    ", + "refs": { + } + }, + "InvalidNumberValueTests": { + "base": "

    Too many predicates exist in the query expression.

    ", + "refs": { + } + }, + "InvalidParameterValue": { + "base": "

    The value for a parameter is invalid.

    ", + "refs": { + } + }, + "InvalidQueryExpression": { + "base": "

    The specified query expression syntax is not valid.

    ", + "refs": { + } + }, + "Item": { + "base": "

    ", + "refs": { + "ItemList$member": null + } + }, + "ItemList": { + "base": null, + "refs": { + "SelectResult$Items": "A list of items that match the select expression." + } + }, + "ListDomainsRequest": { + "base": null, + "refs": { + } + }, + "ListDomainsResult": { + "base": null, + "refs": { + } + }, + "Long": { + "base": null, + "refs": { + "DomainMetadataResult$ItemNamesSizeBytes": "The total size of all item names in the domain, in bytes.", + "DomainMetadataResult$AttributeNamesSizeBytes": "The total size of all unique attribute names in the domain, in bytes.", + "DomainMetadataResult$AttributeValuesSizeBytes": "The total size of all attribute values in the domain, in bytes." + } + }, + "MissingParameter": { + "base": "

    The request must contain the specified missing parameter.

    ", + "refs": { + } + }, + "NoSuchDomain": { + "base": "

    The specified domain does not exist.

    ", + "refs": { + } + }, + "NumberDomainAttributesExceeded": { + "base": "

    Too many attributes in this domain.

    ", + "refs": { + } + }, + "NumberDomainBytesExceeded": { + "base": "

    Too many bytes in this domain.

    ", + "refs": { + } + }, + "NumberDomainsExceeded": { + "base": "

    Too many domains exist for this account.

    ", + "refs": { + } + }, + "NumberItemAttributesExceeded": { + "base": "

    Too many attributes in this item.

    ", + "refs": { + } + }, + "NumberSubmittedAttributesExceeded": { + "base": "

    Too many attributes exist in a single call.

    ", + "refs": { + } + }, + "NumberSubmittedItemsExceeded": { + "base": "

    Too many items exist in a single call.

    ", + "refs": { + } + }, + "PutAttributesRequest": { + "base": null, + "refs": { + } + }, + "ReplaceableAttribute": { + "base": "

    ", + "refs": { + "ReplaceableAttributeList$member": null + } + }, + "ReplaceableAttributeList": { + "base": null, + "refs": { + "PutAttributesRequest$Attributes": "The list of attributes.", + "ReplaceableItem$Attributes": "The list of attributes for a replaceable item." + } + }, + "ReplaceableItem": { + "base": "

    ", + "refs": { + "ReplaceableItemList$member": null + } + }, + "ReplaceableItemList": { + "base": null, + "refs": { + "BatchPutAttributesRequest$Items": "A list of items on which to perform the operation." + } + }, + "RequestTimeout": { + "base": "

    A timeout occurred when attempting to query the specified domain with the specified query expression.

    ", + "refs": { + } + }, + "SelectRequest": { + "base": null, + "refs": { + } + }, + "SelectResult": { + "base": null, + "refs": { + } + }, + "String": { + "base": null, + "refs": { + "Attribute$Name": "The name of the attribute.", + "Attribute$AlternateNameEncoding": "

    ", + "Attribute$Value": "The value of the attribute.", + "Attribute$AlternateValueEncoding": "

    ", + "AttributeNameList$member": null, + "BatchDeleteAttributesRequest$DomainName": "The name of the domain in which the attributes are being deleted.", + "BatchPutAttributesRequest$DomainName": "The name of the domain in which the attributes are being stored.", + "CreateDomainRequest$DomainName": "The name of the domain to create. The name can range between 3 and 255 characters and can contain the following characters: a-z, A-Z, 0-9, '_', '-', and '.'.", + "DeletableAttribute$Name": "The name of the attribute.", + "DeletableAttribute$Value": "The value of the attribute.", + "DeletableItem$Name": null, + "DeleteAttributesRequest$DomainName": "The name of the domain in which to perform the operation.", + "DeleteAttributesRequest$ItemName": "The name of the item. Similar to rows on a spreadsheet, items represent individual objects that contain one or more value-attribute pairs.", + "DeleteDomainRequest$DomainName": "The name of the domain to delete.", + "DomainMetadataRequest$DomainName": "The name of the domain for which to display the metadata of.", + "DomainNameList$member": null, + "GetAttributesRequest$DomainName": "The name of the domain in which to perform the operation.", + "GetAttributesRequest$ItemName": "The name of the item.", + "Item$Name": "The name of the item.", + "Item$AlternateNameEncoding": "

    ", + "ListDomainsRequest$NextToken": "A string informing Amazon SimpleDB where to start the next list of domain names.", + "ListDomainsResult$NextToken": "An opaque token indicating that there are more domains than the specified MaxNumberOfDomains still available.", + "PutAttributesRequest$DomainName": "The name of the domain in which to perform the operation.", + "PutAttributesRequest$ItemName": "The name of the item.", + "ReplaceableAttribute$Name": "The name of the replaceable attribute.", + "ReplaceableAttribute$Value": "The value of the replaceable attribute.", + "ReplaceableItem$Name": "The name of the replaceable item.", + "SelectRequest$SelectExpression": "The expression used to query the domain.", + "SelectRequest$NextToken": "A string informing Amazon SimpleDB where to start the next list of ItemNames.", + "SelectResult$NextToken": "An opaque token indicating that more items than MaxNumberOfItems were matched, the response size exceeded 1 megabyte, or the execution time exceeded 5 seconds.", + "UpdateCondition$Name": "

    The name of the attribute involved in the condition.

    ", + "UpdateCondition$Value": "

    The value of an attribute. This value can only be specified when the Exists parameter is equal to true.

    " + } + }, + "TooManyRequestedAttributes": { + "base": "

    Too many attributes requested.

    ", + "refs": { + } + }, + "UpdateCondition": { + "base": "

    Specifies the conditions under which data should be updated. If an update condition is specified for a request, the data will be updated only if the condition is satisfied; for example, only if an attribute with a specific name and value exists, or only if a specific attribute doesn't exist.

    ", + "refs": { + "DeleteAttributesRequest$Expected": "The update condition which, if specified, determines whether the specified attributes will be deleted or not. The update condition must be satisfied in order for this request to be processed and the attributes to be deleted.", + "PutAttributesRequest$Expected": "The update condition which, if specified, determines whether the specified attributes will be updated or not. The update condition must be satisfied in order for this request to be processed and the attributes to be updated." + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/sdb/2009-04-15/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/sdb/2009-04-15/paginators-1.json new file mode 100644 index 000000000..236209887 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/sdb/2009-04-15/paginators-1.json @@ -0,0 +1,15 @@ +{ + "pagination": { + "ListDomains": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxNumberOfDomains", + "result_key": "DomainNames" + }, + "Select": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "Items" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/servicecatalog/2015-12-10/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/servicecatalog/2015-12-10/api-2.json new file mode 100644 index 000000000..8e0e421e6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/servicecatalog/2015-12-10/api-2.json @@ -0,0 +1,749 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-12-10", + "endpointPrefix":"servicecatalog", + "jsonVersion":"1.1", + "protocol":"json", + "serviceFullName":"AWS Service Catalog", + "signatureVersion":"v4", + "targetPrefix":"AWS242ServiceCatalogService" + }, + "operations":{ + "DescribeProduct":{ + "name":"DescribeProduct", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeProductInput"}, + "output":{"shape":"DescribeProductOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParametersException"} + ] + }, + "DescribeProductView":{ + "name":"DescribeProductView", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeProductViewInput"}, + "output":{"shape":"DescribeProductViewOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParametersException"} + ] + }, + "DescribeProvisioningParameters":{ + "name":"DescribeProvisioningParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeProvisioningParametersInput"}, + "output":{"shape":"DescribeProvisioningParametersOutput"}, + "errors":[ + {"shape":"InvalidParametersException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeRecord":{ + "name":"DescribeRecord", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRecordInput"}, + "output":{"shape":"DescribeRecordOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"} + ] + }, + "ListLaunchPaths":{ + "name":"ListLaunchPaths", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListLaunchPathsInput"}, + "output":{"shape":"ListLaunchPathsOutput"}, + "errors":[ + {"shape":"InvalidParametersException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "ListRecordHistory":{ + "name":"ListRecordHistory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListRecordHistoryInput"}, + "output":{"shape":"ListRecordHistoryOutput"}, + "errors":[ + 
{"shape":"InvalidParametersException"} + ] + }, + "ProvisionProduct":{ + "name":"ProvisionProduct", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ProvisionProductInput"}, + "output":{"shape":"ProvisionProductOutput"}, + "errors":[ + {"shape":"InvalidParametersException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"DuplicateResourceException"} + ] + }, + "ScanProvisionedProducts":{ + "name":"ScanProvisionedProducts", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ScanProvisionedProductsInput"}, + "output":{"shape":"ScanProvisionedProductsOutput"}, + "errors":[ + {"shape":"InvalidParametersException"} + ] + }, + "SearchProducts":{ + "name":"SearchProducts", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SearchProductsInput"}, + "output":{"shape":"SearchProductsOutput"}, + "errors":[ + {"shape":"InvalidParametersException"} + ] + }, + "TerminateProvisionedProduct":{ + "name":"TerminateProvisionedProduct", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TerminateProvisionedProductInput"}, + "output":{"shape":"TerminateProvisionedProductOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"} + ] + }, + "UpdateProvisionedProduct":{ + "name":"UpdateProvisionedProduct", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateProvisionedProductInput"}, + "output":{"shape":"UpdateProvisionedProductOutput"}, + "errors":[ + {"shape":"InvalidParametersException"}, + {"shape":"ResourceNotFoundException"} + ] + } + }, + "shapes":{ + "AcceptLanguage":{"type":"string"}, + "AllowedValue":{"type":"string"}, + "AllowedValues":{ + "type":"list", + "member":{"shape":"AllowedValue"} + }, + "ApproximateCount":{"type":"integer"}, + "AttributeValue":{"type":"string"}, + "ConstraintDescription":{"type":"string"}, + "ConstraintSummaries":{ + "type":"list", + "member":{"shape":"ConstraintSummary"} + }, + "ConstraintSummary":{ + "type":"structure", + "members":{ + "Type":{"shape":"ConstraintType"}, + "Description":{"shape":"ConstraintDescription"} + } + }, + "ConstraintType":{"type":"string"}, + "CreatedTime":{"type":"timestamp"}, + "DefaultValue":{"type":"string"}, + "DescribeProductInput":{ + "type":"structure", + "required":["Id"], + "members":{ + "AcceptLanguage":{"shape":"AcceptLanguage"}, + "Id":{"shape":"Id"} + } + }, + "DescribeProductOutput":{ + "type":"structure", + "members":{ + "ProductViewSummary":{"shape":"ProductViewSummary"}, + "ProvisioningArtifacts":{"shape":"ProvisioningArtifacts"} + } + }, + "DescribeProductViewInput":{ + "type":"structure", + "required":["Id"], + "members":{ + "AcceptLanguage":{"shape":"AcceptLanguage"}, + "Id":{"shape":"Id"} + } + }, + "DescribeProductViewOutput":{ + "type":"structure", + "members":{ + "ProductViewSummary":{"shape":"ProductViewSummary"}, + "ProvisioningArtifacts":{"shape":"ProvisioningArtifacts"} + } + }, + "DescribeProvisioningParametersInput":{ + "type":"structure", + "required":[ + "ProductId", + "ProvisioningArtifactId" + ], + "members":{ + "AcceptLanguage":{"shape":"AcceptLanguage"}, + "ProductId":{"shape":"Id"}, + "ProvisioningArtifactId":{"shape":"Id"}, + "PathId":{"shape":"Id"} + } + }, + "DescribeProvisioningParametersOutput":{ + "type":"structure", + "members":{ + "ProvisioningArtifactParameters":{"shape":"ProvisioningArtifactParameters"}, + "ConstraintSummaries":{"shape":"ConstraintSummaries"}, + "UsageInstructions":{"shape":"UsageInstructions"} + } + }, + "DescribeRecordInput":{ + "type":"structure", + 
"required":["Id"], + "members":{ + "AcceptLanguage":{"shape":"AcceptLanguage"}, + "Id":{"shape":"Id"}, + "PageToken":{"shape":"PageToken"}, + "PageSize":{"shape":"PageSize"} + } + }, + "DescribeRecordOutput":{ + "type":"structure", + "members":{ + "RecordDetail":{"shape":"RecordDetail"}, + "RecordOutputs":{"shape":"RecordOutputs"}, + "NextPageToken":{"shape":"PageToken"} + } + }, + "Description":{"type":"string"}, + "DuplicateResourceException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ErrorCode":{"type":"string"}, + "ErrorDescription":{"type":"string"}, + "HasDefaultPath":{"type":"boolean"}, + "Id":{"type":"string"}, + "IdempotencyToken":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[a-zA-Z0-9][a-zA-Z0-9_-]*" + }, + "IgnoreErrors":{"type":"boolean"}, + "InstructionType":{"type":"string"}, + "InstructionValue":{"type":"string"}, + "InvalidParametersException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "LastRequestId":{"type":"string"}, + "LaunchPathSummaries":{ + "type":"list", + "member":{"shape":"LaunchPathSummary"} + }, + "LaunchPathSummary":{ + "type":"structure", + "members":{ + "Id":{"shape":"Id"}, + "ConstraintSummaries":{"shape":"ConstraintSummaries"}, + "Tags":{"shape":"Tags"}, + "Name":{"shape":"PortfolioName"} + } + }, + "ListLaunchPathsInput":{ + "type":"structure", + "required":["ProductId"], + "members":{ + "AcceptLanguage":{"shape":"AcceptLanguage"}, + "ProductId":{"shape":"Id"}, + "PageSize":{"shape":"PageSize"}, + "PageToken":{"shape":"PageToken"} + } + }, + "ListLaunchPathsOutput":{ + "type":"structure", + "members":{ + "LaunchPathSummaries":{"shape":"LaunchPathSummaries"}, + "NextPageToken":{"shape":"PageToken"} + } + }, + "ListRecordHistoryInput":{ + "type":"structure", + "members":{ + "AcceptLanguage":{"shape":"AcceptLanguage"}, + "SearchFilter":{"shape":"ListRecordHistorySearchFilter"}, + "PageSize":{"shape":"PageSize"}, + "PageToken":{"shape":"PageToken"} + } + }, + "ListRecordHistoryOutput":{ + "type":"structure", + "members":{ + "RecordDetails":{"shape":"RecordDetails"}, + "NextPageToken":{"shape":"PageToken"} + } + }, + "ListRecordHistorySearchFilter":{ + "type":"structure", + "members":{ + "Key":{"shape":"SearchFilterKey"}, + "Value":{"shape":"SearchFilterValue"} + } + }, + "NoEcho":{"type":"boolean"}, + "NotificationArn":{ + "type":"string", + "max":1224, + "min":1, + "pattern":"arn:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{0,63}:[a-z0-9-\\.]{0,63}:[a-z0-9-\\.]{0,63}:[^/].{0,1023}" + }, + "NotificationArns":{ + "type":"list", + "member":{"shape":"NotificationArn"}, + "max":5 + }, + "OutputKey":{"type":"string"}, + "OutputValue":{"type":"string"}, + "PageSize":{ + "type":"integer", + "max":20, + "min":0 + }, + "PageToken":{ + "type":"string", + "pattern":"[\\u0009\\u000a\\u000d\\u0020-\\uD7FF\\uE000-\\uFFFD]*" + }, + "ParameterConstraints":{ + "type":"structure", + "members":{ + "AllowedValues":{"shape":"AllowedValues"} + } + }, + "ParameterKey":{"type":"string"}, + "ParameterType":{"type":"string"}, + "ParameterValue":{"type":"string"}, + "PortfolioName":{"type":"string"}, + "ProductType":{"type":"string"}, + "ProductViewAggregationType":{"type":"string"}, + "ProductViewAggregationValue":{ + "type":"structure", + "members":{ + "Value":{"shape":"AttributeValue"}, + "ApproximateCount":{"shape":"ApproximateCount"} + } + }, + "ProductViewAggregationValues":{ + "type":"list", + "member":{"shape":"ProductViewAggregationValue"} + }, + "ProductViewAggregations":{ + "type":"map", + 
"key":{"shape":"ProductViewAggregationType"}, + "value":{"shape":"ProductViewAggregationValues"} + }, + "ProductViewDistributor":{"type":"string"}, + "ProductViewFilterBy":{ + "type":"string", + "enum":[ + "FullTextSearch", + "Owner", + "ProductType" + ] + }, + "ProductViewFilterValue":{"type":"string"}, + "ProductViewFilterValues":{ + "type":"list", + "member":{"shape":"ProductViewFilterValue"} + }, + "ProductViewFilters":{ + "type":"map", + "key":{"shape":"ProductViewFilterBy"}, + "value":{"shape":"ProductViewFilterValues"} + }, + "ProductViewName":{"type":"string"}, + "ProductViewOwner":{"type":"string"}, + "ProductViewShortDescription":{"type":"string"}, + "ProductViewSortBy":{ + "type":"string", + "enum":[ + "Title", + "VersionCount", + "CreationDate" + ] + }, + "ProductViewSummaries":{ + "type":"list", + "member":{"shape":"ProductViewSummary"} + }, + "ProductViewSummary":{ + "type":"structure", + "members":{ + "Id":{"shape":"Id"}, + "ProductId":{"shape":"Id"}, + "Name":{"shape":"ProductViewName"}, + "Owner":{"shape":"ProductViewOwner"}, + "ShortDescription":{"shape":"ProductViewShortDescription"}, + "Type":{"shape":"ProductType"}, + "Distributor":{"shape":"ProductViewDistributor"}, + "HasDefaultPath":{"shape":"HasDefaultPath"}, + "SupportEmail":{"shape":"SupportEmail"}, + "SupportDescription":{"shape":"SupportDescription"}, + "SupportUrl":{"shape":"SupportUrl"} + } + }, + "ProvisionProductInput":{ + "type":"structure", + "required":[ + "ProductId", + "ProvisioningArtifactId", + "ProvisionedProductName", + "ProvisionToken" + ], + "members":{ + "AcceptLanguage":{"shape":"AcceptLanguage"}, + "ProductId":{"shape":"Id"}, + "ProvisioningArtifactId":{"shape":"Id"}, + "PathId":{"shape":"Id"}, + "ProvisionedProductName":{"shape":"ProvisionedProductName"}, + "ProvisioningParameters":{"shape":"ProvisioningParameters"}, + "Tags":{"shape":"Tags"}, + "NotificationArns":{"shape":"NotificationArns"}, + "ProvisionToken":{ + "shape":"IdempotencyToken", + "idempotencyToken":true + } + } + }, + "ProvisionProductOutput":{ + "type":"structure", + "members":{ + "RecordDetail":{"shape":"RecordDetail"} + } + }, + "ProvisionedProductDetail":{ + "type":"structure", + "members":{ + "Name":{"shape":"ProvisionedProductNameOrArn"}, + "Arn":{"shape":"ProvisionedProductNameOrArn"}, + "Type":{"shape":"ProvisionedProductType"}, + "Id":{"shape":"ProvisionedProductId"}, + "Status":{"shape":"RecordStatus"}, + "StatusMessage":{"shape":"ProvisionedProductStatusMessage"}, + "CreatedTime":{"shape":"CreatedTime"}, + "IdempotencyToken":{"shape":"IdempotencyToken"}, + "LastRecordId":{"shape":"LastRequestId"} + } + }, + "ProvisionedProductDetails":{ + "type":"list", + "member":{"shape":"ProvisionedProductDetail"} + }, + "ProvisionedProductId":{"type":"string"}, + "ProvisionedProductName":{"type":"string"}, + "ProvisionedProductNameOrArn":{ + "type":"string", + "max":1224, + "min":1, + "pattern":"[a-zA-Z0-9][a-zA-Z0-9_-]{0,127}|arn:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{0,63}:[a-z0-9-\\.]{0,63}:[a-z0-9-\\.]{0,63}:[^/].{0,1023}" + }, + "ProvisionedProductStatusMessage":{"type":"string"}, + "ProvisionedProductType":{"type":"string"}, + "ProvisioningArtifact":{ + "type":"structure", + "members":{ + "Id":{"shape":"Id"}, + "Name":{"shape":"ProvisioningArtifactName"}, + "Description":{"shape":"ProvisioningArtifactDescription"}, + "CreatedTime":{"shape":"ProvisioningArtifactCreatedTime"} + } + }, + "ProvisioningArtifactCreatedTime":{"type":"timestamp"}, + "ProvisioningArtifactDescription":{"type":"string"}, + 
"ProvisioningArtifactName":{"type":"string"}, + "ProvisioningArtifactParameter":{ + "type":"structure", + "members":{ + "ParameterKey":{"shape":"ParameterKey"}, + "DefaultValue":{"shape":"DefaultValue"}, + "ParameterType":{"shape":"ParameterType"}, + "IsNoEcho":{"shape":"NoEcho"}, + "Description":{"shape":"Description"}, + "ParameterConstraints":{"shape":"ParameterConstraints"} + } + }, + "ProvisioningArtifactParameters":{ + "type":"list", + "member":{"shape":"ProvisioningArtifactParameter"} + }, + "ProvisioningArtifacts":{ + "type":"list", + "member":{"shape":"ProvisioningArtifact"} + }, + "ProvisioningParameter":{ + "type":"structure", + "members":{ + "Key":{"shape":"ParameterKey"}, + "Value":{"shape":"ParameterValue"} + } + }, + "ProvisioningParameters":{ + "type":"list", + "member":{"shape":"ProvisioningParameter"} + }, + "RecordDetail":{ + "type":"structure", + "members":{ + "RecordId":{"shape":"Id"}, + "ProvisionedProductName":{"shape":"ProvisionedProductName"}, + "Status":{"shape":"RecordStatus"}, + "CreatedTime":{"shape":"CreatedTime"}, + "UpdatedTime":{"shape":"UpdatedTime"}, + "ProvisionedProductType":{"shape":"ProvisionedProductType"}, + "RecordType":{"shape":"RecordType"}, + "ProvisionedProductId":{"shape":"Id"}, + "ProductId":{"shape":"Id"}, + "ProvisioningArtifactId":{"shape":"Id"}, + "PathId":{"shape":"Id"}, + "RecordErrors":{"shape":"RecordErrors"}, + "RecordTags":{"shape":"RecordTags"} + } + }, + "RecordDetails":{ + "type":"list", + "member":{"shape":"RecordDetail"} + }, + "RecordError":{ + "type":"structure", + "members":{ + "Code":{"shape":"ErrorCode"}, + "Description":{"shape":"ErrorDescription"} + } + }, + "RecordErrors":{ + "type":"list", + "member":{"shape":"RecordError"} + }, + "RecordOutput":{ + "type":"structure", + "members":{ + "OutputKey":{"shape":"OutputKey"}, + "OutputValue":{"shape":"OutputValue"}, + "Description":{"shape":"Description"} + } + }, + "RecordOutputs":{ + "type":"list", + "member":{"shape":"RecordOutput"} + }, + "RecordStatus":{ + "type":"string", + "enum":[ + "IN_PROGRESS", + "SUCCEEDED", + "ERROR" + ] + }, + "RecordTag":{ + "type":"structure", + "members":{ + "Key":{"shape":"RecordTagKey"}, + "Value":{"shape":"RecordTagValue"} + } + }, + "RecordTagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-%@]*)$" + }, + "RecordTagValue":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-%@]*)$" + }, + "RecordTags":{ + "type":"list", + "member":{"shape":"RecordTag"}, + "max":10 + }, + "RecordType":{"type":"string"}, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ScanProvisionedProductsInput":{ + "type":"structure", + "members":{ + "AcceptLanguage":{"shape":"AcceptLanguage"}, + "PageSize":{"shape":"PageSize"}, + "PageToken":{"shape":"PageToken"} + } + }, + "ScanProvisionedProductsOutput":{ + "type":"structure", + "members":{ + "ProvisionedProducts":{"shape":"ProvisionedProductDetails"}, + "NextPageToken":{"shape":"PageToken"} + } + }, + "SearchFilterKey":{"type":"string"}, + "SearchFilterValue":{"type":"string"}, + "SearchProductsInput":{ + "type":"structure", + "members":{ + "AcceptLanguage":{"shape":"AcceptLanguage"}, + "Filters":{"shape":"ProductViewFilters"}, + "PageSize":{"shape":"PageSize"}, + "SortBy":{"shape":"ProductViewSortBy"}, + "SortOrder":{"shape":"SortOrder"}, + "PageToken":{"shape":"PageToken"} + } + }, + "SearchProductsOutput":{ + "type":"structure", + "members":{ + 
"ProductViewSummaries":{"shape":"ProductViewSummaries"}, + "ProductViewAggregations":{"shape":"ProductViewAggregations"}, + "NextPageToken":{"shape":"PageToken"} + } + }, + "SortOrder":{ + "type":"string", + "enum":[ + "ASCENDING", + "DESCENDING" + ] + }, + "SupportDescription":{"type":"string"}, + "SupportEmail":{"type":"string"}, + "SupportUrl":{"type":"string"}, + "Tag":{ + "type":"structure", + "members":{ + "Key":{"shape":"TagKey"}, + "Value":{"shape":"TagValue"} + } + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-%@]*)$" + }, + "TagValue":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-%@]*)$" + }, + "Tags":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":10 + }, + "TerminateProvisionedProductInput":{ + "type":"structure", + "required":["TerminateToken"], + "members":{ + "ProvisionedProductName":{"shape":"ProvisionedProductNameOrArn"}, + "ProvisionedProductId":{"shape":"Id"}, + "TerminateToken":{ + "shape":"IdempotencyToken", + "idempotencyToken":true + }, + "IgnoreErrors":{"shape":"IgnoreErrors"}, + "AcceptLanguage":{"shape":"AcceptLanguage"} + } + }, + "TerminateProvisionedProductOutput":{ + "type":"structure", + "members":{ + "RecordDetail":{"shape":"RecordDetail"} + } + }, + "UpdateProvisionedProductInput":{ + "type":"structure", + "required":["UpdateToken"], + "members":{ + "AcceptLanguage":{"shape":"AcceptLanguage"}, + "ProvisionedProductName":{"shape":"ProvisionedProductNameOrArn"}, + "ProvisionedProductId":{"shape":"Id"}, + "ProductId":{"shape":"Id"}, + "ProvisioningArtifactId":{"shape":"Id"}, + "PathId":{"shape":"Id"}, + "ProvisioningParameters":{"shape":"UpdateProvisioningParameters"}, + "UpdateToken":{ + "shape":"IdempotencyToken", + "idempotencyToken":true + } + } + }, + "UpdateProvisionedProductOutput":{ + "type":"structure", + "members":{ + "RecordDetail":{"shape":"RecordDetail"} + } + }, + "UpdateProvisioningParameter":{ + "type":"structure", + "members":{ + "Key":{"shape":"ParameterKey"}, + "Value":{"shape":"ParameterValue"}, + "UsePreviousValue":{"shape":"UsePreviousValue"} + } + }, + "UpdateProvisioningParameters":{ + "type":"list", + "member":{"shape":"UpdateProvisioningParameter"} + }, + "UpdatedTime":{"type":"timestamp"}, + "UsageInstruction":{ + "type":"structure", + "members":{ + "Type":{"shape":"InstructionType"}, + "Value":{"shape":"InstructionValue"} + } + }, + "UsageInstructions":{ + "type":"list", + "member":{"shape":"UsageInstruction"} + }, + "UsePreviousValue":{"type":"boolean"} + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/servicecatalog/2015-12-10/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/servicecatalog/2015-12-10/docs-2.json new file mode 100644 index 000000000..9fd6e0e40 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/servicecatalog/2015-12-10/docs-2.json @@ -0,0 +1,789 @@ +{ + "version": "2.0", + "service": "AWS Service Catalog

    Overview

    AWS Service Catalog allows organizations to create and manage catalogs of IT services that are approved for use on AWS. This documentation provides reference material for the AWS Service Catalog end user API. To get the most out of this documentation, you need to be familiar with the terminology discussed in AWS Service Catalog Concepts.

    Additional Resources

    ", + "operations": { + "DescribeProduct": "

    Retrieves information about a specified product.

    This operation is functionally identical to DescribeProductView except that it takes as input ProductId instead of ProductViewId.
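
    A minimal sketch against the generated Go client for these models (region and ProductId are placeholders):

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/servicecatalog"
    )

    func main() {
        sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
        svc := servicecatalog.New(sess)

        out, err := svc.DescribeProduct(&servicecatalog.DescribeProductInput{
            Id: aws.String("prod-xxxxxxxxxxxxx"), // placeholder ProductId
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(aws.StringValue(out.ProductViewSummary.Name))
        for _, pa := range out.ProvisioningArtifacts {
            fmt.Println(aws.StringValue(pa.Id), aws.StringValue(pa.Name))
        }
    }

    DescribeProductView is called the same way, passing the ProductViewId as Id.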

    ", + "DescribeProductView": "

    Retrieves information about a specified product.

    This operation is functionally identical to DescribeProduct except that it takes as input ProductViewId instead of ProductId.

    ", + "DescribeProvisioningParameters": "

    Provides information about parameters required to provision a specified product in a specified manner. Use this operation to obtain the list of ProvisioningArtifactParameters parameters available to call the ProvisionProduct operation for the specified product.
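
    A sketch of the lookup (svc is a *servicecatalog.ServiceCatalog client built as in the DescribeProduct example; identifiers are placeholders):

    func listParameters(svc *servicecatalog.ServiceCatalog) error {
        out, err := svc.DescribeProvisioningParameters(&servicecatalog.DescribeProvisioningParametersInput{
            ProductId:              aws.String("prod-xxxxxxxxxxxxx"),
            ProvisioningArtifactId: aws.String("pa-xxxxxxxxxxxxx"),
        })
        if err != nil {
            return err
        }
        for _, p := range out.ProvisioningArtifactParameters {
            // Each parameter carries its key, type, default value, and constraints.
            fmt.Println(aws.StringValue(p.ParameterKey), aws.StringValue(p.DefaultValue))
        }
        return nil
    }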

    ", + "DescribeRecord": "

    Retrieves a paginated list of the full details of a specific request. Use this operation after calling a request operation (ProvisionProduct, TerminateProvisionedProduct, or UpdateProvisionedProduct).
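
    A sketch of polling a request record (svc as in the earlier example; recordID is the RecordId returned by the request operation):

    func showRecord(svc *servicecatalog.ServiceCatalog, recordID string) error {
        out, err := svc.DescribeRecord(&servicecatalog.DescribeRecordInput{
            Id: aws.String(recordID),
        })
        if err != nil {
            return err
        }
        fmt.Println(aws.StringValue(out.RecordDetail.Status)) // IN_PROGRESS, SUCCEEDED, or ERROR
        for _, o := range out.RecordOutputs {
            fmt.Println(aws.StringValue(o.OutputKey), "=", aws.StringValue(o.OutputValue))
        }
        return nil
    }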

    ", + "ListLaunchPaths": "

    Returns a paginated list of all paths to a specified product. A path is how the user gains access to a specified product, and is required when provisioning a product. A path also determines the constraints put on the product.
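
    A sketch (svc as in the earlier example; ProductId is a placeholder):

    func listPaths(svc *servicecatalog.ServiceCatalog) error {
        out, err := svc.ListLaunchPaths(&servicecatalog.ListLaunchPathsInput{
            ProductId: aws.String("prod-xxxxxxxxxxxxx"),
        })
        if err != nil {
            return err
        }
        for _, lp := range out.LaunchPathSummaries {
            // lp.Name is the portfolio name; lp.Id is the PathId used when provisioning.
            fmt.Println(aws.StringValue(lp.Id), aws.StringValue(lp.Name))
        }
        return nil
    }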

    ", + "ListRecordHistory": "

    Returns a paginated list of all performed requests, in the form of RecordDetails objects that are filtered as specified.
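
    A sketch (svc as in the earlier example); further pages are fetched by feeding NextPageToken back in as PageToken:

    func recentRecords(svc *servicecatalog.ServiceCatalog) error {
        out, err := svc.ListRecordHistory(&servicecatalog.ListRecordHistoryInput{
            PageSize: aws.Int64(20),
        })
        if err != nil {
            return err
        }
        for _, rd := range out.RecordDetails {
            fmt.Println(aws.StringValue(rd.RecordId), aws.StringValue(rd.RecordType), aws.StringValue(rd.Status))
        }
        return nil
    }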

    ", + "ProvisionProduct": "

    Requests provisioning of a specified product. A ProvisionedProduct is a resourced instance of a product. For example, provisioning a CloudFormation-template-backed product results in launching a CloudFormation stack and all the underlying resources that come with it.

    You can check the status of this request using the DescribeRecord operation.
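
    A sketch (svc as in the earlier example; all identifiers, names, and parameter values are placeholders):

    func provision(svc *servicecatalog.ServiceCatalog) (*string, error) {
        out, err := svc.ProvisionProduct(&servicecatalog.ProvisionProductInput{
            ProductId:              aws.String("prod-xxxxxxxxxxxxx"),
            ProvisioningArtifactId: aws.String("pa-xxxxxxxxxxxxx"),
            ProvisionedProductName: aws.String("my-stack"),
            ProvisioningParameters: []*servicecatalog.ProvisioningParameter{
                {Key: aws.String("InstanceType"), Value: aws.String("t2.micro")},
            },
            // Idempotency token (placeholder); must be unique per distinct request.
            ProvisionToken: aws.String("prov-req-0001"),
        })
        if err != nil {
            return nil, err
        }
        return out.RecordDetail.RecordId, nil // poll this with DescribeRecord
    }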

    ", + "ScanProvisionedProducts": "

    Returns a paginated list of all the ProvisionedProduct objects that are currently available (not terminated).
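
    No paginator is defined for this operation in the patch, so a manual NextPageToken loop is one way to walk all pages (svc as in the earlier example):

    func scanAll(svc *servicecatalog.ServiceCatalog) error {
        input := &servicecatalog.ScanProvisionedProductsInput{PageSize: aws.Int64(20)}
        for {
            out, err := svc.ScanProvisionedProducts(input)
            if err != nil {
                return err
            }
            for _, pp := range out.ProvisionedProducts {
                fmt.Println(aws.StringValue(pp.Name), aws.StringValue(pp.Status))
            }
            if out.NextPageToken == nil {
                return nil // no more pages
            }
            input.PageToken = out.NextPageToken
        }
    }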

    ", + "SearchProducts": "

    Returns a paginated list of all the Products objects to which the caller has access.

    The output of this operation can be used as input for other operations, such as DescribeProductView.
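
    A sketch using the generated filter and sort constants (svc as in the earlier example; the search term is a placeholder):

    func search(svc *servicecatalog.ServiceCatalog) error {
        out, err := svc.SearchProducts(&servicecatalog.SearchProductsInput{
            Filters: map[string][]*string{
                servicecatalog.ProductViewFilterByFullTextSearch: {aws.String("database")},
            },
            SortBy:    aws.String(servicecatalog.ProductViewSortByTitle),
            SortOrder: aws.String(servicecatalog.SortOrderAscending),
        })
        if err != nil {
            return err
        }
        for _, s := range out.ProductViewSummaries {
            fmt.Println(aws.StringValue(s.ProductId), aws.StringValue(s.Name))
        }
        return nil
    }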

    ", + "TerminateProvisionedProduct": "

    Requests termination of an existing ProvisionedProduct object. If there are Tags associated with the object, they are terminated when the ProvisionedProduct object is terminated.

    This operation does not delete any records associated with the ProvisionedProduct object.

    You can check the status of this request using the DescribeRecord operation.
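
    A sketch (svc as in the earlier example; the name and token are placeholders):

    func terminate(svc *servicecatalog.ServiceCatalog) (*string, error) {
        out, err := svc.TerminateProvisionedProduct(&servicecatalog.TerminateProvisionedProductInput{
            ProvisionedProductName: aws.String("my-stack"), // or ProvisionedProductId, never both
            IgnoreErrors:           aws.Bool(false),
            // Idempotency token (placeholder); only valid during termination.
            TerminateToken: aws.String("term-req-0001"),
        })
        if err != nil {
            return nil, err
        }
        return out.RecordDetail.RecordId, nil // poll with DescribeRecord
    }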

    ", + "UpdateProvisionedProduct": "

    Requests updates to the configuration of an existing ProvisionedProduct object. If there are tags associated with the object, they cannot be updated or added with this operation. Depending on the specific updates requested, this operation may update with no interruption, with some interruption, or replace the ProvisionedProduct object entirely.

    You can check the status of this request using the DescribeRecord operation.
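
    A sketch (svc as in the earlier example; identifiers, values, and the token are placeholders). UsePreviousValue keeps a parameter at its current setting:

    func update(svc *servicecatalog.ServiceCatalog) (*string, error) {
        out, err := svc.UpdateProvisionedProduct(&servicecatalog.UpdateProvisionedProductInput{
            ProvisionedProductId: aws.String("pp-xxxxxxxxxxxxx"),
            ProvisioningParameters: []*servicecatalog.UpdateProvisioningParameter{
                {Key: aws.String("InstanceType"), Value: aws.String("t2.small")},
                {Key: aws.String("KeyName"), UsePreviousValue: aws.Bool(true)},
            },
            // Idempotency token (placeholder); must be unique per distinct update.
            UpdateToken: aws.String("update-req-0001"),
        })
        if err != nil {
            return nil, err
        }
        return out.RecordDetail.RecordId, nil // poll with DescribeRecord
    }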

    " + }, + "shapes": { + "AcceptLanguage": { + "base": null, + "refs": { + "DescribeProductInput$AcceptLanguage": "

    Optional language code. Supported language codes are as follows:

    \"en\" (English)

    \"jp\" (Japanese)

    \"zh\" (Chinese)

    If no code is specified, \"en\" is used as the default.

    ", + "DescribeProductViewInput$AcceptLanguage": "

    Optional language code. Supported language codes are as follows:

    \"en\" (English)

    \"jp\" (Japanese)

    \"zh\" (Chinese)

    If no code is specified, \"en\" is used as the default.

    ", + "DescribeProvisioningParametersInput$AcceptLanguage": "

    Optional language code. Supported language codes are as follows:

    \"en\" (English)

    \"jp\" (Japanese)

    \"zh\" (Chinese)

    If no code is specified, \"en\" is used as the default.

    ", + "DescribeRecordInput$AcceptLanguage": "

    Optional language code. Supported language codes are as follows:

    \"en\" (English)

    \"jp\" (Japanese)

    \"zh\" (Chinese)

    If no code is specified, \"en\" is used as the default.

    ", + "ListLaunchPathsInput$AcceptLanguage": "

    Optional language code. Supported language codes are as follows:

    \"en\" (English)

    \"jp\" (Japanese)

    \"zh\" (Chinese)

    If no code is specified, \"en\" is used as the default.

    ", + "ListRecordHistoryInput$AcceptLanguage": "

    Optional language code. Supported language codes are as follows:

    \"en\" (English)

    \"jp\" (Japanese)

    \"zh\" (Chinese)

    If no code is specified, \"en\" is used as the default.

    ", + "ProvisionProductInput$AcceptLanguage": "

    Optional language code. Supported language codes are as follows:

    \"en\" (English)

    \"jp\" (Japanese)

    \"zh\" (Chinese)

    If no code is specified, \"en\" is used as the default.

    ", + "ScanProvisionedProductsInput$AcceptLanguage": "

    Optional language code. Supported language codes are as follows:

    \"en\" (English)

    \"jp\" (Japanese)

    \"zh\" (Chinese)

    If no code is specified, \"en\" is used as the default.

    ", + "SearchProductsInput$AcceptLanguage": "

    Optional language code. Supported language codes are as follows:

    \"en\" (English)

    \"jp\" (Japanese)

    \"zh\" (Chinese)

    If no code is specified, \"en\" is used as the default.

    ", + "TerminateProvisionedProductInput$AcceptLanguage": "

    Optional language code. Supported language codes are as follows:

    \"en\" (English)

    \"jp\" (Japanese)

    \"zh\" (Chinese)

    If no code is specified, \"en\" is used as the default.

    ", + "UpdateProvisionedProductInput$AcceptLanguage": "

    Optional language code. Supported language codes are as follows:

    \"en\" (English)

    \"jp\" (Japanese)

    \"zh\" (Chinese)

    If no code is specified, \"en\" is used as the default.

    " + } + }, + "AllowedValue": { + "base": null, + "refs": { + "AllowedValues$member": null + } + }, + "AllowedValues": { + "base": null, + "refs": { + "ParameterConstraints$AllowedValues": "

    The values that the administrator has allowed for the parameter.

    " + } + }, + "ApproximateCount": { + "base": null, + "refs": { + "ProductViewAggregationValue$ApproximateCount": "

    An approximate count of the products that match the value.

    " + } + }, + "AttributeValue": { + "base": null, + "refs": { + "ProductViewAggregationValue$Value": "

    The value of the product view aggregation.

    " + } + }, + "ConstraintDescription": { + "base": null, + "refs": { + "ConstraintSummary$Description": "

    The text description of the constraint.

    " + } + }, + "ConstraintSummaries": { + "base": null, + "refs": { + "DescribeProvisioningParametersOutput$ConstraintSummaries": "

    The list of constraint summaries that apply to provisioning this product.

    ", + "LaunchPathSummary$ConstraintSummaries": "

    List of constraints on the portfolio-product relationship.

    " + } + }, + "ConstraintSummary": { + "base": "

    An administrator-specified constraint to apply when provisioning a product.

    ", + "refs": { + "ConstraintSummaries$member": null + } + }, + "ConstraintType": { + "base": null, + "refs": { + "ConstraintSummary$Type": "

    The type of the constraint.

    " + } + }, + "CreatedTime": { + "base": null, + "refs": { + "ProvisionedProductDetail$CreatedTime": "

    The time the ProvisionedProduct was created.

    ", + "RecordDetail$CreatedTime": "

    The time when the record for the ProvisionedProduct object was created.

    " + } + }, + "DefaultValue": { + "base": null, + "refs": { + "ProvisioningArtifactParameter$DefaultValue": "

    The default value for this parameter.

    " + } + }, + "DescribeProductInput": { + "base": null, + "refs": { + } + }, + "DescribeProductOutput": { + "base": null, + "refs": { + } + }, + "DescribeProductViewInput": { + "base": null, + "refs": { + } + }, + "DescribeProductViewOutput": { + "base": null, + "refs": { + } + }, + "DescribeProvisioningParametersInput": { + "base": null, + "refs": { + } + }, + "DescribeProvisioningParametersOutput": { + "base": null, + "refs": { + } + }, + "DescribeRecordInput": { + "base": null, + "refs": { + } + }, + "DescribeRecordOutput": { + "base": null, + "refs": { + } + }, + "Description": { + "base": null, + "refs": { + "ProvisioningArtifactParameter$Description": "

    The text description of the parameter.

    ", + "RecordOutput$Description": "

    The text description of the output.

    " + } + }, + "DuplicateResourceException": { + "base": "

    The specified resource is a duplicate.

    ", + "refs": { + } + }, + "ErrorCode": { + "base": null, + "refs": { + "RecordError$Code": "

    The numeric value of the error.

    " + } + }, + "ErrorDescription": { + "base": null, + "refs": { + "RecordError$Description": "

    The text description of the error.

    " + } + }, + "HasDefaultPath": { + "base": null, + "refs": { + "ProductViewSummary$HasDefaultPath": "

    A value of false indicates that the product does not have a default path, while a value of true indicates that it does. If it's false, call ListLaunchPaths to disambiguate between paths. If true, ListLaunchPaths is not required, and the output of the ProductViewSummary operation can be used directly with DescribeProvisioningParameters.
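
    One way to encode that decision in Go (svc as in the earlier example; real code would let the caller choose among multiple paths rather than taking the first):

    func pathIDFor(svc *servicecatalog.ServiceCatalog, s *servicecatalog.ProductViewSummary) (*string, error) {
        if aws.BoolValue(s.HasDefaultPath) {
            return nil, nil // PathId can be omitted entirely
        }
        out, err := svc.ListLaunchPaths(&servicecatalog.ListLaunchPathsInput{ProductId: s.ProductId})
        if err != nil {
            return nil, err
        }
        if len(out.LaunchPathSummaries) == 0 {
            return nil, fmt.Errorf("no launch paths for product %s", aws.StringValue(s.ProductId))
        }
        return out.LaunchPathSummaries[0].Id, nil
    }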

    " + } + }, + "Id": { + "base": null, + "refs": { + "DescribeProductInput$Id": "

    The ProductId of the product to describe.

    ", + "DescribeProductViewInput$Id": "

    The ProductViewId of the product to describe.

    ", + "DescribeProvisioningParametersInput$ProductId": "

    The identifier of the product.

    ", + "DescribeProvisioningParametersInput$ProvisioningArtifactId": "

    The provisioning artifact identifier for this product.

    ", + "DescribeProvisioningParametersInput$PathId": "

    The identifier of the path for this product's provisioning. This value is optional if the product has a default path, and is required if there is more than one path for the specified product.

    ", + "DescribeRecordInput$Id": "

    The record identifier of the ProvisionedProduct object for which to retrieve output information. This is the RecordDetail.RecordId obtained from the request operation's response.

    ", + "LaunchPathSummary$Id": "

    The unique identifier of the product path.

    ", + "ListLaunchPathsInput$ProductId": "

    Identifies the product for which to retrieve LaunchPathSummaries information.

    ", + "ProductViewSummary$Id": "

    The product view identifier.

    ", + "ProductViewSummary$ProductId": "

    The product identifier.

    ", + "ProvisionProductInput$ProductId": "

    The identifier of the product.

    ", + "ProvisionProductInput$ProvisioningArtifactId": "

    The provisioning artifact identifier for this product.

    ", + "ProvisionProductInput$PathId": "

    The identifier of the path for this product's provisioning. This value is optional if the product has a default path, and is required if there is more than one path for the specified product.

    ", + "ProvisioningArtifact$Id": "

    The identifier for the artifact.

    ", + "RecordDetail$RecordId": "

    The identifier of the ProvisionedProduct object record.

    ", + "RecordDetail$ProvisionedProductId": "

    The identifier of the ProvisionedProduct object.

    ", + "RecordDetail$ProductId": "

    The identifier of the product.

    ", + "RecordDetail$ProvisioningArtifactId": "

    The provisioning artifact identifier for this product.

    ", + "RecordDetail$PathId": "

    The identifier of the path for this product's provisioning.

    ", + "TerminateProvisionedProductInput$ProvisionedProductId": "

    The identifier of the ProvisionedProduct object to terminate. You must specify either ProvisionedProductName or ProvisionedProductId, but not both.

    ", + "UpdateProvisionedProductInput$ProvisionedProductId": "

    The identifier of the ProvisionedProduct object to update. You must specify either ProvisionedProductName or ProvisionedProductId, but not both.

    ", + "UpdateProvisionedProductInput$ProductId": "

    The identifier of the ProvisionedProduct object.

    ", + "UpdateProvisionedProductInput$ProvisioningArtifactId": "

    The provisioning artifact identifier for this product.

    ", + "UpdateProvisionedProductInput$PathId": "

    The identifier of the path to use in the updated ProvisionedProduct object. This value is optional if the product has a default path, and is required if there is more than one path for the specified product.

    " + } + }, + "IdempotencyToken": { + "base": null, + "refs": { + "ProvisionProductInput$ProvisionToken": "

    An idempotency token that uniquely identifies the provisioning request.

    ", + "ProvisionedProductDetail$IdempotencyToken": "

    An idempotency token that uniquely identifies this ProvisionedProduct.

    ", + "TerminateProvisionedProductInput$TerminateToken": "

    An idempotency token that uniquely identifies the termination request. This token is only valid during the termination process. After the ProvisionedProduct object is terminated, further requests to terminate the same ProvisionedProduct object always return ResourceNotFound regardless of the value of TerminateToken.

    ", + "UpdateProvisionedProductInput$UpdateToken": "

    The idempotency token that uniquely identifies the provisioning update request.

    " + } + }, + "IgnoreErrors": { + "base": null, + "refs": { + "TerminateProvisionedProductInput$IgnoreErrors": "

    Optional Boolean parameter. If set to true, AWS Service Catalog stops managing the specified ProvisionedProduct object even if it cannot delete the underlying resources.

    " + } + }, + "InstructionType": { + "base": null, + "refs": { + "UsageInstruction$Type": "

    The usage instruction type for the value.

    " + } + }, + "InstructionValue": { + "base": null, + "refs": { + "UsageInstruction$Value": "

    The usage instruction value for this type.

    " + } + }, + "InvalidParametersException": { + "base": "

    One or more parameters provided to the operation are invalid.

    ", + "refs": { + } + }, + "LastRequestId": { + "base": null, + "refs": { + "ProvisionedProductDetail$LastRecordId": "

    The record identifier of the last request performed on this ProvisionedProduct object.

    " + } + }, + "LaunchPathSummaries": { + "base": null, + "refs": { + "ListLaunchPathsOutput$LaunchPathSummaries": "

    List of launch path information summaries for the specified PageToken.

    " + } + }, + "LaunchPathSummary": { + "base": "

    Summary information about a path for a user to have access to a specified product.

    ", + "refs": { + "LaunchPathSummaries$member": null + } + }, + "ListLaunchPathsInput": { + "base": null, + "refs": { + } + }, + "ListLaunchPathsOutput": { + "base": null, + "refs": { + } + }, + "ListRecordHistoryInput": { + "base": null, + "refs": { + } + }, + "ListRecordHistoryOutput": { + "base": null, + "refs": { + } + }, + "ListRecordHistorySearchFilter": { + "base": "

    The search filter to limit results when listing request history records.

    ", + "refs": { + "ListRecordHistoryInput$SearchFilter": "

    (Optional) The filter to limit search results.

    " + } + }, + "NoEcho": { + "base": null, + "refs": { + "ProvisioningArtifactParameter$IsNoEcho": "

    If this value is true, the value for this parameter is obfuscated from view when the parameter is retrieved. This parameter is used to hide sensitive information.

    " + } + }, + "NotificationArn": { + "base": null, + "refs": { + "NotificationArns$member": null + } + }, + "NotificationArns": { + "base": null, + "refs": { + "ProvisionProductInput$NotificationArns": "

    Passed to CloudFormation. The SNS topic ARNs to which to publish stack-related events.

    " + } + }, + "OutputKey": { + "base": null, + "refs": { + "RecordOutput$OutputKey": "

    The output key.

    " + } + }, + "OutputValue": { + "base": null, + "refs": { + "RecordOutput$OutputValue": "

    The output value.

    " + } + }, + "PageSize": { + "base": null, + "refs": { + "DescribeRecordInput$PageSize": "

    The maximum number of items to return in the results. If more results exist than fit in the specified PageSize, the value of NextPageToken in the response is non-null.

    ", + "ListLaunchPathsInput$PageSize": "

    The maximum number of items to return in the results. If more results exist than fit in the specified PageSize, the value of NextPageToken in the response is non-null.

    ", + "ListRecordHistoryInput$PageSize": "

    The maximum number of items to return in the results. If more results exist than fit in the specified PageSize, the value of NextPageToken in the response is non-null.

    ", + "ScanProvisionedProductsInput$PageSize": "

    The maximum number of items to return in the results. If more results exist than fit in the specified PageSize, the value of NextPageToken in the response is non-null.

    ", + "SearchProductsInput$PageSize": "

    The maximum number of items to return in the results. If more results exist than fit in the specified PageSize, the value of NextPageToken in the response is non-null.

    " + } + }, + "PageToken": { + "base": null, + "refs": { + "DescribeRecordInput$PageToken": "

    The page token of the first page to retrieve. If null, this retrieves the first page of size PageSize.

    ", + "DescribeRecordOutput$NextPageToken": "

    The page token to use to retrieve the next page of results for this operation. If there are no more pages, this value is null.

    ", + "ListLaunchPathsInput$PageToken": "

    The page token of the first page to retrieve. If null, this retrieves the first page of size PageSize.

    ", + "ListLaunchPathsOutput$NextPageToken": "

    The page token to use to retrieve the next page of results for this operation. If there are no more pages, this value is null.

    ", + "ListRecordHistoryInput$PageToken": "

    The page token of the first page to retrieve. If null, this retrieves the first page of size PageSize.

    ", + "ListRecordHistoryOutput$NextPageToken": "

    The page token to use to retrieve the next page of results for this operation. If there are no more pages, this value is null.

    ", + "ScanProvisionedProductsInput$PageToken": "

    The page token of the first page to retrieve. If null, this retrieves the first page of size PageSize.

    ", + "ScanProvisionedProductsOutput$NextPageToken": "

    The page token to use to retrieve the next page of results for this operation. If there are no more pages, this value is null.

    ", + "SearchProductsInput$PageToken": "

    The page token of the first page to retrieve. If null, this retrieves the first page of size PageSize.

    ", + "SearchProductsOutput$NextPageToken": "

    The page token to use to retrieve the next page of results for this operation. If there are no more pages, this value is null.

    " + } + }, + "ParameterConstraints": { + "base": "

    The constraints that the administrator has put on the parameter.

    ", + "refs": { + "ProvisioningArtifactParameter$ParameterConstraints": "

    The list of constraints that the administrator has put on the parameter.

    " + } + }, + "ParameterKey": { + "base": null, + "refs": { + "ProvisioningArtifactParameter$ParameterKey": "

    The parameter key.

    ", + "ProvisioningParameter$Key": "

    The ProvisioningArtifactParameter.ParameterKey parameter from DescribeProvisioningParameters.

    ", + "UpdateProvisioningParameter$Key": "

    The ProvisioningArtifactParameter.ParameterKey parameter from DescribeProvisioningParameters.

    " + } + }, + "ParameterType": { + "base": null, + "refs": { + "ProvisioningArtifactParameter$ParameterType": "

    The parameter type.

    " + } + }, + "ParameterValue": { + "base": null, + "refs": { + "ProvisioningParameter$Value": "

    The value to use for provisioning. Any constraints on this value can be found in ProvisioningArtifactParameter for Key.

    ", + "UpdateProvisioningParameter$Value": "

    The value to use for updating the product provisioning. Any constraints on this value can be found in the ProvisioningArtifactParameter parameter for Key.

    " + } + }, + "PortfolioName": { + "base": null, + "refs": { + "LaunchPathSummary$Name": "

    Corresponds to the name of the portfolio to which the user was assigned.

    " + } + }, + "ProductType": { + "base": null, + "refs": { + "ProductViewSummary$Type": "

    The product type. Contact the product administrator for the significance of this value.

    " + } + }, + "ProductViewAggregationType": { + "base": null, + "refs": { + "ProductViewAggregations$key": null + } + }, + "ProductViewAggregationValue": { + "base": "

    A single product view aggregation value/count pair, containing metadata about each product to which the calling user has access.

    ", + "refs": { + "ProductViewAggregationValues$member": null + } + }, + "ProductViewAggregationValues": { + "base": null, + "refs": { + "ProductViewAggregations$value": null + } + }, + "ProductViewAggregations": { + "base": null, + "refs": { + "SearchProductsOutput$ProductViewAggregations": "

    A list of the product view aggregation value objects.

    " + } + }, + "ProductViewDistributor": { + "base": null, + "refs": { + "ProductViewSummary$Distributor": "

    The distributor of the product. Contact the product administrator for the significance of this value.

    " + } + }, + "ProductViewFilterBy": { + "base": null, + "refs": { + "ProductViewFilters$key": null + } + }, + "ProductViewFilterValue": { + "base": null, + "refs": { + "ProductViewFilterValues$member": null + } + }, + "ProductViewFilterValues": { + "base": null, + "refs": { + "ProductViewFilters$value": null + } + }, + "ProductViewFilters": { + "base": null, + "refs": { + "SearchProductsInput$Filters": "

    (Optional) The list of filters with which to limit search results. If no search filters are specified, the output is all the products to which the calling user has access.

    " + } + }, + "ProductViewName": { + "base": null, + "refs": { + "ProductViewSummary$Name": "

    The name of the product.

    " + } + }, + "ProductViewOwner": { + "base": null, + "refs": { + "ProductViewSummary$Owner": "

    The owner of the product. Contact the product administrator for the significance of this value.

    " + } + }, + "ProductViewShortDescription": { + "base": null, + "refs": { + "ProductViewSummary$ShortDescription": "

    Short description of the product.

    " + } + }, + "ProductViewSortBy": { + "base": null, + "refs": { + "SearchProductsInput$SortBy": "

    (Optional) The sort field specifier. If no value is specified, results are not sorted.

    " + } + }, + "ProductViewSummaries": { + "base": null, + "refs": { + "SearchProductsOutput$ProductViewSummaries": "

    A list of the product view summary objects.

    " + } + }, + "ProductViewSummary": { + "base": "

    The summary metadata about the specified product.

    ", + "refs": { + "DescribeProductOutput$ProductViewSummary": "

    The summary metadata about the specified product.

    ", + "DescribeProductViewOutput$ProductViewSummary": "

    The summary metadata about the specified product.

    ", + "ProductViewSummaries$member": null + } + }, + "ProvisionProductInput": { + "base": null, + "refs": { + } + }, + "ProvisionProductOutput": { + "base": null, + "refs": { + } + }, + "ProvisionedProductDetail": { + "base": "

    Detailed information about a ProvisionedProduct object.

    ", + "refs": { + "ProvisionedProductDetails$member": null + } + }, + "ProvisionedProductDetails": { + "base": null, + "refs": { + "ScanProvisionedProductsOutput$ProvisionedProducts": "

    A list of ProvisionedProduct detail objects.

    " + } + }, + "ProvisionedProductId": { + "base": null, + "refs": { + "ProvisionedProductDetail$Id": "

    The identifier of the ProvisionedProduct object.

    " + } + }, + "ProvisionedProductName": { + "base": null, + "refs": { + "ProvisionProductInput$ProvisionedProductName": "

    A user-friendly name to identify the ProvisionedProduct object. This value must be unique for the AWS account and cannot be updated after the product is provisioned.

    ", + "RecordDetail$ProvisionedProductName": "

    The user-friendly name of the ProvisionedProduct object.

    " + } + }, + "ProvisionedProductNameOrArn": { + "base": null, + "refs": { + "ProvisionedProductDetail$Name": "

    The user-friendly name of the ProvisionedProduct object.

    ", + "ProvisionedProductDetail$Arn": "

    The ARN associated with the ProvisionedProduct object.

    ", + "TerminateProvisionedProductInput$ProvisionedProductName": "

    The name of the ProvisionedProduct object to terminate. You must specify either ProvisionedProductName or ProvisionedProductId, but not both.
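    [Editor's note] A minimal sketch of the name-or-id rule with the vendored v1 client; the TerminateToken idempotency field is not shown in this excerpt and is included here as an assumption, and all values are placeholders:

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/servicecatalog"
    )

    func main() {
        svc := servicecatalog.New(session.Must(session.NewSession()))

        // Identify the target by ProvisionedProductName OR ProvisionedProductId,
        // never both; supplying both fails request validation.
        _, err := svc.TerminateProvisionedProduct(&servicecatalog.TerminateProvisionedProductInput{
            ProvisionedProductName: aws.String("my-provisioned-product"),
            TerminateToken:         aws.String("terminate-token-0001"), // assumed idempotency token
        })
        if err != nil {
            panic(err)
        }
    }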

    ", + "UpdateProvisionedProductInput$ProvisionedProductName": "

    The updated name of the ProvisionedProduct object. You must specify either ProvisionedProductName or ProvisionedProductId, but not both.

    " + } + }, + "ProvisionedProductStatusMessage": { + "base": null, + "refs": { + "ProvisionedProductDetail$StatusMessage": "

    The current status message of the ProvisionedProduct.

    " + } + }, + "ProvisionedProductType": { + "base": null, + "refs": { + "ProvisionedProductDetail$Type": "

    The type of the ProvisionedProduct object.

    ", + "RecordDetail$ProvisionedProductType": "

    The type of the ProvisionedProduct object.

    " + } + }, + "ProvisioningArtifact": { + "base": "

    Contains information indicating the ways in which a product can be provisioned.

    ", + "refs": { + "ProvisioningArtifacts$member": null + } + }, + "ProvisioningArtifactCreatedTime": { + "base": null, + "refs": { + "ProvisioningArtifact$CreatedTime": "

    The time that the artifact was created by the Administrator.

    " + } + }, + "ProvisioningArtifactDescription": { + "base": null, + "refs": { + "ProvisioningArtifact$Description": "

    The text description of the artifact.

    " + } + }, + "ProvisioningArtifactName": { + "base": null, + "refs": { + "ProvisioningArtifact$Name": "

    The name of the artifact.

    " + } + }, + "ProvisioningArtifactParameter": { + "base": "

    A parameter used to successfully provision the product. This value includes a list of allowable values and additional metadata.

    ", + "refs": { + "ProvisioningArtifactParameters$member": null + } + }, + "ProvisioningArtifactParameters": { + "base": null, + "refs": { + "DescribeProvisioningParametersOutput$ProvisioningArtifactParameters": "

    The list of parameters used to successfully provision the product. Each parameter includes a list of allowable values and additional metadata about each parameter.

    " + } + }, + "ProvisioningArtifacts": { + "base": null, + "refs": { + "DescribeProductOutput$ProvisioningArtifacts": "

    A list of provisioning artifact objects for the specified product. The ProvisioningArtifacts parameter represents the ways in which the specified product can be provisioned.

    ", + "DescribeProductViewOutput$ProvisioningArtifacts": "

    A list of provisioning artifact objects for the specified product. The ProvisioningArtifacts represent the ways in which the specified product can be provisioned.

    " + } + }, + "ProvisioningParameter": { + "base": "

    The parameter key/value pairs used to provision a product.

    ", + "refs": { + "ProvisioningParameters$member": null + } + }, + "ProvisioningParameters": { + "base": null, + "refs": { + "ProvisionProductInput$ProvisioningParameters": "

    Parameters specified by the administrator that are required for provisioning the product.

    " + } + }, + "RecordDetail": { + "base": "

    The full details of a specific ProvisionedProduct object.

    ", + "refs": { + "DescribeRecordOutput$RecordDetail": "

    Detailed record information for the specified product.

    ", + "ProvisionProductOutput$RecordDetail": "

    The detailed result of the ProvisionProduct request, containing the inputs made to that request, the current state of the request, a pointer to the ProvisionedProduct object of the request, and a list of any errors that the request encountered.
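    [Editor's note] A minimal sketch of a ProvisionProduct call and the RecordDetail it returns, using the vendored v1 client; the ids, the ProvisionToken idempotency field, and the parameter values are placeholders/assumptions, not values from this model:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/servicecatalog"
    )

    func main() {
        svc := servicecatalog.New(session.Must(session.NewSession()))

        out, err := svc.ProvisionProduct(&servicecatalog.ProvisionProductInput{
            ProductId:              aws.String("prod-examplep1"),
            ProvisioningArtifactId: aws.String("pa-exampleart1"),
            ProvisionedProductName: aws.String("my-provisioned-product"), // must be unique per account
            ProvisionToken:         aws.String("provision-token-0001"),   // assumed idempotency token
            ProvisioningParameters: []*servicecatalog.ProvisioningParameter{
                {Key: aws.String("InstanceType"), Value: aws.String("t2.micro")},
            },
        })
        if err != nil {
            panic(err)
        }
        // RecordDetail carries the request state and a pointer to the new object.
        fmt.Println(aws.StringValue(out.RecordDetail.Status))
    }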

    ", + "RecordDetails$member": null, + "TerminateProvisionedProductOutput$RecordDetail": "

    The detailed result of the TerminateProvisionedProduct request, containing the inputs made to that request, the current state of the request, a pointer to the ProvisionedProduct object that the request is modifying, and a list of any errors that the request encountered.

    ", + "UpdateProvisionedProductOutput$RecordDetail": "

    The detailed result of the UpdateProvisionedProduct request, containing the inputs made to that request, the current state of the request, a pointer to the ProvisionedProduct object that the request is modifying, and a list of any errors that the request encountered.

    " + } + }, + "RecordDetails": { + "base": null, + "refs": { + "ListRecordHistoryOutput$RecordDetails": "

    A list of record detail objects, listed in reverse chronological order.

    " + } + }, + "RecordError": { + "base": "

    The error code and description resulting from an operation.

    ", + "refs": { + "RecordErrors$member": null + } + }, + "RecordErrors": { + "base": null, + "refs": { + "RecordDetail$RecordErrors": "

    A list of errors that occurred while processing the request.

    " + } + }, + "RecordOutput": { + "base": "

    An output for the specified Product object created as the result of a request. For example, a CloudFormation-backed product that creates an S3 bucket would have an output for the S3 bucket URL.

    ", + "refs": { + "RecordOutputs$member": null + } + }, + "RecordOutputs": { + "base": null, + "refs": { + "DescribeRecordOutput$RecordOutputs": "

    A list of outputs for the specified Product object created as the result of a request. For example, a CloudFormation-backed product that creates an S3 bucket would have an output for the S3 bucket URL.

    " + } + }, + "RecordStatus": { + "base": null, + "refs": { + "ProvisionedProductDetail$Status": "

    The current status of the ProvisionedProduct.

    ", + "RecordDetail$Status": "

    The status of the ProvisionedProduct object.

    " + } + }, + "RecordTag": { + "base": "

    A tag associated with the record, stored as a key-value pair.

    ", + "refs": { + "RecordTags$member": null + } + }, + "RecordTagKey": { + "base": null, + "refs": { + "RecordTag$Key": "

    The key for this tag.

    " + } + }, + "RecordTagValue": { + "base": null, + "refs": { + "RecordTag$Value": "

    The value for this tag.

    " + } + }, + "RecordTags": { + "base": null, + "refs": { + "RecordDetail$RecordTags": "

    List of tags associated with this record.

    " + } + }, + "RecordType": { + "base": null, + "refs": { + "RecordDetail$RecordType": "

    The record type for this record.

    " + } + }, + "ResourceNotFoundException": { + "base": "

    The specified resource was not found.

    ", + "refs": { + } + }, + "ScanProvisionedProductsInput": { + "base": null, + "refs": { + } + }, + "ScanProvisionedProductsOutput": { + "base": null, + "refs": { + } + }, + "SearchFilterKey": { + "base": null, + "refs": { + "ListRecordHistorySearchFilter$Key": "

    The filter key.

    " + } + }, + "SearchFilterValue": { + "base": null, + "refs": { + "ListRecordHistorySearchFilter$Value": "

    The filter value for Key.

    " + } + }, + "SearchProductsInput": { + "base": null, + "refs": { + } + }, + "SearchProductsOutput": { + "base": null, + "refs": { + } + }, + "SortOrder": { + "base": null, + "refs": { + "SearchProductsInput$SortOrder": "

    (Optional) The sort order specifier. If no value is specified, results are not sorted.

    " + } + }, + "SupportDescription": { + "base": null, + "refs": { + "ProductViewSummary$SupportDescription": "

    The description of the support for this Product.

    " + } + }, + "SupportEmail": { + "base": null, + "refs": { + "ProductViewSummary$SupportEmail": "

    The email contact information to obtain support for this Product.

    " + } + }, + "SupportUrl": { + "base": null, + "refs": { + "ProductViewSummary$SupportUrl": "

    The URL information to obtain support for this Product.

    " + } + }, + "Tag": { + "base": "

    Optional key/value pairs to associate with this provisioning. These tags are propagated to the resources created in the provisioning.

    ", + "refs": { + "Tags$member": null + } + }, + "TagKey": { + "base": null, + "refs": { + "Tag$Key": "

    The ProvisioningArtifactParameter.TagKey parameter from DescribeProvisioningParameters.

    " + } + }, + "TagValue": { + "base": null, + "refs": { + "Tag$Value": "

    The desired value for this key.

    " + } + }, + "Tags": { + "base": null, + "refs": { + "LaunchPathSummary$Tags": "

    List of tags used by this launch path.

    ", + "ProvisionProductInput$Tags": "

    (Optional) A list of tags to use as provisioning options.

    " + } + }, + "TerminateProvisionedProductInput": { + "base": null, + "refs": { + } + }, + "TerminateProvisionedProductOutput": { + "base": null, + "refs": { + } + }, + "UpdateProvisionedProductInput": { + "base": null, + "refs": { + } + }, + "UpdateProvisionedProductOutput": { + "base": null, + "refs": { + } + }, + "UpdateProvisioningParameter": { + "base": "

    The parameter key/value pair used to update a ProvisionedProduct object. If UsePreviousValue is set to true, Value is ignored and the value for Key is kept as previously set (current value).
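    [Editor's note] A minimal sketch of the UsePreviousValue semantics in an UpdateProvisionedProduct call with the vendored v1 client; the UpdateToken idempotency field and all names/values are placeholders or assumptions:

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/servicecatalog"
    )

    func main() {
        svc := servicecatalog.New(session.Must(session.NewSession()))

        _, err := svc.UpdateProvisionedProduct(&servicecatalog.UpdateProvisionedProductInput{
            ProvisionedProductName: aws.String("my-provisioned-product"),
            UpdateToken:            aws.String("update-token-0001"), // assumed idempotency token
            ProvisioningParameters: []*servicecatalog.UpdateProvisioningParameter{
                // Keep the current value of InstanceType; Value is ignored here.
                {Key: aws.String("InstanceType"), UsePreviousValue: aws.Bool(true)},
                // Replace the value of KeyName.
                {Key: aws.String("KeyName"), Value: aws.String("new-ssh-key")},
            },
        })
        if err != nil {
            panic(err)
        }
    }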

    ", + "refs": { + "UpdateProvisioningParameters$member": null + } + }, + "UpdateProvisioningParameters": { + "base": null, + "refs": { + "UpdateProvisionedProductInput$ProvisioningParameters": "

    A list of ProvisioningParameter objects used to update the ProvisionedProduct object.

    " + } + }, + "UpdatedTime": { + "base": null, + "refs": { + "RecordDetail$UpdatedTime": "

    The time when the record for the ProvisionedProduct object was last updated.

    " + } + }, + "UsageInstruction": { + "base": "

    Additional information provided by the administrator.

    ", + "refs": { + "UsageInstructions$member": null + } + }, + "UsageInstructions": { + "base": null, + "refs": { + "DescribeProvisioningParametersOutput$UsageInstructions": "

    Any additional metadata specifically related to the provisioning of the product. For example, see the Version field of the CloudFormation template.

    " + } + }, + "UsePreviousValue": { + "base": null, + "refs": { + "UpdateProvisioningParameter$UsePreviousValue": "

    If true, uses the currently set value for Key, ignoring UpdateProvisioningParameter.Value.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/servicecatalog/2015-12-10/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/servicecatalog/2015-12-10/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/servicecatalog/2015-12-10/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/sns/2010-03-31/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/sns/2010-03-31/api-2.json new file mode 100644 index 000000000..0dd6e238b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/sns/2010-03-31/api-2.json @@ -0,0 +1,1139 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2010-03-31", + "endpointPrefix":"sns", + "protocol":"query", + "serviceAbbreviation":"Amazon SNS", + "serviceFullName":"Amazon Simple Notification Service", + "signatureVersion":"v4", + "xmlNamespace":"http://sns.amazonaws.com/doc/2010-03-31/" + }, + "operations":{ + "AddPermission":{ + "name":"AddPermission", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddPermissionInput"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"InternalErrorException"}, + {"shape":"AuthorizationErrorException"}, + {"shape":"NotFoundException"} + ] + }, + "CheckIfPhoneNumberIsOptedOut":{ + "name":"CheckIfPhoneNumberIsOptedOut", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CheckIfPhoneNumberIsOptedOutInput"}, + "output":{ + "shape":"CheckIfPhoneNumberIsOptedOutResponse", + "resultWrapper":"CheckIfPhoneNumberIsOptedOutResult" + }, + "errors":[ + {"shape":"ThrottledException"}, + {"shape":"InternalErrorException"}, + {"shape":"InvalidParameterException"} + ] + }, + "ConfirmSubscription":{ + "name":"ConfirmSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ConfirmSubscriptionInput"}, + "output":{ + "shape":"ConfirmSubscriptionResponse", + "resultWrapper":"ConfirmSubscriptionResult" + }, + "errors":[ + {"shape":"SubscriptionLimitExceededException"}, + {"shape":"InvalidParameterException"}, + {"shape":"NotFoundException"}, + {"shape":"InternalErrorException"}, + {"shape":"AuthorizationErrorException"} + ] + }, + "CreatePlatformApplication":{ + "name":"CreatePlatformApplication", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePlatformApplicationInput"}, + "output":{ + "shape":"CreatePlatformApplicationResponse", + "resultWrapper":"CreatePlatformApplicationResult" + }, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"InternalErrorException"}, + {"shape":"AuthorizationErrorException"} + ] + }, + "CreatePlatformEndpoint":{ + "name":"CreatePlatformEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePlatformEndpointInput"}, + "output":{ + "shape":"CreateEndpointResponse", + "resultWrapper":"CreatePlatformEndpointResult" + }, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"InternalErrorException"}, + {"shape":"AuthorizationErrorException"}, + {"shape":"NotFoundException"} + ] + }, + "CreateTopic":{ + "name":"CreateTopic", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateTopicInput"}, + "output":{ + "shape":"CreateTopicResponse", + "resultWrapper":"CreateTopicResult" + }, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"TopicLimitExceededException"}, + 
{"shape":"InternalErrorException"}, + {"shape":"AuthorizationErrorException"} + ] + }, + "DeleteEndpoint":{ + "name":"DeleteEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteEndpointInput"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"InternalErrorException"}, + {"shape":"AuthorizationErrorException"} + ] + }, + "DeletePlatformApplication":{ + "name":"DeletePlatformApplication", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeletePlatformApplicationInput"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"InternalErrorException"}, + {"shape":"AuthorizationErrorException"} + ] + }, + "DeleteTopic":{ + "name":"DeleteTopic", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteTopicInput"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"InternalErrorException"}, + {"shape":"AuthorizationErrorException"}, + {"shape":"NotFoundException"} + ] + }, + "GetEndpointAttributes":{ + "name":"GetEndpointAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetEndpointAttributesInput"}, + "output":{ + "shape":"GetEndpointAttributesResponse", + "resultWrapper":"GetEndpointAttributesResult" + }, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"InternalErrorException"}, + {"shape":"AuthorizationErrorException"}, + {"shape":"NotFoundException"} + ] + }, + "GetPlatformApplicationAttributes":{ + "name":"GetPlatformApplicationAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetPlatformApplicationAttributesInput"}, + "output":{ + "shape":"GetPlatformApplicationAttributesResponse", + "resultWrapper":"GetPlatformApplicationAttributesResult" + }, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"InternalErrorException"}, + {"shape":"AuthorizationErrorException"}, + {"shape":"NotFoundException"} + ] + }, + "GetSMSAttributes":{ + "name":"GetSMSAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetSMSAttributesInput"}, + "output":{ + "shape":"GetSMSAttributesResponse", + "resultWrapper":"GetSMSAttributesResult" + }, + "errors":[ + {"shape":"ThrottledException"}, + {"shape":"InternalErrorException"}, + {"shape":"InvalidParameterException"} + ] + }, + "GetSubscriptionAttributes":{ + "name":"GetSubscriptionAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetSubscriptionAttributesInput"}, + "output":{ + "shape":"GetSubscriptionAttributesResponse", + "resultWrapper":"GetSubscriptionAttributesResult" + }, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"InternalErrorException"}, + {"shape":"NotFoundException"}, + {"shape":"AuthorizationErrorException"} + ] + }, + "GetTopicAttributes":{ + "name":"GetTopicAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetTopicAttributesInput"}, + "output":{ + "shape":"GetTopicAttributesResponse", + "resultWrapper":"GetTopicAttributesResult" + }, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"InternalErrorException"}, + {"shape":"NotFoundException"}, + {"shape":"AuthorizationErrorException"} + ] + }, + "ListEndpointsByPlatformApplication":{ + "name":"ListEndpointsByPlatformApplication", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListEndpointsByPlatformApplicationInput"}, + "output":{ + "shape":"ListEndpointsByPlatformApplicationResponse", + 
"resultWrapper":"ListEndpointsByPlatformApplicationResult" + }, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"InternalErrorException"}, + {"shape":"AuthorizationErrorException"}, + {"shape":"NotFoundException"} + ] + }, + "ListPhoneNumbersOptedOut":{ + "name":"ListPhoneNumbersOptedOut", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListPhoneNumbersOptedOutInput"}, + "output":{ + "shape":"ListPhoneNumbersOptedOutResponse", + "resultWrapper":"ListPhoneNumbersOptedOutResult" + }, + "errors":[ + {"shape":"ThrottledException"}, + {"shape":"InternalErrorException"}, + {"shape":"InvalidParameterException"} + ] + }, + "ListPlatformApplications":{ + "name":"ListPlatformApplications", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListPlatformApplicationsInput"}, + "output":{ + "shape":"ListPlatformApplicationsResponse", + "resultWrapper":"ListPlatformApplicationsResult" + }, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"InternalErrorException"}, + {"shape":"AuthorizationErrorException"} + ] + }, + "ListSubscriptions":{ + "name":"ListSubscriptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListSubscriptionsInput"}, + "output":{ + "shape":"ListSubscriptionsResponse", + "resultWrapper":"ListSubscriptionsResult" + }, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"InternalErrorException"}, + {"shape":"AuthorizationErrorException"} + ] + }, + "ListSubscriptionsByTopic":{ + "name":"ListSubscriptionsByTopic", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListSubscriptionsByTopicInput"}, + "output":{ + "shape":"ListSubscriptionsByTopicResponse", + "resultWrapper":"ListSubscriptionsByTopicResult" + }, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"InternalErrorException"}, + {"shape":"NotFoundException"}, + {"shape":"AuthorizationErrorException"} + ] + }, + "ListTopics":{ + "name":"ListTopics", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTopicsInput"}, + "output":{ + "shape":"ListTopicsResponse", + "resultWrapper":"ListTopicsResult" + }, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"InternalErrorException"}, + {"shape":"AuthorizationErrorException"} + ] + }, + "OptInPhoneNumber":{ + "name":"OptInPhoneNumber", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"OptInPhoneNumberInput"}, + "output":{ + "shape":"OptInPhoneNumberResponse", + "resultWrapper":"OptInPhoneNumberResult" + }, + "errors":[ + {"shape":"ThrottledException"}, + {"shape":"InternalErrorException"}, + {"shape":"InvalidParameterException"} + ] + }, + "Publish":{ + "name":"Publish", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PublishInput"}, + "output":{ + "shape":"PublishResponse", + "resultWrapper":"PublishResult" + }, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InternalErrorException"}, + {"shape":"NotFoundException"}, + {"shape":"EndpointDisabledException"}, + {"shape":"PlatformApplicationDisabledException"}, + {"shape":"AuthorizationErrorException"} + ] + }, + "RemovePermission":{ + "name":"RemovePermission", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemovePermissionInput"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"InternalErrorException"}, + {"shape":"AuthorizationErrorException"}, + {"shape":"NotFoundException"} + ] + }, 
+ "SetEndpointAttributes":{ + "name":"SetEndpointAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetEndpointAttributesInput"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"InternalErrorException"}, + {"shape":"AuthorizationErrorException"}, + {"shape":"NotFoundException"} + ] + }, + "SetPlatformApplicationAttributes":{ + "name":"SetPlatformApplicationAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetPlatformApplicationAttributesInput"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"InternalErrorException"}, + {"shape":"AuthorizationErrorException"}, + {"shape":"NotFoundException"} + ] + }, + "SetSMSAttributes":{ + "name":"SetSMSAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetSMSAttributesInput"}, + "output":{ + "shape":"SetSMSAttributesResponse", + "resultWrapper":"SetSMSAttributesResult" + }, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ThrottledException"}, + {"shape":"InternalErrorException"} + ] + }, + "SetSubscriptionAttributes":{ + "name":"SetSubscriptionAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetSubscriptionAttributesInput"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"InternalErrorException"}, + {"shape":"NotFoundException"}, + {"shape":"AuthorizationErrorException"} + ] + }, + "SetTopicAttributes":{ + "name":"SetTopicAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetTopicAttributesInput"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"InternalErrorException"}, + {"shape":"NotFoundException"}, + {"shape":"AuthorizationErrorException"} + ] + }, + "Subscribe":{ + "name":"Subscribe", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SubscribeInput"}, + "output":{ + "shape":"SubscribeResponse", + "resultWrapper":"SubscribeResult" + }, + "errors":[ + {"shape":"SubscriptionLimitExceededException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InternalErrorException"}, + {"shape":"NotFoundException"}, + {"shape":"AuthorizationErrorException"} + ] + }, + "Unsubscribe":{ + "name":"Unsubscribe", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UnsubscribeInput"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"InternalErrorException"}, + {"shape":"AuthorizationErrorException"}, + {"shape":"NotFoundException"} + ] + } + }, + "shapes":{ + "ActionsList":{ + "type":"list", + "member":{"shape":"action"} + }, + "AddPermissionInput":{ + "type":"structure", + "required":[ + "TopicArn", + "Label", + "AWSAccountId", + "ActionName" + ], + "members":{ + "TopicArn":{"shape":"topicARN"}, + "Label":{"shape":"label"}, + "AWSAccountId":{"shape":"DelegatesList"}, + "ActionName":{"shape":"ActionsList"} + } + }, + "AuthorizationErrorException":{ + "type":"structure", + "members":{ + "message":{"shape":"string"} + }, + "error":{ + "code":"AuthorizationError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "Binary":{"type":"blob"}, + "CheckIfPhoneNumberIsOptedOutInput":{ + "type":"structure", + "required":["phoneNumber"], + "members":{ + "phoneNumber":{"shape":"PhoneNumber"} + } + }, + "CheckIfPhoneNumberIsOptedOutResponse":{ + "type":"structure", + "members":{ + "isOptedOut":{"shape":"boolean"} + } + }, + "ConfirmSubscriptionInput":{ + "type":"structure", + "required":[ + "TopicArn", + "Token" + ], + 
"members":{ + "TopicArn":{"shape":"topicARN"}, + "Token":{"shape":"token"}, + "AuthenticateOnUnsubscribe":{"shape":"authenticateOnUnsubscribe"} + } + }, + "ConfirmSubscriptionResponse":{ + "type":"structure", + "members":{ + "SubscriptionArn":{"shape":"subscriptionARN"} + } + }, + "CreateEndpointResponse":{ + "type":"structure", + "members":{ + "EndpointArn":{"shape":"String"} + } + }, + "CreatePlatformApplicationInput":{ + "type":"structure", + "required":[ + "Name", + "Platform", + "Attributes" + ], + "members":{ + "Name":{"shape":"String"}, + "Platform":{"shape":"String"}, + "Attributes":{"shape":"MapStringToString"} + } + }, + "CreatePlatformApplicationResponse":{ + "type":"structure", + "members":{ + "PlatformApplicationArn":{"shape":"String"} + } + }, + "CreatePlatformEndpointInput":{ + "type":"structure", + "required":[ + "PlatformApplicationArn", + "Token" + ], + "members":{ + "PlatformApplicationArn":{"shape":"String"}, + "Token":{"shape":"String"}, + "CustomUserData":{"shape":"String"}, + "Attributes":{"shape":"MapStringToString"} + } + }, + "CreateTopicInput":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"topicName"} + } + }, + "CreateTopicResponse":{ + "type":"structure", + "members":{ + "TopicArn":{"shape":"topicARN"} + } + }, + "DelegatesList":{ + "type":"list", + "member":{"shape":"delegate"} + }, + "DeleteEndpointInput":{ + "type":"structure", + "required":["EndpointArn"], + "members":{ + "EndpointArn":{"shape":"String"} + } + }, + "DeletePlatformApplicationInput":{ + "type":"structure", + "required":["PlatformApplicationArn"], + "members":{ + "PlatformApplicationArn":{"shape":"String"} + } + }, + "DeleteTopicInput":{ + "type":"structure", + "required":["TopicArn"], + "members":{ + "TopicArn":{"shape":"topicARN"} + } + }, + "Endpoint":{ + "type":"structure", + "members":{ + "EndpointArn":{"shape":"String"}, + "Attributes":{"shape":"MapStringToString"} + } + }, + "EndpointDisabledException":{ + "type":"structure", + "members":{ + "message":{"shape":"string"} + }, + "error":{ + "code":"EndpointDisabled", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "GetEndpointAttributesInput":{ + "type":"structure", + "required":["EndpointArn"], + "members":{ + "EndpointArn":{"shape":"String"} + } + }, + "GetEndpointAttributesResponse":{ + "type":"structure", + "members":{ + "Attributes":{"shape":"MapStringToString"} + } + }, + "GetPlatformApplicationAttributesInput":{ + "type":"structure", + "required":["PlatformApplicationArn"], + "members":{ + "PlatformApplicationArn":{"shape":"String"} + } + }, + "GetPlatformApplicationAttributesResponse":{ + "type":"structure", + "members":{ + "Attributes":{"shape":"MapStringToString"} + } + }, + "GetSMSAttributesInput":{ + "type":"structure", + "members":{ + "attributes":{"shape":"ListString"} + } + }, + "GetSMSAttributesResponse":{ + "type":"structure", + "members":{ + "attributes":{"shape":"MapStringToString"} + } + }, + "GetSubscriptionAttributesInput":{ + "type":"structure", + "required":["SubscriptionArn"], + "members":{ + "SubscriptionArn":{"shape":"subscriptionARN"} + } + }, + "GetSubscriptionAttributesResponse":{ + "type":"structure", + "members":{ + "Attributes":{"shape":"SubscriptionAttributesMap"} + } + }, + "GetTopicAttributesInput":{ + "type":"structure", + "required":["TopicArn"], + "members":{ + "TopicArn":{"shape":"topicARN"} + } + }, + "GetTopicAttributesResponse":{ + "type":"structure", + "members":{ + "Attributes":{"shape":"TopicAttributesMap"} + } + }, + 
"InternalErrorException":{ + "type":"structure", + "members":{ + "message":{"shape":"string"} + }, + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + "InvalidParameterException":{ + "type":"structure", + "members":{ + "message":{"shape":"string"} + }, + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidParameterValueException":{ + "type":"structure", + "members":{ + "message":{"shape":"string"} + }, + "error":{ + "code":"ParameterValueInvalid", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ListEndpointsByPlatformApplicationInput":{ + "type":"structure", + "required":["PlatformApplicationArn"], + "members":{ + "PlatformApplicationArn":{"shape":"String"}, + "NextToken":{"shape":"String"} + } + }, + "ListEndpointsByPlatformApplicationResponse":{ + "type":"structure", + "members":{ + "Endpoints":{"shape":"ListOfEndpoints"}, + "NextToken":{"shape":"String"} + } + }, + "ListOfEndpoints":{ + "type":"list", + "member":{"shape":"Endpoint"} + }, + "ListOfPlatformApplications":{ + "type":"list", + "member":{"shape":"PlatformApplication"} + }, + "ListPhoneNumbersOptedOutInput":{ + "type":"structure", + "members":{ + "nextToken":{"shape":"string"} + } + }, + "ListPhoneNumbersOptedOutResponse":{ + "type":"structure", + "members":{ + "phoneNumbers":{"shape":"PhoneNumberList"}, + "nextToken":{"shape":"string"} + } + }, + "ListPlatformApplicationsInput":{ + "type":"structure", + "members":{ + "NextToken":{"shape":"String"} + } + }, + "ListPlatformApplicationsResponse":{ + "type":"structure", + "members":{ + "PlatformApplications":{"shape":"ListOfPlatformApplications"}, + "NextToken":{"shape":"String"} + } + }, + "ListString":{ + "type":"list", + "member":{"shape":"String"} + }, + "ListSubscriptionsByTopicInput":{ + "type":"structure", + "required":["TopicArn"], + "members":{ + "TopicArn":{"shape":"topicARN"}, + "NextToken":{"shape":"nextToken"} + } + }, + "ListSubscriptionsByTopicResponse":{ + "type":"structure", + "members":{ + "Subscriptions":{"shape":"SubscriptionsList"}, + "NextToken":{"shape":"nextToken"} + } + }, + "ListSubscriptionsInput":{ + "type":"structure", + "members":{ + "NextToken":{"shape":"nextToken"} + } + }, + "ListSubscriptionsResponse":{ + "type":"structure", + "members":{ + "Subscriptions":{"shape":"SubscriptionsList"}, + "NextToken":{"shape":"nextToken"} + } + }, + "ListTopicsInput":{ + "type":"structure", + "members":{ + "NextToken":{"shape":"nextToken"} + } + }, + "ListTopicsResponse":{ + "type":"structure", + "members":{ + "Topics":{"shape":"TopicsList"}, + "NextToken":{"shape":"nextToken"} + } + }, + "MapStringToString":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "MessageAttributeMap":{ + "type":"map", + "key":{ + "shape":"String", + "locationName":"Name" + }, + "value":{ + "shape":"MessageAttributeValue", + "locationName":"Value" + } + }, + "MessageAttributeValue":{ + "type":"structure", + "required":["DataType"], + "members":{ + "DataType":{"shape":"String"}, + "StringValue":{"shape":"String"}, + "BinaryValue":{"shape":"Binary"} + } + }, + "NotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"string"} + }, + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "OptInPhoneNumberInput":{ + "type":"structure", + "required":["phoneNumber"], + "members":{ + "phoneNumber":{"shape":"PhoneNumber"} + } + }, + 
"OptInPhoneNumberResponse":{ + "type":"structure", + "members":{ + } + }, + "PhoneNumber":{"type":"string"}, + "PhoneNumberList":{ + "type":"list", + "member":{"shape":"PhoneNumber"} + }, + "PlatformApplication":{ + "type":"structure", + "members":{ + "PlatformApplicationArn":{"shape":"String"}, + "Attributes":{"shape":"MapStringToString"} + } + }, + "PlatformApplicationDisabledException":{ + "type":"structure", + "members":{ + "message":{"shape":"string"} + }, + "error":{ + "code":"PlatformApplicationDisabled", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "PublishInput":{ + "type":"structure", + "required":["Message"], + "members":{ + "TopicArn":{"shape":"topicARN"}, + "TargetArn":{"shape":"String"}, + "PhoneNumber":{"shape":"String"}, + "Message":{"shape":"message"}, + "Subject":{"shape":"subject"}, + "MessageStructure":{"shape":"messageStructure"}, + "MessageAttributes":{"shape":"MessageAttributeMap"} + } + }, + "PublishResponse":{ + "type":"structure", + "members":{ + "MessageId":{"shape":"messageId"} + } + }, + "RemovePermissionInput":{ + "type":"structure", + "required":[ + "TopicArn", + "Label" + ], + "members":{ + "TopicArn":{"shape":"topicARN"}, + "Label":{"shape":"label"} + } + }, + "SetEndpointAttributesInput":{ + "type":"structure", + "required":[ + "EndpointArn", + "Attributes" + ], + "members":{ + "EndpointArn":{"shape":"String"}, + "Attributes":{"shape":"MapStringToString"} + } + }, + "SetPlatformApplicationAttributesInput":{ + "type":"structure", + "required":[ + "PlatformApplicationArn", + "Attributes" + ], + "members":{ + "PlatformApplicationArn":{"shape":"String"}, + "Attributes":{"shape":"MapStringToString"} + } + }, + "SetSMSAttributesInput":{ + "type":"structure", + "required":["attributes"], + "members":{ + "attributes":{"shape":"MapStringToString"} + } + }, + "SetSMSAttributesResponse":{ + "type":"structure", + "members":{ + } + }, + "SetSubscriptionAttributesInput":{ + "type":"structure", + "required":[ + "SubscriptionArn", + "AttributeName" + ], + "members":{ + "SubscriptionArn":{"shape":"subscriptionARN"}, + "AttributeName":{"shape":"attributeName"}, + "AttributeValue":{"shape":"attributeValue"} + } + }, + "SetTopicAttributesInput":{ + "type":"structure", + "required":[ + "TopicArn", + "AttributeName" + ], + "members":{ + "TopicArn":{"shape":"topicARN"}, + "AttributeName":{"shape":"attributeName"}, + "AttributeValue":{"shape":"attributeValue"} + } + }, + "String":{"type":"string"}, + "SubscribeInput":{ + "type":"structure", + "required":[ + "TopicArn", + "Protocol" + ], + "members":{ + "TopicArn":{"shape":"topicARN"}, + "Protocol":{"shape":"protocol"}, + "Endpoint":{"shape":"endpoint"} + } + }, + "SubscribeResponse":{ + "type":"structure", + "members":{ + "SubscriptionArn":{"shape":"subscriptionARN"} + } + }, + "Subscription":{ + "type":"structure", + "members":{ + "SubscriptionArn":{"shape":"subscriptionARN"}, + "Owner":{"shape":"account"}, + "Protocol":{"shape":"protocol"}, + "Endpoint":{"shape":"endpoint"}, + "TopicArn":{"shape":"topicARN"} + } + }, + "SubscriptionAttributesMap":{ + "type":"map", + "key":{"shape":"attributeName"}, + "value":{"shape":"attributeValue"} + }, + "SubscriptionLimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"string"} + }, + "error":{ + "code":"SubscriptionLimitExceeded", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "SubscriptionsList":{ + "type":"list", + "member":{"shape":"Subscription"} + }, + "ThrottledException":{ + 
"type":"structure", + "members":{ + "message":{"shape":"string"} + }, + "error":{ + "code":"Throttled", + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + }, + "Topic":{ + "type":"structure", + "members":{ + "TopicArn":{"shape":"topicARN"} + } + }, + "TopicAttributesMap":{ + "type":"map", + "key":{"shape":"attributeName"}, + "value":{"shape":"attributeValue"} + }, + "TopicLimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"string"} + }, + "error":{ + "code":"TopicLimitExceeded", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "TopicsList":{ + "type":"list", + "member":{"shape":"Topic"} + }, + "UnsubscribeInput":{ + "type":"structure", + "required":["SubscriptionArn"], + "members":{ + "SubscriptionArn":{"shape":"subscriptionARN"} + } + }, + "account":{"type":"string"}, + "action":{"type":"string"}, + "attributeName":{"type":"string"}, + "attributeValue":{"type":"string"}, + "authenticateOnUnsubscribe":{"type":"string"}, + "boolean":{"type":"boolean"}, + "delegate":{"type":"string"}, + "endpoint":{"type":"string"}, + "label":{"type":"string"}, + "message":{"type":"string"}, + "messageId":{"type":"string"}, + "messageStructure":{"type":"string"}, + "nextToken":{"type":"string"}, + "protocol":{"type":"string"}, + "string":{"type":"string"}, + "subject":{"type":"string"}, + "subscriptionARN":{"type":"string"}, + "token":{"type":"string"}, + "topicARN":{"type":"string"}, + "topicName":{"type":"string"} + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/sns/2010-03-31/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/sns/2010-03-31/docs-2.json new file mode 100644 index 000000000..8909088d2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/sns/2010-03-31/docs-2.json @@ -0,0 +1,658 @@ +{ + "version": "2.0", + "service": "Amazon Simple Notification Service

    Amazon Simple Notification Service (Amazon SNS) is a web service that enables you to build distributed web-enabled applications. Applications can use Amazon SNS to easily push real-time notification messages to interested subscribers over multiple delivery protocols. For more information about this product, see http://aws.amazon.com/sns. For detailed information about Amazon SNS features and their associated API calls, see the Amazon SNS Developer Guide.

    We also provide SDKs that enable you to access Amazon SNS from your preferred programming language. The SDKs contain functionality that automatically takes care of tasks such as: cryptographically signing your service requests, retrying requests, and handling error responses. For a list of available SDKs, go to Tools for Amazon Web Services.

    ", + "operations": { + "AddPermission": "

    Adds a statement to a topic's access control policy, granting access for the specified AWS accounts to the specified actions.
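    [Editor's note] A minimal sketch of AddPermission with the vendored v1 SNS client, matching the AddPermissionInput shape defined above (TopicArn, Label, AWSAccountId, ActionName); the ARN and account id are placeholders:

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sns"
    )

    func main() {
        svc := sns.New(session.Must(session.NewSession()))

        // Grants account 111122223333 permission to call Publish on the topic;
        // Label names the policy statement so RemovePermission can target it later.
        _, err := svc.AddPermission(&sns.AddPermissionInput{
            TopicArn:     aws.String("arn:aws:sns:us-east-1:123456789012:my-topic"),
            Label:        aws.String("grant-publish"),
            AWSAccountId: []*string{aws.String("111122223333")},
            ActionName:   []*string{aws.String("Publish")},
        })
        if err != nil {
            panic(err)
        }
    }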

    ", + "CheckIfPhoneNumberIsOptedOut": "

    Accepts a phone number and indicates whether the phone holder has opted out of receiving SMS messages from your account. You cannot send SMS messages to a number that is opted out.

    To resume sending messages, you can opt in the number by using the OptInPhoneNumber action.
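    [Editor's note] A minimal sketch of the check-then-opt-in flow described here, using the vendored v1 client; the phone number is a placeholder:

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sns"
    )

    func main() {
        svc := sns.New(session.Must(session.NewSession()))
        number := aws.String("+15555550100")

        out, err := svc.CheckIfPhoneNumberIsOptedOut(&sns.CheckIfPhoneNumberIsOptedOutInput{
            PhoneNumber: number,
        })
        if err != nil {
            panic(err)
        }
        if aws.BoolValue(out.IsOptedOut) {
            // Opting a number back in is permitted once every 30 days.
            if _, err := svc.OptInPhoneNumber(&sns.OptInPhoneNumberInput{PhoneNumber: number}); err != nil {
                panic(err)
            }
        }
    }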

    ", + "ConfirmSubscription": "

    Verifies an endpoint owner's intent to receive messages by validating the token sent to the endpoint by an earlier Subscribe action. If the token is valid, the action creates a new subscription and returns its Amazon Resource Name (ARN). This call requires an AWS signature only when the AuthenticateOnUnsubscribe flag is set to \"true\".

    ", + "CreatePlatformApplication": "

    Creates a platform application object for one of the supported push notification services, such as APNS and GCM, to which devices and mobile apps may register. You must specify PlatformPrincipal and PlatformCredential attributes when using the CreatePlatformApplication action. The PlatformPrincipal is received from the notification service. For APNS/APNS_SANDBOX, PlatformPrincipal is \"SSL certificate\". For GCM, PlatformPrincipal is not applicable. For ADM, PlatformPrincipal is \"client id\". The PlatformCredential is also received from the notification service. For WNS, PlatformPrincipal is \"Package Security Identifier\". For MPNS, PlatformPrincipal is \"TLS certificate\". For Baidu, PlatformPrincipal is \"API key\".

    For APNS/APNS_SANDBOX, PlatformCredential is \"private key\". For GCM, PlatformCredential is \"API key\". For ADM, PlatformCredential is \"client secret\". For WNS, PlatformCredential is \"secret key\". For MPNS, PlatformCredential is \"private key\". For Baidu, PlatformCredential is \"secret key\". The PlatformApplicationArn that is returned when using CreatePlatformApplication is then used as an attribute for the CreatePlatformEndpoint action. For more information, see Using Amazon SNS Mobile Push Notifications. For more information about obtaining the PlatformPrincipal and PlatformCredential for each of the supported push notification services, see Getting Started with Apple Push Notification Service, Getting Started with Amazon Device Messaging, Getting Started with Baidu Cloud Push, Getting Started with Google Cloud Messaging for Android, Getting Started with MPNS, or Getting Started with WNS.

    ", + "CreatePlatformEndpoint": "

    Creates an endpoint for a device and mobile app on one of the supported push notification services, such as GCM and APNS. CreatePlatformEndpoint requires the PlatformApplicationArn that is returned from CreatePlatformApplication. The EndpointArn that is returned when using CreatePlatformEndpoint can then be used by the Publish action to send a message to a mobile app or by the Subscribe action for subscription to a topic. The CreatePlatformEndpoint action is idempotent, so if the requester already owns an endpoint with the same device token and attributes, that endpoint's ARN is returned without creating a new endpoint. For more information, see Using Amazon SNS Mobile Push Notifications.

    When using CreatePlatformEndpoint with Baidu, two attributes must be provided: ChannelId and UserId. The token field must also contain the ChannelId. For more information, see Creating an Amazon SNS Endpoint for Baidu.
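    [Editor's note] A minimal sketch of registering a device token against an existing platform application, per the CreatePlatformEndpointInput shape above; the application ARN, token, and user data are placeholders:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sns"
    )

    func main() {
        svc := sns.New(session.Must(session.NewSession()))

        out, err := svc.CreatePlatformEndpoint(&sns.CreatePlatformEndpointInput{
            PlatformApplicationArn: aws.String("arn:aws:sns:us-east-1:123456789012:app/GCM/my-app"),
            Token:                  aws.String("device-registration-token"),
            CustomUserData:         aws.String("user-42"), // optional
        })
        if err != nil {
            panic(err)
        }
        // Idempotent: the same token and attributes return the existing ARN.
        fmt.Println(aws.StringValue(out.EndpointArn))
    }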

    ", + "CreateTopic": "

    Creates a topic to which notifications can be published. Users can create at most 100,000 topics. For more information, see http://aws.amazon.com/sns. This action is idempotent, so if the requester already owns a topic with the specified name, that topic's ARN is returned without creating a new topic.
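    [Editor's note] A minimal sketch of the idempotent CreateTopic call with the vendored v1 client, matching the CreateTopicInput/CreateTopicResponse shapes above:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sns"
    )

    func main() {
        svc := sns.New(session.Must(session.NewSession()))

        out, err := svc.CreateTopic(&sns.CreateTopicInput{Name: aws.String("my-topic")})
        if err != nil {
            panic(err)
        }
        // Idempotent: re-creating an existing name returns the same TopicArn.
        fmt.Println(aws.StringValue(out.TopicArn))
    }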

    ", + "DeleteEndpoint": "

    Deletes the endpoint for a device and mobile app from Amazon SNS. This action is idempotent. For more information, see Using Amazon SNS Mobile Push Notifications.

    When you delete an endpoint that is also subscribed to a topic, then you must also unsubscribe the endpoint from the topic.

    ", + "DeletePlatformApplication": "

    Deletes a platform application object for one of the supported push notification services, such as APNS and GCM. For more information, see Using Amazon SNS Mobile Push Notifications.

    ", + "DeleteTopic": "

    Deletes a topic and all its subscriptions. Deleting a topic might prevent some messages previously sent to the topic from being delivered to subscribers. This action is idempotent, so deleting a topic that does not exist does not result in an error.

    ", + "GetEndpointAttributes": "

    Retrieves the endpoint attributes for a device on one of the supported push notification services, such as GCM and APNS. For more information, see Using Amazon SNS Mobile Push Notifications.

    ", + "GetPlatformApplicationAttributes": "

    Retrieves the attributes of the platform application object for the supported push notification services, such as APNS and GCM. For more information, see Using Amazon SNS Mobile Push Notifications.

    ", + "GetSMSAttributes": "

    Returns the settings for sending SMS messages from your account.

    These settings are set with the SetSMSAttributes action.

    ", + "GetSubscriptionAttributes": "

    Returns all of the properties of a subscription.

    ", + "GetTopicAttributes": "

    Returns all of the properties of a topic. Topic properties returned might differ based on the authorization of the user.

    ", + "ListEndpointsByPlatformApplication": "

    Lists the endpoints and endpoint attributes for devices in a supported push notification service, such as GCM and APNS. The results for ListEndpointsByPlatformApplication are paginated and return a limited list of endpoints, up to 100. If additional records are available after the first page results, then a NextToken string will be returned. To receive the next page, you call ListEndpointsByPlatformApplication again using the NextToken string received from the previous call. When there are no more records to return, NextToken will be null. For more information, see Using Amazon SNS Mobile Push Notifications.
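    [Editor's note] A minimal sketch of the NextToken pagination loop this paragraph describes, using the vendored v1 client and the input/response shapes above; the application ARN is a placeholder:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sns"
    )

    func main() {
        svc := sns.New(session.Must(session.NewSession()))

        input := &sns.ListEndpointsByPlatformApplicationInput{
            PlatformApplicationArn: aws.String("arn:aws:sns:us-east-1:123456789012:app/GCM/my-app"),
        }
        for {
            page, err := svc.ListEndpointsByPlatformApplication(input)
            if err != nil {
                panic(err)
            }
            for _, ep := range page.Endpoints {
                fmt.Println(aws.StringValue(ep.EndpointArn))
            }
            if page.NextToken == nil {
                break // no more records
            }
            input.NextToken = page.NextToken
        }
    }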

    ", + "ListPhoneNumbersOptedOut": "

    Returns a list of phone numbers that are opted out, meaning you cannot send SMS messages to them.

    The results for ListPhoneNumbersOptedOut are paginated, and each page returns up to 100 phone numbers. If additional phone numbers are available after the first page of results, then a NextToken string will be returned. To receive the next page, you call ListPhoneNumbersOptedOut again using the NextToken string received from the previous call. When there are no more records to return, NextToken will be null.

    ", + "ListPlatformApplications": "

    Lists the platform application objects for the supported push notification services, such as APNS and GCM. The results for ListPlatformApplications are paginated and return a limited list of applications, up to 100. If additional records are available after the first page results, then a NextToken string will be returned. To receive the next page, you call ListPlatformApplications using the NextToken string received from the previous call. When there are no more records to return, NextToken will be null. For more information, see Using Amazon SNS Mobile Push Notifications.

    ", + "ListSubscriptions": "

    Returns a list of the requester's subscriptions. Each call returns a limited list of subscriptions, up to 100. If there are more subscriptions, a NextToken is also returned. Use the NextToken parameter in a new ListSubscriptions call to get further results.

    ", + "ListSubscriptionsByTopic": "

    Returns a list of the subscriptions to a specific topic. Each call returns a limited list of subscriptions, up to 100. If there are more subscriptions, a NextToken is also returned. Use the NextToken parameter in a new ListSubscriptionsByTopic call to get further results.

    ", + "ListTopics": "

    Returns a list of the requester's topics. Each call returns a limited list of topics, up to 100. If there are more topics, a NextToken is also returned. Use the NextToken parameter in a new ListTopics call to get further results.

    ", + "OptInPhoneNumber": "

    Use this request to opt in a phone number that is opted out, which enables you to resume sending SMS messages to the number.

    You can opt in a phone number only once every 30 days.

    ", + "Publish": "

    Sends a message to all of a topic's subscribed endpoints. When a messageId is returned, the message has been saved and Amazon SNS will attempt to deliver it to the topic's subscribers shortly. The format of the outgoing message to each subscribed endpoint depends on the notification protocol.

    To use the Publish action for sending a message to a mobile endpoint, such as an app on a Kindle device or mobile phone, you must specify the EndpointArn for the TargetArn parameter. The EndpointArn is returned when making a call with the CreatePlatformEndpoint action. The second example below shows a request and response for publishing to a mobile endpoint.

    For more information about formatting messages, see Send Custom Platform-Specific Payloads in Messages to Mobile Devices.
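    [Editor's note] A minimal sketch of publishing to a mobile endpoint via TargetArn, per the PublishInput shape above; the endpoint ARN and message are placeholders:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sns"
    )

    func main() {
        svc := sns.New(session.Must(session.NewSession()))

        // For a mobile endpoint, pass the EndpointArn from CreatePlatformEndpoint
        // as TargetArn; for a topic you would set TopicArn instead.
        out, err := svc.Publish(&sns.PublishInput{
            TargetArn: aws.String("arn:aws:sns:us-east-1:123456789012:endpoint/GCM/my-app/endpoint-id"),
            Message:   aws.String("hello from SNS"),
        })
        if err != nil {
            panic(err)
        }
        fmt.Println(aws.StringValue(out.MessageId))
    }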

    ", + "RemovePermission": "

    Removes a statement from a topic's access control policy.

    ", + "SetEndpointAttributes": "

    Sets the attributes for an endpoint for a device on one of the supported push notification services, such as GCM and APNS. For more information, see Using Amazon SNS Mobile Push Notifications.

    ", + "SetPlatformApplicationAttributes": "

    Sets the attributes of the platform application object for the supported push notification services, such as APNS and GCM. For more information, see Using Amazon SNS Mobile Push Notifications. For information on configuring attributes for message delivery status, see Using Amazon SNS Application Attributes for Message Delivery Status.

    ", + "SetSMSAttributes": "

    Use this request to set the default settings for sending SMS messages and receiving daily SMS usage reports.

    You can override some of these settings for a single message when you use the Publish action with the MessageAttributes.entry.N parameter. For more information, see Sending an SMS Message in the Amazon SNS Developer Guide.
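    [Editor's note] A minimal sketch of setting a default SMS attribute with the vendored v1 client; the attribute name MonthlySpendLimit comes from the description further down in this file, and the dollar value is a placeholder:

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sns"
    )

    func main() {
        svc := sns.New(session.Must(session.NewSession()))

        _, err := svc.SetSMSAttributes(&sns.SetSMSAttributesInput{
            Attributes: map[string]*string{
                // USD cap per month; SNS stops sending shortly after it is crossed.
                "MonthlySpendLimit": aws.String("30"),
            },
        })
        if err != nil {
            panic(err)
        }
    }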

    ", + "SetSubscriptionAttributes": "

    Allows a subscription owner to set an attribute of the topic to a new value.

    ", + "SetTopicAttributes": "

    Allows a topic owner to set an attribute of the topic to a new value.

    ", + "Subscribe": "

    Prepares to subscribe an endpoint by sending the endpoint a confirmation message. To actually create a subscription, the endpoint owner must call the ConfirmSubscription action with the token from the confirmation message. Confirmation tokens are valid for three days.
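    [Editor's note] A minimal sketch of the Subscribe/ConfirmSubscription handshake this paragraph describes, with the vendored v1 client; the ARN, endpoint URL, and token are placeholders (in practice the token arrives in the confirmation message delivered to the endpoint):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sns"
    )

    func main() {
        svc := sns.New(session.Must(session.NewSession()))
        topic := aws.String("arn:aws:sns:us-east-1:123456789012:my-topic")

        // Sends a confirmation message to the endpoint; the subscription stays
        // pending until ConfirmSubscription is called with the delivered token.
        sub, err := svc.Subscribe(&sns.SubscribeInput{
            TopicArn: topic,
            Protocol: aws.String("https"),
            Endpoint: aws.String("https://example.com/sns-handler"),
        })
        if err != nil {
            panic(err)
        }
        fmt.Println(aws.StringValue(sub.SubscriptionArn)) // "pending confirmation" at first

        // Later, once the endpoint owner has extracted the token:
        _, err = svc.ConfirmSubscription(&sns.ConfirmSubscriptionInput{
            TopicArn: topic,
            Token:    aws.String("token-from-confirmation-message"),
        })
        if err != nil {
            panic(err)
        }
    }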

    ", + "Unsubscribe": "

    Deletes a subscription. If the subscription requires authentication for deletion, only the owner of the subscription or the topic's owner can unsubscribe, and an AWS signature is required. If the Unsubscribe call does not require authentication and the requester is not the subscription owner, a final cancellation message is delivered to the endpoint, so that the endpoint owner can easily resubscribe to the topic if the Unsubscribe request was unintended.

    " + }, + "shapes": { + "ActionsList": { + "base": null, + "refs": { + "AddPermissionInput$ActionName": "

    The action you want to allow for the specified principal(s).

    Valid values: any Amazon SNS action name.

    " + } + }, + "AddPermissionInput": { + "base": null, + "refs": { + } + }, + "AuthorizationErrorException": { + "base": "

    Indicates that the user has been denied access to the requested resource.

    ", + "refs": { + } + }, + "Binary": { + "base": null, + "refs": { + "MessageAttributeValue$BinaryValue": "

    Binary type attributes can store any binary data, for example, compressed data, encrypted data, or images.
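    [Editor's note] A minimal sketch of attaching String and Binary message attributes on Publish, matching the MessageAttributeValue shape above; the attribute names and bytes are placeholders:

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sns"
    )

    func main() {
        svc := sns.New(session.Must(session.NewSession()))

        _, err := svc.Publish(&sns.PublishInput{
            TopicArn: aws.String("arn:aws:sns:us-east-1:123456789012:my-topic"),
            Message:  aws.String("hello"),
            MessageAttributes: map[string]*sns.MessageAttributeValue{
                // DataType is required; set StringValue or BinaryValue to match it.
                "trace-id": {DataType: aws.String("String"), StringValue: aws.String("abc-123")},
                "blob":     {DataType: aws.String("Binary"), BinaryValue: []byte{0x1f, 0x8b}},
            },
        })
        if err != nil {
            panic(err)
        }
    }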

    " + } + }, + "CheckIfPhoneNumberIsOptedOutInput": { + "base": "

    The input for the CheckIfPhoneNumberIsOptedOut action.

    ", + "refs": { + } + }, + "CheckIfPhoneNumberIsOptedOutResponse": { + "base": "

    The response from the CheckIfPhoneNumberIsOptedOut action.

    ", + "refs": { + } + }, + "ConfirmSubscriptionInput": { + "base": "

    Input for ConfirmSubscription action.

    ", + "refs": { + } + }, + "ConfirmSubscriptionResponse": { + "base": "

    Response for ConfirmSubscriptions action.

    ", + "refs": { + } + }, + "CreateEndpointResponse": { + "base": "

    Response from CreateEndpoint action.

    ", + "refs": { + } + }, + "CreatePlatformApplicationInput": { + "base": "

    Input for CreatePlatformApplication action.

    ", + "refs": { + } + }, + "CreatePlatformApplicationResponse": { + "base": "

    Response from CreatePlatformApplication action.

    ", + "refs": { + } + }, + "CreatePlatformEndpointInput": { + "base": "

    Input for CreatePlatformEndpoint action.

    ", + "refs": { + } + }, + "CreateTopicInput": { + "base": "

    Input for CreateTopic action.

    ", + "refs": { + } + }, + "CreateTopicResponse": { + "base": "

    Response from CreateTopic action.

    ", + "refs": { + } + }, + "DelegatesList": { + "base": null, + "refs": { + "AddPermissionInput$AWSAccountId": "

    The AWS account IDs of the users (principals) who will be given access to the specified actions. The users must have AWS accounts, but do not need to be signed up for this service.

    " + } + }, + "DeleteEndpointInput": { + "base": "

    Input for DeleteEndpoint action.

    ", + "refs": { + } + }, + "DeletePlatformApplicationInput": { + "base": "

    Input for DeletePlatformApplication action.

    ", + "refs": { + } + }, + "DeleteTopicInput": { + "base": null, + "refs": { + } + }, + "Endpoint": { + "base": "

    Endpoint for mobile app and device.

    ", + "refs": { + "ListOfEndpoints$member": null + } + }, + "EndpointDisabledException": { + "base": "

    Exception error indicating endpoint disabled.

    ", + "refs": { + } + }, + "GetEndpointAttributesInput": { + "base": "

    Input for GetEndpointAttributes action.

    ", + "refs": { + } + }, + "GetEndpointAttributesResponse": { + "base": "

    Response from GetEndpointAttributes of the EndpointArn.

    ", + "refs": { + } + }, + "GetPlatformApplicationAttributesInput": { + "base": "

    Input for GetPlatformApplicationAttributes action.

    ", + "refs": { + } + }, + "GetPlatformApplicationAttributesResponse": { + "base": "

    Response for GetPlatformApplicationAttributes action.

    ", + "refs": { + } + }, + "GetSMSAttributesInput": { + "base": "

    The input for the GetSMSAttributes request.

    ", + "refs": { + } + }, + "GetSMSAttributesResponse": { + "base": "

    The response from the GetSMSAttributes request.

    ", + "refs": { + } + }, + "GetSubscriptionAttributesInput": { + "base": "

    Input for GetSubscriptionAttributes.

    ", + "refs": { + } + }, + "GetSubscriptionAttributesResponse": { + "base": "

    Response for GetSubscriptionAttributes action.

    ", + "refs": { + } + }, + "GetTopicAttributesInput": { + "base": "

    Input for GetTopicAttributes action.

    ", + "refs": { + } + }, + "GetTopicAttributesResponse": { + "base": "

    Response for GetTopicAttributes action.

    ", + "refs": { + } + }, + "InternalErrorException": { + "base": "

    Indicates an internal service error.

    ", + "refs": { + } + }, + "InvalidParameterException": { + "base": "

    Indicates that a request parameter does not comply with the associated constraints.

    ", + "refs": { + } + }, + "InvalidParameterValueException": { + "base": "

    Indicates that a request parameter does not comply with the associated constraints.

    ", + "refs": { + } + }, + "ListEndpointsByPlatformApplicationInput": { + "base": "

    Input for ListEndpointsByPlatformApplication action.

    ", + "refs": { + } + }, + "ListEndpointsByPlatformApplicationResponse": { + "base": "

    Response for ListEndpointsByPlatformApplication action.

    ", + "refs": { + } + }, + "ListOfEndpoints": { + "base": null, + "refs": { + "ListEndpointsByPlatformApplicationResponse$Endpoints": "

    Endpoints returned for ListEndpointsByPlatformApplication action.

    " + } + }, + "ListOfPlatformApplications": { + "base": null, + "refs": { + "ListPlatformApplicationsResponse$PlatformApplications": "

    Platform applications returned when calling ListPlatformApplications action.

    " + } + }, + "ListPhoneNumbersOptedOutInput": { + "base": "

    The input for the ListPhoneNumbersOptedOut action.

    ", + "refs": { + } + }, + "ListPhoneNumbersOptedOutResponse": { + "base": "

    The response from the ListPhoneNumbersOptedOut action.

    ", + "refs": { + } + }, + "ListPlatformApplicationsInput": { + "base": "

    Input for ListPlatformApplications action.

    ", + "refs": { + } + }, + "ListPlatformApplicationsResponse": { + "base": "

    Response for ListPlatformApplications action.

    ", + "refs": { + } + }, + "ListString": { + "base": null, + "refs": { + "GetSMSAttributesInput$attributes": "

    A list of the individual attribute names, such as MonthlySpendLimit, for which you want values.

    For all attribute names, see SetSMSAttributes.

    If you don't use this parameter, Amazon SNS returns all SMS attributes.

    " + } + }, + "ListSubscriptionsByTopicInput": { + "base": "

    Input for ListSubscriptionsByTopic action.

    ", + "refs": { + } + }, + "ListSubscriptionsByTopicResponse": { + "base": "

    Response for ListSubscriptionsByTopic action.

    ", + "refs": { + } + }, + "ListSubscriptionsInput": { + "base": "

    Input for ListSubscriptions action.

    ", + "refs": { + } + }, + "ListSubscriptionsResponse": { + "base": "

    Response for ListSubscriptions action.

    ", + "refs": { + } + }, + "ListTopicsInput": { + "base": null, + "refs": { + } + }, + "ListTopicsResponse": { + "base": "

    Response for ListTopics action.

    ", + "refs": { + } + }, + "MapStringToString": { + "base": null, + "refs": { + "CreatePlatformApplicationInput$Attributes": "

    For a list of attributes, see SetPlatformApplicationAttributes.

    ", + "CreatePlatformEndpointInput$Attributes": "

    For a list of attributes, see SetEndpointAttributes.

    ", + "Endpoint$Attributes": "

    Attributes for endpoint.

    ", + "GetEndpointAttributesResponse$Attributes": "

    Attributes include the following:

    • CustomUserData -- arbitrary user data to associate with the endpoint. Amazon SNS does not use this data. The data must be in UTF-8 format and less than 2KB.

    • Enabled -- flag that enables/disables delivery to the endpoint. Amazon SNS will set this to false when a notification service indicates to Amazon SNS that the endpoint is invalid. Users can set it back to true, typically after updating Token.

    • Token -- device token, also referred to as a registration id, for an app and mobile device. This is returned from the notification service when an app and mobile device are registered with the notification service.

    ", + "GetPlatformApplicationAttributesResponse$Attributes": "

    Attributes include the following:

    • EventEndpointCreated -- Topic ARN to which EndpointCreated event notifications should be sent.

    • EventEndpointDeleted -- Topic ARN to which EndpointDeleted event notifications should be sent.

    • EventEndpointUpdated -- Topic ARN to which EndpointUpdate event notifications should be sent.

    • EventDeliveryFailure -- Topic ARN to which DeliveryFailure event notifications should be sent upon Direct Publish delivery failure (permanent) to one of the application's endpoints.

    ", + "GetSMSAttributesResponse$attributes": "

    The SMS attribute names and their values.

    ", + "PlatformApplication$Attributes": "

    Attributes for platform application object.

    ", + "SetEndpointAttributesInput$Attributes": "

    A map of the endpoint attributes. Attributes in this map include the following:

    • CustomUserData -- arbitrary user data to associate with the endpoint. Amazon SNS does not use this data. The data must be in UTF-8 format and less than 2KB.

    • Enabled -- flag that enables/disables delivery to the endpoint. Amazon SNS will set this to false when a notification service indicates to Amazon SNS that the endpoint is invalid. Users can set it back to true, typically after updating Token.

    • Token -- device token, also referred to as a registration id, for an app and mobile device. This is returned from the notification service when an app and mobile device are registered with the notification service.

    ", + "SetPlatformApplicationAttributesInput$Attributes": "

    A map of the platform application attributes. Attributes in this map include the following:

    • PlatformCredential -- The credential received from the notification service. For APNS/APNS_SANDBOX, PlatformCredential is private key. For GCM, PlatformCredential is \"API key\". For ADM, PlatformCredential is \"client secret\".

    • PlatformPrincipal -- The principal received from the notification service. For APNS/APNS_SANDBOX, PlatformPrincipal is SSL certificate. For GCM, PlatformPrincipal is not applicable. For ADM, PlatformPrincipal is \"client id\".

    • EventEndpointCreated -- Topic ARN to which EndpointCreated event notifications should be sent.

    • EventEndpointDeleted -- Topic ARN to which EndpointDeleted event notifications should be sent.

    • EventEndpointUpdated -- Topic ARN to which EndpointUpdate event notifications should be sent.

    • EventDeliveryFailure -- Topic ARN to which DeliveryFailure event notifications should be sent upon Direct Publish delivery failure (permanent) to one of the application's endpoints.

    • SuccessFeedbackRoleArn -- IAM role ARN used to give Amazon SNS write access to use CloudWatch Logs on your behalf.

    • FailureFeedbackRoleArn -- IAM role ARN used to give Amazon SNS write access to use CloudWatch Logs on your behalf.

    • SuccessFeedbackSampleRate -- Sample rate percentage (0-100) of successfully delivered messages.

    ", + "SetSMSAttributesInput$attributes": "

    The default settings for sending SMS messages from your account. You can set values for the following attribute names:

    MonthlySpendLimit – The maximum amount in USD that you are willing to spend each month to send SMS messages. When Amazon SNS determines that sending an SMS message would incur a cost that exceeds this limit, it stops sending SMS messages within minutes.

    Amazon SNS stops sending SMS messages within minutes of the limit being crossed. During that interval, if you continue to send SMS messages, you will incur costs that exceed your limit.

    DeliveryStatusIAMRole – The ARN of the IAM role that allows Amazon SNS to write logs about SMS deliveries in CloudWatch Logs. For each SMS message that you send, Amazon SNS writes a log that includes the message price, the success or failure status, the reason for failure (if the message failed), the message dwell time, and other information.

    DeliveryStatusSuccessSamplingRate – The percentage of successful SMS deliveries for which Amazon SNS will write logs in CloudWatch Logs. The value can be an integer from 0 - 100. For example, to write logs only for failed deliveries, set this value to 0. To write logs for 10% of your successful deliveries, set it to 10.

    DefaultSenderID – A string, such as your business brand, that is displayed as the sender on the receiving device. Support for sender IDs varies by country. The sender ID can be 1 - 11 alphanumeric characters, and it must contain at least one letter.

    DefaultSMSType – The type of SMS message that you will send by default. You can assign the following values:

    • Promotional – Noncritical messages, such as marketing messages. Amazon SNS optimizes the message delivery to incur the lowest cost.

    • Transactional – (Default) Critical messages that support customer transactions, such as one-time passcodes for multi-factor authentication. Amazon SNS optimizes the message delivery to achieve the highest reliability.

    UsageReportS3Bucket – The name of the Amazon S3 bucket to receive daily SMS usage reports from Amazon SNS. Each day, Amazon SNS will deliver a usage report as a CSV file to the bucket. The report includes the following information for each SMS message that was successfully delivered by your account:

    • Time that the message was published (in UTC)

    • Message ID

    • Destination phone number

    • Message type

    • Delivery status

    • Message price (in USD)

    • Part number (a message is split into multiple parts if it is too long for a single message)

    • Total number of parts

    To receive the report, the bucket must have a policy that allows the Amazon SNS service principal to perform the s3:PutObject and s3:GetBucketLocation actions.

    For an example bucket policy and usage report, see Viewing Statistics About SMS Message Delivery in the Amazon SNS Developer Guide.
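
    As a hedged sketch of how these defaults can be set through this Go SDK (the attribute values below are placeholders, not recommendations):

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/sns"
        )

        func main() {
            svc := sns.New(session.Must(session.NewSession()))
            // Placeholder values; MonthlySpendLimit is a string-encoded USD amount.
            _, err := svc.SetSMSAttributes(&sns.SetSMSAttributesInput{
                Attributes: map[string]*string{
                    "DefaultSMSType":    aws.String("Transactional"),
                    "MonthlySpendLimit": aws.String("10"),
                    "DefaultSenderID":   aws.String("MyBrand"),
                },
            })
            if err != nil {
                fmt.Println("SetSMSAttributes failed:", err)
            }
        }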

    " + } + }, + "MessageAttributeMap": { + "base": null, + "refs": { + "PublishInput$MessageAttributes": "

    Message attributes for Publish action.

    " + } + }, + "MessageAttributeValue": { + "base": "

    The user-specified message attribute value. For string data types, the value attribute has the same restrictions on the content as the message body. For more information, see Publish.

    Name, type, and value must not be empty or null. In addition, the message body should not be empty or null. All parts of the message attribute, including name, type, and value, are included in the message size restriction, which is currently 256 KB (262,144 bytes). For more information, see Using Amazon SNS Message Attributes.
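
    A minimal sketch of attaching one such attribute to a Publish call (the topic ARN is a placeholder):

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/sns"
        )

        func main() {
            svc := sns.New(session.Must(session.NewSession()))
            out, err := svc.Publish(&sns.PublishInput{
                TopicArn: aws.String("arn:aws:sns:us-east-1:123456789012:MyTopic"), // placeholder
                Message:  aws.String("order shipped"),
                MessageAttributes: map[string]*sns.MessageAttributeValue{
                    // Name, type, and value must all be non-empty.
                    "eventType": {
                        DataType:    aws.String("String"),
                        StringValue: aws.String("shipment"),
                    },
                },
            })
            if err == nil {
                fmt.Println("message id:", aws.StringValue(out.MessageId))
            }
        }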

    ", + "refs": { + "MessageAttributeMap$value": null + } + }, + "NotFoundException": { + "base": "

    Indicates that the requested resource does not exist.

    ", + "refs": { + } + }, + "OptInPhoneNumberInput": { + "base": "

    Input for the OptInPhoneNumber action.

    ", + "refs": { + } + }, + "OptInPhoneNumberResponse": { + "base": "

    The response for the OptInPhoneNumber action.

    ", + "refs": { + } + }, + "PhoneNumber": { + "base": null, + "refs": { + "CheckIfPhoneNumberIsOptedOutInput$phoneNumber": "

    The phone number for which you want to check the opt out status.

    ", + "OptInPhoneNumberInput$phoneNumber": "

    The phone number to opt in.

    ", + "PhoneNumberList$member": null + } + }, + "PhoneNumberList": { + "base": null, + "refs": { + "ListPhoneNumbersOptedOutResponse$phoneNumbers": "

    A list of phone numbers that are opted out of receiving SMS messages. The list is paginated, and each page can contain up to 100 phone numbers.

    " + } + }, + "PlatformApplication": { + "base": "

    Platform application object.

    ", + "refs": { + "ListOfPlatformApplications$member": null + } + }, + "PlatformApplicationDisabledException": { + "base": "

    Exception error indicating platform application disabled.

    ", + "refs": { + } + }, + "PublishInput": { + "base": "

    Input for Publish action.

    ", + "refs": { + } + }, + "PublishResponse": { + "base": "

    Response for Publish action.

    ", + "refs": { + } + }, + "RemovePermissionInput": { + "base": "

    Input for RemovePermission action.

    ", + "refs": { + } + }, + "SetEndpointAttributesInput": { + "base": "

    Input for SetEndpointAttributes action.

    ", + "refs": { + } + }, + "SetPlatformApplicationAttributesInput": { + "base": "

    Input for SetPlatformApplicationAttributes action.

    ", + "refs": { + } + }, + "SetSMSAttributesInput": { + "base": "

    The input for the SetSMSAttributes action.

    ", + "refs": { + } + }, + "SetSMSAttributesResponse": { + "base": "

    The response for the SetSMSAttributes action.

    ", + "refs": { + } + }, + "SetSubscriptionAttributesInput": { + "base": "

    Input for SetSubscriptionAttributes action.

    ", + "refs": { + } + }, + "SetTopicAttributesInput": { + "base": "

    Input for SetTopicAttributes action.

    ", + "refs": { + } + }, + "String": { + "base": null, + "refs": { + "CreateEndpointResponse$EndpointArn": "

    EndpointArn returned from CreateEndpoint action.

    ", + "CreatePlatformApplicationInput$Name": "

    Application names must be made up of only uppercase and lowercase ASCII letters, numbers, underscores, hyphens, and periods, and must be between 1 and 256 characters long.

    ", + "CreatePlatformApplicationInput$Platform": "

    The following platforms are supported: ADM (Amazon Device Messaging), APNS (Apple Push Notification Service), APNS_SANDBOX, and GCM (Google Cloud Messaging).

    ", + "CreatePlatformApplicationResponse$PlatformApplicationArn": "

    PlatformApplicationArn is returned.

    ", + "CreatePlatformEndpointInput$PlatformApplicationArn": "

    PlatformApplicationArn returned from CreatePlatformApplication is used to create an endpoint.

    ", + "CreatePlatformEndpointInput$Token": "

    Unique identifier created by the notification service for an app on a device. The specific name for Token will vary, depending on which notification service is being used. For example, when using APNS as the notification service, you need the device token. Alternatively, when using GCM or ADM, the device token equivalent is called the registration ID.
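
    For illustration only, registering such a token might look like the following; the application ARN and token are placeholders:

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/sns"
        )

        func main() {
            svc := sns.New(session.Must(session.NewSession()))
            out, err := svc.CreatePlatformEndpoint(&sns.CreatePlatformEndpointInput{
                // Placeholders: the ARN comes from CreatePlatformApplication; the token
                // comes from the notification service (APNS device token, GCM registration ID, ...).
                PlatformApplicationArn: aws.String("arn:aws:sns:us-east-1:123456789012:app/GCM/MyApp"),
                Token:                  aws.String("EXAMPLE-DEVICE-TOKEN"),
            })
            if err == nil {
                fmt.Println("endpoint arn:", aws.StringValue(out.EndpointArn))
            }
        }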

    ", + "CreatePlatformEndpointInput$CustomUserData": "

    Arbitrary user data to associate with the endpoint. Amazon SNS does not use this data. The data must be in UTF-8 format and less than 2KB.

    ", + "DeleteEndpointInput$EndpointArn": "

    EndpointArn of endpoint to delete.

    ", + "DeletePlatformApplicationInput$PlatformApplicationArn": "

    PlatformApplicationArn of platform application object to delete.

    ", + "Endpoint$EndpointArn": "

    EndpointArn for mobile app and device.

    ", + "GetEndpointAttributesInput$EndpointArn": "

    EndpointArn for GetEndpointAttributes input.

    ", + "GetPlatformApplicationAttributesInput$PlatformApplicationArn": "

    PlatformApplicationArn for GetPlatformApplicationAttributesInput.

    ", + "ListEndpointsByPlatformApplicationInput$PlatformApplicationArn": "

    PlatformApplicationArn for ListEndpointsByPlatformApplicationInput action.

    ", + "ListEndpointsByPlatformApplicationInput$NextToken": "

    NextToken string is used when calling ListEndpointsByPlatformApplication action to retrieve additional records that are available after the first page of results.

    ", + "ListEndpointsByPlatformApplicationResponse$NextToken": "

    NextToken string is returned when calling ListEndpointsByPlatformApplication action if additional records are available after the first page of results.

    ", + "ListPlatformApplicationsInput$NextToken": "

    NextToken string is used when calling ListPlatformApplications action to retrieve additional records that are available after the first page of results.

    ", + "ListPlatformApplicationsResponse$NextToken": "

    NextToken string is returned when calling ListPlatformApplications action if additional records are available after the first page of results.

    ", + "ListString$member": null, + "MapStringToString$key": null, + "MapStringToString$value": null, + "MessageAttributeMap$key": null, + "MessageAttributeValue$DataType": "

    Amazon SNS supports the following logical data types: String, Number, and Binary. For more information, see Message Attribute Data Types.

    ", + "MessageAttributeValue$StringValue": "

    Strings are Unicode with UTF8 binary encoding. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters.

    ", + "PlatformApplication$PlatformApplicationArn": "

    PlatformApplicationArn for platform application object.

    ", + "PublishInput$TargetArn": "

    Either TopicArn or EndpointArn, but not both.

    If you don't specify a value for the TargetArn parameter, you must specify a value for the PhoneNumber or TopicArn parameters.

    ", + "PublishInput$PhoneNumber": "

    The phone number to which you want to deliver an SMS message. Use E.164 format.

    If you don't specify a value for the PhoneNumber parameter, you must specify a value for the TargetArn or TopicArn parameters.

    ", + "SetEndpointAttributesInput$EndpointArn": "

    EndpointArn used for SetEndpointAttributes action.

    ", + "SetPlatformApplicationAttributesInput$PlatformApplicationArn": "

    PlatformApplicationArn for SetPlatformApplicationAttributes action.

    " + } + }, + "SubscribeInput": { + "base": "

    Input for Subscribe action.

    ", + "refs": { + } + }, + "SubscribeResponse": { + "base": "

    Response for Subscribe action.

    ", + "refs": { + } + }, + "Subscription": { + "base": "

    A wrapper type for the attributes of an Amazon SNS subscription.

    ", + "refs": { + "SubscriptionsList$member": null + } + }, + "SubscriptionAttributesMap": { + "base": null, + "refs": { + "GetSubscriptionAttributesResponse$Attributes": "

    A map of the subscription's attributes. Attributes in this map include the following:

    • SubscriptionArn -- the subscription's ARN

    • TopicArn -- the topic ARN that the subscription is associated with

    • Owner -- the AWS account ID of the subscription's owner

    • ConfirmationWasAuthenticated -- true if the subscription confirmation request was authenticated

    • DeliveryPolicy -- the JSON serialization of the subscription's delivery policy

    • EffectiveDeliveryPolicy -- the JSON serialization of the effective delivery policy that takes into account the topic delivery policy and account system defaults

    " + } + }, + "SubscriptionLimitExceededException": { + "base": "

    Indicates that the customer already owns the maximum allowed number of subscriptions.

    ", + "refs": { + } + }, + "SubscriptionsList": { + "base": null, + "refs": { + "ListSubscriptionsByTopicResponse$Subscriptions": "

    A list of subscriptions.

    ", + "ListSubscriptionsResponse$Subscriptions": "

    A list of subscriptions.

    " + } + }, + "ThrottledException": { + "base": "

    Indicates that the rate at which requests have been submitted for this action exceeds the limit for your account.

    ", + "refs": { + } + }, + "Topic": { + "base": "

    A wrapper type for the topic's Amazon Resource Name (ARN). To retrieve a topic's attributes, use GetTopicAttributes.

    ", + "refs": { + "TopicsList$member": null + } + }, + "TopicAttributesMap": { + "base": null, + "refs": { + "GetTopicAttributesResponse$Attributes": "

    A map of the topic's attributes. Attributes in this map include the following:

    • TopicArn -- the topic's ARN

    • Owner -- the AWS account ID of the topic's owner

    • Policy -- the JSON serialization of the topic's access control policy

    • DisplayName -- the human-readable name used in the \"From\" field for notifications to email and email-json endpoints

    • SubscriptionsPending -- the number of subscriptions pending confirmation on this topic

    • SubscriptionsConfirmed -- the number of confirmed subscriptions on this topic

    • SubscriptionsDeleted -- the number of deleted subscriptions on this topic

    • DeliveryPolicy -- the JSON serialization of the topic's delivery policy

    • EffectiveDeliveryPolicy -- the JSON serialization of the effective delivery policy that takes into account system defaults
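
    A small sketch of reading this map with the Go SDK (the topic ARN is a placeholder):

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/sns"
        )

        func main() {
            svc := sns.New(session.Must(session.NewSession()))
            out, err := svc.GetTopicAttributes(&sns.GetTopicAttributesInput{
                TopicArn: aws.String("arn:aws:sns:us-east-1:123456789012:MyTopic"), // placeholder
            })
            if err == nil {
                // Attributes is a map keyed by the names listed above.
                fmt.Println("owner:", aws.StringValue(out.Attributes["Owner"]))
            }
        }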

    " + } + }, + "TopicLimitExceededException": { + "base": "

    Indicates that the customer already owns the maximum allowed number of topics.

    ", + "refs": { + } + }, + "TopicsList": { + "base": null, + "refs": { + "ListTopicsResponse$Topics": "

    A list of topic ARNs.

    " + } + }, + "UnsubscribeInput": { + "base": "

    Input for Unsubscribe action.

    ", + "refs": { + } + }, + "account": { + "base": null, + "refs": { + "Subscription$Owner": "

    The subscription's owner.

    " + } + }, + "action": { + "base": null, + "refs": { + "ActionsList$member": null + } + }, + "attributeName": { + "base": null, + "refs": { + "SetSubscriptionAttributesInput$AttributeName": "

    The name of the attribute you want to set. Only a subset of the subscription's attributes are mutable.

    Valid values: DeliveryPolicy | RawMessageDelivery

    ", + "SetTopicAttributesInput$AttributeName": "

    The name of the attribute you want to set. Only a subset of the topic's attributes are mutable.

    Valid values: Policy | DisplayName | DeliveryPolicy

    ", + "SubscriptionAttributesMap$key": null, + "TopicAttributesMap$key": null + } + }, + "attributeValue": { + "base": null, + "refs": { + "SetSubscriptionAttributesInput$AttributeValue": "

    The new value for the attribute in JSON format.

    ", + "SetTopicAttributesInput$AttributeValue": "

    The new value for the attribute.

    ", + "SubscriptionAttributesMap$value": null, + "TopicAttributesMap$value": null + } + }, + "authenticateOnUnsubscribe": { + "base": null, + "refs": { + "ConfirmSubscriptionInput$AuthenticateOnUnsubscribe": "

    Disallows unauthenticated unsubscribes of the subscription. If the value of this parameter is true and the request has an AWS signature, then only the topic owner and the subscription owner can unsubscribe the endpoint. The unsubscribe action requires AWS authentication.

    " + } + }, + "boolean": { + "base": null, + "refs": { + "CheckIfPhoneNumberIsOptedOutResponse$isOptedOut": "

    Indicates whether the phone number is opted out:

    • true – The phone number is opted out, meaning you cannot publish SMS messages to it.

    • false – The phone number is opted in, meaning you can publish SMS messages to it.

    " + } + }, + "delegate": { + "base": null, + "refs": { + "DelegatesList$member": null + } + }, + "endpoint": { + "base": null, + "refs": { + "SubscribeInput$Endpoint": "

    The endpoint that you want to receive notifications. Endpoints vary by protocol:

    • For the http protocol, the endpoint is a URL beginning with \"http://\"

    • For the https protocol, the endpoint is a URL beginning with \"https://\"

    • For the email protocol, the endpoint is an email address

    • For the email-json protocol, the endpoint is an email address

    • For the sms protocol, the endpoint is a phone number of an SMS-enabled device

    • For the sqs protocol, the endpoint is the ARN of an Amazon SQS queue

    • For the application protocol, the endpoint is the EndpointArn of a mobile app and device.

    • For the lambda protocol, the endpoint is the ARN of an AWS Lambda function.
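
    As one example sketch, subscribing an Amazon SQS queue (the sqs protocol) could look like this; both ARNs are placeholders:

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/sns"
        )

        func main() {
            svc := sns.New(session.Must(session.NewSession()))
            out, err := svc.Subscribe(&sns.SubscribeInput{
                TopicArn: aws.String("arn:aws:sns:us-east-1:123456789012:MyTopic"), // placeholder
                Protocol: aws.String("sqs"),
                Endpoint: aws.String("arn:aws:sqs:us-east-1:123456789012:MyQueue"), // placeholder queue ARN
            })
            if err == nil {
                fmt.Println("subscription arn:", aws.StringValue(out.SubscriptionArn))
            }
        }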

    ", + "Subscription$Endpoint": "

    The subscription's endpoint (format depends on the protocol).

    " + } + }, + "label": { + "base": null, + "refs": { + "AddPermissionInput$Label": "

    A unique identifier for the new policy statement.

    ", + "RemovePermissionInput$Label": "

    The unique label of the statement you want to remove.

    " + } + }, + "message": { + "base": null, + "refs": { + "PublishInput$Message": "

    The message you want to send to the topic.

    If you want to send the same message to all transport protocols, include the text of the message as a String value.

    If you want to send different messages for each transport protocol, set the value of the MessageStructure parameter to json and use a JSON object for the Message parameter. See the Examples section for the format of the JSON object.

    Constraints: Messages must be UTF-8 encoded strings at most 256 KB in size (262144 bytes, not 262144 characters).

    JSON-specific constraints:

    • Keys in the JSON object that correspond to supported transport protocols must have simple JSON string values.

    • The values will be parsed (unescaped) before they are used in outgoing messages.

    • Outbound notifications are JSON encoded (meaning that the characters will be reescaped for sending).

    • Values have a minimum length of 0 (the empty string, \"\", is allowed).

    • Values have a maximum length bounded by the overall message size (so, including multiple protocols may limit message sizes).

    • Non-string values will cause the key to be ignored.

    • Keys that do not correspond to supported transport protocols are ignored.

    • Duplicate keys are not allowed.

    • Failure to parse or validate any key or value in the message will cause the Publish call to return an error (no partial delivery).

    " + } + }, + "messageId": { + "base": null, + "refs": { + "PublishResponse$MessageId": "

    Unique identifier assigned to the published message.

    Length Constraint: Maximum 100 characters

    " + } + }, + "messageStructure": { + "base": null, + "refs": { + "PublishInput$MessageStructure": "

    Set MessageStructure to json if you want to send a different message for each protocol. For example, using one publish action, you can send a short message to your SMS subscribers and a longer message to your email subscribers. If you set MessageStructure to json, the value of the Message parameter must:

    • be a syntactically valid JSON object; and

    • contain at least a top-level JSON key of \"default\" with a value that is a string.

    You can define other top-level keys that define the message you want to send to a specific transport protocol (e.g., \"http\").

    For information about sending different messages for each protocol using the AWS Management Console, go to Create Different Messages for Each Protocol in the Amazon Simple Notification Service Getting Started Guide.

    Valid value: json
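
    Sketching that structure with the Go SDK (the topic ARN is a placeholder; note the required top-level default key):

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/sns"
        )

        func main() {
            svc := sns.New(session.Must(session.NewSession()))
            // "default" is required; "email" overrides the message for email subscribers.
            msg := `{"default": "short message", "email": "longer message for email subscribers"}`
            _, err := svc.Publish(&sns.PublishInput{
                TopicArn:         aws.String("arn:aws:sns:us-east-1:123456789012:MyTopic"), // placeholder
                Message:          aws.String(msg),
                MessageStructure: aws.String("json"),
            })
            if err != nil {
                fmt.Println("Publish failed:", err)
            }
        }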

    " + } + }, + "nextToken": { + "base": null, + "refs": { + "ListSubscriptionsByTopicInput$NextToken": "

    Token returned by the previous ListSubscriptionsByTopic request.

    ", + "ListSubscriptionsByTopicResponse$NextToken": "

    Token to pass along to the next ListSubscriptionsByTopic request. This element is returned if there are more subscriptions to retrieve.

    ", + "ListSubscriptionsInput$NextToken": "

    Token returned by the previous ListSubscriptions request.

    ", + "ListSubscriptionsResponse$NextToken": "

    Token to pass along to the next ListSubscriptions request. This element is returned if there are more subscriptions to retrieve.

    ", + "ListTopicsInput$NextToken": "

    Token returned by the previous ListTopics request.

    ", + "ListTopicsResponse$NextToken": "

    Token to pass along to the next ListTopics request. This element is returned if there are additional topics to retrieve.
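
    A minimal sketch that lets the SDK's paginator drive this NextToken exchange:

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/sns"
        )

        func main() {
            svc := sns.New(session.Must(session.NewSession()))
            // ListTopicsPages follows NextToken until the last page is reached.
            err := svc.ListTopicsPages(&sns.ListTopicsInput{},
                func(page *sns.ListTopicsOutput, lastPage bool) bool {
                    for _, t := range page.Topics {
                        fmt.Println(aws.StringValue(t.TopicArn))
                    }
                    return true // keep paging
                })
            if err != nil {
                fmt.Println("ListTopics failed:", err)
            }
        }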

    " + } + }, + "protocol": { + "base": null, + "refs": { + "SubscribeInput$Protocol": "

    The protocol you want to use. Supported protocols include:

    • http -- delivery of JSON-encoded message via HTTP POST

    • https -- delivery of JSON-encoded message via HTTPS POST

    • email -- delivery of message via SMTP

    • email-json -- delivery of JSON-encoded message via SMTP

    • sms -- delivery of message via SMS

    • sqs -- delivery of JSON-encoded message to an Amazon SQS queue

    • application -- delivery of JSON-encoded message to an EndpointArn for a mobile app and device.

    • lambda -- delivery of JSON-encoded message to an AWS Lambda function.

    ", + "Subscription$Protocol": "

    The subscription's protocol.

    " + } + }, + "string": { + "base": null, + "refs": { + "AuthorizationErrorException$message": null, + "EndpointDisabledException$message": "

    Message for endpoint disabled.

    ", + "InternalErrorException$message": null, + "InvalidParameterException$message": null, + "InvalidParameterValueException$message": null, + "ListPhoneNumbersOptedOutInput$nextToken": "

    A NextToken string is used when you call the ListPhoneNumbersOptedOut action to retrieve additional records that are available after the first page of results.

    ", + "ListPhoneNumbersOptedOutResponse$nextToken": "

    A NextToken string is returned when you call the ListPhoneNumbersOptedOut action if additional records are available after the first page of results.

    ", + "NotFoundException$message": null, + "PlatformApplicationDisabledException$message": "

    Message for platform application disabled.

    ", + "SubscriptionLimitExceededException$message": null, + "ThrottledException$message": null, + "TopicLimitExceededException$message": null + } + }, + "subject": { + "base": null, + "refs": { + "PublishInput$Subject": "

    Optional parameter to be used as the \"Subject\" line when the message is delivered to email endpoints. This field will also be included, if present, in the standard JSON messages delivered to other endpoints.

    Constraints: Subjects must be ASCII text that begins with a letter, number, or punctuation mark; must not include line breaks or control characters; and must be less than 100 characters long.

    " + } + }, + "subscriptionARN": { + "base": null, + "refs": { + "ConfirmSubscriptionResponse$SubscriptionArn": "

    The ARN of the created subscription.

    ", + "GetSubscriptionAttributesInput$SubscriptionArn": "

    The ARN of the subscription whose properties you want to get.

    ", + "SetSubscriptionAttributesInput$SubscriptionArn": "

    The ARN of the subscription to modify.

    ", + "SubscribeResponse$SubscriptionArn": "

    The ARN of the subscription, if the service was able to create a subscription immediately (without requiring endpoint owner confirmation).

    ", + "Subscription$SubscriptionArn": "

    The subscription's ARN.

    ", + "UnsubscribeInput$SubscriptionArn": "

    The ARN of the subscription to be deleted.

    " + } + }, + "token": { + "base": null, + "refs": { + "ConfirmSubscriptionInput$Token": "

    Short-lived token sent to an endpoint during the Subscribe action.

    " + } + }, + "topicARN": { + "base": null, + "refs": { + "AddPermissionInput$TopicArn": "

    The ARN of the topic whose access control policy you wish to modify.

    ", + "ConfirmSubscriptionInput$TopicArn": "

    The ARN of the topic for which you wish to confirm a subscription.

    ", + "CreateTopicResponse$TopicArn": "

    The Amazon Resource Name (ARN) assigned to the created topic.

    ", + "DeleteTopicInput$TopicArn": "

    The ARN of the topic you want to delete.

    ", + "GetTopicAttributesInput$TopicArn": "

    The ARN of the topic whose properties you want to get.

    ", + "ListSubscriptionsByTopicInput$TopicArn": "

    The ARN of the topic for which you wish to find subscriptions.

    ", + "PublishInput$TopicArn": "

    The topic you want to publish to.

    If you don't specify a value for the TopicArn parameter, you must specify a value for the PhoneNumber or TargetArn parameters.

    ", + "RemovePermissionInput$TopicArn": "

    The ARN of the topic whose access control policy you wish to modify.

    ", + "SetTopicAttributesInput$TopicArn": "

    The ARN of the topic to modify.

    ", + "SubscribeInput$TopicArn": "

    The ARN of the topic you want to subscribe to.

    ", + "Subscription$TopicArn": "

    The ARN of the subscription's topic.

    ", + "Topic$TopicArn": "

    The topic's ARN.

    " + } + }, + "topicName": { + "base": null, + "refs": { + "CreateTopicInput$Name": "

    The name of the topic you want to create.

    Constraints: Topic names must be made up of only uppercase and lowercase ASCII letters, numbers, underscores, and hyphens, and must be between 1 and 256 characters long.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/sns/2010-03-31/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/sns/2010-03-31/paginators-1.json new file mode 100644 index 000000000..455e4708e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/sns/2010-03-31/paginators-1.json @@ -0,0 +1,29 @@ +{ + "pagination": { + "ListEndpointsByPlatformApplication": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "Endpoints" + }, + "ListPlatformApplications": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "PlatformApplications" + }, + "ListSubscriptions": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "Subscriptions" + }, + "ListSubscriptionsByTopic": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "Subscriptions" + }, + "ListTopics": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "Topics" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/sqs/2012-11-05/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/sqs/2012-11-05/api-2.json new file mode 100644 index 000000000..b30100f88 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/sqs/2012-11-05/api-2.json @@ -0,0 +1,950 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2012-11-05", + "endpointPrefix":"sqs", + "protocol":"query", + "serviceAbbreviation":"Amazon SQS", + "serviceFullName":"Amazon Simple Queue Service", + "signatureVersion":"v4", + "xmlNamespace":"http://queue.amazonaws.com/doc/2012-11-05/" + }, + "operations":{ + "AddPermission":{ + "name":"AddPermission", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddPermissionRequest"}, + "errors":[ + {"shape":"OverLimit"} + ] + }, + "ChangeMessageVisibility":{ + "name":"ChangeMessageVisibility", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ChangeMessageVisibilityRequest"}, + "errors":[ + {"shape":"MessageNotInflight"}, + {"shape":"ReceiptHandleIsInvalid"} + ] + }, + "ChangeMessageVisibilityBatch":{ + "name":"ChangeMessageVisibilityBatch", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ChangeMessageVisibilityBatchRequest"}, + "output":{ + "shape":"ChangeMessageVisibilityBatchResult", + "resultWrapper":"ChangeMessageVisibilityBatchResult" + }, + "errors":[ + {"shape":"TooManyEntriesInBatchRequest"}, + {"shape":"EmptyBatchRequest"}, + {"shape":"BatchEntryIdsNotDistinct"}, + {"shape":"InvalidBatchEntryId"} + ] + }, + "CreateQueue":{ + "name":"CreateQueue", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateQueueRequest"}, + "output":{ + "shape":"CreateQueueResult", + "resultWrapper":"CreateQueueResult" + }, + "errors":[ + {"shape":"QueueDeletedRecently"}, + {"shape":"QueueNameExists"} + ] + }, + "DeleteMessage":{ + "name":"DeleteMessage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteMessageRequest"}, + "errors":[ + {"shape":"InvalidIdFormat"}, + {"shape":"ReceiptHandleIsInvalid"} + ] + }, + "DeleteMessageBatch":{ + "name":"DeleteMessageBatch", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteMessageBatchRequest"}, + "output":{ + "shape":"DeleteMessageBatchResult", + "resultWrapper":"DeleteMessageBatchResult" + }, + "errors":[ + {"shape":"TooManyEntriesInBatchRequest"}, + {"shape":"EmptyBatchRequest"}, + {"shape":"BatchEntryIdsNotDistinct"}, + 
{"shape":"InvalidBatchEntryId"} + ] + }, + "DeleteQueue":{ + "name":"DeleteQueue", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteQueueRequest"} + }, + "GetQueueAttributes":{ + "name":"GetQueueAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetQueueAttributesRequest"}, + "output":{ + "shape":"GetQueueAttributesResult", + "resultWrapper":"GetQueueAttributesResult" + }, + "errors":[ + {"shape":"InvalidAttributeName"} + ] + }, + "GetQueueUrl":{ + "name":"GetQueueUrl", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetQueueUrlRequest"}, + "output":{ + "shape":"GetQueueUrlResult", + "resultWrapper":"GetQueueUrlResult" + }, + "errors":[ + {"shape":"QueueDoesNotExist"} + ] + }, + "ListDeadLetterSourceQueues":{ + "name":"ListDeadLetterSourceQueues", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDeadLetterSourceQueuesRequest"}, + "output":{ + "shape":"ListDeadLetterSourceQueuesResult", + "resultWrapper":"ListDeadLetterSourceQueuesResult" + }, + "errors":[ + {"shape":"QueueDoesNotExist"} + ] + }, + "ListQueues":{ + "name":"ListQueues", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListQueuesRequest"}, + "output":{ + "shape":"ListQueuesResult", + "resultWrapper":"ListQueuesResult" + } + }, + "PurgeQueue":{ + "name":"PurgeQueue", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PurgeQueueRequest"}, + "errors":[ + {"shape":"QueueDoesNotExist"}, + {"shape":"PurgeQueueInProgress"} + ] + }, + "ReceiveMessage":{ + "name":"ReceiveMessage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReceiveMessageRequest"}, + "output":{ + "shape":"ReceiveMessageResult", + "resultWrapper":"ReceiveMessageResult" + }, + "errors":[ + {"shape":"OverLimit"} + ] + }, + "RemovePermission":{ + "name":"RemovePermission", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemovePermissionRequest"} + }, + "SendMessage":{ + "name":"SendMessage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SendMessageRequest"}, + "output":{ + "shape":"SendMessageResult", + "resultWrapper":"SendMessageResult" + }, + "errors":[ + {"shape":"InvalidMessageContents"}, + {"shape":"UnsupportedOperation"} + ] + }, + "SendMessageBatch":{ + "name":"SendMessageBatch", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SendMessageBatchRequest"}, + "output":{ + "shape":"SendMessageBatchResult", + "resultWrapper":"SendMessageBatchResult" + }, + "errors":[ + {"shape":"TooManyEntriesInBatchRequest"}, + {"shape":"EmptyBatchRequest"}, + {"shape":"BatchEntryIdsNotDistinct"}, + {"shape":"BatchRequestTooLong"}, + {"shape":"InvalidBatchEntryId"}, + {"shape":"UnsupportedOperation"} + ] + }, + "SetQueueAttributes":{ + "name":"SetQueueAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetQueueAttributesRequest"}, + "errors":[ + {"shape":"InvalidAttributeName"} + ] + } + }, + "shapes":{ + "AWSAccountIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"AWSAccountId" + }, + "flattened":true + }, + "ActionNameList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ActionName" + }, + "flattened":true + }, + "AddPermissionRequest":{ + "type":"structure", + "required":[ + "QueueUrl", + "Label", + "AWSAccountIds", + "Actions" + ], + "members":{ + "QueueUrl":{"shape":"String"}, + "Label":{"shape":"String"}, + 
"AWSAccountIds":{"shape":"AWSAccountIdList"}, + "Actions":{"shape":"ActionNameList"} + } + }, + "AttributeMap":{ + "type":"map", + "key":{ + "shape":"QueueAttributeName", + "locationName":"Name" + }, + "value":{ + "shape":"String", + "locationName":"Value" + }, + "flattened":true, + "locationName":"Attribute" + }, + "AttributeNameList":{ + "type":"list", + "member":{ + "shape":"QueueAttributeName", + "locationName":"AttributeName" + }, + "flattened":true + }, + "BatchEntryIdsNotDistinct":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AWS.SimpleQueueService.BatchEntryIdsNotDistinct", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "BatchRequestTooLong":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AWS.SimpleQueueService.BatchRequestTooLong", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "BatchResultErrorEntry":{ + "type":"structure", + "required":[ + "Id", + "SenderFault", + "Code" + ], + "members":{ + "Id":{"shape":"String"}, + "SenderFault":{"shape":"Boolean"}, + "Code":{"shape":"String"}, + "Message":{"shape":"String"} + } + }, + "BatchResultErrorEntryList":{ + "type":"list", + "member":{ + "shape":"BatchResultErrorEntry", + "locationName":"BatchResultErrorEntry" + }, + "flattened":true + }, + "Binary":{"type":"blob"}, + "BinaryList":{ + "type":"list", + "member":{ + "shape":"Binary", + "locationName":"BinaryListValue" + } + }, + "Boolean":{"type":"boolean"}, + "ChangeMessageVisibilityBatchRequest":{ + "type":"structure", + "required":[ + "QueueUrl", + "Entries" + ], + "members":{ + "QueueUrl":{"shape":"String"}, + "Entries":{"shape":"ChangeMessageVisibilityBatchRequestEntryList"} + } + }, + "ChangeMessageVisibilityBatchRequestEntry":{ + "type":"structure", + "required":[ + "Id", + "ReceiptHandle" + ], + "members":{ + "Id":{"shape":"String"}, + "ReceiptHandle":{"shape":"String"}, + "VisibilityTimeout":{"shape":"Integer"} + } + }, + "ChangeMessageVisibilityBatchRequestEntryList":{ + "type":"list", + "member":{ + "shape":"ChangeMessageVisibilityBatchRequestEntry", + "locationName":"ChangeMessageVisibilityBatchRequestEntry" + }, + "flattened":true + }, + "ChangeMessageVisibilityBatchResult":{ + "type":"structure", + "required":[ + "Successful", + "Failed" + ], + "members":{ + "Successful":{"shape":"ChangeMessageVisibilityBatchResultEntryList"}, + "Failed":{"shape":"BatchResultErrorEntryList"} + } + }, + "ChangeMessageVisibilityBatchResultEntry":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{"shape":"String"} + } + }, + "ChangeMessageVisibilityBatchResultEntryList":{ + "type":"list", + "member":{ + "shape":"ChangeMessageVisibilityBatchResultEntry", + "locationName":"ChangeMessageVisibilityBatchResultEntry" + }, + "flattened":true + }, + "ChangeMessageVisibilityRequest":{ + "type":"structure", + "required":[ + "QueueUrl", + "ReceiptHandle", + "VisibilityTimeout" + ], + "members":{ + "QueueUrl":{"shape":"String"}, + "ReceiptHandle":{"shape":"String"}, + "VisibilityTimeout":{"shape":"Integer"} + } + }, + "CreateQueueRequest":{ + "type":"structure", + "required":["QueueName"], + "members":{ + "QueueName":{"shape":"String"}, + "Attributes":{ + "shape":"AttributeMap", + "locationName":"Attribute" + } + } + }, + "CreateQueueResult":{ + "type":"structure", + "members":{ + "QueueUrl":{"shape":"String"} + } + }, + "DeleteMessageBatchRequest":{ + "type":"structure", + "required":[ + "QueueUrl", + "Entries" + ], + "members":{ + "QueueUrl":{"shape":"String"}, + 
"Entries":{"shape":"DeleteMessageBatchRequestEntryList"} + } + }, + "DeleteMessageBatchRequestEntry":{ + "type":"structure", + "required":[ + "Id", + "ReceiptHandle" + ], + "members":{ + "Id":{"shape":"String"}, + "ReceiptHandle":{"shape":"String"} + } + }, + "DeleteMessageBatchRequestEntryList":{ + "type":"list", + "member":{ + "shape":"DeleteMessageBatchRequestEntry", + "locationName":"DeleteMessageBatchRequestEntry" + }, + "flattened":true + }, + "DeleteMessageBatchResult":{ + "type":"structure", + "required":[ + "Successful", + "Failed" + ], + "members":{ + "Successful":{"shape":"DeleteMessageBatchResultEntryList"}, + "Failed":{"shape":"BatchResultErrorEntryList"} + } + }, + "DeleteMessageBatchResultEntry":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{"shape":"String"} + } + }, + "DeleteMessageBatchResultEntryList":{ + "type":"list", + "member":{ + "shape":"DeleteMessageBatchResultEntry", + "locationName":"DeleteMessageBatchResultEntry" + }, + "flattened":true + }, + "DeleteMessageRequest":{ + "type":"structure", + "required":[ + "QueueUrl", + "ReceiptHandle" + ], + "members":{ + "QueueUrl":{"shape":"String"}, + "ReceiptHandle":{"shape":"String"} + } + }, + "DeleteQueueRequest":{ + "type":"structure", + "required":["QueueUrl"], + "members":{ + "QueueUrl":{"shape":"String"} + } + }, + "EmptyBatchRequest":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AWS.SimpleQueueService.EmptyBatchRequest", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "GetQueueAttributesRequest":{ + "type":"structure", + "required":["QueueUrl"], + "members":{ + "QueueUrl":{"shape":"String"}, + "AttributeNames":{"shape":"AttributeNameList"} + } + }, + "GetQueueAttributesResult":{ + "type":"structure", + "members":{ + "Attributes":{ + "shape":"AttributeMap", + "locationName":"Attribute" + } + } + }, + "GetQueueUrlRequest":{ + "type":"structure", + "required":["QueueName"], + "members":{ + "QueueName":{"shape":"String"}, + "QueueOwnerAWSAccountId":{"shape":"String"} + } + }, + "GetQueueUrlResult":{ + "type":"structure", + "members":{ + "QueueUrl":{"shape":"String"} + } + }, + "Integer":{"type":"integer"}, + "InvalidAttributeName":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidBatchEntryId":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AWS.SimpleQueueService.InvalidBatchEntryId", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidIdFormat":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidMessageContents":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ListDeadLetterSourceQueuesRequest":{ + "type":"structure", + "required":["QueueUrl"], + "members":{ + "QueueUrl":{"shape":"String"} + } + }, + "ListDeadLetterSourceQueuesResult":{ + "type":"structure", + "required":["queueUrls"], + "members":{ + "queueUrls":{"shape":"QueueUrlList"} + } + }, + "ListQueuesRequest":{ + "type":"structure", + "members":{ + "QueueNamePrefix":{"shape":"String"} + } + }, + "ListQueuesResult":{ + "type":"structure", + "members":{ + "QueueUrls":{"shape":"QueueUrlList"} + } + }, + "Message":{ + "type":"structure", + "members":{ + "MessageId":{"shape":"String"}, + "ReceiptHandle":{"shape":"String"}, + "MD5OfBody":{"shape":"String"}, + "Body":{"shape":"String"}, + "Attributes":{ + "shape":"AttributeMap", + "locationName":"Attribute" + }, + "MD5OfMessageAttributes":{"shape":"String"}, + "MessageAttributes":{ + "shape":"MessageAttributeMap", + 
"locationName":"MessageAttribute" + } + } + }, + "MessageAttributeMap":{ + "type":"map", + "key":{ + "shape":"String", + "locationName":"Name" + }, + "value":{ + "shape":"MessageAttributeValue", + "locationName":"Value" + }, + "flattened":true + }, + "MessageAttributeName":{"type":"string"}, + "MessageAttributeNameList":{ + "type":"list", + "member":{ + "shape":"MessageAttributeName", + "locationName":"MessageAttributeName" + }, + "flattened":true + }, + "MessageAttributeValue":{ + "type":"structure", + "required":["DataType"], + "members":{ + "StringValue":{"shape":"String"}, + "BinaryValue":{"shape":"Binary"}, + "StringListValues":{ + "shape":"StringList", + "flattened":true, + "locationName":"StringListValue" + }, + "BinaryListValues":{ + "shape":"BinaryList", + "flattened":true, + "locationName":"BinaryListValue" + }, + "DataType":{"shape":"String"} + } + }, + "MessageList":{ + "type":"list", + "member":{ + "shape":"Message", + "locationName":"Message" + }, + "flattened":true + }, + "MessageNotInflight":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AWS.SimpleQueueService.MessageNotInflight", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "OverLimit":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"OverLimit", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "PurgeQueueInProgress":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AWS.SimpleQueueService.PurgeQueueInProgress", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "PurgeQueueRequest":{ + "type":"structure", + "required":["QueueUrl"], + "members":{ + "QueueUrl":{"shape":"String"} + } + }, + "QueueAttributeName":{ + "type":"string", + "enum":[ + "Policy", + "VisibilityTimeout", + "MaximumMessageSize", + "MessageRetentionPeriod", + "ApproximateNumberOfMessages", + "ApproximateNumberOfMessagesNotVisible", + "CreatedTimestamp", + "LastModifiedTimestamp", + "QueueArn", + "ApproximateNumberOfMessagesDelayed", + "DelaySeconds", + "ReceiveMessageWaitTimeSeconds", + "RedrivePolicy" + ] + }, + "QueueDeletedRecently":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AWS.SimpleQueueService.QueueDeletedRecently", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "QueueDoesNotExist":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AWS.SimpleQueueService.NonExistentQueue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "QueueNameExists":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"QueueAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "QueueUrlList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"QueueUrl" + }, + "flattened":true + }, + "ReceiptHandleIsInvalid":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ReceiveMessageRequest":{ + "type":"structure", + "required":["QueueUrl"], + "members":{ + "QueueUrl":{"shape":"String"}, + "AttributeNames":{"shape":"AttributeNameList"}, + "MessageAttributeNames":{"shape":"MessageAttributeNameList"}, + "MaxNumberOfMessages":{"shape":"Integer"}, + "VisibilityTimeout":{"shape":"Integer"}, + "WaitTimeSeconds":{"shape":"Integer"} + } + }, + "ReceiveMessageResult":{ + "type":"structure", + "members":{ + "Messages":{"shape":"MessageList"} + } + }, + "RemovePermissionRequest":{ + "type":"structure", + "required":[ + "QueueUrl", + "Label" + ], + "members":{ + "QueueUrl":{"shape":"String"}, + 
"Label":{"shape":"String"} + } + }, + "SendMessageBatchRequest":{ + "type":"structure", + "required":[ + "QueueUrl", + "Entries" + ], + "members":{ + "QueueUrl":{"shape":"String"}, + "Entries":{"shape":"SendMessageBatchRequestEntryList"} + } + }, + "SendMessageBatchRequestEntry":{ + "type":"structure", + "required":[ + "Id", + "MessageBody" + ], + "members":{ + "Id":{"shape":"String"}, + "MessageBody":{"shape":"String"}, + "DelaySeconds":{"shape":"Integer"}, + "MessageAttributes":{ + "shape":"MessageAttributeMap", + "locationName":"MessageAttribute" + } + } + }, + "SendMessageBatchRequestEntryList":{ + "type":"list", + "member":{ + "shape":"SendMessageBatchRequestEntry", + "locationName":"SendMessageBatchRequestEntry" + }, + "flattened":true + }, + "SendMessageBatchResult":{ + "type":"structure", + "required":[ + "Successful", + "Failed" + ], + "members":{ + "Successful":{"shape":"SendMessageBatchResultEntryList"}, + "Failed":{"shape":"BatchResultErrorEntryList"} + } + }, + "SendMessageBatchResultEntry":{ + "type":"structure", + "required":[ + "Id", + "MessageId", + "MD5OfMessageBody" + ], + "members":{ + "Id":{"shape":"String"}, + "MessageId":{"shape":"String"}, + "MD5OfMessageBody":{"shape":"String"}, + "MD5OfMessageAttributes":{"shape":"String"} + } + }, + "SendMessageBatchResultEntryList":{ + "type":"list", + "member":{ + "shape":"SendMessageBatchResultEntry", + "locationName":"SendMessageBatchResultEntry" + }, + "flattened":true + }, + "SendMessageRequest":{ + "type":"structure", + "required":[ + "QueueUrl", + "MessageBody" + ], + "members":{ + "QueueUrl":{"shape":"String"}, + "MessageBody":{"shape":"String"}, + "DelaySeconds":{"shape":"Integer"}, + "MessageAttributes":{ + "shape":"MessageAttributeMap", + "locationName":"MessageAttribute" + } + } + }, + "SendMessageResult":{ + "type":"structure", + "members":{ + "MD5OfMessageBody":{"shape":"String"}, + "MD5OfMessageAttributes":{"shape":"String"}, + "MessageId":{"shape":"String"} + } + }, + "SetQueueAttributesRequest":{ + "type":"structure", + "required":[ + "QueueUrl", + "Attributes" + ], + "members":{ + "QueueUrl":{"shape":"String"}, + "Attributes":{ + "shape":"AttributeMap", + "locationName":"Attribute" + } + } + }, + "String":{"type":"string"}, + "StringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"StringListValue" + } + }, + "TooManyEntriesInBatchRequest":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AWS.SimpleQueueService.TooManyEntriesInBatchRequest", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "UnsupportedOperation":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AWS.SimpleQueueService.UnsupportedOperation", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/sqs/2012-11-05/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/sqs/2012-11-05/docs-2.json new file mode 100644 index 000000000..3722552f8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/sqs/2012-11-05/docs-2.json @@ -0,0 +1,504 @@ +{ + "version": "2.0", + "service": "

    Welcome to the Amazon Simple Queue Service API Reference. This section describes who should read this guide, how the guide is organized, and other resources related to the Amazon Simple Queue Service (Amazon SQS).

    Amazon SQS offers reliable and scalable hosted queues for storing messages as they travel between computers. By using Amazon SQS, you can move data between distributed components of your applications that perform different tasks without losing messages or requiring each component to be always available.

    Helpful Links:

    We also provide SDKs that enable you to access Amazon SQS from your preferred programming language. The SDKs contain functionality that automatically takes care of tasks such as:

    • Cryptographically signing your service requests

    • Retrying requests

    • Handling error responses

    For a list of available SDKs, go to Tools for Amazon Web Services.

    ", + "operations": { + "AddPermission": "

    Adds a permission to a queue for a specific principal. This allows for sharing access to the queue.

    When you create a queue, you have full control access rights for the queue. Only you (as owner of the queue) can grant or deny permissions to the queue. For more information about these permissions, see Shared Queues in the Amazon SQS Developer Guide.

    AddPermission writes an Amazon SQS-generated policy. If you want to write your own policy, use SetQueueAttributes to upload your policy. For more information about writing your own policy, see Using The Access Policy Language in the Amazon SQS Developer Guide.

    Some API actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

    ", + "ChangeMessageVisibility": "

    Changes the visibility timeout of a specified message in a queue to a new value. The maximum allowed timeout value is 12 hours. This means you can't extend the timeout of a message in an existing queue to more than a total visibility timeout of 12 hours. (For more information about visibility timeout, see Visibility Timeout in the Amazon SQS Developer Guide.)

    For example, let's say you have a message and its default message visibility timeout is 5 minutes. After 3 minutes, you call ChangeMessageVisibility with a timeout of 10 minutes. At that time, the timeout for the message would be extended by 10 minutes beyond the time of the ChangeMessageVisibility call. This results in a total visibility timeout of 13 minutes. You can continue to call ChangeMessageVisibility to extend the visibility timeout to a maximum of 12 hours. If you try to extend beyond 12 hours, the request will be rejected.

    There is a limit of 120,000 inflight messages per queue. Messages are inflight after they have been received from the queue by a consuming component, but have not yet been deleted from the queue. If you reach the 120,000 limit, you will receive an OverLimit error message from Amazon SQS. To help avoid reaching the limit, you should delete the messages from the queue after they have been processed. You can also increase the number of queues you use to process the messages.

    If you attempt to set the VisibilityTimeout to an amount more than the maximum time left, Amazon SQS returns an error. It will not automatically recalculate and increase the timeout to the maximum time remaining.

    Unlike with a queue, when you change the visibility timeout for a specific message, that timeout value is applied immediately but is not saved in memory for that message. If you don't delete a message after it is received, the visibility timeout for the message the next time it is received reverts to the original timeout value, not the value you set with the ChangeMessageVisibility action.
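
    A hedged sketch of extending a single message's timeout (the queue URL and receipt handle are placeholders; the receipt handle comes from ReceiveMessage):

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/sqs"
        )

        func main() {
            svc := sqs.New(session.Must(session.NewSession()))
            _, err := svc.ChangeMessageVisibility(&sqs.ChangeMessageVisibilityInput{
                QueueUrl:          aws.String("https://sqs.us-east-1.amazonaws.com/123456789012/MyQueue"), // placeholder
                ReceiptHandle:     aws.String("RECEIPT-HANDLE-FROM-RECEIVEMESSAGE"),                       // placeholder
                VisibilityTimeout: aws.Int64(600), // message stays invisible for 10 more minutes
            })
            if err != nil {
                fmt.Println("ChangeMessageVisibility failed:", err)
            }
        }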

    ", + "ChangeMessageVisibilityBatch": "

    Changes the visibility timeout of multiple messages. This is a batch version of ChangeMessageVisibility. The result of the action on each message is reported individually in the response. You can send up to 10 ChangeMessageVisibility requests with each ChangeMessageVisibilityBatch action.

    Because the batch request can result in a combination of successful and unsuccessful actions, you should check for batch errors even when the call returns an HTTP status code of 200.

    Some API actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

    ", + "CreateQueue": "

    Creates a new queue, or returns the URL of an existing one. When you request CreateQueue, you provide a name for the queue. To successfully create a new queue, you must provide a name that is unique within the scope of your own queues.

    If you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.

    You may pass one or more attributes in the request. If you do not provide a value for any attribute, the queue will have the default value for that attribute.

    Use GetQueueUrl to get a queue's URL. GetQueueUrl requires only the QueueName parameter.

    If you provide the name of an existing queue, along with the exact names and values of all the queue's attributes, CreateQueue returns the queue URL for the existing queue. If the queue name, attribute names, or attribute values do not match an existing queue, CreateQueue returns an error.

    Some API actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:
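
    A minimal sketch (the queue name and attribute value are placeholders):

        package main

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/sqs"
        )

        func main() {
            svc := sqs.New(session.Must(session.NewSession()))
            out, err := svc.CreateQueue(&sqs.CreateQueueInput{
                QueueName: aws.String("my-queue"), // placeholder
                Attributes: map[string]*string{
                    // Unspecified attributes keep their defaults.
                    "VisibilityTimeout": aws.String("60"),
                },
            })
            if err == nil {
                fmt.Println("queue url:", aws.StringValue(out.QueueUrl))
            }
        }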

    ", + "DeleteMessage": "

    Deletes the specified message from the specified queue. You specify the message by using the message's receipt handle and not the message ID you received when you sent the message. Even if the message is locked by another reader due to the visibility timeout setting, it is still deleted from the queue. If you leave a message in the queue for longer than the queue's configured retention period, Amazon SQS automatically deletes it.

    The receipt handle is associated with a specific instance of receiving the message. If you receive a message more than once, the receipt handle you get each time you receive the message is different. When you request DeleteMessage, if you don't provide the most recently received receipt handle for the message, the request will still succeed, but the message might not be deleted.

    It is possible you will receive a message even after you have deleted it. This might happen on rare occasions if one of the servers storing a copy of the message is unavailable when you request to delete the message. The copy remains on the server and might be returned to you again on a subsequent receive request. You should create your system to be idempotent so that receiving a particular message more than once is not a problem.

    ", + "DeleteMessageBatch": "

    Deletes up to ten messages from the specified queue. This is a batch version of DeleteMessage. The result of the delete action on each message is reported individually in the response.

    Because the batch request can result in a combination of successful and unsuccessful actions, you should check for batch errors even when the call returns an HTTP status code of 200.

    Some API actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

    ", + "DeleteQueue": "

    Deletes the queue specified by the queue URL, regardless of whether the queue is empty. If the specified queue does not exist, Amazon SQS returns a successful response.

    Use DeleteQueue with care; once you delete your queue, any messages in the queue are no longer available.

    When you delete a queue, the deletion process takes up to 60 seconds. Requests you send involving that queue during the 60 seconds might succeed. For example, a SendMessage request might succeed, but after the 60 seconds, the queue and that message you sent no longer exist. Also, when you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.

    We reserve the right to delete queues that have had no activity for more than 30 days. For more information, see How Amazon SQS Queues Work in the Amazon SQS Developer Guide.

    ", + "GetQueueAttributes": "

    Gets attributes for the specified queue.

    Some API actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:
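
    A sketch of fetching attributes (same assumed client); the attribute names chosen here are examples from the list documented under AttributeNameList below:

        // Sketch: read the approximate depth and visibility timeout of a queue.
        out, err := svc.GetQueueAttributes(&sqs.GetQueueAttributesInput{
            QueueUrl: aws.String(queueURL),
            AttributeNames: []*string{
                aws.String("ApproximateNumberOfMessages"),
                aws.String("VisibilityTimeout"),
            },
        })
        if err == nil {
            fmt.Println(aws.StringValue(out.Attributes["ApproximateNumberOfMessages"]))
        }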

    ", + "GetQueueUrl": "

    Returns the URL of an existing queue. This action provides a simple way to retrieve the URL of an Amazon SQS queue.

    To access a queue that belongs to another AWS account, use the QueueOwnerAWSAccountId parameter to specify the account ID of the queue's owner. The queue's owner must grant you permission to access the queue. For more information about shared queue access, see AddPermission or go to Shared Queues in the Amazon SQS Developer Guide.
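
    A sketch of resolving a queue URL by name, including the optional cross-account parameter (the account ID shown is a placeholder):

        // Sketch: look up a queue URL, optionally in another account.
        out, err := svc.GetQueueUrl(&sqs.GetQueueUrlInput{
            QueueName:              aws.String("MyQueue"),
            QueueOwnerAWSAccountId: aws.String("123456789012"), // omit for your own queues
        })
        if err == nil {
            fmt.Println(*out.QueueUrl)
        }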

    ", + "ListDeadLetterSourceQueues": "

    Returns a list of your queues that have the RedrivePolicy queue attribute configured with a dead letter queue.

    For more information about using dead letter queues, see Using Amazon SQS Dead Letter Queues.
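
    A sketch of listing the source queues that redrive to a given dead letter queue (same assumed client; dlqURL is a placeholder for the dead letter queue's URL):

        // Sketch: find every queue whose RedrivePolicy targets this DLQ.
        out, err := svc.ListDeadLetterSourceQueues(&sqs.ListDeadLetterSourceQueuesInput{
            QueueUrl: aws.String(dlqURL), // URL of the dead letter queue itself
        })
        if err == nil {
            for _, u := range out.QueueUrls {
                fmt.Println(*u)
            }
        }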

    ", + "ListQueues": "

    Returns a list of your queues. The maximum number of queues that can be returned is 1000. If you specify a value for the optional QueueNamePrefix parameter, only queues with a name beginning with the specified value are returned.
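
    A sketch of a prefix-filtered listing (same assumed client; the prefix is an example):

        // Sketch: list up to 1000 queues whose names start with "prod-".
        out, err := svc.ListQueues(&sqs.ListQueuesInput{
            QueueNamePrefix: aws.String("prod-"),
        })
        if err == nil {
            for _, u := range out.QueueUrls {
                fmt.Println(*u)
            }
        }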

    ", + "PurgeQueue": "

    Deletes the messages in a queue specified by the queue URL.

    Messages deleted by a PurgeQueue request cannot be retrieved.

    When you purge a queue, the message deletion process takes up to 60 seconds. All messages sent to the queue before calling PurgeQueue will be deleted; messages sent to the queue while it is being purged may be deleted. While the queue is being purged, messages sent to the queue before PurgeQueue was called may be received, but will be deleted within the next minute.
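
    A sketch of a purge call (same assumed client); the 60-second window described above applies afterwards:

        // Sketch: irreversibly delete every message currently in the queue.
        _, err := svc.PurgeQueue(&sqs.PurgeQueueInput{
            QueueUrl: aws.String(queueURL),
        })
        // A PurgeQueueInProgress error is returned if another purge ran
        // within the last 60 seconds.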

    ", + "ReceiveMessage": "

    Retrieves one or more messages, with a maximum limit of 10 messages, from the specified queue. Long poll support is enabled by using the WaitTimeSeconds parameter. For more information, see Amazon SQS Long Poll in the Amazon SQS Developer Guide.

    Short poll is the default behavior where a weighted random set of machines is sampled on a ReceiveMessage call. This means only the messages on the sampled machines are returned. If the number of messages in the queue is small (less than 1000), it is likely you will get fewer messages than you requested per ReceiveMessage call. If the number of messages in the queue is extremely small, you might not receive any messages in a particular ReceiveMessage response; in which case you should repeat the request.

    For each message returned, the response includes the following:

    • Message body

    • MD5 digest of the message body. For information about MD5, go to http://www.faqs.org/rfcs/rfc1321.html.

    • Message ID you received when you sent the message to the queue.

    • Receipt handle.

    • Message attributes.

    • MD5 digest of the message attributes.

    The receipt handle is the identifier you must provide when deleting the message. For more information, see Queue and Message Identifiers in the Amazon SQS Developer Guide.

    You can provide the VisibilityTimeout parameter in your request, which will be applied to the messages that Amazon SQS returns in the response. If you do not include the parameter, the overall visibility timeout for the queue is used for the returned messages. For more information, see Visibility Timeout in the Amazon SQS Developer Guide.

    Going forward, new attributes might be added. If you are writing code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully.
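
    A sketch of a long-poll receive that exercises the parameters described above (same assumed client; the 20-second wait and per-receive visibility override are example values):

        // Sketch: long-poll for up to 10 messages, overriding the queue's
        // visibility timeout for just these messages.
        out, err := svc.ReceiveMessage(&sqs.ReceiveMessageInput{
            QueueUrl:            aws.String(queueURL),
            MaxNumberOfMessages: aws.Int64(10),
            WaitTimeSeconds:     aws.Int64(20), // long poll instead of short poll
            VisibilityTimeout:   aws.Int64(60), // applies only to the returned messages
            AttributeNames:      []*string{aws.String("All")},
        })
        if err == nil {
            for _, m := range out.Messages {
                fmt.Println(*m.MessageId, *m.Body) // delete later via m.ReceiptHandle
            }
        }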

    ", + "RemovePermission": "

    Revokes any permissions in the queue policy that matches the specified Label parameter. Only the owner of the queue can remove permissions.

    ", + "SendMessage": "

    Delivers a message to the specified queue. Amazon SQS supports message payloads of up to 256 KB (262,144 bytes). To send large payloads, you must use an AWS SDK that supports SigV4 signing. To verify whether SigV4 is supported for an AWS SDK, check the SDK release notes.

    The following list shows the characters (in Unicode) allowed in your message, according to the W3C XML specification. For more information, go to http://www.w3.org/TR/REC-xml/#charsets. If you send any characters not included in the list, your request will be rejected.

    #x9 | #xA | #xD | [#x20 to #xD7FF] | [#xE000 to #xFFFD] | [#x10000 to #x10FFFF]
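
    A sketch of a basic send (same assumed client; the body and message attribute are placeholders):

        // Sketch: send one message and read back the server-computed digest.
        out, err := svc.SendMessage(&sqs.SendMessageInput{
            QueueUrl:    aws.String(queueURL),
            MessageBody: aws.String("hello"), // must use only the characters listed above
            MessageAttributes: map[string]*sqs.MessageAttributeValue{
                "Author": {DataType: aws.String("String"), StringValue: aws.String("example")},
            },
        })
        if err == nil {
            fmt.Println(*out.MessageId, *out.MD5OfMessageBody)
        }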

    ", + "SendMessageBatch": "

    Delivers up to ten messages to the specified queue. This is a batch version of SendMessage. The result of the send action on each message is reported individually in the response. The maximum allowed individual message size is 256 KB (262,144 bytes).

    The maximum total payload size (i.e., the sum of all a batch's individual message lengths) is also 256 KB (262,144 bytes).

    If the DelaySeconds parameter is not specified for an entry, the default for the queue is used.

    The following list shows the characters (in Unicode) that are allowed in your message, according to the W3C XML specification. For more information, go to http://www.w3.org/TR/REC-xml/#charsets. If you send any characters that are not included in the list, your request will be rejected.

    #x9 | #xA | #xD | [#x20 to #xD7FF] | [#xE000 to #xFFFD] | [#x10000 to #x10FFFF]

    Because the batch request can result in a combination of successful and unsuccessful actions, you should check for batch errors even when the call returns an HTTP status code of 200.

    Some API actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:
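
    As with DeleteMessageBatch, a 200 response can include per-entry failures, so a sketch of the batch send (same assumed client; entry Ids and bodies are placeholders) checks both result lists:

        // Sketch: send two messages in one request; total payload <= 256 KB.
        out, err := svc.SendMessageBatch(&sqs.SendMessageBatchInput{
            QueueUrl: aws.String(queueURL),
            Entries: []*sqs.SendMessageBatchRequestEntry{
                {Id: aws.String("m1"), MessageBody: aws.String("first")},
                {Id: aws.String("m2"), MessageBody: aws.String("second"),
                    DelaySeconds: aws.Int64(10)}, // overrides the queue default
            },
        })
        if err == nil {
            fmt.Println(len(out.Successful), "sent,", len(out.Failed), "failed")
        }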

    ", + "SetQueueAttributes": "

    Sets the value of one or more queue attributes. When you change a queue's attributes, the change can take up to 60 seconds for most of the attributes to propagate throughout the SQS system. Changes made to the MessageRetentionPeriod attribute can take up to 15 minutes.

    Going forward, new attributes might be added. If you are writing code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully.
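
    A sketch of an attribute update (same assumed client; the one-day retention value is an example):

        // Sketch: shorten retention to one day; propagation can take up to
        // 15 minutes for MessageRetentionPeriod.
        _, err := svc.SetQueueAttributes(&sqs.SetQueueAttributesInput{
            QueueUrl: aws.String(queueURL),
            Attributes: map[string]*string{
                "MessageRetentionPeriod": aws.String("86400"),
            },
        })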

    " + }, + "shapes": { + "AWSAccountIdList": { + "base": null, + "refs": { + "AddPermissionRequest$AWSAccountIds": "

    The AWS account number of the principal who will be given permission. The principal must have an AWS account, but does not need to be signed up for Amazon SQS. For information about locating the AWS account identification, see Your AWS Identifiers in the Amazon SQS Developer Guide.

    " + } + }, + "ActionNameList": { + "base": null, + "refs": { + "AddPermissionRequest$Actions": "

    The action the client wants to allow for the specified principal. The following are valid values: * | SendMessage | ReceiveMessage | DeleteMessage | ChangeMessageVisibility | GetQueueAttributes | GetQueueUrl. For more information about these actions, see Understanding Permissions in the Amazon SQS Developer Guide.

    Specifying SendMessage, DeleteMessage, or ChangeMessageVisibility for the ActionName.n also grants permissions for the corresponding batch versions of those actions: SendMessageBatch, DeleteMessageBatch, and ChangeMessageVisibilityBatch.
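
    Taken together, these two parameters drive an AddPermission call; a sketch with a placeholder account ID and label:

        // Sketch: let another account send to and receive from this queue.
        _, err := svc.AddPermission(&sqs.AddPermissionInput{
            QueueUrl:      aws.String(queueURL),
            Label:         aws.String("CrossAccountSendReceive"), // unique, max 80 chars
            AWSAccountIds: []*string{aws.String("123456789012")}, // placeholder account
            Actions:       []*string{aws.String("SendMessage"), aws.String("ReceiveMessage")},
        })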

    " + } + }, + "AddPermissionRequest": { + "base": "

    ", + "refs": { + } + }, + "AttributeMap": { + "base": null, + "refs": { + "CreateQueueRequest$Attributes": "

    A map of attributes with their corresponding values.

    The following lists the names, descriptions, and values of the special request parameters the CreateQueue action uses:

    • DelaySeconds - The time in seconds that the delivery of all messages in the queue will be delayed. An integer from 0 to 900 (15 minutes). The default for this attribute is 0 (zero).

    • MaximumMessageSize - The limit of how many bytes a message can contain before Amazon SQS rejects it. An integer from 1024 bytes (1 KiB) up to 262144 bytes (256 KiB). The default for this attribute is 262144 (256 KiB).

    • MessageRetentionPeriod - The number of seconds Amazon SQS retains a message. Integer representing seconds, from 60 (1 minute) to 1209600 (14 days). The default for this attribute is 345600 (4 days).

    • Policy - The queue's policy. A valid AWS policy. For more information about policy structure, see Overview of AWS IAM Policies in the Amazon IAM User Guide.

    • ReceiveMessageWaitTimeSeconds - The time for which a ReceiveMessage call will wait for a message to arrive. An integer from 0 to 20 (seconds). The default for this attribute is 0.

    • RedrivePolicy - The parameters for dead letter queue functionality of the source queue. For more information about RedrivePolicy and dead letter queues, see Using Amazon SQS Dead Letter Queues in the Amazon SQS Developer Guide.

    • VisibilityTimeout - The visibility timeout for the queue. An integer from 0 to 43200 (12 hours). The default for this attribute is 30. For more information about visibility timeout, see Visibility Timeout in the Amazon SQS Developer Guide.

    Any other valid special request parameters that are specified (such as ApproximateNumberOfMessages, ApproximateNumberOfMessagesDelayed, ApproximateNumberOfMessagesNotVisible, CreatedTimestamp, LastModifiedTimestamp, and QueueArn) will be ignored.

    ", + "GetQueueAttributesResult$Attributes": "

    A map of attributes to the respective values.

    ", + "Message$Attributes": "

    SenderId, SentTimestamp, ApproximateReceiveCount, and/or ApproximateFirstReceiveTimestamp. SentTimestamp and ApproximateFirstReceiveTimestamp are each returned as an integer representing the epoch time in milliseconds.

    ", + "SetQueueAttributesRequest$Attributes": "

    A map of attributes to set.

    The following lists the names, descriptions, and values of the special request parameters the SetQueueAttributes action uses:

    • DelaySeconds - The time in seconds that the delivery of all messages in the queue will be delayed. An integer from 0 to 900 (15 minutes). The default for this attribute is 0 (zero).

    • MaximumMessageSize - The limit of how many bytes a message can contain before Amazon SQS rejects it. An integer from 1024 bytes (1 KiB) up to 262144 bytes (256 KiB). The default for this attribute is 262144 (256 KiB).

    • MessageRetentionPeriod - The number of seconds Amazon SQS retains a message. Integer representing seconds, from 60 (1 minute) to 1209600 (14 days). The default for this attribute is 345600 (4 days).

    • Policy - The queue's policy. A valid AWS policy. For more information about policy structure, see Overview of AWS IAM Policies in the Amazon IAM User Guide.

    • ReceiveMessageWaitTimeSeconds - The time for which a ReceiveMessage call will wait for a message to arrive. An integer from 0 to 20 (seconds). The default for this attribute is 0.

    • VisibilityTimeout - The visibility timeout for the queue. An integer from 0 to 43200 (12 hours). The default for this attribute is 30. For more information about visibility timeout, see Visibility Timeout in the Amazon SQS Developer Guide.

    • RedrivePolicy - The parameters for dead letter queue functionality of the source queue. For more information about RedrivePolicy and dead letter queues, see Using Amazon SQS Dead Letter Queues in the Amazon SQS Developer Guide.

    Any other valid special request parameters that are specified (such as ApproximateNumberOfMessages, ApproximateNumberOfMessagesDelayed, ApproximateNumberOfMessagesNotVisible, CreatedTimestamp, LastModifiedTimestamp, and QueueArn) will be ignored.

    " + } + }, + "AttributeNameList": { + "base": null, + "refs": { + "GetQueueAttributesRequest$AttributeNames": "

    A list of attributes to retrieve information for. The following attributes are supported:

    • All - returns all values.

    • ApproximateNumberOfMessages - returns the approximate number of visible messages in a queue. For more information, see Resources Required to Process Messages in the Amazon SQS Developer Guide.

    • ApproximateNumberOfMessagesNotVisible - returns the approximate number of messages that are not timed-out and not deleted. For more information, see Resources Required to Process Messages in the Amazon SQS Developer Guide.

    • VisibilityTimeout - returns the visibility timeout for the queue. For more information about visibility timeout, see Visibility Timeout in the Amazon SQS Developer Guide.

    • CreatedTimestamp - returns the time when the queue was created (epoch time in seconds).

    • LastModifiedTimestamp - returns the time when the queue was last changed (epoch time in seconds).

    • Policy - returns the queue's policy.

    • MaximumMessageSize - returns the limit of how many bytes a message can contain before Amazon SQS rejects it.

    • MessageRetentionPeriod - returns the number of seconds Amazon SQS retains a message.

    • QueueArn - returns the queue's Amazon resource name (ARN).

    • ApproximateNumberOfMessagesDelayed - returns the approximate number of messages that are pending to be added to the queue.

    • DelaySeconds - returns the default delay on the queue in seconds.

    • ReceiveMessageWaitTimeSeconds - returns the time for which a ReceiveMessage call will wait for a message to arrive.

    • RedrivePolicy - returns the parameters for dead letter queue functionality of the source queue. For more information about RedrivePolicy and dead letter queues, see Using Amazon SQS Dead Letter Queues in the Amazon SQS Developer Guide.

    Going forward, new attributes might be added. If you are writing code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully.

    ", + "ReceiveMessageRequest$AttributeNames": "

    A list of attributes that need to be returned along with each message. These attributes include:

    • All - returns all values.

    • ApproximateFirstReceiveTimestamp - returns the time when the message was first received from the queue (epoch time in milliseconds).

    • ApproximateReceiveCount - returns the number of times a message has been received from the queue but not deleted.

    • SenderId - returns the AWS account number (or the IP address, if anonymous access is allowed) of the sender.

    • SentTimestamp - returns the time when the message was sent to the queue (epoch time in milliseconds).

    Any other valid special request parameters that are specified (such as ApproximateNumberOfMessages, ApproximateNumberOfMessagesDelayed, ApproximateNumberOfMessagesNotVisible, CreatedTimestamp, DelaySeconds, LastModifiedTimestamp, MaximumMessageSize, MessageRetentionPeriod, Policy, QueueArn, ReceiveMessageWaitTimeSeconds, RedrivePolicy, and VisibilityTimeout) will be ignored.

    " + } + }, + "BatchEntryIdsNotDistinct": { + "base": "

    Two or more batch entries have the same Id in the request.

    ", + "refs": { + } + }, + "BatchRequestTooLong": { + "base": "

    The combined length of all the messages in the batch exceeds the limit.

    ", + "refs": { + } + }, + "BatchResultErrorEntry": { + "base": "

    Used in the responses of the batch APIs to give a detailed description of the result of an action on each entry in the request.

    ", + "refs": { + "BatchResultErrorEntryList$member": null + } + }, + "BatchResultErrorEntryList": { + "base": null, + "refs": { + "ChangeMessageVisibilityBatchResult$Failed": "

    A list of BatchResultErrorEntry items.

    ", + "DeleteMessageBatchResult$Failed": "

    A list of BatchResultErrorEntry items.

    ", + "SendMessageBatchResult$Failed": "

    A list of BatchResultErrorEntry items with the error detail about each message that could not be enqueued.

    " + } + }, + "Binary": { + "base": null, + "refs": { + "BinaryList$member": null, + "MessageAttributeValue$BinaryValue": "

    Binary type attributes can store any binary data, for example, compressed data, encrypted data, or images.

    " + } + }, + "BinaryList": { + "base": null, + "refs": { + "MessageAttributeValue$BinaryListValues": "

    Not implemented. Reserved for future use.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "BatchResultErrorEntry$SenderFault": "

    Whether the error happened due to the sender's fault.

    " + } + }, + "ChangeMessageVisibilityBatchRequest": { + "base": "

    ", + "refs": { + } + }, + "ChangeMessageVisibilityBatchRequestEntry": { + "base": "

    Encloses a receipt handle and an entry id for each message in ChangeMessageVisibilityBatch.

    All of the following parameters are list parameters that must be prefixed with ChangeMessageVisibilityBatchRequestEntry.n, where n is an integer value starting with 1. For example, a parameter list for this action might look like this:


    ", + "refs": { + "ChangeMessageVisibilityBatchRequestEntryList$member": null + } + }, + "ChangeMessageVisibilityBatchRequestEntryList": { + "base": null, + "refs": { + "ChangeMessageVisibilityBatchRequest$Entries": "

    A list of receipt handles of the messages for which the visibility timeout must be changed.

    " + } + }, + "ChangeMessageVisibilityBatchResult": { + "base": "

    For each message in the batch, the response contains a ChangeMessageVisibilityBatchResultEntry tag if the message succeeds or a BatchResultErrorEntry tag if the message fails.

    ", + "refs": { + } + }, + "ChangeMessageVisibilityBatchResultEntry": { + "base": "

    Encloses the id of an entry in ChangeMessageVisibilityBatch.

    ", + "refs": { + "ChangeMessageVisibilityBatchResultEntryList$member": null + } + }, + "ChangeMessageVisibilityBatchResultEntryList": { + "base": null, + "refs": { + "ChangeMessageVisibilityBatchResult$Successful": "

    A list of ChangeMessageVisibilityBatchResultEntry items.

    " + } + }, + "ChangeMessageVisibilityRequest": { + "base": null, + "refs": { + } + }, + "CreateQueueRequest": { + "base": "

    ", + "refs": { + } + }, + "CreateQueueResult": { + "base": "

    Returns the QueueUrl element of the created queue.

    ", + "refs": { + } + }, + "DeleteMessageBatchRequest": { + "base": "

    ", + "refs": { + } + }, + "DeleteMessageBatchRequestEntry": { + "base": "

    Encloses a receipt handle and an identifier for it.

    ", + "refs": { + "DeleteMessageBatchRequestEntryList$member": null + } + }, + "DeleteMessageBatchRequestEntryList": { + "base": null, + "refs": { + "DeleteMessageBatchRequest$Entries": "

    A list of receipt handles for the messages to be deleted.

    " + } + }, + "DeleteMessageBatchResult": { + "base": "

    For each message in the batch, the response contains a DeleteMessageBatchResultEntry tag if the message is deleted or a BatchResultErrorEntry tag if the message cannot be deleted.

    ", + "refs": { + } + }, + "DeleteMessageBatchResultEntry": { + "base": "

    Encloses the id of an entry in DeleteMessageBatch.

    ", + "refs": { + "DeleteMessageBatchResultEntryList$member": null + } + }, + "DeleteMessageBatchResultEntryList": { + "base": null, + "refs": { + "DeleteMessageBatchResult$Successful": "

    A list of DeleteMessageBatchResultEntry items.

    " + } + }, + "DeleteMessageRequest": { + "base": "

    ", + "refs": { + } + }, + "DeleteQueueRequest": { + "base": "

    ", + "refs": { + } + }, + "EmptyBatchRequest": { + "base": "

    The batch request does not contain any entries.

    ", + "refs": { + } + }, + "GetQueueAttributesRequest": { + "base": "

    ", + "refs": { + } + }, + "GetQueueAttributesResult": { + "base": "

    A list of returned queue attributes.

    ", + "refs": { + } + }, + "GetQueueUrlRequest": { + "base": "

    ", + "refs": { + } + }, + "GetQueueUrlResult": { + "base": "

    For more information, see Responses in the Amazon SQS Developer Guide.

    ", + "refs": { + } + }, + "Integer": { + "base": null, + "refs": { + "ChangeMessageVisibilityBatchRequestEntry$VisibilityTimeout": "

    The new value (in seconds) for the message's visibility timeout.

    ", + "ChangeMessageVisibilityRequest$VisibilityTimeout": "

    The new value (in seconds - from 0 to 43200 - maximum 12 hours) for the message's visibility timeout.
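
    A sketch of applying this parameter with ChangeMessageVisibility (same assumed client; msg is a message from a prior receive):

        // Sketch: give a handler two more minutes before the message
        // becomes visible to other consumers again.
        _, err := svc.ChangeMessageVisibility(&sqs.ChangeMessageVisibilityInput{
            QueueUrl:          aws.String(queueURL),
            ReceiptHandle:     msg.ReceiptHandle,
            VisibilityTimeout: aws.Int64(120),
        })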

    ", + "ReceiveMessageRequest$MaxNumberOfMessages": "

    The maximum number of messages to return. Amazon SQS never returns more messages than this value but may return fewer. Values can be from 1 to 10. Default is 1.

    Not all of the messages are necessarily returned.

    ", + "ReceiveMessageRequest$VisibilityTimeout": "

    The duration (in seconds) that the received messages are hidden from subsequent retrieve requests after being retrieved by a ReceiveMessage request.

    ", + "ReceiveMessageRequest$WaitTimeSeconds": "

    The duration (in seconds) for which the call will wait for a message to arrive in the queue before returning. If a message is available, the call will return sooner than WaitTimeSeconds.

    ", + "SendMessageBatchRequestEntry$DelaySeconds": "

    The number of seconds for which the message is delayed.

    ", + "SendMessageRequest$DelaySeconds": "

    The number of seconds (0 to 900 - 15 minutes) to delay a specific message. Messages with a positive DelaySeconds value become available for processing after the delay time is finished. If you don't specify a value, the default value for the queue applies.

    " + } + }, + "InvalidAttributeName": { + "base": "

    The attribute referred to does not exist.

    ", + "refs": { + } + }, + "InvalidBatchEntryId": { + "base": "

    The Id of a batch entry in a batch request does not abide by the specification.

    ", + "refs": { + } + }, + "InvalidIdFormat": { + "base": "

    The receipt handle is not valid for the current version.

    ", + "refs": { + } + }, + "InvalidMessageContents": { + "base": "

    The message contains characters outside the allowed set.

    ", + "refs": { + } + }, + "ListDeadLetterSourceQueuesRequest": { + "base": "

    ", + "refs": { + } + }, + "ListDeadLetterSourceQueuesResult": { + "base": "

    A list of your dead letter source queues.

    ", + "refs": { + } + }, + "ListQueuesRequest": { + "base": "

    ", + "refs": { + } + }, + "ListQueuesResult": { + "base": "

    A list of your queues.

    ", + "refs": { + } + }, + "Message": { + "base": "

    An Amazon SQS message.

    ", + "refs": { + "MessageList$member": null + } + }, + "MessageAttributeMap": { + "base": null, + "refs": { + "Message$MessageAttributes": "

    Each message attribute consists of a Name, Type, and Value. For more information, see Message Attribute Items.

    ", + "SendMessageBatchRequestEntry$MessageAttributes": "

    Each message attribute consists of a Name, Type, and Value. For more information, see Message Attribute Items.

    ", + "SendMessageRequest$MessageAttributes": "

    Each message attribute consists of a Name, Type, and Value. For more information, see Message Attribute Items.

    " + } + }, + "MessageAttributeName": { + "base": null, + "refs": { + "MessageAttributeNameList$member": null + } + }, + "MessageAttributeNameList": { + "base": null, + "refs": { + "ReceiveMessageRequest$MessageAttributeNames": "

    The name of the message attribute, where N is the index. The message attribute name can contain the following characters: A-Z, a-z, 0-9, underscore (_), hyphen (-), and period (.). The name must not start or end with a period, and it should not have successive periods. The name is case sensitive and must be unique among all attribute names for the message. The name can be up to 256 characters long. The name cannot start with \"AWS.\" or \"Amazon.\" (or any variations in casing), because these prefixes are reserved for use by Amazon Web Services.

    When using ReceiveMessage, you can send a list of attribute names to receive, or you can return all of the attributes by specifying \"All\" or \".*\" in your request. You can also use \"bar.*\" to return all message attributes starting with the \"bar\" prefix.

    " + } + }, + "MessageAttributeValue": { + "base": "

    The user-specified message attribute value. For string data types, the value attribute has the same restrictions on the content as the message body. For more information, see SendMessage.

    Name, type, and value must not be empty or null. In addition, the message body should not be empty or null. All parts of the message attribute, including name, type, and value, are included in the message size restriction, which is currently 256 KB (262,144 bytes).

    ", + "refs": { + "MessageAttributeMap$value": null + } + }, + "MessageList": { + "base": null, + "refs": { + "ReceiveMessageResult$Messages": "

    A list of messages.

    " + } + }, + "MessageNotInflight": { + "base": "

    The message referred to is not in flight.

    ", + "refs": { + } + }, + "OverLimit": { + "base": "

    The action that you requested would violate a limit. For example, ReceiveMessage returns this error if the maximum number of messages inflight has already been reached. AddPermission returns this error if the maximum number of permissions for the queue has already been reached.

    ", + "refs": { + } + }, + "PurgeQueueInProgress": { + "base": "

    Indicates that the specified queue received a PurgeQueue request within the last 60 seconds (the time it can take to delete the messages in the queue).

    ", + "refs": { + } + }, + "PurgeQueueRequest": { + "base": "

    ", + "refs": { + } + }, + "QueueAttributeName": { + "base": null, + "refs": { + "AttributeMap$key": null, + "AttributeNameList$member": null + } + }, + "QueueDeletedRecently": { + "base": "

    You must wait 60 seconds after deleting a queue before you can create another with the same name.

    ", + "refs": { + } + }, + "QueueDoesNotExist": { + "base": "

    The queue referred to does not exist.

    ", + "refs": { + } + }, + "QueueNameExists": { + "base": "

    A queue already exists with this name. Amazon SQS returns this error only if the request includes attributes whose values differ from those of the existing queue.

    ", + "refs": { + } + }, + "QueueUrlList": { + "base": null, + "refs": { + "ListDeadLetterSourceQueuesResult$queueUrls": "

    A list of source queue URLs that have the RedrivePolicy queue attribute configured with a dead letter queue.

    ", + "ListQueuesResult$QueueUrls": "

    A list of queue URLs, up to 1000 entries.

    " + } + }, + "ReceiptHandleIsInvalid": { + "base": "

    The receipt handle provided is not valid.

    ", + "refs": { + } + }, + "ReceiveMessageRequest": { + "base": "

    ", + "refs": { + } + }, + "ReceiveMessageResult": { + "base": "

    A list of received messages.

    ", + "refs": { + } + }, + "RemovePermissionRequest": { + "base": "

    ", + "refs": { + } + }, + "SendMessageBatchRequest": { + "base": "

    ", + "refs": { + } + }, + "SendMessageBatchRequestEntry": { + "base": "

    Contains the details of a single Amazon SQS message along with an Id.

    ", + "refs": { + "SendMessageBatchRequestEntryList$member": null + } + }, + "SendMessageBatchRequestEntryList": { + "base": null, + "refs": { + "SendMessageBatchRequest$Entries": "

    A list of SendMessageBatchRequestEntry items.

    " + } + }, + "SendMessageBatchResult": { + "base": "

    For each message in the batch, the response contains a SendMessageBatchResultEntry tag if the message succeeds or a BatchResultErrorEntry tag if the message fails.

    ", + "refs": { + } + }, + "SendMessageBatchResultEntry": { + "base": "

    Encloses a message ID for a successfully enqueued message of a SendMessageBatch.

    ", + "refs": { + "SendMessageBatchResultEntryList$member": null + } + }, + "SendMessageBatchResultEntryList": { + "base": null, + "refs": { + "SendMessageBatchResult$Successful": "

    A list of SendMessageBatchResultEntry items.

    " + } + }, + "SendMessageRequest": { + "base": "

    ", + "refs": { + } + }, + "SendMessageResult": { + "base": "

    The MD5OfMessageBody and MessageId elements.

    ", + "refs": { + } + }, + "SetQueueAttributesRequest": { + "base": "

    ", + "refs": { + } + }, + "String": { + "base": null, + "refs": { + "AWSAccountIdList$member": null, + "ActionNameList$member": null, + "AddPermissionRequest$QueueUrl": "

    The URL of the Amazon SQS queue to take action on.

    Queue URLs are case-sensitive.

    ", + "AddPermissionRequest$Label": "

    The unique identification of the permission you're setting (e.g., AliceSendMessage). Constraints: Maximum 80 characters; alphanumeric characters, hyphens (-), and underscores (_) are allowed.

    ", + "AttributeMap$value": null, + "BatchResultErrorEntry$Id": "

    The id of an entry in a batch request.

    ", + "BatchResultErrorEntry$Code": "

    An error code representing why the action failed on this entry.

    ", + "BatchResultErrorEntry$Message": "

    A message explaining why the action failed on this entry.

    ", + "ChangeMessageVisibilityBatchRequest$QueueUrl": "

    The URL of the Amazon SQS queue to take action on.

    Queue URLs are case-sensitive.

    ", + "ChangeMessageVisibilityBatchRequestEntry$Id": "

    An identifier for this particular receipt handle. This is used to communicate the result. Note that the Ids of a batch request need to be unique within the request.

    ", + "ChangeMessageVisibilityBatchRequestEntry$ReceiptHandle": "

    A receipt handle.

    ", + "ChangeMessageVisibilityBatchResultEntry$Id": "

    Represents a message whose visibility timeout has been changed successfully.

    ", + "ChangeMessageVisibilityRequest$QueueUrl": "

    The URL of the Amazon SQS queue to take action on.

    Queue URLs are case-sensitive.

    ", + "ChangeMessageVisibilityRequest$ReceiptHandle": "

    The receipt handle associated with the message whose visibility timeout should be changed. This parameter is returned by the ReceiveMessage action.

    ", + "CreateQueueRequest$QueueName": "

    The name for the queue to be created.

    Queue names are case-sensitive.

    ", + "CreateQueueResult$QueueUrl": "

    The URL for the created Amazon SQS queue.

    ", + "DeleteMessageBatchRequest$QueueUrl": "

    The URL of the Amazon SQS queue to take action on.

    Queue URLs are case-sensitive.

    ", + "DeleteMessageBatchRequestEntry$Id": "

    An identifier for this particular receipt handle. This is used to communicate the result. Note that the Ids of a batch request need to be unique within the request.

    ", + "DeleteMessageBatchRequestEntry$ReceiptHandle": "

    A receipt handle.

    ", + "DeleteMessageBatchResultEntry$Id": "

    Represents a successfully deleted message.

    ", + "DeleteMessageRequest$QueueUrl": "

    The URL of the Amazon SQS queue to take action on.

    Queue URLs are case-sensitive.

    ", + "DeleteMessageRequest$ReceiptHandle": "

    The receipt handle associated with the message to delete.

    ", + "DeleteQueueRequest$QueueUrl": "

    The URL of the Amazon SQS queue to take action on.

    Queue URLs are case-sensitive.

    ", + "GetQueueAttributesRequest$QueueUrl": "

    The URL of the Amazon SQS queue to take action on.

    Queue URLs are case-sensitive.

    ", + "GetQueueUrlRequest$QueueName": "

    The name of the queue whose URL must be fetched. Maximum 80 characters; alphanumeric characters, hyphens (-), and underscores (_) are allowed.

    Queue names are case-sensitive.

    ", + "GetQueueUrlRequest$QueueOwnerAWSAccountId": "

    The AWS account ID of the account that created the queue.

    ", + "GetQueueUrlResult$QueueUrl": "

    The URL for the queue.

    ", + "ListDeadLetterSourceQueuesRequest$QueueUrl": "

    The queue URL of a dead letter queue.

    Queue URLs are case-sensitive.

    ", + "ListQueuesRequest$QueueNamePrefix": "

    A string to use for filtering the list results. Only those queues whose name begins with the specified string are returned.

    Queue names are case-sensitive.

    ", + "Message$MessageId": "

    A unique identifier for the message. Message IDs are considered unique across all AWS accounts for an extended period of time.

    ", + "Message$ReceiptHandle": "

    An identifier associated with the act of receiving the message. A new receipt handle is returned every time you receive a message. When deleting a message, you provide the last received receipt handle to delete the message.

    ", + "Message$MD5OfBody": "

    An MD5 digest of the non-URL-encoded message body string.

    ", + "Message$Body": "

    The message's contents (not URL-encoded).

    ", + "Message$MD5OfMessageAttributes": "

    An MD5 digest of the non-URL-encoded message attribute string. This can be used to verify that Amazon SQS received the message correctly. Amazon SQS first URL decodes the message before creating the MD5 digest. For information about MD5, go to http://www.faqs.org/rfcs/rfc1321.html.

    ", + "MessageAttributeMap$key": null, + "MessageAttributeValue$StringValue": "

    Strings are Unicode with UTF8 binary encoding. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters.

    ", + "MessageAttributeValue$DataType": "

    Amazon SQS supports the following logical data types: String, Number, and Binary. For the Number data type, you must use StringValue.

    You can also append custom labels. For more information, see Message Attribute Data Types.

    ", + "PurgeQueueRequest$QueueUrl": "

    The URL of the queue from which the PurgeQueue action deletes messages.

    Queue URLs are case-sensitive.

    ", + "QueueUrlList$member": null, + "ReceiveMessageRequest$QueueUrl": "

    The URL of the Amazon SQS queue to take action on.

    Queue URLs are case-sensitive.

    ", + "RemovePermissionRequest$QueueUrl": "

    The URL of the Amazon SQS queue to take action on.

    Queue URLs are case-sensitive.

    ", + "RemovePermissionRequest$Label": "

    The identification of the permission to remove. This is the label added with the AddPermission action.

    ", + "SendMessageBatchRequest$QueueUrl": "

    The URL of the Amazon SQS queue to take action on.

    Queue URLs are case-sensitive.

    ", + "SendMessageBatchRequestEntry$Id": "

    An identifier for the message in this batch. This is used to communicate the result. Note that the Ids of a batch request need to be unique within the request.

    ", + "SendMessageBatchRequestEntry$MessageBody": "

    Body of the message.

    ", + "SendMessageBatchResultEntry$Id": "

    An identifier for the message in this batch.

    ", + "SendMessageBatchResultEntry$MessageId": "

    An identifier for the message.

    ", + "SendMessageBatchResultEntry$MD5OfMessageBody": "

    An MD5 digest of the non-URL-encoded message body string. This can be used to verify that Amazon SQS received the message correctly. Amazon SQS first URL decodes the message before creating the MD5 digest. For information about MD5, go to http://www.faqs.org/rfcs/rfc1321.html.

    ", + "SendMessageBatchResultEntry$MD5OfMessageAttributes": "

    An MD5 digest of the non-URL-encoded message attribute string. This can be used to verify that Amazon SQS received the message batch correctly. Amazon SQS first URL decodes the message before creating the MD5 digest. For information about MD5, go to http://www.faqs.org/rfcs/rfc1321.html.

    ", + "SendMessageRequest$QueueUrl": "

    The URL of the Amazon SQS queue to take action on.

    Queue URLs are case-sensitive.

    ", + "SendMessageRequest$MessageBody": "

    The message to send. A string up to 256 KB in size. For a list of allowed characters, see the preceding important note.

    ", + "SendMessageResult$MD5OfMessageBody": "

    An MD5 digest of the non-URL-encoded message body string. This can be used to verify that Amazon SQS received the message correctly. Amazon SQS first URL decodes the message before creating the MD5 digest. For information about MD5, go to http://www.faqs.org/rfcs/rfc1321.html.

    ", + "SendMessageResult$MD5OfMessageAttributes": "

    An MD5 digest of the non-URL-encoded message attribute string. This can be used to verify that Amazon SQS received the message correctly. Amazon SQS first URL decodes the message before creating the MD5 digest. For information about MD5, go to http://www.faqs.org/rfcs/rfc1321.html.

    ", + "SendMessageResult$MessageId": "

    An element containing the message ID of the message sent to the queue. For more information, see Queue and Message Identifiers in the Amazon SQS Developer Guide.

    ", + "SetQueueAttributesRequest$QueueUrl": "

    The URL of the Amazon SQS queue to take action on.

    Queue URLs are case-sensitive.

    ", + "StringList$member": null + } + }, + "StringList": { + "base": null, + "refs": { + "MessageAttributeValue$StringListValues": "

    Not implemented. Reserved for future use.

    " + } + }, + "TooManyEntriesInBatchRequest": { + "base": "

    The batch request contains more entries than are permitted.

    ", + "refs": { + } + }, + "UnsupportedOperation": { + "base": "

    Error code 400. Unsupported operation.

    ", + "refs": { + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/sqs/2012-11-05/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/sqs/2012-11-05/examples-1.json new file mode 100644 index 000000000..ec813934f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/sqs/2012-11-05/examples-1.json @@ -0,0 +1,44 @@ +{ + "version": "1.0", + "examples": { + "CreateQueue": [ + { + "input": { + "QueueName": "MyQueue" + }, + "output": { + "QueueUrl": "https://queue.amazonaws.com/012345678910/MyQueue" + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "The following operation creates an SQS queue named MyQueue.", + "id": "create-an-sqs-queue-1445915686197", + "title": "Create an SQS queue" + } + ], + "GetQueueUrl": [ + { + "input": { + "QueueName": "MyQueue", + "QueueOwnerAWSAccountId": "12345678910" + }, + "output": { + "QueueUrl": "https://queue.amazonaws.com/12345678910/MyQueue" + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "The following example retrieves the queue ARN.", + "id": "retrieve-queue-attributes-from-an-sqs-queue-1445915930574", + "title": "Retrieve queue attributes from an SQS queue" + } + ] + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/sqs/2012-11-05/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/sqs/2012-11-05/paginators-1.json new file mode 100644 index 000000000..e7ac48a9f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/sqs/2012-11-05/paginators-1.json @@ -0,0 +1,7 @@ +{ + "pagination": { + "ListQueues": { + "result_key": "QueueUrls" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/api-2.json new file mode 100644 index 000000000..a29b991bc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/api-2.json @@ -0,0 +1,1742 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2014-11-06", + "endpointPrefix":"ssm", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"Amazon SSM", + "serviceFullName":"Amazon Simple Systems Management Service", + "signatureVersion":"v4", + "targetPrefix":"AmazonSSM" + }, + "operations":{ + "AddTagsToResource":{ + "name":"AddTagsToResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddTagsToResourceRequest"}, + "output":{"shape":"AddTagsToResourceResult"}, + "errors":[ + {"shape":"InvalidResourceType"}, + {"shape":"InvalidResourceId"}, + {"shape":"InternalServerError"} + ] + }, + "CancelCommand":{ + "name":"CancelCommand", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelCommandRequest"}, + "output":{"shape":"CancelCommandResult"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"InvalidCommandId"}, + {"shape":"InvalidInstanceId"}, + {"shape":"DuplicateInstanceId"} + ] + }, + "CreateActivation":{ + "name":"CreateActivation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateActivationRequest"}, + "output":{"shape":"CreateActivationResult"}, + "errors":[ + {"shape":"InternalServerError"} + ] + }, + "CreateAssociation":{ + "name":"CreateAssociation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateAssociationRequest"}, + "output":{"shape":"CreateAssociationResult"}, + "errors":[ + {"shape":"AssociationAlreadyExists"}, + {"shape":"AssociationLimitExceeded"}, + {"shape":"InternalServerError"}, + 
{"shape":"InvalidDocument"}, + {"shape":"InvalidInstanceId"}, + {"shape":"UnsupportedPlatformType"}, + {"shape":"InvalidParameters"} + ] + }, + "CreateAssociationBatch":{ + "name":"CreateAssociationBatch", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateAssociationBatchRequest"}, + "output":{"shape":"CreateAssociationBatchResult"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"InvalidDocument"}, + {"shape":"InvalidInstanceId"}, + {"shape":"InvalidParameters"}, + {"shape":"DuplicateInstanceId"}, + {"shape":"AssociationLimitExceeded"}, + {"shape":"UnsupportedPlatformType"} + ] + }, + "CreateDocument":{ + "name":"CreateDocument", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDocumentRequest"}, + "output":{"shape":"CreateDocumentResult"}, + "errors":[ + {"shape":"DocumentAlreadyExists"}, + {"shape":"MaxDocumentSizeExceeded"}, + {"shape":"InternalServerError"}, + {"shape":"InvalidDocumentContent"}, + {"shape":"DocumentLimitExceeded"} + ] + }, + "DeleteActivation":{ + "name":"DeleteActivation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteActivationRequest"}, + "output":{"shape":"DeleteActivationResult"}, + "errors":[ + {"shape":"InvalidActivationId"}, + {"shape":"InvalidActivation"}, + {"shape":"InternalServerError"} + ] + }, + "DeleteAssociation":{ + "name":"DeleteAssociation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAssociationRequest"}, + "output":{"shape":"DeleteAssociationResult"}, + "errors":[ + {"shape":"AssociationDoesNotExist"}, + {"shape":"InternalServerError"}, + {"shape":"InvalidDocument"}, + {"shape":"InvalidInstanceId"}, + {"shape":"TooManyUpdates"} + ] + }, + "DeleteDocument":{ + "name":"DeleteDocument", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDocumentRequest"}, + "output":{"shape":"DeleteDocumentResult"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"InvalidDocument"}, + {"shape":"InvalidDocumentOperation"}, + {"shape":"AssociatedInstances"} + ] + }, + "DeregisterManagedInstance":{ + "name":"DeregisterManagedInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeregisterManagedInstanceRequest"}, + "output":{"shape":"DeregisterManagedInstanceResult"}, + "errors":[ + {"shape":"InvalidInstanceId"}, + {"shape":"InternalServerError"} + ] + }, + "DescribeActivations":{ + "name":"DescribeActivations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeActivationsRequest"}, + "output":{"shape":"DescribeActivationsResult"}, + "errors":[ + {"shape":"InvalidFilter"}, + {"shape":"InvalidNextToken"}, + {"shape":"InternalServerError"} + ] + }, + "DescribeAssociation":{ + "name":"DescribeAssociation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAssociationRequest"}, + "output":{"shape":"DescribeAssociationResult"}, + "errors":[ + {"shape":"AssociationDoesNotExist"}, + {"shape":"InternalServerError"}, + {"shape":"InvalidDocument"}, + {"shape":"InvalidInstanceId"} + ] + }, + "DescribeDocument":{ + "name":"DescribeDocument", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDocumentRequest"}, + "output":{"shape":"DescribeDocumentResult"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"InvalidDocument"} + ] + }, + "DescribeDocumentPermission":{ + "name":"DescribeDocumentPermission", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"DescribeDocumentPermissionRequest"}, + "output":{"shape":"DescribeDocumentPermissionResponse"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"InvalidDocument"}, + {"shape":"InvalidPermissionType"} + ] + }, + "DescribeInstanceInformation":{ + "name":"DescribeInstanceInformation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeInstanceInformationRequest"}, + "output":{"shape":"DescribeInstanceInformationResult"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"InvalidInstanceId"}, + {"shape":"InvalidNextToken"}, + {"shape":"InvalidInstanceInformationFilterValue"}, + {"shape":"InvalidFilterKey"} + ] + }, + "GetDocument":{ + "name":"GetDocument", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDocumentRequest"}, + "output":{"shape":"GetDocumentResult"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"InvalidDocument"} + ] + }, + "ListAssociations":{ + "name":"ListAssociations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAssociationsRequest"}, + "output":{"shape":"ListAssociationsResult"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"InvalidNextToken"} + ] + }, + "ListCommandInvocations":{ + "name":"ListCommandInvocations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListCommandInvocationsRequest"}, + "output":{"shape":"ListCommandInvocationsResult"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"InvalidCommandId"}, + {"shape":"InvalidInstanceId"}, + {"shape":"InvalidFilterKey"}, + {"shape":"InvalidNextToken"} + ] + }, + "ListCommands":{ + "name":"ListCommands", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListCommandsRequest"}, + "output":{"shape":"ListCommandsResult"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"InvalidCommandId"}, + {"shape":"InvalidInstanceId"}, + {"shape":"InvalidFilterKey"}, + {"shape":"InvalidNextToken"} + ] + }, + "ListDocuments":{ + "name":"ListDocuments", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDocumentsRequest"}, + "output":{"shape":"ListDocumentsResult"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"InvalidNextToken"}, + {"shape":"InvalidFilterKey"} + ] + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResult"}, + "errors":[ + {"shape":"InvalidResourceType"}, + {"shape":"InvalidResourceId"}, + {"shape":"InternalServerError"} + ] + }, + "ModifyDocumentPermission":{ + "name":"ModifyDocumentPermission", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDocumentPermissionRequest"}, + "output":{"shape":"ModifyDocumentPermissionResponse"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"InvalidDocument"}, + {"shape":"InvalidPermissionType"}, + {"shape":"DocumentPermissionLimit"}, + {"shape":"DocumentLimitExceeded"} + ] + }, + "RemoveTagsFromResource":{ + "name":"RemoveTagsFromResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveTagsFromResourceRequest"}, + "output":{"shape":"RemoveTagsFromResourceResult"}, + "errors":[ + {"shape":"InvalidResourceType"}, + {"shape":"InvalidResourceId"}, + {"shape":"InternalServerError"} + ] + }, + "SendCommand":{ + "name":"SendCommand", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"SendCommandRequest"}, + "output":{"shape":"SendCommandResult"}, + "errors":[ + {"shape":"DuplicateInstanceId"}, + {"shape":"InternalServerError"}, + {"shape":"InvalidInstanceId"}, + {"shape":"InvalidDocument"}, + {"shape":"InvalidOutputFolder"}, + {"shape":"InvalidParameters"}, + {"shape":"UnsupportedPlatformType"}, + {"shape":"MaxDocumentSizeExceeded"} + ] + }, + "UpdateAssociationStatus":{ + "name":"UpdateAssociationStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateAssociationStatusRequest"}, + "output":{"shape":"UpdateAssociationStatusResult"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"InvalidInstanceId"}, + {"shape":"InvalidDocument"}, + {"shape":"AssociationDoesNotExist"}, + {"shape":"StatusUnchanged"}, + {"shape":"TooManyUpdates"} + ] + }, + "UpdateManagedInstanceRole":{ + "name":"UpdateManagedInstanceRole", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateManagedInstanceRoleRequest"}, + "output":{"shape":"UpdateManagedInstanceRoleResult"}, + "errors":[ + {"shape":"InvalidInstanceId"}, + {"shape":"InternalServerError"} + ] + } + }, + "shapes":{ + "AccountId":{ + "type":"string", + "pattern":"(?i)all|[0-9]{12}" + }, + "AccountIdList":{ + "type":"list", + "member":{ + "shape":"AccountId", + "locationName":"AccountId" + }, + "max":20 + }, + "Activation":{ + "type":"structure", + "members":{ + "ActivationId":{"shape":"ActivationId"}, + "Description":{"shape":"ActivationDescription"}, + "DefaultInstanceName":{"shape":"DefaultInstanceName"}, + "IamRole":{"shape":"IamRole"}, + "RegistrationLimit":{"shape":"RegistrationLimit"}, + "RegistrationsCount":{"shape":"RegistrationsCount"}, + "ExpirationDate":{"shape":"ExpirationDate"}, + "Expired":{"shape":"Boolean"}, + "CreatedDate":{"shape":"CreatedDate"} + } + }, + "ActivationCode":{ + "type":"string", + "max":250, + "min":20 + }, + "ActivationDescription":{ + "type":"string", + "max":256, + "min":0 + }, + "ActivationId":{ + "type":"string", + "pattern":"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$" + }, + "ActivationList":{ + "type":"list", + "member":{"shape":"Activation"} + }, + "AddTagsToResourceRequest":{ + "type":"structure", + "required":[ + "ResourceType", + "ResourceId", + "Tags" + ], + "members":{ + "ResourceType":{"shape":"ResourceTypeForTagging"}, + "ResourceId":{"shape":"ResourceId"}, + "Tags":{"shape":"TagList"} + } + }, + "AddTagsToResourceResult":{ + "type":"structure", + "members":{ + } + }, + "AssociatedInstances":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "Association":{ + "type":"structure", + "members":{ + "Name":{"shape":"DocumentName"}, + "InstanceId":{"shape":"InstanceId"} + } + }, + "AssociationAlreadyExists":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "AssociationDescription":{ + "type":"structure", + "members":{ + "Name":{"shape":"DocumentName"}, + "InstanceId":{"shape":"InstanceId"}, + "Date":{"shape":"DateTime"}, + "Status":{"shape":"AssociationStatus"}, + "Parameters":{"shape":"Parameters"} + } + }, + "AssociationDescriptionList":{ + "type":"list", + "member":{ + "shape":"AssociationDescription", + "locationName":"AssociationDescription" + } + }, + "AssociationDoesNotExist":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "AssociationFilter":{ + "type":"structure", + "required":[ + "key", + "value" + ], + "members":{ + "key":{"shape":"AssociationFilterKey"}, + "value":{"shape":"AssociationFilterValue"} + } + }, + 
"AssociationFilterKey":{ + "type":"string", + "enum":[ + "InstanceId", + "Name" + ] + }, + "AssociationFilterList":{ + "type":"list", + "member":{ + "shape":"AssociationFilter", + "locationName":"AssociationFilter" + }, + "min":1 + }, + "AssociationFilterValue":{ + "type":"string", + "min":1 + }, + "AssociationLimitExceeded":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "AssociationList":{ + "type":"list", + "member":{ + "shape":"Association", + "locationName":"Association" + } + }, + "AssociationStatus":{ + "type":"structure", + "required":[ + "Date", + "Name", + "Message" + ], + "members":{ + "Date":{"shape":"DateTime"}, + "Name":{"shape":"AssociationStatusName"}, + "Message":{"shape":"StatusMessage"}, + "AdditionalInfo":{"shape":"StatusAdditionalInfo"} + } + }, + "AssociationStatusName":{ + "type":"string", + "enum":[ + "Pending", + "Success", + "Failed" + ] + }, + "BatchErrorMessage":{"type":"string"}, + "Boolean":{"type":"boolean"}, + "CancelCommandRequest":{ + "type":"structure", + "required":["CommandId"], + "members":{ + "CommandId":{"shape":"CommandId"}, + "InstanceIds":{"shape":"InstanceIdList"} + } + }, + "CancelCommandResult":{ + "type":"structure", + "members":{ + } + }, + "Command":{ + "type":"structure", + "members":{ + "CommandId":{"shape":"CommandId"}, + "DocumentName":{"shape":"DocumentName"}, + "Comment":{"shape":"Comment"}, + "ExpiresAfter":{"shape":"DateTime"}, + "Parameters":{"shape":"Parameters"}, + "InstanceIds":{"shape":"InstanceIdList"}, + "RequestedDateTime":{"shape":"DateTime"}, + "Status":{"shape":"CommandStatus"}, + "OutputS3BucketName":{"shape":"S3BucketName"}, + "OutputS3KeyPrefix":{"shape":"S3KeyPrefix"} + } + }, + "CommandFilter":{ + "type":"structure", + "required":[ + "key", + "value" + ], + "members":{ + "key":{"shape":"CommandFilterKey"}, + "value":{"shape":"CommandFilterValue"} + } + }, + "CommandFilterKey":{ + "type":"string", + "enum":[ + "InvokedAfter", + "InvokedBefore", + "Status" + ] + }, + "CommandFilterList":{ + "type":"list", + "member":{"shape":"CommandFilter"}, + "max":3, + "min":1 + }, + "CommandFilterValue":{ + "type":"string", + "min":1 + }, + "CommandId":{ + "type":"string", + "max":36, + "min":36 + }, + "CommandInvocation":{ + "type":"structure", + "members":{ + "CommandId":{"shape":"CommandId"}, + "InstanceId":{"shape":"InstanceId"}, + "Comment":{"shape":"Comment"}, + "DocumentName":{"shape":"DocumentName"}, + "RequestedDateTime":{"shape":"DateTime"}, + "Status":{"shape":"CommandInvocationStatus"}, + "TraceOutput":{"shape":"InvocationTraceOutput"}, + "CommandPlugins":{"shape":"CommandPluginList"} + } + }, + "CommandInvocationList":{ + "type":"list", + "member":{"shape":"CommandInvocation"} + }, + "CommandInvocationStatus":{ + "type":"string", + "enum":[ + "Pending", + "InProgress", + "Cancelling", + "Success", + "TimedOut", + "Cancelled", + "Failed" + ] + }, + "CommandList":{ + "type":"list", + "member":{"shape":"Command"} + }, + "CommandMaxResults":{ + "type":"integer", + "max":50, + "min":1 + }, + "CommandPlugin":{ + "type":"structure", + "members":{ + "Name":{"shape":"CommandPluginName"}, + "Status":{"shape":"CommandPluginStatus"}, + "ResponseCode":{"shape":"ResponseCode"}, + "ResponseStartDateTime":{"shape":"DateTime"}, + "ResponseFinishDateTime":{"shape":"DateTime"}, + "Output":{"shape":"CommandPluginOutput"}, + "OutputS3BucketName":{"shape":"S3BucketName"}, + "OutputS3KeyPrefix":{"shape":"S3KeyPrefix"} + } + }, + "CommandPluginList":{ + "type":"list", + "member":{"shape":"CommandPlugin"} + }, + 
"CommandPluginName":{ + "type":"string", + "min":4 + }, + "CommandPluginOutput":{ + "type":"string", + "max":2500 + }, + "CommandPluginStatus":{ + "type":"string", + "enum":[ + "Pending", + "InProgress", + "Success", + "TimedOut", + "Cancelled", + "Failed" + ] + }, + "CommandStatus":{ + "type":"string", + "enum":[ + "Pending", + "InProgress", + "Cancelling", + "Success", + "TimedOut", + "Cancelled", + "Failed" + ] + }, + "Comment":{ + "type":"string", + "max":100 + }, + "ComputerName":{ + "type":"string", + "max":255, + "min":1 + }, + "CreateActivationRequest":{ + "type":"structure", + "required":["IamRole"], + "members":{ + "Description":{"shape":"ActivationDescription"}, + "DefaultInstanceName":{"shape":"DefaultInstanceName"}, + "IamRole":{"shape":"IamRole"}, + "RegistrationLimit":{ + "shape":"RegistrationLimit", + "box":true + }, + "ExpirationDate":{"shape":"ExpirationDate"} + } + }, + "CreateActivationResult":{ + "type":"structure", + "members":{ + "ActivationId":{"shape":"ActivationId"}, + "ActivationCode":{"shape":"ActivationCode"} + } + }, + "CreateAssociationBatchRequest":{ + "type":"structure", + "required":["Entries"], + "members":{ + "Entries":{"shape":"CreateAssociationBatchRequestEntries"} + } + }, + "CreateAssociationBatchRequestEntries":{ + "type":"list", + "member":{ + "shape":"CreateAssociationBatchRequestEntry", + "locationName":"entries" + } + }, + "CreateAssociationBatchRequestEntry":{ + "type":"structure", + "members":{ + "Name":{"shape":"DocumentName"}, + "InstanceId":{"shape":"InstanceId"}, + "Parameters":{"shape":"Parameters"} + } + }, + "CreateAssociationBatchResult":{ + "type":"structure", + "members":{ + "Successful":{"shape":"AssociationDescriptionList"}, + "Failed":{"shape":"FailedCreateAssociationList"} + } + }, + "CreateAssociationRequest":{ + "type":"structure", + "required":[ + "Name", + "InstanceId" + ], + "members":{ + "Name":{"shape":"DocumentName"}, + "InstanceId":{"shape":"InstanceId"}, + "Parameters":{"shape":"Parameters"} + } + }, + "CreateAssociationResult":{ + "type":"structure", + "members":{ + "AssociationDescription":{"shape":"AssociationDescription"} + } + }, + "CreateDocumentRequest":{ + "type":"structure", + "required":[ + "Content", + "Name" + ], + "members":{ + "Content":{"shape":"DocumentContent"}, + "Name":{"shape":"DocumentName"} + } + }, + "CreateDocumentResult":{ + "type":"structure", + "members":{ + "DocumentDescription":{"shape":"DocumentDescription"} + } + }, + "CreatedDate":{"type":"timestamp"}, + "DateTime":{"type":"timestamp"}, + "DefaultInstanceName":{ + "type":"string", + "max":256, + "min":0, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, + "DeleteActivationRequest":{ + "type":"structure", + "required":["ActivationId"], + "members":{ + "ActivationId":{"shape":"ActivationId"} + } + }, + "DeleteActivationResult":{ + "type":"structure", + "members":{ + } + }, + "DeleteAssociationRequest":{ + "type":"structure", + "required":[ + "Name", + "InstanceId" + ], + "members":{ + "Name":{"shape":"DocumentName"}, + "InstanceId":{"shape":"InstanceId"} + } + }, + "DeleteAssociationResult":{ + "type":"structure", + "members":{ + } + }, + "DeleteDocumentRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"DocumentName"} + } + }, + "DeleteDocumentResult":{ + "type":"structure", + "members":{ + } + }, + "DeregisterManagedInstanceRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{"shape":"ManagedInstanceId"} + } + }, + "DeregisterManagedInstanceResult":{ + 
"type":"structure", + "members":{ + } + }, + "DescribeActivationsFilter":{ + "type":"structure", + "members":{ + "FilterKey":{"shape":"DescribeActivationsFilterKeys"}, + "FilterValues":{"shape":"StringList"} + } + }, + "DescribeActivationsFilterKeys":{ + "type":"string", + "enum":[ + "ActivationIds", + "DefaultInstanceName", + "IamRole" + ] + }, + "DescribeActivationsFilterList":{ + "type":"list", + "member":{"shape":"DescribeActivationsFilter"} + }, + "DescribeActivationsRequest":{ + "type":"structure", + "members":{ + "Filters":{"shape":"DescribeActivationsFilterList"}, + "MaxResults":{ + "shape":"MaxResults", + "box":true + }, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeActivationsResult":{ + "type":"structure", + "members":{ + "ActivationList":{"shape":"ActivationList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeAssociationRequest":{ + "type":"structure", + "required":[ + "Name", + "InstanceId" + ], + "members":{ + "Name":{"shape":"DocumentName"}, + "InstanceId":{"shape":"InstanceId"} + } + }, + "DescribeAssociationResult":{ + "type":"structure", + "members":{ + "AssociationDescription":{"shape":"AssociationDescription"} + } + }, + "DescribeDocumentPermissionRequest":{ + "type":"structure", + "required":[ + "Name", + "PermissionType" + ], + "members":{ + "Name":{"shape":"DocumentName"}, + "PermissionType":{"shape":"DocumentPermissionType"} + } + }, + "DescribeDocumentPermissionResponse":{ + "type":"structure", + "members":{ + "AccountIds":{"shape":"AccountIdList"} + } + }, + "DescribeDocumentRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"DocumentARN"} + } + }, + "DescribeDocumentResult":{ + "type":"structure", + "members":{ + "Document":{"shape":"DocumentDescription"} + } + }, + "DescribeInstanceInformationRequest":{ + "type":"structure", + "members":{ + "InstanceInformationFilterList":{"shape":"InstanceInformationFilterList"}, + "MaxResults":{ + "shape":"MaxResultsEC2Compatible", + "box":true + }, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeInstanceInformationResult":{ + "type":"structure", + "members":{ + "InstanceInformationList":{"shape":"InstanceInformationList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescriptionInDocument":{"type":"string"}, + "DocumentARN":{ + "type":"string", + "pattern":"^[a-zA-Z0-9_\\-.:/]{3,128}$" + }, + "DocumentAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "exception":true + }, + "DocumentContent":{ + "type":"string", + "min":1 + }, + "DocumentDescription":{ + "type":"structure", + "members":{ + "Sha1":{"shape":"DocumentSha1"}, + "Hash":{"shape":"DocumentHash"}, + "HashType":{"shape":"DocumentHashType"}, + "Name":{"shape":"DocumentARN"}, + "Owner":{"shape":"DocumentOwner"}, + "CreatedDate":{"shape":"DateTime"}, + "Status":{"shape":"DocumentStatus"}, + "Description":{"shape":"DescriptionInDocument"}, + "Parameters":{"shape":"DocumentParameterList"}, + "PlatformTypes":{"shape":"PlatformTypeList"} + } + }, + "DocumentFilter":{ + "type":"structure", + "required":[ + "key", + "value" + ], + "members":{ + "key":{"shape":"DocumentFilterKey"}, + "value":{"shape":"DocumentFilterValue"} + } + }, + "DocumentFilterKey":{ + "type":"string", + "enum":[ + "Name", + "Owner", + "PlatformTypes" + ] + }, + "DocumentFilterList":{ + "type":"list", + "member":{ + "shape":"DocumentFilter", + "locationName":"DocumentFilter" + }, + "min":1 + }, + "DocumentFilterValue":{ + "type":"string", + "min":1 + }, + "DocumentHash":{ + "type":"string", + 
"max":256 + }, + "DocumentHashType":{ + "type":"string", + "enum":[ + "Sha256", + "Sha1" + ] + }, + "DocumentIdentifier":{ + "type":"structure", + "members":{ + "Name":{"shape":"DocumentARN"}, + "Owner":{"shape":"DocumentOwner"}, + "PlatformTypes":{"shape":"PlatformTypeList"} + } + }, + "DocumentIdentifierList":{ + "type":"list", + "member":{ + "shape":"DocumentIdentifier", + "locationName":"DocumentIdentifier" + } + }, + "DocumentLimitExceeded":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "exception":true + }, + "DocumentName":{ + "type":"string", + "pattern":"^[a-zA-Z0-9_\\-.]{3,128}$" + }, + "DocumentOwner":{"type":"string"}, + "DocumentParameter":{ + "type":"structure", + "members":{ + "Name":{"shape":"DocumentParameterName"}, + "Type":{"shape":"DocumentParameterType"}, + "Description":{"shape":"DocumentParameterDescrption"}, + "DefaultValue":{"shape":"DocumentParameterDefaultValue"} + } + }, + "DocumentParameterDefaultValue":{"type":"string"}, + "DocumentParameterDescrption":{"type":"string"}, + "DocumentParameterList":{ + "type":"list", + "member":{ + "shape":"DocumentParameter", + "locationName":"DocumentParameter" + } + }, + "DocumentParameterName":{"type":"string"}, + "DocumentParameterType":{ + "type":"string", + "enum":[ + "String", + "StringList" + ] + }, + "DocumentPermissionLimit":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "exception":true + }, + "DocumentPermissionType":{ + "type":"string", + "enum":["Share"] + }, + "DocumentSha1":{"type":"string"}, + "DocumentStatus":{ + "type":"string", + "enum":[ + "Creating", + "Active", + "Deleting" + ] + }, + "DuplicateInstanceId":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ExpirationDate":{"type":"timestamp"}, + "FailedCreateAssociation":{ + "type":"structure", + "members":{ + "Entry":{"shape":"CreateAssociationBatchRequestEntry"}, + "Message":{"shape":"BatchErrorMessage"}, + "Fault":{"shape":"Fault"} + } + }, + "FailedCreateAssociationList":{ + "type":"list", + "member":{ + "shape":"FailedCreateAssociation", + "locationName":"FailedCreateAssociationEntry" + } + }, + "Fault":{ + "type":"string", + "enum":[ + "Client", + "Server", + "Unknown" + ] + }, + "GetDocumentRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"DocumentARN"} + } + }, + "GetDocumentResult":{ + "type":"structure", + "members":{ + "Name":{"shape":"DocumentARN"}, + "Content":{"shape":"DocumentContent"} + } + }, + "IPAddress":{ + "type":"string", + "max":46, + "min":1 + }, + "IamRole":{ + "type":"string", + "max":64 + }, + "InstanceId":{ + "type":"string", + "pattern":"(^i-(\\w{8}|\\w{17})$)|(^mi-\\w{17}$)" + }, + "InstanceIdList":{ + "type":"list", + "member":{"shape":"InstanceId"}, + "max":50, + "min":1 + }, + "InstanceInformation":{ + "type":"structure", + "members":{ + "InstanceId":{"shape":"InstanceId"}, + "PingStatus":{"shape":"PingStatus"}, + "LastPingDateTime":{ + "shape":"DateTime", + "box":true + }, + "AgentVersion":{"shape":"Version"}, + "IsLatestVersion":{ + "shape":"Boolean", + "box":true + }, + "PlatformType":{"shape":"PlatformType"}, + "PlatformName":{"shape":"String"}, + "PlatformVersion":{"shape":"String"}, + "ActivationId":{"shape":"ActivationId"}, + "IamRole":{"shape":"IamRole"}, + "RegistrationDate":{ + "shape":"DateTime", + "box":true + }, + "ResourceType":{"shape":"ResourceType"}, + "Name":{"shape":"String"}, + "IPAddress":{"shape":"IPAddress"}, + "ComputerName":{"shape":"ComputerName"} + } + }, + 
"InstanceInformationFilter":{ + "type":"structure", + "required":[ + "key", + "valueSet" + ], + "members":{ + "key":{"shape":"InstanceInformationFilterKey"}, + "valueSet":{"shape":"InstanceInformationFilterValueSet"} + } + }, + "InstanceInformationFilterKey":{ + "type":"string", + "enum":[ + "InstanceIds", + "AgentVersion", + "PingStatus", + "PlatformTypes", + "ActivationIds", + "IamRole", + "ResourceType" + ] + }, + "InstanceInformationFilterList":{ + "type":"list", + "member":{ + "shape":"InstanceInformationFilter", + "locationName":"InstanceInformationFilter" + }, + "min":1 + }, + "InstanceInformationFilterValue":{ + "type":"string", + "min":1 + }, + "InstanceInformationFilterValueSet":{ + "type":"list", + "member":{ + "shape":"InstanceInformationFilterValue", + "locationName":"InstanceInformationFilterValue" + }, + "max":100, + "min":1 + }, + "InstanceInformationList":{ + "type":"list", + "member":{ + "shape":"InstanceInformation", + "locationName":"InstanceInformation" + } + }, + "InternalServerError":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "exception":true + }, + "InvalidActivation":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "exception":true + }, + "InvalidActivationId":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "exception":true + }, + "InvalidCommandId":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidDocument":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "exception":true + }, + "InvalidDocumentContent":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "exception":true + }, + "InvalidDocumentOperation":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "exception":true + }, + "InvalidFilter":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "exception":true + }, + "InvalidFilterKey":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidInstanceId":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "exception":true + }, + "InvalidInstanceInformationFilterValue":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "InvalidNextToken":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidOutputFolder":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidParameters":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "exception":true + }, + "InvalidPermissionType":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "exception":true + }, + "InvalidResourceId":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidResourceType":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvocationTraceOutput":{ + "type":"string", + "max":2500 + }, + "KeyList":{ + "type":"list", + "member":{"shape":"TagKey"} + }, + "ListAssociationsRequest":{ + "type":"structure", + "required":["AssociationFilterList"], + "members":{ + "AssociationFilterList":{"shape":"AssociationFilterList"}, + "MaxResults":{ + "shape":"MaxResults", + "box":true + }, + "NextToken":{"shape":"NextToken"} + } + }, + "ListAssociationsResult":{ + "type":"structure", + "members":{ + "Associations":{"shape":"AssociationList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListCommandInvocationsRequest":{ + "type":"structure", + "members":{ + "CommandId":{"shape":"CommandId"}, 
+ "InstanceId":{"shape":"InstanceId"}, + "MaxResults":{ + "shape":"CommandMaxResults", + "box":true + }, + "NextToken":{"shape":"NextToken"}, + "Filters":{"shape":"CommandFilterList"}, + "Details":{"shape":"Boolean"} + } + }, + "ListCommandInvocationsResult":{ + "type":"structure", + "members":{ + "CommandInvocations":{"shape":"CommandInvocationList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListCommandsRequest":{ + "type":"structure", + "members":{ + "CommandId":{"shape":"CommandId"}, + "InstanceId":{"shape":"InstanceId"}, + "MaxResults":{ + "shape":"CommandMaxResults", + "box":true + }, + "NextToken":{"shape":"NextToken"}, + "Filters":{"shape":"CommandFilterList"} + } + }, + "ListCommandsResult":{ + "type":"structure", + "members":{ + "Commands":{"shape":"CommandList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListDocumentsRequest":{ + "type":"structure", + "members":{ + "DocumentFilterList":{"shape":"DocumentFilterList"}, + "MaxResults":{ + "shape":"MaxResults", + "box":true + }, + "NextToken":{"shape":"NextToken"} + } + }, + "ListDocumentsResult":{ + "type":"structure", + "members":{ + "DocumentIdentifiers":{"shape":"DocumentIdentifierList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":[ + "ResourceType", + "ResourceId" + ], + "members":{ + "ResourceType":{"shape":"ResourceTypeForTagging"}, + "ResourceId":{"shape":"ResourceId"} + } + }, + "ListTagsForResourceResult":{ + "type":"structure", + "members":{ + "TagList":{"shape":"TagList"} + } + }, + "ManagedInstanceId":{ + "type":"string", + "pattern":"^mi-[0-9a-f]{17}$" + }, + "MaxDocumentSizeExceeded":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "exception":true + }, + "MaxResults":{ + "type":"integer", + "max":50, + "min":1 + }, + "MaxResultsEC2Compatible":{ + "type":"integer", + "max":50, + "min":5 + }, + "ModifyDocumentPermissionRequest":{ + "type":"structure", + "required":[ + "Name", + "PermissionType" + ], + "members":{ + "Name":{"shape":"DocumentName"}, + "PermissionType":{"shape":"DocumentPermissionType"}, + "AccountIdsToAdd":{"shape":"AccountIdList"}, + "AccountIdsToRemove":{"shape":"AccountIdList"} + } + }, + "ModifyDocumentPermissionResponse":{ + "type":"structure", + "members":{ + } + }, + "NextToken":{"type":"string"}, + "ParameterName":{"type":"string"}, + "ParameterValue":{"type":"string"}, + "ParameterValueList":{ + "type":"list", + "member":{"shape":"ParameterValue"} + }, + "Parameters":{ + "type":"map", + "key":{"shape":"ParameterName"}, + "value":{"shape":"ParameterValueList"} + }, + "PingStatus":{ + "type":"string", + "enum":[ + "Online", + "ConnectionLost", + "Inactive" + ] + }, + "PlatformType":{ + "type":"string", + "enum":[ + "Windows", + "Linux" + ] + }, + "PlatformTypeList":{ + "type":"list", + "member":{ + "shape":"PlatformType", + "locationName":"PlatformType" + } + }, + "RegistrationLimit":{ + "type":"integer", + "max":1000, + "min":1 + }, + "RegistrationsCount":{ + "type":"integer", + "max":1000, + "min":1 + }, + "RemoveTagsFromResourceRequest":{ + "type":"structure", + "required":[ + "ResourceType", + "ResourceId", + "TagKeys" + ], + "members":{ + "ResourceType":{"shape":"ResourceTypeForTagging"}, + "ResourceId":{"shape":"ResourceId"}, + "TagKeys":{"shape":"KeyList"} + } + }, + "RemoveTagsFromResourceResult":{ + "type":"structure", + "members":{ + } + }, + "ResourceId":{ + "type":"string", + "pattern":"^mi-[0-9a-f]{17}$" + }, + "ResourceType":{ + "type":"string", + "enum":[ + "ManagedInstance", 
+ "Document", + "EC2Instance" + ] + }, + "ResourceTypeForTagging":{ + "type":"string", + "enum":["ManagedInstance"] + }, + "ResponseCode":{"type":"integer"}, + "S3BucketName":{ + "type":"string", + "max":63, + "min":3 + }, + "S3KeyPrefix":{ + "type":"string", + "max":500 + }, + "SendCommandRequest":{ + "type":"structure", + "required":[ + "InstanceIds", + "DocumentName" + ], + "members":{ + "InstanceIds":{"shape":"InstanceIdList"}, + "DocumentName":{"shape":"DocumentARN"}, + "DocumentHash":{"shape":"DocumentHash"}, + "DocumentHashType":{"shape":"DocumentHashType"}, + "TimeoutSeconds":{ + "shape":"TimeoutSeconds", + "box":true + }, + "Comment":{"shape":"Comment"}, + "Parameters":{"shape":"Parameters"}, + "OutputS3BucketName":{"shape":"S3BucketName"}, + "OutputS3KeyPrefix":{"shape":"S3KeyPrefix"} + } + }, + "SendCommandResult":{ + "type":"structure", + "members":{ + "Command":{"shape":"Command"} + } + }, + "StatusAdditionalInfo":{ + "type":"string", + "max":1024 + }, + "StatusMessage":{ + "type":"string", + "max":1024 + }, + "StatusUnchanged":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "String":{"type":"string"}, + "StringList":{ + "type":"list", + "member":{"shape":"String"} + }, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{"shape":"TagKey"}, + "Value":{"shape":"TagValue"} + } + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^(?!^(?i)aws:)(?=^[\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*$).*$" + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"} + }, + "TagValue":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, + "TimeoutSeconds":{ + "type":"integer", + "max":2592000, + "min":30 + }, + "TooManyUpdates":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "UnsupportedPlatformType":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "exception":true + }, + "UpdateAssociationStatusRequest":{ + "type":"structure", + "required":[ + "Name", + "InstanceId", + "AssociationStatus" + ], + "members":{ + "Name":{"shape":"DocumentName"}, + "InstanceId":{"shape":"InstanceId"}, + "AssociationStatus":{"shape":"AssociationStatus"} + } + }, + "UpdateAssociationStatusResult":{ + "type":"structure", + "members":{ + "AssociationDescription":{"shape":"AssociationDescription"} + } + }, + "UpdateManagedInstanceRoleRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "IamRole" + ], + "members":{ + "InstanceId":{"shape":"ManagedInstanceId"}, + "IamRole":{"shape":"IamRole"} + } + }, + "UpdateManagedInstanceRoleResult":{ + "type":"structure", + "members":{ + } + }, + "Version":{ + "type":"string", + "pattern":"^[0-9]{1,6}(\\.[0-9]{1,6}){2,3}$" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/docs-2.json new file mode 100644 index 000000000..64d7b0e2c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/docs-2.json @@ -0,0 +1,1251 @@ +{ + "version": "2.0", + "service": "

    This is the Amazon Simple Systems Manager (SSM) API Reference. SSM enables you to remotely manage the configuration of your on-premises servers and virtual machines (VMs) and your Amazon EC2 instances using scripts, commands, or the Amazon EC2 console. SSM includes an on-demand solution called Amazon EC2 Run Command and a lightweight instance configuration solution called SSM Config.

    This reference is intended to be used with the EC2 Run Command User Guide for Linux or Windows.

    You must register your on-premises servers and VMs through an activation process before you can configure them using Run Command. Registered servers and VMs are called managed instances. For more information, see Setting Up Run Command On Managed Instances (On-Premises Servers and VMs) on Linux or Setting Up Run Command On Managed Instances (On-Premises Servers and VMs) on Windows.

    Run Command

    Run Command provides an on-demand experience for executing commands. You can use pre-defined SSM documents to perform the actions listed later in this section, or you can create your own documents. With these documents, you can remotely configure your instances by sending commands using the Commands page in the Amazon EC2 console, AWS Tools for Windows PowerShell, the AWS CLI, or AWS SDKs.

    Run Command reports the status of the command execution for each instance targeted by a command. You can also audit the command execution to understand who executed commands, when, and what changes were made. By switching between different SSM documents, you can quickly configure your instances with different types of commands. To get started with Run Command, verify that your environment meets the prerequisites for remotely running commands on EC2 instances (Linux or Windows).

    SSM Config

    SSM Config is a lightweight instance configuration solution. SSM Config is currently only available for Windows instances. With SSM Config, you can specify a setup configuration for your instances. SSM Config is similar to EC2 User Data, which is another way of running one-time scripts or applying settings during instance launch. SSM Config is an extension of this capability. Using SSM documents, you can specify which actions the system should perform on your instances, including which applications to install, which AWS Directory Service directory to join, which Microsoft PowerShell modules to install, etc. If an instance is missing one or more of these configurations, the system makes those changes. By default, the system checks every five minutes to see if there is a new configuration to apply as defined in a new SSM document. If so, the system updates the instances accordingly. In this way, you can remotely maintain a consistent configuration baseline on your instances. SSM Config is available using the AWS CLI or the AWS Tools for Windows PowerShell. For more information, see Managing Windows Instance Configuration.

    SSM Config and Run Command include the following pre-defined documents.

    Linux

    • AWS-RunShellScript to run shell scripts

    • AWS-UpdateSSMAgent to update the Amazon SSM agent

    Windows

    • AWS-JoinDirectoryServiceDomain to join an AWS Directory

    • AWS-RunPowerShellScript to run PowerShell commands or scripts

    • AWS-UpdateEC2Config to update the EC2Config service

    • AWS-ConfigureWindowsUpdate to configure Windows Update settings

    • AWS-InstallApplication to install, repair, or uninstall software using an MSI package

    • AWS-InstallPowerShellModule to install PowerShell modules

    • AWS-ConfigureCloudWatch to configure Amazon CloudWatch Logs to monitor applications and systems

    • AWS-ListWindowsInventory to collect information about an EC2 instance running Windows.

    • AWS-FindWindowsUpdates to scan an instance and determine which updates are missing.

    • AWS-InstallMissingWindowsUpdates to install missing updates on your EC2 instance.

    • AWS-InstallSpecificWindowsUpdates to install one or more specific updates.

    The commands or scripts specified in SSM documents run with administrative privilege on your instances because the Amazon SSM agent runs as root on Linux and the EC2Config service runs in the Local System account on Windows. If a user has permission to execute any of the pre-defined SSM documents (any document that begins with AWS-*) then that user also has administrator access to the instance. Delegate access to Run Command and SSM Config judiciously. This becomes extremely important if you create your own SSM documents. Amazon Web Services does not provide guidance about how to create secure SSM documents. You create SSM documents and delegate access to Run Command at your own risk. As a security best practice, we recommend that you assign access to \"AWS-*\" documents, especially the AWS-RunShellScript document on Linux and the AWS-RunPowerShellScript document on Windows, to trusted administrators only. You can create SSM documents for specific tasks and delegate access to non-administrators.
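    For readers of this vendored model, a minimal Go sketch of driving Run Command through this SDK follows; the region and instance ID are placeholder values, and AWS-RunShellScript is one of the pre-defined Linux documents listed above.

```go
// Minimal sketch: send AWS-RunShellScript to one Linux instance using
// aws-sdk-go. The region and instance ID are placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssm"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := ssm.New(sess)

	out, err := svc.SendCommand(&ssm.SendCommandInput{
		DocumentName: aws.String("AWS-RunShellScript"),
		InstanceIds:  []*string{aws.String("i-0123456789abcdef0")}, // placeholder
		Parameters: map[string][]*string{
			"commands": {aws.String("uptime")},
		},
		Comment:        aws.String("example: check uptime"),
		TimeoutSeconds: aws.Int64(60), // the TimeoutSeconds shape allows 30..2592000
	})
	if err != nil {
		log.Fatal(err)
	}
	// The returned Command carries the CommandId that ListCommands,
	// ListCommandInvocations, and CancelCommand refer back to.
	fmt.Println(aws.StringValue(out.Command.CommandId))
}
```

    The later snippets reuse this client and these imports rather than repeating them.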

    For information about creating and sharing SSM documents, see the following topics in the SSM User Guide:

    ", + "operations": { + "AddTagsToResource": "

    Adds or overwrites one or more tags for the specified resource. Tags are metadata that you assign to your managed instances. Tags enable you to categorize your managed instances in different ways, for example, by purpose, owner, or environment. Each tag consists of a key and an optional value, both of which you define. For example, you could define a set of tags for your account's managed instances that helps you track each instance's owner and stack level. For example: Key=Owner and Value=DbAdmin, SysAdmin, or Dev. Or Key=Stack and Value=Production, Pre-Production, or Test. Each resource can have a maximum of 10 tags.

    We recommend that you devise a set of tag keys that meets your needs for each resource type. Using a consistent set of tag keys makes it easier for you to manage your resources. You can search and filter the resources based on the tags you add. Tags don't have any semantic meaning to Amazon EC2 and are interpreted strictly as a string of characters.

    For more information about tags, see Tagging Your Amazon EC2 Resources in the Amazon EC2 User Guide.
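    As a sketch (reusing the svc client and imports from the SendCommand example above; the managed-instance ID is a placeholder), tagging follows the AddTagsToResourceRequest shape in this model:

```go
// Minimal sketch: tag a managed instance. "ManagedInstance" is the only
// value the ResourceTypeForTagging enum in this model allows, and the
// resource ID must match the mi-... managed-instance pattern.
func tagInstance(svc *ssm.SSM) error {
	_, err := svc.AddTagsToResource(&ssm.AddTagsToResourceInput{
		ResourceType: aws.String("ManagedInstance"),
		ResourceId:   aws.String("mi-0123456789abcdef0"), // placeholder
		Tags: []*ssm.Tag{
			{Key: aws.String("Owner"), Value: aws.String("DbAdmin")},
			{Key: aws.String("Stack"), Value: aws.String("Production")},
		},
	})
	return err
}
```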

    ", + "CancelCommand": "

    Attempts to cancel the command specified by the Command ID. There is no guarantee that the command will be terminated and the underlying process stopped.
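    A sketch of a best-effort cancel (same client and imports as above; the command ID comes from a prior SendCommand call):

```go
// Minimal sketch: request cancellation of a previously sent command. As
// noted above, termination of the underlying process is not guaranteed.
func cancel(svc *ssm.SSM, commandID string) error {
	_, err := svc.CancelCommand(&ssm.CancelCommandInput{
		CommandId: aws.String(commandID),
		// InstanceIds omitted: cancel on every instance the command targeted.
	})
	return err
}
```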

    ", + "CreateActivation": "

    Registers your on-premises server or virtual machine with Amazon EC2 so that you can manage these resources using Run Command. An on-premises server or virtual machine that has been registered with EC2 is called a managed instance. For more information about activations, see Setting Up Managed Instances (Linux) or Setting Up Managed Instances (Windows) in the Amazon EC2 User Guide.
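    A sketch of creating an activation (the IAM role name is a placeholder; IamRole is the only required member of CreateActivationRequest in this model, and fmt comes from the imports above):

```go
// Minimal sketch: register on-premises capacity by creating an activation.
func createActivation(svc *ssm.SSM) error {
	out, err := svc.CreateActivation(&ssm.CreateActivationInput{
		IamRole:             aws.String("SSMManagedInstanceRole"), // placeholder role name
		DefaultInstanceName: aws.String("webserver"),
		Description:         aws.String("on-premises web tier"),
		RegistrationLimit:   aws.Int64(10), // the RegistrationLimit shape allows 1..1000
	})
	if err != nil {
		return err
	}
	// Per the shape docs below, the activation ID functions like a user
	// name and the activation code like a password when the agent registers.
	fmt.Println(aws.StringValue(out.ActivationId), aws.StringValue(out.ActivationCode))
	return nil
}
```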

    ", + "CreateAssociation": "

    Associates the specified SSM document with the specified instance.

    When you associate an SSM document with an instance, the configuration agent on the instance processes the document and configures the instance as specified.

    If you associate a document with an instance that already has an associated document, the system throws the AssociationAlreadyExists exception.
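    A sketch that creates an association and tolerates the AssociationAlreadyExists error described above (document name and instance ID are placeholders; this also assumes the github.com/aws/aws-sdk-go/aws/awserr import):

```go
// Minimal sketch: associate an SSM document with an instance, treating an
// existing association as success.
func associate(svc *ssm.SSM) error {
	_, err := svc.CreateAssociation(&ssm.CreateAssociationInput{
		Name:       aws.String("My-Config-Document"),  // placeholder document name
		InstanceId: aws.String("i-0123456789abcdef0"), // placeholder
	})
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "AssociationAlreadyExists" {
		return nil // the instance already has this association
	}
	return err
}
```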

    ", + "CreateAssociationBatch": "

    Associates the specified SSM document with the specified instances.

    When you associate an SSM document with an instance, the configuration agent on the instance processes the document and configures the instance as specified.

    If you associate a document with an instance that already has an associated document, the system throws the AssociationAlreadyExists exception.

    ", + "CreateDocument": "

    Creates an SSM document.

    After you create an SSM document, you can use CreateAssociation to associate it with one or more running instances.

    ", + "DeleteActivation": "

    Deletes an activation. You are not required to delete an activation. If you delete an activation, you can no longer use it to register additional managed instances. Deleting an activation does not de-register managed instances. You must manually de-register managed instances.

    ", + "DeleteAssociation": "

    Disassociates the specified SSM document from the specified instance.

    When you disassociate an SSM document from an instance, it does not change the configuration of the instance. To change the configuration state of an instance after you disassociate a document, you must create a new document with the desired configuration and associate it with the instance.

    ", + "DeleteDocument": "

    Deletes the SSM document and all instance associations to the document.

    Before you delete the SSM document, we recommend that you use DeleteAssociation to disassociate all instances that are associated with the document.

    ", + "DeregisterManagedInstance": "

    Removes the server or virtual machine from the list of registered servers. You can reregister the instance at any time. If you don’t plan to use Run Command on the server, we suggest uninstalling the SSM agent first.

    ", + "DescribeActivations": "

    Describes details about the activation, including the date and time the activation was created, the expiration date, the IAM role assigned to the instances in the activation, and the number of instances activated by this registration.

    ", + "DescribeAssociation": "

    Describes the associations for the specified SSM document or instance.

    ", + "DescribeDocument": "

    Describes the specified SSM document.

    ", + "DescribeDocumentPermission": "

    Describes the permissions for an SSM document. If you created the document, you are the owner. If a document is shared, it can either be shared privately (by specifying a user’s AWS account ID) or publicly (All).

    ", + "DescribeInstanceInformation": "

    Describes one or more of your instances. You can use this to get information about instances, such as the operating system platform, the SSM agent version, and status. If you specify one or more instance IDs, it returns information for those instances. If you do not specify instance IDs, it returns information for all your instances. If you specify an instance ID that is not valid or an instance that you do not own, you receive an error.
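    A sketch of paging through all instances with the NextToken protocol (same client and imports as above); note that MaxResults for this call is bounded to 5..50 by the MaxResultsEC2Compatible shape:

```go
// Minimal sketch: list every registered instance, following NextToken
// until the service stops returning one.
func listInstances(svc *ssm.SSM) error {
	input := &ssm.DescribeInstanceInformationInput{MaxResults: aws.Int64(50)}
	for {
		out, err := svc.DescribeInstanceInformation(input)
		if err != nil {
			return err
		}
		for _, info := range out.InstanceInformationList {
			fmt.Printf("%s %s agent=%s\n",
				aws.StringValue(info.InstanceId),
				aws.StringValue(info.PingStatus),
				aws.StringValue(info.AgentVersion))
		}
		if aws.StringValue(out.NextToken) == "" {
			return nil // no more pages
		}
		input.NextToken = out.NextToken
	}
}
```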

    ", + "GetDocument": "

    Gets the contents of the specified SSM document.

    ", + "ListAssociations": "

    Lists the associations for the specified SSM document or instance.

    ", + "ListCommandInvocations": "

    An invocation is a copy of a command sent to a specific instance. A command can apply to one or more instances. A command invocation applies to one instance. For example, if a user executes SendCommand against three instances, then a command invocation is created for each requested instance ID. ListCommandInvocations provides status about command execution.
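    A sketch that lists the per-instance invocations of one command (the command ID comes from SendCommand; same client and imports as above):

```go
// Minimal sketch: show how each targeted instance handled a command,
// with Details enabled so plugin output is included in the response.
func showInvocations(svc *ssm.SSM, commandID string) error {
	out, err := svc.ListCommandInvocations(&ssm.ListCommandInvocationsInput{
		CommandId: aws.String(commandID),
		Details:   aws.Bool(true),
	})
	if err != nil {
		return err
	}
	for _, inv := range out.CommandInvocations {
		fmt.Printf("%s -> %s\n",
			aws.StringValue(inv.InstanceId),
			aws.StringValue(inv.Status))
	}
	return nil
}
```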

    ", + "ListCommands": "

    Lists the commands requested by users of the AWS account.

    ", + "ListDocuments": "

    Describes one or more of your SSM documents.

    ", + "ListTagsForResource": "

    Returns a list of the tags assigned to the specified resource.

    ", + "ModifyDocumentPermission": "

    Shares a document publicly or privately. If you share a document privately, you must specify the AWS user account IDs for those people who can use the document. If you share a document publicly, you must specify All as the account ID.
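    A sketch of sharing a document with one account (the document name and account ID are placeholders; "Share" is the only value the DocumentPermissionType enum in this model allows):

```go
// Minimal sketch: share a privately owned document with another account.
// Passing "All" instead of an account ID would make the document public.
func shareDocument(svc *ssm.SSM) error {
	_, err := svc.ModifyDocumentPermission(&ssm.ModifyDocumentPermissionInput{
		Name:           aws.String("My-Config-Document"), // placeholder
		PermissionType: aws.String("Share"),
		AccountIdsToAdd: []*string{
			aws.String("123456789012"), // placeholder account ID
		},
	})
	return err
}
```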

    ", + "RemoveTagsFromResource": "

    Removes the specified tag keys from the specified resource.

    ", + "SendCommand": "

    Executes commands on one or more remote instances.

    ", + "UpdateAssociationStatus": "

    Updates the status of the SSM document associated with the specified instance.

    ", + "UpdateManagedInstanceRole": "

    Assigns or changes an AWS Identity and Access Management (IAM) role for the managed instance.

    " + }, + "shapes": { + "AccountId": { + "base": null, + "refs": { + "AccountIdList$member": null + } + }, + "AccountIdList": { + "base": null, + "refs": { + "DescribeDocumentPermissionResponse$AccountIds": "

    The account IDs that have permission to use this document. The ID can be either an AWS account or All.

    ", + "ModifyDocumentPermissionRequest$AccountIdsToAdd": "

    The AWS user accounts that should have access to the document. The account IDs can either be a group of account IDs or All.

    ", + "ModifyDocumentPermissionRequest$AccountIdsToRemove": "

    The AWS user accounts that should no longer have access to the document. The AWS user account can either be a group of account IDs or All. This action has a higher priority than AccountIdsToAdd. If you specify an account ID to add and the same ID to remove, the system removes access to the document.

    " + } + }, + "Activation": { + "base": "

    An activation registers one or more on-premises servers or virtual machines (VMs) with AWS so that you can configure those servers or VMs using Run Command. A server or VM that has been registered with AWS is called a managed instance.

    ", + "refs": { + "ActivationList$member": null + } + }, + "ActivationCode": { + "base": null, + "refs": { + "CreateActivationResult$ActivationCode": "

    The code the system generates when it processes the activation. The activation code functions like a password to validate the activation ID.

    " + } + }, + "ActivationDescription": { + "base": null, + "refs": { + "Activation$Description": "

    A user-defined description of the activation.

    ", + "CreateActivationRequest$Description": "

    A user-defined description of the resource that you want to register with Amazon EC2.

    " + } + }, + "ActivationId": { + "base": null, + "refs": { + "Activation$ActivationId": "

    The ID created by SSM when you submitted the activation.

    ", + "CreateActivationResult$ActivationId": "

    The ID number generated by the system when it processed the activation. The activation ID functions like a user name.

    ", + "DeleteActivationRequest$ActivationId": "

    The ID of the activation that you want to delete.

    ", + "InstanceInformation$ActivationId": "

    The activation ID created by SSM when the server or VM was registered.

    " + } + }, + "ActivationList": { + "base": null, + "refs": { + "DescribeActivationsResult$ActivationList": "

    A list of activations for your AWS account.

    " + } + }, + "AddTagsToResourceRequest": { + "base": null, + "refs": { + } + }, + "AddTagsToResourceResult": { + "base": null, + "refs": { + } + }, + "AssociatedInstances": { + "base": "

    You must disassociate an SSM document from all instances before you can delete it.

    ", + "refs": { + } + }, + "Association": { + "base": "

    Describes an association of an SSM document and an instance.

    ", + "refs": { + "AssociationList$member": null + } + }, + "AssociationAlreadyExists": { + "base": "

    The specified association already exists.

    ", + "refs": { + } + }, + "AssociationDescription": { + "base": "

    Describes the parameters for a document.

    ", + "refs": { + "AssociationDescriptionList$member": null, + "CreateAssociationResult$AssociationDescription": "

    Information about the association.

    ", + "DescribeAssociationResult$AssociationDescription": "

    Information about the association.

    ", + "UpdateAssociationStatusResult$AssociationDescription": "

    Information about the association.

    " + } + }, + "AssociationDescriptionList": { + "base": null, + "refs": { + "CreateAssociationBatchResult$Successful": "

    Information about the associations that succeeded.

    " + } + }, + "AssociationDoesNotExist": { + "base": "

    The specified association does not exist.

    ", + "refs": { + } + }, + "AssociationFilter": { + "base": "

    Describes a filter.

    ", + "refs": { + "AssociationFilterList$member": null + } + }, + "AssociationFilterKey": { + "base": null, + "refs": { + "AssociationFilter$key": "

    The name of the filter.

    " + } + }, + "AssociationFilterList": { + "base": null, + "refs": { + "ListAssociationsRequest$AssociationFilterList": "

    One or more filters. Use a filter to return a more specific list of results.

    " + } + }, + "AssociationFilterValue": { + "base": null, + "refs": { + "AssociationFilter$value": "

    The filter value.

    " + } + }, + "AssociationLimitExceeded": { + "base": "

    You can have at most 2,000 active associations.

    ", + "refs": { + } + }, + "AssociationList": { + "base": null, + "refs": { + "ListAssociationsResult$Associations": "

    The associations.

    " + } + }, + "AssociationStatus": { + "base": "

    Describes an association status.

    ", + "refs": { + "AssociationDescription$Status": "

    The association status.

    ", + "UpdateAssociationStatusRequest$AssociationStatus": "

    The association status.

    " + } + }, + "AssociationStatusName": { + "base": null, + "refs": { + "AssociationStatus$Name": "

    The status.

    " + } + }, + "BatchErrorMessage": { + "base": null, + "refs": { + "FailedCreateAssociation$Message": "

    A description of the failure.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "Activation$Expired": "

    Whether or not the activation is expired.

    ", + "InstanceInformation$IsLatestVersion": "

    Indicates whether the latest version of the SSM agent is running on your instance.

    ", + "ListCommandInvocationsRequest$Details": "

    (Optional) If set, this returns the response of the command executions and any command output. By default, this is set to False.

    " + } + }, + "CancelCommandRequest": { + "base": "

    ", + "refs": { + } + }, + "CancelCommandResult": { + "base": "

    Whether or not the command was successfully canceled. There is no guarantee that a request can be canceled.

    ", + "refs": { + } + }, + "Command": { + "base": "

    Describes a command request.

    ", + "refs": { + "CommandList$member": null, + "SendCommandResult$Command": "

    The request as it was received by SSM. Also provides the command ID, which can be used for future references to this request.

    " + } + }, + "CommandFilter": { + "base": "

    Describes a command filter.

    ", + "refs": { + "CommandFilterList$member": null + } + }, + "CommandFilterKey": { + "base": null, + "refs": { + "CommandFilter$key": "

    The name of the filter. For example, requested date and time.

    " + } + }, + "CommandFilterList": { + "base": null, + "refs": { + "ListCommandInvocationsRequest$Filters": "

    (Optional) One or more filters. Use a filter to return a more specific list of results.

    ", + "ListCommandsRequest$Filters": "

    (Optional) One or more filters. Use a filter to return a more specific list of results.

    " + } + }, + "CommandFilterValue": { + "base": null, + "refs": { + "CommandFilter$value": "

    The filter value. For example: June 30, 2015.

    " + } + }, + "CommandId": { + "base": null, + "refs": { + "CancelCommandRequest$CommandId": "

    The ID of the command you want to cancel.

    ", + "Command$CommandId": "

    A unique identifier for this command.

    ", + "CommandInvocation$CommandId": "

    The command against which this invocation was requested.

    ", + "ListCommandInvocationsRequest$CommandId": "

    (Optional) The invocations for a specific command ID.

    ", + "ListCommandsRequest$CommandId": "

    (Optional) If provided, lists only the specified command.

    " + } + }, + "CommandInvocation": { + "base": "

    An invocation is a copy of a command sent to a specific instance. A command can apply to one or more instances. A command invocation applies to one instance. For example, if a user executes SendCommand against three instances, then a command invocation is created for each requested instance ID. A command invocation returns status and detail information about a command you executed.

    ", + "refs": { + "CommandInvocationList$member": null + } + }, + "CommandInvocationList": { + "base": null, + "refs": { + "ListCommandInvocationsResult$CommandInvocations": "

    (Optional) A list of all invocations.

    " + } + }, + "CommandInvocationStatus": { + "base": null, + "refs": { + "CommandInvocation$Status": "

    Whether or not the invocation succeeded, failed, or is pending.

    " + } + }, + "CommandList": { + "base": null, + "refs": { + "ListCommandsResult$Commands": "

    (Optional) The list of commands requested by the user.

    " + } + }, + "CommandMaxResults": { + "base": null, + "refs": { + "ListCommandInvocationsRequest$MaxResults": "

    (Optional) The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.

    ", + "ListCommandsRequest$MaxResults": "

    (Optional) The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.

    " + } + }, + "CommandPlugin": { + "base": "

    Describes plugin details.

    ", + "refs": { + "CommandPluginList$member": null + } + }, + "CommandPluginList": { + "base": null, + "refs": { + "CommandInvocation$CommandPlugins": null + } + }, + "CommandPluginName": { + "base": null, + "refs": { + "CommandPlugin$Name": "

    The name of the plugin. Must be one of the following: aws:updateAgent, aws:domainjoin, aws:applications, aws:runPowerShellScript, aws:psmodule, aws:cloudWatch, aws:runShellScript, or aws:updateSSMAgent.

    " + } + }, + "CommandPluginOutput": { + "base": null, + "refs": { + "CommandPlugin$Output": "

    Output of the plugin execution.

    " + } + }, + "CommandPluginStatus": { + "base": null, + "refs": { + "CommandPlugin$Status": "

    The status of this plugin. You can execute a document with multiple plugins.

    " + } + }, + "CommandStatus": { + "base": null, + "refs": { + "Command$Status": "

    The status of the command.

    " + } + }, + "Comment": { + "base": null, + "refs": { + "Command$Comment": "

    User-specified information about the command, such as a brief description of what the command should do.

    ", + "CommandInvocation$Comment": "

    User-specified information about the command, such as a brief description of what the command should do.

    ", + "SendCommandRequest$Comment": "

    User-specified information about the command, such as a brief description of what the command should do.

    " + } + }, + "ComputerName": { + "base": null, + "refs": { + "InstanceInformation$ComputerName": "

    The fully qualified host name of the managed instance.

    " + } + }, + "CreateActivationRequest": { + "base": null, + "refs": { + } + }, + "CreateActivationResult": { + "base": null, + "refs": { + } + }, + "CreateAssociationBatchRequest": { + "base": null, + "refs": { + } + }, + "CreateAssociationBatchRequestEntries": { + "base": null, + "refs": { + "CreateAssociationBatchRequest$Entries": "

    One or more associations.

    " + } + }, + "CreateAssociationBatchRequestEntry": { + "base": "

    Describes the association of an SSM document and an instance.

    ", + "refs": { + "CreateAssociationBatchRequestEntries$member": null, + "FailedCreateAssociation$Entry": "

    The association.

    " + } + }, + "CreateAssociationBatchResult": { + "base": null, + "refs": { + } + }, + "CreateAssociationRequest": { + "base": null, + "refs": { + } + }, + "CreateAssociationResult": { + "base": null, + "refs": { + } + }, + "CreateDocumentRequest": { + "base": null, + "refs": { + } + }, + "CreateDocumentResult": { + "base": null, + "refs": { + } + }, + "CreatedDate": { + "base": null, + "refs": { + "Activation$CreatedDate": "

    The date the activation was created.

    " + } + }, + "DateTime": { + "base": null, + "refs": { + "AssociationDescription$Date": "

    The date when the association was made.

    ", + "AssociationStatus$Date": "

    The date when the status changed.

    ", + "Command$ExpiresAfter": "

    If this time is reached and the command has not already started executing, it will not execute. Calculated based on the ExpiresAfter user input provided as part of the SendCommand API.

    ", + "Command$RequestedDateTime": "

    The date and time the command was requested.

    ", + "CommandInvocation$RequestedDateTime": "

    The time and date the request was sent to this instance.

    ", + "CommandPlugin$ResponseStartDateTime": "

    The time the plugin started executing.

    ", + "CommandPlugin$ResponseFinishDateTime": "

    The time the plugin stopped executing. Could stop prematurely if, for example, a cancel command was sent.

    ", + "DocumentDescription$CreatedDate": "

    The date when the SSM document was created.

    ", + "InstanceInformation$LastPingDateTime": "

    The date and time when the agent last pinged the SSM service.

    ", + "InstanceInformation$RegistrationDate": "

    The date the server or VM was registered with AWS as a managed instance.

    " + } + }, + "DefaultInstanceName": { + "base": null, + "refs": { + "Activation$DefaultInstanceName": "

    A name for the managed instance when it is created.

    ", + "CreateActivationRequest$DefaultInstanceName": "

    The name of the registered, managed instance as it will appear in the Amazon EC2 console or when you use the AWS command line tools to list EC2 resources.

    " + } + }, + "DeleteActivationRequest": { + "base": null, + "refs": { + } + }, + "DeleteActivationResult": { + "base": null, + "refs": { + } + }, + "DeleteAssociationRequest": { + "base": null, + "refs": { + } + }, + "DeleteAssociationResult": { + "base": null, + "refs": { + } + }, + "DeleteDocumentRequest": { + "base": null, + "refs": { + } + }, + "DeleteDocumentResult": { + "base": null, + "refs": { + } + }, + "DeregisterManagedInstanceRequest": { + "base": null, + "refs": { + } + }, + "DeregisterManagedInstanceResult": { + "base": null, + "refs": { + } + }, + "DescribeActivationsFilter": { + "base": "

    Filter for the DescribeActivations API.

    ", + "refs": { + "DescribeActivationsFilterList$member": null + } + }, + "DescribeActivationsFilterKeys": { + "base": null, + "refs": { + "DescribeActivationsFilter$FilterKey": "

    The name of the filter.

    " + } + }, + "DescribeActivationsFilterList": { + "base": null, + "refs": { + "DescribeActivationsRequest$Filters": "

    A filter to view information about your activations.

    " + } + }, + "DescribeActivationsRequest": { + "base": null, + "refs": { + } + }, + "DescribeActivationsResult": { + "base": null, + "refs": { + } + }, + "DescribeAssociationRequest": { + "base": null, + "refs": { + } + }, + "DescribeAssociationResult": { + "base": null, + "refs": { + } + }, + "DescribeDocumentPermissionRequest": { + "base": null, + "refs": { + } + }, + "DescribeDocumentPermissionResponse": { + "base": null, + "refs": { + } + }, + "DescribeDocumentRequest": { + "base": null, + "refs": { + } + }, + "DescribeDocumentResult": { + "base": null, + "refs": { + } + }, + "DescribeInstanceInformationRequest": { + "base": null, + "refs": { + } + }, + "DescribeInstanceInformationResult": { + "base": null, + "refs": { + } + }, + "DescriptionInDocument": { + "base": null, + "refs": { + "DocumentDescription$Description": "

    A description of the document.

    " + } + }, + "DocumentARN": { + "base": null, + "refs": { + "DescribeDocumentRequest$Name": "

    The name of the SSM document.

    ", + "DocumentDescription$Name": "

    The name of the SSM document.

    ", + "DocumentIdentifier$Name": "

    The name of the SSM document.

    ", + "GetDocumentRequest$Name": "

    The name of the SSM document.

    ", + "GetDocumentResult$Name": "

    The name of the SSM document.

    ", + "SendCommandRequest$DocumentName": "

    Required. The name of the SSM document to execute. This can be an SSM public document or a custom document.

    " + } + }, + "DocumentAlreadyExists": { + "base": "

    The specified SSM document already exists.

    ", + "refs": { + } + }, + "DocumentContent": { + "base": null, + "refs": { + "CreateDocumentRequest$Content": "

    A valid JSON string.

    ", + "GetDocumentResult$Content": "

    The contents of the SSM document.

    " + } + }, + "DocumentDescription": { + "base": "

    Describes an SSM document.

    ", + "refs": { + "CreateDocumentResult$DocumentDescription": "

    Information about the SSM document.

    ", + "DescribeDocumentResult$Document": "

    Information about the SSM document.

    " + } + }, + "DocumentFilter": { + "base": "

    Describes a filter.

    ", + "refs": { + "DocumentFilterList$member": null + } + }, + "DocumentFilterKey": { + "base": null, + "refs": { + "DocumentFilter$key": "

    The name of the filter.

    " + } + }, + "DocumentFilterList": { + "base": null, + "refs": { + "ListDocumentsRequest$DocumentFilterList": "

    One or more filters. Use a filter to return a more specific list of results.

    " + } + }, + "DocumentFilterValue": { + "base": null, + "refs": { + "DocumentFilter$value": "

    The value of the filter.

    " + } + }, + "DocumentHash": { + "base": null, + "refs": { + "DocumentDescription$Hash": "

    The Sha256 or Sha1 hash created by the system when the document was created.

    Sha1 hashes have been deprecated.

    ", + "SendCommandRequest$DocumentHash": "

    The Sha256 or Sha1 hash created by the system when the document was created.

    Sha1 hashes have been deprecated.

    " + } + }, + "DocumentHashType": { + "base": null, + "refs": { + "DocumentDescription$HashType": "

    Sha256 or Sha1.

    Sha1 hashes have been deprecated.

    ", + "SendCommandRequest$DocumentHashType": "

    Sha256 or Sha1.

    Sha1 hashes have been deprecated.

    " + } + }, + "DocumentIdentifier": { + "base": "

    Describes the name of an SSM document.

    ", + "refs": { + "DocumentIdentifierList$member": null + } + }, + "DocumentIdentifierList": { + "base": null, + "refs": { + "ListDocumentsResult$DocumentIdentifiers": "

    The names of the SSM documents.

    " + } + }, + "DocumentLimitExceeded": { + "base": "

    You can have at most 100 active SSM documents.

    ", + "refs": { + } + }, + "DocumentName": { + "base": null, + "refs": { + "Association$Name": "

    The name of the SSM document.

    ", + "AssociationDescription$Name": "

    The name of the SSM document.

    ", + "Command$DocumentName": "

    The name of the SSM document requested for execution.

    ", + "CommandInvocation$DocumentName": "

    The document name that was requested for execution.

    ", + "CreateAssociationBatchRequestEntry$Name": "

    The name of the configuration document.

    ", + "CreateAssociationRequest$Name": "

    The name of the SSM document.

    ", + "CreateDocumentRequest$Name": "

    A name for the SSM document.

    ", + "DeleteAssociationRequest$Name": "

    The name of the SSM document.

    ", + "DeleteDocumentRequest$Name": "

    The name of the SSM document.

    ", + "DescribeAssociationRequest$Name": "

    The name of the SSM document.

    ", + "DescribeDocumentPermissionRequest$Name": "

    The name of the document for which you are the owner.

    ", + "ModifyDocumentPermissionRequest$Name": "

    The name of the document that you want to share.

    ", + "UpdateAssociationStatusRequest$Name": "

    The name of the SSM document.

    " + } + }, + "DocumentOwner": { + "base": null, + "refs": { + "DocumentDescription$Owner": "

    The AWS user account of the person who created the document.

    ", + "DocumentIdentifier$Owner": "

    The AWS user account of the person who created the document.

    " + } + }, + "DocumentParameter": { + "base": "

    Parameters specified in the SSM document that execute on the server when the command is run.

    ", + "refs": { + "DocumentParameterList$member": null + } + }, + "DocumentParameterDefaultValue": { + "base": null, + "refs": { + "DocumentParameter$DefaultValue": "

    If specified, the default values for the parameters. Parameters without a default value are required. Parameters with a default value are optional.

    " + } + }, + "DocumentParameterDescrption": { + "base": null, + "refs": { + "DocumentParameter$Description": "

    A description of what the parameter does, how to use it, the default value, and whether or not the parameter is optional.

    " + } + }, + "DocumentParameterList": { + "base": null, + "refs": { + "DocumentDescription$Parameters": "

    A description of the parameters for a document.

    " + } + }, + "DocumentParameterName": { + "base": null, + "refs": { + "DocumentParameter$Name": "

    The name of the parameter.

    " + } + }, + "DocumentParameterType": { + "base": null, + "refs": { + "DocumentParameter$Type": "

    The type of parameter. The type can be either “String” or “StringList”.

    " + } + }, + "DocumentPermissionLimit": { + "base": "

    The document cannot be shared with more AWS user accounts. You can share a document with a maximum of 20 accounts. You can publicly share up to five documents. If you need to increase this limit, contact AWS Support.

    ", + "refs": { + } + }, + "DocumentPermissionType": { + "base": null, + "refs": { + "DescribeDocumentPermissionRequest$PermissionType": "

    The permission type for the document. The permission type can be Share.

    ", + "ModifyDocumentPermissionRequest$PermissionType": "

    The permission type for the document. The permission type can be Share.

    " + } + }, + "DocumentSha1": { + "base": null, + "refs": { + "DocumentDescription$Sha1": "

    The SHA1 hash of the document, which you can use for verification purposes.

    " + } + }, + "DocumentStatus": { + "base": null, + "refs": { + "DocumentDescription$Status": "

    The status of the SSM document.

    " + } + }, + "DuplicateInstanceId": { + "base": "

    You cannot specify an instance ID in more than one association.

    ", + "refs": { + } + }, + "ExpirationDate": { + "base": null, + "refs": { + "Activation$ExpirationDate": "

    The date when this activation can no longer be used to register managed instances.

    ", + "CreateActivationRequest$ExpirationDate": "

    The date by which this activation request should expire. The default value is 24 hours.

    " + } + }, + "FailedCreateAssociation": { + "base": "

    Describes a failed association.

    ", + "refs": { + "FailedCreateAssociationList$member": null + } + }, + "FailedCreateAssociationList": { + "base": null, + "refs": { + "CreateAssociationBatchResult$Failed": "

    Information about the associations that failed.

    " + } + }, + "Fault": { + "base": null, + "refs": { + "FailedCreateAssociation$Fault": "

    The source of the failure.

    " + } + }, + "GetDocumentRequest": { + "base": null, + "refs": { + } + }, + "GetDocumentResult": { + "base": null, + "refs": { + } + }, + "IPAddress": { + "base": null, + "refs": { + "InstanceInformation$IPAddress": "

    The IP address of the managed instance.

    " + } + }, + "IamRole": { + "base": null, + "refs": { + "Activation$IamRole": "

    The AWS Identity and Access Management (IAM) role to assign to the managed instance.

    ", + "CreateActivationRequest$IamRole": "

    The AWS Identity and Access Management (IAM) role that you want to assign to the managed instance.

    ", + "InstanceInformation$IamRole": "

    The AWS Identity and Access Management (IAM) role assigned to EC2 instances or managed instances.

    ", + "UpdateManagedInstanceRoleRequest$IamRole": "

    The IAM role you want to assign or change.

    " + } + }, + "InstanceId": { + "base": null, + "refs": { + "Association$InstanceId": "

    The ID of the instance.

    ", + "AssociationDescription$InstanceId": "

    The ID of the instance.

    ", + "CommandInvocation$InstanceId": "

    The ID of the instance against which this invocation was requested.

    ", + "CreateAssociationBatchRequestEntry$InstanceId": "

    The ID of the instance.

    ", + "CreateAssociationRequest$InstanceId": "

    The instance ID.

    ", + "DeleteAssociationRequest$InstanceId": "

    The ID of the instance.

    ", + "DescribeAssociationRequest$InstanceId": "

    The instance ID.

    ", + "InstanceIdList$member": null, + "InstanceInformation$InstanceId": "

    The instance ID.

    ", + "ListCommandInvocationsRequest$InstanceId": "

    (Optional) The command execution details for a specific instance ID.

    ", + "ListCommandsRequest$InstanceId": "

    (Optional) Lists commands issued against this instance ID.

    ", + "UpdateAssociationStatusRequest$InstanceId": "

    The ID of the instance.

    " + } + }, + "InstanceIdList": { + "base": null, + "refs": { + "CancelCommandRequest$InstanceIds": "

    (Optional) A list of instance IDs on which you want to cancel the command. If not provided, the command is canceled on every instance on which it was requested.

    ", + "Command$InstanceIds": "

    The instance IDs against which this command was requested.

    ", + "SendCommandRequest$InstanceIds": "

    Required. The instance IDs where the command should execute. You can specify a maximum of 50 IDs.

    " + } + }, + "InstanceInformation": { + "base": "

    Describes information about a managed instance, such as its platform type, SSM agent version, and ping status.

    ", + "refs": { + "InstanceInformationList$member": null + } + }, + "InstanceInformationFilter": { + "base": "

    Describes a filter for a specific list of instances.

    ", + "refs": { + "InstanceInformationFilterList$member": null + } + }, + "InstanceInformationFilterKey": { + "base": null, + "refs": { + "InstanceInformationFilter$key": "

    The name of the filter.

    " + } + }, + "InstanceInformationFilterList": { + "base": null, + "refs": { + "DescribeInstanceInformationRequest$InstanceInformationFilterList": "

    One or more filters. Use a filter to return a more specific list of instances.

    " + } + }, + "InstanceInformationFilterValue": { + "base": null, + "refs": { + "InstanceInformationFilterValueSet$member": null + } + }, + "InstanceInformationFilterValueSet": { + "base": null, + "refs": { + "InstanceInformationFilter$valueSet": "

    The filter values.

    " + } + }, + "InstanceInformationList": { + "base": null, + "refs": { + "DescribeInstanceInformationResult$InstanceInformationList": "

    The instance information list.

    " + } + }, + "InternalServerError": { + "base": "

    An error occurred on the server side.

    ", + "refs": { + } + }, + "InvalidActivation": { + "base": "

    The activation is not valid. The activation might have been deleted, or the ActivationId and the ActivationCode do not match.

    ", + "refs": { + } + }, + "InvalidActivationId": { + "base": "

    The activation ID is not valid. Verify that you entered the correct ActivationId or ActivationCode and try again.

    ", + "refs": { + } + }, + "InvalidCommandId": { + "base": null, + "refs": { + } + }, + "InvalidDocument": { + "base": "

    The specified document does not exist.

    ", + "refs": { + } + }, + "InvalidDocumentContent": { + "base": "

    The content for the SSM document is not valid.

    ", + "refs": { + } + }, + "InvalidDocumentOperation": { + "base": "

    You attempted to delete a document while it is still shared. You must stop sharing the document before you can delete it.

    ", + "refs": { + } + }, + "InvalidFilter": { + "base": "

    The filter name is not valid. Verify that you entered the correct name and try again.

    ", + "refs": { + } + }, + "InvalidFilterKey": { + "base": "

    The specified key is not valid.

    ", + "refs": { + } + }, + "InvalidInstanceId": { + "base": "

    The instance is not in a valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

    ", + "refs": { + } + }, + "InvalidInstanceInformationFilterValue": { + "base": "

    The specified filter value is not valid.

    ", + "refs": { + } + }, + "InvalidNextToken": { + "base": "

    The specified token is not valid.

    ", + "refs": { + } + }, + "InvalidOutputFolder": { + "base": "

    The S3 bucket does not exist.

    ", + "refs": { + } + }, + "InvalidParameters": { + "base": "

    You must specify values for all required parameters in the SSM document. You can only supply values to parameters defined in the SSM document.

    ", + "refs": { + } + }, + "InvalidPermissionType": { + "base": "

    The permission type is not supported. Share is the only supported permission type.

    ", + "refs": { + } + }, + "InvalidResourceId": { + "base": "

    The resource ID is not valid. Verify that you entered the correct ID and try again.

    ", + "refs": { + } + }, + "InvalidResourceType": { + "base": "

    The resource type is not valid. If you are attempting to tag an instance, the instance must be a registered, managed instance.

    ", + "refs": { + } + }, + "InvocationTraceOutput": { + "base": null, + "refs": { + "CommandInvocation$TraceOutput": "

    Gets the trace output sent by the agent.

    " + } + }, + "KeyList": { + "base": null, + "refs": { + "RemoveTagsFromResourceRequest$TagKeys": "

    Tag keys that you want to remove from the specified resource.

    " + } + }, + "ListAssociationsRequest": { + "base": null, + "refs": { + } + }, + "ListAssociationsResult": { + "base": null, + "refs": { + } + }, + "ListCommandInvocationsRequest": { + "base": null, + "refs": { + } + }, + "ListCommandInvocationsResult": { + "base": null, + "refs": { + } + }, + "ListCommandsRequest": { + "base": null, + "refs": { + } + }, + "ListCommandsResult": { + "base": null, + "refs": { + } + }, + "ListDocumentsRequest": { + "base": null, + "refs": { + } + }, + "ListDocumentsResult": { + "base": null, + "refs": { + } + }, + "ListTagsForResourceRequest": { + "base": null, + "refs": { + } + }, + "ListTagsForResourceResult": { + "base": null, + "refs": { + } + }, + "ManagedInstanceId": { + "base": null, + "refs": { + "DeregisterManagedInstanceRequest$InstanceId": "

    The ID assigned to the managed instance when you registered it using the activation process.

    ", + "UpdateManagedInstanceRoleRequest$InstanceId": "

    The ID of the managed instance where you want to update the role.

    " + } + }, + "MaxDocumentSizeExceeded": { + "base": "

    The size limit of an SSM document is 64 KB.

    ", + "refs": { + } + }, + "MaxResults": { + "base": null, + "refs": { + "DescribeActivationsRequest$MaxResults": "

    The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.

    ", + "ListAssociationsRequest$MaxResults": "

    The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.

    ", + "ListDocumentsRequest$MaxResults": "

    The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.

    " + } + }, + "MaxResultsEC2Compatible": { + "base": null, + "refs": { + "DescribeInstanceInformationRequest$MaxResults": "

    The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.

    " + } + }, + "ModifyDocumentPermissionRequest": { + "base": null, + "refs": { + } + }, + "ModifyDocumentPermissionResponse": { + "base": null, + "refs": { + } + }, + "NextToken": { + "base": null, + "refs": { + "DescribeActivationsRequest$NextToken": "

    A token to start the list. Use this token to get the next set of results.

    ", + "DescribeActivationsResult$NextToken": "

    The token for the next set of items to return. Use this token to get the next set of results.

    ", + "DescribeInstanceInformationRequest$NextToken": "

    The token for the next set of items to return. (You received this token from a previous call.)

    ", + "DescribeInstanceInformationResult$NextToken": "

    The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

    ", + "ListAssociationsRequest$NextToken": "

    The token for the next set of items to return. (You received this token from a previous call.)

    ", + "ListAssociationsResult$NextToken": "

    The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

    ", + "ListCommandInvocationsRequest$NextToken": "

    (Optional) The token for the next set of items to return. (You received this token from a previous call.)

    ", + "ListCommandInvocationsResult$NextToken": "

    (Optional) The token for the next set of items to return. (You received this token from a previous call.)

    ", + "ListCommandsRequest$NextToken": "

    (Optional) The token for the next set of items to return. (You received this token from a previous call.)

    ", + "ListCommandsResult$NextToken": "

    (Optional) The token for the next set of items to return. (You received this token from a previous call.)

    ", + "ListDocumentsRequest$NextToken": "

    The token for the next set of items to return. (You received this token from a previous call.)

    ", + "ListDocumentsResult$NextToken": "

    The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.
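
    All of these MaxResults/NextToken pairs follow the same pagination contract, which the paginators-1.json file added later in this diff encodes for the SDK's generated ...Pages helpers. A hand-rolled sketch of that loop with ListDocuments (illustrative, not part of the vendored files):

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/ssm"
    )

    func main() {
        svc := ssm.New(session.New())

        input := &ssm.ListDocumentsInput{MaxResults: aws.Int64(25)}
        for {
            page, err := svc.ListDocuments(input)
            if err != nil {
                log.Fatal(err)
            }
            for _, doc := range page.DocumentIdentifiers {
                fmt.Println(aws.StringValue(doc.Name))
            }
            // An empty NextToken means there are no further pages.
            if aws.StringValue(page.NextToken) == "" {
                break
            }
            input.NextToken = page.NextToken
        }
    }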

    " + } + }, + "ParameterName": { + "base": null, + "refs": { + "Parameters$key": null + } + }, + "ParameterValue": { + "base": null, + "refs": { + "ParameterValueList$member": null + } + }, + "ParameterValueList": { + "base": null, + "refs": { + "Parameters$value": null + } + }, + "Parameters": { + "base": null, + "refs": { + "AssociationDescription$Parameters": "

    A description of the parameters for a document.

    ", + "Command$Parameters": "

    The parameter values to be inserted in the SSM document when executing the command.

    ", + "CreateAssociationBatchRequestEntry$Parameters": "

    A description of the parameters for a document.

    ", + "CreateAssociationRequest$Parameters": "

    The parameters for the document's runtime configuration.

    ", + "SendCommandRequest$Parameters": "

    The required and optional parameters specified in the SSM document being executed.
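
    Putting the SendCommand fields described above together, a hedged sketch; the AWS-RunShellScript document name and its commands parameter are assumptions about what is available in the account, and the instance ID is hypothetical:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/ssm"
    )

    func main() {
        svc := ssm.New(session.New())

        out, err := svc.SendCommand(&ssm.SendCommandInput{
            DocumentName: aws.String("AWS-RunShellScript"),               // assumed document
            InstanceIds:  []*string{aws.String("i-0123456789abcdef0")},  // hypothetical; max 50 IDs
            Parameters: map[string][]*string{
                "commands": {aws.String("uptime")}, // must match parameters defined in the document
            },
            TimeoutSeconds: aws.Int64(600), // command will not start executing past this deadline
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("command id:", aws.StringValue(out.Command.CommandId))
    }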

    " + } + }, + "PingStatus": { + "base": null, + "refs": { + "InstanceInformation$PingStatus": "

    Connection status of the SSM agent.

    " + } + }, + "PlatformType": { + "base": null, + "refs": { + "InstanceInformation$PlatformType": "

    The operating system platform type.

    ", + "PlatformTypeList$member": null + } + }, + "PlatformTypeList": { + "base": null, + "refs": { + "DocumentDescription$PlatformTypes": "

    The list of OS platforms compatible with this SSM document.

    ", + "DocumentIdentifier$PlatformTypes": "

    The operating system platform.

    " + } + }, + "RegistrationLimit": { + "base": null, + "refs": { + "Activation$RegistrationLimit": "

    The maximum number of managed instances that can be registered using this activation.

    ", + "CreateActivationRequest$RegistrationLimit": "

    Specify the maximum number of managed instances you want to register. The default value is 1 instance.
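
    For illustration, a sketch of creating an activation with the generated client; the role and default instance names are hypothetical:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/ssm"
    )

    func main() {
        svc := ssm.New(session.New())

        out, err := svc.CreateActivation(&ssm.CreateActivationInput{
            IamRole:             aws.String("SSMServiceRole"), // hypothetical role name
            DefaultInstanceName: aws.String("on-prem-box"),    // hypothetical
            RegistrationLimit:   aws.Int64(10),                // defaults to 1 if omitted
        })
        if err != nil {
            log.Fatal(err)
        }
        // The ID/code pair is what the agent on the managed instance registers with.
        fmt.Println(aws.StringValue(out.ActivationId), aws.StringValue(out.ActivationCode))
    }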

    " + } + }, + "RegistrationsCount": { + "base": null, + "refs": { + "Activation$RegistrationsCount": "

    The number of managed instances already registered with this activation.

    " + } + }, + "RemoveTagsFromResourceRequest": { + "base": null, + "refs": { + } + }, + "RemoveTagsFromResourceResult": { + "base": null, + "refs": { + } + }, + "ResourceId": { + "base": null, + "refs": { + "AddTagsToResourceRequest$ResourceId": "

    The resource ID you want to tag.

    ", + "ListTagsForResourceRequest$ResourceId": "

    The resource ID for which you want to see a list of tags.

    ", + "RemoveTagsFromResourceRequest$ResourceId": "

    The resource ID for which you want to remove tags.

    " + } + }, + "ResourceType": { + "base": null, + "refs": { + "InstanceInformation$ResourceType": "

    The type of instance. Instances are either EC2 instances or managed instances.

    " + } + }, + "ResourceTypeForTagging": { + "base": null, + "refs": { + "AddTagsToResourceRequest$ResourceType": "

    Specifies the type of resource you are tagging.

    ", + "ListTagsForResourceRequest$ResourceType": "

    Returns a list of tags for a specific resource type.

    ", + "RemoveTagsFromResourceRequest$ResourceType": "

    The type of resource from which you want to remove a tag.

    " + } + }, + "ResponseCode": { + "base": null, + "refs": { + "CommandPlugin$ResponseCode": "

    A numeric response code generated after executing the plugin.

    " + } + }, + "S3BucketName": { + "base": null, + "refs": { + "Command$OutputS3BucketName": "

    The S3 bucket where the responses to the command executions should be stored. This was requested when issuing the command.

    ", + "CommandPlugin$OutputS3BucketName": "

    The S3 bucket where the responses to the command executions should be stored. This was requested when issuing the command.

    ", + "SendCommandRequest$OutputS3BucketName": "

    The name of the S3 bucket where command execution responses should be stored.

    " + } + }, + "S3KeyPrefix": { + "base": null, + "refs": { + "Command$OutputS3KeyPrefix": "

    The S3 directory path inside the bucket where the responses to the command executions should be stored. This was requested when issuing the command.

    ", + "CommandPlugin$OutputS3KeyPrefix": "

    The S3 directory path inside the bucket where the responses to the command executions should be stored. This was requested when issuing the command.

    ", + "SendCommandRequest$OutputS3KeyPrefix": "

    The directory structure within the S3 bucket where the responses should be stored.

    " + } + }, + "SendCommandRequest": { + "base": null, + "refs": { + } + }, + "SendCommandResult": { + "base": null, + "refs": { + } + }, + "StatusAdditionalInfo": { + "base": null, + "refs": { + "AssociationStatus$AdditionalInfo": "

    A user-defined string.

    " + } + }, + "StatusMessage": { + "base": null, + "refs": { + "AssociationStatus$Message": "

    The reason for the status.

    " + } + }, + "StatusUnchanged": { + "base": "

    The updated status is the same as the current status.

    ", + "refs": { + } + }, + "String": { + "base": null, + "refs": { + "DocumentAlreadyExists$Message": null, + "DocumentLimitExceeded$Message": null, + "DocumentPermissionLimit$Message": null, + "InstanceInformation$PlatformName": "

    The name of the operating system platform running on your instance.

    ", + "InstanceInformation$PlatformVersion": "

    The version of the OS platform running on your instance.

    ", + "InstanceInformation$Name": "

    The name of the managed instance.

    ", + "InternalServerError$Message": null, + "InvalidActivation$Message": null, + "InvalidActivationId$Message": null, + "InvalidDocument$Message": "

    The SSM document does not exist or the document is not available to the user. This exception can be issued by CreateAssociation, CreateAssociationBatch, DeleteAssociation, DeleteDocument, DescribeAssociation, DescribeDocument, GetDocument, SendCommand, or UpdateAssociationStatus.

    ", + "InvalidDocumentContent$Message": "

    A description of the validation error.

    ", + "InvalidDocumentOperation$Message": null, + "InvalidFilter$Message": null, + "InvalidInstanceId$Message": null, + "InvalidInstanceInformationFilterValue$message": null, + "InvalidParameters$Message": null, + "InvalidPermissionType$Message": null, + "MaxDocumentSizeExceeded$Message": null, + "StringList$member": null, + "UnsupportedPlatformType$Message": null + } + }, + "StringList": { + "base": null, + "refs": { + "DescribeActivationsFilter$FilterValues": "

    The filter values.

    " + } + }, + "Tag": { + "base": "

    Metadata that you assign to your managed instances. Tags enable you to categorize your managed instances in different ways, for example, by purpose, owner, or environment.

    ", + "refs": { + "TagList$member": null + } + }, + "TagKey": { + "base": null, + "refs": { + "KeyList$member": null, + "Tag$Key": "

    The name of the tag.

    " + } + }, + "TagList": { + "base": null, + "refs": { + "AddTagsToResourceRequest$Tags": "

    One or more tags. The value parameter is required, but if you don't want the tag to have a value, specify the parameter with no value, and we set the value to an empty string.
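
    A sketch of the tagging call this describes, assuming a hypothetical managed-instance ID; per the description above, an empty string stands in for a value-less tag:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/ssm"
    )

    func main() {
        svc := ssm.New(session.New())

        _, err := svc.AddTagsToResource(&ssm.AddTagsToResourceInput{
            ResourceType: aws.String("ManagedInstance"),      // per ResourceTypeForTagging
            ResourceId:   aws.String("mi-0123456789abcdef0"), // hypothetical managed instance ID
            Tags: []*ssm.Tag{
                {Key: aws.String("Environment"), Value: aws.String("prod")},
                // The Value field is required by the shape; use "" for a value-less tag.
                {Key: aws.String("NeedsPatch"), Value: aws.String("")},
            },
        })
        if err != nil {
            log.Fatal(err)
        }
    }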

    ", + "ListTagsForResourceResult$TagList": "

    A list of tags.

    " + } + }, + "TagValue": { + "base": null, + "refs": { + "Tag$Value": "

    The value of the tag.

    " + } + }, + "TimeoutSeconds": { + "base": null, + "refs": { + "SendCommandRequest$TimeoutSeconds": "

    If this time is reached and the command has not already started executing, it will not execute.

    " + } + }, + "TooManyUpdates": { + "base": "

    There are concurrent updates for a resource that supports one update at a time.

    ", + "refs": { + } + }, + "UnsupportedPlatformType": { + "base": "

    The document does not support the platform type of the given instance ID(s). For example, you sent an SSM document for a Windows instance to a Linux instance.

    ", + "refs": { + } + }, + "UpdateAssociationStatusRequest": { + "base": null, + "refs": { + } + }, + "UpdateAssociationStatusResult": { + "base": null, + "refs": { + } + }, + "UpdateManagedInstanceRoleRequest": { + "base": null, + "refs": { + } + }, + "UpdateManagedInstanceRoleResult": { + "base": null, + "refs": { + } + }, + "Version": { + "base": null, + "refs": { + "InstanceInformation$AgentVersion": "

    The version of the SSM agent running on your instance.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/paginators-1.json new file mode 100644 index 000000000..d024f65c2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/paginators-1.json @@ -0,0 +1,34 @@ +{ + "pagination": { + "ListAssociations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Associations" + }, + "ListCommandInvocations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "CommandInvocations" + }, + "ListCommands": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Commands" + }, + "ListDocuments": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "DocumentIdentifiers" + }, + "DescribeActivations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "ActivationList" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/storagegateway/2013-06-30/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/storagegateway/2013-06-30/api-2.json new file mode 100644 index 000000000..f84fe73fc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/storagegateway/2013-06-30/api-2.json @@ -0,0 +1,2275 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2013-06-30", + "endpointPrefix":"storagegateway", + "jsonVersion":"1.1", + "protocol":"json", + "serviceFullName":"AWS Storage Gateway", + "signatureVersion":"v4", + "targetPrefix":"StorageGateway_20130630" + }, + "operations":{ + "ActivateGateway":{ + "name":"ActivateGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ActivateGatewayInput"}, + "output":{"shape":"ActivateGatewayOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "AddCache":{ + "name":"AddCache", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddCacheInput"}, + "output":{"shape":"AddCacheOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "AddTagsToResource":{ + "name":"AddTagsToResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddTagsToResourceInput"}, + "output":{"shape":"AddTagsToResourceOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "AddUploadBuffer":{ + "name":"AddUploadBuffer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddUploadBufferInput"}, + "output":{"shape":"AddUploadBufferOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "AddWorkingStorage":{ + "name":"AddWorkingStorage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddWorkingStorageInput"}, + "output":{"shape":"AddWorkingStorageOutput"}, + "errors":[ + 
{"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "CancelArchival":{ + "name":"CancelArchival", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelArchivalInput"}, + "output":{"shape":"CancelArchivalOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "CancelRetrieval":{ + "name":"CancelRetrieval", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelRetrievalInput"}, + "output":{"shape":"CancelRetrievalOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "CreateCachediSCSIVolume":{ + "name":"CreateCachediSCSIVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateCachediSCSIVolumeInput"}, + "output":{"shape":"CreateCachediSCSIVolumeOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "CreateSnapshot":{ + "name":"CreateSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSnapshotInput"}, + "output":{"shape":"CreateSnapshotOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "CreateSnapshotFromVolumeRecoveryPoint":{ + "name":"CreateSnapshotFromVolumeRecoveryPoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSnapshotFromVolumeRecoveryPointInput"}, + "output":{"shape":"CreateSnapshotFromVolumeRecoveryPointOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "CreateStorediSCSIVolume":{ + "name":"CreateStorediSCSIVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateStorediSCSIVolumeInput"}, + "output":{"shape":"CreateStorediSCSIVolumeOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "CreateTapeWithBarcode":{ + "name":"CreateTapeWithBarcode", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateTapeWithBarcodeInput"}, + "output":{"shape":"CreateTapeWithBarcodeOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "CreateTapes":{ + "name":"CreateTapes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateTapesInput"}, + "output":{"shape":"CreateTapesOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "DeleteBandwidthRateLimit":{ + "name":"DeleteBandwidthRateLimit", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteBandwidthRateLimitInput"}, + "output":{"shape":"DeleteBandwidthRateLimitOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "DeleteChapCredentials":{ + "name":"DeleteChapCredentials", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteChapCredentialsInput"}, + "output":{"shape":"DeleteChapCredentialsOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "DeleteGateway":{ + "name":"DeleteGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteGatewayInput"}, + "output":{"shape":"DeleteGatewayOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + 
"DeleteSnapshotSchedule":{ + "name":"DeleteSnapshotSchedule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSnapshotScheduleInput"}, + "output":{"shape":"DeleteSnapshotScheduleOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "DeleteTape":{ + "name":"DeleteTape", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteTapeInput"}, + "output":{"shape":"DeleteTapeOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "DeleteTapeArchive":{ + "name":"DeleteTapeArchive", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteTapeArchiveInput"}, + "output":{"shape":"DeleteTapeArchiveOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "DeleteVolume":{ + "name":"DeleteVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVolumeInput"}, + "output":{"shape":"DeleteVolumeOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "DescribeBandwidthRateLimit":{ + "name":"DescribeBandwidthRateLimit", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeBandwidthRateLimitInput"}, + "output":{"shape":"DescribeBandwidthRateLimitOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "DescribeCache":{ + "name":"DescribeCache", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeCacheInput"}, + "output":{"shape":"DescribeCacheOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "DescribeCachediSCSIVolumes":{ + "name":"DescribeCachediSCSIVolumes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeCachediSCSIVolumesInput"}, + "output":{"shape":"DescribeCachediSCSIVolumesOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "DescribeChapCredentials":{ + "name":"DescribeChapCredentials", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeChapCredentialsInput"}, + "output":{"shape":"DescribeChapCredentialsOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "DescribeGatewayInformation":{ + "name":"DescribeGatewayInformation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeGatewayInformationInput"}, + "output":{"shape":"DescribeGatewayInformationOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "DescribeMaintenanceStartTime":{ + "name":"DescribeMaintenanceStartTime", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeMaintenanceStartTimeInput"}, + "output":{"shape":"DescribeMaintenanceStartTimeOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "DescribeSnapshotSchedule":{ + "name":"DescribeSnapshotSchedule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSnapshotScheduleInput"}, + "output":{"shape":"DescribeSnapshotScheduleOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "DescribeStorediSCSIVolumes":{ + 
"name":"DescribeStorediSCSIVolumes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeStorediSCSIVolumesInput"}, + "output":{"shape":"DescribeStorediSCSIVolumesOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "DescribeTapeArchives":{ + "name":"DescribeTapeArchives", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTapeArchivesInput"}, + "output":{"shape":"DescribeTapeArchivesOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "DescribeTapeRecoveryPoints":{ + "name":"DescribeTapeRecoveryPoints", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTapeRecoveryPointsInput"}, + "output":{"shape":"DescribeTapeRecoveryPointsOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "DescribeTapes":{ + "name":"DescribeTapes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTapesInput"}, + "output":{"shape":"DescribeTapesOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "DescribeUploadBuffer":{ + "name":"DescribeUploadBuffer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeUploadBufferInput"}, + "output":{"shape":"DescribeUploadBufferOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "DescribeVTLDevices":{ + "name":"DescribeVTLDevices", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVTLDevicesInput"}, + "output":{"shape":"DescribeVTLDevicesOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "DescribeWorkingStorage":{ + "name":"DescribeWorkingStorage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeWorkingStorageInput"}, + "output":{"shape":"DescribeWorkingStorageOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "DisableGateway":{ + "name":"DisableGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableGatewayInput"}, + "output":{"shape":"DisableGatewayOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "ListGateways":{ + "name":"ListGateways", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListGatewaysInput"}, + "output":{"shape":"ListGatewaysOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "ListLocalDisks":{ + "name":"ListLocalDisks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListLocalDisksInput"}, + "output":{"shape":"ListLocalDisksOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceInput"}, + "output":{"shape":"ListTagsForResourceOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "ListTapes":{ + "name":"ListTapes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTapesInput"}, + "output":{"shape":"ListTapesOutput"}, + 
"errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "ListVolumeInitiators":{ + "name":"ListVolumeInitiators", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListVolumeInitiatorsInput"}, + "output":{"shape":"ListVolumeInitiatorsOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "ListVolumeRecoveryPoints":{ + "name":"ListVolumeRecoveryPoints", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListVolumeRecoveryPointsInput"}, + "output":{"shape":"ListVolumeRecoveryPointsOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "ListVolumes":{ + "name":"ListVolumes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListVolumesInput"}, + "output":{"shape":"ListVolumesOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "RemoveTagsFromResource":{ + "name":"RemoveTagsFromResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveTagsFromResourceInput"}, + "output":{"shape":"RemoveTagsFromResourceOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "ResetCache":{ + "name":"ResetCache", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetCacheInput"}, + "output":{"shape":"ResetCacheOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "RetrieveTapeArchive":{ + "name":"RetrieveTapeArchive", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RetrieveTapeArchiveInput"}, + "output":{"shape":"RetrieveTapeArchiveOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "RetrieveTapeRecoveryPoint":{ + "name":"RetrieveTapeRecoveryPoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RetrieveTapeRecoveryPointInput"}, + "output":{"shape":"RetrieveTapeRecoveryPointOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "SetLocalConsolePassword":{ + "name":"SetLocalConsolePassword", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetLocalConsolePasswordInput"}, + "output":{"shape":"SetLocalConsolePasswordOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "ShutdownGateway":{ + "name":"ShutdownGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ShutdownGatewayInput"}, + "output":{"shape":"ShutdownGatewayOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "StartGateway":{ + "name":"StartGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartGatewayInput"}, + "output":{"shape":"StartGatewayOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "UpdateBandwidthRateLimit":{ + "name":"UpdateBandwidthRateLimit", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateBandwidthRateLimitInput"}, + "output":{"shape":"UpdateBandwidthRateLimitOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "UpdateChapCredentials":{ + 
"name":"UpdateChapCredentials", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateChapCredentialsInput"}, + "output":{"shape":"UpdateChapCredentialsOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "UpdateGatewayInformation":{ + "name":"UpdateGatewayInformation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateGatewayInformationInput"}, + "output":{"shape":"UpdateGatewayInformationOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "UpdateGatewaySoftwareNow":{ + "name":"UpdateGatewaySoftwareNow", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateGatewaySoftwareNowInput"}, + "output":{"shape":"UpdateGatewaySoftwareNowOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "UpdateMaintenanceStartTime":{ + "name":"UpdateMaintenanceStartTime", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateMaintenanceStartTimeInput"}, + "output":{"shape":"UpdateMaintenanceStartTimeOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "UpdateSnapshotSchedule":{ + "name":"UpdateSnapshotSchedule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateSnapshotScheduleInput"}, + "output":{"shape":"UpdateSnapshotScheduleOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, + "UpdateVTLDeviceType":{ + "name":"UpdateVTLDeviceType", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateVTLDeviceTypeInput"}, + "output":{"shape":"UpdateVTLDeviceTypeOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + } + }, + "shapes":{ + "ActivateGatewayInput":{ + "type":"structure", + "required":[ + "ActivationKey", + "GatewayName", + "GatewayTimezone", + "GatewayRegion" + ], + "members":{ + "ActivationKey":{"shape":"ActivationKey"}, + "GatewayName":{"shape":"GatewayName"}, + "GatewayTimezone":{"shape":"GatewayTimezone"}, + "GatewayRegion":{"shape":"RegionId"}, + "GatewayType":{"shape":"GatewayType"}, + "TapeDriveType":{"shape":"TapeDriveType"}, + "MediumChangerType":{"shape":"MediumChangerType"} + } + }, + "ActivateGatewayOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "ActivationKey":{ + "type":"string", + "max":50, + "min":1 + }, + "AddCacheInput":{ + "type":"structure", + "required":[ + "GatewayARN", + "DiskIds" + ], + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "DiskIds":{"shape":"DiskIds"} + } + }, + "AddCacheOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "AddTagsToResourceInput":{ + "type":"structure", + "required":[ + "ResourceARN", + "Tags" + ], + "members":{ + "ResourceARN":{"shape":"ResourceARN"}, + "Tags":{"shape":"Tags"} + } + }, + "AddTagsToResourceOutput":{ + "type":"structure", + "members":{ + "ResourceARN":{"shape":"ResourceARN"} + } + }, + "AddUploadBufferInput":{ + "type":"structure", + "required":[ + "GatewayARN", + "DiskIds" + ], + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "DiskIds":{"shape":"DiskIds"} + } + }, + "AddUploadBufferOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "AddWorkingStorageInput":{ + "type":"structure", + 
"required":[ + "GatewayARN", + "DiskIds" + ], + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "DiskIds":{"shape":"DiskIds"} + } + }, + "AddWorkingStorageOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "BandwidthDownloadRateLimit":{ + "type":"long", + "min":102400 + }, + "BandwidthType":{ + "type":"string", + "max":25, + "min":3 + }, + "BandwidthUploadRateLimit":{ + "type":"long", + "min":51200 + }, + "CachediSCSIVolume":{ + "type":"structure", + "members":{ + "VolumeARN":{"shape":"VolumeARN"}, + "VolumeId":{"shape":"VolumeId"}, + "VolumeType":{"shape":"VolumeType"}, + "VolumeStatus":{"shape":"VolumeStatus"}, + "VolumeSizeInBytes":{"shape":"long"}, + "VolumeProgress":{"shape":"DoubleObject"}, + "SourceSnapshotId":{"shape":"SnapshotId"}, + "VolumeiSCSIAttributes":{"shape":"VolumeiSCSIAttributes"} + } + }, + "CachediSCSIVolumes":{ + "type":"list", + "member":{"shape":"CachediSCSIVolume"} + }, + "CancelArchivalInput":{ + "type":"structure", + "required":[ + "GatewayARN", + "TapeARN" + ], + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "TapeARN":{"shape":"TapeARN"} + } + }, + "CancelArchivalOutput":{ + "type":"structure", + "members":{ + "TapeARN":{"shape":"TapeARN"} + } + }, + "CancelRetrievalInput":{ + "type":"structure", + "required":[ + "GatewayARN", + "TapeARN" + ], + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "TapeARN":{"shape":"TapeARN"} + } + }, + "CancelRetrievalOutput":{ + "type":"structure", + "members":{ + "TapeARN":{"shape":"TapeARN"} + } + }, + "ChapCredentials":{ + "type":"list", + "member":{"shape":"ChapInfo"} + }, + "ChapInfo":{ + "type":"structure", + "members":{ + "TargetARN":{"shape":"TargetARN"}, + "SecretToAuthenticateInitiator":{"shape":"ChapSecret"}, + "InitiatorName":{"shape":"IqnName"}, + "SecretToAuthenticateTarget":{"shape":"ChapSecret"} + } + }, + "ChapSecret":{ + "type":"string", + "max":100, + "min":1 + }, + "ClientToken":{ + "type":"string", + "max":100, + "min":5 + }, + "CreateCachediSCSIVolumeInput":{ + "type":"structure", + "required":[ + "GatewayARN", + "VolumeSizeInBytes", + "TargetName", + "NetworkInterfaceId", + "ClientToken" + ], + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "VolumeSizeInBytes":{"shape":"long"}, + "SnapshotId":{"shape":"SnapshotId"}, + "TargetName":{"shape":"TargetName"}, + "NetworkInterfaceId":{"shape":"NetworkInterfaceId"}, + "ClientToken":{"shape":"ClientToken"} + } + }, + "CreateCachediSCSIVolumeOutput":{ + "type":"structure", + "members":{ + "VolumeARN":{"shape":"VolumeARN"}, + "TargetARN":{"shape":"TargetARN"} + } + }, + "CreateSnapshotFromVolumeRecoveryPointInput":{ + "type":"structure", + "required":[ + "VolumeARN", + "SnapshotDescription" + ], + "members":{ + "VolumeARN":{"shape":"VolumeARN"}, + "SnapshotDescription":{"shape":"SnapshotDescription"} + } + }, + "CreateSnapshotFromVolumeRecoveryPointOutput":{ + "type":"structure", + "members":{ + "SnapshotId":{"shape":"SnapshotId"}, + "VolumeARN":{"shape":"VolumeARN"}, + "VolumeRecoveryPointTime":{"shape":"string"} + } + }, + "CreateSnapshotInput":{ + "type":"structure", + "required":[ + "VolumeARN", + "SnapshotDescription" + ], + "members":{ + "VolumeARN":{"shape":"VolumeARN"}, + "SnapshotDescription":{"shape":"SnapshotDescription"} + } + }, + "CreateSnapshotOutput":{ + "type":"structure", + "members":{ + "VolumeARN":{"shape":"VolumeARN"}, + "SnapshotId":{"shape":"SnapshotId"} + } + }, + "CreateStorediSCSIVolumeInput":{ + "type":"structure", + "required":[ + "GatewayARN", + "DiskId", + 
"PreserveExistingData", + "TargetName", + "NetworkInterfaceId" + ], + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "DiskId":{"shape":"DiskId"}, + "SnapshotId":{"shape":"SnapshotId"}, + "PreserveExistingData":{"shape":"boolean"}, + "TargetName":{"shape":"TargetName"}, + "NetworkInterfaceId":{"shape":"NetworkInterfaceId"} + } + }, + "CreateStorediSCSIVolumeOutput":{ + "type":"structure", + "members":{ + "VolumeARN":{"shape":"VolumeARN"}, + "VolumeSizeInBytes":{"shape":"long"}, + "TargetARN":{"shape":"TargetARN"} + } + }, + "CreateTapeWithBarcodeInput":{ + "type":"structure", + "required":[ + "GatewayARN", + "TapeSizeInBytes", + "TapeBarcode" + ], + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "TapeSizeInBytes":{"shape":"TapeSize"}, + "TapeBarcode":{"shape":"TapeBarcode"} + } + }, + "CreateTapeWithBarcodeOutput":{ + "type":"structure", + "members":{ + "TapeARN":{"shape":"TapeARN"} + } + }, + "CreateTapesInput":{ + "type":"structure", + "required":[ + "GatewayARN", + "TapeSizeInBytes", + "ClientToken", + "NumTapesToCreate", + "TapeBarcodePrefix" + ], + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "TapeSizeInBytes":{"shape":"TapeSize"}, + "ClientToken":{"shape":"ClientToken"}, + "NumTapesToCreate":{"shape":"NumTapesToCreate"}, + "TapeBarcodePrefix":{"shape":"TapeBarcodePrefix"} + } + }, + "CreateTapesOutput":{ + "type":"structure", + "members":{ + "TapeARNs":{"shape":"TapeARNs"} + } + }, + "DayOfWeek":{ + "type":"integer", + "max":6, + "min":0 + }, + "DeleteBandwidthRateLimitInput":{ + "type":"structure", + "required":[ + "GatewayARN", + "BandwidthType" + ], + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "BandwidthType":{"shape":"BandwidthType"} + } + }, + "DeleteBandwidthRateLimitOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "DeleteChapCredentialsInput":{ + "type":"structure", + "required":[ + "TargetARN", + "InitiatorName" + ], + "members":{ + "TargetARN":{"shape":"TargetARN"}, + "InitiatorName":{"shape":"IqnName"} + } + }, + "DeleteChapCredentialsOutput":{ + "type":"structure", + "members":{ + "TargetARN":{"shape":"TargetARN"}, + "InitiatorName":{"shape":"IqnName"} + } + }, + "DeleteGatewayInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "DeleteGatewayOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "DeleteSnapshotScheduleInput":{ + "type":"structure", + "required":["VolumeARN"], + "members":{ + "VolumeARN":{"shape":"VolumeARN"} + } + }, + "DeleteSnapshotScheduleOutput":{ + "type":"structure", + "members":{ + "VolumeARN":{"shape":"VolumeARN"} + } + }, + "DeleteTapeArchiveInput":{ + "type":"structure", + "required":["TapeARN"], + "members":{ + "TapeARN":{"shape":"TapeARN"} + } + }, + "DeleteTapeArchiveOutput":{ + "type":"structure", + "members":{ + "TapeARN":{"shape":"TapeARN"} + } + }, + "DeleteTapeInput":{ + "type":"structure", + "required":[ + "GatewayARN", + "TapeARN" + ], + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "TapeARN":{"shape":"TapeARN"} + } + }, + "DeleteTapeOutput":{ + "type":"structure", + "members":{ + "TapeARN":{"shape":"TapeARN"} + } + }, + "DeleteVolumeInput":{ + "type":"structure", + "required":["VolumeARN"], + "members":{ + "VolumeARN":{"shape":"VolumeARN"} + } + }, + "DeleteVolumeOutput":{ + "type":"structure", + "members":{ + "VolumeARN":{"shape":"VolumeARN"} + } + }, + "DescribeBandwidthRateLimitInput":{ + "type":"structure", + "required":["GatewayARN"], + 
"members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "DescribeBandwidthRateLimitOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "AverageUploadRateLimitInBitsPerSec":{"shape":"BandwidthUploadRateLimit"}, + "AverageDownloadRateLimitInBitsPerSec":{"shape":"BandwidthDownloadRateLimit"} + } + }, + "DescribeCacheInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "DescribeCacheOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "DiskIds":{"shape":"DiskIds"}, + "CacheAllocatedInBytes":{"shape":"long"}, + "CacheUsedPercentage":{"shape":"double"}, + "CacheDirtyPercentage":{"shape":"double"}, + "CacheHitPercentage":{"shape":"double"}, + "CacheMissPercentage":{"shape":"double"} + } + }, + "DescribeCachediSCSIVolumesInput":{ + "type":"structure", + "required":["VolumeARNs"], + "members":{ + "VolumeARNs":{"shape":"VolumeARNs"} + } + }, + "DescribeCachediSCSIVolumesOutput":{ + "type":"structure", + "members":{ + "CachediSCSIVolumes":{"shape":"CachediSCSIVolumes"} + } + }, + "DescribeChapCredentialsInput":{ + "type":"structure", + "required":["TargetARN"], + "members":{ + "TargetARN":{"shape":"TargetARN"} + } + }, + "DescribeChapCredentialsOutput":{ + "type":"structure", + "members":{ + "ChapCredentials":{"shape":"ChapCredentials"} + } + }, + "DescribeGatewayInformationInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "DescribeGatewayInformationOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "GatewayId":{"shape":"GatewayId"}, + "GatewayName":{"shape":"string"}, + "GatewayTimezone":{"shape":"GatewayTimezone"}, + "GatewayState":{"shape":"GatewayState"}, + "GatewayNetworkInterfaces":{"shape":"GatewayNetworkInterfaces"}, + "GatewayType":{"shape":"GatewayType"}, + "NextUpdateAvailabilityDate":{"shape":"NextUpdateAvailabilityDate"}, + "LastSoftwareUpdate":{"shape":"LastSoftwareUpdate"} + } + }, + "DescribeMaintenanceStartTimeInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "DescribeMaintenanceStartTimeOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "HourOfDay":{"shape":"HourOfDay"}, + "MinuteOfHour":{"shape":"MinuteOfHour"}, + "DayOfWeek":{"shape":"DayOfWeek"}, + "Timezone":{"shape":"GatewayTimezone"} + } + }, + "DescribeSnapshotScheduleInput":{ + "type":"structure", + "required":["VolumeARN"], + "members":{ + "VolumeARN":{"shape":"VolumeARN"} + } + }, + "DescribeSnapshotScheduleOutput":{ + "type":"structure", + "members":{ + "VolumeARN":{"shape":"VolumeARN"}, + "StartAt":{"shape":"HourOfDay"}, + "RecurrenceInHours":{"shape":"RecurrenceInHours"}, + "Description":{"shape":"Description"}, + "Timezone":{"shape":"GatewayTimezone"} + } + }, + "DescribeStorediSCSIVolumesInput":{ + "type":"structure", + "required":["VolumeARNs"], + "members":{ + "VolumeARNs":{"shape":"VolumeARNs"} + } + }, + "DescribeStorediSCSIVolumesOutput":{ + "type":"structure", + "members":{ + "StorediSCSIVolumes":{"shape":"StorediSCSIVolumes"} + } + }, + "DescribeTapeArchivesInput":{ + "type":"structure", + "members":{ + "TapeARNs":{"shape":"TapeARNs"}, + "Marker":{"shape":"Marker"}, + "Limit":{"shape":"PositiveIntObject"} + } + }, + "DescribeTapeArchivesOutput":{ + "type":"structure", + "members":{ + "TapeArchives":{"shape":"TapeArchives"}, + "Marker":{"shape":"Marker"} + 
} + }, + "DescribeTapeRecoveryPointsInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "Marker":{"shape":"Marker"}, + "Limit":{"shape":"PositiveIntObject"} + } + }, + "DescribeTapeRecoveryPointsOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "TapeRecoveryPointInfos":{"shape":"TapeRecoveryPointInfos"}, + "Marker":{"shape":"Marker"} + } + }, + "DescribeTapesInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "TapeARNs":{"shape":"TapeARNs"}, + "Marker":{"shape":"Marker"}, + "Limit":{"shape":"PositiveIntObject"} + } + }, + "DescribeTapesOutput":{ + "type":"structure", + "members":{ + "Tapes":{"shape":"Tapes"}, + "Marker":{"shape":"Marker"} + } + }, + "DescribeUploadBufferInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "DescribeUploadBufferOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "DiskIds":{"shape":"DiskIds"}, + "UploadBufferUsedInBytes":{"shape":"long"}, + "UploadBufferAllocatedInBytes":{"shape":"long"} + } + }, + "DescribeVTLDevicesInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "VTLDeviceARNs":{"shape":"VTLDeviceARNs"}, + "Marker":{"shape":"Marker"}, + "Limit":{"shape":"PositiveIntObject"} + } + }, + "DescribeVTLDevicesOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "VTLDevices":{"shape":"VTLDevices"}, + "Marker":{"shape":"Marker"} + } + }, + "DescribeWorkingStorageInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "DescribeWorkingStorageOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "DiskIds":{"shape":"DiskIds"}, + "WorkingStorageUsedInBytes":{"shape":"long"}, + "WorkingStorageAllocatedInBytes":{"shape":"long"} + } + }, + "Description":{ + "type":"string", + "max":255, + "min":1 + }, + "DeviceType":{ + "type":"string", + "max":50, + "min":2 + }, + "DeviceiSCSIAttributes":{ + "type":"structure", + "members":{ + "TargetARN":{"shape":"TargetARN"}, + "NetworkInterfaceId":{"shape":"NetworkInterfaceId"}, + "NetworkInterfacePort":{"shape":"integer"}, + "ChapEnabled":{"shape":"boolean"} + } + }, + "DisableGatewayInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "DisableGatewayOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "Disk":{ + "type":"structure", + "members":{ + "DiskId":{"shape":"DiskId"}, + "DiskPath":{"shape":"string"}, + "DiskNode":{"shape":"string"}, + "DiskStatus":{"shape":"string"}, + "DiskSizeInBytes":{"shape":"long"}, + "DiskAllocationType":{"shape":"DiskAllocationType"}, + "DiskAllocationResource":{"shape":"string"} + } + }, + "DiskAllocationType":{ + "type":"string", + "max":100, + "min":3 + }, + "DiskId":{ + "type":"string", + "max":300, + "min":1 + }, + "DiskIds":{ + "type":"list", + "member":{"shape":"DiskId"} + }, + "Disks":{ + "type":"list", + "member":{"shape":"Disk"} + }, + "DoubleObject":{"type":"double"}, + "ErrorCode":{ + "type":"string", + "enum":[ + "ActivationKeyExpired", + "ActivationKeyInvalid", + "ActivationKeyNotFound", + "GatewayInternalError", + "GatewayNotConnected", + "GatewayNotFound", + "GatewayProxyNetworkConnectionBusy", + 
"AuthenticationFailure", + "BandwidthThrottleScheduleNotFound", + "Blocked", + "CannotExportSnapshot", + "ChapCredentialNotFound", + "DiskAlreadyAllocated", + "DiskDoesNotExist", + "DiskSizeGreaterThanVolumeMaxSize", + "DiskSizeLessThanVolumeSize", + "DiskSizeNotGigAligned", + "DuplicateCertificateInfo", + "DuplicateSchedule", + "EndpointNotFound", + "IAMNotSupported", + "InitiatorInvalid", + "InitiatorNotFound", + "InternalError", + "InvalidGateway", + "InvalidEndpoint", + "InvalidParameters", + "InvalidSchedule", + "LocalStorageLimitExceeded", + "LunAlreadyAllocated ", + "LunInvalid", + "MaximumContentLengthExceeded", + "MaximumTapeCartridgeCountExceeded", + "MaximumVolumeCountExceeded", + "NetworkConfigurationChanged", + "NoDisksAvailable", + "NotImplemented", + "NotSupported", + "OperationAborted", + "OutdatedGateway", + "ParametersNotImplemented", + "RegionInvalid", + "RequestTimeout", + "ServiceUnavailable", + "SnapshotDeleted", + "SnapshotIdInvalid", + "SnapshotInProgress", + "SnapshotNotFound", + "SnapshotScheduleNotFound", + "StagingAreaFull", + "StorageFailure", + "TapeCartridgeNotFound", + "TargetAlreadyExists", + "TargetInvalid", + "TargetNotFound", + "UnauthorizedOperation", + "VolumeAlreadyExists", + "VolumeIdInvalid", + "VolumeInUse", + "VolumeNotFound", + "VolumeNotReady" + ] + }, + "GatewayARN":{ + "type":"string", + "max":500, + "min":50 + }, + "GatewayId":{ + "type":"string", + "max":30, + "min":12 + }, + "GatewayInfo":{ + "type":"structure", + "members":{ + "GatewayId":{"shape":"GatewayId"}, + "GatewayARN":{"shape":"GatewayARN"}, + "GatewayType":{"shape":"GatewayType"}, + "GatewayOperationalState":{"shape":"GatewayOperationalState"}, + "GatewayName":{"shape":"string"} + } + }, + "GatewayName":{ + "type":"string", + "max":255, + "min":2, + "pattern":"^[ -\\.0-\\[\\]-~]*[!-\\.0-\\[\\]-~][ -\\.0-\\[\\]-~]*$" + }, + "GatewayNetworkInterfaces":{ + "type":"list", + "member":{"shape":"NetworkInterface"} + }, + "GatewayOperationalState":{ + "type":"string", + "max":25, + "min":2 + }, + "GatewayState":{ + "type":"string", + "max":25, + "min":2 + }, + "GatewayTimezone":{ + "type":"string", + "max":10, + "min":3 + }, + "GatewayType":{ + "type":"string", + "max":20, + "min":2 + }, + "Gateways":{ + "type":"list", + "member":{"shape":"GatewayInfo"} + }, + "HourOfDay":{ + "type":"integer", + "max":23, + "min":0 + }, + "Initiator":{ + "type":"string", + "max":50, + "min":1 + }, + "Initiators":{ + "type":"list", + "member":{"shape":"Initiator"} + }, + "InternalServerError":{ + "type":"structure", + "members":{ + "message":{"shape":"string"}, + "error":{"shape":"StorageGatewayError"} + }, + "exception":true + }, + "InvalidGatewayRequestException":{ + "type":"structure", + "members":{ + "message":{"shape":"string"}, + "error":{"shape":"StorageGatewayError"} + }, + "exception":true + }, + "IqnName":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[0-9a-z:.-]+" + }, + "LastSoftwareUpdate":{ + "type":"string", + "max":25, + "min":1 + }, + "ListGatewaysInput":{ + "type":"structure", + "members":{ + "Marker":{"shape":"Marker"}, + "Limit":{"shape":"PositiveIntObject"} + } + }, + "ListGatewaysOutput":{ + "type":"structure", + "members":{ + "Gateways":{"shape":"Gateways"}, + "Marker":{"shape":"Marker"} + } + }, + "ListLocalDisksInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "ListLocalDisksOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "Disks":{"shape":"Disks"} + } + }, + 
"ListTagsForResourceInput":{ + "type":"structure", + "required":["ResourceARN"], + "members":{ + "ResourceARN":{"shape":"ResourceARN"}, + "Marker":{"shape":"Marker"}, + "Limit":{"shape":"PositiveIntObject"} + } + }, + "ListTagsForResourceOutput":{ + "type":"structure", + "members":{ + "ResourceARN":{"shape":"ResourceARN"}, + "Marker":{"shape":"Marker"}, + "Tags":{"shape":"Tags"} + } + }, + "ListTapesInput":{ + "type":"structure", + "members":{ + "TapeARNs":{"shape":"TapeARNs"}, + "Marker":{"shape":"Marker"}, + "Limit":{"shape":"PositiveIntObject"} + } + }, + "ListTapesOutput":{ + "type":"structure", + "members":{ + "TapeInfos":{"shape":"TapeInfos"}, + "Marker":{"shape":"Marker"} + } + }, + "ListVolumeInitiatorsInput":{ + "type":"structure", + "required":["VolumeARN"], + "members":{ + "VolumeARN":{"shape":"VolumeARN"} + } + }, + "ListVolumeInitiatorsOutput":{ + "type":"structure", + "members":{ + "Initiators":{"shape":"Initiators"} + } + }, + "ListVolumeRecoveryPointsInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "ListVolumeRecoveryPointsOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "VolumeRecoveryPointInfos":{"shape":"VolumeRecoveryPointInfos"} + } + }, + "ListVolumesInput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "Marker":{"shape":"Marker"}, + "Limit":{"shape":"PositiveIntObject"} + } + }, + "ListVolumesOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "Marker":{"shape":"Marker"}, + "VolumeInfos":{"shape":"VolumeInfos"} + } + }, + "LocalConsolePassword":{ + "type":"string", + "max":512, + "min":6, + "pattern":"^[ -~]+$", + "sensitive":true + }, + "Marker":{ + "type":"string", + "max":1000, + "min":1 + }, + "MediumChangerType":{ + "type":"string", + "max":50, + "min":2 + }, + "MinuteOfHour":{ + "type":"integer", + "max":59, + "min":0 + }, + "NetworkInterface":{ + "type":"structure", + "members":{ + "Ipv4Address":{"shape":"string"}, + "MacAddress":{"shape":"string"}, + "Ipv6Address":{"shape":"string"} + } + }, + "NetworkInterfaceId":{ + "type":"string", + "pattern":"\\A(25[0-5]|2[0-4]\\d|[0-1]?\\d?\\d)(\\.(25[0-5]|2[0-4]\\d|[0-1]?\\d?\\d)){3}\\z" + }, + "NextUpdateAvailabilityDate":{ + "type":"string", + "max":25, + "min":1 + }, + "NumTapesToCreate":{ + "type":"integer", + "max":10, + "min":1 + }, + "PositiveIntObject":{ + "type":"integer", + "min":1 + }, + "RecurrenceInHours":{ + "type":"integer", + "max":24, + "min":1 + }, + "RegionId":{ + "type":"string", + "max":25, + "min":1 + }, + "RemoveTagsFromResourceInput":{ + "type":"structure", + "required":[ + "ResourceARN", + "TagKeys" + ], + "members":{ + "ResourceARN":{"shape":"ResourceARN"}, + "TagKeys":{"shape":"TagKeys"} + } + }, + "RemoveTagsFromResourceOutput":{ + "type":"structure", + "members":{ + "ResourceARN":{"shape":"ResourceARN"} + } + }, + "ResetCacheInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "ResetCacheOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "ResourceARN":{ + "type":"string", + "max":500, + "min":50 + }, + "RetrieveTapeArchiveInput":{ + "type":"structure", + "required":[ + "TapeARN", + "GatewayARN" + ], + "members":{ + "TapeARN":{"shape":"TapeARN"}, + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "RetrieveTapeArchiveOutput":{ + "type":"structure", + "members":{ + "TapeARN":{"shape":"TapeARN"} + } + }, + 
"RetrieveTapeRecoveryPointInput":{ + "type":"structure", + "required":[ + "TapeARN", + "GatewayARN" + ], + "members":{ + "TapeARN":{"shape":"TapeARN"}, + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "RetrieveTapeRecoveryPointOutput":{ + "type":"structure", + "members":{ + "TapeARN":{"shape":"TapeARN"} + } + }, + "SetLocalConsolePasswordInput":{ + "type":"structure", + "required":[ + "GatewayARN", + "LocalConsolePassword" + ], + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "LocalConsolePassword":{"shape":"LocalConsolePassword"} + } + }, + "SetLocalConsolePasswordOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "ShutdownGatewayInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "ShutdownGatewayOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "SnapshotDescription":{ + "type":"string", + "max":255, + "min":1 + }, + "SnapshotId":{ + "type":"string", + "pattern":"\\Asnap-([0-9A-Fa-f]{8}|[0-9A-Fa-f]{17})\\z" + }, + "StartGatewayInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "StartGatewayOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "StorageGatewayError":{ + "type":"structure", + "members":{ + "errorCode":{"shape":"ErrorCode"}, + "errorDetails":{"shape":"errorDetails"} + } + }, + "StorediSCSIVolume":{ + "type":"structure", + "members":{ + "VolumeARN":{"shape":"VolumeARN"}, + "VolumeId":{"shape":"VolumeId"}, + "VolumeType":{"shape":"VolumeType"}, + "VolumeStatus":{"shape":"VolumeStatus"}, + "VolumeSizeInBytes":{"shape":"long"}, + "VolumeProgress":{"shape":"DoubleObject"}, + "VolumeDiskId":{"shape":"DiskId"}, + "SourceSnapshotId":{"shape":"SnapshotId"}, + "PreservedExistingData":{"shape":"boolean"}, + "VolumeiSCSIAttributes":{"shape":"VolumeiSCSIAttributes"} + } + }, + "StorediSCSIVolumes":{ + "type":"list", + "member":{"shape":"StorediSCSIVolume"} + }, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{"shape":"TagKey"}, + "Value":{"shape":"TagValue"} + } + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-%@]*)$" + }, + "TagKeys":{ + "type":"list", + "member":{"shape":"TagKey"} + }, + "TagValue":{ + "type":"string", + "max":256 + }, + "Tags":{ + "type":"list", + "member":{"shape":"Tag"} + }, + "Tape":{ + "type":"structure", + "members":{ + "TapeARN":{"shape":"TapeARN"}, + "TapeBarcode":{"shape":"TapeBarcode"}, + "TapeSizeInBytes":{"shape":"TapeSize"}, + "TapeStatus":{"shape":"TapeStatus"}, + "VTLDevice":{"shape":"VTLDeviceARN"}, + "Progress":{"shape":"DoubleObject"} + } + }, + "TapeARN":{ + "type":"string", + "max":500, + "min":50 + }, + "TapeARNs":{ + "type":"list", + "member":{"shape":"TapeARN"} + }, + "TapeArchive":{ + "type":"structure", + "members":{ + "TapeARN":{"shape":"TapeARN"}, + "TapeBarcode":{"shape":"TapeBarcode"}, + "TapeSizeInBytes":{"shape":"TapeSize"}, + "CompletionTime":{"shape":"Time"}, + "RetrievedTo":{"shape":"GatewayARN"}, + "TapeStatus":{"shape":"TapeArchiveStatus"} + } + }, + "TapeArchiveStatus":{"type":"string"}, + "TapeArchives":{ + "type":"list", + "member":{"shape":"TapeArchive"} + }, + "TapeBarcode":{ + "type":"string", + "max":16, + "min":7, + "pattern":"^[A-Z0-9]*$" + }, + "TapeBarcodePrefix":{ + "type":"string", + "max":4, + "min":1, + "pattern":"^[A-Z]*$" + }, + "TapeDriveType":{ 
+ "type":"string", + "max":50, + "min":2 + }, + "TapeInfo":{ + "type":"structure", + "members":{ + "TapeARN":{"shape":"TapeARN"}, + "TapeBarcode":{"shape":"TapeBarcode"}, + "TapeSizeInBytes":{"shape":"TapeSize"}, + "TapeStatus":{"shape":"TapeStatus"}, + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "TapeInfos":{ + "type":"list", + "member":{"shape":"TapeInfo"} + }, + "TapeRecoveryPointInfo":{ + "type":"structure", + "members":{ + "TapeARN":{"shape":"TapeARN"}, + "TapeRecoveryPointTime":{"shape":"Time"}, + "TapeSizeInBytes":{"shape":"TapeSize"}, + "TapeStatus":{"shape":"TapeRecoveryPointStatus"} + } + }, + "TapeRecoveryPointInfos":{ + "type":"list", + "member":{"shape":"TapeRecoveryPointInfo"} + }, + "TapeRecoveryPointStatus":{"type":"string"}, + "TapeSize":{"type":"long"}, + "TapeStatus":{"type":"string"}, + "Tapes":{ + "type":"list", + "member":{"shape":"Tape"} + }, + "TargetARN":{ + "type":"string", + "max":800, + "min":50 + }, + "TargetName":{ + "type":"string", + "max":200, + "min":1, + "pattern":"^[-\\.;a-z0-9]+$" + }, + "Time":{"type":"timestamp"}, + "UpdateBandwidthRateLimitInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "AverageUploadRateLimitInBitsPerSec":{"shape":"BandwidthUploadRateLimit"}, + "AverageDownloadRateLimitInBitsPerSec":{"shape":"BandwidthDownloadRateLimit"} + } + }, + "UpdateBandwidthRateLimitOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "UpdateChapCredentialsInput":{ + "type":"structure", + "required":[ + "TargetARN", + "SecretToAuthenticateInitiator", + "InitiatorName" + ], + "members":{ + "TargetARN":{"shape":"TargetARN"}, + "SecretToAuthenticateInitiator":{"shape":"ChapSecret"}, + "InitiatorName":{"shape":"IqnName"}, + "SecretToAuthenticateTarget":{"shape":"ChapSecret"} + } + }, + "UpdateChapCredentialsOutput":{ + "type":"structure", + "members":{ + "TargetARN":{"shape":"TargetARN"}, + "InitiatorName":{"shape":"IqnName"} + } + }, + "UpdateGatewayInformationInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "GatewayName":{"shape":"GatewayName"}, + "GatewayTimezone":{"shape":"GatewayTimezone"} + } + }, + "UpdateGatewayInformationOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "GatewayName":{"shape":"string"} + } + }, + "UpdateGatewaySoftwareNowInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "UpdateGatewaySoftwareNowOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "UpdateMaintenanceStartTimeInput":{ + "type":"structure", + "required":[ + "GatewayARN", + "HourOfDay", + "MinuteOfHour", + "DayOfWeek" + ], + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "HourOfDay":{"shape":"HourOfDay"}, + "MinuteOfHour":{"shape":"MinuteOfHour"}, + "DayOfWeek":{"shape":"DayOfWeek"} + } + }, + "UpdateMaintenanceStartTimeOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "UpdateSnapshotScheduleInput":{ + "type":"structure", + "required":[ + "VolumeARN", + "StartAt", + "RecurrenceInHours" + ], + "members":{ + "VolumeARN":{"shape":"VolumeARN"}, + "StartAt":{"shape":"HourOfDay"}, + "RecurrenceInHours":{"shape":"RecurrenceInHours"}, + "Description":{"shape":"Description"} + } + }, + "UpdateSnapshotScheduleOutput":{ + "type":"structure", + "members":{ + "VolumeARN":{"shape":"VolumeARN"} + } + }, + 
"UpdateVTLDeviceTypeInput":{ + "type":"structure", + "required":[ + "VTLDeviceARN", + "DeviceType" + ], + "members":{ + "VTLDeviceARN":{"shape":"VTLDeviceARN"}, + "DeviceType":{"shape":"DeviceType"} + } + }, + "UpdateVTLDeviceTypeOutput":{ + "type":"structure", + "members":{ + "VTLDeviceARN":{"shape":"VTLDeviceARN"} + } + }, + "VTLDevice":{ + "type":"structure", + "members":{ + "VTLDeviceARN":{"shape":"VTLDeviceARN"}, + "VTLDeviceType":{"shape":"VTLDeviceType"}, + "VTLDeviceVendor":{"shape":"VTLDeviceVendor"}, + "VTLDeviceProductIdentifier":{"shape":"VTLDeviceProductIdentifier"}, + "DeviceiSCSIAttributes":{"shape":"DeviceiSCSIAttributes"} + } + }, + "VTLDeviceARN":{ + "type":"string", + "max":500, + "min":50 + }, + "VTLDeviceARNs":{ + "type":"list", + "member":{"shape":"VTLDeviceARN"} + }, + "VTLDeviceProductIdentifier":{"type":"string"}, + "VTLDeviceType":{"type":"string"}, + "VTLDeviceVendor":{"type":"string"}, + "VTLDevices":{ + "type":"list", + "member":{"shape":"VTLDevice"} + }, + "VolumeARN":{ + "type":"string", + "max":500, + "min":50 + }, + "VolumeARNs":{ + "type":"list", + "member":{"shape":"VolumeARN"} + }, + "VolumeId":{ + "type":"string", + "max":30, + "min":12 + }, + "VolumeInfo":{ + "type":"structure", + "members":{ + "VolumeARN":{"shape":"VolumeARN"}, + "VolumeId":{"shape":"VolumeId"}, + "GatewayARN":{"shape":"GatewayARN"}, + "GatewayId":{"shape":"GatewayId"}, + "VolumeType":{"shape":"VolumeType"}, + "VolumeSizeInBytes":{"shape":"long"} + } + }, + "VolumeInfos":{ + "type":"list", + "member":{"shape":"VolumeInfo"} + }, + "VolumeRecoveryPointInfo":{ + "type":"structure", + "members":{ + "VolumeARN":{"shape":"VolumeARN"}, + "VolumeSizeInBytes":{"shape":"long"}, + "VolumeUsageInBytes":{"shape":"long"}, + "VolumeRecoveryPointTime":{"shape":"string"} + } + }, + "VolumeRecoveryPointInfos":{ + "type":"list", + "member":{"shape":"VolumeRecoveryPointInfo"} + }, + "VolumeStatus":{ + "type":"string", + "max":50, + "min":3 + }, + "VolumeType":{ + "type":"string", + "max":100, + "min":3 + }, + "VolumeiSCSIAttributes":{ + "type":"structure", + "members":{ + "TargetARN":{"shape":"TargetARN"}, + "NetworkInterfaceId":{"shape":"NetworkInterfaceId"}, + "NetworkInterfacePort":{"shape":"integer"}, + "LunNumber":{"shape":"PositiveIntObject"}, + "ChapEnabled":{"shape":"boolean"} + } + }, + "boolean":{"type":"boolean"}, + "double":{"type":"double"}, + "errorDetails":{ + "type":"map", + "key":{"shape":"string"}, + "value":{"shape":"string"} + }, + "integer":{"type":"integer"}, + "long":{"type":"long"}, + "string":{"type":"string"} + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/storagegateway/2013-06-30/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/storagegateway/2013-06-30/docs-2.json new file mode 100644 index 000000000..0702cc75f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/storagegateway/2013-06-30/docs-2.json @@ -0,0 +1,1484 @@ +{ + "version": "2.0", + "service": "AWS Storage Gateway Service

    AWS Storage Gateway is the service that connects an on-premises software appliance with cloud-based storage to provide seamless and secure integration between an organization's on-premises IT environment and AWS's storage infrastructure. The service enables you to securely upload data to the AWS cloud for cost-effective backup and rapid disaster recovery.

    Use the following links to get started using the AWS Storage Gateway Service API Reference:

    AWS Storage Gateway resource IDs are in uppercase. When you use these resource IDs with the Amazon EC2 API, EC2 expects resource IDs in lowercase. You must change your resource ID to lowercase to use it with the EC2 API. For example, in Storage Gateway the ID for a volume might be vol-1122AABB. When you use this ID with the EC2 API, you must change it to vol-1122aabb. Otherwise, the EC2 API might not behave as expected.

    IDs for Storage Gateway volumes and Amazon EBS snapshots created from gateway volumes are changing to a longer format. Starting in December 2016, all new volumes and snapshots will be created with a 17-character string. Starting in April 2016, you will be able to use these longer IDs so you can test your systems with the new format. For more information, see Longer EC2 and EBS Resource IDs.

    For example, a volume ARN with the longer volume ID format will look like this:

    arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABBCCDDEEFFG.

    A snapshot ID with the longer ID format will look like this: snap-78e226633445566ee.

    For more information, see Announcement: Heads-up – Longer AWS Storage Gateway volume and snapshot IDs coming in 2016.

    ", + "operations": { + "ActivateGateway": "

    Activates the gateway you previously deployed on your host. For more information, see Activate the AWS Storage Gateway. In the activation process, you specify information such as the region you want to use for storing snapshots, the time zone for scheduled snapshots, the gateway snapshot schedule window, an activation key, and a name for your gateway. The activation process also associates your gateway with your account; for more information, see UpdateGatewayInformation.

    You must turn on the gateway VM before you can activate your gateway.
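
    As a purely illustrative sketch (not part of this vendored model), an activation call through the generated aws-sdk-go storagegateway client might look like the following; the activation key, gateway name, region, and time zone values are invented:

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/storagegateway"
        )

        func main() {
            svc := storagegateway.New(session.New(&aws.Config{Region: aws.String("us-west-2")}))

            // All parameter values below are invented for illustration.
            out, err := svc.ActivateGateway(&storagegateway.ActivateGatewayInput{
                ActivationKey:   aws.String("29AV1-3OFV9-VVIUB-NKT0I-LRO6V"),
                GatewayName:     aws.String("example-gateway"),
                GatewayRegion:   aws.String("us-west-2"),
                GatewayTimezone: aws.String("GMT-8:00"),
                GatewayType:     aws.String("STORED"), // optional; STORED is the default
            })
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println("activated:", aws.StringValue(out.GatewayARN))
        }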

    ", + "AddCache": "

    Configures one or more gateway local disks as cache for a cached-volume gateway. This operation is supported only for the gateway-cached volume architecture (see Storage Gateway Concepts).

    In the request, you specify the gateway Amazon Resource Name (ARN) to which you want to add cache, and one or more disk IDs that you want to configure as cache.

    ", + "AddTagsToResource": "

    Adds one or more tags to the specified resource. You use tags to add metadata to resources, which you can use to categorize these resources. For example, you can categorize resources by purpose, owner, environment, or team. Each tag consists of a key and a value, which you define. You can add tags to the following AWS Storage Gateway resources:

    • Storage gateways of all types

    • Storage Volumes

    • Virtual Tapes

    You can create a maximum of 10 tags for each resource. Virtual tapes and storage volumes that are recovered to a new gateway maintain their tags.
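
    A minimal sketch of tagging a gateway with this SDK's generated client (imports and client construction as in the ActivateGateway sketch above; the ARN and tag values are invented):

        // addTags attaches example tags to a Storage Gateway resource.
        func addTags(svc *storagegateway.StorageGateway) error {
            _, err := svc.AddTagsToResource(&storagegateway.AddTagsToResourceInput{
                // Invented ARN for illustration.
                ResourceARN: aws.String("arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B"),
                Tags: []*storagegateway.Tag{
                    {Key: aws.String("team"), Value: aws.String("storage")},
                    {Key: aws.String("env"), Value: aws.String("prod")},
                },
            })
            return err
        }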

    ", + "AddUploadBuffer": "

    Configures one or more gateway local disks as upload buffer for a specified gateway. This operation is supported for both the gateway-stored and gateway-cached volume architectures.

    In the request, you specify the gateway Amazon Resource Name (ARN) to which you want to add upload buffer, and one or more disk IDs that you want to configure as upload buffer.

    ", + "AddWorkingStorage": "

    Configures one or more gateway local disks as working storage for a gateway. This operation is supported only for the gateway-stored volume architecture. This operation is deprecated in cached-volumes API version 20120630. Use AddUploadBuffer instead.

    Working storage is also referred to as upload buffer. You can also use the AddUploadBuffer operation to add upload buffer to a stored-volume gateway.

    In the request, you specify the gateway Amazon Resource Name (ARN) to which you want to add working storage, and one or more disk IDs that you want to configure as working storage.

    ", + "CancelArchival": "

    Cancels archiving of a virtual tape to the virtual tape shelf (VTS) after the archiving process is initiated.

    ", + "CancelRetrieval": "

    Cancels retrieval of a virtual tape from the virtual tape shelf (VTS) to a gateway after the retrieval process is initiated. The virtual tape is returned to the VTS.

    ", + "CreateCachediSCSIVolume": "

    Creates a cached volume on a specified cached gateway. This operation is supported only for the gateway-cached volume architecture.

    Cache storage must be allocated to the gateway before you can create a cached volume. Use the AddCache operation to add cache storage to a gateway.

    In the request, you must specify the gateway, size of the volume in bytes, the iSCSI target name, an IP address on which to expose the target, and a unique client token. In response, AWS Storage Gateway creates the volume and returns information about it such as the volume Amazon Resource Name (ARN), its size, and the iSCSI target ARN that initiators can use to connect to the volume target.

    ", + "CreateSnapshot": "

    Initiates a snapshot of a volume.

    AWS Storage Gateway provides the ability to back up point-in-time snapshots of your data to Amazon Simple Storage Service (Amazon S3) for durable off-site recovery, as well as import the data to an Amazon Elastic Block Store (EBS) volume in Amazon Elastic Compute Cloud (EC2). You can take snapshots of your gateway volume on a scheduled or ad hoc basis. This API enables you to take an ad hoc snapshot. For more information, see Working With Snapshots in the AWS Storage Gateway Console.

    In the CreateSnapshot request you identify the volume by providing its Amazon Resource Name (ARN). You must also provide a description for the snapshot. When AWS Storage Gateway takes the snapshot of the specified volume, the snapshot and description appear in the AWS Storage Gateway Console. In response, AWS Storage Gateway returns you a snapshot ID. You can use this snapshot ID to check the snapshot progress or later use it when you want to create a volume from a snapshot.

    To list or delete a snapshot, you must use the Amazon EC2 API. For more information, see DescribeSnapshots or DeleteSnapshot in the EC2 API reference.

    Volume and snapshot IDs are changing to a longer length ID format. For more information, see the important note on the Welcome page.
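
    For illustration only, an ad hoc snapshot request with the generated Go client could be sketched as follows (client construction as in the activation sketch above; the description string is invented):

        // snapshotVolume takes an ad hoc snapshot and returns the snapshot ID,
        // which can then be used with the EC2 API (for example, DescribeSnapshots).
        func snapshotVolume(svc *storagegateway.StorageGateway, volumeARN string) (string, error) {
            out, err := svc.CreateSnapshot(&storagegateway.CreateSnapshotInput{
                VolumeARN:           aws.String(volumeARN),
                SnapshotDescription: aws.String("ad hoc backup"), // shown in the console
            })
            if err != nil {
                return "", err
            }
            return aws.StringValue(out.SnapshotId), nil
        }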

    ", + "CreateSnapshotFromVolumeRecoveryPoint": "

    Initiates a snapshot of a gateway from a volume recovery point. This operation is supported only for the gateway-cached volume architecture.

    A volume recovery point is a point in time at which all data of the volume is consistent and from which you can create a snapshot. To get a list of volume recovery points for gateway-cached volumes, use ListVolumeRecoveryPoints.

    In the CreateSnapshotFromVolumeRecoveryPoint request, you identify the volume by providing its Amazon Resource Name (ARN). You must also provide a description for the snapshot. When AWS Storage Gateway takes a snapshot of the specified volume, the snapshot and its description appear in the AWS Storage Gateway console. In response, AWS Storage Gateway returns you a snapshot ID. You can use this snapshot ID to check the snapshot progress or later use it when you want to create a volume from a snapshot.

    To list or delete a snapshot, you must use the Amazon EC2 API. For more information, see DescribeSnapshots or DeleteSnapshot in the Amazon Elastic Compute Cloud API Reference.

    ", + "CreateStorediSCSIVolume": "

    Creates a volume on a specified gateway. This operation is supported only for the gateway-stored volume architecture.

    The size of the volume to create is inferred from the disk size. You can choose to preserve existing data on the disk, create a volume from an existing snapshot, or create an empty volume. If you choose to create an empty gateway volume, then any existing data on the disk is erased.

    In the request you must specify the gateway and the disk information on which you are creating the volume. In response, AWS Storage Gateway creates the volume and returns volume information such as the volume Amazon Resource Name (ARN), its size, and the iSCSI target ARN that initiators can use to connect to the volume target.

    ", + "CreateTapeWithBarcode": "

    Creates a virtual tape by using your own barcode. You write data to the virtual tape and then archive the tape.

    Cache storage must be allocated to the gateway before you can create a virtual tape. Use the AddCache operation to add cache storage to a gateway.

    ", + "CreateTapes": "

    Creates one or more virtual tapes. You write data to the virtual tapes and then archive the tapes.

    Cache storage must be allocated to the gateway before you can create virtual tapes. Use the AddCache operation to add cache storage to a gateway.

    ", + "DeleteBandwidthRateLimit": "

    Deletes the bandwidth rate limits of a gateway. You can delete either the upload and download bandwidth rate limit, or you can delete both. If you delete only one of the limits, the other limit remains unchanged. To specify which gateway to work with, use the Amazon Resource Name (ARN) of the gateway in your request.

    ", + "DeleteChapCredentials": "

    Deletes Challenge-Handshake Authentication Protocol (CHAP) credentials for a specified iSCSI target and initiator pair.

    ", + "DeleteGateway": "

    Deletes a gateway. To specify which gateway to delete, use the Amazon Resource Name (ARN) of the gateway in your request. The operation deletes the gateway; however, it does not delete the gateway virtual machine (VM) from your host computer.

    After you delete a gateway, you cannot reactivate it. Completed snapshots of the gateway volumes are not deleted upon deleting the gateway; however, pending snapshots will not complete. After you delete a gateway, your next step is to remove it from your environment.

    You no longer pay software charges after the gateway is deleted; however, your existing Amazon EBS snapshots persist and you will continue to be billed for these snapshots. You can choose to remove all remaining Amazon EBS snapshots by canceling your Amazon EC2 subscription.  If you prefer not to cancel your Amazon EC2 subscription, you can delete your snapshots using the Amazon EC2 console. For more information, see the AWS Storage Gateway Detail Page.

    ", + "DeleteSnapshotSchedule": "

    Deletes a snapshot schedule for a volume.

    You can take snapshots of your gateway volumes on a scheduled or ad hoc basis. This API action enables you to delete a snapshot schedule for a volume. For more information, see Working with Snapshots. In the DeleteSnapshotSchedule request, you identify the volume by providing its Amazon Resource Name (ARN).

    To list or delete a snapshot, you must use the Amazon EC2 API. For more information, see DescribeSnapshots or DeleteSnapshot in the Amazon Elastic Compute Cloud API Reference.

    ", + "DeleteTape": "

    Deletes the specified virtual tape.

    ", + "DeleteTapeArchive": "

    Deletes the specified virtual tape from the virtual tape shelf (VTS).

    ", + "DeleteVolume": "

    Deletes the specified gateway volume that you previously created using the CreateCachediSCSIVolume or CreateStorediSCSIVolume API. For gateway-stored volumes, the local disk that was configured as the storage volume is not deleted. You can reuse the local disk to create another storage volume.

    Before you delete a gateway volume, make sure there are no iSCSI connections to the volume you are deleting. You should also make sure there is no snapshot in progress. You can use the Amazon Elastic Compute Cloud (Amazon EC2) API to query snapshots on the volume you are deleting and check the snapshot status. For more information, go to DescribeSnapshots in the Amazon Elastic Compute Cloud API Reference.

    In the request, you must provide the Amazon Resource Name (ARN) of the storage volume you want to delete.

    ", + "DescribeBandwidthRateLimit": "

    Returns the bandwidth rate limits of a gateway. By default, these limits are not set, which means no bandwidth rate limiting is in effect.

    This operation returns a value for a bandwidth rate limit only if the limit is set. If no limits are set for the gateway, then this operation returns only the gateway ARN in the response body. To specify which gateway to describe, use the Amazon Resource Name (ARN) of the gateway in your request.

    ", + "DescribeCache": "

    Returns information about the cache of a gateway. This operation is supported only for the gateway-cached volume architecture.

    The response includes disk IDs that are configured as cache, and it includes the amount of cache allocated and used.

    ", + "DescribeCachediSCSIVolumes": "

    Returns a description of the gateway volumes specified in the request. This operation is supported only for the gateway-cached volume architecture.

    The list of gateway volumes in the request must be from one gateway. In the response Amazon Storage Gateway returns volume information sorted by volume Amazon Resource Name (ARN).

    ", + "DescribeChapCredentials": "

    Returns an array of Challenge-Handshake Authentication Protocol (CHAP) credentials information for a specified iSCSI target, one for each target-initiator pair.

    ", + "DescribeGatewayInformation": "

    Returns metadata about a gateway such as its name, network interfaces, configured time zone, and the state (whether the gateway is running or not). To specify which gateway to describe, use the Amazon Resource Name (ARN) of the gateway in your request.

    ", + "DescribeMaintenanceStartTime": "

    Returns your gateway's weekly maintenance start time including the day and time of the week. Note that values are in terms of the gateway's time zone.

    ", + "DescribeSnapshotSchedule": "

    Describes the snapshot schedule for the specified gateway volume. The snapshot schedule information includes intervals at which snapshots are automatically initiated on the volume.

    ", + "DescribeStorediSCSIVolumes": "

    Returns the description of the gateway volumes specified in the request. The list of gateway volumes in the request must be from one gateway. In the response Amazon Storage Gateway returns volume information sorted by volume ARNs.

    ", + "DescribeTapeArchives": "

    Returns a description of specified virtual tapes in the virtual tape shelf (VTS).

    If a specific TapeARN is not specified, AWS Storage Gateway returns a description of all virtual tapes found in the VTS associated with your account.

    ", + "DescribeTapeRecoveryPoints": "

    Returns a list of virtual tape recovery points that are available for the specified gateway-VTL.

    A recovery point is a point-in-time view of a virtual tape at which all the data on the virtual tape is consistent. If your gateway crashes, virtual tapes that have recovery points can be recovered to a new gateway.

    ", + "DescribeTapes": "

    Returns a description of the virtual tapes specified by their Amazon Resource Names (ARNs). If a TapeARN is not specified, returns a description of all virtual tapes associated with the specified gateway.

    ", + "DescribeUploadBuffer": "

    Returns information about the upload buffer of a gateway. This operation is supported for both the gateway-stored and gateway-cached volume architectures.

    The response includes disk IDs that are configured as upload buffer space, and it includes the amount of upload buffer space allocated and used.

    ", + "DescribeVTLDevices": "

    Returns a description of virtual tape library (VTL) devices for the specified gateway. In the response, AWS Storage Gateway returns VTL device information.

    The list of VTL devices must be from one gateway.

    ", + "DescribeWorkingStorage": "

    Returns information about the working storage of a gateway. This operation is supported only for the gateway-stored volume architecture. This operation is deprecated in cached-volumes API version (20120630). Use DescribeUploadBuffer instead.

    Working storage is also referred to as upload buffer. You can also use the DescribeUploadBuffer operation to view the upload buffer of a stored-volume gateway.

    The response includes disk IDs that are configured as working storage, and it includes the amount of working storage allocated and used.

    ", + "DisableGateway": "

    Disables a gateway when the gateway is no longer functioning. For example, if your gateway VM is damaged, you can disable the gateway so you can recover virtual tapes.

    Use this operation for a gateway-VTL that is not reachable or not functioning.

    Once a gateway is disabled, it cannot be enabled.

    ", + "ListGateways": "

    Lists gateways owned by an AWS account in a region specified in the request. The returned list is ordered by gateway Amazon Resource Name (ARN).

    By default, the operation returns a maximum of 100 gateways. This operation supports pagination that allows you to optionally reduce the number of gateways returned in a response.

    If you have more gateways than are returned in a response (that is, the response returns only a truncated list of your gateways), the response contains a marker that you can specify in your next request to fetch the next page of gateways.
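
    A sketch of the Marker-based pagination described above, using the generated Go client (region and page size are arbitrary choices):

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/storagegateway"
        )

        func main() {
            svc := storagegateway.New(session.New(&aws.Config{Region: aws.String("us-west-2")}))

            in := &storagegateway.ListGatewaysInput{Limit: aws.Int64(25)}
            for {
                out, err := svc.ListGateways(in)
                if err != nil {
                    log.Fatal(err)
                }
                for _, gw := range out.Gateways {
                    fmt.Println(aws.StringValue(gw.GatewayARN))
                }
                if out.Marker == nil { // absent Marker means the list is complete
                    break
                }
                in.Marker = out.Marker // fetch the next page
            }
        }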

    ", + "ListLocalDisks": "

    Returns a list of the gateway's local disks. To specify which gateway to describe, you use the Amazon Resource Name (ARN) of the gateway in the body of the request.

    The request returns a list of all disks, specifying which are configured as working storage, cache storage, or stored volume or not configured at all. The response includes a DiskStatus field. This field can have a value of present (the disk is available to use), missing (the disk is no longer connected to the gateway), or mismatch (the disk node is occupied by a disk that has incorrect metadata or the disk content is corrupted).

    ", + "ListTagsForResource": "

    Lists the tags that have been added to the specified resource.

    ", + "ListTapes": "

    Lists virtual tapes in your virtual tape library (VTL) and your virtual tape shelf (VTS). You specify the tapes to list by specifying one or more tape Amazon Resource Names (ARNs). If you don't specify a tape ARN, the operation lists all virtual tapes in both your VTL and VTS.

    This operation supports pagination. By default, the operation returns a maximum of up to 100 tapes. You can optionally specify the Limit parameter in the body to limit the number of tapes in the response. If the number of tapes returned in the response is truncated, the response includes a Marker element that you can use in your subsequent request to retrieve the next set of tapes.

    ", + "ListVolumeInitiators": "

    Lists iSCSI initiators that are connected to a volume. You can use this operation to determine whether a volume is being used or not.

    ", + "ListVolumeRecoveryPoints": "

    Lists the recovery points for a specified gateway. This operation is supported only for the gateway-cached volume architecture.

    Each gateway-cached volume has one recovery point. A volume recovery point is a point in time at which all data of the volume is consistent and from which you can create a snapshot. To create a snapshot from a volume recovery point use the CreateSnapshotFromVolumeRecoveryPoint operation.

    ", + "ListVolumes": "

    Lists the iSCSI stored volumes of a gateway. Results are sorted by volume ARN. The response includes only the volume ARNs. If you want additional volume information, use the DescribeStorediSCSIVolumes API.

    The operation supports pagination. By default, the operation returns a maximum of up to 100 volumes. You can optionally specify the Limit field in the body to limit the number of volumes in the response. If the number of volumes returned in the response is truncated, the response includes a Marker field. You can use this Marker value in your subsequent request to retrieve the next set of volumes.
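
    The same Marker pattern applies to volumes; a sketch (imports and client construction as in the earlier examples):

        // listAllVolumes pages through a gateway's volume ARNs using Marker.
        func listAllVolumes(svc *storagegateway.StorageGateway, gatewayARN string) ([]string, error) {
            var arns []string
            in := &storagegateway.ListVolumesInput{
                GatewayARN: aws.String(gatewayARN),
                Limit:      aws.Int64(50),
            }
            for {
                out, err := svc.ListVolumes(in)
                if err != nil {
                    return nil, err
                }
                for _, v := range out.VolumeInfos {
                    arns = append(arns, aws.StringValue(v.VolumeARN))
                }
                if out.Marker == nil { // last page
                    return arns, nil
                }
                in.Marker = out.Marker
            }
        }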

    ", + "RemoveTagsFromResource": "

    Removes one or more tags from the specified resource.

    ", + "ResetCache": "

    Resets all cache disks that have encountered an error and makes the disks available for reconfiguration as cache storage. If your cache disk encounters an error, the gateway prevents read and write operations on virtual tapes in the gateway. For example, an error can occur when a disk is corrupted or removed from the gateway. When a cache is reset, the gateway loses its cache storage. At this point you can reconfigure the disks as cache disks.

    If the cache disk you are resetting contains data that has not been uploaded to Amazon S3 yet, that data can be lost. After you reset cache disks, there will be no configured cache disks left in the gateway, so you must configure at least one new cache disk for your gateway to function properly.

    ", + "RetrieveTapeArchive": "

    Retrieves an archived virtual tape from the virtual tape shelf (VTS) to a gateway-VTL. Virtual tapes archived in the VTS are not associated with any gateway. However, after a tape is retrieved, it is associated with a gateway, even though it is also listed in the VTS.

    Once a tape is successfully retrieved to a gateway, it cannot be retrieved again to another gateway. You must archive the tape again before you can retrieve it to another gateway.

    ", + "RetrieveTapeRecoveryPoint": "

    Retrieves the recovery point for the specified virtual tape.

    A recovery point is a point-in-time view of a virtual tape at which all the data on the tape is consistent. If your gateway crashes, virtual tapes that have recovery points can be recovered to a new gateway.

    The virtual tape can be retrieved to only one gateway. The retrieved tape is read-only. The virtual tape can be retrieved to only a gateway-VTL. There is no charge for retrieving recovery points.

    ", + "SetLocalConsolePassword": "

    Sets the password for your VM local console. When you log in to the local console for the first time, you log in to the VM with the default credentials. We recommend that you set a new password. You don't need to know the default password to set a new password.
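
    A sketch of setting the console password with the generated client (the password value is invented and subject to the LocalConsolePassword shape constraints):

        // setConsolePassword replaces the default VM local console password.
        func setConsolePassword(svc *storagegateway.StorageGateway, gatewayARN string) error {
            _, err := svc.SetLocalConsolePassword(&storagegateway.SetLocalConsolePasswordInput{
                GatewayARN:           aws.String(gatewayARN),
                LocalConsolePassword: aws.String("correct-horse-battery"), // invented example
            })
            return err
        }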

    ", + "ShutdownGateway": "

    Shuts down a gateway. To specify which gateway to shut down, use the Amazon Resource Name (ARN) of the gateway in the body of your request.

    The operation shuts down the gateway service component running in the storage gateway's virtual machine (VM) and not the VM.

    If you want to shut down the VM, it is recommended that you first shut down the gateway component in the VM to avoid unpredictable conditions.

    After the gateway is shut down, you cannot call any other API except StartGateway, DescribeGatewayInformation, and ListGateways. For more information, see ActivateGateway. Your applications cannot read from or write to the gateway's storage volumes, and there are no snapshots taken.

    When you make a shutdown request, you will get a 200 OK success response immediately. However, it might take some time for the gateway to shut down. You can call the DescribeGatewayInformation API to check the status. For more information, see ActivateGateway.

    If you do not intend to use the gateway again, you must delete the gateway (using DeleteGateway) so that you no longer pay software charges associated with the gateway.
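
    Because the 200 OK response precedes the actual shutdown, a caller typically polls DescribeGatewayInformation; a sketch (the "SHUTDOWN" state string is an assumption, and the time package is needed in addition to the imports shown earlier):

        // shutdownAndWait issues the shutdown and polls until it takes effect.
        func shutdownAndWait(svc *storagegateway.StorageGateway, gatewayARN string) error {
            if _, err := svc.ShutdownGateway(&storagegateway.ShutdownGatewayInput{
                GatewayARN: aws.String(gatewayARN),
            }); err != nil {
                return err
            }
            for {
                out, err := svc.DescribeGatewayInformation(&storagegateway.DescribeGatewayInformationInput{
                    GatewayARN: aws.String(gatewayARN),
                })
                if err != nil {
                    return err
                }
                if aws.StringValue(out.GatewayState) == "SHUTDOWN" { // assumed state value
                    return nil
                }
                time.Sleep(10 * time.Second)
            }
        }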

    ", + "StartGateway": "

    Starts a gateway that you previously shut down (see ShutdownGateway). After the gateway starts, you can make other API calls, your applications can read from and write to the gateway's storage volumes, and you can take snapshot backups.

    When you make a request, you will get a 200 OK success response immediately. However, it might take some time for the gateway to be ready. You should call DescribeGatewayInformation and check the status before making any additional API calls. For more information, see ActivateGateway.

    To specify which gateway to start, use the Amazon Resource Name (ARN) of the gateway in your request.

    ", + "UpdateBandwidthRateLimit": "

    Updates the bandwidth rate limits of a gateway. You can update both the upload and download bandwidth rate limit or specify only one of the two. If you don't set a bandwidth rate limit, the existing rate limit remains.

    By default, a gateway's bandwidth rate limits are not set. If you don't set any limit, the gateway does not have any limitations on its bandwidth usage and could potentially use the maximum available bandwidth.

    To specify which gateway to update, use the Amazon Resource Name (ARN) of the gateway in your request.
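
    A sketch of setting both limits (the bits-per-second values are arbitrary: 50 Mbps up, 100 Mbps down):

        // setRateLimits caps average upload and download bandwidth.
        func setRateLimits(svc *storagegateway.StorageGateway, gatewayARN string) error {
            _, err := svc.UpdateBandwidthRateLimit(&storagegateway.UpdateBandwidthRateLimitInput{
                GatewayARN:                           aws.String(gatewayARN),
                AverageUploadRateLimitInBitsPerSec:   aws.Int64(50 * 1000 * 1000),
                AverageDownloadRateLimitInBitsPerSec: aws.Int64(100 * 1000 * 1000),
            })
            return err
        }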

    ", + "UpdateChapCredentials": "

    Updates the Challenge-Handshake Authentication Protocol (CHAP) credentials for a specified iSCSI target. By default, a gateway does not have CHAP enabled; however, for added security, you might use it.

    When you update CHAP credentials, all existing connections on the target are closed and initiators must reconnect with the new credentials.
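
    A sketch of rotating CHAP secrets (the initiator name and secrets are invented; per the constraints below, secrets must be 12 to 16 bytes):

        // rotateChap updates CHAP secrets for one target-initiator pair.
        func rotateChap(svc *storagegateway.StorageGateway, targetARN string) error {
            _, err := svc.UpdateChapCredentials(&storagegateway.UpdateChapCredentialsInput{
                TargetARN:                     aws.String(targetARN),
                InitiatorName:                 aws.String("iqn.1991-05.com.microsoft:host1"),
                SecretToAuthenticateInitiator: aws.String("initiatorpw123"), // 12-16 bytes
                SecretToAuthenticateTarget:    aws.String("targetpw45678"),  // optional, mutual CHAP
            })
            return err
        }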

    ", + "UpdateGatewayInformation": "

    Updates a gateway's metadata, which includes the gateway's name and time zone. To specify which gateway to update, use the Amazon Resource Name (ARN) of the gateway in your request.

    For gateways activated after September 2, 2015, the gateway's ARN contains the gateway ID rather than the gateway name. However, changing the name of the gateway has no effect on the gateway's ARN.

    ", + "UpdateGatewaySoftwareNow": "

    Updates the gateway virtual machine (VM) software. The request immediately triggers the software update.

    When you make this request, you get a 200 OK success response immediately. However, it might take some time for the update to complete. You can call DescribeGatewayInformation to verify the gateway is in the STATE_RUNNING state.

    A software update forces a system restart of your gateway. You can minimize the chance of any disruption to your applications by increasing your iSCSI Initiators' timeouts. For more information about increasing iSCSI Initiator timeouts for Windows and Linux, see Customizing Your Windows iSCSI Settings and Customizing Your Linux iSCSI Settings, respectively.

    ", + "UpdateMaintenanceStartTime": "

    Updates a gateway's weekly maintenance start time information, including day and time of the week. The maintenance time is the time in your gateway's time zone.

    ", + "UpdateSnapshotSchedule": "

    Updates a snapshot schedule configured for a gateway volume.

    The default snapshot schedule for a volume is once every 24 hours, starting at the creation time of the volume. You can use this API to change the snapshot schedule configured for the volume.

    In the request you must identify the gateway volume whose snapshot schedule you want to update, and the schedule information, including when you want the snapshot to begin on a day and the frequency (in hours) of snapshots.
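
    A sketch mapping that request onto the generated client (the start hour, recurrence, and description are arbitrary example values):

        // scheduleSnapshots sets a daily snapshot at 03:00 gateway time.
        func scheduleSnapshots(svc *storagegateway.StorageGateway, volumeARN string) error {
            _, err := svc.UpdateSnapshotSchedule(&storagegateway.UpdateSnapshotScheduleInput{
                VolumeARN:         aws.String(volumeARN),
                StartAt:           aws.Int64(3),  // hour of day (0 to 23)
                RecurrenceInHours: aws.Int64(24), // snapshot frequency in hours
                Description:       aws.String("nightly snapshot"),
            })
            return err
        }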

    ", + "UpdateVTLDeviceType": "

    Updates the type of medium changer in a gateway-VTL. When you activate a gateway-VTL, you select a medium changer type for the gateway-VTL. This operation enables you to select a different type of medium changer after a gateway-VTL is activated.

    " + }, + "shapes": { + "ActivateGatewayInput": { + "base": "

    A JSON object containing one or more of the following fields:

    ", + "refs": { + } + }, + "ActivateGatewayOutput": { + "base": "

    AWS Storage Gateway returns the Amazon Resource Name (ARN) of the activated gateway. It is a string made of information such as your account, gateway name, and region. This ARN is used to reference the gateway in other API operations as well as resource-based authorization.

    For gateways activated prior to September 02, 2015, the gateway ARN contains the gateway name rather than the gateway ID. Changing the name of the gateway has no effect on the gateway ARN.

    ", + "refs": { + } + }, + "ActivationKey": { + "base": null, + "refs": { + "ActivateGatewayInput$ActivationKey": "

    Your gateway activation key. You can obtain the activation key by sending an HTTP GET request with redirects enabled to the gateway IP address (port 80). The redirect URL returned in the response provides you the activation key for your gateway in the query string parameter activationKey. It may also include other activation-related parameters; however, these are merely defaults -- the arguments you pass to the ActivateGateway API call determine the actual configuration of your gateway.

    " + } + }, + "AddCacheInput": { + "base": null, + "refs": { + } + }, + "AddCacheOutput": { + "base": null, + "refs": { + } + }, + "AddTagsToResourceInput": { + "base": "

    AddTagsToResourceInput

    ", + "refs": { + } + }, + "AddTagsToResourceOutput": { + "base": "

    AddTagsToResourceOutput

    ", + "refs": { + } + }, + "AddUploadBufferInput": { + "base": null, + "refs": { + } + }, + "AddUploadBufferOutput": { + "base": null, + "refs": { + } + }, + "AddWorkingStorageInput": { + "base": "

    A JSON object containing one or more of the following fields:

    ", + "refs": { + } + }, + "AddWorkingStorageOutput": { + "base": "

    A JSON object containing the Amazon Resource Name (ARN) of the gateway for which working storage was configured.

    ", + "refs": { + } + }, + "BandwidthDownloadRateLimit": { + "base": null, + "refs": { + "DescribeBandwidthRateLimitOutput$AverageDownloadRateLimitInBitsPerSec": "

    The average download bandwidth rate limit in bits per second. This field does not appear in the response if the download rate limit is not set.

    ", + "UpdateBandwidthRateLimitInput$AverageDownloadRateLimitInBitsPerSec": "

    The average download bandwidth rate limit in bits per second.

    " + } + }, + "BandwidthType": { + "base": null, + "refs": { + "DeleteBandwidthRateLimitInput$BandwidthType": null + } + }, + "BandwidthUploadRateLimit": { + "base": null, + "refs": { + "DescribeBandwidthRateLimitOutput$AverageUploadRateLimitInBitsPerSec": "

    The average upload bandwidth rate limit in bits per second. This field does not appear in the response if the upload rate limit is not set.

    ", + "UpdateBandwidthRateLimitInput$AverageUploadRateLimitInBitsPerSec": "

    The average upload bandwidth rate limit in bits per second.

    " + } + }, + "CachediSCSIVolume": { + "base": null, + "refs": { + "CachediSCSIVolumes$member": null + } + }, + "CachediSCSIVolumes": { + "base": null, + "refs": { + "DescribeCachediSCSIVolumesOutput$CachediSCSIVolumes": "

    An array of objects where each object contains metadata about one cached volume.

    " + } + }, + "CancelArchivalInput": { + "base": "

    CancelArchivalInput

    ", + "refs": { + } + }, + "CancelArchivalOutput": { + "base": "

    CancelArchivalOutput

    ", + "refs": { + } + }, + "CancelRetrievalInput": { + "base": "

    CancelRetrievalInput

    ", + "refs": { + } + }, + "CancelRetrievalOutput": { + "base": "

    CancelRetrievalOutput

    ", + "refs": { + } + }, + "ChapCredentials": { + "base": null, + "refs": { + "DescribeChapCredentialsOutput$ChapCredentials": "

    An array of ChapInfo objects that represent CHAP credentials. Each object in the array contains CHAP credential information for one target-initiator pair. If no CHAP credentials are set, an empty array is returned. CHAP credential information is provided in a JSON object with the following fields:

    • InitiatorName: The iSCSI initiator that connects to the target.

    • SecretToAuthenticateInitiator: The secret key that the initiator (for example, the Windows client) must provide to participate in mutual CHAP with the target.

    • SecretToAuthenticateTarget: The secret key that the target must provide to participate in mutual CHAP with the initiator (e.g. Windows client).

    • TargetARN: The Amazon Resource Name (ARN) of the storage volume.

    " + } + }, + "ChapInfo": { + "base": "

    Describes Challenge-Handshake Authentication Protocol (CHAP) information that supports authentication between your gateway and iSCSI initiators.

    ", + "refs": { + "ChapCredentials$member": null + } + }, + "ChapSecret": { + "base": null, + "refs": { + "ChapInfo$SecretToAuthenticateInitiator": "

    The secret key that the initiator (for example, the Windows client) must provide to participate in mutual CHAP with the target.

    ", + "ChapInfo$SecretToAuthenticateTarget": "

    The secret key that the target must provide to participate in mutual CHAP with the initiator (e.g. Windows client).

    ", + "UpdateChapCredentialsInput$SecretToAuthenticateInitiator": "

    The secret key that the initiator (for example, the Windows client) must provide to participate in mutual CHAP with the target.

    The secret key must be between 12 and 16 bytes when encoded in UTF-8.

    ", + "UpdateChapCredentialsInput$SecretToAuthenticateTarget": "

    The secret key that the target must provide to participate in mutual CHAP with the initiator (e.g. Windows client).

    Byte constraints: Minimum bytes of 12. Maximum bytes of 16.

    The secret key must be between 12 and 16 bytes when encoded in UTF-8.

    " + } + }, + "ClientToken": { + "base": null, + "refs": { + "CreateCachediSCSIVolumeInput$ClientToken": null, + "CreateTapesInput$ClientToken": "

    A unique identifier that you use to retry a request. If you retry a request, use the same ClientToken you specified in the initial request.

    Using the same ClientToken prevents creating the tape multiple times.

    " + } + }, + "CreateCachediSCSIVolumeInput": { + "base": null, + "refs": { + } + }, + "CreateCachediSCSIVolumeOutput": { + "base": null, + "refs": { + } + }, + "CreateSnapshotFromVolumeRecoveryPointInput": { + "base": null, + "refs": { + } + }, + "CreateSnapshotFromVolumeRecoveryPointOutput": { + "base": null, + "refs": { + } + }, + "CreateSnapshotInput": { + "base": "

    A JSON object containing one or more of the following fields:

    ", + "refs": { + } + }, + "CreateSnapshotOutput": { + "base": "

    A JSON object containing the following fields:

    ", + "refs": { + } + }, + "CreateStorediSCSIVolumeInput": { + "base": "

    A JSON object containing one or more of the following fields:

    ", + "refs": { + } + }, + "CreateStorediSCSIVolumeOutput": { + "base": "

    A JSON object containing the following fields:

    ", + "refs": { + } + }, + "CreateTapeWithBarcodeInput": { + "base": "

    CreateTapeWithBarcodeInput

    ", + "refs": { + } + }, + "CreateTapeWithBarcodeOutput": { + "base": "

    CreateTapeOutput

    ", + "refs": { + } + }, + "CreateTapesInput": { + "base": "

    CreateTapesInput

    ", + "refs": { + } + }, + "CreateTapesOutput": { + "base": "

    CreateTapeOutput

    ", + "refs": { + } + }, + "DayOfWeek": { + "base": null, + "refs": { + "DescribeMaintenanceStartTimeOutput$DayOfWeek": null, + "UpdateMaintenanceStartTimeInput$DayOfWeek": "

    The maintenance start time day of the week.

    " + } + }, + "DeleteBandwidthRateLimitInput": { + "base": null, + "refs": { + } + }, + "DeleteBandwidthRateLimitOutput": { + "base": "

    A JSON object containing the Amazon Resource Name (ARN) of the gateway whose bandwidth rate information was deleted.

    ", + "refs": { + } + }, + "DeleteChapCredentialsInput": { + "base": "

    A JSON object containing one or more of the following fields:

    ", + "refs": { + } + }, + "DeleteChapCredentialsOutput": { + "base": "

    A JSON object containing the following fields:

    ", + "refs": { + } + }, + "DeleteGatewayInput": { + "base": "

    A JSON object containing the ID of the gateway to delete.

    ", + "refs": { + } + }, + "DeleteGatewayOutput": { + "base": "

    A JSON object containing the ID of the deleted gateway.

    ", + "refs": { + } + }, + "DeleteSnapshotScheduleInput": { + "base": null, + "refs": { + } + }, + "DeleteSnapshotScheduleOutput": { + "base": null, + "refs": { + } + }, + "DeleteTapeArchiveInput": { + "base": "

    DeleteTapeArchiveInput

    ", + "refs": { + } + }, + "DeleteTapeArchiveOutput": { + "base": "

    DeleteTapeArchiveOutput

    ", + "refs": { + } + }, + "DeleteTapeInput": { + "base": "

    DeleteTapeInput

    ", + "refs": { + } + }, + "DeleteTapeOutput": { + "base": "

    DeleteTapeOutput

    ", + "refs": { + } + }, + "DeleteVolumeInput": { + "base": "

    A JSON object containing the DeleteVolumeInput$VolumeARN to delete.

    ", + "refs": { + } + }, + "DeleteVolumeOutput": { + "base": "

    A JSON object containing the Amazon Resource Name (ARN) of the storage volume that was deleted.

    ", + "refs": { + } + }, + "DescribeBandwidthRateLimitInput": { + "base": "

    A JSON object containing the Amazon Resource Name (ARN) of the gateway.

    ", + "refs": { + } + }, + "DescribeBandwidthRateLimitOutput": { + "base": "

    A JSON object containing the following fields:

    ", + "refs": { + } + }, + "DescribeCacheInput": { + "base": null, + "refs": { + } + }, + "DescribeCacheOutput": { + "base": null, + "refs": { + } + }, + "DescribeCachediSCSIVolumesInput": { + "base": null, + "refs": { + } + }, + "DescribeCachediSCSIVolumesOutput": { + "base": "

    A JSON object containing the following fields:

    ", + "refs": { + } + }, + "DescribeChapCredentialsInput": { + "base": "

    A JSON object containing the Amazon Resource Name (ARN) of the iSCSI volume target.

    ", + "refs": { + } + }, + "DescribeChapCredentialsOutput": { + "base": "

    A JSON object containing an array of ChapInfo objects, one for each target-initiator pair.

    ", + "refs": { + } + }, + "DescribeGatewayInformationInput": { + "base": "

    A JSON object containing the ID of the gateway.

    ", + "refs": { + } + }, + "DescribeGatewayInformationOutput": { + "base": "

    A JSON object containing the following fields:

    ", + "refs": { + } + }, + "DescribeMaintenanceStartTimeInput": { + "base": "

    A JSON object containing the Amazon Resource Name (ARN) of the gateway.

    ", + "refs": { + } + }, + "DescribeMaintenanceStartTimeOutput": { + "base": null, + "refs": { + } + }, + "DescribeSnapshotScheduleInput": { + "base": "

    A JSON object containing the DescribeSnapshotScheduleInput$VolumeARN of the volume.

    ", + "refs": { + } + }, + "DescribeSnapshotScheduleOutput": { + "base": null, + "refs": { + } + }, + "DescribeStorediSCSIVolumesInput": { + "base": "

    A JSON object containing a list of DescribeStorediSCSIVolumesInput$VolumeARNs.

    ", + "refs": { + } + }, + "DescribeStorediSCSIVolumesOutput": { + "base": null, + "refs": { + } + }, + "DescribeTapeArchivesInput": { + "base": "

    DescribeTapeArchivesInput

    ", + "refs": { + } + }, + "DescribeTapeArchivesOutput": { + "base": "

    DescribeTapeArchivesOutput

    ", + "refs": { + } + }, + "DescribeTapeRecoveryPointsInput": { + "base": "

    DescribeTapeRecoveryPointsInput

    ", + "refs": { + } + }, + "DescribeTapeRecoveryPointsOutput": { + "base": "

    DescribeTapeRecoveryPointsOutput

    ", + "refs": { + } + }, + "DescribeTapesInput": { + "base": "

    DescribeTapesInput

    ", + "refs": { + } + }, + "DescribeTapesOutput": { + "base": "

    DescribeTapesOutput

    ", + "refs": { + } + }, + "DescribeUploadBufferInput": { + "base": null, + "refs": { + } + }, + "DescribeUploadBufferOutput": { + "base": null, + "refs": { + } + }, + "DescribeVTLDevicesInput": { + "base": "

    DescribeVTLDevicesInput

    ", + "refs": { + } + }, + "DescribeVTLDevicesOutput": { + "base": "

    DescribeVTLDevicesOutput

    ", + "refs": { + } + }, + "DescribeWorkingStorageInput": { + "base": "

    A JSON object containing the Amazon Resource Name (ARN) of the gateway.

    ", + "refs": { + } + }, + "DescribeWorkingStorageOutput": { + "base": "

    A JSON object containing the following fields:

    ", + "refs": { + } + }, + "Description": { + "base": null, + "refs": { + "DescribeSnapshotScheduleOutput$Description": null, + "UpdateSnapshotScheduleInput$Description": "

    Optional description of the snapshot that overwrites the existing description.

    " + } + }, + "DeviceType": { + "base": null, + "refs": { + "UpdateVTLDeviceTypeInput$DeviceType": "

    The type of medium changer you want to select.

    Valid Values: \"STK-L700\", \"AWS-Gateway-VTL\"

    " + } + }, + "DeviceiSCSIAttributes": { + "base": "

    Lists iSCSI information about a VTL device.

    ", + "refs": { + "VTLDevice$DeviceiSCSIAttributes": "

    A list of iSCSI information about a VTL device.

    " + } + }, + "DisableGatewayInput": { + "base": "

    DisableGatewayInput

    ", + "refs": { + } + }, + "DisableGatewayOutput": { + "base": "

    DisableGatewayOutput

    ", + "refs": { + } + }, + "Disk": { + "base": null, + "refs": { + "Disks$member": null + } + }, + "DiskAllocationType": { + "base": null, + "refs": { + "Disk$DiskAllocationType": null + } + }, + "DiskId": { + "base": null, + "refs": { + "CreateStorediSCSIVolumeInput$DiskId": "

    The unique identifier for the gateway local disk that is configured as a stored volume. Use ListLocalDisks to list disk IDs for a gateway.

    ", + "Disk$DiskId": null, + "DiskIds$member": null, + "StorediSCSIVolume$VolumeDiskId": null + } + }, + "DiskIds": { + "base": null, + "refs": { + "AddCacheInput$DiskIds": null, + "AddUploadBufferInput$DiskIds": null, + "AddWorkingStorageInput$DiskIds": "

    An array of strings that identify disks that are to be configured as working storage. Each string must have a minimum length of 1 and a maximum length of 300. You can get the disk IDs from the ListLocalDisks API.

    ", + "DescribeCacheOutput$DiskIds": null, + "DescribeUploadBufferOutput$DiskIds": null, + "DescribeWorkingStorageOutput$DiskIds": "

    An array of the gateway's local disk IDs that are configured as working storage. Each local disk ID is specified as a string (minimum length of 1 and maximum length of 300). If no local disks are configured as working storage, then the DiskIds array is empty.

    " + } + }, + "Disks": { + "base": null, + "refs": { + "ListLocalDisksOutput$Disks": null + } + }, + "DoubleObject": { + "base": null, + "refs": { + "CachediSCSIVolume$VolumeProgress": null, + "StorediSCSIVolume$VolumeProgress": null, + "Tape$Progress": "

    For archiving virtual tapes, indicates how much data remains to be uploaded before archiving is complete.

    Range: 0 (not started) to 100 (complete).

    " + } + }, + "ErrorCode": { + "base": null, + "refs": { + "StorageGatewayError$errorCode": "

    Additional information about the error.

    " + } + }, + "GatewayARN": { + "base": "

    The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.

    ", + "refs": { + "ActivateGatewayOutput$GatewayARN": null, + "AddCacheInput$GatewayARN": null, + "AddCacheOutput$GatewayARN": null, + "AddUploadBufferInput$GatewayARN": null, + "AddUploadBufferOutput$GatewayARN": null, + "AddWorkingStorageInput$GatewayARN": null, + "AddWorkingStorageOutput$GatewayARN": null, + "CancelArchivalInput$GatewayARN": null, + "CancelRetrievalInput$GatewayARN": null, + "CreateCachediSCSIVolumeInput$GatewayARN": null, + "CreateStorediSCSIVolumeInput$GatewayARN": null, + "CreateTapeWithBarcodeInput$GatewayARN": "

    The unique Amazon Resource Name (ARN) that represents the gateway to associate the virtual tape with. Use the ListGateways operation to return a list of gateways for your account and region.

    ", + "CreateTapesInput$GatewayARN": "

    The unique Amazon Resource Name (ARN) that represents the gateway to associate the virtual tapes with. Use the ListGateways operation to return a list of gateways for your account and region.

    ", + "DeleteBandwidthRateLimitInput$GatewayARN": null, + "DeleteBandwidthRateLimitOutput$GatewayARN": null, + "DeleteGatewayInput$GatewayARN": null, + "DeleteGatewayOutput$GatewayARN": null, + "DeleteTapeInput$GatewayARN": "

    The unique Amazon Resource Name (ARN) of the gateway that the virtual tape to delete is associated with. Use the ListGateways operation to return a list of gateways for your account and region.

    ", + "DescribeBandwidthRateLimitInput$GatewayARN": null, + "DescribeBandwidthRateLimitOutput$GatewayARN": null, + "DescribeCacheInput$GatewayARN": null, + "DescribeCacheOutput$GatewayARN": null, + "DescribeGatewayInformationInput$GatewayARN": null, + "DescribeGatewayInformationOutput$GatewayARN": null, + "DescribeMaintenanceStartTimeInput$GatewayARN": null, + "DescribeMaintenanceStartTimeOutput$GatewayARN": null, + "DescribeTapeRecoveryPointsInput$GatewayARN": null, + "DescribeTapeRecoveryPointsOutput$GatewayARN": null, + "DescribeTapesInput$GatewayARN": null, + "DescribeUploadBufferInput$GatewayARN": null, + "DescribeUploadBufferOutput$GatewayARN": null, + "DescribeVTLDevicesInput$GatewayARN": null, + "DescribeVTLDevicesOutput$GatewayARN": null, + "DescribeWorkingStorageInput$GatewayARN": null, + "DescribeWorkingStorageOutput$GatewayARN": null, + "DisableGatewayInput$GatewayARN": null, + "DisableGatewayOutput$GatewayARN": "

    The unique Amazon Resource Name (ARN) of the disabled gateway.

    ", + "GatewayInfo$GatewayARN": "

    The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.

    ", + "ListLocalDisksInput$GatewayARN": null, + "ListLocalDisksOutput$GatewayARN": null, + "ListVolumeRecoveryPointsInput$GatewayARN": null, + "ListVolumeRecoveryPointsOutput$GatewayARN": null, + "ListVolumesInput$GatewayARN": null, + "ListVolumesOutput$GatewayARN": null, + "ResetCacheInput$GatewayARN": null, + "ResetCacheOutput$GatewayARN": null, + "RetrieveTapeArchiveInput$GatewayARN": "

    The Amazon Resource Name (ARN) of the gateway you want to retrieve the virtual tape to. Use the ListGateways operation to return a list of gateways for your account and region.

    You retrieve archived virtual tapes to only one gateway and the gateway must be a gateway-VTL.

    ", + "RetrieveTapeRecoveryPointInput$GatewayARN": null, + "SetLocalConsolePasswordInput$GatewayARN": null, + "SetLocalConsolePasswordOutput$GatewayARN": null, + "ShutdownGatewayInput$GatewayARN": null, + "ShutdownGatewayOutput$GatewayARN": null, + "StartGatewayInput$GatewayARN": null, + "StartGatewayOutput$GatewayARN": null, + "TapeArchive$RetrievedTo": "

    The Amazon Resource Name (ARN) of the gateway-VTL that the virtual tape is being retrieved to.

    The virtual tape is retrieved from the virtual tape shelf (VTS).

    ", + "TapeInfo$GatewayARN": "

    The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.

    ", + "UpdateBandwidthRateLimitInput$GatewayARN": null, + "UpdateBandwidthRateLimitOutput$GatewayARN": null, + "UpdateGatewayInformationInput$GatewayARN": null, + "UpdateGatewayInformationOutput$GatewayARN": null, + "UpdateGatewaySoftwareNowInput$GatewayARN": null, + "UpdateGatewaySoftwareNowOutput$GatewayARN": null, + "UpdateMaintenanceStartTimeInput$GatewayARN": null, + "UpdateMaintenanceStartTimeOutput$GatewayARN": null, + "VolumeInfo$GatewayARN": null + } + }, + "GatewayId": { + "base": null, + "refs": { + "DescribeGatewayInformationOutput$GatewayId": "

    The unique identifier assigned to your gateway during activation. This ID becomes part of the gateway Amazon Resource Name (ARN), which you use as input for other operations.

    ", + "GatewayInfo$GatewayId": "

    The unique identifier assigned to your gateway during activation. This ID becomes part of the gateway Amazon Resource Name (ARN), which you use as input for other operations.

    ", + "VolumeInfo$GatewayId": "

    The unique identifier assigned to your gateway during activation. This ID becomes part of the gateway Amazon Resource Name (ARN), which you use as input for other operations.

    Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens (-).

    " + } + }, + "GatewayInfo": { + "base": "

    Describes a gateway object.

    ", + "refs": { + "Gateways$member": null + } + }, + "GatewayName": { + "base": "

    The name you configured for your gateway.

    ", + "refs": { + "ActivateGatewayInput$GatewayName": "

    The name you configured for your gateway.

    ", + "UpdateGatewayInformationInput$GatewayName": null + } + }, + "GatewayNetworkInterfaces": { + "base": null, + "refs": { + "DescribeGatewayInformationOutput$GatewayNetworkInterfaces": "

    A NetworkInterface array that contains descriptions of the gateway network interfaces.

    " + } + }, + "GatewayOperationalState": { + "base": null, + "refs": { + "GatewayInfo$GatewayOperationalState": "

    The state of the gateway.

    Valid Values: DISABLED or ACTIVE

    " + } + }, + "GatewayState": { + "base": null, + "refs": { + "DescribeGatewayInformationOutput$GatewayState": "

    A value that indicates the operating state of the gateway.

    " + } + }, + "GatewayTimezone": { + "base": null, + "refs": { + "ActivateGatewayInput$GatewayTimezone": "

    A value that indicates the time zone you want to set for the gateway. The time zone is used, for example, for scheduling snapshots and your gateway's maintenance schedule.

    ", + "DescribeGatewayInformationOutput$GatewayTimezone": "

    A value that indicates the time zone configured for the gateway.

    ", + "DescribeMaintenanceStartTimeOutput$Timezone": null, + "DescribeSnapshotScheduleOutput$Timezone": null, + "UpdateGatewayInformationInput$GatewayTimezone": null + } + }, + "GatewayType": { + "base": null, + "refs": { + "ActivateGatewayInput$GatewayType": "

    A value that defines the type of gateway to activate. The type specified is critical to all later functions of the gateway and cannot be changed after activation. The default value is STORED.

    ", + "DescribeGatewayInformationOutput$GatewayType": "

    The type of the gateway.

    ", + "GatewayInfo$GatewayType": "

    The type of the gateway.

    " + } + }, + "Gateways": { + "base": null, + "refs": { + "ListGatewaysOutput$Gateways": null + } + }, + "HourOfDay": { + "base": null, + "refs": { + "DescribeMaintenanceStartTimeOutput$HourOfDay": null, + "DescribeSnapshotScheduleOutput$StartAt": null, + "UpdateMaintenanceStartTimeInput$HourOfDay": "

    The hour component of the maintenance start time represented as hh, where hh is the hour (00 to 23). The hour of the day is in the time zone of the gateway.

    ", + "UpdateSnapshotScheduleInput$StartAt": "

    The hour of the day at which the snapshot schedule begins represented as hh, where hh is the hour (0 to 23). The hour of the day is in the time zone of the gateway.

    " + } + }, + "Initiator": { + "base": null, + "refs": { + "Initiators$member": null + } + }, + "Initiators": { + "base": null, + "refs": { + "ListVolumeInitiatorsOutput$Initiators": "

    The host names and port numbers of all iSCSI initiators that are connected to the gateway.

    " + } + }, + "InternalServerError": { + "base": "

    An internal server error has occurred during the request. For more information, see the error and message fields.

    ", + "refs": { + } + }, + "InvalidGatewayRequestException": { + "base": "

    An exception occurred because an invalid gateway request was issued to the service. For more information, see the error and message fields.

    ", + "refs": { + } + }, + "IqnName": { + "base": null, + "refs": { + "ChapInfo$InitiatorName": "

    The iSCSI initiator that connects to the target.

    ", + "DeleteChapCredentialsInput$InitiatorName": "

    The iSCSI initiator that connects to the target.

    ", + "DeleteChapCredentialsOutput$InitiatorName": "

    The iSCSI initiator that connects to the target.

    ", + "UpdateChapCredentialsInput$InitiatorName": "

    The iSCSI initiator that connects to the target.

    ", + "UpdateChapCredentialsOutput$InitiatorName": "

    The iSCSI initiator that connects to the target. This is the same initiator name specified in the request.

    " + } + }, + "LastSoftwareUpdate": { + "base": null, + "refs": { + "DescribeGatewayInformationOutput$LastSoftwareUpdate": "

    The date on which the last software update was applied to the gateway. If the gateway has never been updated, this field does not return a value in the response.

    " + } + }, + "ListGatewaysInput": { + "base": "

    A JSON object containing zero or more of the following fields:

    ", + "refs": { + } + }, + "ListGatewaysOutput": { + "base": null, + "refs": { + } + }, + "ListLocalDisksInput": { + "base": "

    A JSON object containing the Amazon Resource Name (ARN) of the gateway.

    ", + "refs": { + } + }, + "ListLocalDisksOutput": { + "base": null, + "refs": { + } + }, + "ListTagsForResourceInput": { + "base": "

    ListTagsForResourceInput

    ", + "refs": { + } + }, + "ListTagsForResourceOutput": { + "base": "

    ListTagsForResourceOutput

    ", + "refs": { + } + }, + "ListTapesInput": { + "base": "

    A JSON object that contains one or more of the following fields:

    ", + "refs": { + } + }, + "ListTapesOutput": { + "base": "

    A JSON object containing the following fields:

    ", + "refs": { + } + }, + "ListVolumeInitiatorsInput": { + "base": "

    ListVolumeInitiatorsInput

    ", + "refs": { + } + }, + "ListVolumeInitiatorsOutput": { + "base": "

    ListVolumeInitiatorsOutput

    ", + "refs": { + } + }, + "ListVolumeRecoveryPointsInput": { + "base": null, + "refs": { + } + }, + "ListVolumeRecoveryPointsOutput": { + "base": null, + "refs": { + } + }, + "ListVolumesInput": { + "base": "

    A JSON object that contains one or more of the following fields:

    ", + "refs": { + } + }, + "ListVolumesOutput": { + "base": null, + "refs": { + } + }, + "LocalConsolePassword": { + "base": null, + "refs": { + "SetLocalConsolePasswordInput$LocalConsolePassword": "

    The password you want to set for your VM local console.

    " + } + }, + "Marker": { + "base": null, + "refs": { + "DescribeTapeArchivesInput$Marker": "

    An opaque string that indicates the position at which to begin describing virtual tapes.

    ", + "DescribeTapeArchivesOutput$Marker": "

    An opaque string that indicates the position at which the virtual tapes that were fetched for description ended. Use this marker in your next request to fetch the next set of virtual tapes in the virtual tape shelf (VTS). If there are no more virtual tapes to describe, this field does not appear in the response.

    ", + "DescribeTapeRecoveryPointsInput$Marker": "

    An opaque string that indicates the position at which to begin describing the virtual tape recovery points.

    ", + "DescribeTapeRecoveryPointsOutput$Marker": "

    An opaque string that indicates the position at which the virtual tape recovery points that were listed for description ended.

    Use this marker in your next request to list the next set of virtual tape recovery points in the list. If there are no more recovery points to describe, this field does not appear in the response.

    ", + "DescribeTapesInput$Marker": "

    A marker value, obtained in a previous call to DescribeTapes. This marker indicates which page of results to retrieve.

    If not specified, the first page of results is retrieved.

    ", + "DescribeTapesOutput$Marker": "

    An opaque string which can be used as part of a subsequent DescribeTapes call to retrieve the next page of results.

    If a response does not contain a marker, then there are no more results to be retrieved.
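
    As an aside for readers of this vendored model: the Marker handshake described above can be driven by hand with the generated Go client. A minimal sketch, assuming the aws-sdk-go v1 client vendored by this patch; the gateway ARN is a placeholder:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/storagegateway"
)

func main() {
	svc := storagegateway.New(session.Must(session.NewSession()))

	input := &storagegateway.DescribeTapesInput{
		// Hypothetical gateway ARN, for illustration only.
		GatewayARN: aws.String("arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B"),
		Limit:      aws.Int64(100),
	}
	for {
		out, err := svc.DescribeTapes(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, tape := range out.Tapes {
			fmt.Println(aws.StringValue(tape.TapeARN), aws.StringValue(tape.TapeStatus))
		}
		// No marker in the response means there are no more results.
		if out.Marker == nil {
			break
		}
		input.Marker = out.Marker
	}
}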

    ", + "DescribeVTLDevicesInput$Marker": "

    An opaque string that indicates the position at which to begin describing the VTL devices.

    ", + "DescribeVTLDevicesOutput$Marker": "

    An opaque string that indicates the position at which the VTL devices that were fetched for description ended. Use the marker in your next request to fetch the next set of VTL devices in the list. If there are no more VTL devices to describe, this field does not appear in the response.

    ", + "ListGatewaysInput$Marker": "

    An opaque string that indicates the position at which to begin the returned list of gateways.

    ", + "ListGatewaysOutput$Marker": null, + "ListTagsForResourceInput$Marker": "

    An opaque string that indicates the position at which to begin returning the list of tags.

    ", + "ListTagsForResourceOutput$Marker": "

    An opaque string that indicates the position at which to stop returning the list of tags.

    ", + "ListTapesInput$Marker": "

    A string that indicates the position at which to begin the returned list of tapes.

    ", + "ListTapesOutput$Marker": "

    A string that indicates the position at which to begin returning the next list of tapes. Use the marker in your next request to continue pagination of tapes. If there are no more tapes to list, this element does not appear in the response body.

    ", + "ListVolumesInput$Marker": "

    A string that indicates the position at which to begin the returned list of volumes. Obtain the marker from the response of a previous List iSCSI Volumes request.

    ", + "ListVolumesOutput$Marker": null + } + }, + "MediumChangerType": { + "base": null, + "refs": { + "ActivateGatewayInput$MediumChangerType": "

    The value that indicates the type of medium changer to use for gateway-VTL. This field is optional.

    Valid Values: \"STK-L700\", \"AWS-Gateway-VTL\"

    " + } + }, + "MinuteOfHour": { + "base": null, + "refs": { + "DescribeMaintenanceStartTimeOutput$MinuteOfHour": null, + "UpdateMaintenanceStartTimeInput$MinuteOfHour": "

    The minute component of the maintenance start time represented as mm, where mm is the minute (00 to 59). The minute of the hour is in the time zone of the gateway.
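
    A short sketch of setting the window described by these two fields. DayOfWeek is a field of the same input shape that is not quoted in this excerpt, and svc reuses the client and imports from the DescribeTapes sketch above:

func setMaintenanceWindow(svc *storagegateway.StorageGateway, gatewayARN string) error {
	_, err := svc.UpdateMaintenanceStartTime(&storagegateway.UpdateMaintenanceStartTimeInput{
		GatewayARN:   aws.String(gatewayARN),
		HourOfDay:    aws.Int64(2),  // hh, 00 to 23, in the gateway's time zone
		MinuteOfHour: aws.Int64(30), // mm, 00 to 59
		DayOfWeek:    aws.Int64(0),  // 0 = Sunday; field not quoted in this excerpt
	})
	return err
}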

    " + } + }, + "NetworkInterface": { + "base": "

    Describes a gateway's network interface.

    ", + "refs": { + "GatewayNetworkInterfaces$member": null + } + }, + "NetworkInterfaceId": { + "base": null, + "refs": { + "CreateCachediSCSIVolumeInput$NetworkInterfaceId": null, + "CreateStorediSCSIVolumeInput$NetworkInterfaceId": "

    The network interface of the gateway on which to expose the iSCSI target. Only IPv4 addresses are accepted. Use DescribeGatewayInformation to get a list of the network interfaces available on a gateway.

    Valid Values: A valid IP address.

    ", + "DeviceiSCSIAttributes$NetworkInterfaceId": "

    The network interface identifier of the VTL device.

    ", + "VolumeiSCSIAttributes$NetworkInterfaceId": "

    The network interface identifier.

    " + } + }, + "NextUpdateAvailabilityDate": { + "base": null, + "refs": { + "DescribeGatewayInformationOutput$NextUpdateAvailabilityDate": "

    The date on which an update to the gateway is available. This date is in the time zone of the gateway. If the gateway is not available for an update, this field is not returned in the response.

    " + } + }, + "NumTapesToCreate": { + "base": null, + "refs": { + "CreateTapesInput$NumTapesToCreate": "

    The number of virtual tapes that you want to create.

    " + } + }, + "PositiveIntObject": { + "base": null, + "refs": { + "DescribeTapeArchivesInput$Limit": "

    Specifies that the number of virtual tapes described be limited to the specified number.

    ", + "DescribeTapeRecoveryPointsInput$Limit": "

    Specifies that the number of virtual tape recovery points that are described be limited to the specified number.

    ", + "DescribeTapesInput$Limit": "

    Specifies that the number of virtual tapes described be limited to the specified number.

    Amazon Web Services may impose its own limit, if this field is not set.

    ", + "DescribeVTLDevicesInput$Limit": "

    Specifies that the number of VTL devices described be limited to the specified number.

    ", + "ListGatewaysInput$Limit": "

    Specifies that the list of gateways returned be limited to the specified number of items.
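
    Rather than threading Limit and Marker manually, the generated paginator (declared in the paginators-1.json file added later in this patch) does the token handoff itself. A hedged sketch, using the same client as the sketches above:

// Limit caps each page; ListGatewaysPages feeds each response's Marker back
// in as the next request's Marker automatically.
func listAllGateways(svc *storagegateway.StorageGateway) error {
	return svc.ListGatewaysPages(&storagegateway.ListGatewaysInput{
		Limit: aws.Int64(25),
	}, func(page *storagegateway.ListGatewaysOutput, lastPage bool) bool {
		for _, gw := range page.Gateways {
			fmt.Println(aws.StringValue(gw.GatewayId))
		}
		return true // keep paging
	})
}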

    ", + "ListTagsForResourceInput$Limit": "

    Specifies that the list of tags returned be limited to the specified number of items.

    ", + "ListTapesInput$Limit": "

    An optional limit on the number of tapes in the list returned by this call.

    ", + "ListVolumesInput$Limit": "

    Specifies that the list of volumes returned be limited to the specified number of items.

    ", + "VolumeiSCSIAttributes$LunNumber": "

    The logical disk number.

    " + } + }, + "RecurrenceInHours": { + "base": null, + "refs": { + "DescribeSnapshotScheduleOutput$RecurrenceInHours": null, + "UpdateSnapshotScheduleInput$RecurrenceInHours": "

    Frequency of snapshots. Specify the number of hours between snapshots.
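
    A sketch combining StartAt and RecurrenceInHours; the values are illustrative only:

func scheduleSnapshots(svc *storagegateway.StorageGateway, volumeARN string) error {
	_, err := svc.UpdateSnapshotSchedule(&storagegateway.UpdateSnapshotScheduleInput{
		VolumeARN:         aws.String(volumeARN),
		StartAt:           aws.Int64(3), // begin at 03:00 in the gateway's time zone
		RecurrenceInHours: aws.Int64(6), // take a snapshot every six hours
	})
	return err
}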

    " + } + }, + "RegionId": { + "base": null, + "refs": { + "ActivateGatewayInput$GatewayRegion": "

    A value that indicates the region where you want to store the snapshot backups. The gateway region specified must be the same region as the region in your Host header in the request. For more information about available regions and endpoints for AWS Storage Gateway, see Regions and Endpoints in the Amazon Web Services Glossary.

    Valid Values: \"us-east-1\", \"us-west-1\", \"us-west-2\", \"eu-west-1\", \"eu-central-1\", \"ap-northeast-1\", \"ap-northeast-2\", \"ap-southeast-1\", \"ap-southeast-2\", \"sa-east-1\"

    " + } + }, + "RemoveTagsFromResourceInput": { + "base": "

    RemoveTagsFromResourceInput

    ", + "refs": { + } + }, + "RemoveTagsFromResourceOutput": { + "base": "

    RemoveTagsFromResourceOutput

    ", + "refs": { + } + }, + "ResetCacheInput": { + "base": null, + "refs": { + } + }, + "ResetCacheOutput": { + "base": null, + "refs": { + } + }, + "ResourceARN": { + "base": null, + "refs": { + "AddTagsToResourceInput$ResourceARN": "

    The Amazon Resource Name (ARN) of the resource you want to add tags to.

    ", + "AddTagsToResourceOutput$ResourceARN": "

    The Amazon Resource Name (ARN) of the resource you want to add tags to.

    ", + "ListTagsForResourceInput$ResourceARN": "

    The Amazon Resource Name (ARN) of the resource for which you want to list tags.

    ", + "ListTagsForResourceOutput$ResourceARN": "

    The Amazon Resource Name (ARN) of the resource for which you want to list tags.

    ", + "RemoveTagsFromResourceInput$ResourceARN": "

    The Amazon Resource Name (ARN) of the resource you want to remove the tags from.

    ", + "RemoveTagsFromResourceOutput$ResourceARN": "

    The Amazon Resource Name (ARN) of the resource that the tags were removed from.

    " + } + }, + "RetrieveTapeArchiveInput": { + "base": "

    RetrieveTapeArchiveInput

    ", + "refs": { + } + }, + "RetrieveTapeArchiveOutput": { + "base": "

    RetrieveTapeArchiveOutput

    ", + "refs": { + } + }, + "RetrieveTapeRecoveryPointInput": { + "base": "

    RetrieveTapeRecoveryPointInput

    ", + "refs": { + } + }, + "RetrieveTapeRecoveryPointOutput": { + "base": "

    RetrieveTapeRecoveryPointOutput

    ", + "refs": { + } + }, + "SetLocalConsolePasswordInput": { + "base": "

    SetLocalConsolePasswordInput

    ", + "refs": { + } + }, + "SetLocalConsolePasswordOutput": { + "base": null, + "refs": { + } + }, + "ShutdownGatewayInput": { + "base": "

    A JSON object containing the Amazon Resource Name (ARN) of the gateway to shut down.

    ", + "refs": { + } + }, + "ShutdownGatewayOutput": { + "base": "

    A JSON object containing the Amazon Resource Name (ARN) of the gateway that was shut down.

    ", + "refs": { + } + }, + "SnapshotDescription": { + "base": null, + "refs": { + "CreateSnapshotFromVolumeRecoveryPointInput$SnapshotDescription": null, + "CreateSnapshotInput$SnapshotDescription": "

    Textual description of the snapshot that appears in the Amazon EC2 console (Elastic Block Store snapshots panel, Description field) and in the AWS Storage Gateway snapshot Details pane (Description field).
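
    For illustration, a sketch that takes the snapshot and returns its ID; the description text and error handling are deliberately minimal:

func snapshotVolume(svc *storagegateway.StorageGateway, volumeARN string) (string, error) {
	out, err := svc.CreateSnapshot(&storagegateway.CreateSnapshotInput{
		VolumeARN:           aws.String(volumeARN),
		SnapshotDescription: aws.String("nightly backup"), // shows up in the EC2 console's Description field
	})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.SnapshotId), nil
}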

    " + } + }, + "SnapshotId": { + "base": null, + "refs": { + "CachediSCSIVolume$SourceSnapshotId": null, + "CreateCachediSCSIVolumeInput$SnapshotId": null, + "CreateSnapshotFromVolumeRecoveryPointOutput$SnapshotId": null, + "CreateSnapshotOutput$SnapshotId": "

    The snapshot ID that is used to refer to the snapshot in future operations such as describing snapshots (Amazon Elastic Compute Cloud API DescribeSnapshots) or creating a volume from a snapshot (CreateStorediSCSIVolume).

    ", + "CreateStorediSCSIVolumeInput$SnapshotId": "

    The snapshot ID (e.g. \"snap-1122aabb\") of the snapshot to restore as the new stored volume. Specify this field if you want to create the iSCSI storage volume from a snapshot otherwise do not include this field. To list snapshots for your account use DescribeSnapshots in the Amazon Elastic Compute Cloud API Reference.

    ", + "StorediSCSIVolume$SourceSnapshotId": null + } + }, + "StartGatewayInput": { + "base": "

    A JSON object containing the Amazon Resource Name (ARN) of the gateway to start.

    ", + "refs": { + } + }, + "StartGatewayOutput": { + "base": "

    A JSON object containing the Amazon Resource Name (ARN) of the gateway that was restarted.

    ", + "refs": { + } + }, + "StorageGatewayError": { + "base": "

    Provides additional information about an error that was returned by the service as part of an InternalServerError or an InvalidGatewayRequestException. See the errorCode and errorDetails members for more information about the error.

    ", + "refs": { + "InternalServerError$error": "

    A StorageGatewayError that provides more information about the cause of the error.

    ", + "InvalidGatewayRequestException$error": "

    A StorageGatewayError that provides more detail about the cause of the error.

    " + } + }, + "StorediSCSIVolume": { + "base": null, + "refs": { + "StorediSCSIVolumes$member": null + } + }, + "StorediSCSIVolumes": { + "base": null, + "refs": { + "DescribeStorediSCSIVolumesOutput$StorediSCSIVolumes": null + } + }, + "Tag": { + "base": null, + "refs": { + "Tags$member": null + } + }, + "TagKey": { + "base": null, + "refs": { + "Tag$Key": null, + "TagKeys$member": null + } + }, + "TagKeys": { + "base": null, + "refs": { + "RemoveTagsFromResourceInput$TagKeys": "

    The keys of the tags you want to remove from the specified resource. A tag is composed of a key/value pair.

    " + } + }, + "TagValue": { + "base": null, + "refs": { + "Tag$Value": null + } + }, + "Tags": { + "base": null, + "refs": { + "AddTagsToResourceInput$Tags": "

    The key-value pair that represents the tag you want to add to the resource. The value can be an empty string.

    Valid characters for key and value are letters, spaces, and numbers representable in UTF-8 format, and the following special characters: + - = . _ : / @.
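
    A sketch of tagging a resource under these character rules; the key and value strings are placeholders:

func tagResource(svc *storagegateway.StorageGateway, resourceARN string) error {
	_, err := svc.AddTagsToResource(&storagegateway.AddTagsToResourceInput{
		ResourceARN: aws.String(resourceARN),
		Tags: []*storagegateway.Tag{
			{Key: aws.String("team"), Value: aws.String("storage")},
			{Key: aws.String("env"), Value: aws.String("")}, // an empty value is allowed
		},
	})
	return err
}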

    ", + "ListTagsForResourceOutput$Tags": "

    An array that contains the tags for the specified resource.

    " + } + }, + "Tape": { + "base": "

    Describes a virtual tape object.

    ", + "refs": { + "Tapes$member": null + } + }, + "TapeARN": { + "base": null, + "refs": { + "CancelArchivalInput$TapeARN": "

    The Amazon Resource Name (ARN) of the virtual tape you want to cancel archiving for.

    ", + "CancelArchivalOutput$TapeARN": "

    The Amazon Resource Name (ARN) of the virtual tape for which archiving was canceled.

    ", + "CancelRetrievalInput$TapeARN": "

    The Amazon Resource Name (ARN) of the virtual tape you want to cancel retrieval for.

    ", + "CancelRetrievalOutput$TapeARN": "

    The Amazon Resource Name (ARN) of the virtual tape for which retrieval was canceled.

    ", + "CreateTapeWithBarcodeOutput$TapeARN": "

    A unique Amazon Resource Name (ARN) that represents the virtual tape that was created.

    ", + "DeleteTapeArchiveInput$TapeARN": "

    The Amazon Resource Name (ARN) of the virtual tape to delete from the virtual tape shelf (VTS).

    ", + "DeleteTapeArchiveOutput$TapeARN": "

    The Amazon Resource Name (ARN) of the virtual tape that was deleted from the virtual tape shelf (VTS).

    ", + "DeleteTapeInput$TapeARN": "

    The Amazon Resource Name (ARN) of the virtual tape to delete.

    ", + "DeleteTapeOutput$TapeARN": "

    The Amazon Resource Name (ARN) of the deleted virtual tape.

    ", + "RetrieveTapeArchiveInput$TapeARN": "

    The Amazon Resource Name (ARN) of the virtual tape you want to retrieve from the virtual tape shelf (VTS).

    ", + "RetrieveTapeArchiveOutput$TapeARN": "

    The Amazon Resource Name (ARN) of the retrieved virtual tape.

    ", + "RetrieveTapeRecoveryPointInput$TapeARN": "

    The Amazon Resource Name (ARN) of the virtual tape for which you want to retrieve the recovery point.

    ", + "RetrieveTapeRecoveryPointOutput$TapeARN": "

    The Amazon Resource Name (ARN) of the virtual tape for which the recovery point was retrieved.

    ", + "Tape$TapeARN": "

    The Amazon Resource Name (ARN) of the virtual tape.

    ", + "TapeARNs$member": null, + "TapeArchive$TapeARN": "

    The Amazon Resource Name (ARN) of an archived virtual tape.

    ", + "TapeInfo$TapeARN": "

    The Amazon Resource Name (ARN) of a virtual tape.

    ", + "TapeRecoveryPointInfo$TapeARN": "

    The Amazon Resource Name (ARN) of the virtual tape.

    " + } + }, + "TapeARNs": { + "base": "

    The Amazon Resource Name (ARN) of each of the tapes you want to list. If you don't specify a tape ARN, the response lists all tapes in both your VTL and VTS.

    ", + "refs": { + "CreateTapesOutput$TapeARNs": "

    A list of unique Amazon Resource Names (ARNs) that represents the virtual tapes that were created.

    ", + "DescribeTapeArchivesInput$TapeARNs": "

    Specifies one or more unique Amazon Resource Names (ARNs) that represent the virtual tapes you want to describe.

    ", + "DescribeTapesInput$TapeARNs": "

    Specifies one or more unique Amazon Resource Names (ARNs) that represent the virtual tapes you want to describe. If this parameter is not specified, AWS Storage Gateway returns a description of all virtual tapes associated with the specified gateway.

    ", + "ListTapesInput$TapeARNs": null + } + }, + "TapeArchive": { + "base": "

    Represents a virtual tape that is archived in the virtual tape shelf (VTS).

    ", + "refs": { + "TapeArchives$member": null + } + }, + "TapeArchiveStatus": { + "base": null, + "refs": { + "TapeArchive$TapeStatus": "

    The current state of the archived virtual tape.

    " + } + }, + "TapeArchives": { + "base": null, + "refs": { + "DescribeTapeArchivesOutput$TapeArchives": "

    An array of virtual tape objects in the virtual tape shelf (VTS). The information returned includes the Amazon Resource Names (ARNs) of the tapes, the size and status of each tape, the progress of the description, and the tape barcodes.

    " + } + }, + "TapeBarcode": { + "base": null, + "refs": { + "CreateTapeWithBarcodeInput$TapeBarcode": "

    The barcode that you want to assign to the tape.

    ", + "Tape$TapeBarcode": "

    The barcode that identifies a specific virtual tape.

    ", + "TapeArchive$TapeBarcode": "

    The barcode that identifies the archived virtual tape.

    ", + "TapeInfo$TapeBarcode": "

    The barcode that identifies a specific virtual tape.

    " + } + }, + "TapeBarcodePrefix": { + "base": null, + "refs": { + "CreateTapesInput$TapeBarcodePrefix": "

    A prefix that you append to the barcode of the virtual tape you are creating. This prefix makes the barcode unique.

    The prefix must be 1 to 4 characters in length and must be one of the uppercase letters from A to Z.
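
    A sketch that creates a batch of tapes under these barcode-prefix and size rules. ClientToken is a required idempotency field of the same input shape that is not quoted in this excerpt:

func createTapes(svc *storagegateway.StorageGateway, gatewayARN string) ([]*string, error) {
	out, err := svc.CreateTapes(&storagegateway.CreateTapesInput{
		GatewayARN:        aws.String(gatewayARN),
		ClientToken:       aws.String("70f31aab-2a8e-4d3c"), // idempotency token; not quoted above
		NumTapesToCreate:  aws.Int64(3),
		TapeBarcodePrefix: aws.String("TST"),                   // 1 to 4 uppercase letters
		TapeSizeInBytes:   aws.Int64(100 * 1024 * 1024 * 1024), // gigabyte-aligned
	})
	if err != nil {
		return nil, err
	}
	return out.TapeARNs, nil
}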

    " + } + }, + "TapeDriveType": { + "base": null, + "refs": { + "ActivateGatewayInput$TapeDriveType": "

    The value that indicates the type of tape drive to use for gateway-VTL. This field is optional.

    Valid Values: \"IBM-ULT3580-TD5\"

    " + } + }, + "TapeInfo": { + "base": "

    Describes a virtual tape.

    ", + "refs": { + "TapeInfos$member": null + } + }, + "TapeInfos": { + "base": "

    An array of TapeInfo objects, where each object describes a single tape. If there are no tapes in the tape library or VTS, then TapeInfos is an empty array.

    ", + "refs": { + "ListTapesOutput$TapeInfos": null + } + }, + "TapeRecoveryPointInfo": { + "base": "

    Describes a recovery point.

    ", + "refs": { + "TapeRecoveryPointInfos$member": null + } + }, + "TapeRecoveryPointInfos": { + "base": null, + "refs": { + "DescribeTapeRecoveryPointsOutput$TapeRecoveryPointInfos": "

    An array of TapeRecoveryPointInfos that are available for the specified gateway.

    " + } + }, + "TapeRecoveryPointStatus": { + "base": null, + "refs": { + "TapeRecoveryPointInfo$TapeStatus": null + } + }, + "TapeSize": { + "base": null, + "refs": { + "CreateTapeWithBarcodeInput$TapeSizeInBytes": "

    The size, in bytes, of the virtual tape that you want to create.

    The size must be a multiple of one gigabyte (1024*1024*1024 bytes).

    ", + "CreateTapesInput$TapeSizeInBytes": "

    The size, in bytes, of the virtual tapes that you want to create.

    The size must be a multiple of one gigabyte (1024*1024*1024 bytes).

    ", + "Tape$TapeSizeInBytes": "

    The size, in bytes, of the virtual tape.

    ", + "TapeArchive$TapeSizeInBytes": "

    The size, in bytes, of the archived virtual tape.

    ", + "TapeInfo$TapeSizeInBytes": "

    The size, in bytes, of a virtual tape.

    ", + "TapeRecoveryPointInfo$TapeSizeInBytes": "

    The size, in bytes, of the virtual tapes to recover.

    " + } + }, + "TapeStatus": { + "base": null, + "refs": { + "Tape$TapeStatus": "

    The current state of the virtual tape.

    ", + "TapeInfo$TapeStatus": "

    The status of the tape.

    " + } + }, + "Tapes": { + "base": null, + "refs": { + "DescribeTapesOutput$Tapes": "

    An array of virtual tape descriptions.

    " + } + }, + "TargetARN": { + "base": null, + "refs": { + "ChapInfo$TargetARN": "

    The Amazon Resource Name (ARN) of the volume.

    Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens (-).

    ", + "CreateCachediSCSIVolumeOutput$TargetARN": null, + "CreateStorediSCSIVolumeOutput$TargetARN": "

    The Amazon Resource Name (ARN) of the volume target, which includes the iSCSI name that initiators can use to connect to the target.

    ", + "DeleteChapCredentialsInput$TargetARN": "

    The Amazon Resource Name (ARN) of the iSCSI volume target. Use the DescribeStorediSCSIVolumes operation to retrieve the TargetARN for a specified VolumeARN.

    ", + "DeleteChapCredentialsOutput$TargetARN": "

    The Amazon Resource Name (ARN) of the target.

    ", + "DescribeChapCredentialsInput$TargetARN": "

    The Amazon Resource Name (ARN) of the iSCSI volume target. Use the DescribeStorediSCSIVolumes operation to retrieve the TargetARN for a specified VolumeARN.

    ", + "DeviceiSCSIAttributes$TargetARN": "

    Specifies the unique Amazon Resource Name (ARN) that encodes the iSCSI qualified name (iqn) of a tape drive or media changer target.

    ", + "UpdateChapCredentialsInput$TargetARN": "

    The Amazon Resource Name (ARN) of the iSCSI volume target. Use the DescribeStorediSCSIVolumes operation to return the TargetARN for a specified VolumeARN.

    ", + "UpdateChapCredentialsOutput$TargetARN": "

    The Amazon Resource Name (ARN) of the target. This is the same target specified in the request.

    ", + "VolumeiSCSIAttributes$TargetARN": "

    The Amazon Resource Name (ARN) of the volume target.
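
    Tying the CHAP fields above together, a sketch that rotates credentials on a target. SecretToAuthenticateInitiator belongs to the same input shape but is not quoted in this excerpt, and the secret shown is a placeholder:

func rotateCHAP(svc *storagegateway.StorageGateway, targetARN, initiatorIQN string) error {
	_, err := svc.UpdateChapCredentials(&storagegateway.UpdateChapCredentialsInput{
		TargetARN:     aws.String(targetARN), // from DescribeStorediSCSIVolumes, as noted above
		InitiatorName: aws.String(initiatorIQN),
		// Required secret field; not quoted in this excerpt.
		SecretToAuthenticateInitiator: aws.String("chapsecret123"),
	})
	return err
}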

    " + } + }, + "TargetName": { + "base": null, + "refs": { + "CreateCachediSCSIVolumeInput$TargetName": null, + "CreateStorediSCSIVolumeInput$TargetName": "

    The name of the iSCSI target used by initiators to connect to the target and as a suffix for the target ARN. For example, specifying TargetName as myvolume results in the target ARN of arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B/target/iqn.1997-05.com.amazon:myvolume. The target name must be unique across all volumes of a gateway.
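
    A sketch showing how TargetName feeds the resulting target ARN when creating a stored volume. DiskId comes from ListLocalDisks and is not quoted in this excerpt:

func createStoredVolume(svc *storagegateway.StorageGateway, gatewayARN, diskID, netIfaceID string) (string, error) {
	out, err := svc.CreateStorediSCSIVolume(&storagegateway.CreateStorediSCSIVolumeInput{
		GatewayARN:           aws.String(gatewayARN),
		DiskId:               aws.String(diskID), // from ListLocalDisks; not quoted above
		NetworkInterfaceId:   aws.String(netIfaceID),
		PreserveExistingData: aws.Bool(false),        // false creates an empty volume
		TargetName:           aws.String("myvolume"), // yields ...:target/iqn.1997-05.com.amazon:myvolume
	})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.TargetARN), nil
}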

    " + } + }, + "Time": { + "base": null, + "refs": { + "TapeArchive$CompletionTime": "

    The time that the archiving of the virtual tape was completed.

    The string format of the completion time is in the ISO8601 extended YYYY-MM-DD'T'HH:MM:SS'Z' format.

    ", + "TapeRecoveryPointInfo$TapeRecoveryPointTime": "

    The time when the point-in-time view of the virtual tape was replicated for later recovery.

    The string format of the tape recovery point time is in the ISO8601 extended YYYY-MM-DD'T'HH:MM:SS'Z' format.

    " + } + }, + "UpdateBandwidthRateLimitInput": { + "base": "

    A JSON object containing one or more of the following fields:

    ", + "refs": { + } + }, + "UpdateBandwidthRateLimitOutput": { + "base": "

    A JSON object containing the Amazon Resource Name (ARN) of the gateway whose throttle information was updated.

    ", + "refs": { + } + }, + "UpdateChapCredentialsInput": { + "base": "

    A JSON object containing one or more of the following fields:

    ", + "refs": { + } + }, + "UpdateChapCredentialsOutput": { + "base": "

    A JSON object containing the following fields:

    ", + "refs": { + } + }, + "UpdateGatewayInformationInput": { + "base": null, + "refs": { + } + }, + "UpdateGatewayInformationOutput": { + "base": "

    A JSON object containing the ARN of the gateway that was updated.

    ", + "refs": { + } + }, + "UpdateGatewaySoftwareNowInput": { + "base": "

    A JSON object containing the Amazon Resource Name (ARN) of the gateway to update.

    ", + "refs": { + } + }, + "UpdateGatewaySoftwareNowOutput": { + "base": "

    A JSON object containing the Amazon Resource Name (ARN) of the gateway that was updated.

    ", + "refs": { + } + }, + "UpdateMaintenanceStartTimeInput": { + "base": "

    A JSON object containing the following fields:

    ", + "refs": { + } + }, + "UpdateMaintenanceStartTimeOutput": { + "base": "

    A JSON object containing the Amazon Resource Name (ARN) of the gateway whose maintenance start time is updated.

    ", + "refs": { + } + }, + "UpdateSnapshotScheduleInput": { + "base": "

    A JSON object containing one or more of the following fields:

    ", + "refs": { + } + }, + "UpdateSnapshotScheduleOutput": { + "base": "

    A JSON object containing the Amazon Resource Name (ARN) of the updated storage volume.

    ", + "refs": { + } + }, + "UpdateVTLDeviceTypeInput": { + "base": null, + "refs": { + } + }, + "UpdateVTLDeviceTypeOutput": { + "base": "

    UpdateVTLDeviceTypeOutput

    ", + "refs": { + } + }, + "VTLDevice": { + "base": "

    Represents a device object associated with a gateway-VTL.

    ", + "refs": { + "VTLDevices$member": null + } + }, + "VTLDeviceARN": { + "base": null, + "refs": { + "Tape$VTLDevice": "

    The virtual tape library (VTL) device that the virtual tape is associated with.

    ", + "UpdateVTLDeviceTypeInput$VTLDeviceARN": "

    The Amazon Resource Name (ARN) of the medium changer you want to select.

    ", + "UpdateVTLDeviceTypeOutput$VTLDeviceARN": "

    The Amazon Resource Name (ARN) of the medium changer you have selected.

    ", + "VTLDevice$VTLDeviceARN": "

    Specifies the unique Amazon Resource Name (ARN) of the device (tape drive or media changer).

    ", + "VTLDeviceARNs$member": null + } + }, + "VTLDeviceARNs": { + "base": null, + "refs": { + "DescribeVTLDevicesInput$VTLDeviceARNs": "

    An array of strings, where each string represents the Amazon Resource Name (ARN) of a VTL device.

    All of the specified VTL devices must be from the same gateway. If no VTL devices are specified, the result will contain all devices on the specified gateway.

    " + } + }, + "VTLDeviceProductIdentifier": { + "base": null, + "refs": { + "VTLDevice$VTLDeviceProductIdentifier": null + } + }, + "VTLDeviceType": { + "base": null, + "refs": { + "VTLDevice$VTLDeviceType": null + } + }, + "VTLDeviceVendor": { + "base": null, + "refs": { + "VTLDevice$VTLDeviceVendor": null + } + }, + "VTLDevices": { + "base": null, + "refs": { + "DescribeVTLDevicesOutput$VTLDevices": "

    An array of VTL device objects, each composed of the Amazon Resource Name (ARN) of a VTL device.

    " + } + }, + "VolumeARN": { + "base": null, + "refs": { + "CachediSCSIVolume$VolumeARN": null, + "CreateCachediSCSIVolumeOutput$VolumeARN": null, + "CreateSnapshotFromVolumeRecoveryPointInput$VolumeARN": null, + "CreateSnapshotFromVolumeRecoveryPointOutput$VolumeARN": null, + "CreateSnapshotInput$VolumeARN": "

    The Amazon Resource Name (ARN) of the volume. Use the ListVolumes operation to return a list of gateway volumes.

    ", + "CreateSnapshotOutput$VolumeARN": "

    The Amazon Resource Name (ARN) of the volume of which the snapshot was taken.

    ", + "CreateStorediSCSIVolumeOutput$VolumeARN": "

    The Amazon Resource Name (ARN) of the configured volume.

    ", + "DeleteSnapshotScheduleInput$VolumeARN": null, + "DeleteSnapshotScheduleOutput$VolumeARN": null, + "DeleteVolumeInput$VolumeARN": "

    The Amazon Resource Name (ARN) of the volume. Use the ListVolumes operation to return a list of gateway volumes.

    ", + "DeleteVolumeOutput$VolumeARN": "

    The Amazon Resource Name (ARN) of the storage volume that was deleted. It is the same ARN you provided in the request.

    ", + "DescribeSnapshotScheduleInput$VolumeARN": "

    The Amazon Resource Name (ARN) of the volume. Use the ListVolumes operation to return a list of gateway volumes.

    ", + "DescribeSnapshotScheduleOutput$VolumeARN": null, + "ListVolumeInitiatorsInput$VolumeARN": "

    The Amazon Resource Name (ARN) of the volume. Use the ListVolumes operation to return a list of gateway volumes for the gateway.

    ", + "StorediSCSIVolume$VolumeARN": null, + "UpdateSnapshotScheduleInput$VolumeARN": "

    The Amazon Resource Name (ARN) of the volume. Use the ListVolumes operation to return a list of gateway volumes.

    ", + "UpdateSnapshotScheduleOutput$VolumeARN": "

    ", + "VolumeARNs$member": null, + "VolumeInfo$VolumeARN": "

    The Amazon Resource Name (ARN) for the storage volume. For example, the following is a valid ARN:

    arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABB

    Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens (-).

    ", + "VolumeRecoveryPointInfo$VolumeARN": null + } + }, + "VolumeARNs": { + "base": null, + "refs": { + "DescribeCachediSCSIVolumesInput$VolumeARNs": null, + "DescribeStorediSCSIVolumesInput$VolumeARNs": "

    An array of strings, where each string represents the Amazon Resource Name (ARN) of a stored volume. All of the specified stored volumes must be from the same gateway. Use ListVolumes to get volume ARNs for a gateway.

    " + } + }, + "VolumeId": { + "base": null, + "refs": { + "CachediSCSIVolume$VolumeId": null, + "StorediSCSIVolume$VolumeId": null, + "VolumeInfo$VolumeId": "

    The unique identifier assigned to the volume. This ID becomes part of the volume Amazon Resource Name (ARN), which you use as input for other operations.

    Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens (-).

    " + } + }, + "VolumeInfo": { + "base": "

    Describes a storage volume object.

    ", + "refs": { + "VolumeInfos$member": null + } + }, + "VolumeInfos": { + "base": null, + "refs": { + "ListVolumesOutput$VolumeInfos": null + } + }, + "VolumeRecoveryPointInfo": { + "base": null, + "refs": { + "VolumeRecoveryPointInfos$member": null + } + }, + "VolumeRecoveryPointInfos": { + "base": null, + "refs": { + "ListVolumeRecoveryPointsOutput$VolumeRecoveryPointInfos": null + } + }, + "VolumeStatus": { + "base": null, + "refs": { + "CachediSCSIVolume$VolumeStatus": null, + "StorediSCSIVolume$VolumeStatus": null + } + }, + "VolumeType": { + "base": null, + "refs": { + "CachediSCSIVolume$VolumeType": null, + "StorediSCSIVolume$VolumeType": null, + "VolumeInfo$VolumeType": null + } + }, + "VolumeiSCSIAttributes": { + "base": "

    Lists iSCSI information about a volume.

    ", + "refs": { + "CachediSCSIVolume$VolumeiSCSIAttributes": null, + "StorediSCSIVolume$VolumeiSCSIAttributes": null + } + }, + "boolean": { + "base": null, + "refs": { + "CreateStorediSCSIVolumeInput$PreserveExistingData": "

    Specify this field as true if you want to preserve the data on the local disk. Otherwise, specifying this field as false creates an empty volume.

    Valid Values: true, false

    ", + "DeviceiSCSIAttributes$ChapEnabled": "

    Indicates whether mutual CHAP is enabled for the iSCSI target.

    ", + "StorediSCSIVolume$PreservedExistingData": null, + "VolumeiSCSIAttributes$ChapEnabled": "

    Indicates whether mutual CHAP is enabled for the iSCSI target.

    " + } + }, + "double": { + "base": null, + "refs": { + "DescribeCacheOutput$CacheUsedPercentage": null, + "DescribeCacheOutput$CacheDirtyPercentage": null, + "DescribeCacheOutput$CacheHitPercentage": null, + "DescribeCacheOutput$CacheMissPercentage": null + } + }, + "errorDetails": { + "base": null, + "refs": { + "StorageGatewayError$errorDetails": "

    Human-readable text that provides detail about the error that occurred.

    " + } + }, + "integer": { + "base": null, + "refs": { + "DeviceiSCSIAttributes$NetworkInterfacePort": "

    The port used to communicate with iSCSI VTL device targets.

    ", + "VolumeiSCSIAttributes$NetworkInterfacePort": "

    The port used to communicate with iSCSI targets.

    " + } + }, + "long": { + "base": null, + "refs": { + "CachediSCSIVolume$VolumeSizeInBytes": null, + "CreateCachediSCSIVolumeInput$VolumeSizeInBytes": null, + "CreateStorediSCSIVolumeOutput$VolumeSizeInBytes": "

    The size of the volume in bytes.

    ", + "DescribeCacheOutput$CacheAllocatedInBytes": null, + "DescribeUploadBufferOutput$UploadBufferUsedInBytes": null, + "DescribeUploadBufferOutput$UploadBufferAllocatedInBytes": null, + "DescribeWorkingStorageOutput$WorkingStorageUsedInBytes": "

    The total working storage in bytes in use by the gateway. If no working storage is configured for the gateway, this field returns 0.

    ", + "DescribeWorkingStorageOutput$WorkingStorageAllocatedInBytes": "

    The total working storage in bytes allocated for the gateway. If no working storage is configured for the gateway, this field returns 0.

    ", + "Disk$DiskSizeInBytes": null, + "StorediSCSIVolume$VolumeSizeInBytes": null, + "VolumeInfo$VolumeSizeInBytes": "

    The size, in bytes, of the volume.

    ", + "VolumeRecoveryPointInfo$VolumeSizeInBytes": null, + "VolumeRecoveryPointInfo$VolumeUsageInBytes": null + } + }, + "string": { + "base": null, + "refs": { + "CreateSnapshotFromVolumeRecoveryPointOutput$VolumeRecoveryPointTime": null, + "DescribeGatewayInformationOutput$GatewayName": "

    The name you configured for your gateway.

    ", + "Disk$DiskPath": null, + "Disk$DiskNode": null, + "Disk$DiskStatus": null, + "Disk$DiskAllocationResource": null, + "GatewayInfo$GatewayName": "

    The name of the gateway.

    ", + "InternalServerError$message": "

    A human-readable message describing the error that occurred.

    ", + "InvalidGatewayRequestException$message": "

    A human-readable message describing the error that occurred.

    ", + "NetworkInterface$Ipv4Address": "

    The Internet Protocol version 4 (IPv4) address of the interface.

    ", + "NetworkInterface$MacAddress": "

    The Media Access Control (MAC) address of the interface.

    This is currently unsupported and will not be returned in output.

    ", + "NetworkInterface$Ipv6Address": "

    The Internet Protocol version 6 (IPv6) address of the interface. Currently not supported.

    ", + "UpdateGatewayInformationOutput$GatewayName": null, + "VolumeRecoveryPointInfo$VolumeRecoveryPointTime": null, + "errorDetails$key": null, + "errorDetails$value": null + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/storagegateway/2013-06-30/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/storagegateway/2013-06-30/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/storagegateway/2013-06-30/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/storagegateway/2013-06-30/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/storagegateway/2013-06-30/paginators-1.json new file mode 100644 index 000000000..286915c63 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/storagegateway/2013-06-30/paginators-1.json @@ -0,0 +1,52 @@ +{ + "pagination": { + "DescribeCachediSCSIVolumes": { + "result_key": "CachediSCSIVolumes" + }, + "DescribeStorediSCSIVolumes": { + "result_key": "StorediSCSIVolumes" + }, + "DescribeTapeArchives": { + "input_token": "Marker", + "limit_key": "Limit", + "output_token": "Marker", + "result_key": "TapeArchives" + }, + "DescribeTapeRecoveryPoints": { + "input_token": "Marker", + "limit_key": "Limit", + "output_token": "Marker", + "result_key": "TapeRecoveryPointInfos" + }, + "DescribeTapes": { + "input_token": "Marker", + "limit_key": "Limit", + "output_token": "Marker", + "result_key": "Tapes" + }, + "DescribeVTLDevices": { + "input_token": "Marker", + "limit_key": "Limit", + "output_token": "Marker", + "result_key": "VTLDevices" + }, + "ListGateways": { + "input_token": "Marker", + "limit_key": "Limit", + "output_token": "Marker", + "result_key": "Gateways" + }, + "ListLocalDisks": { + "result_key": "Disks" + }, + "ListVolumeRecoveryPoints": { + "result_key": "VolumeRecoveryPointInfos" + }, + "ListVolumes": { + "input_token": "Marker", + "limit_key": "Limit", + "output_token": "Marker", + "result_key": "VolumeInfos" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/streams.dynamodb/2012-08-10/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/streams.dynamodb/2012-08-10/api-2.json new file mode 100644 index 000000000..6c2839083 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/streams.dynamodb/2012-08-10/api-2.json @@ -0,0 +1,397 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2012-08-10", + "endpointPrefix":"streams.dynamodb", + "jsonVersion":"1.0", + "protocol":"json", + "serviceFullName":"Amazon DynamoDB Streams", + "signatureVersion":"v4", + "signingName":"dynamodb", + "targetPrefix":"DynamoDBStreams_20120810" + }, + "operations":{ + "DescribeStream":{ + "name":"DescribeStream", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeStreamInput"}, + "output":{"shape":"DescribeStreamOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerError"} + ] + }, + "GetRecords":{ + "name":"GetRecords", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetRecordsInput"}, + "output":{"shape":"GetRecordsOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalServerError"}, + {"shape":"ExpiredIteratorException"}, + {"shape":"TrimmedDataAccessException"} + ] + }, + "GetShardIterator":{ + "name":"GetShardIterator", + "http":{ + "method":"POST", + 
"requestUri":"/" + }, + "input":{"shape":"GetShardIteratorInput"}, + "output":{"shape":"GetShardIteratorOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerError"}, + {"shape":"TrimmedDataAccessException"} + ] + }, + "ListStreams":{ + "name":"ListStreams", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListStreamsInput"}, + "output":{"shape":"ListStreamsOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerError"} + ] + } + }, + "shapes":{ + "AttributeMap":{ + "type":"map", + "key":{"shape":"AttributeName"}, + "value":{"shape":"AttributeValue"} + }, + "AttributeName":{ + "type":"string", + "max":65535 + }, + "AttributeValue":{ + "type":"structure", + "members":{ + "S":{"shape":"StringAttributeValue"}, + "N":{"shape":"NumberAttributeValue"}, + "B":{"shape":"BinaryAttributeValue"}, + "SS":{"shape":"StringSetAttributeValue"}, + "NS":{"shape":"NumberSetAttributeValue"}, + "BS":{"shape":"BinarySetAttributeValue"}, + "M":{"shape":"MapAttributeValue"}, + "L":{"shape":"ListAttributeValue"}, + "NULL":{"shape":"NullAttributeValue"}, + "BOOL":{"shape":"BooleanAttributeValue"} + } + }, + "BinaryAttributeValue":{"type":"blob"}, + "BinarySetAttributeValue":{ + "type":"list", + "member":{"shape":"BinaryAttributeValue"} + }, + "BooleanAttributeValue":{"type":"boolean"}, + "Date":{"type":"timestamp"}, + "DescribeStreamInput":{ + "type":"structure", + "required":["StreamArn"], + "members":{ + "StreamArn":{"shape":"StreamArn"}, + "Limit":{"shape":"PositiveIntegerObject"}, + "ExclusiveStartShardId":{"shape":"ShardId"} + } + }, + "DescribeStreamOutput":{ + "type":"structure", + "members":{ + "StreamDescription":{"shape":"StreamDescription"} + } + }, + "ErrorMessage":{"type":"string"}, + "ExpiredIteratorException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "GetRecordsInput":{ + "type":"structure", + "required":["ShardIterator"], + "members":{ + "ShardIterator":{"shape":"ShardIterator"}, + "Limit":{"shape":"PositiveIntegerObject"} + } + }, + "GetRecordsOutput":{ + "type":"structure", + "members":{ + "Records":{"shape":"RecordList"}, + "NextShardIterator":{"shape":"ShardIterator"} + } + }, + "GetShardIteratorInput":{ + "type":"structure", + "required":[ + "StreamArn", + "ShardId", + "ShardIteratorType" + ], + "members":{ + "StreamArn":{"shape":"StreamArn"}, + "ShardId":{"shape":"ShardId"}, + "ShardIteratorType":{"shape":"ShardIteratorType"}, + "SequenceNumber":{"shape":"SequenceNumber"} + } + }, + "GetShardIteratorOutput":{ + "type":"structure", + "members":{ + "ShardIterator":{"shape":"ShardIterator"} + } + }, + "InternalServerError":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true, + "fault":true + }, + "KeySchema":{ + "type":"list", + "member":{"shape":"KeySchemaElement"}, + "max":2, + "min":1 + }, + "KeySchemaAttributeName":{ + "type":"string", + "max":255, + "min":1 + }, + "KeySchemaElement":{ + "type":"structure", + "required":[ + "AttributeName", + "KeyType" + ], + "members":{ + "AttributeName":{"shape":"KeySchemaAttributeName"}, + "KeyType":{"shape":"KeyType"} + } + }, + "KeyType":{ + "type":"string", + "enum":[ + "HASH", + "RANGE" + ] + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "ListAttributeValue":{ + "type":"list", + "member":{"shape":"AttributeValue"} + }, + "ListStreamsInput":{ + 
"type":"structure", + "members":{ + "TableName":{"shape":"TableName"}, + "Limit":{"shape":"PositiveIntegerObject"}, + "ExclusiveStartStreamArn":{"shape":"StreamArn"} + } + }, + "ListStreamsOutput":{ + "type":"structure", + "members":{ + "Streams":{"shape":"StreamList"}, + "LastEvaluatedStreamArn":{"shape":"StreamArn"} + } + }, + "MapAttributeValue":{ + "type":"map", + "key":{"shape":"AttributeName"}, + "value":{"shape":"AttributeValue"} + }, + "NullAttributeValue":{"type":"boolean"}, + "NumberAttributeValue":{"type":"string"}, + "NumberSetAttributeValue":{ + "type":"list", + "member":{"shape":"NumberAttributeValue"} + }, + "OperationType":{ + "type":"string", + "enum":[ + "INSERT", + "MODIFY", + "REMOVE" + ] + }, + "PositiveIntegerObject":{ + "type":"integer", + "min":1 + }, + "PositiveLongObject":{ + "type":"long", + "min":1 + }, + "Record":{ + "type":"structure", + "members":{ + "eventID":{"shape":"String"}, + "eventName":{"shape":"OperationType"}, + "eventVersion":{"shape":"String"}, + "eventSource":{"shape":"String"}, + "awsRegion":{"shape":"String"}, + "dynamodb":{"shape":"StreamRecord"} + } + }, + "RecordList":{ + "type":"list", + "member":{"shape":"Record"} + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "SequenceNumber":{ + "type":"string", + "max":40, + "min":21 + }, + "SequenceNumberRange":{ + "type":"structure", + "members":{ + "StartingSequenceNumber":{"shape":"SequenceNumber"}, + "EndingSequenceNumber":{"shape":"SequenceNumber"} + } + }, + "Shard":{ + "type":"structure", + "members":{ + "ShardId":{"shape":"ShardId"}, + "SequenceNumberRange":{"shape":"SequenceNumberRange"}, + "ParentShardId":{"shape":"ShardId"} + } + }, + "ShardDescriptionList":{ + "type":"list", + "member":{"shape":"Shard"} + }, + "ShardId":{ + "type":"string", + "max":65, + "min":28 + }, + "ShardIterator":{ + "type":"string", + "max":2048, + "min":1 + }, + "ShardIteratorType":{ + "type":"string", + "enum":[ + "TRIM_HORIZON", + "LATEST", + "AT_SEQUENCE_NUMBER", + "AFTER_SEQUENCE_NUMBER" + ] + }, + "Stream":{ + "type":"structure", + "members":{ + "StreamArn":{"shape":"StreamArn"}, + "TableName":{"shape":"TableName"}, + "StreamLabel":{"shape":"String"} + } + }, + "StreamArn":{ + "type":"string", + "max":1024, + "min":37 + }, + "StreamDescription":{ + "type":"structure", + "members":{ + "StreamArn":{"shape":"StreamArn"}, + "StreamLabel":{"shape":"String"}, + "StreamStatus":{"shape":"StreamStatus"}, + "StreamViewType":{"shape":"StreamViewType"}, + "CreationRequestDateTime":{"shape":"Date"}, + "TableName":{"shape":"TableName"}, + "KeySchema":{"shape":"KeySchema"}, + "Shards":{"shape":"ShardDescriptionList"}, + "LastEvaluatedShardId":{"shape":"ShardId"} + } + }, + "StreamList":{ + "type":"list", + "member":{"shape":"Stream"} + }, + "StreamRecord":{ + "type":"structure", + "members":{ + "ApproximateCreationDateTime":{"shape":"Date"}, + "Keys":{"shape":"AttributeMap"}, + "NewImage":{"shape":"AttributeMap"}, + "OldImage":{"shape":"AttributeMap"}, + "SequenceNumber":{"shape":"SequenceNumber"}, + "SizeBytes":{"shape":"PositiveLongObject"}, + "StreamViewType":{"shape":"StreamViewType"} + } + }, + "StreamStatus":{ + "type":"string", + "enum":[ + "ENABLING", + "ENABLED", + "DISABLING", + "DISABLED" + ] + }, + "StreamViewType":{ + "type":"string", + "enum":[ + "NEW_IMAGE", + "OLD_IMAGE", + "NEW_AND_OLD_IMAGES", + "KEYS_ONLY" + ] + }, + "String":{"type":"string"}, + "StringAttributeValue":{"type":"string"}, + 
"StringSetAttributeValue":{ + "type":"list", + "member":{"shape":"StringAttributeValue"} + }, + "TableName":{ + "type":"string", + "max":255, + "min":3, + "pattern":"[a-zA-Z0-9_.-]+" + }, + "TrimmedDataAccessException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/streams.dynamodb/2012-08-10/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/streams.dynamodb/2012-08-10/docs-2.json new file mode 100644 index 000000000..583c171b5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/streams.dynamodb/2012-08-10/docs-2.json @@ -0,0 +1,354 @@ +{ + "version": "2.0", + "service": "Amazon DynamoDB

    Amazon DynamoDB Streams provides API actions for accessing streams and processing stream records. To learn more about application development with Streams, see Capturing Table Activity with DynamoDB Streams in the Amazon DynamoDB Developer Guide.

    The following are short descriptions of each low-level DynamoDB Streams action:

    • DescribeStream - Returns detailed information about a particular stream.

    • GetRecords - Retrieves the stream records from within a shard.

    • GetShardIterator - Returns information on how to retrieve the stream records from a shard with a given shard ID.

    • ListStreams - Returns a list of all the streams associated with the current AWS account and endpoint.

    ", + "operations": { + "DescribeStream": "

    Returns information about a stream, including the current status of the stream, its Amazon Resource Name (ARN), the composition of its shards, and its corresponding DynamoDB table.

    You can call DescribeStream at a maximum rate of 10 times per second.

    Each shard in the stream has a SequenceNumberRange associated with it. If the SequenceNumberRange has a StartingSequenceNumber but no EndingSequenceNumber, then the shard is still open (able to receive more stream records). If both StartingSequenceNumber and EndingSequenceNumber are present, then that shard is closed and can no longer receive more data.
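
    A sketch of the open/closed test this paragraph describes, against the generated dynamodbstreams client (same import style as the storage gateway sketches above, using service/dynamodbstreams):

func openShards(svc *dynamodbstreams.DynamoDBStreams, streamARN string) ([]*dynamodbstreams.Shard, error) {
	out, err := svc.DescribeStream(&dynamodbstreams.DescribeStreamInput{
		StreamArn: aws.String(streamARN),
	})
	if err != nil {
		return nil, err
	}
	var open []*dynamodbstreams.Shard
	for _, s := range out.StreamDescription.Shards {
		// An absent EndingSequenceNumber means the shard can still receive records.
		if s.SequenceNumberRange.EndingSequenceNumber == nil {
			open = append(open, s)
		}
	}
	return open, nil
}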

    ", + "GetRecords": "

    Retrieves the stream records from a given shard.

    Specify a shard iterator using the ShardIterator parameter. The shard iterator specifies the position in the shard from which you want to start reading stream records sequentially. If there are no stream records available in the portion of the shard that the iterator points to, GetRecords returns an empty list. Note that it might take multiple calls to get to a portion of the shard that contains stream records.

    GetRecords can retrieve a maximum of 1 MB of data or 1000 stream records, whichever comes first.

    ", + "GetShardIterator": "

    Returns a shard iterator. A shard iterator provides information about how to retrieve the stream records from within a shard. Use the shard iterator in a subsequent GetRecords request to read the stream records from the shard.

    A shard iterator expires 15 minutes after it is returned to the requester.

    ", + "ListStreams": "

    Returns an array of stream ARNs associated with the current account and endpoint. If the TableName parameter is present, then ListStreams will return only the stream ARNs for that table.

    You can call ListStreams at a maximum rate of 5 times per second.
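
    Putting the four actions together, a hedged end-to-end sketch; the table name is a placeholder, and production code would respect the rate limits noted above and back off between empty GetRecords pages:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodbstreams"
)

func main() {
	svc := dynamodbstreams.New(session.Must(session.NewSession()))

	// ListStreams -> DescribeStream -> GetShardIterator -> GetRecords.
	streams, err := svc.ListStreams(&dynamodbstreams.ListStreamsInput{
		TableName: aws.String("MyTable"), // hypothetical table name
	})
	if err != nil || len(streams.Streams) == 0 {
		log.Fatal("no stream: ", err)
	}
	arn := streams.Streams[0].StreamArn

	desc, err := svc.DescribeStream(&dynamodbstreams.DescribeStreamInput{StreamArn: arn})
	if err != nil {
		log.Fatal(err)
	}
	for _, shard := range desc.StreamDescription.Shards {
		it, err := svc.GetShardIterator(&dynamodbstreams.GetShardIteratorInput{
			StreamArn:         arn,
			ShardId:           shard.ShardId,
			ShardIteratorType: aws.String("TRIM_HORIZON"), // read from the oldest available record
		})
		if err != nil {
			log.Fatal(err)
		}
		// Iterators expire after 15 minutes; a nil NextShardIterator means the
		// shard is closed and fully drained.
		for iter := it.ShardIterator; iter != nil; {
			recs, err := svc.GetRecords(&dynamodbstreams.GetRecordsInput{ShardIterator: iter})
			if err != nil {
				log.Fatal(err)
			}
			for _, r := range recs.Records {
				fmt.Println(aws.StringValue(r.EventName), aws.StringValue(r.Dynamodb.SequenceNumber))
			}
			iter = recs.NextShardIterator
			if len(recs.Records) == 0 {
				break // empty page on an open shard; back off and retry in real code
			}
		}
	}
}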

    " + }, + "shapes": { + "AttributeMap": { + "base": null, + "refs": { + "StreamRecord$Keys": "

    The primary key attribute(s) for the DynamoDB item that was modified.

    ", + "StreamRecord$NewImage": "

    The item in the DynamoDB table as it appeared after it was modified.

    ", + "StreamRecord$OldImage": "

    The item in the DynamoDB table as it appeared before it was modified.

    " + } + }, + "AttributeName": { + "base": null, + "refs": { + "AttributeMap$key": null, + "MapAttributeValue$key": null + } + }, + "AttributeValue": { + "base": "

    Represents the data for an attribute. You can set one, and only one, of the elements.

    Each attribute in an item is a name-value pair. An attribute can be single-valued or a multi-valued set. For example, a book item can have title and authors attributes. Each book has one title but can have many authors. The multi-valued attribute is a set; duplicate values are not allowed.

    ", + "refs": { + "AttributeMap$value": null, + "ListAttributeValue$member": null, + "MapAttributeValue$value": null + } + }, + "BinaryAttributeValue": { + "base": null, + "refs": { + "AttributeValue$B": "

    A Binary data type.

    ", + "BinarySetAttributeValue$member": null + } + }, + "BinarySetAttributeValue": { + "base": null, + "refs": { + "AttributeValue$BS": "

    A Binary Set data type.

    " + } + }, + "BooleanAttributeValue": { + "base": null, + "refs": { + "AttributeValue$BOOL": "

    A Boolean data type.

    " + } + }, + "Date": { + "base": null, + "refs": { + "StreamDescription$CreationRequestDateTime": "

    The date and time when the request to create this stream was issued.

    ", + "StreamRecord$ApproximateCreationDateTime": "

    The approximate date and time when the stream record was created, in UNIX epoch time format.

    " + } + }, + "DescribeStreamInput": { + "base": "

    Represents the input of a DescribeStream operation.

    ", + "refs": { + } + }, + "DescribeStreamOutput": { + "base": "

    Represents the output of a DescribeStream operation.

    ", + "refs": { + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "ExpiredIteratorException$message": "

    The provided iterator exceeds the maximum age allowed.

    ", + "InternalServerError$message": "

    The server encountered an internal error trying to fulfill the request.

    ", + "LimitExceededException$message": "

    Too many operations for a given subscriber.

    ", + "ResourceNotFoundException$message": "

    The resource which is being requested does not exist.

    ", + "TrimmedDataAccessException$message": "

    \"The data you are trying to access has been trimmed.

    " + } + }, + "ExpiredIteratorException": { + "base": "

    The shard iterator has expired and can no longer be used to retrieve stream records. A shard iterator expires 15 minutes after it is retrieved using the GetShardIterator action.

    ", + "refs": { + } + }, + "GetRecordsInput": { + "base": "

    Represents the input of a GetRecords operation.

    ", + "refs": { + } + }, + "GetRecordsOutput": { + "base": "

    Represents the output of a GetRecords operation.

    ", + "refs": { + } + }, + "GetShardIteratorInput": { + "base": "

    Represents the input of a GetShardIterator operation.

    ", + "refs": { + } + }, + "GetShardIteratorOutput": { + "base": "

    Represents the output of a GetShardIterator operation.

    ", + "refs": { + } + }, + "InternalServerError": { + "base": "

    An error occurred on the server side.

    ", + "refs": { + } + }, + "KeySchema": { + "base": null, + "refs": { + "StreamDescription$KeySchema": "

    The key attribute(s) of the stream's DynamoDB table.

    " + } + }, + "KeySchemaAttributeName": { + "base": null, + "refs": { + "KeySchemaElement$AttributeName": "

    The name of a key attribute.

    " + } + }, + "KeySchemaElement": { + "base": "

    Represents a single element of a key schema. A key schema specifies the attributes that make up the primary key of a table, or the key attributes of an index.

    A KeySchemaElement represents exactly one attribute of the primary key. For example, a simple primary key (partition key) would be represented by one KeySchemaElement. A composite primary key (partition key and sort key) would require one KeySchemaElement for the partition key, and another KeySchemaElement for the sort key.

    The partition key of an item is also known as its hash attribute. The term \"hash attribute\" derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values.

    The sort key of an item is also known as its range attribute. The term \"range attribute\" derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.

    ", + "refs": { + "KeySchema$member": null + } + }, + "KeyType": { + "base": null, + "refs": { + "KeySchemaElement$KeyType": "

    The role that the key attribute will assume: HASH - partition key, or RANGE - sort key.

    " + } + }, + "LimitExceededException": { + "base": "

    Your request rate is too high. The AWS SDKs for DynamoDB automatically retry requests that receive this exception. Your request is eventually successful, unless your retry queue is too large to finish. Reduce the frequency of requests and use exponential backoff. For more information, go to Error Retries and Exponential Backoff in the Amazon DynamoDB Developer Guide.

    ", + "refs": { + } + }, + "ListAttributeValue": { + "base": null, + "refs": { + "AttributeValue$L": "

    A List data type.

    " + } + }, + "ListStreamsInput": { + "base": "

    Represents the input of a ListStreams operation.

    ", + "refs": { + } + }, + "ListStreamsOutput": { + "base": "

    Represents the output of a ListStreams operation.

    ", + "refs": { + } + }, + "MapAttributeValue": { + "base": null, + "refs": { + "AttributeValue$M": "

    A Map data type.

    " + } + }, + "NullAttributeValue": { + "base": null, + "refs": { + "AttributeValue$NULL": "

    A Null data type.

    " + } + }, + "NumberAttributeValue": { + "base": null, + "refs": { + "AttributeValue$N": "

    A Number data type.

    ", + "NumberSetAttributeValue$member": null + } + }, + "NumberSetAttributeValue": { + "base": null, + "refs": { + "AttributeValue$NS": "

    A Number Set data type.

    " + } + }, + "OperationType": { + "base": null, + "refs": { + "Record$eventName": "

    The type of data modification that was performed on the DynamoDB table:

    • INSERT - a new item was added to the table.

    • MODIFY - one or more of an existing item's attributes were modified.

    • REMOVE - the item was deleted from the table.

    " + } + }, + "PositiveIntegerObject": { + "base": null, + "refs": { + "DescribeStreamInput$Limit": "

    The maximum number of shard objects to return. The upper limit is 100.

    ", + "GetRecordsInput$Limit": "

    The maximum number of records to return from the shard. The upper limit is 1000.

    ", + "ListStreamsInput$Limit": "

    The maximum number of streams to return. The upper limit is 100.

    " + } + }, + "PositiveLongObject": { + "base": null, + "refs": { + "StreamRecord$SizeBytes": "

    The size of the stream record, in bytes.

    " + } + }, + "Record": { + "base": "

    A description of a unique event within a stream.

    ", + "refs": { + "RecordList$member": null + } + }, + "RecordList": { + "base": null, + "refs": { + "GetRecordsOutput$Records": "

    The stream records from the shard, which were retrieved using the shard iterator.

    " + } + }, + "ResourceNotFoundException": { + "base": "

    The operation tried to access a nonexistent stream.

    ", + "refs": { + } + }, + "SequenceNumber": { + "base": null, + "refs": { + "GetShardIteratorInput$SequenceNumber": "

    The sequence number of a stream record in the shard from which to start reading.

    ", + "SequenceNumberRange$StartingSequenceNumber": "

    The first sequence number.

    ", + "SequenceNumberRange$EndingSequenceNumber": "

    The last sequence number.

    ", + "StreamRecord$SequenceNumber": "

    The sequence number of the stream record.

    " + } + }, + "SequenceNumberRange": { + "base": "

    The beginning and ending sequence numbers for the stream records contained within a shard.

    ", + "refs": { + "Shard$SequenceNumberRange": "

    The range of possible sequence numbers for the shard.

    " + } + }, + "Shard": { + "base": "

    A uniquely identified group of stream records within a stream.

    ", + "refs": { + "ShardDescriptionList$member": null + } + }, + "ShardDescriptionList": { + "base": null, + "refs": { + "StreamDescription$Shards": "

    The shards that comprise the stream.

    " + } + }, + "ShardId": { + "base": null, + "refs": { + "DescribeStreamInput$ExclusiveStartShardId": "

    The shard ID of the first item that this operation will evaluate. Use the value that was returned for LastEvaluatedShardId in the previous operation.

    ", + "GetShardIteratorInput$ShardId": "

    The identifier of the shard. The iterator will be returned for this shard ID.

    ", + "Shard$ShardId": "

    The system-generated identifier for this shard.

    ", + "Shard$ParentShardId": "

    The shard ID of the current shard's parent.

    ", + "StreamDescription$LastEvaluatedShardId": "

    The shard ID of the item where the operation stopped, inclusive of the previous result set. Use this value to start a new operation, excluding this value in the new request.

    If LastEvaluatedShardId is empty, then the \"last page\" of results has been processed and there is currently no more data to be retrieved.

    If LastEvaluatedShardId is not empty, it does not necessarily mean that there is more data in the result set. The only way to know when you have reached the end of the result set is when LastEvaluatedShardId is empty.
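
    A minimal Go sketch of that pagination contract, using the dynamodbstreams client vendored in this tree (the stream ARN is supplied by the caller; nothing else is assumed):

        package example

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/dynamodbstreams"
        )

        // listShards pages through DescribeStream until LastEvaluatedShardId comes
        // back empty (nil in Go), the only reliable end-of-results signal.
        func listShards(svc *dynamodbstreams.DynamoDBStreams, streamArn string) ([]*dynamodbstreams.Shard, error) {
            var shards []*dynamodbstreams.Shard
            var start *string
            for {
                out, err := svc.DescribeStream(&dynamodbstreams.DescribeStreamInput{
                    StreamArn:             aws.String(streamArn),
                    ExclusiveStartShardId: start,
                    Limit:                 aws.Int64(100), // the documented upper limit
                })
                if err != nil {
                    return nil, err
                }
                shards = append(shards, out.StreamDescription.Shards...)
                start = out.StreamDescription.LastEvaluatedShardId
                if start == nil {
                    return shards, nil
                }
            }
        }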

    " + } + }, + "ShardIterator": { + "base": null, + "refs": { + "GetRecordsInput$ShardIterator": "

    A shard iterator that was retrieved from a previous GetShardIterator operation. This iterator can be used to access the stream records in this shard.

    ", + "GetRecordsOutput$NextShardIterator": "

    The next position in the shard from which to start sequentially reading stream records. If set to null, the shard has been closed and the requested iterator will not return any more data.

    ", + "GetShardIteratorOutput$ShardIterator": "

    The position in the shard from which to start reading stream records sequentially. A shard iterator specifies this position using the sequence number of a stream record in a shard.

    " + } + }, + "ShardIteratorType": { + "base": null, + "refs": { + "GetShardIteratorInput$ShardIteratorType": "

    Determines how the shard iterator is used to start reading stream records from the shard:

    • AT_SEQUENCE_NUMBER - Start reading exactly from the position denoted by a specific sequence number.

    • AFTER_SEQUENCE_NUMBER - Start reading right after the position denoted by a specific sequence number.

    • TRIM_HORIZON - Start reading at the last (untrimmed) stream record, which is the oldest record in the shard. In DynamoDB Streams, there is a 24 hour limit on data retention. Stream records whose age exceeds this limit are subject to removal (trimming) from the stream.

    • LATEST - Start reading just after the most recent stream record in the shard, so that you always read the most recent data in the shard.
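
    Tying these iterator types to GetRecords, a hedged sketch (client and identifiers assumed to exist) drains a shard from TRIM_HORIZON until it closes:

        package example

        import (
            "fmt"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/dynamodbstreams"
        )

        // drainShard reads a shard from its oldest untrimmed record to its end.
        // Note: on an open shard NextShardIterator never becomes nil, so production
        // code would sleep between empty reads rather than spin.
        func drainShard(svc *dynamodbstreams.DynamoDBStreams, streamArn, shardId string) error {
            it, err := svc.GetShardIterator(&dynamodbstreams.GetShardIteratorInput{
                StreamArn:         aws.String(streamArn),
                ShardId:           aws.String(shardId),
                ShardIteratorType: aws.String(dynamodbstreams.ShardIteratorTypeTrimHorizon),
            })
            if err != nil {
                return err
            }
            iterator := it.ShardIterator
            for iterator != nil { // a nil NextShardIterator means the shard is closed
                out, err := svc.GetRecords(&dynamodbstreams.GetRecordsInput{
                    ShardIterator: iterator,
                    Limit:         aws.Int64(1000), // the documented upper limit
                })
                if err != nil {
                    return err
                }
                for _, r := range out.Records {
                    fmt.Println(aws.StringValue(r.EventName), aws.StringValue(r.EventID))
                }
                iterator = out.NextShardIterator
            }
            return nil
        }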

    " + } + }, + "Stream": { + "base": "

    Represents all of the data describing a particular stream.

    ", + "refs": { + "StreamList$member": null + } + }, + "StreamArn": { + "base": null, + "refs": { + "DescribeStreamInput$StreamArn": "

    The Amazon Resource Name (ARN) for the stream.

    ", + "GetShardIteratorInput$StreamArn": "

    The Amazon Resource Name (ARN) for the stream.

    ", + "ListStreamsInput$ExclusiveStartStreamArn": "

    The ARN (Amazon Resource Name) of the first item that this operation will evaluate. Use the value that was returned for LastEvaluatedStreamArn in the previous operation.

    ", + "ListStreamsOutput$LastEvaluatedStreamArn": "

    The stream ARN of the item where the operation stopped, inclusive of the previous result set. Use this value to start a new operation, excluding this value in the new request.

    If LastEvaluatedStreamArn is empty, then the \"last page\" of results has been processed and there is no more data to be retrieved.

    If LastEvaluatedStreamArn is not empty, it does not necessarily mean that there is more data in the result set. The only way to know when you have reached the end of the result set is when LastEvaluatedStreamArn is empty.
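
    The same end-of-results rule drives ListStreams pagination; a sketch follows ("MyTable" is a hypothetical table name, and the parameter can be omitted to list every stream on the endpoint):

        package example

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/dynamodbstreams"
        )

        // listAllStreams collects every stream descriptor for one table.
        func listAllStreams(svc *dynamodbstreams.DynamoDBStreams) ([]*dynamodbstreams.Stream, error) {
            var streams []*dynamodbstreams.Stream
            var start *string
            for {
                out, err := svc.ListStreams(&dynamodbstreams.ListStreamsInput{
                    TableName:               aws.String("MyTable"), // hypothetical
                    ExclusiveStartStreamArn: start,
                    Limit:                   aws.Int64(100), // the documented upper limit
                })
                if err != nil {
                    return nil, err
                }
                streams = append(streams, out.Streams...)
                start = out.LastEvaluatedStreamArn
                if start == nil { // empty means the last page was processed
                    return streams, nil
                }
            }
        }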

    ", + "Stream$StreamArn": "

    The Amazon Resource Name (ARN) for the stream.

    ", + "StreamDescription$StreamArn": "

    The Amazon Resource Name (ARN) for the stream.

    " + } + }, + "StreamDescription": { + "base": "

    Represents all of the data describing a particular stream.

    ", + "refs": { + "DescribeStreamOutput$StreamDescription": "

    A complete description of the stream, including its creation date and time, the DynamoDB table associated with the stream, the shard IDs within the stream, and the beginning and ending sequence numbers of stream records within the shards.

    " + } + }, + "StreamList": { + "base": null, + "refs": { + "ListStreamsOutput$Streams": "

    A list of stream descriptors associated with the current account and endpoint.

    " + } + }, + "StreamRecord": { + "base": "

    A description of a single data modification that was performed on an item in a DynamoDB table.

    ", + "refs": { + "Record$dynamodb": "

    The main body of the stream record, containing all of the DynamoDB-specific fields.

    " + } + }, + "StreamStatus": { + "base": null, + "refs": { + "StreamDescription$StreamStatus": "

    Indicates the current status of the stream:

    • ENABLING - Streams is currently being enabled on the DynamoDB table.

    • ENABLED - the stream is enabled.

    • DISABLING - Streams is currently being disabled on the DynamoDB table.

    • DISABLED - the stream is disabled.

    " + } + }, + "StreamViewType": { + "base": null, + "refs": { + "StreamDescription$StreamViewType": "

    Indicates the format of the records within this stream:

    • KEYS_ONLY - only the key attributes of items that were modified in the DynamoDB table.

    • NEW_IMAGE - entire items from the table, as they appeared after they were modified.

    • OLD_IMAGE - entire items from the table, as they appeared before they were modified.

    • NEW_AND_OLD_IMAGES - both the new and the old images of the items from the table.

    ", + "StreamRecord$StreamViewType": "

    The type of data from the modified DynamoDB item that was captured in this stream record:

    • KEYS_ONLY - only the key attributes of the modified item.

    • NEW_IMAGE - the entire item, as it appeared after it was modified.

    • OLD_IMAGE - the entire item, as it appeared before it was modified.

    • NEW_AND_OLD_IMAGES - both the new and the old item images of the item.
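
    A consumer has to branch on this value before touching the item images; a hedged sketch using the view-type constants generated in this package:

        package example

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/dynamodbstreams"
        )

        // images returns whichever item images the stream's view type captured.
        func images(r *dynamodbstreams.Record) (newImg, oldImg map[string]*dynamodbstreams.AttributeValue) {
            sr := r.Dynamodb
            switch aws.StringValue(sr.StreamViewType) {
            case dynamodbstreams.StreamViewTypeNewImage:
                return sr.NewImage, nil
            case dynamodbstreams.StreamViewTypeOldImage:
                return nil, sr.OldImage
            case dynamodbstreams.StreamViewTypeNewAndOldImages:
                return sr.NewImage, sr.OldImage
            default: // KEYS_ONLY: only sr.Keys is populated
                return nil, nil
            }
        }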

    " + } + }, + "String": { + "base": null, + "refs": { + "Record$eventID": "

    A globally unique identifier for the event that was recorded in this stream record.

    ", + "Record$eventVersion": "

    The version number of the stream record format. This number is updated whenever the structure of Record is modified.

    Client applications must not assume that eventVersion will remain at a particular value, as this number is subject to change at any time. In general, eventVersion will only increase as the low-level DynamoDB Streams API evolves.

    ", + "Record$eventSource": "

    The AWS service from which the stream record originated. For DynamoDB Streams, this is aws:dynamodb.

    ", + "Record$awsRegion": "

    The region in which the GetRecords request was received.

    ", + "Stream$StreamLabel": "

    A timestamp, in ISO 8601 format, for this stream.

    Note that LatestStreamLabel is not a unique identifier for the stream, because it is possible that a stream from another table might have the same timestamp. However, the combination of the following three elements is guaranteed to be unique:

    • the AWS customer ID

    • the table name

    • the StreamLabel

    ", + "StreamDescription$StreamLabel": "

    A timestamp, in ISO 8601 format, for this stream.

    Note that LatestStreamLabel is not a unique identifier for the stream, because it is possible that a stream from another table might have the same timestamp. However, the combination of the following three elements is guaranteed to be unique:

    • the AWS customer ID

    • the table name

    • the StreamLabel

    " + } + }, + "StringAttributeValue": { + "base": null, + "refs": { + "AttributeValue$S": "

    A String data type.

    ", + "StringSetAttributeValue$member": null + } + }, + "StringSetAttributeValue": { + "base": null, + "refs": { + "AttributeValue$SS": "

    A String Set data type.

    " + } + }, + "TableName": { + "base": null, + "refs": { + "ListStreamsInput$TableName": "

    If this parameter is provided, then only the streams associated with this table name are returned.

    ", + "Stream$TableName": "

    The DynamoDB table with which the stream is associated.

    ", + "StreamDescription$TableName": "

    The DynamoDB table with which the stream is associated.

    " + } + }, + "TrimmedDataAccessException": { + "base": "

    The operation attempted to read past the oldest stream record in a shard.

    In DynamoDB Streams, there is a 24 hour limit on data retention. Stream records whose age exceeds this limit are subject to removal (trimming) from the stream. You might receive a TrimmedDataAccessException if:

    • You request a shard iterator with a sequence number older than the trim point (24 hours).

    • You obtain a shard iterator, but before you use the iterator in a GetRecords request, a stream record in the shard exceeds the 24 hour period and is trimmed. This causes the iterator to access a record that no longer exists.
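
    One hedged way to recover, assuming the ErrCode constants generated in this package's errors.go, is to re-seed the iterator at TRIM_HORIZON and retry once:

        package example

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/awserr"
            "github.com/aws/aws-sdk-go/service/dynamodbstreams"
        )

        // getRecordsOrRewind fetches records; if the iterator has aged past the
        // 24 hour trim point, it fetches a fresh TRIM_HORIZON iterator and retries.
        func getRecordsOrRewind(svc *dynamodbstreams.DynamoDBStreams, streamArn, shardId string, iterator *string) (*dynamodbstreams.GetRecordsOutput, error) {
            out, err := svc.GetRecords(&dynamodbstreams.GetRecordsInput{ShardIterator: iterator})
            if aerr, ok := err.(awserr.Error); ok && aerr.Code() == dynamodbstreams.ErrCodeTrimmedDataAccessException {
                it, ierr := svc.GetShardIterator(&dynamodbstreams.GetShardIteratorInput{
                    StreamArn:         aws.String(streamArn),
                    ShardId:           aws.String(shardId),
                    ShardIteratorType: aws.String(dynamodbstreams.ShardIteratorTypeTrimHorizon),
                })
                if ierr != nil {
                    return nil, ierr
                }
                return svc.GetRecords(&dynamodbstreams.GetRecordsInput{ShardIterator: it.ShardIterator})
            }
            return out, err
        }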

    ", + "refs": { + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/streams.dynamodb/2012-08-10/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/streams.dynamodb/2012-08-10/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/streams.dynamodb/2012-08-10/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/sts/2011-06-15/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/sts/2011-06-15/api-2.json new file mode 100644 index 000000000..5859e0e50 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/sts/2011-06-15/api-2.json @@ -0,0 +1,521 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2011-06-15", + "endpointPrefix":"sts", + "globalEndpoint":"sts.amazonaws.com", + "protocol":"query", + "serviceAbbreviation":"AWS STS", + "serviceFullName":"AWS Security Token Service", + "signatureVersion":"v4", + "xmlNamespace":"https://sts.amazonaws.com/doc/2011-06-15/" + }, + "operations":{ + "AssumeRole":{ + "name":"AssumeRole", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssumeRoleRequest"}, + "output":{ + "shape":"AssumeRoleResponse", + "resultWrapper":"AssumeRoleResult" + }, + "errors":[ + {"shape":"MalformedPolicyDocumentException"}, + {"shape":"PackedPolicyTooLargeException"}, + {"shape":"RegionDisabledException"} + ] + }, + "AssumeRoleWithSAML":{ + "name":"AssumeRoleWithSAML", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssumeRoleWithSAMLRequest"}, + "output":{ + "shape":"AssumeRoleWithSAMLResponse", + "resultWrapper":"AssumeRoleWithSAMLResult" + }, + "errors":[ + {"shape":"MalformedPolicyDocumentException"}, + {"shape":"PackedPolicyTooLargeException"}, + {"shape":"IDPRejectedClaimException"}, + {"shape":"InvalidIdentityTokenException"}, + {"shape":"ExpiredTokenException"}, + {"shape":"RegionDisabledException"} + ] + }, + "AssumeRoleWithWebIdentity":{ + "name":"AssumeRoleWithWebIdentity", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssumeRoleWithWebIdentityRequest"}, + "output":{ + "shape":"AssumeRoleWithWebIdentityResponse", + "resultWrapper":"AssumeRoleWithWebIdentityResult" + }, + "errors":[ + {"shape":"MalformedPolicyDocumentException"}, + {"shape":"PackedPolicyTooLargeException"}, + {"shape":"IDPRejectedClaimException"}, + {"shape":"IDPCommunicationErrorException"}, + {"shape":"InvalidIdentityTokenException"}, + {"shape":"ExpiredTokenException"}, + {"shape":"RegionDisabledException"} + ] + }, + "DecodeAuthorizationMessage":{ + "name":"DecodeAuthorizationMessage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DecodeAuthorizationMessageRequest"}, + "output":{ + "shape":"DecodeAuthorizationMessageResponse", + "resultWrapper":"DecodeAuthorizationMessageResult" + }, + "errors":[ + {"shape":"InvalidAuthorizationMessageException"} + ] + }, + "GetCallerIdentity":{ + "name":"GetCallerIdentity", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetCallerIdentityRequest"}, + "output":{ + "shape":"GetCallerIdentityResponse", + "resultWrapper":"GetCallerIdentityResult" + } + }, + "GetFederationToken":{ + "name":"GetFederationToken", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetFederationTokenRequest"}, + "output":{ + "shape":"GetFederationTokenResponse", + "resultWrapper":"GetFederationTokenResult" + }, + "errors":[ + 
{"shape":"MalformedPolicyDocumentException"}, + {"shape":"PackedPolicyTooLargeException"}, + {"shape":"RegionDisabledException"} + ] + }, + "GetSessionToken":{ + "name":"GetSessionToken", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetSessionTokenRequest"}, + "output":{ + "shape":"GetSessionTokenResponse", + "resultWrapper":"GetSessionTokenResult" + }, + "errors":[ + {"shape":"RegionDisabledException"} + ] + } + }, + "shapes":{ + "AssumeRoleRequest":{ + "type":"structure", + "required":[ + "RoleArn", + "RoleSessionName" + ], + "members":{ + "RoleArn":{"shape":"arnType"}, + "RoleSessionName":{"shape":"roleSessionNameType"}, + "Policy":{"shape":"sessionPolicyDocumentType"}, + "DurationSeconds":{"shape":"roleDurationSecondsType"}, + "ExternalId":{"shape":"externalIdType"}, + "SerialNumber":{"shape":"serialNumberType"}, + "TokenCode":{"shape":"tokenCodeType"} + } + }, + "AssumeRoleResponse":{ + "type":"structure", + "members":{ + "Credentials":{"shape":"Credentials"}, + "AssumedRoleUser":{"shape":"AssumedRoleUser"}, + "PackedPolicySize":{"shape":"nonNegativeIntegerType"} + } + }, + "AssumeRoleWithSAMLRequest":{ + "type":"structure", + "required":[ + "RoleArn", + "PrincipalArn", + "SAMLAssertion" + ], + "members":{ + "RoleArn":{"shape":"arnType"}, + "PrincipalArn":{"shape":"arnType"}, + "SAMLAssertion":{"shape":"SAMLAssertionType"}, + "Policy":{"shape":"sessionPolicyDocumentType"}, + "DurationSeconds":{"shape":"roleDurationSecondsType"} + } + }, + "AssumeRoleWithSAMLResponse":{ + "type":"structure", + "members":{ + "Credentials":{"shape":"Credentials"}, + "AssumedRoleUser":{"shape":"AssumedRoleUser"}, + "PackedPolicySize":{"shape":"nonNegativeIntegerType"}, + "Subject":{"shape":"Subject"}, + "SubjectType":{"shape":"SubjectType"}, + "Issuer":{"shape":"Issuer"}, + "Audience":{"shape":"Audience"}, + "NameQualifier":{"shape":"NameQualifier"} + } + }, + "AssumeRoleWithWebIdentityRequest":{ + "type":"structure", + "required":[ + "RoleArn", + "RoleSessionName", + "WebIdentityToken" + ], + "members":{ + "RoleArn":{"shape":"arnType"}, + "RoleSessionName":{"shape":"roleSessionNameType"}, + "WebIdentityToken":{"shape":"clientTokenType"}, + "ProviderId":{"shape":"urlType"}, + "Policy":{"shape":"sessionPolicyDocumentType"}, + "DurationSeconds":{"shape":"roleDurationSecondsType"} + } + }, + "AssumeRoleWithWebIdentityResponse":{ + "type":"structure", + "members":{ + "Credentials":{"shape":"Credentials"}, + "SubjectFromWebIdentityToken":{"shape":"webIdentitySubjectType"}, + "AssumedRoleUser":{"shape":"AssumedRoleUser"}, + "PackedPolicySize":{"shape":"nonNegativeIntegerType"}, + "Provider":{"shape":"Issuer"}, + "Audience":{"shape":"Audience"} + } + }, + "AssumedRoleUser":{ + "type":"structure", + "required":[ + "AssumedRoleId", + "Arn" + ], + "members":{ + "AssumedRoleId":{"shape":"assumedRoleIdType"}, + "Arn":{"shape":"arnType"} + } + }, + "Audience":{"type":"string"}, + "Credentials":{ + "type":"structure", + "required":[ + "AccessKeyId", + "SecretAccessKey", + "SessionToken", + "Expiration" + ], + "members":{ + "AccessKeyId":{"shape":"accessKeyIdType"}, + "SecretAccessKey":{"shape":"accessKeySecretType"}, + "SessionToken":{"shape":"tokenType"}, + "Expiration":{"shape":"dateType"} + } + }, + "DecodeAuthorizationMessageRequest":{ + "type":"structure", + "required":["EncodedMessage"], + "members":{ + "EncodedMessage":{"shape":"encodedMessageType"} + } + }, + "DecodeAuthorizationMessageResponse":{ + "type":"structure", + "members":{ + "DecodedMessage":{"shape":"decodedMessageType"} 
+ } + }, + "ExpiredTokenException":{ + "type":"structure", + "members":{ + "message":{"shape":"expiredIdentityTokenMessage"} + }, + "error":{ + "code":"ExpiredTokenException", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "FederatedUser":{ + "type":"structure", + "required":[ + "FederatedUserId", + "Arn" + ], + "members":{ + "FederatedUserId":{"shape":"federatedIdType"}, + "Arn":{"shape":"arnType"} + } + }, + "GetCallerIdentityRequest":{ + "type":"structure", + "members":{ + } + }, + "GetCallerIdentityResponse":{ + "type":"structure", + "members":{ + "UserId":{"shape":"userIdType"}, + "Account":{"shape":"accountType"}, + "Arn":{"shape":"arnType"} + } + }, + "GetFederationTokenRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"userNameType"}, + "Policy":{"shape":"sessionPolicyDocumentType"}, + "DurationSeconds":{"shape":"durationSecondsType"} + } + }, + "GetFederationTokenResponse":{ + "type":"structure", + "members":{ + "Credentials":{"shape":"Credentials"}, + "FederatedUser":{"shape":"FederatedUser"}, + "PackedPolicySize":{"shape":"nonNegativeIntegerType"} + } + }, + "GetSessionTokenRequest":{ + "type":"structure", + "members":{ + "DurationSeconds":{"shape":"durationSecondsType"}, + "SerialNumber":{"shape":"serialNumberType"}, + "TokenCode":{"shape":"tokenCodeType"} + } + }, + "GetSessionTokenResponse":{ + "type":"structure", + "members":{ + "Credentials":{"shape":"Credentials"} + } + }, + "IDPCommunicationErrorException":{ + "type":"structure", + "members":{ + "message":{"shape":"idpCommunicationErrorMessage"} + }, + "error":{ + "code":"IDPCommunicationError", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "IDPRejectedClaimException":{ + "type":"structure", + "members":{ + "message":{"shape":"idpRejectedClaimMessage"} + }, + "error":{ + "code":"IDPRejectedClaim", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "InvalidAuthorizationMessageException":{ + "type":"structure", + "members":{ + "message":{"shape":"invalidAuthorizationMessage"} + }, + "error":{ + "code":"InvalidAuthorizationMessageException", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidIdentityTokenException":{ + "type":"structure", + "members":{ + "message":{"shape":"invalidIdentityTokenMessage"} + }, + "error":{ + "code":"InvalidIdentityToken", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Issuer":{"type":"string"}, + "MalformedPolicyDocumentException":{ + "type":"structure", + "members":{ + "message":{"shape":"malformedPolicyDocumentMessage"} + }, + "error":{ + "code":"MalformedPolicyDocument", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "NameQualifier":{"type":"string"}, + "PackedPolicyTooLargeException":{ + "type":"structure", + "members":{ + "message":{"shape":"packedPolicyTooLargeMessage"} + }, + "error":{ + "code":"PackedPolicyTooLarge", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "RegionDisabledException":{ + "type":"structure", + "members":{ + "message":{"shape":"regionDisabledMessage"} + }, + "error":{ + "code":"RegionDisabledException", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "SAMLAssertionType":{ + "type":"string", + "max":50000, + "min":4 + }, + "Subject":{"type":"string"}, + "SubjectType":{"type":"string"}, + "accessKeyIdType":{ + "type":"string", + "max":32, + "min":16, + "pattern":"[\\w]*" + }, + 
"accessKeySecretType":{"type":"string"}, + "accountType":{"type":"string"}, + "arnType":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u0085\\u00A0-\\uD7FF\\uE000-\\uFFFD\\u10000-\\u10FFFF]+" + }, + "assumedRoleIdType":{ + "type":"string", + "max":96, + "min":2, + "pattern":"[\\w+=,.@:-]*" + }, + "clientTokenType":{ + "type":"string", + "max":2048, + "min":4 + }, + "dateType":{"type":"timestamp"}, + "decodedMessageType":{"type":"string"}, + "durationSecondsType":{ + "type":"integer", + "max":129600, + "min":900 + }, + "encodedMessageType":{ + "type":"string", + "max":10240, + "min":1 + }, + "expiredIdentityTokenMessage":{"type":"string"}, + "externalIdType":{ + "type":"string", + "max":1224, + "min":2, + "pattern":"[\\w+=,.@:\\/-]*" + }, + "federatedIdType":{ + "type":"string", + "max":96, + "min":2, + "pattern":"[\\w+=,.@\\:-]*" + }, + "idpCommunicationErrorMessage":{"type":"string"}, + "idpRejectedClaimMessage":{"type":"string"}, + "invalidAuthorizationMessage":{"type":"string"}, + "invalidIdentityTokenMessage":{"type":"string"}, + "malformedPolicyDocumentMessage":{"type":"string"}, + "nonNegativeIntegerType":{ + "type":"integer", + "min":0 + }, + "packedPolicyTooLargeMessage":{"type":"string"}, + "regionDisabledMessage":{"type":"string"}, + "roleDurationSecondsType":{ + "type":"integer", + "max":3600, + "min":900 + }, + "roleSessionNameType":{ + "type":"string", + "max":64, + "min":2, + "pattern":"[\\w+=,.@-]*" + }, + "serialNumberType":{ + "type":"string", + "max":256, + "min":9, + "pattern":"[\\w+=/:,.@-]*" + }, + "sessionPolicyDocumentType":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u00FF]+" + }, + "tokenCodeType":{ + "type":"string", + "max":6, + "min":6, + "pattern":"[\\d]*" + }, + "tokenType":{"type":"string"}, + "urlType":{ + "type":"string", + "max":2048, + "min":4 + }, + "userIdType":{"type":"string"}, + "userNameType":{ + "type":"string", + "max":32, + "min":2, + "pattern":"[\\w+=,.@-]*" + }, + "webIdentitySubjectType":{ + "type":"string", + "max":255, + "min":6 + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/sts/2011-06-15/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/sts/2011-06-15/docs-2.json new file mode 100644 index 000000000..e93e88489 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/sts/2011-06-15/docs-2.json @@ -0,0 +1,391 @@ +{ + "version": "2.0", + "service": "AWS Security Token Service

    The AWS Security Token Service (STS) is a web service that enables you to request temporary, limited-privilege credentials for AWS Identity and Access Management (IAM) users or for users that you authenticate (federated users). This guide provides descriptions of the STS API. For more detailed information about using this service, go to Temporary Security Credentials.

    As an alternative to using the API, you can use one of the AWS SDKs, which consist of libraries and sample code for various programming languages and platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient way to create programmatic access to STS. For example, the SDKs take care of cryptographically signing requests, managing errors, and retrying requests automatically. For information about the AWS SDKs, including how to download and install them, see the Tools for Amazon Web Services page.

    For information about setting up signatures and authorization through the API, go to Signing AWS API Requests in the AWS General Reference. For general information about the Query API, go to Making Query Requests in Using IAM. For information about using security tokens with other AWS products, go to AWS Services That Work with IAM in the IAM User Guide.

    If you're new to AWS and need additional technical information about a specific AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/.

    Endpoints

    The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com that maps to the US East (N. Virginia) region. Additional regions are available and are activated by default. For more information, see Activating and Deactivating AWS STS in an AWS Region in the IAM User Guide.

    For information about STS endpoints, see Regions and Endpoints in the AWS General Reference.

    Recording API requests

    STS supports AWS CloudTrail, which is a service that records AWS calls for your AWS account and delivers log files to an Amazon S3 bucket. By using information collected by CloudTrail, you can determine what requests were successfully made to STS, who made the request, when it was made, and so on. To learn more about CloudTrail, including how to turn it on and find your log files, see the AWS CloudTrail User Guide.

    ", + "operations": { + "AssumeRole": "

    Returns a set of temporary security credentials (consisting of an access key ID, a secret access key, and a security token) that you can use to access AWS resources that you might not normally have access to. Typically, you use AssumeRole for cross-account access or federation. For a comparison of AssumeRole with the other APIs that produce temporary credentials, see Requesting Temporary Security Credentials and Comparing the AWS STS APIs in the IAM User Guide.

    Important: You cannot call AssumeRole by using AWS root account credentials; access is denied. You must use credentials for an IAM user or an IAM role to call AssumeRole.

    For cross-account access, imagine that you own multiple accounts and need to access resources in each account. You could create long-term credentials in each account to access those resources. However, managing all those credentials and remembering which one can access which account can be time consuming. Instead, you can create one set of long-term credentials in one account and then use temporary security credentials to access all the other accounts by assuming roles in those accounts. For more information about roles, see IAM Roles (Delegation and Federation) in the IAM User Guide.

    For federation, you can, for example, grant single sign-on access to the AWS Management Console. If you already have an identity and authentication system in your corporate network, you don't have to recreate user identities in AWS in order to grant those user identities access to AWS. Instead, after a user has been authenticated, you call AssumeRole (and specify the role with the appropriate permissions) to get temporary security credentials for that user. With those temporary security credentials, you construct a sign-in URL that users can use to access the console. For more information, see Common Scenarios for Temporary Credentials in the IAM User Guide.

    The temporary security credentials are valid for the duration that you specified when calling AssumeRole, which can be from 900 seconds (15 minutes) to a maximum of 3600 seconds (1 hour). The default is 1 hour.

    The temporary security credentials created by AssumeRole can be used to make API calls to any AWS service with the following exception: you cannot call the STS service's GetFederationToken or GetSessionToken APIs.

    Optionally, you can pass an IAM access policy to this operation. If you choose not to pass a policy, the temporary security credentials that are returned by the operation have the permissions that are defined in the access policy of the role that is being assumed. If you pass a policy to this operation, the temporary security credentials that are returned by the operation have the permissions that are allowed by both the access policy of the role that is being assumed, and the policy that you pass. This gives you a way to further restrict the permissions for the resulting temporary security credentials. You cannot use the passed policy to grant permissions that are in excess of those allowed by the access policy of the role that is being assumed. For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity in the IAM User Guide.

    To assume a role, your AWS account must be trusted by the role. The trust relationship is defined in the role's trust policy when the role is created. That trust policy states which accounts are allowed to delegate access to this account's role.

    The user who wants to access the role must also have permissions delegated from the role's administrator. If the user is in a different account than the role, then the user's administrator must attach a policy that allows the user to call AssumeRole on the ARN of the role in the other account. If the user is in the same account as the role, then you can either attach a policy to the user (identical to the previous different account user), or you can add the user as a principal directly in the role's trust policy.

    Using MFA with AssumeRole

    You can optionally include multi-factor authentication (MFA) information when you call AssumeRole. This is useful for cross-account scenarios in which you want to make sure that the user who is assuming the role has been authenticated using an AWS MFA device. In that scenario, the trust policy of the role being assumed includes a condition that tests for MFA authentication; if the caller does not include valid MFA information, the request to assume the role is denied. The condition in a trust policy that tests for MFA authentication might look like the following example.

    \"Condition\": {\"Bool\": {\"aws:MultiFactorAuthPresent\": true}}

    For more information, see Configuring MFA-Protected API Access in the IAM User Guide.

    To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode parameters. The SerialNumber value identifies the user's hardware or virtual MFA device. The TokenCode is the time-based one-time password (TOTP) that the MFA device produces.
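
    A hedged Go sketch of exactly that call; both ARNs are made-up placeholders, and the TOTP code comes from the caller:

        package example

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/sts"
        )

        // assumeWithMFA exchanges long-term IAM user credentials plus an MFA code
        // for temporary role credentials.
        func assumeWithMFA(svc *sts.STS, tokenCode string) (*sts.Credentials, error) {
            out, err := svc.AssumeRole(&sts.AssumeRoleInput{
                RoleArn:         aws.String("arn:aws:iam::123456789012:role/demo"), // hypothetical
                RoleSessionName: aws.String("mfa-session"),
                DurationSeconds: aws.Int64(3600), // 900..3600 seconds, per the text above
                SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/user"), // hypothetical
                TokenCode:       aws.String(tokenCode),
            })
            if err != nil {
                return nil, err
            }
            return out.Credentials, nil
        }

    For long-running callers, the stscreds package in this SDK wraps the same AssumeRole call with automatic credential refresh.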

    ", + "AssumeRoleWithSAML": "

    Returns a set of temporary security credentials for users who have been authenticated via a SAML authentication response. This operation provides a mechanism for tying an enterprise identity store or directory to role-based AWS access without user-specific credentials or configuration. For a comparison of AssumeRoleWithSAML with the other APIs that produce temporary credentials, see Requesting Temporary Security Credentials and Comparing the AWS STS APIs in the IAM User Guide.

    The temporary security credentials returned by this operation consist of an access key ID, a secret access key, and a security token. Applications can use these temporary security credentials to sign calls to AWS services.

    The temporary security credentials are valid for the duration that you specified when calling AssumeRoleWithSAML, or until the time specified in the SAML authentication response's SessionNotOnOrAfter value, whichever is shorter. The duration can be from 900 seconds (15 minutes) to a maximum of 3600 seconds (1 hour). The default is 1 hour.

    The temporary security credentials created by AssumeRoleWithSAML can be used to make API calls to any AWS service with the following exception: you cannot call the STS service's GetFederationToken or GetSessionToken APIs.

    Optionally, you can pass an IAM access policy to this operation. If you choose not to pass a policy, the temporary security credentials that are returned by the operation have the permissions that are defined in the access policy of the role that is being assumed. If you pass a policy to this operation, the temporary security credentials that are returned by the operation have the permissions that are allowed by both the access policy of the role that is being assumed, and the policy that you pass. This gives you a way to further restrict the permissions for the resulting temporary security credentials. You cannot use the passed policy to grant permissions that are in excess of those allowed by the access policy of the role that is being assumed. For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity in the IAM User Guide.

    Before your application can call AssumeRoleWithSAML, you must configure your SAML identity provider (IdP) to issue the claims required by AWS. Additionally, you must use AWS Identity and Access Management (IAM) to create a SAML provider entity in your AWS account that represents your identity provider, and create an IAM role that specifies this SAML provider in its trust policy.

    Calling AssumeRoleWithSAML does not require the use of AWS security credentials. The identity of the caller is validated by using keys in the metadata document that is uploaded for the SAML provider entity for your identity provider.

    Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail logs. The entry includes the value in the NameID element of the SAML assertion. We recommend that you use a NameIDType that is not associated with any personally identifiable information (PII). For example, you could instead use the Persistent Identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent).
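
    A hedged sketch of the call itself (ARNs are placeholders; because the caller is validated through the IdP's metadata, the client can be built with anonymous credentials):

        package example

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/sts"
        )

        // assumeViaSAML trades a base64-encoded SAML assertion for temporary credentials.
        func assumeViaSAML(svc *sts.STS, samlAssertion string) (*sts.AssumeRoleWithSAMLOutput, error) {
            return svc.AssumeRoleWithSAML(&sts.AssumeRoleWithSAMLInput{
                RoleArn:         aws.String("arn:aws:iam::123456789012:role/saml-role"),          // hypothetical
                PrincipalArn:    aws.String("arn:aws:iam::123456789012:saml-provider/MySAMLIdP"), // hypothetical
                SAMLAssertion:   aws.String(samlAssertion),
                DurationSeconds: aws.Int64(3600),
            })
        }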

    For more information, see the following resources:

    ", + "AssumeRoleWithWebIdentity": "

    Returns a set of temporary security credentials for users who have been authenticated in a mobile or web application with a web identity provider, such as Amazon Cognito, Login with Amazon, Facebook, Google, or any OpenID Connect-compatible identity provider.

    For mobile applications, we recommend that you use Amazon Cognito. You can use Amazon Cognito with the AWS SDK for iOS and the AWS SDK for Android to uniquely identify a user and supply the user with a consistent identity throughout the lifetime of an application.

    To learn more about Amazon Cognito, see Amazon Cognito Overview in the AWS SDK for Android Developer Guide and Amazon Cognito Overview in the AWS SDK for iOS Developer Guide.

    Calling AssumeRoleWithWebIdentity does not require the use of AWS security credentials. Therefore, you can distribute an application (for example, on mobile devices) that requests temporary security credentials without including long-term AWS credentials in the application, and without deploying server-based proxy services that use long-term AWS credentials. Instead, the identity of the caller is validated by using a token from the web identity provider. For a comparison of AssumeRoleWithWebIdentity with the other APIs that produce temporary credentials, see Requesting Temporary Security Credentials and Comparing the AWS STS APIs in the IAM User Guide.

    The temporary security credentials returned by this API consist of an access key ID, a secret access key, and a security token. Applications can use these temporary security credentials to sign calls to AWS service APIs.

    The credentials are valid for the duration that you specified when calling AssumeRoleWithWebIdentity, which can be from 900 seconds (15 minutes) to a maximum of 3600 seconds (1 hour). The default is 1 hour.

    The temporary security credentials created by AssumeRoleWithWebIdentity can be used to make API calls to any AWS service with the following exception: you cannot call the STS service's GetFederationToken or GetSessionToken APIs.

    Optionally, you can pass an IAM access policy to this operation. If you choose not to pass a policy, the temporary security credentials that are returned by the operation have the permissions that are defined in the access policy of the role that is being assumed. If you pass a policy to this operation, the temporary security credentials that are returned by the operation have the permissions that are allowed by both the access policy of the role that is being assumed, and the policy that you pass. This gives you a way to further restrict the permissions for the resulting temporary security credentials. You cannot use the passed policy to grant permissions that are in excess of those allowed by the access policy of the role that is being assumed. For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity in the IAM User Guide.

    Before your application can call AssumeRoleWithWebIdentity, you must have an identity token from a supported identity provider and create a role that the application can assume. The role that your application assumes must trust the identity provider that is associated with the identity token. In other words, the identity provider must be specified in the role's trust policy.

    Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail logs. The entry includes the Subject of the provided Web Identity Token. We recommend that you avoid using any personally identifiable information (PII) in this field. For example, you could instead use a GUID or a pairwise identifier, as suggested in the OIDC specification.

    For more information about how to use web identity federation and the AssumeRoleWithWebIdentity API, see the following resources:
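
    A hedged sketch (the role ARN is a placeholder; the token is whatever the web identity provider issued):

        package example

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/sts"
        )

        // assumeViaWebIdentity trades an OIDC ID token for temporary credentials;
        // no long-term AWS credentials are embedded in the caller.
        func assumeViaWebIdentity(svc *sts.STS, idToken string) (*sts.AssumeRoleWithWebIdentityOutput, error) {
            return svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
                RoleArn:          aws.String("arn:aws:iam::123456789012:role/web-identity"), // hypothetical
                RoleSessionName:  aws.String("app-user-session"),
                WebIdentityToken: aws.String(idToken),
            })
        }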

    ", + "DecodeAuthorizationMessage": "

    Decodes additional information about the authorization status of a request from an encoded message returned in response to an AWS request.

    For example, if a user is not authorized to perform an action that he or she has requested, the request returns a Client.UnauthorizedOperation response (an HTTP 403 response). Some AWS actions additionally return an encoded message that can provide details about this authorization failure.

    Only certain AWS actions return an encoded authorization message. The documentation for an individual action indicates whether that action returns an encoded message in addition to returning an HTTP code.

    The message is encoded because the details of the authorization status can constitute privileged information that the user who requested the action should not see. To decode an authorization status message, a user must be granted permissions via an IAM policy to request the DecodeAuthorizationMessage (sts:DecodeAuthorizationMessage) action.

    The decoded message includes the following type of information:

    • Whether the request was denied due to an explicit deny or due to the absence of an explicit allow. For more information, see Determining Whether a Request is Allowed or Denied in the IAM User Guide.

    • The principal who made the request.

    • The requested action.

    • The requested resource.

    • The values of condition keys in the context of the user's request.
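
    A minimal sketch; the encoded message is the opaque string returned alongside the Client.UnauthorizedOperation error, and the caller needs the sts:DecodeAuthorizationMessage permission noted above:

        package example

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/sts"
        )

        // decodeDenial turns an encoded authorization failure into readable JSON.
        func decodeDenial(svc *sts.STS, encoded string) (string, error) {
            out, err := svc.DecodeAuthorizationMessage(&sts.DecodeAuthorizationMessageInput{
                EncodedMessage: aws.String(encoded),
            })
            if err != nil {
                return "", err
            }
            return aws.StringValue(out.DecodedMessage), nil
        }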

    ", + "GetCallerIdentity": "

    Returns details about the IAM identity whose credentials are used to call the API.
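
    Since the operation takes no parameters, a complete hedged example fits in a few lines (region and credentials come from the default chain):

        package main

        import (
            "fmt"
            "log"

            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/aws/session"
            "github.com/aws/aws-sdk-go/service/sts"
        )

        func main() {
            svc := sts.New(session.Must(session.NewSession()))
            out, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println(aws.StringValue(out.Account), aws.StringValue(out.Arn), aws.StringValue(out.UserId))
        }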

    ", + "GetFederationToken": "

    Returns a set of temporary security credentials (consisting of an access key ID, a secret access key, and a security token) for a federated user. A typical use is in a proxy application that gets temporary security credentials on behalf of distributed applications inside a corporate network. Because you must call the GetFederationToken action using the long-term security credentials of an IAM user, this call is appropriate in contexts where those credentials can be safely stored, usually in a server-based application. For a comparison of GetFederationToken with the other APIs that produce temporary credentials, see Requesting Temporary Security Credentials and Comparing the AWS STS APIs in the IAM User Guide.

    If you are creating a mobile-based or browser-based app that can authenticate users using a web identity provider like Login with Amazon, Facebook, Google, or an OpenID Connect-compatible identity provider, we recommend that you use Amazon Cognito or AssumeRoleWithWebIdentity. For more information, see Federation Through a Web-based Identity Provider.

    The GetFederationToken action must be called by using the long-term AWS security credentials of an IAM user. You can also call GetFederationToken using the security credentials of an AWS root account, but we do not recommend it. Instead, we recommend that you create an IAM user for the purpose of the proxy application and then attach a policy to the IAM user that limits federated users to only the actions and resources that they need access to. For more information, see IAM Best Practices in the IAM User Guide.

    The temporary security credentials that are obtained by using the long-term credentials of an IAM user are valid for the specified duration, from 900 seconds (15 minutes) up to a maximum of 129600 seconds (36 hours). The default is 43200 seconds (12 hours). Temporary credentials that are obtained by using AWS root account credentials have a maximum duration of 3600 seconds (1 hour).

    The temporary security credentials created by GetFederationToken can be used to make API calls to any AWS service with the following exceptions:

    • You cannot use these credentials to call any IAM APIs.

    • You cannot call any STS APIs.

    Permissions

    The permissions for the temporary security credentials returned by GetFederationToken are determined by a combination of the following:

    • The policy or policies that are attached to the IAM user whose credentials are used to call GetFederationToken.

    • The policy that is passed as a parameter in the call.

    The passed policy is attached to the temporary security credentials that result from the GetFederationToken API call--that is, to the federated user. When the federated user makes an AWS request, AWS evaluates the policy attached to the federated user in combination with the policy or policies attached to the IAM user whose credentials were used to call GetFederationToken. AWS allows the federated user's request only when both the federated user and the IAM user are explicitly allowed to perform the requested action. The passed policy cannot grant more permissions than those that are defined in the IAM user policy.

    A typical use case is that the permissions of the IAM user whose credentials are used to call GetFederationToken are designed to allow access to all the actions and resources that any federated user will need. Then, for individual users, you pass a policy to the operation that scopes down the permissions to a level that's appropriate to that individual user, using a policy that allows only a subset of permissions that are granted to the IAM user.

    If you do not pass a policy, the resulting temporary security credentials have no effective permissions. The only exception is when the temporary security credentials are used to access a resource that has a resource-based policy that specifically allows the federated user to access the resource.

    For more information about how permissions work, see Permissions for GetFederationToken. For information about using GetFederationToken to create temporary security credentials, see GetFederationToken—Federation Through a Custom Identity Broker.
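
    A hedged sketch of the scope-down pattern described above; the S3 bucket in the inline policy is a made-up example:

        package example

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/sts"
        )

        // scopeDownPolicy is hypothetical; without some policy the resulting
        // credentials have no effective permissions.
        const scopeDownPolicy = `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:GetObject","Resource":"arn:aws:s3:::example-bucket/*"}]}`

        // federate mints credentials for one named federated user.
        func federate(svc *sts.STS, userName string) (*sts.GetFederationTokenOutput, error) {
            return svc.GetFederationToken(&sts.GetFederationTokenInput{
                Name:            aws.String(userName),
                Policy:          aws.String(scopeDownPolicy),
                DurationSeconds: aws.Int64(43200), // the 12 hour default, spelled out
            })
        }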

    ", + "GetSessionToken": "

    Returns a set of temporary credentials for an AWS account or IAM user. The credentials consist of an access key ID, a secret access key, and a security token. Typically, you use GetSessionToken if you want to use MFA to protect programmatic calls to specific AWS APIs like Amazon EC2 StopInstances. MFA-enabled IAM users would need to call GetSessionToken and submit an MFA code that is associated with their MFA device. Using the temporary security credentials that are returned from the call, IAM users can then make programmatic calls to APIs that require MFA authentication. If you do not supply a correct MFA code, then the API returns an access denied error. For a comparison of GetSessionToken with the other APIs that produce temporary credentials, see Requesting Temporary Security Credentials and Comparing the AWS STS APIs in the IAM User Guide.

    The GetSessionToken action must be called by using the long-term AWS security credentials of the AWS account or an IAM user. Credentials that are created by IAM users are valid for the duration that you specify, from 900 seconds (15 minutes) up to a maximum of 129600 seconds (36 hours), with a default of 43200 seconds (12 hours); credentials that are created by using account credentials can range from 900 seconds (15 minutes) up to a maximum of 3600 seconds (1 hour), with a default of 1 hour.

    The temporary security credentials created by GetSessionToken can be used to make API calls to any AWS service with the following exceptions:

    • You cannot call any IAM APIs unless MFA authentication information is included in the request.

    • You cannot call any STS API except AssumeRole.

    We recommend that you do not call GetSessionToken with root account credentials. Instead, follow our best practices by creating one or more IAM users, giving them the necessary permissions, and using IAM users for everyday interaction with AWS.

    The permissions associated with the temporary security credentials returned by GetSessionToken are based on the permissions associated with the account or IAM user whose credentials are used to call the action. If GetSessionToken is called using root account credentials, the temporary credentials have root account permissions. Similarly, if GetSessionToken is called using the credentials of an IAM user, the temporary credentials have the same permissions as the IAM user.

    For more information about using GetSessionToken to create temporary credentials, go to Temporary Credentials for Users in Untrusted Environments in the IAM User Guide.
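
    A hedged sketch of the MFA-protected variant; the device serial and TOTP code are supplied by the caller:

        package example

        import (
            "github.com/aws/aws-sdk-go/aws"
            "github.com/aws/aws-sdk-go/service/sts"
        )

        // mfaSession trades an IAM user's long-term credentials plus an MFA code
        // for temporary session credentials.
        func mfaSession(svc *sts.STS, serialNumber, tokenCode string) (*sts.Credentials, error) {
            out, err := svc.GetSessionToken(&sts.GetSessionTokenInput{
                DurationSeconds: aws.Int64(43200), // IAM user default; account credentials max out at 3600
                SerialNumber:    aws.String(serialNumber),
                TokenCode:       aws.String(tokenCode),
            })
            if err != nil {
                return nil, err
            }
            return out.Credentials, nil
        }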

    " + }, + "shapes": { + "AssumeRoleRequest": { + "base": null, + "refs": { + } + }, + "AssumeRoleResponse": { + "base": "

    Contains the response to a successful AssumeRole request, including temporary AWS credentials that can be used to make AWS requests.

    ", + "refs": { + } + }, + "AssumeRoleWithSAMLRequest": { + "base": null, + "refs": { + } + }, + "AssumeRoleWithSAMLResponse": { + "base": "

    Contains the response to a successful AssumeRoleWithSAML request, including temporary AWS credentials that can be used to make AWS requests.

    ", + "refs": { + } + }, + "AssumeRoleWithWebIdentityRequest": { + "base": null, + "refs": { + } + }, + "AssumeRoleWithWebIdentityResponse": { + "base": "

    Contains the response to a successful AssumeRoleWithWebIdentity request, including temporary AWS credentials that can be used to make AWS requests.

    ", + "refs": { + } + }, + "AssumedRoleUser": { + "base": "

    The identifiers for the temporary security credentials that the operation returns.

    ", + "refs": { + "AssumeRoleResponse$AssumedRoleUser": "

    The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers that you can use to refer to the resulting temporary security credentials. For example, you can reference these credentials as a principal in a resource-based policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName that you specified when you called AssumeRole.

    ", + "AssumeRoleWithSAMLResponse$AssumedRoleUser": "

    The identifiers for the temporary security credentials that the operation returns.

    ", + "AssumeRoleWithWebIdentityResponse$AssumedRoleUser": "

    The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers that you can use to refer to the resulting temporary security credentials. For example, you can reference these credentials as a principal in a resource-based policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName that you specified when you called AssumeRoleWithWebIdentity.

    " + } + }, + "Audience": { + "base": null, + "refs": { + "AssumeRoleWithSAMLResponse$Audience": "

    The value of the Recipient attribute of the SubjectConfirmationData element of the SAML assertion.

    ", + "AssumeRoleWithWebIdentityResponse$Audience": "

    The intended audience (also known as client ID) of the web identity token. This is traditionally the client identifier issued to the application that requested the web identity token.

    " + } + }, + "Credentials": { + "base": "

    AWS credentials for API authentication.

    ", + "refs": { + "AssumeRoleResponse$Credentials": "

    The temporary security credentials, which include an access key ID, a secret access key, and a security (or session) token.

    Note: The size of the security token that STS APIs return is not fixed. We strongly recommend that you make no assumptions about the maximum size. As of this writing, the typical size is less than 4096 bytes, but that can vary. Also, future updates to AWS might require larger sizes.

    ", + "AssumeRoleWithSAMLResponse$Credentials": "

    The temporary security credentials, which include an access key ID, a secret access key, and a security (or session) token.

    Note: The size of the security token that STS APIs return is not fixed. We strongly recommend that you make no assumptions about the maximum size. As of this writing, the typical size is less than 4096 bytes, but that can vary. Also, future updates to AWS might require larger sizes.

    ", + "AssumeRoleWithWebIdentityResponse$Credentials": "

    The temporary security credentials, which include an access key ID, a secret access key, and a security token.

    Note: The size of the security token that STS APIs return is not fixed. We strongly recommend that you make no assumptions about the maximum size. As of this writing, the typical size is less than 4096 bytes, but that can vary. Also, future updates to AWS might require larger sizes.

    ", + "GetFederationTokenResponse$Credentials": "

    The temporary security credentials, which include an access key ID, a secret access key, and a security (or session) token.

    Note: The size of the security token that STS APIs return is not fixed. We strongly recommend that you make no assumptions about the maximum size. As of this writing, the typical size is less than 4096 bytes, but that can vary. Also, future updates to AWS might require larger sizes.

    ", + "GetSessionTokenResponse$Credentials": "

    The temporary security credentials, which include an access key ID, a secret access key, and a security (or session) token.

    Note: The size of the security token that STS APIs return is not fixed. We strongly recommend that you make no assumptions about the maximum size. As of this writing, the typical size is less than 4096 bytes, but that can vary. Also, future updates to AWS might require larger sizes.

    " + } + }, + "DecodeAuthorizationMessageRequest": { + "base": null, + "refs": { + } + }, + "DecodeAuthorizationMessageResponse": { + "base": "

    A document that contains additional information about the authorization status of a request from an encoded message that is returned in response to an AWS request.

    ", + "refs": { + } + }, + "ExpiredTokenException": { + "base": "

    The web identity token that was passed is expired or is not valid. Get a new identity token from the identity provider and then retry the request.

    ", + "refs": { + } + }, + "FederatedUser": { + "base": "

    Identifiers for the federated user that is associated with the credentials.

    ", + "refs": { + "GetFederationTokenResponse$FederatedUser": "

    Identifiers for the federated user associated with the credentials (such as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You can use the federated user's ARN in your resource-based policies, such as an Amazon S3 bucket policy.

    " + } + }, + "GetCallerIdentityRequest": { + "base": null, + "refs": { + } + }, + "GetCallerIdentityResponse": { + "base": "

    Contains the response to a successful GetCallerIdentity request, including information about the entity making the request.

    ", + "refs": { + } + }, + "GetFederationTokenRequest": { + "base": null, + "refs": { + } + }, + "GetFederationTokenResponse": { + "base": "

    Contains the response to a successful GetFederationToken request, including temporary AWS credentials that can be used to make AWS requests.

    ", + "refs": { + } + }, + "GetSessionTokenRequest": { + "base": null, + "refs": { + } + }, + "GetSessionTokenResponse": { + "base": "

    Contains the response to a successful GetSessionToken request, including temporary AWS credentials that can be used to make AWS requests.

    ", + "refs": { + } + }, + "IDPCommunicationErrorException": { + "base": "

    The request could not be fulfilled because the non-AWS identity provider (IdP) that was asked to verify the incoming identity token could not be reached. This is often a transient error caused by network conditions. Retry the request a limited number of times so that you don't exceed the request rate. If the error persists, the non-AWS identity provider might be down or not responding.

    ", + "refs": { + } + }, + "IDPRejectedClaimException": { + "base": "

    The identity provider (IdP) reported that authentication failed. This might be because the claim is invalid.

    If this error is returned for the AssumeRoleWithWebIdentity operation, it can also mean that the claim has expired or has been explicitly revoked.

    ", + "refs": { + } + }, + "InvalidAuthorizationMessageException": { + "base": "

    The error returned if the message passed to DecodeAuthorizationMessage was invalid. This can happen if the token contains invalid characters, such as linebreaks.

    ", + "refs": { + } + }, + "InvalidIdentityTokenException": { + "base": "

    The web identity token that was passed could not be validated by AWS. Get a new identity token from the identity provider and then retry the request.

    ", + "refs": { + } + }, + "Issuer": { + "base": null, + "refs": { + "AssumeRoleWithSAMLResponse$Issuer": "

    The value of the Issuer element of the SAML assertion.

    ", + "AssumeRoleWithWebIdentityResponse$Provider": "

    The issuing authority of the web identity token presented. For OpenID Connect ID Tokens this contains the value of the iss field. For OAuth 2.0 access tokens, this contains the value of the ProviderId parameter that was passed in the AssumeRoleWithWebIdentity request.

    " + } + }, + "MalformedPolicyDocumentException": { + "base": "

    The request was rejected because the policy document was malformed. The error message describes the specific error.

    ", + "refs": { + } + }, + "NameQualifier": { + "base": null, + "refs": { + "AssumeRoleWithSAMLResponse$NameQualifier": "

    A hash value based on the concatenation of the Issuer response value, the AWS account ID, and the friendly name (the last part of the ARN) of the SAML provider in IAM. The combination of NameQualifier and Subject can be used to uniquely identify a federated user.

    The following pseudocode shows how the hash value is calculated:

    BASE64 ( SHA1 ( \"https://example.com/saml\" + \"123456789012\" + \"/MySAMLIdP\" ) )
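The pseudocode translates directly to Go's standard library; a sketch using the example issuer, account ID, and provider name from above:

```go
package main

import (
	"crypto/sha1"
	"encoding/base64"
	"fmt"
)

func main() {
	// BASE64(SHA1(issuer + account ID + "/" + SAML provider name)), as in the pseudocode.
	sum := sha1.Sum([]byte("https://example.com/saml" + "123456789012" + "/MySAMLIdP"))
	fmt.Println(base64.StdEncoding.EncodeToString(sum[:]))
}
```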

    " + } + }, + "PackedPolicyTooLargeException": { + "base": "

    The request was rejected because the policy document was too large. The error message describes how big the policy document is, in packed form, as a percentage of what the API allows.

    ", + "refs": { + } + }, + "RegionDisabledException": { + "base": "

    STS is not activated in the requested region for the account that is being asked to generate credentials. The account administrator must use the IAM console to activate STS in that region. For more information, see Activating and Deactivating AWS STS in an AWS Region in the IAM User Guide.

    ", + "refs": { + } + }, + "SAMLAssertionType": { + "base": null, + "refs": { + "AssumeRoleWithSAMLRequest$SAMLAssertion": "

    The base-64 encoded SAML authentication response provided by the IdP.

    For more information, see Configuring a Relying Party and Adding Claims in the Using IAM guide.

    " + } + }, + "Subject": { + "base": null, + "refs": { + "AssumeRoleWithSAMLResponse$Subject": "

    The value of the NameID element in the Subject element of the SAML assertion.

    " + } + }, + "SubjectType": { + "base": null, + "refs": { + "AssumeRoleWithSAMLResponse$SubjectType": "

    The format of the name ID, as defined by the Format attribute in the NameID element of the SAML assertion. Typical examples of the format are transient or persistent.

    If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format, that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient is returned as transient. If the format includes any other prefix, the format is returned with no modifications.

    " + } + }, + "accessKeyIdType": { + "base": null, + "refs": { + "Credentials$AccessKeyId": "

    The access key ID that identifies the temporary security credentials.

    " + } + }, + "accessKeySecretType": { + "base": null, + "refs": { + "Credentials$SecretAccessKey": "

    The secret access key that can be used to sign requests.

    " + } + }, + "accountType": { + "base": null, + "refs": { + "GetCallerIdentityResponse$Account": "

    The AWS account ID number of the account that owns or contains the calling entity.

    " + } + }, + "arnType": { + "base": null, + "refs": { + "AssumeRoleRequest$RoleArn": "

    The Amazon Resource Name (ARN) of the role to assume.

    ", + "AssumeRoleWithSAMLRequest$RoleArn": "

    The Amazon Resource Name (ARN) of the role that the caller is assuming.

    ", + "AssumeRoleWithSAMLRequest$PrincipalArn": "

    The Amazon Resource Name (ARN) of the SAML provider in IAM that describes the IdP.

    ", + "AssumeRoleWithWebIdentityRequest$RoleArn": "

    The Amazon Resource Name (ARN) of the role that the caller is assuming.

    ", + "AssumedRoleUser$Arn": "

    The ARN of the temporary security credentials that are returned from the AssumeRole action. For more information about ARNs and how to use them in policies, see IAM Identifiers in Using IAM.

    ", + "FederatedUser$Arn": "

    The ARN that specifies the federated user that is associated with the credentials. For more information about ARNs and how to use them in policies, see IAM Identifiers in Using IAM.

    ", + "GetCallerIdentityResponse$Arn": "

    The AWS ARN associated with the calling entity.

    " + } + }, + "assumedRoleIdType": { + "base": null, + "refs": { + "AssumedRoleUser$AssumedRoleId": "

    A unique identifier that contains the role ID and the role session name of the role that is being assumed. The role ID is generated by AWS when the role is created.

    " + } + }, + "clientTokenType": { + "base": null, + "refs": { + "AssumeRoleWithWebIdentityRequest$WebIdentityToken": "

    The OAuth 2.0 access token or OpenID Connect ID token that is provided by the identity provider. Your application must get this token by authenticating the user who is using your application with a web identity provider before the application makes an AssumeRoleWithWebIdentity call.
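A hedged sketch of exchanging such a token via AssumeRoleWithWebIdentity; the role ARN, session name, and token value are placeholders supplied by your application and identity provider.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1"))))
	out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
		RoleArn:          aws.String("arn:aws:iam::123456789012:role/WebAppRole"), // hypothetical
		RoleSessionName:  aws.String("app-user-42"),                               // hypothetical
		WebIdentityToken: aws.String("<token from the identity provider>"),        // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.Credentials.AccessKeyId),
		aws.StringValue(out.SubjectFromWebIdentityToken))
}
```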

    " + } + }, + "dateType": { + "base": null, + "refs": { + "Credentials$Expiration": "

    The date on which the current credentials expire.

    " + } + }, + "decodedMessageType": { + "base": null, + "refs": { + "DecodeAuthorizationMessageResponse$DecodedMessage": "

    An XML document that contains the decoded message.

    " + } + }, + "durationSecondsType": { + "base": null, + "refs": { + "GetFederationTokenRequest$DurationSeconds": "

    The duration, in seconds, that the session should last. Acceptable durations for federation sessions range from 900 seconds (15 minutes) to 129600 seconds (36 hours), with 43200 seconds (12 hours) as the default. Sessions obtained using AWS account (root) credentials are restricted to a maximum of 3600 seconds (one hour). If the specified duration is longer than one hour, the session obtained by using AWS account (root) credentials defaults to one hour.

    ", + "GetSessionTokenRequest$DurationSeconds": "

    The duration, in seconds, that the credentials should remain valid. Acceptable durations for IAM user sessions range from 900 seconds (15 minutes) to 129600 seconds (36 hours), with 43200 seconds (12 hours) as the default. Sessions for AWS account owners are restricted to a maximum of 3600 seconds (one hour). If the duration is longer than one hour, the session for AWS account owners defaults to one hour.

    " + } + }, + "encodedMessageType": { + "base": null, + "refs": { + "DecodeAuthorizationMessageRequest$EncodedMessage": "

    The encoded message that was returned with the response.

    " + } + }, + "expiredIdentityTokenMessage": { + "base": null, + "refs": { + "ExpiredTokenException$message": null + } + }, + "externalIdType": { + "base": null, + "refs": { + "AssumeRoleRequest$ExternalId": "

    A unique identifier that is used by third parties when assuming roles in their customers' accounts. For each role that the third party can assume, they should instruct their customers to ensure the role's trust policy checks for the external ID that the third party generated. Each time the third party assumes the role, they should pass the customer's external ID. The external ID is useful in order to help third parties bind a role to the customer who created it. For more information about the external ID, see How to Use an External ID When Granting Access to Your AWS Resources to a Third Party in the IAM User Guide.

    The format for this parameter, as described by its regex pattern, is a string of characters consisting of upper- and lower-case alphanumeric characters with no spaces. You can also include any of the following characters: =,.@:\\/-
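A sketch of the third-party call described here, assuming a hypothetical customer role ARN and the external ID the third party issued:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))
	out, err := svc.AssumeRole(&sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/CustomerRole"), // hypothetical
		RoleSessionName: aws.String("third-party-audit"),                           // hypothetical
		ExternalId:      aws.String("unique-id-from-third-party"),                  // hypothetical
		DurationSeconds: aws.Int64(900), // the 15-minute minimum
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.AssumedRoleUser.Arn))
}
```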

    " + } + }, + "federatedIdType": { + "base": null, + "refs": { + "FederatedUser$FederatedUserId": "

    The string that identifies the federated user associated with the credentials, similar to the unique ID of an IAM user.

    " + } + }, + "idpCommunicationErrorMessage": { + "base": null, + "refs": { + "IDPCommunicationErrorException$message": null + } + }, + "idpRejectedClaimMessage": { + "base": null, + "refs": { + "IDPRejectedClaimException$message": null + } + }, + "invalidAuthorizationMessage": { + "base": null, + "refs": { + "InvalidAuthorizationMessageException$message": null + } + }, + "invalidIdentityTokenMessage": { + "base": null, + "refs": { + "InvalidIdentityTokenException$message": null + } + }, + "malformedPolicyDocumentMessage": { + "base": null, + "refs": { + "MalformedPolicyDocumentException$message": null + } + }, + "nonNegativeIntegerType": { + "base": null, + "refs": { + "AssumeRoleResponse$PackedPolicySize": "

    A percentage value that indicates the size of the policy in packed form. The service rejects any policy with a packed size greater than 100 percent, which means the policy exceeded the allowed space.

    ", + "AssumeRoleWithSAMLResponse$PackedPolicySize": "

    A percentage value that indicates the size of the policy in packed form. The service rejects any policy with a packed size greater than 100 percent, which means the policy exceeded the allowed space.

    ", + "AssumeRoleWithWebIdentityResponse$PackedPolicySize": "

    A percentage value that indicates the size of the policy in packed form. The service rejects any policy with a packed size greater than 100 percent, which means the policy exceeded the allowed space.

    ", + "GetFederationTokenResponse$PackedPolicySize": "

    A percentage value indicating the size of the policy in packed form. The service rejects policies for which the packed size is greater than 100 percent of the allowed value.

    " + } + }, + "packedPolicyTooLargeMessage": { + "base": null, + "refs": { + "PackedPolicyTooLargeException$message": null + } + }, + "regionDisabledMessage": { + "base": null, + "refs": { + "RegionDisabledException$message": null + } + }, + "roleDurationSecondsType": { + "base": null, + "refs": { + "AssumeRoleRequest$DurationSeconds": "

    The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set to 3600 seconds.

    ", + "AssumeRoleWithSAMLRequest$DurationSeconds": "

    The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set to 3600 seconds. An expiration can also be specified in the SAML authentication response's SessionNotOnOrAfter value. The actual expiration time is whichever value is shorter.

    The maximum duration for a session is 1 hour, and the minimum duration is 15 minutes, even if values outside this range are specified.

    ", + "AssumeRoleWithWebIdentityRequest$DurationSeconds": "

    The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set to 3600 seconds.

    " + } + }, + "roleSessionNameType": { + "base": null, + "refs": { + "AssumeRoleRequest$RoleSessionName": "

    An identifier for the assumed role session.

    Use the role session name to uniquely identify a session when the same role is assumed by different principals or for different reasons. In cross-account scenarios, the role session name is visible to, and can be logged by, the account that owns the role. The role session name is also used in the ARN of the assumed role principal. This means that subsequent cross-account API requests using the temporary security credentials will expose the role session name to the external account in their CloudTrail logs.

    The format for this parameter, as described by its regex pattern, is a string of characters consisting of upper- and lower-case alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "AssumeRoleWithWebIdentityRequest$RoleSessionName": "

    An identifier for the assumed role session. Typically, you pass the name or identifier that is associated with the user who is using your application. That way, the temporary security credentials that your application will use are associated with that user. This session name is included as part of the ARN and assumed role ID in the AssumedRoleUser response element.

    The format for this parameter, as described by its regex pattern, is a string of characters consisting of upper- and lower-case alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    " + } + }, + "serialNumberType": { + "base": null, + "refs": { + "AssumeRoleRequest$SerialNumber": "

    The identification number of the MFA device that is associated with the user who is making the AssumeRole call. Specify this value if the trust policy of the role being assumed includes a condition that requires MFA authentication. The value is either the serial number for a hardware device (such as GAHT12345678) or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).

    The format for this parameter, as described by its regex pattern, is a string of characters consisting of upper- and lower-case alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    ", + "GetSessionTokenRequest$SerialNumber": "

    The identification number of the MFA device that is associated with the IAM user who is making the GetSessionToken call. Specify this value if the IAM user has a policy that requires MFA authentication. The value is either the serial number for a hardware device (such as GAHT12345678) or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). You can find the device for an IAM user by going to the AWS Management Console and viewing the user's security credentials.

    The format for this parameter, as described by its regex pattern, is a string of characters consisting of upper- and lower-case alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    " + } + }, + "sessionPolicyDocumentType": { + "base": null, + "refs": { + "AssumeRoleRequest$Policy": "

    An IAM policy in JSON format.

    This parameter is optional. If you pass a policy, the temporary security credentials that are returned by the operation have the permissions that are allowed by both (the intersection of) the access policy of the role that is being assumed, and the policy that you pass. This gives you a way to further restrict the permissions for the resulting temporary security credentials. You cannot use the passed policy to grant permissions that are in excess of those allowed by the access policy of the role that is being assumed. For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity in the IAM User Guide.

    The format for this parameter, as described by its regex pattern, is a string of characters up to 2048 characters in length. The characters can be any ASCII character from the space character to the end of the valid character list (\\u0020-\\u00FF). It can also include the tab (\\u0009), linefeed (\\u000A), and carriage return (\\u000D) characters.

    The policy plain text must be 2048 bytes or shorter. However, an internal conversion compresses it into a packed binary format with a separate limit. The PackedPolicySize response element indicates by percentage how close to the upper size limit the policy is, with 100% equaling the maximum allowed size.

    ", + "AssumeRoleWithSAMLRequest$Policy": "

    An IAM policy in JSON format.

    The policy parameter is optional. If you pass a policy, the temporary security credentials that are returned by the operation have the permissions that are allowed by both the access policy of the role that is being assumed, and the policy that you pass. This gives you a way to further restrict the permissions for the resulting temporary security credentials. You cannot use the passed policy to grant permissions that are in excess of those allowed by the access policy of the role that is being assumed. For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity in the IAM User Guide.

    The format for this parameter, as described by its regex pattern, is a string of characters up to 2048 characters in length. The characters can be any ASCII character from the space character to the end of the valid character list (\\u0020-\\u00FF). It can also include the tab (\\u0009), linefeed (\\u000A), and carriage return (\\u000D) characters.

    The policy plain text must be 2048 bytes or shorter. However, an internal conversion compresses it into a packed binary format with a separate limit. The PackedPolicySize response element indicates by percentage how close to the upper size limit the policy is, with 100% equaling the maximum allowed size.

    ", + "AssumeRoleWithWebIdentityRequest$Policy": "

    An IAM policy in JSON format.

    The policy parameter is optional. If you pass a policy, the temporary security credentials that are returned by the operation have the permissions that are allowed by both the access policy of the role that is being assumed, and the policy that you pass. This gives you a way to further restrict the permissions for the resulting temporary security credentials. You cannot use the passed policy to grant permissions that are in excess of those allowed by the access policy of the role that is being assumed. For more information, see Permissions for AssumeRoleWithWebIdentity in the IAM User Guide.

    The format for this parameter, as described by its regex pattern, is a string of characters up to 2048 characters in length. The characters can be any ASCII character from the space character to the end of the valid character list (\\u0020-\\u00FF). It can also include the tab (\\u0009), linefeed (\\u000A), and carriage return (\\u000D) characters.

    The policy plain text must be 2048 bytes or shorter. However, an internal conversion compresses it into a packed binary format with a separate limit. The PackedPolicySize response element indicates by percentage how close to the upper size limit the policy is, with 100% equaling the maximum allowed size.

    ", + "GetFederationTokenRequest$Policy": "

    An IAM policy in JSON format that is passed with the GetFederationToken call and evaluated along with the policy or policies that are attached to the IAM user whose credentials are used to call GetFederationToken. The passed policy is used to scope down the permissions that are available to the IAM user, by allowing only a subset of the permissions that are granted to the IAM user. The passed policy cannot grant more permissions than those granted to the IAM user. The final permissions for the federated user are the most restrictive set based on the intersection of the passed policy and the IAM user policy.

    If you do not pass a policy, the resulting temporary security credentials have no effective permissions. The only exception is when the temporary security credentials are used to access a resource that has a resource-based policy that specifically allows the federated user to access the resource.

    The format for this parameter, as described by its regex pattern, is a string of characters up to 2048 characters in length. The characters can be any ASCII character from the space character to the end of the valid character list (\\u0020-\\u00FF). It can also include the tab (\\u0009), linefeed (\\u000A), and carriage return (\\u000D) characters.

    The policy plain text must be 2048 bytes or shorter. However, an internal conversion compresses it into a packed binary format with a separate limit. The PackedPolicySize response element indicates by percentage how close to the upper size limit the policy is, with 100% equaling the maximum allowed size.

    For more information about how permissions work, see Permissions for GetFederationToken.
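A sketch of the intersection behavior described above, with an illustrative scope-down policy (the bucket name and federated user name are assumptions):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// Illustrative policy; the effective permissions are the intersection
	// of this document and the calling IAM user's attached policies.
	policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:GetObject","Resource":"arn:aws:s3:::example-bucket/*"}]}`

	svc := sts.New(session.Must(session.NewSession()))
	out, err := svc.GetFederationToken(&sts.GetFederationTokenInput{
		Name:            aws.String("Bob"),
		Policy:          aws.String(policy),
		DurationSeconds: aws.Int64(43200), // the 12-hour default, stated explicitly
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.FederatedUser.Arn), aws.Int64Value(out.PackedPolicySize))
}
```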

    " + } + }, + "tokenCodeType": { + "base": null, + "refs": { + "AssumeRoleRequest$TokenCode": "

    The value provided by the MFA device, if the trust policy of the role being assumed requires MFA (that is, if the policy includes a condition that tests for MFA). If the role being assumed requires MFA and if the TokenCode value is missing or expired, the AssumeRole call returns an \"access denied\" error.

    The format for this parameter, as described by its regex pattern, is a sequence of six numeric digits.

    ", + "GetSessionTokenRequest$TokenCode": "

    The value provided by the MFA device, if MFA is required. If any policy requires the IAM user to submit an MFA code, specify this value. If MFA authentication is required, and the user does not provide a code when requesting a set of temporary security credentials, the user will receive an \"access denied\" response when requesting resources that require MFA authentication.

    The format for this parameter, as described by its regex pattern, is a sequence of six numeric digits.
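A minimal sketch of an MFA-protected GetSessionToken call, reusing the example device ARN from the SerialNumber description; the token code is whatever the device currently displays:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))
	out, err := svc.GetSessionToken(&sts.GetSessionTokenInput{
		SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/user"), // example value from above
		TokenCode:       aws.String("123456"),                             // six digits from the device
		DurationSeconds: aws.Int64(3600),
	})
	if err != nil {
		log.Fatal(err)
	}
	c := out.Credentials
	fmt.Println(aws.StringValue(c.AccessKeyId), c.Expiration)
}
```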

    " + } + }, + "tokenType": { + "base": null, + "refs": { + "Credentials$SessionToken": "

    The token that users must pass to the service API to use the temporary credentials.

    " + } + }, + "urlType": { + "base": null, + "refs": { + "AssumeRoleWithWebIdentityRequest$ProviderId": "

    The fully qualified host component of the domain name of the identity provider.

    Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com and graph.facebook.com are the only supported identity providers for OAuth 2.0 access tokens. Do not include URL schemes and port numbers.

    Do not specify this value for OpenID Connect ID tokens.

    " + } + }, + "userIdType": { + "base": null, + "refs": { + "GetCallerIdentityResponse$UserId": "

    The unique identifier of the calling entity. The exact value depends on the type of entity making the call. The values returned are those listed in the aws:userid column in the Principal table found on the Policy Variables reference page in the IAM User Guide.

    " + } + }, + "userNameType": { + "base": null, + "refs": { + "GetFederationTokenRequest$Name": "

    The name of the federated user. The name is used as an identifier for the temporary security credentials (such as Bob). For example, you can reference the federated user name in a resource-based policy, such as in an Amazon S3 bucket policy.

    The format for this parameter, as described by its regex pattern, is a string of characters consisting of upper- and lower-case alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

    " + } + }, + "webIdentitySubjectType": { + "base": null, + "refs": { + "AssumeRoleWithWebIdentityResponse$SubjectFromWebIdentityToken": "

    The unique user identifier that is returned by the identity provider. This identifier is associated with the WebIdentityToken that was submitted with the AssumeRoleWithWebIdentity call. The identifier is typically unique to the user and the application that acquired the WebIdentityToken (pairwise identifier). For OpenID Connect ID tokens, this field contains the value returned by the identity provider as the token's sub (Subject) claim.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/sts/2011-06-15/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/sts/2011-06-15/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/sts/2011-06-15/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/support/2013-04-15/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/support/2013-04-15/api-2.json new file mode 100644 index 000000000..09af5f1af --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/support/2013-04-15/api-2.json @@ -0,0 +1,869 @@ +{ + "metadata":{ + "apiVersion":"2013-04-15", + "endpointPrefix":"support", + "jsonVersion":"1.1", + "serviceFullName":"AWS Support", + "signatureVersion":"v4", + "targetPrefix":"AWSSupport_20130415", + "protocol":"json" + }, + "operations":{ + "AddAttachmentsToSet":{ + "name":"AddAttachmentsToSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddAttachmentsToSetRequest"}, + "output":{"shape":"AddAttachmentsToSetResponse"}, + "errors":[ + { + "shape":"InternalServerError", + "exception":true, + "fault":true + }, + { + "shape":"AttachmentSetIdNotFound", + "exception":true + }, + { + "shape":"AttachmentSetExpired", + "exception":true + }, + { + "shape":"AttachmentSetSizeLimitExceeded", + "exception":true + }, + { + "shape":"AttachmentLimitExceeded", + "exception":true + } + ] + }, + "AddCommunicationToCase":{ + "name":"AddCommunicationToCase", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddCommunicationToCaseRequest"}, + "output":{"shape":"AddCommunicationToCaseResponse"}, + "errors":[ + { + "shape":"InternalServerError", + "exception":true, + "fault":true + }, + { + "shape":"CaseIdNotFound", + "exception":true + }, + { + "shape":"AttachmentSetIdNotFound", + "exception":true + }, + { + "shape":"AttachmentSetExpired", + "exception":true + } + ] + }, + "CreateCase":{ + "name":"CreateCase", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateCaseRequest"}, + "output":{"shape":"CreateCaseResponse"}, + "errors":[ + { + "shape":"InternalServerError", + "exception":true, + "fault":true + }, + { + "shape":"CaseCreationLimitExceeded", + "exception":true + }, + { + "shape":"AttachmentSetIdNotFound", + "exception":true + }, + { + "shape":"AttachmentSetExpired", + "exception":true + } + ] + }, + "DescribeAttachment":{ + "name":"DescribeAttachment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAttachmentRequest"}, + "output":{"shape":"DescribeAttachmentResponse"}, + "errors":[ + { + "shape":"InternalServerError", + "exception":true, + "fault":true + }, + { + "shape":"DescribeAttachmentLimitExceeded", + "exception":true + }, + { + "shape":"AttachmentIdNotFound", + "exception":true + } + ] + }, + "DescribeCases":{ + "name":"DescribeCases", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeCasesRequest"}, + "output":{"shape":"DescribeCasesResponse"}, + "errors":[ + { + "shape":"InternalServerError", + "exception":true, + "fault":true + }, + { + "shape":"CaseIdNotFound", + "exception":true + } + ] + }, + "DescribeCommunications":{ + "name":"DescribeCommunications", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeCommunicationsRequest"}, + "output":{"shape":"DescribeCommunicationsResponse"}, + "errors":[ + { + 
"shape":"InternalServerError", + "exception":true, + "fault":true + }, + { + "shape":"CaseIdNotFound", + "exception":true + } + ] + }, + "DescribeServices":{ + "name":"DescribeServices", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeServicesRequest"}, + "output":{"shape":"DescribeServicesResponse"}, + "errors":[ + { + "shape":"InternalServerError", + "exception":true, + "fault":true + } + ] + }, + "DescribeSeverityLevels":{ + "name":"DescribeSeverityLevels", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSeverityLevelsRequest"}, + "output":{"shape":"DescribeSeverityLevelsResponse"}, + "errors":[ + { + "shape":"InternalServerError", + "exception":true, + "fault":true + } + ] + }, + "DescribeTrustedAdvisorCheckRefreshStatuses":{ + "name":"DescribeTrustedAdvisorCheckRefreshStatuses", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTrustedAdvisorCheckRefreshStatusesRequest"}, + "output":{"shape":"DescribeTrustedAdvisorCheckRefreshStatusesResponse"}, + "errors":[ + { + "shape":"InternalServerError", + "exception":true, + "fault":true + } + ] + }, + "DescribeTrustedAdvisorCheckResult":{ + "name":"DescribeTrustedAdvisorCheckResult", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTrustedAdvisorCheckResultRequest"}, + "output":{"shape":"DescribeTrustedAdvisorCheckResultResponse"}, + "errors":[ + { + "shape":"InternalServerError", + "exception":true, + "fault":true + } + ] + }, + "DescribeTrustedAdvisorCheckSummaries":{ + "name":"DescribeTrustedAdvisorCheckSummaries", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTrustedAdvisorCheckSummariesRequest"}, + "output":{"shape":"DescribeTrustedAdvisorCheckSummariesResponse"}, + "errors":[ + { + "shape":"InternalServerError", + "exception":true, + "fault":true + } + ] + }, + "DescribeTrustedAdvisorChecks":{ + "name":"DescribeTrustedAdvisorChecks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTrustedAdvisorChecksRequest"}, + "output":{"shape":"DescribeTrustedAdvisorChecksResponse"}, + "errors":[ + { + "shape":"InternalServerError", + "exception":true, + "fault":true + } + ] + }, + "RefreshTrustedAdvisorCheck":{ + "name":"RefreshTrustedAdvisorCheck", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RefreshTrustedAdvisorCheckRequest"}, + "output":{"shape":"RefreshTrustedAdvisorCheckResponse"}, + "errors":[ + { + "shape":"InternalServerError", + "exception":true, + "fault":true + } + ] + }, + "ResolveCase":{ + "name":"ResolveCase", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResolveCaseRequest"}, + "output":{"shape":"ResolveCaseResponse"}, + "errors":[ + { + "shape":"InternalServerError", + "exception":true, + "fault":true + }, + { + "shape":"CaseIdNotFound", + "exception":true + } + ] + } + }, + "shapes":{ + "AddAttachmentsToSetRequest":{ + "type":"structure", + "required":["attachments"], + "members":{ + "attachmentSetId":{"shape":"AttachmentSetId"}, + "attachments":{"shape":"Attachments"} + } + }, + "AddAttachmentsToSetResponse":{ + "type":"structure", + "members":{ + "attachmentSetId":{"shape":"AttachmentSetId"}, + "expiryTime":{"shape":"ExpiryTime"} + } + }, + "AddCommunicationToCaseRequest":{ + "type":"structure", + "required":["communicationBody"], + "members":{ + "caseId":{"shape":"CaseId"}, + "communicationBody":{"shape":"CommunicationBody"}, + 
"ccEmailAddresses":{"shape":"CcEmailAddressList"}, + "attachmentSetId":{"shape":"AttachmentSetId"} + } + }, + "AddCommunicationToCaseResponse":{ + "type":"structure", + "members":{ + "result":{"shape":"Result"} + } + }, + "AfterTime":{"type":"string"}, + "Attachment":{ + "type":"structure", + "members":{ + "fileName":{"shape":"FileName"}, + "data":{"shape":"Data"} + } + }, + "AttachmentDetails":{ + "type":"structure", + "members":{ + "attachmentId":{"shape":"AttachmentId"}, + "fileName":{"shape":"FileName"} + } + }, + "AttachmentId":{"type":"string"}, + "AttachmentIdNotFound":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "AttachmentLimitExceeded":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "AttachmentSet":{ + "type":"list", + "member":{"shape":"AttachmentDetails"} + }, + "AttachmentSetExpired":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "AttachmentSetId":{"type":"string"}, + "AttachmentSetIdNotFound":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "AttachmentSetSizeLimitExceeded":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "Attachments":{ + "type":"list", + "member":{"shape":"Attachment"} + }, + "BeforeTime":{"type":"string"}, + "Boolean":{"type":"boolean"}, + "CaseCreationLimitExceeded":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "CaseDetails":{ + "type":"structure", + "members":{ + "caseId":{"shape":"CaseId"}, + "displayId":{"shape":"DisplayId"}, + "subject":{"shape":"Subject"}, + "status":{"shape":"Status"}, + "serviceCode":{"shape":"ServiceCode"}, + "categoryCode":{"shape":"CategoryCode"}, + "severityCode":{"shape":"SeverityCode"}, + "submittedBy":{"shape":"SubmittedBy"}, + "timeCreated":{"shape":"TimeCreated"}, + "recentCommunications":{"shape":"RecentCaseCommunications"}, + "ccEmailAddresses":{"shape":"CcEmailAddressList"}, + "language":{"shape":"Language"} + } + }, + "CaseId":{"type":"string"}, + "CaseIdList":{ + "type":"list", + "member":{"shape":"CaseId"}, + "min":0, + "max":100 + }, + "CaseIdNotFound":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "CaseList":{ + "type":"list", + "member":{"shape":"CaseDetails"} + }, + "CaseStatus":{"type":"string"}, + "Category":{ + "type":"structure", + "members":{ + "code":{"shape":"CategoryCode"}, + "name":{"shape":"CategoryName"} + } + }, + "CategoryCode":{"type":"string"}, + "CategoryList":{ + "type":"list", + "member":{"shape":"Category"} + }, + "CategoryName":{"type":"string"}, + "CcEmailAddress":{"type":"string"}, + "CcEmailAddressList":{ + "type":"list", + "member":{"shape":"CcEmailAddress"} + }, + "Communication":{ + "type":"structure", + "members":{ + "caseId":{"shape":"CaseId"}, + "body":{"shape":"CommunicationBody"}, + "submittedBy":{"shape":"SubmittedBy"}, + "timeCreated":{"shape":"TimeCreated"}, + "attachmentSet":{"shape":"AttachmentSet"} + } + }, + "CommunicationBody":{"type":"string"}, + "CommunicationList":{ + "type":"list", + "member":{"shape":"Communication"} + }, + "CreateCaseRequest":{ + "type":"structure", + "required":[ + "subject", + "communicationBody" + ], + "members":{ + "subject":{"shape":"Subject"}, + "serviceCode":{"shape":"ServiceCode"}, + "severityCode":{"shape":"SeverityCode"}, + 
"categoryCode":{"shape":"CategoryCode"}, + "communicationBody":{"shape":"CommunicationBody"}, + "ccEmailAddresses":{"shape":"CcEmailAddressList"}, + "language":{"shape":"Language"}, + "issueType":{"shape":"IssueType"}, + "attachmentSetId":{"shape":"AttachmentSetId"} + } + }, + "CreateCaseResponse":{ + "type":"structure", + "members":{ + "caseId":{"shape":"CaseId"} + } + }, + "Data":{"type":"blob"}, + "DescribeAttachmentLimitExceeded":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "DescribeAttachmentRequest":{ + "type":"structure", + "required":["attachmentId"], + "members":{ + "attachmentId":{"shape":"AttachmentId"} + } + }, + "DescribeAttachmentResponse":{ + "type":"structure", + "members":{ + "attachment":{"shape":"Attachment"} + } + }, + "DescribeCasesRequest":{ + "type":"structure", + "members":{ + "caseIdList":{"shape":"CaseIdList"}, + "displayId":{"shape":"DisplayId"}, + "afterTime":{"shape":"AfterTime"}, + "beforeTime":{"shape":"BeforeTime"}, + "includeResolvedCases":{"shape":"IncludeResolvedCases"}, + "nextToken":{"shape":"NextToken"}, + "maxResults":{"shape":"MaxResults"}, + "language":{"shape":"Language"}, + "includeCommunications":{"shape":"IncludeCommunications"} + } + }, + "DescribeCasesResponse":{ + "type":"structure", + "members":{ + "cases":{"shape":"CaseList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "DescribeCommunicationsRequest":{ + "type":"structure", + "required":["caseId"], + "members":{ + "caseId":{"shape":"CaseId"}, + "beforeTime":{"shape":"BeforeTime"}, + "afterTime":{"shape":"AfterTime"}, + "nextToken":{"shape":"NextToken"}, + "maxResults":{"shape":"MaxResults"} + } + }, + "DescribeCommunicationsResponse":{ + "type":"structure", + "members":{ + "communications":{"shape":"CommunicationList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "DescribeServicesRequest":{ + "type":"structure", + "members":{ + "serviceCodeList":{"shape":"ServiceCodeList"}, + "language":{"shape":"Language"} + } + }, + "DescribeServicesResponse":{ + "type":"structure", + "members":{ + "services":{"shape":"ServiceList"} + } + }, + "DescribeSeverityLevelsRequest":{ + "type":"structure", + "members":{ + "language":{"shape":"Language"} + } + }, + "DescribeSeverityLevelsResponse":{ + "type":"structure", + "members":{ + "severityLevels":{"shape":"SeverityLevelsList"} + } + }, + "DescribeTrustedAdvisorCheckRefreshStatusesRequest":{ + "type":"structure", + "required":["checkIds"], + "members":{ + "checkIds":{"shape":"StringList"} + } + }, + "DescribeTrustedAdvisorCheckRefreshStatusesResponse":{ + "type":"structure", + "required":["statuses"], + "members":{ + "statuses":{"shape":"TrustedAdvisorCheckRefreshStatusList"} + } + }, + "DescribeTrustedAdvisorCheckResultRequest":{ + "type":"structure", + "required":["checkId"], + "members":{ + "checkId":{"shape":"String"}, + "language":{"shape":"String"} + } + }, + "DescribeTrustedAdvisorCheckResultResponse":{ + "type":"structure", + "members":{ + "result":{"shape":"TrustedAdvisorCheckResult"} + } + }, + "DescribeTrustedAdvisorCheckSummariesRequest":{ + "type":"structure", + "required":["checkIds"], + "members":{ + "checkIds":{"shape":"StringList"} + } + }, + "DescribeTrustedAdvisorCheckSummariesResponse":{ + "type":"structure", + "required":["summaries"], + "members":{ + "summaries":{"shape":"TrustedAdvisorCheckSummaryList"} + } + }, + "DescribeTrustedAdvisorChecksRequest":{ + "type":"structure", + "required":["language"], + "members":{ + "language":{"shape":"String"} + } + }, + 
"DescribeTrustedAdvisorChecksResponse":{ + "type":"structure", + "required":["checks"], + "members":{ + "checks":{"shape":"TrustedAdvisorCheckList"} + } + }, + "DisplayId":{"type":"string"}, + "Double":{"type":"double"}, + "ErrorMessage":{"type":"string"}, + "ExpiryTime":{"type":"string"}, + "FileName":{"type":"string"}, + "IncludeCommunications":{"type":"boolean"}, + "IncludeResolvedCases":{"type":"boolean"}, + "InternalServerError":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true, + "fault":true + }, + "IssueType":{"type":"string"}, + "Language":{"type":"string"}, + "Long":{"type":"long"}, + "MaxResults":{ + "type":"integer", + "min":10, + "max":100 + }, + "NextToken":{"type":"string"}, + "RecentCaseCommunications":{ + "type":"structure", + "members":{ + "communications":{"shape":"CommunicationList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "RefreshTrustedAdvisorCheckRequest":{ + "type":"structure", + "required":["checkId"], + "members":{ + "checkId":{"shape":"String"} + } + }, + "RefreshTrustedAdvisorCheckResponse":{ + "type":"structure", + "required":["status"], + "members":{ + "status":{"shape":"TrustedAdvisorCheckRefreshStatus"} + } + }, + "ResolveCaseRequest":{ + "type":"structure", + "members":{ + "caseId":{"shape":"CaseId"} + } + }, + "ResolveCaseResponse":{ + "type":"structure", + "members":{ + "initialCaseStatus":{"shape":"CaseStatus"}, + "finalCaseStatus":{"shape":"CaseStatus"} + } + }, + "Result":{"type":"boolean"}, + "Service":{ + "type":"structure", + "members":{ + "code":{"shape":"ServiceCode"}, + "name":{"shape":"ServiceName"}, + "categories":{"shape":"CategoryList"} + } + }, + "ServiceCode":{ + "type":"string", + "pattern":"[0-9a-z\\-_]+" + }, + "ServiceCodeList":{ + "type":"list", + "member":{"shape":"ServiceCode"}, + "min":0, + "max":100 + }, + "ServiceList":{ + "type":"list", + "member":{"shape":"Service"} + }, + "ServiceName":{"type":"string"}, + "SeverityCode":{"type":"string"}, + "SeverityLevel":{ + "type":"structure", + "members":{ + "code":{"shape":"SeverityLevelCode"}, + "name":{"shape":"SeverityLevelName"} + } + }, + "SeverityLevelCode":{"type":"string"}, + "SeverityLevelName":{"type":"string"}, + "SeverityLevelsList":{ + "type":"list", + "member":{"shape":"SeverityLevel"} + }, + "Status":{"type":"string"}, + "String":{"type":"string"}, + "StringList":{ + "type":"list", + "member":{"shape":"String"} + }, + "Subject":{"type":"string"}, + "SubmittedBy":{"type":"string"}, + "TimeCreated":{"type":"string"}, + "TrustedAdvisorCategorySpecificSummary":{ + "type":"structure", + "members":{ + "costOptimizing":{"shape":"TrustedAdvisorCostOptimizingSummary"} + } + }, + "TrustedAdvisorCheckDescription":{ + "type":"structure", + "required":[ + "id", + "name", + "description", + "category", + "metadata" + ], + "members":{ + "id":{"shape":"String"}, + "name":{"shape":"String"}, + "description":{"shape":"String"}, + "category":{"shape":"String"}, + "metadata":{"shape":"StringList"} + } + }, + "TrustedAdvisorCheckList":{ + "type":"list", + "member":{"shape":"TrustedAdvisorCheckDescription"} + }, + "TrustedAdvisorCheckRefreshStatus":{ + "type":"structure", + "required":[ + "checkId", + "status", + "millisUntilNextRefreshable" + ], + "members":{ + "checkId":{"shape":"String"}, + "status":{"shape":"String"}, + "millisUntilNextRefreshable":{"shape":"Long"} + } + }, + "TrustedAdvisorCheckRefreshStatusList":{ + "type":"list", + "member":{"shape":"TrustedAdvisorCheckRefreshStatus"} + }, + "TrustedAdvisorCheckResult":{ + 
"type":"structure", + "required":[ + "checkId", + "timestamp", + "status", + "resourcesSummary", + "categorySpecificSummary", + "flaggedResources" + ], + "members":{ + "checkId":{"shape":"String"}, + "timestamp":{"shape":"String"}, + "status":{"shape":"String"}, + "resourcesSummary":{"shape":"TrustedAdvisorResourcesSummary"}, + "categorySpecificSummary":{"shape":"TrustedAdvisorCategorySpecificSummary"}, + "flaggedResources":{"shape":"TrustedAdvisorResourceDetailList"} + } + }, + "TrustedAdvisorCheckSummary":{ + "type":"structure", + "required":[ + "checkId", + "timestamp", + "status", + "resourcesSummary", + "categorySpecificSummary" + ], + "members":{ + "checkId":{"shape":"String"}, + "timestamp":{"shape":"String"}, + "status":{"shape":"String"}, + "hasFlaggedResources":{"shape":"Boolean"}, + "resourcesSummary":{"shape":"TrustedAdvisorResourcesSummary"}, + "categorySpecificSummary":{"shape":"TrustedAdvisorCategorySpecificSummary"} + } + }, + "TrustedAdvisorCheckSummaryList":{ + "type":"list", + "member":{"shape":"TrustedAdvisorCheckSummary"} + }, + "TrustedAdvisorCostOptimizingSummary":{ + "type":"structure", + "required":[ + "estimatedMonthlySavings", + "estimatedPercentMonthlySavings" + ], + "members":{ + "estimatedMonthlySavings":{"shape":"Double"}, + "estimatedPercentMonthlySavings":{"shape":"Double"} + } + }, + "TrustedAdvisorResourceDetail":{ + "type":"structure", + "required":[ + "status", + "region", + "resourceId", + "metadata" + ], + "members":{ + "status":{"shape":"String"}, + "region":{"shape":"String"}, + "resourceId":{"shape":"String"}, + "isSuppressed":{"shape":"Boolean"}, + "metadata":{"shape":"StringList"} + } + }, + "TrustedAdvisorResourceDetailList":{ + "type":"list", + "member":{"shape":"TrustedAdvisorResourceDetail"} + }, + "TrustedAdvisorResourcesSummary":{ + "type":"structure", + "required":[ + "resourcesProcessed", + "resourcesFlagged", + "resourcesIgnored", + "resourcesSuppressed" + ], + "members":{ + "resourcesProcessed":{"shape":"Long"}, + "resourcesFlagged":{"shape":"Long"}, + "resourcesIgnored":{"shape":"Long"}, + "resourcesSuppressed":{"shape":"Long"} + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/support/2013-04-15/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/support/2013-04-15/docs-2.json new file mode 100644 index 000000000..6c902a157 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/support/2013-04-15/docs-2.json @@ -0,0 +1,680 @@ +{ + "operations": { + "AddAttachmentsToSet": "

    Adds one or more attachments to an attachment set. If an AttachmentSetId is not specified, a new attachment set is created, and the ID of the set is returned in the response. If an AttachmentSetId is specified, the attachments are added to the specified set, if it exists.

    An attachment set is a temporary container for attachments that are to be added to a case or case communication. The set is available for one hour after it is created; the ExpiryTime returned in the response indicates when the set expires. The maximum number of attachments in a set is 3, and the maximum size of any attachment in the set is 5 MB.
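A sketch of creating a fresh attachment set with one file; the file name is a hypothetical example, and the Support endpoint is assumed to be served from us-east-1:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/support"
)

func main() {
	svc := support.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1"))))

	data, err := ioutil.ReadFile("diagnostics.txt") // hypothetical file, must stay under 5 MB
	if err != nil {
		log.Fatal(err)
	}
	// Omitting AttachmentSetId creates a new set; the response carries its ID and expiry.
	out, err := svc.AddAttachmentsToSet(&support.AddAttachmentsToSetInput{
		Attachments: []*support.Attachment{{
			FileName: aws.String("diagnostics.txt"),
			Data:     data,
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.AttachmentSetId), aws.StringValue(out.ExpiryTime))
}
```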

    ", + "AddCommunicationToCase": "

    Adds additional customer communication to an AWS Support case. You use the CaseId value to identify the case to add communication to. You can list a set of email addresses to copy on the communication using the CcEmailAddresses value. The CommunicationBody value contains the text of the communication.

    The response indicates the success or failure of the request.

    This operation implements a subset of the features of the AWS Support Center.

    ", + "CreateCase": "

    Creates a new case in the AWS Support Center. This operation is modeled on the behavior of the AWS Support Center Create Case page. Its parameters require you to specify the following information:

    1. IssueType. The type of issue for the case. You can specify either \"customer-service\" or \"technical.\" If you do not indicate a value, the default is \"technical.\"
    2. ServiceCode. The code for an AWS service. You obtain the ServiceCode by calling DescribeServices.
    3. CategoryCode. The category for the service defined for the ServiceCode value. You also obtain the category code for a service by calling DescribeServices. Each AWS service defines its own set of category codes.
    4. SeverityCode. A value that indicates the urgency of the case, which in turn determines the response time according to your service level agreement with AWS Support. You obtain the SeverityCode by calling DescribeSeverityLevels.
    5. Subject. The Subject field on the AWS Support Center Create Case page.
    6. CommunicationBody. The Description field on the AWS Support Center Create Case page.
    7. AttachmentSetId. The ID of a set of attachments that has been created by using AddAttachmentsToSet.
    8. Language. The human language in which AWS Support handles the case. English and Japanese are currently supported.
    9. CcEmailAddresses. The AWS Support Center CC field on the Create Case page. You can list email addresses to be copied on any correspondence about the case. The account that opens the case is already identified by passing the AWS Credentials in the HTTP POST method or in a method or function call from one of the programming languages supported by an AWS SDK.

    To add additional communication or attachments to an existing case, use AddCommunicationToCase.

    A successful CreateCase request returns an AWS Support case number. Case numbers are used by the DescribeCases operation to retrieve existing AWS Support cases.
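A sketch of a CreateCase call wiring up the parameters enumerated above; the service, category, and severity codes are hypothetical and would normally come from DescribeServices and DescribeSeverityLevels:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/support"
)

func main() {
	svc := support.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1"))))
	out, err := svc.CreateCase(&support.CreateCaseInput{
		Subject:           aws.String("Example: instance unreachable"),
		CommunicationBody: aws.String("Details of the problem go here."),
		IssueType:         aws.String("technical"),
		ServiceCode:       aws.String("amazon-elastic-compute-cloud-linux"), // hypothetical
		CategoryCode:      aws.String("instance-issue"),                     // hypothetical
		SeverityCode:      aws.String("low"),                                // hypothetical
		Language:          aws.String("en"),
		CcEmailAddresses:  []*string{aws.String("ops@example.com")},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("case:", aws.StringValue(out.CaseId))
}
```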

    ", + "DescribeAttachment": "

    Returns the attachment that has the specified ID. Attachment IDs are generated by the case management system when you add an attachment to a case or case communication. Attachment IDs are returned in the AttachmentDetails objects that are returned by the DescribeCommunications operation.

    ", + "DescribeCases": "

    Returns a list of cases that you specify by passing one or more case IDs. In addition, you can filter the cases by date by setting values for the AfterTime and BeforeTime request parameters. You can set values for the IncludeResolvedCases and IncludeCommunications request parameters to control how much information is returned.

    Case data is available for 12 months after creation. If a case was created more than 12 months ago, a request for data might cause an error.

    The response returns the following in JSON format:

    1. One or more CaseDetails data types.
    2. One or more NextToken values, which specify where to paginate the returned records represented by the CaseDetails objects.
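A sketch of the NextToken pagination loop this describes, assuming the us-east-1 Support endpoint:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/support"
)

func main() {
	svc := support.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1"))))
	input := &support.DescribeCasesInput{
		IncludeResolvedCases: aws.Bool(true),
		MaxResults:           aws.Int64(10), // the model's minimum page size
	}
	// Follow NextToken until the service stops returning one.
	for {
		out, err := svc.DescribeCases(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, c := range out.Cases {
			fmt.Println(aws.StringValue(c.CaseId), aws.StringValue(c.Status))
		}
		if out.NextToken == nil {
			break
		}
		input.NextToken = out.NextToken
	}
}
```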
    ", + "DescribeCommunications": "

    Returns communications (and attachments) for one or more support cases. You can use the AfterTime and BeforeTime parameters to filter by date. You can use the CaseId parameter to restrict the results to a particular case.

    Case data is available for 12 months after creation. If a case was created more than 12 months ago, a request for data might cause an error.

    You can use the MaxResults and NextToken parameters to control the pagination of the result set. Set MaxResults to the number of cases you want displayed on each page, and use NextToken to specify the resumption of pagination.

    ", + "DescribeServices": "

    Returns the current list of AWS services and a list of service categories that applies to each one. You then use service names and categories in your CreateCase requests. Each AWS service has its own set of categories.

    The service codes and category codes correspond to the values that are displayed in the Service and Category drop-down lists on the AWS Support Center Create Case page. The values in those fields, however, do not necessarily match the service codes and categories returned by the DescribeServices request. Always use the service codes and categories obtained programmatically. This practice ensures that you always have the most recent set of service and category codes.

    ", + "DescribeSeverityLevels": "

    Returns the list of severity levels that you can assign to an AWS Support case. The severity level for a case is also a field in the CaseDetails data type included in any CreateCase request.

    ", + "DescribeTrustedAdvisorCheckRefreshStatuses": "

    Returns the refresh status of the Trusted Advisor checks that have the specified check IDs. Check IDs can be obtained by calling DescribeTrustedAdvisorChecks.

    ", + "DescribeTrustedAdvisorCheckResult": "

    Returns the results of the Trusted Advisor check that has the specified check ID. Check IDs can be obtained by calling DescribeTrustedAdvisorChecks.

    The response contains a TrustedAdvisorCheckResult object, which contains these three objects:

    • TrustedAdvisorCategorySpecificSummary
    • TrustedAdvisorResourceDetail
    • TrustedAdvisorResourcesSummary

    In addition, the response contains these fields:

    • Status. The alert status of the check: \"ok\" (green), \"warning\" (yellow), \"error\" (red), or \"not_available\".
    • Timestamp. The time of the last refresh of the check.
    • CheckId. The unique identifier for the check.
    ", + "DescribeTrustedAdvisorCheckSummaries": "

    Returns the summaries of the results of the Trusted Advisor checks that have the specified check IDs. Check IDs can be obtained by calling DescribeTrustedAdvisorChecks.

    The response contains an array of TrustedAdvisorCheckSummary objects.

    ", + "DescribeTrustedAdvisorChecks": "

    Returns information about all available Trusted Advisor checks, including name, ID, category, description, and metadata. You must specify a language code; English (\"en\") and Japanese (\"ja\") are currently supported. The response contains a TrustedAdvisorCheckDescription for each check.

    ", + "RefreshTrustedAdvisorCheck": "

    Requests a refresh of the Trusted Advisor check that has the specified check ID. Check IDs can be obtained by calling DescribeTrustedAdvisorChecks.

    The response contains a TrustedAdvisorCheckRefreshStatus object, which contains these fields:

    • Status. The refresh status of the check: \"none\", \"enqueued\", \"processing\", \"success\", or \"abandoned\".
    • MillisUntilNextRefreshable. The amount of time, in milliseconds, until the check is eligible for refresh.
    • CheckId. The unique identifier for the check.
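A sketch of the refresh flow: list checks to obtain an ID, request a refresh, then read the fields enumerated above. Picking the first check is purely illustrative.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/support"
)

func main() {
	svc := support.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1"))))

	checks, err := svc.DescribeTrustedAdvisorChecks(&support.DescribeTrustedAdvisorChecksInput{
		Language: aws.String("en"), // a language code is required
	})
	if err != nil || len(checks.Checks) == 0 {
		log.Fatal("no checks: ", err)
	}
	out, err := svc.RefreshTrustedAdvisorCheck(&support.RefreshTrustedAdvisorCheckInput{
		CheckId: checks.Checks[0].Id,
	})
	if err != nil {
		log.Fatal(err)
	}
	s := out.Status
	fmt.Println(aws.StringValue(s.Status), aws.Int64Value(s.MillisUntilNextRefreshable))
}
```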
    ", + "ResolveCase": "

    Takes a CaseId and returns the initial state of the case along with the state of the case after the call to ResolveCase completed.

    " + }, + "service": "AWS Support

    The AWS Support API reference is intended for programmers who need detailed information about the AWS Support operations and data types. This service enables you to manage your AWS Support cases programmatically. It uses HTTP methods that return results in JSON format.

    The AWS Support service also exposes a set of Trusted Advisor features. You can retrieve a list of checks and their descriptions, get check results, specify checks to refresh, and get the refresh status of checks.

    The following list describes the AWS Support case management operations:

    • AddAttachmentsToSet and AddCommunicationToCase. Add attachments and communications to cases.
    • CreateCase and ResolveCase. Open and close support cases.
    • DescribeCases and DescribeCommunications. Retrieve cases and their communications.
    • DescribeAttachment. Retrieve an attachment by ID.
    • DescribeServices and DescribeSeverityLevels. Obtain the service, category, and severity codes used when creating cases.

    The following list describes the operations available from the AWS Support service for Trusted Advisor:

    • DescribeTrustedAdvisorChecks. List the available checks.
    • DescribeTrustedAdvisorCheckResult and DescribeTrustedAdvisorCheckSummaries. Retrieve detailed or summarized check results.
    • RefreshTrustedAdvisorCheck and DescribeTrustedAdvisorCheckRefreshStatuses. Request check refreshes and poll their status.

    For authentication of requests, AWS Support uses Signature Version 4 Signing Process.

    See About the AWS Support API in the AWS Support User Guide for information about how to use this service to create and manage your support cases, and how to call Trusted Advisor for results of checks on your resources.

    ", + "shapes": { + "AddAttachmentsToSetRequest": { + "base": null, + "refs": { + } + }, + "AddAttachmentsToSetResponse": { + "base": "

    The ID and expiry time of the attachment set returned by the AddAttachmentsToSet operation.

    ", + "refs": { + } + }, + "AddCommunicationToCaseRequest": { + "base": "

    Contains the parameters for the AddCommunicationToCase operation.

    ", + "refs": { + } + }, + "AddCommunicationToCaseResponse": { + "base": "

    The result of the AddCommunicationToCase operation.

    ", + "refs": { + } + }, + "AfterTime": { + "base": null, + "refs": { + "DescribeCasesRequest$afterTime": "

    The start date for a filtered date search on support case communications. Case communications are available for 12 months after creation.

    ", + "DescribeCommunicationsRequest$afterTime": "

    The start date for a filtered date search on support case communications. Case communications are available for 12 months after creation.

    " + } + }, + "Attachment": { + "base": "

    An attachment to a case communication. The attachment consists of the file name and the content of the file.

    ", + "refs": { + "Attachments$member": null, + "DescribeAttachmentResponse$attachment": "

    The attachment content and file name.

    " + } + }, + "AttachmentDetails": { + "base": "

    The file name and ID of an attachment to a case communication. You can use the ID to retrieve the attachment with the DescribeAttachment operation.

    ", + "refs": { + "AttachmentSet$member": null + } + }, + "AttachmentId": { + "base": null, + "refs": { + "AttachmentDetails$attachmentId": "

    The ID of the attachment.

    ", + "DescribeAttachmentRequest$attachmentId": "

    The ID of the attachment to return. Attachment IDs are returned by the DescribeCommunications operation.

    " + } + }, + "AttachmentIdNotFound": { + "base": "

    An attachment with the specified ID could not be found.

    ", + "refs": { + } + }, + "AttachmentLimitExceeded": { + "base": "

    The limit for the number of attachment sets created in a short period of time has been exceeded.

    ", + "refs": { + } + }, + "AttachmentSet": { + "base": null, + "refs": { + "Communication$attachmentSet": "

    Information about the attachments to the case communication.

    " + } + }, + "AttachmentSetExpired": { + "base": "

    The expiration time of the attachment set has passed. The set expires 1 hour after it is created.

    ", + "refs": { + } + }, + "AttachmentSetId": { + "base": null, + "refs": { + "AddAttachmentsToSetRequest$attachmentSetId": "

    The ID of the attachment set. If an AttachmentSetId is not specified, a new attachment set is created, and the ID of the set is returned in the response. If an AttachmentSetId is specified, the attachments are added to the specified set, if it exists.

    ", + "AddAttachmentsToSetResponse$attachmentSetId": "

    The ID of the attachment set. If an AttachmentSetId was not specified, a new attachment set is created, and the ID of the set is returned in the response. If an AttachmentSetId was specified, the attachments are added to the specified set, if it exists.

    ", + "AddCommunicationToCaseRequest$attachmentSetId": "

    The ID of a set of one or more attachments for the communication to add to the case. Create the set by calling AddAttachmentsToSet.

    ", + "CreateCaseRequest$attachmentSetId": "

    The ID of a set of one or more attachments for the case. Create the set by using AddAttachmentsToSet.

    " + } + }, + "AttachmentSetIdNotFound": { + "base": "

    An attachment set with the specified ID could not be found.

    ", + "refs": { + } + }, + "AttachmentSetSizeLimitExceeded": { + "base": "

    A limit for the size of an attachment set has been exceeded. The limits are 3 attachments and 5 MB per attachment.

    ", + "refs": { + } + }, + "Attachments": { + "base": null, + "refs": { + "AddAttachmentsToSetRequest$attachments": "

    One or more attachments to add to the set. The limit is 3 attachments per set, and the size limit is 5 MB per attachment.

    " + } + }, + "BeforeTime": { + "base": null, + "refs": { + "DescribeCasesRequest$beforeTime": "

    The end date for a filtered date search on support case communications. Case communications are available for 12 months after creation.

    ", + "DescribeCommunicationsRequest$beforeTime": "

    The end date for a filtered date search on support case communications. Case communications are available for 12 months after creation.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "TrustedAdvisorCheckSummary$hasFlaggedResources": "

    Specifies whether the Trusted Advisor check has flagged resources.

    ", + "TrustedAdvisorResourceDetail$isSuppressed": "

    Specifies whether the AWS resource was ignored by Trusted Advisor because it was marked as suppressed by the user.

    " + } + }, + "CaseCreationLimitExceeded": { + "base": "

    The case creation limit for the account has been exceeded.

    ", + "refs": { + } + }, + "CaseDetails": { + "base": "

    A JSON-formatted object that contains the metadata for a support case. It is contained in the response from a DescribeCases request. CaseDetails contains the following fields:

    1. CaseId. The AWS Support case ID requested or returned in the call. The case ID is an alphanumeric string formatted as shown in this example: case-12345678910-2013-c4c1d2bf33c5cf47.
    2. CategoryCode. The category of problem for the AWS Support case. Corresponds to the CategoryCode values returned by a call to DescribeServices.
    3. DisplayId. The identifier for the case on pages in the AWS Support Center.
    4. Language. The ISO 639-1 code for the language in which AWS provides support. AWS Support currently supports English (\"en\") and Japanese (\"ja\"). Language parameters must be passed explicitly for operations that take them.
    5. RecentCommunications. One or more Communication objects. Fields of these objects are Attachments, Body, CaseId, SubmittedBy, and TimeCreated.
    6. NextToken. A resumption point for pagination.
    7. ServiceCode. The identifier for the AWS service that corresponds to the service code defined in the call to DescribeServices.
    8. SeverityCode. The severity code assigned to the case. Contains one of the values returned by the call to DescribeSeverityLevels.
    9. Status. The status of the case in the AWS Support Center.
    10. Subject. The subject line of the case.
    11. SubmittedBy. The email address of the account that submitted the case.
    12. TimeCreated. The time the case was created, in ISO-8601 format.
    ", + "refs": { + "CaseList$member": null + } + }, + "CaseId": { + "base": null, + "refs": { + "AddCommunicationToCaseRequest$caseId": "

    The AWS Support case ID requested or returned in the call. The case ID is an alphanumeric string formatted as shown in this example: case-12345678910-2013-c4c1d2bf33c5cf47

    ", + "CaseDetails$caseId": "

    The AWS Support case ID requested or returned in the call. The case ID is an alphanumeric string formatted as shown in this example: case-12345678910-2013-c4c1d2bf33c5cf47

    ", + "CaseIdList$member": null, + "Communication$caseId": "

    The AWS Support case ID requested or returned in the call. The case ID is an alphanumeric string formatted as shown in this example: case-12345678910-2013-c4c1d2bf33c5cf47

    ", + "CreateCaseResponse$caseId": "

    The AWS Support case ID requested or returned in the call. The case ID is an alphanumeric string formatted as shown in this example: case-12345678910-2013-c4c1d2bf33c5cf47

    ", + "DescribeCommunicationsRequest$caseId": "

    The AWS Support case ID requested or returned in the call. The case ID is an alphanumeric string formatted as shown in this example: case-12345678910-2013-c4c1d2bf33c5cf47

    ", + "ResolveCaseRequest$caseId": "

    The AWS Support case ID requested or returned in the call. The case ID is an alphanumeric string formatted as shown in this example: case-12345678910-2013-c4c1d2bf33c5cf47

    " + } + }, + "CaseIdList": { + "base": null, + "refs": { + "DescribeCasesRequest$caseIdList": "

    A list of ID numbers of the support cases you want returned. The maximum number of cases is 100.

    " + } + }, + "CaseIdNotFound": { + "base": "

    The requested CaseId could not be located.

    ", + "refs": { + } + }, + "CaseList": { + "base": null, + "refs": { + "DescribeCasesResponse$cases": "

    The details for the cases that match the request.

    " + } + }, + "CaseStatus": { + "base": null, + "refs": { + "ResolveCaseResponse$initialCaseStatus": "

    The status of the case when the ResolveCase request was sent.

    ", + "ResolveCaseResponse$finalCaseStatus": "

    The status of the case after the ResolveCase request was processed.

    " + } + }, + "Category": { + "base": "

    A JSON-formatted name/value pair that represents the category name and category code of the problem, selected from the DescribeServices response for each AWS service.

    ", + "refs": { + "CategoryList$member": null + } + }, + "CategoryCode": { + "base": null, + "refs": { + "CaseDetails$categoryCode": "

    The category of problem for the AWS Support case.

    ", + "Category$code": "

    The category code for the support case.

    ", + "CreateCaseRequest$categoryCode": "

    The category of problem for the AWS Support case.

    " + } + }, + "CategoryList": { + "base": null, + "refs": { + "Service$categories": "

    A list of categories that describe the type of support issue a case describes. Categories consist of a category name and a category code. Category names and codes are passed to AWS Support when you call CreateCase.

    " + } + }, + "CategoryName": { + "base": null, + "refs": { + "Category$name": "

    The category name for the support case.

    " + } + }, + "CcEmailAddress": { + "base": null, + "refs": { + "CcEmailAddressList$member": null + } + }, + "CcEmailAddressList": { + "base": null, + "refs": { + "AddCommunicationToCaseRequest$ccEmailAddresses": "

    The email addresses in the CC line of an email to be added to the support case.

    ", + "CaseDetails$ccEmailAddresses": "

    The email addresses that receive copies of communication about the case.

    ", + "CreateCaseRequest$ccEmailAddresses": "

    A list of email addresses that AWS Support copies on case correspondence.

    " + } + }, + "Communication": { + "base": "

    A communication associated with an AWS Support case. The communication consists of the case ID, the message body, attachment information, the account email address, and the date and time of the communication.

    ", + "refs": { + "CommunicationList$member": null + } + }, + "CommunicationBody": { + "base": null, + "refs": { + "AddCommunicationToCaseRequest$communicationBody": "

    The body of an email communication to add to the support case.

    ", + "Communication$body": "

    The text of the communication between the customer and AWS Support.

    ", + "CreateCaseRequest$communicationBody": "

    The communication body text when you create an AWS Support case by calling CreateCase.

    " + } + }, + "CommunicationList": { + "base": null, + "refs": { + "DescribeCommunicationsResponse$communications": "

    The communications for the case.

    ", + "RecentCaseCommunications$communications": "

    The five most recent communications associated with the case.

    " + } + }, + "CreateCaseRequest": { + "base": null, + "refs": { + } + }, + "CreateCaseResponse": { + "base": "

    The AWS Support case ID returned by a successful completion of the CreateCase operation.

    ", + "refs": { + } + }, + "Data": { + "base": null, + "refs": { + "Attachment$data": "

    The content of the attachment file.

    " + } + }, + "DescribeAttachmentLimitExceeded": { + "base": "

    The limit for the number of DescribeAttachment requests in a short period of time has been exceeded.

    ", + "refs": { + } + }, + "DescribeAttachmentRequest": { + "base": null, + "refs": { + } + }, + "DescribeAttachmentResponse": { + "base": "

    The content and file name of the attachment returned by the DescribeAttachment operation.

    ", + "refs": { + } + }, + "DescribeCasesRequest": { + "base": null, + "refs": { + } + }, + "DescribeCasesResponse": { + "base": "

    Returns an array of CaseDetails objects and a NextToken that defines a point for pagination in the result set.

    ", + "refs": { + } + }, + "DescribeCommunicationsRequest": { + "base": null, + "refs": { + } + }, + "DescribeCommunicationsResponse": { + "base": "

    The communications returned by the DescribeCommunications operation.

    ", + "refs": { + } + }, + "DescribeServicesRequest": { + "base": null, + "refs": { + } + }, + "DescribeServicesResponse": { + "base": "

    The list of AWS services returned by the DescribeServices operation.

    ", + "refs": { + } + }, + "DescribeSeverityLevelsRequest": { + "base": null, + "refs": { + } + }, + "DescribeSeverityLevelsResponse": { + "base": "

    The list of severity levels returned by the DescribeSeverityLevels operation.

    ", + "refs": { + } + }, + "DescribeTrustedAdvisorCheckRefreshStatusesRequest": { + "base": null, + "refs": { + } + }, + "DescribeTrustedAdvisorCheckRefreshStatusesResponse": { + "base": "

    The statuses of the Trusted Advisor checks returned by the DescribeTrustedAdvisorCheckRefreshStatuses operation.

    ", + "refs": { + } + }, + "DescribeTrustedAdvisorCheckResultRequest": { + "base": null, + "refs": { + } + }, + "DescribeTrustedAdvisorCheckResultResponse": { + "base": "

    The result of the Trusted Advisor check returned by the DescribeTrustedAdvisorCheckResult operation.

    ", + "refs": { + } + }, + "DescribeTrustedAdvisorCheckSummariesRequest": { + "base": null, + "refs": { + } + }, + "DescribeTrustedAdvisorCheckSummariesResponse": { + "base": "

    The summaries of the Trusted Advisor checks returned by the DescribeTrustedAdvisorCheckSummaries operation.

    ", + "refs": { + } + }, + "DescribeTrustedAdvisorChecksRequest": { + "base": null, + "refs": { + } + }, + "DescribeTrustedAdvisorChecksResponse": { + "base": "

    Information about the Trusted Advisor checks returned by the DescribeTrustedAdvisorChecks operation.

    ", + "refs": { + } + }, + "DisplayId": { + "base": null, + "refs": { + "CaseDetails$displayId": "

    The ID displayed for the case in the AWS Support Center. This is a numeric string.

    ", + "DescribeCasesRequest$displayId": "

    The ID displayed for a case in the AWS Support Center user interface.

    " + } + }, + "Double": { + "base": null, + "refs": { + "TrustedAdvisorCostOptimizingSummary$estimatedMonthlySavings": "

    The estimated monthly savings that might be realized if the recommended actions are taken.

    ", + "TrustedAdvisorCostOptimizingSummary$estimatedPercentMonthlySavings": "

    The estimated percentage of savings that might be realized if the recommended actions are taken.

    " + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "AttachmentIdNotFound$message": "

    An attachment with the specified ID could not be found.

    ", + "AttachmentLimitExceeded$message": "

    The limit for the number of attachment sets created in a short period of time has been exceeded.

    ", + "AttachmentSetExpired$message": "

    The expiration time of the attachment set has passed. The set expires 1 hour after it is created.

    ", + "AttachmentSetIdNotFound$message": "

    An attachment set with the specified ID could not be found.

    ", + "AttachmentSetSizeLimitExceeded$message": "

    A limit for the size of an attachment set has been exceeded. The limits are 3 attachments and 5 MB per attachment.

    ", + "CaseCreationLimitExceeded$message": "

    An error message that indicates that you have exceeded the number of cases you can have open.

    ", + "CaseIdNotFound$message": "

    The requested CaseId could not be located.

    ", + "DescribeAttachmentLimitExceeded$message": "

    The limit for the number of DescribeAttachment requests in a short period of time has been exceeded.

    ", + "InternalServerError$message": "

    An internal server error occurred.

    " + } + }, + "ExpiryTime": { + "base": null, + "refs": { + "AddAttachmentsToSetResponse$expiryTime": "

    The time and date when the attachment set expires.

    " + } + }, + "FileName": { + "base": null, + "refs": { + "Attachment$fileName": "

    The name of the attachment file.

    ", + "AttachmentDetails$fileName": "

    The file name of the attachment.

    " + } + }, + "IncludeCommunications": { + "base": null, + "refs": { + "DescribeCasesRequest$includeCommunications": "

    Specifies whether communications should be included in the DescribeCases results. The default is true.

    " + } + }, + "IncludeResolvedCases": { + "base": null, + "refs": { + "DescribeCasesRequest$includeResolvedCases": "

    Specifies whether resolved support cases should be included in the DescribeCases results. The default is false.

    " + } + }, + "InternalServerError": { + "base": "

    An internal server error occurred.

    ", + "refs": { + } + }, + "IssueType": { + "base": null, + "refs": { + "CreateCaseRequest$issueType": "

    The type of issue for the case. You can specify either \"customer-service\" or \"technical\". If you do not indicate a value, the default is \"technical\".

    " + } + }, + "Language": { + "base": null, + "refs": { + "CaseDetails$language": "

    The ISO 639-1 code for the language in which AWS provides support. AWS Support currently supports English (\"en\") and Japanese (\"ja\"). Language parameters must be passed explicitly for operations that take them.

    ", + "CreateCaseRequest$language": "

    The ISO 639-1 code for the language in which AWS provides support. AWS Support currently supports English (\"en\") and Japanese (\"ja\"). Language parameters must be passed explicitly for operations that take them.

    ", + "DescribeCasesRequest$language": "

    The ISO 639-1 code for the language in which AWS provides support. AWS Support currently supports English (\"en\") and Japanese (\"ja\"). Language parameters must be passed explicitly for operations that take them.

    ", + "DescribeServicesRequest$language": "

    The ISO 639-1 code for the language in which AWS provides support. AWS Support currently supports English (\"en\") and Japanese (\"ja\"). Language parameters must be passed explicitly for operations that take them.

    ", + "DescribeSeverityLevelsRequest$language": "

    The ISO 639-1 code for the language in which AWS provides support. AWS Support currently supports English (\"en\") and Japanese (\"ja\"). Language parameters must be passed explicitly for operations that take them.

    " + } + }, + "Long": { + "base": null, + "refs": { + "TrustedAdvisorCheckRefreshStatus$millisUntilNextRefreshable": "

    The amount of time, in milliseconds, until the Trusted Advisor check is eligible for refresh.

    ", + "TrustedAdvisorResourcesSummary$resourcesProcessed": "

    The number of AWS resources that were analyzed by the Trusted Advisor check.

    ", + "TrustedAdvisorResourcesSummary$resourcesFlagged": "

    The number of AWS resources that were flagged (listed) by the Trusted Advisor check.

    ", + "TrustedAdvisorResourcesSummary$resourcesIgnored": "

    The number of AWS resources ignored by Trusted Advisor because information was unavailable.

    ", + "TrustedAdvisorResourcesSummary$resourcesSuppressed": "

    The number of AWS resources ignored by Trusted Advisor because they were marked as suppressed by the user.

    " + } + }, + "MaxResults": { + "base": null, + "refs": { + "DescribeCasesRequest$maxResults": "

    The maximum number of results to return before paginating.

    ", + "DescribeCommunicationsRequest$maxResults": "

    The maximum number of results to return before paginating.

    " + } + }, + "NextToken": { + "base": null, + "refs": { + "DescribeCasesRequest$nextToken": "

    A resumption point for pagination.

    ", + "DescribeCasesResponse$nextToken": "

    A resumption point for pagination.

    ", + "DescribeCommunicationsRequest$nextToken": "

    A resumption point for pagination.

    ", + "DescribeCommunicationsResponse$nextToken": "

    A resumption point for pagination.

    ", + "RecentCaseCommunications$nextToken": "

    A resumption point for pagination.

    " + } + }, + "RecentCaseCommunications": { + "base": "

    The five most recent communications associated with the case.

    ", + "refs": { + "CaseDetails$recentCommunications": "

    The five most recent communications between you and AWS Support Center, including the IDs of any attachments to the communications. Also includes a nextToken that you can use to retrieve earlier communications.

    " + } + }, + "RefreshTrustedAdvisorCheckRequest": { + "base": null, + "refs": { + } + }, + "RefreshTrustedAdvisorCheckResponse": { + "base": "

    The current refresh status of a Trusted Advisor check.

    ", + "refs": { + } + }, + "ResolveCaseRequest": { + "base": null, + "refs": { + } + }, + "ResolveCaseResponse": { + "base": "

    The status of the case returned by the ResolveCase operation.

    ", + "refs": { + } + }, + "Result": { + "base": null, + "refs": { + "AddCommunicationToCaseResponse$result": "

    True if AddCommunicationToCase succeeds; otherwise, the operation returns an error.

    " + } + }, + "Service": { + "base": "

    Information about an AWS service returned by the DescribeServices operation.

    ", + "refs": { + "ServiceList$member": null + } + }, + "ServiceCode": { + "base": null, + "refs": { + "CaseDetails$serviceCode": "

    The code for the AWS service returned by the call to DescribeServices.

    ", + "CreateCaseRequest$serviceCode": "

    The code for the AWS service returned by the call to DescribeServices.

    ", + "Service$code": "

    The code for an AWS service, as returned in the DescribeServices response. The Name element contains the corresponding friendly name.

    ", + "ServiceCodeList$member": null + } + }, + "ServiceCodeList": { + "base": null, + "refs": { + "DescribeServicesRequest$serviceCodeList": "

    A JSON-formatted list of service codes available for AWS services.

    " + } + }, + "ServiceList": { + "base": null, + "refs": { + "DescribeServicesResponse$services": "

    A JSON-formatted list of AWS services.

    " + } + }, + "ServiceName": { + "base": null, + "refs": { + "Service$name": "

    The friendly name for an AWS service. The Code element contains the corresponding code.

    " + } + }, + "SeverityCode": { + "base": null, + "refs": { + "CaseDetails$severityCode": "

    The code for the severity level returned by the call to DescribeSeverityLevels.

    ", + "CreateCaseRequest$severityCode": "

    The code for the severity level returned by the call to DescribeSeverityLevels.

    The availability of severity levels depends on each customer's support subscription. In other words, your subscription might not include the urgent level of response time.

    " + } + }, + "SeverityLevel": { + "base": "

    A code and name pair that represent a severity level that can be applied to a support case.

    ", + "refs": { + "SeverityLevelsList$member": null + } + }, + "SeverityLevelCode": { + "base": null, + "refs": { + "SeverityLevel$code": "

    One of four values: \"low\", \"medium\", \"high\", and \"urgent\". These values correspond to response times returned to the caller in SeverityLevel.name.

    " + } + }, + "SeverityLevelName": { + "base": null, + "refs": { + "SeverityLevel$name": "

    The name of the severity level that corresponds to the severity level code.

    " + } + }, + "SeverityLevelsList": { + "base": null, + "refs": { + "DescribeSeverityLevelsResponse$severityLevels": "

    The available severity levels for the support case. Available severity levels are defined by your service level agreement with AWS.

    " + } + }, + "Status": { + "base": null, + "refs": { + "CaseDetails$status": "

    The status of the case.

    " + } + }, + "String": { + "base": null, + "refs": { + "DescribeTrustedAdvisorCheckResultRequest$checkId": "

    The unique identifier for the Trusted Advisor check.

    ", + "DescribeTrustedAdvisorCheckResultRequest$language": "

    The ISO 639-1 code for the language in which AWS provides support. AWS Support currently supports English (\"en\") and Japanese (\"ja\"). Language parameters must be passed explicitly for operations that take them.

    ", + "DescribeTrustedAdvisorChecksRequest$language": "

    The ISO 639-1 code for the language in which AWS provides support. AWS Support currently supports English (\"en\") and Japanese (\"ja\"). Language parameters must be passed explicitly for operations that take them.

    ", + "RefreshTrustedAdvisorCheckRequest$checkId": "

    The unique identifier for the Trusted Advisor check.

    ", + "StringList$member": null, + "TrustedAdvisorCheckDescription$id": "

    The unique identifier for the Trusted Advisor check.

    ", + "TrustedAdvisorCheckDescription$name": "

    The display name for the Trusted Advisor check.

    ", + "TrustedAdvisorCheckDescription$description": "

    The description of the Trusted Advisor check, which includes the alert criteria and recommended actions (contains HTML markup).

    ", + "TrustedAdvisorCheckDescription$category": "

    The category of the Trusted Advisor check.

    ", + "TrustedAdvisorCheckRefreshStatus$checkId": "

    The unique identifier for the Trusted Advisor check.

    ", + "TrustedAdvisorCheckRefreshStatus$status": "

    The status of the Trusted Advisor check for which a refresh has been requested: \"none\", \"enqueued\", \"processing\", \"success\", or \"abandoned\".

    ", + "TrustedAdvisorCheckResult$checkId": "

    The unique identifier for the Trusted Advisor check.

    ", + "TrustedAdvisorCheckResult$timestamp": "

    The time of the last refresh of the check.

    ", + "TrustedAdvisorCheckResult$status": "

    The alert status of the check: \"ok\" (green), \"warning\" (yellow), \"error\" (red), or \"not_available\".

    ", + "TrustedAdvisorCheckSummary$checkId": "

    The unique identifier for the Trusted Advisor check.

    ", + "TrustedAdvisorCheckSummary$timestamp": "

    The time of the last refresh of the check.

    ", + "TrustedAdvisorCheckSummary$status": "

    The alert status of the check: \"ok\" (green), \"warning\" (yellow), \"error\" (red), or \"not_available\".

    ", + "TrustedAdvisorResourceDetail$status": "

    The status code for the resource identified in the Trusted Advisor check.

    ", + "TrustedAdvisorResourceDetail$region": "

    The AWS region in which the identified resource is located.

    ", + "TrustedAdvisorResourceDetail$resourceId": "

    The unique identifier for the identified resource.

    " + } + }, + "StringList": { + "base": null, + "refs": { + "DescribeTrustedAdvisorCheckRefreshStatusesRequest$checkIds": "

    The IDs of the Trusted Advisor checks.

    ", + "DescribeTrustedAdvisorCheckSummariesRequest$checkIds": "

    The IDs of the Trusted Advisor checks.

    ", + "TrustedAdvisorCheckDescription$metadata": "

    The column headings for the data returned by the Trusted Advisor check. The order of the headings corresponds to the order of the data in the Metadata element of the TrustedAdvisorResourceDetail for the check. Metadata contains all the data that is shown in the Excel download, even in those cases where the UI shows just summary data.

    ", + "TrustedAdvisorResourceDetail$metadata": "

    Additional information about the identified resource. The exact metadata and its order can be obtained by inspecting the TrustedAdvisorCheckDescription object returned by the call to DescribeTrustedAdvisorChecks. Metadata contains all the data that is shown in the Excel download, even in those cases where the UI shows just summary data.

    " + } + }, + "Subject": { + "base": null, + "refs": { + "CaseDetails$subject": "

    The subject line for the case in the AWS Support Center.

    ", + "CreateCaseRequest$subject": "

    The title of the AWS Support case.

    " + } + }, + "SubmittedBy": { + "base": null, + "refs": { + "CaseDetails$submittedBy": "

    The email address of the account that submitted the case.

    ", + "Communication$submittedBy": "

    The email address of the account that submitted the AWS Support case.

    " + } + }, + "TimeCreated": { + "base": null, + "refs": { + "CaseDetails$timeCreated": "

    The time that the case was created in the AWS Support Center.

    ", + "Communication$timeCreated": "

    The time the communication was created.

    " + } + }, + "TrustedAdvisorCategorySpecificSummary": { + "base": "

    The container for summary information that relates to the category of the Trusted Advisor check.

    ", + "refs": { + "TrustedAdvisorCheckResult$categorySpecificSummary": "

    Summary information that relates to the category of the check. Cost Optimizing is the only category that is currently supported.

    ", + "TrustedAdvisorCheckSummary$categorySpecificSummary": "

    Summary information that relates to the category of the check. Cost Optimizing is the only category that is currently supported.

    " + } + }, + "TrustedAdvisorCheckDescription": { + "base": "

    The description and metadata for a Trusted Advisor check.

    ", + "refs": { + "TrustedAdvisorCheckList$member": null + } + }, + "TrustedAdvisorCheckList": { + "base": null, + "refs": { + "DescribeTrustedAdvisorChecksResponse$checks": "

    Information about all available Trusted Advisor checks.

    " + } + }, + "TrustedAdvisorCheckRefreshStatus": { + "base": "

    The refresh status of a Trusted Advisor check.

    ", + "refs": { + "RefreshTrustedAdvisorCheckResponse$status": "

    The current refresh status for a check, including the amount of time until the check is eligible for refresh.

    ", + "TrustedAdvisorCheckRefreshStatusList$member": null + } + }, + "TrustedAdvisorCheckRefreshStatusList": { + "base": null, + "refs": { + "DescribeTrustedAdvisorCheckRefreshStatusesResponse$statuses": "

    The refresh status of the specified Trusted Advisor checks.

    " + } + }, + "TrustedAdvisorCheckResult": { + "base": "

    The results of a Trusted Advisor check returned by DescribeTrustedAdvisorCheckResult.

    ", + "refs": { + "DescribeTrustedAdvisorCheckResultResponse$result": "

    The detailed results of the Trusted Advisor check.

    " + } + }, + "TrustedAdvisorCheckSummary": { + "base": "

    A summary of a Trusted Advisor check result, including the alert status, last refresh, and number of resources examined.

    ", + "refs": { + "TrustedAdvisorCheckSummaryList$member": null + } + }, + "TrustedAdvisorCheckSummaryList": { + "base": null, + "refs": { + "DescribeTrustedAdvisorCheckSummariesResponse$summaries": "

    The summary information for the requested Trusted Advisor checks.

    " + } + }, + "TrustedAdvisorCostOptimizingSummary": { + "base": "

    The estimated cost savings that might be realized if the recommended actions are taken.

    ", + "refs": { + "TrustedAdvisorCategorySpecificSummary$costOptimizing": "

    The summary information about cost savings for a Trusted Advisor check that is in the Cost Optimizing category.

    " + } + }, + "TrustedAdvisorResourceDetail": { + "base": "

    Contains information about a resource identified by a Trusted Advisor check.

    ", + "refs": { + "TrustedAdvisorResourceDetailList$member": null + } + }, + "TrustedAdvisorResourceDetailList": { + "base": null, + "refs": { + "TrustedAdvisorCheckResult$flaggedResources": "

    The details about each resource listed in the check result.

    " + } + }, + "TrustedAdvisorResourcesSummary": { + "base": "

    Details about AWS resources that were analyzed in a call to the Trusted Advisor DescribeTrustedAdvisorCheckSummaries operation.

    ", + "refs": { + "TrustedAdvisorCheckResult$resourcesSummary": null, + "TrustedAdvisorCheckSummary$resourcesSummary": null + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/support/2013-04-15/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/support/2013-04-15/paginators-1.json new file mode 100644 index 000000000..1368630c8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/support/2013-04-15/paginators-1.json @@ -0,0 +1,25 @@ +{ + "pagination": { + "DescribeCases": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "cases" + }, + "DescribeCommunications": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "communications" + }, + "DescribeServices": { + "result_key": "services" + }, + "DescribeTrustedAdvisorCheckRefreshStatuses": { + "result_key": "statuses" + }, + "DescribeTrustedAdvisorCheckSummaries": { + "result_key": "summaries" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/swf/2012-01-25/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/swf/2012-01-25/api-2.json new file mode 100644 index 000000000..63486b94d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/swf/2012-01-25/api-2.json @@ -0,0 +1,2838 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2012-01-25", + "endpointPrefix":"swf", + "jsonVersion":"1.0", + "serviceAbbreviation":"Amazon SWF", + "serviceFullName":"Amazon Simple Workflow Service", + "signatureVersion":"v4", + "targetPrefix":"SimpleWorkflowService", + "timestampFormat":"unixTimestamp", + "protocol":"json" + }, + "operations":{ + "CountClosedWorkflowExecutions":{ + "name":"CountClosedWorkflowExecutions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CountClosedWorkflowExecutionsInput"}, + "output":{"shape":"WorkflowExecutionCount"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "CountOpenWorkflowExecutions":{ + "name":"CountOpenWorkflowExecutions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CountOpenWorkflowExecutionsInput"}, + "output":{"shape":"WorkflowExecutionCount"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "CountPendingActivityTasks":{ + "name":"CountPendingActivityTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CountPendingActivityTasksInput"}, + "output":{"shape":"PendingTaskCount"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "CountPendingDecisionTasks":{ + "name":"CountPendingDecisionTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CountPendingDecisionTasksInput"}, + "output":{"shape":"PendingTaskCount"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "DeprecateActivityType":{ + "name":"DeprecateActivityType", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeprecateActivityTypeInput"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"TypeDeprecatedFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", 
+ "exception":true + } + ] + }, + "DeprecateDomain":{ + "name":"DeprecateDomain", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeprecateDomainInput"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"DomainDeprecatedFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "DeprecateWorkflowType":{ + "name":"DeprecateWorkflowType", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeprecateWorkflowTypeInput"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"TypeDeprecatedFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "DescribeActivityType":{ + "name":"DescribeActivityType", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeActivityTypeInput"}, + "output":{"shape":"ActivityTypeDetail"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "DescribeDomain":{ + "name":"DescribeDomain", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDomainInput"}, + "output":{"shape":"DomainDetail"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "DescribeWorkflowExecution":{ + "name":"DescribeWorkflowExecution", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeWorkflowExecutionInput"}, + "output":{"shape":"WorkflowExecutionDetail"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "DescribeWorkflowType":{ + "name":"DescribeWorkflowType", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeWorkflowTypeInput"}, + "output":{"shape":"WorkflowTypeDetail"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "GetWorkflowExecutionHistory":{ + "name":"GetWorkflowExecutionHistory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetWorkflowExecutionHistoryInput"}, + "output":{"shape":"History"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "ListActivityTypes":{ + "name":"ListActivityTypes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListActivityTypesInput"}, + "output":{"shape":"ActivityTypeInfos"}, + "errors":[ + { + "shape":"OperationNotPermittedFault", + "exception":true + }, + { + "shape":"UnknownResourceFault", + "exception":true + } + ] + }, + "ListClosedWorkflowExecutions":{ + "name":"ListClosedWorkflowExecutions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListClosedWorkflowExecutionsInput"}, + "output":{"shape":"WorkflowExecutionInfos"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "ListDomains":{ + "name":"ListDomains", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDomainsInput"}, + "output":{"shape":"DomainInfos"}, + "errors":[ + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + 
}, + "ListOpenWorkflowExecutions":{ + "name":"ListOpenWorkflowExecutions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListOpenWorkflowExecutionsInput"}, + "output":{"shape":"WorkflowExecutionInfos"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "ListWorkflowTypes":{ + "name":"ListWorkflowTypes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListWorkflowTypesInput"}, + "output":{"shape":"WorkflowTypeInfos"}, + "errors":[ + { + "shape":"OperationNotPermittedFault", + "exception":true + }, + { + "shape":"UnknownResourceFault", + "exception":true + } + ] + }, + "PollForActivityTask":{ + "name":"PollForActivityTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PollForActivityTaskInput"}, + "output":{"shape":"ActivityTask"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + }, + { + "shape":"LimitExceededFault", + "exception":true + } + ] + }, + "PollForDecisionTask":{ + "name":"PollForDecisionTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PollForDecisionTaskInput"}, + "output":{"shape":"DecisionTask"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + }, + { + "shape":"LimitExceededFault", + "exception":true + } + ] + }, + "RecordActivityTaskHeartbeat":{ + "name":"RecordActivityTaskHeartbeat", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RecordActivityTaskHeartbeatInput"}, + "output":{"shape":"ActivityTaskStatus"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "RegisterActivityType":{ + "name":"RegisterActivityType", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterActivityTypeInput"}, + "errors":[ + { + "shape":"TypeAlreadyExistsFault", + "exception":true + }, + { + "shape":"LimitExceededFault", + "exception":true + }, + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "RegisterDomain":{ + "name":"RegisterDomain", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterDomainInput"}, + "errors":[ + { + "shape":"DomainAlreadyExistsFault", + "exception":true + }, + { + "shape":"LimitExceededFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "RegisterWorkflowType":{ + "name":"RegisterWorkflowType", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterWorkflowTypeInput"}, + "errors":[ + { + "shape":"TypeAlreadyExistsFault", + "exception":true + }, + { + "shape":"LimitExceededFault", + "exception":true + }, + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "RequestCancelWorkflowExecution":{ + "name":"RequestCancelWorkflowExecution", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RequestCancelWorkflowExecutionInput"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "RespondActivityTaskCanceled":{ + 
"name":"RespondActivityTaskCanceled", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RespondActivityTaskCanceledInput"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "RespondActivityTaskCompleted":{ + "name":"RespondActivityTaskCompleted", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RespondActivityTaskCompletedInput"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "RespondActivityTaskFailed":{ + "name":"RespondActivityTaskFailed", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RespondActivityTaskFailedInput"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "RespondDecisionTaskCompleted":{ + "name":"RespondDecisionTaskCompleted", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RespondDecisionTaskCompletedInput"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "SignalWorkflowExecution":{ + "name":"SignalWorkflowExecution", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SignalWorkflowExecutionInput"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "StartWorkflowExecution":{ + "name":"StartWorkflowExecution", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartWorkflowExecutionInput"}, + "output":{"shape":"Run"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"TypeDeprecatedFault", + "exception":true + }, + { + "shape":"WorkflowExecutionAlreadyStartedFault", + "exception":true + }, + { + "shape":"LimitExceededFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + }, + { + "shape":"DefaultUndefinedFault", + "exception":true + } + ] + }, + "TerminateWorkflowExecution":{ + "name":"TerminateWorkflowExecution", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TerminateWorkflowExecutionInput"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + } + }, + "shapes":{ + "ActivityId":{ + "type":"string", + "min":1, + "max":256 + }, + "ActivityTask":{ + "type":"structure", + "required":[ + "taskToken", + "activityId", + "startedEventId", + "workflowExecution", + "activityType" + ], + "members":{ + "taskToken":{"shape":"TaskToken"}, + "activityId":{"shape":"ActivityId"}, + "startedEventId":{"shape":"EventId"}, + "workflowExecution":{"shape":"WorkflowExecution"}, + "activityType":{"shape":"ActivityType"}, + "input":{"shape":"Data"} + } + }, + "ActivityTaskCancelRequestedEventAttributes":{ + "type":"structure", + "required":[ + "decisionTaskCompletedEventId", + "activityId" + ], + "members":{ + "decisionTaskCompletedEventId":{"shape":"EventId"}, + "activityId":{"shape":"ActivityId"} + } + }, + "ActivityTaskCanceledEventAttributes":{ + "type":"structure", + "required":[ + "scheduledEventId", + "startedEventId" + ], + "members":{ + "details":{"shape":"Data"}, + "scheduledEventId":{"shape":"EventId"}, + 
"startedEventId":{"shape":"EventId"}, + "latestCancelRequestedEventId":{"shape":"EventId"} + } + }, + "ActivityTaskCompletedEventAttributes":{ + "type":"structure", + "required":[ + "scheduledEventId", + "startedEventId" + ], + "members":{ + "result":{"shape":"Data"}, + "scheduledEventId":{"shape":"EventId"}, + "startedEventId":{"shape":"EventId"} + } + }, + "ActivityTaskFailedEventAttributes":{ + "type":"structure", + "required":[ + "scheduledEventId", + "startedEventId" + ], + "members":{ + "reason":{"shape":"FailureReason"}, + "details":{"shape":"Data"}, + "scheduledEventId":{"shape":"EventId"}, + "startedEventId":{"shape":"EventId"} + } + }, + "ActivityTaskScheduledEventAttributes":{ + "type":"structure", + "required":[ + "activityType", + "activityId", + "taskList", + "decisionTaskCompletedEventId" + ], + "members":{ + "activityType":{"shape":"ActivityType"}, + "activityId":{"shape":"ActivityId"}, + "input":{"shape":"Data"}, + "control":{"shape":"Data"}, + "scheduleToStartTimeout":{"shape":"DurationInSecondsOptional"}, + "scheduleToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "startToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "taskList":{"shape":"TaskList"}, + "taskPriority":{"shape":"TaskPriority"}, + "decisionTaskCompletedEventId":{"shape":"EventId"}, + "heartbeatTimeout":{"shape":"DurationInSecondsOptional"} + } + }, + "ActivityTaskStartedEventAttributes":{ + "type":"structure", + "required":["scheduledEventId"], + "members":{ + "identity":{"shape":"Identity"}, + "scheduledEventId":{"shape":"EventId"} + } + }, + "ActivityTaskStatus":{ + "type":"structure", + "required":["cancelRequested"], + "members":{ + "cancelRequested":{"shape":"Canceled"} + } + }, + "ActivityTaskTimedOutEventAttributes":{ + "type":"structure", + "required":[ + "timeoutType", + "scheduledEventId", + "startedEventId" + ], + "members":{ + "timeoutType":{"shape":"ActivityTaskTimeoutType"}, + "scheduledEventId":{"shape":"EventId"}, + "startedEventId":{"shape":"EventId"}, + "details":{"shape":"LimitedData"} + } + }, + "ActivityTaskTimeoutType":{ + "type":"string", + "enum":[ + "START_TO_CLOSE", + "SCHEDULE_TO_START", + "SCHEDULE_TO_CLOSE", + "HEARTBEAT" + ] + }, + "ActivityType":{ + "type":"structure", + "required":[ + "name", + "version" + ], + "members":{ + "name":{"shape":"Name"}, + "version":{"shape":"Version"} + } + }, + "ActivityTypeConfiguration":{ + "type":"structure", + "members":{ + "defaultTaskStartToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "defaultTaskHeartbeatTimeout":{"shape":"DurationInSecondsOptional"}, + "defaultTaskList":{"shape":"TaskList"}, + "defaultTaskPriority":{"shape":"TaskPriority"}, + "defaultTaskScheduleToStartTimeout":{"shape":"DurationInSecondsOptional"}, + "defaultTaskScheduleToCloseTimeout":{"shape":"DurationInSecondsOptional"} + } + }, + "ActivityTypeDetail":{ + "type":"structure", + "required":[ + "typeInfo", + "configuration" + ], + "members":{ + "typeInfo":{"shape":"ActivityTypeInfo"}, + "configuration":{"shape":"ActivityTypeConfiguration"} + } + }, + "ActivityTypeInfo":{ + "type":"structure", + "required":[ + "activityType", + "status", + "creationDate" + ], + "members":{ + "activityType":{"shape":"ActivityType"}, + "status":{"shape":"RegistrationStatus"}, + "description":{"shape":"Description"}, + "creationDate":{"shape":"Timestamp"}, + "deprecationDate":{"shape":"Timestamp"} + } + }, + "ActivityTypeInfoList":{ + "type":"list", + "member":{"shape":"ActivityTypeInfo"} + }, + "ActivityTypeInfos":{ + "type":"structure", + 
"required":["typeInfos"], + "members":{ + "typeInfos":{"shape":"ActivityTypeInfoList"}, + "nextPageToken":{"shape":"PageToken"} + } + }, + "Arn":{ + "type":"string", + "min":1, + "max":1224 + }, + "CancelTimerDecisionAttributes":{ + "type":"structure", + "required":["timerId"], + "members":{ + "timerId":{"shape":"TimerId"} + } + }, + "CancelTimerFailedCause":{ + "type":"string", + "enum":[ + "TIMER_ID_UNKNOWN", + "OPERATION_NOT_PERMITTED" + ] + }, + "CancelTimerFailedEventAttributes":{ + "type":"structure", + "required":[ + "timerId", + "cause", + "decisionTaskCompletedEventId" + ], + "members":{ + "timerId":{"shape":"TimerId"}, + "cause":{"shape":"CancelTimerFailedCause"}, + "decisionTaskCompletedEventId":{"shape":"EventId"} + } + }, + "CancelWorkflowExecutionDecisionAttributes":{ + "type":"structure", + "members":{ + "details":{"shape":"Data"} + } + }, + "CancelWorkflowExecutionFailedCause":{ + "type":"string", + "enum":[ + "UNHANDLED_DECISION", + "OPERATION_NOT_PERMITTED" + ] + }, + "CancelWorkflowExecutionFailedEventAttributes":{ + "type":"structure", + "required":[ + "cause", + "decisionTaskCompletedEventId" + ], + "members":{ + "cause":{"shape":"CancelWorkflowExecutionFailedCause"}, + "decisionTaskCompletedEventId":{"shape":"EventId"} + } + }, + "Canceled":{"type":"boolean"}, + "CauseMessage":{ + "type":"string", + "max":1728 + }, + "ChildPolicy":{ + "type":"string", + "enum":[ + "TERMINATE", + "REQUEST_CANCEL", + "ABANDON" + ] + }, + "ChildWorkflowExecutionCanceledEventAttributes":{ + "type":"structure", + "required":[ + "workflowExecution", + "workflowType", + "initiatedEventId", + "startedEventId" + ], + "members":{ + "workflowExecution":{"shape":"WorkflowExecution"}, + "workflowType":{"shape":"WorkflowType"}, + "details":{"shape":"Data"}, + "initiatedEventId":{"shape":"EventId"}, + "startedEventId":{"shape":"EventId"} + } + }, + "ChildWorkflowExecutionCompletedEventAttributes":{ + "type":"structure", + "required":[ + "workflowExecution", + "workflowType", + "initiatedEventId", + "startedEventId" + ], + "members":{ + "workflowExecution":{"shape":"WorkflowExecution"}, + "workflowType":{"shape":"WorkflowType"}, + "result":{"shape":"Data"}, + "initiatedEventId":{"shape":"EventId"}, + "startedEventId":{"shape":"EventId"} + } + }, + "ChildWorkflowExecutionFailedEventAttributes":{ + "type":"structure", + "required":[ + "workflowExecution", + "workflowType", + "initiatedEventId", + "startedEventId" + ], + "members":{ + "workflowExecution":{"shape":"WorkflowExecution"}, + "workflowType":{"shape":"WorkflowType"}, + "reason":{"shape":"FailureReason"}, + "details":{"shape":"Data"}, + "initiatedEventId":{"shape":"EventId"}, + "startedEventId":{"shape":"EventId"} + } + }, + "ChildWorkflowExecutionStartedEventAttributes":{ + "type":"structure", + "required":[ + "workflowExecution", + "workflowType", + "initiatedEventId" + ], + "members":{ + "workflowExecution":{"shape":"WorkflowExecution"}, + "workflowType":{"shape":"WorkflowType"}, + "initiatedEventId":{"shape":"EventId"} + } + }, + "ChildWorkflowExecutionTerminatedEventAttributes":{ + "type":"structure", + "required":[ + "workflowExecution", + "workflowType", + "initiatedEventId", + "startedEventId" + ], + "members":{ + "workflowExecution":{"shape":"WorkflowExecution"}, + "workflowType":{"shape":"WorkflowType"}, + "initiatedEventId":{"shape":"EventId"}, + "startedEventId":{"shape":"EventId"} + } + }, + "ChildWorkflowExecutionTimedOutEventAttributes":{ + "type":"structure", + "required":[ + "workflowExecution", + "workflowType", + 
"timeoutType", + "initiatedEventId", + "startedEventId" + ], + "members":{ + "workflowExecution":{"shape":"WorkflowExecution"}, + "workflowType":{"shape":"WorkflowType"}, + "timeoutType":{"shape":"WorkflowExecutionTimeoutType"}, + "initiatedEventId":{"shape":"EventId"}, + "startedEventId":{"shape":"EventId"} + } + }, + "CloseStatus":{ + "type":"string", + "enum":[ + "COMPLETED", + "FAILED", + "CANCELED", + "TERMINATED", + "CONTINUED_AS_NEW", + "TIMED_OUT" + ] + }, + "CloseStatusFilter":{ + "type":"structure", + "required":["status"], + "members":{ + "status":{"shape":"CloseStatus"} + } + }, + "CompleteWorkflowExecutionDecisionAttributes":{ + "type":"structure", + "members":{ + "result":{"shape":"Data"} + } + }, + "CompleteWorkflowExecutionFailedCause":{ + "type":"string", + "enum":[ + "UNHANDLED_DECISION", + "OPERATION_NOT_PERMITTED" + ] + }, + "CompleteWorkflowExecutionFailedEventAttributes":{ + "type":"structure", + "required":[ + "cause", + "decisionTaskCompletedEventId" + ], + "members":{ + "cause":{"shape":"CompleteWorkflowExecutionFailedCause"}, + "decisionTaskCompletedEventId":{"shape":"EventId"} + } + }, + "ContinueAsNewWorkflowExecutionDecisionAttributes":{ + "type":"structure", + "members":{ + "input":{"shape":"Data"}, + "executionStartToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "taskList":{"shape":"TaskList"}, + "taskPriority":{"shape":"TaskPriority"}, + "taskStartToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "childPolicy":{"shape":"ChildPolicy"}, + "tagList":{"shape":"TagList"}, + "workflowTypeVersion":{"shape":"Version"}, + "lambdaRole":{"shape":"Arn"} + } + }, + "ContinueAsNewWorkflowExecutionFailedCause":{ + "type":"string", + "enum":[ + "UNHANDLED_DECISION", + "WORKFLOW_TYPE_DEPRECATED", + "WORKFLOW_TYPE_DOES_NOT_EXIST", + "DEFAULT_EXECUTION_START_TO_CLOSE_TIMEOUT_UNDEFINED", + "DEFAULT_TASK_START_TO_CLOSE_TIMEOUT_UNDEFINED", + "DEFAULT_TASK_LIST_UNDEFINED", + "DEFAULT_CHILD_POLICY_UNDEFINED", + "CONTINUE_AS_NEW_WORKFLOW_EXECUTION_RATE_EXCEEDED", + "OPERATION_NOT_PERMITTED" + ] + }, + "ContinueAsNewWorkflowExecutionFailedEventAttributes":{ + "type":"structure", + "required":[ + "cause", + "decisionTaskCompletedEventId" + ], + "members":{ + "cause":{"shape":"ContinueAsNewWorkflowExecutionFailedCause"}, + "decisionTaskCompletedEventId":{"shape":"EventId"} + } + }, + "Count":{ + "type":"integer", + "min":0 + }, + "CountClosedWorkflowExecutionsInput":{ + "type":"structure", + "required":["domain"], + "members":{ + "domain":{"shape":"DomainName"}, + "startTimeFilter":{"shape":"ExecutionTimeFilter"}, + "closeTimeFilter":{"shape":"ExecutionTimeFilter"}, + "executionFilter":{"shape":"WorkflowExecutionFilter"}, + "typeFilter":{"shape":"WorkflowTypeFilter"}, + "tagFilter":{"shape":"TagFilter"}, + "closeStatusFilter":{"shape":"CloseStatusFilter"} + } + }, + "CountOpenWorkflowExecutionsInput":{ + "type":"structure", + "required":[ + "domain", + "startTimeFilter" + ], + "members":{ + "domain":{"shape":"DomainName"}, + "startTimeFilter":{"shape":"ExecutionTimeFilter"}, + "typeFilter":{"shape":"WorkflowTypeFilter"}, + "tagFilter":{"shape":"TagFilter"}, + "executionFilter":{"shape":"WorkflowExecutionFilter"} + } + }, + "CountPendingActivityTasksInput":{ + "type":"structure", + "required":[ + "domain", + "taskList" + ], + "members":{ + "domain":{"shape":"DomainName"}, + "taskList":{"shape":"TaskList"} + } + }, + "CountPendingDecisionTasksInput":{ + "type":"structure", + "required":[ + "domain", + "taskList" + ], + "members":{ + "domain":{"shape":"DomainName"}, + 
"taskList":{"shape":"TaskList"} + } + }, + "Data":{ + "type":"string", + "max":32768 + }, + "Decision":{ + "type":"structure", + "required":["decisionType"], + "members":{ + "decisionType":{"shape":"DecisionType"}, + "scheduleActivityTaskDecisionAttributes":{"shape":"ScheduleActivityTaskDecisionAttributes"}, + "requestCancelActivityTaskDecisionAttributes":{"shape":"RequestCancelActivityTaskDecisionAttributes"}, + "completeWorkflowExecutionDecisionAttributes":{"shape":"CompleteWorkflowExecutionDecisionAttributes"}, + "failWorkflowExecutionDecisionAttributes":{"shape":"FailWorkflowExecutionDecisionAttributes"}, + "cancelWorkflowExecutionDecisionAttributes":{"shape":"CancelWorkflowExecutionDecisionAttributes"}, + "continueAsNewWorkflowExecutionDecisionAttributes":{"shape":"ContinueAsNewWorkflowExecutionDecisionAttributes"}, + "recordMarkerDecisionAttributes":{"shape":"RecordMarkerDecisionAttributes"}, + "startTimerDecisionAttributes":{"shape":"StartTimerDecisionAttributes"}, + "cancelTimerDecisionAttributes":{"shape":"CancelTimerDecisionAttributes"}, + "signalExternalWorkflowExecutionDecisionAttributes":{"shape":"SignalExternalWorkflowExecutionDecisionAttributes"}, + "requestCancelExternalWorkflowExecutionDecisionAttributes":{"shape":"RequestCancelExternalWorkflowExecutionDecisionAttributes"}, + "startChildWorkflowExecutionDecisionAttributes":{"shape":"StartChildWorkflowExecutionDecisionAttributes"}, + "scheduleLambdaFunctionDecisionAttributes":{"shape":"ScheduleLambdaFunctionDecisionAttributes"} + } + }, + "DecisionList":{ + "type":"list", + "member":{"shape":"Decision"} + }, + "DecisionTask":{ + "type":"structure", + "required":[ + "taskToken", + "startedEventId", + "workflowExecution", + "workflowType", + "events" + ], + "members":{ + "taskToken":{"shape":"TaskToken"}, + "startedEventId":{"shape":"EventId"}, + "workflowExecution":{"shape":"WorkflowExecution"}, + "workflowType":{"shape":"WorkflowType"}, + "events":{"shape":"HistoryEventList"}, + "nextPageToken":{"shape":"PageToken"}, + "previousStartedEventId":{"shape":"EventId"} + } + }, + "DecisionTaskCompletedEventAttributes":{ + "type":"structure", + "required":[ + "scheduledEventId", + "startedEventId" + ], + "members":{ + "executionContext":{"shape":"Data"}, + "scheduledEventId":{"shape":"EventId"}, + "startedEventId":{"shape":"EventId"} + } + }, + "DecisionTaskScheduledEventAttributes":{ + "type":"structure", + "required":["taskList"], + "members":{ + "taskList":{"shape":"TaskList"}, + "taskPriority":{"shape":"TaskPriority"}, + "startToCloseTimeout":{"shape":"DurationInSecondsOptional"} + } + }, + "DecisionTaskStartedEventAttributes":{ + "type":"structure", + "required":["scheduledEventId"], + "members":{ + "identity":{"shape":"Identity"}, + "scheduledEventId":{"shape":"EventId"} + } + }, + "DecisionTaskTimedOutEventAttributes":{ + "type":"structure", + "required":[ + "timeoutType", + "scheduledEventId", + "startedEventId" + ], + "members":{ + "timeoutType":{"shape":"DecisionTaskTimeoutType"}, + "scheduledEventId":{"shape":"EventId"}, + "startedEventId":{"shape":"EventId"} + } + }, + "DecisionTaskTimeoutType":{ + "type":"string", + "enum":["START_TO_CLOSE"] + }, + "DecisionType":{ + "type":"string", + "enum":[ + "ScheduleActivityTask", + "RequestCancelActivityTask", + "CompleteWorkflowExecution", + "FailWorkflowExecution", + "CancelWorkflowExecution", + "ContinueAsNewWorkflowExecution", + "RecordMarker", + "StartTimer", + "CancelTimer", + "SignalExternalWorkflowExecution", + "RequestCancelExternalWorkflowExecution", + 
"StartChildWorkflowExecution", + "ScheduleLambdaFunction" + ] + }, + "DefaultUndefinedFault":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "DeprecateActivityTypeInput":{ + "type":"structure", + "required":[ + "domain", + "activityType" + ], + "members":{ + "domain":{"shape":"DomainName"}, + "activityType":{"shape":"ActivityType"} + } + }, + "DeprecateDomainInput":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{"shape":"DomainName"} + } + }, + "DeprecateWorkflowTypeInput":{ + "type":"structure", + "required":[ + "domain", + "workflowType" + ], + "members":{ + "domain":{"shape":"DomainName"}, + "workflowType":{"shape":"WorkflowType"} + } + }, + "DescribeActivityTypeInput":{ + "type":"structure", + "required":[ + "domain", + "activityType" + ], + "members":{ + "domain":{"shape":"DomainName"}, + "activityType":{"shape":"ActivityType"} + } + }, + "DescribeDomainInput":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{"shape":"DomainName"} + } + }, + "DescribeWorkflowExecutionInput":{ + "type":"structure", + "required":[ + "domain", + "execution" + ], + "members":{ + "domain":{"shape":"DomainName"}, + "execution":{"shape":"WorkflowExecution"} + } + }, + "DescribeWorkflowTypeInput":{ + "type":"structure", + "required":[ + "domain", + "workflowType" + ], + "members":{ + "domain":{"shape":"DomainName"}, + "workflowType":{"shape":"WorkflowType"} + } + }, + "Description":{ + "type":"string", + "max":1024 + }, + "DomainAlreadyExistsFault":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "DomainConfiguration":{ + "type":"structure", + "required":["workflowExecutionRetentionPeriodInDays"], + "members":{ + "workflowExecutionRetentionPeriodInDays":{"shape":"DurationInDays"} + } + }, + "DomainDeprecatedFault":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "DomainDetail":{ + "type":"structure", + "required":[ + "domainInfo", + "configuration" + ], + "members":{ + "domainInfo":{"shape":"DomainInfo"}, + "configuration":{"shape":"DomainConfiguration"} + } + }, + "DomainInfo":{ + "type":"structure", + "required":[ + "name", + "status" + ], + "members":{ + "name":{"shape":"DomainName"}, + "status":{"shape":"RegistrationStatus"}, + "description":{"shape":"Description"} + } + }, + "DomainInfoList":{ + "type":"list", + "member":{"shape":"DomainInfo"} + }, + "DomainInfos":{ + "type":"structure", + "required":["domainInfos"], + "members":{ + "domainInfos":{"shape":"DomainInfoList"}, + "nextPageToken":{"shape":"PageToken"} + } + }, + "DomainName":{ + "type":"string", + "min":1, + "max":256 + }, + "DurationInDays":{ + "type":"string", + "min":1, + "max":8 + }, + "DurationInSeconds":{ + "type":"string", + "min":1, + "max":8 + }, + "DurationInSecondsOptional":{ + "type":"string", + "max":8 + }, + "ErrorMessage":{"type":"string"}, + "EventId":{"type":"long"}, + "EventType":{ + "type":"string", + "enum":[ + "WorkflowExecutionStarted", + "WorkflowExecutionCancelRequested", + "WorkflowExecutionCompleted", + "CompleteWorkflowExecutionFailed", + "WorkflowExecutionFailed", + "FailWorkflowExecutionFailed", + "WorkflowExecutionTimedOut", + "WorkflowExecutionCanceled", + "CancelWorkflowExecutionFailed", + "WorkflowExecutionContinuedAsNew", + "ContinueAsNewWorkflowExecutionFailed", + "WorkflowExecutionTerminated", + "DecisionTaskScheduled", + "DecisionTaskStarted", + "DecisionTaskCompleted", + 
"DecisionTaskTimedOut", + "ActivityTaskScheduled", + "ScheduleActivityTaskFailed", + "ActivityTaskStarted", + "ActivityTaskCompleted", + "ActivityTaskFailed", + "ActivityTaskTimedOut", + "ActivityTaskCanceled", + "ActivityTaskCancelRequested", + "RequestCancelActivityTaskFailed", + "WorkflowExecutionSignaled", + "MarkerRecorded", + "RecordMarkerFailed", + "TimerStarted", + "StartTimerFailed", + "TimerFired", + "TimerCanceled", + "CancelTimerFailed", + "StartChildWorkflowExecutionInitiated", + "StartChildWorkflowExecutionFailed", + "ChildWorkflowExecutionStarted", + "ChildWorkflowExecutionCompleted", + "ChildWorkflowExecutionFailed", + "ChildWorkflowExecutionTimedOut", + "ChildWorkflowExecutionCanceled", + "ChildWorkflowExecutionTerminated", + "SignalExternalWorkflowExecutionInitiated", + "SignalExternalWorkflowExecutionFailed", + "ExternalWorkflowExecutionSignaled", + "RequestCancelExternalWorkflowExecutionInitiated", + "RequestCancelExternalWorkflowExecutionFailed", + "ExternalWorkflowExecutionCancelRequested", + "LambdaFunctionScheduled", + "LambdaFunctionStarted", + "LambdaFunctionCompleted", + "LambdaFunctionFailed", + "LambdaFunctionTimedOut", + "ScheduleLambdaFunctionFailed", + "StartLambdaFunctionFailed" + ] + }, + "ExecutionStatus":{ + "type":"string", + "enum":[ + "OPEN", + "CLOSED" + ] + }, + "ExecutionTimeFilter":{ + "type":"structure", + "required":["oldestDate"], + "members":{ + "oldestDate":{"shape":"Timestamp"}, + "latestDate":{"shape":"Timestamp"} + } + }, + "ExternalWorkflowExecutionCancelRequestedEventAttributes":{ + "type":"structure", + "required":[ + "workflowExecution", + "initiatedEventId" + ], + "members":{ + "workflowExecution":{"shape":"WorkflowExecution"}, + "initiatedEventId":{"shape":"EventId"} + } + }, + "ExternalWorkflowExecutionSignaledEventAttributes":{ + "type":"structure", + "required":[ + "workflowExecution", + "initiatedEventId" + ], + "members":{ + "workflowExecution":{"shape":"WorkflowExecution"}, + "initiatedEventId":{"shape":"EventId"} + } + }, + "FailWorkflowExecutionDecisionAttributes":{ + "type":"structure", + "members":{ + "reason":{"shape":"FailureReason"}, + "details":{"shape":"Data"} + } + }, + "FailWorkflowExecutionFailedCause":{ + "type":"string", + "enum":[ + "UNHANDLED_DECISION", + "OPERATION_NOT_PERMITTED" + ] + }, + "FailWorkflowExecutionFailedEventAttributes":{ + "type":"structure", + "required":[ + "cause", + "decisionTaskCompletedEventId" + ], + "members":{ + "cause":{"shape":"FailWorkflowExecutionFailedCause"}, + "decisionTaskCompletedEventId":{"shape":"EventId"} + } + }, + "FailureReason":{ + "type":"string", + "max":256 + }, + "FunctionId":{ + "type":"string", + "min":1, + "max":256 + }, + "FunctionInput":{ + "type":"string", + "min":1, + "max":32768 + }, + "FunctionName":{ + "type":"string", + "min":1, + "max":64 + }, + "GetWorkflowExecutionHistoryInput":{ + "type":"structure", + "required":[ + "domain", + "execution" + ], + "members":{ + "domain":{"shape":"DomainName"}, + "execution":{"shape":"WorkflowExecution"}, + "nextPageToken":{"shape":"PageToken"}, + "maximumPageSize":{"shape":"PageSize"}, + "reverseOrder":{"shape":"ReverseOrder"} + } + }, + "History":{ + "type":"structure", + "required":["events"], + "members":{ + "events":{"shape":"HistoryEventList"}, + "nextPageToken":{"shape":"PageToken"} + } + }, + "HistoryEvent":{ + "type":"structure", + "required":[ + "eventTimestamp", + "eventType", + "eventId" + ], + "members":{ + "eventTimestamp":{"shape":"Timestamp"}, + "eventType":{"shape":"EventType"}, + 
"eventId":{"shape":"EventId"}, + "workflowExecutionStartedEventAttributes":{"shape":"WorkflowExecutionStartedEventAttributes"}, + "workflowExecutionCompletedEventAttributes":{"shape":"WorkflowExecutionCompletedEventAttributes"}, + "completeWorkflowExecutionFailedEventAttributes":{"shape":"CompleteWorkflowExecutionFailedEventAttributes"}, + "workflowExecutionFailedEventAttributes":{"shape":"WorkflowExecutionFailedEventAttributes"}, + "failWorkflowExecutionFailedEventAttributes":{"shape":"FailWorkflowExecutionFailedEventAttributes"}, + "workflowExecutionTimedOutEventAttributes":{"shape":"WorkflowExecutionTimedOutEventAttributes"}, + "workflowExecutionCanceledEventAttributes":{"shape":"WorkflowExecutionCanceledEventAttributes"}, + "cancelWorkflowExecutionFailedEventAttributes":{"shape":"CancelWorkflowExecutionFailedEventAttributes"}, + "workflowExecutionContinuedAsNewEventAttributes":{"shape":"WorkflowExecutionContinuedAsNewEventAttributes"}, + "continueAsNewWorkflowExecutionFailedEventAttributes":{"shape":"ContinueAsNewWorkflowExecutionFailedEventAttributes"}, + "workflowExecutionTerminatedEventAttributes":{"shape":"WorkflowExecutionTerminatedEventAttributes"}, + "workflowExecutionCancelRequestedEventAttributes":{"shape":"WorkflowExecutionCancelRequestedEventAttributes"}, + "decisionTaskScheduledEventAttributes":{"shape":"DecisionTaskScheduledEventAttributes"}, + "decisionTaskStartedEventAttributes":{"shape":"DecisionTaskStartedEventAttributes"}, + "decisionTaskCompletedEventAttributes":{"shape":"DecisionTaskCompletedEventAttributes"}, + "decisionTaskTimedOutEventAttributes":{"shape":"DecisionTaskTimedOutEventAttributes"}, + "activityTaskScheduledEventAttributes":{"shape":"ActivityTaskScheduledEventAttributes"}, + "activityTaskStartedEventAttributes":{"shape":"ActivityTaskStartedEventAttributes"}, + "activityTaskCompletedEventAttributes":{"shape":"ActivityTaskCompletedEventAttributes"}, + "activityTaskFailedEventAttributes":{"shape":"ActivityTaskFailedEventAttributes"}, + "activityTaskTimedOutEventAttributes":{"shape":"ActivityTaskTimedOutEventAttributes"}, + "activityTaskCanceledEventAttributes":{"shape":"ActivityTaskCanceledEventAttributes"}, + "activityTaskCancelRequestedEventAttributes":{"shape":"ActivityTaskCancelRequestedEventAttributes"}, + "workflowExecutionSignaledEventAttributes":{"shape":"WorkflowExecutionSignaledEventAttributes"}, + "markerRecordedEventAttributes":{"shape":"MarkerRecordedEventAttributes"}, + "recordMarkerFailedEventAttributes":{"shape":"RecordMarkerFailedEventAttributes"}, + "timerStartedEventAttributes":{"shape":"TimerStartedEventAttributes"}, + "timerFiredEventAttributes":{"shape":"TimerFiredEventAttributes"}, + "timerCanceledEventAttributes":{"shape":"TimerCanceledEventAttributes"}, + "startChildWorkflowExecutionInitiatedEventAttributes":{"shape":"StartChildWorkflowExecutionInitiatedEventAttributes"}, + "childWorkflowExecutionStartedEventAttributes":{"shape":"ChildWorkflowExecutionStartedEventAttributes"}, + "childWorkflowExecutionCompletedEventAttributes":{"shape":"ChildWorkflowExecutionCompletedEventAttributes"}, + "childWorkflowExecutionFailedEventAttributes":{"shape":"ChildWorkflowExecutionFailedEventAttributes"}, + "childWorkflowExecutionTimedOutEventAttributes":{"shape":"ChildWorkflowExecutionTimedOutEventAttributes"}, + "childWorkflowExecutionCanceledEventAttributes":{"shape":"ChildWorkflowExecutionCanceledEventAttributes"}, + "childWorkflowExecutionTerminatedEventAttributes":{"shape":"ChildWorkflowExecutionTerminatedEventAttributes"}, + 
"signalExternalWorkflowExecutionInitiatedEventAttributes":{"shape":"SignalExternalWorkflowExecutionInitiatedEventAttributes"}, + "externalWorkflowExecutionSignaledEventAttributes":{"shape":"ExternalWorkflowExecutionSignaledEventAttributes"}, + "signalExternalWorkflowExecutionFailedEventAttributes":{"shape":"SignalExternalWorkflowExecutionFailedEventAttributes"}, + "externalWorkflowExecutionCancelRequestedEventAttributes":{"shape":"ExternalWorkflowExecutionCancelRequestedEventAttributes"}, + "requestCancelExternalWorkflowExecutionInitiatedEventAttributes":{"shape":"RequestCancelExternalWorkflowExecutionInitiatedEventAttributes"}, + "requestCancelExternalWorkflowExecutionFailedEventAttributes":{"shape":"RequestCancelExternalWorkflowExecutionFailedEventAttributes"}, + "scheduleActivityTaskFailedEventAttributes":{"shape":"ScheduleActivityTaskFailedEventAttributes"}, + "requestCancelActivityTaskFailedEventAttributes":{"shape":"RequestCancelActivityTaskFailedEventAttributes"}, + "startTimerFailedEventAttributes":{"shape":"StartTimerFailedEventAttributes"}, + "cancelTimerFailedEventAttributes":{"shape":"CancelTimerFailedEventAttributes"}, + "startChildWorkflowExecutionFailedEventAttributes":{"shape":"StartChildWorkflowExecutionFailedEventAttributes"}, + "lambdaFunctionScheduledEventAttributes":{"shape":"LambdaFunctionScheduledEventAttributes"}, + "lambdaFunctionStartedEventAttributes":{"shape":"LambdaFunctionStartedEventAttributes"}, + "lambdaFunctionCompletedEventAttributes":{"shape":"LambdaFunctionCompletedEventAttributes"}, + "lambdaFunctionFailedEventAttributes":{"shape":"LambdaFunctionFailedEventAttributes"}, + "lambdaFunctionTimedOutEventAttributes":{"shape":"LambdaFunctionTimedOutEventAttributes"}, + "scheduleLambdaFunctionFailedEventAttributes":{"shape":"ScheduleLambdaFunctionFailedEventAttributes"}, + "startLambdaFunctionFailedEventAttributes":{"shape":"StartLambdaFunctionFailedEventAttributes"} + } + }, + "HistoryEventList":{ + "type":"list", + "member":{"shape":"HistoryEvent"} + }, + "Identity":{ + "type":"string", + "max":256 + }, + "LambdaFunctionCompletedEventAttributes":{ + "type":"structure", + "required":[ + "scheduledEventId", + "startedEventId" + ], + "members":{ + "scheduledEventId":{"shape":"EventId"}, + "startedEventId":{"shape":"EventId"}, + "result":{"shape":"Data"} + } + }, + "LambdaFunctionFailedEventAttributes":{ + "type":"structure", + "required":[ + "scheduledEventId", + "startedEventId" + ], + "members":{ + "scheduledEventId":{"shape":"EventId"}, + "startedEventId":{"shape":"EventId"}, + "reason":{"shape":"FailureReason"}, + "details":{"shape":"Data"} + } + }, + "LambdaFunctionScheduledEventAttributes":{ + "type":"structure", + "required":[ + "id", + "name", + "decisionTaskCompletedEventId" + ], + "members":{ + "id":{"shape":"FunctionId"}, + "name":{"shape":"FunctionName"}, + "input":{"shape":"FunctionInput"}, + "startToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "decisionTaskCompletedEventId":{"shape":"EventId"} + } + }, + "LambdaFunctionStartedEventAttributes":{ + "type":"structure", + "required":["scheduledEventId"], + "members":{ + "scheduledEventId":{"shape":"EventId"} + } + }, + "LambdaFunctionTimedOutEventAttributes":{ + "type":"structure", + "required":[ + "scheduledEventId", + "startedEventId" + ], + "members":{ + "scheduledEventId":{"shape":"EventId"}, + "startedEventId":{"shape":"EventId"}, + "timeoutType":{"shape":"LambdaFunctionTimeoutType"} + } + }, + "LambdaFunctionTimeoutType":{ + "type":"string", + "enum":["START_TO_CLOSE"] + }, + 
"LimitExceededFault":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "LimitedData":{ + "type":"string", + "max":2048 + }, + "ListActivityTypesInput":{ + "type":"structure", + "required":[ + "domain", + "registrationStatus" + ], + "members":{ + "domain":{"shape":"DomainName"}, + "name":{"shape":"Name"}, + "registrationStatus":{"shape":"RegistrationStatus"}, + "nextPageToken":{"shape":"PageToken"}, + "maximumPageSize":{"shape":"PageSize"}, + "reverseOrder":{"shape":"ReverseOrder"} + } + }, + "ListClosedWorkflowExecutionsInput":{ + "type":"structure", + "required":["domain"], + "members":{ + "domain":{"shape":"DomainName"}, + "startTimeFilter":{"shape":"ExecutionTimeFilter"}, + "closeTimeFilter":{"shape":"ExecutionTimeFilter"}, + "executionFilter":{"shape":"WorkflowExecutionFilter"}, + "closeStatusFilter":{"shape":"CloseStatusFilter"}, + "typeFilter":{"shape":"WorkflowTypeFilter"}, + "tagFilter":{"shape":"TagFilter"}, + "nextPageToken":{"shape":"PageToken"}, + "maximumPageSize":{"shape":"PageSize"}, + "reverseOrder":{"shape":"ReverseOrder"} + } + }, + "ListDomainsInput":{ + "type":"structure", + "required":["registrationStatus"], + "members":{ + "nextPageToken":{"shape":"PageToken"}, + "registrationStatus":{"shape":"RegistrationStatus"}, + "maximumPageSize":{"shape":"PageSize"}, + "reverseOrder":{"shape":"ReverseOrder"} + } + }, + "ListOpenWorkflowExecutionsInput":{ + "type":"structure", + "required":[ + "domain", + "startTimeFilter" + ], + "members":{ + "domain":{"shape":"DomainName"}, + "startTimeFilter":{"shape":"ExecutionTimeFilter"}, + "typeFilter":{"shape":"WorkflowTypeFilter"}, + "tagFilter":{"shape":"TagFilter"}, + "nextPageToken":{"shape":"PageToken"}, + "maximumPageSize":{"shape":"PageSize"}, + "reverseOrder":{"shape":"ReverseOrder"}, + "executionFilter":{"shape":"WorkflowExecutionFilter"} + } + }, + "ListWorkflowTypesInput":{ + "type":"structure", + "required":[ + "domain", + "registrationStatus" + ], + "members":{ + "domain":{"shape":"DomainName"}, + "name":{"shape":"Name"}, + "registrationStatus":{"shape":"RegistrationStatus"}, + "nextPageToken":{"shape":"PageToken"}, + "maximumPageSize":{"shape":"PageSize"}, + "reverseOrder":{"shape":"ReverseOrder"} + } + }, + "MarkerName":{ + "type":"string", + "min":1, + "max":256 + }, + "MarkerRecordedEventAttributes":{ + "type":"structure", + "required":[ + "markerName", + "decisionTaskCompletedEventId" + ], + "members":{ + "markerName":{"shape":"MarkerName"}, + "details":{"shape":"Data"}, + "decisionTaskCompletedEventId":{"shape":"EventId"} + } + }, + "Name":{ + "type":"string", + "min":1, + "max":256 + }, + "OpenDecisionTasksCount":{ + "type":"integer", + "min":0, + "max":1 + }, + "OperationNotPermittedFault":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "PageSize":{ + "type":"integer", + "min":0, + "max":1000 + }, + "PageToken":{ + "type":"string", + "max":2048 + }, + "PendingTaskCount":{ + "type":"structure", + "required":["count"], + "members":{ + "count":{"shape":"Count"}, + "truncated":{"shape":"Truncated"} + } + }, + "PollForActivityTaskInput":{ + "type":"structure", + "required":[ + "domain", + "taskList" + ], + "members":{ + "domain":{"shape":"DomainName"}, + "taskList":{"shape":"TaskList"}, + "identity":{"shape":"Identity"} + } + }, + "PollForDecisionTaskInput":{ + "type":"structure", + "required":[ + "domain", + "taskList" + ], + "members":{ + "domain":{"shape":"DomainName"}, + "taskList":{"shape":"TaskList"}, + 
"identity":{"shape":"Identity"}, + "nextPageToken":{"shape":"PageToken"}, + "maximumPageSize":{"shape":"PageSize"}, + "reverseOrder":{"shape":"ReverseOrder"} + } + }, + "RecordActivityTaskHeartbeatInput":{ + "type":"structure", + "required":["taskToken"], + "members":{ + "taskToken":{"shape":"TaskToken"}, + "details":{"shape":"LimitedData"} + } + }, + "RecordMarkerDecisionAttributes":{ + "type":"structure", + "required":["markerName"], + "members":{ + "markerName":{"shape":"MarkerName"}, + "details":{"shape":"Data"} + } + }, + "RecordMarkerFailedCause":{ + "type":"string", + "enum":["OPERATION_NOT_PERMITTED"] + }, + "RecordMarkerFailedEventAttributes":{ + "type":"structure", + "required":[ + "markerName", + "cause", + "decisionTaskCompletedEventId" + ], + "members":{ + "markerName":{"shape":"MarkerName"}, + "cause":{"shape":"RecordMarkerFailedCause"}, + "decisionTaskCompletedEventId":{"shape":"EventId"} + } + }, + "RegisterActivityTypeInput":{ + "type":"structure", + "required":[ + "domain", + "name", + "version" + ], + "members":{ + "domain":{"shape":"DomainName"}, + "name":{"shape":"Name"}, + "version":{"shape":"Version"}, + "description":{"shape":"Description"}, + "defaultTaskStartToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "defaultTaskHeartbeatTimeout":{"shape":"DurationInSecondsOptional"}, + "defaultTaskList":{"shape":"TaskList"}, + "defaultTaskPriority":{"shape":"TaskPriority"}, + "defaultTaskScheduleToStartTimeout":{"shape":"DurationInSecondsOptional"}, + "defaultTaskScheduleToCloseTimeout":{"shape":"DurationInSecondsOptional"} + } + }, + "RegisterDomainInput":{ + "type":"structure", + "required":[ + "name", + "workflowExecutionRetentionPeriodInDays" + ], + "members":{ + "name":{"shape":"DomainName"}, + "description":{"shape":"Description"}, + "workflowExecutionRetentionPeriodInDays":{"shape":"DurationInDays"} + } + }, + "RegisterWorkflowTypeInput":{ + "type":"structure", + "required":[ + "domain", + "name", + "version" + ], + "members":{ + "domain":{"shape":"DomainName"}, + "name":{"shape":"Name"}, + "version":{"shape":"Version"}, + "description":{"shape":"Description"}, + "defaultTaskStartToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "defaultExecutionStartToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "defaultTaskList":{"shape":"TaskList"}, + "defaultTaskPriority":{"shape":"TaskPriority"}, + "defaultChildPolicy":{"shape":"ChildPolicy"}, + "defaultLambdaRole":{"shape":"Arn"} + } + }, + "RegistrationStatus":{ + "type":"string", + "enum":[ + "REGISTERED", + "DEPRECATED" + ] + }, + "RequestCancelActivityTaskDecisionAttributes":{ + "type":"structure", + "required":["activityId"], + "members":{ + "activityId":{"shape":"ActivityId"} + } + }, + "RequestCancelActivityTaskFailedCause":{ + "type":"string", + "enum":[ + "ACTIVITY_ID_UNKNOWN", + "OPERATION_NOT_PERMITTED" + ] + }, + "RequestCancelActivityTaskFailedEventAttributes":{ + "type":"structure", + "required":[ + "activityId", + "cause", + "decisionTaskCompletedEventId" + ], + "members":{ + "activityId":{"shape":"ActivityId"}, + "cause":{"shape":"RequestCancelActivityTaskFailedCause"}, + "decisionTaskCompletedEventId":{"shape":"EventId"} + } + }, + "RequestCancelExternalWorkflowExecutionDecisionAttributes":{ + "type":"structure", + "required":["workflowId"], + "members":{ + "workflowId":{"shape":"WorkflowId"}, + "runId":{"shape":"RunIdOptional"}, + "control":{"shape":"Data"} + } + }, + "RequestCancelExternalWorkflowExecutionFailedCause":{ + "type":"string", + "enum":[ + 
"UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION", + "REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION_RATE_EXCEEDED", + "OPERATION_NOT_PERMITTED" + ] + }, + "RequestCancelExternalWorkflowExecutionFailedEventAttributes":{ + "type":"structure", + "required":[ + "workflowId", + "cause", + "initiatedEventId", + "decisionTaskCompletedEventId" + ], + "members":{ + "workflowId":{"shape":"WorkflowId"}, + "runId":{"shape":"RunIdOptional"}, + "cause":{"shape":"RequestCancelExternalWorkflowExecutionFailedCause"}, + "initiatedEventId":{"shape":"EventId"}, + "decisionTaskCompletedEventId":{"shape":"EventId"}, + "control":{"shape":"Data"} + } + }, + "RequestCancelExternalWorkflowExecutionInitiatedEventAttributes":{ + "type":"structure", + "required":[ + "workflowId", + "decisionTaskCompletedEventId" + ], + "members":{ + "workflowId":{"shape":"WorkflowId"}, + "runId":{"shape":"RunIdOptional"}, + "decisionTaskCompletedEventId":{"shape":"EventId"}, + "control":{"shape":"Data"} + } + }, + "RequestCancelWorkflowExecutionInput":{ + "type":"structure", + "required":[ + "domain", + "workflowId" + ], + "members":{ + "domain":{"shape":"DomainName"}, + "workflowId":{"shape":"WorkflowId"}, + "runId":{"shape":"RunIdOptional"} + } + }, + "RespondActivityTaskCanceledInput":{ + "type":"structure", + "required":["taskToken"], + "members":{ + "taskToken":{"shape":"TaskToken"}, + "details":{"shape":"Data"} + } + }, + "RespondActivityTaskCompletedInput":{ + "type":"structure", + "required":["taskToken"], + "members":{ + "taskToken":{"shape":"TaskToken"}, + "result":{"shape":"Data"} + } + }, + "RespondActivityTaskFailedInput":{ + "type":"structure", + "required":["taskToken"], + "members":{ + "taskToken":{"shape":"TaskToken"}, + "reason":{"shape":"FailureReason"}, + "details":{"shape":"Data"} + } + }, + "RespondDecisionTaskCompletedInput":{ + "type":"structure", + "required":["taskToken"], + "members":{ + "taskToken":{"shape":"TaskToken"}, + "decisions":{"shape":"DecisionList"}, + "executionContext":{"shape":"Data"} + } + }, + "ReverseOrder":{"type":"boolean"}, + "Run":{ + "type":"structure", + "members":{ + "runId":{"shape":"RunId"} + } + }, + "RunId":{ + "type":"string", + "min":1, + "max":64 + }, + "RunIdOptional":{ + "type":"string", + "max":64 + }, + "ScheduleActivityTaskDecisionAttributes":{ + "type":"structure", + "required":[ + "activityType", + "activityId" + ], + "members":{ + "activityType":{"shape":"ActivityType"}, + "activityId":{"shape":"ActivityId"}, + "control":{"shape":"Data"}, + "input":{"shape":"Data"}, + "scheduleToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "taskList":{"shape":"TaskList"}, + "taskPriority":{"shape":"TaskPriority"}, + "scheduleToStartTimeout":{"shape":"DurationInSecondsOptional"}, + "startToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "heartbeatTimeout":{"shape":"DurationInSecondsOptional"} + } + }, + "ScheduleActivityTaskFailedCause":{ + "type":"string", + "enum":[ + "ACTIVITY_TYPE_DEPRECATED", + "ACTIVITY_TYPE_DOES_NOT_EXIST", + "ACTIVITY_ID_ALREADY_IN_USE", + "OPEN_ACTIVITIES_LIMIT_EXCEEDED", + "ACTIVITY_CREATION_RATE_EXCEEDED", + "DEFAULT_SCHEDULE_TO_CLOSE_TIMEOUT_UNDEFINED", + "DEFAULT_TASK_LIST_UNDEFINED", + "DEFAULT_SCHEDULE_TO_START_TIMEOUT_UNDEFINED", + "DEFAULT_START_TO_CLOSE_TIMEOUT_UNDEFINED", + "DEFAULT_HEARTBEAT_TIMEOUT_UNDEFINED", + "OPERATION_NOT_PERMITTED" + ] + }, + "ScheduleActivityTaskFailedEventAttributes":{ + "type":"structure", + "required":[ + "activityType", + "activityId", + "cause", + "decisionTaskCompletedEventId" + ], + "members":{ + 
"activityType":{"shape":"ActivityType"}, + "activityId":{"shape":"ActivityId"}, + "cause":{"shape":"ScheduleActivityTaskFailedCause"}, + "decisionTaskCompletedEventId":{"shape":"EventId"} + } + }, + "ScheduleLambdaFunctionDecisionAttributes":{ + "type":"structure", + "required":[ + "id", + "name" + ], + "members":{ + "id":{"shape":"FunctionId"}, + "name":{"shape":"FunctionName"}, + "input":{"shape":"FunctionInput"}, + "startToCloseTimeout":{"shape":"DurationInSecondsOptional"} + } + }, + "ScheduleLambdaFunctionFailedCause":{ + "type":"string", + "enum":[ + "ID_ALREADY_IN_USE", + "OPEN_LAMBDA_FUNCTIONS_LIMIT_EXCEEDED", + "LAMBDA_FUNCTION_CREATION_RATE_EXCEEDED", + "LAMBDA_SERVICE_NOT_AVAILABLE_IN_REGION" + ] + }, + "ScheduleLambdaFunctionFailedEventAttributes":{ + "type":"structure", + "required":[ + "id", + "name", + "cause", + "decisionTaskCompletedEventId" + ], + "members":{ + "id":{"shape":"FunctionId"}, + "name":{"shape":"FunctionName"}, + "cause":{"shape":"ScheduleLambdaFunctionFailedCause"}, + "decisionTaskCompletedEventId":{"shape":"EventId"} + } + }, + "SignalExternalWorkflowExecutionDecisionAttributes":{ + "type":"structure", + "required":[ + "workflowId", + "signalName" + ], + "members":{ + "workflowId":{"shape":"WorkflowId"}, + "runId":{"shape":"RunIdOptional"}, + "signalName":{"shape":"SignalName"}, + "input":{"shape":"Data"}, + "control":{"shape":"Data"} + } + }, + "SignalExternalWorkflowExecutionFailedCause":{ + "type":"string", + "enum":[ + "UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION", + "SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_RATE_EXCEEDED", + "OPERATION_NOT_PERMITTED" + ] + }, + "SignalExternalWorkflowExecutionFailedEventAttributes":{ + "type":"structure", + "required":[ + "workflowId", + "cause", + "initiatedEventId", + "decisionTaskCompletedEventId" + ], + "members":{ + "workflowId":{"shape":"WorkflowId"}, + "runId":{"shape":"RunIdOptional"}, + "cause":{"shape":"SignalExternalWorkflowExecutionFailedCause"}, + "initiatedEventId":{"shape":"EventId"}, + "decisionTaskCompletedEventId":{"shape":"EventId"}, + "control":{"shape":"Data"} + } + }, + "SignalExternalWorkflowExecutionInitiatedEventAttributes":{ + "type":"structure", + "required":[ + "workflowId", + "signalName", + "decisionTaskCompletedEventId" + ], + "members":{ + "workflowId":{"shape":"WorkflowId"}, + "runId":{"shape":"RunIdOptional"}, + "signalName":{"shape":"SignalName"}, + "input":{"shape":"Data"}, + "decisionTaskCompletedEventId":{"shape":"EventId"}, + "control":{"shape":"Data"} + } + }, + "SignalName":{ + "type":"string", + "min":1, + "max":256 + }, + "SignalWorkflowExecutionInput":{ + "type":"structure", + "required":[ + "domain", + "workflowId", + "signalName" + ], + "members":{ + "domain":{"shape":"DomainName"}, + "workflowId":{"shape":"WorkflowId"}, + "runId":{"shape":"RunIdOptional"}, + "signalName":{"shape":"SignalName"}, + "input":{"shape":"Data"} + } + }, + "StartChildWorkflowExecutionDecisionAttributes":{ + "type":"structure", + "required":[ + "workflowType", + "workflowId" + ], + "members":{ + "workflowType":{"shape":"WorkflowType"}, + "workflowId":{"shape":"WorkflowId"}, + "control":{"shape":"Data"}, + "input":{"shape":"Data"}, + "executionStartToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "taskList":{"shape":"TaskList"}, + "taskPriority":{"shape":"TaskPriority"}, + "taskStartToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "childPolicy":{"shape":"ChildPolicy"}, + "tagList":{"shape":"TagList"}, + "lambdaRole":{"shape":"Arn"} + } + }, + "StartChildWorkflowExecutionFailedCause":{ + 
"type":"string", + "enum":[ + "WORKFLOW_TYPE_DOES_NOT_EXIST", + "WORKFLOW_TYPE_DEPRECATED", + "OPEN_CHILDREN_LIMIT_EXCEEDED", + "OPEN_WORKFLOWS_LIMIT_EXCEEDED", + "CHILD_CREATION_RATE_EXCEEDED", + "WORKFLOW_ALREADY_RUNNING", + "DEFAULT_EXECUTION_START_TO_CLOSE_TIMEOUT_UNDEFINED", + "DEFAULT_TASK_LIST_UNDEFINED", + "DEFAULT_TASK_START_TO_CLOSE_TIMEOUT_UNDEFINED", + "DEFAULT_CHILD_POLICY_UNDEFINED", + "OPERATION_NOT_PERMITTED" + ] + }, + "StartChildWorkflowExecutionFailedEventAttributes":{ + "type":"structure", + "required":[ + "workflowType", + "cause", + "workflowId", + "initiatedEventId", + "decisionTaskCompletedEventId" + ], + "members":{ + "workflowType":{"shape":"WorkflowType"}, + "cause":{"shape":"StartChildWorkflowExecutionFailedCause"}, + "workflowId":{"shape":"WorkflowId"}, + "initiatedEventId":{"shape":"EventId"}, + "decisionTaskCompletedEventId":{"shape":"EventId"}, + "control":{"shape":"Data"} + } + }, + "StartChildWorkflowExecutionInitiatedEventAttributes":{ + "type":"structure", + "required":[ + "workflowId", + "workflowType", + "taskList", + "decisionTaskCompletedEventId", + "childPolicy" + ], + "members":{ + "workflowId":{"shape":"WorkflowId"}, + "workflowType":{"shape":"WorkflowType"}, + "control":{"shape":"Data"}, + "input":{"shape":"Data"}, + "executionStartToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "taskList":{"shape":"TaskList"}, + "taskPriority":{"shape":"TaskPriority"}, + "decisionTaskCompletedEventId":{"shape":"EventId"}, + "childPolicy":{"shape":"ChildPolicy"}, + "taskStartToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "tagList":{"shape":"TagList"}, + "lambdaRole":{"shape":"Arn"} + } + }, + "StartLambdaFunctionFailedCause":{ + "type":"string", + "enum":["ASSUME_ROLE_FAILED"] + }, + "StartLambdaFunctionFailedEventAttributes":{ + "type":"structure", + "members":{ + "scheduledEventId":{"shape":"EventId"}, + "cause":{"shape":"StartLambdaFunctionFailedCause"}, + "message":{"shape":"CauseMessage"} + } + }, + "StartTimerDecisionAttributes":{ + "type":"structure", + "required":[ + "timerId", + "startToFireTimeout" + ], + "members":{ + "timerId":{"shape":"TimerId"}, + "control":{"shape":"Data"}, + "startToFireTimeout":{"shape":"DurationInSeconds"} + } + }, + "StartTimerFailedCause":{ + "type":"string", + "enum":[ + "TIMER_ID_ALREADY_IN_USE", + "OPEN_TIMERS_LIMIT_EXCEEDED", + "TIMER_CREATION_RATE_EXCEEDED", + "OPERATION_NOT_PERMITTED" + ] + }, + "StartTimerFailedEventAttributes":{ + "type":"structure", + "required":[ + "timerId", + "cause", + "decisionTaskCompletedEventId" + ], + "members":{ + "timerId":{"shape":"TimerId"}, + "cause":{"shape":"StartTimerFailedCause"}, + "decisionTaskCompletedEventId":{"shape":"EventId"} + } + }, + "StartWorkflowExecutionInput":{ + "type":"structure", + "required":[ + "domain", + "workflowId", + "workflowType" + ], + "members":{ + "domain":{"shape":"DomainName"}, + "workflowId":{"shape":"WorkflowId"}, + "workflowType":{"shape":"WorkflowType"}, + "taskList":{"shape":"TaskList"}, + "taskPriority":{"shape":"TaskPriority"}, + "input":{"shape":"Data"}, + "executionStartToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "tagList":{"shape":"TagList"}, + "taskStartToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "childPolicy":{"shape":"ChildPolicy"}, + "lambdaRole":{"shape":"Arn"} + } + }, + "Tag":{ + "type":"string", + "min":1, + "max":256 + }, + "TagFilter":{ + "type":"structure", + "required":["tag"], + "members":{ + "tag":{"shape":"Tag"} + } + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + 
"max":5 + }, + "TaskList":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{"shape":"Name"} + } + }, + "TaskPriority":{ + "type":"string", + "max":11 + }, + "TaskToken":{ + "type":"string", + "min":1, + "max":1024 + }, + "TerminateReason":{ + "type":"string", + "max":256 + }, + "TerminateWorkflowExecutionInput":{ + "type":"structure", + "required":[ + "domain", + "workflowId" + ], + "members":{ + "domain":{"shape":"DomainName"}, + "workflowId":{"shape":"WorkflowId"}, + "runId":{"shape":"RunIdOptional"}, + "reason":{"shape":"TerminateReason"}, + "details":{"shape":"Data"}, + "childPolicy":{"shape":"ChildPolicy"} + } + }, + "TimerCanceledEventAttributes":{ + "type":"structure", + "required":[ + "timerId", + "startedEventId", + "decisionTaskCompletedEventId" + ], + "members":{ + "timerId":{"shape":"TimerId"}, + "startedEventId":{"shape":"EventId"}, + "decisionTaskCompletedEventId":{"shape":"EventId"} + } + }, + "TimerFiredEventAttributes":{ + "type":"structure", + "required":[ + "timerId", + "startedEventId" + ], + "members":{ + "timerId":{"shape":"TimerId"}, + "startedEventId":{"shape":"EventId"} + } + }, + "TimerId":{ + "type":"string", + "min":1, + "max":256 + }, + "TimerStartedEventAttributes":{ + "type":"structure", + "required":[ + "timerId", + "startToFireTimeout", + "decisionTaskCompletedEventId" + ], + "members":{ + "timerId":{"shape":"TimerId"}, + "control":{"shape":"Data"}, + "startToFireTimeout":{"shape":"DurationInSeconds"}, + "decisionTaskCompletedEventId":{"shape":"EventId"} + } + }, + "Timestamp":{"type":"timestamp"}, + "Truncated":{"type":"boolean"}, + "TypeAlreadyExistsFault":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "TypeDeprecatedFault":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "UnknownResourceFault":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "Version":{ + "type":"string", + "min":1, + "max":64 + }, + "VersionOptional":{ + "type":"string", + "max":64 + }, + "WorkflowExecution":{ + "type":"structure", + "required":[ + "workflowId", + "runId" + ], + "members":{ + "workflowId":{"shape":"WorkflowId"}, + "runId":{"shape":"RunId"} + } + }, + "WorkflowExecutionAlreadyStartedFault":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "WorkflowExecutionCancelRequestedCause":{ + "type":"string", + "enum":["CHILD_POLICY_APPLIED"] + }, + "WorkflowExecutionCancelRequestedEventAttributes":{ + "type":"structure", + "members":{ + "externalWorkflowExecution":{"shape":"WorkflowExecution"}, + "externalInitiatedEventId":{"shape":"EventId"}, + "cause":{"shape":"WorkflowExecutionCancelRequestedCause"} + } + }, + "WorkflowExecutionCanceledEventAttributes":{ + "type":"structure", + "required":["decisionTaskCompletedEventId"], + "members":{ + "details":{"shape":"Data"}, + "decisionTaskCompletedEventId":{"shape":"EventId"} + } + }, + "WorkflowExecutionCompletedEventAttributes":{ + "type":"structure", + "required":["decisionTaskCompletedEventId"], + "members":{ + "result":{"shape":"Data"}, + "decisionTaskCompletedEventId":{"shape":"EventId"} + } + }, + "WorkflowExecutionConfiguration":{ + "type":"structure", + "required":[ + "taskStartToCloseTimeout", + "executionStartToCloseTimeout", + "taskList", + "childPolicy" + ], + "members":{ + "taskStartToCloseTimeout":{"shape":"DurationInSeconds"}, + 
"executionStartToCloseTimeout":{"shape":"DurationInSeconds"}, + "taskList":{"shape":"TaskList"}, + "taskPriority":{"shape":"TaskPriority"}, + "childPolicy":{"shape":"ChildPolicy"}, + "lambdaRole":{"shape":"Arn"} + } + }, + "WorkflowExecutionContinuedAsNewEventAttributes":{ + "type":"structure", + "required":[ + "decisionTaskCompletedEventId", + "newExecutionRunId", + "taskList", + "childPolicy", + "workflowType" + ], + "members":{ + "input":{"shape":"Data"}, + "decisionTaskCompletedEventId":{"shape":"EventId"}, + "newExecutionRunId":{"shape":"RunId"}, + "executionStartToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "taskList":{"shape":"TaskList"}, + "taskPriority":{"shape":"TaskPriority"}, + "taskStartToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "childPolicy":{"shape":"ChildPolicy"}, + "tagList":{"shape":"TagList"}, + "workflowType":{"shape":"WorkflowType"}, + "lambdaRole":{"shape":"Arn"} + } + }, + "WorkflowExecutionCount":{ + "type":"structure", + "required":["count"], + "members":{ + "count":{"shape":"Count"}, + "truncated":{"shape":"Truncated"} + } + }, + "WorkflowExecutionDetail":{ + "type":"structure", + "required":[ + "executionInfo", + "executionConfiguration", + "openCounts" + ], + "members":{ + "executionInfo":{"shape":"WorkflowExecutionInfo"}, + "executionConfiguration":{"shape":"WorkflowExecutionConfiguration"}, + "openCounts":{"shape":"WorkflowExecutionOpenCounts"}, + "latestActivityTaskTimestamp":{"shape":"Timestamp"}, + "latestExecutionContext":{"shape":"Data"} + } + }, + "WorkflowExecutionFailedEventAttributes":{ + "type":"structure", + "required":["decisionTaskCompletedEventId"], + "members":{ + "reason":{"shape":"FailureReason"}, + "details":{"shape":"Data"}, + "decisionTaskCompletedEventId":{"shape":"EventId"} + } + }, + "WorkflowExecutionFilter":{ + "type":"structure", + "required":["workflowId"], + "members":{ + "workflowId":{"shape":"WorkflowId"} + } + }, + "WorkflowExecutionInfo":{ + "type":"structure", + "required":[ + "execution", + "workflowType", + "startTimestamp", + "executionStatus" + ], + "members":{ + "execution":{"shape":"WorkflowExecution"}, + "workflowType":{"shape":"WorkflowType"}, + "startTimestamp":{"shape":"Timestamp"}, + "closeTimestamp":{"shape":"Timestamp"}, + "executionStatus":{"shape":"ExecutionStatus"}, + "closeStatus":{"shape":"CloseStatus"}, + "parent":{"shape":"WorkflowExecution"}, + "tagList":{"shape":"TagList"}, + "cancelRequested":{"shape":"Canceled"} + } + }, + "WorkflowExecutionInfoList":{ + "type":"list", + "member":{"shape":"WorkflowExecutionInfo"} + }, + "WorkflowExecutionInfos":{ + "type":"structure", + "required":["executionInfos"], + "members":{ + "executionInfos":{"shape":"WorkflowExecutionInfoList"}, + "nextPageToken":{"shape":"PageToken"} + } + }, + "WorkflowExecutionOpenCounts":{ + "type":"structure", + "required":[ + "openActivityTasks", + "openDecisionTasks", + "openTimers", + "openChildWorkflowExecutions" + ], + "members":{ + "openActivityTasks":{"shape":"Count"}, + "openDecisionTasks":{"shape":"OpenDecisionTasksCount"}, + "openTimers":{"shape":"Count"}, + "openChildWorkflowExecutions":{"shape":"Count"}, + "openLambdaFunctions":{"shape":"Count"} + } + }, + "WorkflowExecutionSignaledEventAttributes":{ + "type":"structure", + "required":["signalName"], + "members":{ + "signalName":{"shape":"SignalName"}, + "input":{"shape":"Data"}, + "externalWorkflowExecution":{"shape":"WorkflowExecution"}, + "externalInitiatedEventId":{"shape":"EventId"} + } + }, + "WorkflowExecutionStartedEventAttributes":{ + 
"type":"structure", + "required":[ + "childPolicy", + "taskList", + "workflowType" + ], + "members":{ + "input":{"shape":"Data"}, + "executionStartToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "taskStartToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "childPolicy":{"shape":"ChildPolicy"}, + "taskList":{"shape":"TaskList"}, + "workflowType":{"shape":"WorkflowType"}, + "tagList":{"shape":"TagList"}, + "taskPriority":{"shape":"TaskPriority"}, + "continuedExecutionRunId":{"shape":"RunIdOptional"}, + "parentWorkflowExecution":{"shape":"WorkflowExecution"}, + "parentInitiatedEventId":{"shape":"EventId"}, + "lambdaRole":{"shape":"Arn"} + } + }, + "WorkflowExecutionTerminatedCause":{ + "type":"string", + "enum":[ + "CHILD_POLICY_APPLIED", + "EVENT_LIMIT_EXCEEDED", + "OPERATOR_INITIATED" + ] + }, + "WorkflowExecutionTerminatedEventAttributes":{ + "type":"structure", + "required":["childPolicy"], + "members":{ + "reason":{"shape":"TerminateReason"}, + "details":{"shape":"Data"}, + "childPolicy":{"shape":"ChildPolicy"}, + "cause":{"shape":"WorkflowExecutionTerminatedCause"} + } + }, + "WorkflowExecutionTimedOutEventAttributes":{ + "type":"structure", + "required":[ + "timeoutType", + "childPolicy" + ], + "members":{ + "timeoutType":{"shape":"WorkflowExecutionTimeoutType"}, + "childPolicy":{"shape":"ChildPolicy"} + } + }, + "WorkflowExecutionTimeoutType":{ + "type":"string", + "enum":["START_TO_CLOSE"] + }, + "WorkflowId":{ + "type":"string", + "min":1, + "max":256 + }, + "WorkflowType":{ + "type":"structure", + "required":[ + "name", + "version" + ], + "members":{ + "name":{"shape":"Name"}, + "version":{"shape":"Version"} + } + }, + "WorkflowTypeConfiguration":{ + "type":"structure", + "members":{ + "defaultTaskStartToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "defaultExecutionStartToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "defaultTaskList":{"shape":"TaskList"}, + "defaultTaskPriority":{"shape":"TaskPriority"}, + "defaultChildPolicy":{"shape":"ChildPolicy"}, + "defaultLambdaRole":{"shape":"Arn"} + } + }, + "WorkflowTypeDetail":{ + "type":"structure", + "required":[ + "typeInfo", + "configuration" + ], + "members":{ + "typeInfo":{"shape":"WorkflowTypeInfo"}, + "configuration":{"shape":"WorkflowTypeConfiguration"} + } + }, + "WorkflowTypeFilter":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{"shape":"Name"}, + "version":{"shape":"VersionOptional"} + } + }, + "WorkflowTypeInfo":{ + "type":"structure", + "required":[ + "workflowType", + "status", + "creationDate" + ], + "members":{ + "workflowType":{"shape":"WorkflowType"}, + "status":{"shape":"RegistrationStatus"}, + "description":{"shape":"Description"}, + "creationDate":{"shape":"Timestamp"}, + "deprecationDate":{"shape":"Timestamp"} + } + }, + "WorkflowTypeInfoList":{ + "type":"list", + "member":{"shape":"WorkflowTypeInfo"} + }, + "WorkflowTypeInfos":{ + "type":"structure", + "required":["typeInfos"], + "members":{ + "typeInfos":{"shape":"WorkflowTypeInfoList"}, + "nextPageToken":{"shape":"PageToken"} + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/swf/2012-01-25/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/swf/2012-01-25/docs-2.json new file mode 100644 index 000000000..b2b5d16ca --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/swf/2012-01-25/docs-2.json @@ -0,0 +1,1695 @@ +{ + "version": "2.0", + "operations": { + "CountClosedWorkflowExecutions": "

    Returns the number of closed workflow executions within the given domain that meet the specified filtering criteria.

    This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • Constrain the following parameters by using a Condition element with the appropriate keys.
      • tagFilter.tag: String constraint. The key is swf:tagFilter.tag.
      • typeFilter.name: String constraint. The key is swf:typeFilter.name.
      • typeFilter.version: String constraint. The key is swf:typeFilter.version.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.
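
    For reference only, a minimal aws-sdk-go sketch of this call might look like the following; the domain name and the 30-day window are illustrative assumptions, not values taken from this model.

    package main

    import (
        "fmt"
        "log"
        "time"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/swf"
    )

    func main() {
        // Client on the default credential chain.
        svc := swf.New(session.Must(session.NewSession()))

        // Count executions that closed within the last 30 days.
        out, err := svc.CountClosedWorkflowExecutions(&swf.CountClosedWorkflowExecutionsInput{
            Domain: aws.String("example-domain"), // assumed domain name
            CloseTimeFilter: &swf.ExecutionTimeFilter{
                OldestDate: aws.Time(time.Now().AddDate(0, 0, -30)),
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        // truncated signals the count was capped (WorkflowExecutionCount shape).
        fmt.Println(aws.Int64Value(out.Count), aws.BoolValue(out.Truncated))
    }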

    ", + "CountOpenWorkflowExecutions": "

    Returns the number of open workflow executions within the given domain that meet the specified filtering criteria.

    This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • Constrain the following parameters by using a Condition element with the appropriate keys.
      • tagFilter.tag: String constraint. The key is swf:tagFilter.tag.
      • typeFilter.name: String constraint. The key is swf:typeFilter.name.
      • typeFilter.version: String constraint. The key is swf:typeFilter.version.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "CountPendingActivityTasks": "

    Returns the estimated number of activity tasks in the specified task list. The count returned is an approximation and is not guaranteed to be exact. If you specify a task list in which no activity task has ever been scheduled, 0 is returned.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • Constrain the taskList.name parameter by using a Condition element with the swf:taskList.name key to allow the action to access only certain task lists.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.
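
    The same pattern applies here; a hedged sketch, with client setup and imports as in the CountClosedWorkflowExecutions example above and the task-list name assumed:

    // pendingActivityTasks returns the approximate number of activity tasks
    // queued in one task list. Per the note above, 0 comes back for a task
    // list in which no task was ever scheduled.
    func pendingActivityTasks(svc *swf.SWF) (int64, error) {
        out, err := svc.CountPendingActivityTasks(&swf.CountPendingActivityTasksInput{
            Domain:   aws.String("example-domain"),
            TaskList: &swf.TaskList{Name: aws.String("image-resize-tasks")},
        })
        if err != nil {
            return 0, err
        }
        return aws.Int64Value(out.Count), nil
    }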

    ", + "CountPendingDecisionTasks": "

    Returns the estimated number of decision tasks in the specified task list. The count returned is an approximation and is not guaranteed to be exact. If you specify a task list in which no decision task has ever been scheduled, 0 is returned.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • Constrain the taskList.name parameter by using a Condition element with the swf:taskList.name key to allow the action to access only certain task lists.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "DeprecateActivityType": "

    Deprecates the specified activity type. After an activity type has been deprecated, you cannot create new tasks of that activity type. Tasks of this type that were scheduled before the type was deprecated will continue to run.

    This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • Constrain the following parameters by using a Condition element with the appropriate keys.
      • activityType.name: String constraint. The key is swf:activityType.name.
      • activityType.version: String constraint. The key is swf:activityType.version.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.
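
    A minimal sketch, assuming the client setup from the first example; the type name and version are placeholders:

    // deprecateResizeV1 retires one version of an activity type. The call
    // returns no data; per this model it can fail with TypeDeprecatedFault
    // if the type is already deprecated.
    func deprecateResizeV1(svc *swf.SWF) error {
        _, err := svc.DeprecateActivityType(&swf.DeprecateActivityTypeInput{
            Domain: aws.String("example-domain"),
            ActivityType: &swf.ActivityType{
                Name:    aws.String("resize-image"),
                Version: aws.String("1.0"),
            },
        })
        return err
    }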

    ", + "DeprecateDomain": "

    Deprecates the specified domain. After a domain has been deprecated, it cannot be used to create new workflow executions or register new types. However, you can still use visibility actions on this domain. Deprecating a domain also deprecates all activity and workflow types registered in the domain. Executions that were started before the domain was deprecated will continue to run.

    This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "DeprecateWorkflowType": "

    Deprecates the specified workflow type. After a workflow type has been deprecated, you cannot create new executions of that type. Executions that were started before the type was deprecated will continue to run. A deprecated workflow type may still be used when calling visibility actions.

    This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • Constrain the following parameters by using a Condition element with the appropriate keys.
      • workflowType.name: String constraint. The key is swf:workflowType.name.
      • workflowType.version: String constraint. The key is swf:workflowType.version.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "DescribeActivityType": "

    Returns information about the specified activity type. This includes configuration settings provided when the type was registered and other general information about the type.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • Constrain the following parameters by using a Condition element with the appropriate keys.
      • activityType.name: String constraint. The key is swf:activityType.name.
      • activityType.version: String constraint. The key is swf:activityType.version.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "DescribeDomain": "

    Returns information about the specified domain, including description and status.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.
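
    A sketch of reading the result, with field names taken from the DomainDetail and DomainInfo shapes in this model (domain name assumed, setup as in the first example):

    // describeDomain prints a domain's registration status and its
    // workflow-execution retention period (a string number of days).
    func describeDomain(svc *swf.SWF) error {
        out, err := svc.DescribeDomain(&swf.DescribeDomainInput{
            Name: aws.String("example-domain"),
        })
        if err != nil {
            return err
        }
        fmt.Printf("status=%s retention=%s days\n",
            aws.StringValue(out.DomainInfo.Status),
            aws.StringValue(out.Configuration.WorkflowExecutionRetentionPeriodInDays))
        return nil
    }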

    ", + "DescribeWorkflowExecution": "

    Returns information about the specified workflow execution including its type and some statistics.

    This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.
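
    A sketch along the same lines; note that the WorkflowExecution shape in this model requires both workflowId and runId:

    // describeExecution prints an execution's status and one of the open
    // counts from the WorkflowExecutionDetail result.
    func describeExecution(svc *swf.SWF, workflowID, runID string) error {
        out, err := svc.DescribeWorkflowExecution(&swf.DescribeWorkflowExecutionInput{
            Domain: aws.String("example-domain"), // assumed
            Execution: &swf.WorkflowExecution{
                WorkflowId: aws.String(workflowID),
                RunId:      aws.String(runID),
            },
        })
        if err != nil {
            return err
        }
        fmt.Printf("status=%s openActivityTasks=%d\n",
            aws.StringValue(out.ExecutionInfo.ExecutionStatus),
            aws.Int64Value(out.OpenCounts.OpenActivityTasks))
        return nil
    }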

    ", + "DescribeWorkflowType": "

    Returns information about the specified workflow type. This includes configuration settings specified when the type was registered and other information such as creation date, current status, and so on.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • Constrain the following parameters by using a Condition element with the appropriate keys.
      • workflowType.name: String constraint. The key is swf:workflowType.name.
      • workflowType.version: String constraint. The key is swf:workflowType.version.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "GetWorkflowExecutionHistory": "

    Returns the history of the specified workflow execution. The results may be split into multiple pages. To retrieve subsequent pages, make the call again using the nextPageToken returned by the initial call.

    This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.
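
    The nextPageToken loop described above might look like this sketch (domain assumed; client setup as in the first example):

    // fullHistory follows nextPageToken until the whole event history
    // has been collected.
    func fullHistory(svc *swf.SWF, exec *swf.WorkflowExecution) ([]*swf.HistoryEvent, error) {
        var events []*swf.HistoryEvent
        input := &swf.GetWorkflowExecutionHistoryInput{
            Domain:    aws.String("example-domain"),
            Execution: exec,
        }
        for {
            out, err := svc.GetWorkflowExecutionHistory(input)
            if err != nil {
                return nil, err
            }
            events = append(events, out.Events...)
            if out.NextPageToken == nil {
                return events, nil // last page
            }
            input.NextPageToken = out.NextPageToken
        }
    }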

    ", + "ListActivityTypes": "

    Returns information about all activity types registered in the specified domain that match the specified name and registration status. The result includes information such as the creation date and the current status of each type. The results may be split into multiple pages. To retrieve subsequent pages, make the call again using the nextPageToken returned by the initial call.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "ListClosedWorkflowExecutions": "

    Returns a list of closed workflow executions in the specified domain that meet the filtering criteria. The results may be split into multiple pages. To retrieve subsequent pages, make the call again using the nextPageToken returned by the initial call.

    This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • Constrain the following parameters by using a Condition element with the appropriate keys.
      • tagFilter.tag: String constraint. The key is swf:tagFilter.tag.
      • typeFilter.name: String constraint. The key is swf:typeFilter.name.
      • typeFilter.version: String constraint. The key is swf:typeFilter.version.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.
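
    A sketch combining a close-time filter with a type filter (all names assumed; setup and imports as in the first example):

    // recentClosed lists executions of one workflow type that closed in
    // the last 24 hours and prints each close status.
    func recentClosed(svc *swf.SWF) error {
        out, err := svc.ListClosedWorkflowExecutions(&swf.ListClosedWorkflowExecutionsInput{
            Domain: aws.String("example-domain"),
            CloseTimeFilter: &swf.ExecutionTimeFilter{
                OldestDate: aws.Time(time.Now().Add(-24 * time.Hour)),
            },
            TypeFilter: &swf.WorkflowTypeFilter{Name: aws.String("order-fulfillment")},
        })
        if err != nil {
            return err
        }
        for _, info := range out.ExecutionInfos {
            fmt.Println(aws.StringValue(info.Execution.WorkflowId),
                aws.StringValue(info.CloseStatus))
        }
        return nil
    }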

    ", + "ListDomains": "

    Returns the list of domains registered in the account. The results may be split into multiple pages. To retrieve subsequent pages, make the call again using the nextPageToken returned by the initial call.

    This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains. The element must be set to arn:aws:swf::AccountID:domain/*, where AccountID is the account ID, with no dashes.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.
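
    A paging sketch, assuming the SDK's generated ListDomainsPages helper (the SDK generates such paginators for operations that use nextPageToken):

    // printDomains walks every page of registered domains.
    func printDomains(svc *swf.SWF) error {
        return svc.ListDomainsPages(&swf.ListDomainsInput{
            RegistrationStatus: aws.String(swf.RegistrationStatusRegistered),
        }, func(page *swf.ListDomainsOutput, lastPage bool) bool {
            for _, d := range page.DomainInfos {
                fmt.Println(aws.StringValue(d.Name), aws.StringValue(d.Status))
            }
            return true // continue to the next page
        })
    }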

    ", + "ListOpenWorkflowExecutions": "

    Returns a list of open workflow executions in the specified domain that meet the filtering criteria. The results may be split into multiple pages. To retrieve subsequent pages, make the call again using the nextPageToken returned by the initial call.

    This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • Constrain the following parameters by using a Condition element with the appropriate keys.
      • tagFilter.tag: String constraint. The key is swf:tagFilter.tag.
      • typeFilter.name: String constraint. The key is swf:typeFilter.name.
      • typeFilter.version: String constraint. The key is swf:typeFilter.version.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.
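
    To make the condition keys above concrete, here is a hedged sketch of such an IAM policy embedded as a Go constant; the account ID, domain name, and tag value are placeholders, and the ARN format follows the convention quoted elsewhere in this file.

    package main

    import "fmt"

    // examplePolicy allows ListOpenWorkflowExecutions only for executions
    // tagged "project-alpha", via the swf:tagFilter.tag condition key.
    // All identifiers are hypothetical.
    const examplePolicy = `{
      "Version": "2012-10-17",
      "Statement": [{
        "Effect": "Allow",
        "Action": "swf:ListOpenWorkflowExecutions",
        "Resource": "arn:aws:swf::123456789012:domain/example-domain",
        "Condition": {
          "StringEquals": {"swf:tagFilter.tag": "project-alpha"}
        }
      }]
    }`

    func main() { fmt.Println(examplePolicy) }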

    ", + "ListWorkflowTypes": "

    Returns information about workflow types in the specified domain. The results may be split into multiple pages that can be retrieved by making the call repeatedly.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "PollForActivityTask": "

    Used by workers to get an ActivityTask from the specified activity taskList. This initiates a long poll, where the service holds the HTTP connection open and responds as soon as a task becomes available. The maximum time the service holds on to the request before responding is 60 seconds. If no task is available within 60 seconds, the poll will return an empty result. An empty result, in this context, means that an ActivityTask is returned, but that the value of taskToken is an empty string. If a task is returned, the worker should use its type to identify and process it correctly.

    Workers should set their client-side socket timeout to at least 70 seconds (10 seconds higher than the maximum time the service may hold the poll request).

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • Constrain the taskList.name parameter by using a Condition element with the swf:taskList.name key to allow the action to access only certain task lists.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.
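
    A minimal Go worker loop consistent with the description above (60-second long poll, 70-second client socket timeout, empty taskToken check); the domain, task list, and region are placeholder assumptions.

    package main

    import (
        "fmt"
        "net/http"
        "time"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/swf"
    )

    func main() {
        // Keep the socket timeout above the 60-second long-poll window;
        // 70 seconds leaves the recommended headroom.
        cfg := aws.NewConfig().
            WithRegion("us-east-1").
            WithHTTPClient(&http.Client{Timeout: 70 * time.Second})
        client := swf.New(session.New(cfg))

        for {
            task, err := client.PollForActivityTask(&swf.PollForActivityTaskInput{
                Domain:   aws.String("example-domain"), // hypothetical names
                TaskList: &swf.TaskList{Name: aws.String("example-task-list")},
                Identity: aws.String("worker-1"),
            })
            if err != nil {
                continue // transient error: retry the long poll
            }
            // An empty taskToken means the 60-second poll expired with no work.
            if aws.StringValue(task.TaskToken) == "" {
                continue
            }
            fmt.Printf("got %s task %s\n",
                aws.StringValue(task.ActivityType.Name),
                aws.StringValue(task.ActivityId))
            // ...process the task, then report the outcome with one of the
            // RespondActivityTask* calls described later in this file.
        }
    }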

    ", + "PollForDecisionTask": "

    Used by deciders to get a DecisionTask from the specified decision taskList. A decision task may be returned for any open workflow execution that is using the specified task list. The task includes a paginated view of the history of the workflow execution. The decider should use the workflow type and the history to determine how to properly handle the task.

    This action initiates a long poll, where the service holds the HTTP connection open and responds as soon as a task becomes available. If no decision task is available in the specified task list before the timeout of 60 seconds expires, an empty result is returned. An empty result, in this context, means that a DecisionTask is returned, but that the value of taskToken is an empty string.

    Deciders should set their client-side socket timeout to at least 70 seconds (10 seconds higher than the timeout). Because the number of workflow history events for a single workflow execution might be very large, the result returned might be split up across a number of pages. To retrieve subsequent pages, make additional calls to PollForDecisionTask using the nextPageToken returned by the initial call. Note that you do not call GetWorkflowExecutionHistory with this nextPageToken. Instead, call PollForDecisionTask again.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • Constrain the taskList.name parameter by using a Condition element with the swf:taskList.name key to allow the action to access only certain task lists.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.
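
    A hedged Go sketch of the pattern above: poll for a decision task, then page through a large history by re-calling PollForDecisionTask with nextPageToken (domain, task list, and region are placeholders).

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/swf"
    )

    func main() {
        client := swf.New(session.New(aws.NewConfig().WithRegion("us-east-1")))

        input := &swf.PollForDecisionTaskInput{
            Domain:   aws.String("example-domain"), // hypothetical names
            TaskList: &swf.TaskList{Name: aws.String("example-decider-list")},
            Identity: aws.String("decider-1"),
        }
        task, err := client.PollForDecisionTask(input)
        if err != nil || aws.StringValue(task.TaskToken) == "" {
            return // error or empty result: a real decider would poll again
        }

        // Large histories arrive in pages. As noted above, fetch the rest by
        // calling PollForDecisionTask again with nextPageToken, not
        // GetWorkflowExecutionHistory.
        events := task.Events
        for task.NextPageToken != nil {
            input.NextPageToken = task.NextPageToken
            if task, err = client.PollForDecisionTask(input); err != nil {
                return
            }
            events = append(events, task.Events...)
        }
        fmt.Printf("decision task for %s with %d history events\n",
            aws.StringValue(task.WorkflowType.Name), len(events))
    }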

    ", + "RecordActivityTaskHeartbeat": "

    Used by activity workers to report to the service that the ActivityTask represented by the specified taskToken is still making progress. The worker can also (optionally) specify details of the progress, for example percent complete, using the details parameter. This action can also be used by the worker as a mechanism to check if cancellation is being requested for the activity task. If a cancellation is being attempted for the specified task, then the boolean cancelRequested flag returned by the service is set to true.

    This action resets the taskHeartbeatTimeout clock. The taskHeartbeatTimeout is specified in RegisterActivityType.

    This action does not in itself create an event in the workflow execution history. However, if the task times out, the workflow execution history will contain an ActivityTaskTimedOut event that contains the information from the last heartbeat generated by the activity worker.

    The taskStartToCloseTimeout of an activity type is the maximum duration of an activity task, regardless of the number of RecordActivityTaskHeartbeat requests received. The taskStartToCloseTimeout is also specified in RegisterActivityType. This operation is only useful for long-lived activities to report the liveness of the task and to determine if a cancellation is being attempted. If the cancelRequested flag returns true, a cancellation is being attempted. If the worker can cancel the activity, it should respond with RespondActivityTaskCanceled. Otherwise, it should ignore the cancellation request.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.
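
    A minimal sketch of the heartbeat-and-cancel-check flow described above, assuming a *swf.SWF client and a task token from PollForActivityTask; the helper name is hypothetical.

    package example

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/swf"
    )

    // heartbeat reports progress for a long-lived activity and returns true if
    // cancellation has been requested. Call it at an interval comfortably
    // shorter than the taskHeartbeatTimeout registered for the activity type.
    func heartbeat(client *swf.SWF, taskToken, progress string) bool {
        status, err := client.RecordActivityTaskHeartbeat(&swf.RecordActivityTaskHeartbeatInput{
            TaskToken: aws.String(taskToken),
            Details:   aws.String(progress), // free-form, e.g. percent complete
        })
        if err != nil {
            return false
        }
        // If true, respond with RespondActivityTaskCanceled when the work can
        // be safely undone; otherwise ignore the cancellation request.
        return aws.BoolValue(status.CancelRequested)
    }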

    ", + "RegisterActivityType": "

    Registers a new activity type along with its configuration settings in the specified domain.

    A TypeAlreadyExists fault is returned if the type already exists in the domain. You cannot change any configuration settings of the type after its registration, and it must be registered as a new version.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • Constrain the following parameters by using a Condition element with the appropriate keys.
      • defaultTaskList.name: String constraint. The key is swf:defaultTaskList.name.
      • name: String constraint. The key is swf:name.
      • version: String constraint. The key is swf:version.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.
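
    A sketch of a registration call in Go; every name, version, and timeout value here is illustrative, and the timeouts are strings (seconds, or "NONE" for unlimited).

    package example

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/swf"
    )

    // registerActivityType registers one version of an activity type with its
    // default settings. Registration is immutable: changing any setting later
    // requires registering a new version.
    func registerActivityType(client *swf.SWF) error {
        _, err := client.RegisterActivityType(&swf.RegisterActivityTypeInput{
            Domain:          aws.String("example-domain"),
            Name:            aws.String("resize-image"),
            Version:         aws.String("1.0"),
            DefaultTaskList: &swf.TaskList{Name: aws.String("example-task-list")},
            DefaultTaskStartToCloseTimeout:    aws.String("600"),
            DefaultTaskHeartbeatTimeout:       aws.String("120"),
            DefaultTaskScheduleToStartTimeout: aws.String("300"),
            DefaultTaskScheduleToCloseTimeout: aws.String("900"),
        })
        // A TypeAlreadyExists fault means this name/version pair is taken.
        return err
    }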

    ", + "RegisterDomain": "

    Registers a new domain.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • You cannot use an IAM policy to control domain access for this action. The name of the domain being registered is available as the resource of this action.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "RegisterWorkflowType": "

    Registers a new workflow type and its configuration settings in the specified domain.

    The retention period for the workflow history is set by the RegisterDomain action.

    If the type already exists, then a TypeAlreadyExists fault is returned. You cannot change the configuration settings of a workflow type once it is registered, and it must be registered as a new version.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • Constrain the following parameters by using a Condition element with the appropriate keys.
      • defaultTaskList.name: String constraint. The key is swf:defaultTaskList.name.
      • name: String constraint. The key is swf:name.
      • version: String constraint. The key is swf:version.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.
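
    A companion sketch for workflow types, again with illustrative values only; the defaultChildPolicy chosen here can be overridden per execution, as the text above notes.

    package example

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/swf"
    )

    // registerWorkflowType registers one version of a workflow type with
    // hypothetical defaults for its task list, timeouts, and child policy.
    func registerWorkflowType(client *swf.SWF) error {
        _, err := client.RegisterWorkflowType(&swf.RegisterWorkflowTypeInput{
            Domain:          aws.String("example-domain"),
            Name:            aws.String("image-pipeline"),
            Version:         aws.String("1.0"),
            DefaultTaskList: &swf.TaskList{Name: aws.String("example-decider-list")},
            DefaultTaskStartToCloseTimeout:      aws.String("60"),
            DefaultExecutionStartToCloseTimeout: aws.String("3600"),
            DefaultChildPolicy:                  aws.String(swf.ChildPolicyTerminate),
        })
        return err
    }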

    ", + "RequestCancelWorkflowExecution": "

    Records a WorkflowExecutionCancelRequested event in the currently running workflow execution identified by the given domain, workflowId, and runId. This logically requests the cancellation of the workflow execution as a whole. It is up to the decider to take appropriate actions when it receives an execution history with this event.

    If the runId is not specified, the WorkflowExecutionCancelRequested event is recorded in the history of the current open workflow execution with the specified workflowId in the domain. Because this action allows the workflow to properly clean up and gracefully close, it should be used instead of TerminateWorkflowExecution when possible.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.
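
    A minimal Go sketch of a graceful cancellation request; the domain is a placeholder, and omitting RunId targets the current open execution as described above.

    package example

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/swf"
    )

    // requestCancel asks a running execution to cancel itself gracefully.
    // Prefer this to TerminateWorkflowExecution, which forces closure without
    // giving the decider a chance to clean up.
    func requestCancel(client *swf.SWF, workflowID string) error {
        _, err := client.RequestCancelWorkflowExecution(&swf.RequestCancelWorkflowExecutionInput{
            Domain:     aws.String("example-domain"), // hypothetical domain
            WorkflowId: aws.String(workflowID),
        })
        return err
    }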

    ", + "RespondActivityTaskCanceled": "

    Used by workers to tell the service that the ActivityTask identified by the taskToken was successfully canceled. Additional details can be optionally provided using the details argument.

    These details (if provided) appear in the ActivityTaskCanceled event added to the workflow history.

    Only use this operation if the cancelRequested flag of a RecordActivityTaskHeartbeat request returns true and if the activity can be safely undone or abandoned.

    A task is considered open from the time that it is scheduled until it is closed. Therefore, a task is reported as open while a worker is processing it. A task is closed after it has been specified in a call to RespondActivityTaskCompleted, RespondActivityTaskCanceled, or RespondActivityTaskFailed, or after it has timed out.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "RespondActivityTaskCompleted": "

    Used by workers to tell the service that the ActivityTask identified by the taskToken completed successfully with a result (if provided). The result appears in the ActivityTaskCompleted event in the workflow history.

    If the requested task does not complete successfully, use RespondActivityTaskFailed instead. If the worker finds that the task is canceled through the cancelRequested flag returned by RecordActivityTaskHeartbeat, it should cancel the task, clean up, and then call RespondActivityTaskCanceled.

    A task is considered open from the time that it is scheduled until it is closed. Therefore, a task is reported as open while a worker is processing it. A task is closed after it has been specified in a call to RespondActivityTaskCompleted, RespondActivityTaskCanceled, or RespondActivityTaskFailed, or after it has timed out.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.
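
    A hedged sketch of closing out an activity task either way; the helper and its reason string are hypothetical, and the result is the free-form value described above.

    package example

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/swf"
    )

    // reportOutcome closes an activity task; a task stays open until exactly
    // one of the RespondActivityTask* calls succeeds or the task times out.
    func reportOutcome(client *swf.SWF, taskToken, result string, taskErr error) error {
        if taskErr == nil {
            _, err := client.RespondActivityTaskCompleted(&swf.RespondActivityTaskCompletedInput{
                TaskToken: aws.String(taskToken),
                Result:    aws.String(result), // recorded on ActivityTaskCompleted
            })
            return err
        }
        _, err := client.RespondActivityTaskFailed(&swf.RespondActivityTaskFailedInput{
            TaskToken: aws.String(taskToken),
            Reason:    aws.String("processing-error"), // shows up in ActivityTaskFailed
            Details:   aws.String(taskErr.Error()),
        })
        return err
    }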

    ", + "RespondActivityTaskFailed": "

    Used by workers to tell the service that the ActivityTask identified by the taskToken has failed with reason (if specified). The reason and details appear in the ActivityTaskFailed event added to the workflow history.

    A task is considered open from the time that it is scheduled until it is closed. Therefore, a task is reported as open while a worker is processing it. A task is closed after it has been specified in a call to RespondActivityTaskCompleted, RespondActivityTaskCanceled, or RespondActivityTaskFailed, or after it has timed out.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "RespondDecisionTaskCompleted": "

    Used by deciders to tell the service that the DecisionTask identified by the taskToken has successfully completed. The decisions argument specifies the list of decisions made while processing the task.

    A DecisionTaskCompleted event is added to the workflow history. The executionContext specified is attached to the event in the workflow execution history.

    Access Control

    If an IAM policy grants permission to use RespondDecisionTaskCompleted, it can express permissions for the list of decisions in the decisions parameter. Each of the decisions has one or more parameters, much like a regular API call. To allow for policies to be as readable as possible, you can express permissions on decisions as if they were actual API calls, including applying conditions to some parameters. For more information, see Using IAM to Manage Access to Amazon SWF Workflows.
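
    A minimal sketch of a decider closing its task with a single decision; real deciders typically return ScheduleActivityTask, timer, or child-workflow decisions computed from the history, and the strings here are illustrative.

    package example

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/swf"
    )

    // completeDecisionTask responds to a decision task with one
    // CompleteWorkflowExecution decision, closing the workflow execution.
    func completeDecisionTask(client *swf.SWF, taskToken string) error {
        _, err := client.RespondDecisionTaskCompleted(&swf.RespondDecisionTaskCompletedInput{
            TaskToken: aws.String(taskToken),
            Decisions: []*swf.Decision{{
                DecisionType: aws.String(swf.DecisionTypeCompleteWorkflowExecution),
                CompleteWorkflowExecutionDecisionAttributes: &swf.CompleteWorkflowExecutionDecisionAttributes{
                    Result: aws.String("done"), // implementation-defined result
                },
            }},
            ExecutionContext: aws.String("stage=finished"), // attached to the event
        })
        return err
    }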

    ", + "SignalWorkflowExecution": "

    Records a WorkflowExecutionSignaled event in the workflow execution history and creates a decision task for the workflow execution identified by the given domain, workflowId, and runId. The event is recorded with the specified user-defined signalName and input (if provided).

    If a runId is not specified, then the WorkflowExecutionSignaled event is recorded in the history of the current open workflow with the matching workflowId in the domain. If the specified workflow execution is not open, this method fails with UnknownResource.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.
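
    A short Go sketch of delivering a signal; the domain is a placeholder, and omitting RunId targets the current open execution with this workflowId, as described above.

    package example

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/swf"
    )

    // signal records a WorkflowExecutionSignaled event on an open execution;
    // a closed execution yields an UnknownResource fault.
    func signal(client *swf.SWF, workflowID, name, input string) error {
        _, err := client.SignalWorkflowExecution(&swf.SignalWorkflowExecutionInput{
            Domain:     aws.String("example-domain"), // hypothetical domain
            WorkflowId: aws.String(workflowID),
            SignalName: aws.String(name),
            Input:      aws.String(input), // recorded with the event
        })
        return err
    }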

    ", + "StartWorkflowExecution": "

    Starts an execution of the workflow type in the specified domain using the provided workflowId and input data.

    This action returns the newly started workflow execution.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • Constrain the following parameters by using a Condition element with the appropriate keys.
      • tagList.member.0: The key is swf:tagList.member.0.
      • tagList.member.1: The key is swf:tagList.member.1.
      • tagList.member.2: The key is swf:tagList.member.2.
      • tagList.member.3: The key is swf:tagList.member.3.
      • tagList.member.4: The key is swf:tagList.member.4.
      • taskList: String constraint. The key is swf:taskList.name.
      • workflowType.name: String constraint. The key is swf:workflowType.name.
      • workflowType.version: String constraint. The key is swf:workflowType.version.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.
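
    A hedged sketch of starting an execution; all names are illustrative, and the tag set here is the kind of value the swf:tagList.member.N condition keys above would match against.

    package example

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/swf"
    )

    // start launches an execution of a previously registered workflow type
    // and prints the runId of the newly started execution.
    func start(client *swf.SWF, workflowID string) error {
        out, err := client.StartWorkflowExecution(&swf.StartWorkflowExecutionInput{
            Domain:       aws.String("example-domain"),
            WorkflowId:   aws.String(workflowID),
            WorkflowType: &swf.WorkflowType{Name: aws.String("image-pipeline"), Version: aws.String("1.0")},
            TaskList:     &swf.TaskList{Name: aws.String("example-decider-list")},
            Input:        aws.String(`{"object":"s3://bucket/key"}`),
            TagList:      []*string{aws.String("project-alpha")},
        })
        if err != nil {
            return err
        }
        fmt.Println("started runId:", aws.StringValue(out.RunId))
        return nil
    }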

    ", + "TerminateWorkflowExecution": "

    Records a WorkflowExecutionTerminated event and forces closure of the workflow execution identified by the given domain, runId, and workflowId. The child policy, registered with the workflow type or specified when starting this execution, is applied to any open child workflow executions of this workflow execution.

    If the identified workflow execution was in progress, it is terminated immediately. If a runId is not specified, then the WorkflowExecutionTerminated event is recorded in the history of the current open workflow with the matching workflowId in the domain. You should consider using the RequestCancelWorkflowExecution action instead, because it allows the workflow to gracefully close while TerminateWorkflowExecution does not.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    " + }, + "service": "Amazon Simple Workflow Service

    The Amazon Simple Workflow Service (Amazon SWF) makes it easy to build applications that use Amazon's cloud to coordinate work across distributed components. In Amazon SWF, a task represents a logical unit of work that is performed by a component of your workflow. Coordinating tasks in a workflow involves managing intertask dependencies, scheduling, and concurrency in accordance with the logical flow of the application.

    Amazon SWF gives you full control over implementing tasks and coordinating them without worrying about underlying complexities such as tracking their progress and maintaining their state.

    This documentation serves as reference only. For a broader overview of the Amazon SWF programming model, see the Amazon SWF Developer Guide.

    ", + "shapes": { + "ActivityId": { + "base": null, + "refs": { + "ActivityTask$activityId": "

    The unique ID of the task.

    ", + "ActivityTaskCancelRequestedEventAttributes$activityId": "

    The unique ID of the task.

    ", + "ActivityTaskScheduledEventAttributes$activityId": "

    The unique ID of the activity task.

    ", + "RequestCancelActivityTaskDecisionAttributes$activityId": "

    The activityId of the activity task to be canceled.

    ", + "RequestCancelActivityTaskFailedEventAttributes$activityId": "

    The activityId provided in the RequestCancelActivityTask decision that failed.

    ", + "ScheduleActivityTaskDecisionAttributes$activityId": "

    Required. The activityId of the activity task.

    The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f - \\u009f). Also, it must not contain the literal string "arn".

    ", + "ScheduleActivityTaskFailedEventAttributes$activityId": "

    The activityId provided in the ScheduleActivityTask decision that failed.

    " + } + }, + "ActivityTask": { + "base": "

    Unit of work sent to an activity worker.

    ", + "refs": { + } + }, + "ActivityTaskCancelRequestedEventAttributes": { + "base": "

    Provides details of the ActivityTaskCancelRequested event.

    ", + "refs": { + "HistoryEvent$activityTaskCancelRequestedEventAttributes": "

    If the event is of type ActivityTaskCancelRequested then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "ActivityTaskCanceledEventAttributes": { + "base": "

    Provides details of the ActivityTaskCanceled event.

    ", + "refs": { + "HistoryEvent$activityTaskCanceledEventAttributes": "

    If the event is of type ActivityTaskCanceled then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "ActivityTaskCompletedEventAttributes": { + "base": "

    Provides details of the ActivityTaskCompleted event.

    ", + "refs": { + "HistoryEvent$activityTaskCompletedEventAttributes": "

    If the event is of type ActivityTaskCompleted then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "ActivityTaskFailedEventAttributes": { + "base": "

    Provides details of the ActivityTaskFailed event.

    ", + "refs": { + "HistoryEvent$activityTaskFailedEventAttributes": "

    If the event is of type ActivityTaskFailed then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "ActivityTaskScheduledEventAttributes": { + "base": "

    Provides details of the ActivityTaskScheduled event.

    ", + "refs": { + "HistoryEvent$activityTaskScheduledEventAttributes": "

    If the event is of type ActivityTaskScheduled then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "ActivityTaskStartedEventAttributes": { + "base": "

    Provides details of the ActivityTaskStarted event.

    ", + "refs": { + "HistoryEvent$activityTaskStartedEventAttributes": "

    If the event is of type ActivityTaskStarted then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "ActivityTaskStatus": { + "base": "

    Status information about an activity task.

    ", + "refs": { + } + }, + "ActivityTaskTimedOutEventAttributes": { + "base": "

    Provides details of the ActivityTaskTimedOut event.

    ", + "refs": { + "HistoryEvent$activityTaskTimedOutEventAttributes": "

    If the event is of type ActivityTaskTimedOut then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "ActivityTaskTimeoutType": { + "base": null, + "refs": { + "ActivityTaskTimedOutEventAttributes$timeoutType": "

    The type of the timeout that caused this event.

    " + } + }, + "ActivityType": { + "base": "

    Represents an activity type.

    ", + "refs": { + "ActivityTask$activityType": "

    The type of this activity task.

    ", + "ActivityTaskScheduledEventAttributes$activityType": "

    The type of the activity task.

    ", + "ActivityTypeInfo$activityType": "

    The ActivityType type structure representing the activity type.

    ", + "DeprecateActivityTypeInput$activityType": "

    The activity type to deprecate.

    ", + "DescribeActivityTypeInput$activityType": "

    The activity type to get information about. Activity types are identified by the name and version that were supplied when the activity was registered.

    ", + "ScheduleActivityTaskDecisionAttributes$activityType": "

    Required. The type of the activity task to schedule.

    ", + "ScheduleActivityTaskFailedEventAttributes$activityType": "

    The activity type provided in the ScheduleActivityTask decision that failed.

    " + } + }, + "ActivityTypeConfiguration": { + "base": "

    Configuration settings registered with the activity type.

    ", + "refs": { + "ActivityTypeDetail$configuration": "

    The configuration settings registered with the activity type.

    " + } + }, + "ActivityTypeDetail": { + "base": "

    Detailed information about an activity type.

    ", + "refs": { + } + }, + "ActivityTypeInfo": { + "base": "

    Detailed information about an activity type.

    ", + "refs": { + "ActivityTypeDetail$typeInfo": "

    General information about the activity type.

    The status of the activity type (returned in the ActivityTypeInfo structure) can be one of the following.

    • REGISTERED: The type is registered and available. Workers supporting this type should be running.
    • DEPRECATED: The type was deprecated using DeprecateActivityType, but is still in use. You should keep workers supporting this type running. You cannot create new tasks of this type.
    ", + "ActivityTypeInfoList$member": null + } + }, + "ActivityTypeInfoList": { + "base": null, + "refs": { + "ActivityTypeInfos$typeInfos": "

    List of activity type information.

    " + } + }, + "ActivityTypeInfos": { + "base": "

    Contains a paginated list of activity type information structures.

    ", + "refs": { + } + }, + "Arn": { + "base": null, + "refs": { + "ContinueAsNewWorkflowExecutionDecisionAttributes$lambdaRole": "

    The ARN of an IAM role that authorizes Amazon SWF to invoke AWS Lambda functions.

    In order for this workflow execution to invoke AWS Lambda functions, an appropriate IAM role must be specified either as a default for the workflow type or through this field.", + "RegisterWorkflowTypeInput$defaultLambdaRole": "

    The ARN of the default IAM role to use when a workflow execution of this type invokes AWS Lambda functions.

    This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution and ContinueAsNewWorkflowExecution decision.

    ", + "StartChildWorkflowExecutionDecisionAttributes$lambdaRole": "

    The ARN of an IAM role that authorizes Amazon SWF to invoke AWS Lambda functions.

    In order for this workflow execution to invoke AWS Lambda functions, an appropriate IAM role must be specified either as a default for the workflow type or through this field.", + "StartChildWorkflowExecutionInitiatedEventAttributes$lambdaRole": "

    The IAM role attached to this workflow execution to use when invoking AWS Lambda functions.

    ", + "StartWorkflowExecutionInput$lambdaRole": "

    The ARN of an IAM role that authorizes Amazon SWF to invoke AWS Lambda functions.

    In order for this workflow execution to invoke AWS Lambda functions, an appropriate IAM role must be specified either as a default for the workflow type or through this field.", + "WorkflowExecutionConfiguration$lambdaRole": "

    The IAM role used by this workflow execution when invoking AWS Lambda functions.

    ", + "WorkflowExecutionContinuedAsNewEventAttributes$lambdaRole": "

    The IAM role attached to this workflow execution to use when invoking AWS Lambda functions.

    ", + "WorkflowExecutionStartedEventAttributes$lambdaRole": "

    The IAM role attached to this workflow execution to use when invoking AWS Lambda functions.

    ", + "WorkflowTypeConfiguration$defaultLambdaRole": "

    The default IAM role to use when a workflow execution invokes an AWS Lambda function.

    " + } + }, + "CancelTimerDecisionAttributes": { + "base": "

    Provides details of the CancelTimer decision.

    Access Control

    You can use IAM policies to control this decision's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "refs": { + "Decision$cancelTimerDecisionAttributes": "

    Provides details of the CancelTimer decision. It is not set for other decision types.

    " + } + }, + "CancelTimerFailedCause": { + "base": null, + "refs": { + "CancelTimerFailedEventAttributes$cause": "

    The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.

    If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows." + } + }, + "CancelTimerFailedEventAttributes": { + "base": "

    Provides details of the CancelTimerFailed event.

    ", + "refs": { + "HistoryEvent$cancelTimerFailedEventAttributes": "

    If the event is of type CancelTimerFailed then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "CancelWorkflowExecutionDecisionAttributes": { + "base": "

    Provides details of the CancelWorkflowExecution decision.

    Access Control

    You can use IAM policies to control this decision's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "refs": { + "Decision$cancelWorkflowExecutionDecisionAttributes": "

    Provides details of the CancelWorkflowExecution decision. It is not set for other decision types.

    " + } + }, + "CancelWorkflowExecutionFailedCause": { + "base": null, + "refs": { + "CancelWorkflowExecutionFailedEventAttributes$cause": "

    The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.

    If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows." + } + }, + "CancelWorkflowExecutionFailedEventAttributes": { + "base": "

    Provides details of the CancelWorkflowExecutionFailed event.

    ", + "refs": { + "HistoryEvent$cancelWorkflowExecutionFailedEventAttributes": "

    If the event is of type CancelWorkflowExecutionFailed then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "Canceled": { + "base": null, + "refs": { + "ActivityTaskStatus$cancelRequested": "

    Set to true if cancellation of the task is requested.

    ", + "WorkflowExecutionInfo$cancelRequested": "

    Set to true if a cancellation is requested for this workflow execution.

    " + } + }, + "CauseMessage": { + "base": null, + "refs": { + "StartLambdaFunctionFailedEventAttributes$message": "

    The error message (if any).

    " + } + }, + "ChildPolicy": { + "base": null, + "refs": { + "ContinueAsNewWorkflowExecutionDecisionAttributes$childPolicy": "

    If set, specifies the policy to use for the child workflow executions of the new execution if it is terminated by calling the TerminateWorkflowExecution action explicitly or due to an expired timeout. This policy overrides the default child policy specified when registering the workflow type using RegisterWorkflowType.

    The supported child policies are:

    • TERMINATE: the child executions will be terminated.
    • REQUEST_CANCEL: a request to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.
    • ABANDON: no action will be taken. The child executions will continue to run.
    A child policy for this workflow execution must be specified either as a default for the workflow type or through this parameter. If this parameter is not set and no default child policy was specified at registration time, a fault is returned.", + "RegisterWorkflowTypeInput$defaultChildPolicy": "

    If set, specifies the default policy to use for the child workflow executions when a workflow execution of this type is terminated, by calling the TerminateWorkflowExecution action explicitly or due to an expired timeout. This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution decision.

    The supported child policies are:

    • TERMINATE: the child executions will be terminated.
    • REQUEST_CANCEL: a request to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.
    • ABANDON: no action will be taken. The child executions will continue to run.
    ", + "StartChildWorkflowExecutionDecisionAttributes$childPolicy": "

    Optional. If set, specifies the policy to use for the child workflow executions if the workflow execution being started is terminated by calling the TerminateWorkflowExecution action explicitly or due to an expired timeout. This policy overrides the default child policy specified when registering the workflow type using RegisterWorkflowType.

    The supported child policies are:

    • TERMINATE: the child executions will be terminated.
    • REQUEST_CANCEL: a request to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.
    • ABANDON: no action will be taken. The child executions will continue to run.
    A child policy for this workflow execution must be specified either as a default for the workflow type or through this parameter. If this parameter is not set and no default child policy was specified at registration time, a fault is returned.", + "StartChildWorkflowExecutionInitiatedEventAttributes$childPolicy": "

    The policy to use for the child workflow executions if this execution gets terminated by explicitly calling the TerminateWorkflowExecution action or due to an expired timeout.

    The supported child policies are:

    • TERMINATE: the child executions will be terminated.
    • REQUEST_CANCEL: a request to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.
    • ABANDON: no action will be taken. The child executions will continue to run.
    ", + "StartWorkflowExecutionInput$childPolicy": "

    If set, specifies the policy to use for the child workflow executions of this workflow execution if it is terminated, by calling the TerminateWorkflowExecution action explicitly or due to an expired timeout. This policy overrides the default child policy specified when registering the workflow type using RegisterWorkflowType.

    The supported child policies are:

    • TERMINATE: the child executions will be terminated.
    • REQUEST_CANCEL: a request to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.
    • ABANDON: no action will be taken. The child executions will continue to run.
    A child policy for this workflow execution must be specified either as a default for the workflow type or through this parameter. If this parameter is not set and no default child policy was specified at registration time, a fault is returned.", + "TerminateWorkflowExecutionInput$childPolicy": "

    If set, specifies the policy to use for the child workflow executions of the workflow execution being terminated. This policy overrides the child policy specified for the workflow execution at registration time or when starting the execution.

    The supported child policies are:

    • TERMINATE: the child executions will be terminated.
    • REQUEST_CANCEL: a request to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.
    • ABANDON: no action will be taken. The child executions will continue to run.
    A child policy for this workflow execution must be specified either as a default for the workflow type or through this parameter. If this parameter is not set and no default child policy was specified at registration time, a fault is returned.", + "WorkflowExecutionConfiguration$childPolicy": "

    The policy to use for the child workflow executions if this workflow execution is terminated, by calling the TerminateWorkflowExecution action explicitly or due to an expired timeout.

    The supported child policies are:

    • TERMINATE: the child executions will be terminated.
    • REQUEST_CANCEL: a request to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.
    • ABANDON: no action will be taken. The child executions will continue to run.
    ", + "WorkflowExecutionContinuedAsNewEventAttributes$childPolicy": "

    The policy to use for the child workflow executions of the new execution if it is terminated by calling the TerminateWorkflowExecution action explicitly or due to an expired timeout.

    The supported child policies are:

    • TERMINATE: the child executions will be terminated.
    • REQUEST_CANCEL: a request to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.
    • ABANDON: no action will be taken. The child executions will continue to run.
    ", + "WorkflowExecutionStartedEventAttributes$childPolicy": "

    The policy to use for the child workflow executions if this workflow execution is terminated, by calling the TerminateWorkflowExecution action explicitly or due to an expired timeout.

    The supported child policies are:

    • TERMINATE: the child executions will be terminated.
    • REQUEST_CANCEL: a request to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.
    • ABANDON: no action will be taken. The child executions will continue to run.
    ", + "WorkflowExecutionTerminatedEventAttributes$childPolicy": "

    The policy used for the child workflow executions of this workflow execution.

    The supported child policies are:

    • TERMINATE: the child executions will be terminated.
    • REQUEST_CANCEL: a request to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.
    • ABANDON: no action will be taken. The child executions will continue to run.
    ", + "WorkflowExecutionTimedOutEventAttributes$childPolicy": "

    The policy used for the child workflow executions of this workflow execution.

    The supported child policies are:

    • TERMINATE: the child executions will be terminated.
    • REQUEST_CANCEL: a request to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.
    • ABANDON: no action will be taken. The child executions will continue to run.
    ", + "WorkflowTypeConfiguration$defaultChildPolicy": "

    Optional. The default policy to use for the child workflow executions when a workflow execution of this type is terminated, by calling the TerminateWorkflowExecution action explicitly or due to an expired timeout. This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution decision.

    The supported child policies are:

    • TERMINATE: the child executions will be terminated.
    • REQUEST_CANCEL: a request to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.
    • ABANDON: no action will be taken. The child executions will continue to run.
    " + } + }, + "ChildWorkflowExecutionCanceledEventAttributes": { + "base": "

    Provides details of the ChildWorkflowExecutionCanceled event.

    ", + "refs": { + "HistoryEvent$childWorkflowExecutionCanceledEventAttributes": "

    If the event is of type ChildWorkflowExecutionCanceled then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "ChildWorkflowExecutionCompletedEventAttributes": { + "base": "

    Provides details of the ChildWorkflowExecutionCompleted event.

    ", + "refs": { + "HistoryEvent$childWorkflowExecutionCompletedEventAttributes": "

    If the event is of type ChildWorkflowExecutionCompleted then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "ChildWorkflowExecutionFailedEventAttributes": { + "base": "

    Provides details of the ChildWorkflowExecutionFailed event.

    ", + "refs": { + "HistoryEvent$childWorkflowExecutionFailedEventAttributes": "

    If the event is of type ChildWorkflowExecutionFailed then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "ChildWorkflowExecutionStartedEventAttributes": { + "base": "

    Provides details of the ChildWorkflowExecutionStarted event.

    ", + "refs": { + "HistoryEvent$childWorkflowExecutionStartedEventAttributes": "

    If the event is of type ChildWorkflowExecutionStarted then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "ChildWorkflowExecutionTerminatedEventAttributes": { + "base": "

    Provides details of the ChildWorkflowExecutionTerminated event.

    ", + "refs": { + "HistoryEvent$childWorkflowExecutionTerminatedEventAttributes": "

    If the event is of type ChildWorkflowExecutionTerminated then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "ChildWorkflowExecutionTimedOutEventAttributes": { + "base": "

    Provides details of the ChildWorkflowExecutionTimedOut event.

    ", + "refs": { + "HistoryEvent$childWorkflowExecutionTimedOutEventAttributes": "

    If the event is of type ChildWorkflowExecutionTimedOut then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "CloseStatus": { + "base": null, + "refs": { + "CloseStatusFilter$status": "

    Required. The close status that must match the close status of an execution for it to meet the criteria of this filter.

    ", + "WorkflowExecutionInfo$closeStatus": "

    If the execution status is closed, then this specifies how the execution was closed:

    • COMPLETED: the execution was successfully completed.
    • CANCELED: the execution was canceled. Cancellation allows the implementation to gracefully clean up before the execution is closed.
    • TERMINATED: the execution was forcibly terminated.
    • FAILED: the execution failed to complete.
    • TIMED_OUT: the execution did not complete in the allotted time and was automatically timed out.
    • CONTINUED_AS_NEW: the execution is logically continued. This means the current execution was completed and a new execution was started to carry on the workflow.
    " + } + }, + "CloseStatusFilter": { + "base": "

    Used to filter the closed workflow executions in visibility APIs by their close status.

    ", + "refs": { + "CountClosedWorkflowExecutionsInput$closeStatusFilter": "

    If specified, only workflow executions that match this close status are counted. This filter has an effect only if executionStatus is specified as CLOSED.

    closeStatusFilter, executionFilter, typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.", + "ListClosedWorkflowExecutionsInput$closeStatusFilter": "

    If specified, only workflow executions that match this close status are listed. For example, if TERMINATED is specified, then only TERMINATED workflow executions are listed.

    closeStatusFilter, executionFilter, typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request." + } + }, + "CompleteWorkflowExecutionDecisionAttributes": { + "base": "

    Provides details of the CompleteWorkflowExecution decision.

    Access Control

    You can use IAM policies to control this decision's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "refs": { + "Decision$completeWorkflowExecutionDecisionAttributes": "

    Provides details of the CompleteWorkflowExecution decision. It is not set for other decision types.

    " + } + }, + "CompleteWorkflowExecutionFailedCause": { + "base": null, + "refs": { + "CompleteWorkflowExecutionFailedEventAttributes$cause": "

    The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.

    If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows." + } + }, + "CompleteWorkflowExecutionFailedEventAttributes": { + "base": "

    Provides details of the CompleteWorkflowExecutionFailed event.

    ", + "refs": { + "HistoryEvent$completeWorkflowExecutionFailedEventAttributes": "

    If the event is of type CompleteWorkflowExecutionFailed then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "ContinueAsNewWorkflowExecutionDecisionAttributes": { + "base": "

    Provides details of the ContinueAsNewWorkflowExecution decision.

    Access Control

    You can use IAM policies to control this decision's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • Constrain the following parameters by using a Condition element with the appropriate keys.
      • tag: Optional. A tag used to identify the workflow execution.
      • taskList: String constraint. The key is swf:taskList.name.
      • workflowType.version: String constraint. The key is swf:workflowType.version.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "refs": { + "Decision$continueAsNewWorkflowExecutionDecisionAttributes": "

    Provides details of the ContinueAsNewWorkflowExecution decision. It is not set for other decision types.

    " + } + }, + "ContinueAsNewWorkflowExecutionFailedCause": { + "base": null, + "refs": { + "ContinueAsNewWorkflowExecutionFailedEventAttributes$cause": "

    The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.

    If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows." + } + }, + "ContinueAsNewWorkflowExecutionFailedEventAttributes": { + "base": "

    Provides details of the ContinueAsNewWorkflowExecutionFailed event.

    ", + "refs": { + "HistoryEvent$continueAsNewWorkflowExecutionFailedEventAttributes": "

    If the event is of type ContinueAsNewWorkflowExecutionFailed then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "Count": { + "base": null, + "refs": { + "PendingTaskCount$count": "

    The number of tasks in the task list.

    ", + "WorkflowExecutionCount$count": "

    The number of workflow executions.

    ", + "WorkflowExecutionOpenCounts$openActivityTasks": "

    The count of activity tasks whose status is OPEN.

    ", + "WorkflowExecutionOpenCounts$openTimers": "

    The count of timers started by this workflow execution that have not fired yet.

    ", + "WorkflowExecutionOpenCounts$openChildWorkflowExecutions": "

    The count of child workflow executions whose status is OPEN.

    ", + "WorkflowExecutionOpenCounts$openLambdaFunctions": "

    The count of AWS Lambda functions that are currently executing.

    " + } + }, + "CountClosedWorkflowExecutionsInput": { + "base": null, + "refs": { + } + }, + "CountOpenWorkflowExecutionsInput": { + "base": null, + "refs": { + } + }, + "CountPendingActivityTasksInput": { + "base": null, + "refs": { + } + }, + "CountPendingDecisionTasksInput": { + "base": null, + "refs": { + } + }, + "Data": { + "base": null, + "refs": { + "ActivityTask$input": "

    The inputs provided when the activity task was scheduled. The form of the input is user defined and should be meaningful to the activity implementation.

    ", + "ActivityTaskCanceledEventAttributes$details": "

    Details of the cancellation (if any).

    ", + "ActivityTaskCompletedEventAttributes$result": "

    The results of the activity task (if any).

    ", + "ActivityTaskFailedEventAttributes$details": "

    The details of the failure (if any).

    ", + "ActivityTaskScheduledEventAttributes$input": "

    The input provided to the activity task.

    ", + "ActivityTaskScheduledEventAttributes$control": "

    Optional. Data attached to the event that can be used by the decider in subsequent workflow tasks. This data is not sent to the activity.

    ", + "CancelWorkflowExecutionDecisionAttributes$details": "

    Optional. details of the cancellation.

    ", + "ChildWorkflowExecutionCanceledEventAttributes$details": "

    Details of the cancellation (if provided).

    ", + "ChildWorkflowExecutionCompletedEventAttributes$result": "

    The result of the child workflow execution (if any).

    ", + "ChildWorkflowExecutionFailedEventAttributes$details": "

    The details of the failure (if provided).

    ", + "CompleteWorkflowExecutionDecisionAttributes$result": "

    The result of the workflow execution. The form of the result is implementation defined.

    ", + "ContinueAsNewWorkflowExecutionDecisionAttributes$input": "

    The input provided to the new workflow execution.

    ", + "DecisionTaskCompletedEventAttributes$executionContext": "

    User defined context for the workflow execution.

    ", + "FailWorkflowExecutionDecisionAttributes$details": "

    Optional. Details of the failure.

    ", + "LambdaFunctionCompletedEventAttributes$result": "

    The result of the function execution (if any).

    ", + "LambdaFunctionFailedEventAttributes$details": "

    The details of the failure (if any).

    ", + "MarkerRecordedEventAttributes$details": "

    Details of the marker (if any).

    ", + "RecordMarkerDecisionAttributes$details": "

    Optional. Details of the marker.

    ", + "RequestCancelExternalWorkflowExecutionDecisionAttributes$control": "

    Optional. Data attached to the event that can be used by the decider in subsequent workflow tasks.

    ", + "RequestCancelExternalWorkflowExecutionFailedEventAttributes$control": null, + "RequestCancelExternalWorkflowExecutionInitiatedEventAttributes$control": "

    Optional. Data attached to the event that can be used by the decider in subsequent workflow tasks.

    ", + "RespondActivityTaskCanceledInput$details": "

    Optional. Information about the cancellation.

    ", + "RespondActivityTaskCompletedInput$result": "

    The result of the activity task. It is a free form string that is implementation specific.
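
    A minimal sketch of a worker reporting such a result, assuming the vendored aws-sdk-go swf client (the result format is application defined):

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/swf"
    )

    // completeTask reports an activity result back to Amazon SWF. taskToken is
    // the token delivered with the task by PollForActivityTask.
    func completeTask(svc *swf.SWF, taskToken, result string) error {
        _, err := svc.RespondActivityTaskCompleted(&swf.RespondActivityTaskCompletedInput{
            TaskToken: aws.String(taskToken),
            Result:    aws.String(result), // free form, implementation specific
        })
        return err
    }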

    ", + "RespondActivityTaskFailedInput$details": "

    Optional. Detailed information about the failure.

    ", + "RespondDecisionTaskCompletedInput$executionContext": "

    User defined context to add to workflow execution.

    ", + "ScheduleActivityTaskDecisionAttributes$control": "

    Optional. Data attached to the event that can be used by the decider in subsequent workflow tasks. This data is not sent to the activity.

    ", + "ScheduleActivityTaskDecisionAttributes$input": "

    The input provided to the activity task.

    ", + "SignalExternalWorkflowExecutionDecisionAttributes$input": "

    Optional. Input data to be provided with the signal. The target workflow execution will use the signal name and input data to process the signal.

    ", + "SignalExternalWorkflowExecutionDecisionAttributes$control": "

    Optional. Data attached to the event that can be used by the decider in subsequent decision tasks.

    ", + "SignalExternalWorkflowExecutionFailedEventAttributes$control": null, + "SignalExternalWorkflowExecutionInitiatedEventAttributes$input": "

    Input provided to the signal (if any).

    ", + "SignalExternalWorkflowExecutionInitiatedEventAttributes$control": "

    Optional. Data attached to the event that can be used by the decider in subsequent decision tasks.

    ", + "SignalWorkflowExecutionInput$input": "

    Data to attach to the WorkflowExecutionSignaled event in the target workflow execution's history.
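
    A sketch of sending such a signal with the vendored aws-sdk-go swf client; the signal name and JSON payload here are placeholders:

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/swf"
    )

    // signal records a WorkflowExecutionSignaled event, carrying the given
    // input, in the target workflow execution's history.
    func signal(svc *swf.SWF, domain, workflowID string) error {
        _, err := svc.SignalWorkflowExecution(&swf.SignalWorkflowExecutionInput{
            Domain:     aws.String(domain),
            WorkflowId: aws.String(workflowID), // RunId may also be set to target a specific run
            SignalName: aws.String("order-updated"),
            Input:      aws.String(`{"orderId": 42}`),
        })
        return err
    }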

    ", + "StartChildWorkflowExecutionDecisionAttributes$control": "

    Optional. Data attached to the event that can be used by the decider in subsequent workflow tasks. This data is not sent to the child workflow execution.

    ", + "StartChildWorkflowExecutionDecisionAttributes$input": "

    The input to be provided to the workflow execution.

    ", + "StartChildWorkflowExecutionFailedEventAttributes$control": null, + "StartChildWorkflowExecutionInitiatedEventAttributes$control": "

    Optional. Data attached to the event that can be used by the decider in subsequent decision tasks. This data is not sent to the activity.

    ", + "StartChildWorkflowExecutionInitiatedEventAttributes$input": "

    The inputs provided to the child workflow execution (if any).

    ", + "StartTimerDecisionAttributes$control": "

    Optional. Data attached to the event that can be used by the decider in subsequent workflow tasks.

    ", + "StartWorkflowExecutionInput$input": "

    The input for the workflow execution. This is a free form string which should be meaningful to the workflow you are starting. This input is made available to the new workflow execution in the WorkflowExecutionStarted history event.

    ", + "TerminateWorkflowExecutionInput$details": "

    Optional. Details for terminating the workflow execution.

    ", + "TimerStartedEventAttributes$control": "

    Optional. Data attached to the event that can be used by the decider in subsequent workflow tasks.

    ", + "WorkflowExecutionCanceledEventAttributes$details": "

    Details for the cancellation (if any).

    ", + "WorkflowExecutionCompletedEventAttributes$result": "

    The result produced by the workflow execution upon successful completion.

    ", + "WorkflowExecutionContinuedAsNewEventAttributes$input": "

    The input provided to the new workflow execution.

    ", + "WorkflowExecutionDetail$latestExecutionContext": "

    The latest executionContext provided by the decider for this workflow execution. A decider can provide an executionContext (a free-form string) when closing a decision task using RespondDecisionTaskCompleted.

    ", + "WorkflowExecutionFailedEventAttributes$details": "

    The details of the failure (if any).

    ", + "WorkflowExecutionSignaledEventAttributes$input": "

    Inputs provided with the signal (if any). The decider can use the signal name and inputs to determine how to process the signal.

    ", + "WorkflowExecutionStartedEventAttributes$input": "

    The input provided to the workflow execution (if any).

    ", + "WorkflowExecutionTerminatedEventAttributes$details": "

    The details provided for the termination (if any).

    " + } + }, + "Decision": { + "base": "

    Specifies a decision made by the decider. A decision can be one of these types:

    • CancelTimer: cancels a previously started timer and records a TimerCanceled event in the history.
    • CancelWorkflowExecution: closes the workflow execution and records a WorkflowExecutionCanceled event in the history.
    • CompleteWorkflowExecution: closes the workflow execution and records a WorkflowExecutionCompleted event in the history.
    • ContinueAsNewWorkflowExecution: closes the workflow execution and starts a new workflow execution of the same type using the same workflow ID and a unique run ID. A WorkflowExecutionContinuedAsNew event is recorded in the history.
    • FailWorkflowExecution: closes the workflow execution and records a WorkflowExecutionFailed event in the history.
    • RecordMarker: records a MarkerRecorded event in the history. Markers can be used for adding custom information in the history, for instance to let deciders know that they do not need to look at the history beyond the marker event.
    • RequestCancelActivityTask: attempts to cancel a previously scheduled activity task. If the activity task was scheduled but has not been assigned to a worker, then it will be canceled. If the activity task was already assigned to a worker, then the worker will be informed that cancellation has been requested in the response to RecordActivityTaskHeartbeat.
    • RequestCancelExternalWorkflowExecution: requests that a request be made to cancel the specified external workflow execution and records a RequestCancelExternalWorkflowExecutionInitiated event in the history.
    • ScheduleActivityTask: schedules an activity task.
    • ScheduleLambdaFunction: schedules an AWS Lambda function.
    • SignalExternalWorkflowExecution: requests a signal to be delivered to the specified external workflow execution and records a SignalExternalWorkflowExecutionInitiated event in the history.
    • StartChildWorkflowExecution: requests that a child workflow execution be started and records a StartChildWorkflowExecutionInitiated event in the history. The child workflow execution is a separate workflow execution with its own history.
    • StartTimer: starts a timer for this workflow execution and records a TimerStarted event in the history. This timer will fire after the specified delay and record a TimerFired event.

    Access Control

    If you grant permission to use RespondDecisionTaskCompleted, you can use IAM policies to express permissions for the list of decisions returned by this action as if they were members of the API. Treating decisions as a pseudo API maintains a uniform conceptual model and helps keep policies readable. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    Decision Failure

    Decisions can fail for several reasons:

    • The ordering of decisions should follow a logical flow. Some decisions might not make sense in the current context of the workflow execution and will therefore fail.
    • A limit on your account was reached.
    • The decision lacks sufficient permissions.

    One of the following events might be added to the history to indicate an error. The event attribute's cause parameter indicates the cause. If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    • ScheduleActivityTaskFailed: a ScheduleActivityTask decision failed. This could happen if the activity type specified in the decision is not registered, is in a deprecated state, or the decision is not properly configured.
    • ScheduleLambdaFunctionFailed: a ScheduleLambdaFunction decision failed. This could happen if the AWS Lambda function specified in the decision does not exist, or the AWS Lambda service's limits are exceeded.
    • RequestCancelActivityTaskFailed: a RequestCancelActivityTask decision failed. This could happen if there is no open activity task with the specified activityId.
    • StartTimerFailed: a StartTimer decision failed. This could happen if there is another open timer with the same timerId.
    • CancelTimerFailed: a CancelTimer decision failed. This could happen if there is no open timer with the specified timerId.
    • StartChildWorkflowExecutionFailed: a StartChildWorkflowExecution decision failed. This could happen if the workflow type specified is not registered, is deprecated, or the decision is not properly configured.
    • SignalExternalWorkflowExecutionFailed: a SignalExternalWorkflowExecution decision failed. This could happen if the workflowID specified in the decision was incorrect.
    • RequestCancelExternalWorkflowExecutionFailed: a RequestCancelExternalWorkflowExecution decision failed. This could happen if the workflowID specified in the decision was incorrect.
    • CancelWorkflowExecutionFailed: a CancelWorkflowExecution decision failed. This could happen if there is an unhandled decision task pending in the workflow execution.
    • CompleteWorkflowExecutionFailed: a CompleteWorkflowExecution decision failed. This could happen if there is an unhandled decision task pending in the workflow execution.
    • ContinueAsNewWorkflowExecutionFailed: a ContinueAsNewWorkflowExecution decision failed. This could happen if there is an unhandled decision task pending in the workflow execution or the ContinueAsNewWorkflowExecution decision was not configured correctly.
    • FailWorkflowExecutionFailed: a FailWorkflowExecution decision failed. This could happen if there is an unhandled decision task pending in the workflow execution.

    The preceding error events might occur due to an error in the decider logic, which might put the workflow execution in an unstable state. The cause field in the event structure for the error event indicates the cause of the error.

    A workflow execution may be closed by the decider by returning one of the following decisions when completing a decision task: CompleteWorkflowExecution, FailWorkflowExecution, CancelWorkflowExecution, and ContinueAsNewWorkflowExecution. An UnhandledDecision fault will be returned if a workflow closing decision is specified and a signal or activity event had been added to the history while the decision task was being performed by the decider. Unlike the above situations, which are logic issues, this fault is always possible because of race conditions in a distributed system. The right action here is to call RespondDecisionTaskCompleted without any decisions. This would result in another decision task with these new events included in the history. The decider should handle the new events and may decide to close the workflow execution.

    How to code a decision

    You code a decision by first setting the decision type field to one of the above decision values, and then setting the corresponding attributes field, as in the sketch below:
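
    For illustration, a minimal Go sketch against the vendored aws-sdk-go swf client; the task token is a placeholder (it would come from a prior PollForDecisionTask call), and the generated swf.DecisionType* enum constants are assumed:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/swf"
    )

    func main() {
        svc := swf.New(session.Must(session.NewSession()))

        // First set the decision type, then fill in the matching attributes field.
        decision := &swf.Decision{
            DecisionType: aws.String(swf.DecisionTypeCompleteWorkflowExecution),
            CompleteWorkflowExecutionDecisionAttributes: &swf.CompleteWorkflowExecutionDecisionAttributes{
                Result: aws.String("done"), // free form, implementation defined
            },
        }

        taskToken := "<task token from PollForDecisionTask>" // placeholder
        _, err := svc.RespondDecisionTaskCompleted(&swf.RespondDecisionTaskCompletedInput{
            TaskToken: aws.String(taskToken),
            Decisions: []*swf.Decision{decision},
        })
        if err != nil {
            log.Fatal(err)
        }
    }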

    ", + "refs": { + "DecisionList$member": null + } + }, + "DecisionList": { + "base": null, + "refs": { + "RespondDecisionTaskCompletedInput$decisions": "

    The list of decisions (possibly empty) made by the decider while processing this decision task. See the docs for the decision structure for details.

    " + } + }, + "DecisionTask": { + "base": "

    A structure that represents a decision task. Decision tasks are sent to deciders in order for them to make decisions.

    ", + "refs": { + } + }, + "DecisionTaskCompletedEventAttributes": { + "base": "

    Provides details of the DecisionTaskCompleted event.

    ", + "refs": { + "HistoryEvent$decisionTaskCompletedEventAttributes": "

    If the event is of type DecisionTaskCompleted then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "DecisionTaskScheduledEventAttributes": { + "base": "

    Provides details about the DecisionTaskScheduled event.

    ", + "refs": { + "HistoryEvent$decisionTaskScheduledEventAttributes": "

    If the event is of type DecisionTaskScheduled then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "DecisionTaskStartedEventAttributes": { + "base": "

    Provides details of the DecisionTaskStarted event.

    ", + "refs": { + "HistoryEvent$decisionTaskStartedEventAttributes": "

    If the event is of type DecisionTaskStarted then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "DecisionTaskTimedOutEventAttributes": { + "base": "

    Provides details of the DecisionTaskTimedOut event.

    ", + "refs": { + "HistoryEvent$decisionTaskTimedOutEventAttributes": "

    If the event is of type DecisionTaskTimedOut then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "DecisionTaskTimeoutType": { + "base": null, + "refs": { + "DecisionTaskTimedOutEventAttributes$timeoutType": "

    The type of timeout that expired before the decision task could be completed.

    " + } + }, + "DecisionType": { + "base": null, + "refs": { + "Decision$decisionType": "

    Specifies the type of the decision.

    " + } + }, + "DefaultUndefinedFault": { + "base": null, + "refs": { + } + }, + "DeprecateActivityTypeInput": { + "base": null, + "refs": { + } + }, + "DeprecateDomainInput": { + "base": null, + "refs": { + } + }, + "DeprecateWorkflowTypeInput": { + "base": null, + "refs": { + } + }, + "DescribeActivityTypeInput": { + "base": null, + "refs": { + } + }, + "DescribeDomainInput": { + "base": null, + "refs": { + } + }, + "DescribeWorkflowExecutionInput": { + "base": null, + "refs": { + } + }, + "DescribeWorkflowTypeInput": { + "base": null, + "refs": { + } + }, + "Description": { + "base": null, + "refs": { + "ActivityTypeInfo$description": "

    The description of the activity type provided in RegisterActivityType.

    ", + "DomainInfo$description": "

    The description of the domain provided through RegisterDomain.

    ", + "RegisterActivityTypeInput$description": "

    A textual description of the activity type.

    ", + "RegisterDomainInput$description": "

    A text description of the domain.

    ", + "RegisterWorkflowTypeInput$description": "

    Textual description of the workflow type.

    ", + "WorkflowTypeInfo$description": "

    The description of the type registered through RegisterWorkflowType.

    " + } + }, + "DomainAlreadyExistsFault": { + "base": "

    Returned if the specified domain already exists. You will get this fault even if the existing domain is in deprecated status.

    ", + "refs": { + } + }, + "DomainConfiguration": { + "base": "

    Contains the configuration settings of a domain.

    ", + "refs": { + "DomainDetail$configuration": null + } + }, + "DomainDeprecatedFault": { + "base": "

    Returned when the specified domain has been deprecated.

    ", + "refs": { + } + }, + "DomainDetail": { + "base": "

    Contains details of a domain.

    ", + "refs": { + } + }, + "DomainInfo": { + "base": "

    Contains general information about a domain.

    ", + "refs": { + "DomainDetail$domainInfo": null, + "DomainInfoList$member": null + } + }, + "DomainInfoList": { + "base": null, + "refs": { + "DomainInfos$domainInfos": "

    A list of DomainInfo structures.

    " + } + }, + "DomainInfos": { + "base": "

    Contains a paginated collection of DomainInfo structures.

    ", + "refs": { + } + }, + "DomainName": { + "base": null, + "refs": { + "CountClosedWorkflowExecutionsInput$domain": "

    The name of the domain containing the workflow executions to count.

    ", + "CountOpenWorkflowExecutionsInput$domain": "

    The name of the domain containing the workflow executions to count.

    ", + "CountPendingActivityTasksInput$domain": "

    The name of the domain that contains the task list.

    ", + "CountPendingDecisionTasksInput$domain": "

    The name of the domain that contains the task list.

    ", + "DeprecateActivityTypeInput$domain": "

    The name of the domain in which the activity type is registered.

    ", + "DeprecateDomainInput$name": "

    The name of the domain to deprecate.

    ", + "DeprecateWorkflowTypeInput$domain": "

    The name of the domain in which the workflow type is registered.

    ", + "DescribeActivityTypeInput$domain": "

    The name of the domain in which the activity type is registered.

    ", + "DescribeDomainInput$name": "

    The name of the domain to describe.

    ", + "DescribeWorkflowExecutionInput$domain": "

    The name of the domain containing the workflow execution.

    ", + "DescribeWorkflowTypeInput$domain": "

    The name of the domain in which this workflow type is registered.

    ", + "DomainInfo$name": "

    The name of the domain. This name is unique within the account.

    ", + "GetWorkflowExecutionHistoryInput$domain": "

    The name of the domain containing the workflow execution.

    ", + "ListActivityTypesInput$domain": "

    The name of the domain in which the activity types have been registered.

    ", + "ListClosedWorkflowExecutionsInput$domain": "

    The name of the domain that contains the workflow executions to list.

    ", + "ListOpenWorkflowExecutionsInput$domain": "

    The name of the domain that contains the workflow executions to list.

    ", + "ListWorkflowTypesInput$domain": "

    The name of the domain in which the workflow types have been registered.

    ", + "PollForActivityTaskInput$domain": "

    The name of the domain that contains the task lists being polled.

    ", + "PollForDecisionTaskInput$domain": "

    The name of the domain containing the task lists to poll.

    ", + "RegisterActivityTypeInput$domain": "

    The name of the domain in which this activity is to be registered.

    ", + "RegisterDomainInput$name": "

    Name of the domain to register. The name must be unique in the region that the domain is registered in.

    The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f - \\u009f). Also, it must not contain the literal string \"arn\".

    ", + "RegisterWorkflowTypeInput$domain": "

    The name of the domain in which to register the workflow type.

    ", + "RequestCancelWorkflowExecutionInput$domain": "

    The name of the domain containing the workflow execution to cancel.

    ", + "SignalWorkflowExecutionInput$domain": "

    The name of the domain containing the workflow execution to signal.

    ", + "StartWorkflowExecutionInput$domain": "

    The name of the domain in which the workflow execution is created.

    ", + "TerminateWorkflowExecutionInput$domain": "

    The domain of the workflow execution to terminate.

    " + } + }, + "DurationInDays": { + "base": null, + "refs": { + "DomainConfiguration$workflowExecutionRetentionPeriodInDays": "

    The retention period for workflow executions in this domain.

    ", + "RegisterDomainInput$workflowExecutionRetentionPeriodInDays": "

    The duration (in days) that records and histories of workflow executions on the domain should be kept by the service. After the retention period, the workflow execution is not available in the results of visibility calls.

    If you pass the value NONE or 0 (zero), then the workflow execution history will not be retained. As soon as the workflow execution completes, the execution record and its history are deleted.

    The maximum workflow execution retention period is 90 days. For more information about Amazon SWF service limits, see: Amazon SWF Service Limits in the Amazon SWF Developer Guide.
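
    A sketch of registering a domain with a 30-day retention period through the vendored aws-sdk-go swf client; the name and description are placeholders:

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/swf"
    )

    // registerDomain keeps records of closed workflow executions visible to
    // visibility calls for 30 days.
    func registerDomain(svc *swf.SWF) error {
        _, err := svc.RegisterDomain(&swf.RegisterDomainInput{
            Name:        aws.String("my-domain"),
            Description: aws.String("example domain"),
            // Passed as a string; "NONE" or "0" disables retention entirely.
            WorkflowExecutionRetentionPeriodInDays: aws.String("30"),
        })
        return err
    }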

    " + } + }, + "DurationInSeconds": { + "base": null, + "refs": { + "StartTimerDecisionAttributes$startToFireTimeout": "

    Required. The duration to wait before firing the timer.

    The duration is specified in seconds; an integer greater than or equal to 0.
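
    For example, a sketch of a StartTimer decision that fires after five minutes, assuming the vendored aws-sdk-go swf client and its generated enum constants; the timer ID is a placeholder and must be unique among this execution's open timers:

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/swf"
    )

    // startTimerDecision builds a decision that records a TimerStarted event
    // and causes a TimerFired event after the given delay.
    func startTimerDecision() *swf.Decision {
        return &swf.Decision{
            DecisionType: aws.String(swf.DecisionTypeStartTimer),
            StartTimerDecisionAttributes: &swf.StartTimerDecisionAttributes{
                TimerId:            aws.String("retry-backoff-1"),
                StartToFireTimeout: aws.String("300"), // seconds, passed as a string
            },
        }
    }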

    ", + "TimerStartedEventAttributes$startToFireTimeout": "

    The duration of time after which the timer will fire.

    The duration is specified in seconds; an integer greater than or equal to 0.

    ", + "WorkflowExecutionConfiguration$taskStartToCloseTimeout": "

    The maximum duration allowed for decision tasks for this workflow execution.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    ", + "WorkflowExecutionConfiguration$executionStartToCloseTimeout": "

    The total duration for this workflow execution.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    " + } + }, + "DurationInSecondsOptional": { + "base": null, + "refs": { + "ActivityTaskScheduledEventAttributes$scheduleToStartTimeout": "

    The maximum amount of time the activity task can wait to be assigned to a worker.

    ", + "ActivityTaskScheduledEventAttributes$scheduleToCloseTimeout": "

    The maximum amount of time for this activity task.

    ", + "ActivityTaskScheduledEventAttributes$startToCloseTimeout": "

    The maximum amount of time a worker may take to process the activity task.

    ", + "ActivityTaskScheduledEventAttributes$heartbeatTimeout": "

    The maximum time before which the worker processing this task must report progress by calling RecordActivityTaskHeartbeat. If the timeout is exceeded, the activity task is automatically timed out. If the worker subsequently attempts to record a heartbeat or return a result, it will be ignored.
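
    A sketch of a worker heartbeating through the vendored aws-sdk-go swf client; the progress format is application defined:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/swf"
    )

    // heartbeat reports progress for a long-running activity task and tells
    // the worker whether cancellation of the task has been requested.
    func heartbeat(svc *swf.SWF, taskToken string, percent int) (bool, error) {
        out, err := svc.RecordActivityTaskHeartbeat(&swf.RecordActivityTaskHeartbeatInput{
            TaskToken: aws.String(taskToken),
            Details:   aws.String(fmt.Sprintf("%d%% complete", percent)),
        })
        if err != nil {
            return false, err
        }
        return aws.BoolValue(out.CancelRequested), nil
    }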

    ", + "ActivityTypeConfiguration$defaultTaskStartToCloseTimeout": "

    Optional. The default maximum duration for tasks of an activity type specified when registering the activity type. You can override this default when scheduling a task through the ScheduleActivityTask decision.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    ", + "ActivityTypeConfiguration$defaultTaskHeartbeatTimeout": "

    Optional. The default maximum time, in seconds, before which a worker processing a task must report progress by calling RecordActivityTaskHeartbeat.

    You can specify this value only when registering an activity type. The registered default value can be overridden when you schedule a task through the ScheduleActivityTask decision. If the timeout is exceeded, the activity task is automatically timed out. If the activity worker subsequently attempts to record a heartbeat or returns a result, the activity worker receives an UnknownResource fault. In this case, Amazon SWF no longer considers the activity task to be valid; the activity worker should clean up the activity task.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    ", + "ActivityTypeConfiguration$defaultTaskScheduleToStartTimeout": "

    Optional. The default maximum duration, specified when registering the activity type, that a task of an activity type can wait before being assigned to a worker. You can override this default when scheduling a task through the ScheduleActivityTask decision.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    ", + "ActivityTypeConfiguration$defaultTaskScheduleToCloseTimeout": "

    Optional. The default maximum duration, specified when registering the activity type, for tasks of this activity type. You can override this default when scheduling a task through the ScheduleActivityTask decision.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    ", + "ContinueAsNewWorkflowExecutionDecisionAttributes$executionStartToCloseTimeout": "

    If set, specifies the total duration for this workflow execution. This overrides the defaultExecutionStartToCloseTimeout specified when registering the workflow type.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    An execution start-to-close timeout for this workflow execution must be specified either as a default for the workflow type or through this field. If neither this field is set nor a default execution start-to-close timeout was specified at registration time then a fault will be returned.", + "ContinueAsNewWorkflowExecutionDecisionAttributes$taskStartToCloseTimeout": "

    Specifies the maximum duration of decision tasks for the new workflow execution. This parameter overrides the defaultTaskStartToCloseTimeout specified when registering the workflow type using RegisterWorkflowType.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    A task start-to-close timeout for the new workflow execution must be specified either as a default for the workflow type or through this parameter. If neither this parameter is set nor a default task start-to-close timeout was specified at registration time then a fault will be returned.", + "DecisionTaskScheduledEventAttributes$startToCloseTimeout": "

    The maximum duration for this decision task. The task is considered timed out if it is not completed within this duration.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    ", + "LambdaFunctionScheduledEventAttributes$startToCloseTimeout": "

    The maximum time, in seconds, that the AWS Lambda function can take to execute from start to close before it is marked as failed.

    ", + "RegisterActivityTypeInput$defaultTaskStartToCloseTimeout": "

    If set, specifies the default maximum duration that a worker can take to process tasks of this activity type. This default can be overridden when scheduling an activity task using the ScheduleActivityTask decision.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    ", + "RegisterActivityTypeInput$defaultTaskHeartbeatTimeout": "

    If set, specifies the default maximum time before which a worker processing a task of this type must report progress by calling RecordActivityTaskHeartbeat. If the timeout is exceeded, the activity task is automatically timed out. This default can be overridden when scheduling an activity task using the ScheduleActivityTask decision. If the activity worker subsequently attempts to record a heartbeat or returns a result, the activity worker receives an UnknownResource fault. In this case, Amazon SWF no longer considers the activity task to be valid; the activity worker should clean up the activity task.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    ", + "RegisterActivityTypeInput$defaultTaskScheduleToStartTimeout": "

    If set, specifies the default maximum duration that a task of this activity type can wait before being assigned to a worker. This default can be overridden when scheduling an activity task using the ScheduleActivityTask decision.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    ", + "RegisterActivityTypeInput$defaultTaskScheduleToCloseTimeout": "

    If set, specifies the default maximum duration for a task of this activity type. This default can be overridden when scheduling an activity task using the ScheduleActivityTask decision.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    ", + "RegisterWorkflowTypeInput$defaultTaskStartToCloseTimeout": "

    If set, specifies the default maximum duration of decision tasks for this workflow type. This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution decision.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    ", + "RegisterWorkflowTypeInput$defaultExecutionStartToCloseTimeout": "

    If set, specifies the default maximum duration for executions of this workflow type. You can override this default when starting an execution through the StartWorkflowExecution action or StartChildWorkflowExecution decision.

    The duration is specified in seconds; an integer greater than or equal to 0. Unlike some of the other timeout parameters in Amazon SWF, you cannot specify a value of \"NONE\" for defaultExecutionStartToCloseTimeout; there is a one-year max limit on the time that a workflow execution can run. Exceeding this limit will always cause the workflow execution to time out.
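
    A sketch of registering a workflow type with these defaults via the vendored aws-sdk-go swf client; the names, version, and timeout values are placeholders:

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/swf"
    )

    // registerWorkflowType sets default timeouts at registration time; note
    // that the execution start-to-close default cannot be "NONE".
    func registerWorkflowType(svc *swf.SWF) error {
        _, err := svc.RegisterWorkflowType(&swf.RegisterWorkflowTypeInput{
            Domain:                              aws.String("my-domain"),
            Name:                                aws.String("order-workflow"),
            Version:                             aws.String("1.0"),
            DefaultTaskList:                     &swf.TaskList{Name: aws.String("my-task-list")},
            DefaultTaskStartToCloseTimeout:      aws.String("60"),    // decision tasks
            DefaultExecutionStartToCloseTimeout: aws.String("86400"), // one day
        })
        return err
    }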

    ", + "ScheduleActivityTaskDecisionAttributes$scheduleToCloseTimeout": "

    The maximum duration for this activity task.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    A schedule-to-close timeout for this activity task must be specified either as a default for the activity type or through this field. If neither this field is set nor a default schedule-to-close timeout was specified at registration time then a fault will be returned.", + "ScheduleActivityTaskDecisionAttributes$scheduleToStartTimeout": "

    Optional. If set, specifies the maximum duration the activity task can wait to be assigned to a worker. This overrides the default schedule-to-start timeout specified when registering the activity type using RegisterActivityType.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    A schedule-to-start timeout for this activity task must be specified either as a default for the activity type or through this field. If neither this field is set nor a default schedule-to-start timeout was specified at registration time then a fault will be returned.", + "ScheduleActivityTaskDecisionAttributes$startToCloseTimeout": "

    If set, specifies the maximum duration a worker may take to process this activity task. This overrides the default start-to-close timeout specified when registering the activity type using RegisterActivityType.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    A start-to-close timeout for this activity task must be specified either as a default for the activity type or through this field. If neither this field is set nor a default start-to-close timeout was specified at registration time then a fault will be returned.", + "ScheduleActivityTaskDecisionAttributes$heartbeatTimeout": "

    If set, specifies the maximum time before which a worker processing a task of this type must report progress by calling RecordActivityTaskHeartbeat. If the timeout is exceeded, the activity task is automatically timed out. If the worker subsequently attempts to record a heartbeat or returns a result, it will be ignored. This overrides the default heartbeat timeout specified when registering the activity type using RegisterActivityType.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.
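
    As a sketch, a ScheduleActivityTask decision overriding all four activity timeouts, assuming the vendored aws-sdk-go swf client and its generated enum constants; the activity type, ID, task list, and timeout values are placeholders:

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/swf"
    )

    // scheduleActivityDecision overrides the registered defaults for this task.
    func scheduleActivityDecision() *swf.Decision {
        return &swf.Decision{
            DecisionType: aws.String(swf.DecisionTypeScheduleActivityTask),
            ScheduleActivityTaskDecisionAttributes: &swf.ScheduleActivityTaskDecisionAttributes{
                ActivityType:           &swf.ActivityType{Name: aws.String("resize-image"), Version: aws.String("1.0")},
                ActivityId:             aws.String("resize-image-42"),
                TaskList:               &swf.TaskList{Name: aws.String("my-task-list")},
                ScheduleToStartTimeout: aws.String("120"), // wait for a worker
                StartToCloseTimeout:    aws.String("300"), // worker processing limit
                ScheduleToCloseTimeout: aws.String("420"), // overall limit
                HeartbeatTimeout:       aws.String("60"),  // max gap between heartbeats
            },
        }
    }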

    ", + "ScheduleLambdaFunctionDecisionAttributes$startToCloseTimeout": "

    If set, specifies the maximum duration the function may take to execute.

    ", + "StartChildWorkflowExecutionDecisionAttributes$executionStartToCloseTimeout": "

    The total duration for this workflow execution. This overrides the defaultExecutionStartToCloseTimeout specified when registering the workflow type.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    An execution start-to-close timeout for this workflow execution must be specified either as a default for the workflow type or through this parameter. If neither this parameter is set nor a default execution start-to-close timeout was specified at registration time then a fault will be returned.", + "StartChildWorkflowExecutionDecisionAttributes$taskStartToCloseTimeout": "

    Specifies the maximum duration of decision tasks for this workflow execution. This parameter overrides the defaultTaskStartToCloseTimeout specified when registering the workflow type using RegisterWorkflowType.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    A task start-to-close timeout for this workflow execution must be specified either as a default for the workflow type or through this parameter. If neither this parameter is set nor a default task start-to-close timeout was specified at registration time then a fault will be returned.", + "StartChildWorkflowExecutionInitiatedEventAttributes$executionStartToCloseTimeout": "

    The maximum duration for the child workflow execution. If the workflow execution is not closed within this duration, it will be timed out and force terminated.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    ", + "StartChildWorkflowExecutionInitiatedEventAttributes$taskStartToCloseTimeout": "

    The maximum duration allowed for the decision tasks for this workflow execution.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    ", + "StartWorkflowExecutionInput$executionStartToCloseTimeout": "

    The total duration for this workflow execution. This overrides the defaultExecutionStartToCloseTimeout specified when registering the workflow type.

    The duration is specified in seconds; an integer greater than or equal to 0. Exceeding this limit will cause the workflow execution to time out. Unlike some of the other timeout parameters in Amazon SWF, you cannot specify a value of \"NONE\" for this timeout; there is a one-year max limit on the time that a workflow execution can run.

    An execution start-to-close timeout must be specified either through this parameter or as a default when the workflow type is registered. If neither this parameter nor a default execution start-to-close timeout is specified, a fault is returned.", + "StartWorkflowExecutionInput$taskStartToCloseTimeout": "

    Specifies the maximum duration of decision tasks for this workflow execution. This parameter overrides the defaultTaskStartToCloseTimeout specified when registering the workflow type using RegisterWorkflowType.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    A task start-to-close timeout for this workflow execution must be specified either as a default for the workflow type or through this parameter. If neither this parameter is set nor a default task start-to-close timeout was specified at registration time then a fault will be returned.", + "WorkflowExecutionContinuedAsNewEventAttributes$executionStartToCloseTimeout": "

    The total duration allowed for the new workflow execution.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    ", + "WorkflowExecutionContinuedAsNewEventAttributes$taskStartToCloseTimeout": "

    The maximum duration of decision tasks for the new workflow execution.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    ", + "WorkflowExecutionStartedEventAttributes$executionStartToCloseTimeout": "

    The maximum duration for this workflow execution.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    ", + "WorkflowExecutionStartedEventAttributes$taskStartToCloseTimeout": "

    The maximum duration of decision tasks for this workflow type.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    ", + "WorkflowTypeConfiguration$defaultTaskStartToCloseTimeout": "

    Optional. The default maximum duration, specified when registering the workflow type, that a decision task for executions of this workflow type might take before returning completion or failure. If the task does not close in the specified time, then the task is automatically timed out and rescheduled. If the decider eventually reports a completion or failure, it is ignored. This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution decision.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    ", + "WorkflowTypeConfiguration$defaultExecutionStartToCloseTimeout": "

    Optional. The default maximum duration, specified when registering the workflow type, for executions of this workflow type. This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution decision.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    " + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "DefaultUndefinedFault$message": null, + "DomainAlreadyExistsFault$message": "

    A description that may help with diagnosing the cause of the fault.

    ", + "DomainDeprecatedFault$message": "

    A description that may help with diagnosing the cause of the fault.

    ", + "LimitExceededFault$message": "

    A description that may help with diagnosing the cause of the fault.

    ", + "OperationNotPermittedFault$message": "

    A description that may help with diagnosing the cause of the fault.

    ", + "TypeAlreadyExistsFault$message": "

    A description that may help with diagnosing the cause of the fault.

    ", + "TypeDeprecatedFault$message": "

    A description that may help with diagnosing the cause of the fault.

    ", + "UnknownResourceFault$message": "

    A description that may help with diagnosing the cause of the fault.

    ", + "WorkflowExecutionAlreadyStartedFault$message": "

    A description that may help with diagnosing the cause of the fault.
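
    These fault messages surface through the SDK's error type. A sketch of inspecting one with the vendored aws/awserr package, treating an already-existing domain as success (the fault code string is assumed to match the fault name):

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/awserr"
        "github.com/aws/aws-sdk-go/service/swf"
    )

    // registerOrIgnore treats DomainAlreadyExistsFault as success; for other
    // failures the fault's message, when present, helps diagnose the cause.
    func registerOrIgnore(svc *swf.SWF, name string) error {
        _, err := svc.RegisterDomain(&swf.RegisterDomainInput{
            Name: aws.String(name),
            WorkflowExecutionRetentionPeriodInDays: aws.String("30"),
        })
        if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "DomainAlreadyExistsFault" {
            return nil // the domain exists (possibly deprecated); nothing to register
        }
        return err
    }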

    " + } + }, + "EventId": { + "base": null, + "refs": { + "ActivityTask$startedEventId": "

    The ID of the ActivityTaskStarted event recorded in the history.

    ", + "ActivityTaskCancelRequestedEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the RequestCancelActivityTask decision for this cancellation request. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ActivityTaskCanceledEventAttributes$scheduledEventId": "

    The ID of the ActivityTaskScheduled event that was recorded when this activity task was scheduled. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ActivityTaskCanceledEventAttributes$startedEventId": "

    The ID of the ActivityTaskStarted event recorded when this activity task was started. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ActivityTaskCanceledEventAttributes$latestCancelRequestedEventId": "

    If set, contains the ID of the last ActivityTaskCancelRequested event recorded for this activity task. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ActivityTaskCompletedEventAttributes$scheduledEventId": "

    The ID of the ActivityTaskScheduled event that was recorded when this activity task was scheduled. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ActivityTaskCompletedEventAttributes$startedEventId": "

    The ID of the ActivityTaskStarted event recorded when this activity task was started. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ActivityTaskFailedEventAttributes$scheduledEventId": "

    The ID of the ActivityTaskScheduled event that was recorded when this activity task was scheduled. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ActivityTaskFailedEventAttributes$startedEventId": "

    The ID of the ActivityTaskStarted event recorded when this activity task was started. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ActivityTaskScheduledEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision that resulted in the scheduling of this activity task. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ActivityTaskStartedEventAttributes$scheduledEventId": "

    The ID of the ActivityTaskScheduled event that was recorded when this activity task was scheduled. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ActivityTaskTimedOutEventAttributes$scheduledEventId": "

    The ID of the ActivityTaskScheduled event that was recorded when this activity task was scheduled. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ActivityTaskTimedOutEventAttributes$startedEventId": "

    The ID of the ActivityTaskStarted event recorded when this activity task was started. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "CancelTimerFailedEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the CancelTimer decision to cancel this timer. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "CancelWorkflowExecutionFailedEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the CancelWorkflowExecution decision for this cancellation request. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ChildWorkflowExecutionCanceledEventAttributes$initiatedEventId": "

    The ID of the StartChildWorkflowExecutionInitiated event corresponding to the StartChildWorkflowExecution decision to start this child workflow execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ChildWorkflowExecutionCanceledEventAttributes$startedEventId": "

    The ID of the ChildWorkflowExecutionStarted event recorded when this child workflow execution was started. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ChildWorkflowExecutionCompletedEventAttributes$initiatedEventId": "

    The ID of the StartChildWorkflowExecutionInitiated event corresponding to the StartChildWorkflowExecution decision to start this child workflow execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ChildWorkflowExecutionCompletedEventAttributes$startedEventId": "

    The ID of the ChildWorkflowExecutionStarted event recorded when this child workflow execution was started. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ChildWorkflowExecutionFailedEventAttributes$initiatedEventId": "

    The ID of the StartChildWorkflowExecutionInitiated event corresponding to the StartChildWorkflowExecution decision to start this child workflow execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ChildWorkflowExecutionFailedEventAttributes$startedEventId": "

    The ID of the ChildWorkflowExecutionStarted event recorded when this child workflow execution was started. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ChildWorkflowExecutionStartedEventAttributes$initiatedEventId": "

    The ID of the StartChildWorkflowExecutionInitiated event corresponding to the StartChildWorkflowExecution decision to start this child workflow execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ChildWorkflowExecutionTerminatedEventAttributes$initiatedEventId": "

    The ID of the StartChildWorkflowExecutionInitiated event corresponding to the StartChildWorkflowExecution decision to start this child workflow execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ChildWorkflowExecutionTerminatedEventAttributes$startedEventId": "

    The ID of the ChildWorkflowExecutionStarted event recorded when this child workflow execution was started. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ChildWorkflowExecutionTimedOutEventAttributes$initiatedEventId": "

    The ID of the StartChildWorkflowExecutionInitiated event corresponding to the StartChildWorkflowExecution decision to start this child workflow execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ChildWorkflowExecutionTimedOutEventAttributes$startedEventId": "

    The ID of the ChildWorkflowExecutionStarted event recorded when this child workflow execution was started. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "CompleteWorkflowExecutionFailedEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the CompleteWorkflowExecution decision to complete this execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ContinueAsNewWorkflowExecutionFailedEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the ContinueAsNewWorkflowExecution decision that started this execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "DecisionTask$startedEventId": "

    The ID of the DecisionTaskStarted event recorded in the history.

    ", + "DecisionTask$previousStartedEventId": "

    The ID of the DecisionTaskStarted event of the previous decision task of this workflow execution that was processed by the decider. This can be used to determine which events in the history are new since the last decision task received by the decider.
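
    A sketch of using this cutoff with the vendored aws-sdk-go swf client to pick out only the new events; the domain and task-list names are placeholders:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/swf"
    )

    func main() {
        svc := swf.New(session.Must(session.NewSession()))

        task, err := svc.PollForDecisionTask(&swf.PollForDecisionTaskInput{
            Domain:   aws.String("my-domain"),
            TaskList: &swf.TaskList{Name: aws.String("my-task-list")},
        })
        if err != nil {
            log.Fatal(err)
        }

        // Events with an ID greater than previousStartedEventId were added
        // after the decider last saw this execution.
        cutoff := aws.Int64Value(task.PreviousStartedEventId)
        for _, e := range task.Events {
            if aws.Int64Value(e.EventId) > cutoff {
                fmt.Println("new event:", aws.Int64Value(e.EventId), aws.StringValue(e.EventType))
            }
        }
    }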

    ", + "DecisionTaskCompletedEventAttributes$scheduledEventId": "

    The ID of the DecisionTaskScheduled event that was recorded when this decision task was scheduled. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "DecisionTaskCompletedEventAttributes$startedEventId": "

    The ID of the DecisionTaskStarted event recorded when this decision task was started. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "DecisionTaskStartedEventAttributes$scheduledEventId": "

    The ID of the DecisionTaskScheduled event that was recorded when this decision task was scheduled. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "DecisionTaskTimedOutEventAttributes$scheduledEventId": "

    The ID of the DecisionTaskScheduled event that was recorded when this decision task was scheduled. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "DecisionTaskTimedOutEventAttributes$startedEventId": "

    The ID of the DecisionTaskStarted event recorded when this decision task was started. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ExternalWorkflowExecutionCancelRequestedEventAttributes$initiatedEventId": "

    The ID of the RequestCancelExternalWorkflowExecutionInitiated event corresponding to the RequestCancelExternalWorkflowExecution decision to cancel this external workflow execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ExternalWorkflowExecutionSignaledEventAttributes$initiatedEventId": "

    The ID of the SignalExternalWorkflowExecutionInitiated event corresponding to the SignalExternalWorkflowExecution decision to request this signal. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "FailWorkflowExecutionFailedEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the FailWorkflowExecution decision to fail this execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "HistoryEvent$eventId": "

    The system generated ID of the event. This ID uniquely identifies the event within the workflow execution history.

    ", + "LambdaFunctionCompletedEventAttributes$scheduledEventId": "

    The ID of the LambdaFunctionScheduled event that was recorded when this AWS Lambda function was scheduled. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "LambdaFunctionCompletedEventAttributes$startedEventId": "

    The ID of the LambdaFunctionStarted event recorded in the history.

    ", + "LambdaFunctionFailedEventAttributes$scheduledEventId": "

    The ID of the LambdaFunctionScheduled event that was recorded when this AWS Lambda function was scheduled. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "LambdaFunctionFailedEventAttributes$startedEventId": "

    The ID of the LambdaFunctionStarted event recorded in the history.

    ", + "LambdaFunctionScheduledEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event for the decision that resulted in the scheduling of this AWS Lambda function. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "LambdaFunctionStartedEventAttributes$scheduledEventId": "

    The ID of the LambdaFunctionScheduled event that was recorded when this AWS Lambda function was scheduled. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "LambdaFunctionTimedOutEventAttributes$scheduledEventId": "

    The ID of the LambdaFunctionScheduled event that was recorded when this AWS Lambda function was scheduled. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "LambdaFunctionTimedOutEventAttributes$startedEventId": "

    The ID of the LambdaFunctionStarted event recorded in the history.

    ", + "MarkerRecordedEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the RecordMarker decision that requested this marker. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "RecordMarkerFailedEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the RecordMarker decision that requested this marker. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "RequestCancelActivityTaskFailedEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the RequestCancelActivityTask decision for this cancellation request. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "RequestCancelExternalWorkflowExecutionFailedEventAttributes$initiatedEventId": "

    The ID of the RequestCancelExternalWorkflowExecutionInitiated event corresponding to the RequestCancelExternalWorkflowExecution decision to cancel this external workflow execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "RequestCancelExternalWorkflowExecutionFailedEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the RequestCancelExternalWorkflowExecution decision for this cancellation request. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "RequestCancelExternalWorkflowExecutionInitiatedEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the RequestCancelExternalWorkflowExecution decision for this cancellation request. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ScheduleActivityTaskFailedEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision that resulted in the scheduling of this activity task. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ScheduleLambdaFunctionFailedEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision that resulted in the scheduling of this AWS Lambda function. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "SignalExternalWorkflowExecutionFailedEventAttributes$initiatedEventId": "

    The ID of the SignalExternalWorkflowExecutionInitiated event corresponding to the SignalExternalWorkflowExecution decision to request this signal. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "SignalExternalWorkflowExecutionFailedEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the SignalExternalWorkflowExecution decision for this signal. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "SignalExternalWorkflowExecutionInitiatedEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the SignalExternalWorkflowExecution decision for this signal. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "StartChildWorkflowExecutionFailedEventAttributes$initiatedEventId": "

    The ID of the StartChildWorkflowExecutionInitiated event corresponding to the StartChildWorkflowExecution decision to start this child workflow execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "StartChildWorkflowExecutionFailedEventAttributes$decisionTaskCompletedEventId": "

The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the StartChildWorkflowExecution decision to request this child workflow execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "StartChildWorkflowExecutionInitiatedEventAttributes$decisionTaskCompletedEventId": "

The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the StartChildWorkflowExecution decision to request this child workflow execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "StartLambdaFunctionFailedEventAttributes$scheduledEventId": "

    The ID of the LambdaFunctionScheduled event that was recorded when this AWS Lambda function was scheduled. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "StartTimerFailedEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the StartTimer decision for this activity task. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "TimerCanceledEventAttributes$startedEventId": "

    The ID of the TimerStarted event that was recorded when this timer was started. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "TimerCanceledEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the CancelTimer decision to cancel this timer. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "TimerFiredEventAttributes$startedEventId": "

    The ID of the TimerStarted event that was recorded when this timer was started. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "TimerStartedEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the StartTimer decision for this activity task. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "WorkflowExecutionCancelRequestedEventAttributes$externalInitiatedEventId": "

The ID of the RequestCancelExternalWorkflowExecutionInitiated event corresponding to the RequestCancelExternalWorkflowExecution decision to cancel this workflow execution. The source event with this ID can be found in the history of the source workflow execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "WorkflowExecutionCanceledEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the CancelWorkflowExecution decision for this cancellation request. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "WorkflowExecutionCompletedEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the CompleteWorkflowExecution decision to complete this execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "WorkflowExecutionContinuedAsNewEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the ContinueAsNewWorkflowExecution decision that started this execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "WorkflowExecutionFailedEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the FailWorkflowExecution decision to fail this execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "WorkflowExecutionSignaledEventAttributes$externalInitiatedEventId": "

The ID of the SignalExternalWorkflowExecutionInitiated event corresponding to the SignalExternalWorkflowExecution decision to signal this workflow execution. The source event with this ID can be found in the history of the source workflow execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event. This field is set only if the signal was initiated by another workflow execution.

    ", + "WorkflowExecutionStartedEventAttributes$parentInitiatedEventId": "

    The ID of the StartChildWorkflowExecutionInitiated event corresponding to the StartChildWorkflowExecution decision to start this workflow execution. The source event with this ID can be found in the history of the source workflow execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    " + } + }, + "EventType": { + "base": null, + "refs": { + "HistoryEvent$eventType": "

    The type of the history event.

    " + } + }, + "ExecutionStatus": { + "base": null, + "refs": { + "WorkflowExecutionInfo$executionStatus": "

    The current status of the execution.

    " + } + }, + "ExecutionTimeFilter": { + "base": "

    Used to filter the workflow executions in visibility APIs by various time-based rules. Each parameter, if specified, defines a rule that must be satisfied by each returned query result. The parameter values are in the Unix Time format. For example: \"oldestDate\": 1325376070.

    ", + "refs": { + "CountClosedWorkflowExecutionsInput$startTimeFilter": "

    If specified, only workflow executions that meet the start time criteria of the filter are counted.

    startTimeFilter and closeTimeFilter are mutually exclusive. You must specify one of these in a request but not both.", + "CountClosedWorkflowExecutionsInput$closeTimeFilter": "

    If specified, only workflow executions that meet the close time criteria of the filter are counted.

    startTimeFilter and closeTimeFilter are mutually exclusive. You must specify one of these in a request but not both.", + "CountOpenWorkflowExecutionsInput$startTimeFilter": "

    Specifies the start time criteria that workflow executions must meet in order to be counted.

    ", + "ListClosedWorkflowExecutionsInput$startTimeFilter": "

    If specified, the workflow executions are included in the returned results based on whether their start times are within the range specified by this filter. Also, if this parameter is specified, the returned results are ordered by their start times.

    startTimeFilter and closeTimeFilter are mutually exclusive. You must specify one of these in a request but not both.", + "ListClosedWorkflowExecutionsInput$closeTimeFilter": "

    If specified, the workflow executions are included in the returned results based on whether their close times are within the range specified by this filter. Also, if this parameter is specified, the returned results are ordered by their close times.

    startTimeFilter and closeTimeFilter are mutually exclusive. You must specify one of these in a request but not both.", + "ListOpenWorkflowExecutionsInput$startTimeFilter": "

    Workflow executions are included in the returned results based on whether their start times are within the range specified by this filter.
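
For illustration, a minimal Go sketch of this filter, assuming an initialized *swf.SWF client (svc, built from this SDK's aws, session, and service/swf packages) and a hypothetical domain named "ExampleDomain":

```go
// Count open executions started within the last 24 hours. OldestDate is
// the Unix-time lower bound described above; aws.Time converts a time.Time.
out, err := svc.CountOpenWorkflowExecutions(&swf.CountOpenWorkflowExecutionsInput{
	Domain: aws.String("ExampleDomain"), // hypothetical domain
	StartTimeFilter: &swf.ExecutionTimeFilter{
		OldestDate: aws.Time(time.Now().Add(-24 * time.Hour)),
	},
})
if err != nil {
	log.Fatal(err)
}
fmt.Println("open executions:", aws.Int64Value(out.Count))
```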

    " + } + }, + "ExternalWorkflowExecutionCancelRequestedEventAttributes": { + "base": "

    Provides details of the ExternalWorkflowExecutionCancelRequested event.

    ", + "refs": { + "HistoryEvent$externalWorkflowExecutionCancelRequestedEventAttributes": "

    If the event is of type ExternalWorkflowExecutionCancelRequested then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "ExternalWorkflowExecutionSignaledEventAttributes": { + "base": "

    Provides details of the ExternalWorkflowExecutionSignaled event.

    ", + "refs": { + "HistoryEvent$externalWorkflowExecutionSignaledEventAttributes": "

    If the event is of type ExternalWorkflowExecutionSignaled then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "FailWorkflowExecutionDecisionAttributes": { + "base": "

    Provides details of the FailWorkflowExecution decision.

    Access Control

    You can use IAM policies to control this decision's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "refs": { + "Decision$failWorkflowExecutionDecisionAttributes": "

    Provides details of the FailWorkflowExecution decision. It is not set for other decision types.

    " + } + }, + "FailWorkflowExecutionFailedCause": { + "base": null, + "refs": { + "FailWorkflowExecutionFailedEventAttributes$cause": "

    The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.

    If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows." + } + }, + "FailWorkflowExecutionFailedEventAttributes": { + "base": "

    Provides details of the FailWorkflowExecutionFailed event.

    ", + "refs": { + "HistoryEvent$failWorkflowExecutionFailedEventAttributes": "

    If the event is of type FailWorkflowExecutionFailed then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "FailureReason": { + "base": null, + "refs": { + "ActivityTaskFailedEventAttributes$reason": "

    The reason provided for the failure (if any).

    ", + "ChildWorkflowExecutionFailedEventAttributes$reason": "

    The reason for the failure (if provided).

    ", + "FailWorkflowExecutionDecisionAttributes$reason": "

    A descriptive reason for the failure that may help in diagnostics.

    ", + "LambdaFunctionFailedEventAttributes$reason": "

    The reason provided for the failure (if any).

    ", + "RespondActivityTaskFailedInput$reason": "

    Description of the error that may assist in diagnostics.

    ", + "WorkflowExecutionFailedEventAttributes$reason": "

    The descriptive reason provided for the failure (if any).

    " + } + }, + "FunctionId": { + "base": null, + "refs": { + "LambdaFunctionScheduledEventAttributes$id": "

    The unique Amazon SWF ID for the AWS Lambda task.

    ", + "ScheduleLambdaFunctionDecisionAttributes$id": "

Required. The unique Amazon SWF ID of the AWS Lambda task.

The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f - \\u009f). Also, it must not contain the literal string \"arn\".

    ", + "ScheduleLambdaFunctionFailedEventAttributes$id": "

    The unique Amazon SWF ID of the AWS Lambda task.

    " + } + }, + "FunctionInput": { + "base": null, + "refs": { + "LambdaFunctionScheduledEventAttributes$input": "

    Input provided to the AWS Lambda function.

    ", + "ScheduleLambdaFunctionDecisionAttributes$input": "

    The input provided to the AWS Lambda function.

    " + } + }, + "FunctionName": { + "base": null, + "refs": { + "LambdaFunctionScheduledEventAttributes$name": "

    The name of the scheduled AWS Lambda function.

    ", + "ScheduleLambdaFunctionDecisionAttributes$name": "

    Required. The name of the AWS Lambda function to invoke.

    ", + "ScheduleLambdaFunctionFailedEventAttributes$name": "

    The name of the scheduled AWS Lambda function.

    " + } + }, + "GetWorkflowExecutionHistoryInput": { + "base": null, + "refs": { + } + }, + "History": { + "base": "

Paginated representation of a workflow history for a workflow execution. This is the up-to-date, complete, and authoritative record of the events related to all tasks and events in the life of the workflow execution.

    ", + "refs": { + } + }, + "HistoryEvent": { + "base": "

    Event within a workflow execution. A history event can be one of these types:

    • WorkflowExecutionStarted: The workflow execution was started.
    • WorkflowExecutionCompleted: The workflow execution was closed due to successful completion.
    • WorkflowExecutionFailed: The workflow execution closed due to a failure.
    • WorkflowExecutionTimedOut: The workflow execution was closed because a time out was exceeded.
    • WorkflowExecutionCanceled: The workflow execution was successfully canceled and closed.
    • WorkflowExecutionTerminated: The workflow execution was terminated.
    • WorkflowExecutionContinuedAsNew: The workflow execution was closed and a new execution of the same type was created with the same workflowId.
    • WorkflowExecutionCancelRequested: A request to cancel this workflow execution was made.
    • DecisionTaskScheduled: A decision task was scheduled for the workflow execution.
    • DecisionTaskStarted: The decision task was dispatched to a decider.
    • DecisionTaskCompleted: The decider successfully completed a decision task by calling RespondDecisionTaskCompleted.
    • DecisionTaskTimedOut: The decision task timed out.
    • ActivityTaskScheduled: An activity task was scheduled for execution.
    • ScheduleActivityTaskFailed: Failed to process ScheduleActivityTask decision. This happens when the decision is not configured properly, for example the activity type specified is not registered.
    • ActivityTaskStarted: The scheduled activity task was dispatched to a worker.
    • ActivityTaskCompleted: An activity worker successfully completed an activity task by calling RespondActivityTaskCompleted.
    • ActivityTaskFailed: An activity worker failed an activity task by calling RespondActivityTaskFailed.
    • ActivityTaskTimedOut: The activity task timed out.
    • ActivityTaskCanceled: The activity task was successfully canceled.
    • ActivityTaskCancelRequested: A RequestCancelActivityTask decision was received by the system.
    • RequestCancelActivityTaskFailed: Failed to process RequestCancelActivityTask decision. This happens when the decision is not configured properly.
    • WorkflowExecutionSignaled: An external signal was received for the workflow execution.
    • MarkerRecorded: A marker was recorded in the workflow history as the result of a RecordMarker decision.
    • TimerStarted: A timer was started for the workflow execution due to a StartTimer decision.
    • StartTimerFailed: Failed to process StartTimer decision. This happens when the decision is not configured properly, for example a timer already exists with the specified timer ID.
    • TimerFired: A timer, previously started for this workflow execution, fired.
    • TimerCanceled: A timer, previously started for this workflow execution, was successfully canceled.
    • CancelTimerFailed: Failed to process CancelTimer decision. This happens when the decision is not configured properly, for example no timer exists with the specified timer ID.
    • StartChildWorkflowExecutionInitiated: A request was made to start a child workflow execution.
    • StartChildWorkflowExecutionFailed: Failed to process StartChildWorkflowExecution decision. This happens when the decision is not configured properly, for example the workflow type specified is not registered.
    • ChildWorkflowExecutionStarted: A child workflow execution was successfully started.
    • ChildWorkflowExecutionCompleted: A child workflow execution, started by this workflow execution, completed successfully and was closed.
    • ChildWorkflowExecutionFailed: A child workflow execution, started by this workflow execution, failed to complete successfully and was closed.
    • ChildWorkflowExecutionTimedOut: A child workflow execution, started by this workflow execution, timed out and was closed.
    • ChildWorkflowExecutionCanceled: A child workflow execution, started by this workflow execution, was canceled and closed.
    • ChildWorkflowExecutionTerminated: A child workflow execution, started by this workflow execution, was terminated.
    • SignalExternalWorkflowExecutionInitiated: A request to signal an external workflow was made.
    • ExternalWorkflowExecutionSignaled: A signal, requested by this workflow execution, was successfully delivered to the target external workflow execution.
    • SignalExternalWorkflowExecutionFailed: The request to signal an external workflow execution failed.
    • RequestCancelExternalWorkflowExecutionInitiated: A request was made to request the cancellation of an external workflow execution.
    • ExternalWorkflowExecutionCancelRequested: Request to cancel an external workflow execution was successfully delivered to the target execution.
    • RequestCancelExternalWorkflowExecutionFailed: Request to cancel an external workflow execution failed.
    • LambdaFunctionScheduled: An AWS Lambda function was scheduled for execution.
    • LambdaFunctionStarted: The scheduled function was invoked in the AWS Lambda service.
    • LambdaFunctionCompleted: The AWS Lambda function successfully completed.
    • LambdaFunctionFailed: The AWS Lambda function execution failed.
    • LambdaFunctionTimedOut: The AWS Lambda function execution timed out.
    • ScheduleLambdaFunctionFailed: Failed to process ScheduleLambdaFunction decision. This happens when the workflow execution does not have the proper IAM role attached to invoke AWS Lambda functions.
• StartLambdaFunctionFailed: Failed to invoke the scheduled function in the AWS Lambda service. This happens when the AWS Lambda service is not available in the current region, or it has received too many requests.
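
As a minimal sketch of how a decider consumes these events (assuming an initialized *swf.SWF client svc and hypothetical domain and task-list names):

```go
// Poll for a decision task and dispatch on the type of each history event.
task, err := svc.PollForDecisionTask(&swf.PollForDecisionTaskInput{
	Domain:   aws.String("ExampleDomain"),
	TaskList: &swf.TaskList{Name: aws.String("example-task-list")},
	Identity: aws.String("decider-1"),
})
if err != nil {
	log.Fatal(err)
}
for _, ev := range task.Events {
	switch aws.StringValue(ev.EventType) {
	case swf.EventTypeWorkflowExecutionStarted:
		// First decision task: schedule the initial activity here.
	case swf.EventTypeActivityTaskCompleted:
		// ev.ActivityTaskCompletedEventAttributes carries the result.
	case swf.EventTypeActivityTaskFailed:
		// Inspect reason/details; decide whether to retry or fail.
	}
}
```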
    ", + "refs": { + "HistoryEventList$member": null + } + }, + "HistoryEventList": { + "base": null, + "refs": { + "DecisionTask$events": "

    A paginated list of history events of the workflow execution. The decider uses this during the processing of the decision task.

    ", + "History$events": "

    The list of history events.

    " + } + }, + "Identity": { + "base": null, + "refs": { + "ActivityTaskStartedEventAttributes$identity": "

    Identity of the worker that was assigned this task. This aids diagnostics when problems arise. The form of this identity is user defined.

    ", + "DecisionTaskStartedEventAttributes$identity": "

    Identity of the decider making the request. This enables diagnostic tracing when problems arise. The form of this identity is user defined.

    ", + "PollForActivityTaskInput$identity": "

    Identity of the worker making the request, recorded in the ActivityTaskStarted event in the workflow history. This enables diagnostic tracing when problems arise. The form of this identity is user defined.
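
For example, a worker can pass a host-derived identity when polling (a sketch; the client, domain, and names are assumptions):

```go
// Poll for an activity task; Identity is free-form and is recorded in
// the ActivityTaskStarted event for diagnostic tracing.
task, err := svc.PollForActivityTask(&swf.PollForActivityTaskInput{
	Domain:   aws.String("ExampleDomain"),
	TaskList: &swf.TaskList{Name: aws.String("example-task-list")},
	Identity: aws.String("worker-host-1"),
})
if err == nil && task.TaskToken != nil {
	// Run the activity, then call RespondActivityTaskCompleted.
}
```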

    ", + "PollForDecisionTaskInput$identity": "

    Identity of the decider making the request, which is recorded in the DecisionTaskStarted event in the workflow history. This enables diagnostic tracing when problems arise. The form of this identity is user defined.

    " + } + }, + "LambdaFunctionCompletedEventAttributes": { + "base": "

    Provides details for the LambdaFunctionCompleted event.

    ", + "refs": { + "HistoryEvent$lambdaFunctionCompletedEventAttributes": null + } + }, + "LambdaFunctionFailedEventAttributes": { + "base": "

    Provides details for the LambdaFunctionFailed event.

    ", + "refs": { + "HistoryEvent$lambdaFunctionFailedEventAttributes": null + } + }, + "LambdaFunctionScheduledEventAttributes": { + "base": "

    Provides details for the LambdaFunctionScheduled event.

    ", + "refs": { + "HistoryEvent$lambdaFunctionScheduledEventAttributes": null + } + }, + "LambdaFunctionStartedEventAttributes": { + "base": "

    Provides details for the LambdaFunctionStarted event.

    ", + "refs": { + "HistoryEvent$lambdaFunctionStartedEventAttributes": null + } + }, + "LambdaFunctionTimedOutEventAttributes": { + "base": "

    Provides details for the LambdaFunctionTimedOut event.

    ", + "refs": { + "HistoryEvent$lambdaFunctionTimedOutEventAttributes": null + } + }, + "LambdaFunctionTimeoutType": { + "base": null, + "refs": { + "LambdaFunctionTimedOutEventAttributes$timeoutType": "

    The type of the timeout that caused this event.

    " + } + }, + "LimitExceededFault": { + "base": "

Returned by any operation if a system-imposed limitation has been reached. To address this fault, you should either clean up unused resources or increase the limit by contacting AWS.

    ", + "refs": { + } + }, + "LimitedData": { + "base": null, + "refs": { + "ActivityTaskTimedOutEventAttributes$details": "

    Contains the content of the details parameter for the last call made by the activity to RecordActivityTaskHeartbeat.

    ", + "RecordActivityTaskHeartbeatInput$details": "

    If specified, contains details about the progress of the task.
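
A minimal heartbeat sketch (taskToken would come from a prior PollForActivityTask response; the details string is arbitrary):

```go
// Report progress; if the activity later times out, this Details value
// surfaces in the ActivityTaskTimedOut event's details field.
status, err := svc.RecordActivityTaskHeartbeat(&swf.RecordActivityTaskHeartbeatInput{
	TaskToken: taskToken,
	Details:   aws.String("processed 50 of 100 records"),
})
if err == nil && aws.BoolValue(status.CancelRequested) {
	// Cancellation was requested: stop work and respond with
	// RespondActivityTaskCanceled instead of Completed.
}
```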

    " + } + }, + "ListActivityTypesInput": { + "base": null, + "refs": { + } + }, + "ListClosedWorkflowExecutionsInput": { + "base": null, + "refs": { + } + }, + "ListDomainsInput": { + "base": null, + "refs": { + } + }, + "ListOpenWorkflowExecutionsInput": { + "base": null, + "refs": { + } + }, + "ListWorkflowTypesInput": { + "base": null, + "refs": { + } + }, + "MarkerName": { + "base": null, + "refs": { + "MarkerRecordedEventAttributes$markerName": "

    The name of the marker.

    ", + "RecordMarkerDecisionAttributes$markerName": "

    Required. The name of the marker.

    ", + "RecordMarkerFailedEventAttributes$markerName": "

    The marker's name.

    " + } + }, + "MarkerRecordedEventAttributes": { + "base": "

    Provides details of the MarkerRecorded event.

    ", + "refs": { + "HistoryEvent$markerRecordedEventAttributes": "

    If the event is of type MarkerRecorded then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "Name": { + "base": null, + "refs": { + "ActivityType$name": "

    The name of this activity.

    The combination of activity type name and version must be unique within a domain.", + "ListActivityTypesInput$name": "

    If specified, only lists the activity types that have this name.

    ", + "ListWorkflowTypesInput$name": "

    If specified, lists the workflow type with this name.

    ", + "RegisterActivityTypeInput$name": "

    The name of the activity type within the domain.

The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f - \\u009f). Also, it must not contain the literal string \"arn\".

    ", + "RegisterWorkflowTypeInput$name": "

    The name of the workflow type.

The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f - \\u009f). Also, it must not contain the literal string \"arn\".

    ", + "TaskList$name": "

    The name of the task list.

    ", + "WorkflowType$name": "

    Required. The name of the workflow type.

The combination of workflow type name and version must be unique within a domain.", + "WorkflowTypeFilter$name": "

    Required. Name of the workflow type.

    " + } + }, + "OpenDecisionTasksCount": { + "base": null, + "refs": { + "WorkflowExecutionOpenCounts$openDecisionTasks": "

    The count of decision tasks whose status is OPEN. A workflow execution can have at most one open decision task.

    " + } + }, + "OperationNotPermittedFault": { + "base": "

    Returned when the caller does not have sufficient permissions to invoke the action.

    ", + "refs": { + } + }, + "PageSize": { + "base": null, + "refs": { + "GetWorkflowExecutionHistoryInput$maximumPageSize": "

The maximum number of results that will be returned per call. nextPageToken can be used to obtain further pages of results. The default is 1000, which is the maximum allowed page size. You can, however, specify a page size smaller than the maximum.

    This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum.

    ", + "ListActivityTypesInput$maximumPageSize": "

The maximum number of results that will be returned per call. nextPageToken can be used to obtain further pages of results. The default is 1000, which is the maximum allowed page size. You can, however, specify a page size smaller than the maximum.

    This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum.

    ", + "ListClosedWorkflowExecutionsInput$maximumPageSize": "

The maximum number of results that will be returned per call. nextPageToken can be used to obtain further pages of results. The default is 1000, which is the maximum allowed page size. You can, however, specify a page size smaller than the maximum.

    This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum.

    ", + "ListDomainsInput$maximumPageSize": "

The maximum number of results that will be returned per call. nextPageToken can be used to obtain further pages of results. The default is 1000, which is the maximum allowed page size. You can, however, specify a page size smaller than the maximum.

    This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum.

    ", + "ListOpenWorkflowExecutionsInput$maximumPageSize": "

The maximum number of results that will be returned per call. nextPageToken can be used to obtain further pages of results. The default is 1000, which is the maximum allowed page size. You can, however, specify a page size smaller than the maximum.

    This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum.

    ", + "ListWorkflowTypesInput$maximumPageSize": "

The maximum number of results that will be returned per call. nextPageToken can be used to obtain further pages of results. The default is 1000, which is the maximum allowed page size. You can, however, specify a page size smaller than the maximum.

    This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum.

    ", + "PollForDecisionTaskInput$maximumPageSize": "

The maximum number of results that will be returned per call. nextPageToken can be used to obtain further pages of results. The default is 1000, which is the maximum allowed page size. You can, however, specify a page size smaller than the maximum.

    This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum.

    " + } + }, + "PageToken": { + "base": null, + "refs": { + "ActivityTypeInfos$nextPageToken": "

    If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.

    The configured maximumPageSize determines how many results can be returned in a single call.

    ", + "DecisionTask$nextPageToken": "

    If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.

    The configured maximumPageSize determines how many results can be returned in a single call.

    ", + "DomainInfos$nextPageToken": "

    If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.

    The configured maximumPageSize determines how many results can be returned in a single call.

    ", + "GetWorkflowExecutionHistoryInput$nextPageToken": "

    If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.

    The configured maximumPageSize determines how many results can be returned in a single call.
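
The loop below sketches this paging contract for GetWorkflowExecutionHistory (the client, domain, and execution identifiers are assumptions): only nextPageToken changes between calls.

```go
// Page through a workflow execution's full history.
input := &swf.GetWorkflowExecutionHistoryInput{
	Domain: aws.String("ExampleDomain"),
	Execution: &swf.WorkflowExecution{
		WorkflowId: aws.String("order-12345"),
		RunId:      aws.String("example-run-id"),
	},
	MaximumPageSize: aws.Int64(100),  // at most 1000
	ReverseOrder:    aws.Bool(false), // ascending eventTimestamp order
}
var events []*swf.HistoryEvent
for {
	page, err := svc.GetWorkflowExecutionHistory(input)
	if err != nil {
		log.Fatal(err)
	}
	events = append(events, page.Events...)
	if page.NextPageToken == nil {
		break // no more pages
	}
	input.NextPageToken = page.NextPageToken // keep all other arguments unchanged
}
fmt.Println("total events:", len(events))
```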

    ", + "History$nextPageToken": "

    If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.

    The configured maximumPageSize determines how many results can be returned in a single call.

    ", + "ListActivityTypesInput$nextPageToken": "

    If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.

    The configured maximumPageSize determines how many results can be returned in a single call.

    ", + "ListClosedWorkflowExecutionsInput$nextPageToken": "

    If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.

    The configured maximumPageSize determines how many results can be returned in a single call.

    ", + "ListDomainsInput$nextPageToken": "

    If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.

    The configured maximumPageSize determines how many results can be returned in a single call.

    ", + "ListOpenWorkflowExecutionsInput$nextPageToken": "

    If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.

    The configured maximumPageSize determines how many results can be returned in a single call.

    ", + "ListWorkflowTypesInput$nextPageToken": "

    If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.

    The configured maximumPageSize determines how many results can be returned in a single call.

    ", + "PollForDecisionTaskInput$nextPageToken": "

    If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.

    The configured maximumPageSize determines how many results can be returned in a single call.

The nextPageToken returned by this action cannot be used with GetWorkflowExecutionHistory to get the next page. You must call PollForDecisionTask again (with the nextPageToken) to retrieve the next page of history records. Calling PollForDecisionTask with a nextPageToken will not return a new decision task.", + "WorkflowExecutionInfos$nextPageToken": "

    If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.

    The configured maximumPageSize determines how many results can be returned in a single call.

    ", + "WorkflowTypeInfos$nextPageToken": "

    If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.

    The configured maximumPageSize determines how many results can be returned in a single call.

    " + } + }, + "PendingTaskCount": { + "base": "

    Contains the count of tasks in a task list.

    ", + "refs": { + } + }, + "PollForActivityTaskInput": { + "base": null, + "refs": { + } + }, + "PollForDecisionTaskInput": { + "base": null, + "refs": { + } + }, + "RecordActivityTaskHeartbeatInput": { + "base": null, + "refs": { + } + }, + "RecordMarkerDecisionAttributes": { + "base": "

    Provides details of the RecordMarker decision.

    Access Control

    You can use IAM policies to control this decision's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "refs": { + "Decision$recordMarkerDecisionAttributes": "

    Provides details of the RecordMarker decision. It is not set for other decision types.
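
A sketch of emitting this decision when completing a decision task (taskToken and the marker name are assumptions):

```go
// Record a marker in the workflow history via RespondDecisionTaskCompleted.
_, err := svc.RespondDecisionTaskCompleted(&swf.RespondDecisionTaskCompletedInput{
	TaskToken: taskToken,
	Decisions: []*swf.Decision{{
		DecisionType: aws.String(swf.DecisionTypeRecordMarker),
		RecordMarkerDecisionAttributes: &swf.RecordMarkerDecisionAttributes{
			MarkerName: aws.String("checkpoint-1"),
			Details:    aws.String(`{"step":1}`),
		},
	}},
})
if err != nil {
	log.Fatal(err)
}
```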

    " + } + }, + "RecordMarkerFailedCause": { + "base": null, + "refs": { + "RecordMarkerFailedEventAttributes$cause": "

    The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.

    If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows." + } + }, + "RecordMarkerFailedEventAttributes": { + "base": "

    Provides details of the RecordMarkerFailed event.

    ", + "refs": { + "HistoryEvent$recordMarkerFailedEventAttributes": "

If the event is of type RecordMarkerFailed then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "RegisterActivityTypeInput": { + "base": null, + "refs": { + } + }, + "RegisterDomainInput": { + "base": null, + "refs": { + } + }, + "RegisterWorkflowTypeInput": { + "base": null, + "refs": { + } + }, + "RegistrationStatus": { + "base": null, + "refs": { + "ActivityTypeInfo$status": "

    The current status of the activity type.

    ", + "DomainInfo$status": "

    The status of the domain:

    • REGISTERED: The domain is properly registered and available. You can use this domain for registering types and creating new workflow executions.
    • DEPRECATED: The domain was deprecated using DeprecateDomain, but is still in use. You should not create new workflow executions in this domain.
    ", + "ListActivityTypesInput$registrationStatus": "

    Specifies the registration status of the activity types to list.
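
For example (a sketch; the client and domain are assumptions):

```go
// List only the activity types that are currently REGISTERED.
out, err := svc.ListActivityTypes(&swf.ListActivityTypesInput{
	Domain:             aws.String("ExampleDomain"),
	RegistrationStatus: aws.String(swf.RegistrationStatusRegistered),
})
if err == nil {
	fmt.Println("registered activity types:", len(out.TypeInfos))
}
```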

    ", + "ListDomainsInput$registrationStatus": "

    Specifies the registration status of the domains to list.

    ", + "ListWorkflowTypesInput$registrationStatus": "

    Specifies the registration status of the workflow types to list.

    ", + "WorkflowTypeInfo$status": "

    The current status of the workflow type.

    " + } + }, + "RequestCancelActivityTaskDecisionAttributes": { + "base": "

    Provides details of the RequestCancelActivityTask decision.

    Access Control

    You can use IAM policies to control this decision's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "refs": { + "Decision$requestCancelActivityTaskDecisionAttributes": "

    Provides details of the RequestCancelActivityTask decision. It is not set for other decision types.

    " + } + }, + "RequestCancelActivityTaskFailedCause": { + "base": null, + "refs": { + "RequestCancelActivityTaskFailedEventAttributes$cause": "

    The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.

    If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows." + } + }, + "RequestCancelActivityTaskFailedEventAttributes": { + "base": "

    Provides details of the RequestCancelActivityTaskFailed event.

    ", + "refs": { + "HistoryEvent$requestCancelActivityTaskFailedEventAttributes": "

    If the event is of type RequestCancelActivityTaskFailed then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "RequestCancelExternalWorkflowExecutionDecisionAttributes": { + "base": "

    Provides details of the RequestCancelExternalWorkflowExecution decision.

    Access Control

    You can use IAM policies to control this decision's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "refs": { + "Decision$requestCancelExternalWorkflowExecutionDecisionAttributes": "

    Provides details of the RequestCancelExternalWorkflowExecution decision. It is not set for other decision types.

    " + } + }, + "RequestCancelExternalWorkflowExecutionFailedCause": { + "base": null, + "refs": { + "RequestCancelExternalWorkflowExecutionFailedEventAttributes$cause": "

    The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.

    If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows." + } + }, + "RequestCancelExternalWorkflowExecutionFailedEventAttributes": { + "base": "

    Provides details of the RequestCancelExternalWorkflowExecutionFailed event.

    ", + "refs": { + "HistoryEvent$requestCancelExternalWorkflowExecutionFailedEventAttributes": "

    If the event is of type RequestCancelExternalWorkflowExecutionFailed then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "RequestCancelExternalWorkflowExecutionInitiatedEventAttributes": { + "base": "

    Provides details of the RequestCancelExternalWorkflowExecutionInitiated event.

    ", + "refs": { + "HistoryEvent$requestCancelExternalWorkflowExecutionInitiatedEventAttributes": "

    If the event is of type RequestCancelExternalWorkflowExecutionInitiated then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "RequestCancelWorkflowExecutionInput": { + "base": null, + "refs": { + } + }, + "RespondActivityTaskCanceledInput": { + "base": null, + "refs": { + } + }, + "RespondActivityTaskCompletedInput": { + "base": null, + "refs": { + } + }, + "RespondActivityTaskFailedInput": { + "base": null, + "refs": { + } + }, + "RespondDecisionTaskCompletedInput": { + "base": null, + "refs": { + } + }, + "ReverseOrder": { + "base": null, + "refs": { + "GetWorkflowExecutionHistoryInput$reverseOrder": "

When set to true, returns the events in reverse order. By default the results are returned in ascending order of the eventTimestamp of the events.

    ", + "ListActivityTypesInput$reverseOrder": "

    When set to true, returns the results in reverse order. By default, the results are returned in ascending alphabetical order by name of the activity types.

    ", + "ListClosedWorkflowExecutionsInput$reverseOrder": "

    When set to true, returns the results in reverse order. By default the results are returned in descending order of the start or the close time of the executions.

    ", + "ListDomainsInput$reverseOrder": "

    When set to true, returns the results in reverse order. By default, the results are returned in ascending alphabetical order by name of the domains.

    ", + "ListOpenWorkflowExecutionsInput$reverseOrder": "

    When set to true, returns the results in reverse order. By default the results are returned in descending order of the start time of the executions.

    ", + "ListWorkflowTypesInput$reverseOrder": "

    When set to true, returns the results in reverse order. By default the results are returned in ascending alphabetical order of the name of the workflow types.

    ", + "PollForDecisionTaskInput$reverseOrder": "

    When set to true, returns the events in reverse order. By default the results are returned in ascending order of the eventTimestamp of the events.

    " + } + }, + "Run": { + "base": "

    Specifies the runId of a workflow execution.

    ", + "refs": { + } + }, + "RunId": { + "base": null, + "refs": { + "Run$runId": "

    The runId of a workflow execution. This ID is generated by the service and can be used to uniquely identify the workflow execution within a domain.
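
A sketch of capturing the runId when starting an execution (identifiers are placeholders; timeouts fall back to the defaults registered with the workflow type):

```go
// Start an execution and keep the service-generated runId, which together
// with the workflowId uniquely identifies this run within the domain.
run, err := svc.StartWorkflowExecution(&swf.StartWorkflowExecutionInput{
	Domain:     aws.String("ExampleDomain"),
	WorkflowId: aws.String("order-12345"),
	WorkflowType: &swf.WorkflowType{
		Name:    aws.String("ProcessOrder"), // must be a registered type
		Version: aws.String("1.0"),
	},
})
if err != nil {
	log.Fatal(err)
}
fmt.Println("runId:", aws.StringValue(run.RunId))
```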

    ", + "WorkflowExecution$runId": "

    A system-generated unique identifier for the workflow execution.

    ", + "WorkflowExecutionContinuedAsNewEventAttributes$newExecutionRunId": "

    The runId of the new workflow execution.

    " + } + }, + "RunIdOptional": { + "base": null, + "refs": { + "RequestCancelExternalWorkflowExecutionDecisionAttributes$runId": "

    The runId of the external workflow execution to cancel.

    ", + "RequestCancelExternalWorkflowExecutionFailedEventAttributes$runId": "

    The runId of the external workflow execution.

    ", + "RequestCancelExternalWorkflowExecutionInitiatedEventAttributes$runId": "

    The runId of the external workflow execution to be canceled.

    ", + "RequestCancelWorkflowExecutionInput$runId": "

    The runId of the workflow execution to cancel.
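
For example (a sketch with placeholder identifiers); if runId is omitted, the request applies to the currently open execution with that workflowId:

```go
// Ask SWF to record a WorkflowExecutionCancelRequested event for this run.
_, err := svc.RequestCancelWorkflowExecution(&swf.RequestCancelWorkflowExecutionInput{
	Domain:     aws.String("ExampleDomain"),
	WorkflowId: aws.String("order-12345"),
	RunId:      aws.String("example-run-id"),
})
if err != nil {
	log.Fatal(err)
}
```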

    ", + "SignalExternalWorkflowExecutionDecisionAttributes$runId": "

    The runId of the workflow execution to be signaled.

    ", + "SignalExternalWorkflowExecutionFailedEventAttributes$runId": "

    The runId of the external workflow execution that the signal was being delivered to.

    ", + "SignalExternalWorkflowExecutionInitiatedEventAttributes$runId": "

    The runId of the external workflow execution to send the signal to.

    ", + "SignalWorkflowExecutionInput$runId": "

    The runId of the workflow execution to signal.

    ", + "TerminateWorkflowExecutionInput$runId": "

    The runId of the workflow execution to terminate.

    ", + "WorkflowExecutionStartedEventAttributes$continuedExecutionRunId": "

    If this workflow execution was started due to a ContinueAsNewWorkflowExecution decision, then it contains the runId of the previous workflow execution that was closed and continued as this execution.

    " + } + }, + "ScheduleActivityTaskDecisionAttributes": { + "base": "

    Provides details of the ScheduleActivityTask decision.

    Access Control

    You can use IAM policies to control this decision's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • Constrain the following parameters by using a Condition element with the appropriate keys.
      • activityType.name: String constraint. The key is swf:activityType.name.
      • activityType.version: String constraint. The key is swf:activityType.version.
      • taskList: String constraint. The key is swf:taskList.name.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "refs": { + "Decision$scheduleActivityTaskDecisionAttributes": "

    Provides details of the ScheduleActivityTask decision. It is not set for other decision types.
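
A sketch of this decision as a decider would build it (all names are placeholders; note that activityType.name, activityType.version, and taskList.name are exactly the values the IAM condition keys above can constrain):

```go
// Build a ScheduleActivityTask decision to return in
// RespondDecisionTaskCompletedInput.Decisions. The activity type must
// already be registered in the domain.
func scheduleChargeCard() *swf.Decision {
	return &swf.Decision{
		DecisionType: aws.String(swf.DecisionTypeScheduleActivityTask),
		ScheduleActivityTaskDecisionAttributes: &swf.ScheduleActivityTaskDecisionAttributes{
			ActivityId:   aws.String("charge-card-1"), // unique within the execution
			ActivityType: &swf.ActivityType{Name: aws.String("ChargeCard"), Version: aws.String("1.0")},
			TaskList:     &swf.TaskList{Name: aws.String("example-task-list")},
			Input:        aws.String(`{"orderId":"12345"}`),
		},
	}
}
```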

    " + } + }, + "ScheduleActivityTaskFailedCause": { + "base": null, + "refs": { + "ScheduleActivityTaskFailedEventAttributes$cause": "

    The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.

    If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows." + } + }, + "ScheduleActivityTaskFailedEventAttributes": { + "base": "

    Provides details of the ScheduleActivityTaskFailed event.

    ", + "refs": { + "HistoryEvent$scheduleActivityTaskFailedEventAttributes": "

    If the event is of type ScheduleActivityTaskFailed then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "ScheduleLambdaFunctionDecisionAttributes": { + "base": "

    Provides details of the ScheduleLambdaFunction decision.

    Access Control

    You can use IAM policies to control this decision's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • Constrain the following parameters by using a Condition element with the appropriate keys.
      • activityType.name: String constraint. The key is swf:activityType.name.
      • activityType.version: String constraint. The key is swf:activityType.version.
      • taskList: String constraint. The key is swf:taskList.name.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.
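
A sketch of this decision (placeholder names; the id is subject to the FunctionId constraints described earlier, and the execution must have an IAM role that permits invoking the function):

```go
// Build a ScheduleLambdaFunction decision to return from a decider.
func scheduleResize() *swf.Decision {
	return &swf.Decision{
		DecisionType: aws.String(swf.DecisionTypeScheduleLambdaFunction),
		ScheduleLambdaFunctionDecisionAttributes: &swf.ScheduleLambdaFunctionDecisionAttributes{
			Id:                  aws.String("resize-image-1"), // unique SWF id for the task
			Name:                aws.String("resize-image"),   // Lambda function to invoke
			Input:               aws.String(`{"key":"photo.jpg"}`),
			StartToCloseTimeout: aws.String("30"), // seconds
		},
	}
}
```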

    ", + "refs": { + "Decision$scheduleLambdaFunctionDecisionAttributes": null + } + }, + "ScheduleLambdaFunctionFailedCause": { + "base": null, + "refs": { + "ScheduleLambdaFunctionFailedEventAttributes$cause": "

    The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.

    If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows." + } + }, + "ScheduleLambdaFunctionFailedEventAttributes": { + "base": "

    Provides details for the ScheduleLambdaFunctionFailed event.

    ", + "refs": { + "HistoryEvent$scheduleLambdaFunctionFailedEventAttributes": null + } + }, + "SignalExternalWorkflowExecutionDecisionAttributes": { + "base": "

    Provides details of the SignalExternalWorkflowExecution decision.

    Access Control

    You can use IAM policies to control this decision's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "refs": { + "Decision$signalExternalWorkflowExecutionDecisionAttributes": "

    Provides details of the SignalExternalWorkflowExecution decision. It is not set for other decision types.

    " + } + }, + "SignalExternalWorkflowExecutionFailedCause": { + "base": null, + "refs": { + "SignalExternalWorkflowExecutionFailedEventAttributes$cause": "

    The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.

    If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows." + } + }, + "SignalExternalWorkflowExecutionFailedEventAttributes": { + "base": "

    Provides details of the SignalExternalWorkflowExecutionFailed event.

    ", + "refs": { + "HistoryEvent$signalExternalWorkflowExecutionFailedEventAttributes": "

    If the event is of type SignalExternalWorkflowExecutionFailed then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "SignalExternalWorkflowExecutionInitiatedEventAttributes": { + "base": "

    Provides details of the SignalExternalWorkflowExecutionInitiated event.

    ", + "refs": { + "HistoryEvent$signalExternalWorkflowExecutionInitiatedEventAttributes": "

    If the event is of type SignalExternalWorkflowExecutionInitiated then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "SignalName": { + "base": null, + "refs": { + "SignalExternalWorkflowExecutionDecisionAttributes$signalName": "

Required. The name of the signal. The target workflow execution will use the signal name and input to process the signal.

    ", + "SignalExternalWorkflowExecutionInitiatedEventAttributes$signalName": "

    The name of the signal.

    ", + "SignalWorkflowExecutionInput$signalName": "

    The name of the signal. This name must be meaningful to the target workflow.
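
For example (a sketch with placeholder identifiers), signaling a running execution directly through the service API:

```go
// Deliver a signal; the target's decider sees a WorkflowExecutionSignaled
// event carrying this signal name and input.
_, err := svc.SignalWorkflowExecution(&swf.SignalWorkflowExecutionInput{
	Domain:     aws.String("ExampleDomain"),
	WorkflowId: aws.String("order-12345"),
	SignalName: aws.String("payment-received"),
	Input:      aws.String(`{"amount":42}`),
})
if err != nil {
	log.Fatal(err)
}
```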

    ", + "WorkflowExecutionSignaledEventAttributes$signalName": "

The name of the signal received. The decider can use the signal name and inputs to determine how to process the signal.

    " + } + }, + "SignalWorkflowExecutionInput": { + "base": null, + "refs": { + } + }, + "StartChildWorkflowExecutionDecisionAttributes": { + "base": "

    Provides details of the StartChildWorkflowExecution decision.

    Access Control

    You can use IAM policies to control this decision's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • Constrain the following parameters by using a Condition element with the appropriate keys.
      • tagList.member.N: The key is \"swf:tagList.N\" where N is the tag number from 0 to 4, inclusive.
      • taskList: String constraint. The key is swf:taskList.name.
      • workflowType.name: String constraint. The key is swf:workflowType.name.
      • workflowType.version: String constraint. The key is swf:workflowType.version.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "refs": { + "Decision$startChildWorkflowExecutionDecisionAttributes": "

    Provides details of the StartChildWorkflowExecution decision. It is not set for other decision types.

    " + } + }, + "StartChildWorkflowExecutionFailedCause": { + "base": null, + "refs": { + "StartChildWorkflowExecutionFailedEventAttributes$cause": "

    The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.

    If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows." + } + }, + "StartChildWorkflowExecutionFailedEventAttributes": { + "base": "

    Provides details of the StartChildWorkflowExecutionFailed event.

    ", + "refs": { + "HistoryEvent$startChildWorkflowExecutionFailedEventAttributes": "

    If the event is of type StartChildWorkflowExecutionFailed then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "StartChildWorkflowExecutionInitiatedEventAttributes": { + "base": "

    Provides details of the StartChildWorkflowExecutionInitiated event.

    ", + "refs": { + "HistoryEvent$startChildWorkflowExecutionInitiatedEventAttributes": "

    If the event is of type StartChildWorkflowExecutionInitiated then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "StartLambdaFunctionFailedCause": { + "base": null, + "refs": { + "StartLambdaFunctionFailedEventAttributes$cause": "

    The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.

    If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows." + } + }, + "StartLambdaFunctionFailedEventAttributes": { + "base": "

    Provides details for the StartLambdaFunctionFailed event.

    ", + "refs": { + "HistoryEvent$startLambdaFunctionFailedEventAttributes": null + } + }, + "StartTimerDecisionAttributes": { + "base": "

    Provides details of the StartTimer decision.

    Access Control

    You can use IAM policies to control this decision's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.
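    A sketch of the corresponding decision under the same assumptions as the earlier examples (the timer ID and timeout are placeholders):

        decision := &swf.Decision{
            DecisionType: aws.String(swf.DecisionTypeStartTimer),
            StartTimerDecisionAttributes: &swf.StartTimerDecisionAttributes{
                TimerId:            aws.String("retry-backoff-1"), // must be unique within the execution
                StartToFireTimeout: aws.String("300"),             // seconds until the TimerFired event
            },
        }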

    ", + "refs": { + "Decision$startTimerDecisionAttributes": "

    Provides details of the StartTimer decision. It is not set for other decision types.

    " + } + }, + "StartTimerFailedCause": { + "base": null, + "refs": { + "StartTimerFailedEventAttributes$cause": "

    The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.

    If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows." + } + }, + "StartTimerFailedEventAttributes": { + "base": "

    Provides details of the StartTimerFailed event.

    ", + "refs": { + "HistoryEvent$startTimerFailedEventAttributes": "

    If the event is of type StartTimerFailed then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "StartWorkflowExecutionInput": { + "base": null, + "refs": { + } + }, + "Tag": { + "base": null, + "refs": { + "TagFilter$tag": "

    Required. Specifies the tag that must be associated with the execution for it to meet the filter criteria.

    ", + "TagList$member": null + } + }, + "TagFilter": { + "base": "

    Used to filter the workflow executions in visibility APIs based on a tag.

    ", + "refs": { + "CountClosedWorkflowExecutionsInput$tagFilter": "

    If specified, only executions that have a tag that matches the filter are counted.

    closeStatusFilter, executionFilter, typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.", + "CountOpenWorkflowExecutionsInput$tagFilter": "

    If specified, only executions that have a tag that matches the filter are counted.

    executionFilter, typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.", + "ListClosedWorkflowExecutionsInput$tagFilter": "

    If specified, only executions that have the matching tag are listed.

    closeStatusFilter, executionFilter, typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.", + "ListOpenWorkflowExecutionsInput$tagFilter": "

    If specified, only executions that have the matching tag are listed.

    executionFilter, typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request." + } + }, + "TagList": { + "base": null, + "refs": { + "ContinueAsNewWorkflowExecutionDecisionAttributes$tagList": "

    The list of tags to associate with the new workflow execution. A maximum of 5 tags can be specified. You can list workflow executions with a specific tag by calling ListOpenWorkflowExecutions or ListClosedWorkflowExecutions and specifying a TagFilter.

    ", + "StartChildWorkflowExecutionDecisionAttributes$tagList": "

    The list of tags to associate with the child workflow execution. A maximum of 5 tags can be specified. You can list workflow executions with a specific tag by calling ListOpenWorkflowExecutions or ListClosedWorkflowExecutions and specifying a TagFilter.

    ", + "StartChildWorkflowExecutionInitiatedEventAttributes$tagList": "

    The list of tags associated with the child workflow execution.

    ", + "StartWorkflowExecutionInput$tagList": "

    The list of tags to associate with the workflow execution. You can specify a maximum of 5 tags. You can list workflow executions with a specific tag by calling ListOpenWorkflowExecutions or ListClosedWorkflowExecutions and specifying a TagFilter.

    ", + "WorkflowExecutionContinuedAsNewEventAttributes$tagList": "

    The list of tags associated with the new workflow execution.

    ", + "WorkflowExecutionInfo$tagList": "

    The list of tags associated with the workflow execution. Tags can be used to identify and list workflow executions of interest through the visibility APIs. A workflow execution can have a maximum of 5 tags.

    ", + "WorkflowExecutionStartedEventAttributes$tagList": "

    The list of tags associated with this workflow execution. An execution can have up to 5 tags.

    " + } + }, + "TaskList": { + "base": "

    Represents a task list.

    ", + "refs": { + "ActivityTaskScheduledEventAttributes$taskList": "

    The task list in which the activity task has been scheduled.

    ", + "ActivityTypeConfiguration$defaultTaskList": "

    Optional. The default task list specified for this activity type at registration. This default is used if a task list is not provided when a task is scheduled through the ScheduleActivityTask decision. You can override the default registered task list when scheduling a task through the ScheduleActivityTask decision.

    ", + "ContinueAsNewWorkflowExecutionDecisionAttributes$taskList": null, + "CountPendingActivityTasksInput$taskList": "

    The name of the task list.

    ", + "CountPendingDecisionTasksInput$taskList": "

    The name of the task list.

    ", + "DecisionTaskScheduledEventAttributes$taskList": "

    The name of the task list in which the decision task was scheduled.

    ", + "PollForActivityTaskInput$taskList": "

    Specifies the task list to poll for activity tasks.

    The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f-\\u009f). Also, it must not contain the literal string \"arn\".

    ", + "PollForDecisionTaskInput$taskList": "

    Specifies the task list to poll for decision tasks.

    The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f-\\u009f). Also, it must not contain the literal string \"arn\".
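    A sketch of a worker long-polling such a task list (same imports as the earlier sketch; the domain, list name, and identity are placeholders that must obey the constraints above):

        task, err := svc.PollForActivityTask(&swf.PollForActivityTaskInput{
            Domain:   aws.String("example-domain"),
            TaskList: &swf.TaskList{Name: aws.String("image-resize")},
            Identity: aws.String("worker-1"), // recorded in the ActivityTaskStarted event
        })
        if err == nil && aws.StringValue(task.TaskToken) != "" {
            // A non-empty TaskToken means a task arrived before the long poll timed out.
        }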

    ", + "RegisterActivityTypeInput$defaultTaskList": "

    If set, specifies the default task list to use for scheduling tasks of this activity type. This default task list is used if a task list is not provided when a task is scheduled through the ScheduleActivityTask decision.

    ", + "RegisterWorkflowTypeInput$defaultTaskList": "

    If set, specifies the default task list to use for scheduling decision tasks for executions of this workflow type. This default is used only if a task list is not provided when starting the execution through the StartWorkflowExecution action or StartChildWorkflowExecution decision.

    ", + "ScheduleActivityTaskDecisionAttributes$taskList": "

    If set, specifies the name of the task list in which to schedule the activity task. If not specified, the defaultTaskList registered with the activity type will be used.

    A task list for this activity task must be specified either as a default for the activity type or through this field. If neither this field is set nor a default task list was specified at registration time then a fault will be returned.

    The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f-\\u009f). Also, it must not contain the literal string \"arn\".

    ", + "StartChildWorkflowExecutionDecisionAttributes$taskList": "

    The name of the task list to be used for decision tasks of the child workflow execution.

    A task list for this workflow execution must be specified either as a default for the workflow type or through this parameter. If neither this parameter is set nor a default task list was specified at registration time then a fault will be returned.

    The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f-\\u009f). Also, it must not contain the literal string \"arn\".

    ", + "StartChildWorkflowExecutionInitiatedEventAttributes$taskList": "

    The name of the task list used for the decision tasks of the child workflow execution.

    ", + "StartWorkflowExecutionInput$taskList": "

    The task list to use for the decision tasks generated for this workflow execution. This overrides the defaultTaskList specified when registering the workflow type.

    A task list for this workflow execution must be specified either as a default for the workflow type or through this parameter. If neither this parameter is set nor a default task list was specified at registration time then a fault will be returned.

    The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f-\\u009f). Also, it must not contain the literal string \"arn\".

    ", + "WorkflowExecutionConfiguration$taskList": "

    The task list used for the decision tasks generated for this workflow execution.

    ", + "WorkflowExecutionContinuedAsNewEventAttributes$taskList": null, + "WorkflowExecutionStartedEventAttributes$taskList": "

    The name of the task list for scheduling the decision tasks for this workflow execution.

    ", + "WorkflowTypeConfiguration$defaultTaskList": "

    Optional. The default task list, specified when registering the workflow type, for decision tasks scheduled for workflow executions of this type. This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution decision.

    " + } + }, + "TaskPriority": { + "base": null, + "refs": { + "ActivityTaskScheduledEventAttributes$taskPriority": "

    Optional. The priority to assign to the scheduled activity task. If set, this will override any default priority value that was assigned when the activity type was registered.

    Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.

    For more information about setting task priority, see Setting Task Priority in the Amazon Simple Workflow Developer Guide.

    ", + "ActivityTypeConfiguration$defaultTaskPriority": "

    Optional. The default task priority for tasks of this activity type, specified at registration. If not set, then \"0\" will be used as the default priority. This default can be overridden when scheduling an activity task.

    Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.

    For more information about setting task priority, see Setting Task Priority in the Amazon Simple Workflow Developer Guide.

    ", + "ContinueAsNewWorkflowExecutionDecisionAttributes$taskPriority": "

    Optional. The task priority that, if set, specifies the priority for the decision tasks for this workflow execution. This overrides the defaultTaskPriority specified when registering the workflow type. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.

    For more information about setting task priority, see Setting Task Priority in the Amazon Simple Workflow Developer Guide.

    ", + "DecisionTaskScheduledEventAttributes$taskPriority": "

    Optional. A task priority that, if set, specifies the priority for this decision task. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.

    For more information about setting task priority, see Setting Task Priority in the Amazon Simple Workflow Developer Guide.

    ", + "RegisterActivityTypeInput$defaultTaskPriority": "

    The default task priority to assign to the activity type. If not assigned, then \"0\" will be used. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.

    For more information about setting task priority, see Setting Task Priority in the Amazon Simple Workflow Developer Guide.

    ", + "RegisterWorkflowTypeInput$defaultTaskPriority": "

    The default task priority to assign to the workflow type. If not assigned, then \"0\" will be used. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.

    For more information about setting task priority, see Setting Task Priority in the Amazon Simple Workflow Developer Guide.

    ", + "ScheduleActivityTaskDecisionAttributes$taskPriority": "

    Optional. If set, specifies the priority with which the activity task is to be assigned to a worker. This overrides the defaultTaskPriority specified when registering the activity type using RegisterActivityType. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.

    For more information about setting task priority, see Setting Task Priority in the Amazon Simple Workflow Developer Guide.

    ", + "StartChildWorkflowExecutionDecisionAttributes$taskPriority": "

    Optional. A task priority that, if set, specifies the priority for a decision task of this workflow execution. This overrides the defaultTaskPriority specified when registering the workflow type. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.

    For more information about setting task priority, see Setting Task Priority in the Amazon Simple Workflow Developer Guide.

    ", + "StartChildWorkflowExecutionInitiatedEventAttributes$taskPriority": "

    Optional. The priority assigned for the decision tasks for this workflow execution. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.

    For more information about setting task priority, see Setting Task Priority in the Amazon Simple Workflow Developer Guide.

    ", + "StartWorkflowExecutionInput$taskPriority": "

    The task priority to use for this workflow execution. This will override any default priority that was assigned when the workflow type was registered. If not set, then the default task priority for the workflow type will be used. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.

    For more information about setting task priority, see Setting Task Priority in the Amazon Simple Workflow Developer Guide.

    ", + "WorkflowExecutionConfiguration$taskPriority": "

    The priority assigned to decision tasks for this workflow execution. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.

    For more information about setting task priority, see Setting Task Priority in the Amazon Simple Workflow Developer Guide.

    ", + "WorkflowExecutionContinuedAsNewEventAttributes$taskPriority": null, + "WorkflowExecutionStartedEventAttributes$taskPriority": null, + "WorkflowTypeConfiguration$defaultTaskPriority": "

    Optional. The default task priority, specified when registering the workflow type, for all decision tasks of this workflow type. This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution decision.

    Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.

    For more information about setting task priority, see Setting Task Priority in the Amazon Simple Workflow Developer Guide.

    " + } + }, + "TaskToken": { + "base": null, + "refs": { + "ActivityTask$taskToken": "

    The opaque string used as a handle on the task. This token is used by workers to communicate progress and response information back to the system about the task.

    ", + "DecisionTask$taskToken": "

    The opaque string used as a handle on the task. This token is used by workers to communicate progress and response information back to the system about the task.

    ", + "RecordActivityTaskHeartbeatInput$taskToken": "

    The taskToken of the ActivityTask.

    taskToken is generated by the service and should be treated as an opaque value. If the task is passed to another process, its taskToken must also be passed. This enables it to provide its progress and respond with results. ", + "RespondActivityTaskCanceledInput$taskToken": "
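    A sketch of a long-running worker heartbeating with that token (same assumptions as the earlier sketches; task comes from PollForActivityTask):

        hb, err := svc.RecordActivityTaskHeartbeat(&swf.RecordActivityTaskHeartbeatInput{
            TaskToken: task.TaskToken,         // opaque handle from the poll
            Details:   aws.String("50% done"), // free-form progress detail
        })
        if err == nil && aws.BoolValue(hb.CancelRequested) {
            // The execution requested cancellation; stop work and respond canceled.
        }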

    The taskToken of the ActivityTask.

    taskToken is generated by the service and should be treated as an opaque value. If the task is passed to another process, its taskToken must also be passed. This enables it to provide its progress and respond with results.", + "RespondActivityTaskCompletedInput$taskToken": "

    The taskToken of the ActivityTask.

    taskToken is generated by the service and should be treated as an opaque value. If the task is passed to another process, its taskToken must also be passed. This enables it to provide its progress and respond with results.", + "RespondActivityTaskFailedInput$taskToken": "

    The taskToken of the ActivityTask.

    taskToken is generated by the service and should be treated as an opaque value. If the task is passed to another process, its taskToken must also be passed. This enables it to provide its progress and respond with results.", + "RespondDecisionTaskCompletedInput$taskToken": "

    The taskToken from the DecisionTask.

    taskToken is generated by the service and should be treated as an opaque value. If the task is passed to another process, its taskToken must also be passed. This enables it to provide its progress and respond with results." + } + }, + "TerminateReason": { + "base": null, + "refs": { + "TerminateWorkflowExecutionInput$reason": "

    Optional. A descriptive reason for terminating the workflow execution.

    ", + "WorkflowExecutionTerminatedEventAttributes$reason": "

    The reason provided for the termination (if any).

    " + } + }, + "TerminateWorkflowExecutionInput": { + "base": null, + "refs": { + } + }, + "TimerCanceledEventAttributes": { + "base": "

    Provides details of the TimerCanceled event.

    ", + "refs": { + "HistoryEvent$timerCanceledEventAttributes": "

    If the event is of type TimerCanceled then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "TimerFiredEventAttributes": { + "base": "

    Provides details of the TimerFired event.

    ", + "refs": { + "HistoryEvent$timerFiredEventAttributes": "

    If the event is of type TimerFired then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "TimerId": { + "base": null, + "refs": { + "CancelTimerDecisionAttributes$timerId": "

    Required. The unique ID of the timer to cancel.

    ", + "CancelTimerFailedEventAttributes$timerId": "

    The timerId provided in the CancelTimer decision that failed.

    ", + "StartTimerDecisionAttributes$timerId": "

    Required. The unique ID of the timer.

    The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f-\\u009f). Also, it must not contain the literal string \"arn\".

    ", + "StartTimerFailedEventAttributes$timerId": "

    The timerId provided in the StartTimer decision that failed.

    ", + "TimerCanceledEventAttributes$timerId": "

    The unique ID of the timer that was canceled.

    ", + "TimerFiredEventAttributes$timerId": "

    The unique ID of the timer that fired.

    ", + "TimerStartedEventAttributes$timerId": "

    The unique ID of the timer that was started.

    " + } + }, + "TimerStartedEventAttributes": { + "base": "

    Provides details of the TimerStarted event.

    ", + "refs": { + "HistoryEvent$timerStartedEventAttributes": "

    If the event is of type TimerStarted then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "Timestamp": { + "base": null, + "refs": { + "ActivityTypeInfo$creationDate": "

    The date and time this activity type was created through RegisterActivityType.

    ", + "ActivityTypeInfo$deprecationDate": "

    If DEPRECATED, the date and time DeprecateActivityType was called.

    ", + "ExecutionTimeFilter$oldestDate": "

    Specifies the oldest start or close date and time to return.

    ", + "ExecutionTimeFilter$latestDate": "

    Specifies the latest start or close date and time to return.

    ", + "HistoryEvent$eventTimestamp": "

    The date and time when the event occurred.

    ", + "WorkflowExecutionDetail$latestActivityTaskTimestamp": "

    The time when the last activity task was scheduled for this workflow execution. You can use this information to determine if the workflow has not made progress for an unusually long period of time and might require a corrective action.

    ", + "WorkflowExecutionInfo$startTimestamp": "

    The time when the execution was started.

    ", + "WorkflowExecutionInfo$closeTimestamp": "

    The time when the workflow execution was closed. Set only if the execution status is CLOSED.

    ", + "WorkflowTypeInfo$creationDate": "

    The date when this type was registered.

    ", + "WorkflowTypeInfo$deprecationDate": "

    If the type is in deprecated state, then it is set to the date when the type was deprecated.

    " + } + }, + "Truncated": { + "base": null, + "refs": { + "PendingTaskCount$truncated": "

    If set to true, indicates that the actual count was more than the maximum supported by this API and the count returned is the truncated value.

    ", + "WorkflowExecutionCount$truncated": "

    If set to true, indicates that the actual count was more than the maximum supported by this API and the count returned is the truncated value.
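    A sketch of reading this flag after a count call (same imports plus the standard time package; the domain, window, and tag are placeholders):

        count, err := svc.CountOpenWorkflowExecutions(&swf.CountOpenWorkflowExecutionsInput{
            Domain: aws.String("example-domain"),
            StartTimeFilter: &swf.ExecutionTimeFilter{ // required: bounds the start times considered
                OldestDate: aws.Time(time.Now().Add(-24 * time.Hour)),
            },
            TagFilter: &swf.TagFilter{Tag: aws.String("reporting")}, // at most one filter kind per request
        })
        if err == nil && aws.BoolValue(count.Truncated) {
            // Count holds the truncated value, not the true total.
        }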

    " + } + }, + "TypeAlreadyExistsFault": { + "base": "

    Returned if the type already exists in the specified domain. You will get this fault even if the existing type is in deprecated status. You can specify another version if the intent is to create a new distinct version of the type.

    ", + "refs": { + } + }, + "TypeDeprecatedFault": { + "base": "

    Returned when the specified activity or workflow type was already deprecated.

    ", + "refs": { + } + }, + "UnknownResourceFault": { + "base": "

    Returned when the named resource cannot be found within the scope of this operation (region or domain). This could happen if the named resource was never created or is no longer available for this operation.

    ", + "refs": { + } + }, + "Version": { + "base": null, + "refs": { + "ActivityType$version": "

    The version of this activity.

    The combination of activity type name and version must be unique within a domain.", + "ContinueAsNewWorkflowExecutionDecisionAttributes$workflowTypeVersion": null, + "RegisterActivityTypeInput$version": "

    The version of the activity type.

    The activity type consists of the name and version, the combination of which must be unique within the domain.

    The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f-\\u009f). Also, it must not contain the literal string \"arn\".

    ", + "RegisterWorkflowTypeInput$version": "

    The version of the workflow type.

    The workflow type consists of the name and version, the combination of which must be unique within the domain. To get a list of all currently registered workflow types, use the ListWorkflowTypes action.

    The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f-\\u009f). Also, it must not contain the literal string \"arn\".

    ", + "WorkflowType$version": "

    Required. The version of the workflow type.

    The combination of workflow type name and version must be unique within a domain." + } + }, + "VersionOptional": { + "base": null, + "refs": { + "WorkflowTypeFilter$version": "

    Version of the workflow type.

    " + } + }, + "WorkflowExecution": { + "base": "

    Represents a workflow execution.

    ", + "refs": { + "ActivityTask$workflowExecution": "

    The workflow execution that started this activity task.

    ", + "ChildWorkflowExecutionCanceledEventAttributes$workflowExecution": "

    The child workflow execution that was canceled.

    ", + "ChildWorkflowExecutionCompletedEventAttributes$workflowExecution": "

    The child workflow execution that was completed.

    ", + "ChildWorkflowExecutionFailedEventAttributes$workflowExecution": "

    The child workflow execution that failed.

    ", + "ChildWorkflowExecutionStartedEventAttributes$workflowExecution": "

    The child workflow execution that was started.

    ", + "ChildWorkflowExecutionTerminatedEventAttributes$workflowExecution": "

    The child workflow execution that was terminated.

    ", + "ChildWorkflowExecutionTimedOutEventAttributes$workflowExecution": "

    The child workflow execution that timed out.

    ", + "DecisionTask$workflowExecution": "

    The workflow execution for which this decision task was created.

    ", + "DescribeWorkflowExecutionInput$execution": "

    The workflow execution to describe.

    ", + "ExternalWorkflowExecutionCancelRequestedEventAttributes$workflowExecution": "

    The external workflow execution to which the cancellation request was delivered.

    ", + "ExternalWorkflowExecutionSignaledEventAttributes$workflowExecution": "

    The external workflow execution that the signal was delivered to.

    ", + "GetWorkflowExecutionHistoryInput$execution": "

    Specifies the workflow execution for which to return the history.

    ", + "WorkflowExecutionCancelRequestedEventAttributes$externalWorkflowExecution": "

    The external workflow execution for which the cancellation was requested.

    ", + "WorkflowExecutionInfo$execution": "

    The workflow execution this information is about.

    ", + "WorkflowExecutionInfo$parent": "

    If this workflow execution is a child of another execution, this contains the workflow execution that started this execution.

    ", + "WorkflowExecutionSignaledEventAttributes$externalWorkflowExecution": "

    The workflow execution that sent the signal. This is set only if the signal was sent by another workflow execution.

    ", + "WorkflowExecutionStartedEventAttributes$parentWorkflowExecution": "

    The source workflow execution that started this workflow execution. The member is not set if the workflow execution was not started by a workflow.

    " + } + }, + "WorkflowExecutionAlreadyStartedFault": { + "base": "

    Returned by StartWorkflowExecution when an open execution with the same workflowId is already running in the specified domain.

    ", + "refs": { + } + }, + "WorkflowExecutionCancelRequestedCause": { + "base": null, + "refs": { + "WorkflowExecutionCancelRequestedEventAttributes$cause": "

    If set, indicates that the request to cancel the workflow execution was automatically generated, and specifies the cause. This happens if the parent workflow execution times out or is terminated, and the child policy is set to cancel child executions.

    " + } + }, + "WorkflowExecutionCancelRequestedEventAttributes": { + "base": "

    Provides details of the WorkflowExecutionCancelRequested event.

    ", + "refs": { + "HistoryEvent$workflowExecutionCancelRequestedEventAttributes": "

    If the event is of type WorkflowExecutionCancelRequested then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "WorkflowExecutionCanceledEventAttributes": { + "base": "

    Provides details of the WorkflowExecutionCanceled event.

    ", + "refs": { + "HistoryEvent$workflowExecutionCanceledEventAttributes": "

    If the event is of type WorkflowExecutionCanceled then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "WorkflowExecutionCompletedEventAttributes": { + "base": "

    Provides details of the WorkflowExecutionCompleted event.

    ", + "refs": { + "HistoryEvent$workflowExecutionCompletedEventAttributes": "

    If the event is of type WorkflowExecutionCompleted then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "WorkflowExecutionConfiguration": { + "base": "

    The configuration settings for a workflow execution, including timeout values, task list, etc. These configuration settings are determined from the defaults specified when registering the workflow type and those specified when starting the workflow execution.

    ", + "refs": { + "WorkflowExecutionDetail$executionConfiguration": "

    The configuration settings for this workflow execution, including timeout values, task list, etc.

    " + } + }, + "WorkflowExecutionContinuedAsNewEventAttributes": { + "base": "

    Provides details of the WorkflowExecutionContinuedAsNew event.

    ", + "refs": { + "HistoryEvent$workflowExecutionContinuedAsNewEventAttributes": "

    If the event is of type WorkflowExecutionContinuedAsNew then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "WorkflowExecutionCount": { + "base": "

    Contains the count of workflow executions returned from CountOpenWorkflowExecutions or CountClosedWorkflowExecutions.

    ", + "refs": { + } + }, + "WorkflowExecutionDetail": { + "base": "

    Contains details about a workflow execution.

    ", + "refs": { + } + }, + "WorkflowExecutionFailedEventAttributes": { + "base": "

    Provides details of the WorkflowExecutionFailed event.

    ", + "refs": { + "HistoryEvent$workflowExecutionFailedEventAttributes": "

    If the event is of type WorkflowExecutionFailed then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "WorkflowExecutionFilter": { + "base": "

    Used to filter the workflow executions in visibility APIs by their workflowId.

    ", + "refs": { + "CountClosedWorkflowExecutionsInput$executionFilter": "

    If specified, only workflow executions matching the WorkflowId in the filter are counted.

    closeStatusFilter, executionFilter, typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.", + "CountOpenWorkflowExecutionsInput$executionFilter": "

    If specified, only workflow executions matching the WorkflowId in the filter are counted.

    executionFilter, typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.", + "ListClosedWorkflowExecutionsInput$executionFilter": "

    If specified, only workflow executions matching the workflow ID specified in the filter are returned.

    closeStatusFilter, executionFilter, typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.", + "ListOpenWorkflowExecutionsInput$executionFilter": "

    If specified, only workflow executions matching the workflow ID specified in the filter are returned.

    executionFilter, typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request." + } + }, + "WorkflowExecutionInfo": { + "base": "

    Contains information about a workflow execution.

    ", + "refs": { + "WorkflowExecutionDetail$executionInfo": "

    Information about the workflow execution.

    ", + "WorkflowExecutionInfoList$member": null + } + }, + "WorkflowExecutionInfoList": { + "base": null, + "refs": { + "WorkflowExecutionInfos$executionInfos": "

    The list of workflow information structures.

    " + } + }, + "WorkflowExecutionInfos": { + "base": "

    Contains a paginated list of information about workflow executions.
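    A sketch of walking these pages by hand using the nextPageToken plumbing that the paginators-1.json added later in this patch also drives (same assumptions and imports as the earlier sketches, plus the time package):

        input := &swf.ListOpenWorkflowExecutionsInput{
            Domain: aws.String("example-domain"),
            StartTimeFilter: &swf.ExecutionTimeFilter{
                OldestDate: aws.Time(time.Now().Add(-24 * time.Hour)),
            },
            MaximumPageSize: aws.Int64(100), // the limit_key in the paginator config
        }
        for {
            page, err := svc.ListOpenWorkflowExecutions(input)
            if err != nil {
                break
            }
            for _, info := range page.ExecutionInfos {
                fmt.Println(aws.StringValue(info.Execution.WorkflowId))
            }
            if page.NextPageToken == nil { // no token means the last page
                break
            }
            input.NextPageToken = page.NextPageToken
        }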

    ", + "refs": { + } + }, + "WorkflowExecutionOpenCounts": { + "base": "

    Contains the counts of open tasks, child workflow executions and timers for a workflow execution.

    ", + "refs": { + "WorkflowExecutionDetail$openCounts": "

    The number of tasks for this workflow execution. This includes open and closed tasks of all types.

    " + } + }, + "WorkflowExecutionSignaledEventAttributes": { + "base": "

    Provides details of the WorkflowExecutionSignaled event.

    ", + "refs": { + "HistoryEvent$workflowExecutionSignaledEventAttributes": "

    If the event is of type WorkflowExecutionSignaled then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "WorkflowExecutionStartedEventAttributes": { + "base": "

    Provides details of the WorkflowExecutionStarted event.

    ", + "refs": { + "HistoryEvent$workflowExecutionStartedEventAttributes": "

    If the event is of type WorkflowExecutionStarted then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "WorkflowExecutionTerminatedCause": { + "base": null, + "refs": { + "WorkflowExecutionTerminatedEventAttributes$cause": "

    If set, indicates that the workflow execution was automatically terminated, and specifies the cause. This happens if the parent workflow execution times out or is terminated and the child policy is set to terminate child executions.

    " + } + }, + "WorkflowExecutionTerminatedEventAttributes": { + "base": "

    Provides details of the WorkflowExecutionTerminated event.

    ", + "refs": { + "HistoryEvent$workflowExecutionTerminatedEventAttributes": "

    If the event is of type WorkflowExecutionTerminated then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "WorkflowExecutionTimedOutEventAttributes": { + "base": "

    Provides details of the WorkflowExecutionTimedOut event.

    ", + "refs": { + "HistoryEvent$workflowExecutionTimedOutEventAttributes": "

    If the event is of type WorkflowExecutionTimedOut then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "WorkflowExecutionTimeoutType": { + "base": null, + "refs": { + "ChildWorkflowExecutionTimedOutEventAttributes$timeoutType": "

    The type of the timeout that caused the child workflow execution to time out.

    ", + "WorkflowExecutionTimedOutEventAttributes$timeoutType": "

    The type of timeout that caused this event.

    " + } + }, + "WorkflowId": { + "base": null, + "refs": { + "RequestCancelExternalWorkflowExecutionDecisionAttributes$workflowId": "

    Required. The workflowId of the external workflow execution to cancel.

    ", + "RequestCancelExternalWorkflowExecutionFailedEventAttributes$workflowId": "

    The workflowId of the external workflow to which the cancel request was to be delivered.

    ", + "RequestCancelExternalWorkflowExecutionInitiatedEventAttributes$workflowId": "

    The workflowId of the external workflow execution to be canceled.

    ", + "RequestCancelWorkflowExecutionInput$workflowId": "

    The workflowId of the workflow execution to cancel.

    ", + "SignalExternalWorkflowExecutionDecisionAttributes$workflowId": "

    Required. The workflowId of the workflow execution to be signaled.

    ", + "SignalExternalWorkflowExecutionFailedEventAttributes$workflowId": "

    The workflowId of the external workflow execution that the signal was being delivered to.

    ", + "SignalExternalWorkflowExecutionInitiatedEventAttributes$workflowId": "

    The workflowId of the external workflow execution.

    ", + "SignalWorkflowExecutionInput$workflowId": "

    The workflowId of the workflow execution to signal.

    ", + "StartChildWorkflowExecutionDecisionAttributes$workflowId": "

    Required. The workflowId of the workflow execution.

    The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f-\\u009f). Also, it must not contain the literal string \"arn\".

    ", + "StartChildWorkflowExecutionFailedEventAttributes$workflowId": "

    The workflowId of the child workflow execution.

    ", + "StartChildWorkflowExecutionInitiatedEventAttributes$workflowId": "

    The workflowId of the child workflow execution.

    ", + "StartWorkflowExecutionInput$workflowId": "

    The user defined identifier associated with the workflow execution. You can use this to associate a custom identifier with the workflow execution. You may specify the same identifier if a workflow execution is logically a restart of a previous execution. You cannot have two open workflow executions with the same workflowId at the same time.

    The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f-\\u009f). Also, it must not contain the literal string \"arn\".
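    A sketch of starting an execution with such an identifier (same assumptions as the earlier sketches; registered defaults are presumed to cover the timeouts, and all names are placeholders):

        _, err := svc.StartWorkflowExecution(&swf.StartWorkflowExecutionInput{
            Domain:       aws.String("example-domain"),
            WorkflowId:   aws.String("order-12345"), // user-defined; only one open execution per ID
            WorkflowType: &swf.WorkflowType{Name: aws.String("OrderFlow"), Version: aws.String("1.0")},
            TaskList:     &swf.TaskList{Name: aws.String("order-deciders")}, // overrides the registered default
            TaskPriority: aws.String("10"),                                  // higher numbers run first
            TagList:      []*string{aws.String("reporting")},                // up to 5 tags
        })
        // On success the response carries the runId of the new execution; starting
        // a second execution with the same open workflowId returns
        // WorkflowExecutionAlreadyStartedFault.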

    ", + "TerminateWorkflowExecutionInput$workflowId": "

    The workflowId of the workflow execution to terminate.

    ", + "WorkflowExecution$workflowId": "

    The user defined identifier associated with the workflow execution.

    ", + "WorkflowExecutionFilter$workflowId": "

    The workflowId that must match the criteria of this filter.

    " + } + }, + "WorkflowType": { + "base": "

    Represents a workflow type.

    ", + "refs": { + "ChildWorkflowExecutionCanceledEventAttributes$workflowType": "

    The type of the child workflow execution.

    ", + "ChildWorkflowExecutionCompletedEventAttributes$workflowType": "

    The type of the child workflow execution.

    ", + "ChildWorkflowExecutionFailedEventAttributes$workflowType": "

    The type of the child workflow execution.

    ", + "ChildWorkflowExecutionStartedEventAttributes$workflowType": "

    The type of the child workflow execution.

    ", + "ChildWorkflowExecutionTerminatedEventAttributes$workflowType": "

    The type of the child workflow execution.

    ", + "ChildWorkflowExecutionTimedOutEventAttributes$workflowType": "

    The type of the child workflow execution.

    ", + "DecisionTask$workflowType": "

    The type of the workflow execution for which this decision task was created.

    ", + "DeprecateWorkflowTypeInput$workflowType": "

    The workflow type to deprecate.

    ", + "DescribeWorkflowTypeInput$workflowType": "

    The workflow type to describe.

    ", + "StartChildWorkflowExecutionDecisionAttributes$workflowType": "

    Required. The type of the workflow execution to be started.

    ", + "StartChildWorkflowExecutionFailedEventAttributes$workflowType": "

    The workflow type provided in the StartChildWorkflowExecution decision that failed.

    ", + "StartChildWorkflowExecutionInitiatedEventAttributes$workflowType": "

    The type of the child workflow execution.

    ", + "StartWorkflowExecutionInput$workflowType": "

    The type of the workflow to start.

    ", + "WorkflowExecutionContinuedAsNewEventAttributes$workflowType": null, + "WorkflowExecutionInfo$workflowType": "

    The type of the workflow execution.

    ", + "WorkflowExecutionStartedEventAttributes$workflowType": "

    The workflow type of this execution.

    ", + "WorkflowTypeInfo$workflowType": "

    The workflow type this information is about.

    " + } + }, + "WorkflowTypeConfiguration": { + "base": "

    The configuration settings of a workflow type.

    ", + "refs": { + "WorkflowTypeDetail$configuration": "

    Configuration settings of the workflow type registered through RegisterWorkflowType.

    " + } + }, + "WorkflowTypeDetail": { + "base": "

    Contains details about a workflow type.

    ", + "refs": { + } + }, + "WorkflowTypeFilter": { + "base": "

    Used to filter workflow execution query results by type. Each parameter, if specified, defines a rule that must be satisfied by each returned result.

    ", + "refs": { + "CountClosedWorkflowExecutionsInput$typeFilter": "

    If specified, indicates the type of the workflow executions to be counted.

    closeStatusFilter, executionFilter, typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.", + "CountOpenWorkflowExecutionsInput$typeFilter": "

    Specifies the type of the workflow executions to be counted.

    executionFilter, typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.", + "ListClosedWorkflowExecutionsInput$typeFilter": "

    If specified, only executions of the type specified in the filter are returned.

    closeStatusFilter, executionFilter, typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.", + "ListOpenWorkflowExecutionsInput$typeFilter": "

    If specified, only executions of the type specified in the filter are returned.

    executionFilter, typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request." + } + }, + "WorkflowTypeInfo": { + "base": "

    Contains information about a workflow type.

    ", + "refs": { + "WorkflowTypeDetail$typeInfo": "

    General information about the workflow type.

    The status of the workflow type (returned in the WorkflowTypeInfo structure) can be one of the following.

    • REGISTERED: The type is registered and available. Workers supporting this type should be running.
    • DEPRECATED: The type was deprecated using DeprecateWorkflowType, but is still in use. You should keep workers supporting this type running. You cannot create new workflow executions of this type.
    ", + "WorkflowTypeInfoList$member": null + } + }, + "WorkflowTypeInfoList": { + "base": null, + "refs": { + "WorkflowTypeInfos$typeInfos": "

    The list of workflow type information.

    " + } + }, + "WorkflowTypeInfos": { + "base": "

    Contains a paginated list of information structures about workflow types.

    ", + "refs": { + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/swf/2012-01-25/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/swf/2012-01-25/paginators-1.json new file mode 100644 index 000000000..892ee38b5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/swf/2012-01-25/paginators-1.json @@ -0,0 +1,46 @@ +{ + "pagination": { + "GetWorkflowExecutionHistory": { + "limit_key": "maximumPageSize", + "input_token": "nextPageToken", + "output_token": "nextPageToken", + "result_key": "events" + }, + "ListActivityTypes": { + "limit_key": "maximumPageSize", + "input_token": "nextPageToken", + "output_token": "nextPageToken", + "result_key": "typeInfos" + }, + "ListClosedWorkflowExecutions": { + "limit_key": "maximumPageSize", + "input_token": "nextPageToken", + "output_token": "nextPageToken", + "result_key": "executionInfos" + }, + "ListDomains": { + "limit_key": "maximumPageSize", + "input_token": "nextPageToken", + "output_token": "nextPageToken", + "result_key": "domainInfos" + }, + "ListOpenWorkflowExecutions": { + "limit_key": "maximumPageSize", + "input_token": "nextPageToken", + "output_token": "nextPageToken", + "result_key": "executionInfos" + }, + "ListWorkflowTypes": { + "limit_key": "maximumPageSize", + "input_token": "nextPageToken", + "output_token": "nextPageToken", + "result_key": "typeInfos" + }, + "PollForDecisionTask": { + "limit_key": "maximumPageSize", + "input_token": "nextPageToken", + "output_token": "nextPageToken", + "result_key": "events" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/waf/2015-08-24/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/waf/2015-08-24/api-2.json new file mode 100644 index 000000000..b51ee18ed --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/waf/2015-08-24/api-2.json @@ -0,0 +1,1959 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-08-24", + "endpointPrefix":"waf", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"WAF", + "serviceFullName":"AWS WAF", + "signatureVersion":"v4", + "targetPrefix":"AWSWAF_20150824" + }, + "operations":{ + "CreateByteMatchSet":{ + "name":"CreateByteMatchSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateByteMatchSetRequest"}, + "output":{"shape":"CreateByteMatchSetResponse"}, + "errors":[ + {"shape":"WAFDisallowedNameException"}, + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFStaleDataException"}, + {"shape":"WAFLimitsExceededException"} + ] + }, + "CreateIPSet":{ + "name":"CreateIPSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateIPSetRequest"}, + "output":{"shape":"CreateIPSetResponse"}, + "errors":[ + {"shape":"WAFStaleDataException"}, + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFDisallowedNameException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFLimitsExceededException"} + ] + }, + "CreateRule":{ + "name":"CreateRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateRuleRequest"}, + "output":{"shape":"CreateRuleResponse"}, + "errors":[ + {"shape":"WAFStaleDataException"}, + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFDisallowedNameException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFLimitsExceededException"} + ] + }, + "CreateSizeConstraintSet":{ + "name":"CreateSizeConstraintSet", + 
"http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSizeConstraintSetRequest"}, + "output":{"shape":"CreateSizeConstraintSetResponse"}, + "errors":[ + {"shape":"WAFStaleDataException"}, + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFDisallowedNameException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFLimitsExceededException"} + ] + }, + "CreateSqlInjectionMatchSet":{ + "name":"CreateSqlInjectionMatchSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSqlInjectionMatchSetRequest"}, + "output":{"shape":"CreateSqlInjectionMatchSetResponse"}, + "errors":[ + {"shape":"WAFDisallowedNameException"}, + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFStaleDataException"}, + {"shape":"WAFLimitsExceededException"} + ] + }, + "CreateWebACL":{ + "name":"CreateWebACL", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateWebACLRequest"}, + "output":{"shape":"CreateWebACLResponse"}, + "errors":[ + {"shape":"WAFStaleDataException"}, + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFDisallowedNameException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFLimitsExceededException"} + ] + }, + "CreateXssMatchSet":{ + "name":"CreateXssMatchSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateXssMatchSetRequest"}, + "output":{"shape":"CreateXssMatchSetResponse"}, + "errors":[ + {"shape":"WAFDisallowedNameException"}, + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFStaleDataException"}, + {"shape":"WAFLimitsExceededException"} + ] + }, + "DeleteByteMatchSet":{ + "name":"DeleteByteMatchSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteByteMatchSetRequest"}, + "output":{"shape":"DeleteByteMatchSetResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFReferencedItemException"}, + {"shape":"WAFStaleDataException"}, + {"shape":"WAFNonEmptyEntityException"} + ] + }, + "DeleteIPSet":{ + "name":"DeleteIPSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteIPSetRequest"}, + "output":{"shape":"DeleteIPSetResponse"}, + "errors":[ + {"shape":"WAFStaleDataException"}, + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFReferencedItemException"}, + {"shape":"WAFNonEmptyEntityException"} + ] + }, + "DeleteRule":{ + "name":"DeleteRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRuleRequest"}, + "output":{"shape":"DeleteRuleResponse"}, + "errors":[ + {"shape":"WAFStaleDataException"}, + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFReferencedItemException"}, + {"shape":"WAFNonEmptyEntityException"} + ] + }, + "DeleteSizeConstraintSet":{ + "name":"DeleteSizeConstraintSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSizeConstraintSetRequest"}, + "output":{"shape":"DeleteSizeConstraintSetResponse"}, + "errors":[ + {"shape":"WAFStaleDataException"}, + 
{"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFReferencedItemException"}, + {"shape":"WAFNonEmptyEntityException"} + ] + }, + "DeleteSqlInjectionMatchSet":{ + "name":"DeleteSqlInjectionMatchSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSqlInjectionMatchSetRequest"}, + "output":{"shape":"DeleteSqlInjectionMatchSetResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFReferencedItemException"}, + {"shape":"WAFStaleDataException"}, + {"shape":"WAFNonEmptyEntityException"} + ] + }, + "DeleteWebACL":{ + "name":"DeleteWebACL", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteWebACLRequest"}, + "output":{"shape":"DeleteWebACLResponse"}, + "errors":[ + {"shape":"WAFStaleDataException"}, + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFReferencedItemException"}, + {"shape":"WAFNonEmptyEntityException"} + ] + }, + "DeleteXssMatchSet":{ + "name":"DeleteXssMatchSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteXssMatchSetRequest"}, + "output":{"shape":"DeleteXssMatchSetResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFReferencedItemException"}, + {"shape":"WAFStaleDataException"}, + {"shape":"WAFNonEmptyEntityException"} + ] + }, + "GetByteMatchSet":{ + "name":"GetByteMatchSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetByteMatchSetRequest"}, + "output":{"shape":"GetByteMatchSetResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFNonexistentItemException"} + ] + }, + "GetChangeToken":{ + "name":"GetChangeToken", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetChangeTokenRequest"}, + "output":{"shape":"GetChangeTokenResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"} + ] + }, + "GetChangeTokenStatus":{ + "name":"GetChangeTokenStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetChangeTokenStatusRequest"}, + "output":{"shape":"GetChangeTokenStatusResponse"}, + "errors":[ + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFInternalErrorException"} + ] + }, + "GetIPSet":{ + "name":"GetIPSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetIPSetRequest"}, + "output":{"shape":"GetIPSetResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFNonexistentItemException"} + ] + }, + "GetRule":{ + "name":"GetRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetRuleRequest"}, + "output":{"shape":"GetRuleResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFNonexistentItemException"} + ] + }, + "GetSampledRequests":{ + "name":"GetSampledRequests", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetSampledRequestsRequest"}, + "output":{"shape":"GetSampledRequestsResponse"}, + "errors":[ + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFInternalErrorException"} + ] + }, + "GetSizeConstraintSet":{ + 
"name":"GetSizeConstraintSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetSizeConstraintSetRequest"}, + "output":{"shape":"GetSizeConstraintSetResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFNonexistentItemException"} + ] + }, + "GetSqlInjectionMatchSet":{ + "name":"GetSqlInjectionMatchSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetSqlInjectionMatchSetRequest"}, + "output":{"shape":"GetSqlInjectionMatchSetResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFNonexistentItemException"} + ] + }, + "GetWebACL":{ + "name":"GetWebACL", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetWebACLRequest"}, + "output":{"shape":"GetWebACLResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFNonexistentItemException"} + ] + }, + "GetXssMatchSet":{ + "name":"GetXssMatchSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetXssMatchSetRequest"}, + "output":{"shape":"GetXssMatchSetResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFNonexistentItemException"} + ] + }, + "ListByteMatchSets":{ + "name":"ListByteMatchSets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListByteMatchSetsRequest"}, + "output":{"shape":"ListByteMatchSetsResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"} + ] + }, + "ListIPSets":{ + "name":"ListIPSets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListIPSetsRequest"}, + "output":{"shape":"ListIPSetsResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"} + ] + }, + "ListRules":{ + "name":"ListRules", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListRulesRequest"}, + "output":{"shape":"ListRulesResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"} + ] + }, + "ListSizeConstraintSets":{ + "name":"ListSizeConstraintSets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListSizeConstraintSetsRequest"}, + "output":{"shape":"ListSizeConstraintSetsResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"} + ] + }, + "ListSqlInjectionMatchSets":{ + "name":"ListSqlInjectionMatchSets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListSqlInjectionMatchSetsRequest"}, + "output":{"shape":"ListSqlInjectionMatchSetsResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"} + ] + }, + "ListWebACLs":{ + "name":"ListWebACLs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListWebACLsRequest"}, + "output":{"shape":"ListWebACLsResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"} + ] + }, + "ListXssMatchSets":{ + "name":"ListXssMatchSets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListXssMatchSetsRequest"}, + "output":{"shape":"ListXssMatchSetsResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"} + ] + }, + "UpdateByteMatchSet":{ + 
"name":"UpdateByteMatchSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateByteMatchSetRequest"}, + "output":{"shape":"UpdateByteMatchSetResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFInvalidOperationException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFNonexistentContainerException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFStaleDataException"}, + {"shape":"WAFLimitsExceededException"} + ] + }, + "UpdateIPSet":{ + "name":"UpdateIPSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateIPSetRequest"}, + "output":{"shape":"UpdateIPSetResponse"}, + "errors":[ + {"shape":"WAFStaleDataException"}, + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFInvalidOperationException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFNonexistentContainerException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFReferencedItemException"}, + {"shape":"WAFLimitsExceededException"} + ] + }, + "UpdateRule":{ + "name":"UpdateRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateRuleRequest"}, + "output":{"shape":"UpdateRuleResponse"}, + "errors":[ + {"shape":"WAFStaleDataException"}, + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFInvalidOperationException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFNonexistentContainerException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFReferencedItemException"}, + {"shape":"WAFLimitsExceededException"} + ] + }, + "UpdateSizeConstraintSet":{ + "name":"UpdateSizeConstraintSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateSizeConstraintSetRequest"}, + "output":{"shape":"UpdateSizeConstraintSetResponse"}, + "errors":[ + {"shape":"WAFStaleDataException"}, + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFInvalidOperationException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFNonexistentContainerException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFReferencedItemException"}, + {"shape":"WAFLimitsExceededException"} + ] + }, + "UpdateSqlInjectionMatchSet":{ + "name":"UpdateSqlInjectionMatchSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateSqlInjectionMatchSetRequest"}, + "output":{"shape":"UpdateSqlInjectionMatchSetResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFInvalidOperationException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFNonexistentContainerException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFStaleDataException"}, + {"shape":"WAFLimitsExceededException"} + ] + }, + "UpdateWebACL":{ + "name":"UpdateWebACL", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateWebACLRequest"}, + "output":{"shape":"UpdateWebACLResponse"}, + "errors":[ + {"shape":"WAFStaleDataException"}, + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFInvalidOperationException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFNonexistentContainerException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFReferencedItemException"}, + {"shape":"WAFLimitsExceededException"} + ] + }, + 
"UpdateXssMatchSet":{ + "name":"UpdateXssMatchSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateXssMatchSetRequest"}, + "output":{"shape":"UpdateXssMatchSetResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFInvalidOperationException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFNonexistentContainerException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFStaleDataException"}, + {"shape":"WAFLimitsExceededException"} + ] + } + }, + "shapes":{ + "Action":{"type":"string"}, + "ActivatedRule":{ + "type":"structure", + "required":[ + "Priority", + "RuleId", + "Action" + ], + "members":{ + "Priority":{"shape":"RulePriority"}, + "RuleId":{"shape":"ResourceId"}, + "Action":{"shape":"WafAction"} + } + }, + "ActivatedRules":{ + "type":"list", + "member":{"shape":"ActivatedRule"} + }, + "ByteMatchSet":{ + "type":"structure", + "required":[ + "ByteMatchSetId", + "ByteMatchTuples" + ], + "members":{ + "ByteMatchSetId":{"shape":"ResourceId"}, + "Name":{"shape":"ResourceName"}, + "ByteMatchTuples":{"shape":"ByteMatchTuples"} + } + }, + "ByteMatchSetSummaries":{ + "type":"list", + "member":{"shape":"ByteMatchSetSummary"} + }, + "ByteMatchSetSummary":{ + "type":"structure", + "required":[ + "ByteMatchSetId", + "Name" + ], + "members":{ + "ByteMatchSetId":{"shape":"ResourceId"}, + "Name":{"shape":"ResourceName"} + } + }, + "ByteMatchSetUpdate":{ + "type":"structure", + "required":[ + "Action", + "ByteMatchTuple" + ], + "members":{ + "Action":{"shape":"ChangeAction"}, + "ByteMatchTuple":{"shape":"ByteMatchTuple"} + } + }, + "ByteMatchSetUpdates":{ + "type":"list", + "member":{"shape":"ByteMatchSetUpdate"} + }, + "ByteMatchTargetString":{"type":"blob"}, + "ByteMatchTuple":{ + "type":"structure", + "required":[ + "FieldToMatch", + "TargetString", + "TextTransformation", + "PositionalConstraint" + ], + "members":{ + "FieldToMatch":{"shape":"FieldToMatch"}, + "TargetString":{"shape":"ByteMatchTargetString"}, + "TextTransformation":{"shape":"TextTransformation"}, + "PositionalConstraint":{"shape":"PositionalConstraint"} + } + }, + "ByteMatchTuples":{ + "type":"list", + "member":{"shape":"ByteMatchTuple"} + }, + "ChangeAction":{ + "type":"string", + "enum":[ + "INSERT", + "DELETE" + ] + }, + "ChangeToken":{"type":"string"}, + "ChangeTokenStatus":{ + "type":"string", + "enum":[ + "PROVISIONED", + "PENDING", + "INSYNC" + ] + }, + "ComparisonOperator":{ + "type":"string", + "enum":[ + "EQ", + "NE", + "LE", + "LT", + "GE", + "GT" + ] + }, + "Country":{"type":"string"}, + "CreateByteMatchSetRequest":{ + "type":"structure", + "required":[ + "Name", + "ChangeToken" + ], + "members":{ + "Name":{"shape":"ResourceName"}, + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "CreateByteMatchSetResponse":{ + "type":"structure", + "members":{ + "ByteMatchSet":{"shape":"ByteMatchSet"}, + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "CreateIPSetRequest":{ + "type":"structure", + "required":[ + "Name", + "ChangeToken" + ], + "members":{ + "Name":{"shape":"ResourceName"}, + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "CreateIPSetResponse":{ + "type":"structure", + "members":{ + "IPSet":{"shape":"IPSet"}, + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "CreateRuleRequest":{ + "type":"structure", + "required":[ + "Name", + "MetricName", + "ChangeToken" + ], + "members":{ + "Name":{"shape":"ResourceName"}, + "MetricName":{"shape":"MetricName"}, + "ChangeToken":{"shape":"ChangeToken"} + } + 
}, + "CreateRuleResponse":{ + "type":"structure", + "members":{ + "Rule":{"shape":"Rule"}, + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "CreateSizeConstraintSetRequest":{ + "type":"structure", + "required":[ + "Name", + "ChangeToken" + ], + "members":{ + "Name":{"shape":"ResourceName"}, + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "CreateSizeConstraintSetResponse":{ + "type":"structure", + "members":{ + "SizeConstraintSet":{"shape":"SizeConstraintSet"}, + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "CreateSqlInjectionMatchSetRequest":{ + "type":"structure", + "required":[ + "Name", + "ChangeToken" + ], + "members":{ + "Name":{"shape":"ResourceName"}, + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "CreateSqlInjectionMatchSetResponse":{ + "type":"structure", + "members":{ + "SqlInjectionMatchSet":{"shape":"SqlInjectionMatchSet"}, + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "CreateWebACLRequest":{ + "type":"structure", + "required":[ + "Name", + "MetricName", + "DefaultAction", + "ChangeToken" + ], + "members":{ + "Name":{"shape":"ResourceName"}, + "MetricName":{"shape":"MetricName"}, + "DefaultAction":{"shape":"WafAction"}, + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "CreateWebACLResponse":{ + "type":"structure", + "members":{ + "WebACL":{"shape":"WebACL"}, + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "CreateXssMatchSetRequest":{ + "type":"structure", + "required":[ + "Name", + "ChangeToken" + ], + "members":{ + "Name":{"shape":"ResourceName"}, + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "CreateXssMatchSetResponse":{ + "type":"structure", + "members":{ + "XssMatchSet":{"shape":"XssMatchSet"}, + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "DeleteByteMatchSetRequest":{ + "type":"structure", + "required":[ + "ByteMatchSetId", + "ChangeToken" + ], + "members":{ + "ByteMatchSetId":{"shape":"ResourceId"}, + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "DeleteByteMatchSetResponse":{ + "type":"structure", + "members":{ + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "DeleteIPSetRequest":{ + "type":"structure", + "required":[ + "IPSetId", + "ChangeToken" + ], + "members":{ + "IPSetId":{"shape":"ResourceId"}, + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "DeleteIPSetResponse":{ + "type":"structure", + "members":{ + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "DeleteRuleRequest":{ + "type":"structure", + "required":[ + "RuleId", + "ChangeToken" + ], + "members":{ + "RuleId":{"shape":"ResourceId"}, + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "DeleteRuleResponse":{ + "type":"structure", + "members":{ + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "DeleteSizeConstraintSetRequest":{ + "type":"structure", + "required":[ + "SizeConstraintSetId", + "ChangeToken" + ], + "members":{ + "SizeConstraintSetId":{"shape":"ResourceId"}, + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "DeleteSizeConstraintSetResponse":{ + "type":"structure", + "members":{ + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "DeleteSqlInjectionMatchSetRequest":{ + "type":"structure", + "required":[ + "SqlInjectionMatchSetId", + "ChangeToken" + ], + "members":{ + "SqlInjectionMatchSetId":{"shape":"ResourceId"}, + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "DeleteSqlInjectionMatchSetResponse":{ + "type":"structure", + "members":{ + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "DeleteWebACLRequest":{ + "type":"structure", + "required":[ + "WebACLId", + "ChangeToken" + ], + "members":{ + "WebACLId":{"shape":"ResourceId"}, + 
"ChangeToken":{"shape":"ChangeToken"} + } + }, + "DeleteWebACLResponse":{ + "type":"structure", + "members":{ + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "DeleteXssMatchSetRequest":{ + "type":"structure", + "required":[ + "XssMatchSetId", + "ChangeToken" + ], + "members":{ + "XssMatchSetId":{"shape":"ResourceId"}, + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "DeleteXssMatchSetResponse":{ + "type":"structure", + "members":{ + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "FieldToMatch":{ + "type":"structure", + "required":["Type"], + "members":{ + "Type":{"shape":"MatchFieldType"}, + "Data":{"shape":"MatchFieldData"} + } + }, + "GetByteMatchSetRequest":{ + "type":"structure", + "required":["ByteMatchSetId"], + "members":{ + "ByteMatchSetId":{"shape":"ResourceId"} + } + }, + "GetByteMatchSetResponse":{ + "type":"structure", + "members":{ + "ByteMatchSet":{"shape":"ByteMatchSet"} + } + }, + "GetChangeTokenRequest":{ + "type":"structure", + "members":{ + } + }, + "GetChangeTokenResponse":{ + "type":"structure", + "members":{ + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "GetChangeTokenStatusRequest":{ + "type":"structure", + "required":["ChangeToken"], + "members":{ + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "GetChangeTokenStatusResponse":{ + "type":"structure", + "members":{ + "ChangeTokenStatus":{"shape":"ChangeTokenStatus"} + } + }, + "GetIPSetRequest":{ + "type":"structure", + "required":["IPSetId"], + "members":{ + "IPSetId":{"shape":"ResourceId"} + } + }, + "GetIPSetResponse":{ + "type":"structure", + "members":{ + "IPSet":{"shape":"IPSet"} + } + }, + "GetRuleRequest":{ + "type":"structure", + "required":["RuleId"], + "members":{ + "RuleId":{"shape":"ResourceId"} + } + }, + "GetRuleResponse":{ + "type":"structure", + "members":{ + "Rule":{"shape":"Rule"} + } + }, + "GetSampledRequestsRequest":{ + "type":"structure", + "required":[ + "WebAclId", + "RuleId", + "TimeWindow", + "MaxItems" + ], + "members":{ + "WebAclId":{"shape":"ResourceId"}, + "RuleId":{"shape":"ResourceId"}, + "TimeWindow":{"shape":"TimeWindow"}, + "MaxItems":{"shape":"ListMaxItems"} + } + }, + "GetSampledRequestsResponse":{ + "type":"structure", + "members":{ + "SampledRequests":{"shape":"SampledHTTPRequests"}, + "PopulationSize":{"shape":"PopulationSize"}, + "TimeWindow":{"shape":"TimeWindow"} + } + }, + "GetSizeConstraintSetRequest":{ + "type":"structure", + "required":["SizeConstraintSetId"], + "members":{ + "SizeConstraintSetId":{"shape":"ResourceId"} + } + }, + "GetSizeConstraintSetResponse":{ + "type":"structure", + "members":{ + "SizeConstraintSet":{"shape":"SizeConstraintSet"} + } + }, + "GetSqlInjectionMatchSetRequest":{ + "type":"structure", + "required":["SqlInjectionMatchSetId"], + "members":{ + "SqlInjectionMatchSetId":{"shape":"ResourceId"} + } + }, + "GetSqlInjectionMatchSetResponse":{ + "type":"structure", + "members":{ + "SqlInjectionMatchSet":{"shape":"SqlInjectionMatchSet"} + } + }, + "GetWebACLRequest":{ + "type":"structure", + "required":["WebACLId"], + "members":{ + "WebACLId":{"shape":"ResourceId"} + } + }, + "GetWebACLResponse":{ + "type":"structure", + "members":{ + "WebACL":{"shape":"WebACL"} + } + }, + "GetXssMatchSetRequest":{ + "type":"structure", + "required":["XssMatchSetId"], + "members":{ + "XssMatchSetId":{"shape":"ResourceId"} + } + }, + "GetXssMatchSetResponse":{ + "type":"structure", + "members":{ + "XssMatchSet":{"shape":"XssMatchSet"} + } + }, + "HTTPHeader":{ + "type":"structure", + "members":{ + "Name":{"shape":"HeaderName"}, + 
"Value":{"shape":"HeaderValue"} + } + }, + "HTTPHeaders":{ + "type":"list", + "member":{"shape":"HTTPHeader"} + }, + "HTTPMethod":{"type":"string"}, + "HTTPRequest":{ + "type":"structure", + "members":{ + "ClientIP":{"shape":"IPString"}, + "Country":{"shape":"Country"}, + "URI":{"shape":"URIString"}, + "Method":{"shape":"HTTPMethod"}, + "HTTPVersion":{"shape":"HTTPVersion"}, + "Headers":{"shape":"HTTPHeaders"} + } + }, + "HTTPVersion":{"type":"string"}, + "HeaderName":{"type":"string"}, + "HeaderValue":{"type":"string"}, + "IPSet":{ + "type":"structure", + "required":[ + "IPSetId", + "IPSetDescriptors" + ], + "members":{ + "IPSetId":{"shape":"ResourceId"}, + "Name":{"shape":"ResourceName"}, + "IPSetDescriptors":{"shape":"IPSetDescriptors"} + } + }, + "IPSetDescriptor":{ + "type":"structure", + "required":[ + "Type", + "Value" + ], + "members":{ + "Type":{"shape":"IPSetDescriptorType"}, + "Value":{"shape":"IPSetDescriptorValue"} + } + }, + "IPSetDescriptorType":{ + "type":"string", + "enum":["IPV4"] + }, + "IPSetDescriptorValue":{"type":"string"}, + "IPSetDescriptors":{ + "type":"list", + "member":{"shape":"IPSetDescriptor"} + }, + "IPSetSummaries":{ + "type":"list", + "member":{"shape":"IPSetSummary"} + }, + "IPSetSummary":{ + "type":"structure", + "required":[ + "IPSetId", + "Name" + ], + "members":{ + "IPSetId":{"shape":"ResourceId"}, + "Name":{"shape":"ResourceName"} + } + }, + "IPSetUpdate":{ + "type":"structure", + "required":[ + "Action", + "IPSetDescriptor" + ], + "members":{ + "Action":{"shape":"ChangeAction"}, + "IPSetDescriptor":{"shape":"IPSetDescriptor"} + } + }, + "IPSetUpdates":{ + "type":"list", + "member":{"shape":"IPSetUpdate"} + }, + "IPString":{"type":"string"}, + "ListByteMatchSetsRequest":{ + "type":"structure", + "required":["Limit"], + "members":{ + "NextMarker":{"shape":"NextMarker"}, + "Limit":{"shape":"PaginationLimit"} + } + }, + "ListByteMatchSetsResponse":{ + "type":"structure", + "members":{ + "NextMarker":{"shape":"NextMarker"}, + "ByteMatchSets":{"shape":"ByteMatchSetSummaries"} + } + }, + "ListIPSetsRequest":{ + "type":"structure", + "required":["Limit"], + "members":{ + "NextMarker":{"shape":"NextMarker"}, + "Limit":{"shape":"PaginationLimit"} + } + }, + "ListIPSetsResponse":{ + "type":"structure", + "members":{ + "NextMarker":{"shape":"NextMarker"}, + "IPSets":{"shape":"IPSetSummaries"} + } + }, + "ListMaxItems":{ + "type":"long", + "max":100, + "min":1 + }, + "ListRulesRequest":{ + "type":"structure", + "required":["Limit"], + "members":{ + "NextMarker":{"shape":"NextMarker"}, + "Limit":{"shape":"PaginationLimit"} + } + }, + "ListRulesResponse":{ + "type":"structure", + "members":{ + "NextMarker":{"shape":"NextMarker"}, + "Rules":{"shape":"RuleSummaries"} + } + }, + "ListSizeConstraintSetsRequest":{ + "type":"structure", + "required":["Limit"], + "members":{ + "NextMarker":{"shape":"NextMarker"}, + "Limit":{"shape":"PaginationLimit"} + } + }, + "ListSizeConstraintSetsResponse":{ + "type":"structure", + "members":{ + "NextMarker":{"shape":"NextMarker"}, + "SizeConstraintSets":{"shape":"SizeConstraintSetSummaries"} + } + }, + "ListSqlInjectionMatchSetsRequest":{ + "type":"structure", + "required":["Limit"], + "members":{ + "NextMarker":{"shape":"NextMarker"}, + "Limit":{"shape":"PaginationLimit"} + } + }, + "ListSqlInjectionMatchSetsResponse":{ + "type":"structure", + "members":{ + "NextMarker":{"shape":"NextMarker"}, + "SqlInjectionMatchSets":{"shape":"SqlInjectionMatchSetSummaries"} + } + }, + "ListWebACLsRequest":{ + "type":"structure", + 
"required":["Limit"], + "members":{ + "NextMarker":{"shape":"NextMarker"}, + "Limit":{"shape":"PaginationLimit"} + } + }, + "ListWebACLsResponse":{ + "type":"structure", + "members":{ + "NextMarker":{"shape":"NextMarker"}, + "WebACLs":{"shape":"WebACLSummaries"} + } + }, + "ListXssMatchSetsRequest":{ + "type":"structure", + "required":["Limit"], + "members":{ + "NextMarker":{"shape":"NextMarker"}, + "Limit":{"shape":"PaginationLimit"} + } + }, + "ListXssMatchSetsResponse":{ + "type":"structure", + "members":{ + "NextMarker":{"shape":"NextMarker"}, + "XssMatchSets":{"shape":"XssMatchSetSummaries"} + } + }, + "MatchFieldData":{"type":"string"}, + "MatchFieldType":{ + "type":"string", + "enum":[ + "URI", + "QUERY_STRING", + "HEADER", + "METHOD", + "BODY" + ] + }, + "MetricName":{"type":"string"}, + "Negated":{"type":"boolean"}, + "NextMarker":{ + "type":"string", + "min":1 + }, + "PaginationLimit":{ + "type":"integer", + "max":100, + "min":1 + }, + "ParameterExceptionField":{ + "type":"string", + "enum":[ + "CHANGE_ACTION", + "WAF_ACTION", + "PREDICATE_TYPE", + "IPSET_TYPE", + "BYTE_MATCH_FIELD_TYPE", + "SQL_INJECTION_MATCH_FIELD_TYPE", + "BYTE_MATCH_TEXT_TRANSFORMATION", + "BYTE_MATCH_POSITIONAL_CONSTRAINT", + "SIZE_CONSTRAINT_COMPARISON_OPERATOR" + ] + }, + "ParameterExceptionParameter":{ + "type":"string", + "min":1 + }, + "ParameterExceptionReason":{ + "type":"string", + "enum":[ + "INVALID_OPTION", + "ILLEGAL_COMBINATION" + ] + }, + "PopulationSize":{"type":"long"}, + "PositionalConstraint":{ + "type":"string", + "enum":[ + "EXACTLY", + "STARTS_WITH", + "ENDS_WITH", + "CONTAINS", + "CONTAINS_WORD" + ] + }, + "Predicate":{ + "type":"structure", + "required":[ + "Negated", + "Type", + "DataId" + ], + "members":{ + "Negated":{"shape":"Negated"}, + "Type":{"shape":"PredicateType"}, + "DataId":{"shape":"ResourceId"} + } + }, + "PredicateType":{ + "type":"string", + "enum":[ + "IPMatch", + "ByteMatch", + "SqlInjectionMatch", + "SizeConstraint", + "XssMatch" + ] + }, + "Predicates":{ + "type":"list", + "member":{"shape":"Predicate"} + }, + "ResourceId":{ + "type":"string", + "max":128, + "min":1 + }, + "ResourceName":{ + "type":"string", + "max":128, + "min":1 + }, + "Rule":{ + "type":"structure", + "required":[ + "RuleId", + "Predicates" + ], + "members":{ + "RuleId":{"shape":"ResourceId"}, + "Name":{"shape":"ResourceName"}, + "MetricName":{"shape":"MetricName"}, + "Predicates":{"shape":"Predicates"} + } + }, + "RulePriority":{"type":"integer"}, + "RuleSummaries":{ + "type":"list", + "member":{"shape":"RuleSummary"} + }, + "RuleSummary":{ + "type":"structure", + "required":[ + "RuleId", + "Name" + ], + "members":{ + "RuleId":{"shape":"ResourceId"}, + "Name":{"shape":"ResourceName"} + } + }, + "RuleUpdate":{ + "type":"structure", + "required":[ + "Action", + "Predicate" + ], + "members":{ + "Action":{"shape":"ChangeAction"}, + "Predicate":{"shape":"Predicate"} + } + }, + "RuleUpdates":{ + "type":"list", + "member":{"shape":"RuleUpdate"} + }, + "SampleWeight":{ + "type":"long", + "min":0 + }, + "SampledHTTPRequest":{ + "type":"structure", + "required":[ + "Request", + "Weight" + ], + "members":{ + "Request":{"shape":"HTTPRequest"}, + "Weight":{"shape":"SampleWeight"}, + "Timestamp":{"shape":"Timestamp"}, + "Action":{"shape":"Action"} + } + }, + "SampledHTTPRequests":{ + "type":"list", + "member":{"shape":"SampledHTTPRequest"} + }, + "Size":{ + "type":"long", + "max":21474836480, + "min":0 + }, + "SizeConstraint":{ + "type":"structure", + "required":[ + "FieldToMatch", + "TextTransformation", + 
"ComparisonOperator", + "Size" + ], + "members":{ + "FieldToMatch":{"shape":"FieldToMatch"}, + "TextTransformation":{"shape":"TextTransformation"}, + "ComparisonOperator":{"shape":"ComparisonOperator"}, + "Size":{"shape":"Size"} + } + }, + "SizeConstraintSet":{ + "type":"structure", + "required":[ + "SizeConstraintSetId", + "SizeConstraints" + ], + "members":{ + "SizeConstraintSetId":{"shape":"ResourceId"}, + "Name":{"shape":"ResourceName"}, + "SizeConstraints":{"shape":"SizeConstraints"} + } + }, + "SizeConstraintSetSummaries":{ + "type":"list", + "member":{"shape":"SizeConstraintSetSummary"} + }, + "SizeConstraintSetSummary":{ + "type":"structure", + "required":[ + "SizeConstraintSetId", + "Name" + ], + "members":{ + "SizeConstraintSetId":{"shape":"ResourceId"}, + "Name":{"shape":"ResourceName"} + } + }, + "SizeConstraintSetUpdate":{ + "type":"structure", + "required":[ + "Action", + "SizeConstraint" + ], + "members":{ + "Action":{"shape":"ChangeAction"}, + "SizeConstraint":{"shape":"SizeConstraint"} + } + }, + "SizeConstraintSetUpdates":{ + "type":"list", + "member":{"shape":"SizeConstraintSetUpdate"} + }, + "SizeConstraints":{ + "type":"list", + "member":{"shape":"SizeConstraint"} + }, + "SqlInjectionMatchSet":{ + "type":"structure", + "required":[ + "SqlInjectionMatchSetId", + "SqlInjectionMatchTuples" + ], + "members":{ + "SqlInjectionMatchSetId":{"shape":"ResourceId"}, + "Name":{"shape":"ResourceName"}, + "SqlInjectionMatchTuples":{"shape":"SqlInjectionMatchTuples"} + } + }, + "SqlInjectionMatchSetSummaries":{ + "type":"list", + "member":{"shape":"SqlInjectionMatchSetSummary"} + }, + "SqlInjectionMatchSetSummary":{ + "type":"structure", + "required":[ + "SqlInjectionMatchSetId", + "Name" + ], + "members":{ + "SqlInjectionMatchSetId":{"shape":"ResourceId"}, + "Name":{"shape":"ResourceName"} + } + }, + "SqlInjectionMatchSetUpdate":{ + "type":"structure", + "required":[ + "Action", + "SqlInjectionMatchTuple" + ], + "members":{ + "Action":{"shape":"ChangeAction"}, + "SqlInjectionMatchTuple":{"shape":"SqlInjectionMatchTuple"} + } + }, + "SqlInjectionMatchSetUpdates":{ + "type":"list", + "member":{"shape":"SqlInjectionMatchSetUpdate"} + }, + "SqlInjectionMatchTuple":{ + "type":"structure", + "required":[ + "FieldToMatch", + "TextTransformation" + ], + "members":{ + "FieldToMatch":{"shape":"FieldToMatch"}, + "TextTransformation":{"shape":"TextTransformation"} + } + }, + "SqlInjectionMatchTuples":{ + "type":"list", + "member":{"shape":"SqlInjectionMatchTuple"} + }, + "TextTransformation":{ + "type":"string", + "enum":[ + "NONE", + "COMPRESS_WHITE_SPACE", + "HTML_ENTITY_DECODE", + "LOWERCASE", + "CMD_LINE", + "URL_DECODE" + ] + }, + "TimeWindow":{ + "type":"structure", + "required":[ + "StartTime", + "EndTime" + ], + "members":{ + "StartTime":{"shape":"Timestamp"}, + "EndTime":{"shape":"Timestamp"} + } + }, + "Timestamp":{"type":"timestamp"}, + "URIString":{"type":"string"}, + "UpdateByteMatchSetRequest":{ + "type":"structure", + "required":[ + "ByteMatchSetId", + "ChangeToken", + "Updates" + ], + "members":{ + "ByteMatchSetId":{"shape":"ResourceId"}, + "ChangeToken":{"shape":"ChangeToken"}, + "Updates":{"shape":"ByteMatchSetUpdates"} + } + }, + "UpdateByteMatchSetResponse":{ + "type":"structure", + "members":{ + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "UpdateIPSetRequest":{ + "type":"structure", + "required":[ + "IPSetId", + "ChangeToken", + "Updates" + ], + "members":{ + "IPSetId":{"shape":"ResourceId"}, + "ChangeToken":{"shape":"ChangeToken"}, + "Updates":{"shape":"IPSetUpdates"} 
+ } + }, + "UpdateIPSetResponse":{ + "type":"structure", + "members":{ + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "UpdateRuleRequest":{ + "type":"structure", + "required":[ + "RuleId", + "ChangeToken", + "Updates" + ], + "members":{ + "RuleId":{"shape":"ResourceId"}, + "ChangeToken":{"shape":"ChangeToken"}, + "Updates":{"shape":"RuleUpdates"} + } + }, + "UpdateRuleResponse":{ + "type":"structure", + "members":{ + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "UpdateSizeConstraintSetRequest":{ + "type":"structure", + "required":[ + "SizeConstraintSetId", + "ChangeToken", + "Updates" + ], + "members":{ + "SizeConstraintSetId":{"shape":"ResourceId"}, + "ChangeToken":{"shape":"ChangeToken"}, + "Updates":{"shape":"SizeConstraintSetUpdates"} + } + }, + "UpdateSizeConstraintSetResponse":{ + "type":"structure", + "members":{ + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "UpdateSqlInjectionMatchSetRequest":{ + "type":"structure", + "required":[ + "SqlInjectionMatchSetId", + "ChangeToken", + "Updates" + ], + "members":{ + "SqlInjectionMatchSetId":{"shape":"ResourceId"}, + "ChangeToken":{"shape":"ChangeToken"}, + "Updates":{"shape":"SqlInjectionMatchSetUpdates"} + } + }, + "UpdateSqlInjectionMatchSetResponse":{ + "type":"structure", + "members":{ + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "UpdateWebACLRequest":{ + "type":"structure", + "required":[ + "WebACLId", + "ChangeToken" + ], + "members":{ + "WebACLId":{"shape":"ResourceId"}, + "ChangeToken":{"shape":"ChangeToken"}, + "Updates":{"shape":"WebACLUpdates"}, + "DefaultAction":{"shape":"WafAction"} + } + }, + "UpdateWebACLResponse":{ + "type":"structure", + "members":{ + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "UpdateXssMatchSetRequest":{ + "type":"structure", + "required":[ + "XssMatchSetId", + "ChangeToken", + "Updates" + ], + "members":{ + "XssMatchSetId":{"shape":"ResourceId"}, + "ChangeToken":{"shape":"ChangeToken"}, + "Updates":{"shape":"XssMatchSetUpdates"} + } + }, + "UpdateXssMatchSetResponse":{ + "type":"structure", + "members":{ + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "WAFDisallowedNameException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "exception":true + }, + "WAFInternalErrorException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "exception":true, + "fault":true + }, + "WAFInvalidAccountException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "WAFInvalidOperationException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "exception":true + }, + "WAFInvalidParameterException":{ + "type":"structure", + "members":{ + "field":{"shape":"ParameterExceptionField"}, + "parameter":{"shape":"ParameterExceptionParameter"}, + "reason":{"shape":"ParameterExceptionReason"} + }, + "exception":true + }, + "WAFLimitsExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "exception":true + }, + "WAFNonEmptyEntityException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "exception":true + }, + "WAFNonexistentContainerException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "exception":true + }, + "WAFNonexistentItemException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "exception":true + }, + "WAFReferencedItemException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "exception":true + }, + 
"WAFStaleDataException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "exception":true + }, + "WafAction":{ + "type":"structure", + "required":["Type"], + "members":{ + "Type":{"shape":"WafActionType"} + } + }, + "WafActionType":{ + "type":"string", + "enum":[ + "BLOCK", + "ALLOW", + "COUNT" + ] + }, + "WebACL":{ + "type":"structure", + "required":[ + "WebACLId", + "DefaultAction", + "Rules" + ], + "members":{ + "WebACLId":{"shape":"ResourceId"}, + "Name":{"shape":"ResourceName"}, + "MetricName":{"shape":"MetricName"}, + "DefaultAction":{"shape":"WafAction"}, + "Rules":{"shape":"ActivatedRules"} + } + }, + "WebACLSummaries":{ + "type":"list", + "member":{"shape":"WebACLSummary"} + }, + "WebACLSummary":{ + "type":"structure", + "required":[ + "WebACLId", + "Name" + ], + "members":{ + "WebACLId":{"shape":"ResourceId"}, + "Name":{"shape":"ResourceName"} + } + }, + "WebACLUpdate":{ + "type":"structure", + "required":[ + "Action", + "ActivatedRule" + ], + "members":{ + "Action":{"shape":"ChangeAction"}, + "ActivatedRule":{"shape":"ActivatedRule"} + } + }, + "WebACLUpdates":{ + "type":"list", + "member":{"shape":"WebACLUpdate"} + }, + "XssMatchSet":{ + "type":"structure", + "required":[ + "XssMatchSetId", + "XssMatchTuples" + ], + "members":{ + "XssMatchSetId":{"shape":"ResourceId"}, + "Name":{"shape":"ResourceName"}, + "XssMatchTuples":{"shape":"XssMatchTuples"} + } + }, + "XssMatchSetSummaries":{ + "type":"list", + "member":{"shape":"XssMatchSetSummary"} + }, + "XssMatchSetSummary":{ + "type":"structure", + "required":[ + "XssMatchSetId", + "Name" + ], + "members":{ + "XssMatchSetId":{"shape":"ResourceId"}, + "Name":{"shape":"ResourceName"} + } + }, + "XssMatchSetUpdate":{ + "type":"structure", + "required":[ + "Action", + "XssMatchTuple" + ], + "members":{ + "Action":{"shape":"ChangeAction"}, + "XssMatchTuple":{"shape":"XssMatchTuple"} + } + }, + "XssMatchSetUpdates":{ + "type":"list", + "member":{"shape":"XssMatchSetUpdate"} + }, + "XssMatchTuple":{ + "type":"structure", + "required":[ + "FieldToMatch", + "TextTransformation" + ], + "members":{ + "FieldToMatch":{"shape":"FieldToMatch"}, + "TextTransformation":{"shape":"TextTransformation"} + } + }, + "XssMatchTuples":{ + "type":"list", + "member":{"shape":"XssMatchTuple"} + }, + "errorMessage":{"type":"string"} + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/waf/2015-08-24/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/waf/2015-08-24/docs-2.json new file mode 100644 index 000000000..a580dc663 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/waf/2015-08-24/docs-2.json @@ -0,0 +1,1208 @@ +{ + "version": "2.0", + "service": "

    This is the AWS WAF API Reference. This guide is for developers who need detailed information about the AWS WAF API actions, data types, and errors. For detailed information about AWS WAF features and an overview of how to use the AWS WAF API, see the AWS WAF Developer Guide.

    ", + "operations": { + "CreateByteMatchSet": "

    Creates a ByteMatchSet. You then use UpdateByteMatchSet to identify the part of a web request that you want AWS WAF to inspect, such as the values of the User-Agent header or the query string. For example, you can create a ByteMatchSet that matches any requests with User-Agent headers that contain the string BadBot. You can then configure AWS WAF to reject those requests.

    To create and configure a ByteMatchSet, perform the following steps:

    1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateByteMatchSet request.
    2. Submit a CreateByteMatchSet request.
    3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateByteMatchSet request.
    4. Submit an UpdateByteMatchSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value that you want AWS WAF to watch for.

    For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

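    In Go terms, steps 1 and 2 of this workflow map directly onto the waf client that this patch vendors (github.com/aws/aws-sdk-go/service/waf). A minimal sketch, assuming default credentials and the us-east-1 endpoint that WAF (classic) uses; the set name is hypothetical:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/waf"
)

func main() {
	// WAF (classic) for CloudFront is fronted by the us-east-1 endpoint.
	sess := session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-east-1"),
	}))
	svc := waf.New(sess)

	// Step 1: every create/update/delete call consumes a fresh change token.
	tok, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
	if err != nil {
		log.Fatal(err)
	}

	// Step 2: create the (initially empty) ByteMatchSet.
	out, err := svc.CreateByteMatchSet(&waf.CreateByteMatchSetInput{
		Name:        aws.String("block-badbot"), // hypothetical name
		ChangeToken: tok.ChangeToken,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created", aws.StringValue(out.ByteMatchSet.ByteMatchSetId))
}
```

    Steps 3 and 4 repeat the same token-then-mutate handshake with UpdateByteMatchSet; see the sketch under that operation below.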
    ", + "CreateIPSet": "

    Creates an IPSet, which you use to specify which web requests you want to allow or block based on the IP addresses that the requests originate from. For example, if you're receiving a lot of requests from one or more individual IP addresses or one or more ranges of IP addresses and you want to block the requests, you can create an IPSet that contains those IP addresses and then configure AWS WAF to block the requests.

    To create and configure an IPSet, perform the following steps:

    1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateIPSet request.
    2. Submit a CreateIPSet request.
    3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateIPSet request.
    4. Submit an UpdateIPSet request to specify the IP addresses that you want AWS WAF to watch for.

    For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

    ", + "CreateRule": "

    Creates a Rule, which contains the IPSet objects, ByteMatchSet objects, and other predicates that identify the requests that you want to block. If you add more than one predicate to a Rule, a request must match all of the specifications to be allowed or blocked. For example, suppose you add the following to a Rule:

    • An IPSet that matches the IP address 192.0.2.44/32
    • A ByteMatchSet that matches BadBot in the User-Agent header

    You then add the Rule to a WebACL and specify that you want to block requests that satisfy the Rule. For a request to be blocked, it must come from the IP address 192.0.2.44 and the User-Agent header in the request must contain the value BadBot.

    To create and configure a Rule, perform the following steps:

    1. Create and update the predicates that you want to include in the Rule. For more information, see CreateByteMatchSet, CreateIPSet, and CreateSqlInjectionMatchSet.
    2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateRule request.
    3. Submit a CreateRule request.
    4. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateRule request.
    5. Submit an UpdateRule request to specify the predicates that you want to include in the Rule.
    6. Create and update a WebACL that contains the Rule. For more information, see CreateWebACL.

    For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

    ", + "CreateSizeConstraintSet": "

    Creates a SizeConstraintSet. You then use UpdateSizeConstraintSet to identify the part of a web request that you want AWS WAF to check for length, such as the length of the User-Agent header or the length of the query string. For example, you can create a SizeConstraintSet that matches any requests that have a query string that is longer than 100 bytes. You can then configure AWS WAF to reject those requests.

    To create and configure a SizeConstraintSet, perform the following steps:

    1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateSizeConstraintSet request.
    2. Submit a CreateSizeConstraintSet request.
    3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateSizeConstraintSet request.
    4. Submit an UpdateSizeConstraintSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value that you want AWS WAF to watch for.

    For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

    ", + "CreateSqlInjectionMatchSet": "

    Creates a SqlInjectionMatchSet, which you use to allow, block, or count requests that contain snippets of SQL code in a specified part of web requests. AWS WAF searches for character sequences that are likely to be malicious strings.

    To create and configure a SqlInjectionMatchSet, perform the following steps:

    1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateSqlInjectionMatchSet request.
    2. Submit a CreateSqlInjectionMatchSet request.
    3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateSqlInjectionMatchSet request.
    4. Submit an UpdateSqlInjectionMatchSet request to specify the parts of web requests in which you want to allow, block, or count malicious SQL code.

    For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

    ", + "CreateWebACL": "

    Creates a WebACL, which contains the Rules that identify the CloudFront web requests that you want to allow, block, or count. AWS WAF evaluates Rules in order based on the value of Priority for each Rule.

    You also specify a default action, either ALLOW or BLOCK. If a web request doesn't match any of the Rules in a WebACL, AWS WAF responds to the request with the default action.

    To create and configure a WebACL, perform the following steps:

    1. Create and update the ByteMatchSet objects and other predicates that you want to include in Rules. For more information, see CreateByteMatchSet, UpdateByteMatchSet, CreateIPSet, UpdateIPSet, CreateSqlInjectionMatchSet, and UpdateSqlInjectionMatchSet.
    2. Create and update the Rules that you want to include in the WebACL. For more information, see CreateRule and UpdateRule.
    3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateWebACL request.
    4. Submit a CreateWebACL request.
    5. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateWebACL request.
    6. Submit an UpdateWebACL request to specify the Rules that you want to include in the WebACL, to specify the default action, and to associate the WebACL with a CloudFront distribution.

    For more information about how to use the AWS WAF API, see the AWS WAF Developer Guide.

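    A sketch of steps 3 and 4, reusing a *waf.WAF client built as in the earlier sketch; the name and CloudWatch metric name are placeholders:

```go
package wafexample

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/waf"
)

// createWebACL fetches a change token, then creates a WebACL whose
// default action allows any request that matches none of its Rules.
func createWebACL(svc *waf.WAF, name, metricName string) (*waf.WebACL, error) {
	tok, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
	if err != nil {
		return nil, err
	}
	out, err := svc.CreateWebACL(&waf.CreateWebACLInput{
		Name:          aws.String(name),
		MetricName:    aws.String(metricName),
		DefaultAction: &waf.WafAction{Type: aws.String("ALLOW")},
		ChangeToken:   tok.ChangeToken,
	})
	if err != nil {
		return nil, err
	}
	return out.WebACL, nil
}
```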
    ", + "CreateXssMatchSet": "

    Creates an XssMatchSet, which you use to allow, block, or count requests that contain cross-site scripting attacks in the specified part of web requests. AWS WAF searches for character sequences that are likely to be malicious strings.

    To create and configure an XssMatchSet, perform the following steps:

    1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateXssMatchSet request.
    2. Submit a CreateXssMatchSet request.
    3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateXssMatchSet request.
    4. Submit an UpdateXssMatchSet request to specify the parts of web requests in which you want to allow, block, or count cross-site scripting attacks.

    For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

    ", + "DeleteByteMatchSet": "

    Permanently deletes a ByteMatchSet. You can't delete a ByteMatchSet if it's still used in any Rules or if it still includes any ByteMatchTuple objects (any filters).

    If you just want to remove a ByteMatchSet from a Rule, use UpdateRule.

    To permanently delete a ByteMatchSet, perform the following steps:

    1. Update the ByteMatchSet to remove filters, if any. For more information, see UpdateByteMatchSet.
    2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteByteMatchSet request.
    3. Submit a DeleteByteMatchSet request.
    ", + "DeleteIPSet": "

    Permanently deletes an IPSet. You can't delete an IPSet if it's still used in any Rules or if it still includes any IP addresses.

    If you just want to remove an IPSet from a Rule, use UpdateRule.

    To permanently delete an IPSet from AWS WAF, perform the following steps:

    1. Update the IPSet to remove IP address ranges, if any. For more information, see UpdateIPSet.
    2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteIPSet request.
    3. Submit a DeleteIPSet request.
    ", + "DeleteRule": "

    Permanently deletes a Rule. You can't delete a Rule if it's still used in any WebACL objects or if it still includes any predicates, such as ByteMatchSet objects.

    If you just want to remove a Rule from a WebACL, use UpdateWebACL.

    To permanently delete a Rule from AWS WAF, perform the following steps:

    1. Update the Rule to remove predicates, if any. For more information, see UpdateRule.
    2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteRule request.
    3. Submit a DeleteRule request.
    ", + "DeleteSizeConstraintSet": "

    Permanently deletes a SizeConstraintSet. You can't delete a SizeConstraintSet if it's still used in any Rules or if it still includes any SizeConstraint objects (any filters).

    If you just want to remove a SizeConstraintSet from a Rule, use UpdateRule.

    To permanently delete a SizeConstraintSet, perform the following steps:

    1. Update the SizeConstraintSet to remove filters, if any. For more information, see UpdateSizeConstraintSet.
    2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteSizeConstraintSet request.
    3. Submit a DeleteSizeConstraintSet request.
    ", + "DeleteSqlInjectionMatchSet": "

    Permanently deletes a SqlInjectionMatchSet. You can't delete a SqlInjectionMatchSet if it's still used in any Rules or if it still contains any SqlInjectionMatchTuple objects.

    If you just want to remove a SqlInjectionMatchSet from a Rule, use UpdateRule.

    To permanently delete a SqlInjectionMatchSet from AWS WAF, perform the following steps:

    1. Update the SqlInjectionMatchSet to remove filters, if any. For more information, see UpdateSqlInjectionMatchSet.
    2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteSqlInjectionMatchSet request.
    3. Submit a DeleteSqlInjectionMatchSet request.
    ", + "DeleteWebACL": "

    Permanently deletes a WebACL. You can't delete a WebACL if it still contains any Rules.

    To delete a WebACL, perform the following steps:

    1. Update the WebACL to remove Rules, if any. For more information, see UpdateWebACL.
    2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteWebACL request.
    3. Submit a DeleteWebACL request.
    ", + "DeleteXssMatchSet": "

    Permanently deletes an XssMatchSet. You can't delete an XssMatchSet if it's still used in any Rules or if it still contains any XssMatchTuple objects.

    If you just want to remove an XssMatchSet from a Rule, use UpdateRule.

    To permanently delete an XssMatchSet from AWS WAF, perform the following steps:

    1. Update the XssMatchSet to remove filters, if any. For more information, see UpdateXssMatchSet.
    2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteXssMatchSet request.
    3. Submit a DeleteXssMatchSet request.
    ", + "GetByteMatchSet": "

    Returns the ByteMatchSet specified by ByteMatchSetId.

    ", + "GetChangeToken": "

    When you want to create, update, or delete AWS WAF objects, get a change token and include the change token in the create, update, or delete request. Change tokens ensure that your application doesn't submit conflicting requests to AWS WAF.

    Each create, update, or delete request must use a unique change token. If your application submits a GetChangeToken request and then submits a second GetChangeToken request before submitting a create, update, or delete request, the second GetChangeToken request returns the same value as the first GetChangeToken request.

    When you use a change token in a create, update, or delete request, the status of the change token changes to PENDING, which indicates that AWS WAF is propagating the change to all AWS WAF servers. Use GetChangeTokenStatus to determine the status of your change token.

    ", + "GetChangeTokenStatus": "

    Returns the status of a ChangeToken that you got by calling GetChangeToken. ChangeTokenStatus is one of the following values:

    • PROVISIONED: You requested the change token by calling GetChangeToken, but you haven't used it yet in a call to create, update, or delete an AWS WAF object.
    • PENDING: AWS WAF is propagating the create, update, or delete request to all AWS WAF servers.
    • INSYNC: Propagation is complete.
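    These statuses suggest a simple polling loop after each mutating call. A sketch; the attempt count and sleep interval here are arbitrary choices, not SDK defaults:

```go
package wafexample

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/waf"
)

// waitForChangeToken polls GetChangeTokenStatus until the change has
// propagated to all AWS WAF servers (INSYNC) or attempts run out.
func waitForChangeToken(svc *waf.WAF, token *string) error {
	for i := 0; i < 30; i++ {
		out, err := svc.GetChangeTokenStatus(&waf.GetChangeTokenStatusInput{
			ChangeToken: token,
		})
		if err != nil {
			return err
		}
		if aws.StringValue(out.ChangeTokenStatus) == "INSYNC" {
			return nil // propagation complete
		}
		time.Sleep(2 * time.Second) // PROVISIONED or PENDING: keep waiting
	}
	return fmt.Errorf("change token %s never reached INSYNC", aws.StringValue(token))
}
```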
    ", + "GetIPSet": "

    Returns the IPSet that is specified by IPSetId.

    ", + "GetRule": "

    Returns the Rule that is specified by the RuleId that you included in the GetRule request.

    ", + "GetSampledRequests": "

    Gets detailed information about a specified number of requests--a sample--that AWS WAF randomly selects from among the first 5,000 requests that your AWS resource received during a time range that you choose. You can specify a sample size of up to 100 requests, and you can specify any time range in the previous three hours.

    GetSampledRequests returns a time range, which is usually the time range that you specified. However, if your resource (such as a CloudFront distribution) received 5,000 requests before the specified time range elapsed, GetSampledRequests returns an updated time range. This new time range indicates the actual period during which AWS WAF selected the requests in the sample.

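    A sketch of a sampling call for a hypothetical WebACL and Rule, using a one-hour window ending now (any window inside the previous three hours is valid):

```go
package wafexample

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/waf"
)

// sampleRecentRequests prints up to 100 sampled requests that matched
// the given Rule during the last hour.
func sampleRecentRequests(svc *waf.WAF, webACLID, ruleID string) error {
	now := time.Now()
	out, err := svc.GetSampledRequests(&waf.GetSampledRequestsInput{
		WebAclId: aws.String(webACLID),
		RuleId:   aws.String(ruleID),
		TimeWindow: &waf.TimeWindow{
			StartTime: aws.Time(now.Add(-1 * time.Hour)),
			EndTime:   aws.Time(now),
		},
		MaxItems: aws.Int64(100), // the model caps this at 100
	})
	if err != nil {
		return err
	}
	// If 5,000 requests arrived before the window elapsed, out.TimeWindow
	// is narrowed to the period the sample actually covers.
	for _, r := range out.SampledRequests {
		fmt.Println(aws.StringValue(r.Request.ClientIP), aws.StringValue(r.Action))
	}
	return nil
}
```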
    ", + "GetSizeConstraintSet": "

    Returns the SizeConstraintSet specified by SizeConstraintSetId.

    ", + "GetSqlInjectionMatchSet": "

    Returns the SqlInjectionMatchSet that is specified by SqlInjectionMatchSetId.

    ", + "GetWebACL": "

    Returns the WebACL that is specified by WebACLId.

    ", + "GetXssMatchSet": "

    Returns the XssMatchSet that is specified by XssMatchSetId.

    ", + "ListByteMatchSets": "

    Returns an array of ByteMatchSetSummary objects.

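    Per the request shape, ListByteMatchSets requires a Limit of 1-100 and pages through results with NextMarker, so collecting every summary looks roughly like this sketch:

```go
package wafexample

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/waf"
)

// listAllByteMatchSets follows the NextMarker cursor until the service
// stops returning one.
func listAllByteMatchSets(svc *waf.WAF) ([]*waf.ByteMatchSetSummary, error) {
	var all []*waf.ByteMatchSetSummary
	input := &waf.ListByteMatchSetsInput{Limit: aws.Int64(100)}
	for {
		out, err := svc.ListByteMatchSets(input)
		if err != nil {
			return nil, err
		}
		all = append(all, out.ByteMatchSets...)
		if aws.StringValue(out.NextMarker) == "" {
			return all, nil // no more pages
		}
		input.NextMarker = out.NextMarker
	}
}
```

    The other List operations (ListIPSets, ListRules, and so on) paginate the same way.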
    ", + "ListIPSets": "

    Returns an array of IPSetSummary objects in the response.

    ", + "ListRules": "

    Returns an array of RuleSummary objects.

    ", + "ListSizeConstraintSets": "

    Returns an array of SizeConstraintSetSummary objects.

    ", + "ListSqlInjectionMatchSets": "

    Returns an array of SqlInjectionMatchSet objects.

    ", + "ListWebACLs": "

    Returns an array of WebACLSummary objects in the response.

    ", + "ListXssMatchSets": "

    Returns an array of XssMatchSet objects.

    ", + "UpdateByteMatchSet": "

    Inserts or deletes ByteMatchTuple objects (filters) in a ByteMatchSet. For each ByteMatchTuple object, you specify the following values:

    • Whether to insert or delete the object from the array. If you want to change a ByteMatchSetUpdate object, you delete the existing object and add a new one.
    • The part of a web request that you want AWS WAF to inspect, such as a query string or the value of the User-Agent header.
    • The bytes (typically a string that corresponds with ASCII characters) that you want AWS WAF to look for. For more information, including how you specify the values for the AWS WAF API and the AWS CLI or SDKs, see TargetString in the ByteMatchTuple data type.
    • Where to look, such as at the beginning or the end of a query string.
    • Whether to perform any conversions on the request, such as converting it to lowercase, before inspecting it for the specified string.

    For example, you can add a ByteMatchSetUpdate object that matches web requests in which User-Agent headers contain the string BadBot. You can then configure AWS WAF to block those requests.

    To create and configure a ByteMatchSet, perform the following steps:

    1. Create a ByteMatchSet. For more information, see CreateByteMatchSet.
    2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateByteMatchSet request.
    3. Submit an UpdateByteMatchSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value that you want AWS WAF to watch for.

    For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

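    Putting those five values together, a sketch that inserts the BadBot filter described above into an existing (hypothetical) ByteMatchSet:

```go
package wafexample

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/waf"
)

// insertBadBotFilter inspects the User-Agent header for the bytes
// "BadBot" anywhere in the value, with no text transformation.
func insertBadBotFilter(svc *waf.WAF, byteMatchSetID string) (*string, error) {
	tok, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
	if err != nil {
		return nil, err
	}
	out, err := svc.UpdateByteMatchSet(&waf.UpdateByteMatchSetInput{
		ByteMatchSetId: aws.String(byteMatchSetID),
		ChangeToken:    tok.ChangeToken,
		Updates: []*waf.ByteMatchSetUpdate{{
			Action: aws.String("INSERT"),
			ByteMatchTuple: &waf.ByteMatchTuple{
				FieldToMatch: &waf.FieldToMatch{
					Type: aws.String("HEADER"),
					Data: aws.String("User-Agent"),
				},
				TargetString:         []byte("BadBot"), // a blob in the model
				TextTransformation:   aws.String("NONE"),
				PositionalConstraint: aws.String("CONTAINS"),
			},
		}},
	})
	if err != nil {
		return nil, err
	}
	return out.ChangeToken, nil // can be fed to GetChangeTokenStatus
}
```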
    ", + "UpdateIPSet": "

    Inserts or deletes IPSetDescriptor objects in an IPSet. For each IPSetDescriptor object, you specify the following values:

    • Whether to insert or delete the object from the array. If you want to change an IPSetDescriptor object, you delete the existing object and add a new one.
    • The IP address version, IPv4.
    • The IP address in CIDR notation, for example, 192.0.2.0/24 (for the range of IP addresses from 192.0.2.0 to 192.0.2.255) or 192.0.2.44/32 (for the individual IP address 192.0.2.44).

    AWS WAF supports /8, /16, /24, and /32 IP address ranges. For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.

    You use an IPSet to specify which web requests you want to allow or block based on the IP addresses that the requests originated from. For example, if you're receiving a lot of requests from one or a small number of IP addresses and you want to block the requests, you can create an IPSet that specifies those IP addresses, and then configure AWS WAF to block the requests.

    To create and configure an IPSet, perform the following steps:

    1. Submit a CreateIPSet request.
    2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateIPSet request.
    3. Submit an UpdateIPSet request to specify the IP addresses that you want AWS WAF to watch for.

    When you update an IPSet, you specify the IP addresses that you want to add and/or the IP addresses that you want to delete. If you want to change an IP address, you delete the existing IP address and add the new one.

    For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

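    A sketch of a single INSERT against an existing IPSet; the CIDR is the caller's choice, subject to the /8, /16, /24, and /32 restriction above:

```go
package wafexample

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/waf"
)

// insertIPRange adds one IPv4 range, e.g. "192.0.2.44/32", to an IPSet.
func insertIPRange(svc *waf.WAF, ipSetID, cidr string) error {
	tok, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
	if err != nil {
		return err
	}
	_, err = svc.UpdateIPSet(&waf.UpdateIPSetInput{
		IPSetId:     aws.String(ipSetID),
		ChangeToken: tok.ChangeToken,
		Updates: []*waf.IPSetUpdate{{
			Action: aws.String("INSERT"), // "DELETE" removes a descriptor
			IPSetDescriptor: &waf.IPSetDescriptor{
				Type:  aws.String("IPV4"), // the only type in this API version
				Value: aws.String(cidr),
			},
		}},
	})
	return err
}
```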
    ", + "UpdateRule": "

    Inserts or deletes Predicate objects in a Rule. Each Predicate object identifies a predicate, such as a ByteMatchSet or an IPSet, that specifies the web requests that you want to allow, block, or count. If you add more than one predicate to a Rule, a request must match all of the specifications to be allowed, blocked, or counted. For example, suppose you add the following to a Rule:

    • A ByteMatchSet that matches the value BadBot in the User-Agent header
    • An IPSet that matches the IP address 192.0.2.44

    You then add the Rule to a WebACL and specify that you want to block requests that satisfy the Rule. For a request to be blocked, the User-Agent header in the request must contain the value BadBot and the request must originate from the IP address 192.0.2.44.

    To create and configure a Rule, perform the following steps:

    1. Create and update the predicates that you want to include in the Rule.
    2. Create the Rule. See CreateRule.
    3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateRule request.
    4. Submit an UpdateRule request to add predicates to the Rule.
    5. Create and update a WebACL that contains the Rule. See CreateWebACL.

    If you want to replace one ByteMatchSet or IPSet with another, you delete the existing one and add the new one.

    For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

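    A sketch of step 4, inserting one non-negated IPMatch predicate that points at an existing IPSet:

```go
package wafexample

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/waf"
)

// attachIPSetToRule makes the Rule match requests whose source address
// is in the given IPSet.
func attachIPSetToRule(svc *waf.WAF, ruleID, ipSetID string) error {
	tok, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
	if err != nil {
		return err
	}
	_, err = svc.UpdateRule(&waf.UpdateRuleInput{
		RuleId:      aws.String(ruleID),
		ChangeToken: tok.ChangeToken,
		Updates: []*waf.RuleUpdate{{
			Action: aws.String("INSERT"),
			Predicate: &waf.Predicate{
				Negated: aws.Bool(false), // true would invert the match
				Type:    aws.String("IPMatch"),
				DataId:  aws.String(ipSetID),
			},
		}},
	})
	return err
}
```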
    ", + "UpdateSizeConstraintSet": "

    Inserts or deletes SizeConstraint objects (filters) in a SizeConstraintSet. For each SizeConstraint object, you specify the following values:

    • Whether to insert or delete the object from the array. If you want to change a SizeConstraintSetUpdate object, you delete the existing object and add a new one.
    • The part of a web request that you want AWS WAF to evaluate, such as the length of a query string or the length of the User-Agent header.
    • Whether to perform any transformations on the request, such as converting it to lowercase, before checking its length. Note that transformations of the request body are not supported because the AWS resource forwards only the first 8192 bytes of your request to AWS WAF.
    • A ComparisonOperator used for evaluating the selected part of the request against the specified Size, such as equals, greater than, less than, and so on.
    • The length, in bytes, that you want AWS WAF to watch for in the selected part of the request. The length is computed after applying the transformation.

    For example, you can add a SizeConstraintSetUpdate object that matches web requests in which the length of the User-Agent header is greater than 100 bytes. You can then configure AWS WAF to block those requests.

    To create and configure a SizeConstraintSet, perform the following steps:

    1. Create a SizeConstraintSet. For more information, see CreateSizeConstraintSet.
    2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateSizeConstraintSet request.
    3. Submit an UpdateSizeConstraintSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value that you want AWS WAF to watch for.

    For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

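    A sketch of step 3 for the example above: match query strings longer than 100 bytes, with the length computed after URL decoding:

```go
package wafexample

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/waf"
)

// limitQueryStringLength inserts a size constraint into an existing set.
func limitQueryStringLength(svc *waf.WAF, setID string) error {
	tok, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
	if err != nil {
		return err
	}
	_, err = svc.UpdateSizeConstraintSet(&waf.UpdateSizeConstraintSetInput{
		SizeConstraintSetId: aws.String(setID),
		ChangeToken:         tok.ChangeToken,
		Updates: []*waf.SizeConstraintSetUpdate{{
			Action: aws.String("INSERT"),
			SizeConstraint: &waf.SizeConstraint{
				FieldToMatch:       &waf.FieldToMatch{Type: aws.String("QUERY_STRING")},
				TextTransformation: aws.String("URL_DECODE"),
				ComparisonOperator: aws.String("GT"),
				Size:               aws.Int64(100), // bytes, after transformation
			},
		}},
	})
	return err
}
```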
    ", + "UpdateSqlInjectionMatchSet": "

    Inserts or deletes SqlInjectionMatchTuple objects (filters) in a SqlInjectionMatchSet. For each SqlInjectionMatchTuple object, you specify the following values:

    • Action: Whether to insert the object into or delete the object from the array. To change a SqlInjectionMatchTuple, you delete the existing object and add a new one.
    • FieldToMatch: The part of web requests that you want AWS WAF to inspect and, if you want AWS WAF to inspect a header, the name of the header.
    • TextTransformation: Which text transformation, if any, to perform on the web request before inspecting the request for snippets of malicious SQL code.

    You use SqlInjectionMatchSet objects to specify which CloudFront requests you want to allow, block, or count. For example, if you're receiving requests that contain snippets of SQL code in the query string and you want to block the requests, you can create a SqlInjectionMatchSet with the applicable settings, and then configure AWS WAF to block the requests.

    To create and configure a SqlInjectionMatchSet, perform the following steps:

    1. Submit a CreateSqlInjectionMatchSet request.
    2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateSqlInjectionMatchSet request.
    3. Submit an UpdateSqlInjectionMatchSet request to specify the parts of web requests that you want AWS WAF to inspect for snippets of SQL code.

    For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

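    The three values above translate into a sketch like the following, which inserts one tuple into an existing set:

```go
package wafexample

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/waf"
)

// watchQueryStringForSQLi URL-decodes the query string before AWS WAF
// scans it for snippets of SQL code.
func watchQueryStringForSQLi(svc *waf.WAF, setID string) error {
	tok, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
	if err != nil {
		return err
	}
	_, err = svc.UpdateSqlInjectionMatchSet(&waf.UpdateSqlInjectionMatchSetInput{
		SqlInjectionMatchSetId: aws.String(setID),
		ChangeToken:            tok.ChangeToken,
		Updates: []*waf.SqlInjectionMatchSetUpdate{{
			Action: aws.String("INSERT"),
			SqlInjectionMatchTuple: &waf.SqlInjectionMatchTuple{
				FieldToMatch:       &waf.FieldToMatch{Type: aws.String("QUERY_STRING")},
				TextTransformation: aws.String("URL_DECODE"),
			},
		}},
	})
	return err
}
```

    UpdateXssMatchSet below follows the same shape, with XssMatchTuple in place of SqlInjectionMatchTuple.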
    ", + "UpdateWebACL": "

    Inserts or deletes ActivatedRule objects in a WebACL. Each Rule identifies web requests that you want to allow, block, or count. When you update a WebACL, you specify the following values:

    • A default action for the WebACL, either ALLOW or BLOCK. AWS WAF performs the default action if a request doesn't match the criteria in any of the Rules in a WebACL.
    • The Rules that you want to add and/or delete. If you want to replace one Rule with another, you delete the existing Rule and add the new one.
    • For each Rule, whether you want AWS WAF to allow requests, block requests, or count requests that match the conditions in the Rule.
    • The order in which you want AWS WAF to evaluate the Rules in a WebACL. If you add more than one Rule to a WebACL, AWS WAF evaluates each request against the Rules in order based on the value of Priority. (The Rule that has the lowest value for Priority is evaluated first.) When a web request matches all of the predicates (such as ByteMatchSets and IPSets) in a Rule, AWS WAF immediately takes the corresponding action, allow or block, and doesn't evaluate the request against the remaining Rules in the WebACL, if any.
    • The CloudFront distribution that you want to associate with the WebACL.

    To create and configure a WebACL, perform the following steps:

    1. Create and update the predicates that you want to include in Rules. For more information, see CreateByteMatchSet, UpdateByteMatchSet, CreateIPSet, UpdateIPSet, CreateSqlInjectionMatchSet, and UpdateSqlInjectionMatchSet.
    2. Create and update the Rules that you want to include in the WebACL. For more information, see CreateRule and UpdateRule.
    3. Create a WebACL. See CreateWebACL.
    4. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateWebACL request.
    5. Submit an UpdateWebACL request to specify the Rules that you want to include in the WebACL, to specify the default action, and to associate the WebACL with a CloudFront distribution.

    For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.
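
    As a sketch of step 5, assuming a *waf.WAF client (svc) built as in the earlier UpdateSqlInjectionMatchSet example and placeholder IDs; the Rule evaluation and default-action semantics are those described above:

    // Insert an existing Rule at Priority 1 and BLOCK matching requests;
    // requests that match no Rule fall through to the ALLOW default action.
    func activateRule(svc *waf.WAF, webACLID, ruleID string) error {
        token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
        if err != nil {
            return err
        }
        _, err = svc.UpdateWebACL(&waf.UpdateWebACLInput{
            ChangeToken: token.ChangeToken,
            WebACLId:    aws.String(webACLID),
            Updates: []*waf.WebACLUpdate{{
                Action: aws.String("INSERT"),
                ActivatedRule: &waf.ActivatedRule{
                    RuleId:   aws.String(ruleID),
                    Priority: aws.Int64(1), // lowest Priority is evaluated first
                    Action:   &waf.WafAction{Type: aws.String("BLOCK")},
                },
            }},
            DefaultAction: &waf.WafAction{Type: aws.String("ALLOW")},
        })
        return err
    }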

    ", + "UpdateXssMatchSet": "

    Inserts or deletes XssMatchTuple objects (filters) in an XssMatchSet. For each XssMatchTuple object, you specify the following values:

    • Action: Whether to insert the object into or delete the object from the array. To change an XssMatchTuple, you delete the existing object and add a new one.
    • FieldToMatch: The part of web requests that you want AWS WAF to inspect and, if you want AWS WAF to inspect a header, the name of the header.
    • TextTransformation: Which text transformation, if any, to perform on the web request before inspecting the request for cross-site scripting attacks.

    You use XssMatchSet objects to specify which CloudFront requests you want to allow, block, or count. For example, if you're receiving requests that contain cross-site scripting attacks in the request body and you want to block the requests, you can create an XssMatchSet with the applicable settings, and then configure AWS WAF to block the requests.

    To create and configure an XssMatchSet, perform the following steps:

    1. Submit a CreateXssMatchSet request.
    2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateXssMatchSet request.
    3. Submit an UpdateXssMatchSet request to specify the parts of web requests that you want AWS WAF to inspect for cross-site scripting attacks.

    For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.
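
    The tuple itself differs from the SQL-injection case only in type; a sketch, with the same imports and caveats as the earlier examples:

    // An XssMatchTuple that has AWS WAF URL-decode the query string and then
    // check it for cross-site scripting attacks.
    var tuple = &waf.XssMatchTuple{
        FieldToMatch:       &waf.FieldToMatch{Type: aws.String("QUERY_STRING")},
        TextTransformation: aws.String("URL_DECODE"),
    }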

    " + }, + "shapes": { + "Action": { + "base": null, + "refs": { + "SampledHTTPRequest$Action": "

    The action for the Rule that the request matched: ALLOW, BLOCK, or COUNT.

    " + } + }, + "ActivatedRule": { + "base": "

    The ActivatedRule object in an UpdateWebACL request specifies a Rule that you want to insert or delete, the priority of the Rule in the WebACL, and the action that you want AWS WAF to take when a web request matches the Rule (ALLOW, BLOCK, or COUNT).

    To specify whether to insert or delete a Rule, use the Action parameter in the WebACLUpdate data type.

    ", + "refs": { + "ActivatedRules$member": null, + "WebACLUpdate$ActivatedRule": null + } + }, + "ActivatedRules": { + "base": null, + "refs": { + "WebACL$Rules": "

    An array that contains the action for each Rule in a WebACL, the priority of the Rule, and the ID of the Rule.

    " + } + }, + "ByteMatchSet": { + "base": "

    In a GetByteMatchSet request, ByteMatchSet is a complex type that contains the ByteMatchSetId and Name of a ByteMatchSet, and the values that you specified when you updated the ByteMatchSet.

    A complex type that contains ByteMatchTuple objects, which specify the parts of web requests that you want AWS WAF to inspect and the values that you want AWS WAF to search for. If a ByteMatchSet contains more than one ByteMatchTuple object, a request needs to match the settings in only one ByteMatchTuple to be considered a match.

    ", + "refs": { + "CreateByteMatchSetResponse$ByteMatchSet": "

    A ByteMatchSet that contains no ByteMatchTuple objects.

    ", + "GetByteMatchSetResponse$ByteMatchSet": "

    Information about the ByteMatchSet that you specified in the GetByteMatchSet request. For more information, see the following topics:

    • ByteMatchSet: Contains ByteMatchSetId, ByteMatchTuples, and Name
    • ByteMatchTuples: Contains an array of ByteMatchTuple objects. Each ByteMatchTuple object contains FieldToMatch, PositionalConstraint, TargetString, and TextTransformation
    • FieldToMatch: Contains Data and Type
    " + } + }, + "ByteMatchSetSummaries": { + "base": null, + "refs": { + "ListByteMatchSetsResponse$ByteMatchSets": "

    An array of ByteMatchSetSummary objects.

    " + } + }, + "ByteMatchSetSummary": { + "base": "

    Returned by ListByteMatchSets. Each ByteMatchSetSummary object includes the Name and ByteMatchSetId for one ByteMatchSet.

    ", + "refs": { + "ByteMatchSetSummaries$member": null + } + }, + "ByteMatchSetUpdate": { + "base": "

    In an UpdateByteMatchSet request, ByteMatchSetUpdate specifies whether to insert or delete a ByteMatchTuple and includes the settings for the ByteMatchTuple.

    ", + "refs": { + "ByteMatchSetUpdates$member": null + } + }, + "ByteMatchSetUpdates": { + "base": null, + "refs": { + "UpdateByteMatchSetRequest$Updates": "

    An array of ByteMatchSetUpdate objects that you want to insert into or delete from a ByteMatchSet. For more information, see the applicable data types:

    • ByteMatchSetUpdate: Contains Action and ByteMatchTuple
    • ByteMatchTuple: Contains FieldToMatch, PositionalConstraint, TargetString, and TextTransformation
    • FieldToMatch: Contains Data and Type

    " + } + }, + "ByteMatchTargetString": { + "base": null, + "refs": { + "ByteMatchTuple$TargetString": "

    The value that you want AWS WAF to search for. AWS WAF searches for the specified string in the part of web requests that you specified in FieldToMatch. The maximum length of the value is 50 bytes.

    Valid values depend on the values that you specified for FieldToMatch:

    • HEADER: The value that you want AWS WAF to search for in the request header that you specified in FieldToMatch, for example, the value of the User-Agent or Referer header.
    • METHOD: The HTTP method, which indicates the type of operation specified in the request. CloudFront supports the following methods: DELETE, GET, HEAD, OPTIONS, PATCH, POST, and PUT.
    • QUERY_STRING: The value that you want AWS WAF to search for in the query string, which is the part of a URL that appears after a ? character.
    • URI: The value that you want AWS WAF to search for in the part of a URL that identifies a resource, for example, /images/daily-ad.jpg.
    • BODY: The part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form. The request body immediately follows the request headers. Note that only the first 8192 bytes of the request body are forwarded to AWS WAF for inspection. To allow or block requests based on the length of the body, you can create a size constraint set. For more information, see CreateSizeConstraintSet.

    If TargetString includes alphabetic characters A-Z and a-z, note that the value is case sensitive.

    If you're using the AWS WAF API

    Specify a base64-encoded version of the value. The maximum length of the value before you base64-encode it is 50 bytes.

    For example, suppose the value of Type is HEADER and the value of Data is User-Agent. If you want to search the User-Agent header for the value BadBot, you base64-encode BadBot using MIME base64 encoding and include the resulting value, QmFkQm90, in the value of TargetString.

    If you're using the AWS CLI or one of the AWS SDKs

    The value that you want AWS WAF to search for. The SDK automatically base64 encodes the value.
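
    In the vendored Go SDK, for example, ByteMatchTuple.TargetString is a plain []byte and the SDK performs the base64 step; a sketch of the User-Agent/BadBot example above:

    // TargetString is sent base64-encoded (QmFkQm90) on the wire by the SDK.
    var tuple = &waf.ByteMatchTuple{
        FieldToMatch: &waf.FieldToMatch{
            Type: aws.String("HEADER"),
            Data: aws.String("User-Agent"), // header names are not case sensitive
        },
        TargetString:         []byte("BadBot"),
        PositionalConstraint: aws.String("CONTAINS"),
        TextTransformation:   aws.String("NONE"),
    }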

    " + } + }, + "ByteMatchTuple": { + "base": "

    The bytes (typically a string that corresponds with ASCII characters) that you want AWS WAF to search for in web requests, the location in requests that you want AWS WAF to search, and other settings.

    ", + "refs": { + "ByteMatchSetUpdate$ByteMatchTuple": "

    Information about the part of a web request that you want AWS WAF to inspect and the value that you want AWS WAF to search for. If you specify DELETE for the value of Action, the ByteMatchTuple values must exactly match the values in the ByteMatchTuple that you want to delete from the ByteMatchSet.

    ", + "ByteMatchTuples$member": null + } + }, + "ByteMatchTuples": { + "base": null, + "refs": { + "ByteMatchSet$ByteMatchTuples": "

    Specifies the bytes (typically a string that corresponds with ASCII characters) that you want AWS WAF to search for in web requests, the location in requests that you want AWS WAF to search, and other settings.

    " + } + }, + "ChangeAction": { + "base": null, + "refs": { + "ByteMatchSetUpdate$Action": "

    Specifies whether to insert or delete a ByteMatchTuple.

    ", + "IPSetUpdate$Action": "

    Specifies whether to insert or delete an IP address with UpdateIPSet.

    ", + "RuleUpdate$Action": "

    Specify INSERT to add a Predicate to a Rule. Use DELETE to remove a Predicate from a Rule.

    ", + "SizeConstraintSetUpdate$Action": "

    Specify INSERT to add a SizeConstraintSetUpdate to a SizeConstraintSet. Use DELETE to remove a SizeConstraintSetUpdate from a SizeConstraintSet.

    ", + "SqlInjectionMatchSetUpdate$Action": "

    Specify INSERT to add a SqlInjectionMatchSetUpdate to a SqlInjectionMatchSet. Use DELETE to remove a SqlInjectionMatchSetUpdate from a SqlInjectionMatchSet.

    ", + "WebACLUpdate$Action": "

    Specifies whether to insert a Rule into or delete a Rule from a WebACL.

    ", + "XssMatchSetUpdate$Action": "

    Specify INSERT to add an XssMatchSetUpdate to an XssMatchSet. Use DELETE to remove an XssMatchSetUpdate from an XssMatchSet.

    " + } + }, + "ChangeToken": { + "base": null, + "refs": { + "CreateByteMatchSetRequest$ChangeToken": "

    The value returned by the most recent call to GetChangeToken.

    ", + "CreateByteMatchSetResponse$ChangeToken": "

    The ChangeToken that you used to submit the CreateByteMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

    ", + "CreateIPSetRequest$ChangeToken": "

    The value returned by the most recent call to GetChangeToken.

    ", + "CreateIPSetResponse$ChangeToken": "

    The ChangeToken that you used to submit the CreateIPSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

    ", + "CreateRuleRequest$ChangeToken": "

    The value returned by the most recent call to GetChangeToken.

    ", + "CreateRuleResponse$ChangeToken": "

    The ChangeToken that you used to submit the CreateRule request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

    ", + "CreateSizeConstraintSetRequest$ChangeToken": "

    The value returned by the most recent call to GetChangeToken.

    ", + "CreateSizeConstraintSetResponse$ChangeToken": "

    The ChangeToken that you used to submit the CreateSizeConstraintSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

    ", + "CreateSqlInjectionMatchSetRequest$ChangeToken": "

    The value returned by the most recent call to GetChangeToken.

    ", + "CreateSqlInjectionMatchSetResponse$ChangeToken": "

    The ChangeToken that you used to submit the CreateSqlInjectionMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

    ", + "CreateWebACLRequest$ChangeToken": "

    The value returned by the most recent call to GetChangeToken.

    ", + "CreateWebACLResponse$ChangeToken": "

    The ChangeToken that you used to submit the CreateWebACL request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

    ", + "CreateXssMatchSetRequest$ChangeToken": "

    The value returned by the most recent call to GetChangeToken.

    ", + "CreateXssMatchSetResponse$ChangeToken": "

    The ChangeToken that you used to submit the CreateXssMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

    ", + "DeleteByteMatchSetRequest$ChangeToken": "

    The value returned by the most recent call to GetChangeToken.

    ", + "DeleteByteMatchSetResponse$ChangeToken": "

    The ChangeToken that you used to submit the DeleteByteMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

    ", + "DeleteIPSetRequest$ChangeToken": "

    The value returned by the most recent call to GetChangeToken.

    ", + "DeleteIPSetResponse$ChangeToken": "

    The ChangeToken that you used to submit the DeleteIPSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

    ", + "DeleteRuleRequest$ChangeToken": "

    The value returned by the most recent call to GetChangeToken.

    ", + "DeleteRuleResponse$ChangeToken": "

    The ChangeToken that you used to submit the DeleteRule request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

    ", + "DeleteSizeConstraintSetRequest$ChangeToken": "

    The value returned by the most recent call to GetChangeToken.

    ", + "DeleteSizeConstraintSetResponse$ChangeToken": "

    The ChangeToken that you used to submit the DeleteSizeConstraintSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

    ", + "DeleteSqlInjectionMatchSetRequest$ChangeToken": "

    The value returned by the most recent call to GetChangeToken.

    ", + "DeleteSqlInjectionMatchSetResponse$ChangeToken": "

    The ChangeToken that you used to submit the DeleteSqlInjectionMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

    ", + "DeleteWebACLRequest$ChangeToken": "

    The value returned by the most recent call to GetChangeToken.

    ", + "DeleteWebACLResponse$ChangeToken": "

    The ChangeToken that you used to submit the DeleteWebACL request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

    ", + "DeleteXssMatchSetRequest$ChangeToken": "

    The value returned by the most recent call to GetChangeToken.

    ", + "DeleteXssMatchSetResponse$ChangeToken": "

    The ChangeToken that you used to submit the DeleteXssMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

    ", + "GetChangeTokenResponse$ChangeToken": "

    The ChangeToken that you used in the request. Use this value in a GetChangeTokenStatus request to get the current status of the request.

    ", + "GetChangeTokenStatusRequest$ChangeToken": "

    The change token for which you want to get the status. This change token was previously returned in the GetChangeToken response.
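
    Because changes propagate asynchronously, a caller that needs a change to be live can poll its token; a sketch (imports aws, waf, and time; svc as in the earlier sketches):

    // Poll until AWS WAF reports the change as propagated (INSYNC).
    func waitInSync(svc *waf.WAF, token *string) error {
        for {
            out, err := svc.GetChangeTokenStatus(&waf.GetChangeTokenStatusInput{
                ChangeToken: token,
            })
            if err != nil {
                return err
            }
            if aws.StringValue(out.ChangeTokenStatus) == "INSYNC" {
                return nil
            }
            time.Sleep(5 * time.Second) // fixed delay keeps the sketch simple
        }
    }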

    ", + "UpdateByteMatchSetRequest$ChangeToken": "

    The value returned by the most recent call to GetChangeToken.

    ", + "UpdateByteMatchSetResponse$ChangeToken": "

    The ChangeToken that you used to submit the UpdateByteMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

    ", + "UpdateIPSetRequest$ChangeToken": "

    The value returned by the most recent call to GetChangeToken.

    ", + "UpdateIPSetResponse$ChangeToken": "

    The ChangeToken that you used to submit the UpdateIPSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

    ", + "UpdateRuleRequest$ChangeToken": "

    The value returned by the most recent call to GetChangeToken.

    ", + "UpdateRuleResponse$ChangeToken": "

    The ChangeToken that you used to submit the UpdateRule request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

    ", + "UpdateSizeConstraintSetRequest$ChangeToken": "

    The value returned by the most recent call to GetChangeToken.

    ", + "UpdateSizeConstraintSetResponse$ChangeToken": "

    The ChangeToken that you used to submit the UpdateSizeConstraintSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

    ", + "UpdateSqlInjectionMatchSetRequest$ChangeToken": "

    The value returned by the most recent call to GetChangeToken.

    ", + "UpdateSqlInjectionMatchSetResponse$ChangeToken": "

    The ChangeToken that you used to submit the UpdateSqlInjectionMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

    ", + "UpdateWebACLRequest$ChangeToken": "

    The value returned by the most recent call to GetChangeToken.

    ", + "UpdateWebACLResponse$ChangeToken": "

    The ChangeToken that you used to submit the UpdateWebACL request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

    ", + "UpdateXssMatchSetRequest$ChangeToken": "

    The value returned by the most recent call to GetChangeToken.

    ", + "UpdateXssMatchSetResponse$ChangeToken": "

    The ChangeToken that you used to submit the UpdateXssMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

    " + } + }, + "ChangeTokenStatus": { + "base": null, + "refs": { + "GetChangeTokenStatusResponse$ChangeTokenStatus": "

    The status of the change token.

    " + } + }, + "ComparisonOperator": { + "base": null, + "refs": { + "SizeConstraint$ComparisonOperator": "

    The type of comparison you want AWS WAF to perform. AWS WAF uses this in combination with the provided Size and FieldToMatch to build an expression in the form of \"Size ComparisonOperator size in bytes of FieldToMatch\". If that expression is true, the SizeConstraint is considered to match.

    • EQ: Used to test if the Size is equal to the size of the FieldToMatch
    • NE: Used to test if the Size is not equal to the size of the FieldToMatch
    • LE: Used to test if the Size is less than or equal to the size of the FieldToMatch
    • LT: Used to test if the Size is strictly less than the size of the FieldToMatch
    • GE: Used to test if the Size is greater than or equal to the size of the FieldToMatch
    • GT: Used to test if the Size is strictly greater than the size of the FieldToMatch
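
    Note that the expression is evaluated literally in the form given above, with Size on the left; a sketch of the UpdateSizeConstraintSet example (User-Agent longer than 100 bytes), reading the operator definitions as written:

    // "100 LT size of the User-Agent header" is true when the header is
    // longer than 100 bytes, per the literal expression form defined above.
    var constraint = &waf.SizeConstraint{
        ComparisonOperator: aws.String("LT"),
        FieldToMatch: &waf.FieldToMatch{
            Type: aws.String("HEADER"),
            Data: aws.String("User-Agent"),
        },
        Size:               aws.Int64(100),
        TextTransformation: aws.String("NONE"),
    }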

    " + } + }, + "Country": { + "base": null, + "refs": { + "HTTPRequest$Country": "

    The two-letter country code for the country that the request originated from. For a current list of country codes, see the Wikipedia entry ISO 3166-1 alpha-2.

    " + } + }, + "CreateByteMatchSetRequest": { + "base": null, + "refs": { + } + }, + "CreateByteMatchSetResponse": { + "base": null, + "refs": { + } + }, + "CreateIPSetRequest": { + "base": null, + "refs": { + } + }, + "CreateIPSetResponse": { + "base": null, + "refs": { + } + }, + "CreateRuleRequest": { + "base": null, + "refs": { + } + }, + "CreateRuleResponse": { + "base": null, + "refs": { + } + }, + "CreateSizeConstraintSetRequest": { + "base": null, + "refs": { + } + }, + "CreateSizeConstraintSetResponse": { + "base": null, + "refs": { + } + }, + "CreateSqlInjectionMatchSetRequest": { + "base": "

    A request to create a SqlInjectionMatchSet.

    ", + "refs": { + } + }, + "CreateSqlInjectionMatchSetResponse": { + "base": "

    The response to a CreateSqlInjectionMatchSet request.

    ", + "refs": { + } + }, + "CreateWebACLRequest": { + "base": null, + "refs": { + } + }, + "CreateWebACLResponse": { + "base": null, + "refs": { + } + }, + "CreateXssMatchSetRequest": { + "base": "

    A request to create an XssMatchSet.

    ", + "refs": { + } + }, + "CreateXssMatchSetResponse": { + "base": "

    The response to a CreateXssMatchSet request.

    ", + "refs": { + } + }, + "DeleteByteMatchSetRequest": { + "base": null, + "refs": { + } + }, + "DeleteByteMatchSetResponse": { + "base": null, + "refs": { + } + }, + "DeleteIPSetRequest": { + "base": null, + "refs": { + } + }, + "DeleteIPSetResponse": { + "base": null, + "refs": { + } + }, + "DeleteRuleRequest": { + "base": null, + "refs": { + } + }, + "DeleteRuleResponse": { + "base": null, + "refs": { + } + }, + "DeleteSizeConstraintSetRequest": { + "base": null, + "refs": { + } + }, + "DeleteSizeConstraintSetResponse": { + "base": null, + "refs": { + } + }, + "DeleteSqlInjectionMatchSetRequest": { + "base": "

    A request to delete a SqlInjectionMatchSet from AWS WAF.

    ", + "refs": { + } + }, + "DeleteSqlInjectionMatchSetResponse": { + "base": "

    The response to a request to delete a SqlInjectionMatchSet from AWS WAF.

    ", + "refs": { + } + }, + "DeleteWebACLRequest": { + "base": null, + "refs": { + } + }, + "DeleteWebACLResponse": { + "base": null, + "refs": { + } + }, + "DeleteXssMatchSetRequest": { + "base": "

    A request to delete an XssMatchSet from AWS WAF.

    ", + "refs": { + } + }, + "DeleteXssMatchSetResponse": { + "base": "

    The response to a request to delete an XssMatchSet from AWS WAF.

    ", + "refs": { + } + }, + "FieldToMatch": { + "base": "

    Specifies where in a web request to look for TargetString.

    ", + "refs": { + "ByteMatchTuple$FieldToMatch": "

    The part of a web request that you want AWS WAF to search, such as a specified header or a query string. For more information, see FieldToMatch.

    ", + "SizeConstraint$FieldToMatch": null, + "SqlInjectionMatchTuple$FieldToMatch": null, + "XssMatchTuple$FieldToMatch": null + } + }, + "GetByteMatchSetRequest": { + "base": null, + "refs": { + } + }, + "GetByteMatchSetResponse": { + "base": null, + "refs": { + } + }, + "GetChangeTokenRequest": { + "base": null, + "refs": { + } + }, + "GetChangeTokenResponse": { + "base": null, + "refs": { + } + }, + "GetChangeTokenStatusRequest": { + "base": null, + "refs": { + } + }, + "GetChangeTokenStatusResponse": { + "base": null, + "refs": { + } + }, + "GetIPSetRequest": { + "base": null, + "refs": { + } + }, + "GetIPSetResponse": { + "base": null, + "refs": { + } + }, + "GetRuleRequest": { + "base": null, + "refs": { + } + }, + "GetRuleResponse": { + "base": null, + "refs": { + } + }, + "GetSampledRequestsRequest": { + "base": null, + "refs": { + } + }, + "GetSampledRequestsResponse": { + "base": null, + "refs": { + } + }, + "GetSizeConstraintSetRequest": { + "base": null, + "refs": { + } + }, + "GetSizeConstraintSetResponse": { + "base": null, + "refs": { + } + }, + "GetSqlInjectionMatchSetRequest": { + "base": "

    A request to get a SqlInjectionMatchSet.

    ", + "refs": { + } + }, + "GetSqlInjectionMatchSetResponse": { + "base": "

    The response to a GetSqlInjectionMatchSet request.

    ", + "refs": { + } + }, + "GetWebACLRequest": { + "base": null, + "refs": { + } + }, + "GetWebACLResponse": { + "base": null, + "refs": { + } + }, + "GetXssMatchSetRequest": { + "base": "

    A request to get an XssMatchSet.

    ", + "refs": { + } + }, + "GetXssMatchSetResponse": { + "base": "

    The response to a GetXssMatchSet request.

    ", + "refs": { + } + }, + "HTTPHeader": { + "base": "

    The response from a GetSampledRequests request includes an HTTPHeader complex type that appears as Headers in the response syntax. HTTPHeader contains the names and values of all of the headers that appear in one of the web requests that were returned by GetSampledRequests.

    ", + "refs": { + "HTTPHeaders$member": null + } + }, + "HTTPHeaders": { + "base": null, + "refs": { + "HTTPRequest$Headers": "

    A complex type that contains two values for each header in the sampled web request: the name of the header and the value of the header.

    " + } + }, + "HTTPMethod": { + "base": null, + "refs": { + "HTTPRequest$Method": "

    The HTTP method specified in the sampled web request. CloudFront supports the following methods: DELETE, GET, HEAD, OPTIONS, PATCH, POST, and PUT.

    " + } + }, + "HTTPRequest": { + "base": "

    The response from a GetSampledRequests request includes an HTTPRequest complex type that appears as Request in the response syntax. HTTPRequest contains information about one of the web requests that were returned by GetSampledRequests.

    ", + "refs": { + "SampledHTTPRequest$Request": "

    A complex type that contains detailed information about the request.

    " + } + }, + "HTTPVersion": { + "base": null, + "refs": { + "HTTPRequest$HTTPVersion": "

    The HTTP version specified in the sampled web request, for example, HTTP/1.1.

    " + } + }, + "HeaderName": { + "base": null, + "refs": { + "HTTPHeader$Name": "

    The name of one of the headers in the sampled web request.

    " + } + }, + "HeaderValue": { + "base": null, + "refs": { + "HTTPHeader$Value": "

    The value of one of the headers in the sampled web request.

    " + } + }, + "IPSet": { + "base": "

    Contains one or more IP addresses or blocks of IP addresses specified in Classless Inter-Domain Routing (CIDR) notation. To specify an individual IP address, you specify the four-part IP address followed by a /32, for example, 192.0.2.0/32. To block a range of IP addresses, you can specify a /24, a /16, or a /8 CIDR. For more information about CIDR notation, perform an Internet search on cidr notation.

    ", + "refs": { + "CreateIPSetResponse$IPSet": "

    The IPSet returned in the CreateIPSet response.

    ", + "GetIPSetResponse$IPSet": "

    Information about the IPSet that you specified in the GetIPSet request. For more information, see the following topics:

    • IPSet: Contains IPSetDescriptors, IPSetId, and Name
    • IPSetDescriptors: Contains an array of IPSetDescriptor objects. Each IPSetDescriptor object contains Type and Value
    " + } + }, + "IPSetDescriptor": { + "base": "

    Specifies the IP address type (IPV4) and the IP address range (in CIDR format) that web requests originate from.

    ", + "refs": { + "IPSetDescriptors$member": null, + "IPSetUpdate$IPSetDescriptor": "

    The IP address type (IPV4) and the IP address range (in CIDR notation) that web requests originate from.

    " + } + }, + "IPSetDescriptorType": { + "base": null, + "refs": { + "IPSetDescriptor$Type": "

    Specify IPV4.

    " + } + }, + "IPSetDescriptorValue": { + "base": null, + "refs": { + "IPSetDescriptor$Value": "

    Specify an IPv4 address by using CIDR notation. For example:

    • To configure AWS WAF to allow, block, or count requests that originated from the IP address 192.0.2.44, specify 192.0.2.44/32.
    • To configure AWS WAF to allow, block, or count requests that originated from IP addresses from 192.0.2.0 to 192.0.2.255, specify 192.0.2.0/24.

    AWS WAF supports only /8, /16, /24, and /32 IP addresses.

    For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.
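
    A sketch of inserting a single /32 into an existing IPSet (svc and the placeholder ID as in the earlier sketches):

    // Add the individual address 192.0.2.44 to an IPSet.
    func allowIP(svc *waf.WAF, ipSetID string) error {
        token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
        if err != nil {
            return err
        }
        _, err = svc.UpdateIPSet(&waf.UpdateIPSetInput{
            ChangeToken: token.ChangeToken,
            IPSetId:     aws.String(ipSetID),
            Updates: []*waf.IPSetUpdate{{
                Action: aws.String("INSERT"),
                IPSetDescriptor: &waf.IPSetDescriptor{
                    Type:  aws.String("IPV4"),
                    Value: aws.String("192.0.2.44/32"),
                },
            }},
        })
        return err
    }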

    " + } + }, + "IPSetDescriptors": { + "base": null, + "refs": { + "IPSet$IPSetDescriptors": "

    The IP address type (IPV4) and the IP address range (in CIDR notation) that web requests originate from. If the WebACL is associated with a CloudFront distribution, this is the value of one of the following fields in CloudFront access logs:

    • c-ip, if the viewer did not use an HTTP proxy or a load balancer to send the request
    • x-forwarded-for, if the viewer did use an HTTP proxy or a load balancer to send the request
    " + } + }, + "IPSetSummaries": { + "base": null, + "refs": { + "ListIPSetsResponse$IPSets": "

    An array of IPSetSummary objects.

    " + } + }, + "IPSetSummary": { + "base": "

    Contains the identifier and the name of the IPSet.

    ", + "refs": { + "IPSetSummaries$member": null + } + }, + "IPSetUpdate": { + "base": "

    Specifies the type of update to perform to an IPSet with UpdateIPSet.

    ", + "refs": { + "IPSetUpdates$member": null + } + }, + "IPSetUpdates": { + "base": null, + "refs": { + "UpdateIPSetRequest$Updates": "

    An array of IPSetUpdate objects that you want to insert into or delete from an IPSet. For more information, see the applicable data types:

    • IPSetUpdate: Contains Action and IPSetDescriptor
    • IPSetDescriptor: Contains Type and Value

    " + } + }, + "IPString": { + "base": null, + "refs": { + "HTTPRequest$ClientIP": "

    The IP address that the request originated from. If the WebACL is associated with a CloudFront distribution, this is the value of one of the following fields in CloudFront access logs:

    • c-ip, if the viewer did not use an HTTP proxy or a load balancer to send the request
    • x-forwarded-for, if the viewer did use an HTTP proxy or a load balancer to send the request
    " + } + }, + "ListByteMatchSetsRequest": { + "base": null, + "refs": { + } + }, + "ListByteMatchSetsResponse": { + "base": null, + "refs": { + } + }, + "ListIPSetsRequest": { + "base": null, + "refs": { + } + }, + "ListIPSetsResponse": { + "base": null, + "refs": { + } + }, + "ListMaxItems": { + "base": null, + "refs": { + "GetSampledRequestsRequest$MaxItems": "

    The number of requests that you want AWS WAF to return from among the first 5,000 requests that your AWS resource received during the time range. If your resource received fewer requests than the value of MaxItems, GetSampledRequests returns information about all of them.
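
    GetSampledRequests also requires a TimeWindow; a sketch (imports aws, waf, and time; IDs are placeholders; RuleId may instead be "Default_Action", as described later under ResourceId):

    // Sample up to 100 of the requests evaluated in the last hour.
    func sampleLastHour(svc *waf.WAF, webACLID, ruleID string) (*waf.GetSampledRequestsOutput, error) {
        now := time.Now()
        return svc.GetSampledRequests(&waf.GetSampledRequestsInput{
            WebAclId: aws.String(webACLID),
            RuleId:   aws.String(ruleID),
            MaxItems: aws.Int64(100),
            TimeWindow: &waf.TimeWindow{
                StartTime: aws.Time(now.Add(-time.Hour)),
                EndTime:   aws.Time(now),
            },
        })
    }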

    " + } + }, + "ListRulesRequest": { + "base": null, + "refs": { + } + }, + "ListRulesResponse": { + "base": null, + "refs": { + } + }, + "ListSizeConstraintSetsRequest": { + "base": null, + "refs": { + } + }, + "ListSizeConstraintSetsResponse": { + "base": null, + "refs": { + } + }, + "ListSqlInjectionMatchSetsRequest": { + "base": "

    A request to list the SqlInjectionMatchSet objects created by the current AWS account.

    ", + "refs": { + } + }, + "ListSqlInjectionMatchSetsResponse": { + "base": "

    The response to a ListSqlInjectionMatchSets request.

    ", + "refs": { + } + }, + "ListWebACLsRequest": { + "base": null, + "refs": { + } + }, + "ListWebACLsResponse": { + "base": null, + "refs": { + } + }, + "ListXssMatchSetsRequest": { + "base": "

    A request to list the XssMatchSet objects created by the current AWS account.

    ", + "refs": { + } + }, + "ListXssMatchSetsResponse": { + "base": "

    The response to a ListXssMatchSets request.

    ", + "refs": { + } + }, + "MatchFieldData": { + "base": null, + "refs": { + "FieldToMatch$Data": "

    When the value of Type is HEADER, enter the name of the header that you want AWS WAF to search, for example, User-Agent or Referer. If the value of Type is any other value, omit Data.

    The name of the header is not case sensitive.

    " + } + }, + "MatchFieldType": { + "base": null, + "refs": { + "FieldToMatch$Type": "

    The part of the web request that you want AWS WAF to search for a specified string. Parts of a request that you can search include the following:

    • HEADER: A specified request header, for example, the value of the User-Agent or Referer header. If you choose HEADER for the type, specify the name of the header in Data.
    • METHOD: The HTTP method, which indicates the type of operation that the request is asking the origin to perform. Amazon CloudFront supports the following methods: DELETE, GET, HEAD, OPTIONS, PATCH, POST, and PUT.
    • QUERY_STRING: A query string, which is the part of a URL that appears after a ? character, if any.
    • URI: The part of a web request that identifies a resource, for example, /images/daily-ad.jpg.
    • BODY: The part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form. The request body immediately follows the request headers. Note that only the first 8192 bytes of the request body are forwarded to AWS WAF for inspection. To allow or block requests based on the length of the body, you can create a size constraint set. For more information, see CreateSizeConstraintSet.
    " + } + }, + "MetricName": { + "base": null, + "refs": { + "CreateRuleRequest$MetricName": "

    A friendly name or description for the metrics for this Rule. The name can contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't contain whitespace. You can't change the name of the metric after you create the Rule.

    ", + "CreateWebACLRequest$MetricName": "

    A friendly name or description for the metrics for this WebACL. The name can contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't contain whitespace. You can't change MetricName after you create the WebACL.

    ", + "Rule$MetricName": null, + "WebACL$MetricName": null + } + }, + "Negated": { + "base": null, + "refs": { + "Predicate$Negated": "

    Set Negated to False if you want AWS WAF to allow, block, or count requests based on the settings in the specified ByteMatchSet, IPSet, SqlInjectionMatchSet, XssMatchSet, or SizeConstraintSet. For example, if an IPSet includes the IP address 192.0.2.44, AWS WAF will allow, block, or count requests based on that IP address.

    Set Negated to True if you want AWS WAF to allow, block, or count requests based on the negation of the settings in the ByteMatchSet, IPSet, SqlInjectionMatchSet, XssMatchSet, or SizeConstraintSet. For example, if an IPSet includes the IP address 192.0.2.44, AWS WAF will allow, block, or count requests based on all IP addresses except 192.0.2.44.
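
    A sketch of the non-negated case (the DataId is a placeholder returned by CreateIPSet):

    // Negated=false matches requests FROM 192.0.2.44; Negated=true would
    // match every request except those from addresses in the IPSet.
    var predicate = &waf.Predicate{
        DataId:  aws.String("example-ipset-id"),
        Negated: aws.Bool(false),
        Type:    aws.String("IPMatch"),
    }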

    " + } + }, + "NextMarker": { + "base": null, + "refs": { + "ListByteMatchSetsRequest$NextMarker": "

    If you specify a value for Limit and you have more ByteMatchSets than the value of Limit, AWS WAF returns a NextMarker value in the response that allows you to list another group of ByteMatchSets. For the second and subsequent ListByteMatchSets requests, specify the value of NextMarker from the previous response to get information about another batch of ByteMatchSets.

    ", + "ListByteMatchSetsResponse$NextMarker": "

    If you have more ByteMatchSet objects than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more ByteMatchSet objects, submit another ListByteMatchSets request, and specify the NextMarker value from the response in the NextMarker value in the next request.
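
    The same Limit/NextMarker protocol applies across the List* operations; a sketch of draining it for ByteMatchSets (svc as in the earlier sketches):

    // Page through every ByteMatchSetSummary, ten per request.
    func listAllByteMatchSets(svc *waf.WAF) ([]*waf.ByteMatchSetSummary, error) {
        var all []*waf.ByteMatchSetSummary
        input := &waf.ListByteMatchSetsInput{Limit: aws.Int64(10)}
        for {
            out, err := svc.ListByteMatchSets(input)
            if err != nil {
                return nil, err
            }
            all = append(all, out.ByteMatchSets...)
            if aws.StringValue(out.NextMarker) == "" {
                return all, nil // no marker means no further batches
            }
            input.NextMarker = out.NextMarker
        }
    }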

    ", + "ListIPSetsRequest$NextMarker": "

    If you specify a value for Limit and you have more IPSets than the value of Limit, AWS WAF returns a NextMarker value in the response that allows you to list another group of IPSets. For the second and subsequent ListIPSets requests, specify the value of NextMarker from the previous response to get information about another batch of IPSets.

    ", + "ListIPSetsResponse$NextMarker": "

    If you have more IPSet objects than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more IPSet objects, submit another ListIPSets request, and specify the NextMarker value from the response in the NextMarker value in the next request.

    ", + "ListRulesRequest$NextMarker": "

    If you specify a value for Limit and you have more Rules than the value of Limit, AWS WAF returns a NextMarker value in the response that allows you to list another group of Rules. For the second and subsequent ListRules requests, specify the value of NextMarker from the previous response to get information about another batch of Rules.

    ", + "ListRulesResponse$NextMarker": "

    If you have more Rules than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more Rules, submit another ListRules request, and specify the NextMarker value from the response in the NextMarker value in the next request.

    ", + "ListSizeConstraintSetsRequest$NextMarker": "

    If you specify a value for Limit and you have more SizeConstraintSets than the value of Limit, AWS WAF returns a NextMarker value in the response that allows you to list another group of SizeConstraintSets. For the second and subsequent ListSizeConstraintSets requests, specify the value of NextMarker from the previous response to get information about another batch of SizeConstraintSets.

    ", + "ListSizeConstraintSetsResponse$NextMarker": "

    If you have more SizeConstraintSet objects than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more SizeConstraintSet objects, submit another ListSizeConstraintSets request, and specify the NextMarker value from the response in the NextMarker value in the next request.

    ", + "ListSqlInjectionMatchSetsRequest$NextMarker": "

    If you specify a value for Limit and you have more SqlInjectionMatchSet objects than the value of Limit, AWS WAF returns a NextMarker value in the response that allows you to list another group of SqlInjectionMatchSets. For the second and subsequent ListSqlInjectionMatchSets requests, specify the value of NextMarker from the previous response to get information about another batch of SqlInjectionMatchSets.

    ", + "ListSqlInjectionMatchSetsResponse$NextMarker": "

    If you have more SqlInjectionMatchSet objects than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more SqlInjectionMatchSet objects, submit another ListSqlInjectionMatchSets request, and specify the NextMarker value from the response in the NextMarker value in the next request.

    ", + "ListWebACLsRequest$NextMarker": "

    If you specify a value for Limit and you have more WebACL objects than the number that you specify for Limit, AWS WAF returns a NextMarker value in the response that allows you to list another group of WebACL objects. For the second and subsequent ListWebACLs requests, specify the value of NextMarker from the previous response to get information about another batch of WebACL objects.

    ", + "ListWebACLsResponse$NextMarker": "

    If you have more WebACL objects than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more WebACL objects, submit another ListWebACLs request, and specify the NextMarker value from the response in the NextMarker value in the next request.

    ", + "ListXssMatchSetsRequest$NextMarker": "

    If you specify a value for Limit and you have more XssMatchSet objects than the value of Limit, AWS WAF returns a NextMarker value in the response that allows you to list another group of XssMatchSets. For the second and subsequent ListXssMatchSets requests, specify the value of NextMarker from the previous response to get information about another batch of XssMatchSets.

    ", + "ListXssMatchSetsResponse$NextMarker": "

    If you have more XssMatchSet objects than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more XssMatchSet objects, submit another ListXssMatchSets request, and specify the NextMarker value from the response in the NextMarker value in the next request.

    " + } + }, + "PaginationLimit": { + "base": null, + "refs": { + "ListByteMatchSetsRequest$Limit": "

    Specifies the number of ByteMatchSet objects that you want AWS WAF to return for this request. If you have more ByteMatchSet objects than the number you specify for Limit, the response includes a NextMarker value that you can use to get another batch of ByteMatchSet objects.

    ", + "ListIPSetsRequest$Limit": "

    Specifies the number of IPSet objects that you want AWS WAF to return for this request. If you have more IPSet objects than the number you specify for Limit, the response includes a NextMarker value that you can use to get another batch of IPSet objects.

    ", + "ListRulesRequest$Limit": "

    Specifies the number of Rules that you want AWS WAF to return for this request. If you have more Rules than the number that you specify for Limit, the response includes a NextMarker value that you can use to get another batch of Rules.

    ", + "ListSizeConstraintSetsRequest$Limit": "

    Specifies the number of SizeConstraintSet objects that you want AWS WAF to return for this request. If you have more SizeConstraintSet objects than the number you specify for Limit, the response includes a NextMarker value that you can use to get another batch of SizeConstraintSet objects.

    ", + "ListSqlInjectionMatchSetsRequest$Limit": "

    Specifies the number of SqlInjectionMatchSet objects that you want AWS WAF to return for this request. If you have more SqlInjectionMatchSet objects than the number you specify for Limit, the response includes a NextMarker value that you can use to get another batch of SqlInjectionMatchSet objects.

    ", + "ListWebACLsRequest$Limit": "

    Specifies the number of WebACL objects that you want AWS WAF to return for this request. If you have more WebACL objects than the number that you specify for Limit, the response includes a NextMarker value that you can use to get another batch of WebACL objects.

    ", + "ListXssMatchSetsRequest$Limit": "

    Specifies the number of XssMatchSet objects that you want AWS WAF to return for this request. If you have more XssMatchSet objects than the number you specify for Limit, the response includes a NextMarker value that you can use to get another batch of XssMatchSet objects.

    " + } + }, + "ParameterExceptionField": { + "base": null, + "refs": { + "WAFInvalidParameterException$field": null + } + }, + "ParameterExceptionParameter": { + "base": null, + "refs": { + "WAFInvalidParameterException$parameter": null + } + }, + "ParameterExceptionReason": { + "base": null, + "refs": { + "WAFInvalidParameterException$reason": null + } + }, + "PopulationSize": { + "base": null, + "refs": { + "GetSampledRequestsResponse$PopulationSize": "

    The total number of requests from which GetSampledRequests got a sample of MaxItems requests. If PopulationSize is less than MaxItems, the sample includes every request that your AWS resource received during the specified time range.

    " + } + }, + "PositionalConstraint": { + "base": null, + "refs": { + "ByteMatchTuple$PositionalConstraint": "

    Within the portion of a web request that you want to search (for example, in the query string, if any), specify where you want AWS WAF to search. Valid values include the following:

    CONTAINS

    The specified part of the web request must include the value of TargetString, but the location doesn't matter.

    CONTAINS_WORD

    The specified part of the web request must include the value of TargetString, and TargetString must contain only alphanumeric characters or underscore (A-Z, a-z, 0-9, or _). In addition, TargetString must be a word, which means one of the following:

    • TargetString exactly matches the value of the specified part of the web request, such as the value of a header.
    • TargetString is at the beginning of the specified part of the web request and is followed by a character other than an alphanumeric character or underscore (_), for example, BadBot;.
    • TargetString is at the end of the specified part of the web request and is preceded by a character other than an alphanumeric character or underscore (_), for example, ;BadBot.
    • TargetString is in the middle of the specified part of the web request and is preceded and followed by characters other than alphanumeric characters or underscore (_), for example, -BadBot;.

    EXACTLY

    The value of the specified part of the web request must exactly match the value of TargetString.

    STARTS_WITH

    The value of TargetString must appear at the beginning of the specified part of the web request.

    ENDS_WITH

    The value of TargetString must appear at the end of the specified part of the web request.

    " + } + }, + "Predicate": { + "base": "

    Specifies the ByteMatchSet, IPSet, SqlInjectionMatchSet, XssMatchSet, and SizeConstraintSet objects that you want to add to a Rule and, for each object, indicates whether you want to negate the settings, for example, requests that do NOT originate from the IP address 192.0.2.44.

    ", + "refs": { + "Predicates$member": null, + "RuleUpdate$Predicate": "

    The ID of the Predicate (such as an IPSet) that you want to add to a Rule.

    " + } + }, + "PredicateType": { + "base": null, + "refs": { + "Predicate$Type": "

    The type of predicate in a Rule, such as ByteMatchSet or IPSet.

    " + } + }, + "Predicates": { + "base": null, + "refs": { + "Rule$Predicates": "

    The Predicates object contains one Predicate element for each ByteMatchSet, IPSet, or SqlInjectionMatchSet object that you want to include in a Rule.

    " + } + }, + "ResourceId": { + "base": null, + "refs": { + "ActivatedRule$RuleId": "

    The RuleId for a Rule. You use RuleId to get more information about a Rule (see GetRule), update a Rule (see UpdateRule), insert a Rule into a WebACL or delete one from a WebACL (see UpdateWebACL), or delete a Rule from AWS WAF (see DeleteRule).

    RuleId is returned by CreateRule and by ListRules.

    ", + "ByteMatchSet$ByteMatchSetId": "

    The ByteMatchSetId for a ByteMatchSet. You use ByteMatchSetId to get information about a ByteMatchSet (see GetByteMatchSet), update a ByteMatchSet (see UpdateByteMatchSet), insert a ByteMatchSet into a Rule or delete one from a Rule (see UpdateRule), and delete a ByteMatchSet from AWS WAF (see DeleteByteMatchSet).

    ByteMatchSetId is returned by CreateByteMatchSet and by ListByteMatchSets.

    ", + "ByteMatchSetSummary$ByteMatchSetId": "

    The ByteMatchSetId for a ByteMatchSet. You use ByteMatchSetId to get information about a ByteMatchSet, update a ByteMatchSet, remove a ByteMatchSet from a Rule, and delete a ByteMatchSet from AWS WAF.

    ByteMatchSetId is returned by CreateByteMatchSet and by ListByteMatchSets.

    ", + "DeleteByteMatchSetRequest$ByteMatchSetId": "

    The ByteMatchSetId of the ByteMatchSet that you want to delete. ByteMatchSetId is returned by CreateByteMatchSet and by ListByteMatchSets.

    ", + "DeleteIPSetRequest$IPSetId": "

    The IPSetId of the IPSet that you want to delete. IPSetId is returned by CreateIPSet and by ListIPSets.

    ", + "DeleteRuleRequest$RuleId": "

    The RuleId of the Rule that you want to delete. RuleId is returned by CreateRule and by ListRules.

    ", + "DeleteSizeConstraintSetRequest$SizeConstraintSetId": "

    The SizeConstraintSetId of the SizeConstraintSet that you want to delete. SizeConstraintSetId is returned by CreateSizeConstraintSet and by ListSizeConstraintSets.

    ", + "DeleteSqlInjectionMatchSetRequest$SqlInjectionMatchSetId": "

    The SqlInjectionMatchSetId of the SqlInjectionMatchSet that you want to delete. SqlInjectionMatchSetId is returned by CreateSqlInjectionMatchSet and by ListSqlInjectionMatchSets.

    ", + "DeleteWebACLRequest$WebACLId": "

    The WebACLId of the WebACL that you want to delete. WebACLId is returned by CreateWebACL and by ListWebACLs.

    ", + "DeleteXssMatchSetRequest$XssMatchSetId": "

    The XssMatchSetId of the XssMatchSet that you want to delete. XssMatchSetId is returned by CreateXssMatchSet and by ListXssMatchSets.

    ", + "GetByteMatchSetRequest$ByteMatchSetId": "

    The ByteMatchSetId of the ByteMatchSet that you want to get. ByteMatchSetId is returned by CreateByteMatchSet and by ListByteMatchSets.

    ", + "GetIPSetRequest$IPSetId": "

    The IPSetId of the IPSet that you want to get. IPSetId is returned by CreateIPSet and by ListIPSets.

    ", + "GetRuleRequest$RuleId": "

    The RuleId of the Rule that you want to get. RuleId is returned by CreateRule and by ListRules.

    ", + "GetSampledRequestsRequest$WebAclId": "

    The WebACLId of the WebACL for which you want GetSampledRequests to return a sample of requests.

    ", + "GetSampledRequestsRequest$RuleId": "

    RuleId is one of two values:

    • The RuleId of the Rule for which you want GetSampledRequests to return a sample of requests.
    • Default_Action, which causes GetSampledRequests to return a sample of the requests that didn't match any of the rules in the specified WebACL.
    ", + "GetSizeConstraintSetRequest$SizeConstraintSetId": "

    The SizeConstraintSetId of the SizeConstraintSet that you want to get. SizeConstraintSetId is returned by CreateSizeConstraintSet and by ListSizeConstraintSets.

    ", + "GetSqlInjectionMatchSetRequest$SqlInjectionMatchSetId": "

    The SqlInjectionMatchSetId of the SqlInjectionMatchSet that you want to get. SqlInjectionMatchSetId is returned by CreateSqlInjectionMatchSet and by ListSqlInjectionMatchSets.

    ", + "GetWebACLRequest$WebACLId": "

    The WebACLId of the WebACL that you want to get. WebACLId is returned by CreateWebACL and by ListWebACLs.

    ", + "GetXssMatchSetRequest$XssMatchSetId": "

    The XssMatchSetId of the XssMatchSet that you want to get. XssMatchSetId is returned by CreateXssMatchSet and by ListXssMatchSets.

    ", + "IPSet$IPSetId": "

    The IPSetId for an IPSet. You use IPSetId to get information about an IPSet (see GetIPSet), update an IPSet (see UpdateIPSet), insert an IPSet into a Rule or delete one from a Rule (see UpdateRule), and delete an IPSet from AWS WAF (see DeleteIPSet).

    IPSetId is returned by CreateIPSet and by ListIPSets.

    ", + "IPSetSummary$IPSetId": "

    The IPSetId for an IPSet. You can use IPSetId in a GetIPSet request to get detailed information about an IPSet.

    ", + "Predicate$DataId": "

    A unique identifier for a predicate in a Rule, such as ByteMatchSetId or IPSetId. The ID is returned by the corresponding Create or List command.

    ", + "Rule$RuleId": "

    A unique identifier for a Rule. You use RuleId to get more information about a Rule (see GetRule), update a Rule (see UpdateRule), insert a Rule into a WebACL or delete one from a WebACL (see UpdateWebACL), or delete a Rule from AWS WAF (see DeleteRule).

    RuleId is returned by CreateRule and by ListRules.

    ", + "RuleSummary$RuleId": "

    A unique identifier for a Rule. You use RuleId to get more information about a Rule (see GetRule), update a Rule (see UpdateRule), insert a Rule into a WebACL or delete one from a WebACL (see UpdateWebACL), or delete a Rule from AWS WAF (see DeleteRule).

    RuleId is returned by CreateRule and by ListRules.

    ", + "SizeConstraintSet$SizeConstraintSetId": "

    A unique identifier for a SizeConstraintSet. You use SizeConstraintSetId to get information about a SizeConstraintSet (see GetSizeConstraintSet), update a SizeConstraintSet (see UpdateSizeConstraintSet), insert a SizeConstraintSet into a Rule or delete one from a Rule (see UpdateRule), and delete a SizeConstraintSet from AWS WAF (see DeleteSizeConstraintSet).

    SizeConstraintSetId is returned by CreateSizeConstraintSet and by ListSizeConstraintSets.

    ", + "SizeConstraintSetSummary$SizeConstraintSetId": "

    A unique identifier for a SizeConstraintSet. You use SizeConstraintSetId to get information about a SizeConstraintSet (see GetSizeConstraintSet), update a SizeConstraintSet (see UpdateSizeConstraintSet), insert a SizeConstraintSet into a Rule or delete one from a Rule (see UpdateRule), and delete a SizeConstraintSet from AWS WAF (see DeleteSizeConstraintSet).

    SizeConstraintSetId is returned by CreateSizeConstraintSet and by ListSizeConstraintSets.

    ", + "SqlInjectionMatchSet$SqlInjectionMatchSetId": "

    A unique identifier for a SqlInjectionMatchSet. You use SqlInjectionMatchSetId to get information about a SqlInjectionMatchSet (see GetSqlInjectionMatchSet), update a SqlInjectionMatchSet (see UpdateSqlInjectionMatchSet), insert a SqlInjectionMatchSet into a Rule or delete one from a Rule (see UpdateRule), and delete a SqlInjectionMatchSet from AWS WAF (see DeleteSqlInjectionMatchSet).

    SqlInjectionMatchSetId is returned by CreateSqlInjectionMatchSet and by ListSqlInjectionMatchSets.

    ", + "SqlInjectionMatchSetSummary$SqlInjectionMatchSetId": "

    A unique identifier for a SqlInjectionMatchSet. You use SqlInjectionMatchSetId to get information about a SqlInjectionMatchSet (see GetSqlInjectionMatchSet), update a SqlInjectionMatchSet (see UpdateSqlInjectionMatchSet), insert a SqlInjectionMatchSet into a Rule or delete one from a Rule (see UpdateRule), and delete a SqlInjectionMatchSet from AWS WAF (see DeleteSqlInjectionMatchSet).

    SqlInjectionMatchSetId is returned by CreateSqlInjectionMatchSet and by ListSqlInjectionMatchSets.

    ", + "UpdateByteMatchSetRequest$ByteMatchSetId": "

    The ByteMatchSetId of the ByteMatchSet that you want to update. ByteMatchSetId is returned by CreateByteMatchSet and by ListByteMatchSets.

    ", + "UpdateIPSetRequest$IPSetId": "

    The IPSetId of the IPSet that you want to update. IPSetId is returned by CreateIPSet and by ListIPSets.

    ", + "UpdateRuleRequest$RuleId": "

    The RuleId of the Rule that you want to update. RuleId is returned by CreateRule and by ListRules.

    ", + "UpdateSizeConstraintSetRequest$SizeConstraintSetId": "

    The SizeConstraintSetId of the SizeConstraintSet that you want to update. SizeConstraintSetId is returned by CreateSizeConstraintSet and by ListSizeConstraintSets.

    ", + "UpdateSqlInjectionMatchSetRequest$SqlInjectionMatchSetId": "

    The SqlInjectionMatchSetId of the SqlInjectionMatchSet that you want to update. SqlInjectionMatchSetId is returned by CreateSqlInjectionMatchSet and by ListSqlInjectionMatchSets.

    ", + "UpdateWebACLRequest$WebACLId": "

    The WebACLId of the WebACL that you want to update. WebACLId is returned by CreateWebACL and by ListWebACLs.

    ", + "UpdateXssMatchSetRequest$XssMatchSetId": "

    The XssMatchSetId of the XssMatchSet that you want to update. XssMatchSetId is returned by CreateXssMatchSet and by ListXssMatchSets.

    ", + "WebACL$WebACLId": "

    A unique identifier for a WebACL. You use WebACLId to get information about a WebACL (see GetWebACL), update a WebACL (see UpdateWebACL), and delete a WebACL from AWS WAF (see DeleteWebACL).

    WebACLId is returned by CreateWebACL and by ListWebACLs.

    ", + "WebACLSummary$WebACLId": "

    A unique identifier for a WebACL. You use WebACLId to get information about a WebACL (see GetWebACL), update a WebACL (see UpdateWebACL), and delete a WebACL from AWS WAF (see DeleteWebACL).

    WebACLId is returned by CreateWebACL and by ListWebACLs.

    ", + "XssMatchSet$XssMatchSetId": "

    A unique identifier for an XssMatchSet. You use XssMatchSetId to get information about an XssMatchSet (see GetXssMatchSet), update an XssMatchSet (see UpdateXssMatchSet), insert an XssMatchSet into a Rule or delete one from a Rule (see UpdateRule), and delete an XssMatchSet from AWS WAF (see DeleteXssMatchSet).

    XssMatchSetId is returned by CreateXssMatchSet and by ListXssMatchSets.

    ", + "XssMatchSetSummary$XssMatchSetId": "

    A unique identifier for an XssMatchSet. You use XssMatchSetId to get information about an XssMatchSet (see GetXssMatchSet), update an XssMatchSet (see UpdateXssMatchSet), insert an XssMatchSet into a Rule or delete one from a Rule (see UpdateRule), and delete an XssMatchSet from AWS WAF (see DeleteXssMatchSet).

    XssMatchSetId is returned by CreateXssMatchSet and by ListXssMatchSets.

    " + } + }, + "ResourceName": { + "base": null, + "refs": { + "ByteMatchSet$Name": "

    A friendly name or description of the ByteMatchSet. You can't change Name after you create a ByteMatchSet.

    ", + "ByteMatchSetSummary$Name": "

    A friendly name or description of the ByteMatchSet. You can't change Name after you create a ByteMatchSet.

    ", + "CreateByteMatchSetRequest$Name": "

    A friendly name or description of the ByteMatchSet. You can't change Name after you create a ByteMatchSet.

    ", + "CreateIPSetRequest$Name": "

    A friendly name or description of the IPSet. You can't change Name after you create the IPSet.

    ", + "CreateRuleRequest$Name": "

    A friendly name or description of the Rule. You can't change the name of a Rule after you create it.

    ", + "CreateSizeConstraintSetRequest$Name": "

    A friendly name or description of the SizeConstraintSet. You can't change Name after you create a SizeConstraintSet.

    ", + "CreateSqlInjectionMatchSetRequest$Name": "

    A friendly name or description for the SqlInjectionMatchSet that you're creating. You can't change Name after you create the SqlInjectionMatchSet.

    ", + "CreateWebACLRequest$Name": "

    A friendly name or description of the WebACL. You can't change Name after you create the WebACL.

    ", + "CreateXssMatchSetRequest$Name": "

    A friendly name or description for the XssMatchSet that you're creating. You can't change Name after you create the XssMatchSet.

    ", + "IPSet$Name": "

    A friendly name or description of the IPSet. You can't change the name of an IPSet after you create it.

    ", + "IPSetSummary$Name": "

    A friendly name or description of the IPSet. You can't change the name of an IPSet after you create it.

    ", + "Rule$Name": "

    The friendly name or description for the Rule. You can't change the name of a Rule after you create it.

    ", + "RuleSummary$Name": "

    A friendly name or description of the Rule. You can't change the name of a Rule after you create it.

    ", + "SizeConstraintSet$Name": "

    The name, if any, of the SizeConstraintSet.

    ", + "SizeConstraintSetSummary$Name": "

    The name of the SizeConstraintSet, if any.

    ", + "SqlInjectionMatchSet$Name": "

    The name, if any, of the SqlInjectionMatchSet.

    ", + "SqlInjectionMatchSetSummary$Name": "

    The name of the SqlInjectionMatchSet, if any, specified by Id.

    ", + "WebACL$Name": "

    A friendly name or description of the WebACL. You can't change the name of a WebACL after you create it.

    ", + "WebACLSummary$Name": "

    A friendly name or description of the WebACL. You can't change the name of a WebACL after you create it.

    ", + "XssMatchSet$Name": "

    The name, if any, of the XssMatchSet.

    ", + "XssMatchSetSummary$Name": "

    The name of the XssMatchSet, if any, specified by Id.

    " + } + }, + "Rule": { + "base": "

    A combination of ByteMatchSet, IPSet, and/or SqlInjectionMatchSet objects that identify the web requests that you want to allow, block, or count. For example, you might create a Rule that includes the following predicates:

    • An IPSet that causes AWS WAF to search for web requests that originate from the IP address 192.0.2.44
    • A ByteMatchSet that causes AWS WAF to search for web requests for which the value of the User-Agent header is BadBot.

    To match the settings in this Rule, a request must originate from 192.0.2.44 AND include a User-Agent header for which the value is BadBot.
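    For illustration, a Rule like this could be assembled with the vendored aws-sdk-go WAF client by inserting the two predicates through UpdateRule. A minimal sketch; the Rule, IPSet, and ByteMatchSet IDs are placeholders, and every WAF mutation first needs a change token from GetChangeToken:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/waf"
    )

    func main() {
        svc := waf.New(session.New())

        // Every WAF mutation consumes a fresh change token.
        token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
        if err != nil {
            log.Fatal(err)
        }

        // Insert the two predicates described above: the IPSet match and the
        // ByteMatch on the User-Agent header.
        _, err = svc.UpdateRule(&waf.UpdateRuleInput{
            RuleId:      aws.String("example-rule-id"), // placeholder
            ChangeToken: token.ChangeToken,
            Updates: []*waf.RuleUpdate{
                {
                    Action: aws.String("INSERT"),
                    Predicate: &waf.Predicate{
                        DataId:  aws.String("example-ipset-id"), // placeholder
                        Negated: aws.Bool(false),
                        Type:    aws.String("IPMatch"),
                    },
                },
                {
                    Action: aws.String("INSERT"),
                    Predicate: &waf.Predicate{
                        DataId:  aws.String("example-bytematchset-id"), // placeholder
                        Negated: aws.Bool(false),
                        Type:    aws.String("ByteMatch"),
                    },
                },
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("rule updated")
    }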

    ", + "refs": { + "CreateRuleResponse$Rule": "

    The Rule returned in the CreateRule response.

    ", + "GetRuleResponse$Rule": "

    Information about the Rule that you specified in the GetRule request. For more information, see the following topics:

    • Rule: Contains MetricName, Name, an array of Predicate objects, and RuleId
    • Predicate: Each Predicate object contains DataId, Negated, and Type
    " + } + }, + "RulePriority": { + "base": null, + "refs": { + "ActivatedRule$Priority": "

    Specifies the order in which the Rules in a WebACL are evaluated. Rules with a lower value for Priority are evaluated before Rules with a higher value. The value must be a unique integer. If you add multiple Rules to a WebACL, the values don't need to be consecutive.

    " + } + }, + "RuleSummaries": { + "base": null, + "refs": { + "ListRulesResponse$Rules": "

    An array of RuleSummary objects.

    " + } + }, + "RuleSummary": { + "base": "

    Contains the identifier and the friendly name or description of the Rule.

    ", + "refs": { + "RuleSummaries$member": null + } + }, + "RuleUpdate": { + "base": "

    Specifies a Predicate (such as an IPSet) and indicates whether you want to add it to a Rule or delete it from a Rule.

    ", + "refs": { + "RuleUpdates$member": null + } + }, + "RuleUpdates": { + "base": null, + "refs": { + "UpdateRuleRequest$Updates": "

    An array of RuleUpdate objects that you want to insert into or delete from a Rule. For more information, see the applicable data types:

    " + } + }, + "SampleWeight": { + "base": null, + "refs": { + "SampledHTTPRequest$Weight": "

    A value that indicates how one result in the response relates proportionally to other results in the response. A result that has a weight of 2 represents roughly twice as many CloudFront web requests as a result that has a weight of 1.

    " + } + }, + "SampledHTTPRequest": { + "base": "

    The response from a GetSampledRequests request includes a SampledHTTPRequests complex type that appears as SampledRequests in the response syntax. SampledHTTPRequests contains one SampledHTTPRequest object for each web request that is returned by GetSampledRequests.

    ", + "refs": { + "SampledHTTPRequests$member": null + } + }, + "SampledHTTPRequests": { + "base": null, + "refs": { + "GetSampledRequestsResponse$SampledRequests": "

    A complex type that contains detailed information about each of the requests in the sample.

    " + } + }, + "Size": { + "base": null, + "refs": { + "SizeConstraint$Size": "

    The size in bytes that you want AWS WAF to compare against the size of the specified FieldToMatch. AWS WAF uses this in combination with ComparisonOperator and FieldToMatch to build an expression in the form of \"Size ComparisonOperator size in bytes of FieldToMatch\". If that expression is true, the SizeConstraint is considered to match.

    Valid values for size are 0 - 21474836480 bytes (0 - 20 GB).

    If you specify URI for the value of Type, the / in the URI counts as one character. For example, the URI /logo.jpg is nine characters long.
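    As a hedged sketch of how Size is used in practice, the following assumes a pre-existing SizeConstraintSet (placeholder ID) and inserts a constraint matching requests whose URI is longer than 512 bytes:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/waf"
    )

    func main() {
        svc := waf.New(session.New())

        token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
        if err != nil {
            log.Fatal(err)
        }

        // Insert a constraint that matches when the URI is longer than 512
        // bytes: "Size GT size in bytes of FieldToMatch".
        _, err = svc.UpdateSizeConstraintSet(&waf.UpdateSizeConstraintSetInput{
            SizeConstraintSetId: aws.String("example-set-id"), // placeholder
            ChangeToken:         token.ChangeToken,
            Updates: []*waf.SizeConstraintSetUpdate{{
                Action: aws.String("INSERT"),
                SizeConstraint: &waf.SizeConstraint{
                    ComparisonOperator: aws.String("GT"),
                    FieldToMatch:       &waf.FieldToMatch{Type: aws.String("URI")},
                    Size:               aws.Int64(512),
                    TextTransformation: aws.String("NONE"),
                },
            }},
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("size constraint inserted")
    }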

    " + } + }, + "SizeConstraint": { + "base": "

    Specifies a constraint on the size of a part of the web request. AWS WAF uses the Size, ComparisonOperator, and FieldToMatch to build an expression in the form of \"Size ComparisonOperator size in bytes of FieldToMatch\". If that expression is true, the SizeConstraint is considered to match.

    ", + "refs": { + "SizeConstraintSetUpdate$SizeConstraint": "

    Specifies a constraint on the size of a part of the web request. AWS WAF uses the Size, ComparisonOperator, and FieldToMatch to build an expression in the form of \"Size ComparisonOperator size in bytes of FieldToMatch\". If that expression is true, the SizeConstraint is considered to match.

    ", + "SizeConstraints$member": null + } + }, + "SizeConstraintSet": { + "base": "

    A complex type that contains SizeConstraint objects, which specify the parts of web requests that you want AWS WAF to inspect the size of. If a SizeConstraintSet contains more than one SizeConstraint object, a request only needs to match one constraint to be considered a match.

    ", + "refs": { + "CreateSizeConstraintSetResponse$SizeConstraintSet": "

    A SizeConstraintSet that contains no SizeConstraint objects.

    ", + "GetSizeConstraintSetResponse$SizeConstraintSet": "

    Information about the SizeConstraintSet that you specified in the GetSizeConstraintSet request. For more information, see the following topics:

    " + } + }, + "SizeConstraintSetSummaries": { + "base": null, + "refs": { + "ListSizeConstraintSetsResponse$SizeConstraintSets": "

    An array of SizeConstraintSetSummary objects.

    " + } + }, + "SizeConstraintSetSummary": { + "base": "

    The Id and Name of a SizeConstraintSet.

    ", + "refs": { + "SizeConstraintSetSummaries$member": null + } + }, + "SizeConstraintSetUpdate": { + "base": "

    Specifies the part of a web request that you want to inspect the size of and indicates whether you want to add the specification to a SizeConstraintSet or delete it from a SizeConstraintSet.

    ", + "refs": { + "SizeConstraintSetUpdates$member": null + } + }, + "SizeConstraintSetUpdates": { + "base": null, + "refs": { + "UpdateSizeConstraintSetRequest$Updates": "

    An array of SizeConstraintSetUpdate objects that you want to insert into or delete from a SizeConstraintSet. For more information, see the applicable data types:

    " + } + }, + "SizeConstraints": { + "base": null, + "refs": { + "SizeConstraintSet$SizeConstraints": "

    Specifies the parts of web requests that you want to inspect the size of.

    " + } + }, + "SqlInjectionMatchSet": { + "base": "

    A complex type that contains SqlInjectionMatchTuple objects, which specify the parts of web requests that you want AWS WAF to inspect for snippets of malicious SQL code and, if you want AWS WAF to inspect a header, the name of the header. If a SqlInjectionMatchSet contains more than one SqlInjectionMatchTuple object, a request needs to include snippets of SQL code in only one of the specified parts of the request to be considered a match.

    ", + "refs": { + "CreateSqlInjectionMatchSetResponse$SqlInjectionMatchSet": "

    A SqlInjectionMatchSet.

    ", + "GetSqlInjectionMatchSetResponse$SqlInjectionMatchSet": "

    Information about the SqlInjectionMatchSet that you specified in the GetSqlInjectionMatchSet request. For more information, see the following topics:

    " + } + }, + "SqlInjectionMatchSetSummaries": { + "base": null, + "refs": { + "ListSqlInjectionMatchSetsResponse$SqlInjectionMatchSets": "

    An array of SqlInjectionMatchSetSummary objects.

    " + } + }, + "SqlInjectionMatchSetSummary": { + "base": "

    The Id and Name of a SqlInjectionMatchSet.

    ", + "refs": { + "SqlInjectionMatchSetSummaries$member": null + } + }, + "SqlInjectionMatchSetUpdate": { + "base": "

    Specifies the part of a web request that you want to inspect for snippets of malicious SQL code and indicates whether you want to add the specification to a SqlInjectionMatchSet or delete it from a SqlInjectionMatchSet.

    ", + "refs": { + "SqlInjectionMatchSetUpdates$member": null + } + }, + "SqlInjectionMatchSetUpdates": { + "base": null, + "refs": { + "UpdateSqlInjectionMatchSetRequest$Updates": "

    An array of SqlInjectionMatchSetUpdate objects that you want to insert into or delete from a SqlInjectionMatchSet. For more information, see the applicable data types:

    " + } + }, + "SqlInjectionMatchTuple": { + "base": "

    Specifies the part of a web request that you want AWS WAF to inspect for snippets of malicious SQL code and, if you want AWS WAF to inspect a header, the name of the header.

    ", + "refs": { + "SqlInjectionMatchSetUpdate$SqlInjectionMatchTuple": "

    Specifies the part of a web request that you want AWS WAF to inspect for snippets of malicious SQL code and, if you want AWS WAF to inspect a header, the name of the header.

    ", + "SqlInjectionMatchTuples$member": null + } + }, + "SqlInjectionMatchTuples": { + "base": null, + "refs": { + "SqlInjectionMatchSet$SqlInjectionMatchTuples": "

    Specifies the parts of web requests that you want to inspect for snippets of malicious SQL code.

    " + } + }, + "TextTransformation": { + "base": null, + "refs": { + "ByteMatchTuple$TextTransformation": "

    Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on TargetString before inspecting a request for a match.

    CMD_LINE

    When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

    • Delete the following characters: \\ \" ' ^
    • Delete spaces before the following characters: / (
    • Replace the following characters with a space: , ;
    • Replace multiple spaces with one space
    • Convert uppercase letters (A-Z) to lowercase (a-z)

    COMPRESS_WHITE_SPACE

    Use this option to replace the following characters with a space character (decimal 32):

    • \\f, formfeed, decimal 12
    • \\t, tab, decimal 9
    • \\n, newline, decimal 10
    • \\r, carriage return, decimal 13
    • \\v, vertical tab, decimal 11
    • non-breaking space, decimal 160

    COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

    HTML_ENTITY_DECODE

    Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

    • Replaces &quot; with \"
    • Replaces &nbsp; with a non-breaking space, decimal 160
    • Replaces &lt; with a \"less than\" symbol
    • Replaces &gt; with >
    • Replaces characters that are represented in hexadecimal format, &#xhhhh;, with the corresponding characters
    • Replaces characters that are represented in decimal format, &#nnnn;, with the corresponding characters

    LOWERCASE

    Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

    URL_DECODE

    Use this option to decode a URL-encoded value.

    NONE

    Specify NONE if you don't want to perform any text transformations.

    ", + "SizeConstraint$TextTransformation": "

    Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on FieldToMatch before inspecting a request for a match.

    Note that if you choose BODY for the value of Type, you must choose NONE for TextTransformation because CloudFront forwards only the first 8192 bytes for inspection.

    NONE

    Specify NONE if you don't want to perform any text transformations.

    CMD_LINE

    When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

    • Delete the following characters: \\ \" ' ^
    • Delete spaces before the following characters: / (
    • Replace the following characters with a space: , ;
    • Replace multiple spaces with one space
    • Convert uppercase letters (A-Z) to lowercase (a-z)

    COMPRESS_WHITE_SPACE

    Use this option to replace the following characters with a space character (decimal 32):

    • \\f, formfeed, decimal 12
    • \\t, tab, decimal 9
    • \\n, newline, decimal 10
    • \\r, carriage return, decimal 13
    • \\v, vertical tab, decimal 11
    • non-breaking space, decimal 160

    COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

    HTML_ENTITY_DECODE

    Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

    • Replaces &quot; with \"
    • Replaces &nbsp; with a non-breaking space, decimal 160
    • Replaces &lt; with a \"less than\" symbol
    • Replaces &gt; with >
    • Replaces characters that are represented in hexadecimal format, &#xhhhh;, with the corresponding characters
    • Replaces characters that are represented in decimal format, &#nnnn;, with the corresponding characters

    LOWERCASE

    Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

    URL_DECODE

    Use this option to decode a URL-encoded value.

    ", + "SqlInjectionMatchTuple$TextTransformation": "

    Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on FieldToMatch before inspecting a request for a match.

    CMD_LINE

    When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

    • Delete the following characters: \\ \" ' ^
    • Delete spaces before the following characters: / (
    • Replace the following characters with a space: , ;
    • Replace multiple spaces with one space
    • Convert uppercase letters (A-Z) to lowercase (a-z)

    COMPRESS_WHITE_SPACE

    Use this option to replace the following characters with a space character (decimal 32):

    • \\f, formfeed, decimal 12
    • \\t, tab, decimal 9
    • \\n, newline, decimal 10
    • \\r, carriage return, decimal 13
    • \\v, vertical tab, decimal 11
    • non-breaking space, decimal 160

    COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

    HTML_ENTITY_DECODE

    Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

    • Replaces &quot; with \"
    • Replaces &nbsp; with a non-breaking space, decimal 160
    • Replaces &lt; with a \"less than\" symbol
    • Replaces &gt; with >
    • Replaces characters that are represented in hexadecimal format, &#xhhhh;, with the corresponding characters
    • Replaces characters that are represented in decimal format, &#nnnn;, with the corresponding characters

    LOWERCASE

    Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

    URL_DECODE

    Use this option to decode a URL-encoded value.

    NONE

    Specify NONE if you don't want to perform any text transformations.

    ", + "XssMatchTuple$TextTransformation": "

    Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on FieldToMatch before inspecting a request for a match.

    CMD_LINE

    When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

    • Delete the following characters: \\ \" ' ^
    • Delete spaces before the following characters: / (
    • Replace the following characters with a space: , ;
    • Replace multiple spaces with one space
    • Convert uppercase letters (A-Z) to lowercase (a-z)

    COMPRESS_WHITE_SPACE

    Use this option to replace the following characters with a space character (decimal 32):

    • \\f, formfeed, decimal 12
    • \\t, tab, decimal 9
    • \\n, newline, decimal 10
    • \\r, carriage return, decimal 13
    • \\v, vertical tab, decimal 11
    • non-breaking space, decimal 160

    COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

    HTML_ENTITY_DECODE

    Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

    • Replaces &quot; with \"
    • Replaces &nbsp; with a non-breaking space, decimal 160
    • Replaces &lt; with a \"less than\" symbol
    • Replaces &gt; with >
    • Replaces characters that are represented in hexadecimal format, &#xhhhh;, with the corresponding characters
    • Replaces characters that are represented in decimal format, &#nnnn;, with the corresponding characters

    LOWERCASE

    Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

    URL_DECODE

    Use this option to decode a URL-encoded value.

    NONE

    Specify NONE if you don't want to perform any text transformations.

    " + } + }, + "TimeWindow": { + "base": "

    In a GetSampledRequests request, the StartTime and EndTime objects specify the time range for which you want AWS WAF to return a sample of web requests.

    In a GetSampledRequests response, the StartTime and EndTime objects specify the time range for which AWS WAF actually returned a sample of web requests. AWS WAF gets the specified number of requests from among the first 5,000 requests that your AWS resource receives during the specified time period. If your resource receives more than 5,000 requests during that period, AWS WAF stops sampling after the 5,000th request. In that case, EndTime is the time that AWS WAF received the 5,000th request.

    ", + "refs": { + "GetSampledRequestsRequest$TimeWindow": "

    The start date and time and the end date and time of the range for which you want GetSampledRequests to return a sample of requests. Specify the date and time in Unix time format (in seconds). You can specify any time range in the previous three hours.
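    A minimal sketch of such a request with the vendored Go client, sampling the last 30 minutes for a placeholder WebACL and Rule:

    package main

    import (
        "fmt"
        "log"
        "time"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/waf"
    )

    func main() {
        svc := waf.New(session.New())

        end := time.Now()
        start := end.Add(-30 * time.Minute) // any range within the last three hours

        out, err := svc.GetSampledRequests(&waf.GetSampledRequestsInput{
            WebAclId: aws.String("example-webacl-id"), // placeholder
            RuleId:   aws.String("example-rule-id"),   // placeholder
            MaxItems: aws.Int64(100),
            TimeWindow: &waf.TimeWindow{
                StartTime: aws.Time(start),
                EndTime:   aws.Time(end),
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        for _, r := range out.SampledRequests {
            fmt.Println(aws.TimeValue(r.Timestamp), aws.Int64Value(r.Weight))
        }
    }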

    ", + "GetSampledRequestsResponse$TimeWindow": "

    Usually, TimeWindow is the time range that you specified in the GetSampledRequests request. However, if your AWS resource received more than 5,000 requests during the time range that you specified in the request, GetSampledRequests returns the time range for the first 5,000 requests.

    " + } + }, + "Timestamp": { + "base": null, + "refs": { + "SampledHTTPRequest$Timestamp": "

    The time at which AWS WAF received the request from your AWS resource, in Unix time format (in seconds).

    ", + "TimeWindow$StartTime": "

    The beginning of the time range from which you want GetSampledRequests to return a sample of the requests that your AWS resource received. You can specify any time range in the previous three hours.

    ", + "TimeWindow$EndTime": "

    The end of the time range from which you want GetSampledRequests to return a sample of the requests that your AWS resource received. You can specify any time range in the previous three hours.

    " + } + }, + "URIString": { + "base": null, + "refs": { + "HTTPRequest$URI": "

    The part of a web request that identifies the resource, for example, /images/daily-ad.jpg.

    " + } + }, + "UpdateByteMatchSetRequest": { + "base": null, + "refs": { + } + }, + "UpdateByteMatchSetResponse": { + "base": null, + "refs": { + } + }, + "UpdateIPSetRequest": { + "base": null, + "refs": { + } + }, + "UpdateIPSetResponse": { + "base": null, + "refs": { + } + }, + "UpdateRuleRequest": { + "base": null, + "refs": { + } + }, + "UpdateRuleResponse": { + "base": null, + "refs": { + } + }, + "UpdateSizeConstraintSetRequest": { + "base": null, + "refs": { + } + }, + "UpdateSizeConstraintSetResponse": { + "base": null, + "refs": { + } + }, + "UpdateSqlInjectionMatchSetRequest": { + "base": "

    A request to update a SqlInjectionMatchSet.

    ", + "refs": { + } + }, + "UpdateSqlInjectionMatchSetResponse": { + "base": "

    The response to an UpdateSqlInjectionMatchSet request.

    ", + "refs": { + } + }, + "UpdateWebACLRequest": { + "base": null, + "refs": { + } + }, + "UpdateWebACLResponse": { + "base": null, + "refs": { + } + }, + "UpdateXssMatchSetRequest": { + "base": "

    A request to update an XssMatchSet.

    ", + "refs": { + } + }, + "UpdateXssMatchSetResponse": { + "base": "

    The response to an UpdateXssMatchSet request.

    ", + "refs": { + } + }, + "WAFDisallowedNameException": { + "base": "

    The name specified is invalid.

    ", + "refs": { + } + }, + "WAFInternalErrorException": { + "base": "

    The operation failed because of a system problem, even though the request was valid. Retry your request.

    ", + "refs": { + } + }, + "WAFInvalidAccountException": { + "base": "

    The operation failed because you tried to create, update, or delete an object by using an invalid account identifier.

    ", + "refs": { + } + }, + "WAFInvalidOperationException": { + "base": "

    The operation failed because there was nothing to do. For example:

    • You tried to remove a Rule from a WebACL, but the Rule isn't in the specified WebACL.
    • You tried to remove an IP address from an IPSet, but the IP address isn't in the specified IPSet.
    • You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple isn't in the specified ByteMatchSet.
    • You tried to add a Rule to a WebACL, but the Rule already exists in the specified WebACL.
    • You tried to add an IP address to an IPSet, but the IP address already exists in the specified IPSet.
    • You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple already exists in the specified ByteMatchSet.
    ", + "refs": { + } + }, + "WAFInvalidParameterException": { + "base": "

    The operation failed because AWS WAF didn't recognize a parameter in the request. For example:

    • You specified an invalid parameter name.
    • You specified an invalid value.
    • You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) using an action other than INSERT or DELETE.
    • You tried to create a WebACL with a DefaultAction Type other than ALLOW, BLOCK, or COUNT.
    • You tried to update a WebACL with a WafAction Type other than ALLOW, BLOCK, or COUNT.
    • You tried to update a ByteMatchSet with a FieldToMatch Type other than HEADER, QUERY_STRING, or URI.
    • You tried to update a ByteMatchSet with a Field of HEADER but no value for Data.
    ", + "refs": { + } + }, + "WAFLimitsExceededException": { + "base": "

    The operation exceeds a resource limit, for example, the maximum number of WebACL objects that you can create for an AWS account. For more information, see Limits in the AWS WAF Developer Guide.

    ", + "refs": { + } + }, + "WAFNonEmptyEntityException": { + "base": "

    The operation failed because you tried to delete an object that isn't empty. For example:

    • You tried to delete a WebACL that still contains one or more Rule objects.
    • You tried to delete a Rule that still contains one or more ByteMatchSet objects or other predicates.
    • You tried to delete a ByteMatchSet that contains one or more ByteMatchTuple objects.
    • You tried to delete an IPSet that references one or more IP addresses.
    ", + "refs": { + } + }, + "WAFNonexistentContainerException": { + "base": "

    The operation failed because you tried to add an object to or delete an object from another object that doesn't exist. For example:

    • You tried to add a Rule to or delete a Rule from a WebACL that doesn't exist.
    • You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule that doesn't exist.
    • You tried to add an IP address to or delete an IP address from an IPSet that doesn't exist.
    • You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from a ByteMatchSet that doesn't exist.
    ", + "refs": { + } + }, + "WAFNonexistentItemException": { + "base": "

    The operation failed because the referenced object doesn't exist.

    ", + "refs": { + } + }, + "WAFReferencedItemException": { + "base": "

    The operation failed because you tried to delete an object that is still in use. For example:

    • You tried to delete a ByteMatchSet that is still referenced by a Rule.
    • You tried to delete a Rule that is still referenced by a WebACL.

    ", + "refs": { + } + }, + "WAFStaleDataException": { + "base": "

    The operation failed because you tried to create, update, or delete an object by using a change token that has already been used.
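    Because change tokens are single-use, concurrent writers can trip this error. One hedged way to handle it with the vendored Go SDK is to fetch a fresh token and retry once when the service returns the WAFStaleDataException code (the DeleteRule call and its ID are placeholders):

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/awserr"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/waf"
    )

    // updateWithRetry runs one WAF mutation, retrying once with a fresh change
    // token if the first token was already consumed by another writer.
    func updateWithRetry(svc *waf.WAF, do func(token *string) error) error {
        var err error
        for attempt := 0; attempt < 2; attempt++ {
            var out *waf.GetChangeTokenOutput
            out, err = svc.GetChangeToken(&waf.GetChangeTokenInput{})
            if err != nil {
                return err
            }
            err = do(out.ChangeToken)
            if err == nil {
                return nil
            }
            if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "WAFStaleDataException" {
                continue // token already used; fetch a new one and retry
            }
            return err
        }
        return err
    }

    func main() {
        svc := waf.New(session.New())
        err := updateWithRetry(svc, func(token *string) error {
            _, err := svc.DeleteRule(&waf.DeleteRuleInput{
                RuleId:      aws.String("example-rule-id"), // placeholder
                ChangeToken: token,
            })
            return err
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("rule deleted")
    }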

    ", + "refs": { + } + }, + "WafAction": { + "base": "

    For the action that is associated with a rule in a WebACL, specifies the action that you want AWS WAF to perform when a web request matches all of the conditions in a rule. For the default action in a WebACL, specifies the action that you want AWS WAF to take when a web request doesn't match all of the conditions in any of the rules in a WebACL.

    ", + "refs": { + "ActivatedRule$Action": "

    Specifies the action that CloudFront or AWS WAF takes when a web request matches the conditions in the Rule. Valid values for Action include the following:

    • ALLOW: CloudFront responds with the requested object.
    • BLOCK: CloudFront responds with an HTTP 403 (Forbidden) status code.
    • COUNT: AWS WAF increments a counter of requests that match the conditions in the rule and then continues to inspect the web request based on the remaining rules in the web ACL.
    ", + "CreateWebACLRequest$DefaultAction": "

    The action that you want AWS WAF to take when a request doesn't match the criteria specified in any of the Rule objects that are associated with the WebACL.

    ", + "UpdateWebACLRequest$DefaultAction": null, + "WebACL$DefaultAction": "

    The action to perform if none of the Rules contained in the WebACL match. The action is specified by the WafAction object.

    " + } + }, + "WafActionType": { + "base": null, + "refs": { + "WafAction$Type": "

    Specifies how you want AWS WAF to respond to requests that match the settings in a Rule. Valid settings include the following:

    • ALLOW: AWS WAF allows requests
    • BLOCK: AWS WAF blocks requests
    • COUNT: AWS WAF increments a counter of the requests that match all of the conditions in the rule. AWS WAF then continues to inspect the web request based on the remaining rules in the web ACL. You can't specify COUNT for the default action for a WebACL.
    " + } + }, + "WebACL": { + "base": "

    Contains the Rules that identify the requests that you want to allow, block, or count. In a WebACL, you also specify a default action (ALLOW or BLOCK), and the action for each Rule that you add to a WebACL, for example, block requests from specified IP addresses or block requests from specified referrers. You also associate the WebACL with a CloudFront distribution to identify the requests that you want AWS WAF to filter. If you add more than one Rule to a WebACL, a request needs to match only one of the specifications to be allowed, blocked, or counted. For more information, see UpdateWebACL.
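    A minimal creation sketch with the vendored Go client; Name and MetricName are placeholders, and rules would be attached afterwards with UpdateWebACL:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/waf"
    )

    func main() {
        svc := waf.New(session.New())

        token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
        if err != nil {
            log.Fatal(err)
        }

        // Allow by default; rules added later decide what to block or count.
        out, err := svc.CreateWebACL(&waf.CreateWebACLInput{
            Name:          aws.String("ExampleACL"), // placeholder
            MetricName:    aws.String("ExampleACL"), // placeholder
            DefaultAction: &waf.WafAction{Type: aws.String("ALLOW")},
            ChangeToken:   token.ChangeToken,
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(aws.StringValue(out.WebACL.WebACLId))
    }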

    ", + "refs": { + "CreateWebACLResponse$WebACL": "

    The WebACL returned in the CreateWebACL response.

    ", + "GetWebACLResponse$WebACL": "

    Information about the WebACL that you specified in the GetWebACL request. For more information, see the following topics:

    • WebACL: Contains DefaultAction, MetricName, Name, an array of Rule objects, and WebACLId
    • DefaultAction (Data type is WafAction): Contains Type
    • Rules: Contains an array of ActivatedRule objects, which contain Action, Priority, and RuleId
    • Action: Contains Type
    " + } + }, + "WebACLSummaries": { + "base": null, + "refs": { + "ListWebACLsResponse$WebACLs": "

    An array of WebACLSummary objects.

    " + } + }, + "WebACLSummary": { + "base": "

    Contains the identifier and the name or description of the WebACL.

    ", + "refs": { + "WebACLSummaries$member": null + } + }, + "WebACLUpdate": { + "base": "

    Specifies whether to insert a Rule into or delete a Rule from a WebACL.

    ", + "refs": { + "WebACLUpdates$member": null + } + }, + "WebACLUpdates": { + "base": null, + "refs": { + "UpdateWebACLRequest$Updates": "

    An array of WebACLUpdate objects that you want to insert into or delete from the WebACL. For more information, see the applicable data types:

    " + } + }, + "XssMatchSet": { + "base": "

    A complex type that contains XssMatchTuple objects, which specify the parts of web requests that you want AWS WAF to inspect for cross-site scripting attacks and, if you want AWS WAF to inspect a header, the name of the header. If an XssMatchSet contains more than one XssMatchTuple object, a request needs to include cross-site scripting attacks in only one of the specified parts of the request to be considered a match.

    ", + "refs": { + "CreateXssMatchSetResponse$XssMatchSet": "

    An XssMatchSet.

    ", + "GetXssMatchSetResponse$XssMatchSet": "

    Information about the XssMatchSet that you specified in the GetXssMatchSet request. For more information, see the following topics:

    • XssMatchSet: Contains Name, XssMatchSetId, and an array of XssMatchTuple objects
    • XssMatchTuple: Each XssMatchTuple object contains FieldToMatch and TextTransformation
    • FieldToMatch: Contains Data and Type
    " + } + }, + "XssMatchSetSummaries": { + "base": null, + "refs": { + "ListXssMatchSetsResponse$XssMatchSets": "

    An array of XssMatchSetSummary objects.

    " + } + }, + "XssMatchSetSummary": { + "base": "

    The Id and Name of an XssMatchSet.

    ", + "refs": { + "XssMatchSetSummaries$member": null + } + }, + "XssMatchSetUpdate": { + "base": "

    Specifies the part of a web request that you want to inspect for cross-site scripting attacks and indicates whether you want to add the specification to an XssMatchSet or delete it from an XssMatchSet.

    ", + "refs": { + "XssMatchSetUpdates$member": null + } + }, + "XssMatchSetUpdates": { + "base": null, + "refs": { + "UpdateXssMatchSetRequest$Updates": "

    An array of XssMatchSetUpdate objects that you want to insert into or delete from an XssMatchSet. For more information, see the applicable data types:

    " + } + }, + "XssMatchTuple": { + "base": "

    Specifies the part of a web request that you want AWS WAF to inspect for cross-site scripting attacks and, if you want AWS WAF to inspect a header, the name of the header.

    ", + "refs": { + "XssMatchSetUpdate$XssMatchTuple": "

    Specifies the part of a web request that you want AWS WAF to inspect for cross-site scripting attacks and, if you want AWS WAF to inspect a header, the name of the header.

    ", + "XssMatchTuples$member": null + } + }, + "XssMatchTuples": { + "base": null, + "refs": { + "XssMatchSet$XssMatchTuples": "

    Specifies the parts of web requests that you want to inspect for cross-site scripting attacks.

    " + } + }, + "errorMessage": { + "base": null, + "refs": { + "WAFDisallowedNameException$message": null, + "WAFInternalErrorException$message": null, + "WAFInvalidOperationException$message": null, + "WAFLimitsExceededException$message": null, + "WAFNonEmptyEntityException$message": null, + "WAFNonexistentContainerException$message": null, + "WAFNonexistentItemException$message": null, + "WAFReferencedItemException$message": null, + "WAFStaleDataException$message": null + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/waf/2015-08-24/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/waf/2015-08-24/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/waf/2015-08-24/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/workspaces/2015-04-08/api-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/workspaces/2015-04-08/api-2.json new file mode 100644 index 000000000..f9d661b1a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/workspaces/2015-04-08/api-2.json @@ -0,0 +1,632 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-04-08", + "endpointPrefix":"workspaces", + "jsonVersion":"1.1", + "protocol":"json", + "serviceFullName":"Amazon WorkSpaces", + "signatureVersion":"v4", + "targetPrefix":"WorkspacesService" + }, + "operations":{ + "CreateTags":{ + "name":"CreateTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateTagsRequest"}, + "output":{"shape":"CreateTagsResult"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValuesException"}, + {"shape":"ResourceLimitExceededException"} + ] + }, + "CreateWorkspaces":{ + "name":"CreateWorkspaces", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateWorkspacesRequest"}, + "output":{"shape":"CreateWorkspacesResult"}, + "errors":[ + {"shape":"ResourceLimitExceededException"}, + {"shape":"InvalidParameterValuesException"} + ] + }, + "DeleteTags":{ + "name":"DeleteTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteTagsRequest"}, + "output":{"shape":"DeleteTagsResult"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValuesException"} + ] + }, + "DescribeTags":{ + "name":"DescribeTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTagsRequest"}, + "output":{"shape":"DescribeTagsResult"}, + "errors":[ + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeWorkspaceBundles":{ + "name":"DescribeWorkspaceBundles", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeWorkspaceBundlesRequest"}, + "output":{"shape":"DescribeWorkspaceBundlesResult"}, + "errors":[ + {"shape":"InvalidParameterValuesException"} + ] + }, + "DescribeWorkspaceDirectories":{ + "name":"DescribeWorkspaceDirectories", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeWorkspaceDirectoriesRequest"}, + "output":{"shape":"DescribeWorkspaceDirectoriesResult"}, + "errors":[ + {"shape":"InvalidParameterValuesException"} + ] + }, + "DescribeWorkspaces":{ + "name":"DescribeWorkspaces", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeWorkspacesRequest"}, + "output":{"shape":"DescribeWorkspacesResult"}, + "errors":[ + {"shape":"InvalidParameterValuesException"}, + 
{"shape":"ResourceUnavailableException"} + ] + }, + "RebootWorkspaces":{ + "name":"RebootWorkspaces", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RebootWorkspacesRequest"}, + "output":{"shape":"RebootWorkspacesResult"} + }, + "RebuildWorkspaces":{ + "name":"RebuildWorkspaces", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RebuildWorkspacesRequest"}, + "output":{"shape":"RebuildWorkspacesResult"} + }, + "TerminateWorkspaces":{ + "name":"TerminateWorkspaces", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TerminateWorkspacesRequest"}, + "output":{"shape":"TerminateWorkspacesResult"} + } + }, + "shapes":{ + "ARN":{ + "type":"string", + "pattern":"^arn:aws:[A-Za-z0-9][A-za-z0-9_/.-]{0,62}:[A-za-z0-9_/.-]{0,63}:[A-za-z0-9_/.-]{0,63}:[A-Za-z0-9][A-za-z0-9_/.-]{0,127}$" + }, + "Alias":{"type":"string"}, + "BooleanObject":{"type":"boolean"}, + "BundleId":{ + "type":"string", + "pattern":"^wsb-[0-9a-z]{8,63}$" + }, + "BundleIdList":{ + "type":"list", + "member":{"shape":"BundleId"}, + "max":25, + "min":1 + }, + "BundleList":{ + "type":"list", + "member":{"shape":"WorkspaceBundle"} + }, + "BundleOwner":{"type":"string"}, + "Compute":{ + "type":"string", + "enum":[ + "VALUE", + "STANDARD", + "PERFORMANCE" + ] + }, + "ComputeType":{ + "type":"structure", + "members":{ + "Name":{"shape":"Compute"} + } + }, + "ComputerName":{"type":"string"}, + "CreateTagsRequest":{ + "type":"structure", + "required":[ + "ResourceId", + "Tags" + ], + "members":{ + "ResourceId":{"shape":"NonEmptyString"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateTagsResult":{ + "type":"structure", + "members":{ + } + }, + "CreateWorkspacesRequest":{ + "type":"structure", + "required":["Workspaces"], + "members":{ + "Workspaces":{"shape":"WorkspaceRequestList"} + } + }, + "CreateWorkspacesResult":{ + "type":"structure", + "members":{ + "FailedRequests":{"shape":"FailedCreateWorkspaceRequests"}, + "PendingRequests":{"shape":"WorkspaceList"} + } + }, + "DefaultOu":{"type":"string"}, + "DefaultWorkspaceCreationProperties":{ + "type":"structure", + "members":{ + "EnableWorkDocs":{"shape":"BooleanObject"}, + "EnableInternetAccess":{"shape":"BooleanObject"}, + "DefaultOu":{"shape":"DefaultOu"}, + "CustomSecurityGroupId":{"shape":"SecurityGroupId"}, + "UserEnabledAsLocalAdministrator":{"shape":"BooleanObject"} + } + }, + "DeleteTagsRequest":{ + "type":"structure", + "required":[ + "ResourceId", + "TagKeys" + ], + "members":{ + "ResourceId":{"shape":"NonEmptyString"}, + "TagKeys":{"shape":"TagKeyList"} + } + }, + "DeleteTagsResult":{ + "type":"structure", + "members":{ + } + }, + "DescribeTagsRequest":{ + "type":"structure", + "required":["ResourceId"], + "members":{ + "ResourceId":{"shape":"NonEmptyString"} + } + }, + "DescribeTagsResult":{ + "type":"structure", + "members":{ + "TagList":{"shape":"TagList"} + } + }, + "DescribeWorkspaceBundlesRequest":{ + "type":"structure", + "members":{ + "BundleIds":{"shape":"BundleIdList"}, + "Owner":{"shape":"BundleOwner"}, + "NextToken":{"shape":"PaginationToken"} + } + }, + "DescribeWorkspaceBundlesResult":{ + "type":"structure", + "members":{ + "Bundles":{"shape":"BundleList"}, + "NextToken":{"shape":"PaginationToken"} + } + }, + "DescribeWorkspaceDirectoriesRequest":{ + "type":"structure", + "members":{ + "DirectoryIds":{"shape":"DirectoryIdList"}, + "NextToken":{"shape":"PaginationToken"} + } + }, + "DescribeWorkspaceDirectoriesResult":{ + "type":"structure", + "members":{ + 
"Directories":{"shape":"DirectoryList"}, + "NextToken":{"shape":"PaginationToken"} + } + }, + "DescribeWorkspacesRequest":{ + "type":"structure", + "members":{ + "WorkspaceIds":{"shape":"WorkspaceIdList"}, + "DirectoryId":{"shape":"DirectoryId"}, + "UserName":{"shape":"UserName"}, + "BundleId":{"shape":"BundleId"}, + "Limit":{"shape":"Limit"}, + "NextToken":{"shape":"PaginationToken"} + } + }, + "DescribeWorkspacesResult":{ + "type":"structure", + "members":{ + "Workspaces":{"shape":"WorkspaceList"}, + "NextToken":{"shape":"PaginationToken"} + } + }, + "Description":{"type":"string"}, + "DirectoryId":{ + "type":"string", + "pattern":"^d-[0-9a-f]{8,63}$" + }, + "DirectoryIdList":{ + "type":"list", + "member":{"shape":"DirectoryId"}, + "max":25, + "min":1 + }, + "DirectoryList":{ + "type":"list", + "member":{"shape":"WorkspaceDirectory"} + }, + "DirectoryName":{"type":"string"}, + "DnsIpAddresses":{ + "type":"list", + "member":{"shape":"IpAddress"} + }, + "ErrorType":{"type":"string"}, + "ExceptionMessage":{"type":"string"}, + "FailedCreateWorkspaceRequest":{ + "type":"structure", + "members":{ + "WorkspaceRequest":{"shape":"WorkspaceRequest"}, + "ErrorCode":{"shape":"ErrorType"}, + "ErrorMessage":{"shape":"Description"} + } + }, + "FailedCreateWorkspaceRequests":{ + "type":"list", + "member":{"shape":"FailedCreateWorkspaceRequest"} + }, + "FailedRebootWorkspaceRequests":{ + "type":"list", + "member":{"shape":"FailedWorkspaceChangeRequest"} + }, + "FailedRebuildWorkspaceRequests":{ + "type":"list", + "member":{"shape":"FailedWorkspaceChangeRequest"} + }, + "FailedTerminateWorkspaceRequests":{ + "type":"list", + "member":{"shape":"FailedWorkspaceChangeRequest"} + }, + "FailedWorkspaceChangeRequest":{ + "type":"structure", + "members":{ + "WorkspaceId":{"shape":"WorkspaceId"}, + "ErrorCode":{"shape":"ErrorType"}, + "ErrorMessage":{"shape":"Description"} + } + }, + "InvalidParameterValuesException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "IpAddress":{"type":"string"}, + "Limit":{ + "type":"integer", + "max":25, + "min":1 + }, + "NonEmptyString":{ + "type":"string", + "min":1 + }, + "PaginationToken":{ + "type":"string", + "max":63, + "min":1 + }, + "RebootRequest":{ + "type":"structure", + "required":["WorkspaceId"], + "members":{ + "WorkspaceId":{"shape":"WorkspaceId"} + } + }, + "RebootWorkspaceRequests":{ + "type":"list", + "member":{"shape":"RebootRequest"}, + "max":25, + "min":1 + }, + "RebootWorkspacesRequest":{ + "type":"structure", + "required":["RebootWorkspaceRequests"], + "members":{ + "RebootWorkspaceRequests":{"shape":"RebootWorkspaceRequests"} + } + }, + "RebootWorkspacesResult":{ + "type":"structure", + "members":{ + "FailedRequests":{"shape":"FailedRebootWorkspaceRequests"} + } + }, + "RebuildRequest":{ + "type":"structure", + "required":["WorkspaceId"], + "members":{ + "WorkspaceId":{"shape":"WorkspaceId"} + } + }, + "RebuildWorkspaceRequests":{ + "type":"list", + "member":{"shape":"RebuildRequest"}, + "max":1, + "min":1 + }, + "RebuildWorkspacesRequest":{ + "type":"structure", + "required":["RebuildWorkspaceRequests"], + "members":{ + "RebuildWorkspaceRequests":{"shape":"RebuildWorkspaceRequests"} + } + }, + "RebuildWorkspacesResult":{ + "type":"structure", + "members":{ + "FailedRequests":{"shape":"FailedRebuildWorkspaceRequests"} + } + }, + "RegistrationCode":{ + "type":"string", + "max":20, + "min":1 + }, + "ResourceLimitExceededException":{ + "type":"structure", + "members":{ + 
"message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"}, + "ResourceId":{"shape":"NonEmptyString"} + }, + "exception":true + }, + "ResourceUnavailableException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"}, + "ResourceId":{"shape":"NonEmptyString"} + }, + "exception":true + }, + "SecurityGroupId":{ + "type":"string", + "pattern":"^(sg-[0-9a-f]{8})$" + }, + "SubnetId":{ + "type":"string", + "pattern":"^(subnet-[0-9a-f]{8})$" + }, + "SubnetIds":{ + "type":"list", + "member":{"shape":"SubnetId"} + }, + "Tag":{ + "type":"structure", + "required":["Key"], + "members":{ + "Key":{"shape":"TagKey"}, + "Value":{"shape":"TagValue"} + } + }, + "TagKey":{ + "type":"string", + "max":127, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"NonEmptyString"} + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"} + }, + "TagValue":{ + "type":"string", + "max":255 + }, + "TerminateRequest":{ + "type":"structure", + "required":["WorkspaceId"], + "members":{ + "WorkspaceId":{"shape":"WorkspaceId"} + } + }, + "TerminateWorkspaceRequests":{ + "type":"list", + "member":{"shape":"TerminateRequest"}, + "max":25, + "min":1 + }, + "TerminateWorkspacesRequest":{ + "type":"structure", + "required":["TerminateWorkspaceRequests"], + "members":{ + "TerminateWorkspaceRequests":{"shape":"TerminateWorkspaceRequests"} + } + }, + "TerminateWorkspacesResult":{ + "type":"structure", + "members":{ + "FailedRequests":{"shape":"FailedTerminateWorkspaceRequests"} + } + }, + "UserName":{ + "type":"string", + "max":63, + "min":1 + }, + "UserStorage":{ + "type":"structure", + "members":{ + "Capacity":{"shape":"NonEmptyString"} + } + }, + "VolumeEncryptionKey":{"type":"string"}, + "Workspace":{ + "type":"structure", + "members":{ + "WorkspaceId":{"shape":"WorkspaceId"}, + "DirectoryId":{"shape":"DirectoryId"}, + "UserName":{"shape":"UserName"}, + "IpAddress":{"shape":"IpAddress"}, + "State":{"shape":"WorkspaceState"}, + "BundleId":{"shape":"BundleId"}, + "SubnetId":{"shape":"SubnetId"}, + "ErrorMessage":{"shape":"Description"}, + "ErrorCode":{"shape":"WorkspaceErrorCode"}, + "ComputerName":{"shape":"ComputerName"}, + "VolumeEncryptionKey":{"shape":"VolumeEncryptionKey"}, + "UserVolumeEncryptionEnabled":{"shape":"BooleanObject"}, + "RootVolumeEncryptionEnabled":{"shape":"BooleanObject"} + } + }, + "WorkspaceBundle":{ + "type":"structure", + "members":{ + "BundleId":{"shape":"BundleId"}, + "Name":{"shape":"NonEmptyString"}, + "Owner":{"shape":"BundleOwner"}, + "Description":{"shape":"Description"}, + "UserStorage":{"shape":"UserStorage"}, + "ComputeType":{"shape":"ComputeType"} + } + }, + "WorkspaceDirectory":{ + "type":"structure", + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "Alias":{"shape":"Alias"}, + "DirectoryName":{"shape":"DirectoryName"}, + "RegistrationCode":{"shape":"RegistrationCode"}, + "SubnetIds":{"shape":"SubnetIds"}, + "DnsIpAddresses":{"shape":"DnsIpAddresses"}, + "CustomerUserName":{"shape":"UserName"}, + "IamRoleId":{"shape":"ARN"}, + "DirectoryType":{"shape":"WorkspaceDirectoryType"}, + "WorkspaceSecurityGroupId":{"shape":"SecurityGroupId"}, + "State":{"shape":"WorkspaceDirectoryState"}, + "WorkspaceCreationProperties":{"shape":"DefaultWorkspaceCreationProperties"} + } + }, + "WorkspaceDirectoryState":{ + "type":"string", + "enum":[ + "REGISTERING", + "REGISTERED", + "DEREGISTERING", + "DEREGISTERED", + "ERROR" + ] + }, + 
"WorkspaceDirectoryType":{ + "type":"string", + "enum":[ + "SIMPLE_AD", + "AD_CONNECTOR" + ] + }, + "WorkspaceErrorCode":{"type":"string"}, + "WorkspaceId":{ + "type":"string", + "pattern":"^ws-[0-9a-z]{8,63}$" + }, + "WorkspaceIdList":{ + "type":"list", + "member":{"shape":"WorkspaceId"}, + "max":25, + "min":1 + }, + "WorkspaceList":{ + "type":"list", + "member":{"shape":"Workspace"} + }, + "WorkspaceRequest":{ + "type":"structure", + "required":[ + "DirectoryId", + "UserName", + "BundleId" + ], + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "UserName":{"shape":"UserName"}, + "BundleId":{"shape":"BundleId"}, + "VolumeEncryptionKey":{"shape":"VolumeEncryptionKey"}, + "UserVolumeEncryptionEnabled":{"shape":"BooleanObject"}, + "RootVolumeEncryptionEnabled":{"shape":"BooleanObject"}, + "Tags":{"shape":"TagList"} + } + }, + "WorkspaceRequestList":{ + "type":"list", + "member":{"shape":"WorkspaceRequest"}, + "max":25, + "min":1 + }, + "WorkspaceState":{ + "type":"string", + "enum":[ + "PENDING", + "AVAILABLE", + "IMPAIRED", + "UNHEALTHY", + "REBOOTING", + "REBUILDING", + "TERMINATING", + "TERMINATED", + "SUSPENDED", + "ERROR" + ] + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/workspaces/2015-04-08/docs-2.json b/vendor/github.com/aws/aws-sdk-go/models/apis/workspaces/2015-04-08/docs-2.json new file mode 100644 index 000000000..a48b752c5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/workspaces/2015-04-08/docs-2.json @@ -0,0 +1,550 @@ +{ + "version": "2.0", + "service": "Amazon WorkSpaces Service

    This is the Amazon WorkSpaces API Reference. This guide provides detailed information about Amazon WorkSpaces operations, data types, parameters, and errors.

    ", + "operations": { + "CreateTags": "

    Creates tags for a WorkSpace.

    ", + "CreateWorkspaces": "

    Creates one or more WorkSpaces.

    This operation is asynchronous and returns before the WorkSpaces are created.
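    A minimal sketch with the vendored Go client, using the three required WorkspaceRequest members (all values are placeholders) and inspecting the asynchronous result:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/workspaces"
    )

    func main() {
        svc := workspaces.New(session.New())

        // DirectoryId, UserName, and BundleId are the three required members
        // of each WorkspaceRequest; the values here are placeholders.
        out, err := svc.CreateWorkspaces(&workspaces.CreateWorkspacesInput{
            Workspaces: []*workspaces.WorkspaceRequest{{
                DirectoryId: aws.String("d-0123456789"),   // placeholder
                UserName:    aws.String("jdoe"),           // placeholder
                BundleId:    aws.String("wsb-0123456789"), // placeholder
            }},
        })
        if err != nil {
            log.Fatal(err)
        }
        // The call returns before provisioning finishes: accepted requests
        // come back in PendingRequests, rejected ones in FailedRequests.
        for _, w := range out.PendingRequests {
            fmt.Println("pending:", aws.StringValue(w.WorkspaceId))
        }
        for _, f := range out.FailedRequests {
            fmt.Println("failed:", aws.StringValue(f.ErrorCode), aws.StringValue(f.ErrorMessage))
        }
    }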

    ", + "DeleteTags": "

    Deletes tags from a WorkSpace.

    ", + "DescribeTags": "

    Describes tags for a WorkSpace.

    ", + "DescribeWorkspaceBundles": "

    Obtains information about the WorkSpace bundles that are available to your account in the specified region.

    You can filter the results with either the BundleIds parameter, or the Owner parameter, but not both.

    This operation supports pagination with the use of the NextToken request and response parameters. If more results are available, the NextToken response member contains a token that you pass in the next call to this operation to retrieve the next set of items.
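    The NextToken handshake looks like this with the vendored Go client; a sketch that lists the Amazon-owned bundles:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/workspaces"
    )

    func main() {
        svc := workspaces.New(session.New())

        // Follow NextToken until the service stops returning one.
        input := &workspaces.DescribeWorkspaceBundlesInput{Owner: aws.String("AMAZON")}
        for {
            out, err := svc.DescribeWorkspaceBundles(input)
            if err != nil {
                log.Fatal(err)
            }
            for _, b := range out.Bundles {
                fmt.Println(aws.StringValue(b.BundleId), aws.StringValue(b.Name))
            }
            if out.NextToken == nil {
                break
            }
            input.NextToken = out.NextToken
        }
    }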

    ", + "DescribeWorkspaceDirectories": "

    Retrieves information about the AWS Directory Service directories in the region that are registered with Amazon WorkSpaces and are available to your account.

    This operation supports pagination with the use of the NextToken request and response parameters. If more results are available, the NextToken response member contains a token that you pass in the next call to this operation to retrieve the next set of items.

    ", + "DescribeWorkspaces": "

    Obtains information about the specified WorkSpaces.

    Only one of the filter parameters, such as BundleId, DirectoryId, or WorkspaceIds, can be specified at a time.

    This operation supports pagination with the use of the NextToken request and response parameters. If more results are available, the NextToken response member contains a token that you pass in the next call to this operation to retrieve the next set of items.

    ", + "RebootWorkspaces": "

    Reboots the specified WorkSpaces.

    To be able to reboot a WorkSpace, the WorkSpace must have a State of AVAILABLE, IMPAIRED, or INOPERABLE.

    This operation is asynchronous and will return before the WorkSpaces have rebooted.

    ", + "RebuildWorkspaces": "

    Rebuilds the specified WorkSpaces.

    Rebuilding a WorkSpace is a potentially destructive action that can result in the loss of data. Rebuilding a WorkSpace causes the following to occur:

    • The system is restored to the image of the bundle that the WorkSpace is created from. Any applications that have been installed, or system settings that have been made since the WorkSpace was created will be lost.
    • The data drive (D drive) is re-created from the last automatic snapshot taken of the data drive. The current contents of the data drive are overwritten. Automatic snapshots of the data drive are taken every 12 hours, so the snapshot can be as much as 12 hours old.

    To be able to rebuild a WorkSpace, the WorkSpace must have a State of AVAILABLE or ERROR.

    This operation is asynchronous and will return before the WorkSpaces have been completely rebuilt.
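    A minimal sketch with the vendored Go client; note that the API model above caps RebuildWorkspaceRequests at a single entry (max: 1), so rebuilds go one WorkSpace per call (the WorkspaceId is a placeholder):

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/workspaces"
    )

    func main() {
        svc := workspaces.New(session.New())

        out, err := svc.RebuildWorkspaces(&workspaces.RebuildWorkspacesInput{
            RebuildWorkspaceRequests: []*workspaces.RebuildRequest{{
                WorkspaceId: aws.String("ws-0123456789"), // placeholder
            }},
        })
        if err != nil {
            log.Fatal(err)
        }
        for _, f := range out.FailedRequests {
            fmt.Println("failed:", aws.StringValue(f.WorkspaceId), aws.StringValue(f.ErrorMessage))
        }
    }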

    ", + "TerminateWorkspaces": "

    Terminates the specified WorkSpaces.

    Terminating a WorkSpace is a permanent action and cannot be undone. The user's data is not maintained and will be destroyed. If you need to archive any user data, contact Amazon Web Services before terminating the WorkSpace.

    You can terminate a WorkSpace that is in any state except SUSPENDED.

    This operation is asynchronous and will return before the WorkSpaces have been completely terminated.

    " + }, + "shapes": { + "ARN": { + "base": null, + "refs": { + "WorkspaceDirectory$IamRoleId": "

    The identifier of the IAM role. This is the role that allows Amazon WorkSpaces to make calls to other services, such as Amazon EC2, on your behalf.

    " + } + }, + "Alias": { + "base": null, + "refs": { + "WorkspaceDirectory$Alias": "

    The directory alias.

    " + } + }, + "BooleanObject": { + "base": null, + "refs": { + "DefaultWorkspaceCreationProperties$EnableWorkDocs": "

    Specifies if the directory is enabled for Amazon WorkDocs.

    ", + "DefaultWorkspaceCreationProperties$EnableInternetAccess": "

    A public IP address will be attached to all WorkSpaces that are created or rebuilt.

    ", + "DefaultWorkspaceCreationProperties$UserEnabledAsLocalAdministrator": "

    The WorkSpace user is an administrator on the WorkSpace.

    ", + "Workspace$UserVolumeEncryptionEnabled": "

    Specifies whether the data stored on the user volume, or D: drive, is encrypted.

    ", + "Workspace$RootVolumeEncryptionEnabled": "

    Specifies whether the data stored on the root volume, or C: drive, is encrypted.

    ", + "WorkspaceRequest$UserVolumeEncryptionEnabled": "

    Specifies whether the data stored on the user volume, or D: drive, is encrypted.

    ", + "WorkspaceRequest$RootVolumeEncryptionEnabled": "

    Specifies whether the data stored on the root volume, or C: drive, is encrypted.

    " + } + }, + "BundleId": { + "base": null, + "refs": { + "BundleIdList$member": null, + "DescribeWorkspacesRequest$BundleId": "

    The identifier of a bundle to obtain the WorkSpaces for. All WorkSpaces that are created from this bundle will be retrieved. This parameter cannot be combined with any other filter parameter.

    ", + "Workspace$BundleId": "

    The identifier of the bundle that the WorkSpace was created from.

    ", + "WorkspaceBundle$BundleId": "

    The bundle identifier.

    ", + "WorkspaceRequest$BundleId": "

    The identifier of the bundle to create the WorkSpace from. You can use the DescribeWorkspaceBundles operation to obtain a list of the bundles that are available.

    " + } + }, + "BundleIdList": { + "base": null, + "refs": { + "DescribeWorkspaceBundlesRequest$BundleIds": "

    An array of strings that contains the identifiers of the bundles to retrieve. This parameter cannot be combined with any other filter parameter.

    " + } + }, + "BundleList": { + "base": null, + "refs": { + "DescribeWorkspaceBundlesResult$Bundles": "

    An array of structures that contain information about the bundles.

    " + } + }, + "BundleOwner": { + "base": null, + "refs": { + "DescribeWorkspaceBundlesRequest$Owner": "

    The owner of the bundles to retrieve. This parameter cannot be combined with any other filter parameter.

    This contains one of the following values:

    • null - Retrieves the bundles that belong to the account making the call.
    • AMAZON - Retrieves the bundles that are provided by AWS.
    ", + "WorkspaceBundle$Owner": "

    The owner of the bundle. This contains the owner's account identifier, or AMAZON if the bundle is provided by AWS.

    " + } + }, + "Compute": { + "base": null, + "refs": { + "ComputeType$Name": "

    The name of the compute type for the bundle.

    " + } + }, + "ComputeType": { + "base": "

    Contains information about the compute type of a WorkSpace bundle.

    ", + "refs": { + "WorkspaceBundle$ComputeType": "

    A ComputeType object that specifies the compute type for the bundle.

    " + } + }, + "ComputerName": { + "base": null, + "refs": { + "Workspace$ComputerName": "

    The name of the WorkSpace as seen by the operating system.

    " + } + }, + "CreateTagsRequest": { + "base": "

    The request of the create tags action.

    ", + "refs": { + } + }, + "CreateTagsResult": { + "base": "

    The result of the create tags action.

    ", + "refs": { + } + }, + "CreateWorkspacesRequest": { + "base": "

    Contains the inputs for the CreateWorkspaces operation.

    ", + "refs": { + } + }, + "CreateWorkspacesResult": { + "base": "

    Contains the result of the CreateWorkspaces operation.

    ", + "refs": { + } + }, + "DefaultOu": { + "base": null, + "refs": { + "DefaultWorkspaceCreationProperties$DefaultOu": "

    The organizational unit (OU) in the directory that the WorkSpace machine accounts are placed in.

    " + } + }, + "DefaultWorkspaceCreationProperties": { + "base": "

    Contains default WorkSpace creation information.

    ", + "refs": { + "WorkspaceDirectory$WorkspaceCreationProperties": "

    A structure that specifies the default creation properties for all WorkSpaces in the directory.

    " + } + }, + "DeleteTagsRequest": { + "base": "

    The request of the delete tags action.

    ", + "refs": { + } + }, + "DeleteTagsResult": { + "base": "

    The result of the delete tags action.

    ", + "refs": { + } + }, + "DescribeTagsRequest": { + "base": "

    The request of the describe tags action.

    ", + "refs": { + } + }, + "DescribeTagsResult": { + "base": "

    The result of the describe tags action.

    ", + "refs": { + } + }, + "DescribeWorkspaceBundlesRequest": { + "base": "

    Contains the inputs for the DescribeWorkspaceBundles operation.

    ", + "refs": { + } + }, + "DescribeWorkspaceBundlesResult": { + "base": "

    Contains the results of the DescribeWorkspaceBundles operation.

    ", + "refs": { + } + }, + "DescribeWorkspaceDirectoriesRequest": { + "base": "

    Contains the inputs for the DescribeWorkspaceDirectories operation.

    ", + "refs": { + } + }, + "DescribeWorkspaceDirectoriesResult": { + "base": "

    Contains the results of the DescribeWorkspaceDirectories operation.

    ", + "refs": { + } + }, + "DescribeWorkspacesRequest": { + "base": "

    Contains the inputs for the DescribeWorkspaces operation.

    ", + "refs": { + } + }, + "DescribeWorkspacesResult": { + "base": "

    Contains the results for the DescribeWorkspaces operation.

    ", + "refs": { + } + }, + "Description": { + "base": null, + "refs": { + "FailedCreateWorkspaceRequest$ErrorMessage": "

    The textual error message.

    ", + "FailedWorkspaceChangeRequest$ErrorMessage": "

    The textual error message.

    ", + "Workspace$ErrorMessage": "

    If the WorkSpace could not be created, this contains a textual error message that describes the failure.

    ", + "WorkspaceBundle$Description": "

    The bundle description.

    " + } + }, + "DirectoryId": { + "base": null, + "refs": { + "DescribeWorkspacesRequest$DirectoryId": "

    Specifies the directory identifier to which to limit the WorkSpaces. Optionally, you can specify a specific directory user with the UserName parameter. This parameter cannot be combined with any other filter parameter.

    ", + "DirectoryIdList$member": null, + "Workspace$DirectoryId": "

    The identifier of the AWS Directory Service directory that the WorkSpace belongs to.

    ", + "WorkspaceDirectory$DirectoryId": "

    The directory identifier.

    ", + "WorkspaceRequest$DirectoryId": "

    The identifier of the AWS Directory Service directory to create the WorkSpace in. You can use the DescribeWorkspaceDirectories operation to obtain a list of the directories that are available.

    " + } + }, + "DirectoryIdList": { + "base": null, + "refs": { + "DescribeWorkspaceDirectoriesRequest$DirectoryIds": "

    An array of strings that contains the directory identifiers to retrieve information for. If this member is null, all directories are retrieved.

    " + } + }, + "DirectoryList": { + "base": null, + "refs": { + "DescribeWorkspaceDirectoriesResult$Directories": "

    An array of structures that contain information about the directories.

    " + } + }, + "DirectoryName": { + "base": null, + "refs": { + "WorkspaceDirectory$DirectoryName": "

    The name of the directory.

    " + } + }, + "DnsIpAddresses": { + "base": null, + "refs": { + "WorkspaceDirectory$DnsIpAddresses": "

    An array of strings that contains the IP addresses of the DNS servers for the directory.

    " + } + }, + "ErrorType": { + "base": null, + "refs": { + "FailedCreateWorkspaceRequest$ErrorCode": "

    The error code.

    ", + "FailedWorkspaceChangeRequest$ErrorCode": "

    The error code.

    " + } + }, + "ExceptionMessage": { + "base": null, + "refs": { + "InvalidParameterValuesException$message": "

    The exception error message.

    ", + "ResourceLimitExceededException$message": "

    The exception error message.

    ", + "ResourceNotFoundException$message": "

    The resource could not be found.

    ", + "ResourceUnavailableException$message": "

    The exception error message.

    " + } + }, + "FailedCreateWorkspaceRequest": { + "base": "

    Contains information about a WorkSpace that could not be created.

    ", + "refs": { + "FailedCreateWorkspaceRequests$member": null + } + }, + "FailedCreateWorkspaceRequests": { + "base": null, + "refs": { + "CreateWorkspacesResult$FailedRequests": "

    An array of structures that represent the WorkSpaces that could not be created.

    " + } + }, + "FailedRebootWorkspaceRequests": { + "base": null, + "refs": { + "RebootWorkspacesResult$FailedRequests": "

    An array of structures that represent any WorkSpaces that could not be rebooted.

    " + } + }, + "FailedRebuildWorkspaceRequests": { + "base": null, + "refs": { + "RebuildWorkspacesResult$FailedRequests": "

    An array of structures that represent any WorkSpaces that could not be rebuilt.

    " + } + }, + "FailedTerminateWorkspaceRequests": { + "base": null, + "refs": { + "TerminateWorkspacesResult$FailedRequests": "

    An array of structures that represent any WorkSpaces that could not be terminated.

    " + } + }, + "FailedWorkspaceChangeRequest": { + "base": "

    Contains information about a WorkSpace that could not be rebooted (RebootWorkspaces), rebuilt (RebuildWorkspaces), or terminated (TerminateWorkspaces).

    ", + "refs": { + "FailedRebootWorkspaceRequests$member": null, + "FailedRebuildWorkspaceRequests$member": null, + "FailedTerminateWorkspaceRequests$member": null + } + }, + "InvalidParameterValuesException": { + "base": "

    One or more parameter values are not valid.

    ", + "refs": { + } + }, + "IpAddress": { + "base": null, + "refs": { + "DnsIpAddresses$member": null, + "Workspace$IpAddress": "

    The IP address of the WorkSpace.

    " + } + }, + "Limit": { + "base": null, + "refs": { + "DescribeWorkspacesRequest$Limit": "

    The maximum number of items to return.

    " + } + }, + "NonEmptyString": { + "base": null, + "refs": { + "CreateTagsRequest$ResourceId": "

    The resource ID of the request.

    ", + "DeleteTagsRequest$ResourceId": "

    The resource ID of the request.

    ", + "DescribeTagsRequest$ResourceId": "

    The resource ID of the request.

    ", + "ResourceNotFoundException$ResourceId": "

    The resource could not be found.

    ", + "ResourceUnavailableException$ResourceId": "

    The identifier of the resource that is not available.

    ", + "TagKeyList$member": null, + "UserStorage$Capacity": "

    The amount of user storage for the bundle.

    ", + "WorkspaceBundle$Name": "

    The name of the bundle.

    " + } + }, + "PaginationToken": { + "base": null, + "refs": { + "DescribeWorkspaceBundlesRequest$NextToken": "

    The NextToken value from a previous call to this operation. Pass null if this is the first call.

    ", + "DescribeWorkspaceBundlesResult$NextToken": "

    If not null, more results are available. Pass this value for the NextToken parameter in a subsequent call to this operation to retrieve the next set of items. This token is valid for one day and must be used within that timeframe.

    ", + "DescribeWorkspaceDirectoriesRequest$NextToken": "

    The NextToken value from a previous call to this operation. Pass null if this is the first call.

    ", + "DescribeWorkspaceDirectoriesResult$NextToken": "

    If not null, more results are available. Pass this value for the NextToken parameter in a subsequent call to this operation to retrieve the next set of items. This token is valid for one day and must be used within that timeframe.

    ", + "DescribeWorkspacesRequest$NextToken": "

    The NextToken value from a previous call to this operation. Pass null if this is the first call.

    ", + "DescribeWorkspacesResult$NextToken": "

    If not null, more results are available. Pass this value for the NextToken parameter in a subsequent call to this operation to retrieve the next set of items. This token is valid for one day and must be used within that timeframe.
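The NextToken contract documented above (call again with the returned token until it comes back null; tokens expire after one day) is the manual form of the pagination that the paginators-1.json model added later in this patch also generates helpers for (e.g. DescribeWorkspacesPages). A sketch of the manual loop, assuming the generated service/workspaces client:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/workspaces"
)

func main() {
	svc := workspaces.New(session.Must(session.NewSession()))

	input := &workspaces.DescribeWorkspacesInput{Limit: aws.Int64(25)}
	for {
		page, err := svc.DescribeWorkspaces(input)
		if err != nil {
			panic(err)
		}
		for _, ws := range page.Workspaces {
			fmt.Println(aws.StringValue(ws.WorkspaceId), aws.StringValue(ws.State))
		}
		// A nil NextToken means the last page has been delivered.
		if page.NextToken == nil {
			break
		}
		// Otherwise feed the token back in; it is valid for one day.
		input.NextToken = page.NextToken
	}
}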

    " + } + }, + "RebootRequest": { + "base": "

    Contains information used with the RebootWorkspaces operation to reboot a WorkSpace.

    ", + "refs": { + "RebootWorkspaceRequests$member": null + } + }, + "RebootWorkspaceRequests": { + "base": null, + "refs": { + "RebootWorkspacesRequest$RebootWorkspaceRequests": "

    An array of structures that specify the WorkSpaces to reboot.

    " + } + }, + "RebootWorkspacesRequest": { + "base": "

    Contains the inputs for the RebootWorkspaces operation.

    ", + "refs": { + } + }, + "RebootWorkspacesResult": { + "base": "

    Contains the results of the RebootWorkspaces operation.

    ", + "refs": { + } + }, + "RebuildRequest": { + "base": "

    Contains information used with the RebuildWorkspaces operation to rebuild a WorkSpace.

    ", + "refs": { + "RebuildWorkspaceRequests$member": null + } + }, + "RebuildWorkspaceRequests": { + "base": null, + "refs": { + "RebuildWorkspacesRequest$RebuildWorkspaceRequests": "

    An array of structures that specify the WorkSpaces to rebuild.

    " + } + }, + "RebuildWorkspacesRequest": { + "base": "

    Contains the inputs for the RebuildWorkspaces operation.

    ", + "refs": { + } + }, + "RebuildWorkspacesResult": { + "base": "

    Contains the results of the RebuildWorkspaces operation.

    ", + "refs": { + } + }, + "RegistrationCode": { + "base": null, + "refs": { + "WorkspaceDirectory$RegistrationCode": "

    The registration code for the directory. This is the code that users enter in their Amazon WorkSpaces client application to connect to the directory.

    " + } + }, + "ResourceLimitExceededException": { + "base": "

    Your resource limits have been exceeded.

    ", + "refs": { + } + }, + "ResourceNotFoundException": { + "base": "

    The resource could not be found.

    ", + "refs": { + } + }, + "ResourceUnavailableException": { + "base": "

    The specified resource is not available.

    ", + "refs": { + } + }, + "SecurityGroupId": { + "base": null, + "refs": { + "DefaultWorkspaceCreationProperties$CustomSecurityGroupId": "

    The identifier of any custom security groups that are applied to the WorkSpaces when they are created.

    ", + "WorkspaceDirectory$WorkspaceSecurityGroupId": "

    The identifier of the security group that is assigned to new WorkSpaces.

    " + } + }, + "SubnetId": { + "base": null, + "refs": { + "SubnetIds$member": null, + "Workspace$SubnetId": "

    The identifier of the subnet that the WorkSpace is in.

    " + } + }, + "SubnetIds": { + "base": null, + "refs": { + "WorkspaceDirectory$SubnetIds": "

    An array of strings that contains the identifiers of the subnets used with the directory.

    " + } + }, + "Tag": { + "base": "

    Describes the tag of the WorkSpace.

    ", + "refs": { + "TagList$member": null + } + }, + "TagKey": { + "base": null, + "refs": { + "Tag$Key": "

    The key of the tag.

    " + } + }, + "TagKeyList": { + "base": null, + "refs": { + "DeleteTagsRequest$TagKeys": "

    The tag keys of the request.

    " + } + }, + "TagList": { + "base": null, + "refs": { + "CreateTagsRequest$Tags": "

    The tags of the request.

    ", + "DescribeTagsResult$TagList": "

    The list of tags.

    ", + "WorkspaceRequest$Tags": "

    The tags of the WorkSpace request.

    " + } + }, + "TagValue": { + "base": null, + "refs": { + "Tag$Value": "

    The value of the tag.
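The tag shapes above map onto three small operations: CreateTags attaches key/value pairs to a resource, DescribeTags lists them, and DeleteTags removes them by key. A sketch, assuming the generated service/workspaces client; the WorkSpace ID below is a hypothetical placeholder:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/workspaces"
)

func main() {
	svc := workspaces.New(session.Must(session.NewSession()))
	resourceID := aws.String("ws-xxxxxxxxx") // hypothetical WorkSpace ID

	// Attach a tag to the WorkSpace.
	_, err := svc.CreateTags(&workspaces.CreateTagsInput{
		ResourceId: resourceID,
		Tags: []*workspaces.Tag{
			{Key: aws.String("team"), Value: aws.String("platform")},
		},
	})
	if err != nil {
		panic(err)
	}

	// Read the tags back.
	out, err := svc.DescribeTags(&workspaces.DescribeTagsInput{ResourceId: resourceID})
	if err != nil {
		panic(err)
	}
	for _, t := range out.TagList {
		fmt.Println(aws.StringValue(t.Key), "=", aws.StringValue(t.Value))
	}
}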

    " + } + }, + "TerminateRequest": { + "base": "

    Contains information used with the TerminateWorkspaces operation to terminate a WorkSpace.

    ", + "refs": { + "TerminateWorkspaceRequests$member": null + } + }, + "TerminateWorkspaceRequests": { + "base": null, + "refs": { + "TerminateWorkspacesRequest$TerminateWorkspaceRequests": "

    An array of structures that specify the WorkSpaces to terminate.

    " + } + }, + "TerminateWorkspacesRequest": { + "base": "

    Contains the inputs for the TerminateWorkspaces operation.

    ", + "refs": { + } + }, + "TerminateWorkspacesResult": { + "base": "

    Contains the results of the TerminateWorkspaces operation.

    ", + "refs": { + } + }, + "UserName": { + "base": null, + "refs": { + "DescribeWorkspacesRequest$UserName": "

    Used with the DirectoryId parameter to specify the directory user for which to obtain the WorkSpace.

    ", + "Workspace$UserName": "

    The user that the WorkSpace is assigned to.

    ", + "WorkspaceDirectory$CustomerUserName": "

    The user name for the service account.

    ", + "WorkspaceRequest$UserName": "

    The username that the WorkSpace is assigned to. This username must exist in the AWS Directory Service directory specified by the DirectoryId member.

    " + } + }, + "UserStorage": { + "base": "

    Contains information about the user storage for a WorkSpace bundle.

    ", + "refs": { + "WorkspaceBundle$UserStorage": "

    A UserStorage object that specifies the amount of user storage that the bundle contains.

    " + } + }, + "VolumeEncryptionKey": { + "base": null, + "refs": { + "Workspace$VolumeEncryptionKey": "

    The KMS key used to encrypt data stored on your WorkSpace.

    ", + "WorkspaceRequest$VolumeEncryptionKey": "

    The KMS key used to encrypt data stored on your WorkSpace.
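VolumeEncryptionKey works together with the two boolean flags documented near the top of this hunk: the key names the KMS key, and RootVolumeEncryptionEnabled / UserVolumeEncryptionEnabled choose which drives it applies to. A sketch of a request that encrypts both volumes, assuming the generated client; every identifier here is a hypothetical placeholder:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/workspaces"
)

func main() {
	svc := workspaces.New(session.Must(session.NewSession()))

	_, err := svc.CreateWorkspaces(&workspaces.CreateWorkspacesInput{
		Workspaces: []*workspaces.WorkspaceRequest{{
			DirectoryId: aws.String("d-xxxxxxxxxx"),  // hypothetical directory ID
			BundleId:    aws.String("wsb-xxxxxxxxx"), // hypothetical bundle ID
			UserName:    aws.String("alice"),         // must exist in the directory
			// Encrypt both the root (C:) and user (D:) volumes with this key.
			VolumeEncryptionKey:         aws.String("arn:aws:kms:..."), // hypothetical KMS key ARN
			RootVolumeEncryptionEnabled: aws.Bool(true),
			UserVolumeEncryptionEnabled: aws.Bool(true),
		}},
	})
	if err != nil {
		panic(err)
	}
}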

    " + } + }, + "Workspace": { + "base": "

    Contains information about a WorkSpace.

    ", + "refs": { + "WorkspaceList$member": null + } + }, + "WorkspaceBundle": { + "base": "

    Contains information about a WorkSpace bundle.

    ", + "refs": { + "BundleList$member": null + } + }, + "WorkspaceDirectory": { + "base": "

    Contains information about an AWS Directory Service directory for use with Amazon WorkSpaces.

    ", + "refs": { + "DirectoryList$member": null + } + }, + "WorkspaceDirectoryState": { + "base": null, + "refs": { + "WorkspaceDirectory$State": "

    The state of the directory's registration with Amazon WorkSpaces

    " + } + }, + "WorkspaceDirectoryType": { + "base": null, + "refs": { + "WorkspaceDirectory$DirectoryType": "

    The directory type.

    " + } + }, + "WorkspaceErrorCode": { + "base": null, + "refs": { + "Workspace$ErrorCode": "

    If the WorkSpace could not be created, this contains the error code.

    " + } + }, + "WorkspaceId": { + "base": null, + "refs": { + "FailedWorkspaceChangeRequest$WorkspaceId": "

    The identifier of the WorkSpace.

    ", + "RebootRequest$WorkspaceId": "

    The identifier of the WorkSpace to reboot.

    ", + "RebuildRequest$WorkspaceId": "

    The identifier of the WorkSpace to rebuild.

    ", + "TerminateRequest$WorkspaceId": "

    The identifier of the WorkSpace to terminate.

    ", + "Workspace$WorkspaceId": "

    The identifier of the WorkSpace.

    ", + "WorkspaceIdList$member": null + } + }, + "WorkspaceIdList": { + "base": null, + "refs": { + "DescribeWorkspacesRequest$WorkspaceIds": "

    An array of strings that contain the identifiers of the WorkSpaces for which to retrieve information. This parameter cannot be combined with any other filter parameter.

    Because the CreateWorkspaces operation is asynchronous, the identifier returned by CreateWorkspaces is not immediately available. If you immediately call DescribeWorkspaces with this identifier, no information will be returned.

    " + } + }, + "WorkspaceList": { + "base": null, + "refs": { + "CreateWorkspacesResult$PendingRequests": "

    An array of structures that represent the WorkSpaces that were created.

    Because this operation is asynchronous, the identifier in WorkspaceId is not immediately available. If you immediately call DescribeWorkspaces with this identifier, no information will be returned.

    ", + "DescribeWorkspacesResult$Workspaces": "

    An array of structures that contain the information about the WorkSpaces.

    Because the CreateWorkspaces operation is asynchronous, some of this information may be incomplete for a newly-created WorkSpace.
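Because CreateWorkspaces is asynchronous, the WorkspaceId returned in PendingRequests cannot be fed straight into DescribeWorkspaces; an immediate call may return nothing. A sketch of the resulting poll loop, assuming the generated client; all identifiers are hypothetical placeholders:

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/workspaces"
)

func main() {
	svc := workspaces.New(session.Must(session.NewSession()))

	out, err := svc.CreateWorkspaces(&workspaces.CreateWorkspacesInput{
		Workspaces: []*workspaces.WorkspaceRequest{{
			DirectoryId: aws.String("d-xxxxxxxxxx"),  // hypothetical
			BundleId:    aws.String("wsb-xxxxxxxxx"), // hypothetical
			UserName:    aws.String("alice"),
		}},
	})
	if err != nil {
		panic(err)
	}
	if len(out.PendingRequests) == 0 {
		panic("no pending requests returned")
	}
	id := out.PendingRequests[0].WorkspaceId

	// The identifier is not immediately usable; poll until DescribeWorkspaces
	// starts returning the new WorkSpace.
	for {
		desc, err := svc.DescribeWorkspaces(&workspaces.DescribeWorkspacesInput{
			WorkspaceIds: []*string{id},
		})
		if err == nil && len(desc.Workspaces) > 0 {
			fmt.Println("state:", aws.StringValue(desc.Workspaces[0].State))
			return
		}
		time.Sleep(10 * time.Second)
	}
}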

    " + } + }, + "WorkspaceRequest": { + "base": "

    Contains information about a WorkSpace creation request.

    ", + "refs": { + "FailedCreateWorkspaceRequest$WorkspaceRequest": "

    A WorkspaceRequest object that contains the information about the WorkSpace that could not be created.

    ", + "WorkspaceRequestList$member": null + } + }, + "WorkspaceRequestList": { + "base": null, + "refs": { + "CreateWorkspacesRequest$Workspaces": "

    An array of structures that specify the WorkSpaces to create.

    " + } + }, + "WorkspaceState": { + "base": null, + "refs": { + "Workspace$State": "

    The operational state of the WorkSpace.

    " + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/workspaces/2015-04-08/examples-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/workspaces/2015-04-08/examples-1.json new file mode 100644 index 000000000..0ea7e3b0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/workspaces/2015-04-08/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/apis/workspaces/2015-04-08/paginators-1.json b/vendor/github.com/aws/aws-sdk-go/models/apis/workspaces/2015-04-08/paginators-1.json new file mode 100644 index 000000000..efa8cbad2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/apis/workspaces/2015-04-08/paginators-1.json @@ -0,0 +1,20 @@ +{ + "pagination": { + "DescribeWorkspaceBundles": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "Bundles" + }, + "DescribeWorkspaceDirectories": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "Directories" + }, + "DescribeWorkspaces": { + "limit_key": "Limit", + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "Workspaces" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/protocol_tests/generate.go b/vendor/github.com/aws/aws-sdk-go/models/protocol_tests/generate.go new file mode 100644 index 000000000..50a56c3f1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/protocol_tests/generate.go @@ -0,0 +1,432 @@ +package main + +import ( + "bytes" + "encoding/json" + "fmt" + "net/url" + "os" + "os/exec" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "text/template" + + "github.com/aws/aws-sdk-go/private/model/api" + "github.com/aws/aws-sdk-go/private/util" +) + +// TestSuiteTypeInput input test +// TestSuiteTypeInput output test +const ( + TestSuiteTypeInput = iota + TestSuiteTypeOutput +) + +type testSuite struct { + *api.API + Description string + Cases []testCase + Type uint + title string +} + +type testCase struct { + TestSuite *testSuite + Given *api.Operation + Params interface{} `json:",omitempty"` + Data interface{} `json:"result,omitempty"` + InputTest testExpectation `json:"serialized"` + OutputTest testExpectation `json:"response"` +} + +type testExpectation struct { + Body string + URI string + Headers map[string]string + StatusCode uint `json:"status_code"` +} + +const preamble = ` +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} +var _ = io.EOF +var _ = aws.String +var _ = fmt.Println + +func init() { + protocol.RandReader = &awstesting.ZeroReader{} +} +` + +var reStripSpace = regexp.MustCompile(`\s(\w)`) + +var reImportRemoval = regexp.MustCompile(`(?s:import \((.+?)\))`) + +func removeImports(code string) string { + return reImportRemoval.ReplaceAllString(code, "") +} + +var extraImports = []string{ + "bytes", + "encoding/json", + "encoding/xml", + "fmt", + "io", + "io/ioutil", + "net/http", + "testing", + "time", + "net/url", + "", + "github.com/aws/aws-sdk-go/awstesting", + "github.com/aws/aws-sdk-go/aws/session", + "github.com/aws/aws-sdk-go/private/protocol", + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", + "github.com/aws/aws-sdk-go/private/util", + "github.com/stretchr/testify/assert", +} + +func addImports(code string) string { + importNames := make([]string, len(extraImports)) + for i, n := range extraImports { + if n != 
"" { + importNames[i] = fmt.Sprintf("%q", n) + } + } + + str := reImportRemoval.ReplaceAllString(code, "import (\n"+strings.Join(importNames, "\n")+"$1\n)") + return str +} + +func (t *testSuite) TestSuite() string { + var buf bytes.Buffer + + t.title = reStripSpace.ReplaceAllStringFunc(t.Description, func(x string) string { + return strings.ToUpper(x[1:]) + }) + t.title = regexp.MustCompile(`\W`).ReplaceAllString(t.title, "") + + for idx, c := range t.Cases { + c.TestSuite = t + buf.WriteString(c.TestCase(idx) + "\n") + } + return buf.String() +} + +var tplInputTestCase = template.Must(template.New("inputcase").Parse(` +func Test{{ .OpName }}(t *testing.T) { + sess := session.New() + svc := New{{ .TestCase.TestSuite.API.StructName }}(sess, &aws.Config{Endpoint: aws.String("https://test")}) + {{ if ne .ParamsString "" }}input := {{ .ParamsString }} + req, _ := svc.{{ .TestCase.Given.ExportedName }}Request(input){{ else }}req, _ := svc.{{ .TestCase.Given.ExportedName }}Request(nil){{ end }} + r := req.HTTPRequest + + // build request + {{ .TestCase.TestSuite.API.ProtocolPackage }}.Build(req) + assert.NoError(t, req.Error) + + {{ if ne .TestCase.InputTest.Body "" }}// assert body + assert.NotNil(t, r.Body) + {{ .BodyAssertions }}{{ end }} + + {{ if ne .TestCase.InputTest.URI "" }}// assert URL + awstesting.AssertURL(t, "https://test{{ .TestCase.InputTest.URI }}", r.URL.String()){{ end }} + + // assert headers +{{ range $k, $v := .TestCase.InputTest.Headers }}assert.Equal(t, "{{ $v }}", r.Header.Get("{{ $k }}")) +{{ end }} +} +`)) + +type tplInputTestCaseData struct { + TestCase *testCase + OpName, ParamsString string +} + +func (t tplInputTestCaseData) BodyAssertions() string { + code := &bytes.Buffer{} + protocol := t.TestCase.TestSuite.API.Metadata.Protocol + + // Extract the body bytes + switch protocol { + case "rest-xml": + fmt.Fprintln(code, "body := util.SortXML(r.Body)") + default: + fmt.Fprintln(code, "body, _ := ioutil.ReadAll(r.Body)") + } + + // Generate the body verification code + expectedBody := util.Trim(t.TestCase.InputTest.Body) + switch protocol { + case "ec2", "query": + fmt.Fprintf(code, "awstesting.AssertQuery(t, `%s`, util.Trim(string(body)))", + expectedBody) + case "rest-xml": + if strings.HasPrefix(expectedBody, "<") { + fmt.Fprintf(code, "awstesting.AssertXML(t, `%s`, util.Trim(string(body)), %s{})", + expectedBody, t.TestCase.Given.InputRef.ShapeName) + } else { + fmt.Fprintf(code, "assert.Equal(t, `%s`, util.Trim(string(body)))", + expectedBody) + } + case "json", "jsonrpc", "rest-json": + if strings.HasPrefix(expectedBody, "{") { + fmt.Fprintf(code, "awstesting.AssertJSON(t, `%s`, util.Trim(string(body)))", + expectedBody) + } else { + fmt.Fprintf(code, "assert.Equal(t, `%s`, util.Trim(string(body)))", + expectedBody) + } + default: + fmt.Fprintf(code, "assert.Equal(t, `%s`, util.Trim(string(body)))", + expectedBody) + } + + return code.String() +} + +var tplOutputTestCase = template.Must(template.New("outputcase").Parse(` +func Test{{ .OpName }}(t *testing.T) { + sess := session.New() + svc := New{{ .TestCase.TestSuite.API.StructName }}(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte({{ .Body }})) + req, out := svc.{{ .TestCase.Given.ExportedName }}Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + {{ range $k, $v := .TestCase.OutputTest.Headers }}req.HTTPResponse.Header.Set("{{ $k }}", "{{ $v }}") + {{ end }} + + // unmarshal 
response + {{ .TestCase.TestSuite.API.ProtocolPackage }}.UnmarshalMeta(req) + {{ .TestCase.TestSuite.API.ProtocolPackage }}.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + {{ .Assertions }} +} +`)) + +type tplOutputTestCaseData struct { + TestCase *testCase + Body, OpName, Assertions string +} + +func (i *testCase) TestCase(idx int) string { + var buf bytes.Buffer + + opName := i.TestSuite.API.StructName() + i.TestSuite.title + "Case" + strconv.Itoa(idx+1) + + if i.TestSuite.Type == TestSuiteTypeInput { // input test + // query test should sort body as form encoded values + switch i.TestSuite.API.Metadata.Protocol { + case "query", "ec2": + m, _ := url.ParseQuery(i.InputTest.Body) + i.InputTest.Body = m.Encode() + case "rest-xml": + i.InputTest.Body = util.SortXML(bytes.NewReader([]byte(i.InputTest.Body))) + case "json", "rest-json": + i.InputTest.Body = strings.Replace(i.InputTest.Body, " ", "", -1) + } + + input := tplInputTestCaseData{ + TestCase: i, + OpName: strings.ToUpper(opName[0:1]) + opName[1:], + ParamsString: api.ParamsStructFromJSON(i.Params, i.Given.InputRef.Shape, false), + } + + if err := tplInputTestCase.Execute(&buf, input); err != nil { + panic(err) + } + } else if i.TestSuite.Type == TestSuiteTypeOutput { + output := tplOutputTestCaseData{ + TestCase: i, + Body: fmt.Sprintf("%q", i.OutputTest.Body), + OpName: strings.ToUpper(opName[0:1]) + opName[1:], + Assertions: GenerateAssertions(i.Data, i.Given.OutputRef.Shape, "out"), + } + + if err := tplOutputTestCase.Execute(&buf, output); err != nil { + panic(err) + } + } + + return buf.String() +} + +// generateTestSuite generates a protocol test suite for a given configuration +// JSON protocol test file. +func generateTestSuite(filename string) string { + inout := "Input" + if strings.Contains(filename, "output/") { + inout = "Output" + } + + var suites []testSuite + f, err := os.Open(filename) + if err != nil { + panic(err) + } + + err = json.NewDecoder(f).Decode(&suites) + if err != nil { + panic(err) + } + + var buf bytes.Buffer + buf.WriteString("package " + suites[0].ProtocolPackage() + "_test\n\n") + + var innerBuf bytes.Buffer + innerBuf.WriteString("//\n// Tests begin here\n//\n\n\n") + + for i, suite := range suites { + svcPrefix := inout + "Service" + strconv.Itoa(i+1) + suite.API.Metadata.ServiceAbbreviation = svcPrefix + "ProtocolTest" + suite.API.Operations = map[string]*api.Operation{} + for idx, c := range suite.Cases { + c.Given.ExportedName = svcPrefix + "TestCaseOperation" + strconv.Itoa(idx+1) + suite.API.Operations[c.Given.ExportedName] = c.Given + } + + suite.Type = getType(inout) + suite.API.NoInitMethods = true // don't generate init methods + suite.API.NoStringerMethods = true // don't generate stringer methods + suite.API.NoConstServiceNames = true // don't generate service names + suite.API.Setup() + suite.API.Metadata.EndpointPrefix = suite.API.PackageName() + + // Sort in order for deterministic test generation + names := make([]string, 0, len(suite.API.Shapes)) + for n := range suite.API.Shapes { + names = append(names, n) + } + sort.Strings(names) + for _, name := range names { + s := suite.API.Shapes[name] + s.Rename(svcPrefix + "TestShape" + name) + } + + svcCode := addImports(suite.API.ServiceGoCode()) + if i == 0 { + importMatch := reImportRemoval.FindStringSubmatch(svcCode) + buf.WriteString(importMatch[0] + "\n\n") + buf.WriteString(preamble + "\n\n") + } + svcCode = removeImports(svcCode) + svcCode = 
strings.Replace(svcCode, "func New(", "func New"+suite.API.StructName()+"(", -1) + svcCode = strings.Replace(svcCode, "func newClient(", "func new"+suite.API.StructName()+"Client(", -1) + svcCode = strings.Replace(svcCode, "return newClient(", "return new"+suite.API.StructName()+"Client(", -1) + buf.WriteString(svcCode + "\n\n") + + apiCode := removeImports(suite.API.APIGoCode()) + apiCode = strings.Replace(apiCode, "var oprw sync.Mutex", "", -1) + apiCode = strings.Replace(apiCode, "oprw.Lock()", "", -1) + apiCode = strings.Replace(apiCode, "defer oprw.Unlock()", "", -1) + buf.WriteString(apiCode + "\n\n") + + innerBuf.WriteString(suite.TestSuite() + "\n") + } + + return buf.String() + innerBuf.String() +} + +// findMember searches the shape for the member with the matching key name. +func findMember(shape *api.Shape, key string) string { + for actualKey := range shape.MemberRefs { + if strings.ToLower(key) == strings.ToLower(actualKey) { + return actualKey + } + } + return "" +} + +// GenerateAssertions builds assertions for a shape based on its type. +// +// The shape's recursive values also will have assertions generated for them. +func GenerateAssertions(out interface{}, shape *api.Shape, prefix string) string { + switch t := out.(type) { + case map[string]interface{}: + keys := util.SortedKeys(t) + + code := "" + if shape.Type == "map" { + for _, k := range keys { + v := t[k] + s := shape.ValueRef.Shape + code += GenerateAssertions(v, s, prefix+"[\""+k+"\"]") + } + } else { + for _, k := range keys { + v := t[k] + m := findMember(shape, k) + s := shape.MemberRefs[m].Shape + code += GenerateAssertions(v, s, prefix+"."+m+"") + } + } + return code + case []interface{}: + code := "" + for i, v := range t { + s := shape.MemberRef.Shape + code += GenerateAssertions(v, s, prefix+"["+strconv.Itoa(i)+"]") + } + return code + default: + switch shape.Type { + case "timestamp": + return fmt.Sprintf("assert.Equal(t, time.Unix(%#v, 0).UTC().String(), %s.String())\n", out, prefix) + case "blob": + return fmt.Sprintf("assert.Equal(t, %#v, string(%s))\n", out, prefix) + case "integer", "long": + return fmt.Sprintf("assert.Equal(t, int64(%#v), *%s)\n", out, prefix) + default: + if !reflect.ValueOf(out).IsValid() { + return fmt.Sprintf("assert.Nil(t, %s)\n", prefix) + } + return fmt.Sprintf("assert.Equal(t, %#v, *%s)\n", out, prefix) + } + } +} + +func getType(t string) uint { + switch t { + case "Input": + return TestSuiteTypeInput + case "Output": + return TestSuiteTypeOutput + default: + panic("Invalid type for test suite") + } +} + +func main() { + out := generateTestSuite(os.Args[1]) + if len(os.Args) == 3 { + f, err := os.Create(os.Args[2]) + defer f.Close() + if err != nil { + panic(err) + } + f.WriteString(util.GoFmt(out)) + f.Close() + + c := exec.Command("gofmt", "-s", "-w", os.Args[2]) + if err := c.Run(); err != nil { + panic(err) + } + } else { + fmt.Println(out) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/models/protocol_tests/input/ec2.json b/vendor/github.com/aws/aws-sdk-go/models/protocol_tests/input/ec2.json new file mode 100644 index 000000000..046626eab --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/protocol_tests/input/ec2.json @@ -0,0 +1,422 @@ +[ + { + "description": "Scalar members", + "metadata": { + "protocol": "ec2", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "Foo": { + "shape": "StringType" + }, + "Bar": { + "shape": "StringType" + } + } + }, + "StringType": { + "type": "string" + } + }, + 
"cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "Foo": "val1", + "Bar": "val2" + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&Foo=val1&Bar=val2" + } + } + ] + }, + { + "description": "Structure with locationName and queryName applied to members", + "metadata": { + "protocol": "ec2", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "Foo": { + "shape": "StringType" + }, + "Bar": { + "shape": "StringType", + "locationName": "barLocationName" + }, + "Yuck": { + "shape": "StringType", + "locationName": "yuckLocationName", + "queryName": "yuckQueryName" + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "Foo": "val1", + "Bar": "val2", + "Yuck": "val3" + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&Foo=val1&BarLocationName=val2&yuckQueryName=val3" + } + } + ] + }, + { + "description": "Nested structure members", + "metadata": { + "protocol": "ec2", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "StructArg": { + "shape": "StructType", + "locationName": "Struct" + } + } + }, + "StructType": { + "type": "structure", + "members": { + "ScalarArg": { + "shape": "StringType", + "locationName": "Scalar" + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "StructArg": { + "ScalarArg": "foo" + } + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&Struct.Scalar=foo" + } + } + ] + }, + { + "description": "List types", + "metadata": { + "protocol": "ec2", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "ListArg": { + "shape": "ListType" + } + } + }, + "ListType": { + "type": "list", + "member": { + "shape": "Strings" + } + }, + "Strings": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "ListArg": [ + "foo", + "bar", + "baz" + ] + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&ListArg.1=foo&ListArg.2=bar&ListArg.3=baz" + } + } + ] + }, + { + "description": "List with location name applied to member", + "metadata": { + "protocol": "ec2", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "ListArg": { + "shape": "ListType", + "locationName": "ListMemberName" + } + } + }, + "ListType": { + "type": "list", + "member": { + "shape": "StringType", + "LocationName": "item" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "ListArg": [ + "a", + "b", + "c" + ] + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&ListMemberName.1=a&ListMemberName.2=b&ListMemberName.3=c" + } + } + ] + }, + { + "description": "List with locationName and queryName", + "metadata": { + "protocol": "ec2", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "ListArg": { + "shape": "ListType", + "locationName": "ListMemberName", + "queryName": 
"ListQueryName" + } + } + }, + "ListType": { + "type": "list", + "member": { + "shape": "StringType", + "LocationName": "item" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "ListArg": [ + "a", + "b", + "c" + ] + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&ListQueryName.1=a&ListQueryName.2=b&ListQueryName.3=c" + } + } + ] + }, + { + "description": "Base64 encoded Blobs", + "metadata": { + "protocol": "ec2", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "BlobArg": { + "shape": "BlobType" + } + } + }, + "BlobType": { + "type": "blob" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "BlobArg": "foo" + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&BlobArg=Zm9v" + } + } + ] + }, + { + "description": "Timestamp values", + "metadata": { + "protocol": "ec2", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "TimeArg": { + "shape": "TimestampType" + } + } + }, + "TimestampType": { + "type": "timestamp" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "TimeArg": 1422172800 + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&TimeArg=2015-01-25T08%3A00%3A00Z" + } + } + ] + }, + { + "description": "Idempotency token auto fill", + "metadata": { + "protocol": "ec2", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "Token": { + "shape": "StringType", + "idempotencyToken": true + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "Token": "abc123" + }, + "serialized": { + "uri": "/path", + "headers": {}, + "body": "Token=abc123" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + }, + "serialized": { + "uri": "/path", + "headers": {}, + "body": "Token=00000000-0000-4000-8000-000000000000" + } + } + ] + } +] diff --git a/vendor/github.com/aws/aws-sdk-go/models/protocol_tests/input/json.json b/vendor/github.com/aws/aws-sdk-go/models/protocol_tests/input/json.json new file mode 100644 index 000000000..32d734efb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/protocol_tests/input/json.json @@ -0,0 +1,541 @@ +[ + { + "description": "Scalar members", + "metadata": { + "protocol": "json", + "jsonVersion": "1.1", + "targetPrefix": "com.amazonaws.foo" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "Name": { + "shape": "StringType" + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName", + "http": { + "method": "POST" + } + }, + "params": { + "Name": "myname" + }, + "serialized": { + "body": "{\"Name\": \"myname\"}", + "headers": { + "X-Amz-Target": "com.amazonaws.foo.OperationName", + "Content-Type": "application/x-amz-json-1.1" + }, + "uri": "/" + } + } + ] + }, + { + "description": "Timestamp values", + 
"metadata": { + "protocol": "json", + "jsonVersion": "1.1", + "targetPrefix": "com.amazonaws.foo" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "TimeArg": { + "shape": "TimestampType" + } + } + }, + "TimestampType": { + "type": "timestamp" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "TimeArg": 1422172800 + }, + "serialized": { + "body": "{\"TimeArg\": 1422172800}", + "headers": { + "X-Amz-Target": "com.amazonaws.foo.OperationName", + "Content-Type": "application/x-amz-json-1.1" + }, + "uri": "/" + } + } + ] + }, + { + "description": "Base64 encoded Blobs", + "metadata": { + "protocol": "json", + "jsonVersion": "1.1", + "targetPrefix": "com.amazonaws.foo" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "BlobArg": { + "shape": "BlobType" + }, + "BlobMap": { + "shape": "BlobMapType" + } + } + }, + "BlobType": { + "type": "blob" + }, + "BlobMapType": { + "type": "map", + "key": {"shape": "StringType"}, + "value": {"shape": "BlobType"} + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "BlobArg": "foo" + }, + "serialized": { + "body": "{\"BlobArg\": \"Zm9v\"}", + "headers": { + "X-Amz-Target": "com.amazonaws.foo.OperationName", + "Content-Type": "application/x-amz-json-1.1" + }, + "uri": "/" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "BlobMap": { + "key1": "foo", + "key2": "bar" + } + }, + "serialized": { + "body": "{\"BlobMap\": {\"key1\": \"Zm9v\", \"key2\": \"YmFy\"}}", + "headers": { + "X-Amz-Target": "com.amazonaws.foo.OperationName", + "Content-Type": "application/x-amz-json-1.1" + }, + "uri": "/" + } + } + ] + }, + { + "description": "Nested blobs", + "metadata": { + "protocol": "json", + "jsonVersion": "1.1", + "targetPrefix": "com.amazonaws.foo" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "ListParam": { + "shape": "ListOfStructures" + } + } + }, + "ListOfStructures": { + "type": "list", + "member": { + "shape": "BlobType" + } + }, + "BlobType": { + "type": "blob" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST" + }, + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "ListParam": ["foo", "bar"] + }, + "serialized": { + "body": "{\"ListParam\": [\"Zm9v\", \"YmFy\"]}", + "uri": "/", + "headers": {"X-Amz-Target": "com.amazonaws.foo.OperationName", + "Content-Type": "application/x-amz-json-1.1"} + } + } + ] + }, + { + "description": "Recursive shapes", + "metadata": { + "protocol": "json", + "jsonVersion": "1.1", + "targetPrefix": "com.amazonaws.foo" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "RecursiveStruct": { + "shape": "RecursiveStructType" + } + } + }, + "RecursiveStructType": { + "type": "structure", + "members": { + "NoRecurse": { + "shape": "StringType" + }, + "RecursiveStruct": { + "shape": "RecursiveStructType" + }, + "RecursiveList": { + "shape": "RecursiveListType" + }, + "RecursiveMap": { + "shape": "RecursiveMapType" + } + } + }, + "RecursiveListType": { + "type": "list", + "member": { + "shape": "RecursiveStructType" + } + }, + "RecursiveMapType": { + "type": "map", + "key": { + "shape": "StringType" + }, + "value": { + "shape": "RecursiveStructType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + 
"given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "NoRecurse": "foo" + } + }, + "serialized": { + "uri": "/", + "headers": { + "X-Amz-Target": "com.amazonaws.foo.OperationName", + "Content-Type": "application/x-amz-json-1.1" + }, + "body": "{\"RecursiveStruct\": {\"NoRecurse\": \"foo\"}}" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "RecursiveStruct": { + "NoRecurse": "foo" + } + } + }, + "serialized": { + "uri": "/", + "headers": { + "X-Amz-Target": "com.amazonaws.foo.OperationName", + "Content-Type": "application/x-amz-json-1.1" + }, + "body": "{\"RecursiveStruct\": {\"RecursiveStruct\": {\"NoRecurse\": \"foo\"}}}" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "RecursiveStruct": { + "RecursiveStruct": { + "RecursiveStruct": { + "NoRecurse": "foo" + } + } + } + } + }, + "serialized": { + "uri": "/", + "headers": { + "X-Amz-Target": "com.amazonaws.foo.OperationName", + "Content-Type": "application/x-amz-json-1.1" + }, + "body": "{\"RecursiveStruct\": {\"RecursiveStruct\": {\"RecursiveStruct\": {\"RecursiveStruct\": {\"NoRecurse\": \"foo\"}}}}}" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "RecursiveList": [ + { + "NoRecurse": "foo" + }, + { + "NoRecurse": "bar" + } + ] + } + }, + "serialized": { + "uri": "/", + "headers": { + "X-Amz-Target": "com.amazonaws.foo.OperationName", + "Content-Type": "application/x-amz-json-1.1" + }, + "body": "{\"RecursiveStruct\": {\"RecursiveList\": [{\"NoRecurse\": \"foo\"}, {\"NoRecurse\": \"bar\"}]}}" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "RecursiveList": [ + { + "NoRecurse": "foo" + }, + { + "RecursiveStruct": { + "NoRecurse": "bar" + } + } + ] + } + }, + "serialized": { + "uri": "/", + "headers": { + "X-Amz-Target": "com.amazonaws.foo.OperationName", + "Content-Type": "application/x-amz-json-1.1" + }, + "body": "{\"RecursiveStruct\": {\"RecursiveList\": [{\"NoRecurse\": \"foo\"}, {\"RecursiveStruct\": {\"NoRecurse\": \"bar\"}}]}}" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "RecursiveMap": { + "foo": { + "NoRecurse": "foo" + }, + "bar": { + "NoRecurse": "bar" + } + } + } + }, + "serialized": { + "uri": "/", + "headers": { + "X-Amz-Target": "com.amazonaws.foo.OperationName", + "Content-Type": "application/x-amz-json-1.1" + }, + "body": "{\"RecursiveStruct\": {\"RecursiveMap\": {\"foo\": {\"NoRecurse\": \"foo\"}, \"bar\": {\"NoRecurse\": \"bar\"}}}}" + } + } + ] + }, + { + "description": "Empty maps", + "metadata": { + "protocol": "json", + "jsonVersion": "1.1", + "targetPrefix": "com.amazonaws.foo" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "Map": { + "shape": "MapType" + } + } + }, + "MapType": { + "type": "map", + "key": { + "shape": "StringType" + }, + "value": { + "shape": "StringType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName", + "http": { + "method": "POST" + } + }, + "params": { + "Map": {} + }, + "serialized": { + "body": "{\"Map\": {}}", + "headers": { + "X-Amz-Target": 
"com.amazonaws.foo.OperationName", + "Content-Type": "application/x-amz-json-1.1" + }, + "uri": "/" + } + } + ] + }, + { + "description": "Idempotency token auto fill", + "metadata": { + "protocol": "json", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "Token": { + "shape": "StringType", + "idempotencyToken": true + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "Token": "abc123" + }, + "serialized": { + "uri": "/path", + "headers": {}, + "body": "{\"Token\": \"abc123\"}" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + }, + "serialized": { + "uri": "/path", + "headers": {}, + "body": "{\"Token\": \"00000000-0000-4000-8000-000000000000\"}" + } + } + ] + } +] diff --git a/vendor/github.com/aws/aws-sdk-go/models/protocol_tests/input/query.json b/vendor/github.com/aws/aws-sdk-go/models/protocol_tests/input/query.json new file mode 100644 index 000000000..971b7ee61 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/protocol_tests/input/query.json @@ -0,0 +1,842 @@ +[ + { + "description": "Scalar members", + "metadata": { + "protocol": "query", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "Foo": { + "shape": "StringType" + }, + "Bar": { + "shape": "StringType" + }, + "Baz": { + "shape": "BooleanType" + } + } + }, + "StringType": { + "type": "string" + }, + "BooleanType": { + "type": "boolean" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "Foo": "val1", + "Bar": "val2" + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&Foo=val1&Bar=val2" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "Baz": true + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&Baz=true" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "Baz": false + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&Baz=false" + } + } + ] + }, + { + "description": "Nested structure members", + "metadata": { + "protocol": "query", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "StructArg": { + "shape": "StructType" + } + } + }, + "StructType": { + "type": "structure", + "members": { + "ScalarArg": { + "shape": "StringType" + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "StructArg": { + "ScalarArg": "foo" + } + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&StructArg.ScalarArg=foo" + } + } + ] + }, + { + "description": "List types", + "metadata": { + "protocol": "query", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "ListArg": { + "shape": "ListType" + } + } + }, + "ListType": { + "type": "list", + "member": { + "shape": "Strings" + } + }, + "Strings": { + "type": "string" + } + }, + "cases": [ + { + 
"given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "ListArg": [ + "foo", + "bar", + "baz" + ] + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&ListArg.member.1=foo&ListArg.member.2=bar&ListArg.member.3=baz" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "ListArg": [] + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&ListArg=" + } + } + ] + }, + { + "description": "Flattened list", + "metadata": { + "protocol": "query", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "ScalarArg": { + "shape": "StringType" + }, + "ListArg": { + "shape": "ListType" + }, + "NamedListArg": { + "shape": "NamedListType" + } + } + }, + "ListType": { + "type": "list", + "member": { + "shape": "StringType" + }, + "flattened": true + }, + "NamedListType": { + "type": "list", + "member": { + "shape": "StringType", + "locationName": "Foo" + }, + "flattened": true + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "ScalarArg": "foo", + "ListArg": [ + "a", + "b", + "c" + ] + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&ScalarArg=foo&ListArg.1=a&ListArg.2=b&ListArg.3=c" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "NamedListArg": [ + "a" + ] + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&Foo.1=a" + } + } + ] + }, + { + "description": "Serialize flattened map type", + "metadata": { + "protocol": "query", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "MapArg": { + "shape": "StringMap" + } + } + }, + "StringMap": { + "type": "map", + "key": { + "shape": "StringType" + }, + "value": { + "shape": "StringType" + }, + "flattened": true + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "MapArg": { + "key1": "val1", + "key2": "val2" + } + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&MapArg.1.key=key1&MapArg.1.value=val1&MapArg.2.key=key2&MapArg.2.value=val2" + } + } + ] + }, + { + "description": "Non flattened list with LocationName", + "metadata": { + "protocol": "query", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "ListArg": { + "shape": "ListType" + } + } + }, + "ListType": { + "type": "list", + "member": { + "shape": "StringType", + "locationName": "item" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "ListArg": [ + "a", + "b", + "c" + ] + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&ListArg.item.1=a&ListArg.item.2=b&ListArg.item.3=c" + } + } + ] + }, + { + "description": "Flattened list with LocationName", + "metadata": { + "protocol": "query", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "ScalarArg": { + "shape": "StringType" + }, + "ListArg": { + "shape": "ListType" + } + } + }, + "ListType": { + "type": 
"list", + "member": { + "shape": "StringType", + "locationName": "ListArgLocation" + }, + "flattened": true + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "ScalarArg": "foo", + "ListArg": [ + "a", + "b", + "c" + ] + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&ScalarArg=foo&ListArgLocation.1=a&ListArgLocation.2=b&ListArgLocation.3=c" + } + } + ] + }, + { + "description": "Serialize map type", + "metadata": { + "protocol": "query", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "MapArg": { + "shape": "StringMap" + } + } + }, + "StringMap": { + "type": "map", + "key": { + "shape": "StringType" + }, + "value": { + "shape": "StringType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "MapArg": { + "key1": "val1", + "key2": "val2" + } + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&MapArg.entry.1.key=key1&MapArg.entry.1.value=val1&MapArg.entry.2.key=key2&MapArg.entry.2.value=val2" + } + } + ] + }, + { + "description": "Serialize map type with locationName", + "metadata": { + "protocol": "query", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "MapArg": { + "shape": "StringMap" + } + } + }, + "StringMap": { + "type": "map", + "key": { + "shape": "StringType", + "locationName": "TheKey" + }, + "value": { + "shape": "StringType", + "locationName": "TheValue" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "MapArg": { + "key1": "val1", + "key2": "val2" + } + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&MapArg.entry.1.TheKey=key1&MapArg.entry.1.TheValue=val1&MapArg.entry.2.TheKey=key2&MapArg.entry.2.TheValue=val2" + } + } + ] + }, + { + "description": "Base64 encoded Blobs", + "metadata": { + "protocol": "query", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "BlobArg": { + "shape": "BlobType" + } + } + }, + "BlobType": { + "type": "blob" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "BlobArg": "foo" + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&BlobArg=Zm9v" + } + } + ] + }, + { + "description": "Timestamp values", + "metadata": { + "protocol": "query", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "TimeArg": { + "shape": "TimestampType" + } + } + }, + "TimestampType": { + "type": "timestamp" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "TimeArg": 1422172800 + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&TimeArg=2015-01-25T08%3A00%3A00Z" + } + } + ] + }, + { + "description": "Recursive shapes", + "metadata": { + "protocol": "query", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "RecursiveStruct": { + "shape": "RecursiveStructType" + } + } + }, + "RecursiveStructType": { + 
"type": "structure", + "members": { + "NoRecurse": { + "shape": "StringType" + }, + "RecursiveStruct": { + "shape": "RecursiveStructType" + }, + "RecursiveList": { + "shape": "RecursiveListType" + }, + "RecursiveMap": { + "shape": "RecursiveMapType" + } + } + }, + "RecursiveListType": { + "type": "list", + "member": { + "shape": "RecursiveStructType" + } + }, + "RecursiveMapType": { + "type": "map", + "key": { + "shape": "StringType" + }, + "value": { + "shape": "RecursiveStructType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "NoRecurse": "foo" + } + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&RecursiveStruct.NoRecurse=foo" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "RecursiveStruct": { + "NoRecurse": "foo" + } + } + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&RecursiveStruct.RecursiveStruct.NoRecurse=foo" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "RecursiveStruct": { + "RecursiveStruct": { + "RecursiveStruct": { + "NoRecurse": "foo" + } + } + } + } + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&RecursiveStruct.RecursiveStruct.RecursiveStruct.RecursiveStruct.NoRecurse=foo" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "RecursiveList": [ + { + "NoRecurse": "foo" + }, + { + "NoRecurse": "bar" + } + ] + } + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&RecursiveStruct.RecursiveList.member.1.NoRecurse=foo&RecursiveStruct.RecursiveList.member.2.NoRecurse=bar" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "RecursiveList": [ + { + "NoRecurse": "foo" + }, + { + "RecursiveStruct": { + "NoRecurse": "bar" + } + } + ] + } + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&RecursiveStruct.RecursiveList.member.1.NoRecurse=foo&RecursiveStruct.RecursiveList.member.2.RecursiveStruct.NoRecurse=bar" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "RecursiveMap": { + "foo": { + "NoRecurse": "foo" + }, + "bar": { + "NoRecurse": "bar" + } + } + } + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&RecursiveStruct.RecursiveMap.entry.1.key=foo&RecursiveStruct.RecursiveMap.entry.1.value.NoRecurse=foo&RecursiveStruct.RecursiveMap.entry.2.key=bar&RecursiveStruct.RecursiveMap.entry.2.value.NoRecurse=bar" + } + } + ] + }, + { + "description": "Idempotency token auto fill", + "metadata": { + "protocol": "query", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "Token": { + "shape": "StringType", + "idempotencyToken": true + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "Token": "abc123" + }, + "serialized": { + "uri": "/path", + 
"headers": {}, + "body": "Token=abc123" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + }, + "serialized": { + "uri": "/path", + "headers": {}, + "body": "Token=00000000-0000-4000-8000-000000000000" + } + } + ] + } +] diff --git a/vendor/github.com/aws/aws-sdk-go/models/protocol_tests/input/rest-json.json b/vendor/github.com/aws/aws-sdk-go/models/protocol_tests/input/rest-json.json new file mode 100644 index 000000000..01d6ab217 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/protocol_tests/input/rest-json.json @@ -0,0 +1,1240 @@ +[ + { + "description": "No parameters", + "metadata": { + "protocol": "rest-json", + "apiVersion": "2014-01-01" + }, + "shapes": {}, + "cases": [ + { + "given": { + "http": { + "method": "GET", + "requestUri": "/2014-01-01/jobs" + }, + "name": "OperationName" + }, + "serialized": { + "body": "", + "uri": "/2014-01-01/jobs", + "headers": {} + } + } + ] + }, + { + "description": "URI parameter only with no location name", + "metadata": { + "protocol": "rest-json", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "PipelineId": { + "shape": "StringType", + "location": "uri" + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "GET", + "requestUri": "/2014-01-01/jobsByPipeline/{PipelineId}" + }, + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "PipelineId": "foo" + }, + "serialized": { + "body": "", + "uri": "/2014-01-01/jobsByPipeline/foo", + "headers": {} + } + } + ] + }, + { + "description": "URI parameter only with location name", + "metadata": { + "protocol": "rest-json", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "Foo": { + "shape": "StringType", + "location": "uri", + "locationName": "PipelineId" + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "GET", + "requestUri": "/2014-01-01/jobsByPipeline/{PipelineId}" + }, + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "Foo": "bar" + }, + "serialized": { + "body": "", + "uri": "/2014-01-01/jobsByPipeline/bar", + "headers": {} + } + } + ] + }, + { + "description": "Querystring list of strings", + "metadata": { + "protocol": "rest-json", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "Items": { + "shape": "StringList", + "location": "querystring", + "locationName": "item" + } + } + }, + "StringList": { + "type": "list", + "member": { + "shape": "String" + } + }, + "String": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "GET", + "requestUri": "/path" + }, + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "Items": ["value1", "value2"] + }, + "serialized": { + "body": "", + "uri": "/path?item=value1&item=value2", + "headers": {} + } + } + ] + }, + { + "description": "String to string maps in querystring", + "metadata": { + "protocol": "rest-json", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "PipelineId": { + "shape": "StringType", + "location": "uri" + }, + "QueryDoc": { + "shape": "MapStringStringType", + "location": "querystring" + } + } + }, + "MapStringStringType": { + 
"type": "map", + "key": { + "shape": "StringType" + }, + "value": { + "shape": "StringType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "GET", + "requestUri": "/2014-01-01/jobsByPipeline/{PipelineId}" + }, + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "PipelineId": "foo", + "QueryDoc": { + "bar": "baz", + "fizz": "buzz" + } + }, + "serialized": { + "body": "", + "uri": "/2014-01-01/jobsByPipeline/foo?bar=baz&fizz=buzz", + "headers": {} + } + } + ] + }, + { + "description": "String to string list maps in querystring", + "metadata": { + "protocol": "rest-json", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "PipelineId": { + "shape": "StringType", + "location": "uri" + }, + "QueryDoc": { + "shape": "MapStringStringListType", + "location": "querystring" + } + } + }, + "MapStringStringListType": { + "type": "map", + "key": { + "shape": "StringType" + }, + "value": { + "shape": "StringListType" + } + }, + "StringListType": { + "type": "list", + "member": { + "shape": "StringType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "GET", + "requestUri": "/2014-01-01/jobsByPipeline/{PipelineId}" + }, + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "PipelineId": "id", + "QueryDoc": { + "foo": ["bar", "baz"], + "fizz": ["buzz", "pop"] + } + }, + "serialized": { + "body": "", + "uri": "/2014-01-01/jobsByPipeline/id?foo=bar&foo=baz&fizz=buzz&fizz=pop", + "headers": {} + } + } + ] + }, + { + "description": "URI parameter and querystring params", + "metadata": { + "protocol": "rest-json", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "PipelineId": { + "shape": "StringType", + "location": "uri", + "locationName": "PipelineId" + }, + "Ascending": { + "shape": "StringType", + "location": "querystring", + "locationName": "Ascending" + }, + "PageToken": { + "shape": "StringType", + "location": "querystring", + "locationName": "PageToken" + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "GET", + "requestUri": "/2014-01-01/jobsByPipeline/{PipelineId}" + }, + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "PipelineId": "foo", + "Ascending": "true", + "PageToken": "bar" + }, + "serialized": { + "body": "", + "uri": "/2014-01-01/jobsByPipeline/foo?Ascending=true&PageToken=bar", + "headers": {} + } + } + ] + }, + { + "description": "URI parameter, querystring params and JSON body", + "metadata": { + "protocol": "rest-json", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "PipelineId": { + "shape": "StringType", + "location": "uri", + "locationName": "PipelineId" + }, + "Ascending": { + "shape": "StringType", + "location": "querystring", + "locationName": "Ascending" + }, + "PageToken": { + "shape": "StringType", + "location": "querystring", + "locationName": "PageToken" + }, + "Config": { + "shape": "StructType" + } + } + }, + "StringType": { + "type": "string" + }, + "StructType": { + "type": "structure", + "members": { + "A": { + "shape": "StringType" + }, + "B": { + "shape": "StringType" + } + } + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/2014-01-01/jobsByPipeline/{PipelineId}" + }, + 
"input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "PipelineId": "foo", + "Ascending": "true", + "PageToken": "bar", + "Config": { + "A": "one", + "B": "two" + } + }, + "serialized": { + "body": "{\"Config\": {\"A\": \"one\", \"B\": \"two\"}}", + "uri": "/2014-01-01/jobsByPipeline/foo?Ascending=true&PageToken=bar", + "headers": {} + } + } + ] + }, + { + "description": "URI parameter, querystring params, headers and JSON body", + "metadata": { + "protocol": "rest-json", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "PipelineId": { + "shape": "StringType", + "location": "uri", + "locationName": "PipelineId" + }, + "Ascending": { + "shape": "StringType", + "location": "querystring", + "locationName": "Ascending" + }, + "Checksum": { + "shape": "StringType", + "location": "header", + "locationName": "x-amz-checksum" + }, + "PageToken": { + "shape": "StringType", + "location": "querystring", + "locationName": "PageToken" + }, + "Config": { + "shape": "StructType" + } + } + }, + "StringType": { + "type": "string" + }, + "StructType": { + "type": "structure", + "members": { + "A": { + "shape": "StringType" + }, + "B": { + "shape": "StringType" + } + } + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/2014-01-01/jobsByPipeline/{PipelineId}" + }, + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "PipelineId": "foo", + "Ascending": "true", + "Checksum": "12345", + "PageToken": "bar", + "Config": { + "A": "one", + "B": "two" + } + }, + "serialized": { + "body": "{\"Config\": {\"A\": \"one\", \"B\": \"two\"}}", + "uri": "/2014-01-01/jobsByPipeline/foo?Ascending=true&PageToken=bar", + "headers": { + "x-amz-checksum": "12345" + } + } + } + ] + }, + { + "description": "Streaming payload", + "metadata": { + "protocol": "rest-json", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "vaultName": { + "shape": "StringType", + "location": "uri", + "locationName": "vaultName" + }, + "checksum": { + "shape": "StringType", + "location": "header", + "locationName": "x-amz-sha256-tree-hash" + }, + "body": { + "shape": "Stream" + } + }, + "required": [ + "vaultName" + ], + "payload": "body" + }, + "StringType": { + "type": "string" + }, + "Stream": { + "type": "blob", + "streaming": true + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/2014-01-01/vaults/{vaultName}/archives" + }, + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "vaultName": "name", + "checksum": "foo", + "body": "contents" + }, + "serialized": { + "body": "contents", + "uri": "/2014-01-01/vaults/name/archives", + "headers": { + "x-amz-sha256-tree-hash": "foo" + } + } + } + ] + }, + { + "description": "Serialize blobs in body", + "metadata": { + "protocol": "rest-json", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "Foo": { + "shape": "StringType", + "location": "uri", + "locationName": "Foo" + }, + "Bar": {"shape": "BlobType"} + }, + "required": [ + "Foo" + ] + }, + "StringType": { + "type": "string" + }, + "BlobType": { + "type": "blob" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "GET", + "requestUri": "/2014-01-01/{Foo}" + }, + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "Foo": "foo_name", + "Bar": "Blob param" + }, + 
"serialized": { + "body": "{\"Bar\": \"QmxvYiBwYXJhbQ==\"}", + "uri": "/2014-01-01/foo_name" + } + } + ] + }, + { + "description": "Blob payload", + "metadata": { + "protocol": "rest-json", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "foo": { + "shape": "FooShape" + } + } + }, + "FooShape": { + "type": "blob" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "InputShape", + "payload": "foo" + }, + "name": "OperationName" + }, + "params": { + "foo": "bar" + }, + "serialized": { + "method": "POST", + "body": "bar", + "uri": "/" + } + }, + { + "given": { + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "InputShape", + "payload": "foo" + }, + "name": "OperationName" + }, + "params": { + }, + "serialized": { + "method": "POST", + "body": "", + "uri": "/" + } + } + ] + }, + { + "description": "Structure payload", + "metadata": { + "protocol": "rest-json", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "foo": { + "shape": "FooShape" + } + } + }, + "FooShape": { + "locationName": "foo", + "type": "structure", + "members": { + "baz": { + "shape": "BazShape" + } + } + }, + "BazShape": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "InputShape", + "payload": "foo" + }, + "name": "OperationName" + }, + "params": { + "foo": { + "baz": "bar" + } + }, + "serialized": { + "method": "POST", + "body": "{\"baz\": \"bar\"}", + "uri": "/" + } + }, + { + "given": { + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "InputShape", + "payload": "foo" + }, + "name": "OperationName" + }, + "params": {}, + "serialized": { + "method": "POST", + "body": "", + "uri": "/" + } + } + ] + }, + { + "description": "Omits null query params, but serializes empty strings", + "metadata": { + "protocol": "rest-json", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "foo": { + "location":"querystring", + "locationName":"param-name", + "shape": "Foo" + } + } + }, + "Foo": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "name": "OperationName", + "http": { + "method": "POST", + "requestUri": "/path" + }, + "input": { "shape": "InputShape" } + }, + "params": { "foo": null }, + "serialized": { + "method": "POST", + "body": "", + "uri": "/path" + } + }, + { + "given": { + "name": "OperationName", + "http": { + "method": "POST", + "requestUri": "/path?abc=mno" + }, + "input": { "shape": "InputShape" } + }, + "params": { "foo": "" }, + "serialized": { + "method": "POST", + "body": "", + "uri": "/path?abc=mno¶m-name=" + } + } + ] + }, + { + "description": "Recursive shapes", + "metadata": { + "protocol": "rest-json", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "RecursiveStruct": { + "shape": "RecursiveStructType" + } + } + }, + "RecursiveStructType": { + "type": "structure", + "members": { + "NoRecurse": { + "shape": "StringType" + }, + "RecursiveStruct": { + "shape": "RecursiveStructType" + }, + "RecursiveList": { + "shape": "RecursiveListType" + }, + "RecursiveMap": { + "shape": "RecursiveMapType" + } + } + }, + "RecursiveListType": { + "type": "list", + "member": { + "shape": "RecursiveStructType" + } + }, + "RecursiveMapType": { + "type": "map", + "key": { + 
"shape": "StringType" + }, + "value": { + "shape": "RecursiveStructType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "NoRecurse": "foo" + } + }, + "serialized": { + "uri": "/path" , + "headers": {}, + "body": "{\"RecursiveStruct\": {\"NoRecurse\": \"foo\"}}" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "RecursiveStruct": { + "NoRecurse": "foo" + } + } + }, + "serialized": { + "uri": "/path", + "headers": {}, + "body": "{\"RecursiveStruct\": {\"RecursiveStruct\": {\"NoRecurse\": \"foo\"}}}" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "RecursiveStruct": { + "RecursiveStruct": { + "RecursiveStruct": { + "NoRecurse": "foo" + } + } + } + } + }, + "serialized": { + "uri": "/path", + "headers": {}, + "body": "{\"RecursiveStruct\": {\"RecursiveStruct\": {\"RecursiveStruct\": {\"RecursiveStruct\": {\"NoRecurse\": \"foo\"}}}}}" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "RecursiveList": [ + { + "NoRecurse": "foo" + }, + { + "NoRecurse": "bar" + } + ] + } + }, + "serialized": { + "uri": "/path", + "headers": {}, + "body": "{\"RecursiveStruct\": {\"RecursiveList\": [{\"NoRecurse\": \"foo\"}, {\"NoRecurse\": \"bar\"}]}}" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "RecursiveList": [ + { + "NoRecurse": "foo" + }, + { + "RecursiveStruct": { + "NoRecurse": "bar" + } + } + ] + } + }, + "serialized": { + "uri": "/path", + "headers": {}, + "body": "{\"RecursiveStruct\": {\"RecursiveList\": [{\"NoRecurse\": \"foo\"}, {\"RecursiveStruct\": {\"NoRecurse\": \"bar\"}}]}}" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "RecursiveMap": { + "foo": { + "NoRecurse": "foo" + }, + "bar": { + "NoRecurse": "bar" + } + } + } + }, + "serialized": { + "uri": "/path", + "headers": {}, + "body": "{\"RecursiveStruct\": {\"RecursiveMap\": {\"foo\": {\"NoRecurse\": \"foo\"}, \"bar\": {\"NoRecurse\": \"bar\"}}}}" + } + } + ] + }, + { + "description": "Timestamp values", + "metadata": { + "protocol": "rest-json", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "TimeArg": { + "shape": "TimestampType" + }, + "TimeArgInHeader": { + "shape": "TimestampType", + "location": "header", + "locationName": "x-amz-timearg" + } + } + }, + "TimestampType": { + "type": "timestamp" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "TimeArg": 1422172800 + }, + "serialized": { + "uri": "/path", + "headers": {}, + "body": "{\"TimeArg\": 1422172800}" + } + }, + { + "given": { + "input": { + 
"shape": "InputShape" + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "TimeArgInHeader": 1422172800 + }, + "serialized": { + "uri": "/path", + "headers": {"x-amz-timearg": "Sun, 25 Jan 2015 08:00:00 GMT"}, + "body": "" + } + } + ] + }, + { + "description": "Named locations in JSON body", + "metadata": { + "protocol": "rest-json", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "TimeArg": { + "shape": "TimestampType", + "locationName": "timestamp_location" + } + } + }, + "TimestampType": { + "type": "timestamp" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "TimeArg": 1422172800 + }, + "serialized": { + "uri": "/path", + "headers": {}, + "body": "{\"timestamp_location\": 1422172800}" + } + } + ] + }, + { + "description": "String payload", + "metadata": { + "protocol": "rest-json", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "foo": { + "shape": "FooShape" + } + } + }, + "FooShape": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "InputShape", + "payload": "foo" + }, + "name": "OperationName" + }, + "params": { + "foo": "bar" + }, + "serialized": { + "method": "POST", + "body": "bar", + "uri": "/" + } + } + ] + }, + { + "description": "Idempotency token auto fill", + "metadata": { + "protocol": "rest-json", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "Token": { + "shape": "StringType", + "idempotencyToken": true + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "Token": "abc123" + }, + "serialized": { + "uri": "/path", + "headers": {}, + "body": "{\"Token\": \"abc123\"}" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + }, + "serialized": { + "uri": "/path", + "headers": {}, + "body": "{\"Token\": \"00000000-0000-4000-8000-000000000000\"}" + } + } + ] + } +] diff --git a/vendor/github.com/aws/aws-sdk-go/models/protocol_tests/input/rest-xml.json b/vendor/github.com/aws/aws-sdk-go/models/protocol_tests/input/rest-xml.json new file mode 100644 index 000000000..ad3f30441 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/protocol_tests/input/rest-xml.json @@ -0,0 +1,1633 @@ +[ + { + "description": "Basic XML serialization", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "Name": { + "shape": "StringType" + }, + "Description": { + "shape": "StringType" + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/2014-01-01/hostedzone" + }, + "input": { + "shape": "InputShape", + "locationName": "OperationRequest", + "xmlNamespace": {"uri": "https://foo/"} + }, + "name": "OperationName" + }, + "params": { + "Name": "foo", + "Description": "bar" + }, + "serialized": { + "method": "POST", + "body": "foobar", + "uri": 
"/2014-01-01/hostedzone", + "headers": {} + } + }, + { + "given": { + "http": { + "method": "PUT", + "requestUri": "/2014-01-01/hostedzone" + }, + "input": { + "shape": "InputShape", + "locationName": "OperationRequest", + "xmlNamespace": {"uri": "https://foo/"} + }, + "name": "OperationName" + }, + "params": { + "Name": "foo", + "Description": "bar" + }, + "serialized": { + "method": "PUT", + "body": "foobar", + "uri": "/2014-01-01/hostedzone", + "headers": {} + } + }, + { + "given": { + "http": { + "method": "GET", + "requestUri": "/2014-01-01/hostedzone" + }, + "name": "OperationName" + }, + "params": {}, + "serialized": { + "method": "GET", + "body": "", + "uri": "/2014-01-01/hostedzone", + "headers": {} + } + } + ] + }, + { + "description": "Serialize other scalar types", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "First": { + "shape": "BoolType" + }, + "Second": { + "shape": "BoolType" + }, + "Third": { + "shape": "FloatType" + }, + "Fourth": { + "shape": "IntegerType" + } + } + }, + "BoolType": { + "type": "boolean" + }, + "FloatType": { + "type": "float" + }, + "IntegerType": { + "type": "integer" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/2014-01-01/hostedzone" + }, + "input": { + "shape": "InputShape", + "locationName": "OperationRequest", + "xmlNamespace": {"uri": "https://foo/"} + }, + "name": "OperationName" + }, + "params": { + "First": true, + "Second": false, + "Third": 1.2, + "Fourth": 3 + }, + "serialized": { + "method": "POST", + "body": "truefalse1.23", + "uri": "/2014-01-01/hostedzone", + "headers": {} + } + } + ] + }, + { + "description": "Nested structures", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "SubStructure": { + "shape": "SubStructure" + }, + "Description": { + "shape": "StringType" + } + } + }, + "SubStructure": { + "type": "structure", + "members": { + "Foo": { + "shape": "StringType" + }, + "Bar": { + "shape": "StringType" + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/2014-01-01/hostedzone" + }, + "input": { + "shape": "InputShape", + "locationName": "OperationRequest", + "xmlNamespace": {"uri": "https://foo/"} + }, + "name": "OperationName" + }, + "params": { + "SubStructure": { + "Foo": "a", + "Bar": "b" + }, + "Description": "baz" + }, + "serialized": { + "method": "POST", + "body": "abbaz", + "uri": "/2014-01-01/hostedzone", + "headers": {} + } + }, + { + "given": { + "http": { + "method": "POST", + "requestUri": "/2014-01-01/hostedzone" + }, + "input": { + "shape": "InputShape", + "locationName": "OperationRequest", + "xmlNamespace": {"uri": "https://foo/"} + }, + "name": "OperationName" + }, + "params": { + "SubStructure": { + "Foo": "a", + "Bar": null + }, + "Description": "baz" + }, + "serialized": { + "method": "POST", + "body": "abaz", + "uri": "/2014-01-01/hostedzone", + "headers": {} + } + } + ] + }, + { + "description": "Nested structures", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "SubStructure": { + "shape": "SubStructure" + }, + "Description": { + "shape": "StringType" + } + } + }, + "SubStructure": { + "type": "structure", + "members": { + "Foo": { + "shape": "StringType" + }, + "Bar": { + 
"shape": "StringType" + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/2014-01-01/hostedzone" + }, + "input": { + "shape": "InputShape", + "locationName": "OperationRequest", + "xmlNamespace": {"uri": "https://foo/"} + }, + "name": "OperationName" + }, + "params": { + "SubStructure": {}, + "Description": "baz" + }, + "serialized": { + "method": "POST", + "body": "baz", + "uri": "/2014-01-01/hostedzone", + "headers": {} + } + } + ] + }, + { + "description": "Non flattened lists", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "ListParam": { + "shape": "ListShape" + } + } + }, + "ListShape": { + "type": "list", + "member": { + "shape": "StringType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/2014-01-01/hostedzone" + }, + "input": { + "shape": "InputShape", + "locationName": "OperationRequest", + "xmlNamespace": {"uri": "https://foo/"} + }, + "name": "OperationName" + }, + "params": { + "ListParam": [ + "one", + "two", + "three" + ] + }, + "serialized": { + "method": "POST", + "body": "onetwothree", + "uri": "/2014-01-01/hostedzone", + "headers": {} + } + } + ] + }, + { + "description": "Non flattened lists with locationName", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "ListParam": { + "shape": "ListShape", + "locationName": "AlternateName" + } + } + }, + "ListShape": { + "type": "list", + "member": { + "shape": "StringType", + "locationName": "NotMember" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/2014-01-01/hostedzone" + }, + "input": { + "shape": "InputShape", + "locationName": "OperationRequest", + "xmlNamespace": {"uri": "https://foo/"} + }, + "name": "OperationName" + }, + "params": { + "ListParam": [ + "one", + "two", + "three" + ] + }, + "serialized": { + "method": "POST", + "body": "onetwothree", + "uri": "/2014-01-01/hostedzone", + "headers": {} + } + } + ] + }, + { + "description": "Flattened lists", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "ListParam": { + "shape": "ListShape" + } + } + }, + "ListShape": { + "type": "list", + "member": { + "shape": "StringType" + }, + "flattened": true + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/2014-01-01/hostedzone" + }, + "input": { + "shape": "InputShape", + "locationName": "OperationRequest", + "xmlNamespace": {"uri": "https://foo/"} + }, + "name": "OperationName" + }, + "params": { + "ListParam": [ + "one", + "two", + "three" + ] + }, + "serialized": { + "method": "POST", + "body": "onetwothree", + "uri": "/2014-01-01/hostedzone", + "headers": {} + } + } + ] + }, + { + "description": "Flattened lists with locationName", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "ListParam": { + "shape": "ListShape", + "locationName": "item" + } + } + }, + "ListShape": { + "type": "list", + "member": { + "shape": "StringType" + }, + "flattened": true + }, + "StringType": { + "type": "string" + } 
+ }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/2014-01-01/hostedzone" + }, + "input": { + "shape": "InputShape", + "locationName": "OperationRequest", + "xmlNamespace": {"uri": "https://foo/"} + }, + "name": "OperationName" + }, + "params": { + "ListParam": [ + "one", + "two", + "three" + ] + }, + "serialized": { + "method": "POST", + "body": "onetwothree", + "uri": "/2014-01-01/hostedzone", + "headers": {} + } + } + ] + }, + { + "description": "List of structures", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "ListParam": { + "shape": "ListShape", + "locationName": "item" + } + } + }, + "ListShape": { + "type": "list", + "member": { + "shape": "SingleFieldStruct" + }, + "flattened": true + }, + "StringType": { + "type": "string" + }, + "SingleFieldStruct": { + "type": "structure", + "members": { + "Element": { + "shape": "StringType", + "locationName": "value" + } + } + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/2014-01-01/hostedzone" + }, + "input": { + "shape": "InputShape", + "locationName": "OperationRequest", + "xmlNamespace": {"uri": "https://foo/"} + }, + "name": "OperationName" + }, + "params": { + "ListParam": [ + { + "Element": "one" + }, + { + "Element": "two" + }, + { + "Element": "three" + } + ] + }, + "serialized": { + "method": "POST", + "body": "onetwothree", + "uri": "/2014-01-01/hostedzone", + "headers": {} + } + } + ] + }, + { + "description": "Blob and timestamp shapes", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "StructureParam": { + "shape": "StructureShape" + } + } + }, + "StructureShape": { + "type": "structure", + "members": { + "t": { + "shape": "TShape" + }, + "b": { + "shape": "BShape" + } + } + }, + "TShape": { + "type": "timestamp" + }, + "BShape": { + "type": "blob" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/2014-01-01/hostedzone" + }, + "input": { + "shape": "InputShape", + "locationName": "OperationRequest", + "xmlNamespace": {"uri": "https://foo/"} + }, + "name": "OperationName" + }, + "params": { + "StructureParam": { + "t": 1422172800, + "b": "foo" + } + }, + "serialized": { + "method": "POST", + "body": "2015-01-25T08:00:00ZZm9v", + "uri": "/2014-01-01/hostedzone", + "headers": {} + } + } + ] + }, + { + "description": "Header maps", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "foo": { + "shape": "FooShape" + } + } + }, + "FooShape": { + "type": "map", + "location": "headers", + "locationName": "x-foo-", + "key": { + "shape": "FooKeyValue" + }, + "value": { + "shape": "FooKeyValue" + } + }, + "FooKeyValue": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "InputShape", + "locationName": "OperationRequest", + "xmlNamespace": {"uri": "https://foo/"} + }, + "name": "OperationName" + }, + "params": { + "foo": { + "a": "b", + "c": "d" + } + }, + "serialized": { + "method": "POST", + "body": "", + "uri": "/", + "headers": { + "x-foo-a": "b", + "x-foo-c": "d" + } + } + } + ] + }, + { + "description": "Querystring list of strings", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + 
"type": "structure", + "members": { + "Items": { + "shape": "StringList", + "location": "querystring", + "locationName": "item" + } + } + }, + "StringList": { + "type": "list", + "member": { + "shape": "String" + } + }, + "String": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "GET", + "requestUri": "/path" + }, + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "Items": ["value1", "value2"] + }, + "serialized": { + "body": "", + "uri": "/path?item=value1&item=value2", + "headers": {} + } + } + ] + }, + { + "description": "String to string maps in querystring", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "PipelineId": { + "shape": "StringType", + "location": "uri" + }, + "QueryDoc": { + "shape": "MapStringStringType", + "location": "querystring" + } + } + }, + "MapStringStringType": { + "type": "map", + "key": { + "shape": "StringType" + }, + "value": { + "shape": "StringType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "GET", + "requestUri": "/2014-01-01/jobsByPipeline/{PipelineId}" + }, + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "PipelineId": "foo", + "QueryDoc": { + "bar": "baz", + "fizz": "buzz" + } + }, + "serialized": { + "body": "", + "uri": "/2014-01-01/jobsByPipeline/foo?bar=baz&fizz=buzz", + "headers": {} + } + } + ] + }, + { + "description": "String to string list maps in querystring", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "PipelineId": { + "shape": "StringType", + "location": "uri" + }, + "QueryDoc": { + "shape": "MapStringStringListType", + "location": "querystring" + } + } + }, + "MapStringStringListType": { + "type": "map", + "key": { + "shape": "StringType" + }, + "value": { + "shape": "StringListType" + } + }, + "StringListType": { + "type": "list", + "member": { + "shape": "StringType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "GET", + "requestUri": "/2014-01-01/jobsByPipeline/{PipelineId}" + }, + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "PipelineId": "id", + "QueryDoc": { + "foo": ["bar", "baz"], + "fizz": ["buzz", "pop"] + } + }, + "serialized": { + "body": "", + "uri": "/2014-01-01/jobsByPipeline/id?foo=bar&foo=baz&fizz=buzz&fizz=pop", + "headers": {} + } + } + ] + }, + + { + "description": "String payload", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "foo": { + "shape": "FooShape" + } + }, + "payload": "foo" + }, + "FooShape": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "foo": "bar" + }, + "serialized": { + "method": "POST", + "body": "bar", + "uri": "/" + } + } + ] + }, + { + "description": "Blob payload", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "foo": { + "shape": "FooShape" + } + }, + "payload": "foo" + }, + "FooShape": { + "type": "blob" + } + }, + "cases": [ + { + "given": { + "http": { + 
"method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "foo": "bar" + }, + "serialized": { + "method": "POST", + "body": "bar", + "uri": "/" + } + }, + { + "given": { + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + }, + "serialized": { + "method": "POST", + "body": "", + "uri": "/" + } + } + ] + }, + { + "description": "Structure payload", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "foo": { + "shape": "FooShape" + } + }, + "payload": "foo" + }, + "FooShape": { + "locationName": "foo", + "type": "structure", + "members": { + "baz": { + "shape": "BazShape" + } + } + }, + "BazShape": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "foo": { + "baz": "bar" + } + }, + "serialized": { + "method": "POST", + "body": "bar", + "uri": "/" + } + }, + { + "given": { + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": {}, + "serialized": { + "method": "POST", + "body": "", + "uri": "/" + } + }, + { + "given": { + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "foo": {} + }, + "serialized": { + "method": "POST", + "body": "", + "uri": "/" + } + }, + { + "given": { + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "foo": null + }, + "serialized": { + "method": "POST", + "body": "", + "uri": "/" + } + } + ] + }, + { + "description": "XML Attribute", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "Grant": { + "shape": "Grant" + } + }, + "payload": "Grant" + }, + "Grant": { + "type": "structure", + "locationName": "Grant", + "members": { + "Grantee": { + "shape": "Grantee" + } + } + }, + "Grantee": { + "type": "structure", + "members": { + "Type": { + "shape": "Type", + "locationName": "xsi:type", + "xmlAttribute": true + }, + "EmailAddress": { + "shape": "StringType" + } + }, + "xmlNamespace": { + "prefix": "xsi", + "uri":"http://www.w3.org/2001/XMLSchema-instance" + } + }, + "Type": { + "type": "string" + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "Grant": { + "Grantee": { + "EmailAddress": "foo@example.com", + "Type": "CanonicalUser" + } + } + }, + "serialized": { + "method": "POST", + "body": "foo@example.com", + "uri": "/" + } + } + ] + }, + { + "description": "Greedy keys", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "Bucket": { + "shape": "BucketShape", + "location": "uri" + }, + "Key": { + "shape": "KeyShape", + "location": "uri" + } + } + }, + "BucketShape": { + "type": "string" + }, + "KeyShape": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "GET", + "requestUri": 
"/{Bucket}/{Key+}" + }, + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "Key": "testing /123", + "Bucket": "my/bucket" + }, + "serialized": { + "method": "GET", + "body": "", + "uri": "/my%2Fbucket/testing%20/123" + } + } + ] + }, + { + "description": "Omits null query params, but serializes empty strings", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "foo": { + "location":"querystring", + "locationName":"param-name", + "shape": "Foo" + } + } + }, + "Foo": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "name": "OperationName", + "http": { + "method": "POST", + "requestUri": "/path" + }, + "input": { "shape": "InputShape" } + }, + "params": { "foo": null }, + "serialized": { + "method": "POST", + "body": "", + "uri": "/path" + } + }, + { + "given": { + "name": "OperationName", + "http": { + "method": "POST", + "requestUri": "/path?abc=mno" + }, + "input": { "shape": "InputShape" } + }, + "params": { "foo": "" }, + "serialized": { + "method": "POST", + "body": "", + "uri": "/path?abc=mno¶m-name=" + } + } + ] + }, + { + "description": "Recursive shapes", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "RecursiveStruct": { + "shape": "RecursiveStructType" + } + } + }, + "RecursiveStructType": { + "type": "structure", + "members": { + "NoRecurse": { + "shape": "StringType" + }, + "RecursiveStruct": { + "shape": "RecursiveStructType" + }, + "RecursiveList": { + "shape": "RecursiveListType" + }, + "RecursiveMap": { + "shape": "RecursiveMapType" + } + } + }, + "RecursiveListType": { + "type": "list", + "member": { + "shape": "RecursiveStructType" + } + }, + "RecursiveMapType": { + "type": "map", + "key": { + "shape": "StringType" + }, + "value": { + "shape": "RecursiveStructType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape", + "locationName": "OperationRequest", + "xmlNamespace": {"uri": "https://foo/"} + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "NoRecurse": "foo" + } + }, + "serialized": { + "uri": "/path", + "body": "foo" + } + }, + { + "given": { + "input": { + "shape": "InputShape", + "locationName": "OperationRequest", + "xmlNamespace": {"uri": "https://foo/"} + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "RecursiveStruct": { + "NoRecurse": "foo" + } + } + }, + "serialized": { + "uri": "/path", + "body": "foo" + } + }, + { + "given": { + "input": { + "shape": "InputShape", + "locationName": "OperationRequest", + "xmlNamespace": {"uri": "https://foo/"} + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "RecursiveStruct": { + "RecursiveStruct": { + "RecursiveStruct": { + "NoRecurse": "foo" + } + } + } + } + }, + "serialized": { + "uri": "/path", + "body": "foo" + } + }, + { + "given": { + "input": { + "shape": "InputShape", + "locationName": "OperationRequest", + "xmlNamespace": {"uri": "https://foo/"} + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "RecursiveList": [ + { + "NoRecurse": "foo" + }, + { + "NoRecurse": 
"bar" + } + ] + } + }, + "serialized": { + "uri": "/path", + "body": "foobar" + } + }, + { + "given": { + "input": { + "shape": "InputShape", + "locationName": "OperationRequest", + "xmlNamespace": {"uri": "https://foo/"} + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "RecursiveList": [ + { + "NoRecurse": "foo" + }, + { + "RecursiveStruct": { + "NoRecurse": "bar" + } + } + ] + } + }, + "serialized": { + "uri": "/path", + "body": "foobar" + } + }, + { + "given": { + "input": { + "shape": "InputShape", + "locationName": "OperationRequest", + "xmlNamespace": {"uri": "https://foo/"} + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "RecursiveMap": { + "foo": { + "NoRecurse": "foo" + }, + "bar": { + "NoRecurse": "bar" + } + } + } + }, + "serialized": { + "uri": "/path", + "body": "foofoobarbar" + } + } + ] + }, + { + "description": "Timestamp in header", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "TimeArgInHeader": { + "shape": "TimestampType", + "location": "header", + "locationName": "x-amz-timearg" + } + } + }, + "TimestampType": { + "type": "timestamp" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "TimeArgInHeader": 1422172800 + }, + "serialized": { + "method": "POST", + "body": "", + "uri": "/path", + "headers": {"x-amz-timearg": "Sun, 25 Jan 2015 08:00:00 GMT"} + } + } + ] + }, + { + "description": "Idempotency token auto fill", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "Token": { + "shape": "StringType", + "idempotencyToken": true + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "Token": "abc123" + }, + "serialized": { + "uri": "/path", + "headers": {}, + "body": "abc123" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + }, + "serialized": { + "uri": "/path", + "headers": {}, + "body": "00000000-0000-4000-8000-000000000000" + } + } + ] + } +] diff --git a/vendor/github.com/aws/aws-sdk-go/models/protocol_tests/output/ec2.json b/vendor/github.com/aws/aws-sdk-go/models/protocol_tests/output/ec2.json new file mode 100644 index 000000000..5b76bf5fd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/protocol_tests/output/ec2.json @@ -0,0 +1,454 @@ +[ + { + "description": "Scalar members", + "metadata": { + "protocol": "ec2" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Str": { + "shape": "StringType" + }, + "Num": { + "shape": "IntegerType", + "locationName": "FooNum" + }, + "FalseBool": { + "shape": "BooleanType" + }, + "TrueBool": { + "shape": "BooleanType" + }, + "Float": { + "shape": "FloatType" + }, + "Double": { + "shape": "DoubleType" + }, + "Long": { + "shape": "LongType" + }, + "Char": { + "shape": "CharType" + } + } + }, + "StringType": { + "type": "string" + }, + "IntegerType": { + "type": "integer" + }, + 
"BooleanType": { + "type": "boolean" + }, + "FloatType": { + "type": "float" + }, + "DoubleType": { + "type": "double" + }, + "LongType": { + "type": "long" + }, + "CharType": { + "type": "character" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Str": "myname", + "Num": 123, + "FalseBool": false, + "TrueBool": true, + "Float": 1.2, + "Double": 1.3, + "Long": 200, + "Char": "a" + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "myname123falsetrue1.21.3200arequest-id" + } + } + ] + }, + { + "description": "Blob", + "metadata": { + "protocol": "ec2" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Blob": { + "shape": "BlobType" + } + } + }, + "BlobType": { + "type": "blob" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Blob": "value" + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "dmFsdWU=requestid" + } + } + ] + }, + { + "description": "Lists", + "metadata": { + "protocol": "ec2" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "ListMember": { + "shape": "ListShape" + } + } + }, + "ListShape": { + "type": "list", + "member": { + "shape": "StringType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "ListMember": ["abc", "123"] + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "abc123requestid" + } + } + ] + }, + { + "description": "List with custom member name", + "metadata": { + "protocol": "ec2" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "ListMember": { + "shape": "ListShape" + } + } + }, + "ListShape": { + "type": "list", + "member": { + "shape": "StringType", + "locationName": "item" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "ListMember": ["abc", "123"] + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "abc123requestid" + } + } + ] + }, + { + "description": "Flattened List", + "metadata": { + "protocol": "ec2" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "ListMember": { + "shape": "ListType", + "flattened": true + } + } + }, + "ListType": { + "type": "list", + "member": { + "shape": "StringType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "ListMember": ["abc", "123"] + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "abc123requestid" + } + } + ] + }, + { + "description": "Normal map", + "metadata": { + "protocol": "ec2" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Map": { + "shape": "MapType" + } + } + }, + "MapType": { + "type": "map", + "key": { + "shape": "StringType" + }, + "value": { + "shape": "StructureType" + } + }, + "StructureType": { + "type": "structure", + "members": { + "foo": { + "shape": "StringType" + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Map": { + "qux": { + "foo": "bar" + }, + "baz": { + "foo": "bam" + } + } + }, + 
"response": { + "status_code": 200, + "headers": {}, + "body": "quxbarbazbamrequestid" + } + } + ] + }, + { + "description": "Flattened map", + "metadata": { + "protocol": "ec2" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Map": { + "shape": "MapType", + "flattened": true + } + } + }, + "MapType": { + "type": "map", + "key": { + "shape": "StringType" + }, + "value": { + "shape": "StringType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Map": { + "qux": "bar", + "baz": "bam" + } + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "quxbarbazbamrequestid" + } + } + ] + }, + { + "description": "Named map", + "metadata": { + "protocol": "ec2" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Map": { + "shape": "MapType", + "flattened": true + } + } + }, + "MapType": { + "type": "map", + "key": { + "shape": "StringType", + "locationName": "foo" + }, + "value": { + "shape": "StringType", + "locationName": "bar" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Map": { + "qux": "bar", + "baz": "bam" + } + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "quxbarbazbamrequestid" + } + } + ] + }, + { + "description": "Empty string", + "metadata": { + "protocol": "ec2" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Foo": { + "shape": "StringType" + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Foo": "" + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "requestid" + } + } + ] + } +] diff --git a/vendor/github.com/aws/aws-sdk-go/models/protocol_tests/output/json.json b/vendor/github.com/aws/aws-sdk-go/models/protocol_tests/output/json.json new file mode 100644 index 000000000..17777f5de --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/protocol_tests/output/json.json @@ -0,0 +1,369 @@ +[ + { + "description": "Scalar members", + "metadata": { + "protocol": "json" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Str": { + "shape": "StringType" + }, + "Num": { + "shape": "IntegerType" + }, + "FalseBool": { + "shape": "BooleanType" + }, + "TrueBool": { + "shape": "BooleanType" + }, + "Float": { + "shape": "FloatType" + }, + "Double": { + "shape": "DoubleType" + }, + "Long": { + "shape": "LongType" + }, + "Char": { + "shape": "CharType" + } + } + }, + "StringType": { + "type": "string" + }, + "IntegerType": { + "type": "integer" + }, + "BooleanType": { + "type": "boolean" + }, + "FloatType": { + "type": "float" + }, + "DoubleType": { + "type": "double" + }, + "LongType": { + "type": "long" + }, + "CharType": { + "type": "character" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Str": "myname", + "Num": 123, + "FalseBool": false, + "TrueBool": true, + "Float": 1.2, + "Double": 1.3, + "Long": 200, + "Char": "a" + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "{\"Str\": \"myname\", \"Num\": 123, \"FalseBool\": false, \"TrueBool\": true, \"Float\": 1.2, \"Double\": 1.3, \"Long\": 200, \"Char\": \"a\"}" + } + } + ] + }, + { + 
"description": "Blob members", + "metadata": { + "protocol": "json" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "BlobMember": { + "shape": "BlobType" + }, + "StructMember": { + "shape": "BlobContainer" + } + } + }, + "BlobType": { + "type": "blob" + }, + "BlobContainer": { + "type": "structure", + "members": { + "foo": { + "shape": "BlobType" + } + } + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "BlobMember": "hi!", + "StructMember": { + "foo": "there!" + } + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "{\"BlobMember\": \"aGkh\", \"StructMember\": {\"foo\": \"dGhlcmUh\"}}" + } + } + ] + }, + { + "description": "Timestamp members", + "metadata": { + "protocol": "json" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "TimeMember": { + "shape": "TimeType" + }, + "StructMember": { + "shape": "TimeContainer" + } + } + }, + "TimeType": { + "type": "timestamp" + }, + "TimeContainer": { + "type": "structure", + "members": { + "foo": { + "shape": "TimeType" + } + } + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "TimeMember": 1398796238, + "StructMember": { + "foo": 1398796238 + } + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "{\"TimeMember\": 1398796238, \"StructMember\": {\"foo\": 1398796238}}" + } + } + ] + }, + { + "description": "Lists", + "metadata": { + "protocol": "json" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "ListMember": { + "shape": "ListType" + }, + "ListMemberMap": { + "shape": "ListTypeMap" + }, + "ListMemberStruct": { + "shape": "ListTypeStruct" + } + } + }, + "ListType": { + "type": "list", + "member": { + "shape": "StringType" + } + }, + "ListTypeMap": { + "type": "list", + "member": { + "shape": "MapType" + } + }, + "ListTypeStruct": { + "type": "list", + "member": { + "shape": "StructType" + } + }, + "StringType": { + "type": "string" + }, + "StructType": { + "type": "structure", + "members": { + } + }, + "MapType": { + "type": "map", + "key": { "shape": "StringType" }, + "value": { "shape": "StringType" } + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "ListMember": ["a", "b"] + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "{\"ListMember\": [\"a\", \"b\"]}" + } + }, + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "ListMember": ["a", null], + "ListMemberMap": [{}, null, null, {}], + "ListMemberStruct": [{}, null, null, {}] + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "{\"ListMember\": [\"a\", null], \"ListMemberMap\": [{}, null, null, {}], \"ListMemberStruct\": [{}, null, null, {}]}" + } + } + ] + }, + { + "description": "Maps", + "metadata": { + "protocol": "json" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "MapMember": { + "shape": "MapType" + } + } + }, + "MapType": { + "type": "map", + "key": { + "shape": "StringType" + }, + "value": { + "shape": "NumberList" + } + }, + "StringType": { + "type": "string" + }, + "NumberList": { + "type": "list", + "member": { + "shape": "IntegerType" + } + }, + "IntegerType": { + "type": "integer" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + 
"result": { + "MapMember": { + "a": [1, 2], + "b": [3, 4] + } + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "{\"MapMember\": {\"a\": [1, 2], \"b\": [3, 4]}}" + } + } + ] + }, + { + "description": "Ignores extra data", + "metadata": { + "protocol": "json" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "StrType": { + "shape": "StrType" + } + } + }, + "StrType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": {}, + "response": { + "status_code": 200, + "headers": {}, + "body": "{\"foo\": \"bar\"}" + } + } + ] + } +] diff --git a/vendor/github.com/aws/aws-sdk-go/models/protocol_tests/output/query.json b/vendor/github.com/aws/aws-sdk-go/models/protocol_tests/output/query.json new file mode 100644 index 000000000..505ed1122 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/protocol_tests/output/query.json @@ -0,0 +1,775 @@ +[ + { + "description": "Scalar members", + "metadata": { + "protocol": "query" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Str": { + "shape": "StringType" + }, + "Num": { + "shape": "IntegerType", + "locationName": "FooNum" + }, + "FalseBool": { + "shape": "BooleanType" + }, + "TrueBool": { + "shape": "BooleanType" + }, + "Float": { + "shape": "FloatType" + }, + "Double": { + "shape": "DoubleType" + }, + "Long": { + "shape": "LongType" + }, + "Char": { + "shape": "CharType" + }, + "Timestamp": { + "shape": "TimestampType" + } + } + }, + "StringType": { + "type": "string" + }, + "IntegerType": { + "type": "integer" + }, + "BooleanType": { + "type": "boolean" + }, + "FloatType": { + "type": "float" + }, + "DoubleType": { + "type": "double" + }, + "LongType": { + "type": "long" + }, + "CharType": { + "type": "character" + }, + "TimestampType": { + "type": "timestamp" + } + }, + "cases": [ + { + "given": { + "output": { + "resultWrapper": "OperationNameResult", + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Str": "myname", + "Num": 123, + "FalseBool": false, + "TrueBool": true, + "Float": 1.2, + "Double": 1.3, + "Long": 200, + "Char": "a", + "Timestamp": 1422172800 + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "myname123falsetrue1.21.3200a2015-01-25T08:00:00Zrequest-id" + } + } + ] + }, + { + "description": "Not all members in response", + "metadata": { + "protocol": "query" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Str": { + "shape": "StringType" + }, + "Num": { + "shape": "IntegerType" + } + } + }, + "StringType": { + "type": "string" + }, + "IntegerType": { + "type": "integer" + } + }, + "cases": [ + { + "given": { + "output": { + "resultWrapper": "OperationNameResult", + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Str": "myname" + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "mynamerequest-id" + } + } + ] + }, + { + "description": "Blob", + "metadata": { + "protocol": "query" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Blob": { + "shape": "BlobType" + } + } + }, + "BlobType": { + "type": "blob" + } + }, + "cases": [ + { + "given": { + "output": { + "resultWrapper": "OperationNameResult", + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Blob": "value" + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "dmFsdWU=requestid" + } + } + ] + }, + { + 
"description": "Lists", + "metadata": { + "protocol": "query" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "ListMember": { + "shape": "ListShape" + } + } + }, + "ListShape": { + "type": "list", + "member": { + "shape": "StringType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "resultWrapper": "OperationNameResult", + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "ListMember": ["abc", "123"] + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "abc123requestid" + } + } + ] + }, + { + "description": "List with custom member name", + "metadata": { + "protocol": "query" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "ListMember": { + "shape": "ListShape" + } + } + }, + "ListShape": { + "type": "list", + "member": { + "shape": "StringType", + "locationName": "item" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "resultWrapper": "OperationNameResult", + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "ListMember": ["abc", "123"] + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "abc123requestid" + } + } + ] + }, + { + "description": "Flattened List", + "metadata": { + "protocol": "query" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "ListMember": { + "shape": "ListType" + } + } + }, + "ListType": { + "type": "list", + "flattened": true, + "member": { + "shape": "StringType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "resultWrapper": "OperationNameResult", + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "ListMember": ["abc", "123"] + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "abc123requestid" + } + } + ] + }, + { + "description": "Flattened single element list", + "metadata": { + "protocol": "query" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "ListMember": { + "shape": "ListType" + } + } + }, + "ListType": { + "type": "list", + "flattened": true, + "member": { + "shape": "StringType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "resultWrapper": "OperationNameResult", + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "ListMember": ["abc"] + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "abcrequestid" + } + } + ] + }, + { + "description": "List of structures", + "metadata": { + "protocol": "query" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "List": { + "shape": "ListOfStructs" + } + } + }, + "ListOfStructs": { + "type": "list", + "member": { + "shape": "StructureShape" + } + }, + "StructureShape": { + "type": "structure", + "members": { + "Foo": { + "shape": "StringShape" + }, + "Bar": { + "shape": "StringShape" + }, + "Baz": { + "shape": "StringShape" + } + } + }, + "StringShape": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "resultWrapper": "OperationNameResult", + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "List": [{"Foo": "firstfoo", "Bar": "firstbar", "Baz": "firstbaz"}, {"Foo": "secondfoo", "Bar": "secondbar", "Baz": "secondbaz"}] + }, + "response": { + "status_code": 200, + "headers": {}, + "body": 
"firstfoofirstbarfirstbazsecondfoosecondbarsecondbazrequestid" + } + } + ] + }, + { + "description": "Flattened list of structures", + "metadata": { + "protocol": "query" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "resultWrapper": "OperationNameResult", + "members": { + "List": { + "shape": "ListOfStructs" + } + } + }, + "ListOfStructs": { + "type": "list", + "flattened": true, + "member": { + "shape": "StructureShape" + } + }, + "StructureShape": { + "type": "structure", + "members": { + "Foo": { + "shape": "StringShape" + }, + "Bar": { + "shape": "StringShape" + }, + "Baz": { + "shape": "StringShape" + } + } + }, + "StringShape": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "List": [{"Foo": "firstfoo", "Bar": "firstbar", "Baz": "firstbaz"}, {"Foo": "secondfoo", "Bar": "secondbar", "Baz": "secondbaz"}] + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "firstfoofirstbarfirstbazsecondfoosecondbarsecondbazrequestid" + } + } + ] + }, + { + "description": "Flattened list with location name", + "metadata": { + "protocol": "query" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "List": { + "shape": "ListType" + } + } + }, + "ListType": { + "type": "list", + "flattened": true, + "member": { + "shape": "StringShape", + "locationName": "NamedList" + } + }, + "StringShape": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "resultWrapper": "OperationNameResult", + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "List": ["a", "b"] + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "abrequestid" + } + } + ] + }, + { + "description": "Normal map", + "metadata": { + "protocol": "query" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Map": { + "shape": "StringMap" + } + } + }, + "StringMap": { + "type": "map", + "key": { + "shape": "StringType" + }, + "value": { + "shape": "StructType" + } + }, + "StringType": { + "type": "string" + }, + "StructType": { + "type": "structure", + "members": { + "foo": { + "shape": "StringType" + } + } + } + }, + "cases": [ + { + "given": { + "output": { + "resultWrapper": "OperationNameResult", + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Map": { + "qux": { + "foo": "bar" + }, + "baz": { + "foo": "bam" + } + } + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "quxbarbazbamrequestid" + } + } + ] + }, + { + "description": "Flattened map", + "metadata": { + "protocol": "query" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Map": { + "shape": "StringMap", + "flattened": true + } + } + }, + "StringMap": { + "type": "map", + "key": { + "shape": "StringType" + }, + "value": { + "shape": "StringType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "resultWrapper": "OperationNameResult", + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Map": { + "qux": "bar", + "baz": "bam" + } + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "quxbarbazbamrequestid" + } + } + ] + }, + { + "description": "Flattened map in shape definition", + "metadata": { + "protocol": "query" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Map": { + "shape": "StringMap", + "locationName": "Attribute" + } + } + }, + 
"StringMap": { + "type": "map", + "key": { + "shape": "StringType", + "locationName": "Name" + }, + "value": { + "shape": "StringType", + "locationName": "Value" + }, + "flattened": true, + "locationName": "Attribute" + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "resultWrapper": "OperationNameResult", + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Map": { + "qux": "bar" + } + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "quxbarrequestid" + } + } + ] + }, + { + "description": "Named map", + "metadata": { + "protocol": "query" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Map": { + "shape": "MapType" + } + } + }, + "MapType": { + "type": "map", + "flattened": true, + "key": { + "locationName": "foo", + "shape": "StringType" + }, + "value": { + "locationName": "bar", + "shape": "StringType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "resultWrapper": "OperationNameResult", + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Map": { + "qux": "bar", + "baz": "bam" + } + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "quxbarbazbamrequestid" + } + } + ] + }, + { + "description": "Empty string", + "metadata": { + "protocol": "query" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Foo": { + "shape": "StringType" + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Foo": "" + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "requestid" + } + } + ] + } +] diff --git a/vendor/github.com/aws/aws-sdk-go/models/protocol_tests/output/rest-json.json b/vendor/github.com/aws/aws-sdk-go/models/protocol_tests/output/rest-json.json new file mode 100644 index 000000000..2173d027a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/protocol_tests/output/rest-json.json @@ -0,0 +1,608 @@ +[ + { + "description": "Scalar members", + "metadata": { + "protocol": "rest-json" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "ImaHeader": { + "shape": "HeaderShape" + }, + "ImaHeaderLocation": { + "shape": "HeaderShape", + "locationName": "X-Foo" + }, + "Status": { + "shape": "StatusShape", + "location": "statusCode" + }, + "Str": { + "shape": "StringType" + }, + "Num": { + "shape": "IntegerType" + }, + "FalseBool": { + "shape": "BooleanType" + }, + "TrueBool": { + "shape": "BooleanType" + }, + "Float": { + "shape": "FloatType" + }, + "Double": { + "shape": "DoubleType" + }, + "Long": { + "shape": "LongType" + }, + "Char": { + "shape": "CharType" + } + } + }, + "HeaderShape": { + "type": "string", + "location": "header" + }, + "StatusShape": { + "type": "integer" + }, + "StringType": { + "type": "string" + }, + "IntegerType": { + "type": "integer" + }, + "BooleanType": { + "type": "boolean" + }, + "FloatType": { + "type": "float" + }, + "DoubleType": { + "type": "double" + }, + "LongType": { + "type": "long" + }, + "CharType": { + "type": "character" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "ImaHeader": "test", + "ImaHeaderLocation": "abc", + "Status": 200, + "Str": "myname", + "Num": 123, + "FalseBool": false, + "TrueBool": true, + "Float": 1.2, + "Double": 1.3, + "Long": 200, + "Char": 
"a" + }, + "response": { + "status_code": 200, + "headers": { + "ImaHeader": "test", + "X-Foo": "abc" + }, + "body": "{\"Str\": \"myname\", \"Num\": 123, \"FalseBool\": false, \"TrueBool\": true, \"Float\": 1.2, \"Double\": 1.3, \"Long\": 200, \"Char\": \"a\"}" + } + } + ] + }, + { + "description": "Blob members", + "metadata": { + "protocol": "rest-json" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "BlobMember": { + "shape": "BlobType" + }, + "StructMember": { + "shape": "BlobContainer" + } + } + }, + "BlobType": { + "type": "blob" + }, + "BlobContainer": { + "type": "structure", + "members": { + "foo": { + "shape": "BlobType" + } + } + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "BlobMember": "hi!", + "StructMember": { + "foo": "there!" + } + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "{\"BlobMember\": \"aGkh\", \"StructMember\": {\"foo\": \"dGhlcmUh\"}}" + } + } + ] + }, + { + "description": "Timestamp members", + "metadata": { + "protocol": "rest-json" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "TimeMember": { + "shape": "TimeType" + }, + "StructMember": { + "shape": "TimeContainer" + } + } + }, + "TimeType": { + "type": "timestamp" + }, + "TimeContainer": { + "type": "structure", + "members": { + "foo": { + "shape": "TimeType" + } + } + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "TimeMember": 1398796238, + "StructMember": { + "foo": 1398796238 + } + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "{\"TimeMember\": 1398796238, \"StructMember\": {\"foo\": 1398796238}}" + } + } + ] + }, + { + "description": "Lists", + "metadata": { + "protocol": "rest-json" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "ListMember": { + "shape": "ListType" + } + } + }, + "ListType": { + "type": "list", + "member": { + "shape": "StringType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "ListMember": ["a", "b"] + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "{\"ListMember\": [\"a\", \"b\"]}" + } + } + ] + }, + { + "description": "Lists with structure member", + "metadata": { + "protocol": "rest-json" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "ListMember": { + "shape": "ListType" + } + } + }, + "ListType": { + "type": "list", + "member": { + "shape": "SingleStruct" + } + }, + "StringType": { + "type": "string" + }, + "SingleStruct": { + "type": "structure", + "members": { + "Foo": { + "shape": "StringType" + } + } + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "ListMember": [{"Foo": "a"}, {"Foo": "b"}] + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "{\"ListMember\": [{\"Foo\": \"a\"}, {\"Foo\": \"b\"}]}" + } + } + ] + }, + { + "description": "Maps", + "metadata": { + "protocol": "rest-json" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "MapMember": { + "shape": "MapType" + } + } + }, + "MapType": { + "type": "map", + "key": { + "shape": "StringType" + }, + "value": { + "shape": "ListType" + } + }, + "ListType": { + "type": "list", + "member": { + "shape": "IntegerType" + } + }, 
+ "StringType": { + "type": "string" + }, + "IntegerType": { + "type": "integer" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "MapMember": { + "a": [1, 2], + "b": [3, 4] + } + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "{\"MapMember\": {\"a\": [1, 2], \"b\": [3, 4]}}" + } + } + ] + }, + { + "description": "Complex Map Values", + "metadata": { + "protocol": "rest-json" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "MapMember": { + "shape": "MapType" + } + } + }, + "MapType": { + "type": "map", + "key": { + "shape": "StringType" + }, + "value": { + "shape": "TimeType" + } + }, + "TimeType": { + "type": "timestamp" + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "MapMember": { + "a": 1398796238, + "b": 1398796238 + } + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "{\"MapMember\": {\"a\": 1398796238, \"b\": 1398796238}}" + } + } + ] + }, + { + "description": "Ignores extra data", + "metadata": { + "protocol": "rest-json" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "StrType": { + "shape": "StrType" + } + } + }, + "StrType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": {}, + "response": { + "status_code": 200, + "headers": {}, + "body": "{\"foo\": \"bar\"}" + } + } + ] + }, + { + "description": "Supports header maps", + "metadata": { + "protocol": "rest-json" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "AllHeaders": { + "shape": "HeaderMap", + "location": "headers" + }, + "PrefixedHeaders": { + "shape": "HeaderMap", + "location": "headers", + "locationName": "X-" + } + } + }, + "HeaderMap": { + "type": "map", + "key": { + "shape": "StringType" + }, + "value": { + "shape": "StringType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "AllHeaders": { + "Content-Length": "10", + "X-Foo": "bar", + "X-Bam": "boo" + }, + "PrefixedHeaders": { + "Foo": "bar", + "Bam": "boo" + } + }, + "response": { + "status_code": 200, + "headers": { + "Content-Length": "10", + "X-Foo": "bar", + "X-Bam": "boo" + }, + "body": "{}" + } + } + ] + }, + { + "description": "JSON payload", + "metadata": { + "protocol": "rest-json" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "payload": "Data", + "members": { + "Header": { + "shape": "StringType", + "location": "header", + "locationName": "X-Foo" + }, + "Data": { + "shape": "BodyStructure" + } + } + }, + "BodyStructure": { + "type": "structure", + "members": { + "Foo": { + "shape": "StringType" + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Header": "baz", + "Data": { + "Foo": "abc" + } + }, + "response": { + "status_code": 200, + "headers": { + "X-Foo": "baz" + }, + "body": "{\"Foo\": \"abc\"}" + } + } + ] + }, + { + "description": "Streaming payload", + "metadata": { + "protocol": "rest-json" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "payload": "Stream", + "members": { + "Stream": { + "shape": "Stream" + } + } + }, + "Stream": { + 
"type": "blob" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Stream": "abc" + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "abc" + } + } + ] + } +] diff --git a/vendor/github.com/aws/aws-sdk-go/models/protocol_tests/output/rest-xml.json b/vendor/github.com/aws/aws-sdk-go/models/protocol_tests/output/rest-xml.json new file mode 100644 index 000000000..4d4f892a4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/models/protocol_tests/output/rest-xml.json @@ -0,0 +1,720 @@ +[ + { + "description": "Scalar members", + "metadata": { + "protocol": "rest-xml" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "ImaHeader": { + "shape": "HeaderShape" + }, + "ImaHeaderLocation": { + "shape": "HeaderShape", + "locationName": "X-Foo" + }, + "Str": { + "shape": "StringType" + }, + "Num": { + "shape": "IntegerType", + "locationName": "FooNum" + }, + "FalseBool": { + "shape": "BooleanType" + }, + "TrueBool": { + "shape": "BooleanType" + }, + "Float": { + "shape": "FloatType" + }, + "Double": { + "shape": "DoubleType" + }, + "Long": { + "shape": "LongType" + }, + "Char": { + "shape": "CharType" + }, + "Timestamp": { + "shape": "TimestampType" + } + } + }, + "StringType": { + "type": "string" + }, + "IntegerType": { + "type": "integer" + }, + "BooleanType": { + "type": "boolean" + }, + "FloatType": { + "type": "float" + }, + "DoubleType": { + "type": "double" + }, + "LongType": { + "type": "long" + }, + "CharType": { + "type": "character" + }, + "HeaderShape": { + "type": "string", + "location": "header" + }, + "StatusShape": { + "type": "integer", + "location": "statusCode" + }, + "TimestampType": { + "type": "timestamp" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "ImaHeader": "test", + "ImaHeaderLocation": "abc", + "Str": "myname", + "Num": 123, + "FalseBool": false, + "TrueBool": true, + "Float": 1.2, + "Double": 1.3, + "Long": 200, + "Char": "a", + "Timestamp": 1422172800 + }, + "response": { + "status_code": 200, + "headers": { + "ImaHeader": "test", + "X-Foo": "abc" + }, + "body": "myname123falsetrue1.21.3200a2015-01-25T08:00:00Z" + } + }, + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "ImaHeader": "test", + "ImaHeaderLocation": "abc", + "Str": "", + "Num": 123, + "FalseBool": false, + "TrueBool": true, + "Float": 1.2, + "Double": 1.3, + "Long": 200, + "Char": "a", + "Timestamp": 1422172800 + }, + "response": { + "status_code": 200, + "headers": { + "ImaHeader": "test", + "X-Foo": "abc" + }, + "body": "123falsetrue1.21.3200a2015-01-25T08:00:00Z" + } + } + ] + }, + { + "description": "Blob", + "metadata": { + "protocol": "rest-xml" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Blob": { + "shape": "BlobType" + } + } + }, + "BlobType": { + "type": "blob" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Blob": "value" + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "dmFsdWU=" + } + } + ] + }, + { + "description": "Lists", + "metadata": { + "protocol": "rest-xml" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "ListMember": { + "shape": "ListShape" + } + } + }, + "ListShape": { + "type": "list", + "member": { + "shape": "StringType" + } + }, + "StringType": { + 
"type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "ListMember": ["abc", "123"] + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "abc123" + } + } + ] + }, + { + "description": "List with custom member name", + "metadata": { + "protocol": "rest-xml" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "ListMember": { + "shape": "ListShape" + } + } + }, + "ListShape": { + "type": "list", + "member": { + "shape": "StringType", + "locationName": "item" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "ListMember": ["abc", "123"] + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "abc123" + } + } + ] + }, + { + "description": "Flattened List", + "metadata": { + "protocol": "rest-xml" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "ListMember": { + "shape": "StringList", + "flattened": true + } + } + }, + "StringList": { + "type": "list", + "member": { + "shape": "StringType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "ListMember": ["abc", "123"] + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "abc123" + } + } + ] + }, + { + "description": "Normal map", + "metadata": { + "protocol": "rest-xml" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Map": { + "shape": "StringMap" + } + } + }, + "StringMap": { + "type": "map", + "key": { + "shape": "StringType" + }, + "value": { + "shape": "SingleStructure" + } + }, + "SingleStructure": { + "type": "structure", + "members": { + "foo": { + "shape": "StringType" + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Map": { + "qux": { + "foo": "bar" + }, + "baz": { + "foo": "bam" + } + } + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "quxbarbazbam" + } + } + ] + }, + { + "description": "Flattened map", + "metadata": { + "protocol": "rest-xml" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Map": { + "shape": "StringMap", + "flattened": true + } + } + }, + "StringMap": { + "type": "map", + "key": { + "shape": "StringType" + }, + "value": { + "shape": "StringType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Map": { + "qux": "bar", + "baz": "bam" + } + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "quxbarbazbam" + } + } + ] + }, + { + "description": "Named map", + "metadata": { + "protocol": "rest-xml" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Map": { + "shape": "StringMap" + } + } + }, + "StringMap": { + "type": "map", + "key": { + "shape": "StringType", + "locationName": "foo" + }, + "value": { + "shape": "StringType", + "locationName": "bar" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Map": { + "qux": "bar", + "baz": "bam" + } + }, + "response": { + "status_code": 
200, + "headers": {}, + "body": "quxbarbazbam" + } + } + ] + }, + { + "description": "XML payload", + "metadata": { + "protocol": "rest-xml" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "payload": "Data", + "members": { + "Header": { + "shape": "StringType", + "location": "header", + "locationName": "X-Foo" + }, + "Data": { + "shape": "SingleStructure" + } + } + }, + "StringType": { + "type": "string" + }, + "SingleStructure": { + "type": "structure", + "members": { + "Foo": { + "shape": "StringType" + } + } + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Header": "baz", + "Data": { + "Foo": "abc" + } + }, + "response": { + "status_code": 200, + "headers": { + "X-Foo": "baz" + }, + "body": "abc" + } + } + ] + }, + { + "description": "Streaming payload", + "metadata": { + "protocol": "rest-xml" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "payload": "Stream", + "members": { + "Stream": { + "shape": "BlobStream" + } + } + }, + "BlobStream": { + "type": "blob" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Stream": "abc" + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "abc" + } + } + ] + }, + { + "description": "Scalar members in headers", + "metadata": { + "protocol": "rest-xml" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Str": { + "locationName": "x-str", + "shape": "StringHeaderType" + }, + "Integer": { + "locationName": "x-int", + "shape": "IntegerHeaderType" + }, + "TrueBool": { + "locationName": "x-true-bool", + "shape": "BooleanHeaderType" + }, + "FalseBool": { + "locationName": "x-false-bool", + "shape": "BooleanHeaderType" + }, + "Float": { + "locationName": "x-float", + "shape": "FloatHeaderType" + }, + "Double": { + "locationName": "x-double", + "shape": "DoubleHeaderType" + }, + "Long": { + "locationName": "x-long", + "shape": "LongHeaderType" + }, + "Char": { + "locationName": "x-char", + "shape": "CharHeaderType" + }, + "Timestamp": { + "locationName": "x-timestamp", + "shape": "TimestampHeaderType" + } + } + }, + "StringHeaderType": { + "location": "header", + "type": "string" + }, + "IntegerHeaderType": { + "location": "header", + "type": "integer" + }, + "BooleanHeaderType": { + "location": "header", + "type": "boolean" + }, + "FloatHeaderType": { + "location": "header", + "type": "float" + }, + "DoubleHeaderType": { + "location": "header", + "type": "double" + }, + "LongHeaderType": { + "location": "header", + "type": "long" + }, + "CharHeaderType": { + "location": "header", + "type": "character" + }, + "TimestampHeaderType": { + "location": "header", + "type": "timestamp" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Str": "string", + "Integer": 1, + "TrueBool": true, + "FalseBool": false, + "Float": 1.5, + "Double": 1.5, + "Long": 100, + "Char": "a", + "Timestamp": 1422172800 + }, + "response": { + "status_code": 200, + "headers": { + "x-str": "string", + "x-int": "1", + "x-true-bool": "true", + "x-false-bool": "false", + "x-float": "1.5", + "x-double": "1.5", + "x-long": "100", + "x-char": "a", + "x-timestamp": "Sun, 25 Jan 2015 08:00:00 GMT" + }, + "body": "" + } + } + ] + }, + { + "description": "Empty string", + "metadata": { + "protocol": "rest-xml" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + 
"Foo": { + "shape": "StringType" + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Foo": "" + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "requestid" + } + } + ] + } +] diff --git a/vendor/github.com/aws/aws-sdk-go/private/README.md b/vendor/github.com/aws/aws-sdk-go/private/README.md new file mode 100644 index 000000000..5bdb4c50a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/README.md @@ -0,0 +1,4 @@ +## AWS SDK for Go Private packages ## +`private` is a collection of packages used internally by the SDK, and is subject to have breaking changes. This package is not `internal` so that if you really need to use its functionality, and understand breaking changes will be made, you are able to. + +These packages will be refactored in the future so that the API generator and model parsers are exposed cleanly on their own. Making it easier for you to generate your own code based on the API models. diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go new file mode 100644 index 000000000..2b279e659 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go @@ -0,0 +1,65 @@ +// Package endpoints validates regional endpoints for services. +package endpoints + +//go:generate go run ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go +//go:generate gofmt -s -w endpoints_map.go + +import ( + "fmt" + "regexp" + "strings" +) + +// NormalizeEndpoint takes and endpoint and service API information to return a +// normalized endpoint and signing region. If the endpoint is not an empty string +// the service name and region will be used to look up the service's API endpoint. +// If the endpoint is provided the scheme will be added if it is not present. +func NormalizeEndpoint(endpoint, serviceName, region string, disableSSL bool) (normEndpoint, signingRegion string) { + if endpoint == "" { + return EndpointForRegion(serviceName, region, disableSSL) + } + + return AddScheme(endpoint, disableSSL), "" +} + +// EndpointForRegion returns an endpoint and its signing region for a service and region. +// if the service and region pair are not found endpoint and signingRegion will be empty. +func EndpointForRegion(svcName, region string, disableSSL bool) (endpoint, signingRegion string) { + derivedKeys := []string{ + region + "/" + svcName, + region + "/*", + "*/" + svcName, + "*/*", + } + + for _, key := range derivedKeys { + if val, ok := endpointsMap.Endpoints[key]; ok { + ep := val.Endpoint + ep = strings.Replace(ep, "{region}", region, -1) + ep = strings.Replace(ep, "{service}", svcName, -1) + + endpoint = ep + signingRegion = val.SigningRegion + break + } + } + + return AddScheme(endpoint, disableSSL), signingRegion +} + +// Regular expression to determine if the endpoint string is prefixed with a scheme. +var schemeRE = regexp.MustCompile("^([^:]+)://") + +// AddScheme adds the HTTP or HTTPS schemes to a endpoint URL if there is no +// scheme. If disableSSL is true HTTP will be added instead of the default HTTPS. 
+func AddScheme(endpoint string, disableSSL bool) string { + if endpoint != "" && !schemeRE.MatchString(endpoint) { + scheme := "https" + if disableSSL { + scheme = "http" + } + endpoint = fmt.Sprintf("%s://%s", scheme, endpoint) + } + + return endpoint +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json new file mode 100644 index 000000000..5f4991c2b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json @@ -0,0 +1,75 @@ +{ + "version": 2, + "endpoints": { + "*/*": { + "endpoint": "{service}.{region}.amazonaws.com" + }, + "cn-north-1/*": { + "endpoint": "{service}.{region}.amazonaws.com.cn", + "signatureVersion": "v4" + }, + "cn-north-1/ec2metadata": { + "endpoint": "http://169.254.169.254/latest" + }, + "us-gov-west-1/iam": { + "endpoint": "iam.us-gov.amazonaws.com" + }, + "us-gov-west-1/sts": { + "endpoint": "sts.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "us-gov-west-1/ec2metadata": { + "endpoint": "http://169.254.169.254/latest" + }, + "*/cloudfront": { + "endpoint": "cloudfront.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/cloudsearchdomain": { + "endpoint": "", + "signingRegion": "us-east-1" + }, + "*/data.iot": { + "endpoint": "", + "signingRegion": "us-east-1" + }, + "*/ec2metadata": { + "endpoint": "http://169.254.169.254/latest" + }, + "*/iam": { + "endpoint": "iam.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/importexport": { + "endpoint": "importexport.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/route53": { + "endpoint": "route53.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/sts": { + "endpoint": "sts.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/waf": { + "endpoint": "waf.amazonaws.com", + "signingRegion": "us-east-1" + }, + "us-east-1/sdb": { + "endpoint": "sdb.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "us-east-1/s3": { + "endpoint": "s3.amazonaws.com" + }, + "eu-central-1/s3": { + "endpoint": "{service}.{region}.amazonaws.com" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go new file mode 100644 index 000000000..e995315ab --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go @@ -0,0 +1,88 @@ +package endpoints + +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +type endpointStruct struct { + Version int + Endpoints map[string]endpointEntry +} + +type endpointEntry struct { + Endpoint string + SigningRegion string +} + +var endpointsMap = endpointStruct{ + Version: 2, + Endpoints: map[string]endpointEntry{ + "*/*": { + Endpoint: "{service}.{region}.amazonaws.com", + }, + "*/cloudfront": { + Endpoint: "cloudfront.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/cloudsearchdomain": { + Endpoint: "", + SigningRegion: "us-east-1", + }, + "*/data.iot": { + Endpoint: "", + SigningRegion: "us-east-1", + }, + "*/ec2metadata": { + Endpoint: "http://169.254.169.254/latest", + }, + "*/iam": { + Endpoint: "iam.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/importexport": { + Endpoint: "importexport.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/route53": { + Endpoint: "route53.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "*/sts": { + Endpoint: "sts.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/waf": { + Endpoint: "waf.amazonaws.com", + SigningRegion: "us-east-1", + }, + "cn-north-1/*": { + Endpoint: "{service}.{region}.amazonaws.com.cn", + }, + "cn-north-1/ec2metadata": { + Endpoint: "http://169.254.169.254/latest", + }, + "eu-central-1/s3": { + Endpoint: "{service}.{region}.amazonaws.com", + }, + "us-east-1/s3": { + Endpoint: "s3.amazonaws.com", + }, + "us-east-1/sdb": { + Endpoint: "sdb.amazonaws.com", + SigningRegion: "us-east-1", + }, + "us-gov-west-1/ec2metadata": { + Endpoint: "http://169.254.169.254/latest", + }, + "us-gov-west-1/iam": { + Endpoint: "iam.us-gov.amazonaws.com", + }, + "us-gov-west-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "us-gov-west-1/sts": { + Endpoint: "sts.us-gov-west-1.amazonaws.com", + }, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_test.go b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_test.go new file mode 100644 index 000000000..0c43c589f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_test.go @@ -0,0 +1,51 @@ +package endpoints_test + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/private/endpoints" +) + +func TestGenericEndpoint(t *testing.T) { + name := "service" + region := "mock-region-1" + + ep, sr := endpoints.EndpointForRegion(name, region, false) + assert.Equal(t, fmt.Sprintf("https://%s.%s.amazonaws.com", name, region), ep) + assert.Empty(t, sr) +} + +func TestGlobalEndpoints(t *testing.T) { + region := "mock-region-1" + svcs := []string{"cloudfront", "iam", "importexport", "route53", "sts", "waf"} + + for _, name := range svcs { + ep, sr := endpoints.EndpointForRegion(name, region, false) + assert.Equal(t, fmt.Sprintf("https://%s.amazonaws.com", name), ep) + assert.Equal(t, "us-east-1", sr) + } +} + +func TestServicesInCN(t *testing.T) { + region := "cn-north-1" + svcs := []string{"cloudfront", "iam", "importexport", "route53", "sts", "s3", "waf"} + + for _, name := range svcs { + ep, sr := endpoints.EndpointForRegion(name, region, false) + assert.Equal(t, fmt.Sprintf("https://%s.%s.amazonaws.com.cn", name, region), ep) + assert.Empty(t, sr) + } +} + +func TestEC2MetadataEndpoints(t *testing.T) { + regions := []string{"us-east-1", "us-gov-west-1", "cn-north-1", "mock-region-1"} + + for _, region := range regions { + ep, sr := endpoints.EndpointForRegion("ec2metadata", region, false) + assert.Equal(t, "http://169.254.169.254/latest", ep) + assert.Equal(t, "", 
sr)
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/model/api/api.go b/vendor/github.com/aws/aws-sdk-go/private/model/api/api.go
new file mode 100644
index 000000000..4a4a5ed9a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/model/api/api.go
@@ -0,0 +1,485 @@
+// Package api represents API abstractions for rendering service generated files.
+package api
+
+import (
+	"bytes"
+	"fmt"
+	"path"
+	"regexp"
+	"sort"
+	"strings"
+	"text/template"
+)
+
+// An API defines a service API's definition, and logic to serialize the definition.
+type API struct {
+	Metadata      Metadata
+	Operations    map[string]*Operation
+	Shapes        map[string]*Shape
+	Waiters       []Waiter
+	Documentation string
+
+	// Set to true to avoid removing unused shapes
+	NoRemoveUnusedShapes bool
+
+	// Set to true to avoid renaming to 'Input/Output' postfixed shapes
+	NoRenameToplevelShapes bool
+
+	// Set to true to ignore service/request init methods (for testing)
+	NoInitMethods bool
+
+	// Set to true to ignore String() and GoString methods (for generated tests)
+	NoStringerMethods bool
+
+	// Set to true to not generate API service name constants
+	NoConstServiceNames bool
+
+	// Set to true to not generate validation shapes
+	NoValidataShapeMethods bool
+
+	SvcClientImportPath string
+
+	initialized bool
+	imports     map[string]bool
+	name        string
+	path        string
+}
+
+// A Metadata is the metadata about an API's definition.
+type Metadata struct {
+	APIVersion          string
+	EndpointPrefix      string
+	SigningName         string
+	ServiceAbbreviation string
+	ServiceFullName     string
+	SignatureVersion    string
+	JSONVersion         string
+	TargetPrefix        string
+	Protocol            string
+}
+
+// PackageName returns the name of the API package.
+func (a *API) PackageName() string {
+	return strings.ToLower(a.StructName())
+}
+
+// InterfacePackageName returns the package name for the interface.
+func (a *API) InterfacePackageName() string {
+	return a.PackageName() + "iface"
+}
+
+var nameRegex = regexp.MustCompile(`^Amazon|AWS\s*|\(.*|\s+|\W+`)
+
+// StructName returns the struct name for a given API.
+func (a *API) StructName() string {
+	if a.name == "" {
+		name := a.Metadata.ServiceAbbreviation
+		if name == "" {
+			name = a.Metadata.ServiceFullName
+		}
+
+		name = nameRegex.ReplaceAllString(name, "")
+		switch name {
+		case "ElasticLoadBalancing":
+			a.name = "ELB"
+		case "Config":
+			a.name = "ConfigService"
+		default:
+			a.name = name
+		}
+	}
+	return a.name
+}
+
+// UseInitMethods returns if the service's init method should be rendered.
+func (a *API) UseInitMethods() bool {
+	return !a.NoInitMethods
+}
+
+// NiceName returns the human-friendly API name.
+func (a *API) NiceName() string {
+	if a.Metadata.ServiceAbbreviation != "" {
+		return a.Metadata.ServiceAbbreviation
+	}
+	return a.Metadata.ServiceFullName
+}
+
+// ProtocolPackage returns the package name of the protocol this API uses.
+func (a *API) ProtocolPackage() string {
+	switch a.Metadata.Protocol {
+	case "json":
+		return "jsonrpc"
+	case "ec2":
+		return "ec2query"
+	default:
+		return strings.Replace(a.Metadata.Protocol, "-", "", -1)
+	}
+}
+
+// OperationNames returns a slice of API operations supported.
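+// The returned names are sorted so generated output is deterministic.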
+func (a *API) OperationNames() []string { + i, names := 0, make([]string, len(a.Operations)) + for n := range a.Operations { + names[i] = n + i++ + } + sort.Strings(names) + return names +} + +// OperationList returns a slice of API operation pointers +func (a *API) OperationList() []*Operation { + list := make([]*Operation, len(a.Operations)) + for i, n := range a.OperationNames() { + list[i] = a.Operations[n] + } + return list +} + +// OperationHasOutputPlaceholder returns if any of the API operation input +// or output shapes are place holders. +func (a *API) OperationHasOutputPlaceholder() bool { + for _, op := range a.Operations { + if op.OutputRef.Shape.Placeholder { + return true + } + } + return false +} + +// ShapeNames returns a slice of names for each shape used by the API. +func (a *API) ShapeNames() []string { + i, names := 0, make([]string, len(a.Shapes)) + for n := range a.Shapes { + names[i] = n + i++ + } + sort.Strings(names) + return names +} + +// ShapeList returns a slice of shape pointers used by the API. +func (a *API) ShapeList() []*Shape { + list := make([]*Shape, len(a.Shapes)) + for i, n := range a.ShapeNames() { + list[i] = a.Shapes[n] + } + return list +} + +// resetImports resets the import map to default values. +func (a *API) resetImports() { + a.imports = map[string]bool{ + "github.com/aws/aws-sdk-go/aws": true, + } +} + +// importsGoCode returns the generated Go import code. +func (a *API) importsGoCode() string { + if len(a.imports) == 0 { + return "" + } + + corePkgs, extPkgs := []string{}, []string{} + for i := range a.imports { + if strings.Contains(i, ".") { + extPkgs = append(extPkgs, i) + } else { + corePkgs = append(corePkgs, i) + } + } + sort.Strings(corePkgs) + sort.Strings(extPkgs) + + code := "import (\n" + for _, i := range corePkgs { + code += fmt.Sprintf("\t%q\n", i) + } + if len(corePkgs) > 0 { + code += "\n" + } + for _, i := range extPkgs { + code += fmt.Sprintf("\t%q\n", i) + } + code += ")\n\n" + return code +} + +// A tplAPI is the top level template for the API +var tplAPI = template.Must(template.New("api").Parse(` +{{ range $_, $o := .OperationList }} +{{ $o.GoCode }} + +{{ end }} + +{{ range $_, $s := .ShapeList }} +{{ if and $s.IsInternal (eq $s.Type "structure") }}{{ $s.GoCode }}{{ end }} + +{{ end }} + +{{ range $_, $s := .ShapeList }} +{{ if $s.IsEnum }}{{ $s.GoCode }}{{ end }} + +{{ end }} +`)) + +// APIGoCode renders the API in Go code. Returning it as a string +func (a *API) APIGoCode() string { + a.resetImports() + delete(a.imports, "github.com/aws/aws-sdk-go/aws") + a.imports["github.com/aws/aws-sdk-go/aws/awsutil"] = true + a.imports["github.com/aws/aws-sdk-go/aws/request"] = true + if a.OperationHasOutputPlaceholder() { + a.imports["github.com/aws/aws-sdk-go/private/protocol/"+a.ProtocolPackage()] = true + a.imports["github.com/aws/aws-sdk-go/private/protocol"] = true + } + + for _, op := range a.Operations { + if op.AuthType == "none" { + a.imports["github.com/aws/aws-sdk-go/aws/credentials"] = true + break + } + } + + var buf bytes.Buffer + err := tplAPI.Execute(&buf, a) + if err != nil { + panic(err) + } + + code := a.importsGoCode() + strings.TrimSpace(buf.String()) + return code +} + +// A tplService defines the template for the service generated code. +var tplService = template.Must(template.New("service").Parse(` +{{ .Documentation }}//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+type {{ .StructName }} struct { + *client.Client +} + +{{ if .UseInitMethods }}// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) +{{ end }} + +{{ if not .NoConstServiceNames }} +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "{{ .Metadata.EndpointPrefix }}" +{{ end }} + +// New creates a new instance of the {{ .StructName }} client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a {{ .StructName }} client from just a session. +// svc := {{ .PackageName }}.New(mySession) +// +// // Create a {{ .StructName }} client with additional configuration +// svc := {{ .PackageName }}.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *{{ .StructName }} { + c := p.ClientConfig({{ if .NoConstServiceNames }}"{{ .Metadata.EndpointPrefix }}"{{ else }}ServiceName{{ end }}, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *{{ .StructName }} { + svc := &{{ .StructName }}{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: {{ if .NoConstServiceNames }}"{{ .Metadata.EndpointPrefix }}"{{ else }}ServiceName{{ end }}, {{ if ne .Metadata.SigningName "" }} + SigningName: "{{ .Metadata.SigningName }}",{{ end }} + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "{{ .Metadata.APIVersion }}", +{{ if eq .Metadata.Protocol "json" }}JSONVersion: "{{ .Metadata.JSONVersion }}", + TargetPrefix: "{{ .Metadata.TargetPrefix }}", +{{ end }} + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed({{if eq .Metadata.SignatureVersion "v2"}}v2{{else}}v4{{end}}.SignRequestHandler) + {{if eq .Metadata.SignatureVersion "v2"}}svc.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) + {{end}}svc.Handlers.Build.PushBackNamed({{ .ProtocolPackage }}.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed({{ .ProtocolPackage }}.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed({{ .ProtocolPackage }}.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed({{ .ProtocolPackage }}.UnmarshalErrorHandler) + + {{ if .UseInitMethods }}// Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + {{ end }} + + return svc +} + +// newRequest creates a new request for a {{ .StructName }} operation and runs any +// custom request initialization. +func (c *{{ .StructName }}) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + {{ if .UseInitMethods }}// Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + {{ end }} + + return req +} +`)) + +// ServiceGoCode renders service go code. Returning it as a string. 
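+// The rendered source is the client scaffolding defined by tplService above:
+// the client struct, the New constructor, and the newClient/newRequest
+// helpers with the protocol and signer handlers wired in.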
+func (a *API) ServiceGoCode() string { + a.resetImports() + a.imports["github.com/aws/aws-sdk-go/aws/client"] = true + a.imports["github.com/aws/aws-sdk-go/aws/client/metadata"] = true + a.imports["github.com/aws/aws-sdk-go/aws/request"] = true + if a.Metadata.SignatureVersion == "v2" { + a.imports["github.com/aws/aws-sdk-go/private/signer/v2"] = true + a.imports["github.com/aws/aws-sdk-go/aws/corehandlers"] = true + } else { + a.imports["github.com/aws/aws-sdk-go/aws/signer/v4"] = true + } + a.imports["github.com/aws/aws-sdk-go/private/protocol/"+a.ProtocolPackage()] = true + + var buf bytes.Buffer + err := tplService.Execute(&buf, a) + if err != nil { + panic(err) + } + + code := a.importsGoCode() + buf.String() + return code +} + +// ExampleGoCode renders service example code. Returning it as a string. +func (a *API) ExampleGoCode() string { + exs := []string{} + for _, o := range a.OperationList() { + exs = append(exs, o.Example()) + } + + code := fmt.Sprintf("import (\n%q\n%q\n%q\n\n%q\n%q\n%q\n)\n\n"+ + "var _ time.Duration\nvar _ bytes.Buffer\n\n%s", + "bytes", + "fmt", + "time", + "github.com/aws/aws-sdk-go/aws", + "github.com/aws/aws-sdk-go/aws/session", + path.Join(a.SvcClientImportPath, a.PackageName()), + strings.Join(exs, "\n\n"), + ) + return code +} + +// A tplInterface defines the template for the service interface type. +var tplInterface = template.Must(template.New("interface").Parse(` +// {{ .StructName }}API is the interface type for {{ .PackageName }}.{{ .StructName }}. +type {{ .StructName }}API interface { + {{ range $_, $o := .OperationList }} + {{ $o.InterfaceSignature }} + {{ end }} +} + +var _ {{ .StructName }}API = (*{{ .PackageName }}.{{ .StructName }})(nil) +`)) + +// InterfaceGoCode returns the go code for the service's API operations as an +// interface{}. Assumes that the interface is being created in a different +// package than the service API's package. +func (a *API) InterfaceGoCode() string { + a.resetImports() + a.imports = map[string]bool{ + "github.com/aws/aws-sdk-go/aws/request": true, + path.Join(a.SvcClientImportPath, a.PackageName()): true, + } + + var buf bytes.Buffer + err := tplInterface.Execute(&buf, a) + + if err != nil { + panic(err) + } + + code := a.importsGoCode() + strings.TrimSpace(buf.String()) + return code +} + +// NewAPIGoCodeWithPkgName returns a string of instantiating the API prefixed +// with its package name. Takes a string depicting the Config. +func (a *API) NewAPIGoCodeWithPkgName(cfg string) string { + return fmt.Sprintf("%s.New(%s)", a.PackageName(), cfg) +} + +// computes the validation chain for all input shapes +func (a *API) addShapeValidations() { + for _, o := range a.Operations { + resolveShapeValidations(o.InputRef.Shape) + } +} + +// Updates the source shape and all nested shapes with the validations that +// could possibly be needed. 
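+// The ancestry parameter tracks shapes already visited so that recursive
+// shape definitions do not cause infinite recursion.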
+func resolveShapeValidations(s *Shape, ancestry ...*Shape) { + for _, a := range ancestry { + if a == s { + return + } + } + + children := []string{} + for _, name := range s.MemberNames() { + ref := s.MemberRefs[name] + + if s.IsRequired(name) && !s.Validations.Has(ref, ShapeValidationRequired) { + s.Validations = append(s.Validations, ShapeValidation{ + Name: name, Ref: ref, Type: ShapeValidationRequired, + }) + } + + if ref.Shape.Min != 0 && !s.Validations.Has(ref, ShapeValidationMinVal) { + s.Validations = append(s.Validations, ShapeValidation{ + Name: name, Ref: ref, Type: ShapeValidationMinVal, + }) + } + + switch ref.Shape.Type { + case "map", "list", "structure": + children = append(children, name) + } + } + + ancestry = append(ancestry, s) + for _, name := range children { + ref := s.MemberRefs[name] + nestedShape := ref.Shape.NestedShape() + + var v *ShapeValidation + if len(nestedShape.Validations) > 0 { + v = &ShapeValidation{ + Name: name, Ref: ref, Type: ShapeValidationNested, + } + } else { + resolveShapeValidations(nestedShape, ancestry...) + if len(nestedShape.Validations) > 0 { + v = &ShapeValidation{ + Name: name, Ref: ref, Type: ShapeValidationNested, + } + } + } + + if v != nil && !s.Validations.Has(v.Ref, v.Type) { + s.Validations = append(s.Validations, *v) + } + } + ancestry = ancestry[:len(ancestry)-1] +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/model/api/api_test.go b/vendor/github.com/aws/aws-sdk-go/private/model/api/api_test.go new file mode 100644 index 000000000..e68e3e6fc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/model/api/api_test.go @@ -0,0 +1,44 @@ +// +build 1.6 + +package api + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestStructNameWithFullName(t *testing.T) { + a := API{ + Metadata: Metadata{ + ServiceFullName: "Amazon Service Name-100", + }, + } + assert.Equal(t, a.StructName(), "ServiceName100") +} + +func TestStructNameWithAbbreviation(t *testing.T) { + a := API{ + Metadata: Metadata{ + ServiceFullName: "AWS Service Name-100", + ServiceAbbreviation: "AWS SN100", + }, + } + assert.Equal(t, a.StructName(), "SN100") +} + +func TestStructNameForExceptions(t *testing.T) { + a := API{ + Metadata: Metadata{ + ServiceFullName: "Elastic Load Balancing", + }, + } + assert.Equal(t, a.StructName(), "ELB") + + a = API{ + Metadata: Metadata{ + ServiceFullName: "AWS Config", + }, + } + assert.Equal(t, a.StructName(), "ConfigService") +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/model/api/customization_passes.go b/vendor/github.com/aws/aws-sdk-go/private/model/api/customization_passes.go new file mode 100644 index 000000000..9027f05f4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/model/api/customization_passes.go @@ -0,0 +1,104 @@ +package api + +import ( + "fmt" + "path/filepath" + "strings" +) + +// customizationPasses Executes customization logic for the API by package name. 
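+// Only the s3, cloudfront, and dynamodbstreams packages have dedicated
+// passes; blobDocStringCustomizations runs for every API.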
+func (a *API) customizationPasses() { + var svcCustomizations = map[string]func(*API){ + "s3": s3Customizations, + "cloudfront": cloudfrontCustomizations, + "dynamodbstreams": dynamodbstreamsCustomizations, + } + + if fn := svcCustomizations[a.PackageName()]; fn != nil { + fn(a) + } + + blobDocStringCustomizations(a) +} + +const base64MarshalDocStr = "// %s is automatically base64 encoded/decoded by the SDK.\n" + +func blobDocStringCustomizations(a *API) { + for _, s := range a.Shapes { + payloadMemberName := s.Payload + + for refName, ref := range s.MemberRefs { + if refName == payloadMemberName { + // Payload members have their own encoding and may + // be raw bytes or io.Reader + continue + } + if ref.Shape.Type == "blob" { + docStr := fmt.Sprintf(base64MarshalDocStr, refName) + if len(strings.TrimSpace(ref.Shape.Documentation)) != 0 { + ref.Shape.Documentation += "//\n" + docStr + } else if len(strings.TrimSpace(ref.Documentation)) != 0 { + ref.Documentation += "//\n" + docStr + } else { + ref.Documentation = docStr + } + } + } + } +} + +// s3Customizations customizes the API generation to replace values specific to S3. +func s3Customizations(a *API) { + var strExpires *Shape + + for name, s := range a.Shapes { + // Remove ContentMD5 members + if _, ok := s.MemberRefs["ContentMD5"]; ok { + delete(s.MemberRefs, "ContentMD5") + } + + // Expires should be a string not time.Time since the format is not + // enforced by S3, and any value can be set to this field outside of the SDK. + if strings.HasSuffix(name, "Output") { + if ref, ok := s.MemberRefs["Expires"]; ok { + if strExpires == nil { + newShape := *ref.Shape + strExpires = &newShape + strExpires.Type = "string" + strExpires.refs = []*ShapeRef{} + } + ref.Shape.removeRef(ref) + ref.Shape = strExpires + ref.Shape.refs = append(ref.Shape.refs, &s.MemberRef) + } + } + } +} + +// cloudfrontCustomizations customized the API generation to replace values +// specific to CloudFront. +func cloudfrontCustomizations(a *API) { + // MaxItems members should always be integers + for _, s := range a.Shapes { + if ref, ok := s.MemberRefs["MaxItems"]; ok { + ref.ShapeName = "Integer" + ref.Shape = a.Shapes["Integer"] + } + } +} + +// dynamodbstreamsCustomizations references any duplicate shapes from DynamoDB +func dynamodbstreamsCustomizations(a *API) { + p := strings.Replace(a.path, "streams.dynamodb", "dynamodb", -1) + file := filepath.Join(p, "api-2.json") + + dbAPI := API{} + dbAPI.Attach(file) + dbAPI.Setup() + + for n := range a.Shapes { + if _, ok := dbAPI.Shapes[n]; ok { + a.Shapes[n].resolvePkg = "github.com/aws/aws-sdk-go/service/dynamodb" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/model/api/docstring.go b/vendor/github.com/aws/aws-sdk-go/private/model/api/docstring.go new file mode 100644 index 000000000..859aaa4f0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/model/api/docstring.go @@ -0,0 +1,156 @@ +package api + +import ( + "bytes" + "encoding/json" + "fmt" + "html" + "os" + "regexp" + "strings" +) + +type apiDocumentation struct { + *API + Operations map[string]string + Service string + Shapes map[string]shapeDocumentation +} + +type shapeDocumentation struct { + Base string + Refs map[string]string +} + +// AttachDocs attaches documentation from a JSON filename. 
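+// The decoded document carries a service-level doc string, an operations
+// map, and per-shape documentation whose member refs use the
+// "ShapeName$memberName" form handled in setup below.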
+func (a *API) AttachDocs(filename string) {
+	d := apiDocumentation{API: a}
+
+	f, err := os.Open(filename)
+	defer f.Close()
+	if err != nil {
+		panic(err)
+	}
+	err = json.NewDecoder(f).Decode(&d)
+	if err != nil {
+		panic(err)
+	}
+
+	d.setup()
+}
+
+func (d *apiDocumentation) setup() {
+	d.API.Documentation = docstring(d.Service)
+	if d.Service == "" {
+		d.API.Documentation =
+			fmt.Sprintf("// %s is a client for %s.\n", d.API.StructName(), d.API.NiceName())
+	}
+
+	for op, doc := range d.Operations {
+		d.API.Operations[op].Documentation = docstring(doc)
+	}
+
+	for shape, info := range d.Shapes {
+		if sh := d.API.Shapes[shape]; sh != nil {
+			sh.Documentation = docstring(info.Base)
+		}
+
+		for ref, doc := range info.Refs {
+			if doc == "" {
+				continue
+			}
+
+			parts := strings.Split(ref, "$")
+			if sh := d.API.Shapes[parts[0]]; sh != nil {
+				if m := sh.MemberRefs[parts[1]]; m != nil {
+					m.Documentation = docstring(doc)
+				}
+			}
+		}
+	}
+}
+
+var reNewline = regexp.MustCompile(`\r?\n`)
+var reMultiSpace = regexp.MustCompile(`\s+`)
+var reComments = regexp.MustCompile(`<!--.*?-->`)
+var reFullname = regexp.MustCompile(`\s*<fullname?>.+?<\/fullname?>\s*`)
+var reExamples = regexp.MustCompile(`<examples?>.+?<\/examples?>`)
+var rePara = regexp.MustCompile(`<(?:p|h\d)>(.+?)</(?:p|h\d)>`)
+var reLink = regexp.MustCompile(`<a href="(.+?)">(.+?)</a>`)
+var reTag = regexp.MustCompile(`<.+?>`)
+var reEndNL = regexp.MustCompile(`\n+$`)
+
+// docstring rewrites a string to insert godocs formatting.
+func docstring(doc string) string {
+	doc = reNewline.ReplaceAllString(doc, "")
+	doc = reMultiSpace.ReplaceAllString(doc, " ")
+	doc = reComments.ReplaceAllString(doc, "")
+	doc = reFullname.ReplaceAllString(doc, "")
+	doc = reExamples.ReplaceAllString(doc, "")
+	doc = rePara.ReplaceAllString(doc, "$1\n\n")
+	doc = reLink.ReplaceAllString(doc, "$2 ($1)")
+	doc = reTag.ReplaceAllString(doc, "$1")
+	doc = reEndNL.ReplaceAllString(doc, "")
+	doc = strings.TrimSpace(doc)
+	if doc == "" {
+		return "\n"
+	}
+
+	doc = html.UnescapeString(doc)
+	doc = wrap(doc, 72)
+
+	return commentify(doc)
+}
+
+// commentify converts a string to a Go comment.
+func commentify(doc string) string {
+	lines := strings.Split(doc, "\n")
+	out := []string{}
+	for i, line := range lines {
+		if i > 0 && line == "" && lines[i-1] == "" {
+			continue
+		}
+		out = append(out, "// "+line)
+	}
+
+	return strings.Join(out, "\n") + "\n"
+}
+
+// wrap returns a rewritten version of text to have line breaks
+// at approximately length characters. Line breaks will only be
+// inserted into whitespace.
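+//
+// For example (editor's illustration): wrap("hello world again", 5)
+// returns "hello\nworld\nagain", since each space reached at or past
+// the column limit becomes a line break.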
+func wrap(text string, length int) string {
+	var buf bytes.Buffer
+	var last rune
+	var lastNL bool
+	var col int
+
+	for _, c := range text {
+		switch c {
+		case '\r': // ignore this
+			continue // and also don't track `last`
+		case '\n': // ignore this too, but reset col
+			if col >= length || last == '\n' {
+				buf.WriteString("\n\n")
+			}
+			col = 0
+		case ' ', '\t': // opportunity to split
+			if col >= length {
+				buf.WriteByte('\n')
+				col = 0
+			} else {
+				if !lastNL {
+					buf.WriteRune(c)
+				}
+				col++ // count column
+			}
+		default:
+			buf.WriteRune(c)
+			col++
+		}
+		lastNL = c == '\n'
+		last = c
+	}
+	return buf.String()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/model/api/exportable_name.go b/vendor/github.com/aws/aws-sdk-go/private/model/api/exportable_name.go
new file mode 100644
index 000000000..efb1356d6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/model/api/exportable_name.go
@@ -0,0 +1,12 @@
+package api
+
+import "strings"
+
+// ExportableName returns a name which is exportable as a value or name in Go code.
+func (a *API) ExportableName(name string) string {
+	if name == "" {
+		return name
+	}
+
+	return strings.ToUpper(name[0:1]) + name[1:]
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/model/api/load.go b/vendor/github.com/aws/aws-sdk-go/private/model/api/load.go
new file mode 100644
index 000000000..c740495ed
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/model/api/load.go
@@ -0,0 +1,71 @@
+package api
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"path/filepath"
+)
+
+// Load takes a set of files for each filetype and returns an API pointer.
+// The API will be initialized once all files have been loaded and parsed.
+//
+// Will panic on any failure opening the definition JSON files, or if there
+// are unrecognized exported names.
+func Load(api, docs, paginators, waiters string) *API {
+	a := API{}
+	a.Attach(api)
+	a.Attach(docs)
+	a.Attach(paginators)
+	a.Attach(waiters)
+	a.Setup()
+	return &a
+}
+
+// Attach opens a file by name, and unmarshals its JSON data.
+// Will proceed to setup the API if not already done so.
+func (a *API) Attach(filename string) {
+	a.path = filepath.Dir(filename)
+	f, err := os.Open(filename)
+	defer f.Close()
+	if err != nil {
+		panic(err)
+	}
+	if err := json.NewDecoder(f).Decode(a); err != nil {
+		panic(fmt.Errorf("failed to decode %s, err: %v", filename, err))
+	}
+}
+
+// AttachString will unmarshal a raw JSON string, and setup the
+// API if not already done so.
+func (a *API) AttachString(str string) {
+	json.Unmarshal([]byte(str), a)
+
+	if !a.initialized {
+		a.Setup()
+	}
+}
+
+// Setup initializes the API.
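+// The passes below are order dependent: resolveReferences must run before
+// the rename passes, because Shape.Rename rewrites the refs list that
+// resolveReferences populates.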
+func (a *API) Setup() {
+	a.writeShapeNames()
+	a.resolveReferences()
+	a.fixStutterNames()
+	a.renameExportable()
+	if !a.NoRenameToplevelShapes {
+		a.renameToplevelShapes()
+	}
+	a.updateTopLevelShapeReferences()
+	a.createInputOutputShapes()
+	a.customizationPasses()
+
+	if !a.NoRemoveUnusedShapes {
+		a.removeUnusedShapes()
+	}
+
+	if !a.NoValidataShapeMethods {
+		a.addShapeValidations()
+	}
+
+	a.initialized = true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/model/api/load_test.go b/vendor/github.com/aws/aws-sdk-go/private/model/api/load_test.go
new file mode 100644
index 000000000..77677261f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/model/api/load_test.go
@@ -0,0 +1,32 @@
+// +build 1.6
+
+package api
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestResolvedReferences(t *testing.T) {
+	json := `{
+		"operations": {
+			"OperationName": {
+				"input": { "shape": "TestName" }
+			}
+		},
+		"shapes": {
+			"TestName": {
+				"type": "structure",
+				"members": {
+					"memberName1": { "shape": "OtherTest" },
+					"memberName2": { "shape": "OtherTest" }
+				}
+			},
+			"OtherTest": { "type": "string" }
+		}
+	}`
+	a := API{}
+	a.AttachString(json)
+	assert.Equal(t, len(a.Shapes["OtherTest"].refs), 2)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/model/api/operation.go b/vendor/github.com/aws/aws-sdk-go/private/model/api/operation.go
new file mode 100644
index 000000000..8b9e691f4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/model/api/operation.go
@@ -0,0 +1,354 @@
+package api
+
+import (
+	"bytes"
+	"fmt"
+	"regexp"
+	"sort"
+	"strings"
+	"text/template"
+)
+
+// An Operation defines a specific API Operation.
+type Operation struct {
+	API           *API `json:"-"`
+	ExportedName  string
+	Name          string
+	Documentation string
+	HTTP          HTTPInfo
+	InputRef      ShapeRef `json:"input"`
+	OutputRef     ShapeRef `json:"output"`
+	Paginator     *Paginator
+	Deprecated    bool   `json:"deprecated"`
+	AuthType      string `json:"authtype"`
+}
+
+// An HTTPInfo defines the method of HTTP request for the Operation.
+type HTTPInfo struct {
+	Method       string
+	RequestURI   string
+	ResponseCode uint
+}
+
+// HasInput returns if the Operation accepts an input parameter.
+func (o *Operation) HasInput() bool {
+	return o.InputRef.ShapeName != ""
+}
+
+// HasOutput returns if the Operation returns an output parameter.
+func (o *Operation) HasOutput() bool {
+	return o.OutputRef.ShapeName != ""
+}
+
+// tplOperation defines a template for rendering an API Operation
+var tplOperation = template.Must(template.New("operation").Parse(`
+const op{{ .ExportedName }} = "{{ .Name }}"
+
+// {{ .ExportedName }}Request generates a "aws/request.Request" representing the
+// client's request for the {{ .ExportedName }} operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the {{ .ExportedName }} method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the {{ .ExportedName }}Request method.
+// req, resp := client.{{ .ExportedName }}Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *{{ .API.StructName }}) {{ .ExportedName }}Request(` + + `input {{ .InputRef.GoType }}) (req *request.Request, output {{ .OutputRef.GoType }}) { + {{ if (or .Deprecated (or .InputRef.Deprecated .OutputRef.Deprecated)) }}if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, {{ .ExportedName }}, has been deprecated") + } + op := &request.Operation{ {{ else }} op := &request.Operation{ {{ end }} + Name: op{{ .ExportedName }}, + {{ if ne .HTTP.Method "" }}HTTPMethod: "{{ .HTTP.Method }}", + {{ end }}{{ if ne .HTTP.RequestURI "" }}HTTPPath: "{{ .HTTP.RequestURI }}", + {{ end }}{{ if .Paginator }}Paginator: &request.Paginator{ + InputTokens: {{ .Paginator.InputTokensString }}, + OutputTokens: {{ .Paginator.OutputTokensString }}, + LimitToken: "{{ .Paginator.LimitKey }}", + TruncationToken: "{{ .Paginator.MoreResults }}", + }, + {{ end }} + } + + if input == nil { + input = &{{ .InputRef.GoTypeElem }}{} + } + + req = c.newRequest(op, input, output){{ if eq .OutputRef.Shape.Placeholder true }} + req.Handlers.Unmarshal.Remove({{ .API.ProtocolPackage }}.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler){{ end }} + {{ if eq .AuthType "none" }}req.Config.Credentials = credentials.AnonymousCredentials + output = &{{ .OutputRef.GoTypeElem }}{} {{ else }} output = &{{ .OutputRef.GoTypeElem }}{} {{ end }} + req.Data = output + return +} + +{{ .Documentation }}func (c *{{ .API.StructName }}) {{ .ExportedName }}(` + + `input {{ .InputRef.GoType }}) ({{ .OutputRef.GoType }}, error) { + req, out := c.{{ .ExportedName }}Request(input) + err := req.Send() + return out, err +} + +{{ if .Paginator }} +// {{ .ExportedName }}Pages iterates over the pages of a {{ .ExportedName }} operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See {{ .ExportedName }} method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a {{ .ExportedName }} operation. +// pageNum := 0 +// err := client.{{ .ExportedName }}Pages(params, +// func(page {{ .OutputRef.GoType }}, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *{{ .API.StructName }}) {{ .ExportedName }}Pages(` + + `input {{ .InputRef.GoType }}, fn func(p {{ .OutputRef.GoType }}, lastPage bool) (shouldContinue bool)) error { + page, _ := c.{{ .ExportedName }}Request(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.({{ .OutputRef.GoType }}), lastPage) + }) +} +{{ end }} +`)) + +// GoCode returns a string of rendered GoCode for this Operation +func (o *Operation) GoCode() string { + var buf bytes.Buffer + err := tplOperation.Execute(&buf, o) + if err != nil { + panic(err) + } + + return strings.TrimSpace(buf.String()) +} + +// tplInfSig defines the template for rendering an Operation's signature within an Interface definition. 
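+//
+// For a hypothetical GetItem operation on a "dynamodb"-like package the
+// rendered signatures look roughly like:
+//
+//	GetItemRequest(*dynamodb.GetItemInput) (*request.Request, *dynamodb.GetItemOutput)
+//	GetItem(*dynamodb.GetItemInput) (*dynamodb.GetItemOutput, error)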
+var tplInfSig = template.Must(template.New("opsig").Parse(`
+{{ .ExportedName }}Request({{ .InputRef.GoTypeWithPkgName }}) (*request.Request, {{ .OutputRef.GoTypeWithPkgName }})
+
+{{ .ExportedName }}({{ .InputRef.GoTypeWithPkgName }}) ({{ .OutputRef.GoTypeWithPkgName }}, error)
+{{ if .Paginator }}
+{{ .ExportedName }}Pages({{ .InputRef.GoTypeWithPkgName }}, func({{ .OutputRef.GoTypeWithPkgName }}, bool) bool) error{{ end }}
+`))
+
+// InterfaceSignature returns a string representing the Operation's interface{}
+// functional signature.
+func (o *Operation) InterfaceSignature() string {
+	var buf bytes.Buffer
+	err := tplInfSig.Execute(&buf, o)
+	if err != nil {
+		panic(err)
+	}
+
+	return strings.TrimSpace(buf.String())
+}
+
+// tplExample defines the template for rendering an Operation example
+var tplExample = template.Must(template.New("operationExample").Parse(`
+func Example{{ .API.StructName }}_{{ .ExportedName }}() {
+	svc := {{ .API.PackageName }}.New(session.New())
+
+	{{ .ExampleInput }}
+	resp, err := svc.{{ .ExportedName }}(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+`))
+
+// Example returns a string of the rendered Go code for the Operation
+func (o *Operation) Example() string {
+	var buf bytes.Buffer
+	err := tplExample.Execute(&buf, o)
+	if err != nil {
+		panic(err)
+	}
+
+	return strings.TrimSpace(buf.String())
+}
+
+// ExampleInput returns a string of the rendered Go code for an example's input parameters
+func (o *Operation) ExampleInput() string {
+	if len(o.InputRef.Shape.MemberRefs) == 0 {
+		return fmt.Sprintf("var params *%s.%s",
+			o.API.PackageName(), o.InputRef.GoTypeElem())
+	}
+	e := example{o, map[string]int{}}
+	return "params := " + e.traverseAny(o.InputRef.Shape, false, false)
+}
+
+// An example provides the state needed to render an operation's example
+// input parameters, tracking visited shapes to break recursion.
+type example struct {
+	*Operation
+	visited map[string]int
+}
+
+// traverseAny returns rendered Go code for the shape.
+func (e *example) traverseAny(s *Shape, required, payload bool) string {
+	str := ""
+	e.visited[s.ShapeName]++
+
+	switch s.Type {
+	case "structure":
+		str = e.traverseStruct(s, required, payload)
+	case "list":
+		str = e.traverseList(s, required, payload)
+	case "map":
+		str = e.traverseMap(s, required, payload)
+	default:
+		str = e.traverseScalar(s, required, payload)
+	}
+
+	e.visited[s.ShapeName]--
+
+	return str
+}
+
+var reType = regexp.MustCompile(`\b([A-Z])`)
+
+// traverseStruct returns rendered Go code for a structure type shape.
+func (e *example) traverseStruct(s *Shape, required, payload bool) string {
+	var buf bytes.Buffer
+	buf.WriteString("&" + s.API.PackageName() + "."
+ s.GoTypeElem() + "{") + if required { + buf.WriteString(" // Required") + } + buf.WriteString("\n") + + req := make([]string, len(s.Required)) + copy(req, s.Required) + sort.Strings(req) + + if e.visited[s.ShapeName] < 2 { + for _, n := range req { + m := s.MemberRefs[n].Shape + p := n == s.Payload && (s.MemberRefs[n].Streaming || m.Streaming) + buf.WriteString(n + ": " + e.traverseAny(m, true, p) + ",") + if m.Type != "list" && m.Type != "structure" && m.Type != "map" { + buf.WriteString(" // Required") + } + buf.WriteString("\n") + } + + for _, n := range s.MemberNames() { + if s.IsRequired(n) { + continue + } + m := s.MemberRefs[n].Shape + p := n == s.Payload && (s.MemberRefs[n].Streaming || m.Streaming) + buf.WriteString(n + ": " + e.traverseAny(m, false, p) + ",\n") + } + } else { + buf.WriteString("// Recursive values...\n") + } + + buf.WriteString("}") + return buf.String() +} + +// traverseMap returns rendered Go code for a map type shape. +func (e *example) traverseMap(s *Shape, required, payload bool) string { + var buf bytes.Buffer + t := reType.ReplaceAllString(s.GoTypeElem(), s.API.PackageName()+".$1") + buf.WriteString(t + "{") + if required { + buf.WriteString(" // Required") + } + buf.WriteString("\n") + + if e.visited[s.ShapeName] < 2 { + m := s.ValueRef.Shape + buf.WriteString("\"Key\": " + e.traverseAny(m, true, false) + ",") + if m.Type != "list" && m.Type != "structure" && m.Type != "map" { + buf.WriteString(" // Required") + } + buf.WriteString("\n// More values...\n") + } else { + buf.WriteString("// Recursive values...\n") + } + buf.WriteString("}") + + return buf.String() +} + +// traverseList returns rendered Go code for a list type shape. +func (e *example) traverseList(s *Shape, required, payload bool) string { + var buf bytes.Buffer + t := reType.ReplaceAllString(s.GoTypeElem(), s.API.PackageName()+".$1") + buf.WriteString(t + "{") + if required { + buf.WriteString(" // Required") + } + buf.WriteString("\n") + + if e.visited[s.ShapeName] < 2 { + m := s.MemberRef.Shape + buf.WriteString(e.traverseAny(m, true, false) + ",") + if m.Type != "list" && m.Type != "structure" && m.Type != "map" { + buf.WriteString(" // Required") + } + buf.WriteString("\n// More values...\n") + } else { + buf.WriteString("// Recursive values...\n") + } + buf.WriteString("}") + + return buf.String() +} + +// traverseScalar returns an AWS Type string representation initialized to a value. +// Will panic if s is an unsupported shape type. +func (e *example) traverseScalar(s *Shape, required, payload bool) string { + str := "" + switch s.Type { + case "integer", "long": + str = `aws.Int64(1)` + case "float", "double": + str = `aws.Float64(1.0)` + case "string", "character": + str = `aws.String("` + s.ShapeName + `")` + case "blob": + if payload { + str = `bytes.NewReader([]byte("PAYLOAD"))` + } else { + str = `[]byte("PAYLOAD")` + } + case "boolean": + str = `aws.Bool(true)` + case "timestamp": + str = `aws.Time(time.Now())` + default: + panic("unsupported shape " + s.Type) + } + + return str +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/model/api/pagination.go b/vendor/github.com/aws/aws-sdk-go/private/model/api/pagination.go new file mode 100644 index 000000000..c07820dd7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/model/api/pagination.go @@ -0,0 +1,89 @@ +package api + +import ( + "encoding/json" + "fmt" + "os" +) + +// Paginator keeps track of pagination configuration for an API operation. 
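+//
+// A paginator entry in a paginators-1.json file looks roughly like this
+// (abridged, with illustrative member names):
+//
+//	"ListTables": {
+//		"input_token": "ExclusiveStartTableName",
+//		"output_token": "LastEvaluatedTableName",
+//		"limit_key": "Limit"
+//	}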
+type Paginator struct {
+	InputTokens  interface{} `json:"input_token"`
+	OutputTokens interface{} `json:"output_token"`
+	LimitKey     string      `json:"limit_key"`
+	MoreResults  string      `json:"more_results"`
+}
+
+// InputTokensString returns input tokens formatted as a list
+func (p *Paginator) InputTokensString() string {
+	str := p.InputTokens.([]string)
+	return fmt.Sprintf("%#v", str)
+}
+
+// OutputTokensString returns output tokens formatted as a list
+func (p *Paginator) OutputTokensString() string {
+	str := p.OutputTokens.([]string)
+	return fmt.Sprintf("%#v", str)
+}
+
+// used for unmarshaling from the paginators JSON file
+type paginationDefinitions struct {
+	*API
+	Pagination map[string]Paginator
+}
+
+// AttachPaginators attaches pagination configuration from filename to the API.
+func (a *API) AttachPaginators(filename string) {
+	p := paginationDefinitions{API: a}
+
+	f, err := os.Open(filename)
+	defer f.Close()
+	if err != nil {
+		panic(err)
+	}
+	err = json.NewDecoder(f).Decode(&p)
+	if err != nil {
+		panic(err)
+	}
+
+	p.setup()
+}
+
+// setup runs post-processing on the paginator configuration.
+func (p *paginationDefinitions) setup() {
+	for n, e := range p.Pagination {
+		if e.InputTokens == nil || e.OutputTokens == nil {
+			continue
+		}
+		paginator := e
+
+		switch t := paginator.InputTokens.(type) {
+		case string:
+			paginator.InputTokens = []string{t}
+		case []interface{}:
+			toks := []string{}
+			for _, e := range t {
+				s := e.(string)
+				toks = append(toks, s)
+			}
+			paginator.InputTokens = toks
+		}
+		switch t := paginator.OutputTokens.(type) {
+		case string:
+			paginator.OutputTokens = []string{t}
+		case []interface{}:
+			toks := []string{}
+			for _, e := range t {
+				s := e.(string)
+				toks = append(toks, s)
+			}
+			paginator.OutputTokens = toks
+		}
+
+		if o, ok := p.Operations[n]; ok {
+			o.Paginator = &paginator
+		} else {
+			panic("unknown operation for paginator " + n)
+		}
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/model/api/param_filler.go b/vendor/github.com/aws/aws-sdk-go/private/model/api/param_filler.go
new file mode 100644
index 000000000..674d1e478
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/model/api/param_filler.go
@@ -0,0 +1,131 @@
+package api
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/private/util"
+)
+
+// A paramFiller provides string formatting for a shape and its types.
+type paramFiller struct {
+	prefixPackageName bool
+}
+
+// typeName returns the type name of a shape.
+func (f paramFiller) typeName(shape *Shape) string {
+	if f.prefixPackageName && shape.Type == "structure" {
+		return "*" + shape.API.PackageName() + "." + shape.GoTypeElem()
+	}
+	return shape.GoType()
+}
+
+// ParamsStructFromJSON returns a Go-syntax representation of a structure
+// initialized from decoded JSON value data.
+func ParamsStructFromJSON(value interface{}, shape *Shape, prefixPackageName bool) string {
+	f := paramFiller{prefixPackageName: prefixPackageName}
+	return util.GoFmt(f.paramsStructAny(value, shape))
+}
+
+// paramsStructAny returns the string representation of any value.
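+//
+// For example (hypothetical shapes), a "string" shape with value "abc"
+// renders as aws.String("abc"), and a "list" of that shape renders as
+// []*string{aws.String("abc"),} before the result is run through gofmt.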
+func (f paramFiller) paramsStructAny(value interface{}, shape *Shape) string { + if value == nil { + return "" + } + + switch shape.Type { + case "structure": + if value != nil { + vmap := value.(map[string]interface{}) + return f.paramsStructStruct(vmap, shape) + } + case "list": + vlist := value.([]interface{}) + return f.paramsStructList(vlist, shape) + case "map": + vmap := value.(map[string]interface{}) + return f.paramsStructMap(vmap, shape) + case "string", "character": + v := reflect.Indirect(reflect.ValueOf(value)) + if v.IsValid() { + return fmt.Sprintf("aws.String(%#v)", v.Interface()) + } + case "blob": + v := reflect.Indirect(reflect.ValueOf(value)) + if v.IsValid() && shape.Streaming { + return fmt.Sprintf("aws.ReadSeekCloser(bytes.NewBufferString(%#v))", v.Interface()) + } else if v.IsValid() { + return fmt.Sprintf("[]byte(%#v)", v.Interface()) + } + case "boolean": + v := reflect.Indirect(reflect.ValueOf(value)) + if v.IsValid() { + return fmt.Sprintf("aws.Bool(%#v)", v.Interface()) + } + case "integer", "long": + v := reflect.Indirect(reflect.ValueOf(value)) + if v.IsValid() { + return fmt.Sprintf("aws.Int64(%v)", v.Interface()) + } + case "float", "double": + v := reflect.Indirect(reflect.ValueOf(value)) + if v.IsValid() { + return fmt.Sprintf("aws.Float64(%v)", v.Interface()) + } + case "timestamp": + v := reflect.Indirect(reflect.ValueOf(value)) + if v.IsValid() { + return fmt.Sprintf("aws.Time(time.Unix(%d, 0))", int(v.Float())) + } + default: + panic("Unhandled type " + shape.Type) + } + return "" +} + +// paramsStructStruct returns the string representation of a structure +func (f paramFiller) paramsStructStruct(value map[string]interface{}, shape *Shape) string { + out := "&" + f.typeName(shape)[1:] + "{\n" + for _, n := range shape.MemberNames() { + ref := shape.MemberRefs[n] + name := findParamMember(value, n) + + if val := f.paramsStructAny(value[name], ref.Shape); val != "" { + out += fmt.Sprintf("%s: %s,\n", n, val) + } + } + out += "}" + return out +} + +// paramsStructMap returns the string representation of a map of values +func (f paramFiller) paramsStructMap(value map[string]interface{}, shape *Shape) string { + out := f.typeName(shape) + "{\n" + keys := util.SortedKeys(value) + for _, k := range keys { + v := value[k] + out += fmt.Sprintf("%q: %s,\n", k, f.paramsStructAny(v, shape.ValueRef.Shape)) + } + out += "}" + return out +} + +// paramsStructList returns the string representation of slice of values +func (f paramFiller) paramsStructList(value []interface{}, shape *Shape) string { + out := f.typeName(shape) + "{\n" + for _, v := range value { + out += fmt.Sprintf("%s,\n", f.paramsStructAny(v, shape.MemberRef.Shape)) + } + out += "}" + return out +} + +// findParamMember searches a map for a key ignoring case. Returns the map key if found. 
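+//
+// e.g. findParamMember(map[string]interface{}{"bucketName": v}, "BucketName")
+// returns "bucketName"; the empty string means there was no
+// case-insensitive match.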
+func findParamMember(value map[string]interface{}, key string) string { + for actualKey := range value { + if strings.ToLower(key) == strings.ToLower(actualKey) { + return actualKey + } + } + return "" +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/model/api/passes.go b/vendor/github.com/aws/aws-sdk-go/private/model/api/passes.go new file mode 100644 index 000000000..3492d58d1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/model/api/passes.go @@ -0,0 +1,255 @@ +package api + +import ( + "fmt" + "regexp" + "strings" +) + +// updateTopLevelShapeReferences moves resultWrapper, locationName, and +// xmlNamespace traits from toplevel shape references to the toplevel +// shapes for easier code generation +func (a *API) updateTopLevelShapeReferences() { + for _, o := range a.Operations { + // these are for REST-XML services + if o.InputRef.LocationName != "" { + o.InputRef.Shape.LocationName = o.InputRef.LocationName + } + if o.InputRef.Location != "" { + o.InputRef.Shape.Location = o.InputRef.Location + } + if o.InputRef.Payload != "" { + o.InputRef.Shape.Payload = o.InputRef.Payload + } + if o.InputRef.XMLNamespace.Prefix != "" { + o.InputRef.Shape.XMLNamespace.Prefix = o.InputRef.XMLNamespace.Prefix + } + if o.InputRef.XMLNamespace.URI != "" { + o.InputRef.Shape.XMLNamespace.URI = o.InputRef.XMLNamespace.URI + } + } + +} + +// writeShapeNames sets each shape's API and shape name values. Binding the +// shape to its parent API. +func (a *API) writeShapeNames() { + for n, s := range a.Shapes { + s.API = a + s.ShapeName = n + } +} + +func (a *API) resolveReferences() { + resolver := referenceResolver{API: a, visited: map[*ShapeRef]bool{}} + + for _, s := range a.Shapes { + resolver.resolveShape(s) + } + + for _, o := range a.Operations { + o.API = a // resolve parent reference + + resolver.resolveReference(&o.InputRef) + resolver.resolveReference(&o.OutputRef) + } +} + +// A referenceResolver provides a way to resolve shape references to +// shape definitions. +type referenceResolver struct { + *API + visited map[*ShapeRef]bool +} + +// resolveReference updates a shape reference to reference the API and +// its shape definition. All other nested references are also resolved. +func (r *referenceResolver) resolveReference(ref *ShapeRef) { + if ref.ShapeName == "" { + return + } + + if shape, ok := r.API.Shapes[ref.ShapeName]; ok { + ref.API = r.API // resolve reference back to API + ref.Shape = shape // resolve shape reference + + if r.visited[ref] { + return + } + r.visited[ref] = true + + shape.refs = append(shape.refs, ref) // register the ref + + // resolve shape's references, if it has any + r.resolveShape(shape) + } +} + +// resolveShape resolves a shape's Member Key Value, and nested member +// shape references. +func (r *referenceResolver) resolveShape(shape *Shape) { + r.resolveReference(&shape.MemberRef) + r.resolveReference(&shape.KeyRef) + r.resolveReference(&shape.ValueRef) + for _, m := range shape.MemberRefs { + r.resolveReference(m) + } +} + +// renameToplevelShapes renames all top level shapes of an API to their +// exportable variant. The shapes are also updated to include notations +// if they are Input or Outputs. 
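+//
+// e.g. for a hypothetical GetWidget operation whose input shape is
+// referenced exactly once, the shape is renamed to GetWidgetInput; shapes
+// with multiple references keep their names so shared definitions stay intact.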
+func (a *API) renameToplevelShapes() {
+	for _, v := range a.Operations {
+		if v.HasInput() {
+			name := v.ExportedName + "Input"
+			switch n := len(v.InputRef.Shape.refs); {
+			case n == 1 && a.Shapes[name] == nil:
+				v.InputRef.Shape.Rename(name)
+			}
+		}
+		if v.HasOutput() {
+			name := v.ExportedName + "Output"
+			switch n := len(v.OutputRef.Shape.refs); {
+			case n == 1 && a.Shapes[name] == nil:
+				v.OutputRef.Shape.Rename(name)
+			}
+		}
+		v.InputRef.Payload = a.ExportableName(v.InputRef.Payload)
+		v.OutputRef.Payload = a.ExportableName(v.OutputRef.Payload)
+	}
+}
+
+// fixStutterNames fixes all name stuttering based on Go naming conventions.
+// "Stuttering" is when the prefix of a structure or function matches the
+// package name (case insensitive).
+func (a *API) fixStutterNames() {
+	str, end := a.StructName(), ""
+	if len(str) > 1 {
+		l := len(str) - 1
+		str, end = str[0:l], str[l:]
+	}
+	re := regexp.MustCompile(fmt.Sprintf(`\A(?i:%s)%s`, str, end))
+
+	for name, op := range a.Operations {
+		newName := re.ReplaceAllString(name, "")
+		if newName != name {
+			delete(a.Operations, name)
+			a.Operations[newName] = op
+		}
+		op.ExportedName = newName
+	}
+
+	for k, s := range a.Shapes {
+		newName := re.ReplaceAllString(k, "")
+		if newName != s.ShapeName {
+			s.Rename(newName)
+		}
+	}
+}
+
+// renameExportable renames all operation names to be exportable names.
+// All nested Shape names are also updated to the exportable variant.
+func (a *API) renameExportable() {
+	for name, op := range a.Operations {
+		newName := a.ExportableName(name)
+		if newName != name {
+			delete(a.Operations, name)
+			a.Operations[newName] = op
+		}
+		op.ExportedName = newName
+	}
+
+	for k, s := range a.Shapes {
+		// FIXME SNS has lower and uppercased shape names with the same name,
+		// except the lowercased variant is used exclusively for string and
+		// other primitive types. Renaming both would cause a collision.
+		// We work around this by only renaming the structure shapes.
+		if s.Type == "string" {
+			continue
+		}
+
+		for mName, member := range s.MemberRefs {
+			newName := a.ExportableName(mName)
+			if newName != mName {
+				delete(s.MemberRefs, mName)
+				s.MemberRefs[newName] = member
+
+				// also apply locationName trait so we keep the old one
+				// but only if there's no locationName trait on ref or shape
+				if member.LocationName == "" && member.Shape.LocationName == "" {
+					member.LocationName = mName
+				}
+			}
+
+			if newName == "_" {
+				panic("Shape " + s.ShapeName + " uses reserved member name '_'")
+			}
+		}
+
+		newName := a.ExportableName(k)
+		if newName != s.ShapeName {
+			s.Rename(newName)
+		}
+
+		s.Payload = a.ExportableName(s.Payload)
+
+		// fix required trait names
+		for i, n := range s.Required {
+			s.Required[i] = a.ExportableName(n)
+		}
+	}
+
+	for _, s := range a.Shapes {
+		// fix enum names
+		if s.IsEnum() {
+			s.EnumConsts = make([]string, len(s.Enum))
+			for i := range s.Enum {
+				shape := s.ShapeName
+				shape = strings.ToUpper(shape[0:1]) + shape[1:]
+				s.EnumConsts[i] = shape + s.EnumName(i)
+			}
+		}
+	}
+}
+
+// createInputOutputShapes creates toplevel input/output shapes if they
+// have not been defined in the API. This normalizes all APIs to always
+// have an input and output structure in the signature.
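+//
+// e.g. a hypothetical DeleteWidget operation declaring no output still gets
+// a placeholder DeleteWidgetOutput shape, so the generated method can return
+// a typed, if empty, value.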
+func (a *API) createInputOutputShapes() { + for _, op := range a.Operations { + if !op.HasInput() { + setAsPlacholderShape(&op.InputRef, op.ExportedName+"Input", a) + } + if !op.HasOutput() { + setAsPlacholderShape(&op.OutputRef, op.ExportedName+"Output", a) + } + } +} + +func setAsPlacholderShape(tgtShapeRef *ShapeRef, name string, a *API) { + shape := a.makeIOShape(name) + shape.Placeholder = true + *tgtShapeRef = ShapeRef{API: a, ShapeName: shape.ShapeName, Shape: shape} + shape.refs = append(shape.refs, tgtShapeRef) +} + +// makeIOShape returns a pointer to a new Shape initialized by the name provided. +func (a *API) makeIOShape(name string) *Shape { + shape := &Shape{ + API: a, ShapeName: name, Type: "structure", + MemberRefs: map[string]*ShapeRef{}, + } + a.Shapes[name] = shape + return shape +} + +// removeUnusedShapes removes shapes from the API which are not referenced by any +// other shape in the API. +func (a *API) removeUnusedShapes() { + for n, s := range a.Shapes { + if len(s.refs) == 0 { + delete(a.Shapes, n) + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/model/api/shape.go b/vendor/github.com/aws/aws-sdk-go/private/model/api/shape.go new file mode 100644 index 000000000..7a9d850dd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/model/api/shape.go @@ -0,0 +1,505 @@ +package api + +import ( + "bytes" + "fmt" + "path" + "regexp" + "sort" + "strings" + "text/template" +) + +// A ShapeRef defines the usage of a shape within the API. +type ShapeRef struct { + API *API `json:"-"` + Shape *Shape `json:"-"` + Documentation string + ShapeName string `json:"shape"` + Location string + LocationName string + QueryName string + Flattened bool + Streaming bool + XMLAttribute bool + XMLNamespace XMLInfo + Payload string + IdempotencyToken bool `json:"idempotencyToken"` + Deprecated bool `json:"deprecated"` +} + +// A XMLInfo defines URL and prefix for Shapes when rendered as XML +type XMLInfo struct { + Prefix string + URI string +} + +// A Shape defines the definition of a shape type +type Shape struct { + API *API `json:"-"` + ShapeName string + Documentation string + MemberRefs map[string]*ShapeRef `json:"members"` + MemberRef ShapeRef `json:"member"` + KeyRef ShapeRef `json:"key"` + ValueRef ShapeRef `json:"value"` + Required []string + Payload string + Type string + Exception bool + Enum []string + EnumConsts []string + Flattened bool + Streaming bool + Location string + LocationName string + IdempotencyToken bool `json:"idempotencyToken"` + XMLNamespace XMLInfo + Min float64 // optional Minimum length (string, list) or value (number) + Max float64 // optional Maximum length (string, list) or value (number) + + refs []*ShapeRef // References to this shape + resolvePkg string // use this package in the goType() if present + + // Defines if the shape is a placeholder and should not be used directly + Placeholder bool + + Deprecated bool `json:"deprecated"` + + Validations ShapeValidations +} + +// GoTags returns the struct tags for a shape. +func (s *Shape) GoTags(root, required bool) string { + ref := &ShapeRef{ShapeName: s.ShapeName, API: s.API, Shape: s} + return ref.GoTags(root, required) +} + +// Rename changes the name of the Shape to newName. Also updates +// the associated API's reference to use newName. 
+func (s *Shape) Rename(newName string) { + for _, r := range s.refs { + r.ShapeName = newName + } + + delete(s.API.Shapes, s.ShapeName) + s.API.Shapes[newName] = s + s.ShapeName = newName +} + +// MemberNames returns a slice of struct member names. +func (s *Shape) MemberNames() []string { + i, names := 0, make([]string, len(s.MemberRefs)) + for n := range s.MemberRefs { + names[i] = n + i++ + } + sort.Strings(names) + return names +} + +// GoTypeWithPkgName returns a shape's type as a string with the package name in +// . format. Package naming only applies to structures. +func (s *Shape) GoTypeWithPkgName() string { + return goType(s, true) +} + +// GoStructType returns the type of a struct field based on the API +// model definition. +func (s *Shape) GoStructType(name string, ref *ShapeRef) string { + if (ref.Streaming || ref.Shape.Streaming) && s.Payload == name { + rtype := "io.ReadSeeker" + if len(s.refs) > 1 { + rtype = "aws.ReaderSeekCloser" + } else if strings.HasSuffix(s.ShapeName, "Output") { + rtype = "io.ReadCloser" + } + + s.API.imports["io"] = true + return rtype + } + + for _, v := range s.Validations { + // TODO move this to shape validation resolution + if (v.Ref.Shape.Type == "map" || v.Ref.Shape.Type == "list") && v.Type == ShapeValidationNested { + s.API.imports["fmt"] = true + } + } + + return ref.GoType() +} + +// GoType returns a shape's Go type +func (s *Shape) GoType() string { + return goType(s, false) +} + +// GoType returns a shape ref's Go type. +func (ref *ShapeRef) GoType() string { + if ref.Shape == nil { + panic(fmt.Errorf("missing shape definition on reference for %#v", ref)) + } + + return ref.Shape.GoType() +} + +// GoTypeWithPkgName returns a shape's type as a string with the package name in +// . format. Package naming only applies to structures. +func (ref *ShapeRef) GoTypeWithPkgName() string { + if ref.Shape == nil { + panic(fmt.Errorf("missing shape definition on reference for %#v", ref)) + } + + return ref.Shape.GoTypeWithPkgName() +} + +// Returns a string version of the Shape's type. +// If withPkgName is true, the package name will be added as a prefix +func goType(s *Shape, withPkgName bool) string { + switch s.Type { + case "structure": + if withPkgName || s.resolvePkg != "" { + pkg := s.resolvePkg + if pkg != "" { + s.API.imports[pkg] = true + pkg = path.Base(pkg) + } else { + pkg = s.API.PackageName() + } + return fmt.Sprintf("*%s.%s", pkg, s.ShapeName) + } + return "*" + s.ShapeName + case "map": + return "map[string]" + s.ValueRef.GoType() + case "list": + return "[]" + s.MemberRef.GoType() + case "boolean": + return "*bool" + case "string", "character": + return "*string" + case "blob": + return "[]byte" + case "integer", "long": + return "*int64" + case "float", "double": + return "*float64" + case "timestamp": + s.API.imports["time"] = true + return "*time.Time" + default: + panic("Unsupported shape type: " + s.Type) + } +} + +// GoTypeElem returns the Go type for the Shape. If the shape type is a pointer just +// the type will be returned minus the pointer *. +func (s *Shape) GoTypeElem() string { + t := s.GoType() + if strings.HasPrefix(t, "*") { + return t[1:] + } + return t +} + +// GoTypeElem returns the Go type for the Shape. If the shape type is a pointer just +// the type will be returned minus the pointer *. 
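+//
+// e.g. a structure shape Foo has GoType "*Foo" and GoTypeElem "Foo".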
+func (ref *ShapeRef) GoTypeElem() string {
+	if ref.Shape == nil {
+		panic(fmt.Errorf("missing shape definition on reference for %#v", ref))
+	}
+
+	return ref.Shape.GoTypeElem()
+}
+
+// ShapeTag is a struct tag that will be applied to a shape's generated code
type ShapeTag struct {
+	Key, Val string
+}
+
+// String returns the string representation of the shape tag
+func (s ShapeTag) String() string {
+	return fmt.Sprintf(`%s:"%s"`, s.Key, s.Val)
+}
+
+// ShapeTags is a collection of shape tags and provides serialization of the
+// tags in an ordered list.
+type ShapeTags []ShapeTag
+
+// Join returns an ordered serialization of the shape tags with the provided
+// separator.
+func (s ShapeTags) Join(sep string) string {
+	o := &bytes.Buffer{}
+	for i, t := range s {
+		o.WriteString(t.String())
+		if i < len(s)-1 {
+			o.WriteString(sep)
+		}
+	}
+
+	return o.String()
+}
+
+// String is an alias for Join with the empty space separator.
+func (s ShapeTags) String() string {
+	return s.Join(" ")
+}
+
+// GoTags returns the rendered tags string for the ShapeRef
+func (ref *ShapeRef) GoTags(toplevel bool, isRequired bool) string {
+	tags := ShapeTags{}
+
+	if ref.Location != "" {
+		tags = append(tags, ShapeTag{"location", ref.Location})
+	} else if ref.Shape.Location != "" {
+		tags = append(tags, ShapeTag{"location", ref.Shape.Location})
+	}
+
+	if ref.LocationName != "" {
+		tags = append(tags, ShapeTag{"locationName", ref.LocationName})
+	} else if ref.Shape.LocationName != "" {
+		tags = append(tags, ShapeTag{"locationName", ref.Shape.LocationName})
+	}
+
+	if ref.QueryName != "" {
+		tags = append(tags, ShapeTag{"queryName", ref.QueryName})
+	}
+	if ref.Shape.MemberRef.LocationName != "" {
+		tags = append(tags, ShapeTag{"locationNameList", ref.Shape.MemberRef.LocationName})
+	}
+	if ref.Shape.KeyRef.LocationName != "" {
+		tags = append(tags, ShapeTag{"locationNameKey", ref.Shape.KeyRef.LocationName})
+	}
+	if ref.Shape.ValueRef.LocationName != "" {
+		tags = append(tags, ShapeTag{"locationNameValue", ref.Shape.ValueRef.LocationName})
+	}
+	if ref.Shape.Min > 0 {
+		tags = append(tags, ShapeTag{"min", fmt.Sprintf("%v", ref.Shape.Min)})
+	}
+
+	if ref.Deprecated || ref.Shape.Deprecated {
+		tags = append(tags, ShapeTag{"deprecated", "true"})
+	}
+	// All shapes have a type
+	tags = append(tags, ShapeTag{"type", ref.Shape.Type})
+
+	// embed the timestamp type for easier lookups
+	if ref.Shape.Type == "timestamp" {
+		t := ShapeTag{Key: "timestampFormat"}
+		if ref.Location == "header" {
+			t.Val = "rfc822"
+		} else {
+			switch ref.API.Metadata.Protocol {
+			case "json", "rest-json":
+				t.Val = "unix"
+			case "rest-xml", "ec2", "query":
+				t.Val = "iso8601"
+			}
+		}
+		tags = append(tags, t)
+	}
+
+	if ref.Shape.Flattened || ref.Flattened {
+		tags = append(tags, ShapeTag{"flattened", "true"})
+	}
+	if ref.XMLAttribute {
+		tags = append(tags, ShapeTag{"xmlAttribute", "true"})
+	}
+	if isRequired {
+		tags = append(tags, ShapeTag{"required", "true"})
+	}
+	if ref.Shape.IsEnum() {
+		tags = append(tags, ShapeTag{"enum", ref.ShapeName})
+	}
+
+	if toplevel {
+		if ref.Shape.Payload != "" {
+			tags = append(tags, ShapeTag{"payload", ref.Shape.Payload})
+		}
+		if ref.XMLNamespace.Prefix != "" {
+			tags = append(tags, ShapeTag{"xmlPrefix", ref.XMLNamespace.Prefix})
+		} else if ref.Shape.XMLNamespace.Prefix != "" {
+			tags = append(tags, ShapeTag{"xmlPrefix", ref.Shape.XMLNamespace.Prefix})
+		}
+		if ref.XMLNamespace.URI != "" {
+			tags = append(tags, ShapeTag{"xmlURI", ref.XMLNamespace.URI})
+		} else if ref.Shape.XMLNamespace.URI != "" {
+			tags = append(tags, ShapeTag{"xmlURI", ref.Shape.XMLNamespace.URI})
+		}
+	}
+
+	if ref.IdempotencyToken || ref.Shape.IdempotencyToken {
+		tags = append(tags, ShapeTag{"idempotencyToken", "true"})
+	}
+
+	return fmt.Sprintf("`%s`", tags)
+}
+
+// Docstring returns the godocs formatted documentation
+func (ref *ShapeRef) Docstring() string {
+	if ref.Documentation != "" {
+		return strings.Trim(ref.Documentation, "\n ")
+	}
+	return ref.Shape.Docstring()
+}
+
+// Docstring returns the godocs formatted documentation
+func (s *Shape) Docstring() string {
+	return strings.Trim(s.Documentation, "\n ")
+}
+
+var goCodeStringerTmpl = template.Must(template.New("goCodeStringerTmpl").Parse(`
+// String returns the string representation
+func (s {{ .ShapeName }}) String() string {
+	return awsutil.Prettify(s)
+}
+// GoString returns the string representation
+func (s {{ .ShapeName }}) GoString() string {
+	return s.String()
+}
+`))
+
+// GoCodeStringers renders the Stringers for API input/output shapes
+func (s *Shape) GoCodeStringers() string {
+	w := bytes.Buffer{}
+	if err := goCodeStringerTmpl.Execute(&w, s); err != nil {
+		panic(fmt.Sprintln("Unexpected error executing GoCodeStringers template", err))
+	}
+
+	return w.String()
+}
+
+var enumStrip = regexp.MustCompile(`[^a-zA-Z0-9_:\./-]`)
+var enumDelims = regexp.MustCompile(`[-_:\./]+`)
+var enumCamelCase = regexp.MustCompile(`([a-z])([A-Z])`)
+
+// EnumName returns the Nth enum in the shapes Enum list
+func (s *Shape) EnumName(n int) string {
+	enum := s.Enum[n]
+	enum = enumStrip.ReplaceAllLiteralString(enum, "")
+	enum = enumCamelCase.ReplaceAllString(enum, "$1-$2")
+	parts := enumDelims.Split(enum, -1)
+	for i, v := range parts {
+		v = strings.ToLower(v)
+		parts[i] = ""
+		if len(v) > 0 {
+			parts[i] = strings.ToUpper(v[0:1])
+		}
+		if len(v) > 1 {
+			parts[i] += v[1:]
+		}
+	}
+	enum = strings.Join(parts, "")
+	enum = strings.ToUpper(enum[0:1]) + enum[1:]
+	return enum
+}
+
+// NestedShape returns the shape pointer value for the shape which is nested
+// under the current shape. If the shape is not nested nil will be returned.
+//
+// structure: the current shape is returned
+// map: the value shape of the map is returned
+// list: the element shape of the list is returned
+func (s *Shape) NestedShape() *Shape {
+	var nestedShape *Shape
+	switch s.Type {
+	case "structure":
+		nestedShape = s
+	case "map":
+		nestedShape = s.ValueRef.Shape
+	case "list":
+		nestedShape = s.MemberRef.Shape
+	}
+
+	return nestedShape
+}
+
+var structShapeTmpl = template.Must(template.New("StructShape").Parse(`
+{{ .Docstring }}
+type {{ .ShapeName }} struct {
+	_ struct{} {{ .GoTags true false }}
+
+	{{ $context := . -}}
+	{{ range $_, $name := $context.MemberNames -}}
+		{{ $elem := index $context.MemberRefs $name }}
+		{{ $isRequired := $context.IsRequired $name }}
+		{{ $elem.Docstring }}
+		{{ $name }} {{ $context.GoStructType $name $elem }} {{ $elem.GoTags false $isRequired }}
+	{{ end }}
+}
+{{ if not .API.NoStringerMethods }}
+	{{ .GoCodeStringers }}
+{{ end }}
+{{ if not .API.NoValidataShapeMethods }}
+	{{ if .Validations -}}
+		{{ .Validations.GoCode . }}
+	{{ end }}
+{{ end }}
+`))
+
+var enumShapeTmpl = template.Must(template.New("EnumShape").Parse(`
+{{ .Docstring }}
+const (
+	{{ $context := . -}}
+	{{ range $index, $elem := .Enum -}}
+		// @enum {{ $context.ShapeName }}
+		{{ index $context.EnumConsts $index }} = "{{ $elem }}"
+	{{ end }}
+)
+`))
+
+// GoCode returns the rendered Go code for the Shape.
+func (s *Shape) GoCode() string { + b := &bytes.Buffer{} + + switch { + case s.Type == "structure": + if err := structShapeTmpl.Execute(b, s); err != nil { + panic(fmt.Sprintf("Failed to generate struct shape %s, %v\n", s.ShapeName, err)) + } + case s.IsEnum(): + if err := enumShapeTmpl.Execute(b, s); err != nil { + panic(fmt.Sprintf("Failed to generate enum shape %s, %v\n", s.ShapeName, err)) + } + default: + panic(fmt.Sprintln("Cannot generate toplevel shape for", s.Type)) + } + + return b.String() +} + +// IsEnum returns whether this shape is an enum list +func (s *Shape) IsEnum() bool { + return s.Type == "string" && len(s.Enum) > 0 +} + +// IsRequired returns if member is a required field. +func (s *Shape) IsRequired(member string) bool { + for _, n := range s.Required { + if n == member { + return true + } + } + return false +} + +// IsInternal returns whether the shape was defined in this package +func (s *Shape) IsInternal() bool { + return s.resolvePkg == "" +} + +// removeRef removes a shape reference from the list of references this +// shape is used in. +func (s *Shape) removeRef(ref *ShapeRef) { + r := s.refs + for i := 0; i < len(r); i++ { + if r[i] == ref { + j := i + 1 + copy(r[i:], r[j:]) + for k, n := len(r)-j+i, len(r); k < n; k++ { + r[k] = nil // free up the end of the list + } // for k + s.refs = r[:len(r)-j+i] + break + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/model/api/shape_validation.go b/vendor/github.com/aws/aws-sdk-go/private/model/api/shape_validation.go new file mode 100644 index 000000000..97093ab2a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/model/api/shape_validation.go @@ -0,0 +1,153 @@ +package api + +import ( + "bytes" + "fmt" + "text/template" +) + +// A ShapeValidationType is the type of validation that a shape needs +type ShapeValidationType int + +const ( + // ShapeValidationRequired states the shape must be set + ShapeValidationRequired = iota + + // ShapeValidationMinVal states the shape must have at least a number of + // elements, or for numbers a minimum value + ShapeValidationMinVal + + // ShapeValidationNested states the shape has nested values that need + // to be validated + ShapeValidationNested +) + +// A ShapeValidation contains information about a shape and the type of validation +// that is needed +type ShapeValidation struct { + // Name of the shape to be validated + Name string + // Reference to the shape within the context the shape is referenced + Ref *ShapeRef + // Type of validation needed + Type ShapeValidationType +} + +var validationGoCodeTmpls = template.Must(template.New("validationGoCodeTmpls").Parse(` +{{ define "requiredValue" -}} + if s.{{ .Name }} == nil { + invalidParams.Add(request.NewErrParamRequired("{{ .Name }}")) + } +{{- end }} +{{ define "minLen" -}} + if s.{{ .Name }} != nil && len(s.{{ .Name }}) < {{ .Ref.Shape.Min }} { + invalidParams.Add(request.NewErrParamMinLen("{{ .Name }}", {{ .Ref.Shape.Min }})) + } +{{- end }} +{{ define "minLenString" -}} + if s.{{ .Name }} != nil && len(*s.{{ .Name }}) < {{ .Ref.Shape.Min }} { + invalidParams.Add(request.NewErrParamMinLen("{{ .Name }}", {{ .Ref.Shape.Min }})) + } +{{- end }} +{{ define "minVal" -}} + if s.{{ .Name }} != nil && *s.{{ .Name }} < {{ .Ref.Shape.Min }} { + invalidParams.Add(request.NewErrParamMinValue("{{ .Name }}", {{ .Ref.Shape.Min }})) + } +{{- end }} +{{ define "nestedMapList" -}} + if s.{{ .Name }} != nil { + for i, v := range s.{{ .Name }} { + if v == nil { continue } + if err := v.Validate(); err != nil { + 
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "{{ .Name }}", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+{{- end }}
+{{ define "nestedStruct" -}}
+	if s.{{ .Name }} != nil {
+		if err := s.{{ .Name }}.Validate(); err != nil {
+			invalidParams.AddNested("{{ .Name }}", err.(request.ErrInvalidParams))
+		}
+	}
+{{- end }}
+`))
+
+// GoCode returns the generated Go code for the Shape with its validation type.
+func (sv ShapeValidation) GoCode() string {
+	var err error
+
+	w := &bytes.Buffer{}
+	switch sv.Type {
+	case ShapeValidationRequired:
+		err = validationGoCodeTmpls.ExecuteTemplate(w, "requiredValue", sv)
+	case ShapeValidationMinVal:
+		switch sv.Ref.Shape.Type {
+		case "list", "map", "blob":
+			err = validationGoCodeTmpls.ExecuteTemplate(w, "minLen", sv)
+		case "string":
+			err = validationGoCodeTmpls.ExecuteTemplate(w, "minLenString", sv)
+		case "integer", "long", "float", "double":
+			err = validationGoCodeTmpls.ExecuteTemplate(w, "minVal", sv)
+		default:
+			panic(fmt.Sprintf("ShapeValidation.GoCode, %s's type %s, no min value handling",
+				sv.Name, sv.Ref.Shape.Type))
+		}
+	case ShapeValidationNested:
+		switch sv.Ref.Shape.Type {
+		case "map", "list":
+			err = validationGoCodeTmpls.ExecuteTemplate(w, "nestedMapList", sv)
+		default:
+			err = validationGoCodeTmpls.ExecuteTemplate(w, "nestedStruct", sv)
+		}
+	default:
+		panic(fmt.Sprintf("ShapeValidation.GoCode, %s's type %d, unknown validation type",
+			sv.Name, sv.Type))
+	}
+
+	if err != nil {
+		panic(fmt.Sprintf("ShapeValidation.GoCode failed, err: %v", err))
+	}
+
+	return w.String()
+}
+
+// A ShapeValidations is a collection of shape validations needed nested within
+// a parent shape
+type ShapeValidations []ShapeValidation
+
+var validateShapeTmpl = template.Must(template.New("ValidateShape").Parse(`
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *{{ .Shape.ShapeName }}) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "{{ .Shape.ShapeName }}"}
+	{{ range $_, $v := .Validations -}}
+		{{ $v.GoCode }}
+	{{ end }}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+`))
+
+// GoCode generates the Go code needed to perform validations for the
+// shape and its nested fields.
+func (vs ShapeValidations) GoCode(shape *Shape) string {
+	buf := &bytes.Buffer{}
+	validateShapeTmpl.Execute(buf, map[string]interface{}{
+		"Shape":       shape,
+		"Validations": vs,
+	})
+	return buf.String()
+}
+
+// Has returns true or false if the ShapeValidations already contains the
+// reference and validation type.
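+//
+// e.g. vs.Has(ref, ShapeValidationRequired) reports whether that exact
+// reference already carries a required-field validation, so callers can
+// avoid appending duplicates.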
+func (vs ShapeValidations) Has(ref *ShapeRef, typ ShapeValidationType) bool { + for _, v := range vs { + if v.Ref == ref && v.Type == typ { + return true + } + } + return false +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/model/api/shapetag_test.go b/vendor/github.com/aws/aws-sdk-go/private/model/api/shapetag_test.go new file mode 100644 index 000000000..d00547be9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/model/api/shapetag_test.go @@ -0,0 +1,25 @@ +// +build 1.6 + +package api_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go/private/model/api" + "github.com/stretchr/testify/assert" +) + +func TestShapeTagJoin(t *testing.T) { + s := api.ShapeTags{ + {Key: "location", Val: "query"}, + {Key: "locationName", Val: "abc"}, + {Key: "type", Val: "string"}, + } + + expected := `location:"query" locationName:"abc" type:"string"` + + o := s.Join(" ") + o2 := s.String() + assert.Equal(t, expected, o) + assert.Equal(t, expected, o2) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/model/api/waiters.go b/vendor/github.com/aws/aws-sdk-go/private/model/api/waiters.go new file mode 100644 index 000000000..ab33c92af --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/model/api/waiters.go @@ -0,0 +1,133 @@ +package api + +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "sort" + "text/template" +) + +// A Waiter is an individual waiter definition. +type Waiter struct { + Name string + Delay int + MaxAttempts int + OperationName string `json:"operation"` + Operation *Operation + Acceptors []WaitAcceptor +} + +// A WaitAcceptor is an individual wait acceptor definition. +type WaitAcceptor struct { + Expected interface{} + Matcher string + State string + Argument string +} + +// WaitersGoCode generates and returns Go code for each of the waiters of +// this API. +func (a *API) WaitersGoCode() string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "import (\n\t%q\n)", + "github.com/aws/aws-sdk-go/private/waiter") + + for _, w := range a.Waiters { + buf.WriteString(w.GoCode()) + } + return buf.String() +} + +// used for unmarshaling from the waiter JSON file +type waiterDefinitions struct { + *API + Waiters map[string]Waiter +} + +// AttachWaiters reads a file of waiter definitions, and adds those to the API. +// Will panic if an error occurs. 
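+//
+// A waiter definition in a waiters-2.json file looks roughly like this
+// (abridged, with illustrative values):
+//
+//	"BucketExists": {
+//		"operation": "HeadBucket",
+//		"delay": 5,
+//		"maxAttempts": 20,
+//		"acceptors": [{ "state": "success", "matcher": "status", "expected": 200 }]
+//	}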
+func (a *API) AttachWaiters(filename string) { + p := waiterDefinitions{API: a} + + f, err := os.Open(filename) + defer f.Close() + if err != nil { + panic(err) + } + err = json.NewDecoder(f).Decode(&p) + if err != nil { + panic(err) + } + + p.setup() +} + +func (p *waiterDefinitions) setup() { + p.API.Waiters = []Waiter{} + i, keys := 0, make([]string, len(p.Waiters)) + for k := range p.Waiters { + keys[i] = k + i++ + } + sort.Strings(keys) + + for _, n := range keys { + e := p.Waiters[n] + n = p.ExportableName(n) + e.Name = n + e.OperationName = p.ExportableName(e.OperationName) + e.Operation = p.API.Operations[e.OperationName] + if e.Operation == nil { + panic("unknown operation " + e.OperationName + " for waiter " + n) + } + p.API.Waiters = append(p.API.Waiters, e) + } +} + +// ExpectedString returns the string that was expected by the WaitAcceptor +func (a *WaitAcceptor) ExpectedString() string { + switch a.Expected.(type) { + case string: + return fmt.Sprintf("%q", a.Expected) + default: + return fmt.Sprintf("%v", a.Expected) + } +} + +var tplWaiter = template.Must(template.New("waiter").Parse(` +func (c *{{ .Operation.API.StructName }}) WaitUntil{{ .Name }}(input {{ .Operation.InputRef.GoType }}) error { + waiterCfg := waiter.Config{ + Operation: "{{ .OperationName }}", + Delay: {{ .Delay }}, + MaxAttempts: {{ .MaxAttempts }}, + Acceptors: []waiter.WaitAcceptor{ + {{ range $_, $a := .Acceptors }}waiter.WaitAcceptor{ + State: "{{ .State }}", + Matcher: "{{ .Matcher }}", + Argument: "{{ .Argument }}", + Expected: {{ .ExpectedString }}, + }, + {{ end }} + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} +`)) + +// GoCode returns the generated Go code for an individual waiter. +func (w *Waiter) GoCode() string { + var buf bytes.Buffer + if err := tplWaiter.Execute(&buf, w); err != nil { + panic(err) + } + + return buf.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/model/cli/api-info/api-info.go b/vendor/github.com/aws/aws-sdk-go/private/model/cli/api-info/api-info.go new file mode 100644 index 000000000..44faa262e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/model/cli/api-info/api-info.go @@ -0,0 +1,27 @@ +package main + +import ( + "fmt" + "os" + "path/filepath" + "sort" + + "github.com/aws/aws-sdk-go/private/model/api" +) + +func main() { + dir, _ := os.Open(filepath.Join("models", "apis")) + names, _ := dir.Readdirnames(0) + for _, name := range names { + m, _ := filepath.Glob(filepath.Join("models", "apis", name, "*", "api-2.json")) + if len(m) == 0 { + continue + } + + sort.Strings(m) + f := m[len(m)-1] + a := api.API{} + a.Attach(f) + fmt.Printf("%s\t%s\n", a.Metadata.ServiceFullName, a.Metadata.APIVersion) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/model/cli/gen-api/main.go b/vendor/github.com/aws/aws-sdk-go/private/model/cli/gen-api/main.go new file mode 100644 index 000000000..5a0043dfe --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/model/cli/gen-api/main.go @@ -0,0 +1,254 @@ +// Command aws-gen-gocli parses a JSON description of an AWS API and generates a +// Go file containing a client for the API. 
+//
+//	aws-gen-gocli apis/s3/2006-03-03/api-2.json
+package main
+
+import (
+	"flag"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"runtime/debug"
+	"sort"
+	"strings"
+	"sync"
+
+	"github.com/aws/aws-sdk-go/private/model/api"
+	"github.com/aws/aws-sdk-go/private/util"
+)
+
+type generateInfo struct {
+	*api.API
+	PackageDir string
+}
+
+var excludeServices = map[string]struct{}{
+	"importexport": {},
+}
+
+// newGenerateInfo initializes the service API's folder structure for a specific service.
+// If the SERVICES environment variable is set, and this service is not a part of the list,
+// this service will be skipped.
+func newGenerateInfo(modelFile, svcPath, svcImportPath string) *generateInfo {
+	g := &generateInfo{API: &api.API{SvcClientImportPath: svcImportPath}}
+	g.API.Attach(modelFile)
+
+	if _, ok := excludeServices[g.API.PackageName()]; ok {
+		return nil
+	}
+
+	paginatorsFile := strings.Replace(modelFile, "api-2.json", "paginators-1.json", -1)
+	if _, err := os.Stat(paginatorsFile); err == nil {
+		g.API.AttachPaginators(paginatorsFile)
+	} else if !os.IsNotExist(err) {
+		fmt.Println("paginators-1.json error:", err)
+	}
+
+	docsFile := strings.Replace(modelFile, "api-2.json", "docs-2.json", -1)
+	if _, err := os.Stat(docsFile); err == nil {
+		g.API.AttachDocs(docsFile)
+	} else {
+		fmt.Println("docs-2.json error:", err)
+	}
+
+	waitersFile := strings.Replace(modelFile, "api-2.json", "waiters-2.json", -1)
+	if _, err := os.Stat(waitersFile); err == nil {
+		g.API.AttachWaiters(waitersFile)
+	} else if !os.IsNotExist(err) {
+		fmt.Println("waiters-2.json error:", err)
+	}
+
+	g.API.Setup()
+
+	if svc := os.Getenv("SERVICES"); svc != "" {
+		svcs := strings.Split(svc, ",")
+
+		included := false
+		for _, s := range svcs {
+			if s == g.API.PackageName() {
+				included = true
+				break
+			}
+		}
+		if !included {
+			// skip this non-included service
+			return nil
+		}
+	}
+
+	// ensure the directory exists
+	pkgDir := filepath.Join(svcPath, g.API.PackageName())
+	os.MkdirAll(pkgDir, 0775)
+	os.MkdirAll(filepath.Join(pkgDir, g.API.InterfacePackageName()), 0775)
+
+	g.PackageDir = pkgDir
+
+	return g
+}
+
+// Generates service api, examples, and interface from api json definition files.
+//
+// Flags:
+// -path alternative service path to write generated files to for each service.
+//
+// Env:
+//  SERVICES comma separated list of services to generate.
+func main() {
+	var svcPath, sessionPath, svcImportPath string
+	flag.StringVar(&svcPath, "path", "service", "directory to generate service clients in")
+	flag.StringVar(&sessionPath, "sessionPath", filepath.Join("aws", "session"), "generate session service client factories")
+	flag.StringVar(&svcImportPath, "svc-import-path", "github.com/aws/aws-sdk-go/service", "namespace to generate service client Go code import path under")
+	flag.Parse()
+
+	files := []string{}
+	for i := 0; i < flag.NArg(); i++ {
+		file := flag.Arg(i)
+		if strings.Contains(file, "*") {
+			paths, _ := filepath.Glob(file)
+			files = append(files, paths...)
+		} else {
+			files = append(files, file)
+		}
+	}
+
+	for svcName := range excludeServices {
+		if strings.Contains(os.Getenv("SERVICES"), svcName) {
+			fmt.Printf("Service %s is not supported\n", svcName)
+			os.Exit(1)
+		}
+	}
+
+	sort.Strings(files)
+
+	// Remove old API versions from list
+	m := map[string]bool{}
+	for i := range files {
+		idx := len(files) - 1 - i
+		parts := strings.Split(files[idx], string(filepath.Separator))
+		svc := parts[len(parts)-3] // service name is the third-to-last path component
+
+		if m[svc] {
+			files[idx] = "" // wipe this one out if we already saw the service
+		}
+		m[svc] = true
+	}
+
+	wg := sync.WaitGroup{}
+	for i := range files {
+		filename := files[i]
+		if filename == "" { // empty file
+			continue
+		}
+
+		genInfo := newGenerateInfo(filename, svcPath, svcImportPath)
+		if genInfo == nil {
+			continue
+		}
+		if _, ok := excludeServices[genInfo.API.PackageName()]; ok {
+			// Skip services not yet supported.
+			continue
+		}
+
+		wg.Add(1)
+		go func(g *generateInfo, filename string) {
+			defer wg.Done()
+			writeServiceFiles(g, filename)
+		}(genInfo, filename)
+	}
+
+	wg.Wait()
+}
+
+func writeServiceFiles(g *generateInfo, filename string) {
+	defer func() {
+		if r := recover(); r != nil {
+			fmt.Fprintf(os.Stderr, "Error generating %s\n%s\n%s\n",
+				filename, r, debug.Stack())
+		}
+	}()
+
+	fmt.Printf("Generating %s (%s)...\n",
+		g.API.PackageName(), g.API.Metadata.APIVersion)
+
+	// write api.go and service.go files
+	Must(writeAPIFile(g))
+	Must(writeExamplesFile(g))
+	Must(writeServiceFile(g))
+	Must(writeInterfaceFile(g))
+	Must(writeWaitersFile(g))
+}
+
+// Must will panic if the error passed in is not nil.
+func Must(err error) {
+	if err != nil {
+		panic(err)
+	}
+}
+
+const codeLayout = `// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+%s
+package %s
+
+%s
+`
+
+func writeGoFile(file string, layout string, args ...interface{}) error {
+	return ioutil.WriteFile(file, []byte(util.GoFmt(fmt.Sprintf(layout, args...))), 0664)
+}
+
+// writeExamplesFile writes out the service example file.
+func writeExamplesFile(g *generateInfo) error {
+	return writeGoFile(filepath.Join(g.PackageDir, "examples_test.go"),
+		codeLayout,
+		"",
+		g.API.PackageName()+"_test",
+		g.API.ExampleGoCode(),
+	)
+}
+
+// writeServiceFile writes out the service initialization file.
+func writeServiceFile(g *generateInfo) error {
+	return writeGoFile(filepath.Join(g.PackageDir, "service.go"),
+		codeLayout,
+		"",
+		g.API.PackageName(),
+		g.API.ServiceGoCode(),
+	)
+}
+
+// writeInterfaceFile writes out the service interface file.
+func writeInterfaceFile(g *generateInfo) error {
+	return writeGoFile(filepath.Join(g.PackageDir, g.API.InterfacePackageName(), "interface.go"),
+		codeLayout,
+		fmt.Sprintf("\n// Package %s provides an interface for the %s.",
+			g.API.InterfacePackageName(), g.API.Metadata.ServiceFullName),
+		g.API.InterfacePackageName(),
+		g.API.InterfaceGoCode(),
+	)
+}
+
+func writeWaitersFile(g *generateInfo) error {
+	if len(g.API.Waiters) == 0 {
+		return nil
+	}
+
+	return writeGoFile(filepath.Join(g.PackageDir, "waiters.go"),
+		codeLayout,
+		"",
+		g.API.PackageName(),
+		g.API.WaitersGoCode(),
+	)
+}
+
+// writeAPIFile writes out the service api file.
+func writeAPIFile(g *generateInfo) error { + return writeGoFile(filepath.Join(g.PackageDir, "api.go"), + codeLayout, + fmt.Sprintf("\n// Package %s provides a client for %s.", + g.API.PackageName(), g.API.Metadata.ServiceFullName), + g.API.PackageName(), + g.API.APIGoCode(), + ) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/model/cli/gen-endpoints/main.go b/vendor/github.com/aws/aws-sdk-go/private/model/cli/gen-endpoints/main.go new file mode 100644 index 000000000..971e989a4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/model/cli/gen-endpoints/main.go @@ -0,0 +1,47 @@ +// Command aws-gen-goendpoints parses a JSON description of the AWS endpoint +// discovery logic and generates a Go file which returns an endpoint. +// +// aws-gen-goendpoints apis/_endpoints.json aws/endpoints_map.go +package main + +import ( + "encoding/json" + "os" + + "github.com/aws/aws-sdk-go/private/model" +) + +// Generates the endpoints from json description +// +// CLI Args: +// [0] This file's execution path +// [1] The definition file to use +// [2] The output file to generate +func main() { + in, err := os.Open(os.Args[1]) + if err != nil { + panic(err) + } + defer in.Close() + + var endpoints struct { + Version int + Endpoints map[string]struct { + Endpoint string + SigningRegion string + } + } + if err = json.NewDecoder(in).Decode(&endpoints); err != nil { + panic(err) + } + + out, err := os.Create(os.Args[2]) + if err != nil { + panic(err) + } + defer out.Close() + + if err := model.GenerateEndpoints(endpoints, out); err != nil { + panic(err) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/model/endpoints.go b/vendor/github.com/aws/aws-sdk-go/private/model/endpoints.go new file mode 100644 index 000000000..e0ed6a7c4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/model/endpoints.go @@ -0,0 +1,57 @@ +package model + +import ( + "bytes" + "go/format" + "io" + "text/template" +) + +// GenerateEndpoints writes a Go file to the given writer. +func GenerateEndpoints(endpoints interface{}, w io.Writer) error { + tmpl, err := template.New("endpoints").Parse(t) + if err != nil { + return err + } + + out := bytes.NewBuffer(nil) + if err = tmpl.Execute(out, endpoints); err != nil { + return err + } + + b, err := format.Source(bytes.TrimSpace(out.Bytes())) + if err != nil { + return err + } + + _, err = io.Copy(w, bytes.NewReader(b)) + return err +} + +const t = ` +package endpoints + +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +type endpointStruct struct { + Version int + Endpoints map[string]endpointEntry +} + +type endpointEntry struct { + Endpoint string + SigningRegion string +} + +var endpointsMap = endpointStruct{ + Version: {{ .Version }}, + Endpoints: map[string]endpointEntry{ + {{ range $key, $entry := .Endpoints }}"{{ $key }}": endpointEntry{ + Endpoint: "{{ $entry.Endpoint }}", + {{ if ne $entry.SigningRegion "" }}SigningRegion: "{{ $entry.SigningRegion }}", + {{ end }} + }, + {{ end }} + }, +} +` diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go new file mode 100644 index 000000000..68e344d1f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go @@ -0,0 +1,35 @@ +// Package ec2query provides serialisation of AWS EC2 requests and responses. 
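+//
+// A request built by this package is a form-encoded HTTP POST (or, for
+// pre-signed requests, a GET query string) that always carries the operation
+// name and API version, e.g. (illustrative values only):
+//
+//	Action=AttachNetworkInterface&Version=2015-04-15&DeviceIndex=1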
+package ec2query + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/ec2.json build_test.go + +import ( + "net/url" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query/queryutil" +) + +// BuildHandler is a named request handler for building ec2query protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.ec2query.Build", Fn: Build} + +// Build builds a request for the EC2 protocol. +func Build(r *request.Request) { + body := url.Values{ + "Action": {r.Operation.Name}, + "Version": {r.ClientInfo.APIVersion}, + } + if err := queryutil.Parse(body, r.Params, true); err != nil { + r.Error = awserr.New("SerializationError", "failed encoding EC2 Query request", err) + } + + if r.ExpireTime == 0 { + r.HTTPRequest.Method = "POST" + r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + r.SetBufferBody([]byte(body.Encode())) + } else { // This is a pre-signed request + r.HTTPRequest.Method = "GET" + r.HTTPRequest.URL.RawQuery = body.Encode() + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_bench_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_bench_test.go new file mode 100644 index 000000000..e135b9360 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_bench_test.go @@ -0,0 +1,85 @@ +// +build bench + +package ec2query_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol/ec2query" + "github.com/aws/aws-sdk-go/service/ec2" +) + +func BenchmarkEC2QueryBuild_Complex_ec2AuthorizeSecurityGroupEgress(b *testing.B) { + params := &ec2.AuthorizeSecurityGroupEgressInput{ + GroupId: aws.String("String"), // Required + CidrIp: aws.String("String"), + DryRun: aws.Bool(true), + FromPort: aws.Int64(1), + IpPermissions: []*ec2.IpPermission{ + { // Required + FromPort: aws.Int64(1), + IpProtocol: aws.String("String"), + IpRanges: []*ec2.IpRange{ + { // Required + CidrIp: aws.String("String"), + }, + // More values... + }, + PrefixListIds: []*ec2.PrefixListId{ + { // Required + PrefixListId: aws.String("String"), + }, + // More values... + }, + ToPort: aws.Int64(1), + UserIdGroupPairs: []*ec2.UserIdGroupPair{ + { // Required + GroupId: aws.String("String"), + GroupName: aws.String("String"), + UserId: aws.String("String"), + }, + // More values... + }, + }, + // More values... 
+ }, + IpProtocol: aws.String("String"), + SourceSecurityGroupName: aws.String("String"), + SourceSecurityGroupOwnerId: aws.String("String"), + ToPort: aws.Int64(1), + } + + benchEC2QueryBuild(b, "AuthorizeSecurityGroupEgress", params) +} + +func BenchmarkEC2QueryBuild_Simple_ec2AttachNetworkInterface(b *testing.B) { + params := &ec2.AttachNetworkInterfaceInput{ + DeviceIndex: aws.Int64(1), // Required + InstanceId: aws.String("String"), // Required + NetworkInterfaceId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + + benchEC2QueryBuild(b, "AttachNetworkInterface", params) +} + +func benchEC2QueryBuild(b *testing.B, opName string, params interface{}) { + svc := awstesting.NewClient() + svc.ServiceName = "ec2" + svc.APIVersion = "2015-04-15" + + for i := 0; i < b.N; i++ { + r := svc.NewRequest(&request.Operation{ + Name: opName, + HTTPMethod: "POST", + HTTPPath: "/", + }, params, nil) + ec2query.Build(r) + if r.Error != nil { + b.Fatal("Unexpected error", r.Error) + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_test.go new file mode 100644 index 000000000..553922826 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_test.go @@ -0,0 +1,1380 @@ +package ec2query_test + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/ec2query" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" + "github.com/aws/aws-sdk-go/private/util" + "github.com/stretchr/testify/assert" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} +var _ = io.EOF +var _ = aws.String +var _ = fmt.Println + +func init() { + protocol.RandReader = &awstesting.ZeroReader{} +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService1ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService1ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService1ProtocolTest client from just a session. +// svc := inputservice1protocoltest.New(mySession) +// +// // Create a InputService1ProtocolTest client with additional configuration +// svc := inputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService1ProtocolTest { + c := p.ClientConfig("inputservice1protocoltest", cfgs...) + return newInputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
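+// The ec2query protocol is wired in through named handlers: v4 signing on the
+// Sign phase, ec2query.Build on the Build phase, and the ec2query unmarshalers
+// on the Unmarshal, UnmarshalMeta, and UnmarshalError phases.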
+func newInputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService1ProtocolTest { + svc := &InputService1ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice1protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService1ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService1TestCaseOperation1 = "OperationName" + +// InputService1TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService1TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService1TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService1TestCaseOperation1Request method. +// req, resp := client.InputService1TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService1ProtocolTest) InputService1TestCaseOperation1Request(input *InputService1TestShapeInputService1TestCaseOperation1Input) (req *request.Request, output *InputService1TestShapeInputService1TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService1TestCaseOperation1, + } + + if input == nil { + input = &InputService1TestShapeInputService1TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService1TestShapeInputService1TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService1ProtocolTest) InputService1TestCaseOperation1(input *InputService1TestShapeInputService1TestCaseOperation1Input) (*InputService1TestShapeInputService1TestCaseOperation1Output, error) { + req, out := c.InputService1TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService1TestShapeInputService1TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + Bar *string `type:"string"` + + Foo *string `type:"string"` +} + +type InputService1TestShapeInputService1TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. 
+// It is not safe to mutate any of the client's properties though. +type InputService2ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService2ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService2ProtocolTest client from just a session. +// svc := inputservice2protocoltest.New(mySession) +// +// // Create a InputService2ProtocolTest client with additional configuration +// svc := inputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService2ProtocolTest { + c := p.ClientConfig("inputservice2protocoltest", cfgs...) + return newInputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService2ProtocolTest { + svc := &InputService2ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice2protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService2ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService2TestCaseOperation1 = "OperationName" + +// InputService2TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService2TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService2TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService2TestCaseOperation1Request method. 
+// req, resp := client.InputService2TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService2ProtocolTest) InputService2TestCaseOperation1Request(input *InputService2TestShapeInputService2TestCaseOperation1Input) (req *request.Request, output *InputService2TestShapeInputService2TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService2TestCaseOperation1, + } + + if input == nil { + input = &InputService2TestShapeInputService2TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService2TestShapeInputService2TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService2ProtocolTest) InputService2TestCaseOperation1(input *InputService2TestShapeInputService2TestCaseOperation1Input) (*InputService2TestShapeInputService2TestCaseOperation1Output, error) { + req, out := c.InputService2TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService2TestShapeInputService2TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + Bar *string `locationName:"barLocationName" type:"string"` + + Foo *string `type:"string"` + + Yuck *string `locationName:"yuckLocationName" queryName:"yuckQueryName" type:"string"` +} + +type InputService2TestShapeInputService2TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService3ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService3ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService3ProtocolTest client from just a session. +// svc := inputservice3protocoltest.New(mySession) +// +// // Create a InputService3ProtocolTest client with additional configuration +// svc := inputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService3ProtocolTest { + c := p.ClientConfig("inputservice3protocoltest", cfgs...) + return newInputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService3ProtocolTest { + svc := &InputService3ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice3protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService3ProtocolTest operation and runs any +// custom request initialization. 
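+// For these generated protocol tests no customizations are registered, so
+// newRequest delegates directly to the embedded client's NewRequest.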
+func (c *InputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService3TestCaseOperation1 = "OperationName" + +// InputService3TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService3TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService3TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService3TestCaseOperation1Request method. +// req, resp := client.InputService3TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService3ProtocolTest) InputService3TestCaseOperation1Request(input *InputService3TestShapeInputService3TestCaseOperation1Input) (req *request.Request, output *InputService3TestShapeInputService3TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService3TestCaseOperation1, + } + + if input == nil { + input = &InputService3TestShapeInputService3TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService3TestShapeInputService3TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService3ProtocolTest) InputService3TestCaseOperation1(input *InputService3TestShapeInputService3TestCaseOperation1Input) (*InputService3TestShapeInputService3TestCaseOperation1Output, error) { + req, out := c.InputService3TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService3TestShapeInputService3TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + StructArg *InputService3TestShapeStructType `locationName:"Struct" type:"structure"` +} + +type InputService3TestShapeInputService3TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService3TestShapeStructType struct { + _ struct{} `type:"structure"` + + ScalarArg *string `locationName:"Scalar" type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService4ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService4ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService4ProtocolTest client from just a session. 
+// svc := inputservice4protocoltest.New(mySession) +// +// // Create a InputService4ProtocolTest client with additional configuration +// svc := inputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService4ProtocolTest { + c := p.ClientConfig("inputservice4protocoltest", cfgs...) + return newInputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService4ProtocolTest { + svc := &InputService4ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice4protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService4ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService4TestCaseOperation1 = "OperationName" + +// InputService4TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService4TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService4TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService4TestCaseOperation1Request method. 
+// req, resp := client.InputService4TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService4ProtocolTest) InputService4TestCaseOperation1Request(input *InputService4TestShapeInputService4TestCaseOperation1Input) (req *request.Request, output *InputService4TestShapeInputService4TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService4TestCaseOperation1, + } + + if input == nil { + input = &InputService4TestShapeInputService4TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService4TestShapeInputService4TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService4ProtocolTest) InputService4TestCaseOperation1(input *InputService4TestShapeInputService4TestCaseOperation1Input) (*InputService4TestShapeInputService4TestCaseOperation1Output, error) { + req, out := c.InputService4TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService4TestShapeInputService4TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + ListArg []*string `type:"list"` +} + +type InputService4TestShapeInputService4TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService5ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService5ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService5ProtocolTest client from just a session. +// svc := inputservice5protocoltest.New(mySession) +// +// // Create a InputService5ProtocolTest client with additional configuration +// svc := inputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService5ProtocolTest { + c := p.ClientConfig("inputservice5protocoltest", cfgs...) + return newInputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService5ProtocolTest { + svc := &InputService5ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice5protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService5ProtocolTest operation and runs any +// custom request initialization. 
+func (c *InputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService5TestCaseOperation1 = "OperationName" + +// InputService5TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService5TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService5TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService5TestCaseOperation1Request method. +// req, resp := client.InputService5TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService5ProtocolTest) InputService5TestCaseOperation1Request(input *InputService5TestShapeInputService5TestCaseOperation1Input) (req *request.Request, output *InputService5TestShapeInputService5TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService5TestCaseOperation1, + } + + if input == nil { + input = &InputService5TestShapeInputService5TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService5TestShapeInputService5TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService5ProtocolTest) InputService5TestCaseOperation1(input *InputService5TestShapeInputService5TestCaseOperation1Input) (*InputService5TestShapeInputService5TestCaseOperation1Output, error) { + req, out := c.InputService5TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService5TestShapeInputService5TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + ListArg []*string `locationName:"ListMemberName" locationNameList:"item" type:"list"` +} + +type InputService5TestShapeInputService5TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService6ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService6ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService6ProtocolTest client from just a session. +// svc := inputservice6protocoltest.New(mySession) +// +// // Create a InputService6ProtocolTest client with additional configuration +// svc := inputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService6ProtocolTest { + c := p.ClientConfig("inputservice6protocoltest", cfgs...) 
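+	// ClientConfig resolves the merged config, handlers, endpoint, and
+	// signing region used to construct the concrete client below.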
+ return newInputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService6ProtocolTest { + svc := &InputService6ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice6protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService6ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService6TestCaseOperation1 = "OperationName" + +// InputService6TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService6TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService6TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService6TestCaseOperation1Request method. 
+// req, resp := client.InputService6TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService6ProtocolTest) InputService6TestCaseOperation1Request(input *InputService6TestShapeInputService6TestCaseOperation1Input) (req *request.Request, output *InputService6TestShapeInputService6TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService6TestCaseOperation1, + } + + if input == nil { + input = &InputService6TestShapeInputService6TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService6TestShapeInputService6TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService6ProtocolTest) InputService6TestCaseOperation1(input *InputService6TestShapeInputService6TestCaseOperation1Input) (*InputService6TestShapeInputService6TestCaseOperation1Output, error) { + req, out := c.InputService6TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService6TestShapeInputService6TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + ListArg []*string `locationName:"ListMemberName" queryName:"ListQueryName" locationNameList:"item" type:"list"` +} + +type InputService6TestShapeInputService6TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService7ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService7ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService7ProtocolTest client from just a session. +// svc := inputservice7protocoltest.New(mySession) +// +// // Create a InputService7ProtocolTest client with additional configuration +// svc := inputservice7protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService7ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService7ProtocolTest { + c := p.ClientConfig("inputservice7protocoltest", cfgs...) + return newInputService7ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService7ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService7ProtocolTest { + svc := &InputService7ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice7protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService7ProtocolTest operation and runs any +// custom request initialization. 
+func (c *InputService7ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService7TestCaseOperation1 = "OperationName" + +// InputService7TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService7TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService7TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService7TestCaseOperation1Request method. +// req, resp := client.InputService7TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService7ProtocolTest) InputService7TestCaseOperation1Request(input *InputService7TestShapeInputService7TestCaseOperation1Input) (req *request.Request, output *InputService7TestShapeInputService7TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService7TestCaseOperation1, + } + + if input == nil { + input = &InputService7TestShapeInputService7TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService7TestShapeInputService7TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService7ProtocolTest) InputService7TestCaseOperation1(input *InputService7TestShapeInputService7TestCaseOperation1Input) (*InputService7TestShapeInputService7TestCaseOperation1Output, error) { + req, out := c.InputService7TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService7TestShapeInputService7TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + // BlobArg is automatically base64 encoded/decoded by the SDK. + BlobArg []byte `type:"blob"` +} + +type InputService7TestShapeInputService7TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService8ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService8ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService8ProtocolTest client from just a session. +// svc := inputservice8protocoltest.New(mySession) +// +// // Create a InputService8ProtocolTest client with additional configuration +// svc := inputservice8protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService8ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService8ProtocolTest { + c := p.ClientConfig("inputservice8protocoltest", cfgs...) 
+ return newInputService8ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService8ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService8ProtocolTest { + svc := &InputService8ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice8protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService8ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService8TestCaseOperation1 = "OperationName" + +// InputService8TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService8TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService8TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService8TestCaseOperation1Request method. 
+// req, resp := client.InputService8TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService8ProtocolTest) InputService8TestCaseOperation1Request(input *InputService8TestShapeInputService8TestCaseOperation1Input) (req *request.Request, output *InputService8TestShapeInputService8TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService8TestCaseOperation1, + } + + if input == nil { + input = &InputService8TestShapeInputService8TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService8TestShapeInputService8TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService8ProtocolTest) InputService8TestCaseOperation1(input *InputService8TestShapeInputService8TestCaseOperation1Input) (*InputService8TestShapeInputService8TestCaseOperation1Output, error) { + req, out := c.InputService8TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService8TestShapeInputService8TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + TimeArg *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +type InputService8TestShapeInputService8TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService9ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService9ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService9ProtocolTest client from just a session. +// svc := inputservice9protocoltest.New(mySession) +// +// // Create a InputService9ProtocolTest client with additional configuration +// svc := inputservice9protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService9ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService9ProtocolTest { + c := p.ClientConfig("inputservice9protocoltest", cfgs...) + return newInputService9ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService9ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService9ProtocolTest { + svc := &InputService9ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice9protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService9ProtocolTest operation and runs any +// custom request initialization. 
+func (c *InputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService9TestCaseOperation1 = "OperationName" + +// InputService9TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService9TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService9TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService9TestCaseOperation1Request method. +// req, resp := client.InputService9TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService9ProtocolTest) InputService9TestCaseOperation1Request(input *InputService9TestShapeInputShape) (req *request.Request, output *InputService9TestShapeInputService9TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService9TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService9TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService9TestShapeInputService9TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService9ProtocolTest) InputService9TestCaseOperation1(input *InputService9TestShapeInputShape) (*InputService9TestShapeInputService9TestCaseOperation1Output, error) { + req, out := c.InputService9TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService9TestCaseOperation2 = "OperationName" + +// InputService9TestCaseOperation2Request generates a "aws/request.Request" representing the +// client's request for the InputService9TestCaseOperation2 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService9TestCaseOperation2 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService9TestCaseOperation2Request method. 
+// req, resp := client.InputService9TestCaseOperation2Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService9ProtocolTest) InputService9TestCaseOperation2Request(input *InputService9TestShapeInputShape) (req *request.Request, output *InputService9TestShapeInputService9TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService9TestCaseOperation2, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService9TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService9TestShapeInputService9TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService9ProtocolTest) InputService9TestCaseOperation2(input *InputService9TestShapeInputShape) (*InputService9TestShapeInputService9TestCaseOperation2Output, error) { + req, out := c.InputService9TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService9TestShapeInputService9TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService9TestShapeInputService9TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService9TestShapeInputShape struct { + _ struct{} `type:"structure"` + + Token *string `type:"string" idempotencyToken:"true"` +} + +// +// Tests begin here +// + +func TestInputService1ProtocolTestScalarMembersCase1(t *testing.T) { + sess := session.New() + svc := NewInputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService1TestShapeInputService1TestCaseOperation1Input{ + Bar: aws.String("val2"), + Foo: aws.String("val1"), + } + req, _ := svc.InputService1TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&Bar=val2&Foo=val1&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService2ProtocolTestStructureWithLocationNameAndQueryNameAppliedToMembersCase1(t *testing.T) { + sess := session.New() + svc := NewInputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService2TestShapeInputService2TestCaseOperation1Input{ + Bar: aws.String("val2"), + Foo: aws.String("val1"), + Yuck: aws.String("val3"), + } + req, _ := svc.InputService2TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&BarLocationName=val2&Foo=val1&Version=2014-01-01&yuckQueryName=val3`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService3ProtocolTestNestedStructureMembersCase1(t *testing.T) { + sess := session.New() + svc := NewInputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService3TestShapeInputService3TestCaseOperation1Input{ + StructArg: &InputService3TestShapeStructType{ + ScalarArg: aws.String("foo"), + 
}, + } + req, _ := svc.InputService3TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&Struct.Scalar=foo&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService4ProtocolTestListTypesCase1(t *testing.T) { + sess := session.New() + svc := NewInputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService4TestShapeInputService4TestCaseOperation1Input{ + ListArg: []*string{ + aws.String("foo"), + aws.String("bar"), + aws.String("baz"), + }, + } + req, _ := svc.InputService4TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&ListArg.1=foo&ListArg.2=bar&ListArg.3=baz&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService5ProtocolTestListWithLocationNameAppliedToMemberCase1(t *testing.T) { + sess := session.New() + svc := NewInputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService5TestShapeInputService5TestCaseOperation1Input{ + ListArg: []*string{ + aws.String("a"), + aws.String("b"), + aws.String("c"), + }, + } + req, _ := svc.InputService5TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&ListMemberName.1=a&ListMemberName.2=b&ListMemberName.3=c&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService6ProtocolTestListWithLocationNameAndQueryNameCase1(t *testing.T) { + sess := session.New() + svc := NewInputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService6TestShapeInputService6TestCaseOperation1Input{ + ListArg: []*string{ + aws.String("a"), + aws.String("b"), + aws.String("c"), + }, + } + req, _ := svc.InputService6TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&ListQueryName.1=a&ListQueryName.2=b&ListQueryName.3=c&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService7ProtocolTestBase64EncodedBlobsCase1(t *testing.T) { + sess := session.New() + svc := NewInputService7ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService7TestShapeInputService7TestCaseOperation1Input{ + BlobArg: []byte("foo"), + } + req, _ := svc.InputService7TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := 
ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&BlobArg=Zm9v&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService8ProtocolTestTimestampValuesCase1(t *testing.T) { + sess := session.New() + svc := NewInputService8ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService8TestShapeInputService8TestCaseOperation1Input{ + TimeArg: aws.Time(time.Unix(1422172800, 0)), + } + req, _ := svc.InputService8TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&TimeArg=2015-01-25T08%3A00%3A00Z&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService9ProtocolTestIdempotencyTokenAutoFillCase1(t *testing.T) { + sess := session.New() + svc := NewInputService9ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService9TestShapeInputShape{ + Token: aws.String("abc123"), + } + req, _ := svc.InputService9TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Token=abc123`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService9ProtocolTestIdempotencyTokenAutoFillCase2(t *testing.T) { + sess := session.New() + svc := NewInputService9ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService9TestShapeInputShape{} + req, _ := svc.InputService9TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Token=00000000-0000-4000-8000-000000000000`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal.go new file mode 100644 index 000000000..631e63c0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal.go @@ -0,0 +1,63 @@ +package ec2query + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/ec2.json unmarshal_test.go + +import ( + "encoding/xml" + "io" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// UnmarshalHandler is a named request handler for unmarshaling ec2query protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.ec2query.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling ec2query protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.ec2query.UnmarshalMeta", Fn: UnmarshalMeta} + +// UnmarshalErrorHandler is a named request handler for unmarshaling 
ec2query protocol request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.ec2query.UnmarshalError", Fn: UnmarshalError} + +// Unmarshal unmarshals a response body for the EC2 protocol. +func Unmarshal(r *request.Request) { + defer r.HTTPResponse.Body.Close() + if r.DataFilled() { + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, "") + if err != nil { + r.Error = awserr.New("SerializationError", "failed decoding EC2 Query response", err) + return + } + } +} + +// UnmarshalMeta unmarshals response headers for the EC2 protocol. +func UnmarshalMeta(r *request.Request) { + // TODO implement unmarshaling of request IDs +} + +type xmlErrorResponse struct { + XMLName xml.Name `xml:"Response"` + Code string `xml:"Errors>Error>Code"` + Message string `xml:"Errors>Error>Message"` + RequestID string `xml:"RequestID"` +} + +// UnmarshalError unmarshals a response error for the EC2 protocol. +func UnmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + resp := &xmlErrorResponse{} + err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp) + if err != nil && err != io.EOF { + r.Error = awserr.New("SerializationError", "failed decoding EC2 Query error response", err) + } else { + r.Error = awserr.NewRequestFailure( + awserr.New(resp.Code, resp.Message, nil), + r.HTTPResponse.StatusCode, + resp.RequestID, + ) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal_test.go new file mode 100644 index 000000000..3a38e762d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal_test.go @@ -0,0 +1,1252 @@ +package ec2query_test + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/ec2query" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" + "github.com/aws/aws-sdk-go/private/util" + "github.com/stretchr/testify/assert" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} +var _ = io.EOF +var _ = aws.String +var _ = fmt.Println + +func init() { + protocol.RandReader = &awstesting.ZeroReader{} +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService1ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService1ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService1ProtocolTest client from just a session. 
+// svc := outputservice1protocoltest.New(mySession) +// +// // Create a OutputService1ProtocolTest client with additional configuration +// svc := outputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService1ProtocolTest { + c := p.ClientConfig("outputservice1protocoltest", cfgs...) + return newOutputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService1ProtocolTest { + svc := &OutputService1ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice1protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService1ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService1TestCaseOperation1 = "OperationName" + +// OutputService1TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService1TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService1TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService1TestCaseOperation1Request method. 
+// req, resp := client.OutputService1TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1Request(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (req *request.Request, output *OutputService1TestShapeOutputService1TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService1TestCaseOperation1, + } + + if input == nil { + input = &OutputService1TestShapeOutputService1TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService1TestShapeOutputService1TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (*OutputService1TestShapeOutputService1TestCaseOperation1Output, error) { + req, out := c.OutputService1TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService1TestShapeOutputService1TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService1TestShapeOutputService1TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Char *string `type:"character"` + + Double *float64 `type:"double"` + + FalseBool *bool `type:"boolean"` + + Float *float64 `type:"float"` + + Long *int64 `type:"long"` + + Num *int64 `locationName:"FooNum" type:"integer"` + + Str *string `type:"string"` + + TrueBool *bool `type:"boolean"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService2ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService2ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService2ProtocolTest client from just a session. +// svc := outputservice2protocoltest.New(mySession) +// +// // Create a OutputService2ProtocolTest client with additional configuration +// svc := outputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService2ProtocolTest { + c := p.ClientConfig("outputservice2protocoltest", cfgs...) + return newOutputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService2ProtocolTest { + svc := &OutputService2ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice2protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService2ProtocolTest operation and runs any +// custom request initialization. 
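+// (No per-operation customization is registered for these generated protocol-test clients; newRequest simply wraps c.NewRequest.)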
+func (c *OutputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService2TestCaseOperation1 = "OperationName" + +// OutputService2TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService2TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService2TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService2TestCaseOperation1Request method. +// req, resp := client.OutputService2TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1Request(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (req *request.Request, output *OutputService2TestShapeOutputService2TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService2TestCaseOperation1, + } + + if input == nil { + input = &OutputService2TestShapeOutputService2TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService2TestShapeOutputService2TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (*OutputService2TestShapeOutputService2TestCaseOperation1Output, error) { + req, out := c.OutputService2TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService2TestShapeOutputService2TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService2TestShapeOutputService2TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + // Blob is automatically base64 encoded/decoded by the SDK. + Blob []byte `type:"blob"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService3ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService3ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService3ProtocolTest client from just a session. +// svc := outputservice3protocoltest.New(mySession) +// +// // Create a OutputService3ProtocolTest client with additional configuration +// svc := outputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService3ProtocolTest { + c := p.ClientConfig("outputservice3protocoltest", cfgs...) 
+ return newOutputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService3ProtocolTest { + svc := &OutputService3ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice3protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService3ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService3TestCaseOperation1 = "OperationName" + +// OutputService3TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService3TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService3TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService3TestCaseOperation1Request method. 
+// req, resp := client.OutputService3TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1Request(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (req *request.Request, output *OutputService3TestShapeOutputService3TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService3TestCaseOperation1, + } + + if input == nil { + input = &OutputService3TestShapeOutputService3TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService3TestShapeOutputService3TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (*OutputService3TestShapeOutputService3TestCaseOperation1Output, error) { + req, out := c.OutputService3TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService3TestShapeOutputService3TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService3TestShapeOutputService3TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + ListMember []*string `type:"list"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService4ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService4ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService4ProtocolTest client from just a session. +// svc := outputservice4protocoltest.New(mySession) +// +// // Create a OutputService4ProtocolTest client with additional configuration +// svc := outputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService4ProtocolTest { + c := p.ClientConfig("outputservice4protocoltest", cfgs...) + return newOutputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService4ProtocolTest { + svc := &OutputService4ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice4protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService4ProtocolTest operation and runs any +// custom request initialization. 
+func (c *OutputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService4TestCaseOperation1 = "OperationName" + +// OutputService4TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService4TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService4TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService4TestCaseOperation1Request method. +// req, resp := client.OutputService4TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1Request(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (req *request.Request, output *OutputService4TestShapeOutputService4TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService4TestCaseOperation1, + } + + if input == nil { + input = &OutputService4TestShapeOutputService4TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService4TestShapeOutputService4TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (*OutputService4TestShapeOutputService4TestCaseOperation1Output, error) { + req, out := c.OutputService4TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService4TestShapeOutputService4TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService4TestShapeOutputService4TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + ListMember []*string `locationNameList:"item" type:"list"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService5ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService5ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService5ProtocolTest client from just a session. +// svc := outputservice5protocoltest.New(mySession) +// +// // Create a OutputService5ProtocolTest client with additional configuration +// svc := outputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService5ProtocolTest { + c := p.ClientConfig("outputservice5protocoltest", cfgs...) + return newOutputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newOutputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService5ProtocolTest { + svc := &OutputService5ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice5protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService5ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService5TestCaseOperation1 = "OperationName" + +// OutputService5TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService5TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService5TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService5TestCaseOperation1Request method. +// req, resp := client.OutputService5TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1Request(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (req *request.Request, output *OutputService5TestShapeOutputService5TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService5TestCaseOperation1, + } + + if input == nil { + input = &OutputService5TestShapeOutputService5TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService5TestShapeOutputService5TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (*OutputService5TestShapeOutputService5TestCaseOperation1Output, error) { + req, out := c.OutputService5TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService5TestShapeOutputService5TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService5TestShapeOutputService5TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + ListMember []*string `type:"list" flattened:"true"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+type OutputService6ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService6ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService6ProtocolTest client from just a session. +// svc := outputservice6protocoltest.New(mySession) +// +// // Create a OutputService6ProtocolTest client with additional configuration +// svc := outputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService6ProtocolTest { + c := p.ClientConfig("outputservice6protocoltest", cfgs...) + return newOutputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService6ProtocolTest { + svc := &OutputService6ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice6protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService6ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService6TestCaseOperation1 = "OperationName" + +// OutputService6TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService6TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService6TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService6TestCaseOperation1Request method. 
+// req, resp := client.OutputService6TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1Request(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (req *request.Request, output *OutputService6TestShapeOutputService6TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService6TestCaseOperation1, + } + + if input == nil { + input = &OutputService6TestShapeOutputService6TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService6TestShapeOutputService6TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (*OutputService6TestShapeOutputService6TestCaseOperation1Output, error) { + req, out := c.OutputService6TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService6TestShapeOutputService6TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService6TestShapeOutputService6TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Map map[string]*OutputService6TestShapeStructureType `type:"map"` +} + +type OutputService6TestShapeStructureType struct { + _ struct{} `type:"structure"` + + Foo *string `locationName:"foo" type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService7ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService7ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService7ProtocolTest client from just a session. +// svc := outputservice7protocoltest.New(mySession) +// +// // Create a OutputService7ProtocolTest client with additional configuration +// svc := outputservice7protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService7ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService7ProtocolTest { + c := p.ClientConfig("outputservice7protocoltest", cfgs...) + return newOutputService7ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService7ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService7ProtocolTest { + svc := &OutputService7ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice7protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService7ProtocolTest operation and runs any +// custom request initialization. 
+func (c *OutputService7ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService7TestCaseOperation1 = "OperationName" + +// OutputService7TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService7TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService7TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService7TestCaseOperation1Request method. +// req, resp := client.OutputService7TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1Request(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (req *request.Request, output *OutputService7TestShapeOutputService7TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService7TestCaseOperation1, + } + + if input == nil { + input = &OutputService7TestShapeOutputService7TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService7TestShapeOutputService7TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (*OutputService7TestShapeOutputService7TestCaseOperation1Output, error) { + req, out := c.OutputService7TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService7TestShapeOutputService7TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService7TestShapeOutputService7TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Map map[string]*string `type:"map" flattened:"true"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService8ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService8ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService8ProtocolTest client from just a session. +// svc := outputservice8protocoltest.New(mySession) +// +// // Create a OutputService8ProtocolTest client with additional configuration +// svc := outputservice8protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService8ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService8ProtocolTest { + c := p.ClientConfig("outputservice8protocoltest", cfgs...) + return newOutputService8ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newOutputService8ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService8ProtocolTest { + svc := &OutputService8ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice8protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService8ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService8TestCaseOperation1 = "OperationName" + +// OutputService8TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService8TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService8TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService8TestCaseOperation1Request method. +// req, resp := client.OutputService8TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1Request(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (req *request.Request, output *OutputService8TestShapeOutputService8TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService8TestCaseOperation1, + } + + if input == nil { + input = &OutputService8TestShapeOutputService8TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService8TestShapeOutputService8TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (*OutputService8TestShapeOutputService8TestCaseOperation1Output, error) { + req, out := c.OutputService8TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService8TestShapeOutputService8TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService8TestShapeOutputService8TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Map map[string]*string `locationNameKey:"foo" locationNameValue:"bar" type:"map" flattened:"true"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+type OutputService9ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService9ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService9ProtocolTest client from just a session. +// svc := outputservice9protocoltest.New(mySession) +// +// // Create a OutputService9ProtocolTest client with additional configuration +// svc := outputservice9protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService9ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService9ProtocolTest { + c := p.ClientConfig("outputservice9protocoltest", cfgs...) + return newOutputService9ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService9ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService9ProtocolTest { + svc := &OutputService9ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice9protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService9ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService9TestCaseOperation1 = "OperationName" + +// OutputService9TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService9TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService9TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService9TestCaseOperation1Request method. 
+// req, resp := client.OutputService9TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1Request(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (req *request.Request, output *OutputService9TestShapeOutputService9TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService9TestCaseOperation1, + } + + if input == nil { + input = &OutputService9TestShapeOutputService9TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService9TestShapeOutputService9TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (*OutputService9TestShapeOutputService9TestCaseOperation1Output, error) { + req, out := c.OutputService9TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService9TestShapeOutputService9TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService9TestShapeOutputService9TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Foo *string `type:"string"` +} + +// +// Tests begin here +// + +func TestOutputService1ProtocolTestScalarMembersCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("myname123falsetrue1.21.3200arequest-id")) + req, out := svc.OutputService1TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + ec2query.UnmarshalMeta(req) + ec2query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.Char) + assert.Equal(t, 1.3, *out.Double) + assert.Equal(t, false, *out.FalseBool) + assert.Equal(t, 1.2, *out.Float) + assert.Equal(t, int64(200), *out.Long) + assert.Equal(t, int64(123), *out.Num) + assert.Equal(t, "myname", *out.Str) + assert.Equal(t, true, *out.TrueBool) + +} + +func TestOutputService2ProtocolTestBlobCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("dmFsdWU=requestid")) + req, out := svc.OutputService2TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + ec2query.UnmarshalMeta(req) + ec2query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "value", string(out.Blob)) + +} + +func TestOutputService3ProtocolTestListsCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abc123requestid")) + req, out := svc.OutputService3TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + ec2query.UnmarshalMeta(req) + ec2query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + 
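+ // the decoded list members are asserted in document order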
assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + assert.Equal(t, "123", *out.ListMember[1]) + +} + +func TestOutputService4ProtocolTestListWithCustomMemberNameCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abc123requestid")) + req, out := svc.OutputService4TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + ec2query.UnmarshalMeta(req) + ec2query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + assert.Equal(t, "123", *out.ListMember[1]) + +} + +func TestOutputService5ProtocolTestFlattenedListCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abc123requestid")) + req, out := svc.OutputService5TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + ec2query.UnmarshalMeta(req) + ec2query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + assert.Equal(t, "123", *out.ListMember[1]) + +} + +func TestOutputService6ProtocolTestNormalMapCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("quxbarbazbamrequestid")) + req, out := svc.OutputService6TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + ec2query.UnmarshalMeta(req) + ec2query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bam", *out.Map["baz"].Foo) + assert.Equal(t, "bar", *out.Map["qux"].Foo) + +} + +func TestOutputService7ProtocolTestFlattenedMapCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService7ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("quxbarbazbamrequestid")) + req, out := svc.OutputService7TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + ec2query.UnmarshalMeta(req) + ec2query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bam", *out.Map["baz"]) + assert.Equal(t, "bar", *out.Map["qux"]) + +} + +func TestOutputService8ProtocolTestNamedMapCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService8ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("quxbarbazbamrequestid")) + req, out := svc.OutputService8TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + ec2query.UnmarshalMeta(req) + 
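+ // UnmarshalMeta runs before the body Unmarshal handler, matching the order used in the request lifecycle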
ec2query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bam", *out.Map["baz"]) + assert.Equal(t, "bar", *out.Map["qux"]) + +} + +func TestOutputService9ProtocolTestEmptyStringCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService9ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("requestid")) + req, out := svc.OutputService9TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + ec2query.UnmarshalMeta(req) + ec2query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "", *out.Foo) + +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go new file mode 100644 index 000000000..53831dff9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go @@ -0,0 +1,75 @@ +package protocol + +import ( + "crypto/rand" + "fmt" + "reflect" +) + +// RandReader is the random reader the protocol package will use to read +// random bytes from. This is exported for testing, and should not be used. +var RandReader = rand.Reader + +const idempotencyTokenFillTag = `idempotencyToken` + +// CanSetIdempotencyToken returns true if the struct field should be +// automatically populated with an idempotency token. +// +// Only *string and string type fields that are tagged with idempotencyToken +// which are not already set can be auto filled. +func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool { + switch u := v.Interface().(type) { + // To auto fill an idempotency token the field must be a string, + // tagged for auto fill, and have a zero value. + case *string: + return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 + case string: + return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 + } + + return false +} + +// GetIdempotencyToken returns a randomly generated idempotency token. +func GetIdempotencyToken() string { + b := make([]byte, 16) + RandReader.Read(b) + + return UUIDVersion4(b) +} + +// SetIdempotencyToken will set the value provided with an idempotency token, +// given that the value can be set. Will panic if the value is not settable.
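+// +// Illustrative sketch (tok is a hypothetical variable, not part of this file): +// var tok string +// SetIdempotencyToken(reflect.ValueOf(&tok)) +// // tok now holds a random version 4 UUID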
+func SetIdempotencyToken(v reflect.Value) { + if v.Kind() == reflect.Ptr { + if v.IsNil() && v.CanSet() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + v = reflect.Indirect(v) + + if !v.CanSet() { + panic(fmt.Sprintf("unable to set idempotency token %v", v)) + } + + b := make([]byte, 16) + _, err := rand.Read(b) + if err != nil { + // TODO handle error + return + } + + v.Set(reflect.ValueOf(UUIDVersion4(b))) +} + +// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided. +func UUIDVersion4(u []byte) string { + // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29 + // 13th character is "4" + u[6] = (u[6] | 0x40) & 0x4F + // 17th character is "8", "9", "a", or "b" + u[8] = (u[8] | 0x80) & 0xBF + + return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency_test.go new file mode 100644 index 000000000..b6ea23562 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency_test.go @@ -0,0 +1,106 @@ +package protocol_test + +import ( + "reflect" + "testing" + + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/stretchr/testify/assert" +) + +func TestCanSetIdempotencyToken(t *testing.T) { + cases := []struct { + CanSet bool + Case interface{} + }{ + { + true, + struct { + Field *string `idempotencyToken:"true"` + }{}, + }, + { + true, + struct { + Field string `idempotencyToken:"true"` + }{}, + }, + { + false, + struct { + Field *string `idempotencyToken:"true"` + }{Field: new(string)}, + }, + { + false, + struct { + Field string `idempotencyToken:"true"` + }{Field: "value"}, + }, + { + false, + struct { + Field *int `idempotencyToken:"true"` + }{}, + }, + { + false, + struct { + Field *string + }{}, + }, + } + + for i, c := range cases { + v := reflect.Indirect(reflect.ValueOf(c.Case)) + ty := v.Type() + canSet := protocol.CanSetIdempotencyToken(v.Field(0), ty.Field(0)) + assert.Equal(t, c.CanSet, canSet, "Expect case %d can set to match", i) + } +} + +func TestSetIdempotencyToken(t *testing.T) { + cases := []struct { + Case interface{} + }{ + { + &struct { + Field *string `idempotencyToken:"true"` + }{}, + }, + { + &struct { + Field string `idempotencyToken:"true"` + }{}, + }, + { + &struct { + Field *string `idempotencyToken:"true"` + }{Field: new(string)}, + }, + { + &struct { + Field string `idempotencyToken:"true"` + }{Field: ""}, + }, + } + + for i, c := range cases { + v := reflect.Indirect(reflect.ValueOf(c.Case)) + + protocol.SetIdempotencyToken(v.Field(0)) + assert.NotEmpty(t, v.Field(0).Interface(), "Expect case %d to be set", i) + } +} + +func TestUUIDVersion4(t *testing.T) { + uuid := protocol.UUIDVersion4(make([]byte, 16)) + assert.Equal(t, `00000000-0000-4000-8000-000000000000`, uuid) + + b := make([]byte, 16) + for i := 0; i < len(b); i++ { + b[i] = 1 + } + uuid = protocol.UUIDVersion4(b) + assert.Equal(t, `01010101-0101-4101-8101-010101010101`, uuid) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go new file mode 100644 index 000000000..7ad674278 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go @@ -0,0 +1,254 @@ +// Package jsonutil provides JSON serialisation of AWS requests and responses.
+package jsonutil + +import ( + "bytes" + "encoding/base64" + "fmt" + "reflect" + "sort" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +var timeType = reflect.ValueOf(time.Time{}).Type() +var byteSliceType = reflect.ValueOf([]byte{}).Type() + +// BuildJSON builds a JSON string for a given object v. +func BuildJSON(v interface{}) ([]byte, error) { + var buf bytes.Buffer + + err := buildAny(reflect.ValueOf(v), &buf, "") + return buf.Bytes(), err +} + +func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + value = reflect.Indirect(value) + if !value.IsValid() { + return nil + } + + vtype := value.Type() + + t := tag.Get("type") + if t == "" { + switch vtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if value.Type() != timeType { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := value.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := vtype.FieldByName("_"); ok { + tag = field.Tag + } + return buildStruct(value, buf, tag) + case "list": + return buildList(value, buf, tag) + case "map": + return buildMap(value, buf, tag) + default: + return buildScalar(value, buf, tag) + } +} + +func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + if !value.IsValid() { + return nil + } + + // unwrap payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := value.Type().FieldByName(payload) + tag = field.Tag + value = elemOf(value.FieldByName(payload)) + + if !value.IsValid() { + return nil + } + } + + buf.WriteByte('{') + + t := value.Type() + first := true + for i := 0; i < t.NumField(); i++ { + member := value.Field(i) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("json") == "-" { + continue + } + if field.Tag.Get("location") != "" { + continue // ignore non-body elements + } + + if protocol.CanSetIdempotencyToken(member, field) { + token := protocol.GetIdempotencyToken() + member = reflect.ValueOf(&token) + } + + if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && member.IsNil() { + continue // ignore unset fields + } + + if first { + first = false + } else { + buf.WriteByte(',') + } + + // figure out what this field is called + name := field.Name + if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + writeString(name, buf) + buf.WriteString(`:`) + + err := buildAny(member, buf, field.Tag) + if err != nil { + return err + } + + } + + buf.WriteString("}") + + return nil +} + +func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + buf.WriteString("[") + + for i := 0; i < value.Len(); i++ { + buildAny(value.Index(i), buf, "") + + if i < value.Len()-1 { + buf.WriteString(",") + } + } + + buf.WriteString("]") + + return nil +} + +type sortedValues []reflect.Value + +func (sv sortedValues) Len() int { return len(sv) } +func (sv sortedValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv sortedValues) Less(i, j int) bool { return sv[i].String() < sv[j].String() } + +func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + buf.WriteString("{") + + var sv sortedValues = value.MapKeys() + sort.Sort(sv) + + for i, k := range sv { + if i > 0 { + buf.WriteByte(',') + } + + writeString(k.String(), buf) + buf.WriteString(`:`) + + 
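+		// Recursively serialize the map value; the key was already written
+		// above as a JSON string, and keys are emitted in sorted order.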
buildAny(value.MapIndex(k), buf, "") + } + + buf.WriteString("}") + + return nil +} + +func buildScalar(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + switch value.Kind() { + case reflect.String: + writeString(value.String(), buf) + case reflect.Bool: + buf.WriteString(strconv.FormatBool(value.Bool())) + case reflect.Int64: + buf.WriteString(strconv.FormatInt(value.Int(), 10)) + case reflect.Float64: + buf.WriteString(strconv.FormatFloat(value.Float(), 'f', -1, 64)) + default: + switch value.Type() { + case timeType: + converted := value.Interface().(time.Time) + buf.WriteString(strconv.FormatInt(converted.UTC().Unix(), 10)) + case byteSliceType: + if !value.IsNil() { + converted := value.Interface().([]byte) + buf.WriteByte('"') + if len(converted) < 1024 { + // for small buffers, using Encode directly is much faster. + dst := make([]byte, base64.StdEncoding.EncodedLen(len(converted))) + base64.StdEncoding.Encode(dst, converted) + buf.Write(dst) + } else { + // for large buffers, avoid unnecessary extra temporary + // buffer space. + enc := base64.NewEncoder(base64.StdEncoding, buf) + enc.Write(converted) + enc.Close() + } + buf.WriteByte('"') + } + default: + return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type()) + } + } + return nil +} + +func writeString(s string, buf *bytes.Buffer) { + buf.WriteByte('"') + for _, r := range s { + if r == '"' { + buf.WriteString(`\"`) + } else if r == '\\' { + buf.WriteString(`\\`) + } else if r == '\b' { + buf.WriteString(`\b`) + } else if r == '\f' { + buf.WriteString(`\f`) + } else if r == '\r' { + buf.WriteString(`\r`) + } else if r == '\t' { + buf.WriteString(`\t`) + } else if r == '\n' { + buf.WriteString(`\n`) + } else if r < 32 { + fmt.Fprintf(buf, "\\u%0.4x", r) + } else { + buf.WriteRune(r) + } + } + buf.WriteByte('"') +} + +// Returns the reflection element of a value, if it is a pointer. 
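+// For example, given s := "x", elemOf(reflect.ValueOf(&s)) yields the
+// reflect.Value for s itself rather than for the pointer to it.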
+func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build_test.go new file mode 100644 index 000000000..cb9cc458f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build_test.go @@ -0,0 +1,100 @@ +package jsonutil_test + +import ( + "encoding/json" + "testing" + "time" + + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" + "github.com/stretchr/testify/assert" +) + +func S(s string) *string { + return &s +} + +func D(s int64) *int64 { + return &s +} + +func F(s float64) *float64 { + return &s +} + +func T(s time.Time) *time.Time { + return &s +} + +type J struct { + S *string + SS []string + D *int64 + F *float64 + T *time.Time +} + +var jsonTests = []struct { + in interface{} + out string + err string +}{ + { + J{}, + `{}`, + ``, + }, + { + J{ + S: S("str"), + SS: []string{"A", "B", "C"}, + D: D(123), + F: F(4.56), + T: T(time.Unix(987, 0)), + }, + `{"S":"str","SS":["A","B","C"],"D":123,"F":4.56,"T":987}`, + ``, + }, + { + J{ + S: S(`"''"`), + }, + `{"S":"\"''\""}`, + ``, + }, + { + J{ + S: S("\x00føø\u00FF\n\\\"\r\t\b\f"), + }, + `{"S":"\u0000føøÿ\n\\\"\r\t\b\f"}`, + ``, + }, +} + +func TestBuildJSON(t *testing.T) { + for _, test := range jsonTests { + out, err := jsonutil.BuildJSON(test.in) + if test.err != "" { + assert.Error(t, err) + assert.Contains(t, err.Error(), test.err) + } else { + assert.NoError(t, err) + assert.Equal(t, string(out), test.out) + } + } +} + +func BenchmarkBuildJSON(b *testing.B) { + for i := 0; i < b.N; i++ { + for _, test := range jsonTests { + jsonutil.BuildJSON(test.in) + } + } +} + +func BenchmarkStdlibJSON(b *testing.B) { + for i := 0; i < b.N; i++ { + for _, test := range jsonTests { + json.Marshal(test.in) + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go new file mode 100644 index 000000000..fea535613 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go @@ -0,0 +1,213 @@ +package jsonutil + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "reflect" + "time" +) + +// UnmarshalJSON reads a stream and unmarshals the results in object v. 
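+//
+// A minimal sketch (the Shape type is hypothetical, mirroring the example in
+// the package documentation):
+//
+//	type Shape struct {
+//		Name *string `locationName:"name" type:"string"`
+//	}
+//	var out Shape
+//	err := UnmarshalJSON(&out, strings.NewReader(`{"name":"foo"}`))
+//	// err == nil, *out.Name == "foo"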
+func UnmarshalJSON(v interface{}, stream io.Reader) error { + var out interface{} + + b, err := ioutil.ReadAll(stream) + if err != nil { + return err + } + + if len(b) == 0 { + return nil + } + + if err := json.Unmarshal(b, &out); err != nil { + return err + } + + return unmarshalAny(reflect.ValueOf(v), out, "") +} + +func unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error { + vtype := value.Type() + if vtype.Kind() == reflect.Ptr { + vtype = vtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch vtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if _, ok := value.Interface().(*time.Time); !ok { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := value.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := vtype.FieldByName("_"); ok { + tag = field.Tag + } + return unmarshalStruct(value, data, tag) + case "list": + return unmarshalList(value, data, tag) + case "map": + return unmarshalMap(value, data, tag) + default: + return unmarshalScalar(value, data, tag) + } +} + +func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + mapData, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a structure (%#v)", data) + } + + t := value.Type() + if value.Kind() == reflect.Ptr { + if value.IsNil() { // create the structure if it's nil + s := reflect.New(value.Type().Elem()) + value.Set(s) + value = s + } + + value = value.Elem() + t = t.Elem() + } + + // unwrap any payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := t.FieldByName(payload) + return unmarshalAny(value.FieldByName(payload), data, field.Tag) + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if field.PkgPath != "" { + continue // ignore unexported fields + } + + // figure out what this field is called + name := field.Name + if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + member := value.FieldByIndex(field.Index) + err := unmarshalAny(member, mapData[name], field.Tag) + if err != nil { + return err + } + } + return nil +} + +func unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + listData, ok := data.([]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a list (%#v)", data) + } + + if value.IsNil() { + l := len(listData) + value.Set(reflect.MakeSlice(value.Type(), l, l)) + } + + for i, c := range listData { + err := unmarshalAny(value.Index(i), c, "") + if err != nil { + return err + } + } + + return nil +} + +func unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + mapData, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a map (%#v)", data) + } + + if value.IsNil() { + value.Set(reflect.MakeMap(value.Type())) + } + + for k, v := range mapData { + kvalue := reflect.ValueOf(k) + vvalue := reflect.New(value.Type().Elem()).Elem() + + unmarshalAny(vvalue, v, "") + value.SetMapIndex(kvalue, vvalue) + } + + return nil +} + +func unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error { + errf := func() error { + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + + switch d := data.(type) { 
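+	// encoding/json decodes scalar values into nil, string, float64, or bool
+	// when unmarshaling into an interface{}; each case below converts one of
+	// those onto the matching SDK pointer type.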
+ case nil: + return nil // nothing to do here + case string: + switch value.Interface().(type) { + case *string: + value.Set(reflect.ValueOf(&d)) + case []byte: + b, err := base64.StdEncoding.DecodeString(d) + if err != nil { + return err + } + value.Set(reflect.ValueOf(b)) + default: + return errf() + } + case float64: + switch value.Interface().(type) { + case *int64: + di := int64(d) + value.Set(reflect.ValueOf(&di)) + case *float64: + value.Set(reflect.ValueOf(&d)) + case *time.Time: + t := time.Unix(int64(d), 0).UTC() + value.Set(reflect.ValueOf(&t)) + default: + return errf() + } + case bool: + switch value.Interface().(type) { + case *bool: + value.Set(reflect.ValueOf(&d)) + default: + return errf() + } + default: + return fmt.Errorf("unsupported JSON value (%v)", data) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/build_bench_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/build_bench_test.go new file mode 100644 index 000000000..563caa05c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/build_bench_test.go @@ -0,0 +1,71 @@ +// +build bench + +package jsonrpc_test + +import ( + "bytes" + "encoding/json" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute" +) + +func BenchmarkJSONRPCBuild_Simple_dynamodbPutItem(b *testing.B) { + svc := awstesting.NewClient() + + params := getDynamodbPutItemParams(b) + + for i := 0; i < b.N; i++ { + r := svc.NewRequest(&request.Operation{Name: "Operation"}, params, nil) + jsonrpc.Build(r) + if r.Error != nil { + b.Fatal("Unexpected error", r.Error) + } + } +} + +func BenchmarkJSONUtilBuild_Simple_dynamodbPutItem(b *testing.B) { + svc := awstesting.NewClient() + + params := getDynamodbPutItemParams(b) + + for i := 0; i < b.N; i++ { + r := svc.NewRequest(&request.Operation{Name: "Operation"}, params, nil) + _, err := jsonutil.BuildJSON(r.Params) + if err != nil { + b.Fatal("Unexpected error", err) + } + } +} + +func BenchmarkEncodingJSONMarshal_Simple_dynamodbPutItem(b *testing.B) { + params := getDynamodbPutItemParams(b) + + for i := 0; i < b.N; i++ { + buf := &bytes.Buffer{} + encoder := json.NewEncoder(buf) + if err := encoder.Encode(params); err != nil { + b.Fatal("Unexpected error", err) + } + } +} + +func getDynamodbPutItemParams(b *testing.B) *dynamodb.PutItemInput { + av, err := dynamodbattribute.ConvertToMap(struct { + Key string + Data string + }{Key: "MyKey", Data: "MyData"}) + if err != nil { + b.Fatal("benchPutItem, expect no ConvertToMap errors", err) + } + return &dynamodb.PutItemInput{ + Item: av, + TableName: aws.String("tablename"), + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/build_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/build_test.go new file mode 100644 index 000000000..51ebd68cf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/build_test.go @@ -0,0 +1,1639 @@ +package jsonrpc_test + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + 
"github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" + "github.com/aws/aws-sdk-go/private/util" + "github.com/stretchr/testify/assert" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} +var _ = io.EOF +var _ = aws.String +var _ = fmt.Println + +func init() { + protocol.RandReader = &awstesting.ZeroReader{} +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService1ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService1ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService1ProtocolTest client from just a session. +// svc := inputservice1protocoltest.New(mySession) +// +// // Create a InputService1ProtocolTest client with additional configuration +// svc := inputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService1ProtocolTest { + c := p.ClientConfig("inputservice1protocoltest", cfgs...) + return newInputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService1ProtocolTest { + svc := &InputService1ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice1protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + JSONVersion: "1.1", + TargetPrefix: "com.amazonaws.foo", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService1ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService1TestCaseOperation1 = "OperationName" + +// InputService1TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService1TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the InputService1TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService1TestCaseOperation1Request method. +// req, resp := client.InputService1TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService1ProtocolTest) InputService1TestCaseOperation1Request(input *InputService1TestShapeInputService1TestCaseOperation1Input) (req *request.Request, output *InputService1TestShapeInputService1TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService1TestCaseOperation1, + HTTPMethod: "POST", + } + + if input == nil { + input = &InputService1TestShapeInputService1TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService1TestShapeInputService1TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService1ProtocolTest) InputService1TestCaseOperation1(input *InputService1TestShapeInputService1TestCaseOperation1Input) (*InputService1TestShapeInputService1TestCaseOperation1Output, error) { + req, out := c.InputService1TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService1TestShapeInputService1TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + Name *string `type:"string"` +} + +type InputService1TestShapeInputService1TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService2ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService2ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService2ProtocolTest client from just a session. +// svc := inputservice2protocoltest.New(mySession) +// +// // Create a InputService2ProtocolTest client with additional configuration +// svc := inputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService2ProtocolTest { + c := p.ClientConfig("inputservice2protocoltest", cfgs...) + return newInputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newInputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService2ProtocolTest { + svc := &InputService2ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice2protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + JSONVersion: "1.1", + TargetPrefix: "com.amazonaws.foo", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService2ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService2TestCaseOperation1 = "OperationName" + +// InputService2TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService2TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService2TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService2TestCaseOperation1Request method. 
+// req, resp := client.InputService2TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService2ProtocolTest) InputService2TestCaseOperation1Request(input *InputService2TestShapeInputService2TestCaseOperation1Input) (req *request.Request, output *InputService2TestShapeInputService2TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService2TestCaseOperation1, + } + + if input == nil { + input = &InputService2TestShapeInputService2TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService2TestShapeInputService2TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService2ProtocolTest) InputService2TestCaseOperation1(input *InputService2TestShapeInputService2TestCaseOperation1Input) (*InputService2TestShapeInputService2TestCaseOperation1Output, error) { + req, out := c.InputService2TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService2TestShapeInputService2TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + TimeArg *time.Time `type:"timestamp" timestampFormat:"unix"` +} + +type InputService2TestShapeInputService2TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService3ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService3ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService3ProtocolTest client from just a session. +// svc := inputservice3protocoltest.New(mySession) +// +// // Create a InputService3ProtocolTest client with additional configuration +// svc := inputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService3ProtocolTest { + c := p.ClientConfig("inputservice3protocoltest", cfgs...) + return newInputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService3ProtocolTest { + svc := &InputService3ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice3protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + JSONVersion: "1.1", + TargetPrefix: "com.amazonaws.foo", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService3ProtocolTest operation and runs any +// custom request initialization. 
+func (c *InputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService3TestCaseOperation1 = "OperationName" + +// InputService3TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService3TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService3TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService3TestCaseOperation1Request method. +// req, resp := client.InputService3TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService3ProtocolTest) InputService3TestCaseOperation1Request(input *InputService3TestShapeInputShape) (req *request.Request, output *InputService3TestShapeInputService3TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService3TestCaseOperation1, + } + + if input == nil { + input = &InputService3TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService3TestShapeInputService3TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService3ProtocolTest) InputService3TestCaseOperation1(input *InputService3TestShapeInputShape) (*InputService3TestShapeInputService3TestCaseOperation1Output, error) { + req, out := c.InputService3TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService3TestCaseOperation2 = "OperationName" + +// InputService3TestCaseOperation2Request generates a "aws/request.Request" representing the +// client's request for the InputService3TestCaseOperation2 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService3TestCaseOperation2 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService3TestCaseOperation2Request method. 
+// req, resp := client.InputService3TestCaseOperation2Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService3ProtocolTest) InputService3TestCaseOperation2Request(input *InputService3TestShapeInputShape) (req *request.Request, output *InputService3TestShapeInputService3TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService3TestCaseOperation2, + } + + if input == nil { + input = &InputService3TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService3TestShapeInputService3TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService3ProtocolTest) InputService3TestCaseOperation2(input *InputService3TestShapeInputShape) (*InputService3TestShapeInputService3TestCaseOperation2Output, error) { + req, out := c.InputService3TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService3TestShapeInputService3TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService3TestShapeInputService3TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService3TestShapeInputShape struct { + _ struct{} `type:"structure"` + + // BlobArg is automatically base64 encoded/decoded by the SDK. + BlobArg []byte `type:"blob"` + + BlobMap map[string][]byte `type:"map"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService4ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService4ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService4ProtocolTest client from just a session. +// svc := inputservice4protocoltest.New(mySession) +// +// // Create a InputService4ProtocolTest client with additional configuration +// svc := inputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService4ProtocolTest { + c := p.ClientConfig("inputservice4protocoltest", cfgs...) + return newInputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
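+//
+// The jsonrpc build/unmarshal handlers pushed in the function below define
+// the client's wire protocol, and v4.SignRequestHandler signs each request.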
+func newInputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService4ProtocolTest { + svc := &InputService4ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice4protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + JSONVersion: "1.1", + TargetPrefix: "com.amazonaws.foo", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService4ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService4TestCaseOperation1 = "OperationName" + +// InputService4TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService4TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService4TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService4TestCaseOperation1Request method. +// req, resp := client.InputService4TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService4ProtocolTest) InputService4TestCaseOperation1Request(input *InputService4TestShapeInputService4TestCaseOperation1Input) (req *request.Request, output *InputService4TestShapeInputService4TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService4TestCaseOperation1, + HTTPMethod: "POST", + } + + if input == nil { + input = &InputService4TestShapeInputService4TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService4TestShapeInputService4TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService4ProtocolTest) InputService4TestCaseOperation1(input *InputService4TestShapeInputService4TestCaseOperation1Input) (*InputService4TestShapeInputService4TestCaseOperation1Output, error) { + req, out := c.InputService4TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService4TestShapeInputService4TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + ListParam [][]byte `type:"list"` +} + +type InputService4TestShapeInputService4TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. 
+// It is not safe to mutate any of the client's properties though. +type InputService5ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService5ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService5ProtocolTest client from just a session. +// svc := inputservice5protocoltest.New(mySession) +// +// // Create a InputService5ProtocolTest client with additional configuration +// svc := inputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService5ProtocolTest { + c := p.ClientConfig("inputservice5protocoltest", cfgs...) + return newInputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService5ProtocolTest { + svc := &InputService5ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice5protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + JSONVersion: "1.1", + TargetPrefix: "com.amazonaws.foo", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService5ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService5TestCaseOperation1 = "OperationName" + +// InputService5TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService5TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService5TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService5TestCaseOperation1Request method. 
+// req, resp := client.InputService5TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService5ProtocolTest) InputService5TestCaseOperation1Request(input *InputService5TestShapeInputShape) (req *request.Request, output *InputService5TestShapeInputService5TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService5TestCaseOperation1, + } + + if input == nil { + input = &InputService5TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService5TestShapeInputService5TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService5ProtocolTest) InputService5TestCaseOperation1(input *InputService5TestShapeInputShape) (*InputService5TestShapeInputService5TestCaseOperation1Output, error) { + req, out := c.InputService5TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService5TestCaseOperation2 = "OperationName" + +// InputService5TestCaseOperation2Request generates a "aws/request.Request" representing the +// client's request for the InputService5TestCaseOperation2 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService5TestCaseOperation2 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService5TestCaseOperation2Request method. +// req, resp := client.InputService5TestCaseOperation2Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService5ProtocolTest) InputService5TestCaseOperation2Request(input *InputService5TestShapeInputShape) (req *request.Request, output *InputService5TestShapeInputService5TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService5TestCaseOperation2, + } + + if input == nil { + input = &InputService5TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService5TestShapeInputService5TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService5ProtocolTest) InputService5TestCaseOperation2(input *InputService5TestShapeInputShape) (*InputService5TestShapeInputService5TestCaseOperation2Output, error) { + req, out := c.InputService5TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +const opInputService5TestCaseOperation3 = "OperationName" + +// InputService5TestCaseOperation3Request generates a "aws/request.Request" representing the +// client's request for the InputService5TestCaseOperation3 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService5TestCaseOperation3 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService5TestCaseOperation3Request method. +// req, resp := client.InputService5TestCaseOperation3Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService5ProtocolTest) InputService5TestCaseOperation3Request(input *InputService5TestShapeInputShape) (req *request.Request, output *InputService5TestShapeInputService5TestCaseOperation3Output) { + op := &request.Operation{ + Name: opInputService5TestCaseOperation3, + } + + if input == nil { + input = &InputService5TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService5TestShapeInputService5TestCaseOperation3Output{} + req.Data = output + return +} + +func (c *InputService5ProtocolTest) InputService5TestCaseOperation3(input *InputService5TestShapeInputShape) (*InputService5TestShapeInputService5TestCaseOperation3Output, error) { + req, out := c.InputService5TestCaseOperation3Request(input) + err := req.Send() + return out, err +} + +const opInputService5TestCaseOperation4 = "OperationName" + +// InputService5TestCaseOperation4Request generates a "aws/request.Request" representing the +// client's request for the InputService5TestCaseOperation4 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService5TestCaseOperation4 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService5TestCaseOperation4Request method. 
+// req, resp := client.InputService5TestCaseOperation4Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService5ProtocolTest) InputService5TestCaseOperation4Request(input *InputService5TestShapeInputShape) (req *request.Request, output *InputService5TestShapeInputService5TestCaseOperation4Output) { + op := &request.Operation{ + Name: opInputService5TestCaseOperation4, + } + + if input == nil { + input = &InputService5TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService5TestShapeInputService5TestCaseOperation4Output{} + req.Data = output + return +} + +func (c *InputService5ProtocolTest) InputService5TestCaseOperation4(input *InputService5TestShapeInputShape) (*InputService5TestShapeInputService5TestCaseOperation4Output, error) { + req, out := c.InputService5TestCaseOperation4Request(input) + err := req.Send() + return out, err +} + +const opInputService5TestCaseOperation5 = "OperationName" + +// InputService5TestCaseOperation5Request generates a "aws/request.Request" representing the +// client's request for the InputService5TestCaseOperation5 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService5TestCaseOperation5 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService5TestCaseOperation5Request method. +// req, resp := client.InputService5TestCaseOperation5Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService5ProtocolTest) InputService5TestCaseOperation5Request(input *InputService5TestShapeInputShape) (req *request.Request, output *InputService5TestShapeInputService5TestCaseOperation5Output) { + op := &request.Operation{ + Name: opInputService5TestCaseOperation5, + } + + if input == nil { + input = &InputService5TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService5TestShapeInputService5TestCaseOperation5Output{} + req.Data = output + return +} + +func (c *InputService5ProtocolTest) InputService5TestCaseOperation5(input *InputService5TestShapeInputShape) (*InputService5TestShapeInputService5TestCaseOperation5Output, error) { + req, out := c.InputService5TestCaseOperation5Request(input) + err := req.Send() + return out, err +} + +const opInputService5TestCaseOperation6 = "OperationName" + +// InputService5TestCaseOperation6Request generates a "aws/request.Request" representing the +// client's request for the InputService5TestCaseOperation6 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService5TestCaseOperation6 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService5TestCaseOperation6Request method. +// req, resp := client.InputService5TestCaseOperation6Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService5ProtocolTest) InputService5TestCaseOperation6Request(input *InputService5TestShapeInputShape) (req *request.Request, output *InputService5TestShapeInputService5TestCaseOperation6Output) { + op := &request.Operation{ + Name: opInputService5TestCaseOperation6, + } + + if input == nil { + input = &InputService5TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService5TestShapeInputService5TestCaseOperation6Output{} + req.Data = output + return +} + +func (c *InputService5ProtocolTest) InputService5TestCaseOperation6(input *InputService5TestShapeInputShape) (*InputService5TestShapeInputService5TestCaseOperation6Output, error) { + req, out := c.InputService5TestCaseOperation6Request(input) + err := req.Send() + return out, err +} + +type InputService5TestShapeInputService5TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService5TestShapeInputService5TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService5TestShapeInputService5TestCaseOperation3Output struct { + _ struct{} `type:"structure"` +} + +type InputService5TestShapeInputService5TestCaseOperation4Output struct { + _ struct{} `type:"structure"` +} + +type InputService5TestShapeInputService5TestCaseOperation5Output struct { + _ struct{} `type:"structure"` +} + +type InputService5TestShapeInputService5TestCaseOperation6Output struct { + _ struct{} `type:"structure"` +} + +type InputService5TestShapeInputShape struct { + _ struct{} `type:"structure"` + + RecursiveStruct *InputService5TestShapeRecursiveStructType `type:"structure"` +} + +type InputService5TestShapeRecursiveStructType struct { + _ struct{} `type:"structure"` + + NoRecurse *string `type:"string"` + + RecursiveList []*InputService5TestShapeRecursiveStructType `type:"list"` + + RecursiveMap map[string]*InputService5TestShapeRecursiveStructType `type:"map"` + + RecursiveStruct *InputService5TestShapeRecursiveStructType `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService6ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService6ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService6ProtocolTest client from just a session. 
+// svc := inputservice6protocoltest.New(mySession) +// +// // Create a InputService6ProtocolTest client with additional configuration +// svc := inputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService6ProtocolTest { + c := p.ClientConfig("inputservice6protocoltest", cfgs...) + return newInputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService6ProtocolTest { + svc := &InputService6ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice6protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + JSONVersion: "1.1", + TargetPrefix: "com.amazonaws.foo", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService6ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService6TestCaseOperation1 = "OperationName" + +// InputService6TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService6TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService6TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService6TestCaseOperation1Request method. 
+// req, resp := client.InputService6TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService6ProtocolTest) InputService6TestCaseOperation1Request(input *InputService6TestShapeInputService6TestCaseOperation1Input) (req *request.Request, output *InputService6TestShapeInputService6TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService6TestCaseOperation1, + HTTPMethod: "POST", + } + + if input == nil { + input = &InputService6TestShapeInputService6TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService6TestShapeInputService6TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService6ProtocolTest) InputService6TestCaseOperation1(input *InputService6TestShapeInputService6TestCaseOperation1Input) (*InputService6TestShapeInputService6TestCaseOperation1Output, error) { + req, out := c.InputService6TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService6TestShapeInputService6TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + Map map[string]*string `type:"map"` +} + +type InputService6TestShapeInputService6TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService7ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService7ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService7ProtocolTest client from just a session. +// svc := inputservice7protocoltest.New(mySession) +// +// // Create a InputService7ProtocolTest client with additional configuration +// svc := inputservice7protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService7ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService7ProtocolTest { + c := p.ClientConfig("inputservice7protocoltest", cfgs...) + return newInputService7ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService7ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService7ProtocolTest { + svc := &InputService7ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice7protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + JSONVersion: "", + TargetPrefix: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService7ProtocolTest operation and runs any +// custom request initialization. 
+func (c *InputService7ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService7TestCaseOperation1 = "OperationName" + +// InputService7TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService7TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService7TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService7TestCaseOperation1Request method. +// req, resp := client.InputService7TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService7ProtocolTest) InputService7TestCaseOperation1Request(input *InputService7TestShapeInputShape) (req *request.Request, output *InputService7TestShapeInputService7TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService7TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService7TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService7TestShapeInputService7TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService7ProtocolTest) InputService7TestCaseOperation1(input *InputService7TestShapeInputShape) (*InputService7TestShapeInputService7TestCaseOperation1Output, error) { + req, out := c.InputService7TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService7TestCaseOperation2 = "OperationName" + +// InputService7TestCaseOperation2Request generates a "aws/request.Request" representing the +// client's request for the InputService7TestCaseOperation2 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService7TestCaseOperation2 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService7TestCaseOperation2Request method. 
+// req, resp := client.InputService7TestCaseOperation2Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService7ProtocolTest) InputService7TestCaseOperation2Request(input *InputService7TestShapeInputShape) (req *request.Request, output *InputService7TestShapeInputService7TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService7TestCaseOperation2, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService7TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService7TestShapeInputService7TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService7ProtocolTest) InputService7TestCaseOperation2(input *InputService7TestShapeInputShape) (*InputService7TestShapeInputService7TestCaseOperation2Output, error) { + req, out := c.InputService7TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService7TestShapeInputService7TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService7TestShapeInputService7TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService7TestShapeInputShape struct { + _ struct{} `type:"structure"` + + Token *string `type:"string" idempotencyToken:"true"` +} + +// +// Tests begin here +// + +func TestInputService1ProtocolTestScalarMembersCase1(t *testing.T) { + sess := session.New() + svc := NewInputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService1TestShapeInputService1TestCaseOperation1Input{ + Name: aws.String("myname"), + } + req, _ := svc.InputService1TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + jsonrpc.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"Name":"myname"}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) + assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + +} + +func TestInputService2ProtocolTestTimestampValuesCase1(t *testing.T) { + sess := session.New() + svc := NewInputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService2TestShapeInputService2TestCaseOperation1Input{ + TimeArg: aws.Time(time.Unix(1422172800, 0)), + } + req, _ := svc.InputService2TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + jsonrpc.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"TimeArg":1422172800}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) + assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + +} + +func TestInputService3ProtocolTestBase64EncodedBlobsCase1(t *testing.T) { + sess := session.New() + svc := NewInputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := 
&InputService3TestShapeInputShape{ + BlobArg: []byte("foo"), + } + req, _ := svc.InputService3TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + jsonrpc.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"BlobArg":"Zm9v"}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) + assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + +} + +func TestInputService3ProtocolTestBase64EncodedBlobsCase2(t *testing.T) { + sess := session.New() + svc := NewInputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService3TestShapeInputShape{ + BlobMap: map[string][]byte{ + "key1": []byte("foo"), + "key2": []byte("bar"), + }, + } + req, _ := svc.InputService3TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + jsonrpc.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"BlobMap":{"key1":"Zm9v","key2":"YmFy"}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) + assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + +} + +func TestInputService4ProtocolTestNestedBlobsCase1(t *testing.T) { + sess := session.New() + svc := NewInputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService4TestShapeInputService4TestCaseOperation1Input{ + ListParam: [][]byte{ + []byte("foo"), + []byte("bar"), + }, + } + req, _ := svc.InputService4TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + jsonrpc.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"ListParam":["Zm9v","YmFy"]}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) + assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + +} + +func TestInputService5ProtocolTestRecursiveShapesCase1(t *testing.T) { + sess := session.New() + svc := NewInputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService5TestShapeInputShape{ + RecursiveStruct: &InputService5TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + } + req, _ := svc.InputService5TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + jsonrpc.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"RecursiveStruct":{"NoRecurse":"foo"}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) + assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + +} + +func TestInputService5ProtocolTestRecursiveShapesCase2(t *testing.T) { + sess := session.New() + svc := 
NewInputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService5TestShapeInputShape{ + RecursiveStruct: &InputService5TestShapeRecursiveStructType{ + RecursiveStruct: &InputService5TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + }, + } + req, _ := svc.InputService5TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + jsonrpc.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveStruct":{"NoRecurse":"foo"}}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) + assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + +} + +func TestInputService5ProtocolTestRecursiveShapesCase3(t *testing.T) { + sess := session.New() + svc := NewInputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService5TestShapeInputShape{ + RecursiveStruct: &InputService5TestShapeRecursiveStructType{ + RecursiveStruct: &InputService5TestShapeRecursiveStructType{ + RecursiveStruct: &InputService5TestShapeRecursiveStructType{ + RecursiveStruct: &InputService5TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + }, + }, + }, + } + req, _ := svc.InputService5TestCaseOperation3Request(input) + r := req.HTTPRequest + + // build request + jsonrpc.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveStruct":{"RecursiveStruct":{"RecursiveStruct":{"NoRecurse":"foo"}}}}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) + assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + +} + +func TestInputService5ProtocolTestRecursiveShapesCase4(t *testing.T) { + sess := session.New() + svc := NewInputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService5TestShapeInputShape{ + RecursiveStruct: &InputService5TestShapeRecursiveStructType{ + RecursiveList: []*InputService5TestShapeRecursiveStructType{ + { + NoRecurse: aws.String("foo"), + }, + { + NoRecurse: aws.String("bar"), + }, + }, + }, + } + req, _ := svc.InputService5TestCaseOperation4Request(input) + r := req.HTTPRequest + + // build request + jsonrpc.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveList":[{"NoRecurse":"foo"},{"NoRecurse":"bar"}]}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) + assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + +} + +func TestInputService5ProtocolTestRecursiveShapesCase5(t *testing.T) { + sess := session.New() + svc := NewInputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService5TestShapeInputShape{ + RecursiveStruct: &InputService5TestShapeRecursiveStructType{ + RecursiveList: 
[]*InputService5TestShapeRecursiveStructType{ + { + NoRecurse: aws.String("foo"), + }, + { + RecursiveStruct: &InputService5TestShapeRecursiveStructType{ + NoRecurse: aws.String("bar"), + }, + }, + }, + }, + } + req, _ := svc.InputService5TestCaseOperation5Request(input) + r := req.HTTPRequest + + // build request + jsonrpc.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveList":[{"NoRecurse":"foo"},{"RecursiveStruct":{"NoRecurse":"bar"}}]}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) + assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + +} + +func TestInputService5ProtocolTestRecursiveShapesCase6(t *testing.T) { + sess := session.New() + svc := NewInputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService5TestShapeInputShape{ + RecursiveStruct: &InputService5TestShapeRecursiveStructType{ + RecursiveMap: map[string]*InputService5TestShapeRecursiveStructType{ + "bar": { + NoRecurse: aws.String("bar"), + }, + "foo": { + NoRecurse: aws.String("foo"), + }, + }, + }, + } + req, _ := svc.InputService5TestCaseOperation6Request(input) + r := req.HTTPRequest + + // build request + jsonrpc.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveMap":{"foo":{"NoRecurse":"foo"},"bar":{"NoRecurse":"bar"}}}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) + assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + +} + +func TestInputService6ProtocolTestEmptyMapsCase1(t *testing.T) { + sess := session.New() + svc := NewInputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService6TestShapeInputService6TestCaseOperation1Input{ + Map: map[string]*string{}, + } + req, _ := svc.InputService6TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + jsonrpc.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"Map":{}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) + assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + +} + +func TestInputService7ProtocolTestIdempotencyTokenAutoFillCase1(t *testing.T) { + sess := session.New() + svc := NewInputService7ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService7TestShapeInputShape{ + Token: aws.String("abc123"), + } + req, _ := svc.InputService7TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + jsonrpc.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"Token":"abc123"}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // 
assert headers + +} + +func TestInputService7ProtocolTestIdempotencyTokenAutoFillCase2(t *testing.T) { + sess := session.New() + svc := NewInputService7ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService7TestShapeInputShape{} + req, _ := svc.InputService7TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + jsonrpc.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"Token":"00000000-0000-4000-8000-000000000000"}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go new file mode 100644 index 000000000..7aff0e0fa --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go @@ -0,0 +1,111 @@ +// Package jsonrpc provides JSON RPC utilities for serialisation of AWS +// requests and responses. +package jsonrpc + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/json.json build_test.go +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/json.json unmarshal_test.go + +import ( + "encoding/json" + "io/ioutil" + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +var emptyJSON = []byte("{}") + +// BuildHandler is a named request handler for building jsonrpc protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.jsonrpc.Build", Fn: Build} + +// UnmarshalHandler is a named request handler for unmarshaling jsonrpc protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.jsonrpc.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling jsonrpc protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.jsonrpc.UnmarshalMeta", Fn: UnmarshalMeta} + +// UnmarshalErrorHandler is a named request handler for unmarshaling jsonrpc protocol request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.jsonrpc.UnmarshalError", Fn: UnmarshalError} + +// Build builds a JSON payload for a JSON RPC request. +func Build(req *request.Request) { + var buf []byte + var err error + if req.ParamsFilled() { + buf, err = jsonutil.BuildJSON(req.Params) + if err != nil { + req.Error = awserr.New("SerializationError", "failed encoding JSON RPC request", err) + return + } + } else { + buf = emptyJSON + } + + if req.ClientInfo.TargetPrefix != "" || string(buf) != "{}" { + req.SetBufferBody(buf) + } + + if req.ClientInfo.TargetPrefix != "" { + target := req.ClientInfo.TargetPrefix + "." + req.Operation.Name + req.HTTPRequest.Header.Add("X-Amz-Target", target) + } + if req.ClientInfo.JSONVersion != "" { + jsonVersion := req.ClientInfo.JSONVersion + req.HTTPRequest.Header.Add("Content-Type", "application/x-amz-json-"+jsonVersion) + } +} + +// Unmarshal unmarshals a response for a JSON RPC service. 
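+//
+// As a sketch grounded in the protocol tests below: a 200 response whose body
+// is {"Str": "myname", "Num": 123} is decoded by jsonutil.UnmarshalJSON
+// directly into the output struct that the generated Request method stored
+// in req.Data.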
+func Unmarshal(req *request.Request) { + defer req.HTTPResponse.Body.Close() + if req.DataFilled() { + err := jsonutil.UnmarshalJSON(req.Data, req.HTTPResponse.Body) + if err != nil { + req.Error = awserr.New("SerializationError", "failed decoding JSON RPC response", err) + } + } + return +} + +// UnmarshalMeta unmarshals headers from a response for a JSON RPC service. +func UnmarshalMeta(req *request.Request) { + rest.UnmarshalMeta(req) +} + +// UnmarshalError unmarshals an error response for a JSON RPC service. +func UnmarshalError(req *request.Request) { + defer req.HTTPResponse.Body.Close() + bodyBytes, err := ioutil.ReadAll(req.HTTPResponse.Body) + if err != nil { + req.Error = awserr.New("SerializationError", "failed reading JSON RPC error response", err) + return + } + if len(bodyBytes) == 0 { + req.Error = awserr.NewRequestFailure( + awserr.New("SerializationError", req.HTTPResponse.Status, nil), + req.HTTPResponse.StatusCode, + "", + ) + return + } + var jsonErr jsonErrorResponse + if err := json.Unmarshal(bodyBytes, &jsonErr); err != nil { + req.Error = awserr.New("SerializationError", "failed decoding JSON RPC error response", err) + return + } + + codes := strings.SplitN(jsonErr.Code, "#", 2) + req.Error = awserr.NewRequestFailure( + awserr.New(codes[len(codes)-1], jsonErr.Message, nil), + req.HTTPResponse.StatusCode, + req.RequestID, + ) +} + +type jsonErrorResponse struct { + Code string `json:"__type"` + Message string `json:"message"` +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_test.go new file mode 100644 index 000000000..d6cbc7566 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_test.go @@ -0,0 +1,967 @@ +package jsonrpc_test + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" + "github.com/aws/aws-sdk-go/private/util" + "github.com/stretchr/testify/assert" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} +var _ = io.EOF +var _ = aws.String +var _ = fmt.Println + +func init() { + protocol.RandReader = &awstesting.ZeroReader{} +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService1ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService1ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService1ProtocolTest client from just a session. 
+// svc := outputservice1protocoltest.New(mySession) +// +// // Create a OutputService1ProtocolTest client with additional configuration +// svc := outputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService1ProtocolTest { + c := p.ClientConfig("outputservice1protocoltest", cfgs...) + return newOutputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService1ProtocolTest { + svc := &OutputService1ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice1protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + JSONVersion: "", + TargetPrefix: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService1ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService1TestCaseOperation1 = "OperationName" + +// OutputService1TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService1TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService1TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService1TestCaseOperation1Request method. 
+// req, resp := client.OutputService1TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1Request(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (req *request.Request, output *OutputService1TestShapeOutputService1TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService1TestCaseOperation1, + } + + if input == nil { + input = &OutputService1TestShapeOutputService1TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService1TestShapeOutputService1TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (*OutputService1TestShapeOutputService1TestCaseOperation1Output, error) { + req, out := c.OutputService1TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService1TestShapeOutputService1TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService1TestShapeOutputService1TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Char *string `type:"character"` + + Double *float64 `type:"double"` + + FalseBool *bool `type:"boolean"` + + Float *float64 `type:"float"` + + Long *int64 `type:"long"` + + Num *int64 `type:"integer"` + + Str *string `type:"string"` + + TrueBool *bool `type:"boolean"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService2ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService2ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService2ProtocolTest client from just a session. +// svc := outputservice2protocoltest.New(mySession) +// +// // Create a OutputService2ProtocolTest client with additional configuration +// svc := outputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService2ProtocolTest { + c := p.ClientConfig("outputservice2protocoltest", cfgs...) + return newOutputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
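+//
+// A sketch (assuming a *session.Session named sess) of swapping a protocol
+// handler after construction, using the same calls the generated request
+// methods use elsewhere in this change:
+//
+//	svc := NewOutputService2ProtocolTest(sess)
+//	svc.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
+//	svc.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)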
+func newOutputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService2ProtocolTest { + svc := &OutputService2ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice2protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + JSONVersion: "", + TargetPrefix: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService2ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService2TestCaseOperation1 = "OperationName" + +// OutputService2TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService2TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService2TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService2TestCaseOperation1Request method. +// req, resp := client.OutputService2TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1Request(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (req *request.Request, output *OutputService2TestShapeOutputService2TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService2TestCaseOperation1, + } + + if input == nil { + input = &OutputService2TestShapeOutputService2TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService2TestShapeOutputService2TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (*OutputService2TestShapeOutputService2TestCaseOperation1Output, error) { + req, out := c.OutputService2TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService2TestShapeBlobContainer struct { + _ struct{} `type:"structure"` + + // Foo is automatically base64 encoded/decoded by the SDK. 
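+	//
+	// For example, a wire value of "dGhlcmUh" decodes to the raw bytes
+	// "there!", as asserted by TestOutputService2ProtocolTestBlobMembersCase1
+	// below.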
+ Foo []byte `locationName:"foo" type:"blob"` +} + +type OutputService2TestShapeOutputService2TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService2TestShapeOutputService2TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + // BlobMember is automatically base64 encoded/decoded by the SDK. + BlobMember []byte `type:"blob"` + + StructMember *OutputService2TestShapeBlobContainer `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService3ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService3ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService3ProtocolTest client from just a session. +// svc := outputservice3protocoltest.New(mySession) +// +// // Create a OutputService3ProtocolTest client with additional configuration +// svc := outputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService3ProtocolTest { + c := p.ClientConfig("outputservice3protocoltest", cfgs...) + return newOutputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService3ProtocolTest { + svc := &OutputService3ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice3protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + JSONVersion: "", + TargetPrefix: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService3ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService3TestCaseOperation1 = "OperationName" + +// OutputService3TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService3TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService3TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the OutputService3TestCaseOperation1Request method. +// req, resp := client.OutputService3TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1Request(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (req *request.Request, output *OutputService3TestShapeOutputService3TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService3TestCaseOperation1, + } + + if input == nil { + input = &OutputService3TestShapeOutputService3TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService3TestShapeOutputService3TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (*OutputService3TestShapeOutputService3TestCaseOperation1Output, error) { + req, out := c.OutputService3TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService3TestShapeOutputService3TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService3TestShapeOutputService3TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + StructMember *OutputService3TestShapeTimeContainer `type:"structure"` + + TimeMember *time.Time `type:"timestamp" timestampFormat:"unix"` +} + +type OutputService3TestShapeTimeContainer struct { + _ struct{} `type:"structure"` + + Foo *time.Time `locationName:"foo" type:"timestamp" timestampFormat:"unix"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService4ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService4ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService4ProtocolTest client from just a session. +// svc := outputservice4protocoltest.New(mySession) +// +// // Create a OutputService4ProtocolTest client with additional configuration +// svc := outputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService4ProtocolTest { + c := p.ClientConfig("outputservice4protocoltest", cfgs...) + return newOutputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newOutputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService4ProtocolTest { + svc := &OutputService4ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice4protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + JSONVersion: "", + TargetPrefix: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService4ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService4TestCaseOperation1 = "OperationName" + +// OutputService4TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService4TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService4TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService4TestCaseOperation1Request method. +// req, resp := client.OutputService4TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1Request(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (req *request.Request, output *OutputService4TestShapeOutputShape) { + op := &request.Operation{ + Name: opOutputService4TestCaseOperation1, + } + + if input == nil { + input = &OutputService4TestShapeOutputService4TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService4TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (*OutputService4TestShapeOutputShape, error) { + req, out := c.OutputService4TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opOutputService4TestCaseOperation2 = "OperationName" + +// OutputService4TestCaseOperation2Request generates a "aws/request.Request" representing the +// client's request for the OutputService4TestCaseOperation2 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService4TestCaseOperation2 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService4TestCaseOperation2Request method. +// req, resp := client.OutputService4TestCaseOperation2Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation2Request(input *OutputService4TestShapeOutputService4TestCaseOperation2Input) (req *request.Request, output *OutputService4TestShapeOutputShape) { + op := &request.Operation{ + Name: opOutputService4TestCaseOperation2, + } + + if input == nil { + input = &OutputService4TestShapeOutputService4TestCaseOperation2Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService4TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation2(input *OutputService4TestShapeOutputService4TestCaseOperation2Input) (*OutputService4TestShapeOutputShape, error) { + req, out := c.OutputService4TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type OutputService4TestShapeOutputService4TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService4TestShapeOutputService4TestCaseOperation2Input struct { + _ struct{} `type:"structure"` +} + +type OutputService4TestShapeOutputShape struct { + _ struct{} `type:"structure"` + + ListMember []*string `type:"list"` + + ListMemberMap []map[string]*string `type:"list"` + + ListMemberStruct []*OutputService4TestShapeStructType `type:"list"` +} + +type OutputService4TestShapeStructType struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService5ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService5ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService5ProtocolTest client from just a session. +// svc := outputservice5protocoltest.New(mySession) +// +// // Create a OutputService5ProtocolTest client with additional configuration +// svc := outputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService5ProtocolTest { + c := p.ClientConfig("outputservice5protocoltest", cfgs...) + return newOutputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newOutputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService5ProtocolTest { + svc := &OutputService5ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice5protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + JSONVersion: "", + TargetPrefix: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService5ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService5TestCaseOperation1 = "OperationName" + +// OutputService5TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService5TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService5TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService5TestCaseOperation1Request method. +// req, resp := client.OutputService5TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1Request(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (req *request.Request, output *OutputService5TestShapeOutputService5TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService5TestCaseOperation1, + } + + if input == nil { + input = &OutputService5TestShapeOutputService5TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService5TestShapeOutputService5TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (*OutputService5TestShapeOutputService5TestCaseOperation1Output, error) { + req, out := c.OutputService5TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService5TestShapeOutputService5TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService5TestShapeOutputService5TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + MapMember map[string][]*int64 `type:"map"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
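+//
+// A minimal sketch of safe concurrent use (assuming a *session.Session named
+// sess; returned errors ignored for brevity):
+//
+//	svc := NewOutputService6ProtocolTest(sess)
+//	go svc.OutputService6TestCaseOperation1(nil)
+//	go svc.OutputService6TestCaseOperation1(nil)
+//
+// Mutating svc.Config or swapping svc.Handlers concurrently is not safe.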
+type OutputService6ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService6ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService6ProtocolTest client from just a session. +// svc := outputservice6protocoltest.New(mySession) +// +// // Create a OutputService6ProtocolTest client with additional configuration +// svc := outputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService6ProtocolTest { + c := p.ClientConfig("outputservice6protocoltest", cfgs...) + return newOutputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService6ProtocolTest { + svc := &OutputService6ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice6protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + JSONVersion: "", + TargetPrefix: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService6ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService6TestCaseOperation1 = "OperationName" + +// OutputService6TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService6TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService6TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService6TestCaseOperation1Request method. 
+// req, resp := client.OutputService6TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1Request(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (req *request.Request, output *OutputService6TestShapeOutputService6TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService6TestCaseOperation1, + } + + if input == nil { + input = &OutputService6TestShapeOutputService6TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService6TestShapeOutputService6TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (*OutputService6TestShapeOutputService6TestCaseOperation1Output, error) { + req, out := c.OutputService6TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService6TestShapeOutputService6TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService6TestShapeOutputService6TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + StrType *string `type:"string"` +} + +// +// Tests begin here +// + +func TestOutputService1ProtocolTestScalarMembersCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"Str\": \"myname\", \"Num\": 123, \"FalseBool\": false, \"TrueBool\": true, \"Float\": 1.2, \"Double\": 1.3, \"Long\": 200, \"Char\": \"a\"}")) + req, out := svc.OutputService1TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + jsonrpc.UnmarshalMeta(req) + jsonrpc.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.Char) + assert.Equal(t, 1.3, *out.Double) + assert.Equal(t, false, *out.FalseBool) + assert.Equal(t, 1.2, *out.Float) + assert.Equal(t, int64(200), *out.Long) + assert.Equal(t, int64(123), *out.Num) + assert.Equal(t, "myname", *out.Str) + assert.Equal(t, true, *out.TrueBool) + +} + +func TestOutputService2ProtocolTestBlobMembersCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"BlobMember\": \"aGkh\", \"StructMember\": {\"foo\": \"dGhlcmUh\"}}")) + req, out := svc.OutputService2TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + jsonrpc.UnmarshalMeta(req) + jsonrpc.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "hi!", string(out.BlobMember)) + assert.Equal(t, "there!", string(out.StructMember.Foo)) + +} + +func TestOutputService3ProtocolTestTimestampMembersCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"TimeMember\": 1398796238, \"StructMember\": {\"foo\": 1398796238}}")) + req, out := 
svc.OutputService3TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + jsonrpc.UnmarshalMeta(req) + jsonrpc.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, time.Unix(1.398796238e+09, 0).UTC().String(), out.StructMember.Foo.String()) + assert.Equal(t, time.Unix(1.398796238e+09, 0).UTC().String(), out.TimeMember.String()) + +} + +func TestOutputService4ProtocolTestListsCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"ListMember\": [\"a\", \"b\"]}")) + req, out := svc.OutputService4TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + jsonrpc.UnmarshalMeta(req) + jsonrpc.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.ListMember[0]) + assert.Equal(t, "b", *out.ListMember[1]) + +} + +func TestOutputService4ProtocolTestListsCase2(t *testing.T) { + sess := session.New() + svc := NewOutputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"ListMember\": [\"a\", null], \"ListMemberMap\": [{}, null, null, {}], \"ListMemberStruct\": [{}, null, null, {}]}")) + req, out := svc.OutputService4TestCaseOperation2Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + jsonrpc.UnmarshalMeta(req) + jsonrpc.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.ListMember[0]) + assert.Nil(t, out.ListMember[1]) + assert.Nil(t, out.ListMemberMap[1]) + assert.Nil(t, out.ListMemberMap[2]) + assert.Nil(t, out.ListMemberStruct[1]) + assert.Nil(t, out.ListMemberStruct[2]) + +} + +func TestOutputService5ProtocolTestMapsCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"MapMember\": {\"a\": [1, 2], \"b\": [3, 4]}}")) + req, out := svc.OutputService5TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + jsonrpc.UnmarshalMeta(req) + jsonrpc.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, int64(1), *out.MapMember["a"][0]) + assert.Equal(t, int64(2), *out.MapMember["a"][1]) + assert.Equal(t, int64(3), *out.MapMember["b"][0]) + assert.Equal(t, int64(4), *out.MapMember["b"][1]) + +} + +func TestOutputService6ProtocolTestIgnoresExtraDataCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"foo\": \"bar\"}")) + req, out := svc.OutputService6TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal 
response + jsonrpc.UnmarshalMeta(req) + jsonrpc.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol_test.go new file mode 100644 index 000000000..6ba44124a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol_test.go @@ -0,0 +1,203 @@ +package protocol_test + +import ( + "fmt" + "net/http" + "net/url" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/ec2query" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/protocol/rest" + "github.com/aws/aws-sdk-go/private/protocol/restjson" + "github.com/aws/aws-sdk-go/private/protocol/restxml" +) + +func xmlData(set bool, b []byte, size, delta int) { + if !set { + copy(b, []byte("<root>")) + } + if size == 0 { + copy(b[delta-len("</root>"):], []byte("</root>")) + } +} + +func jsonData(set bool, b []byte, size, delta int) { + if !set { + copy(b, []byte("{\"A\": \"")) + } + if size == 0 { + copy(b[delta-len("\"}"):], []byte("\"}")) + } +} + +func buildNewRequest(data interface{}) *request.Request { + v := url.Values{} + v.Set("test", "TEST") + v.Add("test1", "TEST1") + + req := &request.Request{ + HTTPRequest: &http.Request{ + Header: make(http.Header), + Body: &awstesting.ReadCloser{Size: 2048}, + URL: &url.URL{ + RawQuery: v.Encode(), + }, + }, + Params: &struct { + LocationName string `locationName:"test"` + }{ + "Test", + }, + ClientInfo: metadata.ClientInfo{ + ServiceName: "test", + TargetPrefix: "test", + JSONVersion: "test", + APIVersion: "test", + Endpoint: "test", + SigningName: "test", + SigningRegion: "test", + }, + Operation: &request.Operation{ + Name: "test", + }, + } + req.HTTPResponse = &http.Response{ + Body: &awstesting.ReadCloser{Size: 2048}, + Header: http.Header{ + "X-Amzn-Requestid": []string{"1"}, + }, + StatusCode: http.StatusOK, + } + + if data == nil { + data = &struct { + _ struct{} `type:"structure"` + LocationName *string `locationName:"testName"` + Location *string `location:"statusCode"` + A *string `type:"string"` + }{} + } + + req.Data = data + + return req +} + +type expected struct { + dataType int + closed bool + size int + errExists bool +} + +const ( + jsonType = iota + xmlType +) + +func checkForLeak(data interface{}, build, fn func(*request.Request), t *testing.T, result expected) { + req := buildNewRequest(data) + reader := req.HTTPResponse.Body.(*awstesting.ReadCloser) + switch result.dataType { + case jsonType: + reader.FillData = jsonData + case xmlType: + reader.FillData = xmlData + } + build(req) + fn(req) + + if result.errExists { + assert.NotNil(t, req.Error) + } else { + fmt.Println(req.Error) + assert.Nil(t, req.Error) + } + + assert.Equal(t, reader.Closed, result.closed) + assert.Equal(t, reader.Size, result.size) +} + +func TestJSONRpc(t *testing.T) { + checkForLeak(nil, jsonrpc.Build, jsonrpc.Unmarshal, t, expected{jsonType, true, 0, false}) + checkForLeak(nil, jsonrpc.Build, jsonrpc.UnmarshalMeta, t, expected{jsonType, false, 2048, false}) + checkForLeak(nil, jsonrpc.Build, jsonrpc.UnmarshalError, t, expected{jsonType, true, 0, true}) +} + +func TestQuery(t
*testing.T) { + checkForLeak(nil, query.Build, query.Unmarshal, t, expected{jsonType, true, 0, false}) + checkForLeak(nil, query.Build, query.UnmarshalMeta, t, expected{jsonType, false, 2048, false}) + checkForLeak(nil, query.Build, query.UnmarshalError, t, expected{jsonType, true, 0, true}) +} + +func TestRest(t *testing.T) { + // case 1: Payload io.ReadSeeker + checkForLeak(nil, rest.Build, rest.Unmarshal, t, expected{jsonType, false, 2048, false}) + checkForLeak(nil, query.Build, query.UnmarshalMeta, t, expected{jsonType, false, 2048, false}) + + // case 2: Payload *string + // should close the body + dataStr := struct { + _ struct{} `type:"structure" payload:"Payload"` + LocationName *string `locationName:"testName"` + Location *string `location:"statusCode"` + A *string `type:"string"` + Payload *string `locationName:"payload" type:"blob" required:"true"` + }{} + checkForLeak(&dataStr, rest.Build, rest.Unmarshal, t, expected{jsonType, true, 0, false}) + checkForLeak(&dataStr, query.Build, query.UnmarshalMeta, t, expected{jsonType, false, 2048, false}) + + // case 3: Payload []byte + // should close the body + dataBytes := struct { + _ struct{} `type:"structure" payload:"Payload"` + LocationName *string `locationName:"testName"` + Location *string `location:"statusCode"` + A *string `type:"string"` + Payload []byte `locationName:"payload" type:"blob" required:"true"` + }{} + checkForLeak(&dataBytes, rest.Build, rest.Unmarshal, t, expected{jsonType, true, 0, false}) + checkForLeak(&dataBytes, query.Build, query.UnmarshalMeta, t, expected{jsonType, false, 2048, false}) + + // case 4: Payload unsupported type + // should close the body + dataUnsupported := struct { + _ struct{} `type:"structure" payload:"Payload"` + LocationName *string `locationName:"testName"` + Location *string `location:"statusCode"` + A *string `type:"string"` + Payload string `locationName:"payload" type:"blob" required:"true"` + }{} + checkForLeak(&dataUnsupported, rest.Build, rest.Unmarshal, t, expected{jsonType, true, 0, true}) + checkForLeak(&dataUnsupported, query.Build, query.UnmarshalMeta, t, expected{jsonType, false, 2048, false}) +} + +func TestRestJSON(t *testing.T) { + checkForLeak(nil, restjson.Build, restjson.Unmarshal, t, expected{jsonType, true, 0, false}) + checkForLeak(nil, restjson.Build, restjson.UnmarshalMeta, t, expected{jsonType, false, 2048, false}) + checkForLeak(nil, restjson.Build, restjson.UnmarshalError, t, expected{jsonType, true, 0, true}) +} + +func TestRestXML(t *testing.T) { + checkForLeak(nil, restxml.Build, restxml.Unmarshal, t, expected{xmlType, true, 0, false}) + checkForLeak(nil, restxml.Build, restxml.UnmarshalMeta, t, expected{xmlType, false, 2048, false}) + checkForLeak(nil, restxml.Build, restxml.UnmarshalError, t, expected{xmlType, true, 0, true}) +} + +func TestXML(t *testing.T) { + checkForLeak(nil, ec2query.Build, ec2query.Unmarshal, t, expected{jsonType, true, 0, false}) + checkForLeak(nil, ec2query.Build, ec2query.UnmarshalMeta, t, expected{jsonType, false, 2048, false}) + checkForLeak(nil, ec2query.Build, ec2query.UnmarshalError, t, expected{jsonType, true, 0, true}) +} + +func TestProtocol(t *testing.T) { + checkForLeak(nil, restxml.Build, protocol.UnmarshalDiscardBody, t, expected{xmlType, true, 0, false}) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go new file mode 100644 index 000000000..56d69db05 --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go @@ -0,0 +1,36 @@ +// Package query provides serialisation of AWS query requests, and responses. +package query + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go + +import ( + "net/url" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query/queryutil" +) + +// BuildHandler is a named request handler for building query protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build} + +// Build builds a request for an AWS Query service. +func Build(r *request.Request) { + body := url.Values{ + "Action": {r.Operation.Name}, + "Version": {r.ClientInfo.APIVersion}, + } + if err := queryutil.Parse(body, r.Params, false); err != nil { + r.Error = awserr.New("SerializationError", "failed encoding Query request", err) + return + } + + if r.ExpireTime == 0 { + r.HTTPRequest.Method = "POST" + r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + r.SetBufferBody([]byte(body.Encode())) + } else { // This is a pre-signed request + r.HTTPRequest.Method = "GET" + r.HTTPRequest.URL.RawQuery = body.Encode() + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build_test.go new file mode 100644 index 000000000..7c2d50d82 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build_test.go @@ -0,0 +1,2682 @@ +package query_test + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" + "github.com/aws/aws-sdk-go/private/util" + "github.com/stretchr/testify/assert" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} +var _ = io.EOF +var _ = aws.String +var _ = fmt.Println + +func init() { + protocol.RandReader = &awstesting.ZeroReader{} +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService1ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService1ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService1ProtocolTest client from just a session. 
+// svc := inputservice1protocoltest.New(mySession) +// +// // Create a InputService1ProtocolTest client with additional configuration +// svc := inputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService1ProtocolTest { + c := p.ClientConfig("inputservice1protocoltest", cfgs...) + return newInputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService1ProtocolTest { + svc := &InputService1ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice1protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService1ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService1TestCaseOperation1 = "OperationName" + +// InputService1TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService1TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService1TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService1TestCaseOperation1Request method. 
+// req, resp := client.InputService1TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService1ProtocolTest) InputService1TestCaseOperation1Request(input *InputService1TestShapeInputShape) (req *request.Request, output *InputService1TestShapeInputService1TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService1TestCaseOperation1, + } + + if input == nil { + input = &InputService1TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService1TestShapeInputService1TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService1ProtocolTest) InputService1TestCaseOperation1(input *InputService1TestShapeInputShape) (*InputService1TestShapeInputService1TestCaseOperation1Output, error) { + req, out := c.InputService1TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService1TestCaseOperation2 = "OperationName" + +// InputService1TestCaseOperation2Request generates a "aws/request.Request" representing the +// client's request for the InputService1TestCaseOperation2 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService1TestCaseOperation2 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService1TestCaseOperation2Request method. +// req, resp := client.InputService1TestCaseOperation2Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService1ProtocolTest) InputService1TestCaseOperation2Request(input *InputService1TestShapeInputShape) (req *request.Request, output *InputService1TestShapeInputService1TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService1TestCaseOperation2, + } + + if input == nil { + input = &InputService1TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService1TestShapeInputService1TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService1ProtocolTest) InputService1TestCaseOperation2(input *InputService1TestShapeInputShape) (*InputService1TestShapeInputService1TestCaseOperation2Output, error) { + req, out := c.InputService1TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +const opInputService1TestCaseOperation3 = "OperationName" + +// InputService1TestCaseOperation3Request generates a "aws/request.Request" representing the +// client's request for the InputService1TestCaseOperation3 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService1TestCaseOperation3 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService1TestCaseOperation3Request method. +// req, resp := client.InputService1TestCaseOperation3Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService1ProtocolTest) InputService1TestCaseOperation3Request(input *InputService1TestShapeInputShape) (req *request.Request, output *InputService1TestShapeInputService1TestCaseOperation3Output) { + op := &request.Operation{ + Name: opInputService1TestCaseOperation3, + } + + if input == nil { + input = &InputService1TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService1TestShapeInputService1TestCaseOperation3Output{} + req.Data = output + return +} + +func (c *InputService1ProtocolTest) InputService1TestCaseOperation3(input *InputService1TestShapeInputShape) (*InputService1TestShapeInputService1TestCaseOperation3Output, error) { + req, out := c.InputService1TestCaseOperation3Request(input) + err := req.Send() + return out, err +} + +type InputService1TestShapeInputService1TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService1TestShapeInputService1TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService1TestShapeInputService1TestCaseOperation3Output struct { + _ struct{} `type:"structure"` +} + +type InputService1TestShapeInputShape struct { + _ struct{} `type:"structure"` + + Bar *string `type:"string"` + + Baz *bool `type:"boolean"` + + Foo *string `type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService2ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService2ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService2ProtocolTest client from just a session. +// svc := inputservice2protocoltest.New(mySession) +// +// // Create a InputService2ProtocolTest client with additional configuration +// svc := inputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService2ProtocolTest { + c := p.ClientConfig("inputservice2protocoltest", cfgs...) + return newInputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
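+// The constructor below wires the full request pipeline for the test client: +// v4.SignRequestHandler signs requests with Signature Version 4, query.BuildHandler +// serializes Params into the Query wire format, and the query Unmarshal, +// UnmarshalMeta, and UnmarshalError handlers decode the response body, response +// metadata, and service errors respectively.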
+func newInputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService2ProtocolTest { + svc := &InputService2ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice2protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService2ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService2TestCaseOperation1 = "OperationName" + +// InputService2TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService2TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService2TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService2TestCaseOperation1Request method. 
+// req, resp := client.InputService2TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService2ProtocolTest) InputService2TestCaseOperation1Request(input *InputService2TestShapeInputService2TestCaseOperation1Input) (req *request.Request, output *InputService2TestShapeInputService2TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService2TestCaseOperation1, + } + + if input == nil { + input = &InputService2TestShapeInputService2TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService2TestShapeInputService2TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService2ProtocolTest) InputService2TestCaseOperation1(input *InputService2TestShapeInputService2TestCaseOperation1Input) (*InputService2TestShapeInputService2TestCaseOperation1Output, error) { + req, out := c.InputService2TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService2TestShapeInputService2TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + StructArg *InputService2TestShapeStructType `type:"structure"` +} + +type InputService2TestShapeInputService2TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService2TestShapeStructType struct { + _ struct{} `type:"structure"` + + ScalarArg *string `type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService3ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService3ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService3ProtocolTest client from just a session. +// svc := inputservice3protocoltest.New(mySession) +// +// // Create a InputService3ProtocolTest client with additional configuration +// svc := inputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService3ProtocolTest { + c := p.ClientConfig("inputservice3protocoltest", cfgs...) + return newInputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService3ProtocolTest { + svc := &InputService3ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice3protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService3ProtocolTest operation and runs any +// custom request initialization. 
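+// Note: each operation's Request method below removes query.UnmarshalHandler and +// pushes protocol.UnmarshalDiscardBodyHandler in its place. The output shapes in +// these input-protocol tests are empty structures, so the response body can simply +// be drained and closed rather than parsed.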
+func (c *InputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService3TestCaseOperation1 = "OperationName" + +// InputService3TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService3TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService3TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService3TestCaseOperation1Request method. +// req, resp := client.InputService3TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService3ProtocolTest) InputService3TestCaseOperation1Request(input *InputService3TestShapeInputShape) (req *request.Request, output *InputService3TestShapeInputService3TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService3TestCaseOperation1, + } + + if input == nil { + input = &InputService3TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService3TestShapeInputService3TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService3ProtocolTest) InputService3TestCaseOperation1(input *InputService3TestShapeInputShape) (*InputService3TestShapeInputService3TestCaseOperation1Output, error) { + req, out := c.InputService3TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService3TestCaseOperation2 = "OperationName" + +// InputService3TestCaseOperation2Request generates a "aws/request.Request" representing the +// client's request for the InputService3TestCaseOperation2 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService3TestCaseOperation2 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService3TestCaseOperation2Request method. 
+// req, resp := client.InputService3TestCaseOperation2Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService3ProtocolTest) InputService3TestCaseOperation2Request(input *InputService3TestShapeInputShape) (req *request.Request, output *InputService3TestShapeInputService3TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService3TestCaseOperation2, + } + + if input == nil { + input = &InputService3TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService3TestShapeInputService3TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService3ProtocolTest) InputService3TestCaseOperation2(input *InputService3TestShapeInputShape) (*InputService3TestShapeInputService3TestCaseOperation2Output, error) { + req, out := c.InputService3TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService3TestShapeInputService3TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService3TestShapeInputService3TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService3TestShapeInputShape struct { + _ struct{} `type:"structure"` + + ListArg []*string `type:"list"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService4ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService4ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService4ProtocolTest client from just a session. +// svc := inputservice4protocoltest.New(mySession) +// +// // Create a InputService4ProtocolTest client with additional configuration +// svc := inputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService4ProtocolTest { + c := p.ClientConfig("inputservice4protocoltest", cfgs...) + return newInputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService4ProtocolTest { + svc := &InputService4ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice4protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService4ProtocolTest operation and runs any +// custom request initialization. 
+func (c *InputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService4TestCaseOperation1 = "OperationName" + +// InputService4TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService4TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService4TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService4TestCaseOperation1Request method. +// req, resp := client.InputService4TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService4ProtocolTest) InputService4TestCaseOperation1Request(input *InputService4TestShapeInputShape) (req *request.Request, output *InputService4TestShapeInputService4TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService4TestCaseOperation1, + } + + if input == nil { + input = &InputService4TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService4TestShapeInputService4TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService4ProtocolTest) InputService4TestCaseOperation1(input *InputService4TestShapeInputShape) (*InputService4TestShapeInputService4TestCaseOperation1Output, error) { + req, out := c.InputService4TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService4TestCaseOperation2 = "OperationName" + +// InputService4TestCaseOperation2Request generates a "aws/request.Request" representing the +// client's request for the InputService4TestCaseOperation2 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService4TestCaseOperation2 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService4TestCaseOperation2Request method. 
+// req, resp := client.InputService4TestCaseOperation2Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService4ProtocolTest) InputService4TestCaseOperation2Request(input *InputService4TestShapeInputShape) (req *request.Request, output *InputService4TestShapeInputService4TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService4TestCaseOperation2, + } + + if input == nil { + input = &InputService4TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService4TestShapeInputService4TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService4ProtocolTest) InputService4TestCaseOperation2(input *InputService4TestShapeInputShape) (*InputService4TestShapeInputService4TestCaseOperation2Output, error) { + req, out := c.InputService4TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService4TestShapeInputService4TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService4TestShapeInputService4TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService4TestShapeInputShape struct { + _ struct{} `type:"structure"` + + ListArg []*string `type:"list" flattened:"true"` + + NamedListArg []*string `locationNameList:"Foo" type:"list" flattened:"true"` + + ScalarArg *string `type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService5ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService5ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService5ProtocolTest client from just a session. +// svc := inputservice5protocoltest.New(mySession) +// +// // Create a InputService5ProtocolTest client with additional configuration +// svc := inputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService5ProtocolTest { + c := p.ClientConfig("inputservice5protocoltest", cfgs...) + return newInputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService5ProtocolTest { + svc := &InputService5ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice5protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService5ProtocolTest operation and runs any +// custom request initialization. 
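+// InputService5 below exercises a flattened map (flattened:"true" on MapArg). With +// flattening, the Query serializer emits entries as MapArg.1.key=k1&MapArg.1.value=v1 +// rather than the default MapArg.entry.1.key=k1&MapArg.entry.1.value=v1 nesting +// (k1/v1 are illustrative values).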
+func (c *InputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService5TestCaseOperation1 = "OperationName" + +// InputService5TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService5TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService5TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService5TestCaseOperation1Request method. +// req, resp := client.InputService5TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService5ProtocolTest) InputService5TestCaseOperation1Request(input *InputService5TestShapeInputService5TestCaseOperation1Input) (req *request.Request, output *InputService5TestShapeInputService5TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService5TestCaseOperation1, + } + + if input == nil { + input = &InputService5TestShapeInputService5TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService5TestShapeInputService5TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService5ProtocolTest) InputService5TestCaseOperation1(input *InputService5TestShapeInputService5TestCaseOperation1Input) (*InputService5TestShapeInputService5TestCaseOperation1Output, error) { + req, out := c.InputService5TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService5TestShapeInputService5TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + MapArg map[string]*string `type:"map" flattened:"true"` +} + +type InputService5TestShapeInputService5TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService6ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService6ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService6ProtocolTest client from just a session. +// svc := inputservice6protocoltest.New(mySession) +// +// // Create a InputService6ProtocolTest client with additional configuration +// svc := inputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService6ProtocolTest { + c := p.ClientConfig("inputservice6protocoltest", cfgs...) 
+ return newInputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService6ProtocolTest { + svc := &InputService6ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice6protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService6ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService6TestCaseOperation1 = "OperationName" + +// InputService6TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService6TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService6TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService6TestCaseOperation1Request method. 
+// req, resp := client.InputService6TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService6ProtocolTest) InputService6TestCaseOperation1Request(input *InputService6TestShapeInputService6TestCaseOperation1Input) (req *request.Request, output *InputService6TestShapeInputService6TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService6TestCaseOperation1, + } + + if input == nil { + input = &InputService6TestShapeInputService6TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService6TestShapeInputService6TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService6ProtocolTest) InputService6TestCaseOperation1(input *InputService6TestShapeInputService6TestCaseOperation1Input) (*InputService6TestShapeInputService6TestCaseOperation1Output, error) { + req, out := c.InputService6TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService6TestShapeInputService6TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + ListArg []*string `locationNameList:"item" type:"list"` +} + +type InputService6TestShapeInputService6TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService7ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService7ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService7ProtocolTest client from just a session. +// svc := inputservice7protocoltest.New(mySession) +// +// // Create a InputService7ProtocolTest client with additional configuration +// svc := inputservice7protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService7ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService7ProtocolTest { + c := p.ClientConfig("inputservice7protocoltest", cfgs...) + return newInputService7ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService7ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService7ProtocolTest { + svc := &InputService7ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice7protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService7ProtocolTest operation and runs any +// custom request initialization. 
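+// InputService7 below pairs a scalar with a flattened list carrying a custom member +// name: locationNameList:"ListArgLocation" makes the elements serialize as +// ListArgLocation.1=a&ListArgLocation.2=b alongside ScalarArg=foo (values illustrative).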
+func (c *InputService7ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService7TestCaseOperation1 = "OperationName" + +// InputService7TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService7TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService7TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService7TestCaseOperation1Request method. +// req, resp := client.InputService7TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService7ProtocolTest) InputService7TestCaseOperation1Request(input *InputService7TestShapeInputService7TestCaseOperation1Input) (req *request.Request, output *InputService7TestShapeInputService7TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService7TestCaseOperation1, + } + + if input == nil { + input = &InputService7TestShapeInputService7TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService7TestShapeInputService7TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService7ProtocolTest) InputService7TestCaseOperation1(input *InputService7TestShapeInputService7TestCaseOperation1Input) (*InputService7TestShapeInputService7TestCaseOperation1Output, error) { + req, out := c.InputService7TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService7TestShapeInputService7TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + ListArg []*string `locationNameList:"ListArgLocation" type:"list" flattened:"true"` + + ScalarArg *string `type:"string"` +} + +type InputService7TestShapeInputService7TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService8ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService8ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService8ProtocolTest client from just a session. +// svc := inputservice8protocoltest.New(mySession) +// +// // Create a InputService8ProtocolTest client with additional configuration +// svc := inputservice8protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService8ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService8ProtocolTest { + c := p.ClientConfig("inputservice8protocoltest", cfgs...) 
+ return newInputService8ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService8ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService8ProtocolTest { + svc := &InputService8ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice8protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService8ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService8TestCaseOperation1 = "OperationName" + +// InputService8TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService8TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService8TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService8TestCaseOperation1Request method. 
+// req, resp := client.InputService8TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService8ProtocolTest) InputService8TestCaseOperation1Request(input *InputService8TestShapeInputService8TestCaseOperation1Input) (req *request.Request, output *InputService8TestShapeInputService8TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService8TestCaseOperation1, + } + + if input == nil { + input = &InputService8TestShapeInputService8TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService8TestShapeInputService8TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService8ProtocolTest) InputService8TestCaseOperation1(input *InputService8TestShapeInputService8TestCaseOperation1Input) (*InputService8TestShapeInputService8TestCaseOperation1Output, error) { + req, out := c.InputService8TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService8TestShapeInputService8TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + MapArg map[string]*string `type:"map"` +} + +type InputService8TestShapeInputService8TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService9ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService9ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService9ProtocolTest client from just a session. +// svc := inputservice9protocoltest.New(mySession) +// +// // Create a InputService9ProtocolTest client with additional configuration +// svc := inputservice9protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService9ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService9ProtocolTest { + c := p.ClientConfig("inputservice9protocoltest", cfgs...) + return newInputService9ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService9ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService9ProtocolTest { + svc := &InputService9ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice9protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService9ProtocolTest operation and runs any +// custom request initialization. 
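+// InputService9 below renames the map entry elements: locationNameKey:"TheKey" and +// locationNameValue:"TheValue" replace the default key/value names, so an entry +// serializes as MapArg.entry.1.TheKey=k1&MapArg.entry.1.TheValue=v1 (values illustrative).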
+func (c *InputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService9TestCaseOperation1 = "OperationName" + +// InputService9TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService9TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService9TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService9TestCaseOperation1Request method. +// req, resp := client.InputService9TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService9ProtocolTest) InputService9TestCaseOperation1Request(input *InputService9TestShapeInputService9TestCaseOperation1Input) (req *request.Request, output *InputService9TestShapeInputService9TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService9TestCaseOperation1, + } + + if input == nil { + input = &InputService9TestShapeInputService9TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService9TestShapeInputService9TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService9ProtocolTest) InputService9TestCaseOperation1(input *InputService9TestShapeInputService9TestCaseOperation1Input) (*InputService9TestShapeInputService9TestCaseOperation1Output, error) { + req, out := c.InputService9TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService9TestShapeInputService9TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + MapArg map[string]*string `locationNameKey:"TheKey" locationNameValue:"TheValue" type:"map"` +} + +type InputService9TestShapeInputService9TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService10ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService10ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService10ProtocolTest client from just a session. +// svc := inputservice10protocoltest.New(mySession) +// +// // Create a InputService10ProtocolTest client with additional configuration +// svc := inputservice10protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService10ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService10ProtocolTest { + c := p.ClientConfig("inputservice10protocoltest", cfgs...) 
+ return newInputService10ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService10ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService10ProtocolTest { + svc := &InputService10ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice10protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService10ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService10ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService10TestCaseOperation1 = "OperationName" + +// InputService10TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService10TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService10TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService10TestCaseOperation1Request method. +// req, resp := client.InputService10TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService10ProtocolTest) InputService10TestCaseOperation1Request(input *InputService10TestShapeInputService10TestCaseOperation1Input) (req *request.Request, output *InputService10TestShapeInputService10TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService10TestCaseOperation1, + } + + if input == nil { + input = &InputService10TestShapeInputService10TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService10TestShapeInputService10TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService10ProtocolTest) InputService10TestCaseOperation1(input *InputService10TestShapeInputService10TestCaseOperation1Input) (*InputService10TestShapeInputService10TestCaseOperation1Output, error) { + req, out := c.InputService10TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService10TestShapeInputService10TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + // BlobArg is automatically base64 encoded/decoded by the SDK. 
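+    // For example, a BlobArg of []byte("foo") is serialized on the wire as
+    // BlobArg=Zm9v, as asserted by the base64 protocol test later in this file.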
+ BlobArg []byte `type:"blob"` +} + +type InputService10TestShapeInputService10TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService11ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService11ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService11ProtocolTest client from just a session. +// svc := inputservice11protocoltest.New(mySession) +// +// // Create a InputService11ProtocolTest client with additional configuration +// svc := inputservice11protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService11ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService11ProtocolTest { + c := p.ClientConfig("inputservice11protocoltest", cfgs...) + return newInputService11ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService11ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService11ProtocolTest { + svc := &InputService11ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice11protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService11ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService11ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService11TestCaseOperation1 = "OperationName" + +// InputService11TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService11TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService11TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService11TestCaseOperation1Request method. 
+// req, resp := client.InputService11TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService11ProtocolTest) InputService11TestCaseOperation1Request(input *InputService11TestShapeInputService11TestCaseOperation1Input) (req *request.Request, output *InputService11TestShapeInputService11TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService11TestCaseOperation1, + } + + if input == nil { + input = &InputService11TestShapeInputService11TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService11TestShapeInputService11TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService11ProtocolTest) InputService11TestCaseOperation1(input *InputService11TestShapeInputService11TestCaseOperation1Input) (*InputService11TestShapeInputService11TestCaseOperation1Output, error) { + req, out := c.InputService11TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService11TestShapeInputService11TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + TimeArg *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +type InputService11TestShapeInputService11TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService12ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService12ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService12ProtocolTest client from just a session. +// svc := inputservice12protocoltest.New(mySession) +// +// // Create a InputService12ProtocolTest client with additional configuration +// svc := inputservice12protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService12ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService12ProtocolTest { + c := p.ClientConfig("inputservice12protocoltest", cfgs...) + return newInputService12ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService12ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService12ProtocolTest { + svc := &InputService12ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice12protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService12ProtocolTest operation and runs any +// custom request initialization. 
+func (c *InputService12ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService12TestCaseOperation1 = "OperationName" + +// InputService12TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService12TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService12TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService12TestCaseOperation1Request method. +// req, resp := client.InputService12TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService12ProtocolTest) InputService12TestCaseOperation1Request(input *InputService12TestShapeInputShape) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService12TestCaseOperation1, + } + + if input == nil { + input = &InputService12TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService12TestShapeInputService12TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService12ProtocolTest) InputService12TestCaseOperation1(input *InputService12TestShapeInputShape) (*InputService12TestShapeInputService12TestCaseOperation1Output, error) { + req, out := c.InputService12TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService12TestCaseOperation2 = "OperationName" + +// InputService12TestCaseOperation2Request generates a "aws/request.Request" representing the +// client's request for the InputService12TestCaseOperation2 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService12TestCaseOperation2 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService12TestCaseOperation2Request method. 
+// req, resp := client.InputService12TestCaseOperation2Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService12ProtocolTest) InputService12TestCaseOperation2Request(input *InputService12TestShapeInputShape) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService12TestCaseOperation2, + } + + if input == nil { + input = &InputService12TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService12TestShapeInputService12TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService12ProtocolTest) InputService12TestCaseOperation2(input *InputService12TestShapeInputShape) (*InputService12TestShapeInputService12TestCaseOperation2Output, error) { + req, out := c.InputService12TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +const opInputService12TestCaseOperation3 = "OperationName" + +// InputService12TestCaseOperation3Request generates a "aws/request.Request" representing the +// client's request for the InputService12TestCaseOperation3 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService12TestCaseOperation3 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService12TestCaseOperation3Request method. +// req, resp := client.InputService12TestCaseOperation3Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService12ProtocolTest) InputService12TestCaseOperation3Request(input *InputService12TestShapeInputShape) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation3Output) { + op := &request.Operation{ + Name: opInputService12TestCaseOperation3, + } + + if input == nil { + input = &InputService12TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService12TestShapeInputService12TestCaseOperation3Output{} + req.Data = output + return +} + +func (c *InputService12ProtocolTest) InputService12TestCaseOperation3(input *InputService12TestShapeInputShape) (*InputService12TestShapeInputService12TestCaseOperation3Output, error) { + req, out := c.InputService12TestCaseOperation3Request(input) + err := req.Send() + return out, err +} + +const opInputService12TestCaseOperation4 = "OperationName" + +// InputService12TestCaseOperation4Request generates a "aws/request.Request" representing the +// client's request for the InputService12TestCaseOperation4 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService12TestCaseOperation4 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService12TestCaseOperation4Request method. +// req, resp := client.InputService12TestCaseOperation4Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService12ProtocolTest) InputService12TestCaseOperation4Request(input *InputService12TestShapeInputShape) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation4Output) { + op := &request.Operation{ + Name: opInputService12TestCaseOperation4, + } + + if input == nil { + input = &InputService12TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService12TestShapeInputService12TestCaseOperation4Output{} + req.Data = output + return +} + +func (c *InputService12ProtocolTest) InputService12TestCaseOperation4(input *InputService12TestShapeInputShape) (*InputService12TestShapeInputService12TestCaseOperation4Output, error) { + req, out := c.InputService12TestCaseOperation4Request(input) + err := req.Send() + return out, err +} + +const opInputService12TestCaseOperation5 = "OperationName" + +// InputService12TestCaseOperation5Request generates a "aws/request.Request" representing the +// client's request for the InputService12TestCaseOperation5 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService12TestCaseOperation5 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService12TestCaseOperation5Request method. 
+// req, resp := client.InputService12TestCaseOperation5Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService12ProtocolTest) InputService12TestCaseOperation5Request(input *InputService12TestShapeInputShape) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation5Output) { + op := &request.Operation{ + Name: opInputService12TestCaseOperation5, + } + + if input == nil { + input = &InputService12TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService12TestShapeInputService12TestCaseOperation5Output{} + req.Data = output + return +} + +func (c *InputService12ProtocolTest) InputService12TestCaseOperation5(input *InputService12TestShapeInputShape) (*InputService12TestShapeInputService12TestCaseOperation5Output, error) { + req, out := c.InputService12TestCaseOperation5Request(input) + err := req.Send() + return out, err +} + +const opInputService12TestCaseOperation6 = "OperationName" + +// InputService12TestCaseOperation6Request generates a "aws/request.Request" representing the +// client's request for the InputService12TestCaseOperation6 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService12TestCaseOperation6 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService12TestCaseOperation6Request method. 
+// req, resp := client.InputService12TestCaseOperation6Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService12ProtocolTest) InputService12TestCaseOperation6Request(input *InputService12TestShapeInputShape) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation6Output) { + op := &request.Operation{ + Name: opInputService12TestCaseOperation6, + } + + if input == nil { + input = &InputService12TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService12TestShapeInputService12TestCaseOperation6Output{} + req.Data = output + return +} + +func (c *InputService12ProtocolTest) InputService12TestCaseOperation6(input *InputService12TestShapeInputShape) (*InputService12TestShapeInputService12TestCaseOperation6Output, error) { + req, out := c.InputService12TestCaseOperation6Request(input) + err := req.Send() + return out, err +} + +type InputService12TestShapeInputService12TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService12TestShapeInputService12TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService12TestShapeInputService12TestCaseOperation3Output struct { + _ struct{} `type:"structure"` +} + +type InputService12TestShapeInputService12TestCaseOperation4Output struct { + _ struct{} `type:"structure"` +} + +type InputService12TestShapeInputService12TestCaseOperation5Output struct { + _ struct{} `type:"structure"` +} + +type InputService12TestShapeInputService12TestCaseOperation6Output struct { + _ struct{} `type:"structure"` +} + +type InputService12TestShapeInputShape struct { + _ struct{} `type:"structure"` + + RecursiveStruct *InputService12TestShapeRecursiveStructType `type:"structure"` +} + +type InputService12TestShapeRecursiveStructType struct { + _ struct{} `type:"structure"` + + NoRecurse *string `type:"string"` + + RecursiveList []*InputService12TestShapeRecursiveStructType `type:"list"` + + RecursiveMap map[string]*InputService12TestShapeRecursiveStructType `type:"map"` + + RecursiveStruct *InputService12TestShapeRecursiveStructType `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService13ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService13ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService13ProtocolTest client from just a session. +// svc := inputservice13protocoltest.New(mySession) +// +// // Create a InputService13ProtocolTest client with additional configuration +// svc := inputservice13protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService13ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService13ProtocolTest { + c := p.ClientConfig("inputservice13protocoltest", cfgs...) + return newInputService13ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
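+// The handler lists wired up below implement the query protocol:
+// query.BuildHandler serializes the input into the request body, the
+// unmarshal handlers decode the XML response, and v4.SignRequestHandler
+// signs the request with Signature Version 4.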
+func newInputService13ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService13ProtocolTest { + svc := &InputService13ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice13protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService13ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService13ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService13TestCaseOperation1 = "OperationName" + +// InputService13TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService13TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService13TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService13TestCaseOperation1Request method. +// req, resp := client.InputService13TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService13ProtocolTest) InputService13TestCaseOperation1Request(input *InputService13TestShapeInputShape) (req *request.Request, output *InputService13TestShapeInputService13TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService13TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService13TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService13TestShapeInputService13TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService13ProtocolTest) InputService13TestCaseOperation1(input *InputService13TestShapeInputShape) (*InputService13TestShapeInputService13TestCaseOperation1Output, error) { + req, out := c.InputService13TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService13TestCaseOperation2 = "OperationName" + +// InputService13TestCaseOperation2Request generates a "aws/request.Request" representing the +// client's request for the InputService13TestCaseOperation2 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService13TestCaseOperation2 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService13TestCaseOperation2Request method. +// req, resp := client.InputService13TestCaseOperation2Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService13ProtocolTest) InputService13TestCaseOperation2Request(input *InputService13TestShapeInputShape) (req *request.Request, output *InputService13TestShapeInputService13TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService13TestCaseOperation2, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService13TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService13TestShapeInputService13TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService13ProtocolTest) InputService13TestCaseOperation2(input *InputService13TestShapeInputShape) (*InputService13TestShapeInputService13TestCaseOperation2Output, error) { + req, out := c.InputService13TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService13TestShapeInputService13TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService13TestShapeInputService13TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService13TestShapeInputShape struct { + _ struct{} `type:"structure"` + + Token *string `type:"string" idempotencyToken:"true"` +} + +// +// Tests begin here +// + +func TestInputService1ProtocolTestScalarMembersCase1(t *testing.T) { + sess := session.New() + svc := NewInputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService1TestShapeInputShape{ + Bar: aws.String("val2"), + Foo: aws.String("val1"), + } + req, _ := svc.InputService1TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&Bar=val2&Foo=val1&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService1ProtocolTestScalarMembersCase2(t *testing.T) { + sess := session.New() + svc := NewInputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService1TestShapeInputShape{ + Baz: aws.Bool(true), + } + req, _ := svc.InputService1TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&Baz=true&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, 
"https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService1ProtocolTestScalarMembersCase3(t *testing.T) { + sess := session.New() + svc := NewInputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService1TestShapeInputShape{ + Baz: aws.Bool(false), + } + req, _ := svc.InputService1TestCaseOperation3Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&Baz=false&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService2ProtocolTestNestedStructureMembersCase1(t *testing.T) { + sess := session.New() + svc := NewInputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService2TestShapeInputService2TestCaseOperation1Input{ + StructArg: &InputService2TestShapeStructType{ + ScalarArg: aws.String("foo"), + }, + } + req, _ := svc.InputService2TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&StructArg.ScalarArg=foo&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService3ProtocolTestListTypesCase1(t *testing.T) { + sess := session.New() + svc := NewInputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService3TestShapeInputShape{ + ListArg: []*string{ + aws.String("foo"), + aws.String("bar"), + aws.String("baz"), + }, + } + req, _ := svc.InputService3TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&ListArg.member.1=foo&ListArg.member.2=bar&ListArg.member.3=baz&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService3ProtocolTestListTypesCase2(t *testing.T) { + sess := session.New() + svc := NewInputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService3TestShapeInputShape{ + ListArg: []*string{}, + } + req, _ := svc.InputService3TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&ListArg=&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService4ProtocolTestFlattenedListCase1(t *testing.T) { + sess := session.New() + svc := NewInputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService4TestShapeInputShape{ + ListArg: []*string{ + aws.String("a"), + aws.String("b"), + aws.String("c"), + }, + ScalarArg: aws.String("foo"), + } + req, _ := svc.InputService4TestCaseOperation1Request(input) + r 
:= req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&ListArg.1=a&ListArg.2=b&ListArg.3=c&ScalarArg=foo&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService4ProtocolTestFlattenedListCase2(t *testing.T) { + sess := session.New() + svc := NewInputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService4TestShapeInputShape{ + NamedListArg: []*string{ + aws.String("a"), + }, + } + req, _ := svc.InputService4TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&Foo.1=a&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService5ProtocolTestSerializeFlattenedMapTypeCase1(t *testing.T) { + sess := session.New() + svc := NewInputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService5TestShapeInputService5TestCaseOperation1Input{ + MapArg: map[string]*string{ + "key1": aws.String("val1"), + "key2": aws.String("val2"), + }, + } + req, _ := svc.InputService5TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&MapArg.1.key=key1&MapArg.1.value=val1&MapArg.2.key=key2&MapArg.2.value=val2&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService6ProtocolTestNonFlattenedListWithLocationNameCase1(t *testing.T) { + sess := session.New() + svc := NewInputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService6TestShapeInputService6TestCaseOperation1Input{ + ListArg: []*string{ + aws.String("a"), + aws.String("b"), + aws.String("c"), + }, + } + req, _ := svc.InputService6TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&ListArg.item.1=a&ListArg.item.2=b&ListArg.item.3=c&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService7ProtocolTestFlattenedListWithLocationNameCase1(t *testing.T) { + sess := session.New() + svc := NewInputService7ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService7TestShapeInputService7TestCaseOperation1Input{ + ListArg: []*string{ + aws.String("a"), + aws.String("b"), + aws.String("c"), + }, + ScalarArg: aws.String("foo"), + } + req, _ := svc.InputService7TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + 
awstesting.AssertQuery(t, `Action=OperationName&ListArgLocation.1=a&ListArgLocation.2=b&ListArgLocation.3=c&ScalarArg=foo&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService8ProtocolTestSerializeMapTypeCase1(t *testing.T) { + sess := session.New() + svc := NewInputService8ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService8TestShapeInputService8TestCaseOperation1Input{ + MapArg: map[string]*string{ + "key1": aws.String("val1"), + "key2": aws.String("val2"), + }, + } + req, _ := svc.InputService8TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&MapArg.entry.1.key=key1&MapArg.entry.1.value=val1&MapArg.entry.2.key=key2&MapArg.entry.2.value=val2&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService9ProtocolTestSerializeMapTypeWithLocationNameCase1(t *testing.T) { + sess := session.New() + svc := NewInputService9ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService9TestShapeInputService9TestCaseOperation1Input{ + MapArg: map[string]*string{ + "key1": aws.String("val1"), + "key2": aws.String("val2"), + }, + } + req, _ := svc.InputService9TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&MapArg.entry.1.TheKey=key1&MapArg.entry.1.TheValue=val1&MapArg.entry.2.TheKey=key2&MapArg.entry.2.TheValue=val2&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService10ProtocolTestBase64EncodedBlobsCase1(t *testing.T) { + sess := session.New() + svc := NewInputService10ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService10TestShapeInputService10TestCaseOperation1Input{ + BlobArg: []byte("foo"), + } + req, _ := svc.InputService10TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&BlobArg=Zm9v&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService11ProtocolTestTimestampValuesCase1(t *testing.T) { + sess := session.New() + svc := NewInputService11ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService11TestShapeInputService11TestCaseOperation1Input{ + TimeArg: aws.Time(time.Unix(1422172800, 0)), + } + req, _ := svc.InputService11TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&TimeArg=2015-01-25T08%3A00%3A00Z&Version=2014-01-01`, util.Trim(string(body))) + + // 
assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService12ProtocolTestRecursiveShapesCase1(t *testing.T) { + sess := session.New() + svc := NewInputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService12TestShapeInputShape{ + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + } + req, _ := svc.InputService12TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&RecursiveStruct.NoRecurse=foo&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService12ProtocolTestRecursiveShapesCase2(t *testing.T) { + sess := session.New() + svc := NewInputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService12TestShapeInputShape{ + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + }, + } + req, _ := svc.InputService12TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&RecursiveStruct.RecursiveStruct.NoRecurse=foo&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService12ProtocolTestRecursiveShapesCase3(t *testing.T) { + sess := session.New() + svc := NewInputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService12TestShapeInputShape{ + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + }, + }, + }, + } + req, _ := svc.InputService12TestCaseOperation3Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&RecursiveStruct.RecursiveStruct.RecursiveStruct.RecursiveStruct.NoRecurse=foo&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService12ProtocolTestRecursiveShapesCase4(t *testing.T) { + sess := session.New() + svc := NewInputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService12TestShapeInputShape{ + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + RecursiveList: []*InputService12TestShapeRecursiveStructType{ + { + NoRecurse: aws.String("foo"), + }, + { + NoRecurse: aws.String("bar"), + }, + }, + }, + } + req, _ := svc.InputService12TestCaseOperation4Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) 
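+	// Non-flattened lists serialize with a ".member.N" infix, so each
+	// RecursiveList element lands at RecursiveStruct.RecursiveList.member.N.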
+ body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&RecursiveStruct.RecursiveList.member.1.NoRecurse=foo&RecursiveStruct.RecursiveList.member.2.NoRecurse=bar&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService12ProtocolTestRecursiveShapesCase5(t *testing.T) { + sess := session.New() + svc := NewInputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService12TestShapeInputShape{ + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + RecursiveList: []*InputService12TestShapeRecursiveStructType{ + { + NoRecurse: aws.String("foo"), + }, + { + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + NoRecurse: aws.String("bar"), + }, + }, + }, + }, + } + req, _ := svc.InputService12TestCaseOperation5Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&RecursiveStruct.RecursiveList.member.1.NoRecurse=foo&RecursiveStruct.RecursiveList.member.2.RecursiveStruct.NoRecurse=bar&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService12ProtocolTestRecursiveShapesCase6(t *testing.T) { + sess := session.New() + svc := NewInputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService12TestShapeInputShape{ + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + RecursiveMap: map[string]*InputService12TestShapeRecursiveStructType{ + "bar": { + NoRecurse: aws.String("bar"), + }, + "foo": { + NoRecurse: aws.String("foo"), + }, + }, + }, + } + req, _ := svc.InputService12TestCaseOperation6Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&RecursiveStruct.RecursiveMap.entry.1.key=foo&RecursiveStruct.RecursiveMap.entry.1.value.NoRecurse=foo&RecursiveStruct.RecursiveMap.entry.2.key=bar&RecursiveStruct.RecursiveMap.entry.2.value.NoRecurse=bar&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService13ProtocolTestIdempotencyTokenAutoFillCase1(t *testing.T) { + sess := session.New() + svc := NewInputService13ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService13TestShapeInputShape{ + Token: aws.String("abc123"), + } + req, _ := svc.InputService13TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Token=abc123`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService13ProtocolTestIdempotencyTokenAutoFillCase2(t *testing.T) { + sess := session.New() + svc := NewInputService13ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService13TestShapeInputShape{} + req, _ := 
svc.InputService13TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Token=00000000-0000-4000-8000-000000000000`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go new file mode 100644 index 000000000..60ea0bd1e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -0,0 +1,230 @@ +package queryutil + +import ( + "encoding/base64" + "fmt" + "net/url" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +// Parse parses an object i and fills a url.Values object. The isEC2 flag +// indicates if this is the EC2 Query sub-protocol. +func Parse(body url.Values, i interface{}, isEC2 bool) error { + q := queryParser{isEC2: isEC2} + return q.parseValue(body, reflect.ValueOf(i), "", "") +} + +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +type queryParser struct { + isEC2 bool +} + +func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + value = elemOf(value) + + // no need to handle zero values + if !value.IsValid() { + return nil + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + return q.parseStruct(v, value, prefix) + case "list": + return q.parseList(v, value, prefix, tag) + case "map": + return q.parseMap(v, value, prefix, tag) + default: + return q.parseScalar(v, value, prefix, tag) + } +} + +func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error { + if !value.IsValid() { + return nil + } + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + elemValue := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + elemValue = reflect.ValueOf(token) + } + + var name string + if q.isEC2 { + name = field.Tag.Get("queryName") + } + if name == "" { + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + if name != "" && q.isEC2 { + name = strings.ToUpper(name[0:1]) + name[1:] + } + } + if name == "" { + name = field.Name + } + + if prefix != "" { + name = prefix + "." 
+ name
+	}
+
+		if err := q.parseValue(v, elemValue, name, field.Tag); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
+	// If it's empty, generate an empty value
+	if !value.IsNil() && value.Len() == 0 {
+		v.Set(prefix, "")
+		return nil
+	}
+
+	// check for unflattened list member
+	if !q.isEC2 && tag.Get("flattened") == "" {
+		prefix += ".member"
+	}
+
+	for i := 0; i < value.Len(); i++ {
+		slicePrefix := prefix
+		if slicePrefix == "" {
+			slicePrefix = strconv.Itoa(i + 1)
+		} else {
+			slicePrefix = slicePrefix + "." + strconv.Itoa(i+1)
+		}
+		if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
+	// If it's empty, generate an empty value
+	if !value.IsNil() && value.Len() == 0 {
+		v.Set(prefix, "")
+		return nil
+	}
+
+	// check for unflattened map entry
+	if !q.isEC2 && tag.Get("flattened") == "" {
+		prefix += ".entry"
+	}
+
+	// sort keys for improved serialization consistency.
+	// this is not strictly necessary for protocol support.
+	mapKeyValues := value.MapKeys()
+	mapKeys := map[string]reflect.Value{}
+	mapKeyNames := make([]string, len(mapKeyValues))
+	for i, mapKey := range mapKeyValues {
+		name := mapKey.String()
+		mapKeys[name] = mapKey
+		mapKeyNames[i] = name
+	}
+	sort.Strings(mapKeyNames)
+
+	for i, mapKeyName := range mapKeyNames {
+		mapKey := mapKeys[mapKeyName]
+		mapValue := value.MapIndex(mapKey)
+
+		kname := tag.Get("locationNameKey")
+		if kname == "" {
+			kname = "key"
+		}
+		vname := tag.Get("locationNameValue")
+		if vname == "" {
+			vname = "value"
+		}
+
+		// serialize key
+		var keyName string
+		if prefix == "" {
+			keyName = strconv.Itoa(i+1) + "." + kname
+		} else {
+			keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname
+		}
+
+		if err := q.parseValue(v, mapKey, keyName, ""); err != nil {
+			return err
+		}
+
+		// serialize value
+		var valueName string
+		if prefix == "" {
+			valueName = strconv.Itoa(i+1) + "." + vname
+		} else {
+			valueName = prefix + "." + strconv.Itoa(i+1) + "."
+ vname + } + + if err := q.parseValue(v, mapValue, valueName, ""); err != nil { + return err + } + } + + return nil +} + +func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error { + switch value := r.Interface().(type) { + case string: + v.Set(name, value) + case []byte: + if !r.IsNil() { + v.Set(name, base64.StdEncoding.EncodeToString(value)) + } + case bool: + v.Set(name, strconv.FormatBool(value)) + case int64: + v.Set(name, strconv.FormatInt(value, 10)) + case int: + v.Set(name, strconv.Itoa(value)) + case float64: + v.Set(name, strconv.FormatFloat(value, 'f', -1, 64)) + case float32: + v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) + case time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + v.Set(name, value.UTC().Format(ISO8601UTC)) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name()) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go new file mode 100644 index 000000000..a3ea40955 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go @@ -0,0 +1,35 @@ +package query + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go + +import ( + "encoding/xml" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// UnmarshalHandler is a named request handler for unmarshaling query protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals a response for an AWS Query service. +func Unmarshal(r *request.Request) { + defer r.HTTPResponse.Body.Close() + if r.DataFilled() { + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") + if err != nil { + r.Error = awserr.New("SerializationError", "failed decoding Query response", err) + return + } + } +} + +// UnmarshalMeta unmarshals header response values for an AWS Query service. 
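+// The query protocol reports the request ID in the X-Amzn-Requestid response
+// header; it is the only piece of response metadata extracted here.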
+func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go new file mode 100644 index 000000000..f21429617 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go @@ -0,0 +1,66 @@ +package query + +import ( + "encoding/xml" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +type xmlErrorResponse struct { + XMLName xml.Name `xml:"ErrorResponse"` + Code string `xml:"Error>Code"` + Message string `xml:"Error>Message"` + RequestID string `xml:"RequestId"` +} + +type xmlServiceUnavailableResponse struct { + XMLName xml.Name `xml:"ServiceUnavailableException"` +} + +// UnmarshalErrorHandler is a named request handler to unmarshal request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError} + +// UnmarshalError unmarshals an error response for an AWS Query service. +func UnmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to read from query HTTP response body", err) + return + } + + // First check for an error wrapped in an ErrorResponse element + resp := xmlErrorResponse{} + decodeErr := xml.Unmarshal(bodyBytes, &resp) + if decodeErr == nil { + reqID := resp.RequestID + if reqID == "" { + reqID = r.RequestID + } + r.Error = awserr.NewRequestFailure( + awserr.New(resp.Code, resp.Message, nil), + r.HTTPResponse.StatusCode, + reqID, + ) + return + } + + // Fall back to a bare ServiceUnavailableException, which has no + // ErrorResponse wrapper + servUnavailResp := xmlServiceUnavailableResponse{} + unavailErr := xml.Unmarshal(bodyBytes, &servUnavailResp) + if unavailErr == nil { + r.Error = awserr.NewRequestFailure( + awserr.New("ServiceUnavailableException", "service is unavailable", nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + // Failed to retrieve any error message from the response body + r.Error = awserr.New("SerializationError", + "failed to decode query XML error response", decodeErr) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_test.go new file mode 100644 index 000000000..27acf8766 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_test.go @@ -0,0 +1,2068 @@ +package query_test + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" + "github.com/aws/aws-sdk-go/private/util" + "github.com/stretchr/testify/assert" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} +var _ = io.EOF +var _ = aws.String +var _ = fmt.Println + 
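+// Illustrative sketch (assumed helper, not part of the generated tests; the +// name demoDecodeQueryError is hypothetical): it mirrors how UnmarshalError in +// the unmarshal_error.go file above decodes a Query error body, using a local +// copy of that file's unexported xmlErrorResponse shape. + +func demoDecodeQueryError(body []byte) (code, message string, err error) { + // Local mirror of query.xmlErrorResponse; <Error> wraps Code and Message. + var resp struct { + XMLName xml.Name `xml:"ErrorResponse"` + Code string `xml:"Error>Code"` + Message string `xml:"Error>Message"` + } + // Example body: <ErrorResponse><Error><Code>Throttling</Code> + // <Message>Rate exceeded</Message></Error></ErrorResponse> + if err = xml.Unmarshal(body, &resp); err != nil { + return "", "", err + } + return resp.Code, resp.Message, nil +} + 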
+func init() { + protocol.RandReader = &awstesting.ZeroReader{} +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService1ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService1ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService1ProtocolTest client from just a session. +// svc := outputservice1protocoltest.New(mySession) +// +// // Create a OutputService1ProtocolTest client with additional configuration +// svc := outputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService1ProtocolTest { + c := p.ClientConfig("outputservice1protocoltest", cfgs...) + return newOutputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService1ProtocolTest { + svc := &OutputService1ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice1protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService1ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService1TestCaseOperation1 = "OperationName" + +// OutputService1TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService1TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService1TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService1TestCaseOperation1Request method. 
+// req, resp := client.OutputService1TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1Request(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (req *request.Request, output *OutputService1TestShapeOutputService1TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService1TestCaseOperation1, + } + + if input == nil { + input = &OutputService1TestShapeOutputService1TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService1TestShapeOutputService1TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (*OutputService1TestShapeOutputService1TestCaseOperation1Output, error) { + req, out := c.OutputService1TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService1TestShapeOutputService1TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService1TestShapeOutputService1TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Char *string `type:"character"` + + Double *float64 `type:"double"` + + FalseBool *bool `type:"boolean"` + + Float *float64 `type:"float"` + + Long *int64 `type:"long"` + + Num *int64 `locationName:"FooNum" type:"integer"` + + Str *string `type:"string"` + + Timestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + TrueBool *bool `type:"boolean"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService2ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService2ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService2ProtocolTest client from just a session. +// svc := outputservice2protocoltest.New(mySession) +// +// // Create a OutputService2ProtocolTest client with additional configuration +// svc := outputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService2ProtocolTest { + c := p.ClientConfig("outputservice2protocoltest", cfgs...) + return newOutputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newOutputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService2ProtocolTest { + svc := &OutputService2ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice2protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService2ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService2TestCaseOperation1 = "OperationName" + +// OutputService2TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService2TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService2TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService2TestCaseOperation1Request method. +// req, resp := client.OutputService2TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1Request(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (req *request.Request, output *OutputService2TestShapeOutputService2TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService2TestCaseOperation1, + } + + if input == nil { + input = &OutputService2TestShapeOutputService2TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService2TestShapeOutputService2TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (*OutputService2TestShapeOutputService2TestCaseOperation1Output, error) { + req, out := c.OutputService2TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService2TestShapeOutputService2TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService2TestShapeOutputService2TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Num *int64 `type:"integer"` + + Str *string `type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+type OutputService3ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService3ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService3ProtocolTest client from just a session. +// svc := outputservice3protocoltest.New(mySession) +// +// // Create a OutputService3ProtocolTest client with additional configuration +// svc := outputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService3ProtocolTest { + c := p.ClientConfig("outputservice3protocoltest", cfgs...) + return newOutputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService3ProtocolTest { + svc := &OutputService3ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice3protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService3ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService3TestCaseOperation1 = "OperationName" + +// OutputService3TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService3TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService3TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService3TestCaseOperation1Request method. 
+// req, resp := client.OutputService3TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1Request(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (req *request.Request, output *OutputService3TestShapeOutputService3TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService3TestCaseOperation1, + } + + if input == nil { + input = &OutputService3TestShapeOutputService3TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService3TestShapeOutputService3TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (*OutputService3TestShapeOutputService3TestCaseOperation1Output, error) { + req, out := c.OutputService3TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService3TestShapeOutputService3TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService3TestShapeOutputService3TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + // Blob is automatically base64 encoded/decoded by the SDK. + Blob []byte `type:"blob"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService4ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService4ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService4ProtocolTest client from just a session. +// svc := outputservice4protocoltest.New(mySession) +// +// // Create a OutputService4ProtocolTest client with additional configuration +// svc := outputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService4ProtocolTest { + c := p.ClientConfig("outputservice4protocoltest", cfgs...) + return newOutputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService4ProtocolTest { + svc := &OutputService4ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice4protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService4ProtocolTest operation and runs any +// custom request initialization. 
+func (c *OutputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService4TestCaseOperation1 = "OperationName" + +// OutputService4TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService4TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService4TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService4TestCaseOperation1Request method. +// req, resp := client.OutputService4TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1Request(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (req *request.Request, output *OutputService4TestShapeOutputService4TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService4TestCaseOperation1, + } + + if input == nil { + input = &OutputService4TestShapeOutputService4TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService4TestShapeOutputService4TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (*OutputService4TestShapeOutputService4TestCaseOperation1Output, error) { + req, out := c.OutputService4TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService4TestShapeOutputService4TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService4TestShapeOutputService4TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + ListMember []*string `type:"list"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService5ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService5ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService5ProtocolTest client from just a session. +// svc := outputservice5protocoltest.New(mySession) +// +// // Create a OutputService5ProtocolTest client with additional configuration +// svc := outputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService5ProtocolTest { + c := p.ClientConfig("outputservice5protocoltest", cfgs...) + return newOutputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newOutputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService5ProtocolTest { + svc := &OutputService5ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice5protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService5ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService5TestCaseOperation1 = "OperationName" + +// OutputService5TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService5TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService5TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService5TestCaseOperation1Request method. +// req, resp := client.OutputService5TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1Request(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (req *request.Request, output *OutputService5TestShapeOutputService5TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService5TestCaseOperation1, + } + + if input == nil { + input = &OutputService5TestShapeOutputService5TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService5TestShapeOutputService5TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (*OutputService5TestShapeOutputService5TestCaseOperation1Output, error) { + req, out := c.OutputService5TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService5TestShapeOutputService5TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService5TestShapeOutputService5TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + ListMember []*string `locationNameList:"item" type:"list"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+type OutputService6ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService6ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService6ProtocolTest client from just a session. +// svc := outputservice6protocoltest.New(mySession) +// +// // Create a OutputService6ProtocolTest client with additional configuration +// svc := outputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService6ProtocolTest { + c := p.ClientConfig("outputservice6protocoltest", cfgs...) + return newOutputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService6ProtocolTest { + svc := &OutputService6ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice6protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService6ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService6TestCaseOperation1 = "OperationName" + +// OutputService6TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService6TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService6TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService6TestCaseOperation1Request method. 
+// req, resp := client.OutputService6TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1Request(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (req *request.Request, output *OutputService6TestShapeOutputService6TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService6TestCaseOperation1, + } + + if input == nil { + input = &OutputService6TestShapeOutputService6TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService6TestShapeOutputService6TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (*OutputService6TestShapeOutputService6TestCaseOperation1Output, error) { + req, out := c.OutputService6TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService6TestShapeOutputService6TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService6TestShapeOutputService6TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + ListMember []*string `type:"list" flattened:"true"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService7ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService7ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService7ProtocolTest client from just a session. +// svc := outputservice7protocoltest.New(mySession) +// +// // Create a OutputService7ProtocolTest client with additional configuration +// svc := outputservice7protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService7ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService7ProtocolTest { + c := p.ClientConfig("outputservice7protocoltest", cfgs...) + return newOutputService7ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService7ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService7ProtocolTest { + svc := &OutputService7ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice7protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService7ProtocolTest operation and runs any +// custom request initialization. 
+func (c *OutputService7ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService7TestCaseOperation1 = "OperationName" + +// OutputService7TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService7TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService7TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService7TestCaseOperation1Request method. +// req, resp := client.OutputService7TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1Request(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (req *request.Request, output *OutputService7TestShapeOutputService7TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService7TestCaseOperation1, + } + + if input == nil { + input = &OutputService7TestShapeOutputService7TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService7TestShapeOutputService7TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (*OutputService7TestShapeOutputService7TestCaseOperation1Output, error) { + req, out := c.OutputService7TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService7TestShapeOutputService7TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService7TestShapeOutputService7TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + ListMember []*string `type:"list" flattened:"true"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService8ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService8ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService8ProtocolTest client from just a session. +// svc := outputservice8protocoltest.New(mySession) +// +// // Create a OutputService8ProtocolTest client with additional configuration +// svc := outputservice8protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService8ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService8ProtocolTest { + c := p.ClientConfig("outputservice8protocoltest", cfgs...) + return newOutputService8ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newOutputService8ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService8ProtocolTest { + svc := &OutputService8ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice8protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService8ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService8TestCaseOperation1 = "OperationName" + +// OutputService8TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService8TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService8TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService8TestCaseOperation1Request method. 
+// req, resp := client.OutputService8TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1Request(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (req *request.Request, output *OutputService8TestShapeOutputService8TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService8TestCaseOperation1, + } + + if input == nil { + input = &OutputService8TestShapeOutputService8TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService8TestShapeOutputService8TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (*OutputService8TestShapeOutputService8TestCaseOperation1Output, error) { + req, out := c.OutputService8TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService8TestShapeOutputService8TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService8TestShapeOutputService8TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + List []*OutputService8TestShapeStructureShape `type:"list"` +} + +type OutputService8TestShapeStructureShape struct { + _ struct{} `type:"structure"` + + Bar *string `type:"string"` + + Baz *string `type:"string"` + + Foo *string `type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService9ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService9ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService9ProtocolTest client from just a session. +// svc := outputservice9protocoltest.New(mySession) +// +// // Create a OutputService9ProtocolTest client with additional configuration +// svc := outputservice9protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService9ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService9ProtocolTest { + c := p.ClientConfig("outputservice9protocoltest", cfgs...) + return newOutputService9ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService9ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService9ProtocolTest { + svc := &OutputService9ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice9protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService9ProtocolTest operation and runs any +// custom request initialization. 
+func (c *OutputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService9TestCaseOperation1 = "OperationName" + +// OutputService9TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService9TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService9TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService9TestCaseOperation1Request method. +// req, resp := client.OutputService9TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1Request(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (req *request.Request, output *OutputService9TestShapeOutputService9TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService9TestCaseOperation1, + } + + if input == nil { + input = &OutputService9TestShapeOutputService9TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService9TestShapeOutputService9TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (*OutputService9TestShapeOutputService9TestCaseOperation1Output, error) { + req, out := c.OutputService9TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService9TestShapeOutputService9TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService9TestShapeOutputService9TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + List []*OutputService9TestShapeStructureShape `type:"list" flattened:"true"` +} + +type OutputService9TestShapeStructureShape struct { + _ struct{} `type:"structure"` + + Bar *string `type:"string"` + + Baz *string `type:"string"` + + Foo *string `type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService10ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService10ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService10ProtocolTest client from just a session. 
+// svc := outputservice10protocoltest.New(mySession) +// +// // Create a OutputService10ProtocolTest client with additional configuration +// svc := outputservice10protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService10ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService10ProtocolTest { + c := p.ClientConfig("outputservice10protocoltest", cfgs...) + return newOutputService10ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService10ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService10ProtocolTest { + svc := &OutputService10ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice10protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService10ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService10ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService10TestCaseOperation1 = "OperationName" + +// OutputService10TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService10TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService10TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService10TestCaseOperation1Request method. 
+// req, resp := client.OutputService10TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService10ProtocolTest) OutputService10TestCaseOperation1Request(input *OutputService10TestShapeOutputService10TestCaseOperation1Input) (req *request.Request, output *OutputService10TestShapeOutputService10TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService10TestCaseOperation1, + } + + if input == nil { + input = &OutputService10TestShapeOutputService10TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService10TestShapeOutputService10TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService10ProtocolTest) OutputService10TestCaseOperation1(input *OutputService10TestShapeOutputService10TestCaseOperation1Input) (*OutputService10TestShapeOutputService10TestCaseOperation1Output, error) { + req, out := c.OutputService10TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService10TestShapeOutputService10TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService10TestShapeOutputService10TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + List []*string `locationNameList:"NamedList" type:"list" flattened:"true"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService11ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService11ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService11ProtocolTest client from just a session. +// svc := outputservice11protocoltest.New(mySession) +// +// // Create a OutputService11ProtocolTest client with additional configuration +// svc := outputservice11protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService11ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService11ProtocolTest { + c := p.ClientConfig("outputservice11protocoltest", cfgs...) + return newOutputService11ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService11ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService11ProtocolTest { + svc := &OutputService11ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice11protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService11ProtocolTest operation and runs any +// custom request initialization. 
+func (c *OutputService11ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService11TestCaseOperation1 = "OperationName" + +// OutputService11TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService11TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService11TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService11TestCaseOperation1Request method. +// req, resp := client.OutputService11TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService11ProtocolTest) OutputService11TestCaseOperation1Request(input *OutputService11TestShapeOutputService11TestCaseOperation1Input) (req *request.Request, output *OutputService11TestShapeOutputService11TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService11TestCaseOperation1, + } + + if input == nil { + input = &OutputService11TestShapeOutputService11TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService11TestShapeOutputService11TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService11ProtocolTest) OutputService11TestCaseOperation1(input *OutputService11TestShapeOutputService11TestCaseOperation1Input) (*OutputService11TestShapeOutputService11TestCaseOperation1Output, error) { + req, out := c.OutputService11TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService11TestShapeOutputService11TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService11TestShapeOutputService11TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Map map[string]*OutputService11TestShapeStructType `type:"map"` +} + +type OutputService11TestShapeStructType struct { + _ struct{} `type:"structure"` + + Foo *string `locationName:"foo" type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService12ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService12ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService12ProtocolTest client from just a session. +// svc := outputservice12protocoltest.New(mySession) +// +// // Create a OutputService12ProtocolTest client with additional configuration +// svc := outputservice12protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService12ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService12ProtocolTest { + c := p.ClientConfig("outputservice12protocoltest", cfgs...) 
+ return newOutputService12ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService12ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService12ProtocolTest { + svc := &OutputService12ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice12protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService12ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService12ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService12TestCaseOperation1 = "OperationName" + +// OutputService12TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService12TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService12TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService12TestCaseOperation1Request method. 
+// req, resp := client.OutputService12TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService12ProtocolTest) OutputService12TestCaseOperation1Request(input *OutputService12TestShapeOutputService12TestCaseOperation1Input) (req *request.Request, output *OutputService12TestShapeOutputService12TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService12TestCaseOperation1, + } + + if input == nil { + input = &OutputService12TestShapeOutputService12TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService12TestShapeOutputService12TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService12ProtocolTest) OutputService12TestCaseOperation1(input *OutputService12TestShapeOutputService12TestCaseOperation1Input) (*OutputService12TestShapeOutputService12TestCaseOperation1Output, error) { + req, out := c.OutputService12TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService12TestShapeOutputService12TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService12TestShapeOutputService12TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Map map[string]*string `type:"map" flattened:"true"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService13ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService13ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService13ProtocolTest client from just a session. +// svc := outputservice13protocoltest.New(mySession) +// +// // Create a OutputService13ProtocolTest client with additional configuration +// svc := outputservice13protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService13ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService13ProtocolTest { + c := p.ClientConfig("outputservice13protocoltest", cfgs...) + return newOutputService13ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService13ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService13ProtocolTest { + svc := &OutputService13ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice13protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService13ProtocolTest operation and runs any +// custom request initialization. 
+func (c *OutputService13ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService13TestCaseOperation1 = "OperationName" + +// OutputService13TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService13TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService13TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService13TestCaseOperation1Request method. +// req, resp := client.OutputService13TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService13ProtocolTest) OutputService13TestCaseOperation1Request(input *OutputService13TestShapeOutputService13TestCaseOperation1Input) (req *request.Request, output *OutputService13TestShapeOutputService13TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService13TestCaseOperation1, + } + + if input == nil { + input = &OutputService13TestShapeOutputService13TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService13TestShapeOutputService13TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService13ProtocolTest) OutputService13TestCaseOperation1(input *OutputService13TestShapeOutputService13TestCaseOperation1Input) (*OutputService13TestShapeOutputService13TestCaseOperation1Output, error) { + req, out := c.OutputService13TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService13TestShapeOutputService13TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService13TestShapeOutputService13TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Map map[string]*string `locationName:"Attribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService14ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService14ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService14ProtocolTest client from just a session. +// svc := outputservice14protocoltest.New(mySession) +// +// // Create a OutputService14ProtocolTest client with additional configuration +// svc := outputservice14protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService14ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService14ProtocolTest { + c := p.ClientConfig("outputservice14protocoltest", cfgs...) 
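+	// ClientConfig merges any extra aws.Config values into the provider's
+	// base config and resolves the endpoint, signing region, and handler
+	// list used to construct the client below.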
+ return newOutputService14ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService14ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService14ProtocolTest { + svc := &OutputService14ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice14protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService14ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService14ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService14TestCaseOperation1 = "OperationName" + +// OutputService14TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService14TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService14TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService14TestCaseOperation1Request method. 
+// req, resp := client.OutputService14TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService14ProtocolTest) OutputService14TestCaseOperation1Request(input *OutputService14TestShapeOutputService14TestCaseOperation1Input) (req *request.Request, output *OutputService14TestShapeOutputService14TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService14TestCaseOperation1, + } + + if input == nil { + input = &OutputService14TestShapeOutputService14TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService14TestShapeOutputService14TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService14ProtocolTest) OutputService14TestCaseOperation1(input *OutputService14TestShapeOutputService14TestCaseOperation1Input) (*OutputService14TestShapeOutputService14TestCaseOperation1Output, error) { + req, out := c.OutputService14TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService14TestShapeOutputService14TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService14TestShapeOutputService14TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Map map[string]*string `locationNameKey:"foo" locationNameValue:"bar" type:"map" flattened:"true"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService15ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService15ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService15ProtocolTest client from just a session. +// svc := outputservice15protocoltest.New(mySession) +// +// // Create a OutputService15ProtocolTest client with additional configuration +// svc := outputservice15protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService15ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService15ProtocolTest { + c := p.ClientConfig("outputservice15protocoltest", cfgs...) + return newOutputService15ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService15ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService15ProtocolTest { + svc := &OutputService15ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice15protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService15ProtocolTest operation and runs any +// custom request initialization. 
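+// newRequest delegates to client.Client.NewRequest, giving the generated
+// code a single hook point for building operation requests.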
+func (c *OutputService15ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService15TestCaseOperation1 = "OperationName" + +// OutputService15TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService15TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService15TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService15TestCaseOperation1Request method. +// req, resp := client.OutputService15TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService15ProtocolTest) OutputService15TestCaseOperation1Request(input *OutputService15TestShapeOutputService15TestCaseOperation1Input) (req *request.Request, output *OutputService15TestShapeOutputService15TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService15TestCaseOperation1, + } + + if input == nil { + input = &OutputService15TestShapeOutputService15TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService15TestShapeOutputService15TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService15ProtocolTest) OutputService15TestCaseOperation1(input *OutputService15TestShapeOutputService15TestCaseOperation1Input) (*OutputService15TestShapeOutputService15TestCaseOperation1Output, error) { + req, out := c.OutputService15TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService15TestShapeOutputService15TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService15TestShapeOutputService15TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Foo *string `type:"string"` +} + +// +// Tests begin here +// + +func TestOutputService1ProtocolTestScalarMembersCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("myname123falsetrue1.21.3200a2015-01-25T08:00:00Zrequest-id")) + req, out := svc.OutputService1TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.Char) + assert.Equal(t, 1.3, *out.Double) + assert.Equal(t, false, *out.FalseBool) + assert.Equal(t, 1.2, *out.Float) + assert.Equal(t, int64(200), *out.Long) + assert.Equal(t, int64(123), *out.Num) + assert.Equal(t, "myname", *out.Str) + assert.Equal(t, time.Unix(1.4221728e+09, 0).UTC().String(), out.Timestamp.String()) + assert.Equal(t, true, *out.TrueBool) + +} + +func 
TestOutputService2ProtocolTestNotAllMembersInResponseCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("mynamerequest-id")) + req, out := svc.OutputService2TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "myname", *out.Str) + +} + +func TestOutputService3ProtocolTestBlobCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("dmFsdWU=requestid")) + req, out := svc.OutputService3TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "value", string(out.Blob)) + +} + +func TestOutputService4ProtocolTestListsCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abc123requestid")) + req, out := svc.OutputService4TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + assert.Equal(t, "123", *out.ListMember[1]) + +} + +func TestOutputService5ProtocolTestListWithCustomMemberNameCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abc123requestid")) + req, out := svc.OutputService5TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + assert.Equal(t, "123", *out.ListMember[1]) + +} + +func TestOutputService6ProtocolTestFlattenedListCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abc123requestid")) + req, out := svc.OutputService6TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + assert.Equal(t, "123", *out.ListMember[1]) + +} + +func TestOutputService7ProtocolTestFlattenedSingleElementListCase1(t 
*testing.T) { + sess := session.New() + svc := NewOutputService7ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abcrequestid")) + req, out := svc.OutputService7TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + +} + +func TestOutputService8ProtocolTestListOfStructuresCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService8ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("firstfoofirstbarfirstbazsecondfoosecondbarsecondbazrequestid")) + req, out := svc.OutputService8TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "firstbar", *out.List[0].Bar) + assert.Equal(t, "firstbaz", *out.List[0].Baz) + assert.Equal(t, "firstfoo", *out.List[0].Foo) + assert.Equal(t, "secondbar", *out.List[1].Bar) + assert.Equal(t, "secondbaz", *out.List[1].Baz) + assert.Equal(t, "secondfoo", *out.List[1].Foo) + +} + +func TestOutputService9ProtocolTestFlattenedListOfStructuresCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService9ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("firstfoofirstbarfirstbazsecondfoosecondbarsecondbazrequestid")) + req, out := svc.OutputService9TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "firstbar", *out.List[0].Bar) + assert.Equal(t, "firstbaz", *out.List[0].Baz) + assert.Equal(t, "firstfoo", *out.List[0].Foo) + assert.Equal(t, "secondbar", *out.List[1].Bar) + assert.Equal(t, "secondbaz", *out.List[1].Baz) + assert.Equal(t, "secondfoo", *out.List[1].Foo) + +} + +func TestOutputService10ProtocolTestFlattenedListWithLocationNameCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService10ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abrequestid")) + req, out := svc.OutputService10TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.List[0]) + assert.Equal(t, "b", *out.List[1]) + +} + +func TestOutputService11ProtocolTestNormalMapCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService11ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("quxbarbazbamrequestid")) + req, out := 
svc.OutputService11TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bam", *out.Map["baz"].Foo) + assert.Equal(t, "bar", *out.Map["qux"].Foo) + +} + +func TestOutputService12ProtocolTestFlattenedMapCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("quxbarbazbamrequestid")) + req, out := svc.OutputService12TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bam", *out.Map["baz"]) + assert.Equal(t, "bar", *out.Map["qux"]) + +} + +func TestOutputService13ProtocolTestFlattenedMapInShapeDefinitionCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService13ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("quxbarrequestid")) + req, out := svc.OutputService13TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bar", *out.Map["qux"]) + +} + +func TestOutputService14ProtocolTestNamedMapCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService14ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("quxbarbazbamrequestid")) + req, out := svc.OutputService14TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bam", *out.Map["baz"]) + assert.Equal(t, "bar", *out.Map["qux"]) + +} + +func TestOutputService15ProtocolTestEmptyStringCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService15ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("requestid")) + req, out := svc.OutputService15TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "", *out.Foo) + +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go new file mode 100644 index 000000000..5f412516d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -0,0 +1,256 @@ +// Package rest provides RESTful 
serialization of AWS requests and responses. +package rest + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "net/http" + "net/url" + "path" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// RFC822 returns an RFC822 formatted timestamp for AWS protocols +const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT" + +// Whether the byte value can be sent without escaping in AWS URLs +var noEscape [256]bool + +var errValueNotSet = fmt.Errorf("value not set") + +func init() { + for i := 0; i < len(noEscape); i++ { + // AWS expects every character except these to be escaped + noEscape[i] = (i >= 'A' && i <= 'Z') || + (i >= 'a' && i <= 'z') || + (i >= '0' && i <= '9') || + i == '-' || + i == '.' || + i == '_' || + i == '~' + } +} + +// BuildHandler is a named request handler for building rest protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build} + +// Build builds the REST component of a service request. +func Build(r *request.Request) { + if r.ParamsFilled() { + v := reflect.ValueOf(r.Params).Elem() + buildLocationElements(r, v) + buildBody(r, v) + } +} + +func buildLocationElements(r *request.Request, v reflect.Value) { + query := r.HTTPRequest.URL.Query() + + for i := 0; i < v.NumField(); i++ { + m := v.Field(i) + if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + field := v.Type().Field(i) + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + if m.Kind() == reflect.Ptr { + m = m.Elem() + } + if !m.IsValid() { + continue + } + + var err error + switch field.Tag.Get("location") { + case "headers": // header maps + err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag.Get("locationName")) + case "header": + err = buildHeader(&r.HTTPRequest.Header, m, name) + case "uri": + err = buildURI(r.HTTPRequest.URL, m, name) + case "querystring": + err = buildQueryString(query, m, name) + } + r.Error = err + } + if r.Error != nil { + return + } + } + + r.HTTPRequest.URL.RawQuery = query.Encode() + updatePath(r.HTTPRequest.URL, r.HTTPRequest.URL.Path) +} + +func buildBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := reflect.Indirect(v.FieldByName(payloadName)) + if payload.IsValid() && payload.Interface() != nil { + switch reader := payload.Interface().(type) { + case io.ReadSeeker: + r.SetReaderBody(reader) + case []byte: + r.SetBufferBody(reader) + case string: + r.SetStringBody(reader) + default: + r.Error = awserr.New("SerializationError", + "failed to encode REST request", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } +} + +func buildHeader(header *http.Header, v reflect.Value, name string) error { + str, err := convertType(v) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + + header.Add(name, str) + + return nil +} + +func buildHeaderMap(header *http.Header, v reflect.Value, prefix string) error { + for _, key := range v.MapKeys() { + str, err := convertType(v.MapIndex(key)) + if err == errValueNotSet { + continue + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", 
err) + + } + + header.Add(prefix+key.String(), str) + } + return nil +} + +func buildURI(u *url.URL, v reflect.Value, name string) error { + value, err := convertType(v) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + + uri := u.Path + uri = strings.Replace(uri, "{"+name+"}", EscapePath(value, true), -1) + uri = strings.Replace(uri, "{"+name+"+}", EscapePath(value, false), -1) + u.Path = uri + + return nil +} + +func buildQueryString(query url.Values, v reflect.Value, name string) error { + switch value := v.Interface().(type) { + case []*string: + for _, item := range value { + query.Add(name, *item) + } + case map[string]*string: + for key, item := range value { + query.Add(key, *item) + } + case map[string][]*string: + for key, items := range value { + for _, item := range items { + query.Add(key, *item) + } + } + default: + str, err := convertType(v) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + query.Set(name, str) + } + + return nil +} + +func updatePath(url *url.URL, urlPath string) { + scheme, query := url.Scheme, url.RawQuery + + hasSlash := strings.HasSuffix(urlPath, "/") + + // clean up path + urlPath = path.Clean(urlPath) + if hasSlash && !strings.HasSuffix(urlPath, "/") { + urlPath += "/" + } + + // get formatted URL minus scheme so we can build this into Opaque + url.Scheme, url.Path, url.RawQuery = "", "", "" + s := url.String() + url.Scheme = scheme + url.RawQuery = query + + // build opaque URI + url.Opaque = s + urlPath +} + +// EscapePath escapes part of a URL path in Amazon style +func EscapePath(path string, encodeSep bool) string { + var buf bytes.Buffer + for i := 0; i < len(path); i++ { + c := path[i] + if noEscape[c] || (c == '/' && !encodeSep) { + buf.WriteByte(c) + } else { + fmt.Fprintf(&buf, "%%%02X", c) + } + } + return buf.String() +} + +func convertType(v reflect.Value) (string, error) { + v = reflect.Indirect(v) + if !v.IsValid() { + return "", errValueNotSet + } + + var str string + switch value := v.Interface().(type) { + case string: + str = value + case []byte: + str = base64.StdEncoding.EncodeToString(value) + case bool: + str = strconv.FormatBool(value) + case int64: + str = strconv.FormatInt(value, 10) + case float64: + str = strconv.FormatFloat(value, 'f', -1, 64) + case time.Time: + str = value.UTC().Format(RFC822) + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return "", err + } + return str, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go new file mode 100644 index 000000000..4366de2e1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go @@ -0,0 +1,45 @@ +package rest + +import "reflect" + +// PayloadMember returns the payload field member of i if there is one, or nil. 
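+// The payload is located via the struct's "_" marker field: its "payload"
+// tag names the member carrying the body, and only structure-typed payloads
+// are returned here (non-structure payloads are handled by the body
+// marshalers and unmarshalers instead).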
+func PayloadMember(i interface{}) interface{} { + if i == nil { + return nil + } + + v := reflect.ValueOf(i).Elem() + if !v.IsValid() { + return nil + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + field, _ := v.Type().FieldByName(payloadName) + if field.Tag.Get("type") != "structure" { + return nil + } + + payload := v.FieldByName(payloadName) + if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) { + return payload.Interface() + } + } + } + return nil +} + +// PayloadType returns the type of a payload field member of i if there is one, or "". +func PayloadType(i interface{}) string { + v := reflect.Indirect(reflect.ValueOf(i)) + if !v.IsValid() { + return "" + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + if member, ok := v.Type().FieldByName(payloadName); ok { + return member.Tag.Get("type") + } + } + } + return "" +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go new file mode 100644 index 000000000..2cba1d9aa --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -0,0 +1,198 @@ +package rest + +import ( + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals the REST component of a response in a REST service. 
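+// Only the payload body is decoded here; response headers, the status code,
+// and the request ID are extracted separately by UnmarshalMeta.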
+func Unmarshal(r *request.Request) { + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + unmarshalBody(r, v) + } +} + +// UnmarshalMeta unmarshals the REST metadata of a response in a REST service +func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") + if r.RequestID == "" { + // Alternative version of request id in the header + r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") + } + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + unmarshalLocationElements(r, v) + } +} + +func unmarshalBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := v.FieldByName(payloadName) + if payload.IsValid() { + switch payload.Interface().(type) { + case []byte: + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + } else { + payload.Set(reflect.ValueOf(b)) + } + case *string: + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + } else { + str := string(b) + payload.Set(reflect.ValueOf(&str)) + } + default: + switch payload.Type().String() { + case "io.ReadSeeker": + payload.Set(reflect.ValueOf(aws.ReadSeekCloser(r.HTTPResponse.Body))) + case "aws.ReadSeekCloser", "io.ReadCloser": + payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) + default: + io.Copy(ioutil.Discard, r.HTTPResponse.Body) + defer r.HTTPResponse.Body.Close() + r.Error = awserr.New("SerializationError", + "failed to decode REST response", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } + } +} + +func unmarshalLocationElements(r *request.Request, v reflect.Value) { + for i := 0; i < v.NumField(); i++ { + m, field := v.Field(i), v.Type().Field(i) + if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + + switch field.Tag.Get("location") { + case "statusCode": + unmarshalStatusCode(m, r.HTTPResponse.StatusCode) + case "header": + err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name)) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + break + } + case "headers": + prefix := field.Tag.Get("locationName") + err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + break + } + } + } + if r.Error != nil { + return + } + } +} + +func unmarshalStatusCode(v reflect.Value, statusCode int) { + if !v.IsValid() { + return + } + + switch v.Interface().(type) { + case *int64: + s := int64(statusCode) + v.Set(reflect.ValueOf(&s)) + } +} + +func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error { + switch r.Interface().(type) { + case map[string]*string: // we only support string map value types + out := map[string]*string{} + for k, v := range headers { + k = http.CanonicalHeaderKey(k) + if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) { + out[k[len(prefix):]] = &v[0] + } + } + r.Set(reflect.ValueOf(out)) + } 
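+	// Header maps with any other value type fall through the switch above
+	// and are left unset; map[string]*string is the only supported target
+	// shape.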
+ return nil +} + +func unmarshalHeader(v reflect.Value, header string) error { + if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { + return nil + } + + switch v.Interface().(type) { + case *string: + v.Set(reflect.ValueOf(&header)) + case []byte: + b, err := base64.StdEncoding.DecodeString(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *bool: + b, err := strconv.ParseBool(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *int64: + i, err := strconv.ParseInt(header, 10, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&i)) + case *float64: + f, err := strconv.ParseFloat(header, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&f)) + case *time.Time: + t, err := time.Parse(RFC822, header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&t)) + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return err + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/build_bench_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/build_bench_test.go new file mode 100644 index 000000000..31e1d6c04 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/build_bench_test.go @@ -0,0 +1,356 @@ +// +build bench + +package restjson_test + +import ( + "bytes" + "encoding/json" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol/rest" + "github.com/aws/aws-sdk-go/private/protocol/restjson" + "github.com/aws/aws-sdk-go/service/elastictranscoder" +) + +func BenchmarkRESTJSONBuild_Complex_elastictranscoderCreateJobInput(b *testing.B) { + svc := awstesting.NewClient() + svc.ServiceName = "elastictranscoder" + svc.APIVersion = "2012-09-25" + + for i := 0; i < b.N; i++ { + r := svc.NewRequest(&request.Operation{Name: "CreateJobInput"}, restjsonBuildParms, nil) + restjson.Build(r) + if r.Error != nil { + b.Fatal("Unexpected error", r.Error) + } + } +} + +func BenchmarkRESTBuild_Complex_elastictranscoderCreateJobInput(b *testing.B) { + svc := awstesting.NewClient() + svc.ServiceName = "elastictranscoder" + svc.APIVersion = "2012-09-25" + + for i := 0; i < b.N; i++ { + r := svc.NewRequest(&request.Operation{Name: "CreateJobInput"}, restjsonBuildParms, nil) + rest.Build(r) + if r.Error != nil { + b.Fatal("Unexpected error", r.Error) + } + } +} + +func BenchmarkEncodingJSONMarshal_Complex_elastictranscoderCreateJobInput(b *testing.B) { + params := restjsonBuildParms + + for i := 0; i < b.N; i++ { + buf := &bytes.Buffer{} + encoder := json.NewEncoder(buf) + if err := encoder.Encode(params); err != nil { + b.Fatal("Unexpected error", err) + } + } +} + +func BenchmarkRESTJSONBuild_Simple_elastictranscoderListJobsByPipeline(b *testing.B) { + svc := awstesting.NewClient() + svc.ServiceName = "elastictranscoder" + svc.APIVersion = "2012-09-25" + + params := &elastictranscoder.ListJobsByPipelineInput{ + PipelineId: aws.String("Id"), // Required + Ascending: aws.String("Ascending"), + PageToken: aws.String("Id"), + } + + for i := 0; i < b.N; i++ { + r := svc.NewRequest(&request.Operation{Name: "ListJobsByPipeline"}, params, nil) + restjson.Build(r) + if r.Error != nil { + b.Fatal("Unexpected error", r.Error) + } + } +} + +func BenchmarkRESTBuild_Simple_elastictranscoderListJobsByPipeline(b *testing.B) { + svc := awstesting.NewClient() + svc.ServiceName = 
"elastictranscoder" + svc.APIVersion = "2012-09-25" + + params := &elastictranscoder.ListJobsByPipelineInput{ + PipelineId: aws.String("Id"), // Required + Ascending: aws.String("Ascending"), + PageToken: aws.String("Id"), + } + + for i := 0; i < b.N; i++ { + r := svc.NewRequest(&request.Operation{Name: "ListJobsByPipeline"}, params, nil) + rest.Build(r) + if r.Error != nil { + b.Fatal("Unexpected error", r.Error) + } + } +} + +func BenchmarkEncodingJSONMarshal_Simple_elastictranscoderListJobsByPipeline(b *testing.B) { + params := &elastictranscoder.ListJobsByPipelineInput{ + PipelineId: aws.String("Id"), // Required + Ascending: aws.String("Ascending"), + PageToken: aws.String("Id"), + } + + for i := 0; i < b.N; i++ { + buf := &bytes.Buffer{} + encoder := json.NewEncoder(buf) + if err := encoder.Encode(params); err != nil { + b.Fatal("Unexpected error", err) + } + } +} + +var restjsonBuildParms = &elastictranscoder.CreateJobInput{ + Input: &elastictranscoder.JobInput{ // Required + AspectRatio: aws.String("AspectRatio"), + Container: aws.String("JobContainer"), + DetectedProperties: &elastictranscoder.DetectedProperties{ + DurationMillis: aws.Int64(1), + FileSize: aws.Int64(1), + FrameRate: aws.String("FloatString"), + Height: aws.Int64(1), + Width: aws.Int64(1), + }, + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + FrameRate: aws.String("FrameRate"), + Interlaced: aws.String("Interlaced"), + Key: aws.String("Key"), + Resolution: aws.String("Resolution"), + }, + PipelineId: aws.String("Id"), // Required + Output: &elastictranscoder.CreateJobOutput{ + AlbumArt: &elastictranscoder.JobAlbumArt{ + Artwork: []*elastictranscoder.Artwork{ + { // Required + AlbumArtFormat: aws.String("JpgOrPng"), + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + InputKey: aws.String("WatermarkKey"), + MaxHeight: aws.String("DigitsOrAuto"), + MaxWidth: aws.String("DigitsOrAuto"), + PaddingPolicy: aws.String("PaddingPolicy"), + SizingPolicy: aws.String("SizingPolicy"), + }, + // More values... + }, + MergePolicy: aws.String("MergePolicy"), + }, + Captions: &elastictranscoder.Captions{ + CaptionFormats: []*elastictranscoder.CaptionFormat{ + { // Required + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + Format: aws.String("CaptionFormatFormat"), + Pattern: aws.String("CaptionFormatPattern"), + }, + // More values... + }, + CaptionSources: []*elastictranscoder.CaptionSource{ + { // Required + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + Key: aws.String("Key"), + Label: aws.String("Name"), + Language: aws.String("Key"), + TimeOffset: aws.String("TimeOffset"), + }, + // More values... 
+ }, + MergePolicy: aws.String("CaptionMergePolicy"), + }, + Composition: []*elastictranscoder.Clip{ + { // Required + TimeSpan: &elastictranscoder.TimeSpan{ + Duration: aws.String("Time"), + StartTime: aws.String("Time"), + }, + }, + // More values... + }, + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + Key: aws.String("Key"), + PresetId: aws.String("Id"), + Rotate: aws.String("Rotate"), + SegmentDuration: aws.String("FloatString"), + ThumbnailEncryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + ThumbnailPattern: aws.String("ThumbnailPattern"), + Watermarks: []*elastictranscoder.JobWatermark{ + { // Required + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + InputKey: aws.String("WatermarkKey"), + PresetWatermarkId: aws.String("PresetWatermarkId"), + }, + // More values... + }, + }, + OutputKeyPrefix: aws.String("Key"), + Outputs: []*elastictranscoder.CreateJobOutput{ + { // Required + AlbumArt: &elastictranscoder.JobAlbumArt{ + Artwork: []*elastictranscoder.Artwork{ + { // Required + AlbumArtFormat: aws.String("JpgOrPng"), + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + InputKey: aws.String("WatermarkKey"), + MaxHeight: aws.String("DigitsOrAuto"), + MaxWidth: aws.String("DigitsOrAuto"), + PaddingPolicy: aws.String("PaddingPolicy"), + SizingPolicy: aws.String("SizingPolicy"), + }, + // More values... + }, + MergePolicy: aws.String("MergePolicy"), + }, + Captions: &elastictranscoder.Captions{ + CaptionFormats: []*elastictranscoder.CaptionFormat{ + { // Required + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + Format: aws.String("CaptionFormatFormat"), + Pattern: aws.String("CaptionFormatPattern"), + }, + // More values... + }, + CaptionSources: []*elastictranscoder.CaptionSource{ + { // Required + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + Key: aws.String("Key"), + Label: aws.String("Name"), + Language: aws.String("Key"), + TimeOffset: aws.String("TimeOffset"), + }, + // More values... + }, + MergePolicy: aws.String("CaptionMergePolicy"), + }, + Composition: []*elastictranscoder.Clip{ + { // Required + TimeSpan: &elastictranscoder.TimeSpan{ + Duration: aws.String("Time"), + StartTime: aws.String("Time"), + }, + }, + // More values... 
+ }, + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + Key: aws.String("Key"), + PresetId: aws.String("Id"), + Rotate: aws.String("Rotate"), + SegmentDuration: aws.String("FloatString"), + ThumbnailEncryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + ThumbnailPattern: aws.String("ThumbnailPattern"), + Watermarks: []*elastictranscoder.JobWatermark{ + { // Required + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + InputKey: aws.String("WatermarkKey"), + PresetWatermarkId: aws.String("PresetWatermarkId"), + }, + // More values... + }, + }, + // More values... + }, + Playlists: []*elastictranscoder.CreateJobPlaylist{ + { // Required + Format: aws.String("PlaylistFormat"), + HlsContentProtection: &elastictranscoder.HlsContentProtection{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + KeyStoragePolicy: aws.String("KeyStoragePolicy"), + LicenseAcquisitionUrl: aws.String("ZeroTo512String"), + Method: aws.String("HlsContentProtectionMethod"), + }, + Name: aws.String("Filename"), + OutputKeys: []*string{ + aws.String("Key"), // Required + // More values... + }, + PlayReadyDrm: &elastictranscoder.PlayReadyDrm{ + Format: aws.String("PlayReadyDrmFormatString"), + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("NonEmptyBase64EncodedString"), + KeyId: aws.String("KeyIdGuid"), + KeyMd5: aws.String("NonEmptyBase64EncodedString"), + LicenseAcquisitionUrl: aws.String("OneTo512String"), + }, + }, + // More values... + }, + UserMetadata: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... 
+ }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/build_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/build_test.go new file mode 100644 index 000000000..e9d3c86f8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/build_test.go @@ -0,0 +1,3551 @@ +package restjson_test + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restjson" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" + "github.com/aws/aws-sdk-go/private/util" + "github.com/stretchr/testify/assert" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} +var _ = io.EOF +var _ = aws.String +var _ = fmt.Println + +func init() { + protocol.RandReader = &awstesting.ZeroReader{} +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService1ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService1ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService1ProtocolTest client from just a session. +// svc := inputservice1protocoltest.New(mySession) +// +// // Create a InputService1ProtocolTest client with additional configuration +// svc := inputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService1ProtocolTest { + c := p.ClientConfig("inputservice1protocoltest", cfgs...) + return newInputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService1ProtocolTest { + svc := &InputService1ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice1protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService1ProtocolTest operation and runs any +// custom request initialization. 
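+// The REST-JSON test clients reuse the same pattern as the query protocol
+// clients above: the request from client.Client.NewRequest is returned as is.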
+func (c *InputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService1TestCaseOperation1 = "OperationName" + +// InputService1TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService1TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService1TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService1TestCaseOperation1Request method. +// req, resp := client.InputService1TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService1ProtocolTest) InputService1TestCaseOperation1Request(input *InputService1TestShapeInputService1TestCaseOperation1Input) (req *request.Request, output *InputService1TestShapeInputService1TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService1TestCaseOperation1, + HTTPMethod: "GET", + HTTPPath: "/2014-01-01/jobs", + } + + if input == nil { + input = &InputService1TestShapeInputService1TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService1TestShapeInputService1TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService1ProtocolTest) InputService1TestCaseOperation1(input *InputService1TestShapeInputService1TestCaseOperation1Input) (*InputService1TestShapeInputService1TestCaseOperation1Output, error) { + req, out := c.InputService1TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService1TestShapeInputService1TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type InputService1TestShapeInputService1TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService2ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService2ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService2ProtocolTest client from just a session. +// svc := inputservice2protocoltest.New(mySession) +// +// // Create a InputService2ProtocolTest client with additional configuration +// svc := inputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService2ProtocolTest { + c := p.ClientConfig("inputservice2protocoltest", cfgs...) 
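+	// The session-derived client.Config supplies the resolved endpoint and
+	// signing region; the constructor below copies its handler list onto
+	// the new client.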
+ return newInputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService2ProtocolTest { + svc := &InputService2ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice2protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService2ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService2TestCaseOperation1 = "OperationName" + +// InputService2TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService2TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService2TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService2TestCaseOperation1Request method. 
+// req, resp := client.InputService2TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService2ProtocolTest) InputService2TestCaseOperation1Request(input *InputService2TestShapeInputService2TestCaseOperation1Input) (req *request.Request, output *InputService2TestShapeInputService2TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService2TestCaseOperation1, + HTTPMethod: "GET", + HTTPPath: "/2014-01-01/jobsByPipeline/{PipelineId}", + } + + if input == nil { + input = &InputService2TestShapeInputService2TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService2TestShapeInputService2TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService2ProtocolTest) InputService2TestCaseOperation1(input *InputService2TestShapeInputService2TestCaseOperation1Input) (*InputService2TestShapeInputService2TestCaseOperation1Output, error) { + req, out := c.InputService2TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService2TestShapeInputService2TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + PipelineId *string `location:"uri" type:"string"` +} + +type InputService2TestShapeInputService2TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService3ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService3ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService3ProtocolTest client from just a session. +// svc := inputservice3protocoltest.New(mySession) +// +// // Create a InputService3ProtocolTest client with additional configuration +// svc := inputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService3ProtocolTest { + c := p.ClientConfig("inputservice3protocoltest", cfgs...) + return newInputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService3ProtocolTest { + svc := &InputService3ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice3protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService3ProtocolTest operation and runs any +// custom request initialization. 
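+// Handlers pushed onto the request returned by newRequest affect only that
+// operation call, unlike the client-wide handlers registered in newClient.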
+func (c *InputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService3TestCaseOperation1 = "OperationName" + +// InputService3TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService3TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService3TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService3TestCaseOperation1Request method. +// req, resp := client.InputService3TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService3ProtocolTest) InputService3TestCaseOperation1Request(input *InputService3TestShapeInputService3TestCaseOperation1Input) (req *request.Request, output *InputService3TestShapeInputService3TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService3TestCaseOperation1, + HTTPMethod: "GET", + HTTPPath: "/2014-01-01/jobsByPipeline/{PipelineId}", + } + + if input == nil { + input = &InputService3TestShapeInputService3TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService3TestShapeInputService3TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService3ProtocolTest) InputService3TestCaseOperation1(input *InputService3TestShapeInputService3TestCaseOperation1Input) (*InputService3TestShapeInputService3TestCaseOperation1Output, error) { + req, out := c.InputService3TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService3TestShapeInputService3TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + Foo *string `location:"uri" locationName:"PipelineId" type:"string"` +} + +type InputService3TestShapeInputService3TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService4ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService4ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService4ProtocolTest client from just a session. 
+// svc := inputservice4protocoltest.New(mySession) +// +// // Create a InputService4ProtocolTest client with additional configuration +// svc := inputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService4ProtocolTest { + c := p.ClientConfig("inputservice4protocoltest", cfgs...) + return newInputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService4ProtocolTest { + svc := &InputService4ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice4protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService4ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService4TestCaseOperation1 = "OperationName" + +// InputService4TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService4TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService4TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService4TestCaseOperation1Request method. 
+// req, resp := client.InputService4TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService4ProtocolTest) InputService4TestCaseOperation1Request(input *InputService4TestShapeInputService4TestCaseOperation1Input) (req *request.Request, output *InputService4TestShapeInputService4TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService4TestCaseOperation1, + HTTPMethod: "GET", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService4TestShapeInputService4TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService4TestShapeInputService4TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService4ProtocolTest) InputService4TestCaseOperation1(input *InputService4TestShapeInputService4TestCaseOperation1Input) (*InputService4TestShapeInputService4TestCaseOperation1Output, error) { + req, out := c.InputService4TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService4TestShapeInputService4TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + Items []*string `location:"querystring" locationName:"item" type:"list"` +} + +type InputService4TestShapeInputService4TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService5ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService5ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService5ProtocolTest client from just a session. +// svc := inputservice5protocoltest.New(mySession) +// +// // Create a InputService5ProtocolTest client with additional configuration +// svc := inputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService5ProtocolTest { + c := p.ClientConfig("inputservice5protocoltest", cfgs...) + return newInputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService5ProtocolTest { + svc := &InputService5ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice5protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService5ProtocolTest operation and runs any +// custom request initialization. 
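[Note] InputService4's Items member above is a querystring list (locationName:"item"), so each element should be emitted as a repeated item parameter. A minimal sketch with hypothetical values, using net/url directly rather than the SDK's rest marshaler:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Hypothetical values for the Items member; each element becomes a
	// repeated "item" query parameter, per its locationName tag.
	items := []string{"value1", "value2"}

	q := url.Values{}
	for _, v := range items {
		q.Add("item", v)
	}
	fmt.Println("/path?" + q.Encode())
	// Output: /path?item=value1&item=value2
}
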
+func (c *InputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService5TestCaseOperation1 = "OperationName" + +// InputService5TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService5TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService5TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService5TestCaseOperation1Request method. +// req, resp := client.InputService5TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService5ProtocolTest) InputService5TestCaseOperation1Request(input *InputService5TestShapeInputService5TestCaseOperation1Input) (req *request.Request, output *InputService5TestShapeInputService5TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService5TestCaseOperation1, + HTTPMethod: "GET", + HTTPPath: "/2014-01-01/jobsByPipeline/{PipelineId}", + } + + if input == nil { + input = &InputService5TestShapeInputService5TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService5TestShapeInputService5TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService5ProtocolTest) InputService5TestCaseOperation1(input *InputService5TestShapeInputService5TestCaseOperation1Input) (*InputService5TestShapeInputService5TestCaseOperation1Output, error) { + req, out := c.InputService5TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService5TestShapeInputService5TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + PipelineId *string `location:"uri" type:"string"` + + QueryDoc map[string]*string `location:"querystring" type:"map"` +} + +type InputService5TestShapeInputService5TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService6ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService6ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService6ProtocolTest client from just a session. 
+// svc := inputservice6protocoltest.New(mySession) +// +// // Create a InputService6ProtocolTest client with additional configuration +// svc := inputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService6ProtocolTest { + c := p.ClientConfig("inputservice6protocoltest", cfgs...) + return newInputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService6ProtocolTest { + svc := &InputService6ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice6protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService6ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService6TestCaseOperation1 = "OperationName" + +// InputService6TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService6TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService6TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService6TestCaseOperation1Request method. 
+// req, resp := client.InputService6TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService6ProtocolTest) InputService6TestCaseOperation1Request(input *InputService6TestShapeInputService6TestCaseOperation1Input) (req *request.Request, output *InputService6TestShapeInputService6TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService6TestCaseOperation1, + HTTPMethod: "GET", + HTTPPath: "/2014-01-01/jobsByPipeline/{PipelineId}", + } + + if input == nil { + input = &InputService6TestShapeInputService6TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService6TestShapeInputService6TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService6ProtocolTest) InputService6TestCaseOperation1(input *InputService6TestShapeInputService6TestCaseOperation1Input) (*InputService6TestShapeInputService6TestCaseOperation1Output, error) { + req, out := c.InputService6TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService6TestShapeInputService6TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + PipelineId *string `location:"uri" type:"string"` + + QueryDoc map[string][]*string `location:"querystring" type:"map"` +} + +type InputService6TestShapeInputService6TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService7ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService7ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService7ProtocolTest client from just a session. +// svc := inputservice7protocoltest.New(mySession) +// +// // Create a InputService7ProtocolTest client with additional configuration +// svc := inputservice7protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService7ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService7ProtocolTest { + c := p.ClientConfig("inputservice7protocoltest", cfgs...) + return newInputService7ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
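[Note] The QueryDoc members above exercise querystring maps: InputService5 uses map[string]*string (one parameter per key) and InputService6 uses map[string][]*string (one parameter per key per value). A stdlib-only sketch of the expected expansion, with invented keys and values:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	q := url.Values{}

	// InputService5 style, map[string]*string: each key/value pair
	// becomes one query parameter (keys here are hypothetical).
	scalar := map[string]string{"bar": "baz", "fizz": "pop"}
	for k, v := range scalar {
		q.Add(k, v)
	}

	// InputService6 style, map[string][]*string: each key repeats once
	// per value in its list.
	lists := map[string][]string{"foo": {"bar", "baz"}}
	for k, vs := range lists {
		for _, v := range vs {
			q.Add(k, v)
		}
	}

	fmt.Println(q.Encode()) // bar=baz&fizz=pop&foo=bar&foo=baz
}
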
+func newInputService7ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService7ProtocolTest { + svc := &InputService7ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice7protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService7ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService7ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService7TestCaseOperation1 = "OperationName" + +// InputService7TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService7TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService7TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService7TestCaseOperation1Request method. 
+// req, resp := client.InputService7TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService7ProtocolTest) InputService7TestCaseOperation1Request(input *InputService7TestShapeInputService7TestCaseOperation1Input) (req *request.Request, output *InputService7TestShapeInputService7TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService7TestCaseOperation1, + HTTPMethod: "GET", + HTTPPath: "/2014-01-01/jobsByPipeline/{PipelineId}", + } + + if input == nil { + input = &InputService7TestShapeInputService7TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService7TestShapeInputService7TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService7ProtocolTest) InputService7TestCaseOperation1(input *InputService7TestShapeInputService7TestCaseOperation1Input) (*InputService7TestShapeInputService7TestCaseOperation1Output, error) { + req, out := c.InputService7TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService7TestShapeInputService7TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + Ascending *string `location:"querystring" locationName:"Ascending" type:"string"` + + PageToken *string `location:"querystring" locationName:"PageToken" type:"string"` + + PipelineId *string `location:"uri" locationName:"PipelineId" type:"string"` +} + +type InputService7TestShapeInputService7TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService8ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService8ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService8ProtocolTest client from just a session. +// svc := inputservice8protocoltest.New(mySession) +// +// // Create a InputService8ProtocolTest client with additional configuration +// svc := inputservice8protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService8ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService8ProtocolTest { + c := p.ClientConfig("inputservice8protocoltest", cfgs...) + return newInputService8ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
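[Note] InputService7 mixes both URI locations seen so far: PipelineId fills the path label while Ascending and PageToken become query parameters. A sketch of the final URL assembly under those tags, with invented values:

package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	// PipelineId fills the {PipelineId} path label; Ascending and
	// PageToken land in the query string. All values hypothetical.
	path := strings.Replace("/2014-01-01/jobsByPipeline/{PipelineId}",
		"{PipelineId}", url.PathEscape("9de3f"), 1)

	q := url.Values{}
	q.Set("Ascending", "true")
	q.Set("PageToken", "bar")

	fmt.Println(path + "?" + q.Encode())
	// Output: /2014-01-01/jobsByPipeline/9de3f?Ascending=true&PageToken=bar
}
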
+func newInputService8ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService8ProtocolTest { + svc := &InputService8ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice8protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService8ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService8TestCaseOperation1 = "OperationName" + +// InputService8TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService8TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService8TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService8TestCaseOperation1Request method. 
+// req, resp := client.InputService8TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService8ProtocolTest) InputService8TestCaseOperation1Request(input *InputService8TestShapeInputService8TestCaseOperation1Input) (req *request.Request, output *InputService8TestShapeInputService8TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService8TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/jobsByPipeline/{PipelineId}", + } + + if input == nil { + input = &InputService8TestShapeInputService8TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService8TestShapeInputService8TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService8ProtocolTest) InputService8TestCaseOperation1(input *InputService8TestShapeInputService8TestCaseOperation1Input) (*InputService8TestShapeInputService8TestCaseOperation1Output, error) { + req, out := c.InputService8TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService8TestShapeInputService8TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + Ascending *string `location:"querystring" locationName:"Ascending" type:"string"` + + Config *InputService8TestShapeStructType `type:"structure"` + + PageToken *string `location:"querystring" locationName:"PageToken" type:"string"` + + PipelineId *string `location:"uri" locationName:"PipelineId" type:"string"` +} + +type InputService8TestShapeInputService8TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService8TestShapeStructType struct { + _ struct{} `type:"structure"` + + A *string `type:"string"` + + B *string `type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService9ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService9ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService9ProtocolTest client from just a session. +// svc := inputservice9protocoltest.New(mySession) +// +// // Create a InputService9ProtocolTest client with additional configuration +// svc := inputservice9protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService9ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService9ProtocolTest { + c := p.ClientConfig("inputservice9protocoltest", cfgs...) + return newInputService9ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
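[Note] In InputService8, only Config lacks a location tag, so it alone is serialized into the JSON request body; the URI and querystring members stay out of the payload. A sketch of the resulting body using encoding/json, with hypothetical values (the real marshaling goes through restjson.BuildHandler):

package main

import (
	"encoding/json"
	"fmt"
)

// Only members without a location tag end up in the JSON body; Ascending,
// PageToken and PipelineId ride in the URL instead. Names mirror the test
// shapes; values are made up.
type structType struct {
	A *string `json:"A,omitempty"`
	B *string `json:"B,omitempty"`
}

type body struct {
	Config *structType `json:"Config,omitempty"`
}

func main() {
	a, b := "one", "two"
	buf, _ := json.Marshal(body{Config: &structType{A: &a, B: &b}})
	fmt.Println(string(buf))
	// Output: {"Config":{"A":"one","B":"two"}}
}
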
+func newInputService9ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService9ProtocolTest { + svc := &InputService9ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice9protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService9ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService9TestCaseOperation1 = "OperationName" + +// InputService9TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService9TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService9TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService9TestCaseOperation1Request method. 
+// req, resp := client.InputService9TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService9ProtocolTest) InputService9TestCaseOperation1Request(input *InputService9TestShapeInputService9TestCaseOperation1Input) (req *request.Request, output *InputService9TestShapeInputService9TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService9TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/jobsByPipeline/{PipelineId}", + } + + if input == nil { + input = &InputService9TestShapeInputService9TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService9TestShapeInputService9TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService9ProtocolTest) InputService9TestCaseOperation1(input *InputService9TestShapeInputService9TestCaseOperation1Input) (*InputService9TestShapeInputService9TestCaseOperation1Output, error) { + req, out := c.InputService9TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService9TestShapeInputService9TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + Ascending *string `location:"querystring" locationName:"Ascending" type:"string"` + + Checksum *string `location:"header" locationName:"x-amz-checksum" type:"string"` + + Config *InputService9TestShapeStructType `type:"structure"` + + PageToken *string `location:"querystring" locationName:"PageToken" type:"string"` + + PipelineId *string `location:"uri" locationName:"PipelineId" type:"string"` +} + +type InputService9TestShapeInputService9TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService9TestShapeStructType struct { + _ struct{} `type:"structure"` + + A *string `type:"string"` + + B *string `type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService10ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService10ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService10ProtocolTest client from just a session. +// svc := inputservice10protocoltest.New(mySession) +// +// // Create a InputService10ProtocolTest client with additional configuration +// svc := inputservice10protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService10ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService10ProtocolTest { + c := p.ClientConfig("inputservice10protocoltest", cfgs...) + return newInputService10ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
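[Note] InputService9 adds a header-bound member: Checksum is tagged location:"header" with locationName x-amz-checksum, so it is written as an HTTP header rather than into the body or URL. A stdlib sketch (endpoint and checksum value are placeholders):

package main

import (
	"fmt"
	"net/http"
)

func main() {
	req, err := http.NewRequest("POST",
		"https://example.com/2014-01-01/jobsByPipeline/foo", nil)
	if err != nil {
		panic(err)
	}

	// A member tagged location:"header" is written under its
	// locationName; the value here is hypothetical.
	req.Header.Set("x-amz-checksum", "12345")

	fmt.Println(req.Header.Get("x-amz-checksum")) // 12345 (net/http canonicalizes the key)
}
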
+func newInputService10ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService10ProtocolTest { + svc := &InputService10ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice10protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService10ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService10ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService10TestCaseOperation1 = "OperationName" + +// InputService10TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService10TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService10TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService10TestCaseOperation1Request method. 
+// req, resp := client.InputService10TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService10ProtocolTest) InputService10TestCaseOperation1Request(input *InputService10TestShapeInputService10TestCaseOperation1Input) (req *request.Request, output *InputService10TestShapeInputService10TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService10TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/vaults/{vaultName}/archives", + } + + if input == nil { + input = &InputService10TestShapeInputService10TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService10TestShapeInputService10TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService10ProtocolTest) InputService10TestCaseOperation1(input *InputService10TestShapeInputService10TestCaseOperation1Input) (*InputService10TestShapeInputService10TestCaseOperation1Output, error) { + req, out := c.InputService10TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService10TestShapeInputService10TestCaseOperation1Input struct { + _ struct{} `type:"structure" payload:"Body"` + + Body io.ReadSeeker `locationName:"body" type:"blob"` + + Checksum *string `location:"header" locationName:"x-amz-sha256-tree-hash" type:"string"` + + VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InputService10TestShapeInputService10TestCaseOperation1Input) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InputService10TestShapeInputService10TestCaseOperation1Input"} + if s.VaultName == nil { + invalidParams.Add(request.NewErrParamRequired("VaultName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type InputService10TestShapeInputService10TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService11ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService11ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService11ProtocolTest client from just a session. +// svc := inputservice11protocoltest.New(mySession) +// +// // Create a InputService11ProtocolTest client with additional configuration +// svc := inputservice11protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService11ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService11ProtocolTest { + c := p.ClientConfig("inputservice11protocoltest", cfgs...) + return newInputService11ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
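[Note] InputService10 introduces two things: a streaming payload (Body io.ReadSeeker is sent raw as the request body) and client-side validation, since VaultName is required:"true" and the generated Validate method rejects a nil value before any request is sent. A stripped-down analogue of that validation pattern (the generated code uses request.ErrInvalidParams, as shown above):

package main

import (
	"errors"
	"fmt"
)

// A simplified analogue of the generated Validate method: required
// members are checked client-side before any HTTP traffic happens.
type input struct {
	VaultName *string // required:"true" in the real shape
}

func (s *input) validate() error {
	if s.VaultName == nil {
		return errors.New("missing required field, VaultName")
	}
	return nil
}

func main() {
	fmt.Println((&input{}).validate()) // missing required field, VaultName
	v := "archive-vault"               // hypothetical vault name
	fmt.Println((&input{VaultName: &v}).validate()) // <nil>
}
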
+func newInputService11ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService11ProtocolTest { + svc := &InputService11ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice11protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService11ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService11ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService11TestCaseOperation1 = "OperationName" + +// InputService11TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService11TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService11TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService11TestCaseOperation1Request method. +// req, resp := client.InputService11TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService11ProtocolTest) InputService11TestCaseOperation1Request(input *InputService11TestShapeInputService11TestCaseOperation1Input) (req *request.Request, output *InputService11TestShapeInputService11TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService11TestCaseOperation1, + HTTPMethod: "GET", + HTTPPath: "/2014-01-01/{Foo}", + } + + if input == nil { + input = &InputService11TestShapeInputService11TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService11TestShapeInputService11TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService11ProtocolTest) InputService11TestCaseOperation1(input *InputService11TestShapeInputService11TestCaseOperation1Input) (*InputService11TestShapeInputService11TestCaseOperation1Output, error) { + req, out := c.InputService11TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService11TestShapeInputService11TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + // Bar is automatically base64 encoded/decoded by the SDK. 
+ Bar []byte `type:"blob"` + + Foo *string `location:"uri" locationName:"Foo" type:"string" required:"true"` +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InputService11TestShapeInputService11TestCaseOperation1Input) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InputService11TestShapeInputService11TestCaseOperation1Input"} + if s.Foo == nil { + invalidParams.Add(request.NewErrParamRequired("Foo")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type InputService11TestShapeInputService11TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService12ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService12ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService12ProtocolTest client from just a session. +// svc := inputservice12protocoltest.New(mySession) +// +// // Create a InputService12ProtocolTest client with additional configuration +// svc := inputservice12protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService12ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService12ProtocolTest { + c := p.ClientConfig("inputservice12protocoltest", cfgs...) + return newInputService12ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService12ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService12ProtocolTest { + svc := &InputService12ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice12protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService12ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService12ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService12TestCaseOperation1 = "OperationName" + +// InputService12TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService12TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService12TestCaseOperation1 method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService12TestCaseOperation1Request method. +// req, resp := client.InputService12TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService12ProtocolTest) InputService12TestCaseOperation1Request(input *InputService12TestShapeInputShape) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService12TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService12TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService12TestShapeInputService12TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService12ProtocolTest) InputService12TestCaseOperation1(input *InputService12TestShapeInputShape) (*InputService12TestShapeInputService12TestCaseOperation1Output, error) { + req, out := c.InputService12TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService12TestCaseOperation2 = "OperationName" + +// InputService12TestCaseOperation2Request generates a "aws/request.Request" representing the +// client's request for the InputService12TestCaseOperation2 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService12TestCaseOperation2 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService12TestCaseOperation2Request method. 
+// req, resp := client.InputService12TestCaseOperation2Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService12ProtocolTest) InputService12TestCaseOperation2Request(input *InputService12TestShapeInputShape) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService12TestCaseOperation2, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService12TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService12TestShapeInputService12TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService12ProtocolTest) InputService12TestCaseOperation2(input *InputService12TestShapeInputShape) (*InputService12TestShapeInputService12TestCaseOperation2Output, error) { + req, out := c.InputService12TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService12TestShapeInputService12TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService12TestShapeInputService12TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService12TestShapeInputShape struct { + _ struct{} `type:"structure" payload:"Foo"` + + Foo []byte `locationName:"foo" type:"blob"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService13ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService13ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService13ProtocolTest client from just a session. +// svc := inputservice13protocoltest.New(mySession) +// +// // Create a InputService13ProtocolTest client with additional configuration +// svc := inputservice13protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService13ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService13ProtocolTest { + c := p.ClientConfig("inputservice13protocoltest", cfgs...) + return newInputService13ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService13ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService13ProtocolTest { + svc := &InputService13ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice13protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService13ProtocolTest operation and runs any +// custom request initialization. 
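[Note] The two blob cases above differ in where the bytes land: InputService11's Bar sits inside a JSON body and is therefore base64-encoded, while InputService12's Foo is the payload itself (payload:"Foo") and is written to the body unencoded. A small demonstration with made-up data:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	data := []byte("bar") // hypothetical blob contents

	// InputService11 style: a []byte member inside a JSON body is
	// base64-encoded, which encoding/json does for byte slices.
	buf, _ := json.Marshal(map[string][]byte{"Bar": data})
	fmt.Println(string(buf)) // {"Bar":"YmFy"}

	// InputService12 style: a blob marked as the payload is written to
	// the request body as-is, with no JSON wrapper.
	fmt.Println(string(data)) // bar
}
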
+func (c *InputService13ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService13TestCaseOperation1 = "OperationName" + +// InputService13TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService13TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService13TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService13TestCaseOperation1Request method. +// req, resp := client.InputService13TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService13ProtocolTest) InputService13TestCaseOperation1Request(input *InputService13TestShapeInputShape) (req *request.Request, output *InputService13TestShapeInputService13TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService13TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService13TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService13TestShapeInputService13TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService13ProtocolTest) InputService13TestCaseOperation1(input *InputService13TestShapeInputShape) (*InputService13TestShapeInputService13TestCaseOperation1Output, error) { + req, out := c.InputService13TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService13TestCaseOperation2 = "OperationName" + +// InputService13TestCaseOperation2Request generates a "aws/request.Request" representing the +// client's request for the InputService13TestCaseOperation2 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService13TestCaseOperation2 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService13TestCaseOperation2Request method. 
+// req, resp := client.InputService13TestCaseOperation2Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService13ProtocolTest) InputService13TestCaseOperation2Request(input *InputService13TestShapeInputShape) (req *request.Request, output *InputService13TestShapeInputService13TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService13TestCaseOperation2, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService13TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService13TestShapeInputService13TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService13ProtocolTest) InputService13TestCaseOperation2(input *InputService13TestShapeInputShape) (*InputService13TestShapeInputService13TestCaseOperation2Output, error) { + req, out := c.InputService13TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService13TestShapeFooShape struct { + _ struct{} `locationName:"foo" type:"structure"` + + Baz *string `locationName:"baz" type:"string"` +} + +type InputService13TestShapeInputService13TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService13TestShapeInputService13TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService13TestShapeInputShape struct { + _ struct{} `type:"structure" payload:"Foo"` + + Foo *InputService13TestShapeFooShape `locationName:"foo" type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService14ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService14ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService14ProtocolTest client from just a session. +// svc := inputservice14protocoltest.New(mySession) +// +// // Create a InputService14ProtocolTest client with additional configuration +// svc := inputservice14protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService14ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService14ProtocolTest { + c := p.ClientConfig("inputservice14protocoltest", cfgs...) + return newInputService14ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
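[Note] InputService13 marks a nested structure as the payload, so the FooShape contents become the entire JSON body rather than appearing under a foo key. A sketch of that serialization (names mirror the test shapes, the value is invented):

package main

import (
	"encoding/json"
	"fmt"
)

// With payload:"Foo", the nested structure is serialized as the whole
// request body.
type fooShape struct {
	Baz *string `json:"baz,omitempty"`
}

func main() {
	baz := "bam"
	buf, _ := json.Marshal(&fooShape{Baz: &baz})
	fmt.Println(string(buf)) // {"baz":"bam"}

	// Operation 2 leaves Foo nil; presumably the rest-json marshaler
	// then sends an empty body rather than the string "null".
}
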
+func newInputService14ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService14ProtocolTest { + svc := &InputService14ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice14protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService14ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService14ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService14TestCaseOperation1 = "OperationName" + +// InputService14TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService14TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService14TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService14TestCaseOperation1Request method. +// req, resp := client.InputService14TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService14ProtocolTest) InputService14TestCaseOperation1Request(input *InputService14TestShapeInputShape) (req *request.Request, output *InputService14TestShapeInputService14TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService14TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService14TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService14TestShapeInputService14TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService14ProtocolTest) InputService14TestCaseOperation1(input *InputService14TestShapeInputShape) (*InputService14TestShapeInputService14TestCaseOperation1Output, error) { + req, out := c.InputService14TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService14TestCaseOperation2 = "OperationName" + +// InputService14TestCaseOperation2Request generates a "aws/request.Request" representing the +// client's request for the InputService14TestCaseOperation2 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the InputService14TestCaseOperation2 method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the InputService14TestCaseOperation2Request method.
+// req, resp := client.InputService14TestCaseOperation2Request(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *InputService14ProtocolTest) InputService14TestCaseOperation2Request(input *InputService14TestShapeInputShape) (req *request.Request, output *InputService14TestShapeInputService14TestCaseOperation2Output) {
+ op := &request.Operation{
+ Name: opInputService14TestCaseOperation2,
+ HTTPMethod: "POST",
+ HTTPPath: "/path?abc=mno",
+ }
+
+ if input == nil {
+ input = &InputService14TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &InputService14TestShapeInputService14TestCaseOperation2Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService14ProtocolTest) InputService14TestCaseOperation2(input *InputService14TestShapeInputShape) (*InputService14TestShapeInputService14TestCaseOperation2Output, error) {
+ req, out := c.InputService14TestCaseOperation2Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService14TestShapeInputService14TestCaseOperation1Output struct {
+ _ struct{} `type:"structure"`
+}
+
+type InputService14TestShapeInputService14TestCaseOperation2Output struct {
+ _ struct{} `type:"structure"`
+}
+
+type InputService14TestShapeInputShape struct {
+ _ struct{} `type:"structure"`
+
+ Foo *string `location:"querystring" locationName:"param-name" type:"string"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService15ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the InputService15ProtocolTest client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a InputService15ProtocolTest client from just a session.
+// svc := inputservice15protocoltest.New(mySession)
+//
+// // Create a InputService15ProtocolTest client with additional configuration
+// svc := inputservice15protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService15ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService15ProtocolTest {
+ c := p.ClientConfig("inputservice15protocoltest", cfgs...)
+ return newInputService15ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService15ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService15ProtocolTest { + svc := &InputService15ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice15protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService15ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService15ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService15TestCaseOperation1 = "OperationName" + +// InputService15TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService15TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService15TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService15TestCaseOperation1Request method. +// req, resp := client.InputService15TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService15ProtocolTest) InputService15TestCaseOperation1Request(input *InputService15TestShapeInputShape) (req *request.Request, output *InputService15TestShapeInputService15TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService15TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService15TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService15TestShapeInputService15TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService15ProtocolTest) InputService15TestCaseOperation1(input *InputService15TestShapeInputShape) (*InputService15TestShapeInputService15TestCaseOperation1Output, error) { + req, out := c.InputService15TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService15TestCaseOperation2 = "OperationName" + +// InputService15TestCaseOperation2Request generates a "aws/request.Request" representing the +// client's request for the InputService15TestCaseOperation2 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
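+//
+// A minimal sketch of the handler injection described in the next paragraph,
+// assuming only the vendored aws/request API (illustrative, not generated
+// output):
+//
+// req, _ := svc.InputService15TestCaseOperation2Request(input)
+// req.Handlers.Send.PushFront(func(r *request.Request) {
+// // inspect or adjust r.HTTPRequest here before it is sent
+// })
+// err := req.Send()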
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService15TestCaseOperation2 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService15TestCaseOperation2Request method. +// req, resp := client.InputService15TestCaseOperation2Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService15ProtocolTest) InputService15TestCaseOperation2Request(input *InputService15TestShapeInputShape) (req *request.Request, output *InputService15TestShapeInputService15TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService15TestCaseOperation2, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService15TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService15TestShapeInputService15TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService15ProtocolTest) InputService15TestCaseOperation2(input *InputService15TestShapeInputShape) (*InputService15TestShapeInputService15TestCaseOperation2Output, error) { + req, out := c.InputService15TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +const opInputService15TestCaseOperation3 = "OperationName" + +// InputService15TestCaseOperation3Request generates a "aws/request.Request" representing the +// client's request for the InputService15TestCaseOperation3 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService15TestCaseOperation3 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService15TestCaseOperation3Request method. 
+// req, resp := client.InputService15TestCaseOperation3Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService15ProtocolTest) InputService15TestCaseOperation3Request(input *InputService15TestShapeInputShape) (req *request.Request, output *InputService15TestShapeInputService15TestCaseOperation3Output) { + op := &request.Operation{ + Name: opInputService15TestCaseOperation3, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService15TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService15TestShapeInputService15TestCaseOperation3Output{} + req.Data = output + return +} + +func (c *InputService15ProtocolTest) InputService15TestCaseOperation3(input *InputService15TestShapeInputShape) (*InputService15TestShapeInputService15TestCaseOperation3Output, error) { + req, out := c.InputService15TestCaseOperation3Request(input) + err := req.Send() + return out, err +} + +const opInputService15TestCaseOperation4 = "OperationName" + +// InputService15TestCaseOperation4Request generates a "aws/request.Request" representing the +// client's request for the InputService15TestCaseOperation4 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService15TestCaseOperation4 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService15TestCaseOperation4Request method. +// req, resp := client.InputService15TestCaseOperation4Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService15ProtocolTest) InputService15TestCaseOperation4Request(input *InputService15TestShapeInputShape) (req *request.Request, output *InputService15TestShapeInputService15TestCaseOperation4Output) { + op := &request.Operation{ + Name: opInputService15TestCaseOperation4, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService15TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService15TestShapeInputService15TestCaseOperation4Output{} + req.Data = output + return +} + +func (c *InputService15ProtocolTest) InputService15TestCaseOperation4(input *InputService15TestShapeInputShape) (*InputService15TestShapeInputService15TestCaseOperation4Output, error) { + req, out := c.InputService15TestCaseOperation4Request(input) + err := req.Send() + return out, err +} + +const opInputService15TestCaseOperation5 = "OperationName" + +// InputService15TestCaseOperation5Request generates a "aws/request.Request" representing the +// client's request for the InputService15TestCaseOperation5 operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService15TestCaseOperation5 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService15TestCaseOperation5Request method. +// req, resp := client.InputService15TestCaseOperation5Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService15ProtocolTest) InputService15TestCaseOperation5Request(input *InputService15TestShapeInputShape) (req *request.Request, output *InputService15TestShapeInputService15TestCaseOperation5Output) { + op := &request.Operation{ + Name: opInputService15TestCaseOperation5, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService15TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService15TestShapeInputService15TestCaseOperation5Output{} + req.Data = output + return +} + +func (c *InputService15ProtocolTest) InputService15TestCaseOperation5(input *InputService15TestShapeInputShape) (*InputService15TestShapeInputService15TestCaseOperation5Output, error) { + req, out := c.InputService15TestCaseOperation5Request(input) + err := req.Send() + return out, err +} + +const opInputService15TestCaseOperation6 = "OperationName" + +// InputService15TestCaseOperation6Request generates a "aws/request.Request" representing the +// client's request for the InputService15TestCaseOperation6 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService15TestCaseOperation6 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService15TestCaseOperation6Request method. 
+// req, resp := client.InputService15TestCaseOperation6Request(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *InputService15ProtocolTest) InputService15TestCaseOperation6Request(input *InputService15TestShapeInputShape) (req *request.Request, output *InputService15TestShapeInputService15TestCaseOperation6Output) {
+ op := &request.Operation{
+ Name: opInputService15TestCaseOperation6,
+ HTTPMethod: "POST",
+ HTTPPath: "/path",
+ }
+
+ if input == nil {
+ input = &InputService15TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &InputService15TestShapeInputService15TestCaseOperation6Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService15ProtocolTest) InputService15TestCaseOperation6(input *InputService15TestShapeInputShape) (*InputService15TestShapeInputService15TestCaseOperation6Output, error) {
+ req, out := c.InputService15TestCaseOperation6Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService15TestShapeInputService15TestCaseOperation1Output struct {
+ _ struct{} `type:"structure"`
+}
+
+type InputService15TestShapeInputService15TestCaseOperation2Output struct {
+ _ struct{} `type:"structure"`
+}
+
+type InputService15TestShapeInputService15TestCaseOperation3Output struct {
+ _ struct{} `type:"structure"`
+}
+
+type InputService15TestShapeInputService15TestCaseOperation4Output struct {
+ _ struct{} `type:"structure"`
+}
+
+type InputService15TestShapeInputService15TestCaseOperation5Output struct {
+ _ struct{} `type:"structure"`
+}
+
+type InputService15TestShapeInputService15TestCaseOperation6Output struct {
+ _ struct{} `type:"structure"`
+}
+
+type InputService15TestShapeInputShape struct {
+ _ struct{} `type:"structure"`
+
+ RecursiveStruct *InputService15TestShapeRecursiveStructType `type:"structure"`
+}
+
+type InputService15TestShapeRecursiveStructType struct {
+ _ struct{} `type:"structure"`
+
+ NoRecurse *string `type:"string"`
+
+ RecursiveList []*InputService15TestShapeRecursiveStructType `type:"list"`
+
+ RecursiveMap map[string]*InputService15TestShapeRecursiveStructType `type:"map"`
+
+ RecursiveStruct *InputService15TestShapeRecursiveStructType `type:"structure"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService16ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the InputService16ProtocolTest client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a InputService16ProtocolTest client from just a session.
+// svc := inputservice16protocoltest.New(mySession)
+//
+// // Create a InputService16ProtocolTest client with additional configuration
+// svc := inputservice16protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService16ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService16ProtocolTest {
+ c := p.ClientConfig("inputservice16protocoltest", cfgs...)
+ return newInputService16ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService16ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService16ProtocolTest { + svc := &InputService16ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice16protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService16ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService16ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService16TestCaseOperation1 = "OperationName" + +// InputService16TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService16TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService16TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService16TestCaseOperation1Request method. +// req, resp := client.InputService16TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService16ProtocolTest) InputService16TestCaseOperation1Request(input *InputService16TestShapeInputShape) (req *request.Request, output *InputService16TestShapeInputService16TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService16TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService16TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService16TestShapeInputService16TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService16ProtocolTest) InputService16TestCaseOperation1(input *InputService16TestShapeInputShape) (*InputService16TestShapeInputService16TestCaseOperation1Output, error) { + req, out := c.InputService16TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService16TestCaseOperation2 = "OperationName" + +// InputService16TestCaseOperation2Request generates a "aws/request.Request" representing the +// client's request for the InputService16TestCaseOperation2 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the InputService16TestCaseOperation2 method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the InputService16TestCaseOperation2Request method.
+// req, resp := client.InputService16TestCaseOperation2Request(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *InputService16ProtocolTest) InputService16TestCaseOperation2Request(input *InputService16TestShapeInputShape) (req *request.Request, output *InputService16TestShapeInputService16TestCaseOperation2Output) {
+ op := &request.Operation{
+ Name: opInputService16TestCaseOperation2,
+ HTTPMethod: "POST",
+ HTTPPath: "/path",
+ }
+
+ if input == nil {
+ input = &InputService16TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &InputService16TestShapeInputService16TestCaseOperation2Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService16ProtocolTest) InputService16TestCaseOperation2(input *InputService16TestShapeInputShape) (*InputService16TestShapeInputService16TestCaseOperation2Output, error) {
+ req, out := c.InputService16TestCaseOperation2Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService16TestShapeInputService16TestCaseOperation1Output struct {
+ _ struct{} `type:"structure"`
+}
+
+type InputService16TestShapeInputService16TestCaseOperation2Output struct {
+ _ struct{} `type:"structure"`
+}
+
+type InputService16TestShapeInputShape struct {
+ _ struct{} `type:"structure"`
+
+ TimeArg *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ TimeArgInHeader *time.Time `location:"header" locationName:"x-amz-timearg" type:"timestamp" timestampFormat:"rfc822"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService17ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the InputService17ProtocolTest client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a InputService17ProtocolTest client from just a session.
+// svc := inputservice17protocoltest.New(mySession)
+//
+// // Create a InputService17ProtocolTest client with additional configuration
+// svc := inputservice17protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService17ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService17ProtocolTest {
+ c := p.ClientConfig("inputservice17protocoltest", cfgs...)
+ return newInputService17ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService17ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService17ProtocolTest { + svc := &InputService17ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice17protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService17ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService17ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService17TestCaseOperation1 = "OperationName" + +// InputService17TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService17TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService17TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService17TestCaseOperation1Request method. 
+// req, resp := client.InputService17TestCaseOperation1Request(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *InputService17ProtocolTest) InputService17TestCaseOperation1Request(input *InputService17TestShapeInputService17TestCaseOperation1Input) (req *request.Request, output *InputService17TestShapeInputService17TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService17TestCaseOperation1,
+ HTTPMethod: "POST",
+ HTTPPath: "/path",
+ }
+
+ if input == nil {
+ input = &InputService17TestShapeInputService17TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &InputService17TestShapeInputService17TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService17ProtocolTest) InputService17TestCaseOperation1(input *InputService17TestShapeInputService17TestCaseOperation1Input) (*InputService17TestShapeInputService17TestCaseOperation1Output, error) {
+ req, out := c.InputService17TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService17TestShapeInputService17TestCaseOperation1Input struct {
+ _ struct{} `type:"structure"`
+
+ TimeArg *time.Time `locationName:"timestamp_location" type:"timestamp" timestampFormat:"unix"`
+}
+
+type InputService17TestShapeInputService17TestCaseOperation1Output struct {
+ _ struct{} `type:"structure"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService18ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the InputService18ProtocolTest client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a InputService18ProtocolTest client from just a session.
+// svc := inputservice18protocoltest.New(mySession)
+//
+// // Create a InputService18ProtocolTest client with additional configuration
+// svc := inputservice18protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService18ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService18ProtocolTest {
+ c := p.ClientConfig("inputservice18protocoltest", cfgs...)
+ return newInputService18ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService18ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService18ProtocolTest {
+ svc := &InputService18ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice18protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+ svc.Handlers.Build.PushBackNamed(restjson.BuildHandler)
+ svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler)
+ svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler)
+ svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler)
+
+ return svc
+}
+
+// newRequest creates a new request for a InputService18ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService18ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService18TestCaseOperation1 = "OperationName"
+
+// InputService18TestCaseOperation1Request generates a "aws/request.Request" representing the
+// client's request for the InputService18TestCaseOperation1 operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the InputService18TestCaseOperation1 method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the InputService18TestCaseOperation1Request method.
+// req, resp := client.InputService18TestCaseOperation1Request(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *InputService18ProtocolTest) InputService18TestCaseOperation1Request(input *InputService18TestShapeInputService18TestCaseOperation1Input) (req *request.Request, output *InputService18TestShapeInputService18TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService18TestCaseOperation1,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &InputService18TestShapeInputService18TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &InputService18TestShapeInputService18TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService18ProtocolTest) InputService18TestCaseOperation1(input *InputService18TestShapeInputService18TestCaseOperation1Input) (*InputService18TestShapeInputService18TestCaseOperation1Output, error) {
+ req, out := c.InputService18TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService18TestShapeInputService18TestCaseOperation1Input struct {
+ _ struct{} `type:"structure" payload:"Foo"`
+
+ Foo *string `locationName:"foo" type:"string"`
+}
+
+type InputService18TestShapeInputService18TestCaseOperation1Output struct {
+ _ struct{} `type:"structure"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService19ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the InputService19ProtocolTest client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a InputService19ProtocolTest client from just a session.
+// svc := inputservice19protocoltest.New(mySession)
+//
+// // Create a InputService19ProtocolTest client with additional configuration
+// svc := inputservice19protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService19ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService19ProtocolTest {
+ c := p.ClientConfig("inputservice19protocoltest", cfgs...)
+ return newInputService19ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService19ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService19ProtocolTest { + svc := &InputService19ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice19protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService19ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService19ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService19TestCaseOperation1 = "OperationName" + +// InputService19TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService19TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService19TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService19TestCaseOperation1Request method. 
+// req, resp := client.InputService19TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService19ProtocolTest) InputService19TestCaseOperation1Request(input *InputService19TestShapeInputShape) (req *request.Request, output *InputService19TestShapeInputService19TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService19TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService19TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService19TestShapeInputService19TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService19ProtocolTest) InputService19TestCaseOperation1(input *InputService19TestShapeInputShape) (*InputService19TestShapeInputService19TestCaseOperation1Output, error) { + req, out := c.InputService19TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService19TestCaseOperation2 = "OperationName" + +// InputService19TestCaseOperation2Request generates a "aws/request.Request" representing the +// client's request for the InputService19TestCaseOperation2 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService19TestCaseOperation2 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService19TestCaseOperation2Request method. 
+// req, resp := client.InputService19TestCaseOperation2Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService19ProtocolTest) InputService19TestCaseOperation2Request(input *InputService19TestShapeInputShape) (req *request.Request, output *InputService19TestShapeInputService19TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService19TestCaseOperation2, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService19TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService19TestShapeInputService19TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService19ProtocolTest) InputService19TestCaseOperation2(input *InputService19TestShapeInputShape) (*InputService19TestShapeInputService19TestCaseOperation2Output, error) { + req, out := c.InputService19TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService19TestShapeInputService19TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService19TestShapeInputService19TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService19TestShapeInputShape struct { + _ struct{} `type:"structure"` + + Token *string `type:"string" idempotencyToken:"true"` +} + +// +// Tests begin here +// + +func TestInputService1ProtocolTestNoParametersCase1(t *testing.T) { + sess := session.New() + svc := NewInputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + req, _ := svc.InputService1TestCaseOperation1Request(nil) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/jobs", r.URL.String()) + + // assert headers + +} + +func TestInputService2ProtocolTestURIParameterOnlyWithNoLocationNameCase1(t *testing.T) { + sess := session.New() + svc := NewInputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService2TestShapeInputService2TestCaseOperation1Input{ + PipelineId: aws.String("foo"), + } + req, _ := svc.InputService2TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/foo", r.URL.String()) + + // assert headers + +} + +func TestInputService3ProtocolTestURIParameterOnlyWithLocationNameCase1(t *testing.T) { + sess := session.New() + svc := NewInputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService3TestShapeInputService3TestCaseOperation1Input{ + Foo: aws.String("bar"), + } + req, _ := svc.InputService3TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/bar", r.URL.String()) + + // assert headers + +} + +func TestInputService4ProtocolTestQuerystringListOfStringsCase1(t *testing.T) { + sess := session.New() + svc := NewInputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService4TestShapeInputService4TestCaseOperation1Input{ + Items: []*string{ + 
aws.String("value1"), + aws.String("value2"), + }, + } + req, _ := svc.InputService4TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/path?item=value1&item=value2", r.URL.String()) + + // assert headers + +} + +func TestInputService5ProtocolTestStringToStringMapsInQuerystringCase1(t *testing.T) { + sess := session.New() + svc := NewInputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService5TestShapeInputService5TestCaseOperation1Input{ + PipelineId: aws.String("foo"), + QueryDoc: map[string]*string{ + "bar": aws.String("baz"), + "fizz": aws.String("buzz"), + }, + } + req, _ := svc.InputService5TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/foo?bar=baz&fizz=buzz", r.URL.String()) + + // assert headers + +} + +func TestInputService6ProtocolTestStringToStringListMapsInQuerystringCase1(t *testing.T) { + sess := session.New() + svc := NewInputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService6TestShapeInputService6TestCaseOperation1Input{ + PipelineId: aws.String("id"), + QueryDoc: map[string][]*string{ + "fizz": { + aws.String("buzz"), + aws.String("pop"), + }, + "foo": { + aws.String("bar"), + aws.String("baz"), + }, + }, + } + req, _ := svc.InputService6TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/id?foo=bar&foo=baz&fizz=buzz&fizz=pop", r.URL.String()) + + // assert headers + +} + +func TestInputService7ProtocolTestURIParameterAndQuerystringParamsCase1(t *testing.T) { + sess := session.New() + svc := NewInputService7ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService7TestShapeInputService7TestCaseOperation1Input{ + Ascending: aws.String("true"), + PageToken: aws.String("bar"), + PipelineId: aws.String("foo"), + } + req, _ := svc.InputService7TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/foo?Ascending=true&PageToken=bar", r.URL.String()) + + // assert headers + +} + +func TestInputService8ProtocolTestURIParameterQuerystringParamsAndJSONBodyCase1(t *testing.T) { + sess := session.New() + svc := NewInputService8ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService8TestShapeInputService8TestCaseOperation1Input{ + Ascending: aws.String("true"), + Config: &InputService8TestShapeStructType{ + A: aws.String("one"), + B: aws.String("two"), + }, + PageToken: aws.String("bar"), + PipelineId: aws.String("foo"), + } + req, _ := svc.InputService8TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"Config":{"A":"one","B":"two"}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/foo?Ascending=true&PageToken=bar", r.URL.String()) + + // assert headers 
+ +} + +func TestInputService9ProtocolTestURIParameterQuerystringParamsHeadersAndJSONBodyCase1(t *testing.T) { + sess := session.New() + svc := NewInputService9ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService9TestShapeInputService9TestCaseOperation1Input{ + Ascending: aws.String("true"), + Checksum: aws.String("12345"), + Config: &InputService9TestShapeStructType{ + A: aws.String("one"), + B: aws.String("two"), + }, + PageToken: aws.String("bar"), + PipelineId: aws.String("foo"), + } + req, _ := svc.InputService9TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"Config":{"A":"one","B":"two"}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/foo?Ascending=true&PageToken=bar", r.URL.String()) + + // assert headers + assert.Equal(t, "12345", r.Header.Get("x-amz-checksum")) + +} + +func TestInputService10ProtocolTestStreamingPayloadCase1(t *testing.T) { + sess := session.New() + svc := NewInputService10ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService10TestShapeInputService10TestCaseOperation1Input{ + Body: aws.ReadSeekCloser(bytes.NewBufferString("contents")), + Checksum: aws.String("foo"), + VaultName: aws.String("name"), + } + req, _ := svc.InputService10TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, `contents`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/vaults/name/archives", r.URL.String()) + + // assert headers + assert.Equal(t, "foo", r.Header.Get("x-amz-sha256-tree-hash")) + +} + +func TestInputService11ProtocolTestSerializeBlobsInBodyCase1(t *testing.T) { + sess := session.New() + svc := NewInputService11ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService11TestShapeInputService11TestCaseOperation1Input{ + Bar: []byte("Blob param"), + Foo: aws.String("foo_name"), + } + req, _ := svc.InputService11TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"Bar":"QmxvYiBwYXJhbQ=="}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/foo_name", r.URL.String()) + + // assert headers + +} + +func TestInputService12ProtocolTestBlobPayloadCase1(t *testing.T) { + sess := session.New() + svc := NewInputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService12TestShapeInputShape{ + Foo: []byte("bar"), + } + req, _ := svc.InputService12TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, `bar`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService12ProtocolTestBlobPayloadCase2(t *testing.T) { + sess := session.New() + svc := 
NewInputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+ input := &InputService12TestShapeInputShape{}
+ req, _ := svc.InputService12TestCaseOperation2Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restjson.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService13ProtocolTestStructurePayloadCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService13ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+ input := &InputService13TestShapeInputShape{
+ Foo: &InputService13TestShapeFooShape{
+ Baz: aws.String("bar"),
+ },
+ }
+ req, _ := svc.InputService13TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restjson.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ awstesting.AssertJSON(t, `{"baz":"bar"}`, util.Trim(string(body)))
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService13ProtocolTestStructurePayloadCase2(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService13ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+ input := &InputService13TestShapeInputShape{}
+ req, _ := svc.InputService13TestCaseOperation2Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restjson.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService14ProtocolTestOmitsNullQueryParamsButSerializesEmptyStringsCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService14ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+ input := &InputService14TestShapeInputShape{}
+ req, _ := svc.InputService14TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restjson.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/path", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService14ProtocolTestOmitsNullQueryParamsButSerializesEmptyStringsCase2(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService14ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+ input := &InputService14TestShapeInputShape{
+ Foo: aws.String(""),
+ }
+ req, _ := svc.InputService14TestCaseOperation2Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restjson.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/path?abc=mno&param-name=", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService15ProtocolTestRecursiveShapesCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService15ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+ input := &InputService15TestShapeInputShape{
+ RecursiveStruct: &InputService15TestShapeRecursiveStructType{
+ NoRecurse: aws.String("foo"),
+ },
+ }
+ req, _ := svc.InputService15TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restjson.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ awstesting.AssertJSON(t, `{"RecursiveStruct":{"NoRecurse":"foo"}}`, util.Trim(string(body)))
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/path", r.URL.String())
+
+ // assert headers
+
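+ // Case 1 covers the base case, a single NoRecurse leaf; cases 2-6 below
+ // exercise nested structs, lists, and maps of the same recursive type.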
+} + +func TestInputService15ProtocolTestRecursiveShapesCase2(t *testing.T) { + sess := session.New() + svc := NewInputService15ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService15TestShapeInputShape{ + RecursiveStruct: &InputService15TestShapeRecursiveStructType{ + RecursiveStruct: &InputService15TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + }, + } + req, _ := svc.InputService15TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveStruct":{"NoRecurse":"foo"}}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService15ProtocolTestRecursiveShapesCase3(t *testing.T) { + sess := session.New() + svc := NewInputService15ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService15TestShapeInputShape{ + RecursiveStruct: &InputService15TestShapeRecursiveStructType{ + RecursiveStruct: &InputService15TestShapeRecursiveStructType{ + RecursiveStruct: &InputService15TestShapeRecursiveStructType{ + RecursiveStruct: &InputService15TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + }, + }, + }, + } + req, _ := svc.InputService15TestCaseOperation3Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveStruct":{"RecursiveStruct":{"RecursiveStruct":{"NoRecurse":"foo"}}}}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService15ProtocolTestRecursiveShapesCase4(t *testing.T) { + sess := session.New() + svc := NewInputService15ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService15TestShapeInputShape{ + RecursiveStruct: &InputService15TestShapeRecursiveStructType{ + RecursiveList: []*InputService15TestShapeRecursiveStructType{ + { + NoRecurse: aws.String("foo"), + }, + { + NoRecurse: aws.String("bar"), + }, + }, + }, + } + req, _ := svc.InputService15TestCaseOperation4Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveList":[{"NoRecurse":"foo"},{"NoRecurse":"bar"}]}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService15ProtocolTestRecursiveShapesCase5(t *testing.T) { + sess := session.New() + svc := NewInputService15ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService15TestShapeInputShape{ + RecursiveStruct: &InputService15TestShapeRecursiveStructType{ + RecursiveList: []*InputService15TestShapeRecursiveStructType{ + { + NoRecurse: aws.String("foo"), + }, + { + RecursiveStruct: &InputService15TestShapeRecursiveStructType{ + NoRecurse: aws.String("bar"), + }, + }, + }, + }, + } + req, _ := svc.InputService15TestCaseOperation5Request(input) + r := req.HTTPRequest + + // build request + 
restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveList":[{"NoRecurse":"foo"},{"RecursiveStruct":{"NoRecurse":"bar"}}]}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService15ProtocolTestRecursiveShapesCase6(t *testing.T) { + sess := session.New() + svc := NewInputService15ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService15TestShapeInputShape{ + RecursiveStruct: &InputService15TestShapeRecursiveStructType{ + RecursiveMap: map[string]*InputService15TestShapeRecursiveStructType{ + "bar": { + NoRecurse: aws.String("bar"), + }, + "foo": { + NoRecurse: aws.String("foo"), + }, + }, + }, + } + req, _ := svc.InputService15TestCaseOperation6Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveMap":{"foo":{"NoRecurse":"foo"},"bar":{"NoRecurse":"bar"}}}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService16ProtocolTestTimestampValuesCase1(t *testing.T) { + sess := session.New() + svc := NewInputService16ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService16TestShapeInputShape{ + TimeArg: aws.Time(time.Unix(1422172800, 0)), + } + req, _ := svc.InputService16TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"TimeArg":1422172800}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService16ProtocolTestTimestampValuesCase2(t *testing.T) { + sess := session.New() + svc := NewInputService16ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService16TestShapeInputShape{ + TimeArgInHeader: aws.Time(time.Unix(1422172800, 0)), + } + req, _ := svc.InputService16TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + assert.Equal(t, "Sun, 25 Jan 2015 08:00:00 GMT", r.Header.Get("x-amz-timearg")) + +} + +func TestInputService17ProtocolTestNamedLocationsInJSONBodyCase1(t *testing.T) { + sess := session.New() + svc := NewInputService17ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService17TestShapeInputService17TestCaseOperation1Input{ + TimeArg: aws.Time(time.Unix(1422172800, 0)), + } + req, _ := svc.InputService17TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"timestamp_location":1422172800}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func 
TestInputService18ProtocolTestStringPayloadCase1(t *testing.T) { + sess := session.New() + svc := NewInputService18ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService18TestShapeInputService18TestCaseOperation1Input{ + Foo: aws.String("bar"), + } + req, _ := svc.InputService18TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, `bar`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService19ProtocolTestIdempotencyTokenAutoFillCase1(t *testing.T) { + sess := session.New() + svc := NewInputService19ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService19TestShapeInputShape{ + Token: aws.String("abc123"), + } + req, _ := svc.InputService19TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"Token":"abc123"}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService19ProtocolTestIdempotencyTokenAutoFillCase2(t *testing.T) { + sess := session.New() + svc := NewInputService19ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService19TestShapeInputShape{} + req, _ := svc.InputService19TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"Token":"00000000-0000-4000-8000-000000000000"}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go new file mode 100644 index 000000000..2c95a9858 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go @@ -0,0 +1,92 @@ +// Package restjson provides RESTful JSON serialisation of AWS +// requests and responses. 
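+//
+// Serialisation is split between two sub-protocols: the rest package builds
+// the URI, query string, and headers, and the jsonrpc package marshals
+// structure payloads into the JSON body (see Build and Unmarshal below).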
+package restjson + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/rest-json.json build_test.go +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-json.json unmarshal_test.go + +import ( + "encoding/json" + "io/ioutil" + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +// BuildHandler is a named request handler for building restjson protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.restjson.Build", Fn: Build} + +// UnmarshalHandler is a named request handler for unmarshaling restjson protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.restjson.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling restjson protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.restjson.UnmarshalMeta", Fn: UnmarshalMeta} + +// UnmarshalErrorHandler is a named request handler for unmarshaling restjson protocol request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.restjson.UnmarshalError", Fn: UnmarshalError} + +// Build builds a request for the REST JSON protocol. +func Build(r *request.Request) { + rest.Build(r) + + if t := rest.PayloadType(r.Params); t == "structure" || t == "" { + jsonrpc.Build(r) + } +} + +// Unmarshal unmarshals a response body for the REST JSON protocol. +func Unmarshal(r *request.Request) { + if t := rest.PayloadType(r.Data); t == "structure" || t == "" { + jsonrpc.Unmarshal(r) + } else { + rest.Unmarshal(r) + } +} + +// UnmarshalMeta unmarshals response headers for the REST JSON protocol. +func UnmarshalMeta(r *request.Request) { + rest.UnmarshalMeta(r) +} + +// UnmarshalError unmarshals a response error for the REST JSON protocol. 
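+// The error code is taken from the X-Amzn-Errortype header when present,
+// otherwise from the "code" field of the JSON body; anything after a ":"
+// in the code is discarded before the error is wrapped in a RequestFailure
+// carrying the HTTP status code and request ID.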
+func UnmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + code := r.HTTPResponse.Header.Get("X-Amzn-Errortype") + bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed reading REST JSON error response", err) + return + } + if len(bodyBytes) == 0 { + r.Error = awserr.NewRequestFailure( + awserr.New("SerializationError", r.HTTPResponse.Status, nil), + r.HTTPResponse.StatusCode, + "", + ) + return + } + var jsonErr jsonErrorResponse + if err := json.Unmarshal(bodyBytes, &jsonErr); err != nil { + r.Error = awserr.New("SerializationError", "failed decoding REST JSON error response", err) + return + } + + if code == "" { + code = jsonErr.Code + } + + code = strings.SplitN(code, ":", 2)[0] + r.Error = awserr.NewRequestFailure( + awserr.New(code, jsonErr.Message, nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) +} + +type jsonErrorResponse struct { + Code string `json:"code"` + Message string `json:"message"` +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_test.go new file mode 100644 index 000000000..36792e863 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_test.go @@ -0,0 +1,1560 @@ +package restjson_test + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restjson" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" + "github.com/aws/aws-sdk-go/private/util" + "github.com/stretchr/testify/assert" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} +var _ = io.EOF +var _ = aws.String +var _ = fmt.Println + +func init() { + protocol.RandReader = &awstesting.ZeroReader{} +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService1ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService1ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService1ProtocolTest client from just a session. +// svc := outputservice1protocoltest.New(mySession) +// +// // Create a OutputService1ProtocolTest client with additional configuration +// svc := outputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService1ProtocolTest { + c := p.ClientConfig("outputservice1protocoltest", cfgs...) + return newOutputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
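+// Each generated test client pushes the SigV4 signing handler and the
+// restjson Build/Unmarshal handlers onto the request lifecycle, mirroring
+// how a real service client is wired up.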
+func newOutputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService1ProtocolTest { + svc := &OutputService1ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice1protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService1ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService1TestCaseOperation1 = "OperationName" + +// OutputService1TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService1TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService1TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService1TestCaseOperation1Request method. 
+// req, resp := client.OutputService1TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1Request(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (req *request.Request, output *OutputService1TestShapeOutputService1TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService1TestCaseOperation1, + } + + if input == nil { + input = &OutputService1TestShapeOutputService1TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService1TestShapeOutputService1TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (*OutputService1TestShapeOutputService1TestCaseOperation1Output, error) { + req, out := c.OutputService1TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService1TestShapeOutputService1TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService1TestShapeOutputService1TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Char *string `type:"character"` + + Double *float64 `type:"double"` + + FalseBool *bool `type:"boolean"` + + Float *float64 `type:"float"` + + ImaHeader *string `location:"header" type:"string"` + + ImaHeaderLocation *string `location:"header" locationName:"X-Foo" type:"string"` + + Long *int64 `type:"long"` + + Num *int64 `type:"integer"` + + Status *int64 `location:"statusCode" type:"integer"` + + Str *string `type:"string"` + + TrueBool *bool `type:"boolean"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService2ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService2ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService2ProtocolTest client from just a session. +// svc := outputservice2protocoltest.New(mySession) +// +// // Create a OutputService2ProtocolTest client with additional configuration +// svc := outputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService2ProtocolTest { + c := p.ClientConfig("outputservice2protocoltest", cfgs...) + return newOutputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newOutputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService2ProtocolTest { + svc := &OutputService2ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice2protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService2ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService2TestCaseOperation1 = "OperationName" + +// OutputService2TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService2TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService2TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService2TestCaseOperation1Request method. +// req, resp := client.OutputService2TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1Request(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (req *request.Request, output *OutputService2TestShapeOutputService2TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService2TestCaseOperation1, + } + + if input == nil { + input = &OutputService2TestShapeOutputService2TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService2TestShapeOutputService2TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (*OutputService2TestShapeOutputService2TestCaseOperation1Output, error) { + req, out := c.OutputService2TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService2TestShapeBlobContainer struct { + _ struct{} `type:"structure"` + + // Foo is automatically base64 encoded/decoded by the SDK. + Foo []byte `locationName:"foo" type:"blob"` +} + +type OutputService2TestShapeOutputService2TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService2TestShapeOutputService2TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + // BlobMember is automatically base64 encoded/decoded by the SDK. 
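+	// For example, the JSON string "aGkh" in a response body decodes to the
+	// raw bytes "hi!" (exercised by TestOutputService2ProtocolTestBlobMembersCase1).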
+ BlobMember []byte `type:"blob"` + + StructMember *OutputService2TestShapeBlobContainer `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService3ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService3ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService3ProtocolTest client from just a session. +// svc := outputservice3protocoltest.New(mySession) +// +// // Create a OutputService3ProtocolTest client with additional configuration +// svc := outputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService3ProtocolTest { + c := p.ClientConfig("outputservice3protocoltest", cfgs...) + return newOutputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService3ProtocolTest { + svc := &OutputService3ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice3protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService3ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService3TestCaseOperation1 = "OperationName" + +// OutputService3TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService3TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService3TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService3TestCaseOperation1Request method. 
+// req, resp := client.OutputService3TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1Request(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (req *request.Request, output *OutputService3TestShapeOutputService3TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService3TestCaseOperation1, + } + + if input == nil { + input = &OutputService3TestShapeOutputService3TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService3TestShapeOutputService3TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (*OutputService3TestShapeOutputService3TestCaseOperation1Output, error) { + req, out := c.OutputService3TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService3TestShapeOutputService3TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService3TestShapeOutputService3TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + StructMember *OutputService3TestShapeTimeContainer `type:"structure"` + + TimeMember *time.Time `type:"timestamp" timestampFormat:"unix"` +} + +type OutputService3TestShapeTimeContainer struct { + _ struct{} `type:"structure"` + + Foo *time.Time `locationName:"foo" type:"timestamp" timestampFormat:"unix"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService4ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService4ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService4ProtocolTest client from just a session. +// svc := outputservice4protocoltest.New(mySession) +// +// // Create a OutputService4ProtocolTest client with additional configuration +// svc := outputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService4ProtocolTest { + c := p.ClientConfig("outputservice4protocoltest", cfgs...) + return newOutputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newOutputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService4ProtocolTest { + svc := &OutputService4ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice4protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService4ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService4TestCaseOperation1 = "OperationName" + +// OutputService4TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService4TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService4TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService4TestCaseOperation1Request method. +// req, resp := client.OutputService4TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1Request(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (req *request.Request, output *OutputService4TestShapeOutputService4TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService4TestCaseOperation1, + } + + if input == nil { + input = &OutputService4TestShapeOutputService4TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService4TestShapeOutputService4TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (*OutputService4TestShapeOutputService4TestCaseOperation1Output, error) { + req, out := c.OutputService4TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService4TestShapeOutputService4TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService4TestShapeOutputService4TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + ListMember []*string `type:"list"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
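+// Handlers are copied onto each request at creation time, so pushing a
+// handler onto an individual request does not mutate the shared client.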
+type OutputService5ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService5ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService5ProtocolTest client from just a session. +// svc := outputservice5protocoltest.New(mySession) +// +// // Create a OutputService5ProtocolTest client with additional configuration +// svc := outputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService5ProtocolTest { + c := p.ClientConfig("outputservice5protocoltest", cfgs...) + return newOutputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService5ProtocolTest { + svc := &OutputService5ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice5protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService5ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService5TestCaseOperation1 = "OperationName" + +// OutputService5TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService5TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService5TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService5TestCaseOperation1Request method. 
+// req, resp := client.OutputService5TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1Request(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (req *request.Request, output *OutputService5TestShapeOutputService5TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService5TestCaseOperation1, + } + + if input == nil { + input = &OutputService5TestShapeOutputService5TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService5TestShapeOutputService5TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (*OutputService5TestShapeOutputService5TestCaseOperation1Output, error) { + req, out := c.OutputService5TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService5TestShapeOutputService5TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService5TestShapeOutputService5TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + ListMember []*OutputService5TestShapeSingleStruct `type:"list"` +} + +type OutputService5TestShapeSingleStruct struct { + _ struct{} `type:"structure"` + + Foo *string `type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService6ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService6ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService6ProtocolTest client from just a session. +// svc := outputservice6protocoltest.New(mySession) +// +// // Create a OutputService6ProtocolTest client with additional configuration +// svc := outputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService6ProtocolTest { + c := p.ClientConfig("outputservice6protocoltest", cfgs...) + return newOutputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService6ProtocolTest { + svc := &OutputService6ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice6protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService6ProtocolTest operation and runs any +// custom request initialization. 
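+// For these generated test clients no custom initialization is needed, so
+// newRequest simply wraps Client.NewRequest.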
+func (c *OutputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService6TestCaseOperation1 = "OperationName" + +// OutputService6TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService6TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService6TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService6TestCaseOperation1Request method. +// req, resp := client.OutputService6TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1Request(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (req *request.Request, output *OutputService6TestShapeOutputService6TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService6TestCaseOperation1, + } + + if input == nil { + input = &OutputService6TestShapeOutputService6TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService6TestShapeOutputService6TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (*OutputService6TestShapeOutputService6TestCaseOperation1Output, error) { + req, out := c.OutputService6TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService6TestShapeOutputService6TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService6TestShapeOutputService6TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + MapMember map[string][]*int64 `type:"map"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService7ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService7ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService7ProtocolTest client from just a session. +// svc := outputservice7protocoltest.New(mySession) +// +// // Create a OutputService7ProtocolTest client with additional configuration +// svc := outputservice7protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService7ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService7ProtocolTest { + c := p.ClientConfig("outputservice7protocoltest", cfgs...) + return newOutputService7ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newOutputService7ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService7ProtocolTest { + svc := &OutputService7ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice7protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService7ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService7ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService7TestCaseOperation1 = "OperationName" + +// OutputService7TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService7TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService7TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService7TestCaseOperation1Request method. +// req, resp := client.OutputService7TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1Request(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (req *request.Request, output *OutputService7TestShapeOutputService7TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService7TestCaseOperation1, + } + + if input == nil { + input = &OutputService7TestShapeOutputService7TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService7TestShapeOutputService7TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (*OutputService7TestShapeOutputService7TestCaseOperation1Output, error) { + req, out := c.OutputService7TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService7TestShapeOutputService7TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService7TestShapeOutputService7TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + MapMember map[string]*time.Time `type:"map"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+type OutputService8ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService8ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService8ProtocolTest client from just a session. +// svc := outputservice8protocoltest.New(mySession) +// +// // Create a OutputService8ProtocolTest client with additional configuration +// svc := outputservice8protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService8ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService8ProtocolTest { + c := p.ClientConfig("outputservice8protocoltest", cfgs...) + return newOutputService8ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService8ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService8ProtocolTest { + svc := &OutputService8ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice8protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService8ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService8TestCaseOperation1 = "OperationName" + +// OutputService8TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService8TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService8TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService8TestCaseOperation1Request method. 
+// req, resp := client.OutputService8TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1Request(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (req *request.Request, output *OutputService8TestShapeOutputService8TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService8TestCaseOperation1, + } + + if input == nil { + input = &OutputService8TestShapeOutputService8TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService8TestShapeOutputService8TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (*OutputService8TestShapeOutputService8TestCaseOperation1Output, error) { + req, out := c.OutputService8TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService8TestShapeOutputService8TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService8TestShapeOutputService8TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + StrType *string `type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService9ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService9ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService9ProtocolTest client from just a session. +// svc := outputservice9protocoltest.New(mySession) +// +// // Create a OutputService9ProtocolTest client with additional configuration +// svc := outputservice9protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService9ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService9ProtocolTest { + c := p.ClientConfig("outputservice9protocoltest", cfgs...) + return newOutputService9ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService9ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService9ProtocolTest { + svc := &OutputService9ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice9protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService9ProtocolTest operation and runs any +// custom request initialization. 
+func (c *OutputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService9TestCaseOperation1 = "OperationName" + +// OutputService9TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService9TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService9TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService9TestCaseOperation1Request method. +// req, resp := client.OutputService9TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1Request(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (req *request.Request, output *OutputService9TestShapeOutputService9TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService9TestCaseOperation1, + } + + if input == nil { + input = &OutputService9TestShapeOutputService9TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService9TestShapeOutputService9TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (*OutputService9TestShapeOutputService9TestCaseOperation1Output, error) { + req, out := c.OutputService9TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService9TestShapeOutputService9TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService9TestShapeOutputService9TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + AllHeaders map[string]*string `location:"headers" type:"map"` + + PrefixedHeaders map[string]*string `location:"headers" locationName:"X-" type:"map"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService10ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService10ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService10ProtocolTest client from just a session. +// svc := outputservice10protocoltest.New(mySession) +// +// // Create a OutputService10ProtocolTest client with additional configuration +// svc := outputservice10protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService10ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService10ProtocolTest { + c := p.ClientConfig("outputservice10protocoltest", cfgs...) 
+ return newOutputService10ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService10ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService10ProtocolTest { + svc := &OutputService10ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice10protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService10ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService10ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService10TestCaseOperation1 = "OperationName" + +// OutputService10TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService10TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService10TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService10TestCaseOperation1Request method. 
+// req, resp := client.OutputService10TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService10ProtocolTest) OutputService10TestCaseOperation1Request(input *OutputService10TestShapeOutputService10TestCaseOperation1Input) (req *request.Request, output *OutputService10TestShapeOutputService10TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService10TestCaseOperation1, + } + + if input == nil { + input = &OutputService10TestShapeOutputService10TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService10TestShapeOutputService10TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService10ProtocolTest) OutputService10TestCaseOperation1(input *OutputService10TestShapeOutputService10TestCaseOperation1Input) (*OutputService10TestShapeOutputService10TestCaseOperation1Output, error) { + req, out := c.OutputService10TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService10TestShapeBodyStructure struct { + _ struct{} `type:"structure"` + + Foo *string `type:"string"` +} + +type OutputService10TestShapeOutputService10TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService10TestShapeOutputService10TestCaseOperation1Output struct { + _ struct{} `type:"structure" payload:"Data"` + + Data *OutputService10TestShapeBodyStructure `type:"structure"` + + Header *string `location:"header" locationName:"X-Foo" type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService11ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService11ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService11ProtocolTest client from just a session. +// svc := outputservice11protocoltest.New(mySession) +// +// // Create a OutputService11ProtocolTest client with additional configuration +// svc := outputservice11protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService11ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService11ProtocolTest { + c := p.ClientConfig("outputservice11protocoltest", cfgs...) + return newOutputService11ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newOutputService11ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService11ProtocolTest { + svc := &OutputService11ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice11protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService11ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService11ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService11TestCaseOperation1 = "OperationName" + +// OutputService11TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService11TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService11TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService11TestCaseOperation1Request method. 
+// req, resp := client.OutputService11TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService11ProtocolTest) OutputService11TestCaseOperation1Request(input *OutputService11TestShapeOutputService11TestCaseOperation1Input) (req *request.Request, output *OutputService11TestShapeOutputService11TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService11TestCaseOperation1, + } + + if input == nil { + input = &OutputService11TestShapeOutputService11TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService11TestShapeOutputService11TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService11ProtocolTest) OutputService11TestCaseOperation1(input *OutputService11TestShapeOutputService11TestCaseOperation1Input) (*OutputService11TestShapeOutputService11TestCaseOperation1Output, error) { + req, out := c.OutputService11TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService11TestShapeOutputService11TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService11TestShapeOutputService11TestCaseOperation1Output struct { + _ struct{} `type:"structure" payload:"Stream"` + + Stream []byte `type:"blob"` +} + +// +// Tests begin here +// + +func TestOutputService1ProtocolTestScalarMembersCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"Str\": \"myname\", \"Num\": 123, \"FalseBool\": false, \"TrueBool\": true, \"Float\": 1.2, \"Double\": 1.3, \"Long\": 200, \"Char\": \"a\"}")) + req, out := svc.OutputService1TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + req.HTTPResponse.Header.Set("ImaHeader", "test") + req.HTTPResponse.Header.Set("X-Foo", "abc") + + // unmarshal response + restjson.UnmarshalMeta(req) + restjson.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.Char) + assert.Equal(t, 1.3, *out.Double) + assert.Equal(t, false, *out.FalseBool) + assert.Equal(t, 1.2, *out.Float) + assert.Equal(t, "test", *out.ImaHeader) + assert.Equal(t, "abc", *out.ImaHeaderLocation) + assert.Equal(t, int64(200), *out.Long) + assert.Equal(t, int64(123), *out.Num) + assert.Equal(t, int64(200), *out.Status) + assert.Equal(t, "myname", *out.Str) + assert.Equal(t, true, *out.TrueBool) + +} + +func TestOutputService2ProtocolTestBlobMembersCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"BlobMember\": \"aGkh\", \"StructMember\": {\"foo\": \"dGhlcmUh\"}}")) + req, out := svc.OutputService2TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restjson.UnmarshalMeta(req) + restjson.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "hi!", string(out.BlobMember)) + assert.Equal(t, "there!", string(out.StructMember.Foo)) + +} + +func TestOutputService3ProtocolTestTimestampMembersCase1(t *testing.T) { + sess 
:= session.New() + svc := NewOutputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"TimeMember\": 1398796238, \"StructMember\": {\"foo\": 1398796238}}")) + req, out := svc.OutputService3TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restjson.UnmarshalMeta(req) + restjson.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, time.Unix(1.398796238e+09, 0).UTC().String(), out.StructMember.Foo.String()) + assert.Equal(t, time.Unix(1.398796238e+09, 0).UTC().String(), out.TimeMember.String()) + +} + +func TestOutputService4ProtocolTestListsCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"ListMember\": [\"a\", \"b\"]}")) + req, out := svc.OutputService4TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restjson.UnmarshalMeta(req) + restjson.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.ListMember[0]) + assert.Equal(t, "b", *out.ListMember[1]) + +} + +func TestOutputService5ProtocolTestListsWithStructureMemberCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"ListMember\": [{\"Foo\": \"a\"}, {\"Foo\": \"b\"}]}")) + req, out := svc.OutputService5TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restjson.UnmarshalMeta(req) + restjson.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.ListMember[0].Foo) + assert.Equal(t, "b", *out.ListMember[1].Foo) + +} + +func TestOutputService6ProtocolTestMapsCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"MapMember\": {\"a\": [1, 2], \"b\": [3, 4]}}")) + req, out := svc.OutputService6TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restjson.UnmarshalMeta(req) + restjson.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, int64(1), *out.MapMember["a"][0]) + assert.Equal(t, int64(2), *out.MapMember["a"][1]) + assert.Equal(t, int64(3), *out.MapMember["b"][0]) + assert.Equal(t, int64(4), *out.MapMember["b"][1]) + +} + +func TestOutputService7ProtocolTestComplexMapValuesCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService7ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"MapMember\": {\"a\": 1398796238, \"b\": 1398796238}}")) + req, out := svc.OutputService7TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, 
Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restjson.UnmarshalMeta(req) + restjson.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, time.Unix(1.398796238e+09, 0).UTC().String(), out.MapMember["a"].String()) + assert.Equal(t, time.Unix(1.398796238e+09, 0).UTC().String(), out.MapMember["b"].String()) + +} + +func TestOutputService8ProtocolTestIgnoresExtraDataCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService8ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"foo\": \"bar\"}")) + req, out := svc.OutputService8TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restjson.UnmarshalMeta(req) + restjson.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + +} + +func TestOutputService9ProtocolTestSupportsHeaderMapsCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService9ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{}")) + req, out := svc.OutputService9TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + req.HTTPResponse.Header.Set("Content-Length", "10") + req.HTTPResponse.Header.Set("X-Bam", "boo") + req.HTTPResponse.Header.Set("X-Foo", "bar") + + // unmarshal response + restjson.UnmarshalMeta(req) + restjson.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "10", *out.AllHeaders["Content-Length"]) + assert.Equal(t, "boo", *out.AllHeaders["X-Bam"]) + assert.Equal(t, "bar", *out.AllHeaders["X-Foo"]) + assert.Equal(t, "boo", *out.PrefixedHeaders["Bam"]) + assert.Equal(t, "bar", *out.PrefixedHeaders["Foo"]) + +} + +func TestOutputService10ProtocolTestJSONPayloadCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService10ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"Foo\": \"abc\"}")) + req, out := svc.OutputService10TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + req.HTTPResponse.Header.Set("X-Foo", "baz") + + // unmarshal response + restjson.UnmarshalMeta(req) + restjson.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.Data.Foo) + assert.Equal(t, "baz", *out.Header) + +} + +func TestOutputService11ProtocolTestStreamingPayloadCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService11ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abc")) + req, out := svc.OutputService11TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restjson.UnmarshalMeta(req) + restjson.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", string(out.Stream)) + +} 
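
Editor's note, not part of the vendored diff: every generated test above follows one pattern. It builds the operation's request object, grafts a canned *http.Response onto req.HTTPResponse, and invokes the rest-json unmarshal handlers by hand, so nothing ever touches the network. A minimal sketch of the complementary approach follows, assuming the aws-sdk-go handler-list API of this vintage (request.HandlerList's Clear and PushBack) and an arbitrary placeholder region: stub only the Send phase and let the full handler chain, including the restjson handlers registered in newClient, run through the plain operation method. The client and operation names are the generated ones from this file; the test name and the Region/Sign adjustments are illustrative.

func TestStubbedSendSketch(t *testing.T) {
	svc := NewOutputService1ProtocolTest(session.New(), &aws.Config{
		Region:   aws.String("us-west-2"), // placeholder; nothing is dialed
		Endpoint: aws.String("https://test"),
	})

	// Skip SigV4 so the sketch needs no credentials, then replace the Send
	// handlers with one that supplies a canned HTTP response.
	svc.Handlers.Sign.Clear()
	svc.Handlers.Send.Clear()
	svc.Handlers.Send.PushBack(func(r *request.Request) {
		body := bytes.NewReader([]byte(`{"Str": "myname"}`))
		r.HTTPResponse = &http.Response{
			StatusCode: 200,
			Body:       ioutil.NopCloser(body),
			Header:     http.Header{},
		}
	})

	// Send() now runs build, the stubbed send, and the restjson unmarshal
	// handlers in order, so the output is populated the same way it would
	// be by a real endpoint.
	out, err := svc.OutputService1TestCaseOperation1(nil)
	if err != nil {
		t.Fatal(err)
	}
	if got, want := aws.StringValue(out.Str), "myname"; got != want {
		t.Errorf("Str = %q, want %q", got, want)
	}
}

The imports match the ones already declared at the top of this test file. The trade-off versus the generated tests is that this version exercises the whole request lifecycle (parameter validation, retry wiring, response validation) at the cost of having to neutralize signing.
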
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/build_bench_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/build_bench_test.go new file mode 100644 index 000000000..081716739 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/build_bench_test.go @@ -0,0 +1,246 @@ +// +build bench + +package restxml_test + +import ( + "testing" + + "bytes" + "encoding/xml" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol/restxml" + "github.com/aws/aws-sdk-go/service/cloudfront" +) + +func BenchmarkRESTXMLBuild_Complex_cloudfrontCreateDistribution(b *testing.B) { + params := restxmlBuildCreateDistroParms + + op := &request.Operation{ + Name: "CreateDistribution", + HTTPMethod: "POST", + HTTPPath: "/2015-04-17/distribution/{DistributionId}/invalidation", + } + + benchRESTXMLBuild(b, op, params) +} + +func BenchmarkRESTXMLBuild_Simple_cloudfrontDeleteStreamingDistribution(b *testing.B) { + params := &cloudfront.DeleteDistributionInput{ + Id: aws.String("string"), // Required + IfMatch: aws.String("string"), + } + op := &request.Operation{ + Name: "DeleteStreamingDistribution", + HTTPMethod: "DELETE", + HTTPPath: "/2015-04-17/streaming-distribution/{Id}", + } + benchRESTXMLBuild(b, op, params) +} + +func BenchmarkEncodingXMLMarshal_Simple_cloudfrontDeleteStreamingDistribution(b *testing.B) { + params := &cloudfront.DeleteDistributionInput{ + Id: aws.String("string"), // Required + IfMatch: aws.String("string"), + } + + for i := 0; i < b.N; i++ { + buf := &bytes.Buffer{} + encoder := xml.NewEncoder(buf) + if err := encoder.Encode(params); err != nil { + b.Fatal("Unexpected error", err) + } + } +} + +func benchRESTXMLBuild(b *testing.B, op *request.Operation, params interface{}) { + svc := awstesting.NewClient() + svc.ServiceName = "cloudfront" + svc.APIVersion = "2015-04-17" + + for i := 0; i < b.N; i++ { + r := svc.NewRequest(op, params, nil) + restxml.Build(r) + if r.Error != nil { + b.Fatal("Unexpected error", r.Error) + } + } +} + +var restxmlBuildCreateDistroParms = &cloudfront.CreateDistributionInput{ + DistributionConfig: &cloudfront.DistributionConfig{ // Required + CallerReference: aws.String("string"), // Required + Comment: aws.String("string"), // Required + DefaultCacheBehavior: &cloudfront.DefaultCacheBehavior{ // Required + ForwardedValues: &cloudfront.ForwardedValues{ // Required + Cookies: &cloudfront.CookiePreference{ // Required + Forward: aws.String("ItemSelection"), // Required + WhitelistedNames: &cloudfront.CookieNames{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + }, + QueryString: aws.Bool(true), // Required + Headers: &cloudfront.Headers{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + }, + MinTTL: aws.Int64(1), // Required + TargetOriginId: aws.String("string"), // Required + TrustedSigners: &cloudfront.TrustedSigners{ // Required + Enabled: aws.Bool(true), // Required + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + ViewerProtocolPolicy: aws.String("ViewerProtocolPolicy"), // Required + AllowedMethods: &cloudfront.AllowedMethods{ + Items: []*string{ // Required + aws.String("Method"), // Required + // More values... 
+ }, + Quantity: aws.Int64(1), // Required + CachedMethods: &cloudfront.CachedMethods{ + Items: []*string{ // Required + aws.String("Method"), // Required + // More values... + }, + Quantity: aws.Int64(1), // Required + }, + }, + DefaultTTL: aws.Int64(1), + MaxTTL: aws.Int64(1), + SmoothStreaming: aws.Bool(true), + }, + Enabled: aws.Bool(true), // Required + Origins: &cloudfront.Origins{ // Required + Quantity: aws.Int64(1), // Required + Items: []*cloudfront.Origin{ + { // Required + DomainName: aws.String("string"), // Required + Id: aws.String("string"), // Required + CustomOriginConfig: &cloudfront.CustomOriginConfig{ + HTTPPort: aws.Int64(1), // Required + HTTPSPort: aws.Int64(1), // Required + OriginProtocolPolicy: aws.String("OriginProtocolPolicy"), // Required + }, + OriginPath: aws.String("string"), + S3OriginConfig: &cloudfront.S3OriginConfig{ + OriginAccessIdentity: aws.String("string"), // Required + }, + }, + // More values... + }, + }, + Aliases: &cloudfront.Aliases{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + CacheBehaviors: &cloudfront.CacheBehaviors{ + Quantity: aws.Int64(1), // Required + Items: []*cloudfront.CacheBehavior{ + { // Required + ForwardedValues: &cloudfront.ForwardedValues{ // Required + Cookies: &cloudfront.CookiePreference{ // Required + Forward: aws.String("ItemSelection"), // Required + WhitelistedNames: &cloudfront.CookieNames{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + }, + QueryString: aws.Bool(true), // Required + Headers: &cloudfront.Headers{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + }, + MinTTL: aws.Int64(1), // Required + PathPattern: aws.String("string"), // Required + TargetOriginId: aws.String("string"), // Required + TrustedSigners: &cloudfront.TrustedSigners{ // Required + Enabled: aws.Bool(true), // Required + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + ViewerProtocolPolicy: aws.String("ViewerProtocolPolicy"), // Required + AllowedMethods: &cloudfront.AllowedMethods{ + Items: []*string{ // Required + aws.String("Method"), // Required + // More values... + }, + Quantity: aws.Int64(1), // Required + CachedMethods: &cloudfront.CachedMethods{ + Items: []*string{ // Required + aws.String("Method"), // Required + // More values... + }, + Quantity: aws.Int64(1), // Required + }, + }, + DefaultTTL: aws.Int64(1), + MaxTTL: aws.Int64(1), + SmoothStreaming: aws.Bool(true), + }, + // More values... + }, + }, + CustomErrorResponses: &cloudfront.CustomErrorResponses{ + Quantity: aws.Int64(1), // Required + Items: []*cloudfront.CustomErrorResponse{ + { // Required + ErrorCode: aws.Int64(1), // Required + ErrorCachingMinTTL: aws.Int64(1), + ResponseCode: aws.String("string"), + ResponsePagePath: aws.String("string"), + }, + // More values... 
+ }, + }, + DefaultRootObject: aws.String("string"), + Logging: &cloudfront.LoggingConfig{ + Bucket: aws.String("string"), // Required + Enabled: aws.Bool(true), // Required + IncludeCookies: aws.Bool(true), // Required + Prefix: aws.String("string"), // Required + }, + PriceClass: aws.String("PriceClass"), + Restrictions: &cloudfront.Restrictions{ + GeoRestriction: &cloudfront.GeoRestriction{ // Required + Quantity: aws.Int64(1), // Required + RestrictionType: aws.String("GeoRestrictionType"), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + }, + ViewerCertificate: &cloudfront.ViewerCertificate{ + CloudFrontDefaultCertificate: aws.Bool(true), + IAMCertificateId: aws.String("string"), + MinimumProtocolVersion: aws.String("MinimumProtocolVersion"), + SSLSupportMethod: aws.String("SSLSupportMethod"), + }, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/build_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/build_test.go new file mode 100644 index 000000000..947487de7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/build_test.go @@ -0,0 +1,4443 @@ +package restxml_test + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restxml" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" + "github.com/aws/aws-sdk-go/private/util" + "github.com/stretchr/testify/assert" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} +var _ = io.EOF +var _ = aws.String +var _ = fmt.Println + +func init() { + protocol.RandReader = &awstesting.ZeroReader{} +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService1ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService1ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService1ProtocolTest client from just a session. +// svc := inputservice1protocoltest.New(mySession) +// +// // Create a InputService1ProtocolTest client with additional configuration +// svc := inputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService1ProtocolTest { + c := p.ClientConfig("inputservice1protocoltest", cfgs...) + return newInputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newInputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService1ProtocolTest { + svc := &InputService1ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice1protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService1ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService1TestCaseOperation1 = "OperationName" + +// InputService1TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService1TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService1TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService1TestCaseOperation1Request method. +// req, resp := client.InputService1TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService1ProtocolTest) InputService1TestCaseOperation1Request(input *InputService1TestShapeInputShape) (req *request.Request, output *InputService1TestShapeInputService1TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService1TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/hostedzone", + } + + if input == nil { + input = &InputService1TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService1TestShapeInputService1TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService1ProtocolTest) InputService1TestCaseOperation1(input *InputService1TestShapeInputShape) (*InputService1TestShapeInputService1TestCaseOperation1Output, error) { + req, out := c.InputService1TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService1TestCaseOperation2 = "OperationName" + +// InputService1TestCaseOperation2Request generates a "aws/request.Request" representing the +// client's request for the InputService1TestCaseOperation2 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService1TestCaseOperation2 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService1TestCaseOperation2Request method. +// req, resp := client.InputService1TestCaseOperation2Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService1ProtocolTest) InputService1TestCaseOperation2Request(input *InputService1TestShapeInputShape) (req *request.Request, output *InputService1TestShapeInputService1TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService1TestCaseOperation2, + HTTPMethod: "PUT", + HTTPPath: "/2014-01-01/hostedzone", + } + + if input == nil { + input = &InputService1TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService1TestShapeInputService1TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService1ProtocolTest) InputService1TestCaseOperation2(input *InputService1TestShapeInputShape) (*InputService1TestShapeInputService1TestCaseOperation2Output, error) { + req, out := c.InputService1TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +const opInputService1TestCaseOperation3 = "OperationName" + +// InputService1TestCaseOperation3Request generates a "aws/request.Request" representing the +// client's request for the InputService1TestCaseOperation3 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService1TestCaseOperation3 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService1TestCaseOperation3Request method. 
+// req, resp := client.InputService1TestCaseOperation3Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService1ProtocolTest) InputService1TestCaseOperation3Request(input *InputService1TestShapeInputService1TestCaseOperation3Input) (req *request.Request, output *InputService1TestShapeInputService1TestCaseOperation3Output) { + op := &request.Operation{ + Name: opInputService1TestCaseOperation3, + HTTPMethod: "GET", + HTTPPath: "/2014-01-01/hostedzone", + } + + if input == nil { + input = &InputService1TestShapeInputService1TestCaseOperation3Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService1TestShapeInputService1TestCaseOperation3Output{} + req.Data = output + return +} + +func (c *InputService1ProtocolTest) InputService1TestCaseOperation3(input *InputService1TestShapeInputService1TestCaseOperation3Input) (*InputService1TestShapeInputService1TestCaseOperation3Output, error) { + req, out := c.InputService1TestCaseOperation3Request(input) + err := req.Send() + return out, err +} + +type InputService1TestShapeInputService1TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService1TestShapeInputService1TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService1TestShapeInputService1TestCaseOperation3Input struct { + _ struct{} `type:"structure"` +} + +type InputService1TestShapeInputService1TestCaseOperation3Output struct { + _ struct{} `type:"structure"` +} + +type InputService1TestShapeInputShape struct { + _ struct{} `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` + + Description *string `type:"string"` + + Name *string `type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService2ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService2ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService2ProtocolTest client from just a session. +// svc := inputservice2protocoltest.New(mySession) +// +// // Create a InputService2ProtocolTest client with additional configuration +// svc := inputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService2ProtocolTest { + c := p.ClientConfig("inputservice2protocoltest", cfgs...) + return newInputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newInputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService2ProtocolTest { + svc := &InputService2ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice2protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService2ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService2TestCaseOperation1 = "OperationName" + +// InputService2TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService2TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService2TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService2TestCaseOperation1Request method. 
+// req, resp := client.InputService2TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService2ProtocolTest) InputService2TestCaseOperation1Request(input *InputService2TestShapeInputService2TestCaseOperation1Input) (req *request.Request, output *InputService2TestShapeInputService2TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService2TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/hostedzone", + } + + if input == nil { + input = &InputService2TestShapeInputService2TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService2TestShapeInputService2TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService2ProtocolTest) InputService2TestCaseOperation1(input *InputService2TestShapeInputService2TestCaseOperation1Input) (*InputService2TestShapeInputService2TestCaseOperation1Output, error) { + req, out := c.InputService2TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService2TestShapeInputService2TestCaseOperation1Input struct { + _ struct{} `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` + + First *bool `type:"boolean"` + + Fourth *int64 `type:"integer"` + + Second *bool `type:"boolean"` + + Third *float64 `type:"float"` +} + +type InputService2TestShapeInputService2TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService3ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService3ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService3ProtocolTest client from just a session. +// svc := inputservice3protocoltest.New(mySession) +// +// // Create a InputService3ProtocolTest client with additional configuration +// svc := inputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService3ProtocolTest { + c := p.ClientConfig("inputservice3protocoltest", cfgs...) + return newInputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newInputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService3ProtocolTest { + svc := &InputService3ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice3protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService3ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService3TestCaseOperation1 = "OperationName" + +// InputService3TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService3TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService3TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService3TestCaseOperation1Request method. +// req, resp := client.InputService3TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService3ProtocolTest) InputService3TestCaseOperation1Request(input *InputService3TestShapeInputShape) (req *request.Request, output *InputService3TestShapeInputService3TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService3TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/hostedzone", + } + + if input == nil { + input = &InputService3TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService3TestShapeInputService3TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService3ProtocolTest) InputService3TestCaseOperation1(input *InputService3TestShapeInputShape) (*InputService3TestShapeInputService3TestCaseOperation1Output, error) { + req, out := c.InputService3TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService3TestCaseOperation2 = "OperationName" + +// InputService3TestCaseOperation2Request generates a "aws/request.Request" representing the +// client's request for the InputService3TestCaseOperation2 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService3TestCaseOperation2 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService3TestCaseOperation2Request method. +// req, resp := client.InputService3TestCaseOperation2Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService3ProtocolTest) InputService3TestCaseOperation2Request(input *InputService3TestShapeInputShape) (req *request.Request, output *InputService3TestShapeInputService3TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService3TestCaseOperation2, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/hostedzone", + } + + if input == nil { + input = &InputService3TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService3TestShapeInputService3TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService3ProtocolTest) InputService3TestCaseOperation2(input *InputService3TestShapeInputShape) (*InputService3TestShapeInputService3TestCaseOperation2Output, error) { + req, out := c.InputService3TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService3TestShapeInputService3TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService3TestShapeInputService3TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService3TestShapeInputShape struct { + _ struct{} `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` + + Description *string `type:"string"` + + SubStructure *InputService3TestShapeSubStructure `type:"structure"` +} + +type InputService3TestShapeSubStructure struct { + _ struct{} `type:"structure"` + + Bar *string `type:"string"` + + Foo *string `type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService4ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService4ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService4ProtocolTest client from just a session. +// svc := inputservice4protocoltest.New(mySession) +// +// // Create a InputService4ProtocolTest client with additional configuration +// svc := inputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService4ProtocolTest { + c := p.ClientConfig("inputservice4protocoltest", cfgs...) + return newInputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newInputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService4ProtocolTest { + svc := &InputService4ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice4protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService4ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService4TestCaseOperation1 = "OperationName" + +// InputService4TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService4TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService4TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService4TestCaseOperation1Request method. 
+// req, resp := client.InputService4TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService4ProtocolTest) InputService4TestCaseOperation1Request(input *InputService4TestShapeInputService4TestCaseOperation1Input) (req *request.Request, output *InputService4TestShapeInputService4TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService4TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/hostedzone", + } + + if input == nil { + input = &InputService4TestShapeInputService4TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService4TestShapeInputService4TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService4ProtocolTest) InputService4TestCaseOperation1(input *InputService4TestShapeInputService4TestCaseOperation1Input) (*InputService4TestShapeInputService4TestCaseOperation1Output, error) { + req, out := c.InputService4TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService4TestShapeInputService4TestCaseOperation1Input struct { + _ struct{} `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` + + Description *string `type:"string"` + + SubStructure *InputService4TestShapeSubStructure `type:"structure"` +} + +type InputService4TestShapeInputService4TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService4TestShapeSubStructure struct { + _ struct{} `type:"structure"` + + Bar *string `type:"string"` + + Foo *string `type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService5ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService5ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService5ProtocolTest client from just a session. +// svc := inputservice5protocoltest.New(mySession) +// +// // Create a InputService5ProtocolTest client with additional configuration +// svc := inputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService5ProtocolTest { + c := p.ClientConfig("inputservice5protocoltest", cfgs...) + return newInputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newInputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService5ProtocolTest { + svc := &InputService5ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice5protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService5ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService5TestCaseOperation1 = "OperationName" + +// InputService5TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService5TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService5TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService5TestCaseOperation1Request method. 
+// req, resp := client.InputService5TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService5ProtocolTest) InputService5TestCaseOperation1Request(input *InputService5TestShapeInputService5TestCaseOperation1Input) (req *request.Request, output *InputService5TestShapeInputService5TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService5TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/hostedzone", + } + + if input == nil { + input = &InputService5TestShapeInputService5TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService5TestShapeInputService5TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService5ProtocolTest) InputService5TestCaseOperation1(input *InputService5TestShapeInputService5TestCaseOperation1Input) (*InputService5TestShapeInputService5TestCaseOperation1Output, error) { + req, out := c.InputService5TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService5TestShapeInputService5TestCaseOperation1Input struct { + _ struct{} `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` + + ListParam []*string `type:"list"` +} + +type InputService5TestShapeInputService5TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService6ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService6ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService6ProtocolTest client from just a session. +// svc := inputservice6protocoltest.New(mySession) +// +// // Create a InputService6ProtocolTest client with additional configuration +// svc := inputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService6ProtocolTest { + c := p.ClientConfig("inputservice6protocoltest", cfgs...) + return newInputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService6ProtocolTest { + svc := &InputService6ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice6protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService6ProtocolTest operation and runs any +// custom request initialization. 
+func (c *InputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService6TestCaseOperation1 = "OperationName" + +// InputService6TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService6TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService6TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService6TestCaseOperation1Request method. +// req, resp := client.InputService6TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService6ProtocolTest) InputService6TestCaseOperation1Request(input *InputService6TestShapeInputService6TestCaseOperation1Input) (req *request.Request, output *InputService6TestShapeInputService6TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService6TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/hostedzone", + } + + if input == nil { + input = &InputService6TestShapeInputService6TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService6TestShapeInputService6TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService6ProtocolTest) InputService6TestCaseOperation1(input *InputService6TestShapeInputService6TestCaseOperation1Input) (*InputService6TestShapeInputService6TestCaseOperation1Output, error) { + req, out := c.InputService6TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService6TestShapeInputService6TestCaseOperation1Input struct { + _ struct{} `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` + + ListParam []*string `locationName:"AlternateName" locationNameList:"NotMember" type:"list"` +} + +type InputService6TestShapeInputService6TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService7ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService7ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService7ProtocolTest client from just a session. 
+// svc := inputservice7protocoltest.New(mySession) +// +// // Create a InputService7ProtocolTest client with additional configuration +// svc := inputservice7protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService7ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService7ProtocolTest { + c := p.ClientConfig("inputservice7protocoltest", cfgs...) + return newInputService7ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService7ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService7ProtocolTest { + svc := &InputService7ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice7protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService7ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService7ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService7TestCaseOperation1 = "OperationName" + +// InputService7TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService7TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService7TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService7TestCaseOperation1Request method. 
+// req, resp := client.InputService7TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService7ProtocolTest) InputService7TestCaseOperation1Request(input *InputService7TestShapeInputService7TestCaseOperation1Input) (req *request.Request, output *InputService7TestShapeInputService7TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService7TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/hostedzone", + } + + if input == nil { + input = &InputService7TestShapeInputService7TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService7TestShapeInputService7TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService7ProtocolTest) InputService7TestCaseOperation1(input *InputService7TestShapeInputService7TestCaseOperation1Input) (*InputService7TestShapeInputService7TestCaseOperation1Output, error) { + req, out := c.InputService7TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService7TestShapeInputService7TestCaseOperation1Input struct { + _ struct{} `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` + + ListParam []*string `type:"list" flattened:"true"` +} + +type InputService7TestShapeInputService7TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService8ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService8ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService8ProtocolTest client from just a session. +// svc := inputservice8protocoltest.New(mySession) +// +// // Create a InputService8ProtocolTest client with additional configuration +// svc := inputservice8protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService8ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService8ProtocolTest { + c := p.ClientConfig("inputservice8protocoltest", cfgs...) + return newInputService8ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService8ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService8ProtocolTest { + svc := &InputService8ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice8protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService8ProtocolTest operation and runs any +// custom request initialization. 
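[Editor's note] InputService7's input differs from InputService6 only in its list tags: `flattened:"true"` with no custom names means each member becomes a repeated element named after the field, with no wrapper element. A standalone encoding/xml sketch of that shape (illustrative only; the document namespace is omitted for brevity):

package main

import (
	"encoding/xml"
	"fmt"
)

// A repeated-element slice with no wrapper approximates a flattened list.
type OperationRequest struct {
	XMLName   xml.Name `xml:"OperationRequest"`
	ListParam []string `xml:"ListParam"`
}

func main() {
	out, _ := xml.Marshal(OperationRequest{ListParam: []string{"one", "two"}})
	fmt.Println(string(out))
	// <OperationRequest><ListParam>one</ListParam><ListParam>two</ListParam></OperationRequest>
}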
+func (c *InputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService8TestCaseOperation1 = "OperationName" + +// InputService8TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService8TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService8TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService8TestCaseOperation1Request method. +// req, resp := client.InputService8TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService8ProtocolTest) InputService8TestCaseOperation1Request(input *InputService8TestShapeInputService8TestCaseOperation1Input) (req *request.Request, output *InputService8TestShapeInputService8TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService8TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/hostedzone", + } + + if input == nil { + input = &InputService8TestShapeInputService8TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService8TestShapeInputService8TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService8ProtocolTest) InputService8TestCaseOperation1(input *InputService8TestShapeInputService8TestCaseOperation1Input) (*InputService8TestShapeInputService8TestCaseOperation1Output, error) { + req, out := c.InputService8TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService8TestShapeInputService8TestCaseOperation1Input struct { + _ struct{} `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` + + ListParam []*string `locationName:"item" type:"list" flattened:"true"` +} + +type InputService8TestShapeInputService8TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService9ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService9ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService9ProtocolTest client from just a session. 
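[Editor's note] InputService8 is the same flattened-list layout as InputService7, except that `locationName:"item"` renames each repeated element. Sketched the same way (stdlib only, namespace omitted):

package main

import (
	"encoding/xml"
	"fmt"
)

// Flattened list with a custom member name: repeated <item> elements.
type OperationRequest struct {
	XMLName   xml.Name `xml:"OperationRequest"`
	ListParam []string `xml:"item"`
}

func main() {
	out, _ := xml.Marshal(OperationRequest{ListParam: []string{"one", "two"}})
	fmt.Println(string(out))
	// <OperationRequest><item>one</item><item>two</item></OperationRequest>
}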
+// svc := inputservice9protocoltest.New(mySession) +// +// // Create a InputService9ProtocolTest client with additional configuration +// svc := inputservice9protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService9ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService9ProtocolTest { + c := p.ClientConfig("inputservice9protocoltest", cfgs...) + return newInputService9ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService9ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService9ProtocolTest { + svc := &InputService9ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice9protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService9ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService9TestCaseOperation1 = "OperationName" + +// InputService9TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService9TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService9TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService9TestCaseOperation1Request method. 
+// req, resp := client.InputService9TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService9ProtocolTest) InputService9TestCaseOperation1Request(input *InputService9TestShapeInputService9TestCaseOperation1Input) (req *request.Request, output *InputService9TestShapeInputService9TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService9TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/hostedzone", + } + + if input == nil { + input = &InputService9TestShapeInputService9TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService9TestShapeInputService9TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService9ProtocolTest) InputService9TestCaseOperation1(input *InputService9TestShapeInputService9TestCaseOperation1Input) (*InputService9TestShapeInputService9TestCaseOperation1Output, error) { + req, out := c.InputService9TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService9TestShapeInputService9TestCaseOperation1Input struct { + _ struct{} `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` + + ListParam []*InputService9TestShapeSingleFieldStruct `locationName:"item" type:"list" flattened:"true"` +} + +type InputService9TestShapeInputService9TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService9TestShapeSingleFieldStruct struct { + _ struct{} `type:"structure"` + + Element *string `locationName:"value" type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService10ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService10ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService10ProtocolTest client from just a session. +// svc := inputservice10protocoltest.New(mySession) +// +// // Create a InputService10ProtocolTest client with additional configuration +// svc := inputservice10protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService10ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService10ProtocolTest { + c := p.ClientConfig("inputservice10protocoltest", cfgs...) + return newInputService10ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
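[Editor's note] InputService9 extends the flattened case to a list of structures: each `<item>` element carries the structure's members as child elements. A stdlib approximation (names mirror the test shape; not the SDK marshaler):

package main

import (
	"encoding/xml"
	"fmt"
)

type SingleFieldStruct struct {
	Element string `xml:"value"`
}

// A flattened list of structures: repeated <item> elements, each holding
// the structure's members.
type OperationRequest struct {
	XMLName   xml.Name            `xml:"OperationRequest"`
	ListParam []SingleFieldStruct `xml:"item"`
}

func main() {
	out, _ := xml.Marshal(OperationRequest{
		ListParam: []SingleFieldStruct{{Element: "one"}, {Element: "two"}},
	})
	fmt.Println(string(out))
	// <OperationRequest><item><value>one</value></item><item><value>two</value></item></OperationRequest>
}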
+func newInputService10ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService10ProtocolTest { + svc := &InputService10ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice10protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService10ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService10ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService10TestCaseOperation1 = "OperationName" + +// InputService10TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService10TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService10TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService10TestCaseOperation1Request method. 
+// req, resp := client.InputService10TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService10ProtocolTest) InputService10TestCaseOperation1Request(input *InputService10TestShapeInputService10TestCaseOperation1Input) (req *request.Request, output *InputService10TestShapeInputService10TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService10TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/hostedzone", + } + + if input == nil { + input = &InputService10TestShapeInputService10TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService10TestShapeInputService10TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService10ProtocolTest) InputService10TestCaseOperation1(input *InputService10TestShapeInputService10TestCaseOperation1Input) (*InputService10TestShapeInputService10TestCaseOperation1Output, error) { + req, out := c.InputService10TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService10TestShapeInputService10TestCaseOperation1Input struct { + _ struct{} `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` + + StructureParam *InputService10TestShapeStructureShape `type:"structure"` +} + +type InputService10TestShapeInputService10TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService10TestShapeStructureShape struct { + _ struct{} `type:"structure"` + + // B is automatically base64 encoded/decoded by the SDK. + B []byte `locationName:"b" type:"blob"` + + T *time.Time `locationName:"t" type:"timestamp" timestampFormat:"iso8601"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService11ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService11ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService11ProtocolTest client from just a session. +// svc := inputservice11protocoltest.New(mySession) +// +// // Create a InputService11ProtocolTest client with additional configuration +// svc := inputservice11protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService11ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService11ProtocolTest { + c := p.ClientConfig("inputservice11protocoltest", cfgs...) + return newInputService11ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
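[Editor's note] InputService10's StructureShape pairs the two value encodings its tags call out: `type:"blob"` fields are base64-encoded into the element text (as the generated comment on B says), and `timestampFormat:"iso8601"` timestamps render as UTC ISO 8601 strings. A small stdlib sketch of both encodings (the layout string is my assumption of the ISO 8601 form used):

package main

import (
	"encoding/base64"
	"fmt"
	"time"
)

func main() {
	// blob: raw bytes become base64 text content.
	fmt.Println(base64.StdEncoding.EncodeToString([]byte("foo"))) // Zm9v

	// iso8601 timestamp: UTC, second precision, trailing Z.
	t := time.Date(2015, 1, 25, 8, 0, 0, 0, time.UTC)
	fmt.Println(t.UTC().Format("2006-01-02T15:04:05Z")) // 2015-01-25T08:00:00Z
}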
+func newInputService11ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService11ProtocolTest { + svc := &InputService11ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice11protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService11ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService11ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService11TestCaseOperation1 = "OperationName" + +// InputService11TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService11TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService11TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService11TestCaseOperation1Request method. 
+// req, resp := client.InputService11TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService11ProtocolTest) InputService11TestCaseOperation1Request(input *InputService11TestShapeInputService11TestCaseOperation1Input) (req *request.Request, output *InputService11TestShapeInputService11TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService11TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService11TestShapeInputService11TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService11TestShapeInputService11TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService11ProtocolTest) InputService11TestCaseOperation1(input *InputService11TestShapeInputService11TestCaseOperation1Input) (*InputService11TestShapeInputService11TestCaseOperation1Output, error) { + req, out := c.InputService11TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService11TestShapeInputService11TestCaseOperation1Input struct { + _ struct{} `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` + + Foo map[string]*string `location:"headers" locationName:"x-foo-" type:"map"` +} + +type InputService11TestShapeInputService11TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService12ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService12ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService12ProtocolTest client from just a session. +// svc := inputservice12protocoltest.New(mySession) +// +// // Create a InputService12ProtocolTest client with additional configuration +// svc := inputservice12protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService12ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService12ProtocolTest { + c := p.ClientConfig("inputservice12protocoltest", cfgs...) + return newInputService12ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
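[Editor's note] InputService11 binds its `Foo` map to HTTP headers: with `location:"headers"` and `locationName:"x-foo-"`, each map entry becomes one header whose name is the prefix plus the map key. In stdlib terms (the endpoint URL is a placeholder):

package main

import (
	"fmt"
	"net/http"
)

func main() {
	req, err := http.NewRequest("POST", "https://example.invalid/", nil)
	if err != nil {
		panic(err)
	}
	// One header per map entry: key "a" -> header "x-foo-a".
	for k, v := range map[string]string{"a": "b", "c": "d"} {
		req.Header.Set("x-foo-"+k, v)
	}
	fmt.Println(req.Header.Get("x-foo-a"), req.Header.Get("x-foo-c")) // b d
}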
+func newInputService12ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService12ProtocolTest { + svc := &InputService12ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice12protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService12ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService12ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService12TestCaseOperation1 = "OperationName" + +// InputService12TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService12TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService12TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService12TestCaseOperation1Request method. 
+// req, resp := client.InputService12TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService12ProtocolTest) InputService12TestCaseOperation1Request(input *InputService12TestShapeInputService12TestCaseOperation1Input) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService12TestCaseOperation1, + HTTPMethod: "GET", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService12TestShapeInputService12TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService12TestShapeInputService12TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService12ProtocolTest) InputService12TestCaseOperation1(input *InputService12TestShapeInputService12TestCaseOperation1Input) (*InputService12TestShapeInputService12TestCaseOperation1Output, error) { + req, out := c.InputService12TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService12TestShapeInputService12TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + Items []*string `location:"querystring" locationName:"item" type:"list"` +} + +type InputService12TestShapeInputService12TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService13ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService13ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService13ProtocolTest client from just a session. +// svc := inputservice13protocoltest.New(mySession) +// +// // Create a InputService13ProtocolTest client with additional configuration +// svc := inputservice13protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService13ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService13ProtocolTest { + c := p.ClientConfig("inputservice13protocoltest", cfgs...) + return newInputService13ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService13ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService13ProtocolTest { + svc := &InputService13ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice13protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService13ProtocolTest operation and runs any +// custom request initialization. 
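[Editor's note] InputService12's `Items` list is bound to the query string instead of the body: `location:"querystring" locationName:"item"` repeats the `item` key once per list member. A stdlib sketch of that expansion:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	q := url.Values{}
	for _, v := range []string{"value1", "value2"} {
		q.Add("item", v) // repeated key, one pair per list member
	}
	fmt.Println("/path?" + q.Encode()) // /path?item=value1&item=value2
}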
+func (c *InputService13ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService13TestCaseOperation1 = "OperationName" + +// InputService13TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService13TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService13TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService13TestCaseOperation1Request method. +// req, resp := client.InputService13TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService13ProtocolTest) InputService13TestCaseOperation1Request(input *InputService13TestShapeInputService13TestCaseOperation1Input) (req *request.Request, output *InputService13TestShapeInputService13TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService13TestCaseOperation1, + HTTPMethod: "GET", + HTTPPath: "/2014-01-01/jobsByPipeline/{PipelineId}", + } + + if input == nil { + input = &InputService13TestShapeInputService13TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService13TestShapeInputService13TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService13ProtocolTest) InputService13TestCaseOperation1(input *InputService13TestShapeInputService13TestCaseOperation1Input) (*InputService13TestShapeInputService13TestCaseOperation1Output, error) { + req, out := c.InputService13TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService13TestShapeInputService13TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + PipelineId *string `location:"uri" type:"string"` + + QueryDoc map[string]*string `location:"querystring" type:"map"` +} + +type InputService13TestShapeInputService13TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService14ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService14ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService14ProtocolTest client from just a session. 
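[Editor's note] InputService13 combines a `location:"uri"` member, spliced into the `{PipelineId}` placeholder in the operation's HTTP path, with a `location:"querystring"` map whose keys are used verbatim as query parameter names. A stdlib sketch of the resulting URL (example values are my own):

package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	// The uri member fills the {PipelineId} placeholder in the path.
	path := strings.Replace("/2014-01-01/jobsByPipeline/{PipelineId}",
		"{PipelineId}", url.PathEscape("foo"), 1)

	// The querystring map: keys become parameter names directly.
	q := url.Values{}
	for k, v := range map[string]string{"bar": "baz"} {
		q.Set(k, v)
	}
	fmt.Println(path + "?" + q.Encode()) // /2014-01-01/jobsByPipeline/foo?bar=baz
}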
+// svc := inputservice14protocoltest.New(mySession) +// +// // Create a InputService14ProtocolTest client with additional configuration +// svc := inputservice14protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService14ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService14ProtocolTest { + c := p.ClientConfig("inputservice14protocoltest", cfgs...) + return newInputService14ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService14ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService14ProtocolTest { + svc := &InputService14ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice14protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService14ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService14ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService14TestCaseOperation1 = "OperationName" + +// InputService14TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService14TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService14TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService14TestCaseOperation1Request method. 
+// req, resp := client.InputService14TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService14ProtocolTest) InputService14TestCaseOperation1Request(input *InputService14TestShapeInputService14TestCaseOperation1Input) (req *request.Request, output *InputService14TestShapeInputService14TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService14TestCaseOperation1, + HTTPMethod: "GET", + HTTPPath: "/2014-01-01/jobsByPipeline/{PipelineId}", + } + + if input == nil { + input = &InputService14TestShapeInputService14TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService14TestShapeInputService14TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService14ProtocolTest) InputService14TestCaseOperation1(input *InputService14TestShapeInputService14TestCaseOperation1Input) (*InputService14TestShapeInputService14TestCaseOperation1Output, error) { + req, out := c.InputService14TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService14TestShapeInputService14TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + PipelineId *string `location:"uri" type:"string"` + + QueryDoc map[string][]*string `location:"querystring" type:"map"` +} + +type InputService14TestShapeInputService14TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService15ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService15ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService15ProtocolTest client from just a session. +// svc := inputservice15protocoltest.New(mySession) +// +// // Create a InputService15ProtocolTest client with additional configuration +// svc := inputservice15protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService15ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService15ProtocolTest { + c := p.ClientConfig("inputservice15protocoltest", cfgs...) + return newInputService15ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
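[Editor's note] InputService14 is the same querystring binding with list values: a `map[string][]*string` repeats each key once per value. Sketch:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	q := url.Values{}
	for k, vs := range map[string][]string{"foo": {"bar", "baz"}} {
		for _, v := range vs {
			q.Add(k, v) // key repeated once per list element
		}
	}
	fmt.Println("?" + q.Encode()) // ?foo=bar&foo=baz
}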
+func newInputService15ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService15ProtocolTest { + svc := &InputService15ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice15protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService15ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService15ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService15TestCaseOperation1 = "OperationName" + +// InputService15TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService15TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService15TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService15TestCaseOperation1Request method. 
+// req, resp := client.InputService15TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService15ProtocolTest) InputService15TestCaseOperation1Request(input *InputService15TestShapeInputService15TestCaseOperation1Input) (req *request.Request, output *InputService15TestShapeInputService15TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService15TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService15TestShapeInputService15TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService15TestShapeInputService15TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService15ProtocolTest) InputService15TestCaseOperation1(input *InputService15TestShapeInputService15TestCaseOperation1Input) (*InputService15TestShapeInputService15TestCaseOperation1Output, error) { + req, out := c.InputService15TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService15TestShapeInputService15TestCaseOperation1Input struct { + _ struct{} `type:"structure" payload:"Foo"` + + Foo *string `locationName:"foo" type:"string"` +} + +type InputService15TestShapeInputService15TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService16ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService16ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService16ProtocolTest client from just a session. +// svc := inputservice16protocoltest.New(mySession) +// +// // Create a InputService16ProtocolTest client with additional configuration +// svc := inputservice16protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService16ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService16ProtocolTest { + c := p.ClientConfig("inputservice16protocoltest", cfgs...) + return newInputService16ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService16ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService16ProtocolTest { + svc := &InputService16ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice16protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService16ProtocolTest operation and runs any +// custom request initialization. 
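[Editor's note] InputService15 introduces the `payload` tag: the named member, rather than the whole input struct, supplies the HTTP body. For a scalar payload like this string, my understanding is that the value is written as the raw body text rather than wrapped in XML; a minimal sketch under that assumption (placeholder URL):

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)

func main() {
	// A string payload is sent as the body text itself.
	req, err := http.NewRequest("POST", "https://example.invalid/", strings.NewReader("bar"))
	if err != nil {
		panic(err)
	}
	got, _ := ioutil.ReadAll(req.Body)
	fmt.Println(string(got)) // bar
}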
+func (c *InputService16ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService16TestCaseOperation1 = "OperationName" + +// InputService16TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService16TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService16TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService16TestCaseOperation1Request method. +// req, resp := client.InputService16TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService16ProtocolTest) InputService16TestCaseOperation1Request(input *InputService16TestShapeInputShape) (req *request.Request, output *InputService16TestShapeInputService16TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService16TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService16TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService16TestShapeInputService16TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService16ProtocolTest) InputService16TestCaseOperation1(input *InputService16TestShapeInputShape) (*InputService16TestShapeInputService16TestCaseOperation1Output, error) { + req, out := c.InputService16TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService16TestCaseOperation2 = "OperationName" + +// InputService16TestCaseOperation2Request generates a "aws/request.Request" representing the +// client's request for the InputService16TestCaseOperation2 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService16TestCaseOperation2 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService16TestCaseOperation2Request method. 
+// req, resp := client.InputService16TestCaseOperation2Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService16ProtocolTest) InputService16TestCaseOperation2Request(input *InputService16TestShapeInputShape) (req *request.Request, output *InputService16TestShapeInputService16TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService16TestCaseOperation2, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService16TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService16TestShapeInputService16TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService16ProtocolTest) InputService16TestCaseOperation2(input *InputService16TestShapeInputShape) (*InputService16TestShapeInputService16TestCaseOperation2Output, error) { + req, out := c.InputService16TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService16TestShapeInputService16TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService16TestShapeInputService16TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService16TestShapeInputShape struct { + _ struct{} `type:"structure" payload:"Foo"` + + Foo []byte `locationName:"foo" type:"blob"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService17ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService17ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService17ProtocolTest client from just a session. +// svc := inputservice17protocoltest.New(mySession) +// +// // Create a InputService17ProtocolTest client with additional configuration +// svc := inputservice17protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService17ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService17ProtocolTest { + c := p.ClientConfig("inputservice17protocoltest", cfgs...) + return newInputService17ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService17ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService17ProtocolTest { + svc := &InputService17ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice17protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService17ProtocolTest operation and runs any +// custom request initialization. 
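[Editor's note] InputService16 makes the payload point for blobs, and the contrast with InputService10 is worth noting: a nested blob field is base64-encoded inside the XML document, while a blob payload is sent as the raw, unencoded body. Sketch:

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	data := []byte("bar")
	// Nested blob field: base64 text inside the XML document.
	fmt.Println(base64.StdEncoding.EncodeToString(data)) // YmFy
	// Blob payload: the bytes themselves are the request body.
	fmt.Println(string(data)) // bar
}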
+func (c *InputService17ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService17TestCaseOperation1 = "OperationName" + +// InputService17TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService17TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService17TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService17TestCaseOperation1Request method. +// req, resp := client.InputService17TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService17ProtocolTest) InputService17TestCaseOperation1Request(input *InputService17TestShapeInputShape) (req *request.Request, output *InputService17TestShapeInputService17TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService17TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService17TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService17TestShapeInputService17TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService17ProtocolTest) InputService17TestCaseOperation1(input *InputService17TestShapeInputShape) (*InputService17TestShapeInputService17TestCaseOperation1Output, error) { + req, out := c.InputService17TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService17TestCaseOperation2 = "OperationName" + +// InputService17TestCaseOperation2Request generates a "aws/request.Request" representing the +// client's request for the InputService17TestCaseOperation2 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService17TestCaseOperation2 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService17TestCaseOperation2Request method. 
+// req, resp := client.InputService17TestCaseOperation2Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService17ProtocolTest) InputService17TestCaseOperation2Request(input *InputService17TestShapeInputShape) (req *request.Request, output *InputService17TestShapeInputService17TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService17TestCaseOperation2, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService17TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService17TestShapeInputService17TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService17ProtocolTest) InputService17TestCaseOperation2(input *InputService17TestShapeInputShape) (*InputService17TestShapeInputService17TestCaseOperation2Output, error) { + req, out := c.InputService17TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +const opInputService17TestCaseOperation3 = "OperationName" + +// InputService17TestCaseOperation3Request generates a "aws/request.Request" representing the +// client's request for the InputService17TestCaseOperation3 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService17TestCaseOperation3 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService17TestCaseOperation3Request method. +// req, resp := client.InputService17TestCaseOperation3Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService17ProtocolTest) InputService17TestCaseOperation3Request(input *InputService17TestShapeInputShape) (req *request.Request, output *InputService17TestShapeInputService17TestCaseOperation3Output) { + op := &request.Operation{ + Name: opInputService17TestCaseOperation3, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService17TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService17TestShapeInputService17TestCaseOperation3Output{} + req.Data = output + return +} + +func (c *InputService17ProtocolTest) InputService17TestCaseOperation3(input *InputService17TestShapeInputShape) (*InputService17TestShapeInputService17TestCaseOperation3Output, error) { + req, out := c.InputService17TestCaseOperation3Request(input) + err := req.Send() + return out, err +} + +const opInputService17TestCaseOperation4 = "OperationName" + +// InputService17TestCaseOperation4Request generates a "aws/request.Request" representing the +// client's request for the InputService17TestCaseOperation4 operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService17TestCaseOperation4 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService17TestCaseOperation4Request method. +// req, resp := client.InputService17TestCaseOperation4Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService17ProtocolTest) InputService17TestCaseOperation4Request(input *InputService17TestShapeInputShape) (req *request.Request, output *InputService17TestShapeInputService17TestCaseOperation4Output) { + op := &request.Operation{ + Name: opInputService17TestCaseOperation4, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService17TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService17TestShapeInputService17TestCaseOperation4Output{} + req.Data = output + return +} + +func (c *InputService17ProtocolTest) InputService17TestCaseOperation4(input *InputService17TestShapeInputShape) (*InputService17TestShapeInputService17TestCaseOperation4Output, error) { + req, out := c.InputService17TestCaseOperation4Request(input) + err := req.Send() + return out, err +} + +type InputService17TestShapeFooShape struct { + _ struct{} `locationName:"foo" type:"structure"` + + Baz *string `locationName:"baz" type:"string"` +} + +type InputService17TestShapeInputService17TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService17TestShapeInputService17TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService17TestShapeInputService17TestCaseOperation3Output struct { + _ struct{} `type:"structure"` +} + +type InputService17TestShapeInputService17TestCaseOperation4Output struct { + _ struct{} `type:"structure"` +} + +type InputService17TestShapeInputShape struct { + _ struct{} `type:"structure" payload:"Foo"` + + Foo *InputService17TestShapeFooShape `locationName:"foo" type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService18ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService18ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService18ProtocolTest client from just a session. 
+// svc := inputservice18protocoltest.New(mySession) +// +// // Create a InputService18ProtocolTest client with additional configuration +// svc := inputservice18protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService18ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService18ProtocolTest { + c := p.ClientConfig("inputservice18protocoltest", cfgs...) + return newInputService18ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService18ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService18ProtocolTest { + svc := &InputService18ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice18protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService18ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService18ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService18TestCaseOperation1 = "OperationName" + +// InputService18TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService18TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService18TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService18TestCaseOperation1Request method. 
+// req, resp := client.InputService18TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService18ProtocolTest) InputService18TestCaseOperation1Request(input *InputService18TestShapeInputService18TestCaseOperation1Input) (req *request.Request, output *InputService18TestShapeInputService18TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService18TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService18TestShapeInputService18TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService18TestShapeInputService18TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService18ProtocolTest) InputService18TestCaseOperation1(input *InputService18TestShapeInputService18TestCaseOperation1Input) (*InputService18TestShapeInputService18TestCaseOperation1Output, error) { + req, out := c.InputService18TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService18TestShapeGrant struct { + _ struct{} `locationName:"Grant" type:"structure"` + + Grantee *InputService18TestShapeGrantee `type:"structure"` +} + +type InputService18TestShapeGrantee struct { + _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` + + EmailAddress *string `type:"string"` + + Type *string `locationName:"xsi:type" type:"string" xmlAttribute:"true"` +} + +type InputService18TestShapeInputService18TestCaseOperation1Input struct { + _ struct{} `type:"structure" payload:"Grant"` + + Grant *InputService18TestShapeGrant `locationName:"Grant" type:"structure"` +} + +type InputService18TestShapeInputService18TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService19ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService19ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService19ProtocolTest client from just a session. +// svc := inputservice19protocoltest.New(mySession) +// +// // Create a InputService19ProtocolTest client with additional configuration +// svc := inputservice19protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService19ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService19ProtocolTest { + c := p.ClientConfig("inputservice19protocoltest", cfgs...) + return newInputService19ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newInputService19ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService19ProtocolTest { + svc := &InputService19ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice19protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService19ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService19ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService19TestCaseOperation1 = "OperationName" + +// InputService19TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService19TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService19TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService19TestCaseOperation1Request method. 
+// req, resp := client.InputService19TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService19ProtocolTest) InputService19TestCaseOperation1Request(input *InputService19TestShapeInputService19TestCaseOperation1Input) (req *request.Request, output *InputService19TestShapeInputService19TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService19TestCaseOperation1, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &InputService19TestShapeInputService19TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService19TestShapeInputService19TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService19ProtocolTest) InputService19TestCaseOperation1(input *InputService19TestShapeInputService19TestCaseOperation1Input) (*InputService19TestShapeInputService19TestCaseOperation1Output, error) { + req, out := c.InputService19TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService19TestShapeInputService19TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" type:"string"` + + Key *string `location:"uri" type:"string"` +} + +type InputService19TestShapeInputService19TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService20ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService20ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService20ProtocolTest client from just a session. +// svc := inputservice20protocoltest.New(mySession) +// +// // Create a InputService20ProtocolTest client with additional configuration +// svc := inputservice20protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService20ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService20ProtocolTest { + c := p.ClientConfig("inputservice20protocoltest", cfgs...) + return newInputService20ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService20ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService20ProtocolTest { + svc := &InputService20ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice20protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService20ProtocolTest operation and runs any +// custom request initialization. 
+func (c *InputService20ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService20TestCaseOperation1 = "OperationName" + +// InputService20TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService20TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService20TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService20TestCaseOperation1Request method. +// req, resp := client.InputService20TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService20ProtocolTest) InputService20TestCaseOperation1Request(input *InputService20TestShapeInputShape) (req *request.Request, output *InputService20TestShapeInputService20TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService20TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService20TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService20TestShapeInputService20TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService20ProtocolTest) InputService20TestCaseOperation1(input *InputService20TestShapeInputShape) (*InputService20TestShapeInputService20TestCaseOperation1Output, error) { + req, out := c.InputService20TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService20TestCaseOperation2 = "OperationName" + +// InputService20TestCaseOperation2Request generates a "aws/request.Request" representing the +// client's request for the InputService20TestCaseOperation2 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService20TestCaseOperation2 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService20TestCaseOperation2Request method. 
+// req, resp := client.InputService20TestCaseOperation2Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService20ProtocolTest) InputService20TestCaseOperation2Request(input *InputService20TestShapeInputShape) (req *request.Request, output *InputService20TestShapeInputService20TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService20TestCaseOperation2, + HTTPMethod: "POST", + HTTPPath: "/path?abc=mno", + } + + if input == nil { + input = &InputService20TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService20TestShapeInputService20TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService20ProtocolTest) InputService20TestCaseOperation2(input *InputService20TestShapeInputShape) (*InputService20TestShapeInputService20TestCaseOperation2Output, error) { + req, out := c.InputService20TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService20TestShapeInputService20TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService20TestShapeInputService20TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService20TestShapeInputShape struct { + _ struct{} `type:"structure"` + + Foo *string `location:"querystring" locationName:"param-name" type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService21ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService21ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService21ProtocolTest client from just a session. +// svc := inputservice21protocoltest.New(mySession) +// +// // Create a InputService21ProtocolTest client with additional configuration +// svc := inputservice21protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService21ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService21ProtocolTest { + c := p.ClientConfig("inputservice21protocoltest", cfgs...) + return newInputService21ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService21ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService21ProtocolTest { + svc := &InputService21ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice21protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService21ProtocolTest operation and runs any +// custom request initialization. 
+func (c *InputService21ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService21TestCaseOperation1 = "OperationName" + +// InputService21TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService21TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService21TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService21TestCaseOperation1Request method. +// req, resp := client.InputService21TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService21ProtocolTest) InputService21TestCaseOperation1Request(input *InputService21TestShapeInputShape) (req *request.Request, output *InputService21TestShapeInputService21TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService21TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService21TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService21TestShapeInputService21TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService21ProtocolTest) InputService21TestCaseOperation1(input *InputService21TestShapeInputShape) (*InputService21TestShapeInputService21TestCaseOperation1Output, error) { + req, out := c.InputService21TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService21TestCaseOperation2 = "OperationName" + +// InputService21TestCaseOperation2Request generates a "aws/request.Request" representing the +// client's request for the InputService21TestCaseOperation2 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService21TestCaseOperation2 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService21TestCaseOperation2Request method. 
+// req, resp := client.InputService21TestCaseOperation2Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService21ProtocolTest) InputService21TestCaseOperation2Request(input *InputService21TestShapeInputShape) (req *request.Request, output *InputService21TestShapeInputService21TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService21TestCaseOperation2, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService21TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService21TestShapeInputService21TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService21ProtocolTest) InputService21TestCaseOperation2(input *InputService21TestShapeInputShape) (*InputService21TestShapeInputService21TestCaseOperation2Output, error) { + req, out := c.InputService21TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +const opInputService21TestCaseOperation3 = "OperationName" + +// InputService21TestCaseOperation3Request generates a "aws/request.Request" representing the +// client's request for the InputService21TestCaseOperation3 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService21TestCaseOperation3 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService21TestCaseOperation3Request method. +// req, resp := client.InputService21TestCaseOperation3Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService21ProtocolTest) InputService21TestCaseOperation3Request(input *InputService21TestShapeInputShape) (req *request.Request, output *InputService21TestShapeInputService21TestCaseOperation3Output) { + op := &request.Operation{ + Name: opInputService21TestCaseOperation3, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService21TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService21TestShapeInputService21TestCaseOperation3Output{} + req.Data = output + return +} + +func (c *InputService21ProtocolTest) InputService21TestCaseOperation3(input *InputService21TestShapeInputShape) (*InputService21TestShapeInputService21TestCaseOperation3Output, error) { + req, out := c.InputService21TestCaseOperation3Request(input) + err := req.Send() + return out, err +} + +const opInputService21TestCaseOperation4 = "OperationName" + +// InputService21TestCaseOperation4Request generates a "aws/request.Request" representing the +// client's request for the InputService21TestCaseOperation4 operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService21TestCaseOperation4 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService21TestCaseOperation4Request method. +// req, resp := client.InputService21TestCaseOperation4Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService21ProtocolTest) InputService21TestCaseOperation4Request(input *InputService21TestShapeInputShape) (req *request.Request, output *InputService21TestShapeInputService21TestCaseOperation4Output) { + op := &request.Operation{ + Name: opInputService21TestCaseOperation4, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService21TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService21TestShapeInputService21TestCaseOperation4Output{} + req.Data = output + return +} + +func (c *InputService21ProtocolTest) InputService21TestCaseOperation4(input *InputService21TestShapeInputShape) (*InputService21TestShapeInputService21TestCaseOperation4Output, error) { + req, out := c.InputService21TestCaseOperation4Request(input) + err := req.Send() + return out, err +} + +const opInputService21TestCaseOperation5 = "OperationName" + +// InputService21TestCaseOperation5Request generates a "aws/request.Request" representing the +// client's request for the InputService21TestCaseOperation5 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService21TestCaseOperation5 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService21TestCaseOperation5Request method. 
+// req, resp := client.InputService21TestCaseOperation5Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService21ProtocolTest) InputService21TestCaseOperation5Request(input *InputService21TestShapeInputShape) (req *request.Request, output *InputService21TestShapeInputService21TestCaseOperation5Output) { + op := &request.Operation{ + Name: opInputService21TestCaseOperation5, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService21TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService21TestShapeInputService21TestCaseOperation5Output{} + req.Data = output + return +} + +func (c *InputService21ProtocolTest) InputService21TestCaseOperation5(input *InputService21TestShapeInputShape) (*InputService21TestShapeInputService21TestCaseOperation5Output, error) { + req, out := c.InputService21TestCaseOperation5Request(input) + err := req.Send() + return out, err +} + +const opInputService21TestCaseOperation6 = "OperationName" + +// InputService21TestCaseOperation6Request generates a "aws/request.Request" representing the +// client's request for the InputService21TestCaseOperation6 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService21TestCaseOperation6 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService21TestCaseOperation6Request method. 
+// req, resp := client.InputService21TestCaseOperation6Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService21ProtocolTest) InputService21TestCaseOperation6Request(input *InputService21TestShapeInputShape) (req *request.Request, output *InputService21TestShapeInputService21TestCaseOperation6Output) { + op := &request.Operation{ + Name: opInputService21TestCaseOperation6, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService21TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService21TestShapeInputService21TestCaseOperation6Output{} + req.Data = output + return +} + +func (c *InputService21ProtocolTest) InputService21TestCaseOperation6(input *InputService21TestShapeInputShape) (*InputService21TestShapeInputService21TestCaseOperation6Output, error) { + req, out := c.InputService21TestCaseOperation6Request(input) + err := req.Send() + return out, err +} + +type InputService21TestShapeInputService21TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService21TestShapeInputService21TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService21TestShapeInputService21TestCaseOperation3Output struct { + _ struct{} `type:"structure"` +} + +type InputService21TestShapeInputService21TestCaseOperation4Output struct { + _ struct{} `type:"structure"` +} + +type InputService21TestShapeInputService21TestCaseOperation5Output struct { + _ struct{} `type:"structure"` +} + +type InputService21TestShapeInputService21TestCaseOperation6Output struct { + _ struct{} `type:"structure"` +} + +type InputService21TestShapeInputShape struct { + _ struct{} `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` + + RecursiveStruct *InputService21TestShapeRecursiveStructType `type:"structure"` +} + +type InputService21TestShapeRecursiveStructType struct { + _ struct{} `type:"structure"` + + NoRecurse *string `type:"string"` + + RecursiveList []*InputService21TestShapeRecursiveStructType `type:"list"` + + RecursiveMap map[string]*InputService21TestShapeRecursiveStructType `type:"map"` + + RecursiveStruct *InputService21TestShapeRecursiveStructType `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService22ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService22ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService22ProtocolTest client from just a session. +// svc := inputservice22protocoltest.New(mySession) +// +// // Create a InputService22ProtocolTest client with additional configuration +// svc := inputservice22protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService22ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService22ProtocolTest { + c := p.ClientConfig("inputservice22protocoltest", cfgs...) + return newInputService22ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newInputService22ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService22ProtocolTest { + svc := &InputService22ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice22protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService22ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService22ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService22TestCaseOperation1 = "OperationName" + +// InputService22TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService22TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService22TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService22TestCaseOperation1Request method. 
+// req, resp := client.InputService22TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService22ProtocolTest) InputService22TestCaseOperation1Request(input *InputService22TestShapeInputService22TestCaseOperation1Input) (req *request.Request, output *InputService22TestShapeInputService22TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService22TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService22TestShapeInputService22TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService22TestShapeInputService22TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService22ProtocolTest) InputService22TestCaseOperation1(input *InputService22TestShapeInputService22TestCaseOperation1Input) (*InputService22TestShapeInputService22TestCaseOperation1Output, error) { + req, out := c.InputService22TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService22TestShapeInputService22TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + TimeArgInHeader *time.Time `location:"header" locationName:"x-amz-timearg" type:"timestamp" timestampFormat:"rfc822"` +} + +type InputService22TestShapeInputService22TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService23ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService23ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService23ProtocolTest client from just a session. +// svc := inputservice23protocoltest.New(mySession) +// +// // Create a InputService23ProtocolTest client with additional configuration +// svc := inputservice23protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService23ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService23ProtocolTest { + c := p.ClientConfig("inputservice23protocoltest", cfgs...) + return newInputService23ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newInputService23ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService23ProtocolTest { + svc := &InputService23ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice23protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService23ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService23ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService23TestCaseOperation1 = "OperationName" + +// InputService23TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService23TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService23TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService23TestCaseOperation1Request method. +// req, resp := client.InputService23TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService23ProtocolTest) InputService23TestCaseOperation1Request(input *InputService23TestShapeInputShape) (req *request.Request, output *InputService23TestShapeInputService23TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService23TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService23TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService23TestShapeInputService23TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService23ProtocolTest) InputService23TestCaseOperation1(input *InputService23TestShapeInputShape) (*InputService23TestShapeInputService23TestCaseOperation1Output, error) { + req, out := c.InputService23TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService23TestCaseOperation2 = "OperationName" + +// InputService23TestCaseOperation2Request generates a "aws/request.Request" representing the +// client's request for the InputService23TestCaseOperation2 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
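+//
+// As an editorial sketch only (this example is not part of the generated
+// code, and assumes the standard aws/request.HandlerList API), a custom
+// handler can be pushed onto the returned request before calling "Send",
+// for example to inspect the HTTP request before it goes out:
+//
+// // req, resp := client.InputService23TestCaseOperation2Request(params)
+// // req.Handlers.Send.PushFront(func(r *request.Request) {
+// //     fmt.Println("sending to:", r.HTTPRequest.URL) // runs before the request is sent
+// // })
+// // err := req.Send()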
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InputService23TestCaseOperation2 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InputService23TestCaseOperation2Request method. +// req, resp := client.InputService23TestCaseOperation2Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *InputService23ProtocolTest) InputService23TestCaseOperation2Request(input *InputService23TestShapeInputShape) (req *request.Request, output *InputService23TestShapeInputService23TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService23TestCaseOperation2, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService23TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService23TestShapeInputService23TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService23ProtocolTest) InputService23TestCaseOperation2(input *InputService23TestShapeInputShape) (*InputService23TestShapeInputService23TestCaseOperation2Output, error) { + req, out := c.InputService23TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService23TestShapeInputService23TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService23TestShapeInputService23TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService23TestShapeInputShape struct { + _ struct{} `type:"structure"` + + Token *string `type:"string" idempotencyToken:"true"` +} + +// +// Tests begin here +// + +func TestInputService1ProtocolTestBasicXMLSerializationCase1(t *testing.T) { + sess := session.New() + svc := NewInputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService1TestShapeInputShape{ + Description: aws.String("bar"), + Name: aws.String("foo"), + } + req, _ := svc.InputService1TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `barfoo`, util.Trim(string(body)), InputService1TestShapeInputShape{}) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String()) + + // assert headers + +} + +func TestInputService1ProtocolTestBasicXMLSerializationCase2(t *testing.T) { + sess := session.New() + svc := NewInputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService1TestShapeInputShape{ + Description: aws.String("bar"), + Name: aws.String("foo"), + } + req, _ := svc.InputService1TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `barfoo`, util.Trim(string(body)), InputService1TestShapeInputShape{}) + 
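+ // Editorial note (not part of the generated test): util.SortXML canonicalizes
+ // the order of the XML elements in the marshaled body, and awstesting.AssertXML
+ // compares the sorted body against the expected document structurally, so these
+ // assertions do not depend on element ordering in the serialized output.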
+ // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String()) + + // assert headers + +} + +func TestInputService1ProtocolTestBasicXMLSerializationCase3(t *testing.T) { + sess := session.New() + svc := NewInputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService1TestShapeInputService1TestCaseOperation3Input{} + req, _ := svc.InputService1TestCaseOperation3Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String()) + + // assert headers + +} + +func TestInputService2ProtocolTestSerializeOtherScalarTypesCase1(t *testing.T) { + sess := session.New() + svc := NewInputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService2TestShapeInputService2TestCaseOperation1Input{ + First: aws.Bool(true), + Fourth: aws.Int64(3), + Second: aws.Bool(false), + Third: aws.Float64(1.2), + } + req, _ := svc.InputService2TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `true3false1.2`, util.Trim(string(body)), InputService2TestShapeInputService2TestCaseOperation1Input{}) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String()) + + // assert headers + +} + +func TestInputService3ProtocolTestNestedStructuresCase1(t *testing.T) { + sess := session.New() + svc := NewInputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService3TestShapeInputShape{ + Description: aws.String("baz"), + SubStructure: &InputService3TestShapeSubStructure{ + Bar: aws.String("b"), + Foo: aws.String("a"), + }, + } + req, _ := svc.InputService3TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `bazba`, util.Trim(string(body)), InputService3TestShapeInputShape{}) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String()) + + // assert headers + +} + +func TestInputService3ProtocolTestNestedStructuresCase2(t *testing.T) { + sess := session.New() + svc := NewInputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService3TestShapeInputShape{ + Description: aws.String("baz"), + SubStructure: &InputService3TestShapeSubStructure{ + Foo: aws.String("a"), + }, + } + req, _ := svc.InputService3TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `baza`, util.Trim(string(body)), InputService3TestShapeInputShape{}) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String()) + + // assert headers + +} + +func TestInputService4ProtocolTestNestedStructuresCase1(t *testing.T) { + sess := session.New() + svc := NewInputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService4TestShapeInputService4TestCaseOperation1Input{ + Description: aws.String("baz"), + SubStructure: 
&InputService4TestShapeSubStructure{}, + } + req, _ := svc.InputService4TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `baz`, util.Trim(string(body)), InputService4TestShapeInputService4TestCaseOperation1Input{}) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String()) + + // assert headers + +} + +func TestInputService5ProtocolTestNonFlattenedListsCase1(t *testing.T) { + sess := session.New() + svc := NewInputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService5TestShapeInputService5TestCaseOperation1Input{ + ListParam: []*string{ + aws.String("one"), + aws.String("two"), + aws.String("three"), + }, + } + req, _ := svc.InputService5TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `onetwothree`, util.Trim(string(body)), InputService5TestShapeInputService5TestCaseOperation1Input{}) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String()) + + // assert headers + +} + +func TestInputService6ProtocolTestNonFlattenedListsWithLocationNameCase1(t *testing.T) { + sess := session.New() + svc := NewInputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService6TestShapeInputService6TestCaseOperation1Input{ + ListParam: []*string{ + aws.String("one"), + aws.String("two"), + aws.String("three"), + }, + } + req, _ := svc.InputService6TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `onetwothree`, util.Trim(string(body)), InputService6TestShapeInputService6TestCaseOperation1Input{}) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String()) + + // assert headers + +} + +func TestInputService7ProtocolTestFlattenedListsCase1(t *testing.T) { + sess := session.New() + svc := NewInputService7ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService7TestShapeInputService7TestCaseOperation1Input{ + ListParam: []*string{ + aws.String("one"), + aws.String("two"), + aws.String("three"), + }, + } + req, _ := svc.InputService7TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `onetwothree`, util.Trim(string(body)), InputService7TestShapeInputService7TestCaseOperation1Input{}) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String()) + + // assert headers + +} + +func TestInputService8ProtocolTestFlattenedListsWithLocationNameCase1(t *testing.T) { + sess := session.New() + svc := NewInputService8ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService8TestShapeInputService8TestCaseOperation1Input{ + ListParam: []*string{ + aws.String("one"), + aws.String("two"), + aws.String("three"), + }, + } + req, _ := svc.InputService8TestCaseOperation1Request(input) + r := 
req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `onetwothree`, util.Trim(string(body)), InputService8TestShapeInputService8TestCaseOperation1Input{}) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String()) + + // assert headers + +} + +func TestInputService9ProtocolTestListOfStructuresCase1(t *testing.T) { + sess := session.New() + svc := NewInputService9ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService9TestShapeInputService9TestCaseOperation1Input{ + ListParam: []*InputService9TestShapeSingleFieldStruct{ + { + Element: aws.String("one"), + }, + { + Element: aws.String("two"), + }, + { + Element: aws.String("three"), + }, + }, + } + req, _ := svc.InputService9TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `onetwothree`, util.Trim(string(body)), InputService9TestShapeInputService9TestCaseOperation1Input{}) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String()) + + // assert headers + +} + +func TestInputService10ProtocolTestBlobAndTimestampShapesCase1(t *testing.T) { + sess := session.New() + svc := NewInputService10ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService10TestShapeInputService10TestCaseOperation1Input{ + StructureParam: &InputService10TestShapeStructureShape{ + B: []byte("foo"), + T: aws.Time(time.Unix(1422172800, 0)), + }, + } + req, _ := svc.InputService10TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `Zm9v2015-01-25T08:00:00Z`, util.Trim(string(body)), InputService10TestShapeInputService10TestCaseOperation1Input{}) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String()) + + // assert headers + +} + +func TestInputService11ProtocolTestHeaderMapsCase1(t *testing.T) { + sess := session.New() + svc := NewInputService11ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService11TestShapeInputService11TestCaseOperation1Input{ + Foo: map[string]*string{ + "a": aws.String("b"), + "c": aws.String("d"), + }, + } + req, _ := svc.InputService11TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + assert.Equal(t, "b", r.Header.Get("x-foo-a")) + assert.Equal(t, "d", r.Header.Get("x-foo-c")) + +} + +func TestInputService12ProtocolTestQuerystringListOfStringsCase1(t *testing.T) { + sess := session.New() + svc := NewInputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService12TestShapeInputService12TestCaseOperation1Input{ + Items: []*string{ + aws.String("value1"), + aws.String("value2"), + }, + } + req, _ := svc.InputService12TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, 
"https://test/path?item=value1&item=value2", r.URL.String()) + + // assert headers + +} + +func TestInputService13ProtocolTestStringToStringMapsInQuerystringCase1(t *testing.T) { + sess := session.New() + svc := NewInputService13ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService13TestShapeInputService13TestCaseOperation1Input{ + PipelineId: aws.String("foo"), + QueryDoc: map[string]*string{ + "bar": aws.String("baz"), + "fizz": aws.String("buzz"), + }, + } + req, _ := svc.InputService13TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/foo?bar=baz&fizz=buzz", r.URL.String()) + + // assert headers + +} + +func TestInputService14ProtocolTestStringToStringListMapsInQuerystringCase1(t *testing.T) { + sess := session.New() + svc := NewInputService14ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService14TestShapeInputService14TestCaseOperation1Input{ + PipelineId: aws.String("id"), + QueryDoc: map[string][]*string{ + "fizz": { + aws.String("buzz"), + aws.String("pop"), + }, + "foo": { + aws.String("bar"), + aws.String("baz"), + }, + }, + } + req, _ := svc.InputService14TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/id?foo=bar&foo=baz&fizz=buzz&fizz=pop", r.URL.String()) + + // assert headers + +} + +func TestInputService15ProtocolTestStringPayloadCase1(t *testing.T) { + sess := session.New() + svc := NewInputService15ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService15TestShapeInputService15TestCaseOperation1Input{ + Foo: aws.String("bar"), + } + req, _ := svc.InputService15TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + assert.Equal(t, `bar`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService16ProtocolTestBlobPayloadCase1(t *testing.T) { + sess := session.New() + svc := NewInputService16ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService16TestShapeInputShape{ + Foo: []byte("bar"), + } + req, _ := svc.InputService16TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + assert.Equal(t, `bar`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService16ProtocolTestBlobPayloadCase2(t *testing.T) { + sess := session.New() + svc := NewInputService16ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService16TestShapeInputShape{} + req, _ := svc.InputService16TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService17ProtocolTestStructurePayloadCase1(t *testing.T) { + sess := 
session.New() + svc := NewInputService17ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService17TestShapeInputShape{ + Foo: &InputService17TestShapeFooShape{ + Baz: aws.String("bar"), + }, + } + req, _ := svc.InputService17TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `bar`, util.Trim(string(body)), InputService17TestShapeInputShape{}) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService17ProtocolTestStructurePayloadCase2(t *testing.T) { + sess := session.New() + svc := NewInputService17ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService17TestShapeInputShape{} + req, _ := svc.InputService17TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService17ProtocolTestStructurePayloadCase3(t *testing.T) { + sess := session.New() + svc := NewInputService17ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService17TestShapeInputShape{ + Foo: &InputService17TestShapeFooShape{}, + } + req, _ := svc.InputService17TestCaseOperation3Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, ``, util.Trim(string(body)), InputService17TestShapeInputShape{}) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService17ProtocolTestStructurePayloadCase4(t *testing.T) { + sess := session.New() + svc := NewInputService17ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService17TestShapeInputShape{} + req, _ := svc.InputService17TestCaseOperation4Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService18ProtocolTestXMLAttributeCase1(t *testing.T) { + sess := session.New() + svc := NewInputService18ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService18TestShapeInputService18TestCaseOperation1Input{ + Grant: &InputService18TestShapeGrant{ + Grantee: &InputService18TestShapeGrantee{ + EmailAddress: aws.String("foo@example.com"), + Type: aws.String("CanonicalUser"), + }, + }, + } + req, _ := svc.InputService18TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `foo@example.com`, util.Trim(string(body)), InputService18TestShapeInputService18TestCaseOperation1Input{}) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService19ProtocolTestGreedyKeysCase1(t *testing.T) { + sess := session.New() + svc := NewInputService19ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := 
&InputService19TestShapeInputService19TestCaseOperation1Input{ + Bucket: aws.String("my/bucket"), + Key: aws.String("testing /123"), + } + req, _ := svc.InputService19TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/my%2Fbucket/testing%20/123", r.URL.String()) + + // assert headers + +} + +func TestInputService20ProtocolTestOmitsNullQueryParamsButSerializesEmptyStringsCase1(t *testing.T) { + sess := session.New() + svc := NewInputService20ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService20TestShapeInputShape{} + req, _ := svc.InputService20TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService20ProtocolTestOmitsNullQueryParamsButSerializesEmptyStringsCase2(t *testing.T) { + sess := session.New() + svc := NewInputService20ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService20TestShapeInputShape{ + Foo: aws.String(""), + } + req, _ := svc.InputService20TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/path?abc=mno&param-name=", r.URL.String()) + + // assert headers + +} + +func TestInputService21ProtocolTestRecursiveShapesCase1(t *testing.T) { + sess := session.New() + svc := NewInputService21ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService21TestShapeInputShape{ + RecursiveStruct: &InputService21TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + } + req, _ := svc.InputService21TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `foo`, util.Trim(string(body)), InputService21TestShapeInputShape{}) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService21ProtocolTestRecursiveShapesCase2(t *testing.T) { + sess := session.New() + svc := NewInputService21ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService21TestShapeInputShape{ + RecursiveStruct: &InputService21TestShapeRecursiveStructType{ + RecursiveStruct: &InputService21TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + }, + } + req, _ := svc.InputService21TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `foo`, util.Trim(string(body)), InputService21TestShapeInputShape{}) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService21ProtocolTestRecursiveShapesCase3(t *testing.T) { + sess := session.New() + svc := NewInputService21ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService21TestShapeInputShape{ + RecursiveStruct: &InputService21TestShapeRecursiveStructType{ + RecursiveStruct:
&InputService21TestShapeRecursiveStructType{ + RecursiveStruct: &InputService21TestShapeRecursiveStructType{ + RecursiveStruct: &InputService21TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + }, + }, + }, + } + req, _ := svc.InputService21TestCaseOperation3Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `foo`, util.Trim(string(body)), InputService21TestShapeInputShape{}) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService21ProtocolTestRecursiveShapesCase4(t *testing.T) { + sess := session.New() + svc := NewInputService21ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService21TestShapeInputShape{ + RecursiveStruct: &InputService21TestShapeRecursiveStructType{ + RecursiveList: []*InputService21TestShapeRecursiveStructType{ + { + NoRecurse: aws.String("foo"), + }, + { + NoRecurse: aws.String("bar"), + }, + }, + }, + } + req, _ := svc.InputService21TestCaseOperation4Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `foobar`, util.Trim(string(body)), InputService21TestShapeInputShape{}) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService21ProtocolTestRecursiveShapesCase5(t *testing.T) { + sess := session.New() + svc := NewInputService21ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService21TestShapeInputShape{ + RecursiveStruct: &InputService21TestShapeRecursiveStructType{ + RecursiveList: []*InputService21TestShapeRecursiveStructType{ + { + NoRecurse: aws.String("foo"), + }, + { + RecursiveStruct: &InputService21TestShapeRecursiveStructType{ + NoRecurse: aws.String("bar"), + }, + }, + }, + }, + } + req, _ := svc.InputService21TestCaseOperation5Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `foobar`, util.Trim(string(body)), InputService21TestShapeInputShape{}) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService21ProtocolTestRecursiveShapesCase6(t *testing.T) { + sess := session.New() + svc := NewInputService21ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService21TestShapeInputShape{ + RecursiveStruct: &InputService21TestShapeRecursiveStructType{ + RecursiveMap: map[string]*InputService21TestShapeRecursiveStructType{ + "bar": { + NoRecurse: aws.String("bar"), + }, + "foo": { + NoRecurse: aws.String("foo"), + }, + }, + }, + } + req, _ := svc.InputService21TestCaseOperation6Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `foofoobarbar`, util.Trim(string(body)), InputService21TestShapeInputShape{}) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService22ProtocolTestTimestampInHeaderCase1(t 
*testing.T) { + sess := session.New() + svc := NewInputService22ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService22TestShapeInputService22TestCaseOperation1Input{ + TimeArgInHeader: aws.Time(time.Unix(1422172800, 0)), + } + req, _ := svc.InputService22TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + assert.Equal(t, "Sun, 25 Jan 2015 08:00:00 GMT", r.Header.Get("x-amz-timearg")) + +} + +func TestInputService23ProtocolTestIdempotencyTokenAutoFillCase1(t *testing.T) { + sess := session.New() + svc := NewInputService23ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService23TestShapeInputShape{ + Token: aws.String("abc123"), + } + req, _ := svc.InputService23TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `abc123`, util.Trim(string(body)), InputService23TestShapeInputShape{}) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService23ProtocolTestIdempotencyTokenAutoFillCase2(t *testing.T) { + sess := session.New() + svc := NewInputService23ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService23TestShapeInputShape{} + req, _ := svc.InputService23TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `00000000-0000-4000-8000-000000000000`, util.Trim(string(body)), InputService23TestShapeInputShape{}) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go new file mode 100644 index 000000000..c74088bfe --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go @@ -0,0 +1,69 @@ +// Package restxml provides RESTful XML serialisation of AWS +// requests and responses. 
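// Aside: two behaviours pinned down by the build tests just above are worth
// spelling out. A timestamp bound to a header is rendered in RFC 1123 GMT
// form, and an unset idempotency token is auto-filled from
// protocol.RandReader (stubbed to awstesting.ZeroReader in these tests,
// hence the all-zero UUID "00000000-0000-4000-8000-000000000000"). A minimal
// standalone sketch of the header formatting, assuming only the standard
// library (fmt, net/http, time) and not part of the generated suite:
//
//	t := time.Unix(1422172800, 0).UTC()
//	// Prints "Sun, 25 Jan 2015 08:00:00 GMT", matching the
//	// x-amz-timearg assertion above; http.TimeFormat produces the
//	// same string here as the SDK's header layout.
//	fmt.Println(t.Format(http.TimeFormat))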
+package restxml + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/rest-xml.json build_test.go +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go + +import ( + "bytes" + "encoding/xml" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/protocol/rest" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// BuildHandler is a named request handler for building restxml protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.restxml.Build", Fn: Build} + +// UnmarshalHandler is a named request handler for unmarshaling restxml protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.restxml.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling restxml protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalMeta", Fn: UnmarshalMeta} + +// UnmarshalErrorHandler is a named request handler for unmarshaling restxml protocol request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalError", Fn: UnmarshalError} + +// Build builds a request payload for the REST XML protocol. +func Build(r *request.Request) { + rest.Build(r) + + if t := rest.PayloadType(r.Params); t == "structure" || t == "" { + var buf bytes.Buffer + err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf)) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to encode rest XML request", err) + return + } + r.SetBufferBody(buf.Bytes()) + } +} + +// Unmarshal unmarshals a payload response for the REST XML protocol. +func Unmarshal(r *request.Request) { + if t := rest.PayloadType(r.Data); t == "structure" || t == "" { + defer r.HTTPResponse.Body.Close() + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, "") + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST XML response", err) + return + } + } else { + rest.Unmarshal(r) + } +} + +// UnmarshalMeta unmarshals response headers for the REST XML protocol. +func UnmarshalMeta(r *request.Request) { + rest.UnmarshalMeta(r) +} + +// UnmarshalError unmarshals a response error for the REST XML protocol. 
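// REST XML services share the query protocol's XML error envelope, which is
// why the function below simply delegates to query.UnmarshalError. Taken
// together, the four named handlers defined in this file are what a
// generated client wires into its handler lists; the sketch below mirrors
// the generated clients later in this patch:
//
//	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
//	svc.Handlers.Build.PushBackNamed(restxml.BuildHandler)
//	svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler)
//	svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler)
//	svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler)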
+func UnmarshalError(r *request.Request) { + query.UnmarshalError(r) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/unmarshal_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/unmarshal_test.go new file mode 100644 index 000000000..5582a9c95 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/unmarshal_test.go @@ -0,0 +1,1778 @@ +package restxml_test + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restxml" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" + "github.com/aws/aws-sdk-go/private/util" + "github.com/stretchr/testify/assert" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} +var _ = io.EOF +var _ = aws.String +var _ = fmt.Println + +func init() { + protocol.RandReader = &awstesting.ZeroReader{} +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService1ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService1ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService1ProtocolTest client from just a session. +// svc := outputservice1protocoltest.New(mySession) +// +// // Create a OutputService1ProtocolTest client with additional configuration +// svc := outputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService1ProtocolTest { + c := p.ClientConfig("outputservice1protocoltest", cfgs...) + return newOutputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService1ProtocolTest { + svc := &OutputService1ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice1protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService1ProtocolTest operation and runs any +// custom request initialization. 
+func (c *OutputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService1TestCaseOperation1 = "OperationName" + +// OutputService1TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService1TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService1TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService1TestCaseOperation1Request method. +// req, resp := client.OutputService1TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1Request(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (req *request.Request, output *OutputService1TestShapeOutputShape) { + op := &request.Operation{ + Name: opOutputService1TestCaseOperation1, + } + + if input == nil { + input = &OutputService1TestShapeOutputService1TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService1TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (*OutputService1TestShapeOutputShape, error) { + req, out := c.OutputService1TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opOutputService1TestCaseOperation2 = "OperationName" + +// OutputService1TestCaseOperation2Request generates a "aws/request.Request" representing the +// client's request for the OutputService1TestCaseOperation2 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService1TestCaseOperation2 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService1TestCaseOperation2Request method. 
+// req, resp := client.OutputService1TestCaseOperation2Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation2Request(input *OutputService1TestShapeOutputService1TestCaseOperation2Input) (req *request.Request, output *OutputService1TestShapeOutputShape) { + op := &request.Operation{ + Name: opOutputService1TestCaseOperation2, + } + + if input == nil { + input = &OutputService1TestShapeOutputService1TestCaseOperation2Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService1TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation2(input *OutputService1TestShapeOutputService1TestCaseOperation2Input) (*OutputService1TestShapeOutputShape, error) { + req, out := c.OutputService1TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type OutputService1TestShapeOutputService1TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService1TestShapeOutputService1TestCaseOperation2Input struct { + _ struct{} `type:"structure"` +} + +type OutputService1TestShapeOutputShape struct { + _ struct{} `type:"structure"` + + Char *string `type:"character"` + + Double *float64 `type:"double"` + + FalseBool *bool `type:"boolean"` + + Float *float64 `type:"float"` + + ImaHeader *string `location:"header" type:"string"` + + ImaHeaderLocation *string `location:"header" locationName:"X-Foo" type:"string"` + + Long *int64 `type:"long"` + + Num *int64 `locationName:"FooNum" type:"integer"` + + Str *string `type:"string"` + + Timestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + TrueBool *bool `type:"boolean"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService2ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService2ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService2ProtocolTest client from just a session. +// svc := outputservice2protocoltest.New(mySession) +// +// // Create a OutputService2ProtocolTest client with additional configuration +// svc := outputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService2ProtocolTest { + c := p.ClientConfig("outputservice2protocoltest", cfgs...) + return newOutputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
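// The OutputService1 output shape above exercises most of the tag vocabulary
// the REST XML unmarshalers consume: `location:"header"` sources a field
// from a response header (named after the field unless `locationName`
// overrides it, as X-Foo does for ImaHeaderLocation), `locationName:"FooNum"`
// renames the XML element backing a body field, and
// `timestampFormat:"iso8601"` selects the time layout. A hedged sketch of
// the header half against a plain *http.Response:
//
//	resp := &http.Response{Header: http.Header{}}
//	resp.Header.Set("ImaHeader", "test")
//	resp.Header.Set("X-Foo", "abc")
//	// After the UnmarshalMeta handler runs, ImaHeader and
//	// ImaHeaderLocation on the output shape carry these values.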
+func newOutputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService2ProtocolTest { + svc := &OutputService2ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice2protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService2ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService2TestCaseOperation1 = "OperationName" + +// OutputService2TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService2TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService2TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService2TestCaseOperation1Request method. +// req, resp := client.OutputService2TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1Request(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (req *request.Request, output *OutputService2TestShapeOutputService2TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService2TestCaseOperation1, + } + + if input == nil { + input = &OutputService2TestShapeOutputService2TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService2TestShapeOutputService2TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (*OutputService2TestShapeOutputService2TestCaseOperation1Output, error) { + req, out := c.OutputService2TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService2TestShapeOutputService2TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService2TestShapeOutputService2TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + // Blob is automatically base64 encoded/decoded by the SDK. + Blob []byte `type:"blob"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
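// The Blob field above travels base64-encoded on the wire and is decoded
// into []byte by the SDK, the inverse of the build test earlier that
// serialized []byte("foo") as "Zm9v". A standard-library sketch of that
// round trip, assuming encoding/base64 is imported:
//
//	enc := base64.StdEncoding.EncodeToString([]byte("foo")) // "Zm9v"
//	raw, _ := base64.StdEncoding.DecodeString(enc)          // raw == []byte("foo")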
+type OutputService3ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService3ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService3ProtocolTest client from just a session. +// svc := outputservice3protocoltest.New(mySession) +// +// // Create a OutputService3ProtocolTest client with additional configuration +// svc := outputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService3ProtocolTest { + c := p.ClientConfig("outputservice3protocoltest", cfgs...) + return newOutputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService3ProtocolTest { + svc := &OutputService3ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice3protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService3ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService3TestCaseOperation1 = "OperationName" + +// OutputService3TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService3TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService3TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService3TestCaseOperation1Request method. 
+// req, resp := client.OutputService3TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1Request(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (req *request.Request, output *OutputService3TestShapeOutputService3TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService3TestCaseOperation1, + } + + if input == nil { + input = &OutputService3TestShapeOutputService3TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService3TestShapeOutputService3TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (*OutputService3TestShapeOutputService3TestCaseOperation1Output, error) { + req, out := c.OutputService3TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService3TestShapeOutputService3TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService3TestShapeOutputService3TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + ListMember []*string `type:"list"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService4ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService4ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService4ProtocolTest client from just a session. +// svc := outputservice4protocoltest.New(mySession) +// +// // Create a OutputService4ProtocolTest client with additional configuration +// svc := outputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService4ProtocolTest { + c := p.ClientConfig("outputservice4protocoltest", cfgs...) + return newOutputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService4ProtocolTest { + svc := &OutputService4ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice4protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService4ProtocolTest operation and runs any +// custom request initialization. 
+func (c *OutputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService4TestCaseOperation1 = "OperationName" + +// OutputService4TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService4TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService4TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService4TestCaseOperation1Request method. +// req, resp := client.OutputService4TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1Request(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (req *request.Request, output *OutputService4TestShapeOutputService4TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService4TestCaseOperation1, + } + + if input == nil { + input = &OutputService4TestShapeOutputService4TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService4TestShapeOutputService4TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (*OutputService4TestShapeOutputService4TestCaseOperation1Output, error) { + req, out := c.OutputService4TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService4TestShapeOutputService4TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService4TestShapeOutputService4TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + ListMember []*string `locationNameList:"item" type:"list"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService5ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService5ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService5ProtocolTest client from just a session. +// svc := outputservice5protocoltest.New(mySession) +// +// // Create a OutputService5ProtocolTest client with additional configuration +// svc := outputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService5ProtocolTest { + c := p.ClientConfig("outputservice5protocoltest", cfgs...) + return newOutputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newOutputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService5ProtocolTest { + svc := &OutputService5ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice5protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService5ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService5TestCaseOperation1 = "OperationName" + +// OutputService5TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService5TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService5TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService5TestCaseOperation1Request method. +// req, resp := client.OutputService5TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1Request(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (req *request.Request, output *OutputService5TestShapeOutputService5TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService5TestCaseOperation1, + } + + if input == nil { + input = &OutputService5TestShapeOutputService5TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService5TestShapeOutputService5TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (*OutputService5TestShapeOutputService5TestCaseOperation1Output, error) { + req, out := c.OutputService5TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService5TestShapeOutputService5TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService5TestShapeOutputService5TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + ListMember []*string `type:"list" flattened:"true"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
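// OutputService3 through OutputService5 walk the three list encodings the
// unmarshaler must accept: a default list wraps members in <member>
// elements, `locationNameList:"item"` renames that member element, and
// `flattened:"true"` drops the wrapper so the element repeats at the parent
// level. Sketched wire shapes (hedged; element names follow the tags above,
// values are placeholders):
//
//	<ListMember><member>abc</member><member>123</member></ListMember> // default
//	<ListMember><item>abc</item><item>123</item></ListMember>         // locationNameList:"item"
//	<ListMember>abc</ListMember><ListMember>123</ListMember>          // flattened:"true"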
+type OutputService6ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService6ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService6ProtocolTest client from just a session. +// svc := outputservice6protocoltest.New(mySession) +// +// // Create a OutputService6ProtocolTest client with additional configuration +// svc := outputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService6ProtocolTest { + c := p.ClientConfig("outputservice6protocoltest", cfgs...) + return newOutputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService6ProtocolTest { + svc := &OutputService6ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice6protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService6ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService6TestCaseOperation1 = "OperationName" + +// OutputService6TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService6TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService6TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService6TestCaseOperation1Request method. 
+// req, resp := client.OutputService6TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1Request(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (req *request.Request, output *OutputService6TestShapeOutputService6TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService6TestCaseOperation1, + } + + if input == nil { + input = &OutputService6TestShapeOutputService6TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService6TestShapeOutputService6TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (*OutputService6TestShapeOutputService6TestCaseOperation1Output, error) { + req, out := c.OutputService6TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService6TestShapeOutputService6TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService6TestShapeOutputService6TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Map map[string]*OutputService6TestShapeSingleStructure `type:"map"` +} + +type OutputService6TestShapeSingleStructure struct { + _ struct{} `type:"structure"` + + Foo *string `locationName:"foo" type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService7ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService7ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService7ProtocolTest client from just a session. +// svc := outputservice7protocoltest.New(mySession) +// +// // Create a OutputService7ProtocolTest client with additional configuration +// svc := outputservice7protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService7ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService7ProtocolTest { + c := p.ClientConfig("outputservice7protocoltest", cfgs...) + return newOutputService7ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService7ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService7ProtocolTest { + svc := &OutputService7ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice7protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService7ProtocolTest operation and runs any +// custom request initialization. 
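// The OutputService6 shape above covers XML maps: a non-flattened map is a
// sequence of <entry> elements, each holding a <key> and a <value>, with the
// value here unmarshaling into SingleStructure (so its foo element fills the
// Foo field). A hedged sketch of a wire shape the decoder accepts, with
// placeholder values:
//
//	<Map>
//	  <entry><key>qux</key><value><foo>bar</foo></value></entry>
//	</Map>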
+func (c *OutputService7ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService7TestCaseOperation1 = "OperationName" + +// OutputService7TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService7TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService7TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService7TestCaseOperation1Request method. +// req, resp := client.OutputService7TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1Request(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (req *request.Request, output *OutputService7TestShapeOutputService7TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService7TestCaseOperation1, + } + + if input == nil { + input = &OutputService7TestShapeOutputService7TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService7TestShapeOutputService7TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (*OutputService7TestShapeOutputService7TestCaseOperation1Output, error) { + req, out := c.OutputService7TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService7TestShapeOutputService7TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService7TestShapeOutputService7TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Map map[string]*string `type:"map" flattened:"true"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService8ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService8ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService8ProtocolTest client from just a session. +// svc := outputservice8protocoltest.New(mySession) +// +// // Create a OutputService8ProtocolTest client with additional configuration +// svc := outputservice8protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService8ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService8ProtocolTest { + c := p.ClientConfig("outputservice8protocoltest", cfgs...) + return newOutputService8ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newOutputService8ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService8ProtocolTest { + svc := &OutputService8ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice8protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService8ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService8TestCaseOperation1 = "OperationName" + +// OutputService8TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService8TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService8TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService8TestCaseOperation1Request method. +// req, resp := client.OutputService8TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1Request(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (req *request.Request, output *OutputService8TestShapeOutputService8TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService8TestCaseOperation1, + } + + if input == nil { + input = &OutputService8TestShapeOutputService8TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService8TestShapeOutputService8TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (*OutputService8TestShapeOutputService8TestCaseOperation1Output, error) { + req, out := c.OutputService8TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService8TestShapeOutputService8TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService8TestShapeOutputService8TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Map map[string]*string `locationNameKey:"foo" locationNameValue:"bar" type:"map"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
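// OutputService7 and OutputService8 vary the map encoding: `flattened:"true"`
// repeats the map element itself instead of nesting <entry> wrappers, while
// `locationNameKey:"foo"` and `locationNameValue:"bar"` rename the key and
// value elements inside each entry. Hedged sketches of the two wire shapes,
// with placeholder values:
//
//	<Map><key>qux</key><value>bar</value></Map>            // flattened:"true", repeated per pair
//	<Map><entry><foo>qux</foo><bar>bam</bar></entry></Map> // renamed key/value elements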
+type OutputService9ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService9ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService9ProtocolTest client from just a session. +// svc := outputservice9protocoltest.New(mySession) +// +// // Create a OutputService9ProtocolTest client with additional configuration +// svc := outputservice9protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService9ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService9ProtocolTest { + c := p.ClientConfig("outputservice9protocoltest", cfgs...) + return newOutputService9ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService9ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService9ProtocolTest { + svc := &OutputService9ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice9protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService9ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService9TestCaseOperation1 = "OperationName" + +// OutputService9TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService9TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService9TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService9TestCaseOperation1Request method. 
+// req, resp := client.OutputService9TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1Request(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (req *request.Request, output *OutputService9TestShapeOutputService9TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService9TestCaseOperation1, + } + + if input == nil { + input = &OutputService9TestShapeOutputService9TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService9TestShapeOutputService9TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (*OutputService9TestShapeOutputService9TestCaseOperation1Output, error) { + req, out := c.OutputService9TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService9TestShapeOutputService9TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService9TestShapeOutputService9TestCaseOperation1Output struct { + _ struct{} `type:"structure" payload:"Data"` + + Data *OutputService9TestShapeSingleStructure `type:"structure"` + + Header *string `location:"header" locationName:"X-Foo" type:"string"` +} + +type OutputService9TestShapeSingleStructure struct { + _ struct{} `type:"structure"` + + Foo *string `type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService10ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService10ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService10ProtocolTest client from just a session. +// svc := outputservice10protocoltest.New(mySession) +// +// // Create a OutputService10ProtocolTest client with additional configuration +// svc := outputservice10protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService10ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService10ProtocolTest { + c := p.ClientConfig("outputservice10protocoltest", cfgs...) + return newOutputService10ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService10ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService10ProtocolTest { + svc := &OutputService10ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice10protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService10ProtocolTest operation and runs any +// custom request initialization. 
+func (c *OutputService10ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService10TestCaseOperation1 = "OperationName" + +// OutputService10TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService10TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService10TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService10TestCaseOperation1Request method. +// req, resp := client.OutputService10TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService10ProtocolTest) OutputService10TestCaseOperation1Request(input *OutputService10TestShapeOutputService10TestCaseOperation1Input) (req *request.Request, output *OutputService10TestShapeOutputService10TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService10TestCaseOperation1, + } + + if input == nil { + input = &OutputService10TestShapeOutputService10TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService10TestShapeOutputService10TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService10ProtocolTest) OutputService10TestCaseOperation1(input *OutputService10TestShapeOutputService10TestCaseOperation1Input) (*OutputService10TestShapeOutputService10TestCaseOperation1Output, error) { + req, out := c.OutputService10TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService10TestShapeOutputService10TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService10TestShapeOutputService10TestCaseOperation1Output struct { + _ struct{} `type:"structure" payload:"Stream"` + + Stream []byte `type:"blob"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService11ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService11ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService11ProtocolTest client from just a session. +// svc := outputservice11protocoltest.New(mySession) +// +// // Create a OutputService11ProtocolTest client with additional configuration +// svc := outputservice11protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService11ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService11ProtocolTest { + c := p.ClientConfig("outputservice11protocoltest", cfgs...) 
+ return newOutputService11ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService11ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService11ProtocolTest { + svc := &OutputService11ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice11protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService11ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService11ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService11TestCaseOperation1 = "OperationName" + +// OutputService11TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService11TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService11TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService11TestCaseOperation1Request method. 
+// req, resp := client.OutputService11TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService11ProtocolTest) OutputService11TestCaseOperation1Request(input *OutputService11TestShapeOutputService11TestCaseOperation1Input) (req *request.Request, output *OutputService11TestShapeOutputService11TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService11TestCaseOperation1, + } + + if input == nil { + input = &OutputService11TestShapeOutputService11TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService11TestShapeOutputService11TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService11ProtocolTest) OutputService11TestCaseOperation1(input *OutputService11TestShapeOutputService11TestCaseOperation1Input) (*OutputService11TestShapeOutputService11TestCaseOperation1Output, error) { + req, out := c.OutputService11TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService11TestShapeOutputService11TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService11TestShapeOutputService11TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Char *string `location:"header" locationName:"x-char" type:"character"` + + Double *float64 `location:"header" locationName:"x-double" type:"double"` + + FalseBool *bool `location:"header" locationName:"x-false-bool" type:"boolean"` + + Float *float64 `location:"header" locationName:"x-float" type:"float"` + + Integer *int64 `location:"header" locationName:"x-int" type:"integer"` + + Long *int64 `location:"header" locationName:"x-long" type:"long"` + + Str *string `location:"header" locationName:"x-str" type:"string"` + + Timestamp *time.Time `location:"header" locationName:"x-timestamp" type:"timestamp" timestampFormat:"iso8601"` + + TrueBool *bool `location:"header" locationName:"x-true-bool" type:"boolean"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService12ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService12ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService12ProtocolTest client from just a session. +// svc := outputservice12protocoltest.New(mySession) +// +// // Create a OutputService12ProtocolTest client with additional configuration +// svc := outputservice12protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService12ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService12ProtocolTest { + c := p.ClientConfig("outputservice12protocoltest", cfgs...) + return newOutputService12ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newOutputService12ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService12ProtocolTest { + svc := &OutputService12ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice12protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService12ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService12ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService12TestCaseOperation1 = "OperationName" + +// OutputService12TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the OutputService12TestCaseOperation1 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OutputService12TestCaseOperation1 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OutputService12TestCaseOperation1Request method. 
+// req, resp := client.OutputService12TestCaseOperation1Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OutputService12ProtocolTest) OutputService12TestCaseOperation1Request(input *OutputService12TestShapeOutputService12TestCaseOperation1Input) (req *request.Request, output *OutputService12TestShapeOutputService12TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService12TestCaseOperation1, + } + + if input == nil { + input = &OutputService12TestShapeOutputService12TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService12TestShapeOutputService12TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService12ProtocolTest) OutputService12TestCaseOperation1(input *OutputService12TestShapeOutputService12TestCaseOperation1Input) (*OutputService12TestShapeOutputService12TestCaseOperation1Output, error) { + req, out := c.OutputService12TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService12TestShapeOutputService12TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService12TestShapeOutputService12TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Foo *string `type:"string"` +} + +// +// Tests begin here +// + +func TestOutputService1ProtocolTestScalarMembersCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("myname123falsetrue1.21.3200a2015-01-25T08:00:00Z")) + req, out := svc.OutputService1TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + req.HTTPResponse.Header.Set("ImaHeader", "test") + req.HTTPResponse.Header.Set("X-Foo", "abc") + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.Char) + assert.Equal(t, 1.3, *out.Double) + assert.Equal(t, false, *out.FalseBool) + assert.Equal(t, 1.2, *out.Float) + assert.Equal(t, "test", *out.ImaHeader) + assert.Equal(t, "abc", *out.ImaHeaderLocation) + assert.Equal(t, int64(200), *out.Long) + assert.Equal(t, int64(123), *out.Num) + assert.Equal(t, "myname", *out.Str) + assert.Equal(t, time.Unix(1.4221728e+09, 0).UTC().String(), out.Timestamp.String()) + assert.Equal(t, true, *out.TrueBool) + +} + +func TestOutputService1ProtocolTestScalarMembersCase2(t *testing.T) { + sess := session.New() + svc := NewOutputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("123falsetrue1.21.3200a2015-01-25T08:00:00Z")) + req, out := svc.OutputService1TestCaseOperation2Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + req.HTTPResponse.Header.Set("ImaHeader", "test") + req.HTTPResponse.Header.Set("X-Foo", "abc") + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.Char) + assert.Equal(t, 1.3, *out.Double) + assert.Equal(t, false, *out.FalseBool) + assert.Equal(t, 1.2, *out.Float) + assert.Equal(t, "test", *out.ImaHeader) + 
assert.Equal(t, "abc", *out.ImaHeaderLocation) + assert.Equal(t, int64(200), *out.Long) + assert.Equal(t, int64(123), *out.Num) + assert.Equal(t, "", *out.Str) + assert.Equal(t, time.Unix(1.4221728e+09, 0).UTC().String(), out.Timestamp.String()) + assert.Equal(t, true, *out.TrueBool) + +} + +func TestOutputService2ProtocolTestBlobCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("dmFsdWU=")) + req, out := svc.OutputService2TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "value", string(out.Blob)) + +} + +func TestOutputService3ProtocolTestListsCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abc123")) + req, out := svc.OutputService3TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + assert.Equal(t, "123", *out.ListMember[1]) + +} + +func TestOutputService4ProtocolTestListWithCustomMemberNameCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abc123")) + req, out := svc.OutputService4TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + assert.Equal(t, "123", *out.ListMember[1]) + +} + +func TestOutputService5ProtocolTestFlattenedListCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abc123")) + req, out := svc.OutputService5TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + assert.Equal(t, "123", *out.ListMember[1]) + +} + +func TestOutputService6ProtocolTestNormalMapCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("quxbarbazbam")) + req, out := svc.OutputService6TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + 
restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bam", *out.Map["baz"].Foo) + assert.Equal(t, "bar", *out.Map["qux"].Foo) + +} + +func TestOutputService7ProtocolTestFlattenedMapCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService7ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("quxbarbazbam")) + req, out := svc.OutputService7TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bam", *out.Map["baz"]) + assert.Equal(t, "bar", *out.Map["qux"]) + +} + +func TestOutputService8ProtocolTestNamedMapCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService8ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("quxbarbazbam")) + req, out := svc.OutputService8TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bam", *out.Map["baz"]) + assert.Equal(t, "bar", *out.Map["qux"]) + +} + +func TestOutputService9ProtocolTestXMLPayloadCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService9ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abc")) + req, out := svc.OutputService9TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + req.HTTPResponse.Header.Set("X-Foo", "baz") + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.Data.Foo) + assert.Equal(t, "baz", *out.Header) + +} + +func TestOutputService10ProtocolTestStreamingPayloadCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService10ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abc")) + req, out := svc.OutputService10TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", string(out.Stream)) + +} + +func TestOutputService11ProtocolTestScalarMembersInHeadersCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService11ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("")) + req, out := svc.OutputService11TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + req.HTTPResponse.Header.Set("x-char", "a") + 
req.HTTPResponse.Header.Set("x-double", "1.5")
+	req.HTTPResponse.Header.Set("x-false-bool", "false")
+	req.HTTPResponse.Header.Set("x-float", "1.5")
+	req.HTTPResponse.Header.Set("x-int", "1")
+	req.HTTPResponse.Header.Set("x-long", "100")
+	req.HTTPResponse.Header.Set("x-str", "string")
+	req.HTTPResponse.Header.Set("x-timestamp", "Sun, 25 Jan 2015 08:00:00 GMT")
+	req.HTTPResponse.Header.Set("x-true-bool", "true")
+
+	// unmarshal response
+	restxml.UnmarshalMeta(req)
+	restxml.Unmarshal(req)
+	assert.NoError(t, req.Error)
+
+	// assert response
+	assert.NotNil(t, out) // ensure out variable is used
+	assert.Equal(t, "a", *out.Char)
+	assert.Equal(t, 1.5, *out.Double)
+	assert.Equal(t, false, *out.FalseBool)
+	assert.Equal(t, 1.5, *out.Float)
+	assert.Equal(t, int64(1), *out.Integer)
+	assert.Equal(t, int64(100), *out.Long)
+	assert.Equal(t, "string", *out.Str)
+	assert.Equal(t, time.Unix(1.4221728e+09, 0).UTC().String(), out.Timestamp.String())
+	assert.Equal(t, true, *out.TrueBool)
+
+}
+
+func TestOutputService12ProtocolTestEmptyStringCase1(t *testing.T) {
+	sess := session.New()
+	svc := NewOutputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+	buf := bytes.NewReader([]byte("requestid"))
+	req, out := svc.OutputService12TestCaseOperation1Request(nil)
+	req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+	// set headers
+
+	// unmarshal response
+	restxml.UnmarshalMeta(req)
+	restxml.Unmarshal(req)
+	assert.NoError(t, req.Error)
+
+	// assert response
+	assert.NotNil(t, out) // ensure out variable is used
+	assert.Equal(t, "", *out.Foo)
+
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
new file mode 100644
index 000000000..da1a68111
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
@@ -0,0 +1,21 @@
+package protocol
+
+import (
+	"io"
+	"io/ioutil"
+
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body
+var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody}
+
+// UnmarshalDiscardBody is a request handler to empty and close a response's body.
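+//
+// A minimal wiring sketch (service value assumed; PushBackNamed as used
+// throughout this SDK): operations with no response payload can register the
+// named handler so the body is drained and the connection can be reused:
+//
+//    svc.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)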
+func UnmarshalDiscardBody(r *request.Request) { + if r.HTTPResponse == nil || r.HTTPResponse.Body == nil { + return + } + + io.Copy(ioutil.Discard, r.HTTPResponse.Body) + r.HTTPResponse.Body.Close() +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_test.go new file mode 100644 index 000000000..2733e993d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_test.go @@ -0,0 +1,40 @@ +package protocol_test + +import ( + "net/http" + "strings" + "testing" + + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/stretchr/testify/assert" +) + +type mockCloser struct { + *strings.Reader + Closed bool +} + +func (m *mockCloser) Close() error { + m.Closed = true + return nil +} + +func TestUnmarshalDrainBody(t *testing.T) { + b := &mockCloser{Reader: strings.NewReader("example body")} + r := &request.Request{HTTPResponse: &http.Response{ + Body: b, + }} + + protocol.UnmarshalDiscardBody(r) + assert.NoError(t, r.Error) + assert.Equal(t, 0, b.Len()) + assert.True(t, b.Closed) +} + +func TestUnmarshalDrainBodyNoBody(t *testing.T) { + r := &request.Request{HTTPResponse: &http.Response{}} + + protocol.UnmarshalDiscardBody(r) + assert.NoError(t, r.Error) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go new file mode 100644 index 000000000..ceb4132c5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go @@ -0,0 +1,293 @@ +// Package xmlutil provides XML serialisation of AWS requests and responses. +package xmlutil + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + "reflect" + "sort" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +// BuildXML will serialize params into an xml.Encoder. +// Error will be returned if the serialization of any of the params or nested values fails. +func BuildXML(params interface{}, e *xml.Encoder) error { + b := xmlBuilder{encoder: e, namespaces: map[string]string{}} + root := NewXMLElement(xml.Name{}) + if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil { + return err + } + for _, c := range root.Children { + for _, v := range c { + return StructToXML(e, v, false) + } + } + return nil +} + +// Returns the reflection element of a value, if it is a pointer. +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +// A xmlBuilder serializes values from Go code to XML +type xmlBuilder struct { + encoder *xml.Encoder + namespaces map[string]string +} + +// buildValue generic XMLNode builder for any type. Will build value for their specific type +// struct, list, map, scalar. +// +// Also takes a "type" tag value to set what type a value should be converted to XMLNode as. If +// type is not provided reflect will be used to determine the value's type. 
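+//
+// For example (hypothetical field), a struct tag such as
+//
+//    Foo []*string `type:"list" locationName:"FooList"`
+//
+// routes the value to the list builder, while an untagged slice is inferred
+// as a list via reflection.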
+func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+	value = elemOf(value)
+	if !value.IsValid() { // no need to handle zero values
+		return nil
+	} else if tag.Get("location") != "" { // don't handle non-body location values
+		return nil
+	}
+
+	t := tag.Get("type")
+	if t == "" {
+		switch value.Kind() {
+		case reflect.Struct:
+			t = "structure"
+		case reflect.Slice:
+			t = "list"
+		case reflect.Map:
+			t = "map"
+		}
+	}
+
+	switch t {
+	case "structure":
+		if field, ok := value.Type().FieldByName("_"); ok {
+			tag = tag + reflect.StructTag(" ") + field.Tag
+		}
+		return b.buildStruct(value, current, tag)
+	case "list":
+		return b.buildList(value, current, tag)
+	case "map":
+		return b.buildMap(value, current, tag)
+	default:
+		return b.buildScalar(value, current, tag)
+	}
+}
+
+// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested
+// types are converted to XMLNodes also.
+func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+	if !value.IsValid() {
+		return nil
+	}
+
+	fieldAdded := false
+
+	// unwrap payloads
+	if payload := tag.Get("payload"); payload != "" {
+		field, _ := value.Type().FieldByName(payload)
+		tag = field.Tag
+		value = elemOf(value.FieldByName(payload))
+
+		if !value.IsValid() {
+			return nil
+		}
+	}
+
+	child := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
+
+	// there is an xmlNamespace associated with this struct
+	if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" {
+		ns := xml.Attr{
+			Name:  xml.Name{Local: "xmlns"},
+			Value: uri,
+		}
+		if prefix != "" {
+			b.namespaces[prefix] = uri // register the namespace
+			ns.Name.Local = "xmlns:" + prefix
+		}
+
+		child.Attr = append(child.Attr, ns)
+	}
+
+	t := value.Type()
+	for i := 0; i < value.NumField(); i++ {
+		member := elemOf(value.Field(i))
+		field := t.Field(i)
+
+		if field.PkgPath != "" {
+			continue // ignore unexported fields
+		}
+
+		mTag := field.Tag
+		if mTag.Get("location") != "" { // skip non-body members
+			continue
+		}
+
+		if protocol.CanSetIdempotencyToken(value.Field(i), field) {
+			token := protocol.GetIdempotencyToken()
+			member = reflect.ValueOf(token)
+		}
+
+		memberName := mTag.Get("locationName")
+		if memberName == "" {
+			memberName = field.Name
+			mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`)
+		}
+		if err := b.buildValue(member, child, mTag); err != nil {
+			return err
+		}
+
+		fieldAdded = true
+	}
+
+	if fieldAdded { // only append this child if we have one or more valid members
+		current.AddChild(child)
+	}
+
+	return nil
+}
+
+// buildList adds the value's list items to the current XMLNode as children nodes. All
+// nested values in the list are converted to XMLNodes also.
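+//
+// Sketch of the two resulting shapes (element names assumed): a flattened
+// list emits repeated sibling elements, while an unflattened list wraps each
+// entry in a "member" (or locationNameList) element inside the named node:
+//
+//    <Flat>a</Flat><Flat>b</Flat>                      flattened
+//    <List><member>a</member><member>b</member></List> wrapped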
+func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted lists + return nil + } + + // check for unflattened list member + flattened := tag.Get("flattened") != "" + + xname := xml.Name{Local: tag.Get("locationName")} + if flattened { + for i := 0; i < value.Len(); i++ { + child := NewXMLElement(xname) + current.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } else { + list := NewXMLElement(xname) + current.AddChild(list) + + for i := 0; i < value.Len(); i++ { + iname := tag.Get("locationNameList") + if iname == "" { + iname = "member" + } + + child := NewXMLElement(xml.Name{Local: iname}) + list.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } + + return nil +} + +// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All +// nested values in the map are converted to XMLNodes also. +// +// Error will be returned if it is unable to build the map's values into XMLNodes +func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted maps + return nil + } + + maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + current.AddChild(maproot) + current = maproot + + kname, vname := "key", "value" + if n := tag.Get("locationNameKey"); n != "" { + kname = n + } + if n := tag.Get("locationNameValue"); n != "" { + vname = n + } + + // sorting is not required for compliance, but it makes testing easier + keys := make([]string, value.Len()) + for i, k := range value.MapKeys() { + keys[i] = k.String() + } + sort.Strings(keys) + + for _, k := range keys { + v := value.MapIndex(reflect.ValueOf(k)) + + mapcur := current + if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps + child := NewXMLElement(xml.Name{Local: "entry"}) + mapcur.AddChild(child) + mapcur = child + } + + kchild := NewXMLElement(xml.Name{Local: kname}) + kchild.Text = k + vchild := NewXMLElement(xml.Name{Local: vname}) + mapcur.AddChild(kchild) + mapcur.AddChild(vchild) + + if err := b.buildValue(v, vchild, ""); err != nil { + return err + } + } + + return nil +} + +// buildScalar will convert the value into a string and append it as a attribute or child +// of the current XMLNode. +// +// The value will be added as an attribute if tag contains a "xmlAttribute" attribute value. +// +// Error will be returned if the value type is unsupported. 
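+//
+// A rough mapping of the supported conversions (sample values assumed):
+//
+//    true            -> "true"
+//    int64(200)      -> "200"
+//    []byte("value") -> "dmFsdWU="              (base64)
+//    time.Time (UTC) -> "2015-01-25T08:00:00Z"  (ISO 8601)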
+func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + var str string + switch converted := value.Interface().(type) { + case string: + str = converted + case []byte: + if !value.IsNil() { + str = base64.StdEncoding.EncodeToString(converted) + } + case bool: + str = strconv.FormatBool(converted) + case int64: + str = strconv.FormatInt(converted, 10) + case int: + str = strconv.Itoa(converted) + case float64: + str = strconv.FormatFloat(converted, 'f', -1, 64) + case float32: + str = strconv.FormatFloat(float64(converted), 'f', -1, 32) + case time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + str = converted.UTC().Format(ISO8601UTC) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", + tag.Get("locationName"), value.Interface(), value.Type().Name()) + } + + xname := xml.Name{Local: tag.Get("locationName")} + if tag.Get("xmlAttribute") != "" { // put into current node's attribute list + attr := xml.Attr{Name: xname, Value: str} + current.Attr = append(current.Attr, attr) + } else { // regular text node + current.AddChild(&XMLNode{Name: xname, Text: str}) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go new file mode 100644 index 000000000..49f291a85 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -0,0 +1,260 @@ +package xmlutil + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "time" +) + +// UnmarshalXML deserializes an xml.Decoder into the container v. V +// needs to match the shape of the XML expected to be decoded. +// If the shape doesn't match unmarshaling will fail. +func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { + n, _ := XMLToStruct(d, nil) + if n.Children != nil { + for _, root := range n.Children { + for _, c := range root { + if wrappedChild, ok := c.Children[wrapper]; ok { + c = wrappedChild[0] // pull out wrapped element + } + + err := parse(reflect.ValueOf(v), c, "") + if err != nil { + if err == io.EOF { + return nil + } + return err + } + } + } + return nil + } + return nil +} + +// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect +// will be used to determine the type from r. +func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + rtype := r.Type() + if rtype.Kind() == reflect.Ptr { + rtype = rtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch rtype.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := rtype.FieldByName("_"); ok { + tag = field.Tag + } + return parseStruct(r, node, tag) + case "list": + return parseList(r, node, tag) + case "map": + return parseMap(r, node, tag) + default: + return parseScalar(r, node, tag) + } +} + +// parseStruct deserializes a structure and its fields from an XMLNode. Any nested +// types in the structure will also be deserialized. 
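+//
+// Sketch (hypothetical shape): given a child node <Foo>abc</Foo> and a struct
+// field `Foo *string`, the element is matched by field name or locationName
+// and filled through a recursive parse call; attributes are consulted when no
+// matching element is found.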
+func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	t := r.Type()
+	if r.Kind() == reflect.Ptr {
+		if r.IsNil() { // create the structure if it's nil
+			s := reflect.New(r.Type().Elem())
+			r.Set(s)
+			r = s
+		}
+
+		r = r.Elem()
+		t = t.Elem()
+	}
+
+	// unwrap any payloads
+	if payload := tag.Get("payload"); payload != "" {
+		field, _ := t.FieldByName(payload)
+		return parseStruct(r.FieldByName(payload), node, field.Tag)
+	}
+
+	for i := 0; i < t.NumField(); i++ {
+		field := t.Field(i)
+		if c := field.Name[0:1]; strings.ToLower(c) == c {
+			continue // ignore unexported fields
+		}
+
+		// figure out what this field is called
+		name := field.Name
+		if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
+			name = field.Tag.Get("locationNameList")
+		} else if locName := field.Tag.Get("locationName"); locName != "" {
+			name = locName
+		}
+
+		// try to find the field by name in elements
+		elems := node.Children[name]
+
+		if elems == nil { // try to find the field in attributes
+			for _, a := range node.Attr {
+				if name == a.Name.Local {
+					// turn this into a text node for de-serializing
+					elems = []*XMLNode{{Text: a.Value}}
+				}
+			}
+		}
+
+		member := r.FieldByName(field.Name)
+		for _, elem := range elems {
+			err := parse(member, elem, field.Tag)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// parseList deserializes a list of values from an XML node. Each list entry
+// will also be deserialized.
+func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	t := r.Type()
+
+	if tag.Get("flattened") == "" { // look at all item entries
+		mname := "member"
+		if name := tag.Get("locationNameList"); name != "" {
+			mname = name
+		}
+
+		if Children, ok := node.Children[mname]; ok {
+			if r.IsNil() {
+				r.Set(reflect.MakeSlice(t, len(Children), len(Children)))
+			}
+
+			for i, c := range Children {
+				err := parse(r.Index(i), c, "")
+				if err != nil {
+					return err
+				}
+			}
+		}
+	} else { // flattened list means this is a single element
+		if r.IsNil() {
+			r.Set(reflect.MakeSlice(t, 0, 0))
+		}
+
+		childR := reflect.Zero(t.Elem())
+		r.Set(reflect.Append(r, childR))
+		err := parse(r.Index(r.Len()-1), node, "")
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode
+// will also be deserialized as map entries.
+func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	if r.IsNil() {
+		r.Set(reflect.MakeMap(r.Type()))
+	}
+
+	if tag.Get("flattened") == "" { // look at all child entries
+		for _, entry := range node.Children["entry"] {
+			parseMapEntry(r, entry, tag)
+		}
+	} else { // this element is itself an entry
+		parseMapEntry(r, node, tag)
+	}
+
+	return nil
+}
+
+// parseMapEntry deserializes a map entry from an XML node.
+func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	kname, vname := "key", "value"
+	if n := tag.Get("locationNameKey"); n != "" {
+		kname = n
+	}
+	if n := tag.Get("locationNameValue"); n != "" {
+		vname = n
+	}
+
+	keys, ok := node.Children[kname]
+	values := node.Children[vname]
+	if ok {
+		for i, key := range keys {
+			keyR := reflect.ValueOf(key.Text)
+			value := values[i]
+			valueR := reflect.New(r.Type().Elem()).Elem()
+
+			parse(valueR, value, "")
+			r.SetMapIndex(keyR, valueR)
+		}
+	}
+	return nil
+}
+
+// parseScalar deserializes an XMLNode value into a concrete type based on the
+// interface type of r.
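+// Supported targets are *string, *bool, *int64, *float64, and *time.Time,
+// plus []byte values, which are base64-decoded.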
+// +// Error is returned if the deserialization fails due to invalid type conversion, +// or unsupported interface type. +func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + switch r.Interface().(type) { + case *string: + r.Set(reflect.ValueOf(&node.Text)) + return nil + case []byte: + b, err := base64.StdEncoding.DecodeString(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(b)) + case *bool: + v, err := strconv.ParseBool(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *int64: + v, err := strconv.ParseInt(node.Text, 10, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *float64: + v, err := strconv.ParseFloat(node.Text, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + t, err := time.Parse(ISO8601UTC, node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&t)) + default: + return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type()) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go new file mode 100644 index 000000000..72c198a9d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go @@ -0,0 +1,105 @@ +package xmlutil + +import ( + "encoding/xml" + "io" + "sort" +) + +// A XMLNode contains the values to be encoded or decoded. +type XMLNode struct { + Name xml.Name `json:",omitempty"` + Children map[string][]*XMLNode `json:",omitempty"` + Text string `json:",omitempty"` + Attr []xml.Attr `json:",omitempty"` +} + +// NewXMLElement returns a pointer to a new XMLNode initialized to default values. +func NewXMLElement(name xml.Name) *XMLNode { + return &XMLNode{ + Name: name, + Children: map[string][]*XMLNode{}, + Attr: []xml.Attr{}, + } +} + +// AddChild adds child to the XMLNode. +func (n *XMLNode) AddChild(child *XMLNode) { + if _, ok := n.Children[child.Name.Local]; !ok { + n.Children[child.Name.Local] = []*XMLNode{} + } + n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child) +} + +// XMLToStruct converts a xml.Decoder stream to XMLNode with nested values. +func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) { + out := &XMLNode{} + for { + tok, err := d.Token() + if tok == nil || err == io.EOF { + break + } + if err != nil { + return out, err + } + + switch typed := tok.(type) { + case xml.CharData: + out.Text = string(typed.Copy()) + case xml.StartElement: + el := typed.Copy() + out.Attr = el.Attr + if out.Children == nil { + out.Children = map[string][]*XMLNode{} + } + + name := typed.Name.Local + slice := out.Children[name] + if slice == nil { + slice = []*XMLNode{} + } + node, e := XMLToStruct(d, &el) + if e != nil { + return out, e + } + node.Name = typed.Name + slice = append(slice, node) + out.Children[name] = slice + case xml.EndElement: + if s != nil && s.Name.Local == typed.Name.Local { // matching end token + return out, nil + } + } + } + return out, nil +} + +// StructToXML writes an XMLNode to a xml.Encoder as tokens. 
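+//
+// A round-trip sketch (reader r and buffer setup assumed; this mirrors the
+// SortXML helper in private/util):
+//
+//    root, _ := XMLToStruct(xml.NewDecoder(r), nil)
+//    var buf bytes.Buffer
+//    e := xml.NewEncoder(&buf)
+//    StructToXML(e, root, true) // sorted children give stable output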
+func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error {
+	e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr})
+
+	if node.Text != "" {
+		e.EncodeToken(xml.CharData([]byte(node.Text)))
+	} else if sorted {
+		sortedNames := []string{}
+		for k := range node.Children {
+			sortedNames = append(sortedNames, k)
+		}
+		sort.Strings(sortedNames)
+
+		for _, k := range sortedNames {
+			for _, v := range node.Children[k] {
+				StructToXML(e, v, sorted)
+			}
+		}
+	} else {
+		for _, c := range node.Children {
+			for _, v := range c {
+				StructToXML(e, v, sorted)
+			}
+		}
+	}
+
+	e.EncodeToken(xml.EndElement{Name: node.Name})
+	return e.Flush()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/signer/v2/v2.go b/vendor/github.com/aws/aws-sdk-go/private/signer/v2/v2.go
new file mode 100644
index 000000000..88c3a2f61
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/signer/v2/v2.go
@@ -0,0 +1,180 @@
+package v2
+
+import (
+	"crypto/hmac"
+	"crypto/sha256"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+var (
+	errInvalidMethod = errors.New("v2 signer only handles HTTP POST")
+)
+
+const (
+	signatureVersion = "2"
+	signatureMethod  = "HmacSHA256"
+	timeFormat       = "2006-01-02T15:04:05Z"
+)
+
+type signer struct {
+	// Values that must be populated from the request
+	Request     *http.Request
+	Time        time.Time
+	Credentials *credentials.Credentials
+	Debug       aws.LogLevelType
+	Logger      aws.Logger
+
+	Query        url.Values
+	stringToSign string
+	signature    string
+}
+
+// SignRequestHandler is a named request handler the SDK will use to sign
+// service client requests with the V2 signature.
+var SignRequestHandler = request.NamedHandler{
+	Name: "v2.SignRequestHandler", Fn: SignSDKRequest,
+}
+
+// SignSDKRequest signs requests with signature version 2.
+//
+// Will sign the requests with the service config's Credentials object.
+// Signing is skipped if the credentials object is
+// credentials.AnonymousCredentials.
+func SignSDKRequest(req *request.Request) {
+	// If the request does not need to be signed ignore the signing of the
+	// request if the AnonymousCredentials object is used.
+	if req.Config.Credentials == credentials.AnonymousCredentials {
+		return
+	}
+
+	if req.HTTPRequest.Method != "POST" && req.HTTPRequest.Method != "GET" {
+		// The V2 signer only supports GET and POST
+		req.Error = errInvalidMethod
+		return
+	}
+
+	v2 := signer{
+		Request:     req.HTTPRequest,
+		Time:        req.Time,
+		Credentials: req.Config.Credentials,
+		Debug:       req.Config.LogLevel.Value(),
+		Logger:      req.Config.Logger,
+	}
+
+	req.Error = v2.Sign()
+
+	if req.Error != nil {
+		return
+	}
+
+	if req.HTTPRequest.Method == "POST" {
+		// Set the body of the request based on the modified query parameters
+		req.SetStringBody(v2.Query.Encode())
+
+		// Now that the body has changed, remove any Content-Length header,
+		// because it will be incorrect
+		req.HTTPRequest.ContentLength = 0
+		req.HTTPRequest.Header.Del("Content-Length")
+	} else {
+		req.HTTPRequest.URL.RawQuery = v2.Query.Encode()
+	}
+}
+
+func (v2 *signer) Sign() error {
+	credValue, err := v2.Credentials.Get()
+	if err != nil {
+		return err
+	}
+
+	if v2.Request.Method == "POST" {
+		// Parse the HTTP request to obtain the query parameters that will
+		// be used to build the string to sign.
Note that because the HTTP + // request will need to be modified, the PostForm and Form properties + // are reset to nil after parsing. + v2.Request.ParseForm() + v2.Query = v2.Request.PostForm + v2.Request.PostForm = nil + v2.Request.Form = nil + } else { + v2.Query = v2.Request.URL.Query() + } + + // Set new query parameters + v2.Query.Set("AWSAccessKeyId", credValue.AccessKeyID) + v2.Query.Set("SignatureVersion", signatureVersion) + v2.Query.Set("SignatureMethod", signatureMethod) + v2.Query.Set("Timestamp", v2.Time.UTC().Format(timeFormat)) + if credValue.SessionToken != "" { + v2.Query.Set("SecurityToken", credValue.SessionToken) + } + + // in case this is a retry, ensure no signature present + v2.Query.Del("Signature") + + method := v2.Request.Method + host := v2.Request.URL.Host + path := v2.Request.URL.Path + if path == "" { + path = "/" + } + + // obtain all of the query keys and sort them + queryKeys := make([]string, 0, len(v2.Query)) + for key := range v2.Query { + queryKeys = append(queryKeys, key) + } + sort.Strings(queryKeys) + + // build URL-encoded query keys and values + queryKeysAndValues := make([]string, len(queryKeys)) + for i, key := range queryKeys { + k := strings.Replace(url.QueryEscape(key), "+", "%20", -1) + v := strings.Replace(url.QueryEscape(v2.Query.Get(key)), "+", "%20", -1) + queryKeysAndValues[i] = k + "=" + v + } + + // join into one query string + query := strings.Join(queryKeysAndValues, "&") + + // build the canonical string for the V2 signature + v2.stringToSign = strings.Join([]string{ + method, + host, + path, + query, + }, "\n") + + hash := hmac.New(sha256.New, []byte(credValue.SecretAccessKey)) + hash.Write([]byte(v2.stringToSign)) + v2.signature = base64.StdEncoding.EncodeToString(hash.Sum(nil)) + v2.Query.Set("Signature", v2.signature) + + if v2.Debug.Matches(aws.LogDebugWithSigning) { + v2.logSigningInfo() + } + + return nil +} + +const logSignInfoMsg = `DEBUG: Request Signature: +---[ STRING TO SIGN ]-------------------------------- +%s +---[ SIGNATURE ]------------------------------------- +%s +-----------------------------------------------------` + +func (v2 *signer) logSigningInfo() { + msg := fmt.Sprintf(logSignInfoMsg, v2.stringToSign, v2.Query.Get("Signature")) + v2.Logger.Log(msg) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/signer/v2/v2_test.go b/vendor/github.com/aws/aws-sdk-go/private/signer/v2/v2_test.go new file mode 100644 index 000000000..1e8e04b26 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/signer/v2/v2_test.go @@ -0,0 +1,195 @@ +package v2 + +import ( + "bytes" + "net/http" + "net/url" + "os" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/stretchr/testify/assert" +) + +type signerBuilder struct { + ServiceName string + Region string + SignTime time.Time + Query url.Values + Method string + SessionToken string +} + +func (sb signerBuilder) BuildSigner() signer { + endpoint := "https://" + sb.ServiceName + "." 
+ sb.Region + ".amazonaws.com" + var req *http.Request + if sb.Method == "POST" { + body := []byte(sb.Query.Encode()) + reader := bytes.NewReader(body) + req, _ = http.NewRequest(sb.Method, endpoint, reader) + req.Header.Add("Content-Type", "application/x-www-form-urlencoded") + req.Header.Add("Content-Length", string(len(body))) + } else { + req, _ = http.NewRequest(sb.Method, endpoint, nil) + req.URL.RawQuery = sb.Query.Encode() + } + + sig := signer{ + Request: req, + Time: sb.SignTime, + Credentials: credentials.NewStaticCredentials( + "AKID", + "SECRET", + sb.SessionToken), + } + + if os.Getenv("DEBUG") != "" { + sig.Debug = aws.LogDebug + sig.Logger = aws.NewDefaultLogger() + } + + return sig +} + +func TestSignRequestWithAndWithoutSession(t *testing.T) { + assert := assert.New(t) + + // have to create more than once, so use a function + newQuery := func() url.Values { + query := make(url.Values) + query.Add("Action", "CreateDomain") + query.Add("DomainName", "TestDomain-1437033376") + query.Add("Version", "2009-04-15") + return query + } + + // create request without a SecurityToken (session) in the credentials + + query := newQuery() + timestamp := time.Date(2015, 7, 16, 7, 56, 16, 0, time.UTC) + builder := signerBuilder{ + Method: "POST", + ServiceName: "sdb", + Region: "ap-southeast-2", + SignTime: timestamp, + Query: query, + } + + signer := builder.BuildSigner() + + err := signer.Sign() + assert.NoError(err) + assert.Equal("tm4dX8Ks7pzFSVHz7qHdoJVXKRLuC4gWz9eti60d8ks=", signer.signature) + assert.Equal(8, len(signer.Query)) + assert.Equal("AKID", signer.Query.Get("AWSAccessKeyId")) + assert.Equal("2015-07-16T07:56:16Z", signer.Query.Get("Timestamp")) + assert.Equal("HmacSHA256", signer.Query.Get("SignatureMethod")) + assert.Equal("2", signer.Query.Get("SignatureVersion")) + assert.Equal("tm4dX8Ks7pzFSVHz7qHdoJVXKRLuC4gWz9eti60d8ks=", signer.Query.Get("Signature")) + assert.Equal("CreateDomain", signer.Query.Get("Action")) + assert.Equal("TestDomain-1437033376", signer.Query.Get("DomainName")) + assert.Equal("2009-04-15", signer.Query.Get("Version")) + + // should not have a SecurityToken parameter + _, ok := signer.Query["SecurityToken"] + assert.False(ok) + + // now sign again, this time with a security token (session) + + query = newQuery() + builder.SessionToken = "SESSION" + signer = builder.BuildSigner() + + err = signer.Sign() + assert.NoError(err) + assert.Equal("Ch6qv3rzXB1SLqY2vFhsgA1WQ9rnQIE2WJCigOvAJwI=", signer.signature) + assert.Equal(9, len(signer.Query)) // expect one more parameter + assert.Equal("Ch6qv3rzXB1SLqY2vFhsgA1WQ9rnQIE2WJCigOvAJwI=", signer.Query.Get("Signature")) + assert.Equal("SESSION", signer.Query.Get("SecurityToken")) +} + +func TestMoreComplexSignRequest(t *testing.T) { + assert := assert.New(t) + query := make(url.Values) + query.Add("Action", "PutAttributes") + query.Add("DomainName", "TestDomain-1437041569") + query.Add("Version", "2009-04-15") + query.Add("Attribute.2.Name", "Attr2") + query.Add("Attribute.2.Value", "Value2") + query.Add("Attribute.2.Replace", "true") + query.Add("Attribute.1.Name", "Attr1-%\\+ %") + query.Add("Attribute.1.Value", " \tValue1 +!@#$%^&*(){}[]\"';:?/.>,<\x12\x00") + query.Add("Attribute.1.Replace", "true") + query.Add("ItemName", "Item 1") + + timestamp := time.Date(2015, 7, 16, 10, 12, 51, 0, time.UTC) + builder := signerBuilder{ + Method: "POST", + ServiceName: "sdb", + Region: "ap-southeast-2", + SignTime: timestamp, + Query: query, + SessionToken: "SESSION", + } + + signer := builder.BuildSigner() + + err 
:= signer.Sign() + assert.NoError(err) + assert.Equal("WNdE62UJKLKoA6XncVY/9RDbrKmcVMdQPQOTAs8SgwQ=", signer.signature) +} + +func TestGet(t *testing.T) { + assert := assert.New(t) + svc := awstesting.NewClient(&aws.Config{ + Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "SESSION"), + Region: aws.String("ap-southeast-2"), + }) + r := svc.NewRequest( + &request.Operation{ + Name: "OpName", + HTTPMethod: "GET", + HTTPPath: "/", + }, + nil, + nil, + ) + + r.Build() + assert.Equal("GET", r.HTTPRequest.Method) + assert.Equal("", r.HTTPRequest.URL.Query().Get("Signature")) + + SignSDKRequest(r) + assert.NoError(r.Error) + t.Logf("Signature: %s", r.HTTPRequest.URL.Query().Get("Signature")) + assert.NotEqual("", r.HTTPRequest.URL.Query().Get("Signature")) +} + +func TestAnonymousCredentials(t *testing.T) { + assert := assert.New(t) + svc := awstesting.NewClient(&aws.Config{ + Credentials: credentials.AnonymousCredentials, + Region: aws.String("ap-southeast-2"), + }) + r := svc.NewRequest( + &request.Operation{ + Name: "PutAttributes", + HTTPMethod: "POST", + HTTPPath: "/", + }, + nil, + nil, + ) + r.Build() + + SignSDKRequest(r) + + req := r.HTTPRequest + req.ParseForm() + + assert.Empty(req.PostForm.Get("Signature")) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/util/sort_keys.go b/vendor/github.com/aws/aws-sdk-go/private/util/sort_keys.go new file mode 100644 index 000000000..48000565c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/util/sort_keys.go @@ -0,0 +1,14 @@ +package util + +import "sort" + +// SortedKeys returns a sorted slice of keys of a map. +func SortedKeys(m map[string]interface{}) []string { + i, sorted := 0, make([]string, len(m)) + for k := range m { + sorted[i] = k + i++ + } + sort.Strings(sorted) + return sorted +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/util/util.go b/vendor/github.com/aws/aws-sdk-go/private/util/util.go new file mode 100644 index 000000000..5f2dab25e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/util/util.go @@ -0,0 +1,109 @@ +package util + +import ( + "bytes" + "encoding/xml" + "fmt" + "go/format" + "io" + "reflect" + "regexp" + "strings" + + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// GoFmt returns the Go formated string of the input. +// +// Panics if the format fails. +func GoFmt(buf string) string { + formatted, err := format.Source([]byte(buf)) + if err != nil { + panic(fmt.Errorf("%s\nOriginal code:\n%s", err.Error(), buf)) + } + return string(formatted) +} + +var reTrim = regexp.MustCompile(`\s{2,}`) + +// Trim removes all leading and trailing white space. +// +// All consecutive spaces will be reduced to a single space. +func Trim(s string) string { + return strings.TrimSpace(reTrim.ReplaceAllString(s, " ")) +} + +// Capitalize capitalizes the first character of the string. +func Capitalize(s string) string { + if len(s) == 1 { + return strings.ToUpper(s) + } + return strings.ToUpper(s[0:1]) + s[1:] +} + +// SortXML sorts the reader's XML elements +func SortXML(r io.Reader) string { + var buf bytes.Buffer + d := xml.NewDecoder(r) + root, _ := xmlutil.XMLToStruct(d, nil) + e := xml.NewEncoder(&buf) + xmlutil.StructToXML(e, root, true) + return buf.String() +} + +// PrettyPrint generates a human readable representation of the value v. +// All values of v are recursively found and pretty printed also. 
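+//
+// Output shape sketch (hypothetical type pkg.Thing with one exported field);
+// note that nested fields are printed one per line without indentation:
+//
+//    &pkg.Thing{
+//    Foo: "bar",
+//    }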
+func PrettyPrint(v interface{}) string { + value := reflect.ValueOf(v) + switch value.Kind() { + case reflect.Struct: + str := fullName(value.Type()) + "{\n" + for i := 0; i < value.NumField(); i++ { + l := string(value.Type().Field(i).Name[0]) + if strings.ToUpper(l) == l { + str += value.Type().Field(i).Name + ": " + str += PrettyPrint(value.Field(i).Interface()) + str += ",\n" + } + } + str += "}" + return str + case reflect.Map: + str := "map[" + fullName(value.Type().Key()) + "]" + fullName(value.Type().Elem()) + "{\n" + for _, k := range value.MapKeys() { + str += "\"" + k.String() + "\": " + str += PrettyPrint(value.MapIndex(k).Interface()) + str += ",\n" + } + str += "}" + return str + case reflect.Ptr: + if e := value.Elem(); e.IsValid() { + return "&" + PrettyPrint(e.Interface()) + } + return "nil" + case reflect.Slice: + str := "[]" + fullName(value.Type().Elem()) + "{\n" + for i := 0; i < value.Len(); i++ { + str += PrettyPrint(value.Index(i).Interface()) + str += ",\n" + } + str += "}" + return str + default: + return fmt.Sprintf("%#v", v) + } +} + +func pkgName(t reflect.Type) string { + pkg := t.PkgPath() + c := strings.Split(pkg, "/") + return c[len(c)-1] +} + +func fullName(t reflect.Type) string { + if pkg := pkgName(t); pkg != "" { + return pkg + "." + t.Name() + } + return t.Name() +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go b/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go new file mode 100644 index 000000000..b51e9449c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go @@ -0,0 +1,134 @@ +package waiter + +import ( + "fmt" + "reflect" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +// A Config provides a collection of configuration values to setup a generated +// waiter code with. +type Config struct { + Name string + Delay int + MaxAttempts int + Operation string + Acceptors []WaitAcceptor +} + +// A WaitAcceptor provides the information needed to wait for an API operation +// to complete. +type WaitAcceptor struct { + Expected interface{} + Matcher string + State string + Argument string +} + +// A Waiter provides waiting for an operation to complete. +type Waiter struct { + Config + Client interface{} + Input interface{} +} + +// Wait waits for an operation to complete, expire max attempts, or fail. Error +// is returned if the operation fails. 
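+//
+// A sketch of how generated waiter code typically drives this type (names
+// illustrative; see the tests below for a concrete configuration):
+//
+//	w := Waiter{
+//		Client: svc,
+//		Input:  input,
+//		Config: Config{Operation: "Mock", Delay: 5, MaxAttempts: 10, Acceptors: acceptors},
+//	}
+//	err := w.Wait()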
+func (w *Waiter) Wait() error { + client := reflect.ValueOf(w.Client) + in := reflect.ValueOf(w.Input) + method := client.MethodByName(w.Config.Operation + "Request") + + for i := 0; i < w.MaxAttempts; i++ { + res := method.Call([]reflect.Value{in}) + req := res[0].Interface().(*request.Request) + req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Waiter")) + + err := req.Send() + for _, a := range w.Acceptors { + result := false + var vals []interface{} + switch a.Matcher { + case "pathAll", "path": + // Require all matches to be equal for result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + if len(vals) == 0 { + break + } + result = true + for _, val := range vals { + if !awsutil.DeepEqual(val, a.Expected) { + result = false + break + } + } + case "pathAny": + // Only a single match needs to equal for the result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + for _, val := range vals { + if awsutil.DeepEqual(val, a.Expected) { + result = true + break + } + } + case "status": + s := a.Expected.(int) + result = s == req.HTTPResponse.StatusCode + case "error": + if aerr, ok := err.(awserr.Error); ok { + result = aerr.Code() == a.Expected.(string) + } + case "pathList": + // ignored matcher + default: + logf(client, "WARNING: Waiter for %s encountered unexpected matcher: %s", + w.Config.Operation, a.Matcher) + } + + if !result { + // If there was no matching result found there is nothing more to do + // for this response, retry the request. + continue + } + + switch a.State { + case "success": + // waiter completed + return nil + case "failure": + // Waiter failure state triggered + return awserr.New("ResourceNotReady", + fmt.Sprintf("failed waiting for successful resource state"), err) + case "retry": + // clear the error and retry the operation + err = nil + default: + logf(client, "WARNING: Waiter for %s encountered unexpected state: %s", + w.Config.Operation, a.State) + } + } + if err != nil { + return err + } + + time.Sleep(time.Second * time.Duration(w.Delay)) + } + + return awserr.New("ResourceNotReady", + fmt.Sprintf("exceeded %d wait attempts", w.MaxAttempts), nil) +} + +func logf(client reflect.Value, msg string, args ...interface{}) { + cfgVal := client.FieldByName("Config") + if !cfgVal.IsValid() { + return + } + if cfg, ok := cfgVal.Interface().(*aws.Config); ok && cfg.Logger != nil { + cfg.Logger.Log(fmt.Sprintf(msg, args...)) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter_test.go b/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter_test.go new file mode 100644 index 000000000..28fac2595 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter_test.go @@ -0,0 +1,401 @@ +package waiter_test + +import ( + "bytes" + "io/ioutil" + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/waiter" +) + +type mockClient struct { + *client.Client +} +type MockInput struct{} +type MockOutput struct { + States []*MockState +} +type MockState struct { + State *string +} + +func (c *mockClient) MockRequest(input *MockInput) (*request.Request, *MockOutput) { + op := &request.Operation{ + Name: "Mock", + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &MockInput{} + } + + output := &MockOutput{} + req := c.NewRequest(op, 
input, output) + req.Data = output + return req, output +} + +func TestWaiterPathAll(t *testing.T) { + svc := &mockClient{Client: awstesting.NewClient(&aws.Config{ + Region: aws.String("mock-region"), + })} + svc.Handlers.Send.Clear() // mock sending + svc.Handlers.Unmarshal.Clear() + svc.Handlers.UnmarshalMeta.Clear() + svc.Handlers.ValidateResponse.Clear() + + reqNum := 0 + resps := []*MockOutput{ + { // Request 1 + States: []*MockState{ + {State: aws.String("pending")}, + {State: aws.String("pending")}, + }, + }, + { // Request 2 + States: []*MockState{ + {State: aws.String("running")}, + {State: aws.String("pending")}, + }, + }, + { // Request 3 + States: []*MockState{ + {State: aws.String("running")}, + {State: aws.String("running")}, + }, + }, + } + + numBuiltReq := 0 + svc.Handlers.Build.PushBack(func(r *request.Request) { + numBuiltReq++ + }) + svc.Handlers.Unmarshal.PushBack(func(r *request.Request) { + if reqNum >= len(resps) { + assert.Fail(t, "too many polling requests made") + return + } + r.Data = resps[reqNum] + reqNum++ + }) + + waiterCfg := waiter.Config{ + Operation: "Mock", + Delay: 0, + MaxAttempts: 10, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "States[].State", + Expected: "running", + }, + }, + } + w := waiter.Waiter{ + Client: svc, + Input: &MockInput{}, + Config: waiterCfg, + } + + err := w.Wait() + assert.NoError(t, err) + assert.Equal(t, 3, numBuiltReq) + assert.Equal(t, 3, reqNum) +} + +func TestWaiterPath(t *testing.T) { + svc := &mockClient{Client: awstesting.NewClient(&aws.Config{ + Region: aws.String("mock-region"), + })} + svc.Handlers.Send.Clear() // mock sending + svc.Handlers.Unmarshal.Clear() + svc.Handlers.UnmarshalMeta.Clear() + svc.Handlers.ValidateResponse.Clear() + + reqNum := 0 + resps := []*MockOutput{ + { // Request 1 + States: []*MockState{ + {State: aws.String("pending")}, + {State: aws.String("pending")}, + }, + }, + { // Request 2 + States: []*MockState{ + {State: aws.String("running")}, + {State: aws.String("pending")}, + }, + }, + { // Request 3 + States: []*MockState{ + {State: aws.String("running")}, + {State: aws.String("running")}, + }, + }, + } + + numBuiltReq := 0 + svc.Handlers.Build.PushBack(func(r *request.Request) { + numBuiltReq++ + }) + svc.Handlers.Unmarshal.PushBack(func(r *request.Request) { + if reqNum >= len(resps) { + assert.Fail(t, "too many polling requests made") + return + } + r.Data = resps[reqNum] + reqNum++ + }) + + waiterCfg := waiter.Config{ + Operation: "Mock", + Delay: 0, + MaxAttempts: 10, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "path", + Argument: "States[].State", + Expected: "running", + }, + }, + } + w := waiter.Waiter{ + Client: svc, + Input: &MockInput{}, + Config: waiterCfg, + } + + err := w.Wait() + assert.NoError(t, err) + assert.Equal(t, 3, numBuiltReq) + assert.Equal(t, 3, reqNum) +} + +func TestWaiterFailure(t *testing.T) { + svc := &mockClient{Client: awstesting.NewClient(&aws.Config{ + Region: aws.String("mock-region"), + })} + svc.Handlers.Send.Clear() // mock sending + svc.Handlers.Unmarshal.Clear() + svc.Handlers.UnmarshalMeta.Clear() + svc.Handlers.ValidateResponse.Clear() + + reqNum := 0 + resps := []*MockOutput{ + { // Request 1 + States: []*MockState{ + {State: aws.String("pending")}, + {State: aws.String("pending")}, + }, + }, + { // Request 2 + States: []*MockState{ + {State: aws.String("running")}, + {State: aws.String("pending")}, + }, + }, + { // Request 3 + States: []*MockState{ + {State: 
aws.String("running")}, + {State: aws.String("stopping")}, + }, + }, + } + + numBuiltReq := 0 + svc.Handlers.Build.PushBack(func(r *request.Request) { + numBuiltReq++ + }) + svc.Handlers.Unmarshal.PushBack(func(r *request.Request) { + if reqNum >= len(resps) { + assert.Fail(t, "too many polling requests made") + return + } + r.Data = resps[reqNum] + reqNum++ + }) + + waiterCfg := waiter.Config{ + Operation: "Mock", + Delay: 0, + MaxAttempts: 10, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "States[].State", + Expected: "running", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "States[].State", + Expected: "stopping", + }, + }, + } + w := waiter.Waiter{ + Client: svc, + Input: &MockInput{}, + Config: waiterCfg, + } + + err := w.Wait().(awserr.Error) + assert.Error(t, err) + assert.Equal(t, "ResourceNotReady", err.Code()) + assert.Equal(t, "failed waiting for successful resource state", err.Message()) + assert.Equal(t, 3, numBuiltReq) + assert.Equal(t, 3, reqNum) +} + +func TestWaiterError(t *testing.T) { + svc := &mockClient{Client: awstesting.NewClient(&aws.Config{ + Region: aws.String("mock-region"), + })} + svc.Handlers.Send.Clear() // mock sending + svc.Handlers.Unmarshal.Clear() + svc.Handlers.UnmarshalMeta.Clear() + svc.Handlers.UnmarshalError.Clear() + svc.Handlers.ValidateResponse.Clear() + + reqNum := 0 + resps := []*MockOutput{ + { // Request 1 + States: []*MockState{ + {State: aws.String("pending")}, + {State: aws.String("pending")}, + }, + }, + { // Request 2, error case + }, + { // Request 3 + States: []*MockState{ + {State: aws.String("running")}, + {State: aws.String("running")}, + }, + }, + } + + numBuiltReq := 0 + svc.Handlers.Build.PushBack(func(r *request.Request) { + numBuiltReq++ + }) + svc.Handlers.Send.PushBack(func(r *request.Request) { + code := 200 + if reqNum == 1 { + code = 400 + } + r.HTTPResponse = &http.Response{ + StatusCode: code, + Status: http.StatusText(code), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + }) + svc.Handlers.Unmarshal.PushBack(func(r *request.Request) { + if reqNum >= len(resps) { + assert.Fail(t, "too many polling requests made") + return + } + r.Data = resps[reqNum] + reqNum++ + }) + svc.Handlers.UnmarshalMeta.PushBack(func(r *request.Request) { + if reqNum == 1 { + r.Error = awserr.New("MockException", "mock exception message", nil) + // If there was an error unmarshal error will be called instead of unmarshal + // need to increment count here also + reqNum++ + } + }) + + waiterCfg := waiter.Config{ + Operation: "Mock", + Delay: 0, + MaxAttempts: 10, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "States[].State", + Expected: "running", + }, + { + State: "retry", + Matcher: "error", + Argument: "", + Expected: "MockException", + }, + }, + } + w := waiter.Waiter{ + Client: svc, + Input: &MockInput{}, + Config: waiterCfg, + } + + err := w.Wait() + assert.NoError(t, err) + assert.Equal(t, 3, numBuiltReq) + assert.Equal(t, 3, reqNum) +} + +func TestWaiterStatus(t *testing.T) { + svc := &mockClient{Client: awstesting.NewClient(&aws.Config{ + Region: aws.String("mock-region"), + })} + svc.Handlers.Send.Clear() // mock sending + svc.Handlers.Unmarshal.Clear() + svc.Handlers.UnmarshalMeta.Clear() + svc.Handlers.ValidateResponse.Clear() + + reqNum := 0 + svc.Handlers.Build.PushBack(func(r *request.Request) { + reqNum++ + }) + svc.Handlers.Send.PushBack(func(r *request.Request) { + code := 200 + if reqNum == 3 { + code = 404 
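+			// Third poll: respond 404 with a NotFound error so the "status"
+			// acceptor below matches on the HTTP status code.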
+			r.Error = awserr.New("NotFound", "Not Found", nil)
+		}
+		r.HTTPResponse = &http.Response{
+			StatusCode: code,
+			Status:     http.StatusText(code),
+			Body:       ioutil.NopCloser(bytes.NewReader([]byte{})),
+		}
+	})
+
+	waiterCfg := waiter.Config{
+		Operation:   "Mock",
+		Delay:       0,
+		MaxAttempts: 10,
+		Acceptors: []waiter.WaitAcceptor{
+			{
+				State:    "success",
+				Matcher:  "status",
+				Argument: "",
+				Expected: 404,
+			},
+		},
+	}
+	w := waiter.Waiter{
+		Client: svc,
+		Input:  &MockInput{},
+		Config: waiterCfg,
+	}
+
+	err := w.Wait()
+	assert.NoError(t, err)
+	assert.Equal(t, 3, reqNum)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/sdk.go b/vendor/github.com/aws/aws-sdk-go/sdk.go
new file mode 100644
index 000000000..afa465a22
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/sdk.go
@@ -0,0 +1,7 @@
+// Package sdk is the official AWS SDK for the Go programming language.
+//
+// See our Developer Guide for information on getting started and using
+// the SDK.
+//
+// https://github.com/aws/aws-sdk-go/wiki
+package sdk
diff --git a/vendor/github.com/aws/aws-sdk-go/service/acm/acmiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/acm/acmiface/interface.go
new file mode 100644
index 000000000..bd4a5c0a4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/acm/acmiface/interface.go
@@ -0,0 +1,52 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package acmiface provides an interface for the AWS Certificate Manager.
+package acmiface
+
+import (
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/service/acm"
+)
+
+// ACMAPI is the interface type for acm.ACM.
+type ACMAPI interface {
+	AddTagsToCertificateRequest(*acm.AddTagsToCertificateInput) (*request.Request, *acm.AddTagsToCertificateOutput)
+
+	AddTagsToCertificate(*acm.AddTagsToCertificateInput) (*acm.AddTagsToCertificateOutput, error)
+
+	DeleteCertificateRequest(*acm.DeleteCertificateInput) (*request.Request, *acm.DeleteCertificateOutput)
+
+	DeleteCertificate(*acm.DeleteCertificateInput) (*acm.DeleteCertificateOutput, error)
+
+	DescribeCertificateRequest(*acm.DescribeCertificateInput) (*request.Request, *acm.DescribeCertificateOutput)
+
+	DescribeCertificate(*acm.DescribeCertificateInput) (*acm.DescribeCertificateOutput, error)
+
+	GetCertificateRequest(*acm.GetCertificateInput) (*request.Request, *acm.GetCertificateOutput)
+
+	GetCertificate(*acm.GetCertificateInput) (*acm.GetCertificateOutput, error)
+
+	ListCertificatesRequest(*acm.ListCertificatesInput) (*request.Request, *acm.ListCertificatesOutput)
+
+	ListCertificates(*acm.ListCertificatesInput) (*acm.ListCertificatesOutput, error)
+
+	ListCertificatesPages(*acm.ListCertificatesInput, func(*acm.ListCertificatesOutput, bool) bool) error
+
+	ListTagsForCertificateRequest(*acm.ListTagsForCertificateInput) (*request.Request, *acm.ListTagsForCertificateOutput)
+
+	ListTagsForCertificate(*acm.ListTagsForCertificateInput) (*acm.ListTagsForCertificateOutput, error)
+
+	RemoveTagsFromCertificateRequest(*acm.RemoveTagsFromCertificateInput) (*request.Request, *acm.RemoveTagsFromCertificateOutput)
+
+	RemoveTagsFromCertificate(*acm.RemoveTagsFromCertificateInput) (*acm.RemoveTagsFromCertificateOutput, error)
+
+	RequestCertificateRequest(*acm.RequestCertificateInput) (*request.Request, *acm.RequestCertificateOutput)
+
+	RequestCertificate(*acm.RequestCertificateInput) (*acm.RequestCertificateOutput, error)
+
+	ResendValidationEmailRequest(*acm.ResendValidationEmailInput) (*request.Request, *acm.ResendValidationEmailOutput)
+
+	
ResendValidationEmail(*acm.ResendValidationEmailInput) (*acm.ResendValidationEmailOutput, error) +} + +var _ ACMAPI = (*acm.ACM)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/acm/api.go b/vendor/github.com/aws/aws-sdk-go/service/acm/api.go new file mode 100644 index 000000000..8a5e882ed --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/acm/api.go @@ -0,0 +1,1452 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package acm provides a client for AWS Certificate Manager. +package acm + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opAddTagsToCertificate = "AddTagsToCertificate" + +// AddTagsToCertificateRequest generates a "aws/request.Request" representing the +// client's request for the AddTagsToCertificate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddTagsToCertificate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddTagsToCertificateRequest method. +// req, resp := client.AddTagsToCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ACM) AddTagsToCertificateRequest(input *AddTagsToCertificateInput) (req *request.Request, output *AddTagsToCertificateOutput) { + op := &request.Operation{ + Name: opAddTagsToCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddTagsToCertificateInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AddTagsToCertificateOutput{} + req.Data = output + return +} + +// Adds one or more tags to an ACM Certificate. Tags are labels that you can +// use to identify and organize your AWS resources. Each tag consists of a key +// and an optional value. You specify the certificate on input by its Amazon +// Resource Name (ARN). You specify the tag by using a key-value pair. +// +// You can apply a tag to just one certificate if you want to identify a specific +// characteristic of that certificate, or you can apply the same tag to multiple +// certificates if you want to filter for a common relationship among those +// certificates. Similarly, you can apply the same tag to multiple resources +// if you want to specify a relationship among those resources. For example, +// you can add the same tag to an ACM Certificate and an Elastic Load Balancing +// load balancer to indicate that they are both used by the same website. For +// more information, see Tagging ACM Certificates (http://docs.aws.amazon.com/acm/latest/userguide/tags.html). +// +// To remove one or more tags, use the RemoveTagsFromCertificate action. To +// view all of the tags that have been applied to the certificate, use the ListTagsForCertificate +// action. 
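+//
+// A usage sketch (illustrative ARN and tag values; svc is assumed to be an
+// initialized *ACM client, and the Key/Value fields follow the key-value
+// pair description above):
+//
+//	params := &AddTagsToCertificateInput{
+//		CertificateArn: aws.String("arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012"),
+//		Tags: []*Tag{
+//			{Key: aws.String("Website"), Value: aws.String("example.com")},
+//		},
+//	}
+//	_, err := svc.AddTagsToCertificate(params)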
+func (c *ACM) AddTagsToCertificate(input *AddTagsToCertificateInput) (*AddTagsToCertificateOutput, error) { + req, out := c.AddTagsToCertificateRequest(input) + err := req.Send() + return out, err +} + +const opDeleteCertificate = "DeleteCertificate" + +// DeleteCertificateRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCertificate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteCertificate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteCertificateRequest method. +// req, resp := client.DeleteCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ACM) DeleteCertificateRequest(input *DeleteCertificateInput) (req *request.Request, output *DeleteCertificateOutput) { + op := &request.Operation{ + Name: opDeleteCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteCertificateInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteCertificateOutput{} + req.Data = output + return +} + +// Deletes an ACM Certificate and its associated private key. If this action +// succeeds, the certificate no longer appears in the list of ACM Certificates +// that can be displayed by calling the ListCertificates action or be retrieved +// by calling the GetCertificate action. The certificate will not be available +// for use by other AWS services. +// +// You cannot delete an ACM Certificate that is being used by another AWS +// service. To delete a certificate that is in use, the certificate association +// must first be removed. +func (c *ACM) DeleteCertificate(input *DeleteCertificateInput) (*DeleteCertificateOutput, error) { + req, out := c.DeleteCertificateRequest(input) + err := req.Send() + return out, err +} + +const opDescribeCertificate = "DescribeCertificate" + +// DescribeCertificateRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCertificate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeCertificate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeCertificateRequest method. 
+// req, resp := client.DescribeCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ACM) DescribeCertificateRequest(input *DescribeCertificateInput) (req *request.Request, output *DescribeCertificateOutput) { + op := &request.Operation{ + Name: opDescribeCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeCertificateInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCertificateOutput{} + req.Data = output + return +} + +// Returns a list of the fields contained in the specified ACM Certificate. +// For example, this action returns the certificate status, a flag that indicates +// whether the certificate is associated with any other AWS service, and the +// date at which the certificate request was created. You specify the ACM Certificate +// on input by its Amazon Resource Name (ARN). +func (c *ACM) DescribeCertificate(input *DescribeCertificateInput) (*DescribeCertificateOutput, error) { + req, out := c.DescribeCertificateRequest(input) + err := req.Send() + return out, err +} + +const opGetCertificate = "GetCertificate" + +// GetCertificateRequest generates a "aws/request.Request" representing the +// client's request for the GetCertificate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetCertificate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetCertificateRequest method. +// req, resp := client.GetCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ACM) GetCertificateRequest(input *GetCertificateInput) (req *request.Request, output *GetCertificateOutput) { + op := &request.Operation{ + Name: opGetCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetCertificateInput{} + } + + req = c.newRequest(op, input, output) + output = &GetCertificateOutput{} + req.Data = output + return +} + +// Retrieves an ACM Certificate and certificate chain for the certificate specified +// by an ARN. The chain is an ordered list of certificates that contains the +// root certificate, intermediate certificates of subordinate CAs, and the ACM +// Certificate. The certificate and certificate chain are base64 encoded. If +// you want to decode the certificate chain to see the individual certificate +// fields, you can use OpenSSL. +// +// Currently, ACM Certificates can be used only with Elastic Load Balancing +// and Amazon CloudFront. +func (c *ACM) GetCertificate(input *GetCertificateInput) (*GetCertificateOutput, error) { + req, out := c.GetCertificateRequest(input) + err := req.Send() + return out, err +} + +const opListCertificates = "ListCertificates" + +// ListCertificatesRequest generates a "aws/request.Request" representing the +// client's request for the ListCertificates operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListCertificates method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListCertificatesRequest method. +// req, resp := client.ListCertificatesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ACM) ListCertificatesRequest(input *ListCertificatesInput) (req *request.Request, output *ListCertificatesOutput) { + op := &request.Operation{ + Name: opListCertificates, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxItems", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListCertificatesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListCertificatesOutput{} + req.Data = output + return +} + +// Retrieves a list of ACM Certificates and the domain name for each. You can +// optionally filter the list to return only the certificates that match the +// specified status. +func (c *ACM) ListCertificates(input *ListCertificatesInput) (*ListCertificatesOutput, error) { + req, out := c.ListCertificatesRequest(input) + err := req.Send() + return out, err +} + +// ListCertificatesPages iterates over the pages of a ListCertificates operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListCertificates method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListCertificates operation. +// pageNum := 0 +// err := client.ListCertificatesPages(params, +// func(page *ListCertificatesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ACM) ListCertificatesPages(input *ListCertificatesInput, fn func(p *ListCertificatesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListCertificatesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListCertificatesOutput), lastPage) + }) +} + +const opListTagsForCertificate = "ListTagsForCertificate" + +// ListTagsForCertificateRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForCertificate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTagsForCertificate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the ListTagsForCertificateRequest method. +// req, resp := client.ListTagsForCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ACM) ListTagsForCertificateRequest(input *ListTagsForCertificateInput) (req *request.Request, output *ListTagsForCertificateOutput) { + op := &request.Operation{ + Name: opListTagsForCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForCertificateInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTagsForCertificateOutput{} + req.Data = output + return +} + +// Lists the tags that have been applied to the ACM Certificate. Use the certificate +// ARN to specify the certificate. To add a tag to an ACM Certificate, use the +// AddTagsToCertificate action. To delete a tag, use the RemoveTagsFromCertificate +// action. +func (c *ACM) ListTagsForCertificate(input *ListTagsForCertificateInput) (*ListTagsForCertificateOutput, error) { + req, out := c.ListTagsForCertificateRequest(input) + err := req.Send() + return out, err +} + +const opRemoveTagsFromCertificate = "RemoveTagsFromCertificate" + +// RemoveTagsFromCertificateRequest generates a "aws/request.Request" representing the +// client's request for the RemoveTagsFromCertificate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RemoveTagsFromCertificate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RemoveTagsFromCertificateRequest method. +// req, resp := client.RemoveTagsFromCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ACM) RemoveTagsFromCertificateRequest(input *RemoveTagsFromCertificateInput) (req *request.Request, output *RemoveTagsFromCertificateOutput) { + op := &request.Operation{ + Name: opRemoveTagsFromCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveTagsFromCertificateInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RemoveTagsFromCertificateOutput{} + req.Data = output + return +} + +// Remove one or more tags from an ACM Certificate. A tag consists of a key-value +// pair. If you do not specify the value portion of the tag when calling this +// function, the tag will be removed regardless of value. If you specify a value, +// the tag is removed only if it is associated with the specified value. +// +// To add tags to a certificate, use the AddTagsToCertificate action. To view +// all of the tags that have been applied to a specific ACM Certificate, use +// the ListTagsForCertificate action. 
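+//
+// A usage sketch (illustrative values; per the description above, omitting
+// a tag's Value removes the tag regardless of its value):
+//
+//	_, err := svc.RemoveTagsFromCertificate(&RemoveTagsFromCertificateInput{
+//		CertificateArn: aws.String(certificateArn),
+//		Tags:           []*Tag{{Key: aws.String("Website")}},
+//	})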
+func (c *ACM) RemoveTagsFromCertificate(input *RemoveTagsFromCertificateInput) (*RemoveTagsFromCertificateOutput, error) { + req, out := c.RemoveTagsFromCertificateRequest(input) + err := req.Send() + return out, err +} + +const opRequestCertificate = "RequestCertificate" + +// RequestCertificateRequest generates a "aws/request.Request" representing the +// client's request for the RequestCertificate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RequestCertificate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RequestCertificateRequest method. +// req, resp := client.RequestCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ACM) RequestCertificateRequest(input *RequestCertificateInput) (req *request.Request, output *RequestCertificateOutput) { + op := &request.Operation{ + Name: opRequestCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RequestCertificateInput{} + } + + req = c.newRequest(op, input, output) + output = &RequestCertificateOutput{} + req.Data = output + return +} + +// Requests an ACM Certificate for use with other AWS services. To request an +// ACM Certificate, you must specify the fully qualified domain name (FQDN) +// for your site. You can also specify additional FQDNs if users can reach your +// site by using other names. For each domain name you specify, email is sent +// to the domain owner to request approval to issue the certificate. After receiving +// approval from the domain owner, the ACM Certificate is issued. For more information, +// see the AWS Certificate Manager User Guide (http://docs.aws.amazon.com/acm/latest/userguide/overview.html). +func (c *ACM) RequestCertificate(input *RequestCertificateInput) (*RequestCertificateOutput, error) { + req, out := c.RequestCertificateRequest(input) + err := req.Send() + return out, err +} + +const opResendValidationEmail = "ResendValidationEmail" + +// ResendValidationEmailRequest generates a "aws/request.Request" representing the +// client's request for the ResendValidationEmail operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ResendValidationEmail method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ResendValidationEmailRequest method. 
+// req, resp := client.ResendValidationEmailRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ACM) ResendValidationEmailRequest(input *ResendValidationEmailInput) (req *request.Request, output *ResendValidationEmailOutput) { + op := &request.Operation{ + Name: opResendValidationEmail, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResendValidationEmailInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ResendValidationEmailOutput{} + req.Data = output + return +} + +// Resends the email that requests domain ownership validation. The domain owner +// or an authorized representative must approve the ACM Certificate before it +// can be issued. The certificate can be approved by clicking a link in the +// mail to navigate to the Amazon certificate approval website and then clicking +// I Approve. However, the validation email can be blocked by spam filters. +// Therefore, if you do not receive the original mail, you can request that +// the mail be resent within 72 hours of requesting the ACM Certificate. If +// more than 72 hours have elapsed since your original request or since your +// last attempt to resend validation mail, you must request a new certificate. +func (c *ACM) ResendValidationEmail(input *ResendValidationEmailInput) (*ResendValidationEmailOutput, error) { + req, out := c.ResendValidationEmailRequest(input) + err := req.Send() + return out, err +} + +type AddTagsToCertificateInput struct { + _ struct{} `type:"structure"` + + // String that contains the ARN of the ACM Certificate to which the tag is to + // be applied. This must be of the form: + // + // arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012 + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + CertificateArn *string `min:"20" type:"string" required:"true"` + + // The key-value pair that defines the tag. The tag value is optional. + Tags []*Tag `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s AddTagsToCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsToCertificateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
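+//
+// Failures are aggregated into a single request.ErrInvalidParams value, so
+// one call reports every invalid field rather than stopping at the first.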
+func (s *AddTagsToCertificateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddTagsToCertificateInput"} + if s.CertificateArn == nil { + invalidParams.Add(request.NewErrParamRequired("CertificateArn")) + } + if s.CertificateArn != nil && len(*s.CertificateArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("CertificateArn", 20)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type AddTagsToCertificateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddTagsToCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsToCertificateOutput) GoString() string { + return s.String() +} + +// Contains detailed metadata about an ACM Certificate. This structure is returned +// in the response to a DescribeCertificate request. +type CertificateDetail struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the certificate. For more information about + // ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + CertificateArn *string `min:"20" type:"string"` + + // The time at which the certificate was requested. + CreatedAt *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The fully qualified domain name (FQDN) for the certificate, such as www.example.com + // or example.com. + DomainName *string `min:"1" type:"string"` + + // Contains information about the email address or addresses used for domain + // validation. + DomainValidationOptions []*DomainValidation `min:"1" type:"list"` + + // A list of ARNs for the resources that are using the certificate. An ACM Certificate + // can be used by multiple AWS resources. + InUseBy []*string `type:"list"` + + // The time at which the certificate was issued. + IssuedAt *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The X.500 distinguished name of the CA that issued and signed the certificate. + Issuer *string `type:"string"` + + // The algorithm used to generate the key pair (the public and private key). + // Currently the only supported value is RSA_2048. + KeyAlgorithm *string `type:"string" enum:"KeyAlgorithm"` + + // The time after which the certificate is not valid. + NotAfter *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The time before which the certificate is not valid. + NotBefore *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The reason the certificate was revoked. This value exists only when the certificate + // status is REVOKED. + RevocationReason *string `type:"string" enum:"RevocationReason"` + + // The time at which the certificate was revoked. This value exists only when + // the certificate status is REVOKED. + RevokedAt *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The serial number of the certificate. + Serial *string `type:"string"` + + // The algorithm used to generate a signature. Currently the only supported + // value is SHA256WITHRSA. 
+ SignatureAlgorithm *string `type:"string"` + + // The status of the certificate. + Status *string `type:"string" enum:"CertificateStatus"` + + // The X.500 distinguished name of the entity associated with the public key + // contained in the certificate. + Subject *string `type:"string"` + + // One or more domain names (subject alternative names) included in the certificate + // request. After the certificate is issued, this list includes the domain names + // bound to the public key contained in the certificate. The subject alternative + // names include the canonical domain name (CN) of the certificate and additional + // domain names that can be used to connect to the website. + SubjectAlternativeNames []*string `min:"1" type:"list"` +} + +// String returns the string representation +func (s CertificateDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CertificateDetail) GoString() string { + return s.String() +} + +// This structure is returned in the response object of ListCertificates action. +type CertificateSummary struct { + _ struct{} `type:"structure"` + + // Amazon Resource Name (ARN) of the certificate. This is of the form: + // + // arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012 + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + CertificateArn *string `min:"20" type:"string"` + + // Fully qualified domain name (FQDN), such as www.example.com or example.com, + // for the certificate. + DomainName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CertificateSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CertificateSummary) GoString() string { + return s.String() +} + +type DeleteCertificateInput struct { + _ struct{} `type:"structure"` + + // String that contains the ARN of the ACM Certificate to be deleted. This must + // be of the form: + // + // arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012 + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + CertificateArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCertificateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteCertificateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteCertificateInput"} + if s.CertificateArn == nil { + invalidParams.Add(request.NewErrParamRequired("CertificateArn")) + } + if s.CertificateArn != nil && len(*s.CertificateArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("CertificateArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteCertificateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCertificateOutput) GoString() string { + return s.String() +} + +type DescribeCertificateInput struct { + _ struct{} `type:"structure"` + + // String that contains an ACM Certificate ARN. The ARN must be of the form: + // + // arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012 + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + CertificateArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCertificateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeCertificateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeCertificateInput"} + if s.CertificateArn == nil { + invalidParams.Add(request.NewErrParamRequired("CertificateArn")) + } + if s.CertificateArn != nil && len(*s.CertificateArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("CertificateArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeCertificateOutput struct { + _ struct{} `type:"structure"` + + // Contains a CertificateDetail structure that lists the fields of an ACM Certificate. + Certificate *CertificateDetail `type:"structure"` +} + +// String returns the string representation +func (s DescribeCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCertificateOutput) GoString() string { + return s.String() +} + +// Structure that contains the domain name, the base validation domain to which +// validation email is sent, and the email addresses used to validate the domain +// identity. +type DomainValidation struct { + _ struct{} `type:"structure"` + + // Fully Qualified Domain Name (FQDN) of the form www.example.com or example.com. + DomainName *string `min:"1" type:"string" required:"true"` + + // The base validation domain that acts as the suffix of the email addresses + // that are used to send the emails. + ValidationDomain *string `min:"1" type:"string"` + + // A list of contact address for the domain registrant. + ValidationEmails []*string `type:"list"` +} + +// String returns the string representation +func (s DomainValidation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DomainValidation) GoString() string { + return s.String() +} + +// This structure is used in the request object of the RequestCertificate action. 
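+//
+// For example (illustrative values, mirroring the ValidationDomain docs
+// below):
+//
+//	opt := &DomainValidationOption{
+//		DomainName:       aws.String("site.subdomain.example.com"),
+//		ValidationDomain: aws.String("subdomain.example.com"),
+//	}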
+type DomainValidationOption struct { + _ struct{} `type:"structure"` + + // Fully Qualified Domain Name (FQDN) of the certificate being requested. + DomainName *string `min:"1" type:"string" required:"true"` + + // The domain to which validation email is sent. This is the base validation + // domain that will act as the suffix of the email addresses. This must be the + // same as the DomainName value or a superdomain of the DomainName value. For + // example, if you requested a certificate for site.subdomain.example.com and + // specify a ValidationDomain of subdomain.example.com, ACM sends email to the + // domain registrant, technical contact, and administrative contact in WHOIS + // for the base domain and the following five addresses: + // + // admin@subdomain.example.com + // + // administrator@subdomain.example.com + // + // hostmaster@subdomain.example.com + // + // postmaster@subdomain.example.com + // + // webmaster@subdomain.example.com + ValidationDomain *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DomainValidationOption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DomainValidationOption) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DomainValidationOption) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DomainValidationOption"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DomainName", 1)) + } + if s.ValidationDomain == nil { + invalidParams.Add(request.NewErrParamRequired("ValidationDomain")) + } + if s.ValidationDomain != nil && len(*s.ValidationDomain) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ValidationDomain", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetCertificateInput struct { + _ struct{} `type:"structure"` + + // String that contains a certificate ARN in the following format: + // + // arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012 + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + CertificateArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCertificateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetCertificateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetCertificateInput"} + if s.CertificateArn == nil { + invalidParams.Add(request.NewErrParamRequired("CertificateArn")) + } + if s.CertificateArn != nil && len(*s.CertificateArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("CertificateArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetCertificateOutput struct { + _ struct{} `type:"structure"` + + // String that contains the ACM Certificate represented by the ARN specified + // at input. 
+ Certificate *string `min:"1" type:"string"` + + // The certificate chain that contains the root certificate issued by the certificate + // authority (CA). + CertificateChain *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCertificateOutput) GoString() string { + return s.String() +} + +type ListCertificatesInput struct { + _ struct{} `type:"structure"` + + // The status or statuses on which to filter the list of ACM Certificates. + CertificateStatuses []*string `type:"list"` + + // Use this parameter when paginating results to specify the maximum number + // of items to return in the response. If additional items exist beyond the + // number you specify, the NextToken element is sent in the response. Use this + // NextToken value in a subsequent request to retrieve additional items. + MaxItems *int64 `min:"1" type:"integer"` + + // Use this parameter only when paginating results and only in a subsequent + // request after you receive a response with truncated results. Set it to the + // value of NextToken from the response you just received. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListCertificatesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListCertificatesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListCertificatesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListCertificatesInput"} + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListCertificatesOutput struct { + _ struct{} `type:"structure"` + + // A list of ACM Certificates. + CertificateSummaryList []*CertificateSummary `type:"list"` + + // When the list is truncated, this value is present and contains the value + // to use for the NextToken parameter in a subsequent pagination request. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListCertificatesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListCertificatesOutput) GoString() string { + return s.String() +} + +type ListTagsForCertificateInput struct { + _ struct{} `type:"structure"` + + // String that contains the ARN of the ACM Certificate for which you want to + // list the tags. This must be of the form: + // + // arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012 + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). 
+ CertificateArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForCertificateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForCertificateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForCertificateInput"} + if s.CertificateArn == nil { + invalidParams.Add(request.NewErrParamRequired("CertificateArn")) + } + if s.CertificateArn != nil && len(*s.CertificateArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("CertificateArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListTagsForCertificateOutput struct { + _ struct{} `type:"structure"` + + // The key-value pairs that define the applied tags. + Tags []*Tag `min:"1" type:"list"` +} + +// String returns the string representation +func (s ListTagsForCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForCertificateOutput) GoString() string { + return s.String() +} + +type RemoveTagsFromCertificateInput struct { + _ struct{} `type:"structure"` + + // String that contains the ARN of the ACM Certificate with one or more tags + // that you want to remove. This must be of the form: + // + // arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012 + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + CertificateArn *string `min:"20" type:"string" required:"true"` + + // The key-value pair that defines the tag to remove. + Tags []*Tag `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s RemoveTagsFromCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsFromCertificateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *RemoveTagsFromCertificateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemoveTagsFromCertificateInput"} + if s.CertificateArn == nil { + invalidParams.Add(request.NewErrParamRequired("CertificateArn")) + } + if s.CertificateArn != nil && len(*s.CertificateArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("CertificateArn", 20)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RemoveTagsFromCertificateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveTagsFromCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsFromCertificateOutput) GoString() string { + return s.String() +} + +type RequestCertificateInput struct { + _ struct{} `type:"structure"` + + // Fully qualified domain name (FQDN), such as www.example.com, of the site + // you want to secure with an ACM Certificate. Use an asterisk (*) to create + // a wildcard certificate that protects several sites in the same domain. For + // example, *.example.com protects www.example.com, site.example.com, and images.example.com. + DomainName *string `min:"1" type:"string" required:"true"` + + // The base validation domain that will act as the suffix of the email addresses + // that are used to send the emails. This must be the same as the Domain value + // or a superdomain of the Domain value. For example, if you requested a certificate + // for test.example.com and specify DomainValidationOptions of example.com, + // ACM sends email to the domain registrant, technical contact, and administrative + // contact in WHOIS and the following five addresses: + // + // admin@example.com + // + // administrator@example.com + // + // hostmaster@example.com + // + // postmaster@example.com + // + // webmaster@example.com + DomainValidationOptions []*DomainValidationOption `min:"1" type:"list"` + + // Customer chosen string that can be used to distinguish between calls to RequestCertificate. + // Idempotency tokens time out after one hour. Therefore, if you call RequestCertificate + // multiple times with the same idempotency token within one hour, ACM recognizes + // that you are requesting only one certificate and will issue only one. If + // you change the idempotency token for each call, ACM recognizes that you are + // requesting multiple certificates. + IdempotencyToken *string `min:"1" type:"string"` + + // Additional FQDNs to be included in the Subject Alternative Name extension + // of the ACM Certificate. For example, add the name www.example.net to a certificate + // for which the DomainName field is www.example.com if users can reach your + // site by using either name. 
+ SubjectAlternativeNames []*string `min:"1" type:"list"` +} + +// String returns the string representation +func (s RequestCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestCertificateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RequestCertificateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RequestCertificateInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DomainName", 1)) + } + if s.DomainValidationOptions != nil && len(s.DomainValidationOptions) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DomainValidationOptions", 1)) + } + if s.IdempotencyToken != nil && len(*s.IdempotencyToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdempotencyToken", 1)) + } + if s.SubjectAlternativeNames != nil && len(s.SubjectAlternativeNames) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SubjectAlternativeNames", 1)) + } + if s.DomainValidationOptions != nil { + for i, v := range s.DomainValidationOptions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DomainValidationOptions", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RequestCertificateOutput struct { + _ struct{} `type:"structure"` + + // String that contains the ARN of the issued certificate. This must be of the + // form: + // + // arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012 + CertificateArn *string `min:"20" type:"string"` +} + +// String returns the string representation +func (s RequestCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestCertificateOutput) GoString() string { + return s.String() +} + +type ResendValidationEmailInput struct { + _ struct{} `type:"structure"` + + // String that contains the ARN of the requested certificate. The certificate + // ARN is generated and returned by the RequestCertificate action as soon as + // the request is made. By default, using this parameter causes email to be + // sent to all top-level domains you specified in the certificate request. + // + // The ARN must be of the form: + // + // arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012 + CertificateArn *string `min:"20" type:"string" required:"true"` + + // The Fully Qualified Domain Name (FQDN) of the certificate that needs to be + // validated. + Domain *string `min:"1" type:"string" required:"true"` + + // The base validation domain that will act as the suffix of the email addresses + // that are used to send the emails. This must be the same as the Domain value + // or a superdomain of the Domain value. 
For example, if you requested a certificate + // for site.subdomain.example.com and specify a ValidationDomain of subdomain.example.com, + // ACM sends email to the domain registrant, technical contact, and administrative + // contact in WHOIS and the following five addresses: + // + // admin@subdomain.example.com + // + // administrator@subdomain.example.com + // + // hostmaster@subdomain.example.com + // + // postmaster@subdomain.example.com + // + // webmaster@subdomain.example.com + ValidationDomain *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ResendValidationEmailInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResendValidationEmailInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ResendValidationEmailInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResendValidationEmailInput"} + if s.CertificateArn == nil { + invalidParams.Add(request.NewErrParamRequired("CertificateArn")) + } + if s.CertificateArn != nil && len(*s.CertificateArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("CertificateArn", 20)) + } + if s.Domain == nil { + invalidParams.Add(request.NewErrParamRequired("Domain")) + } + if s.Domain != nil && len(*s.Domain) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Domain", 1)) + } + if s.ValidationDomain == nil { + invalidParams.Add(request.NewErrParamRequired("ValidationDomain")) + } + if s.ValidationDomain != nil && len(*s.ValidationDomain) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ValidationDomain", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ResendValidationEmailOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ResendValidationEmailOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResendValidationEmailOutput) GoString() string { + return s.String() +} + +// A key-value pair that identifies or specifies metadata about an ACM resource. +type Tag struct { + _ struct{} `type:"structure"` + + // The key of the tag. + Key *string `min:"1" type:"string" required:"true"` + + // The value of the tag. + Value *string `type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +const ( + // @enum CertificateStatus + CertificateStatusPendingValidation = "PENDING_VALIDATION" + // @enum CertificateStatus + CertificateStatusIssued = "ISSUED" + // @enum CertificateStatus + CertificateStatusInactive = "INACTIVE" + // @enum CertificateStatus + CertificateStatusExpired = "EXPIRED" + // @enum CertificateStatus + CertificateStatusValidationTimedOut = "VALIDATION_TIMED_OUT" + // @enum CertificateStatus + CertificateStatusRevoked = "REVOKED" + // @enum CertificateStatus + CertificateStatusFailed = "FAILED" +) + +const ( + // @enum KeyAlgorithm + KeyAlgorithmRsa2048 = "RSA_2048" + // @enum KeyAlgorithm + KeyAlgorithmEcPrime256v1 = "EC_prime256v1" +) + +const ( + // @enum RevocationReason + RevocationReasonUnspecified = "UNSPECIFIED" + // @enum RevocationReason + RevocationReasonKeyCompromise = "KEY_COMPROMISE" + // @enum RevocationReason + RevocationReasonCaCompromise = "CA_COMPROMISE" + // @enum RevocationReason + RevocationReasonAffiliationChanged = "AFFILIATION_CHANGED" + // @enum RevocationReason + RevocationReasonSuperceded = "SUPERCEDED" + // @enum RevocationReason + RevocationReasonCessationOfOperation = "CESSATION_OF_OPERATION" + // @enum RevocationReason + RevocationReasonCertificateHold = "CERTIFICATE_HOLD" + // @enum RevocationReason + RevocationReasonRemoveFromCrl = "REMOVE_FROM_CRL" + // @enum RevocationReason + RevocationReasonPrivilegeWithdrawn = "PRIVILEGE_WITHDRAWN" + // @enum RevocationReason + RevocationReasonAACompromise = "A_A_COMPROMISE" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/acm/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/acm/examples_test.go new file mode 100644 index 000000000..438ee76b6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/acm/examples_test.go @@ -0,0 +1,220 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package acm_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/acm" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleACM_AddTagsToCertificate() { + svc := acm.New(session.New()) + + params := &acm.AddTagsToCertificateInput{ + CertificateArn: aws.String("Arn"), // Required + Tags: []*acm.Tag{ // Required + { // Required + Key: aws.String("TagKey"), // Required + Value: aws.String("TagValue"), + }, + // More values... + }, + } + resp, err := svc.AddTagsToCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleACM_DeleteCertificate() { + svc := acm.New(session.New()) + + params := &acm.DeleteCertificateInput{ + CertificateArn: aws.String("Arn"), // Required + } + resp, err := svc.DeleteCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
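+	// (For DeleteCertificate the output structure carries no fields, so a
+	// nil err is the effective success signal.)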
+ fmt.Println(resp) +} + +func ExampleACM_DescribeCertificate() { + svc := acm.New(session.New()) + + params := &acm.DescribeCertificateInput{ + CertificateArn: aws.String("Arn"), // Required + } + resp, err := svc.DescribeCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleACM_GetCertificate() { + svc := acm.New(session.New()) + + params := &acm.GetCertificateInput{ + CertificateArn: aws.String("Arn"), // Required + } + resp, err := svc.GetCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleACM_ListCertificates() { + svc := acm.New(session.New()) + + params := &acm.ListCertificatesInput{ + CertificateStatuses: []*string{ + aws.String("CertificateStatus"), // Required + // More values... + }, + MaxItems: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListCertificates(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleACM_ListTagsForCertificate() { + svc := acm.New(session.New()) + + params := &acm.ListTagsForCertificateInput{ + CertificateArn: aws.String("Arn"), // Required + } + resp, err := svc.ListTagsForCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleACM_RemoveTagsFromCertificate() { + svc := acm.New(session.New()) + + params := &acm.RemoveTagsFromCertificateInput{ + CertificateArn: aws.String("Arn"), // Required + Tags: []*acm.Tag{ // Required + { // Required + Key: aws.String("TagKey"), // Required + Value: aws.String("TagValue"), + }, + // More values... + }, + } + resp, err := svc.RemoveTagsFromCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleACM_RequestCertificate() { + svc := acm.New(session.New()) + + params := &acm.RequestCertificateInput{ + DomainName: aws.String("DomainNameString"), // Required + DomainValidationOptions: []*acm.DomainValidationOption{ + { // Required + DomainName: aws.String("DomainNameString"), // Required + ValidationDomain: aws.String("DomainNameString"), // Required + }, + // More values... + }, + IdempotencyToken: aws.String("IdempotencyToken"), + SubjectAlternativeNames: []*string{ + aws.String("DomainNameString"), // Required + // More values... + }, + } + resp, err := svc.RequestCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
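+	// resp.CertificateArn identifies the requested certificate. As noted on
+	// RequestCertificateInput, repeating the call with the same
+	// IdempotencyToken within one hour is treated as the same request, so
+	// only one certificate is issued.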
+	fmt.Println(resp)
+}
+
+func ExampleACM_ResendValidationEmail() {
+	svc := acm.New(session.New())
+
+	params := &acm.ResendValidationEmailInput{
+		CertificateArn:   aws.String("Arn"),              // Required
+		Domain:           aws.String("DomainNameString"), // Required
+		ValidationDomain: aws.String("DomainNameString"), // Required
+	}
+	resp, err := svc.ResendValidationEmail(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/acm/service.go b/vendor/github.com/aws/aws-sdk-go/service/acm/service.go
new file mode 100644
index 000000000..782c2bbaa
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/acm/service.go
@@ -0,0 +1,95 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package acm
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+)
+
+// Welcome to the AWS Certificate Manager (ACM) Command Reference. This guide
+// provides descriptions, syntax, and usage examples for each ACM command. You
+// can use AWS Certificate Manager to request ACM Certificates for your AWS-based
+// websites and applications. For general information about using ACM and for
+// more information about using the console, see the AWS Certificate Manager
+// User Guide (http://docs.aws.amazon.com/acm/latest/userguide/acm-overview.html).
+// For more information about using the ACM API, see the AWS Certificate Manager
+// API Reference (http://docs.aws.amazon.com/acm/latest/APIReference/Welcome.html).
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type ACM struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "acm"
+
+// New creates a new instance of the ACM client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create an ACM client from just a session.
+//     svc := acm.New(mySession)
+//
+//     // Create an ACM client with additional configuration
+//     svc := acm.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *ACM {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
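+// Each request lifecycle phase (Sign, Build, Unmarshal, UnmarshalMeta,
+// UnmarshalError) is a named handler list; the SigV4 signer and JSON-RPC
+// protocol handlers pushed onto those lists below remain accessible through
+// the client's Handlers field, so callers can inspect or extend them later.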
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *ACM {
+	svc := &ACM{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   ServiceName,
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "2015-12-08",
+				JSONVersion:   "1.1",
+				TargetPrefix:  "CertificateManager",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+	svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler)
+	svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler)
+	svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler)
+	svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for an ACM operation and runs any
+// custom request initialization.
+func (c *ACM) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	// Run custom request initialization if present
+	if initRequest != nil {
+		initRequest(req)
+	}
+
+	return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/apigateway/api.go b/vendor/github.com/aws/aws-sdk-go/service/apigateway/api.go
new file mode 100644
index 000000000..4755978d7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/apigateway/api.go
@@ -0,0 +1,8608 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package apigateway provides a client for Amazon API Gateway.
+package apigateway
+
+import (
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol"
+	"github.com/aws/aws-sdk-go/private/protocol/restjson"
+)
+
+const opCreateApiKey = "CreateApiKey"
+
+// CreateApiKeyRequest generates a "aws/request.Request" representing the
+// client's request for the CreateApiKey operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CreateApiKey method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the CreateApiKeyRequest method.
+//    req, resp := client.CreateApiKeyRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *APIGateway) CreateApiKeyRequest(input *CreateApiKeyInput) (req *request.Request, output *ApiKey) {
+	op := &request.Operation{
+		Name:       opCreateApiKey,
+		HTTPMethod: "POST",
+		HTTPPath:   "/apikeys",
+	}
+
+	if input == nil {
+		input = &CreateApiKeyInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ApiKey{}
+	req.Data = output
+	return
+}
+
+// Create an ApiKey resource.
+func (c *APIGateway) CreateApiKey(input *CreateApiKeyInput) (*ApiKey, error) {
+	req, out := c.CreateApiKeyRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateAuthorizer = "CreateAuthorizer"
+
+// CreateAuthorizerRequest generates a "aws/request.Request" representing the
+// client's request for the CreateAuthorizer operation.
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateAuthorizer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateAuthorizerRequest method. +// req, resp := client.CreateAuthorizerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) CreateAuthorizerRequest(input *CreateAuthorizerInput) (req *request.Request, output *Authorizer) { + op := &request.Operation{ + Name: opCreateAuthorizer, + HTTPMethod: "POST", + HTTPPath: "/restapis/{restapi_id}/authorizers", + } + + if input == nil { + input = &CreateAuthorizerInput{} + } + + req = c.newRequest(op, input, output) + output = &Authorizer{} + req.Data = output + return +} + +// Adds a new Authorizer resource to an existing RestApi resource. +func (c *APIGateway) CreateAuthorizer(input *CreateAuthorizerInput) (*Authorizer, error) { + req, out := c.CreateAuthorizerRequest(input) + err := req.Send() + return out, err +} + +const opCreateBasePathMapping = "CreateBasePathMapping" + +// CreateBasePathMappingRequest generates a "aws/request.Request" representing the +// client's request for the CreateBasePathMapping operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateBasePathMapping method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateBasePathMappingRequest method. +// req, resp := client.CreateBasePathMappingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) CreateBasePathMappingRequest(input *CreateBasePathMappingInput) (req *request.Request, output *BasePathMapping) { + op := &request.Operation{ + Name: opCreateBasePathMapping, + HTTPMethod: "POST", + HTTPPath: "/domainnames/{domain_name}/basepathmappings", + } + + if input == nil { + input = &CreateBasePathMappingInput{} + } + + req = c.newRequest(op, input, output) + output = &BasePathMapping{} + req.Data = output + return +} + +// Creates a new BasePathMapping resource. +func (c *APIGateway) CreateBasePathMapping(input *CreateBasePathMappingInput) (*BasePathMapping, error) { + req, out := c.CreateBasePathMappingRequest(input) + err := req.Send() + return out, err +} + +const opCreateDeployment = "CreateDeployment" + +// CreateDeploymentRequest generates a "aws/request.Request" representing the +// client's request for the CreateDeployment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDeployment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDeploymentRequest method. +// req, resp := client.CreateDeploymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) CreateDeploymentRequest(input *CreateDeploymentInput) (req *request.Request, output *Deployment) { + op := &request.Operation{ + Name: opCreateDeployment, + HTTPMethod: "POST", + HTTPPath: "/restapis/{restapi_id}/deployments", + } + + if input == nil { + input = &CreateDeploymentInput{} + } + + req = c.newRequest(op, input, output) + output = &Deployment{} + req.Data = output + return +} + +// Creates a Deployment resource, which makes a specified RestApi callable over +// the internet. +func (c *APIGateway) CreateDeployment(input *CreateDeploymentInput) (*Deployment, error) { + req, out := c.CreateDeploymentRequest(input) + err := req.Send() + return out, err +} + +const opCreateDomainName = "CreateDomainName" + +// CreateDomainNameRequest generates a "aws/request.Request" representing the +// client's request for the CreateDomainName operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDomainName method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDomainNameRequest method. +// req, resp := client.CreateDomainNameRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) CreateDomainNameRequest(input *CreateDomainNameInput) (req *request.Request, output *DomainName) { + op := &request.Operation{ + Name: opCreateDomainName, + HTTPMethod: "POST", + HTTPPath: "/domainnames", + } + + if input == nil { + input = &CreateDomainNameInput{} + } + + req = c.newRequest(op, input, output) + output = &DomainName{} + req.Data = output + return +} + +// Creates a new domain name. +func (c *APIGateway) CreateDomainName(input *CreateDomainNameInput) (*DomainName, error) { + req, out := c.CreateDomainNameRequest(input) + err := req.Send() + return out, err +} + +const opCreateModel = "CreateModel" + +// CreateModelRequest generates a "aws/request.Request" representing the +// client's request for the CreateModel operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateModel method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateModelRequest method. +// req, resp := client.CreateModelRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) CreateModelRequest(input *CreateModelInput) (req *request.Request, output *Model) { + op := &request.Operation{ + Name: opCreateModel, + HTTPMethod: "POST", + HTTPPath: "/restapis/{restapi_id}/models", + } + + if input == nil { + input = &CreateModelInput{} + } + + req = c.newRequest(op, input, output) + output = &Model{} + req.Data = output + return +} + +// Adds a new Model resource to an existing RestApi resource. +func (c *APIGateway) CreateModel(input *CreateModelInput) (*Model, error) { + req, out := c.CreateModelRequest(input) + err := req.Send() + return out, err +} + +const opCreateResource = "CreateResource" + +// CreateResourceRequest generates a "aws/request.Request" representing the +// client's request for the CreateResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateResourceRequest method. +// req, resp := client.CreateResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) CreateResourceRequest(input *CreateResourceInput) (req *request.Request, output *Resource) { + op := &request.Operation{ + Name: opCreateResource, + HTTPMethod: "POST", + HTTPPath: "/restapis/{restapi_id}/resources/{parent_id}", + } + + if input == nil { + input = &CreateResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &Resource{} + req.Data = output + return +} + +// Creates a Resource resource. +func (c *APIGateway) CreateResource(input *CreateResourceInput) (*Resource, error) { + req, out := c.CreateResourceRequest(input) + err := req.Send() + return out, err +} + +const opCreateRestApi = "CreateRestApi" + +// CreateRestApiRequest generates a "aws/request.Request" representing the +// client's request for the CreateRestApi operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateRestApi method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateRestApiRequest method. +// req, resp := client.CreateRestApiRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) CreateRestApiRequest(input *CreateRestApiInput) (req *request.Request, output *RestApi) { + op := &request.Operation{ + Name: opCreateRestApi, + HTTPMethod: "POST", + HTTPPath: "/restapis", + } + + if input == nil { + input = &CreateRestApiInput{} + } + + req = c.newRequest(op, input, output) + output = &RestApi{} + req.Data = output + return +} + +// Creates a new RestApi resource. +func (c *APIGateway) CreateRestApi(input *CreateRestApiInput) (*RestApi, error) { + req, out := c.CreateRestApiRequest(input) + err := req.Send() + return out, err +} + +const opCreateStage = "CreateStage" + +// CreateStageRequest generates a "aws/request.Request" representing the +// client's request for the CreateStage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateStage method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateStageRequest method. +// req, resp := client.CreateStageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) CreateStageRequest(input *CreateStageInput) (req *request.Request, output *Stage) { + op := &request.Operation{ + Name: opCreateStage, + HTTPMethod: "POST", + HTTPPath: "/restapis/{restapi_id}/stages", + } + + if input == nil { + input = &CreateStageInput{} + } + + req = c.newRequest(op, input, output) + output = &Stage{} + req.Data = output + return +} + +// Creates a new Stage resource that references a pre-existing Deployment for +// the API. +func (c *APIGateway) CreateStage(input *CreateStageInput) (*Stage, error) { + req, out := c.CreateStageRequest(input) + err := req.Send() + return out, err +} + +const opDeleteApiKey = "DeleteApiKey" + +// DeleteApiKeyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteApiKey operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteApiKey method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteApiKeyRequest method. 
+// req, resp := client.DeleteApiKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) DeleteApiKeyRequest(input *DeleteApiKeyInput) (req *request.Request, output *DeleteApiKeyOutput) { + op := &request.Operation{ + Name: opDeleteApiKey, + HTTPMethod: "DELETE", + HTTPPath: "/apikeys/{api_Key}", + } + + if input == nil { + input = &DeleteApiKeyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteApiKeyOutput{} + req.Data = output + return +} + +// Deletes the ApiKey resource. +func (c *APIGateway) DeleteApiKey(input *DeleteApiKeyInput) (*DeleteApiKeyOutput, error) { + req, out := c.DeleteApiKeyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteAuthorizer = "DeleteAuthorizer" + +// DeleteAuthorizerRequest generates a "aws/request.Request" representing the +// client's request for the DeleteAuthorizer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteAuthorizer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteAuthorizerRequest method. +// req, resp := client.DeleteAuthorizerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) DeleteAuthorizerRequest(input *DeleteAuthorizerInput) (req *request.Request, output *DeleteAuthorizerOutput) { + op := &request.Operation{ + Name: opDeleteAuthorizer, + HTTPMethod: "DELETE", + HTTPPath: "/restapis/{restapi_id}/authorizers/{authorizer_id}", + } + + if input == nil { + input = &DeleteAuthorizerInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteAuthorizerOutput{} + req.Data = output + return +} + +// Deletes an existing Authorizer resource. +func (c *APIGateway) DeleteAuthorizer(input *DeleteAuthorizerInput) (*DeleteAuthorizerOutput, error) { + req, out := c.DeleteAuthorizerRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBasePathMapping = "DeleteBasePathMapping" + +// DeleteBasePathMappingRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBasePathMapping operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBasePathMapping method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
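+//
+//    // A sketch of injecting custom lifecycle logic (this logging handler
+//    // is illustrative, not part of the SDK):
+//    req, _ := client.DeleteBasePathMappingRequest(params)
+//    req.Handlers.Send.PushFront(func(r *request.Request) {
+//        fmt.Println("sending", r.Operation.Name)
+//    })
+//    err := req.Send()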
+// +// // Example sending a request using the DeleteBasePathMappingRequest method. +// req, resp := client.DeleteBasePathMappingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) DeleteBasePathMappingRequest(input *DeleteBasePathMappingInput) (req *request.Request, output *DeleteBasePathMappingOutput) { + op := &request.Operation{ + Name: opDeleteBasePathMapping, + HTTPMethod: "DELETE", + HTTPPath: "/domainnames/{domain_name}/basepathmappings/{base_path}", + } + + if input == nil { + input = &DeleteBasePathMappingInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteBasePathMappingOutput{} + req.Data = output + return +} + +// Deletes the BasePathMapping resource. +func (c *APIGateway) DeleteBasePathMapping(input *DeleteBasePathMappingInput) (*DeleteBasePathMappingOutput, error) { + req, out := c.DeleteBasePathMappingRequest(input) + err := req.Send() + return out, err +} + +const opDeleteClientCertificate = "DeleteClientCertificate" + +// DeleteClientCertificateRequest generates a "aws/request.Request" representing the +// client's request for the DeleteClientCertificate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteClientCertificate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteClientCertificateRequest method. +// req, resp := client.DeleteClientCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) DeleteClientCertificateRequest(input *DeleteClientCertificateInput) (req *request.Request, output *DeleteClientCertificateOutput) { + op := &request.Operation{ + Name: opDeleteClientCertificate, + HTTPMethod: "DELETE", + HTTPPath: "/clientcertificates/{clientcertificate_id}", + } + + if input == nil { + input = &DeleteClientCertificateInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteClientCertificateOutput{} + req.Data = output + return +} + +// Deletes the ClientCertificate resource. +func (c *APIGateway) DeleteClientCertificate(input *DeleteClientCertificateInput) (*DeleteClientCertificateOutput, error) { + req, out := c.DeleteClientCertificateRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDeployment = "DeleteDeployment" + +// DeleteDeploymentRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDeployment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDeployment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteDeploymentRequest method. +// req, resp := client.DeleteDeploymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) DeleteDeploymentRequest(input *DeleteDeploymentInput) (req *request.Request, output *DeleteDeploymentOutput) { + op := &request.Operation{ + Name: opDeleteDeployment, + HTTPMethod: "DELETE", + HTTPPath: "/restapis/{restapi_id}/deployments/{deployment_id}", + } + + if input == nil { + input = &DeleteDeploymentInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteDeploymentOutput{} + req.Data = output + return +} + +// Deletes a Deployment resource. Deleting a deployment will only succeed if +// there are no Stage resources associated with it. +func (c *APIGateway) DeleteDeployment(input *DeleteDeploymentInput) (*DeleteDeploymentOutput, error) { + req, out := c.DeleteDeploymentRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDomainName = "DeleteDomainName" + +// DeleteDomainNameRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDomainName operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDomainName method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteDomainNameRequest method. +// req, resp := client.DeleteDomainNameRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) DeleteDomainNameRequest(input *DeleteDomainNameInput) (req *request.Request, output *DeleteDomainNameOutput) { + op := &request.Operation{ + Name: opDeleteDomainName, + HTTPMethod: "DELETE", + HTTPPath: "/domainnames/{domain_name}", + } + + if input == nil { + input = &DeleteDomainNameInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteDomainNameOutput{} + req.Data = output + return +} + +// Deletes the DomainName resource. 
+func (c *APIGateway) DeleteDomainName(input *DeleteDomainNameInput) (*DeleteDomainNameOutput, error) { + req, out := c.DeleteDomainNameRequest(input) + err := req.Send() + return out, err +} + +const opDeleteIntegration = "DeleteIntegration" + +// DeleteIntegrationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteIntegration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteIntegration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteIntegrationRequest method. +// req, resp := client.DeleteIntegrationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) DeleteIntegrationRequest(input *DeleteIntegrationInput) (req *request.Request, output *DeleteIntegrationOutput) { + op := &request.Operation{ + Name: opDeleteIntegration, + HTTPMethod: "DELETE", + HTTPPath: "/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/integration", + } + + if input == nil { + input = &DeleteIntegrationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteIntegrationOutput{} + req.Data = output + return +} + +// Represents a delete integration. +func (c *APIGateway) DeleteIntegration(input *DeleteIntegrationInput) (*DeleteIntegrationOutput, error) { + req, out := c.DeleteIntegrationRequest(input) + err := req.Send() + return out, err +} + +const opDeleteIntegrationResponse = "DeleteIntegrationResponse" + +// DeleteIntegrationResponseRequest generates a "aws/request.Request" representing the +// client's request for the DeleteIntegrationResponse operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteIntegrationResponse method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteIntegrationResponseRequest method. 
+// req, resp := client.DeleteIntegrationResponseRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) DeleteIntegrationResponseRequest(input *DeleteIntegrationResponseInput) (req *request.Request, output *DeleteIntegrationResponseOutput) { + op := &request.Operation{ + Name: opDeleteIntegrationResponse, + HTTPMethod: "DELETE", + HTTPPath: "/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/integration/responses/{status_code}", + } + + if input == nil { + input = &DeleteIntegrationResponseInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteIntegrationResponseOutput{} + req.Data = output + return +} + +// Represents a delete integration response. +func (c *APIGateway) DeleteIntegrationResponse(input *DeleteIntegrationResponseInput) (*DeleteIntegrationResponseOutput, error) { + req, out := c.DeleteIntegrationResponseRequest(input) + err := req.Send() + return out, err +} + +const opDeleteMethod = "DeleteMethod" + +// DeleteMethodRequest generates a "aws/request.Request" representing the +// client's request for the DeleteMethod operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteMethod method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteMethodRequest method. +// req, resp := client.DeleteMethodRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) DeleteMethodRequest(input *DeleteMethodInput) (req *request.Request, output *DeleteMethodOutput) { + op := &request.Operation{ + Name: opDeleteMethod, + HTTPMethod: "DELETE", + HTTPPath: "/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}", + } + + if input == nil { + input = &DeleteMethodInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteMethodOutput{} + req.Data = output + return +} + +// Deletes an existing Method resource. +func (c *APIGateway) DeleteMethod(input *DeleteMethodInput) (*DeleteMethodOutput, error) { + req, out := c.DeleteMethodRequest(input) + err := req.Send() + return out, err +} + +const opDeleteMethodResponse = "DeleteMethodResponse" + +// DeleteMethodResponseRequest generates a "aws/request.Request" representing the +// client's request for the DeleteMethodResponse operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the DeleteMethodResponse method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteMethodResponseRequest method. +// req, resp := client.DeleteMethodResponseRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) DeleteMethodResponseRequest(input *DeleteMethodResponseInput) (req *request.Request, output *DeleteMethodResponseOutput) { + op := &request.Operation{ + Name: opDeleteMethodResponse, + HTTPMethod: "DELETE", + HTTPPath: "/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/responses/{status_code}", + } + + if input == nil { + input = &DeleteMethodResponseInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteMethodResponseOutput{} + req.Data = output + return +} + +// Deletes an existing MethodResponse resource. +func (c *APIGateway) DeleteMethodResponse(input *DeleteMethodResponseInput) (*DeleteMethodResponseOutput, error) { + req, out := c.DeleteMethodResponseRequest(input) + err := req.Send() + return out, err +} + +const opDeleteModel = "DeleteModel" + +// DeleteModelRequest generates a "aws/request.Request" representing the +// client's request for the DeleteModel operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteModel method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteModelRequest method. +// req, resp := client.DeleteModelRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) DeleteModelRequest(input *DeleteModelInput) (req *request.Request, output *DeleteModelOutput) { + op := &request.Operation{ + Name: opDeleteModel, + HTTPMethod: "DELETE", + HTTPPath: "/restapis/{restapi_id}/models/{model_name}", + } + + if input == nil { + input = &DeleteModelInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteModelOutput{} + req.Data = output + return +} + +// Deletes a model. +func (c *APIGateway) DeleteModel(input *DeleteModelInput) (*DeleteModelOutput, error) { + req, out := c.DeleteModelRequest(input) + err := req.Send() + return out, err +} + +const opDeleteResource = "DeleteResource" + +// DeleteResourceRequest generates a "aws/request.Request" representing the +// client's request for the DeleteResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteResourceRequest method. +// req, resp := client.DeleteResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) DeleteResourceRequest(input *DeleteResourceInput) (req *request.Request, output *DeleteResourceOutput) { + op := &request.Operation{ + Name: opDeleteResource, + HTTPMethod: "DELETE", + HTTPPath: "/restapis/{restapi_id}/resources/{resource_id}", + } + + if input == nil { + input = &DeleteResourceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteResourceOutput{} + req.Data = output + return +} + +// Deletes a Resource resource. +func (c *APIGateway) DeleteResource(input *DeleteResourceInput) (*DeleteResourceOutput, error) { + req, out := c.DeleteResourceRequest(input) + err := req.Send() + return out, err +} + +const opDeleteRestApi = "DeleteRestApi" + +// DeleteRestApiRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRestApi operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteRestApi method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteRestApiRequest method. +// req, resp := client.DeleteRestApiRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) DeleteRestApiRequest(input *DeleteRestApiInput) (req *request.Request, output *DeleteRestApiOutput) { + op := &request.Operation{ + Name: opDeleteRestApi, + HTTPMethod: "DELETE", + HTTPPath: "/restapis/{restapi_id}", + } + + if input == nil { + input = &DeleteRestApiInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteRestApiOutput{} + req.Data = output + return +} + +// Deletes the specified API. +func (c *APIGateway) DeleteRestApi(input *DeleteRestApiInput) (*DeleteRestApiOutput, error) { + req, out := c.DeleteRestApiRequest(input) + err := req.Send() + return out, err +} + +const opDeleteStage = "DeleteStage" + +// DeleteStageRequest generates a "aws/request.Request" representing the +// client's request for the DeleteStage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteStage method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteStageRequest method. +// req, resp := client.DeleteStageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) DeleteStageRequest(input *DeleteStageInput) (req *request.Request, output *DeleteStageOutput) { + op := &request.Operation{ + Name: opDeleteStage, + HTTPMethod: "DELETE", + HTTPPath: "/restapis/{restapi_id}/stages/{stage_name}", + } + + if input == nil { + input = &DeleteStageInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteStageOutput{} + req.Data = output + return +} + +// Deletes a Stage resource. +func (c *APIGateway) DeleteStage(input *DeleteStageInput) (*DeleteStageOutput, error) { + req, out := c.DeleteStageRequest(input) + err := req.Send() + return out, err +} + +const opFlushStageAuthorizersCache = "FlushStageAuthorizersCache" + +// FlushStageAuthorizersCacheRequest generates a "aws/request.Request" representing the +// client's request for the FlushStageAuthorizersCache operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the FlushStageAuthorizersCache method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the FlushStageAuthorizersCacheRequest method. +// req, resp := client.FlushStageAuthorizersCacheRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) FlushStageAuthorizersCacheRequest(input *FlushStageAuthorizersCacheInput) (req *request.Request, output *FlushStageAuthorizersCacheOutput) { + op := &request.Operation{ + Name: opFlushStageAuthorizersCache, + HTTPMethod: "DELETE", + HTTPPath: "/restapis/{restapi_id}/stages/{stage_name}/cache/authorizers", + } + + if input == nil { + input = &FlushStageAuthorizersCacheInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &FlushStageAuthorizersCacheOutput{} + req.Data = output + return +} + +// Flushes all authorizer cache entries on a stage. 
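+//
+// A hedged usage sketch (identifiers are placeholders, not real values):
+//
+// _, err := client.FlushStageAuthorizersCache(&apigateway.FlushStageAuthorizersCacheInput{
+// RestApiId: aws.String("r1a2b3"),
+// StageName: aws.String("prod"),
+// })
+// if err != nil {
+// // the flush did not complete; inspect err
+// }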
+func (c *APIGateway) FlushStageAuthorizersCache(input *FlushStageAuthorizersCacheInput) (*FlushStageAuthorizersCacheOutput, error) { + req, out := c.FlushStageAuthorizersCacheRequest(input) + err := req.Send() + return out, err +} + +const opFlushStageCache = "FlushStageCache" + +// FlushStageCacheRequest generates a "aws/request.Request" representing the +// client's request for the FlushStageCache operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the FlushStageCache method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the FlushStageCacheRequest method. +// req, resp := client.FlushStageCacheRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) FlushStageCacheRequest(input *FlushStageCacheInput) (req *request.Request, output *FlushStageCacheOutput) { + op := &request.Operation{ + Name: opFlushStageCache, + HTTPMethod: "DELETE", + HTTPPath: "/restapis/{restapi_id}/stages/{stage_name}/cache/data", + } + + if input == nil { + input = &FlushStageCacheInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &FlushStageCacheOutput{} + req.Data = output + return +} + +// Flushes a stage's cache. +func (c *APIGateway) FlushStageCache(input *FlushStageCacheInput) (*FlushStageCacheOutput, error) { + req, out := c.FlushStageCacheRequest(input) + err := req.Send() + return out, err +} + +const opGenerateClientCertificate = "GenerateClientCertificate" + +// GenerateClientCertificateRequest generates a "aws/request.Request" representing the +// client's request for the GenerateClientCertificate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GenerateClientCertificate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GenerateClientCertificateRequest method. 
+// req, resp := client.GenerateClientCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) GenerateClientCertificateRequest(input *GenerateClientCertificateInput) (req *request.Request, output *ClientCertificate) { + op := &request.Operation{ + Name: opGenerateClientCertificate, + HTTPMethod: "POST", + HTTPPath: "/clientcertificates", + } + + if input == nil { + input = &GenerateClientCertificateInput{} + } + + req = c.newRequest(op, input, output) + output = &ClientCertificate{} + req.Data = output + return +} + +// Generates a ClientCertificate resource. +func (c *APIGateway) GenerateClientCertificate(input *GenerateClientCertificateInput) (*ClientCertificate, error) { + req, out := c.GenerateClientCertificateRequest(input) + err := req.Send() + return out, err +} + +const opGetAccount = "GetAccount" + +// GetAccountRequest generates a "aws/request.Request" representing the +// client's request for the GetAccount operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetAccount method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetAccountRequest method. +// req, resp := client.GetAccountRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) GetAccountRequest(input *GetAccountInput) (req *request.Request, output *Account) { + op := &request.Operation{ + Name: opGetAccount, + HTTPMethod: "GET", + HTTPPath: "/account", + } + + if input == nil { + input = &GetAccountInput{} + } + + req = c.newRequest(op, input, output) + output = &Account{} + req.Data = output + return +} + +// Gets information about the current Account resource. +func (c *APIGateway) GetAccount(input *GetAccountInput) (*Account, error) { + req, out := c.GetAccountRequest(input) + err := req.Send() + return out, err +} + +const opGetApiKey = "GetApiKey" + +// GetApiKeyRequest generates a "aws/request.Request" representing the +// client's request for the GetApiKey operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetApiKey method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetApiKeyRequest method. 
+// req, resp := client.GetApiKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) GetApiKeyRequest(input *GetApiKeyInput) (req *request.Request, output *ApiKey) { + op := &request.Operation{ + Name: opGetApiKey, + HTTPMethod: "GET", + HTTPPath: "/apikeys/{api_Key}", + } + + if input == nil { + input = &GetApiKeyInput{} + } + + req = c.newRequest(op, input, output) + output = &ApiKey{} + req.Data = output + return +} + +// Gets information about the current ApiKey resource. +func (c *APIGateway) GetApiKey(input *GetApiKeyInput) (*ApiKey, error) { + req, out := c.GetApiKeyRequest(input) + err := req.Send() + return out, err +} + +const opGetApiKeys = "GetApiKeys" + +// GetApiKeysRequest generates a "aws/request.Request" representing the +// client's request for the GetApiKeys operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetApiKeys method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetApiKeysRequest method. +// req, resp := client.GetApiKeysRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) GetApiKeysRequest(input *GetApiKeysInput) (req *request.Request, output *GetApiKeysOutput) { + op := &request.Operation{ + Name: opGetApiKeys, + HTTPMethod: "GET", + HTTPPath: "/apikeys", + Paginator: &request.Paginator{ + InputTokens: []string{"position"}, + OutputTokens: []string{"position"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetApiKeysInput{} + } + + req = c.newRequest(op, input, output) + output = &GetApiKeysOutput{} + req.Data = output + return +} + +// Gets information about the current ApiKeys resource. +func (c *APIGateway) GetApiKeys(input *GetApiKeysInput) (*GetApiKeysOutput, error) { + req, out := c.GetApiKeysRequest(input) + err := req.Send() + return out, err +} + +// GetApiKeysPages iterates over the pages of a GetApiKeys operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetApiKeys method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetApiKeys operation. 
+// pageNum := 0 +// err := client.GetApiKeysPages(params, +// func(page *GetApiKeysOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *APIGateway) GetApiKeysPages(input *GetApiKeysInput, fn func(p *GetApiKeysOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.GetApiKeysRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*GetApiKeysOutput), lastPage) + }) +} + +const opGetAuthorizer = "GetAuthorizer" + +// GetAuthorizerRequest generates a "aws/request.Request" representing the +// client's request for the GetAuthorizer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetAuthorizer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetAuthorizerRequest method. +// req, resp := client.GetAuthorizerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) GetAuthorizerRequest(input *GetAuthorizerInput) (req *request.Request, output *Authorizer) { + op := &request.Operation{ + Name: opGetAuthorizer, + HTTPMethod: "GET", + HTTPPath: "/restapis/{restapi_id}/authorizers/{authorizer_id}", + } + + if input == nil { + input = &GetAuthorizerInput{} + } + + req = c.newRequest(op, input, output) + output = &Authorizer{} + req.Data = output + return +} + +// Describe an existing Authorizer resource. +func (c *APIGateway) GetAuthorizer(input *GetAuthorizerInput) (*Authorizer, error) { + req, out := c.GetAuthorizerRequest(input) + err := req.Send() + return out, err +} + +const opGetAuthorizers = "GetAuthorizers" + +// GetAuthorizersRequest generates a "aws/request.Request" representing the +// client's request for the GetAuthorizers operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetAuthorizers method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetAuthorizersRequest method. 
+// req, resp := client.GetAuthorizersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) GetAuthorizersRequest(input *GetAuthorizersInput) (req *request.Request, output *GetAuthorizersOutput) { + op := &request.Operation{ + Name: opGetAuthorizers, + HTTPMethod: "GET", + HTTPPath: "/restapis/{restapi_id}/authorizers", + } + + if input == nil { + input = &GetAuthorizersInput{} + } + + req = c.newRequest(op, input, output) + output = &GetAuthorizersOutput{} + req.Data = output + return +} + +// Describe an existing Authorizers resource. +func (c *APIGateway) GetAuthorizers(input *GetAuthorizersInput) (*GetAuthorizersOutput, error) { + req, out := c.GetAuthorizersRequest(input) + err := req.Send() + return out, err +} + +const opGetBasePathMapping = "GetBasePathMapping" + +// GetBasePathMappingRequest generates a "aws/request.Request" representing the +// client's request for the GetBasePathMapping operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBasePathMapping method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBasePathMappingRequest method. +// req, resp := client.GetBasePathMappingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) GetBasePathMappingRequest(input *GetBasePathMappingInput) (req *request.Request, output *BasePathMapping) { + op := &request.Operation{ + Name: opGetBasePathMapping, + HTTPMethod: "GET", + HTTPPath: "/domainnames/{domain_name}/basepathmappings/{base_path}", + } + + if input == nil { + input = &GetBasePathMappingInput{} + } + + req = c.newRequest(op, input, output) + output = &BasePathMapping{} + req.Data = output + return +} + +// Describe a BasePathMapping resource. +func (c *APIGateway) GetBasePathMapping(input *GetBasePathMappingInput) (*BasePathMapping, error) { + req, out := c.GetBasePathMappingRequest(input) + err := req.Send() + return out, err +} + +const opGetBasePathMappings = "GetBasePathMappings" + +// GetBasePathMappingsRequest generates a "aws/request.Request" representing the +// client's request for the GetBasePathMappings operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBasePathMappings method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBasePathMappingsRequest method. 
+// req, resp := client.GetBasePathMappingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) GetBasePathMappingsRequest(input *GetBasePathMappingsInput) (req *request.Request, output *GetBasePathMappingsOutput) { + op := &request.Operation{ + Name: opGetBasePathMappings, + HTTPMethod: "GET", + HTTPPath: "/domainnames/{domain_name}/basepathmappings", + Paginator: &request.Paginator{ + InputTokens: []string{"position"}, + OutputTokens: []string{"position"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetBasePathMappingsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBasePathMappingsOutput{} + req.Data = output + return +} + +// Represents a collection of BasePathMapping resources. +func (c *APIGateway) GetBasePathMappings(input *GetBasePathMappingsInput) (*GetBasePathMappingsOutput, error) { + req, out := c.GetBasePathMappingsRequest(input) + err := req.Send() + return out, err +} + +// GetBasePathMappingsPages iterates over the pages of a GetBasePathMappings operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetBasePathMappings method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetBasePathMappings operation. +// pageNum := 0 +// err := client.GetBasePathMappingsPages(params, +// func(page *GetBasePathMappingsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *APIGateway) GetBasePathMappingsPages(input *GetBasePathMappingsInput, fn func(p *GetBasePathMappingsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.GetBasePathMappingsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*GetBasePathMappingsOutput), lastPage) + }) +} + +const opGetClientCertificate = "GetClientCertificate" + +// GetClientCertificateRequest generates a "aws/request.Request" representing the +// client's request for the GetClientCertificate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetClientCertificate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetClientCertificateRequest method. 
+// req, resp := client.GetClientCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) GetClientCertificateRequest(input *GetClientCertificateInput) (req *request.Request, output *ClientCertificate) { + op := &request.Operation{ + Name: opGetClientCertificate, + HTTPMethod: "GET", + HTTPPath: "/clientcertificates/{clientcertificate_id}", + } + + if input == nil { + input = &GetClientCertificateInput{} + } + + req = c.newRequest(op, input, output) + output = &ClientCertificate{} + req.Data = output + return +} + +// Gets information about the current ClientCertificate resource. +func (c *APIGateway) GetClientCertificate(input *GetClientCertificateInput) (*ClientCertificate, error) { + req, out := c.GetClientCertificateRequest(input) + err := req.Send() + return out, err +} + +const opGetClientCertificates = "GetClientCertificates" + +// GetClientCertificatesRequest generates a "aws/request.Request" representing the +// client's request for the GetClientCertificates operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetClientCertificates method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetClientCertificatesRequest method. +// req, resp := client.GetClientCertificatesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) GetClientCertificatesRequest(input *GetClientCertificatesInput) (req *request.Request, output *GetClientCertificatesOutput) { + op := &request.Operation{ + Name: opGetClientCertificates, + HTTPMethod: "GET", + HTTPPath: "/clientcertificates", + Paginator: &request.Paginator{ + InputTokens: []string{"position"}, + OutputTokens: []string{"position"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetClientCertificatesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetClientCertificatesOutput{} + req.Data = output + return +} + +// Gets a collection of ClientCertificate resources. +func (c *APIGateway) GetClientCertificates(input *GetClientCertificatesInput) (*GetClientCertificatesOutput, error) { + req, out := c.GetClientCertificatesRequest(input) + err := req.Send() + return out, err +} + +// GetClientCertificatesPages iterates over the pages of a GetClientCertificates operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetClientCertificates method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetClientCertificates operation. 
+// pageNum := 0 +// err := client.GetClientCertificatesPages(params, +// func(page *GetClientCertificatesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *APIGateway) GetClientCertificatesPages(input *GetClientCertificatesInput, fn func(p *GetClientCertificatesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.GetClientCertificatesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*GetClientCertificatesOutput), lastPage) + }) +} + +const opGetDeployment = "GetDeployment" + +// GetDeploymentRequest generates a "aws/request.Request" representing the +// client's request for the GetDeployment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetDeployment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetDeploymentRequest method. +// req, resp := client.GetDeploymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) GetDeploymentRequest(input *GetDeploymentInput) (req *request.Request, output *Deployment) { + op := &request.Operation{ + Name: opGetDeployment, + HTTPMethod: "GET", + HTTPPath: "/restapis/{restapi_id}/deployments/{deployment_id}", + } + + if input == nil { + input = &GetDeploymentInput{} + } + + req = c.newRequest(op, input, output) + output = &Deployment{} + req.Data = output + return +} + +// Gets information about a Deployment resource. +func (c *APIGateway) GetDeployment(input *GetDeploymentInput) (*Deployment, error) { + req, out := c.GetDeploymentRequest(input) + err := req.Send() + return out, err +} + +const opGetDeployments = "GetDeployments" + +// GetDeploymentsRequest generates a "aws/request.Request" representing the +// client's request for the GetDeployments operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetDeployments method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetDeploymentsRequest method. 
+// req, resp := client.GetDeploymentsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) GetDeploymentsRequest(input *GetDeploymentsInput) (req *request.Request, output *GetDeploymentsOutput) { + op := &request.Operation{ + Name: opGetDeployments, + HTTPMethod: "GET", + HTTPPath: "/restapis/{restapi_id}/deployments", + Paginator: &request.Paginator{ + InputTokens: []string{"position"}, + OutputTokens: []string{"position"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetDeploymentsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetDeploymentsOutput{} + req.Data = output + return +} + +// Gets information about a Deployments collection. +func (c *APIGateway) GetDeployments(input *GetDeploymentsInput) (*GetDeploymentsOutput, error) { + req, out := c.GetDeploymentsRequest(input) + err := req.Send() + return out, err +} + +// GetDeploymentsPages iterates over the pages of a GetDeployments operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetDeployments method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetDeployments operation. +// pageNum := 0 +// err := client.GetDeploymentsPages(params, +// func(page *GetDeploymentsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *APIGateway) GetDeploymentsPages(input *GetDeploymentsInput, fn func(p *GetDeploymentsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.GetDeploymentsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*GetDeploymentsOutput), lastPage) + }) +} + +const opGetDomainName = "GetDomainName" + +// GetDomainNameRequest generates a "aws/request.Request" representing the +// client's request for the GetDomainName operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetDomainName method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetDomainNameRequest method. +// req, resp := client.GetDomainNameRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) GetDomainNameRequest(input *GetDomainNameInput) (req *request.Request, output *DomainName) { + op := &request.Operation{ + Name: opGetDomainName, + HTTPMethod: "GET", + HTTPPath: "/domainnames/{domain_name}", + } + + if input == nil { + input = &GetDomainNameInput{} + } + + req = c.newRequest(op, input, output) + output = &DomainName{} + req.Data = output + return +} + +// Represents a domain name that is contained in a simpler, more intuitive URL +// that can be called. 
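+//
+// A hedged illustration (the domain value is a placeholder):
+//
+// name, err := client.GetDomainName(&apigateway.GetDomainNameInput{
+// DomainName: aws.String("api.example.com"),
+// })
+// if err == nil {
+// fmt.Println(name) // the fetched DomainName resource
+// }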
+func (c *APIGateway) GetDomainName(input *GetDomainNameInput) (*DomainName, error) { + req, out := c.GetDomainNameRequest(input) + err := req.Send() + return out, err +} + +const opGetDomainNames = "GetDomainNames" + +// GetDomainNamesRequest generates a "aws/request.Request" representing the +// client's request for the GetDomainNames operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetDomainNames method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetDomainNamesRequest method. +// req, resp := client.GetDomainNamesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) GetDomainNamesRequest(input *GetDomainNamesInput) (req *request.Request, output *GetDomainNamesOutput) { + op := &request.Operation{ + Name: opGetDomainNames, + HTTPMethod: "GET", + HTTPPath: "/domainnames", + Paginator: &request.Paginator{ + InputTokens: []string{"position"}, + OutputTokens: []string{"position"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetDomainNamesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetDomainNamesOutput{} + req.Data = output + return +} + +// Represents a collection of DomainName resources. +func (c *APIGateway) GetDomainNames(input *GetDomainNamesInput) (*GetDomainNamesOutput, error) { + req, out := c.GetDomainNamesRequest(input) + err := req.Send() + return out, err +} + +// GetDomainNamesPages iterates over the pages of a GetDomainNames operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetDomainNames method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetDomainNames operation. +// pageNum := 0 +// err := client.GetDomainNamesPages(params, +// func(page *GetDomainNamesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *APIGateway) GetDomainNamesPages(input *GetDomainNamesInput, fn func(p *GetDomainNamesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.GetDomainNamesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*GetDomainNamesOutput), lastPage) + }) +} + +const opGetExport = "GetExport" + +// GetExportRequest generates a "aws/request.Request" representing the +// client's request for the GetExport operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the GetExport method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetExportRequest method. +// req, resp := client.GetExportRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) GetExportRequest(input *GetExportInput) (req *request.Request, output *GetExportOutput) { + op := &request.Operation{ + Name: opGetExport, + HTTPMethod: "GET", + HTTPPath: "/restapis/{restapi_id}/stages/{stage_name}/exports/{export_type}", + } + + if input == nil { + input = &GetExportInput{} + } + + req = c.newRequest(op, input, output) + output = &GetExportOutput{} + req.Data = output + return +} + +// Exports a deployed version of a RestApi in a specified format. +func (c *APIGateway) GetExport(input *GetExportInput) (*GetExportOutput, error) { + req, out := c.GetExportRequest(input) + err := req.Send() + return out, err +} + +const opGetIntegration = "GetIntegration" + +// GetIntegrationRequest generates a "aws/request.Request" representing the +// client's request for the GetIntegration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetIntegration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetIntegrationRequest method. +// req, resp := client.GetIntegrationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) GetIntegrationRequest(input *GetIntegrationInput) (req *request.Request, output *Integration) { + op := &request.Operation{ + Name: opGetIntegration, + HTTPMethod: "GET", + HTTPPath: "/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/integration", + } + + if input == nil { + input = &GetIntegrationInput{} + } + + req = c.newRequest(op, input, output) + output = &Integration{} + req.Data = output + return +} + +// Represents a get integration. +func (c *APIGateway) GetIntegration(input *GetIntegrationInput) (*Integration, error) { + req, out := c.GetIntegrationRequest(input) + err := req.Send() + return out, err +} + +const opGetIntegrationResponse = "GetIntegrationResponse" + +// GetIntegrationResponseRequest generates a "aws/request.Request" representing the +// client's request for the GetIntegrationResponse operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetIntegrationResponse method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
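+//
+// Editorial note (hedged): this call returns the shared IntegrationResponse
+// shape directly rather than a dedicated *Output struct, so resp in the example
+// below is an *IntegrationResponse keyed by an HTTP status code. The params it
+// uses could be built from placeholder identifiers such as:
+//
+// params := &apigateway.GetIntegrationResponseInput{
+// RestApiId: aws.String("r1a2b3"), // placeholders throughout
+// ResourceId: aws.String("res123"),
+// HttpMethod: aws.String("GET"),
+// StatusCode: aws.String("200"),
+// }
+//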
+// +// // Example sending a request using the GetIntegrationResponseRequest method. +// req, resp := client.GetIntegrationResponseRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) GetIntegrationResponseRequest(input *GetIntegrationResponseInput) (req *request.Request, output *IntegrationResponse) { + op := &request.Operation{ + Name: opGetIntegrationResponse, + HTTPMethod: "GET", + HTTPPath: "/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/integration/responses/{status_code}", + } + + if input == nil { + input = &GetIntegrationResponseInput{} + } + + req = c.newRequest(op, input, output) + output = &IntegrationResponse{} + req.Data = output + return +} + +// Represents a get integration response. +func (c *APIGateway) GetIntegrationResponse(input *GetIntegrationResponseInput) (*IntegrationResponse, error) { + req, out := c.GetIntegrationResponseRequest(input) + err := req.Send() + return out, err +} + +const opGetMethod = "GetMethod" + +// GetMethodRequest generates a "aws/request.Request" representing the +// client's request for the GetMethod operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetMethod method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetMethodRequest method. +// req, resp := client.GetMethodRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) GetMethodRequest(input *GetMethodInput) (req *request.Request, output *Method) { + op := &request.Operation{ + Name: opGetMethod, + HTTPMethod: "GET", + HTTPPath: "/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}", + } + + if input == nil { + input = &GetMethodInput{} + } + + req = c.newRequest(op, input, output) + output = &Method{} + req.Data = output + return +} + +// Describe an existing Method resource. +func (c *APIGateway) GetMethod(input *GetMethodInput) (*Method, error) { + req, out := c.GetMethodRequest(input) + err := req.Send() + return out, err +} + +const opGetMethodResponse = "GetMethodResponse" + +// GetMethodResponseRequest generates a "aws/request.Request" representing the +// client's request for the GetMethodResponse operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetMethodResponse method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetMethodResponseRequest method. 
+// req, resp := client.GetMethodResponseRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) GetMethodResponseRequest(input *GetMethodResponseInput) (req *request.Request, output *MethodResponse) { + op := &request.Operation{ + Name: opGetMethodResponse, + HTTPMethod: "GET", + HTTPPath: "/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/responses/{status_code}", + } + + if input == nil { + input = &GetMethodResponseInput{} + } + + req = c.newRequest(op, input, output) + output = &MethodResponse{} + req.Data = output + return +} + +// Describes a MethodResponse resource. +func (c *APIGateway) GetMethodResponse(input *GetMethodResponseInput) (*MethodResponse, error) { + req, out := c.GetMethodResponseRequest(input) + err := req.Send() + return out, err +} + +const opGetModel = "GetModel" + +// GetModelRequest generates a "aws/request.Request" representing the +// client's request for the GetModel operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetModel method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetModelRequest method. +// req, resp := client.GetModelRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) GetModelRequest(input *GetModelInput) (req *request.Request, output *Model) { + op := &request.Operation{ + Name: opGetModel, + HTTPMethod: "GET", + HTTPPath: "/restapis/{restapi_id}/models/{model_name}", + } + + if input == nil { + input = &GetModelInput{} + } + + req = c.newRequest(op, input, output) + output = &Model{} + req.Data = output + return +} + +// Describes an existing model defined for a RestApi resource. +func (c *APIGateway) GetModel(input *GetModelInput) (*Model, error) { + req, out := c.GetModelRequest(input) + err := req.Send() + return out, err +} + +const opGetModelTemplate = "GetModelTemplate" + +// GetModelTemplateRequest generates a "aws/request.Request" representing the +// client's request for the GetModelTemplate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetModelTemplate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetModelTemplateRequest method. 
+// req, resp := client.GetModelTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) GetModelTemplateRequest(input *GetModelTemplateInput) (req *request.Request, output *GetModelTemplateOutput) { + op := &request.Operation{ + Name: opGetModelTemplate, + HTTPMethod: "GET", + HTTPPath: "/restapis/{restapi_id}/models/{model_name}/default_template", + } + + if input == nil { + input = &GetModelTemplateInput{} + } + + req = c.newRequest(op, input, output) + output = &GetModelTemplateOutput{} + req.Data = output + return +} + +// Generates a sample mapping template that can be used to transform a payload +// into the structure of a model. +func (c *APIGateway) GetModelTemplate(input *GetModelTemplateInput) (*GetModelTemplateOutput, error) { + req, out := c.GetModelTemplateRequest(input) + err := req.Send() + return out, err +} + +const opGetModels = "GetModels" + +// GetModelsRequest generates a "aws/request.Request" representing the +// client's request for the GetModels operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetModels method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetModelsRequest method. +// req, resp := client.GetModelsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) GetModelsRequest(input *GetModelsInput) (req *request.Request, output *GetModelsOutput) { + op := &request.Operation{ + Name: opGetModels, + HTTPMethod: "GET", + HTTPPath: "/restapis/{restapi_id}/models", + Paginator: &request.Paginator{ + InputTokens: []string{"position"}, + OutputTokens: []string{"position"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetModelsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetModelsOutput{} + req.Data = output + return +} + +// Describes existing Models defined for a RestApi resource. +func (c *APIGateway) GetModels(input *GetModelsInput) (*GetModelsOutput, error) { + req, out := c.GetModelsRequest(input) + err := req.Send() + return out, err +} + +// GetModelsPages iterates over the pages of a GetModels operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetModels method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetModels operation. 
+// pageNum := 0 +// err := client.GetModelsPages(params, +// func(page *GetModelsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *APIGateway) GetModelsPages(input *GetModelsInput, fn func(p *GetModelsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.GetModelsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*GetModelsOutput), lastPage) + }) +} + +const opGetResource = "GetResource" + +// GetResourceRequest generates a "aws/request.Request" representing the +// client's request for the GetResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetResourceRequest method. +// req, resp := client.GetResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) GetResourceRequest(input *GetResourceInput) (req *request.Request, output *Resource) { + op := &request.Operation{ + Name: opGetResource, + HTTPMethod: "GET", + HTTPPath: "/restapis/{restapi_id}/resources/{resource_id}", + } + + if input == nil { + input = &GetResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &Resource{} + req.Data = output + return +} + +// Lists information about a resource. +func (c *APIGateway) GetResource(input *GetResourceInput) (*Resource, error) { + req, out := c.GetResourceRequest(input) + err := req.Send() + return out, err +} + +const opGetResources = "GetResources" + +// GetResourcesRequest generates a "aws/request.Request" representing the +// client's request for the GetResources operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetResources method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetResourcesRequest method. 
+// req, resp := client.GetResourcesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) GetResourcesRequest(input *GetResourcesInput) (req *request.Request, output *GetResourcesOutput) { + op := &request.Operation{ + Name: opGetResources, + HTTPMethod: "GET", + HTTPPath: "/restapis/{restapi_id}/resources", + Paginator: &request.Paginator{ + InputTokens: []string{"position"}, + OutputTokens: []string{"position"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetResourcesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetResourcesOutput{} + req.Data = output + return +} + +// Lists information about a collection of Resource resources. +func (c *APIGateway) GetResources(input *GetResourcesInput) (*GetResourcesOutput, error) { + req, out := c.GetResourcesRequest(input) + err := req.Send() + return out, err +} + +// GetResourcesPages iterates over the pages of a GetResources operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetResources method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetResources operation. +// pageNum := 0 +// err := client.GetResourcesPages(params, +// func(page *GetResourcesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *APIGateway) GetResourcesPages(input *GetResourcesInput, fn func(p *GetResourcesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.GetResourcesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*GetResourcesOutput), lastPage) + }) +} + +const opGetRestApi = "GetRestApi" + +// GetRestApiRequest generates a "aws/request.Request" representing the +// client's request for the GetRestApi operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetRestApi method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetRestApiRequest method. +// req, resp := client.GetRestApiRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) GetRestApiRequest(input *GetRestApiInput) (req *request.Request, output *RestApi) { + op := &request.Operation{ + Name: opGetRestApi, + HTTPMethod: "GET", + HTTPPath: "/restapis/{restapi_id}", + } + + if input == nil { + input = &GetRestApiInput{} + } + + req = c.newRequest(op, input, output) + output = &RestApi{} + req.Data = output + return +} + +// Lists the RestApi resource in the collection. 
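+//
+// A hedged sketch (the id is a placeholder):
+//
+// api, err := client.GetRestApi(&apigateway.GetRestApiInput{
+// RestApiId: aws.String("r1a2b3"),
+// })
+// if err == nil {
+// fmt.Println(api) // the RestApi resource, including its Id and Name
+// }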
+func (c *APIGateway) GetRestApi(input *GetRestApiInput) (*RestApi, error) { + req, out := c.GetRestApiRequest(input) + err := req.Send() + return out, err +} + +const opGetRestApis = "GetRestApis" + +// GetRestApisRequest generates a "aws/request.Request" representing the +// client's request for the GetRestApis operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetRestApis method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetRestApisRequest method. +// req, resp := client.GetRestApisRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) GetRestApisRequest(input *GetRestApisInput) (req *request.Request, output *GetRestApisOutput) { + op := &request.Operation{ + Name: opGetRestApis, + HTTPMethod: "GET", + HTTPPath: "/restapis", + Paginator: &request.Paginator{ + InputTokens: []string{"position"}, + OutputTokens: []string{"position"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetRestApisInput{} + } + + req = c.newRequest(op, input, output) + output = &GetRestApisOutput{} + req.Data = output + return +} + +// Lists the RestApis resources for your collection. +func (c *APIGateway) GetRestApis(input *GetRestApisInput) (*GetRestApisOutput, error) { + req, out := c.GetRestApisRequest(input) + err := req.Send() + return out, err +} + +// GetRestApisPages iterates over the pages of a GetRestApis operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetRestApis method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetRestApis operation. +// pageNum := 0 +// err := client.GetRestApisPages(params, +// func(page *GetRestApisOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *APIGateway) GetRestApisPages(input *GetRestApisInput, fn func(p *GetRestApisOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.GetRestApisRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*GetRestApisOutput), lastPage) + }) +} + +const opGetSdk = "GetSdk" + +// GetSdkRequest generates a "aws/request.Request" representing the +// client's request for the GetSdk operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetSdk method directly +// instead. 
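+//
+// Hedged aside: the generated SDK archive comes back as raw bytes on the
+// output's Body field, which a caller might persist, e.g. (filename is
+// arbitrary):
+//
+// out, err := client.GetSdk(params)
+// if err == nil {
+// ioutil.WriteFile("sdk.zip", out.Body, 0644)
+// }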
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetSdkRequest method. +// req, resp := client.GetSdkRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) GetSdkRequest(input *GetSdkInput) (req *request.Request, output *GetSdkOutput) { + op := &request.Operation{ + Name: opGetSdk, + HTTPMethod: "GET", + HTTPPath: "/restapis/{restapi_id}/stages/{stage_name}/sdks/{sdk_type}", + } + + if input == nil { + input = &GetSdkInput{} + } + + req = c.newRequest(op, input, output) + output = &GetSdkOutput{} + req.Data = output + return +} + +// Generates a client SDK for a RestApi and Stage. +func (c *APIGateway) GetSdk(input *GetSdkInput) (*GetSdkOutput, error) { + req, out := c.GetSdkRequest(input) + err := req.Send() + return out, err +} + +const opGetStage = "GetStage" + +// GetStageRequest generates a "aws/request.Request" representing the +// client's request for the GetStage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetStage method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetStageRequest method. +// req, resp := client.GetStageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) GetStageRequest(input *GetStageInput) (req *request.Request, output *Stage) { + op := &request.Operation{ + Name: opGetStage, + HTTPMethod: "GET", + HTTPPath: "/restapis/{restapi_id}/stages/{stage_name}", + } + + if input == nil { + input = &GetStageInput{} + } + + req = c.newRequest(op, input, output) + output = &Stage{} + req.Data = output + return +} + +// Gets information about a Stage resource. +func (c *APIGateway) GetStage(input *GetStageInput) (*Stage, error) { + req, out := c.GetStageRequest(input) + err := req.Send() + return out, err +} + +const opGetStages = "GetStages" + +// GetStagesRequest generates a "aws/request.Request" representing the +// client's request for the GetStages operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetStages method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetStagesRequest method. 
+// req, resp := client.GetStagesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) GetStagesRequest(input *GetStagesInput) (req *request.Request, output *GetStagesOutput) { + op := &request.Operation{ + Name: opGetStages, + HTTPMethod: "GET", + HTTPPath: "/restapis/{restapi_id}/stages", + } + + if input == nil { + input = &GetStagesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetStagesOutput{} + req.Data = output + return +} + +// Gets information about one or more Stage resources. +func (c *APIGateway) GetStages(input *GetStagesInput) (*GetStagesOutput, error) { + req, out := c.GetStagesRequest(input) + err := req.Send() + return out, err +} + +const opImportRestApi = "ImportRestApi" + +// ImportRestApiRequest generates a "aws/request.Request" representing the +// client's request for the ImportRestApi operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ImportRestApi method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ImportRestApiRequest method. +// req, resp := client.ImportRestApiRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) ImportRestApiRequest(input *ImportRestApiInput) (req *request.Request, output *RestApi) { + op := &request.Operation{ + Name: opImportRestApi, + HTTPMethod: "POST", + HTTPPath: "/restapis?mode=import", + } + + if input == nil { + input = &ImportRestApiInput{} + } + + req = c.newRequest(op, input, output) + output = &RestApi{} + req.Data = output + return +} + +// A feature of the Amazon API Gateway control service for creating a new API +// from an external API definition file. +func (c *APIGateway) ImportRestApi(input *ImportRestApiInput) (*RestApi, error) { + req, out := c.ImportRestApiRequest(input) + err := req.Send() + return out, err +} + +const opPutIntegration = "PutIntegration" + +// PutIntegrationRequest generates a "aws/request.Request" representing the +// client's request for the PutIntegration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutIntegration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutIntegrationRequest method. 
+// req, resp := client.PutIntegrationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *APIGateway) PutIntegrationRequest(input *PutIntegrationInput) (req *request.Request, output *Integration) {
+ op := &request.Operation{
+ Name: opPutIntegration,
+ HTTPMethod: "PUT",
+ HTTPPath: "/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/integration",
+ }
+
+ if input == nil {
+ input = &PutIntegrationInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &Integration{}
+ req.Data = output
+ return
+}
+
+// Represents a put integration.
+func (c *APIGateway) PutIntegration(input *PutIntegrationInput) (*Integration, error) {
+ req, out := c.PutIntegrationRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opPutIntegrationResponse = "PutIntegrationResponse"
+
+// PutIntegrationResponseRequest generates a "aws/request.Request" representing the
+// client's request for the PutIntegrationResponse operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutIntegrationResponse method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the PutIntegrationResponseRequest method.
+// req, resp := client.PutIntegrationResponseRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *APIGateway) PutIntegrationResponseRequest(input *PutIntegrationResponseInput) (req *request.Request, output *IntegrationResponse) {
+ op := &request.Operation{
+ Name: opPutIntegrationResponse,
+ HTTPMethod: "PUT",
+ HTTPPath: "/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/integration/responses/{status_code}",
+ }
+
+ if input == nil {
+ input = &PutIntegrationResponseInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &IntegrationResponse{}
+ req.Data = output
+ return
+}
+
+// Represents a put integration response.
+func (c *APIGateway) PutIntegrationResponse(input *PutIntegrationResponseInput) (*IntegrationResponse, error) {
+ req, out := c.PutIntegrationResponseRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opPutMethod = "PutMethod"
+
+// PutMethodRequest generates a "aws/request.Request" representing the
+// client's request for the PutMethod operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutMethod method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the PutMethodRequest method.
+// req, resp := client.PutMethodRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) PutMethodRequest(input *PutMethodInput) (req *request.Request, output *Method) { + op := &request.Operation{ + Name: opPutMethod, + HTTPMethod: "PUT", + HTTPPath: "/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}", + } + + if input == nil { + input = &PutMethodInput{} + } + + req = c.newRequest(op, input, output) + output = &Method{} + req.Data = output + return +} + +// Add a method to an existing Resource resource. +func (c *APIGateway) PutMethod(input *PutMethodInput) (*Method, error) { + req, out := c.PutMethodRequest(input) + err := req.Send() + return out, err +} + +const opPutMethodResponse = "PutMethodResponse" + +// PutMethodResponseRequest generates a "aws/request.Request" representing the +// client's request for the PutMethodResponse operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutMethodResponse method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutMethodResponseRequest method. +// req, resp := client.PutMethodResponseRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) PutMethodResponseRequest(input *PutMethodResponseInput) (req *request.Request, output *MethodResponse) { + op := &request.Operation{ + Name: opPutMethodResponse, + HTTPMethod: "PUT", + HTTPPath: "/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/responses/{status_code}", + } + + if input == nil { + input = &PutMethodResponseInput{} + } + + req = c.newRequest(op, input, output) + output = &MethodResponse{} + req.Data = output + return +} + +// Adds a MethodResponse to an existing Method resource. +func (c *APIGateway) PutMethodResponse(input *PutMethodResponseInput) (*MethodResponse, error) { + req, out := c.PutMethodResponseRequest(input) + err := req.Send() + return out, err +} + +const opPutRestApi = "PutRestApi" + +// PutRestApiRequest generates a "aws/request.Request" representing the +// client's request for the PutRestApi operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutRestApi method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutRestApiRequest method. 
+// req, resp := client.PutRestApiRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) PutRestApiRequest(input *PutRestApiInput) (req *request.Request, output *RestApi) { + op := &request.Operation{ + Name: opPutRestApi, + HTTPMethod: "PUT", + HTTPPath: "/restapis/{restapi_id}", + } + + if input == nil { + input = &PutRestApiInput{} + } + + req = c.newRequest(op, input, output) + output = &RestApi{} + req.Data = output + return +} + +// A feature of the Amazon API Gateway control service for updating an existing +// API with an input of external API definitions. The update can take the form +// of merging the supplied definition into the existing API or overwriting the +// existing API. +func (c *APIGateway) PutRestApi(input *PutRestApiInput) (*RestApi, error) { + req, out := c.PutRestApiRequest(input) + err := req.Send() + return out, err +} + +const opTestInvokeAuthorizer = "TestInvokeAuthorizer" + +// TestInvokeAuthorizerRequest generates a "aws/request.Request" representing the +// client's request for the TestInvokeAuthorizer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the TestInvokeAuthorizer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the TestInvokeAuthorizerRequest method. +// req, resp := client.TestInvokeAuthorizerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) TestInvokeAuthorizerRequest(input *TestInvokeAuthorizerInput) (req *request.Request, output *TestInvokeAuthorizerOutput) { + op := &request.Operation{ + Name: opTestInvokeAuthorizer, + HTTPMethod: "POST", + HTTPPath: "/restapis/{restapi_id}/authorizers/{authorizer_id}", + } + + if input == nil { + input = &TestInvokeAuthorizerInput{} + } + + req = c.newRequest(op, input, output) + output = &TestInvokeAuthorizerOutput{} + req.Data = output + return +} + +// Simulate the execution of an Authorizer in your RestApi with headers, parameters, +// and an incoming request body. +func (c *APIGateway) TestInvokeAuthorizer(input *TestInvokeAuthorizerInput) (*TestInvokeAuthorizerOutput, error) { + req, out := c.TestInvokeAuthorizerRequest(input) + err := req.Send() + return out, err +} + +const opTestInvokeMethod = "TestInvokeMethod" + +// TestInvokeMethodRequest generates a "aws/request.Request" representing the +// client's request for the TestInvokeMethod operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the TestInvokeMethod method directly +// instead. 
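+//
+// To access properties on the request object before sending, a hedged sketch
+// (Build and HTTPRequest come from the aws/request package; error handling is
+// elided):
+//
+//    req, out := client.TestInvokeMethodRequest(params)
+//    req.Build()                      // populates req.HTTPRequest from the input
+//    fmt.Println(req.HTTPRequest.URL) // inspect before Send
+//    err := req.Send()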
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the TestInvokeMethodRequest method. +// req, resp := client.TestInvokeMethodRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) TestInvokeMethodRequest(input *TestInvokeMethodInput) (req *request.Request, output *TestInvokeMethodOutput) { + op := &request.Operation{ + Name: opTestInvokeMethod, + HTTPMethod: "POST", + HTTPPath: "/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}", + } + + if input == nil { + input = &TestInvokeMethodInput{} + } + + req = c.newRequest(op, input, output) + output = &TestInvokeMethodOutput{} + req.Data = output + return +} + +// Simulate the execution of a Method in your RestApi with headers, parameters, +// and an incoming request body. +func (c *APIGateway) TestInvokeMethod(input *TestInvokeMethodInput) (*TestInvokeMethodOutput, error) { + req, out := c.TestInvokeMethodRequest(input) + err := req.Send() + return out, err +} + +const opUpdateAccount = "UpdateAccount" + +// UpdateAccountRequest generates a "aws/request.Request" representing the +// client's request for the UpdateAccount operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateAccount method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateAccountRequest method. +// req, resp := client.UpdateAccountRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) UpdateAccountRequest(input *UpdateAccountInput) (req *request.Request, output *Account) { + op := &request.Operation{ + Name: opUpdateAccount, + HTTPMethod: "PATCH", + HTTPPath: "/account", + } + + if input == nil { + input = &UpdateAccountInput{} + } + + req = c.newRequest(op, input, output) + output = &Account{} + req.Data = output + return +} + +// Changes information about the current Account resource. +func (c *APIGateway) UpdateAccount(input *UpdateAccountInput) (*Account, error) { + req, out := c.UpdateAccountRequest(input) + err := req.Send() + return out, err +} + +const opUpdateApiKey = "UpdateApiKey" + +// UpdateApiKeyRequest generates a "aws/request.Request" representing the +// client's request for the UpdateApiKey operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateApiKey method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateApiKeyRequest method. 
+// req, resp := client.UpdateApiKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) UpdateApiKeyRequest(input *UpdateApiKeyInput) (req *request.Request, output *ApiKey) { + op := &request.Operation{ + Name: opUpdateApiKey, + HTTPMethod: "PATCH", + HTTPPath: "/apikeys/{api_Key}", + } + + if input == nil { + input = &UpdateApiKeyInput{} + } + + req = c.newRequest(op, input, output) + output = &ApiKey{} + req.Data = output + return +} + +// Changes information about an ApiKey resource. +func (c *APIGateway) UpdateApiKey(input *UpdateApiKeyInput) (*ApiKey, error) { + req, out := c.UpdateApiKeyRequest(input) + err := req.Send() + return out, err +} + +const opUpdateAuthorizer = "UpdateAuthorizer" + +// UpdateAuthorizerRequest generates a "aws/request.Request" representing the +// client's request for the UpdateAuthorizer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateAuthorizer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateAuthorizerRequest method. +// req, resp := client.UpdateAuthorizerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) UpdateAuthorizerRequest(input *UpdateAuthorizerInput) (req *request.Request, output *Authorizer) { + op := &request.Operation{ + Name: opUpdateAuthorizer, + HTTPMethod: "PATCH", + HTTPPath: "/restapis/{restapi_id}/authorizers/{authorizer_id}", + } + + if input == nil { + input = &UpdateAuthorizerInput{} + } + + req = c.newRequest(op, input, output) + output = &Authorizer{} + req.Data = output + return +} + +// Updates an existing Authorizer resource. +func (c *APIGateway) UpdateAuthorizer(input *UpdateAuthorizerInput) (*Authorizer, error) { + req, out := c.UpdateAuthorizerRequest(input) + err := req.Send() + return out, err +} + +const opUpdateBasePathMapping = "UpdateBasePathMapping" + +// UpdateBasePathMappingRequest generates a "aws/request.Request" representing the +// client's request for the UpdateBasePathMapping operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateBasePathMapping method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateBasePathMappingRequest method. 
+// req, resp := client.UpdateBasePathMappingRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *APIGateway) UpdateBasePathMappingRequest(input *UpdateBasePathMappingInput) (req *request.Request, output *BasePathMapping) {
+ op := &request.Operation{
+ Name: opUpdateBasePathMapping,
+ HTTPMethod: "PATCH",
+ HTTPPath: "/domainnames/{domain_name}/basepathmappings/{base_path}",
+ }
+
+ if input == nil {
+ input = &UpdateBasePathMappingInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &BasePathMapping{}
+ req.Data = output
+ return
+}
+
+// Changes information about the BasePathMapping resource.
+func (c *APIGateway) UpdateBasePathMapping(input *UpdateBasePathMappingInput) (*BasePathMapping, error) {
+ req, out := c.UpdateBasePathMappingRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opUpdateClientCertificate = "UpdateClientCertificate"
+
+// UpdateClientCertificateRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateClientCertificate operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the UpdateClientCertificate method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the UpdateClientCertificateRequest method.
+// req, resp := client.UpdateClientCertificateRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *APIGateway) UpdateClientCertificateRequest(input *UpdateClientCertificateInput) (req *request.Request, output *ClientCertificate) {
+ op := &request.Operation{
+ Name: opUpdateClientCertificate,
+ HTTPMethod: "PATCH",
+ HTTPPath: "/clientcertificates/{clientcertificate_id}",
+ }
+
+ if input == nil {
+ input = &UpdateClientCertificateInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ClientCertificate{}
+ req.Data = output
+ return
+}
+
+// Changes information about a ClientCertificate resource.
+func (c *APIGateway) UpdateClientCertificate(input *UpdateClientCertificateInput) (*ClientCertificate, error) {
+ req, out := c.UpdateClientCertificateRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opUpdateDeployment = "UpdateDeployment"
+
+// UpdateDeploymentRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateDeployment operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the UpdateDeployment method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the UpdateDeploymentRequest method.
+// req, resp := client.UpdateDeploymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) UpdateDeploymentRequest(input *UpdateDeploymentInput) (req *request.Request, output *Deployment) { + op := &request.Operation{ + Name: opUpdateDeployment, + HTTPMethod: "PATCH", + HTTPPath: "/restapis/{restapi_id}/deployments/{deployment_id}", + } + + if input == nil { + input = &UpdateDeploymentInput{} + } + + req = c.newRequest(op, input, output) + output = &Deployment{} + req.Data = output + return +} + +// Changes information about a Deployment resource. +func (c *APIGateway) UpdateDeployment(input *UpdateDeploymentInput) (*Deployment, error) { + req, out := c.UpdateDeploymentRequest(input) + err := req.Send() + return out, err +} + +const opUpdateDomainName = "UpdateDomainName" + +// UpdateDomainNameRequest generates a "aws/request.Request" representing the +// client's request for the UpdateDomainName operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateDomainName method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateDomainNameRequest method. +// req, resp := client.UpdateDomainNameRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) UpdateDomainNameRequest(input *UpdateDomainNameInput) (req *request.Request, output *DomainName) { + op := &request.Operation{ + Name: opUpdateDomainName, + HTTPMethod: "PATCH", + HTTPPath: "/domainnames/{domain_name}", + } + + if input == nil { + input = &UpdateDomainNameInput{} + } + + req = c.newRequest(op, input, output) + output = &DomainName{} + req.Data = output + return +} + +// Changes information about the DomainName resource. +func (c *APIGateway) UpdateDomainName(input *UpdateDomainNameInput) (*DomainName, error) { + req, out := c.UpdateDomainNameRequest(input) + err := req.Send() + return out, err +} + +const opUpdateIntegration = "UpdateIntegration" + +// UpdateIntegrationRequest generates a "aws/request.Request" representing the +// client's request for the UpdateIntegration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateIntegration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateIntegrationRequest method. 
+// req, resp := client.UpdateIntegrationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) UpdateIntegrationRequest(input *UpdateIntegrationInput) (req *request.Request, output *Integration) { + op := &request.Operation{ + Name: opUpdateIntegration, + HTTPMethod: "PATCH", + HTTPPath: "/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/integration", + } + + if input == nil { + input = &UpdateIntegrationInput{} + } + + req = c.newRequest(op, input, output) + output = &Integration{} + req.Data = output + return +} + +// Represents an update integration. +func (c *APIGateway) UpdateIntegration(input *UpdateIntegrationInput) (*Integration, error) { + req, out := c.UpdateIntegrationRequest(input) + err := req.Send() + return out, err +} + +const opUpdateIntegrationResponse = "UpdateIntegrationResponse" + +// UpdateIntegrationResponseRequest generates a "aws/request.Request" representing the +// client's request for the UpdateIntegrationResponse operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateIntegrationResponse method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateIntegrationResponseRequest method. +// req, resp := client.UpdateIntegrationResponseRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) UpdateIntegrationResponseRequest(input *UpdateIntegrationResponseInput) (req *request.Request, output *IntegrationResponse) { + op := &request.Operation{ + Name: opUpdateIntegrationResponse, + HTTPMethod: "PATCH", + HTTPPath: "/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/integration/responses/{status_code}", + } + + if input == nil { + input = &UpdateIntegrationResponseInput{} + } + + req = c.newRequest(op, input, output) + output = &IntegrationResponse{} + req.Data = output + return +} + +// Represents an update integration response. +func (c *APIGateway) UpdateIntegrationResponse(input *UpdateIntegrationResponseInput) (*IntegrationResponse, error) { + req, out := c.UpdateIntegrationResponseRequest(input) + err := req.Send() + return out, err +} + +const opUpdateMethod = "UpdateMethod" + +// UpdateMethodRequest generates a "aws/request.Request" representing the +// client's request for the UpdateMethod operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateMethod method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateMethodRequest method. 
+// req, resp := client.UpdateMethodRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) UpdateMethodRequest(input *UpdateMethodInput) (req *request.Request, output *Method) { + op := &request.Operation{ + Name: opUpdateMethod, + HTTPMethod: "PATCH", + HTTPPath: "/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}", + } + + if input == nil { + input = &UpdateMethodInput{} + } + + req = c.newRequest(op, input, output) + output = &Method{} + req.Data = output + return +} + +// Updates an existing Method resource. +func (c *APIGateway) UpdateMethod(input *UpdateMethodInput) (*Method, error) { + req, out := c.UpdateMethodRequest(input) + err := req.Send() + return out, err +} + +const opUpdateMethodResponse = "UpdateMethodResponse" + +// UpdateMethodResponseRequest generates a "aws/request.Request" representing the +// client's request for the UpdateMethodResponse operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateMethodResponse method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateMethodResponseRequest method. +// req, resp := client.UpdateMethodResponseRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) UpdateMethodResponseRequest(input *UpdateMethodResponseInput) (req *request.Request, output *MethodResponse) { + op := &request.Operation{ + Name: opUpdateMethodResponse, + HTTPMethod: "PATCH", + HTTPPath: "/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/responses/{status_code}", + } + + if input == nil { + input = &UpdateMethodResponseInput{} + } + + req = c.newRequest(op, input, output) + output = &MethodResponse{} + req.Data = output + return +} + +// Updates an existing MethodResponse resource. +func (c *APIGateway) UpdateMethodResponse(input *UpdateMethodResponseInput) (*MethodResponse, error) { + req, out := c.UpdateMethodResponseRequest(input) + err := req.Send() + return out, err +} + +const opUpdateModel = "UpdateModel" + +// UpdateModelRequest generates a "aws/request.Request" representing the +// client's request for the UpdateModel operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateModel method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateModelRequest method. 
+// req, resp := client.UpdateModelRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) UpdateModelRequest(input *UpdateModelInput) (req *request.Request, output *Model) { + op := &request.Operation{ + Name: opUpdateModel, + HTTPMethod: "PATCH", + HTTPPath: "/restapis/{restapi_id}/models/{model_name}", + } + + if input == nil { + input = &UpdateModelInput{} + } + + req = c.newRequest(op, input, output) + output = &Model{} + req.Data = output + return +} + +// Changes information about a model. +func (c *APIGateway) UpdateModel(input *UpdateModelInput) (*Model, error) { + req, out := c.UpdateModelRequest(input) + err := req.Send() + return out, err +} + +const opUpdateResource = "UpdateResource" + +// UpdateResourceRequest generates a "aws/request.Request" representing the +// client's request for the UpdateResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateResourceRequest method. +// req, resp := client.UpdateResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *APIGateway) UpdateResourceRequest(input *UpdateResourceInput) (req *request.Request, output *Resource) { + op := &request.Operation{ + Name: opUpdateResource, + HTTPMethod: "PATCH", + HTTPPath: "/restapis/{restapi_id}/resources/{resource_id}", + } + + if input == nil { + input = &UpdateResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &Resource{} + req.Data = output + return +} + +// Changes information about a Resource resource. +func (c *APIGateway) UpdateResource(input *UpdateResourceInput) (*Resource, error) { + req, out := c.UpdateResourceRequest(input) + err := req.Send() + return out, err +} + +const opUpdateRestApi = "UpdateRestApi" + +// UpdateRestApiRequest generates a "aws/request.Request" representing the +// client's request for the UpdateRestApi operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateRestApi method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateRestApiRequest method. 
+// req, resp := client.UpdateRestApiRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *APIGateway) UpdateRestApiRequest(input *UpdateRestApiInput) (req *request.Request, output *RestApi) {
+ op := &request.Operation{
+ Name: opUpdateRestApi,
+ HTTPMethod: "PATCH",
+ HTTPPath: "/restapis/{restapi_id}",
+ }
+
+ if input == nil {
+ input = &UpdateRestApiInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &RestApi{}
+ req.Data = output
+ return
+}
+
+// Changes information about the specified API.
+func (c *APIGateway) UpdateRestApi(input *UpdateRestApiInput) (*RestApi, error) {
+ req, out := c.UpdateRestApiRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opUpdateStage = "UpdateStage"
+
+// UpdateStageRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateStage operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the UpdateStage method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the UpdateStageRequest method.
+// req, resp := client.UpdateStageRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *APIGateway) UpdateStageRequest(input *UpdateStageInput) (req *request.Request, output *Stage) {
+ op := &request.Operation{
+ Name: opUpdateStage,
+ HTTPMethod: "PATCH",
+ HTTPPath: "/restapis/{restapi_id}/stages/{stage_name}",
+ }
+
+ if input == nil {
+ input = &UpdateStageInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &Stage{}
+ req.Data = output
+ return
+}
+
+// Changes information about a Stage resource.
+func (c *APIGateway) UpdateStage(input *UpdateStageInput) (*Stage, error) {
+ req, out := c.UpdateStageRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// Represents an AWS account that is associated with Amazon API Gateway.
+type Account struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the Amazon resource name (ARN) of an Amazon CloudWatch role for
+ // the current Account resource.
+ CloudwatchRoleArn *string `locationName:"cloudwatchRoleArn" type:"string"`
+
+ // Specifies the application programming interface (API) throttle settings for
+ // the current Account resource.
+ ThrottleSettings *ThrottleSettings `locationName:"throttleSettings" type:"structure"`
+}
+
+// String returns the string representation
+func (s Account) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Account) GoString() string {
+ return s.String()
+}
+
+// A resource that can be distributed to callers for executing Method resources
+// that require an API key. API keys can be mapped to any Stage on any RestApi,
+// which indicates that the callers with the API key can make requests to that
+// stage.
+type ApiKey struct {
+ _ struct{} `type:"structure"`
+
+ // The date when the API Key was created, in ISO 8601 format
+ // (http://www.iso.org/iso/home/standards/iso8601.htm).
+ CreatedDate *time.Time `locationName:"createdDate" type:"timestamp" timestampFormat:"unix"` + + // The description of the API Key. + Description *string `locationName:"description" type:"string"` + + // Specifies whether the API Key can be used by callers. + Enabled *bool `locationName:"enabled" type:"boolean"` + + // The identifier of the API Key. + Id *string `locationName:"id" type:"string"` + + // When the API Key was last updated, in ISO 8601 format. + LastUpdatedDate *time.Time `locationName:"lastUpdatedDate" type:"timestamp" timestampFormat:"unix"` + + // The name of the API Key. + Name *string `locationName:"name" type:"string"` + + // A list of Stage resources that are associated with the ApiKey resource. + StageKeys []*string `locationName:"stageKeys" type:"list"` +} + +// String returns the string representation +func (s ApiKey) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ApiKey) GoString() string { + return s.String() +} + +// Represents an authorization layer for methods. If enabled on a method, API +// Gateway will activate the authorizer when a client calls the method. +type Authorizer struct { + _ struct{} `type:"structure"` + + // Optional customer-defined field, used in Swagger imports/exports. Has no + // functional impact. + AuthType *string `locationName:"authType" type:"string"` + + // Specifies the credentials required for the authorizer, if any. Two options + // are available. To specify an IAM Role for Amazon API Gateway to assume, use + // the role's Amazon Resource Name (ARN). To use resource-based permissions + // on the Lambda function, specify null. + AuthorizerCredentials *string `locationName:"authorizerCredentials" type:"string"` + + // The TTL in seconds of cached authorizer results. If greater than 0, API Gateway + // will cache authorizer responses. If this field is not set, the default value + // is 300. The maximum value is 3600, or 1 hour. + AuthorizerResultTtlInSeconds *int64 `locationName:"authorizerResultTtlInSeconds" type:"integer"` + + // [Required] Specifies the authorizer's Uniform Resource Identifier (URI). + // For TOKEN authorizers, this must be a well-formed Lambda function URI. The + // URI should be of the form arn:aws:apigateway:{region}:lambda:path/{service_api}. + // Region is used to determine the right endpoint. In this case, path is used + // to indicate that the remaining substring in the URI should be treated as + // the path to the resource, including the initial /. For Lambda functions, + // this is usually of the form /2015-03-31/functions/[FunctionARN]/invocations + AuthorizerUri *string `locationName:"authorizerUri" type:"string"` + + // The identifier for the authorizer resource. + Id *string `locationName:"id" type:"string"` + + // [Required] The source of the identity in an incoming request. For TOKEN authorizers, + // this value is a mapping expression with the same syntax as integration parameter + // mappings. The only valid source for tokens is 'header', so the expression + // should match 'method.request.header.[headerName]'. The value of the header + // '[headerName]' will be interpreted as the incoming token. + IdentitySource *string `locationName:"identitySource" type:"string"` + + // A validation expression for the incoming identity. For TOKEN authorizers, + // this value should be a regular expression. The incoming token from the client + // is matched against this expression, and will proceed if the token matches. 
+ // If the token doesn't match, the client receives a 401 Unauthorized response.
+ IdentityValidationExpression *string `locationName:"identityValidationExpression" type:"string"`
+
+ // [Required] The name of the authorizer.
+ Name *string `locationName:"name" type:"string"`
+
+ // [Required] The type of the authorizer. Currently, the only valid type is
+ // TOKEN.
+ Type *string `locationName:"type" type:"string" enum:"AuthorizerType"`
+}
+
+// String returns the string representation
+func (s Authorizer) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Authorizer) GoString() string {
+ return s.String()
+}
+
+// Represents the base path that callers of the API must provide as part
+// of the URL after the domain name.
+type BasePathMapping struct {
+ _ struct{} `type:"structure"`
+
+ // The base path name that callers of the API must provide as part of the URL
+ // after the domain name.
+ BasePath *string `locationName:"basePath" type:"string"`
+
+ // The name of the API.
+ RestApiId *string `locationName:"restApiId" type:"string"`
+
+ // The name of the API's stage.
+ Stage *string `locationName:"stage" type:"string"`
+}
+
+// String returns the string representation
+func (s BasePathMapping) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BasePathMapping) GoString() string {
+ return s.String()
+}
+
+// Represents a Client Certificate used to configure client-side SSL authentication
+// while sending requests to the integration endpoint.
+type ClientCertificate struct {
+ _ struct{} `type:"structure"`
+
+ // The identifier of the Client Certificate.
+ ClientCertificateId *string `locationName:"clientCertificateId" type:"string"`
+
+ // The date when the Client Certificate was created, in ISO 8601 format
+ // (http://www.iso.org/iso/home/standards/iso8601.htm).
+ CreatedDate *time.Time `locationName:"createdDate" type:"timestamp" timestampFormat:"unix"`
+
+ // The description of the Client Certificate.
+ Description *string `locationName:"description" type:"string"`
+
+ // The date when the Client Certificate will expire, in ISO 8601 format
+ // (http://www.iso.org/iso/home/standards/iso8601.htm).
+ ExpirationDate *time.Time `locationName:"expirationDate" type:"timestamp" timestampFormat:"unix"`
+
+ // The PEM-encoded public key of the Client Certificate, which can be used to
+ // configure certificate authentication in the integration endpoint.
+ PemEncodedCertificate *string `locationName:"pemEncodedCertificate" type:"string"`
+}
+
+// String returns the string representation
+func (s ClientCertificate) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ClientCertificate) GoString() string {
+ return s.String()
+}
+
+// Request to create an ApiKey resource.
+type CreateApiKeyInput struct {
+ _ struct{} `type:"structure"`
+
+ // The description of the ApiKey.
+ Description *string `locationName:"description" type:"string"`
+
+ // Specifies whether the ApiKey can be used by callers.
+ Enabled *bool `locationName:"enabled" type:"boolean"`
+
+ // The name of the ApiKey.
+ Name *string `locationName:"name" type:"string"`
+
+ // A list of StageKey resources specifying the stages to associate with the ApiKey.
+ StageKeys []*StageKey `locationName:"stageKeys" type:"list"` +} + +// String returns the string representation +func (s CreateApiKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateApiKeyInput) GoString() string { + return s.String() +} + +// Request to add a new Authorizer to an existing RestApi resource. +type CreateAuthorizerInput struct { + _ struct{} `type:"structure"` + + // Optional customer-defined field, used in Swagger imports/exports. Has no + // functional impact. + AuthType *string `locationName:"authType" type:"string"` + + // Specifies the credentials required for the authorizer, if any. + AuthorizerCredentials *string `locationName:"authorizerCredentials" type:"string"` + + // The TTL of cached authorizer results. + AuthorizerResultTtlInSeconds *int64 `locationName:"authorizerResultTtlInSeconds" type:"integer"` + + // [Required] Specifies the authorizer's Uniform Resource Identifier (URI). + AuthorizerUri *string `locationName:"authorizerUri" type:"string" required:"true"` + + // [Required] The source of the identity in an incoming request. + IdentitySource *string `locationName:"identitySource" type:"string" required:"true"` + + // A validation expression for the incoming identity. + IdentityValidationExpression *string `locationName:"identityValidationExpression" type:"string"` + + // [Required] The name of the authorizer. + Name *string `locationName:"name" type:"string" required:"true"` + + // The RestApi identifier under which the Authorizer will be created. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` + + // [Required] The type of the authorizer. + Type *string `locationName:"type" type:"string" required:"true" enum:"AuthorizerType"` +} + +// String returns the string representation +func (s CreateAuthorizerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAuthorizerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateAuthorizerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateAuthorizerInput"} + if s.AuthorizerUri == nil { + invalidParams.Add(request.NewErrParamRequired("AuthorizerUri")) + } + if s.IdentitySource == nil { + invalidParams.Add(request.NewErrParamRequired("IdentitySource")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Requests Amazon API Gateway to create a new BasePathMapping resource. +type CreateBasePathMappingInput struct { + _ struct{} `type:"structure"` + + // The base path name that callers of the API must provide as part of the URL + // after the domain name. This value must be unique for all of the mappings + // across a single API. Leave this blank if you do not want callers to specify + // a base path name after the domain name. + BasePath *string `locationName:"basePath" type:"string"` + + // The domain name of the BasePathMapping resource to create. + DomainName *string `location:"uri" locationName:"domain_name" type:"string" required:"true"` + + // The name of the API that you want to apply this mapping to. 
+ RestApiId *string `locationName:"restApiId" type:"string" required:"true"` + + // The name of the API's stage that you want to use for this mapping. Leave + // this blank if you do not want callers to explicitly specify the stage name + // after any base path name. + Stage *string `locationName:"stage" type:"string"` +} + +// String returns the string representation +func (s CreateBasePathMappingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBasePathMappingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateBasePathMappingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateBasePathMappingInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Requests Amazon API Gateway to create a Deployment resource. +type CreateDeploymentInput struct { + _ struct{} `type:"structure"` + + // Enables a cache cluster for the Stage resource specified in the input. + CacheClusterEnabled *bool `locationName:"cacheClusterEnabled" type:"boolean"` + + // Specifies the cache cluster size for the Stage resource specified in the + // input, if a cache cluster is enabled. + CacheClusterSize *string `locationName:"cacheClusterSize" type:"string" enum:"CacheClusterSize"` + + // The description for the Deployment resource to create. + Description *string `locationName:"description" type:"string"` + + // The RestApi resource identifier for the Deployment resource to create. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` + + // The description of the Stage resource for the Deployment resource to create. + StageDescription *string `locationName:"stageDescription" type:"string"` + + // The name of the Stage resource for the Deployment resource to create. + StageName *string `locationName:"stageName" type:"string" required:"true"` + + // A map that defines the stage variables for the Stage resource that is associated + // with the new deployment. Variable names can have alphanumeric characters, + // and the values must match [A-Za-z0-9-._~:/?#&=,]+. + Variables map[string]*string `locationName:"variables" type:"map"` +} + +// String returns the string representation +func (s CreateDeploymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDeploymentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDeploymentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDeploymentInput"} + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + if s.StageName == nil { + invalidParams.Add(request.NewErrParamRequired("StageName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A request to create a new domain name. +type CreateDomainNameInput struct { + _ struct{} `type:"structure"` + + // The body of the server certificate provided by your certificate authority. 
+ CertificateBody *string `locationName:"certificateBody" type:"string" required:"true"` + + // The intermediate certificates and optionally the root certificate, one after + // the other without any blank lines. If you include the root certificate, your + // certificate chain must start with intermediate certificates and end with + // the root certificate. Use the intermediate certificates that were provided + // by your certificate authority. Do not include any intermediaries that are + // not in the chain of trust path. + CertificateChain *string `locationName:"certificateChain" type:"string" required:"true"` + + // The name of the certificate. + CertificateName *string `locationName:"certificateName" type:"string" required:"true"` + + // Your certificate's private key. + CertificatePrivateKey *string `locationName:"certificatePrivateKey" type:"string" required:"true"` + + // The name of the DomainName resource. + DomainName *string `locationName:"domainName" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateDomainNameInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDomainNameInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDomainNameInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDomainNameInput"} + if s.CertificateBody == nil { + invalidParams.Add(request.NewErrParamRequired("CertificateBody")) + } + if s.CertificateChain == nil { + invalidParams.Add(request.NewErrParamRequired("CertificateChain")) + } + if s.CertificateName == nil { + invalidParams.Add(request.NewErrParamRequired("CertificateName")) + } + if s.CertificatePrivateKey == nil { + invalidParams.Add(request.NewErrParamRequired("CertificatePrivateKey")) + } + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Request to add a new Model to an existing RestApi resource. +type CreateModelInput struct { + _ struct{} `type:"structure"` + + // The content-type for the model. + ContentType *string `locationName:"contentType" type:"string" required:"true"` + + // The description of the model. + Description *string `locationName:"description" type:"string"` + + // The name of the model. + Name *string `locationName:"name" type:"string" required:"true"` + + // The RestApi identifier under which the Model will be created. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` + + // The schema for the model. For application/json models, this should be JSON-schema + // draft v4 (http://json-schema.org/documentation.html" target="_blank) model. + Schema *string `locationName:"schema" type:"string"` +} + +// String returns the string representation +func (s CreateModelInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateModelInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
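The types above are plain request payloads; all transport wiring lives in the generated client. A minimal end-to-end sketch, assuming this hunk is the vendored service/apigateway package; the region, ids, and names are placeholders:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/apigateway"
)

func main() {
	// session.New matches the SDK vintage vendored here; newer releases
	// prefer session.NewSession.
	svc := apigateway.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	// CreateModel splices RestApiId into the restapi_id URI segment and
	// sends the remaining fields in the JSON body, per the struct tags above.
	model, err := svc.CreateModel(&apigateway.CreateModelInput{
		ContentType: aws.String("application/json"),
		Name:        aws.String("PetModel"), // placeholder model name
		RestApiId:   aws.String("abc123"),   // placeholder API id
		Schema:      aws.String(`{"type":"object"}`),
	})
	if err != nil {
		fmt.Println("CreateModel failed:", err)
		return
	}
	fmt.Println(model)
}

The later sketches below reuse svc and these imports rather than repeating them.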
+func (s *CreateModelInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateModelInput"} + if s.ContentType == nil { + invalidParams.Add(request.NewErrParamRequired("ContentType")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Requests Amazon API Gateway to create a Resource resource. +type CreateResourceInput struct { + _ struct{} `type:"structure"` + + // The parent resource's identifier. + ParentId *string `location:"uri" locationName:"parent_id" type:"string" required:"true"` + + // The last path segment for this resource. + PathPart *string `locationName:"pathPart" type:"string" required:"true"` + + // The identifier of the RestApi for the resource. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateResourceInput"} + if s.ParentId == nil { + invalidParams.Add(request.NewErrParamRequired("ParentId")) + } + if s.PathPart == nil { + invalidParams.Add(request.NewErrParamRequired("PathPart")) + } + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The POST Request to add a new RestApi resource to your collection. +type CreateRestApiInput struct { + _ struct{} `type:"structure"` + + // The Id of the RestApi that you want to clone from. + CloneFrom *string `locationName:"cloneFrom" type:"string"` + + // The description of the RestApi. + Description *string `locationName:"description" type:"string"` + + // The name of the RestApi. + Name *string `locationName:"name" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateRestApiInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRestApiInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateRestApiInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateRestApiInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Requests Amazon API Gateway to create a Stage resource. +type CreateStageInput struct { + _ struct{} `type:"structure"` + + // Whether cache clustering is enabled for the stage. + CacheClusterEnabled *bool `locationName:"cacheClusterEnabled" type:"boolean"` + + // The stage's cache cluster size. + CacheClusterSize *string `locationName:"cacheClusterSize" type:"string" enum:"CacheClusterSize"` + + // The identifier of the Deployment resource for the Stage resource. + DeploymentId *string `locationName:"deploymentId" type:"string" required:"true"` + + // The description of the Stage resource. 
+ Description *string `locationName:"description" type:"string"` + + // The identifier of the RestApi resource for the Stage resource to create. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` + + // The name for the Stage resource. + StageName *string `locationName:"stageName" type:"string" required:"true"` + + // A map that defines the stage variables for the new Stage resource. Variable + // names can have alphanumeric characters, and the values must match [A-Za-z0-9-._~:/?#&=,]+. + Variables map[string]*string `locationName:"variables" type:"map"` +} + +// String returns the string representation +func (s CreateStageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateStageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateStageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateStageInput"} + if s.DeploymentId == nil { + invalidParams.Add(request.NewErrParamRequired("DeploymentId")) + } + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + if s.StageName == nil { + invalidParams.Add(request.NewErrParamRequired("StageName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A request to delete the ApiKey resource. +type DeleteApiKeyInput struct { + _ struct{} `type:"structure"` + + // The identifier of the ApiKey resource to be deleted. + ApiKey *string `location:"uri" locationName:"api_Key" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteApiKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteApiKeyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteApiKeyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteApiKeyInput"} + if s.ApiKey == nil { + invalidParams.Add(request.NewErrParamRequired("ApiKey")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteApiKeyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteApiKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteApiKeyOutput) GoString() string { + return s.String() +} + +// Request to delete an existing Authorizer resource. +type DeleteAuthorizerInput struct { + _ struct{} `type:"structure"` + + // The identifier of the Authorizer resource. + AuthorizerId *string `location:"uri" locationName:"authorizer_id" type:"string" required:"true"` + + // The RestApi identifier for the Authorizer resource. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteAuthorizerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAuthorizerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
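CreateStageInput is what ties a previously created Deployment to a callable stage name. A sketch reusing svc from the first example; ids are placeholders:

// createProdStage points a "prod" stage at an existing deployment and sets
// one stage variable (names alphanumeric, values matching the documented
// [A-Za-z0-9-._~:/?#&=,]+ pattern).
func createProdStage(svc *apigateway.APIGateway) error {
	_, err := svc.CreateStage(&apigateway.CreateStageInput{
		RestApiId:    aws.String("abc123"),
		DeploymentId: aws.String("dep456"),
		StageName:    aws.String("prod"),
		Variables: map[string]*string{
			"backendUrl": aws.String("https://backend.example.com"),
		},
	})
	return err
}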
+func (s *DeleteAuthorizerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteAuthorizerInput"} + if s.AuthorizerId == nil { + invalidParams.Add(request.NewErrParamRequired("AuthorizerId")) + } + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteAuthorizerOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAuthorizerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAuthorizerOutput) GoString() string { + return s.String() +} + +// A request to delete the BasePathMapping resource. +type DeleteBasePathMappingInput struct { + _ struct{} `type:"structure"` + + // The base path name of the BasePathMapping resource to delete. + BasePath *string `location:"uri" locationName:"base_path" type:"string" required:"true"` + + // The domain name of the BasePathMapping resource to delete. + DomainName *string `location:"uri" locationName:"domain_name" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBasePathMappingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBasePathMappingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBasePathMappingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBasePathMappingInput"} + if s.BasePath == nil { + invalidParams.Add(request.NewErrParamRequired("BasePath")) + } + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteBasePathMappingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBasePathMappingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBasePathMappingOutput) GoString() string { + return s.String() +} + +// A request to delete the ClientCertificate resource. +type DeleteClientCertificateInput struct { + _ struct{} `type:"structure"` + + // The identifier of the ClientCertificate resource to be deleted. + ClientCertificateId *string `location:"uri" locationName:"clientcertificate_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteClientCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteClientCertificateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
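Every required:"true" tag above is enforced client-side: the generated Validate methods run while the request is built, before it is signed or sent, so a missing field fails fast without a network call. A sketch of what that surfaces:

// validateDemo builds an input missing its required BasePath and checks it
// locally; err is a request.ErrInvalidParams naming the absent field.
func validateDemo() {
	in := &apigateway.DeleteBasePathMappingInput{
		DomainName: aws.String("api.example.com"), // placeholder domain
		// BasePath intentionally left unset.
	}
	if err := in.Validate(); err != nil {
		fmt.Println(err)
	}
}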
+func (s *DeleteClientCertificateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteClientCertificateInput"} + if s.ClientCertificateId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientCertificateId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteClientCertificateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteClientCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteClientCertificateOutput) GoString() string { + return s.String() +} + +// Requests Amazon API Gateway to delete a Deployment resource. +type DeleteDeploymentInput struct { + _ struct{} `type:"structure"` + + // The identifier of the Deployment resource to delete. + DeploymentId *string `location:"uri" locationName:"deployment_id" type:"string" required:"true"` + + // The identifier of the RestApi resource for the Deployment resource to delete. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDeploymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDeploymentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDeploymentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDeploymentInput"} + if s.DeploymentId == nil { + invalidParams.Add(request.NewErrParamRequired("DeploymentId")) + } + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteDeploymentOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDeploymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDeploymentOutput) GoString() string { + return s.String() +} + +// A request to delete the DomainName resource. +type DeleteDomainNameInput struct { + _ struct{} `type:"structure"` + + // The name of the DomainName resource to be deleted. + DomainName *string `location:"uri" locationName:"domain_name" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDomainNameInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDomainNameInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDomainNameInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDomainNameInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteDomainNameOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDomainNameOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDomainNameOutput) GoString() string { + return s.String() +} + +// Represents a delete integration request. 
+type DeleteIntegrationInput struct { + _ struct{} `type:"structure"` + + // Specifies a delete integration request's HTTP method. + HttpMethod *string `location:"uri" locationName:"http_method" type:"string" required:"true"` + + // Specifies a delete integration request's resource identifier. + ResourceId *string `location:"uri" locationName:"resource_id" type:"string" required:"true"` + + // Specifies a delete integration request's API identifier. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteIntegrationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteIntegrationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteIntegrationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteIntegrationInput"} + if s.HttpMethod == nil { + invalidParams.Add(request.NewErrParamRequired("HttpMethod")) + } + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteIntegrationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteIntegrationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteIntegrationOutput) GoString() string { + return s.String() +} + +// Represents a delete integration response request. +type DeleteIntegrationResponseInput struct { + _ struct{} `type:"structure"` + + // Specifies a delete integration response request's HTTP method. + HttpMethod *string `location:"uri" locationName:"http_method" type:"string" required:"true"` + + // Specifies a delete integration response request's resource identifier. + ResourceId *string `location:"uri" locationName:"resource_id" type:"string" required:"true"` + + // Specifies a delete integration response request's API identifier. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` + + // Specifies a delete integration response request's status code. + StatusCode *string `location:"uri" locationName:"status_code" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteIntegrationResponseInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteIntegrationResponseInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
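The location:"uri" tags are what bind these fields into the request path. A sketch reusing svc; ids are placeholders, and the path in the comment is the documented REST layout:

// deleteIntegration removes the integration behind a single method. The
// three URI fields land in roughly
// /restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/integration.
func deleteIntegration(svc *apigateway.APIGateway) error {
	_, err := svc.DeleteIntegration(&apigateway.DeleteIntegrationInput{
		RestApiId:  aws.String("abc123"),
		ResourceId: aws.String("res789"),
		HttpMethod: aws.String("GET"),
	})
	return err
}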
+func (s *DeleteIntegrationResponseInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteIntegrationResponseInput"} + if s.HttpMethod == nil { + invalidParams.Add(request.NewErrParamRequired("HttpMethod")) + } + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + if s.StatusCode == nil { + invalidParams.Add(request.NewErrParamRequired("StatusCode")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteIntegrationResponseOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteIntegrationResponseOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteIntegrationResponseOutput) GoString() string { + return s.String() +} + +// Request to delete an existing Method resource. +type DeleteMethodInput struct { + _ struct{} `type:"structure"` + + // The HTTP verb that identifies the Method resource. + HttpMethod *string `location:"uri" locationName:"http_method" type:"string" required:"true"` + + // The Resource identifier for the Method resource. + ResourceId *string `location:"uri" locationName:"resource_id" type:"string" required:"true"` + + // The RestApi identifier for the Method resource. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteMethodInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMethodInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteMethodInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteMethodInput"} + if s.HttpMethod == nil { + invalidParams.Add(request.NewErrParamRequired("HttpMethod")) + } + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteMethodOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteMethodOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMethodOutput) GoString() string { + return s.String() +} + +// A request to delete an existing MethodResponse resource. +type DeleteMethodResponseInput struct { + _ struct{} `type:"structure"` + + // The HTTP verb identifier for the parent Method resource. + HttpMethod *string `location:"uri" locationName:"http_method" type:"string" required:"true"` + + // The Resource identifier for the MethodResponse resource. + ResourceId *string `location:"uri" locationName:"resource_id" type:"string" required:"true"` + + // The RestApi identifier for the MethodResponse resource. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` + + // The status code identifier for the MethodResponse resource. 
+ StatusCode *string `location:"uri" locationName:"status_code" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteMethodResponseInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMethodResponseInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteMethodResponseInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteMethodResponseInput"} + if s.HttpMethod == nil { + invalidParams.Add(request.NewErrParamRequired("HttpMethod")) + } + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + if s.StatusCode == nil { + invalidParams.Add(request.NewErrParamRequired("StatusCode")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteMethodResponseOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteMethodResponseOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMethodResponseOutput) GoString() string { + return s.String() +} + +// Request to delete an existing model in an existing RestApi resource. +type DeleteModelInput struct { + _ struct{} `type:"structure"` + + // The name of the model to delete. + ModelName *string `location:"uri" locationName:"model_name" type:"string" required:"true"` + + // The RestApi under which the model will be deleted. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteModelInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteModelInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteModelInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteModelInput"} + if s.ModelName == nil { + invalidParams.Add(request.NewErrParamRequired("ModelName")) + } + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteModelOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteModelOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteModelOutput) GoString() string { + return s.String() +} + +// Request to delete a Resource. +type DeleteResourceInput struct { + _ struct{} `type:"structure"` + + // The identifier of the Resource resource. + ResourceId *string `location:"uri" locationName:"resource_id" type:"string" required:"true"` + + // The RestApi identifier for the Resource resource. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
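All of these Delete* calls succeed with an empty output struct, so the error is the only signal, and service errors arrive as awserr.Error values. A sketch; the "NotFoundException" code is this editor's assumption for a missing resource, and the extra import is github.com/aws/aws-sdk-go/aws/awserr:

// deleteResourceIfPresent treats an already-absent resource as success.
func deleteResourceIfPresent(svc *apigateway.APIGateway) error {
	_, err := svc.DeleteResource(&apigateway.DeleteResourceInput{
		RestApiId:  aws.String("abc123"),
		ResourceId: aws.String("res789"),
	})
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFoundException" {
		return nil // assumed error code for a missing resource
	}
	return err
}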
+func (s *DeleteResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteResourceInput"} + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteResourceOutput) GoString() string { + return s.String() +} + +// Request to delete the specified API from your collection. +type DeleteRestApiInput struct { + _ struct{} `type:"structure"` + + // The ID of the RestApi you want to delete. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRestApiInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRestApiInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteRestApiInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteRestApiInput"} + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteRestApiOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRestApiOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRestApiOutput) GoString() string { + return s.String() +} + +// Requests Amazon API Gateway to delete a Stage resource. +type DeleteStageInput struct { + _ struct{} `type:"structure"` + + // The identifier of the RestApi resource for the Stage resource to delete. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` + + // The name of the Stage resource to delete. + StageName *string `location:"uri" locationName:"stage_name" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteStageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteStageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteStageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteStageInput"} + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + if s.StageName == nil { + invalidParams.Add(request.NewErrParamRequired("StageName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteStageOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteStageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteStageOutput) GoString() string { + return s.String() +} + +// An immutable representation of a RestApi resource that can be called by users +// using Stages. 
+// A deployment must be associated with a Stage for it to be callable
+// over the Internet.
+type Deployment struct {
+	_ struct{} `type:"structure"`
+
+	// Gets a summary of the RestApi at the date and time that the deployment resource
+	// was created.
+	ApiSummary map[string]map[string]*MethodSnapshot `locationName:"apiSummary" type:"map"`
+
+	// The date and time that the deployment resource was created.
+	CreatedDate *time.Time `locationName:"createdDate" type:"timestamp" timestampFormat:"unix"`
+
+	// The description for the deployment resource.
+	Description *string `locationName:"description" type:"string"`
+
+	// The identifier for the deployment resource.
+	Id *string `locationName:"id" type:"string"`
+}
+
+// String returns the string representation
+func (s Deployment) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Deployment) GoString() string {
+	return s.String()
+}
+
+// Represents a domain name that is contained in a simpler, more intuitive URL
+// that can be called.
+type DomainName struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the certificate.
+	CertificateName *string `locationName:"certificateName" type:"string"`
+
+	// The date when the certificate was uploaded, in ISO 8601 format (http://www.iso.org/iso/home/standards/iso8601.htm"
+	// target="_blank).
+	CertificateUploadDate *time.Time `locationName:"certificateUploadDate" type:"timestamp" timestampFormat:"unix"`
+
+	// The domain name of the Amazon CloudFront distribution. For more information,
+	// see the Amazon CloudFront documentation (http://aws.amazon.com/documentation/cloudfront/"
+	// target="_blank).
+	DistributionDomainName *string `locationName:"distributionDomainName" type:"string"`
+
+	// The name of the DomainName resource.
+	DomainName *string `locationName:"domainName" type:"string"`
+}
+
+// String returns the string representation
+func (s DomainName) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DomainName) GoString() string {
+	return s.String()
+}
+
+// Request to flush authorizer cache entries on a specified stage.
+type FlushStageAuthorizersCacheInput struct {
+	_ struct{} `type:"structure"`
+
+	// The API identifier of the stage to flush.
+	RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"`
+
+	// The name of the stage to flush.
+	StageName *string `location:"uri" locationName:"stage_name" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s FlushStageAuthorizersCacheInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s FlushStageAuthorizersCacheInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
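Flushing a stage's authorizer cache takes the same two URI fields as the stage calls above. A sketch reusing svc:

// flushAuthorizerCache drops cached authorizer results for one stage, e.g.
// after rotating the credentials behind a custom authorizer.
func flushAuthorizerCache(svc *apigateway.APIGateway) error {
	_, err := svc.FlushStageAuthorizersCache(&apigateway.FlushStageAuthorizersCacheInput{
		RestApiId: aws.String("abc123"),
		StageName: aws.String("prod"),
	})
	return err
}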
+func (s *FlushStageAuthorizersCacheInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FlushStageAuthorizersCacheInput"} + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + if s.StageName == nil { + invalidParams.Add(request.NewErrParamRequired("StageName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type FlushStageAuthorizersCacheOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s FlushStageAuthorizersCacheOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FlushStageAuthorizersCacheOutput) GoString() string { + return s.String() +} + +// Requests Amazon API Gateway to flush a stage's cache. +type FlushStageCacheInput struct { + _ struct{} `type:"structure"` + + // The API identifier of the stage to flush its cache. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` + + // The name of the stage to flush its cache. + StageName *string `location:"uri" locationName:"stage_name" type:"string" required:"true"` +} + +// String returns the string representation +func (s FlushStageCacheInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FlushStageCacheInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *FlushStageCacheInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FlushStageCacheInput"} + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + if s.StageName == nil { + invalidParams.Add(request.NewErrParamRequired("StageName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type FlushStageCacheOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s FlushStageCacheOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FlushStageCacheOutput) GoString() string { + return s.String() +} + +// A request to generate a ClientCertificate resource. +type GenerateClientCertificateInput struct { + _ struct{} `type:"structure"` + + // The description of the ClientCertificate. + Description *string `locationName:"description" type:"string"` +} + +// String returns the string representation +func (s GenerateClientCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GenerateClientCertificateInput) GoString() string { + return s.String() +} + +// Requests Amazon API Gateway to get information about the current Account +// resource. +type GetAccountInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetAccountInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccountInput) GoString() string { + return s.String() +} + +// A request to get information about the current ApiKey resource. +type GetApiKeyInput struct { + _ struct{} `type:"structure"` + + // The identifier of the ApiKey resource. 
+	ApiKey *string `location:"uri" locationName:"api_Key" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetApiKeyInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetApiKeyInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetApiKeyInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetApiKeyInput"}
+	if s.ApiKey == nil {
+		invalidParams.Add(request.NewErrParamRequired("ApiKey"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// A request to get information about the current ApiKeys resource.
+type GetApiKeysInput struct {
+	_ struct{} `type:"structure"`
+
+	// The maximum number of ApiKeys to get information about.
+	Limit *int64 `location:"querystring" locationName:"limit" type:"integer"`
+
+	// The position of the current ApiKeys resource to get information about.
+	Position *string `location:"querystring" locationName:"position" type:"string"`
+}
+
+// String returns the string representation
+func (s GetApiKeysInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetApiKeysInput) GoString() string {
+	return s.String()
+}
+
+// Represents a collection of ApiKey resources.
+type GetApiKeysOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The current page of any ApiKey resources in the collection of ApiKey resources.
+	Items []*ApiKey `locationName:"item" type:"list"`
+
+	Position *string `locationName:"position" type:"string"`
+}
+
+// String returns the string representation
+func (s GetApiKeysOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetApiKeysOutput) GoString() string {
+	return s.String()
+}
+
+// Request to describe an existing Authorizer resource.
+type GetAuthorizerInput struct {
+	_ struct{} `type:"structure"`
+
+	// The identifier of the Authorizer resource.
+	AuthorizerId *string `location:"uri" locationName:"authorizer_id" type:"string" required:"true"`
+
+	// The RestApi identifier for the Authorizer resource.
+	RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetAuthorizerInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetAuthorizerInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetAuthorizerInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetAuthorizerInput"}
+	if s.AuthorizerId == nil {
+		invalidParams.Add(request.NewErrParamRequired("AuthorizerId"))
+	}
+	if s.RestApiId == nil {
+		invalidParams.Add(request.NewErrParamRequired("RestApiId"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Request to describe an existing Authorizers resource.
+type GetAuthorizersInput struct {
+	_ struct{} `type:"structure"`
+
+	// Limit the number of Authorizer resources in the response.
+	Limit *int64 `location:"querystring" locationName:"limit" type:"integer"`
+
+	// If not all Authorizer resources in the response were present, the position
+	// will specify where to start the next page of results.
+ Position *string `location:"querystring" locationName:"position" type:"string"` + + // The RestApi identifier for the Authorizers resource. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetAuthorizersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAuthorizersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetAuthorizersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAuthorizersInput"} + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents a collection of Authorizer resources. +type GetAuthorizersOutput struct { + _ struct{} `type:"structure"` + + // Gets the current list of Authorizer resources in the collection. + Items []*Authorizer `locationName:"item" type:"list"` + + Position *string `locationName:"position" type:"string"` +} + +// String returns the string representation +func (s GetAuthorizersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAuthorizersOutput) GoString() string { + return s.String() +} + +// Request to describe a BasePathMapping resource. +type GetBasePathMappingInput struct { + _ struct{} `type:"structure"` + + // The base path name that callers of the API must provide as part of the URL + // after the domain name. This value must be unique for all of the mappings + // across a single API. Leave this blank if you do not want callers to specify + // any base path name after the domain name. + BasePath *string `location:"uri" locationName:"base_path" type:"string" required:"true"` + + // The domain name of the BasePathMapping resource to be described. + DomainName *string `location:"uri" locationName:"domain_name" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBasePathMappingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBasePathMappingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBasePathMappingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBasePathMappingInput"} + if s.BasePath == nil { + invalidParams.Add(request.NewErrParamRequired("BasePath")) + } + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A request to get information about a collection of BasePathMapping resources. +type GetBasePathMappingsInput struct { + _ struct{} `type:"structure"` + + // The domain name of a BasePathMapping resource. + DomainName *string `location:"uri" locationName:"domain_name" type:"string" required:"true"` + + // The maximum number of BasePathMapping resources in the collection to get + // information about. The default limit is 25. It should be an integer between + // 1 - 500. + Limit *int64 `location:"querystring" locationName:"limit" type:"integer"` + + // The position of the current BasePathMapping resource in the collection to + // get information about. 
+ Position *string `location:"querystring" locationName:"position" type:"string"` +} + +// String returns the string representation +func (s GetBasePathMappingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBasePathMappingsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBasePathMappingsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBasePathMappingsInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents a collection of BasePathMapping resources. +type GetBasePathMappingsOutput struct { + _ struct{} `type:"structure"` + + // The current page of any BasePathMapping resources in the collection of base + // path mapping resources. + Items []*BasePathMapping `locationName:"item" type:"list"` + + Position *string `locationName:"position" type:"string"` +} + +// String returns the string representation +func (s GetBasePathMappingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBasePathMappingsOutput) GoString() string { + return s.String() +} + +// A request to get information about the current ClientCertificate resource. +type GetClientCertificateInput struct { + _ struct{} `type:"structure"` + + // The identifier of the ClientCertificate resource to be described. + ClientCertificateId *string `location:"uri" locationName:"clientcertificate_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetClientCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetClientCertificateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetClientCertificateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetClientCertificateInput"} + if s.ClientCertificateId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientCertificateId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A request to get information about a collection of ClientCertificate resources. +type GetClientCertificatesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of ClientCertificate resources in the collection to get + // information about. The default limit is 25. It should be an integer between + // 1 - 500. + Limit *int64 `location:"querystring" locationName:"limit" type:"integer"` + + // The position of the current ClientCertificate resource in the collection + // to get information about. + Position *string `location:"querystring" locationName:"position" type:"string"` +} + +// String returns the string representation +func (s GetClientCertificatesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetClientCertificatesInput) GoString() string { + return s.String() +} + +// Represents a collection of ClientCertificate resources. +type GetClientCertificatesOutput struct { + _ struct{} `type:"structure"` + + // The current page of any ClientCertificate resources in the collection of + // ClientCertificate resources. 
+ Items []*ClientCertificate `locationName:"item" type:"list"` + + Position *string `locationName:"position" type:"string"` +} + +// String returns the string representation +func (s GetClientCertificatesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetClientCertificatesOutput) GoString() string { + return s.String() +} + +// Requests Amazon API Gateway to get information about a Deployment resource. +type GetDeploymentInput struct { + _ struct{} `type:"structure"` + + // The identifier of the Deployment resource to get information about. + DeploymentId *string `location:"uri" locationName:"deployment_id" type:"string" required:"true"` + + // The identifier of the RestApi resource for the Deployment resource to get + // information about. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetDeploymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDeploymentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetDeploymentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDeploymentInput"} + if s.DeploymentId == nil { + invalidParams.Add(request.NewErrParamRequired("DeploymentId")) + } + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Requests Amazon API Gateway to get information about a Deployments collection. +type GetDeploymentsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of Deployment resources in the collection to get information + // about. The default limit is 25. It should be an integer between 1 - 500. + Limit *int64 `location:"querystring" locationName:"limit" type:"integer"` + + // The position of the current Deployment resource in the collection to get + // information about. + Position *string `location:"querystring" locationName:"position" type:"string"` + + // The identifier of the RestApi resource for the collection of Deployment resources + // to get information about. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetDeploymentsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDeploymentsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetDeploymentsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDeploymentsInput"} + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents a collection resource that contains zero or more references to +// your existing deployments, and links that guide you on ways to interact with +// your collection. The collection offers a paginated view of the contained +// deployments. +type GetDeploymentsOutput struct { + _ struct{} `type:"structure"` + + // The current page of any Deployment resources in the collection of deployment + // resources. 
+	Items []*Deployment `locationName:"item" type:"list"`
+
+	Position *string `locationName:"position" type:"string"`
+}
+
+// String returns the string representation
+func (s GetDeploymentsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetDeploymentsOutput) GoString() string {
+	return s.String()
+}
+
+// Request to get the name of a DomainName resource.
+type GetDomainNameInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the DomainName resource.
+	DomainName *string `location:"uri" locationName:"domain_name" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetDomainNameInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetDomainNameInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetDomainNameInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetDomainNameInput"}
+	if s.DomainName == nil {
+		invalidParams.Add(request.NewErrParamRequired("DomainName"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Request to describe a collection of DomainName resources.
+type GetDomainNamesInput struct {
+	_ struct{} `type:"structure"`
+
+	// The maximum number of DomainName resources in the collection to get information
+	// about. The default limit is 25. It should be an integer between 1 - 500.
+	Limit *int64 `location:"querystring" locationName:"limit" type:"integer"`
+
+	// The position of the current domain names to get information about.
+	Position *string `location:"querystring" locationName:"position" type:"string"`
+}
+
+// String returns the string representation
+func (s GetDomainNamesInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetDomainNamesInput) GoString() string {
+	return s.String()
+}
+
+// Represents a collection of DomainName resources.
+type GetDomainNamesOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The current page of any DomainName resources in the collection of DomainName
+	// resources.
+	Items []*DomainName `locationName:"item" type:"list"`
+
+	Position *string `locationName:"position" type:"string"`
+}
+
+// String returns the string representation
+func (s GetDomainNamesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetDomainNamesOutput) GoString() string {
+	return s.String()
+}
+
+// Request a new export of a RestApi for a particular Stage.
+type GetExportInput struct {
+	_ struct{} `type:"structure"`
+
+	// The content-type of the export, for example 'application/json'. Currently
+	// 'application/json' and 'application/yaml' are supported for exportType 'swagger'.
+	// Should be specified in the 'Accept' header for direct API requests.
+	Accepts *string `location:"header" locationName:"Accept" type:"string"`
+
+	// The type of export. Currently only 'swagger' is supported.
+	ExportType *string `location:"uri" locationName:"export_type" type:"string" required:"true"`
+
+	// A key-value map of query string parameters that specify properties of the
+	// export, depending on the requested exportType.
+	// For exportType 'swagger', any combination of the following parameters are
+	// supported: 'integrations' will export x-amazon-apigateway-integration extensions,
+	// 'authorizers' will export x-amazon-apigateway-authorizer extensions, and
+	// 'postman' will export with Postman extensions, allowing for import to the
+	// Postman tool.
+	Parameters map[string]*string `location:"querystring" locationName:"parameters" type:"map"`
+
+	// The identifier of the RestApi to be exported.
+	RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"`
+
+	// The name of the Stage that will be exported.
+	StageName *string `location:"uri" locationName:"stage_name" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetExportInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetExportInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetExportInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetExportInput"}
+	if s.ExportType == nil {
+		invalidParams.Add(request.NewErrParamRequired("ExportType"))
+	}
+	if s.RestApiId == nil {
+		invalidParams.Add(request.NewErrParamRequired("RestApiId"))
+	}
+	if s.StageName == nil {
+		invalidParams.Add(request.NewErrParamRequired("StageName"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The binary blob response to GetExport, which contains the export.
+type GetExportOutput struct {
+	_ struct{} `type:"structure" payload:"Body"`
+
+	// The binary blob response to GetExport, which contains the export.
+	Body []byte `locationName:"body" type:"blob"`
+
+	// The content-disposition header value in the HTTP response.
+	ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+	// The content-type header value in the HTTP response. This will correspond
+	// to a valid 'accept' type in the request.
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+}
+
+// String returns the string representation
+func (s GetExportOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetExportOutput) GoString() string {
+	return s.String()
+}
+
+// Represents a get integration request.
+type GetIntegrationInput struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies a get integration request's HTTP method.
+	HttpMethod *string `location:"uri" locationName:"http_method" type:"string" required:"true"`
+
+	// Specifies a get integration request's resource identifier.
+	ResourceId *string `location:"uri" locationName:"resource_id" type:"string" required:"true"`
+
+	// Specifies a get integration request's API identifier.
+	RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetIntegrationInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetIntegrationInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
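GetExport is the one call in this stretch with a binary payload: the export arrives in Body as a blob, with content negotiated through the Accept header. A sketch that writes a Swagger export to disk, reusing svc; the "extensions" parameter key is this editor's assumption for selecting the documented 'integrations' output, and the extra import is io/ioutil:

// exportSwagger fetches a stage's Swagger document with integration
// extensions included and writes the raw blob to a local file.
func exportSwagger(svc *apigateway.APIGateway) error {
	out, err := svc.GetExport(&apigateway.GetExportInput{
		RestApiId:  aws.String("abc123"),
		StageName:  aws.String("prod"),
		ExportType: aws.String("swagger"),
		Accepts:    aws.String("application/json"),
		// Assumed query-parameter name; values per the comment above.
		Parameters: map[string]*string{"extensions": aws.String("integrations")},
	})
	if err != nil {
		return err
	}
	return ioutil.WriteFile("swagger.json", out.Body, 0644)
}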
+func (s *GetIntegrationInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetIntegrationInput"}
+	if s.HttpMethod == nil {
+		invalidParams.Add(request.NewErrParamRequired("HttpMethod"))
+	}
+	if s.ResourceId == nil {
+		invalidParams.Add(request.NewErrParamRequired("ResourceId"))
+	}
+	if s.RestApiId == nil {
+		invalidParams.Add(request.NewErrParamRequired("RestApiId"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Represents a get integration response request.
+type GetIntegrationResponseInput struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies a get integration response request's HTTP method.
+	HttpMethod *string `location:"uri" locationName:"http_method" type:"string" required:"true"`
+
+	// Specifies a get integration response request's resource identifier.
+	ResourceId *string `location:"uri" locationName:"resource_id" type:"string" required:"true"`
+
+	// Specifies a get integration response request's API identifier.
+	RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"`
+
+	// Specifies a get integration response request's status code.
+	StatusCode *string `location:"uri" locationName:"status_code" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetIntegrationResponseInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetIntegrationResponseInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetIntegrationResponseInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetIntegrationResponseInput"}
+	if s.HttpMethod == nil {
+		invalidParams.Add(request.NewErrParamRequired("HttpMethod"))
+	}
+	if s.ResourceId == nil {
+		invalidParams.Add(request.NewErrParamRequired("ResourceId"))
+	}
+	if s.RestApiId == nil {
+		invalidParams.Add(request.NewErrParamRequired("RestApiId"))
+	}
+	if s.StatusCode == nil {
+		invalidParams.Add(request.NewErrParamRequired("StatusCode"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Request to describe an existing Method resource.
+type GetMethodInput struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies the method request's HTTP method type.
+	HttpMethod *string `location:"uri" locationName:"http_method" type:"string" required:"true"`
+
+	// The Resource identifier for the Method resource.
+	ResourceId *string `location:"uri" locationName:"resource_id" type:"string" required:"true"`
+
+	// The RestApi identifier for the Method resource.
+	RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetMethodInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetMethodInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
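Integration responses are addressed by all four URI fields at once. A sketch reusing svc:

// getIntegrationResponse reads the response mapping for one status code of
// one method; ids and codes are placeholders.
func getIntegrationResponse(svc *apigateway.APIGateway) error {
	resp, err := svc.GetIntegrationResponse(&apigateway.GetIntegrationResponseInput{
		RestApiId:  aws.String("abc123"),
		ResourceId: aws.String("res789"),
		HttpMethod: aws.String("GET"),
		StatusCode: aws.String("200"),
	})
	if err != nil {
		return err
	}
	fmt.Println(resp)
	return nil
}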
+func (s *GetMethodInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetMethodInput"} + if s.HttpMethod == nil { + invalidParams.Add(request.NewErrParamRequired("HttpMethod")) + } + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Request to describe a MethodResponse resource. +type GetMethodResponseInput struct { + _ struct{} `type:"structure"` + + // The HTTP verb identifier for the parent Method resource. + HttpMethod *string `location:"uri" locationName:"http_method" type:"string" required:"true"` + + // The Resource identifier for the MethodResponse resource. + ResourceId *string `location:"uri" locationName:"resource_id" type:"string" required:"true"` + + // The RestApi identifier for the MethodResponse resource. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` + + // The status code identifier for the MethodResponse resource. + StatusCode *string `location:"uri" locationName:"status_code" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetMethodResponseInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetMethodResponseInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetMethodResponseInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetMethodResponseInput"} + if s.HttpMethod == nil { + invalidParams.Add(request.NewErrParamRequired("HttpMethod")) + } + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + if s.StatusCode == nil { + invalidParams.Add(request.NewErrParamRequired("StatusCode")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Request to list information about a model in an existing RestApi resource. +type GetModelInput struct { + _ struct{} `type:"structure"` + + // Resolves all external model references and returns a flattened model schema. + Flatten *bool `location:"querystring" locationName:"flatten" type:"boolean"` + + // The name of the model as an identifier. + ModelName *string `location:"uri" locationName:"model_name" type:"string" required:"true"` + + // The RestApi identifier under which the Model exists. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetModelInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetModelInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetModelInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetModelInput"} + if s.ModelName == nil { + invalidParams.Add(request.NewErrParamRequired("ModelName")) + } + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Request to generate a sample mapping template used to transform the payload. 
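+//
+// A minimal usage sketch (an editor's example, not generated documentation;
+// svc is assumed to be a client created with apigateway.New, and the IDs are
+// placeholders):
+//
+//    out, err := svc.GetModelTemplate(&apigateway.GetModelTemplateInput{
+//        RestApiId: aws.String("a1b2c3"),
+//        ModelName: aws.String("Pet"),
+//    })
+//    // On success, out.Value holds the generated mapping template.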
+type GetModelTemplateInput struct { + _ struct{} `type:"structure"` + + // The name of the model for which to generate a template. + ModelName *string `location:"uri" locationName:"model_name" type:"string" required:"true"` + + // The ID of the RestApi under which the model exists. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetModelTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetModelTemplateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetModelTemplateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetModelTemplateInput"} + if s.ModelName == nil { + invalidParams.Add(request.NewErrParamRequired("ModelName")) + } + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents a mapping template used to transform a payload. +type GetModelTemplateOutput struct { + _ struct{} `type:"structure"` + + // The Apache Velocity Template Language (VTL) (http://velocity.apache.org/engine/devel/vtl-reference-guide.html) + // template content used for the template resource. + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s GetModelTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetModelTemplateOutput) GoString() string { + return s.String() +} + +// Request to list existing Models defined for a RestApi resource. +type GetModelsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of models in the collection to get information about. + // The default limit is 25. It should be an integer between 1 and 500. + Limit *int64 `location:"querystring" locationName:"limit" type:"integer"` + + // The position of the next set of results in the Models resource to get information + // about. + Position *string `location:"querystring" locationName:"position" type:"string"` + + // The RestApi identifier. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetModelsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetModelsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetModelsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetModelsInput"} + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents a collection of Model resources. +type GetModelsOutput struct { + _ struct{} `type:"structure"` + + // Gets the current Model resource in the collection.
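+ //
+ // (Editor's sketch, not generated documentation.) Together with Position
+ // below, this supports paging through the collection; svc and the IDs are
+ // placeholders:
+ //
+ //    page, err := svc.GetModels(&apigateway.GetModelsInput{
+ //        RestApiId: aws.String("a1b2c3"), Limit: aws.Int64(100),
+ //    })
+ //    next, err := svc.GetModels(&apigateway.GetModelsInput{
+ //        RestApiId: aws.String("a1b2c3"), Position: page.Position,
+ //    })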
+ Items []*Model `locationName:"item" type:"list"` + + Position *string `locationName:"position" type:"string"` +} + +// String returns the string representation +func (s GetModelsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetModelsOutput) GoString() string { + return s.String() +} + +// Request to list information about a resource. +type GetResourceInput struct { + _ struct{} `type:"structure"` + + // The identifier for the Resource resource. + ResourceId *string `location:"uri" locationName:"resource_id" type:"string" required:"true"` + + // The RestApi identifier for the resource. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetResourceInput"} + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Request to list information about a collection of resources. +type GetResourcesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of Resource resources in the collection to get information + // about. The default limit is 25. It should be an integer between 1 and 500. + Limit *int64 `location:"querystring" locationName:"limit" type:"integer"` + + // The position of the next set of results in the current Resources resource + // to get information about. + Position *string `location:"querystring" locationName:"position" type:"string"` + + // The RestApi identifier for the Resource. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetResourcesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetResourcesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetResourcesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetResourcesInput"} + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents a collection of Resource resources. +type GetResourcesOutput struct { + _ struct{} `type:"structure"` + + // Gets the current Resource resource in the collection. + Items []*Resource `locationName:"item" type:"list"` + + Position *string `locationName:"position" type:"string"` +} + +// String returns the string representation +func (s GetResourcesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetResourcesOutput) GoString() string { + return s.String() +} + +// The GET request to list an existing RestApi defined for your collection. +type GetRestApiInput struct { + _ struct{} `type:"structure"` + + // The identifier of the RestApi resource.
+ RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRestApiInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRestApiInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetRestApiInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetRestApiInput"} + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The GET request to list existing RestApis defined for your collection. +type GetRestApisInput struct { + _ struct{} `type:"structure"` + + // The maximum number of RestApi resources in the collection to get information + // about. The default limit is 25. It should be an integer between 1 and 500. + Limit *int64 `location:"querystring" locationName:"limit" type:"integer"` + + // The position of the current RestApis resource in the collection to get information + // about. + Position *string `location:"querystring" locationName:"position" type:"string"` +} + +// String returns the string representation +func (s GetRestApisInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRestApisInput) GoString() string { + return s.String() +} + +// Contains references to your APIs and links that guide you in ways to interact +// with your collection. A collection offers a paginated view of your APIs. +type GetRestApisOutput struct { + _ struct{} `type:"structure"` + + // An array of links to the current page of RestApi resources. + Items []*RestApi `locationName:"item" type:"list"` + + Position *string `locationName:"position" type:"string"` +} + +// String returns the string representation +func (s GetRestApisOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRestApisOutput) GoString() string { + return s.String() +} + +// Request a new generated client SDK for a RestApi and Stage. +type GetSdkInput struct { + _ struct{} `type:"structure"` + + // A key-value map of query string parameters that specify properties of the + // SDK, depending on the requested sdkType. For sdkType 'objectivec', a parameter + // named "classPrefix" is required. For sdkType 'android', parameters named + // "groupId", "artifactId", "artifactVersion", and "invokerPackage" are required. + Parameters map[string]*string `location:"querystring" locationName:"parameters" type:"map"` + + // The identifier of the RestApi that the SDK will use. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` + + // The language for the generated SDK. Currently javascript, android, and objectivec + // (for iOS) are supported. + SdkType *string `location:"uri" locationName:"sdk_type" type:"string" required:"true"` + + // The name of the Stage that the SDK will use. + StageName *string `location:"uri" locationName:"stage_name" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetSdkInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSdkInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid.
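+//
+// For illustration (an editor's sketch, not generated documentation): a
+// complete 'android' request carries the four parameters named above in
+// addition to the required URI fields:
+//
+//    in := &apigateway.GetSdkInput{
+//        RestApiId: aws.String("a1b2c3"),
+//        StageName: aws.String("prod"),
+//        SdkType:   aws.String("android"),
+//        Parameters: map[string]*string{
+//            "groupId":         aws.String("com.example"),
+//            "artifactId":      aws.String("petstore"),
+//            "artifactVersion": aws.String("1.0.0"),
+//            "invokerPackage":  aws.String("com.example.client"),
+//        },
+//    }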
+func (s *GetSdkInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSdkInput"} + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + if s.SdkType == nil { + invalidParams.Add(request.NewErrParamRequired("SdkType")) + } + if s.StageName == nil { + invalidParams.Add(request.NewErrParamRequired("StageName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The binary blob response to GetSdk, which contains the generated SDK. +type GetSdkOutput struct { + _ struct{} `type:"structure" payload:"Body"` + + // The binary blob response to GetSdk, which contains the generated SDK. + Body []byte `locationName:"body" type:"blob"` + + // The content-disposition header value in the HTTP response. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // The content-type header value in the HTTP response. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` +} + +// String returns the string representation +func (s GetSdkOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSdkOutput) GoString() string { + return s.String() +} + +// Requests Amazon API Gateway to get information about a Stage resource. +type GetStageInput struct { + _ struct{} `type:"structure"` + + // The identifier of the RestApi resource for the Stage resource to get information + // about. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` + + // The name of the Stage resource to get information about. + StageName *string `location:"uri" locationName:"stage_name" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetStageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetStageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetStageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetStageInput"} + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + if s.StageName == nil { + invalidParams.Add(request.NewErrParamRequired("StageName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Requests Amazon API Gateway to get information about one or more Stage resources. +type GetStagesInput struct { + _ struct{} `type:"structure"` + + // The stages' deployment identifiers. + DeploymentId *string `location:"querystring" locationName:"deploymentId" type:"string"` + + // The stages' API identifiers. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetStagesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetStagesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetStagesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetStagesInput"} + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A list of Stage resources that are associated with the ApiKey resource.
+type GetStagesOutput struct { + _ struct{} `type:"structure"` + + // An individual Stage resource. + Item []*Stage `locationName:"item" type:"list"` +} + +// String returns the string representation +func (s GetStagesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetStagesOutput) GoString() string { + return s.String() +} + +// A POST request to import an API to Amazon API Gateway using an input of an +// API definition file. +type ImportRestApiInput struct { + _ struct{} `type:"structure" payload:"Body"` + + // The POST request body containing external API definitions. Currently, only + // Swagger definition JSON files are supported. + Body []byte `locationName:"body" type:"blob" required:"true"` + + // A query parameter to indicate whether to roll back the API creation (true) + // or not (false) when a warning is encountered. The default value is false. + FailOnWarnings *bool `location:"querystring" locationName:"failonwarnings" type:"boolean"` + + // Custom header parameters as part of the request. + Parameters map[string]*string `location:"querystring" locationName:"parameters" type:"map"` +} + +// String returns the string representation +func (s ImportRestApiInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportRestApiInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ImportRestApiInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ImportRestApiInput"} + if s.Body == nil { + invalidParams.Add(request.NewErrParamRequired("Body")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents an HTTP, AWS, or Mock integration. +type Integration struct { + _ struct{} `type:"structure"` + + // Specifies the integration's cache key parameters. + CacheKeyParameters []*string `locationName:"cacheKeyParameters" type:"list"` + + // Specifies the integration's cache namespace. + CacheNamespace *string `locationName:"cacheNamespace" type:"string"` + + // Specifies the credentials required for the integration, if any. For AWS integrations, + // three options are available. To specify an IAM Role for Amazon API Gateway + // to assume, use the role's Amazon Resource Name (ARN). To require that the + // caller's identity be passed through from the request, specify the string + // arn:aws:iam::\*:user/\*. To use resource-based permissions on supported AWS + // services, specify null. + Credentials *string `locationName:"credentials" type:"string"` + + // Specifies the integration's HTTP method type. + HttpMethod *string `locationName:"httpMethod" type:"string"` + + // Specifies the integration's responses. + IntegrationResponses map[string]*IntegrationResponse `locationName:"integrationResponses" type:"map"` + + // Specifies the pass-through behavior for incoming requests based on the Content-Type + // header in the request, and the available requestTemplates defined on the + // Integration. There are three valid values: WHEN_NO_MATCH, WHEN_NO_TEMPLATES, + // and NEVER. + // + // WHEN_NO_MATCH passes the request body for unmapped content types through + // to the Integration backend without transformation. + // + // NEVER rejects unmapped content types with an HTTP 415 'Unsupported Media + // Type' response.
+ // + // WHEN_NO_TEMPLATES will allow pass-through when the Integration has NO content + // types mapped to templates. However, if there is at least one content type + // defined, unmapped content types will be rejected with the same 415 response. + PassthroughBehavior *string `locationName:"passthroughBehavior" type:"string"` + + // Represents request parameters that are sent with the backend request. Request + // parameters are represented as a key/value map, with a destination as the + // key and a source as the value. A source must match an existing method request + // parameter, or a static value. Static values must be enclosed with single + // quotes, and be pre-encoded based on their destination in the request. The + // destination must match the pattern integration.request.{location}.{name}, + // where location is either querystring, path, or header. name must be a valid, + // unique parameter name. + RequestParameters map[string]*string `locationName:"requestParameters" type:"map"` + + // Represents a map of Velocity templates that are applied on the request payload + // based on the value of the Content-Type header sent by the client. The content + // type value is the key in this map, and the template (as a String) is the + // value. + RequestTemplates map[string]*string `locationName:"requestTemplates" type:"map"` + + // Specifies the integration's type. Valid values are HTTP, AWS, and MOCK. + Type *string `locationName:"type" type:"string" enum:"IntegrationType"` + + // Specifies the integration's Uniform Resource Identifier (URI). For HTTP integrations, + // the URI must be a fully formed, encoded HTTP(S) URL according to the RFC-3986 + // specification (https://www.ietf.org/rfc/rfc3986.txt). For + // AWS integrations, the URI should be of the form arn:aws:apigateway:{region}:{subdomain.service|service}:{path|action}/{service_api}. + // Region, subdomain, and service are used to determine the right endpoint. For + // AWS services that use the Action= query string parameter, service_api should + // be a valid action for the desired service. For RESTful AWS service APIs, + // path is used to indicate that the remaining substring in the URI should be + // treated as the path to the resource, including the initial /. + Uri *string `locationName:"uri" type:"string"` +} + +// String returns the string representation +func (s Integration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Integration) GoString() string { + return s.String() +} + +// Represents an integration response. The status code must map to an existing +// MethodResponse, and parameters and templates can be used to transform the +// backend response. +type IntegrationResponse struct { + _ struct{} `type:"structure"` + + // Represents response parameters that can be read from the backend response. + // Response parameters are represented as a key/value map, with a destination + // as the key and a source as the value. A destination must match an existing + // response parameter in the MethodResponse. The source can be a header from + // the backend response, or a static value. Static values are specified using + // enclosing single quotes, and backend response headers can be read using the + // pattern integration.response.header.{name}. + ResponseParameters map[string]*string `locationName:"responseParameters" type:"map"` + + // Specifies the templates used to transform the integration response body.
+ // Response templates are represented as a key/value map, with a content-type + // as the key and a template as the value. + ResponseTemplates map[string]*string `locationName:"responseTemplates" type:"map"` + + // Specifies the regular expression (regex) pattern used to choose an integration + // response based on the response from the backend. If the backend is an AWS + // Lambda function, the AWS Lambda function error header is matched. For all + // other HTTP and AWS backends, the HTTP status code is matched. + SelectionPattern *string `locationName:"selectionPattern" type:"string"` + + // Specifies the status code that is used to map the integration response to + // an existing MethodResponse. + StatusCode *string `locationName:"statusCode" type:"string"` +} + +// String returns the string representation +func (s IntegrationResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IntegrationResponse) GoString() string { + return s.String() +} + +// Represents a method. +type Method struct { + _ struct{} `type:"structure"` + + // Specifies whether the method requires a valid ApiKey. + ApiKeyRequired *bool `locationName:"apiKeyRequired" type:"boolean"` + + // The method's authorization type. + AuthorizationType *string `locationName:"authorizationType" type:"string"` + + // Specifies the identifier of an Authorizer to use on this Method. The authorizationType + // must be CUSTOM. + AuthorizerId *string `locationName:"authorizerId" type:"string"` + + // The HTTP method. + HttpMethod *string `locationName:"httpMethod" type:"string"` + + // The method's integration. + MethodIntegration *Integration `locationName:"methodIntegration" type:"structure"` + + // Represents available responses that can be sent to the caller. Method responses + // are represented as a key/value map, with an HTTP status code as the key and + // a MethodResponse as the value. The status codes are available for the Integration + // responses to map to. + MethodResponses map[string]*MethodResponse `locationName:"methodResponses" type:"map"` + + // Specifies the Model resources used for the request's content type. Request + // models are represented as a key/value map, with a content type as the key + // and a Model name as the value. + RequestModels map[string]*string `locationName:"requestModels" type:"map"` + + // Represents request parameters that can be accepted by Amazon API Gateway. + // Request parameters are represented as a key/value map, with a source as the + // key and a Boolean flag as the value. The Boolean flag is used to specify + // whether the parameter is required. A source must match the pattern method.request.{location}.{name}, + // where location is either querystring, path, or header. name is a valid, unique + // parameter name. Sources specified here are available to the integration for + // mapping to integration request parameters or templates. + RequestParameters map[string]*bool `locationName:"requestParameters" type:"map"` +} + +// String returns the string representation +func (s Method) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Method) GoString() string { + return s.String() +} + +// Represents a method response. Amazon API Gateway sends back the status code +// to the caller as the HTTP status code. Parameters and models can be used +// to transform the response from the method's integration. 
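+//
+// Shape sketch (an editor's example; the values are illustrative): a 200
+// response that declares an application/json model and a header the
+// integration may populate:
+//
+//    &apigateway.MethodResponse{
+//        StatusCode:     aws.String("200"),
+//        ResponseModels: map[string]*string{"application/json": aws.String("Empty")},
+//        ResponseParameters: map[string]*bool{
+//            "method.response.header.Access-Control-Allow-Origin": aws.Bool(false),
+//        },
+//    }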
+type MethodResponse struct { + _ struct{} `type:"structure"` + + // Specifies the Model resources used for the response's content-type. Response + // models are represented as a key/value map, with a content-type as the key + // and a Model name as the value. + ResponseModels map[string]*string `locationName:"responseModels" type:"map"` + + // Represents response parameters that can be sent back to the caller by Amazon + // API Gateway. Response parameters are represented as a key/value map, with + // a destination as the key and a boolean flag as the value, which is used to + // specify whether the parameter is required. A destination must match the pattern + // method.response.header.{name}, where name is a valid, unique header name. + // Destinations specified here are available to the integration for mapping + // from integration response parameters. + ResponseParameters map[string]*bool `locationName:"responseParameters" type:"map"` + + // The method response's status code. + StatusCode *string `locationName:"statusCode" type:"string"` +} + +// String returns the string representation +func (s MethodResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MethodResponse) GoString() string { + return s.String() +} + +// Specifies the method setting properties. +type MethodSetting struct { + _ struct{} `type:"structure"` + + // Specifies whether the cached responses are encrypted. The PATCH path for + // this setting is /{method_setting_key}/caching/dataEncrypted, and the value + // is a Boolean. + CacheDataEncrypted *bool `locationName:"cacheDataEncrypted" type:"boolean"` + + // Specifies the time to live (TTL) in seconds, for cached responses. The higher + // the TTL, the longer the response will be cached. The PATCH path for this + // setting is /{method_setting_key}/caching/ttlInSeconds, and the value is an + // integer. + CacheTtlInSeconds *int64 `locationName:"cacheTtlInSeconds" type:"integer"` + + // Specifies whether responses should be cached and returned for requests. A + // cache cluster must be enabled on the stage for responses to be cached. The + // PATCH path for this setting is /{method_setting_key}/caching/enabled, and + // the value is a Boolean. + CachingEnabled *bool `locationName:"cachingEnabled" type:"boolean"` + + // Specifies whether data trace logging is enabled for this method, which + // affects the log entries pushed to Amazon CloudWatch Logs. The PATCH path + // for this setting is /{method_setting_key}/logging/dataTrace, and the value + // is a Boolean. + DataTraceEnabled *bool `locationName:"dataTraceEnabled" type:"boolean"` + + // Specifies the logging level for this method, which affects the log entries + // pushed to Amazon CloudWatch Logs. The PATCH path for this setting is /{method_setting_key}/logging/loglevel, + // and the available levels are OFF, ERROR, and INFO. + LoggingLevel *string `locationName:"loggingLevel" type:"string"` + + // Specifies whether Amazon CloudWatch metrics are enabled for this method. + // The PATCH path for this setting is /{method_setting_key}/metrics/enabled, + // and the value is a Boolean. + MetricsEnabled *bool `locationName:"metricsEnabled" type:"boolean"` + + // Specifies whether authorization is required for a cache invalidation request. + // The PATCH path for this setting is /{method_setting_key}/caching/requireAuthorizationForCacheControl, + // and the value is a Boolean.
+ RequireAuthorizationForCacheControl *bool `locationName:"requireAuthorizationForCacheControl" type:"boolean"` + + // Specifies the throttling burst limit. The PATCH path for this setting is + // /{method_setting_key}/throttling/burstLimit, and the value is an integer. + ThrottlingBurstLimit *int64 `locationName:"throttlingBurstLimit" type:"integer"` + + // Specifies the throttling rate limit. The PATCH path for this setting is /{method_setting_key}/throttling/rateLimit, + // and the value is a double. + ThrottlingRateLimit *float64 `locationName:"throttlingRateLimit" type:"double"` + + // Specifies the strategy on how to handle the unauthorized requests for cache + // invalidation. The PATCH path for this setting is /{method_setting_key}/caching/unauthorizedCacheControlHeaderStrategy, + // and the available values are FAIL_WITH_403, SUCCEED_WITH_RESPONSE_HEADER, + // and SUCCEED_WITHOUT_RESPONSE_HEADER. + UnauthorizedCacheControlHeaderStrategy *string `locationName:"unauthorizedCacheControlHeaderStrategy" type:"string" enum:"UnauthorizedCacheControlHeaderStrategy"` +} + +// String returns the string representation +func (s MethodSetting) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MethodSetting) GoString() string { + return s.String() +} + +// Represents a summary of a Method resource, given a particular date and time. +type MethodSnapshot struct { + _ struct{} `type:"structure"` + + // Specifies whether the method requires a valid ApiKey. + ApiKeyRequired *bool `locationName:"apiKeyRequired" type:"boolean"` + + // Specifies the type of authorization used for the method. + AuthorizationType *string `locationName:"authorizationType" type:"string"` +} + +// String returns the string representation +func (s MethodSnapshot) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MethodSnapshot) GoString() string { + return s.String() +} + +// Represents the structure of a request or response payload for a method. +type Model struct { + _ struct{} `type:"structure"` + + // The content-type for the model. + ContentType *string `locationName:"contentType" type:"string"` + + // The description of the model. + Description *string `locationName:"description" type:"string"` + + // The identifier for the model resource. + Id *string `locationName:"id" type:"string"` + + // The name of the model. + Name *string `locationName:"name" type:"string"` + + // The schema for the model. For application/json models, this should be a JSON + // Schema draft v4 (http://json-schema.org/documentation.html) model. + Schema *string `locationName:"schema" type:"string"` +} + +// String returns the string representation +func (s Model) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Model) GoString() string { + return s.String() +} + +// A single patch operation to apply to the specified resource. Please refer +// to http://tools.ietf.org/html/rfc6902#section-4 for an explanation of how +// each operation is used. +type PatchOperation struct { + _ struct{} `type:"structure"` + + // The "move" and "copy" operation object MUST contain a "from" member, which + // is a string containing a JSON Pointer value that references the location + // in the target document to move the value from. + From *string `locationName:"from" type:"string"` + + // A patch operation whose value indicates the operation to perform.
Its value + // MUST be one of "add", "remove", "replace", "move", "copy", or "test"; other + // values are errors. + Op *string `locationName:"op" type:"string" enum:"op"` + + // Operation objects MUST have exactly one "path" member. That member's value + // is a string containing a `JSON-Pointer` value that references a location + // within the target document (the "target location") where the operation is + // performed. + Path *string `locationName:"path" type:"string"` + + // The actual value content. + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s PatchOperation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PatchOperation) GoString() string { + return s.String() +} + +// Represents a put integration request. +type PutIntegrationInput struct { + _ struct{} `type:"structure"` + + // Specifies a put integration input's cache key parameters. + CacheKeyParameters []*string `locationName:"cacheKeyParameters" type:"list"` + + // Specifies a put integration input's cache namespace. + CacheNamespace *string `locationName:"cacheNamespace" type:"string"` + + // Specifies whether credentials are required for a put integration. + Credentials *string `locationName:"credentials" type:"string"` + + // Specifies a put integration request's HTTP method. + HttpMethod *string `location:"uri" locationName:"http_method" type:"string" required:"true"` + + // Specifies a put integration HTTP method. When the integration type is HTTP + // or AWS, this field is required. + IntegrationHttpMethod *string `locationName:"httpMethod" type:"string"` + + // Specifies the pass-through behavior for incoming requests based on the Content-Type + // header in the request, and the available requestTemplates defined on the + // Integration. There are three valid values: WHEN_NO_MATCH, WHEN_NO_TEMPLATES, + // and NEVER. + // + // WHEN_NO_MATCH passes the request body for unmapped content types through + // to the Integration backend without transformation. + // + // NEVER rejects unmapped content types with an HTTP 415 'Unsupported Media + // Type' response. + // + // WHEN_NO_TEMPLATES will allow pass-through when the Integration has NO content + // types mapped to templates. However, if there is at least one content type + // defined, unmapped content types will be rejected with the same 415 response. + PassthroughBehavior *string `locationName:"passthroughBehavior" type:"string"` + + // Represents request parameters that are sent with the backend request. Request + // parameters are represented as a key/value map, with a destination as the + // key and a source as the value. A source must match an existing method request + // parameter, or a static value. Static values must be enclosed with single + // quotes, and be pre-encoded based on their destination in the request. The + // destination must match the pattern integration.request.{location}.{name}, + // where location is either querystring, path, or header. name must be a valid, + // unique parameter name. + RequestParameters map[string]*string `locationName:"requestParameters" type:"map"` + + // Represents a map of Velocity templates that are applied on the request payload + // based on the value of the Content-Type header sent by the client. The content + // type value is the key in this map, and the template (as a String) is the + // value.
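+ //
+ // For example (an editor's sketch, not generated documentation), a template
+ // keyed by content type that wraps the raw request body in an envelope might
+ // be registered as:
+ //
+ //    RequestTemplates: map[string]*string{
+ //        "application/json": aws.String(`{"body": $input.json('$')}`),
+ //    },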
+ RequestTemplates map[string]*string `locationName:"requestTemplates" type:"map"` + + // Specifies a put integration request's resource ID. + ResourceId *string `location:"uri" locationName:"resource_id" type:"string" required:"true"` + + // Specifies a put integration request's API identifier. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` + + // Specifies a put integration input's type. + Type *string `locationName:"type" type:"string" required:"true" enum:"IntegrationType"` + + // Specifies a put integration input's Uniform Resource Identifier (URI). When + // the integration type is HTTP or AWS, this field is required. For integration + // with Lambda as an AWS service proxy, this value is of the 'arn:aws:apigateway::lambda:path/2015-03-31/functions//invocations' + // format. + Uri *string `locationName:"uri" type:"string"` +} + +// String returns the string representation +func (s PutIntegrationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutIntegrationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutIntegrationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutIntegrationInput"} + if s.HttpMethod == nil { + invalidParams.Add(request.NewErrParamRequired("HttpMethod")) + } + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents a put integration response request. +type PutIntegrationResponseInput struct { + _ struct{} `type:"structure"` + + // Specifies a put integration response request's HTTP method. + HttpMethod *string `location:"uri" locationName:"http_method" type:"string" required:"true"` + + // Specifies a put integration response request's resource identifier. + ResourceId *string `location:"uri" locationName:"resource_id" type:"string" required:"true"` + + // Represents response parameters that can be read from the backend response. + // Response parameters are represented as a key/value map, with a destination + // as the key and a source as the value. A destination must match an existing + // response parameter in the Method. The source can be a header from the backend + // response, or a static value. Static values are specified using enclosing + // single quotes, and backend response headers can be read using the pattern + // integration.response.header.{name}. + ResponseParameters map[string]*string `locationName:"responseParameters" type:"map"` + + // Specifies a put integration response's templates. + ResponseTemplates map[string]*string `locationName:"responseTemplates" type:"map"` + + // Specifies a put integration response request's API identifier. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` + + // Specifies the selection pattern of a put integration response. + SelectionPattern *string `locationName:"selectionPattern" type:"string"` + + // Specifies the status code that is used to map the integration response to + // an existing MethodResponse. 
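+ //
+ // (Editor's illustration; svc and the identifiers are placeholders.) A full
+ // request that maps backend responses matching a Lambda error pattern onto
+ // an existing 500 MethodResponse:
+ //
+ //    _, err := svc.PutIntegrationResponse(&apigateway.PutIntegrationResponseInput{
+ //        RestApiId:        aws.String("a1b2c3"),
+ //        ResourceId:       aws.String("resid1"),
+ //        HttpMethod:       aws.String("POST"),
+ //        StatusCode:       aws.String("500"),
+ //        SelectionPattern: aws.String(".*Error.*"),
+ //    })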
+ StatusCode *string `location:"uri" locationName:"status_code" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutIntegrationResponseInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutIntegrationResponseInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutIntegrationResponseInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutIntegrationResponseInput"} + if s.HttpMethod == nil { + invalidParams.Add(request.NewErrParamRequired("HttpMethod")) + } + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + if s.StatusCode == nil { + invalidParams.Add(request.NewErrParamRequired("StatusCode")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Request to add a method to an existing Resource resource. +type PutMethodInput struct { + _ struct{} `type:"structure"` + + // Specifies whether the method requires a valid ApiKey. + ApiKeyRequired *bool `locationName:"apiKeyRequired" type:"boolean"` + + // Specifies the type of authorization used for the method. + AuthorizationType *string `locationName:"authorizationType" type:"string" required:"true"` + + // Specifies the identifier of an Authorizer to use on this Method, if the type + // is CUSTOM. + AuthorizerId *string `locationName:"authorizerId" type:"string"` + + // Specifies the put method request's HTTP method type. + HttpMethod *string `location:"uri" locationName:"http_method" type:"string" required:"true"` + + // Specifies the Model resources used for the request's content type. Request + // models are represented as a key/value map, with a content type as the key + // and a Model name as the value. + RequestModels map[string]*string `locationName:"requestModels" type:"map"` + + // Represents request parameters that can be accepted by Amazon API Gateway. + // Request parameters are represented as a key/value map, with a source as the + // key and a Boolean flag as the value. The Boolean flag is used to specify + // whether the parameter is required. A source must match the pattern + // method.request.{location}.{name}, where location is either querystring, path, + // or header. name must be a valid, unique parameter name. + RequestParameters map[string]*bool `locationName:"requestParameters" type:"map"` + + // The Resource identifier for the new Method resource. + ResourceId *string `location:"uri" locationName:"resource_id" type:"string" required:"true"` + + // The RestApi identifier for the new Method resource. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutMethodInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutMethodInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid.
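+//
+// A minimal sketch (an editor's example, not generated documentation) of
+// declaring a key-protected GET method; svc and the identifiers are
+// placeholders:
+//
+//    _, err := svc.PutMethod(&apigateway.PutMethodInput{
+//        RestApiId:         aws.String("a1b2c3"),
+//        ResourceId:        aws.String("resid1"),
+//        HttpMethod:        aws.String("GET"),
+//        AuthorizationType: aws.String("NONE"),
+//        ApiKeyRequired:    aws.Bool(true),
+//    })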
+func (s *PutMethodInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutMethodInput"} + if s.AuthorizationType == nil { + invalidParams.Add(request.NewErrParamRequired("AuthorizationType")) + } + if s.HttpMethod == nil { + invalidParams.Add(request.NewErrParamRequired("HttpMethod")) + } + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Request to add a MethodResponse to an existing Method resource. +type PutMethodResponseInput struct { + _ struct{} `type:"structure"` + + // The HTTP verb that identifies the Method resource. + HttpMethod *string `location:"uri" locationName:"http_method" type:"string" required:"true"` + + // The Resource identifier for the Method resource. + ResourceId *string `location:"uri" locationName:"resource_id" type:"string" required:"true"` + + // Specifies the Model resources used for the response's content type. Response + // models are represented as a key/value map, with a content type as the key + // and a Model name as the value. + ResponseModels map[string]*string `locationName:"responseModels" type:"map"` + + // Represents response parameters that can be sent back to the caller by Amazon + // API Gateway. Response parameters are represented as a key/value map, with + // a destination as the key and a Boolean flag as the value. The Boolean flag + // is used to specify whether the parameter is required. A destination must + // match the pattern method.response.header.{name}, where name is a valid, unique + // header name. Destinations specified here are available to the integration + // for mapping from integration response parameters. + ResponseParameters map[string]*bool `locationName:"responseParameters" type:"map"` + + // The RestApi identifier for the Method resource. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` + + // The method response's status code. + StatusCode *string `location:"uri" locationName:"status_code" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutMethodResponseInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutMethodResponseInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutMethodResponseInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutMethodResponseInput"} + if s.HttpMethod == nil { + invalidParams.Add(request.NewErrParamRequired("HttpMethod")) + } + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + if s.StatusCode == nil { + invalidParams.Add(request.NewErrParamRequired("StatusCode")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A PUT request to update an existing API, with external API definitions specified +// as the request body. +type PutRestApiInput struct { + _ struct{} `type:"structure" payload:"Body"` + + // The PUT request body containing external API definitions. Currently, only + // Swagger definition JSON files are supported. 
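+ //
+ // (Editor's sketch; ioutil and the file name are assumptions.) The body is
+ // passed as raw bytes, e.g. a Swagger document read from disk:
+ //
+ //    def, err := ioutil.ReadFile("swagger.json")
+ //    if err == nil {
+ //        _, err = svc.PutRestApi(&apigateway.PutRestApiInput{
+ //            RestApiId: aws.String("a1b2c3"),
+ //            Mode:      aws.String("merge"),
+ //            Body:      def,
+ //        })
+ //    }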
+ Body []byte `locationName:"body" type:"blob" required:"true"` + + // A query parameter to indicate whether to roll back the API update (true) or + // not (false) when a warning is encountered. The default value is false. + FailOnWarnings *bool `location:"querystring" locationName:"failonwarnings" type:"boolean"` + + // The mode query parameter to specify the update mode. Valid values are "merge" + // and "overwrite". By default, the update mode is "merge". + Mode *string `location:"querystring" locationName:"mode" type:"string" enum:"PutMode"` + + // Custom headers supplied as part of the request. + Parameters map[string]*string `location:"querystring" locationName:"parameters" type:"map"` + + // The identifier of the RestApi to be updated. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutRestApiInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRestApiInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutRestApiInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutRestApiInput"} + if s.Body == nil { + invalidParams.Add(request.NewErrParamRequired("Body")) + } + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents a resource. +type Resource struct { + _ struct{} `type:"structure"` + + // The resource's identifier. + Id *string `locationName:"id" type:"string"` + + // The parent resource's identifier. + ParentId *string `locationName:"parentId" type:"string"` + + // The full path for this resource. + Path *string `locationName:"path" type:"string"` + + // The last path segment for this resource. + PathPart *string `locationName:"pathPart" type:"string"` + + // Map of methods for this resource, which is included only if the request uses + // the embed query option. + ResourceMethods map[string]*Method `locationName:"resourceMethods" type:"map"` +} + +// String returns the string representation +func (s Resource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Resource) GoString() string { + return s.String() +} + +// Represents a REST API. +type RestApi struct { + _ struct{} `type:"structure"` + + // The date when the API was created, in ISO 8601 format (http://www.iso.org/iso/home/standards/iso8601.htm). + CreatedDate *time.Time `locationName:"createdDate" type:"timestamp" timestampFormat:"unix"` + + // The API's description. + Description *string `locationName:"description" type:"string"` + + // The API's identifier. This identifier is unique across all of your APIs in + // Amazon API Gateway. + Id *string `locationName:"id" type:"string"` + + // The API's name. + Name *string `locationName:"name" type:"string"` + + Warnings []*string `locationName:"warnings" type:"list"` +} + +// String returns the string representation +func (s RestApi) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestApi) GoString() string { + return s.String() +} + +// Represents a unique identifier for a version of a deployed RestApi that is +// callable by users.
+type Stage struct { + _ struct{} `type:"structure"` + + // Specifies whether a cache cluster is enabled for the stage. + CacheClusterEnabled *bool `locationName:"cacheClusterEnabled" type:"boolean"` + + // The size of the cache cluster for the stage, if enabled. + CacheClusterSize *string `locationName:"cacheClusterSize" type:"string" enum:"CacheClusterSize"` + + // The status of the cache cluster for the stage, if enabled. + CacheClusterStatus *string `locationName:"cacheClusterStatus" type:"string" enum:"CacheClusterStatus"` + + ClientCertificateId *string `locationName:"clientCertificateId" type:"string"` + + // The date and time that the stage was created, in ISO 8601 format (http://www.iso.org/iso/home/standards/iso8601.htm). + CreatedDate *time.Time `locationName:"createdDate" type:"timestamp" timestampFormat:"unix"` + + // The identifier of the Deployment that the stage points to. + DeploymentId *string `locationName:"deploymentId" type:"string"` + + // The stage's description. + Description *string `locationName:"description" type:"string"` + + // The date and time that information about the stage was last updated, in ISO + // 8601 format (http://www.iso.org/iso/home/standards/iso8601.htm). + LastUpdatedDate *time.Time `locationName:"lastUpdatedDate" type:"timestamp" timestampFormat:"unix"` + + // A map that defines the method settings for a Stage resource. Keys are defined + // as {resource_path}/{http_method} for an individual method override, or \*/\* + // for the settings applied to all methods in the stage. + MethodSettings map[string]*MethodSetting `locationName:"methodSettings" type:"map"` + + // The name of the stage is the first path segment in the Uniform Resource Identifier + // (URI) of a call to Amazon API Gateway. + StageName *string `locationName:"stageName" type:"string"` + + // A map that defines the stage variables for a Stage resource. Variable names + // can have alphanumeric characters, and the values must match [A-Za-z0-9-._~:/?#&=,]+. + Variables map[string]*string `locationName:"variables" type:"map"` +} + +// String returns the string representation +func (s Stage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Stage) GoString() string { + return s.String() +} + +// A reference to a unique stage identified in the format {restApiId}/{stage}. +type StageKey struct { + _ struct{} `type:"structure"` + + // The RestApi identifier that the stage key references. + RestApiId *string `locationName:"restApiId" type:"string"` + + // The stage name in the RestApi that the stage key references. + StageName *string `locationName:"stageName" type:"string"` +} + +// String returns the string representation +func (s StageKey) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StageKey) GoString() string { + return s.String() +} + +// Make a request to simulate the execution of an Authorizer. +type TestInvokeAuthorizerInput struct { + _ struct{} `type:"structure"` + + // [Optional] A key-value map of additional context variables. + AdditionalContext map[string]*string `locationName:"additionalContext" type:"map"` + + // Specifies a test invoke authorizer request's Authorizer ID. + AuthorizerId *string `location:"uri" locationName:"authorizer_id" type:"string" required:"true"` + + // [Optional] The simulated request body of an incoming invocation request.
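+ //
+ // (Editor's illustration; svc and the identifiers are placeholders.) A
+ // complete test invocation; per the Headers note below, the simulated
+ // authorization token rides in the headers map:
+ //
+ //    out, err := svc.TestInvokeAuthorizer(&apigateway.TestInvokeAuthorizerInput{
+ //        RestApiId:    aws.String("a1b2c3"),
+ //        AuthorizerId: aws.String("auth01"),
+ //        Headers:      map[string]*string{"Authorization": aws.String("allow")},
+ //    })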
+ Body *string `locationName:"body" type:"string"` + + // [Required] A key-value map of headers to simulate an incoming invocation + // request. This is where the incoming authorization token, or identity source, + // should be specified. + Headers map[string]*string `locationName:"headers" type:"map"` + + // [Optional] The URI path, including query string, of the simulated invocation + // request. Use this to specify path parameters and query string parameters. + PathWithQueryString *string `locationName:"pathWithQueryString" type:"string"` + + // Specifies a test invoke authorizer request's RestApi identifier. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` + + // A key-value map of stage variables to simulate an invocation on a deployed + // Stage. + StageVariables map[string]*string `locationName:"stageVariables" type:"map"` +} + +// String returns the string representation +func (s TestInvokeAuthorizerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TestInvokeAuthorizerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TestInvokeAuthorizerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TestInvokeAuthorizerInput"} + if s.AuthorizerId == nil { + invalidParams.Add(request.NewErrParamRequired("AuthorizerId")) + } + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the response of the test invoke request for a custom Authorizer. +type TestInvokeAuthorizerOutput struct { + _ struct{} `type:"structure"` + + Authorization map[string][]*string `locationName:"authorization" type:"map"` + + // The HTTP status code that the client would have received. Value is 0 if the + // authorizer succeeded. + ClientStatus *int64 `locationName:"clientStatus" type:"integer"` + + // The execution latency of the test authorizer request. + Latency *int64 `locationName:"latency" type:"long"` + + // The Amazon API Gateway execution log for the test authorizer request. + Log *string `locationName:"log" type:"string"` + + // The policy JSON document returned by the Authorizer. + Policy *string `locationName:"policy" type:"string"` + + // The principal identity returned by the Authorizer. + PrincipalId *string `locationName:"principalId" type:"string"` +} + +// String returns the string representation +func (s TestInvokeAuthorizerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TestInvokeAuthorizerOutput) GoString() string { + return s.String() +} + +// Make a request to simulate the execution of a Method. +type TestInvokeMethodInput struct { + _ struct{} `type:"structure"` + + // The simulated request body of an incoming invocation request. + Body *string `locationName:"body" type:"string"` + + // A ClientCertificate identifier to use in the test invocation. API Gateway + // will use the certificate when making the HTTPS request to the defined + // backend endpoint. + ClientCertificateId *string `locationName:"clientCertificateId" type:"string"` + + // A key-value map of headers to simulate an incoming invocation request. + Headers map[string]*string `locationName:"headers" type:"map"` + + // Specifies a test invoke method request's HTTP method.
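+ //
+ // (Editor's sketch, mirroring the TestInvokeAuthorizer example above; the
+ // names are placeholders.)
+ //
+ //    out, err := svc.TestInvokeMethod(&apigateway.TestInvokeMethodInput{
+ //        RestApiId:           aws.String("a1b2c3"),
+ //        ResourceId:          aws.String("resid1"),
+ //        HttpMethod:          aws.String("GET"),
+ //        PathWithQueryString: aws.String("/pets?type=dog"),
+ //    })
+ //    // out.Status, out.Body, and out.Log carry the simulated result.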
+ HttpMethod *string `location:"uri" locationName:"http_method" type:"string" required:"true"`
+
+ // The URI path, including query string, of the simulated invocation request.
+ // Use this to specify path parameters and query string parameters.
+ PathWithQueryString *string `locationName:"pathWithQueryString" type:"string"`
+
+ // Specifies a test invoke method request's resource ID.
+ ResourceId *string `location:"uri" locationName:"resource_id" type:"string" required:"true"`
+
+ // Specifies a test invoke method request's API identifier.
+ RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"`
+
+ // A key-value map of stage variables to simulate an invocation on a deployed
+ // Stage.
+ StageVariables map[string]*string `locationName:"stageVariables" type:"map"`
+}
+
+// String returns the string representation
+func (s TestInvokeMethodInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TestInvokeMethodInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *TestInvokeMethodInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "TestInvokeMethodInput"}
+ if s.HttpMethod == nil {
+  invalidParams.Add(request.NewErrParamRequired("HttpMethod"))
+ }
+ if s.ResourceId == nil {
+  invalidParams.Add(request.NewErrParamRequired("ResourceId"))
+ }
+ if s.RestApiId == nil {
+  invalidParams.Add(request.NewErrParamRequired("RestApiId"))
+ }
+
+ if invalidParams.Len() > 0 {
+  return invalidParams
+ }
+ return nil
+}
+
+// Represents the response of the test invoke request for an HTTP method.
+type TestInvokeMethodOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The body of the HTTP response.
+ Body *string `locationName:"body" type:"string"`
+
+ // The headers of the HTTP response.
+ Headers map[string]*string `locationName:"headers" type:"map"`
+
+ // The execution latency of the test invoke request.
+ Latency *int64 `locationName:"latency" type:"long"`
+
+ // The Amazon API Gateway execution log for the test invoke request.
+ Log *string `locationName:"log" type:"string"`
+
+ // The HTTP status code.
+ Status *int64 `locationName:"status" type:"integer"`
+}
+
+// String returns the string representation
+func (s TestInvokeMethodOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TestInvokeMethodOutput) GoString() string {
+ return s.String()
+}
+
+// The API request throttle settings.
+type ThrottleSettings struct {
+ _ struct{} `type:"structure"`
+
+ // The API request burst limit.
+ BurstLimit *int64 `locationName:"burstLimit" type:"integer"`
+
+ // The steady-state API request rate limit, in requests per second.
+ RateLimit *float64 `locationName:"rateLimit" type:"double"`
+}
+
+// String returns the string representation
+func (s ThrottleSettings) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ThrottleSettings) GoString() string {
+ return s.String()
+}
+
+// Requests Amazon API Gateway to change information about the current Account
+// resource.
+type UpdateAccountInput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of operations describing the updates to apply to the specified resource.
+ // The patches are applied in the order specified in the list.
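+ // A hedged sketch of a single patch operation on the Account resource (the
+ // /cloudwatchRoleArn path is documented for account updates; the ARN itself
+ // is illustrative):
+ //
+ //    ops := []*apigateway.PatchOperation{{
+ //        Op:    aws.String(apigateway.OpReplace),
+ //        Path:  aws.String("/cloudwatchRoleArn"),
+ //        Value: aws.String("arn:aws:iam::123456789012:role/apigw-logs"),
+ //    }}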
+ PatchOperations []*PatchOperation `locationName:"patchOperations" type:"list"` +} + +// String returns the string representation +func (s UpdateAccountInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAccountInput) GoString() string { + return s.String() +} + +// A request to change information about an ApiKey resource. +type UpdateApiKeyInput struct { + _ struct{} `type:"structure"` + + // The identifier of the ApiKey resource to be updated. + ApiKey *string `location:"uri" locationName:"api_Key" type:"string" required:"true"` + + // A list of operations describing the updates to apply to the specified resource. + // The patches are applied in the order specified in the list. + PatchOperations []*PatchOperation `locationName:"patchOperations" type:"list"` +} + +// String returns the string representation +func (s UpdateApiKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateApiKeyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateApiKeyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateApiKeyInput"} + if s.ApiKey == nil { + invalidParams.Add(request.NewErrParamRequired("ApiKey")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Request to update an existing Authorizer resource. +type UpdateAuthorizerInput struct { + _ struct{} `type:"structure"` + + // The identifier of the Authorizer resource. + AuthorizerId *string `location:"uri" locationName:"authorizer_id" type:"string" required:"true"` + + // A list of operations describing the updates to apply to the specified resource. + // The patches are applied in the order specified in the list. + PatchOperations []*PatchOperation `locationName:"patchOperations" type:"list"` + + // The RestApi identifier for the Authorizer resource. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateAuthorizerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAuthorizerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateAuthorizerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateAuthorizerInput"} + if s.AuthorizerId == nil { + invalidParams.Add(request.NewErrParamRequired("AuthorizerId")) + } + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A request to change information about the BasePathMapping resource. +type UpdateBasePathMappingInput struct { + _ struct{} `type:"structure"` + + // The base path of the BasePathMapping resource to change. + BasePath *string `location:"uri" locationName:"base_path" type:"string" required:"true"` + + // The domain name of the BasePathMapping resource to change. + DomainName *string `location:"uri" locationName:"domain_name" type:"string" required:"true"` + + // A list of operations describing the updates to apply to the specified resource. + // The patches are applied in the order specified in the list. 
+ PatchOperations []*PatchOperation `locationName:"patchOperations" type:"list"`
+}
+
+// String returns the string representation
+func (s UpdateBasePathMappingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateBasePathMappingInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateBasePathMappingInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UpdateBasePathMappingInput"}
+ if s.BasePath == nil {
+  invalidParams.Add(request.NewErrParamRequired("BasePath"))
+ }
+ if s.DomainName == nil {
+  invalidParams.Add(request.NewErrParamRequired("DomainName"))
+ }
+
+ if invalidParams.Len() > 0 {
+  return invalidParams
+ }
+ return nil
+}
+
+// A request to change information about a ClientCertificate resource.
+type UpdateClientCertificateInput struct {
+ _ struct{} `type:"structure"`
+
+ // The identifier of the ClientCertificate resource to be updated.
+ ClientCertificateId *string `location:"uri" locationName:"clientcertificate_id" type:"string" required:"true"`
+
+ // A list of operations describing the updates to apply to the specified resource.
+ // The patches are applied in the order specified in the list.
+ PatchOperations []*PatchOperation `locationName:"patchOperations" type:"list"`
+}
+
+// String returns the string representation
+func (s UpdateClientCertificateInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateClientCertificateInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateClientCertificateInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UpdateClientCertificateInput"}
+ if s.ClientCertificateId == nil {
+  invalidParams.Add(request.NewErrParamRequired("ClientCertificateId"))
+ }
+
+ if invalidParams.Len() > 0 {
+  return invalidParams
+ }
+ return nil
+}
+
+// Requests Amazon API Gateway to change information about a Deployment resource.
+type UpdateDeploymentInput struct {
+ _ struct{} `type:"structure"`
+
+ // The replacement identifier for the Deployment resource to change information
+ // about.
+ DeploymentId *string `location:"uri" locationName:"deployment_id" type:"string" required:"true"`
+
+ // A list of operations describing the updates to apply to the specified resource.
+ // The patches are applied in the order specified in the list.
+ PatchOperations []*PatchOperation `locationName:"patchOperations" type:"list"`
+
+ // The replacement identifier of the RestApi resource for the Deployment resource
+ // to change information about.
+ RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateDeploymentInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateDeploymentInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
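+// Validate runs automatically when the request is sent, so callers rarely
+// invoke it by hand. A minimal sketch of checking it directly:
+//
+//    if err := input.Validate(); err != nil {
+//        // err is a request.ErrInvalidParams naming each missing field.
+//    }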
+func (s *UpdateDeploymentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateDeploymentInput"} + if s.DeploymentId == nil { + invalidParams.Add(request.NewErrParamRequired("DeploymentId")) + } + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A request to change information about the DomainName resource. +type UpdateDomainNameInput struct { + _ struct{} `type:"structure"` + + // The name of the DomainName resource to be changed. + DomainName *string `location:"uri" locationName:"domain_name" type:"string" required:"true"` + + // A list of operations describing the updates to apply to the specified resource. + // The patches are applied in the order specified in the list. + PatchOperations []*PatchOperation `locationName:"patchOperations" type:"list"` +} + +// String returns the string representation +func (s UpdateDomainNameInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDomainNameInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateDomainNameInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateDomainNameInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents an update integration request. +type UpdateIntegrationInput struct { + _ struct{} `type:"structure"` + + // Represents an update integration request's HTTP method. + HttpMethod *string `location:"uri" locationName:"http_method" type:"string" required:"true"` + + // A list of operations describing the updates to apply to the specified resource. + // The patches are applied in the order specified in the list. + PatchOperations []*PatchOperation `locationName:"patchOperations" type:"list"` + + // Represents an update integration request's resource identifier. + ResourceId *string `location:"uri" locationName:"resource_id" type:"string" required:"true"` + + // Represents an update integration request's API identifier. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateIntegrationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateIntegrationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateIntegrationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateIntegrationInput"} + if s.HttpMethod == nil { + invalidParams.Add(request.NewErrParamRequired("HttpMethod")) + } + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents an update integration response request. +type UpdateIntegrationResponseInput struct { + _ struct{} `type:"structure"` + + // Specifies an update integration response request's HTTP method. 
+ HttpMethod *string `location:"uri" locationName:"http_method" type:"string" required:"true"` + + // A list of operations describing the updates to apply to the specified resource. + // The patches are applied in the order specified in the list. + PatchOperations []*PatchOperation `locationName:"patchOperations" type:"list"` + + // Specifies an update integration response request's resource identifier. + ResourceId *string `location:"uri" locationName:"resource_id" type:"string" required:"true"` + + // Specifies an update integration response request's API identifier. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` + + // Specifies an update integration response request's status code. + StatusCode *string `location:"uri" locationName:"status_code" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateIntegrationResponseInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateIntegrationResponseInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateIntegrationResponseInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateIntegrationResponseInput"} + if s.HttpMethod == nil { + invalidParams.Add(request.NewErrParamRequired("HttpMethod")) + } + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + if s.StatusCode == nil { + invalidParams.Add(request.NewErrParamRequired("StatusCode")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Request to update an existing Method resource. +type UpdateMethodInput struct { + _ struct{} `type:"structure"` + + // The HTTP verb that identifies the Method resource. + HttpMethod *string `location:"uri" locationName:"http_method" type:"string" required:"true"` + + // A list of operations describing the updates to apply to the specified resource. + // The patches are applied in the order specified in the list. + PatchOperations []*PatchOperation `locationName:"patchOperations" type:"list"` + + // The Resource identifier for the Method resource. + ResourceId *string `location:"uri" locationName:"resource_id" type:"string" required:"true"` + + // The RestApi identifier for the Method resource. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateMethodInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateMethodInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateMethodInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateMethodInput"} + if s.HttpMethod == nil { + invalidParams.Add(request.NewErrParamRequired("HttpMethod")) + } + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A request to update an existing MethodResponse resource. 
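+// A typical patch on a MethodResponse adds or removes a response header
+// mapping. A hedged sketch (the header name and path are illustrative):
+//
+//    &apigateway.PatchOperation{
+//        Op:   aws.String(apigateway.OpAdd),
+//        Path: aws.String("/responseParameters/method.response.header.X-Request-Id"),
+//    }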
+type UpdateMethodResponseInput struct { + _ struct{} `type:"structure"` + + // The HTTP verb identifier for the parent Method resource. + HttpMethod *string `location:"uri" locationName:"http_method" type:"string" required:"true"` + + // A list of operations describing the updates to apply to the specified resource. + // The patches are applied in the order specified in the list. + PatchOperations []*PatchOperation `locationName:"patchOperations" type:"list"` + + // The Resource identifier for the MethodResponse resource. + ResourceId *string `location:"uri" locationName:"resource_id" type:"string" required:"true"` + + // The RestApi identifier for the MethodResponse resource. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` + + // The status code identifier for the MethodResponse resource. + StatusCode *string `location:"uri" locationName:"status_code" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateMethodResponseInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateMethodResponseInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateMethodResponseInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateMethodResponseInput"} + if s.HttpMethod == nil { + invalidParams.Add(request.NewErrParamRequired("HttpMethod")) + } + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + if s.StatusCode == nil { + invalidParams.Add(request.NewErrParamRequired("StatusCode")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Request to update an existing model in an existing RestApi resource. +type UpdateModelInput struct { + _ struct{} `type:"structure"` + + // The name of the model to update. + ModelName *string `location:"uri" locationName:"model_name" type:"string" required:"true"` + + // A list of operations describing the updates to apply to the specified resource. + // The patches are applied in the order specified in the list. + PatchOperations []*PatchOperation `locationName:"patchOperations" type:"list"` + + // The RestApi identifier under which the model exists. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateModelInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateModelInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateModelInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateModelInput"} + if s.ModelName == nil { + invalidParams.Add(request.NewErrParamRequired("ModelName")) + } + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Request to change information about a Resource resource. +type UpdateResourceInput struct { + _ struct{} `type:"structure"` + + // A list of operations describing the updates to apply to the specified resource. + // The patches are applied in the order specified in the list. 
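+ // For example (illustrative), replacing the resource's path segment:
+ //
+ //    &apigateway.PatchOperation{
+ //        Op:    aws.String(apigateway.OpReplace),
+ //        Path:  aws.String("/pathPart"),
+ //        Value: aws.String("orders"),
+ //    }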
+ PatchOperations []*PatchOperation `locationName:"patchOperations" type:"list"` + + // The identifier of the Resource resource. + ResourceId *string `location:"uri" locationName:"resource_id" type:"string" required:"true"` + + // The RestApi identifier for the Resource resource. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateResourceInput"} + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Request to update an existing RestApi resource in your collection. +type UpdateRestApiInput struct { + _ struct{} `type:"structure"` + + // A list of operations describing the updates to apply to the specified resource. + // The patches are applied in the order specified in the list. + PatchOperations []*PatchOperation `locationName:"patchOperations" type:"list"` + + // The ID of the RestApi you want to update. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateRestApiInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateRestApiInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateRestApiInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateRestApiInput"} + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Requests Amazon API Gateway to change information about a Stage resource. +type UpdateStageInput struct { + _ struct{} `type:"structure"` + + // A list of operations describing the updates to apply to the specified resource. + // The patches are applied in the order specified in the list. + PatchOperations []*PatchOperation `locationName:"patchOperations" type:"list"` + + // The identifier of the RestApi resource for the Stage resource to change information + // about. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` + + // The name of the Stage resource to change information about. + StageName *string `location:"uri" locationName:"stage_name" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateStageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateStageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
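+// A hedged end-to-end sketch of a stage update that would pass this validation
+// (the identifiers and variable name are placeholders):
+//
+//    svc.UpdateStage(&apigateway.UpdateStageInput{
+//        RestApiId: aws.String("rest-api-id"),
+//        StageName: aws.String("prod"),
+//        PatchOperations: []*apigateway.PatchOperation{{
+//            Op:    aws.String(apigateway.OpReplace),
+//            Path:  aws.String("/variables/endpoint"),
+//            Value: aws.String("v2"),
+//        }},
+//    })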
+func (s *UpdateStageInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UpdateStageInput"}
+ if s.RestApiId == nil {
+  invalidParams.Add(request.NewErrParamRequired("RestApiId"))
+ }
+ if s.StageName == nil {
+  invalidParams.Add(request.NewErrParamRequired("StageName"))
+ }
+
+ if invalidParams.Len() > 0 {
+  return invalidParams
+ }
+ return nil
+}
+
+// The authorizer type. The only current value is TOKEN.
+const (
+ // @enum AuthorizerType
+ AuthorizerTypeToken = "TOKEN"
+)
+
+// The size of the stage's cache cluster.
+const (
+ // @enum CacheClusterSize
+ CacheClusterSize05 = "0.5"
+ // @enum CacheClusterSize
+ CacheClusterSize16 = "1.6"
+ // @enum CacheClusterSize
+ CacheClusterSize61 = "6.1"
+ // @enum CacheClusterSize
+ CacheClusterSize135 = "13.5"
+ // @enum CacheClusterSize
+ CacheClusterSize284 = "28.4"
+ // @enum CacheClusterSize
+ CacheClusterSize582 = "58.2"
+ // @enum CacheClusterSize
+ CacheClusterSize118 = "118"
+ // @enum CacheClusterSize
+ CacheClusterSize237 = "237"
+)
+
+// The status of the stage's cache cluster.
+const (
+ // @enum CacheClusterStatus
+ CacheClusterStatusCreateInProgress = "CREATE_IN_PROGRESS"
+ // @enum CacheClusterStatus
+ CacheClusterStatusAvailable = "AVAILABLE"
+ // @enum CacheClusterStatus
+ CacheClusterStatusDeleteInProgress = "DELETE_IN_PROGRESS"
+ // @enum CacheClusterStatus
+ CacheClusterStatusNotAvailable = "NOT_AVAILABLE"
+ // @enum CacheClusterStatus
+ CacheClusterStatusFlushInProgress = "FLUSH_IN_PROGRESS"
+)
+
+// The integration type. Valid values are HTTP, AWS, or MOCK.
+const (
+ // @enum IntegrationType
+ IntegrationTypeHttp = "HTTP"
+ // @enum IntegrationType
+ IntegrationTypeAws = "AWS"
+ // @enum IntegrationType
+ IntegrationTypeMock = "MOCK"
+)
+
+// The mode used by PutRestApi: merge the supplied definition into the existing
+// API, or overwrite it.
+const (
+ // @enum PutMode
+ PutModeMerge = "merge"
+ // @enum PutMode
+ PutModeOverwrite = "overwrite"
+)
+
+// How API Gateway responds to unauthorized requests that attempt cache
+// invalidation via the Cache-Control header.
+const (
+ // @enum UnauthorizedCacheControlHeaderStrategy
+ UnauthorizedCacheControlHeaderStrategyFailWith403 = "FAIL_WITH_403"
+ // @enum UnauthorizedCacheControlHeaderStrategy
+ UnauthorizedCacheControlHeaderStrategySucceedWithResponseHeader = "SUCCEED_WITH_RESPONSE_HEADER"
+ // @enum UnauthorizedCacheControlHeaderStrategy
+ UnauthorizedCacheControlHeaderStrategySucceedWithoutResponseHeader = "SUCCEED_WITHOUT_RESPONSE_HEADER"
+)
+
+// The operation kind carried by a PatchOperation, following JSON Patch
+// (RFC 6902) semantics.
+const (
+ // @enum op
+ OpAdd = "add"
+ // @enum op
+ OpRemove = "remove"
+ // @enum op
+ OpReplace = "replace"
+ // @enum op
+ OpMove = "move"
+ // @enum op
+ OpCopy = "copy"
+ // @enum op
+ OpTest = "test"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/apigateway/apigatewayiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/apigateway/apigatewayiface/interface.go
new file mode 100644
index 000000000..7f43df8ec
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/apigateway/apigatewayiface/interface.go
@@ -0,0 +1,338 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package apigatewayiface provides an interface for the Amazon API Gateway.
+package apigatewayiface
+
+import (
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/service/apigateway"
+)
+
+// APIGatewayAPI is the interface type for apigateway.APIGateway.
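+//
+// The primary use of this interface is enabling unit tests: declare a struct
+// that embeds APIGatewayAPI, override only the methods the code under test
+// calls, and pass it wherever a *apigateway.APIGateway is normally used. A
+// minimal sketch (names are illustrative):
+//
+//    type mockAPIGateway struct {
+//        apigatewayiface.APIGatewayAPI
+//    }
+//
+//    func (m *mockAPIGateway) GetRestApis(in *apigateway.GetRestApisInput) (*apigateway.GetRestApisOutput, error) {
+//        // Return canned data instead of calling the service.
+//        return &apigateway.GetRestApisOutput{}, nil
+//    }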
+type APIGatewayAPI interface { + CreateApiKeyRequest(*apigateway.CreateApiKeyInput) (*request.Request, *apigateway.ApiKey) + + CreateApiKey(*apigateway.CreateApiKeyInput) (*apigateway.ApiKey, error) + + CreateAuthorizerRequest(*apigateway.CreateAuthorizerInput) (*request.Request, *apigateway.Authorizer) + + CreateAuthorizer(*apigateway.CreateAuthorizerInput) (*apigateway.Authorizer, error) + + CreateBasePathMappingRequest(*apigateway.CreateBasePathMappingInput) (*request.Request, *apigateway.BasePathMapping) + + CreateBasePathMapping(*apigateway.CreateBasePathMappingInput) (*apigateway.BasePathMapping, error) + + CreateDeploymentRequest(*apigateway.CreateDeploymentInput) (*request.Request, *apigateway.Deployment) + + CreateDeployment(*apigateway.CreateDeploymentInput) (*apigateway.Deployment, error) + + CreateDomainNameRequest(*apigateway.CreateDomainNameInput) (*request.Request, *apigateway.DomainName) + + CreateDomainName(*apigateway.CreateDomainNameInput) (*apigateway.DomainName, error) + + CreateModelRequest(*apigateway.CreateModelInput) (*request.Request, *apigateway.Model) + + CreateModel(*apigateway.CreateModelInput) (*apigateway.Model, error) + + CreateResourceRequest(*apigateway.CreateResourceInput) (*request.Request, *apigateway.Resource) + + CreateResource(*apigateway.CreateResourceInput) (*apigateway.Resource, error) + + CreateRestApiRequest(*apigateway.CreateRestApiInput) (*request.Request, *apigateway.RestApi) + + CreateRestApi(*apigateway.CreateRestApiInput) (*apigateway.RestApi, error) + + CreateStageRequest(*apigateway.CreateStageInput) (*request.Request, *apigateway.Stage) + + CreateStage(*apigateway.CreateStageInput) (*apigateway.Stage, error) + + DeleteApiKeyRequest(*apigateway.DeleteApiKeyInput) (*request.Request, *apigateway.DeleteApiKeyOutput) + + DeleteApiKey(*apigateway.DeleteApiKeyInput) (*apigateway.DeleteApiKeyOutput, error) + + DeleteAuthorizerRequest(*apigateway.DeleteAuthorizerInput) (*request.Request, *apigateway.DeleteAuthorizerOutput) + + DeleteAuthorizer(*apigateway.DeleteAuthorizerInput) (*apigateway.DeleteAuthorizerOutput, error) + + DeleteBasePathMappingRequest(*apigateway.DeleteBasePathMappingInput) (*request.Request, *apigateway.DeleteBasePathMappingOutput) + + DeleteBasePathMapping(*apigateway.DeleteBasePathMappingInput) (*apigateway.DeleteBasePathMappingOutput, error) + + DeleteClientCertificateRequest(*apigateway.DeleteClientCertificateInput) (*request.Request, *apigateway.DeleteClientCertificateOutput) + + DeleteClientCertificate(*apigateway.DeleteClientCertificateInput) (*apigateway.DeleteClientCertificateOutput, error) + + DeleteDeploymentRequest(*apigateway.DeleteDeploymentInput) (*request.Request, *apigateway.DeleteDeploymentOutput) + + DeleteDeployment(*apigateway.DeleteDeploymentInput) (*apigateway.DeleteDeploymentOutput, error) + + DeleteDomainNameRequest(*apigateway.DeleteDomainNameInput) (*request.Request, *apigateway.DeleteDomainNameOutput) + + DeleteDomainName(*apigateway.DeleteDomainNameInput) (*apigateway.DeleteDomainNameOutput, error) + + DeleteIntegrationRequest(*apigateway.DeleteIntegrationInput) (*request.Request, *apigateway.DeleteIntegrationOutput) + + DeleteIntegration(*apigateway.DeleteIntegrationInput) (*apigateway.DeleteIntegrationOutput, error) + + DeleteIntegrationResponseRequest(*apigateway.DeleteIntegrationResponseInput) (*request.Request, *apigateway.DeleteIntegrationResponseOutput) + + DeleteIntegrationResponse(*apigateway.DeleteIntegrationResponseInput) (*apigateway.DeleteIntegrationResponseOutput, error) + + 
DeleteMethodRequest(*apigateway.DeleteMethodInput) (*request.Request, *apigateway.DeleteMethodOutput) + + DeleteMethod(*apigateway.DeleteMethodInput) (*apigateway.DeleteMethodOutput, error) + + DeleteMethodResponseRequest(*apigateway.DeleteMethodResponseInput) (*request.Request, *apigateway.DeleteMethodResponseOutput) + + DeleteMethodResponse(*apigateway.DeleteMethodResponseInput) (*apigateway.DeleteMethodResponseOutput, error) + + DeleteModelRequest(*apigateway.DeleteModelInput) (*request.Request, *apigateway.DeleteModelOutput) + + DeleteModel(*apigateway.DeleteModelInput) (*apigateway.DeleteModelOutput, error) + + DeleteResourceRequest(*apigateway.DeleteResourceInput) (*request.Request, *apigateway.DeleteResourceOutput) + + DeleteResource(*apigateway.DeleteResourceInput) (*apigateway.DeleteResourceOutput, error) + + DeleteRestApiRequest(*apigateway.DeleteRestApiInput) (*request.Request, *apigateway.DeleteRestApiOutput) + + DeleteRestApi(*apigateway.DeleteRestApiInput) (*apigateway.DeleteRestApiOutput, error) + + DeleteStageRequest(*apigateway.DeleteStageInput) (*request.Request, *apigateway.DeleteStageOutput) + + DeleteStage(*apigateway.DeleteStageInput) (*apigateway.DeleteStageOutput, error) + + FlushStageAuthorizersCacheRequest(*apigateway.FlushStageAuthorizersCacheInput) (*request.Request, *apigateway.FlushStageAuthorizersCacheOutput) + + FlushStageAuthorizersCache(*apigateway.FlushStageAuthorizersCacheInput) (*apigateway.FlushStageAuthorizersCacheOutput, error) + + FlushStageCacheRequest(*apigateway.FlushStageCacheInput) (*request.Request, *apigateway.FlushStageCacheOutput) + + FlushStageCache(*apigateway.FlushStageCacheInput) (*apigateway.FlushStageCacheOutput, error) + + GenerateClientCertificateRequest(*apigateway.GenerateClientCertificateInput) (*request.Request, *apigateway.ClientCertificate) + + GenerateClientCertificate(*apigateway.GenerateClientCertificateInput) (*apigateway.ClientCertificate, error) + + GetAccountRequest(*apigateway.GetAccountInput) (*request.Request, *apigateway.Account) + + GetAccount(*apigateway.GetAccountInput) (*apigateway.Account, error) + + GetApiKeyRequest(*apigateway.GetApiKeyInput) (*request.Request, *apigateway.ApiKey) + + GetApiKey(*apigateway.GetApiKeyInput) (*apigateway.ApiKey, error) + + GetApiKeysRequest(*apigateway.GetApiKeysInput) (*request.Request, *apigateway.GetApiKeysOutput) + + GetApiKeys(*apigateway.GetApiKeysInput) (*apigateway.GetApiKeysOutput, error) + + GetApiKeysPages(*apigateway.GetApiKeysInput, func(*apigateway.GetApiKeysOutput, bool) bool) error + + GetAuthorizerRequest(*apigateway.GetAuthorizerInput) (*request.Request, *apigateway.Authorizer) + + GetAuthorizer(*apigateway.GetAuthorizerInput) (*apigateway.Authorizer, error) + + GetAuthorizersRequest(*apigateway.GetAuthorizersInput) (*request.Request, *apigateway.GetAuthorizersOutput) + + GetAuthorizers(*apigateway.GetAuthorizersInput) (*apigateway.GetAuthorizersOutput, error) + + GetBasePathMappingRequest(*apigateway.GetBasePathMappingInput) (*request.Request, *apigateway.BasePathMapping) + + GetBasePathMapping(*apigateway.GetBasePathMappingInput) (*apigateway.BasePathMapping, error) + + GetBasePathMappingsRequest(*apigateway.GetBasePathMappingsInput) (*request.Request, *apigateway.GetBasePathMappingsOutput) + + GetBasePathMappings(*apigateway.GetBasePathMappingsInput) (*apigateway.GetBasePathMappingsOutput, error) + + GetBasePathMappingsPages(*apigateway.GetBasePathMappingsInput, func(*apigateway.GetBasePathMappingsOutput, bool) bool) error + + 
GetClientCertificateRequest(*apigateway.GetClientCertificateInput) (*request.Request, *apigateway.ClientCertificate) + + GetClientCertificate(*apigateway.GetClientCertificateInput) (*apigateway.ClientCertificate, error) + + GetClientCertificatesRequest(*apigateway.GetClientCertificatesInput) (*request.Request, *apigateway.GetClientCertificatesOutput) + + GetClientCertificates(*apigateway.GetClientCertificatesInput) (*apigateway.GetClientCertificatesOutput, error) + + GetClientCertificatesPages(*apigateway.GetClientCertificatesInput, func(*apigateway.GetClientCertificatesOutput, bool) bool) error + + GetDeploymentRequest(*apigateway.GetDeploymentInput) (*request.Request, *apigateway.Deployment) + + GetDeployment(*apigateway.GetDeploymentInput) (*apigateway.Deployment, error) + + GetDeploymentsRequest(*apigateway.GetDeploymentsInput) (*request.Request, *apigateway.GetDeploymentsOutput) + + GetDeployments(*apigateway.GetDeploymentsInput) (*apigateway.GetDeploymentsOutput, error) + + GetDeploymentsPages(*apigateway.GetDeploymentsInput, func(*apigateway.GetDeploymentsOutput, bool) bool) error + + GetDomainNameRequest(*apigateway.GetDomainNameInput) (*request.Request, *apigateway.DomainName) + + GetDomainName(*apigateway.GetDomainNameInput) (*apigateway.DomainName, error) + + GetDomainNamesRequest(*apigateway.GetDomainNamesInput) (*request.Request, *apigateway.GetDomainNamesOutput) + + GetDomainNames(*apigateway.GetDomainNamesInput) (*apigateway.GetDomainNamesOutput, error) + + GetDomainNamesPages(*apigateway.GetDomainNamesInput, func(*apigateway.GetDomainNamesOutput, bool) bool) error + + GetExportRequest(*apigateway.GetExportInput) (*request.Request, *apigateway.GetExportOutput) + + GetExport(*apigateway.GetExportInput) (*apigateway.GetExportOutput, error) + + GetIntegrationRequest(*apigateway.GetIntegrationInput) (*request.Request, *apigateway.Integration) + + GetIntegration(*apigateway.GetIntegrationInput) (*apigateway.Integration, error) + + GetIntegrationResponseRequest(*apigateway.GetIntegrationResponseInput) (*request.Request, *apigateway.IntegrationResponse) + + GetIntegrationResponse(*apigateway.GetIntegrationResponseInput) (*apigateway.IntegrationResponse, error) + + GetMethodRequest(*apigateway.GetMethodInput) (*request.Request, *apigateway.Method) + + GetMethod(*apigateway.GetMethodInput) (*apigateway.Method, error) + + GetMethodResponseRequest(*apigateway.GetMethodResponseInput) (*request.Request, *apigateway.MethodResponse) + + GetMethodResponse(*apigateway.GetMethodResponseInput) (*apigateway.MethodResponse, error) + + GetModelRequest(*apigateway.GetModelInput) (*request.Request, *apigateway.Model) + + GetModel(*apigateway.GetModelInput) (*apigateway.Model, error) + + GetModelTemplateRequest(*apigateway.GetModelTemplateInput) (*request.Request, *apigateway.GetModelTemplateOutput) + + GetModelTemplate(*apigateway.GetModelTemplateInput) (*apigateway.GetModelTemplateOutput, error) + + GetModelsRequest(*apigateway.GetModelsInput) (*request.Request, *apigateway.GetModelsOutput) + + GetModels(*apigateway.GetModelsInput) (*apigateway.GetModelsOutput, error) + + GetModelsPages(*apigateway.GetModelsInput, func(*apigateway.GetModelsOutput, bool) bool) error + + GetResourceRequest(*apigateway.GetResourceInput) (*request.Request, *apigateway.Resource) + + GetResource(*apigateway.GetResourceInput) (*apigateway.Resource, error) + + GetResourcesRequest(*apigateway.GetResourcesInput) (*request.Request, *apigateway.GetResourcesOutput) + + GetResources(*apigateway.GetResourcesInput) 
(*apigateway.GetResourcesOutput, error) + + GetResourcesPages(*apigateway.GetResourcesInput, func(*apigateway.GetResourcesOutput, bool) bool) error + + GetRestApiRequest(*apigateway.GetRestApiInput) (*request.Request, *apigateway.RestApi) + + GetRestApi(*apigateway.GetRestApiInput) (*apigateway.RestApi, error) + + GetRestApisRequest(*apigateway.GetRestApisInput) (*request.Request, *apigateway.GetRestApisOutput) + + GetRestApis(*apigateway.GetRestApisInput) (*apigateway.GetRestApisOutput, error) + + GetRestApisPages(*apigateway.GetRestApisInput, func(*apigateway.GetRestApisOutput, bool) bool) error + + GetSdkRequest(*apigateway.GetSdkInput) (*request.Request, *apigateway.GetSdkOutput) + + GetSdk(*apigateway.GetSdkInput) (*apigateway.GetSdkOutput, error) + + GetStageRequest(*apigateway.GetStageInput) (*request.Request, *apigateway.Stage) + + GetStage(*apigateway.GetStageInput) (*apigateway.Stage, error) + + GetStagesRequest(*apigateway.GetStagesInput) (*request.Request, *apigateway.GetStagesOutput) + + GetStages(*apigateway.GetStagesInput) (*apigateway.GetStagesOutput, error) + + ImportRestApiRequest(*apigateway.ImportRestApiInput) (*request.Request, *apigateway.RestApi) + + ImportRestApi(*apigateway.ImportRestApiInput) (*apigateway.RestApi, error) + + PutIntegrationRequest(*apigateway.PutIntegrationInput) (*request.Request, *apigateway.Integration) + + PutIntegration(*apigateway.PutIntegrationInput) (*apigateway.Integration, error) + + PutIntegrationResponseRequest(*apigateway.PutIntegrationResponseInput) (*request.Request, *apigateway.IntegrationResponse) + + PutIntegrationResponse(*apigateway.PutIntegrationResponseInput) (*apigateway.IntegrationResponse, error) + + PutMethodRequest(*apigateway.PutMethodInput) (*request.Request, *apigateway.Method) + + PutMethod(*apigateway.PutMethodInput) (*apigateway.Method, error) + + PutMethodResponseRequest(*apigateway.PutMethodResponseInput) (*request.Request, *apigateway.MethodResponse) + + PutMethodResponse(*apigateway.PutMethodResponseInput) (*apigateway.MethodResponse, error) + + PutRestApiRequest(*apigateway.PutRestApiInput) (*request.Request, *apigateway.RestApi) + + PutRestApi(*apigateway.PutRestApiInput) (*apigateway.RestApi, error) + + TestInvokeAuthorizerRequest(*apigateway.TestInvokeAuthorizerInput) (*request.Request, *apigateway.TestInvokeAuthorizerOutput) + + TestInvokeAuthorizer(*apigateway.TestInvokeAuthorizerInput) (*apigateway.TestInvokeAuthorizerOutput, error) + + TestInvokeMethodRequest(*apigateway.TestInvokeMethodInput) (*request.Request, *apigateway.TestInvokeMethodOutput) + + TestInvokeMethod(*apigateway.TestInvokeMethodInput) (*apigateway.TestInvokeMethodOutput, error) + + UpdateAccountRequest(*apigateway.UpdateAccountInput) (*request.Request, *apigateway.Account) + + UpdateAccount(*apigateway.UpdateAccountInput) (*apigateway.Account, error) + + UpdateApiKeyRequest(*apigateway.UpdateApiKeyInput) (*request.Request, *apigateway.ApiKey) + + UpdateApiKey(*apigateway.UpdateApiKeyInput) (*apigateway.ApiKey, error) + + UpdateAuthorizerRequest(*apigateway.UpdateAuthorizerInput) (*request.Request, *apigateway.Authorizer) + + UpdateAuthorizer(*apigateway.UpdateAuthorizerInput) (*apigateway.Authorizer, error) + + UpdateBasePathMappingRequest(*apigateway.UpdateBasePathMappingInput) (*request.Request, *apigateway.BasePathMapping) + + UpdateBasePathMapping(*apigateway.UpdateBasePathMappingInput) (*apigateway.BasePathMapping, error) + + UpdateClientCertificateRequest(*apigateway.UpdateClientCertificateInput) (*request.Request, 
*apigateway.ClientCertificate) + + UpdateClientCertificate(*apigateway.UpdateClientCertificateInput) (*apigateway.ClientCertificate, error) + + UpdateDeploymentRequest(*apigateway.UpdateDeploymentInput) (*request.Request, *apigateway.Deployment) + + UpdateDeployment(*apigateway.UpdateDeploymentInput) (*apigateway.Deployment, error) + + UpdateDomainNameRequest(*apigateway.UpdateDomainNameInput) (*request.Request, *apigateway.DomainName) + + UpdateDomainName(*apigateway.UpdateDomainNameInput) (*apigateway.DomainName, error) + + UpdateIntegrationRequest(*apigateway.UpdateIntegrationInput) (*request.Request, *apigateway.Integration) + + UpdateIntegration(*apigateway.UpdateIntegrationInput) (*apigateway.Integration, error) + + UpdateIntegrationResponseRequest(*apigateway.UpdateIntegrationResponseInput) (*request.Request, *apigateway.IntegrationResponse) + + UpdateIntegrationResponse(*apigateway.UpdateIntegrationResponseInput) (*apigateway.IntegrationResponse, error) + + UpdateMethodRequest(*apigateway.UpdateMethodInput) (*request.Request, *apigateway.Method) + + UpdateMethod(*apigateway.UpdateMethodInput) (*apigateway.Method, error) + + UpdateMethodResponseRequest(*apigateway.UpdateMethodResponseInput) (*request.Request, *apigateway.MethodResponse) + + UpdateMethodResponse(*apigateway.UpdateMethodResponseInput) (*apigateway.MethodResponse, error) + + UpdateModelRequest(*apigateway.UpdateModelInput) (*request.Request, *apigateway.Model) + + UpdateModel(*apigateway.UpdateModelInput) (*apigateway.Model, error) + + UpdateResourceRequest(*apigateway.UpdateResourceInput) (*request.Request, *apigateway.Resource) + + UpdateResource(*apigateway.UpdateResourceInput) (*apigateway.Resource, error) + + UpdateRestApiRequest(*apigateway.UpdateRestApiInput) (*request.Request, *apigateway.RestApi) + + UpdateRestApi(*apigateway.UpdateRestApiInput) (*apigateway.RestApi, error) + + UpdateStageRequest(*apigateway.UpdateStageInput) (*request.Request, *apigateway.Stage) + + UpdateStage(*apigateway.UpdateStageInput) (*apigateway.Stage, error) +} + +var _ APIGatewayAPI = (*apigateway.APIGateway)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/apigateway/customization.go b/vendor/github.com/aws/aws-sdk-go/service/apigateway/customization.go new file mode 100644 index 000000000..2dc4a7c44 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/apigateway/customization.go @@ -0,0 +1,14 @@ +package apigateway + +import ( + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" +) + +func init() { + initClient = func(c *client.Client) { + c.Handlers.Build.PushBack(func(r *request.Request) { + r.HTTPRequest.Header.Add("Accept", "application/json") + }) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/apigateway/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/apigateway/examples_test.go new file mode 100644 index 000000000..69e9685d4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/apigateway/examples_test.go @@ -0,0 +1,1837 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
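+// Every example below follows the same pattern: build the operation's Input
+// struct, invoke the operation on the client, then branch on err. A hedged
+// sketch of the fuller error handling the comments allude to (casting to
+// awserr.Error for the code and message):
+//
+//    if err != nil {
+//        if aerr, ok := err.(awserr.Error); ok {
+//            fmt.Println(aerr.Code(), aerr.Message())
+//        }
+//        return
+//    }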
+ +package apigateway_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/apigateway" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleAPIGateway_CreateApiKey() { + svc := apigateway.New(session.New()) + + params := &apigateway.CreateApiKeyInput{ + Description: aws.String("String"), + Enabled: aws.Bool(true), + Name: aws.String("String"), + StageKeys: []*apigateway.StageKey{ + { // Required + RestApiId: aws.String("String"), + StageName: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateApiKey(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_CreateAuthorizer() { + svc := apigateway.New(session.New()) + + params := &apigateway.CreateAuthorizerInput{ + AuthorizerUri: aws.String("String"), // Required + IdentitySource: aws.String("String"), // Required + Name: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + Type: aws.String("AuthorizerType"), // Required + AuthType: aws.String("String"), + AuthorizerCredentials: aws.String("String"), + AuthorizerResultTtlInSeconds: aws.Int64(1), + IdentityValidationExpression: aws.String("String"), + } + resp, err := svc.CreateAuthorizer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_CreateBasePathMapping() { + svc := apigateway.New(session.New()) + + params := &apigateway.CreateBasePathMappingInput{ + DomainName: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + BasePath: aws.String("String"), + Stage: aws.String("String"), + } + resp, err := svc.CreateBasePathMapping(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_CreateDeployment() { + svc := apigateway.New(session.New()) + + params := &apigateway.CreateDeploymentInput{ + RestApiId: aws.String("String"), // Required + StageName: aws.String("String"), // Required + CacheClusterEnabled: aws.Bool(true), + CacheClusterSize: aws.String("CacheClusterSize"), + Description: aws.String("String"), + StageDescription: aws.String("String"), + Variables: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.CreateDeployment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAPIGateway_CreateDomainName() { + svc := apigateway.New(session.New()) + + params := &apigateway.CreateDomainNameInput{ + CertificateBody: aws.String("String"), // Required + CertificateChain: aws.String("String"), // Required + CertificateName: aws.String("String"), // Required + CertificatePrivateKey: aws.String("String"), // Required + DomainName: aws.String("String"), // Required + } + resp, err := svc.CreateDomainName(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_CreateModel() { + svc := apigateway.New(session.New()) + + params := &apigateway.CreateModelInput{ + ContentType: aws.String("String"), // Required + Name: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + Description: aws.String("String"), + Schema: aws.String("String"), + } + resp, err := svc.CreateModel(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_CreateResource() { + svc := apigateway.New(session.New()) + + params := &apigateway.CreateResourceInput{ + ParentId: aws.String("String"), // Required + PathPart: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + } + resp, err := svc.CreateResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_CreateRestApi() { + svc := apigateway.New(session.New()) + + params := &apigateway.CreateRestApiInput{ + Name: aws.String("String"), // Required + CloneFrom: aws.String("String"), + Description: aws.String("String"), + } + resp, err := svc.CreateRestApi(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_CreateStage() { + svc := apigateway.New(session.New()) + + params := &apigateway.CreateStageInput{ + DeploymentId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + StageName: aws.String("String"), // Required + CacheClusterEnabled: aws.Bool(true), + CacheClusterSize: aws.String("CacheClusterSize"), + Description: aws.String("String"), + Variables: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.CreateStage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_DeleteApiKey() { + svc := apigateway.New(session.New()) + + params := &apigateway.DeleteApiKeyInput{ + ApiKey: aws.String("String"), // Required + } + resp, err := svc.DeleteApiKey(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAPIGateway_DeleteAuthorizer() { + svc := apigateway.New(session.New()) + + params := &apigateway.DeleteAuthorizerInput{ + AuthorizerId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + } + resp, err := svc.DeleteAuthorizer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_DeleteBasePathMapping() { + svc := apigateway.New(session.New()) + + params := &apigateway.DeleteBasePathMappingInput{ + BasePath: aws.String("String"), // Required + DomainName: aws.String("String"), // Required + } + resp, err := svc.DeleteBasePathMapping(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_DeleteClientCertificate() { + svc := apigateway.New(session.New()) + + params := &apigateway.DeleteClientCertificateInput{ + ClientCertificateId: aws.String("String"), // Required + } + resp, err := svc.DeleteClientCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_DeleteDeployment() { + svc := apigateway.New(session.New()) + + params := &apigateway.DeleteDeploymentInput{ + DeploymentId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + } + resp, err := svc.DeleteDeployment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_DeleteDomainName() { + svc := apigateway.New(session.New()) + + params := &apigateway.DeleteDomainNameInput{ + DomainName: aws.String("String"), // Required + } + resp, err := svc.DeleteDomainName(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_DeleteIntegration() { + svc := apigateway.New(session.New()) + + params := &apigateway.DeleteIntegrationInput{ + HttpMethod: aws.String("String"), // Required + ResourceId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + } + resp, err := svc.DeleteIntegration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_DeleteIntegrationResponse() { + svc := apigateway.New(session.New()) + + params := &apigateway.DeleteIntegrationResponseInput{ + HttpMethod: aws.String("String"), // Required + ResourceId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + StatusCode: aws.String("StatusCode"), // Required + } + resp, err := svc.DeleteIntegrationResponse(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_DeleteMethod() { + svc := apigateway.New(session.New()) + + params := &apigateway.DeleteMethodInput{ + HttpMethod: aws.String("String"), // Required + ResourceId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + } + resp, err := svc.DeleteMethod(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_DeleteMethodResponse() { + svc := apigateway.New(session.New()) + + params := &apigateway.DeleteMethodResponseInput{ + HttpMethod: aws.String("String"), // Required + ResourceId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + StatusCode: aws.String("StatusCode"), // Required + } + resp, err := svc.DeleteMethodResponse(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_DeleteModel() { + svc := apigateway.New(session.New()) + + params := &apigateway.DeleteModelInput{ + ModelName: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + } + resp, err := svc.DeleteModel(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_DeleteResource() { + svc := apigateway.New(session.New()) + + params := &apigateway.DeleteResourceInput{ + ResourceId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + } + resp, err := svc.DeleteResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_DeleteRestApi() { + svc := apigateway.New(session.New()) + + params := &apigateway.DeleteRestApiInput{ + RestApiId: aws.String("String"), // Required + } + resp, err := svc.DeleteRestApi(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_DeleteStage() { + svc := apigateway.New(session.New()) + + params := &apigateway.DeleteStageInput{ + RestApiId: aws.String("String"), // Required + StageName: aws.String("String"), // Required + } + resp, err := svc.DeleteStage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_FlushStageAuthorizersCache() { + svc := apigateway.New(session.New()) + + params := &apigateway.FlushStageAuthorizersCacheInput{ + RestApiId: aws.String("String"), // Required + StageName: aws.String("String"), // Required + } + resp, err := svc.FlushStageAuthorizersCache(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_FlushStageCache() { + svc := apigateway.New(session.New()) + + params := &apigateway.FlushStageCacheInput{ + RestApiId: aws.String("String"), // Required + StageName: aws.String("String"), // Required + } + resp, err := svc.FlushStageCache(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GenerateClientCertificate() { + svc := apigateway.New(session.New()) + + params := &apigateway.GenerateClientCertificateInput{ + Description: aws.String("String"), + } + resp, err := svc.GenerateClientCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetAccount() { + svc := apigateway.New(session.New()) + + var params *apigateway.GetAccountInput + resp, err := svc.GetAccount(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetApiKey() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetApiKeyInput{ + ApiKey: aws.String("String"), // Required + } + resp, err := svc.GetApiKey(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetApiKeys() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetApiKeysInput{ + Limit: aws.Int64(1), + Position: aws.String("String"), + } + resp, err := svc.GetApiKeys(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetAuthorizer() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetAuthorizerInput{ + AuthorizerId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + } + resp, err := svc.GetAuthorizer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetAuthorizers() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetAuthorizersInput{ + RestApiId: aws.String("String"), // Required + Limit: aws.Int64(1), + Position: aws.String("String"), + } + resp, err := svc.GetAuthorizers(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAPIGateway_GetBasePathMapping() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetBasePathMappingInput{ + BasePath: aws.String("String"), // Required + DomainName: aws.String("String"), // Required + } + resp, err := svc.GetBasePathMapping(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetBasePathMappings() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetBasePathMappingsInput{ + DomainName: aws.String("String"), // Required + Limit: aws.Int64(1), + Position: aws.String("String"), + } + resp, err := svc.GetBasePathMappings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetClientCertificate() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetClientCertificateInput{ + ClientCertificateId: aws.String("String"), // Required + } + resp, err := svc.GetClientCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetClientCertificates() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetClientCertificatesInput{ + Limit: aws.Int64(1), + Position: aws.String("String"), + } + resp, err := svc.GetClientCertificates(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetDeployment() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetDeploymentInput{ + DeploymentId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + } + resp, err := svc.GetDeployment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetDeployments() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetDeploymentsInput{ + RestApiId: aws.String("String"), // Required + Limit: aws.Int64(1), + Position: aws.String("String"), + } + resp, err := svc.GetDeployments(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetDomainName() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetDomainNameInput{ + DomainName: aws.String("String"), // Required + } + resp, err := svc.GetDomainName(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAPIGateway_GetDomainNames() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetDomainNamesInput{ + Limit: aws.Int64(1), + Position: aws.String("String"), + } + resp, err := svc.GetDomainNames(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetExport() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetExportInput{ + ExportType: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + StageName: aws.String("String"), // Required + Accepts: aws.String("String"), + Parameters: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.GetExport(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetIntegration() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetIntegrationInput{ + HttpMethod: aws.String("String"), // Required + ResourceId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + } + resp, err := svc.GetIntegration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetIntegrationResponse() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetIntegrationResponseInput{ + HttpMethod: aws.String("String"), // Required + ResourceId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + StatusCode: aws.String("StatusCode"), // Required + } + resp, err := svc.GetIntegrationResponse(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetMethod() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetMethodInput{ + HttpMethod: aws.String("String"), // Required + ResourceId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + } + resp, err := svc.GetMethod(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetMethodResponse() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetMethodResponseInput{ + HttpMethod: aws.String("String"), // Required + ResourceId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + StatusCode: aws.String("StatusCode"), // Required + } + resp, err := svc.GetMethodResponse(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAPIGateway_GetModel() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetModelInput{ + ModelName: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + Flatten: aws.Bool(true), + } + resp, err := svc.GetModel(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetModelTemplate() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetModelTemplateInput{ + ModelName: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + } + resp, err := svc.GetModelTemplate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetModels() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetModelsInput{ + RestApiId: aws.String("String"), // Required + Limit: aws.Int64(1), + Position: aws.String("String"), + } + resp, err := svc.GetModels(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetResource() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetResourceInput{ + ResourceId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + } + resp, err := svc.GetResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetResources() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetResourcesInput{ + RestApiId: aws.String("String"), // Required + Limit: aws.Int64(1), + Position: aws.String("String"), + } + resp, err := svc.GetResources(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetRestApi() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetRestApiInput{ + RestApiId: aws.String("String"), // Required + } + resp, err := svc.GetRestApi(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetRestApis() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetRestApisInput{ + Limit: aws.Int64(1), + Position: aws.String("String"), + } + resp, err := svc.GetRestApis(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAPIGateway_GetSdk() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetSdkInput{ + RestApiId: aws.String("String"), // Required + SdkType: aws.String("String"), // Required + StageName: aws.String("String"), // Required + Parameters: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.GetSdk(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetStage() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetStageInput{ + RestApiId: aws.String("String"), // Required + StageName: aws.String("String"), // Required + } + resp, err := svc.GetStage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetStages() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetStagesInput{ + RestApiId: aws.String("String"), // Required + DeploymentId: aws.String("String"), + } + resp, err := svc.GetStages(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_ImportRestApi() { + svc := apigateway.New(session.New()) + + params := &apigateway.ImportRestApiInput{ + Body: []byte("PAYLOAD"), // Required + FailOnWarnings: aws.Bool(true), + Parameters: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.ImportRestApi(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_PutIntegration() { + svc := apigateway.New(session.New()) + + params := &apigateway.PutIntegrationInput{ + HttpMethod: aws.String("String"), // Required + ResourceId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + Type: aws.String("IntegrationType"), // Required + CacheKeyParameters: []*string{ + aws.String("String"), // Required + // More values... + }, + CacheNamespace: aws.String("String"), + Credentials: aws.String("String"), + IntegrationHttpMethod: aws.String("String"), + PassthroughBehavior: aws.String("String"), + RequestParameters: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + RequestTemplates: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + Uri: aws.String("String"), + } + resp, err := svc.PutIntegration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAPIGateway_PutIntegrationResponse() { + svc := apigateway.New(session.New()) + + params := &apigateway.PutIntegrationResponseInput{ + HttpMethod: aws.String("String"), // Required + ResourceId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + StatusCode: aws.String("StatusCode"), // Required + ResponseParameters: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + ResponseTemplates: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + SelectionPattern: aws.String("String"), + } + resp, err := svc.PutIntegrationResponse(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_PutMethod() { + svc := apigateway.New(session.New()) + + params := &apigateway.PutMethodInput{ + AuthorizationType: aws.String("String"), // Required + HttpMethod: aws.String("String"), // Required + ResourceId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + ApiKeyRequired: aws.Bool(true), + AuthorizerId: aws.String("String"), + RequestModels: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + RequestParameters: map[string]*bool{ + "Key": aws.Bool(true), // Required + // More values... + }, + } + resp, err := svc.PutMethod(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_PutMethodResponse() { + svc := apigateway.New(session.New()) + + params := &apigateway.PutMethodResponseInput{ + HttpMethod: aws.String("String"), // Required + ResourceId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + StatusCode: aws.String("StatusCode"), // Required + ResponseModels: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + ResponseParameters: map[string]*bool{ + "Key": aws.Bool(true), // Required + // More values... + }, + } + resp, err := svc.PutMethodResponse(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_PutRestApi() { + svc := apigateway.New(session.New()) + + params := &apigateway.PutRestApiInput{ + Body: []byte("PAYLOAD"), // Required + RestApiId: aws.String("String"), // Required + FailOnWarnings: aws.Bool(true), + Mode: aws.String("PutMode"), + Parameters: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.PutRestApi(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_TestInvokeAuthorizer() { + svc := apigateway.New(session.New()) + + params := &apigateway.TestInvokeAuthorizerInput{ + AuthorizerId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + AdditionalContext: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... 
+ }, + Body: aws.String("String"), + Headers: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + PathWithQueryString: aws.String("String"), + StageVariables: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.TestInvokeAuthorizer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_TestInvokeMethod() { + svc := apigateway.New(session.New()) + + params := &apigateway.TestInvokeMethodInput{ + HttpMethod: aws.String("String"), // Required + ResourceId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + Body: aws.String("String"), + ClientCertificateId: aws.String("String"), + Headers: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + PathWithQueryString: aws.String("String"), + StageVariables: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.TestInvokeMethod(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_UpdateAccount() { + svc := apigateway.New(session.New()) + + params := &apigateway.UpdateAccountInput{ + PatchOperations: []*apigateway.PatchOperation{ + { // Required + From: aws.String("String"), + Op: aws.String("op"), + Path: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.UpdateAccount(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_UpdateApiKey() { + svc := apigateway.New(session.New()) + + params := &apigateway.UpdateApiKeyInput{ + ApiKey: aws.String("String"), // Required + PatchOperations: []*apigateway.PatchOperation{ + { // Required + From: aws.String("String"), + Op: aws.String("op"), + Path: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.UpdateApiKey(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_UpdateAuthorizer() { + svc := apigateway.New(session.New()) + + params := &apigateway.UpdateAuthorizerInput{ + AuthorizerId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + PatchOperations: []*apigateway.PatchOperation{ + { // Required + From: aws.String("String"), + Op: aws.String("op"), + Path: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.UpdateAuthorizer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAPIGateway_UpdateBasePathMapping() { + svc := apigateway.New(session.New()) + + params := &apigateway.UpdateBasePathMappingInput{ + BasePath: aws.String("String"), // Required + DomainName: aws.String("String"), // Required + PatchOperations: []*apigateway.PatchOperation{ + { // Required + From: aws.String("String"), + Op: aws.String("op"), + Path: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.UpdateBasePathMapping(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_UpdateClientCertificate() { + svc := apigateway.New(session.New()) + + params := &apigateway.UpdateClientCertificateInput{ + ClientCertificateId: aws.String("String"), // Required + PatchOperations: []*apigateway.PatchOperation{ + { // Required + From: aws.String("String"), + Op: aws.String("op"), + Path: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.UpdateClientCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_UpdateDeployment() { + svc := apigateway.New(session.New()) + + params := &apigateway.UpdateDeploymentInput{ + DeploymentId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + PatchOperations: []*apigateway.PatchOperation{ + { // Required + From: aws.String("String"), + Op: aws.String("op"), + Path: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.UpdateDeployment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_UpdateDomainName() { + svc := apigateway.New(session.New()) + + params := &apigateway.UpdateDomainNameInput{ + DomainName: aws.String("String"), // Required + PatchOperations: []*apigateway.PatchOperation{ + { // Required + From: aws.String("String"), + Op: aws.String("op"), + Path: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.UpdateDomainName(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_UpdateIntegration() { + svc := apigateway.New(session.New()) + + params := &apigateway.UpdateIntegrationInput{ + HttpMethod: aws.String("String"), // Required + ResourceId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + PatchOperations: []*apigateway.PatchOperation{ + { // Required + From: aws.String("String"), + Op: aws.String("op"), + Path: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.UpdateIntegration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAPIGateway_UpdateIntegrationResponse() { + svc := apigateway.New(session.New()) + + params := &apigateway.UpdateIntegrationResponseInput{ + HttpMethod: aws.String("String"), // Required + ResourceId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + StatusCode: aws.String("StatusCode"), // Required + PatchOperations: []*apigateway.PatchOperation{ + { // Required + From: aws.String("String"), + Op: aws.String("op"), + Path: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.UpdateIntegrationResponse(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_UpdateMethod() { + svc := apigateway.New(session.New()) + + params := &apigateway.UpdateMethodInput{ + HttpMethod: aws.String("String"), // Required + ResourceId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + PatchOperations: []*apigateway.PatchOperation{ + { // Required + From: aws.String("String"), + Op: aws.String("op"), + Path: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.UpdateMethod(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_UpdateMethodResponse() { + svc := apigateway.New(session.New()) + + params := &apigateway.UpdateMethodResponseInput{ + HttpMethod: aws.String("String"), // Required + ResourceId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + StatusCode: aws.String("StatusCode"), // Required + PatchOperations: []*apigateway.PatchOperation{ + { // Required + From: aws.String("String"), + Op: aws.String("op"), + Path: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.UpdateMethodResponse(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_UpdateModel() { + svc := apigateway.New(session.New()) + + params := &apigateway.UpdateModelInput{ + ModelName: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + PatchOperations: []*apigateway.PatchOperation{ + { // Required + From: aws.String("String"), + Op: aws.String("op"), + Path: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.UpdateModel(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_UpdateResource() { + svc := apigateway.New(session.New()) + + params := &apigateway.UpdateResourceInput{ + ResourceId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + PatchOperations: []*apigateway.PatchOperation{ + { // Required + From: aws.String("String"), + Op: aws.String("op"), + Path: aws.String("String"), + Value: aws.String("String"), + }, + // More values... 
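+ // Illustrative note: each PatchOperation is a JSON-Patch-style update
+ // (op/path/value/from) applied to the resource; "op" here is a
+ // placeholder value, not a real operation name.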
+ }, + } + resp, err := svc.UpdateResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_UpdateRestApi() { + svc := apigateway.New(session.New()) + + params := &apigateway.UpdateRestApiInput{ + RestApiId: aws.String("String"), // Required + PatchOperations: []*apigateway.PatchOperation{ + { // Required + From: aws.String("String"), + Op: aws.String("op"), + Path: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.UpdateRestApi(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_UpdateStage() { + svc := apigateway.New(session.New()) + + params := &apigateway.UpdateStageInput{ + RestApiId: aws.String("String"), // Required + StageName: aws.String("String"), // Required + PatchOperations: []*apigateway.PatchOperation{ + { // Required + From: aws.String("String"), + Op: aws.String("op"), + Path: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.UpdateStage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/apigateway/service.go b/vendor/github.com/aws/aws-sdk-go/service/apigateway/service.go new file mode 100644 index 000000000..3372a2f74 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/apigateway/service.go @@ -0,0 +1,90 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package apigateway + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/restjson" +) + +// Amazon API Gateway helps developers deliver robust, secure and scalable mobile +// and web application backends. Amazon API Gateway allows developers to securely +// connect mobile and web applications to APIs that run on AWS Lambda, Amazon +// EC2, or other publicly addressable web services that are hosted outside of +// AWS. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type APIGateway struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "apigateway" + +// New creates a new instance of the APIGateway client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a APIGateway client from just a session. 
+// svc := apigateway.New(mySession) +// +// // Create a APIGateway client with additional configuration +// svc := apigateway.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *APIGateway { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *APIGateway { + svc := &APIGateway{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-07-09", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a APIGateway operation and runs any +// custom request initialization. +func (c *APIGateway) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/api.go b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/api.go new file mode 100644 index 000000000..f2b3fe245 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/api.go @@ -0,0 +1,1450 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package applicationautoscaling provides a client for Application Auto Scaling. +package applicationautoscaling + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opDeleteScalingPolicy = "DeleteScalingPolicy" + +// DeleteScalingPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteScalingPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteScalingPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteScalingPolicyRequest method. 
+// req, resp := client.DeleteScalingPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ApplicationAutoScaling) DeleteScalingPolicyRequest(input *DeleteScalingPolicyInput) (req *request.Request, output *DeleteScalingPolicyOutput) { + op := &request.Operation{ + Name: opDeleteScalingPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteScalingPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteScalingPolicyOutput{} + req.Data = output + return +} + +// Deletes an Application Auto Scaling scaling policy that was previously created. +// If you are no longer using a scaling policy, you can delete it with this +// operation. +// +// Deleting a policy deletes the underlying alarm action, but does not delete +// the CloudWatch alarm, even if it no longer has an associated action. +// +// To create a new scaling policy or update an existing one, see PutScalingPolicy. +func (c *ApplicationAutoScaling) DeleteScalingPolicy(input *DeleteScalingPolicyInput) (*DeleteScalingPolicyOutput, error) { + req, out := c.DeleteScalingPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeregisterScalableTarget = "DeregisterScalableTarget" + +// DeregisterScalableTargetRequest generates a "aws/request.Request" representing the +// client's request for the DeregisterScalableTarget operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeregisterScalableTarget method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeregisterScalableTargetRequest method. +// req, resp := client.DeregisterScalableTargetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ApplicationAutoScaling) DeregisterScalableTargetRequest(input *DeregisterScalableTargetInput) (req *request.Request, output *DeregisterScalableTargetOutput) { + op := &request.Operation{ + Name: opDeregisterScalableTarget, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterScalableTargetInput{} + } + + req = c.newRequest(op, input, output) + output = &DeregisterScalableTargetOutput{} + req.Data = output + return +} + +// Deregisters a scalable target that was previously registered. If you are +// no longer using a scalable target, you can delete it with this operation. +// When you deregister a scalable target, all of the scaling policies that are +// associated with that scalable target are deleted. +// +// To create a new scalable target or update an existing one, see RegisterScalableTarget. 
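+//
+// A minimal usage sketch (hedged; the field values echo the examples in the
+// input documentation below, the "ecs" namespace string is an assumption,
+// and the aws and session packages used elsewhere in this changeset are
+// assumed to be imported):
+//
+// svc := applicationautoscaling.New(session.New())
+// _, err := svc.DeregisterScalableTarget(&applicationautoscaling.DeregisterScalableTargetInput{
+// ResourceId: aws.String("service/default/sample-webapp"),
+// ScalableDimension: aws.String("ecs:service:DesiredCount"),
+// ServiceNamespace: aws.String("ecs"),
+// })
+// if err != nil {
+// fmt.Println(err.Error())
+// }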
+func (c *ApplicationAutoScaling) DeregisterScalableTarget(input *DeregisterScalableTargetInput) (*DeregisterScalableTargetOutput, error) { + req, out := c.DeregisterScalableTargetRequest(input) + err := req.Send() + return out, err +} + +const opDescribeScalableTargets = "DescribeScalableTargets" + +// DescribeScalableTargetsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeScalableTargets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeScalableTargets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeScalableTargetsRequest method. +// req, resp := client.DescribeScalableTargetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ApplicationAutoScaling) DescribeScalableTargetsRequest(input *DescribeScalableTargetsInput) (req *request.Request, output *DescribeScalableTargetsOutput) { + op := &request.Operation{ + Name: opDescribeScalableTargets, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeScalableTargetsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeScalableTargetsOutput{} + req.Data = output + return +} + +// Provides descriptive information for scalable targets with a specified service +// namespace. +// +// You can filter the results in a service namespace with the ResourceIds and +// ScalableDimension parameters. +// +// To create a new scalable target or update an existing one, see RegisterScalableTarget. +// If you are no longer using a scalable target, you can deregister it with +// DeregisterScalableTarget. +func (c *ApplicationAutoScaling) DescribeScalableTargets(input *DescribeScalableTargetsInput) (*DescribeScalableTargetsOutput, error) { + req, out := c.DescribeScalableTargetsRequest(input) + err := req.Send() + return out, err +} + +// DescribeScalableTargetsPages iterates over the pages of a DescribeScalableTargets operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeScalableTargets method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeScalableTargets operation. 
+// pageNum := 0 +// err := client.DescribeScalableTargetsPages(params, +// func(page *DescribeScalableTargetsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ApplicationAutoScaling) DescribeScalableTargetsPages(input *DescribeScalableTargetsInput, fn func(p *DescribeScalableTargetsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeScalableTargetsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeScalableTargetsOutput), lastPage) + }) +} + +const opDescribeScalingActivities = "DescribeScalingActivities" + +// DescribeScalingActivitiesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeScalingActivities operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeScalingActivities method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeScalingActivitiesRequest method. +// req, resp := client.DescribeScalingActivitiesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ApplicationAutoScaling) DescribeScalingActivitiesRequest(input *DescribeScalingActivitiesInput) (req *request.Request, output *DescribeScalingActivitiesOutput) { + op := &request.Operation{ + Name: opDescribeScalingActivities, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeScalingActivitiesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeScalingActivitiesOutput{} + req.Data = output + return +} + +// Provides descriptive information for scaling activities with a specified +// service namespace. +// +// You can filter the results in a service namespace with the ResourceId and +// ScalableDimension parameters. +// +// Scaling activities are triggered by CloudWatch alarms that are associated +// with scaling policies. To view the existing scaling policies for a service +// namespace, see DescribeScalingPolicies. To create a new scaling policy or +// update an existing one, see PutScalingPolicy. +func (c *ApplicationAutoScaling) DescribeScalingActivities(input *DescribeScalingActivitiesInput) (*DescribeScalingActivitiesOutput, error) { + req, out := c.DescribeScalingActivitiesRequest(input) + err := req.Send() + return out, err +} + +// DescribeScalingActivitiesPages iterates over the pages of a DescribeScalingActivities operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeScalingActivities method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. 
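+// (Pagination is driven by the Paginator declared in the request operation
+// above: NextToken serves as both the input and output token, and MaxResults
+// is the limit token.)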
+// +// // Example iterating over at most 3 pages of a DescribeScalingActivities operation. +// pageNum := 0 +// err := client.DescribeScalingActivitiesPages(params, +// func(page *DescribeScalingActivitiesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ApplicationAutoScaling) DescribeScalingActivitiesPages(input *DescribeScalingActivitiesInput, fn func(p *DescribeScalingActivitiesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeScalingActivitiesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeScalingActivitiesOutput), lastPage) + }) +} + +const opDescribeScalingPolicies = "DescribeScalingPolicies" + +// DescribeScalingPoliciesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeScalingPolicies operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeScalingPolicies method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeScalingPoliciesRequest method. +// req, resp := client.DescribeScalingPoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ApplicationAutoScaling) DescribeScalingPoliciesRequest(input *DescribeScalingPoliciesInput) (req *request.Request, output *DescribeScalingPoliciesOutput) { + op := &request.Operation{ + Name: opDescribeScalingPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeScalingPoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeScalingPoliciesOutput{} + req.Data = output + return +} + +// Provides descriptive information for scaling policies with a specified service +// namespace. +// +// You can filter the results in a service namespace with the ResourceId, ScalableDimension, +// and PolicyNames parameters. +// +// To create a new scaling policy or update an existing one, see PutScalingPolicy. +// If you are no longer using a scaling policy, you can delete it with DeleteScalingPolicy. +func (c *ApplicationAutoScaling) DescribeScalingPolicies(input *DescribeScalingPoliciesInput) (*DescribeScalingPoliciesOutput, error) { + req, out := c.DescribeScalingPoliciesRequest(input) + err := req.Send() + return out, err +} + +// DescribeScalingPoliciesPages iterates over the pages of a DescribeScalingPolicies operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeScalingPolicies method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. 
+// +// // Example iterating over at most 3 pages of a DescribeScalingPolicies operation. +// pageNum := 0 +// err := client.DescribeScalingPoliciesPages(params, +// func(page *DescribeScalingPoliciesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ApplicationAutoScaling) DescribeScalingPoliciesPages(input *DescribeScalingPoliciesInput, fn func(p *DescribeScalingPoliciesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeScalingPoliciesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeScalingPoliciesOutput), lastPage) + }) +} + +const opPutScalingPolicy = "PutScalingPolicy" + +// PutScalingPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutScalingPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutScalingPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutScalingPolicyRequest method. +// req, resp := client.PutScalingPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ApplicationAutoScaling) PutScalingPolicyRequest(input *PutScalingPolicyInput) (req *request.Request, output *PutScalingPolicyOutput) { + op := &request.Operation{ + Name: opPutScalingPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutScalingPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &PutScalingPolicyOutput{} + req.Data = output + return +} + +// Creates or updates a policy for an existing Application Auto Scaling scalable +// target. Each scalable target is identified by service namespace, a resource +// ID, and a scalable dimension, and a scaling policy applies to a scalable +// target that is identified by those three attributes. You cannot create a +// scaling policy without first registering a scalable target with RegisterScalableTarget. +// +// To update an existing policy, use the existing policy name and set the parameters +// you want to change. Any existing parameter not changed in an update to an +// existing policy is not changed in this update request. +// +// You can view the existing scaling policies for a service namespace with +// DescribeScalingPolicies. If you are no longer using a scaling policy, you +// can delete it with DeleteScalingPolicy. +func (c *ApplicationAutoScaling) PutScalingPolicy(input *PutScalingPolicyInput) (*PutScalingPolicyOutput, error) { + req, out := c.PutScalingPolicyRequest(input) + err := req.Send() + return out, err +} + +const opRegisterScalableTarget = "RegisterScalableTarget" + +// RegisterScalableTargetRequest generates a "aws/request.Request" representing the +// client's request for the RegisterScalableTarget operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RegisterScalableTarget method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RegisterScalableTargetRequest method. +// req, resp := client.RegisterScalableTargetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ApplicationAutoScaling) RegisterScalableTargetRequest(input *RegisterScalableTargetInput) (req *request.Request, output *RegisterScalableTargetOutput) { + op := &request.Operation{ + Name: opRegisterScalableTarget, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterScalableTargetInput{} + } + + req = c.newRequest(op, input, output) + output = &RegisterScalableTargetOutput{} + req.Data = output + return +} + +// Registers or updates a scalable target. A scalable target is a resource that +// can be scaled up or down with Application Auto Scaling. After you have registered +// a scalable target, you can use this command to update the minimum and maximum +// values for your scalable dimension. +// +// At this time, Application Auto Scaling only supports scaling Amazon ECS +// services. +// +// After you register a scalable target with Application Auto Scaling, you +// can create and apply scaling policies to it with PutScalingPolicy. You can +// view the existing scaling policies for a service namespace with DescribeScalableTargets. +// If you are no longer using a scalable target, you can deregister it with +// DeregisterScalableTarget. +func (c *ApplicationAutoScaling) RegisterScalableTarget(input *RegisterScalableTargetInput) (*RegisterScalableTargetOutput, error) { + req, out := c.RegisterScalableTargetRequest(input) + err := req.Send() + return out, err +} + +// An object representing a CloudWatch alarm associated with a scaling policy. +type Alarm struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the alarm. + AlarmARN *string `type:"string" required:"true"` + + // The name of the alarm. + AlarmName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Alarm) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Alarm) GoString() string { + return s.String() +} + +type DeleteScalingPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the scaling policy to delete. + PolicyName *string `min:"1" type:"string" required:"true"` + + // The unique identifier string for the resource associated with the scaling + // policy. For Amazon ECS services, this value is the resource type, followed + // by the cluster name and service name, such as service/default/sample-webapp. + ResourceId *string `min:"1" type:"string" required:"true"` + + // The scalable dimension associated with the scaling policy. The scalable dimension + // contains the service namespace, resource type, and scaling property, such + // as ecs:service:DesiredCount for the desired task count of an Amazon ECS service. 
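+ // (As the RegisterScalableTarget documentation above notes, Application
+ // Auto Scaling only supports Amazon ECS services at this time, so
+ // ecs:service:DesiredCount is the dimension these docs assume.)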
+ ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` + + // The namespace for the AWS service that the scaling policy is associated with. + // For more information, see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) + // in the Amazon Web Services General Reference. + ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` +} + +// String returns the string representation +func (s DeleteScalingPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteScalingPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteScalingPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteScalingPolicyInput"} + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.ResourceId != nil && len(*s.ResourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) + } + if s.ScalableDimension == nil { + invalidParams.Add(request.NewErrParamRequired("ScalableDimension")) + } + if s.ServiceNamespace == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceNamespace")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteScalingPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteScalingPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteScalingPolicyOutput) GoString() string { + return s.String() +} + +type DeregisterScalableTargetInput struct { + _ struct{} `type:"structure"` + + // The unique identifier string for the resource associated with the scalable + // target. For Amazon ECS services, this value is the resource type, followed + // by the cluster name and service name, such as service/default/sample-webapp. + ResourceId *string `min:"1" type:"string" required:"true"` + + // The scalable dimension associated with the scalable target. The scalable + // dimension contains the service namespace, resource type, and scaling property, + // such as ecs:service:DesiredCount for the desired task count of an Amazon + // ECS service. + ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` + + // The namespace for the AWS service that the scalable target is associated + // with. For more information, see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) + // in the Amazon Web Services General Reference. + ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` +} + +// String returns the string representation +func (s DeregisterScalableTargetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterScalableTargetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
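+//
+// A minimal sketch of pre-flight validation in caller code (the field values
+// and the aws pointer helpers are illustrative; the SDK also runs this
+// validation automatically when the request is sent):
+//
+//    params := &DeregisterScalableTargetInput{
+//        ResourceId:        aws.String("service/default/sample-webapp"),
+//        ScalableDimension: aws.String("ecs:service:DesiredCount"),
+//        ServiceNamespace:  aws.String("ecs"),
+//    }
+//    if err := params.Validate(); err != nil {
+//        fmt.Println(err) // a request.ErrInvalidParams listing each problem
+//    }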
+func (s *DeregisterScalableTargetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeregisterScalableTargetInput"} + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.ResourceId != nil && len(*s.ResourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) + } + if s.ScalableDimension == nil { + invalidParams.Add(request.NewErrParamRequired("ScalableDimension")) + } + if s.ServiceNamespace == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceNamespace")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeregisterScalableTargetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeregisterScalableTargetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterScalableTargetOutput) GoString() string { + return s.String() +} + +type DescribeScalableTargetsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of scalable target results returned by DescribeScalableTargets + // in paginated output. When this parameter is used, DescribeScalableTargets + // returns up to MaxResults results in a single page along with a NextToken + // response element. The remaining results of the initial request can be seen + // by sending another DescribeScalableTargets request with the returned NextToken + // value. This value can be between 1 and 50. If this parameter is not used, + // then DescribeScalableTargets returns up to 50 results and a NextToken value, + // if applicable. + MaxResults *int64 `type:"integer"` + + // The NextToken value returned from a previous paginated DescribeScalableTargets + // request. Pagination continues from the end of the previous results that returned + // the NextToken value. This value is null when there are no more results to + // return. + NextToken *string `type:"string"` + + // The unique identifier string for the resource associated with the scalable + // target. For Amazon ECS services, this value is the resource type, followed + // by the cluster name and service name, such as service/default/sample-webapp. + // If you specify a scalable dimension, you must also specify a resource ID. + ResourceIds []*string `type:"list"` + + // The scalable dimension associated with the scalable target. The scalable + // dimension contains the service namespace, resource type, and scaling property, + // such as ecs:service:DesiredCount for the desired task count of an Amazon + // ECS service. If you specify a scalable dimension, you must also specify a + // resource ID. + ScalableDimension *string `type:"string" enum:"ScalableDimension"` + + // The namespace for the AWS service that the scalable target is associated + // with. For more information, see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) + // in the Amazon Web Services General Reference. + ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` +} + +// String returns the string representation +func (s DescribeScalableTargetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScalableTargetsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeScalableTargetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeScalableTargetsInput"} + if s.ServiceNamespace == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceNamespace")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeScalableTargetsOutput struct { + _ struct{} `type:"structure"` + + // The NextToken value to include in a future DescribeScalableTargets request. + // When the results of a DescribeScalableTargets request exceed MaxResults, + // this value can be used to retrieve the next page of results. This value is + // null when there are no more results to return. + NextToken *string `type:"string"` + + // The list of scalable targets that matches the request parameters. + ScalableTargets []*ScalableTarget `type:"list"` +} + +// String returns the string representation +func (s DescribeScalableTargetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScalableTargetsOutput) GoString() string { + return s.String() +} + +type DescribeScalingActivitiesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of scaling activity results returned by DescribeScalingActivities + // in paginated output. When this parameter is used, DescribeScalingActivities + // returns up to MaxResults results in a single page along with a NextToken + // response element. The remaining results of the initial request can be seen + // by sending another DescribeScalingActivities request with the returned NextToken + // value. This value can be between 1 and 50. If this parameter is not used, + // then DescribeScalingActivities returns up to 50 results and a NextToken value, + // if applicable. + MaxResults *int64 `type:"integer"` + + // The NextToken value returned from a previous paginated DescribeScalingActivities + // request. Pagination continues from the end of the previous results that returned + // the NextToken value. This value is null when there are no more results to + // return. + NextToken *string `type:"string"` + + // The unique identifier string for the resource associated with the scaling + // activity. For Amazon ECS services, this value is the resource type, followed + // by the cluster name and service name, such as service/default/sample-webapp. + // If you specify a scalable dimension, you must also specify a resource ID. + ResourceId *string `min:"1" type:"string"` + + // The scalable dimension associated with the scaling activity. The scalable + // dimension contains the service namespace, resource type, and scaling property, + // such as ecs:service:DesiredCount for the desired task count of an Amazon + // ECS service. If you specify a scalable dimension, you must also specify a + // resource ID. + ScalableDimension *string `type:"string" enum:"ScalableDimension"` + + // The namespace for the AWS service that the scaling activity is associated + // with. For more information, see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) + // in the Amazon Web Services General Reference. 
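+	// For Amazon ECS services, the namespace value is ecs.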
+ ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` +} + +// String returns the string representation +func (s DescribeScalingActivitiesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScalingActivitiesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeScalingActivitiesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeScalingActivitiesInput"} + if s.ResourceId != nil && len(*s.ResourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) + } + if s.ServiceNamespace == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceNamespace")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeScalingActivitiesOutput struct { + _ struct{} `type:"structure"` + + // The NextToken value to include in a future DescribeScalingActivities request. + // When the results of a DescribeScalingActivities request exceed MaxResults, + // this value can be used to retrieve the next page of results. This value is + // null when there are no more results to return. + NextToken *string `type:"string"` + + // A list of scaling activity objects. + ScalingActivities []*ScalingActivity `type:"list"` +} + +// String returns the string representation +func (s DescribeScalingActivitiesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScalingActivitiesOutput) GoString() string { + return s.String() +} + +type DescribeScalingPoliciesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of scaling policy results returned by DescribeScalingPolicies + // in paginated output. When this parameter is used, DescribeScalingPolicies + // returns up to MaxResults results in a single page along with a NextToken + // response element. The remaining results of the initial request can be seen + // by sending another DescribeScalingPolicies request with the returned NextToken + // value. This value can be between 1 and 50. If this parameter is not used, + // then DescribeScalingPolicies returns up to 50 results and a NextToken value, + // if applicable. + MaxResults *int64 `type:"integer"` + + // The NextToken value returned from a previous paginated DescribeScalingPolicies + // request. Pagination continues from the end of the previous results that returned + // the NextToken value. This value is null when there are no more results to + // return. + NextToken *string `type:"string"` + + // The names of the scaling policies to describe. + PolicyNames []*string `type:"list"` + + // The unique resource identifier string of the scalable target that the scaling + // policy is associated with. For Amazon ECS services, this value is the resource + // type, followed by the cluster name and service name, such as service/default/sample-webapp. + // If you specify a scalable dimension, you must also specify a resource ID. + ResourceId *string `min:"1" type:"string"` + + // The scalable dimension of the scalable target that the scaling policy is + // associated with. The scalable dimension contains the service namespace, resource + // type, and scaling property, such as ecs:service:DesiredCount for the desired + // task count of an Amazon ECS service. If you specify a scalable dimension, + // you must also specify a resource ID. 
+ ScalableDimension *string `type:"string" enum:"ScalableDimension"` + + // The AWS service namespace of the scalable target that the scaling policy + // is associated with. For more information, see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) + // in the Amazon Web Services General Reference. + ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` +} + +// String returns the string representation +func (s DescribeScalingPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScalingPoliciesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeScalingPoliciesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeScalingPoliciesInput"} + if s.ResourceId != nil && len(*s.ResourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) + } + if s.ServiceNamespace == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceNamespace")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeScalingPoliciesOutput struct { + _ struct{} `type:"structure"` + + // The NextToken value to include in a future DescribeScalingPolicies request. + // When the results of a DescribeScalingPolicies request exceed MaxResults, + // this value can be used to retrieve the next page of results. This value is + // null when there are no more results to return. + NextToken *string `type:"string"` + + // A list of scaling policy objects. + ScalingPolicies []*ScalingPolicy `type:"list"` +} + +// String returns the string representation +func (s DescribeScalingPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScalingPoliciesOutput) GoString() string { + return s.String() +} + +type PutScalingPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the scaling policy. + PolicyName *string `min:"1" type:"string" required:"true"` + + // The policy type. This parameter is required if you are creating a new policy. + PolicyType *string `type:"string" enum:"PolicyType"` + + // The unique resource identifier string for the scalable target that this scaling + // policy applies to. For Amazon ECS services, this value is the resource type, + // followed by the cluster name and service name, such as service/default/sample-webapp. + ResourceId *string `min:"1" type:"string" required:"true"` + + // The scalable dimension of the scalable target that this scaling policy applies + // to. The scalable dimension contains the service namespace, resource type, + // and scaling property, such as ecs:service:DesiredCount for the desired task + // count of an Amazon ECS service. + ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` + + // The AWS service namespace of the scalable target that this scaling policy + // applies to. For more information, see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) + // in the Amazon Web Services General Reference. + ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` + + // The configuration for the step scaling policy. This parameter is required + // if you are creating a new policy. 
For more information, see StepScalingPolicyConfiguration + // and StepAdjustment. + StepScalingPolicyConfiguration *StepScalingPolicyConfiguration `type:"structure"` +} + +// String returns the string representation +func (s PutScalingPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutScalingPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutScalingPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutScalingPolicyInput"} + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.ResourceId != nil && len(*s.ResourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) + } + if s.ScalableDimension == nil { + invalidParams.Add(request.NewErrParamRequired("ScalableDimension")) + } + if s.ServiceNamespace == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceNamespace")) + } + if s.StepScalingPolicyConfiguration != nil { + if err := s.StepScalingPolicyConfiguration.Validate(); err != nil { + invalidParams.AddNested("StepScalingPolicyConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutScalingPolicyOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resulting scaling policy. + PolicyARN *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutScalingPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutScalingPolicyOutput) GoString() string { + return s.String() +} + +type RegisterScalableTargetInput struct { + _ struct{} `type:"structure"` + + // The maximum value for this scalable target to scale out to in response to + // scaling activities. This parameter is required if you are registering a new + // scalable target, and it is optional if you are updating an existing one. + MaxCapacity *int64 `type:"integer"` + + // The minimum value for this scalable target to scale in to in response to + // scaling activities. This parameter is required if you are registering a new + // scalable target, and it is optional if you are updating an existing one. + MinCapacity *int64 `type:"integer"` + + // The unique identifier string for the resource to associate with the scalable + // target. For Amazon ECS services, this value is the resource type, followed + // by the cluster name and service name, such as service/default/sample-webapp. + ResourceId *string `min:"1" type:"string" required:"true"` + + // The ARN of the IAM role that allows Application Auto Scaling to modify your + // scalable target on your behalf. This parameter is required if you are registering + // a new scalable target, and it is optional if you are updating an existing + // one. + RoleARN *string `min:"1" type:"string"` + + // The scalable dimension associated with the scalable target. The scalable + // dimension contains the service namespace, resource type, and scaling property, + // such as ecs:service:DesiredCount for the desired task count of an Amazon + // ECS service. 
+ ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` + + // The namespace for the AWS service that the scalable target is associated + // with. For Amazon ECS services, the namespace value is ecs. For more information, + // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) + // in the Amazon Web Services General Reference. + ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` +} + +// String returns the string representation +func (s RegisterScalableTargetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterScalableTargetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RegisterScalableTargetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RegisterScalableTargetInput"} + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.ResourceId != nil && len(*s.ResourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) + } + if s.RoleARN != nil && len(*s.RoleARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleARN", 1)) + } + if s.ScalableDimension == nil { + invalidParams.Add(request.NewErrParamRequired("ScalableDimension")) + } + if s.ServiceNamespace == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceNamespace")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RegisterScalableTargetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RegisterScalableTargetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterScalableTargetOutput) GoString() string { + return s.String() +} + +// An object representing a scalable target. +type ScalableTarget struct { + _ struct{} `type:"structure"` + + // The Unix timestamp for when the scalable target was created. + CreationTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` + + // The maximum value for this scalable target to scale out to in response to + // scaling activities. + MaxCapacity *int64 `type:"integer" required:"true"` + + // The minimum value for this scalable target to scale in to in response to + // scaling activities. + MinCapacity *int64 `type:"integer" required:"true"` + + // The unique identifier string for the resource associated with the scalable + // target. For Amazon ECS services, this value is the resource type, followed + // by the cluster name and service name, such as service/default/sample-webapp. + ResourceId *string `min:"1" type:"string" required:"true"` + + // The ARN of the IAM role that allows Application Auto Scaling to modify your + // scalable target on your behalf. + RoleARN *string `min:"1" type:"string" required:"true"` + + // The scalable dimension associated with the scalable target. The scalable + // dimension contains the service namespace, resource type, and scaling property, + // such as ecs:service:DesiredCount for the desired task count of an Amazon + // ECS service. + ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` + + // The namespace for the AWS service that the scalable target is associated + // with. 
For more information, see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) + // in the Amazon Web Services General Reference. + ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` +} + +// String returns the string representation +func (s ScalableTarget) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScalableTarget) GoString() string { + return s.String() +} + +// An object representing a scaling activity. +type ScalingActivity struct { + _ struct{} `type:"structure"` + + // The unique identifier string for the scaling activity. + ActivityId *string `type:"string" required:"true"` + + // A simple description of what caused the scaling activity to happen. + Cause *string `type:"string" required:"true"` + + // A simple description of what action the scaling activity intends to accomplish. + Description *string `type:"string" required:"true"` + + // The details about the scaling activity. + Details *string `type:"string"` + + // The Unix timestamp for when the scaling activity ended. + EndTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The unique identifier string for the resource associated with the scaling + // activity. For Amazon ECS services, this value is the resource type, followed + // by the cluster name and service name, such as service/default/sample-webapp. + ResourceId *string `min:"1" type:"string" required:"true"` + + // The scalable dimension associated with the scaling activity. The scalable + // dimension contains the service namespace, resource type, and scaling property, + // such as ecs:service:DesiredCount for the desired task count of an Amazon + // ECS service. + ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` + + // The namespace for the AWS service that the scaling activity is associated + // with. For more information, see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) + // in the Amazon Web Services General Reference. + ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` + + // The Unix timestamp for when the scaling activity began. + StartTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` + + // Indicates the status of the scaling activity. + StatusCode *string `type:"string" required:"true" enum:"ScalingActivityStatusCode"` + + // A simple message about the current status of the scaling activity. + StatusMessage *string `type:"string"` +} + +// String returns the string representation +func (s ScalingActivity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScalingActivity) GoString() string { + return s.String() +} + +// An object representing a scaling policy. +type ScalingPolicy struct { + _ struct{} `type:"structure"` + + // The CloudWatch alarms that are associated with the scaling policy. + Alarms []*Alarm `type:"list"` + + // The Unix timestamp for when the scaling policy was created. + CreationTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` + + // The Amazon Resource Name (ARN) of the scaling policy. + PolicyARN *string `min:"1" type:"string" required:"true"` + + // The name of the scaling policy. + PolicyName *string `min:"1" type:"string" required:"true"` + + // The scaling policy type. 
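+	// In this API version, StepScaling is the only defined value (see the
+	// PolicyType constants at the end of this file).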
+ PolicyType *string `type:"string" required:"true" enum:"PolicyType"` + + // The unique identifier string for the resource associated with the scaling + // policy. For Amazon ECS services, this value is the resource type, followed + // by the cluster name and service name, such as service/default/sample-webapp. + ResourceId *string `min:"1" type:"string" required:"true"` + + // The scalable dimension associated with the scaling policy. The scalable dimension + // contains the service namespace, resource type, and scaling property, such + // as ecs:service:DesiredCount for the desired task count of an Amazon ECS service. + ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` + + // The namespace for the AWS service that the scaling policy is associated with. + // For more information, see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) + // in the Amazon Web Services General Reference. + ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` + + // The configuration for the step scaling policy. + StepScalingPolicyConfiguration *StepScalingPolicyConfiguration `type:"structure"` +} + +// String returns the string representation +func (s ScalingPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScalingPolicy) GoString() string { + return s.String() +} + +// An object representing a step adjustment for a StepScalingPolicyConfiguration. +// Describes an adjustment based on the difference between the value of the +// aggregated CloudWatch metric and the breach threshold that you've defined +// for the alarm. +// +// For the following examples, suppose that you have an alarm with a breach +// threshold of 50: +// +// If you want the adjustment to be triggered when the metric is greater +// than or equal to 50 and less than 60, specify a lower bound of 0 and an upper +// bound of 10. +// +// If you want the adjustment to be triggered when the metric is greater +// than 40 and less than or equal to 50, specify a lower bound of -10 and an +// upper bound of 0. +// +// There are a few rules for the step adjustments for your step policy: +// +// The ranges of your step adjustments can't overlap or have a gap. +// +// At most one step adjustment can have a null lower bound. If one step adjustment +// has a negative lower bound, then there must be a step adjustment with a null +// lower bound. +// +// At most one step adjustment can have a null upper bound. If one step adjustment +// has a positive upper bound, then there must be a step adjustment with a null +// upper bound. +// +// The upper and lower bound can't be null in the same step adjustment. +type StepAdjustment struct { + _ struct{} `type:"structure"` + + // The lower bound for the difference between the alarm threshold and the CloudWatch + // metric. If the metric value is above the breach threshold, the lower bound + // is inclusive (the metric must be greater than or equal to the threshold plus + // the lower bound). Otherwise, it is exclusive (the metric must be greater + // than the threshold plus the lower bound). A null value indicates negative + // infinity. + MetricIntervalLowerBound *float64 `type:"double"` + + // The upper bound for the difference between the alarm threshold and the CloudWatch + // metric. 
If the metric value is above the breach threshold, the upper bound + // is exclusive (the metric must be less than the threshold plus the upper bound). + // Otherwise, it is inclusive (the metric must be less than or equal to the + // threshold plus the upper bound). A null value indicates positive infinity. + // + // The upper bound must be greater than the lower bound. + MetricIntervalUpperBound *float64 `type:"double"` + + // The amount by which to scale, based on the specified adjustment type. A positive + // value adds to the current scalable dimension while a negative number removes + // from the current scalable dimension. + ScalingAdjustment *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s StepAdjustment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StepAdjustment) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StepAdjustment) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StepAdjustment"} + if s.ScalingAdjustment == nil { + invalidParams.Add(request.NewErrParamRequired("ScalingAdjustment")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An object representing a step scaling policy configuration. +type StepScalingPolicyConfiguration struct { + _ struct{} `type:"structure"` + + // The adjustment type, which specifies how the ScalingAdjustment parameter + // in a StepAdjustment is interpreted. + AdjustmentType *string `type:"string" enum:"AdjustmentType"` + + // The amount of time, in seconds, after a scaling activity completes where + // previous trigger-related scaling activities can influence future scaling + // events. + // + // For scale out policies, while Cooldown is in effect, the capacity that has + // been added by the previous scale out event that initiated the Cooldown is + // calculated as part of the desired capacity for the next scale out. The intention + // is to continuously (but not excessively) scale out. For example, an alarm + // triggers a step scaling policy to scale out an Amazon ECS service by 2 tasks, + // the scaling activity completes successfully, and a Cooldown period of 5 minutes + // starts. During the Cooldown period, if the alarm triggers the same policy + // again but at a more aggressive step adjustment to scale out the service by + // 3 tasks, the 2 tasks that were added in the previous scale out event are + // considered part of that capacity and only 1 additional task is added to the + // desired count. + // + // For scale in policies, the Cooldown period is used to block subsequent scale + // in requests until it has expired. The intention is to scale in conservatively + // to protect your application's availability. However, if another alarm triggers + // a scale out policy during the Cooldown period after a scale-in, Application + // Auto Scaling scales out your scalable target immediately. + Cooldown *int64 `type:"integer"` + + // The aggregation type for the CloudWatch metrics. Valid values are Minimum, + // Maximum, and Average. + MetricAggregationType *string `type:"string" enum:"MetricAggregationType"` + + // The minimum number to adjust your scalable dimension as a result of a scaling + // activity. If the adjustment type is PercentChangeInCapacity, the scaling + // policy changes the scalable dimension of the scalable target by this amount. 
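+	//
+	// Illustrative arithmetic (not taken from the service documentation): with
+	// PercentChangeInCapacity and a 10 percent step on a service running 5
+	// tasks, the raw adjustment is 0.5 tasks; a MinAdjustmentMagnitude of 1
+	// ensures the activity still changes the desired count by at least 1.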
+ MinAdjustmentMagnitude *int64 `type:"integer"` + + // A set of adjustments that enable you to scale based on the size of the alarm + // breach. + StepAdjustments []*StepAdjustment `type:"list"` +} + +// String returns the string representation +func (s StepScalingPolicyConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StepScalingPolicyConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StepScalingPolicyConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StepScalingPolicyConfiguration"} + if s.StepAdjustments != nil { + for i, v := range s.StepAdjustments { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "StepAdjustments", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +const ( + // @enum AdjustmentType + AdjustmentTypeChangeInCapacity = "ChangeInCapacity" + // @enum AdjustmentType + AdjustmentTypePercentChangeInCapacity = "PercentChangeInCapacity" + // @enum AdjustmentType + AdjustmentTypeExactCapacity = "ExactCapacity" +) + +const ( + // @enum MetricAggregationType + MetricAggregationTypeAverage = "Average" + // @enum MetricAggregationType + MetricAggregationTypeMinimum = "Minimum" + // @enum MetricAggregationType + MetricAggregationTypeMaximum = "Maximum" +) + +const ( + // @enum PolicyType + PolicyTypeStepScaling = "StepScaling" +) + +const ( + // @enum ScalableDimension + ScalableDimensionEcsServiceDesiredCount = "ecs:service:DesiredCount" +) + +const ( + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodePending = "Pending" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodeInProgress = "InProgress" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodeSuccessful = "Successful" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodeOverridden = "Overridden" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodeUnfulfilled = "Unfulfilled" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodeFailed = "Failed" +) + +const ( + // @enum ServiceNamespace + ServiceNamespaceEcs = "ecs" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/applicationautoscalingiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/applicationautoscalingiface/interface.go new file mode 100644 index 000000000..fef485163 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/applicationautoscalingiface/interface.go @@ -0,0 +1,48 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package applicationautoscalingiface provides an interface for the Application Auto Scaling. +package applicationautoscalingiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/applicationautoscaling" +) + +// ApplicationAutoScalingAPI is the interface type for applicationautoscaling.ApplicationAutoScaling. 
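+//
+// The interface is useful for stubbing out the client in unit tests. A minimal
+// sketch (mockClient is a hypothetical name, not part of this package):
+//
+//    type mockClient struct {
+//        applicationautoscalingiface.ApplicationAutoScalingAPI
+//    }
+//
+//    func (m *mockClient) RegisterScalableTarget(in *applicationautoscaling.RegisterScalableTargetInput) (*applicationautoscaling.RegisterScalableTargetOutput, error) {
+//        // Return canned data instead of calling the service.
+//        return &applicationautoscaling.RegisterScalableTargetOutput{}, nil
+//    }
+//
+// Embedding the interface means only the methods a test exercises need to be
+// overridden.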
+type ApplicationAutoScalingAPI interface { + DeleteScalingPolicyRequest(*applicationautoscaling.DeleteScalingPolicyInput) (*request.Request, *applicationautoscaling.DeleteScalingPolicyOutput) + + DeleteScalingPolicy(*applicationautoscaling.DeleteScalingPolicyInput) (*applicationautoscaling.DeleteScalingPolicyOutput, error) + + DeregisterScalableTargetRequest(*applicationautoscaling.DeregisterScalableTargetInput) (*request.Request, *applicationautoscaling.DeregisterScalableTargetOutput) + + DeregisterScalableTarget(*applicationautoscaling.DeregisterScalableTargetInput) (*applicationautoscaling.DeregisterScalableTargetOutput, error) + + DescribeScalableTargetsRequest(*applicationautoscaling.DescribeScalableTargetsInput) (*request.Request, *applicationautoscaling.DescribeScalableTargetsOutput) + + DescribeScalableTargets(*applicationautoscaling.DescribeScalableTargetsInput) (*applicationautoscaling.DescribeScalableTargetsOutput, error) + + DescribeScalableTargetsPages(*applicationautoscaling.DescribeScalableTargetsInput, func(*applicationautoscaling.DescribeScalableTargetsOutput, bool) bool) error + + DescribeScalingActivitiesRequest(*applicationautoscaling.DescribeScalingActivitiesInput) (*request.Request, *applicationautoscaling.DescribeScalingActivitiesOutput) + + DescribeScalingActivities(*applicationautoscaling.DescribeScalingActivitiesInput) (*applicationautoscaling.DescribeScalingActivitiesOutput, error) + + DescribeScalingActivitiesPages(*applicationautoscaling.DescribeScalingActivitiesInput, func(*applicationautoscaling.DescribeScalingActivitiesOutput, bool) bool) error + + DescribeScalingPoliciesRequest(*applicationautoscaling.DescribeScalingPoliciesInput) (*request.Request, *applicationautoscaling.DescribeScalingPoliciesOutput) + + DescribeScalingPolicies(*applicationautoscaling.DescribeScalingPoliciesInput) (*applicationautoscaling.DescribeScalingPoliciesOutput, error) + + DescribeScalingPoliciesPages(*applicationautoscaling.DescribeScalingPoliciesInput, func(*applicationautoscaling.DescribeScalingPoliciesOutput, bool) bool) error + + PutScalingPolicyRequest(*applicationautoscaling.PutScalingPolicyInput) (*request.Request, *applicationautoscaling.PutScalingPolicyOutput) + + PutScalingPolicy(*applicationautoscaling.PutScalingPolicyInput) (*applicationautoscaling.PutScalingPolicyOutput, error) + + RegisterScalableTargetRequest(*applicationautoscaling.RegisterScalableTargetInput) (*request.Request, *applicationautoscaling.RegisterScalableTargetOutput) + + RegisterScalableTarget(*applicationautoscaling.RegisterScalableTargetInput) (*applicationautoscaling.RegisterScalableTargetOutput, error) +} + +var _ ApplicationAutoScalingAPI = (*applicationautoscaling.ApplicationAutoScaling)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/examples_test.go new file mode 100644 index 000000000..d3249c9a3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/examples_test.go @@ -0,0 +1,196 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package applicationautoscaling_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/applicationautoscaling" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleApplicationAutoScaling_DeleteScalingPolicy() { + svc := applicationautoscaling.New(session.New()) + + params := &applicationautoscaling.DeleteScalingPolicyInput{ + PolicyName: aws.String("ResourceIdMaxLen1600"), // Required + ResourceId: aws.String("ResourceIdMaxLen1600"), // Required + ScalableDimension: aws.String("ScalableDimension"), // Required + ServiceNamespace: aws.String("ServiceNamespace"), // Required + } + resp, err := svc.DeleteScalingPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleApplicationAutoScaling_DeregisterScalableTarget() { + svc := applicationautoscaling.New(session.New()) + + params := &applicationautoscaling.DeregisterScalableTargetInput{ + ResourceId: aws.String("ResourceIdMaxLen1600"), // Required + ScalableDimension: aws.String("ScalableDimension"), // Required + ServiceNamespace: aws.String("ServiceNamespace"), // Required + } + resp, err := svc.DeregisterScalableTarget(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleApplicationAutoScaling_DescribeScalableTargets() { + svc := applicationautoscaling.New(session.New()) + + params := &applicationautoscaling.DescribeScalableTargetsInput{ + ServiceNamespace: aws.String("ServiceNamespace"), // Required + MaxResults: aws.Int64(1), + NextToken: aws.String("XmlString"), + ResourceIds: []*string{ + aws.String("ResourceIdMaxLen1600"), // Required + // More values... + }, + ScalableDimension: aws.String("ScalableDimension"), + } + resp, err := svc.DescribeScalableTargets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleApplicationAutoScaling_DescribeScalingActivities() { + svc := applicationautoscaling.New(session.New()) + + params := &applicationautoscaling.DescribeScalingActivitiesInput{ + ServiceNamespace: aws.String("ServiceNamespace"), // Required + MaxResults: aws.Int64(1), + NextToken: aws.String("XmlString"), + ResourceId: aws.String("ResourceIdMaxLen1600"), + ScalableDimension: aws.String("ScalableDimension"), + } + resp, err := svc.DescribeScalingActivities(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleApplicationAutoScaling_DescribeScalingPolicies() { + svc := applicationautoscaling.New(session.New()) + + params := &applicationautoscaling.DescribeScalingPoliciesInput{ + ServiceNamespace: aws.String("ServiceNamespace"), // Required + MaxResults: aws.Int64(1), + NextToken: aws.String("XmlString"), + PolicyNames: []*string{ + aws.String("ResourceIdMaxLen1600"), // Required + // More values... 
+ }, + ResourceId: aws.String("ResourceIdMaxLen1600"), + ScalableDimension: aws.String("ScalableDimension"), + } + resp, err := svc.DescribeScalingPolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleApplicationAutoScaling_PutScalingPolicy() { + svc := applicationautoscaling.New(session.New()) + + params := &applicationautoscaling.PutScalingPolicyInput{ + PolicyName: aws.String("PolicyName"), // Required + ResourceId: aws.String("ResourceIdMaxLen1600"), // Required + ScalableDimension: aws.String("ScalableDimension"), // Required + ServiceNamespace: aws.String("ServiceNamespace"), // Required + PolicyType: aws.String("PolicyType"), + StepScalingPolicyConfiguration: &applicationautoscaling.StepScalingPolicyConfiguration{ + AdjustmentType: aws.String("AdjustmentType"), + Cooldown: aws.Int64(1), + MetricAggregationType: aws.String("MetricAggregationType"), + MinAdjustmentMagnitude: aws.Int64(1), + StepAdjustments: []*applicationautoscaling.StepAdjustment{ + { // Required + ScalingAdjustment: aws.Int64(1), // Required + MetricIntervalLowerBound: aws.Float64(1.0), + MetricIntervalUpperBound: aws.Float64(1.0), + }, + // More values... + }, + }, + } + resp, err := svc.PutScalingPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleApplicationAutoScaling_RegisterScalableTarget() { + svc := applicationautoscaling.New(session.New()) + + params := &applicationautoscaling.RegisterScalableTargetInput{ + ResourceId: aws.String("ResourceIdMaxLen1600"), // Required + ScalableDimension: aws.String("ScalableDimension"), // Required + ServiceNamespace: aws.String("ServiceNamespace"), // Required + MaxCapacity: aws.Int64(1), + MinCapacity: aws.Int64(1), + RoleARN: aws.String("ResourceIdMaxLen1600"), + } + resp, err := svc.RegisterScalableTarget(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/service.go b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/service.go new file mode 100644 index 000000000..d6e797ff2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/service.go @@ -0,0 +1,112 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package applicationautoscaling + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// Application Auto Scaling is a general purpose Auto Scaling service for supported +// elastic AWS resources. With Application Auto Scaling, you can automatically +// scale your AWS resources, with an experience similar to that of Auto Scaling. +// +// At this time, Application Auto Scaling only supports scaling Amazon ECS +// services. 
+//
+// For example, you can use Application Auto Scaling to accomplish the following
+// tasks:
+//
+// Define scaling policies for automatically adjusting your application’s
+// resources
+//
+// Scale your resources in response to CloudWatch alarms
+//
+// View history of your scaling events
+//
+// Application Auto Scaling is available in the following regions:
+//
+// us-east-1
+//
+// us-west-2
+//
+// eu-west-1
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type ApplicationAutoScaling struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "autoscaling"
+
+// New creates a new instance of the ApplicationAutoScaling client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create an ApplicationAutoScaling client from just a session.
+//     svc := applicationautoscaling.New(mySession)
+//
+//     // Create an ApplicationAutoScaling client with additional configuration
+//     svc := applicationautoscaling.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *ApplicationAutoScaling {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *ApplicationAutoScaling {
+	svc := &ApplicationAutoScaling{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   ServiceName,
+				SigningName:   "application-autoscaling",
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "2016-02-06",
+				JSONVersion:   "1.1",
+				TargetPrefix:  "AnyScaleFrontendService",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+	svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler)
+	svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler)
+	svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler)
+	svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for an ApplicationAutoScaling operation and runs any
+// custom request initialization.
+func (c *ApplicationAutoScaling) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	// Run custom request initialization if present
+	if initRequest != nil {
+		initRequest(req)
+	}
+
+	return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/applicationdiscoveryservice/api.go b/vendor/github.com/aws/aws-sdk-go/service/applicationdiscoveryservice/api.go
new file mode 100644
index 000000000..dd8755b57
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/applicationdiscoveryservice/api.go
@@ -0,0 +1,1430 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package applicationdiscoveryservice provides a client for AWS Application Discovery Service.
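+//
+// A minimal sketch of creating a client and making a call (the region value is
+// illustrative; per the DescribeAgents documentation below, an empty input
+// lists all agents for the account):
+//
+//    svc := applicationdiscoveryservice.New(session.New(&aws.Config{Region: aws.String("us-west-2")}))
+//    resp, err := svc.DescribeAgents(&applicationdiscoveryservice.DescribeAgentsInput{})
+//    if err != nil {
+//        fmt.Println(err.Error())
+//        return
+//    }
+//    fmt.Println(resp)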
+package applicationdiscoveryservice + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opCreateTags = "CreateTags" + +// CreateTagsRequest generates a "aws/request.Request" representing the +// client's request for the CreateTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateTagsRequest method. +// req, resp := client.CreateTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ApplicationDiscoveryService) CreateTagsRequest(input *CreateTagsInput) (req *request.Request, output *CreateTagsOutput) { + op := &request.Operation{ + Name: opCreateTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateTagsOutput{} + req.Data = output + return +} + +// Creates one or more tags for configuration items. Tags are metadata that +// help you categorize IT assets. This API accepts a list of multiple configuration +// items. +func (c *ApplicationDiscoveryService) CreateTags(input *CreateTagsInput) (*CreateTagsOutput, error) { + req, out := c.CreateTagsRequest(input) + err := req.Send() + return out, err +} + +const opDeleteTags = "DeleteTags" + +// DeleteTagsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteTagsRequest method. +// req, resp := client.DeleteTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ApplicationDiscoveryService) DeleteTagsRequest(input *DeleteTagsInput) (req *request.Request, output *DeleteTagsOutput) { + op := &request.Operation{ + Name: opDeleteTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteTagsOutput{} + req.Data = output + return +} + +// Deletes the association between configuration items and one or more tags. +// This API accepts a list of multiple configuration items. 
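+//
+// A minimal sketch of a call (the ConfigurationIds field on DeleteTagsInput is
+// an assumption based on the service model, and the ID value is hypothetical):
+//
+//    _, err := svc.DeleteTags(&DeleteTagsInput{
+//        ConfigurationIds: []*string{aws.String("configuration-id")},
+//    })
+//    if err != nil {
+//        fmt.Println(err.Error())
+//    }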
+func (c *ApplicationDiscoveryService) DeleteTags(input *DeleteTagsInput) (*DeleteTagsOutput, error) { + req, out := c.DeleteTagsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAgents = "DescribeAgents" + +// DescribeAgentsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAgents operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeAgents method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeAgentsRequest method. +// req, resp := client.DescribeAgentsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ApplicationDiscoveryService) DescribeAgentsRequest(input *DescribeAgentsInput) (req *request.Request, output *DescribeAgentsOutput) { + op := &request.Operation{ + Name: opDescribeAgents, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAgentsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAgentsOutput{} + req.Data = output + return +} + +// Lists AWS agents by ID or lists all agents associated with your user account +// if you did not specify an agent ID. +func (c *ApplicationDiscoveryService) DescribeAgents(input *DescribeAgentsInput) (*DescribeAgentsOutput, error) { + req, out := c.DescribeAgentsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeConfigurations = "DescribeConfigurations" + +// DescribeConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeConfigurations operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeConfigurations method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeConfigurationsRequest method. +// req, resp := client.DescribeConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ApplicationDiscoveryService) DescribeConfigurationsRequest(input *DescribeConfigurationsInput) (req *request.Request, output *DescribeConfigurationsOutput) { + op := &request.Operation{ + Name: opDescribeConfigurations, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeConfigurationsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeConfigurationsOutput{} + req.Data = output + return +} + +// Retrieves a list of attributes for a specific configuration ID. 
For example, +// the output for a server configuration item includes a list of attributes +// about the server, including host name, operating system, number of network +// cards, etc. +func (c *ApplicationDiscoveryService) DescribeConfigurations(input *DescribeConfigurationsInput) (*DescribeConfigurationsOutput, error) { + req, out := c.DescribeConfigurationsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeExportConfigurations = "DescribeExportConfigurations" + +// DescribeExportConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeExportConfigurations operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeExportConfigurations method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeExportConfigurationsRequest method. +// req, resp := client.DescribeExportConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ApplicationDiscoveryService) DescribeExportConfigurationsRequest(input *DescribeExportConfigurationsInput) (req *request.Request, output *DescribeExportConfigurationsOutput) { + op := &request.Operation{ + Name: opDescribeExportConfigurations, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeExportConfigurationsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeExportConfigurationsOutput{} + req.Data = output + return +} + +// Retrieves the status of a given export process. You can retrieve status from +// a maximum of 100 processes. +func (c *ApplicationDiscoveryService) DescribeExportConfigurations(input *DescribeExportConfigurationsInput) (*DescribeExportConfigurationsOutput, error) { + req, out := c.DescribeExportConfigurationsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeTags = "DescribeTags" + +// DescribeTagsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeTagsRequest method. 
+// req, resp := client.DescribeTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ApplicationDiscoveryService) DescribeTagsRequest(input *DescribeTagsInput) (req *request.Request, output *DescribeTagsOutput) { + op := &request.Operation{ + Name: opDescribeTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTagsOutput{} + req.Data = output + return +} + +// Retrieves a list of configuration items that are tagged with a specific tag. +// Or retrieves a list of all tags assigned to a specific configuration item. +func (c *ApplicationDiscoveryService) DescribeTags(input *DescribeTagsInput) (*DescribeTagsOutput, error) { + req, out := c.DescribeTagsRequest(input) + err := req.Send() + return out, err +} + +const opExportConfigurations = "ExportConfigurations" + +// ExportConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ExportConfigurations operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ExportConfigurations method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ExportConfigurationsRequest method. +// req, resp := client.ExportConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ApplicationDiscoveryService) ExportConfigurationsRequest(input *ExportConfigurationsInput) (req *request.Request, output *ExportConfigurationsOutput) { + op := &request.Operation{ + Name: opExportConfigurations, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ExportConfigurationsInput{} + } + + req = c.newRequest(op, input, output) + output = &ExportConfigurationsOutput{} + req.Data = output + return +} + +// Exports all discovered configuration data to an Amazon S3 bucket or an application +// that enables you to view and evaluate the data. Data includes tags and tag +// associations, processes, connections, servers, and system performance. This +// API returns an export ID which you can query using the GetExportStatus API. +// The system imposes a limit of two configuration exports in six hours. +func (c *ApplicationDiscoveryService) ExportConfigurations(input *ExportConfigurationsInput) (*ExportConfigurationsOutput, error) { + req, out := c.ExportConfigurationsRequest(input) + err := req.Send() + return out, err +} + +const opListConfigurations = "ListConfigurations" + +// ListConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListConfigurations operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
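+//
+// As an illustrative sketch only (the logging handler below is hypothetical,
+// not part of the generated API), custom logic can be attached to the returned
+// request before it is sent:
+//
+//    req, resp := client.ListConfigurationsRequest(params)
+//    req.Handlers.Send.PushFront(func(r *request.Request) {
+//        // Runs immediately before the HTTP request is sent.
+//        fmt.Println("sending", r.Operation.Name)
+//    })
+//    err := req.Send()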
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ListConfigurations method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the ListConfigurationsRequest method.
+//    req, resp := client.ListConfigurationsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *ApplicationDiscoveryService) ListConfigurationsRequest(input *ListConfigurationsInput) (req *request.Request, output *ListConfigurationsOutput) {
+    op := &request.Operation{
+        Name:       opListConfigurations,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &ListConfigurationsInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &ListConfigurationsOutput{}
+    req.Data = output
+    return
+}
+
+// Retrieves a list of configuration items according to the criteria you specify
+// in a filter. The filter criteria identify relationship requirements.
+func (c *ApplicationDiscoveryService) ListConfigurations(input *ListConfigurationsInput) (*ListConfigurationsOutput, error) {
+    req, out := c.ListConfigurationsRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opStartDataCollectionByAgentIds = "StartDataCollectionByAgentIds"
+
+// StartDataCollectionByAgentIdsRequest generates a "aws/request.Request" representing the
+// client's request for the StartDataCollectionByAgentIds operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the StartDataCollectionByAgentIds method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the StartDataCollectionByAgentIdsRequest method.
+//    req, resp := client.StartDataCollectionByAgentIdsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *ApplicationDiscoveryService) StartDataCollectionByAgentIdsRequest(input *StartDataCollectionByAgentIdsInput) (req *request.Request, output *StartDataCollectionByAgentIdsOutput) {
+    op := &request.Operation{
+        Name:       opStartDataCollectionByAgentIds,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &StartDataCollectionByAgentIdsInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &StartDataCollectionByAgentIdsOutput{}
+    req.Data = output
+    return
+}
+
+// Instructs the specified agents to start collecting data. Agents can reside
+// on host servers or virtual machines in your data center.
+func (c *ApplicationDiscoveryService) StartDataCollectionByAgentIds(input *StartDataCollectionByAgentIdsInput) (*StartDataCollectionByAgentIdsOutput, error) { + req, out := c.StartDataCollectionByAgentIdsRequest(input) + err := req.Send() + return out, err +} + +const opStopDataCollectionByAgentIds = "StopDataCollectionByAgentIds" + +// StopDataCollectionByAgentIdsRequest generates a "aws/request.Request" representing the +// client's request for the StopDataCollectionByAgentIds operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the StopDataCollectionByAgentIds method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the StopDataCollectionByAgentIdsRequest method. +// req, resp := client.StopDataCollectionByAgentIdsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ApplicationDiscoveryService) StopDataCollectionByAgentIdsRequest(input *StopDataCollectionByAgentIdsInput) (req *request.Request, output *StopDataCollectionByAgentIdsOutput) { + op := &request.Operation{ + Name: opStopDataCollectionByAgentIds, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopDataCollectionByAgentIdsInput{} + } + + req = c.newRequest(op, input, output) + output = &StopDataCollectionByAgentIdsOutput{} + req.Data = output + return +} + +// Instructs the specified agents to stop collecting data. +func (c *ApplicationDiscoveryService) StopDataCollectionByAgentIds(input *StopDataCollectionByAgentIdsInput) (*StopDataCollectionByAgentIdsOutput, error) { + req, out := c.StopDataCollectionByAgentIdsRequest(input) + err := req.Send() + return out, err +} + +// Information about agents that were instructed to start collecting data. Information +// includes the agent ID, a description of the operation, and whether or not +// the agent configuration was updated. +type AgentConfigurationStatus struct { + _ struct{} `type:"structure"` + + // The agent ID. + AgentId *string `locationName:"agentId" type:"string"` + + // A description of the operation performed. + Description *string `locationName:"description" type:"string"` + + // Information about the status of the StartDataCollection and StopDataCollection + // operations. The system has recorded the data collection operation. The agent + // receives this command the next time it polls for a new command. + OperationSucceeded *bool `locationName:"operationSucceeded" type:"boolean"` +} + +// String returns the string representation +func (s AgentConfigurationStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AgentConfigurationStatus) GoString() string { + return s.String() +} + +// Information about agents associated with the user’s AWS account. Information +// includes agent IDs, IP addresses, media access control (MAC) addresses, agent +// health, hostname where the agent resides, and agent version for each agent. +type AgentInfo struct { + _ struct{} `type:"structure"` + + // The agent ID. 
+ AgentId *string `locationName:"agentId" type:"string"` + + // Network details about the host where the agent resides. + AgentNetworkInfoList []*AgentNetworkInfo `locationName:"agentNetworkInfoList" type:"list"` + + // This data type is currently not valid. + ConnectorId *string `locationName:"connectorId" type:"string"` + + // The health of the agent. + Health *string `locationName:"health" type:"string" enum:"AgentStatus"` + + // The name of the host where the agent resides. The host can be a server or + // virtual machine. + HostName *string `locationName:"hostName" type:"string"` + + // The agent version. + Version *string `locationName:"version" type:"string"` +} + +// String returns the string representation +func (s AgentInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AgentInfo) GoString() string { + return s.String() +} + +// Network details about the host where the agent resides. +type AgentNetworkInfo struct { + _ struct{} `type:"structure"` + + // The IP address for the host where the agent resides. + IpAddress *string `locationName:"ipAddress" type:"string"` + + // The MAC address for the host where the agent resides. + MacAddress *string `locationName:"macAddress" type:"string"` +} + +// String returns the string representation +func (s AgentNetworkInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AgentNetworkInfo) GoString() string { + return s.String() +} + +// Tags for a configuration item. Tags are metadata that help you categorize +// IT assets. +type ConfigurationTag struct { + _ struct{} `type:"structure"` + + // The configuration ID for the item you want to tag. You can specify a list + // of keys and values. + ConfigurationId *string `locationName:"configurationId" type:"string"` + + // A type of IT asset that you want to tag. + ConfigurationType *string `locationName:"configurationType" type:"string" enum:"ConfigurationItemType"` + + // A type of tag to filter on. For example, serverType. + Key *string `locationName:"key" type:"string"` + + // The time the configuration tag was created in Coordinated Universal Time + // (UTC). + TimeOfCreation *time.Time `locationName:"timeOfCreation" type:"timestamp" timestampFormat:"unix"` + + // A value to filter on. For example key = serverType and value = web server. + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s ConfigurationTag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfigurationTag) GoString() string { + return s.String() +} + +type CreateTagsInput struct { + _ struct{} `type:"structure"` + + // A list of configuration items that you want to tag. + ConfigurationIds []*string `locationName:"configurationIds" type:"list" required:"true"` + + // Tags that you want to associate with one or more configuration items. Specify + // the tags that you want to create in a key-value format. For example: + // + // {"key": "serverType", "value": "webServer"} + Tags []*Tag `locationName:"tags" locationNameList:"item" type:"list" required:"true"` +} + +// String returns the string representation +func (s CreateTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTagsInput"} + if s.ConfigurationIds == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigurationIds")) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTagsOutput) GoString() string { + return s.String() +} + +type DeleteTagsInput struct { + _ struct{} `type:"structure"` + + // A list of configuration items with tags that you want to delete. + ConfigurationIds []*string `locationName:"configurationIds" type:"list" required:"true"` + + // Tags that you want to delete from one or more configuration items. Specify + // the tags that you want to delete in a key-value format. For example: + // + // {"key": "serverType", "value": "webServer"} + Tags []*Tag `locationName:"tags" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DeleteTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTagsInput"} + if s.ConfigurationIds == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigurationIds")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTagsOutput) GoString() string { + return s.String() +} + +type DescribeAgentsInput struct { + _ struct{} `type:"structure"` + + // The agent IDs for which you want information. If you specify no IDs, the + // system returns information about all agents associated with your AWS user + // account. + AgentIds []*string `locationName:"agentIds" type:"list"` + + // The total number of agents to return. The maximum value is 100. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // A token to start the list. Use this token to get the next set of results. 
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeAgentsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAgentsInput) GoString() string { + return s.String() +} + +type DescribeAgentsOutput struct { + _ struct{} `type:"structure"` + + // Lists AWS agents by ID or lists all agents associated with your user account + // if you did not specify an agent ID. The output includes agent IDs, IP addresses, + // media access control (MAC) addresses, agent health, host name where the agent + // resides, and the version number of each agent. + AgentsInfo []*AgentInfo `locationName:"agentsInfo" type:"list"` + + // The call returns a token. Use this token to get the next set of results. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeAgentsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAgentsOutput) GoString() string { + return s.String() +} + +type DescribeConfigurationsInput struct { + _ struct{} `type:"structure"` + + // One or more configuration IDs. + ConfigurationIds []*string `locationName:"configurationIds" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeConfigurationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeConfigurationsInput"} + if s.ConfigurationIds == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigurationIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // A key in the response map. The value is an array of data. + Configurations []map[string]*string `locationName:"configurations" type:"list"` +} + +// String returns the string representation +func (s DescribeConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeConfigurationsOutput) GoString() string { + return s.String() +} + +type DescribeExportConfigurationsInput struct { + _ struct{} `type:"structure"` + + // A unique identifier that you can use to query the export status. + ExportIds []*string `locationName:"exportIds" type:"list"` + + // The maximum number of results that you want to display as a part of the query. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // A token to get the next set of results. For example, if you specified 100 + // IDs for DescribeConfigurationsRequest$configurationIds but set DescribeExportConfigurationsRequest$maxResults + // to 10, you will get results in a set of 10. Use the token in the query to + // get the next set of 10. 
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeExportConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeExportConfigurationsInput) GoString() string { + return s.String() +} + +type DescribeExportConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // Returns export details. When the status is complete, the response includes + // a URL for an Amazon S3 bucket where you can view the data in a CSV file. + ExportsInfo []*ExportInfo `locationName:"exportsInfo" type:"list"` + + // A token to get the next set of results. For example, if you specified 100 + // IDs for DescribeConfigurationsRequest$configurationIds but set DescribeExportConfigurationsRequest$maxResults + // to 10, you will get results in a set of 10. Use the token in the query to + // get the next set of 10. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeExportConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeExportConfigurationsOutput) GoString() string { + return s.String() +} + +type DescribeTagsInput struct { + _ struct{} `type:"structure"` + + // You can filter the list using a key-value format. You can separate these + // items by using logical operators. Allowed filters include tagKey, tagValue, + // and configurationId. + Filters []*TagFilter `locationName:"filters" type:"list"` + + // The total number of items to return. The maximum value is 100. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // A token to start the list. Use this token to get the next set of results. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTagsInput"} + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeTagsOutput struct { + _ struct{} `type:"structure"` + + // The call returns a token. Use this token to get the next set of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // Depending on the input, this is a list of configuration items tagged with + // a specific tag, or a list of tags for a specific configuration item. 
+ Tags []*ConfigurationTag `locationName:"tags" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTagsOutput) GoString() string { + return s.String() +} + +type ExportConfigurationsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ExportConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportConfigurationsInput) GoString() string { + return s.String() +} + +type ExportConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // A unique identifier that you can use to query the export status. + ExportId *string `locationName:"exportId" type:"string"` +} + +// String returns the string representation +func (s ExportConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportConfigurationsOutput) GoString() string { + return s.String() +} + +// Information regarding the export status of the discovered data. The value +// is an array of objects. +type ExportInfo struct { + _ struct{} `type:"structure"` + + // A URL for an Amazon S3 bucket where you can review the configuration data. + // The URL is displayed only if the export succeeded. + ConfigurationsDownloadUrl *string `locationName:"configurationsDownloadUrl" type:"string"` + + // A unique identifier that you can use to query the export. + ExportId *string `locationName:"exportId" type:"string" required:"true"` + + // The time the configuration data export was initiated. + ExportRequestTime *time.Time `locationName:"exportRequestTime" type:"timestamp" timestampFormat:"unix" required:"true"` + + // The status of the configuration data export. The status can succeed, fail, + // or be in-progress. + ExportStatus *string `locationName:"exportStatus" type:"string" required:"true" enum:"ExportStatus"` + + // Helpful status messages for API callers. For example: Too many exports in + // the last 6 hours. Export in progress. Export was successful. + StatusMessage *string `locationName:"statusMessage" type:"string" required:"true"` +} + +// String returns the string representation +func (s ExportInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportInfo) GoString() string { + return s.String() +} + +// A filter that can use conditional operators. +type Filter struct { + _ struct{} `type:"structure"` + + // A conditional operator. The following operators are valid: EQUALS, NOT_EQUALS, + // CONTAINS, NOT_CONTAINS. If you specify multiple filters, the system utilizes + // all filters as though concatenated by AND. If you specify multiple values + // for a particular filter, the system differentiates the values using OR. Calling + // either DescribeConfigurations or ListConfigurations returns attributes of + // matching configuration items. + Condition *string `locationName:"condition" type:"string" required:"true"` + + // The name of the filter. The following filter names are allowed for SERVER + // configuration items. + // + // Server server.hostName + // + // server.osName + // + // server.osVersion + // + // server.configurationid + // + // server.agentid + // + // The name of the filter. The following filter names are allowed for PROCESS + // configuration items. 
+ // + // Process process.configurationid + // + // process.name + // + // process.commandLine + // + // server.configurationid + // + // server.hostName + // + // server.osName + // + // server.osVersion + // + // server.agentId + // + // The name of the filter. The following filter names are allowed for CONNECTION + // configuration items. + // + // Connection connection.sourceIp + // + // connection.destinationIp + // + // connection.destinationPort + // + // sourceProcess.configurationId + // + // sourceProcess.name + // + // sourceProcess.commandLine + // + // destinationProcess.configurationId + // + // destinationProcess.name + // + // destinationProcess.commandLine + // + // sourceServer.configurationId + // + // sourceServer.hostName + // + // sourceServer.osName + // + // sourceServer.osVersion + // + // sourceServer.agentId + // + // destinationServer.configurationId + // + // destinationServer.hostName + // + // destinationServer.osName + // + // destinationServer.osVersion + // + // destinationServer.agentId + Name *string `locationName:"name" type:"string" required:"true"` + + // A string value that you want to filter on. For example, if you choose the + // destinationServer.osVersion filter name, you could specify Ubuntu for the + // value. + Values []*string `locationName:"values" locationNameList:"item" type:"list" required:"true"` +} + +// String returns the string representation +func (s Filter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Filter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Filter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Filter"} + if s.Condition == nil { + invalidParams.Add(request.NewErrParamRequired("Condition")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Values == nil { + invalidParams.Add(request.NewErrParamRequired("Values")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListConfigurationsInput struct { + _ struct{} `type:"structure"` + + // A valid configuration identified by the Discovery Service. + ConfigurationType *string `locationName:"configurationType" type:"string" required:"true" enum:"ConfigurationItemType"` + + // You can filter the list using a key-value format. For example: + // + // {"key": "serverType", "value": "webServer"} + // + // You can separate these items by using logical operators. + Filters []*Filter `locationName:"filters" type:"list"` + + // The total number of items to return. The maximum value is 100. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // A token to start the list. Use this token to get the next set of results. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListConfigurationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListConfigurationsInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "ListConfigurationsInput"}
+    if s.ConfigurationType == nil {
+        invalidParams.Add(request.NewErrParamRequired("ConfigurationType"))
+    }
+    if s.Filters != nil {
+        for i, v := range s.Filters {
+            if v == nil {
+                continue
+            }
+            if err := v.Validate(); err != nil {
+                invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams))
+            }
+        }
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+type ListConfigurationsOutput struct {
+    _ struct{} `type:"structure"`
+
+    // Returns configuration details, including the configuration ID, attribute
+    // names, and attribute values.
+    Configurations []map[string]*string `locationName:"configurations" type:"list"`
+
+    // The call returns a token. Use this token to get the next set of results.
+    NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListConfigurationsOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListConfigurationsOutput) GoString() string {
+    return s.String()
+}
+
+type StartDataCollectionByAgentIdsInput struct {
+    _ struct{} `type:"structure"`
+
+    // The IDs of the agents that you want to start collecting data. If you send
+    // a request to an AWS agent ID that your AWS account does not have permission
+    // to contact, the service does not throw an exception. Instead, it returns
+    // the error in the Description field. If you send a request to multiple
+    // agents and you do not have permission to contact some of those agents, the
+    // system does not throw an exception. Instead, the system shows Failed in the
+    // Description field for those agents.
+    AgentIds []*string `locationName:"agentIds" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s StartDataCollectionByAgentIdsInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StartDataCollectionByAgentIdsInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *StartDataCollectionByAgentIdsInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "StartDataCollectionByAgentIdsInput"}
+    if s.AgentIds == nil {
+        invalidParams.Add(request.NewErrParamRequired("AgentIds"))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+type StartDataCollectionByAgentIdsOutput struct {
+    _ struct{} `type:"structure"`
+
+    // Information about agents that were instructed to start collecting data. Information
+    // includes the agent ID, a description of the operation performed, and whether
+    // or not the agent configuration was updated.
+    AgentsConfigurationStatus []*AgentConfigurationStatus `locationName:"agentsConfigurationStatus" type:"list"`
+}
+
+// String returns the string representation
+func (s StartDataCollectionByAgentIdsOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StartDataCollectionByAgentIdsOutput) GoString() string {
+    return s.String()
+}
+
+type StopDataCollectionByAgentIdsInput struct {
+    _ struct{} `type:"structure"`
+
+    // The IDs of the agents that you want to stop collecting data.
+ AgentIds []*string `locationName:"agentIds" type:"list" required:"true"` +} + +// String returns the string representation +func (s StopDataCollectionByAgentIdsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopDataCollectionByAgentIdsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StopDataCollectionByAgentIdsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StopDataCollectionByAgentIdsInput"} + if s.AgentIds == nil { + invalidParams.Add(request.NewErrParamRequired("AgentIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type StopDataCollectionByAgentIdsOutput struct { + _ struct{} `type:"structure"` + + // Information about agents that were instructed to stop collecting data. Information + // includes the agent ID, a description of the operation performed, and whether + // or not the agent configuration was updated. + AgentsConfigurationStatus []*AgentConfigurationStatus `locationName:"agentsConfigurationStatus" type:"list"` +} + +// String returns the string representation +func (s StopDataCollectionByAgentIdsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopDataCollectionByAgentIdsOutput) GoString() string { + return s.String() +} + +// Metadata that help you categorize IT assets. +type Tag struct { + _ struct{} `type:"structure"` + + // A type of tag to filter on. + Key *string `locationName:"key" type:"string" required:"true"` + + // A value for a tag key to filter on. + Value *string `locationName:"value" type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The name of a tag filter. Valid names are: tagKey, tagValue, configurationId. +type TagFilter struct { + _ struct{} `type:"structure"` + + // A name of a tag filter. + Name *string `locationName:"name" type:"string" required:"true"` + + // Values of a tag filter. + Values []*string `locationName:"values" locationNameList:"item" type:"list" required:"true"` +} + +// String returns the string representation +func (s TagFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *TagFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagFilter"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Values == nil { + invalidParams.Add(request.NewErrParamRequired("Values")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +const ( + // @enum AgentStatus + AgentStatusHealthy = "HEALTHY" + // @enum AgentStatus + AgentStatusUnhealthy = "UNHEALTHY" + // @enum AgentStatus + AgentStatusRunning = "RUNNING" + // @enum AgentStatus + AgentStatusUnknown = "UNKNOWN" + // @enum AgentStatus + AgentStatusBlacklisted = "BLACKLISTED" + // @enum AgentStatus + AgentStatusShutdown = "SHUTDOWN" +) + +const ( + // @enum ConfigurationItemType + ConfigurationItemTypeServer = "SERVER" + // @enum ConfigurationItemType + ConfigurationItemTypeProcess = "PROCESS" + // @enum ConfigurationItemType + ConfigurationItemTypeConnection = "CONNECTION" +) + +const ( + // @enum ExportStatus + ExportStatusFailed = "FAILED" + // @enum ExportStatus + ExportStatusSucceeded = "SUCCEEDED" + // @enum ExportStatus + ExportStatusInProgress = "IN_PROGRESS" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/applicationdiscoveryservice/applicationdiscoveryserviceiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/applicationdiscoveryservice/applicationdiscoveryserviceiface/interface.go new file mode 100644 index 000000000..b389d1634 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/applicationdiscoveryservice/applicationdiscoveryserviceiface/interface.go @@ -0,0 +1,54 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package applicationdiscoveryserviceiface provides an interface for the AWS Application Discovery Service. +package applicationdiscoveryserviceiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/applicationdiscoveryservice" +) + +// ApplicationDiscoveryServiceAPI is the interface type for applicationdiscoveryservice.ApplicationDiscoveryService. 
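+//
+// A common reason to depend on this interface instead of the concrete client
+// is to allow stubbing in unit tests. A minimal sketch, assuming a test in the
+// consuming package; mockADSClient and its canned response are illustrative,
+// not part of this package:
+//
+//    type mockADSClient struct {
+//        applicationdiscoveryserviceiface.ApplicationDiscoveryServiceAPI
+//    }
+//
+//    func (m *mockADSClient) DescribeAgents(in *applicationdiscoveryservice.DescribeAgentsInput) (*applicationdiscoveryservice.DescribeAgentsOutput, error) {
+//        // Return a fixed, empty result instead of calling AWS.
+//        return &applicationdiscoveryservice.DescribeAgentsOutput{}, nil
+//    }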
+type ApplicationDiscoveryServiceAPI interface { + CreateTagsRequest(*applicationdiscoveryservice.CreateTagsInput) (*request.Request, *applicationdiscoveryservice.CreateTagsOutput) + + CreateTags(*applicationdiscoveryservice.CreateTagsInput) (*applicationdiscoveryservice.CreateTagsOutput, error) + + DeleteTagsRequest(*applicationdiscoveryservice.DeleteTagsInput) (*request.Request, *applicationdiscoveryservice.DeleteTagsOutput) + + DeleteTags(*applicationdiscoveryservice.DeleteTagsInput) (*applicationdiscoveryservice.DeleteTagsOutput, error) + + DescribeAgentsRequest(*applicationdiscoveryservice.DescribeAgentsInput) (*request.Request, *applicationdiscoveryservice.DescribeAgentsOutput) + + DescribeAgents(*applicationdiscoveryservice.DescribeAgentsInput) (*applicationdiscoveryservice.DescribeAgentsOutput, error) + + DescribeConfigurationsRequest(*applicationdiscoveryservice.DescribeConfigurationsInput) (*request.Request, *applicationdiscoveryservice.DescribeConfigurationsOutput) + + DescribeConfigurations(*applicationdiscoveryservice.DescribeConfigurationsInput) (*applicationdiscoveryservice.DescribeConfigurationsOutput, error) + + DescribeExportConfigurationsRequest(*applicationdiscoveryservice.DescribeExportConfigurationsInput) (*request.Request, *applicationdiscoveryservice.DescribeExportConfigurationsOutput) + + DescribeExportConfigurations(*applicationdiscoveryservice.DescribeExportConfigurationsInput) (*applicationdiscoveryservice.DescribeExportConfigurationsOutput, error) + + DescribeTagsRequest(*applicationdiscoveryservice.DescribeTagsInput) (*request.Request, *applicationdiscoveryservice.DescribeTagsOutput) + + DescribeTags(*applicationdiscoveryservice.DescribeTagsInput) (*applicationdiscoveryservice.DescribeTagsOutput, error) + + ExportConfigurationsRequest(*applicationdiscoveryservice.ExportConfigurationsInput) (*request.Request, *applicationdiscoveryservice.ExportConfigurationsOutput) + + ExportConfigurations(*applicationdiscoveryservice.ExportConfigurationsInput) (*applicationdiscoveryservice.ExportConfigurationsOutput, error) + + ListConfigurationsRequest(*applicationdiscoveryservice.ListConfigurationsInput) (*request.Request, *applicationdiscoveryservice.ListConfigurationsOutput) + + ListConfigurations(*applicationdiscoveryservice.ListConfigurationsInput) (*applicationdiscoveryservice.ListConfigurationsOutput, error) + + StartDataCollectionByAgentIdsRequest(*applicationdiscoveryservice.StartDataCollectionByAgentIdsInput) (*request.Request, *applicationdiscoveryservice.StartDataCollectionByAgentIdsOutput) + + StartDataCollectionByAgentIds(*applicationdiscoveryservice.StartDataCollectionByAgentIdsInput) (*applicationdiscoveryservice.StartDataCollectionByAgentIdsOutput, error) + + StopDataCollectionByAgentIdsRequest(*applicationdiscoveryservice.StopDataCollectionByAgentIdsInput) (*request.Request, *applicationdiscoveryservice.StopDataCollectionByAgentIdsOutput) + + StopDataCollectionByAgentIds(*applicationdiscoveryservice.StopDataCollectionByAgentIdsInput) (*applicationdiscoveryservice.StopDataCollectionByAgentIdsOutput, error) +} + +var _ ApplicationDiscoveryServiceAPI = (*applicationdiscoveryservice.ApplicationDiscoveryService)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/applicationdiscoveryservice/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/applicationdiscoveryservice/examples_test.go new file mode 100644 index 000000000..0f09bcc6f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/applicationdiscoveryservice/examples_test.go @@ -0,0 
+1,267 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package applicationdiscoveryservice_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/applicationdiscoveryservice" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleApplicationDiscoveryService_CreateTags() { + svc := applicationdiscoveryservice.New(session.New()) + + params := &applicationdiscoveryservice.CreateTagsInput{ + ConfigurationIds: []*string{ // Required + aws.String("ConfigurationId"), // Required + // More values... + }, + Tags: []*applicationdiscoveryservice.Tag{ // Required + { // Required + Key: aws.String("TagKey"), // Required + Value: aws.String("TagValue"), // Required + }, + // More values... + }, + } + resp, err := svc.CreateTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleApplicationDiscoveryService_DeleteTags() { + svc := applicationdiscoveryservice.New(session.New()) + + params := &applicationdiscoveryservice.DeleteTagsInput{ + ConfigurationIds: []*string{ // Required + aws.String("ConfigurationId"), // Required + // More values... + }, + Tags: []*applicationdiscoveryservice.Tag{ + { // Required + Key: aws.String("TagKey"), // Required + Value: aws.String("TagValue"), // Required + }, + // More values... + }, + } + resp, err := svc.DeleteTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleApplicationDiscoveryService_DescribeAgents() { + svc := applicationdiscoveryservice.New(session.New()) + + params := &applicationdiscoveryservice.DescribeAgentsInput{ + AgentIds: []*string{ + aws.String("AgentId"), // Required + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.DescribeAgents(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleApplicationDiscoveryService_DescribeConfigurations() { + svc := applicationdiscoveryservice.New(session.New()) + + params := &applicationdiscoveryservice.DescribeConfigurationsInput{ + ConfigurationIds: []*string{ // Required + aws.String("ConfigurationId"), // Required + // More values... + }, + } + resp, err := svc.DescribeConfigurations(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleApplicationDiscoveryService_DescribeExportConfigurations() { + svc := applicationdiscoveryservice.New(session.New()) + + params := &applicationdiscoveryservice.DescribeExportConfigurationsInput{ + ExportIds: []*string{ + aws.String("ConfigurationsExportId"), // Required + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.DescribeExportConfigurations(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleApplicationDiscoveryService_DescribeTags() { + svc := applicationdiscoveryservice.New(session.New()) + + params := &applicationdiscoveryservice.DescribeTagsInput{ + Filters: []*applicationdiscoveryservice.TagFilter{ + { // Required + Name: aws.String("FilterName"), // Required + Values: []*string{ // Required + aws.String("FilterValue"), // Required + // More values... + }, + }, + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.DescribeTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleApplicationDiscoveryService_ExportConfigurations() { + svc := applicationdiscoveryservice.New(session.New()) + + var params *applicationdiscoveryservice.ExportConfigurationsInput + resp, err := svc.ExportConfigurations(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleApplicationDiscoveryService_ListConfigurations() { + svc := applicationdiscoveryservice.New(session.New()) + + params := &applicationdiscoveryservice.ListConfigurationsInput{ + ConfigurationType: aws.String("ConfigurationItemType"), // Required + Filters: []*applicationdiscoveryservice.Filter{ + { // Required + Condition: aws.String("Condition"), // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("FilterValue"), // Required + // More values... + }, + }, + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListConfigurations(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleApplicationDiscoveryService_StartDataCollectionByAgentIds() { + svc := applicationdiscoveryservice.New(session.New()) + + params := &applicationdiscoveryservice.StartDataCollectionByAgentIdsInput{ + AgentIds: []*string{ // Required + aws.String("AgentId"), // Required + // More values... + }, + } + resp, err := svc.StartDataCollectionByAgentIds(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleApplicationDiscoveryService_StopDataCollectionByAgentIds() { + svc := applicationdiscoveryservice.New(session.New()) + + params := &applicationdiscoveryservice.StopDataCollectionByAgentIdsInput{ + AgentIds: []*string{ // Required + aws.String("AgentId"), // Required + // More values... + }, + } + resp, err := svc.StopDataCollectionByAgentIds(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/applicationdiscoveryservice/service.go b/vendor/github.com/aws/aws-sdk-go/service/applicationdiscoveryservice/service.go new file mode 100644 index 000000000..0e3ad5726 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/applicationdiscoveryservice/service.go @@ -0,0 +1,282 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package applicationdiscoveryservice + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// The AWS Application Discovery Service helps Systems Integrators quickly and +// reliably plan application migration projects by automatically identifying +// applications running in on-premises data centers, their associated dependencies, +// and their performance profile. +// +// Planning data center migrations can involve thousands of workloads that +// are often deeply interdependent. Application discovery and dependency mapping +// are important early first steps in the migration process, but difficult to +// perform at scale due to the lack of automated tools. +// +// The AWS Application Discovery Service automatically collects configuration +// and usage data from servers to develop a list of applications, how they perform, +// and how they are interdependent. This information is securely retained in +// an AWS Application Discovery Service database which you can export as a CSV +// file into your preferred visualization tool or cloud migration solution to +// help reduce the complexity and time in planning your cloud migration. +// +// The Application Discovery Service is currently available for preview. Only +// customers who are engaged with AWS Professional Services (https://aws.amazon.com/professional-services/) +// or a certified AWS partner can use the service. To see the list of certified +// partners and request access to the Application Discovery Service, complete +// the following preview form (http://aws.amazon.com/application-discovery/preview/). +// +// This API reference provides descriptions, syntax, and usage examples for +// each of the actions and data types for the Discovery Service. The topic for +// each action shows the API request parameters and the response. Alternatively, +// you can use one of the AWS SDKs to access an API that is tailored to the +// programming language or platform that you're using. For more information, +// see AWS SDKs (http://aws.amazon.com/tools/#SDKs). +// +// This guide is intended for use with the AWS Discovery Service User Guide +// (http://docs.aws.amazon.com/application-discovery/latest/userguide/what-is-appdiscovery.html). +// +// The following are short descriptions of each API action, organized by function. +// +// Managing AWS Agents Using the Application Discovery Service +// +// An AWS agent is software that you install on on-premises servers and virtual +// machines that are targeted for discovery and migration. Agents run on Linux +// and Windows Server and collect server configuration and activity information +// about your applications and infrastructure. 
Specifically, agents collect +// the following information and send it to the Application Discovery Service +// using Secure Sockets Layer (SSL) encryption: +// +// User information (user name, home directory) +// +// Group information (name) +// +// List of installed packages +// +// List of kernel modules +// +// All create and stop process events +// +// DNS queries +// +// NIC information +// +// TCP/UDP process listening ports +// +// TCPV4/V6 connections +// +// Operating system information +// +// System performance +// +// Process performance +// +// The Application Discovery Service API includes the following actions to +// manage AWS agents: +// +// StartDataCollectionByAgentIds: Instructs the specified agents to start +// collecting data. The Application Discovery Service takes several minutes +// to receive and process data after you initiate data collection. +// +// StopDataCollectionByAgentIds: Instructs the specified agents to stop +// collecting data. +// +// DescribeAgents: Lists AWS agents by ID or lists all agents associated +// with your user account if you did not specify an agent ID. The output includes +// agent IDs, IP addresses, media access control (MAC) addresses, agent health, +// host name where the agent resides, and the version number of each agent. +// +// Querying Configuration Items +// +// A configuration item is an IT asset that was discovered in your data center +// by an AWS agent. When you use the Application Discovery Service, you can +// specify filters and query specific configuration items. The service supports +// Server, Process, and Connection configuration items. This means you can specify +// a value for the following keys and query your IT assets: +// +// Server server.HostName +// +// server.osName +// +// server.osVersion +// +// server.configurationId +// +// server.agentId +// +// Process process.name +// +// process.CommandLine +// +// process.configurationId +// +// server.hostName +// +// server.osName +// +// server.osVersion +// +// server.configurationId +// +// server.agentId +// +// Connection connection.sourceIp +// +// connection.sourcePort +// +// connection.destinationIp +// +// connection.destinationPort +// +// sourceProcess.configurationId +// +// sourceProcess.commandLine +// +// sourceProcess.name +// +// destinationProcessId.configurationId +// +// destinationProcess.commandLine +// +// destinationProcess.name +// +// sourceServer.configurationId +// +// sourceServer.hostName +// +// sourceServer.osName +// +// sourceServer.osVersion +// +// destinationServer.configurationId +// +// destinationServer.hostName +// +// destinationServer.osName +// +// destinationServer.osVersion +// +// The Application Discovery Service includes the following actions for querying +// configuration items. +// +// DescribeConfigurations: Retrieves a list of attributes for a specific +// configuration ID. For example, the output for a server configuration item +// includes a list of attributes about the server, including host name, operating +// system, number of network cards, etc. +// +// ListConfigurations: Retrieves a list of configuration items according +// to the criteria you specify in a filter. The filter criteria identify relationship +// requirements. For example, you can specify filter criteria of process.name +// with values of nginx and apache. +// +// Tagging Discovered Configuration Items +// +// You can tag discovered configuration items. Tags are metadata that help +// you categorize IT assets in your data center. 
+// For example, {"key": "serverType", "value": "webServer"}.
+//
+// CreateTags: Creates one or more tags for one or more configuration items.
+//
+// DescribeTags: Retrieves a list of configuration items that are tagged
+// with a specific tag. Or, retrieves a list of all tags assigned to a specific
+// configuration item.
+//
+// DeleteTags: Deletes the association between a configuration item and
+// one or more tags.
+//
+// Exporting Data
+//
+// You can export data as a CSV file to an Amazon S3 bucket or into your preferred
+// visualization tool or cloud migration solution to help reduce the complexity
+// and time in planning your cloud migration.
+//
+// ExportConfigurations: Exports all discovered configuration data to an
+// Amazon S3 bucket. Data includes tags and tag associations, processes, connections,
+// servers, and system performance. This API returns an export ID which you
+// can query using the DescribeExportConfigurations API.
+//
+// DescribeExportConfigurations: Gets the status of the data export. When
+// the export is complete, the service returns an Amazon S3 URL where you can
+// download CSV files that include the data.
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type ApplicationDiscoveryService struct {
+    *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "discovery"
+
+// New creates a new instance of the ApplicationDiscoveryService client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create an ApplicationDiscoveryService client from just a session.
+//     svc := applicationdiscoveryservice.New(mySession)
+//
+//     // Create an ApplicationDiscoveryService client with additional configuration
+//     svc := applicationdiscoveryservice.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *ApplicationDiscoveryService {
+    c := p.ClientConfig(ServiceName, cfgs...)
+    return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *ApplicationDiscoveryService {
+    svc := &ApplicationDiscoveryService{
+        Client: client.New(
+            cfg,
+            metadata.ClientInfo{
+                ServiceName:   ServiceName,
+                SigningRegion: signingRegion,
+                Endpoint:      endpoint,
+                APIVersion:    "2015-11-01",
+                JSONVersion:   "1.1",
+                TargetPrefix:  "AWSPoseidonService_V2015_11_01",
+            },
+            handlers,
+        ),
+    }
+
+    // Handlers
+    svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+    svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler)
+    svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler)
+    svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler)
+    svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler)
+
+    // Run custom client initialization if present
+    if initClient != nil {
+        initClient(svc.Client)
+    }
+
+    return svc
+}
+
+// newRequest creates a new request for an ApplicationDiscoveryService operation and runs any
+// custom request initialization.
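+//
+// For illustration, a minimal sketch of constructing the client and invoking
+// one of the agent actions described above; request creation, signing, and
+// sending all run through the handlers wired up in newClient. The region and
+// the agent ID are assumptions for the example, not values defined by this
+// package:
+//
+//    sess := session.New(aws.NewConfig().WithRegion("us-west-2"))
+//    svc := applicationdiscoveryservice.New(sess)
+//
+//    resp, err := svc.StartDataCollectionByAgentIds(&applicationdiscoveryservice.StartDataCollectionByAgentIdsInput{
+//        AgentIds: []*string{aws.String("hypothetical-agent-id")},
+//    })
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }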
+func (c *ApplicationDiscoveryService) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go new file mode 100644 index 000000000..04caf266b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go @@ -0,0 +1,7179 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package autoscaling provides a client for Auto Scaling. +package autoscaling + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +const opAttachInstances = "AttachInstances" + +// AttachInstancesRequest generates a "aws/request.Request" representing the +// client's request for the AttachInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AttachInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AttachInstancesRequest method. +// req, resp := client.AttachInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) AttachInstancesRequest(input *AttachInstancesInput) (req *request.Request, output *AttachInstancesOutput) { + op := &request.Operation{ + Name: opAttachInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachInstancesInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AttachInstancesOutput{} + req.Data = output + return +} + +// Attaches one or more EC2 instances to the specified Auto Scaling group. +// +// When you attach instances, Auto Scaling increases the desired capacity of +// the group by the number of instances being attached. If the number of instances +// being attached plus the desired capacity of the group exceeds the maximum +// size of the group, the operation fails. +// +// For more information, see Attach EC2 Instances to Your Auto Scaling Group +// (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/attach-instance-asg.html) +// in the Auto Scaling Developer Guide. +func (c *AutoScaling) AttachInstances(input *AttachInstancesInput) (*AttachInstancesOutput, error) { + req, out := c.AttachInstancesRequest(input) + err := req.Send() + return out, err +} + +const opAttachLoadBalancers = "AttachLoadBalancers" + +// AttachLoadBalancersRequest generates a "aws/request.Request" representing the +// client's request for the AttachLoadBalancers operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AttachLoadBalancers method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AttachLoadBalancersRequest method. +// req, resp := client.AttachLoadBalancersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) AttachLoadBalancersRequest(input *AttachLoadBalancersInput) (req *request.Request, output *AttachLoadBalancersOutput) { + op := &request.Operation{ + Name: opAttachLoadBalancers, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachLoadBalancersInput{} + } + + req = c.newRequest(op, input, output) + output = &AttachLoadBalancersOutput{} + req.Data = output + return +} + +// Attaches one or more load balancers to the specified Auto Scaling group. +// +// To describe the load balancers for an Auto Scaling group, use DescribeLoadBalancers. +// To detach the load balancer from the Auto Scaling group, use DetachLoadBalancers. +// +// For more information, see Attach a Load Balancer to Your Auto Scaling Group +// (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/attach-load-balancer-asg.html) +// in the Auto Scaling Developer Guide. +func (c *AutoScaling) AttachLoadBalancers(input *AttachLoadBalancersInput) (*AttachLoadBalancersOutput, error) { + req, out := c.AttachLoadBalancersRequest(input) + err := req.Send() + return out, err +} + +const opCompleteLifecycleAction = "CompleteLifecycleAction" + +// CompleteLifecycleActionRequest generates a "aws/request.Request" representing the +// client's request for the CompleteLifecycleAction operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CompleteLifecycleAction method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CompleteLifecycleActionRequest method. +// req, resp := client.CompleteLifecycleActionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) CompleteLifecycleActionRequest(input *CompleteLifecycleActionInput) (req *request.Request, output *CompleteLifecycleActionOutput) { + op := &request.Operation{ + Name: opCompleteLifecycleAction, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CompleteLifecycleActionInput{} + } + + req = c.newRequest(op, input, output) + output = &CompleteLifecycleActionOutput{} + req.Data = output + return +} + +// Completes the lifecycle action for the specified token or instance with the +// specified result. 
+//
+// This step is a part of the procedure for adding a lifecycle hook to an Auto
+// Scaling group:
+//
+// (Optional) Create a Lambda function and a rule that allows CloudWatch Events
+// to invoke your Lambda function when Auto Scaling launches or terminates
+// instances.
+//
+// (Optional) Create a notification target and an IAM role. The target can be
+// either an Amazon SQS queue or an Amazon SNS topic. The role allows Auto Scaling
+// to publish lifecycle notifications to the target.
+//
+// Create the lifecycle hook. Specify whether the hook is used when the instances
+// launch or terminate.
+//
+// If you need more time, record the lifecycle action heartbeat to keep the
+// instance in a pending state.
+//
+// If you finish before the timeout period ends, complete the lifecycle action.
+//
+// For more information, see Auto Scaling Lifecycle
+// (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingGroupLifecycle.html)
+// in the Auto Scaling Developer Guide.
+func (c *AutoScaling) CompleteLifecycleAction(input *CompleteLifecycleActionInput) (*CompleteLifecycleActionOutput, error) {
+    req, out := c.CompleteLifecycleActionRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opCreateAutoScalingGroup = "CreateAutoScalingGroup"
+
+// CreateAutoScalingGroupRequest generates a "aws/request.Request" representing the
+// client's request for the CreateAutoScalingGroup operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CreateAutoScalingGroup method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the CreateAutoScalingGroupRequest method.
+//    req, resp := client.CreateAutoScalingGroupRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *AutoScaling) CreateAutoScalingGroupRequest(input *CreateAutoScalingGroupInput) (req *request.Request, output *CreateAutoScalingGroupOutput) {
+    op := &request.Operation{
+        Name:       opCreateAutoScalingGroup,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &CreateAutoScalingGroupInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
+    req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+    output = &CreateAutoScalingGroupOutput{}
+    req.Data = output
+    return
+}
+
+// Creates an Auto Scaling group with the specified name and attributes.
+//
+// If you exceed your maximum limit of Auto Scaling groups, which by default
+// is 20 per region, the call fails. For information about viewing and updating
+// this limit, see DescribeAccountLimits.
+//
+// For more information, see Auto Scaling Groups (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingGroup.html)
+// in the Auto Scaling Developer Guide.
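+//
+// For illustration, a minimal sketch of the direct call; client is assumed to
+// be an *AutoScaling value, and the group, launch configuration, and
+// Availability Zone names are hypothetical:
+//
+//    _, err := client.CreateAutoScalingGroup(&CreateAutoScalingGroupInput{
+//        AutoScalingGroupName:    aws.String("my-asg"),
+//        LaunchConfigurationName: aws.String("my-launch-config"),
+//        MinSize:                 aws.Int64(1),
+//        MaxSize:                 aws.Int64(3),
+//        AvailabilityZones:       []*string{aws.String("us-west-2a")},
+//    })
+//    if err != nil {
+//        fmt.Println(err)
+//    }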
+func (c *AutoScaling) CreateAutoScalingGroup(input *CreateAutoScalingGroupInput) (*CreateAutoScalingGroupOutput, error) { + req, out := c.CreateAutoScalingGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateLaunchConfiguration = "CreateLaunchConfiguration" + +// CreateLaunchConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the CreateLaunchConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateLaunchConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateLaunchConfigurationRequest method. +// req, resp := client.CreateLaunchConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) CreateLaunchConfigurationRequest(input *CreateLaunchConfigurationInput) (req *request.Request, output *CreateLaunchConfigurationOutput) { + op := &request.Operation{ + Name: opCreateLaunchConfiguration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLaunchConfigurationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CreateLaunchConfigurationOutput{} + req.Data = output + return +} + +// Creates a launch configuration. +// +// If you exceed your maximum limit of launch configurations, which by default +// is 100 per region, the call fails. For information about viewing and updating +// this limit, see DescribeAccountLimits. +// +// For more information, see Launch Configurations (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/LaunchConfiguration.html) +// in the Auto Scaling Developer Guide. +func (c *AutoScaling) CreateLaunchConfiguration(input *CreateLaunchConfigurationInput) (*CreateLaunchConfigurationOutput, error) { + req, out := c.CreateLaunchConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opCreateOrUpdateTags = "CreateOrUpdateTags" + +// CreateOrUpdateTagsRequest generates a "aws/request.Request" representing the +// client's request for the CreateOrUpdateTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateOrUpdateTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateOrUpdateTagsRequest method. 
+// req, resp := client.CreateOrUpdateTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) CreateOrUpdateTagsRequest(input *CreateOrUpdateTagsInput) (req *request.Request, output *CreateOrUpdateTagsOutput) { + op := &request.Operation{ + Name: opCreateOrUpdateTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateOrUpdateTagsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CreateOrUpdateTagsOutput{} + req.Data = output + return +} + +// Creates or updates tags for the specified Auto Scaling group. +// +// When you specify a tag with a key that already exists, the operation overwrites +// the previous tag definition, and you do not get an error message. +// +// For more information, see Tagging Auto Scaling Groups and Instances (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/ASTagging.html) +// in the Auto Scaling Developer Guide. +func (c *AutoScaling) CreateOrUpdateTags(input *CreateOrUpdateTagsInput) (*CreateOrUpdateTagsOutput, error) { + req, out := c.CreateOrUpdateTagsRequest(input) + err := req.Send() + return out, err +} + +const opDeleteAutoScalingGroup = "DeleteAutoScalingGroup" + +// DeleteAutoScalingGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteAutoScalingGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteAutoScalingGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteAutoScalingGroupRequest method. +// req, resp := client.DeleteAutoScalingGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) DeleteAutoScalingGroupRequest(input *DeleteAutoScalingGroupInput) (req *request.Request, output *DeleteAutoScalingGroupOutput) { + op := &request.Operation{ + Name: opDeleteAutoScalingGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAutoScalingGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteAutoScalingGroupOutput{} + req.Data = output + return +} + +// Deletes the specified Auto Scaling group. +// +// If the group has instances or scaling activities in progress, you must specify +// the option to force the deletion in order for it to succeed. +// +// If the group has policies, deleting the group deletes the policies, the +// underlying alarm actions, and any alarm that no longer has an associated +// action. 
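+//
+// For illustration, a minimal sketch of a forced deletion; client is assumed
+// to be an *AutoScaling value and the group name is hypothetical:
+//
+//    _, err := client.DeleteAutoScalingGroup(&DeleteAutoScalingGroupInput{
+//        AutoScalingGroupName: aws.String("my-asg"),
+//        ForceDelete:          aws.Bool(true),
+//    })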
+// +// To remove instances from the Auto Scaling group before deleting it, call +// DetachInstances with the list of instances and the option to decrement the +// desired capacity so that Auto Scaling does not launch replacement instances. +// +// To terminate all instances before deleting the Auto Scaling group, call +// UpdateAutoScalingGroup and set the minimum size and desired capacity of the +// Auto Scaling group to zero. +func (c *AutoScaling) DeleteAutoScalingGroup(input *DeleteAutoScalingGroupInput) (*DeleteAutoScalingGroupOutput, error) { + req, out := c.DeleteAutoScalingGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteLaunchConfiguration = "DeleteLaunchConfiguration" + +// DeleteLaunchConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteLaunchConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteLaunchConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteLaunchConfigurationRequest method. +// req, resp := client.DeleteLaunchConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) DeleteLaunchConfigurationRequest(input *DeleteLaunchConfigurationInput) (req *request.Request, output *DeleteLaunchConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteLaunchConfiguration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLaunchConfigurationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteLaunchConfigurationOutput{} + req.Data = output + return +} + +// Deletes the specified launch configuration. +// +// The launch configuration must not be attached to an Auto Scaling group. +// When this call completes, the launch configuration is no longer available +// for use. +func (c *AutoScaling) DeleteLaunchConfiguration(input *DeleteLaunchConfigurationInput) (*DeleteLaunchConfigurationOutput, error) { + req, out := c.DeleteLaunchConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opDeleteLifecycleHook = "DeleteLifecycleHook" + +// DeleteLifecycleHookRequest generates a "aws/request.Request" representing the +// client's request for the DeleteLifecycleHook operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteLifecycleHook method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the DeleteLifecycleHookRequest method. +// req, resp := client.DeleteLifecycleHookRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) DeleteLifecycleHookRequest(input *DeleteLifecycleHookInput) (req *request.Request, output *DeleteLifecycleHookOutput) { + op := &request.Operation{ + Name: opDeleteLifecycleHook, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLifecycleHookInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteLifecycleHookOutput{} + req.Data = output + return +} + +// Deletes the specified lifecycle hook. +// +// If there are any outstanding lifecycle actions, they are completed first +// (ABANDON for launching instances, CONTINUE for terminating instances). +func (c *AutoScaling) DeleteLifecycleHook(input *DeleteLifecycleHookInput) (*DeleteLifecycleHookOutput, error) { + req, out := c.DeleteLifecycleHookRequest(input) + err := req.Send() + return out, err +} + +const opDeleteNotificationConfiguration = "DeleteNotificationConfiguration" + +// DeleteNotificationConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteNotificationConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteNotificationConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteNotificationConfigurationRequest method. +// req, resp := client.DeleteNotificationConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) DeleteNotificationConfigurationRequest(input *DeleteNotificationConfigurationInput) (req *request.Request, output *DeleteNotificationConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteNotificationConfiguration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteNotificationConfigurationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteNotificationConfigurationOutput{} + req.Data = output + return +} + +// Deletes the specified notification. +func (c *AutoScaling) DeleteNotificationConfiguration(input *DeleteNotificationConfigurationInput) (*DeleteNotificationConfigurationOutput, error) { + req, out := c.DeleteNotificationConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opDeletePolicy = "DeletePolicy" + +// DeletePolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeletePolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeletePolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeletePolicyRequest method. +// req, resp := client.DeletePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) DeletePolicyRequest(input *DeletePolicyInput) (req *request.Request, output *DeletePolicyOutput) { + op := &request.Operation{ + Name: opDeletePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeletePolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeletePolicyOutput{} + req.Data = output + return +} + +// Deletes the specified Auto Scaling policy. +// +// Deleting a policy deletes the underlying alarm action, but does not delete +// the alarm, even if it no longer has an associated action. +func (c *AutoScaling) DeletePolicy(input *DeletePolicyInput) (*DeletePolicyOutput, error) { + req, out := c.DeletePolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteScheduledAction = "DeleteScheduledAction" + +// DeleteScheduledActionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteScheduledAction operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteScheduledAction method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteScheduledActionRequest method. +// req, resp := client.DeleteScheduledActionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) DeleteScheduledActionRequest(input *DeleteScheduledActionInput) (req *request.Request, output *DeleteScheduledActionOutput) { + op := &request.Operation{ + Name: opDeleteScheduledAction, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteScheduledActionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteScheduledActionOutput{} + req.Data = output + return +} + +// Deletes the specified scheduled action. 
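+//
+// For illustration, a minimal sketch of the direct call; client is assumed to
+// be an *AutoScaling value and both names are hypothetical:
+//
+//    _, err := client.DeleteScheduledAction(&DeleteScheduledActionInput{
+//        AutoScalingGroupName: aws.String("my-asg"),
+//        ScheduledActionName:  aws.String("my-scheduled-action"),
+//    })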
+func (c *AutoScaling) DeleteScheduledAction(input *DeleteScheduledActionInput) (*DeleteScheduledActionOutput, error) { + req, out := c.DeleteScheduledActionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteTags = "DeleteTags" + +// DeleteTagsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteTagsRequest method. +// req, resp := client.DeleteTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) DeleteTagsRequest(input *DeleteTagsInput) (req *request.Request, output *DeleteTagsOutput) { + op := &request.Operation{ + Name: opDeleteTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTagsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteTagsOutput{} + req.Data = output + return +} + +// Deletes the specified tags. +func (c *AutoScaling) DeleteTags(input *DeleteTagsInput) (*DeleteTagsOutput, error) { + req, out := c.DeleteTagsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAccountLimits = "DescribeAccountLimits" + +// DescribeAccountLimitsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAccountLimits operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeAccountLimits method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeAccountLimitsRequest method. +// req, resp := client.DescribeAccountLimitsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) DescribeAccountLimitsRequest(input *DescribeAccountLimitsInput) (req *request.Request, output *DescribeAccountLimitsOutput) { + op := &request.Operation{ + Name: opDescribeAccountLimits, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAccountLimitsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAccountLimitsOutput{} + req.Data = output + return +} + +// Describes the current Auto Scaling resource limits for your AWS account. 
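+//
+// For illustration, a minimal sketch of reading one of the returned limits;
+// client is assumed to be an *AutoScaling value:
+//
+//    resp, err := client.DescribeAccountLimits(&DescribeAccountLimitsInput{})
+//    if err == nil {
+//        fmt.Println(aws.Int64Value(resp.MaxNumberOfAutoScalingGroups))
+//    }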
+// +// For information about requesting an increase in these limits, see AWS Service +// Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html) +// in the Amazon Web Services General Reference. +func (c *AutoScaling) DescribeAccountLimits(input *DescribeAccountLimitsInput) (*DescribeAccountLimitsOutput, error) { + req, out := c.DescribeAccountLimitsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAdjustmentTypes = "DescribeAdjustmentTypes" + +// DescribeAdjustmentTypesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAdjustmentTypes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeAdjustmentTypes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeAdjustmentTypesRequest method. +// req, resp := client.DescribeAdjustmentTypesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) DescribeAdjustmentTypesRequest(input *DescribeAdjustmentTypesInput) (req *request.Request, output *DescribeAdjustmentTypesOutput) { + op := &request.Operation{ + Name: opDescribeAdjustmentTypes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAdjustmentTypesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAdjustmentTypesOutput{} + req.Data = output + return +} + +// Describes the policy adjustment types for use with PutScalingPolicy. +func (c *AutoScaling) DescribeAdjustmentTypes(input *DescribeAdjustmentTypesInput) (*DescribeAdjustmentTypesOutput, error) { + req, out := c.DescribeAdjustmentTypesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAutoScalingGroups = "DescribeAutoScalingGroups" + +// DescribeAutoScalingGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAutoScalingGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeAutoScalingGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeAutoScalingGroupsRequest method. 
+// req, resp := client.DescribeAutoScalingGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) DescribeAutoScalingGroupsRequest(input *DescribeAutoScalingGroupsInput) (req *request.Request, output *DescribeAutoScalingGroupsOutput) { + op := &request.Operation{ + Name: opDescribeAutoScalingGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeAutoScalingGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAutoScalingGroupsOutput{} + req.Data = output + return +} + +// Describes one or more Auto Scaling groups. If a list of names is not provided, +// the call describes all Auto Scaling groups. +func (c *AutoScaling) DescribeAutoScalingGroups(input *DescribeAutoScalingGroupsInput) (*DescribeAutoScalingGroupsOutput, error) { + req, out := c.DescribeAutoScalingGroupsRequest(input) + err := req.Send() + return out, err +} + +// DescribeAutoScalingGroupsPages iterates over the pages of a DescribeAutoScalingGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeAutoScalingGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeAutoScalingGroups operation. +// pageNum := 0 +// err := client.DescribeAutoScalingGroupsPages(params, +// func(page *DescribeAutoScalingGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *AutoScaling) DescribeAutoScalingGroupsPages(input *DescribeAutoScalingGroupsInput, fn func(p *DescribeAutoScalingGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeAutoScalingGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeAutoScalingGroupsOutput), lastPage) + }) +} + +const opDescribeAutoScalingInstances = "DescribeAutoScalingInstances" + +// DescribeAutoScalingInstancesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAutoScalingInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeAutoScalingInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeAutoScalingInstancesRequest method. 
+// req, resp := client.DescribeAutoScalingInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) DescribeAutoScalingInstancesRequest(input *DescribeAutoScalingInstancesInput) (req *request.Request, output *DescribeAutoScalingInstancesOutput) { + op := &request.Operation{ + Name: opDescribeAutoScalingInstances, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeAutoScalingInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAutoScalingInstancesOutput{} + req.Data = output + return +} + +// Describes one or more Auto Scaling instances. If a list is not provided, +// the call describes all instances. +func (c *AutoScaling) DescribeAutoScalingInstances(input *DescribeAutoScalingInstancesInput) (*DescribeAutoScalingInstancesOutput, error) { + req, out := c.DescribeAutoScalingInstancesRequest(input) + err := req.Send() + return out, err +} + +// DescribeAutoScalingInstancesPages iterates over the pages of a DescribeAutoScalingInstances operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeAutoScalingInstances method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeAutoScalingInstances operation. +// pageNum := 0 +// err := client.DescribeAutoScalingInstancesPages(params, +// func(page *DescribeAutoScalingInstancesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *AutoScaling) DescribeAutoScalingInstancesPages(input *DescribeAutoScalingInstancesInput, fn func(p *DescribeAutoScalingInstancesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeAutoScalingInstancesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeAutoScalingInstancesOutput), lastPage) + }) +} + +const opDescribeAutoScalingNotificationTypes = "DescribeAutoScalingNotificationTypes" + +// DescribeAutoScalingNotificationTypesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAutoScalingNotificationTypes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeAutoScalingNotificationTypes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeAutoScalingNotificationTypesRequest method. 
+// req, resp := client.DescribeAutoScalingNotificationTypesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) DescribeAutoScalingNotificationTypesRequest(input *DescribeAutoScalingNotificationTypesInput) (req *request.Request, output *DescribeAutoScalingNotificationTypesOutput) { + op := &request.Operation{ + Name: opDescribeAutoScalingNotificationTypes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAutoScalingNotificationTypesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAutoScalingNotificationTypesOutput{} + req.Data = output + return +} + +// Describes the notification types that are supported by Auto Scaling. +func (c *AutoScaling) DescribeAutoScalingNotificationTypes(input *DescribeAutoScalingNotificationTypesInput) (*DescribeAutoScalingNotificationTypesOutput, error) { + req, out := c.DescribeAutoScalingNotificationTypesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeLaunchConfigurations = "DescribeLaunchConfigurations" + +// DescribeLaunchConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLaunchConfigurations operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeLaunchConfigurations method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeLaunchConfigurationsRequest method. +// req, resp := client.DescribeLaunchConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) DescribeLaunchConfigurationsRequest(input *DescribeLaunchConfigurationsInput) (req *request.Request, output *DescribeLaunchConfigurationsOutput) { + op := &request.Operation{ + Name: opDescribeLaunchConfigurations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeLaunchConfigurationsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeLaunchConfigurationsOutput{} + req.Data = output + return +} + +// Describes one or more launch configurations. If you omit the list of names, +// then the call describes all launch configurations. +func (c *AutoScaling) DescribeLaunchConfigurations(input *DescribeLaunchConfigurationsInput) (*DescribeLaunchConfigurationsOutput, error) { + req, out := c.DescribeLaunchConfigurationsRequest(input) + err := req.Send() + return out, err +} + +// DescribeLaunchConfigurationsPages iterates over the pages of a DescribeLaunchConfigurations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeLaunchConfigurations method for more information on how to use this operation. 
+// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeLaunchConfigurations operation. +// pageNum := 0 +// err := client.DescribeLaunchConfigurationsPages(params, +// func(page *DescribeLaunchConfigurationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *AutoScaling) DescribeLaunchConfigurationsPages(input *DescribeLaunchConfigurationsInput, fn func(p *DescribeLaunchConfigurationsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeLaunchConfigurationsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeLaunchConfigurationsOutput), lastPage) + }) +} + +const opDescribeLifecycleHookTypes = "DescribeLifecycleHookTypes" + +// DescribeLifecycleHookTypesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLifecycleHookTypes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeLifecycleHookTypes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeLifecycleHookTypesRequest method. +// req, resp := client.DescribeLifecycleHookTypesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) DescribeLifecycleHookTypesRequest(input *DescribeLifecycleHookTypesInput) (req *request.Request, output *DescribeLifecycleHookTypesOutput) { + op := &request.Operation{ + Name: opDescribeLifecycleHookTypes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeLifecycleHookTypesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeLifecycleHookTypesOutput{} + req.Data = output + return +} + +// Describes the available types of lifecycle hooks. +func (c *AutoScaling) DescribeLifecycleHookTypes(input *DescribeLifecycleHookTypesInput) (*DescribeLifecycleHookTypesOutput, error) { + req, out := c.DescribeLifecycleHookTypesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeLifecycleHooks = "DescribeLifecycleHooks" + +// DescribeLifecycleHooksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLifecycleHooks operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeLifecycleHooks method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the DescribeLifecycleHooksRequest method. +// req, resp := client.DescribeLifecycleHooksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) DescribeLifecycleHooksRequest(input *DescribeLifecycleHooksInput) (req *request.Request, output *DescribeLifecycleHooksOutput) { + op := &request.Operation{ + Name: opDescribeLifecycleHooks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeLifecycleHooksInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeLifecycleHooksOutput{} + req.Data = output + return +} + +// Describes the lifecycle hooks for the specified Auto Scaling group. +func (c *AutoScaling) DescribeLifecycleHooks(input *DescribeLifecycleHooksInput) (*DescribeLifecycleHooksOutput, error) { + req, out := c.DescribeLifecycleHooksRequest(input) + err := req.Send() + return out, err +} + +const opDescribeLoadBalancers = "DescribeLoadBalancers" + +// DescribeLoadBalancersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLoadBalancers operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeLoadBalancers method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeLoadBalancersRequest method. +// req, resp := client.DescribeLoadBalancersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) DescribeLoadBalancersRequest(input *DescribeLoadBalancersInput) (req *request.Request, output *DescribeLoadBalancersOutput) { + op := &request.Operation{ + Name: opDescribeLoadBalancers, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeLoadBalancersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeLoadBalancersOutput{} + req.Data = output + return +} + +// Describes the load balancers for the specified Auto Scaling group. +func (c *AutoScaling) DescribeLoadBalancers(input *DescribeLoadBalancersInput) (*DescribeLoadBalancersOutput, error) { + req, out := c.DescribeLoadBalancersRequest(input) + err := req.Send() + return out, err +} + +const opDescribeMetricCollectionTypes = "DescribeMetricCollectionTypes" + +// DescribeMetricCollectionTypesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeMetricCollectionTypes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeMetricCollectionTypes method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeMetricCollectionTypesRequest method. +// req, resp := client.DescribeMetricCollectionTypesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) DescribeMetricCollectionTypesRequest(input *DescribeMetricCollectionTypesInput) (req *request.Request, output *DescribeMetricCollectionTypesOutput) { + op := &request.Operation{ + Name: opDescribeMetricCollectionTypes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeMetricCollectionTypesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeMetricCollectionTypesOutput{} + req.Data = output + return +} + +// Describes the available CloudWatch metrics for Auto Scaling. +// +// Note that the GroupStandbyInstances metric is not returned by default. You +// must explicitly request this metric when calling EnableMetricsCollection. +func (c *AutoScaling) DescribeMetricCollectionTypes(input *DescribeMetricCollectionTypesInput) (*DescribeMetricCollectionTypesOutput, error) { + req, out := c.DescribeMetricCollectionTypesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeNotificationConfigurations = "DescribeNotificationConfigurations" + +// DescribeNotificationConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeNotificationConfigurations operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeNotificationConfigurations method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeNotificationConfigurationsRequest method. +// req, resp := client.DescribeNotificationConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) DescribeNotificationConfigurationsRequest(input *DescribeNotificationConfigurationsInput) (req *request.Request, output *DescribeNotificationConfigurationsOutput) { + op := &request.Operation{ + Name: opDescribeNotificationConfigurations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeNotificationConfigurationsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeNotificationConfigurationsOutput{} + req.Data = output + return +} + +// Describes the notification actions associated with the specified Auto Scaling +// group. 
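+//
+// For illustration, a minimal sketch; client is assumed to be an *AutoScaling
+// value and the group name is hypothetical:
+//
+//    resp, err := client.DescribeNotificationConfigurations(&DescribeNotificationConfigurationsInput{
+//        AutoScalingGroupNames: []*string{aws.String("my-asg")},
+//    })
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }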
+func (c *AutoScaling) DescribeNotificationConfigurations(input *DescribeNotificationConfigurationsInput) (*DescribeNotificationConfigurationsOutput, error) { + req, out := c.DescribeNotificationConfigurationsRequest(input) + err := req.Send() + return out, err +} + +// DescribeNotificationConfigurationsPages iterates over the pages of a DescribeNotificationConfigurations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeNotificationConfigurations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeNotificationConfigurations operation. +// pageNum := 0 +// err := client.DescribeNotificationConfigurationsPages(params, +// func(page *DescribeNotificationConfigurationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *AutoScaling) DescribeNotificationConfigurationsPages(input *DescribeNotificationConfigurationsInput, fn func(p *DescribeNotificationConfigurationsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeNotificationConfigurationsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeNotificationConfigurationsOutput), lastPage) + }) +} + +const opDescribePolicies = "DescribePolicies" + +// DescribePoliciesRequest generates a "aws/request.Request" representing the +// client's request for the DescribePolicies operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribePolicies method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribePoliciesRequest method. +// req, resp := client.DescribePoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) DescribePoliciesRequest(input *DescribePoliciesInput) (req *request.Request, output *DescribePoliciesOutput) { + op := &request.Operation{ + Name: opDescribePolicies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribePoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribePoliciesOutput{} + req.Data = output + return +} + +// Describes the policies for the specified Auto Scaling group. +func (c *AutoScaling) DescribePolicies(input *DescribePoliciesInput) (*DescribePoliciesOutput, error) { + req, out := c.DescribePoliciesRequest(input) + err := req.Send() + return out, err +} + +// DescribePoliciesPages iterates over the pages of a DescribePolicies operation, +// calling the "fn" function with the response data for each page. 
+// To stop iterating, return false from the fn function.
+//
+// See DescribePolicies method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a DescribePolicies operation.
+// pageNum := 0
+// err := client.DescribePoliciesPages(params,
+// func(page *DescribePoliciesOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *AutoScaling) DescribePoliciesPages(input *DescribePoliciesInput, fn func(p *DescribePoliciesOutput, lastPage bool) (shouldContinue bool)) error {
+ page, _ := c.DescribePoliciesRequest(input)
+ page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
+ return fn(p.(*DescribePoliciesOutput), lastPage)
+ })
+}
+
+const opDescribeScalingActivities = "DescribeScalingActivities"
+
+// DescribeScalingActivitiesRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeScalingActivities operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeScalingActivities method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DescribeScalingActivitiesRequest method.
+// req, resp := client.DescribeScalingActivitiesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *AutoScaling) DescribeScalingActivitiesRequest(input *DescribeScalingActivitiesInput) (req *request.Request, output *DescribeScalingActivitiesOutput) {
+ op := &request.Operation{
+ Name: opDescribeScalingActivities,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"NextToken"},
+ OutputTokens: []string{"NextToken"},
+ LimitToken: "MaxRecords",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &DescribeScalingActivitiesInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DescribeScalingActivitiesOutput{}
+ req.Data = output
+ return
+}
+
+// Describes one or more scaling activities for the specified Auto Scaling group.
+// If you omit the ActivityIds, the call returns all activities from the past
+// six weeks. Activities are sorted by the start time. Activities still in progress
+// appear first on the list.
+func (c *AutoScaling) DescribeScalingActivities(input *DescribeScalingActivitiesInput) (*DescribeScalingActivitiesOutput, error) {
+ req, out := c.DescribeScalingActivitiesRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// DescribeScalingActivitiesPages iterates over the pages of a DescribeScalingActivities operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See DescribeScalingActivities method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+// +// // Example iterating over at most 3 pages of a DescribeScalingActivities operation. +// pageNum := 0 +// err := client.DescribeScalingActivitiesPages(params, +// func(page *DescribeScalingActivitiesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *AutoScaling) DescribeScalingActivitiesPages(input *DescribeScalingActivitiesInput, fn func(p *DescribeScalingActivitiesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeScalingActivitiesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeScalingActivitiesOutput), lastPage) + }) +} + +const opDescribeScalingProcessTypes = "DescribeScalingProcessTypes" + +// DescribeScalingProcessTypesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeScalingProcessTypes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeScalingProcessTypes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeScalingProcessTypesRequest method. +// req, resp := client.DescribeScalingProcessTypesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) DescribeScalingProcessTypesRequest(input *DescribeScalingProcessTypesInput) (req *request.Request, output *DescribeScalingProcessTypesOutput) { + op := &request.Operation{ + Name: opDescribeScalingProcessTypes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeScalingProcessTypesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeScalingProcessTypesOutput{} + req.Data = output + return +} + +// Describes the scaling process types for use with ResumeProcesses and SuspendProcesses. +func (c *AutoScaling) DescribeScalingProcessTypes(input *DescribeScalingProcessTypesInput) (*DescribeScalingProcessTypesOutput, error) { + req, out := c.DescribeScalingProcessTypesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeScheduledActions = "DescribeScheduledActions" + +// DescribeScheduledActionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeScheduledActions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeScheduledActions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeScheduledActionsRequest method. 
+// req, resp := client.DescribeScheduledActionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) DescribeScheduledActionsRequest(input *DescribeScheduledActionsInput) (req *request.Request, output *DescribeScheduledActionsOutput) { + op := &request.Operation{ + Name: opDescribeScheduledActions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeScheduledActionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeScheduledActionsOutput{} + req.Data = output + return +} + +// Describes the actions scheduled for your Auto Scaling group that haven't +// run. To describe the actions that have already run, use DescribeScalingActivities. +func (c *AutoScaling) DescribeScheduledActions(input *DescribeScheduledActionsInput) (*DescribeScheduledActionsOutput, error) { + req, out := c.DescribeScheduledActionsRequest(input) + err := req.Send() + return out, err +} + +// DescribeScheduledActionsPages iterates over the pages of a DescribeScheduledActions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeScheduledActions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeScheduledActions operation. +// pageNum := 0 +// err := client.DescribeScheduledActionsPages(params, +// func(page *DescribeScheduledActionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *AutoScaling) DescribeScheduledActionsPages(input *DescribeScheduledActionsInput, fn func(p *DescribeScheduledActionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeScheduledActionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeScheduledActionsOutput), lastPage) + }) +} + +const opDescribeTags = "DescribeTags" + +// DescribeTagsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeTagsRequest method. 
+// req, resp := client.DescribeTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) DescribeTagsRequest(input *DescribeTagsInput) (req *request.Request, output *DescribeTagsOutput) { + op := &request.Operation{ + Name: opDescribeTags, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTagsOutput{} + req.Data = output + return +} + +// Describes the specified tags. +// +// You can use filters to limit the results. For example, you can query for +// the tags for a specific Auto Scaling group. You can specify multiple values +// for a filter. A tag must match at least one of the specified values for it +// to be included in the results. +// +// You can also specify multiple filters. The result includes information for +// a particular tag only if it matches all the filters. If there's no match, +// no special message is returned. +func (c *AutoScaling) DescribeTags(input *DescribeTagsInput) (*DescribeTagsOutput, error) { + req, out := c.DescribeTagsRequest(input) + err := req.Send() + return out, err +} + +// DescribeTagsPages iterates over the pages of a DescribeTags operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeTags method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeTags operation. +// pageNum := 0 +// err := client.DescribeTagsPages(params, +// func(page *DescribeTagsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *AutoScaling) DescribeTagsPages(input *DescribeTagsInput, fn func(p *DescribeTagsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeTagsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeTagsOutput), lastPage) + }) +} + +const opDescribeTerminationPolicyTypes = "DescribeTerminationPolicyTypes" + +// DescribeTerminationPolicyTypesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTerminationPolicyTypes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeTerminationPolicyTypes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeTerminationPolicyTypesRequest method. 
+// req, resp := client.DescribeTerminationPolicyTypesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) DescribeTerminationPolicyTypesRequest(input *DescribeTerminationPolicyTypesInput) (req *request.Request, output *DescribeTerminationPolicyTypesOutput) { + op := &request.Operation{ + Name: opDescribeTerminationPolicyTypes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTerminationPolicyTypesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTerminationPolicyTypesOutput{} + req.Data = output + return +} + +// Describes the termination policies supported by Auto Scaling. +func (c *AutoScaling) DescribeTerminationPolicyTypes(input *DescribeTerminationPolicyTypesInput) (*DescribeTerminationPolicyTypesOutput, error) { + req, out := c.DescribeTerminationPolicyTypesRequest(input) + err := req.Send() + return out, err +} + +const opDetachInstances = "DetachInstances" + +// DetachInstancesRequest generates a "aws/request.Request" representing the +// client's request for the DetachInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DetachInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DetachInstancesRequest method. +// req, resp := client.DetachInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) DetachInstancesRequest(input *DetachInstancesInput) (req *request.Request, output *DetachInstancesOutput) { + op := &request.Operation{ + Name: opDetachInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DetachInstancesOutput{} + req.Data = output + return +} + +// Removes one or more instances from the specified Auto Scaling group. +// +// After the instances are detached, you can manage them independently from +// the rest of the Auto Scaling group. +// +// If you do not specify the option to decrement the desired capacity, Auto +// Scaling launches instances to replace the ones that are detached. +// +// For more information, see Detach EC2 Instances from Your Auto Scaling Group +// (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/detach-instance-asg.html) +// in the Auto Scaling Developer Guide. +func (c *AutoScaling) DetachInstances(input *DetachInstancesInput) (*DetachInstancesOutput, error) { + req, out := c.DetachInstancesRequest(input) + err := req.Send() + return out, err +} + +const opDetachLoadBalancers = "DetachLoadBalancers" + +// DetachLoadBalancersRequest generates a "aws/request.Request" representing the +// client's request for the DetachLoadBalancers operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DetachLoadBalancers method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DetachLoadBalancersRequest method. +// req, resp := client.DetachLoadBalancersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) DetachLoadBalancersRequest(input *DetachLoadBalancersInput) (req *request.Request, output *DetachLoadBalancersOutput) { + op := &request.Operation{ + Name: opDetachLoadBalancers, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachLoadBalancersInput{} + } + + req = c.newRequest(op, input, output) + output = &DetachLoadBalancersOutput{} + req.Data = output + return +} + +// Removes one or more load balancers from the specified Auto Scaling group. +// +// When you detach a load balancer, it enters the Removing state while deregistering +// the instances in the group. When all instances are deregistered, then you +// can no longer describe the load balancer using DescribeLoadBalancers. Note +// that the instances remain running. +func (c *AutoScaling) DetachLoadBalancers(input *DetachLoadBalancersInput) (*DetachLoadBalancersOutput, error) { + req, out := c.DetachLoadBalancersRequest(input) + err := req.Send() + return out, err +} + +const opDisableMetricsCollection = "DisableMetricsCollection" + +// DisableMetricsCollectionRequest generates a "aws/request.Request" representing the +// client's request for the DisableMetricsCollection operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DisableMetricsCollection method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DisableMetricsCollectionRequest method. +// req, resp := client.DisableMetricsCollectionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) DisableMetricsCollectionRequest(input *DisableMetricsCollectionInput) (req *request.Request, output *DisableMetricsCollectionOutput) { + op := &request.Operation{ + Name: opDisableMetricsCollection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableMetricsCollectionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DisableMetricsCollectionOutput{} + req.Data = output + return +} + +// Disables monitoring of the specified metrics for the specified Auto Scaling +// group. 
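+//
+// A minimal usage sketch, assuming an initialized *autoscaling.AutoScaling
+// client ("svc") and an illustrative group name; omitting Metrics disables
+// all metrics:
+//
+//    params := &autoscaling.DisableMetricsCollectionInput{
+//        AutoScalingGroupName: aws.String("my-asg"), // hypothetical name
+//        Metrics:              []*string{aws.String("GroupMinSize")},
+//    }
+//    _, err := svc.DisableMetricsCollection(params)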
+func (c *AutoScaling) DisableMetricsCollection(input *DisableMetricsCollectionInput) (*DisableMetricsCollectionOutput, error) { + req, out := c.DisableMetricsCollectionRequest(input) + err := req.Send() + return out, err +} + +const opEnableMetricsCollection = "EnableMetricsCollection" + +// EnableMetricsCollectionRequest generates a "aws/request.Request" representing the +// client's request for the EnableMetricsCollection operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the EnableMetricsCollection method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EnableMetricsCollectionRequest method. +// req, resp := client.EnableMetricsCollectionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) EnableMetricsCollectionRequest(input *EnableMetricsCollectionInput) (req *request.Request, output *EnableMetricsCollectionOutput) { + op := &request.Operation{ + Name: opEnableMetricsCollection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableMetricsCollectionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &EnableMetricsCollectionOutput{} + req.Data = output + return +} + +// Enables monitoring of the specified metrics for the specified Auto Scaling +// group. +// +// You can only enable metrics collection if InstanceMonitoring in the launch +// configuration for the group is set to True. +func (c *AutoScaling) EnableMetricsCollection(input *EnableMetricsCollectionInput) (*EnableMetricsCollectionOutput, error) { + req, out := c.EnableMetricsCollectionRequest(input) + err := req.Send() + return out, err +} + +const opEnterStandby = "EnterStandby" + +// EnterStandbyRequest generates a "aws/request.Request" representing the +// client's request for the EnterStandby operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the EnterStandby method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EnterStandbyRequest method. 
+// req, resp := client.EnterStandbyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) EnterStandbyRequest(input *EnterStandbyInput) (req *request.Request, output *EnterStandbyOutput) { + op := &request.Operation{ + Name: opEnterStandby, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnterStandbyInput{} + } + + req = c.newRequest(op, input, output) + output = &EnterStandbyOutput{} + req.Data = output + return +} + +// Moves the specified instances into Standby mode. +// +// For more information, see Auto Scaling Lifecycle (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingGroupLifecycle.html) +// in the Auto Scaling Developer Guide. +func (c *AutoScaling) EnterStandby(input *EnterStandbyInput) (*EnterStandbyOutput, error) { + req, out := c.EnterStandbyRequest(input) + err := req.Send() + return out, err +} + +const opExecutePolicy = "ExecutePolicy" + +// ExecutePolicyRequest generates a "aws/request.Request" representing the +// client's request for the ExecutePolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ExecutePolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ExecutePolicyRequest method. +// req, resp := client.ExecutePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) ExecutePolicyRequest(input *ExecutePolicyInput) (req *request.Request, output *ExecutePolicyOutput) { + op := &request.Operation{ + Name: opExecutePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ExecutePolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ExecutePolicyOutput{} + req.Data = output + return +} + +// Executes the specified policy. +func (c *AutoScaling) ExecutePolicy(input *ExecutePolicyInput) (*ExecutePolicyOutput, error) { + req, out := c.ExecutePolicyRequest(input) + err := req.Send() + return out, err +} + +const opExitStandby = "ExitStandby" + +// ExitStandbyRequest generates a "aws/request.Request" representing the +// client's request for the ExitStandby operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ExitStandby method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ExitStandbyRequest method. 
+// req, resp := client.ExitStandbyRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *AutoScaling) ExitStandbyRequest(input *ExitStandbyInput) (req *request.Request, output *ExitStandbyOutput) {
+ op := &request.Operation{
+ Name: opExitStandby,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ExitStandbyInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ExitStandbyOutput{}
+ req.Data = output
+ return
+}
+
+// Moves the specified instances out of Standby mode.
+//
+// For more information, see Auto Scaling Lifecycle (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingGroupLifecycle.html)
+// in the Auto Scaling Developer Guide.
+func (c *AutoScaling) ExitStandby(input *ExitStandbyInput) (*ExitStandbyOutput, error) {
+ req, out := c.ExitStandbyRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opPutLifecycleHook = "PutLifecycleHook"
+
+// PutLifecycleHookRequest generates a "aws/request.Request" representing the
+// client's request for the PutLifecycleHook operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutLifecycleHook method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the PutLifecycleHookRequest method.
+// req, resp := client.PutLifecycleHookRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *AutoScaling) PutLifecycleHookRequest(input *PutLifecycleHookInput) (req *request.Request, output *PutLifecycleHookOutput) {
+ op := &request.Operation{
+ Name: opPutLifecycleHook,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &PutLifecycleHookInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &PutLifecycleHookOutput{}
+ req.Data = output
+ return
+}
+
+// Creates or updates a lifecycle hook for the specified Auto Scaling group.
+//
+// A lifecycle hook tells Auto Scaling that you want to perform an action on
+// an instance that is not actively in service; for example, either when the
+// instance launches or before the instance terminates.
+//
+// This step is a part of the procedure for adding a lifecycle hook to an Auto
+// Scaling group:
+//
+// (Optional) Create a Lambda function and a rule that allows CloudWatch Events
+// to invoke your Lambda function when Auto Scaling launches or terminates instances.
+//
+// (Optional) Create a notification target and an IAM role. The target can be
+// either an Amazon SQS queue or an Amazon SNS topic. The role allows Auto Scaling
+// to publish lifecycle notifications to the target.
+//
+// Create the lifecycle hook. Specify whether the hook is used when the instances
+// launch or terminate.
+//
+// If you need more time, record the lifecycle action heartbeat to keep the
+// instance in a pending state.
+//
+// If you finish before the timeout period ends, complete the lifecycle action.
+//
+// For more information, see Auto Scaling Lifecycle
+// (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingGroupLifecycle.html)
+// in the Auto Scaling Developer Guide.
+//
+// If you exceed your maximum limit of lifecycle hooks, which by default is
+// 50 per region, the call fails. For information about updating this limit,
+// see AWS Service Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html)
+// in the Amazon Web Services General Reference.
+func (c *AutoScaling) PutLifecycleHook(input *PutLifecycleHookInput) (*PutLifecycleHookOutput, error) {
+ req, out := c.PutLifecycleHookRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opPutNotificationConfiguration = "PutNotificationConfiguration"
+
+// PutNotificationConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the PutNotificationConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutNotificationConfiguration method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the PutNotificationConfigurationRequest method.
+// req, resp := client.PutNotificationConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *AutoScaling) PutNotificationConfigurationRequest(input *PutNotificationConfigurationInput) (req *request.Request, output *PutNotificationConfigurationOutput) {
+ op := &request.Operation{
+ Name: opPutNotificationConfiguration,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &PutNotificationConfigurationInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &PutNotificationConfigurationOutput{}
+ req.Data = output
+ return
+}
+
+// Configures an Auto Scaling group to send notifications when specified events
+// take place. Subscribers to this topic can have messages for events delivered
+// to an endpoint such as a web server or email address.
+//
+// For more information, see Getting Notifications When Your Auto Scaling Group
+// Changes (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/ASGettingNotifications.html)
+// in the Auto Scaling Developer Guide.
+//
+// This configuration overwrites an existing configuration.
+func (c *AutoScaling) PutNotificationConfiguration(input *PutNotificationConfigurationInput) (*PutNotificationConfigurationOutput, error) {
+ req, out := c.PutNotificationConfigurationRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opPutScalingPolicy = "PutScalingPolicy"
+
+// PutScalingPolicyRequest generates a "aws/request.Request" representing the
+// client's request for the PutScalingPolicy operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutScalingPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutScalingPolicyRequest method. +// req, resp := client.PutScalingPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) PutScalingPolicyRequest(input *PutScalingPolicyInput) (req *request.Request, output *PutScalingPolicyOutput) { + op := &request.Operation{ + Name: opPutScalingPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutScalingPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &PutScalingPolicyOutput{} + req.Data = output + return +} + +// Creates or updates a policy for an Auto Scaling group. To update an existing +// policy, use the existing policy name and set the parameters you want to change. +// Any existing parameter not changed in an update to an existing policy is +// not changed in this update request. +// +// If you exceed your maximum limit of step adjustments, which by default is +// 20 per region, the call fails. For information about updating this limit, +// see AWS Service Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html) +// in the Amazon Web Services General Reference. +func (c *AutoScaling) PutScalingPolicy(input *PutScalingPolicyInput) (*PutScalingPolicyOutput, error) { + req, out := c.PutScalingPolicyRequest(input) + err := req.Send() + return out, err +} + +const opPutScheduledUpdateGroupAction = "PutScheduledUpdateGroupAction" + +// PutScheduledUpdateGroupActionRequest generates a "aws/request.Request" representing the +// client's request for the PutScheduledUpdateGroupAction operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutScheduledUpdateGroupAction method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutScheduledUpdateGroupActionRequest method. 
+// req, resp := client.PutScheduledUpdateGroupActionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) PutScheduledUpdateGroupActionRequest(input *PutScheduledUpdateGroupActionInput) (req *request.Request, output *PutScheduledUpdateGroupActionOutput) { + op := &request.Operation{ + Name: opPutScheduledUpdateGroupAction, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutScheduledUpdateGroupActionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutScheduledUpdateGroupActionOutput{} + req.Data = output + return +} + +// Creates or updates a scheduled scaling action for an Auto Scaling group. +// When updating a scheduled scaling action, if you leave a parameter unspecified, +// the corresponding value remains unchanged in the affected Auto Scaling group. +// +// For more information, see Scheduled Scaling (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/schedule_time.html) +// in the Auto Scaling Developer Guide. +func (c *AutoScaling) PutScheduledUpdateGroupAction(input *PutScheduledUpdateGroupActionInput) (*PutScheduledUpdateGroupActionOutput, error) { + req, out := c.PutScheduledUpdateGroupActionRequest(input) + err := req.Send() + return out, err +} + +const opRecordLifecycleActionHeartbeat = "RecordLifecycleActionHeartbeat" + +// RecordLifecycleActionHeartbeatRequest generates a "aws/request.Request" representing the +// client's request for the RecordLifecycleActionHeartbeat operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RecordLifecycleActionHeartbeat method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RecordLifecycleActionHeartbeatRequest method. +// req, resp := client.RecordLifecycleActionHeartbeatRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) RecordLifecycleActionHeartbeatRequest(input *RecordLifecycleActionHeartbeatInput) (req *request.Request, output *RecordLifecycleActionHeartbeatOutput) { + op := &request.Operation{ + Name: opRecordLifecycleActionHeartbeat, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RecordLifecycleActionHeartbeatInput{} + } + + req = c.newRequest(op, input, output) + output = &RecordLifecycleActionHeartbeatOutput{} + req.Data = output + return +} + +// Records a heartbeat for the lifecycle action associated with the specified +// token or instance. This extends the timeout by the length of time defined +// using PutLifecycleHook. +// +// This step is a part of the procedure for adding a lifecycle hook to an Auto +// Scaling group: +// +// (Optional) Create a Lambda function and a rule that allows CloudWatch Events +// to invoke your Lambda function when Auto Scaling launches or terminates instances. 
+//
+// (Optional) Create a notification target and an IAM role. The target can be
+// either an Amazon SQS queue or an Amazon SNS topic. The role allows Auto Scaling
+// to publish lifecycle notifications to the target.
+//
+// Create the lifecycle hook. Specify whether the hook is used when the instances
+// launch or terminate.
+//
+// If you need more time, record the lifecycle action heartbeat to keep the
+// instance in a pending state.
+//
+// If you finish before the timeout period ends, complete the lifecycle action.
+//
+// For more information, see Auto Scaling Lifecycle
+// (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingGroupLifecycle.html)
+// in the Auto Scaling Developer Guide.
+func (c *AutoScaling) RecordLifecycleActionHeartbeat(input *RecordLifecycleActionHeartbeatInput) (*RecordLifecycleActionHeartbeatOutput, error) {
+ req, out := c.RecordLifecycleActionHeartbeatRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opResumeProcesses = "ResumeProcesses"
+
+// ResumeProcessesRequest generates a "aws/request.Request" representing the
+// client's request for the ResumeProcesses operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ResumeProcesses method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the ResumeProcessesRequest method.
+// req, resp := client.ResumeProcessesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *AutoScaling) ResumeProcessesRequest(input *ScalingProcessQuery) (req *request.Request, output *ResumeProcessesOutput) {
+ op := &request.Operation{
+ Name: opResumeProcesses,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ScalingProcessQuery{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &ResumeProcessesOutput{}
+ req.Data = output
+ return
+}
+
+// Resumes the specified suspended Auto Scaling processes, or all suspended
+// processes, for the specified Auto Scaling group.
+//
+// For more information, see Suspending and Resuming Auto Scaling Processes
+// (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/US_SuspendResume.html)
+// in the Auto Scaling Developer Guide.
+func (c *AutoScaling) ResumeProcesses(input *ScalingProcessQuery) (*ResumeProcessesOutput, error) {
+ req, out := c.ResumeProcessesRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opSetDesiredCapacity = "SetDesiredCapacity"
+
+// SetDesiredCapacityRequest generates a "aws/request.Request" representing the
+// client's request for the SetDesiredCapacity operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetDesiredCapacity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetDesiredCapacityRequest method. +// req, resp := client.SetDesiredCapacityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) SetDesiredCapacityRequest(input *SetDesiredCapacityInput) (req *request.Request, output *SetDesiredCapacityOutput) { + op := &request.Operation{ + Name: opSetDesiredCapacity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetDesiredCapacityInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetDesiredCapacityOutput{} + req.Data = output + return +} + +// Sets the size of the specified Auto Scaling group. +// +// For more information about desired capacity, see What Is Auto Scaling? (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/WhatIsAutoScaling.html) +// in the Auto Scaling Developer Guide. +func (c *AutoScaling) SetDesiredCapacity(input *SetDesiredCapacityInput) (*SetDesiredCapacityOutput, error) { + req, out := c.SetDesiredCapacityRequest(input) + err := req.Send() + return out, err +} + +const opSetInstanceHealth = "SetInstanceHealth" + +// SetInstanceHealthRequest generates a "aws/request.Request" representing the +// client's request for the SetInstanceHealth operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetInstanceHealth method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetInstanceHealthRequest method. +// req, resp := client.SetInstanceHealthRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) SetInstanceHealthRequest(input *SetInstanceHealthInput) (req *request.Request, output *SetInstanceHealthOutput) { + op := &request.Operation{ + Name: opSetInstanceHealth, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetInstanceHealthInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetInstanceHealthOutput{} + req.Data = output + return +} + +// Sets the health status of the specified instance. +// +// For more information, see Health Checks (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/healthcheck.html) +// in the Auto Scaling Developer Guide. 
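+//
+// A minimal usage sketch, assuming an initialized *autoscaling.AutoScaling
+// client ("svc") and an illustrative instance ID:
+//
+//    params := &autoscaling.SetInstanceHealthInput{
+//        HealthStatus: aws.String("Unhealthy"),  // "Healthy" or "Unhealthy"
+//        InstanceId:   aws.String("i-12345678"), // hypothetical instance ID
+//    }
+//    _, err := svc.SetInstanceHealth(params)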
+func (c *AutoScaling) SetInstanceHealth(input *SetInstanceHealthInput) (*SetInstanceHealthOutput, error) { + req, out := c.SetInstanceHealthRequest(input) + err := req.Send() + return out, err +} + +const opSetInstanceProtection = "SetInstanceProtection" + +// SetInstanceProtectionRequest generates a "aws/request.Request" representing the +// client's request for the SetInstanceProtection operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetInstanceProtection method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetInstanceProtectionRequest method. +// req, resp := client.SetInstanceProtectionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) SetInstanceProtectionRequest(input *SetInstanceProtectionInput) (req *request.Request, output *SetInstanceProtectionOutput) { + op := &request.Operation{ + Name: opSetInstanceProtection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetInstanceProtectionInput{} + } + + req = c.newRequest(op, input, output) + output = &SetInstanceProtectionOutput{} + req.Data = output + return +} + +// Updates the instance protection settings of the specified instances. +// +// For more information, see Instance Protection (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingBehavior.InstanceTermination.html#instance-protection) +// in the Auto Scaling Developer Guide. +func (c *AutoScaling) SetInstanceProtection(input *SetInstanceProtectionInput) (*SetInstanceProtectionOutput, error) { + req, out := c.SetInstanceProtectionRequest(input) + err := req.Send() + return out, err +} + +const opSuspendProcesses = "SuspendProcesses" + +// SuspendProcessesRequest generates a "aws/request.Request" representing the +// client's request for the SuspendProcesses operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SuspendProcesses method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SuspendProcessesRequest method. 
+// req, resp := client.SuspendProcessesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) SuspendProcessesRequest(input *ScalingProcessQuery) (req *request.Request, output *SuspendProcessesOutput) { + op := &request.Operation{ + Name: opSuspendProcesses, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ScalingProcessQuery{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SuspendProcessesOutput{} + req.Data = output + return +} + +// Suspends the specified Auto Scaling processes, or all processes, for the +// specified Auto Scaling group. +// +// Note that if you suspend either the Launch or Terminate process types, it +// can prevent other process types from functioning properly. +// +// To resume processes that have been suspended, use ResumeProcesses. +// +// For more information, see Suspending and Resuming Auto Scaling Processes +// (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/US_SuspendResume.html) +// in the Auto Scaling Developer Guide. +func (c *AutoScaling) SuspendProcesses(input *ScalingProcessQuery) (*SuspendProcessesOutput, error) { + req, out := c.SuspendProcessesRequest(input) + err := req.Send() + return out, err +} + +const opTerminateInstanceInAutoScalingGroup = "TerminateInstanceInAutoScalingGroup" + +// TerminateInstanceInAutoScalingGroupRequest generates a "aws/request.Request" representing the +// client's request for the TerminateInstanceInAutoScalingGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the TerminateInstanceInAutoScalingGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the TerminateInstanceInAutoScalingGroupRequest method. +// req, resp := client.TerminateInstanceInAutoScalingGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) TerminateInstanceInAutoScalingGroupRequest(input *TerminateInstanceInAutoScalingGroupInput) (req *request.Request, output *TerminateInstanceInAutoScalingGroupOutput) { + op := &request.Operation{ + Name: opTerminateInstanceInAutoScalingGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TerminateInstanceInAutoScalingGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &TerminateInstanceInAutoScalingGroupOutput{} + req.Data = output + return +} + +// Terminates the specified instance and optionally adjusts the desired group +// size. +// +// This call simply makes a termination request. The instance is not terminated +// immediately. 
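+//
+// A minimal usage sketch, assuming an initialized *autoscaling.AutoScaling
+// client ("svc") and an illustrative instance ID; decrementing the desired
+// capacity keeps Auto Scaling from launching a replacement instance:
+//
+//    params := &autoscaling.TerminateInstanceInAutoScalingGroupInput{
+//        InstanceId:                     aws.String("i-12345678"), // hypothetical ID
+//        ShouldDecrementDesiredCapacity: aws.Bool(true),
+//    }
+//    resp, err := svc.TerminateInstanceInAutoScalingGroup(params) // resp describes the scaling activity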
+func (c *AutoScaling) TerminateInstanceInAutoScalingGroup(input *TerminateInstanceInAutoScalingGroupInput) (*TerminateInstanceInAutoScalingGroupOutput, error) { + req, out := c.TerminateInstanceInAutoScalingGroupRequest(input) + err := req.Send() + return out, err +} + +const opUpdateAutoScalingGroup = "UpdateAutoScalingGroup" + +// UpdateAutoScalingGroupRequest generates a "aws/request.Request" representing the +// client's request for the UpdateAutoScalingGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateAutoScalingGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateAutoScalingGroupRequest method. +// req, resp := client.UpdateAutoScalingGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) UpdateAutoScalingGroupRequest(input *UpdateAutoScalingGroupInput) (req *request.Request, output *UpdateAutoScalingGroupOutput) { + op := &request.Operation{ + Name: opUpdateAutoScalingGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateAutoScalingGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateAutoScalingGroupOutput{} + req.Data = output + return +} + +// Updates the configuration for the specified Auto Scaling group. +// +// To update an Auto Scaling group with a launch configuration with InstanceMonitoring +// set to False, you must first disable the collection of group metrics. Otherwise, +// you will get an error. If you have previously enabled the collection of group +// metrics, you can disable it using DisableMetricsCollection. +// +// The new settings are registered upon the completion of this call. Any launch +// configuration settings take effect on any triggers after this call returns. +// Scaling activities that are currently in progress aren't affected. +// +// Note the following: +// +// If you specify a new value for MinSize without specifying a value for +// DesiredCapacity, and the new MinSize is larger than the current size of the +// group, we implicitly call SetDesiredCapacity to set the size of the group +// to the new value of MinSize. +// +// If you specify a new value for MaxSize without specifying a value for +// DesiredCapacity, and the new MaxSize is smaller than the current size of +// the group, we implicitly call SetDesiredCapacity to set the size of the group +// to the new value of MaxSize. +// +// All other optional parameters are left unchanged if not specified. 
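+//
+// A minimal usage sketch, assuming a configured client as above; only the size
+// limits are specified, so every other setting keeps its current value:
+//
+//    svc := autoscaling.New(session.New())
+//    _, err := svc.UpdateAutoScalingGroup(&autoscaling.UpdateAutoScalingGroupInput{
+//        AutoScalingGroupName: aws.String("my-group"), // hypothetical group name
+//        MinSize:              aws.Int64(2),
+//        MaxSize:              aws.Int64(10),
+//    })
+//    if err != nil {
+//        fmt.Println(err)
+//    }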
+func (c *AutoScaling) UpdateAutoScalingGroup(input *UpdateAutoScalingGroupInput) (*UpdateAutoScalingGroupOutput, error) { + req, out := c.UpdateAutoScalingGroupRequest(input) + err := req.Send() + return out, err +} + +// Describes scaling activity, which is a long-running process that represents +// a change to your Auto Scaling group, such as changing its size or replacing +// an instance. +type Activity struct { + _ struct{} `type:"structure"` + + // The ID of the activity. + ActivityId *string `type:"string" required:"true"` + + // The name of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // The reason the activity began. + Cause *string `min:"1" type:"string" required:"true"` + + // A friendly, more verbose description of the activity. + Description *string `type:"string"` + + // The details about the activity. + Details *string `type:"string"` + + // The end time of the activity. + EndTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // A value between 0 and 100 that indicates the progress of the activity. + Progress *int64 `type:"integer"` + + // The start time of the activity. + StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The current status of the activity. + StatusCode *string `type:"string" required:"true" enum:"ScalingActivityStatusCode"` + + // A friendly, more verbose description of the activity status. + StatusMessage *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s Activity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Activity) GoString() string { + return s.String() +} + +// Describes a policy adjustment type. +// +// For more information, see Dynamic Scaling (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/as-scale-based-on-demand.html) +// in the Auto Scaling Developer Guide. +type AdjustmentType struct { + _ struct{} `type:"structure"` + + // The policy adjustment type. The valid values are ChangeInCapacity, ExactCapacity, + // and PercentChangeInCapacity. + AdjustmentType *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s AdjustmentType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdjustmentType) GoString() string { + return s.String() +} + +// Describes an alarm. +type Alarm struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the alarm. + AlarmARN *string `min:"1" type:"string"` + + // The name of the alarm. + AlarmName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s Alarm) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Alarm) GoString() string { + return s.String() +} + +type AttachInstancesInput struct { + _ struct{} `type:"structure"` + + // The name of the group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // One or more instance IDs. + InstanceIds []*string `type:"list"` +} + +// String returns the string representation +func (s AttachInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
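+//
+// The SDK runs this validation automatically before sending a request; calling
+// it directly is useful only to pre-check input you build yourself. A sketch,
+// with a hypothetical group name:
+//
+//    in := &autoscaling.AttachInstancesInput{AutoScalingGroupName: aws.String("my-group")}
+//    if err := in.Validate(); err != nil {
+//        fmt.Println(err) // reports missing or too-short required fields
+//    }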
+func (s *AttachInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AttachInstancesInput"} + if s.AutoScalingGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName")) + } + if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type AttachInstancesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AttachInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachInstancesOutput) GoString() string { + return s.String() +} + +type AttachLoadBalancersInput struct { + _ struct{} `type:"structure"` + + // The name of the group. + AutoScalingGroupName *string `min:"1" type:"string"` + + // One or more load balancer names. + LoadBalancerNames []*string `type:"list"` +} + +// String returns the string representation +func (s AttachLoadBalancersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachLoadBalancersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AttachLoadBalancersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AttachLoadBalancersInput"} + if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type AttachLoadBalancersOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AttachLoadBalancersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachLoadBalancersOutput) GoString() string { + return s.String() +} + +// Describes a block device mapping. +type BlockDeviceMapping struct { + _ struct{} `type:"structure"` + + // The device name exposed to the EC2 instance (for example, /dev/sdh or xvdh). + DeviceName *string `min:"1" type:"string" required:"true"` + + // The information about the Amazon EBS volume. + Ebs *Ebs `type:"structure"` + + // Suppresses a device mapping. + // + // If this parameter is true for the root device, the instance might fail the + // EC2 health check. Auto Scaling launches a replacement instance if the instance + // fails the health check. + NoDevice *bool `type:"boolean"` + + // The name of the virtual device (for example, ephemeral0). + VirtualName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s BlockDeviceMapping) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BlockDeviceMapping) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
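+//
+// Validation recurses into nested members, so an invalid Ebs value surfaces as
+// a nested parameter error. A sketch, assuming the Ebs type's VolumeSize field
+// defined elsewhere in this package:
+//
+//    bdm := &autoscaling.BlockDeviceMapping{
+//        DeviceName: aws.String("/dev/sdh"),
+//        Ebs:        &autoscaling.Ebs{VolumeSize: aws.Int64(0)}, // below the minimum of 1
+//    }
+//    fmt.Println(bdm.Validate()) // includes a nested "Ebs" error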
+func (s *BlockDeviceMapping) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BlockDeviceMapping"} + if s.DeviceName == nil { + invalidParams.Add(request.NewErrParamRequired("DeviceName")) + } + if s.DeviceName != nil && len(*s.DeviceName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DeviceName", 1)) + } + if s.VirtualName != nil && len(*s.VirtualName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VirtualName", 1)) + } + if s.Ebs != nil { + if err := s.Ebs.Validate(); err != nil { + invalidParams.AddNested("Ebs", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CompleteLifecycleActionInput struct { + _ struct{} `type:"structure"` + + // The name of the group for the lifecycle hook. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // The ID of the instance. + InstanceId *string `min:"1" type:"string"` + + // The action for the group to take. This parameter can be either CONTINUE or + // ABANDON. + LifecycleActionResult *string `type:"string" required:"true"` + + // A universally unique identifier (UUID) that identifies a specific lifecycle + // action associated with an instance. Auto Scaling sends this token to the + // notification target you specified when you created the lifecycle hook. + LifecycleActionToken *string `min:"36" type:"string"` + + // The name of the lifecycle hook. + LifecycleHookName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CompleteLifecycleActionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteLifecycleActionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CompleteLifecycleActionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CompleteLifecycleActionInput"} + if s.AutoScalingGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName")) + } + if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) + } + if s.InstanceId != nil && len(*s.InstanceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InstanceId", 1)) + } + if s.LifecycleActionResult == nil { + invalidParams.Add(request.NewErrParamRequired("LifecycleActionResult")) + } + if s.LifecycleActionToken != nil && len(*s.LifecycleActionToken) < 36 { + invalidParams.Add(request.NewErrParamMinLen("LifecycleActionToken", 36)) + } + if s.LifecycleHookName == nil { + invalidParams.Add(request.NewErrParamRequired("LifecycleHookName")) + } + if s.LifecycleHookName != nil && len(*s.LifecycleHookName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LifecycleHookName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CompleteLifecycleActionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CompleteLifecycleActionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteLifecycleActionOutput) GoString() string { + return s.String() +} + +type CreateAutoScalingGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the group. This name must be unique within the scope of your + // AWS account. 
+ AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // One or more Availability Zones for the group. This parameter is optional + // if you specify one or more subnets. + AvailabilityZones []*string `min:"1" type:"list"` + + // The amount of time, in seconds, after a scaling activity completes before + // another scaling activity can start. The default is 300. + // + // For more information, see Auto Scaling Cooldowns (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/Cooldown.html) + // in the Auto Scaling Developer Guide. + DefaultCooldown *int64 `type:"integer"` + + // The number of EC2 instances that should be running in the group. This number + // must be greater than or equal to the minimum size of the group and less than + // or equal to the maximum size of the group. + DesiredCapacity *int64 `type:"integer"` + + // The amount of time, in seconds, that Auto Scaling waits before checking the + // health status of an EC2 instance that has come into service. During this + // time, any health check failures for the instance are ignored. The default + // is 300. + // + // This parameter is required if you are adding an ELB health check. + // + // For more information, see Health Checks (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/healthcheck.html) + // in the Auto Scaling Developer Guide. + HealthCheckGracePeriod *int64 `type:"integer"` + + // The service to use for the health checks. The valid values are EC2 and ELB. + // + // By default, health checks use Amazon EC2 instance status checks to determine + // the health of an instance. For more information, see Health Checks (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/healthcheck.html) + // in the Auto Scaling Developer Guide. + HealthCheckType *string `min:"1" type:"string"` + + // The ID of the instance used to create a launch configuration for the group. + // Alternatively, specify a launch configuration instead of an EC2 instance. + // + // When you specify an ID of an instance, Auto Scaling creates a new launch + // configuration and associates it with the group. This launch configuration + // derives its attributes from the specified instance, with the exception of + // the block device mapping. + // + // For more information, see Create an Auto Scaling Group Using an EC2 Instance + // (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/create-asg-from-instance.html) + // in the Auto Scaling Developer Guide. + InstanceId *string `min:"1" type:"string"` + + // The name of the launch configuration. Alternatively, specify an EC2 instance + // instead of a launch configuration. + LaunchConfigurationName *string `min:"1" type:"string"` + + // One or more load balancers. + // + // For more information, see Using a Load Balancer With an Auto Scaling Group + // (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/US_SetUpASLBApp.html) + // in the Auto Scaling Developer Guide. + LoadBalancerNames []*string `type:"list"` + + // The maximum size of the group. + MaxSize *int64 `type:"integer" required:"true"` + + // The minimum size of the group. + MinSize *int64 `type:"integer" required:"true"` + + // Indicates whether newly launched instances are protected from termination + // by Auto Scaling when scaling in. + NewInstancesProtectedFromScaleIn *bool `type:"boolean"` + + // The name of the placement group into which you'll launch your instances, + // if any. 
+ // For more information, see Placement Groups (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html)
+ // in the Amazon Elastic Compute Cloud User Guide.
+ PlacementGroup *string `min:"1" type:"string"`
+
+ // One or more tags.
+ //
+ // For more information, see Tagging Auto Scaling Groups and Instances (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/ASTagging.html)
+ // in the Auto Scaling Developer Guide.
+ Tags []*Tag `type:"list"`
+
+ // One or more termination policies used to select the instance to terminate.
+ // These policies are executed in the order that they are listed.
+ //
+ // For more information, see Controlling Which Instances Auto Scaling Terminates
+ // During Scale In (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingBehavior.InstanceTermination.html)
+ // in the Auto Scaling Developer Guide.
+ TerminationPolicies []*string `type:"list"`
+
+ // A comma-separated list of subnet identifiers for your virtual private cloud
+ // (VPC).
+ //
+ // If you specify subnets and Availability Zones with this call, ensure that
+ // the subnets' Availability Zones match the Availability Zones specified.
+ //
+ // For more information, see Launching Auto Scaling Instances in a VPC (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/asg-in-vpc.html)
+ // in the Auto Scaling Developer Guide.
+ VPCZoneIdentifier *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateAutoScalingGroupInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateAutoScalingGroupInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
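+//
+// A minimal input that passes this validation names the group, supplies the
+// required MinSize and MaxSize, and points at a launch configuration (a sketch;
+// all names are hypothetical):
+//
+//    in := &autoscaling.CreateAutoScalingGroupInput{
+//        AutoScalingGroupName:    aws.String("my-group"),
+//        LaunchConfigurationName: aws.String("my-launch-config"),
+//        MinSize:                 aws.Int64(1),
+//        MaxSize:                 aws.Int64(3),
+//        AvailabilityZones:       []*string{aws.String("us-east-1a")},
+//    }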
+func (s *CreateAutoScalingGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateAutoScalingGroupInput"} + if s.AutoScalingGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName")) + } + if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) + } + if s.AvailabilityZones != nil && len(s.AvailabilityZones) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AvailabilityZones", 1)) + } + if s.HealthCheckType != nil && len(*s.HealthCheckType) < 1 { + invalidParams.Add(request.NewErrParamMinLen("HealthCheckType", 1)) + } + if s.InstanceId != nil && len(*s.InstanceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InstanceId", 1)) + } + if s.LaunchConfigurationName != nil && len(*s.LaunchConfigurationName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LaunchConfigurationName", 1)) + } + if s.MaxSize == nil { + invalidParams.Add(request.NewErrParamRequired("MaxSize")) + } + if s.MinSize == nil { + invalidParams.Add(request.NewErrParamRequired("MinSize")) + } + if s.PlacementGroup != nil && len(*s.PlacementGroup) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PlacementGroup", 1)) + } + if s.VPCZoneIdentifier != nil && len(*s.VPCZoneIdentifier) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VPCZoneIdentifier", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateAutoScalingGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateAutoScalingGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAutoScalingGroupOutput) GoString() string { + return s.String() +} + +type CreateLaunchConfigurationInput struct { + _ struct{} `type:"structure"` + + // Used for groups that launch instances into a virtual private cloud (VPC). + // Specifies whether to assign a public IP address to each instance. For more + // information, see Launching Auto Scaling Instances in a VPC (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/asg-in-vpc.html) + // in the Auto Scaling Developer Guide. + // + // If you specify this parameter, be sure to specify at least one subnet when + // you create your group. + // + // Default: If the instance is launched into a default subnet, the default + // is true. If the instance is launched into a nondefault subnet, the default + // is false. For more information, see Supported Platforms (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-platforms.html) + // in the Amazon Elastic Compute Cloud User Guide. + AssociatePublicIpAddress *bool `type:"boolean"` + + // One or more mappings that specify how block devices are exposed to the instance. + // For more information, see Block Device Mapping (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html) + // in the Amazon Elastic Compute Cloud User Guide. + BlockDeviceMappings []*BlockDeviceMapping `type:"list"` + + // The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to. + // This parameter is supported only if you are launching EC2-Classic instances. 
+ // For more information, see ClassicLink (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) + // in the Amazon Elastic Compute Cloud User Guide. + ClassicLinkVPCId *string `min:"1" type:"string"` + + // The IDs of one or more security groups for the specified ClassicLink-enabled + // VPC. This parameter is required if you specify a ClassicLink-enabled VPC, + // and is not supported otherwise. For more information, see ClassicLink (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) + // in the Amazon Elastic Compute Cloud User Guide. + ClassicLinkVPCSecurityGroups []*string `type:"list"` + + // Indicates whether the instance is optimized for Amazon EBS I/O. By default, + // the instance is not optimized for EBS I/O. The optimization provides dedicated + // throughput to Amazon EBS and an optimized configuration stack to provide + // optimal I/O performance. This optimization is not available with all instance + // types. Additional usage charges apply. For more information, see Amazon EBS-Optimized + // Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html) + // in the Amazon Elastic Compute Cloud User Guide. + EbsOptimized *bool `type:"boolean"` + + // The name or the Amazon Resource Name (ARN) of the instance profile associated + // with the IAM role for the instance. + // + // EC2 instances launched with an IAM role will automatically have AWS security + // credentials available. You can use IAM roles with Auto Scaling to automatically + // enable applications running on your EC2 instances to securely access other + // AWS resources. For more information, see Launch Auto Scaling Instances with + // an IAM Role (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/us-iam-role.html) + // in the Auto Scaling Developer Guide. + IamInstanceProfile *string `min:"1" type:"string"` + + // The ID of the Amazon Machine Image (AMI) to use to launch your EC2 instances. + // For more information, see Finding an AMI (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/finding-an-ami.html) + // in the Amazon Elastic Compute Cloud User Guide. + ImageId *string `min:"1" type:"string"` + + // The ID of the instance to use to create the launch configuration. + // + // The new launch configuration derives attributes from the instance, with + // the exception of the block device mapping. + // + // To create a launch configuration with a block device mapping or override + // any other instance attributes, specify them as part of the same request. + // + // For more information, see Create a Launch Configuration Using an EC2 Instance + // (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/create-lc-with-instanceID.html) + // in the Auto Scaling Developer Guide. + InstanceId *string `min:"1" type:"string"` + + // Enables detailed monitoring if it is disabled. Detailed monitoring is enabled + // by default. + // + // When detailed monitoring is enabled, Amazon CloudWatch generates metrics + // every minute and your account is charged a fee. When you disable detailed + // monitoring, by specifying False, CloudWatch generates metrics every 5 minutes. + // For more information, see Monitoring Your Auto Scaling Instances and Groups + // (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/as-instance-monitoring.html) + // in the Auto Scaling Developer Guide. + InstanceMonitoring *InstanceMonitoring `type:"structure"` + + // The instance type of the EC2 instance. 
+ // For information about available instance
+ // types, see Available Instance Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#AvailableInstanceTypes)
+ // in the Amazon Elastic Compute Cloud User Guide.
+ InstanceType *string `min:"1" type:"string"`
+
+ // The ID of the kernel associated with the AMI.
+ KernelId *string `min:"1" type:"string"`
+
+ // The name of the key pair. For more information, see Amazon EC2 Key Pairs
+ // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) in
+ // the Amazon Elastic Compute Cloud User Guide.
+ KeyName *string `min:"1" type:"string"`
+
+ // The name of the launch configuration. This name must be unique within the
+ // scope of your AWS account.
+ LaunchConfigurationName *string `min:"1" type:"string" required:"true"`
+
+ // The tenancy of the instance. An instance with a tenancy of dedicated runs
+ // on single-tenant hardware and can only be launched into a VPC.
+ //
+ // You must set the value of this parameter to dedicated if you want to launch
+ // Dedicated Instances into a shared tenancy VPC (VPC with instance placement
+ // tenancy attribute set to default).
+ //
+ // If you specify this parameter, be sure to specify at least one subnet when
+ // you create your group.
+ //
+ // For more information, see Launching Auto Scaling Instances in a VPC (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/asg-in-vpc.html)
+ // in the Auto Scaling Developer Guide.
+ //
+ // Valid values: default | dedicated
+ PlacementTenancy *string `min:"1" type:"string"`
+
+ // The ID of the RAM disk associated with the AMI.
+ RamdiskId *string `min:"1" type:"string"`
+
+ // One or more security groups with which to associate the instances.
+ //
+ // If your instances are launched in EC2-Classic, you can specify either security
+ // group names or security group IDs. For more information about security
+ // groups for EC2-Classic, see Amazon EC2 Security Groups (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html)
+ // in the Amazon Elastic Compute Cloud User Guide.
+ //
+ // If your instances are launched into a VPC, specify security group IDs. For
+ // more information, see Security Groups for Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_SecurityGroups.html)
+ // in the Amazon Virtual Private Cloud User Guide.
+ SecurityGroups []*string `type:"list"`
+
+ // The maximum hourly price to be paid for any Spot Instance launched to fulfill
+ // the request. Spot Instances are launched when the price you specify exceeds
+ // the current Spot market price. For more information, see Launching Spot Instances
+ // in Your Auto Scaling Group (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/US-SpotInstances.html)
+ // in the Auto Scaling Developer Guide.
+ SpotPrice *string `min:"1" type:"string"`
+
+ // The user data to make available to the launched EC2 instances. For more information,
+ // see Instance Metadata and User Data (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html)
+ // in the Amazon Elastic Compute Cloud User Guide.
+ UserData *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CreateLaunchConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateLaunchConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
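+//
+// A minimal launch configuration input that passes this validation (a sketch;
+// the AMI ID and name are hypothetical):
+//
+//    in := &autoscaling.CreateLaunchConfigurationInput{
+//        LaunchConfigurationName: aws.String("my-launch-config"),
+//        ImageId:                 aws.String("ami-12345678"),
+//        InstanceType:            aws.String("t2.micro"),
+//    }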
+func (s *CreateLaunchConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateLaunchConfigurationInput"} + if s.ClassicLinkVPCId != nil && len(*s.ClassicLinkVPCId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClassicLinkVPCId", 1)) + } + if s.IamInstanceProfile != nil && len(*s.IamInstanceProfile) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IamInstanceProfile", 1)) + } + if s.ImageId != nil && len(*s.ImageId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ImageId", 1)) + } + if s.InstanceId != nil && len(*s.InstanceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InstanceId", 1)) + } + if s.InstanceType != nil && len(*s.InstanceType) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InstanceType", 1)) + } + if s.KernelId != nil && len(*s.KernelId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KernelId", 1)) + } + if s.KeyName != nil && len(*s.KeyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeyName", 1)) + } + if s.LaunchConfigurationName == nil { + invalidParams.Add(request.NewErrParamRequired("LaunchConfigurationName")) + } + if s.LaunchConfigurationName != nil && len(*s.LaunchConfigurationName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LaunchConfigurationName", 1)) + } + if s.PlacementTenancy != nil && len(*s.PlacementTenancy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PlacementTenancy", 1)) + } + if s.RamdiskId != nil && len(*s.RamdiskId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RamdiskId", 1)) + } + if s.SpotPrice != nil && len(*s.SpotPrice) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SpotPrice", 1)) + } + if s.BlockDeviceMappings != nil { + for i, v := range s.BlockDeviceMappings { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "BlockDeviceMappings", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateLaunchConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateLaunchConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLaunchConfigurationOutput) GoString() string { + return s.String() +} + +type CreateOrUpdateTagsInput struct { + _ struct{} `type:"structure"` + + // One or more tags. + Tags []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s CreateOrUpdateTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateOrUpdateTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
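+//
+// Tags are addressed by resource ID and type. A sketch, assuming the Tag fields
+// (Key, Value, ResourceId, ResourceType, PropagateAtLaunch) defined elsewhere
+// in this package and a hypothetical group name:
+//
+//    in := &autoscaling.CreateOrUpdateTagsInput{
+//        Tags: []*autoscaling.Tag{{
+//            ResourceId:        aws.String("my-group"),
+//            ResourceType:      aws.String("auto-scaling-group"),
+//            Key:               aws.String("environment"),
+//            Value:             aws.String("test"),
+//            PropagateAtLaunch: aws.Bool(true),
+//        }},
+//    }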
+func (s *CreateOrUpdateTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateOrUpdateTagsInput"} + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateOrUpdateTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateOrUpdateTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateOrUpdateTagsOutput) GoString() string { + return s.String() +} + +type DeleteAutoScalingGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the group to delete. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // Specifies that the group will be deleted along with all instances associated + // with the group, without waiting for all instances to be terminated. This + // parameter also deletes any lifecycle actions associated with the group. + ForceDelete *bool `type:"boolean"` +} + +// String returns the string representation +func (s DeleteAutoScalingGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAutoScalingGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteAutoScalingGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteAutoScalingGroupInput"} + if s.AutoScalingGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName")) + } + if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteAutoScalingGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAutoScalingGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAutoScalingGroupOutput) GoString() string { + return s.String() +} + +type DeleteLaunchConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the launch configuration. + LaunchConfigurationName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLaunchConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLaunchConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
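+//
+// A launch configuration can be deleted only after no Auto Scaling group
+// references it. A sketch, assuming a configured svc client and a hypothetical
+// name:
+//
+//    _, err := svc.DeleteLaunchConfiguration(&autoscaling.DeleteLaunchConfigurationInput{
+//        LaunchConfigurationName: aws.String("my-launch-config"),
+//    })
+//    if err != nil {
+//        fmt.Println(err)
+//    }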
+func (s *DeleteLaunchConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteLaunchConfigurationInput"} + if s.LaunchConfigurationName == nil { + invalidParams.Add(request.NewErrParamRequired("LaunchConfigurationName")) + } + if s.LaunchConfigurationName != nil && len(*s.LaunchConfigurationName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LaunchConfigurationName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteLaunchConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteLaunchConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLaunchConfigurationOutput) GoString() string { + return s.String() +} + +type DeleteLifecycleHookInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group for the lifecycle hook. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // The name of the lifecycle hook. + LifecycleHookName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLifecycleHookInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLifecycleHookInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteLifecycleHookInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteLifecycleHookInput"} + if s.AutoScalingGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName")) + } + if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) + } + if s.LifecycleHookName == nil { + invalidParams.Add(request.NewErrParamRequired("LifecycleHookName")) + } + if s.LifecycleHookName != nil && len(*s.LifecycleHookName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LifecycleHookName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteLifecycleHookOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteLifecycleHookOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLifecycleHookOutput) GoString() string { + return s.String() +} + +type DeleteNotificationConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the Amazon Simple Notification Service + // (SNS) topic. + TopicARN *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteNotificationConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNotificationConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteNotificationConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteNotificationConfigurationInput"} + if s.AutoScalingGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName")) + } + if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) + } + if s.TopicARN == nil { + invalidParams.Add(request.NewErrParamRequired("TopicARN")) + } + if s.TopicARN != nil && len(*s.TopicARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TopicARN", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteNotificationConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteNotificationConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNotificationConfigurationOutput) GoString() string { + return s.String() +} + +type DeletePolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string"` + + // The name or Amazon Resource Name (ARN) of the policy. + PolicyName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeletePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeletePolicyInput"} + if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) + } + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeletePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePolicyOutput) GoString() string { + return s.String() +} + +type DeleteScheduledActionInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string"` + + // The name of the action to delete. + ScheduledActionName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteScheduledActionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteScheduledActionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteScheduledActionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteScheduledActionInput"} + if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) + } + if s.ScheduledActionName == nil { + invalidParams.Add(request.NewErrParamRequired("ScheduledActionName")) + } + if s.ScheduledActionName != nil && len(*s.ScheduledActionName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ScheduledActionName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteScheduledActionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteScheduledActionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteScheduledActionOutput) GoString() string { + return s.String() +} + +type DeleteTagsInput struct { + _ struct{} `type:"structure"` + + // One or more tags. + Tags []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s DeleteTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTagsInput"} + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTagsOutput) GoString() string { + return s.String() +} + +type DescribeAccountLimitsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeAccountLimitsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAccountLimitsInput) GoString() string { + return s.String() +} + +type DescribeAccountLimitsOutput struct { + _ struct{} `type:"structure"` + + // The maximum number of groups allowed for your AWS account. The default limit + // is 20 per region. + MaxNumberOfAutoScalingGroups *int64 `type:"integer"` + + // The maximum number of launch configurations allowed for your AWS account. + // The default limit is 100 per region. + MaxNumberOfLaunchConfigurations *int64 `type:"integer"` + + // The current number of groups for your AWS account. + NumberOfAutoScalingGroups *int64 `type:"integer"` + + // The current number of launch configurations for your AWS account. 
+ NumberOfLaunchConfigurations *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeAccountLimitsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAccountLimitsOutput) GoString() string { + return s.String() +} + +type DescribeAdjustmentTypesInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeAdjustmentTypesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAdjustmentTypesInput) GoString() string { + return s.String() +} + +type DescribeAdjustmentTypesOutput struct { + _ struct{} `type:"structure"` + + // The policy adjustment types. + AdjustmentTypes []*AdjustmentType `type:"list"` +} + +// String returns the string representation +func (s DescribeAdjustmentTypesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAdjustmentTypesOutput) GoString() string { + return s.String() +} + +type DescribeAutoScalingGroupsInput struct { + _ struct{} `type:"structure"` + + // The group names. + AutoScalingGroupNames []*string `type:"list"` + + // The maximum number of items to return with this call. + MaxRecords *int64 `type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeAutoScalingGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAutoScalingGroupsInput) GoString() string { + return s.String() +} + +type DescribeAutoScalingGroupsOutput struct { + _ struct{} `type:"structure"` + + // The groups. + AutoScalingGroups []*Group `type:"list" required:"true"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeAutoScalingGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAutoScalingGroupsOutput) GoString() string { + return s.String() +} + +type DescribeAutoScalingInstancesInput struct { + _ struct{} `type:"structure"` + + // The instances to describe; up to 50 instance IDs. If you omit this parameter, + // all Auto Scaling instances are described. If you specify an ID that does + // not exist, it is ignored with no error. + InstanceIds []*string `type:"list"` + + // The maximum number of items to return with this call. + MaxRecords *int64 `type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeAutoScalingInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAutoScalingInstancesInput) GoString() string { + return s.String() +} + +type DescribeAutoScalingInstancesOutput struct { + _ struct{} `type:"structure"` + + // The instances. + AutoScalingInstances []*InstanceDetails `type:"list"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. 
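+ //
+ // A manual paging sketch, assuming a configured svc client: repeat the call
+ // with each returned token until the token comes back empty.
+ //
+ //    in := &autoscaling.DescribeAutoScalingInstancesInput{}
+ //    for {
+ //        out, err := svc.DescribeAutoScalingInstances(in)
+ //        if err != nil {
+ //            break
+ //        }
+ //        for _, inst := range out.AutoScalingInstances {
+ //            fmt.Println(*inst.InstanceId)
+ //        }
+ //        if out.NextToken == nil || *out.NextToken == "" {
+ //            break
+ //        }
+ //        in.NextToken = out.NextToken
+ //    }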
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeAutoScalingInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAutoScalingInstancesOutput) GoString() string { + return s.String() +} + +type DescribeAutoScalingNotificationTypesInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeAutoScalingNotificationTypesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAutoScalingNotificationTypesInput) GoString() string { + return s.String() +} + +type DescribeAutoScalingNotificationTypesOutput struct { + _ struct{} `type:"structure"` + + // One or more of the following notification types: + // + // autoscaling:EC2_INSTANCE_LAUNCH + // + // autoscaling:EC2_INSTANCE_LAUNCH_ERROR + // + // autoscaling:EC2_INSTANCE_TERMINATE + // + // autoscaling:EC2_INSTANCE_TERMINATE_ERROR + // + // autoscaling:TEST_NOTIFICATION + AutoScalingNotificationTypes []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeAutoScalingNotificationTypesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAutoScalingNotificationTypesOutput) GoString() string { + return s.String() +} + +type DescribeLaunchConfigurationsInput struct { + _ struct{} `type:"structure"` + + // The launch configuration names. + LaunchConfigurationNames []*string `type:"list"` + + // The maximum number of items to return with this call. The default is 100. + MaxRecords *int64 `type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeLaunchConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLaunchConfigurationsInput) GoString() string { + return s.String() +} + +type DescribeLaunchConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // The launch configurations. + LaunchConfigurations []*LaunchConfiguration `type:"list" required:"true"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeLaunchConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLaunchConfigurationsOutput) GoString() string { + return s.String() +} + +type DescribeLifecycleHookTypesInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeLifecycleHookTypesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLifecycleHookTypesInput) GoString() string { + return s.String() +} + +type DescribeLifecycleHookTypesOutput struct { + _ struct{} `type:"structure"` + + // One or more of the following notification types: + // + // autoscaling:EC2_INSTANCE_LAUNCHING + // + // autoscaling:EC2_INSTANCE_TERMINATING + LifecycleHookTypes []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeLifecycleHookTypesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLifecycleHookTypesOutput) GoString() string { + return s.String() +} + +type DescribeLifecycleHooksInput struct { + _ struct{} `type:"structure"` + + // The name of the group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // The names of one or more lifecycle hooks. + LifecycleHookNames []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeLifecycleHooksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLifecycleHooksInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeLifecycleHooksInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeLifecycleHooksInput"} + if s.AutoScalingGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName")) + } + if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeLifecycleHooksOutput struct { + _ struct{} `type:"structure"` + + // The lifecycle hooks for the specified group. + LifecycleHooks []*LifecycleHook `type:"list"` +} + +// String returns the string representation +func (s DescribeLifecycleHooksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLifecycleHooksOutput) GoString() string { + return s.String() +} + +type DescribeLoadBalancersInput struct { + _ struct{} `type:"structure"` + + // The name of the group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // The maximum number of items to return with this call. + MaxRecords *int64 `type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeLoadBalancersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBalancersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeLoadBalancersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeLoadBalancersInput"} + if s.AutoScalingGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName")) + } + if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeLoadBalancersOutput struct { + _ struct{} `type:"structure"` + + // The load balancers. + LoadBalancers []*LoadBalancerState `type:"list"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeLoadBalancersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBalancersOutput) GoString() string { + return s.String() +} + +type DescribeMetricCollectionTypesInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeMetricCollectionTypesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMetricCollectionTypesInput) GoString() string { + return s.String() +} + +type DescribeMetricCollectionTypesOutput struct { + _ struct{} `type:"structure"` + + // The granularities for the metrics. + Granularities []*MetricGranularityType `type:"list"` + + // One or more metrics. + Metrics []*MetricCollectionType `type:"list"` +} + +// String returns the string representation +func (s DescribeMetricCollectionTypesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMetricCollectionTypesOutput) GoString() string { + return s.String() +} + +type DescribeNotificationConfigurationsInput struct { + _ struct{} `type:"structure"` + + // The name of the group. + AutoScalingGroupNames []*string `type:"list"` + + // The maximum number of items to return with this call. + MaxRecords *int64 `type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeNotificationConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNotificationConfigurationsInput) GoString() string { + return s.String() +} + +type DescribeNotificationConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `type:"string"` + + // The notification configurations. + NotificationConfigurations []*NotificationConfiguration `type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeNotificationConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNotificationConfigurationsOutput) GoString() string { + return s.String() +} + +type DescribePoliciesInput struct { + _ struct{} `type:"structure"` + + // The name of the group. 
+ AutoScalingGroupName *string `min:"1" type:"string"`
+
+ // The maximum number of items to be returned with each call.
+ MaxRecords *int64 `type:"integer"`
+
+ // The token for the next set of items to return. (You received this token from
+ // a previous call.)
+ NextToken *string `type:"string"`
+
+ // One or more policy names or policy ARNs to be described. If you omit this
+ // list, all policy names are described. If a group name is provided, the results
+ // are limited to that group. This list is limited to 50 items. If you specify
+ // an unknown policy name, it is ignored with no error.
+ PolicyNames []*string `type:"list"`
+
+ // One or more policy types. Valid values are SimpleScaling and StepScaling.
+ PolicyTypes []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribePoliciesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribePoliciesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribePoliciesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribePoliciesInput"}
+ if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type DescribePoliciesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The token to use when requesting the next set of items. If there are no additional
+ // items to return, the string is empty.
+ NextToken *string `type:"string"`
+
+ // The scaling policies.
+ ScalingPolicies []*ScalingPolicy `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribePoliciesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribePoliciesOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeScalingActivitiesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The activity IDs of the desired scaling activities. If this list is omitted,
+ // all activities are described. If you specify an Auto Scaling group, the results
+ // are limited to that group. The list of requested activities cannot contain
+ // more than 50 items. If unknown activities are requested, they are ignored
+ // with no error.
+ ActivityIds []*string `type:"list"`
+
+ // The name of the group.
+ AutoScalingGroupName *string `min:"1" type:"string"`
+
+ // The maximum number of items to return with this call.
+ MaxRecords *int64 `type:"integer"`
+
+ // The token for the next set of items to return. (You received this token from
+ // a previous call.)
+ NextToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeScalingActivitiesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeScalingActivitiesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
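+//
+// An illustrative sketch (not generated code) of the NextToken pagination
+// loop described above; svc and the group name are hypothetical:
+//
+//    var activities []*autoscaling.Activity
+//    params := &autoscaling.DescribeScalingActivitiesInput{
+//        AutoScalingGroupName: aws.String("web-asg"),
+//    }
+//    for {
+//        page, err := svc.DescribeScalingActivities(params)
+//        if err != nil {
+//            return err
+//        }
+//        activities = append(activities, page.Activities...)
+//        if page.NextToken == nil || *page.NextToken == "" {
+//            break // an empty token means there are no more items
+//        }
+//        params.NextToken = page.NextToken
+//    }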
+func (s *DescribeScalingActivitiesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeScalingActivitiesInput"}
+ if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type DescribeScalingActivitiesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The scaling activities.
+ Activities []*Activity `type:"list" required:"true"`
+
+ // The token to use when requesting the next set of items. If there are no additional
+ // items to return, the string is empty.
+ NextToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeScalingActivitiesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeScalingActivitiesOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeScalingProcessTypesInput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeScalingProcessTypesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeScalingProcessTypesInput) GoString() string {
+ return s.String()
+}
+
+type DescribeScalingProcessTypesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The names of the process types.
+ Processes []*ProcessType `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeScalingProcessTypesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeScalingProcessTypesOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeScheduledActionsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the group.
+ AutoScalingGroupName *string `min:"1" type:"string"`
+
+ // The latest scheduled start time to return. If scheduled action names are
+ // provided, this parameter is ignored.
+ EndTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // The maximum number of items to return with this call.
+ MaxRecords *int64 `type:"integer"`
+
+ // The token for the next set of items to return. (You received this token from
+ // a previous call.)
+ NextToken *string `type:"string"`
+
+ // Describes one or more scheduled actions. If you omit this list, the call
+ // describes all scheduled actions. If you specify an unknown scheduled action,
+ // it is ignored with no error.
+ //
+ // You can describe up to a maximum of 50 scheduled actions with a single call.
+ // If there are more items to return, the call returns a token. To get the next
+ // set of items, repeat the call with the returned token.
+ ScheduledActionNames []*string `type:"list"`
+
+ // The earliest scheduled start time to return. If scheduled action names are
+ // provided, this parameter is ignored.
+ StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+}
+
+// String returns the string representation
+func (s DescribeScheduledActionsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeScheduledActionsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
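+//
+// An illustrative sketch (not generated code): listing a hypothetical group's
+// scheduled actions for the next 24 hours; svc is a hypothetical client:
+//
+//    now := time.Now().UTC()
+//    params := &autoscaling.DescribeScheduledActionsInput{
+//        AutoScalingGroupName: aws.String("web-asg"),
+//        StartTime:            aws.Time(now),
+//        EndTime:              aws.Time(now.Add(24 * time.Hour)),
+//    }
+//    resp, err := svc.DescribeScheduledActions(params)
+//    // resp.ScheduledUpdateGroupActions holds the matching actions.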
+func (s *DescribeScheduledActionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeScheduledActionsInput"} + if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeScheduledActionsOutput struct { + _ struct{} `type:"structure"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `type:"string"` + + // The scheduled actions. + ScheduledUpdateGroupActions []*ScheduledUpdateGroupAction `type:"list"` +} + +// String returns the string representation +func (s DescribeScheduledActionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScheduledActionsOutput) GoString() string { + return s.String() +} + +type DescribeTagsInput struct { + _ struct{} `type:"structure"` + + // A filter used to scope the tags to return. + Filters []*Filter `type:"list"` + + // The maximum number of items to return with this call. + MaxRecords *int64 `type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTagsInput) GoString() string { + return s.String() +} + +type DescribeTagsOutput struct { + _ struct{} `type:"structure"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `type:"string"` + + // One or more tags. + Tags []*TagDescription `type:"list"` +} + +// String returns the string representation +func (s DescribeTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTagsOutput) GoString() string { + return s.String() +} + +type DescribeTerminationPolicyTypesInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeTerminationPolicyTypesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTerminationPolicyTypesInput) GoString() string { + return s.String() +} + +type DescribeTerminationPolicyTypesOutput struct { + _ struct{} `type:"structure"` + + // The termination policies supported by Auto Scaling (OldestInstance, OldestLaunchConfiguration, + // NewestInstance, ClosestToNextInstanceHour, and Default). + TerminationPolicyTypes []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeTerminationPolicyTypesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTerminationPolicyTypesOutput) GoString() string { + return s.String() +} + +type DetachInstancesInput struct { + _ struct{} `type:"structure"` + + // The name of the group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // One or more instance IDs. + InstanceIds []*string `type:"list"` + + // If True, the Auto Scaling group decrements the desired capacity value by + // the number of instances detached. 
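+ //
+ // An illustrative sketch (not generated code): detaching one hypothetical
+ // instance and shrinking the group's desired capacity to match:
+ //
+ //    resp, err := svc.DetachInstances(&autoscaling.DetachInstancesInput{
+ //        AutoScalingGroupName:           aws.String("web-asg"),
+ //        InstanceIds:                    []*string{aws.String("i-12345678")},
+ //        ShouldDecrementDesiredCapacity: aws.Bool(true),
+ //    })
+ //    // resp.Activities describes the progress of the detachment.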
+ ShouldDecrementDesiredCapacity *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s DetachInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DetachInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DetachInstancesInput"} + if s.AutoScalingGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName")) + } + if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) + } + if s.ShouldDecrementDesiredCapacity == nil { + invalidParams.Add(request.NewErrParamRequired("ShouldDecrementDesiredCapacity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DetachInstancesOutput struct { + _ struct{} `type:"structure"` + + // The activities related to detaching the instances from the Auto Scaling group. + Activities []*Activity `type:"list"` +} + +// String returns the string representation +func (s DetachInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachInstancesOutput) GoString() string { + return s.String() +} + +type DetachLoadBalancersInput struct { + _ struct{} `type:"structure"` + + // The name of the group. + AutoScalingGroupName *string `min:"1" type:"string"` + + // One or more load balancer names. + LoadBalancerNames []*string `type:"list"` +} + +// String returns the string representation +func (s DetachLoadBalancersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachLoadBalancersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DetachLoadBalancersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DetachLoadBalancersInput"} + if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DetachLoadBalancersOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DetachLoadBalancersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachLoadBalancersOutput) GoString() string { + return s.String() +} + +type DisableMetricsCollectionInput struct { + _ struct{} `type:"structure"` + + // The name or Amazon Resource Name (ARN) of the group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // One or more of the following metrics. If you omit this parameter, all metrics + // are disabled. 
+ // + // GroupMinSize + // + // GroupMaxSize + // + // GroupDesiredCapacity + // + // GroupInServiceInstances + // + // GroupPendingInstances + // + // GroupStandbyInstances + // + // GroupTerminatingInstances + // + // GroupTotalInstances + Metrics []*string `type:"list"` +} + +// String returns the string representation +func (s DisableMetricsCollectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableMetricsCollectionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DisableMetricsCollectionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisableMetricsCollectionInput"} + if s.AutoScalingGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName")) + } + if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DisableMetricsCollectionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisableMetricsCollectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableMetricsCollectionOutput) GoString() string { + return s.String() +} + +// Describes an Amazon EBS volume. +type Ebs struct { + _ struct{} `type:"structure"` + + // Indicates whether the volume is deleted on instance termination. + // + // Default: true + DeleteOnTermination *bool `type:"boolean"` + + // Indicates whether the volume should be encrypted. Encrypted EBS volumes must + // be attached to instances that support Amazon EBS encryption. Volumes that + // are created from encrypted snapshots are automatically encrypted. There is + // no way to create an encrypted volume from an unencrypted snapshot or an unencrypted + // volume from an encrypted snapshot. For more information, see Amazon EBS Encryption + // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) in + // the Amazon Elastic Compute Cloud User Guide. + Encrypted *bool `type:"boolean"` + + // The number of I/O operations per second (IOPS) to provision for the volume. + // + // Constraint: Required when the volume type is io1. + Iops *int64 `min:"100" type:"integer"` + + // The ID of the snapshot. + SnapshotId *string `min:"1" type:"string"` + + // The volume size, in GiB. For standard volumes, specify a value from 1 to + // 1,024. For io1 volumes, specify a value from 4 to 16,384. For gp2 volumes, + // specify a value from 1 to 16,384. If you specify a snapshot, the volume size + // must be equal to or larger than the snapshot size. + // + // Default: If you create a volume from a snapshot and you don't specify a + // volume size, the default is the snapshot size. + VolumeSize *int64 `min:"1" type:"integer"` + + // The volume type. For more information, see Amazon EBS Volume Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) + // in the Amazon Elastic Compute Cloud User Guide. 
+ // + // Valid values: standard | io1 | gp2 + // + // Default: standard + VolumeType *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s Ebs) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Ebs) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Ebs) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Ebs"} + if s.Iops != nil && *s.Iops < 100 { + invalidParams.Add(request.NewErrParamMinValue("Iops", 100)) + } + if s.SnapshotId != nil && len(*s.SnapshotId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SnapshotId", 1)) + } + if s.VolumeSize != nil && *s.VolumeSize < 1 { + invalidParams.Add(request.NewErrParamMinValue("VolumeSize", 1)) + } + if s.VolumeType != nil && len(*s.VolumeType) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VolumeType", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type EnableMetricsCollectionInput struct { + _ struct{} `type:"structure"` + + // The name or ARN of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // The granularity to associate with the metrics to collect. The only valid + // value is 1Minute. + Granularity *string `min:"1" type:"string" required:"true"` + + // One or more of the following metrics. If you omit this parameter, all metrics + // are enabled. + // + // GroupMinSize + // + // GroupMaxSize + // + // GroupDesiredCapacity + // + // GroupInServiceInstances + // + // GroupPendingInstances + // + // GroupStandbyInstances + // + // GroupTerminatingInstances + // + // GroupTotalInstances + // + // Note that the GroupStandbyInstances metric is not enabled by default. You + // must explicitly request this metric. + Metrics []*string `type:"list"` +} + +// String returns the string representation +func (s EnableMetricsCollectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableMetricsCollectionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *EnableMetricsCollectionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EnableMetricsCollectionInput"} + if s.AutoScalingGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName")) + } + if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) + } + if s.Granularity == nil { + invalidParams.Add(request.NewErrParamRequired("Granularity")) + } + if s.Granularity != nil && len(*s.Granularity) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Granularity", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type EnableMetricsCollectionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s EnableMetricsCollectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableMetricsCollectionOutput) GoString() string { + return s.String() +} + +// Describes an enabled metric. +type EnabledMetric struct { + _ struct{} `type:"structure"` + + // The granularity of the metric. The only valid value is 1Minute. 
+ Granularity *string `min:"1" type:"string"` + + // One of the following metrics: + // + // GroupMinSize + // + // GroupMaxSize + // + // GroupDesiredCapacity + // + // GroupInServiceInstances + // + // GroupPendingInstances + // + // GroupStandbyInstances + // + // GroupTerminatingInstances + // + // GroupTotalInstances + Metric *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s EnabledMetric) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnabledMetric) GoString() string { + return s.String() +} + +type EnterStandbyInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // One or more instances to move into Standby mode. You must specify at least + // one instance ID. + InstanceIds []*string `type:"list"` + + // Specifies whether the instances moved to Standby mode count as part of the + // Auto Scaling group's desired capacity. If set, the desired capacity for the + // Auto Scaling group decrements by the number of instances moved to Standby + // mode. + ShouldDecrementDesiredCapacity *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s EnterStandbyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnterStandbyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *EnterStandbyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EnterStandbyInput"} + if s.AutoScalingGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName")) + } + if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) + } + if s.ShouldDecrementDesiredCapacity == nil { + invalidParams.Add(request.NewErrParamRequired("ShouldDecrementDesiredCapacity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type EnterStandbyOutput struct { + _ struct{} `type:"structure"` + + // The activities related to moving instances into Standby mode. + Activities []*Activity `type:"list"` +} + +// String returns the string representation +func (s EnterStandbyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnterStandbyOutput) GoString() string { + return s.String() +} + +type ExecutePolicyInput struct { + _ struct{} `type:"structure"` + + // The name or Amazon Resource Name (ARN) of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string"` + + // The breach threshold for the alarm. + // + // This parameter is required if the policy type is StepScaling and not supported + // otherwise. + BreachThreshold *float64 `type:"double"` + + // If this parameter is true, Auto Scaling waits for the cooldown period to + // complete before executing the policy. Otherwise, Auto Scaling executes the + // policy without waiting for the cooldown period to complete. + // + // This parameter is not supported if the policy type is StepScaling. + // + // For more information, see Auto Scaling Cooldowns (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/Cooldown.html) + // in the Auto Scaling Developer Guide. 
+ HonorCooldown *bool `type:"boolean"` + + // The metric value to compare to BreachThreshold. This enables you to execute + // a policy of type StepScaling and determine which step adjustment to use. + // For example, if the breach threshold is 50 and you want to use a step adjustment + // with a lower bound of 0 and an upper bound of 10, you can set the metric + // value to 59. + // + // If you specify a metric value that doesn't correspond to a step adjustment + // for the policy, the call returns an error. + // + // This parameter is required if the policy type is StepScaling and not supported + // otherwise. + MetricValue *float64 `type:"double"` + + // The name or ARN of the policy. + PolicyName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ExecutePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExecutePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ExecutePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExecutePolicyInput"} + if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) + } + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ExecutePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ExecutePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExecutePolicyOutput) GoString() string { + return s.String() +} + +type ExitStandbyInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // One or more instance IDs. You must specify at least one instance ID. + InstanceIds []*string `type:"list"` +} + +// String returns the string representation +func (s ExitStandbyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExitStandbyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ExitStandbyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExitStandbyInput"} + if s.AutoScalingGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName")) + } + if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ExitStandbyOutput struct { + _ struct{} `type:"structure"` + + // The activities related to moving instances out of Standby mode. + Activities []*Activity `type:"list"` +} + +// String returns the string representation +func (s ExitStandbyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExitStandbyOutput) GoString() string { + return s.String() +} + +// Describes a filter. 
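+//
+// An illustrative sketch (not generated code): scoping DescribeTags to a
+// hypothetical group with a filter:
+//
+//    resp, err := svc.DescribeTags(&autoscaling.DescribeTagsInput{
+//        Filters: []*autoscaling.Filter{{
+//            Name:   aws.String("auto-scaling-group"),
+//            Values: []*string{aws.String("web-asg")},
+//        }},
+//    })
+//    // resp.Tags holds the matching TagDescription values.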
+type Filter struct { + _ struct{} `type:"structure"` + + // The name of the filter. The valid values are: "auto-scaling-group", "key", + // "value", and "propagate-at-launch". + Name *string `type:"string"` + + // The value of the filter. + Values []*string `type:"list"` +} + +// String returns the string representation +func (s Filter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Filter) GoString() string { + return s.String() +} + +// Describes an Auto Scaling group. +type Group struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the group. + AutoScalingGroupARN *string `min:"1" type:"string"` + + // The name of the group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // One or more Availability Zones for the group. + AvailabilityZones []*string `min:"1" type:"list" required:"true"` + + // The date and time the group was created. + CreatedTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The amount of time, in seconds, after a scaling activity completes before + // another scaling activity can start. + DefaultCooldown *int64 `type:"integer" required:"true"` + + // The desired size of the group. + DesiredCapacity *int64 `type:"integer" required:"true"` + + // The metrics enabled for the group. + EnabledMetrics []*EnabledMetric `type:"list"` + + // The amount of time, in seconds, that Auto Scaling waits before checking the + // health status of an EC2 instance that has come into service. + HealthCheckGracePeriod *int64 `type:"integer"` + + // The service to use for the health checks. The valid values are EC2 and ELB. + HealthCheckType *string `min:"1" type:"string" required:"true"` + + // The EC2 instances associated with the group. + Instances []*Instance `type:"list"` + + // The name of the associated launch configuration. + LaunchConfigurationName *string `min:"1" type:"string"` + + // One or more load balancers associated with the group. + LoadBalancerNames []*string `type:"list"` + + // The maximum size of the group. + MaxSize *int64 `type:"integer" required:"true"` + + // The minimum size of the group. + MinSize *int64 `type:"integer" required:"true"` + + // Indicates whether newly launched instances are protected from termination + // by Auto Scaling when scaling in. + NewInstancesProtectedFromScaleIn *bool `type:"boolean"` + + // The name of the placement group into which you'll launch your instances, + // if any. For more information, see Placement Groups (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html) + // in the Amazon Elastic Compute Cloud User Guide. + PlacementGroup *string `min:"1" type:"string"` + + // The current state of the group when DeleteAutoScalingGroup is in progress. + Status *string `min:"1" type:"string"` + + // The suspended processes associated with the group. + SuspendedProcesses []*SuspendedProcess `type:"list"` + + // The tags for the group. + Tags []*TagDescription `type:"list"` + + // The termination policies for the group. + TerminationPolicies []*string `type:"list"` + + // One or more subnet IDs, if applicable, separated by commas. + // + // If you specify VPCZoneIdentifier and AvailabilityZones, ensure that the + // Availability Zones of the subnets match the values for AvailabilityZones. 
+ VPCZoneIdentifier *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s Group) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Group) GoString() string { + return s.String() +} + +// Describes an EC2 instance. +type Instance struct { + _ struct{} `type:"structure"` + + // The Availability Zone in which the instance is running. + AvailabilityZone *string `min:"1" type:"string" required:"true"` + + // The health status of the instance. "Healthy" means that the instance is healthy + // and should remain in service. "Unhealthy" means that the instance is unhealthy + // and Auto Scaling should terminate and replace it. + HealthStatus *string `min:"1" type:"string" required:"true"` + + // The ID of the instance. + InstanceId *string `min:"1" type:"string" required:"true"` + + // The launch configuration associated with the instance. + LaunchConfigurationName *string `min:"1" type:"string" required:"true"` + + // A description of the current lifecycle state. Note that the Quarantined state + // is not used. + LifecycleState *string `type:"string" required:"true" enum:"LifecycleState"` + + // Indicates whether the instance is protected from termination by Auto Scaling + // when scaling in. + ProtectedFromScaleIn *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s Instance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Instance) GoString() string { + return s.String() +} + +// Describes an EC2 instance associated with an Auto Scaling group. +type InstanceDetails struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group associated with the instance. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // The Availability Zone for the instance. + AvailabilityZone *string `min:"1" type:"string" required:"true"` + + // The health status of this instance. "Healthy" means that the instance is + // healthy and should remain in service. "Unhealthy" means that the instance + // is unhealthy and Auto Scaling should terminate and replace it. + HealthStatus *string `min:"1" type:"string" required:"true"` + + // The ID of the instance. + InstanceId *string `min:"1" type:"string" required:"true"` + + // The launch configuration associated with the instance. + LaunchConfigurationName *string `min:"1" type:"string" required:"true"` + + // The lifecycle state for the instance. For more information, see Auto Scaling + // Lifecycle (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingGroupLifecycle.html) + // in the Auto Scaling Developer Guide. + LifecycleState *string `min:"1" type:"string" required:"true"` + + // Indicates whether the instance is protected from termination by Auto Scaling + // when scaling in. + ProtectedFromScaleIn *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s InstanceDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceDetails) GoString() string { + return s.String() +} + +// Describes whether instance monitoring is enabled. +type InstanceMonitoring struct { + _ struct{} `type:"structure"` + + // If True, instance monitoring is enabled. 
+ Enabled *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s InstanceMonitoring) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceMonitoring) GoString() string {
+ return s.String()
+}
+
+// Describes a launch configuration.
+type LaunchConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // [EC2-VPC] Indicates whether to assign a public IP address to each instance.
+ AssociatePublicIpAddress *bool `type:"boolean"`
+
+ // A block device mapping, which specifies the block devices for the instance.
+ BlockDeviceMappings []*BlockDeviceMapping `type:"list"`
+
+ // The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to.
+ // This parameter can only be used if you are launching EC2-Classic instances.
+ // For more information, see ClassicLink (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html)
+ // in the Amazon Elastic Compute Cloud User Guide.
+ ClassicLinkVPCId *string `min:"1" type:"string"`
+
+ // The IDs of one or more security groups for the VPC specified in ClassicLinkVPCId.
+ // This parameter is required if you specify a ClassicLink-enabled VPC, and
+ // cannot be used otherwise. For more information, see ClassicLink (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html)
+ // in the Amazon Elastic Compute Cloud User Guide.
+ ClassicLinkVPCSecurityGroups []*string `type:"list"`
+
+ // The creation date and time for the launch configuration.
+ CreatedTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
+
+ // Controls whether the instance is optimized for EBS I/O (true) or not (false).
+ EbsOptimized *bool `type:"boolean"`
+
+ // The name or Amazon Resource Name (ARN) of the instance profile associated
+ // with the IAM role for the instance.
+ IamInstanceProfile *string `min:"1" type:"string"`
+
+ // The ID of the Amazon Machine Image (AMI).
+ ImageId *string `min:"1" type:"string" required:"true"`
+
+ // Controls whether instances in this group are launched with detailed monitoring.
+ InstanceMonitoring *InstanceMonitoring `type:"structure"`
+
+ // The instance type for the instances.
+ InstanceType *string `min:"1" type:"string" required:"true"`
+
+ // The ID of the kernel associated with the AMI.
+ KernelId *string `min:"1" type:"string"`
+
+ // The name of the key pair.
+ KeyName *string `min:"1" type:"string"`
+
+ // The Amazon Resource Name (ARN) of the launch configuration.
+ LaunchConfigurationARN *string `min:"1" type:"string"`
+
+ // The name of the launch configuration.
+ LaunchConfigurationName *string `min:"1" type:"string" required:"true"`
+
+ // The tenancy of the instance, either default or dedicated. An instance with
+ // dedicated tenancy runs on isolated, single-tenant hardware and can only
+ // be launched into a VPC.
+ PlacementTenancy *string `min:"1" type:"string"`
+
+ // The ID of the RAM disk associated with the AMI.
+ RamdiskId *string `min:"1" type:"string"`
+
+ // The security groups to associate with the instances.
+ SecurityGroups []*string `type:"list"`
+
+ // The price to bid when launching Spot Instances.
+ SpotPrice *string `min:"1" type:"string"`
+
+ // The user data available to the instances.
+ UserData *string `type:"string"`
+}
+
+// String returns the string representation
+func (s LaunchConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LaunchConfiguration) GoString() string {
+ return s.String()
+}
+
+// Describes a lifecycle hook, which tells Auto Scaling that you want to perform
+// an action when an instance launches or terminates. When you have a lifecycle
+// hook in place, the Auto Scaling group will either:
+//
+// Pause the instance after it launches, but before it is put into service
+//
+// Pause the instance as it terminates, but before it is fully terminated
+//
+// For more information, see Auto Scaling Lifecycle (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingGroupLifecycle.html)
+// in the Auto Scaling Developer Guide.
+type LifecycleHook struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the Auto Scaling group for the lifecycle hook.
+ AutoScalingGroupName *string `min:"1" type:"string"`
+
+ // Defines the action the Auto Scaling group should take when the lifecycle
+ // hook timeout elapses or if an unexpected failure occurs. The valid values
+ // are CONTINUE and ABANDON. The default value is CONTINUE.
+ DefaultResult *string `type:"string"`
+
+ // The maximum time, in seconds, that an instance can remain in a Pending:Wait
+ // or Terminating:Wait state. The default is 172800 seconds (48 hours).
+ GlobalTimeout *int64 `type:"integer"`
+
+ // The maximum time, in seconds, that can elapse before the lifecycle hook times
+ // out. The default is 3600 seconds (1 hour). When the lifecycle hook times
+ // out, Auto Scaling performs the default action. You can prevent the lifecycle
+ // hook from timing out by calling RecordLifecycleActionHeartbeat.
+ HeartbeatTimeout *int64 `type:"integer"`
+
+ // The name of the lifecycle hook.
+ LifecycleHookName *string `min:"1" type:"string"`
+
+ // The state of the EC2 instance to which you want to attach the lifecycle hook.
+ // For a list of lifecycle hook types, see DescribeLifecycleHookTypes.
+ LifecycleTransition *string `type:"string"`
+
+ // Additional information that you want to include any time Auto Scaling sends
+ // a message to the notification target.
+ NotificationMetadata *string `min:"1" type:"string"`
+
+ // The ARN of the notification target that Auto Scaling uses to notify you when
+ // an instance is in the transition state for the lifecycle hook. This ARN target
+ // can be either an SQS queue or an SNS topic. The notification message sent
+ // to the target includes the following:
+ //
+ // Lifecycle action token
+ //
+ // User account ID
+ //
+ // Name of the Auto Scaling group
+ //
+ // Lifecycle hook name
+ //
+ // EC2 instance ID
+ //
+ // Lifecycle transition
+ //
+ // Notification metadata
+ NotificationTargetARN *string `min:"1" type:"string"`
+
+ // The ARN of the IAM role that allows the Auto Scaling group to publish to
+ // the specified notification target.
+ RoleARN *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s LifecycleHook) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LifecycleHook) GoString() string {
+ return s.String()
+}
+
+// Describes the state of a load balancer.
+type LoadBalancerState struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the load balancer.
+ LoadBalancerName *string `min:"1" type:"string"` + + // One of the following load balancer states: + // + // Adding - The instances in the group are being registered with the load + // balancer. + // + // Added - All instances in the group are registered with the load balancer. + // + // InService - At least one instance in the group passed an ELB health check. + // + // Removing - The instances are being deregistered from the load balancer. + // If connection draining is enabled, Elastic Load Balancing waits for in-flight + // requests to complete before deregistering the instances. + State *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s LoadBalancerState) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoadBalancerState) GoString() string { + return s.String() +} + +// Describes a metric. +type MetricCollectionType struct { + _ struct{} `type:"structure"` + + // One of the following metrics: + // + // GroupMinSize + // + // GroupMaxSize + // + // GroupDesiredCapacity + // + // GroupInServiceInstances + // + // GroupPendingInstances + // + // GroupStandbyInstances + // + // GroupTerminatingInstances + // + // GroupTotalInstances + Metric *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s MetricCollectionType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricCollectionType) GoString() string { + return s.String() +} + +// Describes a granularity of a metric. +type MetricGranularityType struct { + _ struct{} `type:"structure"` + + // The granularity. The only valid value is 1Minute. + Granularity *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s MetricGranularityType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricGranularityType) GoString() string { + return s.String() +} + +// Describes a notification. +type NotificationConfiguration struct { + _ struct{} `type:"structure"` + + // The name of the group. + AutoScalingGroupName *string `min:"1" type:"string"` + + // One of the following event notification types: + // + // autoscaling:EC2_INSTANCE_LAUNCH + // + // autoscaling:EC2_INSTANCE_LAUNCH_ERROR + // + // autoscaling:EC2_INSTANCE_TERMINATE + // + // autoscaling:EC2_INSTANCE_TERMINATE_ERROR + // + // autoscaling:TEST_NOTIFICATION + NotificationType *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the Amazon Simple Notification Service + // (SNS) topic. + TopicARN *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s NotificationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfiguration) GoString() string { + return s.String() +} + +// Describes a process type. +// +// For more information, see Auto Scaling Processes (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/US_SuspendResume.html#process-types) +// in the Auto Scaling Developer Guide. 
+type ProcessType struct {
+ _ struct{} `type:"structure"`
+
+ // One of the following processes:
+ //
+ // Launch
+ //
+ // Terminate
+ //
+ // AddToLoadBalancer
+ //
+ // AlarmNotification
+ //
+ // AZRebalance
+ //
+ // HealthCheck
+ //
+ // ReplaceUnhealthy
+ //
+ // ScheduledActions
+ ProcessName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ProcessType) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ProcessType) GoString() string {
+ return s.String()
+}
+
+type PutLifecycleHookInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the Auto Scaling group to which you want to assign the lifecycle
+ // hook.
+ AutoScalingGroupName *string `min:"1" type:"string" required:"true"`
+
+ // Defines the action the Auto Scaling group should take when the lifecycle
+ // hook timeout elapses or if an unexpected failure occurs. This parameter can
+ // be either CONTINUE or ABANDON. The default value is ABANDON.
+ DefaultResult *string `type:"string"`
+
+ // The amount of time, in seconds, that can elapse before the lifecycle hook
+ // times out. When the lifecycle hook times out, Auto Scaling performs the default
+ // action. You can prevent the lifecycle hook from timing out by calling RecordLifecycleActionHeartbeat.
+ // The default is 3600 seconds (1 hour).
+ HeartbeatTimeout *int64 `type:"integer"`
+
+ // The name of the lifecycle hook.
+ LifecycleHookName *string `min:"1" type:"string" required:"true"`
+
+ // The instance state to which you want to attach the lifecycle hook. For a
+ // list of lifecycle hook types, see DescribeLifecycleHookTypes.
+ //
+ // This parameter is required for new lifecycle hooks, but optional when updating
+ // existing hooks.
+ LifecycleTransition *string `type:"string"`
+
+ // Contains additional information that you want to include any time Auto Scaling
+ // sends a message to the notification target.
+ NotificationMetadata *string `min:"1" type:"string"`
+
+ // The ARN of the notification target that Auto Scaling will use to notify you
+ // when an instance is in the transition state for the lifecycle hook. This
+ // target can be either an SQS queue or an SNS topic. If you specify an empty
+ // string, this overrides the current ARN.
+ //
+ // The notification messages sent to the target include the following information:
+ //
+ // AutoScalingGroupName. The name of the Auto Scaling group.
+ //
+ // AccountId. The AWS account ID.
+ //
+ // LifecycleTransition. The lifecycle hook type.
+ //
+ // LifecycleActionToken. The lifecycle action token.
+ //
+ // EC2InstanceId. The EC2 instance ID.
+ //
+ // LifecycleHookName. The name of the lifecycle hook.
+ //
+ // NotificationMetadata. User-defined information.
+ //
+ // This operation uses the JSON format when sending notifications to an Amazon
+ // SQS queue, and an email key/value pair format when sending notifications
+ // to an Amazon SNS topic.
+ //
+ // When you specify a notification target, Auto Scaling sends it a test message.
+ // Test messages contain the following additional key/value pair: "Event":
+ // "autoscaling:TEST_NOTIFICATION".
+ NotificationTargetARN *string `type:"string"`
+
+ // The ARN of the IAM role that allows the Auto Scaling group to publish to
+ // the specified notification target.
+ //
+ // This parameter is required for new lifecycle hooks, but optional when updating
+ // existing hooks.
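+ //
+ // An illustrative sketch (not generated code): registering a termination
+ // hook against a hypothetical queue and role:
+ //
+ //    _, err := svc.PutLifecycleHook(&autoscaling.PutLifecycleHookInput{
+ //        AutoScalingGroupName:  aws.String("web-asg"),
+ //        LifecycleHookName:     aws.String("drain-connections"),
+ //        LifecycleTransition:   aws.String("autoscaling:EC2_INSTANCE_TERMINATING"),
+ //        NotificationTargetARN: aws.String("arn:aws:sqs:us-east-1:123456789012:asg-hooks"),
+ //        RoleARN:               aws.String("arn:aws:iam::123456789012:role/asg-hook-role"),
+ //        HeartbeatTimeout:      aws.Int64(300),
+ //    })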
+ RoleARN *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PutLifecycleHookInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutLifecycleHookInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutLifecycleHookInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutLifecycleHookInput"} + if s.AutoScalingGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName")) + } + if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) + } + if s.LifecycleHookName == nil { + invalidParams.Add(request.NewErrParamRequired("LifecycleHookName")) + } + if s.LifecycleHookName != nil && len(*s.LifecycleHookName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LifecycleHookName", 1)) + } + if s.NotificationMetadata != nil && len(*s.NotificationMetadata) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NotificationMetadata", 1)) + } + if s.RoleARN != nil && len(*s.RoleARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleARN", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutLifecycleHookOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutLifecycleHookOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutLifecycleHookOutput) GoString() string { + return s.String() +} + +type PutNotificationConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // The type of event that will cause the notification to be sent. For details + // about notification types supported by Auto Scaling, see DescribeAutoScalingNotificationTypes. + NotificationTypes []*string `type:"list" required:"true"` + + // The Amazon Resource Name (ARN) of the Amazon Simple Notification Service + // (SNS) topic. + TopicARN *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutNotificationConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutNotificationConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
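+//
+// An illustrative sketch (not generated code): subscribing a hypothetical SNS
+// topic to launch and terminate events, with Validate catching missing
+// required fields before the request is sent:
+//
+//    params := &autoscaling.PutNotificationConfigurationInput{
+//        AutoScalingGroupName: aws.String("web-asg"),
+//        TopicARN:             aws.String("arn:aws:sns:us-east-1:123456789012:asg-events"),
+//        NotificationTypes: []*string{
+//            aws.String("autoscaling:EC2_INSTANCE_LAUNCH"),
+//            aws.String("autoscaling:EC2_INSTANCE_TERMINATE"),
+//        },
+//    }
+//    if err := params.Validate(); err != nil {
+//        return err
+//    }
+//    _, err := svc.PutNotificationConfiguration(params)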
+func (s *PutNotificationConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutNotificationConfigurationInput"} + if s.AutoScalingGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName")) + } + if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) + } + if s.NotificationTypes == nil { + invalidParams.Add(request.NewErrParamRequired("NotificationTypes")) + } + if s.TopicARN == nil { + invalidParams.Add(request.NewErrParamRequired("TopicARN")) + } + if s.TopicARN != nil && len(*s.TopicARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TopicARN", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutNotificationConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutNotificationConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutNotificationConfigurationOutput) GoString() string { + return s.String() +} + +type PutScalingPolicyInput struct { + _ struct{} `type:"structure"` + + // The adjustment type. Valid values are ChangeInCapacity, ExactCapacity, and + // PercentChangeInCapacity. + // + // For more information, see Dynamic Scaling (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/as-scale-based-on-demand.html) + // in the Auto Scaling Developer Guide. + AdjustmentType *string `min:"1" type:"string" required:"true"` + + // The name or ARN of the group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // The amount of time, in seconds, after a scaling activity completes and before + // the next scaling activity can start. If this parameter is not specified, + // the default cooldown period for the group applies. + // + // This parameter is not supported unless the policy type is SimpleScaling. + // + // For more information, see Auto Scaling Cooldowns (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/Cooldown.html) + // in the Auto Scaling Developer Guide. + Cooldown *int64 `type:"integer"` + + // The estimated time, in seconds, until a newly launched instance can contribute + // to the CloudWatch metrics. The default is to use the value specified for + // the default cooldown period for the group. + // + // This parameter is not supported if the policy type is SimpleScaling. + EstimatedInstanceWarmup *int64 `type:"integer"` + + // The aggregation type for the CloudWatch metrics. Valid values are Minimum, + // Maximum, and Average. If the aggregation type is null, the value is treated + // as Average. + // + // This parameter is not supported if the policy type is SimpleScaling. + MetricAggregationType *string `min:"1" type:"string"` + + // The minimum number of instances to scale. If the value of AdjustmentType + // is PercentChangeInCapacity, the scaling policy changes the DesiredCapacity + // of the Auto Scaling group by at least this many instances. Otherwise, the + // error is ValidationError. + MinAdjustmentMagnitude *int64 `type:"integer"` + + // Available for backward compatibility. Use MinAdjustmentMagnitude instead. + MinAdjustmentStep *int64 `deprecated:"true" type:"integer"` + + // The name of the policy. + PolicyName *string `min:"1" type:"string" required:"true"` + + // The policy type. Valid values are SimpleScaling and StepScaling. 
If the policy + // type is null, the value is treated as SimpleScaling. + PolicyType *string `min:"1" type:"string"` + + // The amount by which to scale, based on the specified adjustment type. A positive + // value adds to the current capacity while a negative number removes from the + // current capacity. + // + // This parameter is required if the policy type is SimpleScaling and not supported + // otherwise. + ScalingAdjustment *int64 `type:"integer"` + + // A set of adjustments that enable you to scale based on the size of the alarm + // breach. + // + // This parameter is required if the policy type is StepScaling and not supported + // otherwise. + StepAdjustments []*StepAdjustment `type:"list"` +} + +// String returns the string representation +func (s PutScalingPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutScalingPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutScalingPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutScalingPolicyInput"} + if s.AdjustmentType == nil { + invalidParams.Add(request.NewErrParamRequired("AdjustmentType")) + } + if s.AdjustmentType != nil && len(*s.AdjustmentType) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AdjustmentType", 1)) + } + if s.AutoScalingGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName")) + } + if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) + } + if s.MetricAggregationType != nil && len(*s.MetricAggregationType) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MetricAggregationType", 1)) + } + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + if s.PolicyType != nil && len(*s.PolicyType) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyType", 1)) + } + if s.StepAdjustments != nil { + for i, v := range s.StepAdjustments { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "StepAdjustments", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutScalingPolicyOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the policy. + PolicyARN *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PutScalingPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutScalingPolicyOutput) GoString() string { + return s.String() +} + +type PutScheduledUpdateGroupActionInput struct { + _ struct{} `type:"structure"` + + // The name or Amazon Resource Name (ARN) of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // The number of EC2 instances that should be running in the group. + DesiredCapacity *int64 `type:"integer"` + + // The time for this action to end. + EndTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The maximum size for the Auto Scaling group. + MaxSize *int64 `type:"integer"` + + // The minimum size for the Auto Scaling group. 
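+ //
+ // An illustrative sketch (not generated code): a recurring weekday scale-up
+ // for a hypothetical group, using the cron syntax described on the Recurrence
+ // field below:
+ //
+ //    _, err := svc.PutScheduledUpdateGroupAction(&autoscaling.PutScheduledUpdateGroupActionInput{
+ //        AutoScalingGroupName: aws.String("web-asg"),
+ //        ScheduledActionName:  aws.String("scale-up-weekdays"),
+ //        Recurrence:           aws.String("0 8 * * MON-FRI"),
+ //        MinSize:              aws.Int64(4),
+ //        MaxSize:              aws.Int64(10),
+ //        DesiredCapacity:      aws.Int64(6),
+ //    })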
+ MinSize *int64 `type:"integer"` + + // The time when recurring future actions will start. Start time is specified + // by the user following the Unix cron syntax format. For more information, + // see Cron (http://en.wikipedia.org/wiki/Cron) in Wikipedia. + // + // When StartTime and EndTime are specified with Recurrence, they form the + // boundaries of when the recurring action will start and stop. + Recurrence *string `min:"1" type:"string"` + + // The name of this scaling action. + ScheduledActionName *string `min:"1" type:"string" required:"true"` + + // The time for this action to start, in "YYYY-MM-DDThh:mm:ssZ" format in UTC/GMT + // only (for example, 2014-06-01T00:00:00Z). + // + // If you try to schedule your action in the past, Auto Scaling returns an + // error message. + // + // When StartTime and EndTime are specified with Recurrence, they form the + // boundaries of when the recurring action starts and stops. + StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // This parameter is deprecated. + Time *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s PutScheduledUpdateGroupActionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutScheduledUpdateGroupActionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutScheduledUpdateGroupActionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutScheduledUpdateGroupActionInput"} + if s.AutoScalingGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName")) + } + if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) + } + if s.Recurrence != nil && len(*s.Recurrence) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Recurrence", 1)) + } + if s.ScheduledActionName == nil { + invalidParams.Add(request.NewErrParamRequired("ScheduledActionName")) + } + if s.ScheduledActionName != nil && len(*s.ScheduledActionName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ScheduledActionName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutScheduledUpdateGroupActionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutScheduledUpdateGroupActionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutScheduledUpdateGroupActionOutput) GoString() string { + return s.String() +} + +type RecordLifecycleActionHeartbeatInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group for the hook. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // The ID of the instance. + InstanceId *string `min:"1" type:"string"` + + // A token that uniquely identifies a specific lifecycle action associated with + // an instance. Auto Scaling sends this token to the notification target you + // specified when you created the lifecycle hook. + LifecycleActionToken *string `min:"36" type:"string"` + + // The name of the lifecycle hook. 
+ LifecycleHookName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RecordLifecycleActionHeartbeatInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecordLifecycleActionHeartbeatInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RecordLifecycleActionHeartbeatInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RecordLifecycleActionHeartbeatInput"} + if s.AutoScalingGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName")) + } + if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) + } + if s.InstanceId != nil && len(*s.InstanceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InstanceId", 1)) + } + if s.LifecycleActionToken != nil && len(*s.LifecycleActionToken) < 36 { + invalidParams.Add(request.NewErrParamMinLen("LifecycleActionToken", 36)) + } + if s.LifecycleHookName == nil { + invalidParams.Add(request.NewErrParamRequired("LifecycleHookName")) + } + if s.LifecycleHookName != nil && len(*s.LifecycleHookName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LifecycleHookName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RecordLifecycleActionHeartbeatOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RecordLifecycleActionHeartbeatOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecordLifecycleActionHeartbeatOutput) GoString() string { + return s.String() +} + +type ResumeProcessesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ResumeProcessesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResumeProcessesOutput) GoString() string { + return s.String() +} + +// Describes a scaling policy. +type ScalingPolicy struct { + _ struct{} `type:"structure"` + + // The adjustment type, which specifies how ScalingAdjustment is interpreted. + // Valid values are ChangeInCapacity, ExactCapacity, and PercentChangeInCapacity. + AdjustmentType *string `min:"1" type:"string"` + + // The CloudWatch alarms related to the policy. + Alarms []*Alarm `type:"list"` + + // The name of the Auto Scaling group associated with this scaling policy. + AutoScalingGroupName *string `min:"1" type:"string"` + + // The amount of time, in seconds, after a scaling activity completes before + // any further trigger-related scaling activities can start. + Cooldown *int64 `type:"integer"` + + // The estimated time, in seconds, until a newly launched instance can contribute + // to the CloudWatch metrics. + EstimatedInstanceWarmup *int64 `type:"integer"` + + // The aggregation type for the CloudWatch metrics. Valid values are Minimum, + // Maximum, and Average. + MetricAggregationType *string `min:"1" type:"string"` + + // The minimum number of instances to scale. If the value of AdjustmentType + // is PercentChangeInCapacity, the scaling policy changes the DesiredCapacity + // of the Auto Scaling group by at least this many instances. Otherwise, Auto + // Scaling returns a ValidationError.
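+ //
+ // For illustration: with AdjustmentType set to PercentChangeInCapacity, a
+ // ScalingAdjustment of 25, and a current capacity of 4 instances, 25 percent
+ // works out to 1 instance; a MinAdjustmentMagnitude of 2 raises that change
+ // to 2 instances.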
+ MinAdjustmentMagnitude *int64 `type:"integer"` + + // Available for backward compatibility. Use MinAdjustmentMagnitude instead. + MinAdjustmentStep *int64 `deprecated:"true" type:"integer"` + + // The Amazon Resource Name (ARN) of the policy. + PolicyARN *string `min:"1" type:"string"` + + // The name of the scaling policy. + PolicyName *string `min:"1" type:"string"` + + // The policy type. Valid values are SimpleScaling and StepScaling. + PolicyType *string `min:"1" type:"string"` + + // The amount by which to scale, based on the specified adjustment type. A positive + // value adds to the current capacity while a negative number removes from the + // current capacity. + ScalingAdjustment *int64 `type:"integer"` + + // A set of adjustments that enable you to scale based on the size of the alarm + // breach. + StepAdjustments []*StepAdjustment `type:"list"` +} + +// String returns the string representation +func (s ScalingPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScalingPolicy) GoString() string { + return s.String() +} + +type ScalingProcessQuery struct { + _ struct{} `type:"structure"` + + // The name or Amazon Resource Name (ARN) of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // One or more of the following processes: + // + // Launch + // + // Terminate + // + // HealthCheck + // + // ReplaceUnhealthy + // + // AZRebalance + // + // AlarmNotification + // + // ScheduledActions + // + // AddToLoadBalancer + ScalingProcesses []*string `type:"list"` +} + +// String returns the string representation +func (s ScalingProcessQuery) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScalingProcessQuery) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ScalingProcessQuery) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ScalingProcessQuery"} + if s.AutoScalingGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName")) + } + if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Describes a scheduled update to an Auto Scaling group. +type ScheduledUpdateGroupAction struct { + _ struct{} `type:"structure"` + + // The name of the group. + AutoScalingGroupName *string `min:"1" type:"string"` + + // The number of instances you prefer to maintain in the group. + DesiredCapacity *int64 `type:"integer"` + + // The date and time that the action is scheduled to end. This date and time + // can be up to one month in the future. + EndTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The maximum size of the group. + MaxSize *int64 `type:"integer"` + + // The minimum size of the group. + MinSize *int64 `type:"integer"` + + // The recurring schedule for the action. + Recurrence *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the scheduled action. + ScheduledActionARN *string `min:"1" type:"string"` + + // The name of the scheduled action. + ScheduledActionName *string `min:"1" type:"string"` + + // The date and time that the action is scheduled to begin. This date and time + // can be up to one month in the future. 
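+ //
+ // For example, a Recurrence of "0 9 * * 1-5" with a StartTime of
+ // 2016-08-01T00:00:00Z and an EndTime of 2016-08-31T00:00:00Z describes an
+ // action that runs at 09:00 UTC on each weekday in August 2016 (the dates
+ // are illustrative).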
+ // + // When StartTime and EndTime are specified with Recurrence, they form the + // boundaries of when the recurring action will start and stop. + StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // This parameter is deprecated. + Time *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s ScheduledUpdateGroupAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledUpdateGroupAction) GoString() string { + return s.String() +} + +type SetDesiredCapacityInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // The number of EC2 instances that should be running in the Auto Scaling group. + DesiredCapacity *int64 `type:"integer" required:"true"` + + // By default, SetDesiredCapacity overrides any cooldown period associated with + // the Auto Scaling group. Specify True to make Auto Scaling wait for the + // cooldown period associated with the Auto Scaling group to complete before + // initiating a scaling activity to set your Auto Scaling group to its new capacity. + HonorCooldown *bool `type:"boolean"` +} + +// String returns the string representation +func (s SetDesiredCapacityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetDesiredCapacityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetDesiredCapacityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetDesiredCapacityInput"} + if s.AutoScalingGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName")) + } + if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) + } + if s.DesiredCapacity == nil { + invalidParams.Add(request.NewErrParamRequired("DesiredCapacity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type SetDesiredCapacityOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetDesiredCapacityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetDesiredCapacityOutput) GoString() string { + return s.String() +} + +type SetInstanceHealthInput struct { + _ struct{} `type:"structure"` + + // The health status of the instance. Set to Healthy if you want the instance + // to remain in service. Set to Unhealthy if you want the instance to be out + // of service. Auto Scaling will terminate and replace the unhealthy instance. + HealthStatus *string `min:"1" type:"string" required:"true"` + + // The ID of the instance. + InstanceId *string `min:"1" type:"string" required:"true"` + + // If the Auto Scaling group of the specified instance has a HealthCheckGracePeriod + // specified for the group, by default, this call will respect the grace period. + // Set this to False if you do not want the call to respect the grace period + // associated with the group. + // + // For more information, see the description of the health check grace period + // for CreateAutoScalingGroup.
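+ //
+ // A sketch of the call (the instance ID is a placeholder; error handling
+ // omitted):
+ //
+ //    svc.SetInstanceHealth(&autoscaling.SetInstanceHealthInput{
+ //        HealthStatus:             aws.String("Unhealthy"),
+ //        InstanceId:               aws.String("i-12345678"),
+ //        ShouldRespectGracePeriod: aws.Bool(false),
+ //    })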
+ ShouldRespectGracePeriod *bool `type:"boolean"` +} + +// String returns the string representation +func (s SetInstanceHealthInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetInstanceHealthInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetInstanceHealthInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetInstanceHealthInput"} + if s.HealthStatus == nil { + invalidParams.Add(request.NewErrParamRequired("HealthStatus")) + } + if s.HealthStatus != nil && len(*s.HealthStatus) < 1 { + invalidParams.Add(request.NewErrParamMinLen("HealthStatus", 1)) + } + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + if s.InstanceId != nil && len(*s.InstanceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InstanceId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type SetInstanceHealthOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetInstanceHealthOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetInstanceHealthOutput) GoString() string { + return s.String() +} + +type SetInstanceProtectionInput struct { + _ struct{} `type:"structure"` + + // The name of the group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // One or more instance IDs. + InstanceIds []*string `type:"list" required:"true"` + + // Indicates whether the instance is protected from termination by Auto Scaling + // when scaling in. + ProtectedFromScaleIn *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s SetInstanceProtectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetInstanceProtectionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetInstanceProtectionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetInstanceProtectionInput"} + if s.AutoScalingGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName")) + } + if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) + } + if s.InstanceIds == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceIds")) + } + if s.ProtectedFromScaleIn == nil { + invalidParams.Add(request.NewErrParamRequired("ProtectedFromScaleIn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type SetInstanceProtectionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetInstanceProtectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetInstanceProtectionOutput) GoString() string { + return s.String() +} + +// Describes an adjustment based on the difference between the value of the +// aggregated CloudWatch metric and the breach threshold that you've defined +// for the alarm. 
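+//
+// As a sketch, the two example ranges described next could be expressed in a
+// PutScalingPolicyInput with PolicyType StepScaling as (the adjustment values
+// of 1 and -1 are illustrative):
+//
+//    StepAdjustments: []*autoscaling.StepAdjustment{
+//        {MetricIntervalLowerBound: aws.Float64(0), MetricIntervalUpperBound: aws.Float64(10), ScalingAdjustment: aws.Int64(1)},
+//        {MetricIntervalLowerBound: aws.Float64(-10), MetricIntervalUpperBound: aws.Float64(0), ScalingAdjustment: aws.Int64(-1)},
+//    },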
+// +// For the following examples, suppose that you have an alarm with a breach +// threshold of 50: +// +// If you want the adjustment to be triggered when the metric is greater +// than or equal to 50 and less than 60, specify a lower bound of 0 and an upper +// bound of 10. +// +// If you want the adjustment to be triggered when the metric is greater +// than 40 and less than or equal to 50, specify a lower bound of -10 and an +// upper bound of 0. +// +// There are a few rules for the step adjustments for your step policy: +// +// The ranges of your step adjustments can't overlap or have a gap. +// +// At most one step adjustment can have a null lower bound. If one step adjustment +// has a negative lower bound, then there must be a step adjustment with a null +// lower bound. +// +// At most one step adjustment can have a null upper bound. If one step adjustment +// has a positive upper bound, then there must be a step adjustment with a null +// upper bound. +// +// The upper and lower bound can't be null in the same step adjustment. +type StepAdjustment struct { + _ struct{} `type:"structure"` + + // The lower bound for the difference between the alarm threshold and the CloudWatch + // metric. If the metric value is above the breach threshold, the lower bound + // is inclusive (the metric must be greater than or equal to the threshold plus + // the lower bound). Otherwise, it is exclusive (the metric must be greater + // than the threshold plus the lower bound). A null value indicates negative + // infinity. + MetricIntervalLowerBound *float64 `type:"double"` + + // The upper bound for the difference between the alarm threshold and the CloudWatch + // metric. If the metric value is above the breach threshold, the upper bound + // is exclusive (the metric must be less than the threshold plus the upper bound). + // Otherwise, it is inclusive (the metric must be less than or equal to the + // threshold plus the upper bound). A null value indicates positive infinity. + // + // The upper bound must be greater than the lower bound. + MetricIntervalUpperBound *float64 `type:"double"` + + // The amount by which to scale, based on the specified adjustment type. A positive + // value adds to the current capacity while a negative number removes from the + // current capacity. + ScalingAdjustment *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s StepAdjustment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StepAdjustment) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StepAdjustment) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StepAdjustment"} + if s.ScalingAdjustment == nil { + invalidParams.Add(request.NewErrParamRequired("ScalingAdjustment")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type SuspendProcessesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SuspendProcessesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SuspendProcessesOutput) GoString() string { + return s.String() +} + +// Describes an Auto Scaling process that has been suspended. For more information, +// see ProcessType. +type SuspendedProcess struct { + _ struct{} `type:"structure"` + + // The name of the suspended process. 
+ ProcessName *string `min:"1" type:"string"` + + // The reason that the process was suspended. + SuspensionReason *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s SuspendedProcess) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SuspendedProcess) GoString() string { + return s.String() +} + +// Describes a tag for an Auto Scaling group. +type Tag struct { + _ struct{} `type:"structure"` + + // The tag key. + Key *string `min:"1" type:"string" required:"true"` + + // Determines whether the tag is added to new instances as they are launched + // in the group. + PropagateAtLaunch *bool `type:"boolean"` + + // The name of the group. + ResourceId *string `type:"string"` + + // The type of resource. The only supported value is auto-scaling-group. + ResourceType *string `type:"string"` + + // The tag value. + Value *string `type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Describes a tag for an Auto Scaling group. +type TagDescription struct { + _ struct{} `type:"structure"` + + // The tag key. + Key *string `min:"1" type:"string"` + + // Determines whether the tag is added to new instances as they are launched + // in the group. + PropagateAtLaunch *bool `type:"boolean"` + + // The name of the group. + ResourceId *string `type:"string"` + + // The type of resource. The only supported value is auto-scaling-group. + ResourceType *string `type:"string"` + + // The tag value. + Value *string `type:"string"` +} + +// String returns the string representation +func (s TagDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagDescription) GoString() string { + return s.String() +} + +type TerminateInstanceInAutoScalingGroupInput struct { + _ struct{} `type:"structure"` + + // The ID of the instance. + InstanceId *string `min:"1" type:"string" required:"true"` + + // If true, terminating the instance also decrements the size of the Auto Scaling + // group. + ShouldDecrementDesiredCapacity *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s TerminateInstanceInAutoScalingGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TerminateInstanceInAutoScalingGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
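+//
+// Each generated Validate follows the same calling pattern; a sketch, where
+// params is any of the input types above:
+//
+//    if err := params.Validate(); err != nil {
+//        return err // a request.ErrInvalidParams listing every failed field
+//    }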
+func (s *TerminateInstanceInAutoScalingGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TerminateInstanceInAutoScalingGroupInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + if s.InstanceId != nil && len(*s.InstanceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InstanceId", 1)) + } + if s.ShouldDecrementDesiredCapacity == nil { + invalidParams.Add(request.NewErrParamRequired("ShouldDecrementDesiredCapacity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type TerminateInstanceInAutoScalingGroupOutput struct { + _ struct{} `type:"structure"` + + // A scaling activity. + Activity *Activity `type:"structure"` +} + +// String returns the string representation +func (s TerminateInstanceInAutoScalingGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TerminateInstanceInAutoScalingGroupOutput) GoString() string { + return s.String() +} + +type UpdateAutoScalingGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // One or more Availability Zones for the group. + AvailabilityZones []*string `min:"1" type:"list"` + + // The amount of time, in seconds, after a scaling activity completes before + // another scaling activity can start. The default is 300. + // + // For more information, see Auto Scaling Cooldowns (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/Cooldown.html) + // in the Auto Scaling Developer Guide. + DefaultCooldown *int64 `type:"integer"` + + // The number of EC2 instances that should be running in the Auto Scaling group. + // This number must be greater than or equal to the minimum size of the group + // and less than or equal to the maximum size of the group. + DesiredCapacity *int64 `type:"integer"` + + // The amount of time, in seconds, that Auto Scaling waits before checking the + // health status of an EC2 instance that has come into service. The default + // is 300. + // + // For more information, see Health Checks (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/healthcheck.html) + // in the Auto Scaling Developer Guide. + HealthCheckGracePeriod *int64 `type:"integer"` + + // The service to use for the health checks. The valid values are EC2 and ELB. + HealthCheckType *string `min:"1" type:"string"` + + // The name of the launch configuration. + LaunchConfigurationName *string `min:"1" type:"string"` + + // The maximum size of the Auto Scaling group. + MaxSize *int64 `type:"integer"` + + // The minimum size of the Auto Scaling group. + MinSize *int64 `type:"integer"` + + // Indicates whether newly launched instances are protected from termination + // by Auto Scaling when scaling in. + NewInstancesProtectedFromScaleIn *bool `type:"boolean"` + + // The name of the placement group into which you'll launch your instances, + // if any. For more information, see Placement Groups (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html) + // in the Amazon Elastic Compute Cloud User Guide. + PlacementGroup *string `min:"1" type:"string"` + + // A standalone termination policy or a list of termination policies used to + // select the instance to terminate. The policies are executed in the order + // that they are listed. 
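+ //
+ // For example, a value of ["OldestLaunchConfiguration", "ClosestToNextInstanceHour"]
+ // first narrows the choice to instances with the oldest launch configuration
+ // and then breaks ties by proximity to the next instance billing hour.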
+ // + // For more information, see Controlling Which Instances Auto Scaling Terminates + // During Scale In (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingBehavior.InstanceTermination.html) + // in the Auto Scaling Developer Guide. + TerminationPolicies []*string `type:"list"` + + // The ID of the subnet, if you are launching into a VPC. You can specify several + // subnets in a comma-separated list. + // + // When you specify VPCZoneIdentifier with AvailabilityZones, ensure that the + // subnets' Availability Zones match the values you specify for AvailabilityZones. + // + // For more information, see Launching Auto Scaling Instances in a VPC (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/asg-in-vpc.html) + // in the Auto Scaling Developer Guide. + VPCZoneIdentifier *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UpdateAutoScalingGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAutoScalingGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateAutoScalingGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateAutoScalingGroupInput"} + if s.AutoScalingGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName")) + } + if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) + } + if s.AvailabilityZones != nil && len(s.AvailabilityZones) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AvailabilityZones", 1)) + } + if s.HealthCheckType != nil && len(*s.HealthCheckType) < 1 { + invalidParams.Add(request.NewErrParamMinLen("HealthCheckType", 1)) + } + if s.LaunchConfigurationName != nil && len(*s.LaunchConfigurationName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LaunchConfigurationName", 1)) + } + if s.PlacementGroup != nil && len(*s.PlacementGroup) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PlacementGroup", 1)) + } + if s.VPCZoneIdentifier != nil && len(*s.VPCZoneIdentifier) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VPCZoneIdentifier", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateAutoScalingGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateAutoScalingGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAutoScalingGroupOutput) GoString() string { + return s.String() +} + +const ( + // @enum LifecycleState + LifecycleStatePending = "Pending" + // @enum LifecycleState + LifecycleStatePendingWait = "Pending:Wait" + // @enum LifecycleState + LifecycleStatePendingProceed = "Pending:Proceed" + // @enum LifecycleState + LifecycleStateQuarantined = "Quarantined" + // @enum LifecycleState + LifecycleStateInService = "InService" + // @enum LifecycleState + LifecycleStateTerminating = "Terminating" + // @enum LifecycleState + LifecycleStateTerminatingWait = "Terminating:Wait" + // @enum LifecycleState + LifecycleStateTerminatingProceed = "Terminating:Proceed" + // @enum LifecycleState + LifecycleStateTerminated = "Terminated" + // @enum LifecycleState + LifecycleStateDetaching = "Detaching" + // @enum LifecycleState + LifecycleStateDetached = "Detached" + // @enum 
LifecycleState + LifecycleStateEnteringStandby = "EnteringStandby" + // @enum LifecycleState + LifecycleStateStandby = "Standby" +) + +const ( + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodePendingSpotBidPlacement = "PendingSpotBidPlacement" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodeWaitingForSpotInstanceRequestId = "WaitingForSpotInstanceRequestId" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodeWaitingForSpotInstanceId = "WaitingForSpotInstanceId" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodeWaitingForInstanceId = "WaitingForInstanceId" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodePreInService = "PreInService" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodeInProgress = "InProgress" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodeWaitingForElbconnectionDraining = "WaitingForELBConnectionDraining" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodeMidLifecycleAction = "MidLifecycleAction" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodeWaitingForInstanceWarmup = "WaitingForInstanceWarmup" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodeSuccessful = "Successful" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodeFailed = "Failed" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodeCancelled = "Cancelled" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/autoscalingiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/autoscalingiface/interface.go new file mode 100644 index 000000000..5f9ed8e60 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/autoscalingiface/interface.go @@ -0,0 +1,226 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package autoscalingiface provides an interface for Auto Scaling. +package autoscalingiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/autoscaling" +) + +// AutoScalingAPI is the interface type for autoscaling.AutoScaling.
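+//
+// Each operation appears here in up to three forms: OpRequest builds a
+// request.Request to send later, Op sends immediately, and OpPages iterates
+// paginated results. Programs can accept this interface instead of the
+// concrete *autoscaling.AutoScaling so that tests can substitute a stub; a
+// minimal sketch (mockAutoScalingClient is illustrative, not part of the
+// package):
+//
+//    type mockAutoScalingClient struct {
+//        autoscalingiface.AutoScalingAPI // embedded so unused methods need no stubs
+//        gotDesired int64
+//    }
+//
+//    func (m *mockAutoScalingClient) SetDesiredCapacity(in *autoscaling.SetDesiredCapacityInput) (*autoscaling.SetDesiredCapacityOutput, error) {
+//        m.gotDesired = *in.DesiredCapacity
+//        return &autoscaling.SetDesiredCapacityOutput{}, nil
+//    }
+//
+// Calling an embedded method that was not overridden panics on the nil
+// interface, which makes a test fail loudly rather than silently reaching
+// the real service.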
+type AutoScalingAPI interface { + AttachInstancesRequest(*autoscaling.AttachInstancesInput) (*request.Request, *autoscaling.AttachInstancesOutput) + + AttachInstances(*autoscaling.AttachInstancesInput) (*autoscaling.AttachInstancesOutput, error) + + AttachLoadBalancersRequest(*autoscaling.AttachLoadBalancersInput) (*request.Request, *autoscaling.AttachLoadBalancersOutput) + + AttachLoadBalancers(*autoscaling.AttachLoadBalancersInput) (*autoscaling.AttachLoadBalancersOutput, error) + + CompleteLifecycleActionRequest(*autoscaling.CompleteLifecycleActionInput) (*request.Request, *autoscaling.CompleteLifecycleActionOutput) + + CompleteLifecycleAction(*autoscaling.CompleteLifecycleActionInput) (*autoscaling.CompleteLifecycleActionOutput, error) + + CreateAutoScalingGroupRequest(*autoscaling.CreateAutoScalingGroupInput) (*request.Request, *autoscaling.CreateAutoScalingGroupOutput) + + CreateAutoScalingGroup(*autoscaling.CreateAutoScalingGroupInput) (*autoscaling.CreateAutoScalingGroupOutput, error) + + CreateLaunchConfigurationRequest(*autoscaling.CreateLaunchConfigurationInput) (*request.Request, *autoscaling.CreateLaunchConfigurationOutput) + + CreateLaunchConfiguration(*autoscaling.CreateLaunchConfigurationInput) (*autoscaling.CreateLaunchConfigurationOutput, error) + + CreateOrUpdateTagsRequest(*autoscaling.CreateOrUpdateTagsInput) (*request.Request, *autoscaling.CreateOrUpdateTagsOutput) + + CreateOrUpdateTags(*autoscaling.CreateOrUpdateTagsInput) (*autoscaling.CreateOrUpdateTagsOutput, error) + + DeleteAutoScalingGroupRequest(*autoscaling.DeleteAutoScalingGroupInput) (*request.Request, *autoscaling.DeleteAutoScalingGroupOutput) + + DeleteAutoScalingGroup(*autoscaling.DeleteAutoScalingGroupInput) (*autoscaling.DeleteAutoScalingGroupOutput, error) + + DeleteLaunchConfigurationRequest(*autoscaling.DeleteLaunchConfigurationInput) (*request.Request, *autoscaling.DeleteLaunchConfigurationOutput) + + DeleteLaunchConfiguration(*autoscaling.DeleteLaunchConfigurationInput) (*autoscaling.DeleteLaunchConfigurationOutput, error) + + DeleteLifecycleHookRequest(*autoscaling.DeleteLifecycleHookInput) (*request.Request, *autoscaling.DeleteLifecycleHookOutput) + + DeleteLifecycleHook(*autoscaling.DeleteLifecycleHookInput) (*autoscaling.DeleteLifecycleHookOutput, error) + + DeleteNotificationConfigurationRequest(*autoscaling.DeleteNotificationConfigurationInput) (*request.Request, *autoscaling.DeleteNotificationConfigurationOutput) + + DeleteNotificationConfiguration(*autoscaling.DeleteNotificationConfigurationInput) (*autoscaling.DeleteNotificationConfigurationOutput, error) + + DeletePolicyRequest(*autoscaling.DeletePolicyInput) (*request.Request, *autoscaling.DeletePolicyOutput) + + DeletePolicy(*autoscaling.DeletePolicyInput) (*autoscaling.DeletePolicyOutput, error) + + DeleteScheduledActionRequest(*autoscaling.DeleteScheduledActionInput) (*request.Request, *autoscaling.DeleteScheduledActionOutput) + + DeleteScheduledAction(*autoscaling.DeleteScheduledActionInput) (*autoscaling.DeleteScheduledActionOutput, error) + + DeleteTagsRequest(*autoscaling.DeleteTagsInput) (*request.Request, *autoscaling.DeleteTagsOutput) + + DeleteTags(*autoscaling.DeleteTagsInput) (*autoscaling.DeleteTagsOutput, error) + + DescribeAccountLimitsRequest(*autoscaling.DescribeAccountLimitsInput) (*request.Request, *autoscaling.DescribeAccountLimitsOutput) + + DescribeAccountLimits(*autoscaling.DescribeAccountLimitsInput) (*autoscaling.DescribeAccountLimitsOutput, error) + + 
DescribeAdjustmentTypesRequest(*autoscaling.DescribeAdjustmentTypesInput) (*request.Request, *autoscaling.DescribeAdjustmentTypesOutput) + + DescribeAdjustmentTypes(*autoscaling.DescribeAdjustmentTypesInput) (*autoscaling.DescribeAdjustmentTypesOutput, error) + + DescribeAutoScalingGroupsRequest(*autoscaling.DescribeAutoScalingGroupsInput) (*request.Request, *autoscaling.DescribeAutoScalingGroupsOutput) + + DescribeAutoScalingGroups(*autoscaling.DescribeAutoScalingGroupsInput) (*autoscaling.DescribeAutoScalingGroupsOutput, error) + + DescribeAutoScalingGroupsPages(*autoscaling.DescribeAutoScalingGroupsInput, func(*autoscaling.DescribeAutoScalingGroupsOutput, bool) bool) error + + DescribeAutoScalingInstancesRequest(*autoscaling.DescribeAutoScalingInstancesInput) (*request.Request, *autoscaling.DescribeAutoScalingInstancesOutput) + + DescribeAutoScalingInstances(*autoscaling.DescribeAutoScalingInstancesInput) (*autoscaling.DescribeAutoScalingInstancesOutput, error) + + DescribeAutoScalingInstancesPages(*autoscaling.DescribeAutoScalingInstancesInput, func(*autoscaling.DescribeAutoScalingInstancesOutput, bool) bool) error + + DescribeAutoScalingNotificationTypesRequest(*autoscaling.DescribeAutoScalingNotificationTypesInput) (*request.Request, *autoscaling.DescribeAutoScalingNotificationTypesOutput) + + DescribeAutoScalingNotificationTypes(*autoscaling.DescribeAutoScalingNotificationTypesInput) (*autoscaling.DescribeAutoScalingNotificationTypesOutput, error) + + DescribeLaunchConfigurationsRequest(*autoscaling.DescribeLaunchConfigurationsInput) (*request.Request, *autoscaling.DescribeLaunchConfigurationsOutput) + + DescribeLaunchConfigurations(*autoscaling.DescribeLaunchConfigurationsInput) (*autoscaling.DescribeLaunchConfigurationsOutput, error) + + DescribeLaunchConfigurationsPages(*autoscaling.DescribeLaunchConfigurationsInput, func(*autoscaling.DescribeLaunchConfigurationsOutput, bool) bool) error + + DescribeLifecycleHookTypesRequest(*autoscaling.DescribeLifecycleHookTypesInput) (*request.Request, *autoscaling.DescribeLifecycleHookTypesOutput) + + DescribeLifecycleHookTypes(*autoscaling.DescribeLifecycleHookTypesInput) (*autoscaling.DescribeLifecycleHookTypesOutput, error) + + DescribeLifecycleHooksRequest(*autoscaling.DescribeLifecycleHooksInput) (*request.Request, *autoscaling.DescribeLifecycleHooksOutput) + + DescribeLifecycleHooks(*autoscaling.DescribeLifecycleHooksInput) (*autoscaling.DescribeLifecycleHooksOutput, error) + + DescribeLoadBalancersRequest(*autoscaling.DescribeLoadBalancersInput) (*request.Request, *autoscaling.DescribeLoadBalancersOutput) + + DescribeLoadBalancers(*autoscaling.DescribeLoadBalancersInput) (*autoscaling.DescribeLoadBalancersOutput, error) + + DescribeMetricCollectionTypesRequest(*autoscaling.DescribeMetricCollectionTypesInput) (*request.Request, *autoscaling.DescribeMetricCollectionTypesOutput) + + DescribeMetricCollectionTypes(*autoscaling.DescribeMetricCollectionTypesInput) (*autoscaling.DescribeMetricCollectionTypesOutput, error) + + DescribeNotificationConfigurationsRequest(*autoscaling.DescribeNotificationConfigurationsInput) (*request.Request, *autoscaling.DescribeNotificationConfigurationsOutput) + + DescribeNotificationConfigurations(*autoscaling.DescribeNotificationConfigurationsInput) (*autoscaling.DescribeNotificationConfigurationsOutput, error) + + DescribeNotificationConfigurationsPages(*autoscaling.DescribeNotificationConfigurationsInput, func(*autoscaling.DescribeNotificationConfigurationsOutput, bool) bool) error + + 
DescribePoliciesRequest(*autoscaling.DescribePoliciesInput) (*request.Request, *autoscaling.DescribePoliciesOutput) + + DescribePolicies(*autoscaling.DescribePoliciesInput) (*autoscaling.DescribePoliciesOutput, error) + + DescribePoliciesPages(*autoscaling.DescribePoliciesInput, func(*autoscaling.DescribePoliciesOutput, bool) bool) error + + DescribeScalingActivitiesRequest(*autoscaling.DescribeScalingActivitiesInput) (*request.Request, *autoscaling.DescribeScalingActivitiesOutput) + + DescribeScalingActivities(*autoscaling.DescribeScalingActivitiesInput) (*autoscaling.DescribeScalingActivitiesOutput, error) + + DescribeScalingActivitiesPages(*autoscaling.DescribeScalingActivitiesInput, func(*autoscaling.DescribeScalingActivitiesOutput, bool) bool) error + + DescribeScalingProcessTypesRequest(*autoscaling.DescribeScalingProcessTypesInput) (*request.Request, *autoscaling.DescribeScalingProcessTypesOutput) + + DescribeScalingProcessTypes(*autoscaling.DescribeScalingProcessTypesInput) (*autoscaling.DescribeScalingProcessTypesOutput, error) + + DescribeScheduledActionsRequest(*autoscaling.DescribeScheduledActionsInput) (*request.Request, *autoscaling.DescribeScheduledActionsOutput) + + DescribeScheduledActions(*autoscaling.DescribeScheduledActionsInput) (*autoscaling.DescribeScheduledActionsOutput, error) + + DescribeScheduledActionsPages(*autoscaling.DescribeScheduledActionsInput, func(*autoscaling.DescribeScheduledActionsOutput, bool) bool) error + + DescribeTagsRequest(*autoscaling.DescribeTagsInput) (*request.Request, *autoscaling.DescribeTagsOutput) + + DescribeTags(*autoscaling.DescribeTagsInput) (*autoscaling.DescribeTagsOutput, error) + + DescribeTagsPages(*autoscaling.DescribeTagsInput, func(*autoscaling.DescribeTagsOutput, bool) bool) error + + DescribeTerminationPolicyTypesRequest(*autoscaling.DescribeTerminationPolicyTypesInput) (*request.Request, *autoscaling.DescribeTerminationPolicyTypesOutput) + + DescribeTerminationPolicyTypes(*autoscaling.DescribeTerminationPolicyTypesInput) (*autoscaling.DescribeTerminationPolicyTypesOutput, error) + + DetachInstancesRequest(*autoscaling.DetachInstancesInput) (*request.Request, *autoscaling.DetachInstancesOutput) + + DetachInstances(*autoscaling.DetachInstancesInput) (*autoscaling.DetachInstancesOutput, error) + + DetachLoadBalancersRequest(*autoscaling.DetachLoadBalancersInput) (*request.Request, *autoscaling.DetachLoadBalancersOutput) + + DetachLoadBalancers(*autoscaling.DetachLoadBalancersInput) (*autoscaling.DetachLoadBalancersOutput, error) + + DisableMetricsCollectionRequest(*autoscaling.DisableMetricsCollectionInput) (*request.Request, *autoscaling.DisableMetricsCollectionOutput) + + DisableMetricsCollection(*autoscaling.DisableMetricsCollectionInput) (*autoscaling.DisableMetricsCollectionOutput, error) + + EnableMetricsCollectionRequest(*autoscaling.EnableMetricsCollectionInput) (*request.Request, *autoscaling.EnableMetricsCollectionOutput) + + EnableMetricsCollection(*autoscaling.EnableMetricsCollectionInput) (*autoscaling.EnableMetricsCollectionOutput, error) + + EnterStandbyRequest(*autoscaling.EnterStandbyInput) (*request.Request, *autoscaling.EnterStandbyOutput) + + EnterStandby(*autoscaling.EnterStandbyInput) (*autoscaling.EnterStandbyOutput, error) + + ExecutePolicyRequest(*autoscaling.ExecutePolicyInput) (*request.Request, *autoscaling.ExecutePolicyOutput) + + ExecutePolicy(*autoscaling.ExecutePolicyInput) (*autoscaling.ExecutePolicyOutput, error) + + ExitStandbyRequest(*autoscaling.ExitStandbyInput) (*request.Request, 
*autoscaling.ExitStandbyOutput) + + ExitStandby(*autoscaling.ExitStandbyInput) (*autoscaling.ExitStandbyOutput, error) + + PutLifecycleHookRequest(*autoscaling.PutLifecycleHookInput) (*request.Request, *autoscaling.PutLifecycleHookOutput) + + PutLifecycleHook(*autoscaling.PutLifecycleHookInput) (*autoscaling.PutLifecycleHookOutput, error) + + PutNotificationConfigurationRequest(*autoscaling.PutNotificationConfigurationInput) (*request.Request, *autoscaling.PutNotificationConfigurationOutput) + + PutNotificationConfiguration(*autoscaling.PutNotificationConfigurationInput) (*autoscaling.PutNotificationConfigurationOutput, error) + + PutScalingPolicyRequest(*autoscaling.PutScalingPolicyInput) (*request.Request, *autoscaling.PutScalingPolicyOutput) + + PutScalingPolicy(*autoscaling.PutScalingPolicyInput) (*autoscaling.PutScalingPolicyOutput, error) + + PutScheduledUpdateGroupActionRequest(*autoscaling.PutScheduledUpdateGroupActionInput) (*request.Request, *autoscaling.PutScheduledUpdateGroupActionOutput) + + PutScheduledUpdateGroupAction(*autoscaling.PutScheduledUpdateGroupActionInput) (*autoscaling.PutScheduledUpdateGroupActionOutput, error) + + RecordLifecycleActionHeartbeatRequest(*autoscaling.RecordLifecycleActionHeartbeatInput) (*request.Request, *autoscaling.RecordLifecycleActionHeartbeatOutput) + + RecordLifecycleActionHeartbeat(*autoscaling.RecordLifecycleActionHeartbeatInput) (*autoscaling.RecordLifecycleActionHeartbeatOutput, error) + + ResumeProcessesRequest(*autoscaling.ScalingProcessQuery) (*request.Request, *autoscaling.ResumeProcessesOutput) + + ResumeProcesses(*autoscaling.ScalingProcessQuery) (*autoscaling.ResumeProcessesOutput, error) + + SetDesiredCapacityRequest(*autoscaling.SetDesiredCapacityInput) (*request.Request, *autoscaling.SetDesiredCapacityOutput) + + SetDesiredCapacity(*autoscaling.SetDesiredCapacityInput) (*autoscaling.SetDesiredCapacityOutput, error) + + SetInstanceHealthRequest(*autoscaling.SetInstanceHealthInput) (*request.Request, *autoscaling.SetInstanceHealthOutput) + + SetInstanceHealth(*autoscaling.SetInstanceHealthInput) (*autoscaling.SetInstanceHealthOutput, error) + + SetInstanceProtectionRequest(*autoscaling.SetInstanceProtectionInput) (*request.Request, *autoscaling.SetInstanceProtectionOutput) + + SetInstanceProtection(*autoscaling.SetInstanceProtectionInput) (*autoscaling.SetInstanceProtectionOutput, error) + + SuspendProcessesRequest(*autoscaling.ScalingProcessQuery) (*request.Request, *autoscaling.SuspendProcessesOutput) + + SuspendProcesses(*autoscaling.ScalingProcessQuery) (*autoscaling.SuspendProcessesOutput, error) + + TerminateInstanceInAutoScalingGroupRequest(*autoscaling.TerminateInstanceInAutoScalingGroupInput) (*request.Request, *autoscaling.TerminateInstanceInAutoScalingGroupOutput) + + TerminateInstanceInAutoScalingGroup(*autoscaling.TerminateInstanceInAutoScalingGroupInput) (*autoscaling.TerminateInstanceInAutoScalingGroupOutput, error) + + UpdateAutoScalingGroupRequest(*autoscaling.UpdateAutoScalingGroupInput) (*request.Request, *autoscaling.UpdateAutoScalingGroupOutput) + + UpdateAutoScalingGroup(*autoscaling.UpdateAutoScalingGroupInput) (*autoscaling.UpdateAutoScalingGroupOutput, error) +} + +var _ AutoScalingAPI = (*autoscaling.AutoScaling)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/examples_test.go new file mode 100644 index 000000000..24b9ee9a8 --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/examples_test.go @@ -0,0 +1,1209 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package autoscaling_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/autoscaling" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleAutoScaling_AttachInstances() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.AttachInstancesInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + InstanceIds: []*string{ + aws.String("XmlStringMaxLen19"), // Required + // More values... + }, + } + resp, err := svc.AttachInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_AttachLoadBalancers() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.AttachLoadBalancersInput{ + AutoScalingGroupName: aws.String("ResourceName"), + LoadBalancerNames: []*string{ + aws.String("XmlStringMaxLen255"), // Required + // More values... + }, + } + resp, err := svc.AttachLoadBalancers(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_CompleteLifecycleAction() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.CompleteLifecycleActionInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + LifecycleActionResult: aws.String("LifecycleActionResult"), // Required + LifecycleHookName: aws.String("AsciiStringMaxLen255"), // Required + InstanceId: aws.String("XmlStringMaxLen19"), + LifecycleActionToken: aws.String("LifecycleActionToken"), + } + resp, err := svc.CompleteLifecycleAction(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_CreateAutoScalingGroup() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.CreateAutoScalingGroupInput{ + AutoScalingGroupName: aws.String("XmlStringMaxLen255"), // Required + MaxSize: aws.Int64(1), // Required + MinSize: aws.Int64(1), // Required + AvailabilityZones: []*string{ + aws.String("XmlStringMaxLen255"), // Required + // More values... + }, + DefaultCooldown: aws.Int64(1), + DesiredCapacity: aws.Int64(1), + HealthCheckGracePeriod: aws.Int64(1), + HealthCheckType: aws.String("XmlStringMaxLen32"), + InstanceId: aws.String("XmlStringMaxLen19"), + LaunchConfigurationName: aws.String("ResourceName"), + LoadBalancerNames: []*string{ + aws.String("XmlStringMaxLen255"), // Required + // More values... + }, + NewInstancesProtectedFromScaleIn: aws.Bool(true), + PlacementGroup: aws.String("XmlStringMaxLen255"), + Tags: []*autoscaling.Tag{ + { // Required + Key: aws.String("TagKey"), // Required + PropagateAtLaunch: aws.Bool(true), + ResourceId: aws.String("XmlString"), + ResourceType: aws.String("XmlString"), + Value: aws.String("TagValue"), + }, + // More values... + }, + TerminationPolicies: []*string{ + aws.String("XmlStringMaxLen1600"), // Required + // More values... 
+ }, + VPCZoneIdentifier: aws.String("XmlStringMaxLen255"), + } + resp, err := svc.CreateAutoScalingGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_CreateLaunchConfiguration() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.CreateLaunchConfigurationInput{ + LaunchConfigurationName: aws.String("XmlStringMaxLen255"), // Required + AssociatePublicIpAddress: aws.Bool(true), + BlockDeviceMappings: []*autoscaling.BlockDeviceMapping{ + { // Required + DeviceName: aws.String("XmlStringMaxLen255"), // Required + Ebs: &autoscaling.Ebs{ + DeleteOnTermination: aws.Bool(true), + Encrypted: aws.Bool(true), + Iops: aws.Int64(1), + SnapshotId: aws.String("XmlStringMaxLen255"), + VolumeSize: aws.Int64(1), + VolumeType: aws.String("BlockDeviceEbsVolumeType"), + }, + NoDevice: aws.Bool(true), + VirtualName: aws.String("XmlStringMaxLen255"), + }, + // More values... + }, + ClassicLinkVPCId: aws.String("XmlStringMaxLen255"), + ClassicLinkVPCSecurityGroups: []*string{ + aws.String("XmlStringMaxLen255"), // Required + // More values... + }, + EbsOptimized: aws.Bool(true), + IamInstanceProfile: aws.String("XmlStringMaxLen1600"), + ImageId: aws.String("XmlStringMaxLen255"), + InstanceId: aws.String("XmlStringMaxLen19"), + InstanceMonitoring: &autoscaling.InstanceMonitoring{ + Enabled: aws.Bool(true), + }, + InstanceType: aws.String("XmlStringMaxLen255"), + KernelId: aws.String("XmlStringMaxLen255"), + KeyName: aws.String("XmlStringMaxLen255"), + PlacementTenancy: aws.String("XmlStringMaxLen64"), + RamdiskId: aws.String("XmlStringMaxLen255"), + SecurityGroups: []*string{ + aws.String("XmlString"), // Required + // More values... + }, + SpotPrice: aws.String("SpotPrice"), + UserData: aws.String("XmlStringUserData"), + } + resp, err := svc.CreateLaunchConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_CreateOrUpdateTags() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.CreateOrUpdateTagsInput{ + Tags: []*autoscaling.Tag{ // Required + { // Required + Key: aws.String("TagKey"), // Required + PropagateAtLaunch: aws.Bool(true), + ResourceId: aws.String("XmlString"), + ResourceType: aws.String("XmlString"), + Value: aws.String("TagValue"), + }, + // More values... + }, + } + resp, err := svc.CreateOrUpdateTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DeleteAutoScalingGroup() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DeleteAutoScalingGroupInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + ForceDelete: aws.Bool(true), + } + resp, err := svc.DeleteAutoScalingGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAutoScaling_DeleteLaunchConfiguration() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DeleteLaunchConfigurationInput{ + LaunchConfigurationName: aws.String("ResourceName"), // Required + } + resp, err := svc.DeleteLaunchConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DeleteLifecycleHook() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DeleteLifecycleHookInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + LifecycleHookName: aws.String("AsciiStringMaxLen255"), // Required + } + resp, err := svc.DeleteLifecycleHook(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DeleteNotificationConfiguration() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DeleteNotificationConfigurationInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + TopicARN: aws.String("ResourceName"), // Required + } + resp, err := svc.DeleteNotificationConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DeletePolicy() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DeletePolicyInput{ + PolicyName: aws.String("ResourceName"), // Required + AutoScalingGroupName: aws.String("ResourceName"), + } + resp, err := svc.DeletePolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DeleteScheduledAction() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DeleteScheduledActionInput{ + ScheduledActionName: aws.String("ResourceName"), // Required + AutoScalingGroupName: aws.String("ResourceName"), + } + resp, err := svc.DeleteScheduledAction(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DeleteTags() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DeleteTagsInput{ + Tags: []*autoscaling.Tag{ // Required + { // Required + Key: aws.String("TagKey"), // Required + PropagateAtLaunch: aws.Bool(true), + ResourceId: aws.String("XmlString"), + ResourceType: aws.String("XmlString"), + Value: aws.String("TagValue"), + }, + // More values... + }, + } + resp, err := svc.DeleteTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeAccountLimits() { + svc := autoscaling.New(session.New()) + + var params *autoscaling.DescribeAccountLimitsInput + resp, err := svc.DescribeAccountLimits(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeAdjustmentTypes() { + svc := autoscaling.New(session.New()) + + var params *autoscaling.DescribeAdjustmentTypesInput + resp, err := svc.DescribeAdjustmentTypes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeAutoScalingGroups() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DescribeAutoScalingGroupsInput{ + AutoScalingGroupNames: []*string{ + aws.String("ResourceName"), // Required + // More values... + }, + MaxRecords: aws.Int64(1), + NextToken: aws.String("XmlString"), + } + resp, err := svc.DescribeAutoScalingGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeAutoScalingInstances() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DescribeAutoScalingInstancesInput{ + InstanceIds: []*string{ + aws.String("XmlStringMaxLen19"), // Required + // More values... + }, + MaxRecords: aws.Int64(1), + NextToken: aws.String("XmlString"), + } + resp, err := svc.DescribeAutoScalingInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeAutoScalingNotificationTypes() { + svc := autoscaling.New(session.New()) + + var params *autoscaling.DescribeAutoScalingNotificationTypesInput + resp, err := svc.DescribeAutoScalingNotificationTypes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeLaunchConfigurations() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DescribeLaunchConfigurationsInput{ + LaunchConfigurationNames: []*string{ + aws.String("ResourceName"), // Required + // More values... + }, + MaxRecords: aws.Int64(1), + NextToken: aws.String("XmlString"), + } + resp, err := svc.DescribeLaunchConfigurations(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeLifecycleHookTypes() { + svc := autoscaling.New(session.New()) + + var params *autoscaling.DescribeLifecycleHookTypesInput + resp, err := svc.DescribeLifecycleHookTypes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeLifecycleHooks() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DescribeLifecycleHooksInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + LifecycleHookNames: []*string{ + aws.String("AsciiStringMaxLen255"), // Required + // More values... + }, + } + resp, err := svc.DescribeLifecycleHooks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeLoadBalancers() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DescribeLoadBalancersInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + MaxRecords: aws.Int64(1), + NextToken: aws.String("XmlString"), + } + resp, err := svc.DescribeLoadBalancers(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeMetricCollectionTypes() { + svc := autoscaling.New(session.New()) + + var params *autoscaling.DescribeMetricCollectionTypesInput + resp, err := svc.DescribeMetricCollectionTypes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeNotificationConfigurations() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DescribeNotificationConfigurationsInput{ + AutoScalingGroupNames: []*string{ + aws.String("ResourceName"), // Required + // More values... + }, + MaxRecords: aws.Int64(1), + NextToken: aws.String("XmlString"), + } + resp, err := svc.DescribeNotificationConfigurations(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribePolicies() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DescribePoliciesInput{ + AutoScalingGroupName: aws.String("ResourceName"), + MaxRecords: aws.Int64(1), + NextToken: aws.String("XmlString"), + PolicyNames: []*string{ + aws.String("ResourceName"), // Required + // More values... + }, + PolicyTypes: []*string{ + aws.String("XmlStringMaxLen64"), // Required + // More values... + }, + } + resp, err := svc.DescribePolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeScalingActivities() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DescribeScalingActivitiesInput{ + ActivityIds: []*string{ + aws.String("XmlString"), // Required + // More values... + }, + AutoScalingGroupName: aws.String("ResourceName"), + MaxRecords: aws.Int64(1), + NextToken: aws.String("XmlString"), + } + resp, err := svc.DescribeScalingActivities(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
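+ // DescribeScalingActivities pages its results; a sketch of walking the
+ // remaining pages via NextToken before printing (each iteration replaces
+ // resp with the next page; NextToken is nil on the last page):
+ //
+ //   for err == nil && resp.NextToken != nil {
+ //       params.NextToken = resp.NextToken
+ //       resp, err = svc.DescribeScalingActivities(params)
+ //   }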
+ fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeScalingProcessTypes() { + svc := autoscaling.New(session.New()) + + var params *autoscaling.DescribeScalingProcessTypesInput + resp, err := svc.DescribeScalingProcessTypes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeScheduledActions() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DescribeScheduledActionsInput{ + AutoScalingGroupName: aws.String("ResourceName"), + EndTime: aws.Time(time.Now()), + MaxRecords: aws.Int64(1), + NextToken: aws.String("XmlString"), + ScheduledActionNames: []*string{ + aws.String("ResourceName"), // Required + // More values... + }, + StartTime: aws.Time(time.Now()), + } + resp, err := svc.DescribeScheduledActions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeTags() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DescribeTagsInput{ + Filters: []*autoscaling.Filter{ + { // Required + Name: aws.String("XmlString"), + Values: []*string{ + aws.String("XmlString"), // Required + // More values... + }, + }, + // More values... + }, + MaxRecords: aws.Int64(1), + NextToken: aws.String("XmlString"), + } + resp, err := svc.DescribeTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeTerminationPolicyTypes() { + svc := autoscaling.New(session.New()) + + var params *autoscaling.DescribeTerminationPolicyTypesInput + resp, err := svc.DescribeTerminationPolicyTypes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DetachInstances() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DetachInstancesInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + ShouldDecrementDesiredCapacity: aws.Bool(true), // Required + InstanceIds: []*string{ + aws.String("XmlStringMaxLen19"), // Required + // More values... + }, + } + resp, err := svc.DetachInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DetachLoadBalancers() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DetachLoadBalancersInput{ + AutoScalingGroupName: aws.String("ResourceName"), + LoadBalancerNames: []*string{ + aws.String("XmlStringMaxLen255"), // Required + // More values... + }, + } + resp, err := svc.DetachLoadBalancers(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAutoScaling_DisableMetricsCollection() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DisableMetricsCollectionInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + Metrics: []*string{ + aws.String("XmlStringMaxLen255"), // Required + // More values... + }, + } + resp, err := svc.DisableMetricsCollection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_EnableMetricsCollection() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.EnableMetricsCollectionInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + Granularity: aws.String("XmlStringMaxLen255"), // Required + Metrics: []*string{ + aws.String("XmlStringMaxLen255"), // Required + // More values... + }, + } + resp, err := svc.EnableMetricsCollection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_EnterStandby() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.EnterStandbyInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + ShouldDecrementDesiredCapacity: aws.Bool(true), // Required + InstanceIds: []*string{ + aws.String("XmlStringMaxLen19"), // Required + // More values... + }, + } + resp, err := svc.EnterStandby(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_ExecutePolicy() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.ExecutePolicyInput{ + PolicyName: aws.String("ResourceName"), // Required + AutoScalingGroupName: aws.String("ResourceName"), + BreachThreshold: aws.Float64(1.0), + HonorCooldown: aws.Bool(true), + MetricValue: aws.Float64(1.0), + } + resp, err := svc.ExecutePolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_ExitStandby() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.ExitStandbyInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + InstanceIds: []*string{ + aws.String("XmlStringMaxLen19"), // Required + // More values... + }, + } + resp, err := svc.ExitStandby(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAutoScaling_PutLifecycleHook() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.PutLifecycleHookInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + LifecycleHookName: aws.String("AsciiStringMaxLen255"), // Required + DefaultResult: aws.String("LifecycleActionResult"), + HeartbeatTimeout: aws.Int64(1), + LifecycleTransition: aws.String("LifecycleTransition"), + NotificationMetadata: aws.String("XmlStringMaxLen1023"), + NotificationTargetARN: aws.String("NotificationTargetResourceName"), + RoleARN: aws.String("ResourceName"), + } + resp, err := svc.PutLifecycleHook(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_PutNotificationConfiguration() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.PutNotificationConfigurationInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + NotificationTypes: []*string{ // Required + aws.String("XmlStringMaxLen255"), // Required + // More values... + }, + TopicARN: aws.String("ResourceName"), // Required + } + resp, err := svc.PutNotificationConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_PutScalingPolicy() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.PutScalingPolicyInput{ + AdjustmentType: aws.String("XmlStringMaxLen255"), // Required + AutoScalingGroupName: aws.String("ResourceName"), // Required + PolicyName: aws.String("XmlStringMaxLen255"), // Required + Cooldown: aws.Int64(1), + EstimatedInstanceWarmup: aws.Int64(1), + MetricAggregationType: aws.String("XmlStringMaxLen32"), + MinAdjustmentMagnitude: aws.Int64(1), + MinAdjustmentStep: aws.Int64(1), + PolicyType: aws.String("XmlStringMaxLen64"), + ScalingAdjustment: aws.Int64(1), + StepAdjustments: []*autoscaling.StepAdjustment{ + { // Required + ScalingAdjustment: aws.Int64(1), // Required + MetricIntervalLowerBound: aws.Float64(1.0), + MetricIntervalUpperBound: aws.Float64(1.0), + }, + // More values... + }, + } + resp, err := svc.PutScalingPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_PutScheduledUpdateGroupAction() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.PutScheduledUpdateGroupActionInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + ScheduledActionName: aws.String("XmlStringMaxLen255"), // Required + DesiredCapacity: aws.Int64(1), + EndTime: aws.Time(time.Now()), + MaxSize: aws.Int64(1), + MinSize: aws.Int64(1), + Recurrence: aws.String("XmlStringMaxLen255"), + StartTime: aws.Time(time.Now()), + Time: aws.Time(time.Now()), + } + resp, err := svc.PutScheduledUpdateGroupAction(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAutoScaling_RecordLifecycleActionHeartbeat() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.RecordLifecycleActionHeartbeatInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + LifecycleHookName: aws.String("AsciiStringMaxLen255"), // Required + InstanceId: aws.String("XmlStringMaxLen19"), + LifecycleActionToken: aws.String("LifecycleActionToken"), + } + resp, err := svc.RecordLifecycleActionHeartbeat(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_ResumeProcesses() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.ScalingProcessQuery{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + ScalingProcesses: []*string{ + aws.String("XmlStringMaxLen255"), // Required + // More values... + }, + } + resp, err := svc.ResumeProcesses(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_SetDesiredCapacity() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.SetDesiredCapacityInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + DesiredCapacity: aws.Int64(1), // Required + HonorCooldown: aws.Bool(true), + } + resp, err := svc.SetDesiredCapacity(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_SetInstanceHealth() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.SetInstanceHealthInput{ + HealthStatus: aws.String("XmlStringMaxLen32"), // Required + InstanceId: aws.String("XmlStringMaxLen19"), // Required + ShouldRespectGracePeriod: aws.Bool(true), + } + resp, err := svc.SetInstanceHealth(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_SetInstanceProtection() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.SetInstanceProtectionInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + InstanceIds: []*string{ // Required + aws.String("XmlStringMaxLen19"), // Required + // More values... + }, + ProtectedFromScaleIn: aws.Bool(true), // Required + } + resp, err := svc.SetInstanceProtection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_SuspendProcesses() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.ScalingProcessQuery{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + ScalingProcesses: []*string{ + aws.String("XmlStringMaxLen255"), // Required + // More values... + }, + } + resp, err := svc.SuspendProcesses(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
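+ // A sketch of that cast (awserr is github.com/aws/aws-sdk-go/aws/awserr;
+ // Code and Message are methods on the awserr.Error interface):
+ //
+ //   if aerr, ok := err.(awserr.Error); ok {
+ //       fmt.Println(aerr.Code(), aerr.Message())
+ //   }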
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleAutoScaling_TerminateInstanceInAutoScalingGroup() {
+ svc := autoscaling.New(session.New())
+
+ params := &autoscaling.TerminateInstanceInAutoScalingGroupInput{
+ InstanceId: aws.String("XmlStringMaxLen19"), // Required
+ ShouldDecrementDesiredCapacity: aws.Bool(true), // Required
+ }
+ resp, err := svc.TerminateInstanceInAutoScalingGroup(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleAutoScaling_UpdateAutoScalingGroup() {
+ svc := autoscaling.New(session.New())
+
+ params := &autoscaling.UpdateAutoScalingGroupInput{
+ AutoScalingGroupName: aws.String("ResourceName"), // Required
+ AvailabilityZones: []*string{
+ aws.String("XmlStringMaxLen255"), // Required
+ // More values...
+ },
+ DefaultCooldown: aws.Int64(1),
+ DesiredCapacity: aws.Int64(1),
+ HealthCheckGracePeriod: aws.Int64(1),
+ HealthCheckType: aws.String("XmlStringMaxLen32"),
+ LaunchConfigurationName: aws.String("ResourceName"),
+ MaxSize: aws.Int64(1),
+ MinSize: aws.Int64(1),
+ NewInstancesProtectedFromScaleIn: aws.Bool(true),
+ PlacementGroup: aws.String("XmlStringMaxLen255"),
+ TerminationPolicies: []*string{
+ aws.String("XmlStringMaxLen1600"), // Required
+ // More values...
+ },
+ VPCZoneIdentifier: aws.String("XmlStringMaxLen255"),
+ }
+ resp, err := svc.UpdateAutoScalingGroup(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/service.go b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/service.go
new file mode 100644
index 000000000..e529e4de8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/service.go
@@ -0,0 +1,88 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package autoscaling
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol/query"
+)
+
+// Auto Scaling is designed to automatically launch or terminate EC2 instances
+// based on user-defined policies, schedules, and health checks. Use this service
+// in conjunction with the Amazon CloudWatch and Elastic Load Balancing services.
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type AutoScaling struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "autoscaling"
+
+// New creates a new instance of the AutoScaling client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an AutoScaling client from just a session.
+// svc := autoscaling.New(mySession)
+//
+// // Create an AutoScaling client with additional configuration
+// svc := autoscaling.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *AutoScaling {
+ c := p.ClientConfig(ServiceName, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *AutoScaling {
+ svc := &AutoScaling{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2011-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+ svc.Handlers.Build.PushBackNamed(query.BuildHandler)
+ svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler)
+ svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler)
+ svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler)
+
+ // Run custom client initialization if present
+ if initClient != nil {
+ initClient(svc.Client)
+ }
+
+ return svc
+}
+
+// newRequest creates a new request for an AutoScaling operation and runs any
+// custom request initialization.
+func (c *AutoScaling) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ // Run custom request initialization if present
+ if initRequest != nil {
+ initRequest(req)
+ }
+
+ return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/waiters.go
new file mode 100644
index 000000000..42595d217
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/waiters.go
@@ -0,0 +1,94 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
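+//
+// The waiters below poll DescribeAutoScalingGroups until a condition holds.
+// A minimal usage sketch (the session and the group name "my-asg" are
+// illustrative, not part of this file):
+//
+//   svc := autoscaling.New(session.New())
+//   err := svc.WaitUntilGroupInService(&autoscaling.DescribeAutoScalingGroupsInput{
+//       AutoScalingGroupNames: []*string{aws.String("my-asg")},
+//   })
+//   if err != nil {
+//       fmt.Println(err.Error()) // timed out or the request failed
+//   }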
+ +package autoscaling + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *AutoScaling) WaitUntilGroupExists(input *DescribeAutoScalingGroupsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeAutoScalingGroups", + Delay: 5, + MaxAttempts: 10, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "path", + Argument: "length(AutoScalingGroups) > `0`", + Expected: true, + }, + { + State: "retry", + Matcher: "path", + Argument: "length(AutoScalingGroups) > `0`", + Expected: false, + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *AutoScaling) WaitUntilGroupInService(input *DescribeAutoScalingGroupsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeAutoScalingGroups", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "path", + Argument: "contains(AutoScalingGroups[].[length(Instances[?LifecycleState=='InService']) >= MinSize][], `false`)", + Expected: false, + }, + { + State: "retry", + Matcher: "path", + Argument: "contains(AutoScalingGroups[].[length(Instances[?LifecycleState=='InService']) >= MinSize][], `false`)", + Expected: true, + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *AutoScaling) WaitUntilGroupNotExists(input *DescribeAutoScalingGroupsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeAutoScalingGroups", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "path", + Argument: "length(AutoScalingGroups) > `0`", + Expected: false, + }, + { + State: "retry", + Matcher: "path", + Argument: "length(AutoScalingGroups) > `0`", + Expected: true, + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudformation/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloudformation/api.go new file mode 100644 index 000000000..5ec08091c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudformation/api.go @@ -0,0 +1,4172 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cloudformation provides a client for AWS CloudFormation. +package cloudformation + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +const opCancelUpdateStack = "CancelUpdateStack" + +// CancelUpdateStackRequest generates a "aws/request.Request" representing the +// client's request for the CancelUpdateStack operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CancelUpdateStack method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CancelUpdateStackRequest method. 
+// req, resp := client.CancelUpdateStackRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFormation) CancelUpdateStackRequest(input *CancelUpdateStackInput) (req *request.Request, output *CancelUpdateStackOutput) { + op := &request.Operation{ + Name: opCancelUpdateStack, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelUpdateStackInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CancelUpdateStackOutput{} + req.Data = output + return +} + +// Cancels an update on the specified stack. If the call completes successfully, +// the stack rolls back the update and reverts to the previous stack configuration. +// +// You can cancel only stacks that are in the UPDATE_IN_PROGRESS state. +func (c *CloudFormation) CancelUpdateStack(input *CancelUpdateStackInput) (*CancelUpdateStackOutput, error) { + req, out := c.CancelUpdateStackRequest(input) + err := req.Send() + return out, err +} + +const opContinueUpdateRollback = "ContinueUpdateRollback" + +// ContinueUpdateRollbackRequest generates a "aws/request.Request" representing the +// client's request for the ContinueUpdateRollback operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ContinueUpdateRollback method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ContinueUpdateRollbackRequest method. +// req, resp := client.ContinueUpdateRollbackRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFormation) ContinueUpdateRollbackRequest(input *ContinueUpdateRollbackInput) (req *request.Request, output *ContinueUpdateRollbackOutput) { + op := &request.Operation{ + Name: opContinueUpdateRollback, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ContinueUpdateRollbackInput{} + } + + req = c.newRequest(op, input, output) + output = &ContinueUpdateRollbackOutput{} + req.Data = output + return +} + +// For a specified stack that is in the UPDATE_ROLLBACK_FAILED state, continues +// rolling it back to the UPDATE_ROLLBACK_COMPLETE state. Depending on the cause +// of the failure, you can manually fix the error (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/troubleshooting.html#troubleshooting-errors-update-rollback-failed) +// and continue the rollback. By continuing the rollback, you can return your +// stack to a working state (the UPDATE_ROLLBACK_COMPLETE state), and then try +// to update the stack again. +// +// A stack goes into the UPDATE_ROLLBACK_FAILED state when AWS CloudFormation +// cannot roll back all changes after a failed stack update. For example, you +// might have a stack that is rolling back to an old database instance that +// was deleted outside of AWS CloudFormation. 
Because AWS CloudFormation doesn't +// know the database was deleted, it assumes that the database instance still +// exists and attempts to roll back to it, causing the update rollback to fail. +func (c *CloudFormation) ContinueUpdateRollback(input *ContinueUpdateRollbackInput) (*ContinueUpdateRollbackOutput, error) { + req, out := c.ContinueUpdateRollbackRequest(input) + err := req.Send() + return out, err +} + +const opCreateChangeSet = "CreateChangeSet" + +// CreateChangeSetRequest generates a "aws/request.Request" representing the +// client's request for the CreateChangeSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateChangeSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateChangeSetRequest method. +// req, resp := client.CreateChangeSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFormation) CreateChangeSetRequest(input *CreateChangeSetInput) (req *request.Request, output *CreateChangeSetOutput) { + op := &request.Operation{ + Name: opCreateChangeSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateChangeSetInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateChangeSetOutput{} + req.Data = output + return +} + +// Creates a list of changes for a stack. AWS CloudFormation generates the change +// set by comparing the stack's information with the information that you submit. +// A change set can help you understand which resources AWS CloudFormation will +// change and how it will change them before you update your stack. Change sets +// allow you to check before you make a change so that you don't delete or replace +// critical resources. +// +// AWS CloudFormation doesn't make any changes to the stack when you create +// a change set. To make the specified changes, you must execute the change +// set by using the ExecuteChangeSet action. +// +// After the call successfully completes, AWS CloudFormation starts creating +// the change set. To check the status of the change set, use the DescribeChangeSet +// action. +func (c *CloudFormation) CreateChangeSet(input *CreateChangeSetInput) (*CreateChangeSetOutput, error) { + req, out := c.CreateChangeSetRequest(input) + err := req.Send() + return out, err +} + +const opCreateStack = "CreateStack" + +// CreateStackRequest generates a "aws/request.Request" representing the +// client's request for the CreateStack operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateStack method directly +// instead. 
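+//
+// For instance, a minimal direct call might look like the following sketch
+// (the stack name is illustrative; StackName is required, along with one of
+// TemplateBody or TemplateURL):
+//
+//   resp, err := client.CreateStack(&cloudformation.CreateStackInput{
+//       StackName:    aws.String("my-stack"),
+//       TemplateBody: aws.String(templateBody), // templateBody holds the template document
+//   })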
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the CreateStackRequest method.
+// req, resp := client.CreateStackRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CloudFormation) CreateStackRequest(input *CreateStackInput) (req *request.Request, output *CreateStackOutput) {
+ op := &request.Operation{
+ Name: opCreateStack,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateStackInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &CreateStackOutput{}
+ req.Data = output
+ return
+}
+
+// Creates a stack as specified in the template. After the call completes successfully,
+// the stack creation starts. You can check the status of the stack via the
+// DescribeStacks API.
+func (c *CloudFormation) CreateStack(input *CreateStackInput) (*CreateStackOutput, error) {
+ req, out := c.CreateStackRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteChangeSet = "DeleteChangeSet"
+
+// DeleteChangeSetRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteChangeSet operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DeleteChangeSet method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DeleteChangeSetRequest method.
+// req, resp := client.DeleteChangeSetRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CloudFormation) DeleteChangeSetRequest(input *DeleteChangeSetInput) (req *request.Request, output *DeleteChangeSetOutput) {
+ op := &request.Operation{
+ Name: opDeleteChangeSet,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteChangeSetInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DeleteChangeSetOutput{}
+ req.Data = output
+ return
+}
+
+// Deletes the specified change set. Deleting change sets ensures that no one
+// executes the wrong change set.
+//
+// If the call completes successfully, AWS CloudFormation has deleted the
+// change set.
+func (c *CloudFormation) DeleteChangeSet(input *DeleteChangeSetInput) (*DeleteChangeSetOutput, error) {
+ req, out := c.DeleteChangeSetRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteStack = "DeleteStack"
+
+// DeleteStackRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteStack operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DeleteStack method directly
+// instead.
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteStackRequest method. +// req, resp := client.DeleteStackRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFormation) DeleteStackRequest(input *DeleteStackInput) (req *request.Request, output *DeleteStackOutput) { + op := &request.Operation{ + Name: opDeleteStack, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteStackInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteStackOutput{} + req.Data = output + return +} + +// Deletes a specified stack. Once the call completes successfully, stack deletion +// starts. Deleted stacks do not show up in the DescribeStacks API if the deletion +// has been completed successfully. +func (c *CloudFormation) DeleteStack(input *DeleteStackInput) (*DeleteStackOutput, error) { + req, out := c.DeleteStackRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAccountLimits = "DescribeAccountLimits" + +// DescribeAccountLimitsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAccountLimits operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeAccountLimits method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeAccountLimitsRequest method. +// req, resp := client.DescribeAccountLimitsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFormation) DescribeAccountLimitsRequest(input *DescribeAccountLimitsInput) (req *request.Request, output *DescribeAccountLimitsOutput) { + op := &request.Operation{ + Name: opDescribeAccountLimits, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAccountLimitsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAccountLimitsOutput{} + req.Data = output + return +} + +// Retrieves your account's AWS CloudFormation limits, such as the maximum number +// of stacks that you can create in your account. +func (c *CloudFormation) DescribeAccountLimits(input *DescribeAccountLimitsInput) (*DescribeAccountLimitsOutput, error) { + req, out := c.DescribeAccountLimitsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeChangeSet = "DescribeChangeSet" + +// DescribeChangeSetRequest generates a "aws/request.Request" representing the +// client's request for the DescribeChangeSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeChangeSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeChangeSetRequest method. +// req, resp := client.DescribeChangeSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFormation) DescribeChangeSetRequest(input *DescribeChangeSetInput) (req *request.Request, output *DescribeChangeSetOutput) { + op := &request.Operation{ + Name: opDescribeChangeSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeChangeSetInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeChangeSetOutput{} + req.Data = output + return +} + +// Returns the inputs for the change set and a list of changes that AWS CloudFormation +// will make if you execute the change set. For more information, see Updating +// Stacks Using Change Sets (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-changesets.html) +// in the AWS CloudFormation User Guide. +func (c *CloudFormation) DescribeChangeSet(input *DescribeChangeSetInput) (*DescribeChangeSetOutput, error) { + req, out := c.DescribeChangeSetRequest(input) + err := req.Send() + return out, err +} + +const opDescribeStackEvents = "DescribeStackEvents" + +// DescribeStackEventsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeStackEvents operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeStackEvents method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeStackEventsRequest method. +// req, resp := client.DescribeStackEventsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFormation) DescribeStackEventsRequest(input *DescribeStackEventsInput) (req *request.Request, output *DescribeStackEventsOutput) { + op := &request.Operation{ + Name: opDescribeStackEvents, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeStackEventsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeStackEventsOutput{} + req.Data = output + return +} + +// Returns all stack related events for a specified stack in reverse chronological +// order. 
For more information about a stack's event history, go to Stacks (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/concept-stack.html) +// in the AWS CloudFormation User Guide. +// +// You can list events for stacks that have failed to create or have been +// deleted by specifying the unique stack identifier (stack ID). +func (c *CloudFormation) DescribeStackEvents(input *DescribeStackEventsInput) (*DescribeStackEventsOutput, error) { + req, out := c.DescribeStackEventsRequest(input) + err := req.Send() + return out, err +} + +// DescribeStackEventsPages iterates over the pages of a DescribeStackEvents operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeStackEvents method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeStackEvents operation. +// pageNum := 0 +// err := client.DescribeStackEventsPages(params, +// func(page *DescribeStackEventsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CloudFormation) DescribeStackEventsPages(input *DescribeStackEventsInput, fn func(p *DescribeStackEventsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeStackEventsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeStackEventsOutput), lastPage) + }) +} + +const opDescribeStackResource = "DescribeStackResource" + +// DescribeStackResourceRequest generates a "aws/request.Request" representing the +// client's request for the DescribeStackResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeStackResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeStackResourceRequest method. +// req, resp := client.DescribeStackResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFormation) DescribeStackResourceRequest(input *DescribeStackResourceInput) (req *request.Request, output *DescribeStackResourceOutput) { + op := &request.Operation{ + Name: opDescribeStackResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeStackResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeStackResourceOutput{} + req.Data = output + return +} + +// Returns a description of the specified resource in the specified stack. +// +// For deleted stacks, DescribeStackResource returns resource information for +// up to 90 days after the stack has been deleted. 
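+//
+// A direct call might look like the following sketch (the stack name and
+// logical resource ID are illustrative):
+//
+//   out, err := client.DescribeStackResource(&cloudformation.DescribeStackResourceInput{
+//       StackName:         aws.String("my-stack"),
+//       LogicalResourceId: aws.String("MyBucket"),
+//   })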
+func (c *CloudFormation) DescribeStackResource(input *DescribeStackResourceInput) (*DescribeStackResourceOutput, error) { + req, out := c.DescribeStackResourceRequest(input) + err := req.Send() + return out, err +} + +const opDescribeStackResources = "DescribeStackResources" + +// DescribeStackResourcesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeStackResources operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeStackResources method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeStackResourcesRequest method. +// req, resp := client.DescribeStackResourcesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFormation) DescribeStackResourcesRequest(input *DescribeStackResourcesInput) (req *request.Request, output *DescribeStackResourcesOutput) { + op := &request.Operation{ + Name: opDescribeStackResources, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeStackResourcesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeStackResourcesOutput{} + req.Data = output + return +} + +// Returns AWS resource descriptions for running and deleted stacks. If StackName +// is specified, all the associated resources that are part of the stack are +// returned. If PhysicalResourceId is specified, the associated resources of +// the stack that the resource belongs to are returned. +// +// Only the first 100 resources will be returned. If your stack has more resources +// than this, you should use ListStackResources instead. +// +// For deleted stacks, DescribeStackResources returns resource information +// for up to 90 days after the stack has been deleted. +// +// You must specify either StackName or PhysicalResourceId, but not both. In +// addition, you can specify LogicalResourceId to filter the returned result. +// For more information about resources, the LogicalResourceId and PhysicalResourceId, +// go to the AWS CloudFormation User Guide (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/). +// +// A ValidationError is returned if you specify both StackName and PhysicalResourceId +// in the same request. +func (c *CloudFormation) DescribeStackResources(input *DescribeStackResourcesInput) (*DescribeStackResourcesOutput, error) { + req, out := c.DescribeStackResourcesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeStacks = "DescribeStacks" + +// DescribeStacksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeStacks operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the DescribeStacks method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeStacksRequest method. +// req, resp := client.DescribeStacksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFormation) DescribeStacksRequest(input *DescribeStacksInput) (req *request.Request, output *DescribeStacksOutput) { + op := &request.Operation{ + Name: opDescribeStacks, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeStacksInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeStacksOutput{} + req.Data = output + return +} + +// Returns the description for the specified stack; if no stack name was specified, +// then it returns the description for all the stacks created. +func (c *CloudFormation) DescribeStacks(input *DescribeStacksInput) (*DescribeStacksOutput, error) { + req, out := c.DescribeStacksRequest(input) + err := req.Send() + return out, err +} + +// DescribeStacksPages iterates over the pages of a DescribeStacks operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeStacks method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeStacks operation. +// pageNum := 0 +// err := client.DescribeStacksPages(params, +// func(page *DescribeStacksOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CloudFormation) DescribeStacksPages(input *DescribeStacksInput, fn func(p *DescribeStacksOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeStacksRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeStacksOutput), lastPage) + }) +} + +const opEstimateTemplateCost = "EstimateTemplateCost" + +// EstimateTemplateCostRequest generates a "aws/request.Request" representing the +// client's request for the EstimateTemplateCost operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the EstimateTemplateCost method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EstimateTemplateCostRequest method. 
+// req, resp := client.EstimateTemplateCostRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFormation) EstimateTemplateCostRequest(input *EstimateTemplateCostInput) (req *request.Request, output *EstimateTemplateCostOutput) { + op := &request.Operation{ + Name: opEstimateTemplateCost, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EstimateTemplateCostInput{} + } + + req = c.newRequest(op, input, output) + output = &EstimateTemplateCostOutput{} + req.Data = output + return +} + +// Returns the estimated monthly cost of a template. The return value is an +// AWS Simple Monthly Calculator URL with a query string that describes the +// resources required to run the template. +func (c *CloudFormation) EstimateTemplateCost(input *EstimateTemplateCostInput) (*EstimateTemplateCostOutput, error) { + req, out := c.EstimateTemplateCostRequest(input) + err := req.Send() + return out, err +} + +const opExecuteChangeSet = "ExecuteChangeSet" + +// ExecuteChangeSetRequest generates a "aws/request.Request" representing the +// client's request for the ExecuteChangeSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ExecuteChangeSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ExecuteChangeSetRequest method. +// req, resp := client.ExecuteChangeSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFormation) ExecuteChangeSetRequest(input *ExecuteChangeSetInput) (req *request.Request, output *ExecuteChangeSetOutput) { + op := &request.Operation{ + Name: opExecuteChangeSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ExecuteChangeSetInput{} + } + + req = c.newRequest(op, input, output) + output = &ExecuteChangeSetOutput{} + req.Data = output + return +} + +// Updates a stack using the input information that was provided when the specified +// change set was created. After the call successfully completes, AWS CloudFormation +// starts updating the stack. Use the DescribeStacks action to view the status +// of the update. +// +// When you execute a change set, AWS CloudFormation deletes all other change +// sets associated with the stack because they aren't valid for the updated +// stack. +// +// If a stack policy is associated with the stack, AWS CloudFormation enforces +// the policy during the update. You can't specify a temporary stack policy +// that overrides the current policy. +func (c *CloudFormation) ExecuteChangeSet(input *ExecuteChangeSetInput) (*ExecuteChangeSetOutput, error) { + req, out := c.ExecuteChangeSetRequest(input) + err := req.Send() + return out, err +} + +const opGetStackPolicy = "GetStackPolicy" + +// GetStackPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetStackPolicy operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetStackPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetStackPolicyRequest method. +// req, resp := client.GetStackPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFormation) GetStackPolicyRequest(input *GetStackPolicyInput) (req *request.Request, output *GetStackPolicyOutput) { + op := &request.Operation{ + Name: opGetStackPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetStackPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetStackPolicyOutput{} + req.Data = output + return +} + +// Returns the stack policy for a specified stack. If a stack doesn't have a +// policy, a null value is returned. +func (c *CloudFormation) GetStackPolicy(input *GetStackPolicyInput) (*GetStackPolicyOutput, error) { + req, out := c.GetStackPolicyRequest(input) + err := req.Send() + return out, err +} + +const opGetTemplate = "GetTemplate" + +// GetTemplateRequest generates a "aws/request.Request" representing the +// client's request for the GetTemplate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetTemplate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetTemplateRequest method. +// req, resp := client.GetTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFormation) GetTemplateRequest(input *GetTemplateInput) (req *request.Request, output *GetTemplateOutput) { + op := &request.Operation{ + Name: opGetTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetTemplateInput{} + } + + req = c.newRequest(op, input, output) + output = &GetTemplateOutput{} + req.Data = output + return +} + +// Returns the template body for a specified stack. You can get the template +// for running or deleted stacks. +// +// For deleted stacks, GetTemplate returns the template for up to 90 days after +// the stack has been deleted. +// +// If the template does not exist, a ValidationError is returned. +func (c *CloudFormation) GetTemplate(input *GetTemplateInput) (*GetTemplateOutput, error) { + req, out := c.GetTemplateRequest(input) + err := req.Send() + return out, err +} + +const opGetTemplateSummary = "GetTemplateSummary" + +// GetTemplateSummaryRequest generates a "aws/request.Request" representing the +// client's request for the GetTemplateSummary operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetTemplateSummary method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetTemplateSummaryRequest method. +// req, resp := client.GetTemplateSummaryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFormation) GetTemplateSummaryRequest(input *GetTemplateSummaryInput) (req *request.Request, output *GetTemplateSummaryOutput) { + op := &request.Operation{ + Name: opGetTemplateSummary, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetTemplateSummaryInput{} + } + + req = c.newRequest(op, input, output) + output = &GetTemplateSummaryOutput{} + req.Data = output + return +} + +// Returns information about a new or existing template. The GetTemplateSummary +// action is useful for viewing parameter information, such as default parameter +// values and parameter types, before you create or update a stack. +// +// You can use the GetTemplateSummary action when you submit a template, or +// you can get template information for a running or deleted stack. +// +// For deleted stacks, GetTemplateSummary returns the template information +// for up to 90 days after the stack has been deleted. If the template does +// not exist, a ValidationError is returned. +func (c *CloudFormation) GetTemplateSummary(input *GetTemplateSummaryInput) (*GetTemplateSummaryOutput, error) { + req, out := c.GetTemplateSummaryRequest(input) + err := req.Send() + return out, err +} + +const opListChangeSets = "ListChangeSets" + +// ListChangeSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListChangeSets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListChangeSets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListChangeSetsRequest method. +// req, resp := client.ListChangeSetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFormation) ListChangeSetsRequest(input *ListChangeSetsInput) (req *request.Request, output *ListChangeSetsOutput) { + op := &request.Operation{ + Name: opListChangeSets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListChangeSetsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListChangeSetsOutput{} + req.Data = output + return +} + +// Returns the ID and status of each active change set for a stack. 
For example, +// AWS CloudFormation lists change sets that are in the CREATE_IN_PROGRESS or +// CREATE_PENDING state. +func (c *CloudFormation) ListChangeSets(input *ListChangeSetsInput) (*ListChangeSetsOutput, error) { + req, out := c.ListChangeSetsRequest(input) + err := req.Send() + return out, err +} + +const opListStackResources = "ListStackResources" + +// ListStackResourcesRequest generates a "aws/request.Request" representing the +// client's request for the ListStackResources operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListStackResources method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListStackResourcesRequest method. +// req, resp := client.ListStackResourcesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFormation) ListStackResourcesRequest(input *ListStackResourcesInput) (req *request.Request, output *ListStackResourcesOutput) { + op := &request.Operation{ + Name: opListStackResources, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListStackResourcesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListStackResourcesOutput{} + req.Data = output + return +} + +// Returns descriptions of all resources of the specified stack. +// +// For deleted stacks, ListStackResources returns resource information for +// up to 90 days after the stack has been deleted. +func (c *CloudFormation) ListStackResources(input *ListStackResourcesInput) (*ListStackResourcesOutput, error) { + req, out := c.ListStackResourcesRequest(input) + err := req.Send() + return out, err +} + +// ListStackResourcesPages iterates over the pages of a ListStackResources operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListStackResources method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListStackResources operation. 
+// pageNum := 0 +// err := client.ListStackResourcesPages(params, +// func(page *ListStackResourcesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CloudFormation) ListStackResourcesPages(input *ListStackResourcesInput, fn func(p *ListStackResourcesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListStackResourcesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListStackResourcesOutput), lastPage) + }) +} + +const opListStacks = "ListStacks" + +// ListStacksRequest generates a "aws/request.Request" representing the +// client's request for the ListStacks operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListStacks method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListStacksRequest method. +// req, resp := client.ListStacksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFormation) ListStacksRequest(input *ListStacksInput) (req *request.Request, output *ListStacksOutput) { + op := &request.Operation{ + Name: opListStacks, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListStacksInput{} + } + + req = c.newRequest(op, input, output) + output = &ListStacksOutput{} + req.Data = output + return +} + +// Returns the summary information for stacks whose status matches the specified +// StackStatusFilter. Summary information for stacks that have been deleted +// is kept for 90 days after the stack is deleted. If no StackStatusFilter is +// specified, summary information for all stacks is returned (including existing +// stacks and stacks that have been deleted). +func (c *CloudFormation) ListStacks(input *ListStacksInput) (*ListStacksOutput, error) { + req, out := c.ListStacksRequest(input) + err := req.Send() + return out, err +} + +// ListStacksPages iterates over the pages of a ListStacks operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListStacks method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListStacks operation. 
+// pageNum := 0 +// err := client.ListStacksPages(params, +// func(page *ListStacksOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CloudFormation) ListStacksPages(input *ListStacksInput, fn func(p *ListStacksOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListStacksRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListStacksOutput), lastPage) + }) +} + +const opSetStackPolicy = "SetStackPolicy" + +// SetStackPolicyRequest generates a "aws/request.Request" representing the +// client's request for the SetStackPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetStackPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetStackPolicyRequest method. +// req, resp := client.SetStackPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFormation) SetStackPolicyRequest(input *SetStackPolicyInput) (req *request.Request, output *SetStackPolicyOutput) { + op := &request.Operation{ + Name: opSetStackPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetStackPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetStackPolicyOutput{} + req.Data = output + return +} + +// Sets a stack policy for a specified stack. +func (c *CloudFormation) SetStackPolicy(input *SetStackPolicyInput) (*SetStackPolicyOutput, error) { + req, out := c.SetStackPolicyRequest(input) + err := req.Send() + return out, err +} + +const opSignalResource = "SignalResource" + +// SignalResourceRequest generates a "aws/request.Request" representing the +// client's request for the SignalResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SignalResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SignalResourceRequest method. 
+// req, resp := client.SignalResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFormation) SignalResourceRequest(input *SignalResourceInput) (req *request.Request, output *SignalResourceOutput) { + op := &request.Operation{ + Name: opSignalResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SignalResourceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SignalResourceOutput{} + req.Data = output + return +} + +// Sends a signal to the specified resource with a success or failure status. +// You can use the SignalResource API in conjunction with a creation policy +// or update policy. AWS CloudFormation doesn't proceed with a stack creation +// or update until resources receive the required number of signals or the timeout +// period is exceeded. The SignalResource API is useful in cases where you want +// to send signals from anywhere other than an Amazon EC2 instance. +func (c *CloudFormation) SignalResource(input *SignalResourceInput) (*SignalResourceOutput, error) { + req, out := c.SignalResourceRequest(input) + err := req.Send() + return out, err +} + +const opUpdateStack = "UpdateStack" + +// UpdateStackRequest generates a "aws/request.Request" representing the +// client's request for the UpdateStack operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateStack method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateStackRequest method. +// req, resp := client.UpdateStackRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFormation) UpdateStackRequest(input *UpdateStackInput) (req *request.Request, output *UpdateStackOutput) { + op := &request.Operation{ + Name: opUpdateStack, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateStackInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateStackOutput{} + req.Data = output + return +} + +// Updates a stack as specified in the template. After the call completes successfully, +// the stack update starts. You can check the status of the stack via the DescribeStacks +// action. +// +// To get a copy of the template for an existing stack, you can use the GetTemplate +// action. +// +// For more information about creating an update template, updating a stack, +// and monitoring the progress of the update, see Updating a Stack (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks.html). 
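+//
+//    // A minimal, illustrative sketch: assumes an existing client "svc"
+//    // (e.g. created with cloudformation.New(sess)), imported aws helpers,
+//    // and a hypothetical stack name. UsePreviousTemplate reuses the stack's
+//    // current template so only the supplied settings change.
+//    out, err := svc.UpdateStack(&cloudformation.UpdateStackInput{
+//        StackName:           aws.String("my-stack"), // hypothetical name
+//        UsePreviousTemplate: aws.Bool(true),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.StackId)) // ID of the stack being updated
+//    }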
+func (c *CloudFormation) UpdateStack(input *UpdateStackInput) (*UpdateStackOutput, error) { + req, out := c.UpdateStackRequest(input) + err := req.Send() + return out, err +} + +const opValidateTemplate = "ValidateTemplate" + +// ValidateTemplateRequest generates a "aws/request.Request" representing the +// client's request for the ValidateTemplate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ValidateTemplate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ValidateTemplateRequest method. +// req, resp := client.ValidateTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFormation) ValidateTemplateRequest(input *ValidateTemplateInput) (req *request.Request, output *ValidateTemplateOutput) { + op := &request.Operation{ + Name: opValidateTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ValidateTemplateInput{} + } + + req = c.newRequest(op, input, output) + output = &ValidateTemplateOutput{} + req.Data = output + return +} + +// Validates a specified template. +func (c *CloudFormation) ValidateTemplate(input *ValidateTemplateInput) (*ValidateTemplateOutput, error) { + req, out := c.ValidateTemplateRequest(input) + err := req.Send() + return out, err +} + +// The AccountLimit data type. +type AccountLimit struct { + _ struct{} `type:"structure"` + + // The name of the account limit. Currently, the only account limit is StackLimit. + Name *string `type:"string"` + + // The value that is associated with the account limit name. + Value *int64 `type:"integer"` +} + +// String returns the string representation +func (s AccountLimit) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccountLimit) GoString() string { + return s.String() +} + +// The input for the CancelUpdateStack action. +type CancelUpdateStackInput struct { + _ struct{} `type:"structure"` + + // The name or the unique stack ID that is associated with the stack. + StackName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelUpdateStackInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelUpdateStackInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
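+//
+//    // Illustrative sketch: Validate can also be called directly to check
+//    // required fields before a request is built; the SDK runs the same
+//    // validation automatically when Send is invoked.
+//    in := &cloudformation.CancelUpdateStackInput{} // StackName deliberately unset
+//    if err := in.Validate(); err != nil {
+//        fmt.Println(err) // reports the missing required StackName parameter
+//    }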
+func (s *CancelUpdateStackInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CancelUpdateStackInput"} + if s.StackName == nil { + invalidParams.Add(request.NewErrParamRequired("StackName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CancelUpdateStackOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CancelUpdateStackOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelUpdateStackOutput) GoString() string { + return s.String() +} + +// The Change structure describes the changes AWS CloudFormation will perform +// if you execute the change set. +type Change struct { + _ struct{} `type:"structure"` + + // A ResourceChange structure that describes the resource and action that AWS + // CloudFormation will perform. + ResourceChange *ResourceChange `type:"structure"` + + // The type of entity that AWS CloudFormation changes. Currently, the only entity + // type is Resource. + Type *string `type:"string" enum:"ChangeType"` +} + +// String returns the string representation +func (s Change) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Change) GoString() string { + return s.String() +} + +// The ChangeSetSummary structure describes a change set, its status, and the +// stack with which it's associated. +type ChangeSetSummary struct { + _ struct{} `type:"structure"` + + // The ID of the change set. + ChangeSetId *string `min:"1" type:"string"` + + // The name of the change set. + ChangeSetName *string `min:"1" type:"string"` + + // The start time when the change set was created, in UTC. + CreationTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Descriptive information about the change set. + Description *string `min:"1" type:"string"` + + // If the change set execution status is AVAILABLE, you can execute the change + // set. If you can’t execute the change set, the status indicates why. For example, + // a change set might be in an UNAVAILABLE state because AWS CloudFormation + // is still creating it or in an OBSOLETE state because the stack was already + // updated. + ExecutionStatus *string `type:"string" enum:"ExecutionStatus"` + + // The ID of the stack with which the change set is associated. + StackId *string `type:"string"` + + // The name of the stack with which the change set is associated. + StackName *string `type:"string"` + + // The state of the change set, such as CREATE_IN_PROGRESS, CREATE_COMPLETE, + // or FAILED. + Status *string `type:"string" enum:"ChangeSetStatus"` + + // A description of the change set's status. For example, if your change set + // is in the FAILED state, AWS CloudFormation shows the error message. + StatusReason *string `type:"string"` +} + +// String returns the string representation +func (s ChangeSetSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangeSetSummary) GoString() string { + return s.String() +} + +// The input for the ContinueUpdateRollback action. +type ContinueUpdateRollbackInput struct { + _ struct{} `type:"structure"` + + // The name or the unique ID of the stack that you want to continue rolling + // back. 
+ StackName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ContinueUpdateRollbackInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ContinueUpdateRollbackInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ContinueUpdateRollbackInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ContinueUpdateRollbackInput"} + if s.StackName == nil { + invalidParams.Add(request.NewErrParamRequired("StackName")) + } + if s.StackName != nil && len(*s.StackName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StackName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The output for a ContinueUpdateRollback action. +type ContinueUpdateRollbackOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ContinueUpdateRollbackOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ContinueUpdateRollbackOutput) GoString() string { + return s.String() +} + +// The input for the CreateChangeSet action. +type CreateChangeSetInput struct { + _ struct{} `type:"structure"` + + // A list of capabilities that you must specify before AWS CloudFormation can + // update certain stacks. Some stack templates might include resources that + // can affect permissions in your AWS account, for example, by creating new + // AWS Identity and Access Management (IAM) users. For those stacks, you must + // explicitly acknowledge their capabilities by specifying this parameter. + // + // Currently, the only valid value is CAPABILITY_IAM, which is required for + // the following resources: AWS::IAM::AccessKey (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-accesskey.html), + // AWS::IAM::Group (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-group.html), + // AWS::IAM::InstanceProfile (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-instanceprofile.html), + // AWS::IAM::Policy (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-policy.html), + // AWS::IAM::Role (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html), + // AWS::IAM::User (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-user.html), + // and AWS::IAM::UserToGroupAddition (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-addusertogroup.html). + // If your stack template contains these resources, we recommend that you review + // all permissions associated with them and edit their permissions if necessary. + // If your template contains any of the listed resources and you don't specify + // this parameter, this action returns an InsufficientCapabilities error. + Capabilities []*string `type:"list"` + + // The name of the change set. The name must be unique among all change sets + // that are associated with the specified stack. + // + // A change set name can contain only alphanumeric, case sensitive characters + // and hyphens. It must start with an alphabetic character and cannot exceed + // 128 characters. + ChangeSetName *string `min:"1" type:"string" required:"true"` + + // A unique identifier for this CreateChangeSet request. 
Specify this token + // if you plan to retry requests so that AWS CloudFormation knows that you're + // not attempting to create another change set with the same name. You might + // retry CreateChangeSet requests to ensure that AWS CloudFormation successfully + // received them. + ClientToken *string `min:"1" type:"string"` + + // A description to help you identify this change set. + Description *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of Amazon Simple Notification Service (Amazon + // SNS) topics that AWS CloudFormation associates with the stack. To remove + // all associated notification topics, specify an empty list. + NotificationARNs []*string `type:"list"` + + // A list of Parameter structures that specify input parameters for the change + // set. For more information, see the Parameter (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_Parameter.html) + // data type. + Parameters []*Parameter `type:"list"` + + // The template resource types that you have permissions to work with if you + // execute this change set, such as AWS::EC2::Instance, AWS::EC2::*, or Custom::MyCustomInstance. + // + // If the list of resource types doesn't include a resource type that you're + // updating, the stack update fails. By default, AWS CloudFormation grants permissions + // to all resource types. AWS Identity and Access Management (IAM) uses this + // parameter for condition keys in IAM policies for AWS CloudFormation. For + // more information, see Controlling Access with AWS Identity and Access Management + // (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-template.html) + // in the AWS CloudFormation User Guide. + ResourceTypes []*string `type:"list"` + + // The name or the unique ID of the stack for which you are creating a change + // set. AWS CloudFormation generates the change set by comparing this stack's + // information with the information that you submit, such as a modified template + // or different parameter input values. + StackName *string `min:"1" type:"string" required:"true"` + + // Key-value pairs to associate with this stack. AWS CloudFormation also propagates + // these tags to resources in the stack. You can specify a maximum of 10 tags. + Tags []*Tag `type:"list"` + + // A structure that contains the body of the revised template, with a minimum + // length of 1 byte and a maximum length of 51,200 bytes. AWS CloudFormation + // generates the change set by comparing this template with the template of + // the stack that you specified. + // + // Conditional: You must specify only TemplateBody or TemplateURL. + TemplateBody *string `min:"1" type:"string"` + + // The location of the file that contains the revised template. The URL must + // point to a template (max size: 460,800 bytes) that is located in an S3 bucket. + // AWS CloudFormation generates the change set by comparing this template with + // the stack that you specified. + // + // Conditional: You must specify only TemplateBody or TemplateURL. + TemplateURL *string `min:"1" type:"string"` + + // Whether to reuse the template that is associated with the stack to create + // the change set. 
+ UsePreviousTemplate *bool `type:"boolean"` +} + +// String returns the string representation +func (s CreateChangeSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateChangeSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateChangeSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateChangeSetInput"} + if s.ChangeSetName == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeSetName")) + } + if s.ChangeSetName != nil && len(*s.ChangeSetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChangeSetName", 1)) + } + if s.ClientToken != nil && len(*s.ClientToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientToken", 1)) + } + if s.Description != nil && len(*s.Description) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Description", 1)) + } + if s.StackName == nil { + invalidParams.Add(request.NewErrParamRequired("StackName")) + } + if s.StackName != nil && len(*s.StackName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StackName", 1)) + } + if s.TemplateBody != nil && len(*s.TemplateBody) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TemplateBody", 1)) + } + if s.TemplateURL != nil && len(*s.TemplateURL) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TemplateURL", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The output for the CreateChangeSet action. +type CreateChangeSetOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the change set. + Id *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateChangeSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateChangeSetOutput) GoString() string { + return s.String() +} + +// The input for CreateStack action. +type CreateStackInput struct { + _ struct{} `type:"structure"` + + // A list of capabilities that you must specify before AWS CloudFormation can + // create certain stacks. Some stack templates might include resources that + // can affect permissions in your AWS account, for example, by creating new + // AWS Identity and Access Management (IAM) users. For those stacks, you must + // explicitly acknowledge their capabilities by specifying this parameter. + // + // Currently, the only valid value is CAPABILITY_IAM, which is required for + // the following resources: AWS::IAM::AccessKey (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-accesskey.html), + // AWS::IAM::Group (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-group.html), + // AWS::IAM::InstanceProfile (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-instanceprofile.html), + // AWS::IAM::Policy (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-policy.html), + // AWS::IAM::Role (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html), + // AWS::IAM::User (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-user.html), + // and AWS::IAM::UserToGroupAddition (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-addusertogroup.html). 
+	// If your stack template contains these resources, we recommend that you review
+	// all permissions associated with them and edit their permissions if necessary.
+	// If your template contains any of the listed resources and you don't specify
+	// this parameter, this action returns an InsufficientCapabilities error.
+	Capabilities []*string `type:"list"`
+
+	// Set to true to disable rollback of the stack if stack creation failed. You
+	// can specify either DisableRollback or OnFailure, but not both.
+	//
+	// Default: false
+	DisableRollback *bool `type:"boolean"`
+
+	// The Simple Notification Service (SNS) topic ARNs to publish stack-related
+	// events. You can find your SNS topic ARNs using the SNS console (https://console.aws.amazon.com/sns)
+	// or your Command Line Interface (CLI).
+	NotificationARNs []*string `type:"list"`
+
+	// Determines what action will be taken if stack creation fails. This must be
+	// one of: DO_NOTHING, ROLLBACK, or DELETE. You can specify either OnFailure
+	// or DisableRollback, but not both.
+	//
+	// Default: ROLLBACK
+	OnFailure *string `type:"string" enum:"OnFailure"`
+
+	// A list of Parameter structures that specify input parameters for the stack.
+	// For more information, see the Parameter (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_Parameter.html)
+	// data type.
+	Parameters []*Parameter `type:"list"`
+
+	// The template resource types that you have permissions to work with for this
+	// create stack action, such as AWS::EC2::Instance, AWS::EC2::*, or Custom::MyCustomInstance.
+	// Use the following syntax to describe template resource types: AWS::* (for
+	// all AWS resources), Custom::* (for all custom resources), Custom::logical_ID
+	// (for a specific custom resource), AWS::service_name::* (for all resources
+	// of a particular AWS service), and AWS::service_name::resource_logical_ID
+	// (for a specific AWS resource).
+	//
+	// If the list of resource types doesn't include a resource that you're creating,
+	// the stack creation fails. By default, AWS CloudFormation grants permissions
+	// to all resource types. AWS Identity and Access Management (IAM) uses this
+	// parameter for AWS CloudFormation-specific condition keys in IAM policies.
+	// For more information, see Controlling Access with AWS Identity and Access
+	// Management (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-template.html).
+	ResourceTypes []*string `type:"list"`
+
+	// The name that is associated with the stack. The name must be unique in the
+	// region in which you are creating the stack.
+	//
+	// A stack name can contain only alphanumeric characters (case sensitive)
+	// and hyphens. It must start with an alphabetic character and cannot be longer
+	// than 128 characters.
+	StackName *string `type:"string" required:"true"`
+
+	// Structure containing the stack policy body. For more information, go to
+	// Prevent Updates to Stack Resources (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html)
+	// in the AWS CloudFormation User Guide. You can specify either the StackPolicyBody
+	// or the StackPolicyURL parameter, but not both.
+	StackPolicyBody *string `min:"1" type:"string"`
+
+	// Location of a file containing the stack policy. The URL must point to a policy
+	// (max size: 16KB) located in an S3 bucket in the same region as the stack.
+	// You can specify either the StackPolicyBody or the StackPolicyURL parameter,
+	// but not both.
+ StackPolicyURL *string `min:"1" type:"string"` + + // Key-value pairs to associate with this stack. AWS CloudFormation also propagates + // these tags to the resources created in the stack. A maximum number of 10 + // tags can be specified. + Tags []*Tag `type:"list"` + + // Structure containing the template body with a minimum length of 1 byte and + // a maximum length of 51,200 bytes. For more information, go to Template Anatomy + // (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html) + // in the AWS CloudFormation User Guide. + // + // Conditional: You must specify either the TemplateBody or the TemplateURL + // parameter, but not both. + TemplateBody *string `min:"1" type:"string"` + + // Location of file containing the template body. The URL must point to a template + // (max size: 460,800 bytes) that is located in an Amazon S3 bucket. For more + // information, go to the Template Anatomy (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html) + // in the AWS CloudFormation User Guide. + // + // Conditional: You must specify either the TemplateBody or the TemplateURL + // parameter, but not both. + TemplateURL *string `min:"1" type:"string"` + + // The amount of time that can pass before the stack status becomes CREATE_FAILED; + // if DisableRollback is not set or is set to false, the stack will be rolled + // back. + TimeoutInMinutes *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s CreateStackInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateStackInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateStackInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateStackInput"} + if s.StackName == nil { + invalidParams.Add(request.NewErrParamRequired("StackName")) + } + if s.StackPolicyBody != nil && len(*s.StackPolicyBody) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StackPolicyBody", 1)) + } + if s.StackPolicyURL != nil && len(*s.StackPolicyURL) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StackPolicyURL", 1)) + } + if s.TemplateBody != nil && len(*s.TemplateBody) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TemplateBody", 1)) + } + if s.TemplateURL != nil && len(*s.TemplateURL) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TemplateURL", 1)) + } + if s.TimeoutInMinutes != nil && *s.TimeoutInMinutes < 1 { + invalidParams.Add(request.NewErrParamMinValue("TimeoutInMinutes", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The output for a CreateStack action. +type CreateStackOutput struct { + _ struct{} `type:"structure"` + + // Unique identifier of the stack. + StackId *string `type:"string"` +} + +// String returns the string representation +func (s CreateStackOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateStackOutput) GoString() string { + return s.String() +} + +// The input for the DeleteChangeSet action. +type DeleteChangeSetInput struct { + _ struct{} `type:"structure"` + + // The name or Amazon Resource Name (ARN) of the change set that you want to + // delete. 
+ ChangeSetName *string `min:"1" type:"string" required:"true"` + + // If you specified the name of a change set to delete, specify the stack name + // or ID (ARN) that is associated with it. + StackName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeleteChangeSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteChangeSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteChangeSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteChangeSetInput"} + if s.ChangeSetName == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeSetName")) + } + if s.ChangeSetName != nil && len(*s.ChangeSetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChangeSetName", 1)) + } + if s.StackName != nil && len(*s.StackName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StackName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The output for the DeleteChangeSet action. +type DeleteChangeSetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteChangeSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteChangeSetOutput) GoString() string { + return s.String() +} + +// The input for DeleteStack action. +type DeleteStackInput struct { + _ struct{} `type:"structure"` + + // For stacks in the DELETE_FAILED state, a list of resource logical IDs that + // are associated with the resources you want to retain. During deletion, AWS + // CloudFormation deletes the stack but does not delete the retained resources. + // + // Retaining resources is useful when you cannot delete a resource, such as + // a non-empty S3 bucket, but you want to delete the stack. + RetainResources []*string `type:"list"` + + // The name or the unique stack ID that is associated with the stack. + StackName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteStackInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteStackInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteStackInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteStackInput"} + if s.StackName == nil { + invalidParams.Add(request.NewErrParamRequired("StackName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteStackOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteStackOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteStackOutput) GoString() string { + return s.String() +} + +// The input for the DescribeAccountLimits action. +type DescribeAccountLimitsInput struct { + _ struct{} `type:"structure"` + + // A string that identifies the next page of limits that you want to retrieve. 
+	NextToken *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeAccountLimitsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeAccountLimitsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeAccountLimitsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DescribeAccountLimitsInput"}
+	if s.NextToken != nil && len(*s.NextToken) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The output for the DescribeAccountLimits action.
+type DescribeAccountLimitsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// An account limit structure that contains a list of AWS CloudFormation account
+	// limits and their values.
+	AccountLimits []*AccountLimit `type:"list"`
+
+	// If the output exceeds 1 MB in size, a string that identifies the next page
+	// of limits. If no additional page exists, this value is null.
+	NextToken *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeAccountLimitsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeAccountLimitsOutput) GoString() string {
+	return s.String()
+}
+
+// The input for the DescribeChangeSet action.
+type DescribeChangeSetInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name or Amazon Resource Name (ARN) of the change set that you want to
+	// describe.
+	ChangeSetName *string `min:"1" type:"string" required:"true"`
+
+	// A string (provided by the DescribeChangeSet response output) that identifies
+	// the next page of information that you want to retrieve.
+	NextToken *string `min:"1" type:"string"`
+
+	// If you specified the name of a change set, specify the stack name or ID (ARN)
+	// of the change set you want to describe.
+	StackName *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeChangeSetInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeChangeSetInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeChangeSetInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DescribeChangeSetInput"}
+	if s.ChangeSetName == nil {
+		invalidParams.Add(request.NewErrParamRequired("ChangeSetName"))
+	}
+	if s.ChangeSetName != nil && len(*s.ChangeSetName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ChangeSetName", 1))
+	}
+	if s.NextToken != nil && len(*s.NextToken) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
+	}
+	if s.StackName != nil && len(*s.StackName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("StackName", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The output for the DescribeChangeSet action.
+type DescribeChangeSetOutput struct {
+	_ struct{} `type:"structure"`
+
+	// If you execute the change set, the list of capabilities that were explicitly
+	// acknowledged when the change set was created.
+	Capabilities []*string `type:"list"`
+
+	// The ARN of the change set.
+	ChangeSetId *string `min:"1" type:"string"`
+
+	// The name of the change set.
+ ChangeSetName *string `min:"1" type:"string"` + + // A list of Change structures that describes the resources AWS CloudFormation + // changes if you execute the change set. + Changes []*Change `type:"list"` + + // The start time when the change set was created, in UTC. + CreationTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Information about the change set. + Description *string `min:"1" type:"string"` + + // If the change set execution status is AVAILABLE, you can execute the change + // set. If you can’t execute the change set, the status indicates why. For example, + // a change set might be in an UNAVAILABLE state because AWS CloudFormation + // is still creating it or in an OBSOLETE state because the stack was already + // updated. + ExecutionStatus *string `type:"string" enum:"ExecutionStatus"` + + // If the output exceeds 1 MB, a string that identifies the next page of changes. + // If there is no additional page, this value is null. + NextToken *string `min:"1" type:"string"` + + // The ARNs of the Amazon Simple Notification Service (Amazon SNS) topics that + // will be associated with the stack if you execute the change set. + NotificationARNs []*string `type:"list"` + + // A list of Parameter structures that describes the input parameters and their + // values used to create the change set. For more information, see the Parameter + // (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_Parameter.html) + // data type. + Parameters []*Parameter `type:"list"` + + // The ARN of the stack that is associated with the change set. + StackId *string `type:"string"` + + // The name of the stack that is associated with the change set. + StackName *string `type:"string"` + + // The current status of the change set, such as CREATE_IN_PROGRESS, CREATE_COMPLETE, + // or FAILED. + Status *string `type:"string" enum:"ChangeSetStatus"` + + // A description of the change set's status. For example, if your attempt to + // create a change set failed, AWS CloudFormation shows the error message. + StatusReason *string `type:"string"` + + // If you execute the change set, the tags that will be associated with the + // stack. + Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s DescribeChangeSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeChangeSetOutput) GoString() string { + return s.String() +} + +// The input for DescribeStackEvents action. +type DescribeStackEventsInput struct { + _ struct{} `type:"structure"` + + // A string that identifies the next page of events that you want to retrieve. + NextToken *string `min:"1" type:"string"` + + // The name or the unique stack ID that is associated with the stack, which + // are not always interchangeable: + // + // Running stacks: You can specify either the stack's name or its unique + // stack ID. + // + // Deleted stacks: You must specify the unique stack ID. + // + // Default: There is no default value. + StackName *string `type:"string"` +} + +// String returns the string representation +func (s DescribeStackEventsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStackEventsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
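+//
+//    // Illustrative sketch (assumes a client "svc" and a hypothetical stack
+//    // name): walking event pages by hand with NextToken. Each page's input
+//    // passes through this validation before the request is sent.
+//    in := &cloudformation.DescribeStackEventsInput{StackName: aws.String("my-stack")}
+//    for {
+//        out, err := svc.DescribeStackEvents(in)
+//        if err != nil {
+//            break // a real caller would surface the error
+//        }
+//        for _, e := range out.StackEvents {
+//            fmt.Println(aws.StringValue(e.LogicalResourceId), aws.StringValue(e.ResourceStatus))
+//        }
+//        if out.NextToken == nil {
+//            break
+//        }
+//        in.NextToken = out.NextToken
+//    }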
+func (s *DescribeStackEventsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeStackEventsInput"} + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The output for a DescribeStackEvents action. +type DescribeStackEventsOutput struct { + _ struct{} `type:"structure"` + + // If the output exceeds 1 MB in size, a string that identifies the next page + // of events. If no additional page exists, this value is null. + NextToken *string `min:"1" type:"string"` + + // A list of StackEvents structures. + StackEvents []*StackEvent `type:"list"` +} + +// String returns the string representation +func (s DescribeStackEventsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStackEventsOutput) GoString() string { + return s.String() +} + +// The input for DescribeStackResource action. +type DescribeStackResourceInput struct { + _ struct{} `type:"structure"` + + // The logical name of the resource as specified in the template. + // + // Default: There is no default value. + LogicalResourceId *string `type:"string" required:"true"` + + // The name or the unique stack ID that is associated with the stack, which + // are not always interchangeable: + // + // Running stacks: You can specify either the stack's name or its unique + // stack ID. + // + // Deleted stacks: You must specify the unique stack ID. + // + // Default: There is no default value. + StackName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeStackResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStackResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeStackResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeStackResourceInput"} + if s.LogicalResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("LogicalResourceId")) + } + if s.StackName == nil { + invalidParams.Add(request.NewErrParamRequired("StackName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The output for a DescribeStackResource action. +type DescribeStackResourceOutput struct { + _ struct{} `type:"structure"` + + // A StackResourceDetail structure containing the description of the specified + // resource in the specified stack. + StackResourceDetail *StackResourceDetail `type:"structure"` +} + +// String returns the string representation +func (s DescribeStackResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStackResourceOutput) GoString() string { + return s.String() +} + +// The input for DescribeStackResources action. +type DescribeStackResourcesInput struct { + _ struct{} `type:"structure"` + + // The logical name of the resource as specified in the template. + // + // Default: There is no default value. + LogicalResourceId *string `type:"string"` + + // The name or unique identifier that corresponds to a physical instance ID + // of a resource supported by AWS CloudFormation. + // + // For example, for an Amazon Elastic Compute Cloud (EC2) instance, PhysicalResourceId + // corresponds to the InstanceId. 
You can pass the EC2 InstanceId to DescribeStackResources + // to find which stack the instance belongs to and what other resources are + // part of the stack. + // + // Required: Conditional. If you do not specify PhysicalResourceId, you must + // specify StackName. + // + // Default: There is no default value. + PhysicalResourceId *string `type:"string"` + + // The name or the unique stack ID that is associated with the stack, which + // are not always interchangeable: + // + // Running stacks: You can specify either the stack's name or its unique + // stack ID. + // + // Deleted stacks: You must specify the unique stack ID. + // + // Default: There is no default value. + // + // Required: Conditional. If you do not specify StackName, you must specify + // PhysicalResourceId. + StackName *string `type:"string"` +} + +// String returns the string representation +func (s DescribeStackResourcesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStackResourcesInput) GoString() string { + return s.String() +} + +// The output for a DescribeStackResources action. +type DescribeStackResourcesOutput struct { + _ struct{} `type:"structure"` + + // A list of StackResource structures. + StackResources []*StackResource `type:"list"` +} + +// String returns the string representation +func (s DescribeStackResourcesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStackResourcesOutput) GoString() string { + return s.String() +} + +// The input for DescribeStacks action. +type DescribeStacksInput struct { + _ struct{} `type:"structure"` + + // A string that identifies the next page of stacks that you want to retrieve. + NextToken *string `min:"1" type:"string"` + + // The name or the unique stack ID that is associated with the stack, which + // are not always interchangeable: + // + // Running stacks: You can specify either the stack's name or its unique + // stack ID. + // + // Deleted stacks: You must specify the unique stack ID. + // + // Default: There is no default value. + StackName *string `type:"string"` +} + +// String returns the string representation +func (s DescribeStacksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStacksInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeStacksInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeStacksInput"} + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The output for a DescribeStacks action. +type DescribeStacksOutput struct { + _ struct{} `type:"structure"` + + // If the output exceeds 1 MB in size, a string that identifies the next page + // of stacks. If no additional page exists, this value is null. + NextToken *string `min:"1" type:"string"` + + // A list of stack structures. + Stacks []*Stack `type:"list"` +} + +// String returns the string representation +func (s DescribeStacksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStacksOutput) GoString() string { + return s.String() +} + +// The input for an EstimateTemplateCost action. 
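+//
+//    // Illustrative sketch (assumes a client "svc" and a template string
+//    // "templateBody"): the response carries an AWS Simple Monthly Calculator
+//    // URL rather than a numeric estimate.
+//    out, err := svc.EstimateTemplateCost(&cloudformation.EstimateTemplateCostInput{
+//        TemplateBody: aws.String(templateBody),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.Url))
+//    }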
+type EstimateTemplateCostInput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of Parameter structures that specify input parameters.
+	Parameters []*Parameter `type:"list"`
+
+	// Structure containing the template body with a minimum length of 1 byte and
+	// a maximum length of 51,200 bytes. (For more information, go to Template Anatomy
+	// (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html)
+	// in the AWS CloudFormation User Guide.)
+	//
+	// Conditional: You must pass TemplateBody or TemplateURL. If both are passed,
+	// only TemplateBody is used.
+	TemplateBody *string `min:"1" type:"string"`
+
+	// Location of file containing the template body. The URL must point to a template
+	// that is located in an Amazon S3 bucket. For more information, go to Template
+	// Anatomy (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html)
+	// in the AWS CloudFormation User Guide.
+	//
+	// Conditional: You must pass TemplateURL or TemplateBody. If both are passed,
+	// only TemplateBody is used.
+	TemplateURL *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s EstimateTemplateCostInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EstimateTemplateCostInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *EstimateTemplateCostInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "EstimateTemplateCostInput"}
+	if s.TemplateBody != nil && len(*s.TemplateBody) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("TemplateBody", 1))
+	}
+	if s.TemplateURL != nil && len(*s.TemplateURL) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("TemplateURL", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The output for an EstimateTemplateCost action.
+type EstimateTemplateCostOutput struct {
+	_ struct{} `type:"structure"`
+
+	// An AWS Simple Monthly Calculator URL with a query string that describes the
+	// resources required to run the template.
+	Url *string `type:"string"`
+}
+
+// String returns the string representation
+func (s EstimateTemplateCostOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EstimateTemplateCostOutput) GoString() string {
+	return s.String()
+}
+
+// The input for the ExecuteChangeSet action.
+type ExecuteChangeSetInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name or ARN of the change set that you want to use to update the specified
+	// stack.
+	ChangeSetName *string `min:"1" type:"string" required:"true"`
+
+	// If you specified the name of a change set, specify the stack name or ID (ARN)
+	// that is associated with the change set you want to execute.
+	StackName *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ExecuteChangeSetInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ExecuteChangeSetInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
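+//
+//    // Illustrative sketch (assumes a client "svc"; names are hypothetical):
+//    // executing a change set created earlier. StackName can be omitted when
+//    // ChangeSetName is the change set's full ARN.
+//    _, err := svc.ExecuteChangeSet(&cloudformation.ExecuteChangeSetInput{
+//        ChangeSetName: aws.String("my-change-set"),
+//        StackName:     aws.String("my-stack"),
+//    })
+//    if err != nil {
+//        fmt.Println(err)
+//    }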
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ExecuteChangeSetInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ExecuteChangeSetInput"}
+	if s.ChangeSetName == nil {
+		invalidParams.Add(request.NewErrParamRequired("ChangeSetName"))
+	}
+	if s.ChangeSetName != nil && len(*s.ChangeSetName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ChangeSetName", 1))
+	}
+	if s.StackName != nil && len(*s.StackName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("StackName", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The output for the ExecuteChangeSet action.
+type ExecuteChangeSetOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s ExecuteChangeSetOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ExecuteChangeSetOutput) GoString() string {
+	return s.String()
+}
+
+// The input for the GetStackPolicy action.
+type GetStackPolicyInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name or unique stack ID that is associated with the stack whose policy
+	// you want to get.
+	StackName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetStackPolicyInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetStackPolicyInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetStackPolicyInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetStackPolicyInput"}
+	if s.StackName == nil {
+		invalidParams.Add(request.NewErrParamRequired("StackName"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The output for the GetStackPolicy action.
+type GetStackPolicyOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Structure containing the stack policy body. (For more information, go to
+	// Prevent Updates to Stack Resources (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html)
+	// in the AWS CloudFormation User Guide.)
+	StackPolicyBody *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s GetStackPolicyOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetStackPolicyOutput) GoString() string {
+	return s.String()
+}
+
+// The input for a GetTemplate action.
+type GetTemplateInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name or the unique stack ID that is associated with the stack, which
+	// are not always interchangeable:
+	//
+	// Running stacks: You can specify either the stack's name or its unique
+	// stack ID.
+	//
+	// Deleted stacks: You must specify the unique stack ID.
+	//
+	// Default: There is no default value.
+	StackName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetTemplateInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetTemplateInput) GoString() string {
+	return s.String()
+}
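A short sketch of driving ExecuteChangeSet with these types follows; it is illustrative only, the change set and stack names are hypothetical, and it assumes a change set in an executable state already exists.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudformation"
)

func main() {
	svc := cloudformation.New(session.New())

	// "my-changes" is identified by name, not ARN, so StackName is also
	// required to disambiguate which stack the change set belongs to.
	_, err := svc.ExecuteChangeSet(&cloudformation.ExecuteChangeSetInput{
		ChangeSetName: aws.String("my-changes"), // hypothetical change set
		StackName:     aws.String("my-stack"),   // hypothetical stack
	})
	if err != nil {
		log.Fatal(err)
	}
}
```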
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetTemplateInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetTemplateInput"}
+	if s.StackName == nil {
+		invalidParams.Add(request.NewErrParamRequired("StackName"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The output for GetTemplate action.
+type GetTemplateOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Structure containing the template body. (For more information, go to Template
+	// Anatomy (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html)
+	// in the AWS CloudFormation User Guide.)
+	TemplateBody *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s GetTemplateOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetTemplateOutput) GoString() string {
+	return s.String()
+}
+
+// The input for the GetTemplateSummary action.
+type GetTemplateSummaryInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name or the stack ID that is associated with the stack, which are not
+	// always interchangeable. For running stacks, you can specify either the stack's
+	// name or its unique stack ID. For deleted stacks, you must specify the unique
+	// stack ID.
+	//
+	// Conditional: You must specify only one of the following parameters: StackName,
+	// TemplateBody, or TemplateURL.
+	StackName *string `min:"1" type:"string"`
+
+	// Structure containing the template body with a minimum length of 1 byte and
+	// a maximum length of 51,200 bytes. For more information about templates, see
+	// Template Anatomy (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html)
+	// in the AWS CloudFormation User Guide.
+	//
+	// Conditional: You must specify only one of the following parameters: StackName,
+	// TemplateBody, or TemplateURL.
+	TemplateBody *string `min:"1" type:"string"`
+
+	// Location of file containing the template body. The URL must point to a template
+	// (max size: 460,800 bytes) that is located in an Amazon S3 bucket. For more
+	// information about templates, see Template Anatomy (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html)
+	// in the AWS CloudFormation User Guide.
+	//
+	// Conditional: You must specify only one of the following parameters: StackName,
+	// TemplateBody, or TemplateURL.
+	TemplateURL *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s GetTemplateSummaryInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetTemplateSummaryInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetTemplateSummaryInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetTemplateSummaryInput"}
+	if s.StackName != nil && len(*s.StackName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("StackName", 1))
+	}
+	if s.TemplateBody != nil && len(*s.TemplateBody) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("TemplateBody", 1))
+	}
+	if s.TemplateURL != nil && len(*s.TemplateURL) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("TemplateURL", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
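Since GetTemplateSummaryInput accepts exactly one of StackName, TemplateBody, or TemplateURL, a hedged sketch of the StackName variant may be useful; the stack name is hypothetical, and the fields read from the output correspond to the summary type defined just below.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudformation"
)

func main() {
	svc := cloudformation.New(session.New())

	// Exactly one of StackName, TemplateBody, or TemplateURL may be set;
	// here the summary is taken from a running (hypothetical) stack.
	out, err := svc.GetTemplateSummary(&cloudformation.GetTemplateSummaryInput{
		StackName: aws.String("my-stack"),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range out.Parameters {
		fmt.Println(aws.StringValue(p.ParameterKey), aws.StringValue(p.ParameterType))
	}
}
```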
+// The output for the GetTemplateSummary action.
+type GetTemplateSummaryOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The capabilities found within the template. Currently, AWS CloudFormation
+	// supports only the CAPABILITY_IAM capability. If your template contains IAM
+	// resources, you must specify the CAPABILITY_IAM value for this parameter when
+	// you use the CreateStack or UpdateStack actions with your template; otherwise,
+	// those actions return an InsufficientCapabilities error.
+	Capabilities []*string `type:"list"`
+
+	// The list of resources that generated the values in the Capabilities response
+	// element.
+	CapabilitiesReason *string `type:"string"`
+
+	// The value that is defined in the Description property of the template.
+	Description *string `min:"1" type:"string"`
+
+	// The value that is defined for the Metadata property of the template.
+	Metadata *string `type:"string"`
+
+	// A list of parameter declarations that describe various properties for each
+	// parameter.
+	Parameters []*ParameterDeclaration `type:"list"`
+
+	// A list of all the template resource types that are defined in the template,
+	// such as AWS::EC2::Instance, AWS::Dynamo::Table, and Custom::MyCustomInstance.
+	ResourceTypes []*string `type:"list"`
+
+	// The AWS template format version, which identifies the capabilities of the
+	// template.
+	Version *string `type:"string"`
+}
+
+// String returns the string representation
+func (s GetTemplateSummaryOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetTemplateSummaryOutput) GoString() string {
+	return s.String()
+}
+
+// The input for the ListChangeSets action.
+type ListChangeSetsInput struct {
+	_ struct{} `type:"structure"`
+
+	// A string (provided by the ListChangeSets response output) that identifies
+	// the next page of change sets that you want to retrieve.
+	NextToken *string `min:"1" type:"string"`
+
+	// The name or the Amazon Resource Name (ARN) of the stack for which you want
+	// to list change sets.
+	StackName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListChangeSetsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListChangeSetsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListChangeSetsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListChangeSetsInput"}
+	if s.NextToken != nil && len(*s.NextToken) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
+	}
+	if s.StackName == nil {
+		invalidParams.Add(request.NewErrParamRequired("StackName"))
+	}
+	if s.StackName != nil && len(*s.StackName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("StackName", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The output for the ListChangeSets action.
+type ListChangeSetsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// If the output exceeds 1 MB, a string that identifies the next page of change
+	// sets. If there is no additional page, this value is null.
+	NextToken *string `min:"1" type:"string"`
+
+	// A list of ChangeSetSummary structures that provides the ID and status of
+	// each change set for the specified stack.
+	Summaries []*ChangeSetSummary `type:"list"`
+}
+
+// String returns the string representation
+func (s ListChangeSetsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListChangeSetsOutput) GoString() string {
+	return s.String()
+}
+
+// The input for the ListStackResources action.
+type ListStackResourcesInput struct {
+	_ struct{} `type:"structure"`
+
+	// A string that identifies the next page of stack resources that you want to
+	// retrieve.
+	NextToken *string `min:"1" type:"string"`
+
+	// The name or the unique stack ID that is associated with the stack, which
+	// are not always interchangeable:
+	//
+	// Running stacks: You can specify either the stack's name or its unique
+	// stack ID.
+	//
+	// Deleted stacks: You must specify the unique stack ID.
+	//
+	// Default: There is no default value.
+	StackName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListStackResourcesInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListStackResourcesInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListStackResourcesInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListStackResourcesInput"}
+	if s.NextToken != nil && len(*s.NextToken) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
+	}
+	if s.StackName == nil {
+		invalidParams.Add(request.NewErrParamRequired("StackName"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The output for a ListStackResources action.
+type ListStackResourcesOutput struct {
+	_ struct{} `type:"structure"`
+
+	// If the output exceeds 1 MB, a string that identifies the next page of stack
+	// resources. If no additional page exists, this value is null.
+	NextToken *string `min:"1" type:"string"`
+
+	// A list of StackResourceSummary structures.
+	StackResourceSummaries []*StackResourceSummary `type:"list"`
+}
+
+// String returns the string representation
+func (s ListStackResourcesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListStackResourcesOutput) GoString() string {
+	return s.String()
+}
+
+// The input for ListStacks action.
+type ListStacksInput struct {
+	_ struct{} `type:"structure"`
+
+	// A string that identifies the next page of stacks that you want to retrieve.
+	NextToken *string `min:"1" type:"string"`
+
+	// Stack status to use as a filter. Specify one or more stack status codes to
+	// list only stacks with the specified status codes. For a complete list of
+	// stack status codes, see the StackStatus parameter of the Stack data type.
+	StackStatusFilter []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s ListStacksInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListStacksInput) GoString() string {
+	return s.String()
+}
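The NextToken fields above drive pagination; rather than looping manually, callers can use the generated ListStackResourcesPages helper (declared in the interface file further below). A minimal sketch, with a hypothetical stack name:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudformation"
)

func main() {
	svc := cloudformation.New(session.New())

	// The Pages variant follows NextToken automatically; returning true
	// from the callback keeps paging until no additional page exists.
	err := svc.ListStackResourcesPages(&cloudformation.ListStackResourcesInput{
		StackName: aws.String("my-stack"), // hypothetical stack
	}, func(page *cloudformation.ListStackResourcesOutput, lastPage bool) bool {
		for _, r := range page.StackResourceSummaries {
			fmt.Printf("%s (%s): %s\n",
				aws.StringValue(r.LogicalResourceId),
				aws.StringValue(r.ResourceType),
				aws.StringValue(r.ResourceStatus))
		}
		return true
	})
	if err != nil {
		log.Fatal(err)
	}
}
```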
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListStacksInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListStacksInput"}
+	if s.NextToken != nil && len(*s.NextToken) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The output for ListStacks action.
+type ListStacksOutput struct {
+	_ struct{} `type:"structure"`
+
+	// If the output exceeds 1 MB in size, a string that identifies the next page
+	// of stacks. If no additional page exists, this value is null.
+	NextToken *string `min:"1" type:"string"`
+
+	// A list of StackSummary structures containing information about the specified
+	// stacks.
+	StackSummaries []*StackSummary `type:"list"`
+}
+
+// String returns the string representation
+func (s ListStacksOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListStacksOutput) GoString() string {
+	return s.String()
+}
+
+// The Output data type.
+type Output struct {
+	_ struct{} `type:"structure"`
+
+	// User defined description associated with the output.
+	Description *string `min:"1" type:"string"`
+
+	// The key associated with the output.
+	OutputKey *string `type:"string"`
+
+	// The value associated with the output.
+	OutputValue *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Output) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Output) GoString() string {
+	return s.String()
+}
+
+// The Parameter data type.
+type Parameter struct {
+	_ struct{} `type:"structure"`
+
+	// The key associated with the parameter. If you don't specify a key and value
+	// for a particular parameter, AWS CloudFormation uses the default value that
+	// is specified in your template.
+	ParameterKey *string `type:"string"`
+
+	// The value associated with the parameter.
+	ParameterValue *string `type:"string"`
+
+	// During a stack update, use the existing parameter value that the stack is
+	// using for a given parameter key. If you specify true, do not specify a parameter
+	// value.
+	UsePreviousValue *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s Parameter) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Parameter) GoString() string {
+	return s.String()
+}
+
+// A set of criteria that AWS CloudFormation uses to validate parameter values.
+// Although other constraints might be defined in the stack template, AWS CloudFormation
+// returns only the AllowedValues property.
+type ParameterConstraints struct {
+	_ struct{} `type:"structure"`
+
+	// A list of values that are permitted for a parameter.
+	AllowedValues []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s ParameterConstraints) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ParameterConstraints) GoString() string {
+	return s.String()
+}
+
+// The ParameterDeclaration data type.
+type ParameterDeclaration struct {
+	_ struct{} `type:"structure"`
+
+	// The default value of the parameter.
+	DefaultValue *string `type:"string"`
+
+	// The description that is associated with the parameter.
+	Description *string `min:"1" type:"string"`
+
+	// Flag that indicates whether the parameter value is shown as plain text in
+	// logs and in the AWS Management Console.
+	NoEcho *bool `type:"boolean"`
+
+	// The criteria that AWS CloudFormation uses to validate parameter values.
+	ParameterConstraints *ParameterConstraints `type:"structure"`
+
+	// The name that is associated with the parameter.
+	ParameterKey *string `type:"string"`
+
+	// The type of parameter.
+	ParameterType *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ParameterDeclaration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ParameterDeclaration) GoString() string {
+	return s.String()
+}
+
+// The ResourceChange structure describes the resource and the action that AWS
+// CloudFormation will perform on it if you execute this change set.
+type ResourceChange struct {
+	_ struct{} `type:"structure"`
+
+	// The action that AWS CloudFormation takes on the resource, such as Add (adds
+	// a new resource), Modify (changes a resource), or Remove (deletes a resource).
+	Action *string `type:"string" enum:"ChangeAction"`
+
+	// For the Modify action, a list of ResourceChangeDetail structures that describes
+	// the changes that AWS CloudFormation will make to the resource.
+	Details []*ResourceChangeDetail `type:"list"`
+
+	// The resource's logical ID, which is defined in the stack's template.
+	LogicalResourceId *string `type:"string"`
+
+	// The resource's physical ID (resource name). Resources that you are adding
+	// don't have physical IDs because they haven't been created.
+	PhysicalResourceId *string `type:"string"`
+
+	// For the Modify action, indicates whether AWS CloudFormation will replace
+	// the resource by creating a new one and deleting the old one. This value depends
+	// on the value of the RequiresRecreation property in the ResourceTargetDefinition
+	// structure. For example, if the RequiresRecreation field is Always and the
+	// Evaluation field is Static, Replacement is True. If the RequiresRecreation
+	// field is Always and the Evaluation field is Dynamic, Replacement is Conditionally.
+	//
+	// If you have multiple changes with different RequiresRecreation values, the
+	// Replacement value depends on the change with the most impact. A RequiresRecreation
+	// value of Always has the most impact, followed by Conditionally, and then
+	// Never.
+	Replacement *string `type:"string" enum:"Replacement"`
+
+	// The type of AWS CloudFormation resource, such as AWS::S3::Bucket.
+	ResourceType *string `min:"1" type:"string"`
+
+	// For the Modify action, indicates which resource attribute is triggering this
+	// update, such as a change in the resource attribute's Metadata, Properties,
+	// or Tags.
+	Scope []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s ResourceChange) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ResourceChange) GoString() string {
+	return s.String()
+}
+
+// For a resource with Modify as the action, the ResourceChange structure describes
+// the changes AWS CloudFormation will make to that resource.
+type ResourceChangeDetail struct {
+	_ struct{} `type:"structure"`
+
+	// The identity of the entity that triggered this change. This entity is a member
+	// of the group that is specified by the ChangeSource field. For example, if
+	// you modified the value of the KeyPairName parameter, the CausingEntity is
+	// the name of the parameter (KeyPairName).
+	//
+	// If the ChangeSource value is DirectModification, no value is given for CausingEntity.
+	CausingEntity *string `type:"string"`
+
+	// The group to which the CausingEntity value belongs. There are five entity
+	// groups:
+	//
+	// ResourceReference entities are Ref intrinsic functions that refer to
+	// resources in the template, such as { "Ref" : "MyEC2InstanceResource" }.
+	//
+	// ParameterReference entities are Ref intrinsic functions that get template
+	// parameter values, such as { "Ref" : "MyPasswordParameter" }.
+	//
+	// ResourceAttribute entities are Fn::GetAtt intrinsic functions that get
+	// resource attribute values, such as { "Fn::GetAtt" : [ "MyEC2InstanceResource",
+	// "PublicDnsName" ] }.
+	//
+	// DirectModification entities are changes that are made directly to the
+	// template.
+	//
+	// Automatic entities are AWS::CloudFormation::Stack resource types, which
+	// are also known as nested stacks. If you made no changes to the AWS::CloudFormation::Stack
+	// resource, AWS CloudFormation sets the ChangeSource to Automatic because the
+	// nested stack's template might have changed. Changes to a nested stack's template
+	// aren't visible to AWS CloudFormation until you run an update on the parent
+	// stack.
+	ChangeSource *string `type:"string" enum:"ChangeSource"`
+
+	// Indicates whether AWS CloudFormation can determine the target value, and
+	// whether the target value will change before you execute a change set.
+	//
+	// For Static evaluations, AWS CloudFormation can determine that the target
+	// value will change, and its value. For example, if you directly modify the
+	// InstanceType property of an EC2 instance, AWS CloudFormation knows that this
+	// property value will change, and its value, so this is a Static evaluation.
+	//
+	// For Dynamic evaluations, AWS CloudFormation cannot determine the target value
+	// because it depends on the result of an intrinsic function, such as a Ref or
+	// Fn::GetAtt intrinsic function, when the stack is updated. For example, if
+	// your template includes a reference to a resource that is conditionally recreated,
+	// the value of the reference (the physical ID of the resource) might change,
+	// depending on whether the resource is recreated. If the resource is recreated,
+	// it will have a new physical ID, so all references to that resource will also
+	// be updated.
+	Evaluation *string `type:"string" enum:"EvaluationType"`
+
+	// A ResourceTargetDefinition structure that describes the field that AWS CloudFormation
+	// will change and whether the resource will be recreated.
+	Target *ResourceTargetDefinition `type:"structure"`
+}
+
+// String returns the string representation
+func (s ResourceChangeDetail) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ResourceChangeDetail) GoString() string {
+	return s.String()
+}
+
+// The field that AWS CloudFormation will change, such as the name of a resource's
+// property, and whether the resource will be recreated.
+type ResourceTargetDefinition struct {
+	_ struct{} `type:"structure"`
+
+	// Indicates which resource attribute is triggering this update, such as a change
+	// in the resource attribute's Metadata, Properties, or Tags.
+	Attribute *string `type:"string" enum:"ResourceAttribute"`
+
+	// If the Attribute value is Properties, the name of the property. For all other
+	// attributes, the value is null.
+	Name *string `type:"string"`
+
+	// If the Attribute value is Properties, indicates whether a change to this
+	// property causes the resource to be recreated. The value can be Never, Always,
+	// or Conditionally. To determine the conditions for a Conditionally recreation,
+	// see the update behavior for that property (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html)
+	// in the AWS CloudFormation User Guide.
+	RequiresRecreation *string `type:"string" enum:"RequiresRecreation"`
+}
+
+// String returns the string representation
+func (s ResourceTargetDefinition) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ResourceTargetDefinition) GoString() string {
+	return s.String()
+}
+
+// The input for the SetStackPolicy action.
+type SetStackPolicyInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name or unique stack ID that you want to associate a policy with.
+	StackName *string `type:"string" required:"true"`
+
+	// Structure containing the stack policy body. For more information, go to
+	// Prevent Updates to Stack Resources (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html)
+	// in the AWS CloudFormation User Guide. You can specify either the StackPolicyBody
+	// or the StackPolicyURL parameter, but not both.
+	StackPolicyBody *string `min:"1" type:"string"`
+
+	// Location of a file containing the stack policy. The URL must point to a policy
+	// (maximum size: 16 KB) located in an S3 bucket in the same region as the stack.
+	// You can specify either the StackPolicyBody or the StackPolicyURL parameter,
+	// but not both.
+	StackPolicyURL *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s SetStackPolicyInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetStackPolicyInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SetStackPolicyInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "SetStackPolicyInput"}
+	if s.StackName == nil {
+		invalidParams.Add(request.NewErrParamRequired("StackName"))
+	}
+	if s.StackPolicyBody != nil && len(*s.StackPolicyBody) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("StackPolicyBody", 1))
+	}
+	if s.StackPolicyURL != nil && len(*s.StackPolicyURL) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("StackPolicyURL", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type SetStackPolicyOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s SetStackPolicyOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetStackPolicyOutput) GoString() string {
+	return s.String()
+}
+
+// The input for the SignalResource action.
+type SignalResourceInput struct {
+	_ struct{} `type:"structure"`
+
+	// The logical ID of the resource that you want to signal. The logical ID is
+	// the name of the resource that is given in the template.
+	LogicalResourceId *string `type:"string" required:"true"`
+
+	// The stack name or unique stack ID that includes the resource that you want
+	// to signal.
+	StackName *string `min:"1" type:"string" required:"true"`
+
+	// The status of the signal, which is either success or failure. A failure signal
+	// causes AWS CloudFormation to immediately fail the stack creation or update.
+	Status *string `type:"string" required:"true" enum:"ResourceSignalStatus"`
+
+	// A unique ID of the signal. When you signal Amazon EC2 instances or Auto Scaling
+	// groups, specify the instance ID that you are signaling as the unique ID.
+	// If you send multiple signals to a single resource (such as signaling a wait
+	// condition), each signal requires a different unique ID.
+	UniqueId *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s SignalResourceInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SignalResourceInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SignalResourceInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "SignalResourceInput"}
+	if s.LogicalResourceId == nil {
+		invalidParams.Add(request.NewErrParamRequired("LogicalResourceId"))
+	}
+	if s.StackName == nil {
+		invalidParams.Add(request.NewErrParamRequired("StackName"))
+	}
+	if s.StackName != nil && len(*s.StackName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("StackName", 1))
+	}
+	if s.Status == nil {
+		invalidParams.Add(request.NewErrParamRequired("Status"))
+	}
+	if s.UniqueId == nil {
+		invalidParams.Add(request.NewErrParamRequired("UniqueId"))
+	}
+	if s.UniqueId != nil && len(*s.UniqueId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("UniqueId", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type SignalResourceOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s SignalResourceOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SignalResourceOutput) GoString() string {
+	return s.String()
+}
+
+// The Stack data type.
+type Stack struct {
+	_ struct{} `type:"structure"`
+
+	// The capabilities allowed in the stack.
+	Capabilities []*string `type:"list"`
+
+	// The time at which the stack was created.
+	CreationTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
+
+	// A user-defined description associated with the stack.
+	Description *string `min:"1" type:"string"`
+
+	// Boolean to enable or disable rollback on stack creation failures:
+	//
+	// true: disable rollback
+	//
+	// false: enable rollback
+	DisableRollback *bool `type:"boolean"`
+
+	// The time the stack was last updated. This field will only be returned if
+	// the stack has been updated at least once.
+	LastUpdatedTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+	// SNS topic ARNs to which stack related events are published.
+	NotificationARNs []*string `type:"list"`
+
+	// A list of output structures.
+	Outputs []*Output `type:"list"`
+
+	// A list of Parameter structures.
+	Parameters []*Parameter `type:"list"`
+
+	// Unique identifier of the stack.
+	StackId *string `type:"string"`
+
+	// The name associated with the stack.
+	StackName *string `type:"string" required:"true"`
+
+	// Current status of the stack.
+	StackStatus *string `type:"string" required:"true" enum:"StackStatus"`
+
+	// Success/failure message associated with the stack status.
+	StackStatusReason *string `type:"string"`
+
+	// A list of Tags that specify information about the stack.
+	Tags []*Tag `type:"list"`
+
+	// The amount of time within which stack creation should complete.
+	TimeoutInMinutes *int64 `min:"1" type:"integer"`
+}
+
+// String returns the string representation
+func (s Stack) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Stack) GoString() string {
+	return s.String()
+}
+
+// The StackEvent data type.
+type StackEvent struct {
+	_ struct{} `type:"structure"`
+
+	// The unique ID of this event.
+	EventId *string `type:"string" required:"true"`
+
+	// The logical name of the resource specified in the template.
+	LogicalResourceId *string `type:"string"`
+
+	// The name or unique identifier associated with the physical instance of the
+	// resource.
+	PhysicalResourceId *string `type:"string"`
+
+	// BLOB of the properties used to create the resource.
+	ResourceProperties *string `type:"string"`
+
+	// Current status of the resource.
+	ResourceStatus *string `type:"string" enum:"ResourceStatus"`
+
+	// Success/failure message associated with the resource.
+	ResourceStatusReason *string `type:"string"`
+
+	// Type of resource. (For more information, go to AWS Resource Types Reference
+	// (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html)
+	// in the AWS CloudFormation User Guide.)
+	ResourceType *string `min:"1" type:"string"`
+
+	// The unique ID of the stack instance.
+	StackId *string `type:"string" required:"true"`
+
+	// The name associated with a stack.
+	StackName *string `type:"string" required:"true"`
+
+	// Time the status was updated.
+	Timestamp *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
+}
+
+// String returns the string representation
+func (s StackEvent) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StackEvent) GoString() string {
+	return s.String()
+}
+
+// The StackResource data type.
+type StackResource struct {
+	_ struct{} `type:"structure"`
+
+	// User defined description associated with the resource.
+	Description *string `min:"1" type:"string"`
+
+	// The logical name of the resource specified in the template.
+	LogicalResourceId *string `type:"string" required:"true"`
+
+	// The name or unique identifier that corresponds to a physical instance ID
+	// of a resource supported by AWS CloudFormation.
+	PhysicalResourceId *string `type:"string"`
+
+	// Current status of the resource.
+	ResourceStatus *string `type:"string" required:"true" enum:"ResourceStatus"`
+
+	// Success/failure message associated with the resource.
+	ResourceStatusReason *string `type:"string"`
+
+	// Type of resource. (For more information, go to AWS Resource Types Reference
+	// (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html)
+	// in the AWS CloudFormation User Guide.)
+	ResourceType *string `min:"1" type:"string" required:"true"`
+
+	// Unique identifier of the stack.
+	StackId *string `type:"string"`
+
+	// The name associated with the stack.
+	StackName *string `type:"string"`
+
+	// Time the status was updated.
+	Timestamp *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
+}
+
+// String returns the string representation
+func (s StackResource) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StackResource) GoString() string {
+	return s.String()
+}
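A sketch of walking StackEvent records with the paginated helper follows; the stack name is hypothetical, and stopping after the first page is an assumption that suits a recent-events view, since the service returns newer events first.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudformation"
)

func main() {
	svc := cloudformation.New(session.New())

	err := svc.DescribeStackEventsPages(&cloudformation.DescribeStackEventsInput{
		StackName: aws.String("my-stack"), // hypothetical stack
	}, func(page *cloudformation.DescribeStackEventsOutput, lastPage bool) bool {
		for _, e := range page.StackEvents {
			fmt.Printf("%v %s %s %s\n",
				e.Timestamp,
				aws.StringValue(e.ResourceType),
				aws.StringValue(e.LogicalResourceId),
				aws.StringValue(e.ResourceStatus))
		}
		return false // one page is enough for a recent-events view
	})
	if err != nil {
		log.Fatal(err)
	}
}
```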
+// Contains detailed information about the specified stack resource.
+type StackResourceDetail struct {
+	_ struct{} `type:"structure"`
+
+	// User defined description associated with the resource.
+	Description *string `min:"1" type:"string"`
+
+	// Time the status was updated.
+	LastUpdatedTimestamp *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
+
+	// The logical name of the resource specified in the template.
+	LogicalResourceId *string `type:"string" required:"true"`
+
+	// The JSON format content of the Metadata attribute declared for the resource.
+	// For more information, see Metadata Attribute (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-metadata.html)
+	// in the AWS CloudFormation User Guide.
+	Metadata *string `type:"string"`
+
+	// The name or unique identifier that corresponds to a physical instance ID
+	// of a resource supported by AWS CloudFormation.
+	PhysicalResourceId *string `type:"string"`
+
+	// Current status of the resource.
+	ResourceStatus *string `type:"string" required:"true" enum:"ResourceStatus"`
+
+	// Success/failure message associated with the resource.
+	ResourceStatusReason *string `type:"string"`
+
+	// Type of resource. (For more information, go to AWS Resource Types Reference
+	// (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html)
+	// in the AWS CloudFormation User Guide.)
+	ResourceType *string `min:"1" type:"string" required:"true"`
+
+	// Unique identifier of the stack.
+	StackId *string `type:"string"`
+
+	// The name associated with the stack.
+	StackName *string `type:"string"`
+}
+
+// String returns the string representation
+func (s StackResourceDetail) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StackResourceDetail) GoString() string {
+	return s.String()
+}
+
+// Contains high-level information about the specified stack resource.
+type StackResourceSummary struct {
+	_ struct{} `type:"structure"`
+
+	// Time the status was updated.
+	LastUpdatedTimestamp *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
+
+	// The logical name of the resource specified in the template.
+	LogicalResourceId *string `type:"string" required:"true"`
+
+	// The name or unique identifier that corresponds to a physical instance ID
+	// of the resource.
+	PhysicalResourceId *string `type:"string"`
+
+	// Current status of the resource.
+	ResourceStatus *string `type:"string" required:"true" enum:"ResourceStatus"`
+
+	// Success/failure message associated with the resource.
+	ResourceStatusReason *string `type:"string"`
+
+	// Type of resource. (For more information, go to AWS Resource Types Reference
+	// (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html)
+	// in the AWS CloudFormation User Guide.)
+	ResourceType *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s StackResourceSummary) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StackResourceSummary) GoString() string {
+	return s.String()
+}
+
+// The StackSummary data type.
+type StackSummary struct {
+	_ struct{} `type:"structure"`
+
+	// The time the stack was created.
+	CreationTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
+
+	// The time the stack was deleted.
+	DeletionTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+	// The time the stack was last updated. This field will only be returned if
+	// the stack has been updated at least once.
+	LastUpdatedTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+	// Unique stack identifier.
+	StackId *string `type:"string"`
+
+	// The name associated with the stack.
+	StackName *string `type:"string" required:"true"`
+
+	// The current status of the stack.
+	StackStatus *string `type:"string" required:"true" enum:"StackStatus"`
+
+	// Success/failure message associated with the stack status.
+	StackStatusReason *string `type:"string"`
+
+	// The description of the template used to create the stack.
+	TemplateDescription *string `type:"string"`
+}
+
+// String returns the string representation
+func (s StackSummary) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StackSummary) GoString() string {
+	return s.String()
+}
+
+// The Tag type enables you to specify a key-value pair that can be used to
+// store information about an AWS CloudFormation stack.
+type Tag struct {
+	_ struct{} `type:"structure"`
+
+	// Required. A string used to identify this tag. You can specify a maximum of
+	// 128 characters for a tag key. Tags owned by Amazon Web Services (AWS) have
+	// the reserved prefix: aws:.
+	Key *string `type:"string"`
+
+	// Required. A string containing the value for this tag. You can specify a maximum
+	// of 256 characters for a tag value.
+	Value *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Tag) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Tag) GoString() string {
+	return s.String()
+}
+
+// The TemplateParameter data type.
+type TemplateParameter struct {
+	_ struct{} `type:"structure"`
+
+	// The default value associated with the parameter.
+	DefaultValue *string `type:"string"`
+
+	// User defined description associated with the parameter.
+	Description *string `min:"1" type:"string"`
+
+	// Flag indicating whether the parameter should be displayed as plain text in
+	// logs and UIs.
+	NoEcho *bool `type:"boolean"`
+
+	// The name associated with the parameter.
+	ParameterKey *string `type:"string"`
+}
+
+// String returns the string representation
+func (s TemplateParameter) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TemplateParameter) GoString() string {
+	return s.String()
+}
+
+// The input for an UpdateStack action.
+type UpdateStackInput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of capabilities that you must specify before AWS CloudFormation can
+	// update certain stacks. Some stack templates might include resources that
+	// can affect permissions in your AWS account, for example, by creating new
+	// AWS Identity and Access Management (IAM) users. For those stacks, you must
+	// explicitly acknowledge their capabilities by specifying this parameter.
+	//
+	// Currently, the only valid value is CAPABILITY_IAM, which is required for
+	// the following resources: AWS::IAM::AccessKey (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-accesskey.html),
+	// AWS::IAM::Group (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-group.html),
+	// AWS::IAM::InstanceProfile (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-instanceprofile.html),
+	// AWS::IAM::Policy (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-policy.html),
+	// AWS::IAM::Role (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html),
+	// AWS::IAM::User (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-user.html),
+	// and AWS::IAM::UserToGroupAddition (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-addusertogroup.html).
+	// If your stack template contains these resources, we recommend that you review
+	// all permissions associated with them and edit their permissions if necessary.
+	// If your template contains any of the listed resources and you don't specify
+	// this parameter, this action returns an InsufficientCapabilities error.
+	Capabilities []*string `type:"list"`
+
+	// Amazon Simple Notification Service topic Amazon Resource Names (ARNs) that
+	// AWS CloudFormation associates with the stack. Specify an empty list to remove
+	// all notification topics.
+	NotificationARNs []*string `type:"list"`
+
+	// A list of Parameter structures that specify input parameters for the stack.
+	// For more information, see the Parameter (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_Parameter.html)
+	// data type.
+	Parameters []*Parameter `type:"list"`
+
+	// The template resource types that you have permissions to work with for this
+	// update stack action, such as AWS::EC2::Instance, AWS::EC2::*, or Custom::MyCustomInstance.
+	//
+	// If the list of resource types doesn't include a resource that you're updating,
+	// the stack update fails. By default, AWS CloudFormation grants permissions
+	// to all resource types. AWS Identity and Access Management (IAM) uses this
+	// parameter for AWS CloudFormation-specific condition keys in IAM policies.
+	// For more information, see Controlling Access with AWS Identity and Access
+	// Management (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-template.html).
+	ResourceTypes []*string `type:"list"`
+
+	// The name or unique stack ID of the stack to update.
+	StackName *string `type:"string" required:"true"`
+
+	// Structure containing a new stack policy body. You can specify either the
+	// StackPolicyBody or the StackPolicyURL parameter, but not both.
+	//
+	// You might update the stack policy, for example, in order to protect a new
+	// resource that you created during a stack update. If you do not specify a
+	// stack policy, the current policy that is associated with the stack is unchanged.
+	StackPolicyBody *string `min:"1" type:"string"`
+
+	// Structure containing the temporary overriding stack policy body. You can
+	// specify either the StackPolicyDuringUpdateBody or the StackPolicyDuringUpdateURL
+	// parameter, but not both.
+	//
+	// If you want to update protected resources, specify a temporary overriding
+	// stack policy during this update. If you do not specify a stack policy, the
+	// current policy that is associated with the stack will be used.
+	StackPolicyDuringUpdateBody *string `min:"1" type:"string"`
+
+	// Location of a file containing the temporary overriding stack policy. The
+	// URL must point to a policy (max size: 16KB) located in an S3 bucket in the
+	// same region as the stack. You can specify either the StackPolicyDuringUpdateBody
+	// or the StackPolicyDuringUpdateURL parameter, but not both.
+	//
+	// If you want to update protected resources, specify a temporary overriding
+	// stack policy during this update. If you do not specify a stack policy, the
+	// current policy that is associated with the stack will be used.
+	StackPolicyDuringUpdateURL *string `min:"1" type:"string"`
+
+	// Location of a file containing the updated stack policy. The URL must point
+	// to a policy (max size: 16KB) located in an S3 bucket in the same region as
+	// the stack. You can specify either the StackPolicyBody or the StackPolicyURL
+	// parameter, but not both.
+	//
+	// You might update the stack policy, for example, in order to protect a new
+	// resource that you created during a stack update. If you do not specify a
+	// stack policy, the current policy that is associated with the stack is unchanged.
+	StackPolicyURL *string `min:"1" type:"string"`
+
+	// Key-value pairs to associate with this stack. AWS CloudFormation also propagates
+	// these tags to supported resources in the stack. You can specify a maximum
+	// number of 10 tags.
+	//
+	// If you don't specify this parameter, AWS CloudFormation doesn't modify the
+	// stack's tags. If you specify an empty value, AWS CloudFormation removes all
+	// associated tags.
+	Tags []*Tag `type:"list"`
+
+	// Structure containing the template body with a minimum length of 1 byte and
+	// a maximum length of 51,200 bytes. (For more information, go to Template Anatomy
+	// (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html)
+	// in the AWS CloudFormation User Guide.)
+	//
+	// Conditional: You must specify either the TemplateBody or the TemplateURL
+	// parameter, but not both.
+	TemplateBody *string `min:"1" type:"string"`
+
+	// Location of file containing the template body. The URL must point to a template
+	// that is located in an Amazon S3 bucket. For more information, go to Template
+	// Anatomy (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html)
+	// in the AWS CloudFormation User Guide.
+	//
+	// Conditional: You must specify either the TemplateBody or the TemplateURL
+	// parameter, but not both.
+	TemplateURL *string `min:"1" type:"string"`
+
+	// Reuse the existing template that is associated with the stack that you are
+	// updating.
+	UsePreviousTemplate *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s UpdateStackInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateStackInput) GoString() string {
+	return s.String()
+}
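UsePreviousTemplate and the Parameter.UsePreviousValue flag combine in a common update pattern: keep the template, change one parameter. A minimal sketch under hypothetical stack and parameter names:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudformation"
)

func main() {
	svc := cloudformation.New(session.New())

	// Reuse the stack's current template and all but one of its parameter
	// values; per the Parameter docs above, UsePreviousValue and a new
	// ParameterValue are mutually exclusive for a given key.
	out, err := svc.UpdateStack(&cloudformation.UpdateStackInput{
		StackName:           aws.String("my-stack"), // hypothetical stack
		UsePreviousTemplate: aws.Bool(true),
		Parameters: []*cloudformation.Parameter{
			{ParameterKey: aws.String("InstanceType"), ParameterValue: aws.String("t2.small")},
			{ParameterKey: aws.String("KeyPairName"), UsePreviousValue: aws.Bool(true)},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("updating:", aws.StringValue(out.StackId))
}
```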
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateStackInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "UpdateStackInput"}
+	if s.StackName == nil {
+		invalidParams.Add(request.NewErrParamRequired("StackName"))
+	}
+	if s.StackPolicyBody != nil && len(*s.StackPolicyBody) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("StackPolicyBody", 1))
+	}
+	if s.StackPolicyDuringUpdateBody != nil && len(*s.StackPolicyDuringUpdateBody) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("StackPolicyDuringUpdateBody", 1))
+	}
+	if s.StackPolicyDuringUpdateURL != nil && len(*s.StackPolicyDuringUpdateURL) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("StackPolicyDuringUpdateURL", 1))
+	}
+	if s.StackPolicyURL != nil && len(*s.StackPolicyURL) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("StackPolicyURL", 1))
+	}
+	if s.TemplateBody != nil && len(*s.TemplateBody) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("TemplateBody", 1))
+	}
+	if s.TemplateURL != nil && len(*s.TemplateURL) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("TemplateURL", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The output for an UpdateStack action.
+type UpdateStackOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Unique identifier of the stack.
+	StackId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s UpdateStackOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateStackOutput) GoString() string {
+	return s.String()
+}
+
+// The input for ValidateTemplate action.
+type ValidateTemplateInput struct {
+	_ struct{} `type:"structure"`
+
+	// Structure containing the template body with a minimum length of 1 byte and
+	// a maximum length of 51,200 bytes. For more information, go to Template Anatomy
+	// (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html)
+	// in the AWS CloudFormation User Guide.
+	//
+	// Conditional: You must pass TemplateURL or TemplateBody. If both are passed,
+	// only TemplateBody is used.
+	TemplateBody *string `min:"1" type:"string"`
+
+	// Location of file containing the template body. The URL must point to a template
+	// (max size: 460,800 bytes) that is located in an Amazon S3 bucket. For more
+	// information, go to Template Anatomy (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html)
+	// in the AWS CloudFormation User Guide.
+	//
+	// Conditional: You must pass TemplateURL or TemplateBody. If both are passed,
+	// only TemplateBody is used.
+	TemplateURL *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ValidateTemplateInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ValidateTemplateInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ValidateTemplateInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ValidateTemplateInput"}
+	if s.TemplateBody != nil && len(*s.TemplateBody) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("TemplateBody", 1))
+	}
+	if s.TemplateURL != nil && len(*s.TemplateURL) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("TemplateURL", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
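A small sketch of server-side template validation with these types; the template body is a stand-in, and the capability check reads from the output type defined just below.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudformation"
)

func main() {
	svc := cloudformation.New(session.New())

	body := `{"Resources":{"Bucket":{"Type":"AWS::S3::Bucket"}}}` // stand-in template

	out, err := svc.ValidateTemplate(&cloudformation.ValidateTemplateInput{
		TemplateBody: aws.String(body),
	})
	if err != nil {
		log.Fatal(err) // syntactically invalid templates surface here
	}
	// A template with IAM resources would report CAPABILITY_IAM here, which
	// must then be acknowledged on CreateStack or UpdateStack.
	fmt.Println("capabilities:", aws.StringValueSlice(out.Capabilities))
}
```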
+// The output for ValidateTemplate action.
+type ValidateTemplateOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The capabilities found within the template. Currently, AWS CloudFormation
+	// supports only the CAPABILITY_IAM capability. If your template contains IAM
+	// resources, you must specify the CAPABILITY_IAM value for this parameter when
+	// you use the CreateStack or UpdateStack actions with your template; otherwise,
+	// those actions return an InsufficientCapabilities error.
+	Capabilities []*string `type:"list"`
+
+	// The list of resources that generated the values in the Capabilities response
+	// element.
+	CapabilitiesReason *string `type:"string"`
+
+	// The description found within the template.
+	Description *string `min:"1" type:"string"`
+
+	// A list of TemplateParameter structures.
+	Parameters []*TemplateParameter `type:"list"`
+}
+
+// String returns the string representation
+func (s ValidateTemplateOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ValidateTemplateOutput) GoString() string {
+	return s.String()
+}
+
+const (
+	// @enum Capability
+	CapabilityCapabilityIam = "CAPABILITY_IAM"
+)
+
+const (
+	// @enum ChangeAction
+	ChangeActionAdd = "Add"
+	// @enum ChangeAction
+	ChangeActionModify = "Modify"
+	// @enum ChangeAction
+	ChangeActionRemove = "Remove"
+)
+
+const (
+	// @enum ChangeSetStatus
+	ChangeSetStatusCreatePending = "CREATE_PENDING"
+	// @enum ChangeSetStatus
+	ChangeSetStatusCreateInProgress = "CREATE_IN_PROGRESS"
+	// @enum ChangeSetStatus
+	ChangeSetStatusCreateComplete = "CREATE_COMPLETE"
+	// @enum ChangeSetStatus
+	ChangeSetStatusDeleteComplete = "DELETE_COMPLETE"
+	// @enum ChangeSetStatus
+	ChangeSetStatusFailed = "FAILED"
+)
+
+const (
+	// @enum ChangeSource
+	ChangeSourceResourceReference = "ResourceReference"
+	// @enum ChangeSource
+	ChangeSourceParameterReference = "ParameterReference"
+	// @enum ChangeSource
+	ChangeSourceResourceAttribute = "ResourceAttribute"
+	// @enum ChangeSource
+	ChangeSourceDirectModification = "DirectModification"
+	// @enum ChangeSource
+	ChangeSourceAutomatic = "Automatic"
+)
+
+const (
+	// @enum ChangeType
+	ChangeTypeResource = "Resource"
+)
+
+const (
+	// @enum EvaluationType
+	EvaluationTypeStatic = "Static"
+	// @enum EvaluationType
+	EvaluationTypeDynamic = "Dynamic"
+)
+
+const (
+	// @enum ExecutionStatus
+	ExecutionStatusUnavailable = "UNAVAILABLE"
+	// @enum ExecutionStatus
+	ExecutionStatusAvailable = "AVAILABLE"
+	// @enum ExecutionStatus
+	ExecutionStatusExecuteInProgress = "EXECUTE_IN_PROGRESS"
+	// @enum ExecutionStatus
+	ExecutionStatusExecuteComplete = "EXECUTE_COMPLETE"
+	// @enum ExecutionStatus
+	ExecutionStatusExecuteFailed = "EXECUTE_FAILED"
+	// @enum ExecutionStatus
+	ExecutionStatusObsolete = "OBSOLETE"
+)
+
+const (
+	// @enum OnFailure
+	OnFailureDoNothing = "DO_NOTHING"
+	// @enum OnFailure
+	OnFailureRollback = "ROLLBACK"
+	// @enum OnFailure
+	OnFailureDelete = "DELETE"
+)
+
+const (
+	// @enum Replacement
+	ReplacementTrue = "True"
+	// @enum Replacement
+	ReplacementFalse = "False"
+	// @enum Replacement
+	ReplacementConditional = "Conditional"
+)
+
+const (
+	// @enum RequiresRecreation
+	RequiresRecreationNever = "Never"
+	// @enum RequiresRecreation
+	RequiresRecreationConditionally = "Conditionally"
+	// @enum RequiresRecreation
+	RequiresRecreationAlways = "Always"
+)
+
+const (
+	// @enum ResourceAttribute
+	ResourceAttributeProperties = "Properties"
+	// @enum ResourceAttribute
+	ResourceAttributeMetadata = "Metadata"
+	// @enum ResourceAttribute
+	ResourceAttributeCreationPolicy = "CreationPolicy"
+	// @enum ResourceAttribute
+	ResourceAttributeUpdatePolicy = "UpdatePolicy"
+	// @enum ResourceAttribute
+	ResourceAttributeDeletionPolicy = "DeletionPolicy"
+	// @enum ResourceAttribute
+	ResourceAttributeTags = "Tags"
+)
+
+const (
+	// @enum ResourceSignalStatus
+	ResourceSignalStatusSuccess = "SUCCESS"
+	// @enum ResourceSignalStatus
+	ResourceSignalStatusFailure = "FAILURE"
+)
+
+const (
+	// @enum ResourceStatus
+	ResourceStatusCreateInProgress = "CREATE_IN_PROGRESS"
+	// @enum ResourceStatus
+	ResourceStatusCreateFailed = "CREATE_FAILED"
+	// @enum ResourceStatus
+	ResourceStatusCreateComplete = "CREATE_COMPLETE"
+	// @enum ResourceStatus
+	ResourceStatusDeleteInProgress = "DELETE_IN_PROGRESS"
+	// @enum ResourceStatus
+	ResourceStatusDeleteFailed = "DELETE_FAILED"
+	// @enum ResourceStatus
+	ResourceStatusDeleteComplete = "DELETE_COMPLETE"
+	// @enum ResourceStatus
+	ResourceStatusDeleteSkipped = "DELETE_SKIPPED"
+	// @enum ResourceStatus
+	ResourceStatusUpdateInProgress = "UPDATE_IN_PROGRESS"
+	// @enum ResourceStatus
+	ResourceStatusUpdateFailed = "UPDATE_FAILED"
+	// @enum ResourceStatus
+	ResourceStatusUpdateComplete = "UPDATE_COMPLETE"
+)
+
+const (
+	// @enum StackStatus
+	StackStatusCreateInProgress = "CREATE_IN_PROGRESS"
+	// @enum StackStatus
+	StackStatusCreateFailed = "CREATE_FAILED"
+	// @enum StackStatus
+	StackStatusCreateComplete = "CREATE_COMPLETE"
+	// @enum StackStatus
+	StackStatusRollbackInProgress = "ROLLBACK_IN_PROGRESS"
+	// @enum StackStatus
+	StackStatusRollbackFailed = "ROLLBACK_FAILED"
+	// @enum StackStatus
+	StackStatusRollbackComplete = "ROLLBACK_COMPLETE"
+	// @enum StackStatus
+	StackStatusDeleteInProgress = "DELETE_IN_PROGRESS"
+	// @enum StackStatus
+	StackStatusDeleteFailed = "DELETE_FAILED"
+	// @enum StackStatus
+	StackStatusDeleteComplete = "DELETE_COMPLETE"
+	// @enum StackStatus
+	StackStatusUpdateInProgress = "UPDATE_IN_PROGRESS"
+	// @enum StackStatus
+	StackStatusUpdateCompleteCleanupInProgress = "UPDATE_COMPLETE_CLEANUP_IN_PROGRESS"
+	// @enum StackStatus
+	StackStatusUpdateComplete = "UPDATE_COMPLETE"
+	// @enum StackStatus
+	StackStatusUpdateRollbackInProgress = "UPDATE_ROLLBACK_IN_PROGRESS"
+	// @enum StackStatus
+	StackStatusUpdateRollbackFailed = "UPDATE_ROLLBACK_FAILED"
+	// @enum StackStatus
+	StackStatusUpdateRollbackCompleteCleanupInProgress = "UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS"
+	// @enum StackStatus
+	StackStatusUpdateRollbackComplete = "UPDATE_ROLLBACK_COMPLETE"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudformation/cloudformationiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/cloudformation/cloudformationiface/interface.go
new file mode 100644
index 000000000..5f078d68d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/cloudformation/cloudformationiface/interface.go
@@ -0,0 +1,118 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package cloudformationiface provides an interface for AWS CloudFormation.
+package cloudformationiface
+
+import (
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/service/cloudformation"
+)
+
+// CloudFormationAPI is the interface type for cloudformation.CloudFormation.
+type CloudFormationAPI interface { + CancelUpdateStackRequest(*cloudformation.CancelUpdateStackInput) (*request.Request, *cloudformation.CancelUpdateStackOutput) + + CancelUpdateStack(*cloudformation.CancelUpdateStackInput) (*cloudformation.CancelUpdateStackOutput, error) + + ContinueUpdateRollbackRequest(*cloudformation.ContinueUpdateRollbackInput) (*request.Request, *cloudformation.ContinueUpdateRollbackOutput) + + ContinueUpdateRollback(*cloudformation.ContinueUpdateRollbackInput) (*cloudformation.ContinueUpdateRollbackOutput, error) + + CreateChangeSetRequest(*cloudformation.CreateChangeSetInput) (*request.Request, *cloudformation.CreateChangeSetOutput) + + CreateChangeSet(*cloudformation.CreateChangeSetInput) (*cloudformation.CreateChangeSetOutput, error) + + CreateStackRequest(*cloudformation.CreateStackInput) (*request.Request, *cloudformation.CreateStackOutput) + + CreateStack(*cloudformation.CreateStackInput) (*cloudformation.CreateStackOutput, error) + + DeleteChangeSetRequest(*cloudformation.DeleteChangeSetInput) (*request.Request, *cloudformation.DeleteChangeSetOutput) + + DeleteChangeSet(*cloudformation.DeleteChangeSetInput) (*cloudformation.DeleteChangeSetOutput, error) + + DeleteStackRequest(*cloudformation.DeleteStackInput) (*request.Request, *cloudformation.DeleteStackOutput) + + DeleteStack(*cloudformation.DeleteStackInput) (*cloudformation.DeleteStackOutput, error) + + DescribeAccountLimitsRequest(*cloudformation.DescribeAccountLimitsInput) (*request.Request, *cloudformation.DescribeAccountLimitsOutput) + + DescribeAccountLimits(*cloudformation.DescribeAccountLimitsInput) (*cloudformation.DescribeAccountLimitsOutput, error) + + DescribeChangeSetRequest(*cloudformation.DescribeChangeSetInput) (*request.Request, *cloudformation.DescribeChangeSetOutput) + + DescribeChangeSet(*cloudformation.DescribeChangeSetInput) (*cloudformation.DescribeChangeSetOutput, error) + + DescribeStackEventsRequest(*cloudformation.DescribeStackEventsInput) (*request.Request, *cloudformation.DescribeStackEventsOutput) + + DescribeStackEvents(*cloudformation.DescribeStackEventsInput) (*cloudformation.DescribeStackEventsOutput, error) + + DescribeStackEventsPages(*cloudformation.DescribeStackEventsInput, func(*cloudformation.DescribeStackEventsOutput, bool) bool) error + + DescribeStackResourceRequest(*cloudformation.DescribeStackResourceInput) (*request.Request, *cloudformation.DescribeStackResourceOutput) + + DescribeStackResource(*cloudformation.DescribeStackResourceInput) (*cloudformation.DescribeStackResourceOutput, error) + + DescribeStackResourcesRequest(*cloudformation.DescribeStackResourcesInput) (*request.Request, *cloudformation.DescribeStackResourcesOutput) + + DescribeStackResources(*cloudformation.DescribeStackResourcesInput) (*cloudformation.DescribeStackResourcesOutput, error) + + DescribeStacksRequest(*cloudformation.DescribeStacksInput) (*request.Request, *cloudformation.DescribeStacksOutput) + + DescribeStacks(*cloudformation.DescribeStacksInput) (*cloudformation.DescribeStacksOutput, error) + + DescribeStacksPages(*cloudformation.DescribeStacksInput, func(*cloudformation.DescribeStacksOutput, bool) bool) error + + EstimateTemplateCostRequest(*cloudformation.EstimateTemplateCostInput) (*request.Request, *cloudformation.EstimateTemplateCostOutput) + + EstimateTemplateCost(*cloudformation.EstimateTemplateCostInput) (*cloudformation.EstimateTemplateCostOutput, error) + + ExecuteChangeSetRequest(*cloudformation.ExecuteChangeSetInput) (*request.Request, 
*cloudformation.ExecuteChangeSetOutput) + + ExecuteChangeSet(*cloudformation.ExecuteChangeSetInput) (*cloudformation.ExecuteChangeSetOutput, error) + + GetStackPolicyRequest(*cloudformation.GetStackPolicyInput) (*request.Request, *cloudformation.GetStackPolicyOutput) + + GetStackPolicy(*cloudformation.GetStackPolicyInput) (*cloudformation.GetStackPolicyOutput, error) + + GetTemplateRequest(*cloudformation.GetTemplateInput) (*request.Request, *cloudformation.GetTemplateOutput) + + GetTemplate(*cloudformation.GetTemplateInput) (*cloudformation.GetTemplateOutput, error) + + GetTemplateSummaryRequest(*cloudformation.GetTemplateSummaryInput) (*request.Request, *cloudformation.GetTemplateSummaryOutput) + + GetTemplateSummary(*cloudformation.GetTemplateSummaryInput) (*cloudformation.GetTemplateSummaryOutput, error) + + ListChangeSetsRequest(*cloudformation.ListChangeSetsInput) (*request.Request, *cloudformation.ListChangeSetsOutput) + + ListChangeSets(*cloudformation.ListChangeSetsInput) (*cloudformation.ListChangeSetsOutput, error) + + ListStackResourcesRequest(*cloudformation.ListStackResourcesInput) (*request.Request, *cloudformation.ListStackResourcesOutput) + + ListStackResources(*cloudformation.ListStackResourcesInput) (*cloudformation.ListStackResourcesOutput, error) + + ListStackResourcesPages(*cloudformation.ListStackResourcesInput, func(*cloudformation.ListStackResourcesOutput, bool) bool) error + + ListStacksRequest(*cloudformation.ListStacksInput) (*request.Request, *cloudformation.ListStacksOutput) + + ListStacks(*cloudformation.ListStacksInput) (*cloudformation.ListStacksOutput, error) + + ListStacksPages(*cloudformation.ListStacksInput, func(*cloudformation.ListStacksOutput, bool) bool) error + + SetStackPolicyRequest(*cloudformation.SetStackPolicyInput) (*request.Request, *cloudformation.SetStackPolicyOutput) + + SetStackPolicy(*cloudformation.SetStackPolicyInput) (*cloudformation.SetStackPolicyOutput, error) + + SignalResourceRequest(*cloudformation.SignalResourceInput) (*request.Request, *cloudformation.SignalResourceOutput) + + SignalResource(*cloudformation.SignalResourceInput) (*cloudformation.SignalResourceOutput, error) + + UpdateStackRequest(*cloudformation.UpdateStackInput) (*request.Request, *cloudformation.UpdateStackOutput) + + UpdateStack(*cloudformation.UpdateStackInput) (*cloudformation.UpdateStackOutput, error) + + ValidateTemplateRequest(*cloudformation.ValidateTemplateInput) (*request.Request, *cloudformation.ValidateTemplateOutput) + + ValidateTemplate(*cloudformation.ValidateTemplateInput) (*cloudformation.ValidateTemplateOutput, error) +} + +var _ CloudFormationAPI = (*cloudformation.CloudFormation)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudformation/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/cloudformation/examples_test.go new file mode 100644 index 000000000..695686a05 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudformation/examples_test.go @@ -0,0 +1,609 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
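Editor's note: each example below prints err.Error() and notes that err can be cast to awserr.Error to get the error code and message. A minimal sketch of that cast (the surrounding call is whichever operation was just invoked):

    import "github.com/aws/aws-sdk-go/aws/awserr"

    if err != nil {
        if aerr, ok := err.(awserr.Error); ok {
            // Code is the machine-readable identifier, e.g. "ValidationError".
            fmt.Println(aerr.Code(), aerr.Message())
        } else {
            fmt.Println(err.Error())
        }
        return
    }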
+ +package cloudformation_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/cloudformation" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleCloudFormation_CancelUpdateStack() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.CancelUpdateStackInput{ + StackName: aws.String("StackName"), // Required + } + resp, err := svc.CancelUpdateStack(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_ContinueUpdateRollback() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.ContinueUpdateRollbackInput{ + StackName: aws.String("StackNameOrId"), // Required + } + resp, err := svc.ContinueUpdateRollback(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_CreateChangeSet() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.CreateChangeSetInput{ + ChangeSetName: aws.String("ChangeSetName"), // Required + StackName: aws.String("StackNameOrId"), // Required + Capabilities: []*string{ + aws.String("Capability"), // Required + // More values... + }, + ClientToken: aws.String("ClientToken"), + Description: aws.String("Description"), + NotificationARNs: []*string{ + aws.String("NotificationARN"), // Required + // More values... + }, + Parameters: []*cloudformation.Parameter{ + { // Required + ParameterKey: aws.String("ParameterKey"), + ParameterValue: aws.String("ParameterValue"), + UsePreviousValue: aws.Bool(true), + }, + // More values... + }, + ResourceTypes: []*string{ + aws.String("ResourceType"), // Required + // More values... + }, + Tags: []*cloudformation.Tag{ + { // Required + Key: aws.String("TagKey"), + Value: aws.String("TagValue"), + }, + // More values... + }, + TemplateBody: aws.String("TemplateBody"), + TemplateURL: aws.String("TemplateURL"), + UsePreviousTemplate: aws.Bool(true), + } + resp, err := svc.CreateChangeSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_CreateStack() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.CreateStackInput{ + StackName: aws.String("StackName"), // Required + Capabilities: []*string{ + aws.String("Capability"), // Required + // More values... + }, + DisableRollback: aws.Bool(true), + NotificationARNs: []*string{ + aws.String("NotificationARN"), // Required + // More values... + }, + OnFailure: aws.String("OnFailure"), + Parameters: []*cloudformation.Parameter{ + { // Required + ParameterKey: aws.String("ParameterKey"), + ParameterValue: aws.String("ParameterValue"), + UsePreviousValue: aws.Bool(true), + }, + // More values... + }, + ResourceTypes: []*string{ + aws.String("ResourceType"), // Required + // More values... 
+ }, + StackPolicyBody: aws.String("StackPolicyBody"), + StackPolicyURL: aws.String("StackPolicyURL"), + Tags: []*cloudformation.Tag{ + { // Required + Key: aws.String("TagKey"), + Value: aws.String("TagValue"), + }, + // More values... + }, + TemplateBody: aws.String("TemplateBody"), + TemplateURL: aws.String("TemplateURL"), + TimeoutInMinutes: aws.Int64(1), + } + resp, err := svc.CreateStack(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_DeleteChangeSet() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.DeleteChangeSetInput{ + ChangeSetName: aws.String("ChangeSetNameOrId"), // Required + StackName: aws.String("StackNameOrId"), + } + resp, err := svc.DeleteChangeSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_DeleteStack() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.DeleteStackInput{ + StackName: aws.String("StackName"), // Required + RetainResources: []*string{ + aws.String("LogicalResourceId"), // Required + // More values... + }, + } + resp, err := svc.DeleteStack(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_DescribeAccountLimits() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.DescribeAccountLimitsInput{ + NextToken: aws.String("NextToken"), + } + resp, err := svc.DescribeAccountLimits(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_DescribeChangeSet() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.DescribeChangeSetInput{ + ChangeSetName: aws.String("ChangeSetNameOrId"), // Required + NextToken: aws.String("NextToken"), + StackName: aws.String("StackNameOrId"), + } + resp, err := svc.DescribeChangeSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_DescribeStackEvents() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.DescribeStackEventsInput{ + NextToken: aws.String("NextToken"), + StackName: aws.String("StackName"), + } + resp, err := svc.DescribeStackEvents(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudFormation_DescribeStackResource() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.DescribeStackResourceInput{ + LogicalResourceId: aws.String("LogicalResourceId"), // Required + StackName: aws.String("StackName"), // Required + } + resp, err := svc.DescribeStackResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_DescribeStackResources() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.DescribeStackResourcesInput{ + LogicalResourceId: aws.String("LogicalResourceId"), + PhysicalResourceId: aws.String("PhysicalResourceId"), + StackName: aws.String("StackName"), + } + resp, err := svc.DescribeStackResources(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_DescribeStacks() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.DescribeStacksInput{ + NextToken: aws.String("NextToken"), + StackName: aws.String("StackName"), + } + resp, err := svc.DescribeStacks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_EstimateTemplateCost() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.EstimateTemplateCostInput{ + Parameters: []*cloudformation.Parameter{ + { // Required + ParameterKey: aws.String("ParameterKey"), + ParameterValue: aws.String("ParameterValue"), + UsePreviousValue: aws.Bool(true), + }, + // More values... + }, + TemplateBody: aws.String("TemplateBody"), + TemplateURL: aws.String("TemplateURL"), + } + resp, err := svc.EstimateTemplateCost(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_ExecuteChangeSet() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.ExecuteChangeSetInput{ + ChangeSetName: aws.String("ChangeSetNameOrId"), // Required + StackName: aws.String("StackNameOrId"), + } + resp, err := svc.ExecuteChangeSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_GetStackPolicy() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.GetStackPolicyInput{ + StackName: aws.String("StackName"), // Required + } + resp, err := svc.GetStackPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudFormation_GetTemplate() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.GetTemplateInput{ + StackName: aws.String("StackName"), // Required + } + resp, err := svc.GetTemplate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_GetTemplateSummary() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.GetTemplateSummaryInput{ + StackName: aws.String("StackNameOrId"), + TemplateBody: aws.String("TemplateBody"), + TemplateURL: aws.String("TemplateURL"), + } + resp, err := svc.GetTemplateSummary(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_ListChangeSets() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.ListChangeSetsInput{ + StackName: aws.String("StackNameOrId"), // Required + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListChangeSets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_ListStackResources() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.ListStackResourcesInput{ + StackName: aws.String("StackName"), // Required + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListStackResources(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_ListStacks() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.ListStacksInput{ + NextToken: aws.String("NextToken"), + StackStatusFilter: []*string{ + aws.String("StackStatus"), // Required + // More values... + }, + } + resp, err := svc.ListStacks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_SetStackPolicy() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.SetStackPolicyInput{ + StackName: aws.String("StackName"), // Required + StackPolicyBody: aws.String("StackPolicyBody"), + StackPolicyURL: aws.String("StackPolicyURL"), + } + resp, err := svc.SetStackPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudFormation_SignalResource() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.SignalResourceInput{ + LogicalResourceId: aws.String("LogicalResourceId"), // Required + StackName: aws.String("StackNameOrId"), // Required + Status: aws.String("ResourceSignalStatus"), // Required + UniqueId: aws.String("ResourceSignalUniqueId"), // Required + } + resp, err := svc.SignalResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_UpdateStack() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.UpdateStackInput{ + StackName: aws.String("StackName"), // Required + Capabilities: []*string{ + aws.String("Capability"), // Required + // More values... + }, + NotificationARNs: []*string{ + aws.String("NotificationARN"), // Required + // More values... + }, + Parameters: []*cloudformation.Parameter{ + { // Required + ParameterKey: aws.String("ParameterKey"), + ParameterValue: aws.String("ParameterValue"), + UsePreviousValue: aws.Bool(true), + }, + // More values... + }, + ResourceTypes: []*string{ + aws.String("ResourceType"), // Required + // More values... + }, + StackPolicyBody: aws.String("StackPolicyBody"), + StackPolicyDuringUpdateBody: aws.String("StackPolicyDuringUpdateBody"), + StackPolicyDuringUpdateURL: aws.String("StackPolicyDuringUpdateURL"), + StackPolicyURL: aws.String("StackPolicyURL"), + Tags: []*cloudformation.Tag{ + { // Required + Key: aws.String("TagKey"), + Value: aws.String("TagValue"), + }, + // More values... + }, + TemplateBody: aws.String("TemplateBody"), + TemplateURL: aws.String("TemplateURL"), + UsePreviousTemplate: aws.Bool(true), + } + resp, err := svc.UpdateStack(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_ValidateTemplate() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.ValidateTemplateInput{ + TemplateBody: aws.String("TemplateBody"), + TemplateURL: aws.String("TemplateURL"), + } + resp, err := svc.ValidateTemplate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudformation/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudformation/service.go new file mode 100644 index 000000000..bd9d2917e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudformation/service.go @@ -0,0 +1,103 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package cloudformation + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +// AWS CloudFormation enables you to create and manage AWS infrastructure deployments +// predictably and repeatedly. 
AWS CloudFormation helps you leverage AWS products +// such as Amazon EC2, EBS, Amazon SNS, ELB, and Auto Scaling to build highly reliable, +// highly scalable, cost-effective applications without worrying about creating +// and configuring the underlying AWS infrastructure. +// +// With AWS CloudFormation, you declare all of your resources and dependencies +// in a template file. The template defines a collection of resources as a single +// unit called a stack. AWS CloudFormation creates and deletes all member resources +// of the stack together and manages all dependencies between the resources +// for you. +// +// For more information about this product, go to the CloudFormation Product +// Page (http://aws.amazon.com/cloudformation/). +// +// Amazon CloudFormation makes use of other AWS products. If you need additional +// technical information about a specific AWS product, you can find the product's +// technical documentation at http://docs.aws.amazon.com/ (http://docs.aws.amazon.com/). +// +// The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type CloudFormation struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "cloudformation" + +// New creates a new instance of the CloudFormation client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a CloudFormation client from just a session. +// svc := cloudformation.New(mySession) +// +// // Create a CloudFormation client with additional configuration +// svc := cloudformation.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudFormation { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *CloudFormation { + svc := &CloudFormation{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2010-05-15", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a CloudFormation operation and runs any +// custom request initialization.
+func (c *CloudFormation) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudformation/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/cloudformation/waiters.go new file mode 100644 index 000000000..f8ca67514 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudformation/waiters.go @@ -0,0 +1,279 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package cloudformation + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *CloudFormation) WaitUntilStackCreateComplete(input *DescribeStacksInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeStacks", + Delay: 30, + MaxAttempts: 120, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Stacks[].StackStatus", + Expected: "CREATE_COMPLETE", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Stacks[].StackStatus", + Expected: "CREATE_FAILED", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Stacks[].StackStatus", + Expected: "DELETE_COMPLETE", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Stacks[].StackStatus", + Expected: "DELETE_IN_PROGRESS", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Stacks[].StackStatus", + Expected: "DELETE_FAILED", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Stacks[].StackStatus", + Expected: "ROLLBACK_COMPLETE", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Stacks[].StackStatus", + Expected: "ROLLBACK_FAILED", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Stacks[].StackStatus", + Expected: "ROLLBACK_IN_PROGRESS", + }, + { + State: "failure", + Matcher: "error", + Argument: "", + Expected: "ValidationError", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *CloudFormation) WaitUntilStackDeleteComplete(input *DescribeStacksInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeStacks", + Delay: 30, + MaxAttempts: 120, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Stacks[].StackStatus", + Expected: "DELETE_COMPLETE", + }, + { + State: "success", + Matcher: "error", + Argument: "", + Expected: "ValidationError", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Stacks[].StackStatus", + Expected: "DELETE_FAILED", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Stacks[].StackStatus", + Expected: "CREATE_COMPLETE", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Stacks[].StackStatus", + Expected: "CREATE_FAILED", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Stacks[].StackStatus", + Expected: "CREATE_IN_PROGRESS", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Stacks[].StackStatus", + Expected: "ROLLBACK_COMPLETE", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Stacks[].StackStatus", + Expected: "ROLLBACK_FAILED", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Stacks[].StackStatus", + Expected: "ROLLBACK_IN_PROGRESS", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Stacks[].StackStatus", + Expected: "UPDATE_COMPLETE", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Stacks[].StackStatus", + 
Expected: "UPDATE_COMPLETE_CLEANUP_IN_PROGRESS", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Stacks[].StackStatus", + Expected: "UPDATE_IN_PROGRESS", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Stacks[].StackStatus", + Expected: "UPDATE_ROLLBACK_COMPLETE", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Stacks[].StackStatus", + Expected: "UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Stacks[].StackStatus", + Expected: "UPDATE_ROLLBACK_FAILED", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Stacks[].StackStatus", + Expected: "UPDATE_ROLLBACK_IN_PROGRESS", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *CloudFormation) WaitUntilStackExists(input *DescribeStacksInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeStacks", + Delay: 5, + MaxAttempts: 20, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 200, + }, + { + State: "retry", + Matcher: "error", + Argument: "", + Expected: "ValidationError", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *CloudFormation) WaitUntilStackUpdateComplete(input *DescribeStacksInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeStacks", + Delay: 30, + MaxAttempts: 120, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Stacks[].StackStatus", + Expected: "UPDATE_COMPLETE", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Stacks[].StackStatus", + Expected: "UPDATE_FAILED", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Stacks[].StackStatus", + Expected: "UPDATE_ROLLBACK_COMPLETE", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Stacks[].StackStatus", + Expected: "UPDATE_ROLLBACK_FAILED", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Stacks[].StackStatus", + Expected: "UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Stacks[].StackStatus", + Expected: "UPDATE_ROLLBACK_IN_PROGRESS", + }, + { + State: "failure", + Matcher: "error", + Argument: "", + Expected: "ValidationError", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/api.go new file mode 100644 index 000000000..f9bd33f6b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/api.go @@ -0,0 +1,4951 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cloudfront provides a client for Amazon CloudFront. +package cloudfront + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restxml" +) + +const opCreateCloudFrontOriginAccessIdentity = "CreateCloudFrontOriginAccessIdentity2016_01_28" + +// CreateCloudFrontOriginAccessIdentityRequest generates a "aws/request.Request" representing the +// client's request for the CreateCloudFrontOriginAccessIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateCloudFrontOriginAccessIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateCloudFrontOriginAccessIdentityRequest method. +// req, resp := client.CreateCloudFrontOriginAccessIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFront) CreateCloudFrontOriginAccessIdentityRequest(input *CreateCloudFrontOriginAccessIdentityInput) (req *request.Request, output *CreateCloudFrontOriginAccessIdentityOutput) { + op := &request.Operation{ + Name: opCreateCloudFrontOriginAccessIdentity, + HTTPMethod: "POST", + HTTPPath: "/2016-01-28/origin-access-identity/cloudfront", + } + + if input == nil { + input = &CreateCloudFrontOriginAccessIdentityInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateCloudFrontOriginAccessIdentityOutput{} + req.Data = output + return +} + +// Create a new origin access identity. +func (c *CloudFront) CreateCloudFrontOriginAccessIdentity(input *CreateCloudFrontOriginAccessIdentityInput) (*CreateCloudFrontOriginAccessIdentityOutput, error) { + req, out := c.CreateCloudFrontOriginAccessIdentityRequest(input) + err := req.Send() + return out, err +} + +const opCreateDistribution = "CreateDistribution2016_01_28" + +// CreateDistributionRequest generates a "aws/request.Request" representing the +// client's request for the CreateDistribution operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDistribution method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDistributionRequest method. +// req, resp := client.CreateDistributionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFront) CreateDistributionRequest(input *CreateDistributionInput) (req *request.Request, output *CreateDistributionOutput) { + op := &request.Operation{ + Name: opCreateDistribution, + HTTPMethod: "POST", + HTTPPath: "/2016-01-28/distribution", + } + + if input == nil { + input = &CreateDistributionInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDistributionOutput{} + req.Data = output + return +} + +// Create a new distribution. 
+func (c *CloudFront) CreateDistribution(input *CreateDistributionInput) (*CreateDistributionOutput, error) { + req, out := c.CreateDistributionRequest(input) + err := req.Send() + return out, err +} + +const opCreateInvalidation = "CreateInvalidation2016_01_28" + +// CreateInvalidationRequest generates a "aws/request.Request" representing the +// client's request for the CreateInvalidation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateInvalidation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateInvalidationRequest method. +// req, resp := client.CreateInvalidationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFront) CreateInvalidationRequest(input *CreateInvalidationInput) (req *request.Request, output *CreateInvalidationOutput) { + op := &request.Operation{ + Name: opCreateInvalidation, + HTTPMethod: "POST", + HTTPPath: "/2016-01-28/distribution/{DistributionId}/invalidation", + } + + if input == nil { + input = &CreateInvalidationInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateInvalidationOutput{} + req.Data = output + return +} + +// Create a new invalidation. +func (c *CloudFront) CreateInvalidation(input *CreateInvalidationInput) (*CreateInvalidationOutput, error) { + req, out := c.CreateInvalidationRequest(input) + err := req.Send() + return out, err +} + +const opCreateStreamingDistribution = "CreateStreamingDistribution2016_01_28" + +// CreateStreamingDistributionRequest generates a "aws/request.Request" representing the +// client's request for the CreateStreamingDistribution operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateStreamingDistribution method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateStreamingDistributionRequest method. 
+// req, resp := client.CreateStreamingDistributionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFront) CreateStreamingDistributionRequest(input *CreateStreamingDistributionInput) (req *request.Request, output *CreateStreamingDistributionOutput) { + op := &request.Operation{ + Name: opCreateStreamingDistribution, + HTTPMethod: "POST", + HTTPPath: "/2016-01-28/streaming-distribution", + } + + if input == nil { + input = &CreateStreamingDistributionInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateStreamingDistributionOutput{} + req.Data = output + return +} + +// Create a new streaming distribution. +func (c *CloudFront) CreateStreamingDistribution(input *CreateStreamingDistributionInput) (*CreateStreamingDistributionOutput, error) { + req, out := c.CreateStreamingDistributionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteCloudFrontOriginAccessIdentity = "DeleteCloudFrontOriginAccessIdentity2016_01_28" + +// DeleteCloudFrontOriginAccessIdentityRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCloudFrontOriginAccessIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteCloudFrontOriginAccessIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteCloudFrontOriginAccessIdentityRequest method. +// req, resp := client.DeleteCloudFrontOriginAccessIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFront) DeleteCloudFrontOriginAccessIdentityRequest(input *DeleteCloudFrontOriginAccessIdentityInput) (req *request.Request, output *DeleteCloudFrontOriginAccessIdentityOutput) { + op := &request.Operation{ + Name: opDeleteCloudFrontOriginAccessIdentity, + HTTPMethod: "DELETE", + HTTPPath: "/2016-01-28/origin-access-identity/cloudfront/{Id}", + } + + if input == nil { + input = &DeleteCloudFrontOriginAccessIdentityInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteCloudFrontOriginAccessIdentityOutput{} + req.Data = output + return +} + +// Delete an origin access identity. +func (c *CloudFront) DeleteCloudFrontOriginAccessIdentity(input *DeleteCloudFrontOriginAccessIdentityInput) (*DeleteCloudFrontOriginAccessIdentityOutput, error) { + req, out := c.DeleteCloudFrontOriginAccessIdentityRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDistribution = "DeleteDistribution2016_01_28" + +// DeleteDistributionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDistribution operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDistribution method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteDistributionRequest method. +// req, resp := client.DeleteDistributionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFront) DeleteDistributionRequest(input *DeleteDistributionInput) (req *request.Request, output *DeleteDistributionOutput) { + op := &request.Operation{ + Name: opDeleteDistribution, + HTTPMethod: "DELETE", + HTTPPath: "/2016-01-28/distribution/{Id}", + } + + if input == nil { + input = &DeleteDistributionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteDistributionOutput{} + req.Data = output + return +} + +// Delete a distribution. +func (c *CloudFront) DeleteDistribution(input *DeleteDistributionInput) (*DeleteDistributionOutput, error) { + req, out := c.DeleteDistributionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteStreamingDistribution = "DeleteStreamingDistribution2016_01_28" + +// DeleteStreamingDistributionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteStreamingDistribution operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteStreamingDistribution method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteStreamingDistributionRequest method. +// req, resp := client.DeleteStreamingDistributionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFront) DeleteStreamingDistributionRequest(input *DeleteStreamingDistributionInput) (req *request.Request, output *DeleteStreamingDistributionOutput) { + op := &request.Operation{ + Name: opDeleteStreamingDistribution, + HTTPMethod: "DELETE", + HTTPPath: "/2016-01-28/streaming-distribution/{Id}", + } + + if input == nil { + input = &DeleteStreamingDistributionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteStreamingDistributionOutput{} + req.Data = output + return +} + +// Delete a streaming distribution. 
+func (c *CloudFront) DeleteStreamingDistribution(input *DeleteStreamingDistributionInput) (*DeleteStreamingDistributionOutput, error) { + req, out := c.DeleteStreamingDistributionRequest(input) + err := req.Send() + return out, err +} + +const opGetCloudFrontOriginAccessIdentity = "GetCloudFrontOriginAccessIdentity2016_01_28" + +// GetCloudFrontOriginAccessIdentityRequest generates a "aws/request.Request" representing the +// client's request for the GetCloudFrontOriginAccessIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetCloudFrontOriginAccessIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetCloudFrontOriginAccessIdentityRequest method. +// req, resp := client.GetCloudFrontOriginAccessIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFront) GetCloudFrontOriginAccessIdentityRequest(input *GetCloudFrontOriginAccessIdentityInput) (req *request.Request, output *GetCloudFrontOriginAccessIdentityOutput) { + op := &request.Operation{ + Name: opGetCloudFrontOriginAccessIdentity, + HTTPMethod: "GET", + HTTPPath: "/2016-01-28/origin-access-identity/cloudfront/{Id}", + } + + if input == nil { + input = &GetCloudFrontOriginAccessIdentityInput{} + } + + req = c.newRequest(op, input, output) + output = &GetCloudFrontOriginAccessIdentityOutput{} + req.Data = output + return +} + +// Get the information about an origin access identity. +func (c *CloudFront) GetCloudFrontOriginAccessIdentity(input *GetCloudFrontOriginAccessIdentityInput) (*GetCloudFrontOriginAccessIdentityOutput, error) { + req, out := c.GetCloudFrontOriginAccessIdentityRequest(input) + err := req.Send() + return out, err +} + +const opGetCloudFrontOriginAccessIdentityConfig = "GetCloudFrontOriginAccessIdentityConfig2016_01_28" + +// GetCloudFrontOriginAccessIdentityConfigRequest generates a "aws/request.Request" representing the +// client's request for the GetCloudFrontOriginAccessIdentityConfig operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetCloudFrontOriginAccessIdentityConfig method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetCloudFrontOriginAccessIdentityConfigRequest method. 
+// req, resp := client.GetCloudFrontOriginAccessIdentityConfigRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFront) GetCloudFrontOriginAccessIdentityConfigRequest(input *GetCloudFrontOriginAccessIdentityConfigInput) (req *request.Request, output *GetCloudFrontOriginAccessIdentityConfigOutput) { + op := &request.Operation{ + Name: opGetCloudFrontOriginAccessIdentityConfig, + HTTPMethod: "GET", + HTTPPath: "/2016-01-28/origin-access-identity/cloudfront/{Id}/config", + } + + if input == nil { + input = &GetCloudFrontOriginAccessIdentityConfigInput{} + } + + req = c.newRequest(op, input, output) + output = &GetCloudFrontOriginAccessIdentityConfigOutput{} + req.Data = output + return +} + +// Get the configuration information about an origin access identity. +func (c *CloudFront) GetCloudFrontOriginAccessIdentityConfig(input *GetCloudFrontOriginAccessIdentityConfigInput) (*GetCloudFrontOriginAccessIdentityConfigOutput, error) { + req, out := c.GetCloudFrontOriginAccessIdentityConfigRequest(input) + err := req.Send() + return out, err +} + +const opGetDistribution = "GetDistribution2016_01_28" + +// GetDistributionRequest generates a "aws/request.Request" representing the +// client's request for the GetDistribution operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetDistribution method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetDistributionRequest method. +// req, resp := client.GetDistributionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFront) GetDistributionRequest(input *GetDistributionInput) (req *request.Request, output *GetDistributionOutput) { + op := &request.Operation{ + Name: opGetDistribution, + HTTPMethod: "GET", + HTTPPath: "/2016-01-28/distribution/{Id}", + } + + if input == nil { + input = &GetDistributionInput{} + } + + req = c.newRequest(op, input, output) + output = &GetDistributionOutput{} + req.Data = output + return +} + +// Get the information about a distribution. +func (c *CloudFront) GetDistribution(input *GetDistributionInput) (*GetDistributionOutput, error) { + req, out := c.GetDistributionRequest(input) + err := req.Send() + return out, err +} + +const opGetDistributionConfig = "GetDistributionConfig2016_01_28" + +// GetDistributionConfigRequest generates a "aws/request.Request" representing the +// client's request for the GetDistributionConfig operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetDistributionConfig method directly +// instead. 
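Editor's note: "inject custom logic into the request's lifecycle using a custom handler", as used throughout these doc comments, means pushing a function onto one of the lists in req.Handlers before calling Send. A minimal sketch against GetDistributionConfigRequest (the log line is illustrative only):

    req, resp := svc.GetDistributionConfigRequest(params)
    req.Handlers.Send.PushFront(func(r *request.Request) {
        fmt.Println("sending", r.Operation.Name) // runs just before the HTTP call
    })
    if err := req.Send(); err == nil {
        fmt.Println(resp) // resp is populated once Send returns successfully
    }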
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetDistributionConfigRequest method. +// req, resp := client.GetDistributionConfigRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFront) GetDistributionConfigRequest(input *GetDistributionConfigInput) (req *request.Request, output *GetDistributionConfigOutput) { + op := &request.Operation{ + Name: opGetDistributionConfig, + HTTPMethod: "GET", + HTTPPath: "/2016-01-28/distribution/{Id}/config", + } + + if input == nil { + input = &GetDistributionConfigInput{} + } + + req = c.newRequest(op, input, output) + output = &GetDistributionConfigOutput{} + req.Data = output + return +} + +// Get the configuration information about a distribution. +func (c *CloudFront) GetDistributionConfig(input *GetDistributionConfigInput) (*GetDistributionConfigOutput, error) { + req, out := c.GetDistributionConfigRequest(input) + err := req.Send() + return out, err +} + +const opGetInvalidation = "GetInvalidation2016_01_28" + +// GetInvalidationRequest generates a "aws/request.Request" representing the +// client's request for the GetInvalidation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetInvalidation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetInvalidationRequest method. +// req, resp := client.GetInvalidationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFront) GetInvalidationRequest(input *GetInvalidationInput) (req *request.Request, output *GetInvalidationOutput) { + op := &request.Operation{ + Name: opGetInvalidation, + HTTPMethod: "GET", + HTTPPath: "/2016-01-28/distribution/{DistributionId}/invalidation/{Id}", + } + + if input == nil { + input = &GetInvalidationInput{} + } + + req = c.newRequest(op, input, output) + output = &GetInvalidationOutput{} + req.Data = output + return +} + +// Get the information about an invalidation. +func (c *CloudFront) GetInvalidation(input *GetInvalidationInput) (*GetInvalidationOutput, error) { + req, out := c.GetInvalidationRequest(input) + err := req.Send() + return out, err +} + +const opGetStreamingDistribution = "GetStreamingDistribution2016_01_28" + +// GetStreamingDistributionRequest generates a "aws/request.Request" representing the +// client's request for the GetStreamingDistribution operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetStreamingDistribution method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetStreamingDistributionRequest method. +// req, resp := client.GetStreamingDistributionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFront) GetStreamingDistributionRequest(input *GetStreamingDistributionInput) (req *request.Request, output *GetStreamingDistributionOutput) { + op := &request.Operation{ + Name: opGetStreamingDistribution, + HTTPMethod: "GET", + HTTPPath: "/2016-01-28/streaming-distribution/{Id}", + } + + if input == nil { + input = &GetStreamingDistributionInput{} + } + + req = c.newRequest(op, input, output) + output = &GetStreamingDistributionOutput{} + req.Data = output + return +} + +// Get the information about a streaming distribution. +func (c *CloudFront) GetStreamingDistribution(input *GetStreamingDistributionInput) (*GetStreamingDistributionOutput, error) { + req, out := c.GetStreamingDistributionRequest(input) + err := req.Send() + return out, err +} + +const opGetStreamingDistributionConfig = "GetStreamingDistributionConfig2016_01_28" + +// GetStreamingDistributionConfigRequest generates a "aws/request.Request" representing the +// client's request for the GetStreamingDistributionConfig operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetStreamingDistributionConfig method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetStreamingDistributionConfigRequest method. +// req, resp := client.GetStreamingDistributionConfigRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFront) GetStreamingDistributionConfigRequest(input *GetStreamingDistributionConfigInput) (req *request.Request, output *GetStreamingDistributionConfigOutput) { + op := &request.Operation{ + Name: opGetStreamingDistributionConfig, + HTTPMethod: "GET", + HTTPPath: "/2016-01-28/streaming-distribution/{Id}/config", + } + + if input == nil { + input = &GetStreamingDistributionConfigInput{} + } + + req = c.newRequest(op, input, output) + output = &GetStreamingDistributionConfigOutput{} + req.Data = output + return +} + +// Get the configuration information about a streaming distribution. +func (c *CloudFront) GetStreamingDistributionConfig(input *GetStreamingDistributionConfigInput) (*GetStreamingDistributionConfigOutput, error) { + req, out := c.GetStreamingDistributionConfigRequest(input) + err := req.Send() + return out, err +} + +const opListCloudFrontOriginAccessIdentities = "ListCloudFrontOriginAccessIdentities2016_01_28" + +// ListCloudFrontOriginAccessIdentitiesRequest generates a "aws/request.Request" representing the +// client's request for the ListCloudFrontOriginAccessIdentities operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListCloudFrontOriginAccessIdentities method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListCloudFrontOriginAccessIdentitiesRequest method. +// req, resp := client.ListCloudFrontOriginAccessIdentitiesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFront) ListCloudFrontOriginAccessIdentitiesRequest(input *ListCloudFrontOriginAccessIdentitiesInput) (req *request.Request, output *ListCloudFrontOriginAccessIdentitiesOutput) { + op := &request.Operation{ + Name: opListCloudFrontOriginAccessIdentities, + HTTPMethod: "GET", + HTTPPath: "/2016-01-28/origin-access-identity/cloudfront", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"CloudFrontOriginAccessIdentityList.NextMarker"}, + LimitToken: "MaxItems", + TruncationToken: "CloudFrontOriginAccessIdentityList.IsTruncated", + }, + } + + if input == nil { + input = &ListCloudFrontOriginAccessIdentitiesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListCloudFrontOriginAccessIdentitiesOutput{} + req.Data = output + return +} + +// List origin access identities. +func (c *CloudFront) ListCloudFrontOriginAccessIdentities(input *ListCloudFrontOriginAccessIdentitiesInput) (*ListCloudFrontOriginAccessIdentitiesOutput, error) { + req, out := c.ListCloudFrontOriginAccessIdentitiesRequest(input) + err := req.Send() + return out, err +} + +// ListCloudFrontOriginAccessIdentitiesPages iterates over the pages of a ListCloudFrontOriginAccessIdentities operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListCloudFrontOriginAccessIdentities method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListCloudFrontOriginAccessIdentities operation. +// pageNum := 0 +// err := client.ListCloudFrontOriginAccessIdentitiesPages(params, +// func(page *ListCloudFrontOriginAccessIdentitiesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CloudFront) ListCloudFrontOriginAccessIdentitiesPages(input *ListCloudFrontOriginAccessIdentitiesInput, fn func(p *ListCloudFrontOriginAccessIdentitiesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListCloudFrontOriginAccessIdentitiesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListCloudFrontOriginAccessIdentitiesOutput), lastPage) + }) +} + +const opListDistributions = "ListDistributions2016_01_28" + +// ListDistributionsRequest generates a "aws/request.Request" representing the +// client's request for the ListDistributions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
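+//
+// ListDistributions is paginated. As a sketch, every distribution summary can
+// be collected with the ListDistributionsPages method (this assumes the
+// DistributionList.Items field shape defined elsewhere in this package):
+//
+// var summaries []*DistributionSummary
+// err := client.ListDistributionsPages(params,
+// func(page *ListDistributionsOutput, lastPage bool) bool {
+// summaries = append(summaries, page.DistributionList.Items...)
+// return true // continue until the final page
+// })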
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListDistributions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListDistributionsRequest method. +// req, resp := client.ListDistributionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFront) ListDistributionsRequest(input *ListDistributionsInput) (req *request.Request, output *ListDistributionsOutput) { + op := &request.Operation{ + Name: opListDistributions, + HTTPMethod: "GET", + HTTPPath: "/2016-01-28/distribution", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"DistributionList.NextMarker"}, + LimitToken: "MaxItems", + TruncationToken: "DistributionList.IsTruncated", + }, + } + + if input == nil { + input = &ListDistributionsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListDistributionsOutput{} + req.Data = output + return +} + +// List distributions. +func (c *CloudFront) ListDistributions(input *ListDistributionsInput) (*ListDistributionsOutput, error) { + req, out := c.ListDistributionsRequest(input) + err := req.Send() + return out, err +} + +// ListDistributionsPages iterates over the pages of a ListDistributions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDistributions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDistributions operation. +// pageNum := 0 +// err := client.ListDistributionsPages(params, +// func(page *ListDistributionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CloudFront) ListDistributionsPages(input *ListDistributionsInput, fn func(p *ListDistributionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListDistributionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListDistributionsOutput), lastPage) + }) +} + +const opListDistributionsByWebACLId = "ListDistributionsByWebACLId2016_01_28" + +// ListDistributionsByWebACLIdRequest generates a "aws/request.Request" representing the +// client's request for the ListDistributionsByWebACLId operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListDistributionsByWebACLId method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
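+//
+// If "Send" returns an error, it can be inspected as an awserr.Error to read
+// the service error code and message (a sketch; see the aws/awserr package):
+//
+// req, resp := client.ListDistributionsByWebACLIdRequest(params)
+// if err := req.Send(); err != nil {
+// if aerr, ok := err.(awserr.Error); ok {
+// fmt.Println(aerr.Code(), aerr.Message())
+// }
+// }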
+// +// // Example sending a request using the ListDistributionsByWebACLIdRequest method. +// req, resp := client.ListDistributionsByWebACLIdRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFront) ListDistributionsByWebACLIdRequest(input *ListDistributionsByWebACLIdInput) (req *request.Request, output *ListDistributionsByWebACLIdOutput) { + op := &request.Operation{ + Name: opListDistributionsByWebACLId, + HTTPMethod: "GET", + HTTPPath: "/2016-01-28/distributionsByWebACLId/{WebACLId}", + } + + if input == nil { + input = &ListDistributionsByWebACLIdInput{} + } + + req = c.newRequest(op, input, output) + output = &ListDistributionsByWebACLIdOutput{} + req.Data = output + return +} + +// List the distributions that are associated with a specified AWS WAF web ACL. +func (c *CloudFront) ListDistributionsByWebACLId(input *ListDistributionsByWebACLIdInput) (*ListDistributionsByWebACLIdOutput, error) { + req, out := c.ListDistributionsByWebACLIdRequest(input) + err := req.Send() + return out, err +} + +const opListInvalidations = "ListInvalidations2016_01_28" + +// ListInvalidationsRequest generates a "aws/request.Request" representing the +// client's request for the ListInvalidations operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListInvalidations method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListInvalidationsRequest method. +// req, resp := client.ListInvalidationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFront) ListInvalidationsRequest(input *ListInvalidationsInput) (req *request.Request, output *ListInvalidationsOutput) { + op := &request.Operation{ + Name: opListInvalidations, + HTTPMethod: "GET", + HTTPPath: "/2016-01-28/distribution/{DistributionId}/invalidation", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"InvalidationList.NextMarker"}, + LimitToken: "MaxItems", + TruncationToken: "InvalidationList.IsTruncated", + }, + } + + if input == nil { + input = &ListInvalidationsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListInvalidationsOutput{} + req.Data = output + return +} + +// List invalidation batches. +func (c *CloudFront) ListInvalidations(input *ListInvalidationsInput) (*ListInvalidationsOutput, error) { + req, out := c.ListInvalidationsRequest(input) + err := req.Send() + return out, err +} + +// ListInvalidationsPages iterates over the pages of a ListInvalidations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListInvalidations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListInvalidations operation. 
+// pageNum := 0 +// err := client.ListInvalidationsPages(params, +// func(page *ListInvalidationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CloudFront) ListInvalidationsPages(input *ListInvalidationsInput, fn func(p *ListInvalidationsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListInvalidationsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListInvalidationsOutput), lastPage) + }) +} + +const opListStreamingDistributions = "ListStreamingDistributions2016_01_28" + +// ListStreamingDistributionsRequest generates a "aws/request.Request" representing the +// client's request for the ListStreamingDistributions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListStreamingDistributions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListStreamingDistributionsRequest method. +// req, resp := client.ListStreamingDistributionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFront) ListStreamingDistributionsRequest(input *ListStreamingDistributionsInput) (req *request.Request, output *ListStreamingDistributionsOutput) { + op := &request.Operation{ + Name: opListStreamingDistributions, + HTTPMethod: "GET", + HTTPPath: "/2016-01-28/streaming-distribution", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"StreamingDistributionList.NextMarker"}, + LimitToken: "MaxItems", + TruncationToken: "StreamingDistributionList.IsTruncated", + }, + } + + if input == nil { + input = &ListStreamingDistributionsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListStreamingDistributionsOutput{} + req.Data = output + return +} + +// List streaming distributions. +func (c *CloudFront) ListStreamingDistributions(input *ListStreamingDistributionsInput) (*ListStreamingDistributionsOutput, error) { + req, out := c.ListStreamingDistributionsRequest(input) + err := req.Send() + return out, err +} + +// ListStreamingDistributionsPages iterates over the pages of a ListStreamingDistributions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListStreamingDistributions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListStreamingDistributions operation. 
+// pageNum := 0 +// err := client.ListStreamingDistributionsPages(params, +// func(page *ListStreamingDistributionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CloudFront) ListStreamingDistributionsPages(input *ListStreamingDistributionsInput, fn func(p *ListStreamingDistributionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListStreamingDistributionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListStreamingDistributionsOutput), lastPage) + }) +} + +const opUpdateCloudFrontOriginAccessIdentity = "UpdateCloudFrontOriginAccessIdentity2016_01_28" + +// UpdateCloudFrontOriginAccessIdentityRequest generates a "aws/request.Request" representing the +// client's request for the UpdateCloudFrontOriginAccessIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateCloudFrontOriginAccessIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateCloudFrontOriginAccessIdentityRequest method. +// req, resp := client.UpdateCloudFrontOriginAccessIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFront) UpdateCloudFrontOriginAccessIdentityRequest(input *UpdateCloudFrontOriginAccessIdentityInput) (req *request.Request, output *UpdateCloudFrontOriginAccessIdentityOutput) { + op := &request.Operation{ + Name: opUpdateCloudFrontOriginAccessIdentity, + HTTPMethod: "PUT", + HTTPPath: "/2016-01-28/origin-access-identity/cloudfront/{Id}/config", + } + + if input == nil { + input = &UpdateCloudFrontOriginAccessIdentityInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateCloudFrontOriginAccessIdentityOutput{} + req.Data = output + return +} + +// Update an origin access identity. +func (c *CloudFront) UpdateCloudFrontOriginAccessIdentity(input *UpdateCloudFrontOriginAccessIdentityInput) (*UpdateCloudFrontOriginAccessIdentityOutput, error) { + req, out := c.UpdateCloudFrontOriginAccessIdentityRequest(input) + err := req.Send() + return out, err +} + +const opUpdateDistribution = "UpdateDistribution2016_01_28" + +// UpdateDistributionRequest generates a "aws/request.Request" representing the +// client's request for the UpdateDistribution operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateDistribution method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
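+//
+// UpdateDistribution follows CloudFront's read-modify-write cycle: fetch the
+// current configuration and its ETag with GetDistributionConfig, modify the
+// configuration, then send it back with the ETag as IfMatch. A sketch, assuming
+// the Id, IfMatch, DistributionConfig, and ETag field shapes used elsewhere
+// in this package:
+//
+// cfg, err := client.GetDistributionConfig(&GetDistributionConfigInput{
+// Id: aws.String("EDFDVBD632BHDS5"), // illustrative distribution ID
+// })
+// // ... modify cfg.DistributionConfig as needed ...
+// req, resp := client.UpdateDistributionRequest(&UpdateDistributionInput{
+// Id: aws.String("EDFDVBD632BHDS5"),
+// IfMatch: cfg.ETag,
+// DistributionConfig: cfg.DistributionConfig,
+// })
+// err = req.Send()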
+//
+// // Example sending a request using the UpdateDistributionRequest method.
+// req, resp := client.UpdateDistributionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CloudFront) UpdateDistributionRequest(input *UpdateDistributionInput) (req *request.Request, output *UpdateDistributionOutput) {
+ op := &request.Operation{
+ Name: opUpdateDistribution,
+ HTTPMethod: "PUT",
+ HTTPPath: "/2016-01-28/distribution/{Id}/config",
+ }
+
+ if input == nil {
+ input = &UpdateDistributionInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &UpdateDistributionOutput{}
+ req.Data = output
+ return
+}
+
+// Update a distribution.
+func (c *CloudFront) UpdateDistribution(input *UpdateDistributionInput) (*UpdateDistributionOutput, error) {
+ req, out := c.UpdateDistributionRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opUpdateStreamingDistribution = "UpdateStreamingDistribution2016_01_28"
+
+// UpdateStreamingDistributionRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateStreamingDistribution operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the UpdateStreamingDistribution method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the UpdateStreamingDistributionRequest method.
+// req, resp := client.UpdateStreamingDistributionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CloudFront) UpdateStreamingDistributionRequest(input *UpdateStreamingDistributionInput) (req *request.Request, output *UpdateStreamingDistributionOutput) {
+ op := &request.Operation{
+ Name: opUpdateStreamingDistribution,
+ HTTPMethod: "PUT",
+ HTTPPath: "/2016-01-28/streaming-distribution/{Id}/config",
+ }
+
+ if input == nil {
+ input = &UpdateStreamingDistributionInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &UpdateStreamingDistributionOutput{}
+ req.Data = output
+ return
+}
+
+// Update a streaming distribution.
+func (c *CloudFront) UpdateStreamingDistribution(input *UpdateStreamingDistributionInput) (*UpdateStreamingDistributionOutput, error) {
+ req, out := c.UpdateStreamingDistributionRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// A complex type that lists the AWS accounts, if any, that you included in
+// the TrustedSigners complex type for the default cache behavior or for any
+// of the other cache behaviors for this distribution. These are accounts that
+// you want to allow to create signed URLs for private content.
+type ActiveTrustedSigners struct {
+ _ struct{} `type:"structure"`
+
+ // Enabled is true if any of the AWS accounts listed in the TrustedSigners
+ // complex type for this distribution have active CloudFront key pairs; if
+ // none do, it is false.
+ Enabled *bool `type:"boolean" required:"true"`
+
+ // A complex type that contains one Signer complex type for each unique trusted
+ // signer that is specified in the TrustedSigners complex type, including trusted
+ // signers in the default cache behavior and in all of the other cache behaviors.
+ Items []*Signer `locationNameList:"Signer" type:"list"` + + // The number of unique trusted signers included in all cache behaviors. For + // example, if three cache behaviors all list the same three AWS accounts, the + // value of Quantity for ActiveTrustedSigners will be 3. + Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s ActiveTrustedSigners) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActiveTrustedSigners) GoString() string { + return s.String() +} + +// A complex type that contains information about CNAMEs (alternate domain names), +// if any, for this distribution. +type Aliases struct { + _ struct{} `type:"structure"` + + // Optional: A complex type that contains CNAME elements, if any, for this distribution. + // If Quantity is 0, you can omit Items. + Items []*string `locationNameList:"CNAME" type:"list"` + + // The number of CNAMEs, if any, for this distribution. + Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s Aliases) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Aliases) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Aliases) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Aliases"} + if s.Quantity == nil { + invalidParams.Add(request.NewErrParamRequired("Quantity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that controls which HTTP methods CloudFront processes and +// forwards to your Amazon S3 bucket or your custom origin. There are three +// choices: - CloudFront forwards only GET and HEAD requests. - CloudFront forwards +// only GET, HEAD and OPTIONS requests. - CloudFront forwards GET, HEAD, OPTIONS, +// PUT, PATCH, POST, and DELETE requests. If you pick the third choice, you +// may need to restrict access to your Amazon S3 bucket or to your custom origin +// so users can't perform operations that you don't want them to. For example, +// you may not want users to have permission to delete objects from your origin. +type AllowedMethods struct { + _ struct{} `type:"structure"` + + // A complex type that controls whether CloudFront caches the response to requests + // using the specified HTTP methods. There are two choices: - CloudFront caches + // responses to GET and HEAD requests. - CloudFront caches responses to GET, + // HEAD, and OPTIONS requests. If you pick the second choice for your S3 Origin, + // you may need to forward Access-Control-Request-Method, Access-Control-Request-Headers + // and Origin headers for the responses to be cached correctly. + CachedMethods *CachedMethods `type:"structure"` + + // A complex type that contains the HTTP methods that you want CloudFront to + // process and forward to your origin. + Items []*string `locationNameList:"Method" type:"list" required:"true"` + + // The number of HTTP methods that you want CloudFront to forward to your origin. + // Valid values are 2 (for GET and HEAD requests), 3 (for GET, HEAD and OPTIONS + // requests) and 7 (for GET, HEAD, OPTIONS, PUT, PATCH, POST, and DELETE requests). 
+ Quantity *int64 `type:"integer" required:"true"`
+}
+
+// String returns the string representation
+func (s AllowedMethods) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AllowedMethods) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AllowedMethods) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AllowedMethods"}
+ if s.Items == nil {
+ invalidParams.Add(request.NewErrParamRequired("Items"))
+ }
+ if s.Quantity == nil {
+ invalidParams.Add(request.NewErrParamRequired("Quantity"))
+ }
+ if s.CachedMethods != nil {
+ if err := s.CachedMethods.Validate(); err != nil {
+ invalidParams.AddNested("CachedMethods", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// A complex type that describes how CloudFront processes requests. You can
+// create up to 10 cache behaviors. You must create at least as many cache behaviors
+// (including the default cache behavior) as you have origins if you want CloudFront
+// to distribute objects from all of the origins. Each cache behavior specifies
+// the one origin from which you want CloudFront to get objects. If you have
+// two origins and only the default cache behavior, the default cache behavior
+// will cause CloudFront to get objects from one of the origins, but the other
+// origin will never be used. If you don't want to specify any cache behaviors,
+// include only an empty CacheBehaviors element. Don't include an empty CacheBehavior
+// element, or CloudFront returns a MalformedXML error. To delete all cache
+// behaviors in an existing distribution, update the distribution configuration
+// and include only an empty CacheBehaviors element. To add, change, or remove
+// one or more cache behaviors, update the distribution configuration and specify
+// all of the cache behaviors that you want to include in the updated distribution.
+type CacheBehavior struct {
+ _ struct{} `type:"structure"`
+
+ // A complex type that controls which HTTP methods CloudFront processes and
+ // forwards to your Amazon S3 bucket or your custom origin. There are three
+ // choices: - CloudFront forwards only GET and HEAD requests. - CloudFront forwards
+ // only GET, HEAD and OPTIONS requests. - CloudFront forwards GET, HEAD, OPTIONS,
+ // PUT, PATCH, POST, and DELETE requests. If you pick the third choice, you
+ // may need to restrict access to your Amazon S3 bucket or to your custom origin
+ // so users can't perform operations that you don't want them to. For example,
+ // you may not want users to have permission to delete objects from your origin.
+ AllowedMethods *AllowedMethods `type:"structure"`
+
+ // Whether you want CloudFront to automatically compress content for web requests
+ // that include Accept-Encoding: gzip in the request header. If so, specify
+ // true; if not, specify false. CloudFront compresses files larger than 1000
+ // bytes and less than 1 megabyte for both Amazon S3 and custom origins. When
+ // a CloudFront edge location is unusually busy, some files might not be compressed.
+ // The value of the Content-Type header must be on the list of file types that
+ // CloudFront will compress. For the current list, see Serving Compressed Content
+ // (http://docs.aws.amazon.com/console/cloudfront/compressed-content) in the
+ // Amazon CloudFront Developer Guide. If you configure CloudFront to compress
+ // content, CloudFront removes the ETag response header from the objects that
+ // it compresses. The ETag header indicates that the version in a CloudFront
+ // edge cache is identical to the version on the origin server, but after compression
+ // the two versions are no longer identical. As a result, for compressed objects,
+ // CloudFront can't use the ETag header to determine whether an expired object
+ // in the CloudFront edge cache is still the latest version.
+ Compress *bool `type:"boolean"`
+
+ // If you don't configure your origin to add a Cache-Control max-age directive
+ // or an Expires header, DefaultTTL is the default amount of time (in seconds)
+ // that an object is in a CloudFront cache before CloudFront forwards another
+ // request to your origin to determine whether the object has been updated.
+ // The value that you specify applies only when your origin does not add HTTP
+ // headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires
+ // to objects. You can specify a value from 0 to 3,153,600,000 seconds (100
+ // years).
+ DefaultTTL *int64 `type:"long"`
+
+ // A complex type that specifies how CloudFront handles query strings, cookies
+ // and headers.
+ ForwardedValues *ForwardedValues `type:"structure" required:"true"`
+
+ // The maximum amount of time (in seconds) that an object is in a CloudFront
+ // cache before CloudFront forwards another request to your origin to determine
+ // whether the object has been updated. The value that you specify applies only
+ // when your origin adds HTTP headers such as Cache-Control max-age, Cache-Control
+ // s-maxage, and Expires to objects. You can specify a value from 0 to 3,153,600,000
+ // seconds (100 years).
+ MaxTTL *int64 `type:"long"`
+
+ // The minimum amount of time that you want objects to stay in CloudFront caches
+ // before CloudFront queries your origin to see whether the object has been
+ // updated. You can specify a value from 0 to 3,153,600,000 seconds (100 years).
+ MinTTL *int64 `type:"long" required:"true"`
+
+ // The pattern (for example, images/*.jpg) that specifies which requests you
+ // want this cache behavior to apply to. When CloudFront receives an end-user
+ // request, the requested path is compared with path patterns in the order in
+ // which cache behaviors are listed in the distribution. The path pattern for
+ // the default cache behavior is * and cannot be changed. If the request for
+ // an object does not match the path pattern for any cache behaviors, CloudFront
+ // applies the behavior in the default cache behavior.
+ PathPattern *string `type:"string" required:"true"`
+
+ // Indicates whether you want to distribute media files in Microsoft Smooth
+ // Streaming format using the origin that is associated with this cache behavior.
+ // If so, specify true; if not, specify false.
+ SmoothStreaming *bool `type:"boolean"`
+
+ // The value of ID for the origin that you want CloudFront to route requests
+ // to when a request matches the path pattern either for a cache behavior or
+ // for the default cache behavior.
+ TargetOriginId *string `type:"string" required:"true"`
+
+ // A complex type that specifies the AWS accounts, if any, that you want to
+ // allow to create signed URLs for private content.
If you want to require signed
+ // URLs in requests for objects in the target origin that match the PathPattern
+ // for this cache behavior, specify true for Enabled, and specify the applicable
+ // values for Quantity and Items. For more information, go to Using a Signed
+ // URL to Serve Private Content in the Amazon CloudFront Developer Guide. If
+ // you don't want to require signed URLs in requests for objects that match
+ // PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To
+ // add, change, or remove one or more trusted signers, change Enabled to true
+ // (if it's currently false), change Quantity as applicable, and specify all
+ // of the trusted signers that you want to include in the updated distribution.
+ TrustedSigners *TrustedSigners `type:"structure" required:"true"`
+
+ // Use this element to specify the protocol that users can use to access the
+ // files in the origin specified by TargetOriginId when a request matches the
+ // path pattern in PathPattern. If you want CloudFront to allow end users to
+ // use any available protocol, specify allow-all. If you want CloudFront to
+ // require HTTPS, specify https-only. If you want CloudFront to respond to an HTTP
+ // request with an HTTP status code of 301 (Moved Permanently) and the HTTPS
+ // URL, specify redirect-to-https. The viewer then resubmits the request using
+ // the HTTPS URL.
+ ViewerProtocolPolicy *string `type:"string" required:"true" enum:"ViewerProtocolPolicy"`
+}
+
+// String returns the string representation
+func (s CacheBehavior) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CacheBehavior) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CacheBehavior) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CacheBehavior"}
+ if s.ForwardedValues == nil {
+ invalidParams.Add(request.NewErrParamRequired("ForwardedValues"))
+ }
+ if s.MinTTL == nil {
+ invalidParams.Add(request.NewErrParamRequired("MinTTL"))
+ }
+ if s.PathPattern == nil {
+ invalidParams.Add(request.NewErrParamRequired("PathPattern"))
+ }
+ if s.TargetOriginId == nil {
+ invalidParams.Add(request.NewErrParamRequired("TargetOriginId"))
+ }
+ if s.TrustedSigners == nil {
+ invalidParams.Add(request.NewErrParamRequired("TrustedSigners"))
+ }
+ if s.ViewerProtocolPolicy == nil {
+ invalidParams.Add(request.NewErrParamRequired("ViewerProtocolPolicy"))
+ }
+ if s.AllowedMethods != nil {
+ if err := s.AllowedMethods.Validate(); err != nil {
+ invalidParams.AddNested("AllowedMethods", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.ForwardedValues != nil {
+ if err := s.ForwardedValues.Validate(); err != nil {
+ invalidParams.AddNested("ForwardedValues", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.TrustedSigners != nil {
+ if err := s.TrustedSigners.Validate(); err != nil {
+ invalidParams.AddNested("TrustedSigners", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// A complex type that contains zero or more CacheBehavior elements.
+type CacheBehaviors struct {
+ _ struct{} `type:"structure"`
+
+ // Optional: A complex type that contains cache behaviors for this distribution.
+ // If Quantity is 0, you can omit Items.
+ Items []*CacheBehavior `locationNameList:"CacheBehavior" type:"list"`
+
+ // The number of cache behaviors for this distribution.
+ Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s CacheBehaviors) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheBehaviors) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CacheBehaviors) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CacheBehaviors"} + if s.Quantity == nil { + invalidParams.Add(request.NewErrParamRequired("Quantity")) + } + if s.Items != nil { + for i, v := range s.Items { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Items", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that controls whether CloudFront caches the response to requests +// using the specified HTTP methods. There are two choices: - CloudFront caches +// responses to GET and HEAD requests. - CloudFront caches responses to GET, +// HEAD, and OPTIONS requests. If you pick the second choice for your S3 Origin, +// you may need to forward Access-Control-Request-Method, Access-Control-Request-Headers +// and Origin headers for the responses to be cached correctly. +type CachedMethods struct { + _ struct{} `type:"structure"` + + // A complex type that contains the HTTP methods that you want CloudFront to + // cache responses to. + Items []*string `locationNameList:"Method" type:"list" required:"true"` + + // The number of HTTP methods for which you want CloudFront to cache responses. + // Valid values are 2 (for caching responses to GET and HEAD requests) and 3 + // (for caching responses to GET, HEAD, and OPTIONS requests). + Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s CachedMethods) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CachedMethods) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CachedMethods) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CachedMethods"} + if s.Items == nil { + invalidParams.Add(request.NewErrParamRequired("Items")) + } + if s.Quantity == nil { + invalidParams.Add(request.NewErrParamRequired("Quantity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that specifies the whitelisted cookies, if any, that you want +// CloudFront to forward to your origin that is associated with this cache behavior. +type CookieNames struct { + _ struct{} `type:"structure"` + + // Optional: A complex type that contains whitelisted cookies for this cache + // behavior. If Quantity is 0, you can omit Items. + Items []*string `locationNameList:"Name" type:"list"` + + // The number of whitelisted cookies for this cache behavior. + Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s CookieNames) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CookieNames) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
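+// The SDK also runs this validation automatically before a request is sent;
+// calling it directly just surfaces missing required fields earlier. A sketch
+// with illustrative values:
+//
+// names := &CookieNames{
+// Quantity: aws.Int64(1),
+// Items: []*string{aws.String("session-id")},
+// }
+// if err := names.Validate(); err != nil {
+// fmt.Println(err) // lists any missing required fields
+// }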
+func (s *CookieNames) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CookieNames"} + if s.Quantity == nil { + invalidParams.Add(request.NewErrParamRequired("Quantity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that specifies the cookie preferences associated with this +// cache behavior. +type CookiePreference struct { + _ struct{} `type:"structure"` + + // Use this element to specify whether you want CloudFront to forward cookies + // to the origin that is associated with this cache behavior. You can specify + // all, none or whitelist. If you choose All, CloudFront forwards all cookies + // regardless of how many your application uses. + Forward *string `type:"string" required:"true" enum:"ItemSelection"` + + // A complex type that specifies the whitelisted cookies, if any, that you want + // CloudFront to forward to your origin that is associated with this cache behavior. + WhitelistedNames *CookieNames `type:"structure"` +} + +// String returns the string representation +func (s CookiePreference) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CookiePreference) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CookiePreference) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CookiePreference"} + if s.Forward == nil { + invalidParams.Add(request.NewErrParamRequired("Forward")) + } + if s.WhitelistedNames != nil { + if err := s.WhitelistedNames.Validate(); err != nil { + invalidParams.AddNested("WhitelistedNames", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The request to create a new origin access identity. +type CreateCloudFrontOriginAccessIdentityInput struct { + _ struct{} `type:"structure" payload:"CloudFrontOriginAccessIdentityConfig"` + + // The origin access identity's configuration information. + CloudFrontOriginAccessIdentityConfig *OriginAccessIdentityConfig `locationName:"CloudFrontOriginAccessIdentityConfig" type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateCloudFrontOriginAccessIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCloudFrontOriginAccessIdentityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateCloudFrontOriginAccessIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateCloudFrontOriginAccessIdentityInput"} + if s.CloudFrontOriginAccessIdentityConfig == nil { + invalidParams.Add(request.NewErrParamRequired("CloudFrontOriginAccessIdentityConfig")) + } + if s.CloudFrontOriginAccessIdentityConfig != nil { + if err := s.CloudFrontOriginAccessIdentityConfig.Validate(); err != nil { + invalidParams.AddNested("CloudFrontOriginAccessIdentityConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The returned result of the corresponding request. +type CreateCloudFrontOriginAccessIdentityOutput struct { + _ struct{} `type:"structure" payload:"CloudFrontOriginAccessIdentity"` + + // The origin access identity's information. 
+ CloudFrontOriginAccessIdentity *OriginAccessIdentity `type:"structure"` + + // The current version of the origin access identity created. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // The fully qualified URI of the new origin access identity just created. For + // example: https://cloudfront.amazonaws.com/2010-11-01/origin-access-identity/cloudfront/E74FTE3AJFJ256A. + Location *string `location:"header" locationName:"Location" type:"string"` +} + +// String returns the string representation +func (s CreateCloudFrontOriginAccessIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCloudFrontOriginAccessIdentityOutput) GoString() string { + return s.String() +} + +// The request to create a new distribution. +type CreateDistributionInput struct { + _ struct{} `type:"structure" payload:"DistributionConfig"` + + // The distribution's configuration information. + DistributionConfig *DistributionConfig `locationName:"DistributionConfig" type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateDistributionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDistributionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDistributionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDistributionInput"} + if s.DistributionConfig == nil { + invalidParams.Add(request.NewErrParamRequired("DistributionConfig")) + } + if s.DistributionConfig != nil { + if err := s.DistributionConfig.Validate(); err != nil { + invalidParams.AddNested("DistributionConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The returned result of the corresponding request. +type CreateDistributionOutput struct { + _ struct{} `type:"structure" payload:"Distribution"` + + // The distribution's information. + Distribution *Distribution `type:"structure"` + + // The current version of the distribution created. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // The fully qualified URI of the new distribution resource just created. For + // example: https://cloudfront.amazonaws.com/2010-11-01/distribution/EDFDVBD632BHDS5. + Location *string `location:"header" locationName:"Location" type:"string"` +} + +// String returns the string representation +func (s CreateDistributionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDistributionOutput) GoString() string { + return s.String() +} + +// The request to create an invalidation. +type CreateInvalidationInput struct { + _ struct{} `type:"structure" payload:"InvalidationBatch"` + + // The distribution's id. + DistributionId *string `location:"uri" locationName:"DistributionId" type:"string" required:"true"` + + // The batch information for the invalidation. 
+ InvalidationBatch *InvalidationBatch `locationName:"InvalidationBatch" type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateInvalidationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInvalidationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateInvalidationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateInvalidationInput"} + if s.DistributionId == nil { + invalidParams.Add(request.NewErrParamRequired("DistributionId")) + } + if s.InvalidationBatch == nil { + invalidParams.Add(request.NewErrParamRequired("InvalidationBatch")) + } + if s.InvalidationBatch != nil { + if err := s.InvalidationBatch.Validate(); err != nil { + invalidParams.AddNested("InvalidationBatch", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The returned result of the corresponding request. +type CreateInvalidationOutput struct { + _ struct{} `type:"structure" payload:"Invalidation"` + + // The invalidation's information. + Invalidation *Invalidation `type:"structure"` + + // The fully qualified URI of the distribution and invalidation batch request, + // including the Invalidation ID. + Location *string `location:"header" locationName:"Location" type:"string"` +} + +// String returns the string representation +func (s CreateInvalidationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInvalidationOutput) GoString() string { + return s.String() +} + +// The request to create a new streaming distribution. +type CreateStreamingDistributionInput struct { + _ struct{} `type:"structure" payload:"StreamingDistributionConfig"` + + // The streaming distribution's configuration information. + StreamingDistributionConfig *StreamingDistributionConfig `locationName:"StreamingDistributionConfig" type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateStreamingDistributionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateStreamingDistributionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateStreamingDistributionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateStreamingDistributionInput"} + if s.StreamingDistributionConfig == nil { + invalidParams.Add(request.NewErrParamRequired("StreamingDistributionConfig")) + } + if s.StreamingDistributionConfig != nil { + if err := s.StreamingDistributionConfig.Validate(); err != nil { + invalidParams.AddNested("StreamingDistributionConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The returned result of the corresponding request. +type CreateStreamingDistributionOutput struct { + _ struct{} `type:"structure" payload:"StreamingDistribution"` + + // The current version of the streaming distribution created. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // The fully qualified URI of the new streaming distribution resource just created. + // For example: https://cloudfront.amazonaws.com/2010-11-01/streaming-distribution/EGTXBD79H29TRA8. 
+ Location *string `location:"header" locationName:"Location" type:"string"`
+
+ // The streaming distribution's information.
+ StreamingDistribution *StreamingDistribution `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateStreamingDistributionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateStreamingDistributionOutput) GoString() string {
+ return s.String()
+}
+
+// A complex type that describes how you'd prefer CloudFront to respond to requests
+// that result in either a 4xx or 5xx response. You can control whether a custom
+// error page should be displayed, what the desired response code should be
+// for this error page and how long the error response should be cached by CloudFront.
+// If you don't want to specify any custom error responses, include only an
+// empty CustomErrorResponses element. To delete all custom error responses
+// in an existing distribution, update the distribution configuration and include
+// only an empty CustomErrorResponses element. To add, change, or remove one
+// or more custom error responses, update the distribution configuration and
+// specify all of the custom error responses that you want to include in the
+// updated distribution.
+type CustomErrorResponse struct {
+ _ struct{} `type:"structure"`
+
+ // The minimum amount of time you want HTTP error codes to stay in CloudFront
+ // caches before CloudFront queries your origin to see whether the object has
+ // been updated. You can specify a value from 0 to 31,536,000.
+ ErrorCachingMinTTL *int64 `type:"long"`
+
+ // The 4xx or 5xx HTTP status code that you want to customize. For a list of
+ // HTTP status codes that you can customize, see CloudFront documentation.
+ ErrorCode *int64 `type:"integer" required:"true"`
+
+ // The HTTP status code that you want CloudFront to return with the custom error
+ // page to the viewer. For a list of HTTP status codes that you can replace,
+ // see CloudFront Documentation.
+ ResponseCode *string `type:"string"`
+
+ // The path of the custom error page (for example, /custom_404.html). The path
+ // is relative to the distribution and must begin with a slash (/). If the path
+ // includes any non-ASCII characters or unsafe characters as defined in RFC
+ // 1738 (http://www.ietf.org/rfc/rfc1738.txt), URL encode those characters.
+ // Do not URL encode any other characters in the path, or CloudFront will not
+ // return the custom error page to the viewer.
+ ResponsePagePath *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CustomErrorResponse) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CustomErrorResponse) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CustomErrorResponse) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CustomErrorResponse"}
+ if s.ErrorCode == nil {
+ invalidParams.Add(request.NewErrParamRequired("ErrorCode"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// A complex type that contains zero or more CustomErrorResponse elements.
+type CustomErrorResponses struct {
+ _ struct{} `type:"structure"`
+
+ // Optional: A complex type that contains custom error responses for this distribution.
+ // If Quantity is 0, you can omit Items.
+ Items []*CustomErrorResponse `locationNameList:"CustomErrorResponse" type:"list"` + + // The number of custom error responses for this distribution. + Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s CustomErrorResponses) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CustomErrorResponses) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CustomErrorResponses) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CustomErrorResponses"} + if s.Quantity == nil { + invalidParams.Add(request.NewErrParamRequired("Quantity")) + } + if s.Items != nil { + for i, v := range s.Items { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Items", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains the list of Custom Headers for each origin. +type CustomHeaders struct { + _ struct{} `type:"structure"` + + // A complex type that contains the custom headers for this Origin. + Items []*OriginCustomHeader `locationNameList:"OriginCustomHeader" type:"list"` + + // The number of custom headers for this origin. + Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s CustomHeaders) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CustomHeaders) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CustomHeaders) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CustomHeaders"} + if s.Quantity == nil { + invalidParams.Add(request.NewErrParamRequired("Quantity")) + } + if s.Items != nil { + for i, v := range s.Items { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Items", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A customer origin. +type CustomOriginConfig struct { + _ struct{} `type:"structure"` + + // The HTTP port the custom origin listens on. + HTTPPort *int64 `type:"integer" required:"true"` + + // The HTTPS port the custom origin listens on. + HTTPSPort *int64 `type:"integer" required:"true"` + + // The origin protocol policy to apply to your origin. + OriginProtocolPolicy *string `type:"string" required:"true" enum:"OriginProtocolPolicy"` + + // The SSL/TLS protocols that you want CloudFront to use when communicating + // with your origin over HTTPS. + OriginSslProtocols *OriginSslProtocols `type:"structure"` +} + +// String returns the string representation +func (s CustomOriginConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CustomOriginConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CustomOriginConfig) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CustomOriginConfig"}
+ if s.HTTPPort == nil {
+ invalidParams.Add(request.NewErrParamRequired("HTTPPort"))
+ }
+ if s.HTTPSPort == nil {
+ invalidParams.Add(request.NewErrParamRequired("HTTPSPort"))
+ }
+ if s.OriginProtocolPolicy == nil {
+ invalidParams.Add(request.NewErrParamRequired("OriginProtocolPolicy"))
+ }
+ if s.OriginSslProtocols != nil {
+ if err := s.OriginSslProtocols.Validate(); err != nil {
+ invalidParams.AddNested("OriginSslProtocols", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// A complex type that describes the default cache behavior if you do not specify
+// a CacheBehavior element or if files don't match any of the values of PathPattern
+// in CacheBehavior elements. You must create exactly one default cache behavior.
+type DefaultCacheBehavior struct {
+ _ struct{} `type:"structure"`
+
+ // A complex type that controls which HTTP methods CloudFront processes and
+ // forwards to your Amazon S3 bucket or your custom origin. There are three
+ // choices: - CloudFront forwards only GET and HEAD requests. - CloudFront forwards
+ // only GET, HEAD and OPTIONS requests. - CloudFront forwards GET, HEAD, OPTIONS,
+ // PUT, PATCH, POST, and DELETE requests. If you pick the third choice, you
+ // may need to restrict access to your Amazon S3 bucket or to your custom origin
+ // so users can't perform operations that you don't want them to. For example,
+ // you may not want users to have permission to delete objects from your origin.
+ AllowedMethods *AllowedMethods `type:"structure"`
+
+ // Whether you want CloudFront to automatically compress content for web requests
+ // that include Accept-Encoding: gzip in the request header. If so, specify
+ // true; if not, specify false. CloudFront compresses files larger than 1000
+ // bytes and less than 1 megabyte for both Amazon S3 and custom origins. When
+ // a CloudFront edge location is unusually busy, some files might not be compressed.
+ // The value of the Content-Type header must be on the list of file types that
+ // CloudFront will compress. For the current list, see Serving Compressed Content
+ // (http://docs.aws.amazon.com/console/cloudfront/compressed-content) in the
+ // Amazon CloudFront Developer Guide. If you configure CloudFront to compress
+ // content, CloudFront removes the ETag response header from the objects that
+ // it compresses. The ETag header indicates that the version in a CloudFront
+ // edge cache is identical to the version on the origin server, but after compression
+ // the two versions are no longer identical. As a result, for compressed objects,
+ // CloudFront can't use the ETag header to determine whether an expired object
+ // in the CloudFront edge cache is still the latest version.
+ Compress *bool `type:"boolean"`
+
+ // If you don't configure your origin to add a Cache-Control max-age directive
+ // or an Expires header, DefaultTTL is the default amount of time (in seconds)
+ // that an object is in a CloudFront cache before CloudFront forwards another
+ // request to your origin to determine whether the object has been updated.
+ // The value that you specify applies only when your origin does not add HTTP
+ // headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires
+ // to objects. You can specify a value from 0 to 3,153,600,000 seconds (100
+ // years).
+ DefaultTTL *int64 `type:"long"`
+
+ // A complex type that specifies how CloudFront handles query strings, cookies
+ // and headers.
+ ForwardedValues *ForwardedValues `type:"structure" required:"true"`
+
+ // The maximum amount of time (in seconds) that an object is in a CloudFront
+ // cache before CloudFront forwards another request to your origin to determine
+ // whether the object has been updated. The value that you specify applies only
+ // when your origin adds HTTP headers such as Cache-Control max-age, Cache-Control
+ // s-maxage, and Expires to objects. You can specify a value from 0 to 3,153,600,000
+ // seconds (100 years).
+ MaxTTL *int64 `type:"long"`
+
+ // The minimum amount of time that you want objects to stay in CloudFront caches
+ // before CloudFront queries your origin to see whether the object has been
+ // updated. You can specify a value from 0 to 3,153,600,000 seconds (100 years).
+ MinTTL *int64 `type:"long" required:"true"`
+
+ // Indicates whether you want to distribute media files in Microsoft Smooth
+ // Streaming format using the origin that is associated with this cache behavior.
+ // If so, specify true; if not, specify false.
+ SmoothStreaming *bool `type:"boolean"`
+
+ // The value of ID for the origin that you want CloudFront to route requests
+ // to when a request matches the path pattern either for a cache behavior or
+ // for the default cache behavior.
+ TargetOriginId *string `type:"string" required:"true"`
+
+ // A complex type that specifies the AWS accounts, if any, that you want to
+ // allow to create signed URLs for private content. If you want to require signed
+ // URLs in requests for objects in the target origin that match the PathPattern
+ // for this cache behavior, specify true for Enabled, and specify the applicable
+ // values for Quantity and Items. For more information, go to Using a Signed
+ // URL to Serve Private Content in the Amazon CloudFront Developer Guide. If
+ // you don't want to require signed URLs in requests for objects that match
+ // PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To
+ // add, change, or remove one or more trusted signers, change Enabled to true
+ // (if it's currently false), change Quantity as applicable, and specify all
+ // of the trusted signers that you want to include in the updated distribution.
+ TrustedSigners *TrustedSigners `type:"structure" required:"true"`
+
+ // Use this element to specify the protocol that users can use to access the
+ // files in the origin specified by TargetOriginId when a request matches the
+ // path pattern in PathPattern. If you want CloudFront to allow end users to
+ // use any available protocol, specify allow-all. If you want CloudFront to
+ // require HTTPS, specify https. If you want CloudFront to respond to an HTTP
+ // request with an HTTP status code of 301 (Moved Permanently) and the HTTPS
+ // URL, specify redirect-to-https. The viewer then resubmits the request using
+ // the HTTPS URL.
+ ViewerProtocolPolicy *string `type:"string" required:"true" enum:"ViewerProtocolPolicy"`
+}
+
+// String returns the string representation
+func (s DefaultCacheBehavior) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DefaultCacheBehavior) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
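+//
+// The checks below make ForwardedValues, MinTTL, TargetOriginId, TrustedSigners,
+// and ViewerProtocolPolicy the required core of a default cache behavior. A
+// minimal, hypothetical construction (CookiePreference's Forward field is
+// assumed from the service model; it is not shown in this excerpt):
+//
+//    dcb := &cloudfront.DefaultCacheBehavior{
+//        TargetOriginId:       aws.String("my-origin"),
+//        ViewerProtocolPolicy: aws.String("allow-all"),
+//        MinTTL:               aws.Int64(0),
+//        ForwardedValues: &cloudfront.ForwardedValues{
+//            QueryString: aws.Bool(false),
+//            Cookies:     &cloudfront.CookiePreference{Forward: aws.String("none")},
+//        },
+//        TrustedSigners: &cloudfront.TrustedSigners{Enabled: aws.Bool(false), Quantity: aws.Int64(0)},
+//    }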
+func (s *DefaultCacheBehavior) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DefaultCacheBehavior"}
+ if s.ForwardedValues == nil {
+ invalidParams.Add(request.NewErrParamRequired("ForwardedValues"))
+ }
+ if s.MinTTL == nil {
+ invalidParams.Add(request.NewErrParamRequired("MinTTL"))
+ }
+ if s.TargetOriginId == nil {
+ invalidParams.Add(request.NewErrParamRequired("TargetOriginId"))
+ }
+ if s.TrustedSigners == nil {
+ invalidParams.Add(request.NewErrParamRequired("TrustedSigners"))
+ }
+ if s.ViewerProtocolPolicy == nil {
+ invalidParams.Add(request.NewErrParamRequired("ViewerProtocolPolicy"))
+ }
+ if s.AllowedMethods != nil {
+ if err := s.AllowedMethods.Validate(); err != nil {
+ invalidParams.AddNested("AllowedMethods", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.ForwardedValues != nil {
+ if err := s.ForwardedValues.Validate(); err != nil {
+ invalidParams.AddNested("ForwardedValues", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.TrustedSigners != nil {
+ if err := s.TrustedSigners.Validate(); err != nil {
+ invalidParams.AddNested("TrustedSigners", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// The request to delete an origin access identity.
+type DeleteCloudFrontOriginAccessIdentityInput struct {
+ _ struct{} `type:"structure"`
+
+ // The origin access identity's id.
+ Id *string `location:"uri" locationName:"Id" type:"string" required:"true"`
+
+ // The value of the ETag header you received from a previous GET or PUT request.
+ // For example: E2QWRUHAPOMQZL.
+ IfMatch *string `location:"header" locationName:"If-Match" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteCloudFrontOriginAccessIdentityInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteCloudFrontOriginAccessIdentityInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteCloudFrontOriginAccessIdentityInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteCloudFrontOriginAccessIdentityInput"}
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type DeleteCloudFrontOriginAccessIdentityOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteCloudFrontOriginAccessIdentityOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteCloudFrontOriginAccessIdentityOutput) GoString() string {
+ return s.String()
+}
+
+// The request to delete a distribution.
+type DeleteDistributionInput struct {
+ _ struct{} `type:"structure"`
+
+ // The distribution id.
+ Id *string `location:"uri" locationName:"Id" type:"string" required:"true"`
+
+ // The value of the ETag header you received when you disabled the distribution.
+ // For example: E2QWRUHAPOMQZL.
+ IfMatch *string `location:"header" locationName:"If-Match" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteDistributionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteDistributionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
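+//
+// Deletion is guarded by an ETag precondition: IfMatch must carry the ETag
+// header you received when the distribution was disabled. A hypothetical
+// sketch (svc is an assumed, already-configured *cloudfront.CloudFront
+// client; etag is the value returned by the disabling update):
+//
+//    _, err := svc.DeleteDistribution(&cloudfront.DeleteDistributionInput{
+//        Id:      aws.String("EDFDVBD632BHDS5"),
+//        IfMatch: etag,
+//    })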
+func (s *DeleteDistributionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDistributionInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteDistributionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDistributionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDistributionOutput) GoString() string { + return s.String() +} + +// The request to delete a streaming distribution. +type DeleteStreamingDistributionInput struct { + _ struct{} `type:"structure"` + + // The distribution id. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The value of the ETag header you received when you disabled the streaming + // distribution. For example: E2QWRUHAPOMQZL. + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` +} + +// String returns the string representation +func (s DeleteStreamingDistributionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteStreamingDistributionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteStreamingDistributionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteStreamingDistributionInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteStreamingDistributionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteStreamingDistributionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteStreamingDistributionOutput) GoString() string { + return s.String() +} + +// A distribution. +type Distribution struct { + _ struct{} `type:"structure"` + + // CloudFront automatically adds this element to the response only if you've + // set up the distribution to serve private content with signed URLs. The element + // lists the key pair IDs that CloudFront is aware of for each trusted signer. + // The Signer child element lists the AWS account number of the trusted signer + // (or an empty Self element if the signer is you). The Signer element also + // includes the IDs of any active key pairs associated with the trusted signer's + // AWS account. If no KeyPairId element appears for a Signer, that signer can't + // create working signed URLs. + ActiveTrustedSigners *ActiveTrustedSigners `type:"structure" required:"true"` + + // The current configuration information for the distribution. + DistributionConfig *DistributionConfig `type:"structure" required:"true"` + + // The domain name corresponding to the distribution. For example: d604721fxaaqy9.cloudfront.net. + DomainName *string `type:"string" required:"true"` + + // The identifier for the distribution. For example: EDFDVBD632BHDS5. + Id *string `type:"string" required:"true"` + + // The number of invalidation batches currently in progress. + InProgressInvalidationBatches *int64 `type:"integer" required:"true"` + + // The date and time the distribution was last modified. 
+ LastModifiedTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
+
+ // This response element indicates the current status of the distribution. When
+ // the status is Deployed, the distribution's information is fully propagated
+ // throughout the Amazon CloudFront system.
+ Status *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s Distribution) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Distribution) GoString() string {
+ return s.String()
+}
+
+// A distribution configuration.
+type DistributionConfig struct {
+ _ struct{} `type:"structure"`
+
+ // A complex type that contains information about CNAMEs (alternate domain names),
+ // if any, for this distribution.
+ Aliases *Aliases `type:"structure"`
+
+ // A complex type that contains zero or more CacheBehavior elements.
+ CacheBehaviors *CacheBehaviors `type:"structure"`
+
+ // A unique number that ensures the request can't be replayed. If the CallerReference
+ // is new (no matter the content of the DistributionConfig object), a new distribution
+ // is created. If the CallerReference is a value you already sent in a previous
+ // request to create a distribution, and the content of the DistributionConfig
+ // is identical to the original request (ignoring white space), the response
+ // includes the same information returned to the original request. If the CallerReference
+ // is a value you already sent in a previous request to create a distribution
+ // but the content of the DistributionConfig is different from the original
+ // request, CloudFront returns a DistributionAlreadyExists error.
+ CallerReference *string `type:"string" required:"true"`
+
+ // Any comments you want to include about the distribution.
+ Comment *string `type:"string" required:"true"`
+
+ // A complex type that contains zero or more CustomErrorResponse elements.
+ CustomErrorResponses *CustomErrorResponses `type:"structure"`
+
+ // A complex type that describes the default cache behavior if you do not specify
+ // a CacheBehavior element or if files don't match any of the values of PathPattern
+ // in CacheBehavior elements. You must create exactly one default cache behavior.
+ DefaultCacheBehavior *DefaultCacheBehavior `type:"structure" required:"true"`
+
+ // The object that you want CloudFront to return (for example, index.html) when
+ // an end user requests the root URL for your distribution (http://www.example.com)
+ // instead of an object in your distribution (http://www.example.com/index.html).
+ // Specifying a default root object avoids exposing the contents of your distribution.
+ // If you don't want to specify a default root object when you create a distribution,
+ // include an empty DefaultRootObject element. To delete the default root object
+ // from an existing distribution, update the distribution configuration and
+ // include an empty DefaultRootObject element. To replace the default root object,
+ // update the distribution configuration and specify the new object.
+ DefaultRootObject *string `type:"string"`
+
+ // Whether the distribution is enabled to accept end user requests for content.
+ Enabled *bool `type:"boolean" required:"true"`
+
+ // A complex type that controls whether access logs are written for the distribution.
+ Logging *LoggingConfig `type:"structure"`
+
+ // A complex type that contains information about origins for this distribution.
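+ // A hypothetical single-origin value, assuming Origins follows the same
+ // Items/Quantity convention as the other list types in this file and that
+ // S3OriginConfig has an OriginAccessIdentity field (neither is shown in this
+ // excerpt):
+ //
+ //    &cloudfront.Origins{
+ //        Quantity: aws.Int64(1),
+ //        Items: []*cloudfront.Origin{{
+ //            Id:             aws.String("my-s3-origin"),
+ //            DomainName:     aws.String("myawsbucket.s3.amazonaws.com"),
+ //            S3OriginConfig: &cloudfront.S3OriginConfig{OriginAccessIdentity: aws.String("")},
+ //        }},
+ //    }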
+ Origins *Origins `type:"structure" required:"true"` + + // A complex type that contains information about price class for this distribution. + PriceClass *string `type:"string" enum:"PriceClass"` + + // A complex type that identifies ways in which you want to restrict distribution + // of your content. + Restrictions *Restrictions `type:"structure"` + + // A complex type that contains information about viewer certificates for this + // distribution. + ViewerCertificate *ViewerCertificate `type:"structure"` + + // (Optional) If you're using AWS WAF to filter CloudFront requests, the Id + // of the AWS WAF web ACL that is associated with the distribution. + WebACLId *string `type:"string"` +} + +// String returns the string representation +func (s DistributionConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DistributionConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DistributionConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DistributionConfig"} + if s.CallerReference == nil { + invalidParams.Add(request.NewErrParamRequired("CallerReference")) + } + if s.Comment == nil { + invalidParams.Add(request.NewErrParamRequired("Comment")) + } + if s.DefaultCacheBehavior == nil { + invalidParams.Add(request.NewErrParamRequired("DefaultCacheBehavior")) + } + if s.Enabled == nil { + invalidParams.Add(request.NewErrParamRequired("Enabled")) + } + if s.Origins == nil { + invalidParams.Add(request.NewErrParamRequired("Origins")) + } + if s.Aliases != nil { + if err := s.Aliases.Validate(); err != nil { + invalidParams.AddNested("Aliases", err.(request.ErrInvalidParams)) + } + } + if s.CacheBehaviors != nil { + if err := s.CacheBehaviors.Validate(); err != nil { + invalidParams.AddNested("CacheBehaviors", err.(request.ErrInvalidParams)) + } + } + if s.CustomErrorResponses != nil { + if err := s.CustomErrorResponses.Validate(); err != nil { + invalidParams.AddNested("CustomErrorResponses", err.(request.ErrInvalidParams)) + } + } + if s.DefaultCacheBehavior != nil { + if err := s.DefaultCacheBehavior.Validate(); err != nil { + invalidParams.AddNested("DefaultCacheBehavior", err.(request.ErrInvalidParams)) + } + } + if s.Logging != nil { + if err := s.Logging.Validate(); err != nil { + invalidParams.AddNested("Logging", err.(request.ErrInvalidParams)) + } + } + if s.Origins != nil { + if err := s.Origins.Validate(); err != nil { + invalidParams.AddNested("Origins", err.(request.ErrInvalidParams)) + } + } + if s.Restrictions != nil { + if err := s.Restrictions.Validate(); err != nil { + invalidParams.AddNested("Restrictions", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A distribution list. +type DistributionList struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether more distributions remain to be listed. If + // your results were truncated, you can make a follow-up pagination request + // using the Marker request parameter to retrieve more distributions in the + // list. + IsTruncated *bool `type:"boolean" required:"true"` + + // A complex type that contains one DistributionSummary element for each distribution + // that was created by the current AWS account. + Items []*DistributionSummary `locationNameList:"DistributionSummary" type:"list"` + + // The value you provided for the Marker request parameter. 
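+ // These fields drive pagination. A hypothetical loop over ListDistributions
+ // (svc is an assumed, already-configured client):
+ //
+ //    in := &cloudfront.ListDistributionsInput{}
+ //    for {
+ //        out, err := svc.ListDistributions(in)
+ //        if err != nil {
+ //            break // handle the error in real code
+ //        }
+ //        // ... consume out.DistributionList.Items ...
+ //        if !aws.BoolValue(out.DistributionList.IsTruncated) {
+ //            break
+ //        }
+ //        in.Marker = out.DistributionList.NextMarker
+ //    }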
+ Marker *string `type:"string" required:"true"`
+
+ // The value you provided for the MaxItems request parameter.
+ MaxItems *int64 `type:"integer" required:"true"`
+
+ // If IsTruncated is true, this element is present and contains the value you
+ // can use for the Marker request parameter to continue listing your distributions
+ // where they left off.
+ NextMarker *string `type:"string"`
+
+ // The number of distributions that were created by the current AWS account.
+ Quantity *int64 `type:"integer" required:"true"`
+}
+
+// String returns the string representation
+func (s DistributionList) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DistributionList) GoString() string {
+ return s.String()
+}
+
+// A summary of the information for an Amazon CloudFront distribution.
+type DistributionSummary struct {
+ _ struct{} `type:"structure"`
+
+ // A complex type that contains information about CNAMEs (alternate domain names),
+ // if any, for this distribution.
+ Aliases *Aliases `type:"structure" required:"true"`
+
+ // A complex type that contains zero or more CacheBehavior elements.
+ CacheBehaviors *CacheBehaviors `type:"structure" required:"true"`
+
+ // The comment originally specified when this distribution was created.
+ Comment *string `type:"string" required:"true"`
+
+ // A complex type that contains zero or more CustomErrorResponses elements.
+ CustomErrorResponses *CustomErrorResponses `type:"structure" required:"true"`
+
+ // A complex type that describes the default cache behavior if you do not specify
+ // a CacheBehavior element or if files don't match any of the values of PathPattern
+ // in CacheBehavior elements. You must create exactly one default cache behavior.
+ DefaultCacheBehavior *DefaultCacheBehavior `type:"structure" required:"true"`
+
+ // The domain name corresponding to the distribution. For example: d604721fxaaqy9.cloudfront.net.
+ DomainName *string `type:"string" required:"true"`
+
+ // Whether the distribution is enabled to accept end user requests for content.
+ Enabled *bool `type:"boolean" required:"true"`
+
+ // The identifier for the distribution. For example: EDFDVBD632BHDS5.
+ Id *string `type:"string" required:"true"`
+
+ // The date and time the distribution was last modified.
+ LastModifiedTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
+
+ // A complex type that contains information about origins for this distribution.
+ Origins *Origins `type:"structure" required:"true"`
+
+ PriceClass *string `type:"string" required:"true" enum:"PriceClass"`
+
+ // A complex type that identifies ways in which you want to restrict distribution
+ // of your content.
+ Restrictions *Restrictions `type:"structure" required:"true"`
+
+ // This response element indicates the current status of the distribution. When
+ // the status is Deployed, the distribution's information is fully propagated
+ // throughout the Amazon CloudFront system.
+ Status *string `type:"string" required:"true"`
+
+ // A complex type that contains information about viewer certificates for this
+ // distribution.
+ ViewerCertificate *ViewerCertificate `type:"structure" required:"true"`
+
+ // The Web ACL Id (if any) associated with the distribution.
+ WebACLId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DistributionSummary) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DistributionSummary) GoString() string {
+ return s.String()
+}
+
+// A complex type that specifies how CloudFront handles query strings, cookies
+// and headers.
+type ForwardedValues struct {
+ _ struct{} `type:"structure"`
+
+ // A complex type that specifies how CloudFront handles cookies.
+ Cookies *CookiePreference `type:"structure" required:"true"`
+
+ // A complex type that specifies the Headers, if any, that you want CloudFront
+ // to vary upon for this cache behavior.
+ Headers *Headers `type:"structure"`
+
+ // Indicates whether you want CloudFront to forward query strings to the origin
+ // that is associated with this cache behavior. If so, specify true; if not,
+ // specify false.
+ QueryString *bool `type:"boolean" required:"true"`
+}
+
+// String returns the string representation
+func (s ForwardedValues) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ForwardedValues) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ForwardedValues) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ForwardedValues"}
+ if s.Cookies == nil {
+ invalidParams.Add(request.NewErrParamRequired("Cookies"))
+ }
+ if s.QueryString == nil {
+ invalidParams.Add(request.NewErrParamRequired("QueryString"))
+ }
+ if s.Cookies != nil {
+ if err := s.Cookies.Validate(); err != nil {
+ invalidParams.AddNested("Cookies", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.Headers != nil {
+ if err := s.Headers.Validate(); err != nil {
+ invalidParams.AddNested("Headers", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// A complex type that controls the countries in which your content is distributed.
+// For more information about geo restriction, go to Restricting the Geographic
+// Distribution of Your Content in the Amazon CloudFront Developer Guide. CloudFront
+// determines the location of your users using MaxMind GeoIP databases. For
+// information about the accuracy of these databases, see How accurate are your
+// GeoIP databases? on the MaxMind website.
+type GeoRestriction struct {
+ _ struct{} `type:"structure"`
+
+ // A complex type that contains a Location element for each country in which
+ // you want CloudFront either to distribute your content (whitelist) or not
+ // distribute your content (blacklist). The Location element is a two-letter,
+ // uppercase country code for a country that you want to include in your blacklist
+ // or whitelist. Include one Location element for each country. CloudFront and
+ // MaxMind both use ISO 3166 country codes. For the current list of countries
+ // and the corresponding codes, see ISO 3166-1-alpha-2 code on the International
+ // Organization for Standardization website. You can also refer to the country
+ // list in the CloudFront console, which includes both country names and codes.
+ Items []*string `locationNameList:"Location" type:"list"`
+
+ // When geo restriction is enabled, this is the number of countries in your
+ // whitelist or blacklist. Otherwise, when it is not enabled, Quantity is 0,
+ // and you can omit Items.
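+ // A hypothetical whitelist that restricts delivery to two countries, using
+ // the ISO 3166 alpha-2 codes described above:
+ //
+ //    &cloudfront.GeoRestriction{
+ //        RestrictionType: aws.String("whitelist"),
+ //        Quantity:        aws.Int64(2),
+ //        Items:           []*string{aws.String("US"), aws.String("CA")},
+ //    }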
+ Quantity *int64 `type:"integer" required:"true"` + + // The method that you want to use to restrict distribution of your content + // by country: - none: No geo restriction is enabled, meaning access to content + // is not restricted by client geo location. - blacklist: The Location elements + // specify the countries in which you do not want CloudFront to distribute your + // content. - whitelist: The Location elements specify the countries in which + // you want CloudFront to distribute your content. + RestrictionType *string `type:"string" required:"true" enum:"GeoRestrictionType"` +} + +// String returns the string representation +func (s GeoRestriction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GeoRestriction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GeoRestriction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GeoRestriction"} + if s.Quantity == nil { + invalidParams.Add(request.NewErrParamRequired("Quantity")) + } + if s.RestrictionType == nil { + invalidParams.Add(request.NewErrParamRequired("RestrictionType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The request to get an origin access identity's configuration. +type GetCloudFrontOriginAccessIdentityConfigInput struct { + _ struct{} `type:"structure"` + + // The identity's id. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetCloudFrontOriginAccessIdentityConfigInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCloudFrontOriginAccessIdentityConfigInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetCloudFrontOriginAccessIdentityConfigInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetCloudFrontOriginAccessIdentityConfigInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The returned result of the corresponding request. +type GetCloudFrontOriginAccessIdentityConfigOutput struct { + _ struct{} `type:"structure" payload:"CloudFrontOriginAccessIdentityConfig"` + + // The origin access identity's configuration information. + CloudFrontOriginAccessIdentityConfig *OriginAccessIdentityConfig `type:"structure"` + + // The current version of the configuration. For example: E2QWRUHAPOMQZL. + ETag *string `location:"header" locationName:"ETag" type:"string"` +} + +// String returns the string representation +func (s GetCloudFrontOriginAccessIdentityConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCloudFrontOriginAccessIdentityConfigOutput) GoString() string { + return s.String() +} + +// The request to get an origin access identity's information. +type GetCloudFrontOriginAccessIdentityInput struct { + _ struct{} `type:"structure"` + + // The identity's id. 
+ Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetCloudFrontOriginAccessIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCloudFrontOriginAccessIdentityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetCloudFrontOriginAccessIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetCloudFrontOriginAccessIdentityInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The returned result of the corresponding request. +type GetCloudFrontOriginAccessIdentityOutput struct { + _ struct{} `type:"structure" payload:"CloudFrontOriginAccessIdentity"` + + // The origin access identity's information. + CloudFrontOriginAccessIdentity *OriginAccessIdentity `type:"structure"` + + // The current version of the origin access identity's information. For example: + // E2QWRUHAPOMQZL. + ETag *string `location:"header" locationName:"ETag" type:"string"` +} + +// String returns the string representation +func (s GetCloudFrontOriginAccessIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCloudFrontOriginAccessIdentityOutput) GoString() string { + return s.String() +} + +// The request to get a distribution configuration. +type GetDistributionConfigInput struct { + _ struct{} `type:"structure"` + + // The distribution's id. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetDistributionConfigInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDistributionConfigInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetDistributionConfigInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDistributionConfigInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The returned result of the corresponding request. +type GetDistributionConfigOutput struct { + _ struct{} `type:"structure" payload:"DistributionConfig"` + + // The distribution's configuration information. + DistributionConfig *DistributionConfig `type:"structure"` + + // The current version of the configuration. For example: E2QWRUHAPOMQZL. + ETag *string `location:"header" locationName:"ETag" type:"string"` +} + +// String returns the string representation +func (s GetDistributionConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDistributionConfigOutput) GoString() string { + return s.String() +} + +// The request to get a distribution's information. +type GetDistributionInput struct { + _ struct{} `type:"structure"` + + // The distribution's id. 
+ Id *string `location:"uri" locationName:"Id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetDistributionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetDistributionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetDistributionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetDistributionInput"}
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// The returned result of the corresponding request.
+type GetDistributionOutput struct {
+ _ struct{} `type:"structure" payload:"Distribution"`
+
+ // The distribution's information.
+ Distribution *Distribution `type:"structure"`
+
+ // The current version of the distribution's information. For example: E2QWRUHAPOMQZL.
+ ETag *string `location:"header" locationName:"ETag" type:"string"`
+}
+
+// String returns the string representation
+func (s GetDistributionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetDistributionOutput) GoString() string {
+ return s.String()
+}
+
+// The request to get an invalidation's information.
+type GetInvalidationInput struct {
+ _ struct{} `type:"structure"`
+
+ // The distribution's id.
+ DistributionId *string `location:"uri" locationName:"DistributionId" type:"string" required:"true"`
+
+ // The invalidation's id.
+ Id *string `location:"uri" locationName:"Id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetInvalidationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetInvalidationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetInvalidationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetInvalidationInput"}
+ if s.DistributionId == nil {
+ invalidParams.Add(request.NewErrParamRequired("DistributionId"))
+ }
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// The returned result of the corresponding request.
+type GetInvalidationOutput struct {
+ _ struct{} `type:"structure" payload:"Invalidation"`
+
+ // The invalidation's information.
+ Invalidation *Invalidation `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetInvalidationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetInvalidationOutput) GoString() string {
+ return s.String()
+}
+
+// The request to get a streaming distribution configuration.
+type GetStreamingDistributionConfigInput struct {
+ _ struct{} `type:"structure"`
+
+ // The streaming distribution's id.
+ Id *string `location:"uri" locationName:"Id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetStreamingDistributionConfigInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetStreamingDistributionConfigInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
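+//
+// For the invalidation types above, a hypothetical round trip; the
+// CreateInvalidation operation and the Paths type are assumed from the same
+// service model (they are not part of this excerpt), and svc is an assumed,
+// already-configured client:
+//
+//    created, err := svc.CreateInvalidation(&cloudfront.CreateInvalidationInput{
+//        DistributionId: aws.String("EDFDVBD632BHDS5"),
+//        InvalidationBatch: &cloudfront.InvalidationBatch{
+//            CallerReference: aws.String("2017-07-26-refresh-1"),
+//            Paths:           &cloudfront.Paths{Quantity: aws.Int64(1), Items: []*string{aws.String("/index.html")}},
+//        },
+//    })
+//    // Later, poll GetInvalidation until Invalidation.Status is "Completed".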
+func (s *GetStreamingDistributionConfigInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetStreamingDistributionConfigInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The returned result of the corresponding request. +type GetStreamingDistributionConfigOutput struct { + _ struct{} `type:"structure" payload:"StreamingDistributionConfig"` + + // The current version of the configuration. For example: E2QWRUHAPOMQZL. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // The streaming distribution's configuration information. + StreamingDistributionConfig *StreamingDistributionConfig `type:"structure"` +} + +// String returns the string representation +func (s GetStreamingDistributionConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetStreamingDistributionConfigOutput) GoString() string { + return s.String() +} + +// The request to get a streaming distribution's information. +type GetStreamingDistributionInput struct { + _ struct{} `type:"structure"` + + // The streaming distribution's id. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetStreamingDistributionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetStreamingDistributionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetStreamingDistributionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetStreamingDistributionInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The returned result of the corresponding request. +type GetStreamingDistributionOutput struct { + _ struct{} `type:"structure" payload:"StreamingDistribution"` + + // The current version of the streaming distribution's information. For example: + // E2QWRUHAPOMQZL. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // The streaming distribution's information. + StreamingDistribution *StreamingDistribution `type:"structure"` +} + +// String returns the string representation +func (s GetStreamingDistributionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetStreamingDistributionOutput) GoString() string { + return s.String() +} + +// A complex type that specifies the headers that you want CloudFront to forward +// to the origin for this cache behavior. For the headers that you specify, +// CloudFront also caches separate versions of a given object based on the header +// values in viewer requests; this is known as varying on headers. For example, +// suppose viewer requests for logo.jpg contain a custom Product header that +// has a value of either Acme or Apex, and you configure CloudFront to vary +// on the Product header. CloudFront forwards the Product header to the origin +// and caches the response from the origin once for each header value. 
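+//
+// A sketch of that Product-header example as a value of this type:
+//
+//    &cloudfront.Headers{
+//        Quantity: aws.Int64(1),
+//        Items:    []*string{aws.String("Product")},
+//    }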
+type Headers struct {
+ _ struct{} `type:"structure"`
+
+ // Optional: A complex type that contains a Name element for each header that
+ // you want CloudFront to forward to the origin and to vary on for this cache
+ // behavior. If Quantity is 0, omit Items.
+ Items []*string `locationNameList:"Name" type:"list"`
+
+ // The number of different headers that you want CloudFront to forward to the
+ // origin and to vary on for this cache behavior. The maximum number of headers
+ // that you can specify by name is 10. If you want CloudFront to forward all
+ // headers to the origin and vary on all of them, specify 1 for Quantity and
+ // * for Name. If you don't want CloudFront to forward any additional headers
+ // to the origin or to vary on any headers, specify 0 for Quantity and omit
+ // Items.
+ Quantity *int64 `type:"integer" required:"true"`
+}
+
+// String returns the string representation
+func (s Headers) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Headers) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Headers) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Headers"}
+ if s.Quantity == nil {
+ invalidParams.Add(request.NewErrParamRequired("Quantity"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// An invalidation.
+type Invalidation struct {
+ _ struct{} `type:"structure"`
+
+ // The date and time the invalidation request was first made.
+ CreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
+
+ // The identifier for the invalidation request. For example: IDFDVBD632BHDS5.
+ Id *string `type:"string" required:"true"`
+
+ // The current invalidation information for the batch request.
+ InvalidationBatch *InvalidationBatch `type:"structure" required:"true"`
+
+ // The status of the invalidation request. When the invalidation batch is finished,
+ // the status is Completed.
+ Status *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s Invalidation) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Invalidation) GoString() string {
+ return s.String()
+}
+
+// An invalidation batch.
+type InvalidationBatch struct {
+ _ struct{} `type:"structure"`
+
+ // A unique name that ensures the request can't be replayed. If the CallerReference
+ // is new (no matter the content of the Path object), a new invalidation batch
+ // is created. If the CallerReference is a value you already sent in a previous
+ // request to create an invalidation batch, and the content of each Path element
+ // is identical to the original request, the response includes the same information
+ // returned to the original request. If the CallerReference is a value you already
+ // sent in a previous request to create an invalidation batch but the content
+ // of any Path is different from the original request, CloudFront returns an
+ // InvalidationBatchAlreadyExists error.
+ CallerReference *string `type:"string" required:"true"`
+
+ // The path of the object to invalidate. The path is relative to the distribution
+ // and must begin with a slash (/). You must enclose each invalidation object
+ // with the Path element tags. If the path includes non-ASCII characters or
+ // unsafe characters as defined in RFC 1738 (http://www.ietf.org/rfc/rfc1738.txt),
+ // URL encode those characters. Do not URL encode any other characters in the
+ // path, or CloudFront will not invalidate the old version of the updated object.
+ Paths *Paths `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s InvalidationBatch) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InvalidationBatch) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InvalidationBatch) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "InvalidationBatch"}
+ if s.CallerReference == nil {
+ invalidParams.Add(request.NewErrParamRequired("CallerReference"))
+ }
+ if s.Paths == nil {
+ invalidParams.Add(request.NewErrParamRequired("Paths"))
+ }
+ if s.Paths != nil {
+ if err := s.Paths.Validate(); err != nil {
+ invalidParams.AddNested("Paths", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// An invalidation list.
+type InvalidationList struct {
+ _ struct{} `type:"structure"`
+
+ // A flag that indicates whether more invalidation batch requests remain to
+ // be listed. If your results were truncated, you can make a follow-up pagination
+ // request using the Marker request parameter to retrieve more invalidation
+ // batches in the list.
+ IsTruncated *bool `type:"boolean" required:"true"`
+
+ // A complex type that contains one InvalidationSummary element for each invalidation
+ // batch that was created by the current AWS account.
+ Items []*InvalidationSummary `locationNameList:"InvalidationSummary" type:"list"`
+
+ // The value you provided for the Marker request parameter.
+ Marker *string `type:"string" required:"true"`
+
+ // The value you provided for the MaxItems request parameter.
+ MaxItems *int64 `type:"integer" required:"true"`
+
+ // If IsTruncated is true, this element is present and contains the value you
+ // can use for the Marker request parameter to continue listing your invalidation
+ // batches where they left off.
+ NextMarker *string `type:"string"`
+
+ // The number of invalidation batches that were created by the current AWS account.
+ Quantity *int64 `type:"integer" required:"true"`
+}
+
+// String returns the string representation
+func (s InvalidationList) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InvalidationList) GoString() string {
+ return s.String()
+}
+
+// Summary of an invalidation request.
+type InvalidationSummary struct {
+ _ struct{} `type:"structure"`
+
+ CreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
+
+ // The unique ID for an invalidation request.
+ Id *string `type:"string" required:"true"`
+
+ // The status of an invalidation request.
+ Status *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s InvalidationSummary) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InvalidationSummary) GoString() string {
+ return s.String()
+}
+
+// A complex type that lists the active CloudFront key pairs, if any, that are
+// associated with AwsAccountNumber.
+type KeyPairIds struct {
+ _ struct{} `type:"structure"`
+
+ // A complex type that lists the active CloudFront key pairs, if any, that are
+ // associated with AwsAccountNumber.
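+ // For example, the key pairs for each trusted signer can be read off a
+ // Distribution's ActiveTrustedSigners; the Signer type and its fields are
+ // assumed from the service model (they are not shown in this excerpt):
+ //
+ //    for _, signer := range dist.ActiveTrustedSigners.Items {
+ //        ids := signer.KeyPairIds // *cloudfront.KeyPairIds
+ //        _ = aws.StringValueSlice(ids.Items)
+ //    }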
+ Items []*string `locationNameList:"KeyPairId" type:"list"` + + // The number of active CloudFront key pairs for AwsAccountNumber. + Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s KeyPairIds) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KeyPairIds) GoString() string { + return s.String() +} + +// The request to list origin access identities. +type ListCloudFrontOriginAccessIdentitiesInput struct { + _ struct{} `type:"structure"` + + // Use this when paginating results to indicate where to begin in your list + // of origin access identities. The results include identities in the list that + // occur after the marker. To get the next page of results, set the Marker to + // the value of the NextMarker from the current page's response (which is also + // the ID of the last identity on that page). + Marker *string `location:"querystring" locationName:"Marker" type:"string"` + + // The maximum number of origin access identities you want in the response body. + MaxItems *int64 `location:"querystring" locationName:"MaxItems" type:"integer"` +} + +// String returns the string representation +func (s ListCloudFrontOriginAccessIdentitiesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListCloudFrontOriginAccessIdentitiesInput) GoString() string { + return s.String() +} + +// The returned result of the corresponding request. +type ListCloudFrontOriginAccessIdentitiesOutput struct { + _ struct{} `type:"structure" payload:"CloudFrontOriginAccessIdentityList"` + + // The CloudFrontOriginAccessIdentityList type. + CloudFrontOriginAccessIdentityList *OriginAccessIdentityList `type:"structure"` +} + +// String returns the string representation +func (s ListCloudFrontOriginAccessIdentitiesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListCloudFrontOriginAccessIdentitiesOutput) GoString() string { + return s.String() +} + +// The request to list distributions that are associated with a specified AWS +// WAF web ACL. +type ListDistributionsByWebACLIdInput struct { + _ struct{} `type:"structure"` + + // Use Marker and MaxItems to control pagination of results. If you have more + // than MaxItems distributions that satisfy the request, the response includes + // a NextMarker element. To get the next page of results, submit another request. + // For the value of Marker, specify the value of NextMarker from the last response. + // (For the first request, omit Marker.) + Marker *string `location:"querystring" locationName:"Marker" type:"string"` + + // The maximum number of distributions that you want CloudFront to return in + // the response body. The maximum and default values are both 100. + MaxItems *int64 `location:"querystring" locationName:"MaxItems" type:"integer"` + + // The Id of the AWS WAF web ACL for which you want to list the associated distributions. + // If you specify "null" for the Id, the request returns a list of the distributions + // that aren't associated with a web ACL. 
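+ // A hypothetical call that lists the distributions with no web ACL attached,
+ // using the "null" convention described above (svc is an assumed client):
+ //
+ //    out, err := svc.ListDistributionsByWebACLId(&cloudfront.ListDistributionsByWebACLIdInput{
+ //        WebACLId: aws.String("null"),
+ //    })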
+ WebACLId *string `location:"uri" locationName:"WebACLId" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListDistributionsByWebACLIdInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDistributionsByWebACLIdInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListDistributionsByWebACLIdInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListDistributionsByWebACLIdInput"} + if s.WebACLId == nil { + invalidParams.Add(request.NewErrParamRequired("WebACLId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The response to a request to list the distributions that are associated with +// a specified AWS WAF web ACL. +type ListDistributionsByWebACLIdOutput struct { + _ struct{} `type:"structure" payload:"DistributionList"` + + // The DistributionList type. + DistributionList *DistributionList `type:"structure"` +} + +// String returns the string representation +func (s ListDistributionsByWebACLIdOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDistributionsByWebACLIdOutput) GoString() string { + return s.String() +} + +// The request to list your distributions. +type ListDistributionsInput struct { + _ struct{} `type:"structure"` + + // Use Marker and MaxItems to control pagination of results. If you have more + // than MaxItems distributions that satisfy the request, the response includes + // a NextMarker element. To get the next page of results, submit another request. + // For the value of Marker, specify the value of NextMarker from the last response. + // (For the first request, omit Marker.) + Marker *string `location:"querystring" locationName:"Marker" type:"string"` + + // The maximum number of distributions that you want CloudFront to return in + // the response body. The maximum and default values are both 100. + MaxItems *int64 `location:"querystring" locationName:"MaxItems" type:"integer"` +} + +// String returns the string representation +func (s ListDistributionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDistributionsInput) GoString() string { + return s.String() +} + +// The returned result of the corresponding request. +type ListDistributionsOutput struct { + _ struct{} `type:"structure" payload:"DistributionList"` + + // The DistributionList type. + DistributionList *DistributionList `type:"structure"` +} + +// String returns the string representation +func (s ListDistributionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDistributionsOutput) GoString() string { + return s.String() +} + +// The request to list invalidations. +type ListInvalidationsInput struct { + _ struct{} `type:"structure"` + + // The distribution's id. + DistributionId *string `location:"uri" locationName:"DistributionId" type:"string" required:"true"` + + // Use this parameter when paginating results to indicate where to begin in + // your list of invalidation batches. Because the results are returned in decreasing + // order from most recent to oldest, the most recent results are on the first + // page, the second page will contain earlier results, and so on. 
+ // To get the next page of results, set the Marker to the value of the NextMarker
+ // from the current page's response. This value is the same as the ID of the
+ // last invalidation batch on that page.
+ Marker *string `location:"querystring" locationName:"Marker" type:"string"`
+
+ // The maximum number of invalidation batches you want in the response body.
+ MaxItems *int64 `location:"querystring" locationName:"MaxItems" type:"integer"`
+}
+
+// String returns the string representation
+func (s ListInvalidationsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListInvalidationsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListInvalidationsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListInvalidationsInput"}
+ if s.DistributionId == nil {
+ invalidParams.Add(request.NewErrParamRequired("DistributionId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// The returned result of the corresponding request.
+type ListInvalidationsOutput struct {
+ _ struct{} `type:"structure" payload:"InvalidationList"`
+
+ // Information about invalidation batches.
+ InvalidationList *InvalidationList `type:"structure"`
+}
+
+// String returns the string representation
+func (s ListInvalidationsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListInvalidationsOutput) GoString() string {
+ return s.String()
+}
+
+// The request to list your streaming distributions.
+type ListStreamingDistributionsInput struct {
+ _ struct{} `type:"structure"`
+
+ // Use this when paginating results to indicate where to begin in your list
+ // of streaming distributions. The results include distributions in the list
+ // that occur after the marker. To get the next page of results, set the Marker
+ // to the value of the NextMarker from the current page's response (which is
+ // also the ID of the last distribution on that page).
+ Marker *string `location:"querystring" locationName:"Marker" type:"string"`
+
+ // The maximum number of streaming distributions you want in the response body.
+ MaxItems *int64 `location:"querystring" locationName:"MaxItems" type:"integer"`
+}
+
+// String returns the string representation
+func (s ListStreamingDistributionsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListStreamingDistributionsInput) GoString() string {
+ return s.String()
+}
+
+// The returned result of the corresponding request.
+type ListStreamingDistributionsOutput struct {
+ _ struct{} `type:"structure" payload:"StreamingDistributionList"`
+
+ // The StreamingDistributionList type.
+ StreamingDistributionList *StreamingDistributionList `type:"structure"`
+}
+
+// String returns the string representation
+func (s ListStreamingDistributionsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListStreamingDistributionsOutput) GoString() string {
+ return s.String()
+}
+
+// A complex type that controls whether access logs are written for the distribution.
+type LoggingConfig struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon S3 bucket to store the access logs in, for example, myawslogbucket.s3.amazonaws.com.
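+ // A hypothetical enabled configuration; all four fields are required, as the
+ // Validate method below enforces:
+ //
+ //    &cloudfront.LoggingConfig{
+ //        Enabled:        aws.Bool(true),
+ //        IncludeCookies: aws.Bool(false),
+ //        Bucket:         aws.String("myawslogbucket.s3.amazonaws.com"),
+ //        Prefix:         aws.String("myprefix/"),
+ //    }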
+ Bucket *string `type:"string" required:"true"`
+
+ // Specifies whether you want CloudFront to save access logs to an Amazon S3
+ // bucket. If you do not want to enable logging when you create a distribution
+ // or if you want to disable logging for an existing distribution, specify false
+ // for Enabled, and specify empty Bucket and Prefix elements. If you specify
+ // false for Enabled but you specify values for Bucket, Prefix, and IncludeCookies,
+ // the values are automatically deleted.
+ Enabled *bool `type:"boolean" required:"true"`
+
+ // Specifies whether you want CloudFront to include cookies in access logs.
+ // If you do, specify true for IncludeCookies. If you choose to include cookies
+ // in logs, CloudFront logs all cookies regardless of how you configure the
+ // cache behaviors for this distribution. If you do not want to include cookies
+ // when you create a distribution or if you want to disable cookie logging for
+ // an existing distribution, specify false for IncludeCookies.
+ IncludeCookies *bool `type:"boolean" required:"true"`
+
+ // An optional string that you want CloudFront to prefix to the access log filenames
+ // for this distribution, for example, myprefix/. If you want to enable logging,
+ // but you do not want to specify a prefix, you still must include an empty
+ // Prefix element in the Logging element.
+ Prefix *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s LoggingConfig) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LoggingConfig) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LoggingConfig) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "LoggingConfig"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Enabled == nil {
+ invalidParams.Add(request.NewErrParamRequired("Enabled"))
+ }
+ if s.IncludeCookies == nil {
+ invalidParams.Add(request.NewErrParamRequired("IncludeCookies"))
+ }
+ if s.Prefix == nil {
+ invalidParams.Add(request.NewErrParamRequired("Prefix"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// A complex type that describes the Amazon S3 bucket or the HTTP server (for
+// example, a web server) from which CloudFront gets your files. You must create
+// at least one origin.
+type Origin struct {
+ _ struct{} `type:"structure"`
+
+ // A complex type that contains information about the custom headers associated
+ // with this Origin.
+ CustomHeaders *CustomHeaders `type:"structure"`
+
+ // A complex type that contains information about a custom origin. If the origin
+ // is an Amazon S3 bucket, use the S3OriginConfig element instead.
+ CustomOriginConfig *CustomOriginConfig `type:"structure"`
+
+ // Amazon S3 origins: The DNS name of the Amazon S3 bucket from which you want
+ // CloudFront to get objects for this origin, for example, myawsbucket.s3.amazonaws.com.
+ // Custom origins: The DNS domain name for the HTTP server from which you want
+ // CloudFront to get objects for this origin, for example, www.example.com.
+ DomainName *string `type:"string" required:"true"`
+
+ // A unique identifier for the origin. The value of Id must be unique within
+ // the distribution. You use the value of Id when you create a cache behavior.
+ // The Id identifies the origin that CloudFront routes a request to when the + // request matches the path pattern for that cache behavior. + Id *string `type:"string" required:"true"` + + // An optional element that causes CloudFront to request your content from a + // directory in your Amazon S3 bucket or your custom origin. When you include + // the OriginPath element, specify the directory name, beginning with a /. CloudFront + // appends the directory name to the value of DomainName. + OriginPath *string `type:"string"` + + // A complex type that contains information about the Amazon S3 origin. If the + // origin is a custom origin, use the CustomOriginConfig element instead. + S3OriginConfig *S3OriginConfig `type:"structure"` +} + +// String returns the string representation +func (s Origin) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Origin) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Origin) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Origin"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.CustomHeaders != nil { + if err := s.CustomHeaders.Validate(); err != nil { + invalidParams.AddNested("CustomHeaders", err.(request.ErrInvalidParams)) + } + } + if s.CustomOriginConfig != nil { + if err := s.CustomOriginConfig.Validate(); err != nil { + invalidParams.AddNested("CustomOriginConfig", err.(request.ErrInvalidParams)) + } + } + if s.S3OriginConfig != nil { + if err := s.S3OriginConfig.Validate(); err != nil { + invalidParams.AddNested("S3OriginConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// CloudFront origin access identity. +type OriginAccessIdentity struct { + _ struct{} `type:"structure"` + + // The current configuration information for the identity. + CloudFrontOriginAccessIdentityConfig *OriginAccessIdentityConfig `type:"structure"` + + // The ID for the origin access identity. For example: E74FTE3AJFJ256A. + Id *string `type:"string" required:"true"` + + // The Amazon S3 canonical user ID for the origin access identity, which you + // use when giving the origin access identity read permission to an object in + // Amazon S3. + S3CanonicalUserId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s OriginAccessIdentity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OriginAccessIdentity) GoString() string { + return s.String() +} + +// Origin access identity configuration. +type OriginAccessIdentityConfig struct { + _ struct{} `type:"structure"` + + // A unique number that ensures the request can't be replayed. If the CallerReference + // is new (no matter the content of the CloudFrontOriginAccessIdentityConfig + // object), a new origin access identity is created. If the CallerReference + // is a value you already sent in a previous request to create an identity, + // and the content of the CloudFrontOriginAccessIdentityConfig is identical + // to the original request (ignoring white space), the response includes the + // same information returned to the original request. 
If the CallerReference + // is a value you already sent in a previous request to create an identity but + // the content of the CloudFrontOriginAccessIdentityConfig is different from + // the original request, CloudFront returns a CloudFrontOriginAccessIdentityAlreadyExists + // error. + CallerReference *string `type:"string" required:"true"` + + // Any comments you want to include about the origin access identity. + Comment *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s OriginAccessIdentityConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OriginAccessIdentityConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OriginAccessIdentityConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OriginAccessIdentityConfig"} + if s.CallerReference == nil { + invalidParams.Add(request.NewErrParamRequired("CallerReference")) + } + if s.Comment == nil { + invalidParams.Add(request.NewErrParamRequired("Comment")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The CloudFrontOriginAccessIdentityList type. +type OriginAccessIdentityList struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether more origin access identities remain to be + // listed. If your results were truncated, you can make a follow-up pagination + // request using the Marker request parameter to retrieve more items in the + // list. + IsTruncated *bool `type:"boolean" required:"true"` + + // A complex type that contains one CloudFrontOriginAccessIdentitySummary element + // for each origin access identity that was created by the current AWS account. + Items []*OriginAccessIdentitySummary `locationNameList:"CloudFrontOriginAccessIdentitySummary" type:"list"` + + // The value you provided for the Marker request parameter. + Marker *string `type:"string" required:"true"` + + // The value you provided for the MaxItems request parameter. + MaxItems *int64 `type:"integer" required:"true"` + + // If IsTruncated is true, this element is present and contains the value you + // can use for the Marker request parameter to continue listing your origin + // access identities where they left off. + NextMarker *string `type:"string"` + + // The number of CloudFront origin access identities that were created by the + // current AWS account. + Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s OriginAccessIdentityList) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OriginAccessIdentityList) GoString() string { + return s.String() +} + +// Summary of the information about a CloudFront origin access identity. +type OriginAccessIdentitySummary struct { + _ struct{} `type:"structure"` + + // The comment for this origin access identity, as originally specified when + // created. + Comment *string `type:"string" required:"true"` + + // The ID for the origin access identity. For example: E74FTE3AJFJ256A. + Id *string `type:"string" required:"true"` + + // The Amazon S3 canonical user ID for the origin access identity, which you + // use when giving the origin access identity read permission to an object in + // Amazon S3. 
+ S3CanonicalUserId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s OriginAccessIdentitySummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OriginAccessIdentitySummary) GoString() string { + return s.String() +} + +// A complex type that contains information related to a Header +type OriginCustomHeader struct { + _ struct{} `type:"structure"` + + // The header's name. + HeaderName *string `type:"string" required:"true"` + + // The header's value. + HeaderValue *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s OriginCustomHeader) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OriginCustomHeader) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OriginCustomHeader) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OriginCustomHeader"} + if s.HeaderName == nil { + invalidParams.Add(request.NewErrParamRequired("HeaderName")) + } + if s.HeaderValue == nil { + invalidParams.Add(request.NewErrParamRequired("HeaderValue")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains the list of SSL/TLS protocols that you want +// CloudFront to use when communicating with your origin over HTTPS. +type OriginSslProtocols struct { + _ struct{} `type:"structure"` + + // A complex type that contains one SslProtocol element for each SSL/TLS protocol + // that you want to allow CloudFront to use when establishing an HTTPS connection + // with this origin. + Items []*string `locationNameList:"SslProtocol" type:"list" required:"true"` + + // The number of SSL/TLS protocols that you want to allow CloudFront to use + // when establishing an HTTPS connection with this origin. + Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s OriginSslProtocols) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OriginSslProtocols) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OriginSslProtocols) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OriginSslProtocols"} + if s.Items == nil { + invalidParams.Add(request.NewErrParamRequired("Items")) + } + if s.Quantity == nil { + invalidParams.Add(request.NewErrParamRequired("Quantity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains information about origins for this distribution. +type Origins struct { + _ struct{} `type:"structure"` + + // A complex type that contains origins for this distribution. + Items []*Origin `locationNameList:"Origin" min:"1" type:"list"` + + // The number of origins for this distribution. + Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s Origins) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Origins) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *Origins) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "Origins"}
+    if s.Items != nil && len(s.Items) < 1 {
+        invalidParams.Add(request.NewErrParamMinLen("Items", 1))
+    }
+    if s.Quantity == nil {
+        invalidParams.Add(request.NewErrParamRequired("Quantity"))
+    }
+    if s.Items != nil {
+        for i, v := range s.Items {
+            if v == nil {
+                continue
+            }
+            if err := v.Validate(); err != nil {
+                invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Items", i), err.(request.ErrInvalidParams))
+            }
+        }
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// A complex type that contains information about the objects that you want
+// to invalidate.
+type Paths struct {
+    _ struct{} `type:"structure"`
+
+    // A complex type that contains a list of the objects that you want to invalidate.
+    Items []*string `locationNameList:"Path" type:"list"`
+
+    // The number of objects that you want to invalidate.
+    Quantity *int64 `type:"integer" required:"true"`
+}
+
+// String returns the string representation
+func (s Paths) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Paths) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Paths) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "Paths"}
+    if s.Quantity == nil {
+        invalidParams.Add(request.NewErrParamRequired("Quantity"))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// A complex type that identifies ways in which you want to restrict distribution
+// of your content.
+type Restrictions struct {
+    _ struct{} `type:"structure"`
+
+    // A complex type that controls the countries in which your content is distributed.
+    // For more information about geo restriction, see Restricting the Geographic
+    // Distribution of Your Content in the Amazon CloudFront Developer Guide. CloudFront
+    // determines the location of your users using MaxMind GeoIP databases. For
+    // information about the accuracy of these databases, see How accurate are your
+    // GeoIP databases? on the MaxMind website.
+    GeoRestriction *GeoRestriction `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s Restrictions) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Restrictions) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Restrictions) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "Restrictions"}
+    if s.GeoRestriction == nil {
+        invalidParams.Add(request.NewErrParamRequired("GeoRestriction"))
+    }
+    if s.GeoRestriction != nil {
+        if err := s.GeoRestriction.Validate(); err != nil {
+            invalidParams.AddNested("GeoRestriction", err.(request.ErrInvalidParams))
+        }
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// A complex type that contains information about the Amazon S3 bucket from
+// which you want CloudFront to get your media files for distribution.
+type S3Origin struct {
+    _ struct{} `type:"structure"`
+
+    // The DNS name of the S3 origin.
+    DomainName *string `type:"string" required:"true"`
+
+    // Your S3 origin's origin access identity.
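+    // It uses the same origin-access-identity/cloudfront/Id format that is described
+    // for S3OriginConfig below, for example:
+    //
+    //    origin-access-identity/cloudfront/E74FTE3AJFJ256A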
+    OriginAccessIdentity *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s S3Origin) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s S3Origin) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *S3Origin) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "S3Origin"}
+    if s.DomainName == nil {
+        invalidParams.Add(request.NewErrParamRequired("DomainName"))
+    }
+    if s.OriginAccessIdentity == nil {
+        invalidParams.Add(request.NewErrParamRequired("OriginAccessIdentity"))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// A complex type that contains information about the Amazon S3 origin. If the
+// origin is a custom origin, use the CustomOriginConfig element instead.
+type S3OriginConfig struct {
+    _ struct{} `type:"structure"`
+
+    // The CloudFront origin access identity to associate with the origin. Use an
+    // origin access identity to configure the origin so that end users can access
+    // objects in an Amazon S3 bucket only through CloudFront. If you want end
+    // users to be able to access objects using either the CloudFront URL or the
+    // Amazon S3 URL, specify an empty OriginAccessIdentity element. To delete the
+    // origin access identity from an existing distribution, update the distribution
+    // configuration and include an empty OriginAccessIdentity element. To replace
+    // the origin access identity, update the distribution configuration and specify
+    // the new origin access identity. Use the format origin-access-identity/cloudfront/Id
+    // where Id is the value that CloudFront returned in the Id element when you
+    // created the origin access identity.
+    OriginAccessIdentity *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s S3OriginConfig) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s S3OriginConfig) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *S3OriginConfig) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "S3OriginConfig"}
+    if s.OriginAccessIdentity == nil {
+        invalidParams.Add(request.NewErrParamRequired("OriginAccessIdentity"))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// A complex type that lists the AWS accounts that were included in the TrustedSigners
+// complex type, as well as their active CloudFront key pair IDs, if any.
+type Signer struct {
+    _ struct{} `type:"structure"`
+
+    // Specifies an AWS account that can create signed URLs. Values: self, which
+    // indicates that the AWS account that was used to create the distribution can
+    // create signed URLs, or an AWS account number. Omit the dashes in the account
+    // number.
+    AwsAccountNumber *string `type:"string"`
+
+    // A complex type that lists the active CloudFront key pairs, if any, that are
+    // associated with AwsAccountNumber.
+    KeyPairIds *KeyPairIds `type:"structure"`
+}
+
+// String returns the string representation
+func (s Signer) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Signer) GoString() string {
+    return s.String()
+}
+
+// A streaming distribution.
+type StreamingDistribution struct {
+    _ struct{} `type:"structure"`
+
+    // CloudFront automatically adds this element to the response only if you've
+    // set up the distribution to serve private content with signed URLs. The element
+    // lists the key pair IDs that CloudFront is aware of for each trusted signer.
+    // The Signer child element lists the AWS account number of the trusted signer
+    // (or an empty Self element if the signer is you). The Signer element also
+    // includes the IDs of any active key pairs associated with the trusted signer's
+    // AWS account. If no KeyPairId element appears for a Signer, that signer can't
+    // create working signed URLs.
+    ActiveTrustedSigners *ActiveTrustedSigners `type:"structure" required:"true"`
+
+    // The domain name corresponding to the streaming distribution. For example:
+    // s5c39gqb8ow64r.cloudfront.net.
+    DomainName *string `type:"string" required:"true"`
+
+    // The identifier for the streaming distribution. For example: EGTXBD79H29TRA8.
+    Id *string `type:"string" required:"true"`
+
+    // The date and time the distribution was last modified.
+    LastModifiedTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+    // The current status of the streaming distribution. When the status is Deployed,
+    // the distribution's information is fully propagated throughout the Amazon
+    // CloudFront system.
+    Status *string `type:"string" required:"true"`
+
+    // The current configuration information for the streaming distribution.
+    StreamingDistributionConfig *StreamingDistributionConfig `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s StreamingDistribution) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StreamingDistribution) GoString() string {
+    return s.String()
+}
+
+// The configuration for the streaming distribution.
+type StreamingDistributionConfig struct {
+    _ struct{} `type:"structure"`
+
+    // A complex type that contains information about CNAMEs (alternate domain names),
+    // if any, for this streaming distribution.
+    Aliases *Aliases `type:"structure"`
+
+    // A unique number that ensures the request can't be replayed. If the CallerReference
+    // is new (no matter the content of the StreamingDistributionConfig object),
+    // a new streaming distribution is created. If the CallerReference is a value
+    // you already sent in a previous request to create a streaming distribution,
+    // and the content of the StreamingDistributionConfig is identical to the original
+    // request (ignoring white space), the response includes the same information
+    // returned to the original request. If the CallerReference is a value you already
+    // sent in a previous request to create a streaming distribution but the content
+    // of the StreamingDistributionConfig is different from the original request,
+    // CloudFront returns a StreamingDistributionAlreadyExists error.
+    CallerReference *string `type:"string" required:"true"`
+
+    // Any comments you want to include about the streaming distribution.
+    Comment *string `type:"string" required:"true"`
+
+    // Whether the streaming distribution is enabled to accept end user requests
+    // for content.
+    Enabled *bool `type:"boolean" required:"true"`
+
+    // A complex type that controls whether access logs are written for the streaming
+    // distribution.
+    Logging *StreamingLoggingConfig `type:"structure"`
+
+    // A complex type that contains information about price class for this streaming
+    // distribution.
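+    // Valid values are given by the PriceClass constants at the end of this file;
+    // for example (a minimal sketch):
+    //
+    //    PriceClass: aws.String(cloudfront.PriceClassPriceClassAll),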
+ PriceClass *string `type:"string" enum:"PriceClass"` + + // A complex type that contains information about the Amazon S3 bucket from + // which you want CloudFront to get your media files for distribution. + S3Origin *S3Origin `type:"structure" required:"true"` + + // A complex type that specifies the AWS accounts, if any, that you want to + // allow to create signed URLs for private content. If you want to require signed + // URLs in requests for objects in the target origin that match the PathPattern + // for this cache behavior, specify true for Enabled, and specify the applicable + // values for Quantity and Items. For more information, go to Using a Signed + // URL to Serve Private Content in the Amazon CloudFront Developer Guide. If + // you don't want to require signed URLs in requests for objects that match + // PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To + // add, change, or remove one or more trusted signers, change Enabled to true + // (if it's currently false), change Quantity as applicable, and specify all + // of the trusted signers that you want to include in the updated distribution. + TrustedSigners *TrustedSigners `type:"structure" required:"true"` +} + +// String returns the string representation +func (s StreamingDistributionConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StreamingDistributionConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StreamingDistributionConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StreamingDistributionConfig"} + if s.CallerReference == nil { + invalidParams.Add(request.NewErrParamRequired("CallerReference")) + } + if s.Comment == nil { + invalidParams.Add(request.NewErrParamRequired("Comment")) + } + if s.Enabled == nil { + invalidParams.Add(request.NewErrParamRequired("Enabled")) + } + if s.S3Origin == nil { + invalidParams.Add(request.NewErrParamRequired("S3Origin")) + } + if s.TrustedSigners == nil { + invalidParams.Add(request.NewErrParamRequired("TrustedSigners")) + } + if s.Aliases != nil { + if err := s.Aliases.Validate(); err != nil { + invalidParams.AddNested("Aliases", err.(request.ErrInvalidParams)) + } + } + if s.Logging != nil { + if err := s.Logging.Validate(); err != nil { + invalidParams.AddNested("Logging", err.(request.ErrInvalidParams)) + } + } + if s.S3Origin != nil { + if err := s.S3Origin.Validate(); err != nil { + invalidParams.AddNested("S3Origin", err.(request.ErrInvalidParams)) + } + } + if s.TrustedSigners != nil { + if err := s.TrustedSigners.Validate(); err != nil { + invalidParams.AddNested("TrustedSigners", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A streaming distribution list. +type StreamingDistributionList struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether more streaming distributions remain to be listed. + // If your results were truncated, you can make a follow-up pagination request + // using the Marker request parameter to retrieve more distributions in the + // list. + IsTruncated *bool `type:"boolean" required:"true"` + + // A complex type that contains one StreamingDistributionSummary element for + // each distribution that was created by the current AWS account. 
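+    // All pages of the list can be walked with the ListStreamingDistributionsPages
+    // helper (a minimal sketch; svc is assumed to be an existing *cloudfront.CloudFront
+    // client):
+    //
+    //    err := svc.ListStreamingDistributionsPages(
+    //        &cloudfront.ListStreamingDistributionsInput{},
+    //        func(page *cloudfront.ListStreamingDistributionsOutput, lastPage bool) bool {
+    //            for _, d := range page.StreamingDistributionList.Items {
+    //                fmt.Println(*d.DomainName)
+    //            }
+    //            return true // keep paging
+    //        })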
+ Items []*StreamingDistributionSummary `locationNameList:"StreamingDistributionSummary" type:"list"` + + // The value you provided for the Marker request parameter. + Marker *string `type:"string" required:"true"` + + // The value you provided for the MaxItems request parameter. + MaxItems *int64 `type:"integer" required:"true"` + + // If IsTruncated is true, this element is present and contains the value you + // can use for the Marker request parameter to continue listing your streaming + // distributions where they left off. + NextMarker *string `type:"string"` + + // The number of streaming distributions that were created by the current AWS + // account. + Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s StreamingDistributionList) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StreamingDistributionList) GoString() string { + return s.String() +} + +// A summary of the information for an Amazon CloudFront streaming distribution. +type StreamingDistributionSummary struct { + _ struct{} `type:"structure"` + + // A complex type that contains information about CNAMEs (alternate domain names), + // if any, for this streaming distribution. + Aliases *Aliases `type:"structure" required:"true"` + + // The comment originally specified when this distribution was created. + Comment *string `type:"string" required:"true"` + + // The domain name corresponding to the distribution. For example: d604721fxaaqy9.cloudfront.net. + DomainName *string `type:"string" required:"true"` + + // Whether the distribution is enabled to accept end user requests for content. + Enabled *bool `type:"boolean" required:"true"` + + // The identifier for the distribution. For example: EDFDVBD632BHDS5. + Id *string `type:"string" required:"true"` + + // The date and time the distribution was last modified. + LastModifiedTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + PriceClass *string `type:"string" required:"true" enum:"PriceClass"` + + // A complex type that contains information about the Amazon S3 bucket from + // which you want CloudFront to get your media files for distribution. + S3Origin *S3Origin `type:"structure" required:"true"` + + // Indicates the current status of the distribution. When the status is Deployed, + // the distribution's information is fully propagated throughout the Amazon + // CloudFront system. + Status *string `type:"string" required:"true"` + + // A complex type that specifies the AWS accounts, if any, that you want to + // allow to create signed URLs for private content. If you want to require signed + // URLs in requests for objects in the target origin that match the PathPattern + // for this cache behavior, specify true for Enabled, and specify the applicable + // values for Quantity and Items. For more information, go to Using a Signed + // URL to Serve Private Content in the Amazon CloudFront Developer Guide. If + // you don't want to require signed URLs in requests for objects that match + // PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To + // add, change, or remove one or more trusted signers, change Enabled to true + // (if it's currently false), change Quantity as applicable, and specify all + // of the trusted signers that you want to include in the updated distribution. 
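+    // For example, to allow only the account that owns the distribution to create
+    // signed URLs (a minimal sketch; self is the literal value described under
+    // Signer):
+    //
+    //    TrustedSigners: &cloudfront.TrustedSigners{
+    //        Enabled:  aws.Bool(true),
+    //        Quantity: aws.Int64(1),
+    //        Items:    []*string{aws.String("self")},
+    //    },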
+ TrustedSigners *TrustedSigners `type:"structure" required:"true"` +} + +// String returns the string representation +func (s StreamingDistributionSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StreamingDistributionSummary) GoString() string { + return s.String() +} + +// A complex type that controls whether access logs are written for this streaming +// distribution. +type StreamingLoggingConfig struct { + _ struct{} `type:"structure"` + + // The Amazon S3 bucket to store the access logs in, for example, myawslogbucket.s3.amazonaws.com. + Bucket *string `type:"string" required:"true"` + + // Specifies whether you want CloudFront to save access logs to an Amazon S3 + // bucket. If you do not want to enable logging when you create a streaming + // distribution or if you want to disable logging for an existing streaming + // distribution, specify false for Enabled, and specify empty Bucket and Prefix + // elements. If you specify false for Enabled but you specify values for Bucket + // and Prefix, the values are automatically deleted. + Enabled *bool `type:"boolean" required:"true"` + + // An optional string that you want CloudFront to prefix to the access log filenames + // for this streaming distribution, for example, myprefix/. If you want to enable + // logging, but you do not want to specify a prefix, you still must include + // an empty Prefix element in the Logging element. + Prefix *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s StreamingLoggingConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StreamingLoggingConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StreamingLoggingConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StreamingLoggingConfig"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Enabled == nil { + invalidParams.Add(request.NewErrParamRequired("Enabled")) + } + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that specifies the AWS accounts, if any, that you want to +// allow to create signed URLs for private content. If you want to require signed +// URLs in requests for objects in the target origin that match the PathPattern +// for this cache behavior, specify true for Enabled, and specify the applicable +// values for Quantity and Items. For more information, go to Using a Signed +// URL to Serve Private Content in the Amazon CloudFront Developer Guide. If +// you don't want to require signed URLs in requests for objects that match +// PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To +// add, change, or remove one or more trusted signers, change Enabled to true +// (if it's currently false), change Quantity as applicable, and specify all +// of the trusted signers that you want to include in the updated distribution. +type TrustedSigners struct { + _ struct{} `type:"structure"` + + // Specifies whether you want to require end users to use signed URLs to access + // the files specified by PathPattern and TargetOriginId. + Enabled *bool `type:"boolean" required:"true"` + + // Optional: A complex type that contains trusted signers for this cache behavior. 
+ // If Quantity is 0, you can omit Items. + Items []*string `locationNameList:"AwsAccountNumber" type:"list"` + + // The number of trusted signers for this cache behavior. + Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s TrustedSigners) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrustedSigners) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TrustedSigners) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TrustedSigners"} + if s.Enabled == nil { + invalidParams.Add(request.NewErrParamRequired("Enabled")) + } + if s.Quantity == nil { + invalidParams.Add(request.NewErrParamRequired("Quantity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The request to update an origin access identity. +type UpdateCloudFrontOriginAccessIdentityInput struct { + _ struct{} `type:"structure" payload:"CloudFrontOriginAccessIdentityConfig"` + + // The identity's configuration information. + CloudFrontOriginAccessIdentityConfig *OriginAccessIdentityConfig `locationName:"CloudFrontOriginAccessIdentityConfig" type:"structure" required:"true"` + + // The identity's id. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The value of the ETag header you received when retrieving the identity's + // configuration. For example: E2QWRUHAPOMQZL. + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` +} + +// String returns the string representation +func (s UpdateCloudFrontOriginAccessIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateCloudFrontOriginAccessIdentityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateCloudFrontOriginAccessIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateCloudFrontOriginAccessIdentityInput"} + if s.CloudFrontOriginAccessIdentityConfig == nil { + invalidParams.Add(request.NewErrParamRequired("CloudFrontOriginAccessIdentityConfig")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.CloudFrontOriginAccessIdentityConfig != nil { + if err := s.CloudFrontOriginAccessIdentityConfig.Validate(); err != nil { + invalidParams.AddNested("CloudFrontOriginAccessIdentityConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The returned result of the corresponding request. +type UpdateCloudFrontOriginAccessIdentityOutput struct { + _ struct{} `type:"structure" payload:"CloudFrontOriginAccessIdentity"` + + // The origin access identity's information. + CloudFrontOriginAccessIdentity *OriginAccessIdentity `type:"structure"` + + // The current version of the configuration. For example: E2QWRUHAPOMQZL. + ETag *string `location:"header" locationName:"ETag" type:"string"` +} + +// String returns the string representation +func (s UpdateCloudFrontOriginAccessIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateCloudFrontOriginAccessIdentityOutput) GoString() string { + return s.String() +} + +// The request to update a distribution. 
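+// To apply an update, first retrieve the current configuration and ETag with
+// GetDistributionConfig, modify the configuration, and pass the ETag back in
+// IfMatch. A minimal sketch (svc is assumed to be an existing *cloudfront.CloudFront
+// client, and the Id value is illustrative):
+//
+//    cfg, err := svc.GetDistributionConfig(&cloudfront.GetDistributionConfigInput{
+//        Id: aws.String("EDFDVBD632BHDS5"),
+//    })
+//    if err != nil {
+//        return err
+//    }
+//    cfg.DistributionConfig.Comment = aws.String("updated comment")
+//    _, err = svc.UpdateDistribution(&cloudfront.UpdateDistributionInput{
+//        Id:                 aws.String("EDFDVBD632BHDS5"),
+//        IfMatch:            cfg.ETag,
+//        DistributionConfig: cfg.DistributionConfig,
+//    })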
+type UpdateDistributionInput struct { + _ struct{} `type:"structure" payload:"DistributionConfig"` + + // The distribution's configuration information. + DistributionConfig *DistributionConfig `locationName:"DistributionConfig" type:"structure" required:"true"` + + // The distribution's id. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The value of the ETag header you received when retrieving the distribution's + // configuration. For example: E2QWRUHAPOMQZL. + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` +} + +// String returns the string representation +func (s UpdateDistributionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDistributionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateDistributionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateDistributionInput"} + if s.DistributionConfig == nil { + invalidParams.Add(request.NewErrParamRequired("DistributionConfig")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.DistributionConfig != nil { + if err := s.DistributionConfig.Validate(); err != nil { + invalidParams.AddNested("DistributionConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The returned result of the corresponding request. +type UpdateDistributionOutput struct { + _ struct{} `type:"structure" payload:"Distribution"` + + // The distribution's information. + Distribution *Distribution `type:"structure"` + + // The current version of the configuration. For example: E2QWRUHAPOMQZL. + ETag *string `location:"header" locationName:"ETag" type:"string"` +} + +// String returns the string representation +func (s UpdateDistributionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDistributionOutput) GoString() string { + return s.String() +} + +// The request to update a streaming distribution. +type UpdateStreamingDistributionInput struct { + _ struct{} `type:"structure" payload:"StreamingDistributionConfig"` + + // The streaming distribution's id. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The value of the ETag header you received when retrieving the streaming distribution's + // configuration. For example: E2QWRUHAPOMQZL. + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` + + // The streaming distribution's configuration information. + StreamingDistributionConfig *StreamingDistributionConfig `locationName:"StreamingDistributionConfig" type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateStreamingDistributionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateStreamingDistributionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateStreamingDistributionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateStreamingDistributionInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.StreamingDistributionConfig == nil { + invalidParams.Add(request.NewErrParamRequired("StreamingDistributionConfig")) + } + if s.StreamingDistributionConfig != nil { + if err := s.StreamingDistributionConfig.Validate(); err != nil { + invalidParams.AddNested("StreamingDistributionConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The returned result of the corresponding request. +type UpdateStreamingDistributionOutput struct { + _ struct{} `type:"structure" payload:"StreamingDistribution"` + + // The current version of the configuration. For example: E2QWRUHAPOMQZL. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // The streaming distribution's information. + StreamingDistribution *StreamingDistribution `type:"structure"` +} + +// String returns the string representation +func (s UpdateStreamingDistributionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateStreamingDistributionOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about viewer certificates for this +// distribution. +type ViewerCertificate struct { + _ struct{} `type:"structure"` + + // If you want viewers to use HTTPS to request your objects and you're using + // an alternate domain name in your object URLs (for example, https://example.com/logo.jpg), + // specify the ACM certificate ARN of the custom viewer certificate for this + // distribution. Specify either this value, IAMCertificateId, or CloudFrontDefaultCertificate. + ACMCertificateArn *string `type:"string"` + + // Note: this field is deprecated. Please use one of [ACMCertificateArn, IAMCertificateId, + // CloudFrontDefaultCertificate]. + Certificate *string `deprecated:"true" type:"string"` + + // Note: this field is deprecated. Please use one of [ACMCertificateArn, IAMCertificateId, + // CloudFrontDefaultCertificate]. + CertificateSource *string `deprecated:"true" type:"string" enum:"CertificateSource"` + + // If you want viewers to use HTTPS to request your objects and you're using + // the CloudFront domain name of your distribution in your object URLs (for + // example, https://d111111abcdef8.cloudfront.net/logo.jpg), set to true. Omit + // this value if you are setting an ACMCertificateArn or IAMCertificateId. + CloudFrontDefaultCertificate *bool `type:"boolean"` + + // If you want viewers to use HTTPS to request your objects and you're using + // an alternate domain name in your object URLs (for example, https://example.com/logo.jpg), + // specify the IAM certificate identifier of the custom viewer certificate for + // this distribution. Specify either this value, ACMCertificateArn, or CloudFrontDefaultCertificate. + IAMCertificateId *string `type:"string"` + + // Specify the minimum version of the SSL protocol that you want CloudFront + // to use, SSLv3 or TLSv1, for HTTPS connections. CloudFront will serve your + // objects only to browsers or devices that support at least the SSL version + // that you specify. The TLSv1 protocol is more secure, so we recommend that + // you specify SSLv3 only if your users are using browsers or devices that don't + // support TLSv1. 
If you're using a custom certificate (if you specify a value + // for IAMCertificateId) and if you're using dedicated IP (if you specify vip + // for SSLSupportMethod), you can choose SSLv3 or TLSv1 as the MinimumProtocolVersion. + // If you're using a custom certificate (if you specify a value for IAMCertificateId) + // and if you're using SNI (if you specify sni-only for SSLSupportMethod), you + // must specify TLSv1 for MinimumProtocolVersion. + MinimumProtocolVersion *string `type:"string" enum:"MinimumProtocolVersion"` + + // If you specify a value for IAMCertificateId, you must also specify how you + // want CloudFront to serve HTTPS requests. Valid values are vip and sni-only. + // If you specify vip, CloudFront uses dedicated IP addresses for your content + // and can respond to HTTPS requests from any viewer. However, you must request + // permission to use this feature, and you incur additional monthly charges. + // If you specify sni-only, CloudFront can only respond to HTTPS requests from + // viewers that support Server Name Indication (SNI). All modern browsers support + // SNI, but some browsers still in use don't support SNI. Do not specify a value + // for SSLSupportMethod if you specified true for CloudFrontDefaultCertificate. + SSLSupportMethod *string `type:"string" enum:"SSLSupportMethod"` +} + +// String returns the string representation +func (s ViewerCertificate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ViewerCertificate) GoString() string { + return s.String() +} + +const ( + // @enum CertificateSource + CertificateSourceCloudfront = "cloudfront" + // @enum CertificateSource + CertificateSourceIam = "iam" + // @enum CertificateSource + CertificateSourceAcm = "acm" +) + +const ( + // @enum GeoRestrictionType + GeoRestrictionTypeBlacklist = "blacklist" + // @enum GeoRestrictionType + GeoRestrictionTypeWhitelist = "whitelist" + // @enum GeoRestrictionType + GeoRestrictionTypeNone = "none" +) + +const ( + // @enum ItemSelection + ItemSelectionNone = "none" + // @enum ItemSelection + ItemSelectionWhitelist = "whitelist" + // @enum ItemSelection + ItemSelectionAll = "all" +) + +const ( + // @enum Method + MethodGet = "GET" + // @enum Method + MethodHead = "HEAD" + // @enum Method + MethodPost = "POST" + // @enum Method + MethodPut = "PUT" + // @enum Method + MethodPatch = "PATCH" + // @enum Method + MethodOptions = "OPTIONS" + // @enum Method + MethodDelete = "DELETE" +) + +const ( + // @enum MinimumProtocolVersion + MinimumProtocolVersionSslv3 = "SSLv3" + // @enum MinimumProtocolVersion + MinimumProtocolVersionTlsv1 = "TLSv1" +) + +const ( + // @enum OriginProtocolPolicy + OriginProtocolPolicyHttpOnly = "http-only" + // @enum OriginProtocolPolicy + OriginProtocolPolicyMatchViewer = "match-viewer" + // @enum OriginProtocolPolicy + OriginProtocolPolicyHttpsOnly = "https-only" +) + +const ( + // @enum PriceClass + PriceClassPriceClass100 = "PriceClass_100" + // @enum PriceClass + PriceClassPriceClass200 = "PriceClass_200" + // @enum PriceClass + PriceClassPriceClassAll = "PriceClass_All" +) + +const ( + // @enum SSLSupportMethod + SSLSupportMethodSniOnly = "sni-only" + // @enum SSLSupportMethod + SSLSupportMethodVip = "vip" +) + +const ( + // @enum SslProtocol + SslProtocolSslv3 = "SSLv3" + // @enum SslProtocol + SslProtocolTlsv1 = "TLSv1" + // @enum SslProtocol + SslProtocolTlsv11 = "TLSv1.1" + // @enum SslProtocol + SslProtocolTlsv12 = "TLSv1.2" +) + +const ( + // @enum ViewerProtocolPolicy + 
ViewerProtocolPolicyAllowAll = "allow-all"
+    // @enum ViewerProtocolPolicy
+    ViewerProtocolPolicyHttpsOnly = "https-only"
+    // @enum ViewerProtocolPolicy
+    ViewerProtocolPolicyRedirectToHttps = "redirect-to-https"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/cloudfrontiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/cloudfrontiface/interface.go
new file mode 100644
index 000000000..117091437
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/cloudfrontiface/interface.go
@@ -0,0 +1,110 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

+// Package cloudfrontiface provides an interface for Amazon CloudFront.
+package cloudfrontiface
+
+import (
+    "github.com/aws/aws-sdk-go/aws/request"
+    "github.com/aws/aws-sdk-go/service/cloudfront"
+)
+
+// CloudFrontAPI is the interface type for cloudfront.CloudFront.
+type CloudFrontAPI interface {
+    CreateCloudFrontOriginAccessIdentityRequest(*cloudfront.CreateCloudFrontOriginAccessIdentityInput) (*request.Request, *cloudfront.CreateCloudFrontOriginAccessIdentityOutput)
+
+    CreateCloudFrontOriginAccessIdentity(*cloudfront.CreateCloudFrontOriginAccessIdentityInput) (*cloudfront.CreateCloudFrontOriginAccessIdentityOutput, error)
+
+    CreateDistributionRequest(*cloudfront.CreateDistributionInput) (*request.Request, *cloudfront.CreateDistributionOutput)
+
+    CreateDistribution(*cloudfront.CreateDistributionInput) (*cloudfront.CreateDistributionOutput, error)
+
+    CreateInvalidationRequest(*cloudfront.CreateInvalidationInput) (*request.Request, *cloudfront.CreateInvalidationOutput)
+
+    CreateInvalidation(*cloudfront.CreateInvalidationInput) (*cloudfront.CreateInvalidationOutput, error)
+
+    CreateStreamingDistributionRequest(*cloudfront.CreateStreamingDistributionInput) (*request.Request, *cloudfront.CreateStreamingDistributionOutput)
+
+    CreateStreamingDistribution(*cloudfront.CreateStreamingDistributionInput) (*cloudfront.CreateStreamingDistributionOutput, error)
+
+    DeleteCloudFrontOriginAccessIdentityRequest(*cloudfront.DeleteCloudFrontOriginAccessIdentityInput) (*request.Request, *cloudfront.DeleteCloudFrontOriginAccessIdentityOutput)
+
+    DeleteCloudFrontOriginAccessIdentity(*cloudfront.DeleteCloudFrontOriginAccessIdentityInput) (*cloudfront.DeleteCloudFrontOriginAccessIdentityOutput, error)
+
+    DeleteDistributionRequest(*cloudfront.DeleteDistributionInput) (*request.Request, *cloudfront.DeleteDistributionOutput)
+
+    DeleteDistribution(*cloudfront.DeleteDistributionInput) (*cloudfront.DeleteDistributionOutput, error)
+
+    DeleteStreamingDistributionRequest(*cloudfront.DeleteStreamingDistributionInput) (*request.Request, *cloudfront.DeleteStreamingDistributionOutput)
+
+    DeleteStreamingDistribution(*cloudfront.DeleteStreamingDistributionInput) (*cloudfront.DeleteStreamingDistributionOutput, error)
+
+    GetCloudFrontOriginAccessIdentityRequest(*cloudfront.GetCloudFrontOriginAccessIdentityInput) (*request.Request, *cloudfront.GetCloudFrontOriginAccessIdentityOutput)
+
+    GetCloudFrontOriginAccessIdentity(*cloudfront.GetCloudFrontOriginAccessIdentityInput) (*cloudfront.GetCloudFrontOriginAccessIdentityOutput, error)
+
+    GetCloudFrontOriginAccessIdentityConfigRequest(*cloudfront.GetCloudFrontOriginAccessIdentityConfigInput) (*request.Request, *cloudfront.GetCloudFrontOriginAccessIdentityConfigOutput)
+
+    GetCloudFrontOriginAccessIdentityConfig(*cloudfront.GetCloudFrontOriginAccessIdentityConfigInput) (*cloudfront.GetCloudFrontOriginAccessIdentityConfigOutput, error)
+
+
GetDistributionRequest(*cloudfront.GetDistributionInput) (*request.Request, *cloudfront.GetDistributionOutput) + + GetDistribution(*cloudfront.GetDistributionInput) (*cloudfront.GetDistributionOutput, error) + + GetDistributionConfigRequest(*cloudfront.GetDistributionConfigInput) (*request.Request, *cloudfront.GetDistributionConfigOutput) + + GetDistributionConfig(*cloudfront.GetDistributionConfigInput) (*cloudfront.GetDistributionConfigOutput, error) + + GetInvalidationRequest(*cloudfront.GetInvalidationInput) (*request.Request, *cloudfront.GetInvalidationOutput) + + GetInvalidation(*cloudfront.GetInvalidationInput) (*cloudfront.GetInvalidationOutput, error) + + GetStreamingDistributionRequest(*cloudfront.GetStreamingDistributionInput) (*request.Request, *cloudfront.GetStreamingDistributionOutput) + + GetStreamingDistribution(*cloudfront.GetStreamingDistributionInput) (*cloudfront.GetStreamingDistributionOutput, error) + + GetStreamingDistributionConfigRequest(*cloudfront.GetStreamingDistributionConfigInput) (*request.Request, *cloudfront.GetStreamingDistributionConfigOutput) + + GetStreamingDistributionConfig(*cloudfront.GetStreamingDistributionConfigInput) (*cloudfront.GetStreamingDistributionConfigOutput, error) + + ListCloudFrontOriginAccessIdentitiesRequest(*cloudfront.ListCloudFrontOriginAccessIdentitiesInput) (*request.Request, *cloudfront.ListCloudFrontOriginAccessIdentitiesOutput) + + ListCloudFrontOriginAccessIdentities(*cloudfront.ListCloudFrontOriginAccessIdentitiesInput) (*cloudfront.ListCloudFrontOriginAccessIdentitiesOutput, error) + + ListCloudFrontOriginAccessIdentitiesPages(*cloudfront.ListCloudFrontOriginAccessIdentitiesInput, func(*cloudfront.ListCloudFrontOriginAccessIdentitiesOutput, bool) bool) error + + ListDistributionsRequest(*cloudfront.ListDistributionsInput) (*request.Request, *cloudfront.ListDistributionsOutput) + + ListDistributions(*cloudfront.ListDistributionsInput) (*cloudfront.ListDistributionsOutput, error) + + ListDistributionsPages(*cloudfront.ListDistributionsInput, func(*cloudfront.ListDistributionsOutput, bool) bool) error + + ListDistributionsByWebACLIdRequest(*cloudfront.ListDistributionsByWebACLIdInput) (*request.Request, *cloudfront.ListDistributionsByWebACLIdOutput) + + ListDistributionsByWebACLId(*cloudfront.ListDistributionsByWebACLIdInput) (*cloudfront.ListDistributionsByWebACLIdOutput, error) + + ListInvalidationsRequest(*cloudfront.ListInvalidationsInput) (*request.Request, *cloudfront.ListInvalidationsOutput) + + ListInvalidations(*cloudfront.ListInvalidationsInput) (*cloudfront.ListInvalidationsOutput, error) + + ListInvalidationsPages(*cloudfront.ListInvalidationsInput, func(*cloudfront.ListInvalidationsOutput, bool) bool) error + + ListStreamingDistributionsRequest(*cloudfront.ListStreamingDistributionsInput) (*request.Request, *cloudfront.ListStreamingDistributionsOutput) + + ListStreamingDistributions(*cloudfront.ListStreamingDistributionsInput) (*cloudfront.ListStreamingDistributionsOutput, error) + + ListStreamingDistributionsPages(*cloudfront.ListStreamingDistributionsInput, func(*cloudfront.ListStreamingDistributionsOutput, bool) bool) error + + UpdateCloudFrontOriginAccessIdentityRequest(*cloudfront.UpdateCloudFrontOriginAccessIdentityInput) (*request.Request, *cloudfront.UpdateCloudFrontOriginAccessIdentityOutput) + + UpdateCloudFrontOriginAccessIdentity(*cloudfront.UpdateCloudFrontOriginAccessIdentityInput) (*cloudfront.UpdateCloudFrontOriginAccessIdentityOutput, error) + + 
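+    // Implementations of CloudFrontAPI, such as a hand-written mock, can stand
+    // in for *cloudfront.CloudFront in unit tests. A minimal sketch, assuming a
+    // hypothetical mockCloudFront type that embeds the interface and overrides
+    // only the calls under test:
+    //
+    //    type mockCloudFront struct {
+    //        cloudfrontiface.CloudFrontAPI
+    //    }
+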
UpdateDistributionRequest(*cloudfront.UpdateDistributionInput) (*request.Request, *cloudfront.UpdateDistributionOutput) + + UpdateDistribution(*cloudfront.UpdateDistributionInput) (*cloudfront.UpdateDistributionOutput, error) + + UpdateStreamingDistributionRequest(*cloudfront.UpdateStreamingDistributionInput) (*request.Request, *cloudfront.UpdateStreamingDistributionOutput) + + UpdateStreamingDistribution(*cloudfront.UpdateStreamingDistributionInput) (*cloudfront.UpdateStreamingDistributionOutput, error) +} + +var _ CloudFrontAPI = (*cloudfront.CloudFront)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/examples_test.go new file mode 100644 index 000000000..a10507eb5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/examples_test.go @@ -0,0 +1,917 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package cloudfront_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/cloudfront" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleCloudFront_CreateCloudFrontOriginAccessIdentity() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.CreateCloudFrontOriginAccessIdentityInput{ + CloudFrontOriginAccessIdentityConfig: &cloudfront.OriginAccessIdentityConfig{ // Required + CallerReference: aws.String("string"), // Required + Comment: aws.String("string"), // Required + }, + } + resp, err := svc.CreateCloudFrontOriginAccessIdentity(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFront_CreateDistribution() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.CreateDistributionInput{ + DistributionConfig: &cloudfront.DistributionConfig{ // Required + CallerReference: aws.String("string"), // Required + Comment: aws.String("string"), // Required + DefaultCacheBehavior: &cloudfront.DefaultCacheBehavior{ // Required + ForwardedValues: &cloudfront.ForwardedValues{ // Required + Cookies: &cloudfront.CookiePreference{ // Required + Forward: aws.String("ItemSelection"), // Required + WhitelistedNames: &cloudfront.CookieNames{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + }, + QueryString: aws.Bool(true), // Required + Headers: &cloudfront.Headers{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + }, + MinTTL: aws.Int64(1), // Required + TargetOriginId: aws.String("string"), // Required + TrustedSigners: &cloudfront.TrustedSigners{ // Required + Enabled: aws.Bool(true), // Required + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + ViewerProtocolPolicy: aws.String("ViewerProtocolPolicy"), // Required + AllowedMethods: &cloudfront.AllowedMethods{ + Items: []*string{ // Required + aws.String("Method"), // Required + // More values... + }, + Quantity: aws.Int64(1), // Required + CachedMethods: &cloudfront.CachedMethods{ + Items: []*string{ // Required + aws.String("Method"), // Required + // More values... 
+ }, + Quantity: aws.Int64(1), // Required + }, + }, + Compress: aws.Bool(true), + DefaultTTL: aws.Int64(1), + MaxTTL: aws.Int64(1), + SmoothStreaming: aws.Bool(true), + }, + Enabled: aws.Bool(true), // Required + Origins: &cloudfront.Origins{ // Required + Quantity: aws.Int64(1), // Required + Items: []*cloudfront.Origin{ + { // Required + DomainName: aws.String("string"), // Required + Id: aws.String("string"), // Required + CustomHeaders: &cloudfront.CustomHeaders{ + Quantity: aws.Int64(1), // Required + Items: []*cloudfront.OriginCustomHeader{ + { // Required + HeaderName: aws.String("string"), // Required + HeaderValue: aws.String("string"), // Required + }, + // More values... + }, + }, + CustomOriginConfig: &cloudfront.CustomOriginConfig{ + HTTPPort: aws.Int64(1), // Required + HTTPSPort: aws.Int64(1), // Required + OriginProtocolPolicy: aws.String("OriginProtocolPolicy"), // Required + OriginSslProtocols: &cloudfront.OriginSslProtocols{ + Items: []*string{ // Required + aws.String("SslProtocol"), // Required + // More values... + }, + Quantity: aws.Int64(1), // Required + }, + }, + OriginPath: aws.String("string"), + S3OriginConfig: &cloudfront.S3OriginConfig{ + OriginAccessIdentity: aws.String("string"), // Required + }, + }, + // More values... + }, + }, + Aliases: &cloudfront.Aliases{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + CacheBehaviors: &cloudfront.CacheBehaviors{ + Quantity: aws.Int64(1), // Required + Items: []*cloudfront.CacheBehavior{ + { // Required + ForwardedValues: &cloudfront.ForwardedValues{ // Required + Cookies: &cloudfront.CookiePreference{ // Required + Forward: aws.String("ItemSelection"), // Required + WhitelistedNames: &cloudfront.CookieNames{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + }, + QueryString: aws.Bool(true), // Required + Headers: &cloudfront.Headers{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + }, + MinTTL: aws.Int64(1), // Required + PathPattern: aws.String("string"), // Required + TargetOriginId: aws.String("string"), // Required + TrustedSigners: &cloudfront.TrustedSigners{ // Required + Enabled: aws.Bool(true), // Required + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + ViewerProtocolPolicy: aws.String("ViewerProtocolPolicy"), // Required + AllowedMethods: &cloudfront.AllowedMethods{ + Items: []*string{ // Required + aws.String("Method"), // Required + // More values... + }, + Quantity: aws.Int64(1), // Required + CachedMethods: &cloudfront.CachedMethods{ + Items: []*string{ // Required + aws.String("Method"), // Required + // More values... + }, + Quantity: aws.Int64(1), // Required + }, + }, + Compress: aws.Bool(true), + DefaultTTL: aws.Int64(1), + MaxTTL: aws.Int64(1), + SmoothStreaming: aws.Bool(true), + }, + // More values... + }, + }, + CustomErrorResponses: &cloudfront.CustomErrorResponses{ + Quantity: aws.Int64(1), // Required + Items: []*cloudfront.CustomErrorResponse{ + { // Required + ErrorCode: aws.Int64(1), // Required + ErrorCachingMinTTL: aws.Int64(1), + ResponseCode: aws.String("string"), + ResponsePagePath: aws.String("string"), + }, + // More values... 
+ }, + }, + DefaultRootObject: aws.String("string"), + Logging: &cloudfront.LoggingConfig{ + Bucket: aws.String("string"), // Required + Enabled: aws.Bool(true), // Required + IncludeCookies: aws.Bool(true), // Required + Prefix: aws.String("string"), // Required + }, + PriceClass: aws.String("PriceClass"), + Restrictions: &cloudfront.Restrictions{ + GeoRestriction: &cloudfront.GeoRestriction{ // Required + Quantity: aws.Int64(1), // Required + RestrictionType: aws.String("GeoRestrictionType"), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + }, + ViewerCertificate: &cloudfront.ViewerCertificate{ + ACMCertificateArn: aws.String("string"), + Certificate: aws.String("string"), + CertificateSource: aws.String("CertificateSource"), + CloudFrontDefaultCertificate: aws.Bool(true), + IAMCertificateId: aws.String("string"), + MinimumProtocolVersion: aws.String("MinimumProtocolVersion"), + SSLSupportMethod: aws.String("SSLSupportMethod"), + }, + WebACLId: aws.String("string"), + }, + } + resp, err := svc.CreateDistribution(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFront_CreateInvalidation() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.CreateInvalidationInput{ + DistributionId: aws.String("string"), // Required + InvalidationBatch: &cloudfront.InvalidationBatch{ // Required + CallerReference: aws.String("string"), // Required + Paths: &cloudfront.Paths{ // Required + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + }, + } + resp, err := svc.CreateInvalidation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFront_CreateStreamingDistribution() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.CreateStreamingDistributionInput{ + StreamingDistributionConfig: &cloudfront.StreamingDistributionConfig{ // Required + CallerReference: aws.String("string"), // Required + Comment: aws.String("string"), // Required + Enabled: aws.Bool(true), // Required + S3Origin: &cloudfront.S3Origin{ // Required + DomainName: aws.String("string"), // Required + OriginAccessIdentity: aws.String("string"), // Required + }, + TrustedSigners: &cloudfront.TrustedSigners{ // Required + Enabled: aws.Bool(true), // Required + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + Aliases: &cloudfront.Aliases{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + Logging: &cloudfront.StreamingLoggingConfig{ + Bucket: aws.String("string"), // Required + Enabled: aws.Bool(true), // Required + Prefix: aws.String("string"), // Required + }, + PriceClass: aws.String("PriceClass"), + }, + } + resp, err := svc.CreateStreamingDistribution(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
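+	// (The response also includes the new streaming distribution's Location
+	// and ETag; that ETag is what later Update and Delete calls expect in
+	// IfMatch.)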
+ fmt.Println(resp) +} + +func ExampleCloudFront_DeleteCloudFrontOriginAccessIdentity() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.DeleteCloudFrontOriginAccessIdentityInput{ + Id: aws.String("string"), // Required + IfMatch: aws.String("string"), + } + resp, err := svc.DeleteCloudFrontOriginAccessIdentity(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFront_DeleteDistribution() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.DeleteDistributionInput{ + Id: aws.String("string"), // Required + IfMatch: aws.String("string"), + } + resp, err := svc.DeleteDistribution(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFront_DeleteStreamingDistribution() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.DeleteStreamingDistributionInput{ + Id: aws.String("string"), // Required + IfMatch: aws.String("string"), + } + resp, err := svc.DeleteStreamingDistribution(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFront_GetCloudFrontOriginAccessIdentity() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.GetCloudFrontOriginAccessIdentityInput{ + Id: aws.String("string"), // Required + } + resp, err := svc.GetCloudFrontOriginAccessIdentity(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFront_GetCloudFrontOriginAccessIdentityConfig() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.GetCloudFrontOriginAccessIdentityConfigInput{ + Id: aws.String("string"), // Required + } + resp, err := svc.GetCloudFrontOriginAccessIdentityConfig(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFront_GetDistribution() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.GetDistributionInput{ + Id: aws.String("string"), // Required + } + resp, err := svc.GetDistribution(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFront_GetDistributionConfig() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.GetDistributionConfigInput{ + Id: aws.String("string"), // Required + } + resp, err := svc.GetDistributionConfig(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
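+	// (The config comes back with its current ETag; pass that value as
+	// IfMatch when calling UpdateDistribution.)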
+ fmt.Println(resp) +} + +func ExampleCloudFront_GetInvalidation() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.GetInvalidationInput{ + DistributionId: aws.String("string"), // Required + Id: aws.String("string"), // Required + } + resp, err := svc.GetInvalidation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFront_GetStreamingDistribution() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.GetStreamingDistributionInput{ + Id: aws.String("string"), // Required + } + resp, err := svc.GetStreamingDistribution(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFront_GetStreamingDistributionConfig() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.GetStreamingDistributionConfigInput{ + Id: aws.String("string"), // Required + } + resp, err := svc.GetStreamingDistributionConfig(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFront_ListCloudFrontOriginAccessIdentities() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.ListCloudFrontOriginAccessIdentitiesInput{ + Marker: aws.String("string"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListCloudFrontOriginAccessIdentities(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFront_ListDistributions() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.ListDistributionsInput{ + Marker: aws.String("string"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListDistributions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFront_ListDistributionsByWebACLId() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.ListDistributionsByWebACLIdInput{ + WebACLId: aws.String("string"), // Required + Marker: aws.String("string"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListDistributionsByWebACLId(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFront_ListInvalidations() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.ListInvalidationsInput{ + DistributionId: aws.String("string"), // Required + Marker: aws.String("string"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListInvalidations(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
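+	// (List results page with Marker/MaxItems; when the response's list is
+	// truncated, pass its NextMarker back as Marker to fetch the next page.)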
+ fmt.Println(resp) +} + +func ExampleCloudFront_ListStreamingDistributions() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.ListStreamingDistributionsInput{ + Marker: aws.String("string"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListStreamingDistributions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFront_UpdateCloudFrontOriginAccessIdentity() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.UpdateCloudFrontOriginAccessIdentityInput{ + CloudFrontOriginAccessIdentityConfig: &cloudfront.OriginAccessIdentityConfig{ // Required + CallerReference: aws.String("string"), // Required + Comment: aws.String("string"), // Required + }, + Id: aws.String("string"), // Required + IfMatch: aws.String("string"), + } + resp, err := svc.UpdateCloudFrontOriginAccessIdentity(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFront_UpdateDistribution() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.UpdateDistributionInput{ + DistributionConfig: &cloudfront.DistributionConfig{ // Required + CallerReference: aws.String("string"), // Required + Comment: aws.String("string"), // Required + DefaultCacheBehavior: &cloudfront.DefaultCacheBehavior{ // Required + ForwardedValues: &cloudfront.ForwardedValues{ // Required + Cookies: &cloudfront.CookiePreference{ // Required + Forward: aws.String("ItemSelection"), // Required + WhitelistedNames: &cloudfront.CookieNames{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + }, + QueryString: aws.Bool(true), // Required + Headers: &cloudfront.Headers{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + }, + MinTTL: aws.Int64(1), // Required + TargetOriginId: aws.String("string"), // Required + TrustedSigners: &cloudfront.TrustedSigners{ // Required + Enabled: aws.Bool(true), // Required + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + ViewerProtocolPolicy: aws.String("ViewerProtocolPolicy"), // Required + AllowedMethods: &cloudfront.AllowedMethods{ + Items: []*string{ // Required + aws.String("Method"), // Required + // More values... + }, + Quantity: aws.Int64(1), // Required + CachedMethods: &cloudfront.CachedMethods{ + Items: []*string{ // Required + aws.String("Method"), // Required + // More values... + }, + Quantity: aws.Int64(1), // Required + }, + }, + Compress: aws.Bool(true), + DefaultTTL: aws.Int64(1), + MaxTTL: aws.Int64(1), + SmoothStreaming: aws.Bool(true), + }, + Enabled: aws.Bool(true), // Required + Origins: &cloudfront.Origins{ // Required + Quantity: aws.Int64(1), // Required + Items: []*cloudfront.Origin{ + { // Required + DomainName: aws.String("string"), // Required + Id: aws.String("string"), // Required + CustomHeaders: &cloudfront.CustomHeaders{ + Quantity: aws.Int64(1), // Required + Items: []*cloudfront.OriginCustomHeader{ + { // Required + HeaderName: aws.String("string"), // Required + HeaderValue: aws.String("string"), // Required + }, + // More values... 
+ }, + }, + CustomOriginConfig: &cloudfront.CustomOriginConfig{ + HTTPPort: aws.Int64(1), // Required + HTTPSPort: aws.Int64(1), // Required + OriginProtocolPolicy: aws.String("OriginProtocolPolicy"), // Required + OriginSslProtocols: &cloudfront.OriginSslProtocols{ + Items: []*string{ // Required + aws.String("SslProtocol"), // Required + // More values... + }, + Quantity: aws.Int64(1), // Required + }, + }, + OriginPath: aws.String("string"), + S3OriginConfig: &cloudfront.S3OriginConfig{ + OriginAccessIdentity: aws.String("string"), // Required + }, + }, + // More values... + }, + }, + Aliases: &cloudfront.Aliases{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + CacheBehaviors: &cloudfront.CacheBehaviors{ + Quantity: aws.Int64(1), // Required + Items: []*cloudfront.CacheBehavior{ + { // Required + ForwardedValues: &cloudfront.ForwardedValues{ // Required + Cookies: &cloudfront.CookiePreference{ // Required + Forward: aws.String("ItemSelection"), // Required + WhitelistedNames: &cloudfront.CookieNames{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + }, + QueryString: aws.Bool(true), // Required + Headers: &cloudfront.Headers{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + }, + MinTTL: aws.Int64(1), // Required + PathPattern: aws.String("string"), // Required + TargetOriginId: aws.String("string"), // Required + TrustedSigners: &cloudfront.TrustedSigners{ // Required + Enabled: aws.Bool(true), // Required + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + ViewerProtocolPolicy: aws.String("ViewerProtocolPolicy"), // Required + AllowedMethods: &cloudfront.AllowedMethods{ + Items: []*string{ // Required + aws.String("Method"), // Required + // More values... + }, + Quantity: aws.Int64(1), // Required + CachedMethods: &cloudfront.CachedMethods{ + Items: []*string{ // Required + aws.String("Method"), // Required + // More values... + }, + Quantity: aws.Int64(1), // Required + }, + }, + Compress: aws.Bool(true), + DefaultTTL: aws.Int64(1), + MaxTTL: aws.Int64(1), + SmoothStreaming: aws.Bool(true), + }, + // More values... + }, + }, + CustomErrorResponses: &cloudfront.CustomErrorResponses{ + Quantity: aws.Int64(1), // Required + Items: []*cloudfront.CustomErrorResponse{ + { // Required + ErrorCode: aws.Int64(1), // Required + ErrorCachingMinTTL: aws.Int64(1), + ResponseCode: aws.String("string"), + ResponsePagePath: aws.String("string"), + }, + // More values... + }, + }, + DefaultRootObject: aws.String("string"), + Logging: &cloudfront.LoggingConfig{ + Bucket: aws.String("string"), // Required + Enabled: aws.Bool(true), // Required + IncludeCookies: aws.Bool(true), // Required + Prefix: aws.String("string"), // Required + }, + PriceClass: aws.String("PriceClass"), + Restrictions: &cloudfront.Restrictions{ + GeoRestriction: &cloudfront.GeoRestriction{ // Required + Quantity: aws.Int64(1), // Required + RestrictionType: aws.String("GeoRestrictionType"), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... 
+ }, + }, + }, + ViewerCertificate: &cloudfront.ViewerCertificate{ + ACMCertificateArn: aws.String("string"), + Certificate: aws.String("string"), + CertificateSource: aws.String("CertificateSource"), + CloudFrontDefaultCertificate: aws.Bool(true), + IAMCertificateId: aws.String("string"), + MinimumProtocolVersion: aws.String("MinimumProtocolVersion"), + SSLSupportMethod: aws.String("SSLSupportMethod"), + }, + WebACLId: aws.String("string"), + }, + Id: aws.String("string"), // Required + IfMatch: aws.String("string"), + } + resp, err := svc.UpdateDistribution(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFront_UpdateStreamingDistribution() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.UpdateStreamingDistributionInput{ + Id: aws.String("string"), // Required + StreamingDistributionConfig: &cloudfront.StreamingDistributionConfig{ // Required + CallerReference: aws.String("string"), // Required + Comment: aws.String("string"), // Required + Enabled: aws.Bool(true), // Required + S3Origin: &cloudfront.S3Origin{ // Required + DomainName: aws.String("string"), // Required + OriginAccessIdentity: aws.String("string"), // Required + }, + TrustedSigners: &cloudfront.TrustedSigners{ // Required + Enabled: aws.Bool(true), // Required + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + Aliases: &cloudfront.Aliases{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + Logging: &cloudfront.StreamingLoggingConfig{ + Bucket: aws.String("string"), // Required + Enabled: aws.Bool(true), // Required + Prefix: aws.String("string"), // Required + }, + PriceClass: aws.String("PriceClass"), + }, + IfMatch: aws.String("string"), + } + resp, err := svc.UpdateStreamingDistribution(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/service.go new file mode 100644 index 000000000..99d23c72e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/service.go @@ -0,0 +1,86 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package cloudfront + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/restxml" +) + +// CloudFront is a client for CloudFront. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type CloudFront struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "cloudfront" + +// New creates a new instance of the CloudFront client with a session. 
+// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a CloudFront client from just a session. +// svc := cloudfront.New(mySession) +// +// // Create a CloudFront client with additional configuration +// svc := cloudfront.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudFront { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *CloudFront { + svc := &CloudFront{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2016-01-28", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a CloudFront operation and runs any +// custom request initialization. +func (c *CloudFront) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/policy.go b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/policy.go new file mode 100644 index 000000000..75263b1b6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/policy.go @@ -0,0 +1,226 @@ +package sign + +import ( + "bytes" + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/url" + "strings" + "time" + "unicode" +) + +// An AWSEpochTime wraps a time value providing JSON serialization needed for +// AWS Policy epoch time fields. +type AWSEpochTime struct { + time.Time +} + +// NewAWSEpochTime returns a new AWSEpochTime pointer wrapping the Go time provided. +func NewAWSEpochTime(t time.Time) *AWSEpochTime { + return &AWSEpochTime{t} +} + +// MarshalJSON serializes the epoch time as AWS Profile epoch time. +func (t AWSEpochTime) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`{"AWS:EpochTime":%d}`, t.UTC().Unix())), nil +} + +// An IPAddress wraps an IPAddress source IP providing JSON serialization information +type IPAddress struct { + SourceIP string `json:"AWS:SourceIp"` +} + +// A Condition defines the restrictions for how a signed URL can be used. +type Condition struct { + // Optional IP address mask the signed URL must be requested from. + IPAddress *IPAddress `json:"IpAddress,omitempty"` + + // Optional date that the signed URL cannot be used until. It is invalid + // to make requests with the signed URL prior to this date. + DateGreaterThan *AWSEpochTime `json:",omitempty"` + + // Required date that the signed URL will expire. 
A DateLessThan is required to
+	// sign CloudFront URLs.
+	DateLessThan *AWSEpochTime `json:",omitempty"`
+}
+
+// A Statement is a collection of conditions for resources
+type Statement struct {
+	// The Web or RTMP resource the URL will be signed for
+	Resource string
+
+	// The set of conditions for this resource
+	Condition Condition
+}
+
+// A Policy defines the resources that a signed URL will be signed for.
+//
+// See the following page for more information on how policies are constructed.
+// http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-custom-policy.html#private-content-custom-policy-statement
+type Policy struct {
+	// List of resource and condition statements.
+	// Signed URLs should only provide a single statement.
+	Statements []Statement `json:"Statement"`
+}
+
+// Override for testing to mock out usage of crypto/rand.Reader
+var randReader = rand.Reader
+
+// Sign will sign a policy using an RSA private key. It will return a base 64
+// encoded signature and policy if no error is encountered.
+//
+// The signature and policy should be added to the signed URL following the
+// guidelines in:
+// http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-signed-urls.html
+func (p *Policy) Sign(privKey *rsa.PrivateKey) (b64Signature, b64Policy []byte, err error) {
+	if err = p.Validate(); err != nil {
+		return nil, nil, err
+	}
+
+	// Build and escape the policy
+	b64Policy, jsonPolicy, err := encodePolicy(p)
+	if err != nil {
+		return nil, nil, err
+	}
+	awsEscapeEncoded(b64Policy)
+
+	// Build and escape the signature
+	b64Signature, err = signEncodedPolicy(randReader, jsonPolicy, privKey)
+	if err != nil {
+		return nil, nil, err
+	}
+	awsEscapeEncoded(b64Signature)
+
+	return b64Signature, b64Policy, nil
+}
+
+// Validate verifies that the policy is valid and usable, and returns an
+// error if there is a problem.
+func (p *Policy) Validate() error {
+	if len(p.Statements) == 0 {
+		return fmt.Errorf("at least one policy statement is required")
+	}
+	for i, s := range p.Statements {
+		if s.Resource == "" {
+			return fmt.Errorf("statement at index %d does not have a resource", i)
+		}
+		if !isASCII(s.Resource) {
+			return fmt.Errorf("unable to sign resource, [%s]. "+
+				"Resources must only contain ASCII characters. "+
+				"Hostnames with unicode should be encoded as Punycode, (e.g. golang.org/x/net/idna), "+
+				"and URL unicode path/query characters should be escaped.", s.Resource)
+		}
+	}
+
+	return nil
+}
+
+// CreateResource constructs, validates, and returns a resource URL string. An
+// error will be returned if unable to create the resource string.
+func CreateResource(scheme, u string) (string, error) {
+	scheme = strings.ToLower(scheme)
+
+	if scheme == "http" || scheme == "https" || scheme == "http*" || scheme == "*" {
+		return u, nil
+	}
+
+	if scheme == "rtmp" {
+		parsed, err := url.Parse(u)
+		if err != nil {
+			return "", fmt.Errorf("unable to parse rtmp URL, err: %s", err)
+		}
+
+		rtmpURL := strings.TrimLeft(parsed.Path, "/")
+		if parsed.RawQuery != "" {
+			rtmpURL = fmt.Sprintf("%s?%s", rtmpURL, parsed.RawQuery)
+		}
+
+		return rtmpURL, nil
+	}
+
+	return "", fmt.Errorf("invalid URL scheme, must be http, https, or rtmp. Provided: %s", scheme)
+}
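+
+// Illustrative note (not from the SDK docs): for an RTMP distribution,
+// CreateResource("rtmp", "https://example.com/videos/a.mp4?b=1") yields
+// "videos/a.mp4?b=1", dropping the scheme, host, and leading slash, while
+// http, https, http*, and * resources pass through unchanged.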
+
+// NewCannedPolicy returns a new Canned Policy constructed using the resource
+// and expires time. This can be used to generate the basic model for a Policy
+// that can then be augmented with additional conditions.
+//
+// See the following page for more information on how policies are constructed.
+// http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-custom-policy.html#private-content-custom-policy-statement
+func NewCannedPolicy(resource string, expires time.Time) *Policy {
+	return &Policy{
+		Statements: []Statement{
+			{
+				Resource: resource,
+				Condition: Condition{
+					DateLessThan: NewAWSEpochTime(expires),
+				},
+			},
+		},
+	}
+}
+
+// encodePolicy encodes the Policy as JSON and also base 64 encodes it.
+func encodePolicy(p *Policy) (b64Policy, jsonPolicy []byte, err error) {
+	jsonPolicy, err = json.Marshal(p)
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to encode policy, %s", err.Error())
+	}
+
+	// Remove leading and trailing white space; JSON encoding will not include
+	// whitespace within the encoding.
+	jsonPolicy = bytes.TrimSpace(jsonPolicy)
+
+	b64Policy = make([]byte, base64.StdEncoding.EncodedLen(len(jsonPolicy)))
+	base64.StdEncoding.Encode(b64Policy, jsonPolicy)
+	return b64Policy, jsonPolicy, nil
+}
+
+// signEncodedPolicy will sign and base 64 encode the JSON encoded policy.
+func signEncodedPolicy(randReader io.Reader, jsonPolicy []byte, privKey *rsa.PrivateKey) ([]byte, error) {
+	hash := sha1.New()
+	if _, err := bytes.NewReader(jsonPolicy).WriteTo(hash); err != nil {
+		return nil, fmt.Errorf("failed to calculate signing hash, %s", err.Error())
+	}
+
+	sig, err := rsa.SignPKCS1v15(randReader, privKey, crypto.SHA1, hash.Sum(nil))
+	if err != nil {
+		return nil, fmt.Errorf("failed to sign policy, %s", err.Error())
+	}
+
+	b64Sig := make([]byte, base64.StdEncoding.EncodedLen(len(sig)))
+	base64.StdEncoding.Encode(b64Sig, sig)
+	return b64Sig, nil
+}
+
+// special characters to be replaced with awsEscapeEncoded
+var invalidEncodedChar = map[byte]byte{
+	'+': '-',
+	'=': '_',
+	'/': '~',
+}
+
+// awsEscapeEncoded will replace base64 encoding's special characters to be URL safe.
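+// For example, the encoded bytes "a+b=c/" become "a-b_c~" in place.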
+func awsEscapeEncoded(b []byte) { + for i, v := range b { + if r, ok := invalidEncodedChar[v]; ok { + b[i] = r + } + } +} + +func isASCII(u string) bool { + for _, c := range u { + if c > unicode.MaxASCII { + return false + } + } + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/policy_test.go b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/policy_test.go new file mode 100644 index 000000000..7f335aafc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/policy_test.go @@ -0,0 +1,139 @@ +package sign + +import ( + "bytes" + "crypto" + "crypto/rsa" + "crypto/sha1" + "encoding/base64" + "fmt" + "math/rand" + "strings" + "testing" + "time" +) + +func TestEpochTimeMarshal(t *testing.T) { + v := AWSEpochTime{time.Now()} + b, err := v.MarshalJSON() + if err != nil { + t.Fatalf("Unexpected error, %#v", err) + } + + expected := fmt.Sprintf(`{"AWS:EpochTime":%d}`, v.UTC().Unix()) + if string(b) != expected { + t.Errorf("Expected marshaled time to match, expect: %s, actual: %s", + expected, string(b)) + } +} + +var testCreateResource = []struct { + scheme, u string + expect string + errPrefix string +}{ + { + "https", "https://example.com/a?b=1", + "https://example.com/a?b=1", "", + }, + { + "http", "http*://example.com/a?b=1", + "http*://example.com/a?b=1", "", + }, + { + "rtmp", "https://example.com/a?b=1", + "a?b=1", "", + }, + { + "ftp", "ftp://example.com/a?b=1", + "", "invalid URL scheme", + }, +} + +func TestCreateResource(t *testing.T) { + for i, v := range testCreateResource { + r, err := CreateResource(v.scheme, v.u) + if err != nil { + if v.errPrefix == "" { + t.Errorf("%d, Unexpected error %s", i, err.Error()) + continue + } + if !strings.HasPrefix(err.Error(), v.errPrefix) { + t.Errorf("%d, Expected to find prefix\nexpect: %s\nactual: %s", i, v.errPrefix, err.Error()) + continue + } + } else if v.errPrefix != "" { + t.Errorf("%d, Expected error %s", i, v.errPrefix) + continue + } + + if v.expect != r { + t.Errorf("%d, Expected to find prefix\nexpect: %s\nactual: %s", i, v.expect, r) + } + } +} + +var testTime = time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) + +const expectedJSONPolicy = `{"Statement":[{"Resource":"https://example.com/a","Condition":{"DateLessThan":{"AWS:EpochTime":1257894000}}}]}` +const expectedB64Policy = `eyJTdGF0ZW1lbnQiOlt7IlJlc291cmNlIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9hIiwiQ29uZGl0aW9uIjp7IkRhdGVMZXNzVGhhbiI6eyJBV1M6RXBvY2hUaW1lIjoxMjU3ODk0MDAwfX19XX0=` + +func TestEncodePolicy(t *testing.T) { + p := NewCannedPolicy("https://example.com/a", testTime) + + b64Policy, jsonPolicy, err := encodePolicy(p) + if err != nil { + t.Fatalf("Unexpected error, %#v", err) + } + + if string(jsonPolicy) != expectedJSONPolicy { + t.Errorf("Expected json encoding to match, \nexpect: %s\nactual: %s\n", expectedJSONPolicy, jsonPolicy) + } + + if string(b64Policy) != expectedB64Policy { + t.Errorf("Expected b64 encoding to match, \nexpect: %s\nactual: %s\n", expectedB64Policy, b64Policy) + } +} + +func TestSignEncodedPolicy(t *testing.T) { + p := NewCannedPolicy("https://example.com/a", testTime) + _, jsonPolicy, err := encodePolicy(p) + if err != nil { + t.Fatalf("Unexpected policy encode error, %#v", err) + } + + r := newRandomReader(rand.New(rand.NewSource(1))) + + privKey, err := rsa.GenerateKey(r, 1024) + if err != nil { + t.Fatalf("Unexpected priv key error, %#v", err) + } + + b64Signature, err := signEncodedPolicy(r, jsonPolicy, privKey) + if err != nil { + t.Fatalf("Unexpected policy sign error, 
%#v", err) + } + + hash := sha1.New() + if _, err = bytes.NewReader(jsonPolicy).WriteTo(hash); err != nil { + t.Fatalf("Unexpected hash error, %#v", err) + } + + decodedSig, err := base64.StdEncoding.DecodeString(string(b64Signature)) + if err != nil { + t.Fatalf("Unexpected base64 decode signature, %#v", err) + } + + if err := rsa.VerifyPKCS1v15(&privKey.PublicKey, crypto.SHA1, hash.Sum(nil), decodedSig); err != nil { + t.Fatalf("Unable to verify signature, %#v", err) + } +} + +func TestAWSEscape(t *testing.T) { + expect := "a-b_c~" + actual := []byte("a+b=c/") + awsEscapeEncoded(actual) + if string(actual) != expect { + t.Errorf("expect: %s, actual: %s", expect, string(actual)) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/privkey.go b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/privkey.go new file mode 100644 index 000000000..ffb3c3a75 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/privkey.go @@ -0,0 +1,68 @@ +package sign + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "fmt" + "io" + "io/ioutil" + "os" +) + +// LoadPEMPrivKeyFile reads a PEM encoded RSA private key from the file name. +// A new RSA private key will be returned if no error. +func LoadPEMPrivKeyFile(name string) (*rsa.PrivateKey, error) { + file, err := os.Open(name) + if err != nil { + return nil, err + } + defer file.Close() + + return LoadPEMPrivKey(file) +} + +// LoadPEMPrivKey reads a PEM encoded RSA private key from the io.Reader. +// A new RSA private key will be returned if no error. +func LoadPEMPrivKey(reader io.Reader) (*rsa.PrivateKey, error) { + block, err := loadPem(reader) + if err != nil { + return nil, err + } + + return x509.ParsePKCS1PrivateKey(block.Bytes) +} + +// LoadEncryptedPEMPrivKey decrypts the PEM encoded private key using the +// password provided returning a RSA private key. If the PEM data is invalid, +// or unable to decrypt an error will be returned. +func LoadEncryptedPEMPrivKey(reader io.Reader, password []byte) (*rsa.PrivateKey, error) { + block, err := loadPem(reader) + if err != nil { + return nil, err + } + + decryptedBlock, err := x509.DecryptPEMBlock(block, password) + if err != nil { + return nil, err + } + + return x509.ParsePKCS1PrivateKey(decryptedBlock) +} + +func loadPem(reader io.Reader) (*pem.Block, error) { + b, err := ioutil.ReadAll(reader) + if err != nil { + return nil, err + } + + block, _ := pem.Decode(b) + if block == nil { + // pem.Decode will set block to nil if there is no PEM data in the input + // the second parameter will contain the provided bytes that failed + // to be decoded. 
+ return nil, fmt.Errorf("no valid PEM data provided") + } + + return block, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/privkey_test.go b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/privkey_test.go new file mode 100644 index 000000000..84750d8f5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/privkey_test.go @@ -0,0 +1,90 @@ +package sign + +import ( + "bytes" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "io" + "math/rand" + "strings" + "testing" +) + +func generatePEM(randReader io.Reader, password []byte) (buf *bytes.Buffer, err error) { + k, err := rsa.GenerateKey(randReader, 1024) + if err != nil { + return nil, err + } + + derBytes := x509.MarshalPKCS1PrivateKey(k) + + var block *pem.Block + if password != nil { + block, err = x509.EncryptPEMBlock(randReader, "RSA PRIVATE KEY", derBytes, password, x509.PEMCipherAES128) + } else { + block = &pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: derBytes, + } + } + + buf = &bytes.Buffer{} + err = pem.Encode(buf, block) + return buf, err +} + +func TestLoadPemPrivKey(t *testing.T) { + reader, err := generatePEM(newRandomReader(rand.New(rand.NewSource(1))), nil) + if err != nil { + t.Errorf("Unexpected pem generation err %s", err.Error()) + } + + privKey, err := LoadPEMPrivKey(reader) + if err != nil { + t.Errorf("Unexpected key load error, %s", err.Error()) + } + if privKey == nil { + t.Errorf("Expected valid privKey, but got nil") + } +} + +func TestLoadPemPrivKeyInvalidPEM(t *testing.T) { + reader := strings.NewReader("invalid PEM data") + privKey, err := LoadPEMPrivKey(reader) + + if err == nil { + t.Errorf("Expected error invalid PEM data error") + } + if privKey != nil { + t.Errorf("Expected nil privKey but got %#v", privKey) + } +} + +func TestLoadEncryptedPEMPrivKey(t *testing.T) { + reader, err := generatePEM(newRandomReader(rand.New(rand.NewSource(1))), []byte("password")) + if err != nil { + t.Errorf("Unexpected pem generation err %s", err.Error()) + } + + privKey, err := LoadEncryptedPEMPrivKey(reader, []byte("password")) + + if err != nil { + t.Errorf("Unexpected key load error, %s", err.Error()) + } + if privKey == nil { + t.Errorf("Expected valid privKey, but got nil") + } +} + +func TestLoadEncryptedPEMPrivKeyWrongPassword(t *testing.T) { + reader, err := generatePEM(newRandomReader(rand.New(rand.NewSource(1))), []byte("password")) + privKey, err := LoadEncryptedPEMPrivKey(reader, []byte("wrong password")) + + if err == nil { + t.Errorf("Expected error invalid PEM data error") + } + if privKey != nil { + t.Errorf("Expected nil privKey but got %#v", privKey) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/randomreader.go b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/randomreader.go new file mode 100644 index 000000000..7138e22fa --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/randomreader.go @@ -0,0 +1,30 @@ +package sign + +import ( + "bytes" + "encoding/binary" + "math/rand" +) + +// A randomReader wraps a math/rand.Rand within an reader so that it can used +// as a predictable testing replacement for crypto/rand.Reader +type randomReader struct { + b *bytes.Buffer + r *rand.Rand +} + +// newRandomReader returns a new instance of the random reader +func newRandomReader(r *rand.Rand) *randomReader { + return &randomReader{b: &bytes.Buffer{}, r: r} +} + +// Read will read random bytes from up to the length of b. 
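+// It fills b by repeatedly writing Int63 values from the seeded rand.Rand
+// into an internal buffer and draining that buffer into b. It never returns
+// an error, so it is only suitable as a deterministic test stand-in.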
+func (m *randomReader) Read(b []byte) (int, error) {
+	for i := 0; i < len(b); {
+		binary.Write(m.b, binary.LittleEndian, m.r.Int63())
+		n, _ := m.b.Read(b[i:])
+		i += n
+	}
+
+	return len(b), nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_cookie.go b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_cookie.go
new file mode 100644
index 000000000..9b2deadf1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_cookie.go
@@ -0,0 +1,241 @@
+package sign
+
+import (
+	"crypto/rsa"
+	"fmt"
+	"net/http"
+	"strings"
+	"time"
+)
+
+const (
+	// CookiePolicyName name of the policy cookie
+	CookiePolicyName = "CloudFront-Policy"
+	// CookieSignatureName name of the signature cookie
+	CookieSignatureName = "CloudFront-Signature"
+	// CookieKeyIDName name of the signing Key ID cookie
+	CookieKeyIDName = "CloudFront-Key-Pair-Id"
+)
+
+// A CookieOptions carries optional additional options that can be applied to
+// the signed cookies.
+type CookieOptions struct {
+	Path   string
+	Domain string
+	Secure bool
+}
+
+// apply will integrate the options provided into the base cookie options.
+// A new copy will be returned. The base CookieOptions will not be modified.
+func (o CookieOptions) apply(opts ...func(*CookieOptions)) CookieOptions {
+	if len(opts) == 0 {
+		return o
+	}
+
+	for _, opt := range opts {
+		opt(&o)
+	}
+
+	return o
+}
+
+// A CookieSigner provides signing utilities to sign Cookies for Amazon CloudFront
+// resources. Using a private key and Credential Key Pair key ID the CookieSigner
+// only needs to be created once per Credential Key Pair key ID and private key.
+//
+// More information about signed Cookies and their structure can be found at:
+// http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-setting-signed-cookie-custom-policy.html
+//
+// To sign a Cookie, create a CookieSigner with your private key and credential
+// pair key ID. Once you have a CookieSigner instance you can call Sign or
+// SignWithPolicy to sign the URLs.
+//
+// The signer is safe to use concurrently, but the optional cookies options
+// are not safe to modify concurrently.
+type CookieSigner struct {
+	keyID   string
+	privKey *rsa.PrivateKey
+
+	Opts CookieOptions
+}
+
+// NewCookieSigner constructs and returns a new CookieSigner to be used for
+// signing Amazon CloudFront URL resources with.
+func NewCookieSigner(keyID string, privKey *rsa.PrivateKey, opts ...func(*CookieOptions)) *CookieSigner {
+	signer := &CookieSigner{
+		keyID:   keyID,
+		privKey: privKey,
+		Opts:    CookieOptions{}.apply(opts...),
+	}
+
+	return signer
+}
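+
+// Illustrative sketch (not part of the SDK): defaults are supplied as
+// functional options at construction and can be overridden per call, e.g.
+//
+//	signer := NewCookieSigner("keyID", privKey, func(o *CookieOptions) {
+//		o.Secure = true // only send the signed cookies over HTTPS
+//	})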
+
+// Sign returns the cookies needed to allow user agents to make arbitrary
+// requests to CloudFront for the resource(s) defined by the policy.
+//
+// Sign will create a CloudFront policy with only a resource and condition of
+// DateLessThan equal to the expires time provided.
+//
+// The returned slice of cookies should all be added to the Client's cookies
+// or the server's response.
+//
+// Example:
+//	s := NewCookieSigner(keyID, privKey)
+//
+//	// Get Signed cookies for a resource that will expire in 1 hour
+//	cookies, err := s.Sign("*", time.Now().Add(1 * time.Hour))
+//	if err != nil {
+//		fmt.Println("failed to create signed cookies", err)
+//		return
+//	}
+//
+//	// Or get Signed cookies for a resource that will expire in 1 hour
+//	// and set path and domain of cookies
+//	cookies, err := s.Sign("*", time.Now().Add(1 * time.Hour), func(o *sign.CookieOptions) {
+//		o.Path = "/"
+//		o.Domain = ".example.com"
+//	})
+//	if err != nil {
+//		fmt.Println("failed to create signed cookies", err)
+//		return
+//	}
+//
+//	// Server Response via http.ResponseWriter
+//	for _, c := range cookies {
+//		http.SetCookie(w, c)
+//	}
+//
+//	// Client request via the cookie jar
+//	if client.Jar != nil {
+//		client.Jar.SetCookies(u, cookies)
+//	}
+func (s CookieSigner) Sign(u string, expires time.Time, opts ...func(*CookieOptions)) ([]*http.Cookie, error) {
+	scheme, err := cookieURLScheme(u)
+	if err != nil {
+		return nil, err
+	}
+
+	resource, err := CreateResource(scheme, u)
+	if err != nil {
+		return nil, err
+	}
+
+	p := NewCannedPolicy(resource, expires)
+	return createCookies(p, s.keyID, s.privKey, s.Opts.apply(opts...))
+}
+
+// cookieURLScheme returns and validates the URL's scheme.
+// http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-setting-signed-cookie-custom-policy.html#private-content-custom-policy-statement-cookies
+func cookieURLScheme(u string) (string, error) {
+	parts := strings.SplitN(u, "://", 2)
+	if len(parts) != 2 {
+		return "", fmt.Errorf("invalid cookie URL, missing scheme")
+	}
+
+	scheme := strings.ToLower(parts[0])
+	if scheme != "http" && scheme != "https" && scheme != "http*" {
+		return "", fmt.Errorf("invalid cookie URL scheme. Expect http, https, or http*. Got, %s", scheme)
+	}
+
+	return scheme, nil
+}
+
+// SignWithPolicy returns the cookies needed to allow user agents to make
+// arbitrary requests to CloudFront for the resource(s) defined by the policy.
+//
+// The returned slice of cookies should all be added to the Client's cookies
+// or the server's response.
+//
+// Example:
+//	s := NewCookieSigner(keyID, privKey)
+//
+//	policy := &sign.Policy{
+//		Statements: []sign.Statement{
+//			{
+//				// Read the provided documentation on how to set this
+//				// correctly, you'll probably want to use wildcards.
+//				Resource: RawCloudFrontURL,
+//				Condition: sign.Condition{
+//					// Optional IP source address range
+//					IPAddress: &sign.IPAddress{SourceIP: "192.0.2.0/24"},
+//					// Optional date URL is not valid until
+//					DateGreaterThan: &sign.AWSEpochTime{time.Now().Add(30 * time.Minute)},
+//					// Required date the URL will expire after
+//					DateLessThan: &sign.AWSEpochTime{time.Now().Add(1 * time.Hour)},
+//				},
+//			},
+//		},
+//	}
+//
+//	// Get Signed cookies for a resource that will expire in 1 hour
+//	cookies, err := s.SignWithPolicy(policy)
+//	if err != nil {
+//		fmt.Println("failed to create signed cookies", err)
+//		return
+//	}
+//
+//	// Or get Signed cookies for a resource that will expire in 1 hour
+//	// and set path and domain of cookies
+//	cookies, err := s.SignWithPolicy(policy, func(o *sign.CookieOptions) {
+//		o.Path = "/"
+//		o.Domain = ".example.com"
+//	})
+//	if err != nil {
+//		fmt.Println("failed to create signed cookies", err)
+//		return
+//	}
+//
+//	// Server Response via http.ResponseWriter
+//	for _, c := range cookies {
+//		http.SetCookie(w, c)
+//	}
+//
+//	// Client request via the cookie jar
+//	if client.Jar != nil {
+//		client.Jar.SetCookies(u, cookies)
+//	}
+func (s CookieSigner) SignWithPolicy(p *Policy, opts ...func(*CookieOptions)) ([]*http.Cookie, error) {
+	return createCookies(p, s.keyID, s.privKey, s.Opts.apply(opts...))
+}
+
+// createCookies prepares the cookies to be attached to the header. An
+// (optional) options struct is provided in case people don't want to
+// manually edit their cookies.
+func createCookies(p *Policy, keyID string, privKey *rsa.PrivateKey, opt CookieOptions) ([]*http.Cookie, error) {
+	b64Sig, b64Policy, err := p.Sign(privKey)
+	if err != nil {
+		return nil, err
+	}
+
+	// Create the policy, signature, and key ID cookies
+	cPolicy := &http.Cookie{
+		Name:     CookiePolicyName,
+		Value:    string(b64Policy),
+		HttpOnly: true,
+	}
+	cSignature := &http.Cookie{
+		Name:     CookieSignatureName,
+		Value:    string(b64Sig),
+		HttpOnly: true,
+	}
+	cKey := &http.Cookie{
+		Name:     CookieKeyIDName,
+		Value:    keyID,
+		HttpOnly: true,
+	}
+
+	cookies := []*http.Cookie{cPolicy, cSignature, cKey}
+
+	// Apply the cookie options
+	for _, c := range cookies {
+		c.Path = opt.Path
+		c.Domain = opt.Domain
+		c.Secure = opt.Secure
+	}
+
+	return cookies, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_cookie_example_test.go b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_cookie_example_test.go
new file mode 100644
index 000000000..3683360e8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_cookie_example_test.go
@@ -0,0 +1,163 @@
+package sign
+
+import (
+	"fmt"
+	"io"
+	"math/rand"
+	"net/http"
+	"time"
+)
+
+func examplePEMReader() io.Reader {
+	reader, err := generatePEM(randReader, nil)
+	if err != nil {
+		panic(fmt.Sprintf("Unexpected pem generation err %v", err))
+	}
+
+	return reader
+}
+
+func ExampleCookieSigner_Sign() {
+	origRandReader := randReader
+	randReader = newRandomReader(rand.New(rand.NewSource(1)))
+	defer func() {
+		randReader = origRandReader
+	}()
+
+	// Load your private key so it can be used by the CookieSigner
+	// To load private key from file use `sign.LoadPEMPrivKeyFile`.
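+	// (examplePEMReader generates a throwaway test key; real code would load
+	// the private key that matches the CloudFront key pair ID.)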
+ privKey, err := LoadPEMPrivKey(examplePEMReader()) + if err != nil { + fmt.Println("failed to load private key", err) + return + } + + cookieSigner := NewCookieSigner("keyID", privKey) + + // Use the signer to sign the URL + cookies, err := cookieSigner.Sign("http://example.com/somepath/*", testSignTime.Add(30*time.Minute)) + if err != nil { + fmt.Println("failed to sign cookies with policy,", err) + return + } + + printExampleCookies(cookies) + // Output: + // Cookies: + // CloudFront-Policy: eyJTdGF0ZW1lbnQiOlt7IlJlc291cmNlIjoiaHR0cDovL2V4YW1wbGUuY29tL3NvbWVwYXRoLyoiLCJDb25kaXRpb24iOnsiRGF0ZUxlc3NUaGFuIjp7IkFXUzpFcG9jaFRpbWUiOjEyNTc4OTU4MDB9fX1dfQ__, , , false + // CloudFront-Signature: o~jvj~CFkvGZB~yYED3elicKZag-CRijy8yD2E5yF1s7VNV7kNeQWC7MDtEcBQ8-eh7Xgjh0wMPQdAVdh09gBObd-hXDpKUyh8YKxogj~oloV~8KOvqE5xzWiKcqjdfJjmT5iEqIui~H1ExYjyKjgir79npmlyYkaJS5s62EQa8_, , , false + // CloudFront-Key-Pair-Id: keyID, , , false +} + +func ExampleCookieSigner_SignWithPolicy() { + origRandReader := randReader + randReader = newRandomReader(rand.New(rand.NewSource(1))) + defer func() { + randReader = origRandReader + }() + + // Sign cookie to be valid for 30 minutes from now, expires one hour + // from now, and restricted to the 192.0.2.0/24 IP address range. + // http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-setting-signed-cookie-custom-policy.html + p := &Policy{ + // Only a single policy statement can be used with CloudFront + // cookie signatures. + Statements: []Statement{{ + // Read the provided documentation on how to set this correctly, + // you'll probably want to use wildcards + Resource: "http://sub.cloudfront.com", + Condition: Condition{ + // Optional IP source address range + IPAddress: &IPAddress{SourceIP: "192.0.2.0/24"}, + // Optional date URL is not valid until + DateGreaterThan: &AWSEpochTime{testSignTime.Add(30 * time.Minute)}, + // Required date the URL will expire after + DateLessThan: &AWSEpochTime{testSignTime.Add(1 * time.Hour)}, + }, + }, + }, + } + + // Load your private key so it can be used by the CookieSigner + // To load private key from file use `sign.LoadPEMPrivKeyFile`. + privKey, err := LoadPEMPrivKey(examplePEMReader()) + if err != nil { + fmt.Println("failed to load private key", err) + return + } + + // Key ID that represents the key pair associated with the private key + keyID := "privateKeyID" + + // Set credentials to the CookieSigner. + cookieSigner := NewCookieSigner(keyID, privKey) + + // Avoid adding an Expire or MaxAge. See provided AWS Documentation for + // more info. 
+ cookies, err := cookieSigner.SignWithPolicy(p) + if err != nil { + fmt.Println("failed to sign cookies with policy,", err) + return + } + + printExampleCookies(cookies) + // Output: + // Cookies: + // CloudFront-Policy: eyJTdGF0ZW1lbnQiOlt7IlJlc291cmNlIjoiaHR0cDovL3N1Yi5jbG91ZGZyb250LmNvbSIsIkNvbmRpdGlvbiI6eyJJcEFkZHJlc3MiOnsiQVdTOlNvdXJjZUlwIjoiMTkyLjAuMi4wLzI0In0sIkRhdGVHcmVhdGVyVGhhbiI6eyJBV1M6RXBvY2hUaW1lIjoxMjU3ODk1ODAwfSwiRGF0ZUxlc3NUaGFuIjp7IkFXUzpFcG9jaFRpbWUiOjEyNTc4OTc2MDB9fX1dfQ__, , , false + // CloudFront-Signature: JaWdcbr98colrDAhOpkyxqCZev2IAxURu1RKKo1wS~sI5XdNXWYbZJs2FdpbJ475ZvmhZ1-r4ENUqBXAlRfPfOc21Hm4~24jRmPTO3512D4uuJHrPVxSfgeGuFeigfCGWAqyfYYH1DsFl5JQDpzetsNI3ZhGRkQb8V-oYFanddg_, , , false + // CloudFront-Key-Pair-Id: privateKeyID, , , false +} + +func ExampleCookieSigner_SignOptions() { + origRandReader := randReader + randReader = newRandomReader(rand.New(rand.NewSource(1))) + defer func() { + randReader = origRandReader + }() + + // Load your private key so it can be used by the CookieSigner + // To load private key from file use `sign.LoadPEMPrivKeyFile`. + privKey, err := LoadPEMPrivKey(examplePEMReader()) + if err != nil { + fmt.Println("failed to load private key", err) + return + } + + // Create the CookieSigner with options set. These options can be set + // directly with cookieSigner.Opts. These values can be overriden on + // individual Sign and SignWithProfile calls. + cookieSigner := NewCookieSigner("keyID", privKey, func(o *CookieOptions) { + //provide an optional struct fields to specify other options + o.Path = "/" + + // http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/CNAMEs.html + o.Domain = ".cNameAssociatedWithMyDistribution.com" + + // Make sure your app/site can handle https payloads, otherwise + // set this to false. 
+ o.Secure = true + }) + + // Use the signer to sign the URL + cookies, err := cookieSigner.Sign("http*://*", testSignTime.Add(30*time.Minute), func(o *CookieOptions) { + o.Path = "/mypath/" + }) + if err != nil { + fmt.Println("failed to sign cookies with policy,", err) + return + } + + printExampleCookies(cookies) + // Output: + // Cookies: + // CloudFront-Policy: eyJTdGF0ZW1lbnQiOlt7IlJlc291cmNlIjoiaHR0cCo6Ly8qIiwiQ29uZGl0aW9uIjp7IkRhdGVMZXNzVGhhbiI6eyJBV1M6RXBvY2hUaW1lIjoxMjU3ODk1ODAwfX19XX0_, /mypath/, .cNameAssociatedWithMyDistribution.com, true + // CloudFront-Signature: Yco06vgowwvSYgTSY9XbXpBcTlUlqpyyYXgRhus3nfnC74A7oQ~fMBH0we-rGxvph8ZyHnTxC5ubbPKSzo3EHUm2IcQeEo4p6WCgZZMzCuLlkpeMKhMAkCqX7rmUfkXhTslBHe~ylcmaZqo-hdnOiWrXk2U974ZQbbt5cOjwQG0_, /mypath/, .cNameAssociatedWithMyDistribution.com, true + // CloudFront-Key-Pair-Id: keyID, /mypath/, .cNameAssociatedWithMyDistribution.com, true +} + +func printExampleCookies(cookies []*http.Cookie) { + fmt.Println("Cookies:") + for _, c := range cookies { + fmt.Printf("%s: %s, %s, %s, %t\n", c.Name, c.Value, c.Path, c.Domain, c.Secure) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_cookie_test.go b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_cookie_test.go new file mode 100644 index 000000000..3bcd8672f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_cookie_test.go @@ -0,0 +1,83 @@ +package sign + +import ( + "crypto/rsa" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestNewCookieSigner(t *testing.T) { + privKey, err := rsa.GenerateKey(randReader, 1024) + if err != nil { + t.Fatalf("Unexpected priv key error, %#v", err) + } + + signer := NewCookieSigner("keyID", privKey) + assert.Equal(t, "keyID", signer.keyID) + assert.Equal(t, privKey, signer.privKey) +} + +func TestSignCookie(t *testing.T) { + privKey, err := rsa.GenerateKey(randReader, 1024) + assert.NoError(t, err) + + signer := NewCookieSigner("keyID", privKey) + cookies, err := signer.Sign("http*://*", time.Now().Add(1*time.Hour)) + + assert.NoError(t, err) + assert.Equal(t, CookiePolicyName, cookies[0].Name) + assert.Equal(t, CookieSignatureName, cookies[1].Name) + assert.Equal(t, CookieKeyIDName, cookies[2].Name) +} + +func TestSignCookie_WithPolicy(t *testing.T) { + privKey, err := rsa.GenerateKey(randReader, 1024) + assert.NoError(t, err) + + p := &Policy{ + Statements: []Statement{ + { + Resource: "*", + Condition: Condition{ + DateLessThan: &AWSEpochTime{time.Now().Add(1 * time.Hour)}, + }, + }, + }, + } + + signer := NewCookieSigner("keyID", privKey) + cookies, err := signer.SignWithPolicy(p) + + assert.NoError(t, err) + assert.Equal(t, CookiePolicyName, cookies[0].Name) + assert.Equal(t, CookieSignatureName, cookies[1].Name) + assert.Equal(t, CookieKeyIDName, cookies[2].Name) +} + +func TestSignCookie_WithCookieOptions(t *testing.T) { + privKey, err := rsa.GenerateKey(randReader, 1024) + assert.NoError(t, err) + + expires := time.Now().Add(1 * time.Hour) + + signer := NewCookieSigner("keyID", privKey) + cookies, err := signer.Sign("https://example.com/*", expires, func(o *CookieOptions) { + o.Path = "/" + o.Domain = ".example.com" + o.Secure = true + + }) + + assert.NoError(t, err) + assert.Equal(t, CookiePolicyName, cookies[0].Name) + assert.Equal(t, CookieSignatureName, cookies[1].Name) + assert.Equal(t, CookieKeyIDName, cookies[2].Name) + + for _, c := range cookies { + assert.Equal(t, "/", c.Path) + assert.Equal(t, ".example.com", c.Domain) + assert.True(t, 
c.Secure)
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_url.go b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_url.go
new file mode 100644
index 000000000..ba56b4a78
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_url.go
@@ -0,0 +1,205 @@
+// Package sign provides utilities to generate signed URLs for Amazon CloudFront.
+//
+// More information about signed URLs and their structure can be found at:
+// http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-canned-policy.html
+//
+// To sign a URL create a URLSigner with your private key and credential pair key ID.
+// Once you have a URLSigner instance you can call Sign or SignWithPolicy to
+// sign the URLs.
+//
+// Example:
+//
+//	// Sign URL to be valid for 1 hour from now.
+//	signer := sign.NewURLSigner(keyID, privKey)
+//	signedURL, err := signer.Sign(rawURL, time.Now().Add(1*time.Hour))
+//	if err != nil {
+//		log.Fatalf("Failed to sign url, err: %s\n", err.Error())
+//	}
+//
+package sign
+
+import (
+	"crypto/rsa"
+	"fmt"
+	"net/url"
+	"strings"
+	"time"
+)
+
+// A URLSigner provides URL signing utilities to sign URLs for Amazon CloudFront
+// resources. Using a private key and Credential Key Pair key ID the URLSigner
+// only needs to be created once per Credential Key Pair key ID and private key.
+//
+// The signer is safe to use concurrently.
+type URLSigner struct {
+	keyID   string
+	privKey *rsa.PrivateKey
+}
+
+// NewURLSigner constructs and returns a new URLSigner to be used for signing
+// Amazon CloudFront URL resources with.
+func NewURLSigner(keyID string, privKey *rsa.PrivateKey) *URLSigner {
+	return &URLSigner{
+		keyID:   keyID,
+		privKey: privKey,
+	}
+}
+
+// Sign will sign a single URL to expire at the time of expires, using the
+// Amazon CloudFront default Canned Policy. The URL will be signed with the
+// private key and Credential Key Pair Key ID previously provided to URLSigner.
+//
+// This is the default method of signing Amazon CloudFront URLs. If extra policy
+// conditions are needed other than URL expiry, use SignWithPolicy instead.
+//
+// Example:
+//
+//	// Sign URL to be valid for 1 hour from now.
+//	signer := sign.NewURLSigner(keyID, privKey)
+//	signedURL, err := signer.Sign(rawURL, time.Now().Add(1*time.Hour))
+//	if err != nil {
+//		log.Fatalf("Failed to sign url, err: %s\n", err.Error())
+//	}
+//
+func (s URLSigner) Sign(url string, expires time.Time) (string, error) {
+	scheme, cleanedURL, err := cleanURLScheme(url)
+	if err != nil {
+		return "", err
+	}
+
+	resource, err := CreateResource(scheme, url)
+	if err != nil {
+		return "", err
+	}
+
+	return signURL(scheme, cleanedURL, s.keyID, NewCannedPolicy(resource, expires), false, s.privKey)
+}
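+
+// Illustrative note (not from the SDK docs): a canned-policy URL signed by
+// Sign carries an Expires query parameter, while a custom-policy URL signed
+// by SignWithPolicy carries the full base64 policy document, e.g.
+//
+//	.../a?Expires=1257894000&Signature=...&Key-Pair-Id=keyID
+//	.../a?Policy=eyJTdGF0ZW1lbnQ...&Signature=...&Key-Pair-Id=keyID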
+//
+// Note: It is not safe to use Policies between multiple signers concurrently.
+//
+// Example:
+//
+//    // Sign URL to become valid 30 minutes from now, expire one hour from now,
+//    // and be restricted to the 192.0.2.0/24 IP address range.
+//    policy := &sign.Policy{
+//        Statements: []sign.Statement{
+//            {
+//                Resource: rawURL,
+//                Condition: sign.Condition{
+//                    // Optional IP source address range
+//                    IPAddress: &sign.IPAddress{SourceIP: "192.0.2.0/24"},
+//                    // Optional date URL is not valid until
+//                    DateGreaterThan: &sign.AWSEpochTime{time.Now().Add(30 * time.Minute)},
+//                    // Required date the URL will expire after
+//                    DateLessThan: &sign.AWSEpochTime{time.Now().Add(1 * time.Hour)},
+//                },
+//            },
+//        },
+//    }
+//
+//    signer := sign.NewURLSigner(keyID, privKey)
+//    signedURL, err := signer.SignWithPolicy(rawURL, policy)
+//    if err != nil {
+//        log.Fatalf("Failed to sign url, err: %s\n", err.Error())
+//    }
+//
+func (s URLSigner) SignWithPolicy(url string, p *Policy) (string, error) {
+	scheme, cleanedURL, err := cleanURLScheme(url)
+	if err != nil {
+		return "", err
+	}
+
+	return signURL(scheme, cleanedURL, s.keyID, p, true, s.privKey)
+}
+
+func signURL(scheme, url, keyID string, p *Policy, customPolicy bool, privKey *rsa.PrivateKey) (string, error) {
+	// Validate URL elements
+	if err := validateURL(url); err != nil {
+		return "", err
+	}
+
+	b64Signature, b64Policy, err := p.Sign(privKey)
+	if err != nil {
+		return "", err
+	}
+
+	// build and return signed URL
+	builtURL := buildSignedURL(url, keyID, p, customPolicy, b64Policy, b64Signature)
+	if scheme == "rtmp" {
+		return buildRTMPURL(builtURL)
+	}
+
+	return builtURL, nil
+}
+
+func buildSignedURL(baseURL, keyID string, p *Policy, customPolicy bool, b64Policy, b64Signature []byte) string {
+	pred := "?"
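+	// If the base URL already carries a query string, append the signing
+	// parameters with "&" instead of starting a new query string.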
+ if strings.Contains(baseURL, "?") { + pred = "&" + } + signedURL := baseURL + pred + + if customPolicy { + signedURL += "Policy=" + string(b64Policy) + } else { + signedURL += fmt.Sprintf("Expires=%d", p.Statements[0].Condition.DateLessThan.UTC().Unix()) + } + signedURL += fmt.Sprintf("&Signature=%s&Key-Pair-Id=%s", string(b64Signature), keyID) + + return signedURL +} + +func buildRTMPURL(u string) (string, error) { + parsed, err := url.Parse(u) + if err != nil { + return "", fmt.Errorf("unable to parse rtmp signed URL, err: %s", err) + } + + rtmpURL := strings.TrimLeft(parsed.Path, "/") + if parsed.RawQuery != "" { + rtmpURL = fmt.Sprintf("%s?%s", rtmpURL, parsed.RawQuery) + } + + return rtmpURL, nil +} + +func cleanURLScheme(u string) (scheme, cleanedURL string, err error) { + parts := strings.SplitN(u, "://", 2) + if len(parts) != 2 { + return "", "", fmt.Errorf("invalid URL, missing scheme and domain/path") + } + scheme = strings.Replace(parts[0], "*", "", 1) + cleanedURL = fmt.Sprintf("%s://%s", scheme, parts[1]) + + return strings.ToLower(scheme), cleanedURL, nil +} + +var illegalQueryParms = []string{"Expires", "Policy", "Signature", "Key-Pair-Id"} + +func validateURL(u string) error { + parsed, err := url.Parse(u) + if err != nil { + return fmt.Errorf("unable to parse URL, err: %s", err.Error()) + } + + if parsed.Scheme == "" { + return fmt.Errorf("URL missing valid scheme, %s", u) + } + + q := parsed.Query() + for _, p := range illegalQueryParms { + if _, ok := q[p]; ok { + return fmt.Errorf("%s cannot be a query parameter for a signed URL", p) + } + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_url_test.go b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_url_test.go new file mode 100644 index 000000000..21acfabb3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_url_test.go @@ -0,0 +1,149 @@ +package sign + +import ( + "crypto/rsa" + "math/rand" + "strings" + "testing" + "time" +) + +var testSignTime = time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) + +var testSignURL = []struct { + u string + p *Policy + t time.Time + customPolicy bool + expectErr bool + out string +}{ + { + "http://example.com/a", NewCannedPolicy("http://example.com/a", testSignTime), time.Time{}, true, false, + "http://example.com/a?Policy=eyJTdGF0ZW1lbnQiOlt7IlJlc291cmNlIjoiaHR0cDovL2V4YW1wbGUuY29tL2EiLCJDb25kaXRpb24iOnsiRGF0ZUxlc3NUaGFuIjp7IkFXUzpFcG9jaFRpbWUiOjEyNTc4OTQwMDB9fX1dfQ__&Signature=Y6qvWOZNl99uNPMGprvrKXEmXpLWJ-xXKVHL~nmF0BR1jPb2XA2jor0MUYKBE4ViTkWZZ1dz46zSFMsEEfw~n6-SVYXZ2QHBBTkSAoxGtH6dH33Ph9pz~f9Wy7aYXq~9I-Ah0E6yC~BMiQuXe5qAOucuMPorKgPfC0dvLMw2EF0_&Key-Pair-Id=KeyID", + }, + { + "http://example.com/a", nil, testSignTime, false, false, + "http://example.com/a?Expires=1257894000&Signature=Y6qvWOZNl99uNPMGprvrKXEmXpLWJ-xXKVHL~nmF0BR1jPb2XA2jor0MUYKBE4ViTkWZZ1dz46zSFMsEEfw~n6-SVYXZ2QHBBTkSAoxGtH6dH33Ph9pz~f9Wy7aYXq~9I-Ah0E6yC~BMiQuXe5qAOucuMPorKgPfC0dvLMw2EF0_&Key-Pair-Id=KeyID", + }, + { + "http://example.com/Ƿ", nil, testSignTime, false, true, + "http://example.com/Ƿ?Expires=1257894000&Signature=Y6qvWOZNl99uNPMGprvrKXEmXpLWJ-xXKVHL~nmF0BR1jPb2XA2jor0MUYKBE4ViTkWZZ1dz46zSFMsEEfw~n6-SVYXZ2QHBBTkSAoxGtH6dH33Ph9pz~f9Wy7aYXq~9I-Ah0E6yC~BMiQuXe5qAOucuMPorKgPfC0dvLMw2EF0_&Key-Pair-Id=KeyID", + }, + { + "http://example.com/a", &Policy{}, time.Time{}, true, true, + 
"http://example.com/a?Policy=eyJTdGF0ZW1lbnQiOlt7IlJlc291cmNlIjoiaHR0cDovL2V4YW1wbGUuY29tL2EiLCJDb25kaXRpb24iOnsiRGF0ZUxlc3NUaGFuIjp7IkFXUzpFcG9jaFRpbWUiOjEyNTc4OTQwMDB9fX1dfQ__&Signature=Y6qvWOZNl99uNPMGprvrKXEmXpLWJ-xXKVHL~nmF0BR1jPb2XA2jor0MUYKBE4ViTkWZZ1dz46zSFMsEEfw~n6-SVYXZ2QHBBTkSAoxGtH6dH33Ph9pz~f9Wy7aYXq~9I-Ah0E6yC~BMiQuXe5qAOucuMPorKgPfC0dvLMw2EF0_&Key-Pair-Id=KeyID", + }, + { + "http://example.com/a", NewCannedPolicy("", testSignTime), time.Time{}, true, true, + "http://example.com/a?Policy=eyJTdGF0ZW1lbnQiOlt7IlJlc291cmNlIjoiaHR0cDovL2V4YW1wbGUuY29tL2EiLCJDb25kaXRpb24iOnsiRGF0ZUxlc3NUaGFuIjp7IkFXUzpFcG9jaFRpbWUiOjEyNTc4OTQwMDB9fX1dfQ__&Signature=Y6qvWOZNl99uNPMGprvrKXEmXpLWJ-xXKVHL~nmF0BR1jPb2XA2jor0MUYKBE4ViTkWZZ1dz46zSFMsEEfw~n6-SVYXZ2QHBBTkSAoxGtH6dH33Ph9pz~f9Wy7aYXq~9I-Ah0E6yC~BMiQuXe5qAOucuMPorKgPfC0dvLMw2EF0_&Key-Pair-Id=KeyID", + }, + { + "rtmp://example.com/a", nil, testSignTime, false, false, + "a?Expires=1257894000&Signature=Ds9NbpGwIcDKG1iZDyjfPXp0ZFYSIzfvGzJj-x28XlXfrarHrJbTOQj3bec~aAyb8NAqghBYRdKF9~RdjNrdyxyiequo-SCjFgFHnRNIk0FiqH0fVt2NO63f0X8-Kbur9cPtJoHR9Jzk0I1CQnECqhL6A0OgPhijTfKUITocmzA_&Key-Pair-Id=KeyID", + }, + { + "rtmp://example.com/a", NewCannedPolicy("a", testSignTime), time.Time{}, true, false, + "a?Policy=eyJTdGF0ZW1lbnQiOlt7IlJlc291cmNlIjoiYSIsIkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTI1Nzg5NDAwMH19fV19&Signature=Ds9NbpGwIcDKG1iZDyjfPXp0ZFYSIzfvGzJj-x28XlXfrarHrJbTOQj3bec~aAyb8NAqghBYRdKF9~RdjNrdyxyiequo-SCjFgFHnRNIk0FiqH0fVt2NO63f0X8-Kbur9cPtJoHR9Jzk0I1CQnECqhL6A0OgPhijTfKUITocmzA_&Key-Pair-Id=KeyID", + }, +} + +// TODO Sign URL HTTP +// TODO Sign URL RMTP +func TestSignURL(t *testing.T) { + origRandReader := randReader + randReader = newRandomReader(rand.New(rand.NewSource(1))) + defer func() { + randReader = origRandReader + }() + + privKey, err := rsa.GenerateKey(randReader, 1024) + if err != nil { + t.Fatalf("Unexpected priv key error, %#v", err) + } + + s := NewURLSigner("KeyID", privKey) + + for i, v := range testSignURL { + var u string + var err error + + if v.customPolicy { + u, err = s.SignWithPolicy(v.u, v.p) + } else { + u, err = s.Sign(v.u, v.t) + } + + if err != nil { + if v.expectErr { + continue + } + t.Errorf("%d, Unexpected error, %s", i, err.Error()) + continue + } else if v.expectErr { + t.Errorf("%d Expected error, but got none", i) + continue + } + + if u != v.out { + t.Errorf("%d, Unexpected URL\nexpect: %s\nactual: %s\n", i, v.out, u) + } + } + +} + +var testBuildSignedURL = []struct { + u, keyID string + p *Policy + customPolicy bool + b64Policy, b64Sig []byte + out string +}{ + { + "https://example.com/a?b=1", "KeyID", NewCannedPolicy("", testSignTime), true, []byte("b64Policy"), []byte("b64Sig"), + "https://example.com/a?b=1&Policy=b64Policy&Signature=b64Sig&Key-Pair-Id=KeyID", + }, + { + "https://example.com/a", "KeyID", NewCannedPolicy("", testSignTime), true, []byte("b64Policy"), []byte("b64Sig"), + "https://example.com/a?Policy=b64Policy&Signature=b64Sig&Key-Pair-Id=KeyID", + }, + { + "https://example.com/a?b=1", "KeyID", NewCannedPolicy("https://example.com/a?b=1", testSignTime), false, []byte("b64Policy"), []byte("b64Sig"), + "https://example.com/a?b=1&Expires=1257894000&Signature=b64Sig&Key-Pair-Id=KeyID", + }, +} + +func TestBuildSignedURL(t *testing.T) { + for i, v := range testBuildSignedURL { + u := buildSignedURL(v.u, v.keyID, v.p, v.customPolicy, v.b64Policy, v.b64Sig) + if u != v.out { + t.Errorf("%d, Unexpected URL\nexpect: %s\nactual: %s\n", i, v.out, u) + } + } +} + +var testValidURL = 
[]struct { + in, errPrefix string +}{ + {"https://example.com/a?b=1&else=b", ""}, + {"https://example.com/a?b=1&Policy=something&else=b", "Policy"}, + {"https://example.com/a?b=1&Signature=something&else=b", "Signature"}, + {"https://example.com/a?b=1&Key-Pair-Id=something&else=b", "Key-Pair-Id"}, + {"http?://example.com/a?b=1", "URL missing valid scheme"}, +} + +func TestValidateURL(t *testing.T) { + for i, v := range testValidURL { + err := validateURL(v.in) + if err != nil { + if v.errPrefix == "" { + t.Errorf("%d, Unexpected error %s", i, err.Error()) + } + if !strings.HasPrefix(err.Error(), v.errPrefix) { + t.Errorf("%d, Expected to find prefix\nexpect: %s\nactual: %s", i, v.errPrefix, err.Error()) + } + } else if v.errPrefix != "" { + t.Errorf("%d, Expected error %s", i, v.errPrefix) + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/waiters.go new file mode 100644 index 000000000..7a0525d17 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/waiters.go @@ -0,0 +1,76 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package cloudfront + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *CloudFront) WaitUntilDistributionDeployed(input *GetDistributionInput) error { + waiterCfg := waiter.Config{ + Operation: "GetDistribution", + Delay: 60, + MaxAttempts: 25, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "path", + Argument: "Distribution.Status", + Expected: "Deployed", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *CloudFront) WaitUntilInvalidationCompleted(input *GetInvalidationInput) error { + waiterCfg := waiter.Config{ + Operation: "GetInvalidation", + Delay: 20, + MaxAttempts: 30, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "path", + Argument: "Invalidation.Status", + Expected: "Completed", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *CloudFront) WaitUntilStreamingDistributionDeployed(input *GetStreamingDistributionInput) error { + waiterCfg := waiter.Config{ + Operation: "GetStreamingDistribution", + Delay: 60, + MaxAttempts: 25, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "path", + Argument: "StreamingDistribution.Status", + Expected: "Deployed", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudhsm/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloudhsm/api.go new file mode 100644 index 000000000..18d45bff5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudhsm/api.go @@ -0,0 +1,2226 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cloudhsm provides a client for Amazon CloudHSM. +package cloudhsm + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAddTagsToResource = "AddTagsToResource" + +// AddTagsToResourceRequest generates a "aws/request.Request" representing the +// client's request for the AddTagsToResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddTagsToResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddTagsToResourceRequest method. +// req, resp := client.AddTagsToResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudHSM) AddTagsToResourceRequest(input *AddTagsToResourceInput) (req *request.Request, output *AddTagsToResourceOutput) { + op := &request.Operation{ + Name: opAddTagsToResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddTagsToResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &AddTagsToResourceOutput{} + req.Data = output + return +} + +// Adds or overwrites one or more tags for the specified AWS CloudHSM resource. +// +// Each tag consists of a key and a value. Tag keys must be unique to each +// resource. +func (c *CloudHSM) AddTagsToResource(input *AddTagsToResourceInput) (*AddTagsToResourceOutput, error) { + req, out := c.AddTagsToResourceRequest(input) + err := req.Send() + return out, err +} + +const opCreateHapg = "CreateHapg" + +// CreateHapgRequest generates a "aws/request.Request" representing the +// client's request for the CreateHapg operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateHapg method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateHapgRequest method. +// req, resp := client.CreateHapgRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudHSM) CreateHapgRequest(input *CreateHapgInput) (req *request.Request, output *CreateHapgOutput) { + op := &request.Operation{ + Name: opCreateHapg, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateHapgInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateHapgOutput{} + req.Data = output + return +} + +// Creates a high-availability partition group. A high-availability partition +// group is a group of partitions that spans multiple physical HSMs. +func (c *CloudHSM) CreateHapg(input *CreateHapgInput) (*CreateHapgOutput, error) { + req, out := c.CreateHapgRequest(input) + err := req.Send() + return out, err +} + +const opCreateHsm = "CreateHsm" + +// CreateHsmRequest generates a "aws/request.Request" representing the +// client's request for the CreateHsm operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateHsm method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateHsmRequest method. +// req, resp := client.CreateHsmRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudHSM) CreateHsmRequest(input *CreateHsmInput) (req *request.Request, output *CreateHsmOutput) { + op := &request.Operation{ + Name: opCreateHsm, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateHsmInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateHsmOutput{} + req.Data = output + return +} + +// Creates an uninitialized HSM instance. +// +// There is an upfront fee charged for each HSM instance that you create with +// the CreateHsm operation. If you accidentally provision an HSM and want to +// request a refund, delete the instance using the DeleteHsm operation, go to +// the AWS Support Center (https://console.aws.amazon.com/support/home#/), create +// a new case, and select Account and Billing Support. +// +// It can take up to 20 minutes to create and provision an HSM. You can monitor +// the status of the HSM with the DescribeHsm operation. The HSM is ready to +// be initialized when the status changes to RUNNING. +func (c *CloudHSM) CreateHsm(input *CreateHsmInput) (*CreateHsmOutput, error) { + req, out := c.CreateHsmRequest(input) + err := req.Send() + return out, err +} + +const opCreateLunaClient = "CreateLunaClient" + +// CreateLunaClientRequest generates a "aws/request.Request" representing the +// client's request for the CreateLunaClient operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateLunaClient method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateLunaClientRequest method. +// req, resp := client.CreateLunaClientRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudHSM) CreateLunaClientRequest(input *CreateLunaClientInput) (req *request.Request, output *CreateLunaClientOutput) { + op := &request.Operation{ + Name: opCreateLunaClient, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLunaClientInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateLunaClientOutput{} + req.Data = output + return +} + +// Creates an HSM client. 
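+//
+//    // Minimal usage sketch (assumes an initialized *CloudHSM client `svc`
+//    // and a Base64-encoded X.509 v3 certificate in `cert`; names are
+//    // illustrative only).
+//    resp, err := svc.CreateLunaClient(&cloudhsm.CreateLunaClientInput{
+//        Certificate: aws.String(cert),             // required, min. 600 characters
+//        Label:       aws.String("example-client"), // optional
+//    })
+//    if err != nil {
+//        log.Println("CreateLunaClient failed:", err)
+//    } else {
+//        fmt.Println("client ARN:", aws.StringValue(resp.ClientArn))
+//    }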
+func (c *CloudHSM) CreateLunaClient(input *CreateLunaClientInput) (*CreateLunaClientOutput, error) { + req, out := c.CreateLunaClientRequest(input) + err := req.Send() + return out, err +} + +const opDeleteHapg = "DeleteHapg" + +// DeleteHapgRequest generates a "aws/request.Request" representing the +// client's request for the DeleteHapg operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteHapg method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteHapgRequest method. +// req, resp := client.DeleteHapgRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudHSM) DeleteHapgRequest(input *DeleteHapgInput) (req *request.Request, output *DeleteHapgOutput) { + op := &request.Operation{ + Name: opDeleteHapg, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteHapgInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteHapgOutput{} + req.Data = output + return +} + +// Deletes a high-availability partition group. +func (c *CloudHSM) DeleteHapg(input *DeleteHapgInput) (*DeleteHapgOutput, error) { + req, out := c.DeleteHapgRequest(input) + err := req.Send() + return out, err +} + +const opDeleteHsm = "DeleteHsm" + +// DeleteHsmRequest generates a "aws/request.Request" representing the +// client's request for the DeleteHsm operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteHsm method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteHsmRequest method. +// req, resp := client.DeleteHsmRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudHSM) DeleteHsmRequest(input *DeleteHsmInput) (req *request.Request, output *DeleteHsmOutput) { + op := &request.Operation{ + Name: opDeleteHsm, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteHsmInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteHsmOutput{} + req.Data = output + return +} + +// Deletes an HSM. After completion, this operation cannot be undone and your +// key material cannot be recovered. +func (c *CloudHSM) DeleteHsm(input *DeleteHsmInput) (*DeleteHsmOutput, error) { + req, out := c.DeleteHsmRequest(input) + err := req.Send() + return out, err +} + +const opDeleteLunaClient = "DeleteLunaClient" + +// DeleteLunaClientRequest generates a "aws/request.Request" representing the +// client's request for the DeleteLunaClient operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteLunaClient method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteLunaClientRequest method. +// req, resp := client.DeleteLunaClientRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudHSM) DeleteLunaClientRequest(input *DeleteLunaClientInput) (req *request.Request, output *DeleteLunaClientOutput) { + op := &request.Operation{ + Name: opDeleteLunaClient, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLunaClientInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteLunaClientOutput{} + req.Data = output + return +} + +// Deletes a client. +func (c *CloudHSM) DeleteLunaClient(input *DeleteLunaClientInput) (*DeleteLunaClientOutput, error) { + req, out := c.DeleteLunaClientRequest(input) + err := req.Send() + return out, err +} + +const opDescribeHapg = "DescribeHapg" + +// DescribeHapgRequest generates a "aws/request.Request" representing the +// client's request for the DescribeHapg operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeHapg method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeHapgRequest method. +// req, resp := client.DescribeHapgRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudHSM) DescribeHapgRequest(input *DescribeHapgInput) (req *request.Request, output *DescribeHapgOutput) { + op := &request.Operation{ + Name: opDescribeHapg, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeHapgInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeHapgOutput{} + req.Data = output + return +} + +// Retrieves information about a high-availability partition group. +func (c *CloudHSM) DescribeHapg(input *DescribeHapgInput) (*DescribeHapgOutput, error) { + req, out := c.DescribeHapgRequest(input) + err := req.Send() + return out, err +} + +const opDescribeHsm = "DescribeHsm" + +// DescribeHsmRequest generates a "aws/request.Request" representing the +// client's request for the DescribeHsm operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeHsm method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeHsmRequest method. +// req, resp := client.DescribeHsmRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudHSM) DescribeHsmRequest(input *DescribeHsmInput) (req *request.Request, output *DescribeHsmOutput) { + op := &request.Operation{ + Name: opDescribeHsm, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeHsmInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeHsmOutput{} + req.Data = output + return +} + +// Retrieves information about an HSM. You can identify the HSM by its ARN or +// its serial number. +func (c *CloudHSM) DescribeHsm(input *DescribeHsmInput) (*DescribeHsmOutput, error) { + req, out := c.DescribeHsmRequest(input) + err := req.Send() + return out, err +} + +const opDescribeLunaClient = "DescribeLunaClient" + +// DescribeLunaClientRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLunaClient operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeLunaClient method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeLunaClientRequest method. +// req, resp := client.DescribeLunaClientRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudHSM) DescribeLunaClientRequest(input *DescribeLunaClientInput) (req *request.Request, output *DescribeLunaClientOutput) { + op := &request.Operation{ + Name: opDescribeLunaClient, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeLunaClientInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeLunaClientOutput{} + req.Data = output + return +} + +// Retrieves information about an HSM client. +func (c *CloudHSM) DescribeLunaClient(input *DescribeLunaClientInput) (*DescribeLunaClientOutput, error) { + req, out := c.DescribeLunaClientRequest(input) + err := req.Send() + return out, err +} + +const opGetConfig = "GetConfig" + +// GetConfigRequest generates a "aws/request.Request" representing the +// client's request for the GetConfig operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetConfig method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetConfigRequest method. +// req, resp := client.GetConfigRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudHSM) GetConfigRequest(input *GetConfigInput) (req *request.Request, output *GetConfigOutput) { + op := &request.Operation{ + Name: opGetConfig, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetConfigInput{} + } + + req = c.newRequest(op, input, output) + output = &GetConfigOutput{} + req.Data = output + return +} + +// Gets the configuration files necessary to connect to all high availability +// partition groups the client is associated with. +func (c *CloudHSM) GetConfig(input *GetConfigInput) (*GetConfigOutput, error) { + req, out := c.GetConfigRequest(input) + err := req.Send() + return out, err +} + +const opListAvailableZones = "ListAvailableZones" + +// ListAvailableZonesRequest generates a "aws/request.Request" representing the +// client's request for the ListAvailableZones operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListAvailableZones method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListAvailableZonesRequest method. +// req, resp := client.ListAvailableZonesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudHSM) ListAvailableZonesRequest(input *ListAvailableZonesInput) (req *request.Request, output *ListAvailableZonesOutput) { + op := &request.Operation{ + Name: opListAvailableZones, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListAvailableZonesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListAvailableZonesOutput{} + req.Data = output + return +} + +// Lists the Availability Zones that have available AWS CloudHSM capacity. +func (c *CloudHSM) ListAvailableZones(input *ListAvailableZonesInput) (*ListAvailableZonesOutput, error) { + req, out := c.ListAvailableZonesRequest(input) + err := req.Send() + return out, err +} + +const opListHapgs = "ListHapgs" + +// ListHapgsRequest generates a "aws/request.Request" representing the +// client's request for the ListHapgs operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListHapgs method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListHapgsRequest method. +// req, resp := client.ListHapgsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudHSM) ListHapgsRequest(input *ListHapgsInput) (req *request.Request, output *ListHapgsOutput) { + op := &request.Operation{ + Name: opListHapgs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListHapgsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListHapgsOutput{} + req.Data = output + return +} + +// Lists the high-availability partition groups for the account. +// +// This operation supports pagination with the use of the NextToken member. +// If more results are available, the NextToken member of the response contains +// a token that you pass in the next call to ListHapgs to retrieve the next +// set of items. +func (c *CloudHSM) ListHapgs(input *ListHapgsInput) (*ListHapgsOutput, error) { + req, out := c.ListHapgsRequest(input) + err := req.Send() + return out, err +} + +const opListHsms = "ListHsms" + +// ListHsmsRequest generates a "aws/request.Request" representing the +// client's request for the ListHsms operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListHsms method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListHsmsRequest method. +// req, resp := client.ListHsmsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudHSM) ListHsmsRequest(input *ListHsmsInput) (req *request.Request, output *ListHsmsOutput) { + op := &request.Operation{ + Name: opListHsms, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListHsmsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListHsmsOutput{} + req.Data = output + return +} + +// Retrieves the identifiers of all of the HSMs provisioned for the current +// customer. +// +// This operation supports pagination with the use of the NextToken member. +// If more results are available, the NextToken member of the response contains +// a token that you pass in the next call to ListHsms to retrieve the next set +// of items. +func (c *CloudHSM) ListHsms(input *ListHsmsInput) (*ListHsmsOutput, error) { + req, out := c.ListHsmsRequest(input) + err := req.Send() + return out, err +} + +const opListLunaClients = "ListLunaClients" + +// ListLunaClientsRequest generates a "aws/request.Request" representing the +// client's request for the ListLunaClients operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListLunaClients method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListLunaClientsRequest method. +// req, resp := client.ListLunaClientsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudHSM) ListLunaClientsRequest(input *ListLunaClientsInput) (req *request.Request, output *ListLunaClientsOutput) { + op := &request.Operation{ + Name: opListLunaClients, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListLunaClientsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListLunaClientsOutput{} + req.Data = output + return +} + +// Lists all of the clients. +// +// This operation supports pagination with the use of the NextToken member. +// If more results are available, the NextToken member of the response contains +// a token that you pass in the next call to ListLunaClients to retrieve the +// next set of items. +func (c *CloudHSM) ListLunaClients(input *ListLunaClientsInput) (*ListLunaClientsOutput, error) { + req, out := c.ListLunaClientsRequest(input) + err := req.Send() + return out, err +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTagsForResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudHSM) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTagsForResourceOutput{} + req.Data = output + return +} + +// Returns a list of all tags for the specified AWS CloudHSM resource. 
+func (c *CloudHSM) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + err := req.Send() + return out, err +} + +const opModifyHapg = "ModifyHapg" + +// ModifyHapgRequest generates a "aws/request.Request" representing the +// client's request for the ModifyHapg operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyHapg method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyHapgRequest method. +// req, resp := client.ModifyHapgRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudHSM) ModifyHapgRequest(input *ModifyHapgInput) (req *request.Request, output *ModifyHapgOutput) { + op := &request.Operation{ + Name: opModifyHapg, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyHapgInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyHapgOutput{} + req.Data = output + return +} + +// Modifies an existing high-availability partition group. +func (c *CloudHSM) ModifyHapg(input *ModifyHapgInput) (*ModifyHapgOutput, error) { + req, out := c.ModifyHapgRequest(input) + err := req.Send() + return out, err +} + +const opModifyHsm = "ModifyHsm" + +// ModifyHsmRequest generates a "aws/request.Request" representing the +// client's request for the ModifyHsm operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyHsm method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyHsmRequest method. +// req, resp := client.ModifyHsmRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudHSM) ModifyHsmRequest(input *ModifyHsmInput) (req *request.Request, output *ModifyHsmOutput) { + op := &request.Operation{ + Name: opModifyHsm, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyHsmInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyHsmOutput{} + req.Data = output + return +} + +// Modifies an HSM. +// +// This operation can result in the HSM being offline for up to 15 minutes +// while the AWS CloudHSM service is reconfigured. If you are modifying a production +// HSM, you should ensure that your AWS CloudHSM service is configured for high +// availability, and consider executing this operation during a maintenance +// window. 
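+//
+//    // Minimal usage sketch (assumes an initialized *CloudHSM client `svc`;
+//    // the HsmArn input field follows the upstream API shape and is an
+//    // assumption here, as ModifyHsmInput is defined elsewhere).
+//    resp, err := svc.ModifyHsm(&cloudhsm.ModifyHsmInput{
+//        HsmArn: aws.String(hsmArn),
+//    })
+//    if err != nil {
+//        log.Println("ModifyHsm failed:", err)
+//    } else {
+//        fmt.Println(resp)
+//    }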
+func (c *CloudHSM) ModifyHsm(input *ModifyHsmInput) (*ModifyHsmOutput, error) { + req, out := c.ModifyHsmRequest(input) + err := req.Send() + return out, err +} + +const opModifyLunaClient = "ModifyLunaClient" + +// ModifyLunaClientRequest generates a "aws/request.Request" representing the +// client's request for the ModifyLunaClient operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyLunaClient method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyLunaClientRequest method. +// req, resp := client.ModifyLunaClientRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudHSM) ModifyLunaClientRequest(input *ModifyLunaClientInput) (req *request.Request, output *ModifyLunaClientOutput) { + op := &request.Operation{ + Name: opModifyLunaClient, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyLunaClientInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyLunaClientOutput{} + req.Data = output + return +} + +// Modifies the certificate used by the client. +// +// This action can potentially start a workflow to install the new certificate +// on the client's HSMs. +func (c *CloudHSM) ModifyLunaClient(input *ModifyLunaClientInput) (*ModifyLunaClientOutput, error) { + req, out := c.ModifyLunaClientRequest(input) + err := req.Send() + return out, err +} + +const opRemoveTagsFromResource = "RemoveTagsFromResource" + +// RemoveTagsFromResourceRequest generates a "aws/request.Request" representing the +// client's request for the RemoveTagsFromResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RemoveTagsFromResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RemoveTagsFromResourceRequest method. +// req, resp := client.RemoveTagsFromResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudHSM) RemoveTagsFromResourceRequest(input *RemoveTagsFromResourceInput) (req *request.Request, output *RemoveTagsFromResourceOutput) { + op := &request.Operation{ + Name: opRemoveTagsFromResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveTagsFromResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &RemoveTagsFromResourceOutput{} + req.Data = output + return +} + +// Removes one or more tags from the specified AWS CloudHSM resource. 
+// +// To remove a tag, specify only the tag key to remove (not the value). To +// overwrite the value for an existing tag, use AddTagsToResource. +func (c *CloudHSM) RemoveTagsFromResource(input *RemoveTagsFromResourceInput) (*RemoveTagsFromResourceOutput, error) { + req, out := c.RemoveTagsFromResourceRequest(input) + err := req.Send() + return out, err +} + +type AddTagsToResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the AWS CloudHSM resource to tag. + ResourceArn *string `type:"string" required:"true"` + + // One or more tags. + TagList []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s AddTagsToResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsToResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddTagsToResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddTagsToResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.TagList == nil { + invalidParams.Add(request.NewErrParamRequired("TagList")) + } + if s.TagList != nil { + for i, v := range s.TagList { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TagList", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type AddTagsToResourceOutput struct { + _ struct{} `type:"structure"` + + // The status of the operation. + Status *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AddTagsToResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsToResourceOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the CreateHapgRequest action. +type CreateHapgInput struct { + _ struct{} `type:"structure"` + + // The label of the new high-availability partition group. + Label *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateHapgInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateHapgInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateHapgInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateHapgInput"} + if s.Label == nil { + invalidParams.Add(request.NewErrParamRequired("Label")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of the CreateHAPartitionGroup action. +type CreateHapgOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the high-availability partition group. + HapgArn *string `type:"string"` +} + +// String returns the string representation +func (s CreateHapgOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateHapgOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the CreateHsm operation. +type CreateHsmInput struct { + _ struct{} `locationName:"CreateHsmRequest" type:"structure"` + + // A user-defined token to ensure idempotence. 
Subsequent calls to this operation + // with the same token will be ignored. + ClientToken *string `locationName:"ClientToken" type:"string"` + + // The IP address to assign to the HSM's ENI. + // + // If an IP address is not specified, an IP address will be randomly chosen + // from the CIDR range of the subnet. + EniIp *string `locationName:"EniIp" type:"string"` + + // The external ID from IamRoleArn, if present. + ExternalId *string `locationName:"ExternalId" type:"string"` + + // The ARN of an IAM role to enable the AWS CloudHSM service to allocate an + // ENI on your behalf. + IamRoleArn *string `locationName:"IamRoleArn" type:"string" required:"true"` + + // The SSH public key to install on the HSM. + SshKey *string `locationName:"SshKey" type:"string" required:"true"` + + // The identifier of the subnet in your VPC in which to place the HSM. + SubnetId *string `locationName:"SubnetId" type:"string" required:"true"` + + // Specifies the type of subscription for the HSM. + // + // PRODUCTION - The HSM is being used in a production environment. TRIAL - + // The HSM is being used in a product trial. + SubscriptionType *string `locationName:"SubscriptionType" type:"string" required:"true" enum:"SubscriptionType"` + + // The IP address for the syslog monitoring server. The AWS CloudHSM service + // only supports one syslog monitoring server. + SyslogIp *string `locationName:"SyslogIp" type:"string"` +} + +// String returns the string representation +func (s CreateHsmInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateHsmInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateHsmInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateHsmInput"} + if s.IamRoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("IamRoleArn")) + } + if s.SshKey == nil { + invalidParams.Add(request.NewErrParamRequired("SshKey")) + } + if s.SubnetId == nil { + invalidParams.Add(request.NewErrParamRequired("SubnetId")) + } + if s.SubscriptionType == nil { + invalidParams.Add(request.NewErrParamRequired("SubscriptionType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of the CreateHsm operation. +type CreateHsmOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the HSM. + HsmArn *string `type:"string"` +} + +// String returns the string representation +func (s CreateHsmOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateHsmOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the CreateLunaClient action. +type CreateLunaClientInput struct { + _ struct{} `type:"structure"` + + // The contents of a Base64-Encoded X.509 v3 certificate to be installed on + // the HSMs used by this client. + Certificate *string `min:"600" type:"string" required:"true"` + + // The label for the client. + Label *string `type:"string"` +} + +// String returns the string representation +func (s CreateLunaClientInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLunaClientInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
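+// For CreateLunaClientInput this enforces that Certificate is set and is at
+// least 600 characters long, matching the field's minimum-length constraint.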
+func (s *CreateLunaClientInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateLunaClientInput"} + if s.Certificate == nil { + invalidParams.Add(request.NewErrParamRequired("Certificate")) + } + if s.Certificate != nil && len(*s.Certificate) < 600 { + invalidParams.Add(request.NewErrParamMinLen("Certificate", 600)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of the CreateLunaClient action. +type CreateLunaClientOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the client. + ClientArn *string `type:"string"` +} + +// String returns the string representation +func (s CreateLunaClientOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLunaClientOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the DeleteHapg action. +type DeleteHapgInput struct { + _ struct{} `type:"structure"` + + // The ARN of the high-availability partition group to delete. + HapgArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteHapgInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteHapgInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteHapgInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteHapgInput"} + if s.HapgArn == nil { + invalidParams.Add(request.NewErrParamRequired("HapgArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of the DeleteHapg action. +type DeleteHapgOutput struct { + _ struct{} `type:"structure"` + + // The status of the action. + Status *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteHapgOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteHapgOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the DeleteHsm operation. +type DeleteHsmInput struct { + _ struct{} `locationName:"DeleteHsmRequest" type:"structure"` + + // The ARN of the HSM to delete. + HsmArn *string `locationName:"HsmArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteHsmInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteHsmInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteHsmInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteHsmInput"} + if s.HsmArn == nil { + invalidParams.Add(request.NewErrParamRequired("HsmArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of the DeleteHsm operation. +type DeleteHsmOutput struct { + _ struct{} `type:"structure"` + + // The status of the operation. 
+ Status *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteHsmOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteHsmOutput) GoString() string { + return s.String() +} + +type DeleteLunaClientInput struct { + _ struct{} `type:"structure"` + + // The ARN of the client to delete. + ClientArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLunaClientInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLunaClientInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteLunaClientInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteLunaClientInput"} + if s.ClientArn == nil { + invalidParams.Add(request.NewErrParamRequired("ClientArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteLunaClientOutput struct { + _ struct{} `type:"structure"` + + // The status of the action. + Status *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLunaClientOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLunaClientOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the DescribeHapg action. +type DescribeHapgInput struct { + _ struct{} `type:"structure"` + + // The ARN of the high-availability partition group to describe. + HapgArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeHapgInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeHapgInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeHapgInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeHapgInput"} + if s.HapgArn == nil { + invalidParams.Add(request.NewErrParamRequired("HapgArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of the DescribeHapg action. +type DescribeHapgOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the high-availability partition group. + HapgArn *string `type:"string"` + + // The serial number of the high-availability partition group. + HapgSerial *string `type:"string"` + + // Contains a list of ARNs that identify the HSMs. + HsmsLastActionFailed []*string `type:"list"` + + // Contains a list of ARNs that identify the HSMs. + HsmsPendingDeletion []*string `type:"list"` + + // Contains a list of ARNs that identify the HSMs. + HsmsPendingRegistration []*string `type:"list"` + + // The label for the high-availability partition group. + Label *string `type:"string"` + + // The date and time the high-availability partition group was last modified. + LastModifiedTimestamp *string `type:"string"` + + // The list of partition serial numbers that belong to the high-availability + // partition group. + PartitionSerialList []*string `type:"list"` + + // The state of the high-availability partition group. 
+ State *string `type:"string" enum:"CloudHsmObjectState"`
+}
+
+// String returns the string representation
+func (s DescribeHapgOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeHapgOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the inputs for the DescribeHsm operation.
+type DescribeHsmInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN of the HSM. Either the HsmArn or the HsmSerialNumber parameter
+ // must be specified.
+ HsmArn *string `type:"string"`
+
+ // The serial number of the HSM. Either the HsmArn or the HsmSerialNumber parameter
+ // must be specified.
+ HsmSerialNumber *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeHsmInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeHsmInput) GoString() string {
+ return s.String()
+}
+
+// Contains the output of the DescribeHsm operation.
+type DescribeHsmOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The Availability Zone that the HSM is in.
+ AvailabilityZone *string `type:"string"`
+
+ // The identifier of the elastic network interface (ENI) attached to the HSM.
+ EniId *string `type:"string"`
+
+ // The IP address assigned to the HSM's ENI.
+ EniIp *string `type:"string"`
+
+ // The ARN of the HSM.
+ HsmArn *string `type:"string"`
+
+ // The HSM model type.
+ HsmType *string `type:"string"`
+
+ // The ARN of the IAM role assigned to the HSM.
+ IamRoleArn *string `type:"string"`
+
+ // The list of partitions on the HSM.
+ Partitions []*string `type:"list"`
+
+ // The serial number of the HSM.
+ SerialNumber *string `type:"string"`
+
+ // The date and time that the server certificate was last updated.
+ ServerCertLastUpdated *string `type:"string"`
+
+ // The URI of the certificate server.
+ ServerCertUri *string `type:"string"`
+
+ // The HSM software version.
+ SoftwareVersion *string `type:"string"`
+
+ // The date and time that the SSH key was last updated.
+ SshKeyLastUpdated *string `type:"string"`
+
+ // The public SSH key.
+ SshPublicKey *string `type:"string"`
+
+ // The status of the HSM.
+ Status *string `type:"string" enum:"HsmStatus"`
+
+ // Contains additional information about the status of the HSM.
+ StatusDetails *string `type:"string"`
+
+ // The identifier of the subnet that the HSM is in.
+ SubnetId *string `type:"string"`
+
+ // The subscription end date.
+ SubscriptionEndDate *string `type:"string"`
+
+ // The subscription start date.
+ SubscriptionStartDate *string `type:"string"`
+
+ // Specifies the type of subscription for the HSM.
+ //
+ // PRODUCTION - The HSM is being used in a production environment. TRIAL -
+ // The HSM is being used in a product trial.
+ SubscriptionType *string `type:"string" enum:"SubscriptionType"`
+
+ // The name of the HSM vendor.
+ VendorName *string `type:"string"`
+
+ // The identifier of the VPC that the HSM is in.
+ VpcId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeHsmOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeHsmOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeLunaClientInput struct {
+ _ struct{} `type:"structure"`
+
+ // The certificate fingerprint.
+ CertificateFingerprint *string `type:"string"`
+
+ // The ARN of the client.
+ ClientArn *string `type:"string"` +} + +// String returns the string representation +func (s DescribeLunaClientInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLunaClientInput) GoString() string { + return s.String() +} + +type DescribeLunaClientOutput struct { + _ struct{} `type:"structure"` + + // The certificate installed on the HSMs used by this client. + Certificate *string `min:"600" type:"string"` + + // The certificate fingerprint. + CertificateFingerprint *string `type:"string"` + + // The ARN of the client. + ClientArn *string `type:"string"` + + // The label of the client. + Label *string `type:"string"` + + // The date and time the client was last modified. + LastModifiedTimestamp *string `type:"string"` +} + +// String returns the string representation +func (s DescribeLunaClientOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLunaClientOutput) GoString() string { + return s.String() +} + +type GetConfigInput struct { + _ struct{} `type:"structure"` + + // The ARN of the client. + ClientArn *string `type:"string" required:"true"` + + // The client version. + ClientVersion *string `type:"string" required:"true" enum:"ClientVersion"` + + // A list of ARNs that identify the high-availability partition groups that + // are associated with the client. + HapgList []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s GetConfigInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetConfigInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetConfigInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetConfigInput"} + if s.ClientArn == nil { + invalidParams.Add(request.NewErrParamRequired("ClientArn")) + } + if s.ClientVersion == nil { + invalidParams.Add(request.NewErrParamRequired("ClientVersion")) + } + if s.HapgList == nil { + invalidParams.Add(request.NewErrParamRequired("HapgList")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetConfigOutput struct { + _ struct{} `type:"structure"` + + // The certificate file containing the server.pem files of the HSMs. + ConfigCred *string `type:"string"` + + // The chrystoki.conf configuration file. + ConfigFile *string `type:"string"` + + // The type of credentials. + ConfigType *string `type:"string"` +} + +// String returns the string representation +func (s GetConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetConfigOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the ListAvailableZones action. +type ListAvailableZonesInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ListAvailableZonesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAvailableZonesInput) GoString() string { + return s.String() +} + +type ListAvailableZonesOutput struct { + _ struct{} `type:"structure"` + + // The list of Availability Zones that have available AWS CloudHSM capacity. 
+ AZList []*string `type:"list"` +} + +// String returns the string representation +func (s ListAvailableZonesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAvailableZonesOutput) GoString() string { + return s.String() +} + +type ListHapgsInput struct { + _ struct{} `type:"structure"` + + // The NextToken value from a previous call to ListHapgs. Pass null if this + // is the first call. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListHapgsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListHapgsInput) GoString() string { + return s.String() +} + +type ListHapgsOutput struct { + _ struct{} `type:"structure"` + + // The list of high-availability partition groups. + HapgList []*string `type:"list" required:"true"` + + // If not null, more results are available. Pass this value to ListHapgs to + // retrieve the next set of items. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListHapgsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListHapgsOutput) GoString() string { + return s.String() +} + +type ListHsmsInput struct { + _ struct{} `type:"structure"` + + // The NextToken value from a previous call to ListHsms. Pass null if this is + // the first call. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListHsmsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListHsmsInput) GoString() string { + return s.String() +} + +// Contains the output of the ListHsms operation. +type ListHsmsOutput struct { + _ struct{} `type:"structure"` + + // The list of ARNs that identify the HSMs. + HsmList []*string `type:"list"` + + // If not null, more results are available. Pass this value to ListHsms to retrieve + // the next set of items. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListHsmsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListHsmsOutput) GoString() string { + return s.String() +} + +type ListLunaClientsInput struct { + _ struct{} `type:"structure"` + + // The NextToken value from a previous call to ListLunaClients. Pass null if + // this is the first call. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListLunaClientsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListLunaClientsInput) GoString() string { + return s.String() +} + +type ListLunaClientsOutput struct { + _ struct{} `type:"structure"` + + // The list of clients. + ClientList []*string `type:"list" required:"true"` + + // If not null, more results are available. Pass this to ListLunaClients to + // retrieve the next set of items. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListLunaClientsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListLunaClientsOutput) GoString() string { + return s.String() +} + +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the AWS CloudHSM resource. 
+ ResourceArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // One or more tags. + TagList []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +type ModifyHapgInput struct { + _ struct{} `type:"structure"` + + // The ARN of the high-availability partition group to modify. + HapgArn *string `type:"string" required:"true"` + + // The new label for the high-availability partition group. + Label *string `type:"string"` + + // The list of partition serial numbers to make members of the high-availability + // partition group. + PartitionSerialList []*string `type:"list"` +} + +// String returns the string representation +func (s ModifyHapgInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyHapgInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyHapgInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyHapgInput"} + if s.HapgArn == nil { + invalidParams.Add(request.NewErrParamRequired("HapgArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ModifyHapgOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the high-availability partition group. + HapgArn *string `type:"string"` +} + +// String returns the string representation +func (s ModifyHapgOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyHapgOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the ModifyHsm operation. +type ModifyHsmInput struct { + _ struct{} `locationName:"ModifyHsmRequest" type:"structure"` + + // The new IP address for the elastic network interface (ENI) attached to the + // HSM. + // + // If the HSM is moved to a different subnet, and an IP address is not specified, + // an IP address will be randomly chosen from the CIDR range of the new subnet. + EniIp *string `locationName:"EniIp" type:"string"` + + // The new external ID. + ExternalId *string `locationName:"ExternalId" type:"string"` + + // The ARN of the HSM to modify. + HsmArn *string `locationName:"HsmArn" type:"string" required:"true"` + + // The new IAM role ARN. + IamRoleArn *string `locationName:"IamRoleArn" type:"string"` + + // The new identifier of the subnet that the HSM is in. The new subnet must + // be in the same Availability Zone as the current subnet. 
+ SubnetId *string `locationName:"SubnetId" type:"string"` + + // The new IP address for the syslog monitoring server. The AWS CloudHSM service + // only supports one syslog monitoring server. + SyslogIp *string `locationName:"SyslogIp" type:"string"` +} + +// String returns the string representation +func (s ModifyHsmInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyHsmInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyHsmInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyHsmInput"} + if s.HsmArn == nil { + invalidParams.Add(request.NewErrParamRequired("HsmArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of the ModifyHsm operation. +type ModifyHsmOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the HSM. + HsmArn *string `type:"string"` +} + +// String returns the string representation +func (s ModifyHsmOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyHsmOutput) GoString() string { + return s.String() +} + +type ModifyLunaClientInput struct { + _ struct{} `type:"structure"` + + // The new certificate for the client. + Certificate *string `min:"600" type:"string" required:"true"` + + // The ARN of the client. + ClientArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyLunaClientInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyLunaClientInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyLunaClientInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyLunaClientInput"} + if s.Certificate == nil { + invalidParams.Add(request.NewErrParamRequired("Certificate")) + } + if s.Certificate != nil && len(*s.Certificate) < 600 { + invalidParams.Add(request.NewErrParamMinLen("Certificate", 600)) + } + if s.ClientArn == nil { + invalidParams.Add(request.NewErrParamRequired("ClientArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ModifyLunaClientOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the client. + ClientArn *string `type:"string"` +} + +// String returns the string representation +func (s ModifyLunaClientOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyLunaClientOutput) GoString() string { + return s.String() +} + +type RemoveTagsFromResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the AWS CloudHSM resource. + ResourceArn *string `type:"string" required:"true"` + + // The tag key or keys to remove. + // + // Specify only the tag key to remove (not the value). To overwrite the value + // for an existing tag, use AddTagsToResource. 
+ TagKeyList []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s RemoveTagsFromResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsFromResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RemoveTagsFromResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemoveTagsFromResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.TagKeyList == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeyList")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RemoveTagsFromResourceOutput struct { + _ struct{} `type:"structure"` + + // The status of the operation. + Status *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RemoveTagsFromResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsFromResourceOutput) GoString() string { + return s.String() +} + +// A key-value pair that identifies or specifies metadata about an AWS CloudHSM +// resource. +type Tag struct { + _ struct{} `type:"structure"` + + // The key of the tag. + Key *string `min:"1" type:"string" required:"true"` + + // The value of the tag. + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +const ( + // @enum ClientVersion + ClientVersion51 = "5.1" + // @enum ClientVersion + ClientVersion53 = "5.3" +) + +const ( + // @enum CloudHsmObjectState + CloudHsmObjectStateReady = "READY" + // @enum CloudHsmObjectState + CloudHsmObjectStateUpdating = "UPDATING" + // @enum CloudHsmObjectState + CloudHsmObjectStateDegraded = "DEGRADED" +) + +const ( + // @enum HsmStatus + HsmStatusPending = "PENDING" + // @enum HsmStatus + HsmStatusRunning = "RUNNING" + // @enum HsmStatus + HsmStatusUpdating = "UPDATING" + // @enum HsmStatus + HsmStatusSuspended = "SUSPENDED" + // @enum HsmStatus + HsmStatusTerminating = "TERMINATING" + // @enum HsmStatus + HsmStatusTerminated = "TERMINATED" + // @enum HsmStatus + HsmStatusDegraded = "DEGRADED" +) + +// Specifies the type of subscription for the HSM. +// +// PRODUCTION - The HSM is being used in a production environment. TRIAL - +// The HSM is being used in a product trial. 
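+//
+// An illustrative comparison (a sketch, not generated code; svc and out are
+// assumed to be a configured CloudHSM client and a DescribeHsm output):
+//
+// if aws.StringValue(out.SubscriptionType) == SubscriptionTypeProduction {
+// // the HSM is on a production subscription
+// }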
+const ( + // @enum SubscriptionType + SubscriptionTypeProduction = "PRODUCTION" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudhsm/cloudhsmiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/cloudhsm/cloudhsmiface/interface.go new file mode 100644 index 000000000..c7155e543 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudhsm/cloudhsmiface/interface.go @@ -0,0 +1,94 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cloudhsmiface provides an interface for the Amazon CloudHSM. +package cloudhsmiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/cloudhsm" +) + +// CloudHSMAPI is the interface type for cloudhsm.CloudHSM. +type CloudHSMAPI interface { + AddTagsToResourceRequest(*cloudhsm.AddTagsToResourceInput) (*request.Request, *cloudhsm.AddTagsToResourceOutput) + + AddTagsToResource(*cloudhsm.AddTagsToResourceInput) (*cloudhsm.AddTagsToResourceOutput, error) + + CreateHapgRequest(*cloudhsm.CreateHapgInput) (*request.Request, *cloudhsm.CreateHapgOutput) + + CreateHapg(*cloudhsm.CreateHapgInput) (*cloudhsm.CreateHapgOutput, error) + + CreateHsmRequest(*cloudhsm.CreateHsmInput) (*request.Request, *cloudhsm.CreateHsmOutput) + + CreateHsm(*cloudhsm.CreateHsmInput) (*cloudhsm.CreateHsmOutput, error) + + CreateLunaClientRequest(*cloudhsm.CreateLunaClientInput) (*request.Request, *cloudhsm.CreateLunaClientOutput) + + CreateLunaClient(*cloudhsm.CreateLunaClientInput) (*cloudhsm.CreateLunaClientOutput, error) + + DeleteHapgRequest(*cloudhsm.DeleteHapgInput) (*request.Request, *cloudhsm.DeleteHapgOutput) + + DeleteHapg(*cloudhsm.DeleteHapgInput) (*cloudhsm.DeleteHapgOutput, error) + + DeleteHsmRequest(*cloudhsm.DeleteHsmInput) (*request.Request, *cloudhsm.DeleteHsmOutput) + + DeleteHsm(*cloudhsm.DeleteHsmInput) (*cloudhsm.DeleteHsmOutput, error) + + DeleteLunaClientRequest(*cloudhsm.DeleteLunaClientInput) (*request.Request, *cloudhsm.DeleteLunaClientOutput) + + DeleteLunaClient(*cloudhsm.DeleteLunaClientInput) (*cloudhsm.DeleteLunaClientOutput, error) + + DescribeHapgRequest(*cloudhsm.DescribeHapgInput) (*request.Request, *cloudhsm.DescribeHapgOutput) + + DescribeHapg(*cloudhsm.DescribeHapgInput) (*cloudhsm.DescribeHapgOutput, error) + + DescribeHsmRequest(*cloudhsm.DescribeHsmInput) (*request.Request, *cloudhsm.DescribeHsmOutput) + + DescribeHsm(*cloudhsm.DescribeHsmInput) (*cloudhsm.DescribeHsmOutput, error) + + DescribeLunaClientRequest(*cloudhsm.DescribeLunaClientInput) (*request.Request, *cloudhsm.DescribeLunaClientOutput) + + DescribeLunaClient(*cloudhsm.DescribeLunaClientInput) (*cloudhsm.DescribeLunaClientOutput, error) + + GetConfigRequest(*cloudhsm.GetConfigInput) (*request.Request, *cloudhsm.GetConfigOutput) + + GetConfig(*cloudhsm.GetConfigInput) (*cloudhsm.GetConfigOutput, error) + + ListAvailableZonesRequest(*cloudhsm.ListAvailableZonesInput) (*request.Request, *cloudhsm.ListAvailableZonesOutput) + + ListAvailableZones(*cloudhsm.ListAvailableZonesInput) (*cloudhsm.ListAvailableZonesOutput, error) + + ListHapgsRequest(*cloudhsm.ListHapgsInput) (*request.Request, *cloudhsm.ListHapgsOutput) + + ListHapgs(*cloudhsm.ListHapgsInput) (*cloudhsm.ListHapgsOutput, error) + + ListHsmsRequest(*cloudhsm.ListHsmsInput) (*request.Request, *cloudhsm.ListHsmsOutput) + + ListHsms(*cloudhsm.ListHsmsInput) (*cloudhsm.ListHsmsOutput, error) + + ListLunaClientsRequest(*cloudhsm.ListLunaClientsInput) (*request.Request, *cloudhsm.ListLunaClientsOutput) + + ListLunaClients(*cloudhsm.ListLunaClientsInput) 
(*cloudhsm.ListLunaClientsOutput, error) + + ListTagsForResourceRequest(*cloudhsm.ListTagsForResourceInput) (*request.Request, *cloudhsm.ListTagsForResourceOutput) + + ListTagsForResource(*cloudhsm.ListTagsForResourceInput) (*cloudhsm.ListTagsForResourceOutput, error) + + ModifyHapgRequest(*cloudhsm.ModifyHapgInput) (*request.Request, *cloudhsm.ModifyHapgOutput) + + ModifyHapg(*cloudhsm.ModifyHapgInput) (*cloudhsm.ModifyHapgOutput, error) + + ModifyHsmRequest(*cloudhsm.ModifyHsmInput) (*request.Request, *cloudhsm.ModifyHsmOutput) + + ModifyHsm(*cloudhsm.ModifyHsmInput) (*cloudhsm.ModifyHsmOutput, error) + + ModifyLunaClientRequest(*cloudhsm.ModifyLunaClientInput) (*request.Request, *cloudhsm.ModifyLunaClientOutput) + + ModifyLunaClient(*cloudhsm.ModifyLunaClientInput) (*cloudhsm.ModifyLunaClientOutput, error) + + RemoveTagsFromResourceRequest(*cloudhsm.RemoveTagsFromResourceInput) (*request.Request, *cloudhsm.RemoveTagsFromResourceOutput) + + RemoveTagsFromResource(*cloudhsm.RemoveTagsFromResourceInput) (*cloudhsm.RemoveTagsFromResourceOutput, error) +} + +var _ CloudHSMAPI = (*cloudhsm.CloudHSM)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudhsm/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/cloudhsm/examples_test.go new file mode 100644 index 000000000..186378923 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudhsm/examples_test.go @@ -0,0 +1,431 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package cloudhsm_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/cloudhsm" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleCloudHSM_AddTagsToResource() { + svc := cloudhsm.New(session.New()) + + params := &cloudhsm.AddTagsToResourceInput{ + ResourceArn: aws.String("String"), // Required + TagList: []*cloudhsm.Tag{ // Required + { // Required + Key: aws.String("TagKey"), // Required + Value: aws.String("TagValue"), // Required + }, + // More values... + }, + } + resp, err := svc.AddTagsToResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudHSM_CreateHapg() { + svc := cloudhsm.New(session.New()) + + params := &cloudhsm.CreateHapgInput{ + Label: aws.String("Label"), // Required + } + resp, err := svc.CreateHapg(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudHSM_CreateHsm() { + svc := cloudhsm.New(session.New()) + + params := &cloudhsm.CreateHsmInput{ + IamRoleArn: aws.String("IamRoleArn"), // Required + SshKey: aws.String("SshKey"), // Required + SubnetId: aws.String("SubnetId"), // Required + SubscriptionType: aws.String("SubscriptionType"), // Required + ClientToken: aws.String("ClientToken"), + EniIp: aws.String("IpAddress"), + ExternalId: aws.String("ExternalId"), + SyslogIp: aws.String("IpAddress"), + } + resp, err := svc.CreateHsm(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
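+ // (Illustrative, not generated: the awserr cast mentioned in the error
+ // branch above could look like this, with
+ // "github.com/aws/aws-sdk-go/aws/awserr" imported:
+ //
+ // if awsErr, ok := err.(awserr.Error); ok {
+ // fmt.Println(awsErr.Code(), awsErr.Message())
+ // }
+ // )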
+ fmt.Println(resp) +} + +func ExampleCloudHSM_CreateLunaClient() { + svc := cloudhsm.New(session.New()) + + params := &cloudhsm.CreateLunaClientInput{ + Certificate: aws.String("Certificate"), // Required + Label: aws.String("ClientLabel"), + } + resp, err := svc.CreateLunaClient(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudHSM_DeleteHapg() { + svc := cloudhsm.New(session.New()) + + params := &cloudhsm.DeleteHapgInput{ + HapgArn: aws.String("HapgArn"), // Required + } + resp, err := svc.DeleteHapg(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudHSM_DeleteHsm() { + svc := cloudhsm.New(session.New()) + + params := &cloudhsm.DeleteHsmInput{ + HsmArn: aws.String("HsmArn"), // Required + } + resp, err := svc.DeleteHsm(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudHSM_DeleteLunaClient() { + svc := cloudhsm.New(session.New()) + + params := &cloudhsm.DeleteLunaClientInput{ + ClientArn: aws.String("ClientArn"), // Required + } + resp, err := svc.DeleteLunaClient(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudHSM_DescribeHapg() { + svc := cloudhsm.New(session.New()) + + params := &cloudhsm.DescribeHapgInput{ + HapgArn: aws.String("HapgArn"), // Required + } + resp, err := svc.DescribeHapg(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudHSM_DescribeHsm() { + svc := cloudhsm.New(session.New()) + + params := &cloudhsm.DescribeHsmInput{ + HsmArn: aws.String("HsmArn"), + HsmSerialNumber: aws.String("HsmSerialNumber"), + } + resp, err := svc.DescribeHsm(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudHSM_DescribeLunaClient() { + svc := cloudhsm.New(session.New()) + + params := &cloudhsm.DescribeLunaClientInput{ + CertificateFingerprint: aws.String("CertificateFingerprint"), + ClientArn: aws.String("ClientArn"), + } + resp, err := svc.DescribeLunaClient(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudHSM_GetConfig() { + svc := cloudhsm.New(session.New()) + + params := &cloudhsm.GetConfigInput{ + ClientArn: aws.String("ClientArn"), // Required + ClientVersion: aws.String("ClientVersion"), // Required + HapgList: []*string{ // Required + aws.String("HapgArn"), // Required + // More values... 
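+ // (Illustrative note, not generated: in a real call the ClientVersion value
+ // above would be one of this package's ClientVersion constants, i.e.
+ // cloudhsm.ClientVersion51 ("5.1") or cloudhsm.ClientVersion53 ("5.3").)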
+ }, + } + resp, err := svc.GetConfig(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudHSM_ListAvailableZones() { + svc := cloudhsm.New(session.New()) + + var params *cloudhsm.ListAvailableZonesInput + resp, err := svc.ListAvailableZones(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudHSM_ListHapgs() { + svc := cloudhsm.New(session.New()) + + params := &cloudhsm.ListHapgsInput{ + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListHapgs(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudHSM_ListHsms() { + svc := cloudhsm.New(session.New()) + + params := &cloudhsm.ListHsmsInput{ + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListHsms(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudHSM_ListLunaClients() { + svc := cloudhsm.New(session.New()) + + params := &cloudhsm.ListLunaClientsInput{ + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListLunaClients(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudHSM_ListTagsForResource() { + svc := cloudhsm.New(session.New()) + + params := &cloudhsm.ListTagsForResourceInput{ + ResourceArn: aws.String("String"), // Required + } + resp, err := svc.ListTagsForResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudHSM_ModifyHapg() { + svc := cloudhsm.New(session.New()) + + params := &cloudhsm.ModifyHapgInput{ + HapgArn: aws.String("HapgArn"), // Required + Label: aws.String("Label"), + PartitionSerialList: []*string{ + aws.String("PartitionSerial"), // Required + // More values... + }, + } + resp, err := svc.ModifyHapg(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudHSM_ModifyHsm() { + svc := cloudhsm.New(session.New()) + + params := &cloudhsm.ModifyHsmInput{ + HsmArn: aws.String("HsmArn"), // Required + EniIp: aws.String("IpAddress"), + ExternalId: aws.String("ExternalId"), + IamRoleArn: aws.String("IamRoleArn"), + SubnetId: aws.String("SubnetId"), + SyslogIp: aws.String("IpAddress"), + } + resp, err := svc.ModifyHsm(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
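+ // (Illustrative, not generated: the List* operations above return one page
+ // at a time; a caller would typically loop on NextToken, e.g.
+ //
+ // var hsms []*string
+ // in := &cloudhsm.ListHsmsInput{}
+ // for {
+ // page, err := svc.ListHsms(in)
+ // if err != nil {
+ // break
+ // }
+ // hsms = append(hsms, page.HsmList...)
+ // if page.NextToken == nil {
+ // break
+ // }
+ // in.NextToken = page.NextToken
+ // }
+ // )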
+ fmt.Println(resp)
+}
+
+func ExampleCloudHSM_ModifyLunaClient() {
+ svc := cloudhsm.New(session.New())
+
+ params := &cloudhsm.ModifyLunaClientInput{
+ Certificate: aws.String("Certificate"), // Required
+ ClientArn: aws.String("ClientArn"), // Required
+ }
+ resp, err := svc.ModifyLunaClient(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleCloudHSM_RemoveTagsFromResource() {
+ svc := cloudhsm.New(session.New())
+
+ params := &cloudhsm.RemoveTagsFromResourceInput{
+ ResourceArn: aws.String("String"), // Required
+ TagKeyList: []*string{ // Required
+ aws.String("TagKey"), // Required
+ // More values...
+ },
+ }
+ resp, err := svc.RemoveTagsFromResource(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudhsm/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudhsm/service.go
new file mode 100644
index 000000000..4b6267158
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/cloudhsm/service.go
@@ -0,0 +1,87 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package cloudhsm
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+)
+
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type CloudHSM struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "cloudhsm"
+
+// New creates a new instance of the CloudHSM client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a CloudHSM client from just a session.
+// svc := cloudhsm.New(mySession)
+//
+// // Create a CloudHSM client with additional configuration
+// svc := cloudhsm.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudHSM {
+ c := p.ClientConfig(ServiceName, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *CloudHSM { + svc := &CloudHSM{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-05-30", + JSONVersion: "1.1", + TargetPrefix: "CloudHsmFrontendService", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a CloudHSM operation and runs any +// custom request initialization. +func (c *CloudHSM) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudsearch/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloudsearch/api.go new file mode 100644 index 000000000..c90cbe507 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudsearch/api.go @@ -0,0 +1,4103 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cloudsearch provides a client for Amazon CloudSearch. +package cloudsearch + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opBuildSuggesters = "BuildSuggesters" + +// BuildSuggestersRequest generates a "aws/request.Request" representing the +// client's request for the BuildSuggesters operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the BuildSuggesters method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the BuildSuggestersRequest method. +// req, resp := client.BuildSuggestersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudSearch) BuildSuggestersRequest(input *BuildSuggestersInput) (req *request.Request, output *BuildSuggestersOutput) { + op := &request.Operation{ + Name: opBuildSuggesters, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BuildSuggestersInput{} + } + + req = c.newRequest(op, input, output) + output = &BuildSuggestersOutput{} + req.Data = output + return +} + +// Indexes the search suggestions. For more information, see Configuring Suggesters +// (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/getting-suggestions.html#configuring-suggesters) +// in the Amazon CloudSearch Developer Guide. 
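+//
+// An illustrative call (a sketch, not generated documentation; the DomainName
+// field is assumed from the CloudSearch API model, and svc is assumed to be a
+// configured CloudSearch client):
+//
+// out, err := svc.BuildSuggesters(&BuildSuggestersInput{
+// DomainName: aws.String("mydomain"),
+// })
+// if err == nil {
+// fmt.Println(out)
+// }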
+func (c *CloudSearch) BuildSuggesters(input *BuildSuggestersInput) (*BuildSuggestersOutput, error) {
+ req, out := c.BuildSuggestersRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opCreateDomain = "CreateDomain"
+
+// CreateDomainRequest generates a "aws/request.Request" representing the
+// client's request for the CreateDomain operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CreateDomain method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the CreateDomainRequest method.
+// req, resp := client.CreateDomainRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CloudSearch) CreateDomainRequest(input *CreateDomainInput) (req *request.Request, output *CreateDomainOutput) {
+ op := &request.Operation{
+ Name: opCreateDomain,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateDomainInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &CreateDomainOutput{}
+ req.Data = output
+ return
+}
+
+// Creates a new search domain. For more information, see Creating a Search
+// Domain (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/creating-domains.html)
+// in the Amazon CloudSearch Developer Guide.
+func (c *CloudSearch) CreateDomain(input *CreateDomainInput) (*CreateDomainOutput, error) {
+ req, out := c.CreateDomainRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDefineAnalysisScheme = "DefineAnalysisScheme"
+
+// DefineAnalysisSchemeRequest generates a "aws/request.Request" representing the
+// client's request for the DefineAnalysisScheme operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DefineAnalysisScheme method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DefineAnalysisSchemeRequest method.
+// req, resp := client.DefineAnalysisSchemeRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CloudSearch) DefineAnalysisSchemeRequest(input *DefineAnalysisSchemeInput) (req *request.Request, output *DefineAnalysisSchemeOutput) {
+ op := &request.Operation{
+ Name: opDefineAnalysisScheme,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DefineAnalysisSchemeInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DefineAnalysisSchemeOutput{}
+ req.Data = output
+ return
+}
+
+// Configures an analysis scheme that can be applied to a text or text-array
+// field to define language-specific text processing options. For more information,
+// see Configuring Analysis Schemes (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-analysis-schemes.html)
+// in the Amazon CloudSearch Developer Guide.
+func (c *CloudSearch) DefineAnalysisScheme(input *DefineAnalysisSchemeInput) (*DefineAnalysisSchemeOutput, error) {
+ req, out := c.DefineAnalysisSchemeRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDefineExpression = "DefineExpression"
+
+// DefineExpressionRequest generates a "aws/request.Request" representing the
+// client's request for the DefineExpression operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DefineExpression method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DefineExpressionRequest method.
+// req, resp := client.DefineExpressionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CloudSearch) DefineExpressionRequest(input *DefineExpressionInput) (req *request.Request, output *DefineExpressionOutput) {
+ op := &request.Operation{
+ Name: opDefineExpression,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DefineExpressionInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DefineExpressionOutput{}
+ req.Data = output
+ return
+}
+
+// Configures an Expression for the search domain. Used to create new expressions
+// and modify existing ones. If the expression exists, the new configuration
+// replaces the old one. For more information, see Configuring Expressions (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-expressions.html)
+// in the Amazon CloudSearch Developer Guide.
+func (c *CloudSearch) DefineExpression(input *DefineExpressionInput) (*DefineExpressionOutput, error) {
+ req, out := c.DefineExpressionRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDefineIndexField = "DefineIndexField"
+
+// DefineIndexFieldRequest generates a "aws/request.Request" representing the
+// client's request for the DefineIndexField operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DefineIndexField method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DefineIndexFieldRequest method.
+// req, resp := client.DefineIndexFieldRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CloudSearch) DefineIndexFieldRequest(input *DefineIndexFieldInput) (req *request.Request, output *DefineIndexFieldOutput) {
+ op := &request.Operation{
+ Name: opDefineIndexField,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DefineIndexFieldInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DefineIndexFieldOutput{}
+ req.Data = output
+ return
+}
+
+// Configures an IndexField for the search domain. Used to create new fields
+// and modify existing ones. You must specify the name of the domain you are
+// configuring and an index field configuration. The index field configuration
+// specifies a unique name, the index field type, and the options you want to
+// configure for the field. The options you can specify depend on the IndexFieldType.
+// If the field exists, the new configuration replaces the old one. For more
+// information, see Configuring Index Fields (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-index-fields.html)
+// in the Amazon CloudSearch Developer Guide.
+func (c *CloudSearch) DefineIndexField(input *DefineIndexFieldInput) (*DefineIndexFieldOutput, error) {
+ req, out := c.DefineIndexFieldRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDefineSuggester = "DefineSuggester"
+
+// DefineSuggesterRequest generates a "aws/request.Request" representing the
+// client's request for the DefineSuggester operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DefineSuggester method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DefineSuggesterRequest method.
+// req, resp := client.DefineSuggesterRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CloudSearch) DefineSuggesterRequest(input *DefineSuggesterInput) (req *request.Request, output *DefineSuggesterOutput) {
+ op := &request.Operation{
+ Name: opDefineSuggester,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DefineSuggesterInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DefineSuggesterOutput{}
+ req.Data = output
+ return
+}
+
+// Configures a suggester for a domain. A suggester enables you to display possible
+// matches before users finish typing their queries. When you configure a suggester,
+// you must specify the name of the text field you want to search for possible
+// matches and a unique name for the suggester. For more information, see Getting
+// Search Suggestions (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/getting-suggestions.html)
+// in the Amazon CloudSearch Developer Guide.
+func (c *CloudSearch) DefineSuggester(input *DefineSuggesterInput) (*DefineSuggesterOutput, error) {
+ req, out := c.DefineSuggesterRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteAnalysisScheme = "DeleteAnalysisScheme"
+
+// DeleteAnalysisSchemeRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteAnalysisScheme operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DeleteAnalysisScheme method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DeleteAnalysisSchemeRequest method.
+// req, resp := client.DeleteAnalysisSchemeRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CloudSearch) DeleteAnalysisSchemeRequest(input *DeleteAnalysisSchemeInput) (req *request.Request, output *DeleteAnalysisSchemeOutput) {
+ op := &request.Operation{
+ Name: opDeleteAnalysisScheme,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteAnalysisSchemeInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DeleteAnalysisSchemeOutput{}
+ req.Data = output
+ return
+}
+
+// Deletes an analysis scheme. For more information, see Configuring Analysis
+// Schemes (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-analysis-schemes.html)
+// in the Amazon CloudSearch Developer Guide.
+func (c *CloudSearch) DeleteAnalysisScheme(input *DeleteAnalysisSchemeInput) (*DeleteAnalysisSchemeOutput, error) {
+ req, out := c.DeleteAnalysisSchemeRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteDomain = "DeleteDomain"
+
+// DeleteDomainRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteDomain operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DeleteDomain method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DeleteDomainRequest method.
+// req, resp := client.DeleteDomainRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CloudSearch) DeleteDomainRequest(input *DeleteDomainInput) (req *request.Request, output *DeleteDomainOutput) {
+ op := &request.Operation{
+ Name: opDeleteDomain,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteDomainInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DeleteDomainOutput{}
+ req.Data = output
+ return
+}
+
+// Permanently deletes a search domain and all of its data. Once a domain has
+// been deleted, it cannot be recovered. For more information, see Deleting
+// a Search Domain (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/deleting-domains.html)
+// in the Amazon CloudSearch Developer Guide.
+func (c *CloudSearch) DeleteDomain(input *DeleteDomainInput) (*DeleteDomainOutput, error) {
+ req, out := c.DeleteDomainRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteExpression = "DeleteExpression"
+
+// DeleteExpressionRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteExpression operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DeleteExpression method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DeleteExpressionRequest method.
+// req, resp := client.DeleteExpressionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CloudSearch) DeleteExpressionRequest(input *DeleteExpressionInput) (req *request.Request, output *DeleteExpressionOutput) {
+ op := &request.Operation{
+ Name: opDeleteExpression,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteExpressionInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DeleteExpressionOutput{}
+ req.Data = output
+ return
+}
+
+// Removes an Expression from the search domain. For more information, see Configuring
+// Expressions (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-expressions.html)
+// in the Amazon CloudSearch Developer Guide.
+func (c *CloudSearch) DeleteExpression(input *DeleteExpressionInput) (*DeleteExpressionOutput, error) {
+ req, out := c.DeleteExpressionRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteIndexField = "DeleteIndexField"
+
+// DeleteIndexFieldRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteIndexField operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DeleteIndexField method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DeleteIndexFieldRequest method.
+// req, resp := client.DeleteIndexFieldRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CloudSearch) DeleteIndexFieldRequest(input *DeleteIndexFieldInput) (req *request.Request, output *DeleteIndexFieldOutput) {
+ op := &request.Operation{
+ Name: opDeleteIndexField,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteIndexFieldInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DeleteIndexFieldOutput{}
+ req.Data = output
+ return
+}
+
+// Removes an IndexField from the search domain. For more information, see Configuring
+// Index Fields (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-index-fields.html)
+// in the Amazon CloudSearch Developer Guide.
+func (c *CloudSearch) DeleteIndexField(input *DeleteIndexFieldInput) (*DeleteIndexFieldOutput, error) {
+ req, out := c.DeleteIndexFieldRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteSuggester = "DeleteSuggester"
+
+// DeleteSuggesterRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteSuggester operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DeleteSuggester method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DeleteSuggesterRequest method.
+// req, resp := client.DeleteSuggesterRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CloudSearch) DeleteSuggesterRequest(input *DeleteSuggesterInput) (req *request.Request, output *DeleteSuggesterOutput) {
+ op := &request.Operation{
+ Name: opDeleteSuggester,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteSuggesterInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DeleteSuggesterOutput{}
+ req.Data = output
+ return
+}
+
+// Deletes a suggester. For more information, see Getting Search Suggestions
+// (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/getting-suggestions.html)
+// in the Amazon CloudSearch Developer Guide.
+func (c *CloudSearch) DeleteSuggester(input *DeleteSuggesterInput) (*DeleteSuggesterOutput, error) {
+ req, out := c.DeleteSuggesterRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDescribeAnalysisSchemes = "DescribeAnalysisSchemes"
+
+// DescribeAnalysisSchemesRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeAnalysisSchemes operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeAnalysisSchemes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeAnalysisSchemesRequest method. +// req, resp := client.DescribeAnalysisSchemesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudSearch) DescribeAnalysisSchemesRequest(input *DescribeAnalysisSchemesInput) (req *request.Request, output *DescribeAnalysisSchemesOutput) { + op := &request.Operation{ + Name: opDescribeAnalysisSchemes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAnalysisSchemesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAnalysisSchemesOutput{} + req.Data = output + return +} + +// Gets the analysis schemes configured for a domain. An analysis scheme defines +// language-specific text processing options for a text field. Can be limited +// to specific analysis schemes by name. By default, shows all analysis schemes +// and includes any pending changes to the configuration. Set the Deployed option +// to true to show the active configuration and exclude pending changes. For +// more information, see Configuring Analysis Schemes (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-analysis-schemes.html" +// target="_blank) in the Amazon CloudSearch Developer Guide. +func (c *CloudSearch) DescribeAnalysisSchemes(input *DescribeAnalysisSchemesInput) (*DescribeAnalysisSchemesOutput, error) { + req, out := c.DescribeAnalysisSchemesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAvailabilityOptions = "DescribeAvailabilityOptions" + +// DescribeAvailabilityOptionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAvailabilityOptions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeAvailabilityOptions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeAvailabilityOptionsRequest method. 
+// req, resp := client.DescribeAvailabilityOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudSearch) DescribeAvailabilityOptionsRequest(input *DescribeAvailabilityOptionsInput) (req *request.Request, output *DescribeAvailabilityOptionsOutput) { + op := &request.Operation{ + Name: opDescribeAvailabilityOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAvailabilityOptionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAvailabilityOptionsOutput{} + req.Data = output + return +} + +// Gets the availability options configured for a domain. By default, shows +// the configuration with any pending changes. Set the Deployed option to true +// to show the active configuration and exclude pending changes. For more information, +// see Configuring Availability Options (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-availability-options.html" +// target="_blank) in the Amazon CloudSearch Developer Guide. +func (c *CloudSearch) DescribeAvailabilityOptions(input *DescribeAvailabilityOptionsInput) (*DescribeAvailabilityOptionsOutput, error) { + req, out := c.DescribeAvailabilityOptionsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDomains = "DescribeDomains" + +// DescribeDomainsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDomains operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDomains method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDomainsRequest method. +// req, resp := client.DescribeDomainsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudSearch) DescribeDomainsRequest(input *DescribeDomainsInput) (req *request.Request, output *DescribeDomainsOutput) { + op := &request.Operation{ + Name: opDescribeDomains, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDomainsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDomainsOutput{} + req.Data = output + return +} + +// Gets information about the search domains owned by this account. Can be limited +// to specific domains. Shows all domains by default. To get the number of searchable +// documents in a domain, use the console or submit a matchall request to your +// domain's search endpoint: q=matchall&q.parser=structured&size=0. +// For more information, see Getting Information about a Search Domain (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/getting-domain-info.html" +// target="_blank) in the Amazon CloudSearch Developer Guide. 
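+//
+//    // Illustrative sketch (an editorial addition, not generated from the API
+//    // model): a minimal DescribeDomains call. Assumes "client" is a configured
+//    // *CloudSearch service client, "example-domain" is a hypothetical domain
+//    // name, and aws.String comes from the github.com/aws/aws-sdk-go/aws package.
+//    out, err := client.DescribeDomains(&DescribeDomainsInput{
+//        DomainNames: []*string{aws.String("example-domain")},
+//    })
+//    if err == nil { // out.DomainStatusList describes each matching domain
+//        fmt.Println(out.DomainStatusList)
+//    }
+//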
+func (c *CloudSearch) DescribeDomains(input *DescribeDomainsInput) (*DescribeDomainsOutput, error) {
+	req, out := c.DescribeDomainsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeExpressions = "DescribeExpressions"
+
+// DescribeExpressionsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeExpressions operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle via a custom handler, or when you need to access properties on the
+// request object before or after sending the request. If you just want the
+// service response, call the DescribeExpressions method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DescribeExpressionsRequest method.
+//    req, resp := client.DescribeExpressionsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *CloudSearch) DescribeExpressionsRequest(input *DescribeExpressionsInput) (req *request.Request, output *DescribeExpressionsOutput) {
+	op := &request.Operation{
+		Name:       opDescribeExpressions,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeExpressionsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeExpressionsOutput{}
+	req.Data = output
+	return
+}
+
+// Gets the expressions configured for the search domain. Can be limited to
+// specific expressions by name. By default, shows all expressions and includes
+// any pending changes to the configuration. Set the Deployed option to true
+// to show the active configuration and exclude pending changes. For more information,
+// see Configuring Expressions (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-expressions.html)
+// in the Amazon CloudSearch Developer Guide.
+func (c *CloudSearch) DescribeExpressions(input *DescribeExpressionsInput) (*DescribeExpressionsOutput, error) {
+	req, out := c.DescribeExpressionsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeIndexFields = "DescribeIndexFields"
+
+// DescribeIndexFieldsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeIndexFields operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle via a custom handler, or when you need to access properties on the
+// request object before or after sending the request. If you just want the
+// service response, call the DescribeIndexFields method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DescribeIndexFieldsRequest method.
+//    req, resp := client.DescribeIndexFieldsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *CloudSearch) DescribeIndexFieldsRequest(input *DescribeIndexFieldsInput) (req *request.Request, output *DescribeIndexFieldsOutput) {
+	op := &request.Operation{
+		Name:       opDescribeIndexFields,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeIndexFieldsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeIndexFieldsOutput{}
+	req.Data = output
+	return
+}
+
+// Gets information about the index fields configured for the search domain.
+// Can be limited to specific fields by name. By default, shows all fields and
+// includes any pending changes to the configuration. Set the Deployed option
+// to true to show the active configuration and exclude pending changes. For
+// more information, see Getting Domain Information (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/getting-domain-info.html)
+// in the Amazon CloudSearch Developer Guide.
+func (c *CloudSearch) DescribeIndexFields(input *DescribeIndexFieldsInput) (*DescribeIndexFieldsOutput, error) {
+	req, out := c.DescribeIndexFieldsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeScalingParameters = "DescribeScalingParameters"
+
+// DescribeScalingParametersRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeScalingParameters operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle via a custom handler, or when you need to access properties on the
+// request object before or after sending the request. If you just want the
+// service response, call the DescribeScalingParameters method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DescribeScalingParametersRequest method.
+//    req, resp := client.DescribeScalingParametersRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *CloudSearch) DescribeScalingParametersRequest(input *DescribeScalingParametersInput) (req *request.Request, output *DescribeScalingParametersOutput) {
+	op := &request.Operation{
+		Name:       opDescribeScalingParameters,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeScalingParametersInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeScalingParametersOutput{}
+	req.Data = output
+	return
+}
+
+// Gets the scaling parameters configured for a domain. A domain's scaling parameters
+// specify the desired search instance type and replication count. For more
+// information, see Configuring Scaling Options (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-scaling-options.html)
+// in the Amazon CloudSearch Developer Guide.
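+//
+//    // Illustrative sketch (an editorial addition, not generated from the API
+//    // model): reading a domain's scaling configuration. Assumes "client" is a
+//    // configured *CloudSearch service client and "example-domain" is a
+//    // hypothetical domain name.
+//    out, err := client.DescribeScalingParameters(&DescribeScalingParametersInput{
+//        DomainName: aws.String("example-domain"),
+//    })
+//    if err == nil { // out.ScalingParameters holds the desired instance type and replication count
+//        fmt.Println(out.ScalingParameters)
+//    }
+//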
+func (c *CloudSearch) DescribeScalingParameters(input *DescribeScalingParametersInput) (*DescribeScalingParametersOutput, error) {
+	req, out := c.DescribeScalingParametersRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeServiceAccessPolicies = "DescribeServiceAccessPolicies"
+
+// DescribeServiceAccessPoliciesRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeServiceAccessPolicies operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle via a custom handler, or when you need to access properties on the
+// request object before or after sending the request. If you just want the
+// service response, call the DescribeServiceAccessPolicies method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DescribeServiceAccessPoliciesRequest method.
+//    req, resp := client.DescribeServiceAccessPoliciesRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *CloudSearch) DescribeServiceAccessPoliciesRequest(input *DescribeServiceAccessPoliciesInput) (req *request.Request, output *DescribeServiceAccessPoliciesOutput) {
+	op := &request.Operation{
+		Name:       opDescribeServiceAccessPolicies,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeServiceAccessPoliciesInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeServiceAccessPoliciesOutput{}
+	req.Data = output
+	return
+}
+
+// Gets information about the access policies that control access to the domain's
+// document and search endpoints. By default, shows the configuration with any
+// pending changes. Set the Deployed option to true to show the active configuration
+// and exclude pending changes. For more information, see Configuring Access
+// for a Search Domain (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-access.html)
+// in the Amazon CloudSearch Developer Guide.
+func (c *CloudSearch) DescribeServiceAccessPolicies(input *DescribeServiceAccessPoliciesInput) (*DescribeServiceAccessPoliciesOutput, error) {
+	req, out := c.DescribeServiceAccessPoliciesRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeSuggesters = "DescribeSuggesters"
+
+// DescribeSuggestersRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeSuggesters operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle via a custom handler, or when you need to access properties on the
+// request object before or after sending the request. If you just want the
+// service response, call the DescribeSuggesters method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DescribeSuggestersRequest method.
+//    req, resp := client.DescribeSuggestersRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *CloudSearch) DescribeSuggestersRequest(input *DescribeSuggestersInput) (req *request.Request, output *DescribeSuggestersOutput) {
+	op := &request.Operation{
+		Name:       opDescribeSuggesters,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeSuggestersInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeSuggestersOutput{}
+	req.Data = output
+	return
+}
+
+// Gets the suggesters configured for a domain. A suggester enables you to display
+// possible matches before users finish typing their queries. Can be limited
+// to specific suggesters by name. By default, shows all suggesters and includes
+// any pending changes to the configuration. Set the Deployed option to true
+// to show the active configuration and exclude pending changes. For more information,
+// see Getting Search Suggestions (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/getting-suggestions.html)
+// in the Amazon CloudSearch Developer Guide.
+func (c *CloudSearch) DescribeSuggesters(input *DescribeSuggestersInput) (*DescribeSuggestersOutput, error) {
+	req, out := c.DescribeSuggestersRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opIndexDocuments = "IndexDocuments"
+
+// IndexDocumentsRequest generates a "aws/request.Request" representing the
+// client's request for the IndexDocuments operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle via a custom handler, or when you need to access properties on the
+// request object before or after sending the request. If you just want the
+// service response, call the IndexDocuments method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the IndexDocumentsRequest method.
+//    req, resp := client.IndexDocumentsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *CloudSearch) IndexDocumentsRequest(input *IndexDocumentsInput) (req *request.Request, output *IndexDocumentsOutput) {
+	op := &request.Operation{
+		Name:       opIndexDocuments,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &IndexDocumentsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &IndexDocumentsOutput{}
+	req.Data = output
+	return
+}
+
+// Tells the search domain to start indexing its documents using the latest
+// indexing options. This operation must be invoked to activate options whose
+// OptionStatus is RequiresIndexDocuments.
+func (c *CloudSearch) IndexDocuments(input *IndexDocumentsInput) (*IndexDocumentsOutput, error) {
+	req, out := c.IndexDocumentsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opListDomainNames = "ListDomainNames"
+
+// ListDomainNamesRequest generates a "aws/request.Request" representing the
+// client's request for the ListDomainNames operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle via a custom handler, or when you need to access properties on the
+// request object before or after sending the request. If you just want the
+// service response, call the ListDomainNames method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the ListDomainNamesRequest method.
+//    req, resp := client.ListDomainNamesRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *CloudSearch) ListDomainNamesRequest(input *ListDomainNamesInput) (req *request.Request, output *ListDomainNamesOutput) {
+	op := &request.Operation{
+		Name:       opListDomainNames,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &ListDomainNamesInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ListDomainNamesOutput{}
+	req.Data = output
+	return
+}
+
+// Lists all search domains owned by an account.
+func (c *CloudSearch) ListDomainNames(input *ListDomainNamesInput) (*ListDomainNamesOutput, error) {
+	req, out := c.ListDomainNamesRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opUpdateAvailabilityOptions = "UpdateAvailabilityOptions"
+
+// UpdateAvailabilityOptionsRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateAvailabilityOptions operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle via a custom handler, or when you need to access properties on the
+// request object before or after sending the request. If you just want the
+// service response, call the UpdateAvailabilityOptions method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the UpdateAvailabilityOptionsRequest method.
+//    req, resp := client.UpdateAvailabilityOptionsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *CloudSearch) UpdateAvailabilityOptionsRequest(input *UpdateAvailabilityOptionsInput) (req *request.Request, output *UpdateAvailabilityOptionsOutput) {
+	op := &request.Operation{
+		Name:       opUpdateAvailabilityOptions,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UpdateAvailabilityOptionsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &UpdateAvailabilityOptionsOutput{}
+	req.Data = output
+	return
+}
+
+// Configures the availability options for a domain. Enabling the Multi-AZ option
+// expands an Amazon CloudSearch domain to an additional Availability Zone in
+// the same Region to increase fault tolerance in the event of a service disruption.
+// Changes to the Multi-AZ option can take about half an hour to become active.
+// For more information, see Configuring Availability Options (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-availability-options.html)
+// in the Amazon CloudSearch Developer Guide.
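+//
+//    // Illustrative sketch (an editorial addition, not generated from the API
+//    // model): enabling Multi-AZ for a hypothetical domain "example-domain",
+//    // assuming "client" is a configured *CloudSearch service client.
+//    out, err := client.UpdateAvailabilityOptions(&UpdateAvailabilityOptionsInput{
+//        DomainName: aws.String("example-domain"),
+//        MultiAZ:    aws.Bool(true),
+//    })
+//    if err == nil { // out.AvailabilityOptions reflects the pending Multi-AZ change
+//        fmt.Println(out.AvailabilityOptions)
+//    }
+//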
+func (c *CloudSearch) UpdateAvailabilityOptions(input *UpdateAvailabilityOptionsInput) (*UpdateAvailabilityOptionsOutput, error) {
+	req, out := c.UpdateAvailabilityOptionsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opUpdateScalingParameters = "UpdateScalingParameters"
+
+// UpdateScalingParametersRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateScalingParameters operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle via a custom handler, or when you need to access properties on the
+// request object before or after sending the request. If you just want the
+// service response, call the UpdateScalingParameters method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the UpdateScalingParametersRequest method.
+//    req, resp := client.UpdateScalingParametersRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *CloudSearch) UpdateScalingParametersRequest(input *UpdateScalingParametersInput) (req *request.Request, output *UpdateScalingParametersOutput) {
+	op := &request.Operation{
+		Name:       opUpdateScalingParameters,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UpdateScalingParametersInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &UpdateScalingParametersOutput{}
+	req.Data = output
+	return
+}
+
+// Configures scaling parameters for a domain. A domain's scaling parameters
+// specify the desired search instance type and replication count. Amazon CloudSearch
+// will still automatically scale your domain based on the volume of data and
+// traffic, but not below the desired instance type and replication count. If
+// the Multi-AZ option is enabled, these values control the resources used per
+// Availability Zone. For more information, see Configuring Scaling Options
+// (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-scaling-options.html)
+// in the Amazon CloudSearch Developer Guide.
+func (c *CloudSearch) UpdateScalingParameters(input *UpdateScalingParametersInput) (*UpdateScalingParametersOutput, error) {
+	req, out := c.UpdateScalingParametersRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opUpdateServiceAccessPolicies = "UpdateServiceAccessPolicies"
+
+// UpdateServiceAccessPoliciesRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateServiceAccessPolicies operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle via a custom handler, or when you need to access properties on the
+// request object before or after sending the request. If you just want the
+// service response, call the UpdateServiceAccessPolicies method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the UpdateServiceAccessPoliciesRequest method.
+//    req, resp := client.UpdateServiceAccessPoliciesRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *CloudSearch) UpdateServiceAccessPoliciesRequest(input *UpdateServiceAccessPoliciesInput) (req *request.Request, output *UpdateServiceAccessPoliciesOutput) {
+	op := &request.Operation{
+		Name:       opUpdateServiceAccessPolicies,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UpdateServiceAccessPoliciesInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &UpdateServiceAccessPoliciesOutput{}
+	req.Data = output
+	return
+}
+
+// Configures the access rules that control access to the domain's document
+// and search endpoints. For more information, see Configuring Access for an
+// Amazon CloudSearch Domain (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-access.html).
+func (c *CloudSearch) UpdateServiceAccessPolicies(input *UpdateServiceAccessPoliciesInput) (*UpdateServiceAccessPoliciesOutput, error) {
+	req, out := c.UpdateServiceAccessPoliciesRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// The configured access rules for the domain's document and search endpoints,
+// and the current status of those rules.
+type AccessPoliciesStatus struct {
+	_ struct{} `type:"structure"`
+
+	// Access rules for a domain's document or search service endpoints. For more
+	// information, see Configuring Access for a Search Domain (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-access.html)
+	// in the Amazon CloudSearch Developer Guide. The maximum size
+	// of a policy document is 100 KB.
+	Options *string `type:"string" required:"true"`
+
+	// The status of the domain configuration option.
+	Status *OptionStatus `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s AccessPoliciesStatus) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AccessPoliciesStatus) GoString() string {
+	return s.String()
+}
+
+// Synonyms, stopwords, and stemming options for an analysis scheme. Includes
+// a tokenization dictionary for Japanese.
+type AnalysisOptions struct {
+	_ struct{} `type:"structure"`
+
+	// The level of algorithmic stemming to perform: none, minimal, light, or full.
+	// The available levels vary depending on the language. For more information,
+	// see Language Specific Text Processing Settings (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/text-processing.html#text-processing-settings)
+	// in the Amazon CloudSearch Developer Guide.
+	AlgorithmicStemming *string `type:"string" enum:"AlgorithmicStemming"`
+
+	// A JSON array that contains a collection of terms, tokens, readings, and
+	// parts of speech for Japanese tokenization. The Japanese tokenization dictionary
+	// enables you to override the default tokenization for selected terms. This
+	// is only valid for Japanese language fields.
+	JapaneseTokenizationDictionary *string `type:"string"`
+
+	// A JSON object that contains a collection of string:value pairs that each
+	// map a term to its stem. For example, {"term1": "stem1", "term2": "stem2",
+	// "term3": "stem3"}. The stemming dictionary is applied in addition to any
+	// algorithmic stemming. This enables you to override the results of the algorithmic
+	// stemming to correct specific cases of overstemming or understemming. The
+	// maximum size of a stemming dictionary is 500 KB.
+	StemmingDictionary *string `type:"string"`
+
+	// A JSON array of terms to ignore during indexing and searching. For example,
+	// ["a", "an", "the", "of"]. The stopwords dictionary must explicitly list each
+	// word you want to ignore. Wildcards and regular expressions are not supported.
+	Stopwords *string `type:"string"`
+
+	// A JSON object that defines synonym groups and aliases. A synonym group is
+	// an array of arrays, where each sub-array is a group of terms where each term
+	// in the group is considered a synonym of every other term in the group. The
+	// aliases value is an object that contains a collection of string:value pairs
+	// where the string specifies a term and the array of values specifies each
+	// of the aliases for that term. An alias is considered a synonym of the specified
+	// term, but the term is not considered a synonym of the alias. For more information
+	// about specifying synonyms, see Synonyms (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-analysis-schemes.html#synonyms)
+	// in the Amazon CloudSearch Developer Guide.
+	Synonyms *string `type:"string"`
+}
+
+// String returns the string representation
+func (s AnalysisOptions) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AnalysisOptions) GoString() string {
+	return s.String()
+}
+
+// Configuration information for an analysis scheme. Each analysis scheme has
+// a unique name and specifies the language of the text to be processed. The
+// following options can be configured for an analysis scheme: Synonyms, Stopwords,
+// StemmingDictionary, JapaneseTokenizationDictionary, and AlgorithmicStemming.
+type AnalysisScheme struct {
+	_ struct{} `type:"structure"`
+
+	// Synonyms, stopwords, and stemming options for an analysis scheme. Includes
+	// a tokenization dictionary for Japanese.
+	AnalysisOptions *AnalysisOptions `type:"structure"`
+
+	// An IETF RFC 4646 (http://tools.ietf.org/html/rfc4646) language
+	// code or mul for multiple languages.
+	AnalysisSchemeLanguage *string `type:"string" required:"true" enum:"AnalysisSchemeLanguage"`
+
+	// Names must begin with a letter and can contain the following characters:
+	// a-z (lowercase), 0-9, and _ (underscore).
+	AnalysisSchemeName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AnalysisScheme) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AnalysisScheme) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AnalysisScheme) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "AnalysisScheme"}
+	if s.AnalysisSchemeLanguage == nil {
+		invalidParams.Add(request.NewErrParamRequired("AnalysisSchemeLanguage"))
+	}
+	if s.AnalysisSchemeName == nil {
+		invalidParams.Add(request.NewErrParamRequired("AnalysisSchemeName"))
+	}
+	if s.AnalysisSchemeName != nil && len(*s.AnalysisSchemeName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("AnalysisSchemeName", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The status and configuration of an AnalysisScheme.
+type AnalysisSchemeStatus struct {
+	_ struct{} `type:"structure"`
+
+	// Configuration information for an analysis scheme. Each analysis scheme has
+	// a unique name and specifies the language of the text to be processed. The
+	// following options can be configured for an analysis scheme: Synonyms, Stopwords,
+	// StemmingDictionary, JapaneseTokenizationDictionary, and AlgorithmicStemming.
+	Options *AnalysisScheme `type:"structure" required:"true"`
+
+	// The status of the domain configuration option.
+	Status *OptionStatus `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s AnalysisSchemeStatus) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AnalysisSchemeStatus) GoString() string {
+	return s.String()
+}
+
+// The status and configuration of the domain's availability options.
+type AvailabilityOptionsStatus struct {
+	_ struct{} `type:"structure"`
+
+	// The availability options configured for the domain.
+	Options *bool `type:"boolean" required:"true"`
+
+	// The status of the domain configuration option.
+	Status *OptionStatus `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s AvailabilityOptionsStatus) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AvailabilityOptionsStatus) GoString() string {
+	return s.String()
+}
+
+// Container for the parameters to the BuildSuggesters operation. Specifies the
+// name of the domain you want to update.
+type BuildSuggestersInput struct {
+	_ struct{} `type:"structure"`
+
+	// A string that represents the name of a domain. Domain names are unique across
+	// the domains owned by an account within an AWS region. Domain names start
+	// with a letter or number and can contain the following characters: a-z (lowercase),
+	// 0-9, and - (hyphen).
+	DomainName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s BuildSuggestersInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BuildSuggestersInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *BuildSuggestersInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "BuildSuggestersInput"}
+	if s.DomainName == nil {
+		invalidParams.Add(request.NewErrParamRequired("DomainName"))
+	}
+	if s.DomainName != nil && len(*s.DomainName) < 3 {
+		invalidParams.Add(request.NewErrParamMinLen("DomainName", 3))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The result of a BuildSuggesters request. Contains a list of the fields used
+// for suggestions.
+type BuildSuggestersOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of field names.
+	FieldNames []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s BuildSuggestersOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BuildSuggestersOutput) GoString() string {
+	return s.String()
+}
+
+// Container for the parameters to the CreateDomain operation. Specifies a name
+// for the new search domain.
+type CreateDomainInput struct {
+	_ struct{} `type:"structure"`
+
+	// A name for the domain you are creating. Allowed characters are a-z (lower-case
+	// letters), 0-9, and hyphen (-). Domain names must start with a letter or number
+	// and be at least 3 and no more than 28 characters long.
+ DomainName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateDomainInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDomainInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDomainInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDomainInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("DomainName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of a CreateDomainRequest. Contains the status of a newly created +// domain. +type CreateDomainOutput struct { + _ struct{} `type:"structure"` + + // The current status of the search domain. + DomainStatus *DomainStatus `type:"structure"` +} + +// String returns the string representation +func (s CreateDomainOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDomainOutput) GoString() string { + return s.String() +} + +// Options for a field that contains an array of dates. Present if IndexFieldType +// specifies the field is of type date-array. All options are enabled by default. +type DateArrayOptions struct { + _ struct{} `type:"structure"` + + // A value to use for the field if the field isn't specified for a document. + DefaultValue *string `type:"string"` + + // Whether facet information can be returned for the field. + FacetEnabled *bool `type:"boolean"` + + // Whether the contents of the field can be returned in the search results. + ReturnEnabled *bool `type:"boolean"` + + // Whether the contents of the field are searchable. + SearchEnabled *bool `type:"boolean"` + + // A list of source fields to map to the field. + SourceFields *string `type:"string"` +} + +// String returns the string representation +func (s DateArrayOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DateArrayOptions) GoString() string { + return s.String() +} + +// Options for a date field. Dates and times are specified in UTC (Coordinated +// Universal Time) according to IETF RFC3339: yyyy-mm-ddT00:00:00Z. Present +// if IndexFieldType specifies the field is of type date. All options are enabled +// by default. +type DateOptions struct { + _ struct{} `type:"structure"` + + // A value to use for the field if the field isn't specified for a document. + DefaultValue *string `type:"string"` + + // Whether facet information can be returned for the field. + FacetEnabled *bool `type:"boolean"` + + // Whether the contents of the field can be returned in the search results. + ReturnEnabled *bool `type:"boolean"` + + // Whether the contents of the field are searchable. + SearchEnabled *bool `type:"boolean"` + + // Whether the field can be used to sort the search results. + SortEnabled *bool `type:"boolean"` + + // A string that represents the name of an index field. CloudSearch supports + // regular index fields as well as dynamic fields. A dynamic field's name defines + // a pattern that begins or ends with a wildcard. Any document fields that don't + // map to a regular index field but do match a dynamic field's pattern are configured + // with the dynamic field's indexing options. 
+ // + // Regular field names begin with a letter and can contain the following characters: + // a-z (lowercase), 0-9, and _ (underscore). Dynamic field names must begin + // or end with a wildcard (*). The wildcard can also be the only character in + // a dynamic field name. Multiple wildcards, and wildcards embedded within a + // string are not supported. + // + // The name score is reserved and cannot be used as a field name. To reference + // a document's ID, you can use the name _id. + SourceField *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DateOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DateOptions) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DateOptions) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DateOptions"} + if s.SourceField != nil && len(*s.SourceField) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SourceField", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Container for the parameters to the DefineAnalysisScheme operation. Specifies +// the name of the domain you want to update and the analysis scheme configuration. +type DefineAnalysisSchemeInput struct { + _ struct{} `type:"structure"` + + // Configuration information for an analysis scheme. Each analysis scheme has + // a unique name and specifies the language of the text to be processed. The + // following options can be configured for an analysis scheme: Synonyms, Stopwords, + // StemmingDictionary, JapaneseTokenizationDictionary and AlgorithmicStemming. + AnalysisScheme *AnalysisScheme `type:"structure" required:"true"` + + // A string that represents the name of a domain. Domain names are unique across + // the domains owned by an account within an AWS region. Domain names start + // with a letter or number and can contain the following characters: a-z (lowercase), + // 0-9, and - (hyphen). + DomainName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DefineAnalysisSchemeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DefineAnalysisSchemeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DefineAnalysisSchemeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DefineAnalysisSchemeInput"} + if s.AnalysisScheme == nil { + invalidParams.Add(request.NewErrParamRequired("AnalysisScheme")) + } + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("DomainName", 3)) + } + if s.AnalysisScheme != nil { + if err := s.AnalysisScheme.Validate(); err != nil { + invalidParams.AddNested("AnalysisScheme", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of a DefineAnalysisScheme request. Contains the status of the +// newly-configured analysis scheme. +type DefineAnalysisSchemeOutput struct { + _ struct{} `type:"structure"` + + // The status and configuration of an AnalysisScheme. 
+ AnalysisScheme *AnalysisSchemeStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DefineAnalysisSchemeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DefineAnalysisSchemeOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DefineExpression operation. Specifies +// the name of the domain you want to update and the expression you want to +// configure. +type DefineExpressionInput struct { + _ struct{} `type:"structure"` + + // A string that represents the name of a domain. Domain names are unique across + // the domains owned by an account within an AWS region. Domain names start + // with a letter or number and can contain the following characters: a-z (lowercase), + // 0-9, and - (hyphen). + DomainName *string `min:"3" type:"string" required:"true"` + + // A named expression that can be evaluated at search time. Can be used to sort + // the search results, define other expressions, or return computed information + // in the search results. + Expression *Expression `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DefineExpressionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DefineExpressionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DefineExpressionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DefineExpressionInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("DomainName", 3)) + } + if s.Expression == nil { + invalidParams.Add(request.NewErrParamRequired("Expression")) + } + if s.Expression != nil { + if err := s.Expression.Validate(); err != nil { + invalidParams.AddNested("Expression", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of a DefineExpression request. Contains the status of the newly-configured +// expression. +type DefineExpressionOutput struct { + _ struct{} `type:"structure"` + + // The value of an Expression and its current status. + Expression *ExpressionStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DefineExpressionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DefineExpressionOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DefineIndexField operation. Specifies +// the name of the domain you want to update and the index field configuration. +type DefineIndexFieldInput struct { + _ struct{} `type:"structure"` + + // A string that represents the name of a domain. Domain names are unique across + // the domains owned by an account within an AWS region. Domain names start + // with a letter or number and can contain the following characters: a-z (lowercase), + // 0-9, and - (hyphen). + DomainName *string `min:"3" type:"string" required:"true"` + + // The index field and field options you want to configure. 
+ IndexField *IndexField `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DefineIndexFieldInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DefineIndexFieldInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DefineIndexFieldInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DefineIndexFieldInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("DomainName", 3)) + } + if s.IndexField == nil { + invalidParams.Add(request.NewErrParamRequired("IndexField")) + } + if s.IndexField != nil { + if err := s.IndexField.Validate(); err != nil { + invalidParams.AddNested("IndexField", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of a DefineIndexField request. Contains the status of the newly-configured +// index field. +type DefineIndexFieldOutput struct { + _ struct{} `type:"structure"` + + // The value of an IndexField and its current status. + IndexField *IndexFieldStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DefineIndexFieldOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DefineIndexFieldOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DefineSuggester operation. Specifies +// the name of the domain you want to update and the suggester configuration. +type DefineSuggesterInput struct { + _ struct{} `type:"structure"` + + // A string that represents the name of a domain. Domain names are unique across + // the domains owned by an account within an AWS region. Domain names start + // with a letter or number and can contain the following characters: a-z (lowercase), + // 0-9, and - (hyphen). + DomainName *string `min:"3" type:"string" required:"true"` + + // Configuration information for a search suggester. Each suggester has a unique + // name and specifies the text field you want to use for suggestions. The following + // options can be configured for a suggester: FuzzyMatching, SortExpression. + Suggester *Suggester `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DefineSuggesterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DefineSuggesterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DefineSuggesterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DefineSuggesterInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("DomainName", 3)) + } + if s.Suggester == nil { + invalidParams.Add(request.NewErrParamRequired("Suggester")) + } + if s.Suggester != nil { + if err := s.Suggester.Validate(); err != nil { + invalidParams.AddNested("Suggester", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of a DefineSuggester request. 
Contains the status of the newly-configured +// suggester. +type DefineSuggesterOutput struct { + _ struct{} `type:"structure"` + + // The value of a Suggester and its current status. + Suggester *SuggesterStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DefineSuggesterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DefineSuggesterOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DeleteAnalysisScheme operation. Specifies +// the name of the domain you want to update and the analysis scheme you want +// to delete. +type DeleteAnalysisSchemeInput struct { + _ struct{} `type:"structure"` + + // The name of the analysis scheme you want to delete. + AnalysisSchemeName *string `min:"1" type:"string" required:"true"` + + // A string that represents the name of a domain. Domain names are unique across + // the domains owned by an account within an AWS region. Domain names start + // with a letter or number and can contain the following characters: a-z (lowercase), + // 0-9, and - (hyphen). + DomainName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteAnalysisSchemeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAnalysisSchemeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteAnalysisSchemeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteAnalysisSchemeInput"} + if s.AnalysisSchemeName == nil { + invalidParams.Add(request.NewErrParamRequired("AnalysisSchemeName")) + } + if s.AnalysisSchemeName != nil && len(*s.AnalysisSchemeName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AnalysisSchemeName", 1)) + } + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("DomainName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of a DeleteAnalysisScheme request. Contains the status of the +// deleted analysis scheme. +type DeleteAnalysisSchemeOutput struct { + _ struct{} `type:"structure"` + + // The status of the analysis scheme being deleted. + AnalysisScheme *AnalysisSchemeStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DeleteAnalysisSchemeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAnalysisSchemeOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DeleteDomain operation. Specifies the +// name of the domain you want to delete. +type DeleteDomainInput struct { + _ struct{} `type:"structure"` + + // The name of the domain you want to permanently delete. + DomainName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDomainInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDomainInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteDomainInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDomainInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("DomainName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of a DeleteDomain request. Contains the status of a newly deleted +// domain, or no status if the domain has already been completely deleted. +type DeleteDomainOutput struct { + _ struct{} `type:"structure"` + + // The current status of the search domain. + DomainStatus *DomainStatus `type:"structure"` +} + +// String returns the string representation +func (s DeleteDomainOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDomainOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DeleteExpression operation. Specifies +// the name of the domain you want to update and the name of the expression +// you want to delete. +type DeleteExpressionInput struct { + _ struct{} `type:"structure"` + + // A string that represents the name of a domain. Domain names are unique across + // the domains owned by an account within an AWS region. Domain names start + // with a letter or number and can contain the following characters: a-z (lowercase), + // 0-9, and - (hyphen). + DomainName *string `min:"3" type:"string" required:"true"` + + // The name of the Expression to delete. + ExpressionName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteExpressionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteExpressionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteExpressionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteExpressionInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("DomainName", 3)) + } + if s.ExpressionName == nil { + invalidParams.Add(request.NewErrParamRequired("ExpressionName")) + } + if s.ExpressionName != nil && len(*s.ExpressionName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ExpressionName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of a DeleteExpression request. Specifies the expression being +// deleted. +type DeleteExpressionOutput struct { + _ struct{} `type:"structure"` + + // The status of the expression being deleted. + Expression *ExpressionStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DeleteExpressionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteExpressionOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DeleteIndexField operation. Specifies +// the name of the domain you want to update and the name of the index field +// you want to delete. +type DeleteIndexFieldInput struct { + _ struct{} `type:"structure"` + + // A string that represents the name of a domain. 
Domain names are unique across
+	// the domains owned by an account within an AWS region. Domain names start
+	// with a letter or number and can contain the following characters: a-z (lowercase),
+	// 0-9, and - (hyphen).
+	DomainName *string `min:"3" type:"string" required:"true"`
+
+	// The name of the index field you want to remove from the domain's indexing
+	// options.
+	IndexFieldName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteIndexFieldInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteIndexFieldInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteIndexFieldInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteIndexFieldInput"}
+	if s.DomainName == nil {
+		invalidParams.Add(request.NewErrParamRequired("DomainName"))
+	}
+	if s.DomainName != nil && len(*s.DomainName) < 3 {
+		invalidParams.Add(request.NewErrParamMinLen("DomainName", 3))
+	}
+	if s.IndexFieldName == nil {
+		invalidParams.Add(request.NewErrParamRequired("IndexFieldName"))
+	}
+	if s.IndexFieldName != nil && len(*s.IndexFieldName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("IndexFieldName", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The result of a DeleteIndexField request.
+type DeleteIndexFieldOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The status of the index field being deleted.
+	IndexField *IndexFieldStatus `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteIndexFieldOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteIndexFieldOutput) GoString() string {
+	return s.String()
+}
+
+// Container for the parameters to the DeleteSuggester operation. Specifies
+// the name of the domain you want to update and the name of the suggester
+// you want to delete.
+type DeleteSuggesterInput struct {
+	_ struct{} `type:"structure"`
+
+	// A string that represents the name of a domain. Domain names are unique across
+	// the domains owned by an account within an AWS region. Domain names start
+	// with a letter or number and can contain the following characters: a-z (lowercase),
+	// 0-9, and - (hyphen).
+	DomainName *string `min:"3" type:"string" required:"true"`
+
+	// Specifies the name of the suggester you want to delete.
+	SuggesterName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteSuggesterInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteSuggesterInput) GoString() string {
+	return s.String()
+}
+
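+// Editor's note: an illustrative usage sketch, not generated code. It assumes
+// an initialized client, e.g. svc := cloudsearch.New(session.Must(session.NewSession())),
+// and hypothetical domain/field names; error handling is elided.
+//
+//	_, err := svc.DeleteIndexField(&cloudsearch.DeleteIndexFieldInput{
+//		DomainName:     aws.String("movies"),
+//		IndexFieldName: aws.String("genre"),
+//	})
+
+// Validate inspects the fields of the type to determine if they are valid.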
+func (s *DeleteSuggesterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteSuggesterInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("DomainName", 3)) + } + if s.SuggesterName == nil { + invalidParams.Add(request.NewErrParamRequired("SuggesterName")) + } + if s.SuggesterName != nil && len(*s.SuggesterName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SuggesterName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of a DeleteSuggester request. Contains the status of the deleted +// suggester. +type DeleteSuggesterOutput struct { + _ struct{} `type:"structure"` + + // The status of the suggester being deleted. + Suggester *SuggesterStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DeleteSuggesterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSuggesterOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DescribeAnalysisSchemes operation. Specifies +// the name of the domain you want to describe. To limit the response to particular +// analysis schemes, specify the names of the analysis schemes you want to describe. +// To show the active configuration and exclude any pending changes, set the +// Deployed option to true. +type DescribeAnalysisSchemesInput struct { + _ struct{} `type:"structure"` + + // The analysis schemes you want to describe. + AnalysisSchemeNames []*string `type:"list"` + + // Whether to display the deployed configuration (true) or include any pending + // changes (false). Defaults to false. + Deployed *bool `type:"boolean"` + + // The name of the domain you want to describe. + DomainName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeAnalysisSchemesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAnalysisSchemesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeAnalysisSchemesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeAnalysisSchemesInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("DomainName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of a DescribeAnalysisSchemes request. Contains the analysis schemes +// configured for the domain specified in the request. +type DescribeAnalysisSchemesOutput struct { + _ struct{} `type:"structure"` + + // The analysis scheme descriptions. + AnalysisSchemes []*AnalysisSchemeStatus `type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeAnalysisSchemesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAnalysisSchemesOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DescribeAvailabilityOptions operation. +// Specifies the name of the domain you want to describe. 
To show the active
+// configuration and exclude any pending changes, set the Deployed option to
+// true.
+type DescribeAvailabilityOptionsInput struct {
+	_ struct{} `type:"structure"`
+
+	// Whether to display the deployed configuration (true) or include any pending
+	// changes (false). Defaults to false.
+	Deployed *bool `type:"boolean"`
+
+	// The name of the domain you want to describe.
+	DomainName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeAvailabilityOptionsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeAvailabilityOptionsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeAvailabilityOptionsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DescribeAvailabilityOptionsInput"}
+	if s.DomainName == nil {
+		invalidParams.Add(request.NewErrParamRequired("DomainName"))
+	}
+	if s.DomainName != nil && len(*s.DomainName) < 3 {
+		invalidParams.Add(request.NewErrParamMinLen("DomainName", 3))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The result of a DescribeAvailabilityOptions request. Indicates whether or
+// not the Multi-AZ option is enabled for the domain specified in the request.
+type DescribeAvailabilityOptionsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The availability options configured for the domain. Indicates whether Multi-AZ
+	// is enabled for the domain.
+	AvailabilityOptions *AvailabilityOptionsStatus `type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeAvailabilityOptionsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeAvailabilityOptionsOutput) GoString() string {
+	return s.String()
+}
+
+// Container for the parameters to the DescribeDomains operation. By default
+// shows the status of all domains. To restrict the response to particular domains,
+// specify the names of the domains you want to describe.
+type DescribeDomainsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The names of the domains you want to include in the response.
+	DomainNames []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeDomainsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeDomainsInput) GoString() string {
+	return s.String()
+}
+
+// The result of a DescribeDomains request. Contains the status of the domains
+// specified in the request or all domains owned by the account.
+type DescribeDomainsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A list that contains the status of each requested domain.
+	DomainStatusList []*DomainStatus `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeDomainsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeDomainsOutput) GoString() string {
+	return s.String()
+}
+
+// Container for the parameters to the DescribeExpressions operation. Specifies
+// the name of the domain you want to describe. To restrict the response to
+// particular expressions, specify the names of the expressions you want to
+// describe. 
To show the active configuration and exclude any pending changes, +// set the Deployed option to true. +type DescribeExpressionsInput struct { + _ struct{} `type:"structure"` + + // Whether to display the deployed configuration (true) or include any pending + // changes (false). Defaults to false. + Deployed *bool `type:"boolean"` + + // The name of the domain you want to describe. + DomainName *string `min:"3" type:"string" required:"true"` + + // Limits the DescribeExpressions response to the specified expressions. If + // not specified, all expressions are shown. + ExpressionNames []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeExpressionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeExpressionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeExpressionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeExpressionsInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("DomainName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of a DescribeExpressions request. Contains the expressions configured +// for the domain specified in the request. +type DescribeExpressionsOutput struct { + _ struct{} `type:"structure"` + + // The expressions configured for the domain. + Expressions []*ExpressionStatus `type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeExpressionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeExpressionsOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DescribeIndexFields operation. Specifies +// the name of the domain you want to describe. To restrict the response to +// particular index fields, specify the names of the index fields you want to +// describe. To show the active configuration and exclude any pending changes, +// set the Deployed option to true. +type DescribeIndexFieldsInput struct { + _ struct{} `type:"structure"` + + // Whether to display the deployed configuration (true) or include any pending + // changes (false). Defaults to false. + Deployed *bool `type:"boolean"` + + // The name of the domain you want to describe. + DomainName *string `min:"3" type:"string" required:"true"` + + // A list of the index fields you want to describe. If not specified, information + // is returned for all configured index fields. + FieldNames []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeIndexFieldsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeIndexFieldsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeIndexFieldsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DescribeIndexFieldsInput"}
+	if s.DomainName == nil {
+		invalidParams.Add(request.NewErrParamRequired("DomainName"))
+	}
+	if s.DomainName != nil && len(*s.DomainName) < 3 {
+		invalidParams.Add(request.NewErrParamMinLen("DomainName", 3))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The result of a DescribeIndexFields request. Contains the index fields configured
+// for the domain specified in the request.
+type DescribeIndexFieldsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The index fields configured for the domain.
+	IndexFields []*IndexFieldStatus `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeIndexFieldsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeIndexFieldsOutput) GoString() string {
+	return s.String()
+}
+
+// Container for the parameters to the DescribeScalingParameters operation.
+// Specifies the name of the domain you want to describe.
+type DescribeScalingParametersInput struct {
+	_ struct{} `type:"structure"`
+
+	// A string that represents the name of a domain. Domain names are unique across
+	// the domains owned by an account within an AWS region. Domain names start
+	// with a letter or number and can contain the following characters: a-z (lowercase),
+	// 0-9, and - (hyphen).
+	DomainName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeScalingParametersInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeScalingParametersInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeScalingParametersInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DescribeScalingParametersInput"}
+	if s.DomainName == nil {
+		invalidParams.Add(request.NewErrParamRequired("DomainName"))
+	}
+	if s.DomainName != nil && len(*s.DomainName) < 3 {
+		invalidParams.Add(request.NewErrParamMinLen("DomainName", 3))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The result of a DescribeScalingParameters request. Contains the scaling parameters
+// configured for the domain specified in the request.
+type DescribeScalingParametersOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The status and configuration of a search domain's scaling parameters.
+	ScalingParameters *ScalingParametersStatus `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeScalingParametersOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeScalingParametersOutput) GoString() string {
+	return s.String()
+}
+
+// Container for the parameters to the DescribeServiceAccessPolicies operation.
+// Specifies the name of the domain you want to describe. To show the active
+// configuration and exclude any pending changes, set the Deployed option to
+// true.
+type DescribeServiceAccessPoliciesInput struct {
+	_ struct{} `type:"structure"`
+
+	// Whether to display the deployed configuration (true) or include any pending
+	// changes (false). Defaults to false.
+	Deployed *bool `type:"boolean"`
+
+	// The name of the domain you want to describe.
+	DomainName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeServiceAccessPoliciesInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeServiceAccessPoliciesInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeServiceAccessPoliciesInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DescribeServiceAccessPoliciesInput"}
+	if s.DomainName == nil {
+		invalidParams.Add(request.NewErrParamRequired("DomainName"))
+	}
+	if s.DomainName != nil && len(*s.DomainName) < 3 {
+		invalidParams.Add(request.NewErrParamMinLen("DomainName", 3))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The result of a DescribeServiceAccessPolicies request.
+type DescribeServiceAccessPoliciesOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The access rules configured for the domain specified in the request.
+	AccessPolicies *AccessPoliciesStatus `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeServiceAccessPoliciesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeServiceAccessPoliciesOutput) GoString() string {
+	return s.String()
+}
+
+// Container for the parameters to the DescribeSuggesters operation. Specifies
+// the name of the domain you want to describe. To restrict the response to
+// particular suggesters, specify the names of the suggesters you want to describe.
+// To show the active configuration and exclude any pending changes, set the
+// Deployed option to true.
+type DescribeSuggestersInput struct {
+	_ struct{} `type:"structure"`
+
+	// Whether to display the deployed configuration (true) or include any pending
+	// changes (false). Defaults to false.
+	Deployed *bool `type:"boolean"`
+
+	// The name of the domain you want to describe.
+	DomainName *string `min:"3" type:"string" required:"true"`
+
+	// The suggesters you want to describe.
+	SuggesterNames []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeSuggestersInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeSuggestersInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeSuggestersInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DescribeSuggestersInput"}
+	if s.DomainName == nil {
+		invalidParams.Add(request.NewErrParamRequired("DomainName"))
+	}
+	if s.DomainName != nil && len(*s.DomainName) < 3 {
+		invalidParams.Add(request.NewErrParamMinLen("DomainName", 3))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
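+// Editor's note: an illustrative usage sketch, not generated code, assuming an
+// initialized *CloudSearch client svc and a hypothetical "movies" domain. It
+// shows the Deployed flag restricting the response to the active configuration.
+//
+//	out, err := svc.DescribeSuggesters(&cloudsearch.DescribeSuggestersInput{
+//		DomainName: aws.String("movies"),
+//		Deployed:   aws.Bool(true),
+//	})
+
+// The result of a DescribeSuggesters request.
+type DescribeSuggestersOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The suggesters configured for the domain specified in the request.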
+	Suggesters []*SuggesterStatus `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeSuggestersOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeSuggestersOutput) GoString() string {
+	return s.String()
+}
+
+// Options for a search suggester.
+type DocumentSuggesterOptions struct {
+	_ struct{} `type:"structure"`
+
+	// The level of fuzziness allowed when suggesting matches for a string: none,
+	// low, or high. With none, the specified string is treated as an exact prefix.
+	// With low, suggestions must differ from the specified string by no more than
+	// one character. With high, suggestions can differ by up to two characters.
+	// The default is none.
+	FuzzyMatching *string `type:"string" enum:"SuggesterFuzzyMatching"`
+
+	// An expression that computes a score for each suggestion to control how they
+	// are sorted. The scores are rounded to the nearest integer, with a floor of
+	// 0 and a ceiling of 2^31-1. A document's relevance score is not computed for
+	// suggestions, so sort expressions cannot reference the _score value. To sort
+	// suggestions using a numeric field or existing expression, specify the name
+	// of the field or expression. If no expression is configured for the suggester,
+	// the suggestions are sorted with the closest matches listed first.
+	SortExpression *string `type:"string"`
+
+	// The name of the index field you want to use for suggestions.
+	SourceField *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DocumentSuggesterOptions) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DocumentSuggesterOptions) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DocumentSuggesterOptions) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DocumentSuggesterOptions"}
+	if s.SourceField == nil {
+		invalidParams.Add(request.NewErrParamRequired("SourceField"))
+	}
+	if s.SourceField != nil && len(*s.SourceField) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("SourceField", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
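+// Editor's note: an illustrative usage sketch, not generated code, assuming an
+// initialized *CloudSearch client svc; the domain, suggester, and field names
+// are hypothetical. FuzzyMatching accepts none, low, or high.
+//
+//	_, err := svc.DefineSuggester(&cloudsearch.DefineSuggesterInput{
+//		DomainName: aws.String("movies"),
+//		Suggester: &cloudsearch.Suggester{
+//			SuggesterName: aws.String("title_suggester"),
+//			DocumentSuggesterOptions: &cloudsearch.DocumentSuggesterOptions{
+//				SourceField:   aws.String("title"),
+//				FuzzyMatching: aws.String("low"),
+//			},
+//		},
+//	})
+
+// The current status of the search domain.
+type DomainStatus struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) of the search domain. See Identifiers for
+	// IAM Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/index.html?Using_Identifiers.html)
+	// in Using AWS Identity and Access Management for more information.
+	ARN *string `type:"string"`
+
+	// True if the search domain is created. It can take several minutes to initialize
+	// a domain when CreateDomain is called. Newly created search domains are returned
+	// from DescribeDomains with a false value for Created until domain creation
+	// is complete.
+	Created *bool `type:"boolean"`
+
+	// True if the search domain has been deleted. The system must clean up resources
+	// dedicated to the search domain when DeleteDomain is called. Newly deleted
+	// search domains are returned from DescribeDomains with a true value for IsDeleted
+	// for several minutes until resource cleanup is complete.
+	Deleted *bool `type:"boolean"`
+
+	// The service endpoint for updating documents in a search domain.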
+ DocService *ServiceEndpoint `type:"structure"` + + // An internally generated unique identifier for a domain. + DomainId *string `min:"1" type:"string" required:"true"` + + // A string that represents the name of a domain. Domain names are unique across + // the domains owned by an account within an AWS region. Domain names start + // with a letter or number and can contain the following characters: a-z (lowercase), + // 0-9, and - (hyphen). + DomainName *string `min:"3" type:"string" required:"true"` + + Limits *Limits `type:"structure"` + + // True if processing is being done to activate the current domain configuration. + Processing *bool `type:"boolean"` + + // True if IndexDocuments needs to be called to activate the current domain + // configuration. + RequiresIndexDocuments *bool `type:"boolean" required:"true"` + + // The number of search instances that are available to process search requests. + SearchInstanceCount *int64 `min:"1" type:"integer"` + + // The instance type that is being used to process search requests. + SearchInstanceType *string `type:"string"` + + // The number of partitions across which the search index is spread. + SearchPartitionCount *int64 `min:"1" type:"integer"` + + // The service endpoint for requesting search results from a search domain. + SearchService *ServiceEndpoint `type:"structure"` +} + +// String returns the string representation +func (s DomainStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DomainStatus) GoString() string { + return s.String() +} + +// Options for a field that contains an array of double-precision 64-bit floating +// point values. Present if IndexFieldType specifies the field is of type double-array. +// All options are enabled by default. +type DoubleArrayOptions struct { + _ struct{} `type:"structure"` + + // A value to use for the field if the field isn't specified for a document. + DefaultValue *float64 `type:"double"` + + // Whether facet information can be returned for the field. + FacetEnabled *bool `type:"boolean"` + + // Whether the contents of the field can be returned in the search results. + ReturnEnabled *bool `type:"boolean"` + + // Whether the contents of the field are searchable. + SearchEnabled *bool `type:"boolean"` + + // A list of source fields to map to the field. + SourceFields *string `type:"string"` +} + +// String returns the string representation +func (s DoubleArrayOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DoubleArrayOptions) GoString() string { + return s.String() +} + +// Options for a double-precision 64-bit floating point field. Present if IndexFieldType +// specifies the field is of type double. All options are enabled by default. +type DoubleOptions struct { + _ struct{} `type:"structure"` + + // A value to use for the field if the field isn't specified for a document. + // This can be important if you are using the field in an expression and that + // field is not present in every document. + DefaultValue *float64 `type:"double"` + + // Whether facet information can be returned for the field. + FacetEnabled *bool `type:"boolean"` + + // Whether the contents of the field can be returned in the search results. + ReturnEnabled *bool `type:"boolean"` + + // Whether the contents of the field are searchable. + SearchEnabled *bool `type:"boolean"` + + // Whether the field can be used to sort the search results. 
+	SortEnabled *bool `type:"boolean"`
+
+	// The name of the source field to map to the field.
+	SourceField *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DoubleOptions) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DoubleOptions) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DoubleOptions) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DoubleOptions"}
+	if s.SourceField != nil && len(*s.SourceField) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("SourceField", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// A named expression that can be evaluated at search time. Can be used to sort
+// the search results, define other expressions, or return computed information
+// in the search results.
+type Expression struct {
+	_ struct{} `type:"structure"`
+
+	// Names must begin with a letter and can contain the following characters:
+	// a-z (lowercase), 0-9, and _ (underscore).
+	ExpressionName *string `min:"1" type:"string" required:"true"`
+
+	// The expression to evaluate for sorting while processing a search request.
+	// The Expression syntax is based on JavaScript expressions. For more information,
+	// see Configuring Expressions (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-expressions.html)
+	// in the Amazon CloudSearch Developer Guide.
+	ExpressionValue *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s Expression) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Expression) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Expression) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "Expression"}
+	if s.ExpressionName == nil {
+		invalidParams.Add(request.NewErrParamRequired("ExpressionName"))
+	}
+	if s.ExpressionName != nil && len(*s.ExpressionName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ExpressionName", 1))
+	}
+	if s.ExpressionValue == nil {
+		invalidParams.Add(request.NewErrParamRequired("ExpressionValue"))
+	}
+	if s.ExpressionValue != nil && len(*s.ExpressionValue) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ExpressionValue", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The value of an Expression and its current status.
+type ExpressionStatus struct {
+	_ struct{} `type:"structure"`
+
+	// The expression that is evaluated for sorting while processing a search request.
+	Options *Expression `type:"structure" required:"true"`
+
+	// The status of domain configuration option.
+	Status *OptionStatus `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s ExpressionStatus) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ExpressionStatus) GoString() string {
+	return s.String()
+}
+
+// Container for the parameters to the IndexDocuments operation. Specifies the
+// name of the domain you want to re-index.
+type IndexDocumentsInput struct {
+	_ struct{} `type:"structure"`
+
+	// A string that represents the name of a domain. 
Domain names are unique across + // the domains owned by an account within an AWS region. Domain names start + // with a letter or number and can contain the following characters: a-z (lowercase), + // 0-9, and - (hyphen). + DomainName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s IndexDocumentsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IndexDocumentsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IndexDocumentsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IndexDocumentsInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("DomainName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of an IndexDocuments request. Contains the status of the indexing +// operation, including the fields being indexed. +type IndexDocumentsOutput struct { + _ struct{} `type:"structure"` + + // The names of the fields that are currently being indexed. + FieldNames []*string `type:"list"` +} + +// String returns the string representation +func (s IndexDocumentsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IndexDocumentsOutput) GoString() string { + return s.String() +} + +// Configuration information for a field in the index, including its name, type, +// and options. The supported options depend on the IndexFieldType. +type IndexField struct { + _ struct{} `type:"structure"` + + // Options for a field that contains an array of dates. Present if IndexFieldType + // specifies the field is of type date-array. All options are enabled by default. + DateArrayOptions *DateArrayOptions `type:"structure"` + + // Options for a date field. Dates and times are specified in UTC (Coordinated + // Universal Time) according to IETF RFC3339: yyyy-mm-ddT00:00:00Z. Present + // if IndexFieldType specifies the field is of type date. All options are enabled + // by default. + DateOptions *DateOptions `type:"structure"` + + // Options for a field that contains an array of double-precision 64-bit floating + // point values. Present if IndexFieldType specifies the field is of type double-array. + // All options are enabled by default. + DoubleArrayOptions *DoubleArrayOptions `type:"structure"` + + // Options for a double-precision 64-bit floating point field. Present if IndexFieldType + // specifies the field is of type double. All options are enabled by default. + DoubleOptions *DoubleOptions `type:"structure"` + + // A string that represents the name of an index field. CloudSearch supports + // regular index fields as well as dynamic fields. A dynamic field's name defines + // a pattern that begins or ends with a wildcard. Any document fields that don't + // map to a regular index field but do match a dynamic field's pattern are configured + // with the dynamic field's indexing options. + // + // Regular field names begin with a letter and can contain the following characters: + // a-z (lowercase), 0-9, and _ (underscore). Dynamic field names must begin + // or end with a wildcard (*). The wildcard can also be the only character in + // a dynamic field name. 
Multiple wildcards, and wildcards embedded within a
+	// string are not supported.
+	//
+	// The name score is reserved and cannot be used as a field name. To reference
+	// a document's ID, you can use the name _id.
+	IndexFieldName *string `min:"1" type:"string" required:"true"`
+
+	// The type of field. The valid options for a field depend on the field type.
+	// For more information about the supported field types, see Configuring Index
+	// Fields (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-index-fields.html)
+	// in the Amazon CloudSearch Developer Guide.
+	IndexFieldType *string `type:"string" required:"true" enum:"IndexFieldType"`
+
+	// Options for a field that contains an array of 64-bit signed integers. Present
+	// if IndexFieldType specifies the field is of type int-array. All options are
+	// enabled by default.
+	IntArrayOptions *IntArrayOptions `type:"structure"`
+
+	// Options for a 64-bit signed integer field. Present if IndexFieldType specifies
+	// the field is of type int. All options are enabled by default.
+	IntOptions *IntOptions `type:"structure"`
+
+	// Options for a latlon field. A latlon field contains a location stored as
+	// a latitude and longitude value pair. Present if IndexFieldType specifies
+	// the field is of type latlon. All options are enabled by default.
+	LatLonOptions *LatLonOptions `type:"structure"`
+
+	// Options for a field that contains an array of literal strings. Present if
+	// IndexFieldType specifies the field is of type literal-array. All options
+	// are enabled by default.
+	LiteralArrayOptions *LiteralArrayOptions `type:"structure"`
+
+	// Options for literal field. Present if IndexFieldType specifies the field
+	// is of type literal. All options are enabled by default.
+	LiteralOptions *LiteralOptions `type:"structure"`
+
+	// Options for a field that contains an array of text strings. Present if IndexFieldType
+	// specifies the field is of type text-array. A text-array field is always searchable.
+	// All options are enabled by default.
+	TextArrayOptions *TextArrayOptions `type:"structure"`
+
+	// Options for text field. Present if IndexFieldType specifies the field is
+	// of type text. A text field is always searchable. All options are enabled
+	// by default.
+	TextOptions *TextOptions `type:"structure"`
+}
+
+// String returns the string representation
+func (s IndexField) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s IndexField) GoString() string {
+	return s.String()
+}
+
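+// Editor's note: an illustrative usage sketch, not generated code, assuming an
+// initialized *CloudSearch client svc; domain and field names are hypothetical.
+// It configures a searchable text field with highlighting enabled.
+//
+//	_, err := svc.DefineIndexField(&cloudsearch.DefineIndexFieldInput{
+//		DomainName: aws.String("movies"),
+//		IndexField: &cloudsearch.IndexField{
+//			IndexFieldName: aws.String("title"),
+//			IndexFieldType: aws.String("text"),
+//			TextOptions: &cloudsearch.TextOptions{
+//				ReturnEnabled:    aws.Bool(true),
+//				HighlightEnabled: aws.Bool(true),
+//			},
+//		},
+//	})
+
+// Validate inspects the fields of the type to determine if they are valid.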
+func (s *IndexField) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IndexField"} + if s.IndexFieldName == nil { + invalidParams.Add(request.NewErrParamRequired("IndexFieldName")) + } + if s.IndexFieldName != nil && len(*s.IndexFieldName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IndexFieldName", 1)) + } + if s.IndexFieldType == nil { + invalidParams.Add(request.NewErrParamRequired("IndexFieldType")) + } + if s.DateOptions != nil { + if err := s.DateOptions.Validate(); err != nil { + invalidParams.AddNested("DateOptions", err.(request.ErrInvalidParams)) + } + } + if s.DoubleOptions != nil { + if err := s.DoubleOptions.Validate(); err != nil { + invalidParams.AddNested("DoubleOptions", err.(request.ErrInvalidParams)) + } + } + if s.IntOptions != nil { + if err := s.IntOptions.Validate(); err != nil { + invalidParams.AddNested("IntOptions", err.(request.ErrInvalidParams)) + } + } + if s.LatLonOptions != nil { + if err := s.LatLonOptions.Validate(); err != nil { + invalidParams.AddNested("LatLonOptions", err.(request.ErrInvalidParams)) + } + } + if s.LiteralOptions != nil { + if err := s.LiteralOptions.Validate(); err != nil { + invalidParams.AddNested("LiteralOptions", err.(request.ErrInvalidParams)) + } + } + if s.TextOptions != nil { + if err := s.TextOptions.Validate(); err != nil { + invalidParams.AddNested("TextOptions", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The value of an IndexField and its current status. +type IndexFieldStatus struct { + _ struct{} `type:"structure"` + + // Configuration information for a field in the index, including its name, type, + // and options. The supported options depend on the IndexFieldType. + Options *IndexField `type:"structure" required:"true"` + + // The status of domain configuration option. + Status *OptionStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s IndexFieldStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IndexFieldStatus) GoString() string { + return s.String() +} + +// Options for a field that contains an array of 64-bit signed integers. Present +// if IndexFieldType specifies the field is of type int-array. All options are +// enabled by default. +type IntArrayOptions struct { + _ struct{} `type:"structure"` + + // A value to use for the field if the field isn't specified for a document. + DefaultValue *int64 `type:"long"` + + // Whether facet information can be returned for the field. + FacetEnabled *bool `type:"boolean"` + + // Whether the contents of the field can be returned in the search results. + ReturnEnabled *bool `type:"boolean"` + + // Whether the contents of the field are searchable. + SearchEnabled *bool `type:"boolean"` + + // A list of source fields to map to the field. + SourceFields *string `type:"string"` +} + +// String returns the string representation +func (s IntArrayOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IntArrayOptions) GoString() string { + return s.String() +} + +// Options for a 64-bit signed integer field. Present if IndexFieldType specifies +// the field is of type int. All options are enabled by default. +type IntOptions struct { + _ struct{} `type:"structure"` + + // A value to use for the field if the field isn't specified for a document. 
+ // This can be important if you are using the field in an expression and that + // field is not present in every document. + DefaultValue *int64 `type:"long"` + + // Whether facet information can be returned for the field. + FacetEnabled *bool `type:"boolean"` + + // Whether the contents of the field can be returned in the search results. + ReturnEnabled *bool `type:"boolean"` + + // Whether the contents of the field are searchable. + SearchEnabled *bool `type:"boolean"` + + // Whether the field can be used to sort the search results. + SortEnabled *bool `type:"boolean"` + + // The name of the source field to map to the field. + SourceField *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s IntOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IntOptions) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IntOptions) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IntOptions"} + if s.SourceField != nil && len(*s.SourceField) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SourceField", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Options for a latlon field. A latlon field contains a location stored as +// a latitude and longitude value pair. Present if IndexFieldType specifies +// the field is of type latlon. All options are enabled by default. +type LatLonOptions struct { + _ struct{} `type:"structure"` + + // A value to use for the field if the field isn't specified for a document. + DefaultValue *string `type:"string"` + + // Whether facet information can be returned for the field. + FacetEnabled *bool `type:"boolean"` + + // Whether the contents of the field can be returned in the search results. + ReturnEnabled *bool `type:"boolean"` + + // Whether the contents of the field are searchable. + SearchEnabled *bool `type:"boolean"` + + // Whether the field can be used to sort the search results. + SortEnabled *bool `type:"boolean"` + + // A string that represents the name of an index field. CloudSearch supports + // regular index fields as well as dynamic fields. A dynamic field's name defines + // a pattern that begins or ends with a wildcard. Any document fields that don't + // map to a regular index field but do match a dynamic field's pattern are configured + // with the dynamic field's indexing options. + // + // Regular field names begin with a letter and can contain the following characters: + // a-z (lowercase), 0-9, and _ (underscore). Dynamic field names must begin + // or end with a wildcard (*). The wildcard can also be the only character in + // a dynamic field name. Multiple wildcards, and wildcards embedded within a + // string are not supported. + // + // The name score is reserved and cannot be used as a field name. To reference + // a document's ID, you can use the name _id. + SourceField *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s LatLonOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LatLonOptions) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *LatLonOptions) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LatLonOptions"} + if s.SourceField != nil && len(*s.SourceField) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SourceField", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type Limits struct { + _ struct{} `type:"structure"` + + MaximumPartitionCount *int64 `min:"1" type:"integer" required:"true"` + + MaximumReplicationCount *int64 `min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s Limits) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Limits) GoString() string { + return s.String() +} + +type ListDomainNamesInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ListDomainNamesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDomainNamesInput) GoString() string { + return s.String() +} + +// The result of a ListDomainNames request. Contains a list of the domains owned +// by an account. +type ListDomainNamesOutput struct { + _ struct{} `type:"structure"` + + // The names of the search domains owned by an account. + DomainNames map[string]*string `type:"map"` +} + +// String returns the string representation +func (s ListDomainNamesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDomainNamesOutput) GoString() string { + return s.String() +} + +// Options for a field that contains an array of literal strings. Present if +// IndexFieldType specifies the field is of type literal-array. All options +// are enabled by default. +type LiteralArrayOptions struct { + _ struct{} `type:"structure"` + + // A value to use for the field if the field isn't specified for a document. + DefaultValue *string `type:"string"` + + // Whether facet information can be returned for the field. + FacetEnabled *bool `type:"boolean"` + + // Whether the contents of the field can be returned in the search results. + ReturnEnabled *bool `type:"boolean"` + + // Whether the contents of the field are searchable. + SearchEnabled *bool `type:"boolean"` + + // A list of source fields to map to the field. + SourceFields *string `type:"string"` +} + +// String returns the string representation +func (s LiteralArrayOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LiteralArrayOptions) GoString() string { + return s.String() +} + +// Options for literal field. Present if IndexFieldType specifies the field +// is of type literal. All options are enabled by default. +type LiteralOptions struct { + _ struct{} `type:"structure"` + + // A value to use for the field if the field isn't specified for a document. + DefaultValue *string `type:"string"` + + // Whether facet information can be returned for the field. + FacetEnabled *bool `type:"boolean"` + + // Whether the contents of the field can be returned in the search results. + ReturnEnabled *bool `type:"boolean"` + + // Whether the contents of the field are searchable. + SearchEnabled *bool `type:"boolean"` + + // Whether the field can be used to sort the search results. + SortEnabled *bool `type:"boolean"` + + // A string that represents the name of an index field. CloudSearch supports + // regular index fields as well as dynamic fields. 
A dynamic field's name defines
+	// a pattern that begins or ends with a wildcard. Any document fields that don't
+	// map to a regular index field but do match a dynamic field's pattern are configured
+	// with the dynamic field's indexing options.
+	//
+	// Regular field names begin with a letter and can contain the following characters:
+	// a-z (lowercase), 0-9, and _ (underscore). Dynamic field names must begin
+	// or end with a wildcard (*). The wildcard can also be the only character in
+	// a dynamic field name. Multiple wildcards, and wildcards embedded within a
+	// string are not supported.
+	//
+	// The name score is reserved and cannot be used as a field name. To reference
+	// a document's ID, you can use the name _id.
+	SourceField *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s LiteralOptions) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LiteralOptions) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LiteralOptions) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "LiteralOptions"}
+	if s.SourceField != nil && len(*s.SourceField) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("SourceField", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The status of domain configuration option.
+type OptionStatus struct {
+	_ struct{} `type:"structure"`
+
+	// A timestamp for when this option was created.
+	CreationDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
+
+	// Indicates that the option will be deleted once processing is complete.
+	PendingDeletion *bool `type:"boolean"`
+
+	// The state of processing a change to an option. Possible values:
+	//
+	//    * RequiresIndexDocuments: the option's latest value will not be deployed
+	//      until IndexDocuments has been called and indexing is complete.
+	//    * Processing: the option's latest value is in the process of being activated.
+	//    * Active: the option's latest value is completely deployed.
+	//    * FailedToValidate: the option value is not compatible with the domain's
+	//      data and cannot be used to index the data. You must either modify the
+	//      option value or update or remove the incompatible documents.
+	State *string `type:"string" required:"true" enum:"OptionState"`
+
+	// A timestamp for when this option was last updated.
+	UpdateDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
+
+	// A unique integer that indicates when this option was last updated.
+	UpdateVersion *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s OptionStatus) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s OptionStatus) GoString() string {
+	return s.String()
+}
+
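+// Editor's note: an illustrative usage sketch, not generated code, assuming an
+// initialized *CloudSearch client svc and a hypothetical "movies" domain; the
+// ScalingParameters field name on the input type is assumed from the generated
+// UpdateScalingParameters API surface.
+//
+//	_, err := svc.UpdateScalingParameters(&cloudsearch.UpdateScalingParametersInput{
+//		DomainName: aws.String("movies"),
+//		ScalingParameters: &cloudsearch.ScalingParameters{
+//			DesiredInstanceType:     aws.String("search.m1.small"),
+//			DesiredReplicationCount: aws.Int64(2),
+//		},
+//	})
+
+// The desired instance type and desired number of replicas of each index partition.
+type ScalingParameters struct {
+	_ struct{} `type:"structure"`
+
+	// The instance type that you want to preconfigure for your domain. For example,
+	// search.m1.small.
+	DesiredInstanceType *string `type:"string" enum:"PartitionInstanceType"`
+
+	// The number of partitions you want to preconfigure for your domain. Only valid
+	// when you select m2.2xlarge as the desired instance type.
+	DesiredPartitionCount *int64 `type:"integer"`
+
+	// The number of replicas you want to preconfigure for each index partition.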
+ DesiredReplicationCount *int64 `type:"integer"` +} + +// String returns the string representation +func (s ScalingParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScalingParameters) GoString() string { + return s.String() +} + +// The status and configuration of a search domain's scaling parameters. +type ScalingParametersStatus struct { + _ struct{} `type:"structure"` + + // The desired instance type and desired number of replicas of each index partition. + Options *ScalingParameters `type:"structure" required:"true"` + + // The status of domain configuration option. + Status *OptionStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ScalingParametersStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScalingParametersStatus) GoString() string { + return s.String() +} + +// The endpoint to which service requests can be submitted. +type ServiceEndpoint struct { + _ struct{} `type:"structure"` + + // The endpoint to which service requests can be submitted. For example, search-imdb-movies-oopcnjfn6ugofer3zx5iadxxca.eu-west-1.cloudsearch.amazonaws.com + // or doc-imdb-movies-oopcnjfn6ugofer3zx5iadxxca.eu-west-1.cloudsearch.amazonaws.com. + Endpoint *string `type:"string"` +} + +// String returns the string representation +func (s ServiceEndpoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServiceEndpoint) GoString() string { + return s.String() +} + +// Configuration information for a search suggester. Each suggester has a unique +// name and specifies the text field you want to use for suggestions. The following +// options can be configured for a suggester: FuzzyMatching, SortExpression. +type Suggester struct { + _ struct{} `type:"structure"` + + // Options for a search suggester. + DocumentSuggesterOptions *DocumentSuggesterOptions `type:"structure" required:"true"` + + // Names must begin with a letter and can contain the following characters: + // a-z (lowercase), 0-9, and _ (underscore). + SuggesterName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s Suggester) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Suggester) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Suggester) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Suggester"} + if s.DocumentSuggesterOptions == nil { + invalidParams.Add(request.NewErrParamRequired("DocumentSuggesterOptions")) + } + if s.SuggesterName == nil { + invalidParams.Add(request.NewErrParamRequired("SuggesterName")) + } + if s.SuggesterName != nil && len(*s.SuggesterName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SuggesterName", 1)) + } + if s.DocumentSuggesterOptions != nil { + if err := s.DocumentSuggesterOptions.Validate(); err != nil { + invalidParams.AddNested("DocumentSuggesterOptions", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The value of a Suggester and its current status. +type SuggesterStatus struct { + _ struct{} `type:"structure"` + + // Configuration information for a search suggester. 
Each suggester has a unique + // name and specifies the text field you want to use for suggestions. The following + // options can be configured for a suggester: FuzzyMatching, SortExpression. + Options *Suggester `type:"structure" required:"true"` + + // The status of domain configuration option. + Status *OptionStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s SuggesterStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SuggesterStatus) GoString() string { + return s.String() +} + +// Options for a field that contains an array of text strings. Present if IndexFieldType +// specifies the field is of type text-array. A text-array field is always searchable. +// All options are enabled by default. +type TextArrayOptions struct { + _ struct{} `type:"structure"` + + // The name of an analysis scheme for a text-array field. + AnalysisScheme *string `type:"string"` + + // A value to use for the field if the field isn't specified for a document. + DefaultValue *string `type:"string"` + + // Whether highlights can be returned for the field. + HighlightEnabled *bool `type:"boolean"` + + // Whether the contents of the field can be returned in the search results. + ReturnEnabled *bool `type:"boolean"` + + // A list of source fields to map to the field. + SourceFields *string `type:"string"` +} + +// String returns the string representation +func (s TextArrayOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TextArrayOptions) GoString() string { + return s.String() +} + +// Options for text field. Present if IndexFieldType specifies the field is +// of type text. A text field is always searchable. All options are enabled +// by default. +type TextOptions struct { + _ struct{} `type:"structure"` + + // The name of an analysis scheme for a text field. + AnalysisScheme *string `type:"string"` + + // A value to use for the field if the field isn't specified for a document. + DefaultValue *string `type:"string"` + + // Whether highlights can be returned for the field. + HighlightEnabled *bool `type:"boolean"` + + // Whether the contents of the field can be returned in the search results. + ReturnEnabled *bool `type:"boolean"` + + // Whether the field can be used to sort the search results. + SortEnabled *bool `type:"boolean"` + + // A string that represents the name of an index field. CloudSearch supports + // regular index fields as well as dynamic fields. A dynamic field's name defines + // a pattern that begins or ends with a wildcard. Any document fields that don't + // map to a regular index field but do match a dynamic field's pattern are configured + // with the dynamic field's indexing options. + // + // Regular field names begin with a letter and can contain the following characters: + // a-z (lowercase), 0-9, and _ (underscore). Dynamic field names must begin + // or end with a wildcard (*). The wildcard can also be the only character in + // a dynamic field name. Multiple wildcards, and wildcards embedded within a + // string are not supported. + // + // The name score is reserved and cannot be used as a field name. To reference + // a document's ID, you can use the name _id. 
+ SourceField *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s TextOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TextOptions) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TextOptions) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TextOptions"} + if s.SourceField != nil && len(*s.SourceField) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SourceField", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Container for the parameters to the UpdateAvailabilityOptions operation. +// Specifies the name of the domain you want to update and the Multi-AZ availability +// option. +type UpdateAvailabilityOptionsInput struct { + _ struct{} `type:"structure"` + + // A string that represents the name of a domain. Domain names are unique across + // the domains owned by an account within an AWS region. Domain names start + // with a letter or number and can contain the following characters: a-z (lowercase), + // 0-9, and - (hyphen). + DomainName *string `min:"3" type:"string" required:"true"` + + // You expand an existing search domain to a second Availability Zone by setting + // the Multi-AZ option to true. Similarly, you can turn off the Multi-AZ option + // to downgrade the domain to a single Availability Zone by setting the Multi-AZ + // option to false. + MultiAZ *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s UpdateAvailabilityOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAvailabilityOptionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateAvailabilityOptionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateAvailabilityOptionsInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("DomainName", 3)) + } + if s.MultiAZ == nil { + invalidParams.Add(request.NewErrParamRequired("MultiAZ")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of a UpdateAvailabilityOptions request. Contains the status of +// the domain's availability options. +type UpdateAvailabilityOptionsOutput struct { + _ struct{} `type:"structure"` + + // The newly-configured availability options. Indicates whether Multi-AZ is + // enabled for the domain. + AvailabilityOptions *AvailabilityOptionsStatus `type:"structure"` +} + +// String returns the string representation +func (s UpdateAvailabilityOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAvailabilityOptionsOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the UpdateScalingParameters operation. Specifies +// the name of the domain you want to update and the scaling parameters you +// want to configure. +type UpdateScalingParametersInput struct { + _ struct{} `type:"structure"` + + // A string that represents the name of a domain. Domain names are unique across + // the domains owned by an account within an AWS region. 
Domain names start + // with a letter or number and can contain the following characters: a-z (lowercase), + // 0-9, and - (hyphen). + DomainName *string `min:"3" type:"string" required:"true"` + + // The desired instance type and desired number of replicas of each index partition. + ScalingParameters *ScalingParameters `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateScalingParametersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateScalingParametersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateScalingParametersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateScalingParametersInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("DomainName", 3)) + } + if s.ScalingParameters == nil { + invalidParams.Add(request.NewErrParamRequired("ScalingParameters")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of a UpdateScalingParameters request. Contains the status of the +// newly-configured scaling parameters. +type UpdateScalingParametersOutput struct { + _ struct{} `type:"structure"` + + // The status and configuration of a search domain's scaling parameters. + ScalingParameters *ScalingParametersStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateScalingParametersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateScalingParametersOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the UpdateServiceAccessPolicies operation. +// Specifies the name of the domain you want to update and the access rules +// you want to configure. +type UpdateServiceAccessPoliciesInput struct { + _ struct{} `type:"structure"` + + // The access rules you want to configure. These rules replace any existing + // rules. + AccessPolicies *string `type:"string" required:"true"` + + // A string that represents the name of a domain. Domain names are unique across + // the domains owned by an account within an AWS region. Domain names start + // with a letter or number and can contain the following characters: a-z (lowercase), + // 0-9, and - (hyphen). + DomainName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateServiceAccessPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateServiceAccessPoliciesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateServiceAccessPoliciesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UpdateServiceAccessPoliciesInput"}
+ if s.AccessPolicies == nil {
+ invalidParams.Add(request.NewErrParamRequired("AccessPolicies"))
+ }
+ if s.DomainName == nil {
+ invalidParams.Add(request.NewErrParamRequired("DomainName"))
+ }
+ if s.DomainName != nil && len(*s.DomainName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("DomainName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// The result of an UpdateServiceAccessPolicies request. Contains the new access
+// policies.
+type UpdateServiceAccessPoliciesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The access rules configured for the domain.
+ AccessPolicies *AccessPoliciesStatus `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateServiceAccessPoliciesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateServiceAccessPoliciesOutput) GoString() string {
+ return s.String()
+}
+
+const (
+ // @enum AlgorithmicStemming
+ AlgorithmicStemmingNone = "none"
+ // @enum AlgorithmicStemming
+ AlgorithmicStemmingMinimal = "minimal"
+ // @enum AlgorithmicStemming
+ AlgorithmicStemmingLight = "light"
+ // @enum AlgorithmicStemming
+ AlgorithmicStemmingFull = "full"
+)
+
+// An IETF RFC 4646 (http://tools.ietf.org/html/rfc4646) language
+// code or mul for multiple languages.
+const (
+ // @enum AnalysisSchemeLanguage
+ AnalysisSchemeLanguageAr = "ar"
+ // @enum AnalysisSchemeLanguage
+ AnalysisSchemeLanguageBg = "bg"
+ // @enum AnalysisSchemeLanguage
+ AnalysisSchemeLanguageCa = "ca"
+ // @enum AnalysisSchemeLanguage
+ AnalysisSchemeLanguageCs = "cs"
+ // @enum AnalysisSchemeLanguage
+ AnalysisSchemeLanguageDa = "da"
+ // @enum AnalysisSchemeLanguage
+ AnalysisSchemeLanguageDe = "de"
+ // @enum AnalysisSchemeLanguage
+ AnalysisSchemeLanguageEl = "el"
+ // @enum AnalysisSchemeLanguage
+ AnalysisSchemeLanguageEn = "en"
+ // @enum AnalysisSchemeLanguage
+ AnalysisSchemeLanguageEs = "es"
+ // @enum AnalysisSchemeLanguage
+ AnalysisSchemeLanguageEu = "eu"
+ // @enum AnalysisSchemeLanguage
+ AnalysisSchemeLanguageFa = "fa"
+ // @enum AnalysisSchemeLanguage
+ AnalysisSchemeLanguageFi = "fi"
+ // @enum AnalysisSchemeLanguage
+ AnalysisSchemeLanguageFr = "fr"
+ // @enum AnalysisSchemeLanguage
+ AnalysisSchemeLanguageGa = "ga"
+ // @enum AnalysisSchemeLanguage
+ AnalysisSchemeLanguageGl = "gl"
+ // @enum AnalysisSchemeLanguage
+ AnalysisSchemeLanguageHe = "he"
+ // @enum AnalysisSchemeLanguage
+ AnalysisSchemeLanguageHi = "hi"
+ // @enum AnalysisSchemeLanguage
+ AnalysisSchemeLanguageHu = "hu"
+ // @enum AnalysisSchemeLanguage
+ AnalysisSchemeLanguageHy = "hy"
+ // @enum AnalysisSchemeLanguage
+ AnalysisSchemeLanguageId = "id"
+ // @enum AnalysisSchemeLanguage
+ AnalysisSchemeLanguageIt = "it"
+ // @enum AnalysisSchemeLanguage
+ AnalysisSchemeLanguageJa = "ja"
+ // @enum AnalysisSchemeLanguage
+ AnalysisSchemeLanguageKo = "ko"
+ // @enum AnalysisSchemeLanguage
+ AnalysisSchemeLanguageLv = "lv"
+ // @enum AnalysisSchemeLanguage
+ AnalysisSchemeLanguageMul = "mul"
+ // @enum AnalysisSchemeLanguage
+ AnalysisSchemeLanguageNl = "nl"
+ // @enum AnalysisSchemeLanguage
+ AnalysisSchemeLanguageNo = "no"
+ // @enum AnalysisSchemeLanguage
+ AnalysisSchemeLanguagePt = "pt"
+ // @enum AnalysisSchemeLanguage
+ AnalysisSchemeLanguageRo = "ro"
+ // @enum AnalysisSchemeLanguage
+ AnalysisSchemeLanguageRu = "ru"
+ // @enum AnalysisSchemeLanguage
+ AnalysisSchemeLanguageSv = "sv"
+ // @enum AnalysisSchemeLanguage
+ AnalysisSchemeLanguageTh = "th"
+ // @enum AnalysisSchemeLanguage
+ AnalysisSchemeLanguageTr = "tr"
+ // @enum AnalysisSchemeLanguage
+ AnalysisSchemeLanguageZhHans = "zh-Hans"
+ // @enum AnalysisSchemeLanguage
+ AnalysisSchemeLanguageZhHant = "zh-Hant"
+)
+
+// The type of field. The valid options for a field depend on the field type.
+// For more information about the supported field types, see Configuring Index
+// Fields (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-index-fields.html)
+// in the Amazon CloudSearch Developer Guide.
+const (
+ // @enum IndexFieldType
+ IndexFieldTypeInt = "int"
+ // @enum IndexFieldType
+ IndexFieldTypeDouble = "double"
+ // @enum IndexFieldType
+ IndexFieldTypeLiteral = "literal"
+ // @enum IndexFieldType
+ IndexFieldTypeText = "text"
+ // @enum IndexFieldType
+ IndexFieldTypeDate = "date"
+ // @enum IndexFieldType
+ IndexFieldTypeLatlon = "latlon"
+ // @enum IndexFieldType
+ IndexFieldTypeIntArray = "int-array"
+ // @enum IndexFieldType
+ IndexFieldTypeDoubleArray = "double-array"
+ // @enum IndexFieldType
+ IndexFieldTypeLiteralArray = "literal-array"
+ // @enum IndexFieldType
+ IndexFieldTypeTextArray = "text-array"
+ // @enum IndexFieldType
+ IndexFieldTypeDateArray = "date-array"
+)
+
+// The state of processing a change to an option. One of:
+//
+// RequiresIndexDocuments: The option's latest value will not be deployed
+// until IndexDocuments has been called and indexing is complete. Processing:
+// The option's latest value is in the process of being activated. Active: The
+// option's latest value is fully deployed. FailedToValidate: The option value
+// is not compatible with the domain's data and cannot be used to index the
+// data. You must either modify the option value or update or remove the incompatible
+// documents.
+const (
+ // @enum OptionState
+ OptionStateRequiresIndexDocuments = "RequiresIndexDocuments"
+ // @enum OptionState
+ OptionStateProcessing = "Processing"
+ // @enum OptionState
+ OptionStateActive = "Active"
+ // @enum OptionState
+ OptionStateFailedToValidate = "FailedToValidate"
+)
+
+// The instance type (such as search.m1.small) on which an index partition is
+// hosted.
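+//
+// A sketch of selecting one of these constants when updating scaling parameters;
+// the domain name and desired values are illustrative:
+//
+// params := &cloudsearch.UpdateScalingParametersInput{
+// DomainName: aws.String("my-domain"),
+// ScalingParameters: &cloudsearch.ScalingParameters{
+// DesiredInstanceType: aws.String(cloudsearch.PartitionInstanceTypeSearchM3Medium),
+// DesiredReplicationCount: aws.Int64(2),
+// },
+// }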
+const ( + // @enum PartitionInstanceType + PartitionInstanceTypeSearchM1Small = "search.m1.small" + // @enum PartitionInstanceType + PartitionInstanceTypeSearchM1Large = "search.m1.large" + // @enum PartitionInstanceType + PartitionInstanceTypeSearchM2Xlarge = "search.m2.xlarge" + // @enum PartitionInstanceType + PartitionInstanceTypeSearchM22xlarge = "search.m2.2xlarge" + // @enum PartitionInstanceType + PartitionInstanceTypeSearchM3Medium = "search.m3.medium" + // @enum PartitionInstanceType + PartitionInstanceTypeSearchM3Large = "search.m3.large" + // @enum PartitionInstanceType + PartitionInstanceTypeSearchM3Xlarge = "search.m3.xlarge" + // @enum PartitionInstanceType + PartitionInstanceTypeSearchM32xlarge = "search.m3.2xlarge" +) + +const ( + // @enum SuggesterFuzzyMatching + SuggesterFuzzyMatchingNone = "none" + // @enum SuggesterFuzzyMatching + SuggesterFuzzyMatchingLow = "low" + // @enum SuggesterFuzzyMatching + SuggesterFuzzyMatchingHigh = "high" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudsearch/cloudsearchiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/cloudsearch/cloudsearchiface/interface.go new file mode 100644 index 000000000..5b3369562 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudsearch/cloudsearchiface/interface.go @@ -0,0 +1,110 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cloudsearchiface provides an interface for the Amazon CloudSearch. +package cloudsearchiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/cloudsearch" +) + +// CloudSearchAPI is the interface type for cloudsearch.CloudSearch. +type CloudSearchAPI interface { + BuildSuggestersRequest(*cloudsearch.BuildSuggestersInput) (*request.Request, *cloudsearch.BuildSuggestersOutput) + + BuildSuggesters(*cloudsearch.BuildSuggestersInput) (*cloudsearch.BuildSuggestersOutput, error) + + CreateDomainRequest(*cloudsearch.CreateDomainInput) (*request.Request, *cloudsearch.CreateDomainOutput) + + CreateDomain(*cloudsearch.CreateDomainInput) (*cloudsearch.CreateDomainOutput, error) + + DefineAnalysisSchemeRequest(*cloudsearch.DefineAnalysisSchemeInput) (*request.Request, *cloudsearch.DefineAnalysisSchemeOutput) + + DefineAnalysisScheme(*cloudsearch.DefineAnalysisSchemeInput) (*cloudsearch.DefineAnalysisSchemeOutput, error) + + DefineExpressionRequest(*cloudsearch.DefineExpressionInput) (*request.Request, *cloudsearch.DefineExpressionOutput) + + DefineExpression(*cloudsearch.DefineExpressionInput) (*cloudsearch.DefineExpressionOutput, error) + + DefineIndexFieldRequest(*cloudsearch.DefineIndexFieldInput) (*request.Request, *cloudsearch.DefineIndexFieldOutput) + + DefineIndexField(*cloudsearch.DefineIndexFieldInput) (*cloudsearch.DefineIndexFieldOutput, error) + + DefineSuggesterRequest(*cloudsearch.DefineSuggesterInput) (*request.Request, *cloudsearch.DefineSuggesterOutput) + + DefineSuggester(*cloudsearch.DefineSuggesterInput) (*cloudsearch.DefineSuggesterOutput, error) + + DeleteAnalysisSchemeRequest(*cloudsearch.DeleteAnalysisSchemeInput) (*request.Request, *cloudsearch.DeleteAnalysisSchemeOutput) + + DeleteAnalysisScheme(*cloudsearch.DeleteAnalysisSchemeInput) (*cloudsearch.DeleteAnalysisSchemeOutput, error) + + DeleteDomainRequest(*cloudsearch.DeleteDomainInput) (*request.Request, *cloudsearch.DeleteDomainOutput) + + DeleteDomain(*cloudsearch.DeleteDomainInput) (*cloudsearch.DeleteDomainOutput, error) + + DeleteExpressionRequest(*cloudsearch.DeleteExpressionInput) (*request.Request, 
*cloudsearch.DeleteExpressionOutput) + + DeleteExpression(*cloudsearch.DeleteExpressionInput) (*cloudsearch.DeleteExpressionOutput, error) + + DeleteIndexFieldRequest(*cloudsearch.DeleteIndexFieldInput) (*request.Request, *cloudsearch.DeleteIndexFieldOutput) + + DeleteIndexField(*cloudsearch.DeleteIndexFieldInput) (*cloudsearch.DeleteIndexFieldOutput, error) + + DeleteSuggesterRequest(*cloudsearch.DeleteSuggesterInput) (*request.Request, *cloudsearch.DeleteSuggesterOutput) + + DeleteSuggester(*cloudsearch.DeleteSuggesterInput) (*cloudsearch.DeleteSuggesterOutput, error) + + DescribeAnalysisSchemesRequest(*cloudsearch.DescribeAnalysisSchemesInput) (*request.Request, *cloudsearch.DescribeAnalysisSchemesOutput) + + DescribeAnalysisSchemes(*cloudsearch.DescribeAnalysisSchemesInput) (*cloudsearch.DescribeAnalysisSchemesOutput, error) + + DescribeAvailabilityOptionsRequest(*cloudsearch.DescribeAvailabilityOptionsInput) (*request.Request, *cloudsearch.DescribeAvailabilityOptionsOutput) + + DescribeAvailabilityOptions(*cloudsearch.DescribeAvailabilityOptionsInput) (*cloudsearch.DescribeAvailabilityOptionsOutput, error) + + DescribeDomainsRequest(*cloudsearch.DescribeDomainsInput) (*request.Request, *cloudsearch.DescribeDomainsOutput) + + DescribeDomains(*cloudsearch.DescribeDomainsInput) (*cloudsearch.DescribeDomainsOutput, error) + + DescribeExpressionsRequest(*cloudsearch.DescribeExpressionsInput) (*request.Request, *cloudsearch.DescribeExpressionsOutput) + + DescribeExpressions(*cloudsearch.DescribeExpressionsInput) (*cloudsearch.DescribeExpressionsOutput, error) + + DescribeIndexFieldsRequest(*cloudsearch.DescribeIndexFieldsInput) (*request.Request, *cloudsearch.DescribeIndexFieldsOutput) + + DescribeIndexFields(*cloudsearch.DescribeIndexFieldsInput) (*cloudsearch.DescribeIndexFieldsOutput, error) + + DescribeScalingParametersRequest(*cloudsearch.DescribeScalingParametersInput) (*request.Request, *cloudsearch.DescribeScalingParametersOutput) + + DescribeScalingParameters(*cloudsearch.DescribeScalingParametersInput) (*cloudsearch.DescribeScalingParametersOutput, error) + + DescribeServiceAccessPoliciesRequest(*cloudsearch.DescribeServiceAccessPoliciesInput) (*request.Request, *cloudsearch.DescribeServiceAccessPoliciesOutput) + + DescribeServiceAccessPolicies(*cloudsearch.DescribeServiceAccessPoliciesInput) (*cloudsearch.DescribeServiceAccessPoliciesOutput, error) + + DescribeSuggestersRequest(*cloudsearch.DescribeSuggestersInput) (*request.Request, *cloudsearch.DescribeSuggestersOutput) + + DescribeSuggesters(*cloudsearch.DescribeSuggestersInput) (*cloudsearch.DescribeSuggestersOutput, error) + + IndexDocumentsRequest(*cloudsearch.IndexDocumentsInput) (*request.Request, *cloudsearch.IndexDocumentsOutput) + + IndexDocuments(*cloudsearch.IndexDocumentsInput) (*cloudsearch.IndexDocumentsOutput, error) + + ListDomainNamesRequest(*cloudsearch.ListDomainNamesInput) (*request.Request, *cloudsearch.ListDomainNamesOutput) + + ListDomainNames(*cloudsearch.ListDomainNamesInput) (*cloudsearch.ListDomainNamesOutput, error) + + UpdateAvailabilityOptionsRequest(*cloudsearch.UpdateAvailabilityOptionsInput) (*request.Request, *cloudsearch.UpdateAvailabilityOptionsOutput) + + UpdateAvailabilityOptions(*cloudsearch.UpdateAvailabilityOptionsInput) (*cloudsearch.UpdateAvailabilityOptionsOutput, error) + + UpdateScalingParametersRequest(*cloudsearch.UpdateScalingParametersInput) (*request.Request, *cloudsearch.UpdateScalingParametersOutput) + + UpdateScalingParameters(*cloudsearch.UpdateScalingParametersInput) 
(*cloudsearch.UpdateScalingParametersOutput, error) + + UpdateServiceAccessPoliciesRequest(*cloudsearch.UpdateServiceAccessPoliciesInput) (*request.Request, *cloudsearch.UpdateServiceAccessPoliciesOutput) + + UpdateServiceAccessPolicies(*cloudsearch.UpdateServiceAccessPoliciesInput) (*cloudsearch.UpdateServiceAccessPoliciesOutput, error) +} + +var _ CloudSearchAPI = (*cloudsearch.CloudSearch)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudsearch/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/cloudsearch/examples_test.go new file mode 100644 index 000000000..d91e62d1c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudsearch/examples_test.go @@ -0,0 +1,616 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package cloudsearch_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/cloudsearch" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleCloudSearch_BuildSuggesters() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.BuildSuggestersInput{ + DomainName: aws.String("DomainName"), // Required + } + resp, err := svc.BuildSuggesters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudSearch_CreateDomain() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.CreateDomainInput{ + DomainName: aws.String("DomainName"), // Required + } + resp, err := svc.CreateDomain(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudSearch_DefineAnalysisScheme() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.DefineAnalysisSchemeInput{ + AnalysisScheme: &cloudsearch.AnalysisScheme{ // Required + AnalysisSchemeLanguage: aws.String("AnalysisSchemeLanguage"), // Required + AnalysisSchemeName: aws.String("StandardName"), // Required + AnalysisOptions: &cloudsearch.AnalysisOptions{ + AlgorithmicStemming: aws.String("AlgorithmicStemming"), + JapaneseTokenizationDictionary: aws.String("String"), + StemmingDictionary: aws.String("String"), + Stopwords: aws.String("String"), + Synonyms: aws.String("String"), + }, + }, + DomainName: aws.String("DomainName"), // Required + } + resp, err := svc.DefineAnalysisScheme(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudSearch_DefineExpression() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.DefineExpressionInput{ + DomainName: aws.String("DomainName"), // Required + Expression: &cloudsearch.Expression{ // Required + ExpressionName: aws.String("StandardName"), // Required + ExpressionValue: aws.String("ExpressionValue"), // Required + }, + } + resp, err := svc.DefineExpression(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudSearch_DefineIndexField() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.DefineIndexFieldInput{ + DomainName: aws.String("DomainName"), // Required + IndexField: &cloudsearch.IndexField{ // Required + IndexFieldName: aws.String("DynamicFieldName"), // Required + IndexFieldType: aws.String("IndexFieldType"), // Required + DateArrayOptions: &cloudsearch.DateArrayOptions{ + DefaultValue: aws.String("FieldValue"), + FacetEnabled: aws.Bool(true), + ReturnEnabled: aws.Bool(true), + SearchEnabled: aws.Bool(true), + SourceFields: aws.String("FieldNameCommaList"), + }, + DateOptions: &cloudsearch.DateOptions{ + DefaultValue: aws.String("FieldValue"), + FacetEnabled: aws.Bool(true), + ReturnEnabled: aws.Bool(true), + SearchEnabled: aws.Bool(true), + SortEnabled: aws.Bool(true), + SourceField: aws.String("FieldName"), + }, + DoubleArrayOptions: &cloudsearch.DoubleArrayOptions{ + DefaultValue: aws.Float64(1.0), + FacetEnabled: aws.Bool(true), + ReturnEnabled: aws.Bool(true), + SearchEnabled: aws.Bool(true), + SourceFields: aws.String("FieldNameCommaList"), + }, + DoubleOptions: &cloudsearch.DoubleOptions{ + DefaultValue: aws.Float64(1.0), + FacetEnabled: aws.Bool(true), + ReturnEnabled: aws.Bool(true), + SearchEnabled: aws.Bool(true), + SortEnabled: aws.Bool(true), + SourceField: aws.String("FieldName"), + }, + IntArrayOptions: &cloudsearch.IntArrayOptions{ + DefaultValue: aws.Int64(1), + FacetEnabled: aws.Bool(true), + ReturnEnabled: aws.Bool(true), + SearchEnabled: aws.Bool(true), + SourceFields: aws.String("FieldNameCommaList"), + }, + IntOptions: &cloudsearch.IntOptions{ + DefaultValue: aws.Int64(1), + FacetEnabled: aws.Bool(true), + ReturnEnabled: aws.Bool(true), + SearchEnabled: aws.Bool(true), + SortEnabled: aws.Bool(true), + SourceField: aws.String("FieldName"), + }, + LatLonOptions: &cloudsearch.LatLonOptions{ + DefaultValue: aws.String("FieldValue"), + FacetEnabled: aws.Bool(true), + ReturnEnabled: aws.Bool(true), + SearchEnabled: aws.Bool(true), + SortEnabled: aws.Bool(true), + SourceField: aws.String("FieldName"), + }, + LiteralArrayOptions: &cloudsearch.LiteralArrayOptions{ + DefaultValue: aws.String("FieldValue"), + FacetEnabled: aws.Bool(true), + ReturnEnabled: aws.Bool(true), + SearchEnabled: aws.Bool(true), + SourceFields: aws.String("FieldNameCommaList"), + }, + LiteralOptions: &cloudsearch.LiteralOptions{ + DefaultValue: aws.String("FieldValue"), + FacetEnabled: aws.Bool(true), + ReturnEnabled: aws.Bool(true), + SearchEnabled: aws.Bool(true), + SortEnabled: aws.Bool(true), + SourceField: aws.String("FieldName"), + }, + TextArrayOptions: &cloudsearch.TextArrayOptions{ + AnalysisScheme: aws.String("Word"), + DefaultValue: aws.String("FieldValue"), + HighlightEnabled: aws.Bool(true), + ReturnEnabled: aws.Bool(true), + SourceFields: aws.String("FieldNameCommaList"), + }, + TextOptions: &cloudsearch.TextOptions{ + AnalysisScheme: aws.String("Word"), + DefaultValue: aws.String("FieldValue"), + HighlightEnabled: aws.Bool(true), + ReturnEnabled: aws.Bool(true), + SortEnabled: aws.Bool(true), + SourceField: aws.String("FieldName"), + }, + }, + } + resp, err := svc.DefineIndexField(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudSearch_DefineSuggester() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.DefineSuggesterInput{ + DomainName: aws.String("DomainName"), // Required + Suggester: &cloudsearch.Suggester{ // Required + DocumentSuggesterOptions: &cloudsearch.DocumentSuggesterOptions{ // Required + SourceField: aws.String("FieldName"), // Required + FuzzyMatching: aws.String("SuggesterFuzzyMatching"), + SortExpression: aws.String("String"), + }, + SuggesterName: aws.String("StandardName"), // Required + }, + } + resp, err := svc.DefineSuggester(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudSearch_DeleteAnalysisScheme() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.DeleteAnalysisSchemeInput{ + AnalysisSchemeName: aws.String("StandardName"), // Required + DomainName: aws.String("DomainName"), // Required + } + resp, err := svc.DeleteAnalysisScheme(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudSearch_DeleteDomain() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.DeleteDomainInput{ + DomainName: aws.String("DomainName"), // Required + } + resp, err := svc.DeleteDomain(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudSearch_DeleteExpression() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.DeleteExpressionInput{ + DomainName: aws.String("DomainName"), // Required + ExpressionName: aws.String("StandardName"), // Required + } + resp, err := svc.DeleteExpression(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudSearch_DeleteIndexField() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.DeleteIndexFieldInput{ + DomainName: aws.String("DomainName"), // Required + IndexFieldName: aws.String("DynamicFieldName"), // Required + } + resp, err := svc.DeleteIndexField(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudSearch_DeleteSuggester() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.DeleteSuggesterInput{ + DomainName: aws.String("DomainName"), // Required + SuggesterName: aws.String("StandardName"), // Required + } + resp, err := svc.DeleteSuggester(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudSearch_DescribeAnalysisSchemes() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.DescribeAnalysisSchemesInput{ + DomainName: aws.String("DomainName"), // Required + AnalysisSchemeNames: []*string{ + aws.String("StandardName"), // Required + // More values... + }, + Deployed: aws.Bool(true), + } + resp, err := svc.DescribeAnalysisSchemes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudSearch_DescribeAvailabilityOptions() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.DescribeAvailabilityOptionsInput{ + DomainName: aws.String("DomainName"), // Required + Deployed: aws.Bool(true), + } + resp, err := svc.DescribeAvailabilityOptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudSearch_DescribeDomains() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.DescribeDomainsInput{ + DomainNames: []*string{ + aws.String("DomainName"), // Required + // More values... + }, + } + resp, err := svc.DescribeDomains(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudSearch_DescribeExpressions() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.DescribeExpressionsInput{ + DomainName: aws.String("DomainName"), // Required + Deployed: aws.Bool(true), + ExpressionNames: []*string{ + aws.String("StandardName"), // Required + // More values... + }, + } + resp, err := svc.DescribeExpressions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudSearch_DescribeIndexFields() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.DescribeIndexFieldsInput{ + DomainName: aws.String("DomainName"), // Required + Deployed: aws.Bool(true), + FieldNames: []*string{ + aws.String("DynamicFieldName"), // Required + // More values... + }, + } + resp, err := svc.DescribeIndexFields(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudSearch_DescribeScalingParameters() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.DescribeScalingParametersInput{ + DomainName: aws.String("DomainName"), // Required + } + resp, err := svc.DescribeScalingParameters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudSearch_DescribeServiceAccessPolicies() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.DescribeServiceAccessPoliciesInput{ + DomainName: aws.String("DomainName"), // Required + Deployed: aws.Bool(true), + } + resp, err := svc.DescribeServiceAccessPolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudSearch_DescribeSuggesters() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.DescribeSuggestersInput{ + DomainName: aws.String("DomainName"), // Required + Deployed: aws.Bool(true), + SuggesterNames: []*string{ + aws.String("StandardName"), // Required + // More values... + }, + } + resp, err := svc.DescribeSuggesters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudSearch_IndexDocuments() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.IndexDocumentsInput{ + DomainName: aws.String("DomainName"), // Required + } + resp, err := svc.IndexDocuments(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudSearch_ListDomainNames() { + svc := cloudsearch.New(session.New()) + + var params *cloudsearch.ListDomainNamesInput + resp, err := svc.ListDomainNames(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudSearch_UpdateAvailabilityOptions() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.UpdateAvailabilityOptionsInput{ + DomainName: aws.String("DomainName"), // Required + MultiAZ: aws.Bool(true), // Required + } + resp, err := svc.UpdateAvailabilityOptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudSearch_UpdateScalingParameters() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.UpdateScalingParametersInput{ + DomainName: aws.String("DomainName"), // Required + ScalingParameters: &cloudsearch.ScalingParameters{ // Required + DesiredInstanceType: aws.String("PartitionInstanceType"), + DesiredPartitionCount: aws.Int64(1), + DesiredReplicationCount: aws.Int64(1), + }, + } + resp, err := svc.UpdateScalingParameters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
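+ // resp.ScalingParameters pairs the requested Options with an OptionStatus;
+ // per OptionState above, Status.State reports "Processing" while the change
+ // is being activated and "Active" once it is fully deployed.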
+ fmt.Println(resp)
+}
+
+func ExampleCloudSearch_UpdateServiceAccessPolicies() {
+ svc := cloudsearch.New(session.New())
+
+ params := &cloudsearch.UpdateServiceAccessPoliciesInput{
+ AccessPolicies: aws.String("PolicyDocument"), // Required
+ DomainName: aws.String("DomainName"), // Required
+ }
+ resp, err := svc.UpdateServiceAccessPolicies(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudsearch/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudsearch/service.go
new file mode 100644
index 000000000..e4a16069a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/cloudsearch/service.go
@@ -0,0 +1,94 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package cloudsearch
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol/query"
+)
+
+// You use the Amazon CloudSearch configuration service to create, configure,
+// and manage search domains. Configuration service requests are submitted using
+// the AWS Query protocol. AWS Query requests are HTTP or HTTPS requests submitted
+// via HTTP GET or POST with a query parameter named Action.
+//
+// The endpoint for configuration service requests is region-specific: cloudsearch.region.amazonaws.com.
+// For example, cloudsearch.us-east-1.amazonaws.com. For a current list of supported
+// regions and endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#cloudsearch_region).
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type CloudSearch struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "cloudsearch"
+
+// New creates a new instance of the CloudSearch client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a CloudSearch client from just a session.
+// svc := cloudsearch.New(mySession)
+//
+// // Create a CloudSearch client with additional configuration
+// svc := cloudsearch.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudSearch {
+ c := p.ClientConfig(ServiceName, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
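+// The handler wiring below registers v4.SignRequestHandler to sign each request
+// with Signature Version 4, plus the query protocol handlers that marshal and
+// unmarshal the AWS Query wire format described in the package comment.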
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *CloudSearch { + svc := &CloudSearch{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2013-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a CloudSearch operation and runs any +// custom request initialization. +func (c *CloudSearch) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudsearchdomain/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloudsearchdomain/api.go new file mode 100644 index 000000000..0510f298c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudsearchdomain/api.go @@ -0,0 +1,964 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cloudsearchdomain provides a client for Amazon CloudSearch Domain. +package cloudsearchdomain + +import ( + "io" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opSearch = "Search" + +// SearchRequest generates a "aws/request.Request" representing the +// client's request for the Search operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the Search method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SearchRequest method. +// req, resp := client.SearchRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudSearchDomain) SearchRequest(input *SearchInput) (req *request.Request, output *SearchOutput) { + op := &request.Operation{ + Name: opSearch, + HTTPMethod: "GET", + HTTPPath: "/2013-01-01/search?format=sdk&pretty=true", + } + + if input == nil { + input = &SearchInput{} + } + + req = c.newRequest(op, input, output) + output = &SearchOutput{} + req.Data = output + return +} + +// Retrieves a list of documents that match the specified search criteria. How +// you specify the search criteria depends on which query parser you use. Amazon +// CloudSearch supports four query parsers: +// +// simple: search all text and text-array fields for the specified string. +// Search for phrases, individual terms, and prefixes. 
structured: search specific
+// fields, construct compound queries using Boolean operators, and use advanced
+// features such as term boosting and proximity searching. lucene: specify search
+// criteria using the Apache Lucene query parser syntax. dismax: specify search
+// criteria using the simplified subset of the Apache Lucene query parser syntax
+// defined by the DisMax query parser. For more information, see Searching
+// Your Data (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/searching.html)
+// in the Amazon CloudSearch Developer Guide.
+//
+// The endpoint for submitting Search requests is domain-specific. You submit
+// search requests to a domain's search endpoint. To get the search endpoint
+// for your domain, use the Amazon CloudSearch configuration service DescribeDomains
+// action. A domain's endpoints are also displayed on the domain dashboard in
+// the Amazon CloudSearch console.
+func (c *CloudSearchDomain) Search(input *SearchInput) (*SearchOutput, error) {
+ req, out := c.SearchRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opSuggest = "Suggest"
+
+// SuggestRequest generates a "aws/request.Request" representing the
+// client's request for the Suggest operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the Suggest method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the SuggestRequest method.
+// req, resp := client.SuggestRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CloudSearchDomain) SuggestRequest(input *SuggestInput) (req *request.Request, output *SuggestOutput) {
+ op := &request.Operation{
+ Name: opSuggest,
+ HTTPMethod: "GET",
+ HTTPPath: "/2013-01-01/suggest?format=sdk&pretty=true",
+ }
+
+ if input == nil {
+ input = &SuggestInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &SuggestOutput{}
+ req.Data = output
+ return
+}
+
+// Retrieves autocomplete suggestions for a partial query string. Suggestions
+// enable you to display likely matches before users finish typing. In Amazon
+// CloudSearch, suggestions are based on the contents of a particular text field.
+// When you request suggestions, Amazon CloudSearch finds all of the documents
+// whose values in the suggester field start with the specified query string.
+// The beginning of the field must match the query string to be considered a match.
+//
+// For more information about configuring suggesters and retrieving suggestions,
+// see Getting Suggestions (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/getting-suggestions.html)
+// in the Amazon CloudSearch Developer Guide.
+//
+// The endpoint for submitting Suggest requests is domain-specific. You submit
+// suggest requests to a domain's search endpoint. To get the search endpoint
+// for your domain, use the Amazon CloudSearch configuration service DescribeDomains
+// action. A domain's endpoints are also displayed on the domain dashboard in
+// the Amazon CloudSearch console.
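+//
+// A minimal sketch of a Suggest call; the endpoint, suggester name, and partial
+// query below are illustrative placeholders:
+//
+// svc := cloudsearchdomain.New(session.New(), aws.NewConfig().
+// WithEndpoint("search-mydomain-xxxx.us-east-1.cloudsearch.amazonaws.com"))
+// resp, err := svc.Suggest(&cloudsearchdomain.SuggestInput{
+// Query: aws.String("sea"), // Required: the partial query string
+// Suggester: aws.String("title_suggester"), // Required: a configured suggester
+// Size: aws.Int64(5),
+// })
+// if err == nil {
+// fmt.Println(resp)
+// }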
+func (c *CloudSearchDomain) Suggest(input *SuggestInput) (*SuggestOutput, error) {
+ req, out := c.SuggestRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opUploadDocuments = "UploadDocuments"
+
+// UploadDocumentsRequest generates a "aws/request.Request" representing the
+// client's request for the UploadDocuments operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the UploadDocuments method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the UploadDocumentsRequest method.
+// req, resp := client.UploadDocumentsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CloudSearchDomain) UploadDocumentsRequest(input *UploadDocumentsInput) (req *request.Request, output *UploadDocumentsOutput) {
+ op := &request.Operation{
+ Name: opUploadDocuments,
+ HTTPMethod: "POST",
+ HTTPPath: "/2013-01-01/documents/batch?format=sdk",
+ }
+
+ if input == nil {
+ input = &UploadDocumentsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &UploadDocumentsOutput{}
+ req.Data = output
+ return
+}
+
+// Posts a batch of documents to a search domain for indexing. A document batch
+// is a collection of add and delete operations that represent the documents
+// you want to add, update, or delete from your domain. Batches can be described
+// in either JSON or XML. Each item that you want Amazon CloudSearch to return
+// as a search result (such as a product) is represented as a document. Every
+// document has a unique ID and one or more fields that contain the data that
+// you want to search and return in results. Individual documents cannot contain
+// more than 1 MB of data. The entire batch cannot exceed 5 MB. To get the best
+// possible upload performance, group add and delete operations in batches that
+// are close to the 5 MB limit. Submitting a large volume of single-document batches
+// can overload a domain's document service.
+//
+// The endpoint for submitting UploadDocuments requests is domain-specific.
+// To get the document endpoint for your domain, use the Amazon CloudSearch
+// configuration service DescribeDomains action. A domain's endpoints are also
+// displayed on the domain dashboard in the Amazon CloudSearch console.
+//
+// For more information about formatting your data for Amazon CloudSearch,
+// see Preparing Your Data (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/preparing-data.html)
+// in the Amazon CloudSearch Developer Guide. For more information about uploading
+// data for indexing, see Uploading Data (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/uploading-data.html)
+// in the Amazon CloudSearch Developer Guide.
+func (c *CloudSearchDomain) UploadDocuments(input *UploadDocumentsInput) (*UploadDocumentsOutput, error) {
+ req, out := c.UploadDocumentsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// A container for facet information.
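+// Buckets are returned when a search request asks for facet information; each
+// bucket pairs a facet value with the number of matching documents (the Value
+// and Count fields below).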
+type Bucket struct { + _ struct{} `type:"structure"` + + // The number of hits that contain the facet value in the specified facet field. + Count *int64 `locationName:"count" type:"long"` + + // The facet value being counted. + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s Bucket) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Bucket) GoString() string { + return s.String() +} + +// A container for the calculated facet values and counts. +type BucketInfo struct { + _ struct{} `type:"structure"` + + // A list of the calculated facet values and counts. + Buckets []*Bucket `locationName:"buckets" type:"list"` +} + +// String returns the string representation +func (s BucketInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketInfo) GoString() string { + return s.String() +} + +// A warning returned by the document service when an issue is discovered while +// processing an upload request. +type DocumentServiceWarning struct { + _ struct{} `type:"structure"` + + // The description for a warning returned by the document service. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s DocumentServiceWarning) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DocumentServiceWarning) GoString() string { + return s.String() +} + +// The statistics for a field calculated in the request. +type FieldStats struct { + _ struct{} `type:"structure"` + + // The number of documents that contain a value in the specified field in the + // result set. + Count *int64 `locationName:"count" type:"long"` + + // The maximum value found in the specified field in the result set. + // + // If the field is numeric (int, int-array, double, or double-array), max is + // the string representation of a double-precision 64-bit floating point value. + // If the field is date or date-array, max is the string representation of a + // date with the format specified in IETF RFC3339 (http://tools.ietf.org/html/rfc3339): + // yyyy-mm-ddTHH:mm:ss.SSSZ. + Max *string `locationName:"max" type:"string"` + + // The average of the values found in the specified field in the result set. + // + // If the field is numeric (int, int-array, double, or double-array), mean + // is the string representation of a double-precision 64-bit floating point + // value. If the field is date or date-array, mean is the string representation + // of a date with the format specified in IETF RFC3339 (http://tools.ietf.org/html/rfc3339): + // yyyy-mm-ddTHH:mm:ss.SSSZ. + Mean *string `locationName:"mean" type:"string"` + + // The minimum value found in the specified field in the result set. + // + // If the field is numeric (int, int-array, double, or double-array), min is + // the string representation of a double-precision 64-bit floating point value. + // If the field is date or date-array, min is the string representation of a + // date with the format specified in IETF RFC3339 (http://tools.ietf.org/html/rfc3339): + // yyyy-mm-ddTHH:mm:ss.SSSZ. + Min *string `locationName:"min" type:"string"` + + // The number of documents that do not contain a value in the specified field + // in the result set. + Missing *int64 `locationName:"missing" type:"long"` + + // The standard deviation of the values in the specified field in the result + // set. 
+ Stddev *float64 `locationName:"stddev" type:"double"` + + // The sum of the field values across the documents in the result set. null + // for date fields. + Sum *float64 `locationName:"sum" type:"double"` + + // The sum of all field values in the result set squared. + SumOfSquares *float64 `locationName:"sumOfSquares" type:"double"` +} + +// String returns the string representation +func (s FieldStats) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FieldStats) GoString() string { + return s.String() +} + +// Information about a document that matches the search request. +type Hit struct { + _ struct{} `type:"structure"` + + // The expressions returned from a document that matches the search request. + Exprs map[string]*string `locationName:"exprs" type:"map"` + + // The fields returned from a document that matches the search request. + Fields map[string][]*string `locationName:"fields" type:"map"` + + // The highlights returned from a document that matches the search request. + Highlights map[string]*string `locationName:"highlights" type:"map"` + + // The document ID of a document that matches the search request. + Id *string `locationName:"id" type:"string"` +} + +// String returns the string representation +func (s Hit) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Hit) GoString() string { + return s.String() +} + +// The collection of documents that match the search request. +type Hits struct { + _ struct{} `type:"structure"` + + // A cursor that can be used to retrieve the next set of matching documents + // when you want to page through a large result set. + Cursor *string `locationName:"cursor" type:"string"` + + // The total number of documents that match the search request. + Found *int64 `locationName:"found" type:"long"` + + // A document that matches the search request. + Hit []*Hit `locationName:"hit" type:"list"` + + // The index of the first matching document. + Start *int64 `locationName:"start" type:"long"` +} + +// String returns the string representation +func (s Hits) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Hits) GoString() string { + return s.String() +} + +// Container for the parameters to the Search request. +type SearchInput struct { + _ struct{} `type:"structure"` + + // Retrieves a cursor value you can use to page through large result sets. Use + // the size parameter to control the number of hits to include in each response. + // You can specify either the cursor or start parameter in a request; they are + // mutually exclusive. To get the first cursor, set the cursor value to initial. + // In subsequent requests, specify the cursor value returned in the hits section + // of the response. + // + // For more information, see Paginating Results (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/paginating-results.html) + // in the Amazon CloudSearch Developer Guide. + Cursor *string `location:"querystring" locationName:"cursor" type:"string"` + + // Defines one or more numeric expressions that can be used to sort results + // or specify search or filter criteria. You can also specify expressions as + // return fields. + // + // You specify the expressions in JSON using the form {"EXPRESSIONNAME":"EXPRESSION"}. + // You can define and use multiple expressions in a search request. 
For example:
+ //
+ // {"expression1":"_score*rating", "expression2":"(1/rank)*year"}
+ //
+ // For information about the variables, operators, and functions you can use
+ // in expressions, see Writing Expressions (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-expressions.html#writing-expressions)
+ // in the Amazon CloudSearch Developer Guide.
+ Expr *string `location:"querystring" locationName:"expr" type:"string"`
+
+ // Specifies one or more fields for which to get facet information, and options
+ // that control how the facet information is returned. Each specified field
+ // must be facet-enabled in the domain configuration. The fields and options
+ // are specified in JSON using the form {"FIELD":{"OPTION":VALUE,"OPTION":"STRING"},"FIELD":{"OPTION":VALUE,"OPTION":"STRING"}}.
+ //
+ // You can specify the following faceting options:
+ //
+ // buckets specifies an array of the facet values or ranges to count. Ranges
+ // are specified using the same syntax that you use to search for a range of
+ // values. For more information, see Searching for a Range of Values (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/searching-ranges.html)
+ // in the Amazon CloudSearch Developer Guide. Buckets are returned in the order
+ // they are specified in the request. The sort and size options are not valid
+ // if you specify buckets.
+ //
+ // size specifies the maximum number of facets to include in the results.
+ // By default, Amazon CloudSearch returns counts for the top 10. The size parameter
+ // is only valid when you specify the sort option; it cannot be used in conjunction
+ // with buckets.
+ //
+ // sort specifies how you want to sort the facets in the results: bucket
+ // or count. Specify bucket to sort alphabetically or numerically by facet value
+ // (in ascending order). Specify count to sort by the facet counts computed
+ // for each facet value (in descending order). To retrieve facet counts for
+ // particular values or ranges of values, use the buckets option instead of
+ // sort.
+ //
+ // If no facet options are specified, facet counts are computed for all field
+ // values, the facets are sorted by facet count, and the top 10 facets are returned
+ // in the results.
+ //
+ // To count particular buckets of values, use the buckets option. For example,
+ // the following request uses the buckets option to calculate and return facet
+ // counts by decade.
+ //
+ // {"year":{"buckets":["[1970,1979]","[1980,1989]","[1990,1999]","[2000,2009]","[2010,}"]}}
+ //
+ // To sort facets by facet count, use the count option. For example, the following
+ // request sets the sort option to count to sort the facet values by facet count,
+ // with the facet values that have the most matching documents listed first.
+ // Setting the size option to 3 returns only the top three facet values.
+ //
+ // {"year":{"sort":"count","size":3}}
+ //
+ // To sort the facets by value, use the bucket option. For example, the following
+ // request sets the sort option to bucket to sort the facet values numerically
+ // by year, with the earliest year listed first.
+ //
+ // {"year":{"sort":"bucket"}}
+ //
+ // For more information, see Getting and Using Facet Information (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/faceting.html)
+ // in the Amazon CloudSearch Developer Guide.
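+ //
+ // As a rough editorial sketch (not part of the generated documentation; the
+ // query string and facet JSON below are hypothetical), a faceted search could
+ // be issued like this:
+ //
+ //    params := &cloudsearchdomain.SearchInput{
+ //        Query: aws.String("star wars"),
+ //        Facet: aws.String(`{"year":{"sort":"count","size":3}}`),
+ //    }
+ //    resp, err := svc.Search(params) // svc is a *cloudsearchdomain.CloudSearchDomain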
+ Facet *string `location:"querystring" locationName:"facet" type:"string"`
+
+ // Specifies a structured query that filters the results of a search without
+ // affecting how the results are scored and sorted. You use filterQuery in conjunction
+ // with the query parameter to filter the documents that match the constraints
+ // specified in the query parameter. Specifying a filter controls only which
+ // matching documents are included in the results; it has no effect on how they
+ // are scored and sorted. The filterQuery parameter supports the full structured
+ // query syntax.
+ //
+ // For more information about using filters, see Filtering Matching Documents
+ // (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/filtering-results.html)
+ // in the Amazon CloudSearch Developer Guide.
+ FilterQuery *string `location:"querystring" locationName:"fq" type:"string"`
+
+ // Retrieves highlights for matches in the specified text or text-array fields.
+ // Each specified field must be highlight-enabled in the domain configuration.
+ // The fields and options are specified in JSON using the form {"FIELD":{"OPTION":VALUE,"OPTION":"STRING"},"FIELD":{"OPTION":VALUE,"OPTION":"STRING"}}.
+ //
+ // You can specify the following highlight options:
+ //
+ // format: specifies the format of the data in the text field: text or html.
+ // When data is returned as HTML, all non-alphanumeric characters are encoded.
+ // The default is html. max_phrases: specifies the maximum number of occurrences
+ // of the search term(s) you want to highlight. By default, the first occurrence
+ // is highlighted. pre_tag: specifies the string to prepend to an occurrence
+ // of a search term. The default for HTML highlights is <em>. The default
+ // for text highlights is *. post_tag: specifies the string to append to an
+ // occurrence of a search term. The default for HTML highlights is </em>.
+ // The default for text highlights is *. If no highlight options are specified
+ // for a field, the returned field text is treated as HTML and the first match
+ // is highlighted with emphasis tags: <em>search-term</em>.
+ //
+ // For example, the following request retrieves highlights for the actors and
+ // title fields.
+ //
+ // { "actors": {}, "title": {"format": "text","max_phrases": 2,"pre_tag":
+ // "","post_tag": ""} }
+ Highlight *string `location:"querystring" locationName:"highlight" type:"string"`
+
+ // Enables partial results to be returned if one or more index partitions are
+ // unavailable. When your search index is partitioned across multiple search
+ // instances, by default Amazon CloudSearch only returns results if every partition
+ // can be queried. This means that the failure of a single search instance can
+ // result in 5xx (internal server) errors. When you enable partial results,
+ // Amazon CloudSearch returns whatever results are available and includes the
+ // percentage of documents searched in the search results (percent-searched).
+ // This enables you to more gracefully degrade your users' search experience.
+ // For example, rather than displaying no results, you could display the partial
+ // results and a message indicating that the results might be incomplete due
+ // to a temporary system outage.
+ Partial *bool `location:"querystring" locationName:"partial" type:"boolean"`
+
+ // Specifies the search criteria for the request.
How you specify the search
+ // criteria depends on the query parser used for the request and the parser
+ // options specified in the queryOptions parameter. By default, the simple query
+ // parser is used to process requests. To use the structured, lucene, or dismax
+ // query parser, you must also specify the queryParser parameter.
+ //
+ // For more information about specifying search criteria, see Searching Your
+ // Data (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/searching.html)
+ // in the Amazon CloudSearch Developer Guide.
+ Query *string `location:"querystring" locationName:"q" type:"string" required:"true"`
+
+ // Configures options for the query parser specified in the queryParser parameter.
+ // You specify the options in JSON using the following form {"OPTION1":"VALUE1","OPTION2":"VALUE2",..."OPTIONN":"VALUEN"}.
+ //
+ // The options you can configure vary according to which parser you use:
+ //
+ // defaultOperator: The default operator used to combine individual terms
+ // in the search string. For example: defaultOperator: 'or'. For the dismax
+ // parser, you specify a percentage that represents the percentage of terms
+ // in the search string (rounded down) that must match, rather than a default
+ // operator. A value of 0% is equivalent to OR, and a value of 100% is equivalent
+ // to AND. The percentage must be specified as a value in the range 0-100 followed
+ // by the percent (%) symbol. For example, defaultOperator: 50%. Valid values:
+ // and, or, a percentage in the range 0%-100% (dismax). Default: and (simple,
+ // structured, lucene) or 100 (dismax). Valid for: simple, structured, lucene,
+ // and dismax. fields: An array of the fields to search when no fields are specified
+ // in a search. If no fields are specified in a search and this option is not
+ // specified, all text and text-array fields are searched. You can specify a
+ // weight for each field to control the relative importance of each field when
+ // Amazon CloudSearch calculates relevance scores. To specify a field weight,
+ // append a caret (^) symbol and the weight to the field name. For example,
+ // to boost the importance of the title field over the description field, you
+ // could specify: "fields":["title^5","description"]. Valid values: The name
+ // of any configured field and an optional numeric value greater than zero.
+ // Default: All text and text-array fields. Valid for: simple, structured, lucene,
+ // and dismax. operators: An array of the operators or special characters you
+ // want to disable for the simple query parser. If you disable the and, or,
+ // or not operators, the corresponding operators (+, |, -) have no special meaning
+ // and are dropped from the search string. Similarly, disabling prefix disables
+ // the wildcard operator (*) and disabling phrase disables the ability to search
+ // for phrases by enclosing phrases in double quotes. Disabling precedence disables
+ // the ability to control order of precedence using parentheses. Disabling near
+ // disables the ability to use the ~ operator to perform a sloppy phrase search.
+ // Disabling the fuzzy operator disables the ability to use the ~ operator to
+ // perform a fuzzy search. escape disables the ability to use a backslash (\)
+ // to escape special characters within the search string. Disabling whitespace
+ // is an advanced option that prevents the parser from tokenizing on whitespace,
+ // which can be useful for Vietnamese.
(It prevents Vietnamese words from being
+ // split incorrectly.) For example, you could disable all operators other than
+ // the phrase operator to support just simple term and phrase queries: "operators":["and","not","or",
+ // "prefix"]. Valid values: and, escape, fuzzy, near, not, or, phrase, precedence,
+ // prefix, whitespace. Default: All operators and special characters are enabled.
+ // Valid for: simple. phraseFields: An array of the text or text-array fields
+ // you want to use for phrase searches. When the terms in the search string
+ // appear in close proximity within a field, the field scores higher. You can
+ // specify a weight for each field to boost that score. The phraseSlop option
+ // controls how much the matches can deviate from the search string and still
+ // be boosted. To specify a field weight, append a caret (^) symbol and the
+ // weight to the field name. For example, to boost phrase matches in the title
+ // field over the plot field, you could specify: "phraseFields":["title^3",
+ // "plot"]. Valid values: The name of any text or text-array field and an optional
+ // numeric value greater than zero. Default: No fields. If you don't specify
+ // any fields with phraseFields, proximity scoring is disabled even if phraseSlop
+ // is specified. Valid for: dismax. phraseSlop: An integer value that specifies
+ // how much matches can deviate from the search phrase and still be boosted
+ // according to the weights specified in the phraseFields option; for example,
+ // phraseSlop: 2. You must also specify phraseFields to enable proximity scoring.
+ // Valid values: positive integers. Default: 0. Valid for: dismax. explicitPhraseSlop:
+ // An integer value that specifies how much a match can deviate from the search
+ // phrase when the phrase is enclosed in double quotes in the search string.
+ // (Phrases that exceed this proximity distance are not considered a match.)
+ // For example, to specify a slop of three for dismax phrase queries, you would
+ // specify "explicitPhraseSlop":3. Valid values: positive integers. Default:
+ // 0. Valid for: dismax. tieBreaker: When a term in the search string is found
+ // in a document's field, a score is calculated for that field based on how
+ // common the word is in that field compared to other documents. If the term
+ // occurs in multiple fields within a document, by default only the highest
+ // scoring field contributes to the document's overall score. You can specify
+ // a tieBreaker value to enable the matches in lower-scoring fields to contribute
+ // to the document's score. That way, if two documents have the same max field
+ // score for a particular term, the score for the document that has matches
+ // in more fields will be higher. The formula for calculating the score with
+ // a tieBreaker is (max field score) + (tieBreaker) * (sum of the scores for
+ // the rest of the matching fields). Set tieBreaker to 0 to disregard all but
+ // the highest scoring field (pure max): "tieBreaker":0. Set to 1 to sum the
+ // scores from all fields (pure sum): "tieBreaker":1. Valid values: 0.0 to 1.0.
+ // Default: 0.0. Valid for: dismax.
+ QueryOptions *string `location:"querystring" locationName:"q.options" type:"string"`
+
+ // Specifies which query parser to use to process the request. If queryParser
+ // is not specified, Amazon CloudSearch uses the simple query parser.
+ //
+ // Amazon CloudSearch supports four query parsers:
+ //
+ // simple: perform simple searches of text and text-array fields.
By default,
+ // the simple query parser searches all text and text-array fields. You can
+ // specify which fields to search with the queryOptions parameter. If you
+ // prefix a search term with a plus sign (+), documents must contain the term
+ // to be considered a match. (This is the default, unless you configure the
+ // default operator with the queryOptions parameter.) You can use the - (NOT),
+ // | (OR), and * (wildcard) operators to exclude particular terms, find results
+ // that match any of the specified terms, or search for a prefix. To search
+ // for a phrase rather than individual terms, enclose the phrase in double quotes.
+ // For more information, see Searching for Text (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/searching-text.html)
+ // in the Amazon CloudSearch Developer Guide. structured: perform advanced
+ // searches by combining multiple expressions to define the search criteria.
+ // You can also search within particular fields, search for values and ranges
+ // of values, and use advanced options such as term boosting, matchall, and
+ // near. For more information, see Constructing Compound Queries (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/searching-compound-queries.html)
+ // in the Amazon CloudSearch Developer Guide. lucene: search using the Apache
+ // Lucene query parser syntax. For more information, see Apache Lucene Query
+ // Parser Syntax (http://lucene.apache.org/core/4_6_0/queryparser/org/apache/lucene/queryparser/classic/package-summary.html#package_description).
+ // dismax: search using the simplified subset of the Apache Lucene query parser
+ // syntax defined by the DisMax query parser. For more information, see DisMax
+ // Query Parser Syntax (http://wiki.apache.org/solr/DisMaxQParserPlugin#Query_Syntax).
+ QueryParser *string `location:"querystring" locationName:"q.parser" type:"string" enum:"QueryParser"`
+
+ // Specifies the field and expression values to include in the response. Multiple
+ // fields or expressions are specified as a comma-separated list. By default,
+ // a search response includes all return-enabled fields (_all_fields). To return
+ // only the document IDs for the matching documents, specify _no_fields. To
+ // retrieve the relevance score calculated for each document, specify _score.
+ Return *string `location:"querystring" locationName:"return" type:"string"`
+
+ // Specifies the maximum number of search hits to include in the response.
+ Size *int64 `location:"querystring" locationName:"size" type:"long"`
+
+ // Specifies the fields or custom expressions to use to sort the search results.
+ // Multiple fields or expressions are specified as a comma-separated list. You
+ // must specify the sort direction (asc or desc) for each field; for example,
+ // year desc,title asc. To use a field to sort results, the field must be sort-enabled
+ // in the domain configuration. Array type fields cannot be used for sorting.
+ // If no sort parameter is specified, results are sorted by their default relevance
+ // scores in descending order: _score desc. You can also sort by document ID
+ // (_id asc) and version (_version desc).
+ //
+ // For more information, see Sorting Results (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/sorting-results.html)
+ // in the Amazon CloudSearch Developer Guide.
+ Sort *string `location:"querystring" locationName:"sort" type:"string"`
+
+ // Specifies the offset of the first search hit you want to return.
Note that
+ // the result set is zero-based; the first result is at index 0. You can specify
+ // either the start or cursor parameter in a request; they are mutually exclusive.
+ //
+ // For more information, see Paginating Results (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/paginating-results.html)
+ // in the Amazon CloudSearch Developer Guide.
+ Start *int64 `location:"querystring" locationName:"start" type:"long"`
+
+ // Specifies one or more fields for which to get statistics information. Each
+ // specified field must be facet-enabled in the domain configuration. The fields
+ // are specified in JSON using the form:
+ //
+ // {"FIELD-A":{},"FIELD-B":{}}
+ //
+ // There are currently no options supported for statistics.
+ Stats *string `location:"querystring" locationName:"stats" type:"string"`
+}
+
+// String returns the string representation
+func (s SearchInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SearchInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SearchInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "SearchInput"}
+ if s.Query == nil {
+ invalidParams.Add(request.NewErrParamRequired("Query"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// The result of a Search request. Contains the documents that match the specified
+// search criteria and any requested fields, highlights, and facet information.
+type SearchOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The requested facet information.
+ Facets map[string]*BucketInfo `locationName:"facets" type:"map"`
+
+ // The documents that match the search criteria.
+ Hits *Hits `locationName:"hits" type:"structure"`
+
+ // The requested field statistics information.
+ Stats map[string]*FieldStats `locationName:"stats" type:"map"`
+
+ // The status information returned for the search request.
+ Status *SearchStatus `locationName:"status" type:"structure"`
+}
+
+// String returns the string representation
+func (s SearchOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SearchOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the resource id (rid) and the time it took to process the request
+// (timems).
+type SearchStatus struct {
+ _ struct{} `type:"structure"`
+
+ // The encrypted resource ID for the request.
+ Rid *string `locationName:"rid" type:"string"`
+
+ // How long it took to process the request, in milliseconds.
+ Timems *int64 `locationName:"timems" type:"long"`
+}
+
+// String returns the string representation
+func (s SearchStatus) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SearchStatus) GoString() string {
+ return s.String()
+}
+
+// Container for the parameters to the Suggest request.
+type SuggestInput struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the string for which you want to get suggestions.
+ Query *string `location:"querystring" locationName:"q" type:"string" required:"true"`
+
+ // Specifies the maximum number of suggestions to return.
+ Size *int64 `location:"querystring" locationName:"size" type:"long"`
+
+ // Specifies the name of the suggester to use to find suggested matches.
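+ //
+ // A minimal editorial sketch (not generated documentation; the suggester name
+ // "title_suggester" is hypothetical and must be configured for the domain):
+ //
+ //    resp, err := svc.Suggest(&cloudsearchdomain.SuggestInput{
+ //        Query:     aws.String("sta"),
+ //        Suggester: aws.String("title_suggester"),
+ //        Size:      aws.Int64(5),
+ //    })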
+ Suggester *string `location:"querystring" locationName:"suggester" type:"string" required:"true"` +} + +// String returns the string representation +func (s SuggestInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SuggestInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SuggestInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SuggestInput"} + if s.Query == nil { + invalidParams.Add(request.NewErrParamRequired("Query")) + } + if s.Suggester == nil { + invalidParams.Add(request.NewErrParamRequired("Suggester")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Container for the suggestion information returned in a SuggestResponse. +type SuggestModel struct { + _ struct{} `type:"structure"` + + // The number of documents that were found to match the query string. + Found *int64 `locationName:"found" type:"long"` + + // The query string specified in the suggest request. + Query *string `locationName:"query" type:"string"` + + // The documents that match the query string. + Suggestions []*SuggestionMatch `locationName:"suggestions" type:"list"` +} + +// String returns the string representation +func (s SuggestModel) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SuggestModel) GoString() string { + return s.String() +} + +// Contains the response to a Suggest request. +type SuggestOutput struct { + _ struct{} `type:"structure"` + + // The status of a SuggestRequest. Contains the resource ID (rid) and how long + // it took to process the request (timems). + Status *SuggestStatus `locationName:"status" type:"structure"` + + // Container for the matching search suggestion information. + Suggest *SuggestModel `locationName:"suggest" type:"structure"` +} + +// String returns the string representation +func (s SuggestOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SuggestOutput) GoString() string { + return s.String() +} + +// Contains the resource id (rid) and the time it took to process the request +// (timems). +type SuggestStatus struct { + _ struct{} `type:"structure"` + + // The encrypted resource ID for the request. + Rid *string `locationName:"rid" type:"string"` + + // How long it took to process the request, in milliseconds. + Timems *int64 `locationName:"timems" type:"long"` +} + +// String returns the string representation +func (s SuggestStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SuggestStatus) GoString() string { + return s.String() +} + +// An autocomplete suggestion that matches the query string specified in a SuggestRequest. +type SuggestionMatch struct { + _ struct{} `type:"structure"` + + // The document ID of the suggested document. + Id *string `locationName:"id" type:"string"` + + // The relevance score of a suggested match. + Score *int64 `locationName:"score" type:"long"` + + // The string that matches the query string specified in the SuggestRequest. 
+ Suggestion *string `locationName:"suggestion" type:"string"`
+}
+
+// String returns the string representation
+func (s SuggestionMatch) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SuggestionMatch) GoString() string {
+ return s.String()
+}
+
+// Container for the parameters to the UploadDocuments request.
+type UploadDocumentsInput struct {
+ _ struct{} `type:"structure" payload:"Documents"`
+
+ // The format of the batch you are uploading. Amazon CloudSearch supports two
+ // document batch formats:
+ //
+ // application/json and application/xml
+ ContentType *string `location:"header" locationName:"Content-Type" type:"string" required:"true" enum:"ContentType"`
+
+ // A batch of documents formatted in JSON or XML.
+ Documents io.ReadSeeker `locationName:"documents" type:"blob" required:"true"`
+}
+
+// String returns the string representation
+func (s UploadDocumentsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UploadDocumentsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UploadDocumentsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UploadDocumentsInput"}
+ if s.ContentType == nil {
+ invalidParams.Add(request.NewErrParamRequired("ContentType"))
+ }
+ if s.Documents == nil {
+ invalidParams.Add(request.NewErrParamRequired("Documents"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Contains the response to an UploadDocuments request.
+type UploadDocumentsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The number of documents that were added to the search domain.
+ Adds *int64 `locationName:"adds" type:"long"`
+
+ // The number of documents that were deleted from the search domain.
+ Deletes *int64 `locationName:"deletes" type:"long"`
+
+ // The status of an UploadDocumentsRequest.
+ Status *string `locationName:"status" type:"string"`
+
+ // Any warnings returned by the document service about the documents being uploaded.
+ Warnings []*DocumentServiceWarning `locationName:"warnings" type:"list"`
+}
+
+// String returns the string representation
+func (s UploadDocumentsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UploadDocumentsOutput) GoString() string {
+ return s.String()
+}
+
+const (
+ // @enum ContentType
+ ContentTypeApplicationJson = "application/json"
+ // @enum ContentType
+ ContentTypeApplicationXml = "application/xml"
+)
+
+const (
+ // @enum QueryParser
+ QueryParserSimple = "simple"
+ // @enum QueryParser
+ QueryParserStructured = "structured"
+ // @enum QueryParser
+ QueryParserLucene = "lucene"
+ // @enum QueryParser
+ QueryParserDismax = "dismax"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudsearchdomain/cloudsearchdomainiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/cloudsearchdomain/cloudsearchdomainiface/interface.go
new file mode 100644
index 000000000..4b80f620b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/cloudsearchdomain/cloudsearchdomainiface/interface.go
@@ -0,0 +1,26 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package cloudsearchdomainiface provides an interface for the Amazon CloudSearch Domain.
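+//
+// As an editorial sketch (not generated documentation), the interface can be
+// used to stub the client in unit tests; mockCloudSearchDomainClient below is
+// a hypothetical type name:
+//
+//    type mockCloudSearchDomainClient struct {
+//        cloudsearchdomainiface.CloudSearchDomainAPI
+//    }
+//
+//    // Search returns a canned response instead of calling the service.
+//    func (m *mockCloudSearchDomainClient) Search(in *cloudsearchdomain.SearchInput) (*cloudsearchdomain.SearchOutput, error) {
+//        return &cloudsearchdomain.SearchOutput{}, nil
+//    }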
+package cloudsearchdomainiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/cloudsearchdomain" +) + +// CloudSearchDomainAPI is the interface type for cloudsearchdomain.CloudSearchDomain. +type CloudSearchDomainAPI interface { + SearchRequest(*cloudsearchdomain.SearchInput) (*request.Request, *cloudsearchdomain.SearchOutput) + + Search(*cloudsearchdomain.SearchInput) (*cloudsearchdomain.SearchOutput, error) + + SuggestRequest(*cloudsearchdomain.SuggestInput) (*request.Request, *cloudsearchdomain.SuggestOutput) + + Suggest(*cloudsearchdomain.SuggestInput) (*cloudsearchdomain.SuggestOutput, error) + + UploadDocumentsRequest(*cloudsearchdomain.UploadDocumentsInput) (*request.Request, *cloudsearchdomain.UploadDocumentsOutput) + + UploadDocuments(*cloudsearchdomain.UploadDocumentsInput) (*cloudsearchdomain.UploadDocumentsOutput, error) +} + +var _ CloudSearchDomainAPI = (*cloudsearchdomain.CloudSearchDomain)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudsearchdomain/customizations_test.go b/vendor/github.com/aws/aws-sdk-go/service/cloudsearchdomain/customizations_test.go new file mode 100644 index 000000000..49bfeb8b4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudsearchdomain/customizations_test.go @@ -0,0 +1,50 @@ +package cloudsearchdomain_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/cloudsearchdomain" +) + +func TestRequireEndpointIfRegionProvided(t *testing.T) { + svc := cloudsearchdomain.New(unit.Session, &aws.Config{ + Region: aws.String("mock-region"), + DisableParamValidation: aws.Bool(true), + }) + req, _ := svc.SearchRequest(nil) + err := req.Build() + + assert.Equal(t, "", svc.Endpoint) + assert.Error(t, err) + assert.Equal(t, aws.ErrMissingEndpoint, err) +} + +func TestRequireEndpointIfNoRegionProvided(t *testing.T) { + svc := cloudsearchdomain.New(unit.Session, &aws.Config{ + Region: aws.String(""), + DisableParamValidation: aws.Bool(true), + }) + req, _ := svc.SearchRequest(nil) + err := req.Build() + + assert.Equal(t, "", svc.Endpoint) + assert.Error(t, err) + assert.Equal(t, aws.ErrMissingEndpoint, err) +} + +func TestRequireEndpointUsed(t *testing.T) { + svc := cloudsearchdomain.New(unit.Session, &aws.Config{ + Region: aws.String("mock-region"), + DisableParamValidation: aws.Bool(true), + Endpoint: aws.String("https://endpoint"), + }) + req, _ := svc.SearchRequest(nil) + err := req.Build() + + assert.Equal(t, "https://endpoint", svc.Endpoint) + assert.NoError(t, err) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudsearchdomain/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/cloudsearchdomain/examples_test.go new file mode 100644 index 000000000..84c94c9a5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudsearchdomain/examples_test.go @@ -0,0 +1,89 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+
+package cloudsearchdomain_test
+
+import (
+ "bytes"
+ "fmt"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/cloudsearchdomain"
+)
+
+var _ time.Duration
+var _ bytes.Buffer
+
+func ExampleCloudSearchDomain_Search() {
+ svc := cloudsearchdomain.New(session.New())
+
+ params := &cloudsearchdomain.SearchInput{
+ Query: aws.String("Query"), // Required
+ Cursor: aws.String("Cursor"),
+ Expr: aws.String("Expr"),
+ Facet: aws.String("Facet"),
+ FilterQuery: aws.String("FilterQuery"),
+ Highlight: aws.String("Highlight"),
+ Partial: aws.Bool(true),
+ QueryOptions: aws.String("QueryOptions"),
+ QueryParser: aws.String("QueryParser"),
+ Return: aws.String("Return"),
+ Size: aws.Int64(1),
+ Sort: aws.String("Sort"),
+ Start: aws.Int64(1),
+ Stats: aws.String("Stat"),
+ }
+ resp, err := svc.Search(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleCloudSearchDomain_Suggest() {
+ svc := cloudsearchdomain.New(session.New())
+
+ params := &cloudsearchdomain.SuggestInput{
+ Query: aws.String("Query"), // Required
+ Suggester: aws.String("Suggester"), // Required
+ Size: aws.Int64(1),
+ }
+ resp, err := svc.Suggest(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleCloudSearchDomain_UploadDocuments() {
+ svc := cloudsearchdomain.New(session.New())
+
+ params := &cloudsearchdomain.UploadDocumentsInput{
+ ContentType: aws.String("ContentType"), // Required
+ Documents: bytes.NewReader([]byte("PAYLOAD")), // Required
+ }
+ resp, err := svc.UploadDocuments(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudsearchdomain/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudsearchdomain/service.go
new file mode 100644
index 000000000..1ac8e7ba4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/cloudsearchdomain/service.go
@@ -0,0 +1,96 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package cloudsearchdomain
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol/restjson"
+)
+
+// You use the AmazonCloudSearch2013 API to upload documents to a search domain
+// and search those documents.
+//
+// The endpoints for submitting UploadDocuments, Search, and Suggest requests
+// are domain-specific. To get the endpoints for your domain, use the Amazon
+// CloudSearch configuration service DescribeDomains action. The domain endpoints
+// are also displayed on the domain dashboard in the Amazon CloudSearch console.
+// You submit suggest requests to the search endpoint.
+//
+// For more information, see the Amazon CloudSearch Developer Guide (http://docs.aws.amazon.com/cloudsearch/latest/developerguide).
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though. +type CloudSearchDomain struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "cloudsearchdomain" + +// New creates a new instance of the CloudSearchDomain client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a CloudSearchDomain client from just a session. +// svc := cloudsearchdomain.New(mySession) +// +// // Create a CloudSearchDomain client with additional configuration +// svc := cloudsearchdomain.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudSearchDomain { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *CloudSearchDomain { + svc := &CloudSearchDomain{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningName: "cloudsearch", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2013-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a CloudSearchDomain operation and runs any +// custom request initialization. +func (c *CloudSearchDomain) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/api.go new file mode 100644 index 000000000..de18a11ca --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/api.go @@ -0,0 +1,1920 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cloudtrail provides a client for AWS CloudTrail. +package cloudtrail + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAddTags = "AddTags" + +// AddTagsRequest generates a "aws/request.Request" representing the +// client's request for the AddTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the AddTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddTagsRequest method. +// req, resp := client.AddTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudTrail) AddTagsRequest(input *AddTagsInput) (req *request.Request, output *AddTagsOutput) { + op := &request.Operation{ + Name: opAddTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &AddTagsOutput{} + req.Data = output + return +} + +// Adds one or more tags to a trail, up to a limit of 10. Tags must be unique +// per trail. Overwrites an existing tag's value when a new value is specified +// for an existing tag key. If you specify a key without a value, the tag will +// be created with the specified key and a value of null. You can tag a trail +// that applies to all regions only from the region in which the trail was created +// (that is, from its home region). +func (c *CloudTrail) AddTags(input *AddTagsInput) (*AddTagsOutput, error) { + req, out := c.AddTagsRequest(input) + err := req.Send() + return out, err +} + +const opCreateTrail = "CreateTrail" + +// CreateTrailRequest generates a "aws/request.Request" representing the +// client's request for the CreateTrail operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateTrail method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateTrailRequest method. +// req, resp := client.CreateTrailRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudTrail) CreateTrailRequest(input *CreateTrailInput) (req *request.Request, output *CreateTrailOutput) { + op := &request.Operation{ + Name: opCreateTrail, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTrailInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateTrailOutput{} + req.Data = output + return +} + +// Creates a trail that specifies the settings for delivery of log data to an +// Amazon S3 bucket. A maximum of five trails can exist in a region, irrespective +// of the region in which they were created. +func (c *CloudTrail) CreateTrail(input *CreateTrailInput) (*CreateTrailOutput, error) { + req, out := c.CreateTrailRequest(input) + err := req.Send() + return out, err +} + +const opDeleteTrail = "DeleteTrail" + +// DeleteTrailRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTrail operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteTrail method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteTrailRequest method. +// req, resp := client.DeleteTrailRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudTrail) DeleteTrailRequest(input *DeleteTrailInput) (req *request.Request, output *DeleteTrailOutput) { + op := &request.Operation{ + Name: opDeleteTrail, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTrailInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteTrailOutput{} + req.Data = output + return +} + +// Deletes a trail. This operation must be called from the region in which the +// trail was created. DeleteTrail cannot be called on the shadow trails (replicated +// trails in other regions) of a trail that is enabled in all regions. +func (c *CloudTrail) DeleteTrail(input *DeleteTrailInput) (*DeleteTrailOutput, error) { + req, out := c.DeleteTrailRequest(input) + err := req.Send() + return out, err +} + +const opDescribeTrails = "DescribeTrails" + +// DescribeTrailsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTrails operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeTrails method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeTrailsRequest method. +// req, resp := client.DescribeTrailsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudTrail) DescribeTrailsRequest(input *DescribeTrailsInput) (req *request.Request, output *DescribeTrailsOutput) { + op := &request.Operation{ + Name: opDescribeTrails, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTrailsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTrailsOutput{} + req.Data = output + return +} + +// Retrieves settings for the trail associated with the current region for your +// account. +func (c *CloudTrail) DescribeTrails(input *DescribeTrailsInput) (*DescribeTrailsOutput, error) { + req, out := c.DescribeTrailsRequest(input) + err := req.Send() + return out, err +} + +const opGetTrailStatus = "GetTrailStatus" + +// GetTrailStatusRequest generates a "aws/request.Request" representing the +// client's request for the GetTrailStatus operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetTrailStatus method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetTrailStatusRequest method. +// req, resp := client.GetTrailStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudTrail) GetTrailStatusRequest(input *GetTrailStatusInput) (req *request.Request, output *GetTrailStatusOutput) { + op := &request.Operation{ + Name: opGetTrailStatus, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetTrailStatusInput{} + } + + req = c.newRequest(op, input, output) + output = &GetTrailStatusOutput{} + req.Data = output + return +} + +// Returns a JSON-formatted list of information about the specified trail. Fields +// include information on delivery errors, Amazon SNS and Amazon S3 errors, +// and start and stop logging times for each trail. This operation returns trail +// status from a single region. To return trail status from all regions, you +// must call the operation on each region. +func (c *CloudTrail) GetTrailStatus(input *GetTrailStatusInput) (*GetTrailStatusOutput, error) { + req, out := c.GetTrailStatusRequest(input) + err := req.Send() + return out, err +} + +const opListPublicKeys = "ListPublicKeys" + +// ListPublicKeysRequest generates a "aws/request.Request" representing the +// client's request for the ListPublicKeys operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListPublicKeys method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListPublicKeysRequest method. +// req, resp := client.ListPublicKeysRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudTrail) ListPublicKeysRequest(input *ListPublicKeysInput) (req *request.Request, output *ListPublicKeysOutput) { + op := &request.Operation{ + Name: opListPublicKeys, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListPublicKeysInput{} + } + + req = c.newRequest(op, input, output) + output = &ListPublicKeysOutput{} + req.Data = output + return +} + +// Returns all public keys whose private keys were used to sign the digest files +// within the specified time range. The public key is needed to validate digest +// files that were signed with its corresponding private key. +// +// CloudTrail uses different private/public key pairs per region. Each digest +// file is signed with a private key unique to its region. Therefore, when you +// validate a digest file from a particular region, you must look in the same +// region for its corresponding public key. 
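+//
+// A usage sketch (editorial addition, not generated documentation; the
+// StartTime and EndTime fields are assumed from the documented time range, and
+// the aws and time packages are assumed to be imported by the caller):
+//
+//    resp, err := svc.ListPublicKeys(&cloudtrail.ListPublicKeysInput{
+//        StartTime: aws.Time(time.Now().Add(-24 * time.Hour)),
+//        EndTime:   aws.Time(time.Now()),
+//    })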
+func (c *CloudTrail) ListPublicKeys(input *ListPublicKeysInput) (*ListPublicKeysOutput, error) { + req, out := c.ListPublicKeysRequest(input) + err := req.Send() + return out, err +} + +const opListTags = "ListTags" + +// ListTagsRequest generates a "aws/request.Request" representing the +// client's request for the ListTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTagsRequest method. +// req, resp := client.ListTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudTrail) ListTagsRequest(input *ListTagsInput) (req *request.Request, output *ListTagsOutput) { + op := &request.Operation{ + Name: opListTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTagsOutput{} + req.Data = output + return +} + +// Lists the tags for the trail in the current region. +func (c *CloudTrail) ListTags(input *ListTagsInput) (*ListTagsOutput, error) { + req, out := c.ListTagsRequest(input) + err := req.Send() + return out, err +} + +const opLookupEvents = "LookupEvents" + +// LookupEventsRequest generates a "aws/request.Request" representing the +// client's request for the LookupEvents operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the LookupEvents method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the LookupEventsRequest method. +// req, resp := client.LookupEventsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudTrail) LookupEventsRequest(input *LookupEventsInput) (req *request.Request, output *LookupEventsOutput) { + op := &request.Operation{ + Name: opLookupEvents, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &LookupEventsInput{} + } + + req = c.newRequest(op, input, output) + output = &LookupEventsOutput{} + req.Data = output + return +} + +// Looks up API activity events captured by CloudTrail that create, update, +// or delete resources in your account. Events for a region can be looked up +// for the times in which you had CloudTrail turned on in that region during +// the last seven days. Lookup supports five different attributes: time range +// (defined by a start time and end time), user name, event name, resource type, +// and resource name. All attributes are optional. 
Any one lookup request
+// can specify a time range and at most one other attribute. The default number
+// of results returned is 10, with a maximum of 50 possible. The response includes
+// a token that you can use to get the next page of results.
+//
+// The rate of lookup requests is limited to one per second per account. If
+// this limit is exceeded, a throttling error occurs.
+//
+// Events that occurred during the selected time range will not be available
+// for lookup if CloudTrail logging was not enabled when the events occurred.
+func (c *CloudTrail) LookupEvents(input *LookupEventsInput) (*LookupEventsOutput, error) {
+ req, out := c.LookupEventsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opRemoveTags = "RemoveTags"
+
+// RemoveTagsRequest generates a "aws/request.Request" representing the
+// client's request for the RemoveTags operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the RemoveTags method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the RemoveTagsRequest method.
+// req, resp := client.RemoveTagsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CloudTrail) RemoveTagsRequest(input *RemoveTagsInput) (req *request.Request, output *RemoveTagsOutput) {
+ op := &request.Operation{
+ Name: opRemoveTags,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &RemoveTagsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &RemoveTagsOutput{}
+ req.Data = output
+ return
+}
+
+// Removes the specified tags from a trail.
+func (c *CloudTrail) RemoveTags(input *RemoveTagsInput) (*RemoveTagsOutput, error) {
+ req, out := c.RemoveTagsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opStartLogging = "StartLogging"
+
+// StartLoggingRequest generates a "aws/request.Request" representing the
+// client's request for the StartLogging operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the StartLogging method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the StartLoggingRequest method.
+// req, resp := client.StartLoggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudTrail) StartLoggingRequest(input *StartLoggingInput) (req *request.Request, output *StartLoggingOutput) { + op := &request.Operation{ + Name: opStartLogging, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartLoggingInput{} + } + + req = c.newRequest(op, input, output) + output = &StartLoggingOutput{} + req.Data = output + return +} + +// Starts the recording of AWS API calls and log file delivery for a trail. +// For a trail that is enabled in all regions, this operation must be called +// from the region in which the trail was created. This operation cannot be +// called on the shadow trails (replicated trails in other regions) of a trail +// that is enabled in all regions. +func (c *CloudTrail) StartLogging(input *StartLoggingInput) (*StartLoggingOutput, error) { + req, out := c.StartLoggingRequest(input) + err := req.Send() + return out, err +} + +const opStopLogging = "StopLogging" + +// StopLoggingRequest generates a "aws/request.Request" representing the +// client's request for the StopLogging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the StopLogging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the StopLoggingRequest method. +// req, resp := client.StopLoggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudTrail) StopLoggingRequest(input *StopLoggingInput) (req *request.Request, output *StopLoggingOutput) { + op := &request.Operation{ + Name: opStopLogging, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopLoggingInput{} + } + + req = c.newRequest(op, input, output) + output = &StopLoggingOutput{} + req.Data = output + return +} + +// Suspends the recording of AWS API calls and log file delivery for the specified +// trail. Under most circumstances, there is no need to use this action. You +// can update a trail without stopping it first. This action is the only way +// to stop recording. For a trail enabled in all regions, this operation must +// be called from the region in which the trail was created, or an InvalidHomeRegionException +// will occur. This operation cannot be called on the shadow trails (replicated +// trails in other regions) of a trail enabled in all regions. +func (c *CloudTrail) StopLogging(input *StopLoggingInput) (*StopLoggingOutput, error) { + req, out := c.StopLoggingRequest(input) + err := req.Send() + return out, err +} + +const opUpdateTrail = "UpdateTrail" + +// UpdateTrailRequest generates a "aws/request.Request" representing the +// client's request for the UpdateTrail operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateTrail method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateTrailRequest method. +// req, resp := client.UpdateTrailRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudTrail) UpdateTrailRequest(input *UpdateTrailInput) (req *request.Request, output *UpdateTrailOutput) { + op := &request.Operation{ + Name: opUpdateTrail, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateTrailInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateTrailOutput{} + req.Data = output + return +} + +// Updates the settings that specify delivery of log files. Changes to a trail +// do not require stopping the CloudTrail service. Use this action to designate +// an existing bucket for log delivery. If the existing bucket has previously +// been a target for CloudTrail log files, an IAM policy exists for the bucket. +// UpdateTrail must be called from the region in which the trail was created; +// otherwise, an InvalidHomeRegionException is thrown. +func (c *CloudTrail) UpdateTrail(input *UpdateTrailInput) (*UpdateTrailOutput, error) { + req, out := c.UpdateTrailRequest(input) + err := req.Send() + return out, err +} + +// Specifies the tags to add to a trail. +type AddTagsInput struct { + _ struct{} `type:"structure"` + + // Specifies the ARN of the trail to which one or more tags will be added. The + // format of a trail ARN is: + // + // arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail + ResourceId *string `type:"string" required:"true"` + + // Contains a list of CloudTrail tags, up to a limit of 10. + TagsList []*Tag `type:"list"` +} + +// String returns the string representation +func (s AddTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddTagsInput"} + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.TagsList != nil { + for i, v := range s.TagsList { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TagsList", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Returns the objects or data listed below if successful. Otherwise, returns +// an error. +type AddTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsOutput) GoString() string { + return s.String() +} + +// Specifies the settings for each trail. 
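+//
+// For illustration only (not part of the generated API): a minimal sketch of
+// building this input. Only Name and S3BucketName are required; the values
+// below are placeholders.
+//
+// input := &cloudtrail.CreateTrailInput{
+// Name: aws.String("my-trail"),
+// S3BucketName: aws.String("my-log-bucket"),
+// }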
+type CreateTrailInput struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies a log group name using an Amazon Resource Name (ARN), a unique
+	// identifier that represents the log group to which CloudTrail logs will be
+	// delivered. Not required unless you specify CloudWatchLogsRoleArn.
+	CloudWatchLogsLogGroupArn *string `type:"string"`
+
+	// Specifies the role for the CloudWatch Logs endpoint to assume to write to
+	// a user's log group.
+	CloudWatchLogsRoleArn *string `type:"string"`
+
+	// Specifies whether log file integrity validation is enabled. The default is
+	// false.
+	//
+	// When you disable log file integrity validation, the chain of digest files
+	// is broken after one hour. CloudTrail will not create digest files for log
+	// files that were delivered during a period in which log file integrity validation
+	// was disabled. For example, if you enable log file integrity validation at
+	// noon on January 1, disable it at noon on January 2, and re-enable it at noon
+	// on January 10, digest files will not be created for the log files delivered
+	// from noon on January 2 to noon on January 10. The same applies whenever you
+	// stop CloudTrail logging or delete a trail.
+	EnableLogFileValidation *bool `type:"boolean"`
+
+	// Specifies whether the trail is publishing events from global services such
+	// as IAM to the log files.
+	IncludeGlobalServiceEvents *bool `type:"boolean"`
+
+	// Specifies whether the trail is created in the current region or in all regions.
+	// The default is false.
+	IsMultiRegionTrail *bool `type:"boolean"`
+
+	// Specifies the KMS key ID to use to encrypt the logs delivered by CloudTrail.
+	// The value can be an alias name prefixed by "alias/", a fully specified
+	// ARN to an alias, a fully specified ARN to a key, or a globally unique identifier.
+	//
+	// Examples:
+	//
+	// alias/MyAliasName
+	//
+	// arn:aws:kms:us-east-1:123456789012:alias/MyAliasName
+	//
+	// arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
+	//
+	// 12345678-1234-1234-1234-123456789012
+	KmsKeyId *string `type:"string"`
+
+	// Specifies the name of the trail. The name must meet the following requirements:
+	//
+	// Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores
+	// (_), or dashes (-)
+	//
+	// Start with a letter or number, and end with a letter or number
+	//
+	// Be between 3 and 128 characters
+	//
+	// Have no adjacent periods, underscores or dashes. Names like my-_namespace
+	// and my--namespace are invalid.
+	//
+	// Not be in IP address format (for example, 192.168.5.4)
+	Name *string `type:"string" required:"true"`
+
+	// Specifies the name of the Amazon S3 bucket designated for publishing log
+	// files. See Amazon S3 Bucket Naming Requirements (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/create_trail_naming_policy.html).
+	S3BucketName *string `type:"string" required:"true"`
+
+	// Specifies the Amazon S3 key prefix that comes after the name of the bucket
+	// you have designated for log file delivery. For more information, see Finding
+	// Your CloudTrail Log Files (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-find-log-files.html).
+	// The maximum length is 200 characters.
+	S3KeyPrefix *string `type:"string"`
+
+	// Specifies the name of the Amazon SNS topic defined for notification of log
+	// file delivery. The maximum length is 256 characters.
+ SnsTopicName *string `type:"string"` +} + +// String returns the string representation +func (s CreateTrailInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrailInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateTrailInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTrailInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.S3BucketName == nil { + invalidParams.Add(request.NewErrParamRequired("S3BucketName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Returns the objects or data listed below if successful. Otherwise, returns +// an error. +type CreateTrailOutput struct { + _ struct{} `type:"structure"` + + // Specifies the Amazon Resource Name (ARN) of the log group to which CloudTrail + // logs will be delivered. + CloudWatchLogsLogGroupArn *string `type:"string"` + + // Specifies the role for the CloudWatch Logs endpoint to assume to write to + // a user's log group. + CloudWatchLogsRoleArn *string `type:"string"` + + // Specifies whether the trail is publishing events from global services such + // as IAM to the log files. + IncludeGlobalServiceEvents *bool `type:"boolean"` + + // Specifies whether the trail exists in one region or in all regions. + IsMultiRegionTrail *bool `type:"boolean"` + + // Specifies the KMS key ID that encrypts the logs delivered by CloudTrail. + // The value is a fully specified ARN to a KMS key in the format: + // + // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + KmsKeyId *string `type:"string"` + + // Specifies whether log file integrity validation is enabled. + LogFileValidationEnabled *bool `type:"boolean"` + + // Specifies the name of the trail. + Name *string `type:"string"` + + // Specifies the name of the Amazon S3 bucket designated for publishing log + // files. + S3BucketName *string `type:"string"` + + // Specifies the Amazon S3 key prefix that comes after the name of the bucket + // you have designated for log file delivery. For more information, see Finding + // Your CloudTrail Log Files (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-find-log-files.html). + S3KeyPrefix *string `type:"string"` + + // Specifies the ARN of the Amazon SNS topic that CloudTrail uses to send notifications + // when log files are delivered. The format of a topic ARN is: + // + // arn:aws:sns:us-east-1:123456789012:MyTopic + SnsTopicARN *string `type:"string"` + + // This field is deprecated. Use SnsTopicARN. + SnsTopicName *string `deprecated:"true" type:"string"` + + // Specifies the ARN of the trail that was created. The format of a trail ARN + // is: + // + // arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail + TrailARN *string `type:"string"` +} + +// String returns the string representation +func (s CreateTrailOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrailOutput) GoString() string { + return s.String() +} + +// The request that specifies the name of a trail to delete. +type DeleteTrailInput struct { + _ struct{} `type:"structure"` + + // Specifies the name or the CloudTrail ARN of the trail to be deleted. 
The + // format of a trail ARN is: + // + // arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail + Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTrailInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTrailInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteTrailInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTrailInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Returns the objects or data listed below if successful. Otherwise, returns +// an error. +type DeleteTrailOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteTrailOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTrailOutput) GoString() string { + return s.String() +} + +// Returns information about the trail. +type DescribeTrailsInput struct { + _ struct{} `type:"structure"` + + // Specifies whether to include shadow trails in the response. A shadow trail + // is the replication in a region of a trail that was created in a different + // region. The default is true. + IncludeShadowTrails *bool `locationName:"includeShadowTrails" type:"boolean"` + + // Specifies a list of trail names, trail ARNs, or both, of the trails to describe. + // The format of a trail ARN is: + // + // arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail + // + // If an empty list is specified, information for the trail in the current + // region is returned. + // + // If an empty list is specified and IncludeShadowTrails is false, then information + // for all trails in the current region is returned. + // + // If an empty list is specified and IncludeShadowTrails is null or true, + // then information for all trails in the current region and any associated + // shadow trails in other regions is returned. + // + // If one or more trail names are specified, information is returned only + // if the names match the names of trails belonging only to the current region. + // To return information about a trail in another region, you must specify its + // trail ARN. + TrailNameList []*string `locationName:"trailNameList" type:"list"` +} + +// String returns the string representation +func (s DescribeTrailsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTrailsInput) GoString() string { + return s.String() +} + +// Returns the objects or data listed below if successful. Otherwise, returns +// an error. +type DescribeTrailsOutput struct { + _ struct{} `type:"structure"` + + // The list of trail objects. + TrailList []*Trail `locationName:"trailList" type:"list"` +} + +// String returns the string representation +func (s DescribeTrailsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTrailsOutput) GoString() string { + return s.String() +} + +// Contains information about an event that was returned by a lookup request. +// The result includes a representation of a CloudTrail event. 
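+//
+// For illustration only: the CloudTrailEvent field carries the raw event as a
+// JSON string, so it can be decoded with the standard library's encoding/json
+// package. "ev" below is a placeholder *Event value.
+//
+// var raw map[string]interface{}
+// if ev.CloudTrailEvent != nil {
+// err := json.Unmarshal([]byte(*ev.CloudTrailEvent), &raw)
+// // handle err, then inspect fields such as raw["eventName"]
+// }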
+type Event struct { + _ struct{} `type:"structure"` + + // A JSON string that contains a representation of the event returned. + CloudTrailEvent *string `type:"string"` + + // The CloudTrail ID of the event returned. + EventId *string `type:"string"` + + // The name of the event returned. + EventName *string `type:"string"` + + // The date and time of the event returned. + EventTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // A list of resources referenced by the event returned. + Resources []*Resource `type:"list"` + + // A user name or role name of the requester that called the API in the event + // returned. + Username *string `type:"string"` +} + +// String returns the string representation +func (s Event) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Event) GoString() string { + return s.String() +} + +// The name of a trail about which you want the current status. +type GetTrailStatusInput struct { + _ struct{} `type:"structure"` + + // Specifies the name or the CloudTrail ARN of the trail for which you are requesting + // status. To get the status of a shadow trail (a replication of the trail in + // another region), you must specify its ARN. The format of a trail ARN is: + // + // arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail + Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetTrailStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTrailStatusInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetTrailStatusInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetTrailStatusInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Returns the objects or data listed below if successful. Otherwise, returns +// an error. +type GetTrailStatusOutput struct { + _ struct{} `type:"structure"` + + // Whether the CloudTrail is currently logging AWS API calls. + IsLogging *bool `type:"boolean"` + + // Displays any CloudWatch Logs error that CloudTrail encountered when attempting + // to deliver logs to CloudWatch Logs. + LatestCloudWatchLogsDeliveryError *string `type:"string"` + + // Displays the most recent date and time when CloudTrail delivered logs to + // CloudWatch Logs. + LatestCloudWatchLogsDeliveryTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // This field is deprecated. + LatestDeliveryAttemptSucceeded *string `type:"string"` + + // This field is deprecated. + LatestDeliveryAttemptTime *string `type:"string"` + + // Displays any Amazon S3 error that CloudTrail encountered when attempting + // to deliver log files to the designated bucket. For more information see the + // topic Error Responses (http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html) + // in the Amazon S3 API Reference. + // + // This error occurs only when there is a problem with the destination S3 + // bucket and will not occur for timeouts. To resolve the issue, create a new + // bucket and call UpdateTrail to specify the new bucket, or fix the existing + // objects so that CloudTrail can again write to the bucket. 
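+	//
+	// For illustration only: a sketch of surfacing this value after a status
+	// call ("svc" is a placeholder client; the trail name is a placeholder).
+	//
+	// status, err := svc.GetTrailStatus(&cloudtrail.GetTrailStatusInput{
+	// Name: aws.String("my-trail"),
+	// })
+	// if err == nil && status.LatestDeliveryError != nil {
+	// fmt.Println(*status.LatestDeliveryError)
+	// }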
+ LatestDeliveryError *string `type:"string"` + + // Specifies the date and time that CloudTrail last delivered log files to an + // account's Amazon S3 bucket. + LatestDeliveryTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Displays any Amazon S3 error that CloudTrail encountered when attempting + // to deliver a digest file to the designated bucket. For more information see + // the topic Error Responses (http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html) + // in the Amazon S3 API Reference. + // + // This error occurs only when there is a problem with the destination S3 + // bucket and will not occur for timeouts. To resolve the issue, create a new + // bucket and call UpdateTrail to specify the new bucket, or fix the existing + // objects so that CloudTrail can again write to the bucket. + LatestDigestDeliveryError *string `type:"string"` + + // Specifies the date and time that CloudTrail last delivered a digest file + // to an account's Amazon S3 bucket. + LatestDigestDeliveryTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // This field is deprecated. + LatestNotificationAttemptSucceeded *string `type:"string"` + + // This field is deprecated. + LatestNotificationAttemptTime *string `type:"string"` + + // Displays any Amazon SNS error that CloudTrail encountered when attempting + // to send a notification. For more information about Amazon SNS errors, see + // the Amazon SNS Developer Guide (http://docs.aws.amazon.com/sns/latest/dg/welcome.html). + LatestNotificationError *string `type:"string"` + + // Specifies the date and time of the most recent Amazon SNS notification that + // CloudTrail has written a new log file to an account's Amazon S3 bucket. + LatestNotificationTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Specifies the most recent date and time when CloudTrail started recording + // API calls for an AWS account. + StartLoggingTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Specifies the most recent date and time when CloudTrail stopped recording + // API calls for an AWS account. + StopLoggingTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // This field is deprecated. + TimeLoggingStarted *string `type:"string"` + + // This field is deprecated. + TimeLoggingStopped *string `type:"string"` +} + +// String returns the string representation +func (s GetTrailStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTrailStatusOutput) GoString() string { + return s.String() +} + +// Requests the public keys for a specified time range. +type ListPublicKeysInput struct { + _ struct{} `type:"structure"` + + // Optionally specifies, in UTC, the end of the time range to look up public + // keys for CloudTrail digest files. If not specified, the current time is used. + EndTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Reserved for future use. + NextToken *string `type:"string"` + + // Optionally specifies, in UTC, the start of the time range to look up public + // keys for CloudTrail digest files. If not specified, the current time is used, + // and the current public key is returned. 
+ StartTime *time.Time `type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s ListPublicKeysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPublicKeysInput) GoString() string { + return s.String() +} + +// Returns the objects or data listed below if successful. Otherwise, returns +// an error. +type ListPublicKeysOutput struct { + _ struct{} `type:"structure"` + + // Reserved for future use. + NextToken *string `type:"string"` + + // Contains an array of PublicKey objects. + // + // The returned public keys may have validity time ranges that overlap. + PublicKeyList []*PublicKey `type:"list"` +} + +// String returns the string representation +func (s ListPublicKeysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPublicKeysOutput) GoString() string { + return s.String() +} + +// Specifies a list of trail tags to return. +type ListTagsInput struct { + _ struct{} `type:"structure"` + + // Reserved for future use. + NextToken *string `type:"string"` + + // Specifies a list of trail ARNs whose tags will be listed. The list has a + // limit of 20 ARNs. The format of a trail ARN is: + // + // arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail + ResourceIdList []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsInput"} + if s.ResourceIdList == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceIdList")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Returns the objects or data listed below if successful. Otherwise, returns +// an error. +type ListTagsOutput struct { + _ struct{} `type:"structure"` + + // Reserved for future use. + NextToken *string `type:"string"` + + // A list of resource tags. + ResourceTagList []*ResourceTag `type:"list"` +} + +// String returns the string representation +func (s ListTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsOutput) GoString() string { + return s.String() +} + +// Specifies an attribute and value that filter the events returned. +type LookupAttribute struct { + _ struct{} `type:"structure"` + + // Specifies an attribute on which to filter the events returned. + AttributeKey *string `type:"string" required:"true" enum:"LookupAttributeKey"` + + // Specifies a value for the specified AttributeKey. + AttributeValue *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s LookupAttribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LookupAttribute) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
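+//
+// For illustration only: Validate runs automatically before a request is sent,
+// but it can also be called directly. The value below intentionally omits the
+// required AttributeValue field.
+//
+// attr := &cloudtrail.LookupAttribute{
+// AttributeKey: aws.String("Username"),
+// }
+// err := attr.Validate() // non-nil: AttributeValue is a required field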
+func (s *LookupAttribute) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "LookupAttribute"}
+	if s.AttributeKey == nil {
+		invalidParams.Add(request.NewErrParamRequired("AttributeKey"))
+	}
+	if s.AttributeValue == nil {
+		invalidParams.Add(request.NewErrParamRequired("AttributeValue"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Contains a request for LookupEvents.
+type LookupEventsInput struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies that only events that occur before or at the specified time are
+	// returned. If the specified end time is before the specified start time, an
+	// error is returned.
+	EndTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+	// Contains a list of lookup attributes. Currently the list can contain only
+	// one item.
+	LookupAttributes []*LookupAttribute `type:"list"`
+
+	// The number of events to return. Possible values are 1 through 50. The default
+	// is 10.
+	MaxResults *int64 `min:"1" type:"integer"`
+
+	// The token to use to get the next page of results after a previous API call.
+	// This token must be passed in with the same parameters that were specified
+	// in the original call. For example, if the original call specified an
+	// AttributeKey of 'Username' with a value of 'root', the call with NextToken
+	// should include those same parameters.
+	NextToken *string `type:"string"`
+
+	// Specifies that only events that occur after or at the specified time are
+	// returned. If the specified start time is after the specified end time, an
+	// error is returned.
+	StartTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+}
+
+// String returns the string representation
+func (s LookupEventsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LookupEventsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LookupEventsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "LookupEventsInput"}
+	if s.MaxResults != nil && *s.MaxResults < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+	}
+	if s.LookupAttributes != nil {
+		for i, v := range s.LookupAttributes {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LookupAttributes", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Contains a response to a LookupEvents action.
+type LookupEventsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of events returned based on the lookup attributes specified and the
+	// CloudTrail event. The events list is sorted by time. The most recent event
+	// is listed first.
+	Events []*Event `type:"list"`
+
+	// The token to use to get the next page of results after a previous API call.
+	// If the token does not appear, there are no more results to return. The token
+	// must be passed in with the same parameters as the previous call. For example,
+	// if the original call specified an AttributeKey of 'Username' with a value
+	// of 'root', the call with NextToken should include those same parameters.
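+	//
+	// For illustration only: a sketch of paging through results by feeding the
+	// token back ("svc" is a placeholder client).
+	//
+	// input := &cloudtrail.LookupEventsInput{MaxResults: aws.Int64(50)}
+	// for {
+	// page, err := svc.LookupEvents(input)
+	// if err != nil {
+	// break // handle the error
+	// }
+	// // ... consume page.Events ...
+	// if page.NextToken == nil {
+	// break // no more results
+	// }
+	// input.NextToken = page.NextToken
+	// }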
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s LookupEventsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LookupEventsOutput) GoString() string { + return s.String() +} + +// Contains information about a returned public key. +type PublicKey struct { + _ struct{} `type:"structure"` + + // The fingerprint of the public key. + Fingerprint *string `type:"string"` + + // The ending time of validity of the public key. + ValidityEndTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The starting time of validity of the public key. + ValidityStartTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The DER encoded public key value in PKCS#1 format. + // + // Value is automatically base64 encoded/decoded by the SDK. + Value []byte `type:"blob"` +} + +// String returns the string representation +func (s PublicKey) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PublicKey) GoString() string { + return s.String() +} + +// Specifies the tags to remove from a trail. +type RemoveTagsInput struct { + _ struct{} `type:"structure"` + + // Specifies the ARN of the trail from which tags should be removed. The format + // of a trail ARN is: + // + // arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail + ResourceId *string `type:"string" required:"true"` + + // Specifies a list of tags to be removed. + TagsList []*Tag `type:"list"` +} + +// String returns the string representation +func (s RemoveTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RemoveTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemoveTagsInput"} + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.TagsList != nil { + for i, v := range s.TagsList { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TagsList", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Returns the objects or data listed below if successful. Otherwise, returns +// an error. +type RemoveTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsOutput) GoString() string { + return s.String() +} + +// Specifies the type and name of a resource referenced by an event. +type Resource struct { + _ struct{} `type:"structure"` + + // The name of the resource referenced by the event returned. These are user-created + // names whose values will depend on the environment. For example, the resource + // name might be "auto-scaling-test-group" for an Auto Scaling Group or "i-1234567" + // for an EC2 Instance. + ResourceName *string `type:"string"` + + // The type of a resource referenced by the event returned. When the resource + // type cannot be determined, null is returned. Some examples of resource types + // are: Instance for EC2, Trail for CloudTrail, DBInstance for RDS, and AccessKey + // for IAM. 
For a list of resource types supported for event lookup, see Resource + // Types Supported for Event Lookup (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/lookup_supported_resourcetypes.html). + ResourceType *string `type:"string"` +} + +// String returns the string representation +func (s Resource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Resource) GoString() string { + return s.String() +} + +// A resource tag. +type ResourceTag struct { + _ struct{} `type:"structure"` + + // Specifies the ARN of the resource. + ResourceId *string `type:"string"` + + // A list of tags. + TagsList []*Tag `type:"list"` +} + +// String returns the string representation +func (s ResourceTag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceTag) GoString() string { + return s.String() +} + +// The request to CloudTrail to start logging AWS API calls for an account. +type StartLoggingInput struct { + _ struct{} `type:"structure"` + + // Specifies the name or the CloudTrail ARN of the trail for which CloudTrail + // logs AWS API calls. The format of a trail ARN is: + // + // arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail + Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s StartLoggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartLoggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartLoggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartLoggingInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Returns the objects or data listed below if successful. Otherwise, returns +// an error. +type StartLoggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s StartLoggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartLoggingOutput) GoString() string { + return s.String() +} + +// Passes the request to CloudTrail to stop logging AWS API calls for the specified +// account. +type StopLoggingInput struct { + _ struct{} `type:"structure"` + + // Specifies the name or the CloudTrail ARN of the trail for which CloudTrail + // will stop logging AWS API calls. The format of a trail ARN is: + // + // arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail + Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s StopLoggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopLoggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StopLoggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StopLoggingInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Returns the objects or data listed below if successful. Otherwise, returns +// an error. 
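+//
+// For illustration only: the output carries no fields, so callers typically
+// check only the returned error ("svc" and the trail name are placeholders).
+//
+// _, err := svc.StopLogging(&cloudtrail.StopLoggingInput{
+// Name: aws.String("my-trail"),
+// })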
+type StopLoggingOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s StopLoggingOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StopLoggingOutput) GoString() string {
+	return s.String()
+}
+
+// A custom key-value pair associated with a resource such as a CloudTrail trail.
+type Tag struct {
+	_ struct{} `type:"structure"`
+
+	// The key in a key-value pair. The key must be no longer than 128 Unicode
+	// characters. The key must be unique for the resource to which it applies.
+	Key *string `type:"string" required:"true"`
+
+	// The value in a key-value pair of a tag. The value must be no longer than
+	// 256 Unicode characters.
+	Value *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Tag) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Tag) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Tag) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "Tag"}
+	if s.Key == nil {
+		invalidParams.Add(request.NewErrParamRequired("Key"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The settings for a trail.
+type Trail struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies an Amazon Resource Name (ARN), a unique identifier that represents
+	// the log group to which CloudTrail logs will be delivered.
+	CloudWatchLogsLogGroupArn *string `type:"string"`
+
+	// Specifies the role for the CloudWatch Logs endpoint to assume to write to
+	// a user's log group.
+	CloudWatchLogsRoleArn *string `type:"string"`
+
+	// The region in which the trail was created.
+	HomeRegion *string `type:"string"`
+
+	// Set to True to include AWS API calls from AWS global services such as IAM.
+	// Otherwise, False.
+	IncludeGlobalServiceEvents *bool `type:"boolean"`
+
+	// Specifies whether the trail belongs only to one region or exists in all regions.
+	IsMultiRegionTrail *bool `type:"boolean"`
+
+	// Specifies the KMS key ID that encrypts the logs delivered by CloudTrail.
+	// The value is a fully specified ARN to a KMS key in the format:
+	//
+	// arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
+	KmsKeyId *string `type:"string"`
+
+	// Specifies whether log file validation is enabled.
+	LogFileValidationEnabled *bool `type:"boolean"`
+
+	// Name of the trail set by calling CreateTrail. The maximum length is 128 characters.
+	Name *string `type:"string"`
+
+	// Name of the Amazon S3 bucket into which CloudTrail delivers your trail files.
+	// See Amazon S3 Bucket Naming Requirements (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/create_trail_naming_policy.html).
+	S3BucketName *string `type:"string"`
+
+	// Specifies the Amazon S3 key prefix that comes after the name of the bucket
+	// you have designated for log file delivery. For more information, see Finding
+	// Your CloudTrail Log Files (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-find-log-files.html).
+	// The maximum length is 200 characters.
+	S3KeyPrefix *string `type:"string"`
+
+	// Specifies the ARN of the Amazon SNS topic that CloudTrail uses to send notifications
+	// when log files are delivered.
The format of a topic ARN is:
+	//
+	// arn:aws:sns:us-east-1:123456789012:MyTopic
+	SnsTopicARN *string `type:"string"`
+
+	// This field is deprecated. Use SnsTopicARN.
+	SnsTopicName *string `deprecated:"true" type:"string"`
+
+	// Specifies the ARN of the trail. The format of a trail ARN is:
+	//
+	// arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail
+	TrailARN *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Trail) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Trail) GoString() string {
+	return s.String()
+}
+
+// Specifies settings to update for the trail.
+type UpdateTrailInput struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies a log group name using an Amazon Resource Name (ARN), a unique
+	// identifier that represents the log group to which CloudTrail logs will be
+	// delivered. Not required unless you specify CloudWatchLogsRoleArn.
+	CloudWatchLogsLogGroupArn *string `type:"string"`
+
+	// Specifies the role for the CloudWatch Logs endpoint to assume to write to
+	// a user's log group.
+	CloudWatchLogsRoleArn *string `type:"string"`
+
+	// Specifies whether log file validation is enabled. The default is false.
+	//
+	// When you disable log file integrity validation, the chain of digest files
+	// is broken after one hour. CloudTrail will not create digest files for log
+	// files that were delivered during a period in which log file integrity validation
+	// was disabled. For example, if you enable log file integrity validation at
+	// noon on January 1, disable it at noon on January 2, and re-enable it at noon
+	// on January 10, digest files will not be created for the log files delivered
+	// from noon on January 2 to noon on January 10. The same applies whenever you
+	// stop CloudTrail logging or delete a trail.
+	EnableLogFileValidation *bool `type:"boolean"`
+
+	// Specifies whether the trail is publishing events from global services such
+	// as IAM to the log files.
+	IncludeGlobalServiceEvents *bool `type:"boolean"`
+
+	// Specifies whether the trail applies only to the current region or to all
+	// regions. The default is false. If the trail exists only in the current region
+	// and this value is set to true, shadow trails (replications of the trail)
+	// will be created in the other regions. If the trail exists in all regions
+	// and this value is set to false, the trail will remain in the region where
+	// it was created, and its shadow trails in other regions will be deleted.
+	IsMultiRegionTrail *bool `type:"boolean"`
+
+	// Specifies the KMS key ID to use to encrypt the logs delivered by CloudTrail.
+	// The value can be an alias name prefixed by "alias/", a fully specified
+	// ARN to an alias, a fully specified ARN to a key, or a globally unique identifier.
+	//
+	// Examples:
+	//
+	// alias/MyAliasName
+	//
+	// arn:aws:kms:us-east-1:123456789012:alias/MyAliasName
+	//
+	// arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
+	//
+	// 12345678-1234-1234-1234-123456789012
+	KmsKeyId *string `type:"string"`
+
+	// Specifies the name of the trail or trail ARN. If Name is a trail name, the
+	// string must meet the following requirements:
+	//
+	// Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores
+	// (_), or dashes (-)
+	//
+	// Start with a letter or number, and end with a letter or number
+	//
+	// Be between 3 and 128 characters
+	//
+	// Have no adjacent periods, underscores or dashes.
Names like my-_namespace + // and my--namespace are invalid. + // + // Not be in IP address format (for example, 192.168.5.4) + // + // If Name is a trail ARN, it must be in the format: + // + // arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail + Name *string `type:"string" required:"true"` + + // Specifies the name of the Amazon S3 bucket designated for publishing log + // files. See Amazon S3 Bucket Naming Requirements (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/create_trail_naming_policy.html). + S3BucketName *string `type:"string"` + + // Specifies the Amazon S3 key prefix that comes after the name of the bucket + // you have designated for log file delivery. For more information, see Finding + // Your CloudTrail Log Files (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-find-log-files.html). + // The maximum length is 200 characters. + S3KeyPrefix *string `type:"string"` + + // Specifies the name of the Amazon SNS topic defined for notification of log + // file delivery. The maximum length is 256 characters. + SnsTopicName *string `type:"string"` +} + +// String returns the string representation +func (s UpdateTrailInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTrailInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateTrailInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateTrailInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Returns the objects or data listed below if successful. Otherwise, returns +// an error. +type UpdateTrailOutput struct { + _ struct{} `type:"structure"` + + // Specifies the Amazon Resource Name (ARN) of the log group to which CloudTrail + // logs will be delivered. + CloudWatchLogsLogGroupArn *string `type:"string"` + + // Specifies the role for the CloudWatch Logs endpoint to assume to write to + // a user's log group. + CloudWatchLogsRoleArn *string `type:"string"` + + // Specifies whether the trail is publishing events from global services such + // as IAM to the log files. + IncludeGlobalServiceEvents *bool `type:"boolean"` + + // Specifies whether the trail exists in one region or in all regions. + IsMultiRegionTrail *bool `type:"boolean"` + + // Specifies the KMS key ID that encrypts the logs delivered by CloudTrail. + // The value is a fully specified ARN to a KMS key in the format: + // + // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + KmsKeyId *string `type:"string"` + + // Specifies whether log file integrity validation is enabled. + LogFileValidationEnabled *bool `type:"boolean"` + + // Specifies the name of the trail. + Name *string `type:"string"` + + // Specifies the name of the Amazon S3 bucket designated for publishing log + // files. + S3BucketName *string `type:"string"` + + // Specifies the Amazon S3 key prefix that comes after the name of the bucket + // you have designated for log file delivery. For more information, see Finding + // Your CloudTrail Log Files (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-find-log-files.html). + S3KeyPrefix *string `type:"string"` + + // Specifies the ARN of the Amazon SNS topic that CloudTrail uses to send notifications + // when log files are delivered. 
The format of a topic ARN is: + // + // arn:aws:sns:us-east-1:123456789012:MyTopic + SnsTopicARN *string `type:"string"` + + // This field is deprecated. Use SnsTopicARN. + SnsTopicName *string `deprecated:"true" type:"string"` + + // Specifies the ARN of the trail that was updated. The format of a trail ARN + // is: + // + // arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail + TrailARN *string `type:"string"` +} + +// String returns the string representation +func (s UpdateTrailOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTrailOutput) GoString() string { + return s.String() +} + +const ( + // @enum LookupAttributeKey + LookupAttributeKeyEventId = "EventId" + // @enum LookupAttributeKey + LookupAttributeKeyEventName = "EventName" + // @enum LookupAttributeKey + LookupAttributeKeyUsername = "Username" + // @enum LookupAttributeKey + LookupAttributeKeyResourceType = "ResourceType" + // @enum LookupAttributeKey + LookupAttributeKeyResourceName = "ResourceName" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/cloudtrailiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/cloudtrailiface/interface.go new file mode 100644 index 000000000..3100f701a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/cloudtrailiface/interface.go @@ -0,0 +1,62 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cloudtrailiface provides an interface for the AWS CloudTrail. +package cloudtrailiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/cloudtrail" +) + +// CloudTrailAPI is the interface type for cloudtrail.CloudTrail. +type CloudTrailAPI interface { + AddTagsRequest(*cloudtrail.AddTagsInput) (*request.Request, *cloudtrail.AddTagsOutput) + + AddTags(*cloudtrail.AddTagsInput) (*cloudtrail.AddTagsOutput, error) + + CreateTrailRequest(*cloudtrail.CreateTrailInput) (*request.Request, *cloudtrail.CreateTrailOutput) + + CreateTrail(*cloudtrail.CreateTrailInput) (*cloudtrail.CreateTrailOutput, error) + + DeleteTrailRequest(*cloudtrail.DeleteTrailInput) (*request.Request, *cloudtrail.DeleteTrailOutput) + + DeleteTrail(*cloudtrail.DeleteTrailInput) (*cloudtrail.DeleteTrailOutput, error) + + DescribeTrailsRequest(*cloudtrail.DescribeTrailsInput) (*request.Request, *cloudtrail.DescribeTrailsOutput) + + DescribeTrails(*cloudtrail.DescribeTrailsInput) (*cloudtrail.DescribeTrailsOutput, error) + + GetTrailStatusRequest(*cloudtrail.GetTrailStatusInput) (*request.Request, *cloudtrail.GetTrailStatusOutput) + + GetTrailStatus(*cloudtrail.GetTrailStatusInput) (*cloudtrail.GetTrailStatusOutput, error) + + ListPublicKeysRequest(*cloudtrail.ListPublicKeysInput) (*request.Request, *cloudtrail.ListPublicKeysOutput) + + ListPublicKeys(*cloudtrail.ListPublicKeysInput) (*cloudtrail.ListPublicKeysOutput, error) + + ListTagsRequest(*cloudtrail.ListTagsInput) (*request.Request, *cloudtrail.ListTagsOutput) + + ListTags(*cloudtrail.ListTagsInput) (*cloudtrail.ListTagsOutput, error) + + LookupEventsRequest(*cloudtrail.LookupEventsInput) (*request.Request, *cloudtrail.LookupEventsOutput) + + LookupEvents(*cloudtrail.LookupEventsInput) (*cloudtrail.LookupEventsOutput, error) + + RemoveTagsRequest(*cloudtrail.RemoveTagsInput) (*request.Request, *cloudtrail.RemoveTagsOutput) + + RemoveTags(*cloudtrail.RemoveTagsInput) (*cloudtrail.RemoveTagsOutput, error) + + StartLoggingRequest(*cloudtrail.StartLoggingInput) (*request.Request, 
*cloudtrail.StartLoggingOutput) + + StartLogging(*cloudtrail.StartLoggingInput) (*cloudtrail.StartLoggingOutput, error) + + StopLoggingRequest(*cloudtrail.StopLoggingInput) (*request.Request, *cloudtrail.StopLoggingOutput) + + StopLogging(*cloudtrail.StopLoggingInput) (*cloudtrail.StopLoggingOutput, error) + + UpdateTrailRequest(*cloudtrail.UpdateTrailInput) (*request.Request, *cloudtrail.UpdateTrailOutput) + + UpdateTrail(*cloudtrail.UpdateTrailInput) (*cloudtrail.UpdateTrailOutput, error) +} + +var _ CloudTrailAPI = (*cloudtrail.CloudTrail)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/examples_test.go new file mode 100644 index 000000000..2a78c44e7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/examples_test.go @@ -0,0 +1,296 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package cloudtrail_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/cloudtrail" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleCloudTrail_AddTags() { + svc := cloudtrail.New(session.New()) + + params := &cloudtrail.AddTagsInput{ + ResourceId: aws.String("String"), // Required + TagsList: []*cloudtrail.Tag{ + { // Required + Key: aws.String("String"), // Required + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.AddTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudTrail_CreateTrail() { + svc := cloudtrail.New(session.New()) + + params := &cloudtrail.CreateTrailInput{ + Name: aws.String("String"), // Required + S3BucketName: aws.String("String"), // Required + CloudWatchLogsLogGroupArn: aws.String("String"), + CloudWatchLogsRoleArn: aws.String("String"), + EnableLogFileValidation: aws.Bool(true), + IncludeGlobalServiceEvents: aws.Bool(true), + IsMultiRegionTrail: aws.Bool(true), + KmsKeyId: aws.String("String"), + S3KeyPrefix: aws.String("String"), + SnsTopicName: aws.String("String"), + } + resp, err := svc.CreateTrail(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudTrail_DeleteTrail() { + svc := cloudtrail.New(session.New()) + + params := &cloudtrail.DeleteTrailInput{ + Name: aws.String("String"), // Required + } + resp, err := svc.DeleteTrail(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudTrail_DescribeTrails() { + svc := cloudtrail.New(session.New()) + + params := &cloudtrail.DescribeTrailsInput{ + IncludeShadowTrails: aws.Bool(true), + TrailNameList: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeTrails(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudTrail_GetTrailStatus() { + svc := cloudtrail.New(session.New()) + + params := &cloudtrail.GetTrailStatusInput{ + Name: aws.String("String"), // Required + } + resp, err := svc.GetTrailStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudTrail_ListPublicKeys() { + svc := cloudtrail.New(session.New()) + + params := &cloudtrail.ListPublicKeysInput{ + EndTime: aws.Time(time.Now()), + NextToken: aws.String("String"), + StartTime: aws.Time(time.Now()), + } + resp, err := svc.ListPublicKeys(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudTrail_ListTags() { + svc := cloudtrail.New(session.New()) + + params := &cloudtrail.ListTagsInput{ + ResourceIdList: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + NextToken: aws.String("String"), + } + resp, err := svc.ListTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudTrail_LookupEvents() { + svc := cloudtrail.New(session.New()) + + params := &cloudtrail.LookupEventsInput{ + EndTime: aws.Time(time.Now()), + LookupAttributes: []*cloudtrail.LookupAttribute{ + { // Required + AttributeKey: aws.String("LookupAttributeKey"), // Required + AttributeValue: aws.String("String"), // Required + }, + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("NextToken"), + StartTime: aws.Time(time.Now()), + } + resp, err := svc.LookupEvents(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudTrail_RemoveTags() { + svc := cloudtrail.New(session.New()) + + params := &cloudtrail.RemoveTagsInput{ + ResourceId: aws.String("String"), // Required + TagsList: []*cloudtrail.Tag{ + { // Required + Key: aws.String("String"), // Required + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.RemoveTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudTrail_StartLogging() { + svc := cloudtrail.New(session.New()) + + params := &cloudtrail.StartLoggingInput{ + Name: aws.String("String"), // Required + } + resp, err := svc.StartLogging(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudTrail_StopLogging() { + svc := cloudtrail.New(session.New()) + + params := &cloudtrail.StopLoggingInput{ + Name: aws.String("String"), // Required + } + resp, err := svc.StopLogging(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleCloudTrail_UpdateTrail() {
+	svc := cloudtrail.New(session.New())
+
+	params := &cloudtrail.UpdateTrailInput{
+		Name: aws.String("String"), // Required
+		CloudWatchLogsLogGroupArn:  aws.String("String"),
+		CloudWatchLogsRoleArn:      aws.String("String"),
+		EnableLogFileValidation:    aws.Bool(true),
+		IncludeGlobalServiceEvents: aws.Bool(true),
+		IsMultiRegionTrail:         aws.Bool(true),
+		KmsKeyId:                   aws.String("String"),
+		S3BucketName:               aws.String("String"),
+		S3KeyPrefix:                aws.String("String"),
+		SnsTopicName:               aws.String("String"),
+	}
+	resp, err := svc.UpdateTrail(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/service.go
new file mode 100644
index 000000000..621e16698
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/service.go
@@ -0,0 +1,107 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package cloudtrail
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+)
+
+// This is the CloudTrail API Reference. It provides descriptions of actions,
+// data types, common parameters, and common errors for CloudTrail.
+//
+// CloudTrail is a web service that records AWS API calls for your AWS account
+// and delivers log files to an Amazon S3 bucket. The recorded information includes
+// the identity of the user, the start time of the AWS API call, the source
+// IP address, the request parameters, and the response elements returned by
+// the service.
+//
+// As an alternative to the API, you can use one of the AWS SDKs, which consist
+// of libraries and sample code for various programming languages and platforms
+// (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient way
+// to create programmatic access to AWS CloudTrail. For example, the SDKs take
+// care of cryptographically signing requests, managing errors, and retrying
+// requests automatically. For information about the AWS SDKs, including how
+// to download and install them, see the Tools for Amazon Web Services page
+// (http://aws.amazon.com/tools/).
+//
+// See the CloudTrail User Guide for information about the data that is included
+// with each AWS API call listed in the log files.
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type CloudTrail struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "cloudtrail"
+
+// New creates a new instance of the CloudTrail client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a CloudTrail client from just a session.
+// svc := cloudtrail.New(mySession) +// +// // Create a CloudTrail client with additional configuration +// svc := cloudtrail.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudTrail { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *CloudTrail { + svc := &CloudTrail{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2013-11-01", + JSONVersion: "1.1", + TargetPrefix: "com.amazonaws.cloudtrail.v20131101.CloudTrail_20131101", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a CloudTrail operation and runs any +// custom request initialization. +func (c *CloudTrail) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/api.go new file mode 100644 index 000000000..e23db4766 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/api.go @@ -0,0 +1,2123 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cloudwatch provides a client for Amazon CloudWatch. +package cloudwatch + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +const opDeleteAlarms = "DeleteAlarms" + +// DeleteAlarmsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteAlarms operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteAlarms method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteAlarmsRequest method. 
+// req, resp := client.DeleteAlarmsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatch) DeleteAlarmsRequest(input *DeleteAlarmsInput) (req *request.Request, output *DeleteAlarmsOutput) { + op := &request.Operation{ + Name: opDeleteAlarms, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAlarmsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteAlarmsOutput{} + req.Data = output + return +} + +// Deletes all specified alarms. In the event of an error, no alarms are deleted. +func (c *CloudWatch) DeleteAlarms(input *DeleteAlarmsInput) (*DeleteAlarmsOutput, error) { + req, out := c.DeleteAlarmsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAlarmHistory = "DescribeAlarmHistory" + +// DescribeAlarmHistoryRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAlarmHistory operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeAlarmHistory method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeAlarmHistoryRequest method. +// req, resp := client.DescribeAlarmHistoryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatch) DescribeAlarmHistoryRequest(input *DescribeAlarmHistoryInput) (req *request.Request, output *DescribeAlarmHistoryOutput) { + op := &request.Operation{ + Name: opDescribeAlarmHistory, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeAlarmHistoryInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAlarmHistoryOutput{} + req.Data = output + return +} + +// Retrieves history for the specified alarm. Filter alarms by date range or +// item type. If an alarm name is not specified, Amazon CloudWatch returns histories +// for all of the owner's alarms. +// +// Amazon CloudWatch retains the history of an alarm for two weeks, whether +// or not you delete the alarm. +func (c *CloudWatch) DescribeAlarmHistory(input *DescribeAlarmHistoryInput) (*DescribeAlarmHistoryOutput, error) { + req, out := c.DescribeAlarmHistoryRequest(input) + err := req.Send() + return out, err +} + +// DescribeAlarmHistoryPages iterates over the pages of a DescribeAlarmHistory operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeAlarmHistory method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeAlarmHistory operation. 
+// pageNum := 0 +// err := client.DescribeAlarmHistoryPages(params, +// func(page *DescribeAlarmHistoryOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CloudWatch) DescribeAlarmHistoryPages(input *DescribeAlarmHistoryInput, fn func(p *DescribeAlarmHistoryOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeAlarmHistoryRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeAlarmHistoryOutput), lastPage) + }) +} + +const opDescribeAlarms = "DescribeAlarms" + +// DescribeAlarmsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAlarms operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeAlarms method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeAlarmsRequest method. +// req, resp := client.DescribeAlarmsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatch) DescribeAlarmsRequest(input *DescribeAlarmsInput) (req *request.Request, output *DescribeAlarmsOutput) { + op := &request.Operation{ + Name: opDescribeAlarms, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeAlarmsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAlarmsOutput{} + req.Data = output + return +} + +// Retrieves alarms with the specified names. If no name is specified, all alarms +// for the user are returned. Alarms can be retrieved by using only a prefix +// for the alarm name, the alarm state, or a prefix for any action. +func (c *CloudWatch) DescribeAlarms(input *DescribeAlarmsInput) (*DescribeAlarmsOutput, error) { + req, out := c.DescribeAlarmsRequest(input) + err := req.Send() + return out, err +} + +// DescribeAlarmsPages iterates over the pages of a DescribeAlarms operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeAlarms method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeAlarms operation. 
+// pageNum := 0 +// err := client.DescribeAlarmsPages(params, +// func(page *DescribeAlarmsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CloudWatch) DescribeAlarmsPages(input *DescribeAlarmsInput, fn func(p *DescribeAlarmsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeAlarmsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeAlarmsOutput), lastPage) + }) +} + +const opDescribeAlarmsForMetric = "DescribeAlarmsForMetric" + +// DescribeAlarmsForMetricRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAlarmsForMetric operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeAlarmsForMetric method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeAlarmsForMetricRequest method. +// req, resp := client.DescribeAlarmsForMetricRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatch) DescribeAlarmsForMetricRequest(input *DescribeAlarmsForMetricInput) (req *request.Request, output *DescribeAlarmsForMetricOutput) { + op := &request.Operation{ + Name: opDescribeAlarmsForMetric, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAlarmsForMetricInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAlarmsForMetricOutput{} + req.Data = output + return +} + +// Retrieves all alarms for a single metric. Specify a statistic, period, or +// unit to filter the set of alarms further. +func (c *CloudWatch) DescribeAlarmsForMetric(input *DescribeAlarmsForMetricInput) (*DescribeAlarmsForMetricOutput, error) { + req, out := c.DescribeAlarmsForMetricRequest(input) + err := req.Send() + return out, err +} + +const opDisableAlarmActions = "DisableAlarmActions" + +// DisableAlarmActionsRequest generates a "aws/request.Request" representing the +// client's request for the DisableAlarmActions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DisableAlarmActions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DisableAlarmActionsRequest method. 
+// req, resp := client.DisableAlarmActionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatch) DisableAlarmActionsRequest(input *DisableAlarmActionsInput) (req *request.Request, output *DisableAlarmActionsOutput) { + op := &request.Operation{ + Name: opDisableAlarmActions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableAlarmActionsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DisableAlarmActionsOutput{} + req.Data = output + return +} + +// Disables actions for the specified alarms. When an alarm's actions are disabled +// the alarm's state may change, but none of the alarm's actions will execute. +func (c *CloudWatch) DisableAlarmActions(input *DisableAlarmActionsInput) (*DisableAlarmActionsOutput, error) { + req, out := c.DisableAlarmActionsRequest(input) + err := req.Send() + return out, err +} + +const opEnableAlarmActions = "EnableAlarmActions" + +// EnableAlarmActionsRequest generates a "aws/request.Request" representing the +// client's request for the EnableAlarmActions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the EnableAlarmActions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EnableAlarmActionsRequest method. +// req, resp := client.EnableAlarmActionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatch) EnableAlarmActionsRequest(input *EnableAlarmActionsInput) (req *request.Request, output *EnableAlarmActionsOutput) { + op := &request.Operation{ + Name: opEnableAlarmActions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableAlarmActionsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &EnableAlarmActionsOutput{} + req.Data = output + return +} + +// Enables actions for the specified alarms. +func (c *CloudWatch) EnableAlarmActions(input *EnableAlarmActionsInput) (*EnableAlarmActionsOutput, error) { + req, out := c.EnableAlarmActionsRequest(input) + err := req.Send() + return out, err +} + +const opGetMetricStatistics = "GetMetricStatistics" + +// GetMetricStatisticsRequest generates a "aws/request.Request" representing the +// client's request for the GetMetricStatistics operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If
+// you just want the service response, call the GetMetricStatistics method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the GetMetricStatisticsRequest method.
+// req, resp := client.GetMetricStatisticsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CloudWatch) GetMetricStatisticsRequest(input *GetMetricStatisticsInput) (req *request.Request, output *GetMetricStatisticsOutput) {
+ op := &request.Operation{
+ Name: opGetMetricStatistics,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetMetricStatisticsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetMetricStatisticsOutput{}
+ req.Data = output
+ return
+}
+
+// Gets statistics for the specified metric.
+//
+// The maximum number of data points that can be queried is 50,850, whereas
+// the maximum number of data points returned from a single GetMetricStatistics
+// request is 1,440. If you make a request that generates more than 1,440 data
+// points, Amazon CloudWatch returns an error. In such a case, you can alter
+// the request by narrowing the specified time range or increasing the specified
+// period. Alternatively, you can make multiple requests across adjacent time
+// ranges. GetMetricStatistics does not return the data in chronological order.
+//
+// Amazon CloudWatch aggregates data points based on the length of the period
+// that you specify. For example, if you request statistics with a one-minute
+// granularity, Amazon CloudWatch aggregates data points with time stamps that
+// fall within the same one-minute period. In such a case, the data points queried
+// can greatly outnumber the data points returned.
+//
+// The following examples show various statistics allowed by the data point
+// query maximum of 50,850 when you call GetMetricStatistics on Amazon EC2 instances
+// with detailed (one-minute) monitoring enabled:
+//
+//   - Statistics for up to 400 instances for a span of one hour
+//   - Statistics for up to 35 instances over a span of 24 hours
+//   - Statistics for up to 2 instances over a span of 2 weeks
+//
+// For information about the namespace, metric names, and dimensions that other
+// Amazon Web Services products use to send metrics to CloudWatch, go to Amazon
+// CloudWatch Metrics, Namespaces, and Dimensions Reference
+// (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html)
+// in the Amazon CloudWatch Developer Guide.
+func (c *CloudWatch) GetMetricStatistics(input *GetMetricStatisticsInput) (*GetMetricStatisticsOutput, error) {
+ req, out := c.GetMetricStatisticsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opListMetrics = "ListMetrics"
+
+// ListMetricsRequest generates a "aws/request.Request" representing the
+// client's request for the ListMetrics operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ListMetrics method directly
+// instead.
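+//
+// As an illustrative sketch of that custom-handler pattern (the handler body
+// below is a placeholder, not part of the generated API):
+//
+//    req, resp := client.ListMetricsRequest(params)
+//    req.Handlers.Send.PushBack(func(r *request.Request) {
+//        // custom logic that runs as part of the send step
+//    })
+//    err := req.Send()
+//    if err == nil {
+//        fmt.Println(resp)
+//    }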
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListMetricsRequest method. +// req, resp := client.ListMetricsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatch) ListMetricsRequest(input *ListMetricsInput) (req *request.Request, output *ListMetricsOutput) { + op := &request.Operation{ + Name: opListMetrics, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListMetricsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListMetricsOutput{} + req.Data = output + return +} + +// Returns a list of valid metrics stored for the AWS account owner. Returned +// metrics can be used with GetMetricStatistics to obtain statistical data for +// a given metric. +// +// Up to 500 results are returned for any one call. To retrieve further results, +// use returned NextToken values with subsequent ListMetrics operations. If +// you create a metric with the PutMetricData action, allow up to fifteen minutes +// for the metric to appear in calls to the ListMetrics action. Statistics about +// the metric, however, are available sooner using GetMetricStatistics. +func (c *CloudWatch) ListMetrics(input *ListMetricsInput) (*ListMetricsOutput, error) { + req, out := c.ListMetricsRequest(input) + err := req.Send() + return out, err +} + +// ListMetricsPages iterates over the pages of a ListMetrics operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListMetrics method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListMetrics operation. +// pageNum := 0 +// err := client.ListMetricsPages(params, +// func(page *ListMetricsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CloudWatch) ListMetricsPages(input *ListMetricsInput, fn func(p *ListMetricsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListMetricsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListMetricsOutput), lastPage) + }) +} + +const opPutMetricAlarm = "PutMetricAlarm" + +// PutMetricAlarmRequest generates a "aws/request.Request" representing the +// client's request for the PutMetricAlarm operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutMetricAlarm method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutMetricAlarmRequest method. 
+// req, resp := client.PutMetricAlarmRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CloudWatch) PutMetricAlarmRequest(input *PutMetricAlarmInput) (req *request.Request, output *PutMetricAlarmOutput) {
+ op := &request.Operation{
+ Name: opPutMetricAlarm,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &PutMetricAlarmInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &PutMetricAlarmOutput{}
+ req.Data = output
+ return
+}
+
+// Creates or updates an alarm and associates it with the specified Amazon CloudWatch
+// metric. Optionally, this operation can associate one or more Amazon Simple
+// Notification Service resources with the alarm.
+//
+// When this operation creates an alarm, the alarm state is immediately set
+// to INSUFFICIENT_DATA. The alarm is evaluated and its StateValue is set appropriately.
+// Any actions associated with the StateValue are then executed.
+//
+// When updating an existing alarm, its StateValue is left unchanged.
+//
+// If you are using an AWS Identity and Access Management (IAM) account to create
+// or modify an alarm, you must have the following Amazon EC2 permissions:
+//
+//   - ec2:DescribeInstanceStatus and ec2:DescribeInstances for all alarms
+//     on Amazon EC2 instance status metrics.
+//   - ec2:StopInstances for alarms with stop actions.
+//   - ec2:TerminateInstances for alarms with terminate actions.
+//   - ec2:DescribeInstanceRecoveryAttribute and ec2:RecoverInstances for
+//     alarms with recover actions.
+//
+// If you have read/write
+// permissions for Amazon CloudWatch but not for Amazon EC2, you can still create
+// an alarm but the stop or terminate actions won't be performed on the Amazon
+// EC2 instance. However, if you are later granted permission to use the associated
+// Amazon EC2 APIs, the alarm actions you created earlier will be performed.
+// For more information about IAM permissions, see Permissions and Policies
+// (http://docs.aws.amazon.com//IAM/latest/UserGuide/PermissionsAndPolicies.html)
+// in Using IAM.
+//
+// If you are using an IAM role (e.g., an Amazon EC2 instance profile), you
+// cannot stop or terminate the instance using alarm actions. However, you can
+// still see the alarm state and perform any other actions such as Amazon SNS
+// notifications or Auto Scaling policies.
+//
+// If you are using temporary security credentials granted using the AWS Security
+// Token Service (AWS STS), you cannot stop or terminate an Amazon EC2 instance
+// using alarm actions.
+func (c *CloudWatch) PutMetricAlarm(input *PutMetricAlarmInput) (*PutMetricAlarmOutput, error) {
+ req, out := c.PutMetricAlarmRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opPutMetricData = "PutMetricData"
+
+// PutMetricDataRequest generates a "aws/request.Request" representing the
+// client's request for the PutMetricData operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutMetricData method directly
+// instead.
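+//
+// A minimal, illustrative direct call (the namespace, metric name, and value
+// below are placeholders, not part of the generated documentation):
+//
+//    out, err := client.PutMetricData(&cloudwatch.PutMetricDataInput{
+//        Namespace: aws.String("examples"),
+//        MetricData: []*cloudwatch.MetricDatum{{
+//            MetricName: aws.String("latency"),
+//            Value:      aws.Float64(123),
+//        }},
+//    })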
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutMetricDataRequest method. +// req, resp := client.PutMetricDataRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatch) PutMetricDataRequest(input *PutMetricDataInput) (req *request.Request, output *PutMetricDataOutput) { + op := &request.Operation{ + Name: opPutMetricData, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutMetricDataInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutMetricDataOutput{} + req.Data = output + return +} + +// Publishes metric data points to Amazon CloudWatch. Amazon CloudWatch associates +// the data points with the specified metric. If the specified metric does not +// exist, Amazon CloudWatch creates the metric. When Amazon CloudWatch creates +// a metric, it can take up to fifteen minutes for the metric to appear in calls +// to the ListMetrics action. +// +// Each PutMetricData request is limited to 8 KB in size for HTTP GET requests +// and is limited to 40 KB in size for HTTP POST requests. +// +// Although the Value parameter accepts numbers of type Double, Amazon CloudWatch +// rejects values that are either too small or too large. Values must be in +// the range of 8.515920e-109 to 1.174271e+108 (Base 10) or 2e-360 to 2e360 +// (Base 2). In addition, special values (e.g., NaN, +Infinity, -Infinity) are +// not supported. Data that is timestamped 24 hours or more in the past may +// take in excess of 48 hours to become available from submission time using +// GetMetricStatistics. +func (c *CloudWatch) PutMetricData(input *PutMetricDataInput) (*PutMetricDataOutput, error) { + req, out := c.PutMetricDataRequest(input) + err := req.Send() + return out, err +} + +const opSetAlarmState = "SetAlarmState" + +// SetAlarmStateRequest generates a "aws/request.Request" representing the +// client's request for the SetAlarmState operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetAlarmState method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetAlarmStateRequest method. 
+// req, resp := client.SetAlarmStateRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CloudWatch) SetAlarmStateRequest(input *SetAlarmStateInput) (req *request.Request, output *SetAlarmStateOutput) {
+ op := &request.Operation{
+ Name: opSetAlarmState,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &SetAlarmStateInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &SetAlarmStateOutput{}
+ req.Data = output
+ return
+}
+
+// Temporarily sets the state of an alarm. When the updated StateValue differs
+// from the previous value, the action configured for the appropriate state
+// is invoked. For example, if your alarm is configured to send an Amazon SNS
+// message when an alarm is triggered, temporarily changing the alarm's state
+// to ALARM will send an Amazon SNS message. This is not a permanent change.
+// The next periodic alarm check (in about a minute) will set the alarm to its
+// actual state. Because the alarm state change happens very quickly, it is
+// typically only visible in the alarm's History tab in the Amazon CloudWatch
+// console or through DescribeAlarmHistory.
+func (c *CloudWatch) SetAlarmState(input *SetAlarmStateInput) (*SetAlarmStateOutput, error) {
+ req, out := c.SetAlarmStateRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// The AlarmHistoryItem data type contains descriptive information about the
+// history of a specific alarm. If you call DescribeAlarmHistory, Amazon CloudWatch
+// returns this data type as part of the DescribeAlarmHistoryResult data type.
+type AlarmHistoryItem struct {
+ _ struct{} `type:"structure"`
+
+ // The descriptive name for the alarm.
+ AlarmName *string `min:"1" type:"string"`
+
+ // Machine-readable data about the alarm in JSON format.
+ HistoryData *string `min:"1" type:"string"`
+
+ // The type of alarm history item.
+ HistoryItemType *string `type:"string" enum:"HistoryItemType"`
+
+ // A human-readable summary of the alarm history.
+ HistorySummary *string `min:"1" type:"string"`
+
+ // The time stamp for the alarm history item.
+ Timestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+}
+
+// String returns the string representation
+func (s AlarmHistoryItem) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AlarmHistoryItem) GoString() string {
+ return s.String()
+}
+
+// The Datapoint data type encapsulates the statistical data that Amazon CloudWatch
+// computes from metric data.
+type Datapoint struct {
+ _ struct{} `type:"structure"`
+
+ // The average of metric values that correspond to the datapoint.
+ Average *float64 `type:"double"`
+
+ // The maximum of the metric value used for the datapoint.
+ Maximum *float64 `type:"double"`
+
+ // The minimum metric value used for the datapoint.
+ Minimum *float64 `type:"double"`
+
+ // The number of metric values that contributed to the aggregate value of this
+ // datapoint.
+ SampleCount *float64 `type:"double"`
+
+ // The sum of metric values used for the datapoint.
+ Sum *float64 `type:"double"`
+
+ // The time stamp used for the datapoint.
+ Timestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // The standard unit used for the datapoint.
+ Unit *string `type:"string" enum:"StandardUnit"` +} + +// String returns the string representation +func (s Datapoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Datapoint) GoString() string { + return s.String() +} + +type DeleteAlarmsInput struct { + _ struct{} `type:"structure"` + + // A list of alarms to be deleted. + AlarmNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s DeleteAlarmsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAlarmsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteAlarmsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteAlarmsInput"} + if s.AlarmNames == nil { + invalidParams.Add(request.NewErrParamRequired("AlarmNames")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteAlarmsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAlarmsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAlarmsOutput) GoString() string { + return s.String() +} + +type DescribeAlarmHistoryInput struct { + _ struct{} `type:"structure"` + + // The name of the alarm. + AlarmName *string `min:"1" type:"string"` + + // The ending date to retrieve alarm history. + EndDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The type of alarm histories to retrieve. + HistoryItemType *string `type:"string" enum:"HistoryItemType"` + + // The maximum number of alarm history records to retrieve. + MaxRecords *int64 `min:"1" type:"integer"` + + // The token returned by a previous call to indicate that there is more data + // available. + NextToken *string `type:"string"` + + // The starting date to retrieve alarm history. + StartDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s DescribeAlarmHistoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAlarmHistoryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeAlarmHistoryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeAlarmHistoryInput"} + if s.AlarmName != nil && len(*s.AlarmName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AlarmName", 1)) + } + if s.MaxRecords != nil && *s.MaxRecords < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxRecords", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The output for the DescribeAlarmHistory action. +type DescribeAlarmHistoryOutput struct { + _ struct{} `type:"structure"` + + // A list of alarm histories in JSON format. + AlarmHistoryItems []*AlarmHistoryItem `type:"list"` + + // A string that marks the start of the next batch of returned results. 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeAlarmHistoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAlarmHistoryOutput) GoString() string { + return s.String() +} + +type DescribeAlarmsForMetricInput struct { + _ struct{} `type:"structure"` + + // The list of dimensions associated with the metric. If the metric has any + // associated dimensions, you must specify them in order for the DescribeAlarmsForMetric + // to succeed. + Dimensions []*Dimension `type:"list"` + + // The name of the metric. + MetricName *string `min:"1" type:"string" required:"true"` + + // The namespace of the metric. + Namespace *string `min:"1" type:"string" required:"true"` + + // The period in seconds over which the statistic is applied. + Period *int64 `min:"60" type:"integer"` + + // The statistic for the metric. + Statistic *string `type:"string" enum:"Statistic"` + + // The unit for the metric. + Unit *string `type:"string" enum:"StandardUnit"` +} + +// String returns the string representation +func (s DescribeAlarmsForMetricInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAlarmsForMetricInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeAlarmsForMetricInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeAlarmsForMetricInput"} + if s.MetricName == nil { + invalidParams.Add(request.NewErrParamRequired("MetricName")) + } + if s.MetricName != nil && len(*s.MetricName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MetricName", 1)) + } + if s.Namespace == nil { + invalidParams.Add(request.NewErrParamRequired("Namespace")) + } + if s.Namespace != nil && len(*s.Namespace) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) + } + if s.Period != nil && *s.Period < 60 { + invalidParams.Add(request.NewErrParamMinValue("Period", 60)) + } + if s.Dimensions != nil { + for i, v := range s.Dimensions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Dimensions", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The output for the DescribeAlarmsForMetric action. +type DescribeAlarmsForMetricOutput struct { + _ struct{} `type:"structure"` + + // A list of information for each alarm with the specified metric. + MetricAlarms []*MetricAlarm `type:"list"` +} + +// String returns the string representation +func (s DescribeAlarmsForMetricOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAlarmsForMetricOutput) GoString() string { + return s.String() +} + +type DescribeAlarmsInput struct { + _ struct{} `type:"structure"` + + // The action name prefix. + ActionPrefix *string `min:"1" type:"string"` + + // The alarm name prefix. AlarmNames cannot be specified if this parameter is + // specified. + AlarmNamePrefix *string `min:"1" type:"string"` + + // A list of alarm names to retrieve information for. + AlarmNames []*string `type:"list"` + + // The maximum number of alarm descriptions to retrieve. + MaxRecords *int64 `min:"1" type:"integer"` + + // The token returned by a previous call to indicate that there is more data + // available. 
+ NextToken *string `type:"string"` + + // The state value to be used in matching alarms. + StateValue *string `type:"string" enum:"StateValue"` +} + +// String returns the string representation +func (s DescribeAlarmsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAlarmsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeAlarmsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeAlarmsInput"} + if s.ActionPrefix != nil && len(*s.ActionPrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ActionPrefix", 1)) + } + if s.AlarmNamePrefix != nil && len(*s.AlarmNamePrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AlarmNamePrefix", 1)) + } + if s.MaxRecords != nil && *s.MaxRecords < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxRecords", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The output for the DescribeAlarms action. +type DescribeAlarmsOutput struct { + _ struct{} `type:"structure"` + + // A list of information for the specified alarms. + MetricAlarms []*MetricAlarm `type:"list"` + + // A string that marks the start of the next batch of returned results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeAlarmsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAlarmsOutput) GoString() string { + return s.String() +} + +// The Dimension data type further expands on the identity of a metric using +// a Name, Value pair. +// +// For examples that use one or more dimensions, see PutMetricData. +type Dimension struct { + _ struct{} `type:"structure"` + + // The name of the dimension. + Name *string `min:"1" type:"string" required:"true"` + + // The value representing the dimension measurement + Value *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s Dimension) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Dimension) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Dimension) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Dimension"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + if s.Value != nil && len(*s.Value) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Value", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The DimensionFilter data type is used to filter ListMetrics results. +type DimensionFilter struct { + _ struct{} `type:"structure"` + + // The dimension name to be matched. + Name *string `min:"1" type:"string" required:"true"` + + // The value of the dimension to be matched. + // + // Specifying a Name without specifying a Value returns all values associated + // with that Name. 
+ Value *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DimensionFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DimensionFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DimensionFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DimensionFilter"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Value != nil && len(*s.Value) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Value", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DisableAlarmActionsInput struct { + _ struct{} `type:"structure"` + + // The names of the alarms to disable actions for. + AlarmNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s DisableAlarmActionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableAlarmActionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DisableAlarmActionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisableAlarmActionsInput"} + if s.AlarmNames == nil { + invalidParams.Add(request.NewErrParamRequired("AlarmNames")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DisableAlarmActionsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisableAlarmActionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableAlarmActionsOutput) GoString() string { + return s.String() +} + +type EnableAlarmActionsInput struct { + _ struct{} `type:"structure"` + + // The names of the alarms to enable actions for. + AlarmNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s EnableAlarmActionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableAlarmActionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *EnableAlarmActionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EnableAlarmActionsInput"} + if s.AlarmNames == nil { + invalidParams.Add(request.NewErrParamRequired("AlarmNames")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type EnableAlarmActionsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s EnableAlarmActionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableAlarmActionsOutput) GoString() string { + return s.String() +} + +type GetMetricStatisticsInput struct { + _ struct{} `type:"structure"` + + // A list of dimensions describing qualities of the metric. + Dimensions []*Dimension `type:"list"` + + // The time stamp to use for determining the last datapoint to return. The value + // specified is exclusive; results will include datapoints up to the time stamp + // specified. 
The time stamp must be in ISO 8601 UTC format (e.g., 2014-09-03T23:00:00Z). + EndTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The name of the metric, with or without spaces. + MetricName *string `min:"1" type:"string" required:"true"` + + // The namespace of the metric, with or without spaces. + Namespace *string `min:"1" type:"string" required:"true"` + + // The granularity, in seconds, of the returned datapoints. Period must be at + // least 60 seconds and must be a multiple of 60. The default value is 60. + Period *int64 `min:"60" type:"integer" required:"true"` + + // The time stamp to use for determining the first datapoint to return. The + // value specified is inclusive; results include datapoints with the time stamp + // specified. The time stamp must be in ISO 8601 UTC format (e.g., 2014-09-03T23:00:00Z). + // + // The specified start time is rounded down to the nearest value. Datapoints + // are returned for start times up to two weeks in the past. Specified start + // times that are more than two weeks in the past will not return datapoints + // for metrics that are older than two weeks. Data that is timestamped 24 hours + // or more in the past may take in excess of 48 hours to become available from + // submission time using GetMetricStatistics. + StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The metric statistics to return. For information about specific statistics + // returned by GetMetricStatistics, see Statistics (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#Statistic) + // in the Amazon CloudWatch Developer Guide. + Statistics []*string `min:"1" type:"list" required:"true"` + + // The unit for the metric. + Unit *string `type:"string" enum:"StandardUnit"` +} + +// String returns the string representation +func (s GetMetricStatisticsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetMetricStatisticsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
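+//
+// As a rough illustration (all field values below are placeholders), an input
+// that passes this validation sets StartTime, EndTime, MetricName, Namespace,
+// a Period of at least 60, and at least one statistic:
+//
+//	input := &cloudwatch.GetMetricStatisticsInput{
+//		Namespace:  aws.String("AWS/EC2"),
+//		MetricName: aws.String("CPUUtilization"),
+//		StartTime:  aws.Time(time.Now().Add(-time.Hour)),
+//		EndTime:    aws.Time(time.Now()),
+//		Period:     aws.Int64(60),
+//		Statistics: []*string{aws.String("Average")},
+//	}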
+func (s *GetMetricStatisticsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetMetricStatisticsInput"} + if s.EndTime == nil { + invalidParams.Add(request.NewErrParamRequired("EndTime")) + } + if s.MetricName == nil { + invalidParams.Add(request.NewErrParamRequired("MetricName")) + } + if s.MetricName != nil && len(*s.MetricName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MetricName", 1)) + } + if s.Namespace == nil { + invalidParams.Add(request.NewErrParamRequired("Namespace")) + } + if s.Namespace != nil && len(*s.Namespace) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) + } + if s.Period == nil { + invalidParams.Add(request.NewErrParamRequired("Period")) + } + if s.Period != nil && *s.Period < 60 { + invalidParams.Add(request.NewErrParamMinValue("Period", 60)) + } + if s.StartTime == nil { + invalidParams.Add(request.NewErrParamRequired("StartTime")) + } + if s.Statistics == nil { + invalidParams.Add(request.NewErrParamRequired("Statistics")) + } + if s.Statistics != nil && len(s.Statistics) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Statistics", 1)) + } + if s.Dimensions != nil { + for i, v := range s.Dimensions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Dimensions", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The output for the GetMetricStatistics action. +type GetMetricStatisticsOutput struct { + _ struct{} `type:"structure"` + + // The datapoints for the specified metric. + Datapoints []*Datapoint `type:"list"` + + // A label describing the specified metric. + Label *string `type:"string"` +} + +// String returns the string representation +func (s GetMetricStatisticsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetMetricStatisticsOutput) GoString() string { + return s.String() +} + +type ListMetricsInput struct { + _ struct{} `type:"structure"` + + // A list of dimensions to filter against. + Dimensions []*DimensionFilter `type:"list"` + + // The name of the metric to filter against. + MetricName *string `min:"1" type:"string"` + + // The namespace to filter against. + Namespace *string `min:"1" type:"string"` + + // The token returned by a previous call to indicate that there is more data + // available. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListMetricsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMetricsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListMetricsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListMetricsInput"} + if s.MetricName != nil && len(*s.MetricName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MetricName", 1)) + } + if s.Namespace != nil && len(*s.Namespace) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) + } + if s.Dimensions != nil { + for i, v := range s.Dimensions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Dimensions", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The output for the ListMetrics action. 
+type ListMetricsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of metrics used to generate statistics for an AWS account.
+ Metrics []*Metric `type:"list"`
+
+ // A string that marks the start of the next batch of returned results.
+ NextToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListMetricsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListMetricsOutput) GoString() string {
+ return s.String()
+}
+
+// The Metric data type contains information about a specific metric. If you
+// call ListMetrics, Amazon CloudWatch returns information contained by this
+// data type.
+//
+// The example in the Examples section publishes two metrics named buffers
+// and latency. Both metrics are in the examples namespace. Both metrics have
+// two dimensions, InstanceID and InstanceType.
+type Metric struct {
+ _ struct{} `type:"structure"`
+
+ // A list of dimensions associated with the metric.
+ Dimensions []*Dimension `type:"list"`
+
+ // The name of the metric.
+ MetricName *string `min:"1" type:"string"`
+
+ // The namespace of the metric.
+ Namespace *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s Metric) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Metric) GoString() string {
+ return s.String()
+}
+
+// The MetricAlarm data type represents an alarm. You can use PutMetricAlarm
+// to create or update an alarm.
+type MetricAlarm struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether actions should be executed during any changes to the alarm's
+ // state.
+ ActionsEnabled *bool `type:"boolean"`
+
+ // The list of actions to execute when this alarm transitions into an ALARM
+ // state from any other state. Each action is specified as an Amazon Resource
+ // Name (ARN).
+ AlarmActions []*string `type:"list"`
+
+ // The Amazon Resource Name (ARN) of the alarm.
+ AlarmArn *string `min:"1" type:"string"`
+
+ // The time stamp of the last update to the alarm configuration.
+ AlarmConfigurationUpdatedTimestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // The description for the alarm.
+ AlarmDescription *string `type:"string"`
+
+ // The name of the alarm.
+ AlarmName *string `min:"1" type:"string"`
+
+ // The arithmetic operation to use when comparing the specified Statistic and
+ // Threshold. The specified Statistic value is used as the first operand.
+ ComparisonOperator *string `type:"string" enum:"ComparisonOperator"`
+
+ // The list of dimensions associated with the alarm's associated metric.
+ Dimensions []*Dimension `type:"list"`
+
+ // The number of periods over which data is compared to the specified threshold.
+ EvaluationPeriods *int64 `min:"1" type:"integer"`
+
+ // The list of actions to execute when this alarm transitions into an INSUFFICIENT_DATA
+ // state from any other state. Each action is specified as an Amazon Resource
+ // Name (ARN).
+ //
+ // The current WSDL lists this attribute as UnknownActions.
+ InsufficientDataActions []*string `type:"list"`
+
+ // The name of the alarm's metric.
+ MetricName *string `min:"1" type:"string"`
+
+ // The namespace of the alarm's associated metric.
+ Namespace *string `min:"1" type:"string"`
+
+ // The list of actions to execute when this alarm transitions into an OK state
+ // from any other state. Each action is specified as an Amazon Resource Name
+ // (ARN).
+ OKActions []*string `type:"list"` + + // The period in seconds over which the statistic is applied. + Period *int64 `min:"60" type:"integer"` + + // A human-readable explanation for the alarm's state. + StateReason *string `type:"string"` + + // An explanation for the alarm's state in machine-readable JSON format + StateReasonData *string `type:"string"` + + // The time stamp of the last update to the alarm's state. + StateUpdatedTimestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The state value for the alarm. + StateValue *string `type:"string" enum:"StateValue"` + + // The statistic to apply to the alarm's associated metric. + Statistic *string `type:"string" enum:"Statistic"` + + // The value against which the specified statistic is compared. + Threshold *float64 `type:"double"` + + // The unit of the alarm's associated metric. + Unit *string `type:"string" enum:"StandardUnit"` +} + +// String returns the string representation +func (s MetricAlarm) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricAlarm) GoString() string { + return s.String() +} + +// The MetricDatum data type encapsulates the information sent with PutMetricData +// to either create a new metric or add new values to be aggregated into an +// existing metric. +type MetricDatum struct { + _ struct{} `type:"structure"` + + // A list of dimensions associated with the metric. Note, when using the Dimensions + // value in a query, you need to append .member.N to it (e.g., Dimensions.member.N). + Dimensions []*Dimension `type:"list"` + + // The name of the metric. + MetricName *string `min:"1" type:"string" required:"true"` + + // A set of statistical values describing the metric. + StatisticValues *StatisticSet `type:"structure"` + + // The time stamp used for the metric in ISO 8601 Universal Coordinated Time + // (UTC) format. If not specified, the default value is set to the time the + // metric data was received. + Timestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The unit of the metric. + Unit *string `type:"string" enum:"StandardUnit"` + + // The value for the metric. + // + // Although the Value parameter accepts numbers of type Double, Amazon CloudWatch + // rejects values that are either too small or too large. Values must be in + // the range of 8.515920e-109 to 1.174271e+108 (Base 10) or 2e-360 to 2e360 + // (Base 2). In addition, special values (e.g., NaN, +Infinity, -Infinity) are + // not supported. + Value *float64 `type:"double"` +} + +// String returns the string representation +func (s MetricDatum) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricDatum) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
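+//
+// For instance (illustrative values only), a datum that passes this validation
+// sets the required MetricName and, optionally, valid dimensions:
+//
+//	datum := &cloudwatch.MetricDatum{
+//		MetricName: aws.String("latency"),
+//		Dimensions: []*cloudwatch.Dimension{{
+//			Name:  aws.String("InstanceType"),
+//			Value: aws.String("m1.small"),
+//		}},
+//		Value: aws.Float64(42),
+//	}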
+func (s *MetricDatum) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricDatum"} + if s.MetricName == nil { + invalidParams.Add(request.NewErrParamRequired("MetricName")) + } + if s.MetricName != nil && len(*s.MetricName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MetricName", 1)) + } + if s.Dimensions != nil { + for i, v := range s.Dimensions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Dimensions", i), err.(request.ErrInvalidParams)) + } + } + } + if s.StatisticValues != nil { + if err := s.StatisticValues.Validate(); err != nil { + invalidParams.AddNested("StatisticValues", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutMetricAlarmInput struct { + _ struct{} `type:"structure"` + + // Indicates whether or not actions should be executed during any changes to + // the alarm's state. + ActionsEnabled *bool `type:"boolean"` + + // The list of actions to execute when this alarm transitions into an ALARM + // state from any other state. Each action is specified as an Amazon Resource + // Name (ARN). + // + // Valid Values: arn:aws:automate:region (e.g., us-east-1):ec2:stop | arn:aws:automate:region + // (e.g., us-east-1):ec2:terminate | arn:aws:automate:region (e.g., us-east-1):ec2:recover + // + // Valid Values (for use with IAM roles): arn:aws:swf:us-east-1:{customer-account}:action/actions/AWS_EC2.InstanceId.Stop/1.0 + // | arn:aws:swf:us-east-1:{customer-account}:action/actions/AWS_EC2.InstanceId.Terminate/1.0 + // | arn:aws:swf:us-east-1:{customer-account}:action/actions/AWS_EC2.InstanceId.Reboot/1.0 + // + // Note: You must create at least one stop, terminate, or reboot alarm using + // the Amazon EC2 or CloudWatch console to create the EC2ActionsAccess IAM role + // for the first time. After this IAM role is created, you can create stop, + // terminate, or reboot alarms using the CLI. + AlarmActions []*string `type:"list"` + + // The description for the alarm. + AlarmDescription *string `type:"string"` + + // The descriptive name for the alarm. This name must be unique within the user's + // AWS account + AlarmName *string `min:"1" type:"string" required:"true"` + + // The arithmetic operation to use when comparing the specified Statistic and + // Threshold. The specified Statistic value is used as the first operand. + ComparisonOperator *string `type:"string" required:"true" enum:"ComparisonOperator"` + + // The dimensions for the alarm's associated metric. + Dimensions []*Dimension `type:"list"` + + // The number of periods over which data is compared to the specified threshold. + EvaluationPeriods *int64 `min:"1" type:"integer" required:"true"` + + // The list of actions to execute when this alarm transitions into an INSUFFICIENT_DATA + // state from any other state. Each action is specified as an Amazon Resource + // Name (ARN). 
+ // + // Valid Values: arn:aws:automate:region (e.g., us-east-1):ec2:stop | arn:aws:automate:region + // (e.g., us-east-1):ec2:terminate | arn:aws:automate:region (e.g., us-east-1):ec2:recover + // + // Valid Values (for use with IAM roles): arn:aws:swf:us-east-1:{customer-account}:action/actions/AWS_EC2.InstanceId.Stop/1.0 + // | arn:aws:swf:us-east-1:{customer-account}:action/actions/AWS_EC2.InstanceId.Terminate/1.0 + // | arn:aws:swf:us-east-1:{customer-account}:action/actions/AWS_EC2.InstanceId.Reboot/1.0 + // + // Note: You must create at least one stop, terminate, or reboot alarm using + // the Amazon EC2 or CloudWatch console to create the EC2ActionsAccess IAM role + // for the first time. After this IAM role is created, you can create stop, + // terminate, or reboot alarms using the CLI. + InsufficientDataActions []*string `type:"list"` + + // The name for the alarm's associated metric. + MetricName *string `min:"1" type:"string" required:"true"` + + // The namespace for the alarm's associated metric. + Namespace *string `min:"1" type:"string" required:"true"` + + // The list of actions to execute when this alarm transitions into an OK state + // from any other state. Each action is specified as an Amazon Resource Name + // (ARN). + // + // Valid Values: arn:aws:automate:region (e.g., us-east-1):ec2:stop | arn:aws:automate:region + // (e.g., us-east-1):ec2:terminate | arn:aws:automate:region (e.g., us-east-1):ec2:recover + // + // Valid Values (for use with IAM roles): arn:aws:swf:us-east-1:{customer-account}:action/actions/AWS_EC2.InstanceId.Stop/1.0 + // | arn:aws:swf:us-east-1:{customer-account}:action/actions/AWS_EC2.InstanceId.Terminate/1.0 + // | arn:aws:swf:us-east-1:{customer-account}:action/actions/AWS_EC2.InstanceId.Reboot/1.0 + // + // Note: You must create at least one stop, terminate, or reboot alarm using + // the Amazon EC2 or CloudWatch console to create the EC2ActionsAccess IAM role + // for the first time. After this IAM role is created, you can create stop, + // terminate, or reboot alarms using the CLI. + OKActions []*string `type:"list"` + + // The period in seconds over which the specified statistic is applied. + Period *int64 `min:"60" type:"integer" required:"true"` + + // The statistic to apply to the alarm's associated metric. + Statistic *string `type:"string" required:"true" enum:"Statistic"` + + // The value against which the specified statistic is compared. + Threshold *float64 `type:"double" required:"true"` + + // The statistic's unit of measure. For example, the units for the Amazon EC2 + // NetworkIn metric are Bytes because NetworkIn tracks the number of bytes that + // an instance receives on all network interfaces. You can also specify a unit + // when you create a custom metric. Units help provide conceptual meaning to + // your data. Metric data points that specify a unit of measure, such as Percent, + // are aggregated separately. + // + // Note: If you specify a unit, you must use a unit that is appropriate for + // the metric. Otherwise, this can cause an Amazon CloudWatch alarm to get stuck + // in the INSUFFICIENT DATA state. + Unit *string `type:"string" enum:"StandardUnit"` +} + +// String returns the string representation +func (s PutMetricAlarmInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutMetricAlarmInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
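+//
+// A minimal sketch of an input that satisfies the required fields above. The
+// alarm name and threshold are illustrative; the recover ARN follows the
+// arn:aws:automate:region:ec2:... form documented for AlarmActions:
+//
+// input := &cloudwatch.PutMetricAlarmInput{
+//     AlarmName:          aws.String("cpu-high"),
+//     ComparisonOperator: aws.String(cloudwatch.ComparisonOperatorGreaterThanThreshold),
+//     EvaluationPeriods:  aws.Int64(2),   // min 1
+//     MetricName:         aws.String("CPUUtilization"),
+//     Namespace:          aws.String("AWS/EC2"),
+//     Period:             aws.Int64(300), // seconds, min 60
+//     Statistic:          aws.String(cloudwatch.StatisticAverage),
+//     Threshold:          aws.Float64(90),
+//     AlarmActions:       []*string{aws.String("arn:aws:automate:us-east-1:ec2:recover")},
+// }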
+func (s *PutMetricAlarmInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutMetricAlarmInput"} + if s.AlarmName == nil { + invalidParams.Add(request.NewErrParamRequired("AlarmName")) + } + if s.AlarmName != nil && len(*s.AlarmName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AlarmName", 1)) + } + if s.ComparisonOperator == nil { + invalidParams.Add(request.NewErrParamRequired("ComparisonOperator")) + } + if s.EvaluationPeriods == nil { + invalidParams.Add(request.NewErrParamRequired("EvaluationPeriods")) + } + if s.EvaluationPeriods != nil && *s.EvaluationPeriods < 1 { + invalidParams.Add(request.NewErrParamMinValue("EvaluationPeriods", 1)) + } + if s.MetricName == nil { + invalidParams.Add(request.NewErrParamRequired("MetricName")) + } + if s.MetricName != nil && len(*s.MetricName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MetricName", 1)) + } + if s.Namespace == nil { + invalidParams.Add(request.NewErrParamRequired("Namespace")) + } + if s.Namespace != nil && len(*s.Namespace) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) + } + if s.Period == nil { + invalidParams.Add(request.NewErrParamRequired("Period")) + } + if s.Period != nil && *s.Period < 60 { + invalidParams.Add(request.NewErrParamMinValue("Period", 60)) + } + if s.Statistic == nil { + invalidParams.Add(request.NewErrParamRequired("Statistic")) + } + if s.Threshold == nil { + invalidParams.Add(request.NewErrParamRequired("Threshold")) + } + if s.Dimensions != nil { + for i, v := range s.Dimensions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Dimensions", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutMetricAlarmOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutMetricAlarmOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutMetricAlarmOutput) GoString() string { + return s.String() +} + +type PutMetricDataInput struct { + _ struct{} `type:"structure"` + + // A list of data describing the metric. + MetricData []*MetricDatum `type:"list" required:"true"` + + // The namespace for the metric data. + // + // You cannot specify a namespace that begins with "AWS/". Namespaces that + // begin with "AWS/" are reserved for other Amazon Web Services products that + // send metrics to Amazon CloudWatch. + Namespace *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutMetricDataInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutMetricDataInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
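+//
+// For illustration (the namespace is invented): custom namespaces such as
+// "MyApp/Frontend" are accepted, while anything beginning with "AWS/" is
+// reserved, as noted on the Namespace field above:
+//
+// input := &cloudwatch.PutMetricDataInput{
+//     Namespace:  aws.String("MyApp/Frontend"),
+//     MetricData: []*cloudwatch.MetricDatum{datum}, // datum as sketched for MetricDatum
+// }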
+func (s *PutMetricDataInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutMetricDataInput"} + if s.MetricData == nil { + invalidParams.Add(request.NewErrParamRequired("MetricData")) + } + if s.Namespace == nil { + invalidParams.Add(request.NewErrParamRequired("Namespace")) + } + if s.Namespace != nil && len(*s.Namespace) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) + } + if s.MetricData != nil { + for i, v := range s.MetricData { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "MetricData", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutMetricDataOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutMetricDataOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutMetricDataOutput) GoString() string { + return s.String() +} + +type SetAlarmStateInput struct { + _ struct{} `type:"structure"` + + // The descriptive name for the alarm. This name must be unique within the user's + // AWS account. The maximum length is 255 characters. + AlarmName *string `min:"1" type:"string" required:"true"` + + // The reason that this alarm is set to this specific state (in human-readable + // text format) + StateReason *string `type:"string" required:"true"` + + // The reason that this alarm is set to this specific state (in machine-readable + // JSON format) + StateReasonData *string `type:"string"` + + // The value of the state. + StateValue *string `type:"string" required:"true" enum:"StateValue"` +} + +// String returns the string representation +func (s SetAlarmStateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetAlarmStateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetAlarmStateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetAlarmStateInput"} + if s.AlarmName == nil { + invalidParams.Add(request.NewErrParamRequired("AlarmName")) + } + if s.AlarmName != nil && len(*s.AlarmName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AlarmName", 1)) + } + if s.StateReason == nil { + invalidParams.Add(request.NewErrParamRequired("StateReason")) + } + if s.StateValue == nil { + invalidParams.Add(request.NewErrParamRequired("StateValue")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type SetAlarmStateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetAlarmStateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetAlarmStateOutput) GoString() string { + return s.String() +} + +// The StatisticSet data type describes the StatisticValues component of MetricDatum, +// and represents a set of statistics that describes a specific metric. +type StatisticSet struct { + _ struct{} `type:"structure"` + + // The maximum value of the sample set. + Maximum *float64 `type:"double" required:"true"` + + // The minimum value of the sample set. + Minimum *float64 `type:"double" required:"true"` + + // The number of samples used for the statistic set. 
+ SampleCount *float64 `type:"double" required:"true"` + + // The sum of values for the sample set. + Sum *float64 `type:"double" required:"true"` +} + +// String returns the string representation +func (s StatisticSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StatisticSet) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StatisticSet) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StatisticSet"} + if s.Maximum == nil { + invalidParams.Add(request.NewErrParamRequired("Maximum")) + } + if s.Minimum == nil { + invalidParams.Add(request.NewErrParamRequired("Minimum")) + } + if s.SampleCount == nil { + invalidParams.Add(request.NewErrParamRequired("SampleCount")) + } + if s.Sum == nil { + invalidParams.Add(request.NewErrParamRequired("Sum")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +const ( + // @enum ComparisonOperator + ComparisonOperatorGreaterThanOrEqualToThreshold = "GreaterThanOrEqualToThreshold" + // @enum ComparisonOperator + ComparisonOperatorGreaterThanThreshold = "GreaterThanThreshold" + // @enum ComparisonOperator + ComparisonOperatorLessThanThreshold = "LessThanThreshold" + // @enum ComparisonOperator + ComparisonOperatorLessThanOrEqualToThreshold = "LessThanOrEqualToThreshold" +) + +const ( + // @enum HistoryItemType + HistoryItemTypeConfigurationUpdate = "ConfigurationUpdate" + // @enum HistoryItemType + HistoryItemTypeStateUpdate = "StateUpdate" + // @enum HistoryItemType + HistoryItemTypeAction = "Action" +) + +const ( + // @enum StandardUnit + StandardUnitSeconds = "Seconds" + // @enum StandardUnit + StandardUnitMicroseconds = "Microseconds" + // @enum StandardUnit + StandardUnitMilliseconds = "Milliseconds" + // @enum StandardUnit + StandardUnitBytes = "Bytes" + // @enum StandardUnit + StandardUnitKilobytes = "Kilobytes" + // @enum StandardUnit + StandardUnitMegabytes = "Megabytes" + // @enum StandardUnit + StandardUnitGigabytes = "Gigabytes" + // @enum StandardUnit + StandardUnitTerabytes = "Terabytes" + // @enum StandardUnit + StandardUnitBits = "Bits" + // @enum StandardUnit + StandardUnitKilobits = "Kilobits" + // @enum StandardUnit + StandardUnitMegabits = "Megabits" + // @enum StandardUnit + StandardUnitGigabits = "Gigabits" + // @enum StandardUnit + StandardUnitTerabits = "Terabits" + // @enum StandardUnit + StandardUnitPercent = "Percent" + // @enum StandardUnit + StandardUnitCount = "Count" + // @enum StandardUnit + StandardUnitBytesSecond = "Bytes/Second" + // @enum StandardUnit + StandardUnitKilobytesSecond = "Kilobytes/Second" + // @enum StandardUnit + StandardUnitMegabytesSecond = "Megabytes/Second" + // @enum StandardUnit + StandardUnitGigabytesSecond = "Gigabytes/Second" + // @enum StandardUnit + StandardUnitTerabytesSecond = "Terabytes/Second" + // @enum StandardUnit + StandardUnitBitsSecond = "Bits/Second" + // @enum StandardUnit + StandardUnitKilobitsSecond = "Kilobits/Second" + // @enum StandardUnit + StandardUnitMegabitsSecond = "Megabits/Second" + // @enum StandardUnit + StandardUnitGigabitsSecond = "Gigabits/Second" + // @enum StandardUnit + StandardUnitTerabitsSecond = "Terabits/Second" + // @enum StandardUnit + StandardUnitCountSecond = "Count/Second" + // @enum StandardUnit + StandardUnitNone = "None" +) + +const ( + // @enum StateValue + StateValueOk = "OK" + // @enum StateValue + StateValueAlarm = "ALARM" + // @enum StateValue + 
StateValueInsufficientData = "INSUFFICIENT_DATA"
+)
+
+const (
+ // @enum Statistic
+ StatisticSampleCount = "SampleCount"
+ // @enum Statistic
+ StatisticAverage = "Average"
+ // @enum Statistic
+ StatisticSum = "Sum"
+ // @enum Statistic
+ StatisticMinimum = "Minimum"
+ // @enum Statistic
+ StatisticMaximum = "Maximum"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/cloudwatchiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/cloudwatchiface/interface.go
new file mode 100644
index 000000000..140dc9e98
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/cloudwatchiface/interface.go
@@ -0,0 +1,64 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package cloudwatchiface provides an interface for Amazon CloudWatch.
+package cloudwatchiface
+
+import (
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/service/cloudwatch"
+)
+
+// CloudWatchAPI is the interface type for cloudwatch.CloudWatch.
+type CloudWatchAPI interface {
+ DeleteAlarmsRequest(*cloudwatch.DeleteAlarmsInput) (*request.Request, *cloudwatch.DeleteAlarmsOutput)
+
+ DeleteAlarms(*cloudwatch.DeleteAlarmsInput) (*cloudwatch.DeleteAlarmsOutput, error)
+
+ DescribeAlarmHistoryRequest(*cloudwatch.DescribeAlarmHistoryInput) (*request.Request, *cloudwatch.DescribeAlarmHistoryOutput)
+
+ DescribeAlarmHistory(*cloudwatch.DescribeAlarmHistoryInput) (*cloudwatch.DescribeAlarmHistoryOutput, error)
+
+ DescribeAlarmHistoryPages(*cloudwatch.DescribeAlarmHistoryInput, func(*cloudwatch.DescribeAlarmHistoryOutput, bool) bool) error
+
+ DescribeAlarmsRequest(*cloudwatch.DescribeAlarmsInput) (*request.Request, *cloudwatch.DescribeAlarmsOutput)
+
+ DescribeAlarms(*cloudwatch.DescribeAlarmsInput) (*cloudwatch.DescribeAlarmsOutput, error)
+
+ DescribeAlarmsPages(*cloudwatch.DescribeAlarmsInput, func(*cloudwatch.DescribeAlarmsOutput, bool) bool) error
+
+ DescribeAlarmsForMetricRequest(*cloudwatch.DescribeAlarmsForMetricInput) (*request.Request, *cloudwatch.DescribeAlarmsForMetricOutput)
+
+ DescribeAlarmsForMetric(*cloudwatch.DescribeAlarmsForMetricInput) (*cloudwatch.DescribeAlarmsForMetricOutput, error)
+
+ DisableAlarmActionsRequest(*cloudwatch.DisableAlarmActionsInput) (*request.Request, *cloudwatch.DisableAlarmActionsOutput)
+
+ DisableAlarmActions(*cloudwatch.DisableAlarmActionsInput) (*cloudwatch.DisableAlarmActionsOutput, error)
+
+ EnableAlarmActionsRequest(*cloudwatch.EnableAlarmActionsInput) (*request.Request, *cloudwatch.EnableAlarmActionsOutput)
+
+ EnableAlarmActions(*cloudwatch.EnableAlarmActionsInput) (*cloudwatch.EnableAlarmActionsOutput, error)
+
+ GetMetricStatisticsRequest(*cloudwatch.GetMetricStatisticsInput) (*request.Request, *cloudwatch.GetMetricStatisticsOutput)
+
+ GetMetricStatistics(*cloudwatch.GetMetricStatisticsInput) (*cloudwatch.GetMetricStatisticsOutput, error)
+
+ ListMetricsRequest(*cloudwatch.ListMetricsInput) (*request.Request, *cloudwatch.ListMetricsOutput)
+
+ ListMetrics(*cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error)
+
+ ListMetricsPages(*cloudwatch.ListMetricsInput, func(*cloudwatch.ListMetricsOutput, bool) bool) error
+
+ PutMetricAlarmRequest(*cloudwatch.PutMetricAlarmInput) (*request.Request, *cloudwatch.PutMetricAlarmOutput)
+
+ PutMetricAlarm(*cloudwatch.PutMetricAlarmInput) (*cloudwatch.PutMetricAlarmOutput, error)
+
+ PutMetricDataRequest(*cloudwatch.PutMetricDataInput) (*request.Request, *cloudwatch.PutMetricDataOutput)
+
+ PutMetricData(*cloudwatch.PutMetricDataInput)
(*cloudwatch.PutMetricDataOutput, error) + + SetAlarmStateRequest(*cloudwatch.SetAlarmStateInput) (*request.Request, *cloudwatch.SetAlarmStateOutput) + + SetAlarmState(*cloudwatch.SetAlarmStateInput) (*cloudwatch.SetAlarmStateOutput, error) +} + +var _ CloudWatchAPI = (*cloudwatch.CloudWatch)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/examples_test.go new file mode 100644 index 000000000..07b010852 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/examples_test.go @@ -0,0 +1,337 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package cloudwatch_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/cloudwatch" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleCloudWatch_DeleteAlarms() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.DeleteAlarmsInput{ + AlarmNames: []*string{ // Required + aws.String("AlarmName"), // Required + // More values... + }, + } + resp, err := svc.DeleteAlarms(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatch_DescribeAlarmHistory() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.DescribeAlarmHistoryInput{ + AlarmName: aws.String("AlarmName"), + EndDate: aws.Time(time.Now()), + HistoryItemType: aws.String("HistoryItemType"), + MaxRecords: aws.Int64(1), + NextToken: aws.String("NextToken"), + StartDate: aws.Time(time.Now()), + } + resp, err := svc.DescribeAlarmHistory(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatch_DescribeAlarms() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.DescribeAlarmsInput{ + ActionPrefix: aws.String("ActionPrefix"), + AlarmNamePrefix: aws.String("AlarmNamePrefix"), + AlarmNames: []*string{ + aws.String("AlarmName"), // Required + // More values... + }, + MaxRecords: aws.Int64(1), + NextToken: aws.String("NextToken"), + StateValue: aws.String("StateValue"), + } + resp, err := svc.DescribeAlarms(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatch_DescribeAlarmsForMetric() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.DescribeAlarmsForMetricInput{ + MetricName: aws.String("MetricName"), // Required + Namespace: aws.String("Namespace"), // Required + Dimensions: []*cloudwatch.Dimension{ + { // Required + Name: aws.String("DimensionName"), // Required + Value: aws.String("DimensionValue"), // Required + }, + // More values... + }, + Period: aws.Int64(1), + Statistic: aws.String("Statistic"), + Unit: aws.String("StandardUnit"), + } + resp, err := svc.DescribeAlarmsForMetric(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudWatch_DisableAlarmActions() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.DisableAlarmActionsInput{ + AlarmNames: []*string{ // Required + aws.String("AlarmName"), // Required + // More values... + }, + } + resp, err := svc.DisableAlarmActions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatch_EnableAlarmActions() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.EnableAlarmActionsInput{ + AlarmNames: []*string{ // Required + aws.String("AlarmName"), // Required + // More values... + }, + } + resp, err := svc.EnableAlarmActions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatch_GetMetricStatistics() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.GetMetricStatisticsInput{ + EndTime: aws.Time(time.Now()), // Required + MetricName: aws.String("MetricName"), // Required + Namespace: aws.String("Namespace"), // Required + Period: aws.Int64(1), // Required + StartTime: aws.Time(time.Now()), // Required + Statistics: []*string{ // Required + aws.String("Statistic"), // Required + // More values... + }, + Dimensions: []*cloudwatch.Dimension{ + { // Required + Name: aws.String("DimensionName"), // Required + Value: aws.String("DimensionValue"), // Required + }, + // More values... + }, + Unit: aws.String("StandardUnit"), + } + resp, err := svc.GetMetricStatistics(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatch_ListMetrics() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.ListMetricsInput{ + Dimensions: []*cloudwatch.DimensionFilter{ + { // Required + Name: aws.String("DimensionName"), // Required + Value: aws.String("DimensionValue"), + }, + // More values... + }, + MetricName: aws.String("MetricName"), + Namespace: aws.String("Namespace"), + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListMetrics(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatch_PutMetricAlarm() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.PutMetricAlarmInput{ + AlarmName: aws.String("AlarmName"), // Required + ComparisonOperator: aws.String("ComparisonOperator"), // Required + EvaluationPeriods: aws.Int64(1), // Required + MetricName: aws.String("MetricName"), // Required + Namespace: aws.String("Namespace"), // Required + Period: aws.Int64(1), // Required + Statistic: aws.String("Statistic"), // Required + Threshold: aws.Float64(1.0), // Required + ActionsEnabled: aws.Bool(true), + AlarmActions: []*string{ + aws.String("ResourceName"), // Required + // More values... 
+ }, + AlarmDescription: aws.String("AlarmDescription"), + Dimensions: []*cloudwatch.Dimension{ + { // Required + Name: aws.String("DimensionName"), // Required + Value: aws.String("DimensionValue"), // Required + }, + // More values... + }, + InsufficientDataActions: []*string{ + aws.String("ResourceName"), // Required + // More values... + }, + OKActions: []*string{ + aws.String("ResourceName"), // Required + // More values... + }, + Unit: aws.String("StandardUnit"), + } + resp, err := svc.PutMetricAlarm(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatch_PutMetricData() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.PutMetricDataInput{ + MetricData: []*cloudwatch.MetricDatum{ // Required + { // Required + MetricName: aws.String("MetricName"), // Required + Dimensions: []*cloudwatch.Dimension{ + { // Required + Name: aws.String("DimensionName"), // Required + Value: aws.String("DimensionValue"), // Required + }, + // More values... + }, + StatisticValues: &cloudwatch.StatisticSet{ + Maximum: aws.Float64(1.0), // Required + Minimum: aws.Float64(1.0), // Required + SampleCount: aws.Float64(1.0), // Required + Sum: aws.Float64(1.0), // Required + }, + Timestamp: aws.Time(time.Now()), + Unit: aws.String("StandardUnit"), + Value: aws.Float64(1.0), + }, + // More values... + }, + Namespace: aws.String("Namespace"), // Required + } + resp, err := svc.PutMetricData(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatch_SetAlarmState() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.SetAlarmStateInput{ + AlarmName: aws.String("AlarmName"), // Required + StateReason: aws.String("StateReason"), // Required + StateValue: aws.String("StateValue"), // Required + StateReasonData: aws.String("StateReasonData"), + } + resp, err := svc.SetAlarmState(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/service.go new file mode 100644 index 000000000..8b707e5f1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/service.go @@ -0,0 +1,100 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package cloudwatch + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +// Amazon CloudWatch monitors your Amazon Web Services (AWS) resources and the +// applications you run on AWS in real-time. You can use CloudWatch to collect +// and track metrics, which are the variables you want to measure for your resources +// and applications. +// +// CloudWatch alarms send notifications or automatically make changes to the +// resources you are monitoring based on rules that you define. 
For example,
+// you can monitor the CPU usage and disk reads and writes of your Amazon Elastic
+// Compute Cloud (Amazon EC2) instances and then use this data to determine
+// whether you should launch additional instances to handle increased load.
+// You can also use this data to stop under-used instances to save money.
+//
+// In addition to monitoring the built-in metrics that come with AWS, you can
+// monitor your own custom metrics. With CloudWatch, you gain system-wide visibility
+// into resource utilization, application performance, and operational health.
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type CloudWatch struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "monitoring"
+
+// New creates a new instance of the CloudWatch client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a CloudWatch client from just a session.
+// svc := cloudwatch.New(mySession)
+//
+// // Create a CloudWatch client with additional configuration
+// svc := cloudwatch.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudWatch {
+ c := p.ClientConfig(ServiceName, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *CloudWatch {
+ svc := &CloudWatch{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2010-08-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+ svc.Handlers.Build.PushBackNamed(query.BuildHandler)
+ svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler)
+ svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler)
+ svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler)
+
+ // Run custom client initialization if present
+ if initClient != nil {
+ initClient(svc.Client)
+ }
+
+ return svc
+}
+
+// newRequest creates a new request for a CloudWatch operation and runs any
+// custom request initialization.
+func (c *CloudWatch) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ // Run custom request initialization if present
+ if initRequest != nil {
+ initRequest(req)
+ }
+
+ return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/waiters.go
new file mode 100644
index 000000000..c1ca3f334
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/waiters.go
@@ -0,0 +1,30 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+ +package cloudwatch + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *CloudWatch) WaitUntilAlarmExists(input *DescribeAlarmsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeAlarms", + Delay: 5, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "path", + Argument: "length(MetricAlarms[]) > `0`", + Expected: true, + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/api.go new file mode 100644 index 000000000..2ae268d88 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/api.go @@ -0,0 +1,1622 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cloudwatchevents provides a client for Amazon CloudWatch Events. +package cloudwatchevents + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opDeleteRule = "DeleteRule" + +// DeleteRuleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRule operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteRule method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteRuleRequest method. +// req, resp := client.DeleteRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchEvents) DeleteRuleRequest(input *DeleteRuleInput) (req *request.Request, output *DeleteRuleOutput) { + op := &request.Operation{ + Name: opDeleteRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRuleInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteRuleOutput{} + req.Data = output + return +} + +// Deletes a rule. You must remove all targets from a rule using RemoveTargets +// before you can delete the rule. +// +// Note: When you delete a rule, incoming events might still continue to match +// to the deleted rule. Please allow a short period of time for changes to take +// effect. +func (c *CloudWatchEvents) DeleteRule(input *DeleteRuleInput) (*DeleteRuleOutput, error) { + req, out := c.DeleteRuleRequest(input) + err := req.Send() + return out, err +} + +const opDescribeRule = "DescribeRule" + +// DescribeRuleRequest generates a "aws/request.Request" representing the +// client's request for the DescribeRule operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeRule method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeRuleRequest method. +// req, resp := client.DescribeRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchEvents) DescribeRuleRequest(input *DescribeRuleInput) (req *request.Request, output *DescribeRuleOutput) { + op := &request.Operation{ + Name: opDescribeRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeRuleInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeRuleOutput{} + req.Data = output + return +} + +// Describes the details of the specified rule. +func (c *CloudWatchEvents) DescribeRule(input *DescribeRuleInput) (*DescribeRuleOutput, error) { + req, out := c.DescribeRuleRequest(input) + err := req.Send() + return out, err +} + +const opDisableRule = "DisableRule" + +// DisableRuleRequest generates a "aws/request.Request" representing the +// client's request for the DisableRule operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DisableRule method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DisableRuleRequest method. +// req, resp := client.DisableRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchEvents) DisableRuleRequest(input *DisableRuleInput) (req *request.Request, output *DisableRuleOutput) { + op := &request.Operation{ + Name: opDisableRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableRuleInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DisableRuleOutput{} + req.Data = output + return +} + +// Disables a rule. A disabled rule won't match any events, and won't self-trigger +// if it has a schedule expression. +// +// Note: When you disable a rule, incoming events might still continue to +// match to the disabled rule. Please allow a short period of time for changes +// to take effect. +func (c *CloudWatchEvents) DisableRule(input *DisableRuleInput) (*DisableRuleOutput, error) { + req, out := c.DisableRuleRequest(input) + err := req.Send() + return out, err +} + +const opEnableRule = "EnableRule" + +// EnableRuleRequest generates a "aws/request.Request" representing the +// client's request for the EnableRule operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the EnableRule method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EnableRuleRequest method. +// req, resp := client.EnableRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchEvents) EnableRuleRequest(input *EnableRuleInput) (req *request.Request, output *EnableRuleOutput) { + op := &request.Operation{ + Name: opEnableRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableRuleInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &EnableRuleOutput{} + req.Data = output + return +} + +// Enables a rule. If the rule does not exist, the operation fails. +// +// Note: When you enable a rule, incoming events might not immediately start +// matching to a newly enabled rule. Please allow a short period of time for +// changes to take effect. +func (c *CloudWatchEvents) EnableRule(input *EnableRuleInput) (*EnableRuleOutput, error) { + req, out := c.EnableRuleRequest(input) + err := req.Send() + return out, err +} + +const opListRuleNamesByTarget = "ListRuleNamesByTarget" + +// ListRuleNamesByTargetRequest generates a "aws/request.Request" representing the +// client's request for the ListRuleNamesByTarget operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListRuleNamesByTarget method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListRuleNamesByTargetRequest method. +// req, resp := client.ListRuleNamesByTargetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchEvents) ListRuleNamesByTargetRequest(input *ListRuleNamesByTargetInput) (req *request.Request, output *ListRuleNamesByTargetOutput) { + op := &request.Operation{ + Name: opListRuleNamesByTarget, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListRuleNamesByTargetInput{} + } + + req = c.newRequest(op, input, output) + output = &ListRuleNamesByTargetOutput{} + req.Data = output + return +} + +// Lists the names of the rules that the given target is put to. You can see +// which of the rules in Amazon CloudWatch Events can invoke a specific target +// in your account. If you have more rules in your account than the given limit, +// the results will be paginated. 
In that case, use the next token returned
+// in the response and repeat ListRuleNamesByTarget until the NextToken in the response
+// is returned as null.
+func (c *CloudWatchEvents) ListRuleNamesByTarget(input *ListRuleNamesByTargetInput) (*ListRuleNamesByTargetOutput, error) {
+ req, out := c.ListRuleNamesByTargetRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opListRules = "ListRules"
+
+// ListRulesRequest generates a "aws/request.Request" representing the
+// client's request for the ListRules operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ListRules method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the ListRulesRequest method.
+// req, resp := client.ListRulesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CloudWatchEvents) ListRulesRequest(input *ListRulesInput) (req *request.Request, output *ListRulesOutput) {
+ op := &request.Operation{
+ Name: opListRules,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ListRulesInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ListRulesOutput{}
+ req.Data = output
+ return
+}
+
+// Lists the Amazon CloudWatch Events rules in your account. You can either
+// list all the rules or you can provide a prefix to match to the rule names.
+// If you have more rules in your account than the given limit, the results
+// will be paginated. In that case, use the next token returned in the response
+// and repeat ListRules until the NextToken in the response is returned as null.
+func (c *CloudWatchEvents) ListRules(input *ListRulesInput) (*ListRulesOutput, error) {
+ req, out := c.ListRulesRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opListTargetsByRule = "ListTargetsByRule"
+
+// ListTargetsByRuleRequest generates a "aws/request.Request" representing the
+// client's request for the ListTargetsByRule operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ListTargetsByRule method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the ListTargetsByRuleRequest method.
+// req, resp := client.ListTargetsByRuleRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CloudWatchEvents) ListTargetsByRuleRequest(input *ListTargetsByRuleInput) (req *request.Request, output *ListTargetsByRuleOutput) {
+ op := &request.Operation{
+ Name: opListTargetsByRule,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ListTargetsByRuleInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ListTargetsByRuleOutput{}
+ req.Data = output
+ return
+}
+
+// Lists the targets assigned to the rule.
+func (c *CloudWatchEvents) ListTargetsByRule(input *ListTargetsByRuleInput) (*ListTargetsByRuleOutput, error) {
+ req, out := c.ListTargetsByRuleRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opPutEvents = "PutEvents"
+
+// PutEventsRequest generates a "aws/request.Request" representing the
+// client's request for the PutEvents operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutEvents method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the PutEventsRequest method.
+// req, resp := client.PutEventsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CloudWatchEvents) PutEventsRequest(input *PutEventsInput) (req *request.Request, output *PutEventsOutput) {
+ op := &request.Operation{
+ Name: opPutEvents,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &PutEventsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &PutEventsOutput{}
+ req.Data = output
+ return
+}
+
+// Sends custom events to Amazon CloudWatch Events so that they can be matched
+// to rules.
+func (c *CloudWatchEvents) PutEvents(input *PutEventsInput) (*PutEventsOutput, error) {
+ req, out := c.PutEventsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opPutRule = "PutRule"
+
+// PutRuleRequest generates a "aws/request.Request" representing the
+// client's request for the PutRule operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutRule method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the PutRuleRequest method.
+// req, resp := client.PutRuleRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CloudWatchEvents) PutRuleRequest(input *PutRuleInput) (req *request.Request, output *PutRuleOutput) {
+ op := &request.Operation{
+ Name: opPutRule,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &PutRuleInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &PutRuleOutput{}
+ req.Data = output
+ return
+}
+
+// Creates or updates a rule. Rules are enabled by default, or based on the
+// value of the State parameter. You can disable a rule using DisableRule.
+//
+// Note: When you create or update a rule, incoming events might not immediately
+// start matching to new or updated rules. Please allow a short period of time
+// for changes to take effect.
+//
+// A rule must contain at least an EventPattern or ScheduleExpression. Rules
+// with EventPatterns are triggered when a matching event is observed. Rules
+// with ScheduleExpressions self-trigger based on the given schedule. A rule
+// can have both an EventPattern and a ScheduleExpression, in which case the
+// rule will trigger on matching events as well as on a schedule.
+//
+// Note: Most services in AWS treat : or / as the same character in Amazon
+// Resource Names (ARNs). However, CloudWatch Events uses an exact match in
+// event patterns and rules. Be sure to use the correct ARN characters when
+// creating event patterns so that they match the ARN syntax in the event you
+// want to match.
+func (c *CloudWatchEvents) PutRule(input *PutRuleInput) (*PutRuleOutput, error) {
+ req, out := c.PutRuleRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opPutTargets = "PutTargets"
+
+// PutTargetsRequest generates a "aws/request.Request" representing the
+// client's request for the PutTargets operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutTargets method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the PutTargetsRequest method.
+// req, resp := client.PutTargetsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CloudWatchEvents) PutTargetsRequest(input *PutTargetsInput) (req *request.Request, output *PutTargetsOutput) {
+ op := &request.Operation{
+ Name: opPutTargets,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &PutTargetsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &PutTargetsOutput{}
+ req.Data = output
+ return
+}
+
+// Adds target(s) to a rule. Targets are the resources that can be invoked when
+// a rule is triggered. For example, AWS Lambda functions, Amazon Kinesis streams,
+// and built-in targets. Updates the target(s) if they are already associated
+// with the rule. In other words, if there is already a target with the given
+// target ID, then the target associated with that ID is updated.
+//
+// In order to be able to make API calls against the resources you own, Amazon
+// CloudWatch Events needs the appropriate permissions. For AWS Lambda and Amazon
+// SNS resources, CloudWatch Events relies on resource-based policies. For Amazon
+// Kinesis streams, CloudWatch Events relies on IAM roles. For more information,
+// see Permissions for Sending Events to Targets (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/EventsTargetPermissions.html)
+// in the Amazon CloudWatch Developer Guide.
+//
+// Input and InputPath are mutually exclusive and optional parameters of a
+// target. When a rule is triggered due to a matched event, for a target:
+//
+//   - If neither Input nor InputPath is specified, then the entire event is
+//     passed to the target in JSON form.
+//   - If InputPath is specified in the form of JSONPath (e.g. $.detail), then
+//     only the part of the event specified in the path is passed to the target
+//     (e.g. only the detail part of the event is passed).
+//   - If Input is specified in the form of a valid JSON, then the matched event
+//     is overridden with this constant.
+//
+// Note: When you add targets to a rule, when the associated rule triggers,
+// new or updated targets might not be immediately invoked. Please allow a short
+// period of time for changes to take effect.
+func (c *CloudWatchEvents) PutTargets(input *PutTargetsInput) (*PutTargetsOutput, error) {
+ req, out := c.PutTargetsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opRemoveTargets = "RemoveTargets"
+
+// RemoveTargetsRequest generates a "aws/request.Request" representing the
+// client's request for the RemoveTargets operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the RemoveTargets method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the RemoveTargetsRequest method.
+// req, resp := client.RemoveTargetsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CloudWatchEvents) RemoveTargetsRequest(input *RemoveTargetsInput) (req *request.Request, output *RemoveTargetsOutput) {
+ op := &request.Operation{
+ Name: opRemoveTargets,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &RemoveTargetsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &RemoveTargetsOutput{}
+ req.Data = output
+ return
+}
+
+// Removes target(s) from a rule so that when the rule is triggered, those targets
+// will no longer be invoked.
+//
+// Note: When you remove a target, it might still continue to be invoked when
+// the associated rule triggers. Please allow a short period of time for changes
+// to take effect.
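+//
+// A hedged sketch of calling this operation from client code (the rule name
+// and target ID are invented; Rule and Ids refer to the RemoveTargetsInput
+// fields generated later in this file):
+//
+// resp, err := svc.RemoveTargets(&cloudwatchevents.RemoveTargetsInput{
+//     Rule: aws.String("my-scheduled-rule"),
+//     Ids:  []*string{aws.String("target-1")},
+// })
+// if err == nil {
+//     fmt.Println(resp) // any entries that could not be removed are reported here
+// }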
+func (c *CloudWatchEvents) RemoveTargets(input *RemoveTargetsInput) (*RemoveTargetsOutput, error) { + req, out := c.RemoveTargetsRequest(input) + err := req.Send() + return out, err +} + +const opTestEventPattern = "TestEventPattern" + +// TestEventPatternRequest generates a "aws/request.Request" representing the +// client's request for the TestEventPattern operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the TestEventPattern method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the TestEventPatternRequest method. +// req, resp := client.TestEventPatternRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchEvents) TestEventPatternRequest(input *TestEventPatternInput) (req *request.Request, output *TestEventPatternOutput) { + op := &request.Operation{ + Name: opTestEventPattern, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TestEventPatternInput{} + } + + req = c.newRequest(op, input, output) + output = &TestEventPatternOutput{} + req.Data = output + return +} + +// Tests whether an event pattern matches the provided event. +// +// Note: Most services in AWS treat : or / as the same character in Amazon +// Resource Names (ARNs). However, CloudWatch Events uses an exact match in +// event patterns and rules. Be sure to use the correct ARN characters when +// creating event patterns so that they match the ARN syntax in the event you +// want to match. +func (c *CloudWatchEvents) TestEventPattern(input *TestEventPatternInput) (*TestEventPatternOutput, error) { + req, out := c.TestEventPatternRequest(input) + err := req.Send() + return out, err +} + +// Container for the parameters to the DeleteRule operation. +type DeleteRuleInput struct { + _ struct{} `type:"structure"` + + // The name of the rule to be deleted. + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteRuleInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteRuleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRuleOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DescribeRule operation. 
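+//
+// // Illustrative sketch (the rule name is a placeholder assumption):
+// out, err := client.DescribeRule(&DescribeRuleInput{Name: aws.String("my-rule")})
+// // On success, out carries the rule's Arn, State, and ScheduleExpression.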
+type DescribeRuleInput struct { + _ struct{} `type:"structure"` + + // The name of the rule you want to describe details for. + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeRuleInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of the DescribeRule operation. +type DescribeRuleOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) associated with the rule. + Arn *string `min:"1" type:"string"` + + // The rule's description. + Description *string `type:"string"` + + // The event pattern. + EventPattern *string `type:"string"` + + // The rule's name. + Name *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the IAM role associated with the rule. + RoleArn *string `min:"1" type:"string"` + + // The scheduling expression. For example, "cron(0 20 * * ? *)", "rate(5 minutes)". + ScheduleExpression *string `type:"string"` + + // Specifies whether the rule is enabled or disabled. + State *string `type:"string" enum:"RuleState"` +} + +// String returns the string representation +func (s DescribeRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRuleOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DisableRule operation. +type DisableRuleInput struct { + _ struct{} `type:"structure"` + + // The name of the rule you want to disable. + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DisableRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DisableRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisableRuleInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DisableRuleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisableRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableRuleOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the EnableRule operation. +type EnableRuleInput struct { + _ struct{} `type:"structure"` + + // The name of the rule that you want to enable. 
+ Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s EnableRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *EnableRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EnableRuleInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type EnableRuleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s EnableRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableRuleOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the ListRuleNamesByTarget operation. +type ListRuleNamesByTargetInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to return. + Limit *int64 `min:"1" type:"integer"` + + // The token returned by a previous call to indicate that there is more data + // available. + NextToken *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the target resource that you want to list + // the rules for. + TargetArn *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListRuleNamesByTargetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRuleNamesByTargetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListRuleNamesByTargetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListRuleNamesByTargetInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.TargetArn == nil { + invalidParams.Add(request.NewErrParamRequired("TargetArn")) + } + if s.TargetArn != nil && len(*s.TargetArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TargetArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of the ListRuleNamesByTarget operation. +type ListRuleNamesByTargetOutput struct { + _ struct{} `type:"structure"` + + // Indicates that there are additional results to retrieve. + NextToken *string `min:"1" type:"string"` + + // List of rules names that can invoke the given target. + RuleNames []*string `type:"list"` +} + +// String returns the string representation +func (s ListRuleNamesByTargetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRuleNamesByTargetOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the ListRules operation. +type ListRulesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to return. + Limit *int64 `min:"1" type:"integer"` + + // The prefix matching the rule name. 
+ NamePrefix *string `min:"1" type:"string"` + + // The token returned by a previous call to indicate that there is more data + // available. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListRulesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRulesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListRulesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListRulesInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NamePrefix != nil && len(*s.NamePrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NamePrefix", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of the ListRules operation. +type ListRulesOutput struct { + _ struct{} `type:"structure"` + + // Indicates that there are additional results to retrieve. + NextToken *string `min:"1" type:"string"` + + // List of rules matching the specified criteria. + Rules []*Rule `type:"list"` +} + +// String returns the string representation +func (s ListRulesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRulesOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the ListTargetsByRule operation. +type ListTargetsByRuleInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to return. + Limit *int64 `min:"1" type:"integer"` + + // The token returned by a previous call to indicate that there is more data + // available. + NextToken *string `min:"1" type:"string"` + + // The name of the rule whose targets you want to list. + Rule *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTargetsByRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTargetsByRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTargetsByRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTargetsByRuleInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.Rule == nil { + invalidParams.Add(request.NewErrParamRequired("Rule")) + } + if s.Rule != nil && len(*s.Rule) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Rule", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of the ListTargetsByRule operation. +type ListTargetsByRuleOutput struct { + _ struct{} `type:"structure"` + + // Indicates that there are additional results to retrieve. + NextToken *string `min:"1" type:"string"` + + // Lists the targets assigned to the rule. 
+	Targets []*Target `type:"list"`
+}
+
+// String returns the string representation
+func (s ListTargetsByRuleOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTargetsByRuleOutput) GoString() string {
+	return s.String()
+}
+
+// Container for the parameters to the PutEvents operation.
+type PutEventsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The entry that defines an event in your system. You can specify several parameters
+	// for the entry such as the source and type of the event, resources associated
+	// with the event, and so on.
+	Entries []*PutEventsRequestEntry `min:"1" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s PutEventsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutEventsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutEventsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PutEventsInput"}
+	if s.Entries == nil {
+		invalidParams.Add(request.NewErrParamRequired("Entries"))
+	}
+	if s.Entries != nil && len(s.Entries) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Entries", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The result of the PutEvents operation.
+type PutEventsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of results for the ingested events, both successful and unsuccessful.
+	// If the ingestion was successful, the entry will have the event ID in it.
+	// If not, then the ErrorCode and ErrorMessage can be used to identify the
+	// problem with the entry.
+	Entries []*PutEventsResultEntry `type:"list"`
+
+	// The number of failed entries.
+	FailedEntryCount *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s PutEventsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutEventsOutput) GoString() string {
+	return s.String()
+}
+
+// Contains information about the event to be used in PutEvents.
+type PutEventsRequestEntry struct {
+	_ struct{} `type:"structure"`
+
+	// In the JSON sense, an object containing fields, which may also contain nested
+	// sub-objects. No constraints are imposed on its contents.
+	Detail *string `type:"string"`
+
+	// Free-form string used to decide what fields to expect in the event detail.
+	DetailType *string `type:"string"`
+
+	// AWS resources, identified by Amazon Resource Name (ARN), which the event
+	// primarily concerns. Any number, including zero, may be present.
+	Resources []*string `type:"list"`
+
+	// The source of the event.
+	Source *string `type:"string"`
+
+	// Timestamp of event, per RFC3339 (https://www.rfc-editor.org/rfc/rfc3339.txt).
+	// If no timestamp is provided, the timestamp of the PutEvents call will be
+	// used.
+	Time *time.Time `type:"timestamp" timestampFormat:"unix"`
+}
+
+// String returns the string representation
+func (s PutEventsRequestEntry) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutEventsRequestEntry) GoString() string {
+	return s.String()
+}
+
+// A PutEventsResult contains a list of PutEventsResultEntry.
+type PutEventsResultEntry struct {
+	_ struct{} `type:"structure"`
+
+	// The error code representing why the event submission failed on this entry.
+ ErrorCode *string `type:"string"` + + // The error message explaining why the event submission failed on this entry. + ErrorMessage *string `type:"string"` + + // The ID of the event submitted to Amazon CloudWatch Events. + EventId *string `type:"string"` +} + +// String returns the string representation +func (s PutEventsResultEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutEventsResultEntry) GoString() string { + return s.String() +} + +// Container for the parameters to the PutRule operation. +type PutRuleInput struct { + _ struct{} `type:"structure"` + + // A description of the rule. + Description *string `type:"string"` + + // The event pattern. + EventPattern *string `type:"string"` + + // The name of the rule that you are creating or updating. + Name *string `min:"1" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the IAM role associated with the rule. + RoleArn *string `min:"1" type:"string"` + + // The scheduling expression. For example, "cron(0 20 * * ? *)", "rate(5 minutes)". + ScheduleExpression *string `type:"string"` + + // Indicates whether the rule is enabled or disabled. + State *string `type:"string" enum:"RuleState"` +} + +// String returns the string representation +func (s PutRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutRuleInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.RoleArn != nil && len(*s.RoleArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of the PutRule operation. +type PutRuleOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) that identifies the rule. + RuleArn *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PutRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRuleOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the PutTargets operation. +type PutTargetsInput struct { + _ struct{} `type:"structure"` + + // The name of the rule you want to add targets to. + Rule *string `min:"1" type:"string" required:"true"` + + // List of targets you want to update or add to the rule. + Targets []*Target `type:"list" required:"true"` +} + +// String returns the string representation +func (s PutTargetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutTargetsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
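+//
+// // Illustrative sketch: Validate runs client-side before any request is sent,
+// // so a missing required field surfaces immediately (hypothetical input):
+// err := (&PutTargetsInput{Rule: aws.String("my-rule")}).Validate()
+// // err is non-nil here because the required Targets field is unset.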
+func (s *PutTargetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutTargetsInput"} + if s.Rule == nil { + invalidParams.Add(request.NewErrParamRequired("Rule")) + } + if s.Rule != nil && len(*s.Rule) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Rule", 1)) + } + if s.Targets == nil { + invalidParams.Add(request.NewErrParamRequired("Targets")) + } + if s.Targets != nil { + for i, v := range s.Targets { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Targets", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of the PutTargets operation. +type PutTargetsOutput struct { + _ struct{} `type:"structure"` + + // An array of failed target entries. + FailedEntries []*PutTargetsResultEntry `type:"list"` + + // The number of failed entries. + FailedEntryCount *int64 `type:"integer"` +} + +// String returns the string representation +func (s PutTargetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutTargetsOutput) GoString() string { + return s.String() +} + +// A PutTargetsResult contains a list of PutTargetsResultEntry. +type PutTargetsResultEntry struct { + _ struct{} `type:"structure"` + + // The error code representing why the target submission failed on this entry. + ErrorCode *string `type:"string"` + + // The error message explaining why the target submission failed on this entry. + ErrorMessage *string `type:"string"` + + // The ID of the target submitted to Amazon CloudWatch Events. + TargetId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PutTargetsResultEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutTargetsResultEntry) GoString() string { + return s.String() +} + +// Container for the parameters to the RemoveTargets operation. +type RemoveTargetsInput struct { + _ struct{} `type:"structure"` + + // The list of target IDs to remove from the rule. + Ids []*string `min:"1" type:"list" required:"true"` + + // The name of the rule you want to remove targets from. + Rule *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RemoveTargetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTargetsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RemoveTargetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemoveTargetsInput"} + if s.Ids == nil { + invalidParams.Add(request.NewErrParamRequired("Ids")) + } + if s.Ids != nil && len(s.Ids) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Ids", 1)) + } + if s.Rule == nil { + invalidParams.Add(request.NewErrParamRequired("Rule")) + } + if s.Rule != nil && len(*s.Rule) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Rule", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of the RemoveTargets operation. +type RemoveTargetsOutput struct { + _ struct{} `type:"structure"` + + // An array of failed target entries. + FailedEntries []*RemoveTargetsResultEntry `type:"list"` + + // The number of failed entries. 
+	FailedEntryCount *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s RemoveTargetsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RemoveTargetsOutput) GoString() string {
+	return s.String()
+}
+
+// The ID of the target requested to be removed from the rule by Amazon CloudWatch
+// Events.
+type RemoveTargetsResultEntry struct {
+	_ struct{} `type:"structure"`
+
+	// The error code representing why the target removal failed on this entry.
+	ErrorCode *string `type:"string"`
+
+	// The error message explaining why the target removal failed on this entry.
+	ErrorMessage *string `type:"string"`
+
+	// The ID of the target requested to be removed by Amazon CloudWatch Events.
+	TargetId *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s RemoveTargetsResultEntry) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RemoveTargetsResultEntry) GoString() string {
+	return s.String()
+}
+
+// Contains information about a rule in Amazon CloudWatch Events. A ListRulesResult
+// contains a list of Rules.
+type Rule struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) of the rule.
+	Arn *string `min:"1" type:"string"`
+
+	// The description of the rule.
+	Description *string `type:"string"`
+
+	// The event pattern of the rule.
+	EventPattern *string `type:"string"`
+
+	// The rule's name.
+	Name *string `min:"1" type:"string"`
+
+	// The Amazon Resource Name (ARN) associated with the role that is used for
+	// target invocation.
+	RoleArn *string `min:"1" type:"string"`
+
+	// The scheduling expression. For example, "cron(0 20 * * ? *)", "rate(5 minutes)".
+	ScheduleExpression *string `type:"string"`
+
+	// The rule's state.
+	State *string `type:"string" enum:"RuleState"`
+}
+
+// String returns the string representation
+func (s Rule) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Rule) GoString() string {
+	return s.String()
+}
+
+// Targets are the resources that can be invoked when a rule is triggered; for
+// example, AWS Lambda functions, Amazon Kinesis streams, and built-in targets.
+//
+// Input and InputPath are mutually exclusive and optional parameters of a
+// target. When a rule is triggered due to a matched event:
+//
+//   * If neither Input nor InputPath is specified, the entire event is passed
+//     to the target in JSON form.
+//   * If InputPath is specified in the form of a JSONPath (e.g. $.detail),
+//     only the part of the event specified in the path is passed to the target
+//     (e.g. only the detail part of the event is passed).
+//   * If Input is specified in the form of valid JSON, the matched event is
+//     overridden with this constant.
+type Target struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) associated with the target.
+	Arn *string `min:"1" type:"string" required:"true"`
+
+	// The unique target assignment ID.
+	Id *string `min:"1" type:"string" required:"true"`
+
+	// Valid JSON text passed to the target. For more information about JSON text,
+	// see The JavaScript Object Notation (JSON) Data Interchange Format (http://www.rfc-editor.org/rfc/rfc7159.txt).
+	Input *string `type:"string"`
+
+	// The value of the JSONPath that is used for extracting part of the matched
+	// event when passing it to the target.
For more information about JSON paths, + // see JSONPath (http://goessner.net/articles/JsonPath/). + InputPath *string `type:"string"` +} + +// String returns the string representation +func (s Target) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Target) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Target) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Target"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) + } + if s.Arn != nil && len(*s.Arn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Container for the parameters to the TestEventPattern operation. +type TestEventPatternInput struct { + _ struct{} `type:"structure"` + + // The event in the JSON format to test against the event pattern. + Event *string `type:"string" required:"true"` + + // The event pattern you want to test. + EventPattern *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s TestEventPatternInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TestEventPatternInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TestEventPatternInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TestEventPatternInput"} + if s.Event == nil { + invalidParams.Add(request.NewErrParamRequired("Event")) + } + if s.EventPattern == nil { + invalidParams.Add(request.NewErrParamRequired("EventPattern")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of the TestEventPattern operation. +type TestEventPatternOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether the event matches the event pattern. + Result *bool `type:"boolean"` +} + +// String returns the string representation +func (s TestEventPatternOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TestEventPatternOutput) GoString() string { + return s.String() +} + +const ( + // @enum RuleState + RuleStateEnabled = "ENABLED" + // @enum RuleState + RuleStateDisabled = "DISABLED" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/cloudwatcheventsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/cloudwatcheventsiface/interface.go new file mode 100644 index 000000000..92d299f1e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/cloudwatcheventsiface/interface.go @@ -0,0 +1,62 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cloudwatcheventsiface provides an interface for the Amazon CloudWatch Events. +package cloudwatcheventsiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/cloudwatchevents" +) + +// CloudWatchEventsAPI is the interface type for cloudwatchevents.CloudWatchEvents. 
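+//
+// // Illustrative sketch (mockEventsClient is a hypothetical test type, not
+// // part of this package): embed the interface to satisfy it while stubbing
+// // only the operations a test needs.
+// type mockEventsClient struct {
+// 	cloudwatcheventsiface.CloudWatchEventsAPI
+// }
+// func worker(svc cloudwatcheventsiface.CloudWatchEventsAPI) { /* ... */ }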
+type CloudWatchEventsAPI interface { + DeleteRuleRequest(*cloudwatchevents.DeleteRuleInput) (*request.Request, *cloudwatchevents.DeleteRuleOutput) + + DeleteRule(*cloudwatchevents.DeleteRuleInput) (*cloudwatchevents.DeleteRuleOutput, error) + + DescribeRuleRequest(*cloudwatchevents.DescribeRuleInput) (*request.Request, *cloudwatchevents.DescribeRuleOutput) + + DescribeRule(*cloudwatchevents.DescribeRuleInput) (*cloudwatchevents.DescribeRuleOutput, error) + + DisableRuleRequest(*cloudwatchevents.DisableRuleInput) (*request.Request, *cloudwatchevents.DisableRuleOutput) + + DisableRule(*cloudwatchevents.DisableRuleInput) (*cloudwatchevents.DisableRuleOutput, error) + + EnableRuleRequest(*cloudwatchevents.EnableRuleInput) (*request.Request, *cloudwatchevents.EnableRuleOutput) + + EnableRule(*cloudwatchevents.EnableRuleInput) (*cloudwatchevents.EnableRuleOutput, error) + + ListRuleNamesByTargetRequest(*cloudwatchevents.ListRuleNamesByTargetInput) (*request.Request, *cloudwatchevents.ListRuleNamesByTargetOutput) + + ListRuleNamesByTarget(*cloudwatchevents.ListRuleNamesByTargetInput) (*cloudwatchevents.ListRuleNamesByTargetOutput, error) + + ListRulesRequest(*cloudwatchevents.ListRulesInput) (*request.Request, *cloudwatchevents.ListRulesOutput) + + ListRules(*cloudwatchevents.ListRulesInput) (*cloudwatchevents.ListRulesOutput, error) + + ListTargetsByRuleRequest(*cloudwatchevents.ListTargetsByRuleInput) (*request.Request, *cloudwatchevents.ListTargetsByRuleOutput) + + ListTargetsByRule(*cloudwatchevents.ListTargetsByRuleInput) (*cloudwatchevents.ListTargetsByRuleOutput, error) + + PutEventsRequest(*cloudwatchevents.PutEventsInput) (*request.Request, *cloudwatchevents.PutEventsOutput) + + PutEvents(*cloudwatchevents.PutEventsInput) (*cloudwatchevents.PutEventsOutput, error) + + PutRuleRequest(*cloudwatchevents.PutRuleInput) (*request.Request, *cloudwatchevents.PutRuleOutput) + + PutRule(*cloudwatchevents.PutRuleInput) (*cloudwatchevents.PutRuleOutput, error) + + PutTargetsRequest(*cloudwatchevents.PutTargetsInput) (*request.Request, *cloudwatchevents.PutTargetsOutput) + + PutTargets(*cloudwatchevents.PutTargetsInput) (*cloudwatchevents.PutTargetsOutput, error) + + RemoveTargetsRequest(*cloudwatchevents.RemoveTargetsInput) (*request.Request, *cloudwatchevents.RemoveTargetsOutput) + + RemoveTargets(*cloudwatchevents.RemoveTargetsInput) (*cloudwatchevents.RemoveTargetsOutput, error) + + TestEventPatternRequest(*cloudwatchevents.TestEventPatternInput) (*request.Request, *cloudwatchevents.TestEventPatternOutput) + + TestEventPattern(*cloudwatchevents.TestEventPatternInput) (*cloudwatchevents.TestEventPatternOutput, error) +} + +var _ CloudWatchEventsAPI = (*cloudwatchevents.CloudWatchEvents)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/examples_test.go new file mode 100644 index 000000000..e0f725d80 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/examples_test.go @@ -0,0 +1,281 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
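+
+// The examples below all follow one pattern: build an input struct, call the
+// operation, and print either the error or the response. A hedged sketch of
+// pulling the service error code out of a failure (awserr is this SDK's
+// typed-error package):
+//
+//	if aerr, ok := err.(awserr.Error); ok {
+//		fmt.Println(aerr.Code(), aerr.Message())
+//	}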
+ +package cloudwatchevents_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/cloudwatchevents" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleCloudWatchEvents_DeleteRule() { + svc := cloudwatchevents.New(session.New()) + + params := &cloudwatchevents.DeleteRuleInput{ + Name: aws.String("RuleName"), // Required + } + resp, err := svc.DeleteRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchEvents_DescribeRule() { + svc := cloudwatchevents.New(session.New()) + + params := &cloudwatchevents.DescribeRuleInput{ + Name: aws.String("RuleName"), // Required + } + resp, err := svc.DescribeRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchEvents_DisableRule() { + svc := cloudwatchevents.New(session.New()) + + params := &cloudwatchevents.DisableRuleInput{ + Name: aws.String("RuleName"), // Required + } + resp, err := svc.DisableRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchEvents_EnableRule() { + svc := cloudwatchevents.New(session.New()) + + params := &cloudwatchevents.EnableRuleInput{ + Name: aws.String("RuleName"), // Required + } + resp, err := svc.EnableRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchEvents_ListRuleNamesByTarget() { + svc := cloudwatchevents.New(session.New()) + + params := &cloudwatchevents.ListRuleNamesByTargetInput{ + TargetArn: aws.String("TargetArn"), // Required + Limit: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListRuleNamesByTarget(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchEvents_ListRules() { + svc := cloudwatchevents.New(session.New()) + + params := &cloudwatchevents.ListRulesInput{ + Limit: aws.Int64(1), + NamePrefix: aws.String("RuleName"), + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListRules(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchEvents_ListTargetsByRule() { + svc := cloudwatchevents.New(session.New()) + + params := &cloudwatchevents.ListTargetsByRuleInput{ + Rule: aws.String("RuleName"), // Required + Limit: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListTargetsByRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchEvents_PutEvents() { + svc := cloudwatchevents.New(session.New()) + + params := &cloudwatchevents.PutEventsInput{ + Entries: []*cloudwatchevents.PutEventsRequestEntry{ // Required + { // Required + Detail: aws.String("String"), + DetailType: aws.String("String"), + Resources: []*string{ + aws.String("EventResource"), // Required + // More values... + }, + Source: aws.String("String"), + Time: aws.Time(time.Now()), + }, + // More values... + }, + } + resp, err := svc.PutEvents(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchEvents_PutRule() { + svc := cloudwatchevents.New(session.New()) + + params := &cloudwatchevents.PutRuleInput{ + Name: aws.String("RuleName"), // Required + Description: aws.String("RuleDescription"), + EventPattern: aws.String("EventPattern"), + RoleArn: aws.String("RoleArn"), + ScheduleExpression: aws.String("ScheduleExpression"), + State: aws.String("RuleState"), + } + resp, err := svc.PutRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchEvents_PutTargets() { + svc := cloudwatchevents.New(session.New()) + + params := &cloudwatchevents.PutTargetsInput{ + Rule: aws.String("RuleName"), // Required + Targets: []*cloudwatchevents.Target{ // Required + { // Required + Arn: aws.String("TargetArn"), // Required + Id: aws.String("TargetId"), // Required + Input: aws.String("TargetInput"), + InputPath: aws.String("TargetInputPath"), + }, + // More values... + }, + } + resp, err := svc.PutTargets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchEvents_RemoveTargets() { + svc := cloudwatchevents.New(session.New()) + + params := &cloudwatchevents.RemoveTargetsInput{ + Ids: []*string{ // Required + aws.String("TargetId"), // Required + // More values... + }, + Rule: aws.String("RuleName"), // Required + } + resp, err := svc.RemoveTargets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchEvents_TestEventPattern() { + svc := cloudwatchevents.New(session.New()) + + params := &cloudwatchevents.TestEventPatternInput{ + Event: aws.String("String"), // Required + EventPattern: aws.String("EventPattern"), // Required + } + resp, err := svc.TestEventPattern(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+	fmt.Println(resp)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/service.go
new file mode 100644
index 000000000..0501a80d9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/service.go
@@ -0,0 +1,101 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package cloudwatchevents
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+)
+
+// Amazon CloudWatch Events helps you to respond to state changes in your AWS
+// resources. When your resources change state they automatically send events
+// into an event stream. You can create rules that match selected events in
+// the stream and route them to targets to take action. You can also use rules
+// to take action on a pre-determined schedule. For example, you can configure
+// rules to:
+//
+//   * Automatically invoke an AWS Lambda function to update DNS entries when
+//     an event notifies you that an Amazon EC2 instance enters the running state.
+//   * Direct specific API records from CloudTrail to an Amazon Kinesis stream
+//     for detailed analysis of potential security or availability risks.
+//   * Periodically invoke a built-in target to create a snapshot of an Amazon
+//     EBS volume.
+//
+// For more information about Amazon CloudWatch Events features, see the Amazon
+// CloudWatch Developer Guide (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide).
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties, though.
+type CloudWatchEvents struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "events"
+
+// New creates a new instance of the CloudWatchEvents client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a CloudWatchEvents client from just a session.
+// svc := cloudwatchevents.New(mySession)
+//
+// // Create a CloudWatchEvents client with additional configuration
+// svc := cloudwatchevents.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudWatchEvents {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes, and returns a new service client instance.
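+//
+// The handler wiring below is what routes every operation through SigV4
+// signing and the shared JSON-RPC marshalers (JSON version 1.1, target prefix
+// "AWSEvents"); initClient and initRequest are optional hooks for extra
+// setup registered elsewhere in the package.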
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *CloudWatchEvents { + svc := &CloudWatchEvents{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-10-07", + JSONVersion: "1.1", + TargetPrefix: "AWSEvents", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a CloudWatchEvents operation and runs any +// custom request initialization. +func (c *CloudWatchEvents) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go new file mode 100644 index 000000000..d2f65e266 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go @@ -0,0 +1,3785 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cloudwatchlogs provides a client for Amazon CloudWatch Logs. +package cloudwatchlogs + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opCancelExportTask = "CancelExportTask" + +// CancelExportTaskRequest generates a "aws/request.Request" representing the +// client's request for the CancelExportTask operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CancelExportTask method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CancelExportTaskRequest method. +// req, resp := client.CancelExportTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchLogs) CancelExportTaskRequest(input *CancelExportTaskInput) (req *request.Request, output *CancelExportTaskOutput) { + op := &request.Operation{ + Name: opCancelExportTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelExportTaskInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CancelExportTaskOutput{} + req.Data = output + return +} + +// Cancels an export task if it is in PENDING or RUNNING state. 
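+//
+// // Illustrative sketch (the task ID is a placeholder assumption):
+// _, err := client.CancelExportTask(&CancelExportTaskInput{
+// 	TaskId: aws.String("example-task-id"),
+// })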
+func (c *CloudWatchLogs) CancelExportTask(input *CancelExportTaskInput) (*CancelExportTaskOutput, error) { + req, out := c.CancelExportTaskRequest(input) + err := req.Send() + return out, err +} + +const opCreateExportTask = "CreateExportTask" + +// CreateExportTaskRequest generates a "aws/request.Request" representing the +// client's request for the CreateExportTask operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateExportTask method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateExportTaskRequest method. +// req, resp := client.CreateExportTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchLogs) CreateExportTaskRequest(input *CreateExportTaskInput) (req *request.Request, output *CreateExportTaskOutput) { + op := &request.Operation{ + Name: opCreateExportTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateExportTaskInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateExportTaskOutput{} + req.Data = output + return +} + +// Creates an ExportTask which allows you to efficiently export data from a +// Log Group to your Amazon S3 bucket. +// +// This is an asynchronous call. If all the required information is provided, +// this API will initiate an export task and respond with the task Id. Once +// started, DescribeExportTasks can be used to get the status of an export task. +// You can only have one active (RUNNING or PENDING) export task at a time, +// per account. +// +// You can export logs from multiple log groups or multiple time ranges to +// the same Amazon S3 bucket. To separate out log data for each export task, +// you can specify a prefix that will be used as the Amazon S3 key prefix for +// all exported objects. +func (c *CloudWatchLogs) CreateExportTask(input *CreateExportTaskInput) (*CreateExportTaskOutput, error) { + req, out := c.CreateExportTaskRequest(input) + err := req.Send() + return out, err +} + +const opCreateLogGroup = "CreateLogGroup" + +// CreateLogGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateLogGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateLogGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateLogGroupRequest method. 
+// req, resp := client.CreateLogGroupRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CloudWatchLogs) CreateLogGroupRequest(input *CreateLogGroupInput) (req *request.Request, output *CreateLogGroupOutput) {
+	op := &request.Operation{
+		Name:       opCreateLogGroup,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateLogGroupInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	output = &CreateLogGroupOutput{}
+	req.Data = output
+	return
+}
+
+// Creates a new log group with the specified name. The name of the log group
+// must be unique within a region for an AWS account. You can create up to 500
+// log groups per account.
+//
+// You must use the following guidelines when naming a log group:
+//
+//   * Log group names can be between 1 and 512 characters long.
+//   * Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen),
+//     '/' (forward slash), and '.' (period).
+func (c *CloudWatchLogs) CreateLogGroup(input *CreateLogGroupInput) (*CreateLogGroupOutput, error) {
+	req, out := c.CreateLogGroupRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateLogStream = "CreateLogStream"
+
+// CreateLogStreamRequest generates a "aws/request.Request" representing the
+// client's request for the CreateLogStream operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CreateLogStream method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the CreateLogStreamRequest method.
+// req, resp := client.CreateLogStreamRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CloudWatchLogs) CreateLogStreamRequest(input *CreateLogStreamInput) (req *request.Request, output *CreateLogStreamOutput) {
+	op := &request.Operation{
+		Name:       opCreateLogStream,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateLogStreamInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	output = &CreateLogStreamOutput{}
+	req.Data = output
+	return
+}
+
+// Creates a new log stream in the specified log group. The name of the log
+// stream must be unique within the log group. There is no limit on the number
+// of log streams that can exist in a log group.
+//
+// You must use the following guidelines when naming a log stream:
+//
+//   * Log stream names can be between 1 and 512 characters long.
+//   * The ':' (colon) character is not allowed.
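+//
+// // Illustrative sketch (names are placeholder assumptions); the log group
+// // must already exist before the stream is created:
+// _, err := client.CreateLogStream(&CreateLogStreamInput{
+// 	LogGroupName:  aws.String("my-group"),
+// 	LogStreamName: aws.String("my-stream"),
+// })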
+func (c *CloudWatchLogs) CreateLogStream(input *CreateLogStreamInput) (*CreateLogStreamOutput, error) { + req, out := c.CreateLogStreamRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDestination = "DeleteDestination" + +// DeleteDestinationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDestination operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDestination method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteDestinationRequest method. +// req, resp := client.DeleteDestinationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchLogs) DeleteDestinationRequest(input *DeleteDestinationInput) (req *request.Request, output *DeleteDestinationOutput) { + op := &request.Operation{ + Name: opDeleteDestination, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDestinationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteDestinationOutput{} + req.Data = output + return +} + +// Deletes the destination with the specified name and eventually disables all +// the subscription filters that publish to it. This will not delete the physical +// resource encapsulated by the destination. +func (c *CloudWatchLogs) DeleteDestination(input *DeleteDestinationInput) (*DeleteDestinationOutput, error) { + req, out := c.DeleteDestinationRequest(input) + err := req.Send() + return out, err +} + +const opDeleteLogGroup = "DeleteLogGroup" + +// DeleteLogGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteLogGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteLogGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteLogGroupRequest method. 
+// req, resp := client.DeleteLogGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchLogs) DeleteLogGroupRequest(input *DeleteLogGroupInput) (req *request.Request, output *DeleteLogGroupOutput) { + op := &request.Operation{ + Name: opDeleteLogGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLogGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteLogGroupOutput{} + req.Data = output + return +} + +// Deletes the log group with the specified name and permanently deletes all +// the archived log events associated with it. +func (c *CloudWatchLogs) DeleteLogGroup(input *DeleteLogGroupInput) (*DeleteLogGroupOutput, error) { + req, out := c.DeleteLogGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteLogStream = "DeleteLogStream" + +// DeleteLogStreamRequest generates a "aws/request.Request" representing the +// client's request for the DeleteLogStream operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteLogStream method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteLogStreamRequest method. +// req, resp := client.DeleteLogStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchLogs) DeleteLogStreamRequest(input *DeleteLogStreamInput) (req *request.Request, output *DeleteLogStreamOutput) { + op := &request.Operation{ + Name: opDeleteLogStream, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLogStreamInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteLogStreamOutput{} + req.Data = output + return +} + +// Deletes a log stream and permanently deletes all the archived log events +// associated with it. +func (c *CloudWatchLogs) DeleteLogStream(input *DeleteLogStreamInput) (*DeleteLogStreamOutput, error) { + req, out := c.DeleteLogStreamRequest(input) + err := req.Send() + return out, err +} + +const opDeleteMetricFilter = "DeleteMetricFilter" + +// DeleteMetricFilterRequest generates a "aws/request.Request" representing the +// client's request for the DeleteMetricFilter operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteMetricFilter method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteMetricFilterRequest method. +// req, resp := client.DeleteMetricFilterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchLogs) DeleteMetricFilterRequest(input *DeleteMetricFilterInput) (req *request.Request, output *DeleteMetricFilterOutput) { + op := &request.Operation{ + Name: opDeleteMetricFilter, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteMetricFilterInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteMetricFilterOutput{} + req.Data = output + return +} + +// Deletes a metric filter associated with the specified log group. +func (c *CloudWatchLogs) DeleteMetricFilter(input *DeleteMetricFilterInput) (*DeleteMetricFilterOutput, error) { + req, out := c.DeleteMetricFilterRequest(input) + err := req.Send() + return out, err +} + +const opDeleteRetentionPolicy = "DeleteRetentionPolicy" + +// DeleteRetentionPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRetentionPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteRetentionPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteRetentionPolicyRequest method. +// req, resp := client.DeleteRetentionPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchLogs) DeleteRetentionPolicyRequest(input *DeleteRetentionPolicyInput) (req *request.Request, output *DeleteRetentionPolicyOutput) { + op := &request.Operation{ + Name: opDeleteRetentionPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRetentionPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteRetentionPolicyOutput{} + req.Data = output + return +} + +// Deletes the retention policy of the specified log group. Log events would +// not expire if they belong to log groups without a retention policy. +func (c *CloudWatchLogs) DeleteRetentionPolicy(input *DeleteRetentionPolicyInput) (*DeleteRetentionPolicyOutput, error) { + req, out := c.DeleteRetentionPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSubscriptionFilter = "DeleteSubscriptionFilter" + +// DeleteSubscriptionFilterRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSubscriptionFilter operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteSubscriptionFilter method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteSubscriptionFilterRequest method. +// req, resp := client.DeleteSubscriptionFilterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchLogs) DeleteSubscriptionFilterRequest(input *DeleteSubscriptionFilterInput) (req *request.Request, output *DeleteSubscriptionFilterOutput) { + op := &request.Operation{ + Name: opDeleteSubscriptionFilter, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSubscriptionFilterInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteSubscriptionFilterOutput{} + req.Data = output + return +} + +// Deletes a subscription filter associated with the specified log group. +func (c *CloudWatchLogs) DeleteSubscriptionFilter(input *DeleteSubscriptionFilterInput) (*DeleteSubscriptionFilterOutput, error) { + req, out := c.DeleteSubscriptionFilterRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDestinations = "DescribeDestinations" + +// DescribeDestinationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDestinations operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDestinations method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDestinationsRequest method. +// req, resp := client.DescribeDestinationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchLogs) DescribeDestinationsRequest(input *DescribeDestinationsInput) (req *request.Request, output *DescribeDestinationsOutput) { + op := &request.Operation{ + Name: opDescribeDestinations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeDestinationsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDestinationsOutput{} + req.Data = output + return +} + +// Returns all the destinations that are associated with the AWS account making +// the request. The list returned in the response is ASCII-sorted by destination +// name. +// +// By default, this operation returns up to 50 destinations. 
If there are +// more destinations to list, the response would contain a nextToken value in +// the response body. You can also limit the number of destinations returned +// in the response by specifying the limit parameter in the request. +func (c *CloudWatchLogs) DescribeDestinations(input *DescribeDestinationsInput) (*DescribeDestinationsOutput, error) { + req, out := c.DescribeDestinationsRequest(input) + err := req.Send() + return out, err +} + +// DescribeDestinationsPages iterates over the pages of a DescribeDestinations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeDestinations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeDestinations operation. +// pageNum := 0 +// err := client.DescribeDestinationsPages(params, +// func(page *DescribeDestinationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CloudWatchLogs) DescribeDestinationsPages(input *DescribeDestinationsInput, fn func(p *DescribeDestinationsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeDestinationsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeDestinationsOutput), lastPage) + }) +} + +const opDescribeExportTasks = "DescribeExportTasks" + +// DescribeExportTasksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeExportTasks operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeExportTasks method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeExportTasksRequest method. +// req, resp := client.DescribeExportTasksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchLogs) DescribeExportTasksRequest(input *DescribeExportTasksInput) (req *request.Request, output *DescribeExportTasksOutput) { + op := &request.Operation{ + Name: opDescribeExportTasks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeExportTasksInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeExportTasksOutput{} + req.Data = output + return +} + +// Returns all the export tasks that are associated with the AWS account making +// the request. The export tasks can be filtered based on TaskId or TaskStatus. +// +// By default, this operation returns up to 50 export tasks that satisfy the +// specified filters. If there are more export tasks to list, the response would +// contain a nextToken value in the response body. You can also limit the number +// of export tasks returned in the response by specifying the limit parameter +// in the request. 
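+//
+// Note: unlike the Describe* operations above, this SDK version generates no
+// Pages helper for DescribeExportTasks, so callers page manually. A minimal
+// sketch, assuming an already-constructed client and that the output type
+// carries ExportTasks and NextToken fields as the service model suggests:
+//
+//    params := &cloudwatchlogs.DescribeExportTasksInput{}
+//    for {
+//        out, err := client.DescribeExportTasks(params)
+//        if err != nil {
+//            break // handle the error properly in real code
+//        }
+//        fmt.Println(out.ExportTasks)
+//        if out.NextToken == nil {
+//            break // no more pages
+//        }
+//        params.NextToken = out.NextToken // resume from the returned token
+//    }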
+func (c *CloudWatchLogs) DescribeExportTasks(input *DescribeExportTasksInput) (*DescribeExportTasksOutput, error) { + req, out := c.DescribeExportTasksRequest(input) + err := req.Send() + return out, err +} + +const opDescribeLogGroups = "DescribeLogGroups" + +// DescribeLogGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLogGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeLogGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeLogGroupsRequest method. +// req, resp := client.DescribeLogGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchLogs) DescribeLogGroupsRequest(input *DescribeLogGroupsInput) (req *request.Request, output *DescribeLogGroupsOutput) { + op := &request.Operation{ + Name: opDescribeLogGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeLogGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeLogGroupsOutput{} + req.Data = output + return +} + +// Returns all the log groups that are associated with the AWS account making +// the request. The list returned in the response is ASCII-sorted by log group +// name. +// +// By default, this operation returns up to 50 log groups. If there are more +// log groups to list, the response would contain a nextToken value in the response +// body. You can also limit the number of log groups returned in the response +// by specifying the limit parameter in the request. +func (c *CloudWatchLogs) DescribeLogGroups(input *DescribeLogGroupsInput) (*DescribeLogGroupsOutput, error) { + req, out := c.DescribeLogGroupsRequest(input) + err := req.Send() + return out, err +} + +// DescribeLogGroupsPages iterates over the pages of a DescribeLogGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeLogGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeLogGroups operation. 
+// pageNum := 0 +// err := client.DescribeLogGroupsPages(params, +// func(page *DescribeLogGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CloudWatchLogs) DescribeLogGroupsPages(input *DescribeLogGroupsInput, fn func(p *DescribeLogGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeLogGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeLogGroupsOutput), lastPage) + }) +} + +const opDescribeLogStreams = "DescribeLogStreams" + +// DescribeLogStreamsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLogStreams operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeLogStreams method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeLogStreamsRequest method. +// req, resp := client.DescribeLogStreamsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchLogs) DescribeLogStreamsRequest(input *DescribeLogStreamsInput) (req *request.Request, output *DescribeLogStreamsOutput) { + op := &request.Operation{ + Name: opDescribeLogStreams, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeLogStreamsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeLogStreamsOutput{} + req.Data = output + return +} + +// Returns all the log streams that are associated with the specified log group. +// The list returned in the response is ASCII-sorted by log stream name. +// +// By default, this operation returns up to 50 log streams. If there are more +// log streams to list, the response would contain a nextToken value in the +// response body. You can also limit the number of log streams returned in the +// response by specifying the limit parameter in the request. This operation +// has a limit of five transactions per second, after which transactions are +// throttled. +func (c *CloudWatchLogs) DescribeLogStreams(input *DescribeLogStreamsInput) (*DescribeLogStreamsOutput, error) { + req, out := c.DescribeLogStreamsRequest(input) + err := req.Send() + return out, err +} + +// DescribeLogStreamsPages iterates over the pages of a DescribeLogStreams operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeLogStreams method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeLogStreams operation. 
+// pageNum := 0
+// err := client.DescribeLogStreamsPages(params,
+// func(page *DescribeLogStreamsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *CloudWatchLogs) DescribeLogStreamsPages(input *DescribeLogStreamsInput, fn func(p *DescribeLogStreamsOutput, lastPage bool) (shouldContinue bool)) error {
+ page, _ := c.DescribeLogStreamsRequest(input)
+ page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
+ return fn(p.(*DescribeLogStreamsOutput), lastPage)
+ })
+}
+
+const opDescribeMetricFilters = "DescribeMetricFilters"
+
+// DescribeMetricFiltersRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeMetricFilters operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeMetricFilters method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DescribeMetricFiltersRequest method.
+// req, resp := client.DescribeMetricFiltersRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CloudWatchLogs) DescribeMetricFiltersRequest(input *DescribeMetricFiltersInput) (req *request.Request, output *DescribeMetricFiltersOutput) {
+ op := &request.Operation{
+ Name: opDescribeMetricFilters,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"nextToken"},
+ OutputTokens: []string{"nextToken"},
+ LimitToken: "limit",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &DescribeMetricFiltersInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DescribeMetricFiltersOutput{}
+ req.Data = output
+ return
+}
+
+// Returns all the metric filters associated with the specified log group.
+// The list returned in the response is ASCII-sorted by filter name.
+//
+// By default, this operation returns up to 50 metric filters. If there are
+// more metric filters to list, the response would contain a nextToken value
+// in the response body. You can also limit the number of metric filters returned
+// in the response by specifying the limit parameter in the request.
+func (c *CloudWatchLogs) DescribeMetricFilters(input *DescribeMetricFiltersInput) (*DescribeMetricFiltersOutput, error) {
+ req, out := c.DescribeMetricFiltersRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// DescribeMetricFiltersPages iterates over the pages of a DescribeMetricFilters operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See DescribeMetricFilters method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a DescribeMetricFilters operation.
+// pageNum := 0 +// err := client.DescribeMetricFiltersPages(params, +// func(page *DescribeMetricFiltersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CloudWatchLogs) DescribeMetricFiltersPages(input *DescribeMetricFiltersInput, fn func(p *DescribeMetricFiltersOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeMetricFiltersRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeMetricFiltersOutput), lastPage) + }) +} + +const opDescribeSubscriptionFilters = "DescribeSubscriptionFilters" + +// DescribeSubscriptionFiltersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSubscriptionFilters operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeSubscriptionFilters method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeSubscriptionFiltersRequest method. +// req, resp := client.DescribeSubscriptionFiltersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchLogs) DescribeSubscriptionFiltersRequest(input *DescribeSubscriptionFiltersInput) (req *request.Request, output *DescribeSubscriptionFiltersOutput) { + op := &request.Operation{ + Name: opDescribeSubscriptionFilters, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeSubscriptionFiltersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSubscriptionFiltersOutput{} + req.Data = output + return +} + +// Returns all the subscription filters associated with the specified log group. +// The list returned in the response is ASCII-sorted by filter name. +// +// By default, this operation returns up to 50 subscription filters. If there +// are more subscription filters to list, the response would contain a nextToken +// value in the response body. You can also limit the number of subscription +// filters returned in the response by specifying the limit parameter in the +// request. +func (c *CloudWatchLogs) DescribeSubscriptionFilters(input *DescribeSubscriptionFiltersInput) (*DescribeSubscriptionFiltersOutput, error) { + req, out := c.DescribeSubscriptionFiltersRequest(input) + err := req.Send() + return out, err +} + +// DescribeSubscriptionFiltersPages iterates over the pages of a DescribeSubscriptionFilters operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeSubscriptionFilters method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. 
+//
+// // Example iterating over at most 3 pages of a DescribeSubscriptionFilters operation.
+// pageNum := 0
+// err := client.DescribeSubscriptionFiltersPages(params,
+// func(page *DescribeSubscriptionFiltersOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *CloudWatchLogs) DescribeSubscriptionFiltersPages(input *DescribeSubscriptionFiltersInput, fn func(p *DescribeSubscriptionFiltersOutput, lastPage bool) (shouldContinue bool)) error {
+ page, _ := c.DescribeSubscriptionFiltersRequest(input)
+ page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
+ return fn(p.(*DescribeSubscriptionFiltersOutput), lastPage)
+ })
+}
+
+const opFilterLogEvents = "FilterLogEvents"
+
+// FilterLogEventsRequest generates a "aws/request.Request" representing the
+// client's request for the FilterLogEvents operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the FilterLogEvents method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the FilterLogEventsRequest method.
+// req, resp := client.FilterLogEventsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CloudWatchLogs) FilterLogEventsRequest(input *FilterLogEventsInput) (req *request.Request, output *FilterLogEventsOutput) {
+ op := &request.Operation{
+ Name: opFilterLogEvents,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"nextToken"},
+ OutputTokens: []string{"nextToken"},
+ LimitToken: "limit",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &FilterLogEventsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &FilterLogEventsOutput{}
+ req.Data = output
+ return
+}
+
+// Retrieves log events, optionally filtered by a filter pattern, from the specified
+// log group. You can provide an optional time range to filter the results on
+// the event timestamp. You can limit the streams searched to an explicit list
+// of logStreamNames.
+//
+// By default, this operation returns as many matching log events as can fit
+// in a response size of 1MB, up to 10,000 log events, or all the events found
+// within a time-bounded scan window. If the response includes a nextToken,
+// then there is more data to search, and the search can be resumed with a new
+// request providing the nextToken. The response will contain a list of searchedLogStreams
+// that contains information about which streams were searched in the request
+// and whether they have been searched completely or require further pagination.
+// The limit parameter in the request can be used to specify the maximum number
+// of events to return in a page.
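+//
+// For example, a minimal sketch of a filtered search (the log group name and
+// pattern are placeholders; LogGroupName, FilterPattern, and the Events field
+// of the output are assumed from the service model):
+//
+//    out, err := client.FilterLogEvents(&cloudwatchlogs.FilterLogEventsInput{
+//        LogGroupName:  aws.String("my-log-group"),
+//        FilterPattern: aws.String("ERROR"),
+//    })
+//    if err == nil {
+//        fmt.Println(out.Events) // matching events from all searched streams
+//    }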
+func (c *CloudWatchLogs) FilterLogEvents(input *FilterLogEventsInput) (*FilterLogEventsOutput, error) {
+ req, out := c.FilterLogEventsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// FilterLogEventsPages iterates over the pages of a FilterLogEvents operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See FilterLogEvents method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a FilterLogEvents operation.
+// pageNum := 0
+// err := client.FilterLogEventsPages(params,
+// func(page *FilterLogEventsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *CloudWatchLogs) FilterLogEventsPages(input *FilterLogEventsInput, fn func(p *FilterLogEventsOutput, lastPage bool) (shouldContinue bool)) error {
+ page, _ := c.FilterLogEventsRequest(input)
+ page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
+ return fn(p.(*FilterLogEventsOutput), lastPage)
+ })
+}
+
+const opGetLogEvents = "GetLogEvents"
+
+// GetLogEventsRequest generates a "aws/request.Request" representing the
+// client's request for the GetLogEvents operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the GetLogEvents method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the GetLogEventsRequest method.
+// req, resp := client.GetLogEventsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CloudWatchLogs) GetLogEventsRequest(input *GetLogEventsInput) (req *request.Request, output *GetLogEventsOutput) {
+ op := &request.Operation{
+ Name: opGetLogEvents,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"nextToken"},
+ OutputTokens: []string{"nextForwardToken"},
+ LimitToken: "limit",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &GetLogEventsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetLogEventsOutput{}
+ req.Data = output
+ return
+}
+
+// Retrieves log events from the specified log stream. You can provide an optional
+// time range to filter the results on the event timestamp.
+//
+// By default, this operation returns as many log events as can fit in a response
+// size of 1MB, up to 10,000 log events. The response will always include a
+// nextForwardToken and a nextBackwardToken in the response body. You can use
+// either of these tokens in subsequent GetLogEvents requests to paginate through
+// events in either forward or backward direction. You can also limit the number
+// of log events returned in the response by specifying the limit parameter
+// in the request.
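+//
+// For example, to read the most recent events of a stream and then page
+// backwards (a sketch; the names are placeholders, and StartFromHead and the
+// token fields are assumed from the input/output types of this operation):
+//
+//    out, err := client.GetLogEvents(&cloudwatchlogs.GetLogEventsInput{
+//        LogGroupName:  aws.String("my-log-group"),
+//        LogStreamName: aws.String("my-stream"),
+//        StartFromHead: aws.Bool(false), // read from the tail of the stream
+//    })
+//    // Pass out.NextBackwardToken as NextToken on a follow-up request to
+//    // continue paging backwards through older events.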
+func (c *CloudWatchLogs) GetLogEvents(input *GetLogEventsInput) (*GetLogEventsOutput, error) {
+ req, out := c.GetLogEventsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// GetLogEventsPages iterates over the pages of a GetLogEvents operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See GetLogEvents method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a GetLogEvents operation.
+// pageNum := 0
+// err := client.GetLogEventsPages(params,
+// func(page *GetLogEventsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *CloudWatchLogs) GetLogEventsPages(input *GetLogEventsInput, fn func(p *GetLogEventsOutput, lastPage bool) (shouldContinue bool)) error {
+ page, _ := c.GetLogEventsRequest(input)
+ page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
+ return fn(p.(*GetLogEventsOutput), lastPage)
+ })
+}
+
+const opPutDestination = "PutDestination"
+
+// PutDestinationRequest generates a "aws/request.Request" representing the
+// client's request for the PutDestination operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutDestination method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the PutDestinationRequest method.
+// req, resp := client.PutDestinationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CloudWatchLogs) PutDestinationRequest(input *PutDestinationInput) (req *request.Request, output *PutDestinationOutput) {
+ op := &request.Operation{
+ Name: opPutDestination,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &PutDestinationInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &PutDestinationOutput{}
+ req.Data = output
+ return
+}
+
+// Creates or updates a Destination. A destination encapsulates a physical resource
+// (such as a Kinesis stream) and allows you to subscribe to a real-time stream
+// of log events from a different account, ingested through PutLogEvents requests.
+// Currently, the only supported physical resource is an Amazon Kinesis stream
+// belonging to the same account as the destination.
+//
+// A destination controls what is written to its Amazon Kinesis stream through
+// an access policy. By default, PutDestination does not set any access policy
+// with the destination, which means a cross-account user will not be able to
+// call PutSubscriptionFilter against this destination. To enable that, the
+// destination owner must call PutDestinationPolicy after PutDestination.
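+//
+// A sketch of the two-step setup described above (all ARNs and names are
+// placeholders; RoleArn and TargetArn are assumed from the PutDestinationInput
+// type):
+//
+//    dest, err := client.PutDestination(&cloudwatchlogs.PutDestinationInput{
+//        DestinationName: aws.String("my-destination"),
+//        RoleArn:         aws.String("arn:aws:iam::123456789012:role/CWLtoKinesisRole"),
+//        TargetArn:       aws.String("arn:aws:kinesis:us-east-1:123456789012:stream/my-stream"),
+//    })
+//    // A follow-up PutDestinationPolicy call is what grants other accounts
+//    // permission to call PutSubscriptionFilter against this destination.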
+func (c *CloudWatchLogs) PutDestination(input *PutDestinationInput) (*PutDestinationOutput, error) { + req, out := c.PutDestinationRequest(input) + err := req.Send() + return out, err +} + +const opPutDestinationPolicy = "PutDestinationPolicy" + +// PutDestinationPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutDestinationPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutDestinationPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutDestinationPolicyRequest method. +// req, resp := client.PutDestinationPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchLogs) PutDestinationPolicyRequest(input *PutDestinationPolicyInput) (req *request.Request, output *PutDestinationPolicyOutput) { + op := &request.Operation{ + Name: opPutDestinationPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutDestinationPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutDestinationPolicyOutput{} + req.Data = output + return +} + +// Creates or updates an access policy associated with an existing Destination. +// An access policy is an IAM policy document (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies_overview.html) +// that is used to authorize claims to register a subscription filter against +// a given destination. +func (c *CloudWatchLogs) PutDestinationPolicy(input *PutDestinationPolicyInput) (*PutDestinationPolicyOutput, error) { + req, out := c.PutDestinationPolicyRequest(input) + err := req.Send() + return out, err +} + +const opPutLogEvents = "PutLogEvents" + +// PutLogEventsRequest generates a "aws/request.Request" representing the +// client's request for the PutLogEvents operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutLogEvents method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutLogEventsRequest method. 
+// req, resp := client.PutLogEventsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CloudWatchLogs) PutLogEventsRequest(input *PutLogEventsInput) (req *request.Request, output *PutLogEventsOutput) {
+ op := &request.Operation{
+ Name: opPutLogEvents,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &PutLogEventsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &PutLogEventsOutput{}
+ req.Data = output
+ return
+}
+
+// Uploads a batch of log events to the specified log stream.
+//
+// Every PutLogEvents request must include the sequenceToken obtained from
+// the response of the previous request. An upload in a newly created log stream
+// does not require a sequenceToken.
+//
+// The batch of events must satisfy the following constraints:
+//
+//    * The maximum batch size is 1,048,576 bytes, and this size is calculated
+//    as the sum of all event messages in UTF-8, plus 26 bytes for each log
+//    event.
+//
+//    * None of the log events in the batch can be more than 2 hours in the
+//    future.
+//
+//    * None of the log events in the batch can be older than 14 days or the
+//    retention period of the log group.
+//
+//    * The log events in the batch must be in chronological order by their
+//    timestamp.
+//
+//    * The maximum number of log events in a batch is 10,000.
+//
+//    * A batch of log events in a single PutLogEvents request cannot span
+//    more than 24 hours. Otherwise, the PutLogEvents operation will fail.
+func (c *CloudWatchLogs) PutLogEvents(input *PutLogEventsInput) (*PutLogEventsOutput, error) {
+ req, out := c.PutLogEventsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opPutMetricFilter = "PutMetricFilter"
+
+// PutMetricFilterRequest generates a "aws/request.Request" representing the
+// client's request for the PutMetricFilter operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutMetricFilter method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the PutMetricFilterRequest method.
+// req, resp := client.PutMetricFilterRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CloudWatchLogs) PutMetricFilterRequest(input *PutMetricFilterInput) (req *request.Request, output *PutMetricFilterOutput) {
+ op := &request.Operation{
+ Name: opPutMetricFilter,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &PutMetricFilterInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &PutMetricFilterOutput{}
+ req.Data = output
+ return
+}
+
+// Creates or updates a metric filter and associates it with the specified log
+// group. Metric filters allow you to configure rules to extract metric data
+// from log events ingested through PutLogEvents requests.
+//
+// The maximum number of metric filters that can be associated with a log
+// group is 100.
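+//
+// A minimal sketch of a filter that counts ERROR lines (all names and values
+// are placeholders; MetricTransformations and its fields are assumed from the
+// service model):
+//
+//    _, err := client.PutMetricFilter(&cloudwatchlogs.PutMetricFilterInput{
+//        FilterName:    aws.String("errors"),
+//        FilterPattern: aws.String("ERROR"),
+//        LogGroupName:  aws.String("my-log-group"),
+//        MetricTransformations: []*cloudwatchlogs.MetricTransformation{{
+//            MetricName:      aws.String("ErrorCount"),
+//            MetricNamespace: aws.String("MyApp"),
+//            MetricValue:     aws.String("1"), // emit 1 per matching event
+//        }},
+//    })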
+func (c *CloudWatchLogs) PutMetricFilter(input *PutMetricFilterInput) (*PutMetricFilterOutput, error) { + req, out := c.PutMetricFilterRequest(input) + err := req.Send() + return out, err +} + +const opPutRetentionPolicy = "PutRetentionPolicy" + +// PutRetentionPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutRetentionPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutRetentionPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutRetentionPolicyRequest method. +// req, resp := client.PutRetentionPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchLogs) PutRetentionPolicyRequest(input *PutRetentionPolicyInput) (req *request.Request, output *PutRetentionPolicyOutput) { + op := &request.Operation{ + Name: opPutRetentionPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutRetentionPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutRetentionPolicyOutput{} + req.Data = output + return +} + +// Sets the retention of the specified log group. A retention policy allows +// you to configure the number of days you want to retain log events in the +// specified log group. +func (c *CloudWatchLogs) PutRetentionPolicy(input *PutRetentionPolicyInput) (*PutRetentionPolicyOutput, error) { + req, out := c.PutRetentionPolicyRequest(input) + err := req.Send() + return out, err +} + +const opPutSubscriptionFilter = "PutSubscriptionFilter" + +// PutSubscriptionFilterRequest generates a "aws/request.Request" representing the +// client's request for the PutSubscriptionFilter operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutSubscriptionFilter method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutSubscriptionFilterRequest method. 
+// req, resp := client.PutSubscriptionFilterRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CloudWatchLogs) PutSubscriptionFilterRequest(input *PutSubscriptionFilterInput) (req *request.Request, output *PutSubscriptionFilterOutput) {
+ op := &request.Operation{
+ Name: opPutSubscriptionFilter,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &PutSubscriptionFilterInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &PutSubscriptionFilterOutput{}
+ req.Data = output
+ return
+}
+
+// Creates or updates a subscription filter and associates it with the specified
+// log group. Subscription filters allow you to subscribe to a real-time stream
+// of log events ingested through PutLogEvents requests and have them delivered
+// to a specific destination. Currently, the supported destinations are:
+//
+//    * An Amazon Kinesis stream belonging to the same account as the subscription
+//    filter, for same-account delivery.
+//
+//    * A logical destination (used via an ARN of Destination) belonging to
+//    a different account, for cross-account delivery.
+//
+//    * An Amazon Kinesis Firehose stream belonging to the same account as
+//    the subscription filter, for same-account delivery.
+//
+//    * An AWS Lambda function belonging to the same account as the subscription
+//    filter, for same-account delivery.
+//
+// Currently there can only be one subscription filter associated with a log
+// group.
+func (c *CloudWatchLogs) PutSubscriptionFilter(input *PutSubscriptionFilterInput) (*PutSubscriptionFilterOutput, error) {
+ req, out := c.PutSubscriptionFilterRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opTestMetricFilter = "TestMetricFilter"
+
+// TestMetricFilterRequest generates a "aws/request.Request" representing the
+// client's request for the TestMetricFilter operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the TestMetricFilter method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the TestMetricFilterRequest method.
+// req, resp := client.TestMetricFilterRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CloudWatchLogs) TestMetricFilterRequest(input *TestMetricFilterInput) (req *request.Request, output *TestMetricFilterOutput) {
+ op := &request.Operation{
+ Name: opTestMetricFilter,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &TestMetricFilterInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &TestMetricFilterOutput{}
+ req.Data = output
+ return
+}
+
+// Tests the filter pattern of a metric filter against a sample of log event
+// messages. You can use this operation to validate the correctness of a metric
+// filter pattern.
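+//
+// For example, to dry-run a pattern against sample messages before calling
+// PutMetricFilter (the messages are placeholders; the Matches field of the
+// output is assumed from the service model):
+//
+//    out, err := client.TestMetricFilter(&cloudwatchlogs.TestMetricFilterInput{
+//        FilterPattern:    aws.String("ERROR"),
+//        LogEventMessages: []*string{aws.String("ERROR something broke")},
+//    })
+//    // out.Matches reports which sample messages matched the pattern.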
+func (c *CloudWatchLogs) TestMetricFilter(input *TestMetricFilterInput) (*TestMetricFilterOutput, error) { + req, out := c.TestMetricFilterRequest(input) + err := req.Send() + return out, err +} + +type CancelExportTaskInput struct { + _ struct{} `type:"structure"` + + // Id of the export task to cancel. + TaskId *string `locationName:"taskId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelExportTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelExportTaskInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CancelExportTaskInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CancelExportTaskInput"} + if s.TaskId == nil { + invalidParams.Add(request.NewErrParamRequired("TaskId")) + } + if s.TaskId != nil && len(*s.TaskId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TaskId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CancelExportTaskOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CancelExportTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelExportTaskOutput) GoString() string { + return s.String() +} + +type CreateExportTaskInput struct { + _ struct{} `type:"structure"` + + // Name of Amazon S3 bucket to which the log data will be exported. + // + // Note: Only buckets in the same AWS region are supported. + Destination *string `locationName:"destination" min:"1" type:"string" required:"true"` + + // Prefix that will be used as the start of Amazon S3 key for every object exported. + // If not specified, this defaults to 'exportedlogs'. + DestinationPrefix *string `locationName:"destinationPrefix" type:"string"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. It indicates the start time of the range for the request. Events + // with a timestamp prior to this time will not be exported. + From *int64 `locationName:"from" type:"long" required:"true"` + + // The name of the log group to export. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // Will only export log streams that match the provided logStreamNamePrefix. + // If you don't specify a value, no prefix filter is applied. + LogStreamNamePrefix *string `locationName:"logStreamNamePrefix" min:"1" type:"string"` + + // The name of the export task. + TaskName *string `locationName:"taskName" min:"1" type:"string"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. It indicates the end time of the range for the request. Events + // with a timestamp later than this time will not be exported. + To *int64 `locationName:"to" type:"long" required:"true"` +} + +// String returns the string representation +func (s CreateExportTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateExportTaskInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
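+// Validation can also be exercised directly before building a request; the
+// SDK's corehandlers appear to run it automatically before sending. A sketch
+// with placeholder values:
+//
+//    in := &cloudwatchlogs.CreateExportTaskInput{
+//        Destination:  aws.String("my-export-bucket"), // assumed bucket name
+//        From:         aws.Int64(1437584472382),
+//        LogGroupName: aws.String("my-log-group"),
+//        To:           aws.Int64(1437584472833),
+//    }
+//    if err := in.Validate(); err != nil {
+//        fmt.Println("invalid params:", err) // e.g. a missing required field
+//    }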
+func (s *CreateExportTaskInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateExportTaskInput"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.Destination != nil && len(*s.Destination) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Destination", 1)) + } + if s.From == nil { + invalidParams.Add(request.NewErrParamRequired("From")) + } + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.LogStreamNamePrefix != nil && len(*s.LogStreamNamePrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogStreamNamePrefix", 1)) + } + if s.TaskName != nil && len(*s.TaskName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TaskName", 1)) + } + if s.To == nil { + invalidParams.Add(request.NewErrParamRequired("To")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateExportTaskOutput struct { + _ struct{} `type:"structure"` + + // Id of the export task that got created. + TaskId *string `locationName:"taskId" min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateExportTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateExportTaskOutput) GoString() string { + return s.String() +} + +type CreateLogGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the log group to create. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateLogGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLogGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateLogGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateLogGroupInput"} + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateLogGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateLogGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLogGroupOutput) GoString() string { + return s.String() +} + +type CreateLogStreamInput struct { + _ struct{} `type:"structure"` + + // The name of the log group under which the log stream is to be created. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // The name of the log stream to create. + LogStreamName *string `locationName:"logStreamName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateLogStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLogStreamInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateLogStreamInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateLogStreamInput"} + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.LogStreamName == nil { + invalidParams.Add(request.NewErrParamRequired("LogStreamName")) + } + if s.LogStreamName != nil && len(*s.LogStreamName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogStreamName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateLogStreamOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateLogStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLogStreamOutput) GoString() string { + return s.String() +} + +type DeleteDestinationInput struct { + _ struct{} `type:"structure"` + + // The name of destination to delete. + DestinationName *string `locationName:"destinationName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDestinationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDestinationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDestinationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDestinationInput"} + if s.DestinationName == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationName")) + } + if s.DestinationName != nil && len(*s.DestinationName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DestinationName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteDestinationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDestinationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDestinationOutput) GoString() string { + return s.String() +} + +type DeleteLogGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the log group to delete. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLogGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLogGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteLogGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteLogGroupInput"} + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteLogGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteLogGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLogGroupOutput) GoString() string { + return s.String() +} + +type DeleteLogStreamInput struct { + _ struct{} `type:"structure"` + + // The name of the log group under which the log stream to delete belongs. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // The name of the log stream to delete. + LogStreamName *string `locationName:"logStreamName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLogStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLogStreamInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteLogStreamInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteLogStreamInput"} + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.LogStreamName == nil { + invalidParams.Add(request.NewErrParamRequired("LogStreamName")) + } + if s.LogStreamName != nil && len(*s.LogStreamName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogStreamName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteLogStreamOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteLogStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLogStreamOutput) GoString() string { + return s.String() +} + +type DeleteMetricFilterInput struct { + _ struct{} `type:"structure"` + + // The name of the metric filter to delete. + FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"` + + // The name of the log group that is associated with the metric filter to delete. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteMetricFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMetricFilterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteMetricFilterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteMetricFilterInput"} + if s.FilterName == nil { + invalidParams.Add(request.NewErrParamRequired("FilterName")) + } + if s.FilterName != nil && len(*s.FilterName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FilterName", 1)) + } + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteMetricFilterOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteMetricFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMetricFilterOutput) GoString() string { + return s.String() +} + +type DeleteRetentionPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the log group that is associated with the retention policy to + // delete. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRetentionPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRetentionPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteRetentionPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteRetentionPolicyInput"} + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteRetentionPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRetentionPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRetentionPolicyOutput) GoString() string { + return s.String() +} + +type DeleteSubscriptionFilterInput struct { + _ struct{} `type:"structure"` + + // The name of the subscription filter to delete. + FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"` + + // The name of the log group that is associated with the subscription filter + // to delete. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSubscriptionFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSubscriptionFilterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteSubscriptionFilterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteSubscriptionFilterInput"} + if s.FilterName == nil { + invalidParams.Add(request.NewErrParamRequired("FilterName")) + } + if s.FilterName != nil && len(*s.FilterName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FilterName", 1)) + } + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteSubscriptionFilterOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteSubscriptionFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSubscriptionFilterOutput) GoString() string { + return s.String() +} + +type DescribeDestinationsInput struct { + _ struct{} `type:"structure"` + + // Will only return destinations that match the provided destinationNamePrefix. + // If you don't specify a value, no prefix is applied. + DestinationNamePrefix *string `min:"1" type:"string"` + + // The maximum number of results to return. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the response of the previous request. The + // token expires after 24 hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeDestinationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDestinationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDestinationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDestinationsInput"} + if s.DestinationNamePrefix != nil && len(*s.DestinationNamePrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DestinationNamePrefix", 1)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeDestinationsOutput struct { + _ struct{} `type:"structure"` + + Destinations []*Destination `locationName:"destinations" type:"list"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the response of the previous request. The + // token expires after 24 hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeDestinationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDestinationsOutput) GoString() string { + return s.String() +} + +type DescribeExportTasksInput struct { + _ struct{} `type:"structure"` + + // The maximum number of items returned in the response. If you don't specify + // a value, the request would return up to 50 items. 
+	Limit *int64 `locationName:"limit" min:"1" type:"integer"`
+
+	// A string token used for pagination that points to the next page of results.
+	// It must be a value obtained from the response of the previous DescribeExportTasks
+	// request.
+	NextToken *string `locationName:"nextToken" min:"1" type:"string"`
+
+	// All export tasks that match the specified status code will be returned.
+	// This can return zero or more export tasks.
+	StatusCode *string `locationName:"statusCode" type:"string" enum:"ExportTaskStatusCode"`
+
+	// The export task that matches the specified task Id will be returned. This
+	// can result in zero or one export task.
+	TaskId *string `locationName:"taskId" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeExportTasksInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeExportTasksInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeExportTasksInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DescribeExportTasksInput"}
+	if s.Limit != nil && *s.Limit < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
+	}
+	if s.NextToken != nil && len(*s.NextToken) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
+	}
+	if s.TaskId != nil && len(*s.TaskId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("TaskId", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type DescribeExportTasksOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of export tasks.
+	ExportTasks []*ExportTask `locationName:"exportTasks" type:"list"`
+
+	// A string token used for pagination that points to the next page of results.
+	// It must be a value obtained from the response of the previous request. The
+	// token expires after 24 hours.
+	NextToken *string `locationName:"nextToken" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeExportTasksOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeExportTasksOutput) GoString() string {
+	return s.String()
+}
+
+type DescribeLogGroupsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The maximum number of items returned in the response. If you don't specify
+	// a value, the request would return up to 50 items.
+	Limit *int64 `locationName:"limit" min:"1" type:"integer"`
+
+	// Will only return log groups that match the provided logGroupNamePrefix. If
+	// you don't specify a value, no prefix filter is applied.
+	LogGroupNamePrefix *string `locationName:"logGroupNamePrefix" min:"1" type:"string"`
+
+	// A string token used for pagination that points to the next page of results.
+	// It must be a value obtained from the response of the previous DescribeLogGroups
+	// request.
+	NextToken *string `locationName:"nextToken" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeLogGroupsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeLogGroupsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
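+//
+// As a usage sketch (svc is an assumed *cloudwatchlogs.CloudWatchLogs client,
+// and the prefix is illustrative), pages can be walked manually by feeding each
+// response's NextToken back into the next request:
+//
+//	input := &cloudwatchlogs.DescribeLogGroupsInput{
+//		LogGroupNamePrefix: aws.String("prod-"),
+//	}
+//	for {
+//		out, err := svc.DescribeLogGroups(input)
+//		if err != nil {
+//			break
+//		}
+//		// ... consume out.LogGroups ...
+//		if out.NextToken == nil {
+//			break
+//		}
+//		input.NextToken = out.NextToken
+//	}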
+func (s *DescribeLogGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeLogGroupsInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.LogGroupNamePrefix != nil && len(*s.LogGroupNamePrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupNamePrefix", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeLogGroupsOutput struct { + _ struct{} `type:"structure"` + + // A list of log groups. + LogGroups []*LogGroup `locationName:"logGroups" type:"list"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the response of the previous request. The + // token expires after 24 hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeLogGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLogGroupsOutput) GoString() string { + return s.String() +} + +type DescribeLogStreamsInput struct { + _ struct{} `type:"structure"` + + // If set to true, results are returned in descending order. If you don't specify + // a value or set it to false, results are returned in ascending order. + Descending *bool `locationName:"descending" type:"boolean"` + + // The maximum number of items returned in the response. If you don't specify + // a value, the request would return up to 50 items. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // The log group name for which log streams are to be listed. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // Will only return log streams that match the provided logStreamNamePrefix. + // If you don't specify a value, no prefix filter is applied. + LogStreamNamePrefix *string `locationName:"logStreamNamePrefix" min:"1" type:"string"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the response of the previous DescribeLogStreams + // request. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // Specifies what to order the returned log streams by. Valid arguments are + // 'LogStreamName' or 'LastEventTime'. If you don't specify a value, results + // are ordered by LogStreamName. If 'LastEventTime' is chosen, the request cannot + // also contain a logStreamNamePrefix. + OrderBy *string `locationName:"orderBy" type:"string" enum:"OrderBy"` +} + +// String returns the string representation +func (s DescribeLogStreamsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLogStreamsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
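+//
+// A short sketch (svc and the log group name are assumed) that lists the most
+// recently active streams first; note the constraint above that OrderBy
+// 'LastEventTime' cannot be combined with a logStreamNamePrefix:
+//
+//	out, err := svc.DescribeLogStreams(&cloudwatchlogs.DescribeLogStreamsInput{
+//		LogGroupName: aws.String("my-log-group"),
+//		OrderBy:      aws.String(cloudwatchlogs.OrderByLastEventTime),
+//		Descending:   aws.Bool(true),
+//	})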
+func (s *DescribeLogStreamsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeLogStreamsInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.LogStreamNamePrefix != nil && len(*s.LogStreamNamePrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogStreamNamePrefix", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeLogStreamsOutput struct { + _ struct{} `type:"structure"` + + // A list of log streams. + LogStreams []*LogStream `locationName:"logStreams" type:"list"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the response of the previous request. The + // token expires after 24 hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeLogStreamsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLogStreamsOutput) GoString() string { + return s.String() +} + +type DescribeMetricFiltersInput struct { + _ struct{} `type:"structure"` + + // Will only return metric filters that match the provided filterNamePrefix. + // If you don't specify a value, no prefix filter is applied. + FilterNamePrefix *string `locationName:"filterNamePrefix" min:"1" type:"string"` + + // The maximum number of items returned in the response. If you don't specify + // a value, the request would return up to 50 items. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // The log group name for which metric filters are to be listed. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the response of the previous DescribeMetricFilters + // request. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeMetricFiltersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMetricFiltersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeMetricFiltersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeMetricFiltersInput"} + if s.FilterNamePrefix != nil && len(*s.FilterNamePrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FilterNamePrefix", 1)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeMetricFiltersOutput struct { + _ struct{} `type:"structure"` + + MetricFilters []*MetricFilter `locationName:"metricFilters" type:"list"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the response of the previous request. The + // token expires after 24 hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeMetricFiltersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMetricFiltersOutput) GoString() string { + return s.String() +} + +type DescribeSubscriptionFiltersInput struct { + _ struct{} `type:"structure"` + + // Will only return subscription filters that match the provided filterNamePrefix. + // If you don't specify a value, no prefix filter is applied. + FilterNamePrefix *string `locationName:"filterNamePrefix" min:"1" type:"string"` + + // The maximum number of results to return. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // The log group name for which subscription filters are to be listed. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the response of the previous request. The + // token expires after 24 hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeSubscriptionFiltersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSubscriptionFiltersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeSubscriptionFiltersInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DescribeSubscriptionFiltersInput"}
+	if s.FilterNamePrefix != nil && len(*s.FilterNamePrefix) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("FilterNamePrefix", 1))
+	}
+	if s.Limit != nil && *s.Limit < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
+	}
+	if s.LogGroupName == nil {
+		invalidParams.Add(request.NewErrParamRequired("LogGroupName"))
+	}
+	if s.LogGroupName != nil && len(*s.LogGroupName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1))
+	}
+	if s.NextToken != nil && len(*s.NextToken) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type DescribeSubscriptionFiltersOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A string token used for pagination that points to the next page of results.
+	// It must be a value obtained from the response of the previous request. The
+	// token expires after 24 hours.
+	NextToken *string `locationName:"nextToken" min:"1" type:"string"`
+
+	SubscriptionFilters []*SubscriptionFilter `locationName:"subscriptionFilters" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeSubscriptionFiltersOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeSubscriptionFiltersOutput) GoString() string {
+	return s.String()
+}
+
+// A cross-account destination that is the recipient of subscription log events.
+type Destination struct {
+	_ struct{} `type:"structure"`
+
+	// An IAM policy document that governs which AWS accounts can create subscription
+	// filters against this destination.
+	AccessPolicy *string `locationName:"accessPolicy" min:"1" type:"string"`
+
+	// ARN of this destination.
+	Arn *string `locationName:"arn" type:"string"`
+
+	// A point in time expressed as the number of milliseconds since Jan 1, 1970
+	// 00:00:00 UTC specifying when this destination was created.
+	CreationTime *int64 `locationName:"creationTime" type:"long"`
+
+	// Name of the destination.
+	DestinationName *string `locationName:"destinationName" min:"1" type:"string"`
+
+	// A role for impersonation, used when delivering log events to the target.
+	RoleArn *string `locationName:"roleArn" min:"1" type:"string"`
+
+	// ARN of the physical target where the log events will be delivered (e.g. the
+	// ARN of a Kinesis stream).
+	TargetArn *string `locationName:"targetArn" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s Destination) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Destination) GoString() string {
+	return s.String()
+}
+
+// Represents an export task.
+type ExportTask struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the Amazon S3 bucket to which the log data was exported.
+	Destination *string `locationName:"destination" min:"1" type:"string"`
+
+	// The prefix that was used as the start of the Amazon S3 key for every object
+	// exported.
+	DestinationPrefix *string `locationName:"destinationPrefix" type:"string"`
+
+	// Execution info about the export task.
+	ExecutionInfo *ExportTaskExecutionInfo `locationName:"executionInfo" type:"structure"`
+
+	// A point in time expressed as the number of milliseconds since Jan 1, 1970
+	// 00:00:00 UTC. Events with a timestamp prior to this time are not exported.
+	From *int64 `locationName:"from" type:"long"`
+
+	// The name of the log group from which log data was exported.
+	LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"`
+
+	// Status of the export task.
+	Status *ExportTaskStatus `locationName:"status" type:"structure"`
+
+	// Id of the export task.
+	TaskId *string `locationName:"taskId" min:"1" type:"string"`
+
+	// The name of the export task.
+	TaskName *string `locationName:"taskName" min:"1" type:"string"`
+
+	// A point in time expressed as the number of milliseconds since Jan 1, 1970
+	// 00:00:00 UTC. Events with a timestamp later than this time are not exported.
+	To *int64 `locationName:"to" type:"long"`
+}
+
+// String returns the string representation
+func (s ExportTask) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ExportTask) GoString() string {
+	return s.String()
+}
+
+// Represents the execution information of an export task.
+type ExportTaskExecutionInfo struct {
+	_ struct{} `type:"structure"`
+
+	// A point in time when the export task was completed.
+	CompletionTime *int64 `locationName:"completionTime" type:"long"`
+
+	// A point in time when the export task was created.
+	CreationTime *int64 `locationName:"creationTime" type:"long"`
+}
+
+// String returns the string representation
+func (s ExportTaskExecutionInfo) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ExportTaskExecutionInfo) GoString() string {
+	return s.String()
+}
+
+// Represents the status of an export task.
+type ExportTaskStatus struct {
+	_ struct{} `type:"structure"`
+
+	// Status code of the export task.
+	Code *string `locationName:"code" type:"string" enum:"ExportTaskStatusCode"`
+
+	// Status message related to the code.
+	Message *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation
+func (s ExportTaskStatus) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ExportTaskStatus) GoString() string {
+	return s.String()
+}
+
+type FilterLogEventsInput struct {
+	_ struct{} `type:"structure"`
+
+	// A point in time expressed as the number of milliseconds since Jan 1, 1970
+	// 00:00:00 UTC. If provided, events with a timestamp later than this time are
+	// not returned.
+	EndTime *int64 `locationName:"endTime" type:"long"`
+
+	// A valid CloudWatch Logs filter pattern to use for filtering the response.
+	// If not provided, all the events are matched.
+	FilterPattern *string `locationName:"filterPattern" type:"string"`
+
+	// If provided, the API will make a best effort to provide responses that contain
+	// events from multiple log streams within the log group interleaved in a single
+	// response. If not provided, all the matched log events in the first log stream
+	// will be searched first, then those in the next log stream, etc.
+	Interleaved *bool `locationName:"interleaved" type:"boolean"`
+
+	// The maximum number of events to return in a page of results. Default is 10,000
+	// events.
+	Limit *int64 `locationName:"limit" min:"1" type:"integer"`
+
+	// The name of the log group to query.
+	LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"`
+
+	// Optional list of log stream names within the specified log group to search.
+	// Defaults to all the log streams in the log group.
+ LogStreamNames []*string `locationName:"logStreamNames" min:"1" type:"list"` + + // A pagination token obtained from a FilterLogEvents response to continue paginating + // the FilterLogEvents results. This token is omitted from the response when + // there are no other events to display. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. If provided, events with a timestamp prior to this time are + // not returned. + StartTime *int64 `locationName:"startTime" type:"long"` +} + +// String returns the string representation +func (s FilterLogEventsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FilterLogEventsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *FilterLogEventsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FilterLogEventsInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.LogStreamNames != nil && len(s.LogStreamNames) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogStreamNames", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type FilterLogEventsOutput struct { + _ struct{} `type:"structure"` + + // A list of FilteredLogEvent objects representing the matched events from the + // request. + Events []*FilteredLogEvent `locationName:"events" type:"list"` + + // A pagination token obtained from a FilterLogEvents response to continue paginating + // the FilterLogEvents results. This token is omitted from the response when + // there are no other events to display. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // A list of SearchedLogStream objects indicating which log streams have been + // searched in this request and whether each has been searched completely or + // still has more to be paginated. + SearchedLogStreams []*SearchedLogStream `locationName:"searchedLogStreams" type:"list"` +} + +// String returns the string representation +func (s FilterLogEventsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FilterLogEventsOutput) GoString() string { + return s.String() +} + +// Represents a matched event from a FilterLogEvents request. +type FilteredLogEvent struct { + _ struct{} `type:"structure"` + + // A unique identifier for this event. + EventId *string `locationName:"eventId" type:"string"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. + IngestionTime *int64 `locationName:"ingestionTime" type:"long"` + + // The name of the log stream this event belongs to. + LogStreamName *string `locationName:"logStreamName" min:"1" type:"string"` + + // The data contained in the log event. + Message *string `locationName:"message" min:"1" type:"string"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. 
+ Timestamp *int64 `locationName:"timestamp" type:"long"` +} + +// String returns the string representation +func (s FilteredLogEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FilteredLogEvent) GoString() string { + return s.String() +} + +type GetLogEventsInput struct { + _ struct{} `type:"structure"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. + EndTime *int64 `locationName:"endTime" type:"long"` + + // The maximum number of log events returned in the response. If you don't specify + // a value, the request would return as many log events as can fit in a response + // size of 1MB, up to 10,000 log events. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // The name of the log group to query. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // The name of the log stream to query. + LogStreamName *string `locationName:"logStreamName" min:"1" type:"string" required:"true"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the nextForwardToken or nextBackwardToken + // fields in the response of the previous GetLogEvents request. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // If set to true, the earliest log events would be returned first. The default + // is false (the latest log events are returned first). + StartFromHead *bool `locationName:"startFromHead" type:"boolean"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. + StartTime *int64 `locationName:"startTime" type:"long"` +} + +// String returns the string representation +func (s GetLogEventsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetLogEventsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetLogEventsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetLogEventsInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.LogStreamName == nil { + invalidParams.Add(request.NewErrParamRequired("LogStreamName")) + } + if s.LogStreamName != nil && len(*s.LogStreamName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogStreamName", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetLogEventsOutput struct { + _ struct{} `type:"structure"` + + Events []*OutputLogEvent `locationName:"events" type:"list"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the response of the previous request. The + // token expires after 24 hours. + NextBackwardToken *string `locationName:"nextBackwardToken" min:"1" type:"string"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the response of the previous request. The + // token expires after 24 hours. 
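+	//
+	// A forward-paging sketch (svc and the names are assumed): keep re-issuing
+	// GetLogEvents with the returned token, and stop once the token no longer
+	// changes, which is how the service signals the end of the stream:
+	//
+	//	input := &cloudwatchlogs.GetLogEventsInput{
+	//		LogGroupName:  aws.String("my-log-group"),
+	//		LogStreamName: aws.String("my-stream"),
+	//		StartFromHead: aws.Bool(true),
+	//	}
+	//	for {
+	//		out, err := svc.GetLogEvents(input)
+	//		if err != nil {
+	//			break
+	//		}
+	//		// ... consume out.Events ...
+	//		if out.NextForwardToken == nil ||
+	//			(input.NextToken != nil && *input.NextToken == *out.NextForwardToken) {
+	//			break
+	//		}
+	//		input.NextToken = out.NextForwardToken
+	//	}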
+	NextForwardToken *string `locationName:"nextForwardToken" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s GetLogEventsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetLogEventsOutput) GoString() string {
+	return s.String()
+}
+
+// A log event is a record of some activity that was recorded by the application
+// or resource being monitored. The log event record that CloudWatch Logs understands
+// contains two properties: the timestamp of when the event occurred, and the
+// raw event message.
+type InputLogEvent struct {
+	_ struct{} `type:"structure"`
+
+	Message *string `locationName:"message" min:"1" type:"string" required:"true"`
+
+	// A point in time expressed as the number of milliseconds since Jan 1, 1970
+	// 00:00:00 UTC.
+	Timestamp *int64 `locationName:"timestamp" type:"long" required:"true"`
+}
+
+// String returns the string representation
+func (s InputLogEvent) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InputLogEvent) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InputLogEvent) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "InputLogEvent"}
+	if s.Message == nil {
+		invalidParams.Add(request.NewErrParamRequired("Message"))
+	}
+	if s.Message != nil && len(*s.Message) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Message", 1))
+	}
+	if s.Timestamp == nil {
+		invalidParams.Add(request.NewErrParamRequired("Timestamp"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type LogGroup struct {
+	_ struct{} `type:"structure"`
+
+	Arn *string `locationName:"arn" type:"string"`
+
+	// A point in time expressed as the number of milliseconds since Jan 1, 1970
+	// 00:00:00 UTC.
+	CreationTime *int64 `locationName:"creationTime" type:"long"`
+
+	LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"`
+
+	// The number of metric filters associated with the log group.
+	MetricFilterCount *int64 `locationName:"metricFilterCount" type:"integer"`
+
+	// Specifies the number of days you want to retain log events in the specified
+	// log group. Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180,
+	// 365, 400, 545, 731, 1827, 3653.
+	RetentionInDays *int64 `locationName:"retentionInDays" type:"integer"`
+
+	StoredBytes *int64 `locationName:"storedBytes" type:"long"`
+}
+
+// String returns the string representation
+func (s LogGroup) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LogGroup) GoString() string {
+	return s.String()
+}
+
+// A log stream is a sequence of log events from a single emitter of logs.
+type LogStream struct {
+	_ struct{} `type:"structure"`
+
+	Arn *string `locationName:"arn" type:"string"`
+
+	// A point in time expressed as the number of milliseconds since Jan 1, 1970
+	// 00:00:00 UTC.
+	CreationTime *int64 `locationName:"creationTime" type:"long"`
+
+	// A point in time expressed as the number of milliseconds since Jan 1, 1970
+	// 00:00:00 UTC.
+	FirstEventTimestamp *int64 `locationName:"firstEventTimestamp" type:"long"`
+
+	// A point in time expressed as the number of milliseconds since Jan 1, 1970
+	// 00:00:00 UTC.
+ LastEventTimestamp *int64 `locationName:"lastEventTimestamp" type:"long"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. + LastIngestionTime *int64 `locationName:"lastIngestionTime" type:"long"` + + LogStreamName *string `locationName:"logStreamName" min:"1" type:"string"` + + StoredBytes *int64 `locationName:"storedBytes" type:"long"` + + // A string token used for making PutLogEvents requests. A sequenceToken can + // only be used once, and PutLogEvents requests must include the sequenceToken + // obtained from the response of the previous request. + UploadSequenceToken *string `locationName:"uploadSequenceToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s LogStream) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LogStream) GoString() string { + return s.String() +} + +// Metric filters can be used to express how CloudWatch Logs would extract metric +// observations from ingested log events and transform them to metric data in +// a CloudWatch metric. +type MetricFilter struct { + _ struct{} `type:"structure"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. + CreationTime *int64 `locationName:"creationTime" type:"long"` + + // A name for a metric or subscription filter. + FilterName *string `locationName:"filterName" min:"1" type:"string"` + + // A symbolic description of how CloudWatch Logs should interpret the data in + // each log event. For example, a log event may contain timestamps, IP addresses, + // strings, and so on. You use the filter pattern to specify what to look for + // in the log event message. + FilterPattern *string `locationName:"filterPattern" type:"string"` + + MetricTransformations []*MetricTransformation `locationName:"metricTransformations" min:"1" type:"list"` +} + +// String returns the string representation +func (s MetricFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricFilter) GoString() string { + return s.String() +} + +type MetricFilterMatchRecord struct { + _ struct{} `type:"structure"` + + EventMessage *string `locationName:"eventMessage" min:"1" type:"string"` + + EventNumber *int64 `locationName:"eventNumber" type:"long"` + + ExtractedValues map[string]*string `locationName:"extractedValues" type:"map"` +} + +// String returns the string representation +func (s MetricFilterMatchRecord) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricFilterMatchRecord) GoString() string { + return s.String() +} + +type MetricTransformation struct { + _ struct{} `type:"structure"` + + // The name of the CloudWatch metric to which the monitored log information + // should be published. For example, you may publish to a metric called ErrorCount. + MetricName *string `locationName:"metricName" type:"string" required:"true"` + + // The destination namespace of the new CloudWatch metric. + MetricNamespace *string `locationName:"metricNamespace" type:"string" required:"true"` + + // What to publish to the metric. For example, if you're counting the occurrences + // of a particular term like "Error", the value will be "1" for each occurrence. + // If you're counting the bytes transferred the published value will be the + // value in the log event. 
+	MetricValue *string `locationName:"metricValue" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s MetricTransformation) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s MetricTransformation) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *MetricTransformation) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "MetricTransformation"}
+	if s.MetricName == nil {
+		invalidParams.Add(request.NewErrParamRequired("MetricName"))
+	}
+	if s.MetricNamespace == nil {
+		invalidParams.Add(request.NewErrParamRequired("MetricNamespace"))
+	}
+	if s.MetricValue == nil {
+		invalidParams.Add(request.NewErrParamRequired("MetricValue"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type OutputLogEvent struct {
+	_ struct{} `type:"structure"`
+
+	// A point in time expressed as the number of milliseconds since Jan 1, 1970
+	// 00:00:00 UTC.
+	IngestionTime *int64 `locationName:"ingestionTime" type:"long"`
+
+	Message *string `locationName:"message" min:"1" type:"string"`
+
+	// A point in time expressed as the number of milliseconds since Jan 1, 1970
+	// 00:00:00 UTC.
+	Timestamp *int64 `locationName:"timestamp" type:"long"`
+}
+
+// String returns the string representation
+func (s OutputLogEvent) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s OutputLogEvent) GoString() string {
+	return s.String()
+}
+
+type PutDestinationInput struct {
+	_ struct{} `type:"structure"`
+
+	// A name for the destination.
+	DestinationName *string `locationName:"destinationName" min:"1" type:"string" required:"true"`
+
+	// The ARN of an IAM role that grants CloudWatch Logs permissions to make Amazon
+	// Kinesis PutRecord requests on the destination stream.
+	RoleArn *string `locationName:"roleArn" min:"1" type:"string" required:"true"`
+
+	// The ARN of an Amazon Kinesis stream to deliver matching log events to.
+	TargetArn *string `locationName:"targetArn" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s PutDestinationInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutDestinationInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutDestinationInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PutDestinationInput"}
+	if s.DestinationName == nil {
+		invalidParams.Add(request.NewErrParamRequired("DestinationName"))
+	}
+	if s.DestinationName != nil && len(*s.DestinationName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("DestinationName", 1))
+	}
+	if s.RoleArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("RoleArn"))
+	}
+	if s.RoleArn != nil && len(*s.RoleArn) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("RoleArn", 1))
+	}
+	if s.TargetArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("TargetArn"))
+	}
+	if s.TargetArn != nil && len(*s.TargetArn) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("TargetArn", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type PutDestinationOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A cross-account destination that is the recipient of subscription log events.
+ Destination *Destination `locationName:"destination" type:"structure"` +} + +// String returns the string representation +func (s PutDestinationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutDestinationOutput) GoString() string { + return s.String() +} + +type PutDestinationPolicyInput struct { + _ struct{} `type:"structure"` + + // An IAM policy document that authorizes cross-account users to deliver their + // log events to associated destination. + AccessPolicy *string `locationName:"accessPolicy" min:"1" type:"string" required:"true"` + + // A name for an existing destination. + DestinationName *string `locationName:"destinationName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutDestinationPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutDestinationPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutDestinationPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutDestinationPolicyInput"} + if s.AccessPolicy == nil { + invalidParams.Add(request.NewErrParamRequired("AccessPolicy")) + } + if s.AccessPolicy != nil && len(*s.AccessPolicy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccessPolicy", 1)) + } + if s.DestinationName == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationName")) + } + if s.DestinationName != nil && len(*s.DestinationName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DestinationName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutDestinationPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutDestinationPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutDestinationPolicyOutput) GoString() string { + return s.String() +} + +type PutLogEventsInput struct { + _ struct{} `type:"structure"` + + // A list of log events belonging to a log stream. + LogEvents []*InputLogEvent `locationName:"logEvents" min:"1" type:"list" required:"true"` + + // The name of the log group to put log events to. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // The name of the log stream to put log events to. + LogStreamName *string `locationName:"logStreamName" min:"1" type:"string" required:"true"` + + // A string token that must be obtained from the response of the previous PutLogEvents + // request. + SequenceToken *string `locationName:"sequenceToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s PutLogEventsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutLogEventsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
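+//
+// A sketch of the sequence-token flow (svc and the names are assumed): the
+// first call on a new stream omits SequenceToken, and each later call passes
+// the NextSequenceToken from the previous response:
+//
+//	resp, err := svc.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{
+//		LogGroupName:  aws.String("my-log-group"),
+//		LogStreamName: aws.String("my-stream"),
+//		LogEvents: []*cloudwatchlogs.InputLogEvent{{
+//			Message:   aws.String("hello"),
+//			Timestamp: aws.Int64(time.Now().UnixNano() / int64(time.Millisecond)),
+//		}},
+//	})
+//	// On success, resp.NextSequenceToken is the SequenceToken for the next
+//	// PutLogEvents call on this stream.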
+func (s *PutLogEventsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutLogEventsInput"} + if s.LogEvents == nil { + invalidParams.Add(request.NewErrParamRequired("LogEvents")) + } + if s.LogEvents != nil && len(s.LogEvents) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogEvents", 1)) + } + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.LogStreamName == nil { + invalidParams.Add(request.NewErrParamRequired("LogStreamName")) + } + if s.LogStreamName != nil && len(*s.LogStreamName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogStreamName", 1)) + } + if s.SequenceToken != nil && len(*s.SequenceToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SequenceToken", 1)) + } + if s.LogEvents != nil { + for i, v := range s.LogEvents { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LogEvents", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutLogEventsOutput struct { + _ struct{} `type:"structure"` + + // A string token used for making PutLogEvents requests. A sequenceToken can + // only be used once, and PutLogEvents requests must include the sequenceToken + // obtained from the response of the previous request. + NextSequenceToken *string `locationName:"nextSequenceToken" min:"1" type:"string"` + + RejectedLogEventsInfo *RejectedLogEventsInfo `locationName:"rejectedLogEventsInfo" type:"structure"` +} + +// String returns the string representation +func (s PutLogEventsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutLogEventsOutput) GoString() string { + return s.String() +} + +type PutMetricFilterInput struct { + _ struct{} `type:"structure"` + + // A name for the metric filter. + FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"` + + // A valid CloudWatch Logs filter pattern for extracting metric data out of + // ingested log events. + FilterPattern *string `locationName:"filterPattern" type:"string" required:"true"` + + // The name of the log group to associate the metric filter with. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // A collection of information needed to define how metric data gets emitted. + MetricTransformations []*MetricTransformation `locationName:"metricTransformations" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s PutMetricFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutMetricFilterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
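+//
+// A sketch tying the pieces together (svc and the names are illustrative):
+// count occurrences of the term "Error" by publishing the value "1" for each
+// matching event, as the MetricTransformation documentation above describes:
+//
+//	_, err := svc.PutMetricFilter(&cloudwatchlogs.PutMetricFilterInput{
+//		LogGroupName:  aws.String("my-log-group"),
+//		FilterName:    aws.String("errors"),
+//		FilterPattern: aws.String("Error"),
+//		MetricTransformations: []*cloudwatchlogs.MetricTransformation{{
+//			MetricName:      aws.String("ErrorCount"),
+//			MetricNamespace: aws.String("MyApp"),
+//			MetricValue:     aws.String("1"),
+//		}},
+//	})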
+func (s *PutMetricFilterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutMetricFilterInput"} + if s.FilterName == nil { + invalidParams.Add(request.NewErrParamRequired("FilterName")) + } + if s.FilterName != nil && len(*s.FilterName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FilterName", 1)) + } + if s.FilterPattern == nil { + invalidParams.Add(request.NewErrParamRequired("FilterPattern")) + } + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.MetricTransformations == nil { + invalidParams.Add(request.NewErrParamRequired("MetricTransformations")) + } + if s.MetricTransformations != nil && len(s.MetricTransformations) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MetricTransformations", 1)) + } + if s.MetricTransformations != nil { + for i, v := range s.MetricTransformations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "MetricTransformations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutMetricFilterOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutMetricFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutMetricFilterOutput) GoString() string { + return s.String() +} + +type PutRetentionPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the log group to associate the retention policy with. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // Specifies the number of days you want to retain log events in the specified + // log group. Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, + // 365, 400, 545, 731, 1827, 3653. + RetentionInDays *int64 `locationName:"retentionInDays" type:"integer" required:"true"` +} + +// String returns the string representation +func (s PutRetentionPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRetentionPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutRetentionPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutRetentionPolicyInput"} + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.RetentionInDays == nil { + invalidParams.Add(request.NewErrParamRequired("RetentionInDays")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutRetentionPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutRetentionPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRetentionPolicyOutput) GoString() string { + return s.String() +} + +type PutSubscriptionFilterInput struct { + _ struct{} `type:"structure"` + + // The ARN of the destination to deliver matching log events to. 
Currently, + // the supported destinations are: An Amazon Kinesis stream belonging to the + // same account as the subscription filter, for same-account delivery. A logical + // destination (used via an ARN of Destination) belonging to a different account, + // for cross-account delivery. An Amazon Kinesis Firehose stream belonging + // to the same account as the subscription filter, for same-account delivery. + // An AWS Lambda function belonging to the same account as the subscription + // filter, for same-account delivery. + DestinationArn *string `locationName:"destinationArn" min:"1" type:"string" required:"true"` + + // A name for the subscription filter. + FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"` + + // A valid CloudWatch Logs filter pattern for subscribing to a filtered stream + // of log events. + FilterPattern *string `locationName:"filterPattern" type:"string" required:"true"` + + // The name of the log group to associate the subscription filter with. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // The ARN of an IAM role that grants CloudWatch Logs permissions to deliver + // ingested log events to the destination stream. You don't need to provide + // the ARN when you are working with a logical destination (used via an ARN + // of Destination) for cross-account delivery. + RoleArn *string `locationName:"roleArn" min:"1" type:"string"` +} + +// String returns the string representation +func (s PutSubscriptionFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutSubscriptionFilterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
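+//
+// A sketch for same-account delivery to an Amazon Kinesis stream (svc, ARNs,
+// and names are placeholders); RoleArn is supplied because the target is a
+// Kinesis stream rather than a logical destination:
+//
+//	_, err := svc.PutSubscriptionFilter(&cloudwatchlogs.PutSubscriptionFilterInput{
+//		LogGroupName:   aws.String("my-log-group"),
+//		FilterName:     aws.String("to-kinesis"),
+//		FilterPattern:  aws.String(""),
+//		DestinationArn: aws.String("arn:aws:kinesis:us-east-1:123456789012:stream/my-stream"),
+//		RoleArn:        aws.String("arn:aws:iam::123456789012:role/CWLtoKinesisRole"),
+//	})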
+func (s *PutSubscriptionFilterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutSubscriptionFilterInput"} + if s.DestinationArn == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationArn")) + } + if s.DestinationArn != nil && len(*s.DestinationArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DestinationArn", 1)) + } + if s.FilterName == nil { + invalidParams.Add(request.NewErrParamRequired("FilterName")) + } + if s.FilterName != nil && len(*s.FilterName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FilterName", 1)) + } + if s.FilterPattern == nil { + invalidParams.Add(request.NewErrParamRequired("FilterPattern")) + } + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.RoleArn != nil && len(*s.RoleArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutSubscriptionFilterOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutSubscriptionFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutSubscriptionFilterOutput) GoString() string { + return s.String() +} + +type RejectedLogEventsInfo struct { + _ struct{} `type:"structure"` + + ExpiredLogEventEndIndex *int64 `locationName:"expiredLogEventEndIndex" type:"integer"` + + TooNewLogEventStartIndex *int64 `locationName:"tooNewLogEventStartIndex" type:"integer"` + + TooOldLogEventEndIndex *int64 `locationName:"tooOldLogEventEndIndex" type:"integer"` +} + +// String returns the string representation +func (s RejectedLogEventsInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RejectedLogEventsInfo) GoString() string { + return s.String() +} + +// An object indicating the search status of a log stream in a FilterLogEvents +// request. +type SearchedLogStream struct { + _ struct{} `type:"structure"` + + // The name of the log stream. + LogStreamName *string `locationName:"logStreamName" min:"1" type:"string"` + + // Indicates whether all the events in this log stream were searched or more + // data exists to search by paginating further. + SearchedCompletely *bool `locationName:"searchedCompletely" type:"boolean"` +} + +// String returns the string representation +func (s SearchedLogStream) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SearchedLogStream) GoString() string { + return s.String() +} + +type SubscriptionFilter struct { + _ struct{} `type:"structure"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. + CreationTime *int64 `locationName:"creationTime" type:"long"` + + DestinationArn *string `locationName:"destinationArn" min:"1" type:"string"` + + // A name for a metric or subscription filter. + FilterName *string `locationName:"filterName" min:"1" type:"string"` + + // A symbolic description of how CloudWatch Logs should interpret the data in + // each log event. For example, a log event may contain timestamps, IP addresses, + // strings, and so on. You use the filter pattern to specify what to look for + // in the log event message. 
+ FilterPattern *string `locationName:"filterPattern" type:"string"` + + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"` + + RoleArn *string `locationName:"roleArn" min:"1" type:"string"` +} + +// String returns the string representation +func (s SubscriptionFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SubscriptionFilter) GoString() string { + return s.String() +} + +type TestMetricFilterInput struct { + _ struct{} `type:"structure"` + + // A symbolic description of how CloudWatch Logs should interpret the data in + // each log event. For example, a log event may contain timestamps, IP addresses, + // strings, and so on. You use the filter pattern to specify what to look for + // in the log event message. + FilterPattern *string `locationName:"filterPattern" type:"string" required:"true"` + + // A list of log event messages to test. + LogEventMessages []*string `locationName:"logEventMessages" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s TestMetricFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TestMetricFilterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TestMetricFilterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TestMetricFilterInput"} + if s.FilterPattern == nil { + invalidParams.Add(request.NewErrParamRequired("FilterPattern")) + } + if s.LogEventMessages == nil { + invalidParams.Add(request.NewErrParamRequired("LogEventMessages")) + } + if s.LogEventMessages != nil && len(s.LogEventMessages) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogEventMessages", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type TestMetricFilterOutput struct { + _ struct{} `type:"structure"` + + Matches []*MetricFilterMatchRecord `locationName:"matches" type:"list"` +} + +// String returns the string representation +func (s TestMetricFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TestMetricFilterOutput) GoString() string { + return s.String() +} + +const ( + // @enum ExportTaskStatusCode + ExportTaskStatusCodeCancelled = "CANCELLED" + // @enum ExportTaskStatusCode + ExportTaskStatusCodeCompleted = "COMPLETED" + // @enum ExportTaskStatusCode + ExportTaskStatusCodeFailed = "FAILED" + // @enum ExportTaskStatusCode + ExportTaskStatusCodePending = "PENDING" + // @enum ExportTaskStatusCode + ExportTaskStatusCodePendingCancel = "PENDING_CANCEL" + // @enum ExportTaskStatusCode + ExportTaskStatusCodeRunning = "RUNNING" +) + +const ( + // @enum OrderBy + OrderByLogStreamName = "LogStreamName" + // @enum OrderBy + OrderByLastEventTime = "LastEventTime" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/cloudwatchlogsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/cloudwatchlogsiface/interface.go new file mode 100644 index 000000000..c4d87b795 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/cloudwatchlogsiface/interface.go @@ -0,0 +1,128 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cloudwatchlogsiface provides an interface for the Amazon CloudWatch Logs. 
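+//
+// The interface is also useful for stubbing out the client when unit testing
+// code that calls CloudWatch Logs. A minimal sketch (the mock type is
+// illustrative, not part of this package):
+//
+//    type mockLogsClient struct {
+//        cloudwatchlogsiface.CloudWatchLogsAPI
+//    }
+//
+//    func (m *mockLogsClient) CreateLogGroup(in *cloudwatchlogs.CreateLogGroupInput) (*cloudwatchlogs.CreateLogGroupOutput, error) {
+//        return &cloudwatchlogs.CreateLogGroupOutput{}, nil
+//    }
+//
+// Code under test that accepts a CloudWatchLogsAPI value can then be handed
+// the mock instead of a real client.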
+package cloudwatchlogsiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" +) + +// CloudWatchLogsAPI is the interface type for cloudwatchlogs.CloudWatchLogs. +type CloudWatchLogsAPI interface { + CancelExportTaskRequest(*cloudwatchlogs.CancelExportTaskInput) (*request.Request, *cloudwatchlogs.CancelExportTaskOutput) + + CancelExportTask(*cloudwatchlogs.CancelExportTaskInput) (*cloudwatchlogs.CancelExportTaskOutput, error) + + CreateExportTaskRequest(*cloudwatchlogs.CreateExportTaskInput) (*request.Request, *cloudwatchlogs.CreateExportTaskOutput) + + CreateExportTask(*cloudwatchlogs.CreateExportTaskInput) (*cloudwatchlogs.CreateExportTaskOutput, error) + + CreateLogGroupRequest(*cloudwatchlogs.CreateLogGroupInput) (*request.Request, *cloudwatchlogs.CreateLogGroupOutput) + + CreateLogGroup(*cloudwatchlogs.CreateLogGroupInput) (*cloudwatchlogs.CreateLogGroupOutput, error) + + CreateLogStreamRequest(*cloudwatchlogs.CreateLogStreamInput) (*request.Request, *cloudwatchlogs.CreateLogStreamOutput) + + CreateLogStream(*cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) + + DeleteDestinationRequest(*cloudwatchlogs.DeleteDestinationInput) (*request.Request, *cloudwatchlogs.DeleteDestinationOutput) + + DeleteDestination(*cloudwatchlogs.DeleteDestinationInput) (*cloudwatchlogs.DeleteDestinationOutput, error) + + DeleteLogGroupRequest(*cloudwatchlogs.DeleteLogGroupInput) (*request.Request, *cloudwatchlogs.DeleteLogGroupOutput) + + DeleteLogGroup(*cloudwatchlogs.DeleteLogGroupInput) (*cloudwatchlogs.DeleteLogGroupOutput, error) + + DeleteLogStreamRequest(*cloudwatchlogs.DeleteLogStreamInput) (*request.Request, *cloudwatchlogs.DeleteLogStreamOutput) + + DeleteLogStream(*cloudwatchlogs.DeleteLogStreamInput) (*cloudwatchlogs.DeleteLogStreamOutput, error) + + DeleteMetricFilterRequest(*cloudwatchlogs.DeleteMetricFilterInput) (*request.Request, *cloudwatchlogs.DeleteMetricFilterOutput) + + DeleteMetricFilter(*cloudwatchlogs.DeleteMetricFilterInput) (*cloudwatchlogs.DeleteMetricFilterOutput, error) + + DeleteRetentionPolicyRequest(*cloudwatchlogs.DeleteRetentionPolicyInput) (*request.Request, *cloudwatchlogs.DeleteRetentionPolicyOutput) + + DeleteRetentionPolicy(*cloudwatchlogs.DeleteRetentionPolicyInput) (*cloudwatchlogs.DeleteRetentionPolicyOutput, error) + + DeleteSubscriptionFilterRequest(*cloudwatchlogs.DeleteSubscriptionFilterInput) (*request.Request, *cloudwatchlogs.DeleteSubscriptionFilterOutput) + + DeleteSubscriptionFilter(*cloudwatchlogs.DeleteSubscriptionFilterInput) (*cloudwatchlogs.DeleteSubscriptionFilterOutput, error) + + DescribeDestinationsRequest(*cloudwatchlogs.DescribeDestinationsInput) (*request.Request, *cloudwatchlogs.DescribeDestinationsOutput) + + DescribeDestinations(*cloudwatchlogs.DescribeDestinationsInput) (*cloudwatchlogs.DescribeDestinationsOutput, error) + + DescribeDestinationsPages(*cloudwatchlogs.DescribeDestinationsInput, func(*cloudwatchlogs.DescribeDestinationsOutput, bool) bool) error + + DescribeExportTasksRequest(*cloudwatchlogs.DescribeExportTasksInput) (*request.Request, *cloudwatchlogs.DescribeExportTasksOutput) + + DescribeExportTasks(*cloudwatchlogs.DescribeExportTasksInput) (*cloudwatchlogs.DescribeExportTasksOutput, error) + + DescribeLogGroupsRequest(*cloudwatchlogs.DescribeLogGroupsInput) (*request.Request, *cloudwatchlogs.DescribeLogGroupsOutput) + + DescribeLogGroups(*cloudwatchlogs.DescribeLogGroupsInput) (*cloudwatchlogs.DescribeLogGroupsOutput, error) + + 
DescribeLogGroupsPages(*cloudwatchlogs.DescribeLogGroupsInput, func(*cloudwatchlogs.DescribeLogGroupsOutput, bool) bool) error + + DescribeLogStreamsRequest(*cloudwatchlogs.DescribeLogStreamsInput) (*request.Request, *cloudwatchlogs.DescribeLogStreamsOutput) + + DescribeLogStreams(*cloudwatchlogs.DescribeLogStreamsInput) (*cloudwatchlogs.DescribeLogStreamsOutput, error) + + DescribeLogStreamsPages(*cloudwatchlogs.DescribeLogStreamsInput, func(*cloudwatchlogs.DescribeLogStreamsOutput, bool) bool) error + + DescribeMetricFiltersRequest(*cloudwatchlogs.DescribeMetricFiltersInput) (*request.Request, *cloudwatchlogs.DescribeMetricFiltersOutput) + + DescribeMetricFilters(*cloudwatchlogs.DescribeMetricFiltersInput) (*cloudwatchlogs.DescribeMetricFiltersOutput, error) + + DescribeMetricFiltersPages(*cloudwatchlogs.DescribeMetricFiltersInput, func(*cloudwatchlogs.DescribeMetricFiltersOutput, bool) bool) error + + DescribeSubscriptionFiltersRequest(*cloudwatchlogs.DescribeSubscriptionFiltersInput) (*request.Request, *cloudwatchlogs.DescribeSubscriptionFiltersOutput) + + DescribeSubscriptionFilters(*cloudwatchlogs.DescribeSubscriptionFiltersInput) (*cloudwatchlogs.DescribeSubscriptionFiltersOutput, error) + + DescribeSubscriptionFiltersPages(*cloudwatchlogs.DescribeSubscriptionFiltersInput, func(*cloudwatchlogs.DescribeSubscriptionFiltersOutput, bool) bool) error + + FilterLogEventsRequest(*cloudwatchlogs.FilterLogEventsInput) (*request.Request, *cloudwatchlogs.FilterLogEventsOutput) + + FilterLogEvents(*cloudwatchlogs.FilterLogEventsInput) (*cloudwatchlogs.FilterLogEventsOutput, error) + + FilterLogEventsPages(*cloudwatchlogs.FilterLogEventsInput, func(*cloudwatchlogs.FilterLogEventsOutput, bool) bool) error + + GetLogEventsRequest(*cloudwatchlogs.GetLogEventsInput) (*request.Request, *cloudwatchlogs.GetLogEventsOutput) + + GetLogEvents(*cloudwatchlogs.GetLogEventsInput) (*cloudwatchlogs.GetLogEventsOutput, error) + + GetLogEventsPages(*cloudwatchlogs.GetLogEventsInput, func(*cloudwatchlogs.GetLogEventsOutput, bool) bool) error + + PutDestinationRequest(*cloudwatchlogs.PutDestinationInput) (*request.Request, *cloudwatchlogs.PutDestinationOutput) + + PutDestination(*cloudwatchlogs.PutDestinationInput) (*cloudwatchlogs.PutDestinationOutput, error) + + PutDestinationPolicyRequest(*cloudwatchlogs.PutDestinationPolicyInput) (*request.Request, *cloudwatchlogs.PutDestinationPolicyOutput) + + PutDestinationPolicy(*cloudwatchlogs.PutDestinationPolicyInput) (*cloudwatchlogs.PutDestinationPolicyOutput, error) + + PutLogEventsRequest(*cloudwatchlogs.PutLogEventsInput) (*request.Request, *cloudwatchlogs.PutLogEventsOutput) + + PutLogEvents(*cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) + + PutMetricFilterRequest(*cloudwatchlogs.PutMetricFilterInput) (*request.Request, *cloudwatchlogs.PutMetricFilterOutput) + + PutMetricFilter(*cloudwatchlogs.PutMetricFilterInput) (*cloudwatchlogs.PutMetricFilterOutput, error) + + PutRetentionPolicyRequest(*cloudwatchlogs.PutRetentionPolicyInput) (*request.Request, *cloudwatchlogs.PutRetentionPolicyOutput) + + PutRetentionPolicy(*cloudwatchlogs.PutRetentionPolicyInput) (*cloudwatchlogs.PutRetentionPolicyOutput, error) + + PutSubscriptionFilterRequest(*cloudwatchlogs.PutSubscriptionFilterInput) (*request.Request, *cloudwatchlogs.PutSubscriptionFilterOutput) + + PutSubscriptionFilter(*cloudwatchlogs.PutSubscriptionFilterInput) (*cloudwatchlogs.PutSubscriptionFilterOutput, error) + + TestMetricFilterRequest(*cloudwatchlogs.TestMetricFilterInput) 
(*request.Request, *cloudwatchlogs.TestMetricFilterOutput) + + TestMetricFilter(*cloudwatchlogs.TestMetricFilterInput) (*cloudwatchlogs.TestMetricFilterOutput, error) +} + +var _ CloudWatchLogsAPI = (*cloudwatchlogs.CloudWatchLogs)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/examples_test.go new file mode 100644 index 000000000..13c56fc88 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/examples_test.go @@ -0,0 +1,566 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package cloudwatchlogs_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleCloudWatchLogs_CancelExportTask() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.CancelExportTaskInput{ + TaskId: aws.String("ExportTaskId"), // Required + } + resp, err := svc.CancelExportTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_CreateExportTask() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.CreateExportTaskInput{ + Destination: aws.String("ExportDestinationBucket"), // Required + From: aws.Int64(1), // Required + LogGroupName: aws.String("LogGroupName"), // Required + To: aws.Int64(1), // Required + DestinationPrefix: aws.String("ExportDestinationPrefix"), + LogStreamNamePrefix: aws.String("LogStreamName"), + TaskName: aws.String("ExportTaskName"), + } + resp, err := svc.CreateExportTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_CreateLogGroup() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.CreateLogGroupInput{ + LogGroupName: aws.String("LogGroupName"), // Required + } + resp, err := svc.CreateLogGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_CreateLogStream() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.CreateLogStreamInput{ + LogGroupName: aws.String("LogGroupName"), // Required + LogStreamName: aws.String("LogStreamName"), // Required + } + resp, err := svc.CreateLogStream(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_DeleteDestination() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.DeleteDestinationInput{ + DestinationName: aws.String("DestinationName"), // Required + } + resp, err := svc.DeleteDestination(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudWatchLogs_DeleteLogGroup() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.DeleteLogGroupInput{ + LogGroupName: aws.String("LogGroupName"), // Required + } + resp, err := svc.DeleteLogGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_DeleteLogStream() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.DeleteLogStreamInput{ + LogGroupName: aws.String("LogGroupName"), // Required + LogStreamName: aws.String("LogStreamName"), // Required + } + resp, err := svc.DeleteLogStream(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_DeleteMetricFilter() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.DeleteMetricFilterInput{ + FilterName: aws.String("FilterName"), // Required + LogGroupName: aws.String("LogGroupName"), // Required + } + resp, err := svc.DeleteMetricFilter(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_DeleteRetentionPolicy() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.DeleteRetentionPolicyInput{ + LogGroupName: aws.String("LogGroupName"), // Required + } + resp, err := svc.DeleteRetentionPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_DeleteSubscriptionFilter() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.DeleteSubscriptionFilterInput{ + FilterName: aws.String("FilterName"), // Required + LogGroupName: aws.String("LogGroupName"), // Required + } + resp, err := svc.DeleteSubscriptionFilter(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_DescribeDestinations() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.DescribeDestinationsInput{ + DestinationNamePrefix: aws.String("DestinationName"), + Limit: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.DescribeDestinations(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
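+	// If resp.NextToken is non-nil, more destinations remain; a sketch of the
+	// paginated variant (illustrative, not part of the generated example):
+	//
+	//    err := svc.DescribeDestinationsPages(params,
+	//        func(page *cloudwatchlogs.DescribeDestinationsOutput, lastPage bool) bool {
+	//            fmt.Println(page)
+	//            return !lastPage
+	//        })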
+ fmt.Println(resp) +} + +func ExampleCloudWatchLogs_DescribeExportTasks() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.DescribeExportTasksInput{ + Limit: aws.Int64(1), + NextToken: aws.String("NextToken"), + StatusCode: aws.String("ExportTaskStatusCode"), + TaskId: aws.String("ExportTaskId"), + } + resp, err := svc.DescribeExportTasks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_DescribeLogGroups() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.DescribeLogGroupsInput{ + Limit: aws.Int64(1), + LogGroupNamePrefix: aws.String("LogGroupName"), + NextToken: aws.String("NextToken"), + } + resp, err := svc.DescribeLogGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_DescribeLogStreams() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.DescribeLogStreamsInput{ + LogGroupName: aws.String("LogGroupName"), // Required + Descending: aws.Bool(true), + Limit: aws.Int64(1), + LogStreamNamePrefix: aws.String("LogStreamName"), + NextToken: aws.String("NextToken"), + OrderBy: aws.String("OrderBy"), + } + resp, err := svc.DescribeLogStreams(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_DescribeMetricFilters() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.DescribeMetricFiltersInput{ + LogGroupName: aws.String("LogGroupName"), // Required + FilterNamePrefix: aws.String("FilterName"), + Limit: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.DescribeMetricFilters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_DescribeSubscriptionFilters() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.DescribeSubscriptionFiltersInput{ + LogGroupName: aws.String("LogGroupName"), // Required + FilterNamePrefix: aws.String("FilterName"), + Limit: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.DescribeSubscriptionFilters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_FilterLogEvents() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.FilterLogEventsInput{ + LogGroupName: aws.String("LogGroupName"), // Required + EndTime: aws.Int64(1), + FilterPattern: aws.String("FilterPattern"), + Interleaved: aws.Bool(true), + Limit: aws.Int64(1), + LogStreamNames: []*string{ + aws.String("LogStreamName"), // Required + // More values... 
+ }, + NextToken: aws.String("NextToken"), + StartTime: aws.Int64(1), + } + resp, err := svc.FilterLogEvents(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_GetLogEvents() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.GetLogEventsInput{ + LogGroupName: aws.String("LogGroupName"), // Required + LogStreamName: aws.String("LogStreamName"), // Required + EndTime: aws.Int64(1), + Limit: aws.Int64(1), + NextToken: aws.String("NextToken"), + StartFromHead: aws.Bool(true), + StartTime: aws.Int64(1), + } + resp, err := svc.GetLogEvents(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_PutDestination() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.PutDestinationInput{ + DestinationName: aws.String("DestinationName"), // Required + RoleArn: aws.String("RoleArn"), // Required + TargetArn: aws.String("TargetArn"), // Required + } + resp, err := svc.PutDestination(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_PutDestinationPolicy() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.PutDestinationPolicyInput{ + AccessPolicy: aws.String("AccessPolicy"), // Required + DestinationName: aws.String("DestinationName"), // Required + } + resp, err := svc.PutDestinationPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_PutLogEvents() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.PutLogEventsInput{ + LogEvents: []*cloudwatchlogs.InputLogEvent{ // Required + { // Required + Message: aws.String("EventMessage"), // Required + Timestamp: aws.Int64(1), // Required + }, + // More values... + }, + LogGroupName: aws.String("LogGroupName"), // Required + LogStreamName: aws.String("LogStreamName"), // Required + SequenceToken: aws.String("SequenceToken"), + } + resp, err := svc.PutLogEvents(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_PutMetricFilter() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.PutMetricFilterInput{ + FilterName: aws.String("FilterName"), // Required + FilterPattern: aws.String("FilterPattern"), // Required + LogGroupName: aws.String("LogGroupName"), // Required + MetricTransformations: []*cloudwatchlogs.MetricTransformation{ // Required + { // Required + MetricName: aws.String("MetricName"), // Required + MetricNamespace: aws.String("MetricNamespace"), // Required + MetricValue: aws.String("MetricValue"), // Required + }, + // More values... 
+ }, + } + resp, err := svc.PutMetricFilter(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_PutRetentionPolicy() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.PutRetentionPolicyInput{ + LogGroupName: aws.String("LogGroupName"), // Required + RetentionInDays: aws.Int64(1), // Required + } + resp, err := svc.PutRetentionPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_PutSubscriptionFilter() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.PutSubscriptionFilterInput{ + DestinationArn: aws.String("DestinationArn"), // Required + FilterName: aws.String("FilterName"), // Required + FilterPattern: aws.String("FilterPattern"), // Required + LogGroupName: aws.String("LogGroupName"), // Required + RoleArn: aws.String("RoleArn"), + } + resp, err := svc.PutSubscriptionFilter(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_TestMetricFilter() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.TestMetricFilterInput{ + FilterPattern: aws.String("FilterPattern"), // Required + LogEventMessages: []*string{ // Required + aws.String("EventMessage"), // Required + // More values... + }, + } + resp, err := svc.TestMetricFilter(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go new file mode 100644 index 000000000..064110304 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go @@ -0,0 +1,116 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package cloudwatchlogs + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// You can use Amazon CloudWatch Logs to monitor, store, and access your log +// files from Amazon Elastic Compute Cloud (Amazon EC2) instances, Amazon CloudTrail, +// or other sources. You can then retrieve the associated log data from CloudWatch +// Logs using the Amazon CloudWatch console, the CloudWatch Logs commands in +// the AWS CLI, the CloudWatch Logs API, or the CloudWatch Logs SDK. +// +// You can use CloudWatch Logs to: +// +// Monitor Logs from Amazon EC2 Instances in Real-time: You can use CloudWatch +// Logs to monitor applications and systems using log data. For example, CloudWatch +// Logs can track the number of errors that occur in your application logs and +// send you a notification whenever the rate of errors exceeds a threshold you +// specify. 
CloudWatch Logs uses your log data for monitoring, so no code changes
+// are required. For example, you can monitor application logs for specific
+// literal terms (such as "NullReferenceException") or count the number of occurrences
+// of a literal term at a particular position in log data (such as "404" status
+// codes in an Apache access log). When the term you are searching for is found,
+// CloudWatch Logs reports the data to an Amazon CloudWatch metric that you specify.
+//
+// Monitor Amazon CloudTrail Logged Events: You can create alarms in Amazon
+// CloudWatch and receive notifications of particular API activity as captured
+// by CloudTrail and use the notification to perform troubleshooting.
+//
+// Archive Log Data: You can use CloudWatch Logs to store your log data in
+// highly durable storage. You can change the log retention setting so that
+// any log events older than this setting are automatically deleted. The CloudWatch
+// Logs agent makes it easy to quickly send both rotated and non-rotated log
+// data off of a host and into the log service. You can then access the raw
+// log data when you need it.
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type CloudWatchLogs struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "logs"
+
+// New creates a new instance of the CloudWatchLogs client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create a CloudWatchLogs client from just a session.
+//     svc := cloudwatchlogs.New(mySession)
+//
+//     // Create a CloudWatchLogs client with additional configuration
+//     svc := cloudwatchlogs.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudWatchLogs {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *CloudWatchLogs {
+	svc := &CloudWatchLogs{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   ServiceName,
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "2014-03-28",
+				JSONVersion:   "1.1",
+				TargetPrefix:  "Logs_20140328",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+	svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler)
+	svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler)
+	svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler)
+	svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for a CloudWatchLogs operation and runs any
+// custom request initialization.
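+//
+// For example, a test in this package could set initRequest to observe every
+// outgoing operation (a sketch; initRequest is package-private, so this only
+// works from inside the package):
+//
+//    initRequest = func(r *request.Request) {
+//        fmt.Println(r.Operation.Name)
+//    }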
+func (c *CloudWatchLogs) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/codecommit/api.go b/vendor/github.com/aws/aws-sdk-go/service/codecommit/api.go new file mode 100644 index 000000000..e4e8c8cd0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/codecommit/api.go @@ -0,0 +1,1909 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package codecommit provides a client for AWS CodeCommit. +package codecommit + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opBatchGetRepositories = "BatchGetRepositories" + +// BatchGetRepositoriesRequest generates a "aws/request.Request" representing the +// client's request for the BatchGetRepositories operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the BatchGetRepositories method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the BatchGetRepositoriesRequest method. +// req, resp := client.BatchGetRepositoriesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeCommit) BatchGetRepositoriesRequest(input *BatchGetRepositoriesInput) (req *request.Request, output *BatchGetRepositoriesOutput) { + op := &request.Operation{ + Name: opBatchGetRepositories, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchGetRepositoriesInput{} + } + + req = c.newRequest(op, input, output) + output = &BatchGetRepositoriesOutput{} + req.Data = output + return +} + +// Returns information about one or more repositories. +// +// The description field for a repository accepts all HTML characters and all +// valid Unicode characters. Applications that do not HTML-encode the description +// and display it in a web page could expose users to potentially malicious +// code. Make sure that you HTML-encode the description field in any application +// that uses this API to display the repository description on a web page. +func (c *CodeCommit) BatchGetRepositories(input *BatchGetRepositoriesInput) (*BatchGetRepositoriesOutput, error) { + req, out := c.BatchGetRepositoriesRequest(input) + err := req.Send() + return out, err +} + +const opCreateBranch = "CreateBranch" + +// CreateBranchRequest generates a "aws/request.Request" representing the +// client's request for the CreateBranch operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateBranch method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateBranchRequest method. +// req, resp := client.CreateBranchRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeCommit) CreateBranchRequest(input *CreateBranchInput) (req *request.Request, output *CreateBranchOutput) { + op := &request.Operation{ + Name: opCreateBranch, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateBranchInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CreateBranchOutput{} + req.Data = output + return +} + +// Creates a new branch in a repository and points the branch to a commit. +// +// Calling the create branch operation does not set a repository's default +// branch. To do this, call the update default branch operation. +func (c *CodeCommit) CreateBranch(input *CreateBranchInput) (*CreateBranchOutput, error) { + req, out := c.CreateBranchRequest(input) + err := req.Send() + return out, err +} + +const opCreateRepository = "CreateRepository" + +// CreateRepositoryRequest generates a "aws/request.Request" representing the +// client's request for the CreateRepository operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateRepository method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateRepositoryRequest method. +// req, resp := client.CreateRepositoryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeCommit) CreateRepositoryRequest(input *CreateRepositoryInput) (req *request.Request, output *CreateRepositoryOutput) { + op := &request.Operation{ + Name: opCreateRepository, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateRepositoryInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateRepositoryOutput{} + req.Data = output + return +} + +// Creates a new, empty repository. +func (c *CodeCommit) CreateRepository(input *CreateRepositoryInput) (*CreateRepositoryOutput, error) { + req, out := c.CreateRepositoryRequest(input) + err := req.Send() + return out, err +} + +const opDeleteRepository = "DeleteRepository" + +// DeleteRepositoryRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRepository operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteRepository method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteRepositoryRequest method. +// req, resp := client.DeleteRepositoryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeCommit) DeleteRepositoryRequest(input *DeleteRepositoryInput) (req *request.Request, output *DeleteRepositoryOutput) { + op := &request.Operation{ + Name: opDeleteRepository, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRepositoryInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteRepositoryOutput{} + req.Data = output + return +} + +// Deletes a repository. If a specified repository was already deleted, a null +// repository ID will be returned. +// +// Deleting a repository also deletes all associated objects and metadata. +// After a repository is deleted, all future push calls to the deleted repository +// will fail. +func (c *CodeCommit) DeleteRepository(input *DeleteRepositoryInput) (*DeleteRepositoryOutput, error) { + req, out := c.DeleteRepositoryRequest(input) + err := req.Send() + return out, err +} + +const opGetBranch = "GetBranch" + +// GetBranchRequest generates a "aws/request.Request" representing the +// client's request for the GetBranch operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBranch method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBranchRequest method. +// req, resp := client.GetBranchRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeCommit) GetBranchRequest(input *GetBranchInput) (req *request.Request, output *GetBranchOutput) { + op := &request.Operation{ + Name: opGetBranch, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetBranchInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBranchOutput{} + req.Data = output + return +} + +// Returns information about a repository branch, including its name and the +// last commit ID. +func (c *CodeCommit) GetBranch(input *GetBranchInput) (*GetBranchOutput, error) { + req, out := c.GetBranchRequest(input) + err := req.Send() + return out, err +} + +const opGetCommit = "GetCommit" + +// GetCommitRequest generates a "aws/request.Request" representing the +// client's request for the GetCommit operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetCommit method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetCommitRequest method. +// req, resp := client.GetCommitRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeCommit) GetCommitRequest(input *GetCommitInput) (req *request.Request, output *GetCommitOutput) { + op := &request.Operation{ + Name: opGetCommit, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetCommitInput{} + } + + req = c.newRequest(op, input, output) + output = &GetCommitOutput{} + req.Data = output + return +} + +// Returns information about a commit, including commit message and committer +// information. +func (c *CodeCommit) GetCommit(input *GetCommitInput) (*GetCommitOutput, error) { + req, out := c.GetCommitRequest(input) + err := req.Send() + return out, err +} + +const opGetRepository = "GetRepository" + +// GetRepositoryRequest generates a "aws/request.Request" representing the +// client's request for the GetRepository operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetRepository method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetRepositoryRequest method. +// req, resp := client.GetRepositoryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeCommit) GetRepositoryRequest(input *GetRepositoryInput) (req *request.Request, output *GetRepositoryOutput) { + op := &request.Operation{ + Name: opGetRepository, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetRepositoryInput{} + } + + req = c.newRequest(op, input, output) + output = &GetRepositoryOutput{} + req.Data = output + return +} + +// Returns information about a repository. +// +// The description field for a repository accepts all HTML characters and all +// valid Unicode characters. Applications that do not HTML-encode the description +// and display it in a web page could expose users to potentially malicious +// code. Make sure that you HTML-encode the description field in any application +// that uses this API to display the repository description on a web page. 
+func (c *CodeCommit) GetRepository(input *GetRepositoryInput) (*GetRepositoryOutput, error) { + req, out := c.GetRepositoryRequest(input) + err := req.Send() + return out, err +} + +const opGetRepositoryTriggers = "GetRepositoryTriggers" + +// GetRepositoryTriggersRequest generates a "aws/request.Request" representing the +// client's request for the GetRepositoryTriggers operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetRepositoryTriggers method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetRepositoryTriggersRequest method. +// req, resp := client.GetRepositoryTriggersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeCommit) GetRepositoryTriggersRequest(input *GetRepositoryTriggersInput) (req *request.Request, output *GetRepositoryTriggersOutput) { + op := &request.Operation{ + Name: opGetRepositoryTriggers, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetRepositoryTriggersInput{} + } + + req = c.newRequest(op, input, output) + output = &GetRepositoryTriggersOutput{} + req.Data = output + return +} + +// Gets information about triggers configured for a repository. +func (c *CodeCommit) GetRepositoryTriggers(input *GetRepositoryTriggersInput) (*GetRepositoryTriggersOutput, error) { + req, out := c.GetRepositoryTriggersRequest(input) + err := req.Send() + return out, err +} + +const opListBranches = "ListBranches" + +// ListBranchesRequest generates a "aws/request.Request" representing the +// client's request for the ListBranches operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListBranches method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListBranchesRequest method. +// req, resp := client.ListBranchesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeCommit) ListBranchesRequest(input *ListBranchesInput) (req *request.Request, output *ListBranchesOutput) { + op := &request.Operation{ + Name: opListBranches, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListBranchesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListBranchesOutput{} + req.Data = output + return +} + +// Gets information about one or more branches in a repository. 
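+// A minimal call sketch (the repository name is illustrative):
+//
+//    out, err := svc.ListBranches(&codecommit.ListBranchesInput{
+//        RepositoryName: aws.String("MyRepo"),
+//    })
+//    if err == nil {
+//        fmt.Println(out.Branches) // branch names; out.NextToken signals more pages
+//    }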
+func (c *CodeCommit) ListBranches(input *ListBranchesInput) (*ListBranchesOutput, error) { + req, out := c.ListBranchesRequest(input) + err := req.Send() + return out, err +} + +// ListBranchesPages iterates over the pages of a ListBranches operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListBranches method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListBranches operation. +// pageNum := 0 +// err := client.ListBranchesPages(params, +// func(page *ListBranchesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CodeCommit) ListBranchesPages(input *ListBranchesInput, fn func(p *ListBranchesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListBranchesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListBranchesOutput), lastPage) + }) +} + +const opListRepositories = "ListRepositories" + +// ListRepositoriesRequest generates a "aws/request.Request" representing the +// client's request for the ListRepositories operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListRepositories method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListRepositoriesRequest method. +// req, resp := client.ListRepositoriesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeCommit) ListRepositoriesRequest(input *ListRepositoriesInput) (req *request.Request, output *ListRepositoriesOutput) { + op := &request.Operation{ + Name: opListRepositories, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListRepositoriesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListRepositoriesOutput{} + req.Data = output + return +} + +// Gets information about one or more repositories. +func (c *CodeCommit) ListRepositories(input *ListRepositoriesInput) (*ListRepositoriesOutput, error) { + req, out := c.ListRepositoriesRequest(input) + err := req.Send() + return out, err +} + +// ListRepositoriesPages iterates over the pages of a ListRepositories operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListRepositories method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListRepositories operation. 
+// pageNum := 0 +// err := client.ListRepositoriesPages(params, +// func(page *ListRepositoriesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CodeCommit) ListRepositoriesPages(input *ListRepositoriesInput, fn func(p *ListRepositoriesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListRepositoriesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListRepositoriesOutput), lastPage) + }) +} + +const opPutRepositoryTriggers = "PutRepositoryTriggers" + +// PutRepositoryTriggersRequest generates a "aws/request.Request" representing the +// client's request for the PutRepositoryTriggers operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutRepositoryTriggers method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutRepositoryTriggersRequest method. +// req, resp := client.PutRepositoryTriggersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeCommit) PutRepositoryTriggersRequest(input *PutRepositoryTriggersInput) (req *request.Request, output *PutRepositoryTriggersOutput) { + op := &request.Operation{ + Name: opPutRepositoryTriggers, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutRepositoryTriggersInput{} + } + + req = c.newRequest(op, input, output) + output = &PutRepositoryTriggersOutput{} + req.Data = output + return +} + +// Replaces all triggers for a repository. This can be used to create or delete +// triggers. +func (c *CodeCommit) PutRepositoryTriggers(input *PutRepositoryTriggersInput) (*PutRepositoryTriggersOutput, error) { + req, out := c.PutRepositoryTriggersRequest(input) + err := req.Send() + return out, err +} + +const opTestRepositoryTriggers = "TestRepositoryTriggers" + +// TestRepositoryTriggersRequest generates a "aws/request.Request" representing the +// client's request for the TestRepositoryTriggers operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the TestRepositoryTriggers method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the TestRepositoryTriggersRequest method. 
+// req, resp := client.TestRepositoryTriggersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeCommit) TestRepositoryTriggersRequest(input *TestRepositoryTriggersInput) (req *request.Request, output *TestRepositoryTriggersOutput) { + op := &request.Operation{ + Name: opTestRepositoryTriggers, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TestRepositoryTriggersInput{} + } + + req = c.newRequest(op, input, output) + output = &TestRepositoryTriggersOutput{} + req.Data = output + return +} + +// Tests the functionality of repository triggers by sending information to +// the trigger target. If real data is available in the repository, the test +// will send data from the last commit. If no data is available, sample data +// will be generated. +func (c *CodeCommit) TestRepositoryTriggers(input *TestRepositoryTriggersInput) (*TestRepositoryTriggersOutput, error) { + req, out := c.TestRepositoryTriggersRequest(input) + err := req.Send() + return out, err +} + +const opUpdateDefaultBranch = "UpdateDefaultBranch" + +// UpdateDefaultBranchRequest generates a "aws/request.Request" representing the +// client's request for the UpdateDefaultBranch operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateDefaultBranch method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateDefaultBranchRequest method. +// req, resp := client.UpdateDefaultBranchRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeCommit) UpdateDefaultBranchRequest(input *UpdateDefaultBranchInput) (req *request.Request, output *UpdateDefaultBranchOutput) { + op := &request.Operation{ + Name: opUpdateDefaultBranch, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateDefaultBranchInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateDefaultBranchOutput{} + req.Data = output + return +} + +// Sets or changes the default branch name for the specified repository. +// +// If you use this operation to change the default branch name to the current +// default branch name, a success message is returned even though the default +// branch did not change. +func (c *CodeCommit) UpdateDefaultBranch(input *UpdateDefaultBranchInput) (*UpdateDefaultBranchOutput, error) { + req, out := c.UpdateDefaultBranchRequest(input) + err := req.Send() + return out, err +} + +const opUpdateRepositoryDescription = "UpdateRepositoryDescription" + +// UpdateRepositoryDescriptionRequest generates a "aws/request.Request" representing the +// client's request for the UpdateRepositoryDescription operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateRepositoryDescription method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateRepositoryDescriptionRequest method. +// req, resp := client.UpdateRepositoryDescriptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeCommit) UpdateRepositoryDescriptionRequest(input *UpdateRepositoryDescriptionInput) (req *request.Request, output *UpdateRepositoryDescriptionOutput) { + op := &request.Operation{ + Name: opUpdateRepositoryDescription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateRepositoryDescriptionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateRepositoryDescriptionOutput{} + req.Data = output + return +} + +// Sets or changes the comment or description for a repository. +// +// The description field for a repository accepts all HTML characters and all +// valid Unicode characters. Applications that do not HTML-encode the description +// and display it in a web page could expose users to potentially malicious +// code. Make sure that you HTML-encode the description field in any application +// that uses this API to display the repository description on a web page. +func (c *CodeCommit) UpdateRepositoryDescription(input *UpdateRepositoryDescriptionInput) (*UpdateRepositoryDescriptionOutput, error) { + req, out := c.UpdateRepositoryDescriptionRequest(input) + err := req.Send() + return out, err +} + +const opUpdateRepositoryName = "UpdateRepositoryName" + +// UpdateRepositoryNameRequest generates a "aws/request.Request" representing the +// client's request for the UpdateRepositoryName operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateRepositoryName method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateRepositoryNameRequest method. 
+// req, resp := client.UpdateRepositoryNameRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeCommit) UpdateRepositoryNameRequest(input *UpdateRepositoryNameInput) (req *request.Request, output *UpdateRepositoryNameOutput) { + op := &request.Operation{ + Name: opUpdateRepositoryName, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateRepositoryNameInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateRepositoryNameOutput{} + req.Data = output + return +} + +// Renames a repository. The repository name must be unique across the calling +// AWS account. In addition, repository names are limited to 100 alphanumeric, +// dash, and underscore characters, and cannot include certain characters. The +// suffix ".git" is prohibited. For a full description of the limits on repository +// names, see Limits (http://docs.aws.amazon.com/codecommit/latest/userguide/limits.html) +// in the AWS CodeCommit User Guide. +func (c *CodeCommit) UpdateRepositoryName(input *UpdateRepositoryNameInput) (*UpdateRepositoryNameOutput, error) { + req, out := c.UpdateRepositoryNameRequest(input) + err := req.Send() + return out, err +} + +// Represents the input of a batch get repositories operation. +type BatchGetRepositoriesInput struct { + _ struct{} `type:"structure"` + + // The names of the repositories to get information about. + RepositoryNames []*string `locationName:"repositoryNames" type:"list" required:"true"` +} + +// String returns the string representation +func (s BatchGetRepositoriesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetRepositoriesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchGetRepositoriesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchGetRepositoriesInput"} + if s.RepositoryNames == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryNames")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a batch get repositories operation. +type BatchGetRepositoriesOutput struct { + _ struct{} `type:"structure"` + + // A list of repositories returned by the batch get repositories operation. + Repositories []*RepositoryMetadata `locationName:"repositories" type:"list"` + + // Returns a list of repository names for which information could not be found. + RepositoriesNotFound []*string `locationName:"repositoriesNotFound" type:"list"` +} + +// String returns the string representation +func (s BatchGetRepositoriesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetRepositoriesOutput) GoString() string { + return s.String() +} + +// Returns information about a branch. +type BranchInfo struct { + _ struct{} `type:"structure"` + + // The name of the branch. + BranchName *string `locationName:"branchName" min:"1" type:"string"` + + // The ID of the last commit made to the branch. 
+ CommitId *string `locationName:"commitId" type:"string"` +} + +// String returns the string representation +func (s BranchInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BranchInfo) GoString() string { + return s.String() +} + +// Returns information about a specific commit. +type Commit struct { + _ struct{} `type:"structure"` + + // Any additional data associated with the specified commit. + AdditionalData *string `locationName:"additionalData" type:"string"` + + // Information about the author of the specified commit. + Author *UserInfo `locationName:"author" type:"structure"` + + // Information about the person who committed the specified commit, also known + // as the committer. For more information about the difference between an author + // and a committer in Git, see Viewing the Commit History (http://git-scm.com/book/ch2-3.html) + // in Pro Git by Scott Chacon and Ben Straub. + Committer *UserInfo `locationName:"committer" type:"structure"` + + // The message associated with the specified commit. + Message *string `locationName:"message" type:"string"` + + // The parent list for the specified commit. + Parents []*string `locationName:"parents" type:"list"` + + // Tree information for the specified commit. + TreeId *string `locationName:"treeId" type:"string"` +} + +// String returns the string representation +func (s Commit) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Commit) GoString() string { + return s.String() +} + +// Represents the input of a create branch operation. +type CreateBranchInput struct { + _ struct{} `type:"structure"` + + // The name of the new branch to create. + BranchName *string `locationName:"branchName" min:"1" type:"string" required:"true"` + + // The ID of the commit to point the new branch to. + CommitId *string `locationName:"commitId" type:"string" required:"true"` + + // The name of the repository in which you want to create the new branch. + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateBranchInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBranchInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
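+//
+// A minimal usage sketch (the field values below are illustrative, not taken
+// from this package): calling Validate before sending catches client-side
+// parameter problems without a network round trip.
+//
+//    params := &codecommit.CreateBranchInput{
+//        BranchName: aws.String("my-branch"),
+//        // CommitId and RepositoryName are required but omitted here, so
+//        // Validate returns an ErrInvalidParams naming both fields.
+//    }
+//    if err := params.Validate(); err != nil {
+//        fmt.Println(err)
+//    }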
+func (s *CreateBranchInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateBranchInput"} + if s.BranchName == nil { + invalidParams.Add(request.NewErrParamRequired("BranchName")) + } + if s.BranchName != nil && len(*s.BranchName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("BranchName", 1)) + } + if s.CommitId == nil { + invalidParams.Add(request.NewErrParamRequired("CommitId")) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateBranchOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateBranchOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBranchOutput) GoString() string { + return s.String() +} + +// Represents the input of a create repository operation. +type CreateRepositoryInput struct { + _ struct{} `type:"structure"` + + // A comment or description about the new repository. + // + // The description field for a repository accepts all HTML characters and all + // valid Unicode characters. Applications that do not HTML-encode the description + // and display it in a web page could expose users to potentially malicious + // code. Make sure that you HTML-encode the description field in any application + // that uses this API to display the repository description on a web page. + RepositoryDescription *string `locationName:"repositoryDescription" type:"string"` + + // The name of the new repository to be created. + // + // The repository name must be unique across the calling AWS account. In addition, + // repository names are limited to 100 alphanumeric, dash, and underscore characters, + // and cannot include certain characters. For a full description of the limits + // on repository names, see Limits (http://docs.aws.amazon.com/codecommit/latest/userguide/limits.html) + // in the AWS CodeCommit User Guide. The suffix ".git" is prohibited. + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateRepositoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRepositoryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateRepositoryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateRepositoryInput"} + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a create repository operation. +type CreateRepositoryOutput struct { + _ struct{} `type:"structure"` + + // Information about the newly created repository. 
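+ // The RepositoryMetadata structure defined below includes the repository's
+ // ID, ARN, and HTTPS/SSH clone URLs, among other fields.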
+ RepositoryMetadata *RepositoryMetadata `locationName:"repositoryMetadata" type:"structure"` +} + +// String returns the string representation +func (s CreateRepositoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRepositoryOutput) GoString() string { + return s.String() +} + +// Represents the input of a delete repository operation. +type DeleteRepositoryInput struct { + _ struct{} `type:"structure"` + + // The name of the repository to delete. + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRepositoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRepositoryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteRepositoryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteRepositoryInput"} + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a delete repository operation. +type DeleteRepositoryOutput struct { + _ struct{} `type:"structure"` + + // The ID of the repository that was deleted. + RepositoryId *string `locationName:"repositoryId" type:"string"` +} + +// String returns the string representation +func (s DeleteRepositoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRepositoryOutput) GoString() string { + return s.String() +} + +// Represents the input of a get branch operation. +type GetBranchInput struct { + _ struct{} `type:"structure"` + + // The name of the branch for which you want to retrieve information. + BranchName *string `locationName:"branchName" min:"1" type:"string"` + + // The name of the repository that contains the branch for which you want to + // retrieve information. + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string"` +} + +// String returns the string representation +func (s GetBranchInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBranchInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBranchInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBranchInput"} + if s.BranchName != nil && len(*s.BranchName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("BranchName", 1)) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a get branch operation. +type GetBranchOutput struct { + _ struct{} `type:"structure"` + + // The name of the branch. 
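+ // The branch is returned as a BranchInfo structure, which carries both the
+ // branch name and the ID of its last commit.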
+ Branch *BranchInfo `locationName:"branch" type:"structure"` +} + +// String returns the string representation +func (s GetBranchOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBranchOutput) GoString() string { + return s.String() +} + +// Represents the input of a get commit operation. +type GetCommitInput struct { + _ struct{} `type:"structure"` + + // The commit ID. + CommitId *string `locationName:"commitId" type:"string" required:"true"` + + // The name of the repository to which the commit was made. + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetCommitInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCommitInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetCommitInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetCommitInput"} + if s.CommitId == nil { + invalidParams.Add(request.NewErrParamRequired("CommitId")) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a get commit operation. +type GetCommitOutput struct { + _ struct{} `type:"structure"` + + // Information about the specified commit. + Commit *Commit `locationName:"commit" type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetCommitOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCommitOutput) GoString() string { + return s.String() +} + +// Represents the input of a get repository operation. +type GetRepositoryInput struct { + _ struct{} `type:"structure"` + + // The name of the repository to get information about. + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRepositoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRepositoryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetRepositoryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetRepositoryInput"} + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a get repository operation. +type GetRepositoryOutput struct { + _ struct{} `type:"structure"` + + // Information about the repository. 
+ RepositoryMetadata *RepositoryMetadata `locationName:"repositoryMetadata" type:"structure"` +} + +// String returns the string representation +func (s GetRepositoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRepositoryOutput) GoString() string { + return s.String() +} + +// Represents the input of a get repository triggers operation. +type GetRepositoryTriggersInput struct { + _ struct{} `type:"structure"` + + // The name of the repository for which the trigger is configured. + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string"` +} + +// String returns the string representation +func (s GetRepositoryTriggersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRepositoryTriggersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetRepositoryTriggersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetRepositoryTriggersInput"} + if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a get repository triggers operation. +type GetRepositoryTriggersOutput struct { + _ struct{} `type:"structure"` + + // The system-generated unique ID for the trigger. + ConfigurationId *string `locationName:"configurationId" type:"string"` + + // The JSON block of configuration information for each trigger. + Triggers []*RepositoryTrigger `locationName:"triggers" type:"list"` +} + +// String returns the string representation +func (s GetRepositoryTriggersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRepositoryTriggersOutput) GoString() string { + return s.String() +} + +// Represents the input of a list branches operation. +type ListBranchesInput struct { + _ struct{} `type:"structure"` + + // An enumeration token that allows the operation to batch the results. + NextToken *string `locationName:"nextToken" type:"string"` + + // The name of the repository that contains the branches. + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListBranchesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBranchesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListBranchesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBranchesInput"} + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a list branches operation. +type ListBranchesOutput struct { + _ struct{} `type:"structure"` + + // The list of branch names. + Branches []*string `locationName:"branches" type:"list"` + + // An enumeration token that returns the batch of the results. 
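+ // Pass this token back in a subsequent ListBranches call to retrieve the
+ // next batch of results.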
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListBranchesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBranchesOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a list repositories operation.
+type ListRepositoriesInput struct {
+ _ struct{} `type:"structure"`
+
+ // An enumeration token that allows the operation to batch the results of the
+ // operation. Batch sizes are 1,000 for list repository operations. When the
+ // client sends the token back to AWS CodeCommit, another page of 1,000 records
+ // is retrieved.
+ NextToken *string `locationName:"nextToken" type:"string"`
+
+ // The order in which to sort the results of a list repositories operation.
+ Order *string `locationName:"order" type:"string" enum:"OrderEnum"`
+
+ // The criteria used to sort the results of a list repositories operation.
+ SortBy *string `locationName:"sortBy" type:"string" enum:"SortByEnum"`
+}
+
+// String returns the string representation
+func (s ListRepositoriesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListRepositoriesInput) GoString() string {
+ return s.String()
+}
+
+// Represents the output of a list repositories operation.
+type ListRepositoriesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An enumeration token that allows the operation to batch the results of the
+ // operation. Batch sizes are 1,000 for list repository operations. When the
+ // client sends the token back to AWS CodeCommit, another page of 1,000 records
+ // is retrieved.
+ NextToken *string `locationName:"nextToken" type:"string"`
+
+ // Lists the repositories returned by the list repositories operation.
+ Repositories []*RepositoryNameIdPair `locationName:"repositories" type:"list"`
+}
+
+// String returns the string representation
+func (s ListRepositoriesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListRepositoriesOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a put repository triggers operation.
+type PutRepositoryTriggersInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the repository where you want to create or update the trigger.
+ RepositoryName *string `locationName:"repositoryName" min:"1" type:"string"`
+
+ // The JSON block of configuration information for each trigger.
+ Triggers []*RepositoryTrigger `locationName:"triggers" type:"list"`
+}
+
+// String returns the string representation
+func (s PutRepositoryTriggersInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutRepositoryTriggersInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutRepositoryTriggersInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutRepositoryTriggersInput"}
+ if s.RepositoryName != nil && len(*s.RepositoryName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the output of a put repository triggers operation.
+type PutRepositoryTriggersOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The system-generated unique ID for the create or update operation.
+ ConfigurationId *string `locationName:"configurationId" type:"string"` +} + +// String returns the string representation +func (s PutRepositoryTriggersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRepositoryTriggersOutput) GoString() string { + return s.String() +} + +// Information about a repository. +type RepositoryMetadata struct { + _ struct{} `type:"structure"` + + // The ID of the AWS account associated with the repository. + AccountId *string `locationName:"accountId" type:"string"` + + // The Amazon Resource Name (ARN) of the repository. + Arn *string `type:"string"` + + // The URL to use for cloning the repository over HTTPS. + CloneUrlHttp *string `locationName:"cloneUrlHttp" type:"string"` + + // The URL to use for cloning the repository over SSH. + CloneUrlSsh *string `locationName:"cloneUrlSsh" type:"string"` + + // The date and time the repository was created, in timestamp format. + CreationDate *time.Time `locationName:"creationDate" type:"timestamp" timestampFormat:"unix"` + + // The repository's default branch name. + DefaultBranch *string `locationName:"defaultBranch" min:"1" type:"string"` + + // The date and time the repository was last modified, in timestamp format. + LastModifiedDate *time.Time `locationName:"lastModifiedDate" type:"timestamp" timestampFormat:"unix"` + + // A comment or description about the repository. + RepositoryDescription *string `locationName:"repositoryDescription" type:"string"` + + // The ID of the repository. + RepositoryId *string `locationName:"repositoryId" type:"string"` + + // The repository's name. + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string"` +} + +// String returns the string representation +func (s RepositoryMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RepositoryMetadata) GoString() string { + return s.String() +} + +// Information about a repository name and ID. +type RepositoryNameIdPair struct { + _ struct{} `type:"structure"` + + // The ID associated with the repository. + RepositoryId *string `locationName:"repositoryId" type:"string"` + + // The name associated with the repository. + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string"` +} + +// String returns the string representation +func (s RepositoryNameIdPair) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RepositoryNameIdPair) GoString() string { + return s.String() +} + +// Information about a trigger for a repository. +type RepositoryTrigger struct { + _ struct{} `type:"structure"` + + // The branches that will be included in the trigger configuration. If no branches + // are specified, the trigger will apply to all branches. + Branches []*string `locationName:"branches" type:"list"` + + // Any custom data associated with the trigger that will be included in the + // information sent to the target of the trigger. + CustomData *string `locationName:"customData" type:"string"` + + // The ARN of the resource that is the target for a trigger. For example, the + // ARN of a topic in Amazon Simple Notification Service (SNS). + DestinationArn *string `locationName:"destinationArn" type:"string"` + + // The repository events that will cause the trigger to run actions in another + // service, such as sending a notification through Amazon Simple Notification + // Service (SNS). 
If no events are specified, the trigger will run for all repository + // events. + Events []*string `locationName:"events" type:"list"` + + // The name of the trigger. + Name *string `locationName:"name" type:"string"` +} + +// String returns the string representation +func (s RepositoryTrigger) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RepositoryTrigger) GoString() string { + return s.String() +} + +// A trigger failed to run. +type RepositoryTriggerExecutionFailure struct { + _ struct{} `type:"structure"` + + // Additional message information about the trigger that did not run. + FailureMessage *string `locationName:"failureMessage" type:"string"` + + // The name of the trigger that did not run. + Trigger *string `locationName:"trigger" type:"string"` +} + +// String returns the string representation +func (s RepositoryTriggerExecutionFailure) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RepositoryTriggerExecutionFailure) GoString() string { + return s.String() +} + +// Represents the input of a test repository triggers operation. +type TestRepositoryTriggersInput struct { + _ struct{} `type:"structure"` + + // The name of the repository in which to test the triggers. + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string"` + + // The list of triggers to test. + Triggers []*RepositoryTrigger `locationName:"triggers" type:"list"` +} + +// String returns the string representation +func (s TestRepositoryTriggersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TestRepositoryTriggersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TestRepositoryTriggersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TestRepositoryTriggersInput"} + if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a test repository triggers operation. +type TestRepositoryTriggersOutput struct { + _ struct{} `type:"structure"` + + // The list of triggers that were not able to be tested. This list provides + // the names of the triggers that could not be tested, separated by commas. + FailedExecutions []*RepositoryTriggerExecutionFailure `locationName:"failedExecutions" type:"list"` + + // The list of triggers that were successfully tested. This list provides the + // names of the triggers that were successfully tested, separated by commas. + SuccessfulExecutions []*string `locationName:"successfulExecutions" type:"list"` +} + +// String returns the string representation +func (s TestRepositoryTriggersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TestRepositoryTriggersOutput) GoString() string { + return s.String() +} + +// Represents the input of an update default branch operation. +type UpdateDefaultBranchInput struct { + _ struct{} `type:"structure"` + + // The name of the branch to set as the default. + DefaultBranchName *string `locationName:"defaultBranchName" min:"1" type:"string" required:"true"` + + // The name of the repository to set or change the default branch for. 
+ RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateDefaultBranchInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateDefaultBranchInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateDefaultBranchInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UpdateDefaultBranchInput"}
+ if s.DefaultBranchName == nil {
+ invalidParams.Add(request.NewErrParamRequired("DefaultBranchName"))
+ }
+ if s.DefaultBranchName != nil && len(*s.DefaultBranchName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("DefaultBranchName", 1))
+ }
+ if s.RepositoryName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
+ }
+ if s.RepositoryName != nil && len(*s.RepositoryName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type UpdateDefaultBranchOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s UpdateDefaultBranchOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateDefaultBranchOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the input of an update repository description operation.
+type UpdateRepositoryDescriptionInput struct {
+ _ struct{} `type:"structure"`
+
+ // The new comment or description for the specified repository. Repository descriptions
+ // are limited to 1,000 characters.
+ RepositoryDescription *string `locationName:"repositoryDescription" type:"string"`
+
+ // The name of the repository to set or change the comment or description for.
+ RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateRepositoryDescriptionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateRepositoryDescriptionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateRepositoryDescriptionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UpdateRepositoryDescriptionInput"}
+ if s.RepositoryName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
+ }
+ if s.RepositoryName != nil && len(*s.RepositoryName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type UpdateRepositoryDescriptionOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s UpdateRepositoryDescriptionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateRepositoryDescriptionOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the input of an update repository name operation.
+type UpdateRepositoryNameInput struct {
+ _ struct{} `type:"structure"`
+
+ // The new name for the repository.
+ NewName *string `locationName:"newName" min:"1" type:"string" required:"true"`
+
+ // The existing name of the repository.
+ OldName *string `locationName:"oldName" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateRepositoryNameInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateRepositoryNameInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateRepositoryNameInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UpdateRepositoryNameInput"}
+ if s.NewName == nil {
+ invalidParams.Add(request.NewErrParamRequired("NewName"))
+ }
+ if s.NewName != nil && len(*s.NewName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("NewName", 1))
+ }
+ if s.OldName == nil {
+ invalidParams.Add(request.NewErrParamRequired("OldName"))
+ }
+ if s.OldName != nil && len(*s.OldName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("OldName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type UpdateRepositoryNameOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s UpdateRepositoryNameOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateRepositoryNameOutput) GoString() string {
+ return s.String()
+}
+
+// Information about the user who made a specified commit.
+type UserInfo struct {
+ _ struct{} `type:"structure"`
+
+ // The date when the specified commit was pushed to the repository.
+ Date *string `locationName:"date" type:"string"`
+
+ // The email address associated with the user who made the commit, if any.
+ Email *string `locationName:"email" type:"string"`
+
+ // The name of the user who made the specified commit.
+ Name *string `locationName:"name" type:"string"`
+}
+
+// String returns the string representation
+func (s UserInfo) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UserInfo) GoString() string {
+ return s.String()
+}
+
+const (
+ // @enum OrderEnum
+ OrderEnumAscending = "ascending"
+ // @enum OrderEnum
+ OrderEnumDescending = "descending"
+)
+
+const (
+ // @enum RepositoryTriggerEventEnum
+ RepositoryTriggerEventEnumAll = "all"
+ // @enum RepositoryTriggerEventEnum
+ RepositoryTriggerEventEnumUpdateReference = "updateReference"
+ // @enum RepositoryTriggerEventEnum
+ RepositoryTriggerEventEnumCreateReference = "createReference"
+ // @enum RepositoryTriggerEventEnum
+ RepositoryTriggerEventEnumDeleteReference = "deleteReference"
+)
+
+const (
+ // @enum SortByEnum
+ SortByEnumRepositoryName = "repositoryName"
+ // @enum SortByEnum
+ SortByEnumLastModifiedDate = "lastModifiedDate"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/codecommit/codecommitiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/codecommit/codecommitiface/interface.go
new file mode 100644
index 000000000..0401313c5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/codecommit/codecommitiface/interface.go
@@ -0,0 +1,78 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package codecommitiface provides an interface for the AWS CodeCommit service.
+package codecommitiface
+
+import (
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/service/codecommit"
+)
+
+// CodeCommitAPI is the interface type for codecommit.CodeCommit.
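+//
+// Programs that take this interface instead of the concrete *codecommit.CodeCommit
+// can be unit tested with a stub. A minimal sketch (the mock type and its canned
+// return value are hypothetical, not part of this package):
+//
+//    type mockCodeCommitClient struct {
+//        codecommitiface.CodeCommitAPI
+//    }
+//
+//    func (m *mockCodeCommitClient) GetRepository(in *codecommit.GetRepositoryInput) (*codecommit.GetRepositoryOutput, error) {
+//        // Return fixed data instead of calling the service.
+//        return &codecommit.GetRepositoryOutput{}, nil
+//    }
+//
+// Methods that are not overridden fall through to the embedded interface, which
+// keeps the stub small.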
+type CodeCommitAPI interface { + BatchGetRepositoriesRequest(*codecommit.BatchGetRepositoriesInput) (*request.Request, *codecommit.BatchGetRepositoriesOutput) + + BatchGetRepositories(*codecommit.BatchGetRepositoriesInput) (*codecommit.BatchGetRepositoriesOutput, error) + + CreateBranchRequest(*codecommit.CreateBranchInput) (*request.Request, *codecommit.CreateBranchOutput) + + CreateBranch(*codecommit.CreateBranchInput) (*codecommit.CreateBranchOutput, error) + + CreateRepositoryRequest(*codecommit.CreateRepositoryInput) (*request.Request, *codecommit.CreateRepositoryOutput) + + CreateRepository(*codecommit.CreateRepositoryInput) (*codecommit.CreateRepositoryOutput, error) + + DeleteRepositoryRequest(*codecommit.DeleteRepositoryInput) (*request.Request, *codecommit.DeleteRepositoryOutput) + + DeleteRepository(*codecommit.DeleteRepositoryInput) (*codecommit.DeleteRepositoryOutput, error) + + GetBranchRequest(*codecommit.GetBranchInput) (*request.Request, *codecommit.GetBranchOutput) + + GetBranch(*codecommit.GetBranchInput) (*codecommit.GetBranchOutput, error) + + GetCommitRequest(*codecommit.GetCommitInput) (*request.Request, *codecommit.GetCommitOutput) + + GetCommit(*codecommit.GetCommitInput) (*codecommit.GetCommitOutput, error) + + GetRepositoryRequest(*codecommit.GetRepositoryInput) (*request.Request, *codecommit.GetRepositoryOutput) + + GetRepository(*codecommit.GetRepositoryInput) (*codecommit.GetRepositoryOutput, error) + + GetRepositoryTriggersRequest(*codecommit.GetRepositoryTriggersInput) (*request.Request, *codecommit.GetRepositoryTriggersOutput) + + GetRepositoryTriggers(*codecommit.GetRepositoryTriggersInput) (*codecommit.GetRepositoryTriggersOutput, error) + + ListBranchesRequest(*codecommit.ListBranchesInput) (*request.Request, *codecommit.ListBranchesOutput) + + ListBranches(*codecommit.ListBranchesInput) (*codecommit.ListBranchesOutput, error) + + ListBranchesPages(*codecommit.ListBranchesInput, func(*codecommit.ListBranchesOutput, bool) bool) error + + ListRepositoriesRequest(*codecommit.ListRepositoriesInput) (*request.Request, *codecommit.ListRepositoriesOutput) + + ListRepositories(*codecommit.ListRepositoriesInput) (*codecommit.ListRepositoriesOutput, error) + + ListRepositoriesPages(*codecommit.ListRepositoriesInput, func(*codecommit.ListRepositoriesOutput, bool) bool) error + + PutRepositoryTriggersRequest(*codecommit.PutRepositoryTriggersInput) (*request.Request, *codecommit.PutRepositoryTriggersOutput) + + PutRepositoryTriggers(*codecommit.PutRepositoryTriggersInput) (*codecommit.PutRepositoryTriggersOutput, error) + + TestRepositoryTriggersRequest(*codecommit.TestRepositoryTriggersInput) (*request.Request, *codecommit.TestRepositoryTriggersOutput) + + TestRepositoryTriggers(*codecommit.TestRepositoryTriggersInput) (*codecommit.TestRepositoryTriggersOutput, error) + + UpdateDefaultBranchRequest(*codecommit.UpdateDefaultBranchInput) (*request.Request, *codecommit.UpdateDefaultBranchOutput) + + UpdateDefaultBranch(*codecommit.UpdateDefaultBranchInput) (*codecommit.UpdateDefaultBranchOutput, error) + + UpdateRepositoryDescriptionRequest(*codecommit.UpdateRepositoryDescriptionInput) (*request.Request, *codecommit.UpdateRepositoryDescriptionOutput) + + UpdateRepositoryDescription(*codecommit.UpdateRepositoryDescriptionInput) (*codecommit.UpdateRepositoryDescriptionOutput, error) + + UpdateRepositoryNameRequest(*codecommit.UpdateRepositoryNameInput) (*request.Request, *codecommit.UpdateRepositoryNameOutput) + + 
UpdateRepositoryName(*codecommit.UpdateRepositoryNameInput) (*codecommit.UpdateRepositoryNameOutput, error) +} + +var _ CodeCommitAPI = (*codecommit.CodeCommit)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/codecommit/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/codecommit/examples_test.go new file mode 100644 index 000000000..8aa9228ac --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/codecommit/examples_test.go @@ -0,0 +1,347 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package codecommit_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/codecommit" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleCodeCommit_BatchGetRepositories() { + svc := codecommit.New(session.New()) + + params := &codecommit.BatchGetRepositoriesInput{ + RepositoryNames: []*string{ // Required + aws.String("RepositoryName"), // Required + // More values... + }, + } + resp, err := svc.BatchGetRepositories(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeCommit_CreateBranch() { + svc := codecommit.New(session.New()) + + params := &codecommit.CreateBranchInput{ + BranchName: aws.String("BranchName"), // Required + CommitId: aws.String("CommitId"), // Required + RepositoryName: aws.String("RepositoryName"), // Required + } + resp, err := svc.CreateBranch(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeCommit_CreateRepository() { + svc := codecommit.New(session.New()) + + params := &codecommit.CreateRepositoryInput{ + RepositoryName: aws.String("RepositoryName"), // Required + RepositoryDescription: aws.String("RepositoryDescription"), + } + resp, err := svc.CreateRepository(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeCommit_DeleteRepository() { + svc := codecommit.New(session.New()) + + params := &codecommit.DeleteRepositoryInput{ + RepositoryName: aws.String("RepositoryName"), // Required + } + resp, err := svc.DeleteRepository(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeCommit_GetBranch() { + svc := codecommit.New(session.New()) + + params := &codecommit.GetBranchInput{ + BranchName: aws.String("BranchName"), + RepositoryName: aws.String("RepositoryName"), + } + resp, err := svc.GetBranch(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCodeCommit_GetCommit() { + svc := codecommit.New(session.New()) + + params := &codecommit.GetCommitInput{ + CommitId: aws.String("ObjectId"), // Required + RepositoryName: aws.String("RepositoryName"), // Required + } + resp, err := svc.GetCommit(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeCommit_GetRepository() { + svc := codecommit.New(session.New()) + + params := &codecommit.GetRepositoryInput{ + RepositoryName: aws.String("RepositoryName"), // Required + } + resp, err := svc.GetRepository(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeCommit_GetRepositoryTriggers() { + svc := codecommit.New(session.New()) + + params := &codecommit.GetRepositoryTriggersInput{ + RepositoryName: aws.String("RepositoryName"), + } + resp, err := svc.GetRepositoryTriggers(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeCommit_ListBranches() { + svc := codecommit.New(session.New()) + + params := &codecommit.ListBranchesInput{ + RepositoryName: aws.String("RepositoryName"), // Required + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListBranches(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeCommit_ListRepositories() { + svc := codecommit.New(session.New()) + + params := &codecommit.ListRepositoriesInput{ + NextToken: aws.String("NextToken"), + Order: aws.String("OrderEnum"), + SortBy: aws.String("SortByEnum"), + } + resp, err := svc.ListRepositories(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeCommit_PutRepositoryTriggers() { + svc := codecommit.New(session.New()) + + params := &codecommit.PutRepositoryTriggersInput{ + RepositoryName: aws.String("RepositoryName"), + Triggers: []*codecommit.RepositoryTrigger{ + { // Required + Branches: []*string{ + aws.String("BranchName"), // Required + // More values... + }, + CustomData: aws.String("RepositoryTriggerCustomData"), + DestinationArn: aws.String("Arn"), + Events: []*string{ + aws.String("RepositoryTriggerEventEnum"), // Required + // More values... + }, + Name: aws.String("RepositoryTriggerName"), + }, + // More values... + }, + } + resp, err := svc.PutRepositoryTriggers(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCodeCommit_TestRepositoryTriggers() { + svc := codecommit.New(session.New()) + + params := &codecommit.TestRepositoryTriggersInput{ + RepositoryName: aws.String("RepositoryName"), + Triggers: []*codecommit.RepositoryTrigger{ + { // Required + Branches: []*string{ + aws.String("BranchName"), // Required + // More values... + }, + CustomData: aws.String("RepositoryTriggerCustomData"), + DestinationArn: aws.String("Arn"), + Events: []*string{ + aws.String("RepositoryTriggerEventEnum"), // Required + // More values... + }, + Name: aws.String("RepositoryTriggerName"), + }, + // More values... + }, + } + resp, err := svc.TestRepositoryTriggers(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeCommit_UpdateDefaultBranch() { + svc := codecommit.New(session.New()) + + params := &codecommit.UpdateDefaultBranchInput{ + DefaultBranchName: aws.String("BranchName"), // Required + RepositoryName: aws.String("RepositoryName"), // Required + } + resp, err := svc.UpdateDefaultBranch(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeCommit_UpdateRepositoryDescription() { + svc := codecommit.New(session.New()) + + params := &codecommit.UpdateRepositoryDescriptionInput{ + RepositoryName: aws.String("RepositoryName"), // Required + RepositoryDescription: aws.String("RepositoryDescription"), + } + resp, err := svc.UpdateRepositoryDescription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeCommit_UpdateRepositoryName() { + svc := codecommit.New(session.New()) + + params := &codecommit.UpdateRepositoryNameInput{ + NewName: aws.String("RepositoryName"), // Required + OldName: aws.String("RepositoryName"), // Required + } + resp, err := svc.UpdateRepositoryName(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/codecommit/service.go b/vendor/github.com/aws/aws-sdk-go/service/codecommit/service.go new file mode 100644 index 000000000..ec608b7c1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/codecommit/service.go @@ -0,0 +1,114 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package codecommit + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// This is the AWS CodeCommit API Reference. This reference provides descriptions +// of the operations and data types for AWS CodeCommit API. 
+//
+// You can use the AWS CodeCommit API to work with the following objects:
+//
+// Repositories, by calling the following:
+//
+//    * BatchGetRepositories, which returns information about one or more
+//    repositories associated with your AWS account
+//
+//    * CreateRepository, which creates an AWS CodeCommit repository
+//
+//    * DeleteRepository, which deletes an AWS CodeCommit repository
+//
+//    * GetRepository, which returns information about a specified repository
+//
+//    * ListRepositories, which lists all AWS CodeCommit repositories associated
+//    with your AWS account
+//
+//    * UpdateRepositoryDescription, which sets or updates the description of
+//    the repository
+//
+//    * UpdateRepositoryName, which changes the name of the repository. If you
+//    change the name of a repository, no other users of that repository will
+//    be able to access it until you send them the new HTTPS or SSH URL to use.
+//
+// Branches, by calling the following:
+//
+//    * CreateBranch, which creates a new branch in a specified repository
+//
+//    * GetBranch, which returns information about a specified branch
+//
+//    * ListBranches, which lists all branches for a specified repository
+//
+//    * UpdateDefaultBranch, which changes the default branch for a repository
+//
+// Information about committed code in a repository, by calling the following:
+//
+//    * GetCommit, which returns information about a commit, including commit
+//    messages and committer information
+//
+// Triggers, by calling the following:
+//
+//    * GetRepositoryTriggers, which returns information about triggers configured
+//    for a repository
+//
+//    * PutRepositoryTriggers, which replaces all triggers for a repository and
+//    can be used to create or delete triggers
+//
+//    * TestRepositoryTriggers, which tests the functionality of a repository
+//    trigger by sending data to the trigger target
+//
+// For information about how to use AWS CodeCommit, see the AWS CodeCommit User
+// Guide (http://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html).
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type CodeCommit struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "codecommit"
+
+// New creates a new instance of the CodeCommit client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create a CodeCommit client from just a session.
+//     svc := codecommit.New(mySession)
+//
+//     // Create a CodeCommit client with additional configuration
+//     svc := codecommit.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *CodeCommit {
+ c := p.ClientConfig(ServiceName, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
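+// It attaches the Signature Version 4 signing handler and the JSON-RPC protocol
+// marshaling handlers, then runs any custom initialization registered through
+// initClient.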
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *CodeCommit { + svc := &CodeCommit{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-04-13", + JSONVersion: "1.1", + TargetPrefix: "CodeCommit_20150413", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a CodeCommit operation and runs any +// custom request initialization. +func (c *CodeCommit) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/codedeploy/api.go b/vendor/github.com/aws/aws-sdk-go/service/codedeploy/api.go new file mode 100644 index 000000000..241f23ab9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/codedeploy/api.go @@ -0,0 +1,4927 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package codedeploy provides a client for AWS CodeDeploy. +package codedeploy + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opAddTagsToOnPremisesInstances = "AddTagsToOnPremisesInstances" + +// AddTagsToOnPremisesInstancesRequest generates a "aws/request.Request" representing the +// client's request for the AddTagsToOnPremisesInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddTagsToOnPremisesInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddTagsToOnPremisesInstancesRequest method. 
+// req, resp := client.AddTagsToOnPremisesInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeDeploy) AddTagsToOnPremisesInstancesRequest(input *AddTagsToOnPremisesInstancesInput) (req *request.Request, output *AddTagsToOnPremisesInstancesOutput) { + op := &request.Operation{ + Name: opAddTagsToOnPremisesInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddTagsToOnPremisesInstancesInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AddTagsToOnPremisesInstancesOutput{} + req.Data = output + return +} + +// Adds tags to on-premises instances. +func (c *CodeDeploy) AddTagsToOnPremisesInstances(input *AddTagsToOnPremisesInstancesInput) (*AddTagsToOnPremisesInstancesOutput, error) { + req, out := c.AddTagsToOnPremisesInstancesRequest(input) + err := req.Send() + return out, err +} + +const opBatchGetApplicationRevisions = "BatchGetApplicationRevisions" + +// BatchGetApplicationRevisionsRequest generates a "aws/request.Request" representing the +// client's request for the BatchGetApplicationRevisions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the BatchGetApplicationRevisions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the BatchGetApplicationRevisionsRequest method. +// req, resp := client.BatchGetApplicationRevisionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeDeploy) BatchGetApplicationRevisionsRequest(input *BatchGetApplicationRevisionsInput) (req *request.Request, output *BatchGetApplicationRevisionsOutput) { + op := &request.Operation{ + Name: opBatchGetApplicationRevisions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchGetApplicationRevisionsInput{} + } + + req = c.newRequest(op, input, output) + output = &BatchGetApplicationRevisionsOutput{} + req.Data = output + return +} + +// Gets information about one or more application revisions. +func (c *CodeDeploy) BatchGetApplicationRevisions(input *BatchGetApplicationRevisionsInput) (*BatchGetApplicationRevisionsOutput, error) { + req, out := c.BatchGetApplicationRevisionsRequest(input) + err := req.Send() + return out, err +} + +const opBatchGetApplications = "BatchGetApplications" + +// BatchGetApplicationsRequest generates a "aws/request.Request" representing the +// client's request for the BatchGetApplications operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If
+// you just want the service response, call the BatchGetApplications method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the BatchGetApplicationsRequest method.
+//    req, resp := client.BatchGetApplicationsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *CodeDeploy) BatchGetApplicationsRequest(input *BatchGetApplicationsInput) (req *request.Request, output *BatchGetApplicationsOutput) {
+	op := &request.Operation{
+		Name:       opBatchGetApplications,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &BatchGetApplicationsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &BatchGetApplicationsOutput{}
+	req.Data = output
+	return
+}
+
+// Gets information about one or more applications.
+func (c *CodeDeploy) BatchGetApplications(input *BatchGetApplicationsInput) (*BatchGetApplicationsOutput, error) {
+	req, out := c.BatchGetApplicationsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opBatchGetDeploymentGroups = "BatchGetDeploymentGroups"
+
+// BatchGetDeploymentGroupsRequest generates a "aws/request.Request" representing the
+// client's request for the BatchGetDeploymentGroups operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the BatchGetDeploymentGroups method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the BatchGetDeploymentGroupsRequest method.
+//    req, resp := client.BatchGetDeploymentGroupsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *CodeDeploy) BatchGetDeploymentGroupsRequest(input *BatchGetDeploymentGroupsInput) (req *request.Request, output *BatchGetDeploymentGroupsOutput) {
+	op := &request.Operation{
+		Name:       opBatchGetDeploymentGroups,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &BatchGetDeploymentGroupsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &BatchGetDeploymentGroupsOutput{}
+	req.Data = output
+	return
+}
+
+// Gets information about one or more deployment groups.
+func (c *CodeDeploy) BatchGetDeploymentGroups(input *BatchGetDeploymentGroupsInput) (*BatchGetDeploymentGroupsOutput, error) {
+	req, out := c.BatchGetDeploymentGroupsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opBatchGetDeploymentInstances = "BatchGetDeploymentInstances"
+
+// BatchGetDeploymentInstancesRequest generates a "aws/request.Request" representing the
+// client's request for the BatchGetDeploymentInstances operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the BatchGetDeploymentInstances method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the BatchGetDeploymentInstancesRequest method.
+//    req, resp := client.BatchGetDeploymentInstancesRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *CodeDeploy) BatchGetDeploymentInstancesRequest(input *BatchGetDeploymentInstancesInput) (req *request.Request, output *BatchGetDeploymentInstancesOutput) {
+	op := &request.Operation{
+		Name:       opBatchGetDeploymentInstances,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &BatchGetDeploymentInstancesInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &BatchGetDeploymentInstancesOutput{}
+	req.Data = output
+	return
+}
+
+// Gets information about one or more instances that are part of a deployment
+// group.
+func (c *CodeDeploy) BatchGetDeploymentInstances(input *BatchGetDeploymentInstancesInput) (*BatchGetDeploymentInstancesOutput, error) {
+	req, out := c.BatchGetDeploymentInstancesRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opBatchGetDeployments = "BatchGetDeployments"
+
+// BatchGetDeploymentsRequest generates a "aws/request.Request" representing the
+// client's request for the BatchGetDeployments operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the BatchGetDeployments method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the BatchGetDeploymentsRequest method.
+//    req, resp := client.BatchGetDeploymentsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *CodeDeploy) BatchGetDeploymentsRequest(input *BatchGetDeploymentsInput) (req *request.Request, output *BatchGetDeploymentsOutput) {
+	op := &request.Operation{
+		Name:       opBatchGetDeployments,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &BatchGetDeploymentsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &BatchGetDeploymentsOutput{}
+	req.Data = output
+	return
+}
+
+// Gets information about one or more deployments.
+func (c *CodeDeploy) BatchGetDeployments(input *BatchGetDeploymentsInput) (*BatchGetDeploymentsOutput, error) {
+	req, out := c.BatchGetDeploymentsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opBatchGetOnPremisesInstances = "BatchGetOnPremisesInstances"
+
+// BatchGetOnPremisesInstancesRequest generates a "aws/request.Request" representing the
+// client's request for the BatchGetOnPremisesInstances operation.
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the BatchGetOnPremisesInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the BatchGetOnPremisesInstancesRequest method. +// req, resp := client.BatchGetOnPremisesInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeDeploy) BatchGetOnPremisesInstancesRequest(input *BatchGetOnPremisesInstancesInput) (req *request.Request, output *BatchGetOnPremisesInstancesOutput) { + op := &request.Operation{ + Name: opBatchGetOnPremisesInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchGetOnPremisesInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &BatchGetOnPremisesInstancesOutput{} + req.Data = output + return +} + +// Gets information about one or more on-premises instances. +func (c *CodeDeploy) BatchGetOnPremisesInstances(input *BatchGetOnPremisesInstancesInput) (*BatchGetOnPremisesInstancesOutput, error) { + req, out := c.BatchGetOnPremisesInstancesRequest(input) + err := req.Send() + return out, err +} + +const opCreateApplication = "CreateApplication" + +// CreateApplicationRequest generates a "aws/request.Request" representing the +// client's request for the CreateApplication operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateApplication method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateApplicationRequest method. +// req, resp := client.CreateApplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeDeploy) CreateApplicationRequest(input *CreateApplicationInput) (req *request.Request, output *CreateApplicationOutput) { + op := &request.Operation{ + Name: opCreateApplication, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateApplicationInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateApplicationOutput{} + req.Data = output + return +} + +// Creates an application. +func (c *CodeDeploy) CreateApplication(input *CreateApplicationInput) (*CreateApplicationOutput, error) { + req, out := c.CreateApplicationRequest(input) + err := req.Send() + return out, err +} + +const opCreateDeployment = "CreateDeployment" + +// CreateDeploymentRequest generates a "aws/request.Request" representing the +// client's request for the CreateDeployment operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDeployment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDeploymentRequest method. +// req, resp := client.CreateDeploymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeDeploy) CreateDeploymentRequest(input *CreateDeploymentInput) (req *request.Request, output *CreateDeploymentOutput) { + op := &request.Operation{ + Name: opCreateDeployment, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDeploymentInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDeploymentOutput{} + req.Data = output + return +} + +// Deploys an application revision through the specified deployment group. +func (c *CodeDeploy) CreateDeployment(input *CreateDeploymentInput) (*CreateDeploymentOutput, error) { + req, out := c.CreateDeploymentRequest(input) + err := req.Send() + return out, err +} + +const opCreateDeploymentConfig = "CreateDeploymentConfig" + +// CreateDeploymentConfigRequest generates a "aws/request.Request" representing the +// client's request for the CreateDeploymentConfig operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDeploymentConfig method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDeploymentConfigRequest method. +// req, resp := client.CreateDeploymentConfigRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeDeploy) CreateDeploymentConfigRequest(input *CreateDeploymentConfigInput) (req *request.Request, output *CreateDeploymentConfigOutput) { + op := &request.Operation{ + Name: opCreateDeploymentConfig, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDeploymentConfigInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDeploymentConfigOutput{} + req.Data = output + return +} + +// Creates a deployment configuration. +func (c *CodeDeploy) CreateDeploymentConfig(input *CreateDeploymentConfigInput) (*CreateDeploymentConfigOutput, error) { + req, out := c.CreateDeploymentConfigRequest(input) + err := req.Send() + return out, err +} + +const opCreateDeploymentGroup = "CreateDeploymentGroup" + +// CreateDeploymentGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateDeploymentGroup operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDeploymentGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDeploymentGroupRequest method. +// req, resp := client.CreateDeploymentGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeDeploy) CreateDeploymentGroupRequest(input *CreateDeploymentGroupInput) (req *request.Request, output *CreateDeploymentGroupOutput) { + op := &request.Operation{ + Name: opCreateDeploymentGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDeploymentGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDeploymentGroupOutput{} + req.Data = output + return +} + +// Creates a deployment group to which application revisions will be deployed. +func (c *CodeDeploy) CreateDeploymentGroup(input *CreateDeploymentGroupInput) (*CreateDeploymentGroupOutput, error) { + req, out := c.CreateDeploymentGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteApplication = "DeleteApplication" + +// DeleteApplicationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteApplication operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteApplication method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteApplicationRequest method. +// req, resp := client.DeleteApplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeDeploy) DeleteApplicationRequest(input *DeleteApplicationInput) (req *request.Request, output *DeleteApplicationOutput) { + op := &request.Operation{ + Name: opDeleteApplication, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteApplicationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteApplicationOutput{} + req.Data = output + return +} + +// Deletes an application. 
+func (c *CodeDeploy) DeleteApplication(input *DeleteApplicationInput) (*DeleteApplicationOutput, error) { + req, out := c.DeleteApplicationRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDeploymentConfig = "DeleteDeploymentConfig" + +// DeleteDeploymentConfigRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDeploymentConfig operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDeploymentConfig method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteDeploymentConfigRequest method. +// req, resp := client.DeleteDeploymentConfigRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeDeploy) DeleteDeploymentConfigRequest(input *DeleteDeploymentConfigInput) (req *request.Request, output *DeleteDeploymentConfigOutput) { + op := &request.Operation{ + Name: opDeleteDeploymentConfig, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDeploymentConfigInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteDeploymentConfigOutput{} + req.Data = output + return +} + +// Deletes a deployment configuration. +// +// A deployment configuration cannot be deleted if it is currently in use. +// Predefined configurations cannot be deleted. +func (c *CodeDeploy) DeleteDeploymentConfig(input *DeleteDeploymentConfigInput) (*DeleteDeploymentConfigOutput, error) { + req, out := c.DeleteDeploymentConfigRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDeploymentGroup = "DeleteDeploymentGroup" + +// DeleteDeploymentGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDeploymentGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDeploymentGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteDeploymentGroupRequest method. 
+// req, resp := client.DeleteDeploymentGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeDeploy) DeleteDeploymentGroupRequest(input *DeleteDeploymentGroupInput) (req *request.Request, output *DeleteDeploymentGroupOutput) { + op := &request.Operation{ + Name: opDeleteDeploymentGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDeploymentGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteDeploymentGroupOutput{} + req.Data = output + return +} + +// Deletes a deployment group. +func (c *CodeDeploy) DeleteDeploymentGroup(input *DeleteDeploymentGroupInput) (*DeleteDeploymentGroupOutput, error) { + req, out := c.DeleteDeploymentGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeregisterOnPremisesInstance = "DeregisterOnPremisesInstance" + +// DeregisterOnPremisesInstanceRequest generates a "aws/request.Request" representing the +// client's request for the DeregisterOnPremisesInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeregisterOnPremisesInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeregisterOnPremisesInstanceRequest method. +// req, resp := client.DeregisterOnPremisesInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeDeploy) DeregisterOnPremisesInstanceRequest(input *DeregisterOnPremisesInstanceInput) (req *request.Request, output *DeregisterOnPremisesInstanceOutput) { + op := &request.Operation{ + Name: opDeregisterOnPremisesInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterOnPremisesInstanceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeregisterOnPremisesInstanceOutput{} + req.Data = output + return +} + +// Deregisters an on-premises instance. +func (c *CodeDeploy) DeregisterOnPremisesInstance(input *DeregisterOnPremisesInstanceInput) (*DeregisterOnPremisesInstanceOutput, error) { + req, out := c.DeregisterOnPremisesInstanceRequest(input) + err := req.Send() + return out, err +} + +const opGetApplication = "GetApplication" + +// GetApplicationRequest generates a "aws/request.Request" representing the +// client's request for the GetApplication operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetApplication method directly +// instead. 
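+//
+// For instance, an illustrative direct call (the svc client value and the
+// application name are assumed for this sketch):
+//
+//    out, err := svc.GetApplication(&codedeploy.GetApplicationInput{
+//        ApplicationName: aws.String("my-app"),
+//    })
+//    if err == nil {
+//        fmt.Println(out)
+//    }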
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetApplicationRequest method. +// req, resp := client.GetApplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeDeploy) GetApplicationRequest(input *GetApplicationInput) (req *request.Request, output *GetApplicationOutput) { + op := &request.Operation{ + Name: opGetApplication, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetApplicationInput{} + } + + req = c.newRequest(op, input, output) + output = &GetApplicationOutput{} + req.Data = output + return +} + +// Gets information about an application. +func (c *CodeDeploy) GetApplication(input *GetApplicationInput) (*GetApplicationOutput, error) { + req, out := c.GetApplicationRequest(input) + err := req.Send() + return out, err +} + +const opGetApplicationRevision = "GetApplicationRevision" + +// GetApplicationRevisionRequest generates a "aws/request.Request" representing the +// client's request for the GetApplicationRevision operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetApplicationRevision method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetApplicationRevisionRequest method. +// req, resp := client.GetApplicationRevisionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeDeploy) GetApplicationRevisionRequest(input *GetApplicationRevisionInput) (req *request.Request, output *GetApplicationRevisionOutput) { + op := &request.Operation{ + Name: opGetApplicationRevision, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetApplicationRevisionInput{} + } + + req = c.newRequest(op, input, output) + output = &GetApplicationRevisionOutput{} + req.Data = output + return +} + +// Gets information about an application revision. +func (c *CodeDeploy) GetApplicationRevision(input *GetApplicationRevisionInput) (*GetApplicationRevisionOutput, error) { + req, out := c.GetApplicationRevisionRequest(input) + err := req.Send() + return out, err +} + +const opGetDeployment = "GetDeployment" + +// GetDeploymentRequest generates a "aws/request.Request" representing the +// client's request for the GetDeployment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetDeployment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the GetDeploymentRequest method. +// req, resp := client.GetDeploymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeDeploy) GetDeploymentRequest(input *GetDeploymentInput) (req *request.Request, output *GetDeploymentOutput) { + op := &request.Operation{ + Name: opGetDeployment, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDeploymentInput{} + } + + req = c.newRequest(op, input, output) + output = &GetDeploymentOutput{} + req.Data = output + return +} + +// Gets information about a deployment. +func (c *CodeDeploy) GetDeployment(input *GetDeploymentInput) (*GetDeploymentOutput, error) { + req, out := c.GetDeploymentRequest(input) + err := req.Send() + return out, err +} + +const opGetDeploymentConfig = "GetDeploymentConfig" + +// GetDeploymentConfigRequest generates a "aws/request.Request" representing the +// client's request for the GetDeploymentConfig operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetDeploymentConfig method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetDeploymentConfigRequest method. +// req, resp := client.GetDeploymentConfigRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeDeploy) GetDeploymentConfigRequest(input *GetDeploymentConfigInput) (req *request.Request, output *GetDeploymentConfigOutput) { + op := &request.Operation{ + Name: opGetDeploymentConfig, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDeploymentConfigInput{} + } + + req = c.newRequest(op, input, output) + output = &GetDeploymentConfigOutput{} + req.Data = output + return +} + +// Gets information about a deployment configuration. +func (c *CodeDeploy) GetDeploymentConfig(input *GetDeploymentConfigInput) (*GetDeploymentConfigOutput, error) { + req, out := c.GetDeploymentConfigRequest(input) + err := req.Send() + return out, err +} + +const opGetDeploymentGroup = "GetDeploymentGroup" + +// GetDeploymentGroupRequest generates a "aws/request.Request" representing the +// client's request for the GetDeploymentGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetDeploymentGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetDeploymentGroupRequest method. 
+// req, resp := client.GetDeploymentGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeDeploy) GetDeploymentGroupRequest(input *GetDeploymentGroupInput) (req *request.Request, output *GetDeploymentGroupOutput) { + op := &request.Operation{ + Name: opGetDeploymentGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDeploymentGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &GetDeploymentGroupOutput{} + req.Data = output + return +} + +// Gets information about a deployment group. +func (c *CodeDeploy) GetDeploymentGroup(input *GetDeploymentGroupInput) (*GetDeploymentGroupOutput, error) { + req, out := c.GetDeploymentGroupRequest(input) + err := req.Send() + return out, err +} + +const opGetDeploymentInstance = "GetDeploymentInstance" + +// GetDeploymentInstanceRequest generates a "aws/request.Request" representing the +// client's request for the GetDeploymentInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetDeploymentInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetDeploymentInstanceRequest method. +// req, resp := client.GetDeploymentInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeDeploy) GetDeploymentInstanceRequest(input *GetDeploymentInstanceInput) (req *request.Request, output *GetDeploymentInstanceOutput) { + op := &request.Operation{ + Name: opGetDeploymentInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDeploymentInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &GetDeploymentInstanceOutput{} + req.Data = output + return +} + +// Gets information about an instance as part of a deployment. +func (c *CodeDeploy) GetDeploymentInstance(input *GetDeploymentInstanceInput) (*GetDeploymentInstanceOutput, error) { + req, out := c.GetDeploymentInstanceRequest(input) + err := req.Send() + return out, err +} + +const opGetOnPremisesInstance = "GetOnPremisesInstance" + +// GetOnPremisesInstanceRequest generates a "aws/request.Request" representing the +// client's request for the GetOnPremisesInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetOnPremisesInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetOnPremisesInstanceRequest method. 
+// req, resp := client.GetOnPremisesInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeDeploy) GetOnPremisesInstanceRequest(input *GetOnPremisesInstanceInput) (req *request.Request, output *GetOnPremisesInstanceOutput) { + op := &request.Operation{ + Name: opGetOnPremisesInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetOnPremisesInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &GetOnPremisesInstanceOutput{} + req.Data = output + return +} + +// Gets information about an on-premises instance. +func (c *CodeDeploy) GetOnPremisesInstance(input *GetOnPremisesInstanceInput) (*GetOnPremisesInstanceOutput, error) { + req, out := c.GetOnPremisesInstanceRequest(input) + err := req.Send() + return out, err +} + +const opListApplicationRevisions = "ListApplicationRevisions" + +// ListApplicationRevisionsRequest generates a "aws/request.Request" representing the +// client's request for the ListApplicationRevisions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListApplicationRevisions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListApplicationRevisionsRequest method. +// req, resp := client.ListApplicationRevisionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeDeploy) ListApplicationRevisionsRequest(input *ListApplicationRevisionsInput) (req *request.Request, output *ListApplicationRevisionsOutput) { + op := &request.Operation{ + Name: opListApplicationRevisions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListApplicationRevisionsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListApplicationRevisionsOutput{} + req.Data = output + return +} + +// Lists information about revisions for an application. +func (c *CodeDeploy) ListApplicationRevisions(input *ListApplicationRevisionsInput) (*ListApplicationRevisionsOutput, error) { + req, out := c.ListApplicationRevisionsRequest(input) + err := req.Send() + return out, err +} + +// ListApplicationRevisionsPages iterates over the pages of a ListApplicationRevisions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListApplicationRevisions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListApplicationRevisions operation. 
+// pageNum := 0 +// err := client.ListApplicationRevisionsPages(params, +// func(page *ListApplicationRevisionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CodeDeploy) ListApplicationRevisionsPages(input *ListApplicationRevisionsInput, fn func(p *ListApplicationRevisionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListApplicationRevisionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListApplicationRevisionsOutput), lastPage) + }) +} + +const opListApplications = "ListApplications" + +// ListApplicationsRequest generates a "aws/request.Request" representing the +// client's request for the ListApplications operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListApplications method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListApplicationsRequest method. +// req, resp := client.ListApplicationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeDeploy) ListApplicationsRequest(input *ListApplicationsInput) (req *request.Request, output *ListApplicationsOutput) { + op := &request.Operation{ + Name: opListApplications, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListApplicationsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListApplicationsOutput{} + req.Data = output + return +} + +// Lists the applications registered with the applicable IAM user or AWS account. +func (c *CodeDeploy) ListApplications(input *ListApplicationsInput) (*ListApplicationsOutput, error) { + req, out := c.ListApplicationsRequest(input) + err := req.Send() + return out, err +} + +// ListApplicationsPages iterates over the pages of a ListApplications operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListApplications method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListApplications operation. 
+//    pageNum := 0
+//    err := client.ListApplicationsPages(params,
+//        func(page *ListApplicationsOutput, lastPage bool) bool {
+//            pageNum++
+//            fmt.Println(page)
+//            return pageNum <= 3
+//        })
+//
+func (c *CodeDeploy) ListApplicationsPages(input *ListApplicationsInput, fn func(p *ListApplicationsOutput, lastPage bool) (shouldContinue bool)) error {
+	page, _ := c.ListApplicationsRequest(input)
+	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+	return page.EachPage(func(p interface{}, lastPage bool) bool {
+		return fn(p.(*ListApplicationsOutput), lastPage)
+	})
+}
+
+const opListDeploymentConfigs = "ListDeploymentConfigs"
+
+// ListDeploymentConfigsRequest generates a "aws/request.Request" representing the
+// client's request for the ListDeploymentConfigs operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ListDeploymentConfigs method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the ListDeploymentConfigsRequest method.
+//    req, resp := client.ListDeploymentConfigsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *CodeDeploy) ListDeploymentConfigsRequest(input *ListDeploymentConfigsInput) (req *request.Request, output *ListDeploymentConfigsOutput) {
+	op := &request.Operation{
+		Name:       opListDeploymentConfigs,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"nextToken"},
+			OutputTokens:    []string{"nextToken"},
+			LimitToken:      "",
+			TruncationToken: "",
+		},
+	}
+
+	if input == nil {
+		input = &ListDeploymentConfigsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ListDeploymentConfigsOutput{}
+	req.Data = output
+	return
+}
+
+// Lists the deployment configurations associated with the applicable IAM user
+// or AWS account.
+func (c *CodeDeploy) ListDeploymentConfigs(input *ListDeploymentConfigsInput) (*ListDeploymentConfigsOutput, error) {
+	req, out := c.ListDeploymentConfigsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// ListDeploymentConfigsPages iterates over the pages of a ListDeploymentConfigs operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListDeploymentConfigs method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+//    // Example iterating over at most 3 pages of a ListDeploymentConfigs operation.
+// pageNum := 0 +// err := client.ListDeploymentConfigsPages(params, +// func(page *ListDeploymentConfigsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CodeDeploy) ListDeploymentConfigsPages(input *ListDeploymentConfigsInput, fn func(p *ListDeploymentConfigsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListDeploymentConfigsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListDeploymentConfigsOutput), lastPage) + }) +} + +const opListDeploymentGroups = "ListDeploymentGroups" + +// ListDeploymentGroupsRequest generates a "aws/request.Request" representing the +// client's request for the ListDeploymentGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListDeploymentGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListDeploymentGroupsRequest method. +// req, resp := client.ListDeploymentGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeDeploy) ListDeploymentGroupsRequest(input *ListDeploymentGroupsInput) (req *request.Request, output *ListDeploymentGroupsOutput) { + op := &request.Operation{ + Name: opListDeploymentGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDeploymentGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListDeploymentGroupsOutput{} + req.Data = output + return +} + +// Lists the deployment groups for an application registered with the applicable +// IAM user or AWS account. +func (c *CodeDeploy) ListDeploymentGroups(input *ListDeploymentGroupsInput) (*ListDeploymentGroupsOutput, error) { + req, out := c.ListDeploymentGroupsRequest(input) + err := req.Send() + return out, err +} + +// ListDeploymentGroupsPages iterates over the pages of a ListDeploymentGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDeploymentGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDeploymentGroups operation. 
+//    pageNum := 0
+//    err := client.ListDeploymentGroupsPages(params,
+//        func(page *ListDeploymentGroupsOutput, lastPage bool) bool {
+//            pageNum++
+//            fmt.Println(page)
+//            return pageNum <= 3
+//        })
+//
+func (c *CodeDeploy) ListDeploymentGroupsPages(input *ListDeploymentGroupsInput, fn func(p *ListDeploymentGroupsOutput, lastPage bool) (shouldContinue bool)) error {
+	page, _ := c.ListDeploymentGroupsRequest(input)
+	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+	return page.EachPage(func(p interface{}, lastPage bool) bool {
+		return fn(p.(*ListDeploymentGroupsOutput), lastPage)
+	})
+}
+
+const opListDeploymentInstances = "ListDeploymentInstances"
+
+// ListDeploymentInstancesRequest generates a "aws/request.Request" representing the
+// client's request for the ListDeploymentInstances operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ListDeploymentInstances method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the ListDeploymentInstancesRequest method.
+//    req, resp := client.ListDeploymentInstancesRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *CodeDeploy) ListDeploymentInstancesRequest(input *ListDeploymentInstancesInput) (req *request.Request, output *ListDeploymentInstancesOutput) {
+	op := &request.Operation{
+		Name:       opListDeploymentInstances,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"nextToken"},
+			OutputTokens:    []string{"nextToken"},
+			LimitToken:      "",
+			TruncationToken: "",
+		},
+	}
+
+	if input == nil {
+		input = &ListDeploymentInstancesInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ListDeploymentInstancesOutput{}
+	req.Data = output
+	return
+}
+
+// Lists the instances for a deployment associated with the applicable IAM
+// user or AWS account.
+func (c *CodeDeploy) ListDeploymentInstances(input *ListDeploymentInstancesInput) (*ListDeploymentInstancesOutput, error) {
+	req, out := c.ListDeploymentInstancesRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// ListDeploymentInstancesPages iterates over the pages of a ListDeploymentInstances operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListDeploymentInstances method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+//    // Example iterating over at most 3 pages of a ListDeploymentInstances operation.
+// pageNum := 0 +// err := client.ListDeploymentInstancesPages(params, +// func(page *ListDeploymentInstancesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CodeDeploy) ListDeploymentInstancesPages(input *ListDeploymentInstancesInput, fn func(p *ListDeploymentInstancesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListDeploymentInstancesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListDeploymentInstancesOutput), lastPage) + }) +} + +const opListDeployments = "ListDeployments" + +// ListDeploymentsRequest generates a "aws/request.Request" representing the +// client's request for the ListDeployments operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListDeployments method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListDeploymentsRequest method. +// req, resp := client.ListDeploymentsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeDeploy) ListDeploymentsRequest(input *ListDeploymentsInput) (req *request.Request, output *ListDeploymentsOutput) { + op := &request.Operation{ + Name: opListDeployments, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDeploymentsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListDeploymentsOutput{} + req.Data = output + return +} + +// Lists the deployments in a deployment group for an application registered +// with the applicable IAM user or AWS account. +func (c *CodeDeploy) ListDeployments(input *ListDeploymentsInput) (*ListDeploymentsOutput, error) { + req, out := c.ListDeploymentsRequest(input) + err := req.Send() + return out, err +} + +// ListDeploymentsPages iterates over the pages of a ListDeployments operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDeployments method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDeployments operation. 
+// pageNum := 0 +// err := client.ListDeploymentsPages(params, +// func(page *ListDeploymentsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CodeDeploy) ListDeploymentsPages(input *ListDeploymentsInput, fn func(p *ListDeploymentsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListDeploymentsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListDeploymentsOutput), lastPage) + }) +} + +const opListOnPremisesInstances = "ListOnPremisesInstances" + +// ListOnPremisesInstancesRequest generates a "aws/request.Request" representing the +// client's request for the ListOnPremisesInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListOnPremisesInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListOnPremisesInstancesRequest method. +// req, resp := client.ListOnPremisesInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeDeploy) ListOnPremisesInstancesRequest(input *ListOnPremisesInstancesInput) (req *request.Request, output *ListOnPremisesInstancesOutput) { + op := &request.Operation{ + Name: opListOnPremisesInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListOnPremisesInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListOnPremisesInstancesOutput{} + req.Data = output + return +} + +// Gets a list of names for one or more on-premises instances. +// +// Unless otherwise specified, both registered and deregistered on-premises +// instance names will be listed. To list only registered or deregistered on-premises +// instance names, use the registration status parameter. +func (c *CodeDeploy) ListOnPremisesInstances(input *ListOnPremisesInstancesInput) (*ListOnPremisesInstancesOutput, error) { + req, out := c.ListOnPremisesInstancesRequest(input) + err := req.Send() + return out, err +} + +const opRegisterApplicationRevision = "RegisterApplicationRevision" + +// RegisterApplicationRevisionRequest generates a "aws/request.Request" representing the +// client's request for the RegisterApplicationRevision operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RegisterApplicationRevision method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RegisterApplicationRevisionRequest method. 
+// req, resp := client.RegisterApplicationRevisionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeDeploy) RegisterApplicationRevisionRequest(input *RegisterApplicationRevisionInput) (req *request.Request, output *RegisterApplicationRevisionOutput) { + op := &request.Operation{ + Name: opRegisterApplicationRevision, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterApplicationRevisionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RegisterApplicationRevisionOutput{} + req.Data = output + return +} + +// Registers with AWS CodeDeploy a revision for the specified application. +func (c *CodeDeploy) RegisterApplicationRevision(input *RegisterApplicationRevisionInput) (*RegisterApplicationRevisionOutput, error) { + req, out := c.RegisterApplicationRevisionRequest(input) + err := req.Send() + return out, err +} + +const opRegisterOnPremisesInstance = "RegisterOnPremisesInstance" + +// RegisterOnPremisesInstanceRequest generates a "aws/request.Request" representing the +// client's request for the RegisterOnPremisesInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RegisterOnPremisesInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RegisterOnPremisesInstanceRequest method. +// req, resp := client.RegisterOnPremisesInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeDeploy) RegisterOnPremisesInstanceRequest(input *RegisterOnPremisesInstanceInput) (req *request.Request, output *RegisterOnPremisesInstanceOutput) { + op := &request.Operation{ + Name: opRegisterOnPremisesInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterOnPremisesInstanceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RegisterOnPremisesInstanceOutput{} + req.Data = output + return +} + +// Registers an on-premises instance. +func (c *CodeDeploy) RegisterOnPremisesInstance(input *RegisterOnPremisesInstanceInput) (*RegisterOnPremisesInstanceOutput, error) { + req, out := c.RegisterOnPremisesInstanceRequest(input) + err := req.Send() + return out, err +} + +const opRemoveTagsFromOnPremisesInstances = "RemoveTagsFromOnPremisesInstances" + +// RemoveTagsFromOnPremisesInstancesRequest generates a "aws/request.Request" representing the +// client's request for the RemoveTagsFromOnPremisesInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RemoveTagsFromOnPremisesInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RemoveTagsFromOnPremisesInstancesRequest method. +// req, resp := client.RemoveTagsFromOnPremisesInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeDeploy) RemoveTagsFromOnPremisesInstancesRequest(input *RemoveTagsFromOnPremisesInstancesInput) (req *request.Request, output *RemoveTagsFromOnPremisesInstancesOutput) { + op := &request.Operation{ + Name: opRemoveTagsFromOnPremisesInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveTagsFromOnPremisesInstancesInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RemoveTagsFromOnPremisesInstancesOutput{} + req.Data = output + return +} + +// Removes one or more tags from one or more on-premises instances. +func (c *CodeDeploy) RemoveTagsFromOnPremisesInstances(input *RemoveTagsFromOnPremisesInstancesInput) (*RemoveTagsFromOnPremisesInstancesOutput, error) { + req, out := c.RemoveTagsFromOnPremisesInstancesRequest(input) + err := req.Send() + return out, err +} + +const opStopDeployment = "StopDeployment" + +// StopDeploymentRequest generates a "aws/request.Request" representing the +// client's request for the StopDeployment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the StopDeployment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the StopDeploymentRequest method. +// req, resp := client.StopDeploymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeDeploy) StopDeploymentRequest(input *StopDeploymentInput) (req *request.Request, output *StopDeploymentOutput) { + op := &request.Operation{ + Name: opStopDeployment, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopDeploymentInput{} + } + + req = c.newRequest(op, input, output) + output = &StopDeploymentOutput{} + req.Data = output + return +} + +// Attempts to stop an ongoing deployment. +func (c *CodeDeploy) StopDeployment(input *StopDeploymentInput) (*StopDeploymentOutput, error) { + req, out := c.StopDeploymentRequest(input) + err := req.Send() + return out, err +} + +const opUpdateApplication = "UpdateApplication" + +// UpdateApplicationRequest generates a "aws/request.Request" representing the +// client's request for the UpdateApplication operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateApplication method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateApplicationRequest method. +// req, resp := client.UpdateApplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeDeploy) UpdateApplicationRequest(input *UpdateApplicationInput) (req *request.Request, output *UpdateApplicationOutput) { + op := &request.Operation{ + Name: opUpdateApplication, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateApplicationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateApplicationOutput{} + req.Data = output + return +} + +// Changes the name of an application. +func (c *CodeDeploy) UpdateApplication(input *UpdateApplicationInput) (*UpdateApplicationOutput, error) { + req, out := c.UpdateApplicationRequest(input) + err := req.Send() + return out, err +} + +const opUpdateDeploymentGroup = "UpdateDeploymentGroup" + +// UpdateDeploymentGroupRequest generates a "aws/request.Request" representing the +// client's request for the UpdateDeploymentGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateDeploymentGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateDeploymentGroupRequest method. +// req, resp := client.UpdateDeploymentGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodeDeploy) UpdateDeploymentGroupRequest(input *UpdateDeploymentGroupInput) (req *request.Request, output *UpdateDeploymentGroupOutput) { + op := &request.Operation{ + Name: opUpdateDeploymentGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateDeploymentGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateDeploymentGroupOutput{} + req.Data = output + return +} + +// Changes information about a deployment group. +func (c *CodeDeploy) UpdateDeploymentGroup(input *UpdateDeploymentGroupInput) (*UpdateDeploymentGroupOutput, error) { + req, out := c.UpdateDeploymentGroupRequest(input) + err := req.Send() + return out, err +} + +// Represents the input of, and adds tags to, an on-premises instance operation. 
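+//
+// A minimal usage sketch (an editorial example, not generated code; "svc" is
+// assumed to be a configured *codedeploy.CodeDeploy client, and the instance
+// names and tag below are placeholders):
+//
+//    params := &codedeploy.AddTagsToOnPremisesInstancesInput{
+//        InstanceNames: []*string{aws.String("on-prem-1"), aws.String("on-prem-2")},
+//        Tags: []*codedeploy.Tag{
+//            {Key: aws.String("environment"), Value: aws.String("staging")},
+//        },
+//    }
+//    if _, err := svc.AddTagsToOnPremisesInstances(params); err != nil {
+//        fmt.Println(err)
+//    }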
+type AddTagsToOnPremisesInstancesInput struct { + _ struct{} `type:"structure"` + + // The names of the on-premises instances to which to add tags. + InstanceNames []*string `locationName:"instanceNames" type:"list" required:"true"` + + // The tag key-value pairs to add to the on-premises instances. + // + // Keys and values are both required. Keys cannot be null or empty strings. + // Value-only tags are not allowed. + Tags []*Tag `locationName:"tags" type:"list" required:"true"` +} + +// String returns the string representation +func (s AddTagsToOnPremisesInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsToOnPremisesInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddTagsToOnPremisesInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddTagsToOnPremisesInstancesInput"} + if s.InstanceNames == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceNames")) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type AddTagsToOnPremisesInstancesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddTagsToOnPremisesInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsToOnPremisesInstancesOutput) GoString() string { + return s.String() +} + +// Information about an application. +type ApplicationInfo struct { + _ struct{} `type:"structure"` + + // The application ID. + ApplicationId *string `locationName:"applicationId" type:"string"` + + // The application name. + ApplicationName *string `locationName:"applicationName" min:"1" type:"string"` + + // The time at which the application was created. + CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"unix"` + + // True if the user has authenticated with GitHub for the specified application; + // otherwise, false. + LinkedToGitHub *bool `locationName:"linkedToGitHub" type:"boolean"` +} + +// String returns the string representation +func (s ApplicationInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ApplicationInfo) GoString() string { + return s.String() +} + +// Information about an Auto Scaling group. +type AutoScalingGroup struct { + _ struct{} `type:"structure"` + + // An Auto Scaling lifecycle event hook name. + Hook *string `locationName:"hook" type:"string"` + + // The Auto Scaling group name. + Name *string `locationName:"name" type:"string"` +} + +// String returns the string representation +func (s AutoScalingGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AutoScalingGroup) GoString() string { + return s.String() +} + +// Represents the input of a batch get application revisions operation. +type BatchGetApplicationRevisionsInput struct { + _ struct{} `type:"structure"` + + // The name of an AWS CodeDeploy application about which to get revision information. + ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"` + + // Information to get about the application revisions, including type and location. 
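+ //
+ // For example, a single S3-hosted revision could be described like this (an
+ // editorial sketch; the bucket and key are placeholders):
+ //
+ //    revisions := []*codedeploy.RevisionLocation{{
+ //        RevisionType: aws.String("S3"),
+ //        S3Location: &codedeploy.S3Location{
+ //            Bucket:     aws.String("my-app-bucket"),
+ //            Key:        aws.String("releases/app.zip"),
+ //            BundleType: aws.String("zip"),
+ //        },
+ //    }}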
+ Revisions []*RevisionLocation `locationName:"revisions" type:"list" required:"true"` +} + +// String returns the string representation +func (s BatchGetApplicationRevisionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetApplicationRevisionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchGetApplicationRevisionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchGetApplicationRevisionsInput"} + if s.ApplicationName == nil { + invalidParams.Add(request.NewErrParamRequired("ApplicationName")) + } + if s.ApplicationName != nil && len(*s.ApplicationName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationName", 1)) + } + if s.Revisions == nil { + invalidParams.Add(request.NewErrParamRequired("Revisions")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a batch get application revisions operation. +type BatchGetApplicationRevisionsOutput struct { + _ struct{} `type:"structure"` + + // The name of the application that corresponds to the revisions. + ApplicationName *string `locationName:"applicationName" min:"1" type:"string"` + + // Information about errors that may have occurred during the API call. + ErrorMessage *string `locationName:"errorMessage" type:"string"` + + // Additional information about the revisions, including the type and location. + Revisions []*RevisionInfo `locationName:"revisions" type:"list"` +} + +// String returns the string representation +func (s BatchGetApplicationRevisionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetApplicationRevisionsOutput) GoString() string { + return s.String() +} + +// Represents the input of a batch get applications operation. +type BatchGetApplicationsInput struct { + _ struct{} `type:"structure"` + + // A list of application names separated by spaces. + ApplicationNames []*string `locationName:"applicationNames" type:"list"` +} + +// String returns the string representation +func (s BatchGetApplicationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetApplicationsInput) GoString() string { + return s.String() +} + +// Represents the output of a batch get applications operation. +type BatchGetApplicationsOutput struct { + _ struct{} `type:"structure"` + + // Information about the applications. + ApplicationsInfo []*ApplicationInfo `locationName:"applicationsInfo" type:"list"` +} + +// String returns the string representation +func (s BatchGetApplicationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetApplicationsOutput) GoString() string { + return s.String() +} + +// Represents the input of a batch get deployment groups operation. +type BatchGetDeploymentGroupsInput struct { + _ struct{} `type:"structure"` + + // The name of an AWS CodeDeploy application associated with the applicable + // IAM user or AWS account. + ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"` + + // The deployment groups' names. 
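+ //
+ // As an editorial aside: aws.StringSlice can build this list from plain
+ // strings, e.g. aws.StringSlice([]string{"group-a", "group-b"}).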
+ DeploymentGroupNames []*string `locationName:"deploymentGroupNames" type:"list" required:"true"` +} + +// String returns the string representation +func (s BatchGetDeploymentGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetDeploymentGroupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchGetDeploymentGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchGetDeploymentGroupsInput"} + if s.ApplicationName == nil { + invalidParams.Add(request.NewErrParamRequired("ApplicationName")) + } + if s.ApplicationName != nil && len(*s.ApplicationName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationName", 1)) + } + if s.DeploymentGroupNames == nil { + invalidParams.Add(request.NewErrParamRequired("DeploymentGroupNames")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a batch get deployment groups operation. +type BatchGetDeploymentGroupsOutput struct { + _ struct{} `type:"structure"` + + // Information about the deployment groups. + DeploymentGroupsInfo []*DeploymentGroupInfo `locationName:"deploymentGroupsInfo" type:"list"` + + // Information about errors that may have occurred during the API call. + ErrorMessage *string `locationName:"errorMessage" type:"string"` +} + +// String returns the string representation +func (s BatchGetDeploymentGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetDeploymentGroupsOutput) GoString() string { + return s.String() +} + +// Represents the input of a batch get deployment instances operation. +type BatchGetDeploymentInstancesInput struct { + _ struct{} `type:"structure"` + + // The unique ID of a deployment. + DeploymentId *string `locationName:"deploymentId" type:"string" required:"true"` + + // The unique IDs of instances in the deployment group. + InstanceIds []*string `locationName:"instanceIds" type:"list" required:"true"` +} + +// String returns the string representation +func (s BatchGetDeploymentInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetDeploymentInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchGetDeploymentInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchGetDeploymentInstancesInput"} + if s.DeploymentId == nil { + invalidParams.Add(request.NewErrParamRequired("DeploymentId")) + } + if s.InstanceIds == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a batch get deployment instance operation. +type BatchGetDeploymentInstancesOutput struct { + _ struct{} `type:"structure"` + + // Information about errors that may have occurred during the API call. + ErrorMessage *string `locationName:"errorMessage" type:"string"` + + // Information about the instance. 
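+ //
+ // An editorial sketch of reading the summaries (assuming "out" is the
+ // *BatchGetDeploymentInstancesOutput returned by the call):
+ //
+ //    for _, s := range out.InstancesSummary {
+ //        fmt.Println(aws.StringValue(s.InstanceId), aws.StringValue(s.Status))
+ //    }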
+ InstancesSummary []*InstanceSummary `locationName:"instancesSummary" type:"list"` +} + +// String returns the string representation +func (s BatchGetDeploymentInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetDeploymentInstancesOutput) GoString() string { + return s.String() +} + +// Represents the input of a batch get deployments operation. +type BatchGetDeploymentsInput struct { + _ struct{} `type:"structure"` + + // A list of deployment IDs, separated by spaces. + DeploymentIds []*string `locationName:"deploymentIds" type:"list"` +} + +// String returns the string representation +func (s BatchGetDeploymentsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetDeploymentsInput) GoString() string { + return s.String() +} + +// Represents the output of a batch get deployments operation. +type BatchGetDeploymentsOutput struct { + _ struct{} `type:"structure"` + + // Information about the deployments. + DeploymentsInfo []*DeploymentInfo `locationName:"deploymentsInfo" type:"list"` +} + +// String returns the string representation +func (s BatchGetDeploymentsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetDeploymentsOutput) GoString() string { + return s.String() +} + +// Represents the input of a batch get on-premises instances operation. +type BatchGetOnPremisesInstancesInput struct { + _ struct{} `type:"structure"` + + // The names of the on-premises instances about which to get information. + InstanceNames []*string `locationName:"instanceNames" type:"list"` +} + +// String returns the string representation +func (s BatchGetOnPremisesInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetOnPremisesInstancesInput) GoString() string { + return s.String() +} + +// Represents the output of a batch get on-premises instances operation. +type BatchGetOnPremisesInstancesOutput struct { + _ struct{} `type:"structure"` + + // Information about the on-premises instances. + InstanceInfos []*InstanceInfo `locationName:"instanceInfos" type:"list"` +} + +// String returns the string representation +func (s BatchGetOnPremisesInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetOnPremisesInstancesOutput) GoString() string { + return s.String() +} + +// Represents the input of a create application operation. +type CreateApplicationInput struct { + _ struct{} `type:"structure"` + + // The name of the application. This name must be unique with the applicable + // IAM user or AWS account. + ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateApplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateApplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
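+//
+// The SDK runs Validate automatically before sending a request, but it can also
+// be called directly. An editorial sketch (not generated code):
+//
+//    in := &codedeploy.CreateApplicationInput{ApplicationName: aws.String("")}
+//    if err := in.Validate(); err != nil {
+//        fmt.Println(err) // reports the min-length violation on ApplicationName
+//    }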
+func (s *CreateApplicationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateApplicationInput"}
+ if s.ApplicationName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ApplicationName"))
+ }
+ if s.ApplicationName != nil && len(*s.ApplicationName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ApplicationName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the output of a create application operation.
+type CreateApplicationOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A unique application ID.
+ ApplicationId *string `locationName:"applicationId" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateApplicationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateApplicationOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a create deployment configuration operation.
+type CreateDeploymentConfigInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the deployment configuration to create.
+ DeploymentConfigName *string `locationName:"deploymentConfigName" min:"1" type:"string" required:"true"`
+
+ // The minimum number of healthy instances that should be available at any time
+ // during the deployment. There are two parameters expected in the input: type
+ // and value.
+ //
+ // The type parameter takes either of the following values:
+ //
+ //  * HOST_COUNT: The value parameter represents the minimum number of healthy
+ //    instances as an absolute value.
+ //
+ //  * FLEET_PERCENT: The value parameter represents the minimum number of healthy
+ //    instances as a percentage of the total number of instances in the deployment.
+ //    If you specify FLEET_PERCENT, at the start of the deployment, AWS CodeDeploy
+ //    converts the percentage to the equivalent number of instances and rounds
+ //    up fractional instances.
+ //
+ // The value parameter takes an integer.
+ //
+ // For example, to set a minimum of 95% healthy instances, specify a type of
+ // FLEET_PERCENT and a value of 95.
+ MinimumHealthyHosts *MinimumHealthyHosts `locationName:"minimumHealthyHosts" type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateDeploymentConfigInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDeploymentConfigInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateDeploymentConfigInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateDeploymentConfigInput"}
+ if s.DeploymentConfigName == nil {
+ invalidParams.Add(request.NewErrParamRequired("DeploymentConfigName"))
+ }
+ if s.DeploymentConfigName != nil && len(*s.DeploymentConfigName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("DeploymentConfigName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the output of a create deployment configuration operation.
+type CreateDeploymentConfigOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A unique deployment configuration ID.
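+ //
+ // For example, an ID like this is returned by a call such as the following
+ // (an editorial sketch; "svc" and the configuration name are placeholders):
+ //
+ //    out, err := svc.CreateDeploymentConfig(&codedeploy.CreateDeploymentConfigInput{
+ //        DeploymentConfigName: aws.String("Custom.95PercentHealthy"),
+ //        MinimumHealthyHosts: &codedeploy.MinimumHealthyHosts{
+ //            Type:  aws.String("FLEET_PERCENT"),
+ //            Value: aws.Int64(95),
+ //        },
+ //    })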
+ DeploymentConfigId *string `locationName:"deploymentConfigId" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateDeploymentConfigOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDeploymentConfigOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a create deployment group operation.
+type CreateDeploymentGroupInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of an AWS CodeDeploy application associated with the applicable
+ // IAM user or AWS account.
+ ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"`
+
+ // A list of associated Auto Scaling groups.
+ AutoScalingGroups []*string `locationName:"autoScalingGroups" type:"list"`
+
+ // If specified, the deployment configuration name can be either one of the
+ // predefined configurations provided with AWS CodeDeploy or a custom deployment
+ // configuration that you create by calling the create deployment configuration
+ // operation.
+ //
+ // CodeDeployDefault.OneAtATime is the default deployment configuration. It
+ // is used if a configuration isn't specified for the deployment or the deployment
+ // group.
+ //
+ // The predefined deployment configurations include the following:
+ //
+ // CodeDeployDefault.AllAtOnce attempts to deploy an application revision
+ // to as many instances as possible at once. The status of the overall deployment
+ // will be displayed as Succeeded if the application revision is deployed to
+ // one or more of the instances. The status of the overall deployment will be
+ // displayed as Failed if the application revision is not deployed to any of
+ // the instances. Using an example of nine instances, CodeDeployDefault.AllAtOnce
+ // will attempt to deploy to all nine instances at once. The overall deployment
+ // will succeed if deployment to even a single instance is successful; it will
+ // fail only if deployments to all nine instances fail.
+ //
+ // CodeDeployDefault.HalfAtATime deploys to up to half of the instances at
+ // a time (with fractions rounded down). The overall deployment succeeds if
+ // the application revision is deployed to at least half of the instances (with
+ // fractions rounded up); otherwise, the deployment fails. In the example of
+ // nine instances, it will deploy to up to four instances at a time. The overall
+ // deployment succeeds if deployment to five or more instances succeeds; otherwise,
+ // the deployment fails. The deployment may be successfully deployed to some
+ // instances even if the overall deployment fails.
+ //
+ // CodeDeployDefault.OneAtATime deploys the application revision to only
+ // one instance at a time.
+ //
+ // For deployment groups that contain more than one instance:
+ //
+ // The overall deployment succeeds if the application revision is deployed
+ // to all of the instances. The exception to this rule is that if deployment
+ // to the last instance fails, the overall deployment still succeeds. This is
+ // because AWS CodeDeploy allows only one instance at a time to be taken offline
+ // with the CodeDeployDefault.OneAtATime configuration.
+ //
+ // The overall deployment fails as soon as the application revision fails
+ // to be deployed to any but the last instance. The deployment may be successfully
+ // deployed to some instances even if the overall deployment fails.
+ //
+ // In an example using nine instances, it will deploy to one instance at a
+ // time. The overall deployment succeeds if deployment to the first eight instances
+ // is successful; the overall deployment fails if deployment to any of the first
+ // eight instances fails.
+ //
+ // For deployment groups that contain only one instance, the overall deployment
+ // is successful only if deployment to the single instance is successful.
+ DeploymentConfigName *string `locationName:"deploymentConfigName" min:"1" type:"string"`
+
+ // The name of a new deployment group for the specified application.
+ DeploymentGroupName *string `locationName:"deploymentGroupName" min:"1" type:"string" required:"true"`
+
+ // The Amazon EC2 tags on which to filter.
+ Ec2TagFilters []*EC2TagFilter `locationName:"ec2TagFilters" type:"list"`
+
+ // The on-premises instance tags on which to filter.
+ OnPremisesInstanceTagFilters []*TagFilter `locationName:"onPremisesInstanceTagFilters" type:"list"`
+
+ // A service role ARN that allows AWS CodeDeploy to act on the user's behalf
+ // when interacting with AWS services.
+ ServiceRoleArn *string `locationName:"serviceRoleArn" type:"string" required:"true"`
+
+ // Information about triggers to create when the deployment group is created.
+ TriggerConfigurations []*TriggerConfig `locationName:"triggerConfigurations" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateDeploymentGroupInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDeploymentGroupInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateDeploymentGroupInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateDeploymentGroupInput"}
+ if s.ApplicationName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ApplicationName"))
+ }
+ if s.ApplicationName != nil && len(*s.ApplicationName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ApplicationName", 1))
+ }
+ if s.DeploymentConfigName != nil && len(*s.DeploymentConfigName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("DeploymentConfigName", 1))
+ }
+ if s.DeploymentGroupName == nil {
+ invalidParams.Add(request.NewErrParamRequired("DeploymentGroupName"))
+ }
+ if s.DeploymentGroupName != nil && len(*s.DeploymentGroupName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("DeploymentGroupName", 1))
+ }
+ if s.ServiceRoleArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("ServiceRoleArn"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the output of a create deployment group operation.
+type CreateDeploymentGroupOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A unique deployment group ID.
+ DeploymentGroupId *string `locationName:"deploymentGroupId" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateDeploymentGroupOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDeploymentGroupOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a create deployment operation.
+type CreateDeploymentInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of an AWS CodeDeploy application associated with the applicable
+ // IAM user or AWS account.
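+ //
+ // As an editorial sketch, a complete input for deploying an S3-hosted revision
+ // might look like this (every name below is a placeholder):
+ //
+ //    in := &codedeploy.CreateDeploymentInput{
+ //        ApplicationName:     aws.String("MyApp"),
+ //        DeploymentGroupName: aws.String("MyFleet"),
+ //        Revision: &codedeploy.RevisionLocation{
+ //            RevisionType: aws.String("S3"),
+ //            S3Location: &codedeploy.S3Location{
+ //                Bucket:     aws.String("my-app-bucket"),
+ //                Key:        aws.String("releases/app.zip"),
+ //                BundleType: aws.String("zip"),
+ //            },
+ //        },
+ //    }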
+ ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"`
+
+ // The name of a deployment configuration associated with the applicable IAM
+ // user or AWS account.
+ //
+ // If not specified, the value configured in the deployment group will be used
+ // as the default. If the deployment group does not have a deployment configuration
+ // associated with it, then CodeDeployDefault.OneAtATime will be used by default.
+ DeploymentConfigName *string `locationName:"deploymentConfigName" min:"1" type:"string"`
+
+ // The name of the deployment group.
+ DeploymentGroupName *string `locationName:"deploymentGroupName" min:"1" type:"string"`
+
+ // A comment about the deployment.
+ Description *string `locationName:"description" type:"string"`
+
+ // If set to true, then if the deployment causes the ApplicationStop deployment
+ // lifecycle event on an instance to fail, the deployment to that instance will
+ // not be considered to have failed at that point and will continue on to the
+ // BeforeInstall deployment lifecycle event.
+ //
+ // If set to false or not specified, then if the deployment causes the ApplicationStop
+ // deployment lifecycle event on an instance to fail, the deployment to that
+ // instance will stop, and the deployment to that instance will be considered
+ // to have failed.
+ IgnoreApplicationStopFailures *bool `locationName:"ignoreApplicationStopFailures" type:"boolean"`
+
+ // The type and location of the revision to deploy.
+ Revision *RevisionLocation `locationName:"revision" type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateDeploymentInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDeploymentInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateDeploymentInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateDeploymentInput"}
+ if s.ApplicationName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ApplicationName"))
+ }
+ if s.ApplicationName != nil && len(*s.ApplicationName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ApplicationName", 1))
+ }
+ if s.DeploymentConfigName != nil && len(*s.DeploymentConfigName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("DeploymentConfigName", 1))
+ }
+ if s.DeploymentGroupName != nil && len(*s.DeploymentGroupName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("DeploymentGroupName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the output of a create deployment operation.
+type CreateDeploymentOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A unique deployment ID.
+ DeploymentId *string `locationName:"deploymentId" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateDeploymentOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDeploymentOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a delete application operation.
+type DeleteApplicationInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of an AWS CodeDeploy application associated with the applicable
+ // IAM user or AWS account.
+ ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteApplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteApplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteApplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteApplicationInput"} + if s.ApplicationName == nil { + invalidParams.Add(request.NewErrParamRequired("ApplicationName")) + } + if s.ApplicationName != nil && len(*s.ApplicationName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteApplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteApplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteApplicationOutput) GoString() string { + return s.String() +} + +// Represents the input of a delete deployment configuration operation. +type DeleteDeploymentConfigInput struct { + _ struct{} `type:"structure"` + + // The name of a deployment configuration associated with the applicable IAM + // user or AWS account. + DeploymentConfigName *string `locationName:"deploymentConfigName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDeploymentConfigInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDeploymentConfigInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDeploymentConfigInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDeploymentConfigInput"} + if s.DeploymentConfigName == nil { + invalidParams.Add(request.NewErrParamRequired("DeploymentConfigName")) + } + if s.DeploymentConfigName != nil && len(*s.DeploymentConfigName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DeploymentConfigName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteDeploymentConfigOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDeploymentConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDeploymentConfigOutput) GoString() string { + return s.String() +} + +// Represents the input of a delete deployment group operation. +type DeleteDeploymentGroupInput struct { + _ struct{} `type:"structure"` + + // The name of an AWS CodeDeploy application associated with the applicable + // IAM user or AWS account. + ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"` + + // The name of an existing deployment group for the specified application. 
+ DeploymentGroupName *string `locationName:"deploymentGroupName" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteDeploymentGroupInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteDeploymentGroupInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteDeploymentGroupInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteDeploymentGroupInput"}
+ if s.ApplicationName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ApplicationName"))
+ }
+ if s.ApplicationName != nil && len(*s.ApplicationName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ApplicationName", 1))
+ }
+ if s.DeploymentGroupName == nil {
+ invalidParams.Add(request.NewErrParamRequired("DeploymentGroupName"))
+ }
+ if s.DeploymentGroupName != nil && len(*s.DeploymentGroupName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("DeploymentGroupName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the output of a delete deployment group operation.
+type DeleteDeploymentGroupOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If the output contains no data, and the corresponding deployment group contained
+ // at least one Auto Scaling group, AWS CodeDeploy successfully removed all
+ // corresponding Auto Scaling lifecycle event hooks from the Amazon EC2 instances
+ // in the Auto Scaling group. If the output contains data, AWS CodeDeploy could
+ // not remove some Auto Scaling lifecycle event hooks from the Amazon EC2 instances
+ // in the Auto Scaling group.
+ HooksNotCleanedUp []*AutoScalingGroup `locationName:"hooksNotCleanedUp" type:"list"`
+}
+
+// String returns the string representation
+func (s DeleteDeploymentGroupOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteDeploymentGroupOutput) GoString() string {
+ return s.String()
+}
+
+// Information about a deployment configuration.
+type DeploymentConfigInfo struct {
+ _ struct{} `type:"structure"`
+
+ // The time at which the deployment configuration was created.
+ CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"unix"`
+
+ // The deployment configuration ID.
+ DeploymentConfigId *string `locationName:"deploymentConfigId" type:"string"`
+
+ // The deployment configuration name.
+ DeploymentConfigName *string `locationName:"deploymentConfigName" min:"1" type:"string"`
+
+ // Information about the number or percentage of minimum healthy instances.
+ MinimumHealthyHosts *MinimumHealthyHosts `locationName:"minimumHealthyHosts" type:"structure"`
+}
+
+// String returns the string representation
+func (s DeploymentConfigInfo) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeploymentConfigInfo) GoString() string {
+ return s.String()
+}
+
+// Information about a deployment group.
+type DeploymentGroupInfo struct {
+ _ struct{} `type:"structure"`
+
+ // The application name.
+ ApplicationName *string `locationName:"applicationName" min:"1" type:"string"`
+
+ // A list of associated Auto Scaling groups.
+ AutoScalingGroups []*AutoScalingGroup `locationName:"autoScalingGroups" type:"list"`
+
+ // The deployment configuration name.
+ DeploymentConfigName *string `locationName:"deploymentConfigName" min:"1" type:"string"`
+
+ // The deployment group ID.
+ DeploymentGroupId *string `locationName:"deploymentGroupId" type:"string"`
+
+ // The deployment group name.
+ DeploymentGroupName *string `locationName:"deploymentGroupName" min:"1" type:"string"`
+
+ // The Amazon EC2 tags on which to filter.
+ Ec2TagFilters []*EC2TagFilter `locationName:"ec2TagFilters" type:"list"`
+
+ // The on-premises instance tags on which to filter.
+ OnPremisesInstanceTagFilters []*TagFilter `locationName:"onPremisesInstanceTagFilters" type:"list"`
+
+ // A service role ARN.
+ ServiceRoleArn *string `locationName:"serviceRoleArn" type:"string"`
+
+ // Information about the deployment group's target revision, including type
+ // and location.
+ TargetRevision *RevisionLocation `locationName:"targetRevision" type:"structure"`
+
+ // A list of associated triggers.
+ TriggerConfigurations []*TriggerConfig `locationName:"triggerConfigurations" type:"list"`
+}
+
+// String returns the string representation
+func (s DeploymentGroupInfo) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeploymentGroupInfo) GoString() string {
+ return s.String()
+}
+
+// Information about a deployment.
+type DeploymentInfo struct {
+ _ struct{} `type:"structure"`
+
+ // The application name.
+ ApplicationName *string `locationName:"applicationName" min:"1" type:"string"`
+
+ // A timestamp indicating when the deployment was complete.
+ CompleteTime *time.Time `locationName:"completeTime" type:"timestamp" timestampFormat:"unix"`
+
+ // A timestamp indicating when the deployment was created.
+ CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"unix"`
+
+ // The means by which the deployment was created:
+ //
+ //  * user: A user created the deployment.
+ //
+ //  * autoscaling: Auto Scaling created the deployment.
+ Creator *string `locationName:"creator" type:"string" enum:"DeploymentCreator"`
+
+ // The deployment configuration name.
+ DeploymentConfigName *string `locationName:"deploymentConfigName" min:"1" type:"string"`
+
+ // The deployment group name.
+ DeploymentGroupName *string `locationName:"deploymentGroupName" min:"1" type:"string"`
+
+ // The deployment ID.
+ DeploymentId *string `locationName:"deploymentId" type:"string"`
+
+ // A summary of the deployment status of the instances in the deployment.
+ DeploymentOverview *DeploymentOverview `locationName:"deploymentOverview" type:"structure"`
+
+ // A comment about the deployment.
+ Description *string `locationName:"description" type:"string"`
+
+ // Information about any error associated with this deployment.
+ ErrorInformation *ErrorInformation `locationName:"errorInformation" type:"structure"`
+
+ // If true, then if the deployment causes the ApplicationStop deployment lifecycle
+ // event on an instance to fail, the deployment to that instance will not be
+ // considered to have failed at that point and will continue on to the BeforeInstall
+ // deployment lifecycle event.
+ //
+ // If false or not specified, then if the deployment causes the ApplicationStop
+ // deployment lifecycle event on an instance to fail, the deployment to that
+ // instance will stop, and the deployment to that instance will be considered
+ // to have failed.
+ IgnoreApplicationStopFailures *bool `locationName:"ignoreApplicationStopFailures" type:"boolean"` + + // Information about the location of stored application artifacts and the service + // from which to retrieve them. + Revision *RevisionLocation `locationName:"revision" type:"structure"` + + // A timestamp indicating when the deployment was deployed to the deployment + // group. + // + // In some cases, the reported value of the start time may be later than the + // complete time. This is due to differences in the clock settings of back-end + // servers that participate in the deployment process. + StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"unix"` + + // The current state of the deployment as a whole. + Status *string `locationName:"status" type:"string" enum:"DeploymentStatus"` +} + +// String returns the string representation +func (s DeploymentInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeploymentInfo) GoString() string { + return s.String() +} + +// Information about the deployment status of the instances in the deployment. +type DeploymentOverview struct { + _ struct{} `type:"structure"` + + // The number of instances in the deployment in a failed state. + Failed *int64 `type:"long"` + + // The number of instances in which the deployment is in progress. + InProgress *int64 `type:"long"` + + // The number of instances in the deployment in a pending state. + Pending *int64 `type:"long"` + + // The number of instances in the deployment in a skipped state. + Skipped *int64 `type:"long"` + + // The number of instances in the deployment to which revisions have been successfully + // deployed. + Succeeded *int64 `type:"long"` +} + +// String returns the string representation +func (s DeploymentOverview) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeploymentOverview) GoString() string { + return s.String() +} + +// Represents the input of a deregister on-premises instance operation. +type DeregisterOnPremisesInstanceInput struct { + _ struct{} `type:"structure"` + + // The name of the on-premises instance to deregister. + InstanceName *string `locationName:"instanceName" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeregisterOnPremisesInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterOnPremisesInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeregisterOnPremisesInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeregisterOnPremisesInstanceInput"} + if s.InstanceName == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeregisterOnPremisesInstanceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeregisterOnPremisesInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterOnPremisesInstanceOutput) GoString() string { + return s.String() +} + +// Diagnostic information about executable scripts that are part of a deployment. 
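+//
+// An editorial sketch of inspecting a Diagnostics value (assuming "d" was taken
+// from a lifecycle event on an instance summary):
+//
+//    if d != nil {
+//        fmt.Printf("%s: %s\n", aws.StringValue(d.ScriptName), aws.StringValue(d.ErrorCode))
+//        fmt.Println(aws.StringValue(d.LogTail))
+//    }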
+type Diagnostics struct {
+ _ struct{} `type:"structure"`
+
+ // The associated error code:
+ //
+ //  * Success: The specified script ran.
+ //
+ //  * ScriptMissing: The specified script was not found in the specified location.
+ //
+ //  * ScriptNotExecutable: The specified script is not a recognized executable
+ //    file type.
+ //
+ //  * ScriptTimedOut: The specified script did not finish running in the specified
+ //    time period.
+ //
+ //  * ScriptFailed: The specified script failed to run as expected.
+ //
+ //  * UnknownError: The specified script did not run for an unknown reason.
+ ErrorCode *string `locationName:"errorCode" type:"string" enum:"LifecycleErrorCode"`
+
+ // The last portion of the diagnostic log.
+ //
+ // If available, AWS CodeDeploy returns up to the last 4 KB of the diagnostic
+ // log.
+ LogTail *string `locationName:"logTail" type:"string"`
+
+ // The message associated with the error.
+ Message *string `locationName:"message" type:"string"`
+
+ // The name of the script.
+ ScriptName *string `locationName:"scriptName" type:"string"`
+}
+
+// String returns the string representation
+func (s Diagnostics) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Diagnostics) GoString() string {
+ return s.String()
+}
+
+// Information about a tag filter.
+type EC2TagFilter struct {
+ _ struct{} `type:"structure"`
+
+ // The tag filter key.
+ Key *string `type:"string"`
+
+ // The tag filter type:
+ //
+ //  * KEY_ONLY: Key only.
+ //
+ //  * VALUE_ONLY: Value only.
+ //
+ //  * KEY_AND_VALUE: Key and value.
+ Type *string `type:"string" enum:"EC2TagFilterType"`
+
+ // The tag filter value.
+ Value *string `type:"string"`
+}
+
+// String returns the string representation
+func (s EC2TagFilter) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EC2TagFilter) GoString() string {
+ return s.String()
+}
+
+// Information about a deployment error.
+type ErrorInformation struct {
+ _ struct{} `type:"structure"`
+
+ // The error code:
+ //
+ //  * APPLICATION_MISSING: The application was missing. This error code will
+ //    most likely be raised if the application is deleted after the deployment
+ //    is created but before it is started.
+ //
+ //  * DEPLOYMENT_GROUP_MISSING: The deployment group was missing. This error
+ //    code will most likely be raised if the deployment group is deleted after
+ //    the deployment is created but before it is started.
+ //
+ //  * HEALTH_CONSTRAINTS: The deployment failed on too many instances to be
+ //    successfully deployed within the instance health constraints specified.
+ //
+ //  * HEALTH_CONSTRAINTS_INVALID: The revision cannot be successfully deployed
+ //    within the instance health constraints specified.
+ //
+ //  * IAM_ROLE_MISSING: The service role cannot be accessed.
+ //
+ //  * IAM_ROLE_PERMISSIONS: The service role does not have the correct permissions.
+ //
+ //  * INTERNAL_ERROR: There was an internal error.
+ //
+ //  * NO_EC2_SUBSCRIPTION: The calling account is not subscribed to the Amazon
+ //    EC2 service.
+ //
+ //  * NO_INSTANCES: No instances were specified, or no instances can be found.
+ //
+ //  * OVER_MAX_INSTANCES: The maximum number of instances was exceeded.
+ //
+ //  * THROTTLED: The operation was throttled because the calling account exceeded
+ //    the throttling limits of one or more AWS services.
+ //
+ //  * TIMEOUT: The deployment has timed out.
+ //
+ //  * REVISION_MISSING: The revision ID was missing. This error code will most
+ //    likely be raised if the revision is deleted after the deployment is created
+ //    but before it is started.
+ Code *string `locationName:"code" type:"string" enum:"ErrorCode"` + + // An accompanying error message. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ErrorInformation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ErrorInformation) GoString() string { + return s.String() +} + +// Information about an application revision. +type GenericRevisionInfo struct { + _ struct{} `type:"structure"` + + // The deployment groups for which this is the current target revision. + DeploymentGroups []*string `locationName:"deploymentGroups" type:"list"` + + // A comment about the revision. + Description *string `locationName:"description" type:"string"` + + // When the revision was first used by AWS CodeDeploy. + FirstUsedTime *time.Time `locationName:"firstUsedTime" type:"timestamp" timestampFormat:"unix"` + + // When the revision was last used by AWS CodeDeploy. + LastUsedTime *time.Time `locationName:"lastUsedTime" type:"timestamp" timestampFormat:"unix"` + + // When the revision was registered with AWS CodeDeploy. + RegisterTime *time.Time `locationName:"registerTime" type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s GenericRevisionInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GenericRevisionInfo) GoString() string { + return s.String() +} + +// Represents the input of a get application operation. +type GetApplicationInput struct { + _ struct{} `type:"structure"` + + // The name of an AWS CodeDeploy application associated with the applicable + // IAM user or AWS account. + ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetApplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetApplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetApplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetApplicationInput"} + if s.ApplicationName == nil { + invalidParams.Add(request.NewErrParamRequired("ApplicationName")) + } + if s.ApplicationName != nil && len(*s.ApplicationName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a get application operation. +type GetApplicationOutput struct { + _ struct{} `type:"structure"` + + // Information about the application. + Application *ApplicationInfo `locationName:"application" type:"structure"` +} + +// String returns the string representation +func (s GetApplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetApplicationOutput) GoString() string { + return s.String() +} + +// Represents the input of a get application revision operation. +type GetApplicationRevisionInput struct { + _ struct{} `type:"structure"` + + // The name of the application that corresponds to the revision. + ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"` + + // Information about the application revision to get, including type and location. 
+ Revision *RevisionLocation `locationName:"revision" type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetApplicationRevisionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetApplicationRevisionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetApplicationRevisionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetApplicationRevisionInput"} + if s.ApplicationName == nil { + invalidParams.Add(request.NewErrParamRequired("ApplicationName")) + } + if s.ApplicationName != nil && len(*s.ApplicationName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationName", 1)) + } + if s.Revision == nil { + invalidParams.Add(request.NewErrParamRequired("Revision")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a get application revision operation. +type GetApplicationRevisionOutput struct { + _ struct{} `type:"structure"` + + // The name of the application that corresponds to the revision. + ApplicationName *string `locationName:"applicationName" min:"1" type:"string"` + + // Additional information about the revision, including type and location. + Revision *RevisionLocation `locationName:"revision" type:"structure"` + + // General information about the revision. + RevisionInfo *GenericRevisionInfo `locationName:"revisionInfo" type:"structure"` +} + +// String returns the string representation +func (s GetApplicationRevisionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetApplicationRevisionOutput) GoString() string { + return s.String() +} + +// Represents the input of a get deployment configuration operation. +type GetDeploymentConfigInput struct { + _ struct{} `type:"structure"` + + // The name of a deployment configuration associated with the applicable IAM + // user or AWS account. + DeploymentConfigName *string `locationName:"deploymentConfigName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetDeploymentConfigInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDeploymentConfigInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetDeploymentConfigInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDeploymentConfigInput"} + if s.DeploymentConfigName == nil { + invalidParams.Add(request.NewErrParamRequired("DeploymentConfigName")) + } + if s.DeploymentConfigName != nil && len(*s.DeploymentConfigName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DeploymentConfigName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a get deployment configuration operation. +type GetDeploymentConfigOutput struct { + _ struct{} `type:"structure"` + + // Information about the deployment configuration. 
+ DeploymentConfigInfo *DeploymentConfigInfo `locationName:"deploymentConfigInfo" type:"structure"` +} + +// String returns the string representation +func (s GetDeploymentConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDeploymentConfigOutput) GoString() string { + return s.String() +} + +// Represents the input of a get deployment group operation. +type GetDeploymentGroupInput struct { + _ struct{} `type:"structure"` + + // The name of an AWS CodeDeploy application associated with the applicable + // IAM user or AWS account. + ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"` + + // The name of an existing deployment group for the specified application. + DeploymentGroupName *string `locationName:"deploymentGroupName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetDeploymentGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDeploymentGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetDeploymentGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDeploymentGroupInput"} + if s.ApplicationName == nil { + invalidParams.Add(request.NewErrParamRequired("ApplicationName")) + } + if s.ApplicationName != nil && len(*s.ApplicationName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationName", 1)) + } + if s.DeploymentGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("DeploymentGroupName")) + } + if s.DeploymentGroupName != nil && len(*s.DeploymentGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DeploymentGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a get deployment group operation. +type GetDeploymentGroupOutput struct { + _ struct{} `type:"structure"` + + // Information about the deployment group. + DeploymentGroupInfo *DeploymentGroupInfo `locationName:"deploymentGroupInfo" type:"structure"` +} + +// String returns the string representation +func (s GetDeploymentGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDeploymentGroupOutput) GoString() string { + return s.String() +} + +// Represents the input of a get deployment operation. +type GetDeploymentInput struct { + _ struct{} `type:"structure"` + + // A deployment ID associated with the applicable IAM user or AWS account. + DeploymentId *string `locationName:"deploymentId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetDeploymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDeploymentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetDeploymentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDeploymentInput"} + if s.DeploymentId == nil { + invalidParams.Add(request.NewErrParamRequired("DeploymentId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the input of a get deployment instance operation. 
+type GetDeploymentInstanceInput struct { + _ struct{} `type:"structure"` + + // The unique ID of a deployment. + DeploymentId *string `locationName:"deploymentId" type:"string" required:"true"` + + // The unique ID of an instance in the deployment group. + InstanceId *string `locationName:"instanceId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetDeploymentInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDeploymentInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetDeploymentInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDeploymentInstanceInput"} + if s.DeploymentId == nil { + invalidParams.Add(request.NewErrParamRequired("DeploymentId")) + } + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a get deployment instance operation. +type GetDeploymentInstanceOutput struct { + _ struct{} `type:"structure"` + + // Information about the instance. + InstanceSummary *InstanceSummary `locationName:"instanceSummary" type:"structure"` +} + +// String returns the string representation +func (s GetDeploymentInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDeploymentInstanceOutput) GoString() string { + return s.String() +} + +// Represents the output of a get deployment operation. +type GetDeploymentOutput struct { + _ struct{} `type:"structure"` + + // Information about the deployment. + DeploymentInfo *DeploymentInfo `locationName:"deploymentInfo" type:"structure"` +} + +// String returns the string representation +func (s GetDeploymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDeploymentOutput) GoString() string { + return s.String() +} + +// Represents the input of a get on-premises instance operation. +type GetOnPremisesInstanceInput struct { + _ struct{} `type:"structure"` + + // The name of the on-premises instance about which to get information. + InstanceName *string `locationName:"instanceName" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetOnPremisesInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOnPremisesInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetOnPremisesInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetOnPremisesInstanceInput"} + if s.InstanceName == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a get on-premises instance operation. +type GetOnPremisesInstanceOutput struct { + _ struct{} `type:"structure"` + + // Information about the on-premises instance. 
+ InstanceInfo *InstanceInfo `locationName:"instanceInfo" type:"structure"`
+}
+
+// String returns the string representation
+func (s GetOnPremisesInstanceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetOnPremisesInstanceOutput) GoString() string {
+ return s.String()
+}
+
+// Information about the location of application artifacts stored in GitHub.
+type GitHubLocation struct {
+ _ struct{} `type:"structure"`
+
+ // The SHA1 commit ID of the GitHub commit that represents the bundled artifacts
+ // for the application revision.
+ CommitId *string `locationName:"commitId" type:"string"`
+
+ // The GitHub account and repository pair that stores a reference to the commit
+ // that represents the bundled artifacts for the application revision.
+ //
+ // Specified as account/repository.
+ Repository *string `locationName:"repository" type:"string"`
+}
+
+// String returns the string representation
+func (s GitHubLocation) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GitHubLocation) GoString() string {
+ return s.String()
+}
+
+// Information about an on-premises instance.
+type InstanceInfo struct {
+ _ struct{} `type:"structure"`
+
+ // If the on-premises instance was deregistered, the time at which it was
+ // deregistered.
+ DeregisterTime *time.Time `locationName:"deregisterTime" type:"timestamp" timestampFormat:"unix"`
+
+ // The IAM user ARN associated with the on-premises instance.
+ IamUserArn *string `locationName:"iamUserArn" type:"string"`
+
+ // The ARN of the on-premises instance.
+ InstanceArn *string `locationName:"instanceArn" type:"string"`
+
+ // The name of the on-premises instance.
+ InstanceName *string `locationName:"instanceName" type:"string"`
+
+ // The time at which the on-premises instance was registered.
+ RegisterTime *time.Time `locationName:"registerTime" type:"timestamp" timestampFormat:"unix"`
+
+ // The tags currently associated with the on-premises instance.
+ Tags []*Tag `locationName:"tags" type:"list"`
+}
+
+// String returns the string representation
+func (s InstanceInfo) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceInfo) GoString() string {
+ return s.String()
+}
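+
+// The GitHubLocation above never travels alone: it is wrapped in a RevisionLocation
+// (defined later in this file) whose RevisionType is set to the GitHub enum value.
+// A minimal editorial sketch of building one; the repository and commit ID below
+// are placeholder values, not part of the generated API:
+//
+//   revType := RevisionLocationTypeGitHub
+//   repo := "my-org/my-app" // account/repository
+//   commit := "8f1a0c2b9d3e4f5a6b7c8d9e0f1a2b3c4d5e6f7a"
+//   rev := &RevisionLocation{
+//     RevisionType:   &revType,
+//     GitHubLocation: &GitHubLocation{Repository: &repo, CommitId: &commit},
+//   }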
+
+// Information about an instance in a deployment.
+type InstanceSummary struct {
+ _ struct{} `type:"structure"`
+
+ // The deployment ID.
+ DeploymentId *string `locationName:"deploymentId" type:"string"`
+
+ // The instance ID.
+ InstanceId *string `locationName:"instanceId" type:"string"`
+
+ // A timestamp indicating when the instance information was last updated.
+ LastUpdatedAt *time.Time `locationName:"lastUpdatedAt" type:"timestamp" timestampFormat:"unix"`
+
+ // A list of lifecycle events for this instance.
+ LifecycleEvents []*LifecycleEvent `locationName:"lifecycleEvents" type:"list"`
+
+ // The deployment status for this instance:
+ //
+ // Pending: The deployment is pending for this instance. InProgress: The
+ // deployment is in progress for this instance. Succeeded: The deployment has
+ // succeeded for this instance. Failed: The deployment has failed for this instance.
+ // Skipped: The deployment has been skipped for this instance. Unknown: The
+ // deployment status is unknown for this instance.
+ Status *string `locationName:"status" type:"string" enum:"InstanceStatus"`
+}
+
+// String returns the string representation
+func (s InstanceSummary) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceSummary) GoString() string {
+ return s.String()
+}
+
+// Information about a deployment lifecycle event.
+type LifecycleEvent struct {
+ _ struct{} `type:"structure"`
+
+ // Diagnostic information about the deployment lifecycle event.
+ Diagnostics *Diagnostics `locationName:"diagnostics" type:"structure"`
+
+ // A timestamp indicating when the deployment lifecycle event ended.
+ EndTime *time.Time `locationName:"endTime" type:"timestamp" timestampFormat:"unix"`
+
+ // The deployment lifecycle event name, such as ApplicationStop, BeforeInstall,
+ // AfterInstall, ApplicationStart, or ValidateService.
+ LifecycleEventName *string `locationName:"lifecycleEventName" type:"string"`
+
+ // A timestamp indicating when the deployment lifecycle event started.
+ StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"unix"`
+
+ // The deployment lifecycle event status:
+ //
+ // Pending: The deployment lifecycle event is pending. InProgress: The deployment
+ // lifecycle event is in progress. Succeeded: The deployment lifecycle event
+ // ran successfully. Failed: The deployment lifecycle event has failed. Skipped:
+ // The deployment lifecycle event has been skipped. Unknown: The deployment
+ // lifecycle event is unknown.
+ Status *string `locationName:"status" type:"string" enum:"LifecycleEventStatus"`
+}
+
+// String returns the string representation
+func (s LifecycleEvent) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LifecycleEvent) GoString() string {
+ return s.String()
+}
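+
+// The types above nest: an InstanceSummary carries LifecycleEvents, and a failed
+// event carries Diagnostics with the lifecycle error code, script name, and log
+// tail. A minimal editorial sketch of pulling the failing script names out of a
+// summary (the helper name is illustrative, not part of the generated API):
+//
+//   func failedScripts(sum *InstanceSummary) []string {
+//     var names []string
+//     for _, ev := range sum.LifecycleEvents {
+//       if ev.Status == nil || *ev.Status != LifecycleEventStatusFailed {
+//         continue
+//       }
+//       if ev.Diagnostics != nil && ev.Diagnostics.ScriptName != nil {
+//         names = append(names, *ev.Diagnostics.ScriptName)
+//       }
+//     }
+//     return names
+//   }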
+
+// Represents the input of a list application revisions operation.
+type ListApplicationRevisionsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of an AWS CodeDeploy application associated with the applicable
+ // IAM user or AWS account.
+ ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"`
+
+ // Whether to list revisions based on whether the revision is the target revision
+ // of a deployment group:
+ //
+ // include: List revisions that are target revisions of a deployment group.
+ // exclude: Do not list revisions that are target revisions of a deployment
+ // group. ignore: List all revisions.
+ Deployed *string `locationName:"deployed" type:"string" enum:"ListStateFilterAction"`
+
+ // An identifier returned from the previous list application revisions call.
+ // It can be used to return the next set of application revisions in the list.
+ NextToken *string `locationName:"nextToken" type:"string"`
+
+ // An Amazon S3 bucket name to limit the search for revisions.
+ //
+ // If set to null, all of the user's buckets will be searched.
+ S3Bucket *string `locationName:"s3Bucket" type:"string"`
+
+ // A key prefix for the set of Amazon S3 objects to limit the search for revisions.
+ S3KeyPrefix *string `locationName:"s3KeyPrefix" type:"string"`
+
+ // The column name to use to sort the list results:
+ //
+ // registerTime: Sort by the time the revisions were registered with AWS CodeDeploy.
+ // firstUsedTime: Sort by the time the revisions were first used in a deployment.
+ // lastUsedTime: Sort by the time the revisions were last used in a deployment.
+ // If not specified or set to null, the results will be returned in an arbitrary
+ // order.
+ SortBy *string `locationName:"sortBy" type:"string" enum:"ApplicationRevisionSortBy"`
+
+ // The order in which to sort the list results:
+ //
+ // ascending: ascending order. descending: descending order. If not specified,
+ // the results will be sorted in ascending order.
+ //
+ // If set to null, the results will be sorted in an arbitrary order.
+ SortOrder *string `locationName:"sortOrder" type:"string" enum:"SortOrder"`
+}
+
+// String returns the string representation
+func (s ListApplicationRevisionsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListApplicationRevisionsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListApplicationRevisionsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListApplicationRevisionsInput"}
+ if s.ApplicationName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ApplicationName"))
+ }
+ if s.ApplicationName != nil && len(*s.ApplicationName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ApplicationName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the output of a list application revisions operation.
+type ListApplicationRevisionsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If a large amount of information is returned, an identifier will also be
+ // returned. It can be used in a subsequent list application revisions call
+ // to return the next set of application revisions in the list.
+ NextToken *string `locationName:"nextToken" type:"string"`
+
+ // A list of locations that contain the matching revisions.
+ Revisions []*RevisionLocation `locationName:"revisions" type:"list"`
+}
+
+// String returns the string representation
+func (s ListApplicationRevisionsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListApplicationRevisionsOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a list applications operation.
+type ListApplicationsInput struct {
+ _ struct{} `type:"structure"`
+
+ // An identifier returned from the previous list applications call. It can be
+ // used to return the next set of applications in the list.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListApplicationsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListApplicationsInput) GoString() string {
+ return s.String()
+}
+
+// Represents the output of a list applications operation.
+type ListApplicationsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of application names.
+ Applications []*string `locationName:"applications" type:"list"`
+
+ // If a large amount of information is returned, an identifier is also returned.
+ // It can be used in a subsequent list applications call to return the next
+ // set of applications in the list.
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListApplicationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListApplicationsOutput) GoString() string { + return s.String() +} + +// Represents the input of a list deployment configurations operation. +type ListDeploymentConfigsInput struct { + _ struct{} `type:"structure"` + + // An identifier returned from the previous list deployment configurations call. + // It can be used to return the next set of deployment configurations in the + // list. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListDeploymentConfigsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDeploymentConfigsInput) GoString() string { + return s.String() +} + +// Represents the output of a list deployment configurations operation. +type ListDeploymentConfigsOutput struct { + _ struct{} `type:"structure"` + + // A list of deployment configurations, including built-in configurations such + // as CodeDeployDefault.OneAtATime. + DeploymentConfigsList []*string `locationName:"deploymentConfigsList" type:"list"` + + // If a large amount of information is returned, an identifier is also returned. + // It can be used in a subsequent list deployment configurations call to return + // the next set of deployment configurations in the list. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListDeploymentConfigsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDeploymentConfigsOutput) GoString() string { + return s.String() +} + +// Represents the input of a list deployment groups operation. +type ListDeploymentGroupsInput struct { + _ struct{} `type:"structure"` + + // The name of an AWS CodeDeploy application associated with the applicable + // IAM user or AWS account. + ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"` + + // An identifier returned from the previous list deployment groups call. It + // can be used to return the next set of deployment groups in the list. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListDeploymentGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDeploymentGroupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListDeploymentGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListDeploymentGroupsInput"} + if s.ApplicationName == nil { + invalidParams.Add(request.NewErrParamRequired("ApplicationName")) + } + if s.ApplicationName != nil && len(*s.ApplicationName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a list deployment groups operation. +type ListDeploymentGroupsOutput struct { + _ struct{} `type:"structure"` + + // The application name. + ApplicationName *string `locationName:"applicationName" min:"1" type:"string"` + + // A list of corresponding deployment group names. 
+ DeploymentGroups []*string `locationName:"deploymentGroups" type:"list"`
+
+ // If a large amount of information is returned, an identifier is also returned.
+ // It can be used in a subsequent list deployment groups call to return the
+ // next set of deployment groups in the list.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListDeploymentGroupsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListDeploymentGroupsOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a list deployment instances operation.
+type ListDeploymentInstancesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The unique ID of a deployment.
+ DeploymentId *string `locationName:"deploymentId" type:"string" required:"true"`
+
+ // A subset of instances to list by status:
+ //
+ // Pending: Include those instances with pending deployments. InProgress: Include
+ // those instances where deployments are still in progress. Succeeded: Include
+ // those instances with successful deployments. Failed: Include those instances
+ // with failed deployments. Skipped: Include those instances with skipped deployments.
+ // Unknown: Include those instances with deployments in an unknown state.
+ InstanceStatusFilter []*string `locationName:"instanceStatusFilter" type:"list"`
+
+ // An identifier returned from the previous list deployment instances call.
+ // It can be used to return the next set of deployment instances in the list.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListDeploymentInstancesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListDeploymentInstancesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListDeploymentInstancesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListDeploymentInstancesInput"}
+ if s.DeploymentId == nil {
+ invalidParams.Add(request.NewErrParamRequired("DeploymentId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the output of a list deployment instances operation.
+type ListDeploymentInstancesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of instance IDs.
+ InstancesList []*string `locationName:"instancesList" type:"list"`
+
+ // If a large amount of information is returned, an identifier is also returned.
+ // It can be used in a subsequent list deployment instances call to return the
+ // next set of deployment instances in the list.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListDeploymentInstancesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListDeploymentInstancesOutput) GoString() string {
+ return s.String()
+}
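+
+// Every List* operation in this file pages the same way: a non-nil NextToken in
+// the output is copied into the next input until the service stops returning one.
+// A minimal editorial sketch of draining ListDeploymentInstances by hand, assuming
+// svc is a *CodeDeploy client and the surrounding function returns ([]*string,
+// error); the generated *Pages methods wrap this same loop:
+//
+//   in := &ListDeploymentInstancesInput{DeploymentId: &deploymentID}
+//   var ids []*string
+//   for {
+//     out, err := svc.ListDeploymentInstances(in)
+//     if err != nil {
+//       return nil, err
+//     }
+//     ids = append(ids, out.InstancesList...)
+//     if out.NextToken == nil {
+//       return ids, nil
+//     }
+//     in.NextToken = out.NextToken
+//   }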
+
+// Represents the input of a list deployments operation.
+type ListDeploymentsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of an AWS CodeDeploy application associated with the applicable
+ // IAM user or AWS account.
+ ApplicationName *string `locationName:"applicationName" min:"1" type:"string"`
+
+ // A time range (start and end) for returning a subset of the list of deployments.
+ CreateTimeRange *TimeRange `locationName:"createTimeRange" type:"structure"`
+
+ // The name of an existing deployment group for the specified application.
+ DeploymentGroupName *string `locationName:"deploymentGroupName" min:"1" type:"string"`
+
+ // A subset of deployments to list by status:
+ //
+ // Created: Include created deployments in the resulting list. Queued: Include
+ // queued deployments in the resulting list. InProgress: Include in-progress
+ // deployments in the resulting list. Succeeded: Include successful deployments
+ // in the resulting list. Failed: Include failed deployments in the resulting
+ // list. Stopped: Include stopped deployments in the resulting list.
+ IncludeOnlyStatuses []*string `locationName:"includeOnlyStatuses" type:"list"`
+
+ // An identifier returned from the previous list deployments call. It can be
+ // used to return the next set of deployments in the list.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListDeploymentsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListDeploymentsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListDeploymentsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListDeploymentsInput"}
+ if s.ApplicationName != nil && len(*s.ApplicationName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ApplicationName", 1))
+ }
+ if s.DeploymentGroupName != nil && len(*s.DeploymentGroupName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("DeploymentGroupName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the output of a list deployments operation.
+type ListDeploymentsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of deployment IDs.
+ Deployments []*string `locationName:"deployments" type:"list"`
+
+ // If a large amount of information is returned, an identifier is also returned.
+ // It can be used in a subsequent list deployments call to return the next set
+ // of deployments in the list.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListDeploymentsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListDeploymentsOutput) GoString() string {
+ return s.String()
+}
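+
+// ListDeploymentsInput combines three independent filters: a deployment group, a
+// status whitelist, and a creation-time window. A minimal editorial sketch of
+// asking for one group's failed deployments since a given time; svc, appName,
+// groupName, and since (a time.Time) are assumed, and the DeploymentStatus*
+// constants are defined at the end of this file:
+//
+//   status := DeploymentStatusFailed
+//   out, err := svc.ListDeployments(&ListDeploymentsInput{
+//     ApplicationName:     &appName,
+//     DeploymentGroupName: &groupName,
+//     IncludeOnlyStatuses: []*string{&status},
+//     CreateTimeRange:     &TimeRange{Start: &since},
+//   })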
+
+// Represents the input of a list on-premises instances operation.
+type ListOnPremisesInstancesInput struct {
+ _ struct{} `type:"structure"`
+
+ // An identifier returned from the previous list on-premises instances call.
+ // It can be used to return the next set of on-premises instances in the list.
+ NextToken *string `locationName:"nextToken" type:"string"`
+
+ // The registration status of the on-premises instances:
+ //
+ // Deregistered: Include deregistered on-premises instances in the resulting
+ // list. Registered: Include registered on-premises instances in the resulting
+ // list.
+ RegistrationStatus *string `locationName:"registrationStatus" type:"string" enum:"RegistrationStatus"`
+
+ // The on-premises instance tags that will be used to restrict the corresponding
+ // on-premises instance names returned.
+ TagFilters []*TagFilter `locationName:"tagFilters" type:"list"`
+}
+
+// String returns the string representation
+func (s ListOnPremisesInstancesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListOnPremisesInstancesInput) GoString() string {
+ return s.String()
+}
+
+// Represents the output of a list on-premises instances operation.
+type ListOnPremisesInstancesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The list of matching on-premises instance names.
+ InstanceNames []*string `locationName:"instanceNames" type:"list"`
+
+ // If a large amount of information is returned, an identifier is also returned.
+ // It can be used in a subsequent list on-premises instances call to return
+ // the next set of on-premises instances in the list.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListOnPremisesInstancesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListOnPremisesInstancesOutput) GoString() string {
+ return s.String()
+}
+
+// Information about the minimum number of healthy instances.
+type MinimumHealthyHosts struct {
+ _ struct{} `type:"structure"`
+
+ // The minimum healthy instance type:
+ //
+ // HOST_COUNT: The minimum number of healthy instances as an absolute value.
+ // FLEET_PERCENT: The minimum number of healthy instances as a percentage of
+ // the total number of instances in the deployment. In an example of nine instances,
+ // if a HOST_COUNT of six is specified, deploy to up to three instances at a
+ // time. The deployment will be successful if six or more instances are deployed
+ // to successfully; otherwise, the deployment fails. If a FLEET_PERCENT of 40
+ // is specified, deploy to up to five instances at a time. The deployment will
+ // be successful if four or more instances are deployed to successfully; otherwise,
+ // the deployment fails.
+ //
+ // In a call to the get deployment configuration operation, CodeDeployDefault.OneAtATime
+ // will return a minimum healthy instance type of MOST_CONCURRENCY and a value
+ // of 1. This means a deployment to only one instance at a time. (You cannot
+ // set the type to MOST_CONCURRENCY, only to HOST_COUNT or FLEET_PERCENT.) In
+ // addition, with CodeDeployDefault.OneAtATime, AWS CodeDeploy will try to ensure
+ // that all instances but one are kept in a healthy state during the deployment.
+ // Although this allows one instance at a time to be taken offline for a new
+ // deployment, it also means that if the deployment to the last instance fails,
+ // the overall deployment still succeeds.
+ Type *string `locationName:"type" type:"string" enum:"MinimumHealthyHostsType"`
+
+ // The minimum healthy instance value.
+ Value *int64 `locationName:"value" type:"integer"`
+}
+
+// String returns the string representation
+func (s MinimumHealthyHosts) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s MinimumHealthyHosts) GoString() string {
+ return s.String()
+}
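+
+// The arithmetic above treats Value as a healthy-host floor: with nine instances,
+// HOST_COUNT 6 leaves at most 9 - 6 = 3 instances in flight at once, and
+// FLEET_PERCENT 40 rounds 9 * 0.40 = 3.6 up to a floor of four healthy instances,
+// leaving at most five in flight. A minimal editorial sketch of the two
+// configurations, restating the nine-instance example from the documentation above:
+//
+//   count, pct := int64(6), int64(40)
+//   hostCount := MinimumHealthyHostsTypeHostCount
+//   fleetPct := MinimumHealthyHostsTypeFleetPercent
+//
+//   // Nine instances: deploy to at most three at a time.
+//   byCount := &MinimumHealthyHosts{Type: &hostCount, Value: &count}
+//
+//   // Nine instances: at least four healthy, so at most five at a time.
+//   byPercent := &MinimumHealthyHosts{Type: &fleetPct, Value: &pct}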
+
+// Represents the input of a register application revision operation.
+type RegisterApplicationRevisionInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of an AWS CodeDeploy application associated with the applicable
+ // IAM user or AWS account.
+ ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"`
+
+ // A comment about the revision.
+ Description *string `locationName:"description" type:"string"`
+
+ // Information about the application revision to register, including type and
+ // location.
+ Revision *RevisionLocation `locationName:"revision" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s RegisterApplicationRevisionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterApplicationRevisionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RegisterApplicationRevisionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RegisterApplicationRevisionInput"}
+ if s.ApplicationName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ApplicationName"))
+ }
+ if s.ApplicationName != nil && len(*s.ApplicationName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ApplicationName", 1))
+ }
+ if s.Revision == nil {
+ invalidParams.Add(request.NewErrParamRequired("Revision"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type RegisterApplicationRevisionOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s RegisterApplicationRevisionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterApplicationRevisionOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the input of the register on-premises instance operation.
+type RegisterOnPremisesInstanceInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN of the IAM user to associate with the on-premises instance.
+ IamUserArn *string `locationName:"iamUserArn" type:"string" required:"true"`
+
+ // The name of the on-premises instance to register.
+ InstanceName *string `locationName:"instanceName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RegisterOnPremisesInstanceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterOnPremisesInstanceInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RegisterOnPremisesInstanceInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RegisterOnPremisesInstanceInput"}
+ if s.IamUserArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("IamUserArn"))
+ }
+ if s.InstanceName == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type RegisterOnPremisesInstanceOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s RegisterOnPremisesInstanceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterOnPremisesInstanceOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a remove tags from on-premises instances operation.
+type RemoveTagsFromOnPremisesInstancesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The names of the on-premises instances from which to remove tags.
+ InstanceNames []*string `locationName:"instanceNames" type:"list" required:"true"`
+
+ // The tag key-value pairs to remove from the on-premises instances.
+ Tags []*Tag `locationName:"tags" type:"list" required:"true"` +} + +// String returns the string representation +func (s RemoveTagsFromOnPremisesInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsFromOnPremisesInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RemoveTagsFromOnPremisesInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemoveTagsFromOnPremisesInstancesInput"} + if s.InstanceNames == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceNames")) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RemoveTagsFromOnPremisesInstancesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveTagsFromOnPremisesInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsFromOnPremisesInstancesOutput) GoString() string { + return s.String() +} + +// Information about an application revision. +type RevisionInfo struct { + _ struct{} `type:"structure"` + + // Information about an application revision. + GenericRevisionInfo *GenericRevisionInfo `locationName:"genericRevisionInfo" type:"structure"` + + // Information about the location of an application revision. + RevisionLocation *RevisionLocation `locationName:"revisionLocation" type:"structure"` +} + +// String returns the string representation +func (s RevisionInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevisionInfo) GoString() string { + return s.String() +} + +// Information about the location of an application revision. +type RevisionLocation struct { + _ struct{} `type:"structure"` + + // Information about the location of application artifacts stored in GitHub. + GitHubLocation *GitHubLocation `locationName:"gitHubLocation" type:"structure"` + + // The type of application revision: + // + // S3: An application revision stored in Amazon S3. GitHub: An application + // revision stored in GitHub. + RevisionType *string `locationName:"revisionType" type:"string" enum:"RevisionLocationType"` + + // Information about the location of application artifacts stored in Amazon + // S3. + S3Location *S3Location `locationName:"s3Location" type:"structure"` +} + +// String returns the string representation +func (s RevisionLocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevisionLocation) GoString() string { + return s.String() +} + +// Information about the location of application artifacts stored in Amazon +// S3. +type S3Location struct { + _ struct{} `type:"structure"` + + // The name of the Amazon S3 bucket where the application revision is stored. + Bucket *string `locationName:"bucket" type:"string"` + + // The file type of the application revision. Must be one of the following: + // + // tar: A tar archive file. tgz: A compressed tar archive file. zip: A zip + // archive file. + BundleType *string `locationName:"bundleType" type:"string" enum:"BundleType"` + + // The ETag of the Amazon S3 object that represents the bundled artifacts for + // the application revision. 
+ // + // If the ETag is not specified as an input parameter, ETag validation of the + // object will be skipped. + ETag *string `locationName:"eTag" type:"string"` + + // The name of the Amazon S3 object that represents the bundled artifacts for + // the application revision. + Key *string `locationName:"key" type:"string"` + + // A specific version of the Amazon S3 object that represents the bundled artifacts + // for the application revision. + // + // If the version is not specified, the system will use the most recent version + // by default. + Version *string `locationName:"version" type:"string"` +} + +// String returns the string representation +func (s S3Location) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3Location) GoString() string { + return s.String() +} + +// Represents the input of a stop deployment operation. +type StopDeploymentInput struct { + _ struct{} `type:"structure"` + + // The unique ID of a deployment. + DeploymentId *string `locationName:"deploymentId" type:"string" required:"true"` +} + +// String returns the string representation +func (s StopDeploymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopDeploymentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StopDeploymentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StopDeploymentInput"} + if s.DeploymentId == nil { + invalidParams.Add(request.NewErrParamRequired("DeploymentId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a stop deployment operation. +type StopDeploymentOutput struct { + _ struct{} `type:"structure"` + + // The status of the stop deployment operation: + // + // Pending: The stop operation is pending. Succeeded: The stop operation was + // successful. + Status *string `locationName:"status" type:"string" enum:"StopStatus"` + + // An accompanying status message. + StatusMessage *string `locationName:"statusMessage" type:"string"` +} + +// String returns the string representation +func (s StopDeploymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopDeploymentOutput) GoString() string { + return s.String() +} + +// Information about a tag. +type Tag struct { + _ struct{} `type:"structure"` + + // The tag's key. + Key *string `type:"string"` + + // The tag's value. + Value *string `type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Information about an on-premises instance tag filter. +type TagFilter struct { + _ struct{} `type:"structure"` + + // The on-premises instance tag filter key. + Key *string `type:"string"` + + // The on-premises instance tag filter type: + // + // KEY_ONLY: Key only. VALUE_ONLY: Value only. KEY_AND_VALUE: Key and value. + Type *string `type:"string" enum:"TagFilterType"` + + // The on-premises instance tag filter value. 
+ Value *string `type:"string"` +} + +// String returns the string representation +func (s TagFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagFilter) GoString() string { + return s.String() +} + +// Information about a time range. +type TimeRange struct { + _ struct{} `type:"structure"` + + // The end time of the time range. + // + // Specify null to leave the end time open-ended. + End *time.Time `locationName:"end" type:"timestamp" timestampFormat:"unix"` + + // The start time of the time range. + // + // Specify null to leave the start time open-ended. + Start *time.Time `locationName:"start" type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s TimeRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TimeRange) GoString() string { + return s.String() +} + +// Information about notification triggers for the deployment group. +type TriggerConfig struct { + _ struct{} `type:"structure"` + + // The event type or types for which notifications are triggered. + // + // The following event type values are supported: + // + // DEPLOYMENT_START DEPLOYMENT_SUCCESS DEPLOYMENT_FAILURE DEPLOYMENT_STOP + // INSTANCE_START INSTANCE_SUCCESS INSTANCE_FAILURE + TriggerEvents []*string `locationName:"triggerEvents" type:"list"` + + // The name of the notification trigger. + TriggerName *string `locationName:"triggerName" type:"string"` + + // The ARN of the Amazon Simple Notification Service topic through which notifications + // about deployment or instance events are sent. + TriggerTargetArn *string `locationName:"triggerTargetArn" type:"string"` +} + +// String returns the string representation +func (s TriggerConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TriggerConfig) GoString() string { + return s.String() +} + +// Represents the input of an update application operation. +type UpdateApplicationInput struct { + _ struct{} `type:"structure"` + + // The current name of the application you want to change. + ApplicationName *string `locationName:"applicationName" min:"1" type:"string"` + + // The new name to give the application. + NewApplicationName *string `locationName:"newApplicationName" min:"1" type:"string"` +} + +// String returns the string representation +func (s UpdateApplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateApplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateApplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateApplicationInput"} + if s.ApplicationName != nil && len(*s.ApplicationName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationName", 1)) + } + if s.NewApplicationName != nil && len(*s.NewApplicationName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NewApplicationName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateApplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateApplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateApplicationOutput) GoString() string { + return s.String() +} + +// Represents the input of an update deployment group operation. +type UpdateDeploymentGroupInput struct { + _ struct{} `type:"structure"` + + // The application name corresponding to the deployment group to update. + ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"` + + // The replacement list of Auto Scaling groups to be included in the deployment + // group, if you want to change them. To keep the Auto Scaling groups, enter + // their names. To remove Auto Scaling groups, do not enter any Auto Scaling + // group names. + AutoScalingGroups []*string `locationName:"autoScalingGroups" type:"list"` + + // The current name of the deployment group. + CurrentDeploymentGroupName *string `locationName:"currentDeploymentGroupName" min:"1" type:"string" required:"true"` + + // The replacement deployment configuration name to use, if you want to change + // it. + DeploymentConfigName *string `locationName:"deploymentConfigName" min:"1" type:"string"` + + // The replacement set of Amazon EC2 tags on which to filter, if you want to + // change them. To keep the existing tags, enter their names. To remove tags, + // do not enter any tag names. + Ec2TagFilters []*EC2TagFilter `locationName:"ec2TagFilters" type:"list"` + + // The new name of the deployment group, if you want to change it. + NewDeploymentGroupName *string `locationName:"newDeploymentGroupName" min:"1" type:"string"` + + // The replacement set of on-premises instance tags on which to filter, if you + // want to change them. To keep the existing tags, enter their names. To remove + // tags, do not enter any tag names. + OnPremisesInstanceTagFilters []*TagFilter `locationName:"onPremisesInstanceTagFilters" type:"list"` + + // A replacement ARN for the service role, if you want to change it. + ServiceRoleArn *string `locationName:"serviceRoleArn" type:"string"` + + // Information about triggers to change when the deployment group is updated. + TriggerConfigurations []*TriggerConfig `locationName:"triggerConfigurations" type:"list"` +} + +// String returns the string representation +func (s UpdateDeploymentGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDeploymentGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateDeploymentGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateDeploymentGroupInput"} + if s.ApplicationName == nil { + invalidParams.Add(request.NewErrParamRequired("ApplicationName")) + } + if s.ApplicationName != nil && len(*s.ApplicationName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationName", 1)) + } + if s.CurrentDeploymentGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("CurrentDeploymentGroupName")) + } + if s.CurrentDeploymentGroupName != nil && len(*s.CurrentDeploymentGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CurrentDeploymentGroupName", 1)) + } + if s.DeploymentConfigName != nil && len(*s.DeploymentConfigName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DeploymentConfigName", 1)) + } + if s.NewDeploymentGroupName != nil && len(*s.NewDeploymentGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NewDeploymentGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of an update deployment group operation. +type UpdateDeploymentGroupOutput struct { + _ struct{} `type:"structure"` + + // If the output contains no data, and the corresponding deployment group contained + // at least one Auto Scaling group, AWS CodeDeploy successfully removed all + // corresponding Auto Scaling lifecycle event hooks from the AWS account. If + // the output contains data, AWS CodeDeploy could not remove some Auto Scaling + // lifecycle event hooks from the AWS account. + HooksNotCleanedUp []*AutoScalingGroup `locationName:"hooksNotCleanedUp" type:"list"` +} + +// String returns the string representation +func (s UpdateDeploymentGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDeploymentGroupOutput) GoString() string { + return s.String() +} + +const ( + // @enum ApplicationRevisionSortBy + ApplicationRevisionSortByRegisterTime = "registerTime" + // @enum ApplicationRevisionSortBy + ApplicationRevisionSortByFirstUsedTime = "firstUsedTime" + // @enum ApplicationRevisionSortBy + ApplicationRevisionSortByLastUsedTime = "lastUsedTime" +) + +const ( + // @enum BundleType + BundleTypeTar = "tar" + // @enum BundleType + BundleTypeTgz = "tgz" + // @enum BundleType + BundleTypeZip = "zip" +) + +const ( + // @enum DeploymentCreator + DeploymentCreatorUser = "user" + // @enum DeploymentCreator + DeploymentCreatorAutoscaling = "autoscaling" +) + +const ( + // @enum DeploymentStatus + DeploymentStatusCreated = "Created" + // @enum DeploymentStatus + DeploymentStatusQueued = "Queued" + // @enum DeploymentStatus + DeploymentStatusInProgress = "InProgress" + // @enum DeploymentStatus + DeploymentStatusSucceeded = "Succeeded" + // @enum DeploymentStatus + DeploymentStatusFailed = "Failed" + // @enum DeploymentStatus + DeploymentStatusStopped = "Stopped" +) + +const ( + // @enum EC2TagFilterType + EC2TagFilterTypeKeyOnly = "KEY_ONLY" + // @enum EC2TagFilterType + EC2TagFilterTypeValueOnly = "VALUE_ONLY" + // @enum EC2TagFilterType + EC2TagFilterTypeKeyAndValue = "KEY_AND_VALUE" +) + +const ( + // @enum ErrorCode + ErrorCodeDeploymentGroupMissing = "DEPLOYMENT_GROUP_MISSING" + // @enum ErrorCode + ErrorCodeApplicationMissing = "APPLICATION_MISSING" + // @enum ErrorCode + ErrorCodeRevisionMissing = "REVISION_MISSING" + // @enum ErrorCode + ErrorCodeIamRoleMissing = "IAM_ROLE_MISSING" + // @enum ErrorCode + ErrorCodeIamRolePermissions = 
"IAM_ROLE_PERMISSIONS" + // @enum ErrorCode + ErrorCodeNoEc2Subscription = "NO_EC2_SUBSCRIPTION" + // @enum ErrorCode + ErrorCodeOverMaxInstances = "OVER_MAX_INSTANCES" + // @enum ErrorCode + ErrorCodeNoInstances = "NO_INSTANCES" + // @enum ErrorCode + ErrorCodeTimeout = "TIMEOUT" + // @enum ErrorCode + ErrorCodeHealthConstraintsInvalid = "HEALTH_CONSTRAINTS_INVALID" + // @enum ErrorCode + ErrorCodeHealthConstraints = "HEALTH_CONSTRAINTS" + // @enum ErrorCode + ErrorCodeInternalError = "INTERNAL_ERROR" + // @enum ErrorCode + ErrorCodeThrottled = "THROTTLED" +) + +const ( + // @enum InstanceStatus + InstanceStatusPending = "Pending" + // @enum InstanceStatus + InstanceStatusInProgress = "InProgress" + // @enum InstanceStatus + InstanceStatusSucceeded = "Succeeded" + // @enum InstanceStatus + InstanceStatusFailed = "Failed" + // @enum InstanceStatus + InstanceStatusSkipped = "Skipped" + // @enum InstanceStatus + InstanceStatusUnknown = "Unknown" +) + +const ( + // @enum LifecycleErrorCode + LifecycleErrorCodeSuccess = "Success" + // @enum LifecycleErrorCode + LifecycleErrorCodeScriptMissing = "ScriptMissing" + // @enum LifecycleErrorCode + LifecycleErrorCodeScriptNotExecutable = "ScriptNotExecutable" + // @enum LifecycleErrorCode + LifecycleErrorCodeScriptTimedOut = "ScriptTimedOut" + // @enum LifecycleErrorCode + LifecycleErrorCodeScriptFailed = "ScriptFailed" + // @enum LifecycleErrorCode + LifecycleErrorCodeUnknownError = "UnknownError" +) + +const ( + // @enum LifecycleEventStatus + LifecycleEventStatusPending = "Pending" + // @enum LifecycleEventStatus + LifecycleEventStatusInProgress = "InProgress" + // @enum LifecycleEventStatus + LifecycleEventStatusSucceeded = "Succeeded" + // @enum LifecycleEventStatus + LifecycleEventStatusFailed = "Failed" + // @enum LifecycleEventStatus + LifecycleEventStatusSkipped = "Skipped" + // @enum LifecycleEventStatus + LifecycleEventStatusUnknown = "Unknown" +) + +const ( + // @enum ListStateFilterAction + ListStateFilterActionInclude = "include" + // @enum ListStateFilterAction + ListStateFilterActionExclude = "exclude" + // @enum ListStateFilterAction + ListStateFilterActionIgnore = "ignore" +) + +const ( + // @enum MinimumHealthyHostsType + MinimumHealthyHostsTypeHostCount = "HOST_COUNT" + // @enum MinimumHealthyHostsType + MinimumHealthyHostsTypeFleetPercent = "FLEET_PERCENT" +) + +const ( + // @enum RegistrationStatus + RegistrationStatusRegistered = "Registered" + // @enum RegistrationStatus + RegistrationStatusDeregistered = "Deregistered" +) + +const ( + // @enum RevisionLocationType + RevisionLocationTypeS3 = "S3" + // @enum RevisionLocationType + RevisionLocationTypeGitHub = "GitHub" +) + +const ( + // @enum SortOrder + SortOrderAscending = "ascending" + // @enum SortOrder + SortOrderDescending = "descending" +) + +const ( + // @enum StopStatus + StopStatusPending = "Pending" + // @enum StopStatus + StopStatusSucceeded = "Succeeded" +) + +const ( + // @enum TagFilterType + TagFilterTypeKeyOnly = "KEY_ONLY" + // @enum TagFilterType + TagFilterTypeValueOnly = "VALUE_ONLY" + // @enum TagFilterType + TagFilterTypeKeyAndValue = "KEY_AND_VALUE" +) + +const ( + // @enum TriggerEventType + TriggerEventTypeDeploymentStart = "DeploymentStart" + // @enum TriggerEventType + TriggerEventTypeDeploymentSuccess = "DeploymentSuccess" + // @enum TriggerEventType + TriggerEventTypeDeploymentFailure = "DeploymentFailure" + // @enum TriggerEventType + TriggerEventTypeDeploymentStop = "DeploymentStop" + // @enum TriggerEventType + TriggerEventTypeInstanceStart = 
"InstanceStart" + // @enum TriggerEventType + TriggerEventTypeInstanceSuccess = "InstanceSuccess" + // @enum TriggerEventType + TriggerEventTypeInstanceFailure = "InstanceFailure" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/codedeploy/codedeployiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/codedeploy/codedeployiface/interface.go new file mode 100644 index 000000000..27da74320 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/codedeploy/codedeployiface/interface.go @@ -0,0 +1,166 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package codedeployiface provides an interface for the AWS CodeDeploy. +package codedeployiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/codedeploy" +) + +// CodeDeployAPI is the interface type for codedeploy.CodeDeploy. +type CodeDeployAPI interface { + AddTagsToOnPremisesInstancesRequest(*codedeploy.AddTagsToOnPremisesInstancesInput) (*request.Request, *codedeploy.AddTagsToOnPremisesInstancesOutput) + + AddTagsToOnPremisesInstances(*codedeploy.AddTagsToOnPremisesInstancesInput) (*codedeploy.AddTagsToOnPremisesInstancesOutput, error) + + BatchGetApplicationRevisionsRequest(*codedeploy.BatchGetApplicationRevisionsInput) (*request.Request, *codedeploy.BatchGetApplicationRevisionsOutput) + + BatchGetApplicationRevisions(*codedeploy.BatchGetApplicationRevisionsInput) (*codedeploy.BatchGetApplicationRevisionsOutput, error) + + BatchGetApplicationsRequest(*codedeploy.BatchGetApplicationsInput) (*request.Request, *codedeploy.BatchGetApplicationsOutput) + + BatchGetApplications(*codedeploy.BatchGetApplicationsInput) (*codedeploy.BatchGetApplicationsOutput, error) + + BatchGetDeploymentGroupsRequest(*codedeploy.BatchGetDeploymentGroupsInput) (*request.Request, *codedeploy.BatchGetDeploymentGroupsOutput) + + BatchGetDeploymentGroups(*codedeploy.BatchGetDeploymentGroupsInput) (*codedeploy.BatchGetDeploymentGroupsOutput, error) + + BatchGetDeploymentInstancesRequest(*codedeploy.BatchGetDeploymentInstancesInput) (*request.Request, *codedeploy.BatchGetDeploymentInstancesOutput) + + BatchGetDeploymentInstances(*codedeploy.BatchGetDeploymentInstancesInput) (*codedeploy.BatchGetDeploymentInstancesOutput, error) + + BatchGetDeploymentsRequest(*codedeploy.BatchGetDeploymentsInput) (*request.Request, *codedeploy.BatchGetDeploymentsOutput) + + BatchGetDeployments(*codedeploy.BatchGetDeploymentsInput) (*codedeploy.BatchGetDeploymentsOutput, error) + + BatchGetOnPremisesInstancesRequest(*codedeploy.BatchGetOnPremisesInstancesInput) (*request.Request, *codedeploy.BatchGetOnPremisesInstancesOutput) + + BatchGetOnPremisesInstances(*codedeploy.BatchGetOnPremisesInstancesInput) (*codedeploy.BatchGetOnPremisesInstancesOutput, error) + + CreateApplicationRequest(*codedeploy.CreateApplicationInput) (*request.Request, *codedeploy.CreateApplicationOutput) + + CreateApplication(*codedeploy.CreateApplicationInput) (*codedeploy.CreateApplicationOutput, error) + + CreateDeploymentRequest(*codedeploy.CreateDeploymentInput) (*request.Request, *codedeploy.CreateDeploymentOutput) + + CreateDeployment(*codedeploy.CreateDeploymentInput) (*codedeploy.CreateDeploymentOutput, error) + + CreateDeploymentConfigRequest(*codedeploy.CreateDeploymentConfigInput) (*request.Request, *codedeploy.CreateDeploymentConfigOutput) + + CreateDeploymentConfig(*codedeploy.CreateDeploymentConfigInput) (*codedeploy.CreateDeploymentConfigOutput, error) + + CreateDeploymentGroupRequest(*codedeploy.CreateDeploymentGroupInput) 
(*request.Request, *codedeploy.CreateDeploymentGroupOutput) + + CreateDeploymentGroup(*codedeploy.CreateDeploymentGroupInput) (*codedeploy.CreateDeploymentGroupOutput, error) + + DeleteApplicationRequest(*codedeploy.DeleteApplicationInput) (*request.Request, *codedeploy.DeleteApplicationOutput) + + DeleteApplication(*codedeploy.DeleteApplicationInput) (*codedeploy.DeleteApplicationOutput, error) + + DeleteDeploymentConfigRequest(*codedeploy.DeleteDeploymentConfigInput) (*request.Request, *codedeploy.DeleteDeploymentConfigOutput) + + DeleteDeploymentConfig(*codedeploy.DeleteDeploymentConfigInput) (*codedeploy.DeleteDeploymentConfigOutput, error) + + DeleteDeploymentGroupRequest(*codedeploy.DeleteDeploymentGroupInput) (*request.Request, *codedeploy.DeleteDeploymentGroupOutput) + + DeleteDeploymentGroup(*codedeploy.DeleteDeploymentGroupInput) (*codedeploy.DeleteDeploymentGroupOutput, error) + + DeregisterOnPremisesInstanceRequest(*codedeploy.DeregisterOnPremisesInstanceInput) (*request.Request, *codedeploy.DeregisterOnPremisesInstanceOutput) + + DeregisterOnPremisesInstance(*codedeploy.DeregisterOnPremisesInstanceInput) (*codedeploy.DeregisterOnPremisesInstanceOutput, error) + + GetApplicationRequest(*codedeploy.GetApplicationInput) (*request.Request, *codedeploy.GetApplicationOutput) + + GetApplication(*codedeploy.GetApplicationInput) (*codedeploy.GetApplicationOutput, error) + + GetApplicationRevisionRequest(*codedeploy.GetApplicationRevisionInput) (*request.Request, *codedeploy.GetApplicationRevisionOutput) + + GetApplicationRevision(*codedeploy.GetApplicationRevisionInput) (*codedeploy.GetApplicationRevisionOutput, error) + + GetDeploymentRequest(*codedeploy.GetDeploymentInput) (*request.Request, *codedeploy.GetDeploymentOutput) + + GetDeployment(*codedeploy.GetDeploymentInput) (*codedeploy.GetDeploymentOutput, error) + + GetDeploymentConfigRequest(*codedeploy.GetDeploymentConfigInput) (*request.Request, *codedeploy.GetDeploymentConfigOutput) + + GetDeploymentConfig(*codedeploy.GetDeploymentConfigInput) (*codedeploy.GetDeploymentConfigOutput, error) + + GetDeploymentGroupRequest(*codedeploy.GetDeploymentGroupInput) (*request.Request, *codedeploy.GetDeploymentGroupOutput) + + GetDeploymentGroup(*codedeploy.GetDeploymentGroupInput) (*codedeploy.GetDeploymentGroupOutput, error) + + GetDeploymentInstanceRequest(*codedeploy.GetDeploymentInstanceInput) (*request.Request, *codedeploy.GetDeploymentInstanceOutput) + + GetDeploymentInstance(*codedeploy.GetDeploymentInstanceInput) (*codedeploy.GetDeploymentInstanceOutput, error) + + GetOnPremisesInstanceRequest(*codedeploy.GetOnPremisesInstanceInput) (*request.Request, *codedeploy.GetOnPremisesInstanceOutput) + + GetOnPremisesInstance(*codedeploy.GetOnPremisesInstanceInput) (*codedeploy.GetOnPremisesInstanceOutput, error) + + ListApplicationRevisionsRequest(*codedeploy.ListApplicationRevisionsInput) (*request.Request, *codedeploy.ListApplicationRevisionsOutput) + + ListApplicationRevisions(*codedeploy.ListApplicationRevisionsInput) (*codedeploy.ListApplicationRevisionsOutput, error) + + ListApplicationRevisionsPages(*codedeploy.ListApplicationRevisionsInput, func(*codedeploy.ListApplicationRevisionsOutput, bool) bool) error + + ListApplicationsRequest(*codedeploy.ListApplicationsInput) (*request.Request, *codedeploy.ListApplicationsOutput) + + ListApplications(*codedeploy.ListApplicationsInput) (*codedeploy.ListApplicationsOutput, error) + + ListApplicationsPages(*codedeploy.ListApplicationsInput, func(*codedeploy.ListApplicationsOutput, bool) bool) 
error + + ListDeploymentConfigsRequest(*codedeploy.ListDeploymentConfigsInput) (*request.Request, *codedeploy.ListDeploymentConfigsOutput) + + ListDeploymentConfigs(*codedeploy.ListDeploymentConfigsInput) (*codedeploy.ListDeploymentConfigsOutput, error) + + ListDeploymentConfigsPages(*codedeploy.ListDeploymentConfigsInput, func(*codedeploy.ListDeploymentConfigsOutput, bool) bool) error + + ListDeploymentGroupsRequest(*codedeploy.ListDeploymentGroupsInput) (*request.Request, *codedeploy.ListDeploymentGroupsOutput) + + ListDeploymentGroups(*codedeploy.ListDeploymentGroupsInput) (*codedeploy.ListDeploymentGroupsOutput, error) + + ListDeploymentGroupsPages(*codedeploy.ListDeploymentGroupsInput, func(*codedeploy.ListDeploymentGroupsOutput, bool) bool) error + + ListDeploymentInstancesRequest(*codedeploy.ListDeploymentInstancesInput) (*request.Request, *codedeploy.ListDeploymentInstancesOutput) + + ListDeploymentInstances(*codedeploy.ListDeploymentInstancesInput) (*codedeploy.ListDeploymentInstancesOutput, error) + + ListDeploymentInstancesPages(*codedeploy.ListDeploymentInstancesInput, func(*codedeploy.ListDeploymentInstancesOutput, bool) bool) error + + ListDeploymentsRequest(*codedeploy.ListDeploymentsInput) (*request.Request, *codedeploy.ListDeploymentsOutput) + + ListDeployments(*codedeploy.ListDeploymentsInput) (*codedeploy.ListDeploymentsOutput, error) + + ListDeploymentsPages(*codedeploy.ListDeploymentsInput, func(*codedeploy.ListDeploymentsOutput, bool) bool) error + + ListOnPremisesInstancesRequest(*codedeploy.ListOnPremisesInstancesInput) (*request.Request, *codedeploy.ListOnPremisesInstancesOutput) + + ListOnPremisesInstances(*codedeploy.ListOnPremisesInstancesInput) (*codedeploy.ListOnPremisesInstancesOutput, error) + + RegisterApplicationRevisionRequest(*codedeploy.RegisterApplicationRevisionInput) (*request.Request, *codedeploy.RegisterApplicationRevisionOutput) + + RegisterApplicationRevision(*codedeploy.RegisterApplicationRevisionInput) (*codedeploy.RegisterApplicationRevisionOutput, error) + + RegisterOnPremisesInstanceRequest(*codedeploy.RegisterOnPremisesInstanceInput) (*request.Request, *codedeploy.RegisterOnPremisesInstanceOutput) + + RegisterOnPremisesInstance(*codedeploy.RegisterOnPremisesInstanceInput) (*codedeploy.RegisterOnPremisesInstanceOutput, error) + + RemoveTagsFromOnPremisesInstancesRequest(*codedeploy.RemoveTagsFromOnPremisesInstancesInput) (*request.Request, *codedeploy.RemoveTagsFromOnPremisesInstancesOutput) + + RemoveTagsFromOnPremisesInstances(*codedeploy.RemoveTagsFromOnPremisesInstancesInput) (*codedeploy.RemoveTagsFromOnPremisesInstancesOutput, error) + + StopDeploymentRequest(*codedeploy.StopDeploymentInput) (*request.Request, *codedeploy.StopDeploymentOutput) + + StopDeployment(*codedeploy.StopDeploymentInput) (*codedeploy.StopDeploymentOutput, error) + + UpdateApplicationRequest(*codedeploy.UpdateApplicationInput) (*request.Request, *codedeploy.UpdateApplicationOutput) + + UpdateApplication(*codedeploy.UpdateApplicationInput) (*codedeploy.UpdateApplicationOutput, error) + + UpdateDeploymentGroupRequest(*codedeploy.UpdateDeploymentGroupInput) (*request.Request, *codedeploy.UpdateDeploymentGroupOutput) + + UpdateDeploymentGroup(*codedeploy.UpdateDeploymentGroupInput) (*codedeploy.UpdateDeploymentGroupOutput, error) +} + +var _ CodeDeployAPI = (*codedeploy.CodeDeploy)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/codedeploy/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/codedeploy/examples_test.go new file mode 100644 index 
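The CodeDeployAPI interface just added mirrors every operation on codedeploy.CodeDeploy, including the paged variants (ListDeploymentsPages and friends), and the closing `var _ CodeDeployAPI = (*codedeploy.CodeDeploy)(nil)` assertion keeps the concrete client compiling against it. The practical payoff is test doubles: code written against the interface takes the real client in production and a stub in unit tests. A minimal sketch of that pattern, assuming the hypothetical names mockCodeDeploy and deploymentSucceeded (neither is part of the SDK):

    package myapp

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/codedeploy"
        "github.com/aws/aws-sdk-go/service/codedeploy/codedeployiface"
    )

    // mockCodeDeploy embeds the interface, so a test only overrides the
    // methods it actually exercises; calling anything else panics loudly.
    type mockCodeDeploy struct {
        codedeployiface.CodeDeployAPI
    }

    func (m *mockCodeDeploy) GetDeployment(in *codedeploy.GetDeploymentInput) (*codedeploy.GetDeploymentOutput, error) {
        return &codedeploy.GetDeploymentOutput{
            DeploymentInfo: &codedeploy.DeploymentInfo{
                DeploymentId: in.DeploymentId,
                Status:       aws.String(codedeploy.DeploymentStatusSucceeded),
            },
        }, nil
    }

    // deploymentSucceeded accepts the interface, so production callers pass
    // *codedeploy.CodeDeploy and tests pass &mockCodeDeploy{}.
    func deploymentSucceeded(api codedeployiface.CodeDeployAPI, id string) (bool, error) {
        out, err := api.GetDeployment(&codedeploy.GetDeploymentInput{DeploymentId: aws.String(id)})
        if err != nil {
            return false, err
        }
        return *out.DeploymentInfo.Status == codedeploy.DeploymentStatusSucceeded, nil
    }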
000000000..e037f9090 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/codedeploy/examples_test.go @@ -0,0 +1,891 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package codedeploy_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/codedeploy" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleCodeDeploy_AddTagsToOnPremisesInstances() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.AddTagsToOnPremisesInstancesInput{ + InstanceNames: []*string{ // Required + aws.String("InstanceName"), // Required + // More values... + }, + Tags: []*codedeploy.Tag{ // Required + { // Required + Key: aws.String("Key"), + Value: aws.String("Value"), + }, + // More values... + }, + } + resp, err := svc.AddTagsToOnPremisesInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_BatchGetApplicationRevisions() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.BatchGetApplicationRevisionsInput{ + ApplicationName: aws.String("ApplicationName"), // Required + Revisions: []*codedeploy.RevisionLocation{ // Required + { // Required + GitHubLocation: &codedeploy.GitHubLocation{ + CommitId: aws.String("CommitId"), + Repository: aws.String("Repository"), + }, + RevisionType: aws.String("RevisionLocationType"), + S3Location: &codedeploy.S3Location{ + Bucket: aws.String("S3Bucket"), + BundleType: aws.String("BundleType"), + ETag: aws.String("ETag"), + Key: aws.String("S3Key"), + Version: aws.String("VersionId"), + }, + }, + // More values... + }, + } + resp, err := svc.BatchGetApplicationRevisions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_BatchGetApplications() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.BatchGetApplicationsInput{ + ApplicationNames: []*string{ + aws.String("ApplicationName"), // Required + // More values... + }, + } + resp, err := svc.BatchGetApplications(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_BatchGetDeploymentGroups() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.BatchGetDeploymentGroupsInput{ + ApplicationName: aws.String("ApplicationName"), // Required + DeploymentGroupNames: []*string{ // Required + aws.String("DeploymentGroupName"), // Required + // More values... + }, + } + resp, err := svc.BatchGetDeploymentGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_BatchGetDeploymentInstances() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.BatchGetDeploymentInstancesInput{ + DeploymentId: aws.String("DeploymentId"), // Required + InstanceIds: []*string{ // Required + aws.String("InstanceId"), // Required + // More values... 
+ }, + } + resp, err := svc.BatchGetDeploymentInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_BatchGetDeployments() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.BatchGetDeploymentsInput{ + DeploymentIds: []*string{ + aws.String("DeploymentId"), // Required + // More values... + }, + } + resp, err := svc.BatchGetDeployments(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_BatchGetOnPremisesInstances() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.BatchGetOnPremisesInstancesInput{ + InstanceNames: []*string{ + aws.String("InstanceName"), // Required + // More values... + }, + } + resp, err := svc.BatchGetOnPremisesInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_CreateApplication() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.CreateApplicationInput{ + ApplicationName: aws.String("ApplicationName"), // Required + } + resp, err := svc.CreateApplication(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_CreateDeployment() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.CreateDeploymentInput{ + ApplicationName: aws.String("ApplicationName"), // Required + DeploymentConfigName: aws.String("DeploymentConfigName"), + DeploymentGroupName: aws.String("DeploymentGroupName"), + Description: aws.String("Description"), + IgnoreApplicationStopFailures: aws.Bool(true), + Revision: &codedeploy.RevisionLocation{ + GitHubLocation: &codedeploy.GitHubLocation{ + CommitId: aws.String("CommitId"), + Repository: aws.String("Repository"), + }, + RevisionType: aws.String("RevisionLocationType"), + S3Location: &codedeploy.S3Location{ + Bucket: aws.String("S3Bucket"), + BundleType: aws.String("BundleType"), + ETag: aws.String("ETag"), + Key: aws.String("S3Key"), + Version: aws.String("VersionId"), + }, + }, + } + resp, err := svc.CreateDeployment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_CreateDeploymentConfig() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.CreateDeploymentConfigInput{ + DeploymentConfigName: aws.String("DeploymentConfigName"), // Required + MinimumHealthyHosts: &codedeploy.MinimumHealthyHosts{ + Type: aws.String("MinimumHealthyHostsType"), + Value: aws.Int64(1), + }, + } + resp, err := svc.CreateDeploymentConfig(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCodeDeploy_CreateDeploymentGroup() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.CreateDeploymentGroupInput{ + ApplicationName: aws.String("ApplicationName"), // Required + DeploymentGroupName: aws.String("DeploymentGroupName"), // Required + ServiceRoleArn: aws.String("Role"), // Required + AutoScalingGroups: []*string{ + aws.String("AutoScalingGroupName"), // Required + // More values... + }, + DeploymentConfigName: aws.String("DeploymentConfigName"), + Ec2TagFilters: []*codedeploy.EC2TagFilter{ + { // Required + Key: aws.String("Key"), + Type: aws.String("EC2TagFilterType"), + Value: aws.String("Value"), + }, + // More values... + }, + OnPremisesInstanceTagFilters: []*codedeploy.TagFilter{ + { // Required + Key: aws.String("Key"), + Type: aws.String("TagFilterType"), + Value: aws.String("Value"), + }, + // More values... + }, + TriggerConfigurations: []*codedeploy.TriggerConfig{ + { // Required + TriggerEvents: []*string{ + aws.String("TriggerEventType"), // Required + // More values... + }, + TriggerName: aws.String("TriggerName"), + TriggerTargetArn: aws.String("TriggerTargetArn"), + }, + // More values... + }, + } + resp, err := svc.CreateDeploymentGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_DeleteApplication() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.DeleteApplicationInput{ + ApplicationName: aws.String("ApplicationName"), // Required + } + resp, err := svc.DeleteApplication(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_DeleteDeploymentConfig() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.DeleteDeploymentConfigInput{ + DeploymentConfigName: aws.String("DeploymentConfigName"), // Required + } + resp, err := svc.DeleteDeploymentConfig(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_DeleteDeploymentGroup() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.DeleteDeploymentGroupInput{ + ApplicationName: aws.String("ApplicationName"), // Required + DeploymentGroupName: aws.String("DeploymentGroupName"), // Required + } + resp, err := svc.DeleteDeploymentGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_DeregisterOnPremisesInstance() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.DeregisterOnPremisesInstanceInput{ + InstanceName: aws.String("InstanceName"), // Required + } + resp, err := svc.DeregisterOnPremisesInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCodeDeploy_GetApplication() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.GetApplicationInput{ + ApplicationName: aws.String("ApplicationName"), // Required + } + resp, err := svc.GetApplication(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_GetApplicationRevision() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.GetApplicationRevisionInput{ + ApplicationName: aws.String("ApplicationName"), // Required + Revision: &codedeploy.RevisionLocation{ // Required + GitHubLocation: &codedeploy.GitHubLocation{ + CommitId: aws.String("CommitId"), + Repository: aws.String("Repository"), + }, + RevisionType: aws.String("RevisionLocationType"), + S3Location: &codedeploy.S3Location{ + Bucket: aws.String("S3Bucket"), + BundleType: aws.String("BundleType"), + ETag: aws.String("ETag"), + Key: aws.String("S3Key"), + Version: aws.String("VersionId"), + }, + }, + } + resp, err := svc.GetApplicationRevision(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_GetDeployment() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.GetDeploymentInput{ + DeploymentId: aws.String("DeploymentId"), // Required + } + resp, err := svc.GetDeployment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_GetDeploymentConfig() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.GetDeploymentConfigInput{ + DeploymentConfigName: aws.String("DeploymentConfigName"), // Required + } + resp, err := svc.GetDeploymentConfig(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_GetDeploymentGroup() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.GetDeploymentGroupInput{ + ApplicationName: aws.String("ApplicationName"), // Required + DeploymentGroupName: aws.String("DeploymentGroupName"), // Required + } + resp, err := svc.GetDeploymentGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_GetDeploymentInstance() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.GetDeploymentInstanceInput{ + DeploymentId: aws.String("DeploymentId"), // Required + InstanceId: aws.String("InstanceId"), // Required + } + resp, err := svc.GetDeploymentInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCodeDeploy_GetOnPremisesInstance() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.GetOnPremisesInstanceInput{ + InstanceName: aws.String("InstanceName"), // Required + } + resp, err := svc.GetOnPremisesInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_ListApplicationRevisions() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.ListApplicationRevisionsInput{ + ApplicationName: aws.String("ApplicationName"), // Required + Deployed: aws.String("ListStateFilterAction"), + NextToken: aws.String("NextToken"), + S3Bucket: aws.String("S3Bucket"), + S3KeyPrefix: aws.String("S3Key"), + SortBy: aws.String("ApplicationRevisionSortBy"), + SortOrder: aws.String("SortOrder"), + } + resp, err := svc.ListApplicationRevisions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_ListApplications() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.ListApplicationsInput{ + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListApplications(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_ListDeploymentConfigs() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.ListDeploymentConfigsInput{ + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListDeploymentConfigs(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_ListDeploymentGroups() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.ListDeploymentGroupsInput{ + ApplicationName: aws.String("ApplicationName"), // Required + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListDeploymentGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_ListDeploymentInstances() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.ListDeploymentInstancesInput{ + DeploymentId: aws.String("DeploymentId"), // Required + InstanceStatusFilter: []*string{ + aws.String("InstanceStatus"), // Required + // More values... + }, + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListDeploymentInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCodeDeploy_ListDeployments() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.ListDeploymentsInput{ + ApplicationName: aws.String("ApplicationName"), + CreateTimeRange: &codedeploy.TimeRange{ + End: aws.Time(time.Now()), + Start: aws.Time(time.Now()), + }, + DeploymentGroupName: aws.String("DeploymentGroupName"), + IncludeOnlyStatuses: []*string{ + aws.String("DeploymentStatus"), // Required + // More values... + }, + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListDeployments(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_ListOnPremisesInstances() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.ListOnPremisesInstancesInput{ + NextToken: aws.String("NextToken"), + RegistrationStatus: aws.String("RegistrationStatus"), + TagFilters: []*codedeploy.TagFilter{ + { // Required + Key: aws.String("Key"), + Type: aws.String("TagFilterType"), + Value: aws.String("Value"), + }, + // More values... + }, + } + resp, err := svc.ListOnPremisesInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_RegisterApplicationRevision() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.RegisterApplicationRevisionInput{ + ApplicationName: aws.String("ApplicationName"), // Required + Revision: &codedeploy.RevisionLocation{ // Required + GitHubLocation: &codedeploy.GitHubLocation{ + CommitId: aws.String("CommitId"), + Repository: aws.String("Repository"), + }, + RevisionType: aws.String("RevisionLocationType"), + S3Location: &codedeploy.S3Location{ + Bucket: aws.String("S3Bucket"), + BundleType: aws.String("BundleType"), + ETag: aws.String("ETag"), + Key: aws.String("S3Key"), + Version: aws.String("VersionId"), + }, + }, + Description: aws.String("Description"), + } + resp, err := svc.RegisterApplicationRevision(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_RegisterOnPremisesInstance() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.RegisterOnPremisesInstanceInput{ + IamUserArn: aws.String("IamUserArn"), // Required + InstanceName: aws.String("InstanceName"), // Required + } + resp, err := svc.RegisterOnPremisesInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_RemoveTagsFromOnPremisesInstances() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.RemoveTagsFromOnPremisesInstancesInput{ + InstanceNames: []*string{ // Required + aws.String("InstanceName"), // Required + // More values... + }, + Tags: []*codedeploy.Tag{ // Required + { // Required + Key: aws.String("Key"), + Value: aws.String("Value"), + }, + // More values... 
+ }, + } + resp, err := svc.RemoveTagsFromOnPremisesInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_StopDeployment() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.StopDeploymentInput{ + DeploymentId: aws.String("DeploymentId"), // Required + } + resp, err := svc.StopDeployment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_UpdateApplication() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.UpdateApplicationInput{ + ApplicationName: aws.String("ApplicationName"), + NewApplicationName: aws.String("ApplicationName"), + } + resp, err := svc.UpdateApplication(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_UpdateDeploymentGroup() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.UpdateDeploymentGroupInput{ + ApplicationName: aws.String("ApplicationName"), // Required + CurrentDeploymentGroupName: aws.String("DeploymentGroupName"), // Required + AutoScalingGroups: []*string{ + aws.String("AutoScalingGroupName"), // Required + // More values... + }, + DeploymentConfigName: aws.String("DeploymentConfigName"), + Ec2TagFilters: []*codedeploy.EC2TagFilter{ + { // Required + Key: aws.String("Key"), + Type: aws.String("EC2TagFilterType"), + Value: aws.String("Value"), + }, + // More values... + }, + NewDeploymentGroupName: aws.String("DeploymentGroupName"), + OnPremisesInstanceTagFilters: []*codedeploy.TagFilter{ + { // Required + Key: aws.String("Key"), + Type: aws.String("TagFilterType"), + Value: aws.String("Value"), + }, + // More values... + }, + ServiceRoleArn: aws.String("Role"), + TriggerConfigurations: []*codedeploy.TriggerConfig{ + { // Required + TriggerEvents: []*string{ + aws.String("TriggerEventType"), // Required + // More values... + }, + TriggerName: aws.String("TriggerName"), + TriggerTargetArn: aws.String("TriggerTargetArn"), + }, + // More values... + }, + } + resp, err := svc.UpdateDeploymentGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/codedeploy/service.go b/vendor/github.com/aws/aws-sdk-go/service/codedeploy/service.go new file mode 100644 index 000000000..e89d8da85 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/codedeploy/service.go @@ -0,0 +1,135 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package codedeploy + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// Overview This reference guide provides descriptions of the AWS CodeDeploy +// APIs. 
For more information about AWS CodeDeploy, see the AWS CodeDeploy User +// Guide (docs.aws.amazon.com/codedeploy/latest/userguide). +// +// Using the APIs You can use the AWS CodeDeploy APIs to work with the following: +// +// Applications are unique identifiers used by AWS CodeDeploy to ensure the +// correct combinations of revisions, deployment configurations, and deployment +// groups are being referenced during deployments. +// +// You can use the AWS CodeDeploy APIs to create, delete, get, list, and update +// applications. +// +// Deployment configurations are sets of deployment rules and success and +// failure conditions used by AWS CodeDeploy during deployments. +// +// You can use the AWS CodeDeploy APIs to create, delete, get, and list deployment +// configurations. +// +// Deployment groups are groups of instances to which application revisions +// can be deployed. +// +// You can use the AWS CodeDeploy APIs to create, delete, get, list, and update +// deployment groups. +// +// Instances represent Amazon EC2 instances to which application revisions +// are deployed. Instances are identified by their Amazon EC2 tags or Auto Scaling +// group names. Instances belong to deployment groups. +// +// You can use the AWS CodeDeploy APIs to get and list instances. +// +// Deployments represent the process of deploying revisions to instances. +// +// You can use the AWS CodeDeploy APIs to create, get, list, and stop deployments. +// +// Application revisions are archive files stored in Amazon S3 buckets or +// GitHub repositories. These revisions contain source content (such as source +// code, web pages, executable files, and deployment scripts) along with an +// application specification (AppSpec) file. (The AppSpec file is unique to +// AWS CodeDeploy; it defines the deployment actions you want AWS CodeDeploy +// to execute.) For application revisions stored in Amazon S3 buckets, an application +// revision is uniquely identified by its Amazon S3 object key and its ETag, +// version, or both. For application revisions stored in GitHub repositories, +// an application revision is uniquely identified by its repository name and +// commit ID. Application revisions are deployed through deployment groups. +// +// You can use the AWS CodeDeploy APIs to get, list, and register application +// revisions. +// +// The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type CodeDeploy struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "codedeploy" + +// New creates a new instance of the CodeDeploy client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a CodeDeploy client from just a session. +// svc := codedeploy.New(mySession) +// +// // Create a CodeDeploy client with additional configuration +// svc := codedeploy.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *CodeDeploy { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance.
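The generated examples above repeatedly note that the returned err can be cast to awserr.Error to get the Code and Message, but never show the cast itself. A short sketch of that inspection, assuming only the awserr package that ships with this SDK (describeError is a hypothetical helper name):

    package myapp

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/awserr"
    )

    // describeError unpacks the structured fields an SDK error carries.
    func describeError(err error) {
        if aerr, ok := err.(awserr.Error); ok {
            fmt.Println("code:", aerr.Code())       // e.g. a service exception name
            fmt.Println("message:", aerr.Message()) // human-readable detail
            // Errors from a failed HTTP round trip also satisfy RequestFailure.
            if reqErr, ok := err.(awserr.RequestFailure); ok {
                fmt.Println("status:", reqErr.StatusCode(), "request id:", reqErr.RequestID())
            }
            return
        }
        fmt.Println(err.Error())
    }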
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *CodeDeploy { + svc := &CodeDeploy{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-10-06", + JSONVersion: "1.1", + TargetPrefix: "CodeDeploy_20141006", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a CodeDeploy operation and runs any +// custom request initialization. +func (c *CodeDeploy) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/codepipeline/api.go b/vendor/github.com/aws/aws-sdk-go/service/codepipeline/api.go new file mode 100644 index 000000000..3459bbf67 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/codepipeline/api.go @@ -0,0 +1,4535 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package codepipeline provides a client for AWS CodePipeline. +package codepipeline + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opAcknowledgeJob = "AcknowledgeJob" + +// AcknowledgeJobRequest generates a "aws/request.Request" representing the +// client's request for the AcknowledgeJob operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AcknowledgeJob method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AcknowledgeJobRequest method. +// req, resp := client.AcknowledgeJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodePipeline) AcknowledgeJobRequest(input *AcknowledgeJobInput) (req *request.Request, output *AcknowledgeJobOutput) { + op := &request.Operation{ + Name: opAcknowledgeJob, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AcknowledgeJobInput{} + } + + req = c.newRequest(op, input, output) + output = &AcknowledgeJobOutput{} + req.Data = output + return +} + +// Returns information about a specified job and whether that job has been received +// by the job worker. Only used for custom actions. 
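The *Request doc comments above describe creating the request object in order to inject custom logic into its lifecycle before Send is called. A minimal sketch of what that looks like for AcknowledgeJob, assuming a hypothetical wrapper ackWithLogging (the log line is illustrative, not SDK output):

    package myapp

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/request"
        "github.com/aws/aws-sdk-go/service/codepipeline"
    )

    // ackWithLogging hooks the Send phase of one request before executing it.
    func ackWithLogging(client *codepipeline.CodePipeline, params *codepipeline.AcknowledgeJobInput) (*codepipeline.AcknowledgeJobOutput, error) {
        req, resp := client.AcknowledgeJobRequest(params)

        // Build, Sign, Send, Unmarshal, etc. are all handler lists on
        // req.Handlers and accept hooks of this same shape.
        req.Handlers.Send.PushFront(func(r *request.Request) {
            fmt.Println("about to call", r.Operation.Name) // illustrative
        })

        err := req.Send()
        return resp, err
    }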
+func (c *CodePipeline) AcknowledgeJob(input *AcknowledgeJobInput) (*AcknowledgeJobOutput, error) { + req, out := c.AcknowledgeJobRequest(input) + err := req.Send() + return out, err +} + +const opAcknowledgeThirdPartyJob = "AcknowledgeThirdPartyJob" + +// AcknowledgeThirdPartyJobRequest generates a "aws/request.Request" representing the +// client's request for the AcknowledgeThirdPartyJob operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AcknowledgeThirdPartyJob method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AcknowledgeThirdPartyJobRequest method. +// req, resp := client.AcknowledgeThirdPartyJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodePipeline) AcknowledgeThirdPartyJobRequest(input *AcknowledgeThirdPartyJobInput) (req *request.Request, output *AcknowledgeThirdPartyJobOutput) { + op := &request.Operation{ + Name: opAcknowledgeThirdPartyJob, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AcknowledgeThirdPartyJobInput{} + } + + req = c.newRequest(op, input, output) + output = &AcknowledgeThirdPartyJobOutput{} + req.Data = output + return +} + +// Confirms a job worker has received the specified job. Only used for partner +// actions. +func (c *CodePipeline) AcknowledgeThirdPartyJob(input *AcknowledgeThirdPartyJobInput) (*AcknowledgeThirdPartyJobOutput, error) { + req, out := c.AcknowledgeThirdPartyJobRequest(input) + err := req.Send() + return out, err +} + +const opCreateCustomActionType = "CreateCustomActionType" + +// CreateCustomActionTypeRequest generates a "aws/request.Request" representing the +// client's request for the CreateCustomActionType operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateCustomActionType method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateCustomActionTypeRequest method. 
+// req, resp := client.CreateCustomActionTypeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodePipeline) CreateCustomActionTypeRequest(input *CreateCustomActionTypeInput) (req *request.Request, output *CreateCustomActionTypeOutput) { + op := &request.Operation{ + Name: opCreateCustomActionType, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateCustomActionTypeInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateCustomActionTypeOutput{} + req.Data = output + return +} + +// Creates a new custom action that can be used in all pipelines associated +// with the AWS account. Only used for custom actions. +func (c *CodePipeline) CreateCustomActionType(input *CreateCustomActionTypeInput) (*CreateCustomActionTypeOutput, error) { + req, out := c.CreateCustomActionTypeRequest(input) + err := req.Send() + return out, err +} + +const opCreatePipeline = "CreatePipeline" + +// CreatePipelineRequest generates a "aws/request.Request" representing the +// client's request for the CreatePipeline operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreatePipeline method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreatePipelineRequest method. +// req, resp := client.CreatePipelineRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodePipeline) CreatePipelineRequest(input *CreatePipelineInput) (req *request.Request, output *CreatePipelineOutput) { + op := &request.Operation{ + Name: opCreatePipeline, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreatePipelineInput{} + } + + req = c.newRequest(op, input, output) + output = &CreatePipelineOutput{} + req.Data = output + return +} + +// Creates a pipeline. +func (c *CodePipeline) CreatePipeline(input *CreatePipelineInput) (*CreatePipelineOutput, error) { + req, out := c.CreatePipelineRequest(input) + err := req.Send() + return out, err +} + +const opDeleteCustomActionType = "DeleteCustomActionType" + +// DeleteCustomActionTypeRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCustomActionType operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteCustomActionType method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteCustomActionTypeRequest method. 
+// req, resp := client.DeleteCustomActionTypeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodePipeline) DeleteCustomActionTypeRequest(input *DeleteCustomActionTypeInput) (req *request.Request, output *DeleteCustomActionTypeOutput) { + op := &request.Operation{ + Name: opDeleteCustomActionType, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteCustomActionTypeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteCustomActionTypeOutput{} + req.Data = output + return +} + +// Marks a custom action as deleted. PollForJobs for the custom action will +// fail after the action is marked for deletion. Only used for custom actions. +// +// You cannot recreate a custom action after it has been deleted unless you +// increase the version number of the action. +func (c *CodePipeline) DeleteCustomActionType(input *DeleteCustomActionTypeInput) (*DeleteCustomActionTypeOutput, error) { + req, out := c.DeleteCustomActionTypeRequest(input) + err := req.Send() + return out, err +} + +const opDeletePipeline = "DeletePipeline" + +// DeletePipelineRequest generates a "aws/request.Request" representing the +// client's request for the DeletePipeline operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeletePipeline method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeletePipelineRequest method. +// req, resp := client.DeletePipelineRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodePipeline) DeletePipelineRequest(input *DeletePipelineInput) (req *request.Request, output *DeletePipelineOutput) { + op := &request.Operation{ + Name: opDeletePipeline, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeletePipelineInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeletePipelineOutput{} + req.Data = output + return +} + +// Deletes the specified pipeline. +func (c *CodePipeline) DeletePipeline(input *DeletePipelineInput) (*DeletePipelineOutput, error) { + req, out := c.DeletePipelineRequest(input) + err := req.Send() + return out, err +} + +const opDisableStageTransition = "DisableStageTransition" + +// DisableStageTransitionRequest generates a "aws/request.Request" representing the +// client's request for the DisableStageTransition operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DisableStageTransition method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DisableStageTransitionRequest method. +// req, resp := client.DisableStageTransitionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodePipeline) DisableStageTransitionRequest(input *DisableStageTransitionInput) (req *request.Request, output *DisableStageTransitionOutput) { + op := &request.Operation{ + Name: opDisableStageTransition, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableStageTransitionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DisableStageTransitionOutput{} + req.Data = output + return +} + +// Prevents artifacts in a pipeline from transitioning to the next stage in +// the pipeline. +func (c *CodePipeline) DisableStageTransition(input *DisableStageTransitionInput) (*DisableStageTransitionOutput, error) { + req, out := c.DisableStageTransitionRequest(input) + err := req.Send() + return out, err +} + +const opEnableStageTransition = "EnableStageTransition" + +// EnableStageTransitionRequest generates a "aws/request.Request" representing the +// client's request for the EnableStageTransition operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the EnableStageTransition method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EnableStageTransitionRequest method. +// req, resp := client.EnableStageTransitionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodePipeline) EnableStageTransitionRequest(input *EnableStageTransitionInput) (req *request.Request, output *EnableStageTransitionOutput) { + op := &request.Operation{ + Name: opEnableStageTransition, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableStageTransitionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &EnableStageTransitionOutput{} + req.Data = output + return +} + +// Enables artifacts in a pipeline to transition to a stage in a pipeline. 
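EnableStageTransition reverses DisableStageTransition, so the two are typically used as a pair. A brief usage sketch under assumed placeholder names: the pipeline and stage names are invented, and TransitionType takes the string "Inbound" or "Outbound" per the StageTransitionType enum:

    package myapp

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/codepipeline"
    )

    // pauseDeployStage blocks new revisions from entering a stage, e.g. for
    // a maintenance window; resumeDeployStage lifts the block again.
    func pauseDeployStage(client *codepipeline.CodePipeline) error {
        _, err := client.DisableStageTransition(&codepipeline.DisableStageTransitionInput{
            PipelineName:   aws.String("MyPipeline"), // placeholder
            StageName:      aws.String("Deploy"),     // placeholder
            TransitionType: aws.String("Inbound"),
            Reason:         aws.String("maintenance window"),
        })
        return err
    }

    func resumeDeployStage(client *codepipeline.CodePipeline) error {
        _, err := client.EnableStageTransition(&codepipeline.EnableStageTransitionInput{
            PipelineName:   aws.String("MyPipeline"), // placeholder
            StageName:      aws.String("Deploy"),     // placeholder
            TransitionType: aws.String("Inbound"),
        })
        return err
    }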
+func (c *CodePipeline) EnableStageTransition(input *EnableStageTransitionInput) (*EnableStageTransitionOutput, error) { + req, out := c.EnableStageTransitionRequest(input) + err := req.Send() + return out, err +} + +const opGetJobDetails = "GetJobDetails" + +// GetJobDetailsRequest generates a "aws/request.Request" representing the +// client's request for the GetJobDetails operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetJobDetails method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetJobDetailsRequest method. +// req, resp := client.GetJobDetailsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodePipeline) GetJobDetailsRequest(input *GetJobDetailsInput) (req *request.Request, output *GetJobDetailsOutput) { + op := &request.Operation{ + Name: opGetJobDetails, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetJobDetailsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetJobDetailsOutput{} + req.Data = output + return +} + +// Returns information about a job. Only used for custom actions. +// +// When this API is called, AWS CodePipeline returns temporary credentials +// for the Amazon S3 bucket used to store artifacts for the pipeline, if the +// action requires access to that Amazon S3 bucket for input or output artifacts. +// Additionally, this API returns any secret values defined for the action. +func (c *CodePipeline) GetJobDetails(input *GetJobDetailsInput) (*GetJobDetailsOutput, error) { + req, out := c.GetJobDetailsRequest(input) + err := req.Send() + return out, err +} + +const opGetPipeline = "GetPipeline" + +// GetPipelineRequest generates a "aws/request.Request" representing the +// client's request for the GetPipeline operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetPipeline method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetPipelineRequest method. 
+// req, resp := client.GetPipelineRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodePipeline) GetPipelineRequest(input *GetPipelineInput) (req *request.Request, output *GetPipelineOutput) { + op := &request.Operation{ + Name: opGetPipeline, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetPipelineInput{} + } + + req = c.newRequest(op, input, output) + output = &GetPipelineOutput{} + req.Data = output + return +} + +// Returns the metadata, structure, stages, and actions of a pipeline. Can be +// used to return the entire structure of a pipeline in JSON format, which can +// then be modified and used to update the pipeline structure with UpdatePipeline. +func (c *CodePipeline) GetPipeline(input *GetPipelineInput) (*GetPipelineOutput, error) { + req, out := c.GetPipelineRequest(input) + err := req.Send() + return out, err +} + +const opGetPipelineState = "GetPipelineState" + +// GetPipelineStateRequest generates a "aws/request.Request" representing the +// client's request for the GetPipelineState operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetPipelineState method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetPipelineStateRequest method. +// req, resp := client.GetPipelineStateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodePipeline) GetPipelineStateRequest(input *GetPipelineStateInput) (req *request.Request, output *GetPipelineStateOutput) { + op := &request.Operation{ + Name: opGetPipelineState, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetPipelineStateInput{} + } + + req = c.newRequest(op, input, output) + output = &GetPipelineStateOutput{} + req.Data = output + return +} + +// Returns information about the state of a pipeline, including the stages and +// actions. +func (c *CodePipeline) GetPipelineState(input *GetPipelineStateInput) (*GetPipelineStateOutput, error) { + req, out := c.GetPipelineStateRequest(input) + err := req.Send() + return out, err +} + +const opGetThirdPartyJobDetails = "GetThirdPartyJobDetails" + +// GetThirdPartyJobDetailsRequest generates a "aws/request.Request" representing the +// client's request for the GetThirdPartyJobDetails operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetThirdPartyJobDetails method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetThirdPartyJobDetailsRequest method. 
+// req, resp := client.GetThirdPartyJobDetailsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodePipeline) GetThirdPartyJobDetailsRequest(input *GetThirdPartyJobDetailsInput) (req *request.Request, output *GetThirdPartyJobDetailsOutput) { + op := &request.Operation{ + Name: opGetThirdPartyJobDetails, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetThirdPartyJobDetailsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetThirdPartyJobDetailsOutput{} + req.Data = output + return +} + +// Requests the details of a job for a third party action. Only used for partner +// actions. +// +// When this API is called, AWS CodePipeline returns temporary credentials +// for the Amazon S3 bucket used to store artifacts for the pipeline, if the +// action requires access to that Amazon S3 bucket for input or output artifacts. +// Additionally, this API returns any secret values defined for the action. +func (c *CodePipeline) GetThirdPartyJobDetails(input *GetThirdPartyJobDetailsInput) (*GetThirdPartyJobDetailsOutput, error) { + req, out := c.GetThirdPartyJobDetailsRequest(input) + err := req.Send() + return out, err +} + +const opListActionTypes = "ListActionTypes" + +// ListActionTypesRequest generates a "aws/request.Request" representing the +// client's request for the ListActionTypes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListActionTypes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListActionTypesRequest method. +// req, resp := client.ListActionTypesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodePipeline) ListActionTypesRequest(input *ListActionTypesInput) (req *request.Request, output *ListActionTypesOutput) { + op := &request.Operation{ + Name: opListActionTypes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListActionTypesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListActionTypesOutput{} + req.Data = output + return +} + +// Gets a summary of all AWS CodePipeline action types associated with your +// account. +func (c *CodePipeline) ListActionTypes(input *ListActionTypesInput) (*ListActionTypesOutput, error) { + req, out := c.ListActionTypesRequest(input) + err := req.Send() + return out, err +} + +const opListPipelines = "ListPipelines" + +// ListPipelinesRequest generates a "aws/request.Request" representing the +// client's request for the ListPipelines operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the ListPipelines method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListPipelinesRequest method. +// req, resp := client.ListPipelinesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodePipeline) ListPipelinesRequest(input *ListPipelinesInput) (req *request.Request, output *ListPipelinesOutput) { + op := &request.Operation{ + Name: opListPipelines, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListPipelinesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListPipelinesOutput{} + req.Data = output + return +} + +// Gets a summary of all of the pipelines associated with your account. +func (c *CodePipeline) ListPipelines(input *ListPipelinesInput) (*ListPipelinesOutput, error) { + req, out := c.ListPipelinesRequest(input) + err := req.Send() + return out, err +} + +const opPollForJobs = "PollForJobs" + +// PollForJobsRequest generates a "aws/request.Request" representing the +// client's request for the PollForJobs operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PollForJobs method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PollForJobsRequest method. +// req, resp := client.PollForJobsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodePipeline) PollForJobsRequest(input *PollForJobsInput) (req *request.Request, output *PollForJobsOutput) { + op := &request.Operation{ + Name: opPollForJobs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PollForJobsInput{} + } + + req = c.newRequest(op, input, output) + output = &PollForJobsOutput{} + req.Data = output + return +} + +// Returns information about any jobs for AWS CodePipeline to act upon. +// +// When this API is called, AWS CodePipeline returns temporary credentials +// for the Amazon S3 bucket used to store artifacts for the pipeline, if the +// action requires access to that Amazon S3 bucket for input or output artifacts. +// Additionally, this API returns any secret values defined for the action. +func (c *CodePipeline) PollForJobs(input *PollForJobsInput) (*PollForJobsOutput, error) { + req, out := c.PollForJobsRequest(input) + err := req.Send() + return out, err +} + +const opPollForThirdPartyJobs = "PollForThirdPartyJobs" + +// PollForThirdPartyJobsRequest generates a "aws/request.Request" representing the +// client's request for the PollForThirdPartyJobs operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PollForThirdPartyJobs method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PollForThirdPartyJobsRequest method. +// req, resp := client.PollForThirdPartyJobsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodePipeline) PollForThirdPartyJobsRequest(input *PollForThirdPartyJobsInput) (req *request.Request, output *PollForThirdPartyJobsOutput) { + op := &request.Operation{ + Name: opPollForThirdPartyJobs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PollForThirdPartyJobsInput{} + } + + req = c.newRequest(op, input, output) + output = &PollForThirdPartyJobsOutput{} + req.Data = output + return +} + +// Determines whether there are any third party jobs for a job worker to act +// on. Only used for partner actions. +// +// When this API is called, AWS CodePipeline returns temporary credentials +// for the Amazon S3 bucket used to store artifacts for the pipeline, if the +// action requires access to that Amazon S3 bucket for input or output artifacts. +func (c *CodePipeline) PollForThirdPartyJobs(input *PollForThirdPartyJobsInput) (*PollForThirdPartyJobsOutput, error) { + req, out := c.PollForThirdPartyJobsRequest(input) + err := req.Send() + return out, err +} + +const opPutActionRevision = "PutActionRevision" + +// PutActionRevisionRequest generates a "aws/request.Request" representing the +// client's request for the PutActionRevision operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutActionRevision method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutActionRevisionRequest method. +// req, resp := client.PutActionRevisionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodePipeline) PutActionRevisionRequest(input *PutActionRevisionInput) (req *request.Request, output *PutActionRevisionOutput) { + op := &request.Operation{ + Name: opPutActionRevision, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutActionRevisionInput{} + } + + req = c.newRequest(op, input, output) + output = &PutActionRevisionOutput{} + req.Data = output + return +} + +// Provides information to AWS CodePipeline about new revisions to a source. 
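+//
+// A minimal usage sketch, assuming a configured client svc; every identifier
+// value below is hypothetical:
+//
+//    out, err := svc.PutActionRevision(&codepipeline.PutActionRevisionInput{
+//        PipelineName: aws.String("MyPipeline"),
+//        StageName:    aws.String("Source"),
+//        ActionName:   aws.String("SourceAction"),
+//        ActionRevision: &codepipeline.ActionRevision{
+//            Created:          aws.Time(time.Now()),
+//            RevisionChangeId: aws.String("change-0001"),
+//            RevisionId:       aws.String("abcdef0123456789"),
+//        },
+//    })
+//    if err == nil {
+//        fmt.Println(out)
+//    }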
+func (c *CodePipeline) PutActionRevision(input *PutActionRevisionInput) (*PutActionRevisionOutput, error) { + req, out := c.PutActionRevisionRequest(input) + err := req.Send() + return out, err +} + +const opPutApprovalResult = "PutApprovalResult" + +// PutApprovalResultRequest generates a "aws/request.Request" representing the +// client's request for the PutApprovalResult operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutApprovalResult method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutApprovalResultRequest method. +// req, resp := client.PutApprovalResultRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodePipeline) PutApprovalResultRequest(input *PutApprovalResultInput) (req *request.Request, output *PutApprovalResultOutput) { + op := &request.Operation{ + Name: opPutApprovalResult, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutApprovalResultInput{} + } + + req = c.newRequest(op, input, output) + output = &PutApprovalResultOutput{} + req.Data = output + return +} + +// Provides the response to a manual approval request to AWS CodePipeline. Valid +// responses include Approved and Rejected. +func (c *CodePipeline) PutApprovalResult(input *PutApprovalResultInput) (*PutApprovalResultOutput, error) { + req, out := c.PutApprovalResultRequest(input) + err := req.Send() + return out, err +} + +const opPutJobFailureResult = "PutJobFailureResult" + +// PutJobFailureResultRequest generates a "aws/request.Request" representing the +// client's request for the PutJobFailureResult operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutJobFailureResult method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutJobFailureResultRequest method. 
+// req, resp := client.PutJobFailureResultRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodePipeline) PutJobFailureResultRequest(input *PutJobFailureResultInput) (req *request.Request, output *PutJobFailureResultOutput) { + op := &request.Operation{ + Name: opPutJobFailureResult, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutJobFailureResultInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutJobFailureResultOutput{} + req.Data = output + return +} + +// Represents the failure of a job as returned to the pipeline by a job worker. +// Only used for custom actions. +func (c *CodePipeline) PutJobFailureResult(input *PutJobFailureResultInput) (*PutJobFailureResultOutput, error) { + req, out := c.PutJobFailureResultRequest(input) + err := req.Send() + return out, err +} + +const opPutJobSuccessResult = "PutJobSuccessResult" + +// PutJobSuccessResultRequest generates a "aws/request.Request" representing the +// client's request for the PutJobSuccessResult operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutJobSuccessResult method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutJobSuccessResultRequest method. +// req, resp := client.PutJobSuccessResultRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodePipeline) PutJobSuccessResultRequest(input *PutJobSuccessResultInput) (req *request.Request, output *PutJobSuccessResultOutput) { + op := &request.Operation{ + Name: opPutJobSuccessResult, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutJobSuccessResultInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutJobSuccessResultOutput{} + req.Data = output + return +} + +// Represents the success of a job as returned to the pipeline by a job worker. +// Only used for custom actions. +func (c *CodePipeline) PutJobSuccessResult(input *PutJobSuccessResultInput) (*PutJobSuccessResultOutput, error) { + req, out := c.PutJobSuccessResultRequest(input) + err := req.Send() + return out, err +} + +const opPutThirdPartyJobFailureResult = "PutThirdPartyJobFailureResult" + +// PutThirdPartyJobFailureResultRequest generates a "aws/request.Request" representing the +// client's request for the PutThirdPartyJobFailureResult operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutThirdPartyJobFailureResult method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutThirdPartyJobFailureResultRequest method. +// req, resp := client.PutThirdPartyJobFailureResultRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodePipeline) PutThirdPartyJobFailureResultRequest(input *PutThirdPartyJobFailureResultInput) (req *request.Request, output *PutThirdPartyJobFailureResultOutput) { + op := &request.Operation{ + Name: opPutThirdPartyJobFailureResult, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutThirdPartyJobFailureResultInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutThirdPartyJobFailureResultOutput{} + req.Data = output + return +} + +// Represents the failure of a third party job as returned to the pipeline by +// a job worker. Only used for partner actions. +func (c *CodePipeline) PutThirdPartyJobFailureResult(input *PutThirdPartyJobFailureResultInput) (*PutThirdPartyJobFailureResultOutput, error) { + req, out := c.PutThirdPartyJobFailureResultRequest(input) + err := req.Send() + return out, err +} + +const opPutThirdPartyJobSuccessResult = "PutThirdPartyJobSuccessResult" + +// PutThirdPartyJobSuccessResultRequest generates a "aws/request.Request" representing the +// client's request for the PutThirdPartyJobSuccessResult operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutThirdPartyJobSuccessResult method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutThirdPartyJobSuccessResultRequest method. 
+// req, resp := client.PutThirdPartyJobSuccessResultRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodePipeline) PutThirdPartyJobSuccessResultRequest(input *PutThirdPartyJobSuccessResultInput) (req *request.Request, output *PutThirdPartyJobSuccessResultOutput) { + op := &request.Operation{ + Name: opPutThirdPartyJobSuccessResult, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutThirdPartyJobSuccessResultInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutThirdPartyJobSuccessResultOutput{} + req.Data = output + return +} + +// Represents the success of a third party job as returned to the pipeline by +// a job worker. Only used for partner actions. +func (c *CodePipeline) PutThirdPartyJobSuccessResult(input *PutThirdPartyJobSuccessResultInput) (*PutThirdPartyJobSuccessResultOutput, error) { + req, out := c.PutThirdPartyJobSuccessResultRequest(input) + err := req.Send() + return out, err +} + +const opRetryStageExecution = "RetryStageExecution" + +// RetryStageExecutionRequest generates a "aws/request.Request" representing the +// client's request for the RetryStageExecution operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RetryStageExecution method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RetryStageExecutionRequest method. +// req, resp := client.RetryStageExecutionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodePipeline) RetryStageExecutionRequest(input *RetryStageExecutionInput) (req *request.Request, output *RetryStageExecutionOutput) { + op := &request.Operation{ + Name: opRetryStageExecution, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RetryStageExecutionInput{} + } + + req = c.newRequest(op, input, output) + output = &RetryStageExecutionOutput{} + req.Data = output + return +} + +// Resumes the pipeline execution by retrying the last failed actions in a stage. +func (c *CodePipeline) RetryStageExecution(input *RetryStageExecutionInput) (*RetryStageExecutionOutput, error) { + req, out := c.RetryStageExecutionRequest(input) + err := req.Send() + return out, err +} + +const opStartPipelineExecution = "StartPipelineExecution" + +// StartPipelineExecutionRequest generates a "aws/request.Request" representing the +// client's request for the StartPipelineExecution operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the StartPipelineExecution method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the StartPipelineExecutionRequest method. +// req, resp := client.StartPipelineExecutionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodePipeline) StartPipelineExecutionRequest(input *StartPipelineExecutionInput) (req *request.Request, output *StartPipelineExecutionOutput) { + op := &request.Operation{ + Name: opStartPipelineExecution, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartPipelineExecutionInput{} + } + + req = c.newRequest(op, input, output) + output = &StartPipelineExecutionOutput{} + req.Data = output + return +} + +// Starts the specified pipeline. Specifically, it begins processing the latest +// commit to the source location specified as part of the pipeline. +func (c *CodePipeline) StartPipelineExecution(input *StartPipelineExecutionInput) (*StartPipelineExecutionOutput, error) { + req, out := c.StartPipelineExecutionRequest(input) + err := req.Send() + return out, err +} + +const opUpdatePipeline = "UpdatePipeline" + +// UpdatePipelineRequest generates a "aws/request.Request" representing the +// client's request for the UpdatePipeline operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdatePipeline method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdatePipelineRequest method. +// req, resp := client.UpdatePipelineRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CodePipeline) UpdatePipelineRequest(input *UpdatePipelineInput) (req *request.Request, output *UpdatePipelineOutput) { + op := &request.Operation{ + Name: opUpdatePipeline, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdatePipelineInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdatePipelineOutput{} + req.Data = output + return +} + +// Updates a specified pipeline with edits or changes to its structure. Use +// a JSON file with the pipeline structure in conjunction with UpdatePipeline +// to provide the full structure of the pipeline. Updating the pipeline increases +// the version number of the pipeline by 1. +func (c *CodePipeline) UpdatePipeline(input *UpdatePipelineInput) (*UpdatePipelineOutput, error) { + req, out := c.UpdatePipelineRequest(input) + err := req.Send() + return out, err +} + +// Represents an AWS session credentials object. These credentials are temporary +// credentials that are issued by AWS Security Token Service (STS). They can be +// used to access input and output artifacts in the Amazon S3 bucket used to +// store artifacts for the pipeline in AWS CodePipeline. +type AWSSessionCredentials struct { + _ struct{} `type:"structure"` + + // The access key for the session.
+ AccessKeyId *string `locationName:"accessKeyId" type:"string" required:"true"` + + // The secret access key for the session. + SecretAccessKey *string `locationName:"secretAccessKey" type:"string" required:"true"` + + // The token for the session. + SessionToken *string `locationName:"sessionToken" type:"string" required:"true"` +} + +// String returns the string representation +func (s AWSSessionCredentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AWSSessionCredentials) GoString() string { + return s.String() +} + +// Represents the input of an acknowledge job action. +type AcknowledgeJobInput struct { + _ struct{} `type:"structure"` + + // The unique system-generated ID of the job for which you want to confirm receipt. + JobId *string `locationName:"jobId" type:"string" required:"true"` + + // A system-generated random number that AWS CodePipeline uses to ensure that + // the job is being worked on by only one job worker. This number must be returned + // in the response. + Nonce *string `locationName:"nonce" type:"string" required:"true"` +} + +// String returns the string representation +func (s AcknowledgeJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AcknowledgeJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AcknowledgeJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AcknowledgeJobInput"} + if s.JobId == nil { + invalidParams.Add(request.NewErrParamRequired("JobId")) + } + if s.Nonce == nil { + invalidParams.Add(request.NewErrParamRequired("Nonce")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of an acknowledge job action. +type AcknowledgeJobOutput struct { + _ struct{} `type:"structure"` + + // Whether the job worker has received the specified job. + Status *string `locationName:"status" type:"string" enum:"JobStatus"` +} + +// String returns the string representation +func (s AcknowledgeJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AcknowledgeJobOutput) GoString() string { + return s.String() +} + +// Represents the input of an acknowledge third party job action. +type AcknowledgeThirdPartyJobInput struct { + _ struct{} `type:"structure"` + + // The clientToken portion of the clientId and clientToken pair used to verify + // that the calling entity is allowed access to the job and its details. + ClientToken *string `locationName:"clientToken" type:"string" required:"true"` + + // The unique system-generated ID of the job. + JobId *string `locationName:"jobId" min:"1" type:"string" required:"true"` + + // A system-generated random number that AWS CodePipeline uses to ensure that + // the job is being worked on by only one job worker. This number must be returned + // in the response. + Nonce *string `locationName:"nonce" type:"string" required:"true"` +} + +// String returns the string representation +func (s AcknowledgeThirdPartyJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AcknowledgeThirdPartyJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
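+//
+// Validation also runs automatically when the request is sent; calling it
+// directly is optional. A minimal sketch with hypothetical values:
+//
+//    in := &codepipeline.AcknowledgeThirdPartyJobInput{
+//        ClientToken: aws.String("client-token"),
+//        JobId:       aws.String("job-id"),
+//        Nonce:       aws.String("nonce"),
+//    }
+//    if err := in.Validate(); err != nil {
+//        fmt.Println(err) // lists missing or too-short required fields
+//    }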
+func (s *AcknowledgeThirdPartyJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AcknowledgeThirdPartyJobInput"} + if s.ClientToken == nil { + invalidParams.Add(request.NewErrParamRequired("ClientToken")) + } + if s.JobId == nil { + invalidParams.Add(request.NewErrParamRequired("JobId")) + } + if s.JobId != nil && len(*s.JobId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JobId", 1)) + } + if s.Nonce == nil { + invalidParams.Add(request.NewErrParamRequired("Nonce")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of an acknowledge third party job action. +type AcknowledgeThirdPartyJobOutput struct { + _ struct{} `type:"structure"` + + // The status information for the third party job, if any. + Status *string `locationName:"status" type:"string" enum:"JobStatus"` +} + +// String returns the string representation +func (s AcknowledgeThirdPartyJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AcknowledgeThirdPartyJobOutput) GoString() string { + return s.String() +} + +// Represents information about an action configuration. +type ActionConfiguration struct { + _ struct{} `type:"structure"` + + // The configuration data for the action. + Configuration map[string]*string `locationName:"configuration" type:"map"` +} + +// String returns the string representation +func (s ActionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActionConfiguration) GoString() string { + return s.String() +} + +// Represents information about an action configuration property. +type ActionConfigurationProperty struct { + _ struct{} `type:"structure"` + + // The description of the action configuration property that will be displayed + // to users. + Description *string `locationName:"description" min:"1" type:"string"` + + // Whether the configuration property is a key. + Key *bool `locationName:"key" type:"boolean" required:"true"` + + // The name of the action configuration property. + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // Indicates that the property will be used in conjunction with PollForJobs. + // When creating a custom action, an action can have up to one queryable property. + // If it has one, that property must be both required and not secret. + // + // If you create a pipeline with a custom action type, and that custom action + // contains a queryable property, the value for that configuration property + // is subject to additional restrictions. The value must be less than or equal + // to twenty (20) characters. The value can contain only alphanumeric characters, + // underscores, and hyphens. + Queryable *bool `locationName:"queryable" type:"boolean"` + + // Whether the configuration property is a required value. + Required *bool `locationName:"required" type:"boolean" required:"true"` + + // Whether the configuration property is secret. Secrets are hidden from all + // calls except for GetJobDetails, GetThirdPartyJobDetails, PollForJobs, and + // PollForThirdPartyJobs. + // + // When updating a pipeline, passing * * * * * without changing any other values + // of the action will preserve the prior value of the secret. + Secret *bool `locationName:"secret" type:"boolean" required:"true"` + + // The type of the configuration property.
+ Type *string `locationName:"type" type:"string" enum:"ActionConfigurationPropertyType"` +} + +// String returns the string representation +func (s ActionConfigurationProperty) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActionConfigurationProperty) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ActionConfigurationProperty) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ActionConfigurationProperty"} + if s.Description != nil && len(*s.Description) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Description", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Required == nil { + invalidParams.Add(request.NewErrParamRequired("Required")) + } + if s.Secret == nil { + invalidParams.Add(request.NewErrParamRequired("Secret")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the context of an action within the stage of a pipeline to a job +// worker. +type ActionContext struct { + _ struct{} `type:"structure"` + + // The name of the action within the context of a job. + Name *string `locationName:"name" min:"1" type:"string"` +} + +// String returns the string representation +func (s ActionContext) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActionContext) GoString() string { + return s.String() +} + +// Represents information about an action declaration. +type ActionDeclaration struct { + _ struct{} `type:"structure"` + + // The configuration information for the action type. + ActionTypeId *ActionTypeId `locationName:"actionTypeId" type:"structure" required:"true"` + + // The action declaration's configuration. + Configuration map[string]*string `locationName:"configuration" type:"map"` + + // The name or ID of the artifact consumed by the action, such as a test or + // build artifact. + InputArtifacts []*InputArtifact `locationName:"inputArtifacts" type:"list"` + + // The action declaration's name. + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // The name or ID of the result of the action declaration, such as a test or + // build artifact. + OutputArtifacts []*OutputArtifact `locationName:"outputArtifacts" type:"list"` + + // The ARN of the IAM service role that will perform the declared action. This + // is assumed through the roleArn for the pipeline. + RoleArn *string `locationName:"roleArn" type:"string"` + + // The order in which actions are run. + RunOrder *int64 `locationName:"runOrder" min:"1" type:"integer"` +} + +// String returns the string representation +func (s ActionDeclaration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActionDeclaration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
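+//
+// Validation descends into nested values: an invalid ActionTypeId or artifact
+// is reported as a nested error. A minimal sketch with hypothetical values:
+//
+//    decl := &codepipeline.ActionDeclaration{
+//        Name: aws.String("Deploy"),
+//        ActionTypeId: &codepipeline.ActionTypeId{
+//            Category: aws.String("Deploy"),
+//            Owner:    aws.String("AWS"),
+//            Provider: aws.String("CodeDeploy"),
+//            Version:  aws.String("1"),
+//        },
+//    }
+//    if err := decl.Validate(); err != nil {
+//        fmt.Println(err)
+//    }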
+func (s *ActionDeclaration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ActionDeclaration"} + if s.ActionTypeId == nil { + invalidParams.Add(request.NewErrParamRequired("ActionTypeId")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.RunOrder != nil && *s.RunOrder < 1 { + invalidParams.Add(request.NewErrParamMinValue("RunOrder", 1)) + } + if s.ActionTypeId != nil { + if err := s.ActionTypeId.Validate(); err != nil { + invalidParams.AddNested("ActionTypeId", err.(request.ErrInvalidParams)) + } + } + if s.InputArtifacts != nil { + for i, v := range s.InputArtifacts { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InputArtifacts", i), err.(request.ErrInvalidParams)) + } + } + } + if s.OutputArtifacts != nil { + for i, v := range s.OutputArtifacts { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "OutputArtifacts", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents information about the run of an action. +type ActionExecution struct { + _ struct{} `type:"structure"` + + // The details of an error returned by a URL external to AWS. + ErrorDetails *ErrorDetails `locationName:"errorDetails" type:"structure"` + + // The external ID of the run of the action. + ExternalExecutionId *string `locationName:"externalExecutionId" min:"1" type:"string"` + + // The URL of a resource external to AWS that will be used when running the + // action, for example an external repository URL. + ExternalExecutionUrl *string `locationName:"externalExecutionUrl" min:"1" type:"string"` + + // The last status change of the action. + LastStatusChange *time.Time `locationName:"lastStatusChange" type:"timestamp" timestampFormat:"unix"` + + // The ARN of the user who last changed the pipeline. + LastUpdatedBy *string `locationName:"lastUpdatedBy" type:"string"` + + // A percentage of completeness of the action as it runs. + PercentComplete *int64 `locationName:"percentComplete" type:"integer"` + + // The status of the action, or for a completed action, the last status of the + // action. + Status *string `locationName:"status" type:"string" enum:"ActionExecutionStatus"` + + // A summary of the run of the action. + Summary *string `locationName:"summary" type:"string"` + + // The system-generated token used to identify a unique approval request. The + // token for each open approval request can be obtained using the GetPipelineState + // command and is used to validate that the approval request corresponding to + // this token is still valid. + Token *string `locationName:"token" type:"string"` +} + +// String returns the string representation +func (s ActionExecution) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActionExecution) GoString() string { + return s.String() +} + +// Represents information about the version (or revision) of an action. +type ActionRevision struct { + _ struct{} `type:"structure"` + + // The date and time when the most recent version of the action was created, + // in timestamp format. 
+ Created *time.Time `locationName:"created" type:"timestamp" timestampFormat:"unix" required:"true"` + + // The unique identifier of the change that set the state to this revision, + // for example a deployment ID or timestamp. + RevisionChangeId *string `locationName:"revisionChangeId" min:"1" type:"string" required:"true"` + + // The system-generated unique ID that identifies the revision number of the + // action. + RevisionId *string `locationName:"revisionId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ActionRevision) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActionRevision) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ActionRevision) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ActionRevision"} + if s.Created == nil { + invalidParams.Add(request.NewErrParamRequired("Created")) + } + if s.RevisionChangeId == nil { + invalidParams.Add(request.NewErrParamRequired("RevisionChangeId")) + } + if s.RevisionChangeId != nil && len(*s.RevisionChangeId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RevisionChangeId", 1)) + } + if s.RevisionId == nil { + invalidParams.Add(request.NewErrParamRequired("RevisionId")) + } + if s.RevisionId != nil && len(*s.RevisionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RevisionId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents information about the state of an action. +type ActionState struct { + _ struct{} `type:"structure"` + + // The name of the action. + ActionName *string `locationName:"actionName" min:"1" type:"string"` + + // Represents information about the version (or revision) of an action. + CurrentRevision *ActionRevision `locationName:"currentRevision" type:"structure"` + + // A URL link for more information about the state of the action, such as a + // deployment group details page. + EntityUrl *string `locationName:"entityUrl" min:"1" type:"string"` + + // Represents information about the run of an action. + LatestExecution *ActionExecution `locationName:"latestExecution" type:"structure"` + + // A URL link for more information about the revision, such as a commit details + // page. + RevisionUrl *string `locationName:"revisionUrl" min:"1" type:"string"` +} + +// String returns the string representation +func (s ActionState) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActionState) GoString() string { + return s.String() +} + +// Returns information about the details of an action type. +type ActionType struct { + _ struct{} `type:"structure"` + + // The configuration properties for the action type. + ActionConfigurationProperties []*ActionConfigurationProperty `locationName:"actionConfigurationProperties" type:"list"` + + // Represents information about an action type. + Id *ActionTypeId `locationName:"id" type:"structure" required:"true"` + + // The details of the input artifact for the action, such as its commit ID. + InputArtifactDetails *ArtifactDetails `locationName:"inputArtifactDetails" type:"structure" required:"true"` + + // The details of the output artifact of the action, such as its commit ID. + OutputArtifactDetails *ArtifactDetails `locationName:"outputArtifactDetails" type:"structure" required:"true"` + + // The settings for the action type. 
+ Settings *ActionTypeSettings `locationName:"settings" type:"structure"` +} + +// String returns the string representation +func (s ActionType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActionType) GoString() string { + return s.String() +} + +// Represents information about an action type. +type ActionTypeId struct { + _ struct{} `type:"structure"` + + // A category defines what kind of action can be taken in the stage, and constrains + // the provider type for the action. Valid categories are limited to one of + // the values below. + Category *string `locationName:"category" type:"string" required:"true" enum:"ActionCategory"` + + // The creator of the action being called. + Owner *string `locationName:"owner" type:"string" required:"true" enum:"ActionOwner"` + + // The provider of the service being called by the action. Valid providers are + // determined by the action category. For example, an action in the Deploy category + // type might have a provider of AWS CodeDeploy, which would be specified as + // CodeDeploy. + Provider *string `locationName:"provider" min:"1" type:"string" required:"true"` + + // A string that identifies the action type. + Version *string `locationName:"version" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ActionTypeId) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActionTypeId) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ActionTypeId) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ActionTypeId"} + if s.Category == nil { + invalidParams.Add(request.NewErrParamRequired("Category")) + } + if s.Owner == nil { + invalidParams.Add(request.NewErrParamRequired("Owner")) + } + if s.Provider == nil { + invalidParams.Add(request.NewErrParamRequired("Provider")) + } + if s.Provider != nil && len(*s.Provider) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Provider", 1)) + } + if s.Version == nil { + invalidParams.Add(request.NewErrParamRequired("Version")) + } + if s.Version != nil && len(*s.Version) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Version", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Returns information about the settings for an action type. +type ActionTypeSettings struct { + _ struct{} `type:"structure"` + + // The URL returned to the AWS CodePipeline console that provides a deep link + // to the resources of the external system, such as the configuration page for + // an AWS CodeDeploy deployment group. This link is provided as part of the + // action display within the pipeline. + EntityUrlTemplate *string `locationName:"entityUrlTemplate" min:"1" type:"string"` + + // The URL returned to the AWS CodePipeline console that contains a link to + // the top-level landing page for the external system, such as the console page + // for AWS CodeDeploy. This link is shown on the pipeline view page in the AWS + // CodePipeline console and provides a link to the execution entity of the external + // action. + ExecutionUrlTemplate *string `locationName:"executionUrlTemplate" min:"1" type:"string"` + + // The URL returned to the AWS CodePipeline console that contains a link to + // the page where customers can update or change the configuration of the external + // action.
+ RevisionUrlTemplate *string `locationName:"revisionUrlTemplate" min:"1" type:"string"` + + // The URL of a sign-up page where users can sign up for an external service + // and perform initial configuration of the action provided by that service. + ThirdPartyConfigurationUrl *string `locationName:"thirdPartyConfigurationUrl" min:"1" type:"string"` +} + +// String returns the string representation +func (s ActionTypeSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActionTypeSettings) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ActionTypeSettings) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ActionTypeSettings"} + if s.EntityUrlTemplate != nil && len(*s.EntityUrlTemplate) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EntityUrlTemplate", 1)) + } + if s.ExecutionUrlTemplate != nil && len(*s.ExecutionUrlTemplate) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ExecutionUrlTemplate", 1)) + } + if s.RevisionUrlTemplate != nil && len(*s.RevisionUrlTemplate) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RevisionUrlTemplate", 1)) + } + if s.ThirdPartyConfigurationUrl != nil && len(*s.ThirdPartyConfigurationUrl) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ThirdPartyConfigurationUrl", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents information about the result of an approval request. +type ApprovalResult struct { + _ struct{} `type:"structure"` + + // The response submitted by a reviewer assigned to an approval action request. + Status *string `locationName:"status" type:"string" required:"true" enum:"ApprovalStatus"` + + // The summary of the current status of the approval request. + Summary *string `locationName:"summary" type:"string" required:"true"` +} + +// String returns the string representation +func (s ApprovalResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ApprovalResult) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ApprovalResult) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ApprovalResult"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.Summary == nil { + invalidParams.Add(request.NewErrParamRequired("Summary")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents information about an artifact that will be worked upon by actions +// in the pipeline. +type Artifact struct { + _ struct{} `type:"structure"` + + // The location of an artifact. + Location *ArtifactLocation `locationName:"location" type:"structure"` + + // The artifact's name. + Name *string `locationName:"name" min:"1" type:"string"` + + // The artifact's revision ID. Depending on the type of object, this could be + // a commit ID (GitHub) or a revision ID (Amazon S3). + Revision *string `locationName:"revision" min:"1" type:"string"` +} + +// String returns the string representation +func (s Artifact) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Artifact) GoString() string { + return s.String() +} + +// Returns information about the details of an artifact. 
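+//
+// A minimal sketch of how the artifact count bounds might be supplied when
+// defining a custom action (the values are hypothetical):
+//
+//    details := &codepipeline.ArtifactDetails{
+//        MinimumCount: aws.Int64(0), // the action can run without input artifacts
+//        MaximumCount: aws.Int64(1), // at most one artifact is accepted
+//    }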
+type ArtifactDetails struct { + _ struct{} `type:"structure"` + + // The maximum number of artifacts allowed for the action type. + MaximumCount *int64 `locationName:"maximumCount" type:"integer" required:"true"` + + // The minimum number of artifacts allowed for the action type. + MinimumCount *int64 `locationName:"minimumCount" type:"integer" required:"true"` +} + +// String returns the string representation +func (s ArtifactDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ArtifactDetails) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ArtifactDetails) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ArtifactDetails"} + if s.MaximumCount == nil { + invalidParams.Add(request.NewErrParamRequired("MaximumCount")) + } + if s.MinimumCount == nil { + invalidParams.Add(request.NewErrParamRequired("MinimumCount")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents information about the location of an artifact. +type ArtifactLocation struct { + _ struct{} `type:"structure"` + + // The Amazon S3 bucket that contains the artifact. + S3Location *S3ArtifactLocation `locationName:"s3Location" type:"structure"` + + // The type of artifact in the location. + Type *string `locationName:"type" type:"string" enum:"ArtifactLocationType"` +} + +// String returns the string representation +func (s ArtifactLocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ArtifactLocation) GoString() string { + return s.String() +} + +// The Amazon S3 location where artifacts are stored for the pipeline. If this +// Amazon S3 bucket is created manually, it must meet the requirements for AWS +// CodePipeline. For more information, see the Concepts (http://docs.aws.amazon.com/codepipeline/latest/userguide/concepts.html#CPS3Bucket). +type ArtifactStore struct { + _ struct{} `type:"structure"` + + // The encryption key used to encrypt the data in the artifact store, such as + // an AWS Key Management Service (AWS KMS) key. If this is undefined, the default + // key for Amazon S3 is used. + EncryptionKey *EncryptionKey `locationName:"encryptionKey" type:"structure"` + + // The location for storing the artifacts for a pipeline, such as an S3 bucket + // or folder. + Location *string `locationName:"location" min:"3" type:"string" required:"true"` + + // The type of the artifact store, such as S3. + Type *string `locationName:"type" type:"string" required:"true" enum:"ArtifactStoreType"` +} + +// String returns the string representation +func (s ArtifactStore) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ArtifactStore) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
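+//
+// A minimal sketch with hypothetical values; Validate enforces the required
+// fields and the three-character minimum on Location:
+//
+//    store := &codepipeline.ArtifactStore{
+//        Type:     aws.String("S3"),
+//        Location: aws.String("my-artifact-bucket"), // hypothetical bucket name
+//    }
+//    if err := store.Validate(); err != nil {
+//        fmt.Println(err)
+//    }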
+func (s *ArtifactStore) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ArtifactStore"} + if s.Location == nil { + invalidParams.Add(request.NewErrParamRequired("Location")) + } + if s.Location != nil && len(*s.Location) < 3 { + invalidParams.Add(request.NewErrParamMinLen("Location", 3)) + } + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + if s.EncryptionKey != nil { + if err := s.EncryptionKey.Validate(); err != nil { + invalidParams.AddNested("EncryptionKey", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Reserved for future use. +type BlockerDeclaration struct { + _ struct{} `type:"structure"` + + // Reserved for future use. + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // Reserved for future use. + Type *string `locationName:"type" type:"string" required:"true" enum:"BlockerType"` +} + +// String returns the string representation +func (s BlockerDeclaration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BlockerDeclaration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BlockerDeclaration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BlockerDeclaration"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the input of a create custom action operation. +type CreateCustomActionTypeInput struct { + _ struct{} `type:"structure"` + + // The category of the custom action, such as a source action or a build action. + // + // Although Source is listed as a valid value, it is not currently functional. + // This value is reserved for future use. + Category *string `locationName:"category" type:"string" required:"true" enum:"ActionCategory"` + + // The configuration properties for the custom action. + // + // You can refer to a name in the configuration properties of the custom action + // within the URL templates by following the format of {Config:name}, as long + // as the configuration property is both required and not secret. For more information, + // see Create a Custom Action for a Pipeline (http://docs.aws.amazon.com/codepipeline/latest/userguide/how-to-create-custom-action.html). + ConfigurationProperties []*ActionConfigurationProperty `locationName:"configurationProperties" type:"list"` + + // Returns information about the details of an artifact. + InputArtifactDetails *ArtifactDetails `locationName:"inputArtifactDetails" type:"structure" required:"true"` + + // Returns information about the details of an artifact. + OutputArtifactDetails *ArtifactDetails `locationName:"outputArtifactDetails" type:"structure" required:"true"` + + // The provider of the service used in the custom action, such as AWS CodeDeploy. + Provider *string `locationName:"provider" min:"1" type:"string" required:"true"` + + // Returns information about the settings for an action type. + Settings *ActionTypeSettings `locationName:"settings" type:"structure"` + + // The version number of the custom action. 
+ Version *string `locationName:"version" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateCustomActionTypeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCustomActionTypeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateCustomActionTypeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateCustomActionTypeInput"} + if s.Category == nil { + invalidParams.Add(request.NewErrParamRequired("Category")) + } + if s.InputArtifactDetails == nil { + invalidParams.Add(request.NewErrParamRequired("InputArtifactDetails")) + } + if s.OutputArtifactDetails == nil { + invalidParams.Add(request.NewErrParamRequired("OutputArtifactDetails")) + } + if s.Provider == nil { + invalidParams.Add(request.NewErrParamRequired("Provider")) + } + if s.Provider != nil && len(*s.Provider) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Provider", 1)) + } + if s.Version == nil { + invalidParams.Add(request.NewErrParamRequired("Version")) + } + if s.Version != nil && len(*s.Version) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Version", 1)) + } + if s.ConfigurationProperties != nil { + for i, v := range s.ConfigurationProperties { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ConfigurationProperties", i), err.(request.ErrInvalidParams)) + } + } + } + if s.InputArtifactDetails != nil { + if err := s.InputArtifactDetails.Validate(); err != nil { + invalidParams.AddNested("InputArtifactDetails", err.(request.ErrInvalidParams)) + } + } + if s.OutputArtifactDetails != nil { + if err := s.OutputArtifactDetails.Validate(); err != nil { + invalidParams.AddNested("OutputArtifactDetails", err.(request.ErrInvalidParams)) + } + } + if s.Settings != nil { + if err := s.Settings.Validate(); err != nil { + invalidParams.AddNested("Settings", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a create custom action operation. +type CreateCustomActionTypeOutput struct { + _ struct{} `type:"structure"` + + // Returns information about the details of an action type. + ActionType *ActionType `locationName:"actionType" type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateCustomActionTypeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCustomActionTypeOutput) GoString() string { + return s.String() +} + +// Represents the input of a create pipeline action. +type CreatePipelineInput struct { + _ struct{} `type:"structure"` + + // Represents the structure of actions and stages to be performed in the pipeline. + Pipeline *PipelineDeclaration `locationName:"pipeline" type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreatePipelineInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePipelineInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
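+//
+// A minimal usage sketch (illustrative; assumes a configured CodePipeline
+// client svc and a fully populated PipelineDeclaration named decl):
+//
+//	out, err := svc.CreatePipeline(&CreatePipelineInput{Pipeline: decl})
+//	if err != nil {
+//		// handle parameter or service errors
+//	}
+//	_ = out.Pipeline // the pipeline as recorded by the service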
+func (s *CreatePipelineInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreatePipelineInput"} + if s.Pipeline == nil { + invalidParams.Add(request.NewErrParamRequired("Pipeline")) + } + if s.Pipeline != nil { + if err := s.Pipeline.Validate(); err != nil { + invalidParams.AddNested("Pipeline", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a create pipeline action. +type CreatePipelineOutput struct { + _ struct{} `type:"structure"` + + // Represents the structure of actions and stages to be performed in the pipeline. + Pipeline *PipelineDeclaration `locationName:"pipeline" type:"structure"` +} + +// String returns the string representation +func (s CreatePipelineOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePipelineOutput) GoString() string { + return s.String() +} + +// Represents information about a current revision. +type CurrentRevision struct { + _ struct{} `type:"structure"` + + // The change identifier for the current revision. + ChangeIdentifier *string `locationName:"changeIdentifier" min:"1" type:"string" required:"true"` + + // The revision ID of the current version of an artifact. + Revision *string `locationName:"revision" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CurrentRevision) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CurrentRevision) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CurrentRevision) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CurrentRevision"} + if s.ChangeIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeIdentifier")) + } + if s.ChangeIdentifier != nil && len(*s.ChangeIdentifier) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChangeIdentifier", 1)) + } + if s.Revision == nil { + invalidParams.Add(request.NewErrParamRequired("Revision")) + } + if s.Revision != nil && len(*s.Revision) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Revision", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the input of a delete custom action operation. The custom action +// will be marked as deleted. +type DeleteCustomActionTypeInput struct { + _ struct{} `type:"structure"` + + // The category of the custom action that you want to delete, such as source + // or deploy. + Category *string `locationName:"category" type:"string" required:"true" enum:"ActionCategory"` + + // The provider of the service used in the custom action, such as AWS CodeDeploy. + Provider *string `locationName:"provider" min:"1" type:"string" required:"true"` + + // The version of the custom action to delete. + Version *string `locationName:"version" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteCustomActionTypeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCustomActionTypeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
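+//
+// A minimal usage sketch (illustrative; assumes a configured client svc, and
+// the category, provider, and version values are placeholders):
+//
+//	_, err := svc.DeleteCustomActionType(&DeleteCustomActionTypeInput{
+//		Category: aws.String("Build"),
+//		Provider: aws.String("MyProvider"),
+//		Version:  aws.String("1"),
+//	})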
+func (s *DeleteCustomActionTypeInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteCustomActionTypeInput"}
+	if s.Category == nil {
+		invalidParams.Add(request.NewErrParamRequired("Category"))
+	}
+	if s.Provider == nil {
+		invalidParams.Add(request.NewErrParamRequired("Provider"))
+	}
+	if s.Provider != nil && len(*s.Provider) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Provider", 1))
+	}
+	if s.Version == nil {
+		invalidParams.Add(request.NewErrParamRequired("Version"))
+	}
+	if s.Version != nil && len(*s.Version) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Version", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type DeleteCustomActionTypeOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteCustomActionTypeOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteCustomActionTypeOutput) GoString() string {
+	return s.String()
+}
+
+// Represents the input of a delete pipeline action.
+type DeletePipelineInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the pipeline to be deleted.
+	Name *string `locationName:"name" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeletePipelineInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeletePipelineInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeletePipelineInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeletePipelineInput"}
+	if s.Name == nil {
+		invalidParams.Add(request.NewErrParamRequired("Name"))
+	}
+	if s.Name != nil && len(*s.Name) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type DeletePipelineOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeletePipelineOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeletePipelineOutput) GoString() string {
+	return s.String()
+}
+
+// Represents the input of a disable stage transition action.
+type DisableStageTransitionInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the pipeline in which you want to disable the flow of artifacts
+	// from one stage to another.
+	PipelineName *string `locationName:"pipelineName" min:"1" type:"string" required:"true"`
+
+	// The reason given to the user why a stage is disabled, such as waiting for
+	// manual approval or manual tests. This message is displayed in the pipeline
+	// console UI.
+	Reason *string `locationName:"reason" min:"1" type:"string" required:"true"`
+
+	// The name of the stage where you want to disable the inbound or outbound transition
+	// of artifacts.
+	StageName *string `locationName:"stageName" min:"1" type:"string" required:"true"`
+
+	// Specifies whether artifacts will be prevented from transitioning into the
+	// stage and being processed by the actions in that stage (inbound), or prevented
+	// from transitioning from the stage after they have been processed by the actions
+	// in that stage (outbound).
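+	//
+	// For example (illustrative), to block artifacts from entering the stage:
+	//
+	//	TransitionType: aws.String("Inbound")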
+ TransitionType *string `locationName:"transitionType" type:"string" required:"true" enum:"StageTransitionType"` +} + +// String returns the string representation +func (s DisableStageTransitionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableStageTransitionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DisableStageTransitionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisableStageTransitionInput"} + if s.PipelineName == nil { + invalidParams.Add(request.NewErrParamRequired("PipelineName")) + } + if s.PipelineName != nil && len(*s.PipelineName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PipelineName", 1)) + } + if s.Reason == nil { + invalidParams.Add(request.NewErrParamRequired("Reason")) + } + if s.Reason != nil && len(*s.Reason) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Reason", 1)) + } + if s.StageName == nil { + invalidParams.Add(request.NewErrParamRequired("StageName")) + } + if s.StageName != nil && len(*s.StageName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StageName", 1)) + } + if s.TransitionType == nil { + invalidParams.Add(request.NewErrParamRequired("TransitionType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DisableStageTransitionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisableStageTransitionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableStageTransitionOutput) GoString() string { + return s.String() +} + +// Represents the input of an enable stage transition action. +type EnableStageTransitionInput struct { + _ struct{} `type:"structure"` + + // The name of the pipeline in which you want to enable the flow of artifacts + // from one stage to another. + PipelineName *string `locationName:"pipelineName" min:"1" type:"string" required:"true"` + + // The name of the stage where you want to enable the transition of artifacts, + // either into the stage (inbound) or from that stage to the next stage (outbound). + StageName *string `locationName:"stageName" min:"1" type:"string" required:"true"` + + // Specifies whether artifacts will be allowed to enter the stage and be processed + // by the actions in that stage (inbound) or whether already-processed artifacts + // will be allowed to transition to the next stage (outbound). + TransitionType *string `locationName:"transitionType" type:"string" required:"true" enum:"StageTransitionType"` +} + +// String returns the string representation +func (s EnableStageTransitionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableStageTransitionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
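+//
+// A minimal usage sketch (illustrative; assumes a configured client svc, and
+// the pipeline and stage names are placeholders):
+//
+//	_, err := svc.EnableStageTransition(&EnableStageTransitionInput{
+//		PipelineName:   aws.String("MyPipeline"),
+//		StageName:      aws.String("Deploy"),
+//		TransitionType: aws.String("Inbound"),
+//	})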
+func (s *EnableStageTransitionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EnableStageTransitionInput"} + if s.PipelineName == nil { + invalidParams.Add(request.NewErrParamRequired("PipelineName")) + } + if s.PipelineName != nil && len(*s.PipelineName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PipelineName", 1)) + } + if s.StageName == nil { + invalidParams.Add(request.NewErrParamRequired("StageName")) + } + if s.StageName != nil && len(*s.StageName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StageName", 1)) + } + if s.TransitionType == nil { + invalidParams.Add(request.NewErrParamRequired("TransitionType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type EnableStageTransitionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s EnableStageTransitionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableStageTransitionOutput) GoString() string { + return s.String() +} + +// Represents information about the key used to encrypt data in the artifact +// store, such as an AWS Key Management Service (AWS KMS) key. +type EncryptionKey struct { + _ struct{} `type:"structure"` + + // The ID used to identify the key. For an AWS KMS key, this is the key ID or + // key ARN. + Id *string `locationName:"id" min:"1" type:"string" required:"true"` + + // The type of encryption key, such as an AWS Key Management Service (AWS KMS) + // key. When creating or updating a pipeline, the value must be set to 'KMS'. + Type *string `locationName:"type" type:"string" required:"true" enum:"EncryptionKeyType"` +} + +// String returns the string representation +func (s EncryptionKey) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EncryptionKey) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *EncryptionKey) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EncryptionKey"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) + } + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents information about an error in AWS CodePipeline. +type ErrorDetails struct { + _ struct{} `type:"structure"` + + // The system ID or error number code of the error. + Code *string `locationName:"code" type:"string"` + + // The text of the error message. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ErrorDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ErrorDetails) GoString() string { + return s.String() +} + +// The details of the actions taken and results produced on an artifact as it +// passes through stages in the pipeline. +type ExecutionDetails struct { + _ struct{} `type:"structure"` + + // The system-generated unique ID of this action used to identify this job worker + // in any external systems, such as AWS CodeDeploy. 
+ ExternalExecutionId *string `locationName:"externalExecutionId" min:"1" type:"string"` + + // The percentage of work completed on the action, represented on a scale of + // zero to one hundred percent. + PercentComplete *int64 `locationName:"percentComplete" type:"integer"` + + // The summary of the current status of the actions. + Summary *string `locationName:"summary" type:"string"` +} + +// String returns the string representation +func (s ExecutionDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExecutionDetails) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ExecutionDetails) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExecutionDetails"} + if s.ExternalExecutionId != nil && len(*s.ExternalExecutionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ExternalExecutionId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents information about failure details. +type FailureDetails struct { + _ struct{} `type:"structure"` + + // The external ID of the run of the action that failed. + ExternalExecutionId *string `locationName:"externalExecutionId" min:"1" type:"string"` + + // The message about the failure. + Message *string `locationName:"message" type:"string" required:"true"` + + // The type of the failure. + Type *string `locationName:"type" type:"string" required:"true" enum:"FailureType"` +} + +// String returns the string representation +func (s FailureDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FailureDetails) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *FailureDetails) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FailureDetails"} + if s.ExternalExecutionId != nil && len(*s.ExternalExecutionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ExternalExecutionId", 1)) + } + if s.Message == nil { + invalidParams.Add(request.NewErrParamRequired("Message")) + } + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the input of a get job details action. +type GetJobDetailsInput struct { + _ struct{} `type:"structure"` + + // The unique system-generated ID for the job. + JobId *string `locationName:"jobId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetJobDetailsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetJobDetailsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetJobDetailsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetJobDetailsInput"} + if s.JobId == nil { + invalidParams.Add(request.NewErrParamRequired("JobId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a get job details action. +type GetJobDetailsOutput struct { + _ struct{} `type:"structure"` + + // The details of the job. + // + // If AWSSessionCredentials is used, a long-running job can call GetJobDetails + // again to obtain new credentials. 
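+	//
+	// For example (illustrative; jobID comes from an earlier PollForJobs call,
+	// with error handling elided):
+	//
+	//	out, err := svc.GetJobDetails(&GetJobDetailsInput{JobId: jobID})
+	//	creds := out.JobDetails.Data.ArtifactCredentials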
+	JobDetails *JobDetails `locationName:"jobDetails" type:"structure"`
+}
+
+// String returns the string representation
+func (s GetJobDetailsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetJobDetailsOutput) GoString() string {
+	return s.String()
+}
+
+// Represents the input of a get pipeline action.
+type GetPipelineInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the pipeline for which you want to get information. Pipeline
+	// names must be unique under an Amazon Web Services (AWS) user account.
+	Name *string `locationName:"name" min:"1" type:"string" required:"true"`
+
+	// The version number of the pipeline. If you do not specify a version, it
+	// defaults to the most current version.
+	Version *int64 `locationName:"version" min:"1" type:"integer"`
+}
+
+// String returns the string representation
+func (s GetPipelineInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetPipelineInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetPipelineInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetPipelineInput"}
+	if s.Name == nil {
+		invalidParams.Add(request.NewErrParamRequired("Name"))
+	}
+	if s.Name != nil && len(*s.Name) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
+	}
+	if s.Version != nil && *s.Version < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("Version", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Represents the output of a get pipeline action.
+type GetPipelineOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Represents the structure of actions and stages to be performed in the pipeline.
+	Pipeline *PipelineDeclaration `locationName:"pipeline" type:"structure"`
+}
+
+// String returns the string representation
+func (s GetPipelineOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetPipelineOutput) GoString() string {
+	return s.String()
+}
+
+// Represents the input of a get pipeline state action.
+type GetPipelineStateInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the pipeline about which you want to get information.
+	Name *string `locationName:"name" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetPipelineStateInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetPipelineStateInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetPipelineStateInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetPipelineStateInput"}
+	if s.Name == nil {
+		invalidParams.Add(request.NewErrParamRequired("Name"))
+	}
+	if s.Name != nil && len(*s.Name) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Represents the output of a get pipeline state action.
+type GetPipelineStateOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The date and time the pipeline was created, in timestamp format.
+ Created *time.Time `locationName:"created" type:"timestamp" timestampFormat:"unix"` + + // The name of the pipeline for which you want to get the state. + PipelineName *string `locationName:"pipelineName" min:"1" type:"string"` + + // The version number of the pipeline. + // + // A newly-created pipeline is always assigned a version number of 1. + PipelineVersion *int64 `locationName:"pipelineVersion" min:"1" type:"integer"` + + // A list of the pipeline stage output information, including stage name, state, + // most recent run details, whether the stage is disabled, and other data. + StageStates []*StageState `locationName:"stageStates" type:"list"` + + // The date and time the pipeline was last updated, in timestamp format. + Updated *time.Time `locationName:"updated" type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s GetPipelineStateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPipelineStateOutput) GoString() string { + return s.String() +} + +// Represents the input of a get third party job details action. +type GetThirdPartyJobDetailsInput struct { + _ struct{} `type:"structure"` + + // The clientToken portion of the clientId and clientToken pair used to verify + // that the calling entity is allowed access to the job and its details. + ClientToken *string `locationName:"clientToken" type:"string" required:"true"` + + // The unique system-generated ID used for identifying the job. + JobId *string `locationName:"jobId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetThirdPartyJobDetailsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetThirdPartyJobDetailsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetThirdPartyJobDetailsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetThirdPartyJobDetailsInput"} + if s.ClientToken == nil { + invalidParams.Add(request.NewErrParamRequired("ClientToken")) + } + if s.JobId == nil { + invalidParams.Add(request.NewErrParamRequired("JobId")) + } + if s.JobId != nil && len(*s.JobId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JobId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a get third party job details action. +type GetThirdPartyJobDetailsOutput struct { + _ struct{} `type:"structure"` + + // The details of the job, including any protected values defined for the job. + JobDetails *ThirdPartyJobDetails `locationName:"jobDetails" type:"structure"` +} + +// String returns the string representation +func (s GetThirdPartyJobDetailsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetThirdPartyJobDetailsOutput) GoString() string { + return s.String() +} + +// Represents information about an artifact to be worked on, such as a test +// or build artifact. +type InputArtifact struct { + _ struct{} `type:"structure"` + + // The name of the artifact to be worked on, for example, "My App". + // + // The input artifact of an action must exactly match the output artifact declared + // in a preceding action, but the input artifact does not have to be the next + // action in strict sequence from the action that provided the output artifact. 
+	// Actions in parallel can declare different output artifacts, which are in
+	// turn consumed by different following actions.
+	Name *string `locationName:"name" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s InputArtifact) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InputArtifact) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InputArtifact) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "InputArtifact"}
+	if s.Name == nil {
+		invalidParams.Add(request.NewErrParamRequired("Name"))
+	}
+	if s.Name != nil && len(*s.Name) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Represents information about a job.
+type Job struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the AWS account to use when performing the job.
+	AccountId *string `locationName:"accountId" type:"string"`
+
+	// Additional data about a job.
+	Data *JobData `locationName:"data" type:"structure"`
+
+	// The unique system-generated ID of the job.
+	Id *string `locationName:"id" type:"string"`
+
+	// A system-generated random number that AWS CodePipeline uses to ensure that
+	// the job is being worked on by only one job worker. This number must be returned
+	// in the response.
+	Nonce *string `locationName:"nonce" type:"string"`
+}
+
+// String returns the string representation
+func (s Job) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Job) GoString() string {
+	return s.String()
+}
+
+// Represents additional information about a job required for a job worker to
+// complete the job.
+type JobData struct {
+	_ struct{} `type:"structure"`
+
+	// Represents information about an action configuration.
+	ActionConfiguration *ActionConfiguration `locationName:"actionConfiguration" type:"structure"`
+
+	// Represents information about an action type.
+	ActionTypeId *ActionTypeId `locationName:"actionTypeId" type:"structure"`
+
+	// Represents an AWS session credentials object. These credentials are temporary
+	// credentials that are issued by the AWS Security Token Service (STS). They
+	// can be used to access input and output artifacts in the Amazon S3 bucket
+	// used to store artifacts for the pipeline in AWS CodePipeline.
+	ArtifactCredentials *AWSSessionCredentials `locationName:"artifactCredentials" type:"structure"`
+
+	// A system-generated token, such as an AWS CodeDeploy deployment ID, that a
+	// job requires in order to continue the job asynchronously.
+	ContinuationToken *string `locationName:"continuationToken" type:"string"`
+
+	// Represents information about the key used to encrypt data in the artifact
+	// store, such as an AWS Key Management Service (AWS KMS) key.
+	EncryptionKey *EncryptionKey `locationName:"encryptionKey" type:"structure"`
+
+	// The artifacts supplied to the job.
+	InputArtifacts []*Artifact `locationName:"inputArtifacts" type:"list"`
+
+	// The output artifacts of the job.
+	OutputArtifacts []*Artifact `locationName:"outputArtifacts" type:"list"`
+
+	// Represents information about a pipeline to a job worker.
+ PipelineContext *PipelineContext `locationName:"pipelineContext" type:"structure"` +} + +// String returns the string representation +func (s JobData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s JobData) GoString() string { + return s.String() +} + +// Represents information about the details of a job. +type JobDetails struct { + _ struct{} `type:"structure"` + + // The AWS account ID associated with the job. + AccountId *string `locationName:"accountId" type:"string"` + + // Represents additional information about a job required for a job worker to + // complete the job. + Data *JobData `locationName:"data" type:"structure"` + + // The unique system-generated ID of the job. + Id *string `locationName:"id" type:"string"` +} + +// String returns the string representation +func (s JobDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s JobDetails) GoString() string { + return s.String() +} + +// Represents the input of a list action types action. +type ListActionTypesInput struct { + _ struct{} `type:"structure"` + + // Filters the list of action types to those created by a specified entity. + ActionOwnerFilter *string `locationName:"actionOwnerFilter" type:"string" enum:"ActionOwner"` + + // An identifier that was returned from the previous list action types call, + // which can be used to return the next set of action types in the list. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListActionTypesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListActionTypesInput) GoString() string { + return s.String() +} + +// Represents the output of a list action types action. +type ListActionTypesOutput struct { + _ struct{} `type:"structure"` + + // Provides details of the action types. + ActionTypes []*ActionType `locationName:"actionTypes" type:"list" required:"true"` + + // If the amount of returned information is significantly large, an identifier + // is also returned which can be used in a subsequent list action types call + // to return the next set of action types in the list. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListActionTypesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListActionTypesOutput) GoString() string { + return s.String() +} + +// Represents the input of a list pipelines action. +type ListPipelinesInput struct { + _ struct{} `type:"structure"` + + // An identifier that was returned from the previous list pipelines call, which + // can be used to return the next set of pipelines in the list. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListPipelinesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPipelinesInput) GoString() string { + return s.String() +} + +// Represents the output of a list pipelines action. +type ListPipelinesOutput struct { + _ struct{} `type:"structure"` + + // If the amount of returned information is significantly large, an identifier + // is also returned which can be used in a subsequent list pipelines call to + // return the next set of pipelines in the list. 
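+	//
+	// A pagination sketch (illustrative; assumes a configured client svc, with
+	// error handling elided):
+	//
+	//	var token *string
+	//	for {
+	//		out, _ := svc.ListPipelines(&ListPipelinesInput{NextToken: token})
+	//		// ... consume out.Pipelines ...
+	//		if out.NextToken == nil {
+	//			break
+	//		}
+	//		token = out.NextToken
+	//	}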
+	NextToken *string `locationName:"nextToken" type:"string"`
+
+	// The list of pipelines.
+	Pipelines []*PipelineSummary `locationName:"pipelines" type:"list"`
+}
+
+// String returns the string representation
+func (s ListPipelinesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListPipelinesOutput) GoString() string {
+	return s.String()
+}
+
+// Represents information about the output of an action.
+type OutputArtifact struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the output artifact, such as "My App".
+	//
+	// The input artifact of an action must exactly match the output artifact declared
+	// in a preceding action, but the input artifact does not have to be the next
+	// action in strict sequence from the action that provided the output artifact.
+	// Actions in parallel can declare different output artifacts, which are in
+	// turn consumed by different following actions.
+	//
+	// Output artifact names must be unique within a pipeline.
+	Name *string `locationName:"name" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s OutputArtifact) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s OutputArtifact) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *OutputArtifact) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "OutputArtifact"}
+	if s.Name == nil {
+		invalidParams.Add(request.NewErrParamRequired("Name"))
+	}
+	if s.Name != nil && len(*s.Name) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Represents information about a pipeline to a job worker.
+type PipelineContext struct {
+	_ struct{} `type:"structure"`
+
+	// Represents the context of an action within the stage of a pipeline to a job
+	// worker.
+	Action *ActionContext `locationName:"action" type:"structure"`
+
+	// The name of the pipeline. This is a user-specified value. Pipeline names
+	// must be unique across all pipelines under an Amazon Web Services account.
+	PipelineName *string `locationName:"pipelineName" min:"1" type:"string"`
+
+	// The stage of the pipeline.
+	Stage *StageContext `locationName:"stage" type:"structure"`
+}
+
+// String returns the string representation
+func (s PipelineContext) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PipelineContext) GoString() string {
+	return s.String()
+}
+
+// Represents the structure of actions and stages to be performed in the pipeline.
+type PipelineDeclaration struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon S3 location where artifacts are stored for the pipeline. If this
+	// Amazon S3 bucket is created manually, it must meet the requirements for AWS
+	// CodePipeline. For more information, see the Concepts (http://docs.aws.amazon.com/codepipeline/latest/userguide/concepts.html#CPS3Bucket).
+	ArtifactStore *ArtifactStore `locationName:"artifactStore" type:"structure" required:"true"`
+
+	// The name of the pipeline.
+	Name *string `locationName:"name" min:"1" type:"string" required:"true"`
+
+	// The Amazon Resource Name (ARN) for AWS CodePipeline to use to either perform
+	// actions with no actionRoleArn, or to use to assume roles for actions with
+	// an actionRoleArn.
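+	//
+	// For example (illustrative account ID and role name):
+	//
+	//	RoleArn: aws.String("arn:aws:iam::111122223333:role/MyPipelineServiceRole")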
+	RoleArn *string `locationName:"roleArn" type:"string" required:"true"`
+
+	// The stages in which actions are performed.
+	Stages []*StageDeclaration `locationName:"stages" type:"list" required:"true"`
+
+	// The version number of the pipeline. A new pipeline always has a version number
+	// of 1. This number is automatically incremented when a pipeline is updated.
+	Version *int64 `locationName:"version" min:"1" type:"integer"`
+}
+
+// String returns the string representation
+func (s PipelineDeclaration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PipelineDeclaration) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PipelineDeclaration) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PipelineDeclaration"}
+	if s.ArtifactStore == nil {
+		invalidParams.Add(request.NewErrParamRequired("ArtifactStore"))
+	}
+	if s.Name == nil {
+		invalidParams.Add(request.NewErrParamRequired("Name"))
+	}
+	if s.Name != nil && len(*s.Name) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
+	}
+	if s.RoleArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("RoleArn"))
+	}
+	if s.Stages == nil {
+		invalidParams.Add(request.NewErrParamRequired("Stages"))
+	}
+	if s.Version != nil && *s.Version < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("Version", 1))
+	}
+	if s.ArtifactStore != nil {
+		if err := s.ArtifactStore.Validate(); err != nil {
+			invalidParams.AddNested("ArtifactStore", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.Stages != nil {
+		for i, v := range s.Stages {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Stages", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Returns a summary of a pipeline.
+type PipelineSummary struct {
+	_ struct{} `type:"structure"`
+
+	// The date and time the pipeline was created, in timestamp format.
+	Created *time.Time `locationName:"created" type:"timestamp" timestampFormat:"unix"`
+
+	// The name of the pipeline.
+	Name *string `locationName:"name" min:"1" type:"string"`
+
+	// The date and time of the last update to the pipeline, in timestamp format.
+	Updated *time.Time `locationName:"updated" type:"timestamp" timestampFormat:"unix"`
+
+	// The version number of the pipeline.
+	Version *int64 `locationName:"version" min:"1" type:"integer"`
+}
+
+// String returns the string representation
+func (s PipelineSummary) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PipelineSummary) GoString() string {
+	return s.String()
+}
+
+// Represents the input of a poll for jobs action.
+type PollForJobsInput struct {
+	_ struct{} `type:"structure"`
+
+	// Represents information about an action type.
+	ActionTypeId *ActionTypeId `locationName:"actionTypeId" type:"structure" required:"true"`
+
+	// The maximum number of jobs to return in a poll for jobs call.
+	MaxBatchSize *int64 `locationName:"maxBatchSize" min:"1" type:"integer"`
+
+	// A map of property names and values. For an action type with no queryable
+	// properties, this value must be null or an empty map. For an action type with
+	// a queryable property, you must supply that property as a key in the map.
+	// Only jobs whose action configuration matches the mapped value will be returned.
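+	//
+	// A polling sketch (illustrative; actionType is an *ActionTypeId for the
+	// custom action, and "ProjectName" stands in for its queryable property):
+	//
+	//	out, err := svc.PollForJobs(&PollForJobsInput{
+	//		ActionTypeId: actionType,
+	//		MaxBatchSize: aws.Int64(1),
+	//		QueryParam:   map[string]*string{"ProjectName": aws.String("MyProject")},
+	//	})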
+ QueryParam map[string]*string `locationName:"queryParam" type:"map"` +} + +// String returns the string representation +func (s PollForJobsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PollForJobsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PollForJobsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PollForJobsInput"} + if s.ActionTypeId == nil { + invalidParams.Add(request.NewErrParamRequired("ActionTypeId")) + } + if s.MaxBatchSize != nil && *s.MaxBatchSize < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxBatchSize", 1)) + } + if s.ActionTypeId != nil { + if err := s.ActionTypeId.Validate(); err != nil { + invalidParams.AddNested("ActionTypeId", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a poll for jobs action. +type PollForJobsOutput struct { + _ struct{} `type:"structure"` + + // Information about the jobs to take action on. + Jobs []*Job `locationName:"jobs" type:"list"` +} + +// String returns the string representation +func (s PollForJobsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PollForJobsOutput) GoString() string { + return s.String() +} + +// Represents the input of a poll for third party jobs action. +type PollForThirdPartyJobsInput struct { + _ struct{} `type:"structure"` + + // Represents information about an action type. + ActionTypeId *ActionTypeId `locationName:"actionTypeId" type:"structure" required:"true"` + + // The maximum number of jobs to return in a poll for jobs call. + MaxBatchSize *int64 `locationName:"maxBatchSize" min:"1" type:"integer"` +} + +// String returns the string representation +func (s PollForThirdPartyJobsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PollForThirdPartyJobsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PollForThirdPartyJobsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PollForThirdPartyJobsInput"} + if s.ActionTypeId == nil { + invalidParams.Add(request.NewErrParamRequired("ActionTypeId")) + } + if s.MaxBatchSize != nil && *s.MaxBatchSize < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxBatchSize", 1)) + } + if s.ActionTypeId != nil { + if err := s.ActionTypeId.Validate(); err != nil { + invalidParams.AddNested("ActionTypeId", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a poll for third party jobs action. +type PollForThirdPartyJobsOutput struct { + _ struct{} `type:"structure"` + + // Information about the jobs to take action on. + Jobs []*ThirdPartyJob `locationName:"jobs" type:"list"` +} + +// String returns the string representation +func (s PollForThirdPartyJobsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PollForThirdPartyJobsOutput) GoString() string { + return s.String() +} + +// Represents the input of a put action revision action. +type PutActionRevisionInput struct { + _ struct{} `type:"structure"` + + // The name of the action that will process the revision. 
+ ActionName *string `locationName:"actionName" min:"1" type:"string" required:"true"` + + // Represents information about the version (or revision) of an action. + ActionRevision *ActionRevision `locationName:"actionRevision" type:"structure" required:"true"` + + // The name of the pipeline that will start processing the revision to the source. + PipelineName *string `locationName:"pipelineName" min:"1" type:"string" required:"true"` + + // The name of the stage that contains the action that will act upon the revision. + StageName *string `locationName:"stageName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutActionRevisionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutActionRevisionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutActionRevisionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutActionRevisionInput"} + if s.ActionName == nil { + invalidParams.Add(request.NewErrParamRequired("ActionName")) + } + if s.ActionName != nil && len(*s.ActionName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ActionName", 1)) + } + if s.ActionRevision == nil { + invalidParams.Add(request.NewErrParamRequired("ActionRevision")) + } + if s.PipelineName == nil { + invalidParams.Add(request.NewErrParamRequired("PipelineName")) + } + if s.PipelineName != nil && len(*s.PipelineName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PipelineName", 1)) + } + if s.StageName == nil { + invalidParams.Add(request.NewErrParamRequired("StageName")) + } + if s.StageName != nil && len(*s.StageName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StageName", 1)) + } + if s.ActionRevision != nil { + if err := s.ActionRevision.Validate(); err != nil { + invalidParams.AddNested("ActionRevision", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a put action revision action. +type PutActionRevisionOutput struct { + _ struct{} `type:"structure"` + + // The new revision number or ID for the revision after the action completes. + NewRevision *bool `locationName:"newRevision" type:"boolean"` + + // The ID of the current workflow state of the pipeline. + PipelineExecutionId *string `locationName:"pipelineExecutionId" type:"string"` +} + +// String returns the string representation +func (s PutActionRevisionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutActionRevisionOutput) GoString() string { + return s.String() +} + +// Represents the input of a put approval result action. +type PutApprovalResultInput struct { + _ struct{} `type:"structure"` + + // The name of the action for which approval is requested. + ActionName *string `locationName:"actionName" min:"1" type:"string" required:"true"` + + // The name of the pipeline that contains the action. + PipelineName *string `locationName:"pipelineName" min:"1" type:"string" required:"true"` + + // Represents information about the result of the approval request. + Result *ApprovalResult `locationName:"result" type:"structure" required:"true"` + + // The name of the stage that contains the action. 
+ StageName *string `locationName:"stageName" min:"1" type:"string" required:"true"` + + // The system-generated token used to identify a unique approval request. The + // token for each open approval request can be obtained using the GetPipelineState + // action and is used to validate that the approval request corresponding to + // this token is still valid. + Token *string `locationName:"token" type:"string"` +} + +// String returns the string representation +func (s PutApprovalResultInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutApprovalResultInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutApprovalResultInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutApprovalResultInput"} + if s.ActionName == nil { + invalidParams.Add(request.NewErrParamRequired("ActionName")) + } + if s.ActionName != nil && len(*s.ActionName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ActionName", 1)) + } + if s.PipelineName == nil { + invalidParams.Add(request.NewErrParamRequired("PipelineName")) + } + if s.PipelineName != nil && len(*s.PipelineName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PipelineName", 1)) + } + if s.Result == nil { + invalidParams.Add(request.NewErrParamRequired("Result")) + } + if s.StageName == nil { + invalidParams.Add(request.NewErrParamRequired("StageName")) + } + if s.StageName != nil && len(*s.StageName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StageName", 1)) + } + if s.Result != nil { + if err := s.Result.Validate(); err != nil { + invalidParams.AddNested("Result", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a put approval result action. +type PutApprovalResultOutput struct { + _ struct{} `type:"structure"` + + // The timestamp showing when the approval or rejection was submitted. + ApprovedAt *time.Time `locationName:"approvedAt" type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s PutApprovalResultOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutApprovalResultOutput) GoString() string { + return s.String() +} + +// Represents the input of a put job failure result action. +type PutJobFailureResultInput struct { + _ struct{} `type:"structure"` + + // The details about the failure of a job. + FailureDetails *FailureDetails `locationName:"failureDetails" type:"structure" required:"true"` + + // The unique system-generated ID of the job that failed. This is the same ID + // returned from PollForJobs. + JobId *string `locationName:"jobId" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutJobFailureResultInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutJobFailureResultInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
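+//
+// A minimal usage sketch (illustrative; assumes a configured client svc and
+// a jobID obtained from PollForJobs):
+//
+//	_, err := svc.PutJobFailureResult(&PutJobFailureResultInput{
+//		JobId: jobID,
+//		FailureDetails: &FailureDetails{
+//			Message: aws.String("build failed"),
+//			Type:    aws.String("JobFailed"),
+//		},
+//	})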
+func (s *PutJobFailureResultInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutJobFailureResultInput"} + if s.FailureDetails == nil { + invalidParams.Add(request.NewErrParamRequired("FailureDetails")) + } + if s.JobId == nil { + invalidParams.Add(request.NewErrParamRequired("JobId")) + } + if s.FailureDetails != nil { + if err := s.FailureDetails.Validate(); err != nil { + invalidParams.AddNested("FailureDetails", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutJobFailureResultOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutJobFailureResultOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutJobFailureResultOutput) GoString() string { + return s.String() +} + +// Represents the input of a put job success result action. +type PutJobSuccessResultInput struct { + _ struct{} `type:"structure"` + + // A token generated by a job worker, such as an AWS CodeDeploy deployment ID, + // that a successful job provides to identify a custom action in progress. Future + // jobs will use this token in order to identify the running instance of the + // action. It can be reused to return additional information about the progress + // of the custom action. When the action is complete, no continuation token + // should be supplied. + ContinuationToken *string `locationName:"continuationToken" type:"string"` + + // The ID of the current revision of the artifact successfully worked upon by + // the job. + CurrentRevision *CurrentRevision `locationName:"currentRevision" type:"structure"` + + // The execution details of the successful job, such as the actions taken by + // the job worker. + ExecutionDetails *ExecutionDetails `locationName:"executionDetails" type:"structure"` + + // The unique system-generated ID of the job that succeeded. This is the same + // ID returned from PollForJobs. + JobId *string `locationName:"jobId" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutJobSuccessResultInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutJobSuccessResultInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutJobSuccessResultInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutJobSuccessResultInput"} + if s.JobId == nil { + invalidParams.Add(request.NewErrParamRequired("JobId")) + } + if s.CurrentRevision != nil { + if err := s.CurrentRevision.Validate(); err != nil { + invalidParams.AddNested("CurrentRevision", err.(request.ErrInvalidParams)) + } + } + if s.ExecutionDetails != nil { + if err := s.ExecutionDetails.Validate(); err != nil { + invalidParams.AddNested("ExecutionDetails", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutJobSuccessResultOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutJobSuccessResultOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutJobSuccessResultOutput) GoString() string { + return s.String() +} + +// Represents the input of a third party job failure result action. 
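+//
+// A minimal usage sketch (illustrative; clientToken and jobID come from the
+// partner integration and a PollForThirdPartyJobs call):
+//
+//	_, err := svc.PutThirdPartyJobFailureResult(&PutThirdPartyJobFailureResultInput{
+//		ClientToken: clientToken,
+//		JobId:       jobID,
+//		FailureDetails: &FailureDetails{
+//			Message: aws.String("deploy failed"),
+//			Type:    aws.String("JobFailed"),
+//		},
+//	})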
+type PutThirdPartyJobFailureResultInput struct { + _ struct{} `type:"structure"` + + // The clientToken portion of the clientId and clientToken pair used to verify + // that the calling entity is allowed access to the job and its details. + ClientToken *string `locationName:"clientToken" type:"string" required:"true"` + + // Represents information about failure details. + FailureDetails *FailureDetails `locationName:"failureDetails" type:"structure" required:"true"` + + // The ID of the job that failed. This is the same ID returned from PollForThirdPartyJobs. + JobId *string `locationName:"jobId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutThirdPartyJobFailureResultInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutThirdPartyJobFailureResultInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutThirdPartyJobFailureResultInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutThirdPartyJobFailureResultInput"} + if s.ClientToken == nil { + invalidParams.Add(request.NewErrParamRequired("ClientToken")) + } + if s.FailureDetails == nil { + invalidParams.Add(request.NewErrParamRequired("FailureDetails")) + } + if s.JobId == nil { + invalidParams.Add(request.NewErrParamRequired("JobId")) + } + if s.JobId != nil && len(*s.JobId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JobId", 1)) + } + if s.FailureDetails != nil { + if err := s.FailureDetails.Validate(); err != nil { + invalidParams.AddNested("FailureDetails", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutThirdPartyJobFailureResultOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutThirdPartyJobFailureResultOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutThirdPartyJobFailureResultOutput) GoString() string { + return s.String() +} + +// Represents the input of a put third party job success result action. +type PutThirdPartyJobSuccessResultInput struct { + _ struct{} `type:"structure"` + + // The clientToken portion of the clientId and clientToken pair used to verify + // that the calling entity is allowed access to the job and its details. + ClientToken *string `locationName:"clientToken" type:"string" required:"true"` + + // A token generated by a job worker, such as an AWS CodeDeploy deployment ID, + // that a successful job provides to identify a partner action in progress. + // Future jobs will use this token in order to identify the running instance + // of the action. It can be reused to return additional information about the + // progress of the partner action. When the action is complete, no continuation + // token should be supplied. + ContinuationToken *string `locationName:"continuationToken" type:"string"` + + // Represents information about a current revision. + CurrentRevision *CurrentRevision `locationName:"currentRevision" type:"structure"` + + // The details of the actions taken and results produced on an artifact as it + // passes through stages in the pipeline. + ExecutionDetails *ExecutionDetails `locationName:"executionDetails" type:"structure"` + + // The ID of the job that successfully completed. This is the same ID returned + // from PollForThirdPartyJobs. 
+ JobId *string `locationName:"jobId" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s PutThirdPartyJobSuccessResultInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutThirdPartyJobSuccessResultInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutThirdPartyJobSuccessResultInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutThirdPartyJobSuccessResultInput"}
+ if s.ClientToken == nil {
+ invalidParams.Add(request.NewErrParamRequired("ClientToken"))
+ }
+ if s.JobId == nil {
+ invalidParams.Add(request.NewErrParamRequired("JobId"))
+ }
+ if s.JobId != nil && len(*s.JobId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("JobId", 1))
+ }
+ if s.CurrentRevision != nil {
+ if err := s.CurrentRevision.Validate(); err != nil {
+ invalidParams.AddNested("CurrentRevision", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.ExecutionDetails != nil {
+ if err := s.ExecutionDetails.Validate(); err != nil {
+ invalidParams.AddNested("ExecutionDetails", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type PutThirdPartyJobSuccessResultOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutThirdPartyJobSuccessResultOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutThirdPartyJobSuccessResultOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a retry stage execution action.
+type RetryStageExecutionInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the pipeline execution in the failed stage to be retried. Use the
+ // GetPipelineState action to retrieve the current pipelineExecutionId of the
+ // failed stage.
+ PipelineExecutionId *string `locationName:"pipelineExecutionId" type:"string" required:"true"`
+
+ // The name of the pipeline that contains the failed stage.
+ PipelineName *string `locationName:"pipelineName" min:"1" type:"string" required:"true"`
+
+ // The scope of the retry attempt. Currently, the only supported value is FAILED_ACTIONS.
+ RetryMode *string `locationName:"retryMode" type:"string" required:"true" enum:"StageRetryMode"`
+
+ // The name of the failed stage to be retried.
+ StageName *string `locationName:"stageName" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RetryStageExecutionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RetryStageExecutionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RetryStageExecutionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RetryStageExecutionInput"} + if s.PipelineExecutionId == nil { + invalidParams.Add(request.NewErrParamRequired("PipelineExecutionId")) + } + if s.PipelineName == nil { + invalidParams.Add(request.NewErrParamRequired("PipelineName")) + } + if s.PipelineName != nil && len(*s.PipelineName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PipelineName", 1)) + } + if s.RetryMode == nil { + invalidParams.Add(request.NewErrParamRequired("RetryMode")) + } + if s.StageName == nil { + invalidParams.Add(request.NewErrParamRequired("StageName")) + } + if s.StageName != nil && len(*s.StageName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StageName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a retry stage execution action. +type RetryStageExecutionOutput struct { + _ struct{} `type:"structure"` + + // The ID of the current workflow execution in the failed stage. + PipelineExecutionId *string `locationName:"pipelineExecutionId" type:"string"` +} + +// String returns the string representation +func (s RetryStageExecutionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RetryStageExecutionOutput) GoString() string { + return s.String() +} + +// The location of the Amazon S3 bucket that contains a revision. +type S3ArtifactLocation struct { + _ struct{} `type:"structure"` + + // The name of the Amazon S3 bucket. + BucketName *string `locationName:"bucketName" type:"string" required:"true"` + + // The key of the object in the Amazon S3 bucket, which uniquely identifies + // the object in the bucket. + ObjectKey *string `locationName:"objectKey" type:"string" required:"true"` +} + +// String returns the string representation +func (s S3ArtifactLocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3ArtifactLocation) GoString() string { + return s.String() +} + +// Represents information about a stage to a job worker. +type StageContext struct { + _ struct{} `type:"structure"` + + // The name of the stage. + Name *string `locationName:"name" min:"1" type:"string"` +} + +// String returns the string representation +func (s StageContext) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StageContext) GoString() string { + return s.String() +} + +// Represents information about a stage and its definition. +type StageDeclaration struct { + _ struct{} `type:"structure"` + + // The actions included in a stage. + Actions []*ActionDeclaration `locationName:"actions" type:"list" required:"true"` + + // Reserved for future use. + Blockers []*BlockerDeclaration `locationName:"blockers" type:"list"` + + // The name of the stage. + Name *string `locationName:"name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s StageDeclaration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StageDeclaration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *StageDeclaration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StageDeclaration"} + if s.Actions == nil { + invalidParams.Add(request.NewErrParamRequired("Actions")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Actions != nil { + for i, v := range s.Actions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Actions", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Blockers != nil { + for i, v := range s.Blockers { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Blockers", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents information about the run of a stage. +type StageExecution struct { + _ struct{} `type:"structure"` + + // The ID of the pipeline execution associated with the stage. + PipelineExecutionId *string `locationName:"pipelineExecutionId" type:"string" required:"true"` + + // The status of the stage, or for a completed stage, the last status of the + // stage. + Status *string `locationName:"status" type:"string" required:"true" enum:"StageExecutionStatus"` +} + +// String returns the string representation +func (s StageExecution) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StageExecution) GoString() string { + return s.String() +} + +// Represents information about the state of the stage. +type StageState struct { + _ struct{} `type:"structure"` + + // The state of the stage. + ActionStates []*ActionState `locationName:"actionStates" type:"list"` + + // The state of the inbound transition, which is either enabled or disabled. + InboundTransitionState *TransitionState `locationName:"inboundTransitionState" type:"structure"` + + // Information about the latest execution in the stage, including its ID and + // status. + LatestExecution *StageExecution `locationName:"latestExecution" type:"structure"` + + // The name of the stage. + StageName *string `locationName:"stageName" min:"1" type:"string"` +} + +// String returns the string representation +func (s StageState) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StageState) GoString() string { + return s.String() +} + +// Represents the input of a start pipeline execution action. +type StartPipelineExecutionInput struct { + _ struct{} `type:"structure"` + + // The name of the pipeline to start. + Name *string `locationName:"name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s StartPipelineExecutionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartPipelineExecutionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *StartPipelineExecutionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "StartPipelineExecutionInput"}
+ if s.Name == nil {
+ invalidParams.Add(request.NewErrParamRequired("Name"))
+ }
+ if s.Name != nil && len(*s.Name) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Name", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the output of a start pipeline execution action.
+type StartPipelineExecutionOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The unique system-generated ID of the pipeline that was started.
+ PipelineExecutionId *string `locationName:"pipelineExecutionId" type:"string"`
+}
+
+// String returns the string representation
+func (s StartPipelineExecutionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StartPipelineExecutionOutput) GoString() string {
+ return s.String()
+}
+
+// A response to a PollForThirdPartyJobs request returned by AWS CodePipeline
+// when there is a job to be worked upon by a partner action.
+type ThirdPartyJob struct {
+ _ struct{} `type:"structure"`
+
+ // The clientToken portion of the clientId and clientToken pair used to verify
+ // that the calling entity is allowed access to the job and its details.
+ ClientId *string `locationName:"clientId" type:"string"`
+
+ // The identifier used to identify the job in AWS CodePipeline.
+ JobId *string `locationName:"jobId" type:"string"`
+}
+
+// String returns the string representation
+func (s ThirdPartyJob) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ThirdPartyJob) GoString() string {
+ return s.String()
+}
+
+// Represents information about the job data for a partner action.
+type ThirdPartyJobData struct {
+ _ struct{} `type:"structure"`
+
+ // Represents information about an action configuration.
+ ActionConfiguration *ActionConfiguration `locationName:"actionConfiguration" type:"structure"`
+
+ // Represents information about an action type.
+ ActionTypeId *ActionTypeId `locationName:"actionTypeId" type:"structure"`
+
+ // Represents an AWS session credentials object. These credentials are temporary
+ // credentials that are issued by AWS Security Token Service (STS). They can be
+ // used to access input and output artifacts in the Amazon S3 bucket used to
+ // store artifacts for the pipeline in AWS CodePipeline.
+ ArtifactCredentials *AWSSessionCredentials `locationName:"artifactCredentials" type:"structure"`
+
+ // A system-generated token, such as an AWS CodeDeploy deployment ID, that a
+ // job requires in order to continue the job asynchronously.
+ ContinuationToken *string `locationName:"continuationToken" type:"string"`
+
+ // The encryption key used to encrypt and decrypt data in the artifact store
+ // for the pipeline, such as an AWS Key Management Service (AWS KMS) key. This
+ // is optional and might not be present.
+ EncryptionKey *EncryptionKey `locationName:"encryptionKey" type:"structure"`
+
+ // The name of the artifact that will be worked upon by the action, if any.
+ // This name might be system-generated, such as "MyApp", or might be defined
+ // by the user when the action is created. The input artifact name must match
+ // the name of an output artifact generated by an action in an earlier action
+ // or stage of the pipeline.
+ InputArtifacts []*Artifact `locationName:"inputArtifacts" type:"list"` + + // The name of the artifact that will be the result of the action, if any. This + // name might be system-generated, such as "MyBuiltApp", or might be defined + // by the user when the action is created. + OutputArtifacts []*Artifact `locationName:"outputArtifacts" type:"list"` + + // Represents information about a pipeline to a job worker. + PipelineContext *PipelineContext `locationName:"pipelineContext" type:"structure"` +} + +// String returns the string representation +func (s ThirdPartyJobData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ThirdPartyJobData) GoString() string { + return s.String() +} + +// The details of a job sent in response to a GetThirdPartyJobDetails request. +type ThirdPartyJobDetails struct { + _ struct{} `type:"structure"` + + // The data to be returned by the third party job worker. + Data *ThirdPartyJobData `locationName:"data" type:"structure"` + + // The identifier used to identify the job details in AWS CodePipeline. + Id *string `locationName:"id" min:"1" type:"string"` + + // A system-generated random number that AWS CodePipeline uses to ensure that + // the job is being worked on by only one job worker. This number must be returned + // in the response. + Nonce *string `locationName:"nonce" type:"string"` +} + +// String returns the string representation +func (s ThirdPartyJobDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ThirdPartyJobDetails) GoString() string { + return s.String() +} + +// Represents information about the state of transitions between one stage and +// another stage. +type TransitionState struct { + _ struct{} `type:"structure"` + + // The user-specified reason why the transition between two stages of a pipeline + // was disabled. + DisabledReason *string `locationName:"disabledReason" min:"1" type:"string"` + + // Whether the transition between stages is enabled (true) or disabled (false). + Enabled *bool `locationName:"enabled" type:"boolean"` + + // The timestamp when the transition state was last changed. + LastChangedAt *time.Time `locationName:"lastChangedAt" type:"timestamp" timestampFormat:"unix"` + + // The ID of the user who last changed the transition state. + LastChangedBy *string `locationName:"lastChangedBy" type:"string"` +} + +// String returns the string representation +func (s TransitionState) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransitionState) GoString() string { + return s.String() +} + +// Represents the input of an update pipeline action. +type UpdatePipelineInput struct { + _ struct{} `type:"structure"` + + // The name of the pipeline to be updated. + Pipeline *PipelineDeclaration `locationName:"pipeline" type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdatePipelineInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdatePipelineInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdatePipelineInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdatePipelineInput"} + if s.Pipeline == nil { + invalidParams.Add(request.NewErrParamRequired("Pipeline")) + } + if s.Pipeline != nil { + if err := s.Pipeline.Validate(); err != nil { + invalidParams.AddNested("Pipeline", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of an update pipeline action. +type UpdatePipelineOutput struct { + _ struct{} `type:"structure"` + + // The structure of the updated pipeline. + Pipeline *PipelineDeclaration `locationName:"pipeline" type:"structure"` +} + +// String returns the string representation +func (s UpdatePipelineOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdatePipelineOutput) GoString() string { + return s.String() +} + +const ( + // @enum ActionCategory + ActionCategorySource = "Source" + // @enum ActionCategory + ActionCategoryBuild = "Build" + // @enum ActionCategory + ActionCategoryDeploy = "Deploy" + // @enum ActionCategory + ActionCategoryTest = "Test" + // @enum ActionCategory + ActionCategoryInvoke = "Invoke" + // @enum ActionCategory + ActionCategoryApproval = "Approval" +) + +const ( + // @enum ActionConfigurationPropertyType + ActionConfigurationPropertyTypeString = "String" + // @enum ActionConfigurationPropertyType + ActionConfigurationPropertyTypeNumber = "Number" + // @enum ActionConfigurationPropertyType + ActionConfigurationPropertyTypeBoolean = "Boolean" +) + +const ( + // @enum ActionExecutionStatus + ActionExecutionStatusInProgress = "InProgress" + // @enum ActionExecutionStatus + ActionExecutionStatusSucceeded = "Succeeded" + // @enum ActionExecutionStatus + ActionExecutionStatusFailed = "Failed" +) + +const ( + // @enum ActionOwner + ActionOwnerAws = "AWS" + // @enum ActionOwner + ActionOwnerThirdParty = "ThirdParty" + // @enum ActionOwner + ActionOwnerCustom = "Custom" +) + +const ( + // @enum ApprovalStatus + ApprovalStatusApproved = "Approved" + // @enum ApprovalStatus + ApprovalStatusRejected = "Rejected" +) + +const ( + // @enum ArtifactLocationType + ArtifactLocationTypeS3 = "S3" +) + +const ( + // @enum ArtifactStoreType + ArtifactStoreTypeS3 = "S3" +) + +const ( + // @enum BlockerType + BlockerTypeSchedule = "Schedule" +) + +const ( + // @enum EncryptionKeyType + EncryptionKeyTypeKms = "KMS" +) + +const ( + // @enum FailureType + FailureTypeJobFailed = "JobFailed" + // @enum FailureType + FailureTypeConfigurationError = "ConfigurationError" + // @enum FailureType + FailureTypePermissionError = "PermissionError" + // @enum FailureType + FailureTypeRevisionOutOfSync = "RevisionOutOfSync" + // @enum FailureType + FailureTypeRevisionUnavailable = "RevisionUnavailable" + // @enum FailureType + FailureTypeSystemUnavailable = "SystemUnavailable" +) + +const ( + // @enum JobStatus + JobStatusCreated = "Created" + // @enum JobStatus + JobStatusQueued = "Queued" + // @enum JobStatus + JobStatusDispatched = "Dispatched" + // @enum JobStatus + JobStatusInProgress = "InProgress" + // @enum JobStatus + JobStatusTimedOut = "TimedOut" + // @enum JobStatus + JobStatusSucceeded = "Succeeded" + // @enum JobStatus + JobStatusFailed = "Failed" +) + +const ( + // @enum StageExecutionStatus + StageExecutionStatusInProgress = "InProgress" + // @enum StageExecutionStatus + StageExecutionStatusFailed = "Failed" + // @enum StageExecutionStatus + StageExecutionStatusSucceeded 
= "Succeeded" +) + +const ( + // @enum StageRetryMode + StageRetryModeFailedActions = "FAILED_ACTIONS" +) + +const ( + // @enum StageTransitionType + StageTransitionTypeInbound = "Inbound" + // @enum StageTransitionType + StageTransitionTypeOutbound = "Outbound" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/codepipeline/codepipelineiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/codepipeline/codepipelineiface/interface.go new file mode 100644 index 000000000..612fd9682 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/codepipeline/codepipelineiface/interface.go @@ -0,0 +1,114 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package codepipelineiface provides an interface for the AWS CodePipeline. +package codepipelineiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/codepipeline" +) + +// CodePipelineAPI is the interface type for codepipeline.CodePipeline. +type CodePipelineAPI interface { + AcknowledgeJobRequest(*codepipeline.AcknowledgeJobInput) (*request.Request, *codepipeline.AcknowledgeJobOutput) + + AcknowledgeJob(*codepipeline.AcknowledgeJobInput) (*codepipeline.AcknowledgeJobOutput, error) + + AcknowledgeThirdPartyJobRequest(*codepipeline.AcknowledgeThirdPartyJobInput) (*request.Request, *codepipeline.AcknowledgeThirdPartyJobOutput) + + AcknowledgeThirdPartyJob(*codepipeline.AcknowledgeThirdPartyJobInput) (*codepipeline.AcknowledgeThirdPartyJobOutput, error) + + CreateCustomActionTypeRequest(*codepipeline.CreateCustomActionTypeInput) (*request.Request, *codepipeline.CreateCustomActionTypeOutput) + + CreateCustomActionType(*codepipeline.CreateCustomActionTypeInput) (*codepipeline.CreateCustomActionTypeOutput, error) + + CreatePipelineRequest(*codepipeline.CreatePipelineInput) (*request.Request, *codepipeline.CreatePipelineOutput) + + CreatePipeline(*codepipeline.CreatePipelineInput) (*codepipeline.CreatePipelineOutput, error) + + DeleteCustomActionTypeRequest(*codepipeline.DeleteCustomActionTypeInput) (*request.Request, *codepipeline.DeleteCustomActionTypeOutput) + + DeleteCustomActionType(*codepipeline.DeleteCustomActionTypeInput) (*codepipeline.DeleteCustomActionTypeOutput, error) + + DeletePipelineRequest(*codepipeline.DeletePipelineInput) (*request.Request, *codepipeline.DeletePipelineOutput) + + DeletePipeline(*codepipeline.DeletePipelineInput) (*codepipeline.DeletePipelineOutput, error) + + DisableStageTransitionRequest(*codepipeline.DisableStageTransitionInput) (*request.Request, *codepipeline.DisableStageTransitionOutput) + + DisableStageTransition(*codepipeline.DisableStageTransitionInput) (*codepipeline.DisableStageTransitionOutput, error) + + EnableStageTransitionRequest(*codepipeline.EnableStageTransitionInput) (*request.Request, *codepipeline.EnableStageTransitionOutput) + + EnableStageTransition(*codepipeline.EnableStageTransitionInput) (*codepipeline.EnableStageTransitionOutput, error) + + GetJobDetailsRequest(*codepipeline.GetJobDetailsInput) (*request.Request, *codepipeline.GetJobDetailsOutput) + + GetJobDetails(*codepipeline.GetJobDetailsInput) (*codepipeline.GetJobDetailsOutput, error) + + GetPipelineRequest(*codepipeline.GetPipelineInput) (*request.Request, *codepipeline.GetPipelineOutput) + + GetPipeline(*codepipeline.GetPipelineInput) (*codepipeline.GetPipelineOutput, error) + + GetPipelineStateRequest(*codepipeline.GetPipelineStateInput) (*request.Request, *codepipeline.GetPipelineStateOutput) + + GetPipelineState(*codepipeline.GetPipelineStateInput) 
(*codepipeline.GetPipelineStateOutput, error)
+
+ GetThirdPartyJobDetailsRequest(*codepipeline.GetThirdPartyJobDetailsInput) (*request.Request, *codepipeline.GetThirdPartyJobDetailsOutput)
+
+ GetThirdPartyJobDetails(*codepipeline.GetThirdPartyJobDetailsInput) (*codepipeline.GetThirdPartyJobDetailsOutput, error)
+
+ ListActionTypesRequest(*codepipeline.ListActionTypesInput) (*request.Request, *codepipeline.ListActionTypesOutput)
+
+ ListActionTypes(*codepipeline.ListActionTypesInput) (*codepipeline.ListActionTypesOutput, error)
+
+ ListPipelinesRequest(*codepipeline.ListPipelinesInput) (*request.Request, *codepipeline.ListPipelinesOutput)
+
+ ListPipelines(*codepipeline.ListPipelinesInput) (*codepipeline.ListPipelinesOutput, error)
+
+ PollForJobsRequest(*codepipeline.PollForJobsInput) (*request.Request, *codepipeline.PollForJobsOutput)
+
+ PollForJobs(*codepipeline.PollForJobsInput) (*codepipeline.PollForJobsOutput, error)
+
+ PollForThirdPartyJobsRequest(*codepipeline.PollForThirdPartyJobsInput) (*request.Request, *codepipeline.PollForThirdPartyJobsOutput)
+
+ PollForThirdPartyJobs(*codepipeline.PollForThirdPartyJobsInput) (*codepipeline.PollForThirdPartyJobsOutput, error)
+
+ PutActionRevisionRequest(*codepipeline.PutActionRevisionInput) (*request.Request, *codepipeline.PutActionRevisionOutput)
+
+ PutActionRevision(*codepipeline.PutActionRevisionInput) (*codepipeline.PutActionRevisionOutput, error)
+
+ PutApprovalResultRequest(*codepipeline.PutApprovalResultInput) (*request.Request, *codepipeline.PutApprovalResultOutput)
+
+ PutApprovalResult(*codepipeline.PutApprovalResultInput) (*codepipeline.PutApprovalResultOutput, error)
+
+ PutJobFailureResultRequest(*codepipeline.PutJobFailureResultInput) (*request.Request, *codepipeline.PutJobFailureResultOutput)
+
+ PutJobFailureResult(*codepipeline.PutJobFailureResultInput) (*codepipeline.PutJobFailureResultOutput, error)
+
+ PutJobSuccessResultRequest(*codepipeline.PutJobSuccessResultInput) (*request.Request, *codepipeline.PutJobSuccessResultOutput)
+
+ PutJobSuccessResult(*codepipeline.PutJobSuccessResultInput) (*codepipeline.PutJobSuccessResultOutput, error)
+
+ PutThirdPartyJobFailureResultRequest(*codepipeline.PutThirdPartyJobFailureResultInput) (*request.Request, *codepipeline.PutThirdPartyJobFailureResultOutput)
+
+ PutThirdPartyJobFailureResult(*codepipeline.PutThirdPartyJobFailureResultInput) (*codepipeline.PutThirdPartyJobFailureResultOutput, error)
+
+ PutThirdPartyJobSuccessResultRequest(*codepipeline.PutThirdPartyJobSuccessResultInput) (*request.Request, *codepipeline.PutThirdPartyJobSuccessResultOutput)
+
+ PutThirdPartyJobSuccessResult(*codepipeline.PutThirdPartyJobSuccessResultInput) (*codepipeline.PutThirdPartyJobSuccessResultOutput, error)
+
+ RetryStageExecutionRequest(*codepipeline.RetryStageExecutionInput) (*request.Request, *codepipeline.RetryStageExecutionOutput)
+
+ RetryStageExecution(*codepipeline.RetryStageExecutionInput) (*codepipeline.RetryStageExecutionOutput, error)
+
+ StartPipelineExecutionRequest(*codepipeline.StartPipelineExecutionInput) (*request.Request, *codepipeline.StartPipelineExecutionOutput)
+
+ StartPipelineExecution(*codepipeline.StartPipelineExecutionInput) (*codepipeline.StartPipelineExecutionOutput, error)
+
+ UpdatePipelineRequest(*codepipeline.UpdatePipelineInput) (*request.Request, *codepipeline.UpdatePipelineOutput)
+
+ UpdatePipeline(*codepipeline.UpdatePipelineInput) (*codepipeline.UpdatePipelineOutput, error)
+}
+
+var _ CodePipelineAPI = (*codepipeline.CodePipeline)(nil)
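+
+// An editorial sketch, not part of the generated file: because every operation
+// hangs off CodePipelineAPI, a test can embed the interface in a struct and
+// override only the methods it needs. The mock type below is hypothetical:
+//
+//	type mockCodePipeline struct {
+//		codepipelineiface.CodePipelineAPI
+//	}
+//
+//	func (m *mockCodePipeline) ListPipelines(in *codepipeline.ListPipelinesInput) (*codepipeline.ListPipelinesOutput, error) {
+//		return &codepipeline.ListPipelinesOutput{}, nil
+//	}
diff --git 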
a/vendor/github.com/aws/aws-sdk-go/service/codepipeline/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/codepipeline/examples_test.go new file mode 100644 index 000000000..1cba0713d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/codepipeline/examples_test.go @@ -0,0 +1,707 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package codepipeline_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/codepipeline" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleCodePipeline_AcknowledgeJob() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.AcknowledgeJobInput{ + JobId: aws.String("JobId"), // Required + Nonce: aws.String("Nonce"), // Required + } + resp, err := svc.AcknowledgeJob(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodePipeline_AcknowledgeThirdPartyJob() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.AcknowledgeThirdPartyJobInput{ + ClientToken: aws.String("ClientToken"), // Required + JobId: aws.String("ThirdPartyJobId"), // Required + Nonce: aws.String("Nonce"), // Required + } + resp, err := svc.AcknowledgeThirdPartyJob(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodePipeline_CreateCustomActionType() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.CreateCustomActionTypeInput{ + Category: aws.String("ActionCategory"), // Required + InputArtifactDetails: &codepipeline.ArtifactDetails{ // Required + MaximumCount: aws.Int64(1), // Required + MinimumCount: aws.Int64(1), // Required + }, + OutputArtifactDetails: &codepipeline.ArtifactDetails{ // Required + MaximumCount: aws.Int64(1), // Required + MinimumCount: aws.Int64(1), // Required + }, + Provider: aws.String("ActionProvider"), // Required + Version: aws.String("Version"), // Required + ConfigurationProperties: []*codepipeline.ActionConfigurationProperty{ + { // Required + Key: aws.Bool(true), // Required + Name: aws.String("ActionConfigurationKey"), // Required + Required: aws.Bool(true), // Required + Secret: aws.Bool(true), // Required + Description: aws.String("Description"), + Queryable: aws.Bool(true), + Type: aws.String("ActionConfigurationPropertyType"), + }, + // More values... + }, + Settings: &codepipeline.ActionTypeSettings{ + EntityUrlTemplate: aws.String("UrlTemplate"), + ExecutionUrlTemplate: aws.String("UrlTemplate"), + RevisionUrlTemplate: aws.String("UrlTemplate"), + ThirdPartyConfigurationUrl: aws.String("Url"), + }, + } + resp, err := svc.CreateCustomActionType(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCodePipeline_CreatePipeline() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.CreatePipelineInput{ + Pipeline: &codepipeline.PipelineDeclaration{ // Required + ArtifactStore: &codepipeline.ArtifactStore{ // Required + Location: aws.String("ArtifactStoreLocation"), // Required + Type: aws.String("ArtifactStoreType"), // Required + EncryptionKey: &codepipeline.EncryptionKey{ + Id: aws.String("EncryptionKeyId"), // Required + Type: aws.String("EncryptionKeyType"), // Required + }, + }, + Name: aws.String("PipelineName"), // Required + RoleArn: aws.String("RoleArn"), // Required + Stages: []*codepipeline.StageDeclaration{ // Required + { // Required + Actions: []*codepipeline.ActionDeclaration{ // Required + { // Required + ActionTypeId: &codepipeline.ActionTypeId{ // Required + Category: aws.String("ActionCategory"), // Required + Owner: aws.String("ActionOwner"), // Required + Provider: aws.String("ActionProvider"), // Required + Version: aws.String("Version"), // Required + }, + Name: aws.String("ActionName"), // Required + Configuration: map[string]*string{ + "Key": aws.String("ActionConfigurationValue"), // Required + // More values... + }, + InputArtifacts: []*codepipeline.InputArtifact{ + { // Required + Name: aws.String("ArtifactName"), // Required + }, + // More values... + }, + OutputArtifacts: []*codepipeline.OutputArtifact{ + { // Required + Name: aws.String("ArtifactName"), // Required + }, + // More values... + }, + RoleArn: aws.String("RoleArn"), + RunOrder: aws.Int64(1), + }, + // More values... + }, + Name: aws.String("StageName"), // Required + Blockers: []*codepipeline.BlockerDeclaration{ + { // Required + Name: aws.String("BlockerName"), // Required + Type: aws.String("BlockerType"), // Required + }, + // More values... + }, + }, + // More values... + }, + Version: aws.Int64(1), + }, + } + resp, err := svc.CreatePipeline(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodePipeline_DeleteCustomActionType() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.DeleteCustomActionTypeInput{ + Category: aws.String("ActionCategory"), // Required + Provider: aws.String("ActionProvider"), // Required + Version: aws.String("Version"), // Required + } + resp, err := svc.DeleteCustomActionType(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodePipeline_DeletePipeline() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.DeletePipelineInput{ + Name: aws.String("PipelineName"), // Required + } + resp, err := svc.DeletePipeline(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCodePipeline_DisableStageTransition() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.DisableStageTransitionInput{ + PipelineName: aws.String("PipelineName"), // Required + Reason: aws.String("DisabledReason"), // Required + StageName: aws.String("StageName"), // Required + TransitionType: aws.String("StageTransitionType"), // Required + } + resp, err := svc.DisableStageTransition(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodePipeline_EnableStageTransition() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.EnableStageTransitionInput{ + PipelineName: aws.String("PipelineName"), // Required + StageName: aws.String("StageName"), // Required + TransitionType: aws.String("StageTransitionType"), // Required + } + resp, err := svc.EnableStageTransition(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodePipeline_GetJobDetails() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.GetJobDetailsInput{ + JobId: aws.String("JobId"), // Required + } + resp, err := svc.GetJobDetails(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodePipeline_GetPipeline() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.GetPipelineInput{ + Name: aws.String("PipelineName"), // Required + Version: aws.Int64(1), + } + resp, err := svc.GetPipeline(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodePipeline_GetPipelineState() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.GetPipelineStateInput{ + Name: aws.String("PipelineName"), // Required + } + resp, err := svc.GetPipelineState(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodePipeline_GetThirdPartyJobDetails() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.GetThirdPartyJobDetailsInput{ + ClientToken: aws.String("ClientToken"), // Required + JobId: aws.String("ThirdPartyJobId"), // Required + } + resp, err := svc.GetThirdPartyJobDetails(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodePipeline_ListActionTypes() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.ListActionTypesInput{ + ActionOwnerFilter: aws.String("ActionOwner"), + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListActionTypes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodePipeline_ListPipelines() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.ListPipelinesInput{ + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListPipelines(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodePipeline_PollForJobs() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.PollForJobsInput{ + ActionTypeId: &codepipeline.ActionTypeId{ // Required + Category: aws.String("ActionCategory"), // Required + Owner: aws.String("ActionOwner"), // Required + Provider: aws.String("ActionProvider"), // Required + Version: aws.String("Version"), // Required + }, + MaxBatchSize: aws.Int64(1), + QueryParam: map[string]*string{ + "Key": aws.String("ActionConfigurationQueryableValue"), // Required + // More values... + }, + } + resp, err := svc.PollForJobs(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodePipeline_PollForThirdPartyJobs() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.PollForThirdPartyJobsInput{ + ActionTypeId: &codepipeline.ActionTypeId{ // Required + Category: aws.String("ActionCategory"), // Required + Owner: aws.String("ActionOwner"), // Required + Provider: aws.String("ActionProvider"), // Required + Version: aws.String("Version"), // Required + }, + MaxBatchSize: aws.Int64(1), + } + resp, err := svc.PollForThirdPartyJobs(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodePipeline_PutActionRevision() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.PutActionRevisionInput{ + ActionName: aws.String("ActionName"), // Required + ActionRevision: &codepipeline.ActionRevision{ // Required + Created: aws.Time(time.Now()), // Required + RevisionChangeId: aws.String("RevisionChangeIdentifier"), // Required + RevisionId: aws.String("Revision"), // Required + }, + PipelineName: aws.String("PipelineName"), // Required + StageName: aws.String("StageName"), // Required + } + resp, err := svc.PutActionRevision(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodePipeline_PutApprovalResult() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.PutApprovalResultInput{ + ActionName: aws.String("ActionName"), // Required + PipelineName: aws.String("PipelineName"), // Required + Result: &codepipeline.ApprovalResult{ // Required + Status: aws.String("ApprovalStatus"), // Required + Summary: aws.String("ApprovalSummary"), // Required + }, + StageName: aws.String("StageName"), // Required + Token: aws.String("ApprovalToken"), + } + resp, err := svc.PutApprovalResult(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodePipeline_PutJobFailureResult() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.PutJobFailureResultInput{ + FailureDetails: &codepipeline.FailureDetails{ // Required + Message: aws.String("Message"), // Required + Type: aws.String("FailureType"), // Required + ExternalExecutionId: aws.String("ExecutionId"), + }, + JobId: aws.String("JobId"), // Required + } + resp, err := svc.PutJobFailureResult(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodePipeline_PutJobSuccessResult() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.PutJobSuccessResultInput{ + JobId: aws.String("JobId"), // Required + ContinuationToken: aws.String("ContinuationToken"), + CurrentRevision: &codepipeline.CurrentRevision{ + ChangeIdentifier: aws.String("RevisionChangeIdentifier"), // Required + Revision: aws.String("Revision"), // Required + }, + ExecutionDetails: &codepipeline.ExecutionDetails{ + ExternalExecutionId: aws.String("ExecutionId"), + PercentComplete: aws.Int64(1), + Summary: aws.String("ExecutionSummary"), + }, + } + resp, err := svc.PutJobSuccessResult(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodePipeline_PutThirdPartyJobFailureResult() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.PutThirdPartyJobFailureResultInput{ + ClientToken: aws.String("ClientToken"), // Required + FailureDetails: &codepipeline.FailureDetails{ // Required + Message: aws.String("Message"), // Required + Type: aws.String("FailureType"), // Required + ExternalExecutionId: aws.String("ExecutionId"), + }, + JobId: aws.String("ThirdPartyJobId"), // Required + } + resp, err := svc.PutThirdPartyJobFailureResult(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodePipeline_PutThirdPartyJobSuccessResult() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.PutThirdPartyJobSuccessResultInput{ + ClientToken: aws.String("ClientToken"), // Required + JobId: aws.String("ThirdPartyJobId"), // Required + ContinuationToken: aws.String("ContinuationToken"), + CurrentRevision: &codepipeline.CurrentRevision{ + ChangeIdentifier: aws.String("RevisionChangeIdentifier"), // Required + Revision: aws.String("Revision"), // Required + }, + ExecutionDetails: &codepipeline.ExecutionDetails{ + ExternalExecutionId: aws.String("ExecutionId"), + PercentComplete: aws.Int64(1), + Summary: aws.String("ExecutionSummary"), + }, + } + resp, err := svc.PutThirdPartyJobSuccessResult(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCodePipeline_RetryStageExecution() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.RetryStageExecutionInput{ + PipelineExecutionId: aws.String("PipelineExecutionId"), // Required + PipelineName: aws.String("PipelineName"), // Required + RetryMode: aws.String("StageRetryMode"), // Required + StageName: aws.String("StageName"), // Required + } + resp, err := svc.RetryStageExecution(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodePipeline_StartPipelineExecution() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.StartPipelineExecutionInput{ + Name: aws.String("PipelineName"), // Required + } + resp, err := svc.StartPipelineExecution(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodePipeline_UpdatePipeline() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.UpdatePipelineInput{ + Pipeline: &codepipeline.PipelineDeclaration{ // Required + ArtifactStore: &codepipeline.ArtifactStore{ // Required + Location: aws.String("ArtifactStoreLocation"), // Required + Type: aws.String("ArtifactStoreType"), // Required + EncryptionKey: &codepipeline.EncryptionKey{ + Id: aws.String("EncryptionKeyId"), // Required + Type: aws.String("EncryptionKeyType"), // Required + }, + }, + Name: aws.String("PipelineName"), // Required + RoleArn: aws.String("RoleArn"), // Required + Stages: []*codepipeline.StageDeclaration{ // Required + { // Required + Actions: []*codepipeline.ActionDeclaration{ // Required + { // Required + ActionTypeId: &codepipeline.ActionTypeId{ // Required + Category: aws.String("ActionCategory"), // Required + Owner: aws.String("ActionOwner"), // Required + Provider: aws.String("ActionProvider"), // Required + Version: aws.String("Version"), // Required + }, + Name: aws.String("ActionName"), // Required + Configuration: map[string]*string{ + "Key": aws.String("ActionConfigurationValue"), // Required + // More values... + }, + InputArtifacts: []*codepipeline.InputArtifact{ + { // Required + Name: aws.String("ArtifactName"), // Required + }, + // More values... + }, + OutputArtifacts: []*codepipeline.OutputArtifact{ + { // Required + Name: aws.String("ArtifactName"), // Required + }, + // More values... + }, + RoleArn: aws.String("RoleArn"), + RunOrder: aws.Int64(1), + }, + // More values... + }, + Name: aws.String("StageName"), // Required + Blockers: []*codepipeline.BlockerDeclaration{ + { // Required + Name: aws.String("BlockerName"), // Required + Type: aws.String("BlockerType"), // Required + }, + // More values... + }, + }, + // More values... + }, + Version: aws.Int64(1), + }, + } + resp, err := svc.UpdatePipeline(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/codepipeline/service.go b/vendor/github.com/aws/aws-sdk-go/service/codepipeline/service.go
new file mode 100644
index 000000000..8daf80c68
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/codepipeline/service.go
@@ -0,0 +1,192 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package codepipeline
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+)
+
+// Overview
+//
+// This is the AWS CodePipeline API Reference. This guide provides descriptions
+// of the actions and data types for AWS CodePipeline. Some functionality for
+// your pipeline is only configurable through the API. For additional information,
+// see the AWS CodePipeline User Guide (http://docs.aws.amazon.com/codepipeline/latest/userguide/welcome.html).
+//
+// You can use the AWS CodePipeline API to work with pipelines, stages, actions,
+// gates, and transitions, as described below.
+//
+// Pipelines are models of automated release processes. Each pipeline is uniquely
+// named, and consists of actions, gates, and stages.
+//
+// You can work with pipelines by calling:
+//
+// CreatePipeline, which creates a uniquely-named pipeline.
+//
+// DeletePipeline, which deletes the specified pipeline.
+//
+// GetPipeline, which returns information about a pipeline structure.
+//
+// GetPipelineState, which returns information about the current state of
+// the stages and actions of a pipeline.
+//
+// ListPipelines, which gets a summary of all of the pipelines associated
+// with your account.
+//
+// StartPipelineExecution, which runs the most recent revision of an
+// artifact through the pipeline.
+//
+// UpdatePipeline, which updates a pipeline with edits or changes to the
+// structure of the pipeline.
+//
+// Pipelines include stages, which are logical groupings of gates
+// and actions. Each stage contains one or more actions that must complete before
+// the next stage begins. A stage will result in success or failure. If a stage
+// fails, then the pipeline stops at that stage and will remain stopped until
+// either a new version of an artifact appears in the source location, or a
+// user takes action to re-run the most recent artifact through the pipeline.
+// You can call GetPipelineState, which displays the status of a pipeline, including
+// the status of stages in the pipeline, or GetPipeline, which returns the entire
+// structure of the pipeline, including the stages of that pipeline. For more
+// information about the structure of stages and actions, also refer to the
+// AWS CodePipeline Pipeline Structure Reference (http://docs.aws.amazon.com/codepipeline/latest/userguide/pipeline-structure.html).
+//
+// Pipeline stages include actions, which are grouped into categories such
+// as source or build actions performed within a stage of a pipeline. For example,
+// you can use a source action to import artifacts into a pipeline from a source
+// such as Amazon S3. Like stages, you do not work with actions directly in
+// most cases, but you do define and interact with actions when working with
+// pipeline operations such as CreatePipeline and GetPipelineState.
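+//
+// As a quick orientation, the read-only pipeline calls compose naturally. The
+// following is an editorial sketch, not part of the generated reference; it
+// assumes a configured session and at least one existing pipeline:
+//
+//	svc := codepipeline.New(session.New())
+//	pipelines, err := svc.ListPipelines(&codepipeline.ListPipelinesInput{})
+//	if err == nil && len(pipelines.Pipelines) > 0 {
+//		state, _ := svc.GetPipelineState(&codepipeline.GetPipelineStateInput{
+//			Name: pipelines.Pipelines[0].Name,
+//		})
+//		fmt.Println(state)
+//	}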
+//
+// Pipelines also include transitions, which allow the transition of artifacts
+// from one stage to the next in a pipeline after the actions in one stage complete.
+//
+// You can work with transitions by calling:
+//
+// DisableStageTransition, which prevents artifacts from transitioning to
+// the next stage in a pipeline.
+//
+// EnableStageTransition, which enables transition of artifacts between
+// stages in a pipeline.
+//
+// Using the API to integrate with AWS CodePipeline
+//
+// For third-party integrators or developers who want to create their own integrations
+// with AWS CodePipeline, the expected sequence varies from that of the standard API
+// user. In order to integrate with AWS CodePipeline, developers will need to
+// work with the following items:
+//
+// Jobs, which are instances of an action. For example, a job for a source
+// action might import a revision of an artifact from a source.
+//
+// You can work with jobs by calling:
+//
+// AcknowledgeJob, which confirms whether a job worker has received the
+// specified job,
+//
+// GetJobDetails, which returns the details of a job,
+//
+// PollForJobs, which determines whether there are any jobs to act upon,
+//
+// PutJobFailureResult, which provides details of a job failure, and
+//
+// PutJobSuccessResult, which provides details of a job success.
+//
+// Third party jobs, which are instances of an action created by a partner
+// action and integrated into AWS CodePipeline. Partner actions are created
+// by members of the AWS Partner Network.
+//
+// You can work with third party jobs by calling:
+//
+// AcknowledgeThirdPartyJob, which confirms whether a job worker has received
+// the specified job,
+//
+// GetThirdPartyJobDetails, which requests the details of a job for a partner
+// action,
+//
+// PollForThirdPartyJobs, which determines whether there are any jobs to
+// act upon,
+//
+// PutThirdPartyJobFailureResult, which provides details of a job failure,
+// and
+//
+// PutThirdPartyJobSuccessResult, which provides details of a job success.
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties, though.
+type CodePipeline struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "codepipeline"
+
+// New creates a new instance of the CodePipeline client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a CodePipeline client from just a session.
+// svc := codepipeline.New(mySession)
+//
+// // Create a CodePipeline client with additional configuration
+// svc := codepipeline.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *CodePipeline {
+ c := p.ClientConfig(ServiceName, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *CodePipeline { + svc := &CodePipeline{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-07-09", + JSONVersion: "1.1", + TargetPrefix: "CodePipeline_20150709", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a CodePipeline operation and runs any +// custom request initialization. +func (c *CodePipeline) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/api.go b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/api.go new file mode 100644 index 000000000..375d45f16 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/api.go @@ -0,0 +1,2281 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cognitoidentity provides a client for Amazon Cognito Identity. +package cognitoidentity + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opCreateIdentityPool = "CreateIdentityPool" + +// CreateIdentityPoolRequest generates a "aws/request.Request" representing the +// client's request for the CreateIdentityPool operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateIdentityPool method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateIdentityPoolRequest method. +// req, resp := client.CreateIdentityPoolRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoIdentity) CreateIdentityPoolRequest(input *CreateIdentityPoolInput) (req *request.Request, output *IdentityPool) { + op := &request.Operation{ + Name: opCreateIdentityPool, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateIdentityPoolInput{} + } + + req = c.newRequest(op, input, output) + output = &IdentityPool{} + req.Data = output + return +} + +// Creates a new identity pool. The identity pool is a store of user identity +// information that is specific to your AWS account. The limit on identity pools +// is 60 per account. 
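+// For orientation, a minimal creation call might look like the following
+// (an editorial sketch, not part of the generated reference; the pool name
+// is illustrative, a configured session is assumed, and login providers can
+// additionally be supplied via the SupportedLoginProviders map described next):
+//
+//	svc := cognitoidentity.New(session.New())
+//	pool, err := svc.CreateIdentityPool(&cognitoidentity.CreateIdentityPoolInput{
+//		IdentityPoolName:               aws.String("MyIdentityPool"),
+//		AllowUnauthenticatedIdentities: aws.Bool(false),
+//	})
+//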
+// The keys for SupportedLoginProviders are as follows:
+//
+//    Facebook: graph.facebook.com
+//    Google: accounts.google.com
+//    Amazon: www.amazon.com
+//    Twitter: api.twitter.com
+//    Digits: www.digits.com
+//
+// You must use AWS Developer credentials to call this API.
+func (c *CognitoIdentity) CreateIdentityPool(input *CreateIdentityPoolInput) (*IdentityPool, error) {
+	req, out := c.CreateIdentityPoolRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteIdentities = "DeleteIdentities"
+
+// DeleteIdentitiesRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteIdentities operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DeleteIdentities method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DeleteIdentitiesRequest method.
+//    req, resp := client.DeleteIdentitiesRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *CognitoIdentity) DeleteIdentitiesRequest(input *DeleteIdentitiesInput) (req *request.Request, output *DeleteIdentitiesOutput) {
+	op := &request.Operation{
+		Name:       opDeleteIdentities,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteIdentitiesInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DeleteIdentitiesOutput{}
+	req.Data = output
+	return
+}
+
+// Deletes identities from an identity pool. You can specify a list of 1-60
+// identities that you want to delete.
+//
+// You must use AWS Developer credentials to call this API.
+func (c *CognitoIdentity) DeleteIdentities(input *DeleteIdentitiesInput) (*DeleteIdentitiesOutput, error) {
+	req, out := c.DeleteIdentitiesRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteIdentityPool = "DeleteIdentityPool"
+
+// DeleteIdentityPoolRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteIdentityPool operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DeleteIdentityPool method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DeleteIdentityPoolRequest method.
+//    req, resp := client.DeleteIdentityPoolRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *CognitoIdentity) DeleteIdentityPoolRequest(input *DeleteIdentityPoolInput) (req *request.Request, output *DeleteIdentityPoolOutput) {
+	op := &request.Operation{
+		Name:       opDeleteIdentityPool,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteIdentityPoolInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	output = &DeleteIdentityPoolOutput{}
+	req.Data = output
+	return
+}
+
+// Deletes an identity pool. Once a pool is deleted, users will not be able
+// to authenticate with the pool.
+//
+// You must use AWS Developer credentials to call this API.
+func (c *CognitoIdentity) DeleteIdentityPool(input *DeleteIdentityPoolInput) (*DeleteIdentityPoolOutput, error) {
+	req, out := c.DeleteIdentityPoolRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeIdentity = "DescribeIdentity"
+
+// DescribeIdentityRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeIdentity operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeIdentity method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DescribeIdentityRequest method.
+//    req, resp := client.DescribeIdentityRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *CognitoIdentity) DescribeIdentityRequest(input *DescribeIdentityInput) (req *request.Request, output *IdentityDescription) {
+	op := &request.Operation{
+		Name:       opDescribeIdentity,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeIdentityInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &IdentityDescription{}
+	req.Data = output
+	return
+}
+
+// Returns metadata related to the given identity, including when the identity
+// was created and any associated linked logins.
+//
+// You must use AWS Developer credentials to call this API.
+func (c *CognitoIdentity) DescribeIdentity(input *DescribeIdentityInput) (*IdentityDescription, error) {
+	req, out := c.DescribeIdentityRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeIdentityPool = "DescribeIdentityPool"
+
+// DescribeIdentityPoolRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeIdentityPool operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request.
+// If you just want the service response, call the DescribeIdentityPool method
+// directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DescribeIdentityPoolRequest method.
+//    req, resp := client.DescribeIdentityPoolRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *CognitoIdentity) DescribeIdentityPoolRequest(input *DescribeIdentityPoolInput) (req *request.Request, output *IdentityPool) {
+	op := &request.Operation{
+		Name:       opDescribeIdentityPool,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeIdentityPoolInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &IdentityPool{}
+	req.Data = output
+	return
+}
+
+// Gets details about a particular identity pool, including the pool name, ID,
+// description, creation date, and current number of users.
+//
+// You must use AWS Developer credentials to call this API.
+func (c *CognitoIdentity) DescribeIdentityPool(input *DescribeIdentityPoolInput) (*IdentityPool, error) {
+	req, out := c.DescribeIdentityPoolRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opGetCredentialsForIdentity = "GetCredentialsForIdentity"
+
+// GetCredentialsForIdentityRequest generates a "aws/request.Request" representing the
+// client's request for the GetCredentialsForIdentity operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the GetCredentialsForIdentity method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the GetCredentialsForIdentityRequest method.
+//    req, resp := client.GetCredentialsForIdentityRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *CognitoIdentity) GetCredentialsForIdentityRequest(input *GetCredentialsForIdentityInput) (req *request.Request, output *GetCredentialsForIdentityOutput) {
+	op := &request.Operation{
+		Name:       opGetCredentialsForIdentity,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetCredentialsForIdentityInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &GetCredentialsForIdentityOutput{}
+	req.Data = output
+	return
+}
+
+// Returns credentials for the provided identity ID. Any provided logins will
+// be validated against supported login providers. If the token is for cognito-identity.amazonaws.com,
+// it will be passed through to AWS Security Token Service with the appropriate
+// role for the token.
+//
+// This is a public API. You do not need any credentials to call this API.
+func (c *CognitoIdentity) GetCredentialsForIdentity(input *GetCredentialsForIdentityInput) (*GetCredentialsForIdentityOutput, error) {
+	req, out := c.GetCredentialsForIdentityRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opGetId = "GetId"
+
+// GetIdRequest generates a "aws/request.Request" representing the
+// client's request for the GetId operation.
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetId method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetIdRequest method. +// req, resp := client.GetIdRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoIdentity) GetIdRequest(input *GetIdInput) (req *request.Request, output *GetIdOutput) { + op := &request.Operation{ + Name: opGetId, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetIdInput{} + } + + req = c.newRequest(op, input, output) + output = &GetIdOutput{} + req.Data = output + return +} + +// Generates (or retrieves) a Cognito ID. Supplying multiple logins will create +// an implicit linked account. +// +// This is a public API. You do not need any credentials to call this API. +func (c *CognitoIdentity) GetId(input *GetIdInput) (*GetIdOutput, error) { + req, out := c.GetIdRequest(input) + err := req.Send() + return out, err +} + +const opGetIdentityPoolRoles = "GetIdentityPoolRoles" + +// GetIdentityPoolRolesRequest generates a "aws/request.Request" representing the +// client's request for the GetIdentityPoolRoles operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetIdentityPoolRoles method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetIdentityPoolRolesRequest method. +// req, resp := client.GetIdentityPoolRolesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoIdentity) GetIdentityPoolRolesRequest(input *GetIdentityPoolRolesInput) (req *request.Request, output *GetIdentityPoolRolesOutput) { + op := &request.Operation{ + Name: opGetIdentityPoolRoles, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetIdentityPoolRolesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetIdentityPoolRolesOutput{} + req.Data = output + return +} + +// Gets the roles for an identity pool. +// +// You must use AWS Developer credentials to call this API. +func (c *CognitoIdentity) GetIdentityPoolRoles(input *GetIdentityPoolRolesInput) (*GetIdentityPoolRolesOutput, error) { + req, out := c.GetIdentityPoolRolesRequest(input) + err := req.Send() + return out, err +} + +const opGetOpenIdToken = "GetOpenIdToken" + +// GetOpenIdTokenRequest generates a "aws/request.Request" representing the +// client's request for the GetOpenIdToken operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetOpenIdToken method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetOpenIdTokenRequest method. +// req, resp := client.GetOpenIdTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoIdentity) GetOpenIdTokenRequest(input *GetOpenIdTokenInput) (req *request.Request, output *GetOpenIdTokenOutput) { + op := &request.Operation{ + Name: opGetOpenIdToken, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetOpenIdTokenInput{} + } + + req = c.newRequest(op, input, output) + output = &GetOpenIdTokenOutput{} + req.Data = output + return +} + +// Gets an OpenID token, using a known Cognito ID. This known Cognito ID is +// returned by GetId. You can optionally add additional logins for the identity. +// Supplying multiple logins creates an implicit link. +// +// The OpenId token is valid for 15 minutes. +// +// This is a public API. You do not need any credentials to call this API. +func (c *CognitoIdentity) GetOpenIdToken(input *GetOpenIdTokenInput) (*GetOpenIdTokenOutput, error) { + req, out := c.GetOpenIdTokenRequest(input) + err := req.Send() + return out, err +} + +const opGetOpenIdTokenForDeveloperIdentity = "GetOpenIdTokenForDeveloperIdentity" + +// GetOpenIdTokenForDeveloperIdentityRequest generates a "aws/request.Request" representing the +// client's request for the GetOpenIdTokenForDeveloperIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetOpenIdTokenForDeveloperIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetOpenIdTokenForDeveloperIdentityRequest method. +// req, resp := client.GetOpenIdTokenForDeveloperIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoIdentity) GetOpenIdTokenForDeveloperIdentityRequest(input *GetOpenIdTokenForDeveloperIdentityInput) (req *request.Request, output *GetOpenIdTokenForDeveloperIdentityOutput) { + op := &request.Operation{ + Name: opGetOpenIdTokenForDeveloperIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetOpenIdTokenForDeveloperIdentityInput{} + } + + req = c.newRequest(op, input, output) + output = &GetOpenIdTokenForDeveloperIdentityOutput{} + req.Data = output + return +} + +// Registers (or retrieves) a Cognito IdentityId and an OpenID Connect token +// for a user authenticated by your backend authentication process. 
Supplying +// multiple logins will create an implicit linked account. You can only specify +// one developer provider as part of the Logins map, which is linked to the +// identity pool. The developer provider is the "domain" by which Cognito will +// refer to your users. +// +// You can use GetOpenIdTokenForDeveloperIdentity to create a new identity +// and to link new logins (that is, user credentials issued by a public provider +// or developer provider) to an existing identity. When you want to create a +// new identity, the IdentityId should be null. When you want to associate a +// new login with an existing authenticated/unauthenticated identity, you can +// do so by providing the existing IdentityId. This API will create the identity +// in the specified IdentityPoolId. +// +// You must use AWS Developer credentials to call this API. +func (c *CognitoIdentity) GetOpenIdTokenForDeveloperIdentity(input *GetOpenIdTokenForDeveloperIdentityInput) (*GetOpenIdTokenForDeveloperIdentityOutput, error) { + req, out := c.GetOpenIdTokenForDeveloperIdentityRequest(input) + err := req.Send() + return out, err +} + +const opListIdentities = "ListIdentities" + +// ListIdentitiesRequest generates a "aws/request.Request" representing the +// client's request for the ListIdentities operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListIdentities method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListIdentitiesRequest method. +// req, resp := client.ListIdentitiesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoIdentity) ListIdentitiesRequest(input *ListIdentitiesInput) (req *request.Request, output *ListIdentitiesOutput) { + op := &request.Operation{ + Name: opListIdentities, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListIdentitiesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListIdentitiesOutput{} + req.Data = output + return +} + +// Lists the identities in a pool. +// +// You must use AWS Developer credentials to call this API. +func (c *CognitoIdentity) ListIdentities(input *ListIdentitiesInput) (*ListIdentitiesOutput, error) { + req, out := c.ListIdentitiesRequest(input) + err := req.Send() + return out, err +} + +const opListIdentityPools = "ListIdentityPools" + +// ListIdentityPoolsRequest generates a "aws/request.Request" representing the +// client's request for the ListIdentityPools operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListIdentityPools method directly +// instead. 
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the ListIdentityPoolsRequest method.
+//    req, resp := client.ListIdentityPoolsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *CognitoIdentity) ListIdentityPoolsRequest(input *ListIdentityPoolsInput) (req *request.Request, output *ListIdentityPoolsOutput) {
+	op := &request.Operation{
+		Name:       opListIdentityPools,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &ListIdentityPoolsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ListIdentityPoolsOutput{}
+	req.Data = output
+	return
+}
+
+// Lists all of the Cognito identity pools registered for your account.
+//
+// You must use AWS Developer credentials to call this API.
+func (c *CognitoIdentity) ListIdentityPools(input *ListIdentityPoolsInput) (*ListIdentityPoolsOutput, error) {
+	req, out := c.ListIdentityPoolsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opLookupDeveloperIdentity = "LookupDeveloperIdentity"
+
+// LookupDeveloperIdentityRequest generates a "aws/request.Request" representing the
+// client's request for the LookupDeveloperIdentity operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the LookupDeveloperIdentity method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the LookupDeveloperIdentityRequest method.
+//    req, resp := client.LookupDeveloperIdentityRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *CognitoIdentity) LookupDeveloperIdentityRequest(input *LookupDeveloperIdentityInput) (req *request.Request, output *LookupDeveloperIdentityOutput) {
+	op := &request.Operation{
+		Name:       opLookupDeveloperIdentity,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &LookupDeveloperIdentityInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &LookupDeveloperIdentityOutput{}
+	req.Data = output
+	return
+}
+
+// Retrieves the IdentityId associated with a DeveloperUserIdentifier or the
+// list of DeveloperUserIdentifiers associated with an IdentityId for an existing
+// identity. Either IdentityId or DeveloperUserIdentifier must not be null.
+// If you supply only one of these values, the other value will be searched
+// in the database and returned as a part of the response. If you supply both,
+// DeveloperUserIdentifier will be matched against IdentityId. If the values
+// are verified against the database, the response returns both values and is
+// the same as the request. Otherwise a ResourceConflictException is thrown.
+//
+// You must use AWS Developer credentials to call this API.
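+//
+// A hedged usage sketch (the "svc" client value and the identifiers below
+// are assumed placeholders, not values defined by this package):
+//
+//    out, err := svc.LookupDeveloperIdentity(&cognitoidentity.LookupDeveloperIdentityInput{
+//        IdentityPoolId:          aws.String("us-east-1:EXAMPLE-GUID"),
+//        DeveloperUserIdentifier: aws.String("backend-user-123"),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.IdentityId))
+//    }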
+func (c *CognitoIdentity) LookupDeveloperIdentity(input *LookupDeveloperIdentityInput) (*LookupDeveloperIdentityOutput, error) { + req, out := c.LookupDeveloperIdentityRequest(input) + err := req.Send() + return out, err +} + +const opMergeDeveloperIdentities = "MergeDeveloperIdentities" + +// MergeDeveloperIdentitiesRequest generates a "aws/request.Request" representing the +// client's request for the MergeDeveloperIdentities operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the MergeDeveloperIdentities method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the MergeDeveloperIdentitiesRequest method. +// req, resp := client.MergeDeveloperIdentitiesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoIdentity) MergeDeveloperIdentitiesRequest(input *MergeDeveloperIdentitiesInput) (req *request.Request, output *MergeDeveloperIdentitiesOutput) { + op := &request.Operation{ + Name: opMergeDeveloperIdentities, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &MergeDeveloperIdentitiesInput{} + } + + req = c.newRequest(op, input, output) + output = &MergeDeveloperIdentitiesOutput{} + req.Data = output + return +} + +// Merges two users having different IdentityIds, existing in the same identity +// pool, and identified by the same developer provider. You can use this action +// to request that discrete users be merged and identified as a single user +// in the Cognito environment. Cognito associates the given source user (SourceUserIdentifier) +// with the IdentityId of the DestinationUserIdentifier. Only developer-authenticated +// users can be merged. If the users to be merged are associated with the same +// public provider, but as two different users, an exception will be thrown. +// +// You must use AWS Developer credentials to call this API. +func (c *CognitoIdentity) MergeDeveloperIdentities(input *MergeDeveloperIdentitiesInput) (*MergeDeveloperIdentitiesOutput, error) { + req, out := c.MergeDeveloperIdentitiesRequest(input) + err := req.Send() + return out, err +} + +const opSetIdentityPoolRoles = "SetIdentityPoolRoles" + +// SetIdentityPoolRolesRequest generates a "aws/request.Request" representing the +// client's request for the SetIdentityPoolRoles operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetIdentityPoolRoles method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetIdentityPoolRolesRequest method. 
+//    req, resp := client.SetIdentityPoolRolesRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *CognitoIdentity) SetIdentityPoolRolesRequest(input *SetIdentityPoolRolesInput) (req *request.Request, output *SetIdentityPoolRolesOutput) {
+	op := &request.Operation{
+		Name:       opSetIdentityPoolRoles,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &SetIdentityPoolRolesInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	output = &SetIdentityPoolRolesOutput{}
+	req.Data = output
+	return
+}
+
+// Sets the roles for an identity pool. These roles are used when making calls
+// to the GetCredentialsForIdentity action.
+//
+// You must use AWS Developer credentials to call this API.
+func (c *CognitoIdentity) SetIdentityPoolRoles(input *SetIdentityPoolRolesInput) (*SetIdentityPoolRolesOutput, error) {
+	req, out := c.SetIdentityPoolRolesRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opUnlinkDeveloperIdentity = "UnlinkDeveloperIdentity"
+
+// UnlinkDeveloperIdentityRequest generates a "aws/request.Request" representing the
+// client's request for the UnlinkDeveloperIdentity operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the UnlinkDeveloperIdentity method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the UnlinkDeveloperIdentityRequest method.
+//    req, resp := client.UnlinkDeveloperIdentityRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *CognitoIdentity) UnlinkDeveloperIdentityRequest(input *UnlinkDeveloperIdentityInput) (req *request.Request, output *UnlinkDeveloperIdentityOutput) {
+	op := &request.Operation{
+		Name:       opUnlinkDeveloperIdentity,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UnlinkDeveloperIdentityInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	output = &UnlinkDeveloperIdentityOutput{}
+	req.Data = output
+	return
+}
+
+// Unlinks a DeveloperUserIdentifier from an existing identity. Unlinked developer
+// users will be considered new identities next time they are seen. If, for
+// a given Cognito identity, you remove all federated identities as well as
+// the developer user identifier, the Cognito identity becomes inaccessible.
+//
+// You must use AWS Developer credentials to call this API.
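+//
+// A hedged sketch of a call (the field names follow the Cognito Identity API
+// but are not shown in this hunk; "svc" and all values are assumed
+// placeholders):
+//
+//    _, err := svc.UnlinkDeveloperIdentity(&cognitoidentity.UnlinkDeveloperIdentityInput{
+//        DeveloperProviderName:   aws.String("login.company.example"),
+//        DeveloperUserIdentifier: aws.String("backend-user-123"),
+//        IdentityId:              aws.String("us-east-1:EXAMPLE-IDENTITY-GUID"),
+//        IdentityPoolId:          aws.String("us-east-1:EXAMPLE-POOL-GUID"),
+//    })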
+func (c *CognitoIdentity) UnlinkDeveloperIdentity(input *UnlinkDeveloperIdentityInput) (*UnlinkDeveloperIdentityOutput, error) { + req, out := c.UnlinkDeveloperIdentityRequest(input) + err := req.Send() + return out, err +} + +const opUnlinkIdentity = "UnlinkIdentity" + +// UnlinkIdentityRequest generates a "aws/request.Request" representing the +// client's request for the UnlinkIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UnlinkIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UnlinkIdentityRequest method. +// req, resp := client.UnlinkIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoIdentity) UnlinkIdentityRequest(input *UnlinkIdentityInput) (req *request.Request, output *UnlinkIdentityOutput) { + op := &request.Operation{ + Name: opUnlinkIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UnlinkIdentityInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UnlinkIdentityOutput{} + req.Data = output + return +} + +// Unlinks a federated identity from an existing account. Unlinked logins will +// be considered new identities next time they are seen. Removing the last linked +// login will make this identity inaccessible. +// +// This is a public API. You do not need any credentials to call this API. +func (c *CognitoIdentity) UnlinkIdentity(input *UnlinkIdentityInput) (*UnlinkIdentityOutput, error) { + req, out := c.UnlinkIdentityRequest(input) + err := req.Send() + return out, err +} + +const opUpdateIdentityPool = "UpdateIdentityPool" + +// UpdateIdentityPoolRequest generates a "aws/request.Request" representing the +// client's request for the UpdateIdentityPool operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateIdentityPool method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateIdentityPoolRequest method. 
+//    req, resp := client.UpdateIdentityPoolRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *CognitoIdentity) UpdateIdentityPoolRequest(input *IdentityPool) (req *request.Request, output *IdentityPool) {
+	op := &request.Operation{
+		Name:       opUpdateIdentityPool,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &IdentityPool{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &IdentityPool{}
+	req.Data = output
+	return
+}
+
+// Updates an identity pool.
+//
+// You must use AWS Developer credentials to call this API.
+func (c *CognitoIdentity) UpdateIdentityPool(input *IdentityPool) (*IdentityPool, error) {
+	req, out := c.UpdateIdentityPoolRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// Input to the CreateIdentityPool action.
+type CreateIdentityPoolInput struct {
+	_ struct{} `type:"structure"`
+
+	// TRUE if the identity pool supports unauthenticated logins.
+	AllowUnauthenticatedIdentities *bool `type:"boolean" required:"true"`
+
+	// An array of Amazon Cognito Identity user pools.
+	CognitoIdentityProviders []*Provider `type:"list"`
+
+	// The "domain" by which Cognito will refer to your users. This name acts as
+	// a placeholder that allows your backend and the Cognito service to communicate
+	// about the developer provider. For the DeveloperProviderName, you can use
+	// letters as well as period (.), underscore (_), and dash (-).
+	//
+	// Once you have set a developer provider name, you cannot change it. Please
+	// take care in setting this parameter.
+	DeveloperProviderName *string `min:"1" type:"string"`
+
+	// A string that you provide.
+	IdentityPoolName *string `min:"1" type:"string" required:"true"`
+
+	// A list of OpenID Connect provider ARNs.
+	OpenIdConnectProviderARNs []*string `type:"list"`
+
+	// An array of Amazon Resource Names (ARNs) of the SAML provider for your identity
+	// pool.
+	SamlProviderARNs []*string `type:"list"`
+
+	// Optional key:value pairs mapping provider names to provider app IDs.
+	SupportedLoginProviders map[string]*string `type:"map"`
+}
+
+// String returns the string representation
+func (s CreateIdentityPoolInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateIdentityPoolInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateIdentityPoolInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateIdentityPoolInput"}
+	if s.AllowUnauthenticatedIdentities == nil {
+		invalidParams.Add(request.NewErrParamRequired("AllowUnauthenticatedIdentities"))
+	}
+	if s.DeveloperProviderName != nil && len(*s.DeveloperProviderName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("DeveloperProviderName", 1))
+	}
+	if s.IdentityPoolName == nil {
+		invalidParams.Add(request.NewErrParamRequired("IdentityPoolName"))
+	}
+	if s.IdentityPoolName != nil && len(*s.IdentityPoolName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("IdentityPoolName", 1))
+	}
+	if s.CognitoIdentityProviders != nil {
+		for i, v := range s.CognitoIdentityProviders {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CognitoIdentityProviders", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Credentials for the provided identity ID.
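+//
+// These are standard temporary AWS credentials. As a hedged sketch (the names
+// outside this package are assumptions for illustration), an output from
+// GetCredentialsForIdentity can seed the SDK's credentials package:
+//
+//    // out is a *cognitoidentity.GetCredentialsForIdentityOutput (assumed)
+//    creds := credentials.NewStaticCredentials(
+//        aws.StringValue(out.Credentials.AccessKeyId),
+//        aws.StringValue(out.Credentials.SecretKey),
+//        aws.StringValue(out.Credentials.SessionToken))
+//    _ = creds // e.g. use with &aws.Config{Credentials: creds}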
+type Credentials struct {
+	_ struct{} `type:"structure"`
+
+	// The Access Key portion of the credentials.
+	AccessKeyId *string `type:"string"`
+
+	// The date at which these credentials will expire.
+	Expiration *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+	// The Secret Access Key portion of the credentials.
+	SecretKey *string `type:"string"`
+
+	// The Session Token portion of the credentials.
+	SessionToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Credentials) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Credentials) GoString() string {
+	return s.String()
+}
+
+// Input to the DeleteIdentities action.
+type DeleteIdentitiesInput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of 1-60 identities that you want to delete.
+	IdentityIdsToDelete []*string `min:"1" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteIdentitiesInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteIdentitiesInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteIdentitiesInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteIdentitiesInput"}
+	if s.IdentityIdsToDelete == nil {
+		invalidParams.Add(request.NewErrParamRequired("IdentityIdsToDelete"))
+	}
+	if s.IdentityIdsToDelete != nil && len(s.IdentityIdsToDelete) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("IdentityIdsToDelete", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Returned in response to a successful DeleteIdentities operation.
+type DeleteIdentitiesOutput struct {
+	_ struct{} `type:"structure"`
+
+	// An array of UnprocessedIdentityId objects, each of which contains an ErrorCode
+	// and IdentityId.
+	UnprocessedIdentityIds []*UnprocessedIdentityId `type:"list"`
+}
+
+// String returns the string representation
+func (s DeleteIdentitiesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteIdentitiesOutput) GoString() string {
+	return s.String()
+}
+
+// Input to the DeleteIdentityPool action.
+type DeleteIdentityPoolInput struct {
+	_ struct{} `type:"structure"`
+
+	// An identity pool ID in the format REGION:GUID.
+	IdentityPoolId *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteIdentityPoolInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteIdentityPoolInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
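+//
+// The SDK runs this validation automatically before a request is sent; it can
+// also be called directly. A small caller-side sketch (assumed, not part of
+// the generated API):
+//
+//    in := &cognitoidentity.DeleteIdentityPoolInput{}
+//    if err := in.Validate(); err != nil {
+//        fmt.Println(err) // reports the missing required IdentityPoolId
+//    }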
+func (s *DeleteIdentityPoolInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteIdentityPoolInput"} + if s.IdentityPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityPoolId")) + } + if s.IdentityPoolId != nil && len(*s.IdentityPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityPoolId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteIdentityPoolOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteIdentityPoolOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteIdentityPoolOutput) GoString() string { + return s.String() +} + +// Input to the DescribeIdentity action. +type DescribeIdentityInput struct { + _ struct{} `type:"structure"` + + // A unique identifier in the format REGION:GUID. + IdentityId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeIdentityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeIdentityInput"} + if s.IdentityId == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityId")) + } + if s.IdentityId != nil && len(*s.IdentityId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Input to the DescribeIdentityPool action. +type DescribeIdentityPoolInput struct { + _ struct{} `type:"structure"` + + // An identity pool ID in the format REGION:GUID. + IdentityPoolId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeIdentityPoolInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeIdentityPoolInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeIdentityPoolInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeIdentityPoolInput"} + if s.IdentityPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityPoolId")) + } + if s.IdentityPoolId != nil && len(*s.IdentityPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityPoolId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Input to the GetCredentialsForIdentity action. +type GetCredentialsForIdentityInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the role to be assumed when multiple roles + // were received in the token from the identity provider. For example, a SAML-based + // identity provider. This parameter is optional for identity providers that + // do not support role customization. + CustomRoleArn *string `min:"20" type:"string"` + + // A unique identifier in the format REGION:GUID. + IdentityId *string `min:"1" type:"string" required:"true"` + + // A set of optional name-value pairs that map provider names to provider tokens. 
+	Logins map[string]*string `type:"map"`
+}
+
+// String returns the string representation
+func (s GetCredentialsForIdentityInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetCredentialsForIdentityInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetCredentialsForIdentityInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetCredentialsForIdentityInput"}
+	if s.CustomRoleArn != nil && len(*s.CustomRoleArn) < 20 {
+		invalidParams.Add(request.NewErrParamMinLen("CustomRoleArn", 20))
+	}
+	if s.IdentityId == nil {
+		invalidParams.Add(request.NewErrParamRequired("IdentityId"))
+	}
+	if s.IdentityId != nil && len(*s.IdentityId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("IdentityId", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Returned in response to a successful GetCredentialsForIdentity operation.
+type GetCredentialsForIdentityOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Credentials for the provided identity ID.
+	Credentials *Credentials `type:"structure"`
+
+	// A unique identifier in the format REGION:GUID.
+	IdentityId *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s GetCredentialsForIdentityOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetCredentialsForIdentityOutput) GoString() string {
+	return s.String()
+}
+
+// Input to the GetId action.
+type GetIdInput struct {
+	_ struct{} `type:"structure"`
+
+	// A standard AWS account ID (9+ digits).
+	AccountId *string `min:"1" type:"string"`
+
+	// An identity pool ID in the format REGION:GUID.
+	IdentityPoolId *string `min:"1" type:"string" required:"true"`
+
+	// A set of optional name-value pairs that map provider names to provider tokens.
+	//
+	// The available provider names for Logins are as follows:
+	//
+	//    Facebook: graph.facebook.com
+	//    Google: accounts.google.com
+	//    Amazon: www.amazon.com
+	//    Twitter: api.twitter.com
+	//    Digits: www.digits.com
+	Logins map[string]*string `type:"map"`
+}
+
+// String returns the string representation
+func (s GetIdInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetIdInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetIdInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetIdInput"}
+	if s.AccountId != nil && len(*s.AccountId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("AccountId", 1))
+	}
+	if s.IdentityPoolId == nil {
+		invalidParams.Add(request.NewErrParamRequired("IdentityPoolId"))
+	}
+	if s.IdentityPoolId != nil && len(*s.IdentityPoolId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("IdentityPoolId", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Returned in response to a GetId request.
+type GetIdOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A unique identifier in the format REGION:GUID.
+ IdentityId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetIdOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdOutput) GoString() string { + return s.String() +} + +// Input to the GetIdentityPoolRoles action. +type GetIdentityPoolRolesInput struct { + _ struct{} `type:"structure"` + + // An identity pool ID in the format REGION:GUID. + IdentityPoolId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetIdentityPoolRolesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityPoolRolesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetIdentityPoolRolesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetIdentityPoolRolesInput"} + if s.IdentityPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityPoolId")) + } + if s.IdentityPoolId != nil && len(*s.IdentityPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityPoolId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Returned in response to a successful GetIdentityPoolRoles operation. +type GetIdentityPoolRolesOutput struct { + _ struct{} `type:"structure"` + + // An identity pool ID in the format REGION:GUID. + IdentityPoolId *string `min:"1" type:"string"` + + // The map of roles associated with this pool. Currently only authenticated + // and unauthenticated roles are supported. + Roles map[string]*string `type:"map"` +} + +// String returns the string representation +func (s GetIdentityPoolRolesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityPoolRolesOutput) GoString() string { + return s.String() +} + +// Input to the GetOpenIdTokenForDeveloperIdentity action. +type GetOpenIdTokenForDeveloperIdentityInput struct { + _ struct{} `type:"structure"` + + // A unique identifier in the format REGION:GUID. + IdentityId *string `min:"1" type:"string"` + + // An identity pool ID in the format REGION:GUID. + IdentityPoolId *string `min:"1" type:"string" required:"true"` + + // A set of optional name-value pairs that map provider names to provider tokens. + // Each name-value pair represents a user from a public provider or developer + // provider. If the user is from a developer provider, the name-value pair will + // follow the syntax "developer_provider_name": "developer_user_identifier". + // The developer provider is the "domain" by which Cognito will refer to your + // users; you provided this domain while creating/updating the identity pool. + // The developer user identifier is an identifier from your backend that uniquely + // identifies a user. When you create an identity pool, you can specify the + // supported logins. + Logins map[string]*string `type:"map" required:"true"` + + // The expiration time of the token, in seconds. You can specify a custom expiration + // time for the token so that you can cache it. If you don't provide an expiration + // time, the token is valid for 15 minutes. You can exchange the token with + // Amazon STS for temporary AWS credentials, which are valid for a maximum of + // one hour. The maximum token duration you can set is 24 hours. 
You should + // take care in setting the expiration time for a token, as there are significant + // security implications: an attacker could use a leaked token to access your + // AWS resources for the token's duration. + TokenDuration *int64 `min:"1" type:"long"` +} + +// String returns the string representation +func (s GetOpenIdTokenForDeveloperIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOpenIdTokenForDeveloperIdentityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetOpenIdTokenForDeveloperIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetOpenIdTokenForDeveloperIdentityInput"} + if s.IdentityId != nil && len(*s.IdentityId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityId", 1)) + } + if s.IdentityPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityPoolId")) + } + if s.IdentityPoolId != nil && len(*s.IdentityPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityPoolId", 1)) + } + if s.Logins == nil { + invalidParams.Add(request.NewErrParamRequired("Logins")) + } + if s.TokenDuration != nil && *s.TokenDuration < 1 { + invalidParams.Add(request.NewErrParamMinValue("TokenDuration", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Returned in response to a successful GetOpenIdTokenForDeveloperIdentity request. +type GetOpenIdTokenForDeveloperIdentityOutput struct { + _ struct{} `type:"structure"` + + // A unique identifier in the format REGION:GUID. + IdentityId *string `min:"1" type:"string"` + + // An OpenID token. + Token *string `type:"string"` +} + +// String returns the string representation +func (s GetOpenIdTokenForDeveloperIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOpenIdTokenForDeveloperIdentityOutput) GoString() string { + return s.String() +} + +// Input to the GetOpenIdToken action. +type GetOpenIdTokenInput struct { + _ struct{} `type:"structure"` + + // A unique identifier in the format REGION:GUID. + IdentityId *string `min:"1" type:"string" required:"true"` + + // A set of optional name-value pairs that map provider names to provider tokens. + // When using graph.facebook.com and www.amazon.com, supply the access_token + // returned from the provider's authflow. For accounts.google.com or any other + // OpenId Connect provider, always include the id_token. + Logins map[string]*string `type:"map"` +} + +// String returns the string representation +func (s GetOpenIdTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOpenIdTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetOpenIdTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetOpenIdTokenInput"} + if s.IdentityId == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityId")) + } + if s.IdentityId != nil && len(*s.IdentityId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Returned in response to a successful GetOpenIdToken request. 
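+//
+// A hedged follow-on sketch (the STS client, session value, and role ARN are
+// assumptions, not part of this package): the returned Token can be exchanged
+// for temporary AWS credentials while it is still valid:
+//
+//    stsSvc := sts.New(sess)
+//    role, err := stsSvc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
+//        RoleArn:          aws.String("arn:aws:iam::123456789012:role/ExampleCognitoRole"),
+//        RoleSessionName:  aws.String("web-identity"),
+//        WebIdentityToken: out.Token, // out is a *cognitoidentity.GetOpenIdTokenOutput
+//    })
+//    _, _ = role, err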
+type GetOpenIdTokenOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A unique identifier in the format REGION:GUID. Note that the IdentityId returned
+	// may not match the one passed on input.
+	IdentityId *string `min:"1" type:"string"`
+
+	// An OpenID token, valid for 15 minutes.
+	Token *string `type:"string"`
+}
+
+// String returns the string representation
+func (s GetOpenIdTokenOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetOpenIdTokenOutput) GoString() string {
+	return s.String()
+}
+
+// A description of the identity.
+type IdentityDescription struct {
+	_ struct{} `type:"structure"`
+
+	// Date on which the identity was created.
+	CreationDate *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+	// A unique identifier in the format REGION:GUID.
+	IdentityId *string `min:"1" type:"string"`
+
+	// Date on which the identity was last modified.
+	LastModifiedDate *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+	// The provider names associated with the identity.
+	Logins []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s IdentityDescription) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s IdentityDescription) GoString() string {
+	return s.String()
+}
+
+// An object representing a Cognito identity pool.
+type IdentityPool struct {
+	_ struct{} `type:"structure"`
+
+	// TRUE if the identity pool supports unauthenticated logins.
+	AllowUnauthenticatedIdentities *bool `type:"boolean" required:"true"`
+
+	// A list representing an Amazon Cognito Identity User Pool and its client ID.
+	CognitoIdentityProviders []*Provider `type:"list"`
+
+	// The "domain" by which Cognito will refer to your users.
+	DeveloperProviderName *string `min:"1" type:"string"`
+
+	// An identity pool ID in the format REGION:GUID.
+	IdentityPoolId *string `min:"1" type:"string" required:"true"`
+
+	// A string that you provide.
+	IdentityPoolName *string `min:"1" type:"string" required:"true"`
+
+	// A list of OpenID Connect provider ARNs.
+	OpenIdConnectProviderARNs []*string `type:"list"`
+
+	// An array of Amazon Resource Names (ARNs) of the SAML provider for your identity
+	// pool.
+	SamlProviderARNs []*string `type:"list"`
+
+	// Optional key:value pairs mapping provider names to provider app IDs.
+	SupportedLoginProviders map[string]*string `type:"map"`
+}
+
+// String returns the string representation
+func (s IdentityPool) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s IdentityPool) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *IdentityPool) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IdentityPool"} + if s.AllowUnauthenticatedIdentities == nil { + invalidParams.Add(request.NewErrParamRequired("AllowUnauthenticatedIdentities")) + } + if s.DeveloperProviderName != nil && len(*s.DeveloperProviderName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DeveloperProviderName", 1)) + } + if s.IdentityPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityPoolId")) + } + if s.IdentityPoolId != nil && len(*s.IdentityPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityPoolId", 1)) + } + if s.IdentityPoolName == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityPoolName")) + } + if s.IdentityPoolName != nil && len(*s.IdentityPoolName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityPoolName", 1)) + } + if s.CognitoIdentityProviders != nil { + for i, v := range s.CognitoIdentityProviders { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CognitoIdentityProviders", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A description of the identity pool. +type IdentityPoolShortDescription struct { + _ struct{} `type:"structure"` + + // An identity pool ID in the format REGION:GUID. + IdentityPoolId *string `min:"1" type:"string"` + + // A string that you provide. + IdentityPoolName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s IdentityPoolShortDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IdentityPoolShortDescription) GoString() string { + return s.String() +} + +// Input to the ListIdentities action. +type ListIdentitiesInput struct { + _ struct{} `type:"structure"` + + // An optional boolean parameter that allows you to hide disabled identities. + // If omitted, the ListIdentities API will include disabled identities in the + // response. + HideDisabled *bool `type:"boolean"` + + // An identity pool ID in the format REGION:GUID. + IdentityPoolId *string `min:"1" type:"string" required:"true"` + + // The maximum number of identities to return. + MaxResults *int64 `min:"1" type:"integer" required:"true"` + + // A pagination token. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListIdentitiesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListIdentitiesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListIdentitiesInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListIdentitiesInput"}
+	if s.IdentityPoolId == nil {
+		invalidParams.Add(request.NewErrParamRequired("IdentityPoolId"))
+	}
+	if s.IdentityPoolId != nil && len(*s.IdentityPoolId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("IdentityPoolId", 1))
+	}
+	if s.MaxResults == nil {
+		invalidParams.Add(request.NewErrParamRequired("MaxResults"))
+	}
+	if s.MaxResults != nil && *s.MaxResults < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+	}
+	if s.NextToken != nil && len(*s.NextToken) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The response to a ListIdentities request.
+type ListIdentitiesOutput struct {
+	_ struct{} `type:"structure"`
+
+	// An object containing a set of identities and associated mappings.
+	Identities []*IdentityDescription `type:"list"`
+
+	// An identity pool ID in the format REGION:GUID.
+	IdentityPoolId *string `min:"1" type:"string"`
+
+	// A pagination token.
+	NextToken *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ListIdentitiesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListIdentitiesOutput) GoString() string {
+	return s.String()
+}
+
+// Input to the ListIdentityPools action.
+type ListIdentityPoolsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The maximum number of identity pools to return.
+	MaxResults *int64 `min:"1" type:"integer" required:"true"`
+
+	// A pagination token.
+	NextToken *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ListIdentityPoolsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListIdentityPoolsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListIdentityPoolsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListIdentityPoolsInput"}
+	if s.MaxResults == nil {
+		invalidParams.Add(request.NewErrParamRequired("MaxResults"))
+	}
+	if s.MaxResults != nil && *s.MaxResults < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+	}
+	if s.NextToken != nil && len(*s.NextToken) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The result of a successful ListIdentityPools action.
+type ListIdentityPoolsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The identity pools returned by the ListIdentityPools action.
+	IdentityPools []*IdentityPoolShortDescription `type:"list"`
+
+	// A pagination token.
+	NextToken *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ListIdentityPoolsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListIdentityPoolsOutput) GoString() string {
+	return s.String()
+}
+
+// Input to the LookupDeveloperIdentity action.
+type LookupDeveloperIdentityInput struct {
+	_ struct{} `type:"structure"`
+
+	// A unique ID used by your backend authentication process to identify a user.
+	// Typically, a developer identity provider would issue many developer user
+	// identifiers, in keeping with the number of users.
+ DeveloperUserIdentifier *string `min:"1" type:"string"` + + // A unique identifier in the format REGION:GUID. + IdentityId *string `min:"1" type:"string"` + + // An identity pool ID in the format REGION:GUID. + IdentityPoolId *string `min:"1" type:"string" required:"true"` + + // The maximum number of identities to return. + MaxResults *int64 `min:"1" type:"integer"` + + // A pagination token. The first call you make will have NextToken set to null. + // After that the service will return NextToken values as needed. For example, + // let's say you make a request with MaxResults set to 10, and there are 20 + // matches in the database. The service will return a pagination token as a + // part of the response. This token can be used to call the API again and get + // results starting from the 11th match. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s LookupDeveloperIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LookupDeveloperIdentityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LookupDeveloperIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LookupDeveloperIdentityInput"} + if s.DeveloperUserIdentifier != nil && len(*s.DeveloperUserIdentifier) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DeveloperUserIdentifier", 1)) + } + if s.IdentityId != nil && len(*s.IdentityId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityId", 1)) + } + if s.IdentityPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityPoolId")) + } + if s.IdentityPoolId != nil && len(*s.IdentityPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityPoolId", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Returned in response to a successful LookupDeveloperIdentity action. +type LookupDeveloperIdentityOutput struct { + _ struct{} `type:"structure"` + + // This is the list of developer user identifiers associated with an identity + // ID. Cognito supports the association of multiple developer user identifiers + // with an identity ID. + DeveloperUserIdentifierList []*string `type:"list"` + + // A unique identifier in the format REGION:GUID. + IdentityId *string `min:"1" type:"string"` + + // A pagination token. The first call you make will have NextToken set to null. + // After that the service will return NextToken values as needed. For example, + // let's say you make a request with MaxResults set to 10, and there are 20 + // matches in the database. The service will return a pagination token as a + // part of the response. This token can be used to call the API again and get + // results starting from the 11th match. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s LookupDeveloperIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LookupDeveloperIdentityOutput) GoString() string { + return s.String() +} + +// Input to the MergeDeveloperIdentities action. 
+type MergeDeveloperIdentitiesInput struct { + _ struct{} `type:"structure"` + + // User identifier for the destination user. The value should be a DeveloperUserIdentifier. + DestinationUserIdentifier *string `min:"1" type:"string" required:"true"` + + // The "domain" by which Cognito will refer to your users. This is a (pseudo) + // domain name that you provide while creating an identity pool. This name acts + // as a placeholder that allows your backend and the Cognito service to communicate + // about the developer provider. For the DeveloperProviderName, you can use + // letters as well as period (.), underscore (_), and dash (-). + DeveloperProviderName *string `min:"1" type:"string" required:"true"` + + // An identity pool ID in the format REGION:GUID. + IdentityPoolId *string `min:"1" type:"string" required:"true"` + + // User identifier for the source user. The value should be a DeveloperUserIdentifier. + SourceUserIdentifier *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s MergeDeveloperIdentitiesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MergeDeveloperIdentitiesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MergeDeveloperIdentitiesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MergeDeveloperIdentitiesInput"} + if s.DestinationUserIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationUserIdentifier")) + } + if s.DestinationUserIdentifier != nil && len(*s.DestinationUserIdentifier) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DestinationUserIdentifier", 1)) + } + if s.DeveloperProviderName == nil { + invalidParams.Add(request.NewErrParamRequired("DeveloperProviderName")) + } + if s.DeveloperProviderName != nil && len(*s.DeveloperProviderName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DeveloperProviderName", 1)) + } + if s.IdentityPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityPoolId")) + } + if s.IdentityPoolId != nil && len(*s.IdentityPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityPoolId", 1)) + } + if s.SourceUserIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("SourceUserIdentifier")) + } + if s.SourceUserIdentifier != nil && len(*s.SourceUserIdentifier) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SourceUserIdentifier", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Returned in response to a successful MergeDeveloperIdentities action. +type MergeDeveloperIdentitiesOutput struct { + _ struct{} `type:"structure"` + + // A unique identifier in the format REGION:GUID. + IdentityId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s MergeDeveloperIdentitiesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MergeDeveloperIdentitiesOutput) GoString() string { + return s.String() +} + +// A provider representing an Amazon Cognito Identity User Pool and its client +// ID. +type Provider struct { + _ struct{} `type:"structure"` + + // The client ID for the Amazon Cognito Identity User Pool. + ClientId *string `min:"1" type:"string"` + + // The provider name for an Amazon Cognito Identity User Pool. For example, + // cognito-idp.us-east-1.amazonaws.com/us-east-1_123456789. 
+ ProviderName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s Provider) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Provider) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Provider) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Provider"} + if s.ClientId != nil && len(*s.ClientId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientId", 1)) + } + if s.ProviderName != nil && len(*s.ProviderName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ProviderName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Input to the SetIdentityPoolRoles action. +type SetIdentityPoolRolesInput struct { + _ struct{} `type:"structure"` + + // An identity pool ID in the format REGION:GUID. + IdentityPoolId *string `min:"1" type:"string" required:"true"` + + // The map of roles associated with this pool. For a given role, the key will + // be either "authenticated" or "unauthenticated" and the value will be the + // Role ARN. + Roles map[string]*string `type:"map" required:"true"` +} + +// String returns the string representation +func (s SetIdentityPoolRolesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityPoolRolesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetIdentityPoolRolesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetIdentityPoolRolesInput"} + if s.IdentityPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityPoolId")) + } + if s.IdentityPoolId != nil && len(*s.IdentityPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityPoolId", 1)) + } + if s.Roles == nil { + invalidParams.Add(request.NewErrParamRequired("Roles")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type SetIdentityPoolRolesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetIdentityPoolRolesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityPoolRolesOutput) GoString() string { + return s.String() +} + +// Input to the UnlinkDeveloperIdentity action. +type UnlinkDeveloperIdentityInput struct { + _ struct{} `type:"structure"` + + // The "domain" by which Cognito will refer to your users. + DeveloperProviderName *string `min:"1" type:"string" required:"true"` + + // A unique ID used by your backend authentication process to identify a user. + DeveloperUserIdentifier *string `min:"1" type:"string" required:"true"` + + // A unique identifier in the format REGION:GUID. + IdentityId *string `min:"1" type:"string" required:"true"` + + // An identity pool ID in the format REGION:GUID. + IdentityPoolId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UnlinkDeveloperIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnlinkDeveloperIdentityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UnlinkDeveloperIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UnlinkDeveloperIdentityInput"} + if s.DeveloperProviderName == nil { + invalidParams.Add(request.NewErrParamRequired("DeveloperProviderName")) + } + if s.DeveloperProviderName != nil && len(*s.DeveloperProviderName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DeveloperProviderName", 1)) + } + if s.DeveloperUserIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("DeveloperUserIdentifier")) + } + if s.DeveloperUserIdentifier != nil && len(*s.DeveloperUserIdentifier) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DeveloperUserIdentifier", 1)) + } + if s.IdentityId == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityId")) + } + if s.IdentityId != nil && len(*s.IdentityId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityId", 1)) + } + if s.IdentityPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityPoolId")) + } + if s.IdentityPoolId != nil && len(*s.IdentityPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityPoolId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UnlinkDeveloperIdentityOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UnlinkDeveloperIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnlinkDeveloperIdentityOutput) GoString() string { + return s.String() +} + +// Input to the UnlinkIdentity action. +type UnlinkIdentityInput struct { + _ struct{} `type:"structure"` + + // A unique identifier in the format REGION:GUID. + IdentityId *string `min:"1" type:"string" required:"true"` + + // A set of optional name-value pairs that map provider names to provider tokens. + Logins map[string]*string `type:"map" required:"true"` + + // Provider names to unlink from this identity. + LoginsToRemove []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s UnlinkIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnlinkIdentityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UnlinkIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UnlinkIdentityInput"} + if s.IdentityId == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityId")) + } + if s.IdentityId != nil && len(*s.IdentityId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityId", 1)) + } + if s.Logins == nil { + invalidParams.Add(request.NewErrParamRequired("Logins")) + } + if s.LoginsToRemove == nil { + invalidParams.Add(request.NewErrParamRequired("LoginsToRemove")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UnlinkIdentityOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UnlinkIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnlinkIdentityOutput) GoString() string { + return s.String() +} + +// An array of UnprocessedIdentityId objects, each of which contains an ErrorCode +// and IdentityId. +type UnprocessedIdentityId struct { + _ struct{} `type:"structure"` + + // The error code indicating the type of error that occurred. 
+ ErrorCode *string `type:"string" enum:"ErrorCode"` + + // A unique identifier in the format REGION:GUID. + IdentityId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UnprocessedIdentityId) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnprocessedIdentityId) GoString() string { + return s.String() +} + +const ( + // @enum ErrorCode + ErrorCodeAccessDenied = "AccessDenied" + // @enum ErrorCode + ErrorCodeInternalServerError = "InternalServerError" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/cognitoidentityiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/cognitoidentityiface/interface.go new file mode 100644 index 000000000..66dad2e1d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/cognitoidentityiface/interface.go @@ -0,0 +1,86 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cognitoidentityiface provides an interface for the Amazon Cognito Identity. +package cognitoidentityiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/cognitoidentity" +) + +// CognitoIdentityAPI is the interface type for cognitoidentity.CognitoIdentity. +type CognitoIdentityAPI interface { + CreateIdentityPoolRequest(*cognitoidentity.CreateIdentityPoolInput) (*request.Request, *cognitoidentity.IdentityPool) + + CreateIdentityPool(*cognitoidentity.CreateIdentityPoolInput) (*cognitoidentity.IdentityPool, error) + + DeleteIdentitiesRequest(*cognitoidentity.DeleteIdentitiesInput) (*request.Request, *cognitoidentity.DeleteIdentitiesOutput) + + DeleteIdentities(*cognitoidentity.DeleteIdentitiesInput) (*cognitoidentity.DeleteIdentitiesOutput, error) + + DeleteIdentityPoolRequest(*cognitoidentity.DeleteIdentityPoolInput) (*request.Request, *cognitoidentity.DeleteIdentityPoolOutput) + + DeleteIdentityPool(*cognitoidentity.DeleteIdentityPoolInput) (*cognitoidentity.DeleteIdentityPoolOutput, error) + + DescribeIdentityRequest(*cognitoidentity.DescribeIdentityInput) (*request.Request, *cognitoidentity.IdentityDescription) + + DescribeIdentity(*cognitoidentity.DescribeIdentityInput) (*cognitoidentity.IdentityDescription, error) + + DescribeIdentityPoolRequest(*cognitoidentity.DescribeIdentityPoolInput) (*request.Request, *cognitoidentity.IdentityPool) + + DescribeIdentityPool(*cognitoidentity.DescribeIdentityPoolInput) (*cognitoidentity.IdentityPool, error) + + GetCredentialsForIdentityRequest(*cognitoidentity.GetCredentialsForIdentityInput) (*request.Request, *cognitoidentity.GetCredentialsForIdentityOutput) + + GetCredentialsForIdentity(*cognitoidentity.GetCredentialsForIdentityInput) (*cognitoidentity.GetCredentialsForIdentityOutput, error) + + GetIdRequest(*cognitoidentity.GetIdInput) (*request.Request, *cognitoidentity.GetIdOutput) + + GetId(*cognitoidentity.GetIdInput) (*cognitoidentity.GetIdOutput, error) + + GetIdentityPoolRolesRequest(*cognitoidentity.GetIdentityPoolRolesInput) (*request.Request, *cognitoidentity.GetIdentityPoolRolesOutput) + + GetIdentityPoolRoles(*cognitoidentity.GetIdentityPoolRolesInput) (*cognitoidentity.GetIdentityPoolRolesOutput, error) + + GetOpenIdTokenRequest(*cognitoidentity.GetOpenIdTokenInput) (*request.Request, *cognitoidentity.GetOpenIdTokenOutput) + + GetOpenIdToken(*cognitoidentity.GetOpenIdTokenInput) (*cognitoidentity.GetOpenIdTokenOutput, error) + + 
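+	// Depending on this interface rather than the concrete client keeps calling
+	// code testable. A minimal test-double sketch (the mock type and canned
+	// value below are hypothetical, not part of the generated package):
+	//
+	//    type mockCognito struct {
+	//        cognitoidentityiface.CognitoIdentityAPI
+	//    }
+	//
+	//    func (m mockCognito) GetId(in *cognitoidentity.GetIdInput) (*cognitoidentity.GetIdOutput, error) {
+	//        return &cognitoidentity.GetIdOutput{IdentityId: aws.String("us-east-1:fake")}, nil
+	//    }
+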
GetOpenIdTokenForDeveloperIdentityRequest(*cognitoidentity.GetOpenIdTokenForDeveloperIdentityInput) (*request.Request, *cognitoidentity.GetOpenIdTokenForDeveloperIdentityOutput) + + GetOpenIdTokenForDeveloperIdentity(*cognitoidentity.GetOpenIdTokenForDeveloperIdentityInput) (*cognitoidentity.GetOpenIdTokenForDeveloperIdentityOutput, error) + + ListIdentitiesRequest(*cognitoidentity.ListIdentitiesInput) (*request.Request, *cognitoidentity.ListIdentitiesOutput) + + ListIdentities(*cognitoidentity.ListIdentitiesInput) (*cognitoidentity.ListIdentitiesOutput, error) + + ListIdentityPoolsRequest(*cognitoidentity.ListIdentityPoolsInput) (*request.Request, *cognitoidentity.ListIdentityPoolsOutput) + + ListIdentityPools(*cognitoidentity.ListIdentityPoolsInput) (*cognitoidentity.ListIdentityPoolsOutput, error) + + LookupDeveloperIdentityRequest(*cognitoidentity.LookupDeveloperIdentityInput) (*request.Request, *cognitoidentity.LookupDeveloperIdentityOutput) + + LookupDeveloperIdentity(*cognitoidentity.LookupDeveloperIdentityInput) (*cognitoidentity.LookupDeveloperIdentityOutput, error) + + MergeDeveloperIdentitiesRequest(*cognitoidentity.MergeDeveloperIdentitiesInput) (*request.Request, *cognitoidentity.MergeDeveloperIdentitiesOutput) + + MergeDeveloperIdentities(*cognitoidentity.MergeDeveloperIdentitiesInput) (*cognitoidentity.MergeDeveloperIdentitiesOutput, error) + + SetIdentityPoolRolesRequest(*cognitoidentity.SetIdentityPoolRolesInput) (*request.Request, *cognitoidentity.SetIdentityPoolRolesOutput) + + SetIdentityPoolRoles(*cognitoidentity.SetIdentityPoolRolesInput) (*cognitoidentity.SetIdentityPoolRolesOutput, error) + + UnlinkDeveloperIdentityRequest(*cognitoidentity.UnlinkDeveloperIdentityInput) (*request.Request, *cognitoidentity.UnlinkDeveloperIdentityOutput) + + UnlinkDeveloperIdentity(*cognitoidentity.UnlinkDeveloperIdentityInput) (*cognitoidentity.UnlinkDeveloperIdentityOutput, error) + + UnlinkIdentityRequest(*cognitoidentity.UnlinkIdentityInput) (*request.Request, *cognitoidentity.UnlinkIdentityOutput) + + UnlinkIdentity(*cognitoidentity.UnlinkIdentityInput) (*cognitoidentity.UnlinkIdentityOutput, error) + + UpdateIdentityPoolRequest(*cognitoidentity.IdentityPool) (*request.Request, *cognitoidentity.IdentityPool) + + UpdateIdentityPool(*cognitoidentity.IdentityPool) (*cognitoidentity.IdentityPool, error) +} + +var _ CognitoIdentityAPI = (*cognitoidentity.CognitoIdentity)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/customizations.go new file mode 100644 index 000000000..4bf243c35 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/customizations.go @@ -0,0 +1,12 @@ +package cognitoidentity + +import "github.com/aws/aws-sdk-go/aws/request" + +func init() { + initRequest = func(r *request.Request) { + switch r.Operation.Name { + case opGetOpenIdToken, opGetId, opGetCredentialsForIdentity: + r.Handlers.Sign.Clear() // these operations are unsigned + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/customizations_test.go b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/customizations_test.go new file mode 100644 index 000000000..dea3026e9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/customizations_test.go @@ -0,0 +1,42 @@ +package cognitoidentity_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/awstesting/unit" + 
"github.com/aws/aws-sdk-go/service/cognitoidentity" + "github.com/stretchr/testify/assert" +) + +var svc = cognitoidentity.New(unit.Session) + +func TestUnsignedRequest_GetID(t *testing.T) { + req, _ := svc.GetIdRequest(&cognitoidentity.GetIdInput{ + IdentityPoolId: aws.String("IdentityPoolId"), + }) + + err := req.Sign() + assert.NoError(t, err) + assert.Equal(t, "", req.HTTPRequest.Header.Get("Authorization")) +} + +func TestUnsignedRequest_GetOpenIDToken(t *testing.T) { + req, _ := svc.GetOpenIdTokenRequest(&cognitoidentity.GetOpenIdTokenInput{ + IdentityId: aws.String("IdentityId"), + }) + + err := req.Sign() + assert.NoError(t, err) + assert.Equal(t, "", req.HTTPRequest.Header.Get("Authorization")) +} + +func TestUnsignedRequest_GetCredentialsForIdentity(t *testing.T) { + req, _ := svc.GetCredentialsForIdentityRequest(&cognitoidentity.GetCredentialsForIdentityInput{ + IdentityId: aws.String("IdentityId"), + }) + + err := req.Sign() + assert.NoError(t, err) + assert.Equal(t, "", req.HTTPRequest.Header.Get("Authorization")) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/examples_test.go new file mode 100644 index 000000000..9389663b3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/examples_test.go @@ -0,0 +1,450 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package cognitoidentity_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/cognitoidentity" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleCognitoIdentity_CreateIdentityPool() { + svc := cognitoidentity.New(session.New()) + + params := &cognitoidentity.CreateIdentityPoolInput{ + AllowUnauthenticatedIdentities: aws.Bool(true), // Required + IdentityPoolName: aws.String("IdentityPoolName"), // Required + CognitoIdentityProviders: []*cognitoidentity.Provider{ + { // Required + ClientId: aws.String("ProviderClientId"), + ProviderName: aws.String("ProviderName"), + }, + // More values... + }, + DeveloperProviderName: aws.String("DeveloperProviderName"), + OpenIdConnectProviderARNs: []*string{ + aws.String("ARNString"), // Required + // More values... + }, + SamlProviderARNs: []*string{ + aws.String("ARNString"), // Required + // More values... + }, + SupportedLoginProviders: map[string]*string{ + "Key": aws.String("IdentityProviderId"), // Required + // More values... + }, + } + resp, err := svc.CreateIdentityPool(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentity_DeleteIdentities() { + svc := cognitoidentity.New(session.New()) + + params := &cognitoidentity.DeleteIdentitiesInput{ + IdentityIdsToDelete: []*string{ // Required + aws.String("IdentityId"), // Required + // More values... + }, + } + resp, err := svc.DeleteIdentities(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCognitoIdentity_DeleteIdentityPool() { + svc := cognitoidentity.New(session.New()) + + params := &cognitoidentity.DeleteIdentityPoolInput{ + IdentityPoolId: aws.String("IdentityPoolId"), // Required + } + resp, err := svc.DeleteIdentityPool(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentity_DescribeIdentity() { + svc := cognitoidentity.New(session.New()) + + params := &cognitoidentity.DescribeIdentityInput{ + IdentityId: aws.String("IdentityId"), // Required + } + resp, err := svc.DescribeIdentity(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentity_DescribeIdentityPool() { + svc := cognitoidentity.New(session.New()) + + params := &cognitoidentity.DescribeIdentityPoolInput{ + IdentityPoolId: aws.String("IdentityPoolId"), // Required + } + resp, err := svc.DescribeIdentityPool(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentity_GetCredentialsForIdentity() { + svc := cognitoidentity.New(session.New()) + + params := &cognitoidentity.GetCredentialsForIdentityInput{ + IdentityId: aws.String("IdentityId"), // Required + CustomRoleArn: aws.String("ARNString"), + Logins: map[string]*string{ + "Key": aws.String("IdentityProviderToken"), // Required + // More values... + }, + } + resp, err := svc.GetCredentialsForIdentity(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentity_GetId() { + svc := cognitoidentity.New(session.New()) + + params := &cognitoidentity.GetIdInput{ + IdentityPoolId: aws.String("IdentityPoolId"), // Required + AccountId: aws.String("AccountId"), + Logins: map[string]*string{ + "Key": aws.String("IdentityProviderToken"), // Required + // More values... + }, + } + resp, err := svc.GetId(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentity_GetIdentityPoolRoles() { + svc := cognitoidentity.New(session.New()) + + params := &cognitoidentity.GetIdentityPoolRolesInput{ + IdentityPoolId: aws.String("IdentityPoolId"), // Required + } + resp, err := svc.GetIdentityPoolRoles(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentity_GetOpenIdToken() { + svc := cognitoidentity.New(session.New()) + + params := &cognitoidentity.GetOpenIdTokenInput{ + IdentityId: aws.String("IdentityId"), // Required + Logins: map[string]*string{ + "Key": aws.String("IdentityProviderToken"), // Required + // More values... 
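+			// e.g. for a Facebook login the key is the provider domain:
+			//   "graph.facebook.com": aws.String(fbAccessToken), // fbAccessToken is illustrative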
+ }, + } + resp, err := svc.GetOpenIdToken(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentity_GetOpenIdTokenForDeveloperIdentity() { + svc := cognitoidentity.New(session.New()) + + params := &cognitoidentity.GetOpenIdTokenForDeveloperIdentityInput{ + IdentityPoolId: aws.String("IdentityPoolId"), // Required + Logins: map[string]*string{ // Required + "Key": aws.String("IdentityProviderToken"), // Required + // More values... + }, + IdentityId: aws.String("IdentityId"), + TokenDuration: aws.Int64(1), + } + resp, err := svc.GetOpenIdTokenForDeveloperIdentity(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentity_ListIdentities() { + svc := cognitoidentity.New(session.New()) + + params := &cognitoidentity.ListIdentitiesInput{ + IdentityPoolId: aws.String("IdentityPoolId"), // Required + MaxResults: aws.Int64(1), // Required + HideDisabled: aws.Bool(true), + NextToken: aws.String("PaginationKey"), + } + resp, err := svc.ListIdentities(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentity_ListIdentityPools() { + svc := cognitoidentity.New(session.New()) + + params := &cognitoidentity.ListIdentityPoolsInput{ + MaxResults: aws.Int64(1), // Required + NextToken: aws.String("PaginationKey"), + } + resp, err := svc.ListIdentityPools(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentity_LookupDeveloperIdentity() { + svc := cognitoidentity.New(session.New()) + + params := &cognitoidentity.LookupDeveloperIdentityInput{ + IdentityPoolId: aws.String("IdentityPoolId"), // Required + DeveloperUserIdentifier: aws.String("DeveloperUserIdentifier"), + IdentityId: aws.String("IdentityId"), + MaxResults: aws.Int64(1), + NextToken: aws.String("PaginationKey"), + } + resp, err := svc.LookupDeveloperIdentity(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentity_MergeDeveloperIdentities() { + svc := cognitoidentity.New(session.New()) + + params := &cognitoidentity.MergeDeveloperIdentitiesInput{ + DestinationUserIdentifier: aws.String("DeveloperUserIdentifier"), // Required + DeveloperProviderName: aws.String("DeveloperProviderName"), // Required + IdentityPoolId: aws.String("IdentityPoolId"), // Required + SourceUserIdentifier: aws.String("DeveloperUserIdentifier"), // Required + } + resp, err := svc.MergeDeveloperIdentities(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCognitoIdentity_SetIdentityPoolRoles() { + svc := cognitoidentity.New(session.New()) + + params := &cognitoidentity.SetIdentityPoolRolesInput{ + IdentityPoolId: aws.String("IdentityPoolId"), // Required + Roles: map[string]*string{ // Required + "Key": aws.String("ARNString"), // Required + // More values... + }, + } + resp, err := svc.SetIdentityPoolRoles(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentity_UnlinkDeveloperIdentity() { + svc := cognitoidentity.New(session.New()) + + params := &cognitoidentity.UnlinkDeveloperIdentityInput{ + DeveloperProviderName: aws.String("DeveloperProviderName"), // Required + DeveloperUserIdentifier: aws.String("DeveloperUserIdentifier"), // Required + IdentityId: aws.String("IdentityId"), // Required + IdentityPoolId: aws.String("IdentityPoolId"), // Required + } + resp, err := svc.UnlinkDeveloperIdentity(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentity_UnlinkIdentity() { + svc := cognitoidentity.New(session.New()) + + params := &cognitoidentity.UnlinkIdentityInput{ + IdentityId: aws.String("IdentityId"), // Required + Logins: map[string]*string{ // Required + "Key": aws.String("IdentityProviderToken"), // Required + // More values... + }, + LoginsToRemove: []*string{ // Required + aws.String("IdentityProviderName"), // Required + // More values... + }, + } + resp, err := svc.UnlinkIdentity(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentity_UpdateIdentityPool() { + svc := cognitoidentity.New(session.New()) + + params := &cognitoidentity.IdentityPool{ + AllowUnauthenticatedIdentities: aws.Bool(true), // Required + IdentityPoolId: aws.String("IdentityPoolId"), // Required + IdentityPoolName: aws.String("IdentityPoolName"), // Required + CognitoIdentityProviders: []*cognitoidentity.Provider{ + { // Required + ClientId: aws.String("ProviderClientId"), + ProviderName: aws.String("ProviderName"), + }, + // More values... + }, + DeveloperProviderName: aws.String("DeveloperProviderName"), + OpenIdConnectProviderARNs: []*string{ + aws.String("ARNString"), // Required + // More values... + }, + SamlProviderARNs: []*string{ + aws.String("ARNString"), // Required + // More values... + }, + SupportedLoginProviders: map[string]*string{ + "Key": aws.String("IdentityProviderId"), // Required + // More values... + }, + } + resp, err := svc.UpdateIdentityPool(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/service.go b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/service.go new file mode 100644 index 000000000..f240b7e41 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/service.go @@ -0,0 +1,119 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. 
DO NOT EDIT.
+
+package cognitoidentity
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+)
+
+// Amazon Cognito is a web service that delivers scoped temporary credentials
+// to mobile devices and other untrusted environments. Amazon Cognito uniquely
+// identifies a device and supplies the user with a consistent identity over
+// the lifetime of an application.
+//
+// Using Amazon Cognito, you can enable authentication with one or more third-party
+// identity providers (Facebook, Google, or Login with Amazon), and you can
+// also choose to support unauthenticated access from your app. Cognito delivers
+// a unique identifier for each user and acts as an OpenID token provider trusted
+// by AWS Security Token Service (STS) to access temporary, limited-privilege
+// AWS credentials.
+//
+// To provide end-user credentials, first make an unsigned call to GetId. If
+// the end user is authenticated with one of the supported identity providers,
+// set the Logins map with the identity provider token. GetId returns a unique
+// identifier for the user.
+//
+// Next, make an unsigned call to GetCredentialsForIdentity. This call expects
+// the same Logins map as the GetId call, as well as the IdentityID originally
+// returned by GetId. Assuming your identity pool has been configured via the
+// SetIdentityPoolRoles operation, GetCredentialsForIdentity will return AWS
+// credentials for your use. If your pool has not been configured with SetIdentityPoolRoles,
+// or if you want to follow legacy flow, make an unsigned call to GetOpenIdToken,
+// which returns the OpenID token necessary to call STS and retrieve AWS credentials.
+// This call expects the same Logins map as the GetId call, as well as the IdentityID
+// originally returned by GetId. The token returned by GetOpenIdToken can be
+// passed to the STS operation AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html)
+// to retrieve AWS credentials.
+//
+// If you want to use Amazon Cognito in an Android, iOS, or Unity application,
+// you will probably want to make API calls via the AWS Mobile SDK. To learn
+// more, see the AWS Mobile SDK Developer Guide (http://docs.aws.amazon.com/mobile/index.html).
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type CognitoIdentity struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "cognito-identity"
+
+// New creates a new instance of the CognitoIdentity client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//    // Create a CognitoIdentity client from just a session.
+// svc := cognitoidentity.New(mySession) +// +// // Create a CognitoIdentity client with additional configuration +// svc := cognitoidentity.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *CognitoIdentity { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *CognitoIdentity { + svc := &CognitoIdentity{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-06-30", + JSONVersion: "1.1", + TargetPrefix: "AWSCognitoIdentityService", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a CognitoIdentity operation and runs any +// custom request initialization. +func (c *CognitoIdentity) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cognitoidentityprovider/api.go b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentityprovider/api.go new file mode 100644 index 000000000..dc8f9d541 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentityprovider/api.go @@ -0,0 +1,4546 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cognitoidentityprovider provides a client for Amazon Cognito Identity Provider. +package cognitoidentityprovider + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opAddCustomAttributes = "AddCustomAttributes" + +// AddCustomAttributesRequest generates a "aws/request.Request" representing the +// client's request for the AddCustomAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddCustomAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddCustomAttributesRequest method. 
+// req, resp := client.AddCustomAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoIdentityProvider) AddCustomAttributesRequest(input *AddCustomAttributesInput) (req *request.Request, output *AddCustomAttributesOutput) { + op := &request.Operation{ + Name: opAddCustomAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddCustomAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &AddCustomAttributesOutput{} + req.Data = output + return +} + +// Adds additional user attributes to the user pool schema. +func (c *CognitoIdentityProvider) AddCustomAttributes(input *AddCustomAttributesInput) (*AddCustomAttributesOutput, error) { + req, out := c.AddCustomAttributesRequest(input) + err := req.Send() + return out, err +} + +const opAdminConfirmSignUp = "AdminConfirmSignUp" + +// AdminConfirmSignUpRequest generates a "aws/request.Request" representing the +// client's request for the AdminConfirmSignUp operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AdminConfirmSignUp method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AdminConfirmSignUpRequest method. +// req, resp := client.AdminConfirmSignUpRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoIdentityProvider) AdminConfirmSignUpRequest(input *AdminConfirmSignUpInput) (req *request.Request, output *AdminConfirmSignUpOutput) { + op := &request.Operation{ + Name: opAdminConfirmSignUp, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AdminConfirmSignUpInput{} + } + + req = c.newRequest(op, input, output) + output = &AdminConfirmSignUpOutput{} + req.Data = output + return +} + +// Confirms user registration as an admin without using a confirmation code. +// Works on any user. +func (c *CognitoIdentityProvider) AdminConfirmSignUp(input *AdminConfirmSignUpInput) (*AdminConfirmSignUpOutput, error) { + req, out := c.AdminConfirmSignUpRequest(input) + err := req.Send() + return out, err +} + +const opAdminDeleteUser = "AdminDeleteUser" + +// AdminDeleteUserRequest generates a "aws/request.Request" representing the +// client's request for the AdminDeleteUser operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AdminDeleteUser method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AdminDeleteUserRequest method. 
+// req, resp := client.AdminDeleteUserRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoIdentityProvider) AdminDeleteUserRequest(input *AdminDeleteUserInput) (req *request.Request, output *AdminDeleteUserOutput) { + op := &request.Operation{ + Name: opAdminDeleteUser, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AdminDeleteUserInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AdminDeleteUserOutput{} + req.Data = output + return +} + +// Deletes a user as an administrator. Works on any user. +func (c *CognitoIdentityProvider) AdminDeleteUser(input *AdminDeleteUserInput) (*AdminDeleteUserOutput, error) { + req, out := c.AdminDeleteUserRequest(input) + err := req.Send() + return out, err +} + +const opAdminDeleteUserAttributes = "AdminDeleteUserAttributes" + +// AdminDeleteUserAttributesRequest generates a "aws/request.Request" representing the +// client's request for the AdminDeleteUserAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AdminDeleteUserAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AdminDeleteUserAttributesRequest method. +// req, resp := client.AdminDeleteUserAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoIdentityProvider) AdminDeleteUserAttributesRequest(input *AdminDeleteUserAttributesInput) (req *request.Request, output *AdminDeleteUserAttributesOutput) { + op := &request.Operation{ + Name: opAdminDeleteUserAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AdminDeleteUserAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &AdminDeleteUserAttributesOutput{} + req.Data = output + return +} + +// Deletes the user attributes in a user pool as an administrator. Works on +// any user. +func (c *CognitoIdentityProvider) AdminDeleteUserAttributes(input *AdminDeleteUserAttributesInput) (*AdminDeleteUserAttributesOutput, error) { + req, out := c.AdminDeleteUserAttributesRequest(input) + err := req.Send() + return out, err +} + +const opAdminDisableUser = "AdminDisableUser" + +// AdminDisableUserRequest generates a "aws/request.Request" representing the +// client's request for the AdminDisableUser operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AdminDisableUser method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AdminDisableUserRequest method. +// req, resp := client.AdminDisableUserRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoIdentityProvider) AdminDisableUserRequest(input *AdminDisableUserInput) (req *request.Request, output *AdminDisableUserOutput) { + op := &request.Operation{ + Name: opAdminDisableUser, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AdminDisableUserInput{} + } + + req = c.newRequest(op, input, output) + output = &AdminDisableUserOutput{} + req.Data = output + return +} + +// Disables the specified user as an administrator. Works on any user. +func (c *CognitoIdentityProvider) AdminDisableUser(input *AdminDisableUserInput) (*AdminDisableUserOutput, error) { + req, out := c.AdminDisableUserRequest(input) + err := req.Send() + return out, err +} + +const opAdminEnableUser = "AdminEnableUser" + +// AdminEnableUserRequest generates a "aws/request.Request" representing the +// client's request for the AdminEnableUser operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AdminEnableUser method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AdminEnableUserRequest method. +// req, resp := client.AdminEnableUserRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoIdentityProvider) AdminEnableUserRequest(input *AdminEnableUserInput) (req *request.Request, output *AdminEnableUserOutput) { + op := &request.Operation{ + Name: opAdminEnableUser, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AdminEnableUserInput{} + } + + req = c.newRequest(op, input, output) + output = &AdminEnableUserOutput{} + req.Data = output + return +} + +// Enables the specified user as an administrator. Works on any user. +func (c *CognitoIdentityProvider) AdminEnableUser(input *AdminEnableUserInput) (*AdminEnableUserOutput, error) { + req, out := c.AdminEnableUserRequest(input) + err := req.Send() + return out, err +} + +const opAdminGetUser = "AdminGetUser" + +// AdminGetUserRequest generates a "aws/request.Request" representing the +// client's request for the AdminGetUser operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AdminGetUser method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
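+//
+// For instance, to run custom logic just before the request is sent (a sketch;
+// the handler body is illustrative, not generated code):
+//
+//    req, resp := client.AdminGetUserRequest(params)
+//    req.Handlers.Send.PushFront(func(r *request.Request) {
+//        // inspect or mutate r.HTTPRequest before it goes out
+//    })
+//    if err := req.Send(); err == nil {
+//        fmt.Println(resp)
+//    }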
+// +// // Example sending a request using the AdminGetUserRequest method. +// req, resp := client.AdminGetUserRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoIdentityProvider) AdminGetUserRequest(input *AdminGetUserInput) (req *request.Request, output *AdminGetUserOutput) { + op := &request.Operation{ + Name: opAdminGetUser, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AdminGetUserInput{} + } + + req = c.newRequest(op, input, output) + output = &AdminGetUserOutput{} + req.Data = output + return +} + +// Gets the specified user by user name in a user pool as an administrator. +// Works on any user. +func (c *CognitoIdentityProvider) AdminGetUser(input *AdminGetUserInput) (*AdminGetUserOutput, error) { + req, out := c.AdminGetUserRequest(input) + err := req.Send() + return out, err +} + +const opAdminResetUserPassword = "AdminResetUserPassword" + +// AdminResetUserPasswordRequest generates a "aws/request.Request" representing the +// client's request for the AdminResetUserPassword operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AdminResetUserPassword method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AdminResetUserPasswordRequest method. +// req, resp := client.AdminResetUserPasswordRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoIdentityProvider) AdminResetUserPasswordRequest(input *AdminResetUserPasswordInput) (req *request.Request, output *AdminResetUserPasswordOutput) { + op := &request.Operation{ + Name: opAdminResetUserPassword, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AdminResetUserPasswordInput{} + } + + req = c.newRequest(op, input, output) + output = &AdminResetUserPasswordOutput{} + req.Data = output + return +} + +// Resets the specified user's password in a user pool as an administrator. +// Works on any user. +func (c *CognitoIdentityProvider) AdminResetUserPassword(input *AdminResetUserPasswordInput) (*AdminResetUserPasswordOutput, error) { + req, out := c.AdminResetUserPasswordRequest(input) + err := req.Send() + return out, err +} + +const opAdminSetUserSettings = "AdminSetUserSettings" + +// AdminSetUserSettingsRequest generates a "aws/request.Request" representing the +// client's request for the AdminSetUserSettings operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AdminSetUserSettings method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the AdminSetUserSettingsRequest method. +// req, resp := client.AdminSetUserSettingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoIdentityProvider) AdminSetUserSettingsRequest(input *AdminSetUserSettingsInput) (req *request.Request, output *AdminSetUserSettingsOutput) { + op := &request.Operation{ + Name: opAdminSetUserSettings, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AdminSetUserSettingsInput{} + } + + req = c.newRequest(op, input, output) + output = &AdminSetUserSettingsOutput{} + req.Data = output + return +} + +// Sets all the user settings for a specified user name. Works on any user. +func (c *CognitoIdentityProvider) AdminSetUserSettings(input *AdminSetUserSettingsInput) (*AdminSetUserSettingsOutput, error) { + req, out := c.AdminSetUserSettingsRequest(input) + err := req.Send() + return out, err +} + +const opAdminUpdateUserAttributes = "AdminUpdateUserAttributes" + +// AdminUpdateUserAttributesRequest generates a "aws/request.Request" representing the +// client's request for the AdminUpdateUserAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AdminUpdateUserAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AdminUpdateUserAttributesRequest method. +// req, resp := client.AdminUpdateUserAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoIdentityProvider) AdminUpdateUserAttributesRequest(input *AdminUpdateUserAttributesInput) (req *request.Request, output *AdminUpdateUserAttributesOutput) { + op := &request.Operation{ + Name: opAdminUpdateUserAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AdminUpdateUserAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &AdminUpdateUserAttributesOutput{} + req.Data = output + return +} + +// Updates the specified user's attributes, including developer attributes, +// as an administrator. Works on any user. +func (c *CognitoIdentityProvider) AdminUpdateUserAttributes(input *AdminUpdateUserAttributesInput) (*AdminUpdateUserAttributesOutput, error) { + req, out := c.AdminUpdateUserAttributesRequest(input) + err := req.Send() + return out, err +} + +const opChangePassword = "ChangePassword" + +// ChangePasswordRequest generates a "aws/request.Request" representing the +// client's request for the ChangePassword operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ChangePassword method directly +// instead. 
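+//
+// A direct call might look like the following sketch (the field values are
+// placeholders, not real credentials; AccessToken identifies the signed-in
+// user):
+//
+//    out, err := client.ChangePassword(&ChangePasswordInput{
+//        AccessToken:      aws.String("<access token>"),  // placeholder
+//        PreviousPassword: aws.String("<old password>"),  // placeholder
+//        ProposedPassword: aws.String("<new password>"),  // placeholder
+//    })
+//    if err != nil {
+//        log.Println(err)
+//    } else {
+//        fmt.Println(out)
+//    }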
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ChangePasswordRequest method. +// req, resp := client.ChangePasswordRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoIdentityProvider) ChangePasswordRequest(input *ChangePasswordInput) (req *request.Request, output *ChangePasswordOutput) { + op := &request.Operation{ + Name: opChangePassword, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ChangePasswordInput{} + } + + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + output = &ChangePasswordOutput{} + req.Data = output + return +} + +// Changes the password for a specified user in a user pool. +func (c *CognitoIdentityProvider) ChangePassword(input *ChangePasswordInput) (*ChangePasswordOutput, error) { + req, out := c.ChangePasswordRequest(input) + err := req.Send() + return out, err +} + +const opConfirmForgotPassword = "ConfirmForgotPassword" + +// ConfirmForgotPasswordRequest generates a "aws/request.Request" representing the +// client's request for the ConfirmForgotPassword operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ConfirmForgotPassword method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ConfirmForgotPasswordRequest method. +// req, resp := client.ConfirmForgotPasswordRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoIdentityProvider) ConfirmForgotPasswordRequest(input *ConfirmForgotPasswordInput) (req *request.Request, output *ConfirmForgotPasswordOutput) { + op := &request.Operation{ + Name: opConfirmForgotPassword, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ConfirmForgotPasswordInput{} + } + + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + output = &ConfirmForgotPasswordOutput{} + req.Data = output + return +} + +// Allows a user to enter a code provided when they reset their password to +// update their password. +func (c *CognitoIdentityProvider) ConfirmForgotPassword(input *ConfirmForgotPasswordInput) (*ConfirmForgotPasswordOutput, error) { + req, out := c.ConfirmForgotPasswordRequest(input) + err := req.Send() + return out, err +} + +const opConfirmSignUp = "ConfirmSignUp" + +// ConfirmSignUpRequest generates a "aws/request.Request" representing the +// client's request for the ConfirmSignUp operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the ConfirmSignUp method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ConfirmSignUpRequest method. +// req, resp := client.ConfirmSignUpRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoIdentityProvider) ConfirmSignUpRequest(input *ConfirmSignUpInput) (req *request.Request, output *ConfirmSignUpOutput) { + op := &request.Operation{ + Name: opConfirmSignUp, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ConfirmSignUpInput{} + } + + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + output = &ConfirmSignUpOutput{} + req.Data = output + return +} + +// Confirms registration of a user and handles the existing alias from a previous +// user. +func (c *CognitoIdentityProvider) ConfirmSignUp(input *ConfirmSignUpInput) (*ConfirmSignUpOutput, error) { + req, out := c.ConfirmSignUpRequest(input) + err := req.Send() + return out, err +} + +const opCreateUserPool = "CreateUserPool" + +// CreateUserPoolRequest generates a "aws/request.Request" representing the +// client's request for the CreateUserPool operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateUserPool method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateUserPoolRequest method. +// req, resp := client.CreateUserPoolRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoIdentityProvider) CreateUserPoolRequest(input *CreateUserPoolInput) (req *request.Request, output *CreateUserPoolOutput) { + op := &request.Operation{ + Name: opCreateUserPool, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateUserPoolInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateUserPoolOutput{} + req.Data = output + return +} + +// Creates a new Amazon Cognito user pool and sets the password policy for the +// pool. +func (c *CognitoIdentityProvider) CreateUserPool(input *CreateUserPoolInput) (*CreateUserPoolOutput, error) { + req, out := c.CreateUserPoolRequest(input) + err := req.Send() + return out, err +} + +const opCreateUserPoolClient = "CreateUserPoolClient" + +// CreateUserPoolClientRequest generates a "aws/request.Request" representing the +// client's request for the CreateUserPoolClient operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If
+// you just want the service response, call the CreateUserPoolClient method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the CreateUserPoolClientRequest method.
+//    req, resp := client.CreateUserPoolClientRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *CognitoIdentityProvider) CreateUserPoolClientRequest(input *CreateUserPoolClientInput) (req *request.Request, output *CreateUserPoolClientOutput) {
+    op := &request.Operation{
+        Name:       opCreateUserPoolClient,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &CreateUserPoolClientInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &CreateUserPoolClientOutput{}
+    req.Data = output
+    return
+}
+
+// Creates the user pool client.
+func (c *CognitoIdentityProvider) CreateUserPoolClient(input *CreateUserPoolClientInput) (*CreateUserPoolClientOutput, error) {
+    req, out := c.CreateUserPoolClientRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opDeleteUser = "DeleteUser"
+
+// DeleteUserRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteUser operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DeleteUser method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DeleteUserRequest method.
+//    req, resp := client.DeleteUserRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *CognitoIdentityProvider) DeleteUserRequest(input *DeleteUserInput) (req *request.Request, output *DeleteUserOutput) {
+    op := &request.Operation{
+        Name:       opDeleteUser,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &DeleteUserInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
+    req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+    req.Config.Credentials = credentials.AnonymousCredentials
+    output = &DeleteUserOutput{}
+    req.Data = output
+    return
+}
+
+// Allows a user to delete their own account.
+func (c *CognitoIdentityProvider) DeleteUser(input *DeleteUserInput) (*DeleteUserOutput, error) {
+    req, out := c.DeleteUserRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opDeleteUserAttributes = "DeleteUserAttributes"
+
+// DeleteUserAttributesRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteUserAttributes operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request.
If +// you just want the service response, call the DeleteUserAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteUserAttributesRequest method. +// req, resp := client.DeleteUserAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoIdentityProvider) DeleteUserAttributesRequest(input *DeleteUserAttributesInput) (req *request.Request, output *DeleteUserAttributesOutput) { + op := &request.Operation{ + Name: opDeleteUserAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteUserAttributesInput{} + } + + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + output = &DeleteUserAttributesOutput{} + req.Data = output + return +} + +// Deletes the attributes for a user. +func (c *CognitoIdentityProvider) DeleteUserAttributes(input *DeleteUserAttributesInput) (*DeleteUserAttributesOutput, error) { + req, out := c.DeleteUserAttributesRequest(input) + err := req.Send() + return out, err +} + +const opDeleteUserPool = "DeleteUserPool" + +// DeleteUserPoolRequest generates a "aws/request.Request" representing the +// client's request for the DeleteUserPool operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteUserPool method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteUserPoolRequest method. +// req, resp := client.DeleteUserPoolRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoIdentityProvider) DeleteUserPoolRequest(input *DeleteUserPoolInput) (req *request.Request, output *DeleteUserPoolOutput) { + op := &request.Operation{ + Name: opDeleteUserPool, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteUserPoolInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteUserPoolOutput{} + req.Data = output + return +} + +// Deletes the specified Amazon Cognito user pool. +func (c *CognitoIdentityProvider) DeleteUserPool(input *DeleteUserPoolInput) (*DeleteUserPoolOutput, error) { + req, out := c.DeleteUserPoolRequest(input) + err := req.Send() + return out, err +} + +const opDeleteUserPoolClient = "DeleteUserPoolClient" + +// DeleteUserPoolClientRequest generates a "aws/request.Request" representing the +// client's request for the DeleteUserPoolClient operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteUserPoolClient method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteUserPoolClientRequest method. +// req, resp := client.DeleteUserPoolClientRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoIdentityProvider) DeleteUserPoolClientRequest(input *DeleteUserPoolClientInput) (req *request.Request, output *DeleteUserPoolClientOutput) { + op := &request.Operation{ + Name: opDeleteUserPoolClient, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteUserPoolClientInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteUserPoolClientOutput{} + req.Data = output + return +} + +// Allows the developer to delete the user pool client. +func (c *CognitoIdentityProvider) DeleteUserPoolClient(input *DeleteUserPoolClientInput) (*DeleteUserPoolClientOutput, error) { + req, out := c.DeleteUserPoolClientRequest(input) + err := req.Send() + return out, err +} + +const opDescribeUserPool = "DescribeUserPool" + +// DescribeUserPoolRequest generates a "aws/request.Request" representing the +// client's request for the DescribeUserPool operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeUserPool method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeUserPoolRequest method. +// req, resp := client.DescribeUserPoolRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoIdentityProvider) DescribeUserPoolRequest(input *DescribeUserPoolInput) (req *request.Request, output *DescribeUserPoolOutput) { + op := &request.Operation{ + Name: opDescribeUserPool, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeUserPoolInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeUserPoolOutput{} + req.Data = output + return +} + +// Returns the configuration information and metadata of the specified user +// pool. +func (c *CognitoIdentityProvider) DescribeUserPool(input *DescribeUserPoolInput) (*DescribeUserPoolOutput, error) { + req, out := c.DescribeUserPoolRequest(input) + err := req.Send() + return out, err +} + +const opDescribeUserPoolClient = "DescribeUserPoolClient" + +// DescribeUserPoolClientRequest generates a "aws/request.Request" representing the +// client's request for the DescribeUserPoolClient operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeUserPoolClient method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeUserPoolClientRequest method. +// req, resp := client.DescribeUserPoolClientRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoIdentityProvider) DescribeUserPoolClientRequest(input *DescribeUserPoolClientInput) (req *request.Request, output *DescribeUserPoolClientOutput) { + op := &request.Operation{ + Name: opDescribeUserPoolClient, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeUserPoolClientInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeUserPoolClientOutput{} + req.Data = output + return +} + +// Client method for returning the configuration information and metadata of +// the specified user pool client. +func (c *CognitoIdentityProvider) DescribeUserPoolClient(input *DescribeUserPoolClientInput) (*DescribeUserPoolClientOutput, error) { + req, out := c.DescribeUserPoolClientRequest(input) + err := req.Send() + return out, err +} + +const opForgotPassword = "ForgotPassword" + +// ForgotPasswordRequest generates a "aws/request.Request" representing the +// client's request for the ForgotPassword operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ForgotPassword method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ForgotPasswordRequest method. +// req, resp := client.ForgotPasswordRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoIdentityProvider) ForgotPasswordRequest(input *ForgotPasswordInput) (req *request.Request, output *ForgotPasswordOutput) { + op := &request.Operation{ + Name: opForgotPassword, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ForgotPasswordInput{} + } + + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + output = &ForgotPasswordOutput{} + req.Data = output + return +} + +// Retrieves the password for the specified client ID or username. +func (c *CognitoIdentityProvider) ForgotPassword(input *ForgotPasswordInput) (*ForgotPasswordOutput, error) { + req, out := c.ForgotPasswordRequest(input) + err := req.Send() + return out, err +} + +const opGetUser = "GetUser" + +// GetUserRequest generates a "aws/request.Request" representing the +// client's request for the GetUser operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetUser method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetUserRequest method. +// req, resp := client.GetUserRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoIdentityProvider) GetUserRequest(input *GetUserInput) (req *request.Request, output *GetUserOutput) { + op := &request.Operation{ + Name: opGetUser, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetUserInput{} + } + + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + output = &GetUserOutput{} + req.Data = output + return +} + +// Gets the user attributes and metadata for a user. +func (c *CognitoIdentityProvider) GetUser(input *GetUserInput) (*GetUserOutput, error) { + req, out := c.GetUserRequest(input) + err := req.Send() + return out, err +} + +const opGetUserAttributeVerificationCode = "GetUserAttributeVerificationCode" + +// GetUserAttributeVerificationCodeRequest generates a "aws/request.Request" representing the +// client's request for the GetUserAttributeVerificationCode operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetUserAttributeVerificationCode method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetUserAttributeVerificationCodeRequest method. +// req, resp := client.GetUserAttributeVerificationCodeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoIdentityProvider) GetUserAttributeVerificationCodeRequest(input *GetUserAttributeVerificationCodeInput) (req *request.Request, output *GetUserAttributeVerificationCodeOutput) { + op := &request.Operation{ + Name: opGetUserAttributeVerificationCode, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetUserAttributeVerificationCodeInput{} + } + + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + output = &GetUserAttributeVerificationCodeOutput{} + req.Data = output + return +} + +// Gets the user attribute verification code for the specified attribute name. 
+func (c *CognitoIdentityProvider) GetUserAttributeVerificationCode(input *GetUserAttributeVerificationCodeInput) (*GetUserAttributeVerificationCodeOutput, error) { + req, out := c.GetUserAttributeVerificationCodeRequest(input) + err := req.Send() + return out, err +} + +const opListUserPoolClients = "ListUserPoolClients" + +// ListUserPoolClientsRequest generates a "aws/request.Request" representing the +// client's request for the ListUserPoolClients operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListUserPoolClients method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListUserPoolClientsRequest method. +// req, resp := client.ListUserPoolClientsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoIdentityProvider) ListUserPoolClientsRequest(input *ListUserPoolClientsInput) (req *request.Request, output *ListUserPoolClientsOutput) { + op := &request.Operation{ + Name: opListUserPoolClients, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListUserPoolClientsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListUserPoolClientsOutput{} + req.Data = output + return +} + +// Lists the clients that have been created for the specified user pool. +func (c *CognitoIdentityProvider) ListUserPoolClients(input *ListUserPoolClientsInput) (*ListUserPoolClientsOutput, error) { + req, out := c.ListUserPoolClientsRequest(input) + err := req.Send() + return out, err +} + +const opListUserPools = "ListUserPools" + +// ListUserPoolsRequest generates a "aws/request.Request" representing the +// client's request for the ListUserPools operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListUserPools method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListUserPoolsRequest method. +// req, resp := client.ListUserPoolsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoIdentityProvider) ListUserPoolsRequest(input *ListUserPoolsInput) (req *request.Request, output *ListUserPoolsOutput) { + op := &request.Operation{ + Name: opListUserPools, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListUserPoolsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListUserPoolsOutput{} + req.Data = output + return +} + +// Lists the user pools associated with an AWS account. 
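+//
+// For example, to list pools (a sketch; the page size here is illustrative):
+//
+//    resp, err := client.ListUserPools(&ListUserPoolsInput{
+//        MaxResults: aws.Int64(10),
+//    })
+//    if err == nil {
+//        for _, p := range resp.UserPools {
+//            fmt.Println(aws.StringValue(p.Name))
+//        }
+//    }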
+func (c *CognitoIdentityProvider) ListUserPools(input *ListUserPoolsInput) (*ListUserPoolsOutput, error) {
+    req, out := c.ListUserPoolsRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opListUsers = "ListUsers"
+
+// ListUsersRequest generates a "aws/request.Request" representing the
+// client's request for the ListUsers operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ListUsers method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the ListUsersRequest method.
+//    req, resp := client.ListUsersRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *CognitoIdentityProvider) ListUsersRequest(input *ListUsersInput) (req *request.Request, output *ListUsersOutput) {
+    op := &request.Operation{
+        Name:       opListUsers,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &ListUsersInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &ListUsersOutput{}
+    req.Data = output
+    return
+}
+
+// Lists the users in the Amazon Cognito user pool.
+func (c *CognitoIdentityProvider) ListUsers(input *ListUsersInput) (*ListUsersOutput, error) {
+    req, out := c.ListUsersRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opResendConfirmationCode = "ResendConfirmationCode"
+
+// ResendConfirmationCodeRequest generates a "aws/request.Request" representing the
+// client's request for the ResendConfirmationCode operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ResendConfirmationCode method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the ResendConfirmationCodeRequest method.
+//    req, resp := client.ResendConfirmationCodeRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *CognitoIdentityProvider) ResendConfirmationCodeRequest(input *ResendConfirmationCodeInput) (req *request.Request, output *ResendConfirmationCodeOutput) {
+    op := &request.Operation{
+        Name:       opResendConfirmationCode,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &ResendConfirmationCodeInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    req.Config.Credentials = credentials.AnonymousCredentials
+    output = &ResendConfirmationCodeOutput{}
+    req.Data = output
+    return
+}
+
+// Resends the confirmation code for registration to a specific user in the
+// user pool.
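+//
+// A minimal sketch (the client ID and user name are placeholders):
+//
+//    resp, err := client.ResendConfirmationCode(&ResendConfirmationCodeInput{
+//        ClientId: aws.String("<app client id>"), // placeholder
+//        Username: aws.String("<user name>"),     // placeholder
+//    })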
+func (c *CognitoIdentityProvider) ResendConfirmationCode(input *ResendConfirmationCodeInput) (*ResendConfirmationCodeOutput, error) {
+    req, out := c.ResendConfirmationCodeRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opSetUserSettings = "SetUserSettings"
+
+// SetUserSettingsRequest generates a "aws/request.Request" representing the
+// client's request for the SetUserSettings operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the SetUserSettings method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the SetUserSettingsRequest method.
+//    req, resp := client.SetUserSettingsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *CognitoIdentityProvider) SetUserSettingsRequest(input *SetUserSettingsInput) (req *request.Request, output *SetUserSettingsOutput) {
+    op := &request.Operation{
+        Name:       opSetUserSettings,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &SetUserSettingsInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    req.Config.Credentials = credentials.AnonymousCredentials
+    output = &SetUserSettingsOutput{}
+    req.Data = output
+    return
+}
+
+// Sets the user settings, such as multi-factor authentication (MFA). To remove
+// MFA for a particular attribute, pass the attribute with its code delivery
+// set to null. If a null list is passed, all MFA options are removed.
+func (c *CognitoIdentityProvider) SetUserSettings(input *SetUserSettingsInput) (*SetUserSettingsOutput, error) {
+    req, out := c.SetUserSettingsRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opSignUp = "SignUp"
+
+// SignUpRequest generates a "aws/request.Request" representing the
+// client's request for the SignUp operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the SignUp method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the SignUpRequest method.
+// req, resp := client.SignUpRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoIdentityProvider) SignUpRequest(input *SignUpInput) (req *request.Request, output *SignUpOutput) { + op := &request.Operation{ + Name: opSignUp, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SignUpInput{} + } + + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + output = &SignUpOutput{} + req.Data = output + return +} + +// Registers the user in the specified user pool and creates a user name, password, +// and user attributes. +func (c *CognitoIdentityProvider) SignUp(input *SignUpInput) (*SignUpOutput, error) { + req, out := c.SignUpRequest(input) + err := req.Send() + return out, err +} + +const opUpdateUserAttributes = "UpdateUserAttributes" + +// UpdateUserAttributesRequest generates a "aws/request.Request" representing the +// client's request for the UpdateUserAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateUserAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateUserAttributesRequest method. +// req, resp := client.UpdateUserAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoIdentityProvider) UpdateUserAttributesRequest(input *UpdateUserAttributesInput) (req *request.Request, output *UpdateUserAttributesOutput) { + op := &request.Operation{ + Name: opUpdateUserAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateUserAttributesInput{} + } + + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + output = &UpdateUserAttributesOutput{} + req.Data = output + return +} + +// Allows a user to update a specific attribute (one at a time). +func (c *CognitoIdentityProvider) UpdateUserAttributes(input *UpdateUserAttributesInput) (*UpdateUserAttributesOutput, error) { + req, out := c.UpdateUserAttributesRequest(input) + err := req.Send() + return out, err +} + +const opUpdateUserPool = "UpdateUserPool" + +// UpdateUserPoolRequest generates a "aws/request.Request" representing the +// client's request for the UpdateUserPool operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateUserPool method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateUserPoolRequest method. 
+// req, resp := client.UpdateUserPoolRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoIdentityProvider) UpdateUserPoolRequest(input *UpdateUserPoolInput) (req *request.Request, output *UpdateUserPoolOutput) { + op := &request.Operation{ + Name: opUpdateUserPool, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateUserPoolInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateUserPoolOutput{} + req.Data = output + return +} + +// Updates the specified user pool with the specified attributes. +func (c *CognitoIdentityProvider) UpdateUserPool(input *UpdateUserPoolInput) (*UpdateUserPoolOutput, error) { + req, out := c.UpdateUserPoolRequest(input) + err := req.Send() + return out, err +} + +const opUpdateUserPoolClient = "UpdateUserPoolClient" + +// UpdateUserPoolClientRequest generates a "aws/request.Request" representing the +// client's request for the UpdateUserPoolClient operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateUserPoolClient method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateUserPoolClientRequest method. +// req, resp := client.UpdateUserPoolClientRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoIdentityProvider) UpdateUserPoolClientRequest(input *UpdateUserPoolClientInput) (req *request.Request, output *UpdateUserPoolClientOutput) { + op := &request.Operation{ + Name: opUpdateUserPoolClient, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateUserPoolClientInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateUserPoolClientOutput{} + req.Data = output + return +} + +// Allows the developer to update the specified user pool client and password +// policy. +func (c *CognitoIdentityProvider) UpdateUserPoolClient(input *UpdateUserPoolClientInput) (*UpdateUserPoolClientOutput, error) { + req, out := c.UpdateUserPoolClientRequest(input) + err := req.Send() + return out, err +} + +const opVerifyUserAttribute = "VerifyUserAttribute" + +// VerifyUserAttributeRequest generates a "aws/request.Request" representing the +// client's request for the VerifyUserAttribute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the VerifyUserAttribute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the VerifyUserAttributeRequest method. 
+// req, resp := client.VerifyUserAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoIdentityProvider) VerifyUserAttributeRequest(input *VerifyUserAttributeInput) (req *request.Request, output *VerifyUserAttributeOutput) { + op := &request.Operation{ + Name: opVerifyUserAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &VerifyUserAttributeInput{} + } + + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + output = &VerifyUserAttributeOutput{} + req.Data = output + return +} + +// Verifies the specified user attributes in the user pool. +func (c *CognitoIdentityProvider) VerifyUserAttribute(input *VerifyUserAttributeInput) (*VerifyUserAttributeOutput, error) { + req, out := c.VerifyUserAttributeRequest(input) + err := req.Send() + return out, err +} + +// Represents the request to add custom attributes. +type AddCustomAttributesInput struct { + _ struct{} `type:"structure"` + + // An array of custom attributes, such as Mutable and Name. + CustomAttributes []*SchemaAttributeType `min:"1" type:"list" required:"true"` + + // The user pool ID for the user pool where you want to add custom attributes. + UserPoolId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AddCustomAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddCustomAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddCustomAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddCustomAttributesInput"} + if s.CustomAttributes == nil { + invalidParams.Add(request.NewErrParamRequired("CustomAttributes")) + } + if s.CustomAttributes != nil && len(s.CustomAttributes) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CustomAttributes", 1)) + } + if s.UserPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("UserPoolId")) + } + if s.UserPoolId != nil && len(*s.UserPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserPoolId", 1)) + } + if s.CustomAttributes != nil { + for i, v := range s.CustomAttributes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CustomAttributes", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the response from the server for the request to add custom attributes. +type AddCustomAttributesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddCustomAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddCustomAttributesOutput) GoString() string { + return s.String() +} + +// Represents the request to confirm user registration. +type AdminConfirmSignUpInput struct { + _ struct{} `type:"structure"` + + // The user pool ID for which you want to confirm user registration. + UserPoolId *string `min:"1" type:"string" required:"true"` + + // The user name for which you want to confirm user registration. 
+ Username *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AdminConfirmSignUpInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdminConfirmSignUpInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AdminConfirmSignUpInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AdminConfirmSignUpInput"} + if s.UserPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("UserPoolId")) + } + if s.UserPoolId != nil && len(*s.UserPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserPoolId", 1)) + } + if s.Username == nil { + invalidParams.Add(request.NewErrParamRequired("Username")) + } + if s.Username != nil && len(*s.Username) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Username", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the response from the server for the request to confirm registration. +type AdminConfirmSignUpOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AdminConfirmSignUpOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdminConfirmSignUpOutput) GoString() string { + return s.String() +} + +// Represents the request to delete user attributes as an administrator. +type AdminDeleteUserAttributesInput struct { + _ struct{} `type:"structure"` + + // An array of strings representing the user attribute names you wish to delete. + UserAttributeNames []*string `type:"list" required:"true"` + + // The user pool ID for the user pool where you want to delete user attributes. + UserPoolId *string `min:"1" type:"string" required:"true"` + + // The user name of the user from which you would like to delete attributes. + Username *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AdminDeleteUserAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdminDeleteUserAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AdminDeleteUserAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AdminDeleteUserAttributesInput"} + if s.UserAttributeNames == nil { + invalidParams.Add(request.NewErrParamRequired("UserAttributeNames")) + } + if s.UserPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("UserPoolId")) + } + if s.UserPoolId != nil && len(*s.UserPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserPoolId", 1)) + } + if s.Username == nil { + invalidParams.Add(request.NewErrParamRequired("Username")) + } + if s.Username != nil && len(*s.Username) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Username", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the response received from the server for a request to delete +// user attributes. 
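+//
+// A request that produces this response might look like the following sketch
+// (the pool ID and user name are placeholders, and "custom:nickname" is a
+// hypothetical attribute name):
+//
+//    _, err := client.AdminDeleteUserAttributes(&AdminDeleteUserAttributesInput{
+//        UserPoolId:         aws.String("<user pool id>"),
+//        Username:           aws.String("<user name>"),
+//        UserAttributeNames: []*string{aws.String("custom:nickname")},
+//    })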
+type AdminDeleteUserAttributesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AdminDeleteUserAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdminDeleteUserAttributesOutput) GoString() string { + return s.String() +} + +// Represents the request to delete a user as an administrator. +type AdminDeleteUserInput struct { + _ struct{} `type:"structure"` + + // The user pool ID for the user pool where you want to delete the user. + UserPoolId *string `min:"1" type:"string" required:"true"` + + // The user name of the user you wish to delete. + Username *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AdminDeleteUserInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdminDeleteUserInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AdminDeleteUserInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AdminDeleteUserInput"} + if s.UserPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("UserPoolId")) + } + if s.UserPoolId != nil && len(*s.UserPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserPoolId", 1)) + } + if s.Username == nil { + invalidParams.Add(request.NewErrParamRequired("Username")) + } + if s.Username != nil && len(*s.Username) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Username", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type AdminDeleteUserOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AdminDeleteUserOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdminDeleteUserOutput) GoString() string { + return s.String() +} + +// Represents the request to disable any user as an administrator. +type AdminDisableUserInput struct { + _ struct{} `type:"structure"` + + // The user pool ID for the user pool where you want to disable the user. + UserPoolId *string `min:"1" type:"string" required:"true"` + + // The user name of the user you wish to disable. + Username *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AdminDisableUserInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdminDisableUserInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AdminDisableUserInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AdminDisableUserInput"} + if s.UserPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("UserPoolId")) + } + if s.UserPoolId != nil && len(*s.UserPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserPoolId", 1)) + } + if s.Username == nil { + invalidParams.Add(request.NewErrParamRequired("Username")) + } + if s.Username != nil && len(*s.Username) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Username", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the response received from the server to disable the user as an +// administrator. 
+type AdminDisableUserOutput struct {
+    _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s AdminDisableUserOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AdminDisableUserOutput) GoString() string {
+    return s.String()
+}
+
+// Represents the request that enables the user as an administrator.
+type AdminEnableUserInput struct {
+    _ struct{} `type:"structure"`
+
+    // The user pool ID for the user pool where you want to enable the user.
+    UserPoolId *string `min:"1" type:"string" required:"true"`
+
+    // The user name of the user you wish to enable.
+    Username *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AdminEnableUserInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AdminEnableUserInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AdminEnableUserInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "AdminEnableUserInput"}
+    if s.UserPoolId == nil {
+        invalidParams.Add(request.NewErrParamRequired("UserPoolId"))
+    }
+    if s.UserPoolId != nil && len(*s.UserPoolId) < 1 {
+        invalidParams.Add(request.NewErrParamMinLen("UserPoolId", 1))
+    }
+    if s.Username == nil {
+        invalidParams.Add(request.NewErrParamRequired("Username"))
+    }
+    if s.Username != nil && len(*s.Username) < 1 {
+        invalidParams.Add(request.NewErrParamMinLen("Username", 1))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// Represents the response from the server for the request to enable a user
+// as an administrator.
+type AdminEnableUserOutput struct {
+    _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s AdminEnableUserOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AdminEnableUserOutput) GoString() string {
+    return s.String()
+}
+
+// Represents the request to get the specified user as an administrator.
+type AdminGetUserInput struct {
+    _ struct{} `type:"structure"`
+
+    // The user pool ID for the user pool where you want to get information about
+    // the user.
+    UserPoolId *string `min:"1" type:"string" required:"true"`
+
+    // The user name of the user you wish to retrieve.
+    Username *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AdminGetUserInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AdminGetUserInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
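+//
+// The SDK runs this validation when the request is sent, but it can also be
+// invoked directly to check parameters up front; a sketch with placeholder
+// values:
+//
+//    in := &AdminGetUserInput{
+//        UserPoolId: aws.String("<user pool id>"),
+//        Username:   aws.String("<user name>"),
+//    }
+//    if err := in.Validate(); err != nil {
+//        log.Fatal(err) // assumes "log" is imported
+//    }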
+func (s *AdminGetUserInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AdminGetUserInput"} + if s.UserPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("UserPoolId")) + } + if s.UserPoolId != nil && len(*s.UserPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserPoolId", 1)) + } + if s.Username == nil { + invalidParams.Add(request.NewErrParamRequired("Username")) + } + if s.Username != nil && len(*s.Username) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Username", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the response from the server from the request to get the specified +// user as an administrator. +type AdminGetUserOutput struct { + _ struct{} `type:"structure"` + + // Indicates that the status is enabled. + Enabled *bool `type:"boolean"` + + // Specifies the options for MFA (e.g., email or phone number). + MFAOptions []*MFAOptionType `type:"list"` + + // An array of name-value pairs representing user attributes. + UserAttributes []*AttributeType `type:"list"` + + // The date the user was created. + UserCreateDate *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The date the user was last modified. + UserLastModifiedDate *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The user status. Can be one of the following: + // + // UNCONFIRMED - User has been created but not confirmed. CONFIRMED - User + // has been confirmed. ARCHIVED - User is no longer active. COMPROMISED - User + // is disabled due to a potential security threat. UNKNOWN - User status is + // not known. + UserStatus *string `type:"string" enum:"UserStatusType"` + + // The user name of the user about whom you are receiving information. + Username *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AdminGetUserOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdminGetUserOutput) GoString() string { + return s.String() +} + +// Represents the request to reset a user's password as an administrator. +type AdminResetUserPasswordInput struct { + _ struct{} `type:"structure"` + + // The user pool ID for the user pool where you want to reset the user's password. + UserPoolId *string `min:"1" type:"string" required:"true"` + + // The user name of the user whose password you wish to reset. + Username *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AdminResetUserPasswordInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdminResetUserPasswordInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
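+//
+// A hedged sketch of calling Validate by hand (the SDK's request handlers
+// also invoke it before a request is sent); the field values here are
+// assumptions for illustration:
+//
+//	in := &AdminResetUserPasswordInput{UserPoolId: aws.String("us-east-1_EXAMPLE")}
+//	if err := in.Validate(); err != nil {
+//		// err reports the missing required Username parameter.
+//	}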
+func (s *AdminResetUserPasswordInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AdminResetUserPasswordInput"} + if s.UserPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("UserPoolId")) + } + if s.UserPoolId != nil && len(*s.UserPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserPoolId", 1)) + } + if s.Username == nil { + invalidParams.Add(request.NewErrParamRequired("Username")) + } + if s.Username != nil && len(*s.Username) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Username", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the response from the server to reset a user password as an administrator. +type AdminResetUserPasswordOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AdminResetUserPasswordOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdminResetUserPasswordOutput) GoString() string { + return s.String() +} + +// Represents the request to set user settings as an administrator. +type AdminSetUserSettingsInput struct { + _ struct{} `type:"structure"` + + // Specifies the options for MFA (e.g., email or phone number). + MFAOptions []*MFAOptionType `type:"list" required:"true"` + + // The user pool ID for the user pool where you want to set the user's settings, + // such as MFA options. + UserPoolId *string `min:"1" type:"string" required:"true"` + + // The user name of the user for whom you wish to set user settings. + Username *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AdminSetUserSettingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdminSetUserSettingsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AdminSetUserSettingsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AdminSetUserSettingsInput"} + if s.MFAOptions == nil { + invalidParams.Add(request.NewErrParamRequired("MFAOptions")) + } + if s.UserPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("UserPoolId")) + } + if s.UserPoolId != nil && len(*s.UserPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserPoolId", 1)) + } + if s.Username == nil { + invalidParams.Add(request.NewErrParamRequired("Username")) + } + if s.Username != nil && len(*s.Username) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Username", 1)) + } + if s.MFAOptions != nil { + for i, v := range s.MFAOptions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "MFAOptions", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the response from the server to set user settings as an administrator. +type AdminSetUserSettingsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AdminSetUserSettingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdminSetUserSettingsOutput) GoString() string { + return s.String() +} + +// Represents the request to update the user's attributes as an administrator. 
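+//
+// Editorial sketch (the client value svc and all literals are assumptions):
+// updating a single attribute through the corresponding
+// AdminUpdateUserAttributes call:
+//
+//	_, err := svc.AdminUpdateUserAttributes(&cognitoidentityprovider.AdminUpdateUserAttributesInput{
+//		UserPoolId: aws.String("us-east-1_EXAMPLE"),
+//		Username:   aws.String("jdoe"),
+//		UserAttributes: []*cognitoidentityprovider.AttributeType{
+//			{Name: aws.String("email"), Value: aws.String("jdoe@example.com")},
+//		},
+//	})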
+type AdminUpdateUserAttributesInput struct { + _ struct{} `type:"structure"` + + // An array of name-value pairs representing user attributes. + UserAttributes []*AttributeType `type:"list" required:"true"` + + // The user pool ID for the user pool where you want to update user attributes. + UserPoolId *string `min:"1" type:"string" required:"true"` + + // The user name of the user for whom you want to update user attributes. + Username *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AdminUpdateUserAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdminUpdateUserAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AdminUpdateUserAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AdminUpdateUserAttributesInput"} + if s.UserAttributes == nil { + invalidParams.Add(request.NewErrParamRequired("UserAttributes")) + } + if s.UserPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("UserPoolId")) + } + if s.UserPoolId != nil && len(*s.UserPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserPoolId", 1)) + } + if s.Username == nil { + invalidParams.Add(request.NewErrParamRequired("Username")) + } + if s.Username != nil && len(*s.Username) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Username", 1)) + } + if s.UserAttributes != nil { + for i, v := range s.UserAttributes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "UserAttributes", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the response from the server for the request to update user attributes +// as an administrator. +type AdminUpdateUserAttributesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AdminUpdateUserAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdminUpdateUserAttributesOutput) GoString() string { + return s.String() +} + +// Specifies whether the attribute is standard or custom. +type AttributeType struct { + _ struct{} `type:"structure"` + + // The name of the attribute. + Name *string `min:"1" type:"string" required:"true"` + + // The value of the attribute. + Value *string `type:"string"` +} + +// String returns the string representation +func (s AttributeType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttributeType) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AttributeType) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AttributeType"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the request to change a user password. +type ChangePasswordInput struct { + _ struct{} `type:"structure"` + + // The access token in the change password request. 
+	AccessToken *string `type:"string"`
+
+	// The old password in the change password request.
+	PreviousPassword *string `min:"6" type:"string" required:"true"`
+
+	// The new password in the change password request.
+	ProposedPassword *string `min:"6" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ChangePasswordInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ChangePasswordInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ChangePasswordInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ChangePasswordInput"}
+	if s.PreviousPassword == nil {
+		invalidParams.Add(request.NewErrParamRequired("PreviousPassword"))
+	}
+	if s.PreviousPassword != nil && len(*s.PreviousPassword) < 6 {
+		invalidParams.Add(request.NewErrParamMinLen("PreviousPassword", 6))
+	}
+	if s.ProposedPassword == nil {
+		invalidParams.Add(request.NewErrParamRequired("ProposedPassword"))
+	}
+	if s.ProposedPassword != nil && len(*s.ProposedPassword) < 6 {
+		invalidParams.Add(request.NewErrParamMinLen("ProposedPassword", 6))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The response from the server to the change password request.
+type ChangePasswordOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s ChangePasswordOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ChangePasswordOutput) GoString() string {
+	return s.String()
+}
+
+// The type of code delivery details being returned from the server.
+type CodeDeliveryDetailsType struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the attribute in the code delivery details type.
+	AttributeName *string `min:"1" type:"string"`
+
+	// The delivery medium (email message or phone number).
+	DeliveryMedium *string `type:"string" enum:"DeliveryMediumType"`
+
+	// The destination for the code delivery details.
+	Destination *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CodeDeliveryDetailsType) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CodeDeliveryDetailsType) GoString() string {
+	return s.String()
+}
+
+// The request representing the confirmation for a password reset.
+type ConfirmForgotPasswordInput struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the client associated with the user pool.
+	ClientId *string `min:"1" type:"string" required:"true"`
+
+	// The confirmation code sent by a user's request to retrieve a forgotten password.
+	ConfirmationCode *string `min:"1" type:"string" required:"true"`
+
+	// The password sent by a user's request to retrieve a forgotten password.
+	Password *string `min:"6" type:"string" required:"true"`
+
+	// A keyed-hash message authentication code (HMAC) calculated using the secret
+	// key of a user pool client and username plus the client ID in the message.
+	SecretHash *string `min:"1" type:"string"`
+
+	// The user name of the user for whom you want to enter a code to retrieve a
+	// forgotten password.
+ Username *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ConfirmForgotPasswordInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfirmForgotPasswordInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ConfirmForgotPasswordInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ConfirmForgotPasswordInput"} + if s.ClientId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientId")) + } + if s.ClientId != nil && len(*s.ClientId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientId", 1)) + } + if s.ConfirmationCode == nil { + invalidParams.Add(request.NewErrParamRequired("ConfirmationCode")) + } + if s.ConfirmationCode != nil && len(*s.ConfirmationCode) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ConfirmationCode", 1)) + } + if s.Password == nil { + invalidParams.Add(request.NewErrParamRequired("Password")) + } + if s.Password != nil && len(*s.Password) < 6 { + invalidParams.Add(request.NewErrParamMinLen("Password", 6)) + } + if s.SecretHash != nil && len(*s.SecretHash) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SecretHash", 1)) + } + if s.Username == nil { + invalidParams.Add(request.NewErrParamRequired("Username")) + } + if s.Username != nil && len(*s.Username) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Username", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The response from the server that results from a user's request to retrieve +// a forgotten password. +type ConfirmForgotPasswordOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ConfirmForgotPasswordOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfirmForgotPasswordOutput) GoString() string { + return s.String() +} + +// Represents the request to confirm registration of a user. +type ConfirmSignUpInput struct { + _ struct{} `type:"structure"` + + // The ID of the client associated with the user pool. + ClientId *string `min:"1" type:"string" required:"true"` + + // The confirmation code sent by a user's request to confirm registration. + ConfirmationCode *string `min:"1" type:"string" required:"true"` + + // Boolean to be specified to force user confirmation irrespective of existing + // alias. By default set to False. If this parameter is set to True and the + // phone number/email used for sign up confirmation already exists as an alias + // with a different user, the API call will migrate the alias from the previous + // user to the newly created user being confirmed. If set to False, the API + // will throw an AliasExistsException error. + ForceAliasCreation *bool `type:"boolean"` + + // A keyed-hash message authentication code (HMAC) calculated using the secret + // key of a user pool client and username plus the client ID in the message. + SecretHash *string `min:"1" type:"string"` + + // The user name of the user whose registration you wish to confirm. 
+ Username *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ConfirmSignUpInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfirmSignUpInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ConfirmSignUpInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ConfirmSignUpInput"} + if s.ClientId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientId")) + } + if s.ClientId != nil && len(*s.ClientId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientId", 1)) + } + if s.ConfirmationCode == nil { + invalidParams.Add(request.NewErrParamRequired("ConfirmationCode")) + } + if s.ConfirmationCode != nil && len(*s.ConfirmationCode) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ConfirmationCode", 1)) + } + if s.SecretHash != nil && len(*s.SecretHash) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SecretHash", 1)) + } + if s.Username == nil { + invalidParams.Add(request.NewErrParamRequired("Username")) + } + if s.Username != nil && len(*s.Username) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Username", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the response from the server for the registration confirmation. +type ConfirmSignUpOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ConfirmSignUpOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfirmSignUpOutput) GoString() string { + return s.String() +} + +// Represents the request to create a user pool client. +type CreateUserPoolClientInput struct { + _ struct{} `type:"structure"` + + // The client name for the user pool client you would like to create. + ClientName *string `min:"1" type:"string" required:"true"` + + // Boolean to specify whether you want to generate a secret for the user pool + // client being created. + GenerateSecret *bool `type:"boolean"` + + // The user pool ID for the user pool where you want to create a user pool client. + UserPoolId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateUserPoolClientInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateUserPoolClientInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateUserPoolClientInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateUserPoolClientInput"} + if s.ClientName == nil { + invalidParams.Add(request.NewErrParamRequired("ClientName")) + } + if s.ClientName != nil && len(*s.ClientName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientName", 1)) + } + if s.UserPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("UserPoolId")) + } + if s.UserPoolId != nil && len(*s.UserPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserPoolId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the response from the server to create a user pool client. +type CreateUserPoolClientOutput struct { + _ struct{} `type:"structure"` + + // The user pool client that was just created. 
+ UserPoolClient *UserPoolClientType `type:"structure"` +} + +// String returns the string representation +func (s CreateUserPoolClientOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateUserPoolClientOutput) GoString() string { + return s.String() +} + +// Represents the request to create a user pool. +type CreateUserPoolInput struct { + _ struct{} `type:"structure"` + + // Attributes supported as an alias for this user pool. Possible values: phone_number, + // email, or preferred_username. + AliasAttributes []*string `type:"list"` + + // The attributes to be auto-verified. Possible values: email, phone_number. + AutoVerifiedAttributes []*string `type:"list"` + + // A string representing the email verification message. + EmailVerificationMessage *string `min:"6" type:"string"` + + // A string representing the email verification subject. + EmailVerificationSubject *string `min:"1" type:"string"` + + // The Lambda trigger configuration information for the new user pool. + LambdaConfig *LambdaConfigType `type:"structure"` + + // Specifies MFA configuration details. + MfaConfiguration *string `type:"string" enum:"UserPoolMfaType"` + + // The policies associated with the new user pool. + Policies *UserPoolPolicyType `type:"structure"` + + // A string used to name the user pool. + PoolName *string `min:"1" type:"string" required:"true"` + + // A string representing the SMS authentication message. + SmsAuthenticationMessage *string `min:"6" type:"string"` + + // A string representing the SMS verification message. + SmsVerificationMessage *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s CreateUserPoolInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateUserPoolInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateUserPoolInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateUserPoolInput"} + if s.EmailVerificationMessage != nil && len(*s.EmailVerificationMessage) < 6 { + invalidParams.Add(request.NewErrParamMinLen("EmailVerificationMessage", 6)) + } + if s.EmailVerificationSubject != nil && len(*s.EmailVerificationSubject) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EmailVerificationSubject", 1)) + } + if s.PoolName == nil { + invalidParams.Add(request.NewErrParamRequired("PoolName")) + } + if s.PoolName != nil && len(*s.PoolName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PoolName", 1)) + } + if s.SmsAuthenticationMessage != nil && len(*s.SmsAuthenticationMessage) < 6 { + invalidParams.Add(request.NewErrParamMinLen("SmsAuthenticationMessage", 6)) + } + if s.SmsVerificationMessage != nil && len(*s.SmsVerificationMessage) < 6 { + invalidParams.Add(request.NewErrParamMinLen("SmsVerificationMessage", 6)) + } + if s.LambdaConfig != nil { + if err := s.LambdaConfig.Validate(); err != nil { + invalidParams.AddNested("LambdaConfig", err.(request.ErrInvalidParams)) + } + } + if s.Policies != nil { + if err := s.Policies.Validate(); err != nil { + invalidParams.AddNested("Policies", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the response from the server for the request to create a user +// pool. 
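+//
+// Hedged sketch (the client value svc and the pool name are assumptions):
+// the CreateUserPool operation returns this output, whose UserPool field
+// carries the new pool's metadata:
+//
+//	out, err := svc.CreateUserPool(&cognitoidentityprovider.CreateUserPoolInput{
+//		PoolName: aws.String("example-pool"),
+//	})
+//	if err == nil {
+//		_ = out.UserPool // *UserPoolType describing the created pool
+//	}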
+type CreateUserPoolOutput struct { + _ struct{} `type:"structure"` + + // A container for the user pool details. + UserPool *UserPoolType `type:"structure"` +} + +// String returns the string representation +func (s CreateUserPoolOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateUserPoolOutput) GoString() string { + return s.String() +} + +// Represents the request to delete user attributes. +type DeleteUserAttributesInput struct { + _ struct{} `type:"structure"` + + // The access token used in the request to delete user attributes. + AccessToken *string `type:"string"` + + // An array of strings representing the user attribute names you wish to delete. + UserAttributeNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s DeleteUserAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUserAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteUserAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteUserAttributesInput"} + if s.UserAttributeNames == nil { + invalidParams.Add(request.NewErrParamRequired("UserAttributeNames")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the response from the server to delete user attributes. +type DeleteUserAttributesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteUserAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUserAttributesOutput) GoString() string { + return s.String() +} + +// Represents the request to delete a user. +type DeleteUserInput struct { + _ struct{} `type:"structure"` + + // The access token from a request to delete a user. + AccessToken *string `type:"string"` +} + +// String returns the string representation +func (s DeleteUserInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUserInput) GoString() string { + return s.String() +} + +type DeleteUserOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteUserOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUserOutput) GoString() string { + return s.String() +} + +// Represents the request to delete a user pool client. +type DeleteUserPoolClientInput struct { + _ struct{} `type:"structure"` + + // The ID of the client associated with the user pool. + ClientId *string `min:"1" type:"string" required:"true"` + + // The user pool ID for the user pool where you want to delete the client. + UserPoolId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteUserPoolClientInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUserPoolClientInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteUserPoolClientInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteUserPoolClientInput"} + if s.ClientId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientId")) + } + if s.ClientId != nil && len(*s.ClientId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientId", 1)) + } + if s.UserPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("UserPoolId")) + } + if s.UserPoolId != nil && len(*s.UserPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserPoolId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteUserPoolClientOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteUserPoolClientOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUserPoolClientOutput) GoString() string { + return s.String() +} + +// Represents the request to delete a user pool. +type DeleteUserPoolInput struct { + _ struct{} `type:"structure"` + + // The user pool ID for the user pool you want to delete. + UserPoolId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteUserPoolInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUserPoolInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteUserPoolInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteUserPoolInput"} + if s.UserPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("UserPoolId")) + } + if s.UserPoolId != nil && len(*s.UserPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserPoolId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteUserPoolOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteUserPoolOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUserPoolOutput) GoString() string { + return s.String() +} + +// Represents the request to describe a user pool client. +type DescribeUserPoolClientInput struct { + _ struct{} `type:"structure"` + + // The ID of the client associated with the user pool. + ClientId *string `min:"1" type:"string" required:"true"` + + // The user pool ID for the user pool you want to describe. + UserPoolId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeUserPoolClientInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeUserPoolClientInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeUserPoolClientInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeUserPoolClientInput"} + if s.ClientId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientId")) + } + if s.ClientId != nil && len(*s.ClientId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientId", 1)) + } + if s.UserPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("UserPoolId")) + } + if s.UserPoolId != nil && len(*s.UserPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserPoolId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the response from the server from a request to describe the user +// pool client. +type DescribeUserPoolClientOutput struct { + _ struct{} `type:"structure"` + + // The user pool client from a server response to describe the user pool client. + UserPoolClient *UserPoolClientType `type:"structure"` +} + +// String returns the string representation +func (s DescribeUserPoolClientOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeUserPoolClientOutput) GoString() string { + return s.String() +} + +// Represents the request to describe the user pool. +type DescribeUserPoolInput struct { + _ struct{} `type:"structure"` + + // The user pool ID for the user pool you want to describe. + UserPoolId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeUserPoolInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeUserPoolInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeUserPoolInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeUserPoolInput"} + if s.UserPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("UserPoolId")) + } + if s.UserPoolId != nil && len(*s.UserPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserPoolId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the response to describe the user pool. +type DescribeUserPoolOutput struct { + _ struct{} `type:"structure"` + + // The container of metadata returned by the server to describe the pool. + UserPool *UserPoolType `type:"structure"` +} + +// String returns the string representation +func (s DescribeUserPoolOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeUserPoolOutput) GoString() string { + return s.String() +} + +// Represents the request to reset a user's password. +type ForgotPasswordInput struct { + _ struct{} `type:"structure"` + + // The ID of the client associated with the user pool. + ClientId *string `min:"1" type:"string" required:"true"` + + // A keyed-hash message authentication code (HMAC) calculated using the secret + // key of a user pool client and username plus the client ID in the message. + SecretHash *string `min:"1" type:"string"` + + // The user name of the user for whom you want to enter a code to retrieve a + // forgotten password. 
+	Username *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ForgotPasswordInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ForgotPasswordInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ForgotPasswordInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ForgotPasswordInput"}
+	if s.ClientId == nil {
+		invalidParams.Add(request.NewErrParamRequired("ClientId"))
+	}
+	if s.ClientId != nil && len(*s.ClientId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ClientId", 1))
+	}
+	if s.SecretHash != nil && len(*s.SecretHash) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("SecretHash", 1))
+	}
+	if s.Username == nil {
+		invalidParams.Add(request.NewErrParamRequired("Username"))
+	}
+	if s.Username != nil && len(*s.Username) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Username", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Represents the response from the server regarding the request to reset a
+// password.
+type ForgotPasswordOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The type of code delivery details being returned from the server.
+	CodeDeliveryDetails *CodeDeliveryDetailsType `type:"structure"`
+}
+
+// String returns the string representation
+func (s ForgotPasswordOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ForgotPasswordOutput) GoString() string {
+	return s.String()
+}
+
+// Represents the request to get user attribute verification.
+type GetUserAttributeVerificationCodeInput struct {
+	_ struct{} `type:"structure"`
+
+	// The access token returned by the server response to get the user attribute
+	// verification code.
+	AccessToken *string `type:"string"`
+
+	// The attribute name returned by the server response to get the user attribute
+	// verification code.
+	AttributeName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetUserAttributeVerificationCodeInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetUserAttributeVerificationCodeInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetUserAttributeVerificationCodeInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetUserAttributeVerificationCodeInput"}
+	if s.AttributeName == nil {
+		invalidParams.Add(request.NewErrParamRequired("AttributeName"))
+	}
+	if s.AttributeName != nil && len(*s.AttributeName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("AttributeName", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The verification code response returned by the server response to get the
+// user attribute verification code.
+type GetUserAttributeVerificationCodeOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The code delivery details returned by the server response to get the user
+	// attribute verification code.
+ CodeDeliveryDetails *CodeDeliveryDetailsType `type:"structure"` +} + +// String returns the string representation +func (s GetUserAttributeVerificationCodeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetUserAttributeVerificationCodeOutput) GoString() string { + return s.String() +} + +// Represents the request to get information about the user. +type GetUserInput struct { + _ struct{} `type:"structure"` + + // The access token returned by the server response to get information about + // the user. + AccessToken *string `type:"string"` +} + +// String returns the string representation +func (s GetUserInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetUserInput) GoString() string { + return s.String() +} + +// Represents the response from the server from the request to get information +// about the user. +type GetUserOutput struct { + _ struct{} `type:"structure"` + + // Specifies the options for MFA (e.g., email or phone number). + MFAOptions []*MFAOptionType `type:"list"` + + // An array of name-value pairs representing user attributes. + UserAttributes []*AttributeType `type:"list" required:"true"` + + // The user name of the user you wish to retrieve from the get user request. + Username *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetUserOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetUserOutput) GoString() string { + return s.String() +} + +// Specifies the type of configuration for AWS Lambda triggers. +type LambdaConfigType struct { + _ struct{} `type:"structure"` + + // A custom Message AWS Lambda trigger. + CustomMessage *string `min:"20" type:"string"` + + // A post-authentication AWS Lambda trigger. + PostAuthentication *string `min:"20" type:"string"` + + // A post-confirmation AWS Lambda trigger. + PostConfirmation *string `min:"20" type:"string"` + + // A pre-authentication AWS Lambda trigger. + PreAuthentication *string `min:"20" type:"string"` + + // A pre-registration AWS Lambda trigger. + PreSignUp *string `min:"20" type:"string"` +} + +// String returns the string representation +func (s LambdaConfigType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LambdaConfigType) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LambdaConfigType) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LambdaConfigType"} + if s.CustomMessage != nil && len(*s.CustomMessage) < 20 { + invalidParams.Add(request.NewErrParamMinLen("CustomMessage", 20)) + } + if s.PostAuthentication != nil && len(*s.PostAuthentication) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PostAuthentication", 20)) + } + if s.PostConfirmation != nil && len(*s.PostConfirmation) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PostConfirmation", 20)) + } + if s.PreAuthentication != nil && len(*s.PreAuthentication) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PreAuthentication", 20)) + } + if s.PreSignUp != nil && len(*s.PreSignUp) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PreSignUp", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the request to list the user pool clients. 
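+//
+// Hedged pagination sketch (the client value svc and all IDs are
+// assumptions): the NextToken from each response is fed back into the next
+// request until it comes back nil:
+//
+//	in := &cognitoidentityprovider.ListUserPoolClientsInput{
+//		UserPoolId: aws.String("us-east-1_EXAMPLE"),
+//		MaxResults: aws.Int64(10),
+//	}
+//	for {
+//		out, err := svc.ListUserPoolClients(in)
+//		if err != nil {
+//			break
+//		}
+//		// consume out.UserPoolClients here
+//		if out.NextToken == nil {
+//			break
+//		}
+//		in.NextToken = out.NextToken
+//	}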
+type ListUserPoolClientsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results you want the request to return when listing + // the user pool clients. + MaxResults *int64 `min:"1" type:"integer"` + + // An identifier that was returned from the previous call to this operation, + // which can be used to return the next set of items in the list. + NextToken *string `min:"1" type:"string"` + + // The user pool ID for the user pool where you want to list user pool clients. + UserPoolId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListUserPoolClientsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListUserPoolClientsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListUserPoolClientsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListUserPoolClientsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.UserPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("UserPoolId")) + } + if s.UserPoolId != nil && len(*s.UserPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserPoolId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the response from the server that lists user pool clients. +type ListUserPoolClientsOutput struct { + _ struct{} `type:"structure"` + + // An identifier that was returned from the previous call to this operation, + // which can be used to return the next set of items in the list. + NextToken *string `min:"1" type:"string"` + + // The user pool clients in the response that lists user pool clients. + UserPoolClients []*UserPoolClientDescription `type:"list"` +} + +// String returns the string representation +func (s ListUserPoolClientsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListUserPoolClientsOutput) GoString() string { + return s.String() +} + +// Represents the request to list user pools. +type ListUserPoolsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results you want the request to return when listing + // the user pools. + MaxResults *int64 `min:"1" type:"integer" required:"true"` + + // An identifier that was returned from the previous call to this operation, + // which can be used to return the next set of items in the list. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListUserPoolsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListUserPoolsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListUserPoolsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListUserPoolsInput"} + if s.MaxResults == nil { + invalidParams.Add(request.NewErrParamRequired("MaxResults")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the response to list user pools. +type ListUserPoolsOutput struct { + _ struct{} `type:"structure"` + + // An identifier that was returned from the previous call to this operation, + // which can be used to return the next set of items in the list. + NextToken *string `min:"1" type:"string"` + + // The user pools from the response to list users. + UserPools []*UserPoolDescriptionType `type:"list"` +} + +// String returns the string representation +func (s ListUserPoolsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListUserPoolsOutput) GoString() string { + return s.String() +} + +// Represents the request to list users. +type ListUsersInput struct { + _ struct{} `type:"structure"` + + // The attributes to get from the request to list users. + AttributesToGet []*string `type:"list"` + + // The limit of the request to list users. + Limit *int64 `min:"1" type:"integer"` + + // An identifier that was returned from the previous call to this operation, + // which can be used to return the next set of items in the list. + PaginationToken *string `min:"1" type:"string"` + + // The user pool ID for which you want to list users. + UserPoolId *string `min:"1" type:"string" required:"true"` + + // The user status. Can be one of the following: + // + // UNCONFIRMED - User has been created but not confirmed. CONFIRMED - User + // has been confirmed. ARCHIVED - User is no longer active. COMPROMISED - User + // is disabled due to a potential security threat. UNKNOWN - User status is + // not known. + UserStatus *string `type:"string" enum:"UserStatusType"` +} + +// String returns the string representation +func (s ListUsersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListUsersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListUsersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListUsersInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.PaginationToken != nil && len(*s.PaginationToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PaginationToken", 1)) + } + if s.UserPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("UserPoolId")) + } + if s.UserPoolId != nil && len(*s.UserPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserPoolId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The response from the request to list users. +type ListUsersOutput struct { + _ struct{} `type:"structure"` + + // An identifier that was returned from the previous call to this operation, + // which can be used to return the next set of items in the list. + PaginationToken *string `min:"1" type:"string"` + + // The users returned in the request to list users. 
+ Users []*UserType `type:"list"` +} + +// String returns the string representation +func (s ListUsersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListUsersOutput) GoString() string { + return s.String() +} + +// Specifies the different settings for multi-factor authentication (MFA). +type MFAOptionType struct { + _ struct{} `type:"structure"` + + // The attribute name of the MFA option type. + AttributeName *string `min:"1" type:"string"` + + // The delivery medium (email message or SMS message) to send the MFA code. + DeliveryMedium *string `type:"string" enum:"DeliveryMediumType"` +} + +// String returns the string representation +func (s MFAOptionType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MFAOptionType) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MFAOptionType) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MFAOptionType"} + if s.AttributeName != nil && len(*s.AttributeName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AttributeName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The minimum and maximum value of an attribute that is of the number data +// type. +type NumberAttributeConstraintsType struct { + _ struct{} `type:"structure"` + + // The maximum value of an attribute that is of the number data type. + MaxValue *string `type:"string"` + + // The minimum value of an attribute that is of the number data type. + MinValue *string `type:"string"` +} + +// String returns the string representation +func (s NumberAttributeConstraintsType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NumberAttributeConstraintsType) GoString() string { + return s.String() +} + +// The password policy type. +type PasswordPolicyType struct { + _ struct{} `type:"structure"` + + // The minimum length of the password policy that you have set. Cannot be less + // than 6. + MinimumLength *int64 `min:"6" type:"integer"` + + // In the password policy that you have set, refers to whether you have required + // users to use at least one lowercase letter in their password. + RequireLowercase *bool `type:"boolean"` + + // In the password policy that you have set, refers to whether you have required + // users to use at least one number in their password. + RequireNumbers *bool `type:"boolean"` + + // In the password policy that you have set, refers to whether you have required + // users to use at least one symbol in their password. + RequireSymbols *bool `type:"boolean"` + + // In the password policy that you have set, refers to whether you have required + // users to use at least one uppercase letter in their password. + RequireUppercase *bool `type:"boolean"` +} + +// String returns the string representation +func (s PasswordPolicyType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PasswordPolicyType) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PasswordPolicyType) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PasswordPolicyType"} + if s.MinimumLength != nil && *s.MinimumLength < 6 { + invalidParams.Add(request.NewErrParamMinValue("MinimumLength", 6)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the request to resend the confirmation code. +type ResendConfirmationCodeInput struct { + _ struct{} `type:"structure"` + + // The ID of the client associated with the user pool. + ClientId *string `min:"1" type:"string" required:"true"` + + // A keyed-hash message authentication code (HMAC) calculated using the secret + // key of a user pool client and username plus the client ID in the message. + SecretHash *string `min:"1" type:"string"` + + // The user name of the user to whom you wish to resend a confirmation code. + Username *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ResendConfirmationCodeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResendConfirmationCodeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ResendConfirmationCodeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResendConfirmationCodeInput"} + if s.ClientId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientId")) + } + if s.ClientId != nil && len(*s.ClientId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientId", 1)) + } + if s.SecretHash != nil && len(*s.SecretHash) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SecretHash", 1)) + } + if s.Username == nil { + invalidParams.Add(request.NewErrParamRequired("Username")) + } + if s.Username != nil && len(*s.Username) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Username", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The response from the server when the Amazon Cognito service makes the request +// to resend a confirmation code. +type ResendConfirmationCodeOutput struct { + _ struct{} `type:"structure"` + + // The type of code delivery details being returned from the server. + CodeDeliveryDetails *CodeDeliveryDetailsType `type:"structure"` +} + +// String returns the string representation +func (s ResendConfirmationCodeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResendConfirmationCodeOutput) GoString() string { + return s.String() +} + +// Contains information about the schema attribute. +type SchemaAttributeType struct { + _ struct{} `type:"structure"` + + // The attribute data type. + AttributeDataType *string `type:"string" enum:"AttributeDataType"` + + // Specifies whether the attribute type is developer only. + DeveloperOnlyAttribute *bool `type:"boolean"` + + // Specifies whether the attribute can be changed once it has been created. + Mutable *bool `type:"boolean"` + + // A schema attribute of the name type. + Name *string `min:"1" type:"string"` + + // Specifies the constraints for an attribute of the number type. + NumberAttributeConstraints *NumberAttributeConstraintsType `type:"structure"` + + // Specifies whether a user pool attribute is required. If the attribute is + // required and the user does not provide a value, registration or sign-in will + // fail. 
+ Required *bool `type:"boolean"` + + // Specifies the constraints for an attribute of the string type. + StringAttributeConstraints *StringAttributeConstraintsType `type:"structure"` +} + +// String returns the string representation +func (s SchemaAttributeType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SchemaAttributeType) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SchemaAttributeType) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SchemaAttributeType"} + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the request to set user settings. +type SetUserSettingsInput struct { + _ struct{} `type:"structure"` + + // The access token for the set user settings request. + AccessToken *string `type:"string" required:"true"` + + // Specifies the options for MFA (e.g., email or phone number). + MFAOptions []*MFAOptionType `type:"list" required:"true"` +} + +// String returns the string representation +func (s SetUserSettingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetUserSettingsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetUserSettingsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetUserSettingsInput"} + if s.AccessToken == nil { + invalidParams.Add(request.NewErrParamRequired("AccessToken")) + } + if s.MFAOptions == nil { + invalidParams.Add(request.NewErrParamRequired("MFAOptions")) + } + if s.MFAOptions != nil { + for i, v := range s.MFAOptions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "MFAOptions", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The response from the server for a set user settings request. +type SetUserSettingsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetUserSettingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetUserSettingsOutput) GoString() string { + return s.String() +} + +// Represents the request to register a user. +type SignUpInput struct { + _ struct{} `type:"structure"` + + // The ID of the client associated with the user pool. + ClientId *string `min:"1" type:"string" required:"true"` + + // The password of the user you wish to register. + Password *string `min:"6" type:"string" required:"true"` + + // A keyed-hash message authentication code (HMAC) calculated using the secret + // key of a user pool client and username plus the client ID in the message. + SecretHash *string `min:"1" type:"string"` + + // An array of name-value pairs representing user attributes. + UserAttributes []*AttributeType `type:"list"` + + // The user name of the user you wish to register. + Username *string `min:"1" type:"string" required:"true"` + + // The validation data in the request to register a user. 
+ ValidationData []*AttributeType `type:"list"` +} + +// String returns the string representation +func (s SignUpInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SignUpInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SignUpInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SignUpInput"} + if s.ClientId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientId")) + } + if s.ClientId != nil && len(*s.ClientId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientId", 1)) + } + if s.Password == nil { + invalidParams.Add(request.NewErrParamRequired("Password")) + } + if s.Password != nil && len(*s.Password) < 6 { + invalidParams.Add(request.NewErrParamMinLen("Password", 6)) + } + if s.SecretHash != nil && len(*s.SecretHash) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SecretHash", 1)) + } + if s.Username == nil { + invalidParams.Add(request.NewErrParamRequired("Username")) + } + if s.Username != nil && len(*s.Username) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Username", 1)) + } + if s.UserAttributes != nil { + for i, v := range s.UserAttributes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "UserAttributes", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ValidationData != nil { + for i, v := range s.ValidationData { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ValidationData", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The response from the server for a registration request. +type SignUpOutput struct { + _ struct{} `type:"structure"` + + // The type of code delivery details being returned from the server. + CodeDeliveryDetails *CodeDeliveryDetailsType `type:"structure"` + + // A response from the server indicating that a user registration has been confirmed. + UserConfirmed *bool `type:"boolean"` +} + +// String returns the string representation +func (s SignUpOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SignUpOutput) GoString() string { + return s.String() +} + +// The type of constraints associated with an attribute of the string type. +type StringAttributeConstraintsType struct { + _ struct{} `type:"structure"` + + // The maximum length of an attribute value of the string type. + MaxLength *string `type:"string"` + + // The minimum length of an attribute value of the string type. + MinLength *string `type:"string"` +} + +// String returns the string representation +func (s StringAttributeConstraintsType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StringAttributeConstraintsType) GoString() string { + return s.String() +} + +// Represents the request to update user attributes. +type UpdateUserAttributesInput struct { + _ struct{} `type:"structure"` + + // The access token for the request to update user attributes. + AccessToken *string `type:"string"` + + // An array of name-value pairs representing user attributes. 
+ UserAttributes []*AttributeType `type:"list" required:"true"` +} + +// String returns the string representation +func (s UpdateUserAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateUserAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateUserAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateUserAttributesInput"} + if s.UserAttributes == nil { + invalidParams.Add(request.NewErrParamRequired("UserAttributes")) + } + if s.UserAttributes != nil { + for i, v := range s.UserAttributes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "UserAttributes", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the response from the server for the request to update user attributes. +type UpdateUserAttributesOutput struct { + _ struct{} `type:"structure"` + + // The code delivery details list from the server for the request to update + // user attributes. + CodeDeliveryDetailsList []*CodeDeliveryDetailsType `type:"list"` +} + +// String returns the string representation +func (s UpdateUserAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateUserAttributesOutput) GoString() string { + return s.String() +} + +// Represents the request to update the user pool client. +type UpdateUserPoolClientInput struct { + _ struct{} `type:"structure"` + + // The ID of the client associated with the user pool. + ClientId *string `min:"1" type:"string" required:"true"` + + // The client name from the update user pool client request. + ClientName *string `min:"1" type:"string"` + + // The user pool ID for the user pool where you want to update the user pool + // client. + UserPoolId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateUserPoolClientInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateUserPoolClientInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateUserPoolClientInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateUserPoolClientInput"} + if s.ClientId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientId")) + } + if s.ClientId != nil && len(*s.ClientId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientId", 1)) + } + if s.ClientName != nil && len(*s.ClientName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientName", 1)) + } + if s.UserPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("UserPoolId")) + } + if s.UserPoolId != nil && len(*s.UserPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserPoolId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the response from the server to the request to update the user +// pool client. +type UpdateUserPoolClientOutput struct { + _ struct{} `type:"structure"` + + // The user pool client value from the response from the server when an update + // user pool client request is made. 
+ UserPoolClient *UserPoolClientType `type:"structure"`
+}
+
+// String returns the string representation
+func (s UpdateUserPoolClientOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateUserPoolClientOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the request to update the user pool.
+type UpdateUserPoolInput struct {
+ _ struct{} `type:"structure"`
+
+ // The attributes that are automatically verified when the Amazon Cognito service
+ // makes a request to update user pools.
+ AutoVerifiedAttributes []*string `type:"list"`
+
+ // The contents of the email verification message.
+ EmailVerificationMessage *string `min:"6" type:"string"`
+
+ // The subject of the email verification message.
+ EmailVerificationSubject *string `min:"1" type:"string"`
+
+ // The AWS Lambda configuration information from the request to update the user
+ // pool.
+ LambdaConfig *LambdaConfigType `type:"structure"`
+
+ // Can be one of the following values:
+ //
+ //    OFF - MFA tokens are not required and cannot be specified during user
+ //    registration.
+ //
+ //    ON - MFA tokens are required for all user registrations. You can only
+ //    specify required when you are initially creating a user pool.
+ //
+ //    OPTIONAL - Users have the option when registering to create an MFA token.
+ MfaConfiguration *string `type:"string" enum:"UserPoolMfaType"`
+
+ // A container with the policies you wish to update in a user pool.
+ Policies *UserPoolPolicyType `type:"structure"`
+
+ // The contents of the SMS authentication message.
+ SmsAuthenticationMessage *string `min:"6" type:"string"`
+
+ // A container with information about the SMS verification message.
+ SmsVerificationMessage *string `min:"6" type:"string"`
+
+ // The user pool ID for the user pool you want to update.
+ UserPoolId *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateUserPoolInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateUserPoolInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
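+// In addition to its own length checks, this Validate delegates to the nested
+// LambdaConfig and Policies values and folds their failures in through
+// invalidParams.AddNested, so a single call reports problems at every level
+// of the input.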
+func (s *UpdateUserPoolInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UpdateUserPoolInput"}
+ if s.EmailVerificationMessage != nil && len(*s.EmailVerificationMessage) < 6 {
+ invalidParams.Add(request.NewErrParamMinLen("EmailVerificationMessage", 6))
+ }
+ if s.EmailVerificationSubject != nil && len(*s.EmailVerificationSubject) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("EmailVerificationSubject", 1))
+ }
+ if s.SmsAuthenticationMessage != nil && len(*s.SmsAuthenticationMessage) < 6 {
+ invalidParams.Add(request.NewErrParamMinLen("SmsAuthenticationMessage", 6))
+ }
+ if s.SmsVerificationMessage != nil && len(*s.SmsVerificationMessage) < 6 {
+ invalidParams.Add(request.NewErrParamMinLen("SmsVerificationMessage", 6))
+ }
+ if s.UserPoolId == nil {
+ invalidParams.Add(request.NewErrParamRequired("UserPoolId"))
+ }
+ if s.UserPoolId != nil && len(*s.UserPoolId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("UserPoolId", 1))
+ }
+ if s.LambdaConfig != nil {
+ if err := s.LambdaConfig.Validate(); err != nil {
+ invalidParams.AddNested("LambdaConfig", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.Policies != nil {
+ if err := s.Policies.Validate(); err != nil {
+ invalidParams.AddNested("Policies", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the response from the server when you make a request to update
+// the user pool.
+type UpdateUserPoolOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s UpdateUserPoolOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateUserPoolOutput) GoString() string {
+ return s.String()
+}
+
+// The description of the user pool client.
+type UserPoolClientDescription struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the client associated with the user pool.
+ ClientId *string `min:"1" type:"string"`
+
+ // The client name from the user pool client description.
+ ClientName *string `min:"1" type:"string"`
+
+ // The user pool ID for the user pool where you want to describe the user pool
+ // client.
+ UserPoolId *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s UserPoolClientDescription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UserPoolClientDescription) GoString() string {
+ return s.String()
+}
+
+// Contains information about a user pool client.
+type UserPoolClientType struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the client associated with the user pool.
+ ClientId *string `min:"1" type:"string"`
+
+ // The client name from the user pool request of the client type.
+ ClientName *string `min:"1" type:"string"`
+
+ // The client secret from the user pool request of the client type.
+ ClientSecret *string `min:"1" type:"string"`
+
+ // The creation date from the user pool request of the client type.
+ CreationDate *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The last modified date from the user pool request of the client type.
+ LastModifiedDate *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The user pool ID for the user pool client.
+ UserPoolId *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s UserPoolClientType) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UserPoolClientType) GoString() string {
+ return s.String()
+}
+
+// A user pool description.
+type UserPoolDescriptionType struct {
+ _ struct{} `type:"structure"`
+
+ // The creation date in a user pool description.
+ CreationDate *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The ID in a user pool description.
+ Id *string `min:"1" type:"string"`
+
+ // The AWS Lambda configuration information in a user pool description.
+ LambdaConfig *LambdaConfigType `type:"structure"`
+
+ // The last modified date in a user pool description.
+ LastModifiedDate *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The name in a user pool description.
+ Name *string `min:"1" type:"string"`
+
+ // The user pool status in a user pool description.
+ Status *string `type:"string" enum:"StatusType"`
+}
+
+// String returns the string representation
+func (s UserPoolDescriptionType) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UserPoolDescriptionType) GoString() string {
+ return s.String()
+}
+
+// The type of policy in a user pool.
+type UserPoolPolicyType struct {
+ _ struct{} `type:"structure"`
+
+ // A container with information about the user pool password policy.
+ PasswordPolicy *PasswordPolicyType `type:"structure"`
+}
+
+// String returns the string representation
+func (s UserPoolPolicyType) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UserPoolPolicyType) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UserPoolPolicyType) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UserPoolPolicyType"}
+ if s.PasswordPolicy != nil {
+ if err := s.PasswordPolicy.Validate(); err != nil {
+ invalidParams.AddNested("PasswordPolicy", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// A container with information about the user pool type.
+type UserPoolType struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the attributes that are aliased in a user pool.
+ AliasAttributes []*string `type:"list"`
+
+ // Specifies the attributes that are auto-verified in a user pool.
+ AutoVerifiedAttributes []*string `type:"list"`
+
+ // The creation date of a user pool.
+ CreationDate *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The contents of the email verification message.
+ EmailVerificationMessage *string `min:"6" type:"string"`
+
+ // The subject of the email verification message.
+ EmailVerificationSubject *string `min:"1" type:"string"`
+
+ // A number estimating the size of the user pool.
+ EstimatedNumberOfUsers *int64 `type:"integer"`
+
+ // The ID of the user pool.
+ Id *string `min:"1" type:"string"`
+
+ // A container describing the AWS Lambda triggers associated with a user pool.
+ LambdaConfig *LambdaConfigType `type:"structure"`
+
+ // The last modified date of a user pool.
+ LastModifiedDate *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // Can be one of the following values:
+ //
+ //    OFF - MFA tokens are not required and cannot be specified during user
+ //    registration.
+ //
+ //    ON - MFA tokens are required for all user registrations. You can only
+ //    specify required when you are initially creating a user pool.
+ //
+ //    OPTIONAL - Users have the option when registering to create an MFA token.
+ MfaConfiguration *string `type:"string" enum:"UserPoolMfaType"`
+
+ // The name of the user pool.
+ Name *string `min:"1" type:"string"`
+
+ // A container describing the policies associated with a user pool.
+ Policies *UserPoolPolicyType `type:"structure"`
+
+ // A container with the schema attributes of a user pool.
+ SchemaAttributes []*SchemaAttributeType `min:"1" type:"list"`
+
+ // The contents of the SMS authentication message.
+ SmsAuthenticationMessage *string `min:"6" type:"string"`
+
+ // The contents of the SMS verification message.
+ SmsVerificationMessage *string `min:"6" type:"string"`
+
+ // The status of a user pool.
+ Status *string `type:"string" enum:"StatusType"`
+}
+
+// String returns the string representation
+func (s UserPoolType) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UserPoolType) GoString() string {
+ return s.String()
+}
+
+// The user type.
+type UserType struct {
+ _ struct{} `type:"structure"`
+
+ // A container with information about the user type attributes.
+ Attributes []*AttributeType `type:"list"`
+
+ // Specifies whether the user is enabled.
+ Enabled *bool `type:"boolean"`
+
+ // The creation date of the user.
+ UserCreateDate *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The last modified date of the user.
+ UserLastModifiedDate *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The user status. Can be one of the following:
+ //
+ //    UNCONFIRMED - User has been created but not confirmed.
+ //
+ //    CONFIRMED - User has been confirmed.
+ //
+ //    ARCHIVED - User is no longer active.
+ //
+ //    COMPROMISED - User is disabled due to a potential security threat.
+ //
+ //    UNKNOWN - User status is not known.
+ UserStatus *string `type:"string" enum:"UserStatusType"`
+
+ // The user name of the user you wish to describe.
+ Username *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s UserType) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UserType) GoString() string {
+ return s.String()
+}
+
+// Represents the request to verify user attributes.
+type VerifyUserAttributeInput struct {
+ _ struct{} `type:"structure"`
+
+ // Represents the access token of the request to verify user attributes.
+ AccessToken *string `type:"string"`
+
+ // The attribute name in the request to verify user attributes.
+ AttributeName *string `min:"1" type:"string" required:"true"`
+
+ // The verification code in the request to verify user attributes.
+ Code *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s VerifyUserAttributeInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s VerifyUserAttributeInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
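+// These checks run locally, so a structurally bad input fails before any
+// network call is made. A minimal hypothetical usage sketch (not part of the
+// generated API):
+//
+//    in := &VerifyUserAttributeInput{Code: aws.String("123456")}
+//    if err := in.Validate(); err != nil {
+//        fmt.Println(err) // a request.ErrInvalidParams naming the missing AttributeName
+//    }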
+func (s *VerifyUserAttributeInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "VerifyUserAttributeInput"}
+ if s.AttributeName == nil {
+ invalidParams.Add(request.NewErrParamRequired("AttributeName"))
+ }
+ if s.AttributeName != nil && len(*s.AttributeName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("AttributeName", 1))
+ }
+ if s.Code == nil {
+ invalidParams.Add(request.NewErrParamRequired("Code"))
+ }
+ if s.Code != nil && len(*s.Code) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Code", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// A container representing the response from the server for the request to
+// verify user attributes.
+type VerifyUserAttributeOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s VerifyUserAttributeOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s VerifyUserAttributeOutput) GoString() string {
+ return s.String()
+}
+
+const (
+ // @enum AliasAttributeType
+ AliasAttributeTypePhoneNumber = "phone_number"
+ // @enum AliasAttributeType
+ AliasAttributeTypeEmail = "email"
+ // @enum AliasAttributeType
+ AliasAttributeTypePreferredUsername = "preferred_username"
+)
+
+const (
+ // @enum AttributeDataType
+ AttributeDataTypeString = "String"
+ // @enum AttributeDataType
+ AttributeDataTypeNumber = "Number"
+ // @enum AttributeDataType
+ AttributeDataTypeDateTime = "DateTime"
+ // @enum AttributeDataType
+ AttributeDataTypeBoolean = "Boolean"
+)
+
+const (
+ // @enum DeliveryMediumType
+ DeliveryMediumTypeSms = "SMS"
+ // @enum DeliveryMediumType
+ DeliveryMediumTypeEmail = "EMAIL"
+)
+
+const (
+ // @enum StatusType
+ StatusTypeEnabled = "Enabled"
+ // @enum StatusType
+ StatusTypeDisabled = "Disabled"
+)
+
+const (
+ // @enum UserPoolMfaType
+ UserPoolMfaTypeOff = "OFF"
+ // @enum UserPoolMfaType
+ UserPoolMfaTypeOn = "ON"
+ // @enum UserPoolMfaType
+ UserPoolMfaTypeOptional = "OPTIONAL"
+)
+
+const (
+ // @enum UserStatusType
+ UserStatusTypeUnconfirmed = "UNCONFIRMED"
+ // @enum UserStatusType
+ UserStatusTypeConfirmed = "CONFIRMED"
+ // @enum UserStatusType
+ UserStatusTypeArchived = "ARCHIVED"
+ // @enum UserStatusType
+ UserStatusTypeCompromised = "COMPROMISED"
+ // @enum UserStatusType
+ UserStatusTypeUnknown = "UNKNOWN"
+)
+
+const (
+ // @enum VerifiedAttributeType
+ VerifiedAttributeTypePhoneNumber = "phone_number"
+ // @enum VerifiedAttributeType
+ VerifiedAttributeTypeEmail = "email"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/cognitoidentityprovider/cognitoidentityprovideriface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentityprovider/cognitoidentityprovideriface/interface.go
new file mode 100644
index 000000000..726453087
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentityprovider/cognitoidentityprovideriface/interface.go
@@ -0,0 +1,150 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package cognitoidentityprovideriface provides an interface for the Amazon Cognito Identity Provider.
+package cognitoidentityprovideriface
+
+import (
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/service/cognitoidentityprovider"
+)
+
+// CognitoIdentityProviderAPI is the interface type for cognitoidentityprovider.CognitoIdentityProvider.
+type CognitoIdentityProviderAPI interface { + AddCustomAttributesRequest(*cognitoidentityprovider.AddCustomAttributesInput) (*request.Request, *cognitoidentityprovider.AddCustomAttributesOutput) + + AddCustomAttributes(*cognitoidentityprovider.AddCustomAttributesInput) (*cognitoidentityprovider.AddCustomAttributesOutput, error) + + AdminConfirmSignUpRequest(*cognitoidentityprovider.AdminConfirmSignUpInput) (*request.Request, *cognitoidentityprovider.AdminConfirmSignUpOutput) + + AdminConfirmSignUp(*cognitoidentityprovider.AdminConfirmSignUpInput) (*cognitoidentityprovider.AdminConfirmSignUpOutput, error) + + AdminDeleteUserRequest(*cognitoidentityprovider.AdminDeleteUserInput) (*request.Request, *cognitoidentityprovider.AdminDeleteUserOutput) + + AdminDeleteUser(*cognitoidentityprovider.AdminDeleteUserInput) (*cognitoidentityprovider.AdminDeleteUserOutput, error) + + AdminDeleteUserAttributesRequest(*cognitoidentityprovider.AdminDeleteUserAttributesInput) (*request.Request, *cognitoidentityprovider.AdminDeleteUserAttributesOutput) + + AdminDeleteUserAttributes(*cognitoidentityprovider.AdminDeleteUserAttributesInput) (*cognitoidentityprovider.AdminDeleteUserAttributesOutput, error) + + AdminDisableUserRequest(*cognitoidentityprovider.AdminDisableUserInput) (*request.Request, *cognitoidentityprovider.AdminDisableUserOutput) + + AdminDisableUser(*cognitoidentityprovider.AdminDisableUserInput) (*cognitoidentityprovider.AdminDisableUserOutput, error) + + AdminEnableUserRequest(*cognitoidentityprovider.AdminEnableUserInput) (*request.Request, *cognitoidentityprovider.AdminEnableUserOutput) + + AdminEnableUser(*cognitoidentityprovider.AdminEnableUserInput) (*cognitoidentityprovider.AdminEnableUserOutput, error) + + AdminGetUserRequest(*cognitoidentityprovider.AdminGetUserInput) (*request.Request, *cognitoidentityprovider.AdminGetUserOutput) + + AdminGetUser(*cognitoidentityprovider.AdminGetUserInput) (*cognitoidentityprovider.AdminGetUserOutput, error) + + AdminResetUserPasswordRequest(*cognitoidentityprovider.AdminResetUserPasswordInput) (*request.Request, *cognitoidentityprovider.AdminResetUserPasswordOutput) + + AdminResetUserPassword(*cognitoidentityprovider.AdminResetUserPasswordInput) (*cognitoidentityprovider.AdminResetUserPasswordOutput, error) + + AdminSetUserSettingsRequest(*cognitoidentityprovider.AdminSetUserSettingsInput) (*request.Request, *cognitoidentityprovider.AdminSetUserSettingsOutput) + + AdminSetUserSettings(*cognitoidentityprovider.AdminSetUserSettingsInput) (*cognitoidentityprovider.AdminSetUserSettingsOutput, error) + + AdminUpdateUserAttributesRequest(*cognitoidentityprovider.AdminUpdateUserAttributesInput) (*request.Request, *cognitoidentityprovider.AdminUpdateUserAttributesOutput) + + AdminUpdateUserAttributes(*cognitoidentityprovider.AdminUpdateUserAttributesInput) (*cognitoidentityprovider.AdminUpdateUserAttributesOutput, error) + + ChangePasswordRequest(*cognitoidentityprovider.ChangePasswordInput) (*request.Request, *cognitoidentityprovider.ChangePasswordOutput) + + ChangePassword(*cognitoidentityprovider.ChangePasswordInput) (*cognitoidentityprovider.ChangePasswordOutput, error) + + ConfirmForgotPasswordRequest(*cognitoidentityprovider.ConfirmForgotPasswordInput) (*request.Request, *cognitoidentityprovider.ConfirmForgotPasswordOutput) + + ConfirmForgotPassword(*cognitoidentityprovider.ConfirmForgotPasswordInput) (*cognitoidentityprovider.ConfirmForgotPasswordOutput, error) + + ConfirmSignUpRequest(*cognitoidentityprovider.ConfirmSignUpInput) (*request.Request, 
*cognitoidentityprovider.ConfirmSignUpOutput) + + ConfirmSignUp(*cognitoidentityprovider.ConfirmSignUpInput) (*cognitoidentityprovider.ConfirmSignUpOutput, error) + + CreateUserPoolRequest(*cognitoidentityprovider.CreateUserPoolInput) (*request.Request, *cognitoidentityprovider.CreateUserPoolOutput) + + CreateUserPool(*cognitoidentityprovider.CreateUserPoolInput) (*cognitoidentityprovider.CreateUserPoolOutput, error) + + CreateUserPoolClientRequest(*cognitoidentityprovider.CreateUserPoolClientInput) (*request.Request, *cognitoidentityprovider.CreateUserPoolClientOutput) + + CreateUserPoolClient(*cognitoidentityprovider.CreateUserPoolClientInput) (*cognitoidentityprovider.CreateUserPoolClientOutput, error) + + DeleteUserRequest(*cognitoidentityprovider.DeleteUserInput) (*request.Request, *cognitoidentityprovider.DeleteUserOutput) + + DeleteUser(*cognitoidentityprovider.DeleteUserInput) (*cognitoidentityprovider.DeleteUserOutput, error) + + DeleteUserAttributesRequest(*cognitoidentityprovider.DeleteUserAttributesInput) (*request.Request, *cognitoidentityprovider.DeleteUserAttributesOutput) + + DeleteUserAttributes(*cognitoidentityprovider.DeleteUserAttributesInput) (*cognitoidentityprovider.DeleteUserAttributesOutput, error) + + DeleteUserPoolRequest(*cognitoidentityprovider.DeleteUserPoolInput) (*request.Request, *cognitoidentityprovider.DeleteUserPoolOutput) + + DeleteUserPool(*cognitoidentityprovider.DeleteUserPoolInput) (*cognitoidentityprovider.DeleteUserPoolOutput, error) + + DeleteUserPoolClientRequest(*cognitoidentityprovider.DeleteUserPoolClientInput) (*request.Request, *cognitoidentityprovider.DeleteUserPoolClientOutput) + + DeleteUserPoolClient(*cognitoidentityprovider.DeleteUserPoolClientInput) (*cognitoidentityprovider.DeleteUserPoolClientOutput, error) + + DescribeUserPoolRequest(*cognitoidentityprovider.DescribeUserPoolInput) (*request.Request, *cognitoidentityprovider.DescribeUserPoolOutput) + + DescribeUserPool(*cognitoidentityprovider.DescribeUserPoolInput) (*cognitoidentityprovider.DescribeUserPoolOutput, error) + + DescribeUserPoolClientRequest(*cognitoidentityprovider.DescribeUserPoolClientInput) (*request.Request, *cognitoidentityprovider.DescribeUserPoolClientOutput) + + DescribeUserPoolClient(*cognitoidentityprovider.DescribeUserPoolClientInput) (*cognitoidentityprovider.DescribeUserPoolClientOutput, error) + + ForgotPasswordRequest(*cognitoidentityprovider.ForgotPasswordInput) (*request.Request, *cognitoidentityprovider.ForgotPasswordOutput) + + ForgotPassword(*cognitoidentityprovider.ForgotPasswordInput) (*cognitoidentityprovider.ForgotPasswordOutput, error) + + GetUserRequest(*cognitoidentityprovider.GetUserInput) (*request.Request, *cognitoidentityprovider.GetUserOutput) + + GetUser(*cognitoidentityprovider.GetUserInput) (*cognitoidentityprovider.GetUserOutput, error) + + GetUserAttributeVerificationCodeRequest(*cognitoidentityprovider.GetUserAttributeVerificationCodeInput) (*request.Request, *cognitoidentityprovider.GetUserAttributeVerificationCodeOutput) + + GetUserAttributeVerificationCode(*cognitoidentityprovider.GetUserAttributeVerificationCodeInput) (*cognitoidentityprovider.GetUserAttributeVerificationCodeOutput, error) + + ListUserPoolClientsRequest(*cognitoidentityprovider.ListUserPoolClientsInput) (*request.Request, *cognitoidentityprovider.ListUserPoolClientsOutput) + + ListUserPoolClients(*cognitoidentityprovider.ListUserPoolClientsInput) (*cognitoidentityprovider.ListUserPoolClientsOutput, error) + + 
ListUserPoolsRequest(*cognitoidentityprovider.ListUserPoolsInput) (*request.Request, *cognitoidentityprovider.ListUserPoolsOutput) + + ListUserPools(*cognitoidentityprovider.ListUserPoolsInput) (*cognitoidentityprovider.ListUserPoolsOutput, error) + + ListUsersRequest(*cognitoidentityprovider.ListUsersInput) (*request.Request, *cognitoidentityprovider.ListUsersOutput) + + ListUsers(*cognitoidentityprovider.ListUsersInput) (*cognitoidentityprovider.ListUsersOutput, error) + + ResendConfirmationCodeRequest(*cognitoidentityprovider.ResendConfirmationCodeInput) (*request.Request, *cognitoidentityprovider.ResendConfirmationCodeOutput) + + ResendConfirmationCode(*cognitoidentityprovider.ResendConfirmationCodeInput) (*cognitoidentityprovider.ResendConfirmationCodeOutput, error) + + SetUserSettingsRequest(*cognitoidentityprovider.SetUserSettingsInput) (*request.Request, *cognitoidentityprovider.SetUserSettingsOutput) + + SetUserSettings(*cognitoidentityprovider.SetUserSettingsInput) (*cognitoidentityprovider.SetUserSettingsOutput, error) + + SignUpRequest(*cognitoidentityprovider.SignUpInput) (*request.Request, *cognitoidentityprovider.SignUpOutput) + + SignUp(*cognitoidentityprovider.SignUpInput) (*cognitoidentityprovider.SignUpOutput, error) + + UpdateUserAttributesRequest(*cognitoidentityprovider.UpdateUserAttributesInput) (*request.Request, *cognitoidentityprovider.UpdateUserAttributesOutput) + + UpdateUserAttributes(*cognitoidentityprovider.UpdateUserAttributesInput) (*cognitoidentityprovider.UpdateUserAttributesOutput, error) + + UpdateUserPoolRequest(*cognitoidentityprovider.UpdateUserPoolInput) (*request.Request, *cognitoidentityprovider.UpdateUserPoolOutput) + + UpdateUserPool(*cognitoidentityprovider.UpdateUserPoolInput) (*cognitoidentityprovider.UpdateUserPoolOutput, error) + + UpdateUserPoolClientRequest(*cognitoidentityprovider.UpdateUserPoolClientInput) (*request.Request, *cognitoidentityprovider.UpdateUserPoolClientOutput) + + UpdateUserPoolClient(*cognitoidentityprovider.UpdateUserPoolClientInput) (*cognitoidentityprovider.UpdateUserPoolClientOutput, error) + + VerifyUserAttributeRequest(*cognitoidentityprovider.VerifyUserAttributeInput) (*request.Request, *cognitoidentityprovider.VerifyUserAttributeOutput) + + VerifyUserAttribute(*cognitoidentityprovider.VerifyUserAttributeInput) (*cognitoidentityprovider.VerifyUserAttributeOutput, error) +} + +var _ CognitoIdentityProviderAPI = (*cognitoidentityprovider.CognitoIdentityProvider)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cognitoidentityprovider/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentityprovider/examples_test.go new file mode 100644 index 000000000..def7ec0da --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentityprovider/examples_test.go @@ -0,0 +1,829 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
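+//
+// The examples below all follow the same shape: construct the operation's
+// typed *Input, invoke the corresponding method on the client, and check err
+// (castable to awserr.Error for its Code and Message) before printing the
+// response.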
+ +package cognitoidentityprovider_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleCognitoIdentityProvider_AddCustomAttributes() { + svc := cognitoidentityprovider.New(session.New()) + + params := &cognitoidentityprovider.AddCustomAttributesInput{ + CustomAttributes: []*cognitoidentityprovider.SchemaAttributeType{ // Required + { // Required + AttributeDataType: aws.String("AttributeDataType"), + DeveloperOnlyAttribute: aws.Bool(true), + Mutable: aws.Bool(true), + Name: aws.String("CustomAttributeNameType"), + NumberAttributeConstraints: &cognitoidentityprovider.NumberAttributeConstraintsType{ + MaxValue: aws.String("StringType"), + MinValue: aws.String("StringType"), + }, + Required: aws.Bool(true), + StringAttributeConstraints: &cognitoidentityprovider.StringAttributeConstraintsType{ + MaxLength: aws.String("StringType"), + MinLength: aws.String("StringType"), + }, + }, + // More values... + }, + UserPoolId: aws.String("UserPoolIdType"), // Required + } + resp, err := svc.AddCustomAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentityProvider_AdminConfirmSignUp() { + svc := cognitoidentityprovider.New(session.New()) + + params := &cognitoidentityprovider.AdminConfirmSignUpInput{ + UserPoolId: aws.String("UserPoolIdType"), // Required + Username: aws.String("UsernameType"), // Required + } + resp, err := svc.AdminConfirmSignUp(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentityProvider_AdminDeleteUser() { + svc := cognitoidentityprovider.New(session.New()) + + params := &cognitoidentityprovider.AdminDeleteUserInput{ + UserPoolId: aws.String("UserPoolIdType"), // Required + Username: aws.String("UsernameType"), // Required + } + resp, err := svc.AdminDeleteUser(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentityProvider_AdminDeleteUserAttributes() { + svc := cognitoidentityprovider.New(session.New()) + + params := &cognitoidentityprovider.AdminDeleteUserAttributesInput{ + UserAttributeNames: []*string{ // Required + aws.String("AttributeNameType"), // Required + // More values... + }, + UserPoolId: aws.String("UserPoolIdType"), // Required + Username: aws.String("UsernameType"), // Required + } + resp, err := svc.AdminDeleteUserAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCognitoIdentityProvider_AdminDisableUser() { + svc := cognitoidentityprovider.New(session.New()) + + params := &cognitoidentityprovider.AdminDisableUserInput{ + UserPoolId: aws.String("UserPoolIdType"), // Required + Username: aws.String("UsernameType"), // Required + } + resp, err := svc.AdminDisableUser(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentityProvider_AdminEnableUser() { + svc := cognitoidentityprovider.New(session.New()) + + params := &cognitoidentityprovider.AdminEnableUserInput{ + UserPoolId: aws.String("UserPoolIdType"), // Required + Username: aws.String("UsernameType"), // Required + } + resp, err := svc.AdminEnableUser(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentityProvider_AdminGetUser() { + svc := cognitoidentityprovider.New(session.New()) + + params := &cognitoidentityprovider.AdminGetUserInput{ + UserPoolId: aws.String("UserPoolIdType"), // Required + Username: aws.String("UsernameType"), // Required + } + resp, err := svc.AdminGetUser(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentityProvider_AdminResetUserPassword() { + svc := cognitoidentityprovider.New(session.New()) + + params := &cognitoidentityprovider.AdminResetUserPasswordInput{ + UserPoolId: aws.String("UserPoolIdType"), // Required + Username: aws.String("UsernameType"), // Required + } + resp, err := svc.AdminResetUserPassword(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentityProvider_AdminSetUserSettings() { + svc := cognitoidentityprovider.New(session.New()) + + params := &cognitoidentityprovider.AdminSetUserSettingsInput{ + MFAOptions: []*cognitoidentityprovider.MFAOptionType{ // Required + { // Required + AttributeName: aws.String("AttributeNameType"), + DeliveryMedium: aws.String("DeliveryMediumType"), + }, + // More values... + }, + UserPoolId: aws.String("UserPoolIdType"), // Required + Username: aws.String("UsernameType"), // Required + } + resp, err := svc.AdminSetUserSettings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentityProvider_AdminUpdateUserAttributes() { + svc := cognitoidentityprovider.New(session.New()) + + params := &cognitoidentityprovider.AdminUpdateUserAttributesInput{ + UserAttributes: []*cognitoidentityprovider.AttributeType{ // Required + { // Required + Name: aws.String("AttributeNameType"), // Required + Value: aws.String("AttributeValueType"), + }, + // More values... 
+ }, + UserPoolId: aws.String("UserPoolIdType"), // Required + Username: aws.String("UsernameType"), // Required + } + resp, err := svc.AdminUpdateUserAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentityProvider_ChangePassword() { + svc := cognitoidentityprovider.New(session.New()) + + params := &cognitoidentityprovider.ChangePasswordInput{ + PreviousPassword: aws.String("PasswordType"), // Required + ProposedPassword: aws.String("PasswordType"), // Required + AccessToken: aws.String("TokenModelType"), + } + resp, err := svc.ChangePassword(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentityProvider_ConfirmForgotPassword() { + svc := cognitoidentityprovider.New(session.New()) + + params := &cognitoidentityprovider.ConfirmForgotPasswordInput{ + ClientId: aws.String("ClientIdType"), // Required + ConfirmationCode: aws.String("ConfirmationCodeType"), // Required + Password: aws.String("PasswordType"), // Required + Username: aws.String("UsernameType"), // Required + SecretHash: aws.String("SecretHashType"), + } + resp, err := svc.ConfirmForgotPassword(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentityProvider_ConfirmSignUp() { + svc := cognitoidentityprovider.New(session.New()) + + params := &cognitoidentityprovider.ConfirmSignUpInput{ + ClientId: aws.String("ClientIdType"), // Required + ConfirmationCode: aws.String("ConfirmationCodeType"), // Required + Username: aws.String("UsernameType"), // Required + ForceAliasCreation: aws.Bool(true), + SecretHash: aws.String("SecretHashType"), + } + resp, err := svc.ConfirmSignUp(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentityProvider_CreateUserPool() { + svc := cognitoidentityprovider.New(session.New()) + + params := &cognitoidentityprovider.CreateUserPoolInput{ + PoolName: aws.String("UserPoolNameType"), // Required + AliasAttributes: []*string{ + aws.String("AliasAttributeType"), // Required + // More values... + }, + AutoVerifiedAttributes: []*string{ + aws.String("VerifiedAttributeType"), // Required + // More values... 
+ }, + EmailVerificationMessage: aws.String("EmailVerificationMessageType"), + EmailVerificationSubject: aws.String("EmailVerificationSubjectType"), + LambdaConfig: &cognitoidentityprovider.LambdaConfigType{ + CustomMessage: aws.String("ArnType"), + PostAuthentication: aws.String("ArnType"), + PostConfirmation: aws.String("ArnType"), + PreAuthentication: aws.String("ArnType"), + PreSignUp: aws.String("ArnType"), + }, + MfaConfiguration: aws.String("UserPoolMfaType"), + Policies: &cognitoidentityprovider.UserPoolPolicyType{ + PasswordPolicy: &cognitoidentityprovider.PasswordPolicyType{ + MinimumLength: aws.Int64(1), + RequireLowercase: aws.Bool(true), + RequireNumbers: aws.Bool(true), + RequireSymbols: aws.Bool(true), + RequireUppercase: aws.Bool(true), + }, + }, + SmsAuthenticationMessage: aws.String("SmsVerificationMessageType"), + SmsVerificationMessage: aws.String("SmsVerificationMessageType"), + } + resp, err := svc.CreateUserPool(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentityProvider_CreateUserPoolClient() { + svc := cognitoidentityprovider.New(session.New()) + + params := &cognitoidentityprovider.CreateUserPoolClientInput{ + ClientName: aws.String("ClientNameType"), // Required + UserPoolId: aws.String("UserPoolIdType"), // Required + GenerateSecret: aws.Bool(true), + } + resp, err := svc.CreateUserPoolClient(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentityProvider_DeleteUser() { + svc := cognitoidentityprovider.New(session.New()) + + params := &cognitoidentityprovider.DeleteUserInput{ + AccessToken: aws.String("TokenModelType"), + } + resp, err := svc.DeleteUser(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentityProvider_DeleteUserAttributes() { + svc := cognitoidentityprovider.New(session.New()) + + params := &cognitoidentityprovider.DeleteUserAttributesInput{ + UserAttributeNames: []*string{ // Required + aws.String("AttributeNameType"), // Required + // More values... + }, + AccessToken: aws.String("TokenModelType"), + } + resp, err := svc.DeleteUserAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentityProvider_DeleteUserPool() { + svc := cognitoidentityprovider.New(session.New()) + + params := &cognitoidentityprovider.DeleteUserPoolInput{ + UserPoolId: aws.String("UserPoolIdType"), // Required + } + resp, err := svc.DeleteUserPool(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCognitoIdentityProvider_DeleteUserPoolClient() { + svc := cognitoidentityprovider.New(session.New()) + + params := &cognitoidentityprovider.DeleteUserPoolClientInput{ + ClientId: aws.String("ClientIdType"), // Required + UserPoolId: aws.String("UserPoolIdType"), // Required + } + resp, err := svc.DeleteUserPoolClient(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentityProvider_DescribeUserPool() { + svc := cognitoidentityprovider.New(session.New()) + + params := &cognitoidentityprovider.DescribeUserPoolInput{ + UserPoolId: aws.String("UserPoolIdType"), // Required + } + resp, err := svc.DescribeUserPool(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentityProvider_DescribeUserPoolClient() { + svc := cognitoidentityprovider.New(session.New()) + + params := &cognitoidentityprovider.DescribeUserPoolClientInput{ + ClientId: aws.String("ClientIdType"), // Required + UserPoolId: aws.String("UserPoolIdType"), // Required + } + resp, err := svc.DescribeUserPoolClient(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentityProvider_ForgotPassword() { + svc := cognitoidentityprovider.New(session.New()) + + params := &cognitoidentityprovider.ForgotPasswordInput{ + ClientId: aws.String("ClientIdType"), // Required + Username: aws.String("UsernameType"), // Required + SecretHash: aws.String("SecretHashType"), + } + resp, err := svc.ForgotPassword(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentityProvider_GetUser() { + svc := cognitoidentityprovider.New(session.New()) + + params := &cognitoidentityprovider.GetUserInput{ + AccessToken: aws.String("TokenModelType"), + } + resp, err := svc.GetUser(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentityProvider_GetUserAttributeVerificationCode() { + svc := cognitoidentityprovider.New(session.New()) + + params := &cognitoidentityprovider.GetUserAttributeVerificationCodeInput{ + AttributeName: aws.String("AttributeNameType"), // Required + AccessToken: aws.String("TokenModelType"), + } + resp, err := svc.GetUserAttributeVerificationCode(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCognitoIdentityProvider_ListUserPoolClients() { + svc := cognitoidentityprovider.New(session.New()) + + params := &cognitoidentityprovider.ListUserPoolClientsInput{ + UserPoolId: aws.String("UserPoolIdType"), // Required + MaxResults: aws.Int64(1), + NextToken: aws.String("PaginationKey"), + } + resp, err := svc.ListUserPoolClients(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentityProvider_ListUserPools() { + svc := cognitoidentityprovider.New(session.New()) + + params := &cognitoidentityprovider.ListUserPoolsInput{ + MaxResults: aws.Int64(1), // Required + NextToken: aws.String("PaginationKeyType"), + } + resp, err := svc.ListUserPools(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentityProvider_ListUsers() { + svc := cognitoidentityprovider.New(session.New()) + + params := &cognitoidentityprovider.ListUsersInput{ + UserPoolId: aws.String("UserPoolIdType"), // Required + AttributesToGet: []*string{ + aws.String("AttributeNameType"), // Required + // More values... + }, + Limit: aws.Int64(1), + PaginationToken: aws.String("SearchPaginationTokenType"), + UserStatus: aws.String("UserStatusType"), + } + resp, err := svc.ListUsers(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentityProvider_ResendConfirmationCode() { + svc := cognitoidentityprovider.New(session.New()) + + params := &cognitoidentityprovider.ResendConfirmationCodeInput{ + ClientId: aws.String("ClientIdType"), // Required + Username: aws.String("UsernameType"), // Required + SecretHash: aws.String("SecretHashType"), + } + resp, err := svc.ResendConfirmationCode(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentityProvider_SetUserSettings() { + svc := cognitoidentityprovider.New(session.New()) + + params := &cognitoidentityprovider.SetUserSettingsInput{ + AccessToken: aws.String("TokenModelType"), // Required + MFAOptions: []*cognitoidentityprovider.MFAOptionType{ // Required + { // Required + AttributeName: aws.String("AttributeNameType"), + DeliveryMedium: aws.String("DeliveryMediumType"), + }, + // More values... + }, + } + resp, err := svc.SetUserSettings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCognitoIdentityProvider_SignUp() { + svc := cognitoidentityprovider.New(session.New()) + + params := &cognitoidentityprovider.SignUpInput{ + ClientId: aws.String("ClientIdType"), // Required + Password: aws.String("PasswordType"), // Required + Username: aws.String("UsernameType"), // Required + SecretHash: aws.String("SecretHashType"), + UserAttributes: []*cognitoidentityprovider.AttributeType{ + { // Required + Name: aws.String("AttributeNameType"), // Required + Value: aws.String("AttributeValueType"), + }, + // More values... + }, + ValidationData: []*cognitoidentityprovider.AttributeType{ + { // Required + Name: aws.String("AttributeNameType"), // Required + Value: aws.String("AttributeValueType"), + }, + // More values... + }, + } + resp, err := svc.SignUp(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentityProvider_UpdateUserAttributes() { + svc := cognitoidentityprovider.New(session.New()) + + params := &cognitoidentityprovider.UpdateUserAttributesInput{ + UserAttributes: []*cognitoidentityprovider.AttributeType{ // Required + { // Required + Name: aws.String("AttributeNameType"), // Required + Value: aws.String("AttributeValueType"), + }, + // More values... + }, + AccessToken: aws.String("TokenModelType"), + } + resp, err := svc.UpdateUserAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentityProvider_UpdateUserPool() { + svc := cognitoidentityprovider.New(session.New()) + + params := &cognitoidentityprovider.UpdateUserPoolInput{ + UserPoolId: aws.String("UserPoolIdType"), // Required + AutoVerifiedAttributes: []*string{ + aws.String("VerifiedAttributeType"), // Required + // More values... + }, + EmailVerificationMessage: aws.String("EmailVerificationMessageType"), + EmailVerificationSubject: aws.String("EmailVerificationSubjectType"), + LambdaConfig: &cognitoidentityprovider.LambdaConfigType{ + CustomMessage: aws.String("ArnType"), + PostAuthentication: aws.String("ArnType"), + PostConfirmation: aws.String("ArnType"), + PreAuthentication: aws.String("ArnType"), + PreSignUp: aws.String("ArnType"), + }, + MfaConfiguration: aws.String("UserPoolMfaType"), + Policies: &cognitoidentityprovider.UserPoolPolicyType{ + PasswordPolicy: &cognitoidentityprovider.PasswordPolicyType{ + MinimumLength: aws.Int64(1), + RequireLowercase: aws.Bool(true), + RequireNumbers: aws.Bool(true), + RequireSymbols: aws.Bool(true), + RequireUppercase: aws.Bool(true), + }, + }, + SmsAuthenticationMessage: aws.String("SmsVerificationMessageType"), + SmsVerificationMessage: aws.String("SmsVerificationMessageType"), + } + resp, err := svc.UpdateUserPool(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp)
+}
+
+func ExampleCognitoIdentityProvider_UpdateUserPoolClient() {
+ svc := cognitoidentityprovider.New(session.New())
+
+ params := &cognitoidentityprovider.UpdateUserPoolClientInput{
+ ClientId: aws.String("ClientIdType"), // Required
+ UserPoolId: aws.String("UserPoolIdType"), // Required
+ ClientName: aws.String("ClientNameType"),
+ }
+ resp, err := svc.UpdateUserPoolClient(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleCognitoIdentityProvider_VerifyUserAttribute() {
+ svc := cognitoidentityprovider.New(session.New())
+
+ params := &cognitoidentityprovider.VerifyUserAttributeInput{
+ AttributeName: aws.String("AttributeNameType"), // Required
+ Code: aws.String("ConfirmationCodeType"), // Required
+ AccessToken: aws.String("TokenModelType"),
+ }
+ resp, err := svc.VerifyUserAttribute(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/cognitoidentityprovider/service.go b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentityprovider/service.go
new file mode 100644
index 000000000..a176c34da
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentityprovider/service.go
@@ -0,0 +1,93 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package cognitoidentityprovider
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+)
+
+// You can create a user pool in Amazon Cognito Identity to manage directories
+// and users. You can authenticate a user to obtain tokens related to user identity
+// and access policies.
+//
+// This API reference provides information about user pools in Amazon Cognito
+// Identity, which is a new capability that is available as a beta.
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type CognitoIdentityProvider struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "cognito-idp"
+
+// New creates a new instance of the CognitoIdentityProvider client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create a CognitoIdentityProvider client from just a session.
+//     svc := cognitoidentityprovider.New(mySession)
+//
+//     // Create a CognitoIdentityProvider client with additional configuration
+//     svc := cognitoidentityprovider.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *CognitoIdentityProvider {
+ c := p.ClientConfig(ServiceName, cfgs...)
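+ // ClientConfig resolves the merged configuration, handler list, endpoint,
+ // and signing region for this service from the provided session (or any
+ // other client.ConfigProvider); newClient below layers the protocol and
+ // signing handlers on top of that base.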
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *CognitoIdentityProvider { + svc := &CognitoIdentityProvider{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2016-04-18", + JSONVersion: "1.1", + TargetPrefix: "AWSCognitoIdentityProviderService", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a CognitoIdentityProvider operation and runs any +// custom request initialization. +func (c *CognitoIdentityProvider) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cognitosync/api.go b/vendor/github.com/aws/aws-sdk-go/service/cognitosync/api.go new file mode 100644 index 000000000..2fd459f9c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cognitosync/api.go @@ -0,0 +1,2436 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cognitosync provides a client for Amazon Cognito Sync. +package cognitosync + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restjson" +) + +const opBulkPublish = "BulkPublish" + +// BulkPublishRequest generates a "aws/request.Request" representing the +// client's request for the BulkPublish operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the BulkPublish method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the BulkPublishRequest method. 
+// req, resp := client.BulkPublishRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CognitoSync) BulkPublishRequest(input *BulkPublishInput) (req *request.Request, output *BulkPublishOutput) {
+ op := &request.Operation{
+ Name: opBulkPublish,
+ HTTPMethod: "POST",
+ HTTPPath: "/identitypools/{IdentityPoolId}/bulkpublish",
+ }
+
+ if input == nil {
+ input = &BulkPublishInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &BulkPublishOutput{}
+ req.Data = output
+ return
+}
+
+// Initiates a bulk publish of all existing datasets for an Identity Pool to
+// the configured stream. Customers are limited to one successful bulk publish
+// per 24 hours. Bulk publish is an asynchronous request, and customers can see
+// the status of the request via the GetBulkPublishDetails operation.
+//
+// This API can only be called with developer credentials. You cannot call
+// this API with the temporary user credentials provided by Cognito Identity.
+func (c *CognitoSync) BulkPublish(input *BulkPublishInput) (*BulkPublishOutput, error) {
+ req, out := c.BulkPublishRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteDataset = "DeleteDataset"
+
+// DeleteDatasetRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteDataset operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DeleteDataset method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DeleteDatasetRequest method.
+// req, resp := client.DeleteDatasetRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CognitoSync) DeleteDatasetRequest(input *DeleteDatasetInput) (req *request.Request, output *DeleteDatasetOutput) {
+ op := &request.Operation{
+ Name: opDeleteDataset,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/identitypools/{IdentityPoolId}/identities/{IdentityId}/datasets/{DatasetName}",
+ }
+
+ if input == nil {
+ input = &DeleteDatasetInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DeleteDatasetOutput{}
+ req.Data = output
+ return
+}
+
+// Deletes the specific dataset. The dataset will be deleted permanently, and
+// the action can't be undone. Datasets that this dataset was merged with will
+// no longer report the merge. Any subsequent operation on this dataset will
+// result in a ResourceNotFoundException.
+//
+// This API can be called with temporary user credentials provided by Cognito
+// Identity or with developer credentials.
+func (c *CognitoSync) DeleteDataset(input *DeleteDatasetInput) (*DeleteDatasetOutput, error) {
+ req, out := c.DeleteDatasetRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDescribeDataset = "DescribeDataset"
+
+// DescribeDatasetRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeDataset operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
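+//
+// The returned output pointer is only populated after Send completes
+// successfully; until then it points at an empty DescribeDatasetOutput.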
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDataset method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDatasetRequest method. +// req, resp := client.DescribeDatasetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoSync) DescribeDatasetRequest(input *DescribeDatasetInput) (req *request.Request, output *DescribeDatasetOutput) { + op := &request.Operation{ + Name: opDescribeDataset, + HTTPMethod: "GET", + HTTPPath: "/identitypools/{IdentityPoolId}/identities/{IdentityId}/datasets/{DatasetName}", + } + + if input == nil { + input = &DescribeDatasetInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDatasetOutput{} + req.Data = output + return +} + +// Gets meta data about a dataset by identity and dataset name. With Amazon +// Cognito Sync, each identity has access only to its own data. Thus, the credentials +// used to make this API call need to have access to the identity data. +// +// This API can be called with temporary user credentials provided by Cognito +// Identity or with developer credentials. You should use Cognito Identity credentials +// to make this API call. +func (c *CognitoSync) DescribeDataset(input *DescribeDatasetInput) (*DescribeDatasetOutput, error) { + req, out := c.DescribeDatasetRequest(input) + err := req.Send() + return out, err +} + +const opDescribeIdentityPoolUsage = "DescribeIdentityPoolUsage" + +// DescribeIdentityPoolUsageRequest generates a "aws/request.Request" representing the +// client's request for the DescribeIdentityPoolUsage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeIdentityPoolUsage method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeIdentityPoolUsageRequest method. +// req, resp := client.DescribeIdentityPoolUsageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoSync) DescribeIdentityPoolUsageRequest(input *DescribeIdentityPoolUsageInput) (req *request.Request, output *DescribeIdentityPoolUsageOutput) { + op := &request.Operation{ + Name: opDescribeIdentityPoolUsage, + HTTPMethod: "GET", + HTTPPath: "/identitypools/{IdentityPoolId}", + } + + if input == nil { + input = &DescribeIdentityPoolUsageInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeIdentityPoolUsageOutput{} + req.Data = output + return +} + +// Gets usage details (for example, data storage) about a particular identity +// pool. +// +// This API can only be called with developer credentials. 
You cannot call +// this API with the temporary user credentials provided by Cognito Identity. +func (c *CognitoSync) DescribeIdentityPoolUsage(input *DescribeIdentityPoolUsageInput) (*DescribeIdentityPoolUsageOutput, error) { + req, out := c.DescribeIdentityPoolUsageRequest(input) + err := req.Send() + return out, err +} + +const opDescribeIdentityUsage = "DescribeIdentityUsage" + +// DescribeIdentityUsageRequest generates a "aws/request.Request" representing the +// client's request for the DescribeIdentityUsage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeIdentityUsage method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeIdentityUsageRequest method. +// req, resp := client.DescribeIdentityUsageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoSync) DescribeIdentityUsageRequest(input *DescribeIdentityUsageInput) (req *request.Request, output *DescribeIdentityUsageOutput) { + op := &request.Operation{ + Name: opDescribeIdentityUsage, + HTTPMethod: "GET", + HTTPPath: "/identitypools/{IdentityPoolId}/identities/{IdentityId}", + } + + if input == nil { + input = &DescribeIdentityUsageInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeIdentityUsageOutput{} + req.Data = output + return +} + +// Gets usage information for an identity, including number of datasets and +// data usage. +// +// This API can be called with temporary user credentials provided by Cognito +// Identity or with developer credentials. +func (c *CognitoSync) DescribeIdentityUsage(input *DescribeIdentityUsageInput) (*DescribeIdentityUsageOutput, error) { + req, out := c.DescribeIdentityUsageRequest(input) + err := req.Send() + return out, err +} + +const opGetBulkPublishDetails = "GetBulkPublishDetails" + +// GetBulkPublishDetailsRequest generates a "aws/request.Request" representing the +// client's request for the GetBulkPublishDetails operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBulkPublishDetails method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBulkPublishDetailsRequest method. 
+// req, resp := client.GetBulkPublishDetailsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoSync) GetBulkPublishDetailsRequest(input *GetBulkPublishDetailsInput) (req *request.Request, output *GetBulkPublishDetailsOutput) { + op := &request.Operation{ + Name: opGetBulkPublishDetails, + HTTPMethod: "POST", + HTTPPath: "/identitypools/{IdentityPoolId}/getBulkPublishDetails", + } + + if input == nil { + input = &GetBulkPublishDetailsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBulkPublishDetailsOutput{} + req.Data = output + return +} + +// Get the status of the last BulkPublish operation for an identity pool. +// +// This API can only be called with developer credentials. You cannot call +// this API with the temporary user credentials provided by Cognito Identity. +func (c *CognitoSync) GetBulkPublishDetails(input *GetBulkPublishDetailsInput) (*GetBulkPublishDetailsOutput, error) { + req, out := c.GetBulkPublishDetailsRequest(input) + err := req.Send() + return out, err +} + +const opGetCognitoEvents = "GetCognitoEvents" + +// GetCognitoEventsRequest generates a "aws/request.Request" representing the +// client's request for the GetCognitoEvents operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetCognitoEvents method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetCognitoEventsRequest method. +// req, resp := client.GetCognitoEventsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoSync) GetCognitoEventsRequest(input *GetCognitoEventsInput) (req *request.Request, output *GetCognitoEventsOutput) { + op := &request.Operation{ + Name: opGetCognitoEvents, + HTTPMethod: "GET", + HTTPPath: "/identitypools/{IdentityPoolId}/events", + } + + if input == nil { + input = &GetCognitoEventsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetCognitoEventsOutput{} + req.Data = output + return +} + +// Gets the events and the corresponding Lambda functions associated with an +// identity pool. +// +// This API can only be called with developer credentials. You cannot call +// this API with the temporary user credentials provided by Cognito Identity. +func (c *CognitoSync) GetCognitoEvents(input *GetCognitoEventsInput) (*GetCognitoEventsOutput, error) { + req, out := c.GetCognitoEventsRequest(input) + err := req.Send() + return out, err +} + +const opGetIdentityPoolConfiguration = "GetIdentityPoolConfiguration" + +// GetIdentityPoolConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetIdentityPoolConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetIdentityPoolConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetIdentityPoolConfigurationRequest method. +// req, resp := client.GetIdentityPoolConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoSync) GetIdentityPoolConfigurationRequest(input *GetIdentityPoolConfigurationInput) (req *request.Request, output *GetIdentityPoolConfigurationOutput) { + op := &request.Operation{ + Name: opGetIdentityPoolConfiguration, + HTTPMethod: "GET", + HTTPPath: "/identitypools/{IdentityPoolId}/configuration", + } + + if input == nil { + input = &GetIdentityPoolConfigurationInput{} + } + + req = c.newRequest(op, input, output) + output = &GetIdentityPoolConfigurationOutput{} + req.Data = output + return +} + +// Gets the configuration settings of an identity pool. +// +// This API can only be called with developer credentials. You cannot call +// this API with the temporary user credentials provided by Cognito Identity. +func (c *CognitoSync) GetIdentityPoolConfiguration(input *GetIdentityPoolConfigurationInput) (*GetIdentityPoolConfigurationOutput, error) { + req, out := c.GetIdentityPoolConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opListDatasets = "ListDatasets" + +// ListDatasetsRequest generates a "aws/request.Request" representing the +// client's request for the ListDatasets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListDatasets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListDatasetsRequest method. +// req, resp := client.ListDatasetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoSync) ListDatasetsRequest(input *ListDatasetsInput) (req *request.Request, output *ListDatasetsOutput) { + op := &request.Operation{ + Name: opListDatasets, + HTTPMethod: "GET", + HTTPPath: "/identitypools/{IdentityPoolId}/identities/{IdentityId}/datasets", + } + + if input == nil { + input = &ListDatasetsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListDatasetsOutput{} + req.Data = output + return +} + +// Lists datasets for an identity. With Amazon Cognito Sync, each identity has +// access only to its own data. Thus, the credentials used to make this API +// call need to have access to the identity data. +// +// ListDatasets can be called with temporary user credentials provided by Cognito +// Identity or with developer credentials. 
You should use the Cognito Identity +// credentials to make this API call. +func (c *CognitoSync) ListDatasets(input *ListDatasetsInput) (*ListDatasetsOutput, error) { + req, out := c.ListDatasetsRequest(input) + err := req.Send() + return out, err +} + +const opListIdentityPoolUsage = "ListIdentityPoolUsage" + +// ListIdentityPoolUsageRequest generates a "aws/request.Request" representing the +// client's request for the ListIdentityPoolUsage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListIdentityPoolUsage method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListIdentityPoolUsageRequest method. +// req, resp := client.ListIdentityPoolUsageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoSync) ListIdentityPoolUsageRequest(input *ListIdentityPoolUsageInput) (req *request.Request, output *ListIdentityPoolUsageOutput) { + op := &request.Operation{ + Name: opListIdentityPoolUsage, + HTTPMethod: "GET", + HTTPPath: "/identitypools", + } + + if input == nil { + input = &ListIdentityPoolUsageInput{} + } + + req = c.newRequest(op, input, output) + output = &ListIdentityPoolUsageOutput{} + req.Data = output + return +} + +// Gets a list of identity pools registered with Cognito. +// +// ListIdentityPoolUsage can only be called with developer credentials. You +// cannot make this API call with the temporary user credentials provided by +// Cognito Identity. +func (c *CognitoSync) ListIdentityPoolUsage(input *ListIdentityPoolUsageInput) (*ListIdentityPoolUsageOutput, error) { + req, out := c.ListIdentityPoolUsageRequest(input) + err := req.Send() + return out, err +} + +const opListRecords = "ListRecords" + +// ListRecordsRequest generates a "aws/request.Request" representing the +// client's request for the ListRecords operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListRecords method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListRecordsRequest method. 
+// req, resp := client.ListRecordsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CognitoSync) ListRecordsRequest(input *ListRecordsInput) (req *request.Request, output *ListRecordsOutput) {
+ op := &request.Operation{
+ Name: opListRecords,
+ HTTPMethod: "GET",
+ HTTPPath: "/identitypools/{IdentityPoolId}/identities/{IdentityId}/datasets/{DatasetName}/records",
+ }
+
+ if input == nil {
+ input = &ListRecordsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ListRecordsOutput{}
+ req.Data = output
+ return
+}
+
+// Gets paginated records, optionally filtered to those changed after a particular
+// sync count, for a dataset and identity. With Amazon Cognito Sync, each identity
+// has access only to its own data. Thus, the credentials used to make this API
+// call need to have access to the identity data.
+//
+// ListRecords can be called with temporary user credentials provided by Cognito
+// Identity or with developer credentials. You should use Cognito Identity credentials
+// to make this API call.
+func (c *CognitoSync) ListRecords(input *ListRecordsInput) (*ListRecordsOutput, error) {
+ req, out := c.ListRecordsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opRegisterDevice = "RegisterDevice"
+
+// RegisterDeviceRequest generates a "aws/request.Request" representing the
+// client's request for the RegisterDevice operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the RegisterDevice method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the RegisterDeviceRequest method.
+// req, resp := client.RegisterDeviceRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CognitoSync) RegisterDeviceRequest(input *RegisterDeviceInput) (req *request.Request, output *RegisterDeviceOutput) {
+ op := &request.Operation{
+ Name: opRegisterDevice,
+ HTTPMethod: "POST",
+ HTTPPath: "/identitypools/{IdentityPoolId}/identity/{IdentityId}/device",
+ }
+
+ if input == nil {
+ input = &RegisterDeviceInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &RegisterDeviceOutput{}
+ req.Data = output
+ return
+}
+
+// Registers a device to receive push sync notifications.
+//
+// This API can only be called with temporary credentials provided by Cognito
+// Identity. You cannot call this API with developer credentials.
+func (c *CognitoSync) RegisterDevice(input *RegisterDeviceInput) (*RegisterDeviceOutput, error) {
+ req, out := c.RegisterDeviceRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opSetCognitoEvents = "SetCognitoEvents"
+
+// SetCognitoEventsRequest generates a "aws/request.Request" representing the
+// client's request for the SetCognitoEvents operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the SetCognitoEvents method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the SetCognitoEventsRequest method.
+// req, resp := client.SetCognitoEventsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *CognitoSync) SetCognitoEventsRequest(input *SetCognitoEventsInput) (req *request.Request, output *SetCognitoEventsOutput) {
+ op := &request.Operation{
+ Name: opSetCognitoEvents,
+ HTTPMethod: "POST",
+ HTTPPath: "/identitypools/{IdentityPoolId}/events",
+ }
+
+ if input == nil {
+ input = &SetCognitoEventsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &SetCognitoEventsOutput{}
+ req.Data = output
+ return
+}
+
+// Sets the AWS Lambda function for a given event type for an identity pool.
+// This request only updates the key/value pair specified. Other key/value
+// pairs are not updated. To remove a key/value pair, pass an empty value for
+// the particular key.
+//
+// This API can only be called with developer credentials. You cannot call
+// this API with the temporary user credentials provided by Cognito Identity.
+func (c *CognitoSync) SetCognitoEvents(input *SetCognitoEventsInput) (*SetCognitoEventsOutput, error) {
+ req, out := c.SetCognitoEventsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opSetIdentityPoolConfiguration = "SetIdentityPoolConfiguration"
+
+// SetIdentityPoolConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the SetIdentityPoolConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the SetIdentityPoolConfiguration method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the SetIdentityPoolConfigurationRequest method.
+// req, resp := client.SetIdentityPoolConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoSync) SetIdentityPoolConfigurationRequest(input *SetIdentityPoolConfigurationInput) (req *request.Request, output *SetIdentityPoolConfigurationOutput) { + op := &request.Operation{ + Name: opSetIdentityPoolConfiguration, + HTTPMethod: "POST", + HTTPPath: "/identitypools/{IdentityPoolId}/configuration", + } + + if input == nil { + input = &SetIdentityPoolConfigurationInput{} + } + + req = c.newRequest(op, input, output) + output = &SetIdentityPoolConfigurationOutput{} + req.Data = output + return +} + +// Sets the necessary configuration for push sync. +// +// This API can only be called with developer credentials. You cannot call +// this API with the temporary user credentials provided by Cognito Identity. +func (c *CognitoSync) SetIdentityPoolConfiguration(input *SetIdentityPoolConfigurationInput) (*SetIdentityPoolConfigurationOutput, error) { + req, out := c.SetIdentityPoolConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opSubscribeToDataset = "SubscribeToDataset" + +// SubscribeToDatasetRequest generates a "aws/request.Request" representing the +// client's request for the SubscribeToDataset operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SubscribeToDataset method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SubscribeToDatasetRequest method. +// req, resp := client.SubscribeToDatasetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoSync) SubscribeToDatasetRequest(input *SubscribeToDatasetInput) (req *request.Request, output *SubscribeToDatasetOutput) { + op := &request.Operation{ + Name: opSubscribeToDataset, + HTTPMethod: "POST", + HTTPPath: "/identitypools/{IdentityPoolId}/identities/{IdentityId}/datasets/{DatasetName}/subscriptions/{DeviceId}", + } + + if input == nil { + input = &SubscribeToDatasetInput{} + } + + req = c.newRequest(op, input, output) + output = &SubscribeToDatasetOutput{} + req.Data = output + return +} + +// Subscribes to receive notifications when a dataset is modified by another +// device. +// +// This API can only be called with temporary credentials provided by Cognito +// Identity. You cannot call this API with developer credentials. +func (c *CognitoSync) SubscribeToDataset(input *SubscribeToDatasetInput) (*SubscribeToDatasetOutput, error) { + req, out := c.SubscribeToDatasetRequest(input) + err := req.Send() + return out, err +} + +const opUnsubscribeFromDataset = "UnsubscribeFromDataset" + +// UnsubscribeFromDatasetRequest generates a "aws/request.Request" representing the +// client's request for the UnsubscribeFromDataset operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UnsubscribeFromDataset method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UnsubscribeFromDatasetRequest method. +// req, resp := client.UnsubscribeFromDatasetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoSync) UnsubscribeFromDatasetRequest(input *UnsubscribeFromDatasetInput) (req *request.Request, output *UnsubscribeFromDatasetOutput) { + op := &request.Operation{ + Name: opUnsubscribeFromDataset, + HTTPMethod: "DELETE", + HTTPPath: "/identitypools/{IdentityPoolId}/identities/{IdentityId}/datasets/{DatasetName}/subscriptions/{DeviceId}", + } + + if input == nil { + input = &UnsubscribeFromDatasetInput{} + } + + req = c.newRequest(op, input, output) + output = &UnsubscribeFromDatasetOutput{} + req.Data = output + return +} + +// Unsubscribes from receiving notifications when a dataset is modified by another +// device. +// +// This API can only be called with temporary credentials provided by Cognito +// Identity. You cannot call this API with developer credentials. +func (c *CognitoSync) UnsubscribeFromDataset(input *UnsubscribeFromDatasetInput) (*UnsubscribeFromDatasetOutput, error) { + req, out := c.UnsubscribeFromDatasetRequest(input) + err := req.Send() + return out, err +} + +const opUpdateRecords = "UpdateRecords" + +// UpdateRecordsRequest generates a "aws/request.Request" representing the +// client's request for the UpdateRecords operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateRecords method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateRecordsRequest method. +// req, resp := client.UpdateRecordsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CognitoSync) UpdateRecordsRequest(input *UpdateRecordsInput) (req *request.Request, output *UpdateRecordsOutput) { + op := &request.Operation{ + Name: opUpdateRecords, + HTTPMethod: "POST", + HTTPPath: "/identitypools/{IdentityPoolId}/identities/{IdentityId}/datasets/{DatasetName}", + } + + if input == nil { + input = &UpdateRecordsInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateRecordsOutput{} + req.Data = output + return +} + +// Posts updates to records and adds and deletes records for a dataset and user. +// +// The sync count in the record patch is your last known sync count for that +// record. The server will reject an UpdateRecords request with a ResourceConflictException +// if you try to patch a record with a new value but a stale sync count. 
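+//
+// A minimal sketch of that read-modify-write flow (listInput and updateInput
+// are assumed to already name the same dataset and identity; the key, value,
+// and RecordPatch fields shown are illustrative):
+//
+// list, _ := svc.ListRecords(listInput)
+// updateInput.SyncSessionToken = list.SyncSessionToken
+// updateInput.RecordPatches = []*cognitosync.RecordPatch{{
+// Op: aws.String("replace"),
+// Key: aws.String("highScore"),
+// Value: aws.String("42"),
+// SyncCount: list.Records[0].SyncCount, // last known sync count
+// }}
+// resp, err := svc.UpdateRecords(updateInput)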
+//
+// For example, if the sync count on the server is 5 for a key called highScore
+// and you try to submit a new highScore with a sync count of 4, the request
+// will be rejected. To obtain the current sync count for a record, call ListRecords.
+// On a successful update of the record, the response returns the new sync count
+// for that record. You should present that sync count the next time you try
+// to update that same record. When the record does not exist, specify the sync
+// count as 0.
+//
+// This API can be called with temporary user credentials provided by Cognito
+// Identity or with developer credentials.
+func (c *CognitoSync) UpdateRecords(input *UpdateRecordsInput) (*UpdateRecordsOutput, error) {
+ req, out := c.UpdateRecordsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// The input for the BulkPublish operation.
+type BulkPublishInput struct {
+ _ struct{} `type:"structure"`
+
+ // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE)
+ // created by Amazon Cognito. GUID generation is unique within a region.
+ IdentityPoolId *string `location:"uri" locationName:"IdentityPoolId" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s BulkPublishInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BulkPublishInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *BulkPublishInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "BulkPublishInput"}
+ if s.IdentityPoolId == nil {
+ invalidParams.Add(request.NewErrParamRequired("IdentityPoolId"))
+ }
+ if s.IdentityPoolId != nil && len(*s.IdentityPoolId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("IdentityPoolId", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// The output for the BulkPublish operation.
+type BulkPublishOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE)
+ // created by Amazon Cognito. GUID generation is unique within a region.
+ IdentityPoolId *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s BulkPublishOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BulkPublishOutput) GoString() string {
+ return s.String()
+}
+
+// Configuration options for Cognito streams.
+type CognitoStreams struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN of the role Amazon Cognito can assume in order to publish to the
+ // stream. This role must grant access to Amazon Cognito (cognito-sync) to invoke
+ // PutRecord on your Cognito stream.
+ RoleArn *string `min:"20" type:"string"`
+
+ // The name of the Cognito stream to receive updates. This stream must be in
+ // the developer's account and in the same region as the identity pool.
+ StreamName *string `min:"1" type:"string"`
+
+ // Status of the Cognito streams. Valid values are: ENABLED - Streaming of updates
+ // to identity pool is enabled.
+ //
+ // DISABLED - Streaming of updates to identity pool is disabled. Bulk publish
+ // will also fail if StreamingStatus is DISABLED.
+ StreamingStatus *string `type:"string" enum:"StreamingStatus"` +} + +// String returns the string representation +func (s CognitoStreams) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CognitoStreams) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CognitoStreams) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CognitoStreams"} + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.StreamName != nil && len(*s.StreamName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StreamName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A collection of data for an identity pool. An identity pool can have multiple +// datasets. A dataset is per identity and can be general or associated with +// a particular entity in an application (like a saved game). Datasets are automatically +// created if they don't exist. Data is synced by dataset, and a dataset can +// hold up to 1MB of key-value pairs. +type Dataset struct { + _ struct{} `type:"structure"` + + // Date on which the dataset was created. + CreationDate *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Total size in bytes of the records in this dataset. + DataStorage *int64 `type:"long"` + + // A string of up to 128 characters. Allowed characters are a-z, A-Z, 0-9, '_' + // (underscore), '-' (dash), and '.' (dot). + DatasetName *string `min:"1" type:"string"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. GUID generation is unique within a region. + IdentityId *string `min:"1" type:"string"` + + // The device that made the last change to this dataset. + LastModifiedBy *string `type:"string"` + + // Date when the dataset was last modified. + LastModifiedDate *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Number of records in this dataset. + NumRecords *int64 `type:"long"` +} + +// String returns the string representation +func (s Dataset) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Dataset) GoString() string { + return s.String() +} + +// A request to delete the specific dataset. +type DeleteDatasetInput struct { + _ struct{} `type:"structure"` + + // A string of up to 128 characters. Allowed characters are a-z, A-Z, 0-9, '_' + // (underscore), '-' (dash), and '.' (dot). + DatasetName *string `location:"uri" locationName:"DatasetName" min:"1" type:"string" required:"true"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. GUID generation is unique within a region. + IdentityId *string `location:"uri" locationName:"IdentityId" min:"1" type:"string" required:"true"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. GUID generation is unique within a region. 
+ IdentityPoolId *string `location:"uri" locationName:"IdentityPoolId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDatasetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDatasetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDatasetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDatasetInput"} + if s.DatasetName == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetName")) + } + if s.DatasetName != nil && len(*s.DatasetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatasetName", 1)) + } + if s.IdentityId == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityId")) + } + if s.IdentityId != nil && len(*s.IdentityId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityId", 1)) + } + if s.IdentityPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityPoolId")) + } + if s.IdentityPoolId != nil && len(*s.IdentityPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityPoolId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Response to a successful DeleteDataset request. +type DeleteDatasetOutput struct { + _ struct{} `type:"structure"` + + // A collection of data for an identity pool. An identity pool can have multiple + // datasets. A dataset is per identity and can be general or associated with + // a particular entity in an application (like a saved game). Datasets are automatically + // created if they don't exist. Data is synced by dataset, and a dataset can + // hold up to 1MB of key-value pairs. + Dataset *Dataset `type:"structure"` +} + +// String returns the string representation +func (s DeleteDatasetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDatasetOutput) GoString() string { + return s.String() +} + +// A request for meta data about a dataset (creation date, number of records, +// size) by owner and dataset name. +type DescribeDatasetInput struct { + _ struct{} `type:"structure"` + + // A string of up to 128 characters. Allowed characters are a-z, A-Z, 0-9, '_' + // (underscore), '-' (dash), and '.' (dot). + DatasetName *string `location:"uri" locationName:"DatasetName" min:"1" type:"string" required:"true"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. GUID generation is unique within a region. + IdentityId *string `location:"uri" locationName:"IdentityId" min:"1" type:"string" required:"true"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. GUID generation is unique within a region. + IdentityPoolId *string `location:"uri" locationName:"IdentityPoolId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeDatasetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDatasetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
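+// The SDK runs this check automatically before sending the request, so a
+// direct call is only needed to validate parameters ahead of time.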
+func (s *DescribeDatasetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDatasetInput"} + if s.DatasetName == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetName")) + } + if s.DatasetName != nil && len(*s.DatasetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatasetName", 1)) + } + if s.IdentityId == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityId")) + } + if s.IdentityId != nil && len(*s.IdentityId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityId", 1)) + } + if s.IdentityPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityPoolId")) + } + if s.IdentityPoolId != nil && len(*s.IdentityPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityPoolId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Response to a successful DescribeDataset request. +type DescribeDatasetOutput struct { + _ struct{} `type:"structure"` + + // Meta data for a collection of data for an identity. An identity can have + // multiple datasets. A dataset can be general or associated with a particular + // entity in an application (like a saved game). Datasets are automatically + // created if they don't exist. Data is synced by dataset, and a dataset can + // hold up to 1MB of key-value pairs. + Dataset *Dataset `type:"structure"` +} + +// String returns the string representation +func (s DescribeDatasetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDatasetOutput) GoString() string { + return s.String() +} + +// A request for usage information about the identity pool. +type DescribeIdentityPoolUsageInput struct { + _ struct{} `type:"structure"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. GUID generation is unique within a region. + IdentityPoolId *string `location:"uri" locationName:"IdentityPoolId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeIdentityPoolUsageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeIdentityPoolUsageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeIdentityPoolUsageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeIdentityPoolUsageInput"} + if s.IdentityPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityPoolId")) + } + if s.IdentityPoolId != nil && len(*s.IdentityPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityPoolId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Response to a successful DescribeIdentityPoolUsage request. +type DescribeIdentityPoolUsageOutput struct { + _ struct{} `type:"structure"` + + // Information about the usage of the identity pool. + IdentityPoolUsage *IdentityPoolUsage `type:"structure"` +} + +// String returns the string representation +func (s DescribeIdentityPoolUsageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeIdentityPoolUsageOutput) GoString() string { + return s.String() +} + +// A request for information about the usage of an identity pool. 
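+//
+// A sketch of a call using this input (both IDs are placeholder values):
+//
+// resp, err := svc.DescribeIdentityUsage(&cognitosync.DescribeIdentityUsageInput{
+// IdentityPoolId: aws.String("us-east-1:pool-guid"),
+// IdentityId: aws.String("us-east-1:identity-guid"),
+// })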
+type DescribeIdentityUsageInput struct { + _ struct{} `type:"structure"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. GUID generation is unique within a region. + IdentityId *string `location:"uri" locationName:"IdentityId" min:"1" type:"string" required:"true"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. GUID generation is unique within a region. + IdentityPoolId *string `location:"uri" locationName:"IdentityPoolId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeIdentityUsageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeIdentityUsageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeIdentityUsageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeIdentityUsageInput"} + if s.IdentityId == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityId")) + } + if s.IdentityId != nil && len(*s.IdentityId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityId", 1)) + } + if s.IdentityPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityPoolId")) + } + if s.IdentityPoolId != nil && len(*s.IdentityPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityPoolId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The response to a successful DescribeIdentityUsage request. +type DescribeIdentityUsageOutput struct { + _ struct{} `type:"structure"` + + // Usage information for the identity. + IdentityUsage *IdentityUsage `type:"structure"` +} + +// String returns the string representation +func (s DescribeIdentityUsageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeIdentityUsageOutput) GoString() string { + return s.String() +} + +// The input for the GetBulkPublishDetails operation. +type GetBulkPublishDetailsInput struct { + _ struct{} `type:"structure"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. GUID generation is unique within a region. + IdentityPoolId *string `location:"uri" locationName:"IdentityPoolId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBulkPublishDetailsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBulkPublishDetailsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBulkPublishDetailsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBulkPublishDetailsInput"} + if s.IdentityPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityPoolId")) + } + if s.IdentityPoolId != nil && len(*s.IdentityPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityPoolId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The output for the GetBulkPublishDetails operation. 
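+//
+// Because BulkPublish is asynchronous, callers typically poll this operation
+// until BulkPublishStatus leaves IN_PROGRESS. A sketch, assuming input is a
+// prepared *GetBulkPublishDetailsInput:
+//
+// for {
+// details, err := svc.GetBulkPublishDetails(input)
+// if err != nil {
+// break
+// }
+// if aws.StringValue(details.BulkPublishStatus) != "IN_PROGRESS" {
+// break // NOT_STARTED, SUCCEEDED, or FAILED
+// }
+// time.Sleep(10 * time.Second)
+// }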
+type GetBulkPublishDetailsOutput struct { + _ struct{} `type:"structure"` + + // If BulkPublishStatus is SUCCEEDED, the time the last bulk publish operation + // completed. + BulkPublishCompleteTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The date/time at which the last bulk publish was initiated. + BulkPublishStartTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Status of the last bulk publish operation, valid values are: NOT_STARTED + // - No bulk publish has been requested for this identity pool + // + // IN_PROGRESS - Data is being published to the configured stream + // + // SUCCEEDED - All data for the identity pool has been published to the configured + // stream + // + // FAILED - Some portion of the data has failed to publish, check FailureMessage + // for the cause. + BulkPublishStatus *string `type:"string" enum:"BulkPublishStatus"` + + // If BulkPublishStatus is FAILED this field will contain the error message + // that caused the bulk publish to fail. + FailureMessage *string `type:"string"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. GUID generation is unique within a region. + IdentityPoolId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetBulkPublishDetailsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBulkPublishDetailsOutput) GoString() string { + return s.String() +} + +// A request for a list of the configured Cognito Events +type GetCognitoEventsInput struct { + _ struct{} `type:"structure"` + + // The Cognito Identity Pool ID for the request + IdentityPoolId *string `location:"uri" locationName:"IdentityPoolId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetCognitoEventsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCognitoEventsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetCognitoEventsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetCognitoEventsInput"} + if s.IdentityPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityPoolId")) + } + if s.IdentityPoolId != nil && len(*s.IdentityPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityPoolId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The response from the GetCognitoEvents request +type GetCognitoEventsOutput struct { + _ struct{} `type:"structure"` + + // The Cognito Events returned from the GetCognitoEvents request + Events map[string]*string `type:"map"` +} + +// String returns the string representation +func (s GetCognitoEventsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCognitoEventsOutput) GoString() string { + return s.String() +} + +// The input for the GetIdentityPoolConfiguration operation. +type GetIdentityPoolConfigurationInput struct { + _ struct{} `type:"structure"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. This is the ID of the pool for which to return + // a configuration. 
+ IdentityPoolId *string `location:"uri" locationName:"IdentityPoolId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetIdentityPoolConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityPoolConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetIdentityPoolConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetIdentityPoolConfigurationInput"} + if s.IdentityPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityPoolId")) + } + if s.IdentityPoolId != nil && len(*s.IdentityPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityPoolId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The output for the GetIdentityPoolConfiguration operation. +type GetIdentityPoolConfigurationOutput struct { + _ struct{} `type:"structure"` + + // Options to apply to this identity pool for Amazon Cognito streams. + CognitoStreams *CognitoStreams `type:"structure"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. + IdentityPoolId *string `min:"1" type:"string"` + + // Options to apply to this identity pool for push synchronization. + PushSync *PushSync `type:"structure"` +} + +// String returns the string representation +func (s GetIdentityPoolConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityPoolConfigurationOutput) GoString() string { + return s.String() +} + +// Usage information for the identity pool. +type IdentityPoolUsage struct { + _ struct{} `type:"structure"` + + // Data storage information for the identity pool. + DataStorage *int64 `type:"long"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. GUID generation is unique within a region. + IdentityPoolId *string `min:"1" type:"string"` + + // Date on which the identity pool was last modified. + LastModifiedDate *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Number of sync sessions for the identity pool. + SyncSessionsCount *int64 `type:"long"` +} + +// String returns the string representation +func (s IdentityPoolUsage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IdentityPoolUsage) GoString() string { + return s.String() +} + +// Usage information for the identity. +type IdentityUsage struct { + _ struct{} `type:"structure"` + + // Total data storage for this identity. + DataStorage *int64 `type:"long"` + + // Number of datasets for the identity. + DatasetCount *int64 `type:"integer"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. GUID generation is unique within a region. + IdentityId *string `min:"1" type:"string"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. GUID generation is unique within a region. + IdentityPoolId *string `min:"1" type:"string"` + + // Date on which the identity was last modified. 
+ LastModifiedDate *time.Time `type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s IdentityUsage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IdentityUsage) GoString() string { + return s.String() +} + +// Request for a list of datasets for an identity. +type ListDatasetsInput struct { + _ struct{} `type:"structure"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. GUID generation is unique within a region. + IdentityId *string `location:"uri" locationName:"IdentityId" min:"1" type:"string" required:"true"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. GUID generation is unique within a region. + IdentityPoolId *string `location:"uri" locationName:"IdentityPoolId" min:"1" type:"string" required:"true"` + + // The maximum number of results to be returned. + MaxResults *int64 `location:"querystring" locationName:"maxResults" type:"integer"` + + // A pagination token for obtaining the next page of results. + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListDatasetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDatasetsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListDatasetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListDatasetsInput"} + if s.IdentityId == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityId")) + } + if s.IdentityId != nil && len(*s.IdentityId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityId", 1)) + } + if s.IdentityPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityPoolId")) + } + if s.IdentityPoolId != nil && len(*s.IdentityPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityPoolId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Returned for a successful ListDatasets request. +type ListDatasetsOutput struct { + _ struct{} `type:"structure"` + + // Number of datasets returned. + Count *int64 `type:"integer"` + + // A set of datasets. + Datasets []*Dataset `type:"list"` + + // A pagination token for obtaining the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListDatasetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDatasetsOutput) GoString() string { + return s.String() +} + +// A request for usage information on an identity pool. +type ListIdentityPoolUsageInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to be returned. + MaxResults *int64 `location:"querystring" locationName:"maxResults" type:"integer"` + + // A pagination token for obtaining the next page of results. 
+ NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListIdentityPoolUsageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListIdentityPoolUsageInput) GoString() string { + return s.String() +} + +// Returned for a successful ListIdentityPoolUsage request. +type ListIdentityPoolUsageOutput struct { + _ struct{} `type:"structure"` + + // Total number of identities for the identity pool. + Count *int64 `type:"integer"` + + // Usage information for the identity pools. + IdentityPoolUsages []*IdentityPoolUsage `type:"list"` + + // The maximum number of results to be returned. + MaxResults *int64 `type:"integer"` + + // A pagination token for obtaining the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListIdentityPoolUsageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListIdentityPoolUsageOutput) GoString() string { + return s.String() +} + +// A request for a list of records. +type ListRecordsInput struct { + _ struct{} `type:"structure"` + + // A string of up to 128 characters. Allowed characters are a-z, A-Z, 0-9, '_' + // (underscore), '-' (dash), and '.' (dot). + DatasetName *string `location:"uri" locationName:"DatasetName" min:"1" type:"string" required:"true"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. GUID generation is unique within a region. + IdentityId *string `location:"uri" locationName:"IdentityId" min:"1" type:"string" required:"true"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. GUID generation is unique within a region. + IdentityPoolId *string `location:"uri" locationName:"IdentityPoolId" min:"1" type:"string" required:"true"` + + // The last server sync count for this record. + LastSyncCount *int64 `location:"querystring" locationName:"lastSyncCount" type:"long"` + + // The maximum number of results to be returned. + MaxResults *int64 `location:"querystring" locationName:"maxResults" type:"integer"` + + // A pagination token for obtaining the next page of results. + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` + + // A token containing a session ID, identity ID, and expiration. + SyncSessionToken *string `location:"querystring" locationName:"syncSessionToken" type:"string"` +} + +// String returns the string representation +func (s ListRecordsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRecordsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
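+//
+// A minimal sketch of checking a request client-side before sending it (the
+// empty input below is a placeholder, not part of this file):
+//
+// in := &ListRecordsInput{}
+// if err := in.Validate(); err != nil {
+// // err is a request.ErrInvalidParams naming every missing
+// // required field (DatasetName, IdentityId, IdentityPoolId).
+// }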
+func (s *ListRecordsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListRecordsInput"}
+ if s.DatasetName == nil {
+ invalidParams.Add(request.NewErrParamRequired("DatasetName"))
+ }
+ if s.DatasetName != nil && len(*s.DatasetName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("DatasetName", 1))
+ }
+ if s.IdentityId == nil {
+ invalidParams.Add(request.NewErrParamRequired("IdentityId"))
+ }
+ if s.IdentityId != nil && len(*s.IdentityId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("IdentityId", 1))
+ }
+ if s.IdentityPoolId == nil {
+ invalidParams.Add(request.NewErrParamRequired("IdentityPoolId"))
+ }
+ if s.IdentityPoolId != nil && len(*s.IdentityPoolId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("IdentityPoolId", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Returned for a successful ListRecords request.
+type ListRecordsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Total number of records.
+ Count *int64 `type:"integer"`
+
+ // A boolean value specifying whether to delete the dataset locally.
+ DatasetDeletedAfterRequestedSyncCount *bool `type:"boolean"`
+
+ // Indicates whether the dataset exists.
+ DatasetExists *bool `type:"boolean"`
+
+ // Server sync count for this dataset.
+ DatasetSyncCount *int64 `type:"long"`
+
+ // The user/device that made the last change to this record.
+ LastModifiedBy *string `type:"string"`
+
+ // Names of merged datasets.
+ MergedDatasetNames []*string `type:"list"`
+
+ // A pagination token for obtaining the next page of results.
+ NextToken *string `type:"string"`
+
+ // A list of all records.
+ Records []*Record `type:"list"`
+
+ // A token containing a session ID, identity ID, and expiration.
+ SyncSessionToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListRecordsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListRecordsOutput) GoString() string {
+ return s.String()
+}
+
+// Configuration options to be applied to the identity pool.
+type PushSync struct {
+ _ struct{} `type:"structure"`
+
+ // List of SNS platform application ARNs that could be used by clients.
+ ApplicationArns []*string `type:"list"`
+
+ // A role configured to allow Cognito to call SNS on behalf of the developer.
+ RoleArn *string `min:"20" type:"string"`
+}
+
+// String returns the string representation
+func (s PushSync) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PushSync) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PushSync) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PushSync"}
+ if s.RoleArn != nil && len(*s.RoleArn) < 20 {
+ invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// The basic data structure of a record.
+type Record struct {
+ _ struct{} `type:"structure"`
+
+ // The last modified date of the client device.
+ DeviceLastModifiedDate *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The key for the record.
+ Key *string `min:"1" type:"string"`
+
+ // The user/device that made the last change to this record.
+ LastModifiedBy *string `type:"string"`
+
+ // The date on which the record was last modified.
+ LastModifiedDate *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The server sync count for this record.
+ SyncCount *int64 `type:"long"`
+
+ // The value for the record.
+ Value *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Record) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Record) GoString() string {
+ return s.String()
+}
+
+// An update operation for a record.
+type RecordPatch struct {
+ _ struct{} `type:"structure"`
+
+ // The last modified date of the client device.
+ DeviceLastModifiedDate *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The key associated with the record patch.
+ Key *string `min:"1" type:"string" required:"true"`
+
+ // An operation, either replace or remove.
+ Op *string `type:"string" required:"true" enum:"Operation"`
+
+ // Last known server sync count for this record. Set to 0 if unknown.
+ SyncCount *int64 `type:"long" required:"true"`
+
+ // The value associated with the record patch.
+ Value *string `type:"string"`
+}
+
+// String returns the string representation
+func (s RecordPatch) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RecordPatch) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RecordPatch) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RecordPatch"}
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.Op == nil {
+ invalidParams.Add(request.NewErrParamRequired("Op"))
+ }
+ if s.SyncCount == nil {
+ invalidParams.Add(request.NewErrParamRequired("SyncCount"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// A request to RegisterDevice.
+type RegisterDeviceInput struct {
+ _ struct{} `type:"structure"`
+
+ // The unique ID for this identity.
+ IdentityId *string `location:"uri" locationName:"IdentityId" min:"1" type:"string" required:"true"`
+
+ // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE)
+ // created by Amazon Cognito. Here, the ID of the pool that the identity belongs
+ // to.
+ IdentityPoolId *string `location:"uri" locationName:"IdentityPoolId" min:"1" type:"string" required:"true"`
+
+ // The SNS platform type (e.g. GCM, ADM, APNS, APNS_SANDBOX).
+ Platform *string `type:"string" required:"true" enum:"Platform"`
+
+ // The push token.
+ Token *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RegisterDeviceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterDeviceInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RegisterDeviceInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RegisterDeviceInput"}
+ if s.IdentityId == nil {
+ invalidParams.Add(request.NewErrParamRequired("IdentityId"))
+ }
+ if s.IdentityId != nil && len(*s.IdentityId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("IdentityId", 1))
+ }
+ if s.IdentityPoolId == nil {
+ invalidParams.Add(request.NewErrParamRequired("IdentityPoolId"))
+ }
+ if s.IdentityPoolId != nil && len(*s.IdentityPoolId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("IdentityPoolId", 1))
+ }
+ if s.Platform == nil {
+ invalidParams.Add(request.NewErrParamRequired("Platform"))
+ }
+ if s.Token == nil {
+ invalidParams.Add(request.NewErrParamRequired("Token"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Response to a RegisterDevice request.
+type RegisterDeviceOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The unique ID generated for this device by Cognito.
+ DeviceId *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s RegisterDeviceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterDeviceOutput) GoString() string {
+ return s.String()
+}
+
+// A request to configure Cognito Events.
+type SetCognitoEventsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The events to configure.
+ Events map[string]*string `type:"map" required:"true"`
+
+ // The Cognito Identity Pool to use when configuring Cognito Events.
+ IdentityPoolId *string `location:"uri" locationName:"IdentityPoolId" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s SetCognitoEventsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetCognitoEventsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SetCognitoEventsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "SetCognitoEventsInput"}
+ if s.Events == nil {
+ invalidParams.Add(request.NewErrParamRequired("Events"))
+ }
+ if s.IdentityPoolId == nil {
+ invalidParams.Add(request.NewErrParamRequired("IdentityPoolId"))
+ }
+ if s.IdentityPoolId != nil && len(*s.IdentityPoolId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("IdentityPoolId", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type SetCognitoEventsOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s SetCognitoEventsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetCognitoEventsOutput) GoString() string {
+ return s.String()
+}
+
+// The input for the SetIdentityPoolConfiguration operation.
+type SetIdentityPoolConfigurationInput struct {
+ _ struct{} `type:"structure"`
+
+ // Options to apply to this identity pool for Amazon Cognito streams.
+ CognitoStreams *CognitoStreams `type:"structure"`
+
+ // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE)
+ // created by Amazon Cognito. This is the ID of the pool to modify.
+ IdentityPoolId *string `location:"uri" locationName:"IdentityPoolId" min:"1" type:"string" required:"true"`
+
+ // Options to apply to this identity pool for push synchronization.
+ PushSync *PushSync `type:"structure"`
+}
+
+// String returns the string representation
+func (s SetIdentityPoolConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetIdentityPoolConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SetIdentityPoolConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "SetIdentityPoolConfigurationInput"}
+ if s.IdentityPoolId == nil {
+ invalidParams.Add(request.NewErrParamRequired("IdentityPoolId"))
+ }
+ if s.IdentityPoolId != nil && len(*s.IdentityPoolId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("IdentityPoolId", 1))
+ }
+ if s.CognitoStreams != nil {
+ if err := s.CognitoStreams.Validate(); err != nil {
+ invalidParams.AddNested("CognitoStreams", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.PushSync != nil {
+ if err := s.PushSync.Validate(); err != nil {
+ invalidParams.AddNested("PushSync", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// The output for the SetIdentityPoolConfiguration operation.
+type SetIdentityPoolConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Options to apply to this identity pool for Amazon Cognito streams.
+ CognitoStreams *CognitoStreams `type:"structure"`
+
+ // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE)
+ // created by Amazon Cognito.
+ IdentityPoolId *string `min:"1" type:"string"`
+
+ // Options to apply to this identity pool for push synchronization.
+ PushSync *PushSync `type:"structure"`
+}
+
+// String returns the string representation
+func (s SetIdentityPoolConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetIdentityPoolConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// A request to SubscribeToDataset.
+type SubscribeToDatasetInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the dataset to subscribe to.
+ DatasetName *string `location:"uri" locationName:"DatasetName" min:"1" type:"string" required:"true"`
+
+ // The unique ID generated for this device by Cognito.
+ DeviceId *string `location:"uri" locationName:"DeviceId" min:"1" type:"string" required:"true"`
+
+ // Unique ID for this identity.
+ IdentityId *string `location:"uri" locationName:"IdentityId" min:"1" type:"string" required:"true"`
+
+ // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE)
+ // created by Amazon Cognito. The ID of the pool to which the identity belongs.
+ IdentityPoolId *string `location:"uri" locationName:"IdentityPoolId" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s SubscribeToDatasetInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SubscribeToDatasetInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SubscribeToDatasetInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "SubscribeToDatasetInput"}
+ if s.DatasetName == nil {
+ invalidParams.Add(request.NewErrParamRequired("DatasetName"))
+ }
+ if s.DatasetName != nil && len(*s.DatasetName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("DatasetName", 1))
+ }
+ if s.DeviceId == nil {
+ invalidParams.Add(request.NewErrParamRequired("DeviceId"))
+ }
+ if s.DeviceId != nil && len(*s.DeviceId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("DeviceId", 1))
+ }
+ if s.IdentityId == nil {
+ invalidParams.Add(request.NewErrParamRequired("IdentityId"))
+ }
+ if s.IdentityId != nil && len(*s.IdentityId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("IdentityId", 1))
+ }
+ if s.IdentityPoolId == nil {
+ invalidParams.Add(request.NewErrParamRequired("IdentityPoolId"))
+ }
+ if s.IdentityPoolId != nil && len(*s.IdentityPoolId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("IdentityPoolId", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Response to a SubscribeToDataset request.
+type SubscribeToDatasetOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s SubscribeToDatasetOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SubscribeToDatasetOutput) GoString() string {
+ return s.String()
+}
+
+// A request to UnsubscribeFromDataset.
+type UnsubscribeFromDatasetInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the dataset from which to unsubscribe.
+ DatasetName *string `location:"uri" locationName:"DatasetName" min:"1" type:"string" required:"true"`
+
+ // The unique ID generated for this device by Cognito.
+ DeviceId *string `location:"uri" locationName:"DeviceId" min:"1" type:"string" required:"true"`
+
+ // Unique ID for this identity.
+ IdentityId *string `location:"uri" locationName:"IdentityId" min:"1" type:"string" required:"true"`
+
+ // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE)
+ // created by Amazon Cognito. The ID of the pool to which this identity belongs.
+ IdentityPoolId *string `location:"uri" locationName:"IdentityPoolId" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UnsubscribeFromDatasetInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UnsubscribeFromDatasetInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UnsubscribeFromDatasetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UnsubscribeFromDatasetInput"} + if s.DatasetName == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetName")) + } + if s.DatasetName != nil && len(*s.DatasetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatasetName", 1)) + } + if s.DeviceId == nil { + invalidParams.Add(request.NewErrParamRequired("DeviceId")) + } + if s.DeviceId != nil && len(*s.DeviceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DeviceId", 1)) + } + if s.IdentityId == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityId")) + } + if s.IdentityId != nil && len(*s.IdentityId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityId", 1)) + } + if s.IdentityPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityPoolId")) + } + if s.IdentityPoolId != nil && len(*s.IdentityPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityPoolId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Response to an UnsubscribeFromDataset request. +type UnsubscribeFromDatasetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UnsubscribeFromDatasetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnsubscribeFromDatasetOutput) GoString() string { + return s.String() +} + +// A request to post updates to records or add and delete records for a dataset +// and user. +type UpdateRecordsInput struct { + _ struct{} `type:"structure"` + + // Intended to supply a device ID that will populate the lastModifiedBy field + // referenced in other methods. The ClientContext field is not yet implemented. + ClientContext *string `location:"header" locationName:"x-amz-Client-Context" type:"string"` + + // A string of up to 128 characters. Allowed characters are a-z, A-Z, 0-9, '_' + // (underscore), '-' (dash), and '.' (dot). + DatasetName *string `location:"uri" locationName:"DatasetName" min:"1" type:"string" required:"true"` + + // The unique ID generated for this device by Cognito. + DeviceId *string `min:"1" type:"string"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. GUID generation is unique within a region. + IdentityId *string `location:"uri" locationName:"IdentityId" min:"1" type:"string" required:"true"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. GUID generation is unique within a region. + IdentityPoolId *string `location:"uri" locationName:"IdentityPoolId" min:"1" type:"string" required:"true"` + + // A list of patch operations. + RecordPatches []*RecordPatch `type:"list"` + + // The SyncSessionToken returned by a previous call to ListRecords for this + // dataset and identity. + SyncSessionToken *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateRecordsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateRecordsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
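+//
+// Note that each entry in RecordPatches is validated individually and nested
+// errors are reported as RecordPatches[i]. A hedged sketch of the intended
+// call flow, since SyncSessionToken must come from a prior ListRecords (the
+// variable names here are placeholders, not part of this file):
+//
+// lr, _ := svc.ListRecords(listIn) // listIn names the same dataset/identity
+// updIn.SyncSessionToken = lr.SyncSessionToken
+// svc.UpdateRecords(updIn)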
+func (s *UpdateRecordsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UpdateRecordsInput"}
+ if s.DatasetName == nil {
+ invalidParams.Add(request.NewErrParamRequired("DatasetName"))
+ }
+ if s.DatasetName != nil && len(*s.DatasetName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("DatasetName", 1))
+ }
+ if s.DeviceId != nil && len(*s.DeviceId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("DeviceId", 1))
+ }
+ if s.IdentityId == nil {
+ invalidParams.Add(request.NewErrParamRequired("IdentityId"))
+ }
+ if s.IdentityId != nil && len(*s.IdentityId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("IdentityId", 1))
+ }
+ if s.IdentityPoolId == nil {
+ invalidParams.Add(request.NewErrParamRequired("IdentityPoolId"))
+ }
+ if s.IdentityPoolId != nil && len(*s.IdentityPoolId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("IdentityPoolId", 1))
+ }
+ if s.SyncSessionToken == nil {
+ invalidParams.Add(request.NewErrParamRequired("SyncSessionToken"))
+ }
+ if s.RecordPatches != nil {
+ for i, v := range s.RecordPatches {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RecordPatches", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Returned for a successful UpdateRecords request.
+type UpdateRecordsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of records that have been updated.
+ Records []*Record `type:"list"`
+}
+
+// String returns the string representation
+func (s UpdateRecordsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateRecordsOutput) GoString() string {
+ return s.String()
+}
+
+const (
+ // @enum BulkPublishStatus
+ BulkPublishStatusNotStarted = "NOT_STARTED"
+ // @enum BulkPublishStatus
+ BulkPublishStatusInProgress = "IN_PROGRESS"
+ // @enum BulkPublishStatus
+ BulkPublishStatusFailed = "FAILED"
+ // @enum BulkPublishStatus
+ BulkPublishStatusSucceeded = "SUCCEEDED"
+)
+
+const (
+ // @enum Operation
+ OperationReplace = "replace"
+ // @enum Operation
+ OperationRemove = "remove"
+)
+
+const (
+ // @enum Platform
+ PlatformApns = "APNS"
+ // @enum Platform
+ PlatformApnsSandbox = "APNS_SANDBOX"
+ // @enum Platform
+ PlatformGcm = "GCM"
+ // @enum Platform
+ PlatformAdm = "ADM"
+)
+
+const (
+ // @enum StreamingStatus
+ StreamingStatusEnabled = "ENABLED"
+ // @enum StreamingStatus
+ StreamingStatusDisabled = "DISABLED"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/cognitosync/cognitosynciface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/cognitosync/cognitosynciface/interface.go
new file mode 100644
index 000000000..a87454804
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/cognitosync/cognitosynciface/interface.go
@@ -0,0 +1,82 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package cognitosynciface provides an interface for the Amazon Cognito Sync service.
+package cognitosynciface
+
+import (
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/service/cognitosync"
+)
+
+// CognitoSyncAPI is the interface type for cognitosync.CognitoSync.
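+//
+// Consumers commonly use this interface to stub the client in unit tests by
+// embedding it in a struct and overriding only the methods under test; a
+// minimal sketch (mockCognitoSync is an assumption, not part of this package):
+//
+// type mockCognitoSync struct {
+// cognitosynciface.CognitoSyncAPI
+// }
+//
+// func (m *mockCognitoSync) ListDatasets(in *cognitosync.ListDatasetsInput) (*cognitosync.ListDatasetsOutput, error) {
+// return &cognitosync.ListDatasetsOutput{}, nil
+// }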
+type CognitoSyncAPI interface { + BulkPublishRequest(*cognitosync.BulkPublishInput) (*request.Request, *cognitosync.BulkPublishOutput) + + BulkPublish(*cognitosync.BulkPublishInput) (*cognitosync.BulkPublishOutput, error) + + DeleteDatasetRequest(*cognitosync.DeleteDatasetInput) (*request.Request, *cognitosync.DeleteDatasetOutput) + + DeleteDataset(*cognitosync.DeleteDatasetInput) (*cognitosync.DeleteDatasetOutput, error) + + DescribeDatasetRequest(*cognitosync.DescribeDatasetInput) (*request.Request, *cognitosync.DescribeDatasetOutput) + + DescribeDataset(*cognitosync.DescribeDatasetInput) (*cognitosync.DescribeDatasetOutput, error) + + DescribeIdentityPoolUsageRequest(*cognitosync.DescribeIdentityPoolUsageInput) (*request.Request, *cognitosync.DescribeIdentityPoolUsageOutput) + + DescribeIdentityPoolUsage(*cognitosync.DescribeIdentityPoolUsageInput) (*cognitosync.DescribeIdentityPoolUsageOutput, error) + + DescribeIdentityUsageRequest(*cognitosync.DescribeIdentityUsageInput) (*request.Request, *cognitosync.DescribeIdentityUsageOutput) + + DescribeIdentityUsage(*cognitosync.DescribeIdentityUsageInput) (*cognitosync.DescribeIdentityUsageOutput, error) + + GetBulkPublishDetailsRequest(*cognitosync.GetBulkPublishDetailsInput) (*request.Request, *cognitosync.GetBulkPublishDetailsOutput) + + GetBulkPublishDetails(*cognitosync.GetBulkPublishDetailsInput) (*cognitosync.GetBulkPublishDetailsOutput, error) + + GetCognitoEventsRequest(*cognitosync.GetCognitoEventsInput) (*request.Request, *cognitosync.GetCognitoEventsOutput) + + GetCognitoEvents(*cognitosync.GetCognitoEventsInput) (*cognitosync.GetCognitoEventsOutput, error) + + GetIdentityPoolConfigurationRequest(*cognitosync.GetIdentityPoolConfigurationInput) (*request.Request, *cognitosync.GetIdentityPoolConfigurationOutput) + + GetIdentityPoolConfiguration(*cognitosync.GetIdentityPoolConfigurationInput) (*cognitosync.GetIdentityPoolConfigurationOutput, error) + + ListDatasetsRequest(*cognitosync.ListDatasetsInput) (*request.Request, *cognitosync.ListDatasetsOutput) + + ListDatasets(*cognitosync.ListDatasetsInput) (*cognitosync.ListDatasetsOutput, error) + + ListIdentityPoolUsageRequest(*cognitosync.ListIdentityPoolUsageInput) (*request.Request, *cognitosync.ListIdentityPoolUsageOutput) + + ListIdentityPoolUsage(*cognitosync.ListIdentityPoolUsageInput) (*cognitosync.ListIdentityPoolUsageOutput, error) + + ListRecordsRequest(*cognitosync.ListRecordsInput) (*request.Request, *cognitosync.ListRecordsOutput) + + ListRecords(*cognitosync.ListRecordsInput) (*cognitosync.ListRecordsOutput, error) + + RegisterDeviceRequest(*cognitosync.RegisterDeviceInput) (*request.Request, *cognitosync.RegisterDeviceOutput) + + RegisterDevice(*cognitosync.RegisterDeviceInput) (*cognitosync.RegisterDeviceOutput, error) + + SetCognitoEventsRequest(*cognitosync.SetCognitoEventsInput) (*request.Request, *cognitosync.SetCognitoEventsOutput) + + SetCognitoEvents(*cognitosync.SetCognitoEventsInput) (*cognitosync.SetCognitoEventsOutput, error) + + SetIdentityPoolConfigurationRequest(*cognitosync.SetIdentityPoolConfigurationInput) (*request.Request, *cognitosync.SetIdentityPoolConfigurationOutput) + + SetIdentityPoolConfiguration(*cognitosync.SetIdentityPoolConfigurationInput) (*cognitosync.SetIdentityPoolConfigurationOutput, error) + + SubscribeToDatasetRequest(*cognitosync.SubscribeToDatasetInput) (*request.Request, *cognitosync.SubscribeToDatasetOutput) + + SubscribeToDataset(*cognitosync.SubscribeToDatasetInput) (*cognitosync.SubscribeToDatasetOutput, error) + + 
UnsubscribeFromDatasetRequest(*cognitosync.UnsubscribeFromDatasetInput) (*request.Request, *cognitosync.UnsubscribeFromDatasetOutput) + + UnsubscribeFromDataset(*cognitosync.UnsubscribeFromDatasetInput) (*cognitosync.UnsubscribeFromDatasetOutput, error) + + UpdateRecordsRequest(*cognitosync.UpdateRecordsInput) (*request.Request, *cognitosync.UpdateRecordsOutput) + + UpdateRecords(*cognitosync.UpdateRecordsInput) (*cognitosync.UpdateRecordsOutput, error) +} + +var _ CognitoSyncAPI = (*cognitosync.CognitoSync)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cognitosync/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/cognitosync/examples_test.go new file mode 100644 index 000000000..0e38d401e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cognitosync/examples_test.go @@ -0,0 +1,394 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package cognitosync_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/cognitosync" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleCognitoSync_BulkPublish() { + svc := cognitosync.New(session.New()) + + params := &cognitosync.BulkPublishInput{ + IdentityPoolId: aws.String("IdentityPoolId"), // Required + } + resp, err := svc.BulkPublish(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoSync_DeleteDataset() { + svc := cognitosync.New(session.New()) + + params := &cognitosync.DeleteDatasetInput{ + DatasetName: aws.String("DatasetName"), // Required + IdentityId: aws.String("IdentityId"), // Required + IdentityPoolId: aws.String("IdentityPoolId"), // Required + } + resp, err := svc.DeleteDataset(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoSync_DescribeDataset() { + svc := cognitosync.New(session.New()) + + params := &cognitosync.DescribeDatasetInput{ + DatasetName: aws.String("DatasetName"), // Required + IdentityId: aws.String("IdentityId"), // Required + IdentityPoolId: aws.String("IdentityPoolId"), // Required + } + resp, err := svc.DescribeDataset(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoSync_DescribeIdentityPoolUsage() { + svc := cognitosync.New(session.New()) + + params := &cognitosync.DescribeIdentityPoolUsageInput{ + IdentityPoolId: aws.String("IdentityPoolId"), // Required + } + resp, err := svc.DescribeIdentityPoolUsage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCognitoSync_DescribeIdentityUsage() { + svc := cognitosync.New(session.New()) + + params := &cognitosync.DescribeIdentityUsageInput{ + IdentityId: aws.String("IdentityId"), // Required + IdentityPoolId: aws.String("IdentityPoolId"), // Required + } + resp, err := svc.DescribeIdentityUsage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoSync_GetBulkPublishDetails() { + svc := cognitosync.New(session.New()) + + params := &cognitosync.GetBulkPublishDetailsInput{ + IdentityPoolId: aws.String("IdentityPoolId"), // Required + } + resp, err := svc.GetBulkPublishDetails(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoSync_GetCognitoEvents() { + svc := cognitosync.New(session.New()) + + params := &cognitosync.GetCognitoEventsInput{ + IdentityPoolId: aws.String("IdentityPoolId"), // Required + } + resp, err := svc.GetCognitoEvents(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoSync_GetIdentityPoolConfiguration() { + svc := cognitosync.New(session.New()) + + params := &cognitosync.GetIdentityPoolConfigurationInput{ + IdentityPoolId: aws.String("IdentityPoolId"), // Required + } + resp, err := svc.GetIdentityPoolConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoSync_ListDatasets() { + svc := cognitosync.New(session.New()) + + params := &cognitosync.ListDatasetsInput{ + IdentityId: aws.String("IdentityId"), // Required + IdentityPoolId: aws.String("IdentityPoolId"), // Required + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.ListDatasets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoSync_ListIdentityPoolUsage() { + svc := cognitosync.New(session.New()) + + params := &cognitosync.ListIdentityPoolUsageInput{ + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.ListIdentityPoolUsage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCognitoSync_ListRecords() { + svc := cognitosync.New(session.New()) + + params := &cognitosync.ListRecordsInput{ + DatasetName: aws.String("DatasetName"), // Required + IdentityId: aws.String("IdentityId"), // Required + IdentityPoolId: aws.String("IdentityPoolId"), // Required + LastSyncCount: aws.Int64(1), + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + SyncSessionToken: aws.String("SyncSessionToken"), + } + resp, err := svc.ListRecords(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoSync_RegisterDevice() { + svc := cognitosync.New(session.New()) + + params := &cognitosync.RegisterDeviceInput{ + IdentityId: aws.String("IdentityId"), // Required + IdentityPoolId: aws.String("IdentityPoolId"), // Required + Platform: aws.String("Platform"), // Required + Token: aws.String("PushToken"), // Required + } + resp, err := svc.RegisterDevice(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoSync_SetCognitoEvents() { + svc := cognitosync.New(session.New()) + + params := &cognitosync.SetCognitoEventsInput{ + Events: map[string]*string{ // Required + "Key": aws.String("LambdaFunctionArn"), // Required + // More values... + }, + IdentityPoolId: aws.String("IdentityPoolId"), // Required + } + resp, err := svc.SetCognitoEvents(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoSync_SetIdentityPoolConfiguration() { + svc := cognitosync.New(session.New()) + + params := &cognitosync.SetIdentityPoolConfigurationInput{ + IdentityPoolId: aws.String("IdentityPoolId"), // Required + CognitoStreams: &cognitosync.CognitoStreams{ + RoleArn: aws.String("AssumeRoleArn"), + StreamName: aws.String("StreamName"), + StreamingStatus: aws.String("StreamingStatus"), + }, + PushSync: &cognitosync.PushSync{ + ApplicationArns: []*string{ + aws.String("ApplicationArn"), // Required + // More values... + }, + RoleArn: aws.String("AssumeRoleArn"), + }, + } + resp, err := svc.SetIdentityPoolConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoSync_SubscribeToDataset() { + svc := cognitosync.New(session.New()) + + params := &cognitosync.SubscribeToDatasetInput{ + DatasetName: aws.String("DatasetName"), // Required + DeviceId: aws.String("DeviceId"), // Required + IdentityId: aws.String("IdentityId"), // Required + IdentityPoolId: aws.String("IdentityPoolId"), // Required + } + resp, err := svc.SubscribeToDataset(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCognitoSync_UnsubscribeFromDataset() { + svc := cognitosync.New(session.New()) + + params := &cognitosync.UnsubscribeFromDatasetInput{ + DatasetName: aws.String("DatasetName"), // Required + DeviceId: aws.String("DeviceId"), // Required + IdentityId: aws.String("IdentityId"), // Required + IdentityPoolId: aws.String("IdentityPoolId"), // Required + } + resp, err := svc.UnsubscribeFromDataset(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoSync_UpdateRecords() { + svc := cognitosync.New(session.New()) + + params := &cognitosync.UpdateRecordsInput{ + DatasetName: aws.String("DatasetName"), // Required + IdentityId: aws.String("IdentityId"), // Required + IdentityPoolId: aws.String("IdentityPoolId"), // Required + SyncSessionToken: aws.String("SyncSessionToken"), // Required + ClientContext: aws.String("ClientContext"), + DeviceId: aws.String("DeviceId"), + RecordPatches: []*cognitosync.RecordPatch{ + { // Required + Key: aws.String("RecordKey"), // Required + Op: aws.String("Operation"), // Required + SyncCount: aws.Int64(1), // Required + DeviceLastModifiedDate: aws.Time(time.Now()), + Value: aws.String("RecordValue"), + }, + // More values... + }, + } + resp, err := svc.UpdateRecords(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cognitosync/service.go b/vendor/github.com/aws/aws-sdk-go/service/cognitosync/service.go new file mode 100644 index 000000000..a7c0db013 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cognitosync/service.go @@ -0,0 +1,103 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package cognitosync + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/restjson" +) + +// Amazon Cognito Sync provides an AWS service and client library that enable +// cross-device syncing of application-related user data. High-level client +// libraries are available for both iOS and Android. You can use these libraries +// to persist data locally so that it's available even if the device is offline. +// Developer credentials don't need to be stored on the mobile device to access +// the service. You can use Amazon Cognito to obtain a normalized user ID and +// credentials. User data is persisted in a dataset that can store up to 1 MB +// of key-value pairs, and you can have up to 20 datasets per user identity. +// +// With Amazon Cognito Sync, the data stored for each identity is accessible +// only to credentials assigned to that identity. In order to use the Cognito +// Sync service, you need to make API calls using credentials retrieved with +// Amazon Cognito Identity service (http://docs.aws.amazon.com/cognitoidentity/latest/APIReference/Welcome.html). +// +// If you want to use Cognito Sync in an Android or iOS application, you will +// probably want to make API calls via the AWS Mobile SDK. 
To learn more, see
+// the Developer Guide for Android (http://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-sync.html)
+// and the Developer Guide for iOS (http://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-sync.html).
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type CognitoSync struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "cognito-sync"
+
+// New creates a new instance of the CognitoSync client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a CognitoSync client from just a session.
+// svc := cognitosync.New(mySession)
+//
+// // Create a CognitoSync client with additional configuration
+// svc := cognitosync.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *CognitoSync {
+ c := p.ClientConfig(ServiceName, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *CognitoSync {
+ svc := &CognitoSync{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-06-30",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+ svc.Handlers.Build.PushBackNamed(restjson.BuildHandler)
+ svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler)
+ svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler)
+ svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler)
+
+ // Run custom client initialization if present
+ if initClient != nil {
+ initClient(svc.Client)
+ }
+
+ return svc
+}
+
+// newRequest creates a new request for a CognitoSync operation and runs any
+// custom request initialization.
+func (c *CognitoSync) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ // Run custom request initialization if present
+ if initRequest != nil {
+ initRequest(req)
+ }
+
+ return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/configservice/api.go b/vendor/github.com/aws/aws-sdk-go/service/configservice/api.go
new file mode 100644
index 000000000..0ee2dfa77
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/configservice/api.go
@@ -0,0 +1,3845 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package configservice provides a client for AWS Config.
+package configservice
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol"
+ "github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+)
+
+const opDeleteConfigRule = "DeleteConfigRule"
+
+// DeleteConfigRuleRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteConfigRule operation. The "output" return
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteConfigRule method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteConfigRuleRequest method. +// req, resp := client.DeleteConfigRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ConfigService) DeleteConfigRuleRequest(input *DeleteConfigRuleInput) (req *request.Request, output *DeleteConfigRuleOutput) { + op := &request.Operation{ + Name: opDeleteConfigRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteConfigRuleInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteConfigRuleOutput{} + req.Data = output + return +} + +// Deletes the specified AWS Config rule and all of its evaluation results. +// +// AWS Config sets the state of a rule to DELETING until the deletion is complete. +// You cannot update a rule while it is in this state. If you make a PutConfigRule +// or DeleteConfigRule request for the rule, you will receive a ResourceInUseException. +// +// You can check the state of a rule by using the DescribeConfigRules request. +func (c *ConfigService) DeleteConfigRule(input *DeleteConfigRuleInput) (*DeleteConfigRuleOutput, error) { + req, out := c.DeleteConfigRuleRequest(input) + err := req.Send() + return out, err +} + +const opDeleteConfigurationRecorder = "DeleteConfigurationRecorder" + +// DeleteConfigurationRecorderRequest generates a "aws/request.Request" representing the +// client's request for the DeleteConfigurationRecorder operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteConfigurationRecorder method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteConfigurationRecorderRequest method. 
+// req, resp := client.DeleteConfigurationRecorderRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *ConfigService) DeleteConfigurationRecorderRequest(input *DeleteConfigurationRecorderInput) (req *request.Request, output *DeleteConfigurationRecorderOutput) {
+ op := &request.Operation{
+ Name: opDeleteConfigurationRecorder,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteConfigurationRecorderInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &DeleteConfigurationRecorderOutput{}
+ req.Data = output
+ return
+}
+
+// Deletes the configuration recorder.
+//
+// After the configuration recorder is deleted, AWS Config will not record
+// resource configuration changes until you create a new configuration recorder.
+//
+// This action does not delete the configuration information that was previously
+// recorded. You will be able to access the previously recorded information
+// by using the GetResourceConfigHistory action, but you will not be able to
+// access this information in the AWS Config console until you create a new
+// configuration recorder.
+func (c *ConfigService) DeleteConfigurationRecorder(input *DeleteConfigurationRecorderInput) (*DeleteConfigurationRecorderOutput, error) {
+ req, out := c.DeleteConfigurationRecorderRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteDeliveryChannel = "DeleteDeliveryChannel"
+
+// DeleteDeliveryChannelRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteDeliveryChannel operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the DeleteDeliveryChannel method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DeleteDeliveryChannelRequest method.
+// req, resp := client.DeleteDeliveryChannelRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *ConfigService) DeleteDeliveryChannelRequest(input *DeleteDeliveryChannelInput) (req *request.Request, output *DeleteDeliveryChannelOutput) {
+ op := &request.Operation{
+ Name: opDeleteDeliveryChannel,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteDeliveryChannelInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &DeleteDeliveryChannelOutput{}
+ req.Data = output
+ return
+}
+
+// Deletes the delivery channel.
+//
+// Before you can delete the delivery channel, you must stop the configuration
+// recorder by using the StopConfigurationRecorder action.
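+//
+// A minimal sketch of the stop-then-delete sequence (the "default" names are
+// placeholders, not part of this file):
+//
+// svc.StopConfigurationRecorder(&StopConfigurationRecorderInput{
+// ConfigurationRecorderName: aws.String("default"),
+// })
+// svc.DeleteDeliveryChannel(&DeleteDeliveryChannelInput{
+// DeliveryChannelName: aws.String("default"),
+// })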
+func (c *ConfigService) DeleteDeliveryChannel(input *DeleteDeliveryChannelInput) (*DeleteDeliveryChannelOutput, error) {
+ req, out := c.DeleteDeliveryChannelRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeliverConfigSnapshot = "DeliverConfigSnapshot"
+
+// DeliverConfigSnapshotRequest generates a "aws/request.Request" representing the
+// client's request for the DeliverConfigSnapshot operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the DeliverConfigSnapshot method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DeliverConfigSnapshotRequest method.
+// req, resp := client.DeliverConfigSnapshotRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *ConfigService) DeliverConfigSnapshotRequest(input *DeliverConfigSnapshotInput) (req *request.Request, output *DeliverConfigSnapshotOutput) {
+ op := &request.Operation{
+ Name: opDeliverConfigSnapshot,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeliverConfigSnapshotInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DeliverConfigSnapshotOutput{}
+ req.Data = output
+ return
+}
+
+// Schedules delivery of a configuration snapshot to the Amazon S3 bucket in
+// the specified delivery channel. After the delivery has started, AWS Config
+// sends the following notifications using an Amazon SNS topic that you have
+// specified:
+//
+//  * Notification of starting the delivery.
+//
+//  * Notification of delivery completed, if the delivery was successfully
+//  completed.
+//
+//  * Notification of delivery failure, if the delivery failed to complete.
+func (c *ConfigService) DeliverConfigSnapshot(input *DeliverConfigSnapshotInput) (*DeliverConfigSnapshotOutput, error) {
+ req, out := c.DeliverConfigSnapshotRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDescribeComplianceByConfigRule = "DescribeComplianceByConfigRule"
+
+// DescribeComplianceByConfigRuleRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeComplianceByConfigRule operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the DescribeComplianceByConfigRule method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DescribeComplianceByConfigRuleRequest method.
+//    req, resp := client.DescribeComplianceByConfigRuleRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *ConfigService) DescribeComplianceByConfigRuleRequest(input *DescribeComplianceByConfigRuleInput) (req *request.Request, output *DescribeComplianceByConfigRuleOutput) {
+    op := &request.Operation{
+        Name:       opDescribeComplianceByConfigRule,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &DescribeComplianceByConfigRuleInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &DescribeComplianceByConfigRuleOutput{}
+    req.Data = output
+    return
+}
+
+// Indicates whether the specified AWS Config rules are compliant. If a rule
+// is noncompliant, this action returns the number of AWS resources that do
+// not comply with the rule.
+//
+// A rule is compliant if all of the evaluated resources comply with it, and
+// it is noncompliant if any of these resources do not comply.
+//
+// If AWS Config has no current evaluation results for the rule, it returns
+// INSUFFICIENT_DATA. This result might indicate one of the following conditions:
+//
+//    * AWS Config has never invoked an evaluation for the rule. To check whether
+//    it has, use the DescribeConfigRuleEvaluationStatus action to get the
+//    LastSuccessfulInvocationTime and LastFailedInvocationTime.
+//
+//    * The rule's AWS Lambda function is failing to send evaluation results
+//    to AWS Config. Verify that the role that you assigned to your configuration
+//    recorder includes the config:PutEvaluations permission. If the rule is
+//    a customer managed rule, verify that the AWS Lambda execution role includes
+//    the config:PutEvaluations permission.
+//
+//    * The rule's AWS Lambda function has returned NOT_APPLICABLE for all
+//    evaluation results. This can occur if the resources were deleted or removed
+//    from the rule's scope.
+func (c *ConfigService) DescribeComplianceByConfigRule(input *DescribeComplianceByConfigRuleInput) (*DescribeComplianceByConfigRuleOutput, error) {
+    req, out := c.DescribeComplianceByConfigRuleRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opDescribeComplianceByResource = "DescribeComplianceByResource"
+
+// DescribeComplianceByResourceRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeComplianceByResource operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeComplianceByResource method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DescribeComplianceByResourceRequest method.
+//    req, resp := client.DescribeComplianceByResourceRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *ConfigService) DescribeComplianceByResourceRequest(input *DescribeComplianceByResourceInput) (req *request.Request, output *DescribeComplianceByResourceOutput) {
+    op := &request.Operation{
+        Name:       opDescribeComplianceByResource,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &DescribeComplianceByResourceInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &DescribeComplianceByResourceOutput{}
+    req.Data = output
+    return
+}
+
+// Indicates whether the specified AWS resources are compliant. If a resource
+// is noncompliant, this action returns the number of AWS Config rules that
+// the resource does not comply with.
+//
+// A resource is compliant if it complies with all the AWS Config rules that
+// evaluate it. It is noncompliant if it does not comply with one or more of
+// these rules.
+//
+// If AWS Config has no current evaluation results for the resource, it returns
+// INSUFFICIENT_DATA. This result might indicate one of the following conditions
+// about the rules that evaluate the resource:
+//
+//    * AWS Config has never invoked an evaluation for the rule. To check whether
+//    it has, use the DescribeConfigRuleEvaluationStatus action to get the
+//    LastSuccessfulInvocationTime and LastFailedInvocationTime.
+//
+//    * The rule's AWS Lambda function is failing to send evaluation results
+//    to AWS Config. Verify that the role that you assigned to your configuration
+//    recorder includes the config:PutEvaluations permission. If the rule is
+//    a customer managed rule, verify that the AWS Lambda execution role includes
+//    the config:PutEvaluations permission.
+//
+//    * The rule's AWS Lambda function has returned NOT_APPLICABLE for all
+//    evaluation results. This can occur if the resources were deleted or removed
+//    from the rule's scope.
+func (c *ConfigService) DescribeComplianceByResource(input *DescribeComplianceByResourceInput) (*DescribeComplianceByResourceOutput, error) {
+    req, out := c.DescribeComplianceByResourceRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opDescribeConfigRuleEvaluationStatus = "DescribeConfigRuleEvaluationStatus"
+
+// DescribeConfigRuleEvaluationStatusRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeConfigRuleEvaluationStatus operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeConfigRuleEvaluationStatus method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DescribeConfigRuleEvaluationStatusRequest method.
+// req, resp := client.DescribeConfigRuleEvaluationStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ConfigService) DescribeConfigRuleEvaluationStatusRequest(input *DescribeConfigRuleEvaluationStatusInput) (req *request.Request, output *DescribeConfigRuleEvaluationStatusOutput) { + op := &request.Operation{ + Name: opDescribeConfigRuleEvaluationStatus, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeConfigRuleEvaluationStatusInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeConfigRuleEvaluationStatusOutput{} + req.Data = output + return +} + +// Returns status information for each of your AWS managed Config rules. The +// status includes information such as the last time AWS Config invoked the +// rule, the last time AWS Config failed to invoke the rule, and the related +// error for the last failure. +func (c *ConfigService) DescribeConfigRuleEvaluationStatus(input *DescribeConfigRuleEvaluationStatusInput) (*DescribeConfigRuleEvaluationStatusOutput, error) { + req, out := c.DescribeConfigRuleEvaluationStatusRequest(input) + err := req.Send() + return out, err +} + +const opDescribeConfigRules = "DescribeConfigRules" + +// DescribeConfigRulesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeConfigRules operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeConfigRules method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeConfigRulesRequest method. +// req, resp := client.DescribeConfigRulesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ConfigService) DescribeConfigRulesRequest(input *DescribeConfigRulesInput) (req *request.Request, output *DescribeConfigRulesOutput) { + op := &request.Operation{ + Name: opDescribeConfigRules, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeConfigRulesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeConfigRulesOutput{} + req.Data = output + return +} + +// Returns details about your AWS Config rules. +func (c *ConfigService) DescribeConfigRules(input *DescribeConfigRulesInput) (*DescribeConfigRulesOutput, error) { + req, out := c.DescribeConfigRulesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeConfigurationRecorderStatus = "DescribeConfigurationRecorderStatus" + +// DescribeConfigurationRecorderStatusRequest generates a "aws/request.Request" representing the +// client's request for the DescribeConfigurationRecorderStatus operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeConfigurationRecorderStatus method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DescribeConfigurationRecorderStatusRequest method.
+//    req, resp := client.DescribeConfigurationRecorderStatusRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *ConfigService) DescribeConfigurationRecorderStatusRequest(input *DescribeConfigurationRecorderStatusInput) (req *request.Request, output *DescribeConfigurationRecorderStatusOutput) {
+    op := &request.Operation{
+        Name:       opDescribeConfigurationRecorderStatus,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &DescribeConfigurationRecorderStatusInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &DescribeConfigurationRecorderStatusOutput{}
+    req.Data = output
+    return
+}
+
+// Returns the current status of the specified configuration recorder. If a
+// configuration recorder is not specified, this action returns the status of
+// all configuration recorders associated with the account.
+//
+// Currently, you can specify only one configuration recorder per account.
+func (c *ConfigService) DescribeConfigurationRecorderStatus(input *DescribeConfigurationRecorderStatusInput) (*DescribeConfigurationRecorderStatusOutput, error) {
+    req, out := c.DescribeConfigurationRecorderStatusRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opDescribeConfigurationRecorders = "DescribeConfigurationRecorders"
+
+// DescribeConfigurationRecordersRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeConfigurationRecorders operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeConfigurationRecorders method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DescribeConfigurationRecordersRequest method.
+//    req, resp := client.DescribeConfigurationRecordersRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *ConfigService) DescribeConfigurationRecordersRequest(input *DescribeConfigurationRecordersInput) (req *request.Request, output *DescribeConfigurationRecordersOutput) {
+    op := &request.Operation{
+        Name:       opDescribeConfigurationRecorders,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &DescribeConfigurationRecordersInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &DescribeConfigurationRecordersOutput{}
+    req.Data = output
+    return
+}
+
+// Returns the name of one or more specified configuration recorders. If the
+// recorder name is not specified, this action returns the names of all the
+// configuration recorders associated with the account.
+//
+// Currently, you can specify only one configuration recorder per account.
+func (c *ConfigService) DescribeConfigurationRecorders(input *DescribeConfigurationRecordersInput) (*DescribeConfigurationRecordersOutput, error) {
+    req, out := c.DescribeConfigurationRecordersRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opDescribeDeliveryChannelStatus = "DescribeDeliveryChannelStatus"
+
+// DescribeDeliveryChannelStatusRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeDeliveryChannelStatus operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeDeliveryChannelStatus method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DescribeDeliveryChannelStatusRequest method.
+//    req, resp := client.DescribeDeliveryChannelStatusRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *ConfigService) DescribeDeliveryChannelStatusRequest(input *DescribeDeliveryChannelStatusInput) (req *request.Request, output *DescribeDeliveryChannelStatusOutput) {
+    op := &request.Operation{
+        Name:       opDescribeDeliveryChannelStatus,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &DescribeDeliveryChannelStatusInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &DescribeDeliveryChannelStatusOutput{}
+    req.Data = output
+    return
+}
+
+// Returns the current status of the specified delivery channel. If a delivery
+// channel is not specified, this action returns the current status of all delivery
+// channels associated with the account.
+//
+// Currently, you can specify only one delivery channel per account.
+func (c *ConfigService) DescribeDeliveryChannelStatus(input *DescribeDeliveryChannelStatusInput) (*DescribeDeliveryChannelStatusOutput, error) {
+    req, out := c.DescribeDeliveryChannelStatusRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opDescribeDeliveryChannels = "DescribeDeliveryChannels"
+
+// DescribeDeliveryChannelsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeDeliveryChannels operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeDeliveryChannels method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DescribeDeliveryChannelsRequest method.
+// req, resp := client.DescribeDeliveryChannelsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ConfigService) DescribeDeliveryChannelsRequest(input *DescribeDeliveryChannelsInput) (req *request.Request, output *DescribeDeliveryChannelsOutput) { + op := &request.Operation{ + Name: opDescribeDeliveryChannels, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDeliveryChannelsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDeliveryChannelsOutput{} + req.Data = output + return +} + +// Returns details about the specified delivery channel. If a delivery channel +// is not specified, this action returns the details of all delivery channels +// associated with the account. +// +// Currently, you can specify only one delivery channel per account. +func (c *ConfigService) DescribeDeliveryChannels(input *DescribeDeliveryChannelsInput) (*DescribeDeliveryChannelsOutput, error) { + req, out := c.DescribeDeliveryChannelsRequest(input) + err := req.Send() + return out, err +} + +const opGetComplianceDetailsByConfigRule = "GetComplianceDetailsByConfigRule" + +// GetComplianceDetailsByConfigRuleRequest generates a "aws/request.Request" representing the +// client's request for the GetComplianceDetailsByConfigRule operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetComplianceDetailsByConfigRule method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetComplianceDetailsByConfigRuleRequest method. +// req, resp := client.GetComplianceDetailsByConfigRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ConfigService) GetComplianceDetailsByConfigRuleRequest(input *GetComplianceDetailsByConfigRuleInput) (req *request.Request, output *GetComplianceDetailsByConfigRuleOutput) { + op := &request.Operation{ + Name: opGetComplianceDetailsByConfigRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetComplianceDetailsByConfigRuleInput{} + } + + req = c.newRequest(op, input, output) + output = &GetComplianceDetailsByConfigRuleOutput{} + req.Data = output + return +} + +// Returns the evaluation results for the specified AWS Config rule. The results +// indicate which AWS resources were evaluated by the rule, when each resource +// was last evaluated, and whether each resource complies with the rule. +func (c *ConfigService) GetComplianceDetailsByConfigRule(input *GetComplianceDetailsByConfigRuleInput) (*GetComplianceDetailsByConfigRuleOutput, error) { + req, out := c.GetComplianceDetailsByConfigRuleRequest(input) + err := req.Send() + return out, err +} + +const opGetComplianceDetailsByResource = "GetComplianceDetailsByResource" + +// GetComplianceDetailsByResourceRequest generates a "aws/request.Request" representing the +// client's request for the GetComplianceDetailsByResource operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetComplianceDetailsByResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetComplianceDetailsByResourceRequest method. +// req, resp := client.GetComplianceDetailsByResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ConfigService) GetComplianceDetailsByResourceRequest(input *GetComplianceDetailsByResourceInput) (req *request.Request, output *GetComplianceDetailsByResourceOutput) { + op := &request.Operation{ + Name: opGetComplianceDetailsByResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetComplianceDetailsByResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &GetComplianceDetailsByResourceOutput{} + req.Data = output + return +} + +// Returns the evaluation results for the specified AWS resource. The results +// indicate which AWS Config rules were used to evaluate the resource, when +// each rule was last used, and whether the resource complies with each rule. +func (c *ConfigService) GetComplianceDetailsByResource(input *GetComplianceDetailsByResourceInput) (*GetComplianceDetailsByResourceOutput, error) { + req, out := c.GetComplianceDetailsByResourceRequest(input) + err := req.Send() + return out, err +} + +const opGetComplianceSummaryByConfigRule = "GetComplianceSummaryByConfigRule" + +// GetComplianceSummaryByConfigRuleRequest generates a "aws/request.Request" representing the +// client's request for the GetComplianceSummaryByConfigRule operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetComplianceSummaryByConfigRule method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetComplianceSummaryByConfigRuleRequest method. 
+// req, resp := client.GetComplianceSummaryByConfigRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ConfigService) GetComplianceSummaryByConfigRuleRequest(input *GetComplianceSummaryByConfigRuleInput) (req *request.Request, output *GetComplianceSummaryByConfigRuleOutput) { + op := &request.Operation{ + Name: opGetComplianceSummaryByConfigRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetComplianceSummaryByConfigRuleInput{} + } + + req = c.newRequest(op, input, output) + output = &GetComplianceSummaryByConfigRuleOutput{} + req.Data = output + return +} + +// Returns the number of AWS Config rules that are compliant and noncompliant, +// up to a maximum of 25 for each. +func (c *ConfigService) GetComplianceSummaryByConfigRule(input *GetComplianceSummaryByConfigRuleInput) (*GetComplianceSummaryByConfigRuleOutput, error) { + req, out := c.GetComplianceSummaryByConfigRuleRequest(input) + err := req.Send() + return out, err +} + +const opGetComplianceSummaryByResourceType = "GetComplianceSummaryByResourceType" + +// GetComplianceSummaryByResourceTypeRequest generates a "aws/request.Request" representing the +// client's request for the GetComplianceSummaryByResourceType operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetComplianceSummaryByResourceType method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetComplianceSummaryByResourceTypeRequest method. +// req, resp := client.GetComplianceSummaryByResourceTypeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ConfigService) GetComplianceSummaryByResourceTypeRequest(input *GetComplianceSummaryByResourceTypeInput) (req *request.Request, output *GetComplianceSummaryByResourceTypeOutput) { + op := &request.Operation{ + Name: opGetComplianceSummaryByResourceType, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetComplianceSummaryByResourceTypeInput{} + } + + req = c.newRequest(op, input, output) + output = &GetComplianceSummaryByResourceTypeOutput{} + req.Data = output + return +} + +// Returns the number of resources that are compliant and the number that are +// noncompliant. You can specify one or more resource types to get these numbers +// for each resource type. The maximum number returned is 100. +func (c *ConfigService) GetComplianceSummaryByResourceType(input *GetComplianceSummaryByResourceTypeInput) (*GetComplianceSummaryByResourceTypeOutput, error) { + req, out := c.GetComplianceSummaryByResourceTypeRequest(input) + err := req.Send() + return out, err +} + +const opGetResourceConfigHistory = "GetResourceConfigHistory" + +// GetResourceConfigHistoryRequest generates a "aws/request.Request" representing the +// client's request for the GetResourceConfigHistory operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the GetResourceConfigHistory method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the GetResourceConfigHistoryRequest method.
+//    req, resp := client.GetResourceConfigHistoryRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *ConfigService) GetResourceConfigHistoryRequest(input *GetResourceConfigHistoryInput) (req *request.Request, output *GetResourceConfigHistoryOutput) {
+    op := &request.Operation{
+        Name:       opGetResourceConfigHistory,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+        Paginator: &request.Paginator{
+            InputTokens:     []string{"nextToken"},
+            OutputTokens:    []string{"nextToken"},
+            LimitToken:      "limit",
+            TruncationToken: "",
+        },
+    }
+
+    if input == nil {
+        input = &GetResourceConfigHistoryInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &GetResourceConfigHistoryOutput{}
+    req.Data = output
+    return
+}
+
+// Returns a list of configuration items for the specified resource. The list
+// contains details about each state of the resource during the specified time
+// interval.
+//
+// The response is paginated, and by default, AWS Config returns a limit of
+// 10 configuration items per page. You can customize this number with the limit
+// parameter. The response includes a nextToken string, and to get the next
+// page of results, run the request again and enter this string for the nextToken
+// parameter.
+//
+// Each call to the API is limited to a span of seven days. It is likely that
+// the number of records returned is smaller than the specified limit. In such
+// cases, you can make another call, using the nextToken.
+func (c *ConfigService) GetResourceConfigHistory(input *GetResourceConfigHistoryInput) (*GetResourceConfigHistoryOutput, error) {
+    req, out := c.GetResourceConfigHistoryRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+// GetResourceConfigHistoryPages iterates over the pages of a GetResourceConfigHistory operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See GetResourceConfigHistory method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+//    // Example iterating over at most 3 pages of a GetResourceConfigHistory operation.
+// pageNum := 0 +// err := client.GetResourceConfigHistoryPages(params, +// func(page *GetResourceConfigHistoryOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ConfigService) GetResourceConfigHistoryPages(input *GetResourceConfigHistoryInput, fn func(p *GetResourceConfigHistoryOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.GetResourceConfigHistoryRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*GetResourceConfigHistoryOutput), lastPage) + }) +} + +const opListDiscoveredResources = "ListDiscoveredResources" + +// ListDiscoveredResourcesRequest generates a "aws/request.Request" representing the +// client's request for the ListDiscoveredResources operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListDiscoveredResources method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListDiscoveredResourcesRequest method. +// req, resp := client.ListDiscoveredResourcesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ConfigService) ListDiscoveredResourcesRequest(input *ListDiscoveredResourcesInput) (req *request.Request, output *ListDiscoveredResourcesOutput) { + op := &request.Operation{ + Name: opListDiscoveredResources, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListDiscoveredResourcesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListDiscoveredResourcesOutput{} + req.Data = output + return +} + +// Accepts a resource type and returns a list of resource identifiers for the +// resources of that type. A resource identifier includes the resource type, +// ID, and (if available) the custom resource name. The results consist of resources +// that AWS Config has discovered, including those that AWS Config is not currently +// recording. You can narrow the results to include only resources that have +// specific resource IDs or a resource name. +// +// You can specify either resource IDs or a resource name but not both in +// the same request. +// +// The response is paginated, and by default AWS Config lists 100 resource +// identifiers on each page. You can customize this number with the limit parameter. +// The response includes a nextToken string, and to get the next page of results, +// run the request again and enter this string for the nextToken parameter. +func (c *ConfigService) ListDiscoveredResources(input *ListDiscoveredResourcesInput) (*ListDiscoveredResourcesOutput, error) { + req, out := c.ListDiscoveredResourcesRequest(input) + err := req.Send() + return out, err +} + +const opPutConfigRule = "PutConfigRule" + +// PutConfigRuleRequest generates a "aws/request.Request" representing the +// client's request for the PutConfigRule operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutConfigRule method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutConfigRuleRequest method. +// req, resp := client.PutConfigRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ConfigService) PutConfigRuleRequest(input *PutConfigRuleInput) (req *request.Request, output *PutConfigRuleOutput) { + op := &request.Operation{ + Name: opPutConfigRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutConfigRuleInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutConfigRuleOutput{} + req.Data = output + return +} + +// Adds or updates an AWS Config rule for evaluating whether your AWS resources +// comply with your desired configurations. +// +// You can use this action for customer managed Config rules and AWS managed +// Config rules. A customer managed Config rule is a custom rule that you develop +// and maintain. An AWS managed Config rule is a customizable, predefined rule +// that is provided by AWS Config. +// +// If you are adding a new customer managed Config rule, you must first create +// the AWS Lambda function that the rule invokes to evaluate your resources. +// When you use the PutConfigRule action to add the rule to AWS Config, you +// must specify the Amazon Resource Name (ARN) that AWS Lambda assigns to the +// function. Specify the ARN for the SourceIdentifier key. This key is part +// of the Source object, which is part of the ConfigRule object. +// +// If you are adding a new AWS managed Config rule, specify the rule's identifier +// for the SourceIdentifier key. To reference AWS managed Config rule identifiers, +// see Using AWS Managed Config Rules (http://docs.aws.amazon.com/config/latest/developerguide/evaluate-config_use-managed-rules.html). +// +// For any new rule that you add, specify the ConfigRuleName in the ConfigRule +// object. Do not specify the ConfigRuleArn or the ConfigRuleId. These values +// are generated by AWS Config for new rules. +// +// If you are updating a rule that you have added previously, specify the rule's +// ConfigRuleName, ConfigRuleId, or ConfigRuleArn in the ConfigRule data type +// that you use in this request. +// +// The maximum number of rules that AWS Config supports is 25. +// +// For more information about developing and using AWS Config rules, see Evaluating +// AWS Resource Configurations with AWS Config (http://docs.aws.amazon.com/config/latest/developerguide/evaluate-config.html) +// in the AWS Config Developer Guide. 
+func (c *ConfigService) PutConfigRule(input *PutConfigRuleInput) (*PutConfigRuleOutput, error) {
+    req, out := c.PutConfigRuleRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opPutConfigurationRecorder = "PutConfigurationRecorder"
+
+// PutConfigurationRecorderRequest generates a "aws/request.Request" representing the
+// client's request for the PutConfigurationRecorder operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutConfigurationRecorder method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the PutConfigurationRecorderRequest method.
+//    req, resp := client.PutConfigurationRecorderRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *ConfigService) PutConfigurationRecorderRequest(input *PutConfigurationRecorderInput) (req *request.Request, output *PutConfigurationRecorderOutput) {
+    op := &request.Operation{
+        Name:       opPutConfigurationRecorder,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &PutConfigurationRecorderInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
+    req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+    output = &PutConfigurationRecorderOutput{}
+    req.Data = output
+    return
+}
+
+// Creates a new configuration recorder to record the selected resource configurations.
+//
+// You can use this action to change the role (roleARN) and/or the recordingGroup
+// of an existing recorder. To change the role, call the action on the existing
+// configuration recorder and specify a role.
+//
+// Currently, you can specify only one configuration recorder per account.
+//
+// If ConfigurationRecorder does not have the recordingGroup parameter specified,
+// the default is to record all supported resource types.
+func (c *ConfigService) PutConfigurationRecorder(input *PutConfigurationRecorderInput) (*PutConfigurationRecorderOutput, error) {
+    req, out := c.PutConfigurationRecorderRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opPutDeliveryChannel = "PutDeliveryChannel"
+
+// PutDeliveryChannelRequest generates a "aws/request.Request" representing the
+// client's request for the PutDeliveryChannel operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutDeliveryChannel method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the PutDeliveryChannelRequest method.
+// req, resp := client.PutDeliveryChannelRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ConfigService) PutDeliveryChannelRequest(input *PutDeliveryChannelInput) (req *request.Request, output *PutDeliveryChannelOutput) { + op := &request.Operation{ + Name: opPutDeliveryChannel, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutDeliveryChannelInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutDeliveryChannelOutput{} + req.Data = output + return +} + +// Creates a delivery channel object to deliver configuration information to +// an Amazon S3 bucket and Amazon SNS topic. +// +// Before you can create a delivery channel, you must create a configuration +// recorder. +// +// You can use this action to change the Amazon S3 bucket or an Amazon SNS +// topic of the existing delivery channel. To change the Amazon S3 bucket or +// an Amazon SNS topic, call this action and specify the changed values for +// the S3 bucket and the SNS topic. If you specify a different value for either +// the S3 bucket or the SNS topic, this action will keep the existing value +// for the parameter that is not changed. +// +// You can have only one delivery channel per AWS account. +func (c *ConfigService) PutDeliveryChannel(input *PutDeliveryChannelInput) (*PutDeliveryChannelOutput, error) { + req, out := c.PutDeliveryChannelRequest(input) + err := req.Send() + return out, err +} + +const opPutEvaluations = "PutEvaluations" + +// PutEvaluationsRequest generates a "aws/request.Request" representing the +// client's request for the PutEvaluations operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutEvaluations method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutEvaluationsRequest method. +// req, resp := client.PutEvaluationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ConfigService) PutEvaluationsRequest(input *PutEvaluationsInput) (req *request.Request, output *PutEvaluationsOutput) { + op := &request.Operation{ + Name: opPutEvaluations, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutEvaluationsInput{} + } + + req = c.newRequest(op, input, output) + output = &PutEvaluationsOutput{} + req.Data = output + return +} + +// Used by an AWS Lambda function to deliver evaluation results to AWS Config. +// This action is required in every AWS Lambda function that is invoked by an +// AWS Config rule. 
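+//
+// As a rough sketch of what such a Lambda function might send (assuming the
+// result token and resource details come from the AWS Config event that
+// invoked the function; all literal values and the resultTokenFromEvent
+// variable are illustrative):
+//
+//    _, err := svc.PutEvaluations(&configservice.PutEvaluationsInput{
+//        ResultToken: aws.String(resultTokenFromEvent),
+//        Evaluations: []*configservice.Evaluation{{
+//            ComplianceResourceId:   aws.String("i-1234567890abcdef0"),
+//            ComplianceResourceType: aws.String("AWS::EC2::Instance"),
+//            ComplianceType:         aws.String("COMPLIANT"),
+//            OrderingTimestamp:      aws.Time(time.Now()),
+//        }},
+//    })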
+func (c *ConfigService) PutEvaluations(input *PutEvaluationsInput) (*PutEvaluationsOutput, error) { + req, out := c.PutEvaluationsRequest(input) + err := req.Send() + return out, err +} + +const opStartConfigurationRecorder = "StartConfigurationRecorder" + +// StartConfigurationRecorderRequest generates a "aws/request.Request" representing the +// client's request for the StartConfigurationRecorder operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the StartConfigurationRecorder method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the StartConfigurationRecorderRequest method. +// req, resp := client.StartConfigurationRecorderRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ConfigService) StartConfigurationRecorderRequest(input *StartConfigurationRecorderInput) (req *request.Request, output *StartConfigurationRecorderOutput) { + op := &request.Operation{ + Name: opStartConfigurationRecorder, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartConfigurationRecorderInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &StartConfigurationRecorderOutput{} + req.Data = output + return +} + +// Starts recording configurations of the AWS resources you have selected to +// record in your AWS account. +// +// You must have created at least one delivery channel to successfully start +// the configuration recorder. +func (c *ConfigService) StartConfigurationRecorder(input *StartConfigurationRecorderInput) (*StartConfigurationRecorderOutput, error) { + req, out := c.StartConfigurationRecorderRequest(input) + err := req.Send() + return out, err +} + +const opStopConfigurationRecorder = "StopConfigurationRecorder" + +// StopConfigurationRecorderRequest generates a "aws/request.Request" representing the +// client's request for the StopConfigurationRecorder operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the StopConfigurationRecorder method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the StopConfigurationRecorderRequest method. 
+// req, resp := client.StopConfigurationRecorderRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ConfigService) StopConfigurationRecorderRequest(input *StopConfigurationRecorderInput) (req *request.Request, output *StopConfigurationRecorderOutput) { + op := &request.Operation{ + Name: opStopConfigurationRecorder, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopConfigurationRecorderInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &StopConfigurationRecorderOutput{} + req.Data = output + return +} + +// Stops recording configurations of the AWS resources you have selected to +// record in your AWS account. +func (c *ConfigService) StopConfigurationRecorder(input *StopConfigurationRecorderInput) (*StopConfigurationRecorderOutput, error) { + req, out := c.StopConfigurationRecorderRequest(input) + err := req.Send() + return out, err +} + +// Indicates whether an AWS resource or AWS Config rule is compliant and provides +// the number of contributors that affect the compliance. +type Compliance struct { + _ struct{} `type:"structure"` + + // The number of AWS resources or AWS Config rules that cause a result of NON_COMPLIANT, + // up to a maximum number. + ComplianceContributorCount *ComplianceContributorCount `type:"structure"` + + // Indicates whether an AWS resource or AWS Config rule is compliant. + // + // A resource is compliant if it complies with all of the AWS Config rules + // that evaluate it, and it is noncompliant if it does not comply with one or + // more of these rules. + // + // A rule is compliant if all of the resources that the rule evaluates comply + // with it, and it is noncompliant if any of these resources do not comply. + // + // AWS Config returns the INSUFFICIENT_DATA value when no evaluation results + // are available for the AWS resource or Config rule. + // + // For the Compliance data type, AWS Config supports only COMPLIANT, NON_COMPLIANT, + // and INSUFFICIENT_DATA values. AWS Config does not support the NOT_APPLICABLE + // value for the Compliance data type. + ComplianceType *string `type:"string" enum:"ComplianceType"` +} + +// String returns the string representation +func (s Compliance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Compliance) GoString() string { + return s.String() +} + +// Indicates whether an AWS Config rule is compliant. A rule is compliant if +// all of the resources that the rule evaluated comply with it, and it is noncompliant +// if any of these resources do not comply. +type ComplianceByConfigRule struct { + _ struct{} `type:"structure"` + + // Indicates whether the AWS Config rule is compliant. + Compliance *Compliance `type:"structure"` + + // The name of the AWS Config rule. + ConfigRuleName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ComplianceByConfigRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ComplianceByConfigRule) GoString() string { + return s.String() +} + +// Indicates whether an AWS resource that is evaluated according to one or more +// AWS Config rules is compliant. 
A resource is compliant if it complies with
+// all of the rules that evaluate it, and it is noncompliant if it does not
+// comply with one or more of these rules.
+type ComplianceByResource struct {
+    _ struct{} `type:"structure"`
+
+    // Indicates whether the AWS resource complies with all of the AWS Config rules
+    // that evaluated it.
+    Compliance *Compliance `type:"structure"`
+
+    // The ID of the AWS resource that was evaluated.
+    ResourceId *string `min:"1" type:"string"`
+
+    // The type of the AWS resource that was evaluated.
+    ResourceType *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ComplianceByResource) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ComplianceByResource) GoString() string {
+    return s.String()
+}
+
+// The number of AWS resources or AWS Config rules responsible for the current
+// compliance of the item, up to a maximum number.
+type ComplianceContributorCount struct {
+    _ struct{} `type:"structure"`
+
+    // Indicates whether the maximum count is reached.
+    CapExceeded *bool `type:"boolean"`
+
+    // The number of AWS resources or AWS Config rules responsible for the current
+    // compliance of the item.
+    CappedCount *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s ComplianceContributorCount) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ComplianceContributorCount) GoString() string {
+    return s.String()
+}
+
+// The number of AWS Config rules or AWS resources that are compliant and noncompliant,
+// up to a maximum.
+type ComplianceSummary struct {
+    _ struct{} `type:"structure"`
+
+    // The time that AWS Config created the compliance summary.
+    ComplianceSummaryTimestamp *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+    // The number of AWS Config rules or AWS resources that are compliant, up to
+    // a maximum of 25 for rules and 100 for resources.
+    CompliantResourceCount *ComplianceContributorCount `type:"structure"`
+
+    // The number of AWS Config rules or AWS resources that are noncompliant, up
+    // to a maximum of 25 for rules and 100 for resources.
+    NonCompliantResourceCount *ComplianceContributorCount `type:"structure"`
+}
+
+// String returns the string representation
+func (s ComplianceSummary) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ComplianceSummary) GoString() string {
+    return s.String()
+}
+
+// The number of AWS resources of a specific type that are compliant or noncompliant,
+// up to a maximum of 100 for each compliance state.
+type ComplianceSummaryByResourceType struct {
+    _ struct{} `type:"structure"`
+
+    // The number of AWS resources that are compliant or noncompliant, up to a maximum
+    // of 100 for each compliance state.
+    ComplianceSummary *ComplianceSummary `type:"structure"`
+
+    // The type of AWS resource.
+    ResourceType *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ComplianceSummaryByResourceType) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ComplianceSummaryByResourceType) GoString() string {
+    return s.String()
+}
+
+// A list that contains the status of the delivery of either the snapshot or
+// the configuration history to the specified Amazon S3 bucket.
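+//
+// As a rough sketch of how this status can be reached (assuming a configservice
+// client named svc; nil checks on the delivery-info fields are elided for
+// brevity):
+//
+//    out, err := svc.DescribeDeliveryChannelStatus(&configservice.DescribeDeliveryChannelStatusInput{})
+//    if err == nil {
+//        for _, st := range out.DeliveryChannelsStatus {
+//            fmt.Println(st.ConfigSnapshotDeliveryInfo.LastStatus)
+//        }
+//    }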
+type ConfigExportDeliveryInfo struct { + _ struct{} `type:"structure"` + + // The time of the last attempted delivery. + LastAttemptTime *time.Time `locationName:"lastAttemptTime" type:"timestamp" timestampFormat:"unix"` + + // The error code from the last attempted delivery. + LastErrorCode *string `locationName:"lastErrorCode" type:"string"` + + // The error message from the last attempted delivery. + LastErrorMessage *string `locationName:"lastErrorMessage" type:"string"` + + // Status of the last attempted delivery. + LastStatus *string `locationName:"lastStatus" type:"string" enum:"DeliveryStatus"` + + // The time of the last successful delivery. + LastSuccessfulTime *time.Time `locationName:"lastSuccessfulTime" type:"timestamp" timestampFormat:"unix"` + + // The time that the next delivery occurs. + NextDeliveryTime *time.Time `locationName:"nextDeliveryTime" type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s ConfigExportDeliveryInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfigExportDeliveryInfo) GoString() string { + return s.String() +} + +// An AWS Lambda function that evaluates configuration items to assess whether +// your AWS resources comply with your desired configurations. This function +// can run when AWS Config detects a configuration change to an AWS resource, +// or when it delivers a configuration snapshot of the resources in the account. +// +// For more information about developing and using AWS Config rules, see Evaluating +// AWS Resource Configurations with AWS Config (http://docs.aws.amazon.com/config/latest/developerguide/evaluate-config.html) +// in the AWS Config Developer Guide. +type ConfigRule struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the AWS Config rule. + ConfigRuleArn *string `type:"string"` + + // The ID of the AWS Config rule. + ConfigRuleId *string `type:"string"` + + // The name that you assign to the AWS Config rule. The name is required if + // you are adding a new rule. + ConfigRuleName *string `min:"1" type:"string"` + + // Indicates whether the AWS Config rule is active or currently being deleted + // by AWS Config. + // + // AWS Config sets the state of a rule to DELETING temporarily after you use + // the DeleteConfigRule request to delete the rule. After AWS Config finishes + // deleting a rule, the rule and all of its evaluations are erased and no longer + // available. + // + // You cannot add a rule to AWS Config that has the state set to DELETING. + // If you want to delete a rule, you must use the DeleteConfigRule request. + ConfigRuleState *string `type:"string" enum:"ConfigRuleState"` + + // The description that you provide for the AWS Config rule. + Description *string `type:"string"` + + // A string in JSON format that is passed to the AWS Config rule Lambda function. + InputParameters *string `min:"1" type:"string"` + + // The maximum frequency at which the AWS Config rule runs evaluations. + // + // If your rule is periodic, meaning it runs an evaluation when AWS Config + // delivers a configuration snapshot, then it cannot run evaluations more frequently + // than AWS Config delivers the snapshots. For periodic rules, set the value + // of the MaximumExecutionFrequency key to be equal to or greater than the value + // of the deliveryFrequency key, which is part of ConfigSnapshotDeliveryProperties. 
+    // To update the frequency with which AWS Config delivers your snapshots, use
+    // the PutDeliveryChannel action.
+    MaximumExecutionFrequency *string `type:"string" enum:"MaximumExecutionFrequency"`
+
+    // Defines which resources can trigger an evaluation for the rule. The scope
+    // can include one or more resource types, a combination of one resource type
+    // and one resource ID, or a combination of a tag key and value. Specify a scope
+    // to constrain the resources that can trigger an evaluation for the rule. If
+    // you do not specify a scope, evaluations are triggered when any resource in
+    // the recording group changes.
+    Scope *Scope `type:"structure"`
+
+    // Provides the rule owner (AWS or customer), the rule identifier, and the events
+    // that cause the function to evaluate your AWS resources.
+    Source *Source `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s ConfigRule) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ConfigRule) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ConfigRule) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "ConfigRule"}
+    if s.ConfigRuleName != nil && len(*s.ConfigRuleName) < 1 {
+        invalidParams.Add(request.NewErrParamMinLen("ConfigRuleName", 1))
+    }
+    if s.InputParameters != nil && len(*s.InputParameters) < 1 {
+        invalidParams.Add(request.NewErrParamMinLen("InputParameters", 1))
+    }
+    if s.Source == nil {
+        invalidParams.Add(request.NewErrParamRequired("Source"))
+    }
+    if s.Scope != nil {
+        if err := s.Scope.Validate(); err != nil {
+            invalidParams.AddNested("Scope", err.(request.ErrInvalidParams))
+        }
+    }
+    if s.Source != nil {
+        if err := s.Source.Validate(); err != nil {
+            invalidParams.AddNested("Source", err.(request.ErrInvalidParams))
+        }
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// Status information for your AWS managed Config rules. The status includes
+// information such as the last time the rule ran, the last time it failed,
+// and the related error for the last failure.
+//
+// This action does not return status information about customer managed Config
+// rules.
+type ConfigRuleEvaluationStatus struct {
+    _ struct{} `type:"structure"`
+
+    // The Amazon Resource Name (ARN) of the AWS Config rule.
+    ConfigRuleArn *string `type:"string"`
+
+    // The ID of the AWS Config rule.
+    ConfigRuleId *string `type:"string"`
+
+    // The name of the AWS Config rule.
+    ConfigRuleName *string `min:"1" type:"string"`
+
+    // The time that you first activated the AWS Config rule.
+    FirstActivatedTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+    // Indicates whether AWS Config has evaluated your resources against the rule
+    // at least once.
+    //
+    //    * true - AWS Config has evaluated your AWS resources against the rule
+    //    at least once.
+    //
+    //    * false - AWS Config has not finished evaluating your AWS resources
+    //    against the rule at least once.
+    FirstEvaluationStarted *bool `type:"boolean"`
+
+    // The error code that AWS Config returned when the rule last failed.
+    LastErrorCode *string `type:"string"`
+
+    // The error message that AWS Config returned when the rule last failed.
+    LastErrorMessage *string `type:"string"`
+
+    // The time that AWS Config last failed to evaluate your AWS resources against
+    // the rule.
+	LastFailedEvaluationTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+	// The time that AWS Config last failed to invoke the AWS Config rule to evaluate
+	// your AWS resources.
+	LastFailedInvocationTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+	// The time that AWS Config last successfully evaluated your AWS resources against
+	// the rule.
+	LastSuccessfulEvaluationTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+	// The time that AWS Config last successfully invoked the AWS Config rule to
+	// evaluate your AWS resources.
+	LastSuccessfulInvocationTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+}
+
+// String returns the string representation
+func (s ConfigRuleEvaluationStatus) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ConfigRuleEvaluationStatus) GoString() string {
+	return s.String()
+}
+
+// Options for how AWS Config delivers configuration snapshots to the Amazon
+// S3 bucket in your delivery channel.
+type ConfigSnapshotDeliveryProperties struct {
+	_ struct{} `type:"structure"`
+
+	// The frequency with which AWS Config recurringly delivers configuration snapshots.
+	DeliveryFrequency *string `locationName:"deliveryFrequency" type:"string" enum:"MaximumExecutionFrequency"`
+}
+
+// String returns the string representation
+func (s ConfigSnapshotDeliveryProperties) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ConfigSnapshotDeliveryProperties) GoString() string {
+	return s.String()
+}
+
+// A list that contains the status of the delivery of the configuration stream
+// notification to the Amazon SNS topic.
+type ConfigStreamDeliveryInfo struct {
+	_ struct{} `type:"structure"`
+
+	// The error code from the last attempted delivery.
+	LastErrorCode *string `locationName:"lastErrorCode" type:"string"`
+
+	// The error message from the last attempted delivery.
+	LastErrorMessage *string `locationName:"lastErrorMessage" type:"string"`
+
+	// Status of the last attempted delivery.
+	//
+	// Note: Providing an SNS topic on a DeliveryChannel (http://docs.aws.amazon.com/config/latest/APIReference/API_DeliveryChannel.html)
+	// for AWS Config is optional. If the SNS delivery is turned off, the last status
+	// will be Not_Applicable.
+	LastStatus *string `locationName:"lastStatus" type:"string" enum:"DeliveryStatus"`
+
+	// The time from the last status change.
+	LastStatusChangeTime *time.Time `locationName:"lastStatusChangeTime" type:"timestamp" timestampFormat:"unix"`
+}
+
+// String returns the string representation
+func (s ConfigStreamDeliveryInfo) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ConfigStreamDeliveryInfo) GoString() string {
+	return s.String()
+}
+
+// A list that contains detailed configurations of a specified resource.
+//
+// Currently, the list does not contain information about non-AWS components
+// (for example, applications on your Amazon EC2 instances).
+type ConfigurationItem struct {
+	_ struct{} `type:"structure"`
+
+	// The 12-digit AWS account ID associated with the resource.
+	AccountId *string `locationName:"accountId" type:"string"`
+
+	// The Amazon Resource Name (ARN) of the resource.
+	Arn *string `locationName:"arn" type:"string"`
+
+	// The Availability Zone associated with the resource.
+	AvailabilityZone *string `locationName:"availabilityZone" type:"string"`
+
+	// The region where the resource resides.
+	AwsRegion *string `locationName:"awsRegion" type:"string"`
+
+	// The description of the resource configuration.
+	Configuration *string `locationName:"configuration" type:"string"`
+
+	// The time when the configuration recording was initiated.
+	ConfigurationItemCaptureTime *time.Time `locationName:"configurationItemCaptureTime" type:"timestamp" timestampFormat:"unix"`
+
+	// Unique MD5 hash that represents the configuration item's state.
+	//
+	// You can use MD5 hash to compare the states of two or more configuration
+	// items that are associated with the same resource.
+	ConfigurationItemMD5Hash *string `locationName:"configurationItemMD5Hash" type:"string"`
+
+	// The configuration item status.
+	ConfigurationItemStatus *string `locationName:"configurationItemStatus" type:"string" enum:"ConfigurationItemStatus"`
+
+	// An identifier that indicates the ordering of the configuration items of a
+	// resource.
+	ConfigurationStateId *string `locationName:"configurationStateId" type:"string"`
+
+	// A list of CloudTrail event IDs.
+	//
+	// A populated field indicates that the current configuration was initiated
+	// by the events recorded in the CloudTrail log. For more information about
+	// CloudTrail, see What is AWS CloudTrail? (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
+	//
+	// An empty field indicates that the current configuration was not initiated
+	// by any event.
+	RelatedEvents []*string `locationName:"relatedEvents" type:"list"`
+
+	// A list of related AWS resources.
+	Relationships []*Relationship `locationName:"relationships" type:"list"`
+
+	// The time stamp when the resource was created.
+	ResourceCreationTime *time.Time `locationName:"resourceCreationTime" type:"timestamp" timestampFormat:"unix"`
+
+	// The ID of the resource (for example, sg-xxxxxx).
+	ResourceId *string `locationName:"resourceId" type:"string"`
+
+	// The custom name of the resource, if available.
+	ResourceName *string `locationName:"resourceName" type:"string"`
+
+	// The type of AWS resource.
+	ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"`
+
+	// A mapping of key-value tags associated with the resource.
+	Tags map[string]*string `locationName:"tags" type:"map"`
+
+	// The version number of the resource configuration.
+	Version *string `locationName:"version" type:"string"`
+}
+
+// String returns the string representation
+func (s ConfigurationItem) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ConfigurationItem) GoString() string {
+	return s.String()
+}
+
+// An object that represents the recording of configuration changes of an AWS
+// resource.
+type ConfigurationRecorder struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the recorder. By default, AWS Config automatically assigns the
+	// name "default" when creating the configuration recorder. You cannot change
+	// the assigned name.
+	Name *string `locationName:"name" min:"1" type:"string"`
+
+	// Specifies the types of AWS resource for which AWS Config records configuration
+	// changes.
+	RecordingGroup *RecordingGroup `locationName:"recordingGroup" type:"structure"`
+
+	// Amazon Resource Name (ARN) of the IAM role used to describe the AWS resources
+	// associated with the account.
+ RoleARN *string `locationName:"roleARN" type:"string"` +} + +// String returns the string representation +func (s ConfigurationRecorder) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfigurationRecorder) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ConfigurationRecorder) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ConfigurationRecorder"} + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The current status of the configuration recorder. +type ConfigurationRecorderStatus struct { + _ struct{} `type:"structure"` + + // The error code indicating that the recording failed. + LastErrorCode *string `locationName:"lastErrorCode" type:"string"` + + // The message indicating that the recording failed due to an error. + LastErrorMessage *string `locationName:"lastErrorMessage" type:"string"` + + // The time the recorder was last started. + LastStartTime *time.Time `locationName:"lastStartTime" type:"timestamp" timestampFormat:"unix"` + + // The last (previous) status of the recorder. + LastStatus *string `locationName:"lastStatus" type:"string" enum:"RecorderStatus"` + + // The time when the status was last changed. + LastStatusChangeTime *time.Time `locationName:"lastStatusChangeTime" type:"timestamp" timestampFormat:"unix"` + + // The time the recorder was last stopped. + LastStopTime *time.Time `locationName:"lastStopTime" type:"timestamp" timestampFormat:"unix"` + + // The name of the configuration recorder. + Name *string `locationName:"name" type:"string"` + + // Specifies whether the recorder is currently recording or not. + Recording *bool `locationName:"recording" type:"boolean"` +} + +// String returns the string representation +func (s ConfigurationRecorderStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfigurationRecorderStatus) GoString() string { + return s.String() +} + +type DeleteConfigRuleInput struct { + _ struct{} `type:"structure"` + + // The name of the AWS Config rule that you want to delete. + ConfigRuleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteConfigRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteConfigRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
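+// The SDK's request handlers run this same validation before a request is sent,
+// but calling it directly fails fast. A minimal, illustrative sketch (the rule
+// name is hypothetical):
+//
+//	input := &configservice.DeleteConfigRuleInput{ConfigRuleName: aws.String("my-rule")}
+//	if err := input.Validate(); err != nil {
+//		return err // e.g. a missing or empty ConfigRuleName
+//	}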
+func (s *DeleteConfigRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteConfigRuleInput"} + if s.ConfigRuleName == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigRuleName")) + } + if s.ConfigRuleName != nil && len(*s.ConfigRuleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ConfigRuleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteConfigRuleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteConfigRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteConfigRuleOutput) GoString() string { + return s.String() +} + +// The request object for the DeleteConfigurationRecorder action. +type DeleteConfigurationRecorderInput struct { + _ struct{} `type:"structure"` + + // The name of the configuration recorder to be deleted. You can retrieve the + // name of your configuration recorder by using the DescribeConfigurationRecorders + // action. + ConfigurationRecorderName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteConfigurationRecorderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteConfigurationRecorderInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteConfigurationRecorderInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteConfigurationRecorderInput"} + if s.ConfigurationRecorderName == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigurationRecorderName")) + } + if s.ConfigurationRecorderName != nil && len(*s.ConfigurationRecorderName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ConfigurationRecorderName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteConfigurationRecorderOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteConfigurationRecorderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteConfigurationRecorderOutput) GoString() string { + return s.String() +} + +// The input for the DeleteDeliveryChannel action. The action accepts the following +// data in JSON format. +type DeleteDeliveryChannelInput struct { + _ struct{} `type:"structure"` + + // The name of the delivery channel to delete. + DeliveryChannelName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDeliveryChannelInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDeliveryChannelInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteDeliveryChannelInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDeliveryChannelInput"} + if s.DeliveryChannelName == nil { + invalidParams.Add(request.NewErrParamRequired("DeliveryChannelName")) + } + if s.DeliveryChannelName != nil && len(*s.DeliveryChannelName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DeliveryChannelName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteDeliveryChannelOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDeliveryChannelOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDeliveryChannelOutput) GoString() string { + return s.String() +} + +// The input for the DeliverConfigSnapshot action. +type DeliverConfigSnapshotInput struct { + _ struct{} `type:"structure"` + + // The name of the delivery channel through which the snapshot is delivered. + DeliveryChannelName *string `locationName:"deliveryChannelName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeliverConfigSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeliverConfigSnapshotInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeliverConfigSnapshotInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeliverConfigSnapshotInput"} + if s.DeliveryChannelName == nil { + invalidParams.Add(request.NewErrParamRequired("DeliveryChannelName")) + } + if s.DeliveryChannelName != nil && len(*s.DeliveryChannelName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DeliveryChannelName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The output for the DeliverConfigSnapshot action in JSON format. +type DeliverConfigSnapshotOutput struct { + _ struct{} `type:"structure"` + + // The ID of the snapshot that is being created. + ConfigSnapshotId *string `locationName:"configSnapshotId" type:"string"` +} + +// String returns the string representation +func (s DeliverConfigSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeliverConfigSnapshotOutput) GoString() string { + return s.String() +} + +// The channel through which AWS Config delivers notifications and updated configuration +// states. +type DeliveryChannel struct { + _ struct{} `type:"structure"` + + // Options for how AWS Config delivers configuration snapshots to the Amazon + // S3 bucket in your delivery channel. + ConfigSnapshotDeliveryProperties *ConfigSnapshotDeliveryProperties `locationName:"configSnapshotDeliveryProperties" type:"structure"` + + // The name of the delivery channel. By default, AWS Config assigns the name + // "default" when creating the delivery channel. To change the delivery channel + // name, you must use the DeleteDeliveryChannel action to delete your current + // delivery channel, and then you must use the PutDeliveryChannel command to + // create a delivery channel that has the desired name. + Name *string `locationName:"name" min:"1" type:"string"` + + // The name of the Amazon S3 bucket to which AWS Config delivers configuration + // snapshots and configuration history files. 
+ // + // If you specify a bucket that belongs to another AWS account, that bucket + // must have policies that grant access permissions to AWS Config. For more + // information, see Permissions for the Amazon S3 Bucket (http://docs.aws.amazon.com/config/latest/developerguide/s3-bucket-policy.html) + // in the AWS Config Developer Guide. + S3BucketName *string `locationName:"s3BucketName" type:"string"` + + // The prefix for the specified Amazon S3 bucket. + S3KeyPrefix *string `locationName:"s3KeyPrefix" type:"string"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic to which AWS Config + // sends notifications about configuration changes. + // + // If you choose a topic from another account, the topic must have policies + // that grant access permissions to AWS Config. For more information, see Permissions + // for the Amazon SNS Topic (http://docs.aws.amazon.com/config/latest/developerguide/sns-topic-policy.html) + // in the AWS Config Developer Guide. + SnsTopicARN *string `locationName:"snsTopicARN" type:"string"` +} + +// String returns the string representation +func (s DeliveryChannel) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeliveryChannel) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeliveryChannel) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeliveryChannel"} + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The status of a specified delivery channel. +// +// Valid values: Success | Failure +type DeliveryChannelStatus struct { + _ struct{} `type:"structure"` + + // A list that contains the status of the delivery of the configuration history + // to the specified Amazon S3 bucket. + ConfigHistoryDeliveryInfo *ConfigExportDeliveryInfo `locationName:"configHistoryDeliveryInfo" type:"structure"` + + // A list containing the status of the delivery of the snapshot to the specified + // Amazon S3 bucket. + ConfigSnapshotDeliveryInfo *ConfigExportDeliveryInfo `locationName:"configSnapshotDeliveryInfo" type:"structure"` + + // A list containing the status of the delivery of the configuration stream + // notification to the specified Amazon SNS topic. + ConfigStreamDeliveryInfo *ConfigStreamDeliveryInfo `locationName:"configStreamDeliveryInfo" type:"structure"` + + // The name of the delivery channel. + Name *string `locationName:"name" type:"string"` +} + +// String returns the string representation +func (s DeliveryChannelStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeliveryChannelStatus) GoString() string { + return s.String() +} + +type DescribeComplianceByConfigRuleInput struct { + _ struct{} `type:"structure"` + + // Filters the results by compliance. + // + // The allowed values are COMPLIANT, NON_COMPLIANT, and INSUFFICIENT_DATA. + ComplianceTypes []*string `type:"list"` + + // Specify one or more AWS Config rule names to filter the results by rule. + ConfigRuleNames []*string `type:"list"` + + // The nextToken string returned on a previous page that you use to get the + // next page of results in a paginated response. 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeComplianceByConfigRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeComplianceByConfigRuleInput) GoString() string { + return s.String() +} + +type DescribeComplianceByConfigRuleOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether each of the specified AWS Config rules is compliant. + ComplianceByConfigRules []*ComplianceByConfigRule `type:"list"` + + // The string that you use in a subsequent request to get the next page of results + // in a paginated response. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeComplianceByConfigRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeComplianceByConfigRuleOutput) GoString() string { + return s.String() +} + +type DescribeComplianceByResourceInput struct { + _ struct{} `type:"structure"` + + // Filters the results by compliance. + // + // The allowed values are COMPLIANT, NON_COMPLIANT, and INSUFFICIENT_DATA. + ComplianceTypes []*string `type:"list"` + + // The maximum number of evaluation results returned on each page. The default + // is 10. You cannot specify a limit greater than 100. If you specify 0, AWS + // Config uses the default. + Limit *int64 `type:"integer"` + + // The nextToken string returned on a previous page that you use to get the + // next page of results in a paginated response. + NextToken *string `type:"string"` + + // The ID of the AWS resource for which you want compliance information. You + // can specify only one resource ID. If you specify a resource ID, you must + // also specify a type for ResourceType. + ResourceId *string `min:"1" type:"string"` + + // The types of AWS resources for which you want compliance information; for + // example, AWS::EC2::Instance. For this action, you can specify that the resource + // type is an AWS account by specifying AWS::::Account. + ResourceType *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeComplianceByResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeComplianceByResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeComplianceByResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeComplianceByResourceInput"} + if s.ResourceId != nil && len(*s.ResourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) + } + if s.ResourceType != nil && len(*s.ResourceType) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceType", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeComplianceByResourceOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether the specified AWS resource complies with all of the AWS + // Config rules that evaluate it. + ComplianceByResources []*ComplianceByResource `type:"list"` + + // The string that you use in a subsequent request to get the next page of results + // in a paginated response. 
+	NextToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeComplianceByResourceOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeComplianceByResourceOutput) GoString() string {
+	return s.String()
+}
+
+type DescribeConfigRuleEvaluationStatusInput struct {
+	_ struct{} `type:"structure"`
+
+	// The names of the AWS managed Config rules for which you want status information.
+	// If you do not specify any names, AWS Config returns status information for
+	// all AWS managed Config rules that you use.
+	ConfigRuleNames []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeConfigRuleEvaluationStatusInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeConfigRuleEvaluationStatusInput) GoString() string {
+	return s.String()
+}
+
+type DescribeConfigRuleEvaluationStatusOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Status information about your AWS managed Config rules.
+	ConfigRulesEvaluationStatus []*ConfigRuleEvaluationStatus `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeConfigRuleEvaluationStatusOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeConfigRuleEvaluationStatusOutput) GoString() string {
+	return s.String()
+}
+
+type DescribeConfigRulesInput struct {
+	_ struct{} `type:"structure"`
+
+	// The names of the AWS Config rules for which you want details. If you do not
+	// specify any names, AWS Config returns details for all your rules.
+	ConfigRuleNames []*string `type:"list"`
+
+	// The nextToken string returned on a previous page that you use to get the
+	// next page of results in a paginated response.
+	NextToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeConfigRulesInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeConfigRulesInput) GoString() string {
+	return s.String()
+}
+
+type DescribeConfigRulesOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The details about your AWS Config rules.
+	ConfigRules []*ConfigRule `type:"list"`
+
+	// The string that you use in a subsequent request to get the next page of results
+	// in a paginated response.
+	NextToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeConfigRulesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeConfigRulesOutput) GoString() string {
+	return s.String()
+}
+
+// The input for the DescribeConfigurationRecorderStatus action.
+type DescribeConfigurationRecorderStatusInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name(s) of the configuration recorder. If the name is not specified,
+	// the action returns the current status of all the configuration recorders
+	// associated with the account.
+	ConfigurationRecorderNames []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeConfigurationRecorderStatusInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeConfigurationRecorderStatusInput) GoString() string {
+	return s.String()
+}
+
+// The output for the DescribeConfigurationRecorderStatus action in JSON format.
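+//
+// A hedged usage sketch (assumes an already-configured client svc of type
+// *ConfigService; error handling elided):
+//
+//	out, _ := svc.DescribeConfigurationRecorderStatus(
+//		&configservice.DescribeConfigurationRecorderStatusInput{})
+//	for _, status := range out.ConfigurationRecordersStatus {
+//		fmt.Printf("%s recording=%v\n", aws.StringValue(status.Name), aws.BoolValue(status.Recording))
+//	}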
+type DescribeConfigurationRecorderStatusOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A list that contains the status of the specified recorders.
+	ConfigurationRecordersStatus []*ConfigurationRecorderStatus `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeConfigurationRecorderStatusOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeConfigurationRecorderStatusOutput) GoString() string {
+	return s.String()
+}
+
+// The input for the DescribeConfigurationRecorders action.
+type DescribeConfigurationRecordersInput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of configuration recorder names.
+	ConfigurationRecorderNames []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeConfigurationRecordersInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeConfigurationRecordersInput) GoString() string {
+	return s.String()
+}
+
+// The output for the DescribeConfigurationRecorders action.
+type DescribeConfigurationRecordersOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A list that contains the descriptions of the specified configuration recorders.
+	ConfigurationRecorders []*ConfigurationRecorder `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeConfigurationRecordersOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeConfigurationRecordersOutput) GoString() string {
+	return s.String()
+}
+
+// The input for the DescribeDeliveryChannelStatus action.
+type DescribeDeliveryChannelStatusInput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of delivery channel names.
+	DeliveryChannelNames []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeDeliveryChannelStatusInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeDeliveryChannelStatusInput) GoString() string {
+	return s.String()
+}
+
+// The output for the DescribeDeliveryChannelStatus action.
+type DescribeDeliveryChannelStatusOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A list that contains the status of a specified delivery channel.
+	DeliveryChannelsStatus []*DeliveryChannelStatus `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeDeliveryChannelStatusOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeDeliveryChannelStatusOutput) GoString() string {
+	return s.String()
+}
+
+// The input for the DescribeDeliveryChannels action.
+type DescribeDeliveryChannelsInput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of delivery channel names.
+	DeliveryChannelNames []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeDeliveryChannelsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeDeliveryChannelsInput) GoString() string {
+	return s.String()
+}
+
+// The output for the DescribeDeliveryChannels action.
+type DescribeDeliveryChannelsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A list that contains the descriptions of the specified delivery channels.
+ DeliveryChannels []*DeliveryChannel `type:"list"` +} + +// String returns the string representation +func (s DescribeDeliveryChannelsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDeliveryChannelsOutput) GoString() string { + return s.String() +} + +// Identifies an AWS resource and indicates whether it complies with the AWS +// Config rule that it was evaluated against. +type Evaluation struct { + _ struct{} `type:"structure"` + + // Supplementary information about how the evaluation determined the compliance. + Annotation *string `min:"1" type:"string"` + + // The ID of the AWS resource that was evaluated. + ComplianceResourceId *string `min:"1" type:"string" required:"true"` + + // The type of AWS resource that was evaluated. + ComplianceResourceType *string `min:"1" type:"string" required:"true"` + + // Indicates whether the AWS resource complies with the AWS Config rule that + // it was evaluated against. + // + // For the Evaluation data type, AWS Config supports only the COMPLIANT, NON_COMPLIANT, + // and NOT_APPLICABLE values. AWS Config does not support the INSUFFICIENT_DATA + // value for this data type. + // + // Similarly, AWS Config does not accept INSUFFICIENT_DATA as the value for + // ComplianceType from a PutEvaluations request. For example, an AWS Lambda + // function for a custom Config rule cannot pass an INSUFFICIENT_DATA value + // to AWS Config. + ComplianceType *string `type:"string" required:"true" enum:"ComplianceType"` + + // The time of the event in AWS Config that triggered the evaluation. For event-based + // evaluations, the time indicates when AWS Config created the configuration + // item that triggered the evaluation. For periodic evaluations, the time indicates + // when AWS Config delivered the configuration snapshot that triggered the evaluation. + OrderingTimestamp *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` +} + +// String returns the string representation +func (s Evaluation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Evaluation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Evaluation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Evaluation"} + if s.Annotation != nil && len(*s.Annotation) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Annotation", 1)) + } + if s.ComplianceResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ComplianceResourceId")) + } + if s.ComplianceResourceId != nil && len(*s.ComplianceResourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ComplianceResourceId", 1)) + } + if s.ComplianceResourceType == nil { + invalidParams.Add(request.NewErrParamRequired("ComplianceResourceType")) + } + if s.ComplianceResourceType != nil && len(*s.ComplianceResourceType) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ComplianceResourceType", 1)) + } + if s.ComplianceType == nil { + invalidParams.Add(request.NewErrParamRequired("ComplianceType")) + } + if s.OrderingTimestamp == nil { + invalidParams.Add(request.NewErrParamRequired("OrderingTimestamp")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The details of an AWS Config evaluation. Provides the AWS resource that was +// evaluated, the compliance of the resource, related timestamps, and supplementary +// information. 
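+//
+// For illustration, the results returned by GetComplianceDetailsByConfigRule
+// can be inspected like this (sketch; client construction and paging omitted):
+//
+//	out, err := svc.GetComplianceDetailsByConfigRule(input)
+//	if err != nil {
+//		return err
+//	}
+//	for _, r := range out.EvaluationResults {
+//		fmt.Println(aws.StringValue(r.ComplianceType), aws.TimeValue(r.ResultRecordedTime))
+//	}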
+type EvaluationResult struct { + _ struct{} `type:"structure"` + + // Supplementary information about how the evaluation determined the compliance. + Annotation *string `min:"1" type:"string"` + + // Indicates whether the AWS resource complies with the AWS Config rule that + // evaluated it. + // + // For the EvaluationResult data type, AWS Config supports only the COMPLIANT, + // NON_COMPLIANT, and NOT_APPLICABLE values. AWS Config does not support the + // INSUFFICIENT_DATA value for the EvaluationResult data type. + ComplianceType *string `type:"string" enum:"ComplianceType"` + + // The time when the AWS Config rule evaluated the AWS resource. + ConfigRuleInvokedTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Uniquely identifies the evaluation result. + EvaluationResultIdentifier *EvaluationResultIdentifier `type:"structure"` + + // The time when AWS Config recorded the evaluation result. + ResultRecordedTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // An encrypted token that associates an evaluation with an AWS Config rule. + // The token identifies the rule, the AWS resource being evaluated, and the + // event that triggered the evaluation. + ResultToken *string `type:"string"` +} + +// String returns the string representation +func (s EvaluationResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EvaluationResult) GoString() string { + return s.String() +} + +// Uniquely identifies an evaluation result. +type EvaluationResultIdentifier struct { + _ struct{} `type:"structure"` + + // Identifies an AWS Config rule used to evaluate an AWS resource, and provides + // the type and ID of the evaluated resource. + EvaluationResultQualifier *EvaluationResultQualifier `type:"structure"` + + // The time of the event that triggered the evaluation of your AWS resources. + // The time can indicate when AWS Config delivered a configuration item change + // notification, or it can indicate when AWS Config delivered the configuration + // snapshot, depending on which event triggered the evaluation. + OrderingTimestamp *time.Time `type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s EvaluationResultIdentifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EvaluationResultIdentifier) GoString() string { + return s.String() +} + +// Identifies an AWS Config rule that evaluated an AWS resource, and provides +// the type and ID of the resource that the rule evaluated. +type EvaluationResultQualifier struct { + _ struct{} `type:"structure"` + + // The name of the AWS Config rule that was used in the evaluation. + ConfigRuleName *string `min:"1" type:"string"` + + // The ID of the evaluated AWS resource. + ResourceId *string `min:"1" type:"string"` + + // The type of AWS resource that was evaluated. + ResourceType *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s EvaluationResultQualifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EvaluationResultQualifier) GoString() string { + return s.String() +} + +type GetComplianceDetailsByConfigRuleInput struct { + _ struct{} `type:"structure"` + + // Filters the results by compliance. + // + // The allowed values are COMPLIANT, NON_COMPLIANT, and NOT_APPLICABLE. 
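+	//
+	// For illustration, a filter for noncompliant results only:
+	//
+	//	ComplianceTypes: aws.StringSlice([]string{"NON_COMPLIANT"})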
+ ComplianceTypes []*string `type:"list"` + + // The name of the AWS Config rule for which you want compliance information. + ConfigRuleName *string `min:"1" type:"string" required:"true"` + + // The maximum number of evaluation results returned on each page. The default + // is 10. You cannot specify a limit greater than 100. If you specify 0, AWS + // Config uses the default. + Limit *int64 `type:"integer"` + + // The nextToken string returned on a previous page that you use to get the + // next page of results in a paginated response. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s GetComplianceDetailsByConfigRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetComplianceDetailsByConfigRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetComplianceDetailsByConfigRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetComplianceDetailsByConfigRuleInput"} + if s.ConfigRuleName == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigRuleName")) + } + if s.ConfigRuleName != nil && len(*s.ConfigRuleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ConfigRuleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetComplianceDetailsByConfigRuleOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether the AWS resource complies with the specified AWS Config + // rule. + EvaluationResults []*EvaluationResult `type:"list"` + + // The string that you use in a subsequent request to get the next page of results + // in a paginated response. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s GetComplianceDetailsByConfigRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetComplianceDetailsByConfigRuleOutput) GoString() string { + return s.String() +} + +type GetComplianceDetailsByResourceInput struct { + _ struct{} `type:"structure"` + + // Filters the results by compliance. + // + // The allowed values are COMPLIANT, NON_COMPLIANT, and NOT_APPLICABLE. + ComplianceTypes []*string `type:"list"` + + // The nextToken string returned on a previous page that you use to get the + // next page of results in a paginated response. + NextToken *string `type:"string"` + + // The ID of the AWS resource for which you want compliance information. + ResourceId *string `min:"1" type:"string" required:"true"` + + // The type of the AWS resource for which you want compliance information. + ResourceType *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetComplianceDetailsByResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetComplianceDetailsByResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetComplianceDetailsByResourceInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetComplianceDetailsByResourceInput"}
+	if s.ResourceId == nil {
+		invalidParams.Add(request.NewErrParamRequired("ResourceId"))
+	}
+	if s.ResourceId != nil && len(*s.ResourceId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1))
+	}
+	if s.ResourceType == nil {
+		invalidParams.Add(request.NewErrParamRequired("ResourceType"))
+	}
+	if s.ResourceType != nil && len(*s.ResourceType) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ResourceType", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type GetComplianceDetailsByResourceOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Indicates whether the specified AWS resource complies with each AWS Config
+	// rule.
+	EvaluationResults []*EvaluationResult `type:"list"`
+
+	// The string that you use in a subsequent request to get the next page of results
+	// in a paginated response.
+	NextToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s GetComplianceDetailsByResourceOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetComplianceDetailsByResourceOutput) GoString() string {
+	return s.String()
+}
+
+type GetComplianceSummaryByConfigRuleInput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetComplianceSummaryByConfigRuleInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetComplianceSummaryByConfigRuleInput) GoString() string {
+	return s.String()
+}
+
+type GetComplianceSummaryByConfigRuleOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The number of AWS Config rules that are compliant and the number that are
+	// noncompliant, up to a maximum of 25 for each.
+	ComplianceSummary *ComplianceSummary `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetComplianceSummaryByConfigRuleOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetComplianceSummaryByConfigRuleOutput) GoString() string {
+	return s.String()
+}
+
+type GetComplianceSummaryByResourceTypeInput struct {
+	_ struct{} `type:"structure"`
+
+	// Specify one or more resource types to get the number of resources that are
+	// compliant and the number that are noncompliant for each resource type.
+	//
+	// For this request, you can specify an AWS resource type such as AWS::EC2::Instance,
+	// and you can specify that the resource type is an AWS account by specifying
+	// AWS::::Account.
+	ResourceTypes []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s GetComplianceSummaryByResourceTypeInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetComplianceSummaryByResourceTypeInput) GoString() string {
+	return s.String()
+}
+
+type GetComplianceSummaryByResourceTypeOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The number of resources that are compliant and the number that are noncompliant.
+	// If one or more resource types were provided with the request, the numbers
+	// are returned for each resource type. The maximum number returned is 100.
+	ComplianceSummariesByResourceType []*ComplianceSummaryByResourceType `type:"list"`
+}
+
+// String returns the string representation
+func (s GetComplianceSummaryByResourceTypeOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetComplianceSummaryByResourceTypeOutput) GoString() string {
+	return s.String()
+}
+
+// The input for the GetResourceConfigHistory action.
+type GetResourceConfigHistoryInput struct {
+	_ struct{} `type:"structure"`
+
+	// The chronological order for configuration items listed. By default, the results
+	// are listed in reverse chronological order.
+	ChronologicalOrder *string `locationName:"chronologicalOrder" type:"string" enum:"ChronologicalOrder"`
+
+	// The time stamp that indicates an earlier time. If not specified, the action
+	// returns paginated results that contain configuration items that start from
+	// when the first configuration item was recorded.
+	EarlierTime *time.Time `locationName:"earlierTime" type:"timestamp" timestampFormat:"unix"`
+
+	// The time stamp that indicates a later time. If not specified, the current
+	// time is used.
+	LaterTime *time.Time `locationName:"laterTime" type:"timestamp" timestampFormat:"unix"`
+
+	// The maximum number of configuration items returned on each page. The default
+	// is 10. You cannot specify a limit greater than 100. If you specify 0, AWS
+	// Config uses the default.
+	Limit *int64 `locationName:"limit" type:"integer"`
+
+	// The nextToken string returned on a previous page that you use to get the
+	// next page of results in a paginated response.
+	NextToken *string `locationName:"nextToken" type:"string"`
+
+	// The ID of the resource (for example, sg-xxxxxx).
+	ResourceId *string `locationName:"resourceId" type:"string" required:"true"`
+
+	// The resource type.
+	ResourceType *string `locationName:"resourceType" type:"string" required:"true" enum:"ResourceType"`
+}
+
+// String returns the string representation
+func (s GetResourceConfigHistoryInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetResourceConfigHistoryInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetResourceConfigHistoryInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetResourceConfigHistoryInput"}
+	if s.ResourceId == nil {
+		invalidParams.Add(request.NewErrParamRequired("ResourceId"))
+	}
+	if s.ResourceType == nil {
+		invalidParams.Add(request.NewErrParamRequired("ResourceType"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The output for the GetResourceConfigHistory action.
+type GetResourceConfigHistoryOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A list that contains the configuration history of one or more resources.
+	ConfigurationItems []*ConfigurationItem `locationName:"configurationItems" type:"list"`
+
+	// The string that you use in a subsequent request to get the next page of results
+	// in a paginated response.
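+	//
+	// A manual paging sketch (assumes input is a validated *GetResourceConfigHistoryInput
+	// and svc is a configured client):
+	//
+	//	var items []*configservice.ConfigurationItem
+	//	for {
+	//		page, err := svc.GetResourceConfigHistory(input)
+	//		if err != nil {
+	//			return err
+	//		}
+	//		items = append(items, page.ConfigurationItems...)
+	//		if page.NextToken == nil {
+	//			break
+	//		}
+	//		input.NextToken = page.NextToken
+	//	}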
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s GetResourceConfigHistoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetResourceConfigHistoryOutput) GoString() string { + return s.String() +} + +type ListDiscoveredResourcesInput struct { + _ struct{} `type:"structure"` + + // Specifies whether AWS Config includes deleted resources in the results. By + // default, deleted resources are not included. + IncludeDeletedResources *bool `locationName:"includeDeletedResources" type:"boolean"` + + // The maximum number of resource identifiers returned on each page. The default + // is 100. You cannot specify a limit greater than 100. If you specify 0, AWS + // Config uses the default. + Limit *int64 `locationName:"limit" type:"integer"` + + // The nextToken string returned on a previous page that you use to get the + // next page of results in a paginated response. + NextToken *string `locationName:"nextToken" type:"string"` + + // The IDs of only those resources that you want AWS Config to list in the response. + // If you do not specify this parameter, AWS Config lists all resources of the + // specified type that it has discovered. + ResourceIds []*string `locationName:"resourceIds" type:"list"` + + // The custom name of only those resources that you want AWS Config to list + // in the response. If you do not specify this parameter, AWS Config lists all + // resources of the specified type that it has discovered. + ResourceName *string `locationName:"resourceName" type:"string"` + + // The type of resources that you want AWS Config to list in the response. + ResourceType *string `locationName:"resourceType" type:"string" required:"true" enum:"ResourceType"` +} + +// String returns the string representation +func (s ListDiscoveredResourcesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDiscoveredResourcesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListDiscoveredResourcesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListDiscoveredResourcesInput"} + if s.ResourceType == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListDiscoveredResourcesOutput struct { + _ struct{} `type:"structure"` + + // The string that you use in a subsequent request to get the next page of results + // in a paginated response. + NextToken *string `locationName:"nextToken" type:"string"` + + // The details that identify a resource that is discovered by AWS Config, including + // the resource type, ID, and (if available) the custom resource name. + ResourceIdentifiers []*ResourceIdentifier `locationName:"resourceIdentifiers" type:"list"` +} + +// String returns the string representation +func (s ListDiscoveredResourcesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDiscoveredResourcesOutput) GoString() string { + return s.String() +} + +type PutConfigRuleInput struct { + _ struct{} `type:"structure"` + + // An AWS Lambda function that evaluates configuration items to assess whether + // your AWS resources comply with your desired configurations. 
This function + // can run when AWS Config detects a configuration change to an AWS resource, + // or when it delivers a configuration snapshot of the resources in the account. + // + // For more information about developing and using AWS Config rules, see Evaluating + // AWS Resource Configurations with AWS Config (http://docs.aws.amazon.com/config/latest/developerguide/evaluate-config.html) + // in the AWS Config Developer Guide. + ConfigRule *ConfigRule `type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutConfigRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutConfigRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutConfigRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutConfigRuleInput"} + if s.ConfigRule == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigRule")) + } + if s.ConfigRule != nil { + if err := s.ConfigRule.Validate(); err != nil { + invalidParams.AddNested("ConfigRule", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutConfigRuleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutConfigRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutConfigRuleOutput) GoString() string { + return s.String() +} + +// The input for the PutConfigurationRecorder action. +type PutConfigurationRecorderInput struct { + _ struct{} `type:"structure"` + + // The configuration recorder object that records each configuration change + // made to the resources. + ConfigurationRecorder *ConfigurationRecorder `type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutConfigurationRecorderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutConfigurationRecorderInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutConfigurationRecorderInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutConfigurationRecorderInput"} + if s.ConfigurationRecorder == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigurationRecorder")) + } + if s.ConfigurationRecorder != nil { + if err := s.ConfigurationRecorder.Validate(); err != nil { + invalidParams.AddNested("ConfigurationRecorder", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutConfigurationRecorderOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutConfigurationRecorderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutConfigurationRecorderOutput) GoString() string { + return s.String() +} + +// The input for the PutDeliveryChannel action. +type PutDeliveryChannelInput struct { + _ struct{} `type:"structure"` + + // The configuration delivery channel object that delivers the configuration + // information to an Amazon S3 bucket, and to an Amazon SNS topic. 
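+	//
+	// An illustrative value (bucket and topic names are hypothetical):
+	//
+	//	&configservice.DeliveryChannel{
+	//		Name:         aws.String("default"),
+	//		S3BucketName: aws.String("my-config-bucket"),
+	//		SnsTopicARN:  aws.String("arn:aws:sns:us-east-1:123456789012:config-topic"),
+	//	}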
+ DeliveryChannel *DeliveryChannel `type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutDeliveryChannelInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutDeliveryChannelInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutDeliveryChannelInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutDeliveryChannelInput"} + if s.DeliveryChannel == nil { + invalidParams.Add(request.NewErrParamRequired("DeliveryChannel")) + } + if s.DeliveryChannel != nil { + if err := s.DeliveryChannel.Validate(); err != nil { + invalidParams.AddNested("DeliveryChannel", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutDeliveryChannelOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutDeliveryChannelOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutDeliveryChannelOutput) GoString() string { + return s.String() +} + +type PutEvaluationsInput struct { + _ struct{} `type:"structure"` + + // The assessments that the AWS Lambda function performs. Each evaluation identifies + // an AWS resource and indicates whether it complies with the AWS Config rule + // that invokes the AWS Lambda function. + Evaluations []*Evaluation `type:"list"` + + // An encrypted token that associates an evaluation with an AWS Config rule. + // Identifies the rule and the event that triggered the evaluation + ResultToken *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s PutEvaluationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutEvaluationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutEvaluationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutEvaluationsInput"} + if s.ResultToken == nil { + invalidParams.Add(request.NewErrParamRequired("ResultToken")) + } + if s.Evaluations != nil { + for i, v := range s.Evaluations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Evaluations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutEvaluationsOutput struct { + _ struct{} `type:"structure"` + + // Requests that failed because of a client or server error. + FailedEvaluations []*Evaluation `type:"list"` +} + +// String returns the string representation +func (s PutEvaluationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutEvaluationsOutput) GoString() string { + return s.String() +} + +// Specifies the types of AWS resource for which AWS Config records configuration +// changes. +// +// In the recording group, you specify whether all supported types or specific +// types of resources are recorded. +// +// By default, AWS Config records configuration changes for all supported types +// of regional resources that AWS Config discovers in the region in which it +// is running. Regional resources are tied to a region and can be used only +// in that region. 
Examples of regional resources are EC2 instances and EBS
+// volumes.
+//
+// You can also have AWS Config record configuration changes for supported
+// types of global resources (for example, IAM resources). Global resources
+// are not tied to an individual region and can be used in all regions.
+//
+// The configuration details for any global resource are the same in all regions.
+// If you customize AWS Config in multiple regions to record global resources,
+// it will create multiple configuration items each time a global resource changes:
+// one configuration item for each region. These configuration items will contain
+// identical data. To prevent duplicate configuration items, you should consider
+// customizing AWS Config in only one region to record global resources, unless
+// you want the configuration items to be available in multiple regions.
+//
+// If you don't want AWS Config to record all resources, you can specify which
+// types of resources it will record with the resourceTypes parameter.
+//
+// For a list of supported resource types, see Supported resource types (http://docs.aws.amazon.com/config/latest/developerguide/resource-config-reference.html#supported-resources).
+//
+// For more information, see Selecting Which Resources AWS Config Records (http://docs.aws.amazon.com/config/latest/developerguide/select-resources.html).
+type RecordingGroup struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether AWS Config records configuration changes for every supported
+ // type of regional resource.
+ //
+ // If you set this option to true, when AWS Config adds support for a new type
+ // of regional resource, it automatically starts recording resources of that
+ // type.
+ //
+ // If you set this option to true, you cannot enumerate a list of resourceTypes.
+ AllSupported *bool `locationName:"allSupported" type:"boolean"`
+
+ // Specifies whether AWS Config includes all supported types of global resources
+ // (for example, IAM resources) with the resources that it records.
+ //
+ // Before you can set this option to true, you must set the allSupported option
+ // to true.
+ //
+ // If you set this option to true, when AWS Config adds support for a new type
+ // of global resource, it automatically starts recording resources of that type.
+ //
+ // The configuration details for any global resource are the same in all regions.
+ // To prevent duplicate configuration items, you should consider customizing
+ // AWS Config in only one region to record global resources.
+ IncludeGlobalResourceTypes *bool `locationName:"includeGlobalResourceTypes" type:"boolean"`
+
+ // A comma-separated list that specifies the types of AWS resources for which
+ // AWS Config records configuration changes (for example, AWS::EC2::Instance
+ // or AWS::CloudTrail::Trail).
+ //
+ // Before you can set this field, you must set the allSupported option to false.
+ //
+ // If you set this field, when AWS Config adds support for a new type of resource,
+ // it will not record resources of that type unless you manually add that type
+ // to your recording group.
+ //
+ // For a list of valid resourceTypes values, see the resourceType Value column
+ // in Supported AWS Resource Types (http://docs.aws.amazon.com/config/latest/developerguide/resource-config-reference.html#supported-resources).
+ ResourceTypes []*string `locationName:"resourceTypes" type:"list"`
+}
+
+// String returns the string representation
+func (s RecordingGroup) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RecordingGroup) GoString() string {
+ return s.String()
+}
+
+// The relationship of the related resource to the main resource.
+type Relationship struct {
+ _ struct{} `type:"structure"`
+
+ // The type of relationship with the related resource.
+ RelationshipName *string `locationName:"relationshipName" type:"string"`
+
+ // The ID of the related resource (for example, sg-xxxxxx).
+ ResourceId *string `locationName:"resourceId" type:"string"`
+
+ // The custom name of the related resource, if available.
+ ResourceName *string `locationName:"resourceName" type:"string"`
+
+ // The resource type of the related resource.
+ ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"`
+}
+
+// String returns the string representation
+func (s Relationship) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Relationship) GoString() string {
+ return s.String()
+}
+
+// The details that identify a resource that is discovered by AWS Config, including
+// the resource type, ID, and (if available) the custom resource name.
+type ResourceIdentifier struct {
+ _ struct{} `type:"structure"`
+
+ // The time that the resource was deleted.
+ ResourceDeletionTime *time.Time `locationName:"resourceDeletionTime" type:"timestamp" timestampFormat:"unix"`
+
+ // The ID of the resource (for example, sg-xxxxxx).
+ ResourceId *string `locationName:"resourceId" type:"string"`
+
+ // The custom name of the resource (if available).
+ ResourceName *string `locationName:"resourceName" type:"string"`
+
+ // The type of resource.
+ ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"`
+}
+
+// String returns the string representation
+func (s ResourceIdentifier) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ResourceIdentifier) GoString() string {
+ return s.String()
+}
+
+// Defines which resources trigger an evaluation for an AWS Config rule. The
+// scope can include one or more resource types, a combination of a tag key
+// and value, or a combination of one resource type and one resource ID. Specify
+// a scope to constrain which resources trigger an evaluation for a rule. Otherwise,
+// evaluations for the rule are triggered when any resource in your recording
+// group changes in configuration.
+type Scope struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the only AWS resource that you want to trigger an evaluation for
+ // the rule. If you specify a resource ID, you must specify one resource type
+ // for ComplianceResourceTypes.
+ ComplianceResourceId *string `min:"1" type:"string"`
+
+ // The resource types of only those AWS resources that you want to trigger an
+ // evaluation for the rule. You can only specify one type if you also specify
+ // a resource ID for ComplianceResourceId.
+ ComplianceResourceTypes []*string `type:"list"`
+
+ // The tag key that is applied to only those AWS resources that you want to
+ // trigger an evaluation for the rule.
+ TagKey *string `min:"1" type:"string"`
+
+ // The tag value applied to only those AWS resources that you want to trigger
+ // an evaluation for the rule.
If you specify a value for TagValue, you must + // also specify a value for TagKey. + TagValue *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s Scope) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Scope) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Scope) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Scope"} + if s.ComplianceResourceId != nil && len(*s.ComplianceResourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ComplianceResourceId", 1)) + } + if s.TagKey != nil && len(*s.TagKey) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TagKey", 1)) + } + if s.TagValue != nil && len(*s.TagValue) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TagValue", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Provides the AWS Config rule owner (AWS or customer), the rule identifier, +// and the events that trigger the evaluation of your AWS resources. +type Source struct { + _ struct{} `type:"structure"` + + // Indicates whether AWS or the customer owns and manages the AWS Config rule. + Owner *string `type:"string" enum:"Owner"` + + // Provides the source and type of the event that causes AWS Config to evaluate + // your AWS resources. + SourceDetails []*SourceDetail `type:"list"` + + // For AWS managed Config rules, a pre-defined identifier from a list. To reference + // the list, see Using AWS Managed Config Rules (http://docs.aws.amazon.com/config/latest/developerguide/evaluate-config_use-managed-rules.html). + // + // For customer managed Config rules, the identifier is the Amazon Resource + // Name (ARN) of the rule's AWS Lambda function. + SourceIdentifier *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s Source) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Source) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Source) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Source"} + if s.SourceIdentifier != nil && len(*s.SourceIdentifier) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SourceIdentifier", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Provides the source and type of the event that triggers AWS Config to evaluate +// your AWS resources against a rule. +type SourceDetail struct { + _ struct{} `type:"structure"` + + // The source of the event, such as an AWS service, that triggers AWS Config + // to evaluate your AWS resources. + EventSource *string `type:"string" enum:"EventSource"` + + // The type of SNS message that triggers AWS Config to run an evaluation. For + // evaluations that are initiated when AWS Config delivers a configuration item + // change notification, you must use ConfigurationItemChangeNotification. For + // evaluations that are initiated when AWS Config delivers a configuration snapshot, + // you must use ConfigurationSnapshotDeliveryCompleted. 
+ MessageType *string `type:"string" enum:"MessageType"` +} + +// String returns the string representation +func (s SourceDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SourceDetail) GoString() string { + return s.String() +} + +// The input for the StartConfigurationRecorder action. +type StartConfigurationRecorderInput struct { + _ struct{} `type:"structure"` + + // The name of the recorder object that records each configuration change made + // to the resources. + ConfigurationRecorderName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s StartConfigurationRecorderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartConfigurationRecorderInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartConfigurationRecorderInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartConfigurationRecorderInput"} + if s.ConfigurationRecorderName == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigurationRecorderName")) + } + if s.ConfigurationRecorderName != nil && len(*s.ConfigurationRecorderName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ConfigurationRecorderName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type StartConfigurationRecorderOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s StartConfigurationRecorderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartConfigurationRecorderOutput) GoString() string { + return s.String() +} + +// The input for the StopConfigurationRecorder action. +type StopConfigurationRecorderInput struct { + _ struct{} `type:"structure"` + + // The name of the recorder object that records each configuration change made + // to the resources. + ConfigurationRecorderName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s StopConfigurationRecorderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopConfigurationRecorderInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *StopConfigurationRecorderInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StopConfigurationRecorderInput"} + if s.ConfigurationRecorderName == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigurationRecorderName")) + } + if s.ConfigurationRecorderName != nil && len(*s.ConfigurationRecorderName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ConfigurationRecorderName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type StopConfigurationRecorderOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s StopConfigurationRecorderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopConfigurationRecorderOutput) GoString() string { + return s.String() +} + +const ( + // @enum ChronologicalOrder + ChronologicalOrderReverse = "Reverse" + // @enum ChronologicalOrder + ChronologicalOrderForward = "Forward" +) + +const ( + // @enum ComplianceType + ComplianceTypeCompliant = "COMPLIANT" + // @enum ComplianceType + ComplianceTypeNonCompliant = "NON_COMPLIANT" + // @enum ComplianceType + ComplianceTypeNotApplicable = "NOT_APPLICABLE" + // @enum ComplianceType + ComplianceTypeInsufficientData = "INSUFFICIENT_DATA" +) + +const ( + // @enum ConfigRuleState + ConfigRuleStateActive = "ACTIVE" + // @enum ConfigRuleState + ConfigRuleStateDeleting = "DELETING" +) + +const ( + // @enum ConfigurationItemStatus + ConfigurationItemStatusOk = "Ok" + // @enum ConfigurationItemStatus + ConfigurationItemStatusFailed = "Failed" + // @enum ConfigurationItemStatus + ConfigurationItemStatusDiscovered = "Discovered" + // @enum ConfigurationItemStatus + ConfigurationItemStatusDeleted = "Deleted" +) + +const ( + // @enum DeliveryStatus + DeliveryStatusSuccess = "Success" + // @enum DeliveryStatus + DeliveryStatusFailure = "Failure" + // @enum DeliveryStatus + DeliveryStatusNotApplicable = "Not_Applicable" +) + +const ( + // @enum EventSource + EventSourceAwsConfig = "aws.config" +) + +const ( + // @enum MaximumExecutionFrequency + MaximumExecutionFrequencyOneHour = "One_Hour" + // @enum MaximumExecutionFrequency + MaximumExecutionFrequencyThreeHours = "Three_Hours" + // @enum MaximumExecutionFrequency + MaximumExecutionFrequencySixHours = "Six_Hours" + // @enum MaximumExecutionFrequency + MaximumExecutionFrequencyTwelveHours = "Twelve_Hours" + // @enum MaximumExecutionFrequency + MaximumExecutionFrequencyTwentyFourHours = "TwentyFour_Hours" +) + +const ( + // @enum MessageType + MessageTypeConfigurationItemChangeNotification = "ConfigurationItemChangeNotification" + // @enum MessageType + MessageTypeConfigurationSnapshotDeliveryCompleted = "ConfigurationSnapshotDeliveryCompleted" +) + +const ( + // @enum Owner + OwnerCustomLambda = "CUSTOM_LAMBDA" + // @enum Owner + OwnerAws = "AWS" +) + +const ( + // @enum RecorderStatus + RecorderStatusPending = "Pending" + // @enum RecorderStatus + RecorderStatusSuccess = "Success" + // @enum RecorderStatus + RecorderStatusFailure = "Failure" +) + +const ( + // @enum ResourceType + ResourceTypeAwsEc2CustomerGateway = "AWS::EC2::CustomerGateway" + // @enum ResourceType + ResourceTypeAwsEc2Eip = "AWS::EC2::EIP" + // @enum ResourceType + ResourceTypeAwsEc2Host = "AWS::EC2::Host" + // @enum ResourceType + ResourceTypeAwsEc2Instance = "AWS::EC2::Instance" + // @enum ResourceType + ResourceTypeAwsEc2InternetGateway = "AWS::EC2::InternetGateway" + // @enum ResourceType + 
ResourceTypeAwsEc2NetworkAcl = "AWS::EC2::NetworkAcl" + // @enum ResourceType + ResourceTypeAwsEc2NetworkInterface = "AWS::EC2::NetworkInterface" + // @enum ResourceType + ResourceTypeAwsEc2RouteTable = "AWS::EC2::RouteTable" + // @enum ResourceType + ResourceTypeAwsEc2SecurityGroup = "AWS::EC2::SecurityGroup" + // @enum ResourceType + ResourceTypeAwsEc2Subnet = "AWS::EC2::Subnet" + // @enum ResourceType + ResourceTypeAwsCloudTrailTrail = "AWS::CloudTrail::Trail" + // @enum ResourceType + ResourceTypeAwsEc2Volume = "AWS::EC2::Volume" + // @enum ResourceType + ResourceTypeAwsEc2Vpc = "AWS::EC2::VPC" + // @enum ResourceType + ResourceTypeAwsEc2Vpnconnection = "AWS::EC2::VPNConnection" + // @enum ResourceType + ResourceTypeAwsEc2Vpngateway = "AWS::EC2::VPNGateway" + // @enum ResourceType + ResourceTypeAwsIamGroup = "AWS::IAM::Group" + // @enum ResourceType + ResourceTypeAwsIamPolicy = "AWS::IAM::Policy" + // @enum ResourceType + ResourceTypeAwsIamRole = "AWS::IAM::Role" + // @enum ResourceType + ResourceTypeAwsIamUser = "AWS::IAM::User" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/configservice/configserviceiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/configservice/configserviceiface/interface.go new file mode 100644 index 000000000..2930a1196 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/configservice/configserviceiface/interface.go @@ -0,0 +1,112 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package configserviceiface provides an interface for the AWS Config. +package configserviceiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/configservice" +) + +// ConfigServiceAPI is the interface type for configservice.ConfigService. +type ConfigServiceAPI interface { + DeleteConfigRuleRequest(*configservice.DeleteConfigRuleInput) (*request.Request, *configservice.DeleteConfigRuleOutput) + + DeleteConfigRule(*configservice.DeleteConfigRuleInput) (*configservice.DeleteConfigRuleOutput, error) + + DeleteConfigurationRecorderRequest(*configservice.DeleteConfigurationRecorderInput) (*request.Request, *configservice.DeleteConfigurationRecorderOutput) + + DeleteConfigurationRecorder(*configservice.DeleteConfigurationRecorderInput) (*configservice.DeleteConfigurationRecorderOutput, error) + + DeleteDeliveryChannelRequest(*configservice.DeleteDeliveryChannelInput) (*request.Request, *configservice.DeleteDeliveryChannelOutput) + + DeleteDeliveryChannel(*configservice.DeleteDeliveryChannelInput) (*configservice.DeleteDeliveryChannelOutput, error) + + DeliverConfigSnapshotRequest(*configservice.DeliverConfigSnapshotInput) (*request.Request, *configservice.DeliverConfigSnapshotOutput) + + DeliverConfigSnapshot(*configservice.DeliverConfigSnapshotInput) (*configservice.DeliverConfigSnapshotOutput, error) + + DescribeComplianceByConfigRuleRequest(*configservice.DescribeComplianceByConfigRuleInput) (*request.Request, *configservice.DescribeComplianceByConfigRuleOutput) + + DescribeComplianceByConfigRule(*configservice.DescribeComplianceByConfigRuleInput) (*configservice.DescribeComplianceByConfigRuleOutput, error) + + DescribeComplianceByResourceRequest(*configservice.DescribeComplianceByResourceInput) (*request.Request, *configservice.DescribeComplianceByResourceOutput) + + DescribeComplianceByResource(*configservice.DescribeComplianceByResourceInput) (*configservice.DescribeComplianceByResourceOutput, error) + + 
DescribeConfigRuleEvaluationStatusRequest(*configservice.DescribeConfigRuleEvaluationStatusInput) (*request.Request, *configservice.DescribeConfigRuleEvaluationStatusOutput) + + DescribeConfigRuleEvaluationStatus(*configservice.DescribeConfigRuleEvaluationStatusInput) (*configservice.DescribeConfigRuleEvaluationStatusOutput, error) + + DescribeConfigRulesRequest(*configservice.DescribeConfigRulesInput) (*request.Request, *configservice.DescribeConfigRulesOutput) + + DescribeConfigRules(*configservice.DescribeConfigRulesInput) (*configservice.DescribeConfigRulesOutput, error) + + DescribeConfigurationRecorderStatusRequest(*configservice.DescribeConfigurationRecorderStatusInput) (*request.Request, *configservice.DescribeConfigurationRecorderStatusOutput) + + DescribeConfigurationRecorderStatus(*configservice.DescribeConfigurationRecorderStatusInput) (*configservice.DescribeConfigurationRecorderStatusOutput, error) + + DescribeConfigurationRecordersRequest(*configservice.DescribeConfigurationRecordersInput) (*request.Request, *configservice.DescribeConfigurationRecordersOutput) + + DescribeConfigurationRecorders(*configservice.DescribeConfigurationRecordersInput) (*configservice.DescribeConfigurationRecordersOutput, error) + + DescribeDeliveryChannelStatusRequest(*configservice.DescribeDeliveryChannelStatusInput) (*request.Request, *configservice.DescribeDeliveryChannelStatusOutput) + + DescribeDeliveryChannelStatus(*configservice.DescribeDeliveryChannelStatusInput) (*configservice.DescribeDeliveryChannelStatusOutput, error) + + DescribeDeliveryChannelsRequest(*configservice.DescribeDeliveryChannelsInput) (*request.Request, *configservice.DescribeDeliveryChannelsOutput) + + DescribeDeliveryChannels(*configservice.DescribeDeliveryChannelsInput) (*configservice.DescribeDeliveryChannelsOutput, error) + + GetComplianceDetailsByConfigRuleRequest(*configservice.GetComplianceDetailsByConfigRuleInput) (*request.Request, *configservice.GetComplianceDetailsByConfigRuleOutput) + + GetComplianceDetailsByConfigRule(*configservice.GetComplianceDetailsByConfigRuleInput) (*configservice.GetComplianceDetailsByConfigRuleOutput, error) + + GetComplianceDetailsByResourceRequest(*configservice.GetComplianceDetailsByResourceInput) (*request.Request, *configservice.GetComplianceDetailsByResourceOutput) + + GetComplianceDetailsByResource(*configservice.GetComplianceDetailsByResourceInput) (*configservice.GetComplianceDetailsByResourceOutput, error) + + GetComplianceSummaryByConfigRuleRequest(*configservice.GetComplianceSummaryByConfigRuleInput) (*request.Request, *configservice.GetComplianceSummaryByConfigRuleOutput) + + GetComplianceSummaryByConfigRule(*configservice.GetComplianceSummaryByConfigRuleInput) (*configservice.GetComplianceSummaryByConfigRuleOutput, error) + + GetComplianceSummaryByResourceTypeRequest(*configservice.GetComplianceSummaryByResourceTypeInput) (*request.Request, *configservice.GetComplianceSummaryByResourceTypeOutput) + + GetComplianceSummaryByResourceType(*configservice.GetComplianceSummaryByResourceTypeInput) (*configservice.GetComplianceSummaryByResourceTypeOutput, error) + + GetResourceConfigHistoryRequest(*configservice.GetResourceConfigHistoryInput) (*request.Request, *configservice.GetResourceConfigHistoryOutput) + + GetResourceConfigHistory(*configservice.GetResourceConfigHistoryInput) (*configservice.GetResourceConfigHistoryOutput, error) + + GetResourceConfigHistoryPages(*configservice.GetResourceConfigHistoryInput, func(*configservice.GetResourceConfigHistoryOutput, bool) bool) 
error + + ListDiscoveredResourcesRequest(*configservice.ListDiscoveredResourcesInput) (*request.Request, *configservice.ListDiscoveredResourcesOutput) + + ListDiscoveredResources(*configservice.ListDiscoveredResourcesInput) (*configservice.ListDiscoveredResourcesOutput, error) + + PutConfigRuleRequest(*configservice.PutConfigRuleInput) (*request.Request, *configservice.PutConfigRuleOutput) + + PutConfigRule(*configservice.PutConfigRuleInput) (*configservice.PutConfigRuleOutput, error) + + PutConfigurationRecorderRequest(*configservice.PutConfigurationRecorderInput) (*request.Request, *configservice.PutConfigurationRecorderOutput) + + PutConfigurationRecorder(*configservice.PutConfigurationRecorderInput) (*configservice.PutConfigurationRecorderOutput, error) + + PutDeliveryChannelRequest(*configservice.PutDeliveryChannelInput) (*request.Request, *configservice.PutDeliveryChannelOutput) + + PutDeliveryChannel(*configservice.PutDeliveryChannelInput) (*configservice.PutDeliveryChannelOutput, error) + + PutEvaluationsRequest(*configservice.PutEvaluationsInput) (*request.Request, *configservice.PutEvaluationsOutput) + + PutEvaluations(*configservice.PutEvaluationsInput) (*configservice.PutEvaluationsOutput, error) + + StartConfigurationRecorderRequest(*configservice.StartConfigurationRecorderInput) (*request.Request, *configservice.StartConfigurationRecorderOutput) + + StartConfigurationRecorder(*configservice.StartConfigurationRecorderInput) (*configservice.StartConfigurationRecorderOutput, error) + + StopConfigurationRecorderRequest(*configservice.StopConfigurationRecorderInput) (*request.Request, *configservice.StopConfigurationRecorderOutput) + + StopConfigurationRecorder(*configservice.StopConfigurationRecorderInput) (*configservice.StopConfigurationRecorderOutput, error) +} + +var _ ConfigServiceAPI = (*configservice.ConfigService)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/configservice/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/configservice/examples_test.go new file mode 100644 index 000000000..e23834d94 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/configservice/examples_test.go @@ -0,0 +1,590 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package configservice_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/configservice" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleConfigService_DeleteConfigRule() { + svc := configservice.New(session.New()) + + params := &configservice.DeleteConfigRuleInput{ + ConfigRuleName: aws.String("StringWithCharLimit64"), // Required + } + resp, err := svc.DeleteConfigRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleConfigService_DeleteConfigurationRecorder() { + svc := configservice.New(session.New()) + + params := &configservice.DeleteConfigurationRecorderInput{ + ConfigurationRecorderName: aws.String("RecorderName"), // Required + } + resp, err := svc.DeleteConfigurationRecorder(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleConfigService_DeleteDeliveryChannel() { + svc := configservice.New(session.New()) + + params := &configservice.DeleteDeliveryChannelInput{ + DeliveryChannelName: aws.String("ChannelName"), // Required + } + resp, err := svc.DeleteDeliveryChannel(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleConfigService_DeliverConfigSnapshot() { + svc := configservice.New(session.New()) + + params := &configservice.DeliverConfigSnapshotInput{ + DeliveryChannelName: aws.String("ChannelName"), // Required + } + resp, err := svc.DeliverConfigSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleConfigService_DescribeComplianceByConfigRule() { + svc := configservice.New(session.New()) + + params := &configservice.DescribeComplianceByConfigRuleInput{ + ComplianceTypes: []*string{ + aws.String("ComplianceType"), // Required + // More values... + }, + ConfigRuleNames: []*string{ + aws.String("StringWithCharLimit64"), // Required + // More values... + }, + NextToken: aws.String("String"), + } + resp, err := svc.DescribeComplianceByConfigRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleConfigService_DescribeComplianceByResource() { + svc := configservice.New(session.New()) + + params := &configservice.DescribeComplianceByResourceInput{ + ComplianceTypes: []*string{ + aws.String("ComplianceType"), // Required + // More values... + }, + Limit: aws.Int64(1), + NextToken: aws.String("NextToken"), + ResourceId: aws.String("StringWithCharLimit256"), + ResourceType: aws.String("StringWithCharLimit256"), + } + resp, err := svc.DescribeComplianceByResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleConfigService_DescribeConfigRuleEvaluationStatus() { + svc := configservice.New(session.New()) + + params := &configservice.DescribeConfigRuleEvaluationStatusInput{ + ConfigRuleNames: []*string{ + aws.String("StringWithCharLimit64"), // Required + // More values... + }, + } + resp, err := svc.DescribeConfigRuleEvaluationStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleConfigService_DescribeConfigRules() { + svc := configservice.New(session.New()) + + params := &configservice.DescribeConfigRulesInput{ + ConfigRuleNames: []*string{ + aws.String("StringWithCharLimit64"), // Required + // More values... + }, + NextToken: aws.String("String"), + } + resp, err := svc.DescribeConfigRules(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleConfigService_DescribeConfigurationRecorderStatus() { + svc := configservice.New(session.New()) + + params := &configservice.DescribeConfigurationRecorderStatusInput{ + ConfigurationRecorderNames: []*string{ + aws.String("RecorderName"), // Required + // More values... + }, + } + resp, err := svc.DescribeConfigurationRecorderStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleConfigService_DescribeConfigurationRecorders() { + svc := configservice.New(session.New()) + + params := &configservice.DescribeConfigurationRecordersInput{ + ConfigurationRecorderNames: []*string{ + aws.String("RecorderName"), // Required + // More values... + }, + } + resp, err := svc.DescribeConfigurationRecorders(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleConfigService_DescribeDeliveryChannelStatus() { + svc := configservice.New(session.New()) + + params := &configservice.DescribeDeliveryChannelStatusInput{ + DeliveryChannelNames: []*string{ + aws.String("ChannelName"), // Required + // More values... + }, + } + resp, err := svc.DescribeDeliveryChannelStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleConfigService_DescribeDeliveryChannels() { + svc := configservice.New(session.New()) + + params := &configservice.DescribeDeliveryChannelsInput{ + DeliveryChannelNames: []*string{ + aws.String("ChannelName"), // Required + // More values... + }, + } + resp, err := svc.DescribeDeliveryChannels(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleConfigService_GetComplianceDetailsByConfigRule() { + svc := configservice.New(session.New()) + + params := &configservice.GetComplianceDetailsByConfigRuleInput{ + ConfigRuleName: aws.String("StringWithCharLimit64"), // Required + ComplianceTypes: []*string{ + aws.String("ComplianceType"), // Required + // More values... + }, + Limit: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.GetComplianceDetailsByConfigRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleConfigService_GetComplianceDetailsByResource() { + svc := configservice.New(session.New()) + + params := &configservice.GetComplianceDetailsByResourceInput{ + ResourceId: aws.String("StringWithCharLimit256"), // Required + ResourceType: aws.String("StringWithCharLimit256"), // Required + ComplianceTypes: []*string{ + aws.String("ComplianceType"), // Required + // More values... + }, + NextToken: aws.String("String"), + } + resp, err := svc.GetComplianceDetailsByResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleConfigService_GetComplianceSummaryByConfigRule() { + svc := configservice.New(session.New()) + + var params *configservice.GetComplianceSummaryByConfigRuleInput + resp, err := svc.GetComplianceSummaryByConfigRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleConfigService_GetComplianceSummaryByResourceType() { + svc := configservice.New(session.New()) + + params := &configservice.GetComplianceSummaryByResourceTypeInput{ + ResourceTypes: []*string{ + aws.String("StringWithCharLimit256"), // Required + // More values... + }, + } + resp, err := svc.GetComplianceSummaryByResourceType(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleConfigService_GetResourceConfigHistory() { + svc := configservice.New(session.New()) + + params := &configservice.GetResourceConfigHistoryInput{ + ResourceId: aws.String("ResourceId"), // Required + ResourceType: aws.String("ResourceType"), // Required + ChronologicalOrder: aws.String("ChronologicalOrder"), + EarlierTime: aws.Time(time.Now()), + LaterTime: aws.Time(time.Now()), + Limit: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.GetResourceConfigHistory(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleConfigService_ListDiscoveredResources() { + svc := configservice.New(session.New()) + + params := &configservice.ListDiscoveredResourcesInput{ + ResourceType: aws.String("ResourceType"), // Required + IncludeDeletedResources: aws.Bool(true), + Limit: aws.Int64(1), + NextToken: aws.String("NextToken"), + ResourceIds: []*string{ + aws.String("ResourceId"), // Required + // More values... + }, + ResourceName: aws.String("ResourceName"), + } + resp, err := svc.ListDiscoveredResources(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleConfigService_PutConfigRule() { + svc := configservice.New(session.New()) + + params := &configservice.PutConfigRuleInput{ + ConfigRule: &configservice.ConfigRule{ // Required + Source: &configservice.Source{ // Required + Owner: aws.String("Owner"), + SourceDetails: []*configservice.SourceDetail{ + { // Required + EventSource: aws.String("EventSource"), + MessageType: aws.String("MessageType"), + }, + // More values... 
+ }, + SourceIdentifier: aws.String("StringWithCharLimit256"), + }, + ConfigRuleArn: aws.String("String"), + ConfigRuleId: aws.String("String"), + ConfigRuleName: aws.String("StringWithCharLimit64"), + ConfigRuleState: aws.String("ConfigRuleState"), + Description: aws.String("EmptiableStringWithCharLimit256"), + InputParameters: aws.String("StringWithCharLimit256"), + MaximumExecutionFrequency: aws.String("MaximumExecutionFrequency"), + Scope: &configservice.Scope{ + ComplianceResourceId: aws.String("StringWithCharLimit256"), + ComplianceResourceTypes: []*string{ + aws.String("StringWithCharLimit256"), // Required + // More values... + }, + TagKey: aws.String("StringWithCharLimit128"), + TagValue: aws.String("StringWithCharLimit256"), + }, + }, + } + resp, err := svc.PutConfigRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleConfigService_PutConfigurationRecorder() { + svc := configservice.New(session.New()) + + params := &configservice.PutConfigurationRecorderInput{ + ConfigurationRecorder: &configservice.ConfigurationRecorder{ // Required + Name: aws.String("RecorderName"), + RecordingGroup: &configservice.RecordingGroup{ + AllSupported: aws.Bool(true), + IncludeGlobalResourceTypes: aws.Bool(true), + ResourceTypes: []*string{ + aws.String("ResourceType"), // Required + // More values... + }, + }, + RoleARN: aws.String("String"), + }, + } + resp, err := svc.PutConfigurationRecorder(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleConfigService_PutDeliveryChannel() { + svc := configservice.New(session.New()) + + params := &configservice.PutDeliveryChannelInput{ + DeliveryChannel: &configservice.DeliveryChannel{ // Required + ConfigSnapshotDeliveryProperties: &configservice.ConfigSnapshotDeliveryProperties{ + DeliveryFrequency: aws.String("MaximumExecutionFrequency"), + }, + Name: aws.String("ChannelName"), + S3BucketName: aws.String("String"), + S3KeyPrefix: aws.String("String"), + SnsTopicARN: aws.String("String"), + }, + } + resp, err := svc.PutDeliveryChannel(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleConfigService_PutEvaluations() { + svc := configservice.New(session.New()) + + params := &configservice.PutEvaluationsInput{ + ResultToken: aws.String("String"), // Required + Evaluations: []*configservice.Evaluation{ + { // Required + ComplianceResourceId: aws.String("StringWithCharLimit256"), // Required + ComplianceResourceType: aws.String("StringWithCharLimit256"), // Required + ComplianceType: aws.String("ComplianceType"), // Required + OrderingTimestamp: aws.Time(time.Now()), // Required + Annotation: aws.String("StringWithCharLimit256"), + }, + // More values... + }, + } + resp, err := svc.PutEvaluations(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp)
+}
+
+func ExampleConfigService_StartConfigurationRecorder() {
+ svc := configservice.New(session.New())
+
+ params := &configservice.StartConfigurationRecorderInput{
+  ConfigurationRecorderName: aws.String("RecorderName"), // Required
+ }
+ resp, err := svc.StartConfigurationRecorder(params)
+
+ if err != nil {
+  // Print the error, cast err to awserr.Error to get the Code and
+  // Message from an error.
+  fmt.Println(err.Error())
+  return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleConfigService_StopConfigurationRecorder() {
+ svc := configservice.New(session.New())
+
+ params := &configservice.StopConfigurationRecorderInput{
+  ConfigurationRecorderName: aws.String("RecorderName"), // Required
+ }
+ resp, err := svc.StopConfigurationRecorder(params)
+
+ if err != nil {
+  // Print the error, cast err to awserr.Error to get the Code and
+  // Message from an error.
+  fmt.Println(err.Error())
+  return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/configservice/service.go b/vendor/github.com/aws/aws-sdk-go/service/configservice/service.go
new file mode 100644
index 000000000..473f60336
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/configservice/service.go
@@ -0,0 +1,111 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package configservice
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+)
+
+// AWS Config provides a way to keep track of the configurations of all the
+// AWS resources associated with your AWS account. You can use AWS Config to
+// get the current and historical configurations of each AWS resource and also
+// to get information about the relationship between the resources. An AWS resource
+// can be an Amazon Elastic Compute Cloud (Amazon EC2) instance, an Elastic
+// Block Store (EBS) volume, an elastic network interface (ENI), or a security
+// group. For a complete list of resources currently supported by AWS Config,
+// see Supported AWS Resources (http://docs.aws.amazon.com/config/latest/developerguide/resource-config-reference.html#supported-resources).
+//
+// You can access and manage AWS Config through the AWS Management Console,
+// the AWS Command Line Interface (AWS CLI), the AWS Config API, or the AWS
+// SDKs for AWS Config.
+//
+// This reference guide contains documentation for the AWS Config API and the
+// AWS CLI commands that you can use to manage AWS Config.
+//
+// The AWS Config API uses the Signature Version 4 protocol for signing requests.
+// For more information about how to sign a request with this protocol, see
+// Signature Version 4 Signing Process (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html).
+//
+// For detailed information about AWS Config features and their associated
+// actions or commands, as well as how to work with the AWS Management Console,
+// see What Is AWS Config? (http://docs.aws.amazon.com/config/latest/developerguide/WhatIsConfig.html)
+// in the AWS Config Developer Guide.
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
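+//
+// A minimal usage sketch (assuming a session whose region and credentials
+// are already configured; the rule name is a placeholder). It uses the
+// DescribeConfigRules operation and input type defined in this package:
+//
+//    svc := configservice.New(session.New())
+//    out, err := svc.DescribeConfigRules(&configservice.DescribeConfigRulesInput{
+//        ConfigRuleNames: []*string{aws.String("my-config-rule")},
+//    })
+//    if err != nil {
+//        fmt.Println(err.Error())
+//        return
+//    }
+//    fmt.Println(out)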
+type ConfigService struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "config" + +// New creates a new instance of the ConfigService client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a ConfigService client from just a session. +// svc := configservice.New(mySession) +// +// // Create a ConfigService client with additional configuration +// svc := configservice.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *ConfigService { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *ConfigService { + svc := &ConfigService{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-11-12", + JSONVersion: "1.1", + TargetPrefix: "StarlingDoveService", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a ConfigService operation and runs any +// custom request initialization. +func (c *ConfigService) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/api.go b/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/api.go new file mode 100644 index 000000000..4a4c6a713 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/api.go @@ -0,0 +1,3957 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package databasemigrationservice provides a client for AWS Database Migration Service. +package databasemigrationservice + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAddTagsToResource = "AddTagsToResource" + +// AddTagsToResourceRequest generates a "aws/request.Request" representing the +// client's request for the AddTagsToResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the AddTagsToResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddTagsToResourceRequest method. +// req, resp := client.AddTagsToResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DatabaseMigrationService) AddTagsToResourceRequest(input *AddTagsToResourceInput) (req *request.Request, output *AddTagsToResourceOutput) { + op := &request.Operation{ + Name: opAddTagsToResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddTagsToResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &AddTagsToResourceOutput{} + req.Data = output + return +} + +// Adds metadata tags to a DMS resource, including replication instance, endpoint, +// security group, and migration task. These tags can also be used with cost +// allocation reporting to track cost associated with DMS resources, or used +// in a Condition statement in an IAM policy for DMS. +func (c *DatabaseMigrationService) AddTagsToResource(input *AddTagsToResourceInput) (*AddTagsToResourceOutput, error) { + req, out := c.AddTagsToResourceRequest(input) + err := req.Send() + return out, err +} + +const opCreateEndpoint = "CreateEndpoint" + +// CreateEndpointRequest generates a "aws/request.Request" representing the +// client's request for the CreateEndpoint operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateEndpoint method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateEndpointRequest method. +// req, resp := client.CreateEndpointRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DatabaseMigrationService) CreateEndpointRequest(input *CreateEndpointInput) (req *request.Request, output *CreateEndpointOutput) { + op := &request.Operation{ + Name: opCreateEndpoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateEndpointInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateEndpointOutput{} + req.Data = output + return +} + +// Creates an endpoint using the provided settings. +func (c *DatabaseMigrationService) CreateEndpoint(input *CreateEndpointInput) (*CreateEndpointOutput, error) { + req, out := c.CreateEndpointRequest(input) + err := req.Send() + return out, err +} + +const opCreateReplicationInstance = "CreateReplicationInstance" + +// CreateReplicationInstanceRequest generates a "aws/request.Request" representing the +// client's request for the CreateReplicationInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateReplicationInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateReplicationInstanceRequest method. +// req, resp := client.CreateReplicationInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DatabaseMigrationService) CreateReplicationInstanceRequest(input *CreateReplicationInstanceInput) (req *request.Request, output *CreateReplicationInstanceOutput) { + op := &request.Operation{ + Name: opCreateReplicationInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateReplicationInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateReplicationInstanceOutput{} + req.Data = output + return +} + +// Creates the replication instance using the specified parameters. +func (c *DatabaseMigrationService) CreateReplicationInstance(input *CreateReplicationInstanceInput) (*CreateReplicationInstanceOutput, error) { + req, out := c.CreateReplicationInstanceRequest(input) + err := req.Send() + return out, err +} + +const opCreateReplicationSubnetGroup = "CreateReplicationSubnetGroup" + +// CreateReplicationSubnetGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateReplicationSubnetGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateReplicationSubnetGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateReplicationSubnetGroupRequest method. +// req, resp := client.CreateReplicationSubnetGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DatabaseMigrationService) CreateReplicationSubnetGroupRequest(input *CreateReplicationSubnetGroupInput) (req *request.Request, output *CreateReplicationSubnetGroupOutput) { + op := &request.Operation{ + Name: opCreateReplicationSubnetGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateReplicationSubnetGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateReplicationSubnetGroupOutput{} + req.Data = output + return +} + +// Creates a replication subnet group given a list of the subnet IDs in a VPC. 
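+//
+// A minimal sketch of a call. The input field names below follow the DMS
+// CreateReplicationSubnetGroup API and are an assumption here, since the
+// input shape sits outside this hunk; identifiers and subnet IDs are
+// placeholders:
+//
+//    params := &CreateReplicationSubnetGroupInput{
+//        ReplicationSubnetGroupIdentifier:  aws.String("my-subnet-group"),   // assumed field name
+//        ReplicationSubnetGroupDescription: aws.String("replication subnets"), // assumed field name
+//        SubnetIds:                         []*string{aws.String("subnet-0123abcd")}, // assumed field name
+//    }
+//    resp, err := client.CreateReplicationSubnetGroup(params)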
+func (c *DatabaseMigrationService) CreateReplicationSubnetGroup(input *CreateReplicationSubnetGroupInput) (*CreateReplicationSubnetGroupOutput, error) { + req, out := c.CreateReplicationSubnetGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateReplicationTask = "CreateReplicationTask" + +// CreateReplicationTaskRequest generates a "aws/request.Request" representing the +// client's request for the CreateReplicationTask operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateReplicationTask method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateReplicationTaskRequest method. +// req, resp := client.CreateReplicationTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DatabaseMigrationService) CreateReplicationTaskRequest(input *CreateReplicationTaskInput) (req *request.Request, output *CreateReplicationTaskOutput) { + op := &request.Operation{ + Name: opCreateReplicationTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateReplicationTaskInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateReplicationTaskOutput{} + req.Data = output + return +} + +// Creates a replication task using the specified parameters. +func (c *DatabaseMigrationService) CreateReplicationTask(input *CreateReplicationTaskInput) (*CreateReplicationTaskOutput, error) { + req, out := c.CreateReplicationTaskRequest(input) + err := req.Send() + return out, err +} + +const opDeleteEndpoint = "DeleteEndpoint" + +// DeleteEndpointRequest generates a "aws/request.Request" representing the +// client's request for the DeleteEndpoint operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteEndpoint method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteEndpointRequest method. +// req, resp := client.DeleteEndpointRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DatabaseMigrationService) DeleteEndpointRequest(input *DeleteEndpointInput) (req *request.Request, output *DeleteEndpointOutput) { + op := &request.Operation{ + Name: opDeleteEndpoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteEndpointInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteEndpointOutput{} + req.Data = output + return +} + +// Deletes the specified endpoint. 
+// +// All tasks associated with the endpoint must be deleted before you can delete +// the endpoint. +func (c *DatabaseMigrationService) DeleteEndpoint(input *DeleteEndpointInput) (*DeleteEndpointOutput, error) { + req, out := c.DeleteEndpointRequest(input) + err := req.Send() + return out, err +} + +const opDeleteReplicationInstance = "DeleteReplicationInstance" + +// DeleteReplicationInstanceRequest generates a "aws/request.Request" representing the +// client's request for the DeleteReplicationInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteReplicationInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteReplicationInstanceRequest method. +// req, resp := client.DeleteReplicationInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DatabaseMigrationService) DeleteReplicationInstanceRequest(input *DeleteReplicationInstanceInput) (req *request.Request, output *DeleteReplicationInstanceOutput) { + op := &request.Operation{ + Name: opDeleteReplicationInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteReplicationInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteReplicationInstanceOutput{} + req.Data = output + return +} + +// Deletes the specified replication instance. +// +// You must delete any migration tasks that are associated with the replication +// instance before you can delete it. +func (c *DatabaseMigrationService) DeleteReplicationInstance(input *DeleteReplicationInstanceInput) (*DeleteReplicationInstanceOutput, error) { + req, out := c.DeleteReplicationInstanceRequest(input) + err := req.Send() + return out, err +} + +const opDeleteReplicationSubnetGroup = "DeleteReplicationSubnetGroup" + +// DeleteReplicationSubnetGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteReplicationSubnetGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteReplicationSubnetGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteReplicationSubnetGroupRequest method. 
+// req, resp := client.DeleteReplicationSubnetGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DatabaseMigrationService) DeleteReplicationSubnetGroupRequest(input *DeleteReplicationSubnetGroupInput) (req *request.Request, output *DeleteReplicationSubnetGroupOutput) { + op := &request.Operation{ + Name: opDeleteReplicationSubnetGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteReplicationSubnetGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteReplicationSubnetGroupOutput{} + req.Data = output + return +} + +// Deletes a subnet group. +func (c *DatabaseMigrationService) DeleteReplicationSubnetGroup(input *DeleteReplicationSubnetGroupInput) (*DeleteReplicationSubnetGroupOutput, error) { + req, out := c.DeleteReplicationSubnetGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteReplicationTask = "DeleteReplicationTask" + +// DeleteReplicationTaskRequest generates a "aws/request.Request" representing the +// client's request for the DeleteReplicationTask operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteReplicationTask method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteReplicationTaskRequest method. +// req, resp := client.DeleteReplicationTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DatabaseMigrationService) DeleteReplicationTaskRequest(input *DeleteReplicationTaskInput) (req *request.Request, output *DeleteReplicationTaskOutput) { + op := &request.Operation{ + Name: opDeleteReplicationTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteReplicationTaskInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteReplicationTaskOutput{} + req.Data = output + return +} + +// Deletes the specified replication task. +func (c *DatabaseMigrationService) DeleteReplicationTask(input *DeleteReplicationTaskInput) (*DeleteReplicationTaskOutput, error) { + req, out := c.DeleteReplicationTaskRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAccountAttributes = "DescribeAccountAttributes" + +// DescribeAccountAttributesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAccountAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeAccountAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
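+//
+// // A hedged aside (not generated text): an error returned by Send, where req
+// // is the request built in the example below, can be inspected with this
+// // SDK's awserr package.
+// if err := req.Send(); err != nil {
+//     if aerr, ok := err.(awserr.Error); ok {
+//         fmt.Println(aerr.Code(), aerr.Message())
+//     }
+// }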
+// +// // Example sending a request using the DescribeAccountAttributesRequest method. +// req, resp := client.DescribeAccountAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DatabaseMigrationService) DescribeAccountAttributesRequest(input *DescribeAccountAttributesInput) (req *request.Request, output *DescribeAccountAttributesOutput) { + op := &request.Operation{ + Name: opDescribeAccountAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAccountAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAccountAttributesOutput{} + req.Data = output + return +} + +// Lists all of the AWS DMS attributes for a customer account. The attributes +// include AWS DMS quotas for the account, such as the number of replication +// instances allowed. The description for a quota includes the quota name, current +// usage toward that quota, and the quota's maximum value. +// +// This command does not take any parameters. +func (c *DatabaseMigrationService) DescribeAccountAttributes(input *DescribeAccountAttributesInput) (*DescribeAccountAttributesOutput, error) { + req, out := c.DescribeAccountAttributesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeConnections = "DescribeConnections" + +// DescribeConnectionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeConnections operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeConnections method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeConnectionsRequest method. +// req, resp := client.DescribeConnectionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DatabaseMigrationService) DescribeConnectionsRequest(input *DescribeConnectionsInput) (req *request.Request, output *DescribeConnectionsOutput) { + op := &request.Operation{ + Name: opDescribeConnections, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeConnectionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeConnectionsOutput{} + req.Data = output + return +} + +// Describes the status of the connections that have been made between the replication +// instance and an endpoint. Connections are created when you test an endpoint. +func (c *DatabaseMigrationService) DescribeConnections(input *DescribeConnectionsInput) (*DescribeConnectionsOutput, error) { + req, out := c.DescribeConnectionsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeEndpointTypes = "DescribeEndpointTypes" + +// DescribeEndpointTypesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEndpointTypes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
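+//
+// // A hedged sketch of the custom-handler pattern described below: a handler
+// // pushed onto the Send handler list can observe the request before it goes
+// // out. The handler body is illustrative only; params is hypothetical.
+// req, resp := client.DescribeEndpointTypesRequest(params)
+// req.Handlers.Send.PushFront(func(r *request.Request) {
+//     // inspect or adjust r here before it is sent
+// })
+// if err := req.Send(); err == nil {
+//     fmt.Println(resp)
+// }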
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeEndpointTypes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeEndpointTypesRequest method. +// req, resp := client.DescribeEndpointTypesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DatabaseMigrationService) DescribeEndpointTypesRequest(input *DescribeEndpointTypesInput) (req *request.Request, output *DescribeEndpointTypesOutput) { + op := &request.Operation{ + Name: opDescribeEndpointTypes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeEndpointTypesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeEndpointTypesOutput{} + req.Data = output + return +} + +// Returns information about the type of endpoints available. +func (c *DatabaseMigrationService) DescribeEndpointTypes(input *DescribeEndpointTypesInput) (*DescribeEndpointTypesOutput, error) { + req, out := c.DescribeEndpointTypesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeEndpoints = "DescribeEndpoints" + +// DescribeEndpointsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEndpoints operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeEndpoints method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeEndpointsRequest method. +// req, resp := client.DescribeEndpointsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DatabaseMigrationService) DescribeEndpointsRequest(input *DescribeEndpointsInput) (req *request.Request, output *DescribeEndpointsOutput) { + op := &request.Operation{ + Name: opDescribeEndpoints, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeEndpointsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeEndpointsOutput{} + req.Data = output + return +} + +// Returns information about the endpoints for your account in the current region. +func (c *DatabaseMigrationService) DescribeEndpoints(input *DescribeEndpointsInput) (*DescribeEndpointsOutput, error) { + req, out := c.DescribeEndpointsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeOrderableReplicationInstances = "DescribeOrderableReplicationInstances" + +// DescribeOrderableReplicationInstancesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeOrderableReplicationInstances operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeOrderableReplicationInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeOrderableReplicationInstancesRequest method. +// req, resp := client.DescribeOrderableReplicationInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DatabaseMigrationService) DescribeOrderableReplicationInstancesRequest(input *DescribeOrderableReplicationInstancesInput) (req *request.Request, output *DescribeOrderableReplicationInstancesOutput) { + op := &request.Operation{ + Name: opDescribeOrderableReplicationInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeOrderableReplicationInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeOrderableReplicationInstancesOutput{} + req.Data = output + return +} + +// Returns information about the replication instance types that can be created +// in the specified region. +func (c *DatabaseMigrationService) DescribeOrderableReplicationInstances(input *DescribeOrderableReplicationInstancesInput) (*DescribeOrderableReplicationInstancesOutput, error) { + req, out := c.DescribeOrderableReplicationInstancesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeRefreshSchemasStatus = "DescribeRefreshSchemasStatus" + +// DescribeRefreshSchemasStatusRequest generates a "aws/request.Request" representing the +// client's request for the DescribeRefreshSchemasStatus operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeRefreshSchemasStatus method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeRefreshSchemasStatusRequest method. +// req, resp := client.DescribeRefreshSchemasStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DatabaseMigrationService) DescribeRefreshSchemasStatusRequest(input *DescribeRefreshSchemasStatusInput) (req *request.Request, output *DescribeRefreshSchemasStatusOutput) { + op := &request.Operation{ + Name: opDescribeRefreshSchemasStatus, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeRefreshSchemasStatusInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeRefreshSchemasStatusOutput{} + req.Data = output + return +} + +// Returns the status of the RefreshSchemas operation. 
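+//
+// // Illustrative only: the input field below follows the AWS DMS API and the
+// // ARN is a placeholder.
+// out, err := client.DescribeRefreshSchemasStatus(&DescribeRefreshSchemasStatusInput{
+//     EndpointArn: aws.String("arn:aws:dms:us-east-1:123456789012:endpoint:EXAMPLE"),
+// })
+// if err == nil {
+//     fmt.Println(out)
+// }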
+func (c *DatabaseMigrationService) DescribeRefreshSchemasStatus(input *DescribeRefreshSchemasStatusInput) (*DescribeRefreshSchemasStatusOutput, error) { + req, out := c.DescribeRefreshSchemasStatusRequest(input) + err := req.Send() + return out, err +} + +const opDescribeReplicationInstances = "DescribeReplicationInstances" + +// DescribeReplicationInstancesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReplicationInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeReplicationInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeReplicationInstancesRequest method. +// req, resp := client.DescribeReplicationInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DatabaseMigrationService) DescribeReplicationInstancesRequest(input *DescribeReplicationInstancesInput) (req *request.Request, output *DescribeReplicationInstancesOutput) { + op := &request.Operation{ + Name: opDescribeReplicationInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeReplicationInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReplicationInstancesOutput{} + req.Data = output + return +} + +// Returns information about replication instances for your account in the current +// region. +func (c *DatabaseMigrationService) DescribeReplicationInstances(input *DescribeReplicationInstancesInput) (*DescribeReplicationInstancesOutput, error) { + req, out := c.DescribeReplicationInstancesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeReplicationSubnetGroups = "DescribeReplicationSubnetGroups" + +// DescribeReplicationSubnetGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReplicationSubnetGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeReplicationSubnetGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeReplicationSubnetGroupsRequest method. 
+// req, resp := client.DescribeReplicationSubnetGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DatabaseMigrationService) DescribeReplicationSubnetGroupsRequest(input *DescribeReplicationSubnetGroupsInput) (req *request.Request, output *DescribeReplicationSubnetGroupsOutput) { + op := &request.Operation{ + Name: opDescribeReplicationSubnetGroups, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeReplicationSubnetGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReplicationSubnetGroupsOutput{} + req.Data = output + return +} + +// Returns information about the replication subnet groups. +func (c *DatabaseMigrationService) DescribeReplicationSubnetGroups(input *DescribeReplicationSubnetGroupsInput) (*DescribeReplicationSubnetGroupsOutput, error) { + req, out := c.DescribeReplicationSubnetGroupsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeReplicationTasks = "DescribeReplicationTasks" + +// DescribeReplicationTasksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReplicationTasks operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeReplicationTasks method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeReplicationTasksRequest method. +// req, resp := client.DescribeReplicationTasksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DatabaseMigrationService) DescribeReplicationTasksRequest(input *DescribeReplicationTasksInput) (req *request.Request, output *DescribeReplicationTasksOutput) { + op := &request.Operation{ + Name: opDescribeReplicationTasks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeReplicationTasksInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReplicationTasksOutput{} + req.Data = output + return +} + +// Returns information about replication tasks for your account in the current +// region. +func (c *DatabaseMigrationService) DescribeReplicationTasks(input *DescribeReplicationTasksInput) (*DescribeReplicationTasksOutput, error) { + req, out := c.DescribeReplicationTasksRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSchemas = "DescribeSchemas" + +// DescribeSchemasRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSchemas operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeSchemas method directly +// instead. 
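+//
+// // A hedged sketch of that direct call (input field per the AWS DMS API; the
+// // ARN is a placeholder):
+// out, err := client.DescribeSchemas(&DescribeSchemasInput{
+//     EndpointArn: aws.String("arn:aws:dms:us-east-1:123456789012:endpoint:EXAMPLE"),
+// })
+// if err == nil {
+//     fmt.Println(out)
+// }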
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeSchemasRequest method. +// req, resp := client.DescribeSchemasRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DatabaseMigrationService) DescribeSchemasRequest(input *DescribeSchemasInput) (req *request.Request, output *DescribeSchemasOutput) { + op := &request.Operation{ + Name: opDescribeSchemas, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSchemasInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSchemasOutput{} + req.Data = output + return +} + +// Returns information about the schema for the specified endpoint. +func (c *DatabaseMigrationService) DescribeSchemas(input *DescribeSchemasInput) (*DescribeSchemasOutput, error) { + req, out := c.DescribeSchemasRequest(input) + err := req.Send() + return out, err +} + +const opDescribeTableStatistics = "DescribeTableStatistics" + +// DescribeTableStatisticsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTableStatistics operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeTableStatistics method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeTableStatisticsRequest method. +// req, resp := client.DescribeTableStatisticsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DatabaseMigrationService) DescribeTableStatisticsRequest(input *DescribeTableStatisticsInput) (req *request.Request, output *DescribeTableStatisticsOutput) { + op := &request.Operation{ + Name: opDescribeTableStatistics, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTableStatisticsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTableStatisticsOutput{} + req.Data = output + return +} + +// Returns table statistics on the database migration task, including table +// name, rows inserted, rows updated, and rows deleted. +func (c *DatabaseMigrationService) DescribeTableStatistics(input *DescribeTableStatisticsInput) (*DescribeTableStatisticsOutput, error) { + req, out := c.DescribeTableStatisticsRequest(input) + err := req.Send() + return out, err +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the ListTagsForResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DatabaseMigrationService) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTagsForResourceOutput{} + req.Data = output + return +} + +// Lists all tags for an AWS DMS resource. +func (c *DatabaseMigrationService) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + err := req.Send() + return out, err +} + +const opModifyEndpoint = "ModifyEndpoint" + +// ModifyEndpointRequest generates a "aws/request.Request" representing the +// client's request for the ModifyEndpoint operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyEndpoint method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyEndpointRequest method. +// req, resp := client.ModifyEndpointRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DatabaseMigrationService) ModifyEndpointRequest(input *ModifyEndpointInput) (req *request.Request, output *ModifyEndpointOutput) { + op := &request.Operation{ + Name: opModifyEndpoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyEndpointInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyEndpointOutput{} + req.Data = output + return +} + +// Modifies the specified endpoint. +func (c *DatabaseMigrationService) ModifyEndpoint(input *ModifyEndpointInput) (*ModifyEndpointOutput, error) { + req, out := c.ModifyEndpointRequest(input) + err := req.Send() + return out, err +} + +const opModifyReplicationInstance = "ModifyReplicationInstance" + +// ModifyReplicationInstanceRequest generates a "aws/request.Request" representing the +// client's request for the ModifyReplicationInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyReplicationInstance method directly +// instead. 
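+//
+// // For contrast, a hedged sketch of the direct call mentioned above, where
+// // params is a hypothetical *ModifyReplicationInstanceInput:
+// resp, err := client.ModifyReplicationInstance(params)
+// if err == nil {
+//     fmt.Println(resp)
+// }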
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyReplicationInstanceRequest method. +// req, resp := client.ModifyReplicationInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DatabaseMigrationService) ModifyReplicationInstanceRequest(input *ModifyReplicationInstanceInput) (req *request.Request, output *ModifyReplicationInstanceOutput) { + op := &request.Operation{ + Name: opModifyReplicationInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyReplicationInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyReplicationInstanceOutput{} + req.Data = output + return +} + +// Modifies the replication instance to apply new settings. You can change one +// or more parameters by specifying these parameters and the new values in the +// request. +// +// Some settings are applied during the maintenance window. +func (c *DatabaseMigrationService) ModifyReplicationInstance(input *ModifyReplicationInstanceInput) (*ModifyReplicationInstanceOutput, error) { + req, out := c.ModifyReplicationInstanceRequest(input) + err := req.Send() + return out, err +} + +const opModifyReplicationSubnetGroup = "ModifyReplicationSubnetGroup" + +// ModifyReplicationSubnetGroupRequest generates a "aws/request.Request" representing the +// client's request for the ModifyReplicationSubnetGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyReplicationSubnetGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyReplicationSubnetGroupRequest method. +// req, resp := client.ModifyReplicationSubnetGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DatabaseMigrationService) ModifyReplicationSubnetGroupRequest(input *ModifyReplicationSubnetGroupInput) (req *request.Request, output *ModifyReplicationSubnetGroupOutput) { + op := &request.Operation{ + Name: opModifyReplicationSubnetGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyReplicationSubnetGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyReplicationSubnetGroupOutput{} + req.Data = output + return +} + +// Modifies the settings for the specified replication subnet group. +func (c *DatabaseMigrationService) ModifyReplicationSubnetGroup(input *ModifyReplicationSubnetGroupInput) (*ModifyReplicationSubnetGroupOutput, error) { + req, out := c.ModifyReplicationSubnetGroupRequest(input) + err := req.Send() + return out, err +} + +const opRefreshSchemas = "RefreshSchemas" + +// RefreshSchemasRequest generates a "aws/request.Request" representing the +// client's request for the RefreshSchemas operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
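+//
+// // A hedged sketch: because RefreshSchemas is asynchronous (see the operation
+// // description further below), a typical flow starts the refresh and then
+// // polls DescribeRefreshSchemasStatus. The input fields follow the AWS DMS
+// // API; the ARNs are placeholders.
+// _, err := client.RefreshSchemas(&RefreshSchemasInput{
+//     EndpointArn:            aws.String("arn:aws:dms:us-east-1:123456789012:endpoint:EXAMPLE"),
+//     ReplicationInstanceArn: aws.String("arn:aws:dms:us-east-1:123456789012:rep:EXAMPLE"),
+// })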
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RefreshSchemas method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RefreshSchemasRequest method. +// req, resp := client.RefreshSchemasRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DatabaseMigrationService) RefreshSchemasRequest(input *RefreshSchemasInput) (req *request.Request, output *RefreshSchemasOutput) { + op := &request.Operation{ + Name: opRefreshSchemas, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RefreshSchemasInput{} + } + + req = c.newRequest(op, input, output) + output = &RefreshSchemasOutput{} + req.Data = output + return +} + +// Populates the schema for the specified endpoint. This is an asynchronous +// operation and can take several minutes. You can check the status of this +// operation by calling the DescribeRefreshSchemasStatus operation. +func (c *DatabaseMigrationService) RefreshSchemas(input *RefreshSchemasInput) (*RefreshSchemasOutput, error) { + req, out := c.RefreshSchemasRequest(input) + err := req.Send() + return out, err +} + +const opRemoveTagsFromResource = "RemoveTagsFromResource" + +// RemoveTagsFromResourceRequest generates a "aws/request.Request" representing the +// client's request for the RemoveTagsFromResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RemoveTagsFromResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RemoveTagsFromResourceRequest method. +// req, resp := client.RemoveTagsFromResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DatabaseMigrationService) RemoveTagsFromResourceRequest(input *RemoveTagsFromResourceInput) (req *request.Request, output *RemoveTagsFromResourceOutput) { + op := &request.Operation{ + Name: opRemoveTagsFromResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveTagsFromResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &RemoveTagsFromResourceOutput{} + req.Data = output + return +} + +// Removes metadata tags from a DMS resource. +func (c *DatabaseMigrationService) RemoveTagsFromResource(input *RemoveTagsFromResourceInput) (*RemoveTagsFromResourceOutput, error) { + req, out := c.RemoveTagsFromResourceRequest(input) + err := req.Send() + return out, err +} + +const opStartReplicationTask = "StartReplicationTask" + +// StartReplicationTaskRequest generates a "aws/request.Request" representing the +// client's request for the StartReplicationTask operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the StartReplicationTask method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the StartReplicationTaskRequest method. +// req, resp := client.StartReplicationTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DatabaseMigrationService) StartReplicationTaskRequest(input *StartReplicationTaskInput) (req *request.Request, output *StartReplicationTaskOutput) { + op := &request.Operation{ + Name: opStartReplicationTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartReplicationTaskInput{} + } + + req = c.newRequest(op, input, output) + output = &StartReplicationTaskOutput{} + req.Data = output + return +} + +// Starts the replication task. +func (c *DatabaseMigrationService) StartReplicationTask(input *StartReplicationTaskInput) (*StartReplicationTaskOutput, error) { + req, out := c.StartReplicationTaskRequest(input) + err := req.Send() + return out, err +} + +const opStopReplicationTask = "StopReplicationTask" + +// StopReplicationTaskRequest generates a "aws/request.Request" representing the +// client's request for the StopReplicationTask operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the StopReplicationTask method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the StopReplicationTaskRequest method. +// req, resp := client.StopReplicationTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DatabaseMigrationService) StopReplicationTaskRequest(input *StopReplicationTaskInput) (req *request.Request, output *StopReplicationTaskOutput) { + op := &request.Operation{ + Name: opStopReplicationTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopReplicationTaskInput{} + } + + req = c.newRequest(op, input, output) + output = &StopReplicationTaskOutput{} + req.Data = output + return +} + +// Stops the replication task. +func (c *DatabaseMigrationService) StopReplicationTask(input *StopReplicationTaskInput) (*StopReplicationTaskOutput, error) { + req, out := c.StopReplicationTaskRequest(input) + err := req.Send() + return out, err +} + +const opTestConnection = "TestConnection" + +// TestConnectionRequest generates a "aws/request.Request" representing the +// client's request for the TestConnection operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
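+//
+// // Illustrative only: testing connectivity before starting a task. The input
+// // fields follow the AWS DMS API and the ARNs are placeholders; this assumes
+// // the output exposes the Connection type defined later in this file.
+// out, err := client.TestConnection(&TestConnectionInput{
+//     EndpointArn:            aws.String("arn:aws:dms:us-east-1:123456789012:endpoint:EXAMPLE"),
+//     ReplicationInstanceArn: aws.String("arn:aws:dms:us-east-1:123456789012:rep:EXAMPLE"),
+// })
+// if err == nil {
+//     fmt.Println(out.Connection.Status)
+// }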
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the TestConnection method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the TestConnectionRequest method. +// req, resp := client.TestConnectionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DatabaseMigrationService) TestConnectionRequest(input *TestConnectionInput) (req *request.Request, output *TestConnectionOutput) { + op := &request.Operation{ + Name: opTestConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TestConnectionInput{} + } + + req = c.newRequest(op, input, output) + output = &TestConnectionOutput{} + req.Data = output + return +} + +// Tests the connection between the replication instance and the endpoint. +func (c *DatabaseMigrationService) TestConnection(input *TestConnectionInput) (*TestConnectionOutput, error) { + req, out := c.TestConnectionRequest(input) + err := req.Send() + return out, err +} + +// Describes a quota for an AWS account, for example, the number of replication +// instances allowed. +type AccountQuota struct { + _ struct{} `type:"structure"` + + // The name of the AWS DMS quota for this AWS account. + AccountQuotaName *string `type:"string"` + + // The maximum allowed value for the quota. + Max *int64 `type:"long"` + + // The amount currently used toward the quota maximum. + Used *int64 `type:"long"` +} + +// String returns the string representation +func (s AccountQuota) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccountQuota) GoString() string { + return s.String() +} + +type AddTagsToResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the AWS DMS resource the tag is to be added + // to. AWS DMS resources include a replication instance, endpoint, and a replication + // task. + ResourceArn *string `type:"string" required:"true"` + + // The tag to be assigned to the DMS resource. + Tags []*Tag `locationNameList:"Tag" type:"list" required:"true"` +} + +// String returns the string representation +func (s AddTagsToResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsToResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
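+//
+// // A hedged illustration: Validate runs automatically inside the operation
+// // methods, but it can also be called directly before sending. Here tags is
+// // a hypothetical []*Tag, and ResourceArn is deliberately omitted.
+// params := &AddTagsToResourceInput{Tags: tags}
+// if err := params.Validate(); err != nil {
+//     fmt.Println(err) // reports the missing required ResourceArn
+// }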
+func (s *AddTagsToResourceInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "AddTagsToResourceInput"}
+	if s.ResourceArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("ResourceArn"))
+	}
+	if s.Tags == nil {
+		invalidParams.Add(request.NewErrParamRequired("Tags"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type AddTagsToResourceOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s AddTagsToResourceOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddTagsToResourceOutput) GoString() string {
+	return s.String()
+}
+
+type AvailabilityZone struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the Availability Zone.
+	Name *string `type:"string"`
+}
+
+// String returns the string representation
+func (s AvailabilityZone) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AvailabilityZone) GoString() string {
+	return s.String()
+}
+
+type Connection struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.
+	EndpointArn *string `type:"string"`
+
+	// The identifier of the endpoint. Identifiers must begin with a letter; must
+	// contain only ASCII letters, digits, and hyphens; and must not end with a
+	// hyphen or contain two consecutive hyphens.
+	EndpointIdentifier *string `type:"string"`
+
+	// The error message when the connection last failed.
+	LastFailureMessage *string `type:"string"`
+
+	// The Amazon Resource Name (ARN) of the replication instance.
+	ReplicationInstanceArn *string `type:"string"`
+
+	// The replication instance identifier. This parameter is stored as a lowercase
+	// string.
+	ReplicationInstanceIdentifier *string `type:"string"`
+
+	// The connection status.
+	Status *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Connection) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Connection) GoString() string {
+	return s.String()
+}
+
+type CreateEndpointInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the endpoint database.
+	DatabaseName *string `type:"string"`
+
+	// The database endpoint identifier. Identifiers must begin with a letter; must
+	// contain only ASCII letters, digits, and hyphens; and must not end with a
+	// hyphen or contain two consecutive hyphens.
+	EndpointIdentifier *string `type:"string" required:"true"`
+
+	// The type of endpoint.
+	EndpointType *string `type:"string" required:"true" enum:"ReplicationEndpointTypeValue"`
+
+	// The type of engine for the endpoint. Valid values include MYSQL, ORACLE,
+	// POSTGRES, MARIADB, AURORA, REDSHIFT, and SQLSERVER.
+	EngineName *string `type:"string" required:"true"`
+
+	// Additional attributes associated with the connection.
+	ExtraConnectionAttributes *string `type:"string"`
+
+	// The KMS key identifier that will be used to encrypt the connection parameters.
+	// If you do not specify a value for the KmsKeyId parameter, then AWS DMS will
+	// use your default encryption key. AWS KMS creates the default encryption key
+	// for your AWS account. Your AWS account has a different default encryption
+	// key for each AWS region.
+	KmsKeyId *string `type:"string"`
+
+	// The password to be used to log in to the endpoint database.
+	Password *string `type:"string" required:"true"`
+
+	// The port used by the endpoint database.
+	Port *int64 `type:"integer" required:"true"`
+
+	// The name of the server where the endpoint database resides.
+	ServerName *string `type:"string" required:"true"`
+
+	// Tags to be added to the endpoint.
+	Tags []*Tag `locationNameList:"Tag" type:"list"`
+
+	// The user name to be used to log in to the endpoint database.
+	Username *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateEndpointInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateEndpointInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateEndpointInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateEndpointInput"}
+	if s.EndpointIdentifier == nil {
+		invalidParams.Add(request.NewErrParamRequired("EndpointIdentifier"))
+	}
+	if s.EndpointType == nil {
+		invalidParams.Add(request.NewErrParamRequired("EndpointType"))
+	}
+	if s.EngineName == nil {
+		invalidParams.Add(request.NewErrParamRequired("EngineName"))
+	}
+	if s.Password == nil {
+		invalidParams.Add(request.NewErrParamRequired("Password"))
+	}
+	if s.Port == nil {
+		invalidParams.Add(request.NewErrParamRequired("Port"))
+	}
+	if s.ServerName == nil {
+		invalidParams.Add(request.NewErrParamRequired("ServerName"))
+	}
+	if s.Username == nil {
+		invalidParams.Add(request.NewErrParamRequired("Username"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type CreateEndpointOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The endpoint that was created.
+	Endpoint *Endpoint `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateEndpointOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateEndpointOutput) GoString() string {
+	return s.String()
+}
+
+type CreateReplicationInstanceInput struct {
+	_ struct{} `type:"structure"`
+
+	// The amount of storage (in gigabytes) to be initially allocated for the replication
+	// instance.
+	AllocatedStorage *int64 `type:"integer"`
+
+	// Indicates that minor engine upgrades will be applied automatically to the
+	// replication instance during the maintenance window.
+	//
+	// Default: true
+	AutoMinorVersionUpgrade *bool `type:"boolean"`
+
+	// The EC2 Availability Zone that the replication instance will be created in.
+	//
+	// Default: A random, system-chosen Availability Zone in the endpoint's region.
+	//
+	// Example: us-east-1d
+	AvailabilityZone *string `type:"string"`
+
+	// The engine version number of the replication instance.
+	EngineVersion *string `type:"string"`
+
+	// The KMS key identifier that will be used to encrypt the content on the replication
+	// instance. If you do not specify a value for the KmsKeyId parameter, then
+	// AWS DMS will use your default encryption key. AWS KMS creates the default
+	// encryption key for your AWS account. Your AWS account has a different default
+	// encryption key for each AWS region.
+	KmsKeyId *string `type:"string"`
+
+	// The weekly time range during which system maintenance can occur, in Universal
+	// Coordinated Time (UTC).
+ // + // Format: ddd:hh24:mi-ddd:hh24:mi + // + // Default: A 30-minute window selected at random from an 8-hour block of time + // per region, occurring on a random day of the week. + // + // Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun + // + // Constraints: Minimum 30-minute window. + PreferredMaintenanceWindow *string `type:"string"` + + // Specifies the accessibility options for the replication instance. A value + // of true represents an instance with a public IP address. A value of false + // represents an instance with a private IP address. The default value is true. + PubliclyAccessible *bool `type:"boolean"` + + // The compute and memory capacity of the replication instance as specified + // by the replication instance class. + // + // Valid Values: dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large + // | dms.c4.large | dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge + ReplicationInstanceClass *string `type:"string" required:"true"` + + // The replication instance identifier. This parameter is stored as a lowercase + // string. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens. + // + // First character must be a letter. + // + // Cannot end with a hyphen or contain two consecutive hyphens. + // + // Example: myrepinstance + ReplicationInstanceIdentifier *string `type:"string" required:"true"` + + // A subnet group to associate with the replication instance. + ReplicationSubnetGroupIdentifier *string `type:"string"` + + // Tags to be associated with the replication instance. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // Specifies the VPC security group to be used with the replication instance. + // The VPC security group must work with the VPC containing the replication + // instance. + VpcSecurityGroupIds []*string `locationNameList:"VpcSecurityGroupId" type:"list"` +} + +// String returns the string representation +func (s CreateReplicationInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReplicationInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateReplicationInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateReplicationInstanceInput"} + if s.ReplicationInstanceClass == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationInstanceClass")) + } + if s.ReplicationInstanceIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationInstanceIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateReplicationInstanceOutput struct { + _ struct{} `type:"structure"` + + // The replication instance that was created. + ReplicationInstance *ReplicationInstance `type:"structure"` +} + +// String returns the string representation +func (s CreateReplicationInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReplicationInstanceOutput) GoString() string { + return s.String() +} + +type CreateReplicationSubnetGroupInput struct { + _ struct{} `type:"structure"` + + // The description for the subnet group. + ReplicationSubnetGroupDescription *string `type:"string" required:"true"` + + // The name for the replication subnet group. This value is stored as a lowercase + // string. 
+ // + // Constraints: Must contain no more than 255 alphanumeric characters, periods, + // spaces, underscores, or hyphens. Must not be "default". + // + // Example: mySubnetgroup + ReplicationSubnetGroupIdentifier *string `type:"string" required:"true"` + + // The EC2 subnet IDs for the subnet group. + SubnetIds []*string `locationNameList:"SubnetIdentifier" type:"list" required:"true"` + + // The tag to be assigned to the subnet group. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateReplicationSubnetGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReplicationSubnetGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateReplicationSubnetGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateReplicationSubnetGroupInput"} + if s.ReplicationSubnetGroupDescription == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationSubnetGroupDescription")) + } + if s.ReplicationSubnetGroupIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationSubnetGroupIdentifier")) + } + if s.SubnetIds == nil { + invalidParams.Add(request.NewErrParamRequired("SubnetIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateReplicationSubnetGroupOutput struct { + _ struct{} `type:"structure"` + + // The replication subnet group that was created. + ReplicationSubnetGroup *ReplicationSubnetGroup `type:"structure"` +} + +// String returns the string representation +func (s CreateReplicationSubnetGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReplicationSubnetGroupOutput) GoString() string { + return s.String() +} + +type CreateReplicationTaskInput struct { + _ struct{} `type:"structure"` + + // The start time for the Change Data Capture (CDC) operation. + CdcStartTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The migration type. + MigrationType *string `type:"string" required:"true" enum:"MigrationTypeValue"` + + // The Amazon Resource Name (ARN) of the replication instance. + ReplicationInstanceArn *string `type:"string" required:"true"` + + // The replication task identifier. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens. + // + // First character must be a letter. + // + // Cannot end with a hyphen or contain two consecutive hyphens. + ReplicationTaskIdentifier *string `type:"string" required:"true"` + + // Settings for the task, such as target metadata settings. + ReplicationTaskSettings *string `type:"string"` + + // The Amazon Resource Name (ARN) string that uniquely identifies the endpoint. + SourceEndpointArn *string `type:"string" required:"true"` + + // The path of the JSON file that contains the table mappings. + TableMappings *string `type:"string" required:"true"` + + // Tags to be added to the replication instance. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // The Amazon Resource Name (ARN) string that uniquely identifies the endpoint. 
+ TargetEndpointArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateReplicationTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReplicationTaskInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateReplicationTaskInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateReplicationTaskInput"} + if s.MigrationType == nil { + invalidParams.Add(request.NewErrParamRequired("MigrationType")) + } + if s.ReplicationInstanceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationInstanceArn")) + } + if s.ReplicationTaskIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationTaskIdentifier")) + } + if s.SourceEndpointArn == nil { + invalidParams.Add(request.NewErrParamRequired("SourceEndpointArn")) + } + if s.TableMappings == nil { + invalidParams.Add(request.NewErrParamRequired("TableMappings")) + } + if s.TargetEndpointArn == nil { + invalidParams.Add(request.NewErrParamRequired("TargetEndpointArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateReplicationTaskOutput struct { + _ struct{} `type:"structure"` + + // The replication task that was created. + ReplicationTask *ReplicationTask `type:"structure"` +} + +// String returns the string representation +func (s CreateReplicationTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReplicationTaskOutput) GoString() string { + return s.String() +} + +type DeleteEndpointInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) string that uniquely identifies the endpoint. + EndpointArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteEndpointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteEndpointInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteEndpointInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteEndpointInput"} + if s.EndpointArn == nil { + invalidParams.Add(request.NewErrParamRequired("EndpointArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteEndpointOutput struct { + _ struct{} `type:"structure"` + + // The endpoint that was deleted. + Endpoint *Endpoint `type:"structure"` +} + +// String returns the string representation +func (s DeleteEndpointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteEndpointOutput) GoString() string { + return s.String() +} + +type DeleteReplicationInstanceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the replication instance to be deleted. + ReplicationInstanceArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteReplicationInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReplicationInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
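+// Editorial note: a short sketch of how these generated validators surface; it
+// is not part of the generated API. The SDK runs each input's Validate before
+// sending the request, and a missing required field yields a
+// request.ErrInvalidParams naming every offending parameter:
+//
+//	in := &DeleteReplicationInstanceInput{} // ReplicationInstanceArn left unset
+//	if err := in.Validate(); err != nil {
+//		fmt.Println(err) // reports the missing ReplicationInstanceArn
+//	}
+//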
+func (s *DeleteReplicationInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteReplicationInstanceInput"} + if s.ReplicationInstanceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationInstanceArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteReplicationInstanceOutput struct { + _ struct{} `type:"structure"` + + // The replication instance that was deleted. + ReplicationInstance *ReplicationInstance `type:"structure"` +} + +// String returns the string representation +func (s DeleteReplicationInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReplicationInstanceOutput) GoString() string { + return s.String() +} + +type DeleteReplicationSubnetGroupInput struct { + _ struct{} `type:"structure"` + + // The subnet group name of the replication instance. + ReplicationSubnetGroupIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteReplicationSubnetGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReplicationSubnetGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteReplicationSubnetGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteReplicationSubnetGroupInput"} + if s.ReplicationSubnetGroupIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationSubnetGroupIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteReplicationSubnetGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteReplicationSubnetGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReplicationSubnetGroupOutput) GoString() string { + return s.String() +} + +type DeleteReplicationTaskInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the replication task to be deleted. + ReplicationTaskArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteReplicationTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReplicationTaskInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteReplicationTaskInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteReplicationTaskInput"} + if s.ReplicationTaskArn == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationTaskArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteReplicationTaskOutput struct { + _ struct{} `type:"structure"` + + // The deleted replication task. 
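+	//
+	// Editorial note: an illustrative teardown, not part of the generated API.
+	// Since tasks reference endpoints and a replication instance, a cleanup
+	// sketch would plausibly delete in reverse dependency order (ARNs here are
+	// hypothetical variables):
+	//
+	//	svc.DeleteReplicationTask(&databasemigrationservice.DeleteReplicationTaskInput{ReplicationTaskArn: taskARN})
+	//	svc.DeleteEndpoint(&databasemigrationservice.DeleteEndpointInput{EndpointArn: srcARN})
+	//	svc.DeleteReplicationInstance(&databasemigrationservice.DeleteReplicationInstanceInput{ReplicationInstanceArn: instARN})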
+ ReplicationTask *ReplicationTask `type:"structure"` +} + +// String returns the string representation +func (s DeleteReplicationTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReplicationTaskOutput) GoString() string { + return s.String() +} + +type DescribeAccountAttributesInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeAccountAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAccountAttributesInput) GoString() string { + return s.String() +} + +type DescribeAccountAttributesOutput struct { + _ struct{} `type:"structure"` + + // Account quota information. + AccountQuotas []*AccountQuota `locationNameList:"AccountQuota" type:"list"` +} + +// String returns the string representation +func (s DescribeAccountAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAccountAttributesOutput) GoString() string { + return s.String() +} + +type DescribeConnectionsInput struct { + _ struct{} `type:"structure"` + + // The filters applied to the connection. + // + // Valid filter names: endpoint-arn | replication-instance-arn + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeConnectionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeConnectionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeConnectionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeConnectionsInput"} + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeConnectionsOutput struct { + _ struct{} `type:"structure"` + + // A description of the connections. + Connections []*Connection `locationNameList:"Connection" type:"list"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. 
+ Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeConnectionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeConnectionsOutput) GoString() string { + return s.String() +} + +type DescribeEndpointTypesInput struct { + _ struct{} `type:"structure"` + + // Filters applied to the describe action. + // + // Valid filter names: engine-name | endpoint-type + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeEndpointTypesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEndpointTypesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeEndpointTypesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeEndpointTypesInput"} + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeEndpointTypesOutput struct { + _ struct{} `type:"structure"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // The type of endpoints that are supported. + SupportedEndpointTypes []*SupportedEndpointType `locationNameList:"SupportedEndpointType" type:"list"` +} + +// String returns the string representation +func (s DescribeEndpointTypesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEndpointTypesOutput) GoString() string { + return s.String() +} + +type DescribeEndpointsInput struct { + _ struct{} `type:"structure"` + + // Filters applied to the describe action. + // + // Valid filter names: endpoint-arn | endpoint-type | endpoint-id | engine-name + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. 
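+	//
+	// Editorial note: an illustrative pagination loop, not part of the generated
+	// API. The Marker/MaxRecords pair pages through results the same way across
+	// the Describe* operations in this package:
+	//
+	//	in := &databasemigrationservice.DescribeEndpointsInput{MaxRecords: aws.Int64(20)}
+	//	for {
+	//		page, err := svc.DescribeEndpoints(in)
+	//		if err != nil {
+	//			break
+	//		}
+	//		// ... use page.Endpoints ...
+	//		if page.Marker == nil {
+	//			break
+	//		}
+	//		in.Marker = page.Marker
+	//	}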
+	MaxRecords *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s DescribeEndpointsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeEndpointsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeEndpointsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DescribeEndpointsInput"}
+	if s.Filters != nil {
+		for i, v := range s.Filters {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type DescribeEndpointsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Endpoint description.
+	Endpoints []*Endpoint `locationNameList:"Endpoint" type:"list"`
+
+	// An optional pagination token provided by a previous request. If this parameter
+	// is specified, the response includes only records beyond the marker, up to
+	// the value specified by MaxRecords.
+	Marker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeEndpointsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeEndpointsOutput) GoString() string {
+	return s.String()
+}
+
+type DescribeOrderableReplicationInstancesInput struct {
+	_ struct{} `type:"structure"`
+
+	// An optional pagination token provided by a previous request. If this parameter
+	// is specified, the response includes only records beyond the marker, up to
+	// the value specified by MaxRecords.
+	Marker *string `type:"string"`
+
+	// The maximum number of records to include in the response. If more records
+	// exist than the specified MaxRecords value, a pagination token called a marker
+	// is included in the response so that the remaining results can be retrieved.
+	//
+	// Default: 100
+	//
+	// Constraints: Minimum 20, maximum 100.
+	MaxRecords *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s DescribeOrderableReplicationInstancesInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeOrderableReplicationInstancesInput) GoString() string {
+	return s.String()
+}
+
+type DescribeOrderableReplicationInstancesOutput struct {
+	_ struct{} `type:"structure"`
+
+	// An optional pagination token provided by a previous request. If this parameter
+	// is specified, the response includes only records beyond the marker, up to
+	// the value specified by MaxRecords.
+	Marker *string `type:"string"`
+
+	// The orderable replication instances available.
+	OrderableReplicationInstances []*OrderableReplicationInstance `locationNameList:"OrderableReplicationInstance" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeOrderableReplicationInstancesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeOrderableReplicationInstancesOutput) GoString() string {
+	return s.String()
+}
+
+type DescribeRefreshSchemasStatusInput struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.
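+	//
+	// Editorial note: an illustrative status check, not part of the generated
+	// API (the endpoint ARN is hypothetical):
+	//
+	//	st, err := svc.DescribeRefreshSchemasStatus(&databasemigrationservice.DescribeRefreshSchemasStatusInput{
+	//		EndpointArn: aws.String("arn:aws:dms:us-east-1:123456789012:endpoint:EXAMPLE"),
+	//	})
+	//	if err == nil && st.RefreshSchemasStatus != nil {
+	//		fmt.Println(aws.StringValue(st.RefreshSchemasStatus.Status))
+	//	}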
+ EndpointArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeRefreshSchemasStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRefreshSchemasStatusInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeRefreshSchemasStatusInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeRefreshSchemasStatusInput"} + if s.EndpointArn == nil { + invalidParams.Add(request.NewErrParamRequired("EndpointArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeRefreshSchemasStatusOutput struct { + _ struct{} `type:"structure"` + + // The status of the schema. + RefreshSchemasStatus *RefreshSchemasStatus `type:"structure"` +} + +// String returns the string representation +func (s DescribeRefreshSchemasStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRefreshSchemasStatusOutput) GoString() string { + return s.String() +} + +type DescribeReplicationInstancesInput struct { + _ struct{} `type:"structure"` + + // Filters applied to the describe action. + // + // Valid filter names: replication-instance-arn | replication-instance-id | + // replication-instance-class | engine-version + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeReplicationInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReplicationInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeReplicationInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeReplicationInstancesInput"} + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeReplicationInstancesOutput struct { + _ struct{} `type:"structure"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // The replication instances described. 
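+	//
+	// Editorial note: an illustrative filtered describe, not part of the
+	// generated API; "replication-instance-id" is one of the valid filter names
+	// listed in the input struct's docs above:
+	//
+	//	out, err := svc.DescribeReplicationInstances(&databasemigrationservice.DescribeReplicationInstancesInput{
+	//		Filters: []*databasemigrationservice.Filter{{
+	//			Name:   aws.String("replication-instance-id"),
+	//			Values: []*string{aws.String("myrepinstance")},
+	//		}},
+	//	})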
+ ReplicationInstances []*ReplicationInstance `locationNameList:"ReplicationInstance" type:"list"` +} + +// String returns the string representation +func (s DescribeReplicationInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReplicationInstancesOutput) GoString() string { + return s.String() +} + +type DescribeReplicationSubnetGroupsInput struct { + _ struct{} `type:"structure"` + + // Filters applied to the describe action. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeReplicationSubnetGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReplicationSubnetGroupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeReplicationSubnetGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeReplicationSubnetGroupsInput"} + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeReplicationSubnetGroupsOutput struct { + _ struct{} `type:"structure"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // A description of the replication subnet groups. + ReplicationSubnetGroups []*ReplicationSubnetGroup `locationNameList:"ReplicationSubnetGroup" type:"list"` +} + +// String returns the string representation +func (s DescribeReplicationSubnetGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReplicationSubnetGroupsOutput) GoString() string { + return s.String() +} + +type DescribeReplicationTasksInput struct { + _ struct{} `type:"structure"` + + // Filters applied to the describe action. + // + // Valid filter names: replication-task-arn | replication-task-id | migration-type + // | endpoint-arn | replication-instance-arn + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. 
If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeReplicationTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReplicationTasksInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeReplicationTasksInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeReplicationTasksInput"} + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeReplicationTasksOutput struct { + _ struct{} `type:"structure"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // A description of the replication tasks. + ReplicationTasks []*ReplicationTask `locationNameList:"ReplicationTask" type:"list"` +} + +// String returns the string representation +func (s DescribeReplicationTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReplicationTasksOutput) GoString() string { + return s.String() +} + +type DescribeSchemasInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) string that uniquely identifies the endpoint. + EndpointArn *string `type:"string" required:"true"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeSchemasInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSchemasInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeSchemasInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeSchemasInput"} + if s.EndpointArn == nil { + invalidParams.Add(request.NewErrParamRequired("EndpointArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeSchemasOutput struct { + _ struct{} `type:"structure"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. 
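+	//
+	// Editorial note: an illustrative drain of all schema names, not part of
+	// the generated API (svc and arn are hypothetical variables):
+	//
+	//	var names []string
+	//	in := &databasemigrationservice.DescribeSchemasInput{EndpointArn: arn}
+	//	for {
+	//		out, err := svc.DescribeSchemas(in)
+	//		if err != nil {
+	//			break
+	//		}
+	//		for _, s := range out.Schemas {
+	//			names = append(names, aws.StringValue(s))
+	//		}
+	//		if out.Marker == nil {
+	//			break
+	//		}
+	//		in.Marker = out.Marker
+	//	}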
+ Marker *string `type:"string"` + + // The described schema. + Schemas []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeSchemasOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSchemasOutput) GoString() string { + return s.String() +} + +type DescribeTableStatisticsInput struct { + _ struct{} `type:"structure"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // The Amazon Resource Name (ARN) of the replication task. + ReplicationTaskArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeTableStatisticsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTableStatisticsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTableStatisticsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTableStatisticsInput"} + if s.ReplicationTaskArn == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationTaskArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeTableStatisticsOutput struct { + _ struct{} `type:"structure"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // The Amazon Resource Name (ARN) of the replication task. + ReplicationTaskArn *string `type:"string"` + + // The table statistics. + TableStatistics []*TableStatistics `type:"list"` +} + +// String returns the string representation +func (s DescribeTableStatisticsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTableStatisticsOutput) GoString() string { + return s.String() +} + +type Endpoint struct { + _ struct{} `type:"structure"` + + // The name of the database at the endpoint. + DatabaseName *string `type:"string"` + + // The Amazon Resource Name (ARN) string that uniquely identifies the endpoint. + EndpointArn *string `type:"string"` + + // The database endpoint identifier. Identifiers must begin with a letter; must + // contain only ASCII letters, digits, and hyphens; and must not end with a + // hyphen or contain two consecutive hyphens. + EndpointIdentifier *string `type:"string"` + + // The type of endpoint. + EndpointType *string `type:"string" enum:"ReplicationEndpointTypeValue"` + + // The database engine name. + EngineName *string `type:"string"` + + // Additional connection attributes used to connect to the endpoint. + ExtraConnectionAttributes *string `type:"string"` + + // The KMS key identifier that will be used to encrypt the connection parameters. 
+ // If you do not specify a value for the KmsKeyId parameter, then AWS DMS will + // use your default encryption key. AWS KMS creates the default encryption key + // for your AWS account. Your AWS account has a different default encryption + // key for each AWS region. + KmsKeyId *string `type:"string"` + + // The port value used to access the endpoint. + Port *int64 `type:"integer"` + + // The name of the server at the endpoint. + ServerName *string `type:"string"` + + // The status of the endpoint. + Status *string `type:"string"` + + // The user name used to connect to the endpoint. + Username *string `type:"string"` +} + +// String returns the string representation +func (s Endpoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Endpoint) GoString() string { + return s.String() +} + +type Filter struct { + _ struct{} `type:"structure"` + + // The name of the filter. + Name *string `type:"string" required:"true"` + + // The filter value. + Values []*string `locationNameList:"Value" type:"list" required:"true"` +} + +// String returns the string representation +func (s Filter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Filter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Filter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Filter"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Values == nil { + invalidParams.Add(request.NewErrParamRequired("Values")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) string that uniquely identifies the AWS DMS + // resource. + ResourceArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // A list of tags for the resource. + TagList []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +type ModifyEndpointInput struct { + _ struct{} `type:"structure"` + + // The name of the endpoint database. + DatabaseName *string `type:"string"` + + // The Amazon Resource Name (ARN) string that uniquely identifies the endpoint. + EndpointArn *string `type:"string" required:"true"` + + // The database endpoint identifier. Identifiers must begin with a letter; must + // contain only ASCII letters, digits, and hyphens; and must not end with a + // hyphen or contain two consecutive hyphens. 
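+	//
+	// Editorial note: an illustrative partial modify, not part of the generated
+	// API. Assuming the usual Modify semantics, only the fields being changed
+	// need to be set alongside the required EndpointArn (values hypothetical):
+	//
+	//	out, err := svc.ModifyEndpoint(&databasemigrationservice.ModifyEndpointInput{
+	//		EndpointArn: aws.String("arn:aws:dms:us-east-1:123456789012:endpoint:EXAMPLE"),
+	//		ServerName:  aws.String("db.example.internal"),
+	//		Port:        aws.Int64(5432),
+	//	})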
+	EndpointIdentifier *string `type:"string"`
+
+	// The type of endpoint.
+	EndpointType *string `type:"string" enum:"ReplicationEndpointTypeValue"`
+
+	// The type of engine for the endpoint. Valid values include MYSQL, ORACLE,
+	// POSTGRES, MARIADB, AURORA, REDSHIFT, and SQLSERVER.
+	EngineName *string `type:"string"`
+
+	// Additional attributes associated with the connection.
+	ExtraConnectionAttributes *string `type:"string"`
+
+	// The password to be used to log in to the endpoint database.
+	Password *string `type:"string"`
+
+	// The port used by the endpoint database.
+	Port *int64 `type:"integer"`
+
+	// The name of the server where the endpoint database resides.
+	ServerName *string `type:"string"`
+
+	// The user name to be used to log in to the endpoint database.
+	Username *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ModifyEndpointInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyEndpointInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ModifyEndpointInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ModifyEndpointInput"}
+	if s.EndpointArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("EndpointArn"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type ModifyEndpointOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The modified endpoint.
+	Endpoint *Endpoint `type:"structure"`
+}
+
+// String returns the string representation
+func (s ModifyEndpointOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyEndpointOutput) GoString() string {
+	return s.String()
+}
+
+type ModifyReplicationInstanceInput struct {
+	_ struct{} `type:"structure"`
+
+	// The amount of storage (in gigabytes) to be allocated for the replication
+	// instance.
+	AllocatedStorage *int64 `type:"integer"`
+
+	// Indicates that major version upgrades are allowed. Changing this parameter
+	// does not result in an outage and the change is asynchronously applied as
+	// soon as possible.
+	//
+	// Constraints: This parameter must be set to true when specifying a value
+	// for the EngineVersion parameter that is a different major version than the
+	// replication instance's current version.
+	AllowMajorVersionUpgrade *bool `type:"boolean"`
+
+	// Indicates whether the changes should be applied immediately or during the
+	// next maintenance window.
+	ApplyImmediately *bool `type:"boolean"`
+
+	// Indicates that minor version upgrades will be applied automatically to the
+	// replication instance during the maintenance window. Changing this parameter
+	// does not result in an outage except in the following case and the change
+	// is asynchronously applied as soon as possible. An outage will result if this
+	// parameter is set to true during the maintenance window, and a newer minor
+	// version is available, and AWS DMS has enabled auto patching for that engine
+	// version.
+	AutoMinorVersionUpgrade *bool `type:"boolean"`
+
+	// The engine version number of the replication instance.
+	EngineVersion *string `type:"string"`
+
+	// The weekly time range (in UTC) during which system maintenance can occur,
+	// which might result in an outage.
Changing this parameter does not result + // in an outage, except in the following situation, and the change is asynchronously + // applied as soon as possible. If moving this window to the current time, there + // must be at least 30 minutes between the current time and end of the window + // to ensure pending changes are applied. + // + // Default: Uses existing setting + // + // Format: ddd:hh24:mi-ddd:hh24:mi + // + // Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun + // + // Constraints: Must be at least 30 minutes + PreferredMaintenanceWindow *string `type:"string"` + + // The Amazon Resource Name (ARN) of the replication instance. + ReplicationInstanceArn *string `type:"string" required:"true"` + + // The compute and memory capacity of the replication instance. + // + // Valid Values: dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large + // | dms.c4.large | dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge + ReplicationInstanceClass *string `type:"string"` + + // The replication instance identifier. This parameter is stored as a lowercase + // string. + ReplicationInstanceIdentifier *string `type:"string"` + + // Specifies the VPC security group to be used with the replication instance. + // The VPC security group must work with the VPC containing the replication + // instance. + VpcSecurityGroupIds []*string `locationNameList:"VpcSecurityGroupId" type:"list"` +} + +// String returns the string representation +func (s ModifyReplicationInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyReplicationInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyReplicationInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyReplicationInstanceInput"} + if s.ReplicationInstanceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationInstanceArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ModifyReplicationInstanceOutput struct { + _ struct{} `type:"structure"` + + // The modified replication instance. + ReplicationInstance *ReplicationInstance `type:"structure"` +} + +// String returns the string representation +func (s ModifyReplicationInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyReplicationInstanceOutput) GoString() string { + return s.String() +} + +type ModifyReplicationSubnetGroupInput struct { + _ struct{} `type:"structure"` + + // The description of the replication instance subnet group. + ReplicationSubnetGroupDescription *string `type:"string"` + + // The name of the replication instance subnet group. + ReplicationSubnetGroupIdentifier *string `type:"string" required:"true"` + + // A list of subnet IDs. + SubnetIds []*string `locationNameList:"SubnetIdentifier" type:"list" required:"true"` +} + +// String returns the string representation +func (s ModifyReplicationSubnetGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyReplicationSubnetGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
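+// Editorial note: an illustrative instance-class change, not part of the
+// generated API. Per the ApplyImmediately docs above, setting it applies the
+// change now rather than waiting for the next maintenance window (ARN
+// hypothetical):
+//
+//	out, err := svc.ModifyReplicationInstance(&databasemigrationservice.ModifyReplicationInstanceInput{
+//		ReplicationInstanceArn:   aws.String("arn:aws:dms:us-east-1:123456789012:rep:EXAMPLE"),
+//		ReplicationInstanceClass: aws.String("dms.c4.large"),
+//		ApplyImmediately:         aws.Bool(true),
+//	})
+//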
+func (s *ModifyReplicationSubnetGroupInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ModifyReplicationSubnetGroupInput"}
+	if s.ReplicationSubnetGroupIdentifier == nil {
+		invalidParams.Add(request.NewErrParamRequired("ReplicationSubnetGroupIdentifier"))
+	}
+	if s.SubnetIds == nil {
+		invalidParams.Add(request.NewErrParamRequired("SubnetIds"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type ModifyReplicationSubnetGroupOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The modified replication subnet group.
+	ReplicationSubnetGroup *ReplicationSubnetGroup `type:"structure"`
+}
+
+// String returns the string representation
+func (s ModifyReplicationSubnetGroupOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyReplicationSubnetGroupOutput) GoString() string {
+	return s.String()
+}
+
+type OrderableReplicationInstance struct {
+	_ struct{} `type:"structure"`
+
+	// The default amount of storage (in gigabytes) that is allocated for the replication
+	// instance.
+	DefaultAllocatedStorage *int64 `type:"integer"`
+
+	// The version of the replication engine.
+	EngineVersion *string `type:"string"`
+
+	// The amount of storage (in gigabytes) that is allocated for the replication
+	// instance.
+	IncludedAllocatedStorage *int64 `type:"integer"`
+
+	// The maximum amount of storage (in gigabytes) that can be allocated for the
+	// replication instance.
+	MaxAllocatedStorage *int64 `type:"integer"`
+
+	// The minimum amount of storage (in gigabytes) that can be allocated for the
+	// replication instance.
+	MinAllocatedStorage *int64 `type:"integer"`
+
+	// The compute and memory capacity of the replication instance.
+	//
+	// Valid Values: dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large
+	// | dms.c4.large | dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge
+	ReplicationInstanceClass *string `type:"string"`
+
+	// The type of storage used by the replication instance.
+	StorageType *string `type:"string"`
+}
+
+// String returns the string representation
+func (s OrderableReplicationInstance) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s OrderableReplicationInstance) GoString() string {
+	return s.String()
+}
+
+type RefreshSchemasInput struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.
+	EndpointArn *string `type:"string" required:"true"`
+
+	// The Amazon Resource Name (ARN) of the replication instance.
+	ReplicationInstanceArn *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RefreshSchemasInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RefreshSchemasInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RefreshSchemasInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "RefreshSchemasInput"}
+	if s.EndpointArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("EndpointArn"))
+	}
+	if s.ReplicationInstanceArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("ReplicationInstanceArn"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type RefreshSchemasOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The status of the refreshed schema.
+	RefreshSchemasStatus *RefreshSchemasStatus `type:"structure"`
+}
+
+// String returns the string representation
+func (s RefreshSchemasOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RefreshSchemasOutput) GoString() string {
+	return s.String()
+}
+
+type RefreshSchemasStatus struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.
+	EndpointArn *string `type:"string"`
+
+	// The last failure message for the schema.
+	LastFailureMessage *string `type:"string"`
+
+	// The date the schema was last refreshed.
+	LastRefreshDate *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+	// The Amazon Resource Name (ARN) of the replication instance.
+	ReplicationInstanceArn *string `type:"string"`
+
+	// The status of the schema.
+	Status *string `type:"string" enum:"RefreshSchemasStatusTypeValue"`
+}
+
+// String returns the string representation
+func (s RefreshSchemasStatus) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RefreshSchemasStatus) GoString() string {
+	return s.String()
+}
+
+type RemoveTagsFromResourceInput struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) of the AWS DMS resource the tag is to be
+	// removed from.
+	ResourceArn *string `type:"string" required:"true"`
+
+	// The tag keys (names) of the tags to be removed.
+	TagKeys []*string `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s RemoveTagsFromResourceInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RemoveTagsFromResourceInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RemoveTagsFromResourceInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "RemoveTagsFromResourceInput"}
+	if s.ResourceArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("ResourceArn"))
+	}
+	if s.TagKeys == nil {
+		invalidParams.Add(request.NewErrParamRequired("TagKeys"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type RemoveTagsFromResourceOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s RemoveTagsFromResourceOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RemoveTagsFromResourceOutput) GoString() string {
+	return s.String()
+}
+
+type ReplicationInstance struct {
+	_ struct{} `type:"structure"`
+
+	// The amount of storage (in gigabytes) that is allocated for the replication
+	// instance.
+	AllocatedStorage *int64 `type:"integer"`
+
+	// Boolean value indicating if minor version upgrades will be automatically
+	// applied to the instance.
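+	//
+	// Editorial note: an illustrative way to discover valid instance classes
+	// and storage bounds before creating or modifying an instance, not part of
+	// the generated API:
+	//
+	//	out, err := svc.DescribeOrderableReplicationInstances(&databasemigrationservice.DescribeOrderableReplicationInstancesInput{})
+	//	if err == nil {
+	//		for _, o := range out.OrderableReplicationInstances {
+	//			fmt.Println(aws.StringValue(o.ReplicationInstanceClass),
+	//				aws.Int64Value(o.MinAllocatedStorage), aws.Int64Value(o.MaxAllocatedStorage))
+	//		}
+	//	}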
+ AutoMinorVersionUpgrade *bool `type:"boolean"` + + // The Availability Zone for the instance. + AvailabilityZone *string `type:"string"` + + // The engine version number of the replication instance. + EngineVersion *string `type:"string"` + + // The time the replication instance was created. + InstanceCreateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The KMS key identifier that is used to encrypt the content on the replication + // instance. If you do not specify a value for the KmsKeyId parameter, then + // AWS DMS will use your default encryption key. AWS KMS creates the default + // encryption key for your AWS account. Your AWS account has a different default + // encryption key for each AWS region. + KmsKeyId *string `type:"string"` + + // The pending modification values. + PendingModifiedValues *ReplicationPendingModifiedValues `type:"structure"` + + // The maintenance window times for the replication instance. + PreferredMaintenanceWindow *string `type:"string"` + + // Specifies the accessibility options for the replication instance. A value + // of true represents an instance with a public IP address. A value of false + // represents an instance with a private IP address. The default value is true. + PubliclyAccessible *bool `type:"boolean"` + + // The Amazon Resource Name (ARN) of the replication instance. + ReplicationInstanceArn *string `type:"string"` + + // The compute and memory capacity of the replication instance. + // + // Valid Values: dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large + // | dms.c4.large | dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge + ReplicationInstanceClass *string `type:"string"` + + // The replication instance identifier. This parameter is stored as a lowercase + // string. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens. + // + // First character must be a letter. + // + // Cannot end with a hyphen or contain two consecutive hyphens. + // + // Example: myrepinstance + ReplicationInstanceIdentifier *string `type:"string"` + + // The private IP address of the replication instance. + ReplicationInstancePrivateIpAddress *string `type:"string"` + + // The public IP address of the replication instance. + ReplicationInstancePublicIpAddress *string `type:"string"` + + // The status of the replication instance. + ReplicationInstanceStatus *string `type:"string"` + + // The subnet group for the replication instance. + ReplicationSubnetGroup *ReplicationSubnetGroup `type:"structure"` + + // The VPC security group for the instance. + VpcSecurityGroups []*VpcSecurityGroupMembership `locationNameList:"VpcSecurityGroupMembership" type:"list"` +} + +// String returns the string representation +func (s ReplicationInstance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationInstance) GoString() string { + return s.String() +} + +type ReplicationPendingModifiedValues struct { + _ struct{} `type:"structure"` + + // The amount of storage (in gigabytes) that is allocated for the replication + // instance. + AllocatedStorage *int64 `type:"integer"` + + // The engine version number of the replication instance. + EngineVersion *string `type:"string"` + + // The compute and memory capacity of the replication instance. 
+ // + // Valid Values: dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large + // | dms.c4.large | dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge + ReplicationInstanceClass *string `type:"string"` +} + +// String returns the string representation +func (s ReplicationPendingModifiedValues) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationPendingModifiedValues) GoString() string { + return s.String() +} + +type ReplicationSubnetGroup struct { + _ struct{} `type:"structure"` + + // The description of the replication subnet group. + ReplicationSubnetGroupDescription *string `type:"string"` + + // The identifier of the replication instance subnet group. + ReplicationSubnetGroupIdentifier *string `type:"string"` + + // The status of the subnet group. + SubnetGroupStatus *string `type:"string"` + + // The subnets that are in the subnet group. + Subnets []*Subnet `locationNameList:"Subnet" type:"list"` + + // The ID of the VPC. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s ReplicationSubnetGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationSubnetGroup) GoString() string { + return s.String() +} + +type ReplicationTask struct { + _ struct{} `type:"structure"` + + // The last error (failure) message generated for the replication instance. + LastFailureMessage *string `type:"string"` + + // The type of migration. + MigrationType *string `type:"string" enum:"MigrationTypeValue"` + + // The Amazon Resource Name (ARN) of the replication instance. + ReplicationInstanceArn *string `type:"string"` + + // The Amazon Resource Name (ARN) of the replication task. + ReplicationTaskArn *string `type:"string"` + + // The date the replication task was created. + ReplicationTaskCreationDate *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The replication task identifier. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens. + // + // First character must be a letter. + // + // Cannot end with a hyphen or contain two consecutive hyphens. + ReplicationTaskIdentifier *string `type:"string"` + + // The settings for the replication task. + ReplicationTaskSettings *string `type:"string"` + + // The date the replication task is scheduled to start. + ReplicationTaskStartDate *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The statistics for the task, including elapsed time, tables loaded, and table + // errors. + ReplicationTaskStats *ReplicationTaskStats `type:"structure"` + + // The Amazon Resource Name (ARN) string that uniquely identifies the endpoint. + SourceEndpointArn *string `type:"string"` + + // The status of the replication task. + Status *string `type:"string"` + + // Table mappings specified in the task. + TableMappings *string `type:"string"` + + // The Amazon Resource Name (ARN) string that uniquely identifies the endpoint. + TargetEndpointArn *string `type:"string"` +} + +// String returns the string representation +func (s ReplicationTask) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationTask) GoString() string { + return s.String() +} + +type ReplicationTaskStats struct { + _ struct{} `type:"structure"` + + // The elapsed time of the task, in milliseconds. + ElapsedTimeMillis *int64 `type:"long"` + + // The percent complete for the full load migration task. 
+	FullLoadProgressPercent *int64 `type:"integer"`
+
+	// The number of errors that have occurred during this task.
+	TablesErrored *int64 `type:"integer"`
+
+	// The number of tables loaded for this task.
+	TablesLoaded *int64 `type:"integer"`
+
+	// The number of tables currently loading for this task.
+	TablesLoading *int64 `type:"integer"`
+
+	// The number of tables queued for this task.
+	TablesQueued *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s ReplicationTaskStats) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplicationTaskStats) GoString() string {
+	return s.String()
+}
+
+type StartReplicationTaskInput struct {
+	_ struct{} `type:"structure"`
+
+	// The start time for the Change Data Capture (CDC) operation.
+	CdcStartTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+	// The Amazon Resource Name (ARN) of the replication task to be started.
+	ReplicationTaskArn *string `type:"string" required:"true"`
+
+	// The type of replication task.
+	StartReplicationTaskType *string `type:"string" required:"true" enum:"StartReplicationTaskTypeValue"`
+}
+
+// String returns the string representation
+func (s StartReplicationTaskInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StartReplicationTaskInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *StartReplicationTaskInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "StartReplicationTaskInput"}
+	if s.ReplicationTaskArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("ReplicationTaskArn"))
+	}
+	if s.StartReplicationTaskType == nil {
+		invalidParams.Add(request.NewErrParamRequired("StartReplicationTaskType"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type StartReplicationTaskOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The replication task started.
+	ReplicationTask *ReplicationTask `type:"structure"`
+}
+
+// String returns the string representation
+func (s StartReplicationTaskOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StartReplicationTaskOutput) GoString() string {
+	return s.String()
+}
+
+type StopReplicationTaskInput struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) of the replication task to be stopped.
+	ReplicationTaskArn *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s StopReplicationTaskInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StopReplicationTaskInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *StopReplicationTaskInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "StopReplicationTaskInput"}
+	if s.ReplicationTaskArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("ReplicationTaskArn"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type StopReplicationTaskOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The replication task stopped.
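+	//
+	// Editorial note: an illustrative start/stop pair, not part of the generated
+	// API; "start-replication" is assumed to be one of the
+	// StartReplicationTaskTypeValue enum values, and taskARN is hypothetical:
+	//
+	//	svc.StartReplicationTask(&databasemigrationservice.StartReplicationTaskInput{
+	//		ReplicationTaskArn:       taskARN,
+	//		StartReplicationTaskType: aws.String("start-replication"),
+	//	})
+	//	svc.StopReplicationTask(&databasemigrationservice.StopReplicationTaskInput{ReplicationTaskArn: taskARN})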
+    ReplicationTask *ReplicationTask `type:"structure"`
+}
+
+// String returns the string representation
+func (s StopReplicationTaskOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StopReplicationTaskOutput) GoString() string {
+    return s.String()
+}
+
+type Subnet struct {
+    _ struct{} `type:"structure"`
+
+    // The Availability Zone of the subnet.
+    SubnetAvailabilityZone *AvailabilityZone `type:"structure"`
+
+    // The subnet identifier.
+    SubnetIdentifier *string `type:"string"`
+
+    // The status of the subnet.
+    SubnetStatus *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Subnet) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Subnet) GoString() string {
+    return s.String()
+}
+
+type SupportedEndpointType struct {
+    _ struct{} `type:"structure"`
+
+    // The type of endpoint.
+    EndpointType *string `type:"string" enum:"ReplicationEndpointTypeValue"`
+
+    // The database engine name.
+    EngineName *string `type:"string"`
+
+    // Indicates if Change Data Capture (CDC) is supported.
+    SupportsCDC *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s SupportedEndpointType) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SupportedEndpointType) GoString() string {
+    return s.String()
+}
+
+type TableStatistics struct {
+    _ struct{} `type:"structure"`
+
+    // The Data Definition Language (DDL) used to build and modify the structure
+    // of your tables.
+    Ddls *int64 `type:"long"`
+
+    // The number of delete actions performed on a table.
+    Deletes *int64 `type:"long"`
+
+    // The number of rows added during the Full Load operation.
+    FullLoadRows *int64 `type:"long"`
+
+    // The number of insert actions performed on a table.
+    Inserts *int64 `type:"long"`
+
+    // The last time the table was updated.
+    LastUpdateTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+    // The schema name.
+    SchemaName *string `type:"string"`
+
+    // The name of the table.
+    TableName *string `type:"string"`
+
+    // The state of the table.
+    TableState *string `type:"string"`
+
+    // The number of update actions performed on a table.
+    Updates *int64 `type:"long"`
+}
+
+// String returns the string representation
+func (s TableStatistics) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TableStatistics) GoString() string {
+    return s.String()
+}
+
+type Tag struct {
+    _ struct{} `type:"structure"`
+
+    // A key is the required name of the tag. The string value can be from 1 to
+    // 128 Unicode characters in length and cannot be prefixed with "aws:" or "dms:".
+    // The string can contain only the set of Unicode letters, digits, white-space,
+    // '_', '.', '/', '=', '+', '-' (Java regex: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$").
+    Key *string `type:"string"`
+
+    // A value is the optional value of the tag. The string value can be from 1
+    // to 256 Unicode characters in length and cannot be prefixed with "aws:" or
+    // "dms:". The string can contain only the set of Unicode letters, digits,
+    // white-space, '_', '.', '/', '=', '+', '-' (Java regex: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$").
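+    //
+    // Editor's note: a minimal client-side check of the documented constraint,
+    // assuming Go's regexp package (a sketch, not part of the generated API;
+    // Go's RE2 syntax accepts the same \p{...} classes as the Java regex):
+    //
+    //  tagRe := regexp.MustCompile(`^([\p{L}\p{Z}\p{N}_.:/=+\-]*)$`)
+    //  if !tagRe.MatchString(aws.StringValue(s.Value)) {
+    //      // reject the tag before calling AddTagsToResource
+    //  }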
+    Value *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Tag) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Tag) GoString() string {
+    return s.String()
+}
+
+type TestConnectionInput struct {
+    _ struct{} `type:"structure"`
+
+    // The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.
+    EndpointArn *string `type:"string" required:"true"`
+
+    // The Amazon Resource Name (ARN) of the replication instance.
+    ReplicationInstanceArn *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s TestConnectionInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TestConnectionInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *TestConnectionInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "TestConnectionInput"}
+    if s.EndpointArn == nil {
+        invalidParams.Add(request.NewErrParamRequired("EndpointArn"))
+    }
+    if s.ReplicationInstanceArn == nil {
+        invalidParams.Add(request.NewErrParamRequired("ReplicationInstanceArn"))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+type TestConnectionOutput struct {
+    _ struct{} `type:"structure"`
+
+    // The connection tested.
+    Connection *Connection `type:"structure"`
+}
+
+// String returns the string representation
+func (s TestConnectionOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TestConnectionOutput) GoString() string {
+    return s.String()
+}
+
+type VpcSecurityGroupMembership struct {
+    _ struct{} `type:"structure"`
+
+    // The status of the VPC security group.
+    Status *string `type:"string"`
+
+    // The VPC security group ID.
+ VpcSecurityGroupId *string `type:"string"` +} + +// String returns the string representation +func (s VpcSecurityGroupMembership) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcSecurityGroupMembership) GoString() string { + return s.String() +} + +const ( + // @enum MigrationTypeValue + MigrationTypeValueFullLoad = "full-load" + // @enum MigrationTypeValue + MigrationTypeValueCdc = "cdc" + // @enum MigrationTypeValue + MigrationTypeValueFullLoadAndCdc = "full-load-and-cdc" +) + +const ( + // @enum RefreshSchemasStatusTypeValue + RefreshSchemasStatusTypeValueSuccessful = "successful" + // @enum RefreshSchemasStatusTypeValue + RefreshSchemasStatusTypeValueFailed = "failed" + // @enum RefreshSchemasStatusTypeValue + RefreshSchemasStatusTypeValueRefreshing = "refreshing" +) + +const ( + // @enum ReplicationEndpointTypeValue + ReplicationEndpointTypeValueSource = "source" + // @enum ReplicationEndpointTypeValue + ReplicationEndpointTypeValueTarget = "target" +) + +const ( + // @enum StartReplicationTaskTypeValue + StartReplicationTaskTypeValueStartReplication = "start-replication" + // @enum StartReplicationTaskTypeValue + StartReplicationTaskTypeValueResumeProcessing = "resume-processing" + // @enum StartReplicationTaskTypeValue + StartReplicationTaskTypeValueReloadTarget = "reload-target" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/databasemigrationserviceiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/databasemigrationserviceiface/interface.go new file mode 100644 index 000000000..8c335be72 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/databasemigrationserviceiface/interface.go @@ -0,0 +1,130 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package databasemigrationserviceiface provides an interface for the AWS Database Migration Service. +package databasemigrationserviceiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/databasemigrationservice" +) + +// DatabaseMigrationServiceAPI is the interface type for databasemigrationservice.DatabaseMigrationService. 
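+//
+// Editor's note: the practical use of an *iface package like this one is
+// stubbing the client in unit tests; a minimal sketch (mockDMS and the
+// canned output are illustrative, not part of the SDK):
+//
+//	type mockDMS struct {
+//		databasemigrationserviceiface.DatabaseMigrationServiceAPI
+//	}
+//
+//	func (m *mockDMS) TestConnection(in *databasemigrationservice.TestConnectionInput) (*databasemigrationservice.TestConnectionOutput, error) {
+//		return &databasemigrationservice.TestConnectionOutput{}, nil
+//	}
+//
+// Embedding the interface means only the methods the test exercises need
+// to be overridden; any other call panics, which surfaces unexpected use.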
+type DatabaseMigrationServiceAPI interface { + AddTagsToResourceRequest(*databasemigrationservice.AddTagsToResourceInput) (*request.Request, *databasemigrationservice.AddTagsToResourceOutput) + + AddTagsToResource(*databasemigrationservice.AddTagsToResourceInput) (*databasemigrationservice.AddTagsToResourceOutput, error) + + CreateEndpointRequest(*databasemigrationservice.CreateEndpointInput) (*request.Request, *databasemigrationservice.CreateEndpointOutput) + + CreateEndpoint(*databasemigrationservice.CreateEndpointInput) (*databasemigrationservice.CreateEndpointOutput, error) + + CreateReplicationInstanceRequest(*databasemigrationservice.CreateReplicationInstanceInput) (*request.Request, *databasemigrationservice.CreateReplicationInstanceOutput) + + CreateReplicationInstance(*databasemigrationservice.CreateReplicationInstanceInput) (*databasemigrationservice.CreateReplicationInstanceOutput, error) + + CreateReplicationSubnetGroupRequest(*databasemigrationservice.CreateReplicationSubnetGroupInput) (*request.Request, *databasemigrationservice.CreateReplicationSubnetGroupOutput) + + CreateReplicationSubnetGroup(*databasemigrationservice.CreateReplicationSubnetGroupInput) (*databasemigrationservice.CreateReplicationSubnetGroupOutput, error) + + CreateReplicationTaskRequest(*databasemigrationservice.CreateReplicationTaskInput) (*request.Request, *databasemigrationservice.CreateReplicationTaskOutput) + + CreateReplicationTask(*databasemigrationservice.CreateReplicationTaskInput) (*databasemigrationservice.CreateReplicationTaskOutput, error) + + DeleteEndpointRequest(*databasemigrationservice.DeleteEndpointInput) (*request.Request, *databasemigrationservice.DeleteEndpointOutput) + + DeleteEndpoint(*databasemigrationservice.DeleteEndpointInput) (*databasemigrationservice.DeleteEndpointOutput, error) + + DeleteReplicationInstanceRequest(*databasemigrationservice.DeleteReplicationInstanceInput) (*request.Request, *databasemigrationservice.DeleteReplicationInstanceOutput) + + DeleteReplicationInstance(*databasemigrationservice.DeleteReplicationInstanceInput) (*databasemigrationservice.DeleteReplicationInstanceOutput, error) + + DeleteReplicationSubnetGroupRequest(*databasemigrationservice.DeleteReplicationSubnetGroupInput) (*request.Request, *databasemigrationservice.DeleteReplicationSubnetGroupOutput) + + DeleteReplicationSubnetGroup(*databasemigrationservice.DeleteReplicationSubnetGroupInput) (*databasemigrationservice.DeleteReplicationSubnetGroupOutput, error) + + DeleteReplicationTaskRequest(*databasemigrationservice.DeleteReplicationTaskInput) (*request.Request, *databasemigrationservice.DeleteReplicationTaskOutput) + + DeleteReplicationTask(*databasemigrationservice.DeleteReplicationTaskInput) (*databasemigrationservice.DeleteReplicationTaskOutput, error) + + DescribeAccountAttributesRequest(*databasemigrationservice.DescribeAccountAttributesInput) (*request.Request, *databasemigrationservice.DescribeAccountAttributesOutput) + + DescribeAccountAttributes(*databasemigrationservice.DescribeAccountAttributesInput) (*databasemigrationservice.DescribeAccountAttributesOutput, error) + + DescribeConnectionsRequest(*databasemigrationservice.DescribeConnectionsInput) (*request.Request, *databasemigrationservice.DescribeConnectionsOutput) + + DescribeConnections(*databasemigrationservice.DescribeConnectionsInput) (*databasemigrationservice.DescribeConnectionsOutput, error) + + DescribeEndpointTypesRequest(*databasemigrationservice.DescribeEndpointTypesInput) (*request.Request, 
*databasemigrationservice.DescribeEndpointTypesOutput) + + DescribeEndpointTypes(*databasemigrationservice.DescribeEndpointTypesInput) (*databasemigrationservice.DescribeEndpointTypesOutput, error) + + DescribeEndpointsRequest(*databasemigrationservice.DescribeEndpointsInput) (*request.Request, *databasemigrationservice.DescribeEndpointsOutput) + + DescribeEndpoints(*databasemigrationservice.DescribeEndpointsInput) (*databasemigrationservice.DescribeEndpointsOutput, error) + + DescribeOrderableReplicationInstancesRequest(*databasemigrationservice.DescribeOrderableReplicationInstancesInput) (*request.Request, *databasemigrationservice.DescribeOrderableReplicationInstancesOutput) + + DescribeOrderableReplicationInstances(*databasemigrationservice.DescribeOrderableReplicationInstancesInput) (*databasemigrationservice.DescribeOrderableReplicationInstancesOutput, error) + + DescribeRefreshSchemasStatusRequest(*databasemigrationservice.DescribeRefreshSchemasStatusInput) (*request.Request, *databasemigrationservice.DescribeRefreshSchemasStatusOutput) + + DescribeRefreshSchemasStatus(*databasemigrationservice.DescribeRefreshSchemasStatusInput) (*databasemigrationservice.DescribeRefreshSchemasStatusOutput, error) + + DescribeReplicationInstancesRequest(*databasemigrationservice.DescribeReplicationInstancesInput) (*request.Request, *databasemigrationservice.DescribeReplicationInstancesOutput) + + DescribeReplicationInstances(*databasemigrationservice.DescribeReplicationInstancesInput) (*databasemigrationservice.DescribeReplicationInstancesOutput, error) + + DescribeReplicationSubnetGroupsRequest(*databasemigrationservice.DescribeReplicationSubnetGroupsInput) (*request.Request, *databasemigrationservice.DescribeReplicationSubnetGroupsOutput) + + DescribeReplicationSubnetGroups(*databasemigrationservice.DescribeReplicationSubnetGroupsInput) (*databasemigrationservice.DescribeReplicationSubnetGroupsOutput, error) + + DescribeReplicationTasksRequest(*databasemigrationservice.DescribeReplicationTasksInput) (*request.Request, *databasemigrationservice.DescribeReplicationTasksOutput) + + DescribeReplicationTasks(*databasemigrationservice.DescribeReplicationTasksInput) (*databasemigrationservice.DescribeReplicationTasksOutput, error) + + DescribeSchemasRequest(*databasemigrationservice.DescribeSchemasInput) (*request.Request, *databasemigrationservice.DescribeSchemasOutput) + + DescribeSchemas(*databasemigrationservice.DescribeSchemasInput) (*databasemigrationservice.DescribeSchemasOutput, error) + + DescribeTableStatisticsRequest(*databasemigrationservice.DescribeTableStatisticsInput) (*request.Request, *databasemigrationservice.DescribeTableStatisticsOutput) + + DescribeTableStatistics(*databasemigrationservice.DescribeTableStatisticsInput) (*databasemigrationservice.DescribeTableStatisticsOutput, error) + + ListTagsForResourceRequest(*databasemigrationservice.ListTagsForResourceInput) (*request.Request, *databasemigrationservice.ListTagsForResourceOutput) + + ListTagsForResource(*databasemigrationservice.ListTagsForResourceInput) (*databasemigrationservice.ListTagsForResourceOutput, error) + + ModifyEndpointRequest(*databasemigrationservice.ModifyEndpointInput) (*request.Request, *databasemigrationservice.ModifyEndpointOutput) + + ModifyEndpoint(*databasemigrationservice.ModifyEndpointInput) (*databasemigrationservice.ModifyEndpointOutput, error) + + ModifyReplicationInstanceRequest(*databasemigrationservice.ModifyReplicationInstanceInput) (*request.Request, 
*databasemigrationservice.ModifyReplicationInstanceOutput) + + ModifyReplicationInstance(*databasemigrationservice.ModifyReplicationInstanceInput) (*databasemigrationservice.ModifyReplicationInstanceOutput, error) + + ModifyReplicationSubnetGroupRequest(*databasemigrationservice.ModifyReplicationSubnetGroupInput) (*request.Request, *databasemigrationservice.ModifyReplicationSubnetGroupOutput) + + ModifyReplicationSubnetGroup(*databasemigrationservice.ModifyReplicationSubnetGroupInput) (*databasemigrationservice.ModifyReplicationSubnetGroupOutput, error) + + RefreshSchemasRequest(*databasemigrationservice.RefreshSchemasInput) (*request.Request, *databasemigrationservice.RefreshSchemasOutput) + + RefreshSchemas(*databasemigrationservice.RefreshSchemasInput) (*databasemigrationservice.RefreshSchemasOutput, error) + + RemoveTagsFromResourceRequest(*databasemigrationservice.RemoveTagsFromResourceInput) (*request.Request, *databasemigrationservice.RemoveTagsFromResourceOutput) + + RemoveTagsFromResource(*databasemigrationservice.RemoveTagsFromResourceInput) (*databasemigrationservice.RemoveTagsFromResourceOutput, error) + + StartReplicationTaskRequest(*databasemigrationservice.StartReplicationTaskInput) (*request.Request, *databasemigrationservice.StartReplicationTaskOutput) + + StartReplicationTask(*databasemigrationservice.StartReplicationTaskInput) (*databasemigrationservice.StartReplicationTaskOutput, error) + + StopReplicationTaskRequest(*databasemigrationservice.StopReplicationTaskInput) (*request.Request, *databasemigrationservice.StopReplicationTaskOutput) + + StopReplicationTask(*databasemigrationservice.StopReplicationTaskInput) (*databasemigrationservice.StopReplicationTaskOutput, error) + + TestConnectionRequest(*databasemigrationservice.TestConnectionInput) (*request.Request, *databasemigrationservice.TestConnectionOutput) + + TestConnection(*databasemigrationservice.TestConnectionInput) (*databasemigrationservice.TestConnectionOutput, error) +} + +var _ DatabaseMigrationServiceAPI = (*databasemigrationservice.DatabaseMigrationService)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/examples_test.go new file mode 100644 index 000000000..bca5c0ace --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/examples_test.go @@ -0,0 +1,739 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package databasemigrationservice_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/databasemigrationservice" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleDatabaseMigrationService_AddTagsToResource() { + svc := databasemigrationservice.New(session.New()) + + params := &databasemigrationservice.AddTagsToResourceInput{ + ResourceArn: aws.String("String"), // Required + Tags: []*databasemigrationservice.Tag{ // Required + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.AddTagsToResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDatabaseMigrationService_CreateEndpoint() { + svc := databasemigrationservice.New(session.New()) + + params := &databasemigrationservice.CreateEndpointInput{ + EndpointIdentifier: aws.String("String"), // Required + EndpointType: aws.String("ReplicationEndpointTypeValue"), // Required + EngineName: aws.String("String"), // Required + Password: aws.String("SecretString"), // Required + Port: aws.Int64(1), // Required + ServerName: aws.String("String"), // Required + Username: aws.String("String"), // Required + DatabaseName: aws.String("String"), + ExtraConnectionAttributes: aws.String("String"), + KmsKeyId: aws.String("String"), + Tags: []*databasemigrationservice.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateEndpoint(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDatabaseMigrationService_CreateReplicationInstance() { + svc := databasemigrationservice.New(session.New()) + + params := &databasemigrationservice.CreateReplicationInstanceInput{ + ReplicationInstanceClass: aws.String("String"), // Required + ReplicationInstanceIdentifier: aws.String("String"), // Required + AllocatedStorage: aws.Int64(1), + AutoMinorVersionUpgrade: aws.Bool(true), + AvailabilityZone: aws.String("String"), + EngineVersion: aws.String("String"), + KmsKeyId: aws.String("String"), + PreferredMaintenanceWindow: aws.String("String"), + PubliclyAccessible: aws.Bool(true), + ReplicationSubnetGroupIdentifier: aws.String("String"), + Tags: []*databasemigrationservice.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + VpcSecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.CreateReplicationInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDatabaseMigrationService_CreateReplicationSubnetGroup() { + svc := databasemigrationservice.New(session.New()) + + params := &databasemigrationservice.CreateReplicationSubnetGroupInput{ + ReplicationSubnetGroupDescription: aws.String("String"), // Required + ReplicationSubnetGroupIdentifier: aws.String("String"), // Required + SubnetIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + Tags: []*databasemigrationservice.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateReplicationSubnetGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDatabaseMigrationService_CreateReplicationTask() { + svc := databasemigrationservice.New(session.New()) + + params := &databasemigrationservice.CreateReplicationTaskInput{ + MigrationType: aws.String("MigrationTypeValue"), // Required + ReplicationInstanceArn: aws.String("String"), // Required + ReplicationTaskIdentifier: aws.String("String"), // Required + SourceEndpointArn: aws.String("String"), // Required + TableMappings: aws.String("String"), // Required + TargetEndpointArn: aws.String("String"), // Required + CdcStartTime: aws.Time(time.Now()), + ReplicationTaskSettings: aws.String("String"), + Tags: []*databasemigrationservice.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateReplicationTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDatabaseMigrationService_DeleteEndpoint() { + svc := databasemigrationservice.New(session.New()) + + params := &databasemigrationservice.DeleteEndpointInput{ + EndpointArn: aws.String("String"), // Required + } + resp, err := svc.DeleteEndpoint(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDatabaseMigrationService_DeleteReplicationInstance() { + svc := databasemigrationservice.New(session.New()) + + params := &databasemigrationservice.DeleteReplicationInstanceInput{ + ReplicationInstanceArn: aws.String("String"), // Required + } + resp, err := svc.DeleteReplicationInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDatabaseMigrationService_DeleteReplicationSubnetGroup() { + svc := databasemigrationservice.New(session.New()) + + params := &databasemigrationservice.DeleteReplicationSubnetGroupInput{ + ReplicationSubnetGroupIdentifier: aws.String("String"), // Required + } + resp, err := svc.DeleteReplicationSubnetGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDatabaseMigrationService_DeleteReplicationTask() { + svc := databasemigrationservice.New(session.New()) + + params := &databasemigrationservice.DeleteReplicationTaskInput{ + ReplicationTaskArn: aws.String("String"), // Required + } + resp, err := svc.DeleteReplicationTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDatabaseMigrationService_DescribeAccountAttributes() { + svc := databasemigrationservice.New(session.New()) + + var params *databasemigrationservice.DescribeAccountAttributesInput + resp, err := svc.DescribeAccountAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
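+        // For instance (editor's sketch; assumes the
+        // "github.com/aws/aws-sdk-go/aws/awserr" import):
+        //
+        //  if awsErr, ok := err.(awserr.Error); ok {
+        //      fmt.Println(awsErr.Code(), awsErr.Message())
+        //  }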
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDatabaseMigrationService_DescribeConnections() { + svc := databasemigrationservice.New(session.New()) + + params := &databasemigrationservice.DescribeConnectionsInput{ + Filters: []*databasemigrationservice.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeConnections(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDatabaseMigrationService_DescribeEndpointTypes() { + svc := databasemigrationservice.New(session.New()) + + params := &databasemigrationservice.DescribeEndpointTypesInput{ + Filters: []*databasemigrationservice.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeEndpointTypes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDatabaseMigrationService_DescribeEndpoints() { + svc := databasemigrationservice.New(session.New()) + + params := &databasemigrationservice.DescribeEndpointsInput{ + Filters: []*databasemigrationservice.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeEndpoints(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDatabaseMigrationService_DescribeOrderableReplicationInstances() { + svc := databasemigrationservice.New(session.New()) + + params := &databasemigrationservice.DescribeOrderableReplicationInstancesInput{ + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeOrderableReplicationInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDatabaseMigrationService_DescribeRefreshSchemasStatus() { + svc := databasemigrationservice.New(session.New()) + + params := &databasemigrationservice.DescribeRefreshSchemasStatusInput{ + EndpointArn: aws.String("String"), // Required + } + resp, err := svc.DescribeRefreshSchemasStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDatabaseMigrationService_DescribeReplicationInstances() { + svc := databasemigrationservice.New(session.New()) + + params := &databasemigrationservice.DescribeReplicationInstancesInput{ + Filters: []*databasemigrationservice.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeReplicationInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDatabaseMigrationService_DescribeReplicationSubnetGroups() { + svc := databasemigrationservice.New(session.New()) + + params := &databasemigrationservice.DescribeReplicationSubnetGroupsInput{ + Filters: []*databasemigrationservice.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeReplicationSubnetGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDatabaseMigrationService_DescribeReplicationTasks() { + svc := databasemigrationservice.New(session.New()) + + params := &databasemigrationservice.DescribeReplicationTasksInput{ + Filters: []*databasemigrationservice.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeReplicationTasks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDatabaseMigrationService_DescribeSchemas() { + svc := databasemigrationservice.New(session.New()) + + params := &databasemigrationservice.DescribeSchemasInput{ + EndpointArn: aws.String("String"), // Required + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeSchemas(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDatabaseMigrationService_DescribeTableStatistics() { + svc := databasemigrationservice.New(session.New()) + + params := &databasemigrationservice.DescribeTableStatisticsInput{ + ReplicationTaskArn: aws.String("String"), // Required + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeTableStatistics(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDatabaseMigrationService_ListTagsForResource() { + svc := databasemigrationservice.New(session.New()) + + params := &databasemigrationservice.ListTagsForResourceInput{ + ResourceArn: aws.String("String"), // Required + } + resp, err := svc.ListTagsForResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDatabaseMigrationService_ModifyEndpoint() { + svc := databasemigrationservice.New(session.New()) + + params := &databasemigrationservice.ModifyEndpointInput{ + EndpointArn: aws.String("String"), // Required + DatabaseName: aws.String("String"), + EndpointIdentifier: aws.String("String"), + EndpointType: aws.String("ReplicationEndpointTypeValue"), + EngineName: aws.String("String"), + ExtraConnectionAttributes: aws.String("String"), + Password: aws.String("SecretString"), + Port: aws.Int64(1), + ServerName: aws.String("String"), + Username: aws.String("String"), + } + resp, err := svc.ModifyEndpoint(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDatabaseMigrationService_ModifyReplicationInstance() { + svc := databasemigrationservice.New(session.New()) + + params := &databasemigrationservice.ModifyReplicationInstanceInput{ + ReplicationInstanceArn: aws.String("String"), // Required + AllocatedStorage: aws.Int64(1), + AllowMajorVersionUpgrade: aws.Bool(true), + ApplyImmediately: aws.Bool(true), + AutoMinorVersionUpgrade: aws.Bool(true), + EngineVersion: aws.String("String"), + PreferredMaintenanceWindow: aws.String("String"), + ReplicationInstanceClass: aws.String("String"), + ReplicationInstanceIdentifier: aws.String("String"), + VpcSecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.ModifyReplicationInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDatabaseMigrationService_ModifyReplicationSubnetGroup() { + svc := databasemigrationservice.New(session.New()) + + params := &databasemigrationservice.ModifyReplicationSubnetGroupInput{ + ReplicationSubnetGroupIdentifier: aws.String("String"), // Required + SubnetIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + ReplicationSubnetGroupDescription: aws.String("String"), + } + resp, err := svc.ModifyReplicationSubnetGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDatabaseMigrationService_RefreshSchemas() { + svc := databasemigrationservice.New(session.New()) + + params := &databasemigrationservice.RefreshSchemasInput{ + EndpointArn: aws.String("String"), // Required + ReplicationInstanceArn: aws.String("String"), // Required + } + resp, err := svc.RefreshSchemas(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDatabaseMigrationService_RemoveTagsFromResource() { + svc := databasemigrationservice.New(session.New()) + + params := &databasemigrationservice.RemoveTagsFromResourceInput{ + ResourceArn: aws.String("String"), // Required + TagKeys: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.RemoveTagsFromResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDatabaseMigrationService_StartReplicationTask() { + svc := databasemigrationservice.New(session.New()) + + params := &databasemigrationservice.StartReplicationTaskInput{ + ReplicationTaskArn: aws.String("String"), // Required + StartReplicationTaskType: aws.String("StartReplicationTaskTypeValue"), // Required + CdcStartTime: aws.Time(time.Now()), + } + resp, err := svc.StartReplicationTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDatabaseMigrationService_StopReplicationTask() { + svc := databasemigrationservice.New(session.New()) + + params := &databasemigrationservice.StopReplicationTaskInput{ + ReplicationTaskArn: aws.String("String"), // Required + } + resp, err := svc.StopReplicationTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDatabaseMigrationService_TestConnection() { + svc := databasemigrationservice.New(session.New()) + + params := &databasemigrationservice.TestConnectionInput{ + EndpointArn: aws.String("String"), // Required + ReplicationInstanceArn: aws.String("String"), // Required + } + resp, err := svc.TestConnection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/service.go b/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/service.go new file mode 100644 index 000000000..513427dbb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/service.go @@ -0,0 +1,93 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package databasemigrationservice + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// AWS Database Migration Service (AWS DMS) can migrate your data to and from +// the most widely used commercial and open-source databases such as Oracle, +// PostgreSQL, Microsoft SQL Server, Amazon Redshift, MariaDB, Amazon Aurora, +// and MySQL. The service supports homogeneous migrations such as Oracle to +// Oracle, as well as heterogeneous migrations between different database platforms, +// such as Oracle to MySQL or SQL Server to PostgreSQL. 
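+//
+// Editor's note: a minimal sketch of kicking off a migration with this
+// client (the ARN is a placeholder; the task type is one of the
+// StartReplicationTaskTypeValue constants defined in api.go):
+//
+//	svc := databasemigrationservice.New(session.New())
+//	resp, err := svc.StartReplicationTask(&databasemigrationservice.StartReplicationTaskInput{
+//		ReplicationTaskArn:       aws.String("arn:aws:dms:..."),
+//		StartReplicationTaskType: aws.String("start-replication"),
+//	})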
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type DatabaseMigrationService struct {
+    *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "dms"
+
+// New creates a new instance of the DatabaseMigrationService client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create a DatabaseMigrationService client from just a session.
+//     svc := databasemigrationservice.New(mySession)
+//
+//     // Create a DatabaseMigrationService client with additional configuration
+//     svc := databasemigrationservice.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *DatabaseMigrationService {
+    c := p.ClientConfig(ServiceName, cfgs...)
+    return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *DatabaseMigrationService {
+    svc := &DatabaseMigrationService{
+        Client: client.New(
+            cfg,
+            metadata.ClientInfo{
+                ServiceName:   ServiceName,
+                SigningRegion: signingRegion,
+                Endpoint:      endpoint,
+                APIVersion:    "2016-01-01",
+                JSONVersion:   "1.1",
+                TargetPrefix:  "AmazonDMSv20160101",
+            },
+            handlers,
+        ),
+    }
+
+    // Handlers
+    svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+    svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler)
+    svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler)
+    svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler)
+    svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler)
+
+    // Run custom client initialization if present
+    if initClient != nil {
+        initClient(svc.Client)
+    }
+
+    return svc
+}
+
+// newRequest creates a new request for a DatabaseMigrationService operation and runs any
+// custom request initialization.
+func (c *DatabaseMigrationService) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+    req := c.NewRequest(op, params, data)
+
+    // Run custom request initialization if present
+    if initRequest != nil {
+        initRequest(req)
+    }
+
+    return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/datapipeline/api.go b/vendor/github.com/aws/aws-sdk-go/service/datapipeline/api.go
new file mode 100644
index 000000000..4593b87a0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/datapipeline/api.go
@@ -0,0 +1,2976 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package datapipeline provides a client for AWS Data Pipeline.
+package datapipeline
+
+import (
+    "fmt"
+    "time"
+
+    "github.com/aws/aws-sdk-go/aws/awsutil"
+    "github.com/aws/aws-sdk-go/aws/request"
+    "github.com/aws/aws-sdk-go/private/protocol"
+    "github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+)
+
+const opActivatePipeline = "ActivatePipeline"
+
+// ActivatePipelineRequest generates a "aws/request.Request" representing the
+// client's request for the ActivatePipeline operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ActivatePipeline method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ActivatePipelineRequest method. +// req, resp := client.ActivatePipelineRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DataPipeline) ActivatePipelineRequest(input *ActivatePipelineInput) (req *request.Request, output *ActivatePipelineOutput) { + op := &request.Operation{ + Name: opActivatePipeline, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ActivatePipelineInput{} + } + + req = c.newRequest(op, input, output) + output = &ActivatePipelineOutput{} + req.Data = output + return +} + +// Validates the specified pipeline and starts processing pipeline tasks. If +// the pipeline does not pass validation, activation fails. +// +// If you need to pause the pipeline to investigate an issue with a component, +// such as a data source or script, call DeactivatePipeline. +// +// To activate a finished pipeline, modify the end date for the pipeline and +// then activate it. +func (c *DataPipeline) ActivatePipeline(input *ActivatePipelineInput) (*ActivatePipelineOutput, error) { + req, out := c.ActivatePipelineRequest(input) + err := req.Send() + return out, err +} + +const opAddTags = "AddTags" + +// AddTagsRequest generates a "aws/request.Request" representing the +// client's request for the AddTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddTagsRequest method. +// req, resp := client.AddTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DataPipeline) AddTagsRequest(input *AddTagsInput) (req *request.Request, output *AddTagsOutput) { + op := &request.Operation{ + Name: opAddTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &AddTagsOutput{} + req.Data = output + return +} + +// Adds or modifies tags for the specified pipeline. +func (c *DataPipeline) AddTags(input *AddTagsInput) (*AddTagsOutput, error) { + req, out := c.AddTagsRequest(input) + err := req.Send() + return out, err +} + +const opCreatePipeline = "CreatePipeline" + +// CreatePipelineRequest generates a "aws/request.Request" representing the +// client's request for the CreatePipeline operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreatePipeline method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreatePipelineRequest method. +// req, resp := client.CreatePipelineRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DataPipeline) CreatePipelineRequest(input *CreatePipelineInput) (req *request.Request, output *CreatePipelineOutput) { + op := &request.Operation{ + Name: opCreatePipeline, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreatePipelineInput{} + } + + req = c.newRequest(op, input, output) + output = &CreatePipelineOutput{} + req.Data = output + return +} + +// Creates a new, empty pipeline. Use PutPipelineDefinition to populate the +// pipeline. +func (c *DataPipeline) CreatePipeline(input *CreatePipelineInput) (*CreatePipelineOutput, error) { + req, out := c.CreatePipelineRequest(input) + err := req.Send() + return out, err +} + +const opDeactivatePipeline = "DeactivatePipeline" + +// DeactivatePipelineRequest generates a "aws/request.Request" representing the +// client's request for the DeactivatePipeline operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeactivatePipeline method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeactivatePipelineRequest method. +// req, resp := client.DeactivatePipelineRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DataPipeline) DeactivatePipelineRequest(input *DeactivatePipelineInput) (req *request.Request, output *DeactivatePipelineOutput) { + op := &request.Operation{ + Name: opDeactivatePipeline, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeactivatePipelineInput{} + } + + req = c.newRequest(op, input, output) + output = &DeactivatePipelineOutput{} + req.Data = output + return +} + +// Deactivates the specified running pipeline. The pipeline is set to the DEACTIVATING +// state until the deactivation process completes. +// +// To resume a deactivated pipeline, use ActivatePipeline. By default, the +// pipeline resumes from the last completed execution. Optionally, you can specify +// the date and time to resume the pipeline. +func (c *DataPipeline) DeactivatePipeline(input *DeactivatePipelineInput) (*DeactivatePipelineOutput, error) { + req, out := c.DeactivatePipelineRequest(input) + err := req.Send() + return out, err +} + +const opDeletePipeline = "DeletePipeline" + +// DeletePipelineRequest generates a "aws/request.Request" representing the +// client's request for the DeletePipeline operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeletePipeline method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeletePipelineRequest method. +// req, resp := client.DeletePipelineRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DataPipeline) DeletePipelineRequest(input *DeletePipelineInput) (req *request.Request, output *DeletePipelineOutput) { + op := &request.Operation{ + Name: opDeletePipeline, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeletePipelineInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeletePipelineOutput{} + req.Data = output + return +} + +// Deletes a pipeline, its pipeline definition, and its run history. AWS Data +// Pipeline attempts to cancel instances associated with the pipeline that are +// currently being processed by task runners. +// +// Deleting a pipeline cannot be undone. You cannot query or restore a deleted +// pipeline. To temporarily pause a pipeline instead of deleting it, call SetStatus +// with the status set to PAUSE on individual components. Components that are +// paused by SetStatus can be resumed. +func (c *DataPipeline) DeletePipeline(input *DeletePipelineInput) (*DeletePipelineOutput, error) { + req, out := c.DeletePipelineRequest(input) + err := req.Send() + return out, err +} + +const opDescribeObjects = "DescribeObjects" + +// DescribeObjectsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeObjects operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeObjects method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeObjectsRequest method. 
+// req, resp := client.DescribeObjectsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DataPipeline) DescribeObjectsRequest(input *DescribeObjectsInput) (req *request.Request, output *DescribeObjectsOutput) { + op := &request.Operation{ + Name: opDescribeObjects, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"marker"}, + OutputTokens: []string{"marker"}, + LimitToken: "", + TruncationToken: "hasMoreResults", + }, + } + + if input == nil { + input = &DescribeObjectsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeObjectsOutput{} + req.Data = output + return +} + +// Gets the object definitions for a set of objects associated with the pipeline. +// Object definitions are composed of a set of fields that define the properties +// of the object. +func (c *DataPipeline) DescribeObjects(input *DescribeObjectsInput) (*DescribeObjectsOutput, error) { + req, out := c.DescribeObjectsRequest(input) + err := req.Send() + return out, err +} + +// DescribeObjectsPages iterates over the pages of a DescribeObjects operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeObjects method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeObjects operation. +// pageNum := 0 +// err := client.DescribeObjectsPages(params, +// func(page *DescribeObjectsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *DataPipeline) DescribeObjectsPages(input *DescribeObjectsInput, fn func(p *DescribeObjectsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeObjectsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeObjectsOutput), lastPage) + }) +} + +const opDescribePipelines = "DescribePipelines" + +// DescribePipelinesRequest generates a "aws/request.Request" representing the +// client's request for the DescribePipelines operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribePipelines method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribePipelinesRequest method. 
+// req, resp := client.DescribePipelinesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DataPipeline) DescribePipelinesRequest(input *DescribePipelinesInput) (req *request.Request, output *DescribePipelinesOutput) { + op := &request.Operation{ + Name: opDescribePipelines, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribePipelinesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribePipelinesOutput{} + req.Data = output + return +} + +// Retrieves metadata about one or more pipelines. The information retrieved +// includes the name of the pipeline, the pipeline identifier, its current state, +// and the user account that owns the pipeline. Using account credentials, you +// can retrieve metadata about pipelines that you or your IAM users have created. +// If you are using an IAM user account, you can retrieve metadata about only +// those pipelines for which you have read permissions. +// +// To retrieve the full pipeline definition instead of metadata about the pipeline, +// call GetPipelineDefinition. +func (c *DataPipeline) DescribePipelines(input *DescribePipelinesInput) (*DescribePipelinesOutput, error) { + req, out := c.DescribePipelinesRequest(input) + err := req.Send() + return out, err +} + +const opEvaluateExpression = "EvaluateExpression" + +// EvaluateExpressionRequest generates a "aws/request.Request" representing the +// client's request for the EvaluateExpression operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the EvaluateExpression method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EvaluateExpressionRequest method. +// req, resp := client.EvaluateExpressionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DataPipeline) EvaluateExpressionRequest(input *EvaluateExpressionInput) (req *request.Request, output *EvaluateExpressionOutput) { + op := &request.Operation{ + Name: opEvaluateExpression, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EvaluateExpressionInput{} + } + + req = c.newRequest(op, input, output) + output = &EvaluateExpressionOutput{} + req.Data = output + return +} + +// Task runners call EvaluateExpression to evaluate a string in the context +// of the specified object. For example, a task runner can evaluate SQL queries +// stored in Amazon S3. +func (c *DataPipeline) EvaluateExpression(input *EvaluateExpressionInput) (*EvaluateExpressionOutput, error) { + req, out := c.EvaluateExpressionRequest(input) + err := req.Send() + return out, err +} + +const opGetPipelineDefinition = "GetPipelineDefinition" + +// GetPipelineDefinitionRequest generates a "aws/request.Request" representing the +// client's request for the GetPipelineDefinition operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetPipelineDefinition method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetPipelineDefinitionRequest method. +// req, resp := client.GetPipelineDefinitionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DataPipeline) GetPipelineDefinitionRequest(input *GetPipelineDefinitionInput) (req *request.Request, output *GetPipelineDefinitionOutput) { + op := &request.Operation{ + Name: opGetPipelineDefinition, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetPipelineDefinitionInput{} + } + + req = c.newRequest(op, input, output) + output = &GetPipelineDefinitionOutput{} + req.Data = output + return +} + +// Gets the definition of the specified pipeline. You can call GetPipelineDefinition +// to retrieve the pipeline definition that you provided using PutPipelineDefinition. +func (c *DataPipeline) GetPipelineDefinition(input *GetPipelineDefinitionInput) (*GetPipelineDefinitionOutput, error) { + req, out := c.GetPipelineDefinitionRequest(input) + err := req.Send() + return out, err +} + +const opListPipelines = "ListPipelines" + +// ListPipelinesRequest generates a "aws/request.Request" representing the +// client's request for the ListPipelines operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListPipelines method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListPipelinesRequest method. +// req, resp := client.ListPipelinesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DataPipeline) ListPipelinesRequest(input *ListPipelinesInput) (req *request.Request, output *ListPipelinesOutput) { + op := &request.Operation{ + Name: opListPipelines, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"marker"}, + OutputTokens: []string{"marker"}, + LimitToken: "", + TruncationToken: "hasMoreResults", + }, + } + + if input == nil { + input = &ListPipelinesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListPipelinesOutput{} + req.Data = output + return +} + +// Lists the pipeline identifiers for all active pipelines that you have permission +// to access. +func (c *DataPipeline) ListPipelines(input *ListPipelinesInput) (*ListPipelinesOutput, error) { + req, out := c.ListPipelinesRequest(input) + err := req.Send() + return out, err +} + +// ListPipelinesPages iterates over the pages of a ListPipelines operation, +// calling the "fn" function with the response data for each page. 
To stop
+// iterating, return false from the fn function.
+//
+// See ListPipelines method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a ListPipelines operation.
+// pageNum := 0
+// err := client.ListPipelinesPages(params,
+// func(page *ListPipelinesOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *DataPipeline) ListPipelinesPages(input *ListPipelinesInput, fn func(p *ListPipelinesOutput, lastPage bool) (shouldContinue bool)) error {
+ page, _ := c.ListPipelinesRequest(input)
+ page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
+ return fn(p.(*ListPipelinesOutput), lastPage)
+ })
+}
+
+const opPollForTask = "PollForTask"
+
+// PollForTaskRequest generates a "aws/request.Request" representing the
+// client's request for the PollForTask operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PollForTask method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the PollForTaskRequest method.
+// req, resp := client.PollForTaskRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *DataPipeline) PollForTaskRequest(input *PollForTaskInput) (req *request.Request, output *PollForTaskOutput) {
+ op := &request.Operation{
+ Name: opPollForTask,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &PollForTaskInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &PollForTaskOutput{}
+ req.Data = output
+ return
+}
+
+// Task runners call PollForTask to receive a task to perform from AWS Data
+// Pipeline. The task runner specifies which tasks it can perform by setting
+// a value for the workerGroup parameter. The task returned can come from any
+// of the pipelines that match the workerGroup value passed in by the task runner
+// and that were launched using the IAM user credentials specified by the task
+// runner.
+//
+// If tasks are ready in the work queue, PollForTask returns a response immediately.
+// If no tasks are available in the queue, PollForTask uses long-polling and
+// holds on to a poll connection for up to 90 seconds, during which time the
+// first newly scheduled task is handed to the task runner. To accommodate this,
+// set the socket timeout in your task runner to 90 seconds. The task runner
+// should not call PollForTask again on the same workerGroup until it receives
+// a response, and this can take up to 90 seconds.
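+//
+// For illustration only (not part of the generated documentation): a minimal
+// polling sketch. It assumes the client was built with an HTTP client whose
+// timeout is at least 90 seconds, and "my-worker-group" is a placeholder.
+//
+// out, err := client.PollForTask(&datapipeline.PollForTaskInput{
+// WorkerGroup: aws.String("my-worker-group"),
+// })
+// if err == nil && out.TaskObject != nil {
+// // Pass the returned taskId to ReportTaskProgress and SetTaskStatus.
+// fmt.Println(aws.StringValue(out.TaskObject.TaskId))
+// }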
+func (c *DataPipeline) PollForTask(input *PollForTaskInput) (*PollForTaskOutput, error) {
+ req, out := c.PollForTaskRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opPutPipelineDefinition = "PutPipelineDefinition"
+
+// PutPipelineDefinitionRequest generates a "aws/request.Request" representing the
+// client's request for the PutPipelineDefinition operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutPipelineDefinition method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the PutPipelineDefinitionRequest method.
+// req, resp := client.PutPipelineDefinitionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *DataPipeline) PutPipelineDefinitionRequest(input *PutPipelineDefinitionInput) (req *request.Request, output *PutPipelineDefinitionOutput) {
+ op := &request.Operation{
+ Name: opPutPipelineDefinition,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &PutPipelineDefinitionInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &PutPipelineDefinitionOutput{}
+ req.Data = output
+ return
+}
+
+// Adds tasks, schedules, and preconditions to the specified pipeline. You can
+// use PutPipelineDefinition to populate a new pipeline.
+//
+// PutPipelineDefinition also validates the configuration as it adds it to
+// the pipeline. Changes to the pipeline are saved unless one of the following
+// validation errors exists in the pipeline.
+//
+//    * An object is missing a name or identifier field.
+//
+//    * A string or reference field is empty.
+//
+//    * The number of objects in the pipeline exceeds the maximum allowed objects.
+//
+//    * The pipeline is in a FINISHED state.
+//
+// Pipeline object definitions are passed to the PutPipelineDefinition action
+// and returned by the GetPipelineDefinition action.
+func (c *DataPipeline) PutPipelineDefinition(input *PutPipelineDefinitionInput) (*PutPipelineDefinitionOutput, error) {
+ req, out := c.PutPipelineDefinitionRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opQueryObjects = "QueryObjects"
+
+// QueryObjectsRequest generates a "aws/request.Request" representing the
+// client's request for the QueryObjects operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the QueryObjects method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the QueryObjectsRequest method.
+// req, resp := client.QueryObjectsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DataPipeline) QueryObjectsRequest(input *QueryObjectsInput) (req *request.Request, output *QueryObjectsOutput) { + op := &request.Operation{ + Name: opQueryObjects, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"marker"}, + OutputTokens: []string{"marker"}, + LimitToken: "limit", + TruncationToken: "hasMoreResults", + }, + } + + if input == nil { + input = &QueryObjectsInput{} + } + + req = c.newRequest(op, input, output) + output = &QueryObjectsOutput{} + req.Data = output + return +} + +// Queries the specified pipeline for the names of objects that match the specified +// set of conditions. +func (c *DataPipeline) QueryObjects(input *QueryObjectsInput) (*QueryObjectsOutput, error) { + req, out := c.QueryObjectsRequest(input) + err := req.Send() + return out, err +} + +// QueryObjectsPages iterates over the pages of a QueryObjects operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See QueryObjects method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a QueryObjects operation. +// pageNum := 0 +// err := client.QueryObjectsPages(params, +// func(page *QueryObjectsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *DataPipeline) QueryObjectsPages(input *QueryObjectsInput, fn func(p *QueryObjectsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.QueryObjectsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*QueryObjectsOutput), lastPage) + }) +} + +const opRemoveTags = "RemoveTags" + +// RemoveTagsRequest generates a "aws/request.Request" representing the +// client's request for the RemoveTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RemoveTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RemoveTagsRequest method. +// req, resp := client.RemoveTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DataPipeline) RemoveTagsRequest(input *RemoveTagsInput) (req *request.Request, output *RemoveTagsOutput) { + op := &request.Operation{ + Name: opRemoveTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &RemoveTagsOutput{} + req.Data = output + return +} + +// Removes existing tags from the specified pipeline. 
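+//
+// For illustration only (not part of the generated documentation): a minimal
+// sketch of removing a single tag by key. The pipeline ID and tag key are
+// placeholders.
+//
+// _, err := client.RemoveTags(&datapipeline.RemoveTagsInput{
+// PipelineId: aws.String("df-297EG78HU43EEXAMPLE"),
+// TagKeys: []*string{aws.String("environment")},
+// })
+// if err != nil {
+// fmt.Println(err)
+// }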
+func (c *DataPipeline) RemoveTags(input *RemoveTagsInput) (*RemoveTagsOutput, error) { + req, out := c.RemoveTagsRequest(input) + err := req.Send() + return out, err +} + +const opReportTaskProgress = "ReportTaskProgress" + +// ReportTaskProgressRequest generates a "aws/request.Request" representing the +// client's request for the ReportTaskProgress operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ReportTaskProgress method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ReportTaskProgressRequest method. +// req, resp := client.ReportTaskProgressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DataPipeline) ReportTaskProgressRequest(input *ReportTaskProgressInput) (req *request.Request, output *ReportTaskProgressOutput) { + op := &request.Operation{ + Name: opReportTaskProgress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReportTaskProgressInput{} + } + + req = c.newRequest(op, input, output) + output = &ReportTaskProgressOutput{} + req.Data = output + return +} + +// Task runners call ReportTaskProgress when assigned a task to acknowledge +// that it has the task. If the web service does not receive this acknowledgement +// within 2 minutes, it assigns the task in a subsequent PollForTask call. After +// this initial acknowledgement, the task runner only needs to report progress +// every 15 minutes to maintain its ownership of the task. You can change this +// reporting time from 15 minutes by specifying a reportProgressTimeout field +// in your pipeline. +// +// If a task runner does not report its status after 5 minutes, AWS Data Pipeline +// assumes that the task runner is unable to process the task and reassigns +// the task in a subsequent response to PollForTask. Task runners should call +// ReportTaskProgress every 60 seconds. +func (c *DataPipeline) ReportTaskProgress(input *ReportTaskProgressInput) (*ReportTaskProgressOutput, error) { + req, out := c.ReportTaskProgressRequest(input) + err := req.Send() + return out, err +} + +const opReportTaskRunnerHeartbeat = "ReportTaskRunnerHeartbeat" + +// ReportTaskRunnerHeartbeatRequest generates a "aws/request.Request" representing the +// client's request for the ReportTaskRunnerHeartbeat operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ReportTaskRunnerHeartbeat method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ReportTaskRunnerHeartbeatRequest method. 
+// req, resp := client.ReportTaskRunnerHeartbeatRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *DataPipeline) ReportTaskRunnerHeartbeatRequest(input *ReportTaskRunnerHeartbeatInput) (req *request.Request, output *ReportTaskRunnerHeartbeatOutput) {
+ op := &request.Operation{
+ Name: opReportTaskRunnerHeartbeat,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ReportTaskRunnerHeartbeatInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ReportTaskRunnerHeartbeatOutput{}
+ req.Data = output
+ return
+}
+
+// Task runners call ReportTaskRunnerHeartbeat every 15 minutes to indicate
+// that they are operational. If the AWS Data Pipeline Task Runner is launched
+// on a resource managed by AWS Data Pipeline, the web service can use this
+// call to detect when the task runner application has failed and launch a
+// new instance.
+func (c *DataPipeline) ReportTaskRunnerHeartbeat(input *ReportTaskRunnerHeartbeatInput) (*ReportTaskRunnerHeartbeatOutput, error) {
+ req, out := c.ReportTaskRunnerHeartbeatRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opSetStatus = "SetStatus"
+
+// SetStatusRequest generates a "aws/request.Request" representing the
+// client's request for the SetStatus operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the SetStatus method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the SetStatusRequest method.
+// req, resp := client.SetStatusRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *DataPipeline) SetStatusRequest(input *SetStatusInput) (req *request.Request, output *SetStatusOutput) {
+ op := &request.Operation{
+ Name: opSetStatus,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &SetStatusInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &SetStatusOutput{}
+ req.Data = output
+ return
+}
+
+// Requests that the status of the specified physical or logical pipeline objects
+// be updated in the specified pipeline. This update might not occur immediately,
+// but is eventually consistent. The status that can be set depends on the type
+// of object (for example, DataNode or Activity). You cannot perform this operation
+// on FINISHED pipelines and attempting to do so returns InvalidRequestException.
+func (c *DataPipeline) SetStatus(input *SetStatusInput) (*SetStatusOutput, error) {
+ req, out := c.SetStatusRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opSetTaskStatus = "SetTaskStatus"
+
+// SetTaskStatusRequest generates a "aws/request.Request" representing the
+// client's request for the SetTaskStatus operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the SetTaskStatus method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the SetTaskStatusRequest method.
+// req, resp := client.SetTaskStatusRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *DataPipeline) SetTaskStatusRequest(input *SetTaskStatusInput) (req *request.Request, output *SetTaskStatusOutput) {
+ op := &request.Operation{
+ Name: opSetTaskStatus,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &SetTaskStatusInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &SetTaskStatusOutput{}
+ req.Data = output
+ return
+}
+
+// Task runners call SetTaskStatus to notify AWS Data Pipeline that a task is
+// completed and provide information about the final status. A task runner makes
+// this call regardless of whether the task was successful. A task runner does
+// not need to call SetTaskStatus for tasks that are canceled by the web service
+// during a call to ReportTaskProgress.
+func (c *DataPipeline) SetTaskStatus(input *SetTaskStatusInput) (*SetTaskStatusOutput, error) {
+ req, out := c.SetTaskStatusRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opValidatePipelineDefinition = "ValidatePipelineDefinition"
+
+// ValidatePipelineDefinitionRequest generates a "aws/request.Request" representing the
+// client's request for the ValidatePipelineDefinition operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ValidatePipelineDefinition method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the ValidatePipelineDefinitionRequest method.
+// req, resp := client.ValidatePipelineDefinitionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *DataPipeline) ValidatePipelineDefinitionRequest(input *ValidatePipelineDefinitionInput) (req *request.Request, output *ValidatePipelineDefinitionOutput) {
+ op := &request.Operation{
+ Name: opValidatePipelineDefinition,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ValidatePipelineDefinitionInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ValidatePipelineDefinitionOutput{}
+ req.Data = output
+ return
+}
+
+// Validates the specified pipeline definition to ensure that it is well formed
+// and can be run without error.
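+//
+// For illustration only (not part of the generated documentation): a minimal
+// sketch that validates a definition before committing it with PutPipelineDefinition.
+// The pipeline ID is a placeholder and objects is assumed to hold a
+// []*datapipeline.PipelineObject definition built by the caller.
+//
+// out, err := client.ValidatePipelineDefinition(&datapipeline.ValidatePipelineDefinitionInput{
+// PipelineId: aws.String("df-297EG78HU43EEXAMPLE"),
+// PipelineObjects: objects,
+// })
+// if err == nil && !aws.BoolValue(out.Errored) {
+// // The definition is well formed; commit it with PutPipelineDefinition.
+// }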
+func (c *DataPipeline) ValidatePipelineDefinition(input *ValidatePipelineDefinitionInput) (*ValidatePipelineDefinitionOutput, error) { + req, out := c.ValidatePipelineDefinitionRequest(input) + err := req.Send() + return out, err +} + +// Contains the parameters for ActivatePipeline. +type ActivatePipelineInput struct { + _ struct{} `type:"structure"` + + // A list of parameter values to pass to the pipeline at activation. + ParameterValues []*ParameterValue `locationName:"parameterValues" type:"list"` + + // The ID of the pipeline. + PipelineId *string `locationName:"pipelineId" min:"1" type:"string" required:"true"` + + // The date and time to resume the pipeline. By default, the pipeline resumes + // from the last completed execution. + StartTimestamp *time.Time `locationName:"startTimestamp" type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s ActivatePipelineInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActivatePipelineInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ActivatePipelineInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ActivatePipelineInput"} + if s.PipelineId == nil { + invalidParams.Add(request.NewErrParamRequired("PipelineId")) + } + if s.PipelineId != nil && len(*s.PipelineId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PipelineId", 1)) + } + if s.ParameterValues != nil { + for i, v := range s.ParameterValues { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ParameterValues", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of ActivatePipeline. +type ActivatePipelineOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ActivatePipelineOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActivatePipelineOutput) GoString() string { + return s.String() +} + +// Contains the parameters for AddTags. +type AddTagsInput struct { + _ struct{} `type:"structure"` + + // The ID of the pipeline. + PipelineId *string `locationName:"pipelineId" min:"1" type:"string" required:"true"` + + // The tags to add, as key/value pairs. + Tags []*Tag `locationName:"tags" type:"list" required:"true"` +} + +// String returns the string representation +func (s AddTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AddTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddTagsInput"} + if s.PipelineId == nil { + invalidParams.Add(request.NewErrParamRequired("PipelineId")) + } + if s.PipelineId != nil && len(*s.PipelineId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PipelineId", 1)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of AddTags. +type AddTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for CreatePipeline. +type CreatePipelineInput struct { + _ struct{} `type:"structure"` + + // The description for the pipeline. + Description *string `locationName:"description" type:"string"` + + // The name for the pipeline. You can use the same name for multiple pipelines + // associated with your AWS account, because AWS Data Pipeline assigns each + // pipeline a unique pipeline identifier. + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // A list of tags to associate with the pipeline at creation. Tags let you control + // access to pipelines. For more information, see Controlling User Access to + // Pipelines (http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-control-access.html) + // in the AWS Data Pipeline Developer Guide. + Tags []*Tag `locationName:"tags" type:"list"` + + // A unique identifier. This identifier is not the same as the pipeline identifier + // assigned by AWS Data Pipeline. You are responsible for defining the format + // and ensuring the uniqueness of this identifier. You use this parameter to + // ensure idempotency during repeated calls to CreatePipeline. For example, + // if the first call to CreatePipeline does not succeed, you can pass in the + // same unique identifier and pipeline name combination on a subsequent call + // to CreatePipeline. CreatePipeline ensures that if a pipeline already exists + // with the same name and unique identifier, a new pipeline is not created. + // Instead, you'll receive the pipeline identifier from the previous attempt. + // The uniqueness of the name and unique identifier combination is scoped to + // the AWS account or IAM user credentials. + UniqueId *string `locationName:"uniqueId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreatePipelineInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePipelineInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreatePipelineInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreatePipelineInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.UniqueId == nil { + invalidParams.Add(request.NewErrParamRequired("UniqueId")) + } + if s.UniqueId != nil && len(*s.UniqueId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UniqueId", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of CreatePipeline. +type CreatePipelineOutput struct { + _ struct{} `type:"structure"` + + // The ID that AWS Data Pipeline assigns the newly created pipeline. For example, + // df-06372391ZG65EXAMPLE. + PipelineId *string `locationName:"pipelineId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreatePipelineOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePipelineOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DeactivatePipeline. +type DeactivatePipelineInput struct { + _ struct{} `type:"structure"` + + // Indicates whether to cancel any running objects. The default is true, which + // sets the state of any running objects to CANCELED. If this value is false, + // the pipeline is deactivated after all running objects finish. + CancelActive *bool `locationName:"cancelActive" type:"boolean"` + + // The ID of the pipeline. + PipelineId *string `locationName:"pipelineId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeactivatePipelineInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeactivatePipelineInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeactivatePipelineInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeactivatePipelineInput"} + if s.PipelineId == nil { + invalidParams.Add(request.NewErrParamRequired("PipelineId")) + } + if s.PipelineId != nil && len(*s.PipelineId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PipelineId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of DeactivatePipeline. +type DeactivatePipelineOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeactivatePipelineOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeactivatePipelineOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DeletePipeline. +type DeletePipelineInput struct { + _ struct{} `type:"structure"` + + // The ID of the pipeline. 
+ PipelineId *string `locationName:"pipelineId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePipelineInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePipelineInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeletePipelineInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeletePipelineInput"} + if s.PipelineId == nil { + invalidParams.Add(request.NewErrParamRequired("PipelineId")) + } + if s.PipelineId != nil && len(*s.PipelineId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PipelineId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeletePipelineOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePipelineOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePipelineOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeObjects. +type DescribeObjectsInput struct { + _ struct{} `type:"structure"` + + // Indicates whether any expressions in the object should be evaluated when + // the object descriptions are returned. + EvaluateExpressions *bool `locationName:"evaluateExpressions" type:"boolean"` + + // The starting point for the results to be returned. For the first call, this + // value should be empty. As long as there are more results, continue to call + // DescribeObjects with the marker value from the previous call to retrieve + // the next set of results. + Marker *string `locationName:"marker" type:"string"` + + // The IDs of the pipeline objects that contain the definitions to be described. + // You can pass as many as 25 identifiers in a single call to DescribeObjects. + ObjectIds []*string `locationName:"objectIds" type:"list" required:"true"` + + // The ID of the pipeline that contains the object definitions. + PipelineId *string `locationName:"pipelineId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeObjectsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeObjectsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeObjectsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeObjectsInput"} + if s.ObjectIds == nil { + invalidParams.Add(request.NewErrParamRequired("ObjectIds")) + } + if s.PipelineId == nil { + invalidParams.Add(request.NewErrParamRequired("PipelineId")) + } + if s.PipelineId != nil && len(*s.PipelineId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PipelineId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of DescribeObjects. +type DescribeObjectsOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether there are more results to return. + HasMoreResults *bool `locationName:"hasMoreResults" type:"boolean"` + + // The starting point for the next page of results. To view the next page of + // results, call DescribeObjects again with this marker value. If the value + // is null, there are no more results. 
+ Marker *string `locationName:"marker" type:"string"` + + // An array of object definitions. + PipelineObjects []*PipelineObject `locationName:"pipelineObjects" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeObjectsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeObjectsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribePipelines. +type DescribePipelinesInput struct { + _ struct{} `type:"structure"` + + // The IDs of the pipelines to describe. You can pass as many as 25 identifiers + // in a single call. To obtain pipeline IDs, call ListPipelines. + PipelineIds []*string `locationName:"pipelineIds" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribePipelinesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePipelinesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribePipelinesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribePipelinesInput"} + if s.PipelineIds == nil { + invalidParams.Add(request.NewErrParamRequired("PipelineIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of DescribePipelines. +type DescribePipelinesOutput struct { + _ struct{} `type:"structure"` + + // An array of descriptions for the specified pipelines. + PipelineDescriptionList []*PipelineDescription `locationName:"pipelineDescriptionList" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribePipelinesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePipelinesOutput) GoString() string { + return s.String() +} + +// Contains the parameters for EvaluateExpression. +type EvaluateExpressionInput struct { + _ struct{} `type:"structure"` + + // The expression to evaluate. + Expression *string `locationName:"expression" type:"string" required:"true"` + + // The ID of the object. + ObjectId *string `locationName:"objectId" min:"1" type:"string" required:"true"` + + // The ID of the pipeline. + PipelineId *string `locationName:"pipelineId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s EvaluateExpressionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EvaluateExpressionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *EvaluateExpressionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EvaluateExpressionInput"} + if s.Expression == nil { + invalidParams.Add(request.NewErrParamRequired("Expression")) + } + if s.ObjectId == nil { + invalidParams.Add(request.NewErrParamRequired("ObjectId")) + } + if s.ObjectId != nil && len(*s.ObjectId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ObjectId", 1)) + } + if s.PipelineId == nil { + invalidParams.Add(request.NewErrParamRequired("PipelineId")) + } + if s.PipelineId != nil && len(*s.PipelineId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PipelineId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of EvaluateExpression. +type EvaluateExpressionOutput struct { + _ struct{} `type:"structure"` + + // The evaluated expression. + EvaluatedExpression *string `locationName:"evaluatedExpression" type:"string" required:"true"` +} + +// String returns the string representation +func (s EvaluateExpressionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EvaluateExpressionOutput) GoString() string { + return s.String() +} + +// A key-value pair that describes a property of a pipeline object. The value +// is specified as either a string value (StringValue) or a reference to another +// object (RefValue) but not as both. +type Field struct { + _ struct{} `type:"structure"` + + // The field identifier. + Key *string `locationName:"key" min:"1" type:"string" required:"true"` + + // The field value, expressed as the identifier of another object. + RefValue *string `locationName:"refValue" min:"1" type:"string"` + + // The field value, expressed as a String. + StringValue *string `locationName:"stringValue" type:"string"` +} + +// String returns the string representation +func (s Field) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Field) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Field) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Field"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.RefValue != nil && len(*s.RefValue) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RefValue", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the parameters for GetPipelineDefinition. +type GetPipelineDefinitionInput struct { + _ struct{} `type:"structure"` + + // The ID of the pipeline. + PipelineId *string `locationName:"pipelineId" min:"1" type:"string" required:"true"` + + // The version of the pipeline definition to retrieve. Set this parameter to + // latest (default) to use the last definition saved to the pipeline or active + // to use the last definition that was activated. + Version *string `locationName:"version" type:"string"` +} + +// String returns the string representation +func (s GetPipelineDefinitionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPipelineDefinitionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetPipelineDefinitionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetPipelineDefinitionInput"} + if s.PipelineId == nil { + invalidParams.Add(request.NewErrParamRequired("PipelineId")) + } + if s.PipelineId != nil && len(*s.PipelineId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PipelineId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of GetPipelineDefinition. +type GetPipelineDefinitionOutput struct { + _ struct{} `type:"structure"` + + // The parameter objects used in the pipeline definition. + ParameterObjects []*ParameterObject `locationName:"parameterObjects" type:"list"` + + // The parameter values used in the pipeline definition. + ParameterValues []*ParameterValue `locationName:"parameterValues" type:"list"` + + // The objects defined in the pipeline. + PipelineObjects []*PipelineObject `locationName:"pipelineObjects" type:"list"` +} + +// String returns the string representation +func (s GetPipelineDefinitionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPipelineDefinitionOutput) GoString() string { + return s.String() +} + +// Identity information for the EC2 instance that is hosting the task runner. +// You can get this value by calling a metadata URI from the EC2 instance. For +// more information, see Instance Metadata (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html) +// in the Amazon Elastic Compute Cloud User Guide. Passing in this value proves +// that your task runner is running on an EC2 instance, and ensures the proper +// AWS Data Pipeline service charges are applied to your pipeline. +type InstanceIdentity struct { + _ struct{} `type:"structure"` + + // A description of an EC2 instance that is generated when the instance is launched + // and exposed to the instance via the instance metadata service in the form + // of a JSON representation of an object. + Document *string `locationName:"document" type:"string"` + + // A signature which can be used to verify the accuracy and authenticity of + // the information provided in the instance identity document. + Signature *string `locationName:"signature" type:"string"` +} + +// String returns the string representation +func (s InstanceIdentity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceIdentity) GoString() string { + return s.String() +} + +// Contains the parameters for ListPipelines. +type ListPipelinesInput struct { + _ struct{} `type:"structure"` + + // The starting point for the results to be returned. For the first call, this + // value should be empty. As long as there are more results, continue to call + // ListPipelines with the marker value from the previous call to retrieve the + // next set of results. + Marker *string `locationName:"marker" type:"string"` +} + +// String returns the string representation +func (s ListPipelinesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPipelinesInput) GoString() string { + return s.String() +} + +// Contains the output of ListPipelines. +type ListPipelinesOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether there are more results that can be obtained by a subsequent + // call. 
+ HasMoreResults *bool `locationName:"hasMoreResults" type:"boolean"`
+
+ // The starting point for the next page of results. To view the next page of
+ // results, call ListPipelines again with this marker value. If the value
+ // is null, there are no more results.
+ Marker *string `locationName:"marker" type:"string"`
+
+ // The pipeline identifiers. If you require additional information about the
+ // pipelines, you can use these identifiers to call DescribePipelines and GetPipelineDefinition.
+ PipelineIdList []*PipelineIdName `locationName:"pipelineIdList" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s ListPipelinesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListPipelinesOutput) GoString() string {
+ return s.String()
+}
+
+// Contains a logical operation for comparing the value of a field with a specified
+// value.
+type Operator struct {
+ _ struct{} `type:"structure"`
+
+ // The logical operation to be performed: equal (EQ), equal reference (REF_EQ),
+ // less than or equal (LE), greater than or equal (GE), or between (BETWEEN).
+ // Equal reference (REF_EQ) can be used only with reference fields. The other
+ // comparison types can be used only with String fields. The comparison types
+ // you can use apply only to certain object fields, as detailed below.
+ //
+ // The comparison operators EQ and REF_EQ act on the following fields:
+ //
+ //    * name
+ //    * @sphere
+ //    * parent
+ //    * @componentParent
+ //    * @instanceParent
+ //    * @status
+ //    * @scheduledStartTime
+ //    * @scheduledEndTime
+ //    * @actualStartTime
+ //    * @actualEndTime
+ //
+ // The comparison operators GE, LE, and BETWEEN act on the following fields:
+ //
+ //    * @scheduledStartTime
+ //    * @scheduledEndTime
+ //    * @actualStartTime
+ //    * @actualEndTime
+ //
+ // Note that fields beginning with the at sign (@) are read-only and set by
+ // the web service. When you name fields, you should choose names containing
+ // only alpha-numeric values, as symbols may be reserved by AWS Data Pipeline.
+ // User-defined fields that you add to a pipeline should prefix their name with
+ // the string "my".
+ Type *string `locationName:"type" type:"string" enum:"OperatorType"`
+
+ // The value that the actual field value will be compared with.
+ Values []*string `locationName:"values" type:"list"`
+}
+
+// String returns the string representation
+func (s Operator) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Operator) GoString() string {
+ return s.String()
+}
+
+// The attributes allowed or specified with a parameter object.
+type ParameterAttribute struct {
+ _ struct{} `type:"structure"`
+
+ // The field identifier.
+ Key *string `locationName:"key" min:"1" type:"string" required:"true"`
+
+ // The field value, expressed as a String.
+ StringValue *string `locationName:"stringValue" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ParameterAttribute) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ParameterAttribute) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ParameterAttribute) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ParameterAttribute"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.StringValue == nil { + invalidParams.Add(request.NewErrParamRequired("StringValue")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains information about a parameter object. +type ParameterObject struct { + _ struct{} `type:"structure"` + + // The attributes of the parameter object. + Attributes []*ParameterAttribute `locationName:"attributes" type:"list" required:"true"` + + // The ID of the parameter object. + Id *string `locationName:"id" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ParameterObject) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ParameterObject) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ParameterObject) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ParameterObject"} + if s.Attributes == nil { + invalidParams.Add(request.NewErrParamRequired("Attributes")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) + } + if s.Attributes != nil { + for i, v := range s.Attributes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Attributes", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A value or list of parameter values. +type ParameterValue struct { + _ struct{} `type:"structure"` + + // The ID of the parameter value. + Id *string `locationName:"id" min:"1" type:"string" required:"true"` + + // The field value, expressed as a String. + StringValue *string `locationName:"stringValue" type:"string" required:"true"` +} + +// String returns the string representation +func (s ParameterValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ParameterValue) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ParameterValue) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ParameterValue"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) + } + if s.StringValue == nil { + invalidParams.Add(request.NewErrParamRequired("StringValue")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains pipeline metadata. +type PipelineDescription struct { + _ struct{} `type:"structure"` + + // Description of the pipeline. + Description *string `locationName:"description" type:"string"` + + // A list of read-only fields that contain metadata about the pipeline: @userId, + // @accountId, and @pipelineState. + Fields []*Field `locationName:"fields" type:"list" required:"true"` + + // The name of the pipeline. 
+ Name *string `locationName:"name" min:"1" type:"string" required:"true"`
+
+ // The pipeline identifier that was assigned by AWS Data Pipeline. This is a
+ // string of the form df-297EG78HU43EEXAMPLE.
+ PipelineId *string `locationName:"pipelineId" min:"1" type:"string" required:"true"`
+
+ // A list of tags associated with a pipeline. Tags let you control access
+ // to pipelines. For more information, see Controlling User Access to Pipelines
+ // (http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-control-access.html)
+ // in the AWS Data Pipeline Developer Guide.
+ Tags []*Tag `locationName:"tags" type:"list"`
+}
+
+// String returns the string representation
+func (s PipelineDescription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PipelineDescription) GoString() string {
+ return s.String()
+}
+
+// Contains the name and identifier of a pipeline.
+type PipelineIdName struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the pipeline that was assigned by AWS Data Pipeline. This is a
+ // string of the form df-297EG78HU43EEXAMPLE.
+ Id *string `locationName:"id" min:"1" type:"string"`
+
+ // The name of the pipeline.
+ Name *string `locationName:"name" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s PipelineIdName) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PipelineIdName) GoString() string {
+ return s.String()
+}
+
+// Contains information about a pipeline object. This can be a logical, physical,
+// or physical attempt pipeline object. The complete set of components of a
+// pipeline defines the pipeline.
+type PipelineObject struct {
+ _ struct{} `type:"structure"`
+
+ // Key-value pairs that define the properties of the object.
+ Fields []*Field `locationName:"fields" type:"list" required:"true"`
+
+ // The ID of the object.
+ Id *string `locationName:"id" min:"1" type:"string" required:"true"`
+
+ // The name of the object.
+ Name *string `locationName:"name" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s PipelineObject) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PipelineObject) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PipelineObject) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PipelineObject"}
+ if s.Fields == nil {
+ invalidParams.Add(request.NewErrParamRequired("Fields"))
+ }
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+ if s.Id != nil && len(*s.Id) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Id", 1))
+ }
+ if s.Name == nil {
+ invalidParams.Add(request.NewErrParamRequired("Name"))
+ }
+ if s.Name != nil && len(*s.Name) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Name", 1))
+ }
+ if s.Fields != nil {
+ for i, v := range s.Fields {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Fields", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Contains the parameters for PollForTask.
+type PollForTaskInput struct {
+ _ struct{} `type:"structure"`
+
+ // The public DNS name of the calling task runner.
+ Hostname *string `locationName:"hostname" min:"1" type:"string"`
+
+ // Identity information for the EC2 instance that is hosting the task runner.
+ // You can get this value from the instance using http://169.254.169.254/latest/meta-data/instance-id.
+ // For more information, see Instance Metadata (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html)
+ // in the Amazon Elastic Compute Cloud User Guide. Passing in this value proves
+ // that your task runner is running on an EC2 instance, and ensures the proper
+ // AWS Data Pipeline service charges are applied to your pipeline.
+ InstanceIdentity *InstanceIdentity `locationName:"instanceIdentity" type:"structure"`
+
+ // The type of task the task runner is configured to accept and process. The
+ // worker group is set as a field on objects in the pipeline when they are created.
+ // You can only specify a single value for workerGroup in the call to PollForTask.
+ // There are no wildcard values permitted in workerGroup; the string must be
+ // an exact, case-sensitive match.
+ WorkerGroup *string `locationName:"workerGroup" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s PollForTaskInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PollForTaskInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PollForTaskInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PollForTaskInput"}
+ if s.Hostname != nil && len(*s.Hostname) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Hostname", 1))
+ }
+ if s.WorkerGroup == nil {
+ invalidParams.Add(request.NewErrParamRequired("WorkerGroup"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Contains the output of PollForTask.
+type PollForTaskOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The information needed to complete the task that is being assigned to the
+ // task runner. One of the fields returned in this object is taskId, which contains
+ // an identifier for the task being assigned. The calling task runner uses taskId
+ // in subsequent calls to ReportTaskProgress and SetTaskStatus.
+ TaskObject *TaskObject `locationName:"taskObject" type:"structure"`
+}
+
+// String returns the string representation
+func (s PollForTaskOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PollForTaskOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the parameters for PutPipelineDefinition.
+type PutPipelineDefinitionInput struct {
+ _ struct{} `type:"structure"`
+
+ // The parameter objects used with the pipeline.
+ ParameterObjects []*ParameterObject `locationName:"parameterObjects" type:"list"`
+
+ // The parameter values used with the pipeline.
+ ParameterValues []*ParameterValue `locationName:"parameterValues" type:"list"`
+
+ // The ID of the pipeline.
+ PipelineId *string `locationName:"pipelineId" min:"1" type:"string" required:"true"`
+
+ // The objects that define the pipeline. These objects overwrite the existing
+ // pipeline definition.
+ PipelineObjects []*PipelineObject `locationName:"pipelineObjects" type:"list" required:"true"` +} + +// String returns the string representation +func (s PutPipelineDefinitionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutPipelineDefinitionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutPipelineDefinitionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutPipelineDefinitionInput"} + if s.PipelineId == nil { + invalidParams.Add(request.NewErrParamRequired("PipelineId")) + } + if s.PipelineId != nil && len(*s.PipelineId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PipelineId", 1)) + } + if s.PipelineObjects == nil { + invalidParams.Add(request.NewErrParamRequired("PipelineObjects")) + } + if s.ParameterObjects != nil { + for i, v := range s.ParameterObjects { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ParameterObjects", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ParameterValues != nil { + for i, v := range s.ParameterValues { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ParameterValues", i), err.(request.ErrInvalidParams)) + } + } + } + if s.PipelineObjects != nil { + for i, v := range s.PipelineObjects { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PipelineObjects", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of PutPipelineDefinition. +type PutPipelineDefinitionOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether there were validation errors, and the pipeline definition + // is stored but cannot be activated until you correct the pipeline and call + // PutPipelineDefinition to commit the corrected pipeline. + Errored *bool `locationName:"errored" type:"boolean" required:"true"` + + // The validation errors that are associated with the objects defined in pipelineObjects. + ValidationErrors []*ValidationError `locationName:"validationErrors" type:"list"` + + // The validation warnings that are associated with the objects defined in pipelineObjects. + ValidationWarnings []*ValidationWarning `locationName:"validationWarnings" type:"list"` +} + +// String returns the string representation +func (s PutPipelineDefinitionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutPipelineDefinitionOutput) GoString() string { + return s.String() +} + +// Defines the query to run against an object. +type Query struct { + _ struct{} `type:"structure"` + + // List of selectors that define the query. An object must satisfy all of the + // selectors to match the query. + Selectors []*Selector `locationName:"selectors" type:"list"` +} + +// String returns the string representation +func (s Query) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Query) GoString() string { + return s.String() +} + +// Contains the parameters for QueryObjects. +type QueryObjectsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of object names that QueryObjects will return in a single + // call. The default value is 100. 
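+ // When more results exist than the limit, page with the returned marker, or
+ // let the SDK iterate for you. A minimal sketch (illustrative, not generated
+ // API text; svc is an existing *DataPipeline client and params a built
+ // *QueryObjectsInput):
+ //
+ //	err := svc.QueryObjectsPages(params,
+ //	    func(page *QueryObjectsOutput, lastPage bool) bool {
+ //	        fmt.Println(page.Ids) // process one page of matching IDs
+ //	        return !lastPage      // keep paging until the final page
+ //	    })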
+ Limit *int64 `locationName:"limit" type:"integer"` + + // The starting point for the results to be returned. For the first call, this + // value should be empty. As long as there are more results, continue to call + // QueryObjects with the marker value from the previous call to retrieve the + // next set of results. + Marker *string `locationName:"marker" type:"string"` + + // The ID of the pipeline. + PipelineId *string `locationName:"pipelineId" min:"1" type:"string" required:"true"` + + // The query that defines the objects to be returned. The Query object can contain + // a maximum of ten selectors. The conditions in the query are limited to top-level + // String fields in the object. These filters can be applied to components, + // instances, and attempts. + Query *Query `locationName:"query" type:"structure"` + + // Indicates whether the query applies to components or instances. The possible + // values are: COMPONENT, INSTANCE, and ATTEMPT. + Sphere *string `locationName:"sphere" type:"string" required:"true"` +} + +// String returns the string representation +func (s QueryObjectsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s QueryObjectsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *QueryObjectsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "QueryObjectsInput"} + if s.PipelineId == nil { + invalidParams.Add(request.NewErrParamRequired("PipelineId")) + } + if s.PipelineId != nil && len(*s.PipelineId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PipelineId", 1)) + } + if s.Sphere == nil { + invalidParams.Add(request.NewErrParamRequired("Sphere")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of QueryObjects. +type QueryObjectsOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether there are more results that can be obtained by a subsequent + // call. + HasMoreResults *bool `locationName:"hasMoreResults" type:"boolean"` + + // The identifiers that match the query selectors. + Ids []*string `locationName:"ids" type:"list"` + + // The starting point for the next page of results. To view the next page of + // results, call QueryObjects again with this marker value. If the value is + // null, there are no more results. + Marker *string `locationName:"marker" type:"string"` +} + +// String returns the string representation +func (s QueryObjectsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s QueryObjectsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for RemoveTags. +type RemoveTagsInput struct { + _ struct{} `type:"structure"` + + // The ID of the pipeline. + PipelineId *string `locationName:"pipelineId" min:"1" type:"string" required:"true"` + + // The keys of the tags to remove. + TagKeys []*string `locationName:"tagKeys" type:"list" required:"true"` +} + +// String returns the string representation +func (s RemoveTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *RemoveTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemoveTagsInput"} + if s.PipelineId == nil { + invalidParams.Add(request.NewErrParamRequired("PipelineId")) + } + if s.PipelineId != nil && len(*s.PipelineId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PipelineId", 1)) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of RemoveTags. +type RemoveTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for ReportTaskProgress. +type ReportTaskProgressInput struct { + _ struct{} `type:"structure"` + + // Key-value pairs that define the properties of the ReportTaskProgressInput + // object. + Fields []*Field `locationName:"fields" type:"list"` + + // The ID of the task assigned to the task runner. This value is provided in + // the response for PollForTask. + TaskId *string `locationName:"taskId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ReportTaskProgressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReportTaskProgressInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReportTaskProgressInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReportTaskProgressInput"} + if s.TaskId == nil { + invalidParams.Add(request.NewErrParamRequired("TaskId")) + } + if s.TaskId != nil && len(*s.TaskId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TaskId", 1)) + } + if s.Fields != nil { + for i, v := range s.Fields { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Fields", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of ReportTaskProgress. +type ReportTaskProgressOutput struct { + _ struct{} `type:"structure"` + + // If true, the calling task runner should cancel processing of the task. The + // task runner does not need to call SetTaskStatus for canceled tasks. + Canceled *bool `locationName:"canceled" type:"boolean" required:"true"` +} + +// String returns the string representation +func (s ReportTaskProgressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReportTaskProgressOutput) GoString() string { + return s.String() +} + +// Contains the parameters for ReportTaskRunnerHeartbeat. +type ReportTaskRunnerHeartbeatInput struct { + _ struct{} `type:"structure"` + + // The public DNS name of the task runner. + Hostname *string `locationName:"hostname" min:"1" type:"string"` + + // The ID of the task runner. This value should be unique across your AWS account. + // In the case of AWS Data Pipeline Task Runner launched on a resource managed + // by AWS Data Pipeline, the web service provides a unique identifier when it + // launches the application. If you have written a custom task runner, you should + // assign a unique identifier for the task runner. 
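+ // For example, a custom task runner might generate a UUID once at startup and
+ // reuse it for every call (an illustrative convention, not an API requirement).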
+ TaskrunnerId *string `locationName:"taskrunnerId" min:"1" type:"string" required:"true"`
+
+ // The type of task the task runner is configured to accept and process. The
+ // worker group is set as a field on objects in the pipeline when they are created.
+ // You can only specify a single value for workerGroup. There are no wildcard
+ // values permitted in workerGroup; the string must be an exact, case-sensitive
+ // match.
+ WorkerGroup *string `locationName:"workerGroup" type:"string"`
+}
+
+// String returns the string representation
+func (s ReportTaskRunnerHeartbeatInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReportTaskRunnerHeartbeatInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReportTaskRunnerHeartbeatInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ReportTaskRunnerHeartbeatInput"}
+ if s.Hostname != nil && len(*s.Hostname) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Hostname", 1))
+ }
+ if s.TaskrunnerId == nil {
+ invalidParams.Add(request.NewErrParamRequired("TaskrunnerId"))
+ }
+ if s.TaskrunnerId != nil && len(*s.TaskrunnerId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("TaskrunnerId", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Contains the output of ReportTaskRunnerHeartbeat.
+type ReportTaskRunnerHeartbeatOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether the calling task runner should terminate.
+ Terminate *bool `locationName:"terminate" type:"boolean" required:"true"`
+}
+
+// String returns the string representation
+func (s ReportTaskRunnerHeartbeatOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReportTaskRunnerHeartbeatOutput) GoString() string {
+ return s.String()
+}
+
+// A comparison that is used to determine whether a query should return this
+// object.
+type Selector struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the field that the operator will be applied to. The field name
+ // is the "key" portion of the field definition in the pipeline definition syntax
+ // that is used by the AWS Data Pipeline API. If the field is not set on the
+ // object, the condition fails.
+ FieldName *string `locationName:"fieldName" type:"string"`
+
+ // Contains a logical operation for comparing the value of a field with a specified
+ // value.
+ Operator *Operator `locationName:"operator" type:"structure"`
+}
+
+// String returns the string representation
+func (s Selector) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Selector) GoString() string {
+ return s.String()
+}
+
+// Contains the parameters for SetStatus.
+type SetStatusInput struct {
+ _ struct{} `type:"structure"`
+
+ // The IDs of the objects. The corresponding objects can be either physical
+ // objects or components, but not a mix of both types.
+ ObjectIds []*string `locationName:"objectIds" type:"list" required:"true"`
+
+ // The ID of the pipeline that contains the objects.
+ PipelineId *string `locationName:"pipelineId" min:"1" type:"string" required:"true"`
+
+ // The status to be set on all the objects specified in objectIds. For components,
+ // use PAUSE or RESUME. For instances, use TRY_CANCEL, RERUN, or MARK_FINISHED.
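+ // For example, to pause a component object (an illustrative sketch; the object
+ // ID is hypothetical):
+ //
+ //	svc.SetStatus(&SetStatusInput{
+ //	    ObjectIds:  []*string{aws.String("Default")},
+ //	    PipelineId: aws.String("df-297EG78HU43EEXAMPLE"),
+ //	    Status:     aws.String("PAUSE"),
+ //	})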
+ Status *string `locationName:"status" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s SetStatusInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetStatusInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SetStatusInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "SetStatusInput"}
+ if s.ObjectIds == nil {
+ invalidParams.Add(request.NewErrParamRequired("ObjectIds"))
+ }
+ if s.PipelineId == nil {
+ invalidParams.Add(request.NewErrParamRequired("PipelineId"))
+ }
+ if s.PipelineId != nil && len(*s.PipelineId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("PipelineId", 1))
+ }
+ if s.Status == nil {
+ invalidParams.Add(request.NewErrParamRequired("Status"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type SetStatusOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s SetStatusOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetStatusOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the parameters for SetTaskStatus.
+type SetTaskStatusInput struct {
+ _ struct{} `type:"structure"`
+
+ // If an error occurred during the task, this value specifies the error code.
+ // This value is set on the physical attempt object. It is used to display error
+ // information to the user. It should not start with the string "Service_",
+ // which is reserved by the system.
+ ErrorId *string `locationName:"errorId" type:"string"`
+
+ // If an error occurred during the task, this value specifies a text description
+ // of the error. This value is set on the physical attempt object. It is used
+ // to display error information to the user. The web service does not parse
+ // this value.
+ ErrorMessage *string `locationName:"errorMessage" type:"string"`
+
+ // If an error occurred during the task, this value specifies the stack trace
+ // associated with the error. This value is set on the physical attempt object.
+ // It is used to display error information to the user. The web service does
+ // not parse this value.
+ ErrorStackTrace *string `locationName:"errorStackTrace" type:"string"`
+
+ // The ID of the task assigned to the task runner. This value is provided in
+ // the response for PollForTask.
+ TaskId *string `locationName:"taskId" min:"1" type:"string" required:"true"`
+
+ // If FINISHED, the task successfully completed. If FAILED, the task ended unsuccessfully.
+ // Preconditions use FALSE.
+ TaskStatus *string `locationName:"taskStatus" type:"string" required:"true" enum:"TaskStatus"`
+}
+
+// String returns the string representation
+func (s SetTaskStatusInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetTaskStatusInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
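+// The SDK also runs this method automatically before sending the request, so a
+// call with missing required parameters fails fast with an ErrInvalidParams
+// error instead of a service round trip.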
+func (s *SetTaskStatusInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetTaskStatusInput"} + if s.TaskId == nil { + invalidParams.Add(request.NewErrParamRequired("TaskId")) + } + if s.TaskId != nil && len(*s.TaskId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TaskId", 1)) + } + if s.TaskStatus == nil { + invalidParams.Add(request.NewErrParamRequired("TaskStatus")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of SetTaskStatus. +type SetTaskStatusOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetTaskStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetTaskStatusOutput) GoString() string { + return s.String() +} + +// Tags are key/value pairs defined by a user and associated with a pipeline +// to control access. AWS Data Pipeline allows you to associate ten tags per +// pipeline. For more information, see Controlling User Access to Pipelines +// (http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-control-access.html) +// in the AWS Data Pipeline Developer Guide. +type Tag struct { + _ struct{} `type:"structure"` + + // The key name of a tag defined by a user. For more information, see Controlling + // User Access to Pipelines (http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-control-access.html) + // in the AWS Data Pipeline Developer Guide. + Key *string `locationName:"key" min:"1" type:"string" required:"true"` + + // The optional value portion of a tag defined by a user. For more information, + // see Controlling User Access to Pipelines (http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-control-access.html) + // in the AWS Data Pipeline Developer Guide. + Value *string `locationName:"value" type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains information about a pipeline task that is assigned to a task runner. +type TaskObject struct { + _ struct{} `type:"structure"` + + // The ID of the pipeline task attempt object. AWS Data Pipeline uses this value + // to track how many times a task is attempted. + AttemptId *string `locationName:"attemptId" min:"1" type:"string"` + + // Connection information for the location where the task runner will publish + // the output of the task. + Objects map[string]*PipelineObject `locationName:"objects" type:"map"` + + // The ID of the pipeline that provided the task. + PipelineId *string `locationName:"pipelineId" min:"1" type:"string"` + + // An internal identifier for the task. This ID is passed to the SetTaskStatus + // and ReportTaskProgress actions. 
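+ // A typical task runner flow (an illustrative sketch, not generated API text;
+ // pollParams is assumed to be a built *PollForTaskInput):
+ //
+ //	task, err := svc.PollForTask(pollParams)
+ //	if err == nil && task.TaskObject != nil {
+ //	    // ... perform the work, calling ReportTaskProgress periodically ...
+ //	    svc.SetTaskStatus(&SetTaskStatusInput{
+ //	        TaskId:     task.TaskObject.TaskId,
+ //	        TaskStatus: aws.String(TaskStatusFinished),
+ //	    })
+ //	}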
+ TaskId *string `locationName:"taskId" min:"1" type:"string"` +} + +// String returns the string representation +func (s TaskObject) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TaskObject) GoString() string { + return s.String() +} + +// Contains the parameters for ValidatePipelineDefinition. +type ValidatePipelineDefinitionInput struct { + _ struct{} `type:"structure"` + + // The parameter objects used with the pipeline. + ParameterObjects []*ParameterObject `locationName:"parameterObjects" type:"list"` + + // The parameter values used with the pipeline. + ParameterValues []*ParameterValue `locationName:"parameterValues" type:"list"` + + // The ID of the pipeline. + PipelineId *string `locationName:"pipelineId" min:"1" type:"string" required:"true"` + + // The objects that define the pipeline changes to validate against the pipeline. + PipelineObjects []*PipelineObject `locationName:"pipelineObjects" type:"list" required:"true"` +} + +// String returns the string representation +func (s ValidatePipelineDefinitionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ValidatePipelineDefinitionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ValidatePipelineDefinitionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ValidatePipelineDefinitionInput"} + if s.PipelineId == nil { + invalidParams.Add(request.NewErrParamRequired("PipelineId")) + } + if s.PipelineId != nil && len(*s.PipelineId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PipelineId", 1)) + } + if s.PipelineObjects == nil { + invalidParams.Add(request.NewErrParamRequired("PipelineObjects")) + } + if s.ParameterObjects != nil { + for i, v := range s.ParameterObjects { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ParameterObjects", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ParameterValues != nil { + for i, v := range s.ParameterValues { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ParameterValues", i), err.(request.ErrInvalidParams)) + } + } + } + if s.PipelineObjects != nil { + for i, v := range s.PipelineObjects { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PipelineObjects", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of ValidatePipelineDefinition. +type ValidatePipelineDefinitionOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether there were validation errors. + Errored *bool `locationName:"errored" type:"boolean" required:"true"` + + // Any validation errors that were found. + ValidationErrors []*ValidationError `locationName:"validationErrors" type:"list"` + + // Any validation warnings that were found. + ValidationWarnings []*ValidationWarning `locationName:"validationWarnings" type:"list"` +} + +// String returns the string representation +func (s ValidatePipelineDefinitionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ValidatePipelineDefinitionOutput) GoString() string { + return s.String() +} + +// Defines a validation error. 
Validation errors prevent pipeline activation. +// The set of validation errors that can be returned are defined by AWS Data +// Pipeline. +type ValidationError struct { + _ struct{} `type:"structure"` + + // A description of the validation error. + Errors []*string `locationName:"errors" type:"list"` + + // The identifier of the object that contains the validation error. + Id *string `locationName:"id" min:"1" type:"string"` +} + +// String returns the string representation +func (s ValidationError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ValidationError) GoString() string { + return s.String() +} + +// Defines a validation warning. Validation warnings do not prevent pipeline +// activation. The set of validation warnings that can be returned are defined +// by AWS Data Pipeline. +type ValidationWarning struct { + _ struct{} `type:"structure"` + + // The identifier of the object that contains the validation warning. + Id *string `locationName:"id" min:"1" type:"string"` + + // A description of the validation warning. + Warnings []*string `locationName:"warnings" type:"list"` +} + +// String returns the string representation +func (s ValidationWarning) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ValidationWarning) GoString() string { + return s.String() +} + +const ( + // @enum OperatorType + OperatorTypeEq = "EQ" + // @enum OperatorType + OperatorTypeRefEq = "REF_EQ" + // @enum OperatorType + OperatorTypeLe = "LE" + // @enum OperatorType + OperatorTypeGe = "GE" + // @enum OperatorType + OperatorTypeBetween = "BETWEEN" +) + +const ( + // @enum TaskStatus + TaskStatusFinished = "FINISHED" + // @enum TaskStatus + TaskStatusFailed = "FAILED" + // @enum TaskStatus + TaskStatusFalse = "FALSE" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/datapipeline/datapipelineiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/datapipeline/datapipelineiface/interface.go new file mode 100644 index 000000000..df64c8cd6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/datapipeline/datapipelineiface/interface.go @@ -0,0 +1,96 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package datapipelineiface provides an interface for the AWS Data Pipeline. +package datapipelineiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/datapipeline" +) + +// DataPipelineAPI is the interface type for datapipeline.DataPipeline. 
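+//
+// Code that takes this interface instead of the concrete client can be unit
+// tested with a stub. A minimal sketch (the mock type is illustrative, not
+// part of the generated package):
+//
+//	type mockDataPipelineClient struct {
+//	    datapipelineiface.DataPipelineAPI
+//	}
+//
+//	func (m *mockDataPipelineClient) ListPipelines(in *datapipeline.ListPipelinesInput) (*datapipeline.ListPipelinesOutput, error) {
+//	    return &datapipeline.ListPipelinesOutput{}, nil // canned response for tests
+//	}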
+type DataPipelineAPI interface { + ActivatePipelineRequest(*datapipeline.ActivatePipelineInput) (*request.Request, *datapipeline.ActivatePipelineOutput) + + ActivatePipeline(*datapipeline.ActivatePipelineInput) (*datapipeline.ActivatePipelineOutput, error) + + AddTagsRequest(*datapipeline.AddTagsInput) (*request.Request, *datapipeline.AddTagsOutput) + + AddTags(*datapipeline.AddTagsInput) (*datapipeline.AddTagsOutput, error) + + CreatePipelineRequest(*datapipeline.CreatePipelineInput) (*request.Request, *datapipeline.CreatePipelineOutput) + + CreatePipeline(*datapipeline.CreatePipelineInput) (*datapipeline.CreatePipelineOutput, error) + + DeactivatePipelineRequest(*datapipeline.DeactivatePipelineInput) (*request.Request, *datapipeline.DeactivatePipelineOutput) + + DeactivatePipeline(*datapipeline.DeactivatePipelineInput) (*datapipeline.DeactivatePipelineOutput, error) + + DeletePipelineRequest(*datapipeline.DeletePipelineInput) (*request.Request, *datapipeline.DeletePipelineOutput) + + DeletePipeline(*datapipeline.DeletePipelineInput) (*datapipeline.DeletePipelineOutput, error) + + DescribeObjectsRequest(*datapipeline.DescribeObjectsInput) (*request.Request, *datapipeline.DescribeObjectsOutput) + + DescribeObjects(*datapipeline.DescribeObjectsInput) (*datapipeline.DescribeObjectsOutput, error) + + DescribeObjectsPages(*datapipeline.DescribeObjectsInput, func(*datapipeline.DescribeObjectsOutput, bool) bool) error + + DescribePipelinesRequest(*datapipeline.DescribePipelinesInput) (*request.Request, *datapipeline.DescribePipelinesOutput) + + DescribePipelines(*datapipeline.DescribePipelinesInput) (*datapipeline.DescribePipelinesOutput, error) + + EvaluateExpressionRequest(*datapipeline.EvaluateExpressionInput) (*request.Request, *datapipeline.EvaluateExpressionOutput) + + EvaluateExpression(*datapipeline.EvaluateExpressionInput) (*datapipeline.EvaluateExpressionOutput, error) + + GetPipelineDefinitionRequest(*datapipeline.GetPipelineDefinitionInput) (*request.Request, *datapipeline.GetPipelineDefinitionOutput) + + GetPipelineDefinition(*datapipeline.GetPipelineDefinitionInput) (*datapipeline.GetPipelineDefinitionOutput, error) + + ListPipelinesRequest(*datapipeline.ListPipelinesInput) (*request.Request, *datapipeline.ListPipelinesOutput) + + ListPipelines(*datapipeline.ListPipelinesInput) (*datapipeline.ListPipelinesOutput, error) + + ListPipelinesPages(*datapipeline.ListPipelinesInput, func(*datapipeline.ListPipelinesOutput, bool) bool) error + + PollForTaskRequest(*datapipeline.PollForTaskInput) (*request.Request, *datapipeline.PollForTaskOutput) + + PollForTask(*datapipeline.PollForTaskInput) (*datapipeline.PollForTaskOutput, error) + + PutPipelineDefinitionRequest(*datapipeline.PutPipelineDefinitionInput) (*request.Request, *datapipeline.PutPipelineDefinitionOutput) + + PutPipelineDefinition(*datapipeline.PutPipelineDefinitionInput) (*datapipeline.PutPipelineDefinitionOutput, error) + + QueryObjectsRequest(*datapipeline.QueryObjectsInput) (*request.Request, *datapipeline.QueryObjectsOutput) + + QueryObjects(*datapipeline.QueryObjectsInput) (*datapipeline.QueryObjectsOutput, error) + + QueryObjectsPages(*datapipeline.QueryObjectsInput, func(*datapipeline.QueryObjectsOutput, bool) bool) error + + RemoveTagsRequest(*datapipeline.RemoveTagsInput) (*request.Request, *datapipeline.RemoveTagsOutput) + + RemoveTags(*datapipeline.RemoveTagsInput) (*datapipeline.RemoveTagsOutput, error) + + ReportTaskProgressRequest(*datapipeline.ReportTaskProgressInput) (*request.Request, 
*datapipeline.ReportTaskProgressOutput) + + ReportTaskProgress(*datapipeline.ReportTaskProgressInput) (*datapipeline.ReportTaskProgressOutput, error) + + ReportTaskRunnerHeartbeatRequest(*datapipeline.ReportTaskRunnerHeartbeatInput) (*request.Request, *datapipeline.ReportTaskRunnerHeartbeatOutput) + + ReportTaskRunnerHeartbeat(*datapipeline.ReportTaskRunnerHeartbeatInput) (*datapipeline.ReportTaskRunnerHeartbeatOutput, error) + + SetStatusRequest(*datapipeline.SetStatusInput) (*request.Request, *datapipeline.SetStatusOutput) + + SetStatus(*datapipeline.SetStatusInput) (*datapipeline.SetStatusOutput, error) + + SetTaskStatusRequest(*datapipeline.SetTaskStatusInput) (*request.Request, *datapipeline.SetTaskStatusOutput) + + SetTaskStatus(*datapipeline.SetTaskStatusInput) (*datapipeline.SetTaskStatusOutput, error) + + ValidatePipelineDefinitionRequest(*datapipeline.ValidatePipelineDefinitionInput) (*request.Request, *datapipeline.ValidatePipelineDefinitionOutput) + + ValidatePipelineDefinition(*datapipeline.ValidatePipelineDefinitionInput) (*datapipeline.ValidatePipelineDefinitionOutput, error) +} + +var _ DataPipelineAPI = (*datapipeline.DataPipeline)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/datapipeline/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/datapipeline/examples_test.go new file mode 100644 index 000000000..1c2efcd89 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/datapipeline/examples_test.go @@ -0,0 +1,530 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package datapipeline_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/datapipeline" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleDataPipeline_ActivatePipeline() { + svc := datapipeline.New(session.New()) + + params := &datapipeline.ActivatePipelineInput{ + PipelineId: aws.String("id"), // Required + ParameterValues: []*datapipeline.ParameterValue{ + { // Required + Id: aws.String("fieldNameString"), // Required + StringValue: aws.String("fieldStringValue"), // Required + }, + // More values... + }, + StartTimestamp: aws.Time(time.Now()), + } + resp, err := svc.ActivatePipeline(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDataPipeline_AddTags() { + svc := datapipeline.New(session.New()) + + params := &datapipeline.AddTagsInput{ + PipelineId: aws.String("id"), // Required + Tags: []*datapipeline.Tag{ // Required + { // Required + Key: aws.String("tagKey"), // Required + Value: aws.String("tagValue"), // Required + }, + // More values... + }, + } + resp, err := svc.AddTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDataPipeline_CreatePipeline() { + svc := datapipeline.New(session.New()) + + params := &datapipeline.CreatePipelineInput{ + Name: aws.String("id"), // Required + UniqueId: aws.String("id"), // Required + Description: aws.String("string"), + Tags: []*datapipeline.Tag{ + { // Required + Key: aws.String("tagKey"), // Required + Value: aws.String("tagValue"), // Required + }, + // More values... 
+ }, + } + resp, err := svc.CreatePipeline(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDataPipeline_DeactivatePipeline() { + svc := datapipeline.New(session.New()) + + params := &datapipeline.DeactivatePipelineInput{ + PipelineId: aws.String("id"), // Required + CancelActive: aws.Bool(true), + } + resp, err := svc.DeactivatePipeline(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDataPipeline_DeletePipeline() { + svc := datapipeline.New(session.New()) + + params := &datapipeline.DeletePipelineInput{ + PipelineId: aws.String("id"), // Required + } + resp, err := svc.DeletePipeline(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDataPipeline_DescribeObjects() { + svc := datapipeline.New(session.New()) + + params := &datapipeline.DescribeObjectsInput{ + ObjectIds: []*string{ // Required + aws.String("id"), // Required + // More values... + }, + PipelineId: aws.String("id"), // Required + EvaluateExpressions: aws.Bool(true), + Marker: aws.String("string"), + } + resp, err := svc.DescribeObjects(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDataPipeline_DescribePipelines() { + svc := datapipeline.New(session.New()) + + params := &datapipeline.DescribePipelinesInput{ + PipelineIds: []*string{ // Required + aws.String("id"), // Required + // More values... + }, + } + resp, err := svc.DescribePipelines(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDataPipeline_EvaluateExpression() { + svc := datapipeline.New(session.New()) + + params := &datapipeline.EvaluateExpressionInput{ + Expression: aws.String("longString"), // Required + ObjectId: aws.String("id"), // Required + PipelineId: aws.String("id"), // Required + } + resp, err := svc.EvaluateExpression(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDataPipeline_GetPipelineDefinition() { + svc := datapipeline.New(session.New()) + + params := &datapipeline.GetPipelineDefinitionInput{ + PipelineId: aws.String("id"), // Required + Version: aws.String("string"), + } + resp, err := svc.GetPipelineDefinition(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDataPipeline_ListPipelines() { + svc := datapipeline.New(session.New()) + + params := &datapipeline.ListPipelinesInput{ + Marker: aws.String("string"), + } + resp, err := svc.ListPipelines(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDataPipeline_PollForTask() { + svc := datapipeline.New(session.New()) + + params := &datapipeline.PollForTaskInput{ + WorkerGroup: aws.String("string"), // Required + Hostname: aws.String("id"), + InstanceIdentity: &datapipeline.InstanceIdentity{ + Document: aws.String("string"), + Signature: aws.String("string"), + }, + } + resp, err := svc.PollForTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDataPipeline_PutPipelineDefinition() { + svc := datapipeline.New(session.New()) + + params := &datapipeline.PutPipelineDefinitionInput{ + PipelineId: aws.String("id"), // Required + PipelineObjects: []*datapipeline.PipelineObject{ // Required + { // Required + Fields: []*datapipeline.Field{ // Required + { // Required + Key: aws.String("fieldNameString"), // Required + RefValue: aws.String("fieldNameString"), + StringValue: aws.String("fieldStringValue"), + }, + // More values... + }, + Id: aws.String("id"), // Required + Name: aws.String("id"), // Required + }, + // More values... + }, + ParameterObjects: []*datapipeline.ParameterObject{ + { // Required + Attributes: []*datapipeline.ParameterAttribute{ // Required + { // Required + Key: aws.String("attributeNameString"), // Required + StringValue: aws.String("attributeValueString"), // Required + }, + // More values... + }, + Id: aws.String("fieldNameString"), // Required + }, + // More values... + }, + ParameterValues: []*datapipeline.ParameterValue{ + { // Required + Id: aws.String("fieldNameString"), // Required + StringValue: aws.String("fieldStringValue"), // Required + }, + // More values... + }, + } + resp, err := svc.PutPipelineDefinition(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDataPipeline_QueryObjects() { + svc := datapipeline.New(session.New()) + + params := &datapipeline.QueryObjectsInput{ + PipelineId: aws.String("id"), // Required + Sphere: aws.String("string"), // Required + Limit: aws.Int64(1), + Marker: aws.String("string"), + Query: &datapipeline.Query{ + Selectors: []*datapipeline.Selector{ + { // Required + FieldName: aws.String("string"), + Operator: &datapipeline.Operator{ + Type: aws.String("OperatorType"), + Values: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + }, + // More values... + }, + }, + } + resp, err := svc.QueryObjects(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDataPipeline_RemoveTags() { + svc := datapipeline.New(session.New()) + + params := &datapipeline.RemoveTagsInput{ + PipelineId: aws.String("id"), // Required + TagKeys: []*string{ // Required + aws.String("string"), // Required + // More values... + }, + } + resp, err := svc.RemoveTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDataPipeline_ReportTaskProgress() { + svc := datapipeline.New(session.New()) + + params := &datapipeline.ReportTaskProgressInput{ + TaskId: aws.String("taskId"), // Required + Fields: []*datapipeline.Field{ + { // Required + Key: aws.String("fieldNameString"), // Required + RefValue: aws.String("fieldNameString"), + StringValue: aws.String("fieldStringValue"), + }, + // More values... + }, + } + resp, err := svc.ReportTaskProgress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDataPipeline_ReportTaskRunnerHeartbeat() { + svc := datapipeline.New(session.New()) + + params := &datapipeline.ReportTaskRunnerHeartbeatInput{ + TaskrunnerId: aws.String("id"), // Required + Hostname: aws.String("id"), + WorkerGroup: aws.String("string"), + } + resp, err := svc.ReportTaskRunnerHeartbeat(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDataPipeline_SetStatus() { + svc := datapipeline.New(session.New()) + + params := &datapipeline.SetStatusInput{ + ObjectIds: []*string{ // Required + aws.String("id"), // Required + // More values... + }, + PipelineId: aws.String("id"), // Required + Status: aws.String("string"), // Required + } + resp, err := svc.SetStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDataPipeline_SetTaskStatus() { + svc := datapipeline.New(session.New()) + + params := &datapipeline.SetTaskStatusInput{ + TaskId: aws.String("taskId"), // Required + TaskStatus: aws.String("TaskStatus"), // Required + ErrorId: aws.String("string"), + ErrorMessage: aws.String("errorMessage"), + ErrorStackTrace: aws.String("string"), + } + resp, err := svc.SetTaskStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDataPipeline_ValidatePipelineDefinition() { + svc := datapipeline.New(session.New()) + + params := &datapipeline.ValidatePipelineDefinitionInput{ + PipelineId: aws.String("id"), // Required + PipelineObjects: []*datapipeline.PipelineObject{ // Required + { // Required + Fields: []*datapipeline.Field{ // Required + { // Required + Key: aws.String("fieldNameString"), // Required + RefValue: aws.String("fieldNameString"), + StringValue: aws.String("fieldStringValue"), + }, + // More values... 
+ },
+ Id: aws.String("id"), // Required
+ Name: aws.String("id"), // Required
+ },
+ // More values...
+ },
+ ParameterObjects: []*datapipeline.ParameterObject{
+ { // Required
+ Attributes: []*datapipeline.ParameterAttribute{ // Required
+ { // Required
+ Key: aws.String("attributeNameString"), // Required
+ StringValue: aws.String("attributeValueString"), // Required
+ },
+ // More values...
+ },
+ Id: aws.String("fieldNameString"), // Required
+ },
+ // More values...
+ },
+ ParameterValues: []*datapipeline.ParameterValue{
+ { // Required
+ Id: aws.String("fieldNameString"), // Required
+ StringValue: aws.String("fieldStringValue"), // Required
+ },
+ // More values...
+ },
+ }
+ resp, err := svc.ValidatePipelineDefinition(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/datapipeline/service.go b/vendor/github.com/aws/aws-sdk-go/service/datapipeline/service.go
new file mode 100644
index 000000000..b993e569f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/datapipeline/service.go
@@ -0,0 +1,109 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package datapipeline
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+)
+
+// AWS Data Pipeline configures and manages a data-driven workflow called a
+// pipeline. AWS Data Pipeline handles the details of scheduling and ensuring
+// that data dependencies are met so that your application can focus on processing
+// the data.
+//
+// AWS Data Pipeline provides a JAR implementation of a task runner called
+// AWS Data Pipeline Task Runner. AWS Data Pipeline Task Runner provides logic
+// for common data management scenarios, such as performing database queries
+// and running data analysis using Amazon Elastic MapReduce (Amazon EMR). You
+// can use AWS Data Pipeline Task Runner as your task runner, or you can write
+// your own task runner to provide custom data management.
+//
+// AWS Data Pipeline implements two main sets of functionality. Use the first
+// set to create a pipeline and define data sources, schedules, dependencies,
+// and the transforms to be performed on the data. Use the second set in your
+// task runner application to receive the next task ready for processing. The
+// logic for performing the task, such as querying the data, running data analysis,
+// or converting the data from one format to another, is contained within the
+// task runner. The task runner performs the task assigned to it by the web
+// service, reporting progress to the web service as it does so. When the task
+// is done, the task runner reports the final success or failure of the task
+// to the web service.
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type DataPipeline struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
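+// New (below) passes it to the session's ClientConfig call to resolve the
+// client's default endpoint and signing region.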
+const ServiceName = "datapipeline" + +// New creates a new instance of the DataPipeline client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a DataPipeline client from just a session. +// svc := datapipeline.New(mySession) +// +// // Create a DataPipeline client with additional configuration +// svc := datapipeline.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *DataPipeline { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *DataPipeline { + svc := &DataPipeline{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2012-10-29", + JSONVersion: "1.1", + TargetPrefix: "DataPipeline", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a DataPipeline operation and runs any +// custom request initialization. +func (c *DataPipeline) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/devicefarm/api.go b/vendor/github.com/aws/aws-sdk-go/service/devicefarm/api.go new file mode 100644 index 000000000..c7ed71ecc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/devicefarm/api.go @@ -0,0 +1,6288 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package devicefarm provides a client for AWS Device Farm. +package devicefarm + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opCreateDevicePool = "CreateDevicePool" + +// CreateDevicePoolRequest generates a "aws/request.Request" representing the +// client's request for the CreateDevicePool operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDevicePool method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDevicePoolRequest method. 
+// req, resp := client.CreateDevicePoolRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DeviceFarm) CreateDevicePoolRequest(input *CreateDevicePoolInput) (req *request.Request, output *CreateDevicePoolOutput) { + op := &request.Operation{ + Name: opCreateDevicePool, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDevicePoolInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDevicePoolOutput{} + req.Data = output + return +} + +// Creates a device pool. +func (c *DeviceFarm) CreateDevicePool(input *CreateDevicePoolInput) (*CreateDevicePoolOutput, error) { + req, out := c.CreateDevicePoolRequest(input) + err := req.Send() + return out, err +} + +const opCreateProject = "CreateProject" + +// CreateProjectRequest generates a "aws/request.Request" representing the +// client's request for the CreateProject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateProject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateProjectRequest method. +// req, resp := client.CreateProjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DeviceFarm) CreateProjectRequest(input *CreateProjectInput) (req *request.Request, output *CreateProjectOutput) { + op := &request.Operation{ + Name: opCreateProject, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateProjectInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateProjectOutput{} + req.Data = output + return +} + +// Creates a new project. +func (c *DeviceFarm) CreateProject(input *CreateProjectInput) (*CreateProjectOutput, error) { + req, out := c.CreateProjectRequest(input) + err := req.Send() + return out, err +} + +const opCreateUpload = "CreateUpload" + +// CreateUploadRequest generates a "aws/request.Request" representing the +// client's request for the CreateUpload operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateUpload method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateUploadRequest method. 
+// req, resp := client.CreateUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DeviceFarm) CreateUploadRequest(input *CreateUploadInput) (req *request.Request, output *CreateUploadOutput) { + op := &request.Operation{ + Name: opCreateUpload, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateUploadInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateUploadOutput{} + req.Data = output + return +} + +// Uploads an app or test scripts. +func (c *DeviceFarm) CreateUpload(input *CreateUploadInput) (*CreateUploadOutput, error) { + req, out := c.CreateUploadRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDevicePool = "DeleteDevicePool" + +// DeleteDevicePoolRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDevicePool operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDevicePool method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteDevicePoolRequest method. +// req, resp := client.DeleteDevicePoolRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DeviceFarm) DeleteDevicePoolRequest(input *DeleteDevicePoolInput) (req *request.Request, output *DeleteDevicePoolOutput) { + op := &request.Operation{ + Name: opDeleteDevicePool, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDevicePoolInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteDevicePoolOutput{} + req.Data = output + return +} + +// Deletes a device pool given the pool ARN. Does not allow deletion of curated +// pools owned by the system. +func (c *DeviceFarm) DeleteDevicePool(input *DeleteDevicePoolInput) (*DeleteDevicePoolOutput, error) { + req, out := c.DeleteDevicePoolRequest(input) + err := req.Send() + return out, err +} + +const opDeleteProject = "DeleteProject" + +// DeleteProjectRequest generates a "aws/request.Request" representing the +// client's request for the DeleteProject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteProject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteProjectRequest method. 
+// req, resp := client.DeleteProjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DeviceFarm) DeleteProjectRequest(input *DeleteProjectInput) (req *request.Request, output *DeleteProjectOutput) { + op := &request.Operation{ + Name: opDeleteProject, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteProjectInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteProjectOutput{} + req.Data = output + return +} + +// Deletes an AWS Device Farm project, given the project ARN. +// +// Note: Deleting this resource does not stop an in-progress run. +func (c *DeviceFarm) DeleteProject(input *DeleteProjectInput) (*DeleteProjectOutput, error) { + req, out := c.DeleteProjectRequest(input) + err := req.Send() + return out, err +} + +const opDeleteRun = "DeleteRun" + +// DeleteRunRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRun operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteRun method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteRunRequest method. +// req, resp := client.DeleteRunRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DeviceFarm) DeleteRunRequest(input *DeleteRunInput) (req *request.Request, output *DeleteRunOutput) { + op := &request.Operation{ + Name: opDeleteRun, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRunInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteRunOutput{} + req.Data = output + return +} + +// Deletes the run, given the run ARN. +// +// Note: Deleting this resource does not stop an in-progress run. +func (c *DeviceFarm) DeleteRun(input *DeleteRunInput) (*DeleteRunOutput, error) { + req, out := c.DeleteRunRequest(input) + err := req.Send() + return out, err +} + +const opDeleteUpload = "DeleteUpload" + +// DeleteUploadRequest generates a "aws/request.Request" representing the +// client's request for the DeleteUpload operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteUpload method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteUploadRequest method.
+// req, resp := client.DeleteUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DeviceFarm) DeleteUploadRequest(input *DeleteUploadInput) (req *request.Request, output *DeleteUploadOutput) { + op := &request.Operation{ + Name: opDeleteUpload, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteUploadInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteUploadOutput{} + req.Data = output + return +} + +// Deletes an upload given the upload ARN. +func (c *DeviceFarm) DeleteUpload(input *DeleteUploadInput) (*DeleteUploadOutput, error) { + req, out := c.DeleteUploadRequest(input) + err := req.Send() + return out, err +} + +const opGetAccountSettings = "GetAccountSettings" + +// GetAccountSettingsRequest generates a "aws/request.Request" representing the +// client's request for the GetAccountSettings operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetAccountSettings method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetAccountSettingsRequest method. +// req, resp := client.GetAccountSettingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DeviceFarm) GetAccountSettingsRequest(input *GetAccountSettingsInput) (req *request.Request, output *GetAccountSettingsOutput) { + op := &request.Operation{ + Name: opGetAccountSettings, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAccountSettingsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetAccountSettingsOutput{} + req.Data = output + return +} + +// Returns the number of unmetered iOS and/or unmetered Android devices that +// have been purchased by the account. +func (c *DeviceFarm) GetAccountSettings(input *GetAccountSettingsInput) (*GetAccountSettingsOutput, error) { + req, out := c.GetAccountSettingsRequest(input) + err := req.Send() + return out, err +} + +const opGetDevice = "GetDevice" + +// GetDeviceRequest generates a "aws/request.Request" representing the +// client's request for the GetDevice operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetDevice method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetDeviceRequest method. 
+// req, resp := client.GetDeviceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DeviceFarm) GetDeviceRequest(input *GetDeviceInput) (req *request.Request, output *GetDeviceOutput) { + op := &request.Operation{ + Name: opGetDevice, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDeviceInput{} + } + + req = c.newRequest(op, input, output) + output = &GetDeviceOutput{} + req.Data = output + return +} + +// Gets information about a unique device type. +func (c *DeviceFarm) GetDevice(input *GetDeviceInput) (*GetDeviceOutput, error) { + req, out := c.GetDeviceRequest(input) + err := req.Send() + return out, err +} + +const opGetDevicePool = "GetDevicePool" + +// GetDevicePoolRequest generates a "aws/request.Request" representing the +// client's request for the GetDevicePool operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetDevicePool method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetDevicePoolRequest method. +// req, resp := client.GetDevicePoolRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DeviceFarm) GetDevicePoolRequest(input *GetDevicePoolInput) (req *request.Request, output *GetDevicePoolOutput) { + op := &request.Operation{ + Name: opGetDevicePool, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDevicePoolInput{} + } + + req = c.newRequest(op, input, output) + output = &GetDevicePoolOutput{} + req.Data = output + return +} + +// Gets information about a device pool. +func (c *DeviceFarm) GetDevicePool(input *GetDevicePoolInput) (*GetDevicePoolOutput, error) { + req, out := c.GetDevicePoolRequest(input) + err := req.Send() + return out, err +} + +const opGetDevicePoolCompatibility = "GetDevicePoolCompatibility" + +// GetDevicePoolCompatibilityRequest generates a "aws/request.Request" representing the +// client's request for the GetDevicePoolCompatibility operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetDevicePoolCompatibility method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetDevicePoolCompatibilityRequest method. 
+// req, resp := client.GetDevicePoolCompatibilityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DeviceFarm) GetDevicePoolCompatibilityRequest(input *GetDevicePoolCompatibilityInput) (req *request.Request, output *GetDevicePoolCompatibilityOutput) { + op := &request.Operation{ + Name: opGetDevicePoolCompatibility, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDevicePoolCompatibilityInput{} + } + + req = c.newRequest(op, input, output) + output = &GetDevicePoolCompatibilityOutput{} + req.Data = output + return +} + +// Gets information about compatibility with a device pool. +func (c *DeviceFarm) GetDevicePoolCompatibility(input *GetDevicePoolCompatibilityInput) (*GetDevicePoolCompatibilityOutput, error) { + req, out := c.GetDevicePoolCompatibilityRequest(input) + err := req.Send() + return out, err +} + +const opGetJob = "GetJob" + +// GetJobRequest generates a "aws/request.Request" representing the +// client's request for the GetJob operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetJob method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetJobRequest method. +// req, resp := client.GetJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DeviceFarm) GetJobRequest(input *GetJobInput) (req *request.Request, output *GetJobOutput) { + op := &request.Operation{ + Name: opGetJob, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetJobInput{} + } + + req = c.newRequest(op, input, output) + output = &GetJobOutput{} + req.Data = output + return +} + +// Gets information about a job. +func (c *DeviceFarm) GetJob(input *GetJobInput) (*GetJobOutput, error) { + req, out := c.GetJobRequest(input) + err := req.Send() + return out, err +} + +const opGetOfferingStatus = "GetOfferingStatus" + +// GetOfferingStatusRequest generates a "aws/request.Request" representing the +// client's request for the GetOfferingStatus operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetOfferingStatus method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetOfferingStatusRequest method. 
+// req, resp := client.GetOfferingStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DeviceFarm) GetOfferingStatusRequest(input *GetOfferingStatusInput) (req *request.Request, output *GetOfferingStatusOutput) { + op := &request.Operation{ + Name: opGetOfferingStatus, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetOfferingStatusInput{} + } + + req = c.newRequest(op, input, output) + output = &GetOfferingStatusOutput{} + req.Data = output + return +} + +// Gets the current status and future status of all offerings purchased by an +// AWS account. The response indicates how many offerings are currently available +// and the offerings that will be available in the next period. The API returns +// a NotEligible error if the user is not permitted to invoke the operation. +// Please contact aws-devicefarm-support@amazon.com (mailto:aws-devicefarm-support@amazon.com) +// if you believe that you should be able to invoke this operation. +func (c *DeviceFarm) GetOfferingStatus(input *GetOfferingStatusInput) (*GetOfferingStatusOutput, error) { + req, out := c.GetOfferingStatusRequest(input) + err := req.Send() + return out, err +} + +// GetOfferingStatusPages iterates over the pages of a GetOfferingStatus operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetOfferingStatus method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetOfferingStatus operation. +// pageNum := 0 +// err := client.GetOfferingStatusPages(params, +// func(page *GetOfferingStatusOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *DeviceFarm) GetOfferingStatusPages(input *GetOfferingStatusInput, fn func(p *GetOfferingStatusOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.GetOfferingStatusRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*GetOfferingStatusOutput), lastPage) + }) +} + +const opGetProject = "GetProject" + +// GetProjectRequest generates a "aws/request.Request" representing the +// client's request for the GetProject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetProject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetProjectRequest method. 
+// req, resp := client.GetProjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DeviceFarm) GetProjectRequest(input *GetProjectInput) (req *request.Request, output *GetProjectOutput) { + op := &request.Operation{ + Name: opGetProject, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetProjectInput{} + } + + req = c.newRequest(op, input, output) + output = &GetProjectOutput{} + req.Data = output + return +} + +// Gets information about a project. +func (c *DeviceFarm) GetProject(input *GetProjectInput) (*GetProjectOutput, error) { + req, out := c.GetProjectRequest(input) + err := req.Send() + return out, err +} + +const opGetRun = "GetRun" + +// GetRunRequest generates a "aws/request.Request" representing the +// client's request for the GetRun operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetRun method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetRunRequest method. +// req, resp := client.GetRunRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DeviceFarm) GetRunRequest(input *GetRunInput) (req *request.Request, output *GetRunOutput) { + op := &request.Operation{ + Name: opGetRun, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetRunInput{} + } + + req = c.newRequest(op, input, output) + output = &GetRunOutput{} + req.Data = output + return +} + +// Gets information about a run. +func (c *DeviceFarm) GetRun(input *GetRunInput) (*GetRunOutput, error) { + req, out := c.GetRunRequest(input) + err := req.Send() + return out, err +} + +const opGetSuite = "GetSuite" + +// GetSuiteRequest generates a "aws/request.Request" representing the +// client's request for the GetSuite operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetSuite method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetSuiteRequest method. +// req, resp := client.GetSuiteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DeviceFarm) GetSuiteRequest(input *GetSuiteInput) (req *request.Request, output *GetSuiteOutput) { + op := &request.Operation{ + Name: opGetSuite, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSuiteInput{} + } + + req = c.newRequest(op, input, output) + output = &GetSuiteOutput{} + req.Data = output + return +} + +// Gets information about a suite. 
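+//
+// A minimal usage sketch (not generator output): it assumes a configured
+// *DeviceFarm client named svc, and the ARN below is a placeholder, not a
+// real resource:
+//
+//    params := &devicefarm.GetSuiteInput{
+//        Arn: aws.String("arn:aws:devicefarm:REGION:ACCOUNT:suite:PLACEHOLDER"), // placeholder ARN
+//    }
+//    resp, err := svc.GetSuite(params)
+//    if err != nil {
+//        fmt.Println(err.Error())
+//        return
+//    }
+//    fmt.Println(resp.Suite)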
+func (c *DeviceFarm) GetSuite(input *GetSuiteInput) (*GetSuiteOutput, error) { + req, out := c.GetSuiteRequest(input) + err := req.Send() + return out, err +} + +const opGetTest = "GetTest" + +// GetTestRequest generates a "aws/request.Request" representing the +// client's request for the GetTest operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetTest method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetTestRequest method. +// req, resp := client.GetTestRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DeviceFarm) GetTestRequest(input *GetTestInput) (req *request.Request, output *GetTestOutput) { + op := &request.Operation{ + Name: opGetTest, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetTestInput{} + } + + req = c.newRequest(op, input, output) + output = &GetTestOutput{} + req.Data = output + return +} + +// Gets information about a test. +func (c *DeviceFarm) GetTest(input *GetTestInput) (*GetTestOutput, error) { + req, out := c.GetTestRequest(input) + err := req.Send() + return out, err +} + +const opGetUpload = "GetUpload" + +// GetUploadRequest generates a "aws/request.Request" representing the +// client's request for the GetUpload operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetUpload method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetUploadRequest method. +// req, resp := client.GetUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DeviceFarm) GetUploadRequest(input *GetUploadInput) (req *request.Request, output *GetUploadOutput) { + op := &request.Operation{ + Name: opGetUpload, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetUploadInput{} + } + + req = c.newRequest(op, input, output) + output = &GetUploadOutput{} + req.Data = output + return +} + +// Gets information about an upload. +func (c *DeviceFarm) GetUpload(input *GetUploadInput) (*GetUploadOutput, error) { + req, out := c.GetUploadRequest(input) + err := req.Send() + return out, err +} + +const opListArtifacts = "ListArtifacts" + +// ListArtifactsRequest generates a "aws/request.Request" representing the +// client's request for the ListArtifacts operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListArtifacts method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListArtifactsRequest method. +// req, resp := client.ListArtifactsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DeviceFarm) ListArtifactsRequest(input *ListArtifactsInput) (req *request.Request, output *ListArtifactsOutput) { + op := &request.Operation{ + Name: opListArtifacts, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListArtifactsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListArtifactsOutput{} + req.Data = output + return +} + +// Gets information about artifacts. +func (c *DeviceFarm) ListArtifacts(input *ListArtifactsInput) (*ListArtifactsOutput, error) { + req, out := c.ListArtifactsRequest(input) + err := req.Send() + return out, err +} + +// ListArtifactsPages iterates over the pages of a ListArtifacts operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListArtifacts method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListArtifacts operation. +// pageNum := 0 +// err := client.ListArtifactsPages(params, +// func(page *ListArtifactsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *DeviceFarm) ListArtifactsPages(input *ListArtifactsInput, fn func(p *ListArtifactsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListArtifactsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListArtifactsOutput), lastPage) + }) +} + +const opListDevicePools = "ListDevicePools" + +// ListDevicePoolsRequest generates a "aws/request.Request" representing the +// client's request for the ListDevicePools operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListDevicePools method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListDevicePoolsRequest method. 
+// req, resp := client.ListDevicePoolsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DeviceFarm) ListDevicePoolsRequest(input *ListDevicePoolsInput) (req *request.Request, output *ListDevicePoolsOutput) { + op := &request.Operation{ + Name: opListDevicePools, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDevicePoolsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListDevicePoolsOutput{} + req.Data = output + return +} + +// Gets information about device pools. +func (c *DeviceFarm) ListDevicePools(input *ListDevicePoolsInput) (*ListDevicePoolsOutput, error) { + req, out := c.ListDevicePoolsRequest(input) + err := req.Send() + return out, err +} + +// ListDevicePoolsPages iterates over the pages of a ListDevicePools operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDevicePools method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDevicePools operation. +// pageNum := 0 +// err := client.ListDevicePoolsPages(params, +// func(page *ListDevicePoolsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *DeviceFarm) ListDevicePoolsPages(input *ListDevicePoolsInput, fn func(p *ListDevicePoolsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListDevicePoolsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListDevicePoolsOutput), lastPage) + }) +} + +const opListDevices = "ListDevices" + +// ListDevicesRequest generates a "aws/request.Request" representing the +// client's request for the ListDevices operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListDevices method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListDevicesRequest method. +// req, resp := client.ListDevicesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DeviceFarm) ListDevicesRequest(input *ListDevicesInput) (req *request.Request, output *ListDevicesOutput) { + op := &request.Operation{ + Name: opListDevices, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDevicesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListDevicesOutput{} + req.Data = output + return +} + +// Gets information about unique device types. 
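+//
+// A minimal usage sketch (not generator output): it assumes a configured
+// *DeviceFarm client named svc; an empty input lists the devices visible to
+// the account:
+//
+//    resp, err := svc.ListDevices(&devicefarm.ListDevicesInput{})
+//    if err != nil {
+//        fmt.Println(err.Error())
+//        return
+//    }
+//    for _, device := range resp.Devices {
+//        fmt.Println(*device.Name) // sketch omits a nil check on Name
+//    }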
+func (c *DeviceFarm) ListDevices(input *ListDevicesInput) (*ListDevicesOutput, error) { + req, out := c.ListDevicesRequest(input) + err := req.Send() + return out, err +} + +// ListDevicesPages iterates over the pages of a ListDevices operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDevices method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDevices operation. +// pageNum := 0 +// err := client.ListDevicesPages(params, +// func(page *ListDevicesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *DeviceFarm) ListDevicesPages(input *ListDevicesInput, fn func(p *ListDevicesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListDevicesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListDevicesOutput), lastPage) + }) +} + +const opListJobs = "ListJobs" + +// ListJobsRequest generates a "aws/request.Request" representing the +// client's request for the ListJobs operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListJobs method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListJobsRequest method. +// req, resp := client.ListJobsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DeviceFarm) ListJobsRequest(input *ListJobsInput) (req *request.Request, output *ListJobsOutput) { + op := &request.Operation{ + Name: opListJobs, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListJobsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListJobsOutput{} + req.Data = output + return +} + +// Gets information about jobs. +func (c *DeviceFarm) ListJobs(input *ListJobsInput) (*ListJobsOutput, error) { + req, out := c.ListJobsRequest(input) + err := req.Send() + return out, err +} + +// ListJobsPages iterates over the pages of a ListJobs operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListJobs method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListJobs operation. 
+// pageNum := 0 +// err := client.ListJobsPages(params, +// func(page *ListJobsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *DeviceFarm) ListJobsPages(input *ListJobsInput, fn func(p *ListJobsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListJobsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListJobsOutput), lastPage) + }) +} + +const opListOfferingTransactions = "ListOfferingTransactions" + +// ListOfferingTransactionsRequest generates a "aws/request.Request" representing the +// client's request for the ListOfferingTransactions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListOfferingTransactions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListOfferingTransactionsRequest method. +// req, resp := client.ListOfferingTransactionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DeviceFarm) ListOfferingTransactionsRequest(input *ListOfferingTransactionsInput) (req *request.Request, output *ListOfferingTransactionsOutput) { + op := &request.Operation{ + Name: opListOfferingTransactions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListOfferingTransactionsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListOfferingTransactionsOutput{} + req.Data = output + return +} + +// Returns a list of all historical purchases, renewals, and system renewal +// transactions for an AWS account. The list is paginated and ordered by a descending +// timestamp (most recent transactions are first). The API returns a NotEligible +// error if the user is not permitted to invoke the operation. Please contact +// aws-devicefarm-support@amazon.com (mailto:aws-devicefarm-support@amazon.com) +// if you believe that you should be able to invoke this operation. +func (c *DeviceFarm) ListOfferingTransactions(input *ListOfferingTransactionsInput) (*ListOfferingTransactionsOutput, error) { + req, out := c.ListOfferingTransactionsRequest(input) + err := req.Send() + return out, err +} + +// ListOfferingTransactionsPages iterates over the pages of a ListOfferingTransactions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListOfferingTransactions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListOfferingTransactions operation. 
+// pageNum := 0 +// err := client.ListOfferingTransactionsPages(params, +// func(page *ListOfferingTransactionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *DeviceFarm) ListOfferingTransactionsPages(input *ListOfferingTransactionsInput, fn func(p *ListOfferingTransactionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListOfferingTransactionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListOfferingTransactionsOutput), lastPage) + }) +} + +const opListOfferings = "ListOfferings" + +// ListOfferingsRequest generates a "aws/request.Request" representing the +// client's request for the ListOfferings operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListOfferings method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListOfferingsRequest method. +// req, resp := client.ListOfferingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DeviceFarm) ListOfferingsRequest(input *ListOfferingsInput) (req *request.Request, output *ListOfferingsOutput) { + op := &request.Operation{ + Name: opListOfferings, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListOfferingsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListOfferingsOutput{} + req.Data = output + return +} + +// Returns a list of products or offerings that the user can manage through +// the API. Each offering record indicates the recurring price per unit and +// the frequency for that offering. The API returns a NotEligible error if the +// user is not permitted to invoke the operation. Please contact aws-devicefarm-support@amazon.com +// (mailto:aws-devicefarm-support@amazon.com) if you believe that you should +// be able to invoke this operation. +func (c *DeviceFarm) ListOfferings(input *ListOfferingsInput) (*ListOfferingsOutput, error) { + req, out := c.ListOfferingsRequest(input) + err := req.Send() + return out, err +} + +// ListOfferingsPages iterates over the pages of a ListOfferings operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListOfferings method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListOfferings operation. 
+// pageNum := 0 +// err := client.ListOfferingsPages(params, +// func(page *ListOfferingsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *DeviceFarm) ListOfferingsPages(input *ListOfferingsInput, fn func(p *ListOfferingsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListOfferingsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListOfferingsOutput), lastPage) + }) +} + +const opListProjects = "ListProjects" + +// ListProjectsRequest generates a "aws/request.Request" representing the +// client's request for the ListProjects operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListProjects method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListProjectsRequest method. +// req, resp := client.ListProjectsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DeviceFarm) ListProjectsRequest(input *ListProjectsInput) (req *request.Request, output *ListProjectsOutput) { + op := &request.Operation{ + Name: opListProjects, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListProjectsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListProjectsOutput{} + req.Data = output + return +} + +// Gets information about projects. +func (c *DeviceFarm) ListProjects(input *ListProjectsInput) (*ListProjectsOutput, error) { + req, out := c.ListProjectsRequest(input) + err := req.Send() + return out, err +} + +// ListProjectsPages iterates over the pages of a ListProjects operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListProjects method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListProjects operation. +// pageNum := 0 +// err := client.ListProjectsPages(params, +// func(page *ListProjectsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *DeviceFarm) ListProjectsPages(input *ListProjectsInput, fn func(p *ListProjectsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListProjectsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListProjectsOutput), lastPage) + }) +} + +const opListRuns = "ListRuns" + +// ListRunsRequest generates a "aws/request.Request" representing the +// client's request for the ListRuns operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListRuns method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListRunsRequest method. +// req, resp := client.ListRunsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DeviceFarm) ListRunsRequest(input *ListRunsInput) (req *request.Request, output *ListRunsOutput) { + op := &request.Operation{ + Name: opListRuns, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListRunsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListRunsOutput{} + req.Data = output + return +} + +// Gets information about runs. +func (c *DeviceFarm) ListRuns(input *ListRunsInput) (*ListRunsOutput, error) { + req, out := c.ListRunsRequest(input) + err := req.Send() + return out, err +} + +// ListRunsPages iterates over the pages of a ListRuns operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListRuns method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListRuns operation. +// pageNum := 0 +// err := client.ListRunsPages(params, +// func(page *ListRunsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *DeviceFarm) ListRunsPages(input *ListRunsInput, fn func(p *ListRunsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListRunsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListRunsOutput), lastPage) + }) +} + +const opListSamples = "ListSamples" + +// ListSamplesRequest generates a "aws/request.Request" representing the +// client's request for the ListSamples operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListSamples method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListSamplesRequest method. 
+// req, resp := client.ListSamplesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DeviceFarm) ListSamplesRequest(input *ListSamplesInput) (req *request.Request, output *ListSamplesOutput) { + op := &request.Operation{ + Name: opListSamples, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListSamplesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListSamplesOutput{} + req.Data = output + return +} + +// Gets information about samples. +func (c *DeviceFarm) ListSamples(input *ListSamplesInput) (*ListSamplesOutput, error) { + req, out := c.ListSamplesRequest(input) + err := req.Send() + return out, err +} + +// ListSamplesPages iterates over the pages of a ListSamples operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListSamples method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListSamples operation. +// pageNum := 0 +// err := client.ListSamplesPages(params, +// func(page *ListSamplesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *DeviceFarm) ListSamplesPages(input *ListSamplesInput, fn func(p *ListSamplesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListSamplesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListSamplesOutput), lastPage) + }) +} + +const opListSuites = "ListSuites" + +// ListSuitesRequest generates a "aws/request.Request" representing the +// client's request for the ListSuites operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListSuites method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListSuitesRequest method. +// req, resp := client.ListSuitesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DeviceFarm) ListSuitesRequest(input *ListSuitesInput) (req *request.Request, output *ListSuitesOutput) { + op := &request.Operation{ + Name: opListSuites, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListSuitesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListSuitesOutput{} + req.Data = output + return +} + +// Gets information about suites. 
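+//
+// A minimal usage sketch (not generator output): it assumes a configured
+// *DeviceFarm client named svc, and the ARN below is a placeholder for the
+// parent resource whose suites you want to list:
+//
+//    params := &devicefarm.ListSuitesInput{
+//        Arn: aws.String("arn:aws:devicefarm:REGION:ACCOUNT:job:PLACEHOLDER"), // placeholder ARN
+//    }
+//    resp, err := svc.ListSuites(params)
+//    if err != nil {
+//        fmt.Println(err.Error())
+//        return
+//    }
+//    for _, suite := range resp.Suites {
+//        fmt.Println(*suite.Name)
+//    }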
+func (c *DeviceFarm) ListSuites(input *ListSuitesInput) (*ListSuitesOutput, error) { + req, out := c.ListSuitesRequest(input) + err := req.Send() + return out, err +} + +// ListSuitesPages iterates over the pages of a ListSuites operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListSuites method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListSuites operation. +// pageNum := 0 +// err := client.ListSuitesPages(params, +// func(page *ListSuitesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *DeviceFarm) ListSuitesPages(input *ListSuitesInput, fn func(p *ListSuitesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListSuitesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListSuitesOutput), lastPage) + }) +} + +const opListTests = "ListTests" + +// ListTestsRequest generates a "aws/request.Request" representing the +// client's request for the ListTests operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTests method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTestsRequest method. +// req, resp := client.ListTestsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DeviceFarm) ListTestsRequest(input *ListTestsInput) (req *request.Request, output *ListTestsOutput) { + op := &request.Operation{ + Name: opListTests, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTestsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTestsOutput{} + req.Data = output + return +} + +// Gets information about tests. +func (c *DeviceFarm) ListTests(input *ListTestsInput) (*ListTestsOutput, error) { + req, out := c.ListTestsRequest(input) + err := req.Send() + return out, err +} + +// ListTestsPages iterates over the pages of a ListTests operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListTests method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListTests operation. 
+// pageNum := 0 +// err := client.ListTestsPages(params, +// func(page *ListTestsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *DeviceFarm) ListTestsPages(input *ListTestsInput, fn func(p *ListTestsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListTestsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListTestsOutput), lastPage) + }) +} + +const opListUniqueProblems = "ListUniqueProblems" + +// ListUniqueProblemsRequest generates a "aws/request.Request" representing the +// client's request for the ListUniqueProblems operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListUniqueProblems method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListUniqueProblemsRequest method. +// req, resp := client.ListUniqueProblemsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DeviceFarm) ListUniqueProblemsRequest(input *ListUniqueProblemsInput) (req *request.Request, output *ListUniqueProblemsOutput) { + op := &request.Operation{ + Name: opListUniqueProblems, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListUniqueProblemsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListUniqueProblemsOutput{} + req.Data = output + return +} + +// Gets information about unique problems. +func (c *DeviceFarm) ListUniqueProblems(input *ListUniqueProblemsInput) (*ListUniqueProblemsOutput, error) { + req, out := c.ListUniqueProblemsRequest(input) + err := req.Send() + return out, err +} + +// ListUniqueProblemsPages iterates over the pages of a ListUniqueProblems operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListUniqueProblems method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListUniqueProblems operation. 
+// pageNum := 0 +// err := client.ListUniqueProblemsPages(params, +// func(page *ListUniqueProblemsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *DeviceFarm) ListUniqueProblemsPages(input *ListUniqueProblemsInput, fn func(p *ListUniqueProblemsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListUniqueProblemsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListUniqueProblemsOutput), lastPage) + }) +} + +const opListUploads = "ListUploads" + +// ListUploadsRequest generates a "aws/request.Request" representing the +// client's request for the ListUploads operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListUploads method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListUploadsRequest method. +// req, resp := client.ListUploadsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DeviceFarm) ListUploadsRequest(input *ListUploadsInput) (req *request.Request, output *ListUploadsOutput) { + op := &request.Operation{ + Name: opListUploads, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListUploadsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListUploadsOutput{} + req.Data = output + return +} + +// Gets information about uploads. +func (c *DeviceFarm) ListUploads(input *ListUploadsInput) (*ListUploadsOutput, error) { + req, out := c.ListUploadsRequest(input) + err := req.Send() + return out, err +} + +// ListUploadsPages iterates over the pages of a ListUploads operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListUploads method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListUploads operation. +// pageNum := 0 +// err := client.ListUploadsPages(params, +// func(page *ListUploadsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *DeviceFarm) ListUploadsPages(input *ListUploadsInput, fn func(p *ListUploadsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListUploadsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListUploadsOutput), lastPage) + }) +} + +const opPurchaseOffering = "PurchaseOffering" + +// PurchaseOfferingRequest generates a "aws/request.Request" representing the +// client's request for the PurchaseOffering operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PurchaseOffering method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PurchaseOfferingRequest method. +// req, resp := client.PurchaseOfferingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DeviceFarm) PurchaseOfferingRequest(input *PurchaseOfferingInput) (req *request.Request, output *PurchaseOfferingOutput) { + op := &request.Operation{ + Name: opPurchaseOffering, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PurchaseOfferingInput{} + } + + req = c.newRequest(op, input, output) + output = &PurchaseOfferingOutput{} + req.Data = output + return +} + +// Immediately purchases offerings for an AWS account. Offerings renew with +// the latest total purchased quantity for an offering, unless the renewal was +// overridden. The API returns a NotEligible error if the user is not permitted +// to invoke the operation. Please contact aws-devicefarm-support@amazon.com +// (mailto:aws-devicefarm-support@amazon.com) if you believe that you should +// be able to invoke this operation. +func (c *DeviceFarm) PurchaseOffering(input *PurchaseOfferingInput) (*PurchaseOfferingOutput, error) { + req, out := c.PurchaseOfferingRequest(input) + err := req.Send() + return out, err +} + +const opRenewOffering = "RenewOffering" + +// RenewOfferingRequest generates a "aws/request.Request" representing the +// client's request for the RenewOffering operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RenewOffering method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RenewOfferingRequest method. +// req, resp := client.RenewOfferingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DeviceFarm) RenewOfferingRequest(input *RenewOfferingInput) (req *request.Request, output *RenewOfferingOutput) { + op := &request.Operation{ + Name: opRenewOffering, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RenewOfferingInput{} + } + + req = c.newRequest(op, input, output) + output = &RenewOfferingOutput{} + req.Data = output + return +} + +// Explicitly sets the quantity of devices to renew for an offering, starting +// from the effectiveDate of the next period. The API returns a NotEligible +// error if the user is not permitted to invoke the operation. 
Please contact +// aws-devicefarm-support@amazon.com (mailto:aws-devicefarm-support@amazon.com) +// if you believe that you should be able to invoke this operation. +func (c *DeviceFarm) RenewOffering(input *RenewOfferingInput) (*RenewOfferingOutput, error) { + req, out := c.RenewOfferingRequest(input) + err := req.Send() + return out, err +} + +const opScheduleRun = "ScheduleRun" + +// ScheduleRunRequest generates a "aws/request.Request" representing the +// client's request for the ScheduleRun operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ScheduleRun method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ScheduleRunRequest method. +// req, resp := client.ScheduleRunRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DeviceFarm) ScheduleRunRequest(input *ScheduleRunInput) (req *request.Request, output *ScheduleRunOutput) { + op := &request.Operation{ + Name: opScheduleRun, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ScheduleRunInput{} + } + + req = c.newRequest(op, input, output) + output = &ScheduleRunOutput{} + req.Data = output + return +} + +// Schedules a run. +func (c *DeviceFarm) ScheduleRun(input *ScheduleRunInput) (*ScheduleRunOutput, error) { + req, out := c.ScheduleRunRequest(input) + err := req.Send() + return out, err +} + +const opStopRun = "StopRun" + +// StopRunRequest generates a "aws/request.Request" representing the +// client's request for the StopRun operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the StopRun method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the StopRunRequest method. +// req, resp := client.StopRunRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DeviceFarm) StopRunRequest(input *StopRunInput) (req *request.Request, output *StopRunOutput) { + op := &request.Operation{ + Name: opStopRun, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopRunInput{} + } + + req = c.newRequest(op, input, output) + output = &StopRunOutput{} + req.Data = output + return +} + +// Initiates a stop request for the current test run. AWS Device Farm will immediately +// stop the run on devices where tests have not started executing, and you will +// not be billed for these devices. On devices where tests have started executing, +// Setup Suite and Teardown Suite tests will run to completion before stopping +// execution on those devices. 
You will be billed for Setup, Teardown, and any +// tests that were in progress or already completed. +func (c *DeviceFarm) StopRun(input *StopRunInput) (*StopRunOutput, error) { + req, out := c.StopRunRequest(input) + err := req.Send() + return out, err +} + +const opUpdateDevicePool = "UpdateDevicePool" + +// UpdateDevicePoolRequest generates a "aws/request.Request" representing the +// client's request for the UpdateDevicePool operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateDevicePool method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateDevicePoolRequest method. +// req, resp := client.UpdateDevicePoolRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DeviceFarm) UpdateDevicePoolRequest(input *UpdateDevicePoolInput) (req *request.Request, output *UpdateDevicePoolOutput) { + op := &request.Operation{ + Name: opUpdateDevicePool, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateDevicePoolInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateDevicePoolOutput{} + req.Data = output + return +} + +// Modifies the name, description, and rules in a device pool given the attributes +// and the pool ARN. Rule updates are all-or-nothing, meaning they can only +// be updated as a whole (or not at all). +func (c *DeviceFarm) UpdateDevicePool(input *UpdateDevicePoolInput) (*UpdateDevicePoolOutput, error) { + req, out := c.UpdateDevicePoolRequest(input) + err := req.Send() + return out, err +} + +const opUpdateProject = "UpdateProject" + +// UpdateProjectRequest generates a "aws/request.Request" representing the +// client's request for the UpdateProject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateProject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateProjectRequest method. +// req, resp := client.UpdateProjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DeviceFarm) UpdateProjectRequest(input *UpdateProjectInput) (req *request.Request, output *UpdateProjectOutput) { + op := &request.Operation{ + Name: opUpdateProject, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateProjectInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateProjectOutput{} + req.Data = output + return +} + +// Modifies the specified project name, given the project ARN and a new name. 
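+//
+// A minimal calling sketch (the ARN below is a hypothetical placeholder,
+// not a real resource):
+//
+//    params := &UpdateProjectInput{
+//        Arn:  aws.String("arn:aws:devicefarm:us-west-2:123456789012:project:EXAMPLE-ID"),
+//        Name: aws.String("MyRenamedProject"),
+//    }
+//    resp, err := client.UpdateProject(params)
+//    if err == nil { // resp.Project describes the renamed project
+//        fmt.Println(resp)
+//    }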
+func (c *DeviceFarm) UpdateProject(input *UpdateProjectInput) (*UpdateProjectOutput, error) { + req, out := c.UpdateProjectRequest(input) + err := req.Send() + return out, err +} + +// A container for account-level settings within AWS Device Farm. +type AccountSettings struct { + _ struct{} `type:"structure"` + + // The AWS account number specified in the AccountSettings container. + AwsAccountNumber *string `locationName:"awsAccountNumber" min:"2" type:"string"` + + // Returns the unmetered devices you have purchased or want to purchase. + UnmeteredDevices map[string]*int64 `locationName:"unmeteredDevices" type:"map"` + + // Returns the unmetered remote access devices you have purchased or want to + // purchase. + UnmeteredRemoteAccessDevices map[string]*int64 `locationName:"unmeteredRemoteAccessDevices" type:"map"` +} + +// String returns the string representation +func (s AccountSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccountSettings) GoString() string { + return s.String() +} + +// Represents the output of a test. Examples of artifacts include logs and screenshots. +type Artifact struct { + _ struct{} `type:"structure"` + + // The artifact's ARN. + Arn *string `locationName:"arn" min:"32" type:"string"` + + // The artifact's file extension. + Extension *string `locationName:"extension" type:"string"` + + // The artifact's name. + Name *string `locationName:"name" type:"string"` + + // The artifact's type. + // + // Allowed values include the following: + // + // UNKNOWN: An unknown type. + // + // SCREENSHOT: The screenshot type. + // + // DEVICE_LOG: The device log type. + // + // MESSAGE_LOG: The message log type. + // + // RESULT_LOG: The result log type. + // + // SERVICE_LOG: The service log type. + // + // WEBKIT_LOG: The web kit log type. + // + // INSTRUMENTATION_OUTPUT: The instrumentation type. + // + // EXERCISER_MONKEY_OUTPUT: For Android, the artifact (log) generated by an + // Android fuzz test. + // + // CALABASH_JSON_OUTPUT: The Calabash JSON output type. + // + // CALABASH_PRETTY_OUTPUT: The Calabash pretty output type. + // + // CALABASH_STANDARD_OUTPUT: The Calabash standard output type. + // + // CALABASH_JAVA_XML_OUTPUT: The Calabash Java XML output type. + // + // AUTOMATION_OUTPUT: The automation output type. + // + // APPIUM_SERVER_OUTPUT: The Appium server output type. + // + // APPIUM_JAVA_OUTPUT: The Appium Java output type. + // + // APPIUM_JAVA_XML_OUTPUT: The Appium Java XML output type. + // + // APPIUM_PYTHON_OUTPUT: The Appium Python output type. + // + // APPIUM_PYTHON_XML_OUTPUT: The Appium Python XML output type. + // + // EXPLORER_EVENT_LOG: The Explorer event log output type. + // + // EXPLORER_SUMMARY_LOG: The Explorer summary log output type. + // + // APPLICATION_CRASH_REPORT: The application crash report output type. + // + // XCTEST_LOG: The XCode test output type. + Type *string `locationName:"type" type:"string" enum:"ArtifactType"` + + // The pre-signed Amazon S3 URL that can be used with a corresponding GET request + // to download the artifact's file. + Url *string `locationName:"url" type:"string"` +} + +// String returns the string representation +func (s Artifact) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Artifact) GoString() string { + return s.String() +} + +// Represents the amount of CPU that an app is using on a physical device. 
+// +// Note that this does not represent system-wide CPU usage. +type CPU struct { + _ struct{} `type:"structure"` + + // The CPU's architecture, for example x86 or ARM. + Architecture *string `locationName:"architecture" type:"string"` + + // The clock speed of the device's CPU, expressed in hertz (Hz). For example, + // a 1.2 GHz CPU is expressed as 1200000000. + Clock *float64 `locationName:"clock" type:"double"` + + // The CPU's frequency. + Frequency *string `locationName:"frequency" type:"string"` +} + +// String returns the string representation +func (s CPU) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CPU) GoString() string { + return s.String() +} + +// Represents entity counters. +type Counters struct { + _ struct{} `type:"structure"` + + // The number of errored entities. + Errored *int64 `locationName:"errored" type:"integer"` + + // The number of failed entities. + Failed *int64 `locationName:"failed" type:"integer"` + + // The number of passed entities. + Passed *int64 `locationName:"passed" type:"integer"` + + // The number of skipped entities. + Skipped *int64 `locationName:"skipped" type:"integer"` + + // The number of stopped entities. + Stopped *int64 `locationName:"stopped" type:"integer"` + + // The total number of entities. + Total *int64 `locationName:"total" type:"integer"` + + // The number of warned entities. + Warned *int64 `locationName:"warned" type:"integer"` +} + +// String returns the string representation +func (s Counters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Counters) GoString() string { + return s.String() +} + +// Represents a request to the create device pool operation. +type CreateDevicePoolInput struct { + _ struct{} `type:"structure"` + + // The device pool's description. + Description *string `locationName:"description" type:"string"` + + // The device pool's name. + Name *string `locationName:"name" type:"string" required:"true"` + + // The ARN of the project for the device pool. + ProjectArn *string `locationName:"projectArn" min:"32" type:"string" required:"true"` + + // The device pool's rules. + Rules []*Rule `locationName:"rules" type:"list" required:"true"` +} + +// String returns the string representation +func (s CreateDevicePoolInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDevicePoolInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDevicePoolInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDevicePoolInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.ProjectArn == nil { + invalidParams.Add(request.NewErrParamRequired("ProjectArn")) + } + if s.ProjectArn != nil && len(*s.ProjectArn) < 32 { + invalidParams.Add(request.NewErrParamMinLen("ProjectArn", 32)) + } + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the result of a create device pool request. +type CreateDevicePoolOutput struct { + _ struct{} `type:"structure"` + + // The newly created device pool. 
+	DevicePool *DevicePool `locationName:"devicePool" type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateDevicePoolOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDevicePoolOutput) GoString() string {
+	return s.String()
+}
+
+// Represents a request to the create project operation.
+type CreateProjectInput struct {
+	_ struct{} `type:"structure"`
+
+	// The project's name.
+	Name *string `locationName:"name" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateProjectInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateProjectInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateProjectInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateProjectInput"}
+	if s.Name == nil {
+		invalidParams.Add(request.NewErrParamRequired("Name"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Represents the result of a create project request.
+type CreateProjectOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The newly created project.
+	Project *Project `locationName:"project" type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateProjectOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateProjectOutput) GoString() string {
+	return s.String()
+}
+
+// Represents a request to the create upload operation.
+type CreateUploadInput struct {
+	_ struct{} `type:"structure"`
+
+	// The upload's content type (for example, "application/octet-stream").
+	ContentType *string `locationName:"contentType" type:"string"`
+
+	// The upload's file name.
+	Name *string `locationName:"name" type:"string" required:"true"`
+
+	// The ARN of the project for the upload.
+	ProjectArn *string `locationName:"projectArn" min:"32" type:"string" required:"true"`
+
+	// The upload's upload type.
+	//
+	// Must be one of the following values:
+	//
+	// ANDROID_APP: An Android upload.
+	//
+	// IOS_APP: An iOS upload.
+	//
+	// WEB_APP: A web application upload.
+	//
+	// EXTERNAL_DATA: An external data upload.
+	//
+	// APPIUM_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload.
+	//
+	// APPIUM_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload.
+	//
+	// APPIUM_PYTHON_TEST_PACKAGE: An Appium Python test package upload.
+	//
+	// APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload.
+	//
+	// APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package
+	// upload.
+	//
+	// APPIUM_WEB_PYTHON_TEST_PACKAGE: An Appium Python test package upload.
+	//
+	// CALABASH_TEST_PACKAGE: A Calabash test package upload.
+	//
+	// INSTRUMENTATION_TEST_PACKAGE: An instrumentation upload.
+	//
+	// UIAUTOMATION_TEST_PACKAGE: A uiautomation test package upload.
+	//
+	// UIAUTOMATOR_TEST_PACKAGE: A uiautomator test package upload.
+	//
+	// XCTEST_TEST_PACKAGE: An XCode test package upload.
+	//
+	// XCTEST_UI_TEST_PACKAGE: An XCode UI test package upload.
+	//
+	// Note If you call CreateUpload with WEB_APP specified, AWS Device Farm throws
+	// an ArgumentException error.
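+	//
+	// For example, an upload of an Android app might set (a sketch; the ARN
+	// and file name are hypothetical placeholders):
+	//
+	//    params := &CreateUploadInput{
+	//        Name:       aws.String("app-debug.apk"),
+	//        ProjectArn: aws.String("arn:aws:devicefarm:us-west-2:123456789012:project:EXAMPLE-ID"),
+	//        Type:       aws.String("ANDROID_APP"),
+	//    }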
+ Type *string `locationName:"type" type:"string" required:"true" enum:"UploadType"` +} + +// String returns the string representation +func (s CreateUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateUploadInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateUploadInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.ProjectArn == nil { + invalidParams.Add(request.NewErrParamRequired("ProjectArn")) + } + if s.ProjectArn != nil && len(*s.ProjectArn) < 32 { + invalidParams.Add(request.NewErrParamMinLen("ProjectArn", 32)) + } + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the result of a create upload request. +type CreateUploadOutput struct { + _ struct{} `type:"structure"` + + // The newly created upload. + Upload *Upload `locationName:"upload" type:"structure"` +} + +// String returns the string representation +func (s CreateUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateUploadOutput) GoString() string { + return s.String() +} + +// Represents a request to the delete device pool operation. +type DeleteDevicePoolInput struct { + _ struct{} `type:"structure"` + + // Represents the Amazon Resource Name (ARN) of the Device Farm device pool + // you wish to delete. + Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDevicePoolInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDevicePoolInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDevicePoolInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDevicePoolInput"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) + } + if s.Arn != nil && len(*s.Arn) < 32 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 32)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the result of a delete device pool request. +type DeleteDevicePoolOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDevicePoolOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDevicePoolOutput) GoString() string { + return s.String() +} + +// Represents a request to the delete project operation. +type DeleteProjectInput struct { + _ struct{} `type:"structure"` + + // Represents the Amazon Resource Name (ARN) of the Device Farm project you + // wish to delete. + Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteProjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteProjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
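+// Validation also runs automatically when a request is sent, so calling it
+// directly is optional. A minimal sketch (the ARN is a hypothetical
+// placeholder):
+//
+//    params := &DeleteProjectInput{
+//        Arn: aws.String("arn:aws:devicefarm:us-west-2:123456789012:project:EXAMPLE-ID"),
+//    }
+//    if err := params.Validate(); err != nil {
+//        fmt.Println(err) // reports a missing or too-short ARN
+//    }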
+func (s *DeleteProjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteProjectInput"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) + } + if s.Arn != nil && len(*s.Arn) < 32 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 32)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the result of a delete project request. +type DeleteProjectOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteProjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteProjectOutput) GoString() string { + return s.String() +} + +// Represents a request to the delete run operation. +type DeleteRunInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) for the run you wish to delete. + Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRunInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRunInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteRunInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteRunInput"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) + } + if s.Arn != nil && len(*s.Arn) < 32 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 32)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the result of a delete run request. +type DeleteRunOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRunOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRunOutput) GoString() string { + return s.String() +} + +// Represents a request to the delete upload operation. +type DeleteUploadInput struct { + _ struct{} `type:"structure"` + + // Represents the Amazon Resource Name (ARN) of the Device Farm upload you wish + // to delete. + Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUploadInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteUploadInput"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) + } + if s.Arn != nil && len(*s.Arn) < 32 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 32)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the result of a delete upload request. +type DeleteUploadOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUploadOutput) GoString() string { + return s.String() +} + +// Represents a device type that an app is tested against. 
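+//
+// All fields are pointers and may be nil; a sketch of reading devices
+// returned by a ListDevices call (the response variable is illustrative):
+//
+//    for _, d := range listResp.Devices {
+//        if d.Name != nil && d.Platform != nil {
+//            fmt.Printf("%s (%s)\n", *d.Name, *d.Platform)
+//        }
+//    }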
+type Device struct { + _ struct{} `type:"structure"` + + // The device's ARN. + Arn *string `locationName:"arn" min:"32" type:"string"` + + // The device's carrier. + Carrier *string `locationName:"carrier" type:"string"` + + // Information about the device's CPU. + Cpu *CPU `locationName:"cpu" type:"structure"` + + // The device's form factor. + // + // Allowed values include: + // + // PHONE: The phone form factor. + // + // TABLET: The tablet form factor. + FormFactor *string `locationName:"formFactor" type:"string" enum:"DeviceFormFactor"` + + // The device's heap size, expressed in bytes. + HeapSize *int64 `locationName:"heapSize" type:"long"` + + // The device's image name. + Image *string `locationName:"image" type:"string"` + + // The device's manufacturer name. + Manufacturer *string `locationName:"manufacturer" type:"string"` + + // The device's total memory size, expressed in bytes. + Memory *int64 `locationName:"memory" type:"long"` + + // The device's model name. + Model *string `locationName:"model" type:"string"` + + // The device's display name. + Name *string `locationName:"name" type:"string"` + + // The device's operating system type. + Os *string `locationName:"os" type:"string"` + + // The device's platform. + // + // Allowed values include: + // + // ANDROID: The Android platform. + // + // IOS: The iOS platform. + Platform *string `locationName:"platform" type:"string" enum:"DevicePlatform"` + + // The device's radio. + Radio *string `locationName:"radio" type:"string"` + + // Represents the screen resolution of a device in height and width, expressed + // in pixels. + Resolution *Resolution `locationName:"resolution" type:"structure"` +} + +// String returns the string representation +func (s Device) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Device) GoString() string { + return s.String() +} + +// Represents the total (metered or unmetered) minutes used by the resource +// to run tests. Contains the sum of minutes consumed by all children. +type DeviceMinutes struct { + _ struct{} `type:"structure"` + + // When specified, represents only the sum of metered minutes used by the resource + // to run tests. + Metered *float64 `locationName:"metered" type:"double"` + + // When specified, represents the total minutes used by the resource to run + // tests. + Total *float64 `locationName:"total" type:"double"` + + // When specified, represents only the sum of unmetered minutes used by the + // resource to run tests. + Unmetered *float64 `locationName:"unmetered" type:"double"` +} + +// String returns the string representation +func (s DeviceMinutes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeviceMinutes) GoString() string { + return s.String() +} + +// Represents a collection of device types. +type DevicePool struct { + _ struct{} `type:"structure"` + + // The device pool's ARN. + Arn *string `locationName:"arn" min:"32" type:"string"` + + // The device pool's description. + Description *string `locationName:"description" type:"string"` + + // The device pool's name. + Name *string `locationName:"name" type:"string"` + + // Information about the device pool's rules. + Rules []*Rule `locationName:"rules" type:"list"` + + // The device pool's type. + // + // Allowed values include: + // + // CURATED: A device pool that is created and managed by AWS Device Farm. 
+	//
+	// PRIVATE: A device pool that is created and managed by the device pool developer.
+	Type *string `locationName:"type" type:"string" enum:"DevicePoolType"`
+}
+
+// String returns the string representation
+func (s DevicePool) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DevicePool) GoString() string {
+	return s.String()
+}
+
+// Represents a device pool compatibility result.
+type DevicePoolCompatibilityResult struct {
+	_ struct{} `type:"structure"`
+
+	// Whether the result was compatible with the device pool.
+	Compatible *bool `locationName:"compatible" type:"boolean"`
+
+	// Represents a device type that an app is tested against.
+	Device *Device `locationName:"device" type:"structure"`
+
+	// Information about the compatibility.
+	IncompatibilityMessages []*IncompatibilityMessage `locationName:"incompatibilityMessages" type:"list"`
+}
+
+// String returns the string representation
+func (s DevicePoolCompatibilityResult) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DevicePoolCompatibilityResult) GoString() string {
+	return s.String()
+}
+
+// Represents the request sent to retrieve the account settings.
+type GetAccountSettingsInput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetAccountSettingsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetAccountSettingsInput) GoString() string {
+	return s.String()
+}
+
+// Represents the account settings return values from the GetAccountSettings
+// request.
+type GetAccountSettingsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A container for account-level settings within AWS Device Farm.
+	AccountSettings *AccountSettings `locationName:"accountSettings" type:"structure"`
+}
+
+// String returns the string representation
+func (s GetAccountSettingsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetAccountSettingsOutput) GoString() string {
+	return s.String()
+}
+
+// Represents a request to the get device operation.
+type GetDeviceInput struct {
+	_ struct{} `type:"structure"`
+
+	// The device type's ARN.
+	Arn *string `locationName:"arn" min:"32" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetDeviceInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetDeviceInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetDeviceInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetDeviceInput"}
+	if s.Arn == nil {
+		invalidParams.Add(request.NewErrParamRequired("Arn"))
+	}
+	if s.Arn != nil && len(*s.Arn) < 32 {
+		invalidParams.Add(request.NewErrParamMinLen("Arn", 32))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Represents the result of a get device request.
+type GetDeviceOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Represents a device type that an app is tested against.
+	Device *Device `locationName:"device" type:"structure"`
+}
+
+// String returns the string representation
+func (s GetDeviceOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetDeviceOutput) GoString() string {
+	return s.String()
+}
+
+// Represents a request to the get device pool compatibility operation.
+type GetDevicePoolCompatibilityInput struct {
+	_ struct{} `type:"structure"`
+
+	// The ARN of the app that is associated with the specified device pool.
+	AppArn *string `locationName:"appArn" min:"32" type:"string"`
+
+	// The device pool's ARN.
+	DevicePoolArn *string `locationName:"devicePoolArn" min:"32" type:"string" required:"true"`
+
+	// The test type for the specified device pool.
+	//
+	// Allowed values include the following:
+	//
+	// BUILTIN_FUZZ: The built-in fuzz type.
+	//
+	// BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android
+	// app, interacting with it and capturing screenshots at the same time.
+	//
+	// APPIUM_JAVA_JUNIT: The Appium Java JUnit type.
+	//
+	// APPIUM_JAVA_TESTNG: The Appium Java TestNG type.
+	//
+	// APPIUM_PYTHON: The Appium Python type.
+	//
+	// APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for Web apps.
+	//
+	// APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for Web apps.
+	//
+	// APPIUM_WEB_PYTHON: The Appium Python type for Web apps.
+	//
+	// CALABASH: The Calabash type.
+	//
+	// INSTRUMENTATION: The Instrumentation type.
+	//
+	// UIAUTOMATION: The uiautomation type.
+	//
+	// UIAUTOMATOR: The uiautomator type.
+	//
+	// XCTEST: The XCode test type.
+	//
+	// XCTEST_UI: The XCode UI test type.
+	TestType *string `locationName:"testType" type:"string" enum:"TestType"`
+}
+
+// String returns the string representation
+func (s GetDevicePoolCompatibilityInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetDevicePoolCompatibilityInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetDevicePoolCompatibilityInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetDevicePoolCompatibilityInput"}
+	if s.AppArn != nil && len(*s.AppArn) < 32 {
+		invalidParams.Add(request.NewErrParamMinLen("AppArn", 32))
+	}
+	if s.DevicePoolArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("DevicePoolArn"))
+	}
+	if s.DevicePoolArn != nil && len(*s.DevicePoolArn) < 32 {
+		invalidParams.Add(request.NewErrParamMinLen("DevicePoolArn", 32))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Represents the result of a describe device pool compatibility request.
+type GetDevicePoolCompatibilityOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Information about compatible devices.
+	CompatibleDevices []*DevicePoolCompatibilityResult `locationName:"compatibleDevices" type:"list"`
+
+	// Information about incompatible devices.
+	IncompatibleDevices []*DevicePoolCompatibilityResult `locationName:"incompatibleDevices" type:"list"`
+}
+
+// String returns the string representation
+func (s GetDevicePoolCompatibilityOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetDevicePoolCompatibilityOutput) GoString() string {
+	return s.String()
+}
+
+// Represents a request to the get device pool operation.
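+//
+// A minimal calling sketch (the ARN is a hypothetical placeholder):
+//
+//    resp, err := client.GetDevicePool(&GetDevicePoolInput{
+//        Arn: aws.String("arn:aws:devicefarm:us-west-2:123456789012:devicepool:EXAMPLE-ID"),
+//    })
+//    if err == nil { // resp.DevicePool holds the pool's name, description, and rules
+//        fmt.Println(resp)
+//    }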
+type GetDevicePoolInput struct {
+	_ struct{} `type:"structure"`
+
+	// The device pool's ARN.
+	Arn *string `locationName:"arn" min:"32" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetDevicePoolInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetDevicePoolInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetDevicePoolInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetDevicePoolInput"}
+	if s.Arn == nil {
+		invalidParams.Add(request.NewErrParamRequired("Arn"))
+	}
+	if s.Arn != nil && len(*s.Arn) < 32 {
+		invalidParams.Add(request.NewErrParamMinLen("Arn", 32))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Represents the result of a get device pool request.
+type GetDevicePoolOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Represents a collection of device types.
+	DevicePool *DevicePool `locationName:"devicePool" type:"structure"`
+}
+
+// String returns the string representation
+func (s GetDevicePoolOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetDevicePoolOutput) GoString() string {
+	return s.String()
+}
+
+// Represents a request to the get job operation.
+type GetJobInput struct {
+	_ struct{} `type:"structure"`
+
+	// The job's ARN.
+	Arn *string `locationName:"arn" min:"32" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetJobInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetJobInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetJobInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetJobInput"}
+	if s.Arn == nil {
+		invalidParams.Add(request.NewErrParamRequired("Arn"))
+	}
+	if s.Arn != nil && len(*s.Arn) < 32 {
+		invalidParams.Add(request.NewErrParamMinLen("Arn", 32))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Represents the result of a get job request.
+type GetJobOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Represents a job.
+	Job *Job `locationName:"job" type:"structure"`
+}
+
+// String returns the string representation
+func (s GetJobOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetJobOutput) GoString() string {
+	return s.String()
+}
+
+// Represents the request to retrieve the offering status for the specified
+// customer or account.
+type GetOfferingStatusInput struct {
+	_ struct{} `type:"structure"`
+
+	// An identifier that was returned from the previous call to this operation,
+	// which can be used to return the next set of items in the list.
+	NextToken *string `locationName:"nextToken" min:"4" type:"string"`
+}
+
+// String returns the string representation
+func (s GetOfferingStatusInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetOfferingStatusInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetOfferingStatusInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetOfferingStatusInput"} + if s.NextToken != nil && len(*s.NextToken) < 4 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 4)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Returns the status result for a device offering. +type GetOfferingStatusOutput struct { + _ struct{} `type:"structure"` + + // When specified, gets the offering status for the current period. + Current map[string]*OfferingStatus `locationName:"current" type:"map"` + + // When specified, gets the offering status for the next period. + NextPeriod map[string]*OfferingStatus `locationName:"nextPeriod" type:"map"` + + // An identifier that was returned from the previous call to this operation, + // which can be used to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` +} + +// String returns the string representation +func (s GetOfferingStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOfferingStatusOutput) GoString() string { + return s.String() +} + +// Represents a request to the get project operation. +type GetProjectInput struct { + _ struct{} `type:"structure"` + + // The project's ARN. + Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetProjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetProjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetProjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetProjectInput"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) + } + if s.Arn != nil && len(*s.Arn) < 32 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 32)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the result of a get project request. +type GetProjectOutput struct { + _ struct{} `type:"structure"` + + // Represents an operating-system neutral workspace for running and managing + // tests. + Project *Project `locationName:"project" type:"structure"` +} + +// String returns the string representation +func (s GetProjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetProjectOutput) GoString() string { + return s.String() +} + +// Represents a request to the get run operation. +type GetRunInput struct { + _ struct{} `type:"structure"` + + // The run's ARN. + Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRunInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRunInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetRunInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetRunInput"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) + } + if s.Arn != nil && len(*s.Arn) < 32 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 32)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the result of a get run request. +type GetRunOutput struct { + _ struct{} `type:"structure"` + + // Represents an app on a set of devices with a specific test and configuration. + Run *Run `locationName:"run" type:"structure"` +} + +// String returns the string representation +func (s GetRunOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRunOutput) GoString() string { + return s.String() +} + +// Represents a request to the get suite operation. +type GetSuiteInput struct { + _ struct{} `type:"structure"` + + // The suite's ARN. + Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetSuiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSuiteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetSuiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSuiteInput"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) + } + if s.Arn != nil && len(*s.Arn) < 32 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 32)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the result of a get suite request. +type GetSuiteOutput struct { + _ struct{} `type:"structure"` + + // Represents a collection of one or more tests. + Suite *Suite `locationName:"suite" type:"structure"` +} + +// String returns the string representation +func (s GetSuiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSuiteOutput) GoString() string { + return s.String() +} + +// Represents a request to the get test operation. +type GetTestInput struct { + _ struct{} `type:"structure"` + + // The test's ARN. + Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetTestInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTestInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetTestInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetTestInput"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) + } + if s.Arn != nil && len(*s.Arn) < 32 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 32)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the result of a get test request. +type GetTestOutput struct { + _ struct{} `type:"structure"` + + // Represents a condition that is evaluated. 
+	Test *Test `locationName:"test" type:"structure"`
+}
+
+// String returns the string representation
+func (s GetTestOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetTestOutput) GoString() string {
+	return s.String()
+}
+
+// Represents a request to the get upload operation.
+type GetUploadInput struct {
+	_ struct{} `type:"structure"`
+
+	// The upload's ARN.
+	Arn *string `locationName:"arn" min:"32" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetUploadInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetUploadInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetUploadInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetUploadInput"}
+	if s.Arn == nil {
+		invalidParams.Add(request.NewErrParamRequired("Arn"))
+	}
+	if s.Arn != nil && len(*s.Arn) < 32 {
+		invalidParams.Add(request.NewErrParamMinLen("Arn", 32))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Represents the result of a get upload request.
+type GetUploadOutput struct {
+	_ struct{} `type:"structure"`
+
+	// An app or a set of one or more tests to upload or that have been uploaded.
+	Upload *Upload `locationName:"upload" type:"structure"`
+}
+
+// String returns the string representation
+func (s GetUploadOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetUploadOutput) GoString() string {
+	return s.String()
+}
+
+// Represents information about incompatibility.
+type IncompatibilityMessage struct {
+	_ struct{} `type:"structure"`
+
+	// A message about the incompatibility.
+	Message *string `locationName:"message" type:"string"`
+
+	// The type of incompatibility.
+	//
+	// Allowed values include:
+	//
+	// ARN: The ARN.
+	//
+	// FORM_FACTOR: The form factor (for example, phone or tablet).
+	//
+	// MANUFACTURER: The manufacturer.
+	//
+	// PLATFORM: The platform (for example, Android or iOS).
+	Type *string `locationName:"type" type:"string" enum:"DeviceAttribute"`
+}
+
+// String returns the string representation
+func (s IncompatibilityMessage) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s IncompatibilityMessage) GoString() string {
+	return s.String()
+}
+
+// Represents a job.
+type Job struct {
+	_ struct{} `type:"structure"`
+
+	// The job's ARN.
+	Arn *string `locationName:"arn" min:"32" type:"string"`
+
+	// The job's result counters.
+	Counters *Counters `locationName:"counters" type:"structure"`
+
+	// When the job was created.
+	Created *time.Time `locationName:"created" type:"timestamp" timestampFormat:"unix"`
+
+	// Represents a device type that an app is tested against.
+	Device *Device `locationName:"device" type:"structure"`
+
+	// Represents the total (metered or unmetered) minutes used by the job.
+	DeviceMinutes *DeviceMinutes `locationName:"deviceMinutes" type:"structure"`
+
+	// A message about the job's result.
+	Message *string `locationName:"message" type:"string"`
+
+	// The job's name.
+	Name *string `locationName:"name" type:"string"`
+
+	// The job's result.
+	//
+	// Allowed values include:
+	//
+	// PENDING: A pending condition.
+	//
+	// PASSED: A passing condition.
+	//
+	// WARNED: A warning condition.
+	//
+	// FAILED: A failed condition.
+	//
+	// SKIPPED: A skipped condition.
+	//
+	// ERRORED: An error condition.
+	//
+	// STOPPED: A stopped condition.
+	Result *string `locationName:"result" type:"string" enum:"ExecutionResult"`
+
+	// The job's start time.
+	Started *time.Time `locationName:"started" type:"timestamp" timestampFormat:"unix"`
+
+	// The job's status.
+	//
+	// Allowed values include:
+	//
+	// PENDING: A pending status.
+	//
+	// PENDING_CONCURRENCY: A pending concurrency status.
+	//
+	// PENDING_DEVICE: A pending device status.
+	//
+	// PROCESSING: A processing status.
+	//
+	// SCHEDULING: A scheduling status.
+	//
+	// PREPARING: A preparing status.
+	//
+	// RUNNING: A running status.
+	//
+	// COMPLETED: A completed status.
+	//
+	// STOPPING: A stopping status.
+	Status *string `locationName:"status" type:"string" enum:"ExecutionStatus"`
+
+	// The job's stop time.
+	Stopped *time.Time `locationName:"stopped" type:"timestamp" timestampFormat:"unix"`
+
+	// The job's type.
+	//
+	// Allowed values include the following:
+	//
+	// BUILTIN_FUZZ: The built-in fuzz type.
+	//
+	// BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android
+	// app, interacting with it and capturing screenshots at the same time.
+	//
+	// APPIUM_JAVA_JUNIT: The Appium Java JUnit type.
+	//
+	// APPIUM_JAVA_TESTNG: The Appium Java TestNG type.
+	//
+	// APPIUM_PYTHON: The Appium Python type.
+	//
+	// APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for Web apps.
+	//
+	// APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for Web apps.
+	//
+	// APPIUM_WEB_PYTHON: The Appium Python type for Web apps.
+	//
+	// CALABASH: The Calabash type.
+	//
+	// INSTRUMENTATION: The Instrumentation type.
+	//
+	// UIAUTOMATION: The uiautomation type.
+	//
+	// UIAUTOMATOR: The uiautomator type.
+	//
+	// XCTEST: The XCode test type.
+	//
+	// XCTEST_UI: The XCode UI test type.
+	Type *string `locationName:"type" type:"string" enum:"TestType"`
+}
+
+// String returns the string representation
+func (s Job) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Job) GoString() string {
+	return s.String()
+}
+
+// Represents a request to the list artifacts operation.
+type ListArtifactsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The Run, Job, Suite, or Test ARN.
+	Arn *string `locationName:"arn" min:"32" type:"string" required:"true"`
+
+	// An identifier that was returned from the previous call to this operation,
+	// which can be used to return the next set of items in the list.
+	NextToken *string `locationName:"nextToken" min:"4" type:"string"`
+
+	// The artifacts' type.
+	//
+	// Allowed values include:
+	//
+	// FILE: The artifacts are files.
+	//
+	// LOG: The artifacts are logs.
+	//
+	// SCREENSHOT: The artifacts are screenshots.
+	Type *string `locationName:"type" type:"string" required:"true" enum:"ArtifactCategory"`
+}
+
+// String returns the string representation
+func (s ListArtifactsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListArtifactsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListArtifactsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListArtifactsInput"}
+	if s.Arn == nil {
+		invalidParams.Add(request.NewErrParamRequired("Arn"))
+	}
+	if s.Arn != nil && len(*s.Arn) < 32 {
+		invalidParams.Add(request.NewErrParamMinLen("Arn", 32))
+	}
+	if s.NextToken != nil && len(*s.NextToken) < 4 {
+		invalidParams.Add(request.NewErrParamMinLen("NextToken", 4))
+	}
+	if s.Type == nil {
+		invalidParams.Add(request.NewErrParamRequired("Type"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Represents the result of a list artifacts operation.
+type ListArtifactsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Information about the artifacts.
+	Artifacts []*Artifact `locationName:"artifacts" type:"list"`
+
+	// If the number of items that are returned is significantly large, this is
+	// an identifier that is also returned, which can be used in a subsequent call
+	// to this operation to return the next set of items in the list.
+	NextToken *string `locationName:"nextToken" min:"4" type:"string"`
+}
+
+// String returns the string representation
+func (s ListArtifactsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListArtifactsOutput) GoString() string {
+	return s.String()
+}
+
+// Represents a request to the list device pools operation.
+type ListDevicePoolsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The project ARN.
+	Arn *string `locationName:"arn" min:"32" type:"string" required:"true"`
+
+	// An identifier that was returned from the previous call to this operation,
+	// which can be used to return the next set of items in the list.
+	NextToken *string `locationName:"nextToken" min:"4" type:"string"`
+
+	// The device pools' type.
+	//
+	// Allowed values include:
+	//
+	// CURATED: A device pool that is created and managed by AWS Device Farm.
+	//
+	// PRIVATE: A device pool that is created and managed by the device pool developer.
+	Type *string `locationName:"type" type:"string" enum:"DevicePoolType"`
+}
+
+// String returns the string representation
+func (s ListDevicePoolsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListDevicePoolsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListDevicePoolsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListDevicePoolsInput"}
+	if s.Arn == nil {
+		invalidParams.Add(request.NewErrParamRequired("Arn"))
+	}
+	if s.Arn != nil && len(*s.Arn) < 32 {
+		invalidParams.Add(request.NewErrParamMinLen("Arn", 32))
+	}
+	if s.NextToken != nil && len(*s.NextToken) < 4 {
+		invalidParams.Add(request.NewErrParamMinLen("NextToken", 4))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Represents the result of a list device pools request.
+type ListDevicePoolsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Information about the device pools.
+	DevicePools []*DevicePool `locationName:"devicePools" type:"list"`
+
+	// If the number of items that are returned is significantly large, this is
+	// an identifier that is also returned, which can be used in a subsequent call
+	// to this operation to return the next set of items in the list.
+ NextToken *string `locationName:"nextToken" min:"4" type:"string"` +} + +// String returns the string representation +func (s ListDevicePoolsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDevicePoolsOutput) GoString() string { + return s.String() +} + +// Represents the result of a list devices request. +type ListDevicesInput struct { + _ struct{} `type:"structure"` + + // The device types' ARNs. + Arn *string `locationName:"arn" min:"32" type:"string"` + + // An identifier that was returned from the previous call to this operation, + // which can be used to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` +} + +// String returns the string representation +func (s ListDevicesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDevicesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListDevicesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListDevicesInput"} + if s.Arn != nil && len(*s.Arn) < 32 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 32)) + } + if s.NextToken != nil && len(*s.NextToken) < 4 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 4)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the result of a list devices operation. +type ListDevicesOutput struct { + _ struct{} `type:"structure"` + + // Information about the devices. + Devices []*Device `locationName:"devices" type:"list"` + + // If the number of items that are returned is significantly large, this is + // an identifier that is also returned, which can be used in a subsequent call + // to this operation to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` +} + +// String returns the string representation +func (s ListDevicesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDevicesOutput) GoString() string { + return s.String() +} + +// Represents a request to the list jobs operation. +type ListJobsInput struct { + _ struct{} `type:"structure"` + + // The jobs' ARNs. + Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` + + // An identifier that was returned from the previous call to this operation, + // which can be used to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` +} + +// String returns the string representation +func (s ListJobsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListJobsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListJobsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListJobsInput"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) + } + if s.Arn != nil && len(*s.Arn) < 32 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 32)) + } + if s.NextToken != nil && len(*s.NextToken) < 4 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 4)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the result of a list jobs request. 
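+//
+// Editor's note: a hedged sketch of obtaining this output, assuming an existing
+// svc client and the ARN of a parent run (runArn); both names are illustrative:
+//
+//	jobs, err := svc.ListJobs(&devicefarm.ListJobsInput{
+//		Arn: aws.String(runArn),
+//	})
+//	if err == nil {
+//		for _, job := range jobs.Jobs {
+//			fmt.Println(aws.StringValue(job.Name), aws.StringValue(job.Status))
+//		}
+//	}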
+type ListJobsOutput struct { + _ struct{} `type:"structure"` + + // Information about the jobs. + Jobs []*Job `locationName:"jobs" type:"list"` + + // If the number of items that are returned is significantly large, this is + // an identifier that is also returned, which can be used in a subsequent call + // to this operation to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` +} + +// String returns the string representation +func (s ListJobsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListJobsOutput) GoString() string { + return s.String() +} + +// Represents the request to list the offering transaction history. +type ListOfferingTransactionsInput struct { + _ struct{} `type:"structure"` + + // An identifier that was returned from the previous call to this operation, + // which can be used to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` +} + +// String returns the string representation +func (s ListOfferingTransactionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListOfferingTransactionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListOfferingTransactionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListOfferingTransactionsInput"} + if s.NextToken != nil && len(*s.NextToken) < 4 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 4)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Returns the transaction log of the specified offerings. +type ListOfferingTransactionsOutput struct { + _ struct{} `type:"structure"` + + // An identifier that was returned from the previous call to this operation, + // which can be used to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` + + // The audit log of subscriptions you have purchased and modified through AWS + // Device Farm. + OfferingTransactions []*OfferingTransaction `locationName:"offeringTransactions" type:"list"` +} + +// String returns the string representation +func (s ListOfferingTransactionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListOfferingTransactionsOutput) GoString() string { + return s.String() +} + +// Represents the request to list all offerings. +type ListOfferingsInput struct { + _ struct{} `type:"structure"` + + // An identifier that was returned from the previous call to this operation, + // which can be used to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` +} + +// String returns the string representation +func (s ListOfferingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListOfferingsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
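+//
+// Editor's note: because NextToken is the only (optional) field, a first
+// ListOfferings call can pass an empty input; a sketch, assuming an existing
+// svc client:
+//
+//	offerings, err := svc.ListOfferings(&devicefarm.ListOfferingsInput{})
+//	if err == nil {
+//		for _, offering := range offerings.Offerings {
+//			fmt.Println(aws.StringValue(offering.Id), aws.StringValue(offering.Platform))
+//		}
+//	}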
+func (s *ListOfferingsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListOfferingsInput"} + if s.NextToken != nil && len(*s.NextToken) < 4 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 4)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the return values of the list of offerings. +type ListOfferingsOutput struct { + _ struct{} `type:"structure"` + + // An identifier that was returned from the previous call to this operation, + // which can be used to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` + + // A value representing the list offering results. + Offerings []*Offering `locationName:"offerings" type:"list"` +} + +// String returns the string representation +func (s ListOfferingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListOfferingsOutput) GoString() string { + return s.String() +} + +// Represents a request to the list projects operation. +type ListProjectsInput struct { + _ struct{} `type:"structure"` + + // The projects' ARNs. + Arn *string `locationName:"arn" min:"32" type:"string"` + + // An identifier that was returned from the previous call to this operation, + // which can be used to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` +} + +// String returns the string representation +func (s ListProjectsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListProjectsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListProjectsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListProjectsInput"} + if s.Arn != nil && len(*s.Arn) < 32 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 32)) + } + if s.NextToken != nil && len(*s.NextToken) < 4 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 4)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the result of a list projects request. +type ListProjectsOutput struct { + _ struct{} `type:"structure"` + + // If the number of items that are returned is significantly large, this is + // an identifier that is also returned, which can be used in a subsequent call + // to this operation to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` + + // Information about the projects. + Projects []*Project `locationName:"projects" type:"list"` +} + +// String returns the string representation +func (s ListProjectsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListProjectsOutput) GoString() string { + return s.String() +} + +// Represents a request to the list runs operation. +type ListRunsInput struct { + _ struct{} `type:"structure"` + + // The runs' ARNs. + Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` + + // An identifier that was returned from the previous call to this operation, + // which can be used to return the next set of items in the list. 
+ NextToken *string `locationName:"nextToken" min:"4" type:"string"` +} + +// String returns the string representation +func (s ListRunsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRunsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListRunsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListRunsInput"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) + } + if s.Arn != nil && len(*s.Arn) < 32 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 32)) + } + if s.NextToken != nil && len(*s.NextToken) < 4 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 4)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the result of a list runs request. +type ListRunsOutput struct { + _ struct{} `type:"structure"` + + // If the number of items that are returned is significantly large, this is + // an identifier that is also returned, which can be used in a subsequent call + // to this operation to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` + + // Information about the runs. + Runs []*Run `locationName:"runs" type:"list"` +} + +// String returns the string representation +func (s ListRunsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRunsOutput) GoString() string { + return s.String() +} + +// Represents a request to the list samples operation. +type ListSamplesInput struct { + _ struct{} `type:"structure"` + + // The samples' ARNs. + Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` + + // An identifier that was returned from the previous call to this operation, + // which can be used to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` +} + +// String returns the string representation +func (s ListSamplesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSamplesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListSamplesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListSamplesInput"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) + } + if s.Arn != nil && len(*s.Arn) < 32 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 32)) + } + if s.NextToken != nil && len(*s.NextToken) < 4 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 4)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the result of a list samples request. +type ListSamplesOutput struct { + _ struct{} `type:"structure"` + + // If the number of items that are returned is significantly large, this is + // an identifier that is also returned, which can be used in a subsequent call + // to this operation to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` + + // Information about the samples. 
+ Samples []*Sample `locationName:"samples" type:"list"` +} + +// String returns the string representation +func (s ListSamplesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSamplesOutput) GoString() string { + return s.String() +} + +// Represents a request to the list suites operation. +type ListSuitesInput struct { + _ struct{} `type:"structure"` + + // The suites' ARNs. + Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` + + // An identifier that was returned from the previous call to this operation, + // which can be used to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` +} + +// String returns the string representation +func (s ListSuitesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSuitesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListSuitesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListSuitesInput"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) + } + if s.Arn != nil && len(*s.Arn) < 32 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 32)) + } + if s.NextToken != nil && len(*s.NextToken) < 4 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 4)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the result of a list suites request. +type ListSuitesOutput struct { + _ struct{} `type:"structure"` + + // If the number of items that are returned is significantly large, this is + // an identifier that is also returned, which can be used in a subsequent call + // to this operation to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` + + // Information about the suites. + Suites []*Suite `locationName:"suites" type:"list"` +} + +// String returns the string representation +func (s ListSuitesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSuitesOutput) GoString() string { + return s.String() +} + +// Represents a request to the list tests operation. +type ListTestsInput struct { + _ struct{} `type:"structure"` + + // The tests' ARNs. + Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` + + // An identifier that was returned from the previous call to this operation, + // which can be used to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` +} + +// String returns the string representation +func (s ListTestsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTestsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
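+//
+// Editor's note: the SDK's parameter-validation handler calls Validate before
+// a request is sent, but it can also be invoked directly; a sketch:
+//
+//	input := &devicefarm.ListTestsInput{Arn: aws.String("too-short")}
+//	if err := input.Validate(); err != nil {
+//		fmt.Println(err) // reports the unmet 32-character minimum for Arn
+//	}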
+func (s *ListTestsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTestsInput"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) + } + if s.Arn != nil && len(*s.Arn) < 32 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 32)) + } + if s.NextToken != nil && len(*s.NextToken) < 4 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 4)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the result of a list tests request. +type ListTestsOutput struct { + _ struct{} `type:"structure"` + + // If the number of items that are returned is significantly large, this is + // an identifier that is also returned, which can be used in a subsequent call + // to this operation to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` + + // Information about the tests. + Tests []*Test `locationName:"tests" type:"list"` +} + +// String returns the string representation +func (s ListTestsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTestsOutput) GoString() string { + return s.String() +} + +// Represents a request to the list unique problems operation. +type ListUniqueProblemsInput struct { + _ struct{} `type:"structure"` + + // The unique problems' ARNs. + Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` + + // An identifier that was returned from the previous call to this operation, + // which can be used to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` +} + +// String returns the string representation +func (s ListUniqueProblemsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListUniqueProblemsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListUniqueProblemsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListUniqueProblemsInput"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) + } + if s.Arn != nil && len(*s.Arn) < 32 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 32)) + } + if s.NextToken != nil && len(*s.NextToken) < 4 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 4)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the result of a list unique problems request. +type ListUniqueProblemsOutput struct { + _ struct{} `type:"structure"` + + // If the number of items that are returned is significantly large, this is + // an identifier that is also returned, which can be used in a subsequent call + // to this operation to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` + + // Information about the unique problems. + // + // Allowed values include: + // + // PENDING: A pending condition. + // + // PASSED: A passing condition. + // + // WARNED: A warning condition. + // + // FAILED: A failed condition. + // + // SKIPPED: A skipped condition. + // + // ERRORED: An error condition. + // + // STOPPED: A stopped condition. 
+ UniqueProblems map[string][]*UniqueProblem `locationName:"uniqueProblems" type:"map"` +} + +// String returns the string representation +func (s ListUniqueProblemsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListUniqueProblemsOutput) GoString() string { + return s.String() +} + +// Represents a request to the list uploads operation. +type ListUploadsInput struct { + _ struct{} `type:"structure"` + + // The uploads' ARNs. + Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` + + // An identifier that was returned from the previous call to this operation, + // which can be used to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` +} + +// String returns the string representation +func (s ListUploadsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListUploadsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListUploadsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListUploadsInput"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) + } + if s.Arn != nil && len(*s.Arn) < 32 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 32)) + } + if s.NextToken != nil && len(*s.NextToken) < 4 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 4)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the result of a list uploads request. +type ListUploadsOutput struct { + _ struct{} `type:"structure"` + + // If the number of items that are returned is significantly large, this is + // an identifier that is also returned, which can be used in a subsequent call + // to this operation to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` + + // Information about the uploads. + Uploads []*Upload `locationName:"uploads" type:"list"` +} + +// String returns the string representation +func (s ListUploadsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListUploadsOutput) GoString() string { + return s.String() +} + +// Represents a latitude and longitude pair, expressed in geographic coordinate +// system degrees (for example 47.6204, -122.3491). +// +// Elevation is currently not supported. +type Location struct { + _ struct{} `type:"structure"` + + // The latitude. + Latitude *float64 `locationName:"latitude" type:"double" required:"true"` + + // The longitude. + Longitude *float64 `locationName:"longitude" type:"double" required:"true"` +} + +// String returns the string representation +func (s Location) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Location) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
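+//
+// Editor's note: a minimal sketch using the example coordinates from the doc
+// comment above; both fields are required, which is what Validate enforces:
+//
+//	loc := &devicefarm.Location{
+//		Latitude:  aws.Float64(47.6204),
+//		Longitude: aws.Float64(-122.3491),
+//	}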
+func (s *Location) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Location"} + if s.Latitude == nil { + invalidParams.Add(request.NewErrParamRequired("Latitude")) + } + if s.Longitude == nil { + invalidParams.Add(request.NewErrParamRequired("Longitude")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A number representing the monetary amount for an offering or transaction. +type MonetaryAmount struct { + _ struct{} `type:"structure"` + + // The numerical amount of an offering or transaction. + Amount *float64 `locationName:"amount" type:"double"` + + // The currency code of a monetary amount. For example, USD means "U.S. dollars." + CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCode"` +} + +// String returns the string representation +func (s MonetaryAmount) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MonetaryAmount) GoString() string { + return s.String() +} + +// Represents the metadata of a device offering. +type Offering struct { + _ struct{} `type:"structure"` + + // A string describing the offering. + Description *string `locationName:"description" type:"string"` + + // The ID that corresponds to a device offering. + Id *string `locationName:"id" min:"32" type:"string"` + + // The platform of the device (e.g., ANDROID or IOS). + Platform *string `locationName:"platform" type:"string" enum:"DevicePlatform"` + + // Specifies whether there are recurring charges for the offering. + RecurringCharges []*RecurringCharge `locationName:"recurringCharges" type:"list"` + + // The type of offering (e.g., "RECURRING") for a device. + Type *string `locationName:"type" type:"string" enum:"OfferingType"` +} + +// String returns the string representation +func (s Offering) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Offering) GoString() string { + return s.String() +} + +// The status of the offering. +type OfferingStatus struct { + _ struct{} `type:"structure"` + + // The date on which the offering is effective. + EffectiveOn *time.Time `locationName:"effectiveOn" type:"timestamp" timestampFormat:"unix"` + + // Represents the metadata of an offering status. + Offering *Offering `locationName:"offering" type:"structure"` + + // The number of available devices in the offering. + Quantity *int64 `locationName:"quantity" type:"integer"` + + // The type specified for the offering status. + Type *string `locationName:"type" type:"string" enum:"OfferingTransactionType"` +} + +// String returns the string representation +func (s OfferingStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OfferingStatus) GoString() string { + return s.String() +} + +// Represents the metadata of an offering transaction. +type OfferingTransaction struct { + _ struct{} `type:"structure"` + + // The cost of an offering transaction. + Cost *MonetaryAmount `locationName:"cost" type:"structure"` + + // The date on which an offering transaction was created. + CreatedOn *time.Time `locationName:"createdOn" type:"timestamp" timestampFormat:"unix"` + + // The status of an offering transaction. + OfferingStatus *OfferingStatus `locationName:"offeringStatus" type:"structure"` + + // The transaction ID of the offering transaction. 
+ TransactionId *string `locationName:"transactionId" min:"32" type:"string"` +} + +// String returns the string representation +func (s OfferingTransaction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OfferingTransaction) GoString() string { + return s.String() +} + +// Represents a specific warning or failure. +type Problem struct { + _ struct{} `type:"structure"` + + // Information about the associated device. + Device *Device `locationName:"device" type:"structure"` + + // Information about the associated job. + Job *ProblemDetail `locationName:"job" type:"structure"` + + // A message about the problem's result. + Message *string `locationName:"message" type:"string"` + + // The problem's result. + // + // Allowed values include: + // + // PENDING: A pending condition. + // + // PASSED: A passing condition. + // + // WARNED: A warning condition. + // + // FAILED: A failed condition. + // + // SKIPPED: A skipped condition. + // + // ERRORED: An error condition. + // + // STOPPED: A stopped condition. + Result *string `locationName:"result" type:"string" enum:"ExecutionResult"` + + // Information about the associated run. + Run *ProblemDetail `locationName:"run" type:"structure"` + + // Information about the associated suite. + Suite *ProblemDetail `locationName:"suite" type:"structure"` + + // Information about the associated test. + Test *ProblemDetail `locationName:"test" type:"structure"` +} + +// String returns the string representation +func (s Problem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Problem) GoString() string { + return s.String() +} + +// Information about a problem detail. +type ProblemDetail struct { + _ struct{} `type:"structure"` + + // The problem detail's ARN. + Arn *string `locationName:"arn" min:"32" type:"string"` + + // The problem detail's name. + Name *string `locationName:"name" type:"string"` +} + +// String returns the string representation +func (s ProblemDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProblemDetail) GoString() string { + return s.String() +} + +// Represents an operating-system neutral workspace for running and managing +// tests. +type Project struct { + _ struct{} `type:"structure"` + + // The project's ARN. + Arn *string `locationName:"arn" min:"32" type:"string"` + + // When the project was created. + Created *time.Time `locationName:"created" type:"timestamp" timestampFormat:"unix"` + + // The project's name. + Name *string `locationName:"name" type:"string"` +} + +// String returns the string representation +func (s Project) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Project) GoString() string { + return s.String() +} + +// Represents a request for a purchase offering. +type PurchaseOfferingInput struct { + _ struct{} `type:"structure"` + + // The ID of the offering. + OfferingId *string `locationName:"offeringId" min:"32" type:"string"` + + // The number of device slots you wish to purchase in an offering request. 
+ Quantity *int64 `locationName:"quantity" type:"integer"` +} + +// String returns the string representation +func (s PurchaseOfferingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseOfferingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PurchaseOfferingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PurchaseOfferingInput"} + if s.OfferingId != nil && len(*s.OfferingId) < 32 { + invalidParams.Add(request.NewErrParamMinLen("OfferingId", 32)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of the purchase offering (e.g., success or failure). +type PurchaseOfferingOutput struct { + _ struct{} `type:"structure"` + + // Represents the offering transaction for the purchase result. + OfferingTransaction *OfferingTransaction `locationName:"offeringTransaction" type:"structure"` +} + +// String returns the string representation +func (s PurchaseOfferingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseOfferingOutput) GoString() string { + return s.String() +} + +// Represents the set of radios and their states on a device. Examples of radios +// include Wi-Fi, GPS, Bluetooth, and NFC. +type Radios struct { + _ struct{} `type:"structure"` + + // True if Bluetooth is enabled at the beginning of the test; otherwise, false. + Bluetooth *bool `locationName:"bluetooth" type:"boolean"` + + // True if GPS is enabled at the beginning of the test; otherwise, false. + Gps *bool `locationName:"gps" type:"boolean"` + + // True if NFC is enabled at the beginning of the test; otherwise, false. + Nfc *bool `locationName:"nfc" type:"boolean"` + + // True if Wi-Fi is enabled at the beginning of the test; otherwise, false. + Wifi *bool `locationName:"wifi" type:"boolean"` +} + +// String returns the string representation +func (s Radios) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Radios) GoString() string { + return s.String() +} + +// Specifies whether charges for devices will be recurring. +type RecurringCharge struct { + _ struct{} `type:"structure"` + + // The cost of the recurring charge. + Cost *MonetaryAmount `locationName:"cost" type:"structure"` + + // The frequency in which charges will recur. + Frequency *string `locationName:"frequency" type:"string" enum:"RecurringChargeFrequency"` +} + +// String returns the string representation +func (s RecurringCharge) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecurringCharge) GoString() string { + return s.String() +} + +// A request representing an offering renewal. +type RenewOfferingInput struct { + _ struct{} `type:"structure"` + + // The ID of a request to renew an offering. + OfferingId *string `locationName:"offeringId" min:"32" type:"string"` + + // The quantity requested in an offering renewal. + Quantity *int64 `locationName:"quantity" type:"integer"` +} + +// String returns the string representation +func (s RenewOfferingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RenewOfferingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
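+//
+// Editor's note: a hedged sketch of purchasing a device slot; offeringId is an
+// assumed, previously listed offering ID (Validate only checks its 32-character
+// minimum):
+//
+//	purchase, err := svc.PurchaseOffering(&devicefarm.PurchaseOfferingInput{
+//		OfferingId: aws.String(offeringId),
+//		Quantity:   aws.Int64(1),
+//	})
+//	if err == nil {
+//		fmt.Println(aws.StringValue(purchase.OfferingTransaction.TransactionId))
+//	}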
+func (s *RenewOfferingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RenewOfferingInput"} + if s.OfferingId != nil && len(*s.OfferingId) < 32 { + invalidParams.Add(request.NewErrParamMinLen("OfferingId", 32)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of a renewal offering. +type RenewOfferingOutput struct { + _ struct{} `type:"structure"` + + // Represents the status of the offering transaction for the renewal. + OfferingTransaction *OfferingTransaction `locationName:"offeringTransaction" type:"structure"` +} + +// String returns the string representation +func (s RenewOfferingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RenewOfferingOutput) GoString() string { + return s.String() +} + +// Represents the screen resolution of a device in height and width, expressed +// in pixels. +type Resolution struct { + _ struct{} `type:"structure"` + + // The screen resolution's height, expressed in pixels. + Height *int64 `locationName:"height" type:"integer"` + + // The screen resolution's width, expressed in pixels. + Width *int64 `locationName:"width" type:"integer"` +} + +// String returns the string representation +func (s Resolution) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Resolution) GoString() string { + return s.String() +} + +// Represents a condition for a device pool. +type Rule struct { + _ struct{} `type:"structure"` + + // The rule's stringified attribute. For example, specify the value as "\"abc\"". + // + // Allowed values include: + // + // ARN: The ARN. + // + // FORM_FACTOR: The form factor (for example, phone or tablet). + // + // MANUFACTURER: The manufacturer. + // + // PLATFORM: The platform (for example, Android or iOS). + Attribute *string `locationName:"attribute" type:"string" enum:"DeviceAttribute"` + + // The rule's operator. + // + // EQUALS: The equals operator. + // + // GREATER_THAN: The greater-than operator. + // + // IN: The in operator. + // + // LESS_THAN: The less-than operator. + // + // NOT_IN: The not-in operator. + Operator *string `locationName:"operator" type:"string" enum:"RuleOperator"` + + // The rule's value. + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s Rule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Rule) GoString() string { + return s.String() +} + +// Represents an app on a set of devices with a specific test and configuration. +type Run struct { + _ struct{} `type:"structure"` + + // The run's ARN. + Arn *string `locationName:"arn" min:"32" type:"string"` + + // Specifies the billing method for a test run: metered or unmetered. If the + // parameter is not specified, the default value is unmetered. + BillingMethod *string `locationName:"billingMethod" type:"string" enum:"BillingMethod"` + + // The total number of completed jobs. + CompletedJobs *int64 `locationName:"completedJobs" type:"integer"` + + // The run's result counters. + Counters *Counters `locationName:"counters" type:"structure"` + + // When the run was created. + Created *time.Time `locationName:"created" type:"timestamp" timestampFormat:"unix"` + + // Represents the total (metered or unmetered) minutes used by the test run. 
+ DeviceMinutes *DeviceMinutes `locationName:"deviceMinutes" type:"structure"` + + // A message about the run's result. + Message *string `locationName:"message" type:"string"` + + // The run's name. + Name *string `locationName:"name" type:"string"` + + // The run's platform. + // + // Allowed values include: + // + // ANDROID: The Android platform. + // + // IOS: The iOS platform. + Platform *string `locationName:"platform" type:"string" enum:"DevicePlatform"` + + // The run's result. + // + // Allowed values include: + // + // PENDING: A pending condition. + // + // PASSED: A passing condition. + // + // WARNED: A warning condition. + // + // FAILED: A failed condition. + // + // SKIPPED: A skipped condition. + // + // ERRORED: An error condition. + // + // STOPPED: A stopped condition. + Result *string `locationName:"result" type:"string" enum:"ExecutionResult"` + + // The run's start time. + Started *time.Time `locationName:"started" type:"timestamp" timestampFormat:"unix"` + + // The run's status. + // + // Allowed values include: + // + // PENDING: A pending status. + // + // PENDING_CONCURRENCY: A pending concurrency status. + // + // PENDING_DEVICE: A pending device status. + // + // PROCESSING: A processing status. + // + // SCHEDULING: A scheduling status. + // + // PREPARING: A preparing status. + // + // RUNNING: A running status. + // + // COMPLETED: A completed status. + // + // STOPPING: A stopping status. + Status *string `locationName:"status" type:"string" enum:"ExecutionStatus"` + + // The run's stop time. + Stopped *time.Time `locationName:"stopped" type:"timestamp" timestampFormat:"unix"` + + // The total number of jobs for the run. + TotalJobs *int64 `locationName:"totalJobs" type:"integer"` + + // The run's type. + // + // Must be one of the following values: + // + // BUILTIN_FUZZ: The built-in fuzz type. + // + // BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android + // app, interacting with it and capturing screenshots at the same time. + // + // APPIUM_JAVA_JUNIT: The Appium Java JUnit type. + // + // APPIUM_JAVA_TESTNG: The Appium Java TestNG type. + // + // APPIUM_PYTHON: The Appium Python type. + // + // APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for Web apps. + // + // APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for Web apps. + // + // APPIUM_WEB_PYTHON: The Appium Python type for Web apps. + // + // CALABASH: The Calabash type. + // + // INSTRUMENTATION: The Instrumentation type. + // + // UIAUTOMATION: The uiautomation type. + // + // UIAUTOMATOR: The uiautomator type. + // + // XCTEST: The XCode test type. + // + // XCTEST_UI: The XCode UI test type. + Type *string `locationName:"type" type:"string" enum:"TestType"` +} + +// String returns the string representation +func (s Run) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Run) GoString() string { + return s.String() +} + +// Represents a sample of performance data. +type Sample struct { + _ struct{} `type:"structure"` + + // The sample's ARN. + Arn *string `locationName:"arn" min:"32" type:"string"` + + // The sample's type. + // + // Must be one of the following values: + // + // CPU: A CPU sample type. This is expressed as the app processing CPU time + // (including child processes) as reported by process, as a percentage. + // + // MEMORY: A memory usage sample type. This is expressed as the total proportional + // set size of an app process, in kilobytes. 
+ //
+ // NATIVE_AVG_DRAWTIME
+ //
+ // NATIVE_FPS
+ //
+ // NATIVE_FRAMES
+ //
+ // NATIVE_MAX_DRAWTIME
+ //
+ // NATIVE_MIN_DRAWTIME
+ //
+ // OPENGL_AVG_DRAWTIME
+ //
+ // OPENGL_FPS
+ //
+ // OPENGL_FRAMES
+ //
+ // OPENGL_MAX_DRAWTIME
+ //
+ // OPENGL_MIN_DRAWTIME
+ //
+ // RX
+ //
+ // RX_RATE: The total number of bytes per second (TCP and UDP) that are received,
+ // by app process.
+ //
+ // THREADS: A threads sample type. This is expressed as the total number of
+ // threads per app process.
+ //
+ // TX
+ //
+ // TX_RATE: The total number of bytes per second (TCP and UDP) that are sent,
+ // by app process.
+ Type *string `locationName:"type" type:"string" enum:"SampleType"`
+
+ // The pre-signed Amazon S3 URL that can be used with a corresponding GET request
+ // to download the sample's file.
+ Url *string `locationName:"url" type:"string"`
+}
+
+// String returns the string representation
+func (s Sample) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Sample) GoString() string {
+ return s.String()
+}
+
+// Represents the settings for a run. Includes things like location, radio states,
+// auxiliary apps, and network profiles.
+type ScheduleRunConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // A list of auxiliary apps for the run.
+ AuxiliaryApps []*string `locationName:"auxiliaryApps" type:"list"`
+
+ // Specifies the billing method for a test run: metered or unmetered. If the
+ // parameter is not specified, the default value is unmetered.
+ BillingMethod *string `locationName:"billingMethod" type:"string" enum:"BillingMethod"`
+
+ // The ARN of the extra data for the run. The extra data is a .zip file that
+ // AWS Device Farm will extract to external data for Android or the app's sandbox
+ // for iOS.
+ ExtraDataPackageArn *string `locationName:"extraDataPackageArn" min:"32" type:"string"`
+
+ // Information about the locale that is used for the run.
+ Locale *string `locationName:"locale" type:"string"`
+
+ // Information about the location that is used for the run.
+ Location *Location `locationName:"location" type:"structure"`
+
+ // Reserved for internal use.
+ NetworkProfileArn *string `locationName:"networkProfileArn" min:"32" type:"string"`
+
+ // Information about the radio states for the run.
+ Radios *Radios `locationName:"radios" type:"structure"`
+}
+
+// String returns the string representation
+func (s ScheduleRunConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ScheduleRunConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ScheduleRunConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ScheduleRunConfiguration"}
+ if s.ExtraDataPackageArn != nil && len(*s.ExtraDataPackageArn) < 32 {
+ invalidParams.Add(request.NewErrParamMinLen("ExtraDataPackageArn", 32))
+ }
+ if s.NetworkProfileArn != nil && len(*s.NetworkProfileArn) < 32 {
+ invalidParams.Add(request.NewErrParamMinLen("NetworkProfileArn", 32))
+ }
+ if s.Location != nil {
+ if err := s.Location.Validate(); err != nil {
+ invalidParams.AddNested("Location", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents a request to the schedule run operation.
+type ScheduleRunInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN of the app to schedule a run.
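+ //
+ // Editor's note (illustrative sketch, not generated SDK docs): a minimal
+ // ScheduleRun call, assuming project, device pool, app, and test package
+ // ARNs obtained from earlier CreateProject, CreateDevicePool, and
+ // CreateUpload calls:
+ //
+ //	resp, err := svc.ScheduleRun(&devicefarm.ScheduleRunInput{
+ //		ProjectArn:    aws.String(projectArn),
+ //		AppArn:        aws.String(appArn),
+ //		DevicePoolArn: aws.String(devicePoolArn),
+ //		Test: &devicefarm.ScheduleRunTest{
+ //			Type:           aws.String("INSTRUMENTATION"),
+ //			TestPackageArn: aws.String(testPackageArn),
+ //		},
+ //	})
+ //	if err == nil {
+ //		fmt.Println(aws.StringValue(resp.Run.Arn))
+ //	}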
+ AppArn *string `locationName:"appArn" min:"32" type:"string"` + + // Information about the settings for the run to be scheduled. + Configuration *ScheduleRunConfiguration `locationName:"configuration" type:"structure"` + + // The ARN of the device pool for the run to be scheduled. + DevicePoolArn *string `locationName:"devicePoolArn" min:"32" type:"string" required:"true"` + + // The name for the run to be scheduled. + Name *string `locationName:"name" type:"string"` + + // The ARN of the project for the run to be scheduled. + ProjectArn *string `locationName:"projectArn" min:"32" type:"string" required:"true"` + + // Information about the test for the run to be scheduled. + Test *ScheduleRunTest `locationName:"test" type:"structure" required:"true"` +} + +// String returns the string representation +func (s ScheduleRunInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduleRunInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ScheduleRunInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ScheduleRunInput"} + if s.AppArn != nil && len(*s.AppArn) < 32 { + invalidParams.Add(request.NewErrParamMinLen("AppArn", 32)) + } + if s.DevicePoolArn == nil { + invalidParams.Add(request.NewErrParamRequired("DevicePoolArn")) + } + if s.DevicePoolArn != nil && len(*s.DevicePoolArn) < 32 { + invalidParams.Add(request.NewErrParamMinLen("DevicePoolArn", 32)) + } + if s.ProjectArn == nil { + invalidParams.Add(request.NewErrParamRequired("ProjectArn")) + } + if s.ProjectArn != nil && len(*s.ProjectArn) < 32 { + invalidParams.Add(request.NewErrParamMinLen("ProjectArn", 32)) + } + if s.Test == nil { + invalidParams.Add(request.NewErrParamRequired("Test")) + } + if s.Configuration != nil { + if err := s.Configuration.Validate(); err != nil { + invalidParams.AddNested("Configuration", err.(request.ErrInvalidParams)) + } + } + if s.Test != nil { + if err := s.Test.Validate(); err != nil { + invalidParams.AddNested("Test", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the result of a schedule run request. +type ScheduleRunOutput struct { + _ struct{} `type:"structure"` + + // Information about the scheduled run. + Run *Run `locationName:"run" type:"structure"` +} + +// String returns the string representation +func (s ScheduleRunOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduleRunOutput) GoString() string { + return s.String() +} + +// Represents additional test settings. +type ScheduleRunTest struct { + _ struct{} `type:"structure"` + + // The test's filter. + Filter *string `locationName:"filter" type:"string"` + + // The test's parameters, such as test framework parameters and fixture settings. + Parameters map[string]*string `locationName:"parameters" type:"map"` + + // The ARN of the uploaded test that will be run. + TestPackageArn *string `locationName:"testPackageArn" min:"32" type:"string"` + + // The test's type. + // + // Must be one of the following values: + // + // BUILTIN_FUZZ: The built-in fuzz type. + // + // BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android + // app, interacting with it and capturing screenshots at the same time. + // + // APPIUM_JAVA_JUNIT: The Appium Java JUnit type. 
+ // + // APPIUM_JAVA_TESTNG: The Appium Java TestNG type. + // + // APPIUM_PYTHON: The Appium Python type. + // + // APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for Web apps. + // + // APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for Web apps. + // + // APPIUM_WEB_PYTHON: The Appium Python type for Web apps. + // + // CALABASH: The Calabash type. + // + // INSTRUMENTATION: The Instrumentation type. + // + // UIAUTOMATION: The uiautomation type. + // + // UIAUTOMATOR: The uiautomator type. + // + // XCTEST: The XCode test type. + // + // XCTEST_UI: The XCode UI test type. + Type *string `locationName:"type" type:"string" required:"true" enum:"TestType"` +} + +// String returns the string representation +func (s ScheduleRunTest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduleRunTest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ScheduleRunTest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ScheduleRunTest"} + if s.TestPackageArn != nil && len(*s.TestPackageArn) < 32 { + invalidParams.Add(request.NewErrParamMinLen("TestPackageArn", 32)) + } + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the request to stop a specific run. +type StopRunInput struct { + _ struct{} `type:"structure"` + + // Represents the Amazon Resource Name (ARN) of the Device Farm run you wish + // to stop. + Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` +} + +// String returns the string representation +func (s StopRunInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopRunInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StopRunInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StopRunInput"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) + } + if s.Arn != nil && len(*s.Arn) < 32 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 32)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the results of your stop run attempt. +type StopRunOutput struct { + _ struct{} `type:"structure"` + + // Represents an app on a set of devices with a specific test and configuration. + Run *Run `locationName:"run" type:"structure"` +} + +// String returns the string representation +func (s StopRunOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopRunOutput) GoString() string { + return s.String() +} + +// Represents a collection of one or more tests. +type Suite struct { + _ struct{} `type:"structure"` + + // The suite's ARN. + Arn *string `locationName:"arn" min:"32" type:"string"` + + // The suite's result counters. + Counters *Counters `locationName:"counters" type:"structure"` + + // When the suite was created. + Created *time.Time `locationName:"created" type:"timestamp" timestampFormat:"unix"` + + // Represents the total (metered or unmetered) minutes used by the test suite. + DeviceMinutes *DeviceMinutes `locationName:"deviceMinutes" type:"structure"` + + // A message about the suite's result. 
+ Message *string `locationName:"message" type:"string"` + + // The suite's name. + Name *string `locationName:"name" type:"string"` + + // The suite's result. + // + // Allowed values include: + // + // PENDING: A pending condition. + // + // PASSED: A passing condition. + // + // WARNED: A warning condition. + // + // FAILED: A failed condition. + // + // SKIPPED: A skipped condition. + // + // ERRORED: An error condition. + // + // STOPPED: A stopped condition. + Result *string `locationName:"result" type:"string" enum:"ExecutionResult"` + + // The suite's start time. + Started *time.Time `locationName:"started" type:"timestamp" timestampFormat:"unix"` + + // The suite's status. + // + // Allowed values include: + // + // PENDING: A pending status. + // + // PENDING_CONCURRENCY: A pending concurrency status. + // + // PENDING_DEVICE: A pending device status. + // + // PROCESSING: A processing status. + // + // SCHEDULING: A scheduling status. + // + // PREPARING: A preparing status. + // + // RUNNING: A running status. + // + // COMPLETED: A completed status. + // + // STOPPING: A stopping status. + Status *string `locationName:"status" type:"string" enum:"ExecutionStatus"` + + // The suite's stop time. + Stopped *time.Time `locationName:"stopped" type:"timestamp" timestampFormat:"unix"` + + // The suite's type. + // + // Must be one of the following values: + // + // BUILTIN_FUZZ: The built-in fuzz type. + // + // BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android + // app, interacting with it and capturing screenshots at the same time. + // + // APPIUM_JAVA_JUNIT: The Appium Java JUnit type. + // + // APPIUM_JAVA_TESTNG: The Appium Java TestNG type. + // + // APPIUM_PYTHON: The Appium Python type. + // + // APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for Web apps. + // + // APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for Web apps. + // + // APPIUM_WEB_PYTHON: The Appium Python type for Web apps. + // + // CALABASH: The Calabash type. + // + // INSTRUMENTATION: The Instrumentation type. + // + // UIAUTOMATION: The uiautomation type. + // + // UIAUTOMATOR: The uiautomator type. + // + // XCTEST: The XCode test type. + // + // XCTEST_UI: The XCode UI test type. + Type *string `locationName:"type" type:"string" enum:"TestType"` +} + +// String returns the string representation +func (s Suite) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Suite) GoString() string { + return s.String() +} + +// Represents a condition that is evaluated. +type Test struct { + _ struct{} `type:"structure"` + + // The test's ARN. + Arn *string `locationName:"arn" min:"32" type:"string"` + + // The test's result counters. + Counters *Counters `locationName:"counters" type:"structure"` + + // When the test was created. + Created *time.Time `locationName:"created" type:"timestamp" timestampFormat:"unix"` + + // Represents the total (metered or unmetered) minutes used by the test. + DeviceMinutes *DeviceMinutes `locationName:"deviceMinutes" type:"structure"` + + // A message about the test's result. + Message *string `locationName:"message" type:"string"` + + // The test's name. + Name *string `locationName:"name" type:"string"` + + // The test's result. + // + // Allowed values include: + // + // PENDING: A pending condition. + // + // PASSED: A passing condition. + // + // WARNED: A warning condition. + // + // FAILED: A failed condition. + // + // SKIPPED: A skipped condition. 
+ //
+ // ERRORED: An error condition.
+ //
+ // STOPPED: A stopped condition.
+ Result *string `locationName:"result" type:"string" enum:"ExecutionResult"`
+
+ // The test's start time.
+ Started *time.Time `locationName:"started" type:"timestamp" timestampFormat:"unix"`
+
+ // The test's status.
+ //
+ // Allowed values include:
+ //
+ // PENDING: A pending status.
+ //
+ // PENDING_CONCURRENCY: A pending concurrency status.
+ //
+ // PENDING_DEVICE: A pending device status.
+ //
+ // PROCESSING: A processing status.
+ //
+ // SCHEDULING: A scheduling status.
+ //
+ // PREPARING: A preparing status.
+ //
+ // RUNNING: A running status.
+ //
+ // COMPLETED: A completed status.
+ //
+ // STOPPING: A stopping status.
+ Status *string `locationName:"status" type:"string" enum:"ExecutionStatus"`
+
+ // The test's stop time.
+ Stopped *time.Time `locationName:"stopped" type:"timestamp" timestampFormat:"unix"`
+
+ // The test's type.
+ //
+ // Must be one of the following values:
+ //
+ // BUILTIN_FUZZ: The built-in fuzz type.
+ //
+ // BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android
+ // app, interacting with it and capturing screenshots at the same time.
+ //
+ // APPIUM_JAVA_JUNIT: The Appium Java JUnit type.
+ //
+ // APPIUM_JAVA_TESTNG: The Appium Java TestNG type.
+ //
+ // APPIUM_PYTHON: The Appium Python type.
+ //
+ // APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for Web apps.
+ //
+ // APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for Web apps.
+ //
+ // APPIUM_WEB_PYTHON: The Appium Python type for Web apps.
+ //
+ // CALABASH: The Calabash type.
+ //
+ // INSTRUMENTATION: The Instrumentation type.
+ //
+ // UIAUTOMATION: The uiautomation type.
+ //
+ // UIAUTOMATOR: The uiautomator type.
+ //
+ // XCTEST: The XCode test type.
+ //
+ // XCTEST_UI: The XCode UI test type.
+ Type *string `locationName:"type" type:"string" enum:"TestType"`
+}
+
+// String returns the string representation
+func (s Test) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Test) GoString() string {
+ return s.String()
+}
+
+// A collection of one or more problems, grouped by their result.
+type UniqueProblem struct {
+ _ struct{} `type:"structure"`
+
+ // A message about the unique problems' result.
+ Message *string `locationName:"message" type:"string"`
+
+ // Information about the problems.
+ Problems []*Problem `locationName:"problems" type:"list"`
+}
+
+// String returns the string representation
+func (s UniqueProblem) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UniqueProblem) GoString() string {
+ return s.String()
+}
+
+// Represents a request to the update device pool operation.
+type UpdateDevicePoolInput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the Device Farm device pool you wish to
+ // update.
+ Arn *string `locationName:"arn" min:"32" type:"string" required:"true"`
+
+ // A description of the device pool you wish to update.
+ Description *string `locationName:"description" type:"string"`
+
+ // A string representing the name of the device pool you wish to update.
+ Name *string `locationName:"name" type:"string"`
+
+ // Represents the rules you wish to modify for the device pool. Updating rules
+ // is optional; however, if you choose to update rules for your request, the
+ // update will replace the existing rules.
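+ //
+ // Editor's note: a hedged sketch of one replacement rule; the attribute and
+ // operator enums are documented on the Rule type earlier in this file, and
+ // the value is stringified, hence the escaped quotes:
+ //
+ //	rule := &devicefarm.Rule{
+ //		Attribute: aws.String("PLATFORM"),
+ //		Operator:  aws.String("EQUALS"),
+ //		Value:     aws.String("\"ANDROID\""),
+ //	}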
+ Rules []*Rule `locationName:"rules" type:"list"` +} + +// String returns the string representation +func (s UpdateDevicePoolInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDevicePoolInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateDevicePoolInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateDevicePoolInput"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) + } + if s.Arn != nil && len(*s.Arn) < 32 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 32)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the result of an update device pool request. +type UpdateDevicePoolOutput struct { + _ struct{} `type:"structure"` + + // Represents a collection of device types. + DevicePool *DevicePool `locationName:"devicePool" type:"structure"` +} + +// String returns the string representation +func (s UpdateDevicePoolOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDevicePoolOutput) GoString() string { + return s.String() +} + +// Represents a request to the update project operation. +type UpdateProjectInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the project whose name you wish to update. + Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` + + // A string representing the new name of the project that you are updating. + Name *string `locationName:"name" type:"string"` +} + +// String returns the string representation +func (s UpdateProjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateProjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateProjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateProjectInput"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) + } + if s.Arn != nil && len(*s.Arn) < 32 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 32)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the result of an update project request. +type UpdateProjectOutput struct { + _ struct{} `type:"structure"` + + // Represents an operating-system neutral workspace for running and managing + // tests. + Project *Project `locationName:"project" type:"structure"` +} + +// String returns the string representation +func (s UpdateProjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateProjectOutput) GoString() string { + return s.String() +} + +// An app or a set of one or more tests to upload or that have been uploaded. +type Upload struct { + _ struct{} `type:"structure"` + + // The upload's ARN. + Arn *string `locationName:"arn" min:"32" type:"string"` + + // The upload's content type (for example, "application/octet-stream"). + ContentType *string `locationName:"contentType" type:"string"` + + // When the upload was created. + Created *time.Time `locationName:"created" type:"timestamp" timestampFormat:"unix"` + + // A message about the upload's result. + Message *string `locationName:"message" type:"string"` + + // The upload's metadata. 
For example, for Android, this contains information + // that is parsed from the manifest and is displayed in the AWS Device Farm + // console after the associated app is uploaded. + Metadata *string `locationName:"metadata" type:"string"` + + // The upload's file name. + Name *string `locationName:"name" type:"string"` + + // The upload's status. + // + // Must be one of the following values: + // + // FAILED: A failed status. + // + // INITIALIZED: An initialized status. + // + // PROCESSING: A processing status. + // + // SUCCEEDED: A succeeded status. + Status *string `locationName:"status" type:"string" enum:"UploadStatus"` + + // The upload's type. + // + // Must be one of the following values: + // + // ANDROID_APP: An Android upload. + // + // IOS_APP: An iOS upload. + // + // WEB_APP: A web application upload. + // + // EXTERNAL_DATA: An external data upload. + // + // APPIUM_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload. + // + // APPIUM_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload. + // + // APPIUM_PYTHON_TEST_PACKAGE: An Appium Python test package upload. + // + // APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload. + // + // APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package + // upload. + // + // APPIUM_WEB_PYTHON_TEST_PACKAGE: An Appium Python test package upload. + // + // CALABASH_TEST_PACKAGE: A Calabash test package upload. + // + // INSTRUMENTATION_TEST_PACKAGE: An instrumentation upload. + // + // UIAUTOMATION_TEST_PACKAGE: A uiautomation test package upload. + // + // UIAUTOMATOR_TEST_PACKAGE: A uiautomator test package upload. + // + // XCTEST_TEST_PACKAGE: An XCode test package upload. + // + // XCTEST_UI_TEST_PACKAGE: An XCode UI test package upload. + Type *string `locationName:"type" type:"string" enum:"UploadType"` + + // The pre-signed Amazon S3 URL that was used to store a file through a corresponding + // PUT request.
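+ //
+ // A minimal, hedged sketch of the caller's side of that PUT, assuming
+ // upload is the *Upload returned by CreateUpload and "app.apk" exists
+ // locally (bytes, io/ioutil, and net/http are standard library):
+ //
+ //    data, _ := ioutil.ReadFile("app.apk")
+ //    req, _ := http.NewRequest("PUT", *upload.Url, bytes.NewReader(data))
+ //    resp, err := http.DefaultClient.Do(req)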
+ Url *string `locationName:"url" type:"string"` +} + +// String returns the string representation +func (s Upload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Upload) GoString() string { + return s.String() +} + +const ( + // @enum ArtifactCategory + ArtifactCategoryScreenshot = "SCREENSHOT" + // @enum ArtifactCategory + ArtifactCategoryFile = "FILE" + // @enum ArtifactCategory + ArtifactCategoryLog = "LOG" +) + +const ( + // @enum ArtifactType + ArtifactTypeUnknown = "UNKNOWN" + // @enum ArtifactType + ArtifactTypeScreenshot = "SCREENSHOT" + // @enum ArtifactType + ArtifactTypeDeviceLog = "DEVICE_LOG" + // @enum ArtifactType + ArtifactTypeMessageLog = "MESSAGE_LOG" + // @enum ArtifactType + ArtifactTypeResultLog = "RESULT_LOG" + // @enum ArtifactType + ArtifactTypeServiceLog = "SERVICE_LOG" + // @enum ArtifactType + ArtifactTypeWebkitLog = "WEBKIT_LOG" + // @enum ArtifactType + ArtifactTypeInstrumentationOutput = "INSTRUMENTATION_OUTPUT" + // @enum ArtifactType + ArtifactTypeExerciserMonkeyOutput = "EXERCISER_MONKEY_OUTPUT" + // @enum ArtifactType + ArtifactTypeCalabashJsonOutput = "CALABASH_JSON_OUTPUT" + // @enum ArtifactType + ArtifactTypeCalabashPrettyOutput = "CALABASH_PRETTY_OUTPUT" + // @enum ArtifactType + ArtifactTypeCalabashStandardOutput = "CALABASH_STANDARD_OUTPUT" + // @enum ArtifactType + ArtifactTypeCalabashJavaXmlOutput = "CALABASH_JAVA_XML_OUTPUT" + // @enum ArtifactType + ArtifactTypeAutomationOutput = "AUTOMATION_OUTPUT" + // @enum ArtifactType + ArtifactTypeAppiumServerOutput = "APPIUM_SERVER_OUTPUT" + // @enum ArtifactType + ArtifactTypeAppiumJavaOutput = "APPIUM_JAVA_OUTPUT" + // @enum ArtifactType + ArtifactTypeAppiumJavaXmlOutput = "APPIUM_JAVA_XML_OUTPUT" + // @enum ArtifactType + ArtifactTypeAppiumPythonOutput = "APPIUM_PYTHON_OUTPUT" + // @enum ArtifactType + ArtifactTypeAppiumPythonXmlOutput = "APPIUM_PYTHON_XML_OUTPUT" + // @enum ArtifactType + ArtifactTypeExplorerEventLog = "EXPLORER_EVENT_LOG" + // @enum ArtifactType + ArtifactTypeExplorerSummaryLog = "EXPLORER_SUMMARY_LOG" + // @enum ArtifactType + ArtifactTypeApplicationCrashReport = "APPLICATION_CRASH_REPORT" + // @enum ArtifactType + ArtifactTypeXctestLog = "XCTEST_LOG" + // @enum ArtifactType + ArtifactTypeVideo = "VIDEO" +) + +const ( + // @enum BillingMethod + BillingMethodMetered = "METERED" + // @enum BillingMethod + BillingMethodUnmetered = "UNMETERED" +) + +const ( + // @enum CurrencyCode + CurrencyCodeUsd = "USD" +) + +const ( + // @enum DeviceAttribute + DeviceAttributeArn = "ARN" + // @enum DeviceAttribute + DeviceAttributePlatform = "PLATFORM" + // @enum DeviceAttribute + DeviceAttributeFormFactor = "FORM_FACTOR" + // @enum DeviceAttribute + DeviceAttributeManufacturer = "MANUFACTURER" +) + +const ( + // @enum DeviceFormFactor + DeviceFormFactorPhone = "PHONE" + // @enum DeviceFormFactor + DeviceFormFactorTablet = "TABLET" +) + +const ( + // @enum DevicePlatform + DevicePlatformAndroid = "ANDROID" + // @enum DevicePlatform + DevicePlatformIos = "IOS" +) + +const ( + // @enum DevicePoolType + DevicePoolTypeCurated = "CURATED" + // @enum DevicePoolType + DevicePoolTypePrivate = "PRIVATE" +) + +const ( + // @enum ExecutionResult + ExecutionResultPending = "PENDING" + // @enum ExecutionResult + ExecutionResultPassed = "PASSED" + // @enum ExecutionResult + ExecutionResultWarned = "WARNED" + // @enum ExecutionResult + ExecutionResultFailed = "FAILED" + // @enum ExecutionResult + ExecutionResultSkipped = "SKIPPED" + // @enum 
ExecutionResult + ExecutionResultErrored = "ERRORED" + // @enum ExecutionResult + ExecutionResultStopped = "STOPPED" +) + +const ( + // @enum ExecutionStatus + ExecutionStatusPending = "PENDING" + // @enum ExecutionStatus + ExecutionStatusPendingConcurrency = "PENDING_CONCURRENCY" + // @enum ExecutionStatus + ExecutionStatusPendingDevice = "PENDING_DEVICE" + // @enum ExecutionStatus + ExecutionStatusProcessing = "PROCESSING" + // @enum ExecutionStatus + ExecutionStatusScheduling = "SCHEDULING" + // @enum ExecutionStatus + ExecutionStatusPreparing = "PREPARING" + // @enum ExecutionStatus + ExecutionStatusRunning = "RUNNING" + // @enum ExecutionStatus + ExecutionStatusCompleted = "COMPLETED" + // @enum ExecutionStatus + ExecutionStatusStopping = "STOPPING" +) + +const ( + // @enum OfferingTransactionType + OfferingTransactionTypePurchase = "PURCHASE" + // @enum OfferingTransactionType + OfferingTransactionTypeRenew = "RENEW" + // @enum OfferingTransactionType + OfferingTransactionTypeSystem = "SYSTEM" +) + +const ( + // @enum OfferingType + OfferingTypeRecurring = "RECURRING" +) + +const ( + // @enum RecurringChargeFrequency + RecurringChargeFrequencyMonthly = "MONTHLY" +) + +const ( + // @enum RuleOperator + RuleOperatorEquals = "EQUALS" + // @enum RuleOperator + RuleOperatorLessThan = "LESS_THAN" + // @enum RuleOperator + RuleOperatorGreaterThan = "GREATER_THAN" + // @enum RuleOperator + RuleOperatorIn = "IN" + // @enum RuleOperator + RuleOperatorNotIn = "NOT_IN" +) + +const ( + // @enum SampleType + SampleTypeCpu = "CPU" + // @enum SampleType + SampleTypeMemory = "MEMORY" + // @enum SampleType + SampleTypeThreads = "THREADS" + // @enum SampleType + SampleTypeRxRate = "RX_RATE" + // @enum SampleType + SampleTypeTxRate = "TX_RATE" + // @enum SampleType + SampleTypeRx = "RX" + // @enum SampleType + SampleTypeTx = "TX" + // @enum SampleType + SampleTypeNativeFrames = "NATIVE_FRAMES" + // @enum SampleType + SampleTypeNativeFps = "NATIVE_FPS" + // @enum SampleType + SampleTypeNativeMinDrawtime = "NATIVE_MIN_DRAWTIME" + // @enum SampleType + SampleTypeNativeAvgDrawtime = "NATIVE_AVG_DRAWTIME" + // @enum SampleType + SampleTypeNativeMaxDrawtime = "NATIVE_MAX_DRAWTIME" + // @enum SampleType + SampleTypeOpenglFrames = "OPENGL_FRAMES" + // @enum SampleType + SampleTypeOpenglFps = "OPENGL_FPS" + // @enum SampleType + SampleTypeOpenglMinDrawtime = "OPENGL_MIN_DRAWTIME" + // @enum SampleType + SampleTypeOpenglAvgDrawtime = "OPENGL_AVG_DRAWTIME" + // @enum SampleType + SampleTypeOpenglMaxDrawtime = "OPENGL_MAX_DRAWTIME" +) + +const ( + // @enum TestType + TestTypeBuiltinFuzz = "BUILTIN_FUZZ" + // @enum TestType + TestTypeBuiltinExplorer = "BUILTIN_EXPLORER" + // @enum TestType + TestTypeAppiumJavaJunit = "APPIUM_JAVA_JUNIT" + // @enum TestType + TestTypeAppiumJavaTestng = "APPIUM_JAVA_TESTNG" + // @enum TestType + TestTypeAppiumPython = "APPIUM_PYTHON" + // @enum TestType + TestTypeAppiumWebJavaJunit = "APPIUM_WEB_JAVA_JUNIT" + // @enum TestType + TestTypeAppiumWebJavaTestng = "APPIUM_WEB_JAVA_TESTNG" + // @enum TestType + TestTypeAppiumWebPython = "APPIUM_WEB_PYTHON" + // @enum TestType + TestTypeCalabash = "CALABASH" + // @enum TestType + TestTypeInstrumentation = "INSTRUMENTATION" + // @enum TestType + TestTypeUiautomation = "UIAUTOMATION" + // @enum TestType + TestTypeUiautomator = "UIAUTOMATOR" + // @enum TestType + TestTypeXctest = "XCTEST" + // @enum TestType + TestTypeXctestUi = "XCTEST_UI" +) + +const ( + // @enum UploadStatus + UploadStatusInitialized = "INITIALIZED" + // @enum UploadStatus + 
UploadStatusProcessing = "PROCESSING" + // @enum UploadStatus + UploadStatusSucceeded = "SUCCEEDED" + // @enum UploadStatus + UploadStatusFailed = "FAILED" +) + +const ( + // @enum UploadType + UploadTypeAndroidApp = "ANDROID_APP" + // @enum UploadType + UploadTypeIosApp = "IOS_APP" + // @enum UploadType + UploadTypeWebApp = "WEB_APP" + // @enum UploadType + UploadTypeExternalData = "EXTERNAL_DATA" + // @enum UploadType + UploadTypeAppiumJavaJunitTestPackage = "APPIUM_JAVA_JUNIT_TEST_PACKAGE" + // @enum UploadType + UploadTypeAppiumJavaTestngTestPackage = "APPIUM_JAVA_TESTNG_TEST_PACKAGE" + // @enum UploadType + UploadTypeAppiumPythonTestPackage = "APPIUM_PYTHON_TEST_PACKAGE" + // @enum UploadType + UploadTypeAppiumWebJavaJunitTestPackage = "APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE" + // @enum UploadType + UploadTypeAppiumWebJavaTestngTestPackage = "APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE" + // @enum UploadType + UploadTypeAppiumWebPythonTestPackage = "APPIUM_WEB_PYTHON_TEST_PACKAGE" + // @enum UploadType + UploadTypeCalabashTestPackage = "CALABASH_TEST_PACKAGE" + // @enum UploadType + UploadTypeInstrumentationTestPackage = "INSTRUMENTATION_TEST_PACKAGE" + // @enum UploadType + UploadTypeUiautomationTestPackage = "UIAUTOMATION_TEST_PACKAGE" + // @enum UploadType + UploadTypeUiautomatorTestPackage = "UIAUTOMATOR_TEST_PACKAGE" + // @enum UploadType + UploadTypeXctestTestPackage = "XCTEST_TEST_PACKAGE" + // @enum UploadType + UploadTypeXctestUiTestPackage = "XCTEST_UI_TEST_PACKAGE" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/devicefarm/devicefarmiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/devicefarm/devicefarmiface/interface.go new file mode 100644 index 000000000..23e5f1878 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/devicefarm/devicefarmiface/interface.go @@ -0,0 +1,190 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package devicefarmiface provides an interface for the AWS Device Farm. +package devicefarmiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/devicefarm" +) + +// DeviceFarmAPI is the interface type for devicefarm.DeviceFarm. 
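+//
+// Its primary use is decoupling calling code from the concrete client so that
+// Device Farm can be stubbed out in unit tests. A minimal sketch, assuming a
+// hypothetical mockDeviceFarmClient type (not part of this package):
+//
+//    type mockDeviceFarmClient struct {
+//        devicefarmiface.DeviceFarmAPI
+//    }
+//
+//    // Override only the method under test; the embedded interface
+//    // satisfies the rest.
+//    func (m *mockDeviceFarmClient) ListDevices(in *devicefarm.ListDevicesInput) (*devicefarm.ListDevicesOutput, error) {
+//        return &devicefarm.ListDevicesOutput{}, nil // canned test response
+//    }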
+type DeviceFarmAPI interface { + CreateDevicePoolRequest(*devicefarm.CreateDevicePoolInput) (*request.Request, *devicefarm.CreateDevicePoolOutput) + + CreateDevicePool(*devicefarm.CreateDevicePoolInput) (*devicefarm.CreateDevicePoolOutput, error) + + CreateProjectRequest(*devicefarm.CreateProjectInput) (*request.Request, *devicefarm.CreateProjectOutput) + + CreateProject(*devicefarm.CreateProjectInput) (*devicefarm.CreateProjectOutput, error) + + CreateUploadRequest(*devicefarm.CreateUploadInput) (*request.Request, *devicefarm.CreateUploadOutput) + + CreateUpload(*devicefarm.CreateUploadInput) (*devicefarm.CreateUploadOutput, error) + + DeleteDevicePoolRequest(*devicefarm.DeleteDevicePoolInput) (*request.Request, *devicefarm.DeleteDevicePoolOutput) + + DeleteDevicePool(*devicefarm.DeleteDevicePoolInput) (*devicefarm.DeleteDevicePoolOutput, error) + + DeleteProjectRequest(*devicefarm.DeleteProjectInput) (*request.Request, *devicefarm.DeleteProjectOutput) + + DeleteProject(*devicefarm.DeleteProjectInput) (*devicefarm.DeleteProjectOutput, error) + + DeleteRunRequest(*devicefarm.DeleteRunInput) (*request.Request, *devicefarm.DeleteRunOutput) + + DeleteRun(*devicefarm.DeleteRunInput) (*devicefarm.DeleteRunOutput, error) + + DeleteUploadRequest(*devicefarm.DeleteUploadInput) (*request.Request, *devicefarm.DeleteUploadOutput) + + DeleteUpload(*devicefarm.DeleteUploadInput) (*devicefarm.DeleteUploadOutput, error) + + GetAccountSettingsRequest(*devicefarm.GetAccountSettingsInput) (*request.Request, *devicefarm.GetAccountSettingsOutput) + + GetAccountSettings(*devicefarm.GetAccountSettingsInput) (*devicefarm.GetAccountSettingsOutput, error) + + GetDeviceRequest(*devicefarm.GetDeviceInput) (*request.Request, *devicefarm.GetDeviceOutput) + + GetDevice(*devicefarm.GetDeviceInput) (*devicefarm.GetDeviceOutput, error) + + GetDevicePoolRequest(*devicefarm.GetDevicePoolInput) (*request.Request, *devicefarm.GetDevicePoolOutput) + + GetDevicePool(*devicefarm.GetDevicePoolInput) (*devicefarm.GetDevicePoolOutput, error) + + GetDevicePoolCompatibilityRequest(*devicefarm.GetDevicePoolCompatibilityInput) (*request.Request, *devicefarm.GetDevicePoolCompatibilityOutput) + + GetDevicePoolCompatibility(*devicefarm.GetDevicePoolCompatibilityInput) (*devicefarm.GetDevicePoolCompatibilityOutput, error) + + GetJobRequest(*devicefarm.GetJobInput) (*request.Request, *devicefarm.GetJobOutput) + + GetJob(*devicefarm.GetJobInput) (*devicefarm.GetJobOutput, error) + + GetOfferingStatusRequest(*devicefarm.GetOfferingStatusInput) (*request.Request, *devicefarm.GetOfferingStatusOutput) + + GetOfferingStatus(*devicefarm.GetOfferingStatusInput) (*devicefarm.GetOfferingStatusOutput, error) + + GetOfferingStatusPages(*devicefarm.GetOfferingStatusInput, func(*devicefarm.GetOfferingStatusOutput, bool) bool) error + + GetProjectRequest(*devicefarm.GetProjectInput) (*request.Request, *devicefarm.GetProjectOutput) + + GetProject(*devicefarm.GetProjectInput) (*devicefarm.GetProjectOutput, error) + + GetRunRequest(*devicefarm.GetRunInput) (*request.Request, *devicefarm.GetRunOutput) + + GetRun(*devicefarm.GetRunInput) (*devicefarm.GetRunOutput, error) + + GetSuiteRequest(*devicefarm.GetSuiteInput) (*request.Request, *devicefarm.GetSuiteOutput) + + GetSuite(*devicefarm.GetSuiteInput) (*devicefarm.GetSuiteOutput, error) + + GetTestRequest(*devicefarm.GetTestInput) (*request.Request, *devicefarm.GetTestOutput) + + GetTest(*devicefarm.GetTestInput) (*devicefarm.GetTestOutput, error) + + GetUploadRequest(*devicefarm.GetUploadInput) 
(*request.Request, *devicefarm.GetUploadOutput) + + GetUpload(*devicefarm.GetUploadInput) (*devicefarm.GetUploadOutput, error) + + ListArtifactsRequest(*devicefarm.ListArtifactsInput) (*request.Request, *devicefarm.ListArtifactsOutput) + + ListArtifacts(*devicefarm.ListArtifactsInput) (*devicefarm.ListArtifactsOutput, error) + + ListArtifactsPages(*devicefarm.ListArtifactsInput, func(*devicefarm.ListArtifactsOutput, bool) bool) error + + ListDevicePoolsRequest(*devicefarm.ListDevicePoolsInput) (*request.Request, *devicefarm.ListDevicePoolsOutput) + + ListDevicePools(*devicefarm.ListDevicePoolsInput) (*devicefarm.ListDevicePoolsOutput, error) + + ListDevicePoolsPages(*devicefarm.ListDevicePoolsInput, func(*devicefarm.ListDevicePoolsOutput, bool) bool) error + + ListDevicesRequest(*devicefarm.ListDevicesInput) (*request.Request, *devicefarm.ListDevicesOutput) + + ListDevices(*devicefarm.ListDevicesInput) (*devicefarm.ListDevicesOutput, error) + + ListDevicesPages(*devicefarm.ListDevicesInput, func(*devicefarm.ListDevicesOutput, bool) bool) error + + ListJobsRequest(*devicefarm.ListJobsInput) (*request.Request, *devicefarm.ListJobsOutput) + + ListJobs(*devicefarm.ListJobsInput) (*devicefarm.ListJobsOutput, error) + + ListJobsPages(*devicefarm.ListJobsInput, func(*devicefarm.ListJobsOutput, bool) bool) error + + ListOfferingTransactionsRequest(*devicefarm.ListOfferingTransactionsInput) (*request.Request, *devicefarm.ListOfferingTransactionsOutput) + + ListOfferingTransactions(*devicefarm.ListOfferingTransactionsInput) (*devicefarm.ListOfferingTransactionsOutput, error) + + ListOfferingTransactionsPages(*devicefarm.ListOfferingTransactionsInput, func(*devicefarm.ListOfferingTransactionsOutput, bool) bool) error + + ListOfferingsRequest(*devicefarm.ListOfferingsInput) (*request.Request, *devicefarm.ListOfferingsOutput) + + ListOfferings(*devicefarm.ListOfferingsInput) (*devicefarm.ListOfferingsOutput, error) + + ListOfferingsPages(*devicefarm.ListOfferingsInput, func(*devicefarm.ListOfferingsOutput, bool) bool) error + + ListProjectsRequest(*devicefarm.ListProjectsInput) (*request.Request, *devicefarm.ListProjectsOutput) + + ListProjects(*devicefarm.ListProjectsInput) (*devicefarm.ListProjectsOutput, error) + + ListProjectsPages(*devicefarm.ListProjectsInput, func(*devicefarm.ListProjectsOutput, bool) bool) error + + ListRunsRequest(*devicefarm.ListRunsInput) (*request.Request, *devicefarm.ListRunsOutput) + + ListRuns(*devicefarm.ListRunsInput) (*devicefarm.ListRunsOutput, error) + + ListRunsPages(*devicefarm.ListRunsInput, func(*devicefarm.ListRunsOutput, bool) bool) error + + ListSamplesRequest(*devicefarm.ListSamplesInput) (*request.Request, *devicefarm.ListSamplesOutput) + + ListSamples(*devicefarm.ListSamplesInput) (*devicefarm.ListSamplesOutput, error) + + ListSamplesPages(*devicefarm.ListSamplesInput, func(*devicefarm.ListSamplesOutput, bool) bool) error + + ListSuitesRequest(*devicefarm.ListSuitesInput) (*request.Request, *devicefarm.ListSuitesOutput) + + ListSuites(*devicefarm.ListSuitesInput) (*devicefarm.ListSuitesOutput, error) + + ListSuitesPages(*devicefarm.ListSuitesInput, func(*devicefarm.ListSuitesOutput, bool) bool) error + + ListTestsRequest(*devicefarm.ListTestsInput) (*request.Request, *devicefarm.ListTestsOutput) + + ListTests(*devicefarm.ListTestsInput) (*devicefarm.ListTestsOutput, error) + + ListTestsPages(*devicefarm.ListTestsInput, func(*devicefarm.ListTestsOutput, bool) bool) error + + ListUniqueProblemsRequest(*devicefarm.ListUniqueProblemsInput) (*request.Request, 
*devicefarm.ListUniqueProblemsOutput) + + ListUniqueProblems(*devicefarm.ListUniqueProblemsInput) (*devicefarm.ListUniqueProblemsOutput, error) + + ListUniqueProblemsPages(*devicefarm.ListUniqueProblemsInput, func(*devicefarm.ListUniqueProblemsOutput, bool) bool) error + + ListUploadsRequest(*devicefarm.ListUploadsInput) (*request.Request, *devicefarm.ListUploadsOutput) + + ListUploads(*devicefarm.ListUploadsInput) (*devicefarm.ListUploadsOutput, error) + + ListUploadsPages(*devicefarm.ListUploadsInput, func(*devicefarm.ListUploadsOutput, bool) bool) error + + PurchaseOfferingRequest(*devicefarm.PurchaseOfferingInput) (*request.Request, *devicefarm.PurchaseOfferingOutput) + + PurchaseOffering(*devicefarm.PurchaseOfferingInput) (*devicefarm.PurchaseOfferingOutput, error) + + RenewOfferingRequest(*devicefarm.RenewOfferingInput) (*request.Request, *devicefarm.RenewOfferingOutput) + + RenewOffering(*devicefarm.RenewOfferingInput) (*devicefarm.RenewOfferingOutput, error) + + ScheduleRunRequest(*devicefarm.ScheduleRunInput) (*request.Request, *devicefarm.ScheduleRunOutput) + + ScheduleRun(*devicefarm.ScheduleRunInput) (*devicefarm.ScheduleRunOutput, error) + + StopRunRequest(*devicefarm.StopRunInput) (*request.Request, *devicefarm.StopRunOutput) + + StopRun(*devicefarm.StopRunInput) (*devicefarm.StopRunOutput, error) + + UpdateDevicePoolRequest(*devicefarm.UpdateDevicePoolInput) (*request.Request, *devicefarm.UpdateDevicePoolOutput) + + UpdateDevicePool(*devicefarm.UpdateDevicePoolInput) (*devicefarm.UpdateDevicePoolOutput, error) + + UpdateProjectRequest(*devicefarm.UpdateProjectInput) (*request.Request, *devicefarm.UpdateProjectOutput) + + UpdateProject(*devicefarm.UpdateProjectInput) (*devicefarm.UpdateProjectOutput, error) +} + +var _ DeviceFarmAPI = (*devicefarm.DeviceFarm)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/devicefarm/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/devicefarm/examples_test.go new file mode 100644 index 000000000..56519d9c7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/devicefarm/examples_test.go @@ -0,0 +1,790 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package devicefarm_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/devicefarm" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleDeviceFarm_CreateDevicePool() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.CreateDevicePoolInput{ + Name: aws.String("Name"), // Required + ProjectArn: aws.String("AmazonResourceName"), // Required + Rules: []*devicefarm.Rule{ // Required + { // Required + Attribute: aws.String("DeviceAttribute"), + Operator: aws.String("RuleOperator"), + Value: aws.String("String"), + }, + // More values... + }, + Description: aws.String("Message"), + } + resp, err := svc.CreateDevicePool(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_CreateProject() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.CreateProjectInput{ + Name: aws.String("Name"), // Required + } + resp, err := svc.CreateProject(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_CreateUpload() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.CreateUploadInput{ + Name: aws.String("Name"), // Required + ProjectArn: aws.String("AmazonResourceName"), // Required + Type: aws.String("UploadType"), // Required + ContentType: aws.String("ContentType"), + } + resp, err := svc.CreateUpload(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_DeleteDevicePool() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.DeleteDevicePoolInput{ + Arn: aws.String("AmazonResourceName"), // Required + } + resp, err := svc.DeleteDevicePool(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_DeleteProject() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.DeleteProjectInput{ + Arn: aws.String("AmazonResourceName"), // Required + } + resp, err := svc.DeleteProject(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_DeleteRun() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.DeleteRunInput{ + Arn: aws.String("AmazonResourceName"), // Required + } + resp, err := svc.DeleteRun(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_DeleteUpload() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.DeleteUploadInput{ + Arn: aws.String("AmazonResourceName"), // Required + } + resp, err := svc.DeleteUpload(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_GetAccountSettings() { + svc := devicefarm.New(session.New()) + + var params *devicefarm.GetAccountSettingsInput + resp, err := svc.GetAccountSettings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_GetDevice() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.GetDeviceInput{ + Arn: aws.String("AmazonResourceName"), // Required + } + resp, err := svc.GetDevice(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDeviceFarm_GetDevicePool() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.GetDevicePoolInput{ + Arn: aws.String("AmazonResourceName"), // Required + } + resp, err := svc.GetDevicePool(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_GetDevicePoolCompatibility() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.GetDevicePoolCompatibilityInput{ + DevicePoolArn: aws.String("AmazonResourceName"), // Required + AppArn: aws.String("AmazonResourceName"), + TestType: aws.String("TestType"), + } + resp, err := svc.GetDevicePoolCompatibility(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_GetJob() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.GetJobInput{ + Arn: aws.String("AmazonResourceName"), // Required + } + resp, err := svc.GetJob(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_GetOfferingStatus() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.GetOfferingStatusInput{ + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.GetOfferingStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_GetProject() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.GetProjectInput{ + Arn: aws.String("AmazonResourceName"), // Required + } + resp, err := svc.GetProject(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_GetRun() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.GetRunInput{ + Arn: aws.String("AmazonResourceName"), // Required + } + resp, err := svc.GetRun(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_GetSuite() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.GetSuiteInput{ + Arn: aws.String("AmazonResourceName"), // Required + } + resp, err := svc.GetSuite(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_GetTest() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.GetTestInput{ + Arn: aws.String("AmazonResourceName"), // Required + } + resp, err := svc.GetTest(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_GetUpload() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.GetUploadInput{ + Arn: aws.String("AmazonResourceName"), // Required + } + resp, err := svc.GetUpload(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_ListArtifacts() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.ListArtifactsInput{ + Arn: aws.String("AmazonResourceName"), // Required + Type: aws.String("ArtifactCategory"), // Required + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListArtifacts(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_ListDevicePools() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.ListDevicePoolsInput{ + Arn: aws.String("AmazonResourceName"), // Required + NextToken: aws.String("PaginationToken"), + Type: aws.String("DevicePoolType"), + } + resp, err := svc.ListDevicePools(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_ListDevices() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.ListDevicesInput{ + Arn: aws.String("AmazonResourceName"), + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListDevices(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_ListJobs() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.ListJobsInput{ + Arn: aws.String("AmazonResourceName"), // Required + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListJobs(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_ListOfferingTransactions() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.ListOfferingTransactionsInput{ + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListOfferingTransactions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_ListOfferings() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.ListOfferingsInput{ + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListOfferings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDeviceFarm_ListProjects() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.ListProjectsInput{ + Arn: aws.String("AmazonResourceName"), + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListProjects(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_ListRuns() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.ListRunsInput{ + Arn: aws.String("AmazonResourceName"), // Required + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListRuns(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_ListSamples() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.ListSamplesInput{ + Arn: aws.String("AmazonResourceName"), // Required + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListSamples(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_ListSuites() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.ListSuitesInput{ + Arn: aws.String("AmazonResourceName"), // Required + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListSuites(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_ListTests() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.ListTestsInput{ + Arn: aws.String("AmazonResourceName"), // Required + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListTests(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_ListUniqueProblems() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.ListUniqueProblemsInput{ + Arn: aws.String("AmazonResourceName"), // Required + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListUniqueProblems(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_ListUploads() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.ListUploadsInput{ + Arn: aws.String("AmazonResourceName"), // Required + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListUploads(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDeviceFarm_PurchaseOffering() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.PurchaseOfferingInput{ + OfferingId: aws.String("OfferingIdentifier"), + Quantity: aws.Int64(1), + } + resp, err := svc.PurchaseOffering(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_RenewOffering() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.RenewOfferingInput{ + OfferingId: aws.String("OfferingIdentifier"), + Quantity: aws.Int64(1), + } + resp, err := svc.RenewOffering(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_ScheduleRun() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.ScheduleRunInput{ + DevicePoolArn: aws.String("AmazonResourceName"), // Required + ProjectArn: aws.String("AmazonResourceName"), // Required + Test: &devicefarm.ScheduleRunTest{ // Required + Type: aws.String("TestType"), // Required + Filter: aws.String("Filter"), + Parameters: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + TestPackageArn: aws.String("AmazonResourceName"), + }, + AppArn: aws.String("AmazonResourceName"), + Configuration: &devicefarm.ScheduleRunConfiguration{ + AuxiliaryApps: []*string{ + aws.String("AmazonResourceName"), // Required + // More values... + }, + BillingMethod: aws.String("BillingMethod"), + ExtraDataPackageArn: aws.String("AmazonResourceName"), + Locale: aws.String("String"), + Location: &devicefarm.Location{ + Latitude: aws.Float64(1.0), // Required + Longitude: aws.Float64(1.0), // Required + }, + NetworkProfileArn: aws.String("AmazonResourceName"), + Radios: &devicefarm.Radios{ + Bluetooth: aws.Bool(true), + Gps: aws.Bool(true), + Nfc: aws.Bool(true), + Wifi: aws.Bool(true), + }, + }, + Name: aws.String("Name"), + } + resp, err := svc.ScheduleRun(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_StopRun() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.StopRunInput{ + Arn: aws.String("AmazonResourceName"), // Required + } + resp, err := svc.StopRun(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_UpdateDevicePool() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.UpdateDevicePoolInput{ + Arn: aws.String("AmazonResourceName"), // Required + Description: aws.String("Message"), + Name: aws.String("Name"), + Rules: []*devicefarm.Rule{ + { // Required + Attribute: aws.String("DeviceAttribute"), + Operator: aws.String("RuleOperator"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.UpdateDevicePool(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDeviceFarm_UpdateProject() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.UpdateProjectInput{ + Arn: aws.String("AmazonResourceName"), // Required + Name: aws.String("Name"), + } + resp, err := svc.UpdateProject(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/devicefarm/service.go b/vendor/github.com/aws/aws-sdk-go/service/devicefarm/service.go new file mode 100644 index 000000000..12cfb829e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/devicefarm/service.go @@ -0,0 +1,90 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package devicefarm + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// AWS Device Farm is a service that enables mobile app developers to test Android, +// iOS, and Fire OS apps on physical phones, tablets, and other devices in the +// cloud. +// The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type DeviceFarm struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "devicefarm" + +// New creates a new instance of the DeviceFarm client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a DeviceFarm client from just a session. +// svc := devicefarm.New(mySession) +// +// // Create a DeviceFarm client with additional configuration +// svc := devicefarm.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *DeviceFarm { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *DeviceFarm { + svc := &DeviceFarm{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-06-23", + JSONVersion: "1.1", + TargetPrefix: "DeviceFarm_20150623", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a DeviceFarm operation and runs any + // custom request initialization.
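+//
+// From a caller's perspective, the same request lifecycle can be customized
+// through the exported Request methods on the client. A hedged sketch (the
+// logging callback is illustrative; Handlers.Send.PushFront is a real
+// request.HandlerList method):
+//
+//    req, resp := svc.CreateProjectRequest(params)
+//    req.Handlers.Send.PushFront(func(r *request.Request) {
+//        fmt.Println("sending", r.Operation.Name) // custom pre-send hook
+//    })
+//    err := req.Send()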
+func (c *DeviceFarm) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/directconnect/api.go b/vendor/github.com/aws/aws-sdk-go/service/directconnect/api.go new file mode 100644 index 000000000..942ac9118 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/directconnect/api.go @@ -0,0 +1,3122 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package directconnect provides a client for AWS Direct Connect. +package directconnect + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAllocateConnectionOnInterconnect = "AllocateConnectionOnInterconnect" + +// AllocateConnectionOnInterconnectRequest generates a "aws/request.Request" representing the +// client's request for the AllocateConnectionOnInterconnect operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AllocateConnectionOnInterconnect method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AllocateConnectionOnInterconnectRequest method. +// req, resp := client.AllocateConnectionOnInterconnectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectConnect) AllocateConnectionOnInterconnectRequest(input *AllocateConnectionOnInterconnectInput) (req *request.Request, output *Connection) { + op := &request.Operation{ + Name: opAllocateConnectionOnInterconnect, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AllocateConnectionOnInterconnectInput{} + } + + req = c.newRequest(op, input, output) + output = &Connection{} + req.Data = output + return +} + +// Creates a hosted connection on an interconnect. +// +// Allocates a VLAN number and a specified amount of bandwidth for use by a +// hosted connection on the given interconnect. +// +// This is intended for use by AWS Direct Connect partners only. +func (c *DirectConnect) AllocateConnectionOnInterconnect(input *AllocateConnectionOnInterconnectInput) (*Connection, error) { + req, out := c.AllocateConnectionOnInterconnectRequest(input) + err := req.Send() + return out, err +} + +const opAllocatePrivateVirtualInterface = "AllocatePrivateVirtualInterface" + +// AllocatePrivateVirtualInterfaceRequest generates a "aws/request.Request" representing the +// client's request for the AllocatePrivateVirtualInterface operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the AllocatePrivateVirtualInterface method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AllocatePrivateVirtualInterfaceRequest method. +// req, resp := client.AllocatePrivateVirtualInterfaceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectConnect) AllocatePrivateVirtualInterfaceRequest(input *AllocatePrivateVirtualInterfaceInput) (req *request.Request, output *VirtualInterface) { + op := &request.Operation{ + Name: opAllocatePrivateVirtualInterface, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AllocatePrivateVirtualInterfaceInput{} + } + + req = c.newRequest(op, input, output) + output = &VirtualInterface{} + req.Data = output + return +} + +// Provisions a private virtual interface to be owned by a different customer. +// +// The owner of a connection calls this function to provision a private virtual +// interface which will be owned by another AWS customer. +// +// Virtual interfaces created using this function must be confirmed by the +// virtual interface owner by calling ConfirmPrivateVirtualInterface. Until +// this step has been completed, the virtual interface will be in 'Confirming' +// state, and will not be available for handling traffic. +func (c *DirectConnect) AllocatePrivateVirtualInterface(input *AllocatePrivateVirtualInterfaceInput) (*VirtualInterface, error) { + req, out := c.AllocatePrivateVirtualInterfaceRequest(input) + err := req.Send() + return out, err +} + +const opAllocatePublicVirtualInterface = "AllocatePublicVirtualInterface" + +// AllocatePublicVirtualInterfaceRequest generates a "aws/request.Request" representing the +// client's request for the AllocatePublicVirtualInterface operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AllocatePublicVirtualInterface method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AllocatePublicVirtualInterfaceRequest method. +// req, resp := client.AllocatePublicVirtualInterfaceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectConnect) AllocatePublicVirtualInterfaceRequest(input *AllocatePublicVirtualInterfaceInput) (req *request.Request, output *VirtualInterface) { + op := &request.Operation{ + Name: opAllocatePublicVirtualInterface, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AllocatePublicVirtualInterfaceInput{} + } + + req = c.newRequest(op, input, output) + output = &VirtualInterface{} + req.Data = output + return +} + +// Provisions a public virtual interface to be owned by a different customer. +// +// The owner of a connection calls this function to provision a public virtual +// interface which will be owned by another AWS customer. 
+// +// Virtual interfaces created using this function must be confirmed by the +// virtual interface owner by calling ConfirmPublicVirtualInterface. Until this +// step has been completed, the virtual interface will be in 'Confirming' state, +// and will not be available for handling traffic. +func (c *DirectConnect) AllocatePublicVirtualInterface(input *AllocatePublicVirtualInterfaceInput) (*VirtualInterface, error) { + req, out := c.AllocatePublicVirtualInterfaceRequest(input) + err := req.Send() + return out, err +} + +const opConfirmConnection = "ConfirmConnection" + +// ConfirmConnectionRequest generates a "aws/request.Request" representing the +// client's request for the ConfirmConnection operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ConfirmConnection method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ConfirmConnectionRequest method. +// req, resp := client.ConfirmConnectionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectConnect) ConfirmConnectionRequest(input *ConfirmConnectionInput) (req *request.Request, output *ConfirmConnectionOutput) { + op := &request.Operation{ + Name: opConfirmConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ConfirmConnectionInput{} + } + + req = c.newRequest(op, input, output) + output = &ConfirmConnectionOutput{} + req.Data = output + return +} + +// Confirm the creation of a hosted connection on an interconnect. +// +// Upon creation, the hosted connection is initially in the 'Ordering' state, +// and will remain in this state until the owner calls ConfirmConnection to +// confirm creation of the hosted connection. +func (c *DirectConnect) ConfirmConnection(input *ConfirmConnectionInput) (*ConfirmConnectionOutput, error) { + req, out := c.ConfirmConnectionRequest(input) + err := req.Send() + return out, err +} + +const opConfirmPrivateVirtualInterface = "ConfirmPrivateVirtualInterface" + +// ConfirmPrivateVirtualInterfaceRequest generates a "aws/request.Request" representing the +// client's request for the ConfirmPrivateVirtualInterface operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ConfirmPrivateVirtualInterface method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ConfirmPrivateVirtualInterfaceRequest method. 
+// req, resp := client.ConfirmPrivateVirtualInterfaceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectConnect) ConfirmPrivateVirtualInterfaceRequest(input *ConfirmPrivateVirtualInterfaceInput) (req *request.Request, output *ConfirmPrivateVirtualInterfaceOutput) { + op := &request.Operation{ + Name: opConfirmPrivateVirtualInterface, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ConfirmPrivateVirtualInterfaceInput{} + } + + req = c.newRequest(op, input, output) + output = &ConfirmPrivateVirtualInterfaceOutput{} + req.Data = output + return +} + +// Accept ownership of a private virtual interface created by another customer. +// +// After the virtual interface owner calls this function, the virtual interface +// will be created and attached to the given virtual private gateway, and will +// be available for handling traffic. +func (c *DirectConnect) ConfirmPrivateVirtualInterface(input *ConfirmPrivateVirtualInterfaceInput) (*ConfirmPrivateVirtualInterfaceOutput, error) { + req, out := c.ConfirmPrivateVirtualInterfaceRequest(input) + err := req.Send() + return out, err +} + +const opConfirmPublicVirtualInterface = "ConfirmPublicVirtualInterface" + +// ConfirmPublicVirtualInterfaceRequest generates a "aws/request.Request" representing the +// client's request for the ConfirmPublicVirtualInterface operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ConfirmPublicVirtualInterface method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ConfirmPublicVirtualInterfaceRequest method. +// req, resp := client.ConfirmPublicVirtualInterfaceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectConnect) ConfirmPublicVirtualInterfaceRequest(input *ConfirmPublicVirtualInterfaceInput) (req *request.Request, output *ConfirmPublicVirtualInterfaceOutput) { + op := &request.Operation{ + Name: opConfirmPublicVirtualInterface, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ConfirmPublicVirtualInterfaceInput{} + } + + req = c.newRequest(op, input, output) + output = &ConfirmPublicVirtualInterfaceOutput{} + req.Data = output + return +} + +// Accept ownership of a public virtual interface created by another customer. +// +// After the virtual interface owner calls this function, the specified virtual +// interface will be created and made available for handling traffic. +func (c *DirectConnect) ConfirmPublicVirtualInterface(input *ConfirmPublicVirtualInterfaceInput) (*ConfirmPublicVirtualInterfaceOutput, error) { + req, out := c.ConfirmPublicVirtualInterfaceRequest(input) + err := req.Send() + return out, err +} + +const opCreateConnection = "CreateConnection" + +// CreateConnectionRequest generates a "aws/request.Request" representing the +// client's request for the CreateConnection operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateConnection method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateConnectionRequest method. +// req, resp := client.CreateConnectionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectConnect) CreateConnectionRequest(input *CreateConnectionInput) (req *request.Request, output *Connection) { + op := &request.Operation{ + Name: opCreateConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateConnectionInput{} + } + + req = c.newRequest(op, input, output) + output = &Connection{} + req.Data = output + return +} + +// Creates a new connection between the customer network and a specific AWS +// Direct Connect location. +// +// A connection links your internal network to an AWS Direct Connect location +// over a standard 1 gigabit or 10 gigabit Ethernet fiber-optic cable. One end +// of the cable is connected to your router, the other to an AWS Direct Connect +// router. An AWS Direct Connect location provides access to Amazon Web Services +// in the region it is associated with. You can establish connections with AWS +// Direct Connect locations in multiple regions, but a connection in one region +// does not provide connectivity to other regions. +func (c *DirectConnect) CreateConnection(input *CreateConnectionInput) (*Connection, error) { + req, out := c.CreateConnectionRequest(input) + err := req.Send() + return out, err +} + +const opCreateInterconnect = "CreateInterconnect" + +// CreateInterconnectRequest generates a "aws/request.Request" representing the +// client's request for the CreateInterconnect operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateInterconnect method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateInterconnectRequest method. 
+//    req, resp := client.CreateInterconnectRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *DirectConnect) CreateInterconnectRequest(input *CreateInterconnectInput) (req *request.Request, output *Interconnect) {
+    op := &request.Operation{
+        Name:       opCreateInterconnect,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &CreateInterconnectInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &Interconnect{}
+    req.Data = output
+    return
+}
+
+// Creates a new interconnect between an AWS Direct Connect partner's network
+// and a specific AWS Direct Connect location.
+//
+// An interconnect is a connection which is capable of hosting other connections.
+// The AWS Direct Connect partner can use an interconnect to provide sub-1Gbps
+// AWS Direct Connect service to tier 2 customers who do not have their own
+// connections. Like a standard connection, an interconnect links the AWS Direct
+// Connect partner's network to an AWS Direct Connect location over a standard
+// 1 Gbps or 10 Gbps Ethernet fiber-optic cable. One end is connected to the
+// partner's router, the other to an AWS Direct Connect router.
+//
+// For each end customer, the AWS Direct Connect partner provisions a connection
+// on their interconnect by calling AllocateConnectionOnInterconnect. The end
+// customer can then connect to AWS resources by creating a virtual interface
+// on their connection, using the VLAN assigned to them by the AWS Direct Connect
+// partner.
+//
+// This is intended for use by AWS Direct Connect partners only.
+func (c *DirectConnect) CreateInterconnect(input *CreateInterconnectInput) (*Interconnect, error) {
+    req, out := c.CreateInterconnectRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opCreatePrivateVirtualInterface = "CreatePrivateVirtualInterface"
+
+// CreatePrivateVirtualInterfaceRequest generates a "aws/request.Request" representing the
+// client's request for the CreatePrivateVirtualInterface operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CreatePrivateVirtualInterface method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the CreatePrivateVirtualInterfaceRequest method.
+//    req, resp := client.CreatePrivateVirtualInterfaceRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *DirectConnect) CreatePrivateVirtualInterfaceRequest(input *CreatePrivateVirtualInterfaceInput) (req *request.Request, output *VirtualInterface) {
+    op := &request.Operation{
+        Name:       opCreatePrivateVirtualInterface,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &CreatePrivateVirtualInterfaceInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &VirtualInterface{}
+    req.Data = output
+    return
+}
+
+// Creates a new private virtual interface. A virtual interface is the VLAN
+// that transports AWS Direct Connect traffic.
A private virtual interface supports +// sending traffic to a single virtual private cloud (VPC). +func (c *DirectConnect) CreatePrivateVirtualInterface(input *CreatePrivateVirtualInterfaceInput) (*VirtualInterface, error) { + req, out := c.CreatePrivateVirtualInterfaceRequest(input) + err := req.Send() + return out, err +} + +const opCreatePublicVirtualInterface = "CreatePublicVirtualInterface" + +// CreatePublicVirtualInterfaceRequest generates a "aws/request.Request" representing the +// client's request for the CreatePublicVirtualInterface operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreatePublicVirtualInterface method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreatePublicVirtualInterfaceRequest method. +// req, resp := client.CreatePublicVirtualInterfaceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectConnect) CreatePublicVirtualInterfaceRequest(input *CreatePublicVirtualInterfaceInput) (req *request.Request, output *VirtualInterface) { + op := &request.Operation{ + Name: opCreatePublicVirtualInterface, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreatePublicVirtualInterfaceInput{} + } + + req = c.newRequest(op, input, output) + output = &VirtualInterface{} + req.Data = output + return +} + +// Creates a new public virtual interface. A virtual interface is the VLAN that +// transports AWS Direct Connect traffic. A public virtual interface supports +// sending traffic to public services of AWS such as Amazon Simple Storage Service +// (Amazon S3). +func (c *DirectConnect) CreatePublicVirtualInterface(input *CreatePublicVirtualInterfaceInput) (*VirtualInterface, error) { + req, out := c.CreatePublicVirtualInterfaceRequest(input) + err := req.Send() + return out, err +} + +const opDeleteConnection = "DeleteConnection" + +// DeleteConnectionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteConnection operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteConnection method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteConnectionRequest method. 
+// req, resp := client.DeleteConnectionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectConnect) DeleteConnectionRequest(input *DeleteConnectionInput) (req *request.Request, output *Connection) { + op := &request.Operation{ + Name: opDeleteConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteConnectionInput{} + } + + req = c.newRequest(op, input, output) + output = &Connection{} + req.Data = output + return +} + +// Deletes the connection. +// +// Deleting a connection only stops the AWS Direct Connect port hour and data +// transfer charges. You need to cancel separately with the providers any services +// or charges for cross-connects or network circuits that connect you to the +// AWS Direct Connect location. +func (c *DirectConnect) DeleteConnection(input *DeleteConnectionInput) (*Connection, error) { + req, out := c.DeleteConnectionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteInterconnect = "DeleteInterconnect" + +// DeleteInterconnectRequest generates a "aws/request.Request" representing the +// client's request for the DeleteInterconnect operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteInterconnect method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteInterconnectRequest method. +// req, resp := client.DeleteInterconnectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectConnect) DeleteInterconnectRequest(input *DeleteInterconnectInput) (req *request.Request, output *DeleteInterconnectOutput) { + op := &request.Operation{ + Name: opDeleteInterconnect, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteInterconnectInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteInterconnectOutput{} + req.Data = output + return +} + +// Deletes the specified interconnect. +// +// This is intended for use by AWS Direct Connect partners only. +func (c *DirectConnect) DeleteInterconnect(input *DeleteInterconnectInput) (*DeleteInterconnectOutput, error) { + req, out := c.DeleteInterconnectRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVirtualInterface = "DeleteVirtualInterface" + +// DeleteVirtualInterfaceRequest generates a "aws/request.Request" representing the +// client's request for the DeleteVirtualInterface operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteVirtualInterface method directly +// instead. 
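+//
+// A minimal sketch of that direct call, assuming an initialized *DirectConnect
+// client named "client"; the virtual interface ID below is a placeholder:
+//
+//    out, err := client.DeleteVirtualInterface(&DeleteVirtualInterfaceInput{
+//        VirtualInterfaceId: aws.String("dxvif-123dfg56"),
+//    })
+//    if err == nil {
+//        fmt.Println(out) // reports the resulting virtual interface state
+//    }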
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteVirtualInterfaceRequest method. +// req, resp := client.DeleteVirtualInterfaceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectConnect) DeleteVirtualInterfaceRequest(input *DeleteVirtualInterfaceInput) (req *request.Request, output *DeleteVirtualInterfaceOutput) { + op := &request.Operation{ + Name: opDeleteVirtualInterface, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVirtualInterfaceInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteVirtualInterfaceOutput{} + req.Data = output + return +} + +// Deletes a virtual interface. +func (c *DirectConnect) DeleteVirtualInterface(input *DeleteVirtualInterfaceInput) (*DeleteVirtualInterfaceOutput, error) { + req, out := c.DeleteVirtualInterfaceRequest(input) + err := req.Send() + return out, err +} + +const opDescribeConnectionLoa = "DescribeConnectionLoa" + +// DescribeConnectionLoaRequest generates a "aws/request.Request" representing the +// client's request for the DescribeConnectionLoa operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeConnectionLoa method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeConnectionLoaRequest method. +// req, resp := client.DescribeConnectionLoaRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectConnect) DescribeConnectionLoaRequest(input *DescribeConnectionLoaInput) (req *request.Request, output *DescribeConnectionLoaOutput) { + op := &request.Operation{ + Name: opDescribeConnectionLoa, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeConnectionLoaInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeConnectionLoaOutput{} + req.Data = output + return +} + +// Returns the LOA-CFA for a Connection. +// +// The Letter of Authorization - Connecting Facility Assignment (LOA-CFA) is +// a document that your APN partner or service provider uses when establishing +// your cross connect to AWS at the colocation facility. For more information, +// see Requesting Cross Connects at AWS Direct Connect Locations (http://docs.aws.amazon.com/directconnect/latest/UserGuide/Colocation.html) +// in the AWS Direct Connect user guide. +func (c *DirectConnect) DescribeConnectionLoa(input *DescribeConnectionLoaInput) (*DescribeConnectionLoaOutput, error) { + req, out := c.DescribeConnectionLoaRequest(input) + err := req.Send() + return out, err +} + +const opDescribeConnections = "DescribeConnections" + +// DescribeConnectionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeConnections operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
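+//
+// A minimal sketch of the two-step request pattern described here, assuming
+// an initialized *DirectConnect client named "client":
+//
+//    req, resp := client.DescribeConnectionsRequest(&DescribeConnectionsInput{})
+//    if err := req.Send(); err == nil {
+//        for _, conn := range resp.Connections {
+//            fmt.Println(conn)
+//        }
+//    }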
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeConnections method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeConnectionsRequest method. +// req, resp := client.DescribeConnectionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectConnect) DescribeConnectionsRequest(input *DescribeConnectionsInput) (req *request.Request, output *Connections) { + op := &request.Operation{ + Name: opDescribeConnections, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeConnectionsInput{} + } + + req = c.newRequest(op, input, output) + output = &Connections{} + req.Data = output + return +} + +// Displays all connections in this region. +// +// If a connection ID is provided, the call returns only that particular connection. +func (c *DirectConnect) DescribeConnections(input *DescribeConnectionsInput) (*Connections, error) { + req, out := c.DescribeConnectionsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeConnectionsOnInterconnect = "DescribeConnectionsOnInterconnect" + +// DescribeConnectionsOnInterconnectRequest generates a "aws/request.Request" representing the +// client's request for the DescribeConnectionsOnInterconnect operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeConnectionsOnInterconnect method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeConnectionsOnInterconnectRequest method. +// req, resp := client.DescribeConnectionsOnInterconnectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectConnect) DescribeConnectionsOnInterconnectRequest(input *DescribeConnectionsOnInterconnectInput) (req *request.Request, output *Connections) { + op := &request.Operation{ + Name: opDescribeConnectionsOnInterconnect, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeConnectionsOnInterconnectInput{} + } + + req = c.newRequest(op, input, output) + output = &Connections{} + req.Data = output + return +} + +// Return a list of connections that have been provisioned on the given interconnect. +// +// This is intended for use by AWS Direct Connect partners only. 
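+//
+// A minimal sketch of a direct call, assuming a partner account and an
+// initialized *DirectConnect client named "client"; "dxcon-456abc78" is a
+// placeholder interconnect ID, not a value taken from a real account:
+//
+//    conns, err := client.DescribeConnectionsOnInterconnect(&DescribeConnectionsOnInterconnectInput{
+//        InterconnectId: aws.String("dxcon-456abc78"),
+//    })
+//    if err == nil {
+//        fmt.Println(conns) // a Connections value listing each provisioned connection
+//    }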
+func (c *DirectConnect) DescribeConnectionsOnInterconnect(input *DescribeConnectionsOnInterconnectInput) (*Connections, error) { + req, out := c.DescribeConnectionsOnInterconnectRequest(input) + err := req.Send() + return out, err +} + +const opDescribeInterconnectLoa = "DescribeInterconnectLoa" + +// DescribeInterconnectLoaRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInterconnectLoa operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeInterconnectLoa method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeInterconnectLoaRequest method. +// req, resp := client.DescribeInterconnectLoaRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectConnect) DescribeInterconnectLoaRequest(input *DescribeInterconnectLoaInput) (req *request.Request, output *DescribeInterconnectLoaOutput) { + op := &request.Operation{ + Name: opDescribeInterconnectLoa, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeInterconnectLoaInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeInterconnectLoaOutput{} + req.Data = output + return +} + +// Returns the LOA-CFA for an Interconnect. +// +// The Letter of Authorization - Connecting Facility Assignment (LOA-CFA) is +// a document that is used when establishing your cross connect to AWS at the +// colocation facility. For more information, see Requesting Cross Connects +// at AWS Direct Connect Locations (http://docs.aws.amazon.com/directconnect/latest/UserGuide/Colocation.html) +// in the AWS Direct Connect user guide. +func (c *DirectConnect) DescribeInterconnectLoa(input *DescribeInterconnectLoaInput) (*DescribeInterconnectLoaOutput, error) { + req, out := c.DescribeInterconnectLoaRequest(input) + err := req.Send() + return out, err +} + +const opDescribeInterconnects = "DescribeInterconnects" + +// DescribeInterconnectsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInterconnects operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeInterconnects method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeInterconnectsRequest method. 
+// req, resp := client.DescribeInterconnectsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectConnect) DescribeInterconnectsRequest(input *DescribeInterconnectsInput) (req *request.Request, output *DescribeInterconnectsOutput) { + op := &request.Operation{ + Name: opDescribeInterconnects, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeInterconnectsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeInterconnectsOutput{} + req.Data = output + return +} + +// Returns a list of interconnects owned by the AWS account. +// +// If an interconnect ID is provided, it will only return this particular interconnect. +func (c *DirectConnect) DescribeInterconnects(input *DescribeInterconnectsInput) (*DescribeInterconnectsOutput, error) { + req, out := c.DescribeInterconnectsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeLocations = "DescribeLocations" + +// DescribeLocationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLocations operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeLocations method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeLocationsRequest method. +// req, resp := client.DescribeLocationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectConnect) DescribeLocationsRequest(input *DescribeLocationsInput) (req *request.Request, output *DescribeLocationsOutput) { + op := &request.Operation{ + Name: opDescribeLocations, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeLocationsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeLocationsOutput{} + req.Data = output + return +} + +// Returns the list of AWS Direct Connect locations in the current AWS region. +// These are the locations that may be selected when calling CreateConnection +// or CreateInterconnect. +func (c *DirectConnect) DescribeLocations(input *DescribeLocationsInput) (*DescribeLocationsOutput, error) { + req, out := c.DescribeLocationsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVirtualGateways = "DescribeVirtualGateways" + +// DescribeVirtualGatewaysRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVirtualGateways operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeVirtualGateways method directly +// instead. 
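+//
+// A minimal sketch of that direct call, assuming an initialized *DirectConnect
+// client named "client" (DescribeVirtualGatewaysInput carries no required
+// fields, so an empty value is enough):
+//
+//    out, err := client.DescribeVirtualGateways(&DescribeVirtualGatewaysInput{})
+//    if err == nil {
+//        fmt.Println(out) // lists the virtual private gateways owned by the account
+//    }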
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DescribeVirtualGatewaysRequest method.
+//    req, resp := client.DescribeVirtualGatewaysRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *DirectConnect) DescribeVirtualGatewaysRequest(input *DescribeVirtualGatewaysInput) (req *request.Request, output *DescribeVirtualGatewaysOutput) {
+    op := &request.Operation{
+        Name:       opDescribeVirtualGateways,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &DescribeVirtualGatewaysInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &DescribeVirtualGatewaysOutput{}
+    req.Data = output
+    return
+}
+
+// Returns a list of virtual private gateways owned by the AWS account.
+//
+// You can create one or more AWS Direct Connect private virtual interfaces
+// linking to a virtual private gateway. A virtual private gateway can be managed
+// via the Amazon Virtual Private Cloud (VPC) console or the EC2 CreateVpnGateway
+// (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-CreateVpnGateway.html)
+// action.
+func (c *DirectConnect) DescribeVirtualGateways(input *DescribeVirtualGatewaysInput) (*DescribeVirtualGatewaysOutput, error) {
+    req, out := c.DescribeVirtualGatewaysRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opDescribeVirtualInterfaces = "DescribeVirtualInterfaces"
+
+// DescribeVirtualInterfacesRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeVirtualInterfaces operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeVirtualInterfaces method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DescribeVirtualInterfacesRequest method.
+//    req, resp := client.DescribeVirtualInterfacesRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *DirectConnect) DescribeVirtualInterfacesRequest(input *DescribeVirtualInterfacesInput) (req *request.Request, output *DescribeVirtualInterfacesOutput) {
+    op := &request.Operation{
+        Name:       opDescribeVirtualInterfaces,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &DescribeVirtualInterfacesInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &DescribeVirtualInterfacesOutput{}
+    req.Data = output
+    return
+}
+
+// Displays all virtual interfaces for an AWS account. Virtual interfaces deleted
+// fewer than 15 minutes before DescribeVirtualInterfaces is called are also
+// returned. If a connection ID is included then only virtual interfaces associated
+// with this connection will be returned. If a virtual interface ID is included
+// then only a single virtual interface will be returned.
+//
+// A virtual interface (VLAN) transmits the traffic between the AWS Direct
+// Connect location and the customer.
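+//
+// A minimal sketch that narrows the listing to one connection, assuming an
+// initialized *DirectConnect client named "client"; the ConnectionId filter
+// field and the ID value are placeholders based on the optional-filter
+// pattern used elsewhere in this file:
+//
+//    out, err := client.DescribeVirtualInterfaces(&DescribeVirtualInterfacesInput{
+//        ConnectionId: aws.String("dxcon-fg5678gh"),
+//    })
+//    if err == nil {
+//        fmt.Println(out)
+//    }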
+func (c *DirectConnect) DescribeVirtualInterfaces(input *DescribeVirtualInterfacesInput) (*DescribeVirtualInterfacesOutput, error) {
+    req, out := c.DescribeVirtualInterfacesRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+// Container for the parameters to the AllocateConnectionOnInterconnect operation.
+type AllocateConnectionOnInterconnectInput struct {
+    _ struct{} `type:"structure"`
+
+    // Bandwidth of the connection.
+    //
+    // Example: "500Mbps"
+    //
+    // Default: None
+    //
+    // Values: 50M, 100M, 200M, 300M, 400M, or 500M
+    Bandwidth *string `locationName:"bandwidth" type:"string" required:"true"`
+
+    // Name of the provisioned connection.
+    //
+    // Example: "500M Connection to AWS"
+    //
+    // Default: None
+    ConnectionName *string `locationName:"connectionName" type:"string" required:"true"`
+
+    // ID of the interconnect on which the connection will be provisioned.
+    //
+    // Example: dxcon-456abc78
+    //
+    // Default: None
+    InterconnectId *string `locationName:"interconnectId" type:"string" required:"true"`
+
+    // Numeric account ID of the customer for whom the connection will be provisioned.
+    //
+    // Example: 123443215678
+    //
+    // Default: None
+    OwnerAccount *string `locationName:"ownerAccount" type:"string" required:"true"`
+
+    // The dedicated VLAN provisioned to the connection.
+    //
+    // Example: 101
+    //
+    // Default: None
+    Vlan *int64 `locationName:"vlan" type:"integer" required:"true"`
+}
+
+// String returns the string representation
+func (s AllocateConnectionOnInterconnectInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AllocateConnectionOnInterconnectInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AllocateConnectionOnInterconnectInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "AllocateConnectionOnInterconnectInput"}
+    if s.Bandwidth == nil {
+        invalidParams.Add(request.NewErrParamRequired("Bandwidth"))
+    }
+    if s.ConnectionName == nil {
+        invalidParams.Add(request.NewErrParamRequired("ConnectionName"))
+    }
+    if s.InterconnectId == nil {
+        invalidParams.Add(request.NewErrParamRequired("InterconnectId"))
+    }
+    if s.OwnerAccount == nil {
+        invalidParams.Add(request.NewErrParamRequired("OwnerAccount"))
+    }
+    if s.Vlan == nil {
+        invalidParams.Add(request.NewErrParamRequired("Vlan"))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// Container for the parameters to the AllocatePrivateVirtualInterface operation.
+type AllocatePrivateVirtualInterfaceInput struct {
+    _ struct{} `type:"structure"`
+
+    // The connection ID on which the private virtual interface is provisioned.
+    //
+    // Default: None
+    ConnectionId *string `locationName:"connectionId" type:"string" required:"true"`
+
+    // Detailed information for the private virtual interface to be provisioned.
+    //
+    // Default: None
+    NewPrivateVirtualInterfaceAllocation *NewPrivateVirtualInterfaceAllocation `locationName:"newPrivateVirtualInterfaceAllocation" type:"structure" required:"true"`
+
+    // The AWS account that will own the new private virtual interface.
+ // + // Default: None + OwnerAccount *string `locationName:"ownerAccount" type:"string" required:"true"` +} + +// String returns the string representation +func (s AllocatePrivateVirtualInterfaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AllocatePrivateVirtualInterfaceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AllocatePrivateVirtualInterfaceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AllocatePrivateVirtualInterfaceInput"} + if s.ConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("ConnectionId")) + } + if s.NewPrivateVirtualInterfaceAllocation == nil { + invalidParams.Add(request.NewErrParamRequired("NewPrivateVirtualInterfaceAllocation")) + } + if s.OwnerAccount == nil { + invalidParams.Add(request.NewErrParamRequired("OwnerAccount")) + } + if s.NewPrivateVirtualInterfaceAllocation != nil { + if err := s.NewPrivateVirtualInterfaceAllocation.Validate(); err != nil { + invalidParams.AddNested("NewPrivateVirtualInterfaceAllocation", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Container for the parameters to the AllocatePublicVirtualInterface operation. +type AllocatePublicVirtualInterfaceInput struct { + _ struct{} `type:"structure"` + + // The connection ID on which the public virtual interface is provisioned. + // + // Default: None + ConnectionId *string `locationName:"connectionId" type:"string" required:"true"` + + // Detailed information for the public virtual interface to be provisioned. + // + // Default: None + NewPublicVirtualInterfaceAllocation *NewPublicVirtualInterfaceAllocation `locationName:"newPublicVirtualInterfaceAllocation" type:"structure" required:"true"` + + // The AWS account that will own the new public virtual interface. + // + // Default: None + OwnerAccount *string `locationName:"ownerAccount" type:"string" required:"true"` +} + +// String returns the string representation +func (s AllocatePublicVirtualInterfaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AllocatePublicVirtualInterfaceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AllocatePublicVirtualInterfaceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AllocatePublicVirtualInterfaceInput"} + if s.ConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("ConnectionId")) + } + if s.NewPublicVirtualInterfaceAllocation == nil { + invalidParams.Add(request.NewErrParamRequired("NewPublicVirtualInterfaceAllocation")) + } + if s.OwnerAccount == nil { + invalidParams.Add(request.NewErrParamRequired("OwnerAccount")) + } + if s.NewPublicVirtualInterfaceAllocation != nil { + if err := s.NewPublicVirtualInterfaceAllocation.Validate(); err != nil { + invalidParams.AddNested("NewPublicVirtualInterfaceAllocation", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Container for the parameters to the ConfirmConnection operation. +type ConfirmConnectionInput struct { + _ struct{} `type:"structure"` + + // ID of the connection. 
+ // + // Example: dxcon-fg5678gh + // + // Default: None + ConnectionId *string `locationName:"connectionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s ConfirmConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfirmConnectionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ConfirmConnectionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ConfirmConnectionInput"} + if s.ConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("ConnectionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The response received when ConfirmConnection is called. +type ConfirmConnectionOutput struct { + _ struct{} `type:"structure"` + + // State of the connection. + // + // Ordering: The initial state of a hosted connection provisioned on an + // interconnect. The connection stays in the ordering state until the owner + // of the hosted connection confirms or declines the connection order. + // + // Requested: The initial state of a standard connection. The connection + // stays in the requested state until the Letter of Authorization (LOA) is sent + // to the customer. + // + // Pending: The connection has been approved, and is being initialized. + // + // Available: The network link is up, and the connection is ready for use. + // + // Down: The network link is down. + // + // Deleting: The connection is in the process of being deleted. + // + // Deleted: The connection has been deleted. + // + // Rejected: A hosted connection in the 'Ordering' state will enter the + // 'Rejected' state if it is deleted by the end customer. + ConnectionState *string `locationName:"connectionState" type:"string" enum:"ConnectionState"` +} + +// String returns the string representation +func (s ConfirmConnectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfirmConnectionOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the ConfirmPrivateVirtualInterface operation. +type ConfirmPrivateVirtualInterfaceInput struct { + _ struct{} `type:"structure"` + + // ID of the virtual private gateway that will be attached to the virtual interface. + // + // A virtual private gateway can be managed via the Amazon Virtual Private + // Cloud (VPC) console or the EC2 CreateVpnGateway (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-CreateVpnGateway.html) + // action. + // + // Default: None + VirtualGatewayId *string `locationName:"virtualGatewayId" type:"string" required:"true"` + + // ID of the virtual interface. + // + // Example: dxvif-123dfg56 + // + // Default: None + VirtualInterfaceId *string `locationName:"virtualInterfaceId" type:"string" required:"true"` +} + +// String returns the string representation +func (s ConfirmPrivateVirtualInterfaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfirmPrivateVirtualInterfaceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ConfirmPrivateVirtualInterfaceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ConfirmPrivateVirtualInterfaceInput"} + if s.VirtualGatewayId == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualGatewayId")) + } + if s.VirtualInterfaceId == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualInterfaceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The response received when ConfirmPrivateVirtualInterface is called. +type ConfirmPrivateVirtualInterfaceOutput struct { + _ struct{} `type:"structure"` + + // State of the virtual interface. + // + // Confirming: The creation of the virtual interface is pending confirmation + // from the virtual interface owner. If the owner of the virtual interface is + // different from the owner of the connection on which it is provisioned, then + // the virtual interface will remain in this state until it is confirmed by + // the virtual interface owner. + // + // Verifying: This state only applies to public virtual interfaces. Each + // public virtual interface needs validation before the virtual interface can + // be created. + // + // Pending: A virtual interface is in this state from the time that it is + // created until the virtual interface is ready to forward traffic. + // + // Available: A virtual interface that is able to forward traffic. + // + // Down: A virtual interface that is BGP down. + // + // Deleting: A virtual interface is in this state immediately after calling + // DeleteVirtualInterface until it can no longer forward traffic. + // + // Deleted: A virtual interface that cannot forward traffic. + // + // Rejected: The virtual interface owner has declined creation of the virtual + // interface. If a virtual interface in the 'Confirming' state is deleted by + // the virtual interface owner, the virtual interface will enter the 'Rejected' + // state. + VirtualInterfaceState *string `locationName:"virtualInterfaceState" type:"string" enum:"VirtualInterfaceState"` +} + +// String returns the string representation +func (s ConfirmPrivateVirtualInterfaceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfirmPrivateVirtualInterfaceOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the ConfirmPublicVirtualInterface operation. +type ConfirmPublicVirtualInterfaceInput struct { + _ struct{} `type:"structure"` + + // ID of the virtual interface. + // + // Example: dxvif-123dfg56 + // + // Default: None + VirtualInterfaceId *string `locationName:"virtualInterfaceId" type:"string" required:"true"` +} + +// String returns the string representation +func (s ConfirmPublicVirtualInterfaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfirmPublicVirtualInterfaceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ConfirmPublicVirtualInterfaceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ConfirmPublicVirtualInterfaceInput"} + if s.VirtualInterfaceId == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualInterfaceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The response received when ConfirmPublicVirtualInterface is called. 
+type ConfirmPublicVirtualInterfaceOutput struct { + _ struct{} `type:"structure"` + + // State of the virtual interface. + // + // Confirming: The creation of the virtual interface is pending confirmation + // from the virtual interface owner. If the owner of the virtual interface is + // different from the owner of the connection on which it is provisioned, then + // the virtual interface will remain in this state until it is confirmed by + // the virtual interface owner. + // + // Verifying: This state only applies to public virtual interfaces. Each + // public virtual interface needs validation before the virtual interface can + // be created. + // + // Pending: A virtual interface is in this state from the time that it is + // created until the virtual interface is ready to forward traffic. + // + // Available: A virtual interface that is able to forward traffic. + // + // Down: A virtual interface that is BGP down. + // + // Deleting: A virtual interface is in this state immediately after calling + // DeleteVirtualInterface until it can no longer forward traffic. + // + // Deleted: A virtual interface that cannot forward traffic. + // + // Rejected: The virtual interface owner has declined creation of the virtual + // interface. If a virtual interface in the 'Confirming' state is deleted by + // the virtual interface owner, the virtual interface will enter the 'Rejected' + // state. + VirtualInterfaceState *string `locationName:"virtualInterfaceState" type:"string" enum:"VirtualInterfaceState"` +} + +// String returns the string representation +func (s ConfirmPublicVirtualInterfaceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfirmPublicVirtualInterfaceOutput) GoString() string { + return s.String() +} + +// A connection represents the physical network connection between the AWS Direct +// Connect location and the customer. +type Connection struct { + _ struct{} `type:"structure"` + + // Bandwidth of the connection. + // + // Example: 1Gbps (for regular connections), or 500Mbps (for hosted connections) + // + // Default: None + Bandwidth *string `locationName:"bandwidth" type:"string"` + + // ID of the connection. + // + // Example: dxcon-fg5678gh + // + // Default: None + ConnectionId *string `locationName:"connectionId" type:"string"` + + // The name of the connection. + // + // Example: "My Connection to AWS" + // + // Default: None + ConnectionName *string `locationName:"connectionName" type:"string"` + + // State of the connection. + // + // Ordering: The initial state of a hosted connection provisioned on an + // interconnect. The connection stays in the ordering state until the owner + // of the hosted connection confirms or declines the connection order. + // + // Requested: The initial state of a standard connection. The connection + // stays in the requested state until the Letter of Authorization (LOA) is sent + // to the customer. + // + // Pending: The connection has been approved, and is being initialized. + // + // Available: The network link is up, and the connection is ready for use. + // + // Down: The network link is down. + // + // Deleting: The connection is in the process of being deleted. + // + // Deleted: The connection has been deleted. + // + // Rejected: A hosted connection in the 'Ordering' state will enter the + // 'Rejected' state if it is deleted by the end customer. 
+ ConnectionState *string `locationName:"connectionState" type:"string" enum:"ConnectionState"` + + // The time of the most recent call to DescribeConnectionLoa for this Connection. + LoaIssueTime *time.Time `locationName:"loaIssueTime" type:"timestamp" timestampFormat:"unix"` + + // Where the connection is located. + // + // Example: EqSV5 + // + // Default: None + Location *string `locationName:"location" type:"string"` + + // The AWS account that will own the new connection. + OwnerAccount *string `locationName:"ownerAccount" type:"string"` + + // The name of the AWS Direct Connect service provider associated with the connection. + PartnerName *string `locationName:"partnerName" type:"string"` + + // The AWS region where the connection is located. + // + // Example: us-east-1 + // + // Default: None + Region *string `locationName:"region" type:"string"` + + // The VLAN ID. + // + // Example: 101 + Vlan *int64 `locationName:"vlan" type:"integer"` +} + +// String returns the string representation +func (s Connection) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Connection) GoString() string { + return s.String() +} + +// A structure containing a list of connections. +type Connections struct { + _ struct{} `type:"structure"` + + // A list of connections. + Connections []*Connection `locationName:"connections" type:"list"` +} + +// String returns the string representation +func (s Connections) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Connections) GoString() string { + return s.String() +} + +// Container for the parameters to the CreateConnection operation. +type CreateConnectionInput struct { + _ struct{} `type:"structure"` + + // Bandwidth of the connection. + // + // Example: 1Gbps + // + // Default: None + Bandwidth *string `locationName:"bandwidth" type:"string" required:"true"` + + // The name of the connection. + // + // Example: "My Connection to AWS" + // + // Default: None + ConnectionName *string `locationName:"connectionName" type:"string" required:"true"` + + // Where the connection is located. + // + // Example: EqSV5 + // + // Default: None + Location *string `locationName:"location" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateConnectionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateConnectionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateConnectionInput"} + if s.Bandwidth == nil { + invalidParams.Add(request.NewErrParamRequired("Bandwidth")) + } + if s.ConnectionName == nil { + invalidParams.Add(request.NewErrParamRequired("ConnectionName")) + } + if s.Location == nil { + invalidParams.Add(request.NewErrParamRequired("Location")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Container for the parameters to the CreateInterconnect operation. +type CreateInterconnectInput struct { + _ struct{} `type:"structure"` + + // The port bandwidth + // + // Example: 1Gbps + // + // Default: None + // + // Available values: 1Gbps,10Gbps + Bandwidth *string `locationName:"bandwidth" type:"string" required:"true"` + + // The name of the interconnect. 
+ // + // Example: "1G Interconnect to AWS" + // + // Default: None + InterconnectName *string `locationName:"interconnectName" type:"string" required:"true"` + + // Where the interconnect is located + // + // Example: EqSV5 + // + // Default: None + Location *string `locationName:"location" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateInterconnectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInterconnectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateInterconnectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateInterconnectInput"} + if s.Bandwidth == nil { + invalidParams.Add(request.NewErrParamRequired("Bandwidth")) + } + if s.InterconnectName == nil { + invalidParams.Add(request.NewErrParamRequired("InterconnectName")) + } + if s.Location == nil { + invalidParams.Add(request.NewErrParamRequired("Location")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Container for the parameters to the CreatePrivateVirtualInterface operation. +type CreatePrivateVirtualInterfaceInput struct { + _ struct{} `type:"structure"` + + // ID of the connection. + // + // Example: dxcon-fg5678gh + // + // Default: None + ConnectionId *string `locationName:"connectionId" type:"string" required:"true"` + + // Detailed information for the private virtual interface to be created. + // + // Default: None + NewPrivateVirtualInterface *NewPrivateVirtualInterface `locationName:"newPrivateVirtualInterface" type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreatePrivateVirtualInterfaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePrivateVirtualInterfaceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreatePrivateVirtualInterfaceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreatePrivateVirtualInterfaceInput"} + if s.ConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("ConnectionId")) + } + if s.NewPrivateVirtualInterface == nil { + invalidParams.Add(request.NewErrParamRequired("NewPrivateVirtualInterface")) + } + if s.NewPrivateVirtualInterface != nil { + if err := s.NewPrivateVirtualInterface.Validate(); err != nil { + invalidParams.AddNested("NewPrivateVirtualInterface", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Container for the parameters to the CreatePublicVirtualInterface operation. +type CreatePublicVirtualInterfaceInput struct { + _ struct{} `type:"structure"` + + // ID of the connection. + // + // Example: dxcon-fg5678gh + // + // Default: None + ConnectionId *string `locationName:"connectionId" type:"string" required:"true"` + + // Detailed information for the public virtual interface to be created. 
+    //
+    // Default: None
+    NewPublicVirtualInterface *NewPublicVirtualInterface `locationName:"newPublicVirtualInterface" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s CreatePublicVirtualInterfaceInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreatePublicVirtualInterfaceInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreatePublicVirtualInterfaceInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "CreatePublicVirtualInterfaceInput"}
+    if s.ConnectionId == nil {
+        invalidParams.Add(request.NewErrParamRequired("ConnectionId"))
+    }
+    if s.NewPublicVirtualInterface == nil {
+        invalidParams.Add(request.NewErrParamRequired("NewPublicVirtualInterface"))
+    }
+    if s.NewPublicVirtualInterface != nil {
+        if err := s.NewPublicVirtualInterface.Validate(); err != nil {
+            invalidParams.AddNested("NewPublicVirtualInterface", err.(request.ErrInvalidParams))
+        }
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// Container for the parameters to the DeleteConnection operation.
+type DeleteConnectionInput struct {
+    _ struct{} `type:"structure"`
+
+    // ID of the connection.
+    //
+    // Example: dxcon-fg5678gh
+    //
+    // Default: None
+    ConnectionId *string `locationName:"connectionId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteConnectionInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteConnectionInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteConnectionInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "DeleteConnectionInput"}
+    if s.ConnectionId == nil {
+        invalidParams.Add(request.NewErrParamRequired("ConnectionId"))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// Container for the parameters to the DeleteInterconnect operation.
+type DeleteInterconnectInput struct {
+    _ struct{} `type:"structure"`
+
+    // The ID of the interconnect.
+    //
+    // Example: dxcon-abc123
+    InterconnectId *string `locationName:"interconnectId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteInterconnectInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteInterconnectInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteInterconnectInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "DeleteInterconnectInput"}
+    if s.InterconnectId == nil {
+        invalidParams.Add(request.NewErrParamRequired("InterconnectId"))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// The response received when DeleteInterconnect is called.
+type DeleteInterconnectOutput struct {
+    _ struct{} `type:"structure"`
+
+    // State of the interconnect.
+    //
+    // Requested: The initial state of an interconnect. The interconnect stays
+    // in the requested state until the Letter of Authorization (LOA) is sent to
+    // the customer.
+    //
+    // Pending: The interconnect has been approved, and is being initialized.
+ // + // Available: The network link is up, and the interconnect is ready for + // use. + // + // Down: The network link is down. + // + // Deleting: The interconnect is in the process of being deleted. + // + // Deleted: The interconnect has been deleted. + InterconnectState *string `locationName:"interconnectState" type:"string" enum:"InterconnectState"` +} + +// String returns the string representation +func (s DeleteInterconnectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInterconnectOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DeleteVirtualInterface operation. +type DeleteVirtualInterfaceInput struct { + _ struct{} `type:"structure"` + + // ID of the virtual interface. + // + // Example: dxvif-123dfg56 + // + // Default: None + VirtualInterfaceId *string `locationName:"virtualInterfaceId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVirtualInterfaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVirtualInterfaceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteVirtualInterfaceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteVirtualInterfaceInput"} + if s.VirtualInterfaceId == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualInterfaceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The response received when DeleteVirtualInterface is called. +type DeleteVirtualInterfaceOutput struct { + _ struct{} `type:"structure"` + + // State of the virtual interface. + // + // Confirming: The creation of the virtual interface is pending confirmation + // from the virtual interface owner. If the owner of the virtual interface is + // different from the owner of the connection on which it is provisioned, then + // the virtual interface will remain in this state until it is confirmed by + // the virtual interface owner. + // + // Verifying: This state only applies to public virtual interfaces. Each + // public virtual interface needs validation before the virtual interface can + // be created. + // + // Pending: A virtual interface is in this state from the time that it is + // created until the virtual interface is ready to forward traffic. + // + // Available: A virtual interface that is able to forward traffic. + // + // Down: A virtual interface that is BGP down. + // + // Deleting: A virtual interface is in this state immediately after calling + // DeleteVirtualInterface until it can no longer forward traffic. + // + // Deleted: A virtual interface that cannot forward traffic. + // + // Rejected: The virtual interface owner has declined creation of the virtual + // interface. If a virtual interface in the 'Confirming' state is deleted by + // the virtual interface owner, the virtual interface will enter the 'Rejected' + // state. 
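+ //
+ // A minimal sketch of checking the returned state after a delete call
+ // (assuming resp is the output of DeleteVirtualInterface; the state
+ // constants are defined at the end of this file):
+ //
+ //    switch aws.StringValue(resp.VirtualInterfaceState) {
+ //    case directconnect.VirtualInterfaceStateDeleting:
+ //        // deletion has begun; the interface may still forward traffic briefly
+ //    case directconnect.VirtualInterfaceStateDeleted:
+ //        // the interface can no longer forward traffic
+ //    }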
+ VirtualInterfaceState *string `locationName:"virtualInterfaceState" type:"string" enum:"VirtualInterfaceState"`
+}
+
+// String returns the string representation
+func (s DeleteVirtualInterfaceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteVirtualInterfaceOutput) GoString() string {
+ return s.String()
+}
+
+// Container for the parameters to the DescribeConnectionLoa operation.
+type DescribeConnectionLoaInput struct {
+ _ struct{} `type:"structure"`
+
+ // ID of the connection.
+ //
+ // Example: dxcon-fg5678gh
+ //
+ // Default: None
+ ConnectionId *string `locationName:"connectionId" type:"string" required:"true"`
+
+ // A standard media type indicating the content type of the LOA-CFA document.
+ // Currently, the only supported value is "application/pdf".
+ //
+ // Default: application/pdf
+ LoaContentType *string `locationName:"loaContentType" type:"string" enum:"LoaContentType"`
+
+ // The name of the APN partner or service provider who establishes connectivity
+ // on your behalf. If you supply this parameter, the LOA-CFA lists the provider
+ // name alongside your company name as the requester of the cross connect.
+ //
+ // Default: None
+ ProviderName *string `locationName:"providerName" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeConnectionLoaInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeConnectionLoaInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeConnectionLoaInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeConnectionLoaInput"}
+ if s.ConnectionId == nil {
+ invalidParams.Add(request.NewErrParamRequired("ConnectionId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// The response received when DescribeConnectionLoa is called.
+type DescribeConnectionLoaOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A structure containing the Letter of Authorization - Connecting Facility
+ // Assignment (LOA-CFA) for a connection.
+ Loa *Loa `locationName:"loa" type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeConnectionLoaOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeConnectionLoaOutput) GoString() string {
+ return s.String()
+}
+
+// Container for the parameters to the DescribeConnections operation.
+type DescribeConnectionsInput struct {
+ _ struct{} `type:"structure"`
+
+ // ID of the connection.
+ //
+ // Example: dxcon-fg5678gh
+ //
+ // Default: None
+ ConnectionId *string `locationName:"connectionId" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeConnectionsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeConnectionsInput) GoString() string {
+ return s.String()
+}
+
+// Container for the parameters to the DescribeConnectionsOnInterconnect operation.
+type DescribeConnectionsOnInterconnectInput struct {
+ _ struct{} `type:"structure"`
+
+ // ID of the interconnect on which a list of connections is provisioned.
+ // + // Example: dxcon-abc123 + // + // Default: None + InterconnectId *string `locationName:"interconnectId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeConnectionsOnInterconnectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeConnectionsOnInterconnectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeConnectionsOnInterconnectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeConnectionsOnInterconnectInput"} + if s.InterconnectId == nil { + invalidParams.Add(request.NewErrParamRequired("InterconnectId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Container for the parameters to the DescribeInterconnectLoa operation. +type DescribeInterconnectLoaInput struct { + _ struct{} `type:"structure"` + + // The ID of the interconnect. + // + // Example: dxcon-abc123 + InterconnectId *string `locationName:"interconnectId" type:"string" required:"true"` + + // A standard media type indicating the content type of the LOA-CFA document. + // Currently, the only supported value is "application/pdf". + // + // Default: application/pdf + LoaContentType *string `locationName:"loaContentType" type:"string" enum:"LoaContentType"` + + // The name of the service provider who establishes connectivity on your behalf. + // If you supply this parameter, the LOA-CFA lists the provider name alongside + // your company name as the requester of the cross connect. + // + // Default: None + ProviderName *string `locationName:"providerName" type:"string"` +} + +// String returns the string representation +func (s DescribeInterconnectLoaInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInterconnectLoaInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeInterconnectLoaInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeInterconnectLoaInput"} + if s.InterconnectId == nil { + invalidParams.Add(request.NewErrParamRequired("InterconnectId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The response received when DescribeInterconnectLoa is called. +type DescribeInterconnectLoaOutput struct { + _ struct{} `type:"structure"` + + // A structure containing the Letter of Authorization - Connecting Facility + // Assignment (LOA-CFA) for a connection. + Loa *Loa `locationName:"loa" type:"structure"` +} + +// String returns the string representation +func (s DescribeInterconnectLoaOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInterconnectLoaOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DescribeInterconnects operation. +type DescribeInterconnectsInput struct { + _ struct{} `type:"structure"` + + // The ID of the interconnect. 
+ // + // Example: dxcon-abc123 + InterconnectId *string `locationName:"interconnectId" type:"string"` +} + +// String returns the string representation +func (s DescribeInterconnectsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInterconnectsInput) GoString() string { + return s.String() +} + +// A structure containing a list of interconnects. +type DescribeInterconnectsOutput struct { + _ struct{} `type:"structure"` + + // A list of interconnects. + Interconnects []*Interconnect `locationName:"interconnects" type:"list"` +} + +// String returns the string representation +func (s DescribeInterconnectsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInterconnectsOutput) GoString() string { + return s.String() +} + +type DescribeLocationsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeLocationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLocationsInput) GoString() string { + return s.String() +} + +// A location is a network facility where AWS Direct Connect routers are available +// to be connected. Generally, these are colocation hubs where many network +// providers have equipment, and where cross connects can be delivered. Locations +// include a name and facility code, and must be provided when creating a connection. +type DescribeLocationsOutput struct { + _ struct{} `type:"structure"` + + // A list of colocation hubs where network providers have equipment. Most regions + // have multiple locations available. + Locations []*Location `locationName:"locations" type:"list"` +} + +// String returns the string representation +func (s DescribeLocationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLocationsOutput) GoString() string { + return s.String() +} + +type DescribeVirtualGatewaysInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeVirtualGatewaysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVirtualGatewaysInput) GoString() string { + return s.String() +} + +// A structure containing a list of virtual private gateways. +type DescribeVirtualGatewaysOutput struct { + _ struct{} `type:"structure"` + + // A list of virtual private gateways. + VirtualGateways []*VirtualGateway `locationName:"virtualGateways" type:"list"` +} + +// String returns the string representation +func (s DescribeVirtualGatewaysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVirtualGatewaysOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DescribeVirtualInterfaces operation. +type DescribeVirtualInterfacesInput struct { + _ struct{} `type:"structure"` + + // ID of the connection. + // + // Example: dxcon-fg5678gh + // + // Default: None + ConnectionId *string `locationName:"connectionId" type:"string"` + + // ID of the virtual interface. 
+ //
+ // Example: dxvif-123dfg56
+ //
+ // Default: None
+ VirtualInterfaceId *string `locationName:"virtualInterfaceId" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeVirtualInterfacesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeVirtualInterfacesInput) GoString() string {
+ return s.String()
+}
+
+// A structure containing a list of virtual interfaces.
+type DescribeVirtualInterfacesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of virtual interfaces.
+ VirtualInterfaces []*VirtualInterface `locationName:"virtualInterfaces" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeVirtualInterfacesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeVirtualInterfacesOutput) GoString() string {
+ return s.String()
+}
+
+// An interconnect is a connection that can host other connections.
+//
+// Like a standard AWS Direct Connect connection, an interconnect represents
+// the physical connection between an AWS Direct Connect partner's network and
+// a specific Direct Connect location. An AWS Direct Connect partner who owns
+// an interconnect can provision hosted connections on the interconnect for
+// their end customers, thereby providing the end customers with connectivity
+// to AWS services.
+//
+// The resources of the interconnect, including bandwidth and VLAN numbers,
+// are shared by all of the hosted connections on the interconnect, and the
+// owner of the interconnect determines how these resources are assigned.
+type Interconnect struct {
+ _ struct{} `type:"structure"`
+
+ // Bandwidth of the connection.
+ //
+ // Example: 1Gbps
+ //
+ // Default: None
+ Bandwidth *string `locationName:"bandwidth" type:"string"`
+
+ // The ID of the interconnect.
+ //
+ // Example: dxcon-abc123
+ InterconnectId *string `locationName:"interconnectId" type:"string"`
+
+ // The name of the interconnect.
+ //
+ // Example: "1G Interconnect to AWS"
+ InterconnectName *string `locationName:"interconnectName" type:"string"`
+
+ // State of the interconnect.
+ //
+ // Requested: The initial state of an interconnect. The interconnect stays
+ // in the requested state until the Letter of Authorization (LOA) is sent to
+ // the customer.
+ //
+ // Pending: The interconnect has been approved, and is being initialized.
+ //
+ // Available: The network link is up, and the interconnect is ready for
+ // use.
+ //
+ // Down: The network link is down.
+ //
+ // Deleting: The interconnect is in the process of being deleted.
+ //
+ // Deleted: The interconnect has been deleted.
+ InterconnectState *string `locationName:"interconnectState" type:"string" enum:"InterconnectState"`
+
+ // The time of the most recent call to DescribeInterconnectLoa for this Interconnect.
+ LoaIssueTime *time.Time `locationName:"loaIssueTime" type:"timestamp" timestampFormat:"unix"`
+
+ // Where the connection is located.
+ //
+ // Example: EqSV5
+ //
+ // Default: None
+ Location *string `locationName:"location" type:"string"`
+
+ // The AWS region where the connection is located.
+ // + // Example: us-east-1 + // + // Default: None + Region *string `locationName:"region" type:"string"` +} + +// String returns the string representation +func (s Interconnect) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Interconnect) GoString() string { + return s.String() +} + +// A structure containing the Letter of Authorization - Connecting Facility +// Assignment (LOA-CFA) for a connection. +type Loa struct { + _ struct{} `type:"structure"` + + // The binary contents of the LOA-CFA document. + // + // LoaContent is automatically base64 encoded/decoded by the SDK. + LoaContent []byte `locationName:"loaContent" type:"blob"` + + // A standard media type indicating the content type of the LOA-CFA document. + // Currently, the only supported value is "application/pdf". + // + // Default: application/pdf + LoaContentType *string `locationName:"loaContentType" type:"string" enum:"LoaContentType"` +} + +// String returns the string representation +func (s Loa) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Loa) GoString() string { + return s.String() +} + +// An AWS Direct Connect location where connections and interconnects can be +// requested. +type Location struct { + _ struct{} `type:"structure"` + + // The code used to indicate the AWS Direct Connect location. + LocationCode *string `locationName:"locationCode" type:"string"` + + // The name of the AWS Direct Connect location. The name includes the colocation + // partner name and the physical site of the lit building. + LocationName *string `locationName:"locationName" type:"string"` +} + +// String returns the string representation +func (s Location) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Location) GoString() string { + return s.String() +} + +// A structure containing information about a new private virtual interface. +type NewPrivateVirtualInterface struct { + _ struct{} `type:"structure"` + + // IP address assigned to the Amazon interface. + // + // Example: 192.168.1.1/30 + AmazonAddress *string `locationName:"amazonAddress" type:"string"` + + // Autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. + // + // Example: 65000 + Asn *int64 `locationName:"asn" type:"integer" required:"true"` + + // Authentication key for BGP configuration. + // + // Example: asdf34example + AuthKey *string `locationName:"authKey" type:"string"` + + // IP address assigned to the customer interface. + // + // Example: 192.168.1.2/30 + CustomerAddress *string `locationName:"customerAddress" type:"string"` + + // The ID of the virtual private gateway to a VPC. This only applies to private + // virtual interfaces. + // + // Example: vgw-123er56 + VirtualGatewayId *string `locationName:"virtualGatewayId" type:"string" required:"true"` + + // The name of the virtual interface assigned by the customer. + // + // Example: "My VPC" + VirtualInterfaceName *string `locationName:"virtualInterfaceName" type:"string" required:"true"` + + // The VLAN ID. 
+ // + // Example: 101 + Vlan *int64 `locationName:"vlan" type:"integer" required:"true"` +} + +// String returns the string representation +func (s NewPrivateVirtualInterface) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NewPrivateVirtualInterface) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *NewPrivateVirtualInterface) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "NewPrivateVirtualInterface"} + if s.Asn == nil { + invalidParams.Add(request.NewErrParamRequired("Asn")) + } + if s.VirtualGatewayId == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualGatewayId")) + } + if s.VirtualInterfaceName == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualInterfaceName")) + } + if s.Vlan == nil { + invalidParams.Add(request.NewErrParamRequired("Vlan")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A structure containing information about a private virtual interface that +// will be provisioned on a connection. +type NewPrivateVirtualInterfaceAllocation struct { + _ struct{} `type:"structure"` + + // IP address assigned to the Amazon interface. + // + // Example: 192.168.1.1/30 + AmazonAddress *string `locationName:"amazonAddress" type:"string"` + + // Autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. + // + // Example: 65000 + Asn *int64 `locationName:"asn" type:"integer" required:"true"` + + // Authentication key for BGP configuration. + // + // Example: asdf34example + AuthKey *string `locationName:"authKey" type:"string"` + + // IP address assigned to the customer interface. + // + // Example: 192.168.1.2/30 + CustomerAddress *string `locationName:"customerAddress" type:"string"` + + // The name of the virtual interface assigned by the customer. + // + // Example: "My VPC" + VirtualInterfaceName *string `locationName:"virtualInterfaceName" type:"string" required:"true"` + + // The VLAN ID. + // + // Example: 101 + Vlan *int64 `locationName:"vlan" type:"integer" required:"true"` +} + +// String returns the string representation +func (s NewPrivateVirtualInterfaceAllocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NewPrivateVirtualInterfaceAllocation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *NewPrivateVirtualInterfaceAllocation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "NewPrivateVirtualInterfaceAllocation"} + if s.Asn == nil { + invalidParams.Add(request.NewErrParamRequired("Asn")) + } + if s.VirtualInterfaceName == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualInterfaceName")) + } + if s.Vlan == nil { + invalidParams.Add(request.NewErrParamRequired("Vlan")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A structure containing information about a new public virtual interface. +type NewPublicVirtualInterface struct { + _ struct{} `type:"structure"` + + // IP address assigned to the Amazon interface. + // + // Example: 192.168.1.1/30 + AmazonAddress *string `locationName:"amazonAddress" type:"string" required:"true"` + + // Autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. 
+ // + // Example: 65000 + Asn *int64 `locationName:"asn" type:"integer" required:"true"` + + // Authentication key for BGP configuration. + // + // Example: asdf34example + AuthKey *string `locationName:"authKey" type:"string"` + + // IP address assigned to the customer interface. + // + // Example: 192.168.1.2/30 + CustomerAddress *string `locationName:"customerAddress" type:"string" required:"true"` + + // A list of routes to be advertised to the AWS network in this region (public + // virtual interface). + RouteFilterPrefixes []*RouteFilterPrefix `locationName:"routeFilterPrefixes" type:"list" required:"true"` + + // The name of the virtual interface assigned by the customer. + // + // Example: "My VPC" + VirtualInterfaceName *string `locationName:"virtualInterfaceName" type:"string" required:"true"` + + // The VLAN ID. + // + // Example: 101 + Vlan *int64 `locationName:"vlan" type:"integer" required:"true"` +} + +// String returns the string representation +func (s NewPublicVirtualInterface) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NewPublicVirtualInterface) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *NewPublicVirtualInterface) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "NewPublicVirtualInterface"} + if s.AmazonAddress == nil { + invalidParams.Add(request.NewErrParamRequired("AmazonAddress")) + } + if s.Asn == nil { + invalidParams.Add(request.NewErrParamRequired("Asn")) + } + if s.CustomerAddress == nil { + invalidParams.Add(request.NewErrParamRequired("CustomerAddress")) + } + if s.RouteFilterPrefixes == nil { + invalidParams.Add(request.NewErrParamRequired("RouteFilterPrefixes")) + } + if s.VirtualInterfaceName == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualInterfaceName")) + } + if s.Vlan == nil { + invalidParams.Add(request.NewErrParamRequired("Vlan")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A structure containing information about a public virtual interface that +// will be provisioned on a connection. +type NewPublicVirtualInterfaceAllocation struct { + _ struct{} `type:"structure"` + + // IP address assigned to the Amazon interface. + // + // Example: 192.168.1.1/30 + AmazonAddress *string `locationName:"amazonAddress" type:"string" required:"true"` + + // Autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. + // + // Example: 65000 + Asn *int64 `locationName:"asn" type:"integer" required:"true"` + + // Authentication key for BGP configuration. + // + // Example: asdf34example + AuthKey *string `locationName:"authKey" type:"string"` + + // IP address assigned to the customer interface. + // + // Example: 192.168.1.2/30 + CustomerAddress *string `locationName:"customerAddress" type:"string" required:"true"` + + // A list of routes to be advertised to the AWS network in this region (public + // virtual interface). + RouteFilterPrefixes []*RouteFilterPrefix `locationName:"routeFilterPrefixes" type:"list" required:"true"` + + // The name of the virtual interface assigned by the customer. + // + // Example: "My VPC" + VirtualInterfaceName *string `locationName:"virtualInterfaceName" type:"string" required:"true"` + + // The VLAN ID. 
+ // + // Example: 101 + Vlan *int64 `locationName:"vlan" type:"integer" required:"true"` +} + +// String returns the string representation +func (s NewPublicVirtualInterfaceAllocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NewPublicVirtualInterfaceAllocation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *NewPublicVirtualInterfaceAllocation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "NewPublicVirtualInterfaceAllocation"} + if s.AmazonAddress == nil { + invalidParams.Add(request.NewErrParamRequired("AmazonAddress")) + } + if s.Asn == nil { + invalidParams.Add(request.NewErrParamRequired("Asn")) + } + if s.CustomerAddress == nil { + invalidParams.Add(request.NewErrParamRequired("CustomerAddress")) + } + if s.RouteFilterPrefixes == nil { + invalidParams.Add(request.NewErrParamRequired("RouteFilterPrefixes")) + } + if s.VirtualInterfaceName == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualInterfaceName")) + } + if s.Vlan == nil { + invalidParams.Add(request.NewErrParamRequired("Vlan")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A route filter prefix that the customer can advertise through Border Gateway +// Protocol (BGP) over a public virtual interface. +type RouteFilterPrefix struct { + _ struct{} `type:"structure"` + + // CIDR notation for the advertised route. Multiple routes are separated by + // commas. + // + // Example: 10.10.10.0/24,10.10.11.0/24 + Cidr *string `locationName:"cidr" type:"string"` +} + +// String returns the string representation +func (s RouteFilterPrefix) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RouteFilterPrefix) GoString() string { + return s.String() +} + +// You can create one or more AWS Direct Connect private virtual interfaces +// linking to your virtual private gateway. +// +// Virtual private gateways can be managed using the Amazon Virtual Private +// Cloud (Amazon VPC) console or the Amazon EC2 CreateVpnGateway action (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-CreateVpnGateway.html). +type VirtualGateway struct { + _ struct{} `type:"structure"` + + // The ID of the virtual private gateway to a VPC. This only applies to private + // virtual interfaces. + // + // Example: vgw-123er56 + VirtualGatewayId *string `locationName:"virtualGatewayId" type:"string"` + + // State of the virtual private gateway. + // + // Pending: This is the initial state after calling CreateVpnGateway. + // + // Available: Ready for use by a private virtual interface. + // + // Deleting: This is the initial state after calling DeleteVpnGateway. + // + // Deleted: In this state, a private virtual interface is unable to send + // traffic over this gateway. + VirtualGatewayState *string `locationName:"virtualGatewayState" type:"string"` +} + +// String returns the string representation +func (s VirtualGateway) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VirtualGateway) GoString() string { + return s.String() +} + +// A virtual interface (VLAN) transmits the traffic between the AWS Direct Connect +// location and the customer. +type VirtualInterface struct { + _ struct{} `type:"structure"` + + // IP address assigned to the Amazon interface. 
+ // + // Example: 192.168.1.1/30 + AmazonAddress *string `locationName:"amazonAddress" type:"string"` + + // Autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. + // + // Example: 65000 + Asn *int64 `locationName:"asn" type:"integer"` + + // Authentication key for BGP configuration. + // + // Example: asdf34example + AuthKey *string `locationName:"authKey" type:"string"` + + // ID of the connection. + // + // Example: dxcon-fg5678gh + // + // Default: None + ConnectionId *string `locationName:"connectionId" type:"string"` + + // IP address assigned to the customer interface. + // + // Example: 192.168.1.2/30 + CustomerAddress *string `locationName:"customerAddress" type:"string"` + + // Information for generating the customer router configuration. + CustomerRouterConfig *string `locationName:"customerRouterConfig" type:"string"` + + // Where the connection is located. + // + // Example: EqSV5 + // + // Default: None + Location *string `locationName:"location" type:"string"` + + // The AWS account that will own the new virtual interface. + OwnerAccount *string `locationName:"ownerAccount" type:"string"` + + // A list of routes to be advertised to the AWS network in this region (public + // virtual interface). + RouteFilterPrefixes []*RouteFilterPrefix `locationName:"routeFilterPrefixes" type:"list"` + + // The ID of the virtual private gateway to a VPC. This only applies to private + // virtual interfaces. + // + // Example: vgw-123er56 + VirtualGatewayId *string `locationName:"virtualGatewayId" type:"string"` + + // ID of the virtual interface. + // + // Example: dxvif-123dfg56 + // + // Default: None + VirtualInterfaceId *string `locationName:"virtualInterfaceId" type:"string"` + + // The name of the virtual interface assigned by the customer. + // + // Example: "My VPC" + VirtualInterfaceName *string `locationName:"virtualInterfaceName" type:"string"` + + // State of the virtual interface. + // + // Confirming: The creation of the virtual interface is pending confirmation + // from the virtual interface owner. If the owner of the virtual interface is + // different from the owner of the connection on which it is provisioned, then + // the virtual interface will remain in this state until it is confirmed by + // the virtual interface owner. + // + // Verifying: This state only applies to public virtual interfaces. Each + // public virtual interface needs validation before the virtual interface can + // be created. + // + // Pending: A virtual interface is in this state from the time that it is + // created until the virtual interface is ready to forward traffic. + // + // Available: A virtual interface that is able to forward traffic. + // + // Down: A virtual interface that is BGP down. + // + // Deleting: A virtual interface is in this state immediately after calling + // DeleteVirtualInterface until it can no longer forward traffic. + // + // Deleted: A virtual interface that cannot forward traffic. + // + // Rejected: The virtual interface owner has declined creation of the virtual + // interface. If a virtual interface in the 'Confirming' state is deleted by + // the virtual interface owner, the virtual interface will enter the 'Rejected' + // state. + VirtualInterfaceState *string `locationName:"virtualInterfaceState" type:"string" enum:"VirtualInterfaceState"` + + // The type of virtual interface. + // + // Example: private (Amazon VPC) or public (Amazon S3, Amazon DynamoDB, and + // so on.) 
+ VirtualInterfaceType *string `locationName:"virtualInterfaceType" type:"string"`
+
+ // The VLAN ID.
+ //
+ // Example: 101
+ Vlan *int64 `locationName:"vlan" type:"integer"`
+}
+
+// String returns the string representation
+func (s VirtualInterface) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s VirtualInterface) GoString() string {
+ return s.String()
+}
+
+// State of the connection.
+//
+// Ordering: The initial state of a hosted connection provisioned on an
+// interconnect. The connection stays in the ordering state until the owner
+// of the hosted connection confirms or declines the connection order.
+//
+// Requested: The initial state of a standard connection. The connection
+// stays in the requested state until the Letter of Authorization (LOA) is sent
+// to the customer.
+//
+// Pending: The connection has been approved, and is being initialized.
+//
+// Available: The network link is up, and the connection is ready for use.
+//
+// Down: The network link is down.
+//
+// Deleting: The connection is in the process of being deleted.
+//
+// Deleted: The connection has been deleted.
+//
+// Rejected: A hosted connection in the 'Ordering' state will enter the
+// 'Rejected' state if it is deleted by the end customer.
+const (
+ // @enum ConnectionState
+ ConnectionStateOrdering = "ordering"
+ // @enum ConnectionState
+ ConnectionStateRequested = "requested"
+ // @enum ConnectionState
+ ConnectionStatePending = "pending"
+ // @enum ConnectionState
+ ConnectionStateAvailable = "available"
+ // @enum ConnectionState
+ ConnectionStateDown = "down"
+ // @enum ConnectionState
+ ConnectionStateDeleting = "deleting"
+ // @enum ConnectionState
+ ConnectionStateDeleted = "deleted"
+ // @enum ConnectionState
+ ConnectionStateRejected = "rejected"
+)
+
+// State of the interconnect.
+//
+// Requested: The initial state of an interconnect. The interconnect stays
+// in the requested state until the Letter of Authorization (LOA) is sent to
+// the customer.
+//
+// Pending: The interconnect has been approved, and is being initialized.
+//
+// Available: The network link is up, and the interconnect is ready for
+// use.
+//
+// Down: The network link is down.
+//
+// Deleting: The interconnect is in the process of being deleted.
+//
+// Deleted: The interconnect has been deleted.
+const (
+ // @enum InterconnectState
+ InterconnectStateRequested = "requested"
+ // @enum InterconnectState
+ InterconnectStatePending = "pending"
+ // @enum InterconnectState
+ InterconnectStateAvailable = "available"
+ // @enum InterconnectState
+ InterconnectStateDown = "down"
+ // @enum InterconnectState
+ InterconnectStateDeleting = "deleting"
+ // @enum InterconnectState
+ InterconnectStateDeleted = "deleted"
+)
+
+// A standard media type indicating the content type of the LOA-CFA document.
+// Currently, the only supported value is "application/pdf".
+//
+// Default: application/pdf
+const (
+ // @enum LoaContentType
+ LoaContentTypeApplicationPdf = "application/pdf"
+)
+
+// State of the virtual interface.
+//
+// Confirming: The creation of the virtual interface is pending confirmation
+// from the virtual interface owner. If the owner of the virtual interface is
+// different from the owner of the connection on which it is provisioned, then
+// the virtual interface will remain in this state until it is confirmed by
+// the virtual interface owner.
+//
+// Verifying: This state only applies to public virtual interfaces.
Each +// public virtual interface needs validation before the virtual interface can +// be created. +// +// Pending: A virtual interface is in this state from the time that it is +// created until the virtual interface is ready to forward traffic. +// +// Available: A virtual interface that is able to forward traffic. +// +// Down: A virtual interface that is BGP down. +// +// Deleting: A virtual interface is in this state immediately after calling +// DeleteVirtualInterface until it can no longer forward traffic. +// +// Deleted: A virtual interface that cannot forward traffic. +// +// Rejected: The virtual interface owner has declined creation of the virtual +// interface. If a virtual interface in the 'Confirming' state is deleted by +// the virtual interface owner, the virtual interface will enter the 'Rejected' +// state. +const ( + // @enum VirtualInterfaceState + VirtualInterfaceStateConfirming = "confirming" + // @enum VirtualInterfaceState + VirtualInterfaceStateVerifying = "verifying" + // @enum VirtualInterfaceState + VirtualInterfaceStatePending = "pending" + // @enum VirtualInterfaceState + VirtualInterfaceStateAvailable = "available" + // @enum VirtualInterfaceState + VirtualInterfaceStateDown = "down" + // @enum VirtualInterfaceState + VirtualInterfaceStateDeleting = "deleting" + // @enum VirtualInterfaceState + VirtualInterfaceStateDeleted = "deleted" + // @enum VirtualInterfaceState + VirtualInterfaceStateRejected = "rejected" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/directconnect/directconnectiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/directconnect/directconnectiface/interface.go new file mode 100644 index 000000000..96fb888f7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/directconnect/directconnectiface/interface.go @@ -0,0 +1,98 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package directconnectiface provides an interface for the AWS Direct Connect. +package directconnectiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/directconnect" +) + +// DirectConnectAPI is the interface type for directconnect.DirectConnect. 
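+//
+// A minimal sketch of stubbing the client in a unit test (the mock type name
+// is hypothetical; embedding the interface satisfies the remaining methods,
+// which panic if called without a concrete implementation):
+//
+//    type mockDirectConnectClient struct {
+//        directconnectiface.DirectConnectAPI
+//    }
+//
+//    func (m *mockDirectConnectClient) DescribeLocations(in *directconnect.DescribeLocationsInput) (*directconnect.DescribeLocationsOutput, error) {
+//        // return a canned response instead of calling AWS
+//        return &directconnect.DescribeLocationsOutput{}, nil
+//    }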
+type DirectConnectAPI interface { + AllocateConnectionOnInterconnectRequest(*directconnect.AllocateConnectionOnInterconnectInput) (*request.Request, *directconnect.Connection) + + AllocateConnectionOnInterconnect(*directconnect.AllocateConnectionOnInterconnectInput) (*directconnect.Connection, error) + + AllocatePrivateVirtualInterfaceRequest(*directconnect.AllocatePrivateVirtualInterfaceInput) (*request.Request, *directconnect.VirtualInterface) + + AllocatePrivateVirtualInterface(*directconnect.AllocatePrivateVirtualInterfaceInput) (*directconnect.VirtualInterface, error) + + AllocatePublicVirtualInterfaceRequest(*directconnect.AllocatePublicVirtualInterfaceInput) (*request.Request, *directconnect.VirtualInterface) + + AllocatePublicVirtualInterface(*directconnect.AllocatePublicVirtualInterfaceInput) (*directconnect.VirtualInterface, error) + + ConfirmConnectionRequest(*directconnect.ConfirmConnectionInput) (*request.Request, *directconnect.ConfirmConnectionOutput) + + ConfirmConnection(*directconnect.ConfirmConnectionInput) (*directconnect.ConfirmConnectionOutput, error) + + ConfirmPrivateVirtualInterfaceRequest(*directconnect.ConfirmPrivateVirtualInterfaceInput) (*request.Request, *directconnect.ConfirmPrivateVirtualInterfaceOutput) + + ConfirmPrivateVirtualInterface(*directconnect.ConfirmPrivateVirtualInterfaceInput) (*directconnect.ConfirmPrivateVirtualInterfaceOutput, error) + + ConfirmPublicVirtualInterfaceRequest(*directconnect.ConfirmPublicVirtualInterfaceInput) (*request.Request, *directconnect.ConfirmPublicVirtualInterfaceOutput) + + ConfirmPublicVirtualInterface(*directconnect.ConfirmPublicVirtualInterfaceInput) (*directconnect.ConfirmPublicVirtualInterfaceOutput, error) + + CreateConnectionRequest(*directconnect.CreateConnectionInput) (*request.Request, *directconnect.Connection) + + CreateConnection(*directconnect.CreateConnectionInput) (*directconnect.Connection, error) + + CreateInterconnectRequest(*directconnect.CreateInterconnectInput) (*request.Request, *directconnect.Interconnect) + + CreateInterconnect(*directconnect.CreateInterconnectInput) (*directconnect.Interconnect, error) + + CreatePrivateVirtualInterfaceRequest(*directconnect.CreatePrivateVirtualInterfaceInput) (*request.Request, *directconnect.VirtualInterface) + + CreatePrivateVirtualInterface(*directconnect.CreatePrivateVirtualInterfaceInput) (*directconnect.VirtualInterface, error) + + CreatePublicVirtualInterfaceRequest(*directconnect.CreatePublicVirtualInterfaceInput) (*request.Request, *directconnect.VirtualInterface) + + CreatePublicVirtualInterface(*directconnect.CreatePublicVirtualInterfaceInput) (*directconnect.VirtualInterface, error) + + DeleteConnectionRequest(*directconnect.DeleteConnectionInput) (*request.Request, *directconnect.Connection) + + DeleteConnection(*directconnect.DeleteConnectionInput) (*directconnect.Connection, error) + + DeleteInterconnectRequest(*directconnect.DeleteInterconnectInput) (*request.Request, *directconnect.DeleteInterconnectOutput) + + DeleteInterconnect(*directconnect.DeleteInterconnectInput) (*directconnect.DeleteInterconnectOutput, error) + + DeleteVirtualInterfaceRequest(*directconnect.DeleteVirtualInterfaceInput) (*request.Request, *directconnect.DeleteVirtualInterfaceOutput) + + DeleteVirtualInterface(*directconnect.DeleteVirtualInterfaceInput) (*directconnect.DeleteVirtualInterfaceOutput, error) + + DescribeConnectionLoaRequest(*directconnect.DescribeConnectionLoaInput) (*request.Request, *directconnect.DescribeConnectionLoaOutput) + + 
DescribeConnectionLoa(*directconnect.DescribeConnectionLoaInput) (*directconnect.DescribeConnectionLoaOutput, error) + + DescribeConnectionsRequest(*directconnect.DescribeConnectionsInput) (*request.Request, *directconnect.Connections) + + DescribeConnections(*directconnect.DescribeConnectionsInput) (*directconnect.Connections, error) + + DescribeConnectionsOnInterconnectRequest(*directconnect.DescribeConnectionsOnInterconnectInput) (*request.Request, *directconnect.Connections) + + DescribeConnectionsOnInterconnect(*directconnect.DescribeConnectionsOnInterconnectInput) (*directconnect.Connections, error) + + DescribeInterconnectLoaRequest(*directconnect.DescribeInterconnectLoaInput) (*request.Request, *directconnect.DescribeInterconnectLoaOutput) + + DescribeInterconnectLoa(*directconnect.DescribeInterconnectLoaInput) (*directconnect.DescribeInterconnectLoaOutput, error) + + DescribeInterconnectsRequest(*directconnect.DescribeInterconnectsInput) (*request.Request, *directconnect.DescribeInterconnectsOutput) + + DescribeInterconnects(*directconnect.DescribeInterconnectsInput) (*directconnect.DescribeInterconnectsOutput, error) + + DescribeLocationsRequest(*directconnect.DescribeLocationsInput) (*request.Request, *directconnect.DescribeLocationsOutput) + + DescribeLocations(*directconnect.DescribeLocationsInput) (*directconnect.DescribeLocationsOutput, error) + + DescribeVirtualGatewaysRequest(*directconnect.DescribeVirtualGatewaysInput) (*request.Request, *directconnect.DescribeVirtualGatewaysOutput) + + DescribeVirtualGateways(*directconnect.DescribeVirtualGatewaysInput) (*directconnect.DescribeVirtualGatewaysOutput, error) + + DescribeVirtualInterfacesRequest(*directconnect.DescribeVirtualInterfacesInput) (*request.Request, *directconnect.DescribeVirtualInterfacesOutput) + + DescribeVirtualInterfaces(*directconnect.DescribeVirtualInterfacesInput) (*directconnect.DescribeVirtualInterfacesOutput, error) +} + +var _ DirectConnectAPI = (*directconnect.DirectConnect)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/directconnect/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/directconnect/examples_test.go new file mode 100644 index 000000000..22ef66330 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/directconnect/examples_test.go @@ -0,0 +1,472 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package directconnect_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/directconnect" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleDirectConnect_AllocateConnectionOnInterconnect() { + svc := directconnect.New(session.New()) + + params := &directconnect.AllocateConnectionOnInterconnectInput{ + Bandwidth: aws.String("Bandwidth"), // Required + ConnectionName: aws.String("ConnectionName"), // Required + InterconnectId: aws.String("InterconnectId"), // Required + OwnerAccount: aws.String("OwnerAccount"), // Required + Vlan: aws.Int64(1), // Required + } + resp, err := svc.AllocateConnectionOnInterconnect(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDirectConnect_AllocatePrivateVirtualInterface() { + svc := directconnect.New(session.New()) + + params := &directconnect.AllocatePrivateVirtualInterfaceInput{ + ConnectionId: aws.String("ConnectionId"), // Required + NewPrivateVirtualInterfaceAllocation: &directconnect.NewPrivateVirtualInterfaceAllocation{ // Required + Asn: aws.Int64(1), // Required + VirtualInterfaceName: aws.String("VirtualInterfaceName"), // Required + Vlan: aws.Int64(1), // Required + AmazonAddress: aws.String("AmazonAddress"), + AuthKey: aws.String("BGPAuthKey"), + CustomerAddress: aws.String("CustomerAddress"), + }, + OwnerAccount: aws.String("OwnerAccount"), // Required + } + resp, err := svc.AllocatePrivateVirtualInterface(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectConnect_AllocatePublicVirtualInterface() { + svc := directconnect.New(session.New()) + + params := &directconnect.AllocatePublicVirtualInterfaceInput{ + ConnectionId: aws.String("ConnectionId"), // Required + NewPublicVirtualInterfaceAllocation: &directconnect.NewPublicVirtualInterfaceAllocation{ // Required + AmazonAddress: aws.String("AmazonAddress"), // Required + Asn: aws.Int64(1), // Required + CustomerAddress: aws.String("CustomerAddress"), // Required + RouteFilterPrefixes: []*directconnect.RouteFilterPrefix{ // Required + { // Required + Cidr: aws.String("CIDR"), + }, + // More values... + }, + VirtualInterfaceName: aws.String("VirtualInterfaceName"), // Required + Vlan: aws.Int64(1), // Required + AuthKey: aws.String("BGPAuthKey"), + }, + OwnerAccount: aws.String("OwnerAccount"), // Required + } + resp, err := svc.AllocatePublicVirtualInterface(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectConnect_ConfirmConnection() { + svc := directconnect.New(session.New()) + + params := &directconnect.ConfirmConnectionInput{ + ConnectionId: aws.String("ConnectionId"), // Required + } + resp, err := svc.ConfirmConnection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectConnect_ConfirmPrivateVirtualInterface() { + svc := directconnect.New(session.New()) + + params := &directconnect.ConfirmPrivateVirtualInterfaceInput{ + VirtualGatewayId: aws.String("VirtualGatewayId"), // Required + VirtualInterfaceId: aws.String("VirtualInterfaceId"), // Required + } + resp, err := svc.ConfirmPrivateVirtualInterface(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectConnect_ConfirmPublicVirtualInterface() { + svc := directconnect.New(session.New()) + + params := &directconnect.ConfirmPublicVirtualInterfaceInput{ + VirtualInterfaceId: aws.String("VirtualInterfaceId"), // Required + } + resp, err := svc.ConfirmPublicVirtualInterface(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectConnect_CreateConnection() { + svc := directconnect.New(session.New()) + + params := &directconnect.CreateConnectionInput{ + Bandwidth: aws.String("Bandwidth"), // Required + ConnectionName: aws.String("ConnectionName"), // Required + Location: aws.String("LocationCode"), // Required + } + resp, err := svc.CreateConnection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectConnect_CreateInterconnect() { + svc := directconnect.New(session.New()) + + params := &directconnect.CreateInterconnectInput{ + Bandwidth: aws.String("Bandwidth"), // Required + InterconnectName: aws.String("InterconnectName"), // Required + Location: aws.String("LocationCode"), // Required + } + resp, err := svc.CreateInterconnect(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectConnect_CreatePrivateVirtualInterface() { + svc := directconnect.New(session.New()) + + params := &directconnect.CreatePrivateVirtualInterfaceInput{ + ConnectionId: aws.String("ConnectionId"), // Required + NewPrivateVirtualInterface: &directconnect.NewPrivateVirtualInterface{ // Required + Asn: aws.Int64(1), // Required + VirtualGatewayId: aws.String("VirtualGatewayId"), // Required + VirtualInterfaceName: aws.String("VirtualInterfaceName"), // Required + Vlan: aws.Int64(1), // Required + AmazonAddress: aws.String("AmazonAddress"), + AuthKey: aws.String("BGPAuthKey"), + CustomerAddress: aws.String("CustomerAddress"), + }, + } + resp, err := svc.CreatePrivateVirtualInterface(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectConnect_CreatePublicVirtualInterface() { + svc := directconnect.New(session.New()) + + params := &directconnect.CreatePublicVirtualInterfaceInput{ + ConnectionId: aws.String("ConnectionId"), // Required + NewPublicVirtualInterface: &directconnect.NewPublicVirtualInterface{ // Required + AmazonAddress: aws.String("AmazonAddress"), // Required + Asn: aws.Int64(1), // Required + CustomerAddress: aws.String("CustomerAddress"), // Required + RouteFilterPrefixes: []*directconnect.RouteFilterPrefix{ // Required + { // Required + Cidr: aws.String("CIDR"), + }, + // More values... + }, + VirtualInterfaceName: aws.String("VirtualInterfaceName"), // Required + Vlan: aws.Int64(1), // Required + AuthKey: aws.String("BGPAuthKey"), + }, + } + resp, err := svc.CreatePublicVirtualInterface(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDirectConnect_DeleteConnection() { + svc := directconnect.New(session.New()) + + params := &directconnect.DeleteConnectionInput{ + ConnectionId: aws.String("ConnectionId"), // Required + } + resp, err := svc.DeleteConnection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectConnect_DeleteInterconnect() { + svc := directconnect.New(session.New()) + + params := &directconnect.DeleteInterconnectInput{ + InterconnectId: aws.String("InterconnectId"), // Required + } + resp, err := svc.DeleteInterconnect(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectConnect_DeleteVirtualInterface() { + svc := directconnect.New(session.New()) + + params := &directconnect.DeleteVirtualInterfaceInput{ + VirtualInterfaceId: aws.String("VirtualInterfaceId"), // Required + } + resp, err := svc.DeleteVirtualInterface(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectConnect_DescribeConnectionLoa() { + svc := directconnect.New(session.New()) + + params := &directconnect.DescribeConnectionLoaInput{ + ConnectionId: aws.String("ConnectionId"), // Required + LoaContentType: aws.String("LoaContentType"), + ProviderName: aws.String("ProviderName"), + } + resp, err := svc.DescribeConnectionLoa(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectConnect_DescribeConnections() { + svc := directconnect.New(session.New()) + + params := &directconnect.DescribeConnectionsInput{ + ConnectionId: aws.String("ConnectionId"), + } + resp, err := svc.DescribeConnections(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectConnect_DescribeConnectionsOnInterconnect() { + svc := directconnect.New(session.New()) + + params := &directconnect.DescribeConnectionsOnInterconnectInput{ + InterconnectId: aws.String("InterconnectId"), // Required + } + resp, err := svc.DescribeConnectionsOnInterconnect(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectConnect_DescribeInterconnectLoa() { + svc := directconnect.New(session.New()) + + params := &directconnect.DescribeInterconnectLoaInput{ + InterconnectId: aws.String("InterconnectId"), // Required + LoaContentType: aws.String("LoaContentType"), + ProviderName: aws.String("ProviderName"), + } + resp, err := svc.DescribeInterconnectLoa(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectConnect_DescribeInterconnects() { + svc := directconnect.New(session.New()) + + params := &directconnect.DescribeInterconnectsInput{ + InterconnectId: aws.String("InterconnectId"), + } + resp, err := svc.DescribeInterconnects(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectConnect_DescribeLocations() { + svc := directconnect.New(session.New()) + + var params *directconnect.DescribeLocationsInput + resp, err := svc.DescribeLocations(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectConnect_DescribeVirtualGateways() { + svc := directconnect.New(session.New()) + + var params *directconnect.DescribeVirtualGatewaysInput + resp, err := svc.DescribeVirtualGateways(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectConnect_DescribeVirtualInterfaces() { + svc := directconnect.New(session.New()) + + params := &directconnect.DescribeVirtualInterfacesInput{ + ConnectionId: aws.String("ConnectionId"), + VirtualInterfaceId: aws.String("VirtualInterfaceId"), + } + resp, err := svc.DescribeVirtualInterfaces(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/directconnect/service.go b/vendor/github.com/aws/aws-sdk-go/service/directconnect/service.go new file mode 100644 index 000000000..d6fe21435 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/directconnect/service.go @@ -0,0 +1,99 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package directconnect + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// AWS Direct Connect links your internal network to an AWS Direct Connect location +// over a standard 1 gigabit or 10 gigabit Ethernet fiber-optic cable. One end +// of the cable is connected to your router, the other to an AWS Direct Connect +// router. With this connection in place, you can create virtual interfaces +// directly to the AWS cloud (for example, to Amazon Elastic Compute Cloud (Amazon +// EC2) and Amazon Simple Storage Service (Amazon S3)) and to Amazon Virtual +// Private Cloud (Amazon VPC), bypassing Internet service providers in your +// network path. An AWS Direct Connect location provides access to AWS in the +// region it is associated with, as well as access to other US regions. For +// example, you can provision a single connection to any AWS Direct Connect +// location in the US and use it to access public AWS services in all US Regions +// and AWS GovCloud (US). 
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type DirectConnect struct {
+    *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "directconnect"
+
+// New creates a new instance of the DirectConnect client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create a DirectConnect client from just a session.
+//     svc := directconnect.New(mySession)
+//
+//     // Create a DirectConnect client with additional configuration
+//     svc := directconnect.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *DirectConnect {
+    c := p.ClientConfig(ServiceName, cfgs...)
+    return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *DirectConnect {
+    svc := &DirectConnect{
+        Client: client.New(
+            cfg,
+            metadata.ClientInfo{
+                ServiceName:   ServiceName,
+                SigningRegion: signingRegion,
+                Endpoint:      endpoint,
+                APIVersion:    "2012-10-25",
+                JSONVersion:   "1.1",
+                TargetPrefix:  "OvertureService",
+            },
+            handlers,
+        ),
+    }
+
+    // Handlers
+    svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+    svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler)
+    svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler)
+    svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler)
+    svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler)
+
+    // Run custom client initialization if present
+    if initClient != nil {
+        initClient(svc.Client)
+    }
+
+    return svc
+}
+
+// newRequest creates a new request for a DirectConnect operation and runs any
+// custom request initialization.
+func (c *DirectConnect) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+    req := c.NewRequest(op, params, data)
+
+    // Run custom request initialization if present
+    if initRequest != nil {
+        initRequest(req)
+    }
+
+    return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/directoryservice/api.go b/vendor/github.com/aws/aws-sdk-go/service/directoryservice/api.go
new file mode 100644
index 000000000..0cd145ee6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/directoryservice/api.go
@@ -0,0 +1,4253 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package directoryservice provides a client for AWS Directory Service.
+package directoryservice
+
+import (
+    "fmt"
+    "time"
+
+    "github.com/aws/aws-sdk-go/aws/awsutil"
+    "github.com/aws/aws-sdk-go/aws/request"
+)
+
+const opAddTagsToResource = "AddTagsToResource"
+
+// AddTagsToResourceRequest generates a "aws/request.Request" representing the
+// client's request for the AddTagsToResource operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddTagsToResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddTagsToResourceRequest method. +// req, resp := client.AddTagsToResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectoryService) AddTagsToResourceRequest(input *AddTagsToResourceInput) (req *request.Request, output *AddTagsToResourceOutput) { + op := &request.Operation{ + Name: opAddTagsToResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddTagsToResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &AddTagsToResourceOutput{} + req.Data = output + return +} + +// Adds or overwrites one or more tags for the specified Amazon Directory Services +// directory. Each directory can have a maximum of 10 tags. Each tag consists +// of a key and optional value. Tag keys must be unique per resource. +func (c *DirectoryService) AddTagsToResource(input *AddTagsToResourceInput) (*AddTagsToResourceOutput, error) { + req, out := c.AddTagsToResourceRequest(input) + err := req.Send() + return out, err +} + +const opConnectDirectory = "ConnectDirectory" + +// ConnectDirectoryRequest generates a "aws/request.Request" representing the +// client's request for the ConnectDirectory operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ConnectDirectory method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ConnectDirectoryRequest method. +// req, resp := client.ConnectDirectoryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectoryService) ConnectDirectoryRequest(input *ConnectDirectoryInput) (req *request.Request, output *ConnectDirectoryOutput) { + op := &request.Operation{ + Name: opConnectDirectory, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ConnectDirectoryInput{} + } + + req = c.newRequest(op, input, output) + output = &ConnectDirectoryOutput{} + req.Data = output + return +} + +// Creates an AD Connector to connect to an on-premises directory. +func (c *DirectoryService) ConnectDirectory(input *ConnectDirectoryInput) (*ConnectDirectoryOutput, error) { + req, out := c.ConnectDirectoryRequest(input) + err := req.Send() + return out, err +} + +const opCreateAlias = "CreateAlias" + +// CreateAliasRequest generates a "aws/request.Request" representing the +// client's request for the CreateAlias operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CreateAlias method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the CreateAliasRequest method.
+//    req, resp := client.CreateAliasRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *DirectoryService) CreateAliasRequest(input *CreateAliasInput) (req *request.Request, output *CreateAliasOutput) {
+    op := &request.Operation{
+        Name:       opCreateAlias,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &CreateAliasInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &CreateAliasOutput{}
+    req.Data = output
+    return
+}
+
+// Creates an alias for a directory and assigns the alias to the directory.
+// The alias is used to construct the access URL for the directory, such as
+// http://<alias>.awsapps.com.
+//
+// After an alias has been created, it cannot be deleted or reused, so this
+// operation should only be used when absolutely necessary.
+func (c *DirectoryService) CreateAlias(input *CreateAliasInput) (*CreateAliasOutput, error) {
+    req, out := c.CreateAliasRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opCreateComputer = "CreateComputer"
+
+// CreateComputerRequest generates a "aws/request.Request" representing the
+// client's request for the CreateComputer operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CreateComputer method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the CreateComputerRequest method.
+//    req, resp := client.CreateComputerRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *DirectoryService) CreateComputerRequest(input *CreateComputerInput) (req *request.Request, output *CreateComputerOutput) {
+    op := &request.Operation{
+        Name:       opCreateComputer,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &CreateComputerInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &CreateComputerOutput{}
+    req.Data = output
+    return
+}
+
+// Creates a computer account in the specified directory, and joins the computer
+// to the directory.
+func (c *DirectoryService) CreateComputer(input *CreateComputerInput) (*CreateComputerOutput, error) {
+    req, out := c.CreateComputerRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opCreateConditionalForwarder = "CreateConditionalForwarder"
+
+// CreateConditionalForwarderRequest generates a "aws/request.Request" representing the
+// client's request for the CreateConditionalForwarder operation.
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateConditionalForwarder method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateConditionalForwarderRequest method. +// req, resp := client.CreateConditionalForwarderRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectoryService) CreateConditionalForwarderRequest(input *CreateConditionalForwarderInput) (req *request.Request, output *CreateConditionalForwarderOutput) { + op := &request.Operation{ + Name: opCreateConditionalForwarder, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateConditionalForwarderInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateConditionalForwarderOutput{} + req.Data = output + return +} + +// Creates a conditional forwarder associated with your AWS directory. Conditional +// forwarders are required in order to set up a trust relationship with another +// domain. The conditional forwarder points to the trusted domain. +func (c *DirectoryService) CreateConditionalForwarder(input *CreateConditionalForwarderInput) (*CreateConditionalForwarderOutput, error) { + req, out := c.CreateConditionalForwarderRequest(input) + err := req.Send() + return out, err +} + +const opCreateDirectory = "CreateDirectory" + +// CreateDirectoryRequest generates a "aws/request.Request" representing the +// client's request for the CreateDirectory operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDirectory method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDirectoryRequest method. +// req, resp := client.CreateDirectoryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectoryService) CreateDirectoryRequest(input *CreateDirectoryInput) (req *request.Request, output *CreateDirectoryOutput) { + op := &request.Operation{ + Name: opCreateDirectory, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDirectoryInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDirectoryOutput{} + req.Data = output + return +} + +// Creates a Simple AD directory. 
+func (c *DirectoryService) CreateDirectory(input *CreateDirectoryInput) (*CreateDirectoryOutput, error) { + req, out := c.CreateDirectoryRequest(input) + err := req.Send() + return out, err +} + +const opCreateMicrosoftAD = "CreateMicrosoftAD" + +// CreateMicrosoftADRequest generates a "aws/request.Request" representing the +// client's request for the CreateMicrosoftAD operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateMicrosoftAD method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateMicrosoftADRequest method. +// req, resp := client.CreateMicrosoftADRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectoryService) CreateMicrosoftADRequest(input *CreateMicrosoftADInput) (req *request.Request, output *CreateMicrosoftADOutput) { + op := &request.Operation{ + Name: opCreateMicrosoftAD, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateMicrosoftADInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateMicrosoftADOutput{} + req.Data = output + return +} + +// Creates a Microsoft AD in the AWS cloud. +func (c *DirectoryService) CreateMicrosoftAD(input *CreateMicrosoftADInput) (*CreateMicrosoftADOutput, error) { + req, out := c.CreateMicrosoftADRequest(input) + err := req.Send() + return out, err +} + +const opCreateSnapshot = "CreateSnapshot" + +// CreateSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the CreateSnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateSnapshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateSnapshotRequest method. +// req, resp := client.CreateSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectoryService) CreateSnapshotRequest(input *CreateSnapshotInput) (req *request.Request, output *CreateSnapshotOutput) { + op := &request.Operation{ + Name: opCreateSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateSnapshotOutput{} + req.Data = output + return +} + +// Creates a snapshot of a Simple AD or Microsoft AD directory in the AWS cloud. +// +// You cannot take snapshots of AD Connector directories. 
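+//
+// For reference, a minimal call might look like the following sketch; the
+// DirectoryId value is an illustrative placeholder:
+//
+//    svc := directoryservice.New(session.New())
+//    resp, err := svc.CreateSnapshot(&directoryservice.CreateSnapshotInput{
+//        DirectoryId: aws.String("d-1234567890"), // Required
+//    })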
+func (c *DirectoryService) CreateSnapshot(input *CreateSnapshotInput) (*CreateSnapshotOutput, error) { + req, out := c.CreateSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opCreateTrust = "CreateTrust" + +// CreateTrustRequest generates a "aws/request.Request" representing the +// client's request for the CreateTrust operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateTrust method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateTrustRequest method. +// req, resp := client.CreateTrustRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectoryService) CreateTrustRequest(input *CreateTrustInput) (req *request.Request, output *CreateTrustOutput) { + op := &request.Operation{ + Name: opCreateTrust, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTrustInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateTrustOutput{} + req.Data = output + return +} + +// AWS Directory Service for Microsoft Active Directory allows you to configure +// trust relationships. For example, you can establish a trust between your +// Microsoft AD in the AWS cloud, and your existing on-premises Microsoft Active +// Directory. This would allow you to provide users and groups access to resources +// in either domain, with a single set of credentials. +// +// This action initiates the creation of the AWS side of a trust relationship +// between a Microsoft AD in the AWS cloud and an external domain. +func (c *DirectoryService) CreateTrust(input *CreateTrustInput) (*CreateTrustOutput, error) { + req, out := c.CreateTrustRequest(input) + err := req.Send() + return out, err +} + +const opDeleteConditionalForwarder = "DeleteConditionalForwarder" + +// DeleteConditionalForwarderRequest generates a "aws/request.Request" representing the +// client's request for the DeleteConditionalForwarder operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteConditionalForwarder method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteConditionalForwarderRequest method. 
+// req, resp := client.DeleteConditionalForwarderRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectoryService) DeleteConditionalForwarderRequest(input *DeleteConditionalForwarderInput) (req *request.Request, output *DeleteConditionalForwarderOutput) { + op := &request.Operation{ + Name: opDeleteConditionalForwarder, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteConditionalForwarderInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteConditionalForwarderOutput{} + req.Data = output + return +} + +// Deletes a conditional forwarder that has been set up for your AWS directory. +func (c *DirectoryService) DeleteConditionalForwarder(input *DeleteConditionalForwarderInput) (*DeleteConditionalForwarderOutput, error) { + req, out := c.DeleteConditionalForwarderRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDirectory = "DeleteDirectory" + +// DeleteDirectoryRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDirectory operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDirectory method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteDirectoryRequest method. +// req, resp := client.DeleteDirectoryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectoryService) DeleteDirectoryRequest(input *DeleteDirectoryInput) (req *request.Request, output *DeleteDirectoryOutput) { + op := &request.Operation{ + Name: opDeleteDirectory, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDirectoryInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteDirectoryOutput{} + req.Data = output + return +} + +// Deletes an AWS Directory Service directory. +func (c *DirectoryService) DeleteDirectory(input *DeleteDirectoryInput) (*DeleteDirectoryOutput, error) { + req, out := c.DeleteDirectoryRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSnapshot = "DeleteSnapshot" + +// DeleteSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteSnapshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteSnapshotRequest method. 
+// req, resp := client.DeleteSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectoryService) DeleteSnapshotRequest(input *DeleteSnapshotInput) (req *request.Request, output *DeleteSnapshotOutput) { + op := &request.Operation{ + Name: opDeleteSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteSnapshotOutput{} + req.Data = output + return +} + +// Deletes a directory snapshot. +func (c *DirectoryService) DeleteSnapshot(input *DeleteSnapshotInput) (*DeleteSnapshotOutput, error) { + req, out := c.DeleteSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opDeleteTrust = "DeleteTrust" + +// DeleteTrustRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTrust operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteTrust method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteTrustRequest method. +// req, resp := client.DeleteTrustRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectoryService) DeleteTrustRequest(input *DeleteTrustInput) (req *request.Request, output *DeleteTrustOutput) { + op := &request.Operation{ + Name: opDeleteTrust, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTrustInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteTrustOutput{} + req.Data = output + return +} + +// Deletes an existing trust relationship between your Microsoft AD in the AWS +// cloud and an external domain. +func (c *DirectoryService) DeleteTrust(input *DeleteTrustInput) (*DeleteTrustOutput, error) { + req, out := c.DeleteTrustRequest(input) + err := req.Send() + return out, err +} + +const opDeregisterEventTopic = "DeregisterEventTopic" + +// DeregisterEventTopicRequest generates a "aws/request.Request" representing the +// client's request for the DeregisterEventTopic operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeregisterEventTopic method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeregisterEventTopicRequest method. 
+// req, resp := client.DeregisterEventTopicRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectoryService) DeregisterEventTopicRequest(input *DeregisterEventTopicInput) (req *request.Request, output *DeregisterEventTopicOutput) { + op := &request.Operation{ + Name: opDeregisterEventTopic, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterEventTopicInput{} + } + + req = c.newRequest(op, input, output) + output = &DeregisterEventTopicOutput{} + req.Data = output + return +} + +// Removes the specified directory as a publisher to the specified SNS topic. +func (c *DirectoryService) DeregisterEventTopic(input *DeregisterEventTopicInput) (*DeregisterEventTopicOutput, error) { + req, out := c.DeregisterEventTopicRequest(input) + err := req.Send() + return out, err +} + +const opDescribeConditionalForwarders = "DescribeConditionalForwarders" + +// DescribeConditionalForwardersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeConditionalForwarders operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeConditionalForwarders method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeConditionalForwardersRequest method. +// req, resp := client.DescribeConditionalForwardersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectoryService) DescribeConditionalForwardersRequest(input *DescribeConditionalForwardersInput) (req *request.Request, output *DescribeConditionalForwardersOutput) { + op := &request.Operation{ + Name: opDescribeConditionalForwarders, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeConditionalForwardersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeConditionalForwardersOutput{} + req.Data = output + return +} + +// Obtains information about the conditional forwarders for this account. +// +// If no input parameters are provided for RemoteDomainNames, this request +// describes all conditional forwarders for the specified directory ID. +func (c *DirectoryService) DescribeConditionalForwarders(input *DescribeConditionalForwardersInput) (*DescribeConditionalForwardersOutput, error) { + req, out := c.DescribeConditionalForwardersRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDirectories = "DescribeDirectories" + +// DescribeDirectoriesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDirectories operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the DescribeDirectories method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDirectoriesRequest method. +// req, resp := client.DescribeDirectoriesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectoryService) DescribeDirectoriesRequest(input *DescribeDirectoriesInput) (req *request.Request, output *DescribeDirectoriesOutput) { + op := &request.Operation{ + Name: opDescribeDirectories, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDirectoriesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDirectoriesOutput{} + req.Data = output + return +} + +// Obtains information about the directories that belong to this account. +// +// You can retrieve information about specific directories by passing the directory +// identifiers in the DirectoryIds parameter. Otherwise, all directories that +// belong to the current account are returned. +// +// This operation supports pagination with the use of the NextToken request +// and response parameters. If more results are available, the DescribeDirectoriesResult.NextToken +// member contains a token that you pass in the next call to DescribeDirectories +// to retrieve the next set of items. +// +// You can also specify a maximum number of return results with the Limit parameter. +func (c *DirectoryService) DescribeDirectories(input *DescribeDirectoriesInput) (*DescribeDirectoriesOutput, error) { + req, out := c.DescribeDirectoriesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeEventTopics = "DescribeEventTopics" + +// DescribeEventTopicsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEventTopics operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeEventTopics method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeEventTopicsRequest method. +// req, resp := client.DescribeEventTopicsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectoryService) DescribeEventTopicsRequest(input *DescribeEventTopicsInput) (req *request.Request, output *DescribeEventTopicsOutput) { + op := &request.Operation{ + Name: opDescribeEventTopics, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeEventTopicsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeEventTopicsOutput{} + req.Data = output + return +} + +// Obtains information about which SNS topics receive status messages from the +// specified directory. +// +// If no input parameters are provided, such as DirectoryId or TopicName, this +// request describes all of the associations in the account. 
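+//
+// For reference, a minimal call might look like the following sketch; the
+// DirectoryId value is an illustrative placeholder, and both input fields
+// may be omitted to describe all associations in the account:
+//
+//    svc := directoryservice.New(session.New())
+//    resp, err := svc.DescribeEventTopics(&directoryservice.DescribeEventTopicsInput{
+//        DirectoryId: aws.String("d-1234567890"),
+//    })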
+func (c *DirectoryService) DescribeEventTopics(input *DescribeEventTopicsInput) (*DescribeEventTopicsOutput, error) { + req, out := c.DescribeEventTopicsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSnapshots = "DescribeSnapshots" + +// DescribeSnapshotsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSnapshots operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeSnapshots method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeSnapshotsRequest method. +// req, resp := client.DescribeSnapshotsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectoryService) DescribeSnapshotsRequest(input *DescribeSnapshotsInput) (req *request.Request, output *DescribeSnapshotsOutput) { + op := &request.Operation{ + Name: opDescribeSnapshots, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSnapshotsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSnapshotsOutput{} + req.Data = output + return +} + +// Obtains information about the directory snapshots that belong to this account. +// +// This operation supports pagination with the use of the NextToken request +// and response parameters. If more results are available, the DescribeSnapshots.NextToken +// member contains a token that you pass in the next call to DescribeSnapshots +// to retrieve the next set of items. +// +// You can also specify a maximum number of return results with the Limit parameter. +func (c *DirectoryService) DescribeSnapshots(input *DescribeSnapshotsInput) (*DescribeSnapshotsOutput, error) { + req, out := c.DescribeSnapshotsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeTrusts = "DescribeTrusts" + +// DescribeTrustsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTrusts operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeTrusts method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeTrustsRequest method. 
+// req, resp := client.DescribeTrustsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectoryService) DescribeTrustsRequest(input *DescribeTrustsInput) (req *request.Request, output *DescribeTrustsOutput) { + op := &request.Operation{ + Name: opDescribeTrusts, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTrustsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTrustsOutput{} + req.Data = output + return +} + +// Obtains information about the trust relationships for this account. +// +// If no input parameters are provided, such as DirectoryId or TrustIds, this +// request describes all the trust relationships belonging to the account. +func (c *DirectoryService) DescribeTrusts(input *DescribeTrustsInput) (*DescribeTrustsOutput, error) { + req, out := c.DescribeTrustsRequest(input) + err := req.Send() + return out, err +} + +const opDisableRadius = "DisableRadius" + +// DisableRadiusRequest generates a "aws/request.Request" representing the +// client's request for the DisableRadius operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DisableRadius method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DisableRadiusRequest method. +// req, resp := client.DisableRadiusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectoryService) DisableRadiusRequest(input *DisableRadiusInput) (req *request.Request, output *DisableRadiusOutput) { + op := &request.Operation{ + Name: opDisableRadius, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableRadiusInput{} + } + + req = c.newRequest(op, input, output) + output = &DisableRadiusOutput{} + req.Data = output + return +} + +// Disables multi-factor authentication (MFA) with the Remote Authentication +// Dial In User Service (RADIUS) server for an AD Connector directory. +func (c *DirectoryService) DisableRadius(input *DisableRadiusInput) (*DisableRadiusOutput, error) { + req, out := c.DisableRadiusRequest(input) + err := req.Send() + return out, err +} + +const opDisableSso = "DisableSso" + +// DisableSsoRequest generates a "aws/request.Request" representing the +// client's request for the DisableSso operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DisableSso method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DisableSsoRequest method. 
+//    req, resp := client.DisableSsoRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *DirectoryService) DisableSsoRequest(input *DisableSsoInput) (req *request.Request, output *DisableSsoOutput) {
+    op := &request.Operation{
+        Name:       opDisableSso,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &DisableSsoInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &DisableSsoOutput{}
+    req.Data = output
+    return
+}
+
+// Disables single sign-on for a directory.
+func (c *DirectoryService) DisableSso(input *DisableSsoInput) (*DisableSsoOutput, error) {
+    req, out := c.DisableSsoRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opEnableRadius = "EnableRadius"
+
+// EnableRadiusRequest generates a "aws/request.Request" representing the
+// client's request for the EnableRadius operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the EnableRadius method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the EnableRadiusRequest method.
+//    req, resp := client.EnableRadiusRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *DirectoryService) EnableRadiusRequest(input *EnableRadiusInput) (req *request.Request, output *EnableRadiusOutput) {
+    op := &request.Operation{
+        Name:       opEnableRadius,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &EnableRadiusInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &EnableRadiusOutput{}
+    req.Data = output
+    return
+}
+
+// Enables multi-factor authentication (MFA) with the Remote Authentication
+// Dial In User Service (RADIUS) server for an AD Connector directory.
+func (c *DirectoryService) EnableRadius(input *EnableRadiusInput) (*EnableRadiusOutput, error) {
+    req, out := c.EnableRadiusRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opEnableSso = "EnableSso"
+
+// EnableSsoRequest generates a "aws/request.Request" representing the
+// client's request for the EnableSso operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the EnableSso method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the EnableSsoRequest method.
+//    req, resp := client.EnableSsoRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *DirectoryService) EnableSsoRequest(input *EnableSsoInput) (req *request.Request, output *EnableSsoOutput) {
+    op := &request.Operation{
+        Name:       opEnableSso,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &EnableSsoInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &EnableSsoOutput{}
+    req.Data = output
+    return
+}
+
+// Enables single sign-on for a directory.
+func (c *DirectoryService) EnableSso(input *EnableSsoInput) (*EnableSsoOutput, error) {
+    req, out := c.EnableSsoRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opGetDirectoryLimits = "GetDirectoryLimits"
+
+// GetDirectoryLimitsRequest generates a "aws/request.Request" representing the
+// client's request for the GetDirectoryLimits operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the GetDirectoryLimits method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the GetDirectoryLimitsRequest method.
+//    req, resp := client.GetDirectoryLimitsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *DirectoryService) GetDirectoryLimitsRequest(input *GetDirectoryLimitsInput) (req *request.Request, output *GetDirectoryLimitsOutput) {
+    op := &request.Operation{
+        Name:       opGetDirectoryLimits,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &GetDirectoryLimitsInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &GetDirectoryLimitsOutput{}
+    req.Data = output
+    return
+}
+
+// Obtains directory limit information for the current region.
+func (c *DirectoryService) GetDirectoryLimits(input *GetDirectoryLimitsInput) (*GetDirectoryLimitsOutput, error) {
+    req, out := c.GetDirectoryLimitsRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opGetSnapshotLimits = "GetSnapshotLimits"
+
+// GetSnapshotLimitsRequest generates a "aws/request.Request" representing the
+// client's request for the GetSnapshotLimits operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the GetSnapshotLimits method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the GetSnapshotLimitsRequest method.
+// req, resp := client.GetSnapshotLimitsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectoryService) GetSnapshotLimitsRequest(input *GetSnapshotLimitsInput) (req *request.Request, output *GetSnapshotLimitsOutput) { + op := &request.Operation{ + Name: opGetSnapshotLimits, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSnapshotLimitsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetSnapshotLimitsOutput{} + req.Data = output + return +} + +// Obtains the manual snapshot limits for a directory. +func (c *DirectoryService) GetSnapshotLimits(input *GetSnapshotLimitsInput) (*GetSnapshotLimitsOutput, error) { + req, out := c.GetSnapshotLimitsRequest(input) + err := req.Send() + return out, err +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTagsForResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectoryService) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTagsForResourceOutput{} + req.Data = output + return +} + +// Lists all tags on an Amazon Directory Services directory. +func (c *DirectoryService) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + err := req.Send() + return out, err +} + +const opRegisterEventTopic = "RegisterEventTopic" + +// RegisterEventTopicRequest generates a "aws/request.Request" representing the +// client's request for the RegisterEventTopic operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RegisterEventTopic method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RegisterEventTopicRequest method. 
+// req, resp := client.RegisterEventTopicRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectoryService) RegisterEventTopicRequest(input *RegisterEventTopicInput) (req *request.Request, output *RegisterEventTopicOutput) { + op := &request.Operation{ + Name: opRegisterEventTopic, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterEventTopicInput{} + } + + req = c.newRequest(op, input, output) + output = &RegisterEventTopicOutput{} + req.Data = output + return +} + +// Associates a directory with an SNS topic. This establishes the directory +// as a publisher to the specified SNS topic. You can then receive email or +// text (SMS) messages when the status of your directory changes. You get notified +// if your directory goes from an Active status to an Impaired or Inoperable +// status. You also receive a notification when the directory returns to an +// Active status. +func (c *DirectoryService) RegisterEventTopic(input *RegisterEventTopicInput) (*RegisterEventTopicOutput, error) { + req, out := c.RegisterEventTopicRequest(input) + err := req.Send() + return out, err +} + +const opRemoveTagsFromResource = "RemoveTagsFromResource" + +// RemoveTagsFromResourceRequest generates a "aws/request.Request" representing the +// client's request for the RemoveTagsFromResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RemoveTagsFromResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RemoveTagsFromResourceRequest method. +// req, resp := client.RemoveTagsFromResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectoryService) RemoveTagsFromResourceRequest(input *RemoveTagsFromResourceInput) (req *request.Request, output *RemoveTagsFromResourceOutput) { + op := &request.Operation{ + Name: opRemoveTagsFromResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveTagsFromResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &RemoveTagsFromResourceOutput{} + req.Data = output + return +} + +// Removes tags from an Amazon Directory Services directory. +func (c *DirectoryService) RemoveTagsFromResource(input *RemoveTagsFromResourceInput) (*RemoveTagsFromResourceOutput, error) { + req, out := c.RemoveTagsFromResourceRequest(input) + err := req.Send() + return out, err +} + +const opRestoreFromSnapshot = "RestoreFromSnapshot" + +// RestoreFromSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the RestoreFromSnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RestoreFromSnapshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RestoreFromSnapshotRequest method. +// req, resp := client.RestoreFromSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectoryService) RestoreFromSnapshotRequest(input *RestoreFromSnapshotInput) (req *request.Request, output *RestoreFromSnapshotOutput) { + op := &request.Operation{ + Name: opRestoreFromSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RestoreFromSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &RestoreFromSnapshotOutput{} + req.Data = output + return +} + +// Restores a directory using an existing directory snapshot. +// +// When you restore a directory from a snapshot, any changes made to the directory +// after the snapshot date are overwritten. +// +// This action returns as soon as the restore operation is initiated. You can +// monitor the progress of the restore operation by calling the DescribeDirectories +// operation with the directory identifier. When the DirectoryDescription.Stage +// value changes to Active, the restore operation is complete. +func (c *DirectoryService) RestoreFromSnapshot(input *RestoreFromSnapshotInput) (*RestoreFromSnapshotOutput, error) { + req, out := c.RestoreFromSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opUpdateConditionalForwarder = "UpdateConditionalForwarder" + +// UpdateConditionalForwarderRequest generates a "aws/request.Request" representing the +// client's request for the UpdateConditionalForwarder operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateConditionalForwarder method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateConditionalForwarderRequest method. +// req, resp := client.UpdateConditionalForwarderRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectoryService) UpdateConditionalForwarderRequest(input *UpdateConditionalForwarderInput) (req *request.Request, output *UpdateConditionalForwarderOutput) { + op := &request.Operation{ + Name: opUpdateConditionalForwarder, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateConditionalForwarderInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateConditionalForwarderOutput{} + req.Data = output + return +} + +// Updates a conditional forwarder that has been set up for your AWS directory. 
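+//
+// For reference, a minimal call might look like the following sketch; the
+// field values are illustrative placeholders:
+//
+//    svc := directoryservice.New(session.New())
+//    resp, err := svc.UpdateConditionalForwarder(&directoryservice.UpdateConditionalForwarderInput{
+//        DirectoryId:      aws.String("d-1234567890"),           // Required
+//        RemoteDomainName: aws.String("remote.example.com"),     // Required
+//        DnsIpAddrs:       []*string{aws.String("172.16.0.10")}, // Required
+//    })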
+func (c *DirectoryService) UpdateConditionalForwarder(input *UpdateConditionalForwarderInput) (*UpdateConditionalForwarderOutput, error) { + req, out := c.UpdateConditionalForwarderRequest(input) + err := req.Send() + return out, err +} + +const opUpdateRadius = "UpdateRadius" + +// UpdateRadiusRequest generates a "aws/request.Request" representing the +// client's request for the UpdateRadius operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateRadius method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateRadiusRequest method. +// req, resp := client.UpdateRadiusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectoryService) UpdateRadiusRequest(input *UpdateRadiusInput) (req *request.Request, output *UpdateRadiusOutput) { + op := &request.Operation{ + Name: opUpdateRadius, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateRadiusInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateRadiusOutput{} + req.Data = output + return +} + +// Updates the Remote Authentication Dial In User Service (RADIUS) server information +// for an AD Connector directory. +func (c *DirectoryService) UpdateRadius(input *UpdateRadiusInput) (*UpdateRadiusOutput, error) { + req, out := c.UpdateRadiusRequest(input) + err := req.Send() + return out, err +} + +const opVerifyTrust = "VerifyTrust" + +// VerifyTrustRequest generates a "aws/request.Request" representing the +// client's request for the VerifyTrust operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the VerifyTrust method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the VerifyTrustRequest method. +// req, resp := client.VerifyTrustRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectoryService) VerifyTrustRequest(input *VerifyTrustInput) (req *request.Request, output *VerifyTrustOutput) { + op := &request.Operation{ + Name: opVerifyTrust, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &VerifyTrustInput{} + } + + req = c.newRequest(op, input, output) + output = &VerifyTrustOutput{} + req.Data = output + return +} + +// AWS Directory Service for Microsoft Active Directory allows you to configure +// and verify trust relationships. +// +// This action verifies a trust relationship between your Microsoft AD in the +// AWS cloud and an external domain. 
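+//
+// A minimal usage sketch (hedged): assumes svc is a *DirectoryService client
+// and that VerifyTrustInput identifies the trust by a TrustId field, as
+// DeleteTrustInput does later in this file; the ID is a placeholder.
+//
+//    resp, err := svc.VerifyTrust(&directoryservice.VerifyTrustInput{
+//        TrustId: aws.String("t-1234567890abc"),
+//    })
+//    if err == nil {
+//        fmt.Println(resp)
+//    }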
+func (c *DirectoryService) VerifyTrust(input *VerifyTrustInput) (*VerifyTrustOutput, error) { + req, out := c.VerifyTrustRequest(input) + err := req.Send() + return out, err +} + +type AddTagsToResourceInput struct { + _ struct{} `type:"structure"` + + // The ID of the directory to which to add the tag. + ResourceId *string `type:"string" required:"true"` + + // The tags to be assigned to the Amazon Directory Services directory. + Tags []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s AddTagsToResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsToResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddTagsToResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddTagsToResourceInput"} + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type AddTagsToResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddTagsToResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsToResourceOutput) GoString() string { + return s.String() +} + +// Represents a named directory attribute. +type Attribute struct { + _ struct{} `type:"structure"` + + // The name of the attribute. + Name *string `min:"1" type:"string"` + + // The value of the attribute. + Value *string `type:"string"` +} + +// String returns the string representation +func (s Attribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Attribute) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Attribute) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Attribute"} + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains information about a computer account in a directory. +type Computer struct { + _ struct{} `type:"structure"` + + // An array of Attribute objects containing the LDAP attributes that belong + // to the computer account. + ComputerAttributes []*Attribute `type:"list"` + + // The identifier of the computer. + ComputerId *string `min:"1" type:"string"` + + // The computer name. + ComputerName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s Computer) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Computer) GoString() string { + return s.String() +} + +// Points to a remote domain with which you are setting up a trust relationship. +// Conditional forwarders are required in order to set up a trust relationship +// with another domain. 
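+//
+// A hedged reading sketch: conditional forwarders are typically retrieved via
+// DescribeConditionalForwarders (types defined later in this file); svc and
+// the directory ID are placeholders.
+//
+//    out, err := svc.DescribeConditionalForwarders(&directoryservice.DescribeConditionalForwardersInput{
+//        DirectoryId: aws.String("d-1234567890"),
+//    })
+//    if err == nil {
+//        for _, cf := range out.ConditionalForwarders {
+//            fmt.Println(aws.StringValue(cf.RemoteDomainName))
+//        }
+//    }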
+type ConditionalForwarder struct {
+	_ struct{} `type:"structure"`
+
+	// The IP addresses of the remote DNS server associated with RemoteDomainName.
+	// This is the IP address of the DNS server that your conditional forwarder
+	// points to.
+	DnsIpAddrs []*string `type:"list"`
+
+	// The fully qualified domain name (FQDN) of the remote domain pointed to by
+	// the conditional forwarder.
+	RemoteDomainName *string `type:"string"`
+
+	// The replication scope of the conditional forwarder. The only allowed value
+	// is Domain, which will replicate the conditional forwarder to all of the domain
+	// controllers for your AWS directory.
+	ReplicationScope *string `type:"string" enum:"ReplicationScope"`
+}
+
+// String returns the string representation
+func (s ConditionalForwarder) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ConditionalForwarder) GoString() string {
+	return s.String()
+}
+
+// Contains the inputs for the ConnectDirectory operation.
+type ConnectDirectoryInput struct {
+	_ struct{} `type:"structure"`
+
+	// A DirectoryConnectSettings object that contains additional information for
+	// the operation.
+	ConnectSettings *DirectoryConnectSettings `type:"structure" required:"true"`
+
+	// A textual description for the directory.
+	Description *string `type:"string"`
+
+	// The fully-qualified name of the on-premises directory, such as corp.example.com.
+	Name *string `type:"string" required:"true"`
+
+	// The password for the on-premises user account.
+	Password *string `min:"1" type:"string" required:"true"`
+
+	// The NetBIOS name of the on-premises directory, such as CORP.
+	ShortName *string `type:"string"`
+
+	// The size of the directory.
+	Size *string `type:"string" required:"true" enum:"DirectorySize"`
+}
+
+// String returns the string representation
+func (s ConnectDirectoryInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ConnectDirectoryInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ConnectDirectoryInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ConnectDirectoryInput"}
+	if s.ConnectSettings == nil {
+		invalidParams.Add(request.NewErrParamRequired("ConnectSettings"))
+	}
+	if s.Name == nil {
+		invalidParams.Add(request.NewErrParamRequired("Name"))
+	}
+	if s.Password == nil {
+		invalidParams.Add(request.NewErrParamRequired("Password"))
+	}
+	if s.Password != nil && len(*s.Password) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Password", 1))
+	}
+	if s.Size == nil {
+		invalidParams.Add(request.NewErrParamRequired("Size"))
+	}
+	if s.ConnectSettings != nil {
+		if err := s.ConnectSettings.Validate(); err != nil {
+			invalidParams.AddNested("ConnectSettings", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Contains the results of the ConnectDirectory operation.
+type ConnectDirectoryOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The identifier of the new directory.
+	DirectoryId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ConnectDirectoryOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ConnectDirectoryOutput) GoString() string {
+	return s.String()
+}
+
+// Contains the inputs for the CreateAlias operation.
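+//
+// A hedged construction sketch for the input type defined below (placeholder
+// values; the CreateAlias method itself is defined elsewhere in this file):
+//
+//    resp, err := svc.CreateAlias(&directoryservice.CreateAliasInput{
+//        Alias:       aws.String("myalias"),
+//        DirectoryId: aws.String("d-1234567890"),
+//    })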
+type CreateAliasInput struct { + _ struct{} `type:"structure"` + + // The requested alias. + // + // The alias must be unique amongst all aliases in AWS. This operation throws + // an EntityAlreadyExistsException error if the alias already exists. + Alias *string `min:"1" type:"string" required:"true"` + + // The identifier of the directory for which to create the alias. + DirectoryId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateAliasInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAliasInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateAliasInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateAliasInput"} + if s.Alias == nil { + invalidParams.Add(request.NewErrParamRequired("Alias")) + } + if s.Alias != nil && len(*s.Alias) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Alias", 1)) + } + if s.DirectoryId == nil { + invalidParams.Add(request.NewErrParamRequired("DirectoryId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the results of the CreateAlias operation. +type CreateAliasOutput struct { + _ struct{} `type:"structure"` + + // The alias for the directory. + Alias *string `min:"1" type:"string"` + + // The identifier of the directory. + DirectoryId *string `type:"string"` +} + +// String returns the string representation +func (s CreateAliasOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAliasOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the CreateComputer operation. +type CreateComputerInput struct { + _ struct{} `type:"structure"` + + // An array of Attribute objects that contain any LDAP attributes to apply to + // the computer account. + ComputerAttributes []*Attribute `type:"list"` + + // The name of the computer account. + ComputerName *string `min:"1" type:"string" required:"true"` + + // The identifier of the directory in which to create the computer account. + DirectoryId *string `type:"string" required:"true"` + + // The fully-qualified distinguished name of the organizational unit to place + // the computer account in. + OrganizationalUnitDistinguishedName *string `min:"1" type:"string"` + + // A one-time password that is used to join the computer to the directory. You + // should generate a random, strong password to use for this parameter. + Password *string `min:"8" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateComputerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateComputerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
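+//
+// A hedged sketch of the validation below (placeholder values; omitting the
+// required Password, or passing one shorter than eight characters, would
+// surface as an ErrInvalidParams):
+//
+//    in := &directoryservice.CreateComputerInput{
+//        ComputerName: aws.String("host-01"),
+//        DirectoryId:  aws.String("d-1234567890"),
+//        Password:     aws.String("oneTimePw!42"),
+//    }
+//    err := in.Validate() // nil for the values above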
+func (s *CreateComputerInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateComputerInput"}
+	if s.ComputerName == nil {
+		invalidParams.Add(request.NewErrParamRequired("ComputerName"))
+	}
+	if s.ComputerName != nil && len(*s.ComputerName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ComputerName", 1))
+	}
+	if s.DirectoryId == nil {
+		invalidParams.Add(request.NewErrParamRequired("DirectoryId"))
+	}
+	if s.OrganizationalUnitDistinguishedName != nil && len(*s.OrganizationalUnitDistinguishedName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("OrganizationalUnitDistinguishedName", 1))
+	}
+	if s.Password == nil {
+		invalidParams.Add(request.NewErrParamRequired("Password"))
+	}
+	if s.Password != nil && len(*s.Password) < 8 {
+		invalidParams.Add(request.NewErrParamMinLen("Password", 8))
+	}
+	if s.ComputerAttributes != nil {
+		for i, v := range s.ComputerAttributes {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ComputerAttributes", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Contains the results for the CreateComputer operation.
+type CreateComputerOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A Computer object that represents the computer account.
+	Computer *Computer `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateComputerOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateComputerOutput) GoString() string {
+	return s.String()
+}
+
+// Initiates the creation of a conditional forwarder for your AWS Directory
+// Service for Microsoft Active Directory. Conditional forwarders are required
+// in order to set up a trust relationship with another domain.
+type CreateConditionalForwarderInput struct {
+	_ struct{} `type:"structure"`
+
+	// The directory ID of the AWS directory for which you are creating the conditional
+	// forwarder.
+	DirectoryId *string `type:"string" required:"true"`
+
+	// The IP addresses of the remote DNS server associated with RemoteDomainName.
+	DnsIpAddrs []*string `type:"list" required:"true"`
+
+	// The fully qualified domain name (FQDN) of the remote domain with which you
+	// will set up a trust relationship.
+	RemoteDomainName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateConditionalForwarderInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateConditionalForwarderInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateConditionalForwarderInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateConditionalForwarderInput"}
+	if s.DirectoryId == nil {
+		invalidParams.Add(request.NewErrParamRequired("DirectoryId"))
+	}
+	if s.DnsIpAddrs == nil {
+		invalidParams.Add(request.NewErrParamRequired("DnsIpAddrs"))
+	}
+	if s.RemoteDomainName == nil {
+		invalidParams.Add(request.NewErrParamRequired("RemoteDomainName"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The result of a CreateConditionalForwarder request.
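+//
+// A hedged invocation sketch pairing the input type above with this output
+// (placeholder values; svc is a *DirectoryService client):
+//
+//    _, err := svc.CreateConditionalForwarder(&directoryservice.CreateConditionalForwarderInput{
+//        DirectoryId:      aws.String("d-1234567890"),
+//        RemoteDomainName: aws.String("corp.example.com"),
+//        DnsIpAddrs:       []*string{aws.String("10.0.0.1")},
+//    })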
+type CreateConditionalForwarderOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateConditionalForwarderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateConditionalForwarderOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the CreateDirectory operation. +type CreateDirectoryInput struct { + _ struct{} `type:"structure"` + + // A textual description for the directory. + Description *string `type:"string"` + + // The fully qualified name for the directory, such as corp.example.com. + Name *string `type:"string" required:"true"` + + // The password for the directory administrator. The directory creation process + // creates a directory administrator account with the username Administrator + // and this password. + Password *string `type:"string" required:"true"` + + // The short name of the directory, such as CORP. + ShortName *string `type:"string"` + + // The size of the directory. + Size *string `type:"string" required:"true" enum:"DirectorySize"` + + // A DirectoryVpcSettings object that contains additional information for the + // operation. + VpcSettings *DirectoryVpcSettings `type:"structure"` +} + +// String returns the string representation +func (s CreateDirectoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDirectoryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDirectoryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDirectoryInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Password == nil { + invalidParams.Add(request.NewErrParamRequired("Password")) + } + if s.Size == nil { + invalidParams.Add(request.NewErrParamRequired("Size")) + } + if s.VpcSettings != nil { + if err := s.VpcSettings.Validate(); err != nil { + invalidParams.AddNested("VpcSettings", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the results of the CreateDirectory operation. +type CreateDirectoryOutput struct { + _ struct{} `type:"structure"` + + // The identifier of the directory that was created. + DirectoryId *string `type:"string"` +} + +// String returns the string representation +func (s CreateDirectoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDirectoryOutput) GoString() string { + return s.String() +} + +// Creates a Microsoft AD in the AWS cloud. +type CreateMicrosoftADInput struct { + _ struct{} `type:"structure"` + + // A textual description for the directory. This label will appear on the AWS + // console Directory Details page after the directory is created. + Description *string `type:"string"` + + // The fully qualified domain name for the directory, such as corp.example.com. + // This name will resolve inside your VPC only. It does not need to be publicly + // resolvable. + Name *string `type:"string" required:"true"` + + // The password for the default administrative user named Admin. + Password *string `type:"string" required:"true"` + + // The NetBIOS name for your domain. A short identifier for your domain, such + // as CORP. 
If you don't specify a NetBIOS name, it will default to the first + // part of your directory DNS. For example, CORP for the directory DNS corp.example.com. + ShortName *string `type:"string"` + + // Contains VPC information for the CreateDirectory or CreateMicrosoftAD operation. + VpcSettings *DirectoryVpcSettings `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateMicrosoftADInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateMicrosoftADInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateMicrosoftADInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateMicrosoftADInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Password == nil { + invalidParams.Add(request.NewErrParamRequired("Password")) + } + if s.VpcSettings == nil { + invalidParams.Add(request.NewErrParamRequired("VpcSettings")) + } + if s.VpcSettings != nil { + if err := s.VpcSettings.Validate(); err != nil { + invalidParams.AddNested("VpcSettings", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Result of a CreateMicrosoftAD request. +type CreateMicrosoftADOutput struct { + _ struct{} `type:"structure"` + + // The identifier of the directory that was created. + DirectoryId *string `type:"string"` +} + +// String returns the string representation +func (s CreateMicrosoftADOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateMicrosoftADOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the CreateSnapshot operation. +type CreateSnapshotInput struct { + _ struct{} `type:"structure"` + + // The identifier of the directory of which to take a snapshot. + DirectoryId *string `type:"string" required:"true"` + + // The descriptive name to apply to the snapshot. + Name *string `type:"string"` +} + +// String returns the string representation +func (s CreateSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSnapshotInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateSnapshotInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateSnapshotInput"} + if s.DirectoryId == nil { + invalidParams.Add(request.NewErrParamRequired("DirectoryId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the results of the CreateSnapshot operation. +type CreateSnapshotOutput struct { + _ struct{} `type:"structure"` + + // The identifier of the snapshot that was created. + SnapshotId *string `type:"string"` +} + +// String returns the string representation +func (s CreateSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSnapshotOutput) GoString() string { + return s.String() +} + +// AWS Directory Service for Microsoft Active Directory allows you to configure +// trust relationships. For example, you can establish a trust between your +// Microsoft AD in the AWS cloud, and your existing on-premises Microsoft Active +// Directory. 
This would allow you to provide users and groups access to resources
+// in either domain, with a single set of credentials.
+//
+// This action initiates the creation of the AWS side of a trust relationship
+// between a Microsoft AD in the AWS cloud and an external domain.
+type CreateTrustInput struct {
+	_ struct{} `type:"structure"`
+
+	// The IP addresses of the remote DNS server associated with RemoteDomainName.
+	ConditionalForwarderIpAddrs []*string `type:"list"`
+
+	// The Directory ID of the Microsoft AD in the AWS cloud for which to establish
+	// the trust relationship.
+	DirectoryId *string `type:"string" required:"true"`
+
+	// The Fully Qualified Domain Name (FQDN) of the external domain for which to
+	// create the trust relationship.
+	RemoteDomainName *string `type:"string" required:"true"`
+
+	// The direction of the trust relationship.
+	TrustDirection *string `type:"string" required:"true" enum:"TrustDirection"`
+
+	// The trust password. It must be the same password that was used when creating
+	// the trust relationship on the external domain.
+	TrustPassword *string `min:"1" type:"string" required:"true"`
+
+	// The trust relationship type.
+	TrustType *string `type:"string" enum:"TrustType"`
+}
+
+// String returns the string representation
+func (s CreateTrustInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateTrustInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateTrustInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateTrustInput"}
+	if s.DirectoryId == nil {
+		invalidParams.Add(request.NewErrParamRequired("DirectoryId"))
+	}
+	if s.RemoteDomainName == nil {
+		invalidParams.Add(request.NewErrParamRequired("RemoteDomainName"))
+	}
+	if s.TrustDirection == nil {
+		invalidParams.Add(request.NewErrParamRequired("TrustDirection"))
+	}
+	if s.TrustPassword == nil {
+		invalidParams.Add(request.NewErrParamRequired("TrustPassword"))
+	}
+	if s.TrustPassword != nil && len(*s.TrustPassword) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("TrustPassword", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The result of a CreateTrust request.
+type CreateTrustOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A unique identifier for the trust relationship that was created.
+	TrustId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CreateTrustOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateTrustOutput) GoString() string {
+	return s.String()
+}
+
+// Deletes a conditional forwarder.
+type DeleteConditionalForwarderInput struct {
+	_ struct{} `type:"structure"`
+
+	// The directory ID for which you are deleting the conditional forwarder.
+	DirectoryId *string `type:"string" required:"true"`
+
+	// The fully qualified domain name (FQDN) of the remote domain with which you
+	// are deleting the conditional forwarder.
+	RemoteDomainName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteConditionalForwarderInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteConditionalForwarderInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
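+//
+// A hedged sketch of this validation (placeholder value; leaving out the
+// required RemoteDomainName is reported through ErrInvalidParams):
+//
+//    in := &directoryservice.DeleteConditionalForwarderInput{
+//        DirectoryId: aws.String("d-1234567890"),
+//    }
+//    if err := in.Validate(); err != nil {
+//        fmt.Println(err) // missing required field, RemoteDomainName
+//    }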
+func (s *DeleteConditionalForwarderInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteConditionalForwarderInput"} + if s.DirectoryId == nil { + invalidParams.Add(request.NewErrParamRequired("DirectoryId")) + } + if s.RemoteDomainName == nil { + invalidParams.Add(request.NewErrParamRequired("RemoteDomainName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of a DeleteConditionalForwarder request. +type DeleteConditionalForwarderOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteConditionalForwarderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteConditionalForwarderOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the DeleteDirectory operation. +type DeleteDirectoryInput struct { + _ struct{} `type:"structure"` + + // The identifier of the directory to delete. + DirectoryId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDirectoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDirectoryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDirectoryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDirectoryInput"} + if s.DirectoryId == nil { + invalidParams.Add(request.NewErrParamRequired("DirectoryId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the results of the DeleteDirectory operation. +type DeleteDirectoryOutput struct { + _ struct{} `type:"structure"` + + // The directory identifier. + DirectoryId *string `type:"string"` +} + +// String returns the string representation +func (s DeleteDirectoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDirectoryOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the DeleteSnapshot operation. +type DeleteSnapshotInput struct { + _ struct{} `type:"structure"` + + // The identifier of the directory snapshot to be deleted. + SnapshotId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSnapshotInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteSnapshotInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteSnapshotInput"} + if s.SnapshotId == nil { + invalidParams.Add(request.NewErrParamRequired("SnapshotId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the results of the DeleteSnapshot operation. +type DeleteSnapshotOutput struct { + _ struct{} `type:"structure"` + + // The identifier of the directory snapshot that was deleted. 
+ SnapshotId *string `type:"string"` +} + +// String returns the string representation +func (s DeleteSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSnapshotOutput) GoString() string { + return s.String() +} + +// Deletes the local side of an existing trust relationship between the Microsoft +// AD in the AWS cloud and the external domain. +type DeleteTrustInput struct { + _ struct{} `type:"structure"` + + // Delete a conditional forwarder as part of a DeleteTrustRequest. + DeleteAssociatedConditionalForwarder *bool `type:"boolean"` + + // The Trust ID of the trust relationship to be deleted. + TrustId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTrustInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTrustInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteTrustInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTrustInput"} + if s.TrustId == nil { + invalidParams.Add(request.NewErrParamRequired("TrustId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of a DeleteTrust request. +type DeleteTrustOutput struct { + _ struct{} `type:"structure"` + + // The Trust ID of the trust relationship that was deleted. + TrustId *string `type:"string"` +} + +// String returns the string representation +func (s DeleteTrustOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTrustOutput) GoString() string { + return s.String() +} + +// Removes the specified directory as a publisher to the specified SNS topic. +type DeregisterEventTopicInput struct { + _ struct{} `type:"structure"` + + // The Directory ID to remove as a publisher. This directory will no longer + // send messages to the specified SNS topic. + DirectoryId *string `type:"string" required:"true"` + + // The name of the SNS topic from which to remove the directory as a publisher. + TopicName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeregisterEventTopicInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterEventTopicInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeregisterEventTopicInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeregisterEventTopicInput"} + if s.DirectoryId == nil { + invalidParams.Add(request.NewErrParamRequired("DirectoryId")) + } + if s.TopicName == nil { + invalidParams.Add(request.NewErrParamRequired("TopicName")) + } + if s.TopicName != nil && len(*s.TopicName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TopicName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of a DeregisterEventTopic request. 
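+//
+// A hedged usage sketch for the corresponding call (placeholder values; svc
+// is a *DirectoryService client):
+//
+//    _, err := svc.DeregisterEventTopic(&directoryservice.DeregisterEventTopicInput{
+//        DirectoryId: aws.String("d-1234567890"),
+//        TopicName:   aws.String("directory-status"),
+//    })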
+type DeregisterEventTopicOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeregisterEventTopicOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterEventTopicOutput) GoString() string { + return s.String() +} + +// Describes a conditional forwarder. +type DescribeConditionalForwardersInput struct { + _ struct{} `type:"structure"` + + // The directory ID for which to get the list of associated conditional forwarders. + DirectoryId *string `type:"string" required:"true"` + + // The fully qualified domain names (FQDN) of the remote domains for which to + // get the list of associated conditional forwarders. If this member is null, + // all conditional forwarders are returned. + RemoteDomainNames []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeConditionalForwardersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeConditionalForwardersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeConditionalForwardersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeConditionalForwardersInput"} + if s.DirectoryId == nil { + invalidParams.Add(request.NewErrParamRequired("DirectoryId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of a DescribeConditionalForwarder request. +type DescribeConditionalForwardersOutput struct { + _ struct{} `type:"structure"` + + // The list of conditional forwarders that have been created. + ConditionalForwarders []*ConditionalForwarder `type:"list"` +} + +// String returns the string representation +func (s DescribeConditionalForwardersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeConditionalForwardersOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the DescribeDirectories operation. +type DescribeDirectoriesInput struct { + _ struct{} `type:"structure"` + + // A list of identifiers of the directories for which to obtain the information. + // If this member is null, all directories that belong to the current account + // are returned. + // + // An empty list results in an InvalidParameterException being thrown. + DirectoryIds []*string `type:"list"` + + // The maximum number of items to return. If this value is zero, the maximum + // number of items is specified by the limitations of the operation. + Limit *int64 `type:"integer"` + + // The DescribeDirectoriesResult.NextToken value from a previous call to DescribeDirectories. + // Pass null if this is the first call. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDirectoriesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDirectoriesInput) GoString() string { + return s.String() +} + +// Contains the results of the DescribeDirectories operation. +type DescribeDirectoriesOutput struct { + _ struct{} `type:"structure"` + + // The list of DirectoryDescription objects that were retrieved. + // + // It is possible that this list contains less than the number of items specified + // in the Limit member of the request. 
This occurs if there are less than the + // requested number of items left to retrieve, or if the limitations of the + // operation have been exceeded. + DirectoryDescriptions []*DirectoryDescription `type:"list"` + + // If not null, more results are available. Pass this value for the NextToken + // parameter in a subsequent call to DescribeDirectories to retrieve the next + // set of items. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDirectoriesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDirectoriesOutput) GoString() string { + return s.String() +} + +// Describes event topics. +type DescribeEventTopicsInput struct { + _ struct{} `type:"structure"` + + // The Directory ID for which to get the list of associated SNS topics. If this + // member is null, associations for all Directory IDs are returned. + DirectoryId *string `type:"string"` + + // A list of SNS topic names for which to obtain the information. If this member + // is null, all associations for the specified Directory ID are returned. + // + // An empty list results in an InvalidParameterException being thrown. + TopicNames []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeEventTopicsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventTopicsInput) GoString() string { + return s.String() +} + +// The result of a DescribeEventTopic request. +type DescribeEventTopicsOutput struct { + _ struct{} `type:"structure"` + + // A list of SNS topic names that receive status messages from the specified + // Directory ID. + EventTopics []*EventTopic `type:"list"` +} + +// String returns the string representation +func (s DescribeEventTopicsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventTopicsOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the DescribeSnapshots operation. +type DescribeSnapshotsInput struct { + _ struct{} `type:"structure"` + + // The identifier of the directory for which to retrieve snapshot information. + DirectoryId *string `type:"string"` + + // The maximum number of objects to return. + Limit *int64 `type:"integer"` + + // The DescribeSnapshotsResult.NextToken value from a previous call to DescribeSnapshots. + // Pass null if this is the first call. + NextToken *string `type:"string"` + + // A list of identifiers of the snapshots to obtain the information for. If + // this member is null or empty, all snapshots are returned using the Limit + // and NextToken members. + SnapshotIds []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeSnapshotsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSnapshotsInput) GoString() string { + return s.String() +} + +// Contains the results of the DescribeSnapshots operation. +type DescribeSnapshotsOutput struct { + _ struct{} `type:"structure"` + + // If not null, more results are available. Pass this value in the NextToken + // member of a subsequent call to DescribeSnapshots. + NextToken *string `type:"string"` + + // The list of Snapshot objects that were retrieved. + // + // It is possible that this list contains less than the number of items specified + // in the Limit member of the request. 
This occurs if there are less than the
+	// requested number of items left to retrieve, or if the limitations of the
+	// operation have been exceeded.
+	Snapshots []*Snapshot `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeSnapshotsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeSnapshotsOutput) GoString() string {
+	return s.String()
+}
+
+// Describes the trust relationships for a particular Microsoft AD in the AWS
+// cloud. If no input parameters are provided, such as directory ID or trust
+// ID, this request describes all the trust relationships.
+type DescribeTrustsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The Directory ID of the AWS directory that is a part of the requested trust
+	// relationship.
+	DirectoryId *string `type:"string"`
+
+	// The maximum number of objects to return.
+	Limit *int64 `type:"integer"`
+
+	// The DescribeTrustsResult.NextToken value from a previous call to DescribeTrusts.
+	// Pass null if this is the first call.
+	NextToken *string `type:"string"`
+
+	// A list of identifiers of the trust relationships for which to obtain the
+	// information. If this member is null, all trust relationships that belong
+	// to the current account are returned.
+	//
+	// An empty list results in an InvalidParameterException being thrown.
+	TrustIds []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeTrustsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeTrustsInput) GoString() string {
+	return s.String()
+}
+
+// The result of a DescribeTrust request.
+type DescribeTrustsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// If not null, more results are available. Pass this value for the NextToken
+	// parameter in a subsequent call to DescribeTrusts to retrieve the next set
+	// of items.
+	NextToken *string `type:"string"`
+
+	// The list of Trust objects that were retrieved.
+	//
+	// It is possible that this list contains less than the number of items specified
+	// in the Limit member of the request. This occurs if there are less than the
+	// requested number of items left to retrieve, or if the limitations of the
+	// operation have been exceeded.
+	Trusts []*Trust `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeTrustsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeTrustsOutput) GoString() string {
+	return s.String()
+}
+
+// Contains information for the ConnectDirectory operation when an AD Connector
+// directory is being created.
+type DirectoryConnectSettings struct {
+	_ struct{} `type:"structure"`
+
+	// A list of one or more IP addresses of DNS servers or domain controllers in
+	// the on-premises directory.
+	CustomerDnsIps []*string `type:"list" required:"true"`
+
+	// The username of an account in the on-premises directory that is used to connect
+	// to the directory. This account must have the following privileges:
+	//
+	// Read users and groups
+	//
+	// Create computer objects
+	//
+	// Join computers to the domain
+	CustomerUserName *string `min:"1" type:"string" required:"true"`
+
+	// A list of subnet identifiers in the VPC in which the AD Connector is created.
+	SubnetIds []*string `type:"list" required:"true"`
+
+	// The identifier of the VPC in which the AD Connector is created.
+	VpcId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DirectoryConnectSettings) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DirectoryConnectSettings) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DirectoryConnectSettings) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DirectoryConnectSettings"}
+	if s.CustomerDnsIps == nil {
+		invalidParams.Add(request.NewErrParamRequired("CustomerDnsIps"))
+	}
+	if s.CustomerUserName == nil {
+		invalidParams.Add(request.NewErrParamRequired("CustomerUserName"))
+	}
+	if s.CustomerUserName != nil && len(*s.CustomerUserName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("CustomerUserName", 1))
+	}
+	if s.SubnetIds == nil {
+		invalidParams.Add(request.NewErrParamRequired("SubnetIds"))
+	}
+	if s.VpcId == nil {
+		invalidParams.Add(request.NewErrParamRequired("VpcId"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Contains information about an AD Connector directory.
+type DirectoryConnectSettingsDescription struct {
+	_ struct{} `type:"structure"`
+
+	// A list of the Availability Zones that the directory is in.
+	AvailabilityZones []*string `type:"list"`
+
+	// The IP addresses of the AD Connector servers.
+	ConnectIps []*string `type:"list"`
+
+	// The username of the service account in the on-premises directory.
+	CustomerUserName *string `min:"1" type:"string"`
+
+	// The security group identifier for the AD Connector directory.
+	SecurityGroupId *string `type:"string"`
+
+	// A list of subnet identifiers in the VPC that the AD connector is in.
+	SubnetIds []*string `type:"list"`
+
+	// The identifier of the VPC that the AD Connector is in.
+	VpcId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DirectoryConnectSettingsDescription) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DirectoryConnectSettingsDescription) GoString() string {
+	return s.String()
+}
+
+// Contains information about an AWS Directory Service directory.
+type DirectoryDescription struct {
+	_ struct{} `type:"structure"`
+
+	// The access URL for the directory, such as http://<alias>.awsapps.com. If
+	// no alias has been created for the directory, <alias> is the directory identifier,
+	// such as d-XXXXXXXXXX.
+	AccessUrl *string `min:"1" type:"string"`
+
+	// The alias for the directory. If no alias has been created for the directory,
+	// the alias is the directory identifier, such as d-XXXXXXXXXX.
+	Alias *string `min:"1" type:"string"`
+
+	// A DirectoryConnectSettingsDescription object that contains additional information
+	// about an AD Connector directory. This member is only present if the directory
+	// is an AD Connector directory.
+	ConnectSettings *DirectoryConnectSettingsDescription `type:"structure"`
+
+	// The textual description for the directory.
+	Description *string `type:"string"`
+
+	// The directory identifier.
+	DirectoryId *string `type:"string"`
+
+	// The IP addresses of the DNS servers for the directory. For a Simple AD or
+	// Microsoft AD directory, these are the IP addresses of the Simple AD or Microsoft
+	// AD directory servers. For an AD Connector directory, these are the IP addresses
+	// of the DNS servers or domain controllers in the on-premises directory to
+	// which the AD Connector is connected.
+	DnsIpAddrs []*string `type:"list"`
+
+	// Specifies when the directory was created.
+	LaunchTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+	// The fully-qualified name of the directory.
+	Name *string `type:"string"`
+
+	// A RadiusSettings object that contains information about the RADIUS server
+	// configured for this directory.
+	RadiusSettings *RadiusSettings `type:"structure"`
+
+	// The status of the RADIUS MFA server connection.
+	RadiusStatus *string `type:"string" enum:"RadiusStatus"`
+
+	// The short name of the directory.
+	ShortName *string `type:"string"`
+
+	// The directory size.
+	Size *string `type:"string" enum:"DirectorySize"`
+
+	// Indicates if single sign-on is enabled for the directory. For more information,
+	// see EnableSso and DisableSso.
+	SsoEnabled *bool `type:"boolean"`
+
+	// The current stage of the directory.
+	Stage *string `type:"string" enum:"DirectoryStage"`
+
+	// The date and time that the stage was last updated.
+	StageLastUpdatedDateTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+	// Additional information about the directory stage.
+	StageReason *string `type:"string"`
+
+	// The directory type.
+	Type *string `type:"string" enum:"DirectoryType"`
+
+	// A DirectoryVpcSettingsDescription object that contains additional information
+	// about a directory. This member is only present if the directory is a Simple
+	// AD or Managed AD directory.
+	VpcSettings *DirectoryVpcSettingsDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s DirectoryDescription) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DirectoryDescription) GoString() string {
+	return s.String()
+}
+
+// Contains directory limit information for a region.
+type DirectoryLimits struct {
+	_ struct{} `type:"structure"`
+
+	// The current number of cloud directories in the region.
+	CloudOnlyDirectoriesCurrentCount *int64 `type:"integer"`
+
+	// The maximum number of cloud directories allowed in the region.
+	CloudOnlyDirectoriesLimit *int64 `type:"integer"`
+
+	// Indicates if the cloud directory limit has been reached.
+	CloudOnlyDirectoriesLimitReached *bool `type:"boolean"`
+
+	// The current number of Microsoft AD directories in the region.
+	CloudOnlyMicrosoftADCurrentCount *int64 `type:"integer"`
+
+	// The maximum number of Microsoft AD directories allowed in the region.
+	CloudOnlyMicrosoftADLimit *int64 `type:"integer"`
+
+	// Indicates if the Microsoft AD directory limit has been reached.
+	CloudOnlyMicrosoftADLimitReached *bool `type:"boolean"`
+
+	// The current number of connected directories in the region.
+	ConnectedDirectoriesCurrentCount *int64 `type:"integer"`
+
+	// The maximum number of connected directories allowed in the region.
+	ConnectedDirectoriesLimit *int64 `type:"integer"`
+
+	// Indicates if the connected directory limit has been reached.
+	ConnectedDirectoriesLimitReached *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s DirectoryLimits) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DirectoryLimits) GoString() string {
+	return s.String()
+}
+
+// Contains VPC information for the CreateDirectory or CreateMicrosoftAD operation.
+type DirectoryVpcSettings struct {
+	_ struct{} `type:"structure"`
+
+	// The identifiers of the subnets for the directory servers. The two subnets
+	// must be in different Availability Zones.
AWS Directory Service creates a
+	// directory server and a DNS server in each of these subnets.
+	SubnetIds []*string `type:"list" required:"true"`
+
+	// The identifier of the VPC in which to create the directory.
+	VpcId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DirectoryVpcSettings) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DirectoryVpcSettings) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DirectoryVpcSettings) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DirectoryVpcSettings"}
+	if s.SubnetIds == nil {
+		invalidParams.Add(request.NewErrParamRequired("SubnetIds"))
+	}
+	if s.VpcId == nil {
+		invalidParams.Add(request.NewErrParamRequired("VpcId"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Contains information about the directory.
+type DirectoryVpcSettingsDescription struct {
+	_ struct{} `type:"structure"`
+
+	// The list of Availability Zones that the directory is in.
+	AvailabilityZones []*string `type:"list"`
+
+	// The security group identifier for the directory. If the directory was created
+	// before 8/1/2014, this is the identifier of the directory members' security
+	// group that was created when the directory was created. If the directory was
+	// created after this date, this value is null.
+	SecurityGroupId *string `type:"string"`
+
+	// The identifiers of the subnets for the directory servers.
+	SubnetIds []*string `type:"list"`
+
+	// The identifier of the VPC that the directory is in.
+	VpcId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DirectoryVpcSettingsDescription) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DirectoryVpcSettingsDescription) GoString() string {
+	return s.String()
+}
+
+// Contains the inputs for the DisableRadius operation.
+type DisableRadiusInput struct {
+	_ struct{} `type:"structure"`
+
+	// The identifier of the directory for which to disable MFA.
+	DirectoryId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DisableRadiusInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DisableRadiusInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DisableRadiusInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DisableRadiusInput"}
+	if s.DirectoryId == nil {
+		invalidParams.Add(request.NewErrParamRequired("DirectoryId"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Contains the results of the DisableRadius operation.
+type DisableRadiusOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DisableRadiusOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DisableRadiusOutput) GoString() string {
+	return s.String()
+}
+
+// Contains the inputs for the DisableSso operation.
+type DisableSsoInput struct {
+	_ struct{} `type:"structure"`
+
+	// The identifier of the directory for which to disable single sign-on.
+	DirectoryId *string `type:"string" required:"true"`
+
+	// The password of an alternate account to use to disable single sign-on. This
+	// is only used for AD Connector directories. For more information, see the
+	// UserName parameter.
+	Password *string `min:"1" type:"string"`
+
+	// The username of an alternate account to use to disable single sign-on. This
+	// is only used for AD Connector directories. This account must have privileges
+	// to remove a service principal name.
+	//
+	// If the AD Connector service account does not have privileges to remove a
+	// service principal name, you can specify an alternate account with the UserName
+	// and Password parameters. These credentials are only used to disable single
+	// sign-on and are not stored by the service. The AD Connector service account
+	// is not changed.
+	UserName *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DisableSsoInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DisableSsoInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DisableSsoInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DisableSsoInput"}
+	if s.DirectoryId == nil {
+		invalidParams.Add(request.NewErrParamRequired("DirectoryId"))
+	}
+	if s.Password != nil && len(*s.Password) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Password", 1))
+	}
+	if s.UserName != nil && len(*s.UserName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("UserName", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Contains the results of the DisableSso operation.
+type DisableSsoOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DisableSsoOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DisableSsoOutput) GoString() string {
+	return s.String()
+}
+
+// Contains the inputs for the EnableRadius operation.
+type EnableRadiusInput struct {
+	_ struct{} `type:"structure"`
+
+	// The identifier of the directory for which to enable MFA.
+	DirectoryId *string `type:"string" required:"true"`
+
+	// A RadiusSettings object that contains information about the RADIUS server.
+	RadiusSettings *RadiusSettings `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s EnableRadiusInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EnableRadiusInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *EnableRadiusInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "EnableRadiusInput"}
+	if s.DirectoryId == nil {
+		invalidParams.Add(request.NewErrParamRequired("DirectoryId"))
+	}
+	if s.RadiusSettings == nil {
+		invalidParams.Add(request.NewErrParamRequired("RadiusSettings"))
+	}
+	if s.RadiusSettings != nil {
+		if err := s.RadiusSettings.Validate(); err != nil {
+			invalidParams.AddNested("RadiusSettings", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Contains the results of the EnableRadius operation.
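+//
+// A hedged sketch of the corresponding EnableRadius call (placeholder values;
+// RadiusSettings fields are defined elsewhere in this package):
+//
+//    _, err := svc.EnableRadius(&directoryservice.EnableRadiusInput{
+//        DirectoryId:    aws.String("d-1234567890"),
+//        RadiusSettings: radiusSettings, // a populated *directoryservice.RadiusSettings
+//    })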
+type EnableRadiusOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s EnableRadiusOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EnableRadiusOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the inputs for the EnableSso operation.
+type EnableSsoInput struct {
+ _ struct{} `type:"structure"`
+
+ // The identifier of the directory for which to enable single-sign on.
+ DirectoryId *string `type:"string" required:"true"`
+
+ // The password of an alternate account to use to enable single-sign on. This
+ // is only used for AD Connector directories. For more information, see the
+ // UserName parameter.
+ Password *string `min:"1" type:"string"`
+
+ // The username of an alternate account to use to enable single-sign on. This
+ // is only used for AD Connector directories. This account must have privileges
+ // to add a service principal name.
+ //
+ // If the AD Connector service account does not have privileges to add a service
+ // principal name, you can specify an alternate account with the UserName and
+ // Password parameters. These credentials are only used to enable single sign-on
+ // and are not stored by the service. The AD Connector service account is not
+ // changed.
+ UserName *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s EnableSsoInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EnableSsoInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *EnableSsoInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "EnableSsoInput"}
+ if s.DirectoryId == nil {
+ invalidParams.Add(request.NewErrParamRequired("DirectoryId"))
+ }
+ if s.Password != nil && len(*s.Password) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Password", 1))
+ }
+ if s.UserName != nil && len(*s.UserName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("UserName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Contains the results of the EnableSso operation.
+type EnableSsoOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s EnableSsoOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EnableSsoOutput) GoString() string {
+ return s.String()
+}
+
+// Information about SNS topic and AWS Directory Service directory associations.
+type EventTopic struct {
+ _ struct{} `type:"structure"`
+
+ // The date and time when you associated your directory with the SNS topic.
+ CreatedDateTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The Directory ID of an AWS Directory Service directory that will publish
+ // status messages to an SNS topic.
+ DirectoryId *string `type:"string"`
+
+ // The topic registration status.
+ Status *string `type:"string" enum:"TopicStatus"`
+
+ // The SNS topic ARN (Amazon Resource Name).
+ TopicArn *string `type:"string"`
+
+ // The name of an AWS SNS topic that receives status messages from the directory.
+ TopicName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s EventTopic) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EventTopic) GoString() string { + return s.String() +} + +// Contains the inputs for the GetDirectoryLimits operation. +type GetDirectoryLimitsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetDirectoryLimitsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDirectoryLimitsInput) GoString() string { + return s.String() +} + +// Contains the results of the GetDirectoryLimits operation. +type GetDirectoryLimitsOutput struct { + _ struct{} `type:"structure"` + + // A DirectoryLimits object that contains the directory limits for the current + // region. + DirectoryLimits *DirectoryLimits `type:"structure"` +} + +// String returns the string representation +func (s GetDirectoryLimitsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDirectoryLimitsOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the GetSnapshotLimits operation. +type GetSnapshotLimitsInput struct { + _ struct{} `type:"structure"` + + // Contains the identifier of the directory to obtain the limits for. + DirectoryId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetSnapshotLimitsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSnapshotLimitsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetSnapshotLimitsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSnapshotLimitsInput"} + if s.DirectoryId == nil { + invalidParams.Add(request.NewErrParamRequired("DirectoryId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the results of the GetSnapshotLimits operation. +type GetSnapshotLimitsOutput struct { + _ struct{} `type:"structure"` + + // A SnapshotLimits object that contains the manual snapshot limits for the + // specified directory. + SnapshotLimits *SnapshotLimits `type:"structure"` +} + +// String returns the string representation +func (s GetSnapshotLimitsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSnapshotLimitsOutput) GoString() string { + return s.String() +} + +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // Reserved for future use. + Limit *int64 `type:"integer"` + + // Reserved for future use. + NextToken *string `type:"string"` + + // The ID of the directory for which you want to retrieve tags. + ResourceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // Reserved for future use. + NextToken *string `type:"string"` + + // List of tags returned by the ListTagsForResource operation. + Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// Contains information about a Remote Authentication Dial In User Service (RADIUS) +// server. +type RadiusSettings struct { + _ struct{} `type:"structure"` + + // The protocol specified for your RADIUS endpoints. + AuthenticationProtocol *string `type:"string" enum:"RadiusAuthenticationProtocol"` + + // Not currently used. + DisplayLabel *string `min:"1" type:"string"` + + // The port that your RADIUS server is using for communications. Your on-premises + // network must allow inbound traffic over this port from the AWS Directory + // Service servers. + RadiusPort *int64 `min:"1025" type:"integer"` + + // The maximum number of times that communication with the RADIUS server is + // attempted. + RadiusRetries *int64 `type:"integer"` + + // An array of strings that contains the IP addresses of the RADIUS server endpoints, + // or the IP addresses of your RADIUS server load balancer. + RadiusServers []*string `type:"list"` + + // The amount of time, in seconds, to wait for the RADIUS server to respond. + RadiusTimeout *int64 `min:"1" type:"integer"` + + // The shared secret code that was specified when your RADIUS endpoints were + // created. + SharedSecret *string `min:"8" type:"string"` + + // Not currently used. + UseSameUsername *bool `type:"boolean"` +} + +// String returns the string representation +func (s RadiusSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RadiusSettings) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RadiusSettings) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RadiusSettings"} + if s.DisplayLabel != nil && len(*s.DisplayLabel) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DisplayLabel", 1)) + } + if s.RadiusPort != nil && *s.RadiusPort < 1025 { + invalidParams.Add(request.NewErrParamMinValue("RadiusPort", 1025)) + } + if s.RadiusTimeout != nil && *s.RadiusTimeout < 1 { + invalidParams.Add(request.NewErrParamMinValue("RadiusTimeout", 1)) + } + if s.SharedSecret != nil && len(*s.SharedSecret) < 8 { + invalidParams.Add(request.NewErrParamMinLen("SharedSecret", 8)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Registers a new event topic. +type RegisterEventTopicInput struct { + _ struct{} `type:"structure"` + + // The Directory ID that will publish status messages to the SNS topic. + DirectoryId *string `type:"string" required:"true"` + + // The SNS topic name to which the directory will publish status messages. This + // SNS topic must be in the same region as the specified Directory ID. 
+ TopicName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RegisterEventTopicInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterEventTopicInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RegisterEventTopicInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RegisterEventTopicInput"} + if s.DirectoryId == nil { + invalidParams.Add(request.NewErrParamRequired("DirectoryId")) + } + if s.TopicName == nil { + invalidParams.Add(request.NewErrParamRequired("TopicName")) + } + if s.TopicName != nil && len(*s.TopicName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TopicName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of a RegisterEventTopic request. +type RegisterEventTopicOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RegisterEventTopicOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterEventTopicOutput) GoString() string { + return s.String() +} + +type RemoveTagsFromResourceInput struct { + _ struct{} `type:"structure"` + + // The ID of the directory from which to remove the tag. + ResourceId *string `type:"string" required:"true"` + + // The tag key (name) of the tag to be removed. + TagKeys []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s RemoveTagsFromResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsFromResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RemoveTagsFromResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemoveTagsFromResourceInput"} + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RemoveTagsFromResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveTagsFromResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsFromResourceOutput) GoString() string { + return s.String() +} + +// An object representing the inputs for the RestoreFromSnapshot operation. +type RestoreFromSnapshotInput struct { + _ struct{} `type:"structure"` + + // The identifier of the snapshot to restore from. + SnapshotId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RestoreFromSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreFromSnapshotInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *RestoreFromSnapshotInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RestoreFromSnapshotInput"}
+ if s.SnapshotId == nil {
+ invalidParams.Add(request.NewErrParamRequired("SnapshotId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Contains the results of the RestoreFromSnapshot operation.
+type RestoreFromSnapshotOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s RestoreFromSnapshotOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreFromSnapshotOutput) GoString() string {
+ return s.String()
+}
+
+// Describes a directory snapshot.
+type Snapshot struct {
+ _ struct{} `type:"structure"`
+
+ // The directory identifier.
+ DirectoryId *string `type:"string"`
+
+ // The descriptive name of the snapshot.
+ Name *string `type:"string"`
+
+ // The snapshot identifier.
+ SnapshotId *string `type:"string"`
+
+ // The date and time that the snapshot was taken.
+ StartTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The snapshot status.
+ Status *string `type:"string" enum:"SnapshotStatus"`
+
+ // The snapshot type.
+ Type *string `type:"string" enum:"SnapshotType"`
+}
+
+// String returns the string representation
+func (s Snapshot) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Snapshot) GoString() string {
+ return s.String()
+}
+
+// Contains manual snapshot limit information for a directory.
+type SnapshotLimits struct {
+ _ struct{} `type:"structure"`
+
+ // The current number of manual snapshots of the directory.
+ ManualSnapshotsCurrentCount *int64 `type:"integer"`
+
+ // The maximum number of manual snapshots allowed.
+ ManualSnapshotsLimit *int64 `type:"integer"`
+
+ // Indicates if the manual snapshot limit has been reached.
+ ManualSnapshotsLimitReached *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s SnapshotLimits) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SnapshotLimits) GoString() string {
+ return s.String()
+}
+
+// Metadata assigned to an Amazon Directory Services directory consisting of
+// a key-value pair.
+type Tag struct {
+ _ struct{} `type:"structure"`
+
+ // A key is the required name of the tag. The string value can be from 1 to
+ // 128 Unicode characters in length and cannot be prefixed with "aws:". The
+ // string can contain only the set of Unicode letters, digits, white-space,
+ // '_', '.', '/', '=', '+', '-' (Java regex: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$").
+ Key *string `min:"1" type:"string" required:"true"`
+
+ // A value is the optional value of the tag. The string value can be from 1
+ // to 256 Unicode characters in length. The string can contain only the set
+ // of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-'
+ // (Java regex: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$").
+ Value *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s Tag) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Tag) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
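+//
+// For illustration only, a hand-written sketch of client-side validation (not
+// generated code; the field values are hypothetical, and the aws and fmt
+// packages are assumed to be imported):
+//
+//    tag := &directoryservice.Tag{
+//        Key:   aws.String("Environment"),
+//        Value: aws.String("Production"),
+//    }
+//    if err := tag.Validate(); err != nil {
+//        fmt.Println(err) // reports, e.g., a missing Value or an empty Key
+//    }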
+func (s *Tag) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Tag"}
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.Value == nil {
+ invalidParams.Add(request.NewErrParamRequired("Value"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Describes a trust relationship between a Microsoft AD in the AWS cloud and
+// an external domain.
+type Trust struct {
+ _ struct{} `type:"structure"`
+
+ // The date and time that the trust relationship was created.
+ CreatedDateTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The Directory ID of the AWS directory involved in the trust relationship.
+ DirectoryId *string `type:"string"`
+
+ // The date and time that the trust relationship was last updated.
+ LastUpdatedDateTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The Fully Qualified Domain Name (FQDN) of the external domain involved in
+ // the trust relationship.
+ RemoteDomainName *string `type:"string"`
+
+ // The date and time that the TrustState was last updated.
+ StateLastUpdatedDateTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The trust relationship direction.
+ TrustDirection *string `type:"string" enum:"TrustDirection"`
+
+ // The unique ID of the trust relationship.
+ TrustId *string `type:"string"`
+
+ // The trust relationship state.
+ TrustState *string `type:"string" enum:"TrustState"`
+
+ // The reason for the TrustState.
+ TrustStateReason *string `type:"string"`
+
+ // The trust relationship type.
+ TrustType *string `type:"string" enum:"TrustType"`
+}
+
+// String returns the string representation
+func (s Trust) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Trust) GoString() string {
+ return s.String()
+}
+
+// Updates a conditional forwarder.
+type UpdateConditionalForwarderInput struct {
+ _ struct{} `type:"structure"`
+
+ // The directory ID of the AWS directory for which to update the conditional
+ // forwarder.
+ DirectoryId *string `type:"string" required:"true"`
+
+ // The updated IP addresses of the remote DNS server associated with the conditional
+ // forwarder.
+ DnsIpAddrs []*string `type:"list" required:"true"`
+
+ // The fully qualified domain name (FQDN) of the remote domain with which you
+ // will set up a trust relationship.
+ RemoteDomainName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateConditionalForwarderInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateConditionalForwarderInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateConditionalForwarderInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UpdateConditionalForwarderInput"}
+ if s.DirectoryId == nil {
+ invalidParams.Add(request.NewErrParamRequired("DirectoryId"))
+ }
+ if s.DnsIpAddrs == nil {
+ invalidParams.Add(request.NewErrParamRequired("DnsIpAddrs"))
+ }
+ if s.RemoteDomainName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RemoteDomainName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// The result of an UpdateConditionalForwarder request.
+type UpdateConditionalForwarderOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateConditionalForwarderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateConditionalForwarderOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the UpdateRadius operation. +type UpdateRadiusInput struct { + _ struct{} `type:"structure"` + + // The identifier of the directory for which to update the RADIUS server information. + DirectoryId *string `type:"string" required:"true"` + + // A RadiusSettings object that contains information about the RADIUS server. + RadiusSettings *RadiusSettings `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateRadiusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateRadiusInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateRadiusInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateRadiusInput"} + if s.DirectoryId == nil { + invalidParams.Add(request.NewErrParamRequired("DirectoryId")) + } + if s.RadiusSettings == nil { + invalidParams.Add(request.NewErrParamRequired("RadiusSettings")) + } + if s.RadiusSettings != nil { + if err := s.RadiusSettings.Validate(); err != nil { + invalidParams.AddNested("RadiusSettings", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the results of the UpdateRadius operation. +type UpdateRadiusOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateRadiusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateRadiusOutput) GoString() string { + return s.String() +} + +// Initiates the verification of an existing trust relationship between a Microsoft +// AD in the AWS cloud and an external domain. +type VerifyTrustInput struct { + _ struct{} `type:"structure"` + + // The unique Trust ID of the trust relationship to verify. + TrustId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s VerifyTrustInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VerifyTrustInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *VerifyTrustInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VerifyTrustInput"} + if s.TrustId == nil { + invalidParams.Add(request.NewErrParamRequired("TrustId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Result of a VerifyTrust request. +type VerifyTrustOutput struct { + _ struct{} `type:"structure"` + + // The unique Trust ID of the trust relationship that was verified. 
+ TrustId *string `type:"string"` +} + +// String returns the string representation +func (s VerifyTrustOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VerifyTrustOutput) GoString() string { + return s.String() +} + +const ( + // @enum DirectorySize + DirectorySizeSmall = "Small" + // @enum DirectorySize + DirectorySizeLarge = "Large" +) + +const ( + // @enum DirectoryStage + DirectoryStageRequested = "Requested" + // @enum DirectoryStage + DirectoryStageCreating = "Creating" + // @enum DirectoryStage + DirectoryStageCreated = "Created" + // @enum DirectoryStage + DirectoryStageActive = "Active" + // @enum DirectoryStage + DirectoryStageInoperable = "Inoperable" + // @enum DirectoryStage + DirectoryStageImpaired = "Impaired" + // @enum DirectoryStage + DirectoryStageRestoring = "Restoring" + // @enum DirectoryStage + DirectoryStageRestoreFailed = "RestoreFailed" + // @enum DirectoryStage + DirectoryStageDeleting = "Deleting" + // @enum DirectoryStage + DirectoryStageDeleted = "Deleted" + // @enum DirectoryStage + DirectoryStageFailed = "Failed" +) + +const ( + // @enum DirectoryType + DirectoryTypeSimpleAd = "SimpleAD" + // @enum DirectoryType + DirectoryTypeAdconnector = "ADConnector" + // @enum DirectoryType + DirectoryTypeMicrosoftAd = "MicrosoftAD" +) + +const ( + // @enum RadiusAuthenticationProtocol + RadiusAuthenticationProtocolPap = "PAP" + // @enum RadiusAuthenticationProtocol + RadiusAuthenticationProtocolChap = "CHAP" + // @enum RadiusAuthenticationProtocol + RadiusAuthenticationProtocolMsChapv1 = "MS-CHAPv1" + // @enum RadiusAuthenticationProtocol + RadiusAuthenticationProtocolMsChapv2 = "MS-CHAPv2" +) + +const ( + // @enum RadiusStatus + RadiusStatusCreating = "Creating" + // @enum RadiusStatus + RadiusStatusCompleted = "Completed" + // @enum RadiusStatus + RadiusStatusFailed = "Failed" +) + +const ( + // @enum ReplicationScope + ReplicationScopeDomain = "Domain" +) + +const ( + // @enum SnapshotStatus + SnapshotStatusCreating = "Creating" + // @enum SnapshotStatus + SnapshotStatusCompleted = "Completed" + // @enum SnapshotStatus + SnapshotStatusFailed = "Failed" +) + +const ( + // @enum SnapshotType + SnapshotTypeAuto = "Auto" + // @enum SnapshotType + SnapshotTypeManual = "Manual" +) + +const ( + // @enum TopicStatus + TopicStatusRegistered = "Registered" + // @enum TopicStatus + TopicStatusTopicnotfound = "Topic not found" + // @enum TopicStatus + TopicStatusFailed = "Failed" + // @enum TopicStatus + TopicStatusDeleted = "Deleted" +) + +const ( + // @enum TrustDirection + TrustDirectionOneWayOutgoing = "One-Way: Outgoing" + // @enum TrustDirection + TrustDirectionOneWayIncoming = "One-Way: Incoming" + // @enum TrustDirection + TrustDirectionTwoWay = "Two-Way" +) + +const ( + // @enum TrustState + TrustStateCreating = "Creating" + // @enum TrustState + TrustStateCreated = "Created" + // @enum TrustState + TrustStateVerifying = "Verifying" + // @enum TrustState + TrustStateVerifyFailed = "VerifyFailed" + // @enum TrustState + TrustStateVerified = "Verified" + // @enum TrustState + TrustStateDeleting = "Deleting" + // @enum TrustState + TrustStateDeleted = "Deleted" + // @enum TrustState + TrustStateFailed = "Failed" +) + +const ( + // @enum TrustType + TrustTypeForest = "Forest" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/directoryservice/directoryserviceiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/directoryservice/directoryserviceiface/interface.go new file mode 100644 index 
000000000..a508be486 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/directoryservice/directoryserviceiface/interface.go @@ -0,0 +1,142 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package directoryserviceiface provides an interface for the AWS Directory Service. +package directoryserviceiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/directoryservice" +) + +// DirectoryServiceAPI is the interface type for directoryservice.DirectoryService. +type DirectoryServiceAPI interface { + AddTagsToResourceRequest(*directoryservice.AddTagsToResourceInput) (*request.Request, *directoryservice.AddTagsToResourceOutput) + + AddTagsToResource(*directoryservice.AddTagsToResourceInput) (*directoryservice.AddTagsToResourceOutput, error) + + ConnectDirectoryRequest(*directoryservice.ConnectDirectoryInput) (*request.Request, *directoryservice.ConnectDirectoryOutput) + + ConnectDirectory(*directoryservice.ConnectDirectoryInput) (*directoryservice.ConnectDirectoryOutput, error) + + CreateAliasRequest(*directoryservice.CreateAliasInput) (*request.Request, *directoryservice.CreateAliasOutput) + + CreateAlias(*directoryservice.CreateAliasInput) (*directoryservice.CreateAliasOutput, error) + + CreateComputerRequest(*directoryservice.CreateComputerInput) (*request.Request, *directoryservice.CreateComputerOutput) + + CreateComputer(*directoryservice.CreateComputerInput) (*directoryservice.CreateComputerOutput, error) + + CreateConditionalForwarderRequest(*directoryservice.CreateConditionalForwarderInput) (*request.Request, *directoryservice.CreateConditionalForwarderOutput) + + CreateConditionalForwarder(*directoryservice.CreateConditionalForwarderInput) (*directoryservice.CreateConditionalForwarderOutput, error) + + CreateDirectoryRequest(*directoryservice.CreateDirectoryInput) (*request.Request, *directoryservice.CreateDirectoryOutput) + + CreateDirectory(*directoryservice.CreateDirectoryInput) (*directoryservice.CreateDirectoryOutput, error) + + CreateMicrosoftADRequest(*directoryservice.CreateMicrosoftADInput) (*request.Request, *directoryservice.CreateMicrosoftADOutput) + + CreateMicrosoftAD(*directoryservice.CreateMicrosoftADInput) (*directoryservice.CreateMicrosoftADOutput, error) + + CreateSnapshotRequest(*directoryservice.CreateSnapshotInput) (*request.Request, *directoryservice.CreateSnapshotOutput) + + CreateSnapshot(*directoryservice.CreateSnapshotInput) (*directoryservice.CreateSnapshotOutput, error) + + CreateTrustRequest(*directoryservice.CreateTrustInput) (*request.Request, *directoryservice.CreateTrustOutput) + + CreateTrust(*directoryservice.CreateTrustInput) (*directoryservice.CreateTrustOutput, error) + + DeleteConditionalForwarderRequest(*directoryservice.DeleteConditionalForwarderInput) (*request.Request, *directoryservice.DeleteConditionalForwarderOutput) + + DeleteConditionalForwarder(*directoryservice.DeleteConditionalForwarderInput) (*directoryservice.DeleteConditionalForwarderOutput, error) + + DeleteDirectoryRequest(*directoryservice.DeleteDirectoryInput) (*request.Request, *directoryservice.DeleteDirectoryOutput) + + DeleteDirectory(*directoryservice.DeleteDirectoryInput) (*directoryservice.DeleteDirectoryOutput, error) + + DeleteSnapshotRequest(*directoryservice.DeleteSnapshotInput) (*request.Request, *directoryservice.DeleteSnapshotOutput) + + DeleteSnapshot(*directoryservice.DeleteSnapshotInput) (*directoryservice.DeleteSnapshotOutput, error) + + DeleteTrustRequest(*directoryservice.DeleteTrustInput) 
(*request.Request, *directoryservice.DeleteTrustOutput) + + DeleteTrust(*directoryservice.DeleteTrustInput) (*directoryservice.DeleteTrustOutput, error) + + DeregisterEventTopicRequest(*directoryservice.DeregisterEventTopicInput) (*request.Request, *directoryservice.DeregisterEventTopicOutput) + + DeregisterEventTopic(*directoryservice.DeregisterEventTopicInput) (*directoryservice.DeregisterEventTopicOutput, error) + + DescribeConditionalForwardersRequest(*directoryservice.DescribeConditionalForwardersInput) (*request.Request, *directoryservice.DescribeConditionalForwardersOutput) + + DescribeConditionalForwarders(*directoryservice.DescribeConditionalForwardersInput) (*directoryservice.DescribeConditionalForwardersOutput, error) + + DescribeDirectoriesRequest(*directoryservice.DescribeDirectoriesInput) (*request.Request, *directoryservice.DescribeDirectoriesOutput) + + DescribeDirectories(*directoryservice.DescribeDirectoriesInput) (*directoryservice.DescribeDirectoriesOutput, error) + + DescribeEventTopicsRequest(*directoryservice.DescribeEventTopicsInput) (*request.Request, *directoryservice.DescribeEventTopicsOutput) + + DescribeEventTopics(*directoryservice.DescribeEventTopicsInput) (*directoryservice.DescribeEventTopicsOutput, error) + + DescribeSnapshotsRequest(*directoryservice.DescribeSnapshotsInput) (*request.Request, *directoryservice.DescribeSnapshotsOutput) + + DescribeSnapshots(*directoryservice.DescribeSnapshotsInput) (*directoryservice.DescribeSnapshotsOutput, error) + + DescribeTrustsRequest(*directoryservice.DescribeTrustsInput) (*request.Request, *directoryservice.DescribeTrustsOutput) + + DescribeTrusts(*directoryservice.DescribeTrustsInput) (*directoryservice.DescribeTrustsOutput, error) + + DisableRadiusRequest(*directoryservice.DisableRadiusInput) (*request.Request, *directoryservice.DisableRadiusOutput) + + DisableRadius(*directoryservice.DisableRadiusInput) (*directoryservice.DisableRadiusOutput, error) + + DisableSsoRequest(*directoryservice.DisableSsoInput) (*request.Request, *directoryservice.DisableSsoOutput) + + DisableSso(*directoryservice.DisableSsoInput) (*directoryservice.DisableSsoOutput, error) + + EnableRadiusRequest(*directoryservice.EnableRadiusInput) (*request.Request, *directoryservice.EnableRadiusOutput) + + EnableRadius(*directoryservice.EnableRadiusInput) (*directoryservice.EnableRadiusOutput, error) + + EnableSsoRequest(*directoryservice.EnableSsoInput) (*request.Request, *directoryservice.EnableSsoOutput) + + EnableSso(*directoryservice.EnableSsoInput) (*directoryservice.EnableSsoOutput, error) + + GetDirectoryLimitsRequest(*directoryservice.GetDirectoryLimitsInput) (*request.Request, *directoryservice.GetDirectoryLimitsOutput) + + GetDirectoryLimits(*directoryservice.GetDirectoryLimitsInput) (*directoryservice.GetDirectoryLimitsOutput, error) + + GetSnapshotLimitsRequest(*directoryservice.GetSnapshotLimitsInput) (*request.Request, *directoryservice.GetSnapshotLimitsOutput) + + GetSnapshotLimits(*directoryservice.GetSnapshotLimitsInput) (*directoryservice.GetSnapshotLimitsOutput, error) + + ListTagsForResourceRequest(*directoryservice.ListTagsForResourceInput) (*request.Request, *directoryservice.ListTagsForResourceOutput) + + ListTagsForResource(*directoryservice.ListTagsForResourceInput) (*directoryservice.ListTagsForResourceOutput, error) + + RegisterEventTopicRequest(*directoryservice.RegisterEventTopicInput) (*request.Request, *directoryservice.RegisterEventTopicOutput) + + RegisterEventTopic(*directoryservice.RegisterEventTopicInput) 
(*directoryservice.RegisterEventTopicOutput, error) + + RemoveTagsFromResourceRequest(*directoryservice.RemoveTagsFromResourceInput) (*request.Request, *directoryservice.RemoveTagsFromResourceOutput) + + RemoveTagsFromResource(*directoryservice.RemoveTagsFromResourceInput) (*directoryservice.RemoveTagsFromResourceOutput, error) + + RestoreFromSnapshotRequest(*directoryservice.RestoreFromSnapshotInput) (*request.Request, *directoryservice.RestoreFromSnapshotOutput) + + RestoreFromSnapshot(*directoryservice.RestoreFromSnapshotInput) (*directoryservice.RestoreFromSnapshotOutput, error) + + UpdateConditionalForwarderRequest(*directoryservice.UpdateConditionalForwarderInput) (*request.Request, *directoryservice.UpdateConditionalForwarderOutput) + + UpdateConditionalForwarder(*directoryservice.UpdateConditionalForwarderInput) (*directoryservice.UpdateConditionalForwarderOutput, error) + + UpdateRadiusRequest(*directoryservice.UpdateRadiusInput) (*request.Request, *directoryservice.UpdateRadiusOutput) + + UpdateRadius(*directoryservice.UpdateRadiusInput) (*directoryservice.UpdateRadiusOutput, error) + + VerifyTrustRequest(*directoryservice.VerifyTrustInput) (*request.Request, *directoryservice.VerifyTrustOutput) + + VerifyTrust(*directoryservice.VerifyTrustInput) (*directoryservice.VerifyTrustOutput, error) +} + +var _ DirectoryServiceAPI = (*directoryservice.DirectoryService)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/directoryservice/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/directoryservice/examples_test.go new file mode 100644 index 000000000..78e05bcbc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/directoryservice/examples_test.go @@ -0,0 +1,761 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package directoryservice_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/directoryservice" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleDirectoryService_AddTagsToResource() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.AddTagsToResourceInput{ + ResourceId: aws.String("ResourceId"), // Required + Tags: []*directoryservice.Tag{ // Required + { // Required + Key: aws.String("TagKey"), // Required + Value: aws.String("TagValue"), // Required + }, + // More values... + }, + } + resp, err := svc.AddTagsToResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_ConnectDirectory() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.ConnectDirectoryInput{ + ConnectSettings: &directoryservice.DirectoryConnectSettings{ // Required + CustomerDnsIps: []*string{ // Required + aws.String("IpAddr"), // Required + // More values... + }, + CustomerUserName: aws.String("UserName"), // Required + SubnetIds: []*string{ // Required + aws.String("SubnetId"), // Required + // More values... 
+ }, + VpcId: aws.String("VpcId"), // Required + }, + Name: aws.String("DirectoryName"), // Required + Password: aws.String("ConnectPassword"), // Required + Size: aws.String("DirectorySize"), // Required + Description: aws.String("Description"), + ShortName: aws.String("DirectoryShortName"), + } + resp, err := svc.ConnectDirectory(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_CreateAlias() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.CreateAliasInput{ + Alias: aws.String("AliasName"), // Required + DirectoryId: aws.String("DirectoryId"), // Required + } + resp, err := svc.CreateAlias(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_CreateComputer() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.CreateComputerInput{ + ComputerName: aws.String("ComputerName"), // Required + DirectoryId: aws.String("DirectoryId"), // Required + Password: aws.String("ComputerPassword"), // Required + ComputerAttributes: []*directoryservice.Attribute{ + { // Required + Name: aws.String("AttributeName"), + Value: aws.String("AttributeValue"), + }, + // More values... + }, + OrganizationalUnitDistinguishedName: aws.String("OrganizationalUnitDN"), + } + resp, err := svc.CreateComputer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_CreateConditionalForwarder() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.CreateConditionalForwarderInput{ + DirectoryId: aws.String("DirectoryId"), // Required + DnsIpAddrs: []*string{ // Required + aws.String("IpAddr"), // Required + // More values... + }, + RemoteDomainName: aws.String("RemoteDomainName"), // Required + } + resp, err := svc.CreateConditionalForwarder(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_CreateDirectory() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.CreateDirectoryInput{ + Name: aws.String("DirectoryName"), // Required + Password: aws.String("Password"), // Required + Size: aws.String("DirectorySize"), // Required + Description: aws.String("Description"), + ShortName: aws.String("DirectoryShortName"), + VpcSettings: &directoryservice.DirectoryVpcSettings{ + SubnetIds: []*string{ // Required + aws.String("SubnetId"), // Required + // More values... + }, + VpcId: aws.String("VpcId"), // Required + }, + } + resp, err := svc.CreateDirectory(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDirectoryService_CreateMicrosoftAD() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.CreateMicrosoftADInput{ + Name: aws.String("DirectoryName"), // Required + Password: aws.String("Password"), // Required + VpcSettings: &directoryservice.DirectoryVpcSettings{ // Required + SubnetIds: []*string{ // Required + aws.String("SubnetId"), // Required + // More values... + }, + VpcId: aws.String("VpcId"), // Required + }, + Description: aws.String("Description"), + ShortName: aws.String("DirectoryShortName"), + } + resp, err := svc.CreateMicrosoftAD(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_CreateSnapshot() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.CreateSnapshotInput{ + DirectoryId: aws.String("DirectoryId"), // Required + Name: aws.String("SnapshotName"), + } + resp, err := svc.CreateSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_CreateTrust() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.CreateTrustInput{ + DirectoryId: aws.String("DirectoryId"), // Required + RemoteDomainName: aws.String("RemoteDomainName"), // Required + TrustDirection: aws.String("TrustDirection"), // Required + TrustPassword: aws.String("TrustPassword"), // Required + ConditionalForwarderIpAddrs: []*string{ + aws.String("IpAddr"), // Required + // More values... + }, + TrustType: aws.String("TrustType"), + } + resp, err := svc.CreateTrust(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_DeleteConditionalForwarder() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.DeleteConditionalForwarderInput{ + DirectoryId: aws.String("DirectoryId"), // Required + RemoteDomainName: aws.String("RemoteDomainName"), // Required + } + resp, err := svc.DeleteConditionalForwarder(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_DeleteDirectory() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.DeleteDirectoryInput{ + DirectoryId: aws.String("DirectoryId"), // Required + } + resp, err := svc.DeleteDirectory(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_DeleteSnapshot() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.DeleteSnapshotInput{ + SnapshotId: aws.String("SnapshotId"), // Required + } + resp, err := svc.DeleteSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_DeleteTrust() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.DeleteTrustInput{ + TrustId: aws.String("TrustId"), // Required + DeleteAssociatedConditionalForwarder: aws.Bool(true), + } + resp, err := svc.DeleteTrust(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_DeregisterEventTopic() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.DeregisterEventTopicInput{ + DirectoryId: aws.String("DirectoryId"), // Required + TopicName: aws.String("TopicName"), // Required + } + resp, err := svc.DeregisterEventTopic(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_DescribeConditionalForwarders() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.DescribeConditionalForwardersInput{ + DirectoryId: aws.String("DirectoryId"), // Required + RemoteDomainNames: []*string{ + aws.String("RemoteDomainName"), // Required + // More values... + }, + } + resp, err := svc.DescribeConditionalForwarders(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_DescribeDirectories() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.DescribeDirectoriesInput{ + DirectoryIds: []*string{ + aws.String("DirectoryId"), // Required + // More values... + }, + Limit: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.DescribeDirectories(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_DescribeEventTopics() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.DescribeEventTopicsInput{ + DirectoryId: aws.String("DirectoryId"), + TopicNames: []*string{ + aws.String("TopicName"), // Required + // More values... + }, + } + resp, err := svc.DescribeEventTopics(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_DescribeSnapshots() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.DescribeSnapshotsInput{ + DirectoryId: aws.String("DirectoryId"), + Limit: aws.Int64(1), + NextToken: aws.String("NextToken"), + SnapshotIds: []*string{ + aws.String("SnapshotId"), // Required + // More values... + }, + } + resp, err := svc.DescribeSnapshots(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDirectoryService_DescribeTrusts() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.DescribeTrustsInput{ + DirectoryId: aws.String("DirectoryId"), + Limit: aws.Int64(1), + NextToken: aws.String("NextToken"), + TrustIds: []*string{ + aws.String("TrustId"), // Required + // More values... + }, + } + resp, err := svc.DescribeTrusts(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_DisableRadius() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.DisableRadiusInput{ + DirectoryId: aws.String("DirectoryId"), // Required + } + resp, err := svc.DisableRadius(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_DisableSso() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.DisableSsoInput{ + DirectoryId: aws.String("DirectoryId"), // Required + Password: aws.String("ConnectPassword"), + UserName: aws.String("UserName"), + } + resp, err := svc.DisableSso(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_EnableRadius() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.EnableRadiusInput{ + DirectoryId: aws.String("DirectoryId"), // Required + RadiusSettings: &directoryservice.RadiusSettings{ // Required + AuthenticationProtocol: aws.String("RadiusAuthenticationProtocol"), + DisplayLabel: aws.String("RadiusDisplayLabel"), + RadiusPort: aws.Int64(1), + RadiusRetries: aws.Int64(1), + RadiusServers: []*string{ + aws.String("Server"), // Required + // More values... + }, + RadiusTimeout: aws.Int64(1), + SharedSecret: aws.String("RadiusSharedSecret"), + UseSameUsername: aws.Bool(true), + }, + } + resp, err := svc.EnableRadius(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_EnableSso() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.EnableSsoInput{ + DirectoryId: aws.String("DirectoryId"), // Required + Password: aws.String("ConnectPassword"), + UserName: aws.String("UserName"), + } + resp, err := svc.EnableSso(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_GetDirectoryLimits() { + svc := directoryservice.New(session.New()) + + var params *directoryservice.GetDirectoryLimitsInput + resp, err := svc.GetDirectoryLimits(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDirectoryService_GetSnapshotLimits() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.GetSnapshotLimitsInput{ + DirectoryId: aws.String("DirectoryId"), // Required + } + resp, err := svc.GetSnapshotLimits(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_ListTagsForResource() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.ListTagsForResourceInput{ + ResourceId: aws.String("ResourceId"), // Required + Limit: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListTagsForResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_RegisterEventTopic() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.RegisterEventTopicInput{ + DirectoryId: aws.String("DirectoryId"), // Required + TopicName: aws.String("TopicName"), // Required + } + resp, err := svc.RegisterEventTopic(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_RemoveTagsFromResource() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.RemoveTagsFromResourceInput{ + ResourceId: aws.String("ResourceId"), // Required + TagKeys: []*string{ // Required + aws.String("TagKey"), // Required + // More values... + }, + } + resp, err := svc.RemoveTagsFromResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_RestoreFromSnapshot() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.RestoreFromSnapshotInput{ + SnapshotId: aws.String("SnapshotId"), // Required + } + resp, err := svc.RestoreFromSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_UpdateConditionalForwarder() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.UpdateConditionalForwarderInput{ + DirectoryId: aws.String("DirectoryId"), // Required + DnsIpAddrs: []*string{ // Required + aws.String("IpAddr"), // Required + // More values... + }, + RemoteDomainName: aws.String("RemoteDomainName"), // Required + } + resp, err := svc.UpdateConditionalForwarder(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp)
+}
+
+func ExampleDirectoryService_UpdateRadius() {
+ svc := directoryservice.New(session.New())
+
+ params := &directoryservice.UpdateRadiusInput{
+ DirectoryId: aws.String("DirectoryId"), // Required
+ RadiusSettings: &directoryservice.RadiusSettings{ // Required
+ AuthenticationProtocol: aws.String("RadiusAuthenticationProtocol"),
+ DisplayLabel: aws.String("RadiusDisplayLabel"),
+ RadiusPort: aws.Int64(1),
+ RadiusRetries: aws.Int64(1),
+ RadiusServers: []*string{
+ aws.String("Server"), // Required
+ // More values...
+ },
+ RadiusTimeout: aws.Int64(1),
+ SharedSecret: aws.String("RadiusSharedSecret"),
+ UseSameUsername: aws.Bool(true),
+ },
+ }
+ resp, err := svc.UpdateRadius(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleDirectoryService_VerifyTrust() {
+ svc := directoryservice.New(session.New())
+
+ params := &directoryservice.VerifyTrustInput{
+ TrustId: aws.String("TrustId"), // Required
+ }
+ resp, err := svc.VerifyTrust(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/directoryservice/service.go b/vendor/github.com/aws/aws-sdk-go/service/directoryservice/service.go
new file mode 100644
index 000000000..62acf33ce
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/directoryservice/service.go
@@ -0,0 +1,90 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package directoryservice
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+)
+
+// This is the AWS Directory Service API Reference. This guide provides detailed
+// information about AWS Directory Service operations, data types, parameters,
+// and errors.
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type DirectoryService struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "ds"
+
+// New creates a new instance of the DirectoryService client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a DirectoryService client from just a session.
+// svc := directoryservice.New(mySession)
+//
+// // Create a DirectoryService client with additional configuration
+// svc := directoryservice.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *DirectoryService {
+ c := p.ClientConfig(ServiceName, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes, and returns a new service client instance.
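+// It wires the V4 request signer and the JSON-RPC protocol handlers onto the
+// client's handler lists, and runs the package's custom client-initialization
+// hook, if one has been registered.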
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *DirectoryService { + svc := &DirectoryService{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-04-16", + JSONVersion: "1.1", + TargetPrefix: "DirectoryService_20150416", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a DirectoryService operation and runs any +// custom request initialization. +func (c *DirectoryService) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go new file mode 100644 index 000000000..3e9d97cbc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go @@ -0,0 +1,6686 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package dynamodb provides a client for Amazon DynamoDB. +package dynamodb + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opBatchGetItem = "BatchGetItem" + +// BatchGetItemRequest generates a "aws/request.Request" representing the +// client's request for the BatchGetItem operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the BatchGetItem method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the BatchGetItemRequest method. +// req, resp := client.BatchGetItemRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DynamoDB) BatchGetItemRequest(input *BatchGetItemInput) (req *request.Request, output *BatchGetItemOutput) { + op := &request.Operation{ + Name: opBatchGetItem, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"RequestItems"}, + OutputTokens: []string{"UnprocessedKeys"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &BatchGetItemInput{} + } + + req = c.newRequest(op, input, output) + output = &BatchGetItemOutput{} + req.Data = output + return +} + +// The BatchGetItem operation returns the attributes of one or more items from +// one or more tables. You identify requested items by primary key. +// +// A single operation can retrieve up to 16 MB of data, which can contain as +// many as 100 items. 
BatchGetItem will return a partial result if the response +// size limit is exceeded, the table's provisioned throughput is exceeded, or +// an internal processing failure occurs. If a partial result is returned, the +// operation returns a value for UnprocessedKeys. You can use this value to +// retry the operation starting with the next item to get. +// +// If you request more than 100 items BatchGetItem will return a ValidationException +// with the message "Too many items requested for the BatchGetItem call". +// +// For example, if you ask to retrieve 100 items, but each individual item +// is 300 KB in size, the system returns 52 items (so as not to exceed the 16 +// MB limit). It also returns an appropriate UnprocessedKeys value so you can +// get the next page of results. If desired, your application can include its +// own logic to assemble the pages of results into one data set. +// +// If none of the items can be processed due to insufficient provisioned throughput +// on all of the tables in the request, then BatchGetItem will return a ProvisionedThroughputExceededException. +// If at least one of the items is successfully processed, then BatchGetItem +// completes successfully, while returning the keys of the unread items in UnprocessedKeys. +// +// If DynamoDB returns any unprocessed items, you should retry the batch operation +// on those items. However, we strongly recommend that you use an exponential +// backoff algorithm. If you retry the batch operation immediately, the underlying +// read or write requests can still fail due to throttling on the individual +// tables. If you delay the batch operation using exponential backoff, the individual +// requests in the batch are much more likely to succeed. +// +// For more information, see Batch Operations and Error Handling (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#BatchOperations) +// in the Amazon DynamoDB Developer Guide. +// +// By default, BatchGetItem performs eventually consistent reads on every +// table in the request. If you want strongly consistent reads instead, you +// can set ConsistentRead to true for any or all tables. +// +// In order to minimize response latency, BatchGetItem retrieves items in parallel. +// +// When designing your application, keep in mind that DynamoDB does not return +// items in any particular order. To help parse the response by item, include +// the primary key values for the items in your request in the AttributesToGet +// parameter. +// +// If a requested item does not exist, it is not returned in the result. Requests +// for nonexistent items consume the minimum read capacity units according to +// the type of read. For more information, see Capacity Units Calculations (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#CapacityUnitCalculations) +// in the Amazon DynamoDB Developer Guide. +func (c *DynamoDB) BatchGetItem(input *BatchGetItemInput) (*BatchGetItemOutput, error) { + req, out := c.BatchGetItemRequest(input) + err := req.Send() + return out, err +} + +// BatchGetItemPages iterates over the pages of a BatchGetItem operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See BatchGetItem method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a BatchGetItem operation. 
+// pageNum := 0 +// err := client.BatchGetItemPages(params, +// func(page *BatchGetItemOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *DynamoDB) BatchGetItemPages(input *BatchGetItemInput, fn func(p *BatchGetItemOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.BatchGetItemRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*BatchGetItemOutput), lastPage) + }) +} + +const opBatchWriteItem = "BatchWriteItem" + +// BatchWriteItemRequest generates a "aws/request.Request" representing the +// client's request for the BatchWriteItem operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the BatchWriteItem method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the BatchWriteItemRequest method. +// req, resp := client.BatchWriteItemRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DynamoDB) BatchWriteItemRequest(input *BatchWriteItemInput) (req *request.Request, output *BatchWriteItemOutput) { + op := &request.Operation{ + Name: opBatchWriteItem, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchWriteItemInput{} + } + + req = c.newRequest(op, input, output) + output = &BatchWriteItemOutput{} + req.Data = output + return +} + +// The BatchWriteItem operation puts or deletes multiple items in one or more +// tables. A single call to BatchWriteItem can write up to 16 MB of data, which +// can comprise as many as 25 put or delete requests. Individual items to be +// written can be as large as 400 KB. +// +// BatchWriteItem cannot update items. To update items, use the UpdateItem +// API. +// +// The individual PutItem and DeleteItem operations specified in BatchWriteItem +// are atomic; however BatchWriteItem as a whole is not. If any requested operations +// fail because the table's provisioned throughput is exceeded or an internal +// processing failure occurs, the failed operations are returned in the UnprocessedItems +// response parameter. You can investigate and optionally resend the requests. +// Typically, you would call BatchWriteItem in a loop. Each iteration would +// check for unprocessed items and submit a new BatchWriteItem request with +// those unprocessed items until all items have been processed. +// +// Note that if none of the items can be processed due to insufficient provisioned +// throughput on all of the tables in the request, then BatchWriteItem will +// return a ProvisionedThroughputExceededException. +// +// If DynamoDB returns any unprocessed items, you should retry the batch operation +// on those items. However, we strongly recommend that you use an exponential +// backoff algorithm. If you retry the batch operation immediately, the underlying +// read or write requests can still fail due to throttling on the individual +// tables. 
If you delay the batch operation using exponential backoff, the individual +// requests in the batch are much more likely to succeed. +// +// For more information, see Batch Operations and Error Handling (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#BatchOperations) +// in the Amazon DynamoDB Developer Guide. +// +// With BatchWriteItem, you can efficiently write or delete large amounts +// of data, such as from Amazon Elastic MapReduce (EMR), or copy data from another +// database into DynamoDB. In order to improve performance with these large-scale +// operations, BatchWriteItem does not behave in the same way as individual +// PutItem and DeleteItem calls would. For example, you cannot specify conditions +// on individual put and delete requests, and BatchWriteItem does not return +// deleted items in the response. +// +// If you use a programming language that supports concurrency, you can use +// threads to write items in parallel. Your application must include the necessary +// logic to manage the threads. With languages that don't support threading, +// you must update or delete the specified items one at a time. In both situations, +// BatchWriteItem provides an alternative where the API performs the specified +// put and delete operations in parallel, giving you the power of the thread +// pool approach without having to introduce complexity into your application. +// +// Parallel processing reduces latency, but each specified put and delete request +// consumes the same number of write capacity units whether it is processed +// in parallel or not. Delete operations on nonexistent items consume one write +// capacity unit. +// +// If one or more of the following is true, DynamoDB rejects the entire batch +// write operation: +// +// One or more tables specified in the BatchWriteItem request does not exist. +// +// Primary key attributes specified on an item in the request do not match +// those in the corresponding table's primary key schema. +// +// You try to perform multiple operations on the same item in the same BatchWriteItem +// request. For example, you cannot put and delete the same item in the same +// BatchWriteItem request. +// +// There are more than 25 requests in the batch. +// +// Any individual item in a batch exceeds 400 KB. +// +// The total request size exceeds 16 MB. +func (c *DynamoDB) BatchWriteItem(input *BatchWriteItemInput) (*BatchWriteItemOutput, error) { + req, out := c.BatchWriteItemRequest(input) + err := req.Send() + return out, err +} + +const opCreateTable = "CreateTable" + +// CreateTableRequest generates a "aws/request.Request" representing the +// client's request for the CreateTable operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateTable method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateTableRequest method. 
+//    req, resp := client.CreateTableRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *DynamoDB) CreateTableRequest(input *CreateTableInput) (req *request.Request, output *CreateTableOutput) {
+	op := &request.Operation{
+		Name:       opCreateTable,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateTableInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CreateTableOutput{}
+	req.Data = output
+	return
+}
+
+// The CreateTable operation adds a new table to your account. In an AWS account,
+// table names must be unique within each region. That is, you can have two
+// tables with the same name if you create the tables in different regions.
+//
+// CreateTable is an asynchronous operation. Upon receiving a CreateTable
+// request, DynamoDB immediately returns a response with a TableStatus of CREATING.
+// After the table is created, DynamoDB sets the TableStatus to ACTIVE. You
+// can perform read and write operations only on an ACTIVE table.
+//
+// You can optionally define secondary indexes on the new table, as part of
+// the CreateTable operation. If you want to create multiple tables with secondary
+// indexes on them, you must create the tables sequentially. Only one table
+// with secondary indexes can be in the CREATING state at any given time.
+//
+// You can use the DescribeTable API to check the table status.
+func (c *DynamoDB) CreateTable(input *CreateTableInput) (*CreateTableOutput, error) {
+	req, out := c.CreateTableRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteItem = "DeleteItem"
+
+// DeleteItemRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteItem operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DeleteItem method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DeleteItemRequest method.
+//    req, resp := client.DeleteItemRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *DynamoDB) DeleteItemRequest(input *DeleteItemInput) (req *request.Request, output *DeleteItemOutput) {
+	op := &request.Operation{
+		Name:       opDeleteItem,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteItemInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DeleteItemOutput{}
+	req.Data = output
+	return
+}
+
+// Deletes a single item in a table by primary key. You can perform a conditional
+// delete operation that deletes the item if it exists, or if it has an expected
+// attribute value.
+//
+// In addition to deleting an item, you can also return the item's attribute
+// values in the same operation, using the ReturnValues parameter.
+//
+// Unless you specify conditions, DeleteItem is an idempotent operation;
+// running it multiple times on the same item or attribute does not result in
+// an error response.
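+//
+// As a brief illustrative sketch of a delete that returns the old attribute
+// values (the table name, key, and values below are assumptions for the
+// example, not part of this API):
+//
+//    // Delete one item by primary key and capture its old attribute values.
+//    resp, err := svc.DeleteItem(&dynamodb.DeleteItemInput{
+//        TableName: aws.String("Music"), // hypothetical table
+//        Key: map[string]*dynamodb.AttributeValue{
+//            "Artist": {S: aws.String("No One You Know")}, // hypothetical key
+//        },
+//        ReturnValues: aws.String("ALL_OLD"), // return the deleted attributes
+//    })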
+//
+// Conditional deletes are useful for deleting items only if specific conditions
+// are met. If those conditions are met, DynamoDB performs the delete. Otherwise,
+// the item is not deleted.
+func (c *DynamoDB) DeleteItem(input *DeleteItemInput) (*DeleteItemOutput, error) {
+	req, out := c.DeleteItemRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteTable = "DeleteTable"
+
+// DeleteTableRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteTable operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DeleteTable method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DeleteTableRequest method.
+//    req, resp := client.DeleteTableRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *DynamoDB) DeleteTableRequest(input *DeleteTableInput) (req *request.Request, output *DeleteTableOutput) {
+	op := &request.Operation{
+		Name:       opDeleteTable,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteTableInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DeleteTableOutput{}
+	req.Data = output
+	return
+}
+
+// The DeleteTable operation deletes a table and all of its items. After a DeleteTable
+// request, the specified table is in the DELETING state until DynamoDB completes
+// the deletion. If the table is in the ACTIVE state, you can delete it. If
+// a table is in the CREATING or UPDATING state, then DynamoDB returns a ResourceInUseException.
+// If the specified table does not exist, DynamoDB returns a ResourceNotFoundException.
+// If the table is already in the DELETING state, no error is returned.
+//
+// DynamoDB might continue to accept data read and write operations, such
+// as GetItem and PutItem, on a table in the DELETING state until the table
+// deletion is complete.
+//
+// When you delete a table, any indexes on that table are also deleted.
+//
+// If you have DynamoDB Streams enabled on the table, then the corresponding
+// stream on that table goes into the DISABLED state, and the stream is automatically
+// deleted after 24 hours.
+//
+// Use the DescribeTable API to check the status of the table.
+func (c *DynamoDB) DeleteTable(input *DeleteTableInput) (*DeleteTableOutput, error) {
+	req, out := c.DeleteTableRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeLimits = "DescribeLimits"
+
+// DescribeLimitsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeLimits operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request.
If +// you just want the service response, call the DescribeLimits method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeLimitsRequest method. +// req, resp := client.DescribeLimitsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DynamoDB) DescribeLimitsRequest(input *DescribeLimitsInput) (req *request.Request, output *DescribeLimitsOutput) { + op := &request.Operation{ + Name: opDescribeLimits, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeLimitsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeLimitsOutput{} + req.Data = output + return +} + +// Returns the current provisioned-capacity limits for your AWS account in a +// region, both for the region as a whole and for any one DynamoDB table that +// you create there. +// +// When you establish an AWS account, the account has initial limits on the +// maximum read capacity units and write capacity units that you can provision +// across all of your DynamoDB tables in a given region. Also, there are per-table +// limits that apply when you create a table there. For more information, see +// Limits (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) +// page in the Amazon DynamoDB Developer Guide. +// +// Although you can increase these limits by filing a case at AWS Support Center +// (https://console.aws.amazon.com/support/home#/), obtaining the increase is +// not instantaneous. The DescribeLimits API lets you write code to compare +// the capacity you are currently using to those limits imposed by your account +// so that you have enough time to apply for an increase before you hit a limit. +// +// For example, you could use one of the AWS SDKs to do the following: +// +// Call DescribeLimits for a particular region to obtain your current account +// limits on provisioned capacity there. +// +// Create a variable to hold the aggregate read capacity units provisioned +// for all your tables in that region, and one to hold the aggregate write capacity +// units. Zero them both. +// +// Call ListTables to obtain a list of all your DynamoDB tables. +// +// For each table name listed by ListTables, do the following: +// +// Call DescribeTable with the table name. +// +// Use the data returned by DescribeTable to add the read capacity units and +// write capacity units provisioned for the table itself to your variables. +// +// If the table has one or more global secondary indexes (GSIs), loop over +// these GSIs and add their provisioned capacity values to your variables as +// well. +// +// Report the account limits for that region returned by DescribeLimits, +// along with the total current provisioned capacity levels you have calculated. +// +// This will let you see whether you are getting close to your account-level +// limits. +// +// The per-table limits apply only when you are creating a new table. They +// restrict the sum of the provisioned capacity of the new table itself and +// all its global secondary indexes. +// +// For existing tables and their GSIs, DynamoDB will not let you increase provisioned +// capacity extremely rapidly, but the only upper limit that applies is that +// the aggregate provisioned capacity over all your tables and GSIs cannot exceed +// either of the per-account limits. 
+// +// DescribeLimits should only be called periodically. You can expect throttling +// errors if you call it more than once in a minute. +// +// The DescribeLimits Request element has no content. +func (c *DynamoDB) DescribeLimits(input *DescribeLimitsInput) (*DescribeLimitsOutput, error) { + req, out := c.DescribeLimitsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeTable = "DescribeTable" + +// DescribeTableRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTable operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeTable method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeTableRequest method. +// req, resp := client.DescribeTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DynamoDB) DescribeTableRequest(input *DescribeTableInput) (req *request.Request, output *DescribeTableOutput) { + op := &request.Operation{ + Name: opDescribeTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTableInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTableOutput{} + req.Data = output + return +} + +// Returns information about the table, including the current status of the +// table, when it was created, the primary key schema, and any indexes on the +// table. +// +// If you issue a DescribeTable request immediately after a CreateTable request, +// DynamoDB might return a ResourceNotFoundException. This is because DescribeTable +// uses an eventually consistent query, and the metadata for your table might +// not be available at that moment. Wait for a few seconds, and then try the +// DescribeTable request again. +func (c *DynamoDB) DescribeTable(input *DescribeTableInput) (*DescribeTableOutput, error) { + req, out := c.DescribeTableRequest(input) + err := req.Send() + return out, err +} + +const opGetItem = "GetItem" + +// GetItemRequest generates a "aws/request.Request" representing the +// client's request for the GetItem operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetItem method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetItemRequest method. 
+// req, resp := client.GetItemRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DynamoDB) GetItemRequest(input *GetItemInput) (req *request.Request, output *GetItemOutput) { + op := &request.Operation{ + Name: opGetItem, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetItemInput{} + } + + req = c.newRequest(op, input, output) + output = &GetItemOutput{} + req.Data = output + return +} + +// The GetItem operation returns a set of attributes for the item with the given +// primary key. If there is no matching item, GetItem does not return any data. +// +// GetItem provides an eventually consistent read by default. If your application +// requires a strongly consistent read, set ConsistentRead to true. Although +// a strongly consistent read might take more time than an eventually consistent +// read, it always returns the last updated value. +func (c *DynamoDB) GetItem(input *GetItemInput) (*GetItemOutput, error) { + req, out := c.GetItemRequest(input) + err := req.Send() + return out, err +} + +const opListTables = "ListTables" + +// ListTablesRequest generates a "aws/request.Request" representing the +// client's request for the ListTables operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTables method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTablesRequest method. +// req, resp := client.ListTablesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DynamoDB) ListTablesRequest(input *ListTablesInput) (req *request.Request, output *ListTablesOutput) { + op := &request.Operation{ + Name: opListTables, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"ExclusiveStartTableName"}, + OutputTokens: []string{"LastEvaluatedTableName"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTablesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTablesOutput{} + req.Data = output + return +} + +// Returns an array of table names associated with the current account and endpoint. +// The output from ListTables is paginated, with each page returning a maximum +// of 100 table names. +func (c *DynamoDB) ListTables(input *ListTablesInput) (*ListTablesOutput, error) { + req, out := c.ListTablesRequest(input) + err := req.Send() + return out, err +} + +// ListTablesPages iterates over the pages of a ListTables operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListTables method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListTables operation. 
+// pageNum := 0 +// err := client.ListTablesPages(params, +// func(page *ListTablesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *DynamoDB) ListTablesPages(input *ListTablesInput, fn func(p *ListTablesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListTablesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListTablesOutput), lastPage) + }) +} + +const opPutItem = "PutItem" + +// PutItemRequest generates a "aws/request.Request" representing the +// client's request for the PutItem operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutItem method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutItemRequest method. +// req, resp := client.PutItemRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DynamoDB) PutItemRequest(input *PutItemInput) (req *request.Request, output *PutItemOutput) { + op := &request.Operation{ + Name: opPutItem, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutItemInput{} + } + + req = c.newRequest(op, input, output) + output = &PutItemOutput{} + req.Data = output + return +} + +// Creates a new item, or replaces an old item with a new item. If an item that +// has the same primary key as the new item already exists in the specified +// table, the new item completely replaces the existing item. You can perform +// a conditional put operation (add a new item if one with the specified primary +// key doesn't exist), or replace an existing item if it has certain attribute +// values. +// +// In addition to putting an item, you can also return the item's attribute +// values in the same operation, using the ReturnValues parameter. +// +// When you add an item, the primary key attribute(s) are the only required +// attributes. Attribute values cannot be null. String and Binary type attributes +// must have lengths greater than zero. Set type attributes cannot be empty. +// Requests with empty values will be rejected with a ValidationException exception. +// +// You can request that PutItem return either a copy of the original item (before +// the update) or a copy of the updated item (after the update). For more information, +// see the ReturnValues description below. +// +// To prevent a new item from replacing an existing item, use a conditional +// expression that contains the attribute_not_exists function with the name +// of the attribute being used as the partition key for the table. Since every +// record must contain that attribute, the attribute_not_exists function will +// only succeed if no matching item exists. +// +// For more information about using this API, see Working with Items (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithItems.html) +// in the Amazon DynamoDB Developer Guide. 
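+//
+// As a brief illustrative sketch of that conditional-put technique (the table
+// and attribute names are assumptions for the example, not part of this API):
+//
+//    // The write fails with a ConditionalCheckFailedException rather than
+//    // silently replacing an existing item.
+//    _, err := svc.PutItem(&dynamodb.PutItemInput{
+//        TableName: aws.String("Users"), // hypothetical table
+//        Item: map[string]*dynamodb.AttributeValue{
+//            "UserId": {S: aws.String("u-123")}, // hypothetical partition key
+//            "Name":   {S: aws.String("Ada")},
+//        },
+//        ConditionExpression: aws.String("attribute_not_exists(UserId)"),
+//    })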
+func (c *DynamoDB) PutItem(input *PutItemInput) (*PutItemOutput, error) { + req, out := c.PutItemRequest(input) + err := req.Send() + return out, err +} + +const opQuery = "Query" + +// QueryRequest generates a "aws/request.Request" representing the +// client's request for the Query operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the Query method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the QueryRequest method. +// req, resp := client.QueryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DynamoDB) QueryRequest(input *QueryInput) (req *request.Request, output *QueryOutput) { + op := &request.Operation{ + Name: opQuery, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"ExclusiveStartKey"}, + OutputTokens: []string{"LastEvaluatedKey"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &QueryInput{} + } + + req = c.newRequest(op, input, output) + output = &QueryOutput{} + req.Data = output + return +} + +// A Query operation uses the primary key of a table or a secondary index to +// directly access items from that table or index. +// +// Use the KeyConditionExpression parameter to provide a specific value for +// the partition key. The Query operation will return all of the items from +// the table or index with that partition key value. You can optionally narrow +// the scope of the Query operation by specifying a sort key value and a comparison +// operator in KeyConditionExpression. You can use the ScanIndexForward parameter +// to get results in forward or reverse order, by sort key. +// +// Queries that do not return results consume the minimum number of read capacity +// units for that type of read operation. +// +// If the total number of items meeting the query criteria exceeds the result +// set size limit of 1 MB, the query stops and results are returned to the user +// with the LastEvaluatedKey element to continue the query in a subsequent operation. +// Unlike a Scan operation, a Query operation never returns both an empty result +// set and a LastEvaluatedKey value. LastEvaluatedKey is only provided if you +// have used the Limit parameter, or if the result set exceeds 1 MB (prior to +// applying a filter). +// +// You can query a table, a local secondary index, or a global secondary index. +// For a query on a table or on a local secondary index, you can set the ConsistentRead +// parameter to true and obtain a strongly consistent result. Global secondary +// indexes support eventually consistent reads only, so do not specify ConsistentRead +// when querying a global secondary index. +func (c *DynamoDB) Query(input *QueryInput) (*QueryOutput, error) { + req, out := c.QueryRequest(input) + err := req.Send() + return out, err +} + +// QueryPages iterates over the pages of a Query operation, +// calling the "fn" function with the response data for each page. 
To stop +// iterating, return false from the fn function. +// +// See Query method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a Query operation. +// pageNum := 0 +// err := client.QueryPages(params, +// func(page *QueryOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *DynamoDB) QueryPages(input *QueryInput, fn func(p *QueryOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.QueryRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*QueryOutput), lastPage) + }) +} + +const opScan = "Scan" + +// ScanRequest generates a "aws/request.Request" representing the +// client's request for the Scan operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the Scan method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ScanRequest method. +// req, resp := client.ScanRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DynamoDB) ScanRequest(input *ScanInput) (req *request.Request, output *ScanOutput) { + op := &request.Operation{ + Name: opScan, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"ExclusiveStartKey"}, + OutputTokens: []string{"LastEvaluatedKey"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &ScanInput{} + } + + req = c.newRequest(op, input, output) + output = &ScanOutput{} + req.Data = output + return +} + +// The Scan operation returns one or more items and item attributes by accessing +// every item in a table or a secondary index. To have DynamoDB return fewer +// items, you can provide a ScanFilter operation. +// +// If the total number of scanned items exceeds the maximum data set size limit +// of 1 MB, the scan stops and results are returned to the user as a LastEvaluatedKey +// value to continue the scan in a subsequent operation. The results also include +// the number of items exceeding the limit. A scan can result in no table data +// meeting the filter criteria. +// +// By default, Scan operations proceed sequentially; however, for faster performance +// on a large table or secondary index, applications can request a parallel +// Scan operation by providing the Segment and TotalSegments parameters. For +// more information, see Parallel Scan (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#QueryAndScanParallelScan) +// in the Amazon DynamoDB Developer Guide. +// +// By default, Scan uses eventually consistent reads when accessing the data +// in a table; therefore, the result set might not include the changes to data +// in the table immediately before the operation began. 
If you need a consistent +// copy of the data, as of the time that the Scan begins, you can set the ConsistentRead +// parameter to true. +func (c *DynamoDB) Scan(input *ScanInput) (*ScanOutput, error) { + req, out := c.ScanRequest(input) + err := req.Send() + return out, err +} + +// ScanPages iterates over the pages of a Scan operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See Scan method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a Scan operation. +// pageNum := 0 +// err := client.ScanPages(params, +// func(page *ScanOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *DynamoDB) ScanPages(input *ScanInput, fn func(p *ScanOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ScanRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ScanOutput), lastPage) + }) +} + +const opUpdateItem = "UpdateItem" + +// UpdateItemRequest generates a "aws/request.Request" representing the +// client's request for the UpdateItem operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateItem method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateItemRequest method. +// req, resp := client.UpdateItemRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DynamoDB) UpdateItemRequest(input *UpdateItemInput) (req *request.Request, output *UpdateItemOutput) { + op := &request.Operation{ + Name: opUpdateItem, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateItemInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateItemOutput{} + req.Data = output + return +} + +// Edits an existing item's attributes, or adds a new item to the table if it +// does not already exist. You can put, delete, or add attribute values. You +// can also perform a conditional update on an existing item (insert a new attribute +// name-value pair if it doesn't exist, or replace an existing name-value pair +// if it has certain expected attribute values). +// +// You can also return the item's attribute values in the same UpdateItem operation +// using the ReturnValues parameter. +func (c *DynamoDB) UpdateItem(input *UpdateItemInput) (*UpdateItemOutput, error) { + req, out := c.UpdateItemRequest(input) + err := req.Send() + return out, err +} + +const opUpdateTable = "UpdateTable" + +// UpdateTableRequest generates a "aws/request.Request" representing the +// client's request for the UpdateTable operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateTable method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateTableRequest method. +// req, resp := client.UpdateTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DynamoDB) UpdateTableRequest(input *UpdateTableInput) (req *request.Request, output *UpdateTableOutput) { + op := &request.Operation{ + Name: opUpdateTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateTableInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateTableOutput{} + req.Data = output + return +} + +// Modifies the provisioned throughput settings, global secondary indexes, or +// DynamoDB Streams settings for a given table. +// +// You can only perform one of the following operations at once: +// +// Modify the provisioned throughput settings of the table. +// +// Enable or disable Streams on the table. +// +// Remove a global secondary index from the table. +// +// Create a new global secondary index on the table. Once the index begins +// backfilling, you can use UpdateTable to perform other operations. +// +// UpdateTable is an asynchronous operation; while it is executing, the +// table status changes from ACTIVE to UPDATING. While it is UPDATING, you cannot +// issue another UpdateTable request. When the table returns to the ACTIVE state, +// the UpdateTable operation is complete. +func (c *DynamoDB) UpdateTable(input *UpdateTableInput) (*UpdateTableOutput, error) { + req, out := c.UpdateTableRequest(input) + err := req.Send() + return out, err +} + +// Represents an attribute for describing the key schema for the table and indexes. +type AttributeDefinition struct { + _ struct{} `type:"structure"` + + // A name for the attribute. + AttributeName *string `min:"1" type:"string" required:"true"` + + // The data type for the attribute, where: + // + // S - the attribute is of type String + // + // N - the attribute is of type Number + // + // B - the attribute is of type Binary + AttributeType *string `type:"string" required:"true" enum:"ScalarAttributeType"` +} + +// String returns the string representation +func (s AttributeDefinition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttributeDefinition) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AttributeDefinition) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AttributeDefinition"} + if s.AttributeName == nil { + invalidParams.Add(request.NewErrParamRequired("AttributeName")) + } + if s.AttributeName != nil && len(*s.AttributeName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AttributeName", 1)) + } + if s.AttributeType == nil { + invalidParams.Add(request.NewErrParamRequired("AttributeType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the data for an attribute. You can set one, and only one, of the +// elements. 
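+//
+// For instance (the values here are illustrative assumptions):
+//
+//    title := &dynamodb.AttributeValue{S: aws.String("Some Title")}
+//    year := &dynamodb.AttributeValue{N: aws.String("2015")} // numbers travel as strings
+//    tags := &dynamodb.AttributeValue{SS: []*string{aws.String("a"), aws.String("b")}}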
+// +// Each attribute in an item is a name-value pair. An attribute can be single-valued +// or multi-valued set. For example, a book item can have title and authors +// attributes. Each book has one title but can have many authors. The multi-valued +// attribute is a set; duplicate values are not allowed. +type AttributeValue struct { + _ struct{} `type:"structure"` + + // A Binary data type. + // + // B is automatically base64 encoded/decoded by the SDK. + B []byte `type:"blob"` + + // A Boolean data type. + BOOL *bool `type:"boolean"` + + // A Binary Set data type. + BS [][]byte `type:"list"` + + // A List of attribute values. + L []*AttributeValue `type:"list"` + + // A Map of attribute values. + M map[string]*AttributeValue `type:"map"` + + // A Number data type. + N *string `type:"string"` + + // A Number Set data type. + NS []*string `type:"list"` + + // A Null data type. + NULL *bool `type:"boolean"` + + // A String data type. + S *string `type:"string"` + + // A String Set data type. + SS []*string `type:"list"` +} + +// String returns the string representation +func (s AttributeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttributeValue) GoString() string { + return s.String() +} + +// For the UpdateItem operation, represents the attributes to be modified, the +// action to perform on each, and the new value for each. +// +// You cannot use UpdateItem to update any primary key attributes. Instead, +// you will need to delete the item, and then use PutItem to create a new item +// with new attributes. +// +// Attribute values cannot be null; string and binary type attributes must +// have lengths greater than zero; and set type attributes must not be empty. +// Requests with empty values will be rejected with a ValidationException exception. +type AttributeValueUpdate struct { + _ struct{} `type:"structure"` + + // Specifies how to perform the update. Valid values are PUT (default), DELETE, + // and ADD. The behavior depends on whether the specified primary key already + // exists in the table. + // + // If an item with the specified Key is found in the table: + // + // PUT - Adds the specified attribute to the item. If the attribute already + // exists, it is replaced by the new value. + // + // DELETE - If no value is specified, the attribute and its value are removed + // from the item. The data type of the specified value must match the existing + // value's data type. + // + // If a set of values is specified, then those values are subtracted from the + // old set. For example, if the attribute value was the set [a,b,c] and the + // DELETE action specified [a,c], then the final attribute value would be [b]. + // Specifying an empty set is an error. + // + // ADD - If the attribute does not already exist, then the attribute and + // its values are added to the item. If the attribute does exist, then the behavior + // of ADD depends on the data type of the attribute: + // + // If the existing attribute is a number, and if Value is also a number, + // then the Value is mathematically added to the existing attribute. If Value + // is a negative number, then it is subtracted from the existing attribute. + // + // If you use ADD to increment or decrement a number value for an item that + // doesn't exist before the update, DynamoDB uses 0 as the initial value. 
+ // + // In addition, if you use ADD to update an existing item, and intend to increment + // or decrement an attribute value which does not yet exist, DynamoDB uses 0 + // as the initial value. For example, suppose that the item you want to update + // does not yet have an attribute named itemcount, but you decide to ADD the + // number 3 to this attribute anyway, even though it currently does not exist. + // DynamoDB will create the itemcount attribute, set its initial value to 0, + // and finally add 3 to it. The result will be a new itemcount attribute in + // the item, with a value of 3. + // + // If the existing data type is a set, and if the Value is also a set, then + // the Value is added to the existing set. (This is a set operation, not mathematical + // addition.) For example, if the attribute value was the set [1,2], and the + // ADD action specified [3], then the final attribute value would be [1,2,3]. + // An error occurs if an Add action is specified for a set attribute and the + // attribute type specified does not match the existing set type. + // + // Both sets must have the same primitive data type. For example, if the existing + // data type is a set of strings, the Value must also be a set of strings. The + // same holds true for number sets and binary sets. + // + // This action is only valid for an existing attribute whose data type is + // number or is a set. Do not use ADD for any other data types. + // + // If no item with the specified Key is found: + // + // PUT - DynamoDB creates a new item with the specified primary key, and + // then adds the attribute. + // + // DELETE - Nothing happens; there is no attribute to delete. + // + // ADD - DynamoDB creates an item with the supplied primary key and number + // (or set of numbers) for the attribute value. The only data types allowed + // are number and number set; no other data types can be specified. + Action *string `type:"string" enum:"AttributeAction"` + + // Represents the data for an attribute. You can set one, and only one, of the + // elements. + // + // Each attribute in an item is a name-value pair. An attribute can be single-valued + // or multi-valued set. For example, a book item can have title and authors + // attributes. Each book has one title but can have many authors. The multi-valued + // attribute is a set; duplicate values are not allowed. + Value *AttributeValue `type:"structure"` +} + +// String returns the string representation +func (s AttributeValueUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttributeValueUpdate) GoString() string { + return s.String() +} + +// Represents the input of a BatchGetItem operation. +type BatchGetItemInput struct { + _ struct{} `type:"structure"` + + // A map of one or more table names and, for each table, a map that describes + // one or more items to retrieve from that table. Each table name can be used + // only once per BatchGetItem request. + // + // Each element in the map of items to retrieve consists of the following: + // + // ConsistentRead - If true, a strongly consistent read is used; if false + // (the default), an eventually consistent read is used. + // + // ExpressionAttributeNames - One or more substitution tokens for attribute + // names in the ProjectionExpression parameter. The following are some use cases + // for using ExpressionAttributeNames: + // + // To access an attribute whose name conflicts with a DynamoDB reserved word. 
+ // + // To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. + // For example, consider the following attribute name: + // + // Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot + // be used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // #P = :val + // + // Tokens that begin with the : character are expression attribute values, + // which are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see Accessing Item + // Attributes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + // + // Keys - An array of primary key attribute values that define specific + // items in the table. For each primary key, you must provide all of the key + // attributes. For example, with a simple primary key, you only need to provide + // the partition key value. For a composite key, you must provide both the partition + // key value and the sort key value. + // + // ProjectionExpression - A string that identifies one or more attributes + // to retrieve from the table. These attributes can include scalars, sets, or + // elements of a JSON document. The attributes in the expression must be separated + // by commas. + // + // If no attribute names are specified, then all attributes will be returned. + // If any of the requested attributes are not found, they will not appear in + // the result. + // + // For more information, see Accessing Item Attributes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + // + // AttributesToGet - + // + // This is a legacy parameter, for backward compatibility. New applications + // should use ProjectionExpression instead. Do not combine legacy parameters + // and expression parameters in a single API call; otherwise, DynamoDB will + // return a ValidationException exception. + // + // This parameter allows you to retrieve attributes of type List or Map; however, + // it cannot retrieve individual elements within a List or a Map. + // + // The names of one or more attributes to retrieve. If no attribute names + // are provided, then all attributes will be returned. If any of the requested + // attributes are not found, they will not appear in the result. + // + // Note that AttributesToGet has no effect on provisioned throughput consumption. + // DynamoDB determines capacity units consumed based on item size, not on the + // amount of data that is returned to an application. 
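+	//
+	// A short illustrative sketch of one RequestItems entry (the table, key,
+	// and attribute names are assumptions for the example):
+	//
+	//    input := &dynamodb.BatchGetItemInput{
+	//        RequestItems: map[string]*dynamodb.KeysAndAttributes{
+	//            "Music": { // hypothetical table
+	//                Keys: []map[string]*dynamodb.AttributeValue{
+	//                    {"Artist": {S: aws.String("No One You Know")}},
+	//                },
+	//                ProjectionExpression:     aws.String("#P, AlbumTitle"),
+	//                ExpressionAttributeNames: map[string]*string{"#P": aws.String("Percentile")},
+	//            },
+	//        },
+	//    }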
+ RequestItems map[string]*KeysAndAttributes `min:"1" type:"map" required:"true"` + + // Determines the level of detail about provisioned throughput consumption that + // is returned in the response: + // + // INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary index + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem, do not access + // any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity + // information for table(s). + // + // TOTAL - The response includes only the aggregate ConsumedCapacity for + // the operation. + // + // NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` +} + +// String returns the string representation +func (s BatchGetItemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetItemInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchGetItemInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchGetItemInput"} + if s.RequestItems == nil { + invalidParams.Add(request.NewErrParamRequired("RequestItems")) + } + if s.RequestItems != nil && len(s.RequestItems) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RequestItems", 1)) + } + if s.RequestItems != nil { + for i, v := range s.RequestItems { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RequestItems", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a BatchGetItem operation. +type BatchGetItemOutput struct { + _ struct{} `type:"structure"` + + // The read capacity units consumed by the operation. + // + // Each element consists of: + // + // TableName - The table that consumed the provisioned throughput. + // + // CapacityUnits - The total number of capacity units consumed. + ConsumedCapacity []*ConsumedCapacity `type:"list"` + + // A map of table name to a list of items. Each object in Responses consists + // of a table name, along with a map of attribute data consisting of the data + // type and attribute value. + Responses map[string][]map[string]*AttributeValue `type:"map"` + + // A map of tables and their respective keys that were not processed with the + // current response. The UnprocessedKeys value is in the same form as RequestItems, + // so the value can be provided directly to a subsequent BatchGetItem operation. + // For more information, see RequestItems in the Request Parameters section. + // + // Each element consists of: + // + // Keys - An array of primary key attribute values that define specific + // items in the table. + // + // AttributesToGet - One or more attributes to be retrieved from the table + // or index. By default, all attributes are returned. If a requested attribute + // is not found, it does not appear in the result. + // + // ConsistentRead - The consistency of a read operation. If set to true, + // then a strongly consistent read is used; otherwise, an eventually consistent + // read is used. + // + // If there are no unprocessed keys remaining, the response contains an empty + // UnprocessedKeys map. 
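+ //
+ // Because UnprocessedKeys has the same shape as RequestItems, a caller can
+ // simply loop until it is empty. A minimal sketch (assuming svc is a
+ // *dynamodb.DynamoDB client and input is a populated *BatchGetItemInput):
+ //
+ //    for {
+ //        out, err := svc.BatchGetItem(input)
+ //        if err != nil {
+ //            return err
+ //        }
+ //        // ... consume out.Responses ...
+ //        if len(out.UnprocessedKeys) == 0 {
+ //            break
+ //        }
+ //        input.RequestItems = out.UnprocessedKeys
+ //    }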
+ UnprocessedKeys map[string]*KeysAndAttributes `min:"1" type:"map"`
+}
+
+// String returns the string representation
+func (s BatchGetItemOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchGetItemOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a BatchWriteItem operation.
+type BatchWriteItemInput struct {
+ _ struct{} `type:"structure"`
+
+ // A map of one or more table names and, for each table, a list of operations
+ // to be performed (DeleteRequest or PutRequest). Each element in the map consists
+ // of the following:
+ //
+ // DeleteRequest - Perform a DeleteItem operation on the specified item.
+ // The item to be deleted is identified by a Key subelement:
+ //
+ // Key - A map of primary key attribute values that uniquely identify the
+ // item. Each entry in this map consists of an attribute name and an attribute
+ // value. For each primary key, you must provide all of the key attributes.
+ // For example, with a simple primary key, you only need to provide a value
+ // for the partition key. For a composite primary key, you must provide values
+ // for both the partition key and the sort key.
+ //
+ // PutRequest - Perform a PutItem operation on the specified item. The
+ // item to be put is identified by an Item subelement:
+ //
+ // Item - A map of attributes and their values. Each entry in this map consists
+ // of an attribute name and an attribute value. Attribute values must not be
+ // null; string and binary type attributes must have lengths greater than zero;
+ // and set type attributes must not be empty. Requests that contain empty values
+ // will be rejected with a ValidationException exception.
+ //
+ // If you specify any attributes that are part of an index key, then the data
+ // types for those attributes must match those of the schema in the table's
+ // attribute definition.
+ RequestItems map[string][]*WriteRequest `min:"1" type:"map" required:"true"`
+
+ // Determines the level of detail about provisioned throughput consumption that
+ // is returned in the response:
+ //
+ // INDEXES - The response includes the aggregate ConsumedCapacity for the
+ // operation, together with ConsumedCapacity for each table and secondary index
+ // that was accessed.
+ //
+ // Note that some operations, such as GetItem and BatchGetItem, do not access
+ // any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity
+ // information for table(s).
+ //
+ // TOTAL - The response includes only the aggregate ConsumedCapacity for
+ // the operation.
+ //
+ // NONE - No ConsumedCapacity details are included in the response.
+ ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
+
+ // Determines whether item collection metrics are returned. If set to SIZE,
+ // the response includes statistics about item collections, if any, that were
+ // modified during the operation. If set to NONE (the default), no statistics
+ // are returned.
+ ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"`
+}
+
+// String returns the string representation
+func (s BatchWriteItemInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchWriteItemInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *BatchWriteItemInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "BatchWriteItemInput"}
+ if s.RequestItems == nil {
+ invalidParams.Add(request.NewErrParamRequired("RequestItems"))
+ }
+ if s.RequestItems != nil && len(s.RequestItems) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("RequestItems", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the output of a BatchWriteItem operation.
+type BatchWriteItemOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The capacity units consumed by the operation.
+ //
+ // Each element consists of:
+ //
+ // TableName - The table that consumed the provisioned throughput.
+ //
+ // CapacityUnits - The total number of capacity units consumed.
+ ConsumedCapacity []*ConsumedCapacity `type:"list"`
+
+ // A list of tables that were processed by BatchWriteItem and, for each table,
+ // information about any item collections that were affected by individual DeleteItem
+ // or PutItem operations.
+ //
+ // Each entry consists of the following subelements:
+ //
+ // ItemCollectionKey - The partition key value of the item collection. This
+ // is the same as the partition key value of the item.
+ //
+ // SizeEstimateRange - An estimate of item collection size, expressed in
+ // GB. This is a two-element array containing a lower bound and an upper bound
+ // for the estimate. The estimate includes the size of all the items in the
+ // table, plus the size of all attributes projected into all of the local secondary
+ // indexes on the table. Use this estimate to measure whether a local secondary
+ // index is approaching its size limit.
+ //
+ // The estimate is subject to change over time; therefore, do not rely on the
+ // precision or accuracy of the estimate.
+ ItemCollectionMetrics map[string][]*ItemCollectionMetrics `type:"map"`
+
+ // A map of tables and requests against those tables that were not processed.
+ // The UnprocessedItems value is in the same form as RequestItems, so you can
+ // provide this value directly to a subsequent BatchWriteItem operation. For
+ // more information, see RequestItems in the Request Parameters section.
+ //
+ // Each UnprocessedItems entry consists of a table name and, for that table,
+ // a list of operations to perform (DeleteRequest or PutRequest).
+ //
+ // DeleteRequest - Perform a DeleteItem operation on the specified item.
+ // The item to be deleted is identified by a Key subelement:
+ //
+ // Key - A map of primary key attribute values that uniquely identify the
+ // item. Each entry in this map consists of an attribute name and an attribute
+ // value.
+ //
+ // PutRequest - Perform a PutItem operation on the specified item. The
+ // item to be put is identified by an Item subelement:
+ //
+ // Item - A map of attributes and their values. Each entry in this map consists
+ // of an attribute name and an attribute value. Attribute values must not be
+ // null; string and binary type attributes must have lengths greater than zero;
+ // and set type attributes must not be empty. Requests that contain empty values
+ // will be rejected with a ValidationException exception.
+ //
+ // If you specify any attributes that are part of an index key, then the data
+ // types for those attributes must match those of the schema in the table's
+ // attribute definition.
+ //
+ // If there are no unprocessed items remaining, the response contains an
+ // empty UnprocessedItems map.
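+ //
+ // A minimal resubmission sketch (assuming svc is a *dynamodb.DynamoDB client
+ // and writes is a populated map[string][]*dynamodb.WriteRequest):
+ //
+ //    input := &dynamodb.BatchWriteItemInput{RequestItems: writes}
+ //    for len(input.RequestItems) > 0 {
+ //        out, err := svc.BatchWriteItem(input)
+ //        if err != nil {
+ //            return err
+ //        }
+ //        input.RequestItems = out.UnprocessedItems
+ //    }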
+ UnprocessedItems map[string][]*WriteRequest `min:"1" type:"map"` +} + +// String returns the string representation +func (s BatchWriteItemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchWriteItemOutput) GoString() string { + return s.String() +} + +// Represents the amount of provisioned throughput capacity consumed on a table +// or an index. +type Capacity struct { + _ struct{} `type:"structure"` + + // The total number of capacity units consumed on a table or an index. + CapacityUnits *float64 `type:"double"` +} + +// String returns the string representation +func (s Capacity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Capacity) GoString() string { + return s.String() +} + +// Represents the selection criteria for a Query or Scan operation: +// +// For a Query operation, Condition is used for specifying the KeyConditions +// to use when querying a table or an index. For KeyConditions, only the following +// comparison operators are supported: +// +// EQ | LE | LT | GE | GT | BEGINS_WITH | BETWEEN +// +// Condition is also used in a QueryFilter, which evaluates the query results +// and returns only the desired values. +// +// For a Scan operation, Condition is used in a ScanFilter, which evaluates +// the scan results and returns only the desired values. +type Condition struct { + _ struct{} `type:"structure"` + + // One or more values to evaluate against the supplied attribute. The number + // of values in the list depends on the ComparisonOperator being used. + // + // For type Number, value comparisons are numeric. + // + // String value comparisons for greater than, equals, or less than are based + // on ASCII character code values. For example, a is greater than A, and a is + // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters + // (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters). + // + // For Binary, DynamoDB treats each byte of the binary data as unsigned when + // it compares binary values. + AttributeValueList []*AttributeValue `type:"list"` + + // A comparator for evaluating attributes. For example, equals, greater than, + // less than, etc. + // + // The following comparison operators are available: + // + // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS + // | BEGINS_WITH | IN | BETWEEN + // + // The following are descriptions of each comparison operator. + // + // EQ : Equal. EQ is supported for all datatypes, including lists and maps. + // + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, Binary, String Set, Number Set, or Binary Set. If an item + // contains an AttributeValue element of a different type than the one provided + // in the request, the value does not match. For example, {"S":"6"} does not + // equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. + // + // NE : Not equal. NE is supported for all datatypes, including lists and + // maps. + // + // AttributeValueList can contain only one AttributeValue of type String, + // Number, Binary, String Set, Number Set, or Binary Set. If an item contains + // an AttributeValue of a different type than the one provided in the request, + // the value does not match. For example, {"S":"6"} does not equal {"N":"6"}. + // Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. + // + // LE : Less than or equal. 
+ // + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // LT : Less than. + // + // AttributeValueList can contain only one AttributeValue of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // GE : Greater than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // GT : Greater than. + // + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // NOT_NULL : The attribute exists. NOT_NULL is supported for all datatypes, + // including lists and maps. + // + // This operator tests for the existence of an attribute, not its data type. + // If the data type of attribute "a" is null, and you evaluate it using NOT_NULL, + // the result is a Boolean true. This result is because the attribute "a" exists; + // its data type is not relevant to the NOT_NULL comparison operator. + // + // NULL : The attribute does not exist. NULL is supported for all datatypes, + // including lists and maps. + // + // This operator tests for the nonexistence of an attribute, not its data + // type. If the data type of attribute "a" is null, and you evaluate it using + // NULL, the result is a Boolean false. This is because the attribute "a" exists; + // its data type is not relevant to the NULL comparison operator. + // + // CONTAINS : Checks for a subsequence, or value in a set. + // + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If the target attribute of the + // comparison is of type String, then the operator checks for a substring match. + // If the target attribute of the comparison is of type Binary, then the operator + // looks for a subsequence of the target that matches the input. If the target + // attribute of the comparison is a set ("SS", "NS", or "BS"), then the operator + // evaluates to true if it finds an exact match with any member of the set. + // + // CONTAINS is supported for lists: When evaluating "a CONTAINS b", "a" can + // be a list; however, "b" cannot be a set, a map, or a list. + // + // NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value + // in a set. + // + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). 
If the target attribute of the + // comparison is a String, then the operator checks for the absence of a substring + // match. If the target attribute of the comparison is Binary, then the operator + // checks for the absence of a subsequence of the target that matches the input. + // If the target attribute of the comparison is a set ("SS", "NS", or "BS"), + // then the operator evaluates to true if it does not find an exact match with + // any member of the set. + // + // NOT_CONTAINS is supported for lists: When evaluating "a NOT CONTAINS b", + // "a" can be a list; however, "b" cannot be a set, a map, or a list. + // + // BEGINS_WITH : Checks for a prefix. + // + // AttributeValueList can contain only one AttributeValue of type String or + // Binary (not a Number or a set type). The target attribute of the comparison + // must be of type String or Binary (not a Number or a set type). + // + // IN : Checks for matching elements within two sets. + // + // AttributeValueList can contain one or more AttributeValue elements of type + // String, Number, or Binary (not a set type). These attributes are compared + // against an existing set type attribute of an item. If any elements of the + // input set are present in the item attribute, the expression evaluates to + // true. + // + // BETWEEN : Greater than or equal to the first value, and less than or + // equal to the second value. + // + // AttributeValueList must contain two AttributeValue elements of the same + // type, either String, Number, or Binary (not a set type). A target attribute + // matches if the target value is greater than, or equal to, the first element + // and less than, or equal to, the second element. If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not compare to {"N":"6"}. Also, + // {"N":"6"} does not compare to {"NS":["6", "2", "1"]} + // + // For usage examples of AttributeValueList and ComparisonOperator, see Legacy + // Conditional Parameters (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html) + // in the Amazon DynamoDB Developer Guide. + ComparisonOperator *string `type:"string" required:"true" enum:"ComparisonOperator"` +} + +// String returns the string representation +func (s Condition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Condition) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Condition) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Condition"} + if s.ComparisonOperator == nil { + invalidParams.Add(request.NewErrParamRequired("ComparisonOperator")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The capacity units consumed by an operation. The data returned includes the +// total provisioned throughput consumed, along with statistics for the table +// and any indexes involved in the operation. ConsumedCapacity is only returned +// if the request asked for it. For more information, see Provisioned Throughput +// (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) +// in the Amazon DynamoDB Developer Guide. +type ConsumedCapacity struct { + _ struct{} `type:"structure"` + + // The total number of capacity units consumed by the operation. 
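+ //
+ // An illustrative sketch (svc, key, and the table name are assumed): request
+ // the metric with ReturnConsumedCapacity, then read it back from the response:
+ //
+ //    out, err := svc.GetItem(&dynamodb.GetItemInput{
+ //        TableName:              aws.String("Music"),
+ //        Key:                    key,
+ //        ReturnConsumedCapacity: aws.String(dynamodb.ReturnConsumedCapacityTotal),
+ //    })
+ //    if err == nil && out.ConsumedCapacity != nil {
+ //        fmt.Println(*out.ConsumedCapacity.CapacityUnits)
+ //    }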
+ CapacityUnits *float64 `type:"double"` + + // The amount of throughput consumed on each global index affected by the operation. + GlobalSecondaryIndexes map[string]*Capacity `type:"map"` + + // The amount of throughput consumed on each local index affected by the operation. + LocalSecondaryIndexes map[string]*Capacity `type:"map"` + + // The amount of throughput consumed on the table affected by the operation. + Table *Capacity `type:"structure"` + + // The name of the table that was affected by the operation. + TableName *string `min:"3" type:"string"` +} + +// String returns the string representation +func (s ConsumedCapacity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConsumedCapacity) GoString() string { + return s.String() +} + +// Represents a new global secondary index to be added to an existing table. +type CreateGlobalSecondaryIndexAction struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index to be created. + IndexName *string `min:"3" type:"string" required:"true"` + + // The key schema for the global secondary index. + KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"` + + // Represents attributes that are copied (projected) from the table into an + // index. These are in addition to the primary key attributes and index key + // attributes, which are automatically projected. + Projection *Projection `type:"structure" required:"true"` + + // Represents the provisioned throughput settings for a specified table or index. + // The settings can be modified using the UpdateTable operation. + // + // For current minimum and maximum provisioned throughput values, see Limits + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // in the Amazon DynamoDB Developer Guide. + ProvisionedThroughput *ProvisionedThroughput `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateGlobalSecondaryIndexAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateGlobalSecondaryIndexAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateGlobalSecondaryIndexAction) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateGlobalSecondaryIndexAction"}
+ if s.IndexName == nil {
+ invalidParams.Add(request.NewErrParamRequired("IndexName"))
+ }
+ if s.IndexName != nil && len(*s.IndexName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("IndexName", 3))
+ }
+ if s.KeySchema == nil {
+ invalidParams.Add(request.NewErrParamRequired("KeySchema"))
+ }
+ if s.KeySchema != nil && len(s.KeySchema) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("KeySchema", 1))
+ }
+ if s.Projection == nil {
+ invalidParams.Add(request.NewErrParamRequired("Projection"))
+ }
+ if s.ProvisionedThroughput == nil {
+ invalidParams.Add(request.NewErrParamRequired("ProvisionedThroughput"))
+ }
+ if s.KeySchema != nil {
+ for i, v := range s.KeySchema {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeySchema", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+ if s.Projection != nil {
+ if err := s.Projection.Validate(); err != nil {
+ invalidParams.AddNested("Projection", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.ProvisionedThroughput != nil {
+ if err := s.ProvisionedThroughput.Validate(); err != nil {
+ invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the input of a CreateTable operation.
+type CreateTableInput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of attributes that describe the key schema for the table and indexes.
+ AttributeDefinitions []*AttributeDefinition `type:"list" required:"true"`
+
+ // One or more global secondary indexes (the maximum is five) to be created
+ // on the table. Each global secondary index in the array includes the following:
+ //
+ // IndexName - The name of the global secondary index. Must be unique only
+ // for this table.
+ //
+ // KeySchema - Specifies the key schema for the global secondary index.
+ //
+ // Projection - Specifies attributes that are copied (projected) from the
+ // table into the index. These are in addition to the primary key attributes
+ // and index key attributes, which are automatically projected. Each attribute
+ // specification is composed of:
+ //
+ // ProjectionType - One of the following:
+ //
+ // KEYS_ONLY - Only the index and primary keys are projected into the index.
+ //
+ // INCLUDE - Only the specified table attributes are projected into the
+ // index. The list of projected attributes is in NonKeyAttributes.
+ //
+ // ALL - All of the table attributes are projected into the index.
+ //
+ // NonKeyAttributes - A list of one or more non-key attribute names that
+ // are projected into the secondary index. The total count of attributes provided
+ // in NonKeyAttributes, summed across all of the secondary indexes, must not
+ // exceed 20. If you project the same attribute into two different indexes,
+ // this counts as two distinct attributes when determining the total.
+ //
+ // ProvisionedThroughput - The provisioned throughput settings for the
+ // global secondary index, consisting of read and write capacity units.
+ GlobalSecondaryIndexes []*GlobalSecondaryIndex `type:"list"`
+
+ // Specifies the attributes that make up the primary key for a table or an index.
+ // The attributes in KeySchema must also be defined in the AttributeDefinitions
+ // array.
For more information, see Data Model (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html)
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // Each KeySchemaElement in the array is composed of:
+ //
+ // AttributeName - The name of this key attribute.
+ //
+ // KeyType - The role that the key attribute will assume:
+ //
+ // HASH - partition key
+ //
+ // RANGE - sort key
+ //
+ // The partition key of an item is also known as its hash attribute. The
+ // term "hash attribute" derives from DynamoDB's usage of an internal hash function
+ // to evenly distribute data items across partitions, based on their partition
+ // key values.
+ //
+ // The sort key of an item is also known as its range attribute. The term "range
+ // attribute" derives from the way DynamoDB stores items with the same partition
+ // key physically close together, in sorted order by the sort key value.
+ //
+ // For a simple primary key (partition key), you must provide exactly one
+ // element with a KeyType of HASH.
+ //
+ // For a composite primary key (partition key and sort key), you must provide
+ // exactly two elements, in this order: The first element must have a KeyType
+ // of HASH, and the second element must have a KeyType of RANGE.
+ //
+ // For more information, see Specifying the Primary Key (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#WorkingWithTables.primary.key)
+ // in the Amazon DynamoDB Developer Guide.
+ KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"`
+
+ // One or more local secondary indexes (the maximum is five) to be created on
+ // the table. Each index is scoped to a given partition key value. There is
+ // a 10 GB size limit per partition key value; otherwise, the size of a local
+ // secondary index is unconstrained.
+ //
+ // Each local secondary index in the array includes the following:
+ //
+ // IndexName - The name of the local secondary index. Must be unique only
+ // for this table.
+ //
+ // KeySchema - Specifies the key schema for the local secondary index.
+ // The key schema must begin with the same partition key as the table.
+ //
+ // Projection - Specifies attributes that are copied (projected) from the
+ // table into the index. These are in addition to the primary key attributes
+ // and index key attributes, which are automatically projected. Each attribute
+ // specification is composed of:
+ //
+ // ProjectionType - One of the following:
+ //
+ // KEYS_ONLY - Only the index and primary keys are projected into the index.
+ //
+ // INCLUDE - Only the specified table attributes are projected into the
+ // index. The list of projected attributes is in NonKeyAttributes.
+ //
+ // ALL - All of the table attributes are projected into the index.
+ //
+ // NonKeyAttributes - A list of one or more non-key attribute names that
+ // are projected into the secondary index. The total count of attributes provided
+ // in NonKeyAttributes, summed across all of the secondary indexes, must not
+ // exceed 20. If you project the same attribute into two different indexes,
+ // this counts as two distinct attributes when determining the total.
+ LocalSecondaryIndexes []*LocalSecondaryIndex `type:"list"`
+
+ // Represents the provisioned throughput settings for a specified table or index.
+ // The settings can be modified using the UpdateTable operation.
+ // + // For current minimum and maximum provisioned throughput values, see Limits + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // in the Amazon DynamoDB Developer Guide. + ProvisionedThroughput *ProvisionedThroughput `type:"structure" required:"true"` + + // The settings for DynamoDB Streams on the table. These settings consist of: + // + // StreamEnabled - Indicates whether Streams is to be enabled (true) or + // disabled (false). + // + // StreamViewType - When an item in the table is modified, StreamViewType + // determines what information is written to the table's stream. Valid values + // for StreamViewType are: + // + // KEYS_ONLY - Only the key attributes of the modified item are written + // to the stream. + // + // NEW_IMAGE - The entire item, as it appears after it was modified, is + // written to the stream. + // + // OLD_IMAGE - The entire item, as it appeared before it was modified, is + // written to the stream. + // + // NEW_AND_OLD_IMAGES - Both the new and the old item images of the item + // are written to the stream. + StreamSpecification *StreamSpecification `type:"structure"` + + // The name of the table to create. + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTableInput"} + if s.AttributeDefinitions == nil { + invalidParams.Add(request.NewErrParamRequired("AttributeDefinitions")) + } + if s.KeySchema == nil { + invalidParams.Add(request.NewErrParamRequired("KeySchema")) + } + if s.KeySchema != nil && len(s.KeySchema) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeySchema", 1)) + } + if s.ProvisionedThroughput == nil { + invalidParams.Add(request.NewErrParamRequired("ProvisionedThroughput")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + if s.AttributeDefinitions != nil { + for i, v := range s.AttributeDefinitions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AttributeDefinitions", i), err.(request.ErrInvalidParams)) + } + } + } + if s.GlobalSecondaryIndexes != nil { + for i, v := range s.GlobalSecondaryIndexes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexes", i), err.(request.ErrInvalidParams)) + } + } + } + if s.KeySchema != nil { + for i, v := range s.KeySchema { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeySchema", i), err.(request.ErrInvalidParams)) + } + } + } + if s.LocalSecondaryIndexes != nil { + for i, v := range s.LocalSecondaryIndexes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LocalSecondaryIndexes", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ProvisionedThroughput != nil { + if err := s.ProvisionedThroughput.Validate(); err != nil { + 
invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the output of a CreateTable operation.
+type CreateTableOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Represents the properties of a table.
+ TableDescription *TableDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateTableOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateTableOutput) GoString() string {
+ return s.String()
+}
+
+// Represents a global secondary index to be deleted from an existing table.
+type DeleteGlobalSecondaryIndexAction struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the global secondary index to be deleted.
+ IndexName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteGlobalSecondaryIndexAction) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteGlobalSecondaryIndexAction) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteGlobalSecondaryIndexAction) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteGlobalSecondaryIndexAction"}
+ if s.IndexName == nil {
+ invalidParams.Add(request.NewErrParamRequired("IndexName"))
+ }
+ if s.IndexName != nil && len(*s.IndexName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("IndexName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the input of a DeleteItem operation.
+type DeleteItemInput struct {
+ _ struct{} `type:"structure"`
+
+ // A condition that must be satisfied in order for a conditional DeleteItem
+ // to succeed.
+ //
+ // An expression can contain any of the following:
+ //
+ // Functions: attribute_exists | attribute_not_exists | attribute_type |
+ // contains | begins_with | size
+ //
+ // These function names are case-sensitive.
+ //
+ // Comparison operators: = | <> | < | > | <= |
+ // >= | BETWEEN | IN
+ //
+ // Logical operators: AND | OR | NOT
+ //
+ // For more information on condition expressions, see Specifying Conditions
+ // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html)
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // ConditionExpression replaces the legacy ConditionalOperator and Expected
+ // parameters.
+ ConditionExpression *string `type:"string"`
+
+ // This is a legacy parameter, for backward compatibility. New applications
+ // should use ConditionExpression instead. Do not combine legacy parameters
+ // and expression parameters in a single API call; otherwise, DynamoDB will
+ // return a ValidationException exception.
+ //
+ // A logical operator to apply to the conditions in the Expected map:
+ //
+ // AND - If all of the conditions evaluate to true, then the entire map
+ // evaluates to true.
+ //
+ // OR - If at least one of the conditions evaluates to true, then the entire
+ // map evaluates to true.
+ //
+ // If you omit ConditionalOperator, then AND is the default.
+ //
+ // The operation will succeed only if the entire map evaluates to true.
+ //
+ // This parameter does not support attributes of type List or Map.
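+ //
+ // An illustrative legacy-style sketch (input is an assumed *dynamodb.DeleteItemInput;
+ // attribute names and values are hypothetical), OR'ing two Expected conditions:
+ //
+ //    input.ConditionalOperator = aws.String(dynamodb.ConditionalOperatorOr)
+ //    input.Expected = map[string]*dynamodb.ExpectedAttributeValue{
+ //        "Price":  {Value: &dynamodb.AttributeValue{N: aws.String("0")}},
+ //        "Status": {Value: &dynamodb.AttributeValue{S: aws.String("Discontinued")}},
+ //    }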
+ ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` + + // This is a legacy parameter, for backward compatibility. New applications + // should use ConditionExpression instead. Do not combine legacy parameters + // and expression parameters in a single API call; otherwise, DynamoDB will + // return a ValidationException exception. + // + // A map of attribute/condition pairs. Expected provides a conditional block + // for the DeleteItem operation. + // + // Each element of Expected consists of an attribute name, a comparison operator, + // and one or more values. DynamoDB compares the attribute with the value(s) + // you supplied, using the comparison operator. For each Expected element, the + // result of the evaluation is either true or false. + // + // If you specify more than one element in the Expected map, then by default + // all of the conditions must evaluate to true. In other words, the conditions + // are ANDed together. (You can use the ConditionalOperator parameter to OR + // the conditions instead. If you do this, then at least one of the conditions + // must evaluate to true, rather than all of them.) + // + // If the Expected map evaluates to true, then the conditional operation succeeds; + // otherwise, it fails. + // + // Expected contains the following: + // + // AttributeValueList - One or more values to evaluate against the supplied + // attribute. The number of values in the list depends on the ComparisonOperator + // being used. + // + // For type Number, value comparisons are numeric. + // + // String value comparisons for greater than, equals, or less than are based + // on ASCII character code values. For example, a is greater than A, and a is + // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters + // (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters). + // + // For type Binary, DynamoDB treats each byte of the binary data as unsigned + // when it compares binary values. + // + // ComparisonOperator - A comparator for evaluating attributes in the AttributeValueList. + // When performing the comparison, DynamoDB uses strongly consistent reads. + // + // The following comparison operators are available: + // + // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS + // | BEGINS_WITH | IN | BETWEEN + // + // The following are descriptions of each comparison operator. + // + // EQ : Equal. EQ is supported for all datatypes, including lists and maps. + // + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, Binary, String Set, Number Set, or Binary Set. If an item + // contains an AttributeValue element of a different type than the one provided + // in the request, the value does not match. For example, {"S":"6"} does not + // equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. + // + // NE : Not equal. NE is supported for all datatypes, including lists and + // maps. + // + // AttributeValueList can contain only one AttributeValue of type String, + // Number, Binary, String Set, Number Set, or Binary Set. If an item contains + // an AttributeValue of a different type than the one provided in the request, + // the value does not match. For example, {"S":"6"} does not equal {"N":"6"}. + // Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. + // + // LE : Less than or equal. 
+ // + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // LT : Less than. + // + // AttributeValueList can contain only one AttributeValue of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // GE : Greater than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // GT : Greater than. + // + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // NOT_NULL : The attribute exists. NOT_NULL is supported for all datatypes, + // including lists and maps. + // + // This operator tests for the existence of an attribute, not its data type. + // If the data type of attribute "a" is null, and you evaluate it using NOT_NULL, + // the result is a Boolean true. This result is because the attribute "a" exists; + // its data type is not relevant to the NOT_NULL comparison operator. + // + // NULL : The attribute does not exist. NULL is supported for all datatypes, + // including lists and maps. + // + // This operator tests for the nonexistence of an attribute, not its data + // type. If the data type of attribute "a" is null, and you evaluate it using + // NULL, the result is a Boolean false. This is because the attribute "a" exists; + // its data type is not relevant to the NULL comparison operator. + // + // CONTAINS : Checks for a subsequence, or value in a set. + // + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If the target attribute of the + // comparison is of type String, then the operator checks for a substring match. + // If the target attribute of the comparison is of type Binary, then the operator + // looks for a subsequence of the target that matches the input. If the target + // attribute of the comparison is a set ("SS", "NS", or "BS"), then the operator + // evaluates to true if it finds an exact match with any member of the set. + // + // CONTAINS is supported for lists: When evaluating "a CONTAINS b", "a" can + // be a list; however, "b" cannot be a set, a map, or a list. + // + // NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value + // in a set. + // + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). 
If the target attribute of the
+ // comparison is a String, then the operator checks for the absence of a substring
+ // match. If the target attribute of the comparison is Binary, then the operator
+ // checks for the absence of a subsequence of the target that matches the input.
+ // If the target attribute of the comparison is a set ("SS", "NS", or "BS"),
+ // then the operator evaluates to true if it does not find an exact match with
+ // any member of the set.
+ //
+ // NOT_CONTAINS is supported for lists: When evaluating "a NOT CONTAINS b",
+ // "a" can be a list; however, "b" cannot be a set, a map, or a list.
+ //
+ // BEGINS_WITH : Checks for a prefix.
+ //
+ // AttributeValueList can contain only one AttributeValue of type String or
+ // Binary (not a Number or a set type). The target attribute of the comparison
+ // must be of type String or Binary (not a Number or a set type).
+ //
+ // IN : Checks for matching elements within two sets.
+ //
+ // AttributeValueList can contain one or more AttributeValue elements of type
+ // String, Number, or Binary (not a set type). These attributes are compared
+ // against an existing set type attribute of an item. If any elements of the
+ // input set are present in the item attribute, the expression evaluates to
+ // true.
+ //
+ // BETWEEN : Greater than or equal to the first value, and less than or
+ // equal to the second value.
+ //
+ // AttributeValueList must contain two AttributeValue elements of the same
+ // type, either String, Number, or Binary (not a set type). A target attribute
+ // matches if the target value is greater than, or equal to, the first element
+ // and less than, or equal to, the second element. If an item contains an AttributeValue
+ // element of a different type than the one provided in the request, the value
+ // does not match. For example, {"S":"6"} does not compare to {"N":"6"}. Also,
+ // {"N":"6"} does not compare to {"NS":["6", "2", "1"]}
+ //
+ // For usage examples of AttributeValueList and ComparisonOperator, see
+ // Legacy Conditional Parameters (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html)
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // For backward compatibility with previous DynamoDB releases, the following
+ // parameters can be used instead of AttributeValueList and ComparisonOperator:
+ //
+ // Value - A value for DynamoDB to compare with an attribute.
+ //
+ // Exists - A Boolean value that causes DynamoDB to evaluate the value before
+ // attempting the conditional operation:
+ //
+ // If Exists is true, DynamoDB will check to see if that attribute value
+ // already exists in the table. If it is found, then the condition evaluates
+ // to true; otherwise the condition evaluates to false.
+ //
+ // If Exists is false, DynamoDB assumes that the attribute value does not
+ // exist in the table. If in fact the value does not exist, then the assumption
+ // is valid and the condition evaluates to true. If the value is found, despite
+ // the assumption that it does not exist, the condition evaluates to false.
+ //
+ // Note that the default value for Exists is true.
+ //
+ // The Value and Exists parameters are incompatible with AttributeValueList
+ // and ComparisonOperator. Note that if you use both sets of parameters at once,
+ // DynamoDB will return a ValidationException exception.
+ //
+ // This parameter does not support attributes of type List or Map.
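+ //
+ // A minimal sketch of the backward-compatible Value/Exists form (the attribute
+ // name is hypothetical), asserting that an attribute is absent:
+ //
+ //    expected := map[string]*dynamodb.ExpectedAttributeValue{
+ //        "ReplyCount": {Exists: aws.Bool(false)},
+ //    }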
+ Expected map[string]*ExpectedAttributeValue `type:"map"` + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames: + // + // To access an attribute whose name conflicts with a DynamoDB reserved word. + // + // To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. + // For example, consider the following attribute name: + // + // Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot + // be used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // #P = :val + // + // Tokens that begin with the : character are expression attribute values, + // which are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see Accessing Item + // Attributes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeNames map[string]*string `type:"map"` + + // One or more values that can be substituted in an expression. + // + // Use the : (colon) character in an expression to dereference an attribute + // value. For example, suppose that you wanted to check whether the value of + // the ProductStatus attribute was one of the following: + // + // Available | Backordered | Discontinued + // + // You would first need to specify ExpressionAttributeValues as follows: + // + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} + // } + // + // You could then use these values in an expression, such as this: + // + // ProductStatus IN (:avail, :back, :disc) + // + // For more information on expression attribute values, see Specifying Conditions + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeValues map[string]*AttributeValue `type:"map"` + + // A map of attribute names to AttributeValue objects, representing the primary + // key of the item to delete. + // + // For the primary key, you must provide all of the attributes. For example, + // with a simple primary key, you only need to provide a value for the partition + // key. For a composite primary key, you must provide values for both the partition + // key and the sort key. + Key map[string]*AttributeValue `type:"map" required:"true"` + + // Determines the level of detail about provisioned throughput consumption that + // is returned in the response: + // + // INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary index + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem, do not access + // any indexes at all. 
In these cases, specifying INDEXES will only return ConsumedCapacity
+ // information for table(s).
+ //
+ // TOTAL - The response includes only the aggregate ConsumedCapacity for
+ // the operation.
+ //
+ // NONE - No ConsumedCapacity details are included in the response.
+ ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
+
+ // Determines whether item collection metrics are returned. If set to SIZE,
+ // the response includes statistics about item collections, if any, that were
+ // modified during the operation. If set to NONE (the default), no statistics
+ // are returned.
+ ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"`
+
+ // Use ReturnValues if you want to get the item attributes as they appeared
+ // before they were deleted. For DeleteItem, the valid values are:
+ //
+ // NONE - If ReturnValues is not specified, or if its value is NONE, then
+ // nothing is returned. (This setting is the default for ReturnValues.)
+ //
+ // ALL_OLD - The content of the old item is returned.
+ //
+ // The ReturnValues parameter is used by several DynamoDB operations; however,
+ // DeleteItem does not recognize any values other than NONE or ALL_OLD.
+ ReturnValues *string `type:"string" enum:"ReturnValue"`
+
+ // The name of the table from which to delete the item.
+ TableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteItemInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteItemInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteItemInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteItemInput"}
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.TableName == nil {
+ invalidParams.Add(request.NewErrParamRequired("TableName"))
+ }
+ if s.TableName != nil && len(*s.TableName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("TableName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the output of a DeleteItem operation.
+type DeleteItemOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A map of attribute names to AttributeValue objects, representing the item
+ // as it appeared before the DeleteItem operation. This map appears in the response
+ // only if ReturnValues was specified as ALL_OLD in the request.
+ Attributes map[string]*AttributeValue `type:"map"`
+
+ // The capacity units consumed by an operation. The data returned includes the
+ // total provisioned throughput consumed, along with statistics for the table
+ // and any indexes involved in the operation. ConsumedCapacity is only returned
+ // if the request asked for it. For more information, see Provisioned Throughput
+ // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ConsumedCapacity *ConsumedCapacity `type:"structure"`
+
+ // Information about item collections, if any, that were affected by the operation.
+ // ItemCollectionMetrics is only returned if the request asked for it. If the
+ // table does not have any local secondary indexes, this information is not
+ // returned in the response.
+ // + // Each ItemCollectionMetrics element consists of: + // + // ItemCollectionKey - The partition key value of the item collection. This + // is the same as the partition key value of the item itself. + // + // SizeEstimateRange - An estimate of item collection size, in gigabytes. + // This value is a two-element array containing a lower bound and an upper bound + // for the estimate. The estimate includes the size of all the items in the + // table, plus the size of all attributes projected into all of the local secondary + // indexes on that table. Use this estimate to measure whether a local secondary + // index is approaching its size limit. + // + // The estimate is subject to change over time; therefore, do not rely on the + // precision or accuracy of the estimate. + ItemCollectionMetrics *ItemCollectionMetrics `type:"structure"` +} + +// String returns the string representation +func (s DeleteItemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteItemOutput) GoString() string { + return s.String() +} + +// Represents a request to perform a DeleteItem operation on an item. +type DeleteRequest struct { + _ struct{} `type:"structure"` + + // A map of attribute name to attribute values, representing the primary key + // of the item to delete. All of the table's primary key attributes must be + // specified, and their data types must match those of the table's key schema. + Key map[string]*AttributeValue `type:"map" required:"true"` +} + +// String returns the string representation +func (s DeleteRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRequest) GoString() string { + return s.String() +} + +// Represents the input of a DeleteTable operation. +type DeleteTableInput struct { + _ struct{} `type:"structure"` + + // The name of the table to delete. + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTableInput"} + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a DeleteTable operation. +type DeleteTableOutput struct { + _ struct{} `type:"structure"` + + // Represents the properties of a table. + TableDescription *TableDescription `type:"structure"` +} + +// String returns the string representation +func (s DeleteTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTableOutput) GoString() string { + return s.String() +} + +// Represents the input of a DescribeLimits operation. Has no content. 
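+//
+// An illustrative call sketch (svc is an assumed *dynamodb.DynamoDB client;
+// fmt is assumed to be imported); DescribeLimits takes an empty input:
+//
+//    limits, err := svc.DescribeLimits(&dynamodb.DescribeLimitsInput{})
+//    if err == nil {
+//        fmt.Println(*limits.AccountMaxReadCapacityUnits)
+//    }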
+type DescribeLimitsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeLimitsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLimitsInput) GoString() string { + return s.String() +} + +// Represents the output of a DescribeLimits operation. +type DescribeLimitsOutput struct { + _ struct{} `type:"structure"` + + // The maximum total read capacity units that your account allows you to provision + // across all of your tables in this region. + AccountMaxReadCapacityUnits *int64 `min:"1" type:"long"` + + // The maximum total write capacity units that your account allows you to provision + // across all of your tables in this region. + AccountMaxWriteCapacityUnits *int64 `min:"1" type:"long"` + + // The maximum read capacity units that your account allows you to provision + // for a new table that you are creating in this region, including the read + // capacity units provisioned for its global secondary indexes (GSIs). + TableMaxReadCapacityUnits *int64 `min:"1" type:"long"` + + // The maximum write capacity units that your account allows you to provision + // for a new table that you are creating in this region, including the write + // capacity units provisioned for its global secondary indexes (GSIs). + TableMaxWriteCapacityUnits *int64 `min:"1" type:"long"` +} + +// String returns the string representation +func (s DescribeLimitsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLimitsOutput) GoString() string { + return s.String() +} + +// Represents the input of a DescribeTable operation. +type DescribeTableInput struct { + _ struct{} `type:"structure"` + + // The name of the table to describe. + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTableInput"} + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a DescribeTable operation. +type DescribeTableOutput struct { + _ struct{} `type:"structure"` + + // Represents the properties of a table. + Table *TableDescription `type:"structure"` +} + +// String returns the string representation +func (s DescribeTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTableOutput) GoString() string { + return s.String() +} + +// Represents a condition to be compared with an attribute value. This condition +// can be used with DeleteItem, PutItem or UpdateItem operations; if the comparison +// evaluates to true, the operation succeeds; if not, the operation fails. You +// can use ExpectedAttributeValue in one of two different ways: +// +// Use AttributeValueList to specify one or more values to compare against +// an attribute. 
Use ComparisonOperator to specify how you want to perform the +// comparison. If the comparison evaluates to true, then the conditional operation +// succeeds. +// +// Use Value to specify a value that DynamoDB will compare against an attribute. +// If the values match, then ExpectedAttributeValue evaluates to true and the +// conditional operation succeeds. Optionally, you can also set Exists to false, +// indicating that you do not expect to find the attribute value in the table. +// In this case, the conditional operation succeeds only if the comparison evaluates +// to false. +// +// Value and Exists are incompatible with AttributeValueList and ComparisonOperator. +// Note that if you use both sets of parameters at once, DynamoDB will return +// a ValidationException exception. +type ExpectedAttributeValue struct { + _ struct{} `type:"structure"` + + // One or more values to evaluate against the supplied attribute. The number + // of values in the list depends on the ComparisonOperator being used. + // + // For type Number, value comparisons are numeric. + // + // String value comparisons for greater than, equals, or less than are based + // on ASCII character code values. For example, a is greater than A, and a is + // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters + // (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters). + // + // For Binary, DynamoDB treats each byte of the binary data as unsigned when + // it compares binary values. + // + // For information on specifying data types in JSON, see JSON Data Format (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataFormat.html) + // in the Amazon DynamoDB Developer Guide. + AttributeValueList []*AttributeValue `type:"list"` + + // A comparator for evaluating attributes in the AttributeValueList. For example, + // equals, greater than, less than, etc. + // + // The following comparison operators are available: + // + // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS + // | BEGINS_WITH | IN | BETWEEN + // + // The following are descriptions of each comparison operator. + // + // EQ : Equal. EQ is supported for all datatypes, including lists and maps. + // + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, Binary, String Set, Number Set, or Binary Set. If an item + // contains an AttributeValue element of a different type than the one provided + // in the request, the value does not match. For example, {"S":"6"} does not + // equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. + // + // NE : Not equal. NE is supported for all datatypes, including lists and + // maps. + // + // AttributeValueList can contain only one AttributeValue of type String, + // Number, Binary, String Set, Number Set, or Binary Set. If an item contains + // an AttributeValue of a different type than the one provided in the request, + // the value does not match. For example, {"S":"6"} does not equal {"N":"6"}. + // Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. + // + // LE : Less than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. 
+ // + // LT : Less than. + // + // AttributeValueList can contain only one AttributeValue of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // GE : Greater than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // GT : Greater than. + // + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // NOT_NULL : The attribute exists. NOT_NULL is supported for all datatypes, + // including lists and maps. + // + // This operator tests for the existence of an attribute, not its data type. + // If the data type of attribute "a" is null, and you evaluate it using NOT_NULL, + // the result is a Boolean true. This result is because the attribute "a" exists; + // its data type is not relevant to the NOT_NULL comparison operator. + // + // NULL : The attribute does not exist. NULL is supported for all datatypes, + // including lists and maps. + // + // This operator tests for the nonexistence of an attribute, not its data + // type. If the data type of attribute "a" is null, and you evaluate it using + // NULL, the result is a Boolean false. This is because the attribute "a" exists; + // its data type is not relevant to the NULL comparison operator. + // + // CONTAINS : Checks for a subsequence, or value in a set. + // + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If the target attribute of the + // comparison is of type String, then the operator checks for a substring match. + // If the target attribute of the comparison is of type Binary, then the operator + // looks for a subsequence of the target that matches the input. If the target + // attribute of the comparison is a set ("SS", "NS", or "BS"), then the operator + // evaluates to true if it finds an exact match with any member of the set. + // + // CONTAINS is supported for lists: When evaluating "a CONTAINS b", "a" can + // be a list; however, "b" cannot be a set, a map, or a list. + // + // NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value + // in a set. + // + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If the target attribute of the + // comparison is a String, then the operator checks for the absence of a substring + // match. If the target attribute of the comparison is Binary, then the operator + // checks for the absence of a subsequence of the target that matches the input. 
+ // If the target attribute of the comparison is a set ("SS", "NS", or "BS"), + // then the operator evaluates to true if it does not find an exact match with + // any member of the set. + // + // NOT_CONTAINS is supported for lists: When evaluating "a NOT CONTAINS b", + // "a" can be a list; however, "b" cannot be a set, a map, or a list. + // + // BEGINS_WITH : Checks for a prefix. + // + // AttributeValueList can contain only one AttributeValue of type String or + // Binary (not a Number or a set type). The target attribute of the comparison + // must be of type String or Binary (not a Number or a set type). + // + // IN : Checks for matching elements within two sets. + // + // AttributeValueList can contain one or more AttributeValue elements of type + // String, Number, or Binary (not a set type). These attributes are compared + // against an existing set type attribute of an item. If any elements of the + // input set are present in the item attribute, the expression evaluates to + // true. + // + // BETWEEN : Greater than or equal to the first value, and less than or + // equal to the second value. + // + // AttributeValueList must contain two AttributeValue elements of the same + // type, either String, Number, or Binary (not a set type). A target attribute + // matches if the target value is greater than, or equal to, the first element + // and less than, or equal to, the second element. If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not compare to {"N":"6"}. Also, + // {"N":"6"} does not compare to {"NS":["6", "2", "1"]} + ComparisonOperator *string `type:"string" enum:"ComparisonOperator"` + + // Causes DynamoDB to evaluate the value before attempting a conditional operation: + // + // If Exists is true, DynamoDB will check to see if that attribute value + // already exists in the table. If it is found, then the operation succeeds. + // If it is not found, the operation fails with a ConditionalCheckFailedException. + // + // If Exists is false, DynamoDB assumes that the attribute value does not + // exist in the table. If in fact the value does not exist, then the assumption + // is valid and the operation succeeds. If the value is found, despite the assumption + // that it does not exist, the operation fails with a ConditionalCheckFailedException. + // + // The default setting for Exists is true. If you supply a Value all by itself, + // DynamoDB assumes the attribute exists: You don't have to set Exists to true, + // because it is implied. + // + // DynamoDB returns a ValidationException if: + // + // Exists is true but there is no Value to check. (You expect a value to + // exist, but don't specify what that value is.) + // + // Exists is false but you also provide a Value. (You cannot expect an attribute + // to have a value, while also expecting it not to exist.) + Exists *bool `type:"boolean"` + + // Represents the data for an attribute. You can set one, and only one, of the + // elements. + // + // Each attribute in an item is a name-value pair. An attribute can be single-valued + // or a multi-valued set. For example, a book item can have title and authors + // attributes. Each book has one title but can have many authors. The multi-valued + // attribute is a set; duplicate values are not allowed.
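A sketch of the legacy conditional form just described: delete only when a hypothetical Rating attribute is at most 3. Table, key, and attribute names are illustrative; client construction and imports are as in the DeleteItem sketch above.

    // Assumes svc := dynamodb.New(sess) and the aws/dynamodb imports from the
    // DeleteItem sketch above; names and values are illustrative.
    func deleteIfLowRating(svc *dynamodb.DynamoDB) error {
        _, err := svc.DeleteItem(&dynamodb.DeleteItemInput{
            TableName: aws.String("Music"),
            Key: map[string]*dynamodb.AttributeValue{
                "Artist": {S: aws.String("Acme Band")},
            },
            // Legacy Expected form: fails with ConditionalCheckFailedException
            // when Rating is greater than 3 (or missing).
            Expected: map[string]*dynamodb.ExpectedAttributeValue{
                "Rating": {
                    ComparisonOperator: aws.String(dynamodb.ComparisonOperatorLe),
                    AttributeValueList: []*dynamodb.AttributeValue{{N: aws.String("3")}},
                },
            },
        })
        return err
    }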
+ Value *AttributeValue `type:"structure"` +} + +// String returns the string representation +func (s ExpectedAttributeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExpectedAttributeValue) GoString() string { + return s.String() +} + +// Represents the input of a GetItem operation. +type GetItemInput struct { + _ struct{} `type:"structure"` + + // This is a legacy parameter, for backward compatibility. New applications + // should use ProjectionExpression instead. Do not combine legacy parameters + // and expression parameters in a single API call; otherwise, DynamoDB will + // return a ValidationException exception. + // + // This parameter allows you to retrieve attributes of type List or Map; however, + // it cannot retrieve individual elements within a List or a Map. + // + // The names of one or more attributes to retrieve. If no attribute names + // are provided, then all attributes will be returned. If any of the requested + // attributes are not found, they will not appear in the result. + // + // Note that AttributesToGet has no effect on provisioned throughput consumption. + // DynamoDB determines capacity units consumed based on item size, not on the + // amount of data that is returned to an application. + AttributesToGet []*string `min:"1" type:"list"` + + // Determines the read consistency model: If set to true, then the operation + // uses strongly consistent reads; otherwise, the operation uses eventually + // consistent reads. + ConsistentRead *bool `type:"boolean"` + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames: + // + // To access an attribute whose name conflicts with a DynamoDB reserved word. + // + // To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. + // For example, consider the following attribute name: + // + // Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot + // be used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // #P = :val + // + // Tokens that begin with the : character are expression attribute values, + // which are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see Accessing Item + // Attributes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeNames map[string]*string `type:"map"` + + // A map of attribute names to AttributeValue objects, representing the primary + // key of the item to retrieve. + // + // For the primary key, you must provide all of the attributes. For example, + // with a simple primary key, you only need to provide a value for the partition + // key. 
For a composite primary key, you must provide values for both the partition + // key and the sort key. + Key map[string]*AttributeValue `type:"map" required:"true"` + + // A string that identifies one or more attributes to retrieve from the table. + // These attributes can include scalars, sets, or elements of a JSON document. + // The attributes in the expression must be separated by commas. + // + // If no attribute names are specified, then all attributes will be returned. + // If any of the requested attributes are not found, they will not appear in + // the result. + // + // For more information, see Accessing Item Attributes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + // + // ProjectionExpression replaces the legacy AttributesToGet parameter. + ProjectionExpression *string `type:"string"` + + // Determines the level of detail about provisioned throughput consumption that + // is returned in the response: + // + // INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary index + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem, do not access + // any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity + // information for table(s). + // + // TOTAL - The response includes only the aggregate ConsumedCapacity for + // the operation. + // + // NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // The name of the table containing the requested item. + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetItemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetItemInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetItemInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetItemInput"} + if s.AttributesToGet != nil && len(s.AttributesToGet) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AttributesToGet", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a GetItem operation. +type GetItemOutput struct { + _ struct{} `type:"structure"` + + // The capacity units consumed by an operation. The data returned includes the + // total provisioned throughput consumed, along with statistics for the table + // and any indexes involved in the operation. ConsumedCapacity is only returned + // if the request asked for it. For more information, see Provisioned Throughput + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // in the Amazon DynamoDB Developer Guide. + ConsumedCapacity *ConsumedCapacity `type:"structure"` + + // A map of attribute names to AttributeValue objects, as specified by AttributesToGet. 
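A sketch of the reserved-word workaround described above, combined with a strongly consistent GetItem read. The table, key, and client (svc) are illustrative assumptions; imports are as in the earlier sketch.

    // Assumes the same client and imports as the sketches above.
    func getPercentile(svc *dynamodb.DynamoDB) (*dynamodb.GetItemOutput, error) {
        return svc.GetItem(&dynamodb.GetItemInput{
            TableName:      aws.String("Stats"), // illustrative table
            ConsistentRead: aws.Bool(true),      // strongly consistent read
            Key: map[string]*dynamodb.AttributeValue{
                "UserId": {S: aws.String("u-123")},
            },
            // "Percentile" is a reserved word, so it is dereferenced via #P.
            ProjectionExpression:     aws.String("#P"),
            ExpressionAttributeNames: map[string]*string{"#P": aws.String("Percentile")},
        })
    }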
+ Item map[string]*AttributeValue `type:"map"` +} + +// String returns the string representation +func (s GetItemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetItemOutput) GoString() string { + return s.String() +} + +// Represents the properties of a global secondary index. +type GlobalSecondaryIndex struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index. The name must be unique among all + // other indexes on this table. + IndexName *string `min:"3" type:"string" required:"true"` + + // The complete key schema for a global secondary index, which consists of one + // or more pairs of attribute names and key types: + // + // HASH - partition key + // + // RANGE - sort key + // + // The partition key of an item is also known as its hash attribute. The + // term "hash attribute" derives from DynamoDB's usage of an internal hash function + // to evenly distribute data items across partitions, based on their partition + // key values. + // + // The sort key of an item is also known as its range attribute. The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. + KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"` + + // Represents attributes that are copied (projected) from the table into an + // index. These are in addition to the primary key attributes and index key + // attributes, which are automatically projected. + Projection *Projection `type:"structure" required:"true"` + + // Represents the provisioned throughput settings for a specified table or index. + // The settings can be modified using the UpdateTable operation. + // + // For current minimum and maximum provisioned throughput values, see Limits + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // in the Amazon DynamoDB Developer Guide. + ProvisionedThroughput *ProvisionedThroughput `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GlobalSecondaryIndex) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GlobalSecondaryIndex) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid.
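A sketch of populating and validating this struct before use in a CreateTable or UpdateTable call; the index name, attributes, and throughput figures are illustrative (imports as in the sketches above).

    func exampleGSI() (*dynamodb.GlobalSecondaryIndex, error) {
        gsi := &dynamodb.GlobalSecondaryIndex{
            IndexName: aws.String("GenreYearIndex"),
            KeySchema: []*dynamodb.KeySchemaElement{
                {AttributeName: aws.String("Genre"), KeyType: aws.String(dynamodb.KeyTypeHash)},
                {AttributeName: aws.String("ReleaseYear"), KeyType: aws.String(dynamodb.KeyTypeRange)},
            },
            // KEYS_ONLY keeps the index small; INCLUDE and ALL are the alternatives.
            Projection: &dynamodb.Projection{ProjectionType: aws.String(dynamodb.ProjectionTypeKeysOnly)},
            ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
                ReadCapacityUnits:  aws.Int64(5),
                WriteCapacityUnits: aws.Int64(5),
            },
        }
        // Validate runs the same client-side checks the SDK applies before a
        // request is sent.
        if err := gsi.Validate(); err != nil {
            return nil, err
        }
        return gsi, nil
    }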
+func (s *GlobalSecondaryIndex) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GlobalSecondaryIndex"} + if s.IndexName == nil { + invalidParams.Add(request.NewErrParamRequired("IndexName")) + } + if s.IndexName != nil && len(*s.IndexName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) + } + if s.KeySchema == nil { + invalidParams.Add(request.NewErrParamRequired("KeySchema")) + } + if s.KeySchema != nil && len(s.KeySchema) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeySchema", 1)) + } + if s.Projection == nil { + invalidParams.Add(request.NewErrParamRequired("Projection")) + } + if s.ProvisionedThroughput == nil { + invalidParams.Add(request.NewErrParamRequired("ProvisionedThroughput")) + } + if s.KeySchema != nil { + for i, v := range s.KeySchema { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeySchema", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Projection != nil { + if err := s.Projection.Validate(); err != nil { + invalidParams.AddNested("Projection", err.(request.ErrInvalidParams)) + } + } + if s.ProvisionedThroughput != nil { + if err := s.ProvisionedThroughput.Validate(); err != nil { + invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the properties of a global secondary index. +type GlobalSecondaryIndexDescription struct { + _ struct{} `type:"structure"` + + // Indicates whether the index is currently backfilling. Backfilling is the + // process of reading items from the table and determining whether they can + // be added to the index. (Not all items will qualify: For example, a partition + // key cannot have any duplicate values.) If an item can be added to the index, + // DynamoDB will do so. After all items have been processed, the backfilling + // operation is complete and Backfilling is false. + // + // For indexes that were created during a CreateTable operation, the Backfilling + // attribute does not appear in the DescribeTable output. + Backfilling *bool `type:"boolean"` + + // The Amazon Resource Name (ARN) that uniquely identifies the index. + IndexArn *string `type:"string"` + + // The name of the global secondary index. + IndexName *string `min:"3" type:"string"` + + // The total size of the specified index, in bytes. DynamoDB updates this value + // approximately every six hours. Recent changes might not be reflected in this + // value. + IndexSizeBytes *int64 `type:"long"` + + // The current state of the global secondary index: + // + // CREATING - The index is being created. + // + // UPDATING - The index is being updated. + // + // DELETING - The index is being deleted. + // + // ACTIVE - The index is ready for use. + IndexStatus *string `type:"string" enum:"IndexStatus"` + + // The number of items in the specified index. DynamoDB updates this value approximately + // every six hours. Recent changes might not be reflected in this value. + ItemCount *int64 `type:"long"` + + // The complete key schema for a global secondary index, which consists of one + // or more pairs of attribute names and key types: + // + // HASH - partition key + // + // RANGE - sort key + // + // The partition key of an item is also known as its hash attribute. 
The + // term "hash attribute" derives from DynamoDB's usage of an internal hash function + // to evenly distribute data items across partitions, based on their partition + // key values. + // + // The sort key of an item is also known as its range attribute. The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. + KeySchema []*KeySchemaElement `min:"1" type:"list"` + + // Represents attributes that are copied (projected) from the table into an + // index. These are in addition to the primary key attributes and index key + // attributes, which are automatically projected. + Projection *Projection `type:"structure"` + + // Represents the provisioned throughput settings for the table, consisting + // of read and write capacity units, along with data about increases and decreases. + ProvisionedThroughput *ProvisionedThroughputDescription `type:"structure"` +} + +// String returns the string representation +func (s GlobalSecondaryIndexDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GlobalSecondaryIndexDescription) GoString() string { + return s.String() +} + +// Represents one of the following: +// +// A new global secondary index to be added to an existing table. +// +// New provisioned throughput parameters for an existing global secondary +// index. +// +// An existing global secondary index to be removed from an existing table. +type GlobalSecondaryIndexUpdate struct { + _ struct{} `type:"structure"` + + // The parameters required for creating a global secondary index on an existing + // table: + // + // IndexName + // + // KeySchema + // + // AttributeDefinitions + // + // Projection + // + // ProvisionedThroughput + Create *CreateGlobalSecondaryIndexAction `type:"structure"` + + // The name of an existing global secondary index to be removed. + Delete *DeleteGlobalSecondaryIndexAction `type:"structure"` + + // The name of an existing global secondary index, along with new provisioned + // throughput settings to be applied to that index. + Update *UpdateGlobalSecondaryIndexAction `type:"structure"` +} + +// String returns the string representation +func (s GlobalSecondaryIndexUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GlobalSecondaryIndexUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GlobalSecondaryIndexUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GlobalSecondaryIndexUpdate"} + if s.Create != nil { + if err := s.Create.Validate(); err != nil { + invalidParams.AddNested("Create", err.(request.ErrInvalidParams)) + } + } + if s.Delete != nil { + if err := s.Delete.Validate(); err != nil { + invalidParams.AddNested("Delete", err.(request.ErrInvalidParams)) + } + } + if s.Update != nil { + if err := s.Update.Validate(); err != nil { + invalidParams.AddNested("Update", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Information about item collections, if any, that were affected by the operation. +// ItemCollectionMetrics is only returned if the request asked for it. If the +// table does not have any local secondary indexes, this information is not +// returned in the response.
+type ItemCollectionMetrics struct { + _ struct{} `type:"structure"` + + // The partition key value of the item collection. This value is the same as + // the partition key value of the item. + ItemCollectionKey map[string]*AttributeValue `type:"map"` + + // An estimate of item collection size, in gigabytes. This value is a two-element + // array containing a lower bound and an upper bound for the estimate. The estimate + // includes the size of all the items in the table, plus the size of all attributes + // projected into all of the local secondary indexes on that table. Use this + // estimate to measure whether a local secondary index is approaching its size + // limit. + // + // The estimate is subject to change over time; therefore, do not rely on the + // precision or accuracy of the estimate. + SizeEstimateRangeGB []*float64 `type:"list"` +} + +// String returns the string representation +func (s ItemCollectionMetrics) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ItemCollectionMetrics) GoString() string { + return s.String() +} + +// Represents a single element of a key schema. A key schema specifies the attributes +// that make up the primary key of a table, or the key attributes of an index. +// +// A KeySchemaElement represents exactly one attribute of the primary key. +// For example, a simple primary key would be represented by one KeySchemaElement +// (for the partition key). A composite primary key would require one KeySchemaElement +// for the partition key, and another KeySchemaElement for the sort key. +// +// A KeySchemaElement must be a scalar, top-level attribute (not a nested attribute). +// The data type must be one of String, Number, or Binary. The attribute cannot +// be nested within a List or a Map. +type KeySchemaElement struct { + _ struct{} `type:"structure"` + + // The name of a key attribute. + AttributeName *string `min:"1" type:"string" required:"true"` + + // The role that this key attribute will assume: + // + // HASH - partition key + // + // RANGE - sort key + // + // The partition key of an item is also known as its hash attribute. The + // term "hash attribute" derives from DynamoDB's usage of an internal hash function + // to evenly distribute data items across partitions, based on their partition + // key values. + // + // The sort key of an item is also known as its range attribute. The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. + KeyType *string `type:"string" required:"true" enum:"KeyType"` +} + +// String returns the string representation +func (s KeySchemaElement) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KeySchemaElement) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid.
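A sketch of a composite primary key expressed with these elements as part of a CreateTable request; all names and capacity values are illustrative, with svc and imports as in the earlier sketches.

    func createMusicTable(svc *dynamodb.DynamoDB) error {
        _, err := svc.CreateTable(&dynamodb.CreateTableInput{
            TableName: aws.String("Music"),
            AttributeDefinitions: []*dynamodb.AttributeDefinition{
                {AttributeName: aws.String("Artist"), AttributeType: aws.String(dynamodb.ScalarAttributeTypeS)},
                {AttributeName: aws.String("SongTitle"), AttributeType: aws.String(dynamodb.ScalarAttributeTypeS)},
            },
            // One HASH (partition) element plus one RANGE (sort) element.
            KeySchema: []*dynamodb.KeySchemaElement{
                {AttributeName: aws.String("Artist"), KeyType: aws.String(dynamodb.KeyTypeHash)},
                {AttributeName: aws.String("SongTitle"), KeyType: aws.String(dynamodb.KeyTypeRange)},
            },
            ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
                ReadCapacityUnits:  aws.Int64(5),
                WriteCapacityUnits: aws.Int64(5),
            },
        })
        return err
    }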
+func (s *KeySchemaElement) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "KeySchemaElement"} + if s.AttributeName == nil { + invalidParams.Add(request.NewErrParamRequired("AttributeName")) + } + if s.AttributeName != nil && len(*s.AttributeName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AttributeName", 1)) + } + if s.KeyType == nil { + invalidParams.Add(request.NewErrParamRequired("KeyType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents a set of primary keys and, for each key, the attributes to retrieve +// from the table. +// +// For each primary key, you must provide all of the key attributes. For example, +// with a simple primary key, you only need to provide the partition key. For +// a composite primary key, you must provide both the partition key and the +// sort key. +type KeysAndAttributes struct { + _ struct{} `type:"structure"` + + // One or more attributes to retrieve from the table or index. If no attribute + // names are specified then all attributes will be returned. If any of the specified + // attributes are not found, they will not appear in the result. + AttributesToGet []*string `min:"1" type:"list"` + + // The consistency of a read operation. If set to true, then a strongly consistent + // read is used; otherwise, an eventually consistent read is used. + ConsistentRead *bool `type:"boolean"` + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames: + // + // To access an attribute whose name conflicts with a DynamoDB reserved word. + // + // To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. + // For example, consider the following attribute name: + // + // Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot + // be used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // #P = :val + // + // Tokens that begin with the : character are expression attribute values, + // which are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see Accessing Item + // Attributes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeNames map[string]*string `type:"map"` + + // The primary key attribute values that define the items and the attributes + // associated with the items. + Keys []map[string]*AttributeValue `min:"1" type:"list" required:"true"` + + // A string that identifies one or more attributes to retrieve from the table. + // These attributes can include scalars, sets, or elements of a JSON document. + // The attributes in the ProjectionExpression must be separated by commas. + // + // If no attribute names are specified, then all attributes will be returned. 
+ // If any of the requested attributes are not found, they will not appear in + // the result. + // + // For more information, see Accessing Item Attributes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + // + // ProjectionExpression replaces the legacy AttributesToGet parameter. + ProjectionExpression *string `type:"string"` +} + +// String returns the string representation +func (s KeysAndAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KeysAndAttributes) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *KeysAndAttributes) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "KeysAndAttributes"} + if s.AttributesToGet != nil && len(s.AttributesToGet) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AttributesToGet", 1)) + } + if s.Keys == nil { + invalidParams.Add(request.NewErrParamRequired("Keys")) + } + if s.Keys != nil && len(s.Keys) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Keys", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the input of a ListTables operation. +type ListTablesInput struct { + _ struct{} `type:"structure"` + + // The first table name that this operation will evaluate. Use the value that + // was returned for LastEvaluatedTableName in a previous operation, so that + // you can obtain the next page of results. + ExclusiveStartTableName *string `min:"3" type:"string"` + + // A maximum number of table names to return. If this parameter is not specified, + // the limit is 100. + Limit *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s ListTablesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTablesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTablesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTablesInput"} + if s.ExclusiveStartTableName != nil && len(*s.ExclusiveStartTableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("ExclusiveStartTableName", 3)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a ListTables operation. +type ListTablesOutput struct { + _ struct{} `type:"structure"` + + // The name of the last table in the current page of results. Use this value + // as the ExclusiveStartTableName in a new request to obtain the next page of + // results, until all the table names are returned. + // + // If you do not receive a LastEvaluatedTableName value in the response, this + // means that there are no more table names to be retrieved. + LastEvaluatedTableName *string `min:"3" type:"string"` + + // The names of the tables associated with the current account at the current + // endpoint. The maximum size of this array is 100. + // + // If LastEvaluatedTableName also appears in the output, you can use this value + // as the ExclusiveStartTableName parameter in a subsequent ListTables request + // and obtain the next page of results. 
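The LastEvaluatedTableName / ExclusiveStartTableName loop described above can also be driven by the generated pager, sketched here with an illustrative page size; svc and imports are assumed as in the earlier sketches, plus "fmt".

    func printAllTables(svc *dynamodb.DynamoDB) error {
        return svc.ListTablesPages(&dynamodb.ListTablesInput{Limit: aws.Int64(10)},
            func(page *dynamodb.ListTablesOutput, lastPage bool) bool {
                for _, name := range page.TableNames {
                    fmt.Println(*name)
                }
                return true // keep paging until LastEvaluatedTableName is absent
            })
    }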
+ TableNames []*string `type:"list"` +} + +// String returns the string representation +func (s ListTablesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTablesOutput) GoString() string { + return s.String() +} + +// Represents the properties of a local secondary index. +type LocalSecondaryIndex struct { + _ struct{} `type:"structure"` + + // The name of the local secondary index. The name must be unique among all + // other indexes on this table. + IndexName *string `min:"3" type:"string" required:"true"` + + // The complete key schema for the local secondary index, consisting of one + // or more pairs of attribute names and key types: + // + // HASH - partition key + // + // RANGE - sort key + // + // The partition key of an item is also known as its hash attribute. The + // term "hash attribute" derives from DynamoDB's usage of an internal hash function + // to evenly distribute data items across partitions, based on their partition + // key values. + // + // The sort key of an item is also known as its range attribute. The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. + KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"` + + // Represents attributes that are copied (projected) from the table into an + // index. These are in addition to the primary key attributes and index key + // attributes, which are automatically projected. + Projection *Projection `type:"structure" required:"true"` +} + +// String returns the string representation +func (s LocalSecondaryIndex) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LocalSecondaryIndex) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LocalSecondaryIndex) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LocalSecondaryIndex"} + if s.IndexName == nil { + invalidParams.Add(request.NewErrParamRequired("IndexName")) + } + if s.IndexName != nil && len(*s.IndexName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) + } + if s.KeySchema == nil { + invalidParams.Add(request.NewErrParamRequired("KeySchema")) + } + if s.KeySchema != nil && len(s.KeySchema) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeySchema", 1)) + } + if s.Projection == nil { + invalidParams.Add(request.NewErrParamRequired("Projection")) + } + if s.KeySchema != nil { + for i, v := range s.KeySchema { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeySchema", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Projection != nil { + if err := s.Projection.Validate(); err != nil { + invalidParams.AddNested("Projection", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the properties of a local secondary index. +type LocalSecondaryIndexDescription struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) that uniquely identifies the index. + IndexArn *string `type:"string"` + + // Represents the name of the local secondary index. + IndexName *string `min:"3" type:"string"` + + // The total size of the specified index, in bytes. DynamoDB updates this value + // approximately every six hours.
Recent changes might not be reflected in this + // value. + IndexSizeBytes *int64 `type:"long"` + + // The number of items in the specified index. DynamoDB updates this value approximately + // every six hours. Recent changes might not be reflected in this value. + ItemCount *int64 `type:"long"` + + // The complete key schema for the local secondary index, consisting of one + // or more pairs of attribute names and key types: + // + // HASH - partition key + // + // RANGE - sort key + // + // The partition key of an item is also known as its hash attribute. The + // term "hash attribute" derives from DynamoDB's usage of an internal hash function + // to evenly distribute data items across partitions, based on their partition + // key values. + // + // The sort key of an item is also known as its range attribute. The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. + KeySchema []*KeySchemaElement `min:"1" type:"list"` + + // Represents attributes that are copied (projected) from the table into an + // index. These are in addition to the primary key attributes and index key + // attributes, which are automatically projected. + Projection *Projection `type:"structure"` +} + +// String returns the string representation +func (s LocalSecondaryIndexDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LocalSecondaryIndexDescription) GoString() string { + return s.String() +} + +// Represents attributes that are copied (projected) from the table into an +// index. These are in addition to the primary key attributes and index key +// attributes, which are automatically projected. +type Projection struct { + _ struct{} `type:"structure"` + + // Represents the non-key attribute names which will be projected into the index. + // + // For local secondary indexes, the total count of NonKeyAttributes summed + // across all of the local secondary indexes, must not exceed 20. If you project + // the same attribute into two different indexes, this counts as two distinct + // attributes when determining the total. + NonKeyAttributes []*string `min:"1" type:"list"` + + // The set of attributes that are projected into the index: + // + // KEYS_ONLY - Only the index and primary keys are projected into the index. + // + // INCLUDE - Only the specified table attributes are projected into the + // index. The list of projected attributes are in NonKeyAttributes. + // + // ALL - All of the table attributes are projected into the index. + ProjectionType *string `type:"string" enum:"ProjectionType"` +} + +// String returns the string representation +func (s Projection) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Projection) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Projection) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Projection"} + if s.NonKeyAttributes != nil && len(s.NonKeyAttributes) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NonKeyAttributes", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the provisioned throughput settings for a specified table or index. +// The settings can be modified using the UpdateTable operation.
+// +// For current minimum and maximum provisioned throughput values, see Limits +// (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) +// in the Amazon DynamoDB Developer Guide. +type ProvisionedThroughput struct { + _ struct{} `type:"structure"` + + // The maximum number of strongly consistent reads consumed per second before + // DynamoDB returns a ThrottlingException. For more information, see Specifying + // Read and Write Requirements (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput) + // in the Amazon DynamoDB Developer Guide. + ReadCapacityUnits *int64 `min:"1" type:"long" required:"true"` + + // The maximum number of writes consumed per second before DynamoDB returns + // a ThrottlingException. For more information, see Specifying Read and Write + // Requirements (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput) + // in the Amazon DynamoDB Developer Guide. + WriteCapacityUnits *int64 `min:"1" type:"long" required:"true"` +} + +// String returns the string representation +func (s ProvisionedThroughput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProvisionedThroughput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ProvisionedThroughput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ProvisionedThroughput"} + if s.ReadCapacityUnits == nil { + invalidParams.Add(request.NewErrParamRequired("ReadCapacityUnits")) + } + if s.ReadCapacityUnits != nil && *s.ReadCapacityUnits < 1 { + invalidParams.Add(request.NewErrParamMinValue("ReadCapacityUnits", 1)) + } + if s.WriteCapacityUnits == nil { + invalidParams.Add(request.NewErrParamRequired("WriteCapacityUnits")) + } + if s.WriteCapacityUnits != nil && *s.WriteCapacityUnits < 1 { + invalidParams.Add(request.NewErrParamMinValue("WriteCapacityUnits", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the provisioned throughput settings for the table, consisting +// of read and write capacity units, along with data about increases and decreases. +type ProvisionedThroughputDescription struct { + _ struct{} `type:"structure"` + + // The date and time of the last provisioned throughput decrease for this table. + LastDecreaseDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The date and time of the last provisioned throughput increase for this table. + LastIncreaseDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The number of provisioned throughput decreases for this table during this + // UTC calendar day. For current maximums on provisioned throughput decreases, + // see Limits (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // in the Amazon DynamoDB Developer Guide. + NumberOfDecreasesToday *int64 `min:"1" type:"long"` + + // The maximum number of strongly consistent reads consumed per second before + // DynamoDB returns a ThrottlingException. Eventually consistent reads require + // less effort than strongly consistent reads, so a setting of 50 ReadCapacityUnits + // per second provides 100 eventually consistent ReadCapacityUnits per second. + ReadCapacityUnits *int64 `min:"1" type:"long"` + + // The maximum number of writes consumed per second before DynamoDB returns + // a ThrottlingException. 
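A sketch of applying new ProvisionedThroughput settings to an existing table via UpdateTable; the table name and capacity values are illustrative, with svc and imports as in the earlier sketches.

    func raiseReadCapacity(svc *dynamodb.DynamoDB) error {
        _, err := svc.UpdateTable(&dynamodb.UpdateTableInput{
            TableName: aws.String("Music"),
            ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
                ReadCapacityUnits:  aws.Int64(10), // both units must be >= 1
                WriteCapacityUnits: aws.Int64(5),
            },
        })
        return err
    }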
+ WriteCapacityUnits *int64 `min:"1" type:"long"` +} + +// String returns the string representation +func (s ProvisionedThroughputDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProvisionedThroughputDescription) GoString() string { + return s.String() +} + +// Represents the input of a PutItem operation. +type PutItemInput struct { + _ struct{} `type:"structure"` + + // A condition that must be satisfied in order for a conditional PutItem operation + // to succeed. + // + // An expression can contain any of the following: + // + // Functions: attribute_exists | attribute_not_exists | attribute_type | + // contains | begins_with | size + // + // These function names are case-sensitive. + // + // Comparison operators: = | <> | < | > | <= | + // >= | BETWEEN | IN + // + // Logical operators: AND | OR | NOT + // + // For more information on condition expressions, see Specifying Conditions + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) + // in the Amazon DynamoDB Developer Guide. + // + // ConditionExpression replaces the legacy ConditionalOperator and Expected + // parameters. + ConditionExpression *string `type:"string"` + + // This is a legacy parameter, for backward compatibility. New applications + // should use ConditionExpression instead. Do not combine legacy parameters + // and expression parameters in a single API call; otherwise, DynamoDB will + // return a ValidationException exception. + // + // A logical operator to apply to the conditions in the Expected map: + // + // AND - If all of the conditions evaluate to true, then the entire map + // evaluates to true. + // + // OR - If at least one of the conditions evaluates to true, then the entire + // map evaluates to true. + // + // If you omit ConditionalOperator, then AND is the default. + // + // The operation will succeed only if the entire map evaluates to true. + // + // This parameter does not support attributes of type List or Map. + ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` + + // This is a legacy parameter, for backward compatibility. New applications + // should use ConditionExpression instead. Do not combine legacy parameters + // and expression parameters in a single API call; otherwise, DynamoDB will + // return a ValidationException exception. + // + // A map of attribute/condition pairs. Expected provides a conditional block + // for the PutItem operation. + // + // This parameter does not support attributes of type List or Map. + // + // Each element of Expected consists of an attribute name, a comparison operator, + // and one or more values. DynamoDB compares the attribute with the value(s) + // you supplied, using the comparison operator. For each Expected element, the + // result of the evaluation is either true or false. + // + // If you specify more than one element in the Expected map, then by default + // all of the conditions must evaluate to true. In other words, the conditions + // are ANDed together. (You can use the ConditionalOperator parameter to OR + // the conditions instead. If you do this, then at least one of the conditions + // must evaluate to true, rather than all of them.) + // + // If the Expected map evaluates to true, then the conditional operation succeeds; + // otherwise, it fails. + // + // Expected contains the following: + // + // AttributeValueList - One or more values to evaluate against the supplied + // attribute.
The number of values in the list depends on the ComparisonOperator + // being used. + // + // For type Number, value comparisons are numeric. + // + // String value comparisons for greater than, equals, or less than are based + // on ASCII character code values. For example, a is greater than A, and a is + // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters + // (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters). + // + // For type Binary, DynamoDB treats each byte of the binary data as unsigned + // when it compares binary values. + // + // ComparisonOperator - A comparator for evaluating attributes in the AttributeValueList. + // When performing the comparison, DynamoDB uses strongly consistent reads. + // + // The following comparison operators are available: + // + // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS + // | BEGINS_WITH | IN | BETWEEN + // + // The following are descriptions of each comparison operator. + // + // EQ : Equal. EQ is supported for all datatypes, including lists and maps. + // + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, Binary, String Set, Number Set, or Binary Set. If an item + // contains an AttributeValue element of a different type than the one provided + // in the request, the value does not match. For example, {"S":"6"} does not + // equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. + // + // NE : Not equal. NE is supported for all datatypes, including lists and + // maps. + // + // AttributeValueList can contain only one AttributeValue of type String, + // Number, Binary, String Set, Number Set, or Binary Set. If an item contains + // an AttributeValue of a different type than the one provided in the request, + // the value does not match. For example, {"S":"6"} does not equal {"N":"6"}. + // Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. + // + // LE : Less than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // LT : Less than. + // + // AttributeValueList can contain only one AttributeValue of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // GE : Greater than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // GT : Greater than. + // + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. 
For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // NOT_NULL : The attribute exists. NOT_NULL is supported for all datatypes, + // including lists and maps. + // + // This operator tests for the existence of an attribute, not its data type. + // If the data type of attribute "a" is null, and you evaluate it using NOT_NULL, + // the result is a Boolean true. This result is because the attribute "a" exists; + // its data type is not relevant to the NOT_NULL comparison operator. + // + // NULL : The attribute does not exist. NULL is supported for all datatypes, + // including lists and maps. + // + // This operator tests for the nonexistence of an attribute, not its data + // type. If the data type of attribute "a" is null, and you evaluate it using + // NULL, the result is a Boolean false. This is because the attribute "a" exists; + // its data type is not relevant to the NULL comparison operator. + // + // CONTAINS : Checks for a subsequence, or value in a set. + // + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If the target attribute of the + // comparison is of type String, then the operator checks for a substring match. + // If the target attribute of the comparison is of type Binary, then the operator + // looks for a subsequence of the target that matches the input. If the target + // attribute of the comparison is a set ("SS", "NS", or "BS"), then the operator + // evaluates to true if it finds an exact match with any member of the set. + // + // CONTAINS is supported for lists: When evaluating "a CONTAINS b", "a" can + // be a list; however, "b" cannot be a set, a map, or a list. + // + // NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value + // in a set. + // + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If the target attribute of the + // comparison is a String, then the operator checks for the absence of a substring + // match. If the target attribute of the comparison is Binary, then the operator + // checks for the absence of a subsequence of the target that matches the input. + // If the target attribute of the comparison is a set ("SS", "NS", or "BS"), + // then the operator evaluates to true if it does not find an exact match with + // any member of the set. + // + // NOT_CONTAINS is supported for lists: When evaluating "a NOT CONTAINS b", + // "a" can be a list; however, "b" cannot be a set, a map, or a list. + // + // BEGINS_WITH : Checks for a prefix. + // + // AttributeValueList can contain only one AttributeValue of type String or + // Binary (not a Number or a set type). The target attribute of the comparison + // must be of type String or Binary (not a Number or a set type). + // + // IN : Checks for matching elements within two sets. + // + // AttributeValueList can contain one or more AttributeValue elements of type + // String, Number, or Binary (not a set type). These attributes are compared + // against an existing set type attribute of an item. If any elements of the + // input set are present in the item attribute, the expression evaluates to + // true. + // + // BETWEEN : Greater than or equal to the first value, and less than or + // equal to the second value. + // + // AttributeValueList must contain two AttributeValue elements of the same + // type, either String, Number, or Binary (not a set type). 
A target attribute
+ // matches if the target value is greater than, or equal to, the first element
+ // and less than, or equal to, the second element. If an item contains an AttributeValue
+ // element of a different type than the one provided in the request, the value
+ // does not match. For example, {"S":"6"} does not compare to {"N":"6"}. Also,
+ // {"N":"6"} does not compare to {"NS":["6", "2", "1"]}.
+ //
+ // For usage examples of AttributeValueList and ComparisonOperator, see
+ // Legacy Conditional Parameters (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html)
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // For backward compatibility with previous DynamoDB releases, the following
+ // parameters can be used instead of AttributeValueList and ComparisonOperator:
+ //
+ // Value - A value for DynamoDB to compare with an attribute.
+ //
+ // Exists - A Boolean value that causes DynamoDB to evaluate the value before
+ // attempting the conditional operation:
+ //
+ // If Exists is true, DynamoDB will check to see if that attribute value
+ // already exists in the table. If it is found, then the condition evaluates
+ // to true; otherwise the condition evaluates to false.
+ //
+ // If Exists is false, DynamoDB assumes that the attribute value does not
+ // exist in the table. If in fact the value does not exist, then the assumption
+ // is valid and the condition evaluates to true. If the value is found, despite
+ // the assumption that it does not exist, the condition evaluates to false.
+ //
+ // Note that the default value for Exists is true.
+ //
+ // The Value and Exists parameters are incompatible with AttributeValueList
+ // and ComparisonOperator. Note that if you use both sets of parameters at once,
+ // DynamoDB will return a ValidationException exception.
+ Expected map[string]*ExpectedAttributeValue `type:"map"`
+
+ // One or more substitution tokens for attribute names in an expression. The
+ // following are some use cases for using ExpressionAttributeNames:
+ //
+ // To access an attribute whose name conflicts with a DynamoDB reserved word.
+ //
+ // To create a placeholder for repeating occurrences of an attribute name
+ // in an expression.
+ //
+ // To prevent special characters in an attribute name from being misinterpreted
+ // in an expression.
+ //
+ // Use the # character in an expression to dereference an attribute name.
+ // For example, consider the following attribute name:
+ //
+ // Percentile
+ //
+ // The name of this attribute conflicts with a reserved word, so it cannot
+ // be used directly in an expression. (For the complete list of reserved words,
+ // see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html)
+ // in the Amazon DynamoDB Developer Guide). To work around this, you could specify
+ // the following for ExpressionAttributeNames:
+ //
+ // {"#P":"Percentile"}
+ //
+ // You could then use this substitution in an expression, as in this example:
+ //
+ // #P = :val
+ //
+ // Tokens that begin with the : character are expression attribute values,
+ // which are placeholders for the actual value at runtime.
+ //
+ // For more information on expression attribute names, see Accessing Item
+ // Attributes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ExpressionAttributeNames map[string]*string `type:"map"`
+
+ // One or more values that can be substituted in an expression.
+ //
+ // Use the : (colon) character in an expression to dereference an attribute
+ // value. For example, suppose that you wanted to check whether the value of
+ // the ProductStatus attribute was one of the following:
+ //
+ // Available | Backordered | Discontinued
+ //
+ // You would first need to specify ExpressionAttributeValues as follows:
+ //
+ // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"}
+ // }
+ //
+ // You could then use these values in an expression, such as this:
+ //
+ // ProductStatus IN (:avail, :back, :disc)
+ //
+ // For more information on expression attribute values, see Specifying Conditions
+ // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ExpressionAttributeValues map[string]*AttributeValue `type:"map"`
+
+ // A map of attribute name/value pairs, one for each attribute. Only the primary
+ // key attributes are required; you can optionally provide other attribute name-value
+ // pairs for the item.
+ //
+ // You must provide all of the attributes for the primary key. For example,
+ // with a simple primary key, you only need to provide a value for the partition
+ // key. For a composite primary key, you must provide values for both the
+ // partition key and the sort key.
+ //
+ // If you specify any attributes that are part of an index key, then the data
+ // types for those attributes must match those of the schema in the table's
+ // attribute definition.
+ //
+ // For more information about primary keys, see Primary Key (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelPrimaryKey)
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // Each element in the Item map is an AttributeValue object.
+ Item map[string]*AttributeValue `type:"map" required:"true"`
+
+ // Determines the level of detail about provisioned throughput consumption that
+ // is returned in the response:
+ //
+ // INDEXES - The response includes the aggregate ConsumedCapacity for the
+ // operation, together with ConsumedCapacity for each table and secondary index
+ // that was accessed.
+ //
+ // Note that some operations, such as GetItem and BatchGetItem, do not access
+ // any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity
+ // information for table(s).
+ //
+ // TOTAL - The response includes only the aggregate ConsumedCapacity for
+ // the operation.
+ //
+ // NONE - No ConsumedCapacity details are included in the response.
+ ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
+
+ // Determines whether item collection metrics are returned. If set to SIZE,
+ // the response includes statistics about item collections, if any, that were
+ // modified during the operation. If set to NONE (the default), no statistics
+ // are returned.
+ ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"`
+
+ // Use ReturnValues if you want to get the item attributes as they appeared
+ // before they were updated with the PutItem request. For PutItem, the valid
+ // values are:
+ //
+ // NONE - If ReturnValues is not specified, or if its value is NONE, then
+ // nothing is returned. (This setting is the default for ReturnValues.)
+ //
+ // ALL_OLD - If PutItem overwrote an attribute name-value pair, then the
+ // content of the old item is returned.
+ //
+ // The ReturnValues parameter is used by several DynamoDB operations; however,
+ // PutItem does not recognize any values other than NONE or ALL_OLD.
+ ReturnValues *string `type:"string" enum:"ReturnValue"`
+
+ // The name of the table to contain the item.
+ TableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s PutItemInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutItemInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutItemInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutItemInput"}
+ if s.Item == nil {
+ invalidParams.Add(request.NewErrParamRequired("Item"))
+ }
+ if s.TableName == nil {
+ invalidParams.Add(request.NewErrParamRequired("TableName"))
+ }
+ if s.TableName != nil && len(*s.TableName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("TableName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
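+
+// exampleConditionalPut is an illustrative sketch, not part of the generated
+// API: it shows how the PutItemInput fields documented above fit together,
+// writing an item only if no item with the same partition key already exists,
+// via ConditionExpression rather than the legacy Expected/ConditionalOperator
+// parameters. The table and attribute names are hypothetical.
+func exampleConditionalPut(svc *DynamoDB) error {
+ _, err := svc.PutItem(&PutItemInput{
+  TableName: aws.String("Music"),
+  Item: map[string]*AttributeValue{
+   "Artist":    {S: aws.String("No One You Know")},
+   "SongTitle": {S: aws.String("Call Me Today")},
+   "Year":      {N: aws.String("2017")},
+  },
+  // Fail with a ConditionalCheckFailedException rather than silently
+  // overwriting an existing item.
+  ConditionExpression: aws.String("attribute_not_exists(Artist)"),
+ })
+ return err
+}
+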
+// Represents the output of a PutItem operation.
+type PutItemOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The attribute values as they appeared before the PutItem operation, but only
+ // if ReturnValues is specified as ALL_OLD in the request. Each element consists
+ // of an attribute name and an attribute value.
+ Attributes map[string]*AttributeValue `type:"map"`
+
+ // The capacity units consumed by an operation. The data returned includes the
+ // total provisioned throughput consumed, along with statistics for the table
+ // and any indexes involved in the operation. ConsumedCapacity is only returned
+ // if the request asked for it. For more information, see Provisioned Throughput
+ // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ConsumedCapacity *ConsumedCapacity `type:"structure"`
+
+ // Information about item collections, if any, that were affected by the operation.
+ // ItemCollectionMetrics is only returned if the request asked for it. If the
+ // table does not have any local secondary indexes, this information is not
+ // returned in the response.
+ //
+ // Each ItemCollectionMetrics element consists of:
+ //
+ // ItemCollectionKey - The partition key value of the item collection. This
+ // is the same as the partition key value of the item itself.
+ //
+ // SizeEstimateRange - An estimate of item collection size, in gigabytes.
+ // This value is a two-element array containing a lower bound and an upper bound
+ // for the estimate. The estimate includes the size of all the items in the
+ // table, plus the size of all attributes projected into all of the local secondary
+ // indexes on that table. Use this estimate to measure whether a local secondary
+ // index is approaching its size limit.
+ //
+ // The estimate is subject to change over time; therefore, do not rely on the
+ // precision or accuracy of the estimate.
+ ItemCollectionMetrics *ItemCollectionMetrics `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutItemOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutItemOutput) GoString() string {
+ return s.String()
+}
+
+// Represents a request to perform a PutItem operation on an item.
+type PutRequest struct {
+ _ struct{} `type:"structure"`
+
+ // A map of attribute name to attribute values, representing the primary key
+ // of an item to be processed by PutItem. All of the table's primary key attributes
+ // must be specified, and their data types must match those of the table's key
+ // schema. If any attributes are present in the item which are part of an index
+ // key schema for the table, their types must match the index key schema.
+ Item map[string]*AttributeValue `type:"map" required:"true"`
+}
+
+// String returns the string representation
+func (s PutRequest) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutRequest) GoString() string {
+ return s.String()
+}
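+
+// exampleBatchPut is an illustrative sketch, not part of the generated API:
+// it shows where PutRequest is actually consumed, as the put half of a
+// WriteRequest inside a BatchWriteItemInput. The table and attribute names
+// are hypothetical, and a production caller would also retry any
+// UnprocessedItems returned in the output.
+func exampleBatchPut(svc *DynamoDB) error {
+ _, err := svc.BatchWriteItem(&BatchWriteItemInput{
+  RequestItems: map[string][]*WriteRequest{
+   "Music": {
+    {PutRequest: &PutRequest{Item: map[string]*AttributeValue{
+     "Artist":    {S: aws.String("No One You Know")},
+     "SongTitle": {S: aws.String("Call Me Today")},
+    }}},
+   },
+  },
+ })
+ return err
+}
+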
+// Represents the input of a Query operation.
+type QueryInput struct {
+ _ struct{} `type:"structure"`
+
+ // This is a legacy parameter, for backward compatibility. New applications
+ // should use ProjectionExpression instead. Do not combine legacy parameters
+ // and expression parameters in a single API call; otherwise, DynamoDB will
+ // return a ValidationException exception.
+ //
+ // This parameter allows you to retrieve attributes of type List or Map; however,
+ // it cannot retrieve individual elements within a List or a Map.
+ //
+ // The names of one or more attributes to retrieve. If no attribute names
+ // are provided, then all attributes will be returned. If any of the requested
+ // attributes are not found, they will not appear in the result.
+ //
+ // Note that AttributesToGet has no effect on provisioned throughput consumption.
+ // DynamoDB determines capacity units consumed based on item size, not on the
+ // amount of data that is returned to an application.
+ //
+ // You cannot use both AttributesToGet and Select together in a Query request,
+ // unless the value for Select is SPECIFIC_ATTRIBUTES. (This usage is equivalent
+ // to specifying AttributesToGet without any value for Select.)
+ //
+ // If you query a local secondary index and request only attributes that are
+ // projected into that index, the operation will read only the index and not
+ // the table. If any of the requested attributes are not projected into the
+ // local secondary index, DynamoDB will fetch each of these attributes from
+ // the parent table. This extra fetching incurs additional throughput cost and
+ // latency.
+ //
+ // If you query a global secondary index, you can only request attributes that
+ // are projected into the index. Global secondary index queries cannot fetch
+ // attributes from the parent table.
+ AttributesToGet []*string `min:"1" type:"list"`
+
+ // This is a legacy parameter, for backward compatibility. New applications
+ // should use FilterExpression instead. Do not combine legacy parameters and
+ // expression parameters in a single API call; otherwise, DynamoDB will return
+ // a ValidationException exception.
+ //
+ // A logical operator to apply to the conditions in a QueryFilter map:
+ //
+ // AND - If all of the conditions evaluate to true, then the entire map
+ // evaluates to true.
+ //
+ // OR - If at least one of the conditions evaluates to true, then the entire
+ // map evaluates to true.
+ //
+ // If you omit ConditionalOperator, then AND is the default.
+ //
+ // The operation will succeed only if the entire map evaluates to true.
+ //
+ // This parameter does not support attributes of type List or Map.
+ ConditionalOperator *string `type:"string" enum:"ConditionalOperator"`
+
+ // Determines the read consistency model: If set to true, then the operation
+ // uses strongly consistent reads; otherwise, the operation uses eventually
+ // consistent reads.
+ //
+ // Strongly consistent reads are not supported on global secondary indexes.
+ // If you query a global secondary index with ConsistentRead set to true, you
+ // will receive a ValidationException.
+ ConsistentRead *bool `type:"boolean"`
+
+ // The primary key of the first item that this operation will evaluate. Use
+ // the value that was returned for LastEvaluatedKey in the previous operation.
+ //
+ // The data type for ExclusiveStartKey must be String, Number or Binary. No
+ // set data types are allowed.
+ ExclusiveStartKey map[string]*AttributeValue `type:"map"`
+
+ // One or more substitution tokens for attribute names in an expression. The
+ // following are some use cases for using ExpressionAttributeNames:
+ //
+ // To access an attribute whose name conflicts with a DynamoDB reserved word.
+ //
+ // To create a placeholder for repeating occurrences of an attribute name
+ // in an expression.
+ //
+ // To prevent special characters in an attribute name from being misinterpreted
+ // in an expression.
+ //
+ // Use the # character in an expression to dereference an attribute name.
+ // For example, consider the following attribute name:
+ //
+ // Percentile
+ //
+ // The name of this attribute conflicts with a reserved word, so it cannot
+ // be used directly in an expression. (For the complete list of reserved words,
+ // see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html)
+ // in the Amazon DynamoDB Developer Guide). To work around this, you could specify
+ // the following for ExpressionAttributeNames:
+ //
+ // {"#P":"Percentile"}
+ //
+ // You could then use this substitution in an expression, as in this example:
+ //
+ // #P = :val
+ //
+ // Tokens that begin with the : character are expression attribute values,
+ // which are placeholders for the actual value at runtime.
+ //
+ // For more information on expression attribute names, see Accessing Item
+ // Attributes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ExpressionAttributeNames map[string]*string `type:"map"`
+
+ // One or more values that can be substituted in an expression.
+ //
+ // Use the : (colon) character in an expression to dereference an attribute
+ // value.
For example, suppose that you wanted to check whether the value of
+ // the ProductStatus attribute was one of the following:
+ //
+ // Available | Backordered | Discontinued
+ //
+ // You would first need to specify ExpressionAttributeValues as follows:
+ //
+ // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"}
+ // }
+ //
+ // You could then use these values in an expression, such as this:
+ //
+ // ProductStatus IN (:avail, :back, :disc)
+ //
+ // For more information on expression attribute values, see Specifying Conditions
+ // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ExpressionAttributeValues map[string]*AttributeValue `type:"map"`
+
+ // A string that contains conditions that DynamoDB applies after the Query operation,
+ // but before the data is returned to you. Items that do not satisfy the FilterExpression
+ // criteria are not returned.
+ //
+ // A FilterExpression is applied after the items have already been read; the
+ // process of filtering does not consume any additional read capacity units.
+ //
+ // For more information, see Filter Expressions (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#FilteringResults)
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // FilterExpression replaces the legacy QueryFilter and ConditionalOperator
+ // parameters.
+ FilterExpression *string `type:"string"`
+
+ // The name of an index to query. This index can be any local secondary index
+ // or global secondary index on the table. Note that if you use the IndexName
+ // parameter, you must also provide TableName.
+ IndexName *string `min:"3" type:"string"`
+
+ // The condition that specifies the key value(s) for items to be retrieved by
+ // the Query action.
+ //
+ // The condition must perform an equality test on a single partition key value.
+ // The condition can also perform one of several comparison tests on a single
+ // sort key value. Query can use KeyConditionExpression to retrieve one item
+ // with a given partition key value and sort key value, or several items that
+ // have the same partition key value but different sort key values.
+ //
+ // The partition key equality test is required, and must be specified in the
+ // following format:
+ //
+ // partitionKeyName = :partitionkeyval
+ //
+ // If you also want to provide a condition for the sort key, it must be combined
+ // using AND with the condition for the partition key. Following is an example,
+ // using the = comparison operator for the sort key:
+ //
+ // partitionKeyName = :partitionkeyval AND sortKeyName = :sortkeyval
+ //
+ // Valid comparisons for the sort key condition are as follows:
+ //
+ // sortKeyName = :sortkeyval - true if the sort key value is equal to :sortkeyval.
+ //
+ // sortKeyName < :sortkeyval - true if the sort key value is less than :sortkeyval.
+ //
+ // sortKeyName <= :sortkeyval - true if the sort key value is less than
+ // or equal to :sortkeyval.
+ //
+ // sortKeyName > :sortkeyval - true if the sort key value is greater than
+ // :sortkeyval.
+ //
+ // sortKeyName >= :sortkeyval - true if the sort key value is greater than
+ // or equal to :sortkeyval.
+ //
+ // sortKeyName BETWEEN :sortkeyval1 AND :sortkeyval2 - true if the sort
+ // key value is greater than or equal to :sortkeyval1, and less than or equal
+ // to :sortkeyval2.
+ //
+ // begins_with ( sortKeyName, :sortkeyval ) - true if the sort key value
+ // begins with a particular operand. (You cannot use this function with a sort
+ // key that is of type Number.) Note that the function name begins_with is case-sensitive.
+ //
+ // Use the ExpressionAttributeValues parameter to replace tokens such as
+ // :partitionkeyval and :sortkeyval with actual values at runtime.
+ //
+ // You can optionally use the ExpressionAttributeNames parameter to replace
+ // the names of the partition key and sort key with placeholder tokens. This
+ // option might be necessary if an attribute name conflicts with a DynamoDB
+ // reserved word. For example, the following KeyConditionExpression parameter
+ // causes an error because Size is a reserved word:
+ //
+ // Size = :myval
+ //
+ // To work around this, define a placeholder (such as #S) to represent the
+ // attribute name Size. KeyConditionExpression then is as follows:
+ //
+ // #S = :myval
+ //
+ // For a list of reserved words, see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html)
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // For more information on ExpressionAttributeNames and ExpressionAttributeValues,
+ // see Using Placeholders for Attribute Names and Values (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ExpressionPlaceholders.html)
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // KeyConditionExpression replaces the legacy KeyConditions parameter.
+ KeyConditionExpression *string `type:"string"`
+
+ // This is a legacy parameter, for backward compatibility. New applications
+ // should use KeyConditionExpression instead. Do not combine legacy parameters
+ // and expression parameters in a single API call; otherwise, DynamoDB will
+ // return a ValidationException exception.
+ //
+ // The selection criteria for the query. For a query on a table, you can have
+ // conditions only on the table primary key attributes. You must provide the
+ // partition key name and value as an EQ condition. You can optionally provide
+ // a second condition, referring to the sort key.
+ //
+ // If you don't provide a sort key condition, all of the items that match
+ // the partition key will be retrieved. If a FilterExpression or QueryFilter
+ // is present, it will be applied after the items are retrieved.
+ //
+ // For a query on an index, you can have conditions only on the index key
+ // attributes. You must provide the index partition key name and value as an
+ // EQ condition. You can optionally provide a second condition, referring to
+ // the index sort key.
+ //
+ // Each KeyConditions element consists of an attribute name to compare, along
+ // with the following:
+ //
+ // AttributeValueList - One or more values to evaluate against the supplied
+ // attribute. The number of values in the list depends on the ComparisonOperator
+ // being used.
+ //
+ // For type Number, value comparisons are numeric.
+ //
+ // String value comparisons for greater than, equals, or less than are based
+ // on ASCII character code values. For example, a is greater than A, and a is
+ // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters
+ // (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters).
+ //
+ // For Binary, DynamoDB treats each byte of the binary data as unsigned when
+ // it compares binary values.
+ // + // ComparisonOperator - A comparator for evaluating attributes, for example, + // equals, greater than, less than, and so on. + // + // For KeyConditions, only the following comparison operators are supported: + // + // EQ | LE | LT | GE | GT | BEGINS_WITH | BETWEEN + // + // The following are descriptions of these comparison operators. + // + // EQ : Equal. + // + // AttributeValueList can contain only one AttributeValue of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one specified in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not equal {"NS":["6", "2", "1"]}. + // + // LE : Less than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // LT : Less than. + // + // AttributeValueList can contain only one AttributeValue of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // GE : Greater than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // GT : Greater than. + // + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // BEGINS_WITH : Checks for a prefix. + // + // AttributeValueList can contain only one AttributeValue of type String or + // Binary (not a Number or a set type). The target attribute of the comparison + // must be of type String or Binary (not a Number or a set type). + // + // BETWEEN : Greater than or equal to the first value, and less than or + // equal to the second value. + // + // AttributeValueList must contain two AttributeValue elements of the same + // type, either String, Number, or Binary (not a set type). A target attribute + // matches if the target value is greater than, or equal to, the first element + // and less than, or equal to, the second element. If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not compare to {"N":"6"}. 
Also,
+ // {"N":"6"} does not compare to {"NS":["6", "2", "1"]}.
+ //
+ // For usage examples of AttributeValueList and ComparisonOperator, see
+ // Legacy Conditional Parameters (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html)
+ // in the Amazon DynamoDB Developer Guide.
+ KeyConditions map[string]*Condition `type:"map"`
+
+ // The maximum number of items to evaluate (not necessarily the number of matching
+ // items). If DynamoDB processes the number of items up to the limit while processing
+ // the results, it stops the operation and returns the matching values up to
+ // that point, and a key in LastEvaluatedKey to apply in a subsequent operation,
+ // so that you can pick up where you left off. Also, if the processed data set
+ // size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation
+ // and returns the matching values up to the limit, and a key in LastEvaluatedKey
+ // to apply in a subsequent operation to continue the operation. For more information,
+ // see Query and Scan (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html)
+ // in the Amazon DynamoDB Developer Guide.
+ Limit *int64 `min:"1" type:"integer"`
+
+ // A string that identifies one or more attributes to retrieve from the table.
+ // These attributes can include scalars, sets, or elements of a JSON document.
+ // The attributes in the expression must be separated by commas.
+ //
+ // If no attribute names are specified, then all attributes will be returned.
+ // If any of the requested attributes are not found, they will not appear in
+ // the result.
+ //
+ // For more information, see Accessing Item Attributes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // ProjectionExpression replaces the legacy AttributesToGet parameter.
+ ProjectionExpression *string `type:"string"`
+
+ // This is a legacy parameter, for backward compatibility. New applications
+ // should use FilterExpression instead. Do not combine legacy parameters and
+ // expression parameters in a single API call; otherwise, DynamoDB will return
+ // a ValidationException exception.
+ //
+ // A condition that evaluates the query results after the items are read and
+ // returns only the desired values.
+ //
+ // This parameter does not support attributes of type List or Map.
+ //
+ // A QueryFilter is applied after the items have already been read; the process
+ // of filtering does not consume any additional read capacity units.
+ //
+ // If you provide more than one condition in the QueryFilter map, then by
+ // default all of the conditions must evaluate to true. In other words, the
+ // conditions are ANDed together. (You can use the ConditionalOperator parameter
+ // to OR the conditions instead. If you do this, then at least one of the conditions
+ // must evaluate to true, rather than all of them.)
+ //
+ // Note that QueryFilter does not allow key attributes. You cannot define a
+ // filter condition on a partition key or a sort key.
+ //
+ // Each QueryFilter element consists of an attribute name to compare, along
+ // with the following:
+ //
+ // AttributeValueList - One or more values to evaluate against the supplied
+ // attribute. The number of values in the list depends on the operator specified
+ // in ComparisonOperator.
+ //
+ // For type Number, value comparisons are numeric.
+ // + // String value comparisons for greater than, equals, or less than are based + // on ASCII character code values. For example, a is greater than A, and a is + // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters + // (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters). + // + // For type Binary, DynamoDB treats each byte of the binary data as unsigned + // when it compares binary values. + // + // For information on specifying data types in JSON, see JSON Data Format (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataFormat.html) + // in the Amazon DynamoDB Developer Guide. + // + // ComparisonOperator - A comparator for evaluating attributes. For example, + // equals, greater than, less than, etc. + // + // The following comparison operators are available: + // + // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS + // | BEGINS_WITH | IN | BETWEEN + // + // For complete descriptions of all comparison operators, see the Condition + // (http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Condition.html) + // data type. + QueryFilter map[string]*Condition `type:"map"` + + // Determines the level of detail about provisioned throughput consumption that + // is returned in the response: + // + // INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary index + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem, do not access + // any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity + // information for table(s). + // + // TOTAL - The response includes only the aggregate ConsumedCapacity for + // the operation. + // + // NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // Specifies the order for index traversal: If true (default), the traversal + // is performed in ascending order; if false, the traversal is performed in + // descending order. + // + // Items with the same partition key value are stored in sorted order by sort + // key. If the sort key data type is Number, the results are stored in numeric + // order. For type String, the results are stored in order of ASCII character + // code values. For type Binary, DynamoDB treats each byte of the binary data + // as unsigned. + // + // If ScanIndexForward is true, DynamoDB returns the results in the order in + // which they are stored (by sort key value). This is the default behavior. + // If ScanIndexForward is false, DynamoDB reads the results in reverse order + // by sort key value, and then returns the results to the client. + ScanIndexForward *bool `type:"boolean"` + + // The attributes to be returned in the result. You can retrieve all item attributes, + // specific item attributes, the count of matching items, or in the case of + // an index, some or all of the attributes projected into the index. + // + // ALL_ATTRIBUTES - Returns all of the item attributes from the specified + // table or index. If you query a local secondary index, then for each matching + // item in the index DynamoDB will fetch the entire item from the parent table. + // If the index is configured to project all item attributes, then all of the + // data can be obtained from the local secondary index, and no fetching is required. 
+ // + // ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves + // all attributes that have been projected into the index. If the index is configured + // to project all attributes, this return value is equivalent to specifying + // ALL_ATTRIBUTES. + // + // COUNT - Returns the number of matching items, rather than the matching + // items themselves. + // + // SPECIFIC_ATTRIBUTES - Returns only the attributes listed in AttributesToGet. + // This return value is equivalent to specifying AttributesToGet without specifying + // any value for Select. + // + // If you query a local secondary index and request only attributes that are + // projected into that index, the operation will read only the index and not + // the table. If any of the requested attributes are not projected into the + // local secondary index, DynamoDB will fetch each of these attributes from + // the parent table. This extra fetching incurs additional throughput cost and + // latency. + // + // If you query a global secondary index, you can only request attributes that + // are projected into the index. Global secondary index queries cannot fetch + // attributes from the parent table. + // + // If neither Select nor AttributesToGet are specified, DynamoDB defaults + // to ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when + // accessing an index. You cannot use both Select and AttributesToGet together + // in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES. + // (This usage is equivalent to specifying AttributesToGet without any value + // for Select.) + // + // If you use the ProjectionExpression parameter, then the value for Select + // can only be SPECIFIC_ATTRIBUTES. Any other value for Select will return an + // error. + Select *string `type:"string" enum:"Select"` + + // The name of the table containing the requested items. + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s QueryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s QueryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *QueryInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "QueryInput"}
+ if s.AttributesToGet != nil && len(s.AttributesToGet) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("AttributesToGet", 1))
+ }
+ if s.IndexName != nil && len(*s.IndexName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("IndexName", 3))
+ }
+ if s.Limit != nil && *s.Limit < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
+ }
+ if s.TableName == nil {
+ invalidParams.Add(request.NewErrParamRequired("TableName"))
+ }
+ if s.TableName != nil && len(*s.TableName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("TableName", 3))
+ }
+ if s.KeyConditions != nil {
+ for i, v := range s.KeyConditions {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeyConditions", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+ if s.QueryFilter != nil {
+ for i, v := range s.QueryFilter {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "QueryFilter", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
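+
+// exampleQueryPages is an illustrative sketch, not part of the generated API:
+// it queries one partition using KeyConditionExpression with substitution
+// tokens, then follows LastEvaluatedKey/ExclusiveStartKey to page through a
+// result set larger than 1 MB. Table and attribute names are hypothetical.
+func exampleQueryPages(svc *DynamoDB) ([]map[string]*AttributeValue, error) {
+ input := &QueryInput{
+  TableName:              aws.String("Music"),
+  KeyConditionExpression: aws.String("Artist = :a AND begins_with(SongTitle, :t)"),
+  ExpressionAttributeValues: map[string]*AttributeValue{
+   ":a": {S: aws.String("No One You Know")},
+   ":t": {S: aws.String("Call")},
+  },
+ }
+ var items []map[string]*AttributeValue
+ for {
+  out, err := svc.Query(input)
+  if err != nil {
+   return nil, err
+  }
+  items = append(items, out.Items...)
+  // An empty LastEvaluatedKey means the last page has been read.
+  if len(out.LastEvaluatedKey) == 0 {
+   return items, nil
+  }
+  input.ExclusiveStartKey = out.LastEvaluatedKey
+ }
+}
+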
+// Represents the output of a Query operation.
+type QueryOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The capacity units consumed by an operation. The data returned includes the
+ // total provisioned throughput consumed, along with statistics for the table
+ // and any indexes involved in the operation. ConsumedCapacity is only returned
+ // if the request asked for it. For more information, see Provisioned Throughput
+ // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ConsumedCapacity *ConsumedCapacity `type:"structure"`
+
+ // The number of items in the response.
+ //
+ // If you used a QueryFilter in the request, then Count is the number of items
+ // returned after the filter was applied, and ScannedCount is the number of
+ // matching items before the filter was applied.
+ //
+ // If you did not use a filter in the request, then Count and ScannedCount
+ // are the same.
+ Count *int64 `type:"integer"`
+
+ // An array of item attributes that match the query criteria. Each element in
+ // this array consists of an attribute name and the value for that attribute.
+ Items []map[string]*AttributeValue `type:"list"`
+
+ // The primary key of the item where the operation stopped, inclusive of the
+ // previous result set. Use this value to start a new operation, excluding this
+ // value in the new request.
+ //
+ // If LastEvaluatedKey is empty, then the "last page" of results has been processed
+ // and there is no more data to be retrieved.
+ //
+ // If LastEvaluatedKey is not empty, it does not necessarily mean that there
+ // is more data in the result set. The only way to know when you have reached
+ // the end of the result set is when LastEvaluatedKey is empty.
+ LastEvaluatedKey map[string]*AttributeValue `type:"map"`
+
+ // The number of items evaluated, before any QueryFilter is applied. A high
+ // ScannedCount value with few, or no, Count results indicates an inefficient
+ // Query operation. For more information, see Count and ScannedCount (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#Count)
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // If you did not use a filter in the request, then ScannedCount is the same
+ // as Count.
+ ScannedCount *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s QueryOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s QueryOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a Scan operation.
+type ScanInput struct {
+ _ struct{} `type:"structure"`
+
+ // This is a legacy parameter, for backward compatibility. New applications
+ // should use ProjectionExpression instead. Do not combine legacy parameters
+ // and expression parameters in a single API call; otherwise, DynamoDB will
+ // return a ValidationException exception.
+ //
+ // This parameter allows you to retrieve attributes of type List or Map; however,
+ // it cannot retrieve individual elements within a List or a Map.
+ //
+ // The names of one or more attributes to retrieve. If no attribute names
+ // are provided, then all attributes will be returned. If any of the requested
+ // attributes are not found, they will not appear in the result.
+ //
+ // Note that AttributesToGet has no effect on provisioned throughput consumption.
+ // DynamoDB determines capacity units consumed based on item size, not on the
+ // amount of data that is returned to an application.
+ AttributesToGet []*string `min:"1" type:"list"`
+
+ // This is a legacy parameter, for backward compatibility. New applications
+ // should use FilterExpression instead. Do not combine legacy parameters and
+ // expression parameters in a single API call; otherwise, DynamoDB will return
+ // a ValidationException exception.
+ //
+ // A logical operator to apply to the conditions in a ScanFilter map:
+ //
+ // AND - If all of the conditions evaluate to true, then the entire map
+ // evaluates to true.
+ //
+ // OR - If at least one of the conditions evaluates to true, then the entire
+ // map evaluates to true.
+ //
+ // If you omit ConditionalOperator, then AND is the default.
+ //
+ // The operation will succeed only if the entire map evaluates to true.
+ //
+ // This parameter does not support attributes of type List or Map.
+ ConditionalOperator *string `type:"string" enum:"ConditionalOperator"`
+
+ // A Boolean value that determines the read consistency model during the scan:
+ //
+ // If ConsistentRead is false, then the data returned from Scan might not
+ // contain the results from other recently completed write operations (PutItem,
+ // UpdateItem or DeleteItem).
+ //
+ // If ConsistentRead is true, then all of the write operations that completed
+ // before the Scan began are guaranteed to be contained in the Scan response.
+ //
+ // The default setting for ConsistentRead is false.
+ //
+ // The ConsistentRead parameter is not supported on global secondary indexes.
+ // If you scan a global secondary index with ConsistentRead set to true, you
+ // will receive a ValidationException.
+ ConsistentRead *bool `type:"boolean"`
+
+ // The primary key of the first item that this operation will evaluate. Use
+ // the value that was returned for LastEvaluatedKey in the previous operation.
+ //
+ // The data type for ExclusiveStartKey must be String, Number or Binary. No
+ // set data types are allowed.
+ //
+ // In a parallel scan, a Scan request that includes ExclusiveStartKey must
+ // specify the same segment whose previous Scan returned the corresponding value
+ // of LastEvaluatedKey.
+ ExclusiveStartKey map[string]*AttributeValue `type:"map"` + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames: + // + // To access an attribute whose name conflicts with a DynamoDB reserved word. + // + // To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. + // For example, consider the following attribute name: + // + // Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot + // be used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // #P = :val + // + // Tokens that begin with the : character are expression attribute values, + // which are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see Accessing Item + // Attributes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeNames map[string]*string `type:"map"` + + // One or more values that can be substituted in an expression. + // + // Use the : (colon) character in an expression to dereference an attribute + // value. For example, suppose that you wanted to check whether the value of + // the ProductStatus attribute was one of the following: + // + // Available | Backordered | Discontinued + // + // You would first need to specify ExpressionAttributeValues as follows: + // + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} + // } + // + // You could then use these values in an expression, such as this: + // + // ProductStatus IN (:avail, :back, :disc) + // + // For more information on expression attribute values, see Specifying Conditions + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeValues map[string]*AttributeValue `type:"map"` + + // A string that contains conditions that DynamoDB applies after the Scan operation, + // but before the data is returned to you. Items that do not satisfy the FilterExpression + // criteria are not returned. + // + // A FilterExpression is applied after the items have already been read; the + // process of filtering does not consume any additional read capacity units. + // + // For more information, see Filter Expressions (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#FilteringResults) + // in the Amazon DynamoDB Developer Guide. + // + // FilterExpression replaces the legacy ScanFilter and ConditionalOperator + // parameters. + FilterExpression *string `type:"string"` + + // The name of a secondary index to scan. This index can be any local secondary + // index or global secondary index. Note that if you use the IndexName parameter, + // you must also provide TableName. 
+ IndexName *string `min:"3" type:"string"` + + // The maximum number of items to evaluate (not necessarily the number of matching + // items). If DynamoDB processes the number of items up to the limit while processing + // the results, it stops the operation and returns the matching values up to + // that point, and a key in LastEvaluatedKey to apply in a subsequent operation, + // so that you can pick up where you left off. Also, if the processed data set + // size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation + // and returns the matching values up to the limit, and a key in LastEvaluatedKey + // to apply in a subsequent operation to continue the operation. For more information, + // see Query and Scan (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html) + // in the Amazon DynamoDB Developer Guide. + Limit *int64 `min:"1" type:"integer"` + + // A string that identifies one or more attributes to retrieve from the specified + // table or index. These attributes can include scalars, sets, or elements of + // a JSON document. The attributes in the expression must be separated by commas. + // + // If no attribute names are specified, then all attributes will be returned. + // If any of the requested attributes are not found, they will not appear in + // the result. + // + // For more information, see Accessing Item Attributes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + // + // ProjectionExpression replaces the legacy AttributesToGet parameter. + ProjectionExpression *string `type:"string"` + + // Determines the level of detail about provisioned throughput consumption that + // is returned in the response: + // + // INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary index + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem, do not access + // any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity + // information for table(s). + // + // TOTAL - The response includes only the aggregate ConsumedCapacity for + // the operation. + // + // NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // This is a legacy parameter, for backward compatibility. New applications + // should use FilterExpression instead. Do not combine legacy parameters and + // expression parameters in a single API call; otherwise, DynamoDB will return + // a ValidationException exception. + // + // A condition that evaluates the scan results and returns only the desired + // values. + // + // This parameter does not support attributes of type List or Map. + // + // If you specify more than one condition in the ScanFilter map, then by default + // all of the conditions must evaluate to true. In other words, the conditions + // are ANDed together. (You can use the ConditionalOperator parameter to OR + // the conditions instead. If you do this, then at least one of the conditions + // must evaluate to true, rather than all of them.) + // + // Each ScanFilter element consists of an attribute name to compare, along + // with the following: + // + // AttributeValueList - One or more values to evaluate against the supplied + // attribute. 
The number of values in the list depends on the operator specified
+ // in ComparisonOperator.
+ //
+ // For type Number, value comparisons are numeric.
+ //
+ // String value comparisons for greater than, equals, or less than are based
+ // on ASCII character code values. For example, a is greater than A, and a is
+ // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters
+ // (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters).
+ //
+ // For Binary, DynamoDB treats each byte of the binary data as unsigned when
+ // it compares binary values.
+ //
+ // For information on specifying data types in JSON, see JSON Data Format (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataFormat.html)
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // ComparisonOperator - A comparator for evaluating attributes. For example,
+ // equals, greater than, less than, etc.
+ //
+ // The following comparison operators are available:
+ //
+ // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS
+ // | BEGINS_WITH | IN | BETWEEN
+ //
+ // For complete descriptions of all comparison operators, see Condition (http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Condition.html).
+ ScanFilter map[string]*Condition `type:"map"`
+
+ // For a parallel Scan request, Segment identifies an individual segment to
+ // be scanned by an application worker.
+ //
+ // Segment IDs are zero-based, so the first segment is always 0. For example,
+ // if you want to use four application threads to scan a table or an index,
+ // then the first thread specifies a Segment value of 0, the second thread specifies
+ // 1, and so on.
+ //
+ // The value of LastEvaluatedKey returned from a parallel Scan request must
+ // be used as ExclusiveStartKey with the same segment ID in a subsequent Scan
+ // operation.
+ //
+ // The value for Segment must be greater than or equal to 0, and less than
+ // the value provided for TotalSegments.
+ //
+ // If you provide Segment, you must also provide TotalSegments.
+ Segment *int64 `type:"integer"`
+
+ // The attributes to be returned in the result. You can retrieve all item attributes,
+ // specific item attributes, or the count of matching items.
+ //
+ // ALL_ATTRIBUTES - Returns all of the item attributes.
+ //
+ // ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves
+ // all attributes that have been projected into the index. If the index is configured
+ // to project all attributes, this return value is equivalent to specifying
+ // ALL_ATTRIBUTES.
+ //
+ // COUNT - Returns the number of matching items, rather than the matching
+ // items themselves.
+ //
+ // SPECIFIC_ATTRIBUTES - Returns only the attributes listed in AttributesToGet.
+ // This return value is equivalent to specifying AttributesToGet without specifying
+ // any value for Select.
+ //
+ // If neither Select nor AttributesToGet are specified, DynamoDB defaults
+ // to ALL_ATTRIBUTES. You cannot use both AttributesToGet and Select together
+ // in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES.
+ // (This usage is equivalent to specifying AttributesToGet without any value
+ // for Select.)
+ Select *string `type:"string" enum:"Select"`
+
+ // The name of the table containing the requested items; or, if you provide
+ // IndexName, the name of the table to which that index belongs.
+ TableName *string `min:"3" type:"string" required:"true"` + + // For a parallel Scan request, TotalSegments represents the total number of + // segments into which the Scan operation will be divided. The value of TotalSegments + // corresponds to the number of application workers that will perform the parallel + // scan. For example, if you want to use four application threads to scan a + // table or an index, specify a TotalSegments value of 4. + // + // The value for TotalSegments must be greater than or equal to 1, and less + // than or equal to 1000000. If you specify a TotalSegments value of 1, the + // Scan operation will be sequential rather than parallel. + // + // If you specify TotalSegments, you must also specify Segment. + TotalSegments *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s ScanInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScanInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ScanInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ScanInput"} + if s.AttributesToGet != nil && len(s.AttributesToGet) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AttributesToGet", 1)) + } + if s.IndexName != nil && len(*s.IndexName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + if s.TotalSegments != nil && *s.TotalSegments < 1 { + invalidParams.Add(request.NewErrParamMinValue("TotalSegments", 1)) + } + if s.ScanFilter != nil { + for i, v := range s.ScanFilter { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ScanFilter", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a Scan operation. +type ScanOutput struct { + _ struct{} `type:"structure"` + + // The capacity units consumed by an operation. The data returned includes the + // total provisioned throughput consumed, along with statistics for the table + // and any indexes involved in the operation. ConsumedCapacity is only returned + // if the request asked for it. For more information, see Provisioned Throughput + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // in the Amazon DynamoDB Developer Guide. + ConsumedCapacity *ConsumedCapacity `type:"structure"` + + // The number of items in the response. + // + // If you set ScanFilter in the request, then Count is the number of items + // returned after the filter was applied, and ScannedCount is the number of + // matching items before the filter was applied. + // + // If you did not use a filter in the request, then Count is the same as ScannedCount. + Count *int64 `type:"integer"` + + // An array of item attributes that match the scan criteria. Each element in + // this array consists of an attribute name and the value for that attribute. 
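+ //
+ // A hedged client-side sketch of draining a table page by page (the loop,
+ // table name, and variable names are illustrative, not part of the API):
+ //
+ //    var startKey map[string]*dynamodb.AttributeValue
+ //    for {
+ //        out, err := svc.Scan(&dynamodb.ScanInput{
+ //            TableName:         aws.String("Music"),
+ //            ExclusiveStartKey: startKey,
+ //        })
+ //        if err != nil {
+ //            return err
+ //        }
+ //        // consume out.Items here
+ //        if len(out.LastEvaluatedKey) == 0 {
+ //            break // an empty LastEvaluatedKey marks the last page
+ //        }
+ //        startKey = out.LastEvaluatedKey
+ //    }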
+ Items []map[string]*AttributeValue `type:"list"` + + // The primary key of the item where the operation stopped, inclusive of the + // previous result set. Use this value to start a new operation, excluding this + // value in the new request. + // + // If LastEvaluatedKey is empty, then the "last page" of results has been processed + // and there is no more data to be retrieved. + // + // If LastEvaluatedKey is not empty, it does not necessarily mean that there + // is more data in the result set. The only way to know when you have reached + // the end of the result set is when LastEvaluatedKey is empty. + LastEvaluatedKey map[string]*AttributeValue `type:"map"` + + // The number of items evaluated, before any ScanFilter is applied. A high ScannedCount + // value with few, or no, Count results indicates an inefficient Scan operation. + // For more information, see Count and ScannedCount (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#Count) + // in the Amazon DynamoDB Developer Guide. + // + // If you did not use a filter in the request, then ScannedCount is the same + // as Count. + ScannedCount *int64 `type:"integer"` +} + +// String returns the string representation +func (s ScanOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScanOutput) GoString() string { + return s.String() +} + +// Represents the DynamoDB Streams configuration for a table in DynamoDB. +type StreamSpecification struct { + _ struct{} `type:"structure"` + + // Indicates whether DynamoDB Streams is enabled (true) or disabled (false) + // on the table. + StreamEnabled *bool `type:"boolean"` + + // The DynamoDB Streams settings for the table. These settings consist of: + // + // StreamEnabled - Indicates whether DynamoDB Streams is enabled (true) + // or disabled (false) on the table. + // + // StreamViewType - When an item in the table is modified, StreamViewType + // determines what information is written to the stream for this table. Valid + // values for StreamViewType are: + // + // KEYS_ONLY - Only the key attributes of the modified item are written + // to the stream. + // + // NEW_IMAGE - The entire item, as it appears after it was modified, is + // written to the stream. + // + // OLD_IMAGE - The entire item, as it appeared before it was modified, is + // written to the stream. + // + // NEW_AND_OLD_IMAGES - Both the new and the old item images of the item + // are written to the stream. + StreamViewType *string `type:"string" enum:"StreamViewType"` +} + +// String returns the string representation +func (s StreamSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StreamSpecification) GoString() string { + return s.String() +} + +// Represents the properties of a table. +type TableDescription struct { + _ struct{} `type:"structure"` + + // An array of AttributeDefinition objects. Each of these objects describes + // one attribute in the table and index key schema. + // + // Each AttributeDefinition object in this array is composed of: + // + // AttributeName - The name of the attribute. + // + // AttributeType - The data type for the attribute. + AttributeDefinitions []*AttributeDefinition `type:"list"` + + // The date and time when the table was created, in UNIX epoch time (http://www.epochconverter.com/) + // format. 
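+ //
+ // Illustrative only: the SDK surfaces this as a *time.Time, so client code
+ // can read it nil-safely with the aws conversion helpers, e.g.
+ //
+ //    created := aws.TimeValue(desc.CreationDateTime) // desc is hypothetical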
+ CreationDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The global secondary indexes, if any, on the table. Each index is scoped + // to a given partition key value. Each element is composed of: + // + // Backfilling - If true, then the index is currently in the backfilling + // phase. Backfilling occurs only when a new global secondary index is added + // to the table; it is the process by which DynamoDB populates the new index + // with data from the table. (This attribute does not appear for indexes that + // were created during a CreateTable operation.) + // + // IndexName - The name of the global secondary index. + // + // IndexSizeBytes - The total size of the global secondary index, in bytes. + // DynamoDB updates this value approximately every six hours. Recent changes + // might not be reflected in this value. + // + // IndexStatus - The current status of the global secondary index: + // + // CREATING - The index is being created. + // + // UPDATING - The index is being updated. + // + // DELETING - The index is being deleted. + // + // ACTIVE - The index is ready for use. + // + // ItemCount - The number of items in the global secondary index. DynamoDB + // updates this value approximately every six hours. Recent changes might not + // be reflected in this value. + // + // KeySchema - Specifies the complete index key schema. The attribute names + // in the key schema must be between 1 and 255 characters (inclusive). The key + // schema must begin with the same partition key as the table. + // + // Projection - Specifies attributes that are copied (projected) from the + // table into the index. These are in addition to the primary key attributes + // and index key attributes, which are automatically projected. Each attribute + // specification is composed of: + // + // ProjectionType - One of the following: + // + // KEYS_ONLY - Only the index and primary keys are projected into the index. + // + // INCLUDE - Only the specified table attributes are projected into the + // index. The list of projected attributes are in NonKeyAttributes. + // + // ALL - All of the table attributes are projected into the index. + // + // NonKeyAttributes - A list of one or more non-key attribute names that + // are projected into the secondary index. The total count of attributes provided + // in NonKeyAttributes, summed across all of the secondary indexes, must not + // exceed 20. If you project the same attribute into two different indexes, + // this counts as two distinct attributes when determining the total. + // + // ProvisionedThroughput - The provisioned throughput settings for the + // global secondary index, consisting of read and write capacity units, along + // with data about increases and decreases. + // + // If the table is in the DELETING state, no information about indexes will + // be returned. + GlobalSecondaryIndexes []*GlobalSecondaryIndexDescription `type:"list"` + + // The number of items in the specified table. DynamoDB updates this value approximately + // every six hours. Recent changes might not be reflected in this value. + ItemCount *int64 `type:"long"` + + // The primary key structure for the table. Each KeySchemaElement consists of: + // + // AttributeName - The name of the attribute. + // + // KeyType - The role of the attribute: + // + // HASH - partition key + // + // RANGE - sort key + // + // The partition key of an item is also known as its hash attribute. 
The + // term "hash attribute" derives from DynamoDB's usage of an internal hash function + // to evenly distribute data items across partitions, based on their partition + // key values. + // + // The sort key of an item is also known as its range attribute. The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. + // + // For more information about primary keys, see Primary Key (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelPrimaryKey) + // in the Amazon DynamoDB Developer Guide. + KeySchema []*KeySchemaElement `min:"1" type:"list"` + + // The Amazon Resource Name (ARN) that uniquely identifies the latest stream + // for this table. + LatestStreamArn *string `min:"37" type:"string"` + + // A timestamp, in ISO 8601 format, for this stream. + // + // Note that LatestStreamLabel is not a unique identifier for the stream, because + // it is possible that a stream from another table might have the same timestamp. + // However, the combination of the following three elements is guaranteed to + // be unique: + // + // the AWS customer ID. + // + // the table name. + // + // the StreamLabel. + LatestStreamLabel *string `type:"string"` + + // Represents one or more local secondary indexes on the table. Each index is + // scoped to a given partition key value. Tables with one or more local secondary + // indexes are subject to an item collection size limit, where the amount of + // data within a given item collection cannot exceed 10 GB. Each element is + // composed of: + // + // IndexName - The name of the local secondary index. + // + // KeySchema - Specifies the complete index key schema. The attribute names + // in the key schema must be between 1 and 255 characters (inclusive). The key + // schema must begin with the same partition key as the table. + // + // Projection - Specifies attributes that are copied (projected) from the + // table into the index. These are in addition to the primary key attributes + // and index key attributes, which are automatically projected. Each attribute + // specification is composed of: + // + // ProjectionType - One of the following: + // + // KEYS_ONLY - Only the index and primary keys are projected into the index. + // + // INCLUDE - Only the specified table attributes are projected into the + // index. The list of projected attributes are in NonKeyAttributes. + // + // ALL - All of the table attributes are projected into the index. + // + // NonKeyAttributes - A list of one or more non-key attribute names that + // are projected into the secondary index. The total count of attributes provided + // in NonKeyAttributes, summed across all of the secondary indexes, must not + // exceed 20. If you project the same attribute into two different indexes, + // this counts as two distinct attributes when determining the total. + // + // IndexSizeBytes - Represents the total size of the index, in bytes. + // DynamoDB updates this value approximately every six hours. Recent changes + // might not be reflected in this value. + // + // ItemCount - Represents the number of items in the index. DynamoDB updates + // this value approximately every six hours. Recent changes might not be reflected + // in this value. + // + // If the table is in the DELETING state, no information about indexes will + // be returned.
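+ //
+ // A minimal client-side sketch (the desc variable is hypothetical, e.g. a
+ // TableDescription returned by DescribeTable) of listing these indexes:
+ //
+ //    for _, lsi := range desc.LocalSecondaryIndexes {
+ //        fmt.Println(aws.StringValue(lsi.IndexName))
+ //    }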
+ LocalSecondaryIndexes []*LocalSecondaryIndexDescription `type:"list"` + + // The provisioned throughput settings for the table, consisting of read and + // write capacity units, along with data about increases and decreases. + ProvisionedThroughput *ProvisionedThroughputDescription `type:"structure"` + + // The current DynamoDB Streams configuration for the table. + StreamSpecification *StreamSpecification `type:"structure"` + + // The Amazon Resource Name (ARN) that uniquely identifies the table. + TableArn *string `type:"string"` + + // The name of the table. + TableName *string `min:"3" type:"string"` + + // The total size of the specified table, in bytes. DynamoDB updates this value + // approximately every six hours. Recent changes might not be reflected in this + // value. + TableSizeBytes *int64 `type:"long"` + + // The current state of the table: + // + // CREATING - The table is being created. + // + // UPDATING - The table is being updated. + // + // DELETING - The table is being deleted. + // + // ACTIVE - The table is ready for use. + TableStatus *string `type:"string" enum:"TableStatus"` +} + +// String returns the string representation +func (s TableDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TableDescription) GoString() string { + return s.String() +} + +// Represents the new provisioned throughput settings to be applied to a global +// secondary index. +type UpdateGlobalSecondaryIndexAction struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index to be updated. + IndexName *string `min:"3" type:"string" required:"true"` + + // Represents the provisioned throughput settings for a specified table or index. + // The settings can be modified using the UpdateTable operation. + // + // For current minimum and maximum provisioned throughput values, see Limits + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // in the Amazon DynamoDB Developer Guide. + ProvisionedThroughput *ProvisionedThroughput `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateGlobalSecondaryIndexAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateGlobalSecondaryIndexAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateGlobalSecondaryIndexAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateGlobalSecondaryIndexAction"} + if s.IndexName == nil { + invalidParams.Add(request.NewErrParamRequired("IndexName")) + } + if s.IndexName != nil && len(*s.IndexName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) + } + if s.ProvisionedThroughput == nil { + invalidParams.Add(request.NewErrParamRequired("ProvisionedThroughput")) + } + if s.ProvisionedThroughput != nil { + if err := s.ProvisionedThroughput.Validate(); err != nil { + invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the input of an UpdateItem operation. +type UpdateItemInput struct { + _ struct{} `type:"structure"` + + // This is a legacy parameter, for backward compatibility. New applications + // should use UpdateExpression instead. 
Do not combine legacy parameters and + // expression parameters in a single API call; otherwise, DynamoDB will return + // a ValidationException exception. + // + // This parameter can be used for modifying top-level attributes; however, + // it does not support individual list or map elements. + // + // The names of attributes to be modified, the action to perform on each, + // and the new value for each. If you are updating an attribute that is an index + // key attribute for any indexes on that table, the attribute type must match + // the index key type defined in the AttributeDefinitions of the table description. + // You can use UpdateItem to update any non-key attributes. + // + // Attribute values cannot be null. String and Binary type attributes must + // have lengths greater than zero. Set type attributes must not be empty. Requests + // with empty values will be rejected with a ValidationException exception. + // + // Each AttributeUpdates element consists of an attribute name to modify, along + // with the following: + // + // Value - The new value, if applicable, for this attribute. + // + // Action - A value that specifies how to perform the update. This action + // is only valid for an existing attribute whose data type is Number or is a + // set; do not use ADD for other data types. + // + // If an item with the specified primary key is found in the table, the following + // values perform the following actions: + // + // PUT - Adds the specified attribute to the item. If the attribute already + // exists, it is replaced by the new value. + // + // DELETE - Removes the attribute and its value, if no value is specified + // for DELETE. The data type of the specified value must match the existing + // value's data type. + // + // If a set of values is specified, then those values are subtracted from the + // old set. For example, if the attribute value was the set [a,b,c] and the + // DELETE action specifies [a,c], then the final attribute value is [b]. Specifying + // an empty set is an error. + // + // ADD - Adds the specified value to the item, if the attribute does not + // already exist. If the attribute does exist, then the behavior of ADD depends + // on the data type of the attribute: + // + // If the existing attribute is a number, and if Value is also a number, + // then Value is mathematically added to the existing attribute. If Value is + // a negative number, then it is subtracted from the existing attribute. + // + // If you use ADD to increment or decrement a number value for an item that + // doesn't exist before the update, DynamoDB uses 0 as the initial value. + // + // Similarly, if you use ADD for an existing item to increment or decrement + // an attribute value that doesn't exist before the update, DynamoDB uses 0 + // as the initial value. For example, suppose that the item you want to update + // doesn't have an attribute named itemcount, but you decide to ADD the number + // 3 to this attribute anyway. DynamoDB will create the itemcount attribute, + // set its initial value to 0, and finally add 3 to it. The result will be a + // new itemcount attribute, with a value of 3. + // + // If the existing data type is a set, and if Value is also a set, then + // Value is appended to the existing set. For example, if the attribute value + // is the set [1,2], and the ADD action specified [3], then the final attribute + // value is [1,2,3].
An error occurs if an ADD action is specified for a set + // attribute and the attribute type specified does not match the existing set + // type. + // + // Both sets must have the same primitive data type. For example, if the existing + // data type is a set of strings, Value must also be a set of strings. + // + // If no item with the specified key is found in the table, the following + // values perform the following actions: + // + // PUT - Causes DynamoDB to create a new item with the specified primary + // key, and then adds the attribute. + // + // DELETE - Nothing happens, because attributes cannot be deleted from a + // nonexistent item. The operation succeeds, but DynamoDB does not create a + // new item. + // + // ADD - Causes DynamoDB to create an item with the supplied primary key + // and number (or set of numbers) for the attribute value. The only data types + // allowed are Number and Number Set. + // + // If you provide any attributes that are part of an index key, then the + // data types for those attributes must match those of the schema in the table's + // attribute definition. + AttributeUpdates map[string]*AttributeValueUpdate `type:"map"` + + // A condition that must be satisfied in order for a conditional update to succeed. + // + // An expression can contain any of the following: + // + // Functions: attribute_exists | attribute_not_exists | attribute_type | + // contains | begins_with | size + // + // These function names are case-sensitive. + // + // Comparison operators: = | <> | < | > | <= | + // >= | BETWEEN | IN + // + // Logical operators: AND | OR | NOT + // + // For more information on condition expressions, see Specifying Conditions + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) + // in the Amazon DynamoDB Developer Guide. + // + // ConditionExpression replaces the legacy ConditionalOperator and Expected + // parameters. + ConditionExpression *string `type:"string"` + + // This is a legacy parameter, for backward compatibility. New applications + // should use ConditionExpression instead. Do not combine legacy parameters + // and expression parameters in a single API call; otherwise, DynamoDB will + // return a ValidationException exception. + // + // A logical operator to apply to the conditions in the Expected map: + // + // AND - If all of the conditions evaluate to true, then the entire map + // evaluates to true. + // + // OR - If at least one of the conditions evaluates to true, then the entire + // map evaluates to true. + // + // If you omit ConditionalOperator, then AND is the default. + // + // The operation will succeed only if the entire map evaluates to true. + // + // This parameter does not support attributes of type List or Map. + ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` + + // This is a legacy parameter, for backward compatibility. New applications + // should use ConditionExpression instead. Do not combine legacy parameters + // and expression parameters in a single API call; otherwise, DynamoDB will + // return a ValidationException exception. + // + // A map of attribute/condition pairs. Expected provides a conditional block + // for the UpdateItem operation. + // + // Each element of Expected consists of an attribute name, a comparison operator, + // and one or more values. DynamoDB compares the attribute with the value(s) + // you supplied, using the comparison operator.
For each Expected element, the + // result of the evaluation is either true or false. + // + // If you specify more than one element in the Expected map, then by default + // all of the conditions must evaluate to true. In other words, the conditions + // are ANDed together. (You can use the ConditionalOperator parameter to OR + // the conditions instead. If you do this, then at least one of the conditions + // must evaluate to true, rather than all of them.) + // + // If the Expected map evaluates to true, then the conditional operation succeeds; + // otherwise, it fails. + // + // Expected contains the following: + // + // AttributeValueList - One or more values to evaluate against the supplied + // attribute. The number of values in the list depends on the ComparisonOperator + // being used. + // + // For type Number, value comparisons are numeric. + // + // String value comparisons for greater than, equals, or less than are based + // on ASCII character code values. For example, a is greater than A, and a is + // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters + // (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters). + // + // For type Binary, DynamoDB treats each byte of the binary data as unsigned + // when it compares binary values. + // + // ComparisonOperator - A comparator for evaluating attributes in the AttributeValueList. + // When performing the comparison, DynamoDB uses strongly consistent reads. + // + // The following comparison operators are available: + // + // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS + // | BEGINS_WITH | IN | BETWEEN + // + // The following are descriptions of each comparison operator. + // + // EQ : Equal. EQ is supported for all datatypes, including lists and maps. + // + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, Binary, String Set, Number Set, or Binary Set. If an item + // contains an AttributeValue element of a different type than the one provided + // in the request, the value does not match. For example, {"S":"6"} does not + // equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. + // + // NE : Not equal. NE is supported for all datatypes, including lists and + // maps. + // + // AttributeValueList can contain only one AttributeValue of type String, + // Number, Binary, String Set, Number Set, or Binary Set. If an item contains + // an AttributeValue of a different type than the one provided in the request, + // the value does not match. For example, {"S":"6"} does not equal {"N":"6"}. + // Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. + // + // LE : Less than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // LT : Less than. + // + // AttributeValueList can contain only one AttributeValue of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. 
+ // + // GE : Greater than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // GT : Greater than. + // + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // NOT_NULL : The attribute exists. NOT_NULL is supported for all datatypes, + // including lists and maps. + // + // This operator tests for the existence of an attribute, not its data type. + // If the data type of attribute "a" is null, and you evaluate it using NOT_NULL, + // the result is a Boolean true. This result is because the attribute "a" exists; + // its data type is not relevant to the NOT_NULL comparison operator. + // + // NULL : The attribute does not exist. NULL is supported for all datatypes, + // including lists and maps. + // + // This operator tests for the nonexistence of an attribute, not its data + // type. If the data type of attribute "a" is null, and you evaluate it using + // NULL, the result is a Boolean false. This is because the attribute "a" exists; + // its data type is not relevant to the NULL comparison operator. + // + // CONTAINS : Checks for a subsequence, or value in a set. + // + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If the target attribute of the + // comparison is of type String, then the operator checks for a substring match. + // If the target attribute of the comparison is of type Binary, then the operator + // looks for a subsequence of the target that matches the input. If the target + // attribute of the comparison is a set ("SS", "NS", or "BS"), then the operator + // evaluates to true if it finds an exact match with any member of the set. + // + // CONTAINS is supported for lists: When evaluating "a CONTAINS b", "a" can + // be a list; however, "b" cannot be a set, a map, or a list. + // + // NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value + // in a set. + // + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If the target attribute of the + // comparison is a String, then the operator checks for the absence of a substring + // match. If the target attribute of the comparison is Binary, then the operator + // checks for the absence of a subsequence of the target that matches the input. + // If the target attribute of the comparison is a set ("SS", "NS", or "BS"), + // then the operator evaluates to true if it does not find an exact match with + // any member of the set. + // + // NOT_CONTAINS is supported for lists: When evaluating "a NOT CONTAINS b", + // "a" can be a list; however, "b" cannot be a set, a map, or a list. + // + // BEGINS_WITH : Checks for a prefix. + // + // AttributeValueList can contain only one AttributeValue of type String or + // Binary (not a Number or a set type). 
The target attribute of the comparison + // must be of type String or Binary (not a Number or a set type). + // + // IN : Checks for matching elements within two sets. + // + // AttributeValueList can contain one or more AttributeValue elements of type + // String, Number, or Binary (not a set type). These attributes are compared + // against an existing set type attribute of an item. If any elements of the + // input set are present in the item attribute, the expression evaluates to + // true. + // + // BETWEEN : Greater than or equal to the first value, and less than or + // equal to the second value. + // + // AttributeValueList must contain two AttributeValue elements of the same + // type, either String, Number, or Binary (not a set type). A target attribute + // matches if the target value is greater than, or equal to, the first element + // and less than, or equal to, the second element. If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not compare to {"N":"6"}. Also, + // {"N":"6"} does not compare to {"NS":["6", "2", "1"]}. + // + // For usage examples of AttributeValueList and ComparisonOperator, see + // Legacy Conditional Parameters (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html) + // in the Amazon DynamoDB Developer Guide. + // + // For backward compatibility with previous DynamoDB releases, the following + // parameters can be used instead of AttributeValueList and ComparisonOperator: + // + // Value - A value for DynamoDB to compare with an attribute. + // + // Exists - A Boolean value that causes DynamoDB to evaluate the value before + // attempting the conditional operation: + // + // If Exists is true, DynamoDB will check to see if that attribute value + // already exists in the table. If it is found, then the condition evaluates + // to true; otherwise the condition evaluates to false. + // + // If Exists is false, DynamoDB assumes that the attribute value does not + // exist in the table. If in fact the value does not exist, then the assumption + // is valid and the condition evaluates to true. If the value is found, despite + // the assumption that it does not exist, the condition evaluates to false. + // + // Note that the default value for Exists is true. + // + // The Value and Exists parameters are incompatible with AttributeValueList + // and ComparisonOperator. Note that if you use both sets of parameters at once, + // DynamoDB will return a ValidationException exception. + // + // This parameter does not support attributes of type List or Map. + Expected map[string]*ExpectedAttributeValue `type:"map"` + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames: + // + // To access an attribute whose name conflicts with a DynamoDB reserved word. + // + // To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. + // For example, consider the following attribute name: + // + // Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot + // be used directly in an expression.
(For the complete list of reserved words, + // see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // #P = :val + // + // Tokens that begin with the : character are expression attribute values, + // which are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see Accessing Item + // Attributes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeNames map[string]*string `type:"map"` + + // One or more values that can be substituted in an expression. + // + // Use the : (colon) character in an expression to dereference an attribute + // value. For example, suppose that you wanted to check whether the value of + // the ProductStatus attribute was one of the following: + // + // Available | Backordered | Discontinued + // + // You would first need to specify ExpressionAttributeValues as follows: + // + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} + // } + // + // You could then use these values in an expression, such as this: + // + // ProductStatus IN (:avail, :back, :disc) + // + // For more information on expression attribute values, see Specifying Conditions + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeValues map[string]*AttributeValue `type:"map"` + + // The primary key of the item to be updated. Each element consists of an attribute + // name and a value for that attribute. + // + // For the primary key, you must provide all of the attributes. For example, + // with a simple primary key, you only need to provide a value for the partition + // key. For a composite primary key, you must provide values for both the partition + // key and the sort key. + Key map[string]*AttributeValue `type:"map" required:"true"` + + // Determines the level of detail about provisioned throughput consumption that + // is returned in the response: + // + // INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary index + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem, do not access + // any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity + // information for table(s). + // + // TOTAL - The response includes only the aggregate ConsumedCapacity for + // the operation. + // + // NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // Determines whether item collection metrics are returned. If set to SIZE, + // the response includes statistics about item collections, if any, that were + // modified during the operation. If set to NONE + // (the default), no statistics are returned.
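+ //
+ // Putting the expression parameters documented above together, a hedged
+ // client-side sketch of a full UpdateItem call (the table, key, attribute,
+ // and token names are hypothetical):
+ //
+ //    out, err := svc.UpdateItem(&dynamodb.UpdateItemInput{
+ //        TableName: aws.String("Music"),
+ //        Key: map[string]*dynamodb.AttributeValue{
+ //            "Artist": {S: aws.String("Acme Band")},
+ //        },
+ //        UpdateExpression:         aws.String("SET #P = :val"),
+ //        ExpressionAttributeNames: map[string]*string{"#P": aws.String("Percentile")},
+ //        ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
+ //            ":val": {N: aws.String("75")},
+ //        },
+ //        ReturnValues: aws.String("UPDATED_NEW"),
+ //    })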
+ ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"` + + // Use ReturnValues if you want to get the item attributes as they appeared + // either before or after they were updated. For UpdateItem, the valid values + // are: + // + // NONE - If ReturnValues is not specified, or if its value is NONE, then + // nothing is returned. (This setting is the default for ReturnValues.) + // + // ALL_OLD - If UpdateItem overwrote an attribute name-value pair, then + // the content of the old item is returned. + // + // UPDATED_OLD - The old versions of only the updated attributes are returned. + // + // ALL_NEW - All of the attributes of the new version of the item are returned. + // + // UPDATED_NEW - The new versions of only the updated attributes are returned. + // + // There is no additional cost associated with requesting a return value + // aside from the small network and processing overhead of receiving a larger + // response. No Read Capacity Units are consumed. + // + // Values returned are strongly consistent. + ReturnValues *string `type:"string" enum:"ReturnValue"` + + // The name of the table containing the item to update. + TableName *string `min:"3" type:"string" required:"true"` + + // An expression that defines one or more attributes to be updated, the action + // to be performed on them, and new value(s) for them. + // + // The following action values are available for UpdateExpression. + // + // SET - Adds one or more attributes and values to an item. If any of these + // attributes already exist, they are replaced by the new values. You can also + // use SET to add or subtract from an attribute that is of type Number. For + // example: SET myNum = myNum + :val + // + // SET supports the following functions: + // + // if_not_exists (path, operand) - if the item does not contain an attribute + // at the specified path, then if_not_exists evaluates to operand; otherwise, + // it evaluates to path. You can use this function to avoid overwriting an attribute + // that may already be present in the item. + // + // list_append (operand, operand) - evaluates to a list with a new element + // added to it. You can append the new element to the start or the end of the + // list by reversing the order of the operands. + // + // These function names are case-sensitive. + // + // REMOVE - Removes one or more attributes from an item. + // + // ADD - Adds the specified value to the item, if the attribute does not + // already exist. If the attribute does exist, then the behavior of ADD depends + // on the data type of the attribute: + // + // If the existing attribute is a number, and if Value is also a number, + // then Value is mathematically added to the existing attribute. If Value is + // a negative number, then it is subtracted from the existing attribute. + // + // If you use ADD to increment or decrement a number value for an item that + // doesn't exist before the update, DynamoDB uses 0 as the initial value. + // + // Similarly, if you use ADD for an existing item to increment or decrement + // an attribute value that doesn't exist before the update, DynamoDB uses 0 + // as the initial value. For example, suppose that the item you want to update + // doesn't have an attribute named itemcount, but you decide to ADD the number + // 3 to this attribute anyway. DynamoDB will create the itemcount attribute, + // set its initial value to 0, and finally add 3 to it. The result will be a + // new itemcount attribute in the item, with a value of 3.
+ // + // If the existing data type is a set and if Value is also a set, then Value + // is added to the existing set. For example, if the attribute value is the + // set [1,2], and the ADD action specified [3], then the final attribute value + // is [1,2,3]. An error occurs if an ADD action is specified for a set attribute + // and the attribute type specified does not match the existing set type. + // + // Both sets must have the same primitive data type. For example, if the existing + // data type is a set of strings, the Value must also be a set of strings. + // + // The ADD action only supports Number and set data types. In addition, + // ADD can only be used on top-level attributes, not nested attributes. + // + // DELETE - Deletes an element from a set. + // + // If a set of values is specified, then those values are subtracted from the + // old set. For example, if the attribute value was the set [a,b,c] and the + // DELETE action specifies [a,c], then the final attribute value is [b]. Specifying + // an empty set is an error. + // + // The DELETE action only supports set data types. In addition, DELETE can + // only be used on top-level attributes, not nested attributes. + // + // You can have many actions in a single expression, such as the following: + // SET a=:value1, b=:value2 DELETE :value3, :value4, :value5 + // + // For more information on update expressions, see Modifying Items and Attributes + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.Modifying.html) + // in the Amazon DynamoDB Developer Guide. + // + // UpdateExpression replaces the legacy AttributeUpdates parameter. + UpdateExpression *string `type:"string"` +} + +// String returns the string representation +func (s UpdateItemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateItemInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateItemInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateItemInput"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of an UpdateItem operation. +type UpdateItemOutput struct { + _ struct{} `type:"structure"` + + // A map of attribute values as they appeared before the UpdateItem operation. + // This map only appears if ReturnValues was specified as something other than + // NONE in the request. Each element represents one attribute. + Attributes map[string]*AttributeValue `type:"map"` + + // The capacity units consumed by an operation. The data returned includes the + // total provisioned throughput consumed, along with statistics for the table + // and any indexes involved in the operation. ConsumedCapacity is only returned + // if the request asked for it. For more information, see Provisioned Throughput + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // in the Amazon DynamoDB Developer Guide. + ConsumedCapacity *ConsumedCapacity `type:"structure"` + + // Information about item collections, if any, that were affected by the operation. 
+ // ItemCollectionMetrics is only returned if the request asked for it. If the + // table does not have any local secondary indexes, this information is not + // returned in the response. + ItemCollectionMetrics *ItemCollectionMetrics `type:"structure"` +} + +// String returns the string representation +func (s UpdateItemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateItemOutput) GoString() string { + return s.String() +} + +// Represents the input of an UpdateTable operation. +type UpdateTableInput struct { + _ struct{} `type:"structure"` + + // An array of attributes that describe the key schema for the table and indexes. + // If you are adding a new global secondary index to the table, AttributeDefinitions + // must include the key element(s) of the new index. + AttributeDefinitions []*AttributeDefinition `type:"list"` + + // An array of one or more global secondary indexes for the table. For each + // index in the array, you can request one action: + // + // Create - add a new global secondary index to the table. + // + // Update - modify the provisioned throughput settings of an existing global + // secondary index. + // + // Delete - remove a global secondary index from the table. + // + // For more information, see Managing Global Secondary Indexes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GSI.OnlineOps.html) + // in the Amazon DynamoDB Developer Guide. + GlobalSecondaryIndexUpdates []*GlobalSecondaryIndexUpdate `type:"list"` + + // Represents the provisioned throughput settings for a specified table or index. + // The settings can be modified using the UpdateTable operation. + // + // For current minimum and maximum provisioned throughput values, see Limits + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // in the Amazon DynamoDB Developer Guide. + ProvisionedThroughput *ProvisionedThroughput `type:"structure"` + + // Represents the DynamoDB Streams configuration for the table. + // + // You will receive a ResourceInUseException if you attempt to enable a stream + // on a table that already has a stream, or if you attempt to disable a stream + // on a table which does not have a stream. + StreamSpecification *StreamSpecification `type:"structure"` + + // The name of the table to be updated. + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateTableInput"} + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + if s.AttributeDefinitions != nil { + for i, v := range s.AttributeDefinitions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AttributeDefinitions", i), err.(request.ErrInvalidParams)) + } + } + } + if s.GlobalSecondaryIndexUpdates != nil { + for i, v := range s.GlobalSecondaryIndexUpdates { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexUpdates", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ProvisionedThroughput != nil { + if err := s.ProvisionedThroughput.Validate(); err != nil { + invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of an UpdateTable operation. +type UpdateTableOutput struct { + _ struct{} `type:"structure"` + + // Represents the properties of a table. + TableDescription *TableDescription `type:"structure"` +} + +// String returns the string representation +func (s UpdateTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTableOutput) GoString() string { + return s.String() +} + +// Represents an operation to perform - either DeleteItem or PutItem. You can +// only request one of these operations, not both, in a single WriteRequest. +// If you do need to perform both of these operations, you will need to provide +// two separate WriteRequest objects. +type WriteRequest struct { + _ struct{} `type:"structure"` + + // A request to perform a DeleteItem operation. + DeleteRequest *DeleteRequest `type:"structure"` + + // A request to perform a PutItem operation. 
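+ //
+ // Illustrative sketch only (the item contents are hypothetical): a
+ // WriteRequest carrying a put, as it would appear in the RequestItems map
+ // of a BatchWriteItem call:
+ //
+ //    req := &dynamodb.WriteRequest{
+ //        PutRequest: &dynamodb.PutRequest{
+ //            Item: map[string]*dynamodb.AttributeValue{
+ //                "Artist": {S: aws.String("Acme Band")},
+ //            },
+ //        },
+ //    }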
+ PutRequest *PutRequest `type:"structure"` +} + +// String returns the string representation +func (s WriteRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WriteRequest) GoString() string { + return s.String() +} + +const ( + // @enum AttributeAction + AttributeActionAdd = "ADD" + // @enum AttributeAction + AttributeActionPut = "PUT" + // @enum AttributeAction + AttributeActionDelete = "DELETE" +) + +const ( + // @enum ComparisonOperator + ComparisonOperatorEq = "EQ" + // @enum ComparisonOperator + ComparisonOperatorNe = "NE" + // @enum ComparisonOperator + ComparisonOperatorIn = "IN" + // @enum ComparisonOperator + ComparisonOperatorLe = "LE" + // @enum ComparisonOperator + ComparisonOperatorLt = "LT" + // @enum ComparisonOperator + ComparisonOperatorGe = "GE" + // @enum ComparisonOperator + ComparisonOperatorGt = "GT" + // @enum ComparisonOperator + ComparisonOperatorBetween = "BETWEEN" + // @enum ComparisonOperator + ComparisonOperatorNotNull = "NOT_NULL" + // @enum ComparisonOperator + ComparisonOperatorNull = "NULL" + // @enum ComparisonOperator + ComparisonOperatorContains = "CONTAINS" + // @enum ComparisonOperator + ComparisonOperatorNotContains = "NOT_CONTAINS" + // @enum ComparisonOperator + ComparisonOperatorBeginsWith = "BEGINS_WITH" +) + +const ( + // @enum ConditionalOperator + ConditionalOperatorAnd = "AND" + // @enum ConditionalOperator + ConditionalOperatorOr = "OR" +) + +const ( + // @enum IndexStatus + IndexStatusCreating = "CREATING" + // @enum IndexStatus + IndexStatusUpdating = "UPDATING" + // @enum IndexStatus + IndexStatusDeleting = "DELETING" + // @enum IndexStatus + IndexStatusActive = "ACTIVE" +) + +const ( + // @enum KeyType + KeyTypeHash = "HASH" + // @enum KeyType + KeyTypeRange = "RANGE" +) + +const ( + // @enum ProjectionType + ProjectionTypeAll = "ALL" + // @enum ProjectionType + ProjectionTypeKeysOnly = "KEYS_ONLY" + // @enum ProjectionType + ProjectionTypeInclude = "INCLUDE" +) + +// Determines the level of detail about provisioned throughput consumption that +// is returned in the response: +// +// INDEXES - The response includes the aggregate ConsumedCapacity for the +// operation, together with ConsumedCapacity for each table and secondary index +// that was accessed. +// +// Note that some operations, such as GetItem and BatchGetItem, do not access +// any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity +// information for table(s). +// +// TOTAL - The response includes only the aggregate ConsumedCapacity for +// the operation. +// +// NONE - No ConsumedCapacity details are included in the response. 
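+//
+// For illustration (the input variable is hypothetical), client code opts in
+// to per-index capacity reporting with:
+//
+//	input.ReturnConsumedCapacity = aws.String(dynamodb.ReturnConsumedCapacityIndexes)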
+const ( + // @enum ReturnConsumedCapacity + ReturnConsumedCapacityIndexes = "INDEXES" + // @enum ReturnConsumedCapacity + ReturnConsumedCapacityTotal = "TOTAL" + // @enum ReturnConsumedCapacity + ReturnConsumedCapacityNone = "NONE" +) + +const ( + // @enum ReturnItemCollectionMetrics + ReturnItemCollectionMetricsSize = "SIZE" + // @enum ReturnItemCollectionMetrics + ReturnItemCollectionMetricsNone = "NONE" +) + +const ( + // @enum ReturnValue + ReturnValueNone = "NONE" + // @enum ReturnValue + ReturnValueAllOld = "ALL_OLD" + // @enum ReturnValue + ReturnValueUpdatedOld = "UPDATED_OLD" + // @enum ReturnValue + ReturnValueAllNew = "ALL_NEW" + // @enum ReturnValue + ReturnValueUpdatedNew = "UPDATED_NEW" +) + +const ( + // @enum ScalarAttributeType + ScalarAttributeTypeS = "S" + // @enum ScalarAttributeType + ScalarAttributeTypeN = "N" + // @enum ScalarAttributeType + ScalarAttributeTypeB = "B" +) + +const ( + // @enum Select + SelectAllAttributes = "ALL_ATTRIBUTES" + // @enum Select + SelectAllProjectedAttributes = "ALL_PROJECTED_ATTRIBUTES" + // @enum Select + SelectSpecificAttributes = "SPECIFIC_ATTRIBUTES" + // @enum Select + SelectCount = "COUNT" +) + +const ( + // @enum StreamViewType + StreamViewTypeNewImage = "NEW_IMAGE" + // @enum StreamViewType + StreamViewTypeOldImage = "OLD_IMAGE" + // @enum StreamViewType + StreamViewTypeNewAndOldImages = "NEW_AND_OLD_IMAGES" + // @enum StreamViewType + StreamViewTypeKeysOnly = "KEYS_ONLY" +) + +const ( + // @enum TableStatus + TableStatusCreating = "CREATING" + // @enum TableStatus + TableStatusUpdating = "UPDATING" + // @enum TableStatus + TableStatusDeleting = "DELETING" + // @enum TableStatus + TableStatusActive = "ACTIVE" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/customizations.go new file mode 100644 index 000000000..51843cd7a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/customizations.go @@ -0,0 +1,98 @@ +package dynamodb + +import ( + "bytes" + "hash/crc32" + "io" + "io/ioutil" + "math" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" +) + +type retryer struct { + client.DefaultRetryer +} + +func (d retryer) RetryRules(r *request.Request) time.Duration { + delay := time.Duration(math.Pow(2, float64(r.RetryCount))) * 50 + return delay * time.Millisecond +} + +func init() { + initClient = func(c *client.Client) { + r := retryer{} + if c.Config.MaxRetries == nil || aws.IntValue(c.Config.MaxRetries) == aws.UseServiceDefaultRetries { + r.NumMaxRetries = 10 + } else { + r.NumMaxRetries = *c.Config.MaxRetries + } + c.Retryer = r + + c.Handlers.Build.PushBack(disableCompression) + c.Handlers.Unmarshal.PushFront(validateCRC32) + } +} + +func drainBody(b io.ReadCloser, length int64) (out *bytes.Buffer, err error) { + if length < 0 { + length = 0 + } + buf := bytes.NewBuffer(make([]byte, 0, length)) + + if _, err = buf.ReadFrom(b); err != nil { + return nil, err + } + if err = b.Close(); err != nil { + return nil, err + } + return buf, nil +} + +func disableCompression(r *request.Request) { + r.HTTPRequest.Header.Set("Accept-Encoding", "identity") +} + +func validateCRC32(r *request.Request) { + if r.Error != nil { + return // already have an error, no need to verify CRC + } + + // Checksum validation is off, skip + if aws.BoolValue(r.Config.DisableComputeChecksums) { + return + } + + // Try to get CRC 
from response + header := r.HTTPResponse.Header.Get("X-Amz-Crc32") + if header == "" { + return // No header, skip + } + + expected, err := strconv.ParseUint(header, 10, 32) + if err != nil { + return // Could not determine CRC value, skip + } + + buf, err := drainBody(r.HTTPResponse.Body, r.HTTPResponse.ContentLength) + if err != nil { // failed to read the response body, skip + return + } + + // Reset body for subsequent reads + r.HTTPResponse.Body = ioutil.NopCloser(bytes.NewReader(buf.Bytes())) + + // Compute the CRC checksum + crc := crc32.ChecksumIEEE(buf.Bytes()) + + if crc != uint32(expected) { + // CRC does not match, set a retryable error + r.Retryable = aws.Bool(true) + r.Error = awserr.New("CRC32CheckFailed", "CRC32 integrity check failed", nil) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/customizations_test.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/customizations_test.go new file mode 100644 index 000000000..194b51794 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/customizations_test.go @@ -0,0 +1,106 @@ +package dynamodb_test + +import ( + "bytes" + "io/ioutil" + "net/http" + "os" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/dynamodb" +) + +var db *dynamodb.DynamoDB + +func TestMain(m *testing.M) { + db = dynamodb.New(unit.Session, &aws.Config{ + MaxRetries: aws.Int(2), + }) + db.Handlers.Send.Clear() // mock sending + + os.Exit(m.Run()) +} + +func mockCRCResponse(svc *dynamodb.DynamoDB, status int, body, crc string) (req *request.Request) { + header := http.Header{} + header.Set("x-amz-crc32", crc) + + req, _ = svc.ListTablesRequest(nil) + req.Handlers.Send.PushBack(func(*request.Request) { + req.HTTPResponse = &http.Response{ + ContentLength: int64(len(body)), + StatusCode: status, + Body: ioutil.NopCloser(bytes.NewReader([]byte(body))), + Header: header, + } + }) + req.Send() + return +} + +func TestDefaultRetryRules(t *testing.T) { + d := dynamodb.New(unit.Session, &aws.Config{MaxRetries: aws.Int(-1)}) + assert.Equal(t, d.MaxRetries(), 10) +} + +func TestCustomRetryRules(t *testing.T) { + d := dynamodb.New(unit.Session, &aws.Config{MaxRetries: aws.Int(2)}) + assert.Equal(t, d.MaxRetries(), 2) +} + +func TestValidateCRC32NoHeaderSkip(t *testing.T) { + req := mockCRCResponse(db, 200, "{}", "") + assert.NoError(t, req.Error) +} + +func TestValidateCRC32InvalidHeaderSkip(t *testing.T) { + req := mockCRCResponse(db, 200, "{}", "ABC") + assert.NoError(t, req.Error) +} + +func TestValidateCRC32AlreadyErrorSkip(t *testing.T) { + req := mockCRCResponse(db, 400, "{}", "1234") + assert.Error(t, req.Error) + + assert.NotEqual(t, "CRC32CheckFailed", req.Error.(awserr.Error).Code()) +} + +func TestValidateCRC32IsValid(t *testing.T) { + req := mockCRCResponse(db, 200, `{"TableNames":["A"]}`, "3090163698") + assert.NoError(t, req.Error) + + // CRC check does not affect output parsing + out := req.Data.(*dynamodb.ListTablesOutput) + assert.Equal(t, "A", *out.TableNames[0]) +} + +func TestValidateCRC32DoesNotMatch(t *testing.T) { + req := mockCRCResponse(db, 200, "{}", "1234") + assert.Error(t, req.Error) + + assert.Equal(t, "CRC32CheckFailed", req.Error.(awserr.Error).Code()) + assert.Equal(t, 2, req.RetryCount) +} + +func TestValidateCRC32DoesNotMatchNoComputeChecksum(t *testing.T) { + svc := dynamodb.New(unit.Session, 
&aws.Config{
+		MaxRetries:              aws.Int(2),
+		DisableComputeChecksums: aws.Bool(true),
+	})
+	svc.Handlers.Send.Clear() // mock sending
+
+	req := mockCRCResponse(svc, 200, `{"TableNames":["A"]}`, "1234")
+	assert.NoError(t, req.Error)
+
+	assert.Equal(t, 0, int(req.RetryCount))
+
+	// CRC check disabled. Does not affect output parsing
+	out := req.Data.(*dynamodb.ListTablesOutput)
+	assert.Equal(t, "A", *out.TableNames[0])
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/converter.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/converter.go
new file mode 100644
index 000000000..e38e41daf
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/converter.go
@@ -0,0 +1,443 @@
+package dynamodbattribute
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"runtime"
+	"strconv"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/dynamodb"
+)
+
+// ConvertToMap accepts a map[string]interface{} or struct and converts it to a
+// map[string]*dynamodb.AttributeValue.
+//
+// If in contains any structs, it is first JSON encoded/decoded to convert it
+// to a map[string]interface{}, so `json` struct tags are respected.
+//
+// Deprecated: Use MarshalMap instead
+func ConvertToMap(in interface{}) (item map[string]*dynamodb.AttributeValue, err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(runtime.Error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf(s)
+			} else {
+				err = r.(error)
+			}
+			item = nil
+		}
+	}()
+
+	if in == nil {
+		return nil, awserr.New("SerializationError",
+			"in must be a map[string]interface{} or struct, got ", nil)
+	}
+
+	v := reflect.ValueOf(in)
+	if v.Kind() != reflect.Struct && !(v.Kind() == reflect.Map && v.Type().Key().Kind() == reflect.String) {
+		return nil, awserr.New("SerializationError",
+			fmt.Sprintf("in must be a map[string]interface{} or struct, got %s",
+				v.Type().String()),
+			nil)
+	}
+
+	if isTyped(reflect.TypeOf(in)) {
+		var out map[string]interface{}
+		in = convertToUntyped(in, out)
+	}
+
+	item = make(map[string]*dynamodb.AttributeValue)
+	for k, v := range in.(map[string]interface{}) {
+		item[k] = convertTo(v)
+	}
+
+	return item, nil
+}
+
+// ConvertFromMap accepts a map[string]*dynamodb.AttributeValue and converts it to a
+// map[string]interface{} or struct.
+//
+// If v points to a struct, the result is first converted to a
+// map[string]interface{}, then JSON encoded/decoded to convert it to a struct,
+// so `json` struct tags are respected.
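+//
+// A minimal usage sketch, assuming item came from an earlier ConvertToMap
+// call and Record is a hypothetical struct type:
+//
+//	var rec Record // hypothetical example type
+//	if err := ConvertFromMap(item, &rec); err != nil {
+//		// handle the SerializationError
+//	}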
+//
+// Deprecated: Use UnmarshalMap instead
+func ConvertFromMap(item map[string]*dynamodb.AttributeValue, v interface{}) (err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(runtime.Error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf(s)
+			} else {
+				err = r.(error)
+			}
+			item = nil
+		}
+	}()
+
+	rv := reflect.ValueOf(v)
+	if rv.Kind() != reflect.Ptr || rv.IsNil() {
+		return awserr.New("SerializationError",
+			fmt.Sprintf("v must be a non-nil pointer to a map[string]interface{} or struct, got %s",
+				rv.Type()),
+			nil)
+	}
+	if rv.Elem().Kind() != reflect.Struct && !(rv.Elem().Kind() == reflect.Map && rv.Elem().Type().Key().Kind() == reflect.String) {
+		return awserr.New("SerializationError",
+			fmt.Sprintf("v must be a non-nil pointer to a map[string]interface{} or struct, got %s",
+				rv.Type()),
+			nil)
+	}
+
+	m := make(map[string]interface{})
+	for k, v := range item {
+		m[k] = convertFrom(v)
+	}
+
+	if isTyped(reflect.TypeOf(v)) {
+		err = convertToTyped(m, v)
+	} else {
+		rv.Elem().Set(reflect.ValueOf(m))
+	}
+
+	return err
+}
+
+// ConvertToList accepts an array or slice and converts it to a
+// []*dynamodb.AttributeValue.
+//
+// Converting []byte fields to dynamodb.AttributeValue is only currently supported
+// if the input is a map[string]interface{} type. []byte within typed structs are not
+// converted correctly and are converted into base64 strings. This is a known bug,
+// and will be fixed in a later release.
+//
+// If in contains any structs, it is first JSON encoded/decoded to convert it
+// to a []interface{}, so `json` struct tags are respected.
+//
+// Deprecated: Use MarshalList instead
+func ConvertToList(in interface{}) (item []*dynamodb.AttributeValue, err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(runtime.Error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf(s)
+			} else {
+				err = r.(error)
+			}
+			item = nil
+		}
+	}()
+
+	if in == nil {
+		return nil, awserr.New("SerializationError",
+			"in must be an array or slice, got ",
+			nil)
+	}
+
+	v := reflect.ValueOf(in)
+	if v.Kind() != reflect.Array && v.Kind() != reflect.Slice {
+		return nil, awserr.New("SerializationError",
+			fmt.Sprintf("in must be an array or slice, got %s",
+				v.Type().String()),
+			nil)
+	}
+
+	if isTyped(reflect.TypeOf(in)) {
+		var out []interface{}
+		in = convertToUntyped(in, out)
+	}
+
+	item = make([]*dynamodb.AttributeValue, 0, len(in.([]interface{})))
+	for _, v := range in.([]interface{}) {
+		item = append(item, convertTo(v))
+	}
+
+	return item, nil
+}
+
+// ConvertFromList accepts a []*dynamodb.AttributeValue and converts it to an array or
+// slice.
+//
+// If v contains any structs, the result is first converted to a
+// []interface{}, then JSON encoded/decoded to convert it to a typed array or
+// slice, so `json` struct tags are respected.
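+//
+// A minimal usage sketch, assuming avs came from an earlier ConvertToList
+// call; an untyped destination is used here:
+//
+//	var out []interface{}
+//	if err := ConvertFromList(avs, &out); err != nil {
+//		// handle the SerializationError
+//	}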
+//
+// Deprecated: Use UnmarshalList instead
+func ConvertFromList(item []*dynamodb.AttributeValue, v interface{}) (err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(runtime.Error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf(s)
+			} else {
+				err = r.(error)
+			}
+			item = nil
+		}
+	}()
+
+	rv := reflect.ValueOf(v)
+	if rv.Kind() != reflect.Ptr || rv.IsNil() {
+		return awserr.New("SerializationError",
+			fmt.Sprintf("v must be a non-nil pointer to an array or slice, got %s",
+				rv.Type()),
+			nil)
+	}
+	if rv.Elem().Kind() != reflect.Array && rv.Elem().Kind() != reflect.Slice {
+		return awserr.New("SerializationError",
+			fmt.Sprintf("v must be a non-nil pointer to an array or slice, got %s",
+				rv.Type()),
+			nil)
+	}
+
+	l := make([]interface{}, 0, len(item))
+	for _, v := range item {
+		l = append(l, convertFrom(v))
+	}
+
+	if isTyped(reflect.TypeOf(v)) {
+		err = convertToTyped(l, v)
+	} else {
+		rv.Elem().Set(reflect.ValueOf(l))
+	}
+
+	return err
+}
+
+// ConvertTo accepts any interface{} and converts it to a *dynamodb.AttributeValue.
+//
+// If in contains any structs, it is first JSON encoded/decoded to convert it
+// to an interface{}, so `json` struct tags are respected.
+//
+// Deprecated: Use Marshal instead
+func ConvertTo(in interface{}) (item *dynamodb.AttributeValue, err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(runtime.Error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf(s)
+			} else {
+				err = r.(error)
+			}
+			item = nil
+		}
+	}()
+
+	if in != nil && isTyped(reflect.TypeOf(in)) {
+		var out interface{}
+		in = convertToUntyped(in, out)
+	}
+
+	item = convertTo(in)
+	return item, nil
+}
+
+// ConvertFrom accepts a *dynamodb.AttributeValue and converts it to any interface{}.
+//
+// If v contains any structs, the result is first converted to an interface{},
+// then JSON encoded/decoded to convert it to a struct, so `json` struct tags
+// are respected.
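+//
+// A minimal round-trip sketch (Record and its Name field are hypothetical
+// example names):
+//
+//	av, _ := ConvertTo(Record{Name: "abc"})
+//	var rec Record
+//	err := ConvertFrom(av, &rec) // rec.Name == "abc" on success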
+// +// Deprecated: Use Unmarshal instead +func ConvertFrom(item *dynamodb.AttributeValue, v interface{}) (err error) { + defer func() { + if r := recover(); r != nil { + if e, ok := r.(runtime.Error); ok { + err = e + } else if s, ok := r.(string); ok { + err = fmt.Errorf(s) + } else { + err = r.(error) + } + item = nil + } + }() + + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + return awserr.New("SerializationError", + fmt.Sprintf("v must be a non-nil pointer to an interface{} or struct, got %s", + rv.Type()), + nil) + } + if rv.Elem().Kind() != reflect.Interface && rv.Elem().Kind() != reflect.Struct { + return awserr.New("SerializationError", + fmt.Sprintf("v must be a non-nil pointer to an interface{} or struct, got %s", + rv.Type()), + nil) + } + + res := convertFrom(item) + + if isTyped(reflect.TypeOf(v)) { + err = convertToTyped(res, v) + } else if res != nil { + rv.Elem().Set(reflect.ValueOf(res)) + } + + return err +} + +func isTyped(v reflect.Type) bool { + switch v.Kind() { + case reflect.Struct: + return true + case reflect.Array, reflect.Slice: + if isTyped(v.Elem()) { + return true + } + case reflect.Map: + if isTyped(v.Key()) { + return true + } + if isTyped(v.Elem()) { + return true + } + case reflect.Ptr: + return isTyped(v.Elem()) + } + return false +} + +func convertToUntyped(in, out interface{}) interface{} { + b, err := json.Marshal(in) + if err != nil { + panic(err) + } + + decoder := json.NewDecoder(bytes.NewReader(b)) + decoder.UseNumber() + err = decoder.Decode(&out) + if err != nil { + panic(err) + } + + return out +} + +func convertToTyped(in, out interface{}) error { + b, err := json.Marshal(in) + if err != nil { + return err + } + + decoder := json.NewDecoder(bytes.NewReader(b)) + return decoder.Decode(&out) +} + +func convertTo(in interface{}) *dynamodb.AttributeValue { + a := &dynamodb.AttributeValue{} + + if in == nil { + a.NULL = new(bool) + *a.NULL = true + return a + } + + if m, ok := in.(map[string]interface{}); ok { + a.M = make(map[string]*dynamodb.AttributeValue) + for k, v := range m { + a.M[k] = convertTo(v) + } + return a + } + + v := reflect.ValueOf(in) + switch v.Kind() { + case reflect.Bool: + a.BOOL = new(bool) + *a.BOOL = v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + a.N = new(string) + *a.N = strconv.FormatInt(v.Int(), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + a.N = new(string) + *a.N = strconv.FormatUint(v.Uint(), 10) + case reflect.Float32, reflect.Float64: + a.N = new(string) + *a.N = strconv.FormatFloat(v.Float(), 'f', -1, 64) + case reflect.String: + if n, ok := in.(json.Number); ok { + a.N = new(string) + *a.N = n.String() + } else { + a.S = new(string) + *a.S = v.String() + } + case reflect.Slice: + switch v.Type() { + case reflect.TypeOf(([]byte)(nil)): + a.B = v.Bytes() + default: + a.L = make([]*dynamodb.AttributeValue, v.Len()) + for i := 0; i < v.Len(); i++ { + a.L[i] = convertTo(v.Index(i).Interface()) + } + } + default: + panic(fmt.Sprintf("the type %s is not supported", v.Type().String())) + } + + return a +} + +func convertFrom(a *dynamodb.AttributeValue) interface{} { + if a.S != nil { + return *a.S + } + + if a.N != nil { + // Number is tricky b/c we don't know which numeric type to use. Here we + // simply try the different types from most to least restrictive. 
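+		// int64 is tried first, then uint64 for values above math.MaxInt64,
+		// and finally float64 as the most permissive representation.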
+		if n, err := strconv.ParseInt(*a.N, 10, 64); err == nil {
+			return int(n)
+		}
+		if n, err := strconv.ParseUint(*a.N, 10, 64); err == nil {
+			return uint(n)
+		}
+		n, err := strconv.ParseFloat(*a.N, 64)
+		if err != nil {
+			panic(err)
+		}
+		return n
+	}
+
+	if a.BOOL != nil {
+		return *a.BOOL
+	}
+
+	if a.NULL != nil {
+		return nil
+	}
+
+	if a.M != nil {
+		m := make(map[string]interface{})
+		for k, v := range a.M {
+			m[k] = convertFrom(v)
+		}
+		return m
+	}
+
+	if a.L != nil {
+		l := make([]interface{}, len(a.L))
+		for index, v := range a.L {
+			l[index] = convertFrom(v)
+		}
+		return l
+	}
+
+	if a.B != nil {
+		return a.B
+	}
+
+	panic(fmt.Sprintf("%#v is not a supported dynamodb.AttributeValue", a))
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/converter_examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/converter_examples_test.go
new file mode 100644
index 000000000..67b65fae3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/converter_examples_test.go
@@ -0,0 +1,80 @@
+package dynamodbattribute_test
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
+)
+
+func ExampleConvertTo() {
+	type Record struct {
+		MyField string
+		Letters []string
+		Numbers []int
+	}
+
+	r := Record{
+		MyField: "MyFieldValue",
+		Letters: []string{"a", "b", "c", "d"},
+		Numbers: []int{1, 2, 3},
+	}
+	av, err := dynamodbattribute.ConvertTo(r)
+	fmt.Println("err", err)
+	fmt.Println("MyField", av.M["MyField"])
+	fmt.Println("Letters", av.M["Letters"])
+	fmt.Println("Numbers", av.M["Numbers"])
+
+	// Output:
+	// err <nil>
+	// MyField {
+	//   S: "MyFieldValue"
+	// }
+	// Letters {
+	//   L: [
+	//     {
+	//       S: "a"
+	//     },
+	//     {
+	//       S: "b"
+	//     },
+	//     {
+	//       S: "c"
+	//     },
+	//     {
+	//       S: "d"
+	//     }
+	//   ]
+	// }
+	// Numbers {
+	//   L: [{
+	//       N: "1"
+	//     },{
+	//       N: "2"
+	//     },{
+	//       N: "3"
+	//     }]
+	// }
+}
+
+func ExampleConvertFrom() {
+	type Record struct {
+		MyField string
+		Letters []string
+		A2Num   map[string]int
+	}
+
+	r := Record{
+		MyField: "MyFieldValue",
+		Letters: []string{"a", "b", "c", "d"},
+		A2Num:   map[string]int{"a": 1, "b": 2, "c": 3},
+	}
+	av, err := dynamodbattribute.ConvertTo(r)
+
+	r2 := Record{}
+	err = dynamodbattribute.ConvertFrom(av, &r2)
+	fmt.Println(err, reflect.DeepEqual(r, r2))
+
+	// Output:
+	// <nil> true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/converter_test.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/converter_test.go
new file mode 100644
index 000000000..a73cd22c3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/converter_test.go
@@ -0,0 +1,498 @@
+package dynamodbattribute
+
+import (
+	"math"
+	"testing"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/dynamodb"
+)
+
+type mySimpleStruct struct {
+	String  string
+	Int     int
+	Uint    uint
+	Float32 float32
+	Float64 float64
+	Bool    bool
+	Null    *interface{}
+}
+
+type myComplexStruct struct {
+	Simple []mySimpleStruct
+}
+
+type converterTestInput struct {
+	input     interface{}
+	expected  interface{}
+	err       awserr.Error
+	inputType string // "enum" of types
+}
+
+var trueValue = true
+var falseValue = false
+
+var converterScalarInputs = []converterTestInput{
+	{
+		input:    nil,
+		expected: &dynamodb.AttributeValue{NULL: &trueValue},
+	},
+	{
+		input:    "some string",
+		expected: &dynamodb.AttributeValue{S: aws.String("some 
string")}, + }, + { + input: true, + expected: &dynamodb.AttributeValue{BOOL: &trueValue}, + }, + { + input: false, + expected: &dynamodb.AttributeValue{BOOL: &falseValue}, + }, + { + input: 3.14, + expected: &dynamodb.AttributeValue{N: aws.String("3.14")}, + }, + { + input: math.MaxFloat32, + expected: &dynamodb.AttributeValue{N: aws.String("340282346638528860000000000000000000000")}, + }, + { + input: math.MaxFloat64, + expected: &dynamodb.AttributeValue{N: aws.String("179769313486231570000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")}, + }, + { + input: 12, + expected: &dynamodb.AttributeValue{N: aws.String("12")}, + }, + { + input: mySimpleStruct{}, + expected: &dynamodb.AttributeValue{ + M: map[string]*dynamodb.AttributeValue{ + "Bool": {BOOL: &falseValue}, + "Float32": {N: aws.String("0")}, + "Float64": {N: aws.String("0")}, + "Int": {N: aws.String("0")}, + "Null": {NULL: &trueValue}, + "String": {S: aws.String("")}, + "Uint": {N: aws.String("0")}, + }, + }, + inputType: "mySimpleStruct", + }, +} + +var converterMapTestInputs = []converterTestInput{ + // Scalar tests + { + input: nil, + err: awserr.New("SerializationError", "in must be a map[string]interface{} or struct, got ", nil), + }, + { + input: map[string]interface{}{"string": "some string"}, + expected: map[string]*dynamodb.AttributeValue{"string": {S: aws.String("some string")}}, + }, + { + input: map[string]interface{}{"bool": true}, + expected: map[string]*dynamodb.AttributeValue{"bool": {BOOL: &trueValue}}, + }, + { + input: map[string]interface{}{"bool": false}, + expected: map[string]*dynamodb.AttributeValue{"bool": {BOOL: &falseValue}}, + }, + { + input: map[string]interface{}{"null": nil}, + expected: map[string]*dynamodb.AttributeValue{"null": {NULL: &trueValue}}, + }, + { + input: map[string]interface{}{"float": 3.14}, + expected: map[string]*dynamodb.AttributeValue{"float": {N: aws.String("3.14")}}, + }, + { + input: map[string]interface{}{"float": math.MaxFloat32}, + expected: map[string]*dynamodb.AttributeValue{"float": {N: aws.String("340282346638528860000000000000000000000")}}, + }, + { + input: map[string]interface{}{"float": math.MaxFloat64}, + expected: map[string]*dynamodb.AttributeValue{"float": {N: aws.String("179769313486231570000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")}}, + }, + { + input: map[string]interface{}{"int": int(12)}, + expected: map[string]*dynamodb.AttributeValue{"int": {N: aws.String("12")}}, + }, + { + input: map[string]interface{}{"byte": []byte{48, 49}}, + expected: map[string]*dynamodb.AttributeValue{"byte": {B: []byte{48, 49}}}, + }, + // List + { + input: map[string]interface{}{"list": []interface{}{"a string", 12, 3.14, true, nil, false}}, + expected: map[string]*dynamodb.AttributeValue{ + "list": { + L: []*dynamodb.AttributeValue{ + {S: aws.String("a string")}, + {N: aws.String("12")}, + {N: aws.String("3.14")}, + {BOOL: &trueValue}, + {NULL: &trueValue}, + {BOOL: &falseValue}, + }, + }, + }, + }, + // Map + { + input: map[string]interface{}{"map": map[string]interface{}{"nestedint": 12}}, + 
expected: map[string]*dynamodb.AttributeValue{
+			"map": {
+				M: map[string]*dynamodb.AttributeValue{
+					"nestedint": {
+						N: aws.String("12"),
+					},
+				},
+			},
+		},
+	},
+	// Structs
+	{
+		input: mySimpleStruct{},
+		expected: map[string]*dynamodb.AttributeValue{
+			"Bool":    {BOOL: &falseValue},
+			"Float32": {N: aws.String("0")},
+			"Float64": {N: aws.String("0")},
+			"Int":     {N: aws.String("0")},
+			"Null":    {NULL: &trueValue},
+			"String":  {S: aws.String("")},
+			"Uint":    {N: aws.String("0")},
+		},
+		inputType: "mySimpleStruct",
+	},
+	{
+		input: myComplexStruct{},
+		expected: map[string]*dynamodb.AttributeValue{
+			"Simple": {NULL: &trueValue},
+		},
+		inputType: "myComplexStruct",
+	},
+	{
+		input: myComplexStruct{Simple: []mySimpleStruct{{Int: -2}, {Uint: 5}}},
+		expected: map[string]*dynamodb.AttributeValue{
+			"Simple": {
+				L: []*dynamodb.AttributeValue{
+					{
+						M: map[string]*dynamodb.AttributeValue{
+							"Bool":    {BOOL: &falseValue},
+							"Float32": {N: aws.String("0")},
+							"Float64": {N: aws.String("0")},
+							"Int":     {N: aws.String("-2")},
+							"Null":    {NULL: &trueValue},
+							"String":  {S: aws.String("")},
+							"Uint":    {N: aws.String("0")},
+						},
+					},
+					{
+						M: map[string]*dynamodb.AttributeValue{
+							"Bool":    {BOOL: &falseValue},
+							"Float32": {N: aws.String("0")},
+							"Float64": {N: aws.String("0")},
+							"Int":     {N: aws.String("0")},
+							"Null":    {NULL: &trueValue},
+							"String":  {S: aws.String("")},
+							"Uint":    {N: aws.String("5")},
+						},
+					},
+				},
+			},
+		},
+		inputType: "myComplexStruct",
+	},
+}
+
+var converterListTestInputs = []converterTestInput{
+	{
+		input: nil,
+		err:   awserr.New("SerializationError", "in must be an array or slice, got ", nil),
+	},
+	{
+		input:    []interface{}{},
+		expected: []*dynamodb.AttributeValue{},
+	},
+	{
+		input: []interface{}{"a string", 12, 3.14, true, nil, false},
+		expected: []*dynamodb.AttributeValue{
+			{S: aws.String("a string")},
+			{N: aws.String("12")},
+			{N: aws.String("3.14")},
+			{BOOL: &trueValue},
+			{NULL: &trueValue},
+			{BOOL: &falseValue},
+		},
+	},
+	{
+		input: []mySimpleStruct{{}},
+		expected: []*dynamodb.AttributeValue{
+			{
+				M: map[string]*dynamodb.AttributeValue{
+					"Bool":    {BOOL: &falseValue},
+					"Float32": {N: aws.String("0")},
+					"Float64": {N: aws.String("0")},
+					"Int":     {N: aws.String("0")},
+					"Null":    {NULL: &trueValue},
+					"String":  {S: aws.String("")},
+					"Uint":    {N: aws.String("0")},
+				},
+			},
+		},
+		inputType: "mySimpleStruct",
+	},
+}
+
+func TestConvertTo(t *testing.T) {
+	for _, test := range converterScalarInputs {
+		testConvertTo(t, test)
+	}
+}
+
+func testConvertTo(t *testing.T, test converterTestInput) {
+	actual, err := ConvertTo(test.input)
+	if test.err != nil {
+		if err == nil {
+			t.Errorf("ConvertTo with input %#v returned %#v, expected error `%s`", test.input, actual, test.err)
+		} else if err.Error() != test.err.Error() {
+			t.Errorf("ConvertTo with input %#v returned error `%s`, expected error `%s`", test.input, err, test.err)
+		}
+	} else {
+		if err != nil {
+			t.Errorf("ConvertTo with input %#v returned error `%s`", test.input, err)
+		}
+		compareObjects(t, test.expected, actual)
+	}
+}
+
+func TestConvertFrom(t *testing.T) {
+	// Using the same inputs from TestConvertTo, test the reverse mapping.
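+	// Error-only cases are skipped below: they produced no AttributeValue to
+	// convert back from.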
+	for _, test := range converterScalarInputs {
+		if test.expected != nil {
+			testConvertFrom(t, test)
+		}
+	}
+}
+
+func testConvertFrom(t *testing.T, test converterTestInput) {
+	switch test.inputType {
+	case "mySimpleStruct":
+		var actual mySimpleStruct
+		if err := ConvertFrom(test.expected.(*dynamodb.AttributeValue), &actual); err != nil {
+			t.Errorf("ConvertFrom with input %#v returned error `%s`", test.expected, err)
+		}
+		compareObjects(t, test.input, actual)
+	case "myComplexStruct":
+		var actual myComplexStruct
+		if err := ConvertFrom(test.expected.(*dynamodb.AttributeValue), &actual); err != nil {
+			t.Errorf("ConvertFrom with input %#v returned error `%s`", test.expected, err)
+		}
+		compareObjects(t, test.input, actual)
+	default:
+		var actual interface{}
+		if err := ConvertFrom(test.expected.(*dynamodb.AttributeValue), &actual); err != nil {
+			t.Errorf("ConvertFrom with input %#v returned error `%s`", test.expected, err)
+		}
+		compareObjects(t, test.input, actual)
+	}
+}
+
+func TestConvertFromError(t *testing.T) {
+	// Test that we get an error using ConvertFrom to convert to a map.
+	var actual map[string]interface{}
+	expected := awserr.New("SerializationError", `v must be a non-nil pointer to an interface{} or struct, got *map[string]interface {}`, nil).Error()
+	if err := ConvertFrom(nil, &actual); err == nil {
+		t.Errorf("ConvertFrom with input %#v returned no error, expected error `%s`", nil, expected)
+	} else if err.Error() != expected {
+		t.Errorf("ConvertFrom with input %#v returned error `%s`, expected error `%s`", nil, err, expected)
+	}
+
+	// Test that we get an error using ConvertFrom to convert to a list.
+	var actual2 []interface{}
+	expected = awserr.New("SerializationError", `v must be a non-nil pointer to an interface{} or struct, got *[]interface {}`, nil).Error()
+	if err := ConvertFrom(nil, &actual2); err == nil {
+		t.Errorf("ConvertFrom with input %#v returned no error, expected error `%s`", nil, expected)
+	} else if err.Error() != expected {
+		t.Errorf("ConvertFrom with input %#v returned error `%s`, expected error `%s`", nil, err, expected)
+	}
+}
+
+func TestConvertToMap(t *testing.T) {
+	for _, test := range converterMapTestInputs {
+		testConvertToMap(t, test)
+	}
+}
+
+func testConvertToMap(t *testing.T, test converterTestInput) {
+	actual, err := ConvertToMap(test.input)
+	if test.err != nil {
+		if err == nil {
+			t.Errorf("ConvertToMap with input %#v returned %#v, expected error `%s`", test.input, actual, test.err)
+		} else if err.Error() != test.err.Error() {
+			t.Errorf("ConvertToMap with input %#v returned error `%s`, expected error `%s`", test.input, err, test.err)
+		}
+	} else {
+		if err != nil {
+			t.Errorf("ConvertToMap with input %#v returned error `%s`", test.input, err)
+		}
+		compareObjects(t, test.expected, actual)
+	}
+}
+
+func TestConvertFromMap(t *testing.T) {
+	// Using the same inputs from TestConvertToMap, test the reverse mapping.
+	for _, test := range converterMapTestInputs {
+		if test.expected != nil {
+			testConvertFromMap(t, test)
+		}
+	}
+}
+
+func testConvertFromMap(t *testing.T, test converterTestInput) {
+	switch test.inputType {
+	case "mySimpleStruct":
+		var actual mySimpleStruct
+		if err := ConvertFromMap(test.expected.(map[string]*dynamodb.AttributeValue), &actual); err != nil {
+			t.Errorf("ConvertFromMap with input %#v returned error `%s`", test.expected, err)
+		}
+		compareObjects(t, test.input, actual)
+	case "myComplexStruct":
+		var actual myComplexStruct
+		if err := ConvertFromMap(test.expected.(map[string]*dynamodb.AttributeValue), &actual); err != nil {
+			t.Errorf("ConvertFromMap with input %#v returned error `%s`", test.expected, err)
+		}
+		compareObjects(t, test.input, actual)
+	default:
+		var actual map[string]interface{}
+		if err := ConvertFromMap(test.expected.(map[string]*dynamodb.AttributeValue), &actual); err != nil {
+			t.Errorf("ConvertFromMap with input %#v returned error `%s`", test.expected, err)
+		}
+		compareObjects(t, test.input, actual)
+	}
+}
+
+func TestConvertFromMapError(t *testing.T) {
+	// Test that we get an error using ConvertFromMap to convert to an interface{}.
+	var actual interface{}
+	expected := awserr.New("SerializationError", `v must be a non-nil pointer to a map[string]interface{} or struct, got *interface {}`, nil).Error()
+	if err := ConvertFromMap(nil, &actual); err == nil {
+		t.Errorf("ConvertFromMap with input %#v returned no error, expected error `%s`", nil, expected)
+	} else if err.Error() != expected {
+		t.Errorf("ConvertFromMap with input %#v returned error `%s`, expected error `%s`", nil, err, expected)
+	}
+
+	// Test that we get an error using ConvertFromMap to convert to a slice.
+	var actual2 []interface{}
+	expected = awserr.New("SerializationError", `v must be a non-nil pointer to a map[string]interface{} or struct, got *[]interface {}`, nil).Error()
+	if err := ConvertFromMap(nil, &actual2); err == nil {
+		t.Errorf("ConvertFromMap with input %#v returned no error, expected error `%s`", nil, expected)
+	} else if err.Error() != expected {
+		t.Errorf("ConvertFromMap with input %#v returned error `%s`, expected error `%s`", nil, err, expected)
+	}
+}
+
+func TestConvertToList(t *testing.T) {
+	for _, test := range converterListTestInputs {
+		testConvertToList(t, test)
+	}
+}
+
+func testConvertToList(t *testing.T, test converterTestInput) {
+	actual, err := ConvertToList(test.input)
+	if test.err != nil {
+		if err == nil {
+			t.Errorf("ConvertToList with input %#v returned %#v, expected error `%s`", test.input, actual, test.err)
+		} else if err.Error() != test.err.Error() {
+			t.Errorf("ConvertToList with input %#v returned error `%s`, expected error `%s`", test.input, err, test.err)
+		}
+	} else {
+		if err != nil {
+			t.Errorf("ConvertToList with input %#v returned error `%s`", test.input, err)
+		}
+		compareObjects(t, test.expected, actual)
+	}
+}
+
+func TestConvertFromList(t *testing.T) {
+	// Using the same inputs from TestConvertToList, test the reverse mapping.
+	for _, test := range converterListTestInputs {
+		if test.expected != nil {
+			testConvertFromList(t, test)
+		}
+	}
+}
+
+func testConvertFromList(t *testing.T, test converterTestInput) {
+	switch test.inputType {
+	case "mySimpleStruct":
+		var actual []mySimpleStruct
+		if err := ConvertFromList(test.expected.([]*dynamodb.AttributeValue), &actual); err != nil {
+			t.Errorf("ConvertFromList with input %#v returned error `%s`", test.expected, err)
+		}
+		compareObjects(t, test.input, actual)
+	case "myComplexStruct":
+		var actual []myComplexStruct
+		if err := ConvertFromList(test.expected.([]*dynamodb.AttributeValue), &actual); err != nil {
+			t.Errorf("ConvertFromList with input %#v returned error `%s`", test.expected, err)
+		}
+		compareObjects(t, test.input, actual)
+	default:
+		var actual []interface{}
+		if err := ConvertFromList(test.expected.([]*dynamodb.AttributeValue), &actual); err != nil {
+			t.Errorf("ConvertFromList with input %#v returned error `%s`", test.expected, err)
+		}
+		compareObjects(t, test.input, actual)
+	}
+}
+
+func TestConvertFromListError(t *testing.T) {
+	// Test that we get an error using ConvertFromList to convert to a map.
+	var actual map[string]interface{}
+	expected := awserr.New("SerializationError", `v must be a non-nil pointer to an array or slice, got *map[string]interface {}`, nil).Error()
+	if err := ConvertFromList(nil, &actual); err == nil {
+		t.Errorf("ConvertFromList with input %#v returned no error, expected error `%s`", nil, expected)
+	} else if err.Error() != expected {
+		t.Errorf("ConvertFromList with input %#v returned error `%s`, expected error `%s`", nil, err, expected)
+	}
+
+	// Test that we get an error using ConvertFromList to convert to a struct.
+	var actual2 myComplexStruct
+	expected = awserr.New("SerializationError", `v must be a non-nil pointer to an array or slice, got *dynamodbattribute.myComplexStruct`, nil).Error()
+	if err := ConvertFromList(nil, &actual2); err == nil {
+		t.Errorf("ConvertFromList with input %#v returned no error, expected error `%s`", nil, expected)
+	} else if err.Error() != expected {
+		t.Errorf("ConvertFromList with input %#v returned error `%s`, expected error `%s`", nil, err, expected)
+	}
+
+	// Test that we get an error using ConvertFromList to convert to an interface{}.
+	var actual3 interface{}
+	expected = awserr.New("SerializationError", `v must be a non-nil pointer to an array or slice, got *interface {}`, nil).Error()
+	if err := ConvertFromList(nil, &actual3); err == nil {
+		t.Errorf("ConvertFromList with input %#v returned no error, expected error `%s`", nil, expected)
+	} else if err.Error() != expected {
+		t.Errorf("ConvertFromList with input %#v returned error `%s`, expected error `%s`", nil, err, expected)
+	}
+}
+
+func BenchmarkConvertTo(b *testing.B) {
+	d := mySimpleStruct{
+		String:  "abc",
+		Int:     123,
+		Uint:    123,
+		Float32: 123.321,
+		Float64: 123.321,
+		Bool:    true,
+		Null:    nil,
+	}
+	for i := 0; i < b.N; i++ {
+		_, err := ConvertTo(d)
+		if err != nil {
+			b.Fatal("unexpected error", err)
+		}
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/decode.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/decode.go
new file mode 100644
index 000000000..802009014
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/decode.go
@@ -0,0 +1,661 @@
+package dynamodbattribute
+
+import (
+	"fmt"
+	"reflect"
+	"strconv"
+	"time"
+
+	"github.com/aws/aws-sdk-go/service/dynamodb"
+)
+
+// An Unmarshaler is an interface to provide custom unmarshaling of
+// AttributeValues. Use this to provide custom logic determining
+// how AttributeValues should be unmarshaled.
+//
+//	type ExampleUnmarshaler struct {
+//		Value int
+//	}
+//
+//	func (u *ExampleUnmarshaler) UnmarshalDynamoDBAttributeValue(av *dynamodb.AttributeValue) error {
+//		if av.N == nil {
+//			return nil
+//		}
+//
+//		n, err := strconv.ParseInt(*av.N, 10, 0)
+//		if err != nil {
+//			return err
+//		}
+//
+//		u.Value = int(n)
+//		return nil
+//	}
+type Unmarshaler interface {
+	UnmarshalDynamoDBAttributeValue(*dynamodb.AttributeValue) error
+}
+
+// Unmarshal will unmarshal DynamoDB AttributeValues to Go value types.
+// Both generic interface{} and concrete types are valid unmarshal
+// destination types.
+//
+// Unmarshal will allocate maps, slices, and pointers as needed to
+// unmarshal the AttributeValue into the provided type value.
+//
+// When unmarshaling AttributeValues into structs Unmarshal matches
+// the field names of the struct to the AttributeValue Map keys.
+// Initially it will look for exact field name matching, but will
+// fall back to a case-insensitive match if no exact match is found.
+//
+// With the exception of omitempty, omitemptyelem, binaryset, numberset
+// and stringset all struct tags used by Marshal are also used by
+// Unmarshal.
+//
+// When decoding AttributeValues to interfaces Unmarshal will use the
+// following types.
+//
+//	[]byte,                 AV Binary (B)
+//	[][]byte,               AV Binary Set (BS)
+//	bool,                   AV Boolean (BOOL)
+//	[]interface{},          AV List (L)
+//	map[string]interface{}, AV Map (M)
+//	float64,                AV Number (N)
+//	Number,                 AV Number (N) with UseNumber set
+//	[]float64,              AV Number Set (NS)
+//	[]Number,               AV Number Set (NS) with UseNumber set
+//	string,                 AV String (S)
+//	[]string,               AV String Set (SS)
+//
+// If the Decoder option UseNumber is set, numbers will be unmarshaled
+// as Number values instead of float64. Use this to maintain the original
+// string formatting of the number as it was represented in the AttributeValue.
+// It also provides additional opportunities to parse the number
+// string based on individual use cases.
+//
+// When unmarshaling, any error that occurs will halt the unmarshal
+// and the error will be returned.
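+//
+// A short unmarshaling sketch into a typed destination (Record is a
+// hypothetical example type; av is assumed to hold a Map AttributeValue):
+//
+//	var rec Record
+//	err := dynamodbattribute.Unmarshal(av, &rec)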
+//
+// The output value provided must be a non-nil pointer
+func Unmarshal(av *dynamodb.AttributeValue, out interface{}) error {
+	return NewDecoder().Decode(av, out)
+}
+
+// UnmarshalMap is an alias for Unmarshal which unmarshals from
+// a map of AttributeValues.
+//
+// The output value provided must be a non-nil pointer
+func UnmarshalMap(m map[string]*dynamodb.AttributeValue, out interface{}) error {
+	return NewDecoder().Decode(&dynamodb.AttributeValue{M: m}, out)
+}
+
+// UnmarshalList is an alias for Unmarshal which unmarshals
+// a slice of AttributeValues.
+//
+// The output value provided must be a non-nil pointer
+func UnmarshalList(l []*dynamodb.AttributeValue, out interface{}) error {
+	return NewDecoder().Decode(&dynamodb.AttributeValue{L: l}, out)
+}
+
+// A Decoder provides unmarshaling of AttributeValues to Go value types.
+type Decoder struct {
+	MarshalOptions
+
+	// Instructs the decoder to decode AttributeValue Numbers as
+	// Number type instead of float64 when the destination type
+	// is interface{}. Similar to encoding/json.Number
+	UseNumber bool
+}
+
+// NewDecoder creates a new Decoder with default configuration. Use
+// the `opts` functional options to override the default configuration.
+func NewDecoder(opts ...func(*Decoder)) *Decoder {
+	d := &Decoder{
+		MarshalOptions: MarshalOptions{
+			SupportJSONTags: true,
+		},
+	}
+	for _, o := range opts {
+		o(d)
+	}
+
+	return d
+}
+
+// Decode will unmarshal an AttributeValue into a Go value type. An error
+// will be returned if the decoder is unable to unmarshal the AttributeValue
+// to the provided Go value type.
+//
+// The output value provided must be a non-nil pointer
+func (d *Decoder) Decode(av *dynamodb.AttributeValue, out interface{}, opts ...func(*Decoder)) error {
+	v := reflect.ValueOf(out)
+	if v.Kind() != reflect.Ptr || v.IsNil() || !v.IsValid() {
+		return &InvalidUnmarshalError{Type: reflect.TypeOf(out)}
+	}
+
+	return d.decode(av, v, tag{})
+}
+
+var stringInterfaceMapType = reflect.TypeOf(map[string]interface{}(nil))
+var byteSliceType = reflect.TypeOf([]byte(nil))
+var byteSliceSlicetype = reflect.TypeOf([][]byte(nil))
+var numberType = reflect.TypeOf(Number(""))
+
+func (d *Decoder) decode(av *dynamodb.AttributeValue, v reflect.Value, fieldTag tag) error {
+	var u Unmarshaler
+	if av == nil || av.NULL != nil {
+		u, v = indirect(v, true)
+		if u != nil {
+			return u.UnmarshalDynamoDBAttributeValue(av)
+		}
+		return d.decodeNull(v)
+	}
+
+	u, v = indirect(v, false)
+	if u != nil {
+		return u.UnmarshalDynamoDBAttributeValue(av)
+	}
+
+	switch {
+	case len(av.B) != 0:
+		return d.decodeBinary(av.B, v)
+	case av.BOOL != nil:
+		return d.decodeBool(av.BOOL, v)
+	case len(av.BS) != 0:
+		return d.decodeBinarySet(av.BS, v)
+	case len(av.L) != 0:
+		return d.decodeList(av.L, v)
+	case len(av.M) != 0:
+		return d.decodeMap(av.M, v)
+	case av.N != nil:
+		return d.decodeNumber(av.N, v)
+	case len(av.NS) != 0:
+		return d.decodeNumberSet(av.NS, v)
+	case av.S != nil:
+		return d.decodeString(av.S, v, fieldTag)
+	case len(av.SS) != 0:
+		return d.decodeStringSet(av.SS, v)
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeBinary(b []byte, v reflect.Value) error {
+	if v.Kind() == reflect.Interface {
+		buf := make([]byte, len(b))
+		copy(buf, b)
+		v.Set(reflect.ValueOf(buf))
+		return nil
+	}
+
+	if v.Kind() != reflect.Slice {
+		return &UnmarshalTypeError{Value: "binary", Type: v.Type()}
+	}
+
+	if v.Type() == byteSliceType {
+		// Optimization for []byte types
+		if v.IsNil() || v.Cap() < len(b) {
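+			// Not enough capacity to reuse; allocate a fresh slice of exactly len(b).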
v.Set(reflect.MakeSlice(byteSliceType, len(b), len(b))) + } else if v.Len() != len(b) { + v.SetLen(len(b)) + } + copy(v.Interface().([]byte), b) + return nil + } + + switch v.Type().Elem().Kind() { + case reflect.Uint8: + // Fallback to reflection copy for type aliased of []byte type + if v.IsNil() || v.Cap() < len(b) { + v.Set(reflect.MakeSlice(v.Type(), len(b), len(b))) + } else if v.Len() != len(b) { + v.SetLen(len(b)) + } + for i := 0; i < len(b); i++ { + v.Index(i).SetUint(uint64(b[i])) + } + default: + if v.Kind() == reflect.Array && v.Type().Elem().Kind() == reflect.Uint8 { + reflect.Copy(v, reflect.ValueOf(b)) + break + } + return &UnmarshalTypeError{Value: "binary", Type: v.Type()} + } + + return nil +} + +func (d *Decoder) decodeBool(b *bool, v reflect.Value) error { + switch v.Kind() { + case reflect.Bool, reflect.Interface: + v.Set(reflect.ValueOf(*b)) + default: + return &UnmarshalTypeError{Value: "bool", Type: v.Type()} + } + + return nil +} + +func (d *Decoder) decodeBinarySet(bs [][]byte, v reflect.Value) error { + switch v.Kind() { + case reflect.Slice: + // Make room for the slice elements if needed + if v.IsNil() || v.Cap() < len(bs) { + // What about if ignoring nil/empty values? + v.Set(reflect.MakeSlice(v.Type(), 0, len(bs))) + } + case reflect.Array: + // Limited to capacity of existing array. + case reflect.Interface: + set := make([][]byte, len(bs)) + for i, b := range bs { + if err := d.decodeBinary(b, reflect.ValueOf(&set[i]).Elem()); err != nil { + return err + } + } + v.Set(reflect.ValueOf(set)) + return nil + default: + return &UnmarshalTypeError{Value: "binary set", Type: v.Type()} + } + + for i := 0; i < v.Cap() && i < len(bs); i++ { + v.SetLen(i + 1) + u, elem := indirect(v.Index(i), false) + if u != nil { + return u.UnmarshalDynamoDBAttributeValue(&dynamodb.AttributeValue{BS: bs}) + } + if err := d.decodeBinary(bs[i], elem); err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) decodeNumber(n *string, v reflect.Value) error { + switch v.Kind() { + case reflect.Interface: + i, err := d.decodeNumberToInterface(n) + if err != nil { + return err + } + v.Set(reflect.ValueOf(i)) + return nil + case reflect.String: + if v.Type() == numberType { // Support Number value type + v.Set(reflect.ValueOf(Number(*n))) + return nil + } + v.Set(reflect.ValueOf(*n)) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + i, err := strconv.ParseInt(*n, 10, 64) + if err != nil { + return err + } + if v.OverflowInt(i) { + return &UnmarshalTypeError{ + Value: fmt.Sprintf("number overflow, %s", *n), + Type: v.Type(), + } + } + v.SetInt(i) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + i, err := strconv.ParseUint(*n, 10, 64) + if err != nil { + return err + } + if v.OverflowUint(i) { + return &UnmarshalTypeError{ + Value: fmt.Sprintf("number overflow, %s", *n), + Type: v.Type(), + } + } + v.SetUint(i) + case reflect.Float32, reflect.Float64: + i, err := strconv.ParseFloat(*n, 64) + if err != nil { + return err + } + if v.OverflowFloat(i) { + return &UnmarshalTypeError{ + Value: fmt.Sprintf("number overflow, %s", *n), + Type: v.Type(), + } + } + v.SetFloat(i) + default: + return &UnmarshalTypeError{Value: "number", Type: v.Type()} + } + + return nil +} + +func (d *Decoder) decodeNumberToInterface(n *string) (interface{}, error) { + if d.UseNumber { + return Number(*n), nil + } + + // Default to float64 for all numbers + return strconv.ParseFloat(*n, 64) +} + +func (d *Decoder) decodeNumberSet(ns 
[]*string, v reflect.Value) error { + switch v.Kind() { + case reflect.Slice: + // Make room for the slice elements if needed + if v.IsNil() || v.Cap() < len(ns) { + // What about if ignoring nil/empty values? + v.Set(reflect.MakeSlice(v.Type(), 0, len(ns))) + } + case reflect.Array: + // Limited to capacity of existing array. + case reflect.Interface: + if d.UseNumber { + set := make([]Number, len(ns)) + for i, n := range ns { + if err := d.decodeNumber(n, reflect.ValueOf(&set[i]).Elem()); err != nil { + return err + } + } + v.Set(reflect.ValueOf(set)) + } else { + set := make([]float64, len(ns)) + for i, n := range ns { + if err := d.decodeNumber(n, reflect.ValueOf(&set[i]).Elem()); err != nil { + return err + } + } + v.Set(reflect.ValueOf(set)) + } + return nil + default: + return &UnmarshalTypeError{Value: "number set", Type: v.Type()} + } + + for i := 0; i < v.Cap() && i < len(ns); i++ { + v.SetLen(i + 1) + u, elem := indirect(v.Index(i), false) + if u != nil { + return u.UnmarshalDynamoDBAttributeValue(&dynamodb.AttributeValue{NS: ns}) + } + if err := d.decodeNumber(ns[i], elem); err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) decodeList(avList []*dynamodb.AttributeValue, v reflect.Value) error { + switch v.Kind() { + case reflect.Slice: + // Make room for the slice elements if needed + if v.IsNil() || v.Cap() < len(avList) { + // What about if ignoring nil/empty values? + v.Set(reflect.MakeSlice(v.Type(), 0, len(avList))) + } + case reflect.Array: + // Limited to capacity of existing array. + case reflect.Interface: + s := make([]interface{}, len(avList)) + for i, av := range avList { + if err := d.decode(av, reflect.ValueOf(&s[i]).Elem(), tag{}); err != nil { + return err + } + } + v.Set(reflect.ValueOf(s)) + return nil + default: + return &UnmarshalTypeError{Value: "list", Type: v.Type()} + } + + // If v is not a slice, array + for i := 0; i < v.Cap() && i < len(avList); i++ { + v.SetLen(i + 1) + if err := d.decode(avList[i], v.Index(i), tag{}); err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) decodeMap(avMap map[string]*dynamodb.AttributeValue, v reflect.Value) error { + switch v.Kind() { + case reflect.Map: + t := v.Type() + if t.Key().Kind() != reflect.String { + return &UnmarshalTypeError{Value: "map string key", Type: t.Key()} + } + if v.IsNil() { + v.Set(reflect.MakeMap(t)) + } + case reflect.Struct: + case reflect.Interface: + v.Set(reflect.MakeMap(stringInterfaceMapType)) + v = v.Elem() + default: + return &UnmarshalTypeError{Value: "map", Type: v.Type()} + } + + if v.Kind() == reflect.Map { + for k, av := range avMap { + key := reflect.ValueOf(k) + elem := reflect.New(v.Type().Elem()).Elem() + if err := d.decode(av, elem, tag{}); err != nil { + return err + } + v.SetMapIndex(key, elem) + } + } else if v.Kind() == reflect.Struct { + fields := unionStructFields(v.Type(), d.MarshalOptions) + for k, av := range avMap { + if f, ok := fieldByName(fields, k); ok { + fv := v.FieldByIndex(f.Index) + if err := d.decode(av, fv, f.tag); err != nil { + return err + } + } + } + } + + return nil +} + +func (d *Decoder) decodeNull(v reflect.Value) error { + if v.IsValid() && v.CanSet() { + v.Set(reflect.Zero(v.Type())) + } + + return nil +} + +func (d *Decoder) decodeString(s *string, v reflect.Value, fieldTag tag) error { + if fieldTag.AsString { + return d.decodeNumber(s, v) + } + + // To maintain backwards compatibility with ConvertFrom family of methods which + // converted strings to time.Time structs + if _, ok := 
v.Interface().(time.Time); ok {
+		t, err := time.Parse(time.RFC3339, *s)
+		if err != nil {
+			return err
+		}
+		v.Set(reflect.ValueOf(t))
+		return nil
+	}
+
+	switch v.Kind() {
+	case reflect.String:
+		v.SetString(*s)
+	case reflect.Interface:
+		// Ensure type aliasing is handled properly
+		v.Set(reflect.ValueOf(*s).Convert(v.Type()))
+	default:
+		return &UnmarshalTypeError{Value: "string", Type: v.Type()}
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeStringSet(ss []*string, v reflect.Value) error {
+	switch v.Kind() {
+	case reflect.Slice:
+		// Make room for the slice elements if needed
+		if v.IsNil() || v.Cap() < len(ss) {
+			v.Set(reflect.MakeSlice(v.Type(), 0, len(ss)))
+		}
+	case reflect.Array:
+		// Limited to capacity of existing array.
+	case reflect.Interface:
+		set := make([]string, len(ss))
+		for i, s := range ss {
+			if err := d.decodeString(s, reflect.ValueOf(&set[i]).Elem(), tag{}); err != nil {
+				return err
+			}
+		}
+		v.Set(reflect.ValueOf(set))
+		return nil
+	default:
+		return &UnmarshalTypeError{Value: "string set", Type: v.Type()}
+	}
+
+	for i := 0; i < v.Cap() && i < len(ss); i++ {
+		v.SetLen(i + 1)
+		u, elem := indirect(v.Index(i), false)
+		if u != nil {
+			return u.UnmarshalDynamoDBAttributeValue(&dynamodb.AttributeValue{SS: ss})
+		}
+		if err := d.decodeString(ss[i], elem, tag{}); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// indirect will walk a value's interface or pointer value types, returning
+// the final value or the value an unmarshaler is defined on.
+//
+// Based on the encoding/json reflect value type indirection in the Go stdlib,
+// https://golang.org/src/encoding/json/decode.go indirect func.
+func indirect(v reflect.Value, decodingNull bool) (Unmarshaler, reflect.Value) {
+	if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+		v = v.Addr()
+	}
+	for {
+		if v.Kind() == reflect.Interface && !v.IsNil() {
+			e := v.Elem()
+			if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
+				v = e
+				continue
+			}
+		}
+		if v.Kind() != reflect.Ptr {
+			break
+		}
+		if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
+			break
+		}
+		if v.IsNil() {
+			v.Set(reflect.New(v.Type().Elem()))
+		}
+		if v.Type().NumMethod() > 0 {
+			if u, ok := v.Interface().(Unmarshaler); ok {
+				return u, reflect.Value{}
+			}
+		}
+		v = v.Elem()
+	}
+
+	return nil, v
+}
+
+// A Number represents an AttributeValue number literal.
+type Number string
+
+// Float64 attempts to cast the number to a float64, returning
+// the result of the cast or an error if the cast failed.
+func (n Number) Float64() (float64, error) {
+	return strconv.ParseFloat(string(n), 64)
+}
+
+// Int64 attempts to cast the number to an int64, returning
+// the result of the cast or an error if the cast failed.
+func (n Number) Int64() (int64, error) {
+	return strconv.ParseInt(string(n), 10, 64)
+}
+
+// Uint64 attempts to cast the number to a uint64, returning
+// the result of the cast or an error if the cast failed.
+func (n Number) Uint64() (uint64, error) {
+	return strconv.ParseUint(string(n), 10, 64)
+}
+
+// String returns the raw number represented as a string
+func (n Number) String() string {
+	return string(n)
+}
+
+type emptyOrigError struct{}
+
+func (e emptyOrigError) OrigErr() error {
+	return nil
+}
+
+// An UnmarshalTypeError is an error type representing an error
+// unmarshaling the AttributeValue's element to a Go value type.
+// Includes details about the AttributeValue type and Go value type.
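+//
+// UnmarshalTypeError satisfies the awserr.Error interface through its Code
+// and Message methods, so callers can type assert a failed Unmarshal's error
+// to awserr.Error for inspection.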
+type UnmarshalTypeError struct {
+	emptyOrigError
+	Value string
+	Type  reflect.Type
+}
+
+// Error returns the string representation of the error,
+// satisfying the error interface.
+func (e *UnmarshalTypeError) Error() string {
+	return fmt.Sprintf("%s: %s", e.Code(), e.Message())
+}
+
+// Code returns the code of the error, satisfying the awserr.Error
+// interface.
+func (e *UnmarshalTypeError) Code() string {
+	return "UnmarshalTypeError"
+}
+
+// Message returns the detailed message of the error, satisfying
+// the awserr.Error interface.
+func (e *UnmarshalTypeError) Message() string {
+	return "cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
+}
+
+// An InvalidUnmarshalError is an error type representing an invalid type
+// encountered while unmarshaling an AttributeValue to a Go value type.
+type InvalidUnmarshalError struct {
+	emptyOrigError
+	Type reflect.Type
+}
+
+// Error returns the string representation of the error,
+// satisfying the error interface.
+func (e *InvalidUnmarshalError) Error() string {
+	return fmt.Sprintf("%s: %s", e.Code(), e.Message())
+}
+
+// Code returns the code of the error, satisfying the awserr.Error
+// interface.
+func (e *InvalidUnmarshalError) Code() string {
+	return "InvalidUnmarshalError"
+}
+
+// Message returns the detailed message of the error, satisfying
+// the awserr.Error interface.
+func (e *InvalidUnmarshalError) Message() string {
+	if e.Type == nil {
+		return "cannot unmarshal to nil value"
+	}
+	if e.Type.Kind() != reflect.Ptr {
+		return "cannot unmarshal to non-pointer value, got " + e.Type.String()
+	}
+	return "cannot unmarshal to nil value, " + e.Type.String()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/decode_test.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/decode_test.go
new file mode 100644
index 000000000..3253c98d2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/decode_test.go
@@ -0,0 +1,394 @@
+package dynamodbattribute
+
+import (
+	"fmt"
+	"reflect"
+	"strconv"
+	"testing"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/dynamodb"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestUnmarshalErrorTypes(t *testing.T) {
+	var _ awserr.Error = (*UnmarshalTypeError)(nil)
+	var _ awserr.Error = (*InvalidUnmarshalError)(nil)
+}
+
+func TestUnmarshalShared(t *testing.T) {
+	for i, c := range sharedTestCases {
+		err := Unmarshal(c.in, c.actual)
+		assertConvertTest(t, i, c.actual, c.expected, err, c.err)
+	}
+}
+
+func TestUnmarshal(t *testing.T) {
+	cases := []struct {
+		in               *dynamodb.AttributeValue
+		actual, expected interface{}
+		err              error
+	}{
+		//------------
+		// Sets
+		//------------
+		{
+			in: &dynamodb.AttributeValue{BS: [][]byte{
+				{48, 49}, {50, 51},
+			}},
+			actual:   &[][]byte{},
+			expected: [][]byte{{48, 49}, {50, 51}},
+		},
+		{
+			in: &dynamodb.AttributeValue{NS: []*string{
+				aws.String("123"), aws.String("321"),
+			}},
+			actual:   &[]int{},
+			expected: []int{123, 321},
+		},
+		{
+			in: &dynamodb.AttributeValue{NS: []*string{
+				aws.String("123"), aws.String("321"),
+			}},
+			actual:   &[]interface{}{},
+			expected: []interface{}{123., 321.},
+		},
+		{
+			in: &dynamodb.AttributeValue{SS: []*string{
+				aws.String("abc"), aws.String("123"),
+			}},
+			actual:   &[]string{},
+			expected: &[]string{"abc", "123"},
+		},
+		{
+			in: &dynamodb.AttributeValue{SS: []*string{
+				aws.String("abc"), aws.String("123"),
+			}},
+			actual: 
&[]*string{}, + expected: &[]*string{aws.String("abc"), aws.String("123")}, + }, + //------------ + // Interfaces + //------------ + { + in: &dynamodb.AttributeValue{B: []byte{48, 49}}, + actual: func() interface{} { + var v interface{} + return &v + }(), + expected: []byte{48, 49}, + }, + { + in: &dynamodb.AttributeValue{BS: [][]byte{ + {48, 49}, {50, 51}, + }}, + actual: func() interface{} { + var v interface{} + return &v + }(), + expected: [][]byte{{48, 49}, {50, 51}}, + }, + { + in: &dynamodb.AttributeValue{BOOL: aws.Bool(true)}, + actual: func() interface{} { + var v interface{} + return &v + }(), + expected: bool(true), + }, + { + in: &dynamodb.AttributeValue{L: []*dynamodb.AttributeValue{ + {S: aws.String("abc")}, {S: aws.String("123")}, + }}, + actual: func() interface{} { + var v interface{} + return &v + }(), + expected: []interface{}{"abc", "123"}, + }, + { + in: &dynamodb.AttributeValue{M: map[string]*dynamodb.AttributeValue{ + "123": {S: aws.String("abc")}, + "abc": {S: aws.String("123")}, + }}, + actual: func() interface{} { + var v interface{} + return &v + }(), + expected: map[string]interface{}{"123": "abc", "abc": "123"}, + }, + { + in: &dynamodb.AttributeValue{N: aws.String("123")}, + actual: func() interface{} { + var v interface{} + return &v + }(), + expected: float64(123), + }, + { + in: &dynamodb.AttributeValue{NS: []*string{ + aws.String("123"), aws.String("321"), + }}, + actual: func() interface{} { + var v interface{} + return &v + }(), + expected: []float64{123., 321.}, + }, + { + in: &dynamodb.AttributeValue{S: aws.String("123")}, + actual: func() interface{} { + var v interface{} + return &v + }(), + expected: "123", + }, + { + in: &dynamodb.AttributeValue{SS: []*string{ + aws.String("123"), aws.String("321"), + }}, + actual: func() interface{} { + var v interface{} + return &v + }(), + expected: []string{"123", "321"}, + }, + { + in: &dynamodb.AttributeValue{M: map[string]*dynamodb.AttributeValue{ + "abc": {S: aws.String("123")}, + "Cba": {S: aws.String("321")}, + }}, + actual: &struct{ Abc, Cba string }{}, + expected: struct{ Abc, Cba string }{Abc: "123", Cba: "321"}, + }, + { + in: &dynamodb.AttributeValue{N: aws.String("512")}, + actual: new(uint8), + err: &UnmarshalTypeError{ + Value: fmt.Sprintf("number overflow, 512"), + Type: reflect.TypeOf(uint8(0)), + }, + }, + } + + for i, c := range cases { + err := Unmarshal(c.in, c.actual) + assertConvertTest(t, i, c.actual, c.expected, err, c.err) + } +} + +func TestInterfaceInput(t *testing.T) { + var v interface{} + expected := []interface{}{"abc", "123"} + err := Unmarshal(&dynamodb.AttributeValue{L: []*dynamodb.AttributeValue{ + {S: aws.String("abc")}, {S: aws.String("123")}, + }}, &v) + assertConvertTest(t, 0, v, expected, err, nil) +} + +func TestUnmarshalError(t *testing.T) { + cases := []struct { + in *dynamodb.AttributeValue + actual, expected interface{} + err error + }{ + { + in: &dynamodb.AttributeValue{}, + actual: int(0), + expected: nil, + err: &InvalidUnmarshalError{Type: reflect.TypeOf(int(0))}, + }, + } + + for i, c := range cases { + err := Unmarshal(c.in, c.actual) + assertConvertTest(t, i, c.actual, c.expected, err, c.err) + } +} + +func TestUnmarshalListShared(t *testing.T) { + for i, c := range sharedListTestCases { + err := UnmarshalList(c.in, c.actual) + assertConvertTest(t, i, c.actual, c.expected, err, c.err) + } +} + +func TestUnmarshalListError(t *testing.T) { + cases := []struct { + in []*dynamodb.AttributeValue + actual, expected interface{} + err error + }{ + { + in: 
[]*dynamodb.AttributeValue{},
+			actual:   []interface{}{},
+			expected: nil,
+			err:      &InvalidUnmarshalError{Type: reflect.TypeOf([]interface{}{})},
+		},
+	}
+
+	for i, c := range cases {
+		err := UnmarshalList(c.in, c.actual)
+		assertConvertTest(t, i, c.actual, c.expected, err, c.err)
+	}
+}
+
+func TestUnmarshalMapShared(t *testing.T) {
+	for i, c := range sharedMapTestCases {
+		err := UnmarshalMap(c.in, c.actual)
+		assertConvertTest(t, i, c.actual, c.expected, err, c.err)
+	}
+}
+
+func TestUnmarshalMapError(t *testing.T) {
+	cases := []struct {
+		in               map[string]*dynamodb.AttributeValue
+		actual, expected interface{}
+		err              error
+	}{
+		{
+			in:       map[string]*dynamodb.AttributeValue{},
+			actual:   map[string]interface{}{},
+			expected: nil,
+			err:      &InvalidUnmarshalError{Type: reflect.TypeOf(map[string]interface{}{})},
+		},
+		{
+			in: map[string]*dynamodb.AttributeValue{
+				"BOOL": {BOOL: aws.Bool(true)},
+			},
+			actual:   &map[int]interface{}{},
+			expected: nil,
+			err:      &UnmarshalTypeError{Value: "map string key", Type: reflect.TypeOf(int(0))},
+		},
+	}
+
+	for i, c := range cases {
+		err := UnmarshalMap(c.in, c.actual)
+		assertConvertTest(t, i, c.actual, c.expected, err, c.err)
+	}
+}
+
+type unmarshalUnmarshaler struct {
+	Value  string
+	Value2 int
+	Value3 bool
+	Value4 time.Time
+}
+
+func (u *unmarshalUnmarshaler) UnmarshalDynamoDBAttributeValue(av *dynamodb.AttributeValue) error {
+	if av.M == nil {
+		return fmt.Errorf("expected AttributeValue to be map")
+	}
+
+	if v, ok := av.M["abc"]; !ok {
+		return fmt.Errorf("expected `abc` map key")
+	} else if v.S == nil {
+		return fmt.Errorf("expected `abc` map value string")
+	} else {
+		u.Value = *v.S
+	}
+
+	if v, ok := av.M["def"]; !ok {
+		return fmt.Errorf("expected `def` map key")
+	} else if v.N == nil {
+		return fmt.Errorf("expected `def` map value number")
+	} else {
+		n, err := strconv.ParseInt(*v.N, 10, 64)
+		if err != nil {
+			return err
+		}
+		u.Value2 = int(n)
+	}
+
+	if v, ok := av.M["ghi"]; !ok {
+		return fmt.Errorf("expected `ghi` map key")
+	} else if v.BOOL == nil {
+		return fmt.Errorf("expected `ghi` map value bool")
+	} else {
+		u.Value3 = *v.BOOL
+	}
+
+	if v, ok := av.M["jkl"]; !ok {
+		return fmt.Errorf("expected `jkl` map key")
+	} else if v.S == nil {
+		return fmt.Errorf("expected `jkl` map value string")
+	} else {
+		t, err := time.Parse(time.RFC3339, *v.S)
+		if err != nil {
+			return err
+		}
+		u.Value4 = t
+	}
+
+	return nil
+}
+
+func TestUnmarshalUnmarshaler(t *testing.T) {
+	u := &unmarshalUnmarshaler{}
+	av := &dynamodb.AttributeValue{
+		M: map[string]*dynamodb.AttributeValue{
+			"abc": {S: aws.String("value")},
+			"def": {N: aws.String("123")},
+			"ghi": {BOOL: aws.Bool(true)},
+			"jkl": {S: aws.String("2016-05-03T17:06:26.209072Z")},
+		},
+	}
+
+	err := Unmarshal(av, u)
+	assert.NoError(t, err)
+
+	assert.Equal(t, "value", u.Value)
+	assert.Equal(t, 123, u.Value2)
+	assert.Equal(t, true, u.Value3)
+	assert.Equal(t, testDate, u.Value4)
+}
+
+func TestDecodeUseNumber(t *testing.T) {
+	u := map[string]interface{}{}
+	av := &dynamodb.AttributeValue{
+		M: map[string]*dynamodb.AttributeValue{
+			"abc": {S: aws.String("value")},
+			"def": {N: aws.String("123")},
+			"ghi": {BOOL: aws.Bool(true)},
+		},
+	}
+
+	decoder := NewDecoder(func(d *Decoder) {
+		d.UseNumber = true
+	})
+	err := decoder.Decode(av, &u)
+	assert.NoError(t, err)
+
+	assert.Equal(t, "value", u["abc"])
+	n, ok := u["def"].(Number)
+	assert.True(t, ok)
+	assert.Equal(t, "123", n.String())
+	assert.Equal(t, true, u["ghi"])
+}
+
+func TestDecodeUseNumberNumberSet(t *testing.T) {
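+	// With UseNumber set, a number set should decode to []Number, preserving
+	// the original string form of each element.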
+	u := map[string]interface{}{}
+	av := &dynamodb.AttributeValue{
+		M: map[string]*dynamodb.AttributeValue{
+			"ns": {
+				NS: []*string{
+					aws.String("123"), aws.String("321"),
+				},
+			},
+		},
+	}
+
+	decoder := NewDecoder(func(d *Decoder) {
+		d.UseNumber = true
+	})
+	err := decoder.Decode(av, &u)
+	assert.NoError(t, err)
+
+	ns, ok := u["ns"].([]Number)
+	assert.True(t, ok)
+
+	assert.Equal(t, "123", ns[0].String())
+	assert.Equal(t, "321", ns[1].String())
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/doc.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/doc.go
new file mode 100644
index 000000000..3d46f736b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/doc.go
@@ -0,0 +1,60 @@
+// Package dynamodbattribute provides marshaling utilities for marshaling to
+// dynamodb.AttributeValue types and unmarshaling to Go value types. These
+// utilities allow you to marshal slices, maps, structs, and scalar values
+// to and from dynamodb.AttributeValue. These are useful when marshaling
+// Go value types to dynamodb.AttributeValue for DynamoDB requests, or
+// unmarshaling the dynamodb.AttributeValue back into a Go value type.
+//
+// Marshal Go value types to dynamodb.AttributeValue: See (ExampleMarshal)
+//
+//	type Record struct {
+//		MyField string
+//		Letters []string
+//		A2Num   map[string]int
+//	}
+//
+//	...
+//
+//	r := Record{
+//		MyField: "dynamodbattribute.Marshal example",
+//		Letters: []string{"a", "b", "c", "d"},
+//		A2Num:   map[string]int{"a": 1, "b": 2, "c": 3},
+//	}
+//	av, err := dynamodbattribute.Marshal(r)
+//	fmt.Println(av, err)
+//
+// Unmarshal dynamodb.AttributeValue to Go value type: See (ExampleUnmarshal)
+//
+//	r2 := Record{}
+//	err = dynamodbattribute.Unmarshal(av, &r2)
+//	fmt.Println(err, reflect.DeepEqual(r, r2))
+//
+// Marshal Go value type for DynamoDB.PutItem:
+//
+//	sess := session.New()
+//	svc := dynamodb.New(sess)
+//	item, err := dynamodbattribute.MarshalMap(r)
+//	if err != nil {
+//		fmt.Println("Failed to convert", err)
+//		return
+//	}
+//	result, err := svc.PutItem(&dynamodb.PutItemInput{
+//		Item:      item,
+//		TableName: aws.String("exampleTable"),
+//	})
+//
+// The ConvertTo, ConvertToList, ConvertToMap, ConvertFrom, ConvertFromMap,
+// and ConvertFromList methods have been deprecated. The Marshal and Unmarshal
+// functions should be used instead. The ConvertTo|From marshalers do not
+// support BinarySet, NumberSet, or StringSet, and will incorrectly marshal
+// binary data fields in structs as base64 strings.
+//
+// The Marshal and Unmarshal functions correct this behavior, and remove
+// the reliance on encoding/json. `json` struct tags are still supported. In
+// addition, support for a new struct tag `dynamodbav` was added. Support for
+// the json.Marshaler and json.Unmarshaler interfaces has been removed and
+// replaced with the dynamodbattribute.Marshaler and
+// dynamodbattribute.Unmarshaler interfaces.
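+//
+// As an editorial sketch (not part of the upstream documentation), a type
+// can take full control of its own encoding and decoding by implementing
+// both interfaces; the Money type and its Cents field are hypothetical:
+//
+//	type Money struct {
+//		Cents int64
+//	}
+//
+//	func (m *Money) MarshalDynamoDBAttributeValue(av *dynamodb.AttributeValue) error {
+//		s := strconv.FormatInt(m.Cents, 10)
+//		av.N = &s // store as a DynamoDB number
+//		return nil
+//	}
+//
+//	func (m *Money) UnmarshalDynamoDBAttributeValue(av *dynamodb.AttributeValue) error {
+//		if av.N == nil {
+//			return fmt.Errorf("expected a number AttributeValue")
+//		}
+//		n, err := strconv.ParseInt(*av.N, 10, 64)
+//		if err != nil {
+//			return err
+//		}
+//		m.Cents = n
+//		return nil
+//	}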
+package dynamodbattribute
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/encode.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/encode.go
new file mode 100644
index 000000000..72c07fee8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/encode.go
@@ -0,0 +1,557 @@
+package dynamodbattribute
+
+import (
+	"fmt"
+	"reflect"
+	"strconv"
+	"time"
+
+	"github.com/aws/aws-sdk-go/service/dynamodb"
+)
+
+// A Marshaler is an interface to provide custom marshaling of Go value types
+// to AttributeValues. Use this to provide custom logic determining how a
+// Go value type should be marshaled.
+//
+//	type ExampleMarshaler struct {
+//		Value int
+//	}
+//
+//	func (m *ExampleMarshaler) MarshalDynamoDBAttributeValue(av *dynamodb.AttributeValue) error {
+//		n := fmt.Sprintf("%v", m.Value)
+//		av.N = &n
+//
+//		return nil
+//	}
+type Marshaler interface {
+	MarshalDynamoDBAttributeValue(*dynamodb.AttributeValue) error
+}
+
+// Marshal will serialize the passed in Go value type into a DynamoDB AttributeValue
+// type. This value can be used in DynamoDB API operations to simplify marshaling
+// your Go value types into AttributeValues.
+//
+// Marshal will recursively traverse the passed in value, marshaling its
+// contents into an AttributeValue. Marshal supports basic scalars
+// (int, uint, float, bool, string), maps, slices, and structs. Anonymous
+// nested types are flattened based on Go anonymous type visibility.
+//
+// Marshaling slices to AttributeValue will default to a List for all
+// types except for []byte and [][]byte. []byte will be marshaled as
+// Binary data (B), and [][]byte will be marshaled as a binary data set
+// (BS).
+//
+// The `dynamodbav` struct tag can be used to control how the value will be
+// marshaled into an AttributeValue.
+//
+//	// Field is ignored
+//	Field int `dynamodbav:"-"`
+//
+//	// Field AttributeValue map key "myName"
+//	Field int `dynamodbav:"myName"`
+//
+//	// Field AttributeValue map key "myName", and
+//	// Field is omitted if it is empty
+//	Field int `dynamodbav:"myName,omitempty"`
+//
+//	// Field AttributeValue map key "Field", and
+//	// Field is omitted if it is empty
+//	Field int `dynamodbav:",omitempty"`
+//
+//	// Field's elems will be omitted if empty,
+//	// only valid for slices and maps.
+//	Field []string `dynamodbav:",omitemptyelem"`
+//
+//	// Field will be marshaled as an AttributeValue string,
+//	// only valid for number types (int, uint, float)
+//	Field int `dynamodbav:",string"`
+//
+//	// Field will be marshaled as a binary set
+//	Field [][]byte `dynamodbav:",binaryset"`
+//
+//	// Field will be marshaled as a number set
+//	Field []int `dynamodbav:",numberset"`
+//
+//	// Field will be marshaled as a string set
+//	Field []string `dynamodbav:",stringset"`
+//
+// The omitempty tag is only used during Marshaling and is ignored for
+// Unmarshal. Any zero value, or any value that marshals to an AttributeValue
+// NULL, will still be added to AttributeValue Maps during struct marshal
+// unless omitempty is set. The omitemptyelem tag works the same as omitempty
+// except it applies to the elements of maps and slices instead of struct
+// fields; empty elements will not be included in the marshaled
+// AttributeValue Map, List, or Set.
+//
+// For convenience and backwards compatibility with the ConvertTo functions,
+// json struct tags are supported by Marshal and Unmarshal. If
+// both json and dynamodbav struct tags are provided the json tag will
+// be ignored in favor of dynamodbav.
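+//
+// For example (an editorial illustration, not upstream documentation), with
+// both tags present the dynamodbav name wins:
+//
+//	// marshaled under the AttributeValue map key "b", not "a"
+//	Field int `json:"a" dynamodbav:"b"`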
+//
+// All struct fields, including anonymous fields, are marshaled unless any
+// of the following conditions are met.
+//
+//	- the field is not exported
+//	- json or dynamodbav field tag is "-"
+//	- json or dynamodbav field tag specifies "omitempty", and is empty.
+//
+// Pointer and interface values encode as the value pointed to or contained
+// in the interface. A nil value encodes as the AttributeValue NULL value.
+//
+// Channel, complex, and function values are not encoded and will be skipped
+// when walking the value to be marshaled.
+//
+// When marshaling, any error that occurs will halt the marshal and return
+// the error.
+//
+// Marshal cannot represent cyclic data structures and will not handle them.
+// Passing cyclic structures to Marshal will result in an infinite recursion.
+func Marshal(in interface{}) (*dynamodb.AttributeValue, error) {
+	return NewEncoder().Encode(in)
+}
+
+// MarshalMap is an alias for the Marshal func which marshals a Go value
+// type to a map of AttributeValues.
+func MarshalMap(in interface{}) (map[string]*dynamodb.AttributeValue, error) {
+	av, err := NewEncoder().Encode(in)
+	if err != nil || av == nil || av.M == nil {
+		return map[string]*dynamodb.AttributeValue{}, err
+	}
+
+	return av.M, nil
+}
+
+// MarshalList is an alias for the Marshal func which marshals a Go value
+// type to a slice of AttributeValues.
+func MarshalList(in interface{}) ([]*dynamodb.AttributeValue, error) {
+	av, err := NewEncoder().Encode(in)
+	if err != nil || av == nil || av.L == nil {
+		return []*dynamodb.AttributeValue{}, err
+	}
+
+	return av.L, nil
+}
+
+// A MarshalOptions is a collection of options shared between marshaling
+// and unmarshaling.
+type MarshalOptions struct {
+	// States that the encoding/json struct tags should be supported.
+	// If a `dynamodbav` struct tag is also provided the encoding/json
+	// tag will be ignored.
+	//
+	// Enabled by default.
+	SupportJSONTags bool
+}
+
+// An Encoder provides marshaling Go value types to AttributeValues.
+type Encoder struct {
+	MarshalOptions
+
+	// Empty strings, "", will be marked as NULL AttributeValue types.
+	// Empty strings are not valid values for DynamoDB. This does not apply
+	// to lists, sets, or maps. Use the struct tag `omitemptyelem`
+	// to skip empty (zero) values in lists, sets, and maps.
+	//
+	// Enabled by default.
+	NullEmptyString bool
+}
+
+// NewEncoder creates a new Encoder with default configuration. Use
+// the `opts` functional options to override the default configuration.
+func NewEncoder(opts ...func(*Encoder)) *Encoder {
+	e := &Encoder{
+		MarshalOptions: MarshalOptions{
+			SupportJSONTags: true,
+		},
+		NullEmptyString: true,
+	}
+	for _, o := range opts {
+		o(e)
+	}
+
+	return e
+}
+
+// Encode will marshal a Go value type to an AttributeValue, returning
+// the constructed AttributeValue or an error.
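+//
+// A minimal usage sketch (an editorial addition, not upstream
+// documentation), assuming the defaults are wanted except that empty
+// strings should be kept rather than converted to NULL:
+//
+//	e := NewEncoder(func(e *Encoder) {
+//		e.NullEmptyString = false // keep "" instead of NULL
+//	})
+//	av, err := e.Encode(struct{ Name string }{})
+//	if err != nil {
+//		// handle the marshaling error
+//	}
+//	_ = av // av.M["Name"].S points at an empty string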
+func (e *Encoder) Encode(in interface{}) (*dynamodb.AttributeValue, error) {
+	av := &dynamodb.AttributeValue{}
+	if err := e.encode(av, reflect.ValueOf(in), tag{}); err != nil {
+		return nil, err
+	}
+
+	return av, nil
+}
+
+func (e *Encoder) encode(av *dynamodb.AttributeValue, v reflect.Value, fieldTag tag) error {
+	// Handle pointers and interfaces by unwrapping to the underlying value
+	v = valueElem(v)
+
+	if v.Kind() != reflect.Invalid {
+		if used, err := tryMarshaler(av, v); used {
+			return err
+		}
+	}
+
+	if fieldTag.OmitEmpty && emptyValue(v) {
+		encodeNull(av)
+		return nil
+	}
+
+	switch v.Kind() {
+	case reflect.Invalid:
+		encodeNull(av)
+	case reflect.Struct:
+		return e.encodeStruct(av, v)
+	case reflect.Map:
+		return e.encodeMap(av, v, fieldTag)
+	case reflect.Slice, reflect.Array:
+		return e.encodeSlice(av, v, fieldTag)
+	case reflect.Chan, reflect.Func, reflect.UnsafePointer:
+		// do nothing for unsupported types
+	default:
+		return e.encodeScalar(av, v, fieldTag)
+	}
+
+	return nil
+}
+
+func (e *Encoder) encodeStruct(av *dynamodb.AttributeValue, v reflect.Value) error {
+	// To maintain backwards compatibility with the ConvertTo family of
+	// methods, which converted time.Time structs to strings
+	if t, ok := v.Interface().(time.Time); ok {
+		s := t.Format(time.RFC3339Nano)
+		av.S = &s
+		return nil
+	}
+
+	av.M = map[string]*dynamodb.AttributeValue{}
+	fields := unionStructFields(v.Type(), e.MarshalOptions)
+	for _, f := range fields {
+		if f.Name == "" {
+			return &InvalidMarshalError{msg: "map key cannot be empty"}
+		}
+
+		fv := v.FieldByIndex(f.Index)
+		elem := &dynamodb.AttributeValue{}
+		err := e.encode(elem, fv, f.tag)
+		skip, err := keepOrOmitEmpty(f.OmitEmpty, elem, err)
+		if err != nil {
+			return err
+		} else if skip {
+			continue
+		}
+
+		av.M[f.Name] = elem
+	}
+	if len(av.M) == 0 {
+		encodeNull(av)
+	}
+
+	return nil
+}
+
+func (e *Encoder) encodeMap(av *dynamodb.AttributeValue, v reflect.Value, fieldTag tag) error {
+	av.M = map[string]*dynamodb.AttributeValue{}
+	for _, key := range v.MapKeys() {
+		keyName := fmt.Sprint(key.Interface())
+		if keyName == "" {
+			return &InvalidMarshalError{msg: "map key cannot be empty"}
+		}
+
+		elemVal := v.MapIndex(key)
+		elem := &dynamodb.AttributeValue{}
+		err := e.encode(elem, elemVal, tag{})
+		skip, err := keepOrOmitEmpty(fieldTag.OmitEmptyElem, elem, err)
+		if err != nil {
+			return err
+		} else if skip {
+			continue
+		}
+
+		av.M[keyName] = elem
+	}
+	if len(av.M) == 0 {
+		encodeNull(av)
+	}
+
+	return nil
+}
+
+func (e *Encoder) encodeSlice(av *dynamodb.AttributeValue, v reflect.Value, fieldTag tag) error {
+	switch v.Type().Elem().Kind() {
+	case reflect.Uint8:
+		b := v.Bytes()
+		if len(b) == 0 {
+			encodeNull(av)
+			return nil
+		}
+		av.B = append([]byte{}, b...)
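+	// Editorial note: the default branch below selects among the four
+	// remaining slice encodings. A `binaryset` tag (or a [][]byte value)
+	// produces a Binary Set (BS), `numberset` a Number Set (NS),
+	// `stringset` a String Set (SS), and anything else a generic List (L).
+	// Each set's elemFn rejects elements that did not marshal to the
+	// required scalar type.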
+	default:
+		var elemFn func(dynamodb.AttributeValue) error
+
+		if fieldTag.AsBinSet || v.Type() == byteSliceSlicetype { // Binary Set
+			av.BS = make([][]byte, 0, v.Len())
+			elemFn = func(elem dynamodb.AttributeValue) error {
+				if elem.B == nil {
+					return &InvalidMarshalError{msg: "binary set must only contain non-nil byte slices"}
+				}
+				av.BS = append(av.BS, elem.B)
+				return nil
+			}
+		} else if fieldTag.AsNumSet { // Number Set
+			av.NS = make([]*string, 0, v.Len())
+			elemFn = func(elem dynamodb.AttributeValue) error {
+				if elem.N == nil {
+					return &InvalidMarshalError{msg: "number set must only contain non-nil string numbers"}
+				}
+				av.NS = append(av.NS, elem.N)
+				return nil
+			}
+		} else if fieldTag.AsStrSet { // String Set
+			av.SS = make([]*string, 0, v.Len())
+			elemFn = func(elem dynamodb.AttributeValue) error {
+				if elem.S == nil {
+					return &InvalidMarshalError{msg: "string set must only contain non-nil strings"}
+				}
+				av.SS = append(av.SS, elem.S)
+				return nil
+			}
+		} else { // List
+			av.L = make([]*dynamodb.AttributeValue, 0, v.Len())
+			elemFn = func(elem dynamodb.AttributeValue) error {
+				av.L = append(av.L, &elem)
+				return nil
+			}
+		}
+
+		if n, err := e.encodeList(v, fieldTag, elemFn); err != nil {
+			return err
+		} else if n == 0 {
+			encodeNull(av)
+		}
+	}
+
+	return nil
+}
+
+func (e *Encoder) encodeList(v reflect.Value, fieldTag tag, elemFn func(dynamodb.AttributeValue) error) (int, error) {
+	count := 0
+	for i := 0; i < v.Len(); i++ {
+		elem := dynamodb.AttributeValue{}
+		err := e.encode(&elem, v.Index(i), tag{OmitEmpty: fieldTag.OmitEmptyElem})
+		skip, err := keepOrOmitEmpty(fieldTag.OmitEmptyElem, &elem, err)
+		if err != nil {
+			return 0, err
+		} else if skip {
+			continue
+		}
+
+		if err := elemFn(elem); err != nil {
+			return 0, err
+		}
+		count++
+	}
+
+	return count, nil
+}
+
+func (e *Encoder) encodeScalar(av *dynamodb.AttributeValue, v reflect.Value, fieldTag tag) error {
+	if v.Type() == numberType {
+		s := v.String()
+		if fieldTag.AsString {
+			av.S = &s
+		} else {
+			av.N = &s
+		}
+		return nil
+	}
+
+	switch v.Kind() {
+	case reflect.Bool:
+		av.BOOL = new(bool)
+		*av.BOOL = v.Bool()
+	case reflect.String:
+		if err := e.encodeString(av, v); err != nil {
+			return err
+		}
+	default:
+		// Fallback to encoding numbers, will return invalid type if not supported
+		if err := e.encodeNumber(av, v); err != nil {
+			return err
+		}
+		if fieldTag.AsString && av.NULL == nil && av.N != nil {
+			av.S = av.N
+			av.N = nil
+		}
+	}
+
+	return nil
+}
+
+func (e *Encoder) encodeNumber(av *dynamodb.AttributeValue, v reflect.Value) error {
+	if used, err := tryMarshaler(av, v); used {
+		return err
+	}
+
+	var out string
+	switch v.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		out = encodeInt(v.Int())
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		out = encodeUint(v.Uint())
+	case reflect.Float32, reflect.Float64:
+		out = encodeFloat(v.Float())
+	default:
+		return &unsupportedMarshalTypeError{Type: v.Type()}
+	}
+
+	av.N = &out
+
+	return nil
+}
+
+func (e *Encoder) encodeString(av *dynamodb.AttributeValue, v reflect.Value) error {
+	if used, err := tryMarshaler(av, v); used {
+		return err
+	}
+
+	switch v.Kind() {
+	case reflect.String:
+		s := v.String()
+		if len(s) == 0 && e.NullEmptyString {
+			encodeNull(av)
+		} else {
+			av.S = &s
+		}
+	default:
+		return &unsupportedMarshalTypeError{Type: v.Type()}
+	}
+
+	return nil
+}
+
+func encodeInt(i int64) string {
+	return strconv.FormatInt(i, 10)
+}
+func encodeUint(u uint64) string {
+	return strconv.FormatUint(u, 10)
+}
+func encodeFloat(f float64) string {
+	return strconv.FormatFloat(f, 'f', -1, 64)
+}
+func encodeNull(av *dynamodb.AttributeValue) {
+	t := true
+	*av = dynamodb.AttributeValue{NULL: &t}
+}
+
+func valueElem(v reflect.Value) reflect.Value {
+	switch v.Kind() {
+	case reflect.Interface, reflect.Ptr:
+		for v.Kind() == reflect.Interface || v.Kind() == reflect.Ptr {
+			v = v.Elem()
+		}
+	}
+
+	return v
+}
+
+func emptyValue(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	}
+	return false
+}
+
+func tryMarshaler(av *dynamodb.AttributeValue, v reflect.Value) (bool, error) {
+	if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+		v = v.Addr()
+	}
+
+	if v.Type().NumMethod() == 0 {
+		return false, nil
+	}
+
+	if m, ok := v.Interface().(Marshaler); ok {
+		return true, m.MarshalDynamoDBAttributeValue(av)
+	}
+
+	return false, nil
+}
+
+func keepOrOmitEmpty(omitEmpty bool, av *dynamodb.AttributeValue, err error) (bool, error) {
+	if err != nil {
+		if _, ok := err.(*unsupportedMarshalTypeError); ok {
+			return true, nil
+		}
+		return false, err
+	}
+
+	if av.NULL != nil && omitEmpty {
+		return true, nil
+	}
+
+	return false, nil
+}
+
+// An InvalidMarshalError is an error type representing an error
+// occurring when marshaling a Go value type to an AttributeValue.
+type InvalidMarshalError struct {
+	emptyOrigError
+	msg string
+}
+
+// Error returns the string representation of the error,
+// satisfying the error interface.
+func (e *InvalidMarshalError) Error() string {
+	return fmt.Sprintf("%s: %s", e.Code(), e.Message())
+}
+
+// Code returns the code of the error, satisfying the awserr.Error
+// interface.
+func (e *InvalidMarshalError) Code() string {
+	return "InvalidMarshalError"
+}
+
+// Message returns the detailed message of the error, satisfying
+// the awserr.Error interface.
+func (e *InvalidMarshalError) Message() string {
+	return e.msg
+}
+
+// An unsupportedMarshalTypeError represents a Go value type
+// which cannot be marshaled into an AttributeValue and should
+// be skipped by the marshaler.
+type unsupportedMarshalTypeError struct {
+	emptyOrigError
+	Type reflect.Type
+}
+
+// Error returns the string representation of the error,
+// satisfying the error interface.
+func (e *unsupportedMarshalTypeError) Error() string {
+	return fmt.Sprintf("%s: %s", e.Code(), e.Message())
+}
+
+// Code returns the code of the error, satisfying the awserr.Error
+// interface.
+func (e *unsupportedMarshalTypeError) Code() string {
+	return "unsupportedMarshalTypeError"
+}
+
+// Message returns the detailed message of the error, satisfying
+// the awserr.Error interface.
+func (e *unsupportedMarshalTypeError) Message() string {
+	return "Go value type " + e.Type.String() + " is not supported"
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/encode_test.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/encode_test.go
new file mode 100644
index 000000000..46028c946
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/encode_test.go
@@ -0,0 +1,126 @@
+package dynamodbattribute
+
+import (
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/dynamodb"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestMarshalErrorTypes(t *testing.T) {
+	var _ awserr.Error = (*InvalidMarshalError)(nil)
+	var _ awserr.Error = (*unsupportedMarshalTypeError)(nil)
+}
+
+func TestMarshalShared(t *testing.T) {
+	for i, c := range sharedTestCases {
+		av, err := Marshal(c.expected)
+		assertConvertTest(t, i, av, c.in, err, c.err)
+	}
+}
+
+func TestMarshalListShared(t *testing.T) {
+	for i, c := range sharedListTestCases {
+		av, err := MarshalList(c.expected)
+		assertConvertTest(t, i, av, c.in, err, c.err)
+	}
+}
+
+func TestMarshalMapShared(t *testing.T) {
+	for i, c := range sharedMapTestCases {
+		av, err := MarshalMap(c.expected)
+		assertConvertTest(t, i, av, c.in, err, c.err)
+	}
+}
+
+type marshalMarshaler struct {
+	Value  string
+	Value2 int
+	Value3 bool
+	Value4 time.Time
+}
+
+func (m *marshalMarshaler) MarshalDynamoDBAttributeValue(av *dynamodb.AttributeValue) error {
+	av.M = map[string]*dynamodb.AttributeValue{
+		"abc": {S: &m.Value},
+		"def": {N: aws.String(fmt.Sprintf("%d", m.Value2))},
+		"ghi": {BOOL: &m.Value3},
+		"jkl": {S: aws.String(m.Value4.Format(time.RFC3339Nano))},
+	}
+
+	return nil
+}
+
+func TestMarshalMashaler(t *testing.T) {
+	m := &marshalMarshaler{
+		Value:  "value",
+		Value2: 123,
+		Value3: true,
+		Value4: testDate,
+	}
+
+	expect := &dynamodb.AttributeValue{
+		M: map[string]*dynamodb.AttributeValue{
+			"abc": {S: aws.String("value")},
+			"def": {N: aws.String("123")},
+			"ghi": {BOOL: aws.Bool(true)},
+			"jkl": {S: aws.String("2016-05-03T17:06:26.209072Z")},
+		},
+	}
+
+	actual, err := Marshal(m)
+	assert.NoError(t, err)
+
+	assert.Equal(t, expect, actual)
+}
+
+type testOmitEmptyElemListStruct struct {
+	Values []string `dynamodbav:",omitemptyelem"`
+}
+
+type testOmitEmptyElemMapStruct struct {
+	Values map[string]interface{} `dynamodbav:",omitemptyelem"`
+}
+
+func TestMarshalListOmitEmptyElem(t *testing.T) {
+	expect := &dynamodb.AttributeValue{
+		M: map[string]*dynamodb.AttributeValue{
+			"Values": {L: []*dynamodb.AttributeValue{
+				{S: aws.String("abc")},
+				{S: aws.String("123")},
+			}},
+		},
+	}
+
+	m := testOmitEmptyElemListStruct{Values: []string{"abc", "", "123"}}
+
+	actual, err := Marshal(m)
+	assert.NoError(t, err)
+	assert.Equal(t, expect, actual)
+}
+
+func TestMarshalMapOmitEmptyElem(t *testing.T) {
+	expect := &dynamodb.AttributeValue{
+		M: map[string]*dynamodb.AttributeValue{
+			"Values": {M: map[string]*dynamodb.AttributeValue{
+				"abc": {N: aws.String("123")},
+				"klm": {S: aws.String("abc")},
+			}},
+		},
+	}
+
+	m := testOmitEmptyElemMapStruct{Values: map[string]interface{}{
+		"abc": 123.,
+		"efg": nil,
+		"hij": "",
+		"klm": "abc",
+	}}
+
+	actual, err := Marshal(m)
+	assert.NoError(t, err)
+	assert.Equal(t, expect, actual)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/field.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/field.go
new file mode 100644
index 000000000..1fe0d3500
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/field.go
@@ -0,0 +1,269 @@
+package dynamodbattribute
+
+import (
+	"reflect"
+	"sort"
+	"strings"
+)
+
+type field struct {
+	tag
+
+	Name        string
+	NameFromTag bool
+
+	Index []int
+	Type  reflect.Type
+}
+
+func fieldByName(fields []field, name string) (field, bool) {
+	foldExists := false
+	foldField := field{}
+
+	for _, f := range fields {
+		if f.Name == name {
+			return f, true
+		}
+		if !foldExists && strings.EqualFold(f.Name, name) {
+			foldField = f
+			foldExists = true
+		}
+	}
+
+	return foldField, foldExists
+}
+
+func buildField(pIdx []int, i int, sf reflect.StructField, fieldTag tag) field {
+	f := field{
+		Name: sf.Name,
+		Type: sf.Type,
+		tag:  fieldTag,
+	}
+	if len(fieldTag.Name) != 0 {
+		f.NameFromTag = true
+		f.Name = fieldTag.Name
+	}
+
+	f.Index = make([]int, len(pIdx)+1)
+	copy(f.Index, pIdx)
+	f.Index[len(pIdx)] = i
+
+	return f
+}
+
+func unionStructFields(t reflect.Type, opts MarshalOptions) []field {
+	fields := enumFields(t, opts)
+
+	sort.Sort(fieldsByName(fields))
+
+	fields = visibleFields(fields)
+
+	return fields
+}
+
+// enumFields will recursively iterate through a structure and its nested
+// anonymous fields.
+//
+// Based on the encoding/json struct field enumeration of the Go stdlib,
+// https://golang.org/src/encoding/json/encode.go typeFields func.
+func enumFields(t reflect.Type, opts MarshalOptions) []field {
+	// Fields to explore
+	current := []field{}
+	next := []field{{Type: t}}
+
+	// count of queued names
+	count := map[reflect.Type]int{}
+	nextCount := map[reflect.Type]int{}
+
+	visited := map[reflect.Type]struct{}{}
+	fields := []field{}
+
+	for len(next) > 0 {
+		current, next = next, current[:0]
+		count, nextCount = nextCount, map[reflect.Type]int{}
+
+		for _, f := range current {
+			if _, ok := visited[f.Type]; ok {
+				continue
+			}
+			visited[f.Type] = struct{}{}
+
+			for i := 0; i < f.Type.NumField(); i++ {
+				sf := f.Type.Field(i)
+				if sf.PkgPath != "" && !sf.Anonymous {
+					// Ignore unexported, non-anonymous fields. An
+					// unexported but anonymous field may still be used if
+					// the type has exported nested fields.
+					continue
+				}
+
+				fieldTag := tag{}
+				fieldTag.parseAVTag(sf.Tag)
+				if opts.SupportJSONTags && fieldTag == (tag{}) {
+					fieldTag.parseJSONTag(sf.Tag)
+				}
+
+				if fieldTag.Ignore {
+					continue
+				}
+
+				ft := sf.Type
+				if ft.Name() == "" && ft.Kind() == reflect.Ptr {
+					ft = ft.Elem()
+				}
+
+				structField := buildField(f.Index, i, sf, fieldTag)
+				structField.Type = ft
+
+				if !sf.Anonymous || ft.Kind() != reflect.Struct {
+					fields = append(fields, structField)
+					if count[f.Type] > 1 {
+						// If there were multiple instances, add a second,
+						// so that the annihilation code will see a duplicate.
+						// It only cares about the distinction between 1 or 2,
+						// so don't bother generating any more copies.
+						fields = append(fields, structField)
+					}
+					continue
+				}
+
+				// Record new anon struct to explore next round
+				nextCount[ft]++
+				if nextCount[ft] == 1 {
+					next = append(next, structField)
+				}
+			}
+		}
+	}
+
+	return fields
+}
+
+// visibleFields will return a slice of fields which are visible based on
+// Go's standard visibility rules, with the exception of ties being broken
+// by depth and struct tag naming.
+//
+// Based on the encoding/json field filtering of the Go stdlib,
+// https://golang.org/src/encoding/json/encode.go typeFields func.
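+//
+// An editorial illustration (not upstream documentation): given
+//
+//	type Inner struct{ Name string }
+//	type Outer struct {
+//		Inner
+//		Name string
+//	}
+//
+// both fields enumerate under the name "Name", but the shallower Outer.Name
+// dominates and the embedded Inner.Name is dropped from the visible set.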
+func visibleFields(fields []field) []field {
+	// Delete all fields that are hidden by the Go rules for embedded fields,
+	// except that fields with JSON tags are promoted.
+
+	// The fields are sorted in primary order of name, secondary order
+	// of field index length. Loop over names; for each name, delete
+	// hidden fields by choosing the one dominant field that survives.
+	out := fields[:0]
+	for advance, i := 0, 0; i < len(fields); i += advance {
+		// One iteration per name.
+		// Find the sequence of fields with the name of this first field.
+		fi := fields[i]
+		name := fi.Name
+		for advance = 1; i+advance < len(fields); advance++ {
+			fj := fields[i+advance]
+			if fj.Name != name {
+				break
+			}
+		}
+		if advance == 1 { // Only one field with this name
+			out = append(out, fi)
+			continue
+		}
+		dominant, ok := dominantField(fields[i : i+advance])
+		if ok {
+			out = append(out, dominant)
+		}
+	}
+
+	fields = out
+	sort.Sort(fieldsByIndex(fields))
+
+	return fields
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// JSON tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
+//
+// Based on the encoding/json field filtering of the Go stdlib,
+// https://golang.org/src/encoding/json/encode.go dominantField func.
+func dominantField(fields []field) (field, bool) {
+	// The fields are sorted in increasing index-length order. The winner
+	// must therefore be one with the shortest index length. Drop all
+	// longer entries, which is easy: just truncate the slice.
+	length := len(fields[0].Index)
+	tagged := -1 // Index of first tagged field.
+	for i, f := range fields {
+		if len(f.Index) > length {
+			fields = fields[:i]
+			break
+		}
+		if f.NameFromTag {
+			if tagged >= 0 {
+				// Multiple tagged fields at the same level: conflict.
+				// Return no field.
+				return field{}, false
+			}
+			tagged = i
+		}
+	}
+	if tagged >= 0 {
+		return fields[tagged], true
+	}
+	// All remaining fields have the same length. If there's more than one,
+	// we have a conflict (two fields named "X" at the same level) and we
+	// return no field.
+	if len(fields) > 1 {
+		return field{}, false
+	}
+	return fields[0], true
+}
+
+// fieldsByName sorts fields by name, breaking ties with depth,
+// then breaking ties with "name came from json tag", then
+// breaking ties with index sequence.
+//
+// Based on the encoding/json field filtering of the Go stdlib,
+// https://golang.org/src/encoding/json/encode.go fieldsByName type.
+type fieldsByName []field
+
+func (x fieldsByName) Len() int { return len(x) }
+
+func (x fieldsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x fieldsByName) Less(i, j int) bool {
+	if x[i].Name != x[j].Name {
+		return x[i].Name < x[j].Name
+	}
+	if len(x[i].Index) != len(x[j].Index) {
+		return len(x[i].Index) < len(x[j].Index)
+	}
+	if x[i].NameFromTag != x[j].NameFromTag {
+		return x[i].NameFromTag
+	}
+	return fieldsByIndex(x).Less(i, j)
+}
+
+// fieldsByIndex sorts fields by index sequence.
+//
+// Based on the encoding/json field filtering of the Go stdlib,
+// https://golang.org/src/encoding/json/encode.go fieldsByIndex type.
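+//
+// An editorial illustration (not upstream documentation): for
+//
+//	type B struct{ X, Y int }
+//	type A struct {
+//		B     // B's fields get index paths beginning with 0
+//		Z int // index path {1}
+//	}
+//
+// the paths compare lexicographically: X {0, 0} < Y {0, 1} < Z {1}.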
+type fieldsByIndex []field
+
+func (x fieldsByIndex) Len() int { return len(x) }
+
+func (x fieldsByIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x fieldsByIndex) Less(i, j int) bool {
+	for k, xik := range x[i].Index {
+		if k >= len(x[j].Index) {
+			return false
+		}
+		if xik != x[j].Index[k] {
+			return xik < x[j].Index[k]
+		}
+	}
+	return len(x[i].Index) < len(x[j].Index)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/field_test.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/field_test.go
new file mode 100644
index 000000000..58ee17b14
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/field_test.go
@@ -0,0 +1,110 @@
+package dynamodbattribute
+
+import (
+	"reflect"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+type testUnionValues struct {
+	Name  string
+	Value interface{}
+}
+
+type unionSimple struct {
+	A int
+	B string
+	C []string
+}
+
+type unionComplex struct {
+	unionSimple
+	A int
+}
+
+type unionTagged struct {
+	A int `json:"A"`
+}
+
+type unionTaggedComplex struct {
+	unionSimple
+	unionTagged
+	B string
+}
+
+func TestUnionStructFields(t *testing.T) {
+	var cases = []struct {
+		in     interface{}
+		expect []testUnionValues
+	}{
+		{
+			in: unionSimple{1, "2", []string{"abc"}},
+			expect: []testUnionValues{
+				{"A", 1},
+				{"B", "2"},
+				{"C", []string{"abc"}},
+			},
+		},
+		{
+			in: unionComplex{
+				unionSimple: unionSimple{1, "2", []string{"abc"}},
+				A:           2,
+			},
+			expect: []testUnionValues{
+				{"B", "2"},
+				{"C", []string{"abc"}},
+				{"A", 2},
+			},
+		},
+		{
+			in: unionTaggedComplex{
+				unionSimple: unionSimple{1, "2", []string{"abc"}},
+				unionTagged: unionTagged{3},
+				B:           "3",
+			},
+			expect: []testUnionValues{
+				{"C", []string{"abc"}},
+				{"A", 3},
+				{"B", "3"},
+			},
+		},
+	}
+
+	for i, c := range cases {
+		v := reflect.ValueOf(c.in)
+
+		fields := unionStructFields(v.Type(), MarshalOptions{SupportJSONTags: true})
+		for j, f := range fields {
+			expected := c.expect[j]
+			assert.Equal(t, expected.Name, f.Name, "case %d, field %d", i, j)
+			actual := v.FieldByIndex(f.Index).Interface()
+			assert.EqualValues(t, expected.Value, actual, "case %d, field %d", i, j)
+		}
+	}
+}
+
+func TestFieldByName(t *testing.T) {
+	fields := []field{
+		{Name: "Abc"}, {Name: "mixCase"}, {Name: "UPPERCASE"},
+	}
+
+	cases := []struct {
+		Name, FieldName string
+		Found           bool
+	}{
+		{"abc", "Abc", true}, {"ABC", "Abc", true}, {"Abc", "Abc", true},
+		{"123", "", false},
+		{"ab", "", false},
+		{"MixCase", "mixCase", true},
+		{"uppercase", "UPPERCASE", true}, {"UPPERCASE", "UPPERCASE", true},
+	}
+
+	for _, c := range cases {
+		f, ok := fieldByName(fields, c.Name)
+		assert.Equal(t, c.Found, ok)
+		if ok {
+			assert.Equal(t, c.FieldName, f.Name)
+		}
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/marshaler_examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/marshaler_examples_test.go
new file mode 100644
index 000000000..28e915e35
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/marshaler_examples_test.go
@@ -0,0 +1,104 @@
+package dynamodbattribute_test
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/dynamodb"
+	"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
+)
+
+func ExampleMarshal() {
+	type Record struct {
+		Bytes   []byte
+		MyField string
+		Letters []string
+		Numbers []int
+	}
+
+	r := Record{
+		Bytes:   []byte{48, 49},
+		MyField: "MyFieldValue",
+		Letters: []string{"a", "b", "c", "d"},
+		Numbers: []int{1, 2, 3},
+	}
+	av, err := dynamodbattribute.Marshal(r)
+	fmt.Println("err", err)
+	fmt.Println("Bytes", av.M["Bytes"])
+	fmt.Println("MyField", av.M["MyField"])
+	fmt.Println("Letters", av.M["Letters"])
+	fmt.Println("Numbers", av.M["Numbers"])
+
+	// Output:
+	// err <nil>
+	// Bytes {
+	//   B: [48,49]
+	// }
+	// MyField {
+	//   S: "MyFieldValue"
+	// }
+	// Letters {
+	//   L: [
+	//     {
+	//       S: "a"
+	//     },
+	//     {
+	//       S: "b"
+	//     },
+	//     {
+	//       S: "c"
+	//     },
+	//     {
+	//       S: "d"
+	//     }
+	//   ]
+	// }
+	// Numbers {
+	//   L: [{
+	//       N: "1"
+	//     },{
+	//       N: "2"
+	//     },{
+	//       N: "3"
+	//     }]
+	// }
+}
+
+func ExampleUnmarshal() {
+	type Record struct {
+		Bytes   []byte
+		MyField string
+		Letters []string
+		A2Num   map[string]int
+	}
+
+	expect := Record{
+		Bytes:   []byte{48, 49},
+		MyField: "MyFieldValue",
+		Letters: []string{"a", "b", "c", "d"},
+		A2Num:   map[string]int{"a": 1, "b": 2, "c": 3},
+	}
+
+	av := &dynamodb.AttributeValue{
+		M: map[string]*dynamodb.AttributeValue{
+			"Bytes":   {B: []byte{48, 49}},
+			"MyField": {S: aws.String("MyFieldValue")},
+			"Letters": {L: []*dynamodb.AttributeValue{
+				{S: aws.String("a")}, {S: aws.String("b")}, {S: aws.String("c")}, {S: aws.String("d")},
+			}},
+			"A2Num": {M: map[string]*dynamodb.AttributeValue{
+				"a": {N: aws.String("1")},
+				"b": {N: aws.String("2")},
+				"c": {N: aws.String("3")},
+			}},
+		},
+	}
+
+	actual := Record{}
+	err := dynamodbattribute.Unmarshal(av, &actual)
+	fmt.Println(err, reflect.DeepEqual(expect, actual))
+
+	// Output:
+	// <nil> true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/marshaler_test.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/marshaler_test.go
new file mode 100644
index 000000000..819bbc6ca
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/marshaler_test.go
@@ -0,0 +1,526 @@
+package dynamodbattribute
+
+import (
+	"math"
+	"reflect"
+	"testing"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+	"github.com/aws/aws-sdk-go/service/dynamodb"
+)
+
+type simpleMarshalStruct struct {
+	Byte    []byte
+	String  string
+	Int     int
+	Uint    uint
+	Float32 float32
+	Float64 float64
+	Bool    bool
+	Null    *interface{}
+}
+
+type complexMarshalStruct struct {
+	Simple []simpleMarshalStruct
+}
+
+type myByteStruct struct {
+	Byte []byte
+}
+
+type myByteSetStruct struct {
+	ByteSet [][]byte
+}
+
+type marshallerTestInput struct {
+	input    interface{}
+	expected interface{}
+	err      awserr.Error
+}
+
+var marshalerScalarInputs = []marshallerTestInput{
+	{
+		input:    nil,
+		expected: &dynamodb.AttributeValue{NULL: &trueValue},
+	},
+	{
+		input:    "some string",
+		expected: &dynamodb.AttributeValue{S: aws.String("some string")},
+	},
+	{
+		input:    true,
+		expected: &dynamodb.AttributeValue{BOOL: &trueValue},
+	},
+	{
+		input:    false,
+		expected: &dynamodb.AttributeValue{BOOL: &falseValue},
+	},
+	{
+		input:    3.14,
+		expected: &dynamodb.AttributeValue{N: aws.String("3.14")},
+	},
+	{
+		input:    math.MaxFloat32,
+		expected: &dynamodb.AttributeValue{N: aws.String("340282346638528860000000000000000000000")},
+	},
+	{
+		input: math.MaxFloat64,
+		expected: &dynamodb.AttributeValue{N: aws.String("179769313486231570000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")},
+	},
+	{
+		input:    12,
+		expected: &dynamodb.AttributeValue{N: aws.String("12")},
+	},
+	{
+		input:    Number("12"),
+		expected: &dynamodb.AttributeValue{N: aws.String("12")},
+	},
+	{
+		input: simpleMarshalStruct{},
+		expected: &dynamodb.AttributeValue{
+			M: map[string]*dynamodb.AttributeValue{
+				"Byte":    {NULL: &trueValue},
+				"Bool":    {BOOL: &falseValue},
+				"Float32": {N: aws.String("0")},
+				"Float64": {N: aws.String("0")},
+				"Int":     {N: aws.String("0")},
+				"Null":    {NULL: &trueValue},
+				"String":  {NULL: &trueValue},
+				"Uint":    {N: aws.String("0")},
+			},
+		},
+	},
+}
+
+var marshallerMapTestInputs = []marshallerTestInput{
+	// Scalar tests
+	{
+		input:    nil,
+		expected: map[string]*dynamodb.AttributeValue{},
+	},
+	{
+		input:    map[string]interface{}{"string": "some string"},
+		expected: map[string]*dynamodb.AttributeValue{"string": {S: aws.String("some string")}},
+	},
+	{
+		input:    map[string]interface{}{"bool": true},
+		expected: map[string]*dynamodb.AttributeValue{"bool": {BOOL: &trueValue}},
+	},
+	{
+		input:    map[string]interface{}{"bool": false},
+		expected: map[string]*dynamodb.AttributeValue{"bool": {BOOL: &falseValue}},
+	},
+	{
+		input:    map[string]interface{}{"null": nil},
+		expected: map[string]*dynamodb.AttributeValue{"null": {NULL: &trueValue}},
+	},
+	{
+		input:    map[string]interface{}{"float": 3.14},
+		expected: map[string]*dynamodb.AttributeValue{"float": {N: aws.String("3.14")}},
+	},
+	{
+		input:    map[string]interface{}{"float": math.MaxFloat32},
+		expected: map[string]*dynamodb.AttributeValue{"float": {N: aws.String("340282346638528860000000000000000000000")}},
+	},
+	{
+		input:    map[string]interface{}{"float": math.MaxFloat64},
+		expected: map[string]*dynamodb.AttributeValue{"float": {N: aws.String("179769313486231570000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")}},
+	},
+	{
+		input:    map[string]interface{}{"num": 12.},
+		expected: map[string]*dynamodb.AttributeValue{"num": {N: aws.String("12")}},
+	},
+	{
+		input:    map[string]interface{}{"byte": []byte{48, 49}},
+		expected: map[string]*dynamodb.AttributeValue{"byte": {B: []byte{48, 49}}},
+	},
+	{
+		input:    struct{ Byte []byte }{Byte: []byte{48, 49}},
+		expected: map[string]*dynamodb.AttributeValue{"Byte": {B: []byte{48, 49}}},
+	},
+	{
+		input:    map[string]interface{}{"byte_set": [][]byte{{48, 49}, {50, 51}}},
+		expected: map[string]*dynamodb.AttributeValue{"byte_set": {BS: [][]byte{{48, 49}, {50, 51}}}},
+	},
+	{
+		input:    struct{ ByteSet [][]byte }{ByteSet: [][]byte{{48, 49}, {50, 51}}},
+		expected: map[string]*dynamodb.AttributeValue{"ByteSet": {BS: [][]byte{{48, 49}, {50, 51}}}},
+	},
+	// List
+	{
+		input: map[string]interface{}{"list": []interface{}{"a string", 12., 3.14, true, nil, false}},
+		expected: map[string]*dynamodb.AttributeValue{
+			"list": {
+				L: []*dynamodb.AttributeValue{
+					{S: aws.String("a string")},
+					{N: aws.String("12")},
+					{N: aws.String("3.14")},
+					{BOOL: &trueValue},
+					{NULL: &trueValue},
+					{BOOL: &falseValue},
+				},
+			},
+		},
+	},
+	// Map
+	{
+		input: map[string]interface{}{"map": map[string]interface{}{"nestednum": 12.}},
+		expected: map[string]*dynamodb.AttributeValue{
+			"map": {
+				M: map[string]*dynamodb.AttributeValue{
+					"nestednum": {
+						N: aws.String("12"),
+					},
+				},
+			},
+		},
+	},
+	// Structs
+	{
+		input: simpleMarshalStruct{},
+		expected: map[string]*dynamodb.AttributeValue{
+			"Byte":    {NULL: &trueValue},
+			"Bool":    {BOOL: &falseValue},
+			"Float32": {N: aws.String("0")},
+			"Float64": {N: aws.String("0")},
+			"Int":     {N: aws.String("0")},
+			"Null":    {NULL: &trueValue},
+			"String":  {NULL: &trueValue},
+			"Uint":    {N: aws.String("0")},
+		},
+	},
+	{
+		input: complexMarshalStruct{},
+		expected: map[string]*dynamodb.AttributeValue{
+			"Simple": {NULL: &trueValue},
+		},
+	},
+	{
+		input: struct {
+			Simple []string `json:"simple"`
+		}{},
+		expected: map[string]*dynamodb.AttributeValue{
+			"simple": {NULL: &trueValue},
+		},
+	},
+	{
+		input: struct {
+			Simple []string `json:"simple,omitempty"`
+		}{},
+		expected: map[string]*dynamodb.AttributeValue{},
+	},
+	{
+		input: struct {
+			Simple []string `json:"-"`
+		}{},
+		expected: map[string]*dynamodb.AttributeValue{},
+	},
+	{
+		input: complexMarshalStruct{Simple: []simpleMarshalStruct{{Int: -2}, {Uint: 5}}},
+		expected: map[string]*dynamodb.AttributeValue{
+			"Simple": {
+				L: []*dynamodb.AttributeValue{
+					{
+						M: map[string]*dynamodb.AttributeValue{
+							"Byte":    {NULL: &trueValue},
+							"Bool":    {BOOL: &falseValue},
+							"Float32": {N: aws.String("0")},
+							"Float64": {N: aws.String("0")},
+							"Int":     {N: aws.String("-2")},
+							"Null":    {NULL: &trueValue},
+							"String":  {NULL: &trueValue},
+							"Uint":    {N: aws.String("0")},
+						},
+					},
+					{
+						M: map[string]*dynamodb.AttributeValue{
+							"Byte":    {NULL: &trueValue},
+							"Bool":    {BOOL: &falseValue},
+							"Float32": {N: aws.String("0")},
+							"Float64": {N: aws.String("0")},
+							"Int":     {N: aws.String("0")},
+							"Null":    {NULL: &trueValue},
+							"String":  {NULL: &trueValue},
+							"Uint":    {N: aws.String("5")},
+						},
+					},
+				},
+			},
+		},
+	},
+}
+
+var marshallerListTestInputs = []marshallerTestInput{
+	{
+		input:    nil,
+		expected: []*dynamodb.AttributeValue{},
+	},
+	{
+		input:    []interface{}{},
+		expected: []*dynamodb.AttributeValue{},
+	},
+	{
+		input:    []simpleMarshalStruct{},
+		expected: []*dynamodb.AttributeValue{},
+	},
+	{
+		input: []interface{}{"a string", 12., 3.14, true, nil, false},
+		expected: []*dynamodb.AttributeValue{
+			{S: aws.String("a string")},
+			{N: aws.String("12")},
+			{N: aws.String("3.14")},
+			{BOOL: &trueValue},
+			{NULL: &trueValue},
+			{BOOL: &falseValue},
+		},
+	},
+	{
+		input: []simpleMarshalStruct{{}},
+		expected: []*dynamodb.AttributeValue{
+			{
+				M: map[string]*dynamodb.AttributeValue{
+					"Byte":    {NULL: &trueValue},
+					"Bool":    {BOOL: &falseValue},
+					"Float32": {N: aws.String("0")},
+					"Float64": {N: aws.String("0")},
+					"Int":     {N: aws.String("0")},
+					"Null":    {NULL: &trueValue},
+					"String":  {NULL: &trueValue},
+					"Uint":    {N: aws.String("0")},
+				},
+			},
+		},
+	},
+}
+
+func Test_New_Marshal(t *testing.T) {
+	for _, test := range marshalerScalarInputs {
+		testMarshal(t, test)
+	}
+}
+
+func testMarshal(t *testing.T, test marshallerTestInput) {
+	actual, err := Marshal(test.input)
+	if test.err != nil {
+		if err == nil {
+			t.Errorf("Marshal with input %#v returned %#v, expected error `%s`", test.input, actual, test.err)
+		} else if err.Error() != test.err.Error() {
+			t.Errorf("Marshal with input %#v returned error `%s`, expected error `%s`", test.input, err, test.err)
+		}
+	} else {
+		if err != nil {
+			t.Errorf("Marshal with input %#v returned error `%s`", test.input, err)
+		}
+		compareObjects(t, test.expected, actual)
+	}
+}
+
+func Test_New_Unmarshal(t *testing.T) {
+	// Using the same inputs from Marshal, test the reverse mapping.
+	for i, test := range marshalerScalarInputs {
+		if test.input == nil {
+			continue
+		}
+		actual := reflect.New(reflect.TypeOf(test.input)).Interface()
+		if err := Unmarshal(test.expected.(*dynamodb.AttributeValue), actual); err != nil {
+			t.Errorf("Unmarshal %d, with input %#v returned error `%s`", i+1, test.expected, err)
+		}
+		compareObjects(t, test.input, reflect.ValueOf(actual).Elem().Interface())
+	}
+}
+
+func Test_New_UnmarshalError(t *testing.T) {
+	// Test that we get an error using Unmarshal to convert to a nil value.
+	expected := &InvalidUnmarshalError{Type: reflect.TypeOf(nil)}
+	if err := Unmarshal(nil, nil); err == nil {
+		t.Errorf("Unmarshal with input %T returned no error, expected error `%v`", nil, expected)
+	} else if err.Error() != expected.Error() {
+		t.Errorf("Unmarshal with input %T returned error `%v`, expected error `%v`", nil, err, expected)
+	}
+
+	// Test that we get an error using Unmarshal to convert to a non-pointer value.
+	var actual map[string]interface{}
+	expected = &InvalidUnmarshalError{Type: reflect.TypeOf(actual)}
+	if err := Unmarshal(nil, actual); err == nil {
+		t.Errorf("Unmarshal with input %T returned no error, expected error `%v`", actual, expected)
+	} else if err.Error() != expected.Error() {
+		t.Errorf("Unmarshal with input %T returned error `%v`, expected error `%v`", actual, err, expected)
+	}
+
+	// Test that we get an error using Unmarshal to convert to a nil struct.
+	var actual2 *struct{ A int }
+	expected = &InvalidUnmarshalError{Type: reflect.TypeOf(actual2)}
+	if err := Unmarshal(nil, actual2); err == nil {
+		t.Errorf("Unmarshal with input %T returned no error, expected error `%v`", actual2, expected)
+	} else if err.Error() != expected.Error() {
+		t.Errorf("Unmarshal with input %T returned error `%v`, expected error `%v`", actual2, err, expected)
+	}
+}
+
+func Test_New_MarshalMap(t *testing.T) {
+	for _, test := range marshallerMapTestInputs {
+		testMarshalMap(t, test)
+	}
+}
+
+func testMarshalMap(t *testing.T, test marshallerTestInput) {
+	actual, err := MarshalMap(test.input)
+	if test.err != nil {
+		if err == nil {
+			t.Errorf("MarshalMap with input %#v returned %#v, expected error `%s`", test.input, actual, test.err)
+		} else if err.Error() != test.err.Error() {
+			t.Errorf("MarshalMap with input %#v returned error `%s`, expected error `%s`", test.input, err, test.err)
+		}
+	} else {
+		if err != nil {
+			t.Errorf("MarshalMap with input %#v returned error `%s`", test.input, err)
+		}
+		compareObjects(t, test.expected, actual)
+	}
+}
+
+func Test_New_UnmarshalMap(t *testing.T) {
+	// Using the same inputs from MarshalMap, test the reverse mapping.
+	for i, test := range marshallerMapTestInputs {
+		if test.input == nil {
+			continue
+		}
+		actual := reflect.New(reflect.TypeOf(test.input)).Interface()
+		if err := UnmarshalMap(test.expected.(map[string]*dynamodb.AttributeValue), actual); err != nil {
+			t.Errorf("Unmarshal %d, with input %#v returned error `%s`", i+1, test.expected, err)
+		}
+		compareObjects(t, test.input, reflect.ValueOf(actual).Elem().Interface())
+	}
+}
+
+func Test_New_UnmarshalMapError(t *testing.T) {
+	// Test that we get an error using UnmarshalMap to convert to a nil value.
+	expected := &InvalidUnmarshalError{Type: reflect.TypeOf(nil)}
+	if err := UnmarshalMap(nil, nil); err == nil {
+		t.Errorf("UnmarshalMap with input %T returned no error, expected error `%v`", nil, expected)
+	} else if err.Error() != expected.Error() {
+		t.Errorf("UnmarshalMap with input %T returned error `%v`, expected error `%v`", nil, err, expected)
+	}
+
+	// Test that we get an error using UnmarshalMap to convert to a non-pointer value.
+	var actual map[string]interface{}
+	expected = &InvalidUnmarshalError{Type: reflect.TypeOf(actual)}
+	if err := UnmarshalMap(nil, actual); err == nil {
+		t.Errorf("UnmarshalMap with input %T returned no error, expected error `%v`", actual, expected)
+	} else if err.Error() != expected.Error() {
+		t.Errorf("UnmarshalMap with input %T returned error `%v`, expected error `%v`", actual, err, expected)
+	}
+
+	// Test that we get an error using UnmarshalMap to convert to a nil struct.
+	var actual2 *struct{ A int }
+	expected = &InvalidUnmarshalError{Type: reflect.TypeOf(actual2)}
+	if err := UnmarshalMap(nil, actual2); err == nil {
+		t.Errorf("UnmarshalMap with input %T returned no error, expected error `%v`", actual2, expected)
+	} else if err.Error() != expected.Error() {
+		t.Errorf("UnmarshalMap with input %T returned error `%v`, expected error `%v`", actual2, err, expected)
+	}
+}
+
+func Test_New_MarshalList(t *testing.T) {
+	for _, test := range marshallerListTestInputs {
+		testMarshalList(t, test)
+	}
+}
+
+func testMarshalList(t *testing.T, test marshallerTestInput) {
+	actual, err := MarshalList(test.input)
+	if test.err != nil {
+		if err == nil {
+			t.Errorf("MarshalList with input %#v returned %#v, expected error `%s`", test.input, actual, test.err)
+		} else if err.Error() != test.err.Error() {
+			t.Errorf("MarshalList with input %#v returned error `%s`, expected error `%s`", test.input, err, test.err)
+		}
+	} else {
+		if err != nil {
+			t.Errorf("MarshalList with input %#v returned error `%s`", test.input, err)
+		}
+		compareObjects(t, test.expected, actual)
+	}
+}
+
+func Test_New_UnmarshalList(t *testing.T) {
+	// Using the same inputs from MarshalList, test the reverse mapping.
+	for i, test := range marshallerListTestInputs {
+		if test.input == nil {
+			continue
+		}
+		iv := reflect.ValueOf(test.input)
+
+		actual := reflect.New(iv.Type())
+		if iv.Kind() == reflect.Slice {
+			actual.Elem().Set(reflect.MakeSlice(iv.Type(), iv.Len(), iv.Cap()))
+		}
+
+		if err := UnmarshalList(test.expected.([]*dynamodb.AttributeValue), actual.Interface()); err != nil {
+			t.Errorf("Unmarshal %d, with input %#v returned error `%s`", i+1, test.expected, err)
+		}
+		compareObjects(t, test.input, actual.Elem().Interface())
+	}
+}
+
+func Test_New_UnmarshalListError(t *testing.T) {
+	// Test that we get an error using UnmarshalList to convert to a nil value.
+	expected := &InvalidUnmarshalError{Type: reflect.TypeOf(nil)}
+	if err := UnmarshalList(nil, nil); err == nil {
+		t.Errorf("UnmarshalList with input %T returned no error, expected error `%v`", nil, expected)
+	} else if err.Error() != expected.Error() {
+		t.Errorf("UnmarshalList with input %T returned error `%v`, expected error `%v`", nil, err, expected)
+	}
+
+	// Test that we get an error using UnmarshalList to convert to a non-pointer value.
+	var actual map[string]interface{}
+	expected = &InvalidUnmarshalError{Type: reflect.TypeOf(actual)}
+	if err := UnmarshalList(nil, actual); err == nil {
+		t.Errorf("UnmarshalList with input %T returned no error, expected error `%v`", actual, expected)
+	} else if err.Error() != expected.Error() {
+		t.Errorf("UnmarshalList with input %T returned error `%v`, expected error `%v`", actual, err, expected)
+	}
+
+	// Test that we get an error using UnmarshalList to convert to a nil struct.
+	var actual2 *struct{ A int }
+	expected = &InvalidUnmarshalError{Type: reflect.TypeOf(actual2)}
+	if err := UnmarshalList(nil, actual2); err == nil {
+		t.Errorf("UnmarshalList with input %T returned no error, expected error `%v`", actual2, expected)
+	} else if err.Error() != expected.Error() {
+		t.Errorf("UnmarshalList with input %T returned error `%v`, expected error `%v`", actual2, err, expected)
+	}
+}
+
+func compareObjects(t *testing.T, expected interface{}, actual interface{}) {
+	if !reflect.DeepEqual(expected, actual) {
+		ev := reflect.ValueOf(expected)
+		av := reflect.ValueOf(actual)
+		t.Errorf("\nExpected kind(%s,%T):\n%s\nActual kind(%s,%T):\n%s\n",
+			ev.Kind(),
+			ev.Interface(),
+			awsutil.Prettify(expected),
+			av.Kind(),
+			av.Interface(),
+			awsutil.Prettify(actual))
+	}
+}
+
+func BenchmarkMarshal(b *testing.B) {
+	d := simpleMarshalStruct{
+		String:  "abc",
+		Int:     123,
+		Uint:    123,
+		Float32: 123.321,
+		Float64: 123.321,
+		Bool:    true,
+		Null:    nil,
+	}
+	for i := 0; i < b.N; i++ {
+		_, err := Marshal(d)
+		if err != nil {
+			b.Fatal("unexpected error", err)
+		}
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/shared_test.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/shared_test.go
new file mode 100644
index 000000000..3546ec30f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/shared_test.go
@@ -0,0 +1,389 @@
+package dynamodbattribute
+
+import (
+	"reflect"
+	"testing"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/dynamodb"
+	"github.com/stretchr/testify/assert"
+)
+
+type testBinarySetStruct struct {
+	Binarys [][]byte `dynamodbav:",binaryset"`
+}
+type testNumberSetStruct struct {
+	Numbers []int `dynamodbav:",numberset"`
+}
+type testStringSetStruct struct {
+	Strings []string `dynamodbav:",stringset"`
+}
+
+type testIntAsStringStruct struct {
+	Value int `dynamodbav:",string"`
+}
+
+type testOmitEmptyStruct struct {
+	Value  string  `dynamodbav:",omitempty"`
+	Value2 *string `dynamodbav:",omitempty"`
+	Value3 int
+}
+
+type testAliasedString string
+type testAliasedStringSlice []string
+type testAliasedInt int
+type testAliasedIntSlice []int
+type testAliasedMap map[string]int
+type testAliasedSlice []string
+type testAliasedByteSlice []byte
+
+type testAliasedStruct struct {
+	Value  testAliasedString
+	Value2 testAliasedInt
+	Value3 testAliasedMap
+	Value4 testAliasedSlice
+
+	Value5 testAliasedByteSlice
+	Value6 []testAliasedInt
+	Value7 []testAliasedString
+
+	Value8  []testAliasedByteSlice `dynamodbav:",binaryset"`
+	Value9  []testAliasedInt       `dynamodbav:",numberset"`
+	Value10 []testAliasedString    `dynamodbav:",stringset"`
+
+	Value11 testAliasedIntSlice
+	Value12 testAliasedStringSlice
+}
+
+type testNamedPointer *int
+
+var testDate, _ = time.Parse(time.RFC3339, "2016-05-03T17:06:26.209072Z")
+
+var sharedTestCases = []struct {
+	in               *dynamodb.AttributeValue
+	actual, expected interface{}
+	err              error
+}{
+	{ // Binary slice
+		in:       &dynamodb.AttributeValue{B: []byte{48, 49}},
+		actual:   &[]byte{},
+		expected: []byte{48, 49},
+	},
+	{ // Binary slice
+		in:       &dynamodb.AttributeValue{B: []byte{48, 49}},
+		actual:   &[]byte{},
+		expected: []byte{48, 49},
+	},
+	{ // Binary slice oversized
+		in: &dynamodb.AttributeValue{B: []byte{48, 49}},
+		actual: func() *[]byte {
+			v := make([]byte, 0, 10)
+			return &v
+		}(),
+		expected: []byte{48, 49},
+	},
+	{ // Binary slice pointer
+		in: &dynamodb.AttributeValue{B: []byte{48, 49}},
+		actual: func() **[]byte {
+			v := make([]byte, 0, 10)
+			v2 := &v
+			return &v2
+		}(),
+		expected: []byte{48, 49},
+	},
+	{ // Bool
+		in:       &dynamodb.AttributeValue{BOOL: aws.Bool(true)},
+		actual:   new(bool),
+		expected: true,
+	},
+	{ // List
+		in: &dynamodb.AttributeValue{L: []*dynamodb.AttributeValue{
+			{N: aws.String("123")},
+		}},
+		actual:   &[]int{},
+		expected: []int{123},
+	},
+	{ // Map, interface
+		in: &dynamodb.AttributeValue{M: map[string]*dynamodb.AttributeValue{
+			"abc": {N: aws.String("123")},
+		}},
+		actual:   &map[string]int{},
+		expected: map[string]int{"abc": 123},
+	},
+	{ // Map, struct
+		in: &dynamodb.AttributeValue{M: map[string]*dynamodb.AttributeValue{
+			"Abc": {N: aws.String("123")},
+		}},
+		actual:   &struct{ Abc int }{},
+		expected: struct{ Abc int }{Abc: 123},
+	},
+	{ // Map, struct
+		in: &dynamodb.AttributeValue{M: map[string]*dynamodb.AttributeValue{
+			"abc": {N: aws.String("123")},
+		}},
+		actual: &struct {
+			Abc int `json:"abc" dynamodbav:"abc"`
+		}{},
+		expected: struct {
+			Abc int `json:"abc" dynamodbav:"abc"`
+		}{Abc: 123},
+	},
+	{ // Number, int
+		in:       &dynamodb.AttributeValue{N: aws.String("123")},
+		actual:   new(int),
+		expected: 123,
+	},
+	{ // Number, Float
+		in:       &dynamodb.AttributeValue{N: aws.String("123.1")},
+		actual:   new(float64),
+		expected: float64(123.1),
+	},
+	{ // Null
+		in:       &dynamodb.AttributeValue{NULL: aws.Bool(true)},
+		actual:   new(string),
+		expected: "",
+	},
+	{ // Null ptr
+		in:       &dynamodb.AttributeValue{NULL: aws.Bool(true)},
+		actual:   new(*string),
+		expected: nil,
+	},
+	{ // String
+		in:       &dynamodb.AttributeValue{S: aws.String("abc")},
+		actual:   new(string),
+		expected: "abc",
+	},
+	{ // Binary Set
+		in: &dynamodb.AttributeValue{
+			M: map[string]*dynamodb.AttributeValue{
+				"Binarys": {BS: [][]byte{{48, 49}, {50, 51}}},
+			},
+		},
+		actual:   &testBinarySetStruct{},
+		expected: testBinarySetStruct{Binarys: [][]byte{{48, 49}, {50, 51}}},
+	},
+	{ // Number Set
+		in: &dynamodb.AttributeValue{
+			M: map[string]*dynamodb.AttributeValue{
+				"Numbers": {NS: []*string{aws.String("123"), aws.String("321")}},
+			},
+		},
+		actual:   &testNumberSetStruct{},
+		expected: testNumberSetStruct{Numbers: []int{123, 321}},
+	},
+	{ // String Set
+		in: &dynamodb.AttributeValue{
+			M: map[string]*dynamodb.AttributeValue{
+				"Strings": {SS: []*string{aws.String("abc"), aws.String("efg")}},
+			},
+		},
+		actual:   &testStringSetStruct{},
+		expected: testStringSetStruct{Strings: []string{"abc", "efg"}},
+	},
+	{ // Int value as string
+		in: &dynamodb.AttributeValue{
+			M: map[string]*dynamodb.AttributeValue{
+				"Value": {S: aws.String("123")},
+			},
+		},
+		actual:   &testIntAsStringStruct{},
+		expected: testIntAsStringStruct{Value: 123},
+	},
+	{ // Omitempty
+		in: &dynamodb.AttributeValue{
+			M: map[string]*dynamodb.AttributeValue{
+				"Value3": {N: aws.String("0")},
+			},
+		},
+		actual:   &testOmitEmptyStruct{},
+		expected: testOmitEmptyStruct{Value: "", Value2: nil, Value3: 0},
+	},
+	{ // aliased type
+		in: &dynamodb.AttributeValue{
+			M: map[string]*dynamodb.AttributeValue{
aws.String("123")}, + "Value2": {N: aws.String("123")}, + "Value3": {M: map[string]*dynamodb.AttributeValue{ + "Key": {N: aws.String("321")}, + }}, + "Value4": {L: []*dynamodb.AttributeValue{ + {S: aws.String("1")}, + {S: aws.String("2")}, + {S: aws.String("3")}, + }}, + "Value5": {B: []byte{0, 1, 2}}, + "Value6": {L: []*dynamodb.AttributeValue{ + {N: aws.String("1")}, + {N: aws.String("2")}, + {N: aws.String("3")}, + }}, + "Value7": {L: []*dynamodb.AttributeValue{ + {S: aws.String("1")}, + {S: aws.String("2")}, + {S: aws.String("3")}, + }}, + "Value8": {BS: [][]byte{ + {0, 1, 2}, {3, 4, 5}, + }}, + "Value9": {NS: []*string{ + aws.String("1"), + aws.String("2"), + aws.String("3"), + }}, + "Value10": {SS: []*string{ + aws.String("1"), + aws.String("2"), + aws.String("3"), + }}, + "Value11": {L: []*dynamodb.AttributeValue{ + {N: aws.String("1")}, + {N: aws.String("2")}, + {N: aws.String("3")}, + }}, + "Value12": {L: []*dynamodb.AttributeValue{ + {S: aws.String("1")}, + {S: aws.String("2")}, + {S: aws.String("3")}, + }}, + }, + }, + actual: &testAliasedStruct{}, + expected: testAliasedStruct{ + Value: "123", Value2: 123, + Value3: testAliasedMap{ + "Key": 321, + }, + Value4: testAliasedSlice{"1", "2", "3"}, + Value5: testAliasedByteSlice{0, 1, 2}, + Value6: []testAliasedInt{1, 2, 3}, + Value7: []testAliasedString{"1", "2", "3"}, + Value8: []testAliasedByteSlice{ + {0, 1, 2}, + {3, 4, 5}, + }, + Value9: []testAliasedInt{1, 2, 3}, + Value10: []testAliasedString{"1", "2", "3"}, + Value11: testAliasedIntSlice{1, 2, 3}, + Value12: testAliasedStringSlice{"1", "2", "3"}, + }, + }, + { + in: &dynamodb.AttributeValue{N: aws.String("123")}, + actual: new(testNamedPointer), + expected: testNamedPointer(aws.Int(123)), + }, + { // time.Time + in: &dynamodb.AttributeValue{S: aws.String("2016-05-03T17:06:26.209072Z")}, + actual: new(time.Time), + expected: testDate, + }, + { // time.Time List + in: &dynamodb.AttributeValue{L: []*dynamodb.AttributeValue{ + {S: aws.String("2016-05-03T17:06:26.209072Z")}, + {S: aws.String("2016-05-04T17:06:26.209072Z")}, + }}, + actual: new([]time.Time), + expected: []time.Time{testDate, testDate.Add(24 * time.Hour)}, + }, + { // time.Time struct + in: &dynamodb.AttributeValue{M: map[string]*dynamodb.AttributeValue{ + "abc": {S: aws.String("2016-05-03T17:06:26.209072Z")}, + }}, + actual: &struct { + Abc time.Time `json:"abc" dynamodbav:"abc"` + }{}, + expected: struct { + Abc time.Time `json:"abc" dynamodbav:"abc"` + }{Abc: testDate}, + }, + { // time.Time ptr struct + in: &dynamodb.AttributeValue{M: map[string]*dynamodb.AttributeValue{ + "abc": {S: aws.String("2016-05-03T17:06:26.209072Z")}, + }}, + actual: &struct { + Abc *time.Time `json:"abc" dynamodbav:"abc"` + }{}, + expected: struct { + Abc *time.Time `json:"abc" dynamodbav:"abc"` + }{Abc: &testDate}, + }, +} + +var sharedListTestCases = []struct { + in []*dynamodb.AttributeValue + actual, expected interface{} + err error +}{ + { + in: []*dynamodb.AttributeValue{ + {B: []byte{48, 49}}, + {BOOL: aws.Bool(true)}, + {N: aws.String("123")}, + {S: aws.String("123")}, + }, + actual: func() *[]interface{} { + v := []interface{}{} + return &v + }(), + expected: []interface{}{[]byte{48, 49}, true, 123., "123"}, + }, + { + in: []*dynamodb.AttributeValue{ + {N: aws.String("1")}, + {N: aws.String("2")}, + {N: aws.String("3")}, + }, + actual: &[]interface{}{}, + expected: []interface{}{1., 2., 3.}, + }, +} + +var sharedMapTestCases = []struct { + in map[string]*dynamodb.AttributeValue + actual, expected interface{} + err error +}{ + 
{
+ in: map[string]*dynamodb.AttributeValue{
+ "B": {B: []byte{48, 49}},
+ "BOOL": {BOOL: aws.Bool(true)},
+ "N": {N: aws.String("123")},
+ "S": {S: aws.String("123")},
+ },
+ actual: &map[string]interface{}{},
+ expected: map[string]interface{}{
+ "B": []byte{48, 49}, "BOOL": true,
+ "N": 123., "S": "123",
+ },
+ },
+}
+
+func assertConvertTest(t *testing.T, i int, actual, expected interface{}, err, expectedErr error) {
+ i++
+ if expectedErr != nil {
+ if err != nil {
+ assert.Equal(t, expectedErr, err, "case %d", i)
+ } else {
+ assert.Fail(t, "", "case %d, expected error, %v", i, expectedErr)
+ }
+ } else if err != nil {
+ assert.Fail(t, "", "case %d, expect no error, got %v", i, err)
+ } else {
+ assert.Equal(t, ptrToValue(expected), ptrToValue(actual), "case %d", i)
+ }
+}
+
+func ptrToValue(in interface{}) interface{} {
+ v := reflect.ValueOf(in)
+ if v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+ if !v.IsValid() {
+ return nil
+ }
+ if v.Kind() == reflect.Ptr {
+ return ptrToValue(v.Interface())
+ }
+ return v.Interface()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/tag.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/tag.go
new file mode 100644
index 000000000..0b63eb7d1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/tag.go
@@ -0,0 +1,67 @@
+package dynamodbattribute
+
+import (
+ "reflect"
+ "strings"
+)
+
+type tag struct {
+ Name string
+ Ignore bool
+ OmitEmpty bool
+ OmitEmptyElem bool
+ AsString bool
+ AsBinSet, AsNumSet, AsStrSet bool
+}
+
+func (t *tag) parseAVTag(structTag reflect.StructTag) {
+ tagStr := structTag.Get("dynamodbav")
+ if len(tagStr) == 0 {
+ return
+ }
+
+ t.parseTagStr(tagStr)
+}
+
+func (t *tag) parseJSONTag(structTag reflect.StructTag) {
+ tagStr := structTag.Get("json")
+ if len(tagStr) == 0 {
+ return
+ }
+
+ t.parseTagStr(tagStr)
+}
+
+// parseTagStr parses a comma-separated tag string. The first element is the
+// attribute name (or "-" to ignore the field); the rest are options.
+func (t *tag) parseTagStr(tagStr string) {
+ parts := strings.Split(tagStr, ",")
+ if len(parts) == 0 {
+ return
+ }
+
+ if name := parts[0]; name == "-" {
+ t.Name = ""
+ t.Ignore = true
+ } else {
+ t.Name = name
+ t.Ignore = false
+ }
+
+ for _, opt := range parts[1:] {
+ switch opt {
+ case "omitempty":
+ t.OmitEmpty = true
+ case "omitemptyelem":
+ t.OmitEmptyElem = true
+ case "string":
+ t.AsString = true
+ case "binaryset":
+ t.AsBinSet = true
+ case "numberset":
+ t.AsNumSet = true
+ case "stringset":
+ t.AsStrSet = true
+ }
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/tag_test.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/tag_test.go
new file mode 100644
index 000000000..09701036f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/tag_test.go
@@ -0,0 +1,44 @@
+package dynamodbattribute
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestTagParse(t *testing.T) {
+ cases := []struct {
+ in reflect.StructTag
+ json, av bool
+ expect tag
+ }{
+ {`json:""`, true, false, tag{}},
+ {`json:"name"`, true, false, tag{Name: "name"}},
+ {`json:"name,omitempty"`, true, false, tag{Name: "name", OmitEmpty: true}},
+ {`json:"-"`, true, false, tag{Ignore: true}},
+ {`json:",omitempty"`, true, false, tag{OmitEmpty: true}},
+ {`json:",string"`, true, false, tag{AsString: true}},
+ {`dynamodbav:""`, false, true, tag{}},
+ {`dynamodbav:","`, false, true, tag{}},
+ {`dynamodbav:"name"`, false, true, tag{Name: "name"}},
+ 
{`dynamodbav:"-"`, false, true, tag{Ignore: true}}, + {`dynamodbav:",omitempty"`, false, true, tag{OmitEmpty: true}}, + {`dynamodbav:",omitemptyelem"`, false, true, tag{OmitEmptyElem: true}}, + {`dynamodbav:",string"`, false, true, tag{AsString: true}}, + {`dynamodbav:",binaryset"`, false, true, tag{AsBinSet: true}}, + {`dynamodbav:",numberset"`, false, true, tag{AsNumSet: true}}, + {`dynamodbav:",stringset"`, false, true, tag{AsStrSet: true}}, + } + + for i, c := range cases { + actual := tag{} + if c.json { + actual.parseJSONTag(c.in) + } + if c.av { + actual.parseAVTag(c.in) + } + assert.Equal(t, c.expect, actual, "case %d", i+1) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go new file mode 100644 index 000000000..0b6df4ef5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go @@ -0,0 +1,78 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package dynamodbiface provides an interface for the Amazon DynamoDB. +package dynamodbiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/dynamodb" +) + +// DynamoDBAPI is the interface type for dynamodb.DynamoDB. +type DynamoDBAPI interface { + BatchGetItemRequest(*dynamodb.BatchGetItemInput) (*request.Request, *dynamodb.BatchGetItemOutput) + + BatchGetItem(*dynamodb.BatchGetItemInput) (*dynamodb.BatchGetItemOutput, error) + + BatchGetItemPages(*dynamodb.BatchGetItemInput, func(*dynamodb.BatchGetItemOutput, bool) bool) error + + BatchWriteItemRequest(*dynamodb.BatchWriteItemInput) (*request.Request, *dynamodb.BatchWriteItemOutput) + + BatchWriteItem(*dynamodb.BatchWriteItemInput) (*dynamodb.BatchWriteItemOutput, error) + + CreateTableRequest(*dynamodb.CreateTableInput) (*request.Request, *dynamodb.CreateTableOutput) + + CreateTable(*dynamodb.CreateTableInput) (*dynamodb.CreateTableOutput, error) + + DeleteItemRequest(*dynamodb.DeleteItemInput) (*request.Request, *dynamodb.DeleteItemOutput) + + DeleteItem(*dynamodb.DeleteItemInput) (*dynamodb.DeleteItemOutput, error) + + DeleteTableRequest(*dynamodb.DeleteTableInput) (*request.Request, *dynamodb.DeleteTableOutput) + + DeleteTable(*dynamodb.DeleteTableInput) (*dynamodb.DeleteTableOutput, error) + + DescribeLimitsRequest(*dynamodb.DescribeLimitsInput) (*request.Request, *dynamodb.DescribeLimitsOutput) + + DescribeLimits(*dynamodb.DescribeLimitsInput) (*dynamodb.DescribeLimitsOutput, error) + + DescribeTableRequest(*dynamodb.DescribeTableInput) (*request.Request, *dynamodb.DescribeTableOutput) + + DescribeTable(*dynamodb.DescribeTableInput) (*dynamodb.DescribeTableOutput, error) + + GetItemRequest(*dynamodb.GetItemInput) (*request.Request, *dynamodb.GetItemOutput) + + GetItem(*dynamodb.GetItemInput) (*dynamodb.GetItemOutput, error) + + ListTablesRequest(*dynamodb.ListTablesInput) (*request.Request, *dynamodb.ListTablesOutput) + + ListTables(*dynamodb.ListTablesInput) (*dynamodb.ListTablesOutput, error) + + ListTablesPages(*dynamodb.ListTablesInput, func(*dynamodb.ListTablesOutput, bool) bool) error + + PutItemRequest(*dynamodb.PutItemInput) (*request.Request, *dynamodb.PutItemOutput) + + PutItem(*dynamodb.PutItemInput) (*dynamodb.PutItemOutput, error) + + QueryRequest(*dynamodb.QueryInput) (*request.Request, *dynamodb.QueryOutput) + + Query(*dynamodb.QueryInput) (*dynamodb.QueryOutput, error) + + QueryPages(*dynamodb.QueryInput, func(*dynamodb.QueryOutput, bool) bool) error + + 
ScanRequest(*dynamodb.ScanInput) (*request.Request, *dynamodb.ScanOutput) + + Scan(*dynamodb.ScanInput) (*dynamodb.ScanOutput, error) + + ScanPages(*dynamodb.ScanInput, func(*dynamodb.ScanOutput, bool) bool) error + + UpdateItemRequest(*dynamodb.UpdateItemInput) (*request.Request, *dynamodb.UpdateItemOutput) + + UpdateItem(*dynamodb.UpdateItemInput) (*dynamodb.UpdateItemOutput, error) + + UpdateTableRequest(*dynamodb.UpdateTableInput) (*request.Request, *dynamodb.UpdateTableOutput) + + UpdateTable(*dynamodb.UpdateTableInput) (*dynamodb.UpdateTableOutput, error) +} + +var _ DynamoDBAPI = (*dynamodb.DynamoDB)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/examples_test.go new file mode 100644 index 000000000..060d1abdb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/examples_test.go @@ -0,0 +1,1353 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package dynamodb_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/dynamodb" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleDynamoDB_BatchGetItem() { + svc := dynamodb.New(session.New()) + + params := &dynamodb.BatchGetItemInput{ + RequestItems: map[string]*dynamodb.KeysAndAttributes{ // Required + "Key": { // Required + Keys: []map[string]*dynamodb.AttributeValue{ // Required + { // Required + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + // More values... + }, + AttributesToGet: []*string{ + aws.String("AttributeName"), // Required + // More values... + }, + ConsistentRead: aws.Bool(true), + ExpressionAttributeNames: map[string]*string{ + "Key": aws.String("AttributeName"), // Required + // More values... + }, + ProjectionExpression: aws.String("ProjectionExpression"), + }, + // More values... + }, + ReturnConsumedCapacity: aws.String("ReturnConsumedCapacity"), + } + resp, err := svc.BatchGetItem(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDynamoDB_BatchWriteItem() { + svc := dynamodb.New(session.New()) + + params := &dynamodb.BatchWriteItemInput{ + RequestItems: map[string][]*dynamodb.WriteRequest{ // Required + "Key": { // Required + { // Required + DeleteRequest: &dynamodb.DeleteRequest{ + Key: map[string]*dynamodb.AttributeValue{ // Required + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... 
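+ // Note: an AttributeValue is a tagged union. A real request sets
+ // exactly one of the B/BOOL/BS/L/M/N/NS/NULL/S/SS fields; this
+ // generated example fills in all of them only to show every shape.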
+ }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + }, + PutRequest: &dynamodb.PutRequest{ + Item: map[string]*dynamodb.AttributeValue{ // Required + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + }, + }, + // More values... + }, + // More values... + }, + ReturnConsumedCapacity: aws.String("ReturnConsumedCapacity"), + ReturnItemCollectionMetrics: aws.String("ReturnItemCollectionMetrics"), + } + resp, err := svc.BatchWriteItem(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDynamoDB_CreateTable() { + svc := dynamodb.New(session.New()) + + params := &dynamodb.CreateTableInput{ + AttributeDefinitions: []*dynamodb.AttributeDefinition{ // Required + { // Required + AttributeName: aws.String("KeySchemaAttributeName"), // Required + AttributeType: aws.String("ScalarAttributeType"), // Required + }, + // More values... + }, + KeySchema: []*dynamodb.KeySchemaElement{ // Required + { // Required + AttributeName: aws.String("KeySchemaAttributeName"), // Required + KeyType: aws.String("KeyType"), // Required + }, + // More values... + }, + ProvisionedThroughput: &dynamodb.ProvisionedThroughput{ // Required + ReadCapacityUnits: aws.Int64(1), // Required + WriteCapacityUnits: aws.Int64(1), // Required + }, + TableName: aws.String("TableName"), // Required + GlobalSecondaryIndexes: []*dynamodb.GlobalSecondaryIndex{ + { // Required + IndexName: aws.String("IndexName"), // Required + KeySchema: []*dynamodb.KeySchemaElement{ // Required + { // Required + AttributeName: aws.String("KeySchemaAttributeName"), // Required + KeyType: aws.String("KeyType"), // Required + }, + // More values... + }, + Projection: &dynamodb.Projection{ // Required + NonKeyAttributes: []*string{ + aws.String("NonKeyAttributeName"), // Required + // More values... + }, + ProjectionType: aws.String("ProjectionType"), + }, + ProvisionedThroughput: &dynamodb.ProvisionedThroughput{ // Required + ReadCapacityUnits: aws.Int64(1), // Required + WriteCapacityUnits: aws.Int64(1), // Required + }, + }, + // More values... + }, + LocalSecondaryIndexes: []*dynamodb.LocalSecondaryIndex{ + { // Required + IndexName: aws.String("IndexName"), // Required + KeySchema: []*dynamodb.KeySchemaElement{ // Required + { // Required + AttributeName: aws.String("KeySchemaAttributeName"), // Required + KeyType: aws.String("KeyType"), // Required + }, + // More values... 
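+ // Note: KeyType is "HASH" for the partition key element and
+ // "RANGE" for the optional sort key element.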
+ }, + Projection: &dynamodb.Projection{ // Required + NonKeyAttributes: []*string{ + aws.String("NonKeyAttributeName"), // Required + // More values... + }, + ProjectionType: aws.String("ProjectionType"), + }, + }, + // More values... + }, + StreamSpecification: &dynamodb.StreamSpecification{ + StreamEnabled: aws.Bool(true), + StreamViewType: aws.String("StreamViewType"), + }, + } + resp, err := svc.CreateTable(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDynamoDB_DeleteItem() { + svc := dynamodb.New(session.New()) + + params := &dynamodb.DeleteItemInput{ + Key: map[string]*dynamodb.AttributeValue{ // Required + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + TableName: aws.String("TableName"), // Required + ConditionExpression: aws.String("ConditionExpression"), + ConditionalOperator: aws.String("ConditionalOperator"), + Expected: map[string]*dynamodb.ExpectedAttributeValue{ + "Key": { // Required + AttributeValueList: []*dynamodb.AttributeValue{ + { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + ComparisonOperator: aws.String("ComparisonOperator"), + Exists: aws.Bool(true), + Value: &dynamodb.AttributeValue{ + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + }, + // More values... + }, + ExpressionAttributeNames: map[string]*string{ + "Key": aws.String("AttributeName"), // Required + // More values... 
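+ // Note: keys in ExpressionAttributeNames are placeholder tokens
+ // such as "#name" that stand in for attribute names inside the
+ // expression strings.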
+ }, + ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + ReturnConsumedCapacity: aws.String("ReturnConsumedCapacity"), + ReturnItemCollectionMetrics: aws.String("ReturnItemCollectionMetrics"), + ReturnValues: aws.String("ReturnValue"), + } + resp, err := svc.DeleteItem(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDynamoDB_DeleteTable() { + svc := dynamodb.New(session.New()) + + params := &dynamodb.DeleteTableInput{ + TableName: aws.String("TableName"), // Required + } + resp, err := svc.DeleteTable(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDynamoDB_DescribeLimits() { + svc := dynamodb.New(session.New()) + + var params *dynamodb.DescribeLimitsInput + resp, err := svc.DescribeLimits(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDynamoDB_DescribeTable() { + svc := dynamodb.New(session.New()) + + params := &dynamodb.DescribeTableInput{ + TableName: aws.String("TableName"), // Required + } + resp, err := svc.DescribeTable(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDynamoDB_GetItem() { + svc := dynamodb.New(session.New()) + + params := &dynamodb.GetItemInput{ + Key: map[string]*dynamodb.AttributeValue{ // Required + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + TableName: aws.String("TableName"), // Required + AttributesToGet: []*string{ + aws.String("AttributeName"), // Required + // More values... 
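+ // Note: AttributesToGet is a legacy parameter; ProjectionExpression
+ // below is the newer way to select which attributes are returned.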
+ }, + ConsistentRead: aws.Bool(true), + ExpressionAttributeNames: map[string]*string{ + "Key": aws.String("AttributeName"), // Required + // More values... + }, + ProjectionExpression: aws.String("ProjectionExpression"), + ReturnConsumedCapacity: aws.String("ReturnConsumedCapacity"), + } + resp, err := svc.GetItem(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDynamoDB_ListTables() { + svc := dynamodb.New(session.New()) + + params := &dynamodb.ListTablesInput{ + ExclusiveStartTableName: aws.String("TableName"), + Limit: aws.Int64(1), + } + resp, err := svc.ListTables(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDynamoDB_PutItem() { + svc := dynamodb.New(session.New()) + + params := &dynamodb.PutItemInput{ + Item: map[string]*dynamodb.AttributeValue{ // Required + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + TableName: aws.String("TableName"), // Required + ConditionExpression: aws.String("ConditionExpression"), + ConditionalOperator: aws.String("ConditionalOperator"), + Expected: map[string]*dynamodb.ExpectedAttributeValue{ + "Key": { // Required + AttributeValueList: []*dynamodb.AttributeValue{ + { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + ComparisonOperator: aws.String("ComparisonOperator"), + Exists: aws.Bool(true), + Value: &dynamodb.AttributeValue{ + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... 
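+ // Note: N and NS values are sent as strings on the wire; DynamoDB
+ // interprets them as numbers.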
+ }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + }, + // More values... + }, + ExpressionAttributeNames: map[string]*string{ + "Key": aws.String("AttributeName"), // Required + // More values... + }, + ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + ReturnConsumedCapacity: aws.String("ReturnConsumedCapacity"), + ReturnItemCollectionMetrics: aws.String("ReturnItemCollectionMetrics"), + ReturnValues: aws.String("ReturnValue"), + } + resp, err := svc.PutItem(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDynamoDB_Query() { + svc := dynamodb.New(session.New()) + + params := &dynamodb.QueryInput{ + TableName: aws.String("TableName"), // Required + AttributesToGet: []*string{ + aws.String("AttributeName"), // Required + // More values... + }, + ConditionalOperator: aws.String("ConditionalOperator"), + ConsistentRead: aws.Bool(true), + ExclusiveStartKey: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + ExpressionAttributeNames: map[string]*string{ + "Key": aws.String("AttributeName"), // Required + // More values... + }, + ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... 
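+ // Note: keys in ExpressionAttributeValues are ":value" placeholders
+ // referenced from KeyConditionExpression and FilterExpression below.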
+ }, + FilterExpression: aws.String("ConditionExpression"), + IndexName: aws.String("IndexName"), + KeyConditionExpression: aws.String("KeyExpression"), + KeyConditions: map[string]*dynamodb.Condition{ + "Key": { // Required + ComparisonOperator: aws.String("ComparisonOperator"), // Required + AttributeValueList: []*dynamodb.AttributeValue{ + { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + }, + // More values... + }, + Limit: aws.Int64(1), + ProjectionExpression: aws.String("ProjectionExpression"), + QueryFilter: map[string]*dynamodb.Condition{ + "Key": { // Required + ComparisonOperator: aws.String("ComparisonOperator"), // Required + AttributeValueList: []*dynamodb.AttributeValue{ + { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + }, + // More values... + }, + ReturnConsumedCapacity: aws.String("ReturnConsumedCapacity"), + ScanIndexForward: aws.Bool(true), + Select: aws.String("Select"), + } + resp, err := svc.Query(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDynamoDB_Scan() { + svc := dynamodb.New(session.New()) + + params := &dynamodb.ScanInput{ + TableName: aws.String("TableName"), // Required + AttributesToGet: []*string{ + aws.String("AttributeName"), // Required + // More values... + }, + ConditionalOperator: aws.String("ConditionalOperator"), + ConsistentRead: aws.Bool(true), + ExclusiveStartKey: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... 
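+ // Note: ExclusiveStartKey is used for pagination; pass the
+ // LastEvaluatedKey returned by the previous Scan response to resume.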
+ }, + ExpressionAttributeNames: map[string]*string{ + "Key": aws.String("AttributeName"), // Required + // More values... + }, + ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + FilterExpression: aws.String("ConditionExpression"), + IndexName: aws.String("IndexName"), + Limit: aws.Int64(1), + ProjectionExpression: aws.String("ProjectionExpression"), + ReturnConsumedCapacity: aws.String("ReturnConsumedCapacity"), + ScanFilter: map[string]*dynamodb.Condition{ + "Key": { // Required + ComparisonOperator: aws.String("ComparisonOperator"), // Required + AttributeValueList: []*dynamodb.AttributeValue{ + { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + }, + // More values... + }, + Segment: aws.Int64(1), + Select: aws.String("Select"), + TotalSegments: aws.Int64(1), + } + resp, err := svc.Scan(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDynamoDB_UpdateItem() { + svc := dynamodb.New(session.New()) + + params := &dynamodb.UpdateItemInput{ + Key: map[string]*dynamodb.AttributeValue{ // Required + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + TableName: aws.String("TableName"), // Required + AttributeUpdates: map[string]*dynamodb.AttributeValueUpdate{ + "Key": { // Required + Action: aws.String("AttributeAction"), + Value: &dynamodb.AttributeValue{ + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... 
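+ // Note: AttributeUpdates is a legacy parameter; UpdateExpression at
+ // the end of this input is the newer way to express updates.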
+ }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + }, + // More values... + }, + ConditionExpression: aws.String("ConditionExpression"), + ConditionalOperator: aws.String("ConditionalOperator"), + Expected: map[string]*dynamodb.ExpectedAttributeValue{ + "Key": { // Required + AttributeValueList: []*dynamodb.AttributeValue{ + { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + ComparisonOperator: aws.String("ComparisonOperator"), + Exists: aws.Bool(true), + Value: &dynamodb.AttributeValue{ + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + }, + // More values... + }, + ExpressionAttributeNames: map[string]*string{ + "Key": aws.String("AttributeName"), // Required + // More values... + }, + ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + ReturnConsumedCapacity: aws.String("ReturnConsumedCapacity"), + ReturnItemCollectionMetrics: aws.String("ReturnItemCollectionMetrics"), + ReturnValues: aws.String("ReturnValue"), + UpdateExpression: aws.String("UpdateExpression"), + } + resp, err := svc.UpdateItem(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
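+ // An awserr.RequestFailure additionally exposes the HTTP status
+ // code and the AWS request ID of the failed request.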
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDynamoDB_UpdateTable() { + svc := dynamodb.New(session.New()) + + params := &dynamodb.UpdateTableInput{ + TableName: aws.String("TableName"), // Required + AttributeDefinitions: []*dynamodb.AttributeDefinition{ + { // Required + AttributeName: aws.String("KeySchemaAttributeName"), // Required + AttributeType: aws.String("ScalarAttributeType"), // Required + }, + // More values... + }, + GlobalSecondaryIndexUpdates: []*dynamodb.GlobalSecondaryIndexUpdate{ + { // Required + Create: &dynamodb.CreateGlobalSecondaryIndexAction{ + IndexName: aws.String("IndexName"), // Required + KeySchema: []*dynamodb.KeySchemaElement{ // Required + { // Required + AttributeName: aws.String("KeySchemaAttributeName"), // Required + KeyType: aws.String("KeyType"), // Required + }, + // More values... + }, + Projection: &dynamodb.Projection{ // Required + NonKeyAttributes: []*string{ + aws.String("NonKeyAttributeName"), // Required + // More values... + }, + ProjectionType: aws.String("ProjectionType"), + }, + ProvisionedThroughput: &dynamodb.ProvisionedThroughput{ // Required + ReadCapacityUnits: aws.Int64(1), // Required + WriteCapacityUnits: aws.Int64(1), // Required + }, + }, + Delete: &dynamodb.DeleteGlobalSecondaryIndexAction{ + IndexName: aws.String("IndexName"), // Required + }, + Update: &dynamodb.UpdateGlobalSecondaryIndexAction{ + IndexName: aws.String("IndexName"), // Required + ProvisionedThroughput: &dynamodb.ProvisionedThroughput{ // Required + ReadCapacityUnits: aws.Int64(1), // Required + WriteCapacityUnits: aws.Int64(1), // Required + }, + }, + }, + // More values... + }, + ProvisionedThroughput: &dynamodb.ProvisionedThroughput{ + ReadCapacityUnits: aws.Int64(1), // Required + WriteCapacityUnits: aws.Int64(1), // Required + }, + StreamSpecification: &dynamodb.StreamSpecification{ + StreamEnabled: aws.Bool(true), + StreamViewType: aws.String("StreamViewType"), + }, + } + resp, err := svc.UpdateTable(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go new file mode 100644 index 000000000..2af4076f8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go @@ -0,0 +1,202 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package dynamodb + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// This is the Amazon DynamoDB API Reference. This guide provides descriptions +// of the low-level DynamoDB API. +// +// This guide is intended for use with the following DynamoDB documentation: +// +// Amazon DynamoDB Getting Started Guide (http://docs.aws.amazon.com/amazondynamodb/latest/gettingstartedguide/) +// - provides hands-on exercises that help you learn the basics of working with +// DynamoDB. If you are new to DynamoDB, we recommend that you begin with the +// Getting Started Guide. 
+// +// Amazon DynamoDB Developer Guide (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/) +// - contains detailed information about DynamoDB concepts, usage, and best +// practices. +// +// Amazon DynamoDB Streams API Reference (http://docs.aws.amazon.com/dynamodbstreams/latest/APIReference/) +// - provides descriptions and samples of the DynamoDB Streams API. (For more +// information, see Capturing Table Activity with DynamoDB Streams (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Streams.html) +// in the Amazon DynamoDB Developer Guide.) +// +// Instead of making the requests to the low-level DynamoDB API directly +// from your application, we recommend that you use the AWS Software Development +// Kits (SDKs). The easy-to-use libraries in the AWS SDKs make it unnecessary +// to call the low-level DynamoDB API directly from your application. The libraries +// take care of request authentication, serialization, and connection management. +// For more information, see Using the AWS SDKs with DynamoDB (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/UsingAWSSDK.html) +// in the Amazon DynamoDB Developer Guide. +// +// If you decide to code against the low-level DynamoDB API directly, you will +// need to write the necessary code to authenticate your requests. For more +// information on signing your requests, see Using the DynamoDB API (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/API.html) +// in the Amazon DynamoDB Developer Guide. +// +// The following are short descriptions of each low-level API action, organized +// by function. +// +// Managing Tables +// +// CreateTable - Creates a table with user-specified provisioned throughput +// settings. You must define a primary key for the table - either a simple primary +// key (partition key), or a composite primary key (partition key and sort key). +// Optionally, you can create one or more secondary indexes, which provide fast +// data access using non-key attributes. +// +// DescribeTable - Returns metadata for a table, such as table size, status, +// and index information. +// +// UpdateTable - Modifies the provisioned throughput settings for a table. +// Optionally, you can modify the provisioned throughput settings for global +// secondary indexes on the table. +// +// ListTables - Returns a list of all tables associated with the current +// AWS account and endpoint. +// +// DeleteTable - Deletes a table and all of its indexes. +// +// For conceptual information about managing tables, see Working with Tables +// (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html) +// in the Amazon DynamoDB Developer Guide. +// +// Reading Data +// +// GetItem - Returns a set of attributes for the item that has a given primary +// key. By default, GetItem performs an eventually consistent read; however, +// applications can request a strongly consistent read instead. +// +// BatchGetItem - Performs multiple GetItem requests for data items using +// their primary keys, from one table or multiple tables. The response from +// BatchGetItem has a size limit of 16 MB and returns a maximum of 100 items. +// Both eventually consistent and strongly consistent reads can be used. +// +// Query - Returns one or more items from a table or a secondary index. +// You must provide a specific value for the partition key. You can narrow the +// scope of the query using comparison operators against a sort key value, or +// on the index key. 
Query supports either eventual or strong consistency. A
+// single response has a size limit of 1 MB.
+//
+// Scan - Reads every item in a table; the result set is eventually consistent.
+// You can limit the number of items returned by filtering the data attributes,
+// using conditional expressions. Scan can be used to enable ad-hoc querying
+// of a table against non-key attributes; however, since this is a full table
+// scan without using an index, Scan should not be used for any application
+// query use case that requires predictable performance.
+//
+// For conceptual information about reading data, see Working with Items
+// (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithItems.html)
+// and Query and Scan Operations (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html)
+// in the Amazon DynamoDB Developer Guide.
+//
+// Modifying Data
+//
+// PutItem - Creates a new item, or replaces an existing item with a new
+// item (including all the attributes). By default, if an item in the table
+// already exists with the same primary key, the new item completely replaces
+// the existing item. You can use conditional operators to replace an item only
+// if its attribute values match certain conditions, or to insert a new item
+// only if that item doesn't already exist.
+//
+// UpdateItem - Modifies the attributes of an existing item. You can also
+// use conditional operators to perform an update only if the item's attribute
+// values match certain conditions.
+//
+// DeleteItem - Deletes an item in a table by primary key. You can use conditional
+// operators to delete an item only if the item's attribute values match
+// certain conditions.
+//
+// BatchWriteItem - Performs multiple PutItem and DeleteItem requests across
+// multiple tables in a single request. A failure of any request(s) in the batch
+// will not cause the entire BatchWriteItem operation to fail. Supports batches
+// of up to 25 items to put or delete, with a maximum total request size of
+// 16 MB.
+//
+// For conceptual information about modifying data, see Working with Items
+// (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithItems.html)
+// and Query and Scan Operations (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html)
+// in the Amazon DynamoDB Developer Guide.
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type DynamoDB struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "dynamodb"
+
+// New creates a new instance of the DynamoDB client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a DynamoDB client from just a session.
+// svc := dynamodb.New(mySession)
+//
+// // Create a DynamoDB client with additional configuration
+// svc := dynamodb.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *DynamoDB {
+ c := p.ClientConfig(ServiceName, cfgs...)
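+ // The resolved client config carries the endpoint, signing region, and
+ // shared request handlers registered for the "dynamodb" service.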
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *DynamoDB { + svc := &DynamoDB{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2012-08-10", + JSONVersion: "1.0", + TargetPrefix: "DynamoDB_20120810", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a DynamoDB operation and runs any +// custom request initialization. +func (c *DynamoDB) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/waiters.go new file mode 100644 index 000000000..4deeed7a5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/waiters.go @@ -0,0 +1,59 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package dynamodb + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *DynamoDB) WaitUntilTableExists(input *DescribeTableInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeTable", + Delay: 20, + MaxAttempts: 25, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "path", + Argument: "Table.TableStatus", + Expected: "ACTIVE", + }, + { + State: "retry", + Matcher: "error", + Argument: "", + Expected: "ResourceNotFoundException", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *DynamoDB) WaitUntilTableNotExists(input *DescribeTableInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeTable", + Delay: 20, + MaxAttempts: 25, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "error", + Argument: "", + Expected: "ResourceNotFoundException", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodbstreams/api.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodbstreams/api.go new file mode 100644 index 000000000..3219905d5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodbstreams/api.go @@ -0,0 +1,838 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package dynamodbstreams provides a client for Amazon DynamoDB Streams. +package dynamodbstreams + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/dynamodb" +) + +const opDescribeStream = "DescribeStream" + +// DescribeStreamRequest generates a "aws/request.Request" representing the +// client's request for the DescribeStream operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeStream method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeStreamRequest method. +// req, resp := client.DescribeStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DynamoDBStreams) DescribeStreamRequest(input *DescribeStreamInput) (req *request.Request, output *DescribeStreamOutput) { + op := &request.Operation{ + Name: opDescribeStream, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeStreamInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeStreamOutput{} + req.Data = output + return +} + +// Returns information about a stream, including the current status of the stream, +// its Amazon Resource Name (ARN), the composition of its shards, and its corresponding +// DynamoDB table. +// +// You can call DescribeStream at a maximum rate of 10 times per second. +// +// Each shard in the stream has a SequenceNumberRange associated with it. +// If the SequenceNumberRange has a StartingSequenceNumber but no EndingSequenceNumber, +// then the shard is still open (able to receive more stream records). If both +// StartingSequenceNumber and EndingSequenceNumber are present, then that shard +// is closed and can no longer receive more data. +func (c *DynamoDBStreams) DescribeStream(input *DescribeStreamInput) (*DescribeStreamOutput, error) { + req, out := c.DescribeStreamRequest(input) + err := req.Send() + return out, err +} + +const opGetRecords = "GetRecords" + +// GetRecordsRequest generates a "aws/request.Request" representing the +// client's request for the GetRecords operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetRecords method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetRecordsRequest method. +// req, resp := client.GetRecordsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DynamoDBStreams) GetRecordsRequest(input *GetRecordsInput) (req *request.Request, output *GetRecordsOutput) { + op := &request.Operation{ + Name: opGetRecords, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetRecordsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetRecordsOutput{} + req.Data = output + return +} + +// Retrieves the stream records from a given shard. +// +// Specify a shard iterator using the ShardIterator parameter. 
The shard iterator +// specifies the position in the shard from which you want to start reading +// stream records sequentially. If there are no stream records available in +// the portion of the shard that the iterator points to, GetRecords returns +// an empty list. Note that it might take multiple calls to get to a portion +// of the shard that contains stream records. +// +// GetRecords can retrieve a maximum of 1 MB of data or 1000 stream records, +// whichever comes first. +func (c *DynamoDBStreams) GetRecords(input *GetRecordsInput) (*GetRecordsOutput, error) { + req, out := c.GetRecordsRequest(input) + err := req.Send() + return out, err +} + +const opGetShardIterator = "GetShardIterator" + +// GetShardIteratorRequest generates a "aws/request.Request" representing the +// client's request for the GetShardIterator operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetShardIterator method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetShardIteratorRequest method. +// req, resp := client.GetShardIteratorRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DynamoDBStreams) GetShardIteratorRequest(input *GetShardIteratorInput) (req *request.Request, output *GetShardIteratorOutput) { + op := &request.Operation{ + Name: opGetShardIterator, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetShardIteratorInput{} + } + + req = c.newRequest(op, input, output) + output = &GetShardIteratorOutput{} + req.Data = output + return +} + +// Returns a shard iterator. A shard iterator provides information about how +// to retrieve the stream records from within a shard. Use the shard iterator +// in a subsequent GetRecords request to read the stream records from the shard. +// +// A shard iterator expires 15 minutes after it is returned to the requester. +func (c *DynamoDBStreams) GetShardIterator(input *GetShardIteratorInput) (*GetShardIteratorOutput, error) { + req, out := c.GetShardIteratorRequest(input) + err := req.Send() + return out, err +} + +const opListStreams = "ListStreams" + +// ListStreamsRequest generates a "aws/request.Request" representing the +// client's request for the ListStreams operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListStreams method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListStreamsRequest method. 
+// req, resp := client.ListStreamsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DynamoDBStreams) ListStreamsRequest(input *ListStreamsInput) (req *request.Request, output *ListStreamsOutput) { + op := &request.Operation{ + Name: opListStreams, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListStreamsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListStreamsOutput{} + req.Data = output + return +} + +// Returns an array of stream ARNs associated with the current account and endpoint. +// If the TableName parameter is present, then ListStreams will return only +// the streams ARNs for that table. +// +// You can call ListStreams at a maximum rate of 5 times per second. +func (c *DynamoDBStreams) ListStreams(input *ListStreamsInput) (*ListStreamsOutput, error) { + req, out := c.ListStreamsRequest(input) + err := req.Send() + return out, err +} + +// Represents the input of a DescribeStream operation. +type DescribeStreamInput struct { + _ struct{} `type:"structure"` + + // The shard ID of the first item that this operation will evaluate. Use the + // value that was returned for LastEvaluatedShardId in the previous operation. + ExclusiveStartShardId *string `min:"28" type:"string"` + + // The maximum number of shard objects to return. The upper limit is 100. + Limit *int64 `min:"1" type:"integer"` + + // The Amazon Resource Name (ARN) for the stream. + StreamArn *string `min:"37" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStreamInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeStreamInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeStreamInput"} + if s.ExclusiveStartShardId != nil && len(*s.ExclusiveStartShardId) < 28 { + invalidParams.Add(request.NewErrParamMinLen("ExclusiveStartShardId", 28)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.StreamArn == nil { + invalidParams.Add(request.NewErrParamRequired("StreamArn")) + } + if s.StreamArn != nil && len(*s.StreamArn) < 37 { + invalidParams.Add(request.NewErrParamMinLen("StreamArn", 37)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a DescribeStream operation. +type DescribeStreamOutput struct { + _ struct{} `type:"structure"` + + // A complete description of the stream, including its creation date and time, + // the DynamoDB table associated with the stream, the shard IDs within the stream, + // and the beginning and ending sequence numbers of stream records within the + // shards. + StreamDescription *StreamDescription `type:"structure"` +} + +// String returns the string representation +func (s DescribeStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStreamOutput) GoString() string { + return s.String() +} + +// Represents the input of a GetRecords operation. +type GetRecordsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of records to return from the shard. The upper limit is + // 1000. 
+ Limit *int64 `min:"1" type:"integer"` + + // A shard iterator that was retrieved from a previous GetShardIterator operation. + // This iterator can be used to access the stream records in this shard. + ShardIterator *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRecordsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRecordsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetRecordsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetRecordsInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.ShardIterator == nil { + invalidParams.Add(request.NewErrParamRequired("ShardIterator")) + } + if s.ShardIterator != nil && len(*s.ShardIterator) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ShardIterator", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a GetRecords operation. +type GetRecordsOutput struct { + _ struct{} `type:"structure"` + + // The next position in the shard from which to start sequentially reading stream + // records. If set to null, the shard has been closed and the requested iterator + // will not return any more data. + NextShardIterator *string `min:"1" type:"string"` + + // The stream records from the shard, which were retrieved using the shard iterator. + Records []*Record `type:"list"` +} + +// String returns the string representation +func (s GetRecordsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRecordsOutput) GoString() string { + return s.String() +} + +// Represents the input of a GetShardIterator operation. +type GetShardIteratorInput struct { + _ struct{} `type:"structure"` + + // The sequence number of a stream record in the shard from which to start reading. + SequenceNumber *string `min:"21" type:"string"` + + // The identifier of the shard. The iterator will be returned for this shard + // ID. + ShardId *string `min:"28" type:"string" required:"true"` + + // Determines how the shard iterator is used to start reading stream records + // from the shard: + // + // AT_SEQUENCE_NUMBER - Start reading exactly from the position denoted + // by a specific sequence number. + // + // AFTER_SEQUENCE_NUMBER - Start reading right after the position denoted + // by a specific sequence number. + // + // TRIM_HORIZON - Start reading at the last (untrimmed) stream record, which + // is the oldest record in the shard. In DynamoDB Streams, there is a 24 hour + // limit on data retention. Stream records whose age exceeds this limit are + // subject to removal (trimming) from the stream. + // + // LATEST - Start reading just after the most recent stream record in the + // shard, so that you always read the most recent data in the shard. + ShardIteratorType *string `type:"string" required:"true" enum:"ShardIteratorType"` + + // The Amazon Resource Name (ARN) for the stream. 
+ StreamArn *string `min:"37" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetShardIteratorInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetShardIteratorInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetShardIteratorInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetShardIteratorInput"} + if s.SequenceNumber != nil && len(*s.SequenceNumber) < 21 { + invalidParams.Add(request.NewErrParamMinLen("SequenceNumber", 21)) + } + if s.ShardId == nil { + invalidParams.Add(request.NewErrParamRequired("ShardId")) + } + if s.ShardId != nil && len(*s.ShardId) < 28 { + invalidParams.Add(request.NewErrParamMinLen("ShardId", 28)) + } + if s.ShardIteratorType == nil { + invalidParams.Add(request.NewErrParamRequired("ShardIteratorType")) + } + if s.StreamArn == nil { + invalidParams.Add(request.NewErrParamRequired("StreamArn")) + } + if s.StreamArn != nil && len(*s.StreamArn) < 37 { + invalidParams.Add(request.NewErrParamMinLen("StreamArn", 37)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a GetShardIterator operation. +type GetShardIteratorOutput struct { + _ struct{} `type:"structure"` + + // The position in the shard from which to start reading stream records sequentially. + // A shard iterator specifies this position using the sequence number of a stream + // record in a shard. + ShardIterator *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetShardIteratorOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetShardIteratorOutput) GoString() string { + return s.String() +} + +// Represents the input of a ListStreams operation. +type ListStreamsInput struct { + _ struct{} `type:"structure"` + + // The ARN (Amazon Resource Name) of the first item that this operation will + // evaluate. Use the value that was returned for LastEvaluatedStreamArn in the + // previous operation. + ExclusiveStartStreamArn *string `min:"37" type:"string"` + + // The maximum number of streams to return. The upper limit is 100. + Limit *int64 `min:"1" type:"integer"` + + // If this parameter is provided, then only the streams associated with this + // table name are returned. + TableName *string `min:"3" type:"string"` +} + +// String returns the string representation +func (s ListStreamsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListStreamsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListStreamsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListStreamsInput"} + if s.ExclusiveStartStreamArn != nil && len(*s.ExclusiveStartStreamArn) < 37 { + invalidParams.Add(request.NewErrParamMinLen("ExclusiveStartStreamArn", 37)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a ListStreams operation. 
+type ListStreamsOutput struct { + _ struct{} `type:"structure"` + + // The stream ARN of the item where the operation stopped, inclusive of the + // previous result set. Use this value to start a new operation, excluding this + // value in the new request. + // + // If LastEvaluatedStreamArn is empty, then the "last page" of results has + // been processed and there is no more data to be retrieved. + // + // If LastEvaluatedStreamArn is not empty, it does not necessarily mean that + // there is more data in the result set. The only way to know when you have + // reached the end of the result set is when LastEvaluatedStreamArn is empty. + LastEvaluatedStreamArn *string `min:"37" type:"string"` + + // A list of stream descriptors associated with the current account and endpoint. + Streams []*Stream `type:"list"` +} + +// String returns the string representation +func (s ListStreamsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListStreamsOutput) GoString() string { + return s.String() +} + +// A description of a unique event within a stream. +type Record struct { + _ struct{} `type:"structure"` + + // The region in which the GetRecords request was received. + AwsRegion *string `locationName:"awsRegion" type:"string"` + + // The main body of the stream record, containing all of the DynamoDB-specific + // fields. + Dynamodb *StreamRecord `locationName:"dynamodb" type:"structure"` + + // A globally unique identifier for the event that was recorded in this stream + // record. + EventID *string `locationName:"eventID" type:"string"` + + // The type of data modification that was performed on the DynamoDB table: + // + // INSERT - a new item was added to the table. + // + // MODIFY - one or more of an existing item's attributes were modified. + // + // REMOVE - the item was deleted from the table + EventName *string `locationName:"eventName" type:"string" enum:"OperationType"` + + // The AWS service from which the stream record originated. For DynamoDB Streams, + // this is aws:dynamodb. + EventSource *string `locationName:"eventSource" type:"string"` + + // The version number of the stream record format. This number is updated whenever + // the structure of Record is modified. + // + // Client applications must not assume that eventVersion will remain at a particular + // value, as this number is subject to change at any time. In general, eventVersion + // will only increase as the low-level DynamoDB Streams API evolves. + EventVersion *string `locationName:"eventVersion" type:"string"` +} + +// String returns the string representation +func (s Record) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Record) GoString() string { + return s.String() +} + +// The beginning and ending sequence numbers for the stream records contained +// within a shard. +type SequenceNumberRange struct { + _ struct{} `type:"structure"` + + // The last sequence number. + EndingSequenceNumber *string `min:"21" type:"string"` + + // The first sequence number. + StartingSequenceNumber *string `min:"21" type:"string"` +} + +// String returns the string representation +func (s SequenceNumberRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SequenceNumberRange) GoString() string { + return s.String() +} + +// A uniquely identified group of stream records within a stream. 
+type Shard struct { + _ struct{} `type:"structure"` + + // The shard ID of the current shard's parent. + ParentShardId *string `min:"28" type:"string"` + + // The range of possible sequence numbers for the shard. + SequenceNumberRange *SequenceNumberRange `type:"structure"` + + // The system-generated identifier for this shard. + ShardId *string `min:"28" type:"string"` +} + +// String returns the string representation +func (s Shard) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Shard) GoString() string { + return s.String() +} + +// Represents all of the data describing a particular stream. +type Stream struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) for the stream. + StreamArn *string `min:"37" type:"string"` + + // A timestamp, in ISO 8601 format, for this stream. + // + // Note that LatestStreamLabel is not a unique identifier for the stream, because + // it is possible that a stream from another table might have the same timestamp. + // However, the combination of the following three elements is guaranteed to + // be unique: + // + // the AWS customer ID. + // + // the table name + // + // the StreamLabel + StreamLabel *string `type:"string"` + + // The DynamoDB table with which the stream is associated. + TableName *string `min:"3" type:"string"` +} + +// String returns the string representation +func (s Stream) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Stream) GoString() string { + return s.String() +} + +// Represents all of the data describing a particular stream. +type StreamDescription struct { + _ struct{} `type:"structure"` + + // The date and time when the request to create this stream was issued. + CreationRequestDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The key attribute(s) of the stream's DynamoDB table. + KeySchema []*dynamodb.KeySchemaElement `min:"1" type:"list"` + + // The shard ID of the item where the operation stopped, inclusive of the previous + // result set. Use this value to start a new operation, excluding this value + // in the new request. + // + // If LastEvaluatedShardId is empty, then the "last page" of results has been + // processed and there is currently no more data to be retrieved. + // + // If LastEvaluatedShardId is not empty, it does not necessarily mean that + // there is more data in the result set. The only way to know when you have + // reached the end of the result set is when LastEvaluatedShardId is empty. + LastEvaluatedShardId *string `min:"28" type:"string"` + + // The shards that comprise the stream. + Shards []*Shard `type:"list"` + + // The Amazon Resource Name (ARN) for the stream. + StreamArn *string `min:"37" type:"string"` + + // A timestamp, in ISO 8601 format, for this stream. + // + // Note that LatestStreamLabel is not a unique identifier for the stream, because + // it is possible that a stream from another table might have the same timestamp. + // However, the combination of the following three elements is guaranteed to + // be unique: + // + // the AWS customer ID. + // + // the table name + // + // the StreamLabel + StreamLabel *string `type:"string"` + + // Indicates the current status of the stream: + // + // ENABLING - Streams is currently being enabled on the DynamoDB table. + // + // ENABLED - the stream is enabled. + // + // DISABLING - Streams is currently being disabled on the DynamoDB table. 
+ // + // DISABLED - the stream is disabled. + StreamStatus *string `type:"string" enum:"StreamStatus"` + + // Indicates the format of the records within this stream: + // + // KEYS_ONLY - only the key attributes of items that were modified in the + // DynamoDB table. + // + // NEW_IMAGE - entire items from the table, as they appeared after they + // were modified. + // + // OLD_IMAGE - entire items from the table, as they appeared before they + // were modified. + // + // NEW_AND_OLD_IMAGES - both the new and the old images of the items from + // the table. + StreamViewType *string `type:"string" enum:"StreamViewType"` + + // The DynamoDB table with which the stream is associated. + TableName *string `min:"3" type:"string"` +} + +// String returns the string representation +func (s StreamDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StreamDescription) GoString() string { + return s.String() +} + +// A description of a single data modification that was performed on an item +// in a DynamoDB table. +type StreamRecord struct { + _ struct{} `type:"structure"` + + // The approximate date and time when the stream record was created, in UNIX + // epoch time (http://www.epochconverter.com/) format. + ApproximateCreationDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The primary key attribute(s) for the DynamoDB item that was modified. + Keys map[string]*dynamodb.AttributeValue `type:"map"` + + // The item in the DynamoDB table as it appeared after it was modified. + NewImage map[string]*dynamodb.AttributeValue `type:"map"` + + // The item in the DynamoDB table as it appeared before it was modified. + OldImage map[string]*dynamodb.AttributeValue `type:"map"` + + // The sequence number of the stream record. + SequenceNumber *string `min:"21" type:"string"` + + // The size of the stream record, in bytes. + SizeBytes *int64 `min:"1" type:"long"` + + // The type of data from the modified DynamoDB item that was captured in this + // stream record: + // + // KEYS_ONLY - only the key attributes of the modified item. + // + // NEW_IMAGE - the entire item, as it appeared after it was modified. + // + // OLD_IMAGE - the entire item, as it appeared before it was modified. + // + // NEW_AND_OLD_IMAGES - both the new and the old item images of the item. 
+ StreamViewType *string `type:"string" enum:"StreamViewType"` +} + +// String returns the string representation +func (s StreamRecord) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StreamRecord) GoString() string { + return s.String() +} + +const ( + // @enum KeyType + KeyTypeHash = "HASH" + // @enum KeyType + KeyTypeRange = "RANGE" +) + +const ( + // @enum OperationType + OperationTypeInsert = "INSERT" + // @enum OperationType + OperationTypeModify = "MODIFY" + // @enum OperationType + OperationTypeRemove = "REMOVE" +) + +const ( + // @enum ShardIteratorType + ShardIteratorTypeTrimHorizon = "TRIM_HORIZON" + // @enum ShardIteratorType + ShardIteratorTypeLatest = "LATEST" + // @enum ShardIteratorType + ShardIteratorTypeAtSequenceNumber = "AT_SEQUENCE_NUMBER" + // @enum ShardIteratorType + ShardIteratorTypeAfterSequenceNumber = "AFTER_SEQUENCE_NUMBER" +) + +const ( + // @enum StreamStatus + StreamStatusEnabling = "ENABLING" + // @enum StreamStatus + StreamStatusEnabled = "ENABLED" + // @enum StreamStatus + StreamStatusDisabling = "DISABLING" + // @enum StreamStatus + StreamStatusDisabled = "DISABLED" +) + +const ( + // @enum StreamViewType + StreamViewTypeNewImage = "NEW_IMAGE" + // @enum StreamViewType + StreamViewTypeOldImage = "OLD_IMAGE" + // @enum StreamViewType + StreamViewTypeNewAndOldImages = "NEW_AND_OLD_IMAGES" + // @enum StreamViewType + StreamViewTypeKeysOnly = "KEYS_ONLY" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodbstreams/dynamodbstreamsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodbstreams/dynamodbstreamsiface/interface.go new file mode 100644 index 000000000..6283cee18 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodbstreams/dynamodbstreamsiface/interface.go @@ -0,0 +1,30 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package dynamodbstreamsiface provides an interface for the Amazon DynamoDB Streams. +package dynamodbstreamsiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/dynamodbstreams" +) + +// DynamoDBStreamsAPI is the interface type for dynamodbstreams.DynamoDBStreams. 
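+// [Editorial sketch, not part of the generated diff] The interface exists
+// so callers can depend on DynamoDBStreamsAPI rather than the concrete
+// client, which makes unit testing straightforward: embed the interface
+// in a mock and override only the methods a test exercises. A
+// hypothetical mock and consumer:
+//
+//    type mockStreams struct {
+//        dynamodbstreamsiface.DynamoDBStreamsAPI
+//    }
+//
+//    func (m *mockStreams) ListStreams(in *dynamodbstreams.ListStreamsInput) (*dynamodbstreams.ListStreamsOutput, error) {
+//        return &dynamodbstreams.ListStreamsOutput{}, nil // canned response for tests
+//    }
+//
+//    func consume(api dynamodbstreamsiface.DynamoDBStreamsAPI) error {
+//        _, err := api.ListStreams(&dynamodbstreams.ListStreamsInput{})
+//        return err
+//    }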
+type DynamoDBStreamsAPI interface { + DescribeStreamRequest(*dynamodbstreams.DescribeStreamInput) (*request.Request, *dynamodbstreams.DescribeStreamOutput) + + DescribeStream(*dynamodbstreams.DescribeStreamInput) (*dynamodbstreams.DescribeStreamOutput, error) + + GetRecordsRequest(*dynamodbstreams.GetRecordsInput) (*request.Request, *dynamodbstreams.GetRecordsOutput) + + GetRecords(*dynamodbstreams.GetRecordsInput) (*dynamodbstreams.GetRecordsOutput, error) + + GetShardIteratorRequest(*dynamodbstreams.GetShardIteratorInput) (*request.Request, *dynamodbstreams.GetShardIteratorOutput) + + GetShardIterator(*dynamodbstreams.GetShardIteratorInput) (*dynamodbstreams.GetShardIteratorOutput, error) + + ListStreamsRequest(*dynamodbstreams.ListStreamsInput) (*request.Request, *dynamodbstreams.ListStreamsOutput) + + ListStreams(*dynamodbstreams.ListStreamsInput) (*dynamodbstreams.ListStreamsOutput, error) +} + +var _ DynamoDBStreamsAPI = (*dynamodbstreams.DynamoDBStreams)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodbstreams/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodbstreams/examples_test.go new file mode 100644 index 000000000..73c4ef2e2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodbstreams/examples_test.go @@ -0,0 +1,100 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package dynamodbstreams_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/dynamodbstreams" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleDynamoDBStreams_DescribeStream() { + svc := dynamodbstreams.New(session.New()) + + params := &dynamodbstreams.DescribeStreamInput{ + StreamArn: aws.String("StreamArn"), // Required + ExclusiveStartShardId: aws.String("ShardId"), + Limit: aws.Int64(1), + } + resp, err := svc.DescribeStream(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDynamoDBStreams_GetRecords() { + svc := dynamodbstreams.New(session.New()) + + params := &dynamodbstreams.GetRecordsInput{ + ShardIterator: aws.String("ShardIterator"), // Required + Limit: aws.Int64(1), + } + resp, err := svc.GetRecords(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDynamoDBStreams_GetShardIterator() { + svc := dynamodbstreams.New(session.New()) + + params := &dynamodbstreams.GetShardIteratorInput{ + ShardId: aws.String("ShardId"), // Required + ShardIteratorType: aws.String("ShardIteratorType"), // Required + StreamArn: aws.String("StreamArn"), // Required + SequenceNumber: aws.String("SequenceNumber"), + } + resp, err := svc.GetShardIterator(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDynamoDBStreams_ListStreams() { + svc := dynamodbstreams.New(session.New()) + + params := &dynamodbstreams.ListStreamsInput{ + ExclusiveStartStreamArn: aws.String("StreamArn"), + Limit: aws.Int64(1), + TableName: aws.String("TableName"), + } + resp, err := svc.ListStreams(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodbstreams/service.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodbstreams/service.go new file mode 100644 index 000000000..c8aaeee19 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodbstreams/service.go @@ -0,0 +1,105 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package dynamodbstreams + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// Amazon DynamoDB Streams provides API actions for accessing streams and processing +// stream records. To learn more about application development with Streams, +// see Capturing Table Activity with DynamoDB Streams (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Streams.html) +// in the Amazon DynamoDB Developer Guide. +// +// The following are short descriptions of each low-level DynamoDB Streams +// action: +// +// DescribeStream - Returns detailed information about a particular stream. +// +// GetRecords - Retrieves the stream records from within a shard. +// +// GetShardIterator - Returns information on how to retrieve the streams +// record from a shard with a given shard ID. +// +// ListStreams - Returns a list of all the streams associated with the current +// AWS account and endpoint. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type DynamoDBStreams struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "streams.dynamodb" + +// New creates a new instance of the DynamoDBStreams client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a DynamoDBStreams client from just a session. +// svc := dynamodbstreams.New(mySession) +// +// // Create a DynamoDBStreams client with additional configuration +// svc := dynamodbstreams.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *DynamoDBStreams { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *DynamoDBStreams { + svc := &DynamoDBStreams{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningName: "dynamodb", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2012-08-10", + JSONVersion: "1.0", + TargetPrefix: "DynamoDBStreams_20120810", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a DynamoDBStreams operation and runs any +// custom request initialization. +func (c *DynamoDBStreams) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go new file mode 100644 index 000000000..170405fa1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -0,0 +1,33259 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package ec2 provides a client for Amazon Elastic Compute Cloud. +package ec2 + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/ec2query" +) + +const opAcceptVpcPeeringConnection = "AcceptVpcPeeringConnection" + +// AcceptVpcPeeringConnectionRequest generates a "aws/request.Request" representing the +// client's request for the AcceptVpcPeeringConnection operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AcceptVpcPeeringConnection method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AcceptVpcPeeringConnectionRequest method. +// req, resp := client.AcceptVpcPeeringConnectionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) AcceptVpcPeeringConnectionRequest(input *AcceptVpcPeeringConnectionInput) (req *request.Request, output *AcceptVpcPeeringConnectionOutput) { + op := &request.Operation{ + Name: opAcceptVpcPeeringConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AcceptVpcPeeringConnectionInput{} + } + + req = c.newRequest(op, input, output) + output = &AcceptVpcPeeringConnectionOutput{} + req.Data = output + return +} + +// Accept a VPC peering connection request. 
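+// [Editorial sketch, not part of the generated diff] A minimal call,
+// assuming a configured *ec2.EC2 client named svc and a peering
+// connection in the pending-acceptance state; VpcPeeringConnectionId is
+// the field name in this SDK version's input struct (an assumption here,
+// as the struct is defined later in this file):
+//
+//    out, err := svc.AcceptVpcPeeringConnection(&ec2.AcceptVpcPeeringConnectionInput{
+//        VpcPeeringConnectionId: aws.String("pcx-1a2b3c4d"),
+//    })
+//    if err == nil {
+//        fmt.Println(out)
+//    }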
To accept a request, the VPC peering +// connection must be in the pending-acceptance state, and you must be the owner +// of the peer VPC. Use the DescribeVpcPeeringConnections request to view your +// outstanding VPC peering connection requests. +func (c *EC2) AcceptVpcPeeringConnection(input *AcceptVpcPeeringConnectionInput) (*AcceptVpcPeeringConnectionOutput, error) { + req, out := c.AcceptVpcPeeringConnectionRequest(input) + err := req.Send() + return out, err +} + +const opAllocateAddress = "AllocateAddress" + +// AllocateAddressRequest generates a "aws/request.Request" representing the +// client's request for the AllocateAddress operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AllocateAddress method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AllocateAddressRequest method. +// req, resp := client.AllocateAddressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) AllocateAddressRequest(input *AllocateAddressInput) (req *request.Request, output *AllocateAddressOutput) { + op := &request.Operation{ + Name: opAllocateAddress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AllocateAddressInput{} + } + + req = c.newRequest(op, input, output) + output = &AllocateAddressOutput{} + req.Data = output + return +} + +// Acquires an Elastic IP address. +// +// An Elastic IP address is for use either in the EC2-Classic platform or in +// a VPC. For more information, see Elastic IP Addresses (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) AllocateAddress(input *AllocateAddressInput) (*AllocateAddressOutput, error) { + req, out := c.AllocateAddressRequest(input) + err := req.Send() + return out, err +} + +const opAllocateHosts = "AllocateHosts" + +// AllocateHostsRequest generates a "aws/request.Request" representing the +// client's request for the AllocateHosts operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AllocateHosts method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AllocateHostsRequest method. 
+// req, resp := client.AllocateHostsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) AllocateHostsRequest(input *AllocateHostsInput) (req *request.Request, output *AllocateHostsOutput) { + op := &request.Operation{ + Name: opAllocateHosts, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AllocateHostsInput{} + } + + req = c.newRequest(op, input, output) + output = &AllocateHostsOutput{} + req.Data = output + return +} + +// Allocates a Dedicated host to your account. At minimum you need to specify +// the instance size type, Availability Zone, and quantity of hosts you want +// to allocate. +func (c *EC2) AllocateHosts(input *AllocateHostsInput) (*AllocateHostsOutput, error) { + req, out := c.AllocateHostsRequest(input) + err := req.Send() + return out, err +} + +const opAssignPrivateIpAddresses = "AssignPrivateIpAddresses" + +// AssignPrivateIpAddressesRequest generates a "aws/request.Request" representing the +// client's request for the AssignPrivateIpAddresses operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AssignPrivateIpAddresses method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AssignPrivateIpAddressesRequest method. +// req, resp := client.AssignPrivateIpAddressesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) AssignPrivateIpAddressesRequest(input *AssignPrivateIpAddressesInput) (req *request.Request, output *AssignPrivateIpAddressesOutput) { + op := &request.Operation{ + Name: opAssignPrivateIpAddresses, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssignPrivateIpAddressesInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AssignPrivateIpAddressesOutput{} + req.Data = output + return +} + +// Assigns one or more secondary private IP addresses to the specified network +// interface. You can specify one or more specific secondary IP addresses, or +// you can specify the number of secondary IP addresses to be automatically +// assigned within the subnet's CIDR block range. The number of secondary IP +// addresses that you can assign to an instance varies by instance type. For +// information about instance types, see Instance Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) +// in the Amazon Elastic Compute Cloud User Guide. For more information about +// Elastic IP addresses, see Elastic IP Addresses (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// AssignPrivateIpAddresses is available only in EC2-VPC. 
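+// [Editorial sketch, not part of the generated diff] Asking EC2 to
+// auto-assign two secondary private IPs within the subnet's CIDR range,
+// as the AssignPrivateIpAddresses doc above describes. The field names
+// (NetworkInterfaceId, SecondaryPrivateIpAddressCount) are taken from
+// this SDK version's input struct and are assumptions here:
+//
+//    _, err := svc.AssignPrivateIpAddresses(&ec2.AssignPrivateIpAddressesInput{
+//        NetworkInterfaceId:             aws.String("eni-12345678"),
+//        SecondaryPrivateIpAddressCount: aws.Int64(2),
+//    })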
+func (c *EC2) AssignPrivateIpAddresses(input *AssignPrivateIpAddressesInput) (*AssignPrivateIpAddressesOutput, error) { + req, out := c.AssignPrivateIpAddressesRequest(input) + err := req.Send() + return out, err +} + +const opAssociateAddress = "AssociateAddress" + +// AssociateAddressRequest generates a "aws/request.Request" representing the +// client's request for the AssociateAddress operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AssociateAddress method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AssociateAddressRequest method. +// req, resp := client.AssociateAddressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) AssociateAddressRequest(input *AssociateAddressInput) (req *request.Request, output *AssociateAddressOutput) { + op := &request.Operation{ + Name: opAssociateAddress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateAddressInput{} + } + + req = c.newRequest(op, input, output) + output = &AssociateAddressOutput{} + req.Data = output + return +} + +// Associates an Elastic IP address with an instance or a network interface. +// +// An Elastic IP address is for use in either the EC2-Classic platform or in +// a VPC. For more information, see Elastic IP Addresses (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// [EC2-Classic, VPC in an EC2-VPC-only account] If the Elastic IP address +// is already associated with a different instance, it is disassociated from +// that instance and associated with the specified instance. +// +// [VPC in an EC2-Classic account] If you don't specify a private IP address, +// the Elastic IP address is associated with the primary IP address. If the +// Elastic IP address is already associated with a different instance or a network +// interface, you get an error unless you allow reassociation. +// +// This is an idempotent operation. If you perform the operation more than +// once, Amazon EC2 doesn't return an error. +func (c *EC2) AssociateAddress(input *AssociateAddressInput) (*AssociateAddressOutput, error) { + req, out := c.AssociateAddressRequest(input) + err := req.Send() + return out, err +} + +const opAssociateDhcpOptions = "AssociateDhcpOptions" + +// AssociateDhcpOptionsRequest generates a "aws/request.Request" representing the +// client's request for the AssociateDhcpOptions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AssociateDhcpOptions method directly +// instead. 
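+// [Editorial sketch, not part of the generated diff] Associating a VPC
+// Elastic IP with an instance while permitting reassociation, per the
+// AssociateAddress doc above. AllocationId and AllowReassociation are
+// field names from this SDK version's input struct (assumptions here):
+//
+//    out, err := svc.AssociateAddress(&ec2.AssociateAddressInput{
+//        InstanceId:         aws.String("i-12345678"),
+//        AllocationId:       aws.String("eipalloc-12345678"),
+//        AllowReassociation: aws.Bool(true),
+//    })
+//    if err == nil {
+//        fmt.Println(out)
+//    }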
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AssociateDhcpOptionsRequest method. +// req, resp := client.AssociateDhcpOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) AssociateDhcpOptionsRequest(input *AssociateDhcpOptionsInput) (req *request.Request, output *AssociateDhcpOptionsOutput) { + op := &request.Operation{ + Name: opAssociateDhcpOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateDhcpOptionsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AssociateDhcpOptionsOutput{} + req.Data = output + return +} + +// Associates a set of DHCP options (that you've previously created) with the +// specified VPC, or associates no DHCP options with the VPC. +// +// After you associate the options with the VPC, any existing instances and +// all new instances that you launch in that VPC use the options. You don't +// need to restart or relaunch the instances. They automatically pick up the +// changes within a few hours, depending on how frequently the instance renews +// its DHCP lease. You can explicitly renew the lease using the operating system +// on the instance. +// +// For more information, see DHCP Options Sets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) AssociateDhcpOptions(input *AssociateDhcpOptionsInput) (*AssociateDhcpOptionsOutput, error) { + req, out := c.AssociateDhcpOptionsRequest(input) + err := req.Send() + return out, err +} + +const opAssociateRouteTable = "AssociateRouteTable" + +// AssociateRouteTableRequest generates a "aws/request.Request" representing the +// client's request for the AssociateRouteTable operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AssociateRouteTable method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AssociateRouteTableRequest method. +// req, resp := client.AssociateRouteTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) AssociateRouteTableRequest(input *AssociateRouteTableInput) (req *request.Request, output *AssociateRouteTableOutput) { + op := &request.Operation{ + Name: opAssociateRouteTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateRouteTableInput{} + } + + req = c.newRequest(op, input, output) + output = &AssociateRouteTableOutput{} + req.Data = output + return +} + +// Associates a subnet with a route table. The subnet and route table must be +// in the same VPC. This association causes traffic originating from the subnet +// to be routed according to the routes in the route table. 
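+// [Editorial sketch, not part of the generated diff] Associating a subnet
+// with a route table and keeping the returned association ID for a later
+// disassociation, as the surrounding doc describes. RouteTableId,
+// SubnetId, and AssociationId are field names from this SDK version's
+// structs (assumptions here):
+//
+//    out, err := svc.AssociateRouteTable(&ec2.AssociateRouteTableInput{
+//        RouteTableId: aws.String("rtb-12345678"),
+//        SubnetId:     aws.String("subnet-12345678"),
+//    })
+//    if err == nil {
+//        fmt.Println(*out.AssociationId) // needed to disassociate the table later
+//    }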
The action returns +// an association ID, which you need in order to disassociate the route table +// from the subnet later. A route table can be associated with multiple subnets. +// +// For more information about route tables, see Route Tables (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) AssociateRouteTable(input *AssociateRouteTableInput) (*AssociateRouteTableOutput, error) { + req, out := c.AssociateRouteTableRequest(input) + err := req.Send() + return out, err +} + +const opAttachClassicLinkVpc = "AttachClassicLinkVpc" + +// AttachClassicLinkVpcRequest generates a "aws/request.Request" representing the +// client's request for the AttachClassicLinkVpc operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AttachClassicLinkVpc method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AttachClassicLinkVpcRequest method. +// req, resp := client.AttachClassicLinkVpcRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) AttachClassicLinkVpcRequest(input *AttachClassicLinkVpcInput) (req *request.Request, output *AttachClassicLinkVpcOutput) { + op := &request.Operation{ + Name: opAttachClassicLinkVpc, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachClassicLinkVpcInput{} + } + + req = c.newRequest(op, input, output) + output = &AttachClassicLinkVpcOutput{} + req.Data = output + return +} + +// Links an EC2-Classic instance to a ClassicLink-enabled VPC through one or +// more of the VPC's security groups. You cannot link an EC2-Classic instance +// to more than one VPC at a time. You can only link an instance that's in the +// running state. An instance is automatically unlinked from a VPC when it's +// stopped - you can link it to the VPC again when you restart it. +// +// After you've linked an instance, you cannot change the VPC security groups +// that are associated with it. To change the security groups, you must first +// unlink the instance, and then link it again. +// +// Linking your instance to a VPC is sometimes referred to as attaching your +// instance. +func (c *EC2) AttachClassicLinkVpc(input *AttachClassicLinkVpcInput) (*AttachClassicLinkVpcOutput, error) { + req, out := c.AttachClassicLinkVpcRequest(input) + err := req.Send() + return out, err +} + +const opAttachInternetGateway = "AttachInternetGateway" + +// AttachInternetGatewayRequest generates a "aws/request.Request" representing the +// client's request for the AttachInternetGateway operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the AttachInternetGateway method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AttachInternetGatewayRequest method. +// req, resp := client.AttachInternetGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) AttachInternetGatewayRequest(input *AttachInternetGatewayInput) (req *request.Request, output *AttachInternetGatewayOutput) { + op := &request.Operation{ + Name: opAttachInternetGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachInternetGatewayInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AttachInternetGatewayOutput{} + req.Data = output + return +} + +// Attaches an Internet gateway to a VPC, enabling connectivity between the +// Internet and the VPC. For more information about your VPC and Internet gateway, +// see the Amazon Virtual Private Cloud User Guide (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/). +func (c *EC2) AttachInternetGateway(input *AttachInternetGatewayInput) (*AttachInternetGatewayOutput, error) { + req, out := c.AttachInternetGatewayRequest(input) + err := req.Send() + return out, err +} + +const opAttachNetworkInterface = "AttachNetworkInterface" + +// AttachNetworkInterfaceRequest generates a "aws/request.Request" representing the +// client's request for the AttachNetworkInterface operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AttachNetworkInterface method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AttachNetworkInterfaceRequest method. +// req, resp := client.AttachNetworkInterfaceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) AttachNetworkInterfaceRequest(input *AttachNetworkInterfaceInput) (req *request.Request, output *AttachNetworkInterfaceOutput) { + op := &request.Operation{ + Name: opAttachNetworkInterface, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachNetworkInterfaceInput{} + } + + req = c.newRequest(op, input, output) + output = &AttachNetworkInterfaceOutput{} + req.Data = output + return +} + +// Attaches a network interface to an instance. +func (c *EC2) AttachNetworkInterface(input *AttachNetworkInterfaceInput) (*AttachNetworkInterfaceOutput, error) { + req, out := c.AttachNetworkInterfaceRequest(input) + err := req.Send() + return out, err +} + +const opAttachVolume = "AttachVolume" + +// AttachVolumeRequest generates a "aws/request.Request" representing the +// client's request for the AttachVolume operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
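+// [Editorial sketch, not part of the generated diff] Attaching an
+// Internet gateway to a VPC, per the AttachInternetGateway doc above;
+// InternetGatewayId and VpcId are field names from this SDK version's
+// input struct (assumptions here). The output carries no data, so only
+// the error matters:
+//
+//    _, err := svc.AttachInternetGateway(&ec2.AttachInternetGatewayInput{
+//        InternetGatewayId: aws.String("igw-12345678"),
+//        VpcId:             aws.String("vpc-12345678"),
+//    })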
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AttachVolume method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AttachVolumeRequest method. +// req, resp := client.AttachVolumeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) AttachVolumeRequest(input *AttachVolumeInput) (req *request.Request, output *VolumeAttachment) { + op := &request.Operation{ + Name: opAttachVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachVolumeInput{} + } + + req = c.newRequest(op, input, output) + output = &VolumeAttachment{} + req.Data = output + return +} + +// Attaches an EBS volume to a running or stopped instance and exposes it to +// the instance with the specified device name. +// +// Encrypted EBS volumes may only be attached to instances that support Amazon +// EBS encryption. For more information, see Amazon EBS Encryption (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// For a list of supported device names, see Attaching an EBS Volume to an +// Instance (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-attaching-volume.html). +// Any device names that aren't reserved for instance store volumes can be used +// for EBS volumes. For more information, see Amazon EC2 Instance Store (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// If a volume has an AWS Marketplace product code: +// +// The volume can be attached only to a stopped instance. +// +// AWS Marketplace product codes are copied from the volume to the instance. +// +// You must be subscribed to the product. +// +// The instance type and operating system of the instance must support the +// product. For example, you can't detach a volume from a Windows instance and +// attach it to a Linux instance. +// +// For an overview of the AWS Marketplace, see Introducing AWS Marketplace +// (https://aws.amazon.com/marketplace/help/200900000). +// +// For more information about EBS volumes, see Attaching Amazon EBS Volumes +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-attaching-volume.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) AttachVolume(input *AttachVolumeInput) (*VolumeAttachment, error) { + req, out := c.AttachVolumeRequest(input) + err := req.Send() + return out, err +} + +const opAttachVpnGateway = "AttachVpnGateway" + +// AttachVpnGatewayRequest generates a "aws/request.Request" representing the +// client's request for the AttachVpnGateway operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AttachVpnGateway method directly +// instead. 
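+// [Editorial sketch, not part of the generated diff] Attaching an EBS
+// volume under a specific device name, per the AttachVolume doc above.
+// The response is the VolumeAttachment the request method returns;
+// Device, InstanceId, VolumeId, and State are names from this SDK
+// version's structs (assumptions here):
+//
+//    att, err := svc.AttachVolume(&ec2.AttachVolumeInput{
+//        Device:     aws.String("/dev/sdf"),
+//        InstanceId: aws.String("i-12345678"),
+//        VolumeId:   aws.String("vol-12345678"),
+//    })
+//    if err == nil {
+//        fmt.Println(*att.State) // e.g. "attaching"
+//    }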
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AttachVpnGatewayRequest method. +// req, resp := client.AttachVpnGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) AttachVpnGatewayRequest(input *AttachVpnGatewayInput) (req *request.Request, output *AttachVpnGatewayOutput) { + op := &request.Operation{ + Name: opAttachVpnGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachVpnGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &AttachVpnGatewayOutput{} + req.Data = output + return +} + +// Attaches a virtual private gateway to a VPC. For more information, see Adding +// a Hardware Virtual Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) AttachVpnGateway(input *AttachVpnGatewayInput) (*AttachVpnGatewayOutput, error) { + req, out := c.AttachVpnGatewayRequest(input) + err := req.Send() + return out, err +} + +const opAuthorizeSecurityGroupEgress = "AuthorizeSecurityGroupEgress" + +// AuthorizeSecurityGroupEgressRequest generates a "aws/request.Request" representing the +// client's request for the AuthorizeSecurityGroupEgress operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AuthorizeSecurityGroupEgress method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AuthorizeSecurityGroupEgressRequest method. +// req, resp := client.AuthorizeSecurityGroupEgressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) AuthorizeSecurityGroupEgressRequest(input *AuthorizeSecurityGroupEgressInput) (req *request.Request, output *AuthorizeSecurityGroupEgressOutput) { + op := &request.Operation{ + Name: opAuthorizeSecurityGroupEgress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AuthorizeSecurityGroupEgressInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AuthorizeSecurityGroupEgressOutput{} + req.Data = output + return +} + +// [EC2-VPC only] Adds one or more egress rules to a security group for use +// with a VPC. Specifically, this action permits instances to send traffic to +// one or more destination CIDR IP address ranges, or to one or more destination +// security groups for the same VPC. This action doesn't apply to security groups +// for use in EC2-Classic. For more information, see Security Groups for Your +// VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_SecurityGroups.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// You can have up to 50 rules per security group (covering both ingress and +// egress rules). 
+// +// Each rule consists of the protocol (for example, TCP), plus either a CIDR +// range or a source group. For the TCP and UDP protocols, you must also specify +// the destination port or port range. For the ICMP protocol, you must also +// specify the ICMP type and code. You can use -1 for the type or code to mean +// all types or all codes. +// +// Rule changes are propagated to affected instances as quickly as possible. +// However, a small delay might occur. +func (c *EC2) AuthorizeSecurityGroupEgress(input *AuthorizeSecurityGroupEgressInput) (*AuthorizeSecurityGroupEgressOutput, error) { + req, out := c.AuthorizeSecurityGroupEgressRequest(input) + err := req.Send() + return out, err +} + +const opAuthorizeSecurityGroupIngress = "AuthorizeSecurityGroupIngress" + +// AuthorizeSecurityGroupIngressRequest generates a "aws/request.Request" representing the +// client's request for the AuthorizeSecurityGroupIngress operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AuthorizeSecurityGroupIngress method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AuthorizeSecurityGroupIngressRequest method. +// req, resp := client.AuthorizeSecurityGroupIngressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) AuthorizeSecurityGroupIngressRequest(input *AuthorizeSecurityGroupIngressInput) (req *request.Request, output *AuthorizeSecurityGroupIngressOutput) { + op := &request.Operation{ + Name: opAuthorizeSecurityGroupIngress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AuthorizeSecurityGroupIngressInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AuthorizeSecurityGroupIngressOutput{} + req.Data = output + return +} + +// Adds one or more ingress rules to a security group. +// +// EC2-Classic: You can have up to 100 rules per group. +// +// EC2-VPC: You can have up to 50 rules per group (covering both ingress and +// egress rules). +// +// Rule changes are propagated to instances within the security group as quickly +// as possible. However, a small delay might occur. +// +// [EC2-Classic] This action gives one or more CIDR IP address ranges permission +// to access a security group in your account, or gives one or more security +// groups (called the source groups) permission to access a security group for +// your account. A source group can be for your own AWS account, or another. +// +// [EC2-VPC] This action gives one or more CIDR IP address ranges permission +// to access a security group in your VPC, or gives one or more other security +// groups (called the source groups) permission to access a security group for +// your VPC. The security groups must all be for the same VPC. 
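+//
+// As a minimal sketch, a single TCP rule can be authorized by calling the
+// method directly. The group ID and CIDR below are illustrative placeholders,
+// and svc is assumed to be a configured *EC2 client (for example,
+// ec2.New(session.New())):
+//
+// params := &ec2.AuthorizeSecurityGroupIngressInput{
+// GroupId: aws.String("sg-12345678"), // placeholder security group ID
+// IpProtocol: aws.String("tcp"),
+// FromPort: aws.Int64(22),
+// ToPort: aws.Int64(22),
+// CidrIp: aws.String("203.0.113.0/24"), // placeholder CIDR range
+// }
+// resp, err := svc.AuthorizeSecurityGroupIngress(params)
+// if err != nil {
+// fmt.Println(err)
+// return
+// }
+// fmt.Println(resp)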
+func (c *EC2) AuthorizeSecurityGroupIngress(input *AuthorizeSecurityGroupIngressInput) (*AuthorizeSecurityGroupIngressOutput, error) { + req, out := c.AuthorizeSecurityGroupIngressRequest(input) + err := req.Send() + return out, err +} + +const opBundleInstance = "BundleInstance" + +// BundleInstanceRequest generates a "aws/request.Request" representing the +// client's request for the BundleInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the BundleInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the BundleInstanceRequest method. +// req, resp := client.BundleInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) BundleInstanceRequest(input *BundleInstanceInput) (req *request.Request, output *BundleInstanceOutput) { + op := &request.Operation{ + Name: opBundleInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BundleInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &BundleInstanceOutput{} + req.Data = output + return +} + +// Bundles an Amazon instance store-backed Windows instance. +// +// During bundling, only the root device volume (C:\) is bundled. Data on other +// instance store volumes is not preserved. +// +// This action is not applicable for Linux/Unix instances or Windows instances +// that are backed by Amazon EBS. +// +// For more information, see Creating an Instance Store-Backed Windows AMI +// (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/Creating_InstanceStoreBacked_WinAMI.html). +func (c *EC2) BundleInstance(input *BundleInstanceInput) (*BundleInstanceOutput, error) { + req, out := c.BundleInstanceRequest(input) + err := req.Send() + return out, err +} + +const opCancelBundleTask = "CancelBundleTask" + +// CancelBundleTaskRequest generates a "aws/request.Request" representing the +// client's request for the CancelBundleTask operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CancelBundleTask method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CancelBundleTaskRequest method. 
+// req, resp := client.CancelBundleTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) CancelBundleTaskRequest(input *CancelBundleTaskInput) (req *request.Request, output *CancelBundleTaskOutput) { + op := &request.Operation{ + Name: opCancelBundleTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelBundleTaskInput{} + } + + req = c.newRequest(op, input, output) + output = &CancelBundleTaskOutput{} + req.Data = output + return +} + +// Cancels a bundling operation for an instance store-backed Windows instance. +func (c *EC2) CancelBundleTask(input *CancelBundleTaskInput) (*CancelBundleTaskOutput, error) { + req, out := c.CancelBundleTaskRequest(input) + err := req.Send() + return out, err +} + +const opCancelConversionTask = "CancelConversionTask" + +// CancelConversionTaskRequest generates a "aws/request.Request" representing the +// client's request for the CancelConversionTask operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CancelConversionTask method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CancelConversionTaskRequest method. +// req, resp := client.CancelConversionTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) CancelConversionTaskRequest(input *CancelConversionTaskInput) (req *request.Request, output *CancelConversionTaskOutput) { + op := &request.Operation{ + Name: opCancelConversionTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelConversionTaskInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CancelConversionTaskOutput{} + req.Data = output + return +} + +// Cancels an active conversion task. The task can be the import of an instance +// or volume. The action removes all artifacts of the conversion, including +// a partially uploaded volume or instance. If the conversion is complete or +// is in the process of transferring the final disk image, the command fails +// and returns an exception. +// +// For more information, see Using the Command Line Tools to Import Your Virtual +// Machine to Amazon EC2 (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UploadingYourInstancesandVolumes.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CancelConversionTask(input *CancelConversionTaskInput) (*CancelConversionTaskOutput, error) { + req, out := c.CancelConversionTaskRequest(input) + err := req.Send() + return out, err +} + +const opCancelExportTask = "CancelExportTask" + +// CancelExportTaskRequest generates a "aws/request.Request" representing the +// client's request for the CancelExportTask operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CancelExportTask method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CancelExportTaskRequest method. +// req, resp := client.CancelExportTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) CancelExportTaskRequest(input *CancelExportTaskInput) (req *request.Request, output *CancelExportTaskOutput) { + op := &request.Operation{ + Name: opCancelExportTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelExportTaskInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CancelExportTaskOutput{} + req.Data = output + return +} + +// Cancels an active export task. The request removes all artifacts of the export, +// including any partially-created Amazon S3 objects. If the export task is +// complete or is in the process of transferring the final disk image, the command +// fails and returns an error. +func (c *EC2) CancelExportTask(input *CancelExportTaskInput) (*CancelExportTaskOutput, error) { + req, out := c.CancelExportTaskRequest(input) + err := req.Send() + return out, err +} + +const opCancelImportTask = "CancelImportTask" + +// CancelImportTaskRequest generates a "aws/request.Request" representing the +// client's request for the CancelImportTask operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CancelImportTask method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CancelImportTaskRequest method. +// req, resp := client.CancelImportTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) CancelImportTaskRequest(input *CancelImportTaskInput) (req *request.Request, output *CancelImportTaskOutput) { + op := &request.Operation{ + Name: opCancelImportTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelImportTaskInput{} + } + + req = c.newRequest(op, input, output) + output = &CancelImportTaskOutput{} + req.Data = output + return +} + +// Cancels an in-process import virtual machine or import snapshot task. 
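+//
+// A minimal sketch of canceling a task by calling the method directly; the
+// import task ID below is a placeholder, and svc is assumed to be a
+// configured *EC2 client:
+//
+// resp, err := svc.CancelImportTask(&ec2.CancelImportTaskInput{
+// ImportTaskId: aws.String("import-ami-abcd1234"), // placeholder task ID
+// CancelReason: aws.String("no longer needed"), // optional reason
+// })
+// if err == nil { // resp describes the canceled task
+// fmt.Println(resp)
+// }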
+func (c *EC2) CancelImportTask(input *CancelImportTaskInput) (*CancelImportTaskOutput, error) { + req, out := c.CancelImportTaskRequest(input) + err := req.Send() + return out, err +} + +const opCancelReservedInstancesListing = "CancelReservedInstancesListing" + +// CancelReservedInstancesListingRequest generates a "aws/request.Request" representing the +// client's request for the CancelReservedInstancesListing operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CancelReservedInstancesListing method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CancelReservedInstancesListingRequest method. +// req, resp := client.CancelReservedInstancesListingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) CancelReservedInstancesListingRequest(input *CancelReservedInstancesListingInput) (req *request.Request, output *CancelReservedInstancesListingOutput) { + op := &request.Operation{ + Name: opCancelReservedInstancesListing, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelReservedInstancesListingInput{} + } + + req = c.newRequest(op, input, output) + output = &CancelReservedInstancesListingOutput{} + req.Data = output + return +} + +// Cancels the specified Reserved Instance listing in the Reserved Instance +// Marketplace. +// +// For more information, see Reserved Instance Marketplace (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CancelReservedInstancesListing(input *CancelReservedInstancesListingInput) (*CancelReservedInstancesListingOutput, error) { + req, out := c.CancelReservedInstancesListingRequest(input) + err := req.Send() + return out, err +} + +const opCancelSpotFleetRequests = "CancelSpotFleetRequests" + +// CancelSpotFleetRequestsRequest generates a "aws/request.Request" representing the +// client's request for the CancelSpotFleetRequests operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CancelSpotFleetRequests method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CancelSpotFleetRequestsRequest method. 
+// req, resp := client.CancelSpotFleetRequestsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) CancelSpotFleetRequestsRequest(input *CancelSpotFleetRequestsInput) (req *request.Request, output *CancelSpotFleetRequestsOutput) { + op := &request.Operation{ + Name: opCancelSpotFleetRequests, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelSpotFleetRequestsInput{} + } + + req = c.newRequest(op, input, output) + output = &CancelSpotFleetRequestsOutput{} + req.Data = output + return +} + +// Cancels the specified Spot fleet requests. +// +// After you cancel a Spot fleet request, the Spot fleet launches no new Spot +// instances. You must specify whether the Spot fleet should also terminate +// its Spot instances. If you terminate the instances, the Spot fleet request +// enters the cancelled_terminating state. Otherwise, the Spot fleet request +// enters the cancelled_running state and the instances continue to run until +// they are interrupted or you terminate them manually. +func (c *EC2) CancelSpotFleetRequests(input *CancelSpotFleetRequestsInput) (*CancelSpotFleetRequestsOutput, error) { + req, out := c.CancelSpotFleetRequestsRequest(input) + err := req.Send() + return out, err +} + +const opCancelSpotInstanceRequests = "CancelSpotInstanceRequests" + +// CancelSpotInstanceRequestsRequest generates a "aws/request.Request" representing the +// client's request for the CancelSpotInstanceRequests operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CancelSpotInstanceRequests method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CancelSpotInstanceRequestsRequest method. +// req, resp := client.CancelSpotInstanceRequestsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) CancelSpotInstanceRequestsRequest(input *CancelSpotInstanceRequestsInput) (req *request.Request, output *CancelSpotInstanceRequestsOutput) { + op := &request.Operation{ + Name: opCancelSpotInstanceRequests, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelSpotInstanceRequestsInput{} + } + + req = c.newRequest(op, input, output) + output = &CancelSpotInstanceRequestsOutput{} + req.Data = output + return +} + +// Cancels one or more Spot instance requests. Spot instances are instances +// that Amazon EC2 starts on your behalf when the bid price that you specify +// exceeds the current Spot price. Amazon EC2 periodically sets the Spot price +// based on available Spot instance capacity and current Spot instance requests. +// For more information, see Spot Instance Requests (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-requests.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Canceling a Spot instance request does not terminate running Spot instances +// associated with the request. 
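+//
+// A minimal sketch canceling a single request by ID (the ID below is a
+// placeholder, and svc is assumed to be a configured *EC2 client); any
+// running instances started by the request must be terminated separately:
+//
+// resp, err := svc.CancelSpotInstanceRequests(&ec2.CancelSpotInstanceRequestsInput{
+// SpotInstanceRequestIds: []*string{aws.String("sir-08b93456")}, // placeholder request ID
+// })
+// if err == nil { // resp lists the new state of each request
+// fmt.Println(resp)
+// }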
+func (c *EC2) CancelSpotInstanceRequests(input *CancelSpotInstanceRequestsInput) (*CancelSpotInstanceRequestsOutput, error) { + req, out := c.CancelSpotInstanceRequestsRequest(input) + err := req.Send() + return out, err +} + +const opConfirmProductInstance = "ConfirmProductInstance" + +// ConfirmProductInstanceRequest generates a "aws/request.Request" representing the +// client's request for the ConfirmProductInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ConfirmProductInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ConfirmProductInstanceRequest method. +// req, resp := client.ConfirmProductInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) ConfirmProductInstanceRequest(input *ConfirmProductInstanceInput) (req *request.Request, output *ConfirmProductInstanceOutput) { + op := &request.Operation{ + Name: opConfirmProductInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ConfirmProductInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &ConfirmProductInstanceOutput{} + req.Data = output + return +} + +// Determines whether a product code is associated with an instance. This action +// can only be used by the owner of the product code. It is useful when a product +// code owner needs to verify whether another user's instance is eligible for +// support. +func (c *EC2) ConfirmProductInstance(input *ConfirmProductInstanceInput) (*ConfirmProductInstanceOutput, error) { + req, out := c.ConfirmProductInstanceRequest(input) + err := req.Send() + return out, err +} + +const opCopyImage = "CopyImage" + +// CopyImageRequest generates a "aws/request.Request" representing the +// client's request for the CopyImage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CopyImage method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CopyImageRequest method. 
+// req, resp := client.CopyImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) CopyImageRequest(input *CopyImageInput) (req *request.Request, output *CopyImageOutput) { + op := &request.Operation{ + Name: opCopyImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CopyImageInput{} + } + + req = c.newRequest(op, input, output) + output = &CopyImageOutput{} + req.Data = output + return +} + +// Initiates the copy of an AMI from the specified source region to the current +// region. You specify the destination region by using its endpoint when making +// the request. +// +// For more information, see Copying AMIs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/CopyingAMIs.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CopyImage(input *CopyImageInput) (*CopyImageOutput, error) { + req, out := c.CopyImageRequest(input) + err := req.Send() + return out, err +} + +const opCopySnapshot = "CopySnapshot" + +// CopySnapshotRequest generates a "aws/request.Request" representing the +// client's request for the CopySnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CopySnapshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CopySnapshotRequest method. +// req, resp := client.CopySnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) CopySnapshotRequest(input *CopySnapshotInput) (req *request.Request, output *CopySnapshotOutput) { + op := &request.Operation{ + Name: opCopySnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CopySnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &CopySnapshotOutput{} + req.Data = output + return +} + +// Copies a point-in-time snapshot of an EBS volume and stores it in Amazon +// S3. You can copy the snapshot within the same region or from one region to +// another. You can use the snapshot to create EBS volumes or Amazon Machine +// Images (AMIs). The snapshot is copied to the regional endpoint that you send +// the HTTP request to. +// +// Copies of encrypted EBS snapshots remain encrypted. Copies of unencrypted +// snapshots remain unencrypted, unless the Encrypted flag is specified during +// the snapshot copy operation. By default, encrypted snapshot copies use the +// default AWS Key Management Service (AWS KMS) customer master key (CMK); however, +// you can specify a non-default CMK with the KmsKeyId parameter. +// +// To copy an encrypted snapshot that has been shared from another account, +// you must have permissions for the CMK used to encrypt the snapshot. +// +// For more information, see Copying an Amazon EBS Snapshot (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-copy-snapshot.html) +// in the Amazon Elastic Compute Cloud User Guide. 
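+//
+// A minimal sketch of a cross-region copy; the request is sent to the
+// destination region, so svc is assumed to be a client configured for that
+// region, and the source region and snapshot ID below are placeholders:
+//
+// resp, err := svc.CopySnapshot(&ec2.CopySnapshotInput{
+// SourceRegion: aws.String("us-west-2"), // placeholder source region
+// SourceSnapshotId: aws.String("snap-1234567890abcdef0"), // placeholder snapshot ID
+// Description: aws.String("copy of development snapshot"),
+// })
+// if err == nil { // resp contains the new snapshot's ID
+// fmt.Println(resp)
+// }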
+func (c *EC2) CopySnapshot(input *CopySnapshotInput) (*CopySnapshotOutput, error) { + req, out := c.CopySnapshotRequest(input) + err := req.Send() + return out, err +} + +const opCreateCustomerGateway = "CreateCustomerGateway" + +// CreateCustomerGatewayRequest generates a "aws/request.Request" representing the +// client's request for the CreateCustomerGateway operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateCustomerGateway method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateCustomerGatewayRequest method. +// req, resp := client.CreateCustomerGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) CreateCustomerGatewayRequest(input *CreateCustomerGatewayInput) (req *request.Request, output *CreateCustomerGatewayOutput) { + op := &request.Operation{ + Name: opCreateCustomerGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateCustomerGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateCustomerGatewayOutput{} + req.Data = output + return +} + +// Provides information to AWS about your VPN customer gateway device. The customer +// gateway is the appliance at your end of the VPN connection. (The device on +// the AWS side of the VPN connection is the virtual private gateway.) You must +// provide the Internet-routable IP address of the customer gateway's external +// interface. The IP address must be static and may be behind a device performing +// network address translation (NAT). +// +// For devices that use Border Gateway Protocol (BGP), you can also provide +// the device's BGP Autonomous System Number (ASN). You can use an existing +// ASN assigned to your network. If you don't have an ASN already, you can use +// a private ASN (in the 64512 - 65534 range). +// +// Amazon EC2 supports all 2-byte ASN numbers in the range of 1 - 65534, with +// the exception of 7224, which is reserved in the us-east-1 region, and 9059, +// which is reserved in the eu-west-1 region. +// +// For more information about VPN customer gateways, see Adding a Hardware +// Virtual Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// You cannot create more than one customer gateway with the same VPN type, +// IP address, and BGP ASN parameter values. If you run an identical request +// more than one time, the first request creates the customer gateway, and subsequent +// requests return information about the existing customer gateway. The subsequent +// requests do not create new customer gateway resources. 
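+//
+// A minimal sketch with placeholder values for the ASN and IP address; svc
+// is assumed to be a configured *EC2 client, and ipsec.1 is the only
+// supported gateway type:
+//
+// resp, err := svc.CreateCustomerGateway(&ec2.CreateCustomerGatewayInput{
+// BgpAsn: aws.Int64(65000), // placeholder private ASN
+// PublicIp: aws.String("203.0.113.12"), // placeholder static, Internet-routable IP
+// Type: aws.String("ipsec.1"),
+// })
+// if err == nil { // resp describes the new customer gateway
+// fmt.Println(resp)
+// }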
+func (c *EC2) CreateCustomerGateway(input *CreateCustomerGatewayInput) (*CreateCustomerGatewayOutput, error) { + req, out := c.CreateCustomerGatewayRequest(input) + err := req.Send() + return out, err +} + +const opCreateDhcpOptions = "CreateDhcpOptions" + +// CreateDhcpOptionsRequest generates a "aws/request.Request" representing the +// client's request for the CreateDhcpOptions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDhcpOptions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDhcpOptionsRequest method. +// req, resp := client.CreateDhcpOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) CreateDhcpOptionsRequest(input *CreateDhcpOptionsInput) (req *request.Request, output *CreateDhcpOptionsOutput) { + op := &request.Operation{ + Name: opCreateDhcpOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDhcpOptionsInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDhcpOptionsOutput{} + req.Data = output + return +} + +// Creates a set of DHCP options for your VPC. After creating the set, you must +// associate it with the VPC, causing all existing and new instances that you +// launch in the VPC to use this set of DHCP options. The following are the +// individual DHCP options you can specify. For more information about the options, +// see RFC 2132 (http://www.ietf.org/rfc/rfc2132.txt). +// +// domain-name-servers - The IP addresses of up to four domain name servers, +// or AmazonProvidedDNS. The default DHCP option set specifies AmazonProvidedDNS. +// If specifying more than one domain name server, specify the IP addresses +// in a single parameter, separated by commas. +// +// domain-name - If you're using AmazonProvidedDNS in "us-east-1", specify +// "ec2.internal". If you're using AmazonProvidedDNS in another region, specify +// "region.compute.internal" (for example, "ap-northeast-1.compute.internal"). +// Otherwise, specify a domain name (for example, "MyCompany.com"). Important: +// Some Linux operating systems accept multiple domain names separated by spaces. +// However, Windows and other Linux operating systems treat the value as a single +// domain, which results in unexpected behavior. If your DHCP options set is +// associated with a VPC that has instances with multiple operating systems, +// specify only one domain name. +// +// ntp-servers - The IP addresses of up to four Network Time Protocol (NTP) +// servers. +// +// netbios-name-servers - The IP addresses of up to four NetBIOS name servers. +// +// netbios-node-type - The NetBIOS node type (1, 2, 4, or 8). We recommend +// that you specify 2 (broadcast and multicast are not currently supported). +// For more information about these node types, see RFC 2132 (http://www.ietf.org/rfc/rfc2132.txt). +// +// Your VPC automatically starts out with a set of DHCP options that includes +// only a DNS server that we provide (AmazonProvidedDNS). 
If you create a set +// of options, and if your VPC has an Internet gateway, make sure to set the +// domain-name-servers option either to AmazonProvidedDNS or to a domain name +// server of your choice. For more information about DHCP options, see DHCP +// Options Sets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) CreateDhcpOptions(input *CreateDhcpOptionsInput) (*CreateDhcpOptionsOutput, error) { + req, out := c.CreateDhcpOptionsRequest(input) + err := req.Send() + return out, err +} + +const opCreateFlowLogs = "CreateFlowLogs" + +// CreateFlowLogsRequest generates a "aws/request.Request" representing the +// client's request for the CreateFlowLogs operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateFlowLogs method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateFlowLogsRequest method. +// req, resp := client.CreateFlowLogsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) CreateFlowLogsRequest(input *CreateFlowLogsInput) (req *request.Request, output *CreateFlowLogsOutput) { + op := &request.Operation{ + Name: opCreateFlowLogs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateFlowLogsInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateFlowLogsOutput{} + req.Data = output + return +} + +// Creates one or more flow logs to capture IP traffic for a specific network +// interface, subnet, or VPC. Flow logs are delivered to a specified log group +// in Amazon CloudWatch Logs. If you specify a VPC or subnet in the request, +// a log stream is created in CloudWatch Logs for each network interface in +// the subnet or VPC. Log streams can include information about accepted and +// rejected traffic to a network interface. You can view the data in your log +// streams using Amazon CloudWatch Logs. +// +// In your request, you must also specify an IAM role that has permission to +// publish logs to CloudWatch Logs. +func (c *EC2) CreateFlowLogs(input *CreateFlowLogsInput) (*CreateFlowLogsOutput, error) { + req, out := c.CreateFlowLogsRequest(input) + err := req.Send() + return out, err +} + +const opCreateImage = "CreateImage" + +// CreateImageRequest generates a "aws/request.Request" representing the +// client's request for the CreateImage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateImage method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the CreateImageRequest method. +// req, resp := client.CreateImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) CreateImageRequest(input *CreateImageInput) (req *request.Request, output *CreateImageOutput) { + op := &request.Operation{ + Name: opCreateImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateImageInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateImageOutput{} + req.Data = output + return +} + +// Creates an Amazon EBS-backed AMI from an Amazon EBS-backed instance that +// is either running or stopped. +// +// If you customized your instance with instance store volumes or EBS volumes +// in addition to the root device volume, the new AMI contains block device +// mapping information for those volumes. When you launch an instance from this +// new AMI, the instance automatically launches with those additional volumes. +// +// For more information, see Creating Amazon EBS-Backed Linux AMIs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/creating-an-ami-ebs.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CreateImage(input *CreateImageInput) (*CreateImageOutput, error) { + req, out := c.CreateImageRequest(input) + err := req.Send() + return out, err +} + +const opCreateInstanceExportTask = "CreateInstanceExportTask" + +// CreateInstanceExportTaskRequest generates a "aws/request.Request" representing the +// client's request for the CreateInstanceExportTask operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateInstanceExportTask method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateInstanceExportTaskRequest method. +// req, resp := client.CreateInstanceExportTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) CreateInstanceExportTaskRequest(input *CreateInstanceExportTaskInput) (req *request.Request, output *CreateInstanceExportTaskOutput) { + op := &request.Operation{ + Name: opCreateInstanceExportTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateInstanceExportTaskInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateInstanceExportTaskOutput{} + req.Data = output + return +} + +// Exports a running or stopped instance to an S3 bucket. +// +// For information about the supported operating systems, image formats, and +// known limitations for the types of instances you can export, see Exporting +// EC2 Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ExportingEC2Instances.html) +// in the Amazon Elastic Compute Cloud User Guide. 
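+//
+// A minimal sketch exporting to an OVA in Amazon S3; the instance ID and
+// bucket name below are placeholders, and svc is assumed to be a configured
+// *EC2 client:
+//
+// resp, err := svc.CreateInstanceExportTask(&ec2.CreateInstanceExportTaskInput{
+// InstanceId: aws.String("i-1234567890abcdef0"), // placeholder instance ID
+// TargetEnvironment: aws.String("vmware"),
+// ExportToS3Task: &ec2.ExportToS3TaskSpecification{
+// S3Bucket: aws.String("my-export-bucket"), // placeholder bucket name
+// DiskImageFormat: aws.String("VMDK"),
+// ContainerFormat: aws.String("ova"),
+// },
+// })
+// if err == nil { // resp describes the new export task
+// fmt.Println(resp)
+// }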
+func (c *EC2) CreateInstanceExportTask(input *CreateInstanceExportTaskInput) (*CreateInstanceExportTaskOutput, error) { + req, out := c.CreateInstanceExportTaskRequest(input) + err := req.Send() + return out, err +} + +const opCreateInternetGateway = "CreateInternetGateway" + +// CreateInternetGatewayRequest generates a "aws/request.Request" representing the +// client's request for the CreateInternetGateway operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateInternetGateway method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateInternetGatewayRequest method. +// req, resp := client.CreateInternetGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) CreateInternetGatewayRequest(input *CreateInternetGatewayInput) (req *request.Request, output *CreateInternetGatewayOutput) { + op := &request.Operation{ + Name: opCreateInternetGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateInternetGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateInternetGatewayOutput{} + req.Data = output + return +} + +// Creates an Internet gateway for use with a VPC. After creating the Internet +// gateway, you attach it to a VPC using AttachInternetGateway. +// +// For more information about your VPC and Internet gateway, see the Amazon +// Virtual Private Cloud User Guide (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/). +func (c *EC2) CreateInternetGateway(input *CreateInternetGatewayInput) (*CreateInternetGatewayOutput, error) { + req, out := c.CreateInternetGatewayRequest(input) + err := req.Send() + return out, err +} + +const opCreateKeyPair = "CreateKeyPair" + +// CreateKeyPairRequest generates a "aws/request.Request" representing the +// client's request for the CreateKeyPair operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateKeyPair method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateKeyPairRequest method. 
+// req, resp := client.CreateKeyPairRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) CreateKeyPairRequest(input *CreateKeyPairInput) (req *request.Request, output *CreateKeyPairOutput) { + op := &request.Operation{ + Name: opCreateKeyPair, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateKeyPairInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateKeyPairOutput{} + req.Data = output + return +} + +// Creates a 2048-bit RSA key pair with the specified name. Amazon EC2 stores +// the public key and displays the private key for you to save to a file. The +// private key is returned as an unencrypted PEM encoded PKCS#8 private key. +// If a key with the specified name already exists, Amazon EC2 returns an error. +// +// You can have up to five thousand key pairs per region. +// +// The key pair returned to you is available only in the region in which you +// create it. To create a key pair that is available in all regions, use ImportKeyPair. +// +// For more information about key pairs, see Key Pairs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CreateKeyPair(input *CreateKeyPairInput) (*CreateKeyPairOutput, error) { + req, out := c.CreateKeyPairRequest(input) + err := req.Send() + return out, err +} + +const opCreateNatGateway = "CreateNatGateway" + +// CreateNatGatewayRequest generates a "aws/request.Request" representing the +// client's request for the CreateNatGateway operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateNatGateway method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateNatGatewayRequest method. +// req, resp := client.CreateNatGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) CreateNatGatewayRequest(input *CreateNatGatewayInput) (req *request.Request, output *CreateNatGatewayOutput) { + op := &request.Operation{ + Name: opCreateNatGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateNatGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateNatGatewayOutput{} + req.Data = output + return +} + +// Creates a NAT gateway in the specified subnet. A NAT gateway can be used +// to enable instances in a private subnet to connect to the Internet. This +// action creates a network interface in the specified subnet with a private +// IP address from the IP address range of the subnet. For more information, +// see NAT Gateways (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html) +// in the Amazon Virtual Private Cloud User Guide. 
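+//
+// A minimal sketch pairing a public subnet with an Elastic IP allocation;
+// both IDs below are placeholders, and svc is assumed to be a configured
+// *EC2 client:
+//
+// resp, err := svc.CreateNatGateway(&ec2.CreateNatGatewayInput{
+// SubnetId: aws.String("subnet-1a2b3c4d"), // placeholder public subnet ID
+// AllocationId: aws.String("eipalloc-37fc1a52"), // placeholder Elastic IP allocation ID
+// })
+// if err == nil { // resp describes the new NAT gateway
+// fmt.Println(resp)
+// }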
+func (c *EC2) CreateNatGateway(input *CreateNatGatewayInput) (*CreateNatGatewayOutput, error) { + req, out := c.CreateNatGatewayRequest(input) + err := req.Send() + return out, err +} + +const opCreateNetworkAcl = "CreateNetworkAcl" + +// CreateNetworkAclRequest generates a "aws/request.Request" representing the +// client's request for the CreateNetworkAcl operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateNetworkAcl method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateNetworkAclRequest method. +// req, resp := client.CreateNetworkAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) CreateNetworkAclRequest(input *CreateNetworkAclInput) (req *request.Request, output *CreateNetworkAclOutput) { + op := &request.Operation{ + Name: opCreateNetworkAcl, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateNetworkAclInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateNetworkAclOutput{} + req.Data = output + return +} + +// Creates a network ACL in a VPC. Network ACLs provide an optional layer of +// security (in addition to security groups) for the instances in your VPC. +// +// For more information about network ACLs, see Network ACLs (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) CreateNetworkAcl(input *CreateNetworkAclInput) (*CreateNetworkAclOutput, error) { + req, out := c.CreateNetworkAclRequest(input) + err := req.Send() + return out, err +} + +const opCreateNetworkAclEntry = "CreateNetworkAclEntry" + +// CreateNetworkAclEntryRequest generates a "aws/request.Request" representing the +// client's request for the CreateNetworkAclEntry operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateNetworkAclEntry method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateNetworkAclEntryRequest method. 
+// req, resp := client.CreateNetworkAclEntryRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *EC2) CreateNetworkAclEntryRequest(input *CreateNetworkAclEntryInput) (req *request.Request, output *CreateNetworkAclEntryOutput) {
+ op := &request.Operation{
+ Name: opCreateNetworkAclEntry,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateNetworkAclEntryInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &CreateNetworkAclEntryOutput{}
+ req.Data = output
+ return
+}
+
+// Creates an entry (a rule) in a network ACL with the specified rule number.
+// Each network ACL has a set of numbered ingress rules and a separate set of
+// numbered egress rules. When determining whether a packet should be allowed
+// in or out of a subnet associated with the ACL, we process the entries in
+// the ACL according to the rule numbers, in ascending order.
+//
+// We recommend that you leave room between the rule numbers (for example,
+// 100, 110, 120, ...), and not number them one right after the other (for example,
+// 101, 102, 103, ...). This makes it easier to add a rule between existing
+// ones without having to renumber the rules.
+//
+// After you add an entry, you can't modify it; you must either replace it,
+// or create an entry and delete the old one.
+//
+// For more information about network ACLs, see Network ACLs (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html)
+// in the Amazon Virtual Private Cloud User Guide.
+func (c *EC2) CreateNetworkAclEntry(input *CreateNetworkAclEntryInput) (*CreateNetworkAclEntryOutput, error) {
+ req, out := c.CreateNetworkAclEntryRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opCreateNetworkInterface = "CreateNetworkInterface"
+
+// CreateNetworkInterfaceRequest generates a "aws/request.Request" representing the
+// client's request for the CreateNetworkInterface operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CreateNetworkInterface method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the CreateNetworkInterfaceRequest method.
+// req, resp := client.CreateNetworkInterfaceRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *EC2) CreateNetworkInterfaceRequest(input *CreateNetworkInterfaceInput) (req *request.Request, output *CreateNetworkInterfaceOutput) {
+ op := &request.Operation{
+ Name: opCreateNetworkInterface,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateNetworkInterfaceInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &CreateNetworkInterfaceOutput{}
+ req.Data = output
+ return
+}
+
+// Creates a network interface in the specified subnet.
+// +// For more information about network interfaces, see Elastic Network Interfaces +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html) in the +// Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CreateNetworkInterface(input *CreateNetworkInterfaceInput) (*CreateNetworkInterfaceOutput, error) { + req, out := c.CreateNetworkInterfaceRequest(input) + err := req.Send() + return out, err +} + +const opCreatePlacementGroup = "CreatePlacementGroup" + +// CreatePlacementGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreatePlacementGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreatePlacementGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreatePlacementGroupRequest method. +// req, resp := client.CreatePlacementGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) CreatePlacementGroupRequest(input *CreatePlacementGroupInput) (req *request.Request, output *CreatePlacementGroupOutput) { + op := &request.Operation{ + Name: opCreatePlacementGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreatePlacementGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CreatePlacementGroupOutput{} + req.Data = output + return +} + +// Creates a placement group that you launch cluster instances into. You must +// give the group a name that's unique within the scope of your account. +// +// For more information about placement groups and cluster instances, see Cluster +// Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using_cluster_computing.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CreatePlacementGroup(input *CreatePlacementGroupInput) (*CreatePlacementGroupOutput, error) { + req, out := c.CreatePlacementGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateReservedInstancesListing = "CreateReservedInstancesListing" + +// CreateReservedInstancesListingRequest generates a "aws/request.Request" representing the +// client's request for the CreateReservedInstancesListing operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateReservedInstancesListing method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateReservedInstancesListingRequest method. 
+// req, resp := client.CreateReservedInstancesListingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) CreateReservedInstancesListingRequest(input *CreateReservedInstancesListingInput) (req *request.Request, output *CreateReservedInstancesListingOutput) { + op := &request.Operation{ + Name: opCreateReservedInstancesListing, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateReservedInstancesListingInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateReservedInstancesListingOutput{} + req.Data = output + return +} + +// Creates a listing for Amazon EC2 Reserved Instances to be sold in the Reserved +// Instance Marketplace. You can submit one Reserved Instance listing at a time. +// To get a list of your Reserved Instances, you can use the DescribeReservedInstances +// operation. +// +// The Reserved Instance Marketplace matches sellers who want to resell Reserved +// Instance capacity that they no longer need with buyers who want to purchase +// additional capacity. Reserved Instances bought and sold through the Reserved +// Instance Marketplace work like any other Reserved Instances. +// +// To sell your Reserved Instances, you must first register as a seller in +// the Reserved Instance Marketplace. After completing the registration process, +// you can create a Reserved Instance Marketplace listing of some or all of +// your Reserved Instances, and specify the upfront price to receive for them. +// Your Reserved Instance listings then become available for purchase. To view +// the details of your Reserved Instance listing, you can use the DescribeReservedInstancesListings +// operation. +// +// For more information, see Reserved Instance Marketplace (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CreateReservedInstancesListing(input *CreateReservedInstancesListingInput) (*CreateReservedInstancesListingOutput, error) { + req, out := c.CreateReservedInstancesListingRequest(input) + err := req.Send() + return out, err +} + +const opCreateRoute = "CreateRoute" + +// CreateRouteRequest generates a "aws/request.Request" representing the +// client's request for the CreateRoute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateRoute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateRouteRequest method. 
+// req, resp := client.CreateRouteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) CreateRouteRequest(input *CreateRouteInput) (req *request.Request, output *CreateRouteOutput) { + op := &request.Operation{ + Name: opCreateRoute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateRouteInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateRouteOutput{} + req.Data = output + return +} + +// Creates a route in a route table within a VPC. +// +// You must specify one of the following targets: Internet gateway or virtual +// private gateway, NAT instance, NAT gateway, VPC peering connection, or network +// interface. +// +// When determining how to route traffic, we use the route with the most specific +// match. For example, let's say the traffic is destined for 192.0.2.3, and +// the route table includes the following two routes: +// +// 192.0.2.0/24 (goes to some target A) +// +// 192.0.2.0/28 (goes to some target B) +// +// Both routes apply to the traffic destined for 192.0.2.3. However, the +// second route in the list covers a smaller number of IP addresses and is therefore +// more specific, so we use that route to determine where to target the traffic. +// +// For more information about route tables, see Route Tables (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) CreateRoute(input *CreateRouteInput) (*CreateRouteOutput, error) { + req, out := c.CreateRouteRequest(input) + err := req.Send() + return out, err +} + +const opCreateRouteTable = "CreateRouteTable" + +// CreateRouteTableRequest generates a "aws/request.Request" representing the +// client's request for the CreateRouteTable operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateRouteTable method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateRouteTableRequest method. +// req, resp := client.CreateRouteTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) CreateRouteTableRequest(input *CreateRouteTableInput) (req *request.Request, output *CreateRouteTableOutput) { + op := &request.Operation{ + Name: opCreateRouteTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateRouteTableInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateRouteTableOutput{} + req.Data = output + return +} + +// Creates a route table for the specified VPC. After you create a route table, +// you can add routes and associate the table with a subnet. +// +// For more information about route tables, see Route Tables (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) +// in the Amazon Virtual Private Cloud User Guide. 
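+//
+// A minimal illustrative sketch (not generated code): create a route table for
+// a VPC, then add a default route through an Internet gateway. The IDs are
+// placeholders, and client is assumed to be an initialized *ec2.EC2.
+//
+// rt, err := client.CreateRouteTable(&ec2.CreateRouteTableInput{
+//     VpcId: aws.String("vpc-1a2b3c4d"), // placeholder ID
+// })
+// if err == nil {
+//     _, err = client.CreateRoute(&ec2.CreateRouteInput{
+//         RouteTableId:         rt.RouteTable.RouteTableId,
+//         DestinationCidrBlock: aws.String("0.0.0.0/0"),
+//         GatewayId:            aws.String("igw-1a2b3c4d"), // placeholder ID
+//     })
+// }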
+func (c *EC2) CreateRouteTable(input *CreateRouteTableInput) (*CreateRouteTableOutput, error) { + req, out := c.CreateRouteTableRequest(input) + err := req.Send() + return out, err +} + +const opCreateSecurityGroup = "CreateSecurityGroup" + +// CreateSecurityGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateSecurityGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateSecurityGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateSecurityGroupRequest method. +// req, resp := client.CreateSecurityGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) CreateSecurityGroupRequest(input *CreateSecurityGroupInput) (req *request.Request, output *CreateSecurityGroupOutput) { + op := &request.Operation{ + Name: opCreateSecurityGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSecurityGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateSecurityGroupOutput{} + req.Data = output + return +} + +// Creates a security group. +// +// A security group is for use with instances either in the EC2-Classic platform +// or in a specific VPC. For more information, see Amazon EC2 Security Groups +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html) +// in the Amazon Elastic Compute Cloud User Guide and Security Groups for Your +// VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_SecurityGroups.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// EC2-Classic: You can have up to 500 security groups. +// +// EC2-VPC: You can create up to 500 security groups per VPC. +// +// When you create a security group, you specify a friendly name of your choice. +// You can have a security group for use in EC2-Classic with the same name as +// a security group for use in a VPC. However, you can't have two security groups +// for use in EC2-Classic with the same name or two security groups for use +// in a VPC with the same name. +// +// You have a default security group for use in EC2-Classic and a default security +// group for use in your VPC. If you don't specify a security group when you +// launch an instance, the instance is launched into the appropriate default +// security group. A default security group includes a default rule that grants +// instances unrestricted network access to each other. +// +// You can add or remove rules from your security groups using AuthorizeSecurityGroupIngress, +// AuthorizeSecurityGroupEgress, RevokeSecurityGroupIngress, and RevokeSecurityGroupEgress. 
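+//
+// A minimal illustrative sketch (not generated code): create a VPC security
+// group, then open TCP port 80 with AuthorizeSecurityGroupIngress. The VPC ID
+// is a placeholder, and client is assumed to be an initialized *ec2.EC2.
+//
+// sg, err := client.CreateSecurityGroup(&ec2.CreateSecurityGroupInput{
+//     GroupName:   aws.String("web-sg"),
+//     Description: aws.String("allow inbound HTTP"),
+//     VpcId:       aws.String("vpc-1a2b3c4d"), // placeholder ID
+// })
+// if err == nil {
+//     _, err = client.AuthorizeSecurityGroupIngress(&ec2.AuthorizeSecurityGroupIngressInput{
+//         GroupId:    sg.GroupId,
+//         IpProtocol: aws.String("tcp"),
+//         FromPort:   aws.Int64(80),
+//         ToPort:     aws.Int64(80),
+//         CidrIp:     aws.String("0.0.0.0/0"),
+//     })
+// }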
+func (c *EC2) CreateSecurityGroup(input *CreateSecurityGroupInput) (*CreateSecurityGroupOutput, error) { + req, out := c.CreateSecurityGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateSnapshot = "CreateSnapshot" + +// CreateSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the CreateSnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateSnapshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateSnapshotRequest method. +// req, resp := client.CreateSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) CreateSnapshotRequest(input *CreateSnapshotInput) (req *request.Request, output *Snapshot) { + op := &request.Operation{ + Name: opCreateSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &Snapshot{} + req.Data = output + return +} + +// Creates a snapshot of an EBS volume and stores it in Amazon S3. You can use +// snapshots for backups, to make copies of EBS volumes, and to save data before +// shutting down an instance. +// +// When a snapshot is created, any AWS Marketplace product codes that are associated +// with the source volume are propagated to the snapshot. +// +// You can take a snapshot of an attached volume that is in use. However, snapshots +// only capture data that has been written to your EBS volume at the time the +// snapshot command is issued; this may exclude any data that has been cached +// by any applications or the operating system. If you can pause any file systems +// on the volume long enough to take a snapshot, your snapshot should be complete. +// However, if you cannot pause all file writes to the volume, you should unmount +// the volume from within the instance, issue the snapshot command, and then +// remount the volume to ensure a consistent and complete snapshot. You may +// remount and use your volume while the snapshot status is pending. +// +// To create a snapshot for EBS volumes that serve as root devices, you should +// stop the instance before taking the snapshot. +// +// Snapshots that are taken from encrypted volumes are automatically encrypted. +// Volumes that are created from encrypted snapshots are also automatically +// encrypted. Your encrypted volumes and any associated snapshots always remain +// protected. +// +// For more information, see Amazon Elastic Block Store (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AmazonEBS.html) +// and Amazon EBS Encryption (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) +// in the Amazon Elastic Compute Cloud User Guide. 
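+//
+// A minimal illustrative sketch (not generated code): snapshot a volume, then
+// block until the snapshot completes using this package's
+// WaitUntilSnapshotCompleted waiter. The volume ID is a placeholder, and
+// client is assumed to be an initialized *ec2.EC2.
+//
+// snap, err := client.CreateSnapshot(&ec2.CreateSnapshotInput{
+//     VolumeId:    aws.String("vol-1a2b3c4d"), // placeholder ID
+//     Description: aws.String("nightly backup"),
+// })
+// if err == nil {
+//     err = client.WaitUntilSnapshotCompleted(&ec2.DescribeSnapshotsInput{
+//         SnapshotIds: []*string{snap.SnapshotId},
+//     })
+// }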
+func (c *EC2) CreateSnapshot(input *CreateSnapshotInput) (*Snapshot, error) { + req, out := c.CreateSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opCreateSpotDatafeedSubscription = "CreateSpotDatafeedSubscription" + +// CreateSpotDatafeedSubscriptionRequest generates a "aws/request.Request" representing the +// client's request for the CreateSpotDatafeedSubscription operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateSpotDatafeedSubscription method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateSpotDatafeedSubscriptionRequest method. +// req, resp := client.CreateSpotDatafeedSubscriptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) CreateSpotDatafeedSubscriptionRequest(input *CreateSpotDatafeedSubscriptionInput) (req *request.Request, output *CreateSpotDatafeedSubscriptionOutput) { + op := &request.Operation{ + Name: opCreateSpotDatafeedSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSpotDatafeedSubscriptionInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateSpotDatafeedSubscriptionOutput{} + req.Data = output + return +} + +// Creates a data feed for Spot instances, enabling you to view Spot instance +// usage logs. You can create one data feed per AWS account. For more information, +// see Spot Instance Data Feed (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-data-feeds.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CreateSpotDatafeedSubscription(input *CreateSpotDatafeedSubscriptionInput) (*CreateSpotDatafeedSubscriptionOutput, error) { + req, out := c.CreateSpotDatafeedSubscriptionRequest(input) + err := req.Send() + return out, err +} + +const opCreateSubnet = "CreateSubnet" + +// CreateSubnetRequest generates a "aws/request.Request" representing the +// client's request for the CreateSubnet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateSubnet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateSubnetRequest method. 
+// req, resp := client.CreateSubnetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) CreateSubnetRequest(input *CreateSubnetInput) (req *request.Request, output *CreateSubnetOutput) { + op := &request.Operation{ + Name: opCreateSubnet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSubnetInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateSubnetOutput{} + req.Data = output + return +} + +// Creates a subnet in an existing VPC. +// +// When you create each subnet, you provide the VPC ID and the CIDR block you +// want for the subnet. After you create a subnet, you can't change its CIDR +// block. The subnet's CIDR block can be the same as the VPC's CIDR block (assuming +// you want only a single subnet in the VPC), or a subset of the VPC's CIDR +// block. If you create more than one subnet in a VPC, the subnets' CIDR blocks +// must not overlap. The smallest subnet (and VPC) you can create uses a /28 +// netmask (16 IP addresses), and the largest uses a /16 netmask (65,536 IP +// addresses). +// +// AWS reserves both the first four and the last IP address in each subnet's +// CIDR block. They're not available for use. +// +// If you add more than one subnet to a VPC, they're set up in a star topology +// with a logical router in the middle. +// +// If you launch an instance in a VPC using an Amazon EBS-backed AMI, the IP +// address doesn't change if you stop and restart the instance (unlike a similar +// instance launched outside a VPC, which gets a new IP address when restarted). +// It's therefore possible to have a subnet with no running instances (they're +// all stopped), but no remaining IP addresses available. +// +// For more information about subnets, see Your VPC and Subnets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) CreateSubnet(input *CreateSubnetInput) (*CreateSubnetOutput, error) { + req, out := c.CreateSubnetRequest(input) + err := req.Send() + return out, err +} + +const opCreateTags = "CreateTags" + +// CreateTagsRequest generates a "aws/request.Request" representing the +// client's request for the CreateTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateTagsRequest method. 
+// req, resp := client.CreateTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) CreateTagsRequest(input *CreateTagsInput) (req *request.Request, output *CreateTagsOutput) { + op := &request.Operation{ + Name: opCreateTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTagsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CreateTagsOutput{} + req.Data = output + return +} + +// Adds or overwrites one or more tags for the specified Amazon EC2 resource +// or resources. Each resource can have a maximum of 10 tags. Each tag consists +// of a key and optional value. Tag keys must be unique per resource. +// +// For more information about tags, see Tagging Your Resources (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) +// in the Amazon Elastic Compute Cloud User Guide. For more information about +// creating IAM policies that control users' access to resources based on tags, +// see Supported Resource-Level Permissions for Amazon EC2 API Actions (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-iam-actions-resources.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CreateTags(input *CreateTagsInput) (*CreateTagsOutput, error) { + req, out := c.CreateTagsRequest(input) + err := req.Send() + return out, err +} + +const opCreateVolume = "CreateVolume" + +// CreateVolumeRequest generates a "aws/request.Request" representing the +// client's request for the CreateVolume operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateVolume method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateVolumeRequest method. +// req, resp := client.CreateVolumeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) CreateVolumeRequest(input *CreateVolumeInput) (req *request.Request, output *Volume) { + op := &request.Operation{ + Name: opCreateVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateVolumeInput{} + } + + req = c.newRequest(op, input, output) + output = &Volume{} + req.Data = output + return +} + +// Creates an EBS volume that can be attached to an instance in the same Availability +// Zone. The volume is created in the regional endpoint that you send the HTTP +// request to. For more information see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html). +// +// You can create a new empty volume or restore a volume from an EBS snapshot. +// Any AWS Marketplace product codes from the snapshot are propagated to the +// volume. +// +// You can create encrypted volumes with the Encrypted parameter. Encrypted +// volumes may only be attached to instances that support Amazon EBS encryption. 
+// Volumes that are created from encrypted snapshots are also automatically
+// encrypted. For more information, see Amazon EBS Encryption (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html)
+// in the Amazon Elastic Compute Cloud User Guide.
+//
+// For more information, see Creating or Restoring an Amazon EBS Volume (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-creating-volume.html)
+// in the Amazon Elastic Compute Cloud User Guide.
+func (c *EC2) CreateVolume(input *CreateVolumeInput) (*Volume, error) {
+ req, out := c.CreateVolumeRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opCreateVpc = "CreateVpc"
+
+// CreateVpcRequest generates a "aws/request.Request" representing the
+// client's request for the CreateVpc operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CreateVpc method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the CreateVpcRequest method.
+// req, resp := client.CreateVpcRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *EC2) CreateVpcRequest(input *CreateVpcInput) (req *request.Request, output *CreateVpcOutput) {
+ op := &request.Operation{
+ Name: opCreateVpc,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateVpcInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &CreateVpcOutput{}
+ req.Data = output
+ return
+}
+
+// Creates a VPC with the specified CIDR block.
+//
+// The smallest VPC you can create uses a /28 netmask (16 IP addresses), and
+// the largest uses a /16 netmask (65,536 IP addresses). To help you decide
+// how big to make your VPC, see Your VPC and Subnets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html)
+// in the Amazon Virtual Private Cloud User Guide.
+//
+// By default, each instance you launch in the VPC has the default DHCP options,
+// which includes only a default DNS server that we provide (AmazonProvidedDNS).
+// For more information about DHCP options, see DHCP Options Sets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html)
+// in the Amazon Virtual Private Cloud User Guide.
+//
+// You can specify the instance tenancy value for the VPC when you create it.
+// You can't change this value for the VPC after you create it. For more information,
+// see Dedicated Instances (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/dedicated-instance.html)
+// in the Amazon Virtual Private Cloud User Guide.
+func (c *EC2) CreateVpc(input *CreateVpcInput) (*CreateVpcOutput, error) {
+ req, out := c.CreateVpcRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opCreateVpcEndpoint = "CreateVpcEndpoint"
+
+// CreateVpcEndpointRequest generates a "aws/request.Request" representing the
+// client's request for the CreateVpcEndpoint operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateVpcEndpoint method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateVpcEndpointRequest method. +// req, resp := client.CreateVpcEndpointRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) CreateVpcEndpointRequest(input *CreateVpcEndpointInput) (req *request.Request, output *CreateVpcEndpointOutput) { + op := &request.Operation{ + Name: opCreateVpcEndpoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateVpcEndpointInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateVpcEndpointOutput{} + req.Data = output + return +} + +// Creates a VPC endpoint for a specified AWS service. An endpoint enables you +// to create a private connection between your VPC and another AWS service in +// your account. You can specify an endpoint policy to attach to the endpoint +// that will control access to the service from your VPC. You can also specify +// the VPC route tables that use the endpoint. +// +// Currently, only endpoints to Amazon S3 are supported. +func (c *EC2) CreateVpcEndpoint(input *CreateVpcEndpointInput) (*CreateVpcEndpointOutput, error) { + req, out := c.CreateVpcEndpointRequest(input) + err := req.Send() + return out, err +} + +const opCreateVpcPeeringConnection = "CreateVpcPeeringConnection" + +// CreateVpcPeeringConnectionRequest generates a "aws/request.Request" representing the +// client's request for the CreateVpcPeeringConnection operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateVpcPeeringConnection method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateVpcPeeringConnectionRequest method. +// req, resp := client.CreateVpcPeeringConnectionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) CreateVpcPeeringConnectionRequest(input *CreateVpcPeeringConnectionInput) (req *request.Request, output *CreateVpcPeeringConnectionOutput) { + op := &request.Operation{ + Name: opCreateVpcPeeringConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateVpcPeeringConnectionInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateVpcPeeringConnectionOutput{} + req.Data = output + return +} + +// Requests a VPC peering connection between two VPCs: a requester VPC that +// you own and a peer VPC with which to create the connection. The peer VPC +// can belong to another AWS account. The requester VPC and peer VPC cannot +// have overlapping CIDR blocks. 
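+//
+// A minimal illustrative sketch (not generated code): request a peering
+// connection to a VPC in another account; the owner of the peer VPC must still
+// accept the request, as described below. All IDs are placeholders, and client
+// is assumed to be an initialized *ec2.EC2.
+//
+// resp, err := client.CreateVpcPeeringConnection(&ec2.CreateVpcPeeringConnectionInput{
+//     VpcId:       aws.String("vpc-1a2b3c4d"),  // placeholder ID
+//     PeerVpcId:   aws.String("vpc-4d3c2b1a"),  // placeholder ID
+//     PeerOwnerId: aws.String("123456789012"),  // placeholder account ID
+// })
+// if err == nil {
+//     fmt.Println(*resp.VpcPeeringConnection.VpcPeeringConnectionId)
+// }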
+// +// The owner of the peer VPC must accept the peering request to activate the +// peering connection. The VPC peering connection request expires after 7 days, +// after which it cannot be accepted or rejected. +// +// A CreateVpcPeeringConnection request between VPCs with overlapping CIDR +// blocks results in the VPC peering connection having a status of failed. +func (c *EC2) CreateVpcPeeringConnection(input *CreateVpcPeeringConnectionInput) (*CreateVpcPeeringConnectionOutput, error) { + req, out := c.CreateVpcPeeringConnectionRequest(input) + err := req.Send() + return out, err +} + +const opCreateVpnConnection = "CreateVpnConnection" + +// CreateVpnConnectionRequest generates a "aws/request.Request" representing the +// client's request for the CreateVpnConnection operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateVpnConnection method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateVpnConnectionRequest method. +// req, resp := client.CreateVpnConnectionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) CreateVpnConnectionRequest(input *CreateVpnConnectionInput) (req *request.Request, output *CreateVpnConnectionOutput) { + op := &request.Operation{ + Name: opCreateVpnConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateVpnConnectionInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateVpnConnectionOutput{} + req.Data = output + return +} + +// Creates a VPN connection between an existing virtual private gateway and +// a VPN customer gateway. The only supported connection type is ipsec.1. +// +// The response includes information that you need to give to your network +// administrator to configure your customer gateway. +// +// We strongly recommend that you use HTTPS when calling this operation because +// the response contains sensitive cryptographic information for configuring +// your customer gateway. +// +// If you decide to shut down your VPN connection for any reason and later +// create a new VPN connection, you must reconfigure your customer gateway with +// the new information returned from this call. +// +// This is an idempotent operation. If you perform the operation more than +// once, Amazon EC2 doesn't return an error. +// +// For more information about VPN connections, see Adding a Hardware Virtual +// Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) CreateVpnConnection(input *CreateVpnConnectionInput) (*CreateVpnConnectionOutput, error) { + req, out := c.CreateVpnConnectionRequest(input) + err := req.Send() + return out, err +} + +const opCreateVpnConnectionRoute = "CreateVpnConnectionRoute" + +// CreateVpnConnectionRouteRequest generates a "aws/request.Request" representing the +// client's request for the CreateVpnConnectionRoute operation. 
+// The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CreateVpnConnectionRoute method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the CreateVpnConnectionRouteRequest method.
+// req, resp := client.CreateVpnConnectionRouteRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *EC2) CreateVpnConnectionRouteRequest(input *CreateVpnConnectionRouteInput) (req *request.Request, output *CreateVpnConnectionRouteOutput) {
+ op := &request.Operation{
+ Name: opCreateVpnConnectionRoute,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateVpnConnectionRouteInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &CreateVpnConnectionRouteOutput{}
+ req.Data = output
+ return
+}
+
+// Creates a static route associated with a VPN connection between an existing
+// virtual private gateway and a VPN customer gateway. The static route allows
+// traffic to be routed from the virtual private gateway to the VPN customer
+// gateway.
+//
+// For more information about VPN connections, see Adding a Hardware Virtual
+// Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html)
+// in the Amazon Virtual Private Cloud User Guide.
+func (c *EC2) CreateVpnConnectionRoute(input *CreateVpnConnectionRouteInput) (*CreateVpnConnectionRouteOutput, error) {
+ req, out := c.CreateVpnConnectionRouteRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opCreateVpnGateway = "CreateVpnGateway"
+
+// CreateVpnGatewayRequest generates a "aws/request.Request" representing the
+// client's request for the CreateVpnGateway operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CreateVpnGateway method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the CreateVpnGatewayRequest method.
+// req, resp := client.CreateVpnGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) CreateVpnGatewayRequest(input *CreateVpnGatewayInput) (req *request.Request, output *CreateVpnGatewayOutput) { + op := &request.Operation{ + Name: opCreateVpnGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateVpnGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateVpnGatewayOutput{} + req.Data = output + return +} + +// Creates a virtual private gateway. A virtual private gateway is the endpoint +// on the VPC side of your VPN connection. You can create a virtual private +// gateway before creating the VPC itself. +// +// For more information about virtual private gateways, see Adding a Hardware +// Virtual Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) CreateVpnGateway(input *CreateVpnGatewayInput) (*CreateVpnGatewayOutput, error) { + req, out := c.CreateVpnGatewayRequest(input) + err := req.Send() + return out, err +} + +const opDeleteCustomerGateway = "DeleteCustomerGateway" + +// DeleteCustomerGatewayRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCustomerGateway operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteCustomerGateway method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteCustomerGatewayRequest method. +// req, resp := client.DeleteCustomerGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DeleteCustomerGatewayRequest(input *DeleteCustomerGatewayInput) (req *request.Request, output *DeleteCustomerGatewayOutput) { + op := &request.Operation{ + Name: opDeleteCustomerGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteCustomerGatewayInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteCustomerGatewayOutput{} + req.Data = output + return +} + +// Deletes the specified customer gateway. You must delete the VPN connection +// before you can delete the customer gateway. +func (c *EC2) DeleteCustomerGateway(input *DeleteCustomerGatewayInput) (*DeleteCustomerGatewayOutput, error) { + req, out := c.DeleteCustomerGatewayRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDhcpOptions = "DeleteDhcpOptions" + +// DeleteDhcpOptionsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDhcpOptions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDhcpOptions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteDhcpOptionsRequest method. +// req, resp := client.DeleteDhcpOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DeleteDhcpOptionsRequest(input *DeleteDhcpOptionsInput) (req *request.Request, output *DeleteDhcpOptionsOutput) { + op := &request.Operation{ + Name: opDeleteDhcpOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDhcpOptionsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteDhcpOptionsOutput{} + req.Data = output + return +} + +// Deletes the specified set of DHCP options. You must disassociate the set +// of DHCP options before you can delete it. You can disassociate the set of +// DHCP options by associating either a new set of options or the default set +// of options with the VPC. +func (c *EC2) DeleteDhcpOptions(input *DeleteDhcpOptionsInput) (*DeleteDhcpOptionsOutput, error) { + req, out := c.DeleteDhcpOptionsRequest(input) + err := req.Send() + return out, err +} + +const opDeleteFlowLogs = "DeleteFlowLogs" + +// DeleteFlowLogsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteFlowLogs operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteFlowLogs method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteFlowLogsRequest method. +// req, resp := client.DeleteFlowLogsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DeleteFlowLogsRequest(input *DeleteFlowLogsInput) (req *request.Request, output *DeleteFlowLogsOutput) { + op := &request.Operation{ + Name: opDeleteFlowLogs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteFlowLogsInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteFlowLogsOutput{} + req.Data = output + return +} + +// Deletes one or more flow logs. +func (c *EC2) DeleteFlowLogs(input *DeleteFlowLogsInput) (*DeleteFlowLogsOutput, error) { + req, out := c.DeleteFlowLogsRequest(input) + err := req.Send() + return out, err +} + +const opDeleteInternetGateway = "DeleteInternetGateway" + +// DeleteInternetGatewayRequest generates a "aws/request.Request" representing the +// client's request for the DeleteInternetGateway operation. 
+// The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DeleteInternetGateway method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DeleteInternetGatewayRequest method.
+// req, resp := client.DeleteInternetGatewayRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *EC2) DeleteInternetGatewayRequest(input *DeleteInternetGatewayInput) (req *request.Request, output *DeleteInternetGatewayOutput) {
+ op := &request.Operation{
+ Name: opDeleteInternetGateway,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteInternetGatewayInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &DeleteInternetGatewayOutput{}
+ req.Data = output
+ return
+}
+
+// Deletes the specified Internet gateway. You must detach the Internet gateway
+// from the VPC before you can delete it.
+func (c *EC2) DeleteInternetGateway(input *DeleteInternetGatewayInput) (*DeleteInternetGatewayOutput, error) {
+ req, out := c.DeleteInternetGatewayRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteKeyPair = "DeleteKeyPair"
+
+// DeleteKeyPairRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteKeyPair operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DeleteKeyPair method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DeleteKeyPairRequest method.
+// req, resp := client.DeleteKeyPairRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *EC2) DeleteKeyPairRequest(input *DeleteKeyPairInput) (req *request.Request, output *DeleteKeyPairOutput) {
+ op := &request.Operation{
+ Name: opDeleteKeyPair,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteKeyPairInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &DeleteKeyPairOutput{}
+ req.Data = output
+ return
+}
+
+// Deletes the specified key pair, by removing the public key from Amazon EC2.
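+//
+// A minimal illustrative sketch (not generated code): "my-key-pair" is a
+// placeholder name, and client is assumed to be an initialized *ec2.EC2.
+//
+// _, err := client.DeleteKeyPair(&ec2.DeleteKeyPairInput{
+//     KeyName: aws.String("my-key-pair"), // placeholder name
+// })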
+func (c *EC2) DeleteKeyPair(input *DeleteKeyPairInput) (*DeleteKeyPairOutput, error) { + req, out := c.DeleteKeyPairRequest(input) + err := req.Send() + return out, err +} + +const opDeleteNatGateway = "DeleteNatGateway" + +// DeleteNatGatewayRequest generates a "aws/request.Request" representing the +// client's request for the DeleteNatGateway operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteNatGateway method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteNatGatewayRequest method. +// req, resp := client.DeleteNatGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DeleteNatGatewayRequest(input *DeleteNatGatewayInput) (req *request.Request, output *DeleteNatGatewayOutput) { + op := &request.Operation{ + Name: opDeleteNatGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteNatGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteNatGatewayOutput{} + req.Data = output + return +} + +// Deletes the specified NAT gateway. Deleting a NAT gateway disassociates its +// Elastic IP address, but does not release the address from your account. Deleting +// a NAT gateway does not delete any NAT gateway routes in your route tables. +func (c *EC2) DeleteNatGateway(input *DeleteNatGatewayInput) (*DeleteNatGatewayOutput, error) { + req, out := c.DeleteNatGatewayRequest(input) + err := req.Send() + return out, err +} + +const opDeleteNetworkAcl = "DeleteNetworkAcl" + +// DeleteNetworkAclRequest generates a "aws/request.Request" representing the +// client's request for the DeleteNetworkAcl operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteNetworkAcl method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteNetworkAclRequest method. 
+// req, resp := client.DeleteNetworkAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DeleteNetworkAclRequest(input *DeleteNetworkAclInput) (req *request.Request, output *DeleteNetworkAclOutput) { + op := &request.Operation{ + Name: opDeleteNetworkAcl, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteNetworkAclInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteNetworkAclOutput{} + req.Data = output + return +} + +// Deletes the specified network ACL. You can't delete the ACL if it's associated +// with any subnets. You can't delete the default network ACL. +func (c *EC2) DeleteNetworkAcl(input *DeleteNetworkAclInput) (*DeleteNetworkAclOutput, error) { + req, out := c.DeleteNetworkAclRequest(input) + err := req.Send() + return out, err +} + +const opDeleteNetworkAclEntry = "DeleteNetworkAclEntry" + +// DeleteNetworkAclEntryRequest generates a "aws/request.Request" representing the +// client's request for the DeleteNetworkAclEntry operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteNetworkAclEntry method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteNetworkAclEntryRequest method. +// req, resp := client.DeleteNetworkAclEntryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DeleteNetworkAclEntryRequest(input *DeleteNetworkAclEntryInput) (req *request.Request, output *DeleteNetworkAclEntryOutput) { + op := &request.Operation{ + Name: opDeleteNetworkAclEntry, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteNetworkAclEntryInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteNetworkAclEntryOutput{} + req.Data = output + return +} + +// Deletes the specified ingress or egress entry (rule) from the specified network +// ACL. +func (c *EC2) DeleteNetworkAclEntry(input *DeleteNetworkAclEntryInput) (*DeleteNetworkAclEntryOutput, error) { + req, out := c.DeleteNetworkAclEntryRequest(input) + err := req.Send() + return out, err +} + +const opDeleteNetworkInterface = "DeleteNetworkInterface" + +// DeleteNetworkInterfaceRequest generates a "aws/request.Request" representing the +// client's request for the DeleteNetworkInterface operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
+// If
+// you just want the service response, call the DeleteNetworkInterface method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DeleteNetworkInterfaceRequest method.
+// req, resp := client.DeleteNetworkInterfaceRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *EC2) DeleteNetworkInterfaceRequest(input *DeleteNetworkInterfaceInput) (req *request.Request, output *DeleteNetworkInterfaceOutput) {
+ op := &request.Operation{
+ Name: opDeleteNetworkInterface,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteNetworkInterfaceInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &DeleteNetworkInterfaceOutput{}
+ req.Data = output
+ return
+}
+
+// Deletes the specified network interface. You must detach the network interface
+// before you can delete it.
+func (c *EC2) DeleteNetworkInterface(input *DeleteNetworkInterfaceInput) (*DeleteNetworkInterfaceOutput, error) {
+ req, out := c.DeleteNetworkInterfaceRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeletePlacementGroup = "DeletePlacementGroup"
+
+// DeletePlacementGroupRequest generates a "aws/request.Request" representing the
+// client's request for the DeletePlacementGroup operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DeletePlacementGroup method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DeletePlacementGroupRequest method.
+// req, resp := client.DeletePlacementGroupRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *EC2) DeletePlacementGroupRequest(input *DeletePlacementGroupInput) (req *request.Request, output *DeletePlacementGroupOutput) {
+ op := &request.Operation{
+ Name: opDeletePlacementGroup,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeletePlacementGroupInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &DeletePlacementGroupOutput{}
+ req.Data = output
+ return
+}
+
+// Deletes the specified placement group. You must terminate all instances in
+// the placement group before you can delete the placement group. For more information
+// about placement groups and cluster instances, see Cluster Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using_cluster_computing.html)
+// in the Amazon Elastic Compute Cloud User Guide.
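+//
+// A minimal illustrative sketch (not generated code): delete a placement group
+// once all of its instances have been terminated. The group name is a
+// placeholder, and client is assumed to be an initialized *ec2.EC2.
+//
+// _, err := client.DeletePlacementGroup(&ec2.DeletePlacementGroupInput{
+//     GroupName: aws.String("my-cluster-group"), // placeholder name
+// })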
+func (c *EC2) DeletePlacementGroup(input *DeletePlacementGroupInput) (*DeletePlacementGroupOutput, error) { + req, out := c.DeletePlacementGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteRoute = "DeleteRoute" + +// DeleteRouteRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRoute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteRoute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteRouteRequest method. +// req, resp := client.DeleteRouteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DeleteRouteRequest(input *DeleteRouteInput) (req *request.Request, output *DeleteRouteOutput) { + op := &request.Operation{ + Name: opDeleteRoute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRouteInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteRouteOutput{} + req.Data = output + return +} + +// Deletes the specified route from the specified route table. +func (c *EC2) DeleteRoute(input *DeleteRouteInput) (*DeleteRouteOutput, error) { + req, out := c.DeleteRouteRequest(input) + err := req.Send() + return out, err +} + +const opDeleteRouteTable = "DeleteRouteTable" + +// DeleteRouteTableRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRouteTable operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteRouteTable method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteRouteTableRequest method. +// req, resp := client.DeleteRouteTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DeleteRouteTableRequest(input *DeleteRouteTableInput) (req *request.Request, output *DeleteRouteTableOutput) { + op := &request.Operation{ + Name: opDeleteRouteTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRouteTableInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteRouteTableOutput{} + req.Data = output + return +} + +// Deletes the specified route table. 
You must disassociate the route table +// from any subnets before you can delete it. You can't delete the main route +// table. +func (c *EC2) DeleteRouteTable(input *DeleteRouteTableInput) (*DeleteRouteTableOutput, error) { + req, out := c.DeleteRouteTableRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSecurityGroup = "DeleteSecurityGroup" + +// DeleteSecurityGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSecurityGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteSecurityGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteSecurityGroupRequest method. +// req, resp := client.DeleteSecurityGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DeleteSecurityGroupRequest(input *DeleteSecurityGroupInput) (req *request.Request, output *DeleteSecurityGroupOutput) { + op := &request.Operation{ + Name: opDeleteSecurityGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSecurityGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteSecurityGroupOutput{} + req.Data = output + return +} + +// Deletes a security group. +// +// If you attempt to delete a security group that is associated with an instance, +// or is referenced by another security group, the operation fails with InvalidGroup.InUse +// in EC2-Classic or DependencyViolation in EC2-VPC. +func (c *EC2) DeleteSecurityGroup(input *DeleteSecurityGroupInput) (*DeleteSecurityGroupOutput, error) { + req, out := c.DeleteSecurityGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSnapshot = "DeleteSnapshot" + +// DeleteSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteSnapshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteSnapshotRequest method. 
+// req, resp := client.DeleteSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DeleteSnapshotRequest(input *DeleteSnapshotInput) (req *request.Request, output *DeleteSnapshotOutput) { + op := &request.Operation{ + Name: opDeleteSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSnapshotInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteSnapshotOutput{} + req.Data = output + return +} + +// Deletes the specified snapshot. +// +// When you make periodic snapshots of a volume, the snapshots are incremental, +// and only the blocks on the device that have changed since your last snapshot +// are saved in the new snapshot. When you delete a snapshot, only the data +// not needed for any other snapshot is removed. So regardless of which prior +// snapshots have been deleted, all active snapshots will have access to all +// the information needed to restore the volume. +// +// You cannot delete a snapshot of the root device of an EBS volume used by +// a registered AMI. You must first de-register the AMI before you can delete +// the snapshot. +// +// For more information, see Deleting an Amazon EBS Snapshot (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-deleting-snapshot.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DeleteSnapshot(input *DeleteSnapshotInput) (*DeleteSnapshotOutput, error) { + req, out := c.DeleteSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSpotDatafeedSubscription = "DeleteSpotDatafeedSubscription" + +// DeleteSpotDatafeedSubscriptionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSpotDatafeedSubscription operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteSpotDatafeedSubscription method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteSpotDatafeedSubscriptionRequest method. +// req, resp := client.DeleteSpotDatafeedSubscriptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DeleteSpotDatafeedSubscriptionRequest(input *DeleteSpotDatafeedSubscriptionInput) (req *request.Request, output *DeleteSpotDatafeedSubscriptionOutput) { + op := &request.Operation{ + Name: opDeleteSpotDatafeedSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSpotDatafeedSubscriptionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteSpotDatafeedSubscriptionOutput{} + req.Data = output + return +} + +// Deletes the data feed for Spot instances. 
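+//
+// A minimal calling sketch, assuming an *ec2.EC2 client built elsewhere; the
+// input struct has no required fields, so an empty value deletes the account's
+// data feed:
+//
+//	svc := ec2.New(session.New())
+//	_, err := svc.DeleteSpotDatafeedSubscription(&ec2.DeleteSpotDatafeedSubscriptionInput{})
+//	if err != nil {
+//		fmt.Println(err)
+//	}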
+func (c *EC2) DeleteSpotDatafeedSubscription(input *DeleteSpotDatafeedSubscriptionInput) (*DeleteSpotDatafeedSubscriptionOutput, error) { + req, out := c.DeleteSpotDatafeedSubscriptionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSubnet = "DeleteSubnet" + +// DeleteSubnetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSubnet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteSubnet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteSubnetRequest method. +// req, resp := client.DeleteSubnetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DeleteSubnetRequest(input *DeleteSubnetInput) (req *request.Request, output *DeleteSubnetOutput) { + op := &request.Operation{ + Name: opDeleteSubnet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSubnetInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteSubnetOutput{} + req.Data = output + return +} + +// Deletes the specified subnet. You must terminate all running instances in +// the subnet before you can delete the subnet. +func (c *EC2) DeleteSubnet(input *DeleteSubnetInput) (*DeleteSubnetOutput, error) { + req, out := c.DeleteSubnetRequest(input) + err := req.Send() + return out, err +} + +const opDeleteTags = "DeleteTags" + +// DeleteTagsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteTagsRequest method. 
+// req, resp := client.DeleteTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DeleteTagsRequest(input *DeleteTagsInput) (req *request.Request, output *DeleteTagsOutput) { + op := &request.Operation{ + Name: opDeleteTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTagsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteTagsOutput{} + req.Data = output + return +} + +// Deletes the specified set of tags from the specified set of resources. This +// call is designed to follow a DescribeTags request. +// +// For more information about tags, see Tagging Your Resources (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DeleteTags(input *DeleteTagsInput) (*DeleteTagsOutput, error) { + req, out := c.DeleteTagsRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVolume = "DeleteVolume" + +// DeleteVolumeRequest generates a "aws/request.Request" representing the +// client's request for the DeleteVolume operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteVolume method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteVolumeRequest method. +// req, resp := client.DeleteVolumeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DeleteVolumeRequest(input *DeleteVolumeInput) (req *request.Request, output *DeleteVolumeOutput) { + op := &request.Operation{ + Name: opDeleteVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVolumeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteVolumeOutput{} + req.Data = output + return +} + +// Deletes the specified EBS volume. The volume must be in the available state +// (not attached to an instance). +// +// The volume may remain in the deleting state for several minutes. +// +// For more information, see Deleting an Amazon EBS Volume (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-deleting-volume.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DeleteVolume(input *DeleteVolumeInput) (*DeleteVolumeOutput, error) { + req, out := c.DeleteVolumeRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVpc = "DeleteVpc" + +// DeleteVpcRequest generates a "aws/request.Request" representing the +// client's request for the DeleteVpc operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteVpc method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteVpcRequest method. +// req, resp := client.DeleteVpcRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DeleteVpcRequest(input *DeleteVpcInput) (req *request.Request, output *DeleteVpcOutput) { + op := &request.Operation{ + Name: opDeleteVpc, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVpcInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteVpcOutput{} + req.Data = output + return +} + +// Deletes the specified VPC. You must detach or delete all gateways and resources +// that are associated with the VPC before you can delete it. For example, you +// must terminate all instances running in the VPC, delete all security groups +// associated with the VPC (except the default one), delete all route tables +// associated with the VPC (except the default one), and so on. +func (c *EC2) DeleteVpc(input *DeleteVpcInput) (*DeleteVpcOutput, error) { + req, out := c.DeleteVpcRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVpcEndpoints = "DeleteVpcEndpoints" + +// DeleteVpcEndpointsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteVpcEndpoints operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteVpcEndpoints method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteVpcEndpointsRequest method. +// req, resp := client.DeleteVpcEndpointsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DeleteVpcEndpointsRequest(input *DeleteVpcEndpointsInput) (req *request.Request, output *DeleteVpcEndpointsOutput) { + op := &request.Operation{ + Name: opDeleteVpcEndpoints, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVpcEndpointsInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteVpcEndpointsOutput{} + req.Data = output + return +} + +// Deletes one or more specified VPC endpoints. Deleting the endpoint also deletes +// the endpoint routes in the route tables that were associated with the endpoint. 
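+//
+// A minimal calling sketch, assuming an *ec2.EC2 client built elsewhere and a
+// hypothetical endpoint ID; endpoints that cannot be deleted are reported in
+// the output's Unsuccessful field rather than as an error:
+//
+//	svc := ec2.New(session.New())
+//	resp, err := svc.DeleteVpcEndpoints(&ec2.DeleteVpcEndpointsInput{
+//		VpcEndpointIds: []*string{aws.String("vpce-11aa22bb")}, // hypothetical ID
+//	})
+//	if err == nil {
+//		fmt.Println(resp.Unsuccessful)
+//	}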
+func (c *EC2) DeleteVpcEndpoints(input *DeleteVpcEndpointsInput) (*DeleteVpcEndpointsOutput, error) { + req, out := c.DeleteVpcEndpointsRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVpcPeeringConnection = "DeleteVpcPeeringConnection" + +// DeleteVpcPeeringConnectionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteVpcPeeringConnection operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteVpcPeeringConnection method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteVpcPeeringConnectionRequest method. +// req, resp := client.DeleteVpcPeeringConnectionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DeleteVpcPeeringConnectionRequest(input *DeleteVpcPeeringConnectionInput) (req *request.Request, output *DeleteVpcPeeringConnectionOutput) { + op := &request.Operation{ + Name: opDeleteVpcPeeringConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVpcPeeringConnectionInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteVpcPeeringConnectionOutput{} + req.Data = output + return +} + +// Deletes a VPC peering connection. Either the owner of the requester VPC or +// the owner of the peer VPC can delete the VPC peering connection if it's in +// the active state. The owner of the requester VPC can delete a VPC peering +// connection in the pending-acceptance state. +func (c *EC2) DeleteVpcPeeringConnection(input *DeleteVpcPeeringConnectionInput) (*DeleteVpcPeeringConnectionOutput, error) { + req, out := c.DeleteVpcPeeringConnectionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVpnConnection = "DeleteVpnConnection" + +// DeleteVpnConnectionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteVpnConnection operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteVpnConnection method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteVpnConnectionRequest method. 
+// req, resp := client.DeleteVpnConnectionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DeleteVpnConnectionRequest(input *DeleteVpnConnectionInput) (req *request.Request, output *DeleteVpnConnectionOutput) { + op := &request.Operation{ + Name: opDeleteVpnConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVpnConnectionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteVpnConnectionOutput{} + req.Data = output + return +} + +// Deletes the specified VPN connection. +// +// If you're deleting the VPC and its associated components, we recommend that +// you detach the virtual private gateway from the VPC and delete the VPC before +// deleting the VPN connection. If you believe that the tunnel credentials for +// your VPN connection have been compromised, you can delete the VPN connection +// and create a new one that has new keys, without needing to delete the VPC +// or virtual private gateway. If you create a new VPN connection, you must +// reconfigure the customer gateway using the new configuration information +// returned with the new VPN connection ID. +func (c *EC2) DeleteVpnConnection(input *DeleteVpnConnectionInput) (*DeleteVpnConnectionOutput, error) { + req, out := c.DeleteVpnConnectionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVpnConnectionRoute = "DeleteVpnConnectionRoute" + +// DeleteVpnConnectionRouteRequest generates a "aws/request.Request" representing the +// client's request for the DeleteVpnConnectionRoute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteVpnConnectionRoute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteVpnConnectionRouteRequest method. +// req, resp := client.DeleteVpnConnectionRouteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DeleteVpnConnectionRouteRequest(input *DeleteVpnConnectionRouteInput) (req *request.Request, output *DeleteVpnConnectionRouteOutput) { + op := &request.Operation{ + Name: opDeleteVpnConnectionRoute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVpnConnectionRouteInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteVpnConnectionRouteOutput{} + req.Data = output + return +} + +// Deletes the specified static route associated with a VPN connection between +// an existing virtual private gateway and a VPN customer gateway. The static +// route allows traffic to be routed from the virtual private gateway to the +// VPN customer gateway. 
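+//
+// A minimal calling sketch, assuming an *ec2.EC2 client built elsewhere; both
+// fields are required, and the CIDR block and connection ID are hypothetical:
+//
+//	svc := ec2.New(session.New())
+//	_, err := svc.DeleteVpnConnectionRoute(&ec2.DeleteVpnConnectionRouteInput{
+//		DestinationCidrBlock: aws.String("192.0.2.0/24"), // hypothetical CIDR
+//		VpnConnectionId:      aws.String("vpn-11aa22bb"), // hypothetical ID
+//	})
+//	if err != nil {
+//		fmt.Println(err)
+//	}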
+func (c *EC2) DeleteVpnConnectionRoute(input *DeleteVpnConnectionRouteInput) (*DeleteVpnConnectionRouteOutput, error) { + req, out := c.DeleteVpnConnectionRouteRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVpnGateway = "DeleteVpnGateway" + +// DeleteVpnGatewayRequest generates a "aws/request.Request" representing the +// client's request for the DeleteVpnGateway operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteVpnGateway method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteVpnGatewayRequest method. +// req, resp := client.DeleteVpnGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DeleteVpnGatewayRequest(input *DeleteVpnGatewayInput) (req *request.Request, output *DeleteVpnGatewayOutput) { + op := &request.Operation{ + Name: opDeleteVpnGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVpnGatewayInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteVpnGatewayOutput{} + req.Data = output + return +} + +// Deletes the specified virtual private gateway. We recommend that before you +// delete a virtual private gateway, you detach it from the VPC and delete the +// VPN connection. Note that you don't need to delete the virtual private gateway +// if you plan to delete and recreate the VPN connection between your VPC and +// your network. +func (c *EC2) DeleteVpnGateway(input *DeleteVpnGatewayInput) (*DeleteVpnGatewayOutput, error) { + req, out := c.DeleteVpnGatewayRequest(input) + err := req.Send() + return out, err +} + +const opDeregisterImage = "DeregisterImage" + +// DeregisterImageRequest generates a "aws/request.Request" representing the +// client's request for the DeregisterImage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeregisterImage method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeregisterImageRequest method. 
+// req, resp := client.DeregisterImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DeregisterImageRequest(input *DeregisterImageInput) (req *request.Request, output *DeregisterImageOutput) { + op := &request.Operation{ + Name: opDeregisterImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterImageInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeregisterImageOutput{} + req.Data = output + return +} + +// Deregisters the specified AMI. After you deregister an AMI, it can't be used +// to launch new instances. +// +// This command does not delete the AMI. +func (c *EC2) DeregisterImage(input *DeregisterImageInput) (*DeregisterImageOutput, error) { + req, out := c.DeregisterImageRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAccountAttributes = "DescribeAccountAttributes" + +// DescribeAccountAttributesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAccountAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeAccountAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeAccountAttributesRequest method. +// req, resp := client.DescribeAccountAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeAccountAttributesRequest(input *DescribeAccountAttributesInput) (req *request.Request, output *DescribeAccountAttributesOutput) { + op := &request.Operation{ + Name: opDescribeAccountAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAccountAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAccountAttributesOutput{} + req.Data = output + return +} + +// Describes attributes of your AWS account. The following are the supported +// account attributes: +// +// supported-platforms: Indicates whether your account can launch instances +// into EC2-Classic and EC2-VPC, or only into EC2-VPC. +// +// default-vpc: The ID of the default VPC for your account, or none. +// +// max-instances: The maximum number of On-Demand instances that you can +// run. +// +// vpc-max-security-groups-per-interface: The maximum number of security +// groups that you can assign to a network interface. +// +// max-elastic-ips: The maximum number of Elastic IP addresses that you +// can allocate for use with EC2-Classic. +// +// vpc-max-elastic-ips: The maximum number of Elastic IP addresses that +// you can allocate for use with EC2-VPC. 
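+//
+// A minimal calling sketch, assuming an *ec2.EC2 client built elsewhere;
+// omitting AttributeNames returns all supported attributes, while naming one
+// (as below) narrows the result:
+//
+//	svc := ec2.New(session.New())
+//	resp, err := svc.DescribeAccountAttributes(&ec2.DescribeAccountAttributesInput{
+//		AttributeNames: []*string{aws.String("supported-platforms")},
+//	})
+//	if err == nil {
+//		fmt.Println(resp.AccountAttributes)
+//	}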
+func (c *EC2) DescribeAccountAttributes(input *DescribeAccountAttributesInput) (*DescribeAccountAttributesOutput, error) { + req, out := c.DescribeAccountAttributesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAddresses = "DescribeAddresses" + +// DescribeAddressesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAddresses operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeAddresses method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeAddressesRequest method. +// req, resp := client.DescribeAddressesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeAddressesRequest(input *DescribeAddressesInput) (req *request.Request, output *DescribeAddressesOutput) { + op := &request.Operation{ + Name: opDescribeAddresses, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAddressesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAddressesOutput{} + req.Data = output + return +} + +// Describes one or more of your Elastic IP addresses. +// +// An Elastic IP address is for use in either the EC2-Classic platform or in +// a VPC. For more information, see Elastic IP Addresses (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeAddresses(input *DescribeAddressesInput) (*DescribeAddressesOutput, error) { + req, out := c.DescribeAddressesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAvailabilityZones = "DescribeAvailabilityZones" + +// DescribeAvailabilityZonesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAvailabilityZones operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeAvailabilityZones method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeAvailabilityZonesRequest method. 
+// req, resp := client.DescribeAvailabilityZonesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeAvailabilityZonesRequest(input *DescribeAvailabilityZonesInput) (req *request.Request, output *DescribeAvailabilityZonesOutput) { + op := &request.Operation{ + Name: opDescribeAvailabilityZones, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAvailabilityZonesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAvailabilityZonesOutput{} + req.Data = output + return +} + +// Describes one or more of the Availability Zones that are available to you. +// The results include zones only for the region you're currently using. If +// there is an event impacting an Availability Zone, you can use this request +// to view the state and any provided message for that Availability Zone. +// +// For more information, see Regions and Availability Zones (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeAvailabilityZones(input *DescribeAvailabilityZonesInput) (*DescribeAvailabilityZonesOutput, error) { + req, out := c.DescribeAvailabilityZonesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeBundleTasks = "DescribeBundleTasks" + +// DescribeBundleTasksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeBundleTasks operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeBundleTasks method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeBundleTasksRequest method. +// req, resp := client.DescribeBundleTasksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeBundleTasksRequest(input *DescribeBundleTasksInput) (req *request.Request, output *DescribeBundleTasksOutput) { + op := &request.Operation{ + Name: opDescribeBundleTasks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeBundleTasksInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeBundleTasksOutput{} + req.Data = output + return +} + +// Describes one or more of your bundling tasks. +// +// Completed bundle tasks are listed for only a limited time. If your bundle +// task is no longer in the list, you can still register an AMI from it. Just +// use RegisterImage with the Amazon S3 bucket name and image manifest name +// you provided to the bundle task. 
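+//
+// A minimal calling sketch, assuming an *ec2.EC2 client built elsewhere; an
+// empty input describes all of your current bundle tasks:
+//
+//	svc := ec2.New(session.New())
+//	resp, err := svc.DescribeBundleTasks(&ec2.DescribeBundleTasksInput{})
+//	if err == nil {
+//		fmt.Println(resp.BundleTasks)
+//	}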
+func (c *EC2) DescribeBundleTasks(input *DescribeBundleTasksInput) (*DescribeBundleTasksOutput, error) { + req, out := c.DescribeBundleTasksRequest(input) + err := req.Send() + return out, err +} + +const opDescribeClassicLinkInstances = "DescribeClassicLinkInstances" + +// DescribeClassicLinkInstancesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeClassicLinkInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeClassicLinkInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeClassicLinkInstancesRequest method. +// req, resp := client.DescribeClassicLinkInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeClassicLinkInstancesRequest(input *DescribeClassicLinkInstancesInput) (req *request.Request, output *DescribeClassicLinkInstancesOutput) { + op := &request.Operation{ + Name: opDescribeClassicLinkInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeClassicLinkInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeClassicLinkInstancesOutput{} + req.Data = output + return +} + +// Describes one or more of your linked EC2-Classic instances. This request +// only returns information about EC2-Classic instances linked to a VPC through +// ClassicLink; you cannot use this request to return information about other +// instances. +func (c *EC2) DescribeClassicLinkInstances(input *DescribeClassicLinkInstancesInput) (*DescribeClassicLinkInstancesOutput, error) { + req, out := c.DescribeClassicLinkInstancesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeConversionTasks = "DescribeConversionTasks" + +// DescribeConversionTasksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeConversionTasks operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeConversionTasks method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeConversionTasksRequest method. 
+// req, resp := client.DescribeConversionTasksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeConversionTasksRequest(input *DescribeConversionTasksInput) (req *request.Request, output *DescribeConversionTasksOutput) { + op := &request.Operation{ + Name: opDescribeConversionTasks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeConversionTasksInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeConversionTasksOutput{} + req.Data = output + return +} + +// Describes one or more of your conversion tasks. For more information, see +// Using the Command Line Tools to Import Your Virtual Machine to Amazon EC2 +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UploadingYourInstancesandVolumes.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// For information about the import manifest referenced by this API action, +// see VM Import Manifest (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/manifest.html). +func (c *EC2) DescribeConversionTasks(input *DescribeConversionTasksInput) (*DescribeConversionTasksOutput, error) { + req, out := c.DescribeConversionTasksRequest(input) + err := req.Send() + return out, err +} + +const opDescribeCustomerGateways = "DescribeCustomerGateways" + +// DescribeCustomerGatewaysRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCustomerGateways operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeCustomerGateways method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeCustomerGatewaysRequest method. +// req, resp := client.DescribeCustomerGatewaysRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeCustomerGatewaysRequest(input *DescribeCustomerGatewaysInput) (req *request.Request, output *DescribeCustomerGatewaysOutput) { + op := &request.Operation{ + Name: opDescribeCustomerGateways, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeCustomerGatewaysInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCustomerGatewaysOutput{} + req.Data = output + return +} + +// Describes one or more of your VPN customer gateways. +// +// For more information about VPN customer gateways, see Adding a Hardware +// Virtual Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) DescribeCustomerGateways(input *DescribeCustomerGatewaysInput) (*DescribeCustomerGatewaysOutput, error) { + req, out := c.DescribeCustomerGatewaysRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDhcpOptions = "DescribeDhcpOptions" + +// DescribeDhcpOptionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDhcpOptions operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDhcpOptions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDhcpOptionsRequest method. +// req, resp := client.DescribeDhcpOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeDhcpOptionsRequest(input *DescribeDhcpOptionsInput) (req *request.Request, output *DescribeDhcpOptionsOutput) { + op := &request.Operation{ + Name: opDescribeDhcpOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDhcpOptionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDhcpOptionsOutput{} + req.Data = output + return +} + +// Describes one or more of your DHCP options sets. +// +// For more information about DHCP options sets, see DHCP Options Sets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) DescribeDhcpOptions(input *DescribeDhcpOptionsInput) (*DescribeDhcpOptionsOutput, error) { + req, out := c.DescribeDhcpOptionsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeExportTasks = "DescribeExportTasks" + +// DescribeExportTasksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeExportTasks operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeExportTasks method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeExportTasksRequest method. +// req, resp := client.DescribeExportTasksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeExportTasksRequest(input *DescribeExportTasksInput) (req *request.Request, output *DescribeExportTasksOutput) { + op := &request.Operation{ + Name: opDescribeExportTasks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeExportTasksInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeExportTasksOutput{} + req.Data = output + return +} + +// Describes one or more of your export tasks. 
+func (c *EC2) DescribeExportTasks(input *DescribeExportTasksInput) (*DescribeExportTasksOutput, error) { + req, out := c.DescribeExportTasksRequest(input) + err := req.Send() + return out, err +} + +const opDescribeFlowLogs = "DescribeFlowLogs" + +// DescribeFlowLogsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeFlowLogs operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeFlowLogs method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeFlowLogsRequest method. +// req, resp := client.DescribeFlowLogsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeFlowLogsRequest(input *DescribeFlowLogsInput) (req *request.Request, output *DescribeFlowLogsOutput) { + op := &request.Operation{ + Name: opDescribeFlowLogs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeFlowLogsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeFlowLogsOutput{} + req.Data = output + return +} + +// Describes one or more flow logs. To view the information in your flow logs +// (the log streams for the network interfaces), you must use the CloudWatch +// Logs console or the CloudWatch Logs API. +func (c *EC2) DescribeFlowLogs(input *DescribeFlowLogsInput) (*DescribeFlowLogsOutput, error) { + req, out := c.DescribeFlowLogsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeHosts = "DescribeHosts" + +// DescribeHostsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeHosts operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeHosts method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeHostsRequest method. +// req, resp := client.DescribeHostsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeHostsRequest(input *DescribeHostsInput) (req *request.Request, output *DescribeHostsOutput) { + op := &request.Operation{ + Name: opDescribeHosts, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeHostsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeHostsOutput{} + req.Data = output + return +} + +// Describes one or more of your Dedicated hosts. +// +// The results describe only the Dedicated hosts in the region you're currently +// using. 
All listed instances consume capacity on your Dedicated host. Dedicated +// hosts that have recently been released will be listed with the state released. +func (c *EC2) DescribeHosts(input *DescribeHostsInput) (*DescribeHostsOutput, error) { + req, out := c.DescribeHostsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeIdFormat = "DescribeIdFormat" + +// DescribeIdFormatRequest generates a "aws/request.Request" representing the +// client's request for the DescribeIdFormat operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeIdFormat method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeIdFormatRequest method. +// req, resp := client.DescribeIdFormatRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeIdFormatRequest(input *DescribeIdFormatInput) (req *request.Request, output *DescribeIdFormatOutput) { + op := &request.Operation{ + Name: opDescribeIdFormat, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeIdFormatInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeIdFormatOutput{} + req.Data = output + return +} + +// Describes the ID format settings for your resources on a per-region basis, +// for example, to view which resource types are enabled for longer IDs. This +// request only returns information about resource types whose ID formats can +// be modified; it does not return information about other resource types. +// +// The following resource types support longer IDs: instance | reservation +// | snapshot | volume. +// +// These settings apply to the IAM user who makes the request; they do not +// apply to the entire AWS account. By default, an IAM user defaults to the +// same settings as the root user, unless they explicitly override the settings +// by running the ModifyIdFormat command. Resources created with longer IDs +// are visible to all IAM users, regardless of these settings and provided that +// they have permission to use the relevant Describe command for the resource +// type. +func (c *EC2) DescribeIdFormat(input *DescribeIdFormatInput) (*DescribeIdFormatOutput, error) { + req, out := c.DescribeIdFormatRequest(input) + err := req.Send() + return out, err +} + +const opDescribeIdentityIdFormat = "DescribeIdentityIdFormat" + +// DescribeIdentityIdFormatRequest generates a "aws/request.Request" representing the +// client's request for the DescribeIdentityIdFormat operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeIdentityIdFormat method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeIdentityIdFormatRequest method. +// req, resp := client.DescribeIdentityIdFormatRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeIdentityIdFormatRequest(input *DescribeIdentityIdFormatInput) (req *request.Request, output *DescribeIdentityIdFormatOutput) { + op := &request.Operation{ + Name: opDescribeIdentityIdFormat, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeIdentityIdFormatInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeIdentityIdFormatOutput{} + req.Data = output + return +} + +// Describes the ID format settings for resources for the specified IAM user, +// IAM role, or root user. For example, you can view the resource types that +// are enabled for longer IDs. This request only returns information about resource +// types whose ID formats can be modified; it does not return information about +// other resource types. For more information, see Resource IDs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/resource-ids.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// The following resource types support longer IDs: instance | reservation +// | snapshot | volume. +// +// These settings apply to the principal specified in the request. They do +// not apply to the principal that makes the request. +func (c *EC2) DescribeIdentityIdFormat(input *DescribeIdentityIdFormatInput) (*DescribeIdentityIdFormatOutput, error) { + req, out := c.DescribeIdentityIdFormatRequest(input) + err := req.Send() + return out, err +} + +const opDescribeImageAttribute = "DescribeImageAttribute" + +// DescribeImageAttributeRequest generates a "aws/request.Request" representing the +// client's request for the DescribeImageAttribute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeImageAttribute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeImageAttributeRequest method. +// req, resp := client.DescribeImageAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeImageAttributeRequest(input *DescribeImageAttributeInput) (req *request.Request, output *DescribeImageAttributeOutput) { + op := &request.Operation{ + Name: opDescribeImageAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeImageAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeImageAttributeOutput{} + req.Data = output + return +} + +// Describes the specified attribute of the specified AMI. You can specify only +// one attribute at a time. 
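+//
+// A minimal calling sketch, assuming an *ec2.EC2 client built elsewhere; both
+// fields are required, the AMI ID is hypothetical, and "description" is one
+// valid attribute name:
+//
+//	svc := ec2.New(session.New())
+//	resp, err := svc.DescribeImageAttribute(&ec2.DescribeImageAttributeInput{
+//		Attribute: aws.String("description"),
+//		ImageId:   aws.String("ami-11aa22bb"), // hypothetical ID
+//	})
+//	if err == nil {
+//		fmt.Println(resp.Description)
+//	}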
+func (c *EC2) DescribeImageAttribute(input *DescribeImageAttributeInput) (*DescribeImageAttributeOutput, error) { + req, out := c.DescribeImageAttributeRequest(input) + err := req.Send() + return out, err +} + +const opDescribeImages = "DescribeImages" + +// DescribeImagesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeImages operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeImages method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeImagesRequest method. +// req, resp := client.DescribeImagesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeImagesRequest(input *DescribeImagesInput) (req *request.Request, output *DescribeImagesOutput) { + op := &request.Operation{ + Name: opDescribeImages, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeImagesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeImagesOutput{} + req.Data = output + return +} + +// Describes one or more of the images (AMIs, AKIs, and ARIs) available to you. +// Images available to you include public images, private images that you own, +// and private images owned by other AWS accounts but for which you have explicit +// launch permissions. +// +// Deregistered images are included in the returned results for an unspecified +// interval after deregistration. +func (c *EC2) DescribeImages(input *DescribeImagesInput) (*DescribeImagesOutput, error) { + req, out := c.DescribeImagesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeImportImageTasks = "DescribeImportImageTasks" + +// DescribeImportImageTasksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeImportImageTasks operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeImportImageTasks method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeImportImageTasksRequest method. 
+//    req, resp := client.DescribeImportImageTasksRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *EC2) DescribeImportImageTasksRequest(input *DescribeImportImageTasksInput) (req *request.Request, output *DescribeImportImageTasksOutput) {
+	op := &request.Operation{
+		Name:       opDescribeImportImageTasks,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeImportImageTasksInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeImportImageTasksOutput{}
+	req.Data = output
+	return
+}
+
+// Displays details about import virtual machine or import snapshot tasks
+// that have already been created.
+func (c *EC2) DescribeImportImageTasks(input *DescribeImportImageTasksInput) (*DescribeImportImageTasksOutput, error) {
+	req, out := c.DescribeImportImageTasksRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeImportSnapshotTasks = "DescribeImportSnapshotTasks"
+
+// DescribeImportSnapshotTasksRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeImportSnapshotTasks operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeImportSnapshotTasks method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DescribeImportSnapshotTasksRequest method.
+//    req, resp := client.DescribeImportSnapshotTasksRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *EC2) DescribeImportSnapshotTasksRequest(input *DescribeImportSnapshotTasksInput) (req *request.Request, output *DescribeImportSnapshotTasksOutput) {
+	op := &request.Operation{
+		Name:       opDescribeImportSnapshotTasks,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeImportSnapshotTasksInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeImportSnapshotTasksOutput{}
+	req.Data = output
+	return
+}
+
+// Describes your import snapshot tasks.
+func (c *EC2) DescribeImportSnapshotTasks(input *DescribeImportSnapshotTasksInput) (*DescribeImportSnapshotTasksOutput, error) {
+	req, out := c.DescribeImportSnapshotTasksRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeInstanceAttribute = "DescribeInstanceAttribute"
+
+// DescribeInstanceAttributeRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeInstanceAttribute operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeInstanceAttribute method directly
+// instead.
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeInstanceAttributeRequest method. +// req, resp := client.DescribeInstanceAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeInstanceAttributeRequest(input *DescribeInstanceAttributeInput) (req *request.Request, output *DescribeInstanceAttributeOutput) { + op := &request.Operation{ + Name: opDescribeInstanceAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeInstanceAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeInstanceAttributeOutput{} + req.Data = output + return +} + +// Describes the specified attribute of the specified instance. You can specify +// only one attribute at a time. Valid attribute values are: instanceType | +// kernel | ramdisk | userData | disableApiTermination | instanceInitiatedShutdownBehavior +// | rootDeviceName | blockDeviceMapping | productCodes | sourceDestCheck | +// groupSet | ebsOptimized | sriovNetSupport +func (c *EC2) DescribeInstanceAttribute(input *DescribeInstanceAttributeInput) (*DescribeInstanceAttributeOutput, error) { + req, out := c.DescribeInstanceAttributeRequest(input) + err := req.Send() + return out, err +} + +const opDescribeInstanceStatus = "DescribeInstanceStatus" + +// DescribeInstanceStatusRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInstanceStatus operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeInstanceStatus method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeInstanceStatusRequest method. +// req, resp := client.DescribeInstanceStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeInstanceStatusRequest(input *DescribeInstanceStatusInput) (req *request.Request, output *DescribeInstanceStatusOutput) { + op := &request.Operation{ + Name: opDescribeInstanceStatus, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeInstanceStatusInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeInstanceStatusOutput{} + req.Data = output + return +} + +// Describes the status of one or more instances. By default, only running instances +// are described, unless specified otherwise. +// +// Instance status includes the following components: +// +// Status checks - Amazon EC2 performs status checks on running EC2 instances +// to identify hardware and software issues. 
For more information, see Status +// Checks for Your Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-system-instance-status-check.html) +// and Troubleshooting Instances with Failed Status Checks (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstances.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Scheduled events - Amazon EC2 can schedule events (such as reboot, stop, +// or terminate) for your instances related to hardware issues, software updates, +// or system maintenance. For more information, see Scheduled Events for Your +// Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-instances-status-check_sched.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Instance state - You can manage your instances from the moment you launch +// them through their termination. For more information, see Instance Lifecycle +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeInstanceStatus(input *DescribeInstanceStatusInput) (*DescribeInstanceStatusOutput, error) { + req, out := c.DescribeInstanceStatusRequest(input) + err := req.Send() + return out, err +} + +// DescribeInstanceStatusPages iterates over the pages of a DescribeInstanceStatus operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeInstanceStatus method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeInstanceStatus operation. +// pageNum := 0 +// err := client.DescribeInstanceStatusPages(params, +// func(page *DescribeInstanceStatusOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeInstanceStatusPages(input *DescribeInstanceStatusInput, fn func(p *DescribeInstanceStatusOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeInstanceStatusRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeInstanceStatusOutput), lastPage) + }) +} + +const opDescribeInstances = "DescribeInstances" + +// DescribeInstancesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeInstancesRequest method. 
+// req, resp := client.DescribeInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeInstancesRequest(input *DescribeInstancesInput) (req *request.Request, output *DescribeInstancesOutput) { + op := &request.Operation{ + Name: opDescribeInstances, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeInstancesOutput{} + req.Data = output + return +} + +// Describes one or more of your instances. +// +// If you specify one or more instance IDs, Amazon EC2 returns information +// for those instances. If you do not specify instance IDs, Amazon EC2 returns +// information for all relevant instances. If you specify an instance ID that +// is not valid, an error is returned. If you specify an instance that you do +// not own, it is not included in the returned results. +// +// Recently terminated instances might appear in the returned results. This +// interval is usually less than one hour. +// +// If you describe instances in the rare case where an Availability Zone is +// experiencing a service disruption and you specify instance IDs that are in +// the affected zone, or do not specify any instance IDs at all, the call fails. +// If you describe instances and specify only instance IDs that are in an unaffected +// zone, the call works normally. +func (c *EC2) DescribeInstances(input *DescribeInstancesInput) (*DescribeInstancesOutput, error) { + req, out := c.DescribeInstancesRequest(input) + err := req.Send() + return out, err +} + +// DescribeInstancesPages iterates over the pages of a DescribeInstances operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeInstances method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeInstances operation. +// pageNum := 0 +// err := client.DescribeInstancesPages(params, +// func(page *DescribeInstancesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeInstancesPages(input *DescribeInstancesInput, fn func(p *DescribeInstancesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeInstancesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeInstancesOutput), lastPage) + }) +} + +const opDescribeInternetGateways = "DescribeInternetGateways" + +// DescribeInternetGatewaysRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInternetGateways operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the DescribeInternetGateways method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeInternetGatewaysRequest method. +// req, resp := client.DescribeInternetGatewaysRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeInternetGatewaysRequest(input *DescribeInternetGatewaysInput) (req *request.Request, output *DescribeInternetGatewaysOutput) { + op := &request.Operation{ + Name: opDescribeInternetGateways, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeInternetGatewaysInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeInternetGatewaysOutput{} + req.Data = output + return +} + +// Describes one or more of your Internet gateways. +func (c *EC2) DescribeInternetGateways(input *DescribeInternetGatewaysInput) (*DescribeInternetGatewaysOutput, error) { + req, out := c.DescribeInternetGatewaysRequest(input) + err := req.Send() + return out, err +} + +const opDescribeKeyPairs = "DescribeKeyPairs" + +// DescribeKeyPairsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeKeyPairs operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeKeyPairs method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeKeyPairsRequest method. +// req, resp := client.DescribeKeyPairsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeKeyPairsRequest(input *DescribeKeyPairsInput) (req *request.Request, output *DescribeKeyPairsOutput) { + op := &request.Operation{ + Name: opDescribeKeyPairs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeKeyPairsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeKeyPairsOutput{} + req.Data = output + return +} + +// Describes one or more of your key pairs. +// +// For more information about key pairs, see Key Pairs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeKeyPairs(input *DescribeKeyPairsInput) (*DescribeKeyPairsOutput, error) { + req, out := c.DescribeKeyPairsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeMovingAddresses = "DescribeMovingAddresses" + +// DescribeMovingAddressesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeMovingAddresses operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeMovingAddresses method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DescribeMovingAddressesRequest method.
+//    req, resp := client.DescribeMovingAddressesRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *EC2) DescribeMovingAddressesRequest(input *DescribeMovingAddressesInput) (req *request.Request, output *DescribeMovingAddressesOutput) {
+	op := &request.Operation{
+		Name:       opDescribeMovingAddresses,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeMovingAddressesInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeMovingAddressesOutput{}
+	req.Data = output
+	return
+}
+
+// Describes your Elastic IP addresses that are being moved to the EC2-VPC platform,
+// or that are being restored to the EC2-Classic platform. This request does
+// not return information about any other Elastic IP addresses in your account.
+func (c *EC2) DescribeMovingAddresses(input *DescribeMovingAddressesInput) (*DescribeMovingAddressesOutput, error) {
+	req, out := c.DescribeMovingAddressesRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeNatGateways = "DescribeNatGateways"
+
+// DescribeNatGatewaysRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeNatGateways operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeNatGateways method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DescribeNatGatewaysRequest method.
+//    req, resp := client.DescribeNatGatewaysRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *EC2) DescribeNatGatewaysRequest(input *DescribeNatGatewaysInput) (req *request.Request, output *DescribeNatGatewaysOutput) {
+	op := &request.Operation{
+		Name:       opDescribeNatGateways,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeNatGatewaysInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeNatGatewaysOutput{}
+	req.Data = output
+	return
+}
+
+// Describes one or more of your NAT gateways.
+func (c *EC2) DescribeNatGateways(input *DescribeNatGatewaysInput) (*DescribeNatGatewaysOutput, error) {
+	req, out := c.DescribeNatGatewaysRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeNetworkAcls = "DescribeNetworkAcls"
+
+// DescribeNetworkAclsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeNetworkAcls operation.
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeNetworkAcls method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeNetworkAclsRequest method. +// req, resp := client.DescribeNetworkAclsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeNetworkAclsRequest(input *DescribeNetworkAclsInput) (req *request.Request, output *DescribeNetworkAclsOutput) { + op := &request.Operation{ + Name: opDescribeNetworkAcls, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeNetworkAclsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeNetworkAclsOutput{} + req.Data = output + return +} + +// Describes one or more of your network ACLs. +// +// For more information about network ACLs, see Network ACLs (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) DescribeNetworkAcls(input *DescribeNetworkAclsInput) (*DescribeNetworkAclsOutput, error) { + req, out := c.DescribeNetworkAclsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeNetworkInterfaceAttribute = "DescribeNetworkInterfaceAttribute" + +// DescribeNetworkInterfaceAttributeRequest generates a "aws/request.Request" representing the +// client's request for the DescribeNetworkInterfaceAttribute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeNetworkInterfaceAttribute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeNetworkInterfaceAttributeRequest method. +// req, resp := client.DescribeNetworkInterfaceAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeNetworkInterfaceAttributeRequest(input *DescribeNetworkInterfaceAttributeInput) (req *request.Request, output *DescribeNetworkInterfaceAttributeOutput) { + op := &request.Operation{ + Name: opDescribeNetworkInterfaceAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeNetworkInterfaceAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeNetworkInterfaceAttributeOutput{} + req.Data = output + return +} + +// Describes a network interface attribute. You can specify only one attribute +// at a time. 
+func (c *EC2) DescribeNetworkInterfaceAttribute(input *DescribeNetworkInterfaceAttributeInput) (*DescribeNetworkInterfaceAttributeOutput, error) { + req, out := c.DescribeNetworkInterfaceAttributeRequest(input) + err := req.Send() + return out, err +} + +const opDescribeNetworkInterfaces = "DescribeNetworkInterfaces" + +// DescribeNetworkInterfacesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeNetworkInterfaces operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeNetworkInterfaces method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeNetworkInterfacesRequest method. +// req, resp := client.DescribeNetworkInterfacesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeNetworkInterfacesRequest(input *DescribeNetworkInterfacesInput) (req *request.Request, output *DescribeNetworkInterfacesOutput) { + op := &request.Operation{ + Name: opDescribeNetworkInterfaces, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeNetworkInterfacesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeNetworkInterfacesOutput{} + req.Data = output + return +} + +// Describes one or more of your network interfaces. +func (c *EC2) DescribeNetworkInterfaces(input *DescribeNetworkInterfacesInput) (*DescribeNetworkInterfacesOutput, error) { + req, out := c.DescribeNetworkInterfacesRequest(input) + err := req.Send() + return out, err +} + +const opDescribePlacementGroups = "DescribePlacementGroups" + +// DescribePlacementGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribePlacementGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribePlacementGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribePlacementGroupsRequest method. 
+// req, resp := client.DescribePlacementGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribePlacementGroupsRequest(input *DescribePlacementGroupsInput) (req *request.Request, output *DescribePlacementGroupsOutput) { + op := &request.Operation{ + Name: opDescribePlacementGroups, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribePlacementGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribePlacementGroupsOutput{} + req.Data = output + return +} + +// Describes one or more of your placement groups. For more information about +// placement groups and cluster instances, see Cluster Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using_cluster_computing.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribePlacementGroups(input *DescribePlacementGroupsInput) (*DescribePlacementGroupsOutput, error) { + req, out := c.DescribePlacementGroupsRequest(input) + err := req.Send() + return out, err +} + +const opDescribePrefixLists = "DescribePrefixLists" + +// DescribePrefixListsRequest generates a "aws/request.Request" representing the +// client's request for the DescribePrefixLists operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribePrefixLists method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribePrefixListsRequest method. +// req, resp := client.DescribePrefixListsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribePrefixListsRequest(input *DescribePrefixListsInput) (req *request.Request, output *DescribePrefixListsOutput) { + op := &request.Operation{ + Name: opDescribePrefixLists, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribePrefixListsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribePrefixListsOutput{} + req.Data = output + return +} + +// Describes available AWS services in a prefix list format, which includes +// the prefix list name and prefix list ID of the service and the IP address +// range for the service. A prefix list ID is required for creating an outbound +// security group rule that allows traffic from a VPC to access an AWS service +// through a VPC endpoint. +func (c *EC2) DescribePrefixLists(input *DescribePrefixListsInput) (*DescribePrefixListsOutput, error) { + req, out := c.DescribePrefixListsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeRegions = "DescribeRegions" + +// DescribeRegionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeRegions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeRegions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeRegionsRequest method. +// req, resp := client.DescribeRegionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeRegionsRequest(input *DescribeRegionsInput) (req *request.Request, output *DescribeRegionsOutput) { + op := &request.Operation{ + Name: opDescribeRegions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeRegionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeRegionsOutput{} + req.Data = output + return +} + +// Describes one or more regions that are currently available to you. +// +// For a list of the regions supported by Amazon EC2, see Regions and Endpoints +// (http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region). +func (c *EC2) DescribeRegions(input *DescribeRegionsInput) (*DescribeRegionsOutput, error) { + req, out := c.DescribeRegionsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeReservedInstances = "DescribeReservedInstances" + +// DescribeReservedInstancesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReservedInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeReservedInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeReservedInstancesRequest method. +// req, resp := client.DescribeReservedInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeReservedInstancesRequest(input *DescribeReservedInstancesInput) (req *request.Request, output *DescribeReservedInstancesOutput) { + op := &request.Operation{ + Name: opDescribeReservedInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeReservedInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReservedInstancesOutput{} + req.Data = output + return +} + +// Describes one or more of the Reserved Instances that you purchased. +// +// For more information about Reserved Instances, see Reserved Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/concepts-on-demand-reserved-instances.html) +// in the Amazon Elastic Compute Cloud User Guide. 
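+//
+// As an illustrative sketch (assuming "client" is an *ec2.EC2 value
+// constructed elsewhere), active Reserved Instances can be selected with
+// the "state" filter:
+//
+//    out, err := client.DescribeReservedInstances(&ec2.DescribeReservedInstancesInput{
+//        Filters: []*ec2.Filter{{
+//            Name:   aws.String("state"),
+//            Values: []*string{aws.String("active")},
+//        }},
+//    })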
+func (c *EC2) DescribeReservedInstances(input *DescribeReservedInstancesInput) (*DescribeReservedInstancesOutput, error) { + req, out := c.DescribeReservedInstancesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeReservedInstancesListings = "DescribeReservedInstancesListings" + +// DescribeReservedInstancesListingsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReservedInstancesListings operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeReservedInstancesListings method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeReservedInstancesListingsRequest method. +// req, resp := client.DescribeReservedInstancesListingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeReservedInstancesListingsRequest(input *DescribeReservedInstancesListingsInput) (req *request.Request, output *DescribeReservedInstancesListingsOutput) { + op := &request.Operation{ + Name: opDescribeReservedInstancesListings, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeReservedInstancesListingsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReservedInstancesListingsOutput{} + req.Data = output + return +} + +// Describes your account's Reserved Instance listings in the Reserved Instance +// Marketplace. +// +// The Reserved Instance Marketplace matches sellers who want to resell Reserved +// Instance capacity that they no longer need with buyers who want to purchase +// additional capacity. Reserved Instances bought and sold through the Reserved +// Instance Marketplace work like any other Reserved Instances. +// +// As a seller, you choose to list some or all of your Reserved Instances, +// and you specify the upfront price to receive for them. Your Reserved Instances +// are then listed in the Reserved Instance Marketplace and are available for +// purchase. +// +// As a buyer, you specify the configuration of the Reserved Instance to purchase, +// and the Marketplace matches what you're searching for with what's available. +// The Marketplace first sells the lowest priced Reserved Instances to you, +// and continues to sell available Reserved Instance listings to you until your +// demand is met. You are charged based on the total price of all of the listings +// that you purchase. +// +// For more information, see Reserved Instance Marketplace (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) +// in the Amazon Elastic Compute Cloud User Guide. 
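+//
+// A minimal illustrative sketch (assuming "client" is an *ec2.EC2 value
+// constructed elsewhere); with an empty input, all of the account's
+// listings are returned:
+//
+//    out, err := client.DescribeReservedInstancesListings(
+//        &ec2.DescribeReservedInstancesListingsInput{})
+//    if err == nil {
+//        for _, l := range out.ReservedInstancesListings {
+//            fmt.Println(*l.ReservedInstancesListingId, *l.Status)
+//        }
+//    }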
+func (c *EC2) DescribeReservedInstancesListings(input *DescribeReservedInstancesListingsInput) (*DescribeReservedInstancesListingsOutput, error) { + req, out := c.DescribeReservedInstancesListingsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeReservedInstancesModifications = "DescribeReservedInstancesModifications" + +// DescribeReservedInstancesModificationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReservedInstancesModifications operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeReservedInstancesModifications method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeReservedInstancesModificationsRequest method. +// req, resp := client.DescribeReservedInstancesModificationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeReservedInstancesModificationsRequest(input *DescribeReservedInstancesModificationsInput) (req *request.Request, output *DescribeReservedInstancesModificationsOutput) { + op := &request.Operation{ + Name: opDescribeReservedInstancesModifications, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeReservedInstancesModificationsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReservedInstancesModificationsOutput{} + req.Data = output + return +} + +// Describes the modifications made to your Reserved Instances. If no parameter +// is specified, information about all your Reserved Instances modification +// requests is returned. If a modification ID is specified, only information +// about the specific modification is returned. +// +// For more information, see Modifying Reserved Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-modifying.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeReservedInstancesModifications(input *DescribeReservedInstancesModificationsInput) (*DescribeReservedInstancesModificationsOutput, error) { + req, out := c.DescribeReservedInstancesModificationsRequest(input) + err := req.Send() + return out, err +} + +// DescribeReservedInstancesModificationsPages iterates over the pages of a DescribeReservedInstancesModifications operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeReservedInstancesModifications method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeReservedInstancesModifications operation. 
+// pageNum := 0 +// err := client.DescribeReservedInstancesModificationsPages(params, +// func(page *DescribeReservedInstancesModificationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeReservedInstancesModificationsPages(input *DescribeReservedInstancesModificationsInput, fn func(p *DescribeReservedInstancesModificationsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeReservedInstancesModificationsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeReservedInstancesModificationsOutput), lastPage) + }) +} + +const opDescribeReservedInstancesOfferings = "DescribeReservedInstancesOfferings" + +// DescribeReservedInstancesOfferingsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReservedInstancesOfferings operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeReservedInstancesOfferings method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeReservedInstancesOfferingsRequest method. +// req, resp := client.DescribeReservedInstancesOfferingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeReservedInstancesOfferingsRequest(input *DescribeReservedInstancesOfferingsInput) (req *request.Request, output *DescribeReservedInstancesOfferingsOutput) { + op := &request.Operation{ + Name: opDescribeReservedInstancesOfferings, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeReservedInstancesOfferingsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReservedInstancesOfferingsOutput{} + req.Data = output + return +} + +// Describes Reserved Instance offerings that are available for purchase. With +// Reserved Instances, you purchase the right to launch instances for a period +// of time. During that time period, you do not receive insufficient capacity +// errors, and you pay a lower usage rate than the rate charged for On-Demand +// instances for the actual time used. +// +// If you have listed your own Reserved Instances for sale in the Reserved +// Instance Marketplace, they will be excluded from these results. This is to +// ensure that you do not purchase your own Reserved Instances. +// +// For more information, see Reserved Instance Marketplace (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) +// in the Amazon Elastic Compute Cloud User Guide. 
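+//
+// An illustrative sketch narrowing offerings by instance type and platform
+// (the values shown are examples only; "client" is an *ec2.EC2 value
+// constructed elsewhere):
+//
+//    out, err := client.DescribeReservedInstancesOfferings(
+//        &ec2.DescribeReservedInstancesOfferingsInput{
+//            InstanceType:       aws.String("m4.large"),
+//            ProductDescription: aws.String("Linux/UNIX"),
+//        })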
+func (c *EC2) DescribeReservedInstancesOfferings(input *DescribeReservedInstancesOfferingsInput) (*DescribeReservedInstancesOfferingsOutput, error) { + req, out := c.DescribeReservedInstancesOfferingsRequest(input) + err := req.Send() + return out, err +} + +// DescribeReservedInstancesOfferingsPages iterates over the pages of a DescribeReservedInstancesOfferings operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeReservedInstancesOfferings method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeReservedInstancesOfferings operation. +// pageNum := 0 +// err := client.DescribeReservedInstancesOfferingsPages(params, +// func(page *DescribeReservedInstancesOfferingsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeReservedInstancesOfferingsPages(input *DescribeReservedInstancesOfferingsInput, fn func(p *DescribeReservedInstancesOfferingsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeReservedInstancesOfferingsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeReservedInstancesOfferingsOutput), lastPage) + }) +} + +const opDescribeRouteTables = "DescribeRouteTables" + +// DescribeRouteTablesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeRouteTables operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeRouteTables method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeRouteTablesRequest method. +// req, resp := client.DescribeRouteTablesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeRouteTablesRequest(input *DescribeRouteTablesInput) (req *request.Request, output *DescribeRouteTablesOutput) { + op := &request.Operation{ + Name: opDescribeRouteTables, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeRouteTablesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeRouteTablesOutput{} + req.Data = output + return +} + +// Describes one or more of your route tables. +// +// Each subnet in your VPC must be associated with a route table. If a subnet +// is not explicitly associated with any route table, it is implicitly associated +// with the main route table. This command does not return the subnet ID for +// implicit associations. +// +// For more information about route tables, see Route Tables (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) +// in the Amazon Virtual Private Cloud User Guide. 
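+//
+// An illustrative sketch (hypothetical VPC ID; "client" is an *ec2.EC2
+// value constructed elsewhere) listing the route tables of a single VPC:
+//
+//    out, err := client.DescribeRouteTables(&ec2.DescribeRouteTablesInput{
+//        Filters: []*ec2.Filter{{
+//            Name:   aws.String("vpc-id"),
+//            Values: []*string{aws.String("vpc-12345678")},
+//        }},
+//    })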
+func (c *EC2) DescribeRouteTables(input *DescribeRouteTablesInput) (*DescribeRouteTablesOutput, error) { + req, out := c.DescribeRouteTablesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeScheduledInstanceAvailability = "DescribeScheduledInstanceAvailability" + +// DescribeScheduledInstanceAvailabilityRequest generates a "aws/request.Request" representing the +// client's request for the DescribeScheduledInstanceAvailability operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeScheduledInstanceAvailability method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeScheduledInstanceAvailabilityRequest method. +// req, resp := client.DescribeScheduledInstanceAvailabilityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeScheduledInstanceAvailabilityRequest(input *DescribeScheduledInstanceAvailabilityInput) (req *request.Request, output *DescribeScheduledInstanceAvailabilityOutput) { + op := &request.Operation{ + Name: opDescribeScheduledInstanceAvailability, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeScheduledInstanceAvailabilityInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeScheduledInstanceAvailabilityOutput{} + req.Data = output + return +} + +// Finds available schedules that meet the specified criteria. +// +// You can search for an available schedule no more than 3 months in advance. +// You must meet the minimum required duration of 1,200 hours per year. For +// example, the minimum daily schedule is 4 hours, the minimum weekly schedule +// is 24 hours, and the minimum monthly schedule is 100 hours. +// +// After you find a schedule that meets your needs, call PurchaseScheduledInstances +// to purchase Scheduled Instances with that schedule. +func (c *EC2) DescribeScheduledInstanceAvailability(input *DescribeScheduledInstanceAvailabilityInput) (*DescribeScheduledInstanceAvailabilityOutput, error) { + req, out := c.DescribeScheduledInstanceAvailabilityRequest(input) + err := req.Send() + return out, err +} + +const opDescribeScheduledInstances = "DescribeScheduledInstances" + +// DescribeScheduledInstancesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeScheduledInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeScheduledInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the DescribeScheduledInstancesRequest method. +// req, resp := client.DescribeScheduledInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeScheduledInstancesRequest(input *DescribeScheduledInstancesInput) (req *request.Request, output *DescribeScheduledInstancesOutput) { + op := &request.Operation{ + Name: opDescribeScheduledInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeScheduledInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeScheduledInstancesOutput{} + req.Data = output + return +} + +// Describes one or more of your Scheduled Instances. +func (c *EC2) DescribeScheduledInstances(input *DescribeScheduledInstancesInput) (*DescribeScheduledInstancesOutput, error) { + req, out := c.DescribeScheduledInstancesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSecurityGroupReferences = "DescribeSecurityGroupReferences" + +// DescribeSecurityGroupReferencesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSecurityGroupReferences operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeSecurityGroupReferences method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeSecurityGroupReferencesRequest method. +// req, resp := client.DescribeSecurityGroupReferencesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeSecurityGroupReferencesRequest(input *DescribeSecurityGroupReferencesInput) (req *request.Request, output *DescribeSecurityGroupReferencesOutput) { + op := &request.Operation{ + Name: opDescribeSecurityGroupReferences, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSecurityGroupReferencesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSecurityGroupReferencesOutput{} + req.Data = output + return +} + +// [EC2-VPC only] Describes the VPCs on the other side of a VPC peering connection +// that are referencing the security groups you've specified in this request. +func (c *EC2) DescribeSecurityGroupReferences(input *DescribeSecurityGroupReferencesInput) (*DescribeSecurityGroupReferencesOutput, error) { + req, out := c.DescribeSecurityGroupReferencesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSecurityGroups = "DescribeSecurityGroups" + +// DescribeSecurityGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSecurityGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeSecurityGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeSecurityGroupsRequest method. +// req, resp := client.DescribeSecurityGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeSecurityGroupsRequest(input *DescribeSecurityGroupsInput) (req *request.Request, output *DescribeSecurityGroupsOutput) { + op := &request.Operation{ + Name: opDescribeSecurityGroups, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSecurityGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSecurityGroupsOutput{} + req.Data = output + return +} + +// Describes one or more of your security groups. +// +// A security group is for use with instances either in the EC2-Classic platform +// or in a specific VPC. For more information, see Amazon EC2 Security Groups +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html) +// in the Amazon Elastic Compute Cloud User Guide and Security Groups for Your +// VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_SecurityGroups.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) DescribeSecurityGroups(input *DescribeSecurityGroupsInput) (*DescribeSecurityGroupsOutput, error) { + req, out := c.DescribeSecurityGroupsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSnapshotAttribute = "DescribeSnapshotAttribute" + +// DescribeSnapshotAttributeRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSnapshotAttribute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeSnapshotAttribute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeSnapshotAttributeRequest method. +// req, resp := client.DescribeSnapshotAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeSnapshotAttributeRequest(input *DescribeSnapshotAttributeInput) (req *request.Request, output *DescribeSnapshotAttributeOutput) { + op := &request.Operation{ + Name: opDescribeSnapshotAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSnapshotAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSnapshotAttributeOutput{} + req.Data = output + return +} + +// Describes the specified attribute of the specified snapshot. You can specify +// only one attribute at a time. 
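+//
+// A minimal illustrative sketch (hypothetical snapshot ID; "client" is an
+// *ec2.EC2 value constructed elsewhere):
+//
+//    out, err := client.DescribeSnapshotAttribute(&ec2.DescribeSnapshotAttributeInput{
+//        SnapshotId: aws.String("snap-1234567890abcdef0"),
+//        Attribute:  aws.String("createVolumePermission"),
+//    })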
+// +// For more information about EBS snapshots, see Amazon EBS Snapshots (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSSnapshots.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeSnapshotAttribute(input *DescribeSnapshotAttributeInput) (*DescribeSnapshotAttributeOutput, error) { + req, out := c.DescribeSnapshotAttributeRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSnapshots = "DescribeSnapshots" + +// DescribeSnapshotsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSnapshots operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeSnapshots method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeSnapshotsRequest method. +// req, resp := client.DescribeSnapshotsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeSnapshotsRequest(input *DescribeSnapshotsInput) (req *request.Request, output *DescribeSnapshotsOutput) { + op := &request.Operation{ + Name: opDescribeSnapshots, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeSnapshotsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSnapshotsOutput{} + req.Data = output + return +} + +// Describes one or more of the EBS snapshots available to you. Available snapshots +// include public snapshots available for any AWS account to launch, private +// snapshots that you own, and private snapshots owned by another AWS account +// but for which you've been given explicit create volume permissions. +// +// The create volume permissions fall into the following categories: +// +// public: The owner of the snapshot granted create volume permissions for +// the snapshot to the all group. All AWS accounts have create volume permissions +// for these snapshots. +// +// explicit: The owner of the snapshot granted create volume permissions +// to a specific AWS account. +// +// implicit: An AWS account has implicit create volume permissions for all +// snapshots it owns. +// +// The list of snapshots returned can be modified by specifying snapshot +// IDs, snapshot owners, or AWS accounts with create volume permissions. If +// no options are specified, Amazon EC2 returns all snapshots for which you +// have create volume permissions. +// +// If you specify one or more snapshot IDs, only snapshots that have the specified +// IDs are returned. If you specify an invalid snapshot ID, an error is returned. +// If you specify a snapshot ID for which you do not have access, it is not +// included in the returned results. +// +// If you specify one or more snapshot owners using the OwnerIds option, only +// snapshots from the specified owners and for which you have access are returned. 
+// The results can include the AWS account IDs of the specified owners, amazon
+// for snapshots owned by Amazon, or self for snapshots that you own.
+//
+// If you specify a list of restorable users, only snapshots with create volume
+// permissions for those users are returned. You can specify AWS account IDs
+// (if you own the snapshots), self for snapshots that you own or for which
+// you have explicit permissions, or all for public snapshots.
+//
+// If you are describing a long list of snapshots, you can paginate the output
+// to make the list more manageable. The MaxResults parameter sets the maximum
+// number of results returned in a single page. If the list of results exceeds
+// your MaxResults value, then that number of results is returned along with
+// a NextToken value that can be passed to a subsequent DescribeSnapshots request
+// to retrieve the remaining results.
+//
+// For more information about EBS snapshots, see Amazon EBS Snapshots (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSSnapshots.html)
+// in the Amazon Elastic Compute Cloud User Guide.
+func (c *EC2) DescribeSnapshots(input *DescribeSnapshotsInput) (*DescribeSnapshotsOutput, error) {
+	req, out := c.DescribeSnapshotsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// DescribeSnapshotsPages iterates over the pages of a DescribeSnapshots operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See DescribeSnapshots method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+//    // Example iterating over at most 3 pages of a DescribeSnapshots operation.
+//    pageNum := 0
+//    err := client.DescribeSnapshotsPages(params,
+//        func(page *DescribeSnapshotsOutput, lastPage bool) bool {
+//            pageNum++
+//            fmt.Println(page)
+//            return pageNum <= 3
+//        })
+//
+func (c *EC2) DescribeSnapshotsPages(input *DescribeSnapshotsInput, fn func(p *DescribeSnapshotsOutput, lastPage bool) (shouldContinue bool)) error {
+	page, _ := c.DescribeSnapshotsRequest(input)
+	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+	return page.EachPage(func(p interface{}, lastPage bool) bool {
+		return fn(p.(*DescribeSnapshotsOutput), lastPage)
+	})
+}
+
+const opDescribeSpotDatafeedSubscription = "DescribeSpotDatafeedSubscription"
+
+// DescribeSpotDatafeedSubscriptionRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeSpotDatafeedSubscription operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeSpotDatafeedSubscription method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DescribeSpotDatafeedSubscriptionRequest method.
+// req, resp := client.DescribeSpotDatafeedSubscriptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeSpotDatafeedSubscriptionRequest(input *DescribeSpotDatafeedSubscriptionInput) (req *request.Request, output *DescribeSpotDatafeedSubscriptionOutput) { + op := &request.Operation{ + Name: opDescribeSpotDatafeedSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSpotDatafeedSubscriptionInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSpotDatafeedSubscriptionOutput{} + req.Data = output + return +} + +// Describes the data feed for Spot instances. For more information, see Spot +// Instance Data Feed (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-data-feeds.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeSpotDatafeedSubscription(input *DescribeSpotDatafeedSubscriptionInput) (*DescribeSpotDatafeedSubscriptionOutput, error) { + req, out := c.DescribeSpotDatafeedSubscriptionRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSpotFleetInstances = "DescribeSpotFleetInstances" + +// DescribeSpotFleetInstancesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSpotFleetInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeSpotFleetInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeSpotFleetInstancesRequest method. +// req, resp := client.DescribeSpotFleetInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeSpotFleetInstancesRequest(input *DescribeSpotFleetInstancesInput) (req *request.Request, output *DescribeSpotFleetInstancesOutput) { + op := &request.Operation{ + Name: opDescribeSpotFleetInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSpotFleetInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSpotFleetInstancesOutput{} + req.Data = output + return +} + +// Describes the running instances for the specified Spot fleet. +func (c *EC2) DescribeSpotFleetInstances(input *DescribeSpotFleetInstancesInput) (*DescribeSpotFleetInstancesOutput, error) { + req, out := c.DescribeSpotFleetInstancesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSpotFleetRequestHistory = "DescribeSpotFleetRequestHistory" + +// DescribeSpotFleetRequestHistoryRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSpotFleetRequestHistory operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeSpotFleetRequestHistory method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeSpotFleetRequestHistoryRequest method. +// req, resp := client.DescribeSpotFleetRequestHistoryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeSpotFleetRequestHistoryRequest(input *DescribeSpotFleetRequestHistoryInput) (req *request.Request, output *DescribeSpotFleetRequestHistoryOutput) { + op := &request.Operation{ + Name: opDescribeSpotFleetRequestHistory, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSpotFleetRequestHistoryInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSpotFleetRequestHistoryOutput{} + req.Data = output + return +} + +// Describes the events for the specified Spot fleet request during the specified +// time. +// +// Spot fleet events are delayed by up to 30 seconds before they can be described. +// This ensures that you can query by the last evaluated time and not miss a +// recorded event. +func (c *EC2) DescribeSpotFleetRequestHistory(input *DescribeSpotFleetRequestHistoryInput) (*DescribeSpotFleetRequestHistoryOutput, error) { + req, out := c.DescribeSpotFleetRequestHistoryRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSpotFleetRequests = "DescribeSpotFleetRequests" + +// DescribeSpotFleetRequestsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSpotFleetRequests operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeSpotFleetRequests method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeSpotFleetRequestsRequest method. +// req, resp := client.DescribeSpotFleetRequestsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeSpotFleetRequestsRequest(input *DescribeSpotFleetRequestsInput) (req *request.Request, output *DescribeSpotFleetRequestsOutput) { + op := &request.Operation{ + Name: opDescribeSpotFleetRequests, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeSpotFleetRequestsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSpotFleetRequestsOutput{} + req.Data = output + return +} + +// Describes your Spot fleet requests. 
+func (c *EC2) DescribeSpotFleetRequests(input *DescribeSpotFleetRequestsInput) (*DescribeSpotFleetRequestsOutput, error) { + req, out := c.DescribeSpotFleetRequestsRequest(input) + err := req.Send() + return out, err +} + +// DescribeSpotFleetRequestsPages iterates over the pages of a DescribeSpotFleetRequests operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeSpotFleetRequests method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeSpotFleetRequests operation. +// pageNum := 0 +// err := client.DescribeSpotFleetRequestsPages(params, +// func(page *DescribeSpotFleetRequestsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeSpotFleetRequestsPages(input *DescribeSpotFleetRequestsInput, fn func(p *DescribeSpotFleetRequestsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeSpotFleetRequestsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeSpotFleetRequestsOutput), lastPage) + }) +} + +const opDescribeSpotInstanceRequests = "DescribeSpotInstanceRequests" + +// DescribeSpotInstanceRequestsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSpotInstanceRequests operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeSpotInstanceRequests method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeSpotInstanceRequestsRequest method. +// req, resp := client.DescribeSpotInstanceRequestsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeSpotInstanceRequestsRequest(input *DescribeSpotInstanceRequestsInput) (req *request.Request, output *DescribeSpotInstanceRequestsOutput) { + op := &request.Operation{ + Name: opDescribeSpotInstanceRequests, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSpotInstanceRequestsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSpotInstanceRequestsOutput{} + req.Data = output + return +} + +// Describes the Spot instance requests that belong to your account. Spot instances +// are instances that Amazon EC2 launches when the bid price that you specify +// exceeds the current Spot price. Amazon EC2 periodically sets the Spot price +// based on available Spot instance capacity and current Spot instance requests. +// For more information, see Spot Instance Requests (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-requests.html) +// in the Amazon Elastic Compute Cloud User Guide. 
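+//
+// As a minimal sketch (the request ID below is a hypothetical placeholder),
+// a fulfilled request exposes the ID of the instance it launched:
+//
+//    resp, err := client.DescribeSpotInstanceRequests(&DescribeSpotInstanceRequestsInput{
+//        SpotInstanceRequestIds: []*string{aws.String("sir-example1")},
+//    })
+//    if err == nil {
+//        for _, r := range resp.SpotInstanceRequests {
+//            if r.Status != nil && aws.StringValue(r.Status.Code) == "fulfilled" {
+//                fmt.Println(aws.StringValue(r.InstanceId))
+//            }
+//        }
+//    }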
+// +// You can use DescribeSpotInstanceRequests to find a running Spot instance +// by examining the response. If the status of the Spot instance is fulfilled, +// the instance ID appears in the response and contains the identifier of the +// instance. Alternatively, you can use DescribeInstances with a filter to look +// for instances where the instance lifecycle is spot. +func (c *EC2) DescribeSpotInstanceRequests(input *DescribeSpotInstanceRequestsInput) (*DescribeSpotInstanceRequestsOutput, error) { + req, out := c.DescribeSpotInstanceRequestsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSpotPriceHistory = "DescribeSpotPriceHistory" + +// DescribeSpotPriceHistoryRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSpotPriceHistory operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeSpotPriceHistory method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeSpotPriceHistoryRequest method. +// req, resp := client.DescribeSpotPriceHistoryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeSpotPriceHistoryRequest(input *DescribeSpotPriceHistoryInput) (req *request.Request, output *DescribeSpotPriceHistoryOutput) { + op := &request.Operation{ + Name: opDescribeSpotPriceHistory, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeSpotPriceHistoryInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSpotPriceHistoryOutput{} + req.Data = output + return +} + +// Describes the Spot price history. The prices returned are listed in chronological +// order, from the oldest to the most recent, for up to the past 90 days. For +// more information, see Spot Instance Pricing History (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-spot-instances-history.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// When you specify a start and end time, this operation returns the prices +// of the instance types within the time range that you specified and the time +// when the price changed. The price is valid within the time period that you +// specified; the response merely indicates the last time that the price changed. +func (c *EC2) DescribeSpotPriceHistory(input *DescribeSpotPriceHistoryInput) (*DescribeSpotPriceHistoryOutput, error) { + req, out := c.DescribeSpotPriceHistoryRequest(input) + err := req.Send() + return out, err +} + +// DescribeSpotPriceHistoryPages iterates over the pages of a DescribeSpotPriceHistory operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeSpotPriceHistory method for more information on how to use this operation. 
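+//
+// A sketch of paging through a bounded time range (the instance type, product
+// description, and 24-hour window are illustrative assumptions):
+//
+//    params := &DescribeSpotPriceHistoryInput{
+//        InstanceTypes:       []*string{aws.String("c4.large")},
+//        ProductDescriptions: []*string{aws.String("Linux/UNIX")},
+//        StartTime:           aws.Time(time.Now().Add(-24 * time.Hour)),
+//        EndTime:             aws.Time(time.Now()),
+//    }
+//    err := client.DescribeSpotPriceHistoryPages(params,
+//        func(page *DescribeSpotPriceHistoryOutput, lastPage bool) bool {
+//            for _, price := range page.SpotPriceHistory {
+//                fmt.Println(aws.StringValue(price.SpotPrice), aws.TimeValue(price.Timestamp))
+//            }
+//            return true
+//        })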
+// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeSpotPriceHistory operation. +// pageNum := 0 +// err := client.DescribeSpotPriceHistoryPages(params, +// func(page *DescribeSpotPriceHistoryOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeSpotPriceHistoryPages(input *DescribeSpotPriceHistoryInput, fn func(p *DescribeSpotPriceHistoryOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeSpotPriceHistoryRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeSpotPriceHistoryOutput), lastPage) + }) +} + +const opDescribeStaleSecurityGroups = "DescribeStaleSecurityGroups" + +// DescribeStaleSecurityGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeStaleSecurityGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeStaleSecurityGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeStaleSecurityGroupsRequest method. +// req, resp := client.DescribeStaleSecurityGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeStaleSecurityGroupsRequest(input *DescribeStaleSecurityGroupsInput) (req *request.Request, output *DescribeStaleSecurityGroupsOutput) { + op := &request.Operation{ + Name: opDescribeStaleSecurityGroups, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeStaleSecurityGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeStaleSecurityGroupsOutput{} + req.Data = output + return +} + +// [EC2-VPC only] Describes the stale security group rules for security groups +// in a specified VPC. Rules are stale when they reference a deleted security +// group in a peer VPC, or a security group in a peer VPC for which the VPC +// peering connection has been deleted. +func (c *EC2) DescribeStaleSecurityGroups(input *DescribeStaleSecurityGroupsInput) (*DescribeStaleSecurityGroupsOutput, error) { + req, out := c.DescribeStaleSecurityGroupsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSubnets = "DescribeSubnets" + +// DescribeSubnetsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSubnets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeSubnets method directly +// instead. 
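+//
+// For example, custom logic can be pushed onto the request's handler lists
+// before sending (the logging hook below is only an illustrative assumption):
+//
+//    req, resp := client.DescribeSubnetsRequest(params)
+//    req.Handlers.Send.PushFront(func(r *request.Request) {
+//        fmt.Println("sending", r.Operation.Name) // runs before the HTTP send
+//    })
+//    err := req.Send()
+//    if err == nil {
+//        fmt.Println(resp)
+//    }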
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeSubnetsRequest method. +// req, resp := client.DescribeSubnetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeSubnetsRequest(input *DescribeSubnetsInput) (req *request.Request, output *DescribeSubnetsOutput) { + op := &request.Operation{ + Name: opDescribeSubnets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSubnetsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSubnetsOutput{} + req.Data = output + return +} + +// Describes one or more of your subnets. +// +// For more information about subnets, see Your VPC and Subnets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) DescribeSubnets(input *DescribeSubnetsInput) (*DescribeSubnetsOutput, error) { + req, out := c.DescribeSubnetsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeTags = "DescribeTags" + +// DescribeTagsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeTagsRequest method. +// req, resp := client.DescribeTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeTagsRequest(input *DescribeTagsInput) (req *request.Request, output *DescribeTagsOutput) { + op := &request.Operation{ + Name: opDescribeTags, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTagsOutput{} + req.Data = output + return +} + +// Describes one or more of the tags for your EC2 resources. +// +// For more information about tags, see Tagging Your Resources (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeTags(input *DescribeTagsInput) (*DescribeTagsOutput, error) { + req, out := c.DescribeTagsRequest(input) + err := req.Send() + return out, err +} + +// DescribeTagsPages iterates over the pages of a DescribeTags operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeTags method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeTags operation. 
+// pageNum := 0 +// err := client.DescribeTagsPages(params, +// func(page *DescribeTagsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeTagsPages(input *DescribeTagsInput, fn func(p *DescribeTagsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeTagsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeTagsOutput), lastPage) + }) +} + +const opDescribeVolumeAttribute = "DescribeVolumeAttribute" + +// DescribeVolumeAttributeRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVolumeAttribute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeVolumeAttribute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeVolumeAttributeRequest method. +// req, resp := client.DescribeVolumeAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeVolumeAttributeRequest(input *DescribeVolumeAttributeInput) (req *request.Request, output *DescribeVolumeAttributeOutput) { + op := &request.Operation{ + Name: opDescribeVolumeAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVolumeAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVolumeAttributeOutput{} + req.Data = output + return +} + +// Describes the specified attribute of the specified volume. You can specify +// only one attribute at a time. +// +// For more information about EBS volumes, see Amazon EBS Volumes (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumes.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeVolumeAttribute(input *DescribeVolumeAttributeInput) (*DescribeVolumeAttributeOutput, error) { + req, out := c.DescribeVolumeAttributeRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVolumeStatus = "DescribeVolumeStatus" + +// DescribeVolumeStatusRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVolumeStatus operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeVolumeStatus method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeVolumeStatusRequest method. 
+//    req, resp := client.DescribeVolumeStatusRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *EC2) DescribeVolumeStatusRequest(input *DescribeVolumeStatusInput) (req *request.Request, output *DescribeVolumeStatusOutput) {
+	op := &request.Operation{
+		Name:       opDescribeVolumeStatus,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"NextToken"},
+			OutputTokens:    []string{"NextToken"},
+			LimitToken:      "MaxResults",
+			TruncationToken: "",
+		},
+	}
+
+	if input == nil {
+		input = &DescribeVolumeStatusInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeVolumeStatusOutput{}
+	req.Data = output
+	return
+}
+
+// Describes the status of the specified volumes. Volume status provides the
+// result of the checks performed on your volumes to determine events that can
+// impair the performance of your volumes. The performance of a volume can be
+// affected if an issue occurs on the volume's underlying host. If the volume's
+// underlying host experiences a power outage or system issue, after the system
+// is restored, there could be data inconsistencies on the volume. Volume events
+// notify you if this occurs. Volume actions notify you if any action needs
+// to be taken in response to the event.
+//
+// The DescribeVolumeStatus operation provides the following information about
+// the specified volumes:
+//
+// Status: Reflects the current status of the volume. The possible values
+// are ok, impaired, warning, or insufficient-data. If all checks pass, the
+// overall status of the volume is ok. If the check fails, the overall status
+// is impaired. If the status is insufficient-data, then the checks may still
+// be taking place on your volume at the time. We recommend that you retry the
+// request. For more information on volume status, see Monitoring the Status
+// of Your Volumes (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-volume-status.html).
+//
+// Events: Reflect the cause of a volume status and may require you to take
+// action. For example, if your volume returns an impaired status, then the
+// volume event might be potential-data-inconsistency. This means that your
+// volume has been affected by an issue with the underlying host, has all I/O
+// operations disabled, and may have inconsistent data.
+//
+// Actions: Reflect the actions you may have to take in response to an event.
+// For example, if the status of the volume is impaired and the volume event
+// shows potential-data-inconsistency, then the action shows enable-volume-io.
+// This means that you may want to enable the I/O operations for the volume
+// by calling the EnableVolumeIO action and then check the volume for data consistency.
+//
+// Volume status is based on the volume status checks, and does not reflect
+// the volume state. Therefore, volume status does not indicate volumes in the
+// error state (for example, when a volume is incapable of accepting I/O).
+func (c *EC2) DescribeVolumeStatus(input *DescribeVolumeStatusInput) (*DescribeVolumeStatusOutput, error) {
+	req, out := c.DescribeVolumeStatusRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// DescribeVolumeStatusPages iterates over the pages of a DescribeVolumeStatus operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
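+//
+// A minimal sketch that pages through statuses and flags impaired volumes
+// (params is assumed to be a prepared DescribeVolumeStatusInput):
+//
+//    err := client.DescribeVolumeStatusPages(params,
+//        func(page *DescribeVolumeStatusOutput, lastPage bool) bool {
+//            for _, vs := range page.VolumeStatuses {
+//                if vs.VolumeStatus != nil && aws.StringValue(vs.VolumeStatus.Status) == "impaired" {
+//                    fmt.Println(aws.StringValue(vs.VolumeId), "needs attention")
+//                }
+//            }
+//            return true
+//        })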
+// +// See DescribeVolumeStatus method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeVolumeStatus operation. +// pageNum := 0 +// err := client.DescribeVolumeStatusPages(params, +// func(page *DescribeVolumeStatusOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeVolumeStatusPages(input *DescribeVolumeStatusInput, fn func(p *DescribeVolumeStatusOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeVolumeStatusRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeVolumeStatusOutput), lastPage) + }) +} + +const opDescribeVolumes = "DescribeVolumes" + +// DescribeVolumesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVolumes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeVolumes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeVolumesRequest method. +// req, resp := client.DescribeVolumesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeVolumesRequest(input *DescribeVolumesInput) (req *request.Request, output *DescribeVolumesOutput) { + op := &request.Operation{ + Name: opDescribeVolumes, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeVolumesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVolumesOutput{} + req.Data = output + return +} + +// Describes the specified EBS volumes. +// +// If you are describing a long list of volumes, you can paginate the output +// to make the list more manageable. The MaxResults parameter sets the maximum +// number of results returned in a single page. If the list of results exceeds +// your MaxResults value, then that number of results is returned along with +// a NextToken value that can be passed to a subsequent DescribeVolumes request +// to retrieve the remaining results. +// +// For more information about EBS volumes, see Amazon EBS Volumes (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumes.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeVolumes(input *DescribeVolumesInput) (*DescribeVolumesOutput, error) { + req, out := c.DescribeVolumesRequest(input) + err := req.Send() + return out, err +} + +// DescribeVolumesPages iterates over the pages of a DescribeVolumes operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. 
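+//
+// This helper wraps the manual MaxResults/NextToken loop described above,
+// which, written by hand, looks roughly like this (the page size is an
+// arbitrary assumption):
+//
+//    input := &DescribeVolumesInput{MaxResults: aws.Int64(100)}
+//    for {
+//        out, err := client.DescribeVolumes(input)
+//        if err != nil {
+//            break
+//        }
+//        fmt.Println(len(out.Volumes), "volumes in this page")
+//        if aws.StringValue(out.NextToken) == "" {
+//            break
+//        }
+//        input.NextToken = out.NextToken
+//    }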
+// +// See DescribeVolumes method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeVolumes operation. +// pageNum := 0 +// err := client.DescribeVolumesPages(params, +// func(page *DescribeVolumesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeVolumesPages(input *DescribeVolumesInput, fn func(p *DescribeVolumesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeVolumesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeVolumesOutput), lastPage) + }) +} + +const opDescribeVpcAttribute = "DescribeVpcAttribute" + +// DescribeVpcAttributeRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVpcAttribute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeVpcAttribute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeVpcAttributeRequest method. +// req, resp := client.DescribeVpcAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeVpcAttributeRequest(input *DescribeVpcAttributeInput) (req *request.Request, output *DescribeVpcAttributeOutput) { + op := &request.Operation{ + Name: opDescribeVpcAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpcAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVpcAttributeOutput{} + req.Data = output + return +} + +// Describes the specified attribute of the specified VPC. You can specify only +// one attribute at a time. +func (c *EC2) DescribeVpcAttribute(input *DescribeVpcAttributeInput) (*DescribeVpcAttributeOutput, error) { + req, out := c.DescribeVpcAttributeRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVpcClassicLink = "DescribeVpcClassicLink" + +// DescribeVpcClassicLinkRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVpcClassicLink operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeVpcClassicLink method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeVpcClassicLinkRequest method. 
+// req, resp := client.DescribeVpcClassicLinkRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeVpcClassicLinkRequest(input *DescribeVpcClassicLinkInput) (req *request.Request, output *DescribeVpcClassicLinkOutput) { + op := &request.Operation{ + Name: opDescribeVpcClassicLink, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpcClassicLinkInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVpcClassicLinkOutput{} + req.Data = output + return +} + +// Describes the ClassicLink status of one or more VPCs. +func (c *EC2) DescribeVpcClassicLink(input *DescribeVpcClassicLinkInput) (*DescribeVpcClassicLinkOutput, error) { + req, out := c.DescribeVpcClassicLinkRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVpcClassicLinkDnsSupport = "DescribeVpcClassicLinkDnsSupport" + +// DescribeVpcClassicLinkDnsSupportRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVpcClassicLinkDnsSupport operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeVpcClassicLinkDnsSupport method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeVpcClassicLinkDnsSupportRequest method. +// req, resp := client.DescribeVpcClassicLinkDnsSupportRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeVpcClassicLinkDnsSupportRequest(input *DescribeVpcClassicLinkDnsSupportInput) (req *request.Request, output *DescribeVpcClassicLinkDnsSupportOutput) { + op := &request.Operation{ + Name: opDescribeVpcClassicLinkDnsSupport, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpcClassicLinkDnsSupportInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVpcClassicLinkDnsSupportOutput{} + req.Data = output + return +} + +// Describes the ClassicLink DNS support status of one or more VPCs. If enabled, +// the DNS hostname of a linked EC2-Classic instance resolves to its private +// IP address when addressed from an instance in the VPC to which it's linked. +// Similarly, the DNS hostname of an instance in a VPC resolves to its private +// IP address when addressed from a linked EC2-Classic instance. For more information +// about ClassicLink, see ClassicLink (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeVpcClassicLinkDnsSupport(input *DescribeVpcClassicLinkDnsSupportInput) (*DescribeVpcClassicLinkDnsSupportOutput, error) { + req, out := c.DescribeVpcClassicLinkDnsSupportRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVpcEndpointServices = "DescribeVpcEndpointServices" + +// DescribeVpcEndpointServicesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVpcEndpointServices operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeVpcEndpointServices method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeVpcEndpointServicesRequest method. +// req, resp := client.DescribeVpcEndpointServicesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeVpcEndpointServicesRequest(input *DescribeVpcEndpointServicesInput) (req *request.Request, output *DescribeVpcEndpointServicesOutput) { + op := &request.Operation{ + Name: opDescribeVpcEndpointServices, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpcEndpointServicesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVpcEndpointServicesOutput{} + req.Data = output + return +} + +// Describes all supported AWS services that can be specified when creating +// a VPC endpoint. +func (c *EC2) DescribeVpcEndpointServices(input *DescribeVpcEndpointServicesInput) (*DescribeVpcEndpointServicesOutput, error) { + req, out := c.DescribeVpcEndpointServicesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVpcEndpoints = "DescribeVpcEndpoints" + +// DescribeVpcEndpointsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVpcEndpoints operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeVpcEndpoints method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeVpcEndpointsRequest method. +// req, resp := client.DescribeVpcEndpointsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeVpcEndpointsRequest(input *DescribeVpcEndpointsInput) (req *request.Request, output *DescribeVpcEndpointsOutput) { + op := &request.Operation{ + Name: opDescribeVpcEndpoints, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpcEndpointsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVpcEndpointsOutput{} + req.Data = output + return +} + +// Describes one or more of your VPC endpoints. 
+func (c *EC2) DescribeVpcEndpoints(input *DescribeVpcEndpointsInput) (*DescribeVpcEndpointsOutput, error) { + req, out := c.DescribeVpcEndpointsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVpcPeeringConnections = "DescribeVpcPeeringConnections" + +// DescribeVpcPeeringConnectionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVpcPeeringConnections operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeVpcPeeringConnections method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeVpcPeeringConnectionsRequest method. +// req, resp := client.DescribeVpcPeeringConnectionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeVpcPeeringConnectionsRequest(input *DescribeVpcPeeringConnectionsInput) (req *request.Request, output *DescribeVpcPeeringConnectionsOutput) { + op := &request.Operation{ + Name: opDescribeVpcPeeringConnections, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpcPeeringConnectionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVpcPeeringConnectionsOutput{} + req.Data = output + return +} + +// Describes one or more of your VPC peering connections. +func (c *EC2) DescribeVpcPeeringConnections(input *DescribeVpcPeeringConnectionsInput) (*DescribeVpcPeeringConnectionsOutput, error) { + req, out := c.DescribeVpcPeeringConnectionsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVpcs = "DescribeVpcs" + +// DescribeVpcsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVpcs operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeVpcs method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeVpcsRequest method. +// req, resp := client.DescribeVpcsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeVpcsRequest(input *DescribeVpcsInput) (req *request.Request, output *DescribeVpcsOutput) { + op := &request.Operation{ + Name: opDescribeVpcs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpcsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVpcsOutput{} + req.Data = output + return +} + +// Describes one or more of your VPCs. 
+func (c *EC2) DescribeVpcs(input *DescribeVpcsInput) (*DescribeVpcsOutput, error) { + req, out := c.DescribeVpcsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVpnConnections = "DescribeVpnConnections" + +// DescribeVpnConnectionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVpnConnections operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeVpnConnections method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeVpnConnectionsRequest method. +// req, resp := client.DescribeVpnConnectionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeVpnConnectionsRequest(input *DescribeVpnConnectionsInput) (req *request.Request, output *DescribeVpnConnectionsOutput) { + op := &request.Operation{ + Name: opDescribeVpnConnections, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpnConnectionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVpnConnectionsOutput{} + req.Data = output + return +} + +// Describes one or more of your VPN connections. +// +// For more information about VPN connections, see Adding a Hardware Virtual +// Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) DescribeVpnConnections(input *DescribeVpnConnectionsInput) (*DescribeVpnConnectionsOutput, error) { + req, out := c.DescribeVpnConnectionsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVpnGateways = "DescribeVpnGateways" + +// DescribeVpnGatewaysRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVpnGateways operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeVpnGateways method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeVpnGatewaysRequest method. 
+// req, resp := client.DescribeVpnGatewaysRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeVpnGatewaysRequest(input *DescribeVpnGatewaysInput) (req *request.Request, output *DescribeVpnGatewaysOutput) { + op := &request.Operation{ + Name: opDescribeVpnGateways, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpnGatewaysInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVpnGatewaysOutput{} + req.Data = output + return +} + +// Describes one or more of your virtual private gateways. +// +// For more information about virtual private gateways, see Adding an IPsec +// Hardware VPN to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) DescribeVpnGateways(input *DescribeVpnGatewaysInput) (*DescribeVpnGatewaysOutput, error) { + req, out := c.DescribeVpnGatewaysRequest(input) + err := req.Send() + return out, err +} + +const opDetachClassicLinkVpc = "DetachClassicLinkVpc" + +// DetachClassicLinkVpcRequest generates a "aws/request.Request" representing the +// client's request for the DetachClassicLinkVpc operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DetachClassicLinkVpc method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DetachClassicLinkVpcRequest method. +// req, resp := client.DetachClassicLinkVpcRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DetachClassicLinkVpcRequest(input *DetachClassicLinkVpcInput) (req *request.Request, output *DetachClassicLinkVpcOutput) { + op := &request.Operation{ + Name: opDetachClassicLinkVpc, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachClassicLinkVpcInput{} + } + + req = c.newRequest(op, input, output) + output = &DetachClassicLinkVpcOutput{} + req.Data = output + return +} + +// Unlinks (detaches) a linked EC2-Classic instance from a VPC. After the instance +// has been unlinked, the VPC security groups are no longer associated with +// it. An instance is automatically unlinked from a VPC when it's stopped. +func (c *EC2) DetachClassicLinkVpc(input *DetachClassicLinkVpcInput) (*DetachClassicLinkVpcOutput, error) { + req, out := c.DetachClassicLinkVpcRequest(input) + err := req.Send() + return out, err +} + +const opDetachInternetGateway = "DetachInternetGateway" + +// DetachInternetGatewayRequest generates a "aws/request.Request" representing the +// client's request for the DetachInternetGateway operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the DetachInternetGateway method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DetachInternetGatewayRequest method. +// req, resp := client.DetachInternetGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DetachInternetGatewayRequest(input *DetachInternetGatewayInput) (req *request.Request, output *DetachInternetGatewayOutput) { + op := &request.Operation{ + Name: opDetachInternetGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachInternetGatewayInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DetachInternetGatewayOutput{} + req.Data = output + return +} + +// Detaches an Internet gateway from a VPC, disabling connectivity between the +// Internet and the VPC. The VPC must not contain any running instances with +// Elastic IP addresses. +func (c *EC2) DetachInternetGateway(input *DetachInternetGatewayInput) (*DetachInternetGatewayOutput, error) { + req, out := c.DetachInternetGatewayRequest(input) + err := req.Send() + return out, err +} + +const opDetachNetworkInterface = "DetachNetworkInterface" + +// DetachNetworkInterfaceRequest generates a "aws/request.Request" representing the +// client's request for the DetachNetworkInterface operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DetachNetworkInterface method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DetachNetworkInterfaceRequest method. +// req, resp := client.DetachNetworkInterfaceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DetachNetworkInterfaceRequest(input *DetachNetworkInterfaceInput) (req *request.Request, output *DetachNetworkInterfaceOutput) { + op := &request.Operation{ + Name: opDetachNetworkInterface, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachNetworkInterfaceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DetachNetworkInterfaceOutput{} + req.Data = output + return +} + +// Detaches a network interface from an instance. +func (c *EC2) DetachNetworkInterface(input *DetachNetworkInterfaceInput) (*DetachNetworkInterfaceOutput, error) { + req, out := c.DetachNetworkInterfaceRequest(input) + err := req.Send() + return out, err +} + +const opDetachVolume = "DetachVolume" + +// DetachVolumeRequest generates a "aws/request.Request" representing the +// client's request for the DetachVolume operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DetachVolume method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DetachVolumeRequest method. +// req, resp := client.DetachVolumeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DetachVolumeRequest(input *DetachVolumeInput) (req *request.Request, output *VolumeAttachment) { + op := &request.Operation{ + Name: opDetachVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachVolumeInput{} + } + + req = c.newRequest(op, input, output) + output = &VolumeAttachment{} + req.Data = output + return +} + +// Detaches an EBS volume from an instance. Make sure to unmount any file systems +// on the device within your operating system before detaching the volume. Failure +// to do so results in the volume being stuck in a busy state while detaching. +// +// If an Amazon EBS volume is the root device of an instance, it can't be detached +// while the instance is running. To detach the root volume, stop the instance +// first. +// +// When a volume with an AWS Marketplace product code is detached from an instance, +// the product code is no longer associated with the instance. +// +// For more information, see Detaching an Amazon EBS Volume (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-detaching-volume.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DetachVolume(input *DetachVolumeInput) (*VolumeAttachment, error) { + req, out := c.DetachVolumeRequest(input) + err := req.Send() + return out, err +} + +const opDetachVpnGateway = "DetachVpnGateway" + +// DetachVpnGatewayRequest generates a "aws/request.Request" representing the +// client's request for the DetachVpnGateway operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DetachVpnGateway method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DetachVpnGatewayRequest method. 
+// req, resp := client.DetachVpnGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DetachVpnGatewayRequest(input *DetachVpnGatewayInput) (req *request.Request, output *DetachVpnGatewayOutput) { + op := &request.Operation{ + Name: opDetachVpnGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachVpnGatewayInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DetachVpnGatewayOutput{} + req.Data = output + return +} + +// Detaches a virtual private gateway from a VPC. You do this if you're planning +// to turn off the VPC and not use it anymore. You can confirm a virtual private +// gateway has been completely detached from a VPC by describing the virtual +// private gateway (any attachments to the virtual private gateway are also +// described). +// +// You must wait for the attachment's state to switch to detached before you +// can delete the VPC or attach a different VPC to the virtual private gateway. +func (c *EC2) DetachVpnGateway(input *DetachVpnGatewayInput) (*DetachVpnGatewayOutput, error) { + req, out := c.DetachVpnGatewayRequest(input) + err := req.Send() + return out, err +} + +const opDisableVgwRoutePropagation = "DisableVgwRoutePropagation" + +// DisableVgwRoutePropagationRequest generates a "aws/request.Request" representing the +// client's request for the DisableVgwRoutePropagation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DisableVgwRoutePropagation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DisableVgwRoutePropagationRequest method. +// req, resp := client.DisableVgwRoutePropagationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DisableVgwRoutePropagationRequest(input *DisableVgwRoutePropagationInput) (req *request.Request, output *DisableVgwRoutePropagationOutput) { + op := &request.Operation{ + Name: opDisableVgwRoutePropagation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableVgwRoutePropagationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DisableVgwRoutePropagationOutput{} + req.Data = output + return +} + +// Disables a virtual private gateway (VGW) from propagating routes to a specified +// route table of a VPC. 
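+//
+// A minimal usage sketch (hedged: it assumes a configured session and the
+// usual aws, ec2, and session imports; both IDs below are placeholders):
+//
+//	svc := ec2.New(session.Must(session.NewSession()))
+//	_, err := svc.DisableVgwRoutePropagation(&ec2.DisableVgwRoutePropagationInput{
+//	    GatewayId:    aws.String("vgw-1a2b3c4d"), // placeholder virtual private gateway ID
+//	    RouteTableId: aws.String("rtb-1a2b3c4d"), // placeholder route table ID
+//	})
+//	if err != nil {
+//	    fmt.Println(err)
+//	}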
+func (c *EC2) DisableVgwRoutePropagation(input *DisableVgwRoutePropagationInput) (*DisableVgwRoutePropagationOutput, error) { + req, out := c.DisableVgwRoutePropagationRequest(input) + err := req.Send() + return out, err +} + +const opDisableVpcClassicLink = "DisableVpcClassicLink" + +// DisableVpcClassicLinkRequest generates a "aws/request.Request" representing the +// client's request for the DisableVpcClassicLink operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DisableVpcClassicLink method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DisableVpcClassicLinkRequest method. +// req, resp := client.DisableVpcClassicLinkRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DisableVpcClassicLinkRequest(input *DisableVpcClassicLinkInput) (req *request.Request, output *DisableVpcClassicLinkOutput) { + op := &request.Operation{ + Name: opDisableVpcClassicLink, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableVpcClassicLinkInput{} + } + + req = c.newRequest(op, input, output) + output = &DisableVpcClassicLinkOutput{} + req.Data = output + return +} + +// Disables ClassicLink for a VPC. You cannot disable ClassicLink for a VPC +// that has EC2-Classic instances linked to it. +func (c *EC2) DisableVpcClassicLink(input *DisableVpcClassicLinkInput) (*DisableVpcClassicLinkOutput, error) { + req, out := c.DisableVpcClassicLinkRequest(input) + err := req.Send() + return out, err +} + +const opDisableVpcClassicLinkDnsSupport = "DisableVpcClassicLinkDnsSupport" + +// DisableVpcClassicLinkDnsSupportRequest generates a "aws/request.Request" representing the +// client's request for the DisableVpcClassicLinkDnsSupport operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DisableVpcClassicLinkDnsSupport method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DisableVpcClassicLinkDnsSupportRequest method. 
+// req, resp := client.DisableVpcClassicLinkDnsSupportRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DisableVpcClassicLinkDnsSupportRequest(input *DisableVpcClassicLinkDnsSupportInput) (req *request.Request, output *DisableVpcClassicLinkDnsSupportOutput) { + op := &request.Operation{ + Name: opDisableVpcClassicLinkDnsSupport, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableVpcClassicLinkDnsSupportInput{} + } + + req = c.newRequest(op, input, output) + output = &DisableVpcClassicLinkDnsSupportOutput{} + req.Data = output + return +} + +// Disables ClassicLink DNS support for a VPC. If disabled, DNS hostnames resolve +// to public IP addresses when addressed between a linked EC2-Classic instance +// and instances in the VPC to which it's linked. For more information about +// ClassicLink, see ClassicLink (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DisableVpcClassicLinkDnsSupport(input *DisableVpcClassicLinkDnsSupportInput) (*DisableVpcClassicLinkDnsSupportOutput, error) { + req, out := c.DisableVpcClassicLinkDnsSupportRequest(input) + err := req.Send() + return out, err +} + +const opDisassociateAddress = "DisassociateAddress" + +// DisassociateAddressRequest generates a "aws/request.Request" representing the +// client's request for the DisassociateAddress operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DisassociateAddress method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DisassociateAddressRequest method. +// req, resp := client.DisassociateAddressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DisassociateAddressRequest(input *DisassociateAddressInput) (req *request.Request, output *DisassociateAddressOutput) { + op := &request.Operation{ + Name: opDisassociateAddress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisassociateAddressInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DisassociateAddressOutput{} + req.Data = output + return +} + +// Disassociates an Elastic IP address from the instance or network interface +// it's associated with. +// +// An Elastic IP address is for use in either the EC2-Classic platform or in +// a VPC. For more information, see Elastic IP Addresses (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// This is an idempotent operation. If you perform the operation more than +// once, Amazon EC2 doesn't return an error. 
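+//
+// A minimal usage sketch (hedged: assumes a configured session; the
+// association ID is a placeholder — EC2-VPC addresses use AssociationId,
+// while EC2-Classic addresses use PublicIp instead):
+//
+//	svc := ec2.New(session.Must(session.NewSession()))
+//	_, err := svc.DisassociateAddress(&ec2.DisassociateAddressInput{
+//	    AssociationId: aws.String("eipassoc-12345678"), // placeholder association ID
+//	})
+//	if err != nil {
+//	    fmt.Println(err)
+//	}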
+func (c *EC2) DisassociateAddress(input *DisassociateAddressInput) (*DisassociateAddressOutput, error) { + req, out := c.DisassociateAddressRequest(input) + err := req.Send() + return out, err +} + +const opDisassociateRouteTable = "DisassociateRouteTable" + +// DisassociateRouteTableRequest generates a "aws/request.Request" representing the +// client's request for the DisassociateRouteTable operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DisassociateRouteTable method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DisassociateRouteTableRequest method. +// req, resp := client.DisassociateRouteTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DisassociateRouteTableRequest(input *DisassociateRouteTableInput) (req *request.Request, output *DisassociateRouteTableOutput) { + op := &request.Operation{ + Name: opDisassociateRouteTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisassociateRouteTableInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DisassociateRouteTableOutput{} + req.Data = output + return +} + +// Disassociates a subnet from a route table. +// +// After you perform this action, the subnet no longer uses the routes in the +// route table. Instead, it uses the routes in the VPC's main route table. For +// more information about route tables, see Route Tables (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) DisassociateRouteTable(input *DisassociateRouteTableInput) (*DisassociateRouteTableOutput, error) { + req, out := c.DisassociateRouteTableRequest(input) + err := req.Send() + return out, err +} + +const opEnableVgwRoutePropagation = "EnableVgwRoutePropagation" + +// EnableVgwRoutePropagationRequest generates a "aws/request.Request" representing the +// client's request for the EnableVgwRoutePropagation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the EnableVgwRoutePropagation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EnableVgwRoutePropagationRequest method. 
+// req, resp := client.EnableVgwRoutePropagationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) EnableVgwRoutePropagationRequest(input *EnableVgwRoutePropagationInput) (req *request.Request, output *EnableVgwRoutePropagationOutput) { + op := &request.Operation{ + Name: opEnableVgwRoutePropagation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableVgwRoutePropagationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &EnableVgwRoutePropagationOutput{} + req.Data = output + return +} + +// Enables a virtual private gateway (VGW) to propagate routes to the specified +// route table of a VPC. +func (c *EC2) EnableVgwRoutePropagation(input *EnableVgwRoutePropagationInput) (*EnableVgwRoutePropagationOutput, error) { + req, out := c.EnableVgwRoutePropagationRequest(input) + err := req.Send() + return out, err +} + +const opEnableVolumeIO = "EnableVolumeIO" + +// EnableVolumeIORequest generates a "aws/request.Request" representing the +// client's request for the EnableVolumeIO operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the EnableVolumeIO method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EnableVolumeIORequest method. +// req, resp := client.EnableVolumeIORequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) EnableVolumeIORequest(input *EnableVolumeIOInput) (req *request.Request, output *EnableVolumeIOOutput) { + op := &request.Operation{ + Name: opEnableVolumeIO, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableVolumeIOInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &EnableVolumeIOOutput{} + req.Data = output + return +} + +// Enables I/O operations for a volume that had I/O operations disabled because +// the data on the volume was potentially inconsistent. +func (c *EC2) EnableVolumeIO(input *EnableVolumeIOInput) (*EnableVolumeIOOutput, error) { + req, out := c.EnableVolumeIORequest(input) + err := req.Send() + return out, err +} + +const opEnableVpcClassicLink = "EnableVpcClassicLink" + +// EnableVpcClassicLinkRequest generates a "aws/request.Request" representing the +// client's request for the EnableVpcClassicLink operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the EnableVpcClassicLink method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EnableVpcClassicLinkRequest method. +// req, resp := client.EnableVpcClassicLinkRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) EnableVpcClassicLinkRequest(input *EnableVpcClassicLinkInput) (req *request.Request, output *EnableVpcClassicLinkOutput) { + op := &request.Operation{ + Name: opEnableVpcClassicLink, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableVpcClassicLinkInput{} + } + + req = c.newRequest(op, input, output) + output = &EnableVpcClassicLinkOutput{} + req.Data = output + return +} + +// Enables a VPC for ClassicLink. You can then link EC2-Classic instances to +// your ClassicLink-enabled VPC to allow communication over private IP addresses. +// You cannot enable your VPC for ClassicLink if any of your VPC's route tables +// have existing routes for address ranges within the 10.0.0.0/8 IP address +// range, excluding local routes for VPCs in the 10.0.0.0/16 and 10.1.0.0/16 +// IP address ranges. For more information, see ClassicLink (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) EnableVpcClassicLink(input *EnableVpcClassicLinkInput) (*EnableVpcClassicLinkOutput, error) { + req, out := c.EnableVpcClassicLinkRequest(input) + err := req.Send() + return out, err +} + +const opEnableVpcClassicLinkDnsSupport = "EnableVpcClassicLinkDnsSupport" + +// EnableVpcClassicLinkDnsSupportRequest generates a "aws/request.Request" representing the +// client's request for the EnableVpcClassicLinkDnsSupport operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the EnableVpcClassicLinkDnsSupport method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EnableVpcClassicLinkDnsSupportRequest method. +// req, resp := client.EnableVpcClassicLinkDnsSupportRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) EnableVpcClassicLinkDnsSupportRequest(input *EnableVpcClassicLinkDnsSupportInput) (req *request.Request, output *EnableVpcClassicLinkDnsSupportOutput) { + op := &request.Operation{ + Name: opEnableVpcClassicLinkDnsSupport, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableVpcClassicLinkDnsSupportInput{} + } + + req = c.newRequest(op, input, output) + output = &EnableVpcClassicLinkDnsSupportOutput{} + req.Data = output + return +} + +// Enables a VPC to support DNS hostname resolution for ClassicLink. If enabled, +// the DNS hostname of a linked EC2-Classic instance resolves to its private +// IP address when addressed from an instance in the VPC to which it's linked. 
+// Similarly, the DNS hostname of an instance in a VPC resolves to its private +// IP address when addressed from a linked EC2-Classic instance. For more information +// about ClassicLink, see ClassicLink (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) EnableVpcClassicLinkDnsSupport(input *EnableVpcClassicLinkDnsSupportInput) (*EnableVpcClassicLinkDnsSupportOutput, error) { + req, out := c.EnableVpcClassicLinkDnsSupportRequest(input) + err := req.Send() + return out, err +} + +const opGetConsoleOutput = "GetConsoleOutput" + +// GetConsoleOutputRequest generates a "aws/request.Request" representing the +// client's request for the GetConsoleOutput operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetConsoleOutput method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetConsoleOutputRequest method. +// req, resp := client.GetConsoleOutputRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) GetConsoleOutputRequest(input *GetConsoleOutputInput) (req *request.Request, output *GetConsoleOutputOutput) { + op := &request.Operation{ + Name: opGetConsoleOutput, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetConsoleOutputInput{} + } + + req = c.newRequest(op, input, output) + output = &GetConsoleOutputOutput{} + req.Data = output + return +} + +// Gets the console output for the specified instance. +// +// Instances do not have a physical monitor through which you can view their +// console output. They also lack physical controls that allow you to power +// up, reboot, or shut them down. To allow these actions, we provide them through +// the Amazon EC2 API and command line interface. +// +// Instance console output is buffered and posted shortly after instance boot, +// reboot, and termination. Amazon EC2 preserves the most recent 64 KB of output, +// which is available for at least one hour after the most recent post. +// +// For Linux instances, the instance console output displays the exact console +// output that would normally be displayed on a physical monitor attached to +// a computer. This output is buffered because the instance produces it and +// then posts it to a store where the instance's owner can retrieve it. +// +// For Windows instances, the instance console output includes output from +// the EC2Config service. +func (c *EC2) GetConsoleOutput(input *GetConsoleOutputInput) (*GetConsoleOutputOutput, error) { + req, out := c.GetConsoleOutputRequest(input) + err := req.Send() + return out, err +} + +const opGetConsoleScreenshot = "GetConsoleScreenshot" + +// GetConsoleScreenshotRequest generates a "aws/request.Request" representing the +// client's request for the GetConsoleScreenshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called.
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetConsoleScreenshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetConsoleScreenshotRequest method. +// req, resp := client.GetConsoleScreenshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) GetConsoleScreenshotRequest(input *GetConsoleScreenshotInput) (req *request.Request, output *GetConsoleScreenshotOutput) { + op := &request.Operation{ + Name: opGetConsoleScreenshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetConsoleScreenshotInput{} + } + + req = c.newRequest(op, input, output) + output = &GetConsoleScreenshotOutput{} + req.Data = output + return +} + +// Retrieves a JPG-format screenshot of a running instance to help with troubleshooting. +// +// The returned content is Base64-encoded. +func (c *EC2) GetConsoleScreenshot(input *GetConsoleScreenshotInput) (*GetConsoleScreenshotOutput, error) { + req, out := c.GetConsoleScreenshotRequest(input) + err := req.Send() + return out, err +} + +const opGetPasswordData = "GetPasswordData" + +// GetPasswordDataRequest generates a "aws/request.Request" representing the +// client's request for the GetPasswordData operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetPasswordData method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetPasswordDataRequest method. +// req, resp := client.GetPasswordDataRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) GetPasswordDataRequest(input *GetPasswordDataInput) (req *request.Request, output *GetPasswordDataOutput) { + op := &request.Operation{ + Name: opGetPasswordData, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetPasswordDataInput{} + } + + req = c.newRequest(op, input, output) + output = &GetPasswordDataOutput{} + req.Data = output + return +} + +// Retrieves the encrypted administrator password for an instance running Windows. +// +// The Windows password is generated at boot if the EC2Config service plugin, +// Ec2SetPassword, is enabled. This usually only happens the first time an AMI +// is launched, and then Ec2SetPassword is automatically disabled. The password +// is not generated for rebundled AMIs unless Ec2SetPassword is enabled before +// bundling. +// +// The password is encrypted using the key pair that you specified when you +// launched the instance. You must provide the corresponding key pair file. +// +// Password generation and encryption take a few moments.
We recommend that +// you wait up to 15 minutes after launching an instance before trying to retrieve +// the generated password. +func (c *EC2) GetPasswordData(input *GetPasswordDataInput) (*GetPasswordDataOutput, error) { + req, out := c.GetPasswordDataRequest(input) + err := req.Send() + return out, err +} + +const opImportImage = "ImportImage" + +// ImportImageRequest generates a "aws/request.Request" representing the +// client's request for the ImportImage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ImportImage method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ImportImageRequest method. +// req, resp := client.ImportImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) ImportImageRequest(input *ImportImageInput) (req *request.Request, output *ImportImageOutput) { + op := &request.Operation{ + Name: opImportImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ImportImageInput{} + } + + req = c.newRequest(op, input, output) + output = &ImportImageOutput{} + req.Data = output + return +} + +// Imports single or multi-volume disk images or EBS snapshots into an Amazon +// Machine Image (AMI). +func (c *EC2) ImportImage(input *ImportImageInput) (*ImportImageOutput, error) { + req, out := c.ImportImageRequest(input) + err := req.Send() + return out, err +} + +const opImportInstance = "ImportInstance" + +// ImportInstanceRequest generates a "aws/request.Request" representing the +// client's request for the ImportInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ImportInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ImportInstanceRequest method. +// req, resp := client.ImportInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) ImportInstanceRequest(input *ImportInstanceInput) (req *request.Request, output *ImportInstanceOutput) { + op := &request.Operation{ + Name: opImportInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ImportInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &ImportInstanceOutput{} + req.Data = output + return +} + +// Creates an import instance task using metadata from the specified disk image. +// ImportInstance only supports single-volume VMs. To import multi-volume VMs, +// use ImportImage.
After importing the image, you then upload it using the +// ec2-import-volume command in the EC2 command line tools. For more information, +// see Using the Command Line Tools to Import Your Virtual Machine to Amazon +// EC2 (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UploadingYourInstancesandVolumes.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// For information about the import manifest referenced by this API action, +// see VM Import Manifest (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/manifest.html). +func (c *EC2) ImportInstance(input *ImportInstanceInput) (*ImportInstanceOutput, error) { + req, out := c.ImportInstanceRequest(input) + err := req.Send() + return out, err +} + +const opImportKeyPair = "ImportKeyPair" + +// ImportKeyPairRequest generates a "aws/request.Request" representing the +// client's request for the ImportKeyPair operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ImportKeyPair method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ImportKeyPairRequest method. +// req, resp := client.ImportKeyPairRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) ImportKeyPairRequest(input *ImportKeyPairInput) (req *request.Request, output *ImportKeyPairOutput) { + op := &request.Operation{ + Name: opImportKeyPair, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ImportKeyPairInput{} + } + + req = c.newRequest(op, input, output) + output = &ImportKeyPairOutput{} + req.Data = output + return +} + +// Imports the public key from an RSA key pair that you created with a third-party +// tool. Compare this with CreateKeyPair, in which AWS creates the key pair +// and gives the keys to you (AWS keeps a copy of the public key). With ImportKeyPair, +// you create the key pair and give AWS just the public key. The private key +// is never transferred between you and AWS. +// +// For more information about key pairs, see Key Pairs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) ImportKeyPair(input *ImportKeyPairInput) (*ImportKeyPairOutput, error) { + req, out := c.ImportKeyPairRequest(input) + err := req.Send() + return out, err +} + +const opImportSnapshot = "ImportSnapshot" + +// ImportSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the ImportSnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ImportSnapshot method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ImportSnapshotRequest method. +// req, resp := client.ImportSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) ImportSnapshotRequest(input *ImportSnapshotInput) (req *request.Request, output *ImportSnapshotOutput) { + op := &request.Operation{ + Name: opImportSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ImportSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &ImportSnapshotOutput{} + req.Data = output + return +} + +// Imports a disk into an EBS snapshot. +func (c *EC2) ImportSnapshot(input *ImportSnapshotInput) (*ImportSnapshotOutput, error) { + req, out := c.ImportSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opImportVolume = "ImportVolume" + +// ImportVolumeRequest generates a "aws/request.Request" representing the +// client's request for the ImportVolume operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ImportVolume method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ImportVolumeRequest method. +// req, resp := client.ImportVolumeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) ImportVolumeRequest(input *ImportVolumeInput) (req *request.Request, output *ImportVolumeOutput) { + op := &request.Operation{ + Name: opImportVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ImportVolumeInput{} + } + + req = c.newRequest(op, input, output) + output = &ImportVolumeOutput{} + req.Data = output + return +} + +// Creates an import volume task using metadata from the specified disk image. +// After importing the image, you then upload it using the ec2-import-volume +// command in the Amazon EC2 command-line interface (CLI) tools. For more information, +// see Using the Command Line Tools to Import Your Virtual Machine to Amazon +// EC2 (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UploadingYourInstancesandVolumes.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// For information about the import manifest referenced by this API action, +// see VM Import Manifest (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/manifest.html). +func (c *EC2) ImportVolume(input *ImportVolumeInput) (*ImportVolumeOutput, error) { + req, out := c.ImportVolumeRequest(input) + err := req.Send() + return out, err +} + +const opModifyHosts = "ModifyHosts" + +// ModifyHostsRequest generates a "aws/request.Request" representing the +// client's request for the ModifyHosts operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyHosts method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyHostsRequest method. +// req, resp := client.ModifyHostsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) ModifyHostsRequest(input *ModifyHostsInput) (req *request.Request, output *ModifyHostsOutput) { + op := &request.Operation{ + Name: opModifyHosts, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyHostsInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyHostsOutput{} + req.Data = output + return +} + +// Modifies the auto-placement setting of a Dedicated host. When auto-placement +// is enabled, AWS will place instances that you launch with a tenancy of host, +// but without targeting a specific host ID, onto any available Dedicated host +// in your account that has auto-placement enabled. When auto-placement is +// disabled, you need to provide a host ID if you want the instance to launch +// onto a specific host. If no host ID is provided, the instance will be launched +// onto a suitable host that has auto-placement enabled. +func (c *EC2) ModifyHosts(input *ModifyHostsInput) (*ModifyHostsOutput, error) { + req, out := c.ModifyHostsRequest(input) + err := req.Send() + return out, err +} + +const opModifyIdFormat = "ModifyIdFormat" + +// ModifyIdFormatRequest generates a "aws/request.Request" representing the +// client's request for the ModifyIdFormat operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyIdFormat method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyIdFormatRequest method. +// req, resp := client.ModifyIdFormatRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) ModifyIdFormatRequest(input *ModifyIdFormatInput) (req *request.Request, output *ModifyIdFormatOutput) { + op := &request.Operation{ + Name: opModifyIdFormat, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyIdFormatInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ModifyIdFormatOutput{} + req.Data = output + return +} + +// Modifies the ID format for the specified resource on a per-region basis. +// You can specify that resources should receive longer IDs (17-character IDs) +// when they are created.
The following resource types support longer IDs: instance +// | reservation | snapshot | volume. +// +// This setting applies to the IAM user who makes the request; it does not +// apply to the entire AWS account. An IAM user defaults to the +// same settings as the root user. If you're using this action as the root user, +// then these settings apply to the entire account, unless an IAM user explicitly +// overrides these settings for themselves. For more information, see Resource +// IDs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/resource-ids.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Resources created with longer IDs are visible to all IAM roles and users, +// regardless of these settings and provided that they have permission to use +// the relevant Describe command for the resource type. +func (c *EC2) ModifyIdFormat(input *ModifyIdFormatInput) (*ModifyIdFormatOutput, error) { + req, out := c.ModifyIdFormatRequest(input) + err := req.Send() + return out, err +} + +const opModifyIdentityIdFormat = "ModifyIdentityIdFormat" + +// ModifyIdentityIdFormatRequest generates a "aws/request.Request" representing the +// client's request for the ModifyIdentityIdFormat operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyIdentityIdFormat method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyIdentityIdFormatRequest method. +// req, resp := client.ModifyIdentityIdFormatRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) ModifyIdentityIdFormatRequest(input *ModifyIdentityIdFormatInput) (req *request.Request, output *ModifyIdentityIdFormatOutput) { + op := &request.Operation{ + Name: opModifyIdentityIdFormat, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyIdentityIdFormatInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ModifyIdentityIdFormatOutput{} + req.Data = output + return +} + +// Modifies the ID format of a resource for the specified IAM user, IAM role, +// or root user. You can specify that resources should receive longer IDs (17-character +// IDs) when they are created. The following resource types support longer IDs: +// instance | reservation | snapshot | volume. For more information, see Resource +// IDs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/resource-ids.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// This setting applies to the principal specified in the request; it does +// not apply to the principal that makes the request. +// +// Resources created with longer IDs are visible to all IAM roles and users, +// regardless of these settings and provided that they have permission to use +// the relevant Describe command for the resource type.
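+//
+// A minimal usage sketch (hedged: assumes a configured session; the principal
+// ARN below is a placeholder, and Resource/UseLongIds are set for illustration):
+//
+//	svc := ec2.New(session.Must(session.NewSession()))
+//	_, err := svc.ModifyIdentityIdFormat(&ec2.ModifyIdentityIdFormatInput{
+//	    PrincipalArn: aws.String("arn:aws:iam::123456789012:user/example"), // placeholder ARN
+//	    Resource:     aws.String("instance"),
+//	    UseLongIds:   aws.Bool(true),
+//	})
+//	if err != nil {
+//	    fmt.Println(err)
+//	}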
+func (c *EC2) ModifyIdentityIdFormat(input *ModifyIdentityIdFormatInput) (*ModifyIdentityIdFormatOutput, error) { + req, out := c.ModifyIdentityIdFormatRequest(input) + err := req.Send() + return out, err +} + +const opModifyImageAttribute = "ModifyImageAttribute" + +// ModifyImageAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ModifyImageAttribute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyImageAttribute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyImageAttributeRequest method. +// req, resp := client.ModifyImageAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) ModifyImageAttributeRequest(input *ModifyImageAttributeInput) (req *request.Request, output *ModifyImageAttributeOutput) { + op := &request.Operation{ + Name: opModifyImageAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyImageAttributeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ModifyImageAttributeOutput{} + req.Data = output + return +} + +// Modifies the specified attribute of the specified AMI. You can specify only +// one attribute at a time. +// +// AWS Marketplace product codes cannot be modified. Images with an AWS Marketplace +// product code cannot be made public. +func (c *EC2) ModifyImageAttribute(input *ModifyImageAttributeInput) (*ModifyImageAttributeOutput, error) { + req, out := c.ModifyImageAttributeRequest(input) + err := req.Send() + return out, err +} + +const opModifyInstanceAttribute = "ModifyInstanceAttribute" + +// ModifyInstanceAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ModifyInstanceAttribute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyInstanceAttribute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyInstanceAttributeRequest method. 
+// req, resp := client.ModifyInstanceAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) ModifyInstanceAttributeRequest(input *ModifyInstanceAttributeInput) (req *request.Request, output *ModifyInstanceAttributeOutput) { + op := &request.Operation{ + Name: opModifyInstanceAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyInstanceAttributeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ModifyInstanceAttributeOutput{} + req.Data = output + return +} + +// Modifies the specified attribute of the specified instance. You can specify +// only one attribute at a time. +// +// To modify some attributes, the instance must be stopped. For more information, +// see Modifying Attributes of a Stopped Instance (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_ChangingAttributesWhileInstanceStopped.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) ModifyInstanceAttribute(input *ModifyInstanceAttributeInput) (*ModifyInstanceAttributeOutput, error) { + req, out := c.ModifyInstanceAttributeRequest(input) + err := req.Send() + return out, err +} + +const opModifyInstancePlacement = "ModifyInstancePlacement" + +// ModifyInstancePlacementRequest generates a "aws/request.Request" representing the +// client's request for the ModifyInstancePlacement operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyInstancePlacement method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyInstancePlacementRequest method. +// req, resp := client.ModifyInstancePlacementRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) ModifyInstancePlacementRequest(input *ModifyInstancePlacementInput) (req *request.Request, output *ModifyInstancePlacementOutput) { + op := &request.Operation{ + Name: opModifyInstancePlacement, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyInstancePlacementInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyInstancePlacementOutput{} + req.Data = output + return +} + +// Sets the instance affinity value for a specific stopped instance and modifies +// the instance tenancy setting. +// +// Instance affinity is disabled by default. When instance affinity is host +// and the instance is not associated with a specific Dedicated host, the next +// time it is launched it will automatically be associated with the host it lands +// on. This relationship persists if the instance is stopped/started or rebooted. +// +// You can modify the host ID associated with a stopped instance. If a stopped +// instance has a new host ID association, the instance will target that host +// when restarted.
+// +// You can modify the tenancy of a stopped instance with a tenancy of host +// or dedicated. +// +// Affinity, hostID, and tenancy are not required parameters, but at least +// one of them must be specified in the request. Affinity and tenancy can be +// modified in the same request, but tenancy can only be modified on instances +// that are stopped. +func (c *EC2) ModifyInstancePlacement(input *ModifyInstancePlacementInput) (*ModifyInstancePlacementOutput, error) { + req, out := c.ModifyInstancePlacementRequest(input) + err := req.Send() + return out, err +} + +const opModifyNetworkInterfaceAttribute = "ModifyNetworkInterfaceAttribute" + +// ModifyNetworkInterfaceAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ModifyNetworkInterfaceAttribute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyNetworkInterfaceAttribute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyNetworkInterfaceAttributeRequest method. +// req, resp := client.ModifyNetworkInterfaceAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) ModifyNetworkInterfaceAttributeRequest(input *ModifyNetworkInterfaceAttributeInput) (req *request.Request, output *ModifyNetworkInterfaceAttributeOutput) { + op := &request.Operation{ + Name: opModifyNetworkInterfaceAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyNetworkInterfaceAttributeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ModifyNetworkInterfaceAttributeOutput{} + req.Data = output + return +} + +// Modifies the specified network interface attribute. You can specify only +// one attribute at a time. +func (c *EC2) ModifyNetworkInterfaceAttribute(input *ModifyNetworkInterfaceAttributeInput) (*ModifyNetworkInterfaceAttributeOutput, error) { + req, out := c.ModifyNetworkInterfaceAttributeRequest(input) + err := req.Send() + return out, err +} + +const opModifyReservedInstances = "ModifyReservedInstances" + +// ModifyReservedInstancesRequest generates a "aws/request.Request" representing the +// client's request for the ModifyReservedInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyReservedInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the ModifyReservedInstancesRequest method. +// req, resp := client.ModifyReservedInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) ModifyReservedInstancesRequest(input *ModifyReservedInstancesInput) (req *request.Request, output *ModifyReservedInstancesOutput) { + op := &request.Operation{ + Name: opModifyReservedInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyReservedInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyReservedInstancesOutput{} + req.Data = output + return +} + +// Modifies the Availability Zone, instance count, instance type, or network +// platform (EC2-Classic or EC2-VPC) of your Reserved Instances. The Reserved +// Instances to be modified must be identical, except for Availability Zone, +// network platform, and instance type. +// +// For more information, see Modifying Reserved Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-modifying.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) ModifyReservedInstances(input *ModifyReservedInstancesInput) (*ModifyReservedInstancesOutput, error) { + req, out := c.ModifyReservedInstancesRequest(input) + err := req.Send() + return out, err +} + +const opModifySnapshotAttribute = "ModifySnapshotAttribute" + +// ModifySnapshotAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ModifySnapshotAttribute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifySnapshotAttribute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifySnapshotAttributeRequest method. +// req, resp := client.ModifySnapshotAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) ModifySnapshotAttributeRequest(input *ModifySnapshotAttributeInput) (req *request.Request, output *ModifySnapshotAttributeOutput) { + op := &request.Operation{ + Name: opModifySnapshotAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifySnapshotAttributeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ModifySnapshotAttributeOutput{} + req.Data = output + return +} + +// Adds or removes permission settings for the specified snapshot. You may add +// or remove specified AWS account IDs from a snapshot's list of create volume +// permissions, but you cannot do both in a single API call. If you need to +// both add and remove account IDs for a snapshot, you must use multiple API +// calls. +// +// Encrypted snapshots and snapshots with AWS Marketplace product codes cannot +// be made public. Snapshots encrypted with your default CMK cannot be shared +// with other accounts. 
+// +// For more information on modifying snapshot permissions, see Sharing Snapshots +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-modifying-snapshot-permissions.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) ModifySnapshotAttribute(input *ModifySnapshotAttributeInput) (*ModifySnapshotAttributeOutput, error) { + req, out := c.ModifySnapshotAttributeRequest(input) + err := req.Send() + return out, err +} + +const opModifySpotFleetRequest = "ModifySpotFleetRequest" + +// ModifySpotFleetRequestRequest generates a "aws/request.Request" representing the +// client's request for the ModifySpotFleetRequest operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifySpotFleetRequest method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifySpotFleetRequestRequest method. +// req, resp := client.ModifySpotFleetRequestRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) ModifySpotFleetRequestRequest(input *ModifySpotFleetRequestInput) (req *request.Request, output *ModifySpotFleetRequestOutput) { + op := &request.Operation{ + Name: opModifySpotFleetRequest, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifySpotFleetRequestInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifySpotFleetRequestOutput{} + req.Data = output + return +} + +// Modifies the specified Spot fleet request. +// +// While the Spot fleet request is being modified, it is in the modifying state. +// +// To scale up your Spot fleet, increase its target capacity. The Spot fleet +// launches the additional Spot instances according to the allocation strategy +// for the Spot fleet request. If the allocation strategy is lowestPrice, the +// Spot fleet launches instances using the Spot pool with the lowest price. +// If the allocation strategy is diversified, the Spot fleet distributes the +// instances across the Spot pools. +// +// To scale down your Spot fleet, decrease its target capacity. First, the +// Spot fleet cancels any open bids that exceed the new target capacity. You +// can request that the Spot fleet terminate Spot instances until the size of +// the fleet no longer exceeds the new target capacity. If the allocation strategy +// is lowestPrice, the Spot fleet terminates the instances with the highest +// price per unit. If the allocation strategy is diversified, the Spot fleet +// terminates instances across the Spot pools. Alternatively, you can request +// that the Spot fleet keep the fleet at its current size, but not replace any +// Spot instances that are interrupted or that you terminate manually. 
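+//
+// A minimal sketch of calling ModifySpotFleetRequest directly, assuming client
+// is an *ec2.EC2 service client; the request ID and target capacity below are
+// hypothetical placeholders:
+//
+//    // Scale the fleet down to a target capacity of two instances.
+//    out, err := client.ModifySpotFleetRequest(&ec2.ModifySpotFleetRequestInput{
+//        SpotFleetRequestId: aws.String("sfr-example"),
+//        TargetCapacity:     aws.Int64(2),
+//    })
+//    if err == nil {
+//        fmt.Println(out) // Return reports whether the modification was accepted
+//    }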
+func (c *EC2) ModifySpotFleetRequest(input *ModifySpotFleetRequestInput) (*ModifySpotFleetRequestOutput, error) { + req, out := c.ModifySpotFleetRequestRequest(input) + err := req.Send() + return out, err +} + +const opModifySubnetAttribute = "ModifySubnetAttribute" + +// ModifySubnetAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ModifySubnetAttribute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifySubnetAttribute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifySubnetAttributeRequest method. +// req, resp := client.ModifySubnetAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) ModifySubnetAttributeRequest(input *ModifySubnetAttributeInput) (req *request.Request, output *ModifySubnetAttributeOutput) { + op := &request.Operation{ + Name: opModifySubnetAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifySubnetAttributeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ModifySubnetAttributeOutput{} + req.Data = output + return +} + +// Modifies a subnet attribute. +func (c *EC2) ModifySubnetAttribute(input *ModifySubnetAttributeInput) (*ModifySubnetAttributeOutput, error) { + req, out := c.ModifySubnetAttributeRequest(input) + err := req.Send() + return out, err +} + +const opModifyVolumeAttribute = "ModifyVolumeAttribute" + +// ModifyVolumeAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ModifyVolumeAttribute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyVolumeAttribute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyVolumeAttributeRequest method. 
+// req, resp := client.ModifyVolumeAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) ModifyVolumeAttributeRequest(input *ModifyVolumeAttributeInput) (req *request.Request, output *ModifyVolumeAttributeOutput) { + op := &request.Operation{ + Name: opModifyVolumeAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyVolumeAttributeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ModifyVolumeAttributeOutput{} + req.Data = output + return +} + +// Modifies a volume attribute. +// +// By default, all I/O operations for the volume are suspended when the data +// on the volume is determined to be potentially inconsistent, to prevent undetectable, +// latent data corruption. The I/O access to the volume can be resumed by first +// enabling I/O access and then checking the data consistency on your volume. +// +// You can change the default behavior to resume I/O operations. We recommend +// that you change this only for boot volumes or for volumes that are stateless +// or disposable. +func (c *EC2) ModifyVolumeAttribute(input *ModifyVolumeAttributeInput) (*ModifyVolumeAttributeOutput, error) { + req, out := c.ModifyVolumeAttributeRequest(input) + err := req.Send() + return out, err +} + +const opModifyVpcAttribute = "ModifyVpcAttribute" + +// ModifyVpcAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ModifyVpcAttribute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyVpcAttribute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyVpcAttributeRequest method. +// req, resp := client.ModifyVpcAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) ModifyVpcAttributeRequest(input *ModifyVpcAttributeInput) (req *request.Request, output *ModifyVpcAttributeOutput) { + op := &request.Operation{ + Name: opModifyVpcAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyVpcAttributeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ModifyVpcAttributeOutput{} + req.Data = output + return +} + +// Modifies the specified attribute of the specified VPC. +func (c *EC2) ModifyVpcAttribute(input *ModifyVpcAttributeInput) (*ModifyVpcAttributeOutput, error) { + req, out := c.ModifyVpcAttributeRequest(input) + err := req.Send() + return out, err +} + +const opModifyVpcEndpoint = "ModifyVpcEndpoint" + +// ModifyVpcEndpointRequest generates a "aws/request.Request" representing the +// client's request for the ModifyVpcEndpoint operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyVpcEndpoint method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyVpcEndpointRequest method. +// req, resp := client.ModifyVpcEndpointRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) ModifyVpcEndpointRequest(input *ModifyVpcEndpointInput) (req *request.Request, output *ModifyVpcEndpointOutput) { + op := &request.Operation{ + Name: opModifyVpcEndpoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyVpcEndpointInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyVpcEndpointOutput{} + req.Data = output + return +} + +// Modifies attributes of a specified VPC endpoint. You can modify the policy +// associated with the endpoint, and you can add and remove route tables associated +// with the endpoint. +func (c *EC2) ModifyVpcEndpoint(input *ModifyVpcEndpointInput) (*ModifyVpcEndpointOutput, error) { + req, out := c.ModifyVpcEndpointRequest(input) + err := req.Send() + return out, err +} + +const opModifyVpcPeeringConnectionOptions = "ModifyVpcPeeringConnectionOptions" + +// ModifyVpcPeeringConnectionOptionsRequest generates a "aws/request.Request" representing the +// client's request for the ModifyVpcPeeringConnectionOptions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyVpcPeeringConnectionOptions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyVpcPeeringConnectionOptionsRequest method. +// req, resp := client.ModifyVpcPeeringConnectionOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) ModifyVpcPeeringConnectionOptionsRequest(input *ModifyVpcPeeringConnectionOptionsInput) (req *request.Request, output *ModifyVpcPeeringConnectionOptionsOutput) { + op := &request.Operation{ + Name: opModifyVpcPeeringConnectionOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyVpcPeeringConnectionOptionsInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyVpcPeeringConnectionOptionsOutput{} + req.Data = output + return +} + +// Modifies the VPC peering connection options on one side of a VPC peering +// connection. You can do the following: +// +// Enable/disable communication over the peering connection between an EC2-Classic +// instance that's linked to your VPC (using ClassicLink) and instances in the +// peer VPC. 
+// +// Enable/disable communication over the peering connection between instances +// in your VPC and an EC2-Classic instance that's linked to the peer VPC. +// +// If the peered VPCs are in different accounts, each owner must initiate +// a separate request to enable or disable communication in either direction, +// depending on whether their VPC was the requester or accepter for the VPC +// peering connection. If the peered VPCs are in the same account, you can modify +// the requester and accepter options in the same request. To confirm which +// VPC is the accepter and requester for a VPC peering connection, use the DescribeVpcPeeringConnections +// command. +func (c *EC2) ModifyVpcPeeringConnectionOptions(input *ModifyVpcPeeringConnectionOptionsInput) (*ModifyVpcPeeringConnectionOptionsOutput, error) { + req, out := c.ModifyVpcPeeringConnectionOptionsRequest(input) + err := req.Send() + return out, err +} + +const opMonitorInstances = "MonitorInstances" + +// MonitorInstancesRequest generates a "aws/request.Request" representing the +// client's request for the MonitorInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the MonitorInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the MonitorInstancesRequest method. +// req, resp := client.MonitorInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) MonitorInstancesRequest(input *MonitorInstancesInput) (req *request.Request, output *MonitorInstancesOutput) { + op := &request.Operation{ + Name: opMonitorInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &MonitorInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &MonitorInstancesOutput{} + req.Data = output + return +} + +// Enables monitoring for a running instance. For more information about monitoring +// instances, see Monitoring Your Instances and Volumes (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) MonitorInstances(input *MonitorInstancesInput) (*MonitorInstancesOutput, error) { + req, out := c.MonitorInstancesRequest(input) + err := req.Send() + return out, err +} + +const opMoveAddressToVpc = "MoveAddressToVpc" + +// MoveAddressToVpcRequest generates a "aws/request.Request" representing the +// client's request for the MoveAddressToVpc operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the MoveAddressToVpc method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the MoveAddressToVpcRequest method. +// req, resp := client.MoveAddressToVpcRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) MoveAddressToVpcRequest(input *MoveAddressToVpcInput) (req *request.Request, output *MoveAddressToVpcOutput) { + op := &request.Operation{ + Name: opMoveAddressToVpc, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &MoveAddressToVpcInput{} + } + + req = c.newRequest(op, input, output) + output = &MoveAddressToVpcOutput{} + req.Data = output + return +} + +// Moves an Elastic IP address from the EC2-Classic platform to the EC2-VPC +// platform. The Elastic IP address must be allocated to your account for more +// than 24 hours, and it must not be associated with an instance. After the +// Elastic IP address is moved, it is no longer available for use in the EC2-Classic +// platform, unless you move it back using the RestoreAddressToClassic request. +// You cannot move an Elastic IP address that was originally allocated for use +// in the EC2-VPC platform to the EC2-Classic platform. +func (c *EC2) MoveAddressToVpc(input *MoveAddressToVpcInput) (*MoveAddressToVpcOutput, error) { + req, out := c.MoveAddressToVpcRequest(input) + err := req.Send() + return out, err +} + +const opPurchaseReservedInstancesOffering = "PurchaseReservedInstancesOffering" + +// PurchaseReservedInstancesOfferingRequest generates a "aws/request.Request" representing the +// client's request for the PurchaseReservedInstancesOffering operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PurchaseReservedInstancesOffering method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PurchaseReservedInstancesOfferingRequest method. +// req, resp := client.PurchaseReservedInstancesOfferingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) PurchaseReservedInstancesOfferingRequest(input *PurchaseReservedInstancesOfferingInput) (req *request.Request, output *PurchaseReservedInstancesOfferingOutput) { + op := &request.Operation{ + Name: opPurchaseReservedInstancesOffering, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PurchaseReservedInstancesOfferingInput{} + } + + req = c.newRequest(op, input, output) + output = &PurchaseReservedInstancesOfferingOutput{} + req.Data = output + return +} + +// Purchases a Reserved Instance for use with your account. With Reserved Instances, +// you obtain a capacity reservation for a certain instance configuration over +// a specified period of time and pay a lower hourly rate compared to On-Demand +// instance pricing. +// +// Use DescribeReservedInstancesOfferings to get a list of Reserved Instance +// offerings that match your specifications. 
After you've purchased a Reserved +// Instance, you can check for your new Reserved Instance with DescribeReservedInstances. +// +// For more information, see Reserved Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/concepts-on-demand-reserved-instances.html) +// and Reserved Instance Marketplace (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) PurchaseReservedInstancesOffering(input *PurchaseReservedInstancesOfferingInput) (*PurchaseReservedInstancesOfferingOutput, error) { + req, out := c.PurchaseReservedInstancesOfferingRequest(input) + err := req.Send() + return out, err +} + +const opPurchaseScheduledInstances = "PurchaseScheduledInstances" + +// PurchaseScheduledInstancesRequest generates a "aws/request.Request" representing the +// client's request for the PurchaseScheduledInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PurchaseScheduledInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PurchaseScheduledInstancesRequest method. +// req, resp := client.PurchaseScheduledInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) PurchaseScheduledInstancesRequest(input *PurchaseScheduledInstancesInput) (req *request.Request, output *PurchaseScheduledInstancesOutput) { + op := &request.Operation{ + Name: opPurchaseScheduledInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PurchaseScheduledInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &PurchaseScheduledInstancesOutput{} + req.Data = output + return +} + +// Purchases one or more Scheduled Instances with the specified schedule. +// +// Scheduled Instances enable you to purchase Amazon EC2 compute capacity by +// the hour for a one-year term. Before you can purchase a Scheduled Instance, +// you must call DescribeScheduledInstanceAvailability to check for available +// schedules and obtain a purchase token. After you purchase a Scheduled Instance, +// you must call RunScheduledInstances during each scheduled time period. +// +// After you purchase a Scheduled Instance, you can't cancel, modify, or resell +// your purchase. +func (c *EC2) PurchaseScheduledInstances(input *PurchaseScheduledInstancesInput) (*PurchaseScheduledInstancesOutput, error) { + req, out := c.PurchaseScheduledInstancesRequest(input) + err := req.Send() + return out, err +} + +const opRebootInstances = "RebootInstances" + +// RebootInstancesRequest generates a "aws/request.Request" representing the +// client's request for the RebootInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RebootInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RebootInstancesRequest method. +// req, resp := client.RebootInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) RebootInstancesRequest(input *RebootInstancesInput) (req *request.Request, output *RebootInstancesOutput) { + op := &request.Operation{ + Name: opRebootInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RebootInstancesInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RebootInstancesOutput{} + req.Data = output + return +} + +// Requests a reboot of one or more instances. This operation is asynchronous; +// it only queues a request to reboot the specified instances. The operation +// succeeds if the instances are valid and belong to you. Requests to reboot +// terminated instances are ignored. +// +// If an instance does not cleanly shut down within four minutes, Amazon EC2 +// performs a hard reboot. +// +// For more information about troubleshooting, see Getting Console Output and +// Rebooting Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-console.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) RebootInstances(input *RebootInstancesInput) (*RebootInstancesOutput, error) { + req, out := c.RebootInstancesRequest(input) + err := req.Send() + return out, err +} + +const opRegisterImage = "RegisterImage" + +// RegisterImageRequest generates a "aws/request.Request" representing the +// client's request for the RegisterImage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RegisterImage method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RegisterImageRequest method. +// req, resp := client.RegisterImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) RegisterImageRequest(input *RegisterImageInput) (req *request.Request, output *RegisterImageOutput) { + op := &request.Operation{ + Name: opRegisterImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterImageInput{} + } + + req = c.newRequest(op, input, output) + output = &RegisterImageOutput{} + req.Data = output + return +} + +// Registers an AMI. When you're creating an AMI, this is the final step you +// must complete before you can launch an instance from the AMI. 
For more information +// about creating AMIs, see Creating Your Own AMIs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/creating-an-ami.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// For Amazon EBS-backed instances, CreateImage creates and registers the +// AMI in a single request, so you don't have to register the AMI yourself. +// +// You can also use RegisterImage to create an Amazon EBS-backed Linux AMI +// from a snapshot of a root device volume. For more information, see Launching +// an Instance from a Snapshot (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_LaunchingInstanceFromSnapshot.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Some Linux distributions, such as Red Hat Enterprise Linux (RHEL) and SUSE +// Linux Enterprise Server (SLES), use the EC2 billingProduct code associated +// with an AMI to verify subscription status for package updates. Creating an +// AMI from an EBS snapshot does not maintain this billing code, and subsequent +// instances launched from such an AMI will not be able to connect to package +// update infrastructure. +// +// Similarly, although you can create a Windows AMI from a snapshot, you can't +// successfully launch an instance from the AMI. +// +// To create Windows AMIs or to create AMIs for Linux operating systems that +// must retain AMI billing codes to work properly, see CreateImage. +// +// If needed, you can deregister an AMI at any time. Any modifications you +// make to an AMI backed by an instance store volume invalidate its registration. +// If you make changes to an image, deregister the previous image and register +// the new image. +// +// You can't register an image where a secondary (non-root) snapshot has AWS +// Marketplace product codes. +func (c *EC2) RegisterImage(input *RegisterImageInput) (*RegisterImageOutput, error) { + req, out := c.RegisterImageRequest(input) + err := req.Send() + return out, err +} + +const opRejectVpcPeeringConnection = "RejectVpcPeeringConnection" + +// RejectVpcPeeringConnectionRequest generates a "aws/request.Request" representing the +// client's request for the RejectVpcPeeringConnection operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RejectVpcPeeringConnection method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RejectVpcPeeringConnectionRequest method. +// req, resp := client.RejectVpcPeeringConnectionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) RejectVpcPeeringConnectionRequest(input *RejectVpcPeeringConnectionInput) (req *request.Request, output *RejectVpcPeeringConnectionOutput) { + op := &request.Operation{ + Name: opRejectVpcPeeringConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RejectVpcPeeringConnectionInput{} + } + + req = c.newRequest(op, input, output) + output = &RejectVpcPeeringConnectionOutput{} + req.Data = output + return +} + +// Rejects a VPC peering connection request.
The VPC peering connection must +// be in the pending-acceptance state. Use the DescribeVpcPeeringConnections +// request to view your outstanding VPC peering connection requests. To delete +// an active VPC peering connection, or to delete a VPC peering connection request +// that you initiated, use DeleteVpcPeeringConnection. +func (c *EC2) RejectVpcPeeringConnection(input *RejectVpcPeeringConnectionInput) (*RejectVpcPeeringConnectionOutput, error) { + req, out := c.RejectVpcPeeringConnectionRequest(input) + err := req.Send() + return out, err +} + +const opReleaseAddress = "ReleaseAddress" + +// ReleaseAddressRequest generates a "aws/request.Request" representing the +// client's request for the ReleaseAddress operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ReleaseAddress method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ReleaseAddressRequest method. +// req, resp := client.ReleaseAddressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) ReleaseAddressRequest(input *ReleaseAddressInput) (req *request.Request, output *ReleaseAddressOutput) { + op := &request.Operation{ + Name: opReleaseAddress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReleaseAddressInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ReleaseAddressOutput{} + req.Data = output + return +} + +// Releases the specified Elastic IP address. +// +// After releasing an Elastic IP address, it is released to the IP address +// pool and might be unavailable to you. Be sure to update your DNS records +// and any servers or devices that communicate with the address. If you attempt +// to release an Elastic IP address that you already released, you'll get an +// AuthFailure error if the address is already allocated to another AWS account. +// +// [EC2-Classic, default VPC] Releasing an Elastic IP address automatically +// disassociates it from any instance that it's associated with. To disassociate +// an Elastic IP address without releasing it, use DisassociateAddress. +// +// [Nondefault VPC] You must use DisassociateAddress to disassociate the Elastic +// IP address before you try to release it. Otherwise, Amazon EC2 returns an +// error (InvalidIPAddress.InUse). +func (c *EC2) ReleaseAddress(input *ReleaseAddressInput) (*ReleaseAddressOutput, error) { + req, out := c.ReleaseAddressRequest(input) + err := req.Send() + return out, err +} + +const opReleaseHosts = "ReleaseHosts" + +// ReleaseHostsRequest generates a "aws/request.Request" representing the +// client's request for the ReleaseHosts operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ReleaseHosts method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ReleaseHostsRequest method. +// req, resp := client.ReleaseHostsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) ReleaseHostsRequest(input *ReleaseHostsInput) (req *request.Request, output *ReleaseHostsOutput) { + op := &request.Operation{ + Name: opReleaseHosts, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReleaseHostsInput{} + } + + req = c.newRequest(op, input, output) + output = &ReleaseHostsOutput{} + req.Data = output + return +} + +// When you no longer want to use a Dedicated host, it can be released. On-Demand +// billing is stopped and the host goes into released state. The host ID of +// Dedicated hosts that have been released can no longer be specified in another +// request, e.g., ModifyHosts. You must stop or terminate all instances on a +// host before it can be released. +// +// When Dedicated hosts are released, it may take some time for them to stop +// counting toward your limit and you may receive capacity errors when trying +// to allocate new Dedicated hosts. Try waiting a few minutes, and then try +// again. +// +// Released hosts will still appear in a DescribeHosts response. +func (c *EC2) ReleaseHosts(input *ReleaseHostsInput) (*ReleaseHostsOutput, error) { + req, out := c.ReleaseHostsRequest(input) + err := req.Send() + return out, err +} + +const opReplaceNetworkAclAssociation = "ReplaceNetworkAclAssociation" + +// ReplaceNetworkAclAssociationRequest generates a "aws/request.Request" representing the +// client's request for the ReplaceNetworkAclAssociation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ReplaceNetworkAclAssociation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ReplaceNetworkAclAssociationRequest method. +// req, resp := client.ReplaceNetworkAclAssociationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) ReplaceNetworkAclAssociationRequest(input *ReplaceNetworkAclAssociationInput) (req *request.Request, output *ReplaceNetworkAclAssociationOutput) { + op := &request.Operation{ + Name: opReplaceNetworkAclAssociation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReplaceNetworkAclAssociationInput{} + } + + req = c.newRequest(op, input, output) + output = &ReplaceNetworkAclAssociationOutput{} + req.Data = output + return +} + +// Changes which network ACL a subnet is associated with.
By default when you +// create a subnet, it's automatically associated with the default network ACL. +// For more information about network ACLs, see Network ACLs (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) ReplaceNetworkAclAssociation(input *ReplaceNetworkAclAssociationInput) (*ReplaceNetworkAclAssociationOutput, error) { + req, out := c.ReplaceNetworkAclAssociationRequest(input) + err := req.Send() + return out, err +} + +const opReplaceNetworkAclEntry = "ReplaceNetworkAclEntry" + +// ReplaceNetworkAclEntryRequest generates a "aws/request.Request" representing the +// client's request for the ReplaceNetworkAclEntry operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ReplaceNetworkAclEntry method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ReplaceNetworkAclEntryRequest method. +// req, resp := client.ReplaceNetworkAclEntryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) ReplaceNetworkAclEntryRequest(input *ReplaceNetworkAclEntryInput) (req *request.Request, output *ReplaceNetworkAclEntryOutput) { + op := &request.Operation{ + Name: opReplaceNetworkAclEntry, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReplaceNetworkAclEntryInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ReplaceNetworkAclEntryOutput{} + req.Data = output + return +} + +// Replaces an entry (rule) in a network ACL. For more information about network +// ACLs, see Network ACLs (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) ReplaceNetworkAclEntry(input *ReplaceNetworkAclEntryInput) (*ReplaceNetworkAclEntryOutput, error) { + req, out := c.ReplaceNetworkAclEntryRequest(input) + err := req.Send() + return out, err +} + +const opReplaceRoute = "ReplaceRoute" + +// ReplaceRouteRequest generates a "aws/request.Request" representing the +// client's request for the ReplaceRoute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ReplaceRoute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ReplaceRouteRequest method. 
+// req, resp := client.ReplaceRouteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) ReplaceRouteRequest(input *ReplaceRouteInput) (req *request.Request, output *ReplaceRouteOutput) { + op := &request.Operation{ + Name: opReplaceRoute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReplaceRouteInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ReplaceRouteOutput{} + req.Data = output + return +} + +// Replaces an existing route within a route table in a VPC. You must provide +// only one of the following: Internet gateway or virtual private gateway, NAT +// instance, NAT gateway, VPC peering connection, or network interface. +// +// For more information about route tables, see Route Tables (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) ReplaceRoute(input *ReplaceRouteInput) (*ReplaceRouteOutput, error) { + req, out := c.ReplaceRouteRequest(input) + err := req.Send() + return out, err +} + +const opReplaceRouteTableAssociation = "ReplaceRouteTableAssociation" + +// ReplaceRouteTableAssociationRequest generates a "aws/request.Request" representing the +// client's request for the ReplaceRouteTableAssociation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ReplaceRouteTableAssociation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ReplaceRouteTableAssociationRequest method. +// req, resp := client.ReplaceRouteTableAssociationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) ReplaceRouteTableAssociationRequest(input *ReplaceRouteTableAssociationInput) (req *request.Request, output *ReplaceRouteTableAssociationOutput) { + op := &request.Operation{ + Name: opReplaceRouteTableAssociation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReplaceRouteTableAssociationInput{} + } + + req = c.newRequest(op, input, output) + output = &ReplaceRouteTableAssociationOutput{} + req.Data = output + return +} + +// Changes the route table associated with a given subnet in a VPC. After the +// operation completes, the subnet uses the routes in the new route table it's +// associated with. For more information about route tables, see Route Tables +// (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// You can also use ReplaceRouteTableAssociation to change which table is the +// main route table in the VPC. You just specify the main route table's association +// ID and the route table to be the new main route table. 
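+//
+// A minimal sketch of calling ReplaceRouteTableAssociation directly, assuming
+// client is an *ec2.EC2 service client; the association and route table IDs
+// are hypothetical placeholders:
+//
+//    out, err := client.ReplaceRouteTableAssociation(&ec2.ReplaceRouteTableAssociationInput{
+//        AssociationId: aws.String("rtbassoc-example"),
+//        RouteTableId:  aws.String("rtb-example"),
+//    })
+//    if err == nil {
+//        // The subnet stays associated, but under a new association ID.
+//        fmt.Println(*out.NewAssociationId)
+//    }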
+func (c *EC2) ReplaceRouteTableAssociation(input *ReplaceRouteTableAssociationInput) (*ReplaceRouteTableAssociationOutput, error) { + req, out := c.ReplaceRouteTableAssociationRequest(input) + err := req.Send() + return out, err +} + +const opReportInstanceStatus = "ReportInstanceStatus" + +// ReportInstanceStatusRequest generates a "aws/request.Request" representing the +// client's request for the ReportInstanceStatus operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ReportInstanceStatus method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ReportInstanceStatusRequest method. +// req, resp := client.ReportInstanceStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) ReportInstanceStatusRequest(input *ReportInstanceStatusInput) (req *request.Request, output *ReportInstanceStatusOutput) { + op := &request.Operation{ + Name: opReportInstanceStatus, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReportInstanceStatusInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ReportInstanceStatusOutput{} + req.Data = output + return +} + +// Submits feedback about the status of an instance. The instance must be in +// the running state. If your experience with the instance differs from the +// instance status returned by DescribeInstanceStatus, use ReportInstanceStatus +// to report your experience with the instance. Amazon EC2 collects this information +// to improve the accuracy of status checks. +// +// Use of this action does not change the value returned by DescribeInstanceStatus. +func (c *EC2) ReportInstanceStatus(input *ReportInstanceStatusInput) (*ReportInstanceStatusOutput, error) { + req, out := c.ReportInstanceStatusRequest(input) + err := req.Send() + return out, err +} + +const opRequestSpotFleet = "RequestSpotFleet" + +// RequestSpotFleetRequest generates a "aws/request.Request" representing the +// client's request for the RequestSpotFleet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RequestSpotFleet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RequestSpotFleetRequest method. 
+// req, resp := client.RequestSpotFleetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) RequestSpotFleetRequest(input *RequestSpotFleetInput) (req *request.Request, output *RequestSpotFleetOutput) { + op := &request.Operation{ + Name: opRequestSpotFleet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RequestSpotFleetInput{} + } + + req = c.newRequest(op, input, output) + output = &RequestSpotFleetOutput{} + req.Data = output + return +} + +// Creates a Spot fleet request. +// +// You can submit a single request that includes multiple launch specifications +// that vary by instance type, AMI, Availability Zone, or subnet. +// +// By default, the Spot fleet requests Spot instances in the Spot pool where +// the price per unit is the lowest. Each launch specification can include its +// own instance weighting that reflects the value of the instance type to your +// application workload. +// +// Alternatively, you can specify that the Spot fleet distribute the target +// capacity across the Spot pools included in its launch specifications. By +// ensuring that the Spot instances in your Spot fleet are in different Spot +// pools, you can improve the availability of your fleet. +// +// For more information, see Spot Fleet Requests (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-requests.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) RequestSpotFleet(input *RequestSpotFleetInput) (*RequestSpotFleetOutput, error) { + req, out := c.RequestSpotFleetRequest(input) + err := req.Send() + return out, err +} + +const opRequestSpotInstances = "RequestSpotInstances" + +// RequestSpotInstancesRequest generates a "aws/request.Request" representing the +// client's request for the RequestSpotInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RequestSpotInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RequestSpotInstancesRequest method. +// req, resp := client.RequestSpotInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) RequestSpotInstancesRequest(input *RequestSpotInstancesInput) (req *request.Request, output *RequestSpotInstancesOutput) { + op := &request.Operation{ + Name: opRequestSpotInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RequestSpotInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &RequestSpotInstancesOutput{} + req.Data = output + return +} + +// Creates a Spot instance request. Spot instances are instances that Amazon +// EC2 launches when the bid price that you specify exceeds the current Spot +// price. Amazon EC2 periodically sets the Spot price based on available Spot +// Instance capacity and current Spot instance requests. 
For more information, +// see Spot Instance Requests (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-requests.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) RequestSpotInstances(input *RequestSpotInstancesInput) (*RequestSpotInstancesOutput, error) { + req, out := c.RequestSpotInstancesRequest(input) + err := req.Send() + return out, err +} + +const opResetImageAttribute = "ResetImageAttribute" + +// ResetImageAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ResetImageAttribute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ResetImageAttribute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ResetImageAttributeRequest method. +// req, resp := client.ResetImageAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) ResetImageAttributeRequest(input *ResetImageAttributeInput) (req *request.Request, output *ResetImageAttributeOutput) { + op := &request.Operation{ + Name: opResetImageAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetImageAttributeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ResetImageAttributeOutput{} + req.Data = output + return +} + +// Resets an attribute of an AMI to its default value. +// +// The productCodes attribute can't be reset. +func (c *EC2) ResetImageAttribute(input *ResetImageAttributeInput) (*ResetImageAttributeOutput, error) { + req, out := c.ResetImageAttributeRequest(input) + err := req.Send() + return out, err +} + +const opResetInstanceAttribute = "ResetInstanceAttribute" + +// ResetInstanceAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ResetInstanceAttribute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ResetInstanceAttribute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ResetInstanceAttributeRequest method. 
+// req, resp := client.ResetInstanceAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) ResetInstanceAttributeRequest(input *ResetInstanceAttributeInput) (req *request.Request, output *ResetInstanceAttributeOutput) { + op := &request.Operation{ + Name: opResetInstanceAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetInstanceAttributeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ResetInstanceAttributeOutput{} + req.Data = output + return +} + +// Resets an attribute of an instance to its default value. To reset the kernel +// or ramdisk, the instance must be in a stopped state. To reset the sourceDestCheck, +// the instance can be either running or stopped. +// +// The sourceDestCheck attribute controls whether source/destination checking +// is enabled. The default value is true, which means checking is enabled. This +// value must be false for a NAT instance to perform NAT. For more information, +// see NAT Instances (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_NAT_Instance.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) ResetInstanceAttribute(input *ResetInstanceAttributeInput) (*ResetInstanceAttributeOutput, error) { + req, out := c.ResetInstanceAttributeRequest(input) + err := req.Send() + return out, err +} + +const opResetNetworkInterfaceAttribute = "ResetNetworkInterfaceAttribute" + +// ResetNetworkInterfaceAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ResetNetworkInterfaceAttribute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ResetNetworkInterfaceAttribute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ResetNetworkInterfaceAttributeRequest method. +// req, resp := client.ResetNetworkInterfaceAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) ResetNetworkInterfaceAttributeRequest(input *ResetNetworkInterfaceAttributeInput) (req *request.Request, output *ResetNetworkInterfaceAttributeOutput) { + op := &request.Operation{ + Name: opResetNetworkInterfaceAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetNetworkInterfaceAttributeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ResetNetworkInterfaceAttributeOutput{} + req.Data = output + return +} + +// Resets a network interface attribute. You can specify only one attribute +// at a time. 
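+//
+// // Editorial sketch, not generated documentation: resetting the interface's
+// // source/destination-check attribute. The interface ID is a placeholder,
+// // and the SourceDestCheck field and its value are assumed from the service
+// // model rather than shown in this file.
+// _, err := client.ResetNetworkInterfaceAttribute(&ec2.ResetNetworkInterfaceAttributeInput{
+//     NetworkInterfaceId: aws.String("eni-12345678"), // hypothetical ID
+//     SourceDestCheck:    aws.String("true"),         // attribute to reset (assumed field and value)
+// })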
+func (c *EC2) ResetNetworkInterfaceAttribute(input *ResetNetworkInterfaceAttributeInput) (*ResetNetworkInterfaceAttributeOutput, error) { + req, out := c.ResetNetworkInterfaceAttributeRequest(input) + err := req.Send() + return out, err +} + +const opResetSnapshotAttribute = "ResetSnapshotAttribute" + +// ResetSnapshotAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ResetSnapshotAttribute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ResetSnapshotAttribute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ResetSnapshotAttributeRequest method. +// req, resp := client.ResetSnapshotAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) ResetSnapshotAttributeRequest(input *ResetSnapshotAttributeInput) (req *request.Request, output *ResetSnapshotAttributeOutput) { + op := &request.Operation{ + Name: opResetSnapshotAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetSnapshotAttributeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ResetSnapshotAttributeOutput{} + req.Data = output + return +} + +// Resets permission settings for the specified snapshot. +// +// For more information on modifying snapshot permissions, see Sharing Snapshots +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-modifying-snapshot-permissions.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) ResetSnapshotAttribute(input *ResetSnapshotAttributeInput) (*ResetSnapshotAttributeOutput, error) { + req, out := c.ResetSnapshotAttributeRequest(input) + err := req.Send() + return out, err +} + +const opRestoreAddressToClassic = "RestoreAddressToClassic" + +// RestoreAddressToClassicRequest generates a "aws/request.Request" representing the +// client's request for the RestoreAddressToClassic operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RestoreAddressToClassic method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RestoreAddressToClassicRequest method. 
+// req, resp := client.RestoreAddressToClassicRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) RestoreAddressToClassicRequest(input *RestoreAddressToClassicInput) (req *request.Request, output *RestoreAddressToClassicOutput) { + op := &request.Operation{ + Name: opRestoreAddressToClassic, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RestoreAddressToClassicInput{} + } + + req = c.newRequest(op, input, output) + output = &RestoreAddressToClassicOutput{} + req.Data = output + return +} + +// Restores an Elastic IP address that was previously moved to the EC2-VPC platform +// back to the EC2-Classic platform. You cannot move an Elastic IP address that +// was originally allocated for use in EC2-VPC. The Elastic IP address must +// not be associated with an instance or network interface. +func (c *EC2) RestoreAddressToClassic(input *RestoreAddressToClassicInput) (*RestoreAddressToClassicOutput, error) { + req, out := c.RestoreAddressToClassicRequest(input) + err := req.Send() + return out, err +} + +const opRevokeSecurityGroupEgress = "RevokeSecurityGroupEgress" + +// RevokeSecurityGroupEgressRequest generates a "aws/request.Request" representing the +// client's request for the RevokeSecurityGroupEgress operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RevokeSecurityGroupEgress method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RevokeSecurityGroupEgressRequest method. +// req, resp := client.RevokeSecurityGroupEgressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) RevokeSecurityGroupEgressRequest(input *RevokeSecurityGroupEgressInput) (req *request.Request, output *RevokeSecurityGroupEgressOutput) { + op := &request.Operation{ + Name: opRevokeSecurityGroupEgress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RevokeSecurityGroupEgressInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RevokeSecurityGroupEgressOutput{} + req.Data = output + return +} + +// [EC2-VPC only] Removes one or more egress rules from a security group for +// EC2-VPC. This action doesn't apply to security groups for use in EC2-Classic. +// The values that you specify in the revoke request (for example, ports) must +// match the existing rule's values for the rule to be revoked. +// +// Each rule consists of the protocol and the CIDR range or source security +// group. For the TCP and UDP protocols, you must also specify the destination +// port or range of ports. For the ICMP protocol, you must also specify the +// ICMP type and code. +// +// Rule changes are propagated to instances within the security group as quickly +// as possible. However, a small delay might occur. 
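+//
+// // Editorial sketch, not generated documentation: revoking a single TCP
+// // egress rule. The group ID and CIDR are placeholders; the top-level rule
+// // fields are assumed from the RevokeSecurityGroupEgressInput definition
+// // later in this file.
+// _, err := client.RevokeSecurityGroupEgress(&ec2.RevokeSecurityGroupEgressInput{
+//     GroupId:    aws.String("sg-12345678"),
+//     IpProtocol: aws.String("tcp"),
+//     FromPort:   aws.Int64(443),
+//     ToPort:     aws.Int64(443),
+//     CidrIp:     aws.String("0.0.0.0/0"),
+// })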
+func (c *EC2) RevokeSecurityGroupEgress(input *RevokeSecurityGroupEgressInput) (*RevokeSecurityGroupEgressOutput, error) { + req, out := c.RevokeSecurityGroupEgressRequest(input) + err := req.Send() + return out, err +} + +const opRevokeSecurityGroupIngress = "RevokeSecurityGroupIngress" + +// RevokeSecurityGroupIngressRequest generates a "aws/request.Request" representing the +// client's request for the RevokeSecurityGroupIngress operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RevokeSecurityGroupIngress method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RevokeSecurityGroupIngressRequest method. +// req, resp := client.RevokeSecurityGroupIngressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) RevokeSecurityGroupIngressRequest(input *RevokeSecurityGroupIngressInput) (req *request.Request, output *RevokeSecurityGroupIngressOutput) { + op := &request.Operation{ + Name: opRevokeSecurityGroupIngress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RevokeSecurityGroupIngressInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RevokeSecurityGroupIngressOutput{} + req.Data = output + return +} + +// Removes one or more ingress rules from a security group. The values that +// you specify in the revoke request (for example, ports) must match the existing +// rule's values for the rule to be removed. +// +// Each rule consists of the protocol and the CIDR range or source security +// group. For the TCP and UDP protocols, you must also specify the destination +// port or range of ports. For the ICMP protocol, you must also specify the +// ICMP type and code. +// +// Rule changes are propagated to instances within the security group as quickly +// as possible. However, a small delay might occur. +func (c *EC2) RevokeSecurityGroupIngress(input *RevokeSecurityGroupIngressInput) (*RevokeSecurityGroupIngressOutput, error) { + req, out := c.RevokeSecurityGroupIngressRequest(input) + err := req.Send() + return out, err +} + +const opRunInstances = "RunInstances" + +// RunInstancesRequest generates a "aws/request.Request" representing the +// client's request for the RunInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RunInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
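+//
+// // Editorial sketch, not generated documentation: the "inject custom logic"
+// // use-case described above, using a handler that logs each attempt before
+// // the request is sent (handler-list API assumed from the aws/request package).
+// req, resp := client.RunInstancesRequest(params)
+// req.Handlers.Send.PushFront(func(r *request.Request) {
+//     fmt.Println("sending", r.Operation.Name)
+// })
+// err := req.Send()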
+// +// // Example sending a request using the RunInstancesRequest method. +// req, resp := client.RunInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) RunInstancesRequest(input *RunInstancesInput) (req *request.Request, output *Reservation) { + op := &request.Operation{ + Name: opRunInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RunInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &Reservation{} + req.Data = output + return +} + +// Launches the specified number of instances using an AMI for which you have +// permissions. +// +// When you launch an instance, it enters the pending state. After the instance +// is ready for you, it enters the running state. To check the state of your +// instance, call DescribeInstances. +// +// To ensure faster instance launches, break up large requests into smaller +// batches. For example, create five separate launch requests for 100 instances +// each instead of one launch request for 500 instances. +// +// To tag your instance, ensure that it is running as CreateTags requires a +// resource ID. For more information about tagging, see Tagging Your Amazon +// EC2 Resources (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html). +// +// If you don't specify a security group when launching an instance, Amazon +// EC2 uses the default security group. For more information, see Security Groups +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// [EC2-VPC only accounts] If you don't specify a subnet in the request, we +// choose a default subnet from your default VPC for you. +// +// [EC2-Classic accounts] If you're launching into EC2-Classic and you don't +// specify an Availability Zone, we choose one for you. +// +// Linux instances have access to the public key of the key pair at boot. You +// can use this key to provide secure access to the instance. Amazon EC2 public +// images use this feature to provide secure access without passwords. For more +// information, see Key Pairs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// You can provide optional user data when launching an instance. For more +// information, see Instance Metadata (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// If any of the AMIs have a product code attached for which the user has not +// subscribed, RunInstances fails. +// +// Some instance types can only be launched into a VPC. If you do not have +// a default VPC, or if you do not specify a subnet ID in the request, RunInstances +// fails. For more information, see Instance Types Available Only in a VPC (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-vpc.html#vpc-only-instance-types). +// +// For more information about troubleshooting, see What To Do If An Instance +// Immediately Terminates (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_InstanceStraightToTerminated.html), +// and Troubleshooting Connecting to Your Instance (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesConnecting.html) +// in the Amazon Elastic Compute Cloud User Guide. 
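+//
+// // Editorial sketch, not generated documentation: launching one t2.micro
+// // instance from a placeholder AMI. The ImageId value is hypothetical, and
+// // MinCount/MaxCount are assumed from the RunInstancesInput definition later
+// // in this file.
+// reservation, err := client.RunInstances(&ec2.RunInstancesInput{
+//     ImageId:      aws.String("ami-12345678"), // hypothetical AMI ID
+//     InstanceType: aws.String("t2.micro"),
+//     MinCount:     aws.Int64(1),
+//     MaxCount:     aws.Int64(1),
+// })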
+func (c *EC2) RunInstances(input *RunInstancesInput) (*Reservation, error) { + req, out := c.RunInstancesRequest(input) + err := req.Send() + return out, err +} + +const opRunScheduledInstances = "RunScheduledInstances" + +// RunScheduledInstancesRequest generates a "aws/request.Request" representing the +// client's request for the RunScheduledInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RunScheduledInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RunScheduledInstancesRequest method. +// req, resp := client.RunScheduledInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) RunScheduledInstancesRequest(input *RunScheduledInstancesInput) (req *request.Request, output *RunScheduledInstancesOutput) { + op := &request.Operation{ + Name: opRunScheduledInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RunScheduledInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &RunScheduledInstancesOutput{} + req.Data = output + return +} + +// Launches the specified Scheduled Instances. +// +// Before you can launch a Scheduled Instance, you must purchase it and obtain +// an identifier using PurchaseScheduledInstances. +// +// You must launch a Scheduled Instance during its scheduled time period. You +// can't stop or reboot a Scheduled Instance, but you can terminate it as needed. +// If you terminate a Scheduled Instance before the current scheduled time period +// ends, you can launch it again after a few minutes. For more information, +// see Scheduled Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-scheduled-instances.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) RunScheduledInstances(input *RunScheduledInstancesInput) (*RunScheduledInstancesOutput, error) { + req, out := c.RunScheduledInstancesRequest(input) + err := req.Send() + return out, err +} + +const opStartInstances = "StartInstances" + +// StartInstancesRequest generates a "aws/request.Request" representing the +// client's request for the StartInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the StartInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the StartInstancesRequest method. 
+// req, resp := client.StartInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) StartInstancesRequest(input *StartInstancesInput) (req *request.Request, output *StartInstancesOutput) { + op := &request.Operation{ + Name: opStartInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &StartInstancesOutput{} + req.Data = output + return +} + +// Starts an Amazon EBS-backed AMI that you've previously stopped. +// +// Instances that use Amazon EBS volumes as their root devices can be quickly +// stopped and started. When an instance is stopped, the compute resources are +// released and you are not billed for hourly instance usage. However, your +// root partition Amazon EBS volume remains, continues to persist your data, +// and you are charged for Amazon EBS volume usage. You can restart your instance +// at any time. Each time you transition an instance from stopped to started, +// Amazon EC2 charges a full instance hour, even if transitions happen multiple +// times within a single hour. +// +// Before stopping an instance, make sure it is in a state from which it can +// be restarted. Stopping an instance does not preserve data stored in RAM. +// +// Performing this operation on an instance that uses an instance store as +// its root device returns an error. +// +// For more information, see Stopping Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Stop_Start.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) StartInstances(input *StartInstancesInput) (*StartInstancesOutput, error) { + req, out := c.StartInstancesRequest(input) + err := req.Send() + return out, err +} + +const opStopInstances = "StopInstances" + +// StopInstancesRequest generates a "aws/request.Request" representing the +// client's request for the StopInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the StopInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the StopInstancesRequest method. +// req, resp := client.StopInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) StopInstancesRequest(input *StopInstancesInput) (req *request.Request, output *StopInstancesOutput) { + op := &request.Operation{ + Name: opStopInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &StopInstancesOutput{} + req.Data = output + return +} + +// Stops an Amazon EBS-backed instance. +// +// We don't charge hourly usage for a stopped instance, or data transfer fees; +// however, your root partition Amazon EBS volume remains, continues to persist +// your data, and you are charged for Amazon EBS volume usage. 
Each time you +// transition an instance from stopped to started, Amazon EC2 charges a full +// instance hour, even if transitions happen multiple times within a single +// hour. +// +// You can't start or stop Spot instances, and you can't stop instance store-backed +// instances. +// +// When you stop an instance, we shut it down. You can restart your instance +// at any time. Before stopping an instance, make sure it is in a state from +// which it can be restarted. Stopping an instance does not preserve data stored +// in RAM. +// +// Stopping an instance is different from rebooting or terminating it. For example, +// when you stop an instance, the root device and any other devices attached +// to the instance persist. When you terminate an instance, the root device +// and any other devices attached during the instance launch are automatically +// deleted. For more information about the differences between rebooting, stopping, +// and terminating instances, see Instance Lifecycle (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// When you stop an instance, we attempt to shut it down forcibly after a short +// while. If your instance appears stuck in the stopping state after a period +// of time, there may be an issue with the underlying host computer. For more +// information, see Troubleshooting Stopping Your Instance (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesStopping.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) StopInstances(input *StopInstancesInput) (*StopInstancesOutput, error) { + req, out := c.StopInstancesRequest(input) + err := req.Send() + return out, err +} + +const opTerminateInstances = "TerminateInstances" + +// TerminateInstancesRequest generates a "aws/request.Request" representing the +// client's request for the TerminateInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the TerminateInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the TerminateInstancesRequest method. +// req, resp := client.TerminateInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) TerminateInstancesRequest(input *TerminateInstancesInput) (req *request.Request, output *TerminateInstancesOutput) { + op := &request.Operation{ + Name: opTerminateInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TerminateInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &TerminateInstancesOutput{} + req.Data = output + return +} + +// Shuts down one or more instances. This operation is idempotent; if you terminate +// an instance more than once, each call succeeds. +// +// Terminated instances remain visible after termination (for approximately +// one hour). +// +// By default, Amazon EC2 deletes all EBS volumes that were attached when the +// instance launched.
Volumes attached after instance launch continue running. +// +// You can stop, start, and terminate EBS-backed instances. You can only terminate +// instance store-backed instances. What happens to an instance differs if you +// stop it or terminate it. For example, when you stop an instance, the root +// device and any other devices attached to the instance persist. When you terminate +// an instance, any attached EBS volumes with the DeleteOnTermination block +// device mapping parameter set to true are automatically deleted. For more +// information about the differences between stopping and terminating instances, +// see Instance Lifecycle (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// For more information about troubleshooting, see Troubleshooting Terminating +// Your Instance (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesShuttingDown.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) TerminateInstances(input *TerminateInstancesInput) (*TerminateInstancesOutput, error) { + req, out := c.TerminateInstancesRequest(input) + err := req.Send() + return out, err +} + +const opUnassignPrivateIpAddresses = "UnassignPrivateIpAddresses" + +// UnassignPrivateIpAddressesRequest generates a "aws/request.Request" representing the +// client's request for the UnassignPrivateIpAddresses operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UnassignPrivateIpAddresses method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UnassignPrivateIpAddressesRequest method. +// req, resp := client.UnassignPrivateIpAddressesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) UnassignPrivateIpAddressesRequest(input *UnassignPrivateIpAddressesInput) (req *request.Request, output *UnassignPrivateIpAddressesOutput) { + op := &request.Operation{ + Name: opUnassignPrivateIpAddresses, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UnassignPrivateIpAddressesInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UnassignPrivateIpAddressesOutput{} + req.Data = output + return +} + +// Unassigns one or more secondary private IP addresses from a network interface. +func (c *EC2) UnassignPrivateIpAddresses(input *UnassignPrivateIpAddressesInput) (*UnassignPrivateIpAddressesOutput, error) { + req, out := c.UnassignPrivateIpAddressesRequest(input) + err := req.Send() + return out, err +} + +const opUnmonitorInstances = "UnmonitorInstances" + +// UnmonitorInstancesRequest generates a "aws/request.Request" representing the +// client's request for the UnmonitorInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UnmonitorInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UnmonitorInstancesRequest method. +// req, resp := client.UnmonitorInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) UnmonitorInstancesRequest(input *UnmonitorInstancesInput) (req *request.Request, output *UnmonitorInstancesOutput) { + op := &request.Operation{ + Name: opUnmonitorInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UnmonitorInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &UnmonitorInstancesOutput{} + req.Data = output + return +} + +// Disables monitoring for a running instance. For more information about monitoring +// instances, see Monitoring Your Instances and Volumes (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) UnmonitorInstances(input *UnmonitorInstancesInput) (*UnmonitorInstancesOutput, error) { + req, out := c.UnmonitorInstancesRequest(input) + err := req.Send() + return out, err +} + +// Contains the parameters for AcceptVpcPeeringConnection. +type AcceptVpcPeeringConnectionInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC peering connection. + VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"` +} + +// String returns the string representation +func (s AcceptVpcPeeringConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AcceptVpcPeeringConnectionInput) GoString() string { + return s.String() +} + +// Contains the output of AcceptVpcPeeringConnection. +type AcceptVpcPeeringConnectionOutput struct { + _ struct{} `type:"structure"` + + // Information about the VPC peering connection. + VpcPeeringConnection *VpcPeeringConnection `locationName:"vpcPeeringConnection" type:"structure"` +} + +// String returns the string representation +func (s AcceptVpcPeeringConnectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AcceptVpcPeeringConnectionOutput) GoString() string { + return s.String() +} + +// Describes an account attribute. +type AccountAttribute struct { + _ struct{} `type:"structure"` + + // The name of the account attribute. + AttributeName *string `locationName:"attributeName" type:"string"` + + // One or more values for the account attribute. 
+ AttributeValues []*AccountAttributeValue `locationName:"attributeValueSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s AccountAttribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccountAttribute) GoString() string { + return s.String() +} + +// Describes a value of an account attribute. +type AccountAttributeValue struct { + _ struct{} `type:"structure"` + + // The value of the attribute. + AttributeValue *string `locationName:"attributeValue" type:"string"` +} + +// String returns the string representation +func (s AccountAttributeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccountAttributeValue) GoString() string { + return s.String() +} + +// Describes a running instance in a Spot fleet. +type ActiveInstance struct { + _ struct{} `type:"structure"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The instance type. + InstanceType *string `locationName:"instanceType" type:"string"` + + // The ID of the Spot instance request. + SpotInstanceRequestId *string `locationName:"spotInstanceRequestId" type:"string"` +} + +// String returns the string representation +func (s ActiveInstance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActiveInstance) GoString() string { + return s.String() +} + +// Describes an Elastic IP address. +type Address struct { + _ struct{} `type:"structure"` + + // The ID representing the allocation of the address for use with EC2-VPC. + AllocationId *string `locationName:"allocationId" type:"string"` + + // The ID representing the association of the address with an instance in a + // VPC. + AssociationId *string `locationName:"associationId" type:"string"` + + // Indicates whether this Elastic IP address is for use with instances in EC2-Classic + // (standard) or instances in a VPC (vpc). + Domain *string `locationName:"domain" type:"string" enum:"DomainType"` + + // The ID of the instance that the address is associated with (if any). + InstanceId *string `locationName:"instanceId" type:"string"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // The ID of the AWS account that owns the network interface. + NetworkInterfaceOwnerId *string `locationName:"networkInterfaceOwnerId" type:"string"` + + // The private IP address associated with the Elastic IP address. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // The Elastic IP address. + PublicIp *string `locationName:"publicIp" type:"string"` +} + +// String returns the string representation +func (s Address) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Address) GoString() string { + return s.String() +} + +// Contains the parameters for AllocateAddress. +type AllocateAddressInput struct { + _ struct{} `type:"structure"` + + // Set to vpc to allocate the address for use with instances in a VPC. + // + // Default: The address is for use with instances in EC2-Classic. + Domain *string `type:"string" enum:"DomainType"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. 
Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` +} + +// String returns the string representation +func (s AllocateAddressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AllocateAddressInput) GoString() string { + return s.String() +} + +// Contains the output of AllocateAddress. +type AllocateAddressOutput struct { + _ struct{} `type:"structure"` + + // [EC2-VPC] The ID that AWS assigns to represent the allocation of the Elastic + // IP address for use with instances in a VPC. + AllocationId *string `locationName:"allocationId" type:"string"` + + // Indicates whether this Elastic IP address is for use with instances in EC2-Classic + // (standard) or instances in a VPC (vpc). + Domain *string `locationName:"domain" type:"string" enum:"DomainType"` + + // The Elastic IP address. + PublicIp *string `locationName:"publicIp" type:"string"` +} + +// String returns the string representation +func (s AllocateAddressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AllocateAddressOutput) GoString() string { + return s.String() +} + +// Contains the parameters for AllocateHosts. +type AllocateHostsInput struct { + _ struct{} `type:"structure"` + + // This is enabled by default. This property allows instances to be automatically + // placed onto available Dedicated hosts, when you are launching instances without + // specifying a host ID. + // + // Default: Enabled + AutoPlacement *string `locationName:"autoPlacement" type:"string" enum:"AutoPlacement"` + + // The Availability Zone for the Dedicated hosts. + AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"` + + // Unique, case-sensitive identifier you provide to ensure idempotency of the + // request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html) + // in the Amazon Elastic Compute Cloud User Guide. + ClientToken *string `locationName:"clientToken" type:"string"` + + // Specify the instance type that you want your Dedicated hosts to be configured + // for. When you specify the instance type, that is the only instance type that + // you can launch onto that host. + InstanceType *string `locationName:"instanceType" type:"string" required:"true"` + + // The number of Dedicated hosts you want to allocate to your account with these + // parameters. + Quantity *int64 `locationName:"quantity" type:"integer" required:"true"` +} + +// String returns the string representation +func (s AllocateHostsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AllocateHostsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AllocateHostsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AllocateHostsInput"} + if s.AvailabilityZone == nil { + invalidParams.Add(request.NewErrParamRequired("AvailabilityZone")) + } + if s.InstanceType == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceType")) + } + if s.Quantity == nil { + invalidParams.Add(request.NewErrParamRequired("Quantity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of AllocateHosts. 
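+//
+// // Editorial sketch, not generated documentation: the required fields above
+// // are enforced by AllocateHostsInput's Validate method, and the allocated
+// // IDs come back in HostIds. All values are placeholders.
+// out, err := client.AllocateHosts(&ec2.AllocateHostsInput{
+//     AvailabilityZone: aws.String("us-east-1a"),
+//     InstanceType:     aws.String("m4.large"),
+//     Quantity:         aws.Int64(1),
+// })
+// if err == nil {
+//     fmt.Println(out.HostIds)
+// }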
+type AllocateHostsOutput struct { + _ struct{} `type:"structure"` + + // The ID of the allocated Dedicated host. This is used when you want to launch + // an instance onto a specific host. + HostIds []*string `locationName:"hostIdSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s AllocateHostsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AllocateHostsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for AssignPrivateIpAddresses. +type AssignPrivateIpAddressesInput struct { + _ struct{} `type:"structure"` + + // Indicates whether to allow an IP address that is already assigned to another + // network interface or instance to be reassigned to the specified network interface. + AllowReassignment *bool `locationName:"allowReassignment" type:"boolean"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"` + + // One or more IP addresses to be assigned as a secondary private IP address + // to the network interface. You can't specify this parameter when also specifying + // a number of secondary IP addresses. + // + // If you don't specify an IP address, Amazon EC2 automatically selects an + // IP address within the subnet range. + PrivateIpAddresses []*string `locationName:"privateIpAddress" locationNameList:"PrivateIpAddress" type:"list"` + + // The number of secondary IP addresses to assign to the network interface. + // You can't specify this parameter when also specifying private IP addresses. + SecondaryPrivateIpAddressCount *int64 `locationName:"secondaryPrivateIpAddressCount" type:"integer"` +} + +// String returns the string representation +func (s AssignPrivateIpAddressesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssignPrivateIpAddressesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssignPrivateIpAddressesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssignPrivateIpAddressesInput"} + if s.NetworkInterfaceId == nil { + invalidParams.Add(request.NewErrParamRequired("NetworkInterfaceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type AssignPrivateIpAddressesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AssignPrivateIpAddressesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssignPrivateIpAddressesOutput) GoString() string { + return s.String() +} + +// Contains the parameters for AssociateAddress. +type AssociateAddressInput struct { + _ struct{} `type:"structure"` + + // [EC2-VPC] The allocation ID. This is required for EC2-VPC. + AllocationId *string `type:"string"` + + // [EC2-VPC] For a VPC in an EC2-Classic account, specify true to allow an Elastic + // IP address that is already associated with an instance or network interface + // to be reassociated with the specified instance or network interface. Otherwise, + // the operation fails. In a VPC in an EC2-VPC-only account, reassociation is + // automatic, therefore you can specify false to ensure the operation fails + // if the Elastic IP address is already associated with another resource. 
+ AllowReassociation *bool `locationName:"allowReassociation" type:"boolean"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance. This is required for EC2-Classic. For EC2-VPC, you + // can specify either the instance ID or the network interface ID, but not both. + // The operation fails if you specify an instance ID unless exactly one network + // interface is attached. + InstanceId *string `type:"string"` + + // [EC2-VPC] The ID of the network interface. If the instance has more than + // one network interface, you must specify a network interface ID. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // [EC2-VPC] The primary or secondary private IP address to associate with the + // Elastic IP address. If no private IP address is specified, the Elastic IP + // address is associated with the primary private IP address. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // The Elastic IP address. This is required for EC2-Classic. + PublicIp *string `type:"string"` +} + +// String returns the string representation +func (s AssociateAddressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateAddressInput) GoString() string { + return s.String() +} + +// Contains the output of AssociateAddress. +type AssociateAddressOutput struct { + _ struct{} `type:"structure"` + + // [EC2-VPC] The ID that represents the association of the Elastic IP address + // with an instance. + AssociationId *string `locationName:"associationId" type:"string"` +} + +// String returns the string representation +func (s AssociateAddressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateAddressOutput) GoString() string { + return s.String() +} + +// Contains the parameters for AssociateDhcpOptions. +type AssociateDhcpOptionsInput struct { + _ struct{} `type:"structure"` + + // The ID of the DHCP options set, or default to associate no DHCP options with + // the VPC. + DhcpOptionsId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. + VpcId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AssociateDhcpOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateDhcpOptionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
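+//
+// // Editorial sketch, not generated documentation: Validate can be called
+// // directly, and "Send" runs the same check before any request is made.
+// in := &ec2.AssociateDhcpOptionsInput{DhcpOptionsId: aws.String("default")}
+// if err := in.Validate(); err != nil {
+//     fmt.Println(err) // reports that the required VpcId field is missing
+// }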
+func (s *AssociateDhcpOptionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssociateDhcpOptionsInput"} + if s.DhcpOptionsId == nil { + invalidParams.Add(request.NewErrParamRequired("DhcpOptionsId")) + } + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type AssociateDhcpOptionsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AssociateDhcpOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateDhcpOptionsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for AssociateRouteTable. +type AssociateRouteTableInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the route table. + RouteTableId *string `locationName:"routeTableId" type:"string" required:"true"` + + // The ID of the subnet. + SubnetId *string `locationName:"subnetId" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssociateRouteTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateRouteTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssociateRouteTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssociateRouteTableInput"} + if s.RouteTableId == nil { + invalidParams.Add(request.NewErrParamRequired("RouteTableId")) + } + if s.SubnetId == nil { + invalidParams.Add(request.NewErrParamRequired("SubnetId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of AssociateRouteTable. +type AssociateRouteTableOutput struct { + _ struct{} `type:"structure"` + + // The route table association ID (needed to disassociate the route table). + AssociationId *string `locationName:"associationId" type:"string"` +} + +// String returns the string representation +func (s AssociateRouteTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateRouteTableOutput) GoString() string { + return s.String() +} + +// Contains the parameters for AttachClassicLinkVpc. +type AttachClassicLinkVpcInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of one or more of the VPC's security groups. You cannot specify security + // groups from a different VPC. + Groups []*string `locationName:"SecurityGroupId" locationNameList:"groupId" type:"list" required:"true"` + + // The ID of an EC2-Classic instance to link to the ClassicLink-enabled VPC. 
+ InstanceId *string `locationName:"instanceId" type:"string" required:"true"` + + // The ID of a ClassicLink-enabled VPC. + VpcId *string `locationName:"vpcId" type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachClassicLinkVpcInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachClassicLinkVpcInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AttachClassicLinkVpcInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AttachClassicLinkVpcInput"} + if s.Groups == nil { + invalidParams.Add(request.NewErrParamRequired("Groups")) + } + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of AttachClassicLinkVpc. +type AttachClassicLinkVpcOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s AttachClassicLinkVpcOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachClassicLinkVpcOutput) GoString() string { + return s.String() +} + +// Contains the parameters for AttachInternetGateway. +type AttachInternetGatewayInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the Internet gateway. + InternetGatewayId *string `locationName:"internetGatewayId" type:"string" required:"true"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachInternetGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachInternetGatewayInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AttachInternetGatewayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AttachInternetGatewayInput"} + if s.InternetGatewayId == nil { + invalidParams.Add(request.NewErrParamRequired("InternetGatewayId")) + } + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type AttachInternetGatewayOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AttachInternetGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachInternetGatewayOutput) GoString() string { + return s.String() +} + +// Contains the parameters for AttachNetworkInterface. +type AttachNetworkInterfaceInput struct { + _ struct{} `type:"structure"` + + // The index of the device for the network interface attachment. 
+ DeviceIndex *int64 `locationName:"deviceIndex" type:"integer" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string" required:"true"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachNetworkInterfaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachNetworkInterfaceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AttachNetworkInterfaceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AttachNetworkInterfaceInput"} + if s.DeviceIndex == nil { + invalidParams.Add(request.NewErrParamRequired("DeviceIndex")) + } + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + if s.NetworkInterfaceId == nil { + invalidParams.Add(request.NewErrParamRequired("NetworkInterfaceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of AttachNetworkInterface. +type AttachNetworkInterfaceOutput struct { + _ struct{} `type:"structure"` + + // The ID of the network interface attachment. + AttachmentId *string `locationName:"attachmentId" type:"string"` +} + +// String returns the string representation +func (s AttachNetworkInterfaceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachNetworkInterfaceOutput) GoString() string { + return s.String() +} + +// Contains the parameters for AttachVolume. +type AttachVolumeInput struct { + _ struct{} `type:"structure"` + + // The device name to expose to the instance (for example, /dev/sdh or xvdh). + Device *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance. + InstanceId *string `type:"string" required:"true"` + + // The ID of the EBS volume. The volume and instance must be within the same + // Availability Zone. + VolumeId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachVolumeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
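// (Editorial sketch, not generated SDK code: Validate runs client-side when a
// request is built, so a missing required field fails before anything is sent
// over the wire. A minimal AttachVolume call, assuming placeholder region and
// resource IDs:
//
//	import (
//		"fmt"
//		"log"
//
//		"github.com/aws/aws-sdk-go/aws"
//		"github.com/aws/aws-sdk-go/aws/session"
//		"github.com/aws/aws-sdk-go/service/ec2"
//	)
//
//	func main() {
//		// Region and all IDs below are illustrative placeholders.
//		sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
//		svc := ec2.New(sess)
//		out, err := svc.AttachVolume(&ec2.AttachVolumeInput{
//			Device:     aws.String("/dev/sdh"),
//			InstanceId: aws.String("i-1234567890abcdef0"),
//			VolumeId:   aws.String("vol-1234567890abcdef0"),
//		})
//		if err != nil {
//			// If Device, InstanceId, or VolumeId were nil, the error would
//			// surface here, produced by the Validate method below.
//			log.Fatal(err)
//		}
//		fmt.Println(out)
//	}
//
// The later sketches in this file reuse the same svc client.)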
+func (s *AttachVolumeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AttachVolumeInput"} + if s.Device == nil { + invalidParams.Add(request.NewErrParamRequired("Device")) + } + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + if s.VolumeId == nil { + invalidParams.Add(request.NewErrParamRequired("VolumeId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the parameters for AttachVpnGateway. +type AttachVpnGatewayInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. + VpcId *string `type:"string" required:"true"` + + // The ID of the virtual private gateway. + VpnGatewayId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachVpnGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachVpnGatewayInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AttachVpnGatewayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AttachVpnGatewayInput"} + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + if s.VpnGatewayId == nil { + invalidParams.Add(request.NewErrParamRequired("VpnGatewayId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of AttachVpnGateway. +type AttachVpnGatewayOutput struct { + _ struct{} `type:"structure"` + + // Information about the attachment. + VpcAttachment *VpcAttachment `locationName:"attachment" type:"structure"` +} + +// String returns the string representation +func (s AttachVpnGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachVpnGatewayOutput) GoString() string { + return s.String() +} + +// Describes a value for a resource attribute that is a Boolean value. +type AttributeBooleanValue struct { + _ struct{} `type:"structure"` + + // The attribute value. The valid values are true or false. + Value *bool `locationName:"value" type:"boolean"` +} + +// String returns the string representation +func (s AttributeBooleanValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttributeBooleanValue) GoString() string { + return s.String() +} + +// Describes a value for a resource attribute that is a String. +type AttributeValue struct { + _ struct{} `type:"structure"` + + // The attribute value. Note that the value is case-sensitive. + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s AttributeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttributeValue) GoString() string { + return s.String() +} + +// Contains the parameters for AuthorizeSecurityGroupEgress. +type AuthorizeSecurityGroupEgressInput struct { + _ struct{} `type:"structure"` + + // The CIDR IP address range. 
We recommend that you specify the CIDR range in + // a set of IP permissions instead. + CidrIp *string `locationName:"cidrIp" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The start of port range for the TCP and UDP protocols, or an ICMP type number. + // We recommend that you specify the port range in a set of IP permissions instead. + FromPort *int64 `locationName:"fromPort" type:"integer"` + + // The ID of the security group. + GroupId *string `locationName:"groupId" type:"string" required:"true"` + + // A set of IP permissions. You can't specify a destination security group and + // a CIDR IP address range. + IpPermissions []*IpPermission `locationName:"ipPermissions" locationNameList:"item" type:"list"` + + // The IP protocol name or number. We recommend that you specify the protocol + // in a set of IP permissions instead. + IpProtocol *string `locationName:"ipProtocol" type:"string"` + + // The name of a destination security group. To authorize outbound access to + // a destination security group, we recommend that you use a set of IP permissions + // instead. + SourceSecurityGroupName *string `locationName:"sourceSecurityGroupName" type:"string"` + + // The AWS account number for a destination security group. To authorize outbound + // access to a destination security group, we recommend that you use a set of + // IP permissions instead. + SourceSecurityGroupOwnerId *string `locationName:"sourceSecurityGroupOwnerId" type:"string"` + + // The end of port range for the TCP and UDP protocols, or an ICMP code number. + // We recommend that you specify the port range in a set of IP permissions instead. + ToPort *int64 `locationName:"toPort" type:"integer"` +} + +// String returns the string representation +func (s AuthorizeSecurityGroupEgressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuthorizeSecurityGroupEgressInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AuthorizeSecurityGroupEgressInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AuthorizeSecurityGroupEgressInput"} + if s.GroupId == nil { + invalidParams.Add(request.NewErrParamRequired("GroupId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type AuthorizeSecurityGroupEgressOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AuthorizeSecurityGroupEgressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuthorizeSecurityGroupEgressOutput) GoString() string { + return s.String() +} + +// Contains the parameters for AuthorizeSecurityGroupIngress. +type AuthorizeSecurityGroupIngressInput struct { + _ struct{} `type:"structure"` + + // The CIDR IP address range. You can't specify this parameter when specifying + // a source security group. + CidrIp *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation.
Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The start of port range for the TCP and UDP protocols, or an ICMP type number. + // For the ICMP type number, use -1 to specify all ICMP types. + FromPort *int64 `type:"integer"` + + // The ID of the security group. Required for a nondefault VPC. + GroupId *string `type:"string"` + + // [EC2-Classic, default VPC] The name of the security group. + GroupName *string `type:"string"` + + // A set of IP permissions. Can be used to specify multiple rules in a single + // command. + IpPermissions []*IpPermission `locationNameList:"item" type:"list"` + + // The IP protocol name (tcp, udp, icmp) or number (see Protocol Numbers (http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml)). + // (VPC only) Use -1 to specify all. + IpProtocol *string `type:"string"` + + // [EC2-Classic, default VPC] The name of the source security group. You can't + // specify this parameter in combination with the following parameters: the + // CIDR IP address range, the start of the port range, the IP protocol, and + // the end of the port range. Creates rules that grant full ICMP, UDP, and TCP + // access. To create a rule with a specific IP protocol and port range, use + // a set of IP permissions instead. For EC2-VPC, the source security group must + // be in the same VPC. + SourceSecurityGroupName *string `type:"string"` + + // [EC2-Classic] The AWS account number for the source security group, if the + // source security group is in a different account. You can't specify this parameter + // in combination with the following parameters: the CIDR IP address range, + // the IP protocol, the start of the port range, and the end of the port range. + // Creates rules that grant full ICMP, UDP, and TCP access. To create a rule + // with a specific IP protocol and port range, use a set of IP permissions instead. + SourceSecurityGroupOwnerId *string `type:"string"` + + // The end of port range for the TCP and UDP protocols, or an ICMP code number. + // For the ICMP code number, use -1 to specify all ICMP codes for the ICMP type. + ToPort *int64 `type:"integer"` +} + +// String returns the string representation +func (s AuthorizeSecurityGroupIngressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuthorizeSecurityGroupIngressInput) GoString() string { + return s.String() +} + +type AuthorizeSecurityGroupIngressOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AuthorizeSecurityGroupIngressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuthorizeSecurityGroupIngressOutput) GoString() string { + return s.String() +} + +// Describes an Availability Zone. +type AvailabilityZone struct { + _ struct{} `type:"structure"` + + // Any messages about the Availability Zone. + Messages []*AvailabilityZoneMessage `locationName:"messageSet" locationNameList:"item" type:"list"` + + // The name of the region. + RegionName *string `locationName:"regionName" type:"string"` + + // The state of the Availability Zone. + State *string `locationName:"zoneState" type:"string" enum:"AvailabilityZoneState"` + + // The name of the Availability Zone. 
+ ZoneName *string `locationName:"zoneName" type:"string"` +} + +// String returns the string representation +func (s AvailabilityZone) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AvailabilityZone) GoString() string { + return s.String() +} + +// Describes a message about an Availability Zone. +type AvailabilityZoneMessage struct { + _ struct{} `type:"structure"` + + // The message about the Availability Zone. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s AvailabilityZoneMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AvailabilityZoneMessage) GoString() string { + return s.String() +} + +// The capacity information for instances launched onto the Dedicated host. +type AvailableCapacity struct { + _ struct{} `type:"structure"` + + // The total number of instances that the Dedicated host supports. + AvailableInstanceCapacity []*InstanceCapacity `locationName:"availableInstanceCapacity" locationNameList:"item" type:"list"` + + // The number of vCPUs available on the Dedicated host. + AvailableVCpus *int64 `locationName:"availableVCpus" type:"integer"` +} + +// String returns the string representation +func (s AvailableCapacity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AvailableCapacity) GoString() string { + return s.String() +} + +type BlobAttributeValue struct { + _ struct{} `type:"structure"` + + // Value is automatically base64 encoded/decoded by the SDK. + Value []byte `locationName:"value" type:"blob"` +} + +// String returns the string representation +func (s BlobAttributeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BlobAttributeValue) GoString() string { + return s.String() +} + +// Describes a block device mapping. +type BlockDeviceMapping struct { + _ struct{} `type:"structure"` + + // The device name exposed to the instance (for example, /dev/sdh or xvdh). + DeviceName *string `locationName:"deviceName" type:"string"` + + // Parameters used to automatically set up EBS volumes when the instance is + // launched. + Ebs *EbsBlockDevice `locationName:"ebs" type:"structure"` + + // Suppresses the specified device included in the block device mapping of the + // AMI. + NoDevice *string `locationName:"noDevice" type:"string"` + + // The virtual device name (ephemeralN). Instance store volumes are numbered + // starting from 0. An instance type with 2 available instance store volumes + // can specify mappings for ephemeral0 and ephemeral1. The number of available + // instance store volumes depends on the instance type. After you connect to + // the instance, you must mount the volume. + // + // Constraints: For M3 instances, you must specify instance store volumes in + // the block device mapping for the instance. When you launch an M3 instance, + // we ignore any instance store volumes specified in the block device mapping + // for the AMI. + VirtualName *string `locationName:"virtualName" type:"string"` +} + +// String returns the string representation +func (s BlockDeviceMapping) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BlockDeviceMapping) GoString() string { + return s.String() +} + +// Contains the parameters for BundleInstance.
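// (Editorial sketch, not generated SDK code: a minimal BundleInstance call for
// the type below, reusing the svc client from the earlier sketch. The instance
// ID and bucket name are placeholders, and only S3-backed storage is expressed
// through the nested Storage structure:
//
//	out, err := svc.BundleInstance(&ec2.BundleInstanceInput{
//		InstanceId: aws.String("i-1234567890abcdef0"), // placeholder
//		Storage: &ec2.Storage{
//			S3: &ec2.S3Storage{
//				Bucket: aws.String("my-bundle-bucket"), // placeholder
//				Prefix: aws.String("winami"),
//			},
//		},
//	})
//	if err == nil {
//		fmt.Println(out.BundleTask)
//	}
//
// Both InstanceId and Storage are required, as the Validate method following
// the type enforces.)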
+type BundleInstanceInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance to bundle. + // + // Type: String + // + // Default: None + // + // Required: Yes + InstanceId *string `type:"string" required:"true"` + + // The bucket in which to store the AMI. You can specify a bucket that you already + // own or a new bucket that Amazon EC2 creates on your behalf. If you specify + // a bucket that belongs to someone else, Amazon EC2 returns an error. + Storage *Storage `type:"structure" required:"true"` +} + +// String returns the string representation +func (s BundleInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BundleInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BundleInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BundleInstanceInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + if s.Storage == nil { + invalidParams.Add(request.NewErrParamRequired("Storage")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of BundleInstance. +type BundleInstanceOutput struct { + _ struct{} `type:"structure"` + + // Information about the bundle task. + BundleTask *BundleTask `locationName:"bundleInstanceTask" type:"structure"` +} + +// String returns the string representation +func (s BundleInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BundleInstanceOutput) GoString() string { + return s.String() +} + +// Describes a bundle task. +type BundleTask struct { + _ struct{} `type:"structure"` + + // The ID of the bundle task. + BundleId *string `locationName:"bundleId" type:"string"` + + // If the task fails, a description of the error. + BundleTaskError *BundleTaskError `locationName:"error" type:"structure"` + + // The ID of the instance associated with this bundle task. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The level of task completion, as a percent (for example, 20%). + Progress *string `locationName:"progress" type:"string"` + + // The time this task started. + StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601"` + + // The state of the task. + State *string `locationName:"state" type:"string" enum:"BundleTaskState"` + + // The Amazon S3 storage locations. + Storage *Storage `locationName:"storage" type:"structure"` + + // The time of the most recent update for the task. + UpdateTime *time.Time `locationName:"updateTime" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s BundleTask) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BundleTask) GoString() string { + return s.String() +} + +// Describes an error for BundleInstance. +type BundleTaskError struct { + _ struct{} `type:"structure"` + + // The error code. + Code *string `locationName:"code" type:"string"` + + // The error message. 
+ Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s BundleTaskError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BundleTaskError) GoString() string { + return s.String() +} + +// Contains the parameters for CancelBundleTask. +type CancelBundleTaskInput struct { + _ struct{} `type:"structure"` + + // The ID of the bundle task. + BundleId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` +} + +// String returns the string representation +func (s CancelBundleTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelBundleTaskInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CancelBundleTaskInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CancelBundleTaskInput"} + if s.BundleId == nil { + invalidParams.Add(request.NewErrParamRequired("BundleId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of CancelBundleTask. +type CancelBundleTaskOutput struct { + _ struct{} `type:"structure"` + + // Information about the bundle task. + BundleTask *BundleTask `locationName:"bundleInstanceTask" type:"structure"` +} + +// String returns the string representation +func (s CancelBundleTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelBundleTaskOutput) GoString() string { + return s.String() +} + +// Contains the parameters for CancelConversionTask. +type CancelConversionTaskInput struct { + _ struct{} `type:"structure"` + + // The ID of the conversion task. + ConversionTaskId *string `locationName:"conversionTaskId" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The reason for canceling the conversion task. + ReasonMessage *string `locationName:"reasonMessage" type:"string"` +} + +// String returns the string representation +func (s CancelConversionTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelConversionTaskInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
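// (Editorial sketch, not generated SDK code: canceling a conversion task needs
// only the task ID; the ID below is a placeholder and ReasonMessage is
// optional. Reuses the svc client from the first sketch:
//
//	_, err := svc.CancelConversionTask(&ec2.CancelConversionTaskInput{
//		ConversionTaskId: aws.String("import-i-abcd1234"), // placeholder
//		ReasonMessage:    aws.String("no longer needed"),
//	})
//	if err != nil {
//		log.Println(err)
//	}
//
// A nil ConversionTaskId is rejected locally by the Validate method below.)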
+func (s *CancelConversionTaskInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CancelConversionTaskInput"} + if s.ConversionTaskId == nil { + invalidParams.Add(request.NewErrParamRequired("ConversionTaskId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CancelConversionTaskOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CancelConversionTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelConversionTaskOutput) GoString() string { + return s.String() +} + +// Contains the parameters for CancelExportTask. +type CancelExportTaskInput struct { + _ struct{} `type:"structure"` + + // The ID of the export task. This is the ID returned by CreateInstanceExportTask. + ExportTaskId *string `locationName:"exportTaskId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelExportTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelExportTaskInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CancelExportTaskInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CancelExportTaskInput"} + if s.ExportTaskId == nil { + invalidParams.Add(request.NewErrParamRequired("ExportTaskId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CancelExportTaskOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CancelExportTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelExportTaskOutput) GoString() string { + return s.String() +} + +// Contains the parameters for CancelImportTask. +type CancelImportTaskInput struct { + _ struct{} `type:"structure"` + + // The reason for canceling the task. + CancelReason *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the import image or import snapshot task to be canceled. + ImportTaskId *string `type:"string"` +} + +// String returns the string representation +func (s CancelImportTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelImportTaskInput) GoString() string { + return s.String() +} + +// Contains the output for CancelImportTask. +type CancelImportTaskOutput struct { + _ struct{} `type:"structure"` + + // The ID of the task being canceled. + ImportTaskId *string `locationName:"importTaskId" type:"string"` + + // The state of the task before it was canceled. + PreviousState *string `locationName:"previousState" type:"string"` + + // The current state of the task being canceled.
+ State *string `locationName:"state" type:"string"` +} + +// String returns the string representation +func (s CancelImportTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelImportTaskOutput) GoString() string { + return s.String() +} + +// Contains the parameters for CancelReservedInstancesListing. +type CancelReservedInstancesListingInput struct { + _ struct{} `type:"structure"` + + // The ID of the Reserved Instance listing. + ReservedInstancesListingId *string `locationName:"reservedInstancesListingId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelReservedInstancesListingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelReservedInstancesListingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CancelReservedInstancesListingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CancelReservedInstancesListingInput"} + if s.ReservedInstancesListingId == nil { + invalidParams.Add(request.NewErrParamRequired("ReservedInstancesListingId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of CancelReservedInstancesListing. +type CancelReservedInstancesListingOutput struct { + _ struct{} `type:"structure"` + + // The Reserved Instance listing. + ReservedInstancesListings []*ReservedInstancesListing `locationName:"reservedInstancesListingsSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CancelReservedInstancesListingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelReservedInstancesListingOutput) GoString() string { + return s.String() +} + +// Describes a Spot fleet error. +type CancelSpotFleetRequestsError struct { + _ struct{} `type:"structure"` + + // The error code. + Code *string `locationName:"code" type:"string" required:"true" enum:"CancelBatchErrorCode"` + + // The description for the error code. + Message *string `locationName:"message" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelSpotFleetRequestsError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelSpotFleetRequestsError) GoString() string { + return s.String() +} + +// Describes a Spot fleet request that was not successfully canceled. +type CancelSpotFleetRequestsErrorItem struct { + _ struct{} `type:"structure"` + + // The error. + Error *CancelSpotFleetRequestsError `locationName:"error" type:"structure" required:"true"` + + // The ID of the Spot fleet request. + SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelSpotFleetRequestsErrorItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelSpotFleetRequestsErrorItem) GoString() string { + return s.String() +} + +// Contains the parameters for CancelSpotFleetRequests. +type CancelSpotFleetRequestsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. 
If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The IDs of the Spot fleet requests. + SpotFleetRequestIds []*string `locationName:"spotFleetRequestId" locationNameList:"item" type:"list" required:"true"` + + // Indicates whether to terminate instances for a Spot fleet request if it is + // canceled successfully. + TerminateInstances *bool `locationName:"terminateInstances" type:"boolean" required:"true"` +} + +// String returns the string representation +func (s CancelSpotFleetRequestsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelSpotFleetRequestsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CancelSpotFleetRequestsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CancelSpotFleetRequestsInput"} + if s.SpotFleetRequestIds == nil { + invalidParams.Add(request.NewErrParamRequired("SpotFleetRequestIds")) + } + if s.TerminateInstances == nil { + invalidParams.Add(request.NewErrParamRequired("TerminateInstances")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of CancelSpotFleetRequests. +type CancelSpotFleetRequestsOutput struct { + _ struct{} `type:"structure"` + + // Information about the Spot fleet requests that are successfully canceled. + SuccessfulFleetRequests []*CancelSpotFleetRequestsSuccessItem `locationName:"successfulFleetRequestSet" locationNameList:"item" type:"list"` + + // Information about the Spot fleet requests that are not successfully canceled. + UnsuccessfulFleetRequests []*CancelSpotFleetRequestsErrorItem `locationName:"unsuccessfulFleetRequestSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CancelSpotFleetRequestsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelSpotFleetRequestsOutput) GoString() string { + return s.String() +} + +// Describes a Spot fleet request that was successfully canceled. +type CancelSpotFleetRequestsSuccessItem struct { + _ struct{} `type:"structure"` + + // The current state of the Spot fleet request. + CurrentSpotFleetRequestState *string `locationName:"currentSpotFleetRequestState" type:"string" required:"true" enum:"BatchState"` + + // The previous state of the Spot fleet request. + PreviousSpotFleetRequestState *string `locationName:"previousSpotFleetRequestState" type:"string" required:"true" enum:"BatchState"` + + // The ID of the Spot fleet request. + SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelSpotFleetRequestsSuccessItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelSpotFleetRequestsSuccessItem) GoString() string { + return s.String() +} + +// Contains the parameters for CancelSpotInstanceRequests. +type CancelSpotInstanceRequestsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more Spot instance request IDs. + SpotInstanceRequestIds []*string `locationName:"SpotInstanceRequestId" locationNameList:"SpotInstanceRequestId" type:"list" required:"true"` +} + +// String returns the string representation +func (s CancelSpotInstanceRequestsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelSpotInstanceRequestsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CancelSpotInstanceRequestsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CancelSpotInstanceRequestsInput"} + if s.SpotInstanceRequestIds == nil { + invalidParams.Add(request.NewErrParamRequired("SpotInstanceRequestIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of CancelSpotInstanceRequests. +type CancelSpotInstanceRequestsOutput struct { + _ struct{} `type:"structure"` + + // One or more Spot instance requests. + CancelledSpotInstanceRequests []*CancelledSpotInstanceRequest `locationName:"spotInstanceRequestSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CancelSpotInstanceRequestsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelSpotInstanceRequestsOutput) GoString() string { + return s.String() +} + +// Describes a request to cancel a Spot instance. +type CancelledSpotInstanceRequest struct { + _ struct{} `type:"structure"` + + // The ID of the Spot instance request. + SpotInstanceRequestId *string `locationName:"spotInstanceRequestId" type:"string"` + + // The state of the Spot instance request. + State *string `locationName:"state" type:"string" enum:"CancelSpotInstanceRequestState"` +} + +// String returns the string representation +func (s CancelledSpotInstanceRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelledSpotInstanceRequest) GoString() string { + return s.String() +} + +// Describes the ClassicLink DNS support status of a VPC. +type ClassicLinkDnsSupport struct { + _ struct{} `type:"structure"` + + // Indicates whether ClassicLink DNS support is enabled for the VPC. + ClassicLinkDnsSupported *bool `locationName:"classicLinkDnsSupported" type:"boolean"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s ClassicLinkDnsSupport) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClassicLinkDnsSupport) GoString() string { + return s.String() +} + +// Describes a linked EC2-Classic instance. +type ClassicLinkInstance struct { + _ struct{} `type:"structure"` + + // A list of security groups. + Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // Any tags assigned to the instance. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC. 
+ VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s ClassicLinkInstance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClassicLinkInstance) GoString() string { + return s.String() +} + +// Describes the client-specific data. +type ClientData struct { + _ struct{} `type:"structure"` + + // A user-defined comment about the disk upload. + Comment *string `type:"string"` + + // The time that the disk upload ends. + UploadEnd *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The size of the uploaded disk image, in GiB. + UploadSize *float64 `type:"double"` + + // The time that the disk upload starts. + UploadStart *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s ClientData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClientData) GoString() string { + return s.String() +} + +// Contains the parameters for ConfirmProductInstance. +type ConfirmProductInstanceInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance. + InstanceId *string `type:"string" required:"true"` + + // The product code. This must be a product code that you own. + ProductCode *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ConfirmProductInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfirmProductInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ConfirmProductInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ConfirmProductInstanceInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + if s.ProductCode == nil { + invalidParams.Add(request.NewErrParamRequired("ProductCode")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of ConfirmProductInstance. +type ConfirmProductInstanceOutput struct { + _ struct{} `type:"structure"` + + // The AWS account ID of the instance owner. This is only present if the product + // code is attached to the instance. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The return value of the request. Returns true if the specified product code + // is owned by the requester and associated with the specified instance. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s ConfirmProductInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfirmProductInstanceOutput) GoString() string { + return s.String() +} + +// Describes a conversion task. +type ConversionTask struct { + _ struct{} `type:"structure"` + + // The ID of the conversion task. + ConversionTaskId *string `locationName:"conversionTaskId" type:"string" required:"true"` + + // The time when the task expires. 
If the upload isn't complete before the expiration + // time, we automatically cancel the task. + ExpirationTime *string `locationName:"expirationTime" type:"string"` + + // If the task is for importing an instance, this contains information about + // the import instance task. + ImportInstance *ImportInstanceTaskDetails `locationName:"importInstance" type:"structure"` + + // If the task is for importing a volume, this contains information about the + // import volume task. + ImportVolume *ImportVolumeTaskDetails `locationName:"importVolume" type:"structure"` + + // The state of the conversion task. + State *string `locationName:"state" type:"string" required:"true" enum:"ConversionTaskState"` + + // The status message related to the conversion task. + StatusMessage *string `locationName:"statusMessage" type:"string"` + + // Any tags assigned to the task. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s ConversionTask) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConversionTask) GoString() string { + return s.String() +} + +// Contains the parameters for CopyImage. +type CopyImageInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier you provide to ensure idempotency of the + // request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html) + // in the Amazon Elastic Compute Cloud User Guide. + ClientToken *string `type:"string"` + + // A description for the new AMI in the destination region. + Description *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Specifies whether the destination snapshots of the copied image should be + // encrypted. The default CMK for EBS is used unless a non-default AWS Key Management + // Service (AWS KMS) CMK is specified with KmsKeyId. For more information, see + // Amazon EBS Encryption (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) + // in the Amazon Elastic Compute Cloud User Guide. + Encrypted *bool `locationName:"encrypted" type:"boolean"` + + // The full ARN of the AWS Key Management Service (AWS KMS) CMK to use when + // encrypting the snapshots of an image during a copy operation. This parameter + // is only required if you want to use a non-default CMK; if this parameter + // is not specified, the default CMK for EBS is used. The ARN contains the arn:aws:kms + // namespace, followed by the region of the CMK, the AWS account ID of the CMK + // owner, the key namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. + // The specified CMK must exist in the region that the snapshot is being copied + // to. If a KmsKeyId is specified, the Encrypted flag must also be set. + KmsKeyId *string `locationName:"kmsKeyId" type:"string"` + + // The name of the new AMI in the destination region. + Name *string `type:"string" required:"true"` + + // The ID of the AMI to copy. + SourceImageId *string `type:"string" required:"true"` + + // The name of the region that contains the AMI to copy. 
+ SourceRegion *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CopyImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyImageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CopyImageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CopyImageInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.SourceImageId == nil { + invalidParams.Add(request.NewErrParamRequired("SourceImageId")) + } + if s.SourceRegion == nil { + invalidParams.Add(request.NewErrParamRequired("SourceRegion")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of CopyImage. +type CopyImageOutput struct { + _ struct{} `type:"structure"` + + // The ID of the new AMI. + ImageId *string `locationName:"imageId" type:"string"` +} + +// String returns the string representation +func (s CopyImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyImageOutput) GoString() string { + return s.String() +} + +// Contains the parameters for CopySnapshot. +type CopySnapshotInput struct { + _ struct{} `type:"structure"` + + // A description for the EBS snapshot. + Description *string `type:"string"` + + // The destination region to use in the PresignedUrl parameter of a snapshot + // copy operation. This parameter is only valid for specifying the destination + // region in a PresignedUrl parameter, where it is required. + // + // CopySnapshot sends the snapshot copy to the regional endpoint that you + // send the HTTP request to, such as ec2.us-east-1.amazonaws.com (in the AWS + // CLI, this is specified with the --region parameter or the default region + // in your AWS configuration file). + DestinationRegion *string `locationName:"destinationRegion" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Specifies whether the destination snapshot should be encrypted. You can encrypt + // a copy of an unencrypted snapshot using this flag, but you cannot use it + // to create an unencrypted copy from an encrypted snapshot. Your default CMK + // for EBS is used unless a non-default AWS Key Management Service (AWS KMS) + // CMK is specified with KmsKeyId. For more information, see Amazon EBS Encryption + // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) in + // the Amazon Elastic Compute Cloud User Guide. + Encrypted *bool `locationName:"encrypted" type:"boolean"` + + // The full ARN of the AWS Key Management Service (AWS KMS) CMK to use when + // creating the snapshot copy. This parameter is only required if you want to + // use a non-default CMK; if this parameter is not specified, the default CMK + // for EBS is used. The ARN contains the arn:aws:kms namespace, followed by + // the region of the CMK, the AWS account ID of the CMK owner, the key namespace, + // and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. 
+ // The specified CMK must exist in the region that the snapshot is being copied + // to. If a KmsKeyId is specified, the Encrypted flag must also be set. + KmsKeyId *string `locationName:"kmsKeyId" type:"string"` + + // The pre-signed URL that facilitates copying an encrypted snapshot. This parameter + // is only required when copying an encrypted snapshot with the Amazon EC2 Query + // API; it is available as an optional parameter in all other cases. The PresignedUrl + // should use the snapshot source endpoint, the CopySnapshot action, and include + // the SourceRegion, SourceSnapshotId, and DestinationRegion parameters. The + // PresignedUrl must be signed using AWS Signature Version 4. Because EBS snapshots + // are stored in Amazon S3, the signing algorithm for this parameter uses the + // same logic that is described in Authenticating Requests by Using Query Parameters + // (AWS Signature Version 4) (http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html) + // in the Amazon Simple Storage Service API Reference. An invalid or improperly + // signed PresignedUrl will cause the copy operation to fail asynchronously, + // and the snapshot will move to an error state. + PresignedUrl *string `locationName:"presignedUrl" type:"string"` + + // The ID of the region that contains the snapshot to be copied. + SourceRegion *string `type:"string" required:"true"` + + // The ID of the EBS snapshot to copy. + SourceSnapshotId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CopySnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopySnapshotInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CopySnapshotInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CopySnapshotInput"} + if s.SourceRegion == nil { + invalidParams.Add(request.NewErrParamRequired("SourceRegion")) + } + if s.SourceSnapshotId == nil { + invalidParams.Add(request.NewErrParamRequired("SourceSnapshotId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of CopySnapshot. +type CopySnapshotOutput struct { + _ struct{} `type:"structure"` + + // The ID of the new snapshot. + SnapshotId *string `locationName:"snapshotId" type:"string"` +} + +// String returns the string representation +func (s CopySnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopySnapshotOutput) GoString() string { + return s.String() +} + +// Contains the parameters for CreateCustomerGateway. +type CreateCustomerGatewayInput struct { + _ struct{} `type:"structure"` + + // For devices that support BGP, the customer gateway's BGP ASN. + // + // Default: 65000 + BgpAsn *int64 `type:"integer" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The Internet-routable IP address for the customer gateway's outside interface. + // The address must be static. 
+ PublicIp *string `locationName:"IpAddress" type:"string" required:"true"` + + // The type of VPN connection that this customer gateway supports (ipsec.1). + Type *string `type:"string" required:"true" enum:"GatewayType"` +} + +// String returns the string representation +func (s CreateCustomerGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCustomerGatewayInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateCustomerGatewayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateCustomerGatewayInput"} + if s.BgpAsn == nil { + invalidParams.Add(request.NewErrParamRequired("BgpAsn")) + } + if s.PublicIp == nil { + invalidParams.Add(request.NewErrParamRequired("PublicIp")) + } + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of CreateCustomerGateway. +type CreateCustomerGatewayOutput struct { + _ struct{} `type:"structure"` + + // Information about the customer gateway. + CustomerGateway *CustomerGateway `locationName:"customerGateway" type:"structure"` +} + +// String returns the string representation +func (s CreateCustomerGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCustomerGatewayOutput) GoString() string { + return s.String() +} + +// Contains the parameters for CreateDhcpOptions. +type CreateDhcpOptionsInput struct { + _ struct{} `type:"structure"` + + // A DHCP configuration option. + DhcpConfigurations []*NewDhcpConfiguration `locationName:"dhcpConfiguration" locationNameList:"item" type:"list" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` +} + +// String returns the string representation +func (s CreateDhcpOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDhcpOptionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDhcpOptionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDhcpOptionsInput"} + if s.DhcpConfigurations == nil { + invalidParams.Add(request.NewErrParamRequired("DhcpConfigurations")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of CreateDhcpOptions. +type CreateDhcpOptionsOutput struct { + _ struct{} `type:"structure"` + + // A set of DHCP options. + DhcpOptions *DhcpOptions `locationName:"dhcpOptions" type:"structure"` +} + +// String returns the string representation +func (s CreateDhcpOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDhcpOptionsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for CreateFlowLogs. +type CreateFlowLogsInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier you provide to ensure the idempotency of + // the request. 
For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). + ClientToken *string `type:"string"` + + // The ARN for the IAM role that's used to post flow logs to a CloudWatch Logs + // log group. + DeliverLogsPermissionArn *string `type:"string" required:"true"` + + // The name of the CloudWatch log group. + LogGroupName *string `type:"string" required:"true"` + + // One or more subnet, network interface, or VPC IDs. + // + // Constraints: Maximum of 1000 resources + ResourceIds []*string `locationName:"ResourceId" locationNameList:"item" type:"list" required:"true"` + + // The type of resource on which to create the flow log. + ResourceType *string `type:"string" required:"true" enum:"FlowLogsResourceType"` + + // The type of traffic to log. + TrafficType *string `type:"string" required:"true" enum:"TrafficType"` +} + +// String returns the string representation +func (s CreateFlowLogsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFlowLogsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateFlowLogsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateFlowLogsInput"} + if s.DeliverLogsPermissionArn == nil { + invalidParams.Add(request.NewErrParamRequired("DeliverLogsPermissionArn")) + } + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.ResourceIds == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceIds")) + } + if s.ResourceType == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceType")) + } + if s.TrafficType == nil { + invalidParams.Add(request.NewErrParamRequired("TrafficType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of CreateFlowLogs. +type CreateFlowLogsOutput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier you provide to ensure the idempotency of + // the request. + ClientToken *string `locationName:"clientToken" type:"string"` + + // The IDs of the flow logs. + FlowLogIds []*string `locationName:"flowLogIdSet" locationNameList:"item" type:"list"` + + // Information about the flow logs that could not be created successfully. + Unsuccessful []*UnsuccessfulItem `locationName:"unsuccessful" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CreateFlowLogsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFlowLogsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for CreateImage. +type CreateImageInput struct { + _ struct{} `type:"structure"` + + // Information about one or more block device mappings. + BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"BlockDeviceMapping" type:"list"` + + // A description for the new image. + Description *string `locationName:"description" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance. 
+ InstanceId *string `locationName:"instanceId" type:"string" required:"true"`
+
+ // A name for the new image.
+ //
+ // Constraints: 3-128 alphanumeric characters, parentheses (()), square brackets
+ // ([]), spaces ( ), periods (.), slashes (/), dashes (-), single quotes ('),
+ // at-signs (@), or underscores (_)
+ Name *string `locationName:"name" type:"string" required:"true"`
+
+ // By default, Amazon EC2 attempts to shut down and reboot the instance before
+ // creating the image. If the 'No Reboot' option is set, Amazon EC2 doesn't
+ // shut down the instance before creating the image. When this option is used,
+ // file system integrity on the created image can't be guaranteed.
+ NoReboot *bool `locationName:"noReboot" type:"boolean"`
+}
+
+// String returns the string representation
+func (s CreateImageInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateImageInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateImageInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateImageInput"}
+ if s.InstanceId == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceId"))
+ }
+ if s.Name == nil {
+ invalidParams.Add(request.NewErrParamRequired("Name"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Contains the output of CreateImage.
+type CreateImageOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the new AMI.
+ ImageId *string `locationName:"imageId" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateImageOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateImageOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the parameters for CreateInstanceExportTask.
+type CreateInstanceExportTaskInput struct {
+ _ struct{} `type:"structure"`
+
+ // A description for the conversion task or the resource being exported. The
+ // maximum length is 255 bytes.
+ Description *string `locationName:"description" type:"string"`
+
+ // The format and location for an instance export task.
+ ExportToS3Task *ExportToS3TaskSpecification `locationName:"exportToS3" type:"structure"`
+
+ // The ID of the instance.
+ InstanceId *string `locationName:"instanceId" type:"string" required:"true"`
+
+ // The target virtualization environment.
+ TargetEnvironment *string `locationName:"targetEnvironment" type:"string" enum:"ExportEnvironment"`
+}
+
+// String returns the string representation
+func (s CreateInstanceExportTaskInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateInstanceExportTaskInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateInstanceExportTaskInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateInstanceExportTaskInput"}
+ if s.InstanceId == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Contains the output of CreateInstanceExportTask.
+type CreateInstanceExportTaskOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the instance export task.
+ ExportTask *ExportTask `locationName:"exportTask" type:"structure"` +} + +// String returns the string representation +func (s CreateInstanceExportTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInstanceExportTaskOutput) GoString() string { + return s.String() +} + +// Contains the parameters for CreateInternetGateway. +type CreateInternetGatewayInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` +} + +// String returns the string representation +func (s CreateInternetGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInternetGatewayInput) GoString() string { + return s.String() +} + +// Contains the output of CreateInternetGateway. +type CreateInternetGatewayOutput struct { + _ struct{} `type:"structure"` + + // Information about the Internet gateway. + InternetGateway *InternetGateway `locationName:"internetGateway" type:"structure"` +} + +// String returns the string representation +func (s CreateInternetGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInternetGatewayOutput) GoString() string { + return s.String() +} + +// Contains the parameters for CreateKeyPair. +type CreateKeyPairInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // A unique name for the key pair. + // + // Constraints: Up to 255 ASCII characters + KeyName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateKeyPairInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateKeyPairInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateKeyPairInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateKeyPairInput"} + if s.KeyName == nil { + invalidParams.Add(request.NewErrParamRequired("KeyName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Describes a key pair. +type CreateKeyPairOutput struct { + _ struct{} `type:"structure"` + + // The SHA-1 digest of the DER encoded private key. + KeyFingerprint *string `locationName:"keyFingerprint" type:"string"` + + // An unencrypted PEM encoded RSA private key. + KeyMaterial *string `locationName:"keyMaterial" type:"string"` + + // The name of the key pair. + KeyName *string `locationName:"keyName" type:"string"` +} + +// String returns the string representation +func (s CreateKeyPairOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateKeyPairOutput) GoString() string { + return s.String() +} + +// Contains the parameters for CreateNatGateway. 
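An aside on the pattern above (a minimal sketch, not part of the vendored diff): each generated input type exposes a Validate method that performs only client-side checks such as required fields, while the DryRun flag is evaluated server-side as its comment describes. The key name below is a hypothetical placeholder.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// KeyName is the only required field on CreateKeyPairInput, so leaving
	// it unset makes Validate return an ErrInvalidParams aggregate error.
	input := &ec2.CreateKeyPairInput{DryRun: aws.Bool(true)}
	if err := input.Validate(); err != nil {
		fmt.Println("client-side validation failed:", err)
	}

	// Once KeyName is set, Validate passes; the server-side DryRun
	// permission check described above still applies to the actual call.
	input.KeyName = aws.String("my-key-pair")
	fmt.Println("valid:", input.Validate() == nil)
}

The generated CreateNatGateway definitions follow.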
+type CreateNatGatewayInput struct { + _ struct{} `type:"structure"` + + // The allocation ID of an Elastic IP address to associate with the NAT gateway. + // If the Elastic IP address is associated with another resource, you must first + // disassociate it. + AllocationId *string `type:"string" required:"true"` + + // Unique, case-sensitive identifier you provide to ensure the idempotency of + // the request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // + // Constraint: Maximum 64 ASCII characters. + ClientToken *string `type:"string"` + + // The subnet in which to create the NAT gateway. + SubnetId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateNatGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNatGatewayInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateNatGatewayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateNatGatewayInput"} + if s.AllocationId == nil { + invalidParams.Add(request.NewErrParamRequired("AllocationId")) + } + if s.SubnetId == nil { + invalidParams.Add(request.NewErrParamRequired("SubnetId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of CreateNatGateway. +type CreateNatGatewayOutput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier to ensure the idempotency of the request. + // Only returned if a client token was provided in the request. + ClientToken *string `locationName:"clientToken" type:"string"` + + // Information about the NAT gateway. + NatGateway *NatGateway `locationName:"natGateway" type:"structure"` +} + +// String returns the string representation +func (s CreateNatGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNatGatewayOutput) GoString() string { + return s.String() +} + +// Contains the parameters for CreateNetworkAclEntry. +type CreateNetworkAclEntryInput struct { + _ struct{} `type:"structure"` + + // The network range to allow or deny, in CIDR notation (for example 172.16.0.0/24). + CidrBlock *string `locationName:"cidrBlock" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Indicates whether this is an egress rule (rule is applied to traffic leaving + // the subnet). + Egress *bool `locationName:"egress" type:"boolean" required:"true"` + + // ICMP protocol: The ICMP type and code. Required if specifying ICMP for the + // protocol. + IcmpTypeCode *IcmpTypeCode `locationName:"Icmp" type:"structure"` + + // The ID of the network ACL. + NetworkAclId *string `locationName:"networkAclId" type:"string" required:"true"` + + // TCP or UDP protocols: The range of ports the rule applies to. + PortRange *PortRange `locationName:"portRange" type:"structure"` + + // The protocol. A value of -1 means all protocols. 
+ Protocol *string `locationName:"protocol" type:"string" required:"true"` + + // Indicates whether to allow or deny the traffic that matches the rule. + RuleAction *string `locationName:"ruleAction" type:"string" required:"true" enum:"RuleAction"` + + // The rule number for the entry (for example, 100). ACL entries are processed + // in ascending order by rule number. + // + // Constraints: Positive integer from 1 to 32766. The range 32767 to 65535 + // is reserved for internal use. + RuleNumber *int64 `locationName:"ruleNumber" type:"integer" required:"true"` +} + +// String returns the string representation +func (s CreateNetworkAclEntryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNetworkAclEntryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateNetworkAclEntryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateNetworkAclEntryInput"} + if s.CidrBlock == nil { + invalidParams.Add(request.NewErrParamRequired("CidrBlock")) + } + if s.Egress == nil { + invalidParams.Add(request.NewErrParamRequired("Egress")) + } + if s.NetworkAclId == nil { + invalidParams.Add(request.NewErrParamRequired("NetworkAclId")) + } + if s.Protocol == nil { + invalidParams.Add(request.NewErrParamRequired("Protocol")) + } + if s.RuleAction == nil { + invalidParams.Add(request.NewErrParamRequired("RuleAction")) + } + if s.RuleNumber == nil { + invalidParams.Add(request.NewErrParamRequired("RuleNumber")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateNetworkAclEntryOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateNetworkAclEntryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNetworkAclEntryOutput) GoString() string { + return s.String() +} + +// Contains the parameters for CreateNetworkAcl. +type CreateNetworkAclInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateNetworkAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNetworkAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateNetworkAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateNetworkAclInput"} + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of CreateNetworkAcl. +type CreateNetworkAclOutput struct { + _ struct{} `type:"structure"` + + // Information about the network ACL. 
+ NetworkAcl *NetworkAcl `locationName:"networkAcl" type:"structure"` +} + +// String returns the string representation +func (s CreateNetworkAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNetworkAclOutput) GoString() string { + return s.String() +} + +// Contains the parameters for CreateNetworkInterface. +type CreateNetworkInterfaceInput struct { + _ struct{} `type:"structure"` + + // A description for the network interface. + Description *string `locationName:"description" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The IDs of one or more security groups. + Groups []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"` + + // The primary private IP address of the network interface. If you don't specify + // an IP address, Amazon EC2 selects one for you from the subnet range. If you + // specify an IP address, you cannot indicate any IP addresses specified in + // privateIpAddresses as primary (only one IP address can be designated as primary). + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // One or more private IP addresses. + PrivateIpAddresses []*PrivateIpAddressSpecification `locationName:"privateIpAddresses" locationNameList:"item" type:"list"` + + // The number of secondary private IP addresses to assign to a network interface. + // When you specify a number of secondary IP addresses, Amazon EC2 selects these + // IP addresses within the subnet range. You can't specify this option and specify + // more than one private IP address using privateIpAddresses. + // + // The number of IP addresses you can assign to a network interface varies + // by instance type. For more information, see Private IP Addresses Per ENI + // Per Instance Type (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI) + // in the Amazon Elastic Compute Cloud User Guide. + SecondaryPrivateIpAddressCount *int64 `locationName:"secondaryPrivateIpAddressCount" type:"integer"` + + // The ID of the subnet to associate with the network interface. + SubnetId *string `locationName:"subnetId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateNetworkInterfaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNetworkInterfaceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateNetworkInterfaceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateNetworkInterfaceInput"} + if s.SubnetId == nil { + invalidParams.Add(request.NewErrParamRequired("SubnetId")) + } + if s.PrivateIpAddresses != nil { + for i, v := range s.PrivateIpAddresses { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PrivateIpAddresses", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of CreateNetworkInterface. 
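The Validate method above also recurses into each element of PrivateIpAddresses, prefixing any nested failure with its slice index via AddNested. A minimal sketch of that behavior, assuming (per the upstream API model) that PrivateIpAddress is the required field of PrivateIpAddressSpecification; the subnet ID is a placeholder.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	input := &ec2.CreateNetworkInterfaceInput{
		SubnetId: aws.String("subnet-1a2b3c4d"),
		PrivateIpAddresses: []*ec2.PrivateIpAddressSpecification{
			{Primary: aws.Bool(true)}, // PrivateIpAddress deliberately unset
		},
	}
	// The nested failure surfaces as PrivateIpAddresses[0].PrivateIpAddress
	// inside the ErrInvalidParams value returned by Validate.
	if err := input.Validate(); err != nil {
		fmt.Println(err)
	}
}

The generated CreateNetworkInterface output type follows.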
+type CreateNetworkInterfaceOutput struct { + _ struct{} `type:"structure"` + + // Information about the network interface. + NetworkInterface *NetworkInterface `locationName:"networkInterface" type:"structure"` +} + +// String returns the string representation +func (s CreateNetworkInterfaceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNetworkInterfaceOutput) GoString() string { + return s.String() +} + +// Contains the parameters for CreatePlacementGroup. +type CreatePlacementGroupInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // A name for the placement group. + // + // Constraints: Up to 255 ASCII characters + GroupName *string `locationName:"groupName" type:"string" required:"true"` + + // The placement strategy. + Strategy *string `locationName:"strategy" type:"string" required:"true" enum:"PlacementStrategy"` +} + +// String returns the string representation +func (s CreatePlacementGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePlacementGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreatePlacementGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreatePlacementGroupInput"} + if s.GroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GroupName")) + } + if s.Strategy == nil { + invalidParams.Add(request.NewErrParamRequired("Strategy")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreatePlacementGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreatePlacementGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePlacementGroupOutput) GoString() string { + return s.String() +} + +// Contains the parameters for CreateReservedInstancesListing. +type CreateReservedInstancesListingInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier you provide to ensure idempotency of your + // listings. This helps avoid duplicate listings. For more information, see + // Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `locationName:"clientToken" type:"string" required:"true"` + + // The number of instances that are a part of a Reserved Instance account to + // be listed in the Reserved Instance Marketplace. This number should be less + // than or equal to the instance count associated with the Reserved Instance + // ID specified in this call. + InstanceCount *int64 `locationName:"instanceCount" type:"integer" required:"true"` + + // A list specifying the price of the Reserved Instance for each month remaining + // in the Reserved Instance term. + PriceSchedules []*PriceScheduleSpecification `locationName:"priceSchedules" locationNameList:"item" type:"list" required:"true"` + + // The ID of the active Reserved Instance. 
+ ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateReservedInstancesListingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReservedInstancesListingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateReservedInstancesListingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateReservedInstancesListingInput"} + if s.ClientToken == nil { + invalidParams.Add(request.NewErrParamRequired("ClientToken")) + } + if s.InstanceCount == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceCount")) + } + if s.PriceSchedules == nil { + invalidParams.Add(request.NewErrParamRequired("PriceSchedules")) + } + if s.ReservedInstancesId == nil { + invalidParams.Add(request.NewErrParamRequired("ReservedInstancesId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of CreateReservedInstancesListing. +type CreateReservedInstancesListingOutput struct { + _ struct{} `type:"structure"` + + // Information about the Reserved Instance listing. + ReservedInstancesListings []*ReservedInstancesListing `locationName:"reservedInstancesListingsSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CreateReservedInstancesListingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReservedInstancesListingOutput) GoString() string { + return s.String() +} + +// Contains the parameters for CreateRoute. +type CreateRouteInput struct { + _ struct{} `type:"structure"` + + // The CIDR address block used for the destination match. Routing decisions + // are based on the most specific match. + DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of an Internet gateway or virtual private gateway attached to your + // VPC. + GatewayId *string `locationName:"gatewayId" type:"string"` + + // The ID of a NAT instance in your VPC. The operation fails if you specify + // an instance ID unless exactly one network interface is attached. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The ID of a NAT gateway. + NatGatewayId *string `locationName:"natGatewayId" type:"string"` + + // The ID of a network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // The ID of the route table for the route. + RouteTableId *string `locationName:"routeTableId" type:"string" required:"true"` + + // The ID of a VPC peering connection. 
+ VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"` +} + +// String returns the string representation +func (s CreateRouteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRouteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateRouteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateRouteInput"} + if s.DestinationCidrBlock == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationCidrBlock")) + } + if s.RouteTableId == nil { + invalidParams.Add(request.NewErrParamRequired("RouteTableId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of CreateRoute. +type CreateRouteOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s CreateRouteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRouteOutput) GoString() string { + return s.String() +} + +// Contains the parameters for CreateRouteTable. +type CreateRouteTableInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateRouteTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRouteTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateRouteTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateRouteTableInput"} + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of CreateRouteTable. +type CreateRouteTableOutput struct { + _ struct{} `type:"structure"` + + // Information about the route table. + RouteTable *RouteTable `locationName:"routeTable" type:"structure"` +} + +// String returns the string representation +func (s CreateRouteTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRouteTableOutput) GoString() string { + return s.String() +} + +// Contains the parameters for CreateSecurityGroup. +type CreateSecurityGroupInput struct { + _ struct{} `type:"structure"` + + // A description for the security group. This is informational only. 
+ // + // Constraints: Up to 255 characters in length + // + // Constraints for EC2-Classic: ASCII characters + // + // Constraints for EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!$* + Description *string `locationName:"GroupDescription" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The name of the security group. + // + // Constraints: Up to 255 characters in length + // + // Constraints for EC2-Classic: ASCII characters + // + // Constraints for EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!$* + GroupName *string `type:"string" required:"true"` + + // [EC2-VPC] The ID of the VPC. Required for EC2-VPC. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s CreateSecurityGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSecurityGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateSecurityGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateSecurityGroupInput"} + if s.Description == nil { + invalidParams.Add(request.NewErrParamRequired("Description")) + } + if s.GroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GroupName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of CreateSecurityGroup. +type CreateSecurityGroupOutput struct { + _ struct{} `type:"structure"` + + // The ID of the security group. + GroupId *string `locationName:"groupId" type:"string"` +} + +// String returns the string representation +func (s CreateSecurityGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSecurityGroupOutput) GoString() string { + return s.String() +} + +// Contains the parameters for CreateSnapshot. +type CreateSnapshotInput struct { + _ struct{} `type:"structure"` + + // A description for the snapshot. + Description *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the EBS volume. + VolumeId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSnapshotInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateSnapshotInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateSnapshotInput"} + if s.VolumeId == nil { + invalidParams.Add(request.NewErrParamRequired("VolumeId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the parameters for CreateSpotDatafeedSubscription. 
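Validate never contacts the service; the DryRun flag documented on CreateSnapshotInput is its server-side counterpart, and a permitted dry run deliberately returns an error whose code is DryRunOperation. A hedged sketch of that call pattern, assuming credentials and a region are configured in the environment and an SDK revision that provides session.NewSession; the volume ID is a placeholder.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	_, err := svc.CreateSnapshot(&ec2.CreateSnapshotInput{
		VolumeId: aws.String("vol-1a2b3c4d"), // placeholder volume ID
		DryRun:   aws.Bool(true),
	})
	// A dry run never succeeds: DryRunOperation means the caller has the
	// required permissions; UnauthorizedOperation means it does not.
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "DryRunOperation" {
		fmt.Println("permitted to create snapshots")
	} else {
		fmt.Println("dry run result:", err)
	}
}

The generated CreateSpotDatafeedSubscription definitions follow.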
+type CreateSpotDatafeedSubscriptionInput struct { + _ struct{} `type:"structure"` + + // The Amazon S3 bucket in which to store the Spot instance data feed. + Bucket *string `locationName:"bucket" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // A prefix for the data feed file names. + Prefix *string `locationName:"prefix" type:"string"` +} + +// String returns the string representation +func (s CreateSpotDatafeedSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSpotDatafeedSubscriptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateSpotDatafeedSubscriptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateSpotDatafeedSubscriptionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of CreateSpotDatafeedSubscription. +type CreateSpotDatafeedSubscriptionOutput struct { + _ struct{} `type:"structure"` + + // The Spot instance data feed subscription. + SpotDatafeedSubscription *SpotDatafeedSubscription `locationName:"spotDatafeedSubscription" type:"structure"` +} + +// String returns the string representation +func (s CreateSpotDatafeedSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSpotDatafeedSubscriptionOutput) GoString() string { + return s.String() +} + +// Contains the parameters for CreateSubnet. +type CreateSubnetInput struct { + _ struct{} `type:"structure"` + + // The Availability Zone for the subnet. + // + // Default: AWS selects one for you. If you create more than one subnet in + // your VPC, we may not necessarily select a different zone for each subnet. + AvailabilityZone *string `type:"string"` + + // The network range for the subnet, in CIDR notation. For example, 10.0.0.0/24. + CidrBlock *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. + VpcId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateSubnetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSubnetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateSubnetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateSubnetInput"} + if s.CidrBlock == nil { + invalidParams.Add(request.NewErrParamRequired("CidrBlock")) + } + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of CreateSubnet. +type CreateSubnetOutput struct { + _ struct{} `type:"structure"` + + // Information about the subnet. + Subnet *Subnet `locationName:"subnet" type:"structure"` +} + +// String returns the string representation +func (s CreateSubnetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSubnetOutput) GoString() string { + return s.String() +} + +// Contains the parameters for CreateTags. +type CreateTagsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The IDs of one or more resources to tag. For example, ami-1a2b3c4d. + Resources []*string `locationName:"ResourceId" type:"list" required:"true"` + + // One or more tags. The value parameter is required, but if you don't want + // the tag to have a value, specify the parameter with no value, and we set + // the value to an empty string. + Tags []*Tag `locationName:"Tag" locationNameList:"item" type:"list" required:"true"` +} + +// String returns the string representation +func (s CreateTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTagsInput"} + if s.Resources == nil { + invalidParams.Add(request.NewErrParamRequired("Resources")) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTagsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for CreateVolume. +type CreateVolumeInput struct { + _ struct{} `type:"structure"` + + // The Availability Zone in which to create the volume. Use DescribeAvailabilityZones + // to list the Availability Zones that are currently available to you. + AvailabilityZone *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Specifies whether the volume should be encrypted. Encrypted Amazon EBS volumes + // may only be attached to instances that support Amazon EBS encryption. 
Volumes + // that are created from encrypted snapshots are automatically encrypted. There + // is no way to create an encrypted volume from an unencrypted snapshot or vice + // versa. If your AMI uses encrypted volumes, you can only launch it on supported + // instance types. For more information, see Amazon EBS Encryption (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) + // in the Amazon Elastic Compute Cloud User Guide. + Encrypted *bool `locationName:"encrypted" type:"boolean"` + + // Only valid for Provisioned IOPS SSD volumes. The number of I/O operations + // per second (IOPS) to provision for the volume, with a maximum ratio of 30 + // IOPS/GiB. + // + // Constraint: Range is 100 to 20000 for Provisioned IOPS SSD volumes + Iops *int64 `type:"integer"` + + // The full ARN of the AWS Key Management Service (AWS KMS) customer master + // key (CMK) to use when creating the encrypted volume. This parameter is only + // required if you want to use a non-default CMK; if this parameter is not specified, + // the default CMK for EBS is used. The ARN contains the arn:aws:kms namespace, + // followed by the region of the CMK, the AWS account ID of the CMK owner, the + // key namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. + // If a KmsKeyId is specified, the Encrypted flag must also be set. + KmsKeyId *string `type:"string"` + + // The size of the volume, in GiBs. + // + // Constraints: 1-16384 for gp2, 4-16384 for io1, 500-16384 for st1, 500-16384 + // for sc1, and 1-1024 for standard. If you specify a snapshot, the volume size + // must be equal to or larger than the snapshot size. + // + // Default: If you're creating the volume from a snapshot and don't specify + // a volume size, the default is the snapshot size. + Size *int64 `type:"integer"` + + // The snapshot from which to create the volume. + SnapshotId *string `type:"string"` + + // The volume type. This can be gp2 for General Purpose SSD, io1 for Provisioned + // IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard + // for Magnetic volumes. + // + // Default: standard + VolumeType *string `type:"string" enum:"VolumeType"` +} + +// String returns the string representation +func (s CreateVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVolumeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateVolumeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateVolumeInput"} + if s.AvailabilityZone == nil { + invalidParams.Add(request.NewErrParamRequired("AvailabilityZone")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Describes the user or group to be added or removed from the permissions for +// a volume. +type CreateVolumePermission struct { + _ struct{} `type:"structure"` + + // The specific group that is to be added or removed from a volume's list of + // create volume permissions. + Group *string `locationName:"group" type:"string" enum:"PermissionGroup"` + + // The specific AWS account ID that is to be added or removed from a volume's + // list of create volume permissions. 
+ UserId *string `locationName:"userId" type:"string"` +} + +// String returns the string representation +func (s CreateVolumePermission) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVolumePermission) GoString() string { + return s.String() +} + +// Describes modifications to the permissions for a volume. +type CreateVolumePermissionModifications struct { + _ struct{} `type:"structure"` + + // Adds a specific AWS account ID or group to a volume's list of create volume + // permissions. + Add []*CreateVolumePermission `locationNameList:"item" type:"list"` + + // Removes a specific AWS account ID or group from a volume's list of create + // volume permissions. + Remove []*CreateVolumePermission `locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CreateVolumePermissionModifications) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVolumePermissionModifications) GoString() string { + return s.String() +} + +// Contains the parameters for CreateVpcEndpoint. +type CreateVpcEndpointInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier you provide to ensure the idempotency of + // the request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // A policy to attach to the endpoint that controls access to the service. The + // policy must be in valid JSON format. If this parameter is not specified, + // we attach a default policy that allows full access to the service. + PolicyDocument *string `type:"string"` + + // One or more route table IDs. + RouteTableIds []*string `locationName:"RouteTableId" locationNameList:"item" type:"list"` + + // The AWS service name, in the form com.amazonaws.region.service . To get a + // list of available services, use the DescribeVpcEndpointServices request. + ServiceName *string `type:"string" required:"true"` + + // The ID of the VPC in which the endpoint will be used. + VpcId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateVpcEndpointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpcEndpointInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateVpcEndpointInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateVpcEndpointInput"} + if s.ServiceName == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceName")) + } + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of CreateVpcEndpoint. +type CreateVpcEndpointOutput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier you provide to ensure the idempotency of + // the request. 
+ ClientToken *string `locationName:"clientToken" type:"string"` + + // Information about the endpoint. + VpcEndpoint *VpcEndpoint `locationName:"vpcEndpoint" type:"structure"` +} + +// String returns the string representation +func (s CreateVpcEndpointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpcEndpointOutput) GoString() string { + return s.String() +} + +// Contains the parameters for CreateVpc. +type CreateVpcInput struct { + _ struct{} `type:"structure"` + + // The network range for the VPC, in CIDR notation. For example, 10.0.0.0/16. + CidrBlock *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The tenancy options for instances launched into the VPC. For default, instances + // are launched with shared tenancy by default. You can launch instances with + // any tenancy into a shared tenancy VPC. For dedicated, instances are launched + // as dedicated tenancy instances by default. You can only launch instances + // with a tenancy of dedicated or host into a dedicated tenancy VPC. + // + // Important: The host value cannot be used with this parameter. Use the default + // or dedicated values only. + // + // Default: default + InstanceTenancy *string `locationName:"instanceTenancy" type:"string" enum:"Tenancy"` +} + +// String returns the string representation +func (s CreateVpcInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpcInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateVpcInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateVpcInput"} + if s.CidrBlock == nil { + invalidParams.Add(request.NewErrParamRequired("CidrBlock")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of CreateVpc. +type CreateVpcOutput struct { + _ struct{} `type:"structure"` + + // Information about the VPC. + Vpc *Vpc `locationName:"vpc" type:"structure"` +} + +// String returns the string representation +func (s CreateVpcOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpcOutput) GoString() string { + return s.String() +} + +// Contains the parameters for CreateVpcPeeringConnection. +type CreateVpcPeeringConnectionInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The AWS account ID of the owner of the peer VPC. + // + // Default: Your AWS account ID + PeerOwnerId *string `locationName:"peerOwnerId" type:"string"` + + // The ID of the VPC with which you are creating the VPC peering connection. + PeerVpcId *string `locationName:"peerVpcId" type:"string"` + + // The ID of the requester VPC. 
+ VpcId *string `locationName:"vpcId" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateVpcPeeringConnectionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateVpcPeeringConnectionInput) GoString() string {
+ return s.String()
+}
+
+// Contains the output of CreateVpcPeeringConnection.
+type CreateVpcPeeringConnectionOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the VPC peering connection.
+ VpcPeeringConnection *VpcPeeringConnection `locationName:"vpcPeeringConnection" type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateVpcPeeringConnectionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateVpcPeeringConnectionOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the parameters for CreateVpnConnection.
+type CreateVpnConnectionInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the customer gateway.
+ CustomerGatewayId *string `type:"string" required:"true"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+ // The options for the VPN connection. Indicates whether the VPN connection
+ // requires static routes. If you are creating a VPN connection for a device
+ // that does not support BGP, you must specify true.
+ //
+ // Default: false
+ Options *VpnConnectionOptionsSpecification `locationName:"options" type:"structure"`
+
+ // The type of VPN connection (ipsec.1).
+ Type *string `type:"string" required:"true"`
+
+ // The ID of the virtual private gateway.
+ VpnGatewayId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateVpnConnectionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateVpnConnectionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateVpnConnectionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateVpnConnectionInput"}
+ if s.CustomerGatewayId == nil {
+ invalidParams.Add(request.NewErrParamRequired("CustomerGatewayId"))
+ }
+ if s.Type == nil {
+ invalidParams.Add(request.NewErrParamRequired("Type"))
+ }
+ if s.VpnGatewayId == nil {
+ invalidParams.Add(request.NewErrParamRequired("VpnGatewayId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Contains the output of CreateVpnConnection.
+type CreateVpnConnectionOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the VPN connection.
+ VpnConnection *VpnConnection `locationName:"vpnConnection" type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateVpnConnectionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateVpnConnectionOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the parameters for CreateVpnConnectionRoute.
+type CreateVpnConnectionRouteInput struct {
+ _ struct{} `type:"structure"`
+
+ // The CIDR block associated with the local subnet of the customer network.
+ DestinationCidrBlock *string `type:"string" required:"true"` + + // The ID of the VPN connection. + VpnConnectionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateVpnConnectionRouteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpnConnectionRouteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateVpnConnectionRouteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateVpnConnectionRouteInput"} + if s.DestinationCidrBlock == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationCidrBlock")) + } + if s.VpnConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("VpnConnectionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateVpnConnectionRouteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateVpnConnectionRouteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpnConnectionRouteOutput) GoString() string { + return s.String() +} + +// Contains the parameters for CreateVpnGateway. +type CreateVpnGatewayInput struct { + _ struct{} `type:"structure"` + + // The Availability Zone for the virtual private gateway. + AvailabilityZone *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The type of VPN connection this virtual private gateway supports. + Type *string `type:"string" required:"true" enum:"GatewayType"` +} + +// String returns the string representation +func (s CreateVpnGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpnGatewayInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateVpnGatewayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateVpnGatewayInput"} + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of CreateVpnGateway. +type CreateVpnGatewayOutput struct { + _ struct{} `type:"structure"` + + // Information about the virtual private gateway. + VpnGateway *VpnGateway `locationName:"vpnGateway" type:"structure"` +} + +// String returns the string representation +func (s CreateVpnGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpnGatewayOutput) GoString() string { + return s.String() +} + +// Describes a customer gateway. +type CustomerGateway struct { + _ struct{} `type:"structure"` + + // The customer gateway's Border Gateway Protocol (BGP) Autonomous System Number + // (ASN). + BgpAsn *string `locationName:"bgpAsn" type:"string"` + + // The ID of the customer gateway. 
+ CustomerGatewayId *string `locationName:"customerGatewayId" type:"string"` + + // The Internet-routable IP address of the customer gateway's outside interface. + IpAddress *string `locationName:"ipAddress" type:"string"` + + // The current state of the customer gateway (pending | available | deleting + // | deleted). + State *string `locationName:"state" type:"string"` + + // Any tags assigned to the customer gateway. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The type of VPN connection the customer gateway supports (ipsec.1). + Type *string `locationName:"type" type:"string"` +} + +// String returns the string representation +func (s CustomerGateway) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CustomerGateway) GoString() string { + return s.String() +} + +// Contains the parameters for DeleteCustomerGateway. +type DeleteCustomerGatewayInput struct { + _ struct{} `type:"structure"` + + // The ID of the customer gateway. + CustomerGatewayId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` +} + +// String returns the string representation +func (s DeleteCustomerGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCustomerGatewayInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteCustomerGatewayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteCustomerGatewayInput"} + if s.CustomerGatewayId == nil { + invalidParams.Add(request.NewErrParamRequired("CustomerGatewayId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteCustomerGatewayOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteCustomerGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCustomerGatewayOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DeleteDhcpOptions. +type DeleteDhcpOptionsInput struct { + _ struct{} `type:"structure"` + + // The ID of the DHCP options set. + DhcpOptionsId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` +} + +// String returns the string representation +func (s DeleteDhcpOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDhcpOptionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteDhcpOptionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDhcpOptionsInput"} + if s.DhcpOptionsId == nil { + invalidParams.Add(request.NewErrParamRequired("DhcpOptionsId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteDhcpOptionsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDhcpOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDhcpOptionsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DeleteFlowLogs. +type DeleteFlowLogsInput struct { + _ struct{} `type:"structure"` + + // One or more flow log IDs. + FlowLogIds []*string `locationName:"FlowLogId" locationNameList:"item" type:"list" required:"true"` +} + +// String returns the string representation +func (s DeleteFlowLogsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFlowLogsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteFlowLogsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteFlowLogsInput"} + if s.FlowLogIds == nil { + invalidParams.Add(request.NewErrParamRequired("FlowLogIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of DeleteFlowLogs. +type DeleteFlowLogsOutput struct { + _ struct{} `type:"structure"` + + // Information about the flow logs that could not be deleted successfully. + Unsuccessful []*UnsuccessfulItem `locationName:"unsuccessful" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DeleteFlowLogsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFlowLogsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DeleteInternetGateway. +type DeleteInternetGatewayInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the Internet gateway. + InternetGatewayId *string `locationName:"internetGatewayId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteInternetGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInternetGatewayInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteInternetGatewayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteInternetGatewayInput"} + if s.InternetGatewayId == nil { + invalidParams.Add(request.NewErrParamRequired("InternetGatewayId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteInternetGatewayOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteInternetGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInternetGatewayOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DeleteKeyPair. +type DeleteKeyPairInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The name of the key pair. + KeyName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteKeyPairInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteKeyPairInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteKeyPairInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteKeyPairInput"} + if s.KeyName == nil { + invalidParams.Add(request.NewErrParamRequired("KeyName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteKeyPairOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteKeyPairOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteKeyPairOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DeleteNatGateway. +type DeleteNatGatewayInput struct { + _ struct{} `type:"structure"` + + // The ID of the NAT gateway. + NatGatewayId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteNatGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNatGatewayInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteNatGatewayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteNatGatewayInput"} + if s.NatGatewayId == nil { + invalidParams.Add(request.NewErrParamRequired("NatGatewayId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of DeleteNatGateway. +type DeleteNatGatewayOutput struct { + _ struct{} `type:"structure"` + + // The ID of the NAT gateway. + NatGatewayId *string `locationName:"natGatewayId" type:"string"` +} + +// String returns the string representation +func (s DeleteNatGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNatGatewayOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DeleteNetworkAclEntry. 
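+//
+// A minimal usage sketch (not part of the generated file; the "svc" client
+// and the IDs shown are assumptions): all three required fields below must be
+// set, and Validate reports every missing one client-side before any request
+// is made.
+//
+//    input := &ec2.DeleteNetworkAclEntryInput{
+//        Egress:       aws.Bool(false),
+//        NetworkAclId: aws.String("acl-1a2b3c4d"), // hypothetical ID
+//        RuleNumber:   aws.Int64(100),
+//    }
+//    if err := input.Validate(); err != nil {
+//        // handle the aggregated ErrInvalidParams error
+//    }
+//    _, err := svc.DeleteNetworkAclEntry(input)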
+type DeleteNetworkAclEntryInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Indicates whether the rule is an egress rule. + Egress *bool `locationName:"egress" type:"boolean" required:"true"` + + // The ID of the network ACL. + NetworkAclId *string `locationName:"networkAclId" type:"string" required:"true"` + + // The rule number of the entry to delete. + RuleNumber *int64 `locationName:"ruleNumber" type:"integer" required:"true"` +} + +// String returns the string representation +func (s DeleteNetworkAclEntryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNetworkAclEntryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteNetworkAclEntryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteNetworkAclEntryInput"} + if s.Egress == nil { + invalidParams.Add(request.NewErrParamRequired("Egress")) + } + if s.NetworkAclId == nil { + invalidParams.Add(request.NewErrParamRequired("NetworkAclId")) + } + if s.RuleNumber == nil { + invalidParams.Add(request.NewErrParamRequired("RuleNumber")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteNetworkAclEntryOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteNetworkAclEntryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNetworkAclEntryOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DeleteNetworkAcl. +type DeleteNetworkAclInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the network ACL. + NetworkAclId *string `locationName:"networkAclId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteNetworkAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNetworkAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteNetworkAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteNetworkAclInput"} + if s.NetworkAclId == nil { + invalidParams.Add(request.NewErrParamRequired("NetworkAclId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteNetworkAclOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteNetworkAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNetworkAclOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DeleteNetworkInterface. 
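+//
+// A sketch of the DryRun permission probe described on the field below
+// (assumptions: the "svc" client, the interface ID, and the awserr type
+// assertion; none of this is part of the generated file):
+//
+//    input := &ec2.DeleteNetworkInterfaceInput{
+//        DryRun:             aws.Bool(true),
+//        NetworkInterfaceId: aws.String("eni-1a2b3c4d"), // hypothetical ID
+//    }
+//    _, err := svc.DeleteNetworkInterface(input)
+//    if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "DryRunOperation" {
+//        // permission confirmed; repeat the call without DryRun
+//    }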
+type DeleteNetworkInterfaceInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteNetworkInterfaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNetworkInterfaceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteNetworkInterfaceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteNetworkInterfaceInput"} + if s.NetworkInterfaceId == nil { + invalidParams.Add(request.NewErrParamRequired("NetworkInterfaceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteNetworkInterfaceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteNetworkInterfaceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNetworkInterfaceOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DeletePlacementGroup. +type DeletePlacementGroupInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The name of the placement group. + GroupName *string `locationName:"groupName" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePlacementGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePlacementGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeletePlacementGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeletePlacementGroupInput"} + if s.GroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GroupName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeletePlacementGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePlacementGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePlacementGroupOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DeleteRoute. +type DeleteRouteInput struct { + _ struct{} `type:"structure"` + + // The CIDR range for the route. The value you specify must match the CIDR for + // the route exactly. 
+ DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the route table. + RouteTableId *string `locationName:"routeTableId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRouteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRouteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteRouteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteRouteInput"} + if s.DestinationCidrBlock == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationCidrBlock")) + } + if s.RouteTableId == nil { + invalidParams.Add(request.NewErrParamRequired("RouteTableId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteRouteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRouteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRouteOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DeleteRouteTable. +type DeleteRouteTableInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the route table. + RouteTableId *string `locationName:"routeTableId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRouteTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRouteTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteRouteTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteRouteTableInput"} + if s.RouteTableId == nil { + invalidParams.Add(request.NewErrParamRequired("RouteTableId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteRouteTableOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRouteTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRouteTableOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DeleteSecurityGroup. +type DeleteSecurityGroupInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the security group. Required for a nondefault VPC. + GroupId *string `type:"string"` + + // [EC2-Classic, default VPC] The name of the security group. You can specify + // either the security group name or the security group ID. + GroupName *string `type:"string"` +} + +// String returns the string representation +func (s DeleteSecurityGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSecurityGroupInput) GoString() string { + return s.String() +} + +type DeleteSecurityGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteSecurityGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSecurityGroupOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DeleteSnapshot. +type DeleteSnapshotInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the EBS snapshot. + SnapshotId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSnapshotInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteSnapshotInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteSnapshotInput"} + if s.SnapshotId == nil { + invalidParams.Add(request.NewErrParamRequired("SnapshotId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteSnapshotOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSnapshotOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DeleteSpotDatafeedSubscription. +type DeleteSpotDatafeedSubscriptionInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `locationName:"dryRun" type:"boolean"` +} + +// String returns the string representation +func (s DeleteSpotDatafeedSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSpotDatafeedSubscriptionInput) GoString() string { + return s.String() +} + +type DeleteSpotDatafeedSubscriptionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteSpotDatafeedSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSpotDatafeedSubscriptionOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DeleteSubnet. +type DeleteSubnetInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the subnet. + SubnetId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSubnetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSubnetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteSubnetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteSubnetInput"} + if s.SubnetId == nil { + invalidParams.Add(request.NewErrParamRequired("SubnetId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteSubnetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteSubnetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSubnetOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DeleteTags. +type DeleteTagsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the resource. For example, ami-1a2b3c4d. You can specify more than + // one resource ID. + Resources []*string `locationName:"resourceId" type:"list" required:"true"` + + // One or more tags to delete. If you omit the value parameter, we delete the + // tag regardless of its value. If you specify this parameter with an empty + // string as the value, we delete the key only if its value is an empty string. + Tags []*Tag `locationName:"tag" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DeleteTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
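+//
+// For example (a sketch; the "svc" client and the resource ID are
+// assumptions), Validate rejects a call that omits Resources, while a Tag
+// whose Value is unset deletes the tag regardless of its current value, as
+// documented on the Tags field above:
+//
+//    _, err := svc.DeleteTags(&ec2.DeleteTagsInput{
+//        Resources: []*string{aws.String("ami-1a2b3c4d")}, // hypothetical ID
+//        Tags:      []*ec2.Tag{{Key: aws.String("Purpose")}},
+//    })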
+func (s *DeleteTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTagsInput"} + if s.Resources == nil { + invalidParams.Add(request.NewErrParamRequired("Resources")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTagsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DeleteVolume. +type DeleteVolumeInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the volume. + VolumeId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVolumeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteVolumeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteVolumeInput"} + if s.VolumeId == nil { + invalidParams.Add(request.NewErrParamRequired("VolumeId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteVolumeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteVolumeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVolumeOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DeleteVpcEndpoints. +type DeleteVpcEndpointsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more endpoint IDs. + VpcEndpointIds []*string `locationName:"VpcEndpointId" locationNameList:"item" type:"list" required:"true"` +} + +// String returns the string representation +func (s DeleteVpcEndpointsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpcEndpointsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteVpcEndpointsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteVpcEndpointsInput"} + if s.VpcEndpointIds == nil { + invalidParams.Add(request.NewErrParamRequired("VpcEndpointIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of DeleteVpcEndpoints. +type DeleteVpcEndpointsOutput struct { + _ struct{} `type:"structure"` + + // Information about the endpoints that were not successfully deleted. 
+ Unsuccessful []*UnsuccessfulItem `locationName:"unsuccessful" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DeleteVpcEndpointsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpcEndpointsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DeleteVpc. +type DeleteVpcInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. + VpcId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVpcInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpcInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteVpcInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteVpcInput"} + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteVpcOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteVpcOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpcOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DeleteVpcPeeringConnection. +type DeleteVpcPeeringConnectionInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC peering connection. + VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVpcPeeringConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpcPeeringConnectionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteVpcPeeringConnectionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteVpcPeeringConnectionInput"} + if s.VpcPeeringConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcPeeringConnectionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of DeleteVpcPeeringConnection. +type DeleteVpcPeeringConnectionOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. 
+ Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s DeleteVpcPeeringConnectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpcPeeringConnectionOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DeleteVpnConnection. +type DeleteVpnConnectionInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPN connection. + VpnConnectionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVpnConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpnConnectionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteVpnConnectionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteVpnConnectionInput"} + if s.VpnConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("VpnConnectionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteVpnConnectionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteVpnConnectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpnConnectionOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DeleteVpnConnectionRoute. +type DeleteVpnConnectionRouteInput struct { + _ struct{} `type:"structure"` + + // The CIDR block associated with the local subnet of the customer network. + DestinationCidrBlock *string `type:"string" required:"true"` + + // The ID of the VPN connection. + VpnConnectionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVpnConnectionRouteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpnConnectionRouteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteVpnConnectionRouteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteVpnConnectionRouteInput"} + if s.DestinationCidrBlock == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationCidrBlock")) + } + if s.VpnConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("VpnConnectionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteVpnConnectionRouteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteVpnConnectionRouteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpnConnectionRouteOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DeleteVpnGateway. 
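+//
+// A sketch (assumptions: the "svc" client and the gateway ID): like every
+// type in this file, the input implements String via awsutil.Prettify, which
+// is convenient for logging outbound requests.
+//
+//    input := &ec2.DeleteVpnGatewayInput{
+//        VpnGatewayId: aws.String("vgw-1a2b3c4d"), // hypothetical ID
+//    }
+//    log.Println(input.String()) // prettified field dump
+//    _, err := svc.DeleteVpnGateway(input)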
+type DeleteVpnGatewayInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the virtual private gateway. + VpnGatewayId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVpnGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpnGatewayInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteVpnGatewayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteVpnGatewayInput"} + if s.VpnGatewayId == nil { + invalidParams.Add(request.NewErrParamRequired("VpnGatewayId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteVpnGatewayOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteVpnGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpnGatewayOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DeregisterImage. +type DeregisterImageInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the AMI. + ImageId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeregisterImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterImageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeregisterImageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeregisterImageInput"} + if s.ImageId == nil { + invalidParams.Add(request.NewErrParamRequired("ImageId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeregisterImageOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeregisterImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterImageOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeAccountAttributes. +type DescribeAccountAttributesInput struct { + _ struct{} `type:"structure"` + + // One or more account attribute names. + AttributeNames []*string `locationName:"attributeName" locationNameList:"attributeName" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `locationName:"dryRun" type:"boolean"` +} + +// String returns the string representation +func (s DescribeAccountAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAccountAttributesInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeAccountAttributes. +type DescribeAccountAttributesOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more account attributes. + AccountAttributes []*AccountAttribute `locationName:"accountAttributeSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeAccountAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAccountAttributesOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeAddresses. +type DescribeAddressesInput struct { + _ struct{} `type:"structure"` + + // [EC2-VPC] One or more allocation IDs. + // + // Default: Describes all your Elastic IP addresses. + AllocationIds []*string `locationName:"AllocationId" locationNameList:"AllocationId" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. Filter names and values are case-sensitive. + // + // allocation-id - [EC2-VPC] The allocation ID for the address. + // + // association-id - [EC2-VPC] The association ID for the address. + // + // domain - Indicates whether the address is for use in EC2-Classic (standard) + // or in a VPC (vpc). + // + // instance-id - The ID of the instance the address is associated with, + // if any. + // + // network-interface-id - [EC2-VPC] The ID of the network interface that + // the address is associated with, if any. + // + // network-interface-owner-id - The AWS account ID of the owner. + // + // private-ip-address - [EC2-VPC] The private IP address associated with + // the Elastic IP address. + // + // public-ip - The Elastic IP address. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // [EC2-Classic] One or more Elastic IP addresses. + // + // Default: Describes all your Elastic IP addresses. + PublicIps []*string `locationName:"PublicIp" locationNameList:"PublicIp" type:"list"` +} + +// String returns the string representation +func (s DescribeAddressesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAddressesInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeAddresses. +type DescribeAddressesOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more Elastic IP addresses. + Addresses []*Address `locationName:"addressesSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeAddressesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAddressesOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeAvailabilityZones. 
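+//
+// A sketch of the filter mechanism documented below (the "svc" client and the
+// chosen filter are assumptions): each Filter pairs one documented filter name
+// with one or more values.
+//
+//    out, err := svc.DescribeAvailabilityZones(&ec2.DescribeAvailabilityZonesInput{
+//        Filters: []*ec2.Filter{{
+//            Name:   aws.String("state"),
+//            Values: []*string{aws.String("available")},
+//        }},
+//    })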
+type DescribeAvailabilityZonesInput struct {
+ _ struct{} `type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+ // One or more filters.
+ //
+ // message - Information about the Availability Zone.
+ //
+ // region-name - The name of the region for the Availability Zone (for example,
+ // us-east-1).
+ //
+ // state - The state of the Availability Zone (available | information |
+ // impaired | unavailable).
+ //
+ // zone-name - The name of the Availability Zone (for example, us-east-1a).
+ Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
+
+ // The names of one or more Availability Zones.
+ ZoneNames []*string `locationName:"ZoneName" locationNameList:"ZoneName" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeAvailabilityZonesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeAvailabilityZonesInput) GoString() string {
+ return s.String()
+}
+
+// Contains the output of DescribeAvailabilityZones.
+type DescribeAvailabilityZonesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about one or more Availability Zones.
+ AvailabilityZones []*AvailabilityZone `locationName:"availabilityZoneInfo" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeAvailabilityZonesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeAvailabilityZonesOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the parameters for DescribeBundleTasks.
+type DescribeBundleTasksInput struct {
+ _ struct{} `type:"structure"`
+
+ // One or more bundle task IDs.
+ //
+ // Default: Describes all your bundle tasks.
+ BundleIds []*string `locationName:"BundleId" locationNameList:"BundleId" type:"list"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+ // One or more filters.
+ //
+ // bundle-id - The ID of the bundle task.
+ //
+ // error-code - If the task failed, the error code returned.
+ //
+ // error-message - If the task failed, the error message returned.
+ //
+ // instance-id - The ID of the instance.
+ //
+ // progress - The level of task completion, as a percentage (for example,
+ // 20%).
+ //
+ // s3-bucket - The Amazon S3 bucket to store the AMI.
+ //
+ // s3-prefix - The beginning of the AMI name.
+ //
+ // start-time - The time the task started (for example, 2013-09-15T17:15:20.000Z).
+ //
+ // state - The state of the task (pending | waiting-for-shutdown | bundling
+ // | storing | cancelling | complete | failed).
+ //
+ // update-time - The time of the most recent update for the task.
+ Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` +} + +// String returns the string representation +func (s DescribeBundleTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeBundleTasksInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeBundleTasks. +type DescribeBundleTasksOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more bundle tasks. + BundleTasks []*BundleTask `locationName:"bundleInstanceTasksSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeBundleTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeBundleTasksOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeClassicLinkInstances. +type DescribeClassicLinkInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // group-id - The ID of a VPC security group that's associated with the + // instance. + // + // instance-id - The ID of the instance. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter + // is independent of the tag-key filter. + // + // vpc-id - The ID of the VPC that the instance is linked to. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more instance IDs. Must be instances linked to a VPC through ClassicLink. + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list"` + + // The maximum number of results to return for the request in a single page. + // The remaining results of the initial request can be seen by sending another + // request with the returned NextToken value. This value can be between 5 and + // 1000; if MaxResults is given a value larger than 1000, only 1000 results + // are returned. You cannot specify this parameter and the instance IDs parameter + // in the same request. + // + // Constraint: If the value is greater than 1000, we return only 1000 items. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The token to retrieve the next page of results. 
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeClassicLinkInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClassicLinkInstancesInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeClassicLinkInstances. +type DescribeClassicLinkInstancesOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more linked EC2-Classic instances. + Instances []*ClassicLinkInstance `locationName:"instancesSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeClassicLinkInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClassicLinkInstancesOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeConversionTasks. +type DescribeConversionTasksInput struct { + _ struct{} `type:"structure"` + + // One or more conversion task IDs. + ConversionTaskIds []*string `locationName:"conversionTaskId" locationNameList:"item" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + Filters []*Filter `locationName:"filter" locationNameList:"Filter" type:"list"` +} + +// String returns the string representation +func (s DescribeConversionTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeConversionTasksInput) GoString() string { + return s.String() +} + +// Contains the output for DescribeConversionTasks. +type DescribeConversionTasksOutput struct { + _ struct{} `type:"structure"` + + // Information about the conversion tasks. + ConversionTasks []*ConversionTask `locationName:"conversionTasks" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeConversionTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeConversionTasksOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeCustomerGateways. +type DescribeCustomerGatewaysInput struct { + _ struct{} `type:"structure"` + + // One or more customer gateway IDs. + // + // Default: Describes all your customer gateways. + CustomerGatewayIds []*string `locationName:"CustomerGatewayId" locationNameList:"CustomerGatewayId" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // bgp-asn - The customer gateway's Border Gateway Protocol (BGP) Autonomous + // System Number (ASN). + // + // customer-gateway-id - The ID of the customer gateway. 
+ // + // ip-address - The IP address of the customer gateway's Internet-routable + // external interface. + // + // state - The state of the customer gateway (pending | available | deleting + // | deleted). + // + // type - The type of customer gateway. Currently, the only supported type + // is ipsec.1. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter + // is independent of the tag-key filter. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` +} + +// String returns the string representation +func (s DescribeCustomerGatewaysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCustomerGatewaysInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeCustomerGateways. +type DescribeCustomerGatewaysOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more customer gateways. + CustomerGateways []*CustomerGateway `locationName:"customerGatewaySet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeCustomerGatewaysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCustomerGatewaysOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeDhcpOptions. +type DescribeDhcpOptionsInput struct { + _ struct{} `type:"structure"` + + // The IDs of one or more DHCP options sets. + // + // Default: Describes all your DHCP options sets. + DhcpOptionsIds []*string `locationName:"DhcpOptionsId" locationNameList:"DhcpOptionsId" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // dhcp-options-id - The ID of a set of DHCP options. + // + // key - The key for one of the options (for example, domain-name). + // + // value - The value for one of the options. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter + // is independent of the tag-key filter. 
+ Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeDhcpOptionsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeDhcpOptionsInput) GoString() string {
+ return s.String()
+}
+
+// Contains the output of DescribeDhcpOptions.
+type DescribeDhcpOptionsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about one or more DHCP options sets.
+ DhcpOptions []*DhcpOptions `locationName:"dhcpOptionsSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeDhcpOptionsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeDhcpOptionsOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the parameters for DescribeExportTasks.
+type DescribeExportTasksInput struct {
+ _ struct{} `type:"structure"`
+
+ // One or more export task IDs.
+ ExportTaskIds []*string `locationName:"exportTaskId" locationNameList:"ExportTaskId" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeExportTasksInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeExportTasksInput) GoString() string {
+ return s.String()
+}
+
+// Contains the output for DescribeExportTasks.
+type DescribeExportTasksOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the export tasks.
+ ExportTasks []*ExportTask `locationName:"exportTaskSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeExportTasksOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeExportTasksOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the parameters for DescribeFlowLogs.
+type DescribeFlowLogsInput struct {
+ _ struct{} `type:"structure"`
+
+ // One or more filters.
+ //
+ // deliver-log-status - The status of the logs delivery (SUCCESS | FAILED).
+ //
+ // flow-log-id - The ID of the flow log.
+ //
+ // log-group-name - The name of the log group.
+ //
+ // resource-id - The ID of the VPC, subnet, or network interface.
+ //
+ // traffic-type - The type of traffic (ACCEPT | REJECT | ALL).
+ Filter []*Filter `locationNameList:"Filter" type:"list"`
+
+ // One or more flow log IDs.
+ FlowLogIds []*string `locationName:"FlowLogId" locationNameList:"item" type:"list"`
+
+ // The maximum number of results to return for the request in a single page.
+ // The remaining results can be seen by sending another request with the returned
+ // NextToken value. This value can be between 5 and 1000; if MaxResults is given
+ // a value larger than 1000, only 1000 results are returned. You cannot specify
+ // this parameter and the flow log IDs parameter in the same request.
+ MaxResults *int64 `type:"integer"`
+
+ // The token to retrieve the next page of results.
+ NextToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeFlowLogsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeFlowLogsInput) GoString() string {
+ return s.String()
+}
+
+// Contains the output of DescribeFlowLogs.
+type DescribeFlowLogsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the flow logs.
+ FlowLogs []*FlowLog `locationName:"flowLogSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeFlowLogsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFlowLogsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeHosts. +type DescribeHostsInput struct { + _ struct{} `type:"structure"` + + // One or more filters. + // + // instance-type - The instance type size that the Dedicated host is configured + // to support. + // + // auto-placement - Whether auto-placement is enabled or disabled (on | + // off). + // + // host-reservation-id - The ID of the reservation associated with this + // host. + // + // client-token - The idempotency token you provided when you launched the + // instance. + // + // state - The allocation state of the Dedicated host (available | under-assessment + // | permanent-failure | released | released-permanent-failure). + // + // availability-zone - The Availability Zone of the host. + Filter []*Filter `locationName:"filter" locationNameList:"Filter" type:"list"` + + // The IDs of the Dedicated hosts. The IDs are used for targeted instance launches. + HostIds []*string `locationName:"hostId" locationNameList:"item" type:"list"` + + // The maximum number of results to return for the request in a single page. + // The remaining results can be seen by sending another request with the returned + // nextToken value. This value can be between 5 and 500; if maxResults is given + // a larger value than 500, you will receive an error. You cannot specify this + // parameter and the host IDs parameter in the same request. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The token to retrieve the next page of results. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeHostsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeHostsInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeHosts. +type DescribeHostsOutput struct { + _ struct{} `type:"structure"` + + // Information about the Dedicated hosts. + Hosts []*Host `locationName:"hostSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeHostsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeHostsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeIdFormat. +type DescribeIdFormatInput struct { + _ struct{} `type:"structure"` + + // The type of resource. + Resource *string `type:"string"` +} + +// String returns the string representation +func (s DescribeIdFormatInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeIdFormatInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeIdFormat.
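+// +// A usage sketch, illustrative only and not part of the generated file: it +// assumes an *EC2 client (for example, svc := ec2.New(session.New())) and the +// aws, fmt, and log packages, and shows how a caller might request the ID +// format for a resource type and read the Statuses field of this output type: +// +// out, err := svc.DescribeIdFormat(&ec2.DescribeIdFormatInput{ +// Resource: aws.String("instance"), +// }) +// if err != nil { +// log.Fatal(err) +// } +// for _, st := range out.Statuses { +// fmt.Printf("%s useLongIds=%v\n", aws.StringValue(st.Resource), aws.BoolValue(st.UseLongIds)) +// } +//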
+type DescribeIdFormatOutput struct { + _ struct{} `type:"structure"` + + // Information about the ID format for the resource. + Statuses []*IdFormat `locationName:"statusSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeIdFormatOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeIdFormatOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeIdentityIdFormat. +type DescribeIdentityIdFormatInput struct { + _ struct{} `type:"structure"` + + // The ARN of the principal, which can be an IAM role, IAM user, or the root + // user. + PrincipalArn *string `locationName:"principalArn" type:"string" required:"true"` + + // The type of resource. + Resource *string `locationName:"resource" type:"string"` +} + +// String returns the string representation +func (s DescribeIdentityIdFormatInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeIdentityIdFormatInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeIdentityIdFormatInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeIdentityIdFormatInput"} + if s.PrincipalArn == nil { + invalidParams.Add(request.NewErrParamRequired("PrincipalArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of DescribeIdentityIdFormat. +type DescribeIdentityIdFormatOutput struct { + _ struct{} `type:"structure"` + + // Information about the ID format for the resources. + Statuses []*IdFormat `locationName:"statusSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeIdentityIdFormatOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeIdentityIdFormatOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeImageAttribute. +type DescribeImageAttributeInput struct { + _ struct{} `type:"structure"` + + // The AMI attribute. + // + // Note: Depending on your account privileges, the blockDeviceMapping attribute + // may return a Client.AuthFailure error. If this happens, use DescribeImages + // to get information about the block device mapping for the AMI. + Attribute *string `type:"string" required:"true" enum:"ImageAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the AMI. + ImageId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeImageAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImageAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
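+// +// A caller-side sketch (an illustration, not generated code): with only +// Attribute set, Validate reports the missing required ImageId before any +// request is sent: +// +// input := &ec2.DescribeImageAttributeInput{Attribute: aws.String("description")} +// if err := input.Validate(); err != nil { +// log.Println(err) // ImageId is a required field +// } +//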
+func (s *DescribeImageAttributeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeImageAttributeInput"} + if s.Attribute == nil { + invalidParams.Add(request.NewErrParamRequired("Attribute")) + } + if s.ImageId == nil { + invalidParams.Add(request.NewErrParamRequired("ImageId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Describes an image attribute. +type DescribeImageAttributeOutput struct { + _ struct{} `type:"structure"` + + // One or more block device mapping entries. + BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` + + // A description for the AMI. + Description *AttributeValue `locationName:"description" type:"structure"` + + // The ID of the AMI. + ImageId *string `locationName:"imageId" type:"string"` + + // The kernel ID. + KernelId *AttributeValue `locationName:"kernel" type:"structure"` + + // One or more launch permissions. + LaunchPermissions []*LaunchPermission `locationName:"launchPermission" locationNameList:"item" type:"list"` + + // One or more product codes. + ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"` + + // The RAM disk ID. + RamdiskId *AttributeValue `locationName:"ramdisk" type:"structure"` + + // Indicates whether enhanced networking with the Intel 82599 Virtual Function + // interface is enabled. + SriovNetSupport *AttributeValue `locationName:"sriovNetSupport" type:"structure"` +} + +// String returns the string representation +func (s DescribeImageAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImageAttributeOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeImages. +type DescribeImagesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Scopes the images by users with explicit launch permissions. Specify an AWS + // account ID, self (the sender of the request), or all (public AMIs). + ExecutableUsers []*string `locationName:"ExecutableBy" locationNameList:"ExecutableBy" type:"list"` + + // One or more filters. + // + // architecture - The image architecture (i386 | x86_64). + // + // block-device-mapping.delete-on-termination - A Boolean value that indicates + // whether the Amazon EBS volume is deleted on instance termination. + // + // block-device-mapping.device-name - The device name for the EBS volume + // (for example, /dev/sdh). + // + // block-device-mapping.snapshot-id - The ID of the snapshot used for the + // EBS volume. + // + // block-device-mapping.volume-size - The volume size of the EBS volume, + // in GiB. + // + // block-device-mapping.volume-type - The volume type of the EBS volume + // (gp2 | io1 | st1 | sc1 | standard). + // + // description - The description of the image (provided during image creation). + // + // hypervisor - The hypervisor type (ovm | xen). + // + // image-id - The ID of the image. + // + // image-type - The image type (machine | kernel | ramdisk). + // + // is-public - A Boolean that indicates whether the image is public. + // + // kernel-id - The kernel ID. 
+ // + // manifest-location - The location of the image manifest. + // + // name - The name of the AMI (provided during image creation). + // + // owner-alias - The AWS account alias (for example, amazon). + // + // owner-id - The AWS account ID of the image owner. + // + // platform - The platform. To only list Windows-based AMIs, use windows. + // + // product-code - The product code. + // + // product-code.type - The type of the product code (devpay | marketplace). + // + // ramdisk-id - The RAM disk ID. + // + // root-device-name - The name of the root device volume (for example, /dev/sda1). + // + // root-device-type - The type of the root device volume (ebs | instance-store). + // + // state - The state of the image (available | pending | failed). + // + // state-reason-code - The reason code for the state change. + // + // state-reason-message - The message for the state change. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter + // is independent of the tag-key filter. + // + // virtualization-type - The virtualization type (paravirtual | hvm). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more image IDs. + // + // Default: Describes all images available to you. + ImageIds []*string `locationName:"ImageId" locationNameList:"ImageId" type:"list"` + + // Filters the images by the owner. Specify an AWS account ID, amazon (owner + // is Amazon), aws-marketplace (owner is AWS Marketplace), self (owner is the + // sender of the request). Omitting this option returns all images for which + // you have launch permissions, regardless of ownership. + Owners []*string `locationName:"Owner" locationNameList:"Owner" type:"list"` +} + +// String returns the string representation +func (s DescribeImagesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImagesInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeImages. +type DescribeImagesOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more images. + Images []*Image `locationName:"imagesSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeImagesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImagesOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeImportImageTasks. +type DescribeImportImageTasksInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `type:"boolean"` + + // Filter tasks using the task-state filter and one of the following values: + // active, completed, deleting, deleted. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // A list of import image task IDs. + ImportTaskIds []*string `locationName:"ImportTaskId" locationNameList:"ImportTaskId" type:"list"` + + // The maximum number of results to return in a single call. To retrieve the + // remaining results, make another call with the returned NextToken value. + MaxResults *int64 `type:"integer"` + + // A token that indicates the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeImportImageTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImportImageTasksInput) GoString() string { + return s.String() +} + +// Contains the output for DescribeImportImageTasks. +type DescribeImportImageTasksOutput struct { + _ struct{} `type:"structure"` + + // A list of zero or more import image tasks that are currently active or were + // completed or canceled in the previous 7 days. + ImportImageTasks []*ImportImageTask `locationName:"importImageTaskSet" locationNameList:"item" type:"list"` + + // The token to use to get the next page of results. This value is null when + // there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeImportImageTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImportImageTasksOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeImportSnapshotTasks. +type DescribeImportSnapshotTasksInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // A list of import snapshot task IDs. + ImportTaskIds []*string `locationName:"ImportTaskId" locationNameList:"ImportTaskId" type:"list"` + + // The maximum number of results to return in a single call. To retrieve the + // remaining results, make another call with the returned NextToken value. + MaxResults *int64 `type:"integer"` + + // A token that indicates the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeImportSnapshotTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImportSnapshotTasksInput) GoString() string { + return s.String() +} + +// Contains the output for DescribeImportSnapshotTasks. +type DescribeImportSnapshotTasksOutput struct { + _ struct{} `type:"structure"` + + // A list of zero or more import snapshot tasks that are currently active or + // were completed or canceled in the previous 7 days. + ImportSnapshotTasks []*ImportSnapshotTask `locationName:"importSnapshotTaskSet" locationNameList:"item" type:"list"` + + // The token to use to get the next page of results. This value is null when + // there are no more results to return. 
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeImportSnapshotTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImportSnapshotTasksOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeInstanceAttribute. +type DescribeInstanceAttributeInput struct { + _ struct{} `type:"structure"` + + // The instance attribute. + // + // Note: The enaSupport attribute is not supported at this time. + Attribute *string `locationName:"attribute" type:"string" required:"true" enum:"InstanceAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeInstanceAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeInstanceAttributeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeInstanceAttributeInput"} + if s.Attribute == nil { + invalidParams.Add(request.NewErrParamRequired("Attribute")) + } + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Describes an instance attribute. +type DescribeInstanceAttributeOutput struct { + _ struct{} `type:"structure"` + + // The block device mapping of the instance. + BlockDeviceMappings []*InstanceBlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` + + // If the value is true, you can't terminate the instance through the Amazon + // EC2 console, CLI, or API; otherwise, you can. + DisableApiTermination *AttributeBooleanValue `locationName:"disableApiTermination" type:"structure"` + + // Indicates whether the instance is optimized for EBS I/O. + EbsOptimized *AttributeBooleanValue `locationName:"ebsOptimized" type:"structure"` + + // Indicates whether enhanced networking with ENA is enabled. + EnaSupport *AttributeBooleanValue `locationName:"enaSupport" type:"structure"` + + // The security groups associated with the instance. + Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // Indicates whether an instance stops or terminates when you initiate shutdown + // from the instance (using the operating system command for system shutdown). + InstanceInitiatedShutdownBehavior *AttributeValue `locationName:"instanceInitiatedShutdownBehavior" type:"structure"` + + // The instance type. + InstanceType *AttributeValue `locationName:"instanceType" type:"structure"` + + // The kernel ID. + KernelId *AttributeValue `locationName:"kernel" type:"structure"` + + // A list of product codes. 
+ ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"` + + // The RAM disk ID. + RamdiskId *AttributeValue `locationName:"ramdisk" type:"structure"` + + // The name of the root device (for example, /dev/sda1 or /dev/xvda). + RootDeviceName *AttributeValue `locationName:"rootDeviceName" type:"structure"` + + // Indicates whether source/destination checking is enabled. A value of true + // means checking is enabled, and false means checking is disabled. This value + // must be false for a NAT instance to perform NAT. + SourceDestCheck *AttributeBooleanValue `locationName:"sourceDestCheck" type:"structure"` + + // Indicates whether enhanced networking with the Intel 82599 Virtual Function + // interface is enabled. + SriovNetSupport *AttributeValue `locationName:"sriovNetSupport" type:"structure"` + + // The user data. + UserData *AttributeValue `locationName:"userData" type:"structure"` +} + +// String returns the string representation +func (s DescribeInstanceAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceAttributeOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeInstanceStatus. +type DescribeInstanceStatusInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // availability-zone - The Availability Zone of the instance. + // + // event.code - The code for the scheduled event (instance-reboot | system-reboot + // | system-maintenance | instance-retirement | instance-stop). + // + // event.description - A description of the event. + // + // event.not-after - The latest end time for the scheduled event (for example, + // 2014-09-15T17:15:20.000Z). + // + // event.not-before - The earliest start time for the scheduled event (for + // example, 2014-09-15T17:15:20.000Z). + // + // instance-state-code - The code for the instance state, as a 16-bit unsigned + // integer. The high byte is an opaque internal value and should be ignored. + // The low byte is set based on the state represented. The valid values are + // 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), + // and 80 (stopped). + // + // instance-state-name - The state of the instance (pending | running | + // shutting-down | terminated | stopping | stopped). + // + // instance-status.reachability - Filters on instance status where the name + // is reachability (passed | failed | initializing | insufficient-data). + // + // instance-status.status - The status of the instance (ok | impaired | + // initializing | insufficient-data | not-applicable). + // + // system-status.reachability - Filters on system status where the name + // is reachability (passed | failed | initializing | insufficient-data). + // + // system-status.status - The system status of the instance (ok | impaired + // | initializing | insufficient-data | not-applicable). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // When true, includes the health status for all instances. When false, includes + // the health status for running instances only. 
+ // + // Default: false + IncludeAllInstances *bool `locationName:"includeAllInstances" type:"boolean"` + + // One or more instance IDs. + // + // Default: Describes all your instances. + // + // Constraints: Maximum 100 explicitly specified instance IDs. + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list"` + + // The maximum number of results to return in a single call. To retrieve the + // remaining results, make another call with the returned NextToken value. This + // value can be between 5 and 1000. You cannot specify this parameter and the + // instance IDs parameter in the same call. + MaxResults *int64 `type:"integer"` + + // The token to retrieve the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeInstanceStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceStatusInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeInstanceStatus. +type DescribeInstanceStatusOutput struct { + _ struct{} `type:"structure"` + + // One or more instance status descriptions. + InstanceStatuses []*InstanceStatus `locationName:"instanceStatusSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeInstanceStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceStatusOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeInstances. +type DescribeInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // affinity - The affinity setting for an instance running on a Dedicated + // host (default | host). + // + // architecture - The instance architecture (i386 | x86_64). + // + // availability-zone - The Availability Zone of the instance. + // + // block-device-mapping.attach-time - The attach time for an EBS volume + // mapped to the instance, for example, 2010-09-15T17:15:20.000Z. + // + // block-device-mapping.delete-on-termination - A Boolean that indicates + // whether the EBS volume is deleted on instance termination. + // + // block-device-mapping.device-name - The device name for the EBS volume + // (for example, /dev/sdh or xvdh). + // + // block-device-mapping.status - The status for the EBS volume (attaching + // | attached | detaching | detached). + // + // block-device-mapping.volume-id - The volume ID of the EBS volume. + // + // client-token - The idempotency token you provided when you launched the + // instance. + // + // dns-name - The public DNS name of the instance. + // + // group-id - The ID of the security group for the instance. EC2-Classic + // only. + // + // group-name - The name of the security group for the instance. EC2-Classic + // only. 
+ // + // host-id - The ID of the Dedicated host on which the instance is running, + // if applicable. + // + // hypervisor - The hypervisor type of the instance (ovm | xen). + // + // iam-instance-profile.arn - The instance profile associated with the instance. + // Specified as an ARN. + // + // image-id - The ID of the image used to launch the instance. + // + // instance-id - The ID of the instance. + // + // instance-lifecycle - Indicates whether this is a Spot Instance or a Scheduled + // Instance (spot | scheduled). + // + // instance-state-code - The state of the instance, as a 16-bit unsigned + // integer. The high byte is an opaque internal value and should be ignored. + // The low byte is set based on the state represented. The valid values are: + // 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), + // and 80 (stopped). + // + // instance-state-name - The state of the instance (pending | running | + // shutting-down | terminated | stopping | stopped). + // + // instance-type - The type of instance (for example, t2.micro). + // + // instance.group-id - The ID of the security group for the instance. + // + // instance.group-name - The name of the security group for the instance. + // + // ip-address - The public IP address of the instance. + // + // kernel-id - The kernel ID. + // + // key-name - The name of the key pair used when the instance was launched. + // + // launch-index - When launching multiple instances, this is the index for + // the instance in the launch group (for example, 0, 1, 2, and so on). + // + // launch-time - The time when the instance was launched. + // + // monitoring-state - Indicates whether monitoring is enabled for the instance + // (disabled | enabled). + // + // owner-id - The AWS account ID of the instance owner. + // + // placement-group-name - The name of the placement group for the instance. + // + // platform - The platform. Use windows if you have Windows instances; otherwise, + // leave blank. + // + // private-dns-name - The private DNS name of the instance. + // + // private-ip-address - The private IP address of the instance. + // + // product-code - The product code associated with the AMI used to launch + // the instance. + // + // product-code.type - The type of product code (devpay | marketplace). + // + // ramdisk-id - The RAM disk ID. + // + // reason - The reason for the current state of the instance (for example, + // shows "User Initiated [date]" when you stop or terminate the instance). Similar + // to the state-reason-code filter. + // + // requester-id - The ID of the entity that launched the instance on your + // behalf (for example, AWS Management Console, Auto Scaling, and so on). + // + // reservation-id - The ID of the instance's reservation. A reservation + // ID is created any time you launch an instance. A reservation ID has a one-to-one + // relationship with an instance launch request, but can be associated with + // more than one instance if you launch multiple instances using the same launch + // request. For example, if you launch one instance, you'll get one reservation + // ID. If you launch ten instances using the same launch request, you'll also + // get one reservation ID. + // + // root-device-name - The name of the root device for the instance (for + // example, /dev/sda1 or /dev/xvda). + // + // root-device-type - The type of root device that the instance uses (ebs + // | instance-store).
+ // + // source-dest-check - Indicates whether the instance performs source/destination + // checking. A value of true means that checking is enabled, and false means + // checking is disabled. The value must be false for the instance to perform + // network address translation (NAT) in your VPC. + // + // spot-instance-request-id - The ID of the Spot instance request. + // + // state-reason-code - The reason code for the state change. + // + // state-reason-message - A message that describes the state change. + // + // subnet-id - The ID of the subnet for the instance. + // + // tag:key=value - The key/value combination of a tag assigned to the resource, + // where tag:key is the tag's key. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter + // is independent of the tag-key filter. + // + // tenancy - The tenancy of an instance (dedicated | default | host). + // + // virtualization-type - The virtualization type of the instance (paravirtual + // | hvm). + // + // vpc-id - The ID of the VPC that the instance is running in. + // + // network-interface.description - The description of the network interface. + // + // network-interface.subnet-id - The ID of the subnet for the network interface. + // + // network-interface.vpc-id - The ID of the VPC for the network interface. + // + // network-interface.network-interface-id - The ID of the network interface. + // + // network-interface.owner-id - The ID of the owner of the network interface. + // + // network-interface.availability-zone - The Availability Zone for the network + // interface. + // + // network-interface.requester-id - The requester ID for the network interface. + // + // network-interface.requester-managed - Indicates whether the network interface + // is being managed by AWS. + // + // network-interface.status - The status of the network interface (available + // | in-use). + // + // network-interface.mac-address - The MAC address of the network interface. + // + // network-interface.private-dns-name - The private DNS name of the network + // interface. + // + // network-interface.source-dest-check - Whether the network interface performs + // source/destination checking. A value of true means checking is enabled, and + // false means checking is disabled. The value must be false for the network + // interface to perform network address translation (NAT) in your VPC. + // + // network-interface.group-id - The ID of a security group associated with + // the network interface. + // + // network-interface.group-name - The name of a security group associated + // with the network interface. + // + // network-interface.attachment.attachment-id - The ID of the interface + // attachment. + // + // network-interface.attachment.instance-id - The ID of the instance to + // which the network interface is attached. + // + // network-interface.attachment.instance-owner-id - The owner ID of the + // instance to which the network interface is attached.
+ // + // network-interface.addresses.private-ip-address - The private IP address + // associated with the network interface. + // + // network-interface.attachment.device-index - The device index to which + // the network interface is attached. + // + // network-interface.attachment.status - The status of the attachment (attaching + // | attached | detaching | detached). + // + // network-interface.attachment.attach-time - The time that the network + // interface was attached to an instance. + // + // network-interface.attachment.delete-on-termination - Specifies whether + // the attachment is deleted when an instance is terminated. + // + // network-interface.addresses.primary - Specifies whether the IP address + // of the network interface is the primary private IP address. + // + // network-interface.addresses.association.public-ip - The ID of the association + // of an Elastic IP address with a network interface. + // + // network-interface.addresses.association.ip-owner-id - The owner ID of + // the private IP address associated with the network interface. + // + // association.public-ip - The address of the Elastic IP address bound to + // the network interface. + // + // association.ip-owner-id - The owner of the Elastic IP address associated + // with the network interface. + // + // association.allocation-id - The allocation ID returned when you allocated + // the Elastic IP address for your network interface. + // + // association.association-id - The association ID returned when the network + // interface was associated with an IP address. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more instance IDs. + // + // Default: Describes all your instances. + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list"` + + // The maximum number of results to return in a single call. To retrieve the + // remaining results, make another call with the returned NextToken value. This + // value can be between 5 and 1000. You cannot specify this parameter and the + // instance IDs parameter or tag filters in the same call. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The token to request the next page of results. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstancesInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeInstances. +type DescribeInstancesOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Zero or more reservations. + Reservations []*Reservation `locationName:"reservationSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstancesOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeInternetGateways. 
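+// +// A usage sketch (illustrative; assumes an *EC2 client named svc, the aws +// helper package, and a placeholder VPC ID): one way to populate the Filters +// field below to find the Internet gateway attached to a given VPC: +// +// out, err := svc.DescribeInternetGateways(&ec2.DescribeInternetGatewaysInput{ +// Filters: []*ec2.Filter{{ +// Name: aws.String("attachment.vpc-id"), +// Values: []*string{aws.String("vpc-12345678")}, +// }}, +// }) +// if err != nil { +// log.Fatal(err) +// } +// for _, igw := range out.InternetGateways { +// fmt.Println(aws.StringValue(igw.InternetGatewayId)) +// } +//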
+type DescribeInternetGatewaysInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // attachment.state - The current state of the attachment between the gateway + // and the VPC (available). Present only if a VPC is attached. + // + // attachment.vpc-id - The ID of an attached VPC. + // + // internet-gateway-id - The ID of the Internet gateway. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter + // is independent of the tag-key filter. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more Internet gateway IDs. + // + // Default: Describes all your Internet gateways. + InternetGatewayIds []*string `locationName:"internetGatewayId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeInternetGatewaysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInternetGatewaysInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeInternetGateways. +type DescribeInternetGatewaysOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more Internet gateways. + InternetGateways []*InternetGateway `locationName:"internetGatewaySet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeInternetGatewaysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInternetGatewaysOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeKeyPairs. +type DescribeKeyPairsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // fingerprint - The fingerprint of the key pair. + // + // key-name - The name of the key pair. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more key pair names. + // + // Default: Describes all your key pairs. 
+ KeyNames []*string `locationName:"KeyName" locationNameList:"KeyName" type:"list"` +} + +// String returns the string representation +func (s DescribeKeyPairsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeKeyPairsInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeKeyPairs. +type DescribeKeyPairsOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more key pairs. + KeyPairs []*KeyPairInfo `locationName:"keySet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeKeyPairsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeKeyPairsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeMovingAddresses. +type DescribeMovingAddressesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // moving-status - The status of the Elastic IP address (MovingToVpc | RestoringToClassic). + Filters []*Filter `locationName:"filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return for the request in a single page. + // The remaining results of the initial request can be seen by sending another + // request with the returned NextToken value. This value can be between 5 and + // 1000; if MaxResults is given a value outside of this range, an error is returned. + // + // Default: If no value is provided, the default is 1000. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The token to use to retrieve the next page of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // One or more Elastic IP addresses. + PublicIps []*string `locationName:"publicIp" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeMovingAddressesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMovingAddressesInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeMovingAddresses. +type DescribeMovingAddressesOutput struct { + _ struct{} `type:"structure"` + + // The status for each Elastic IP address. + MovingAddressStatuses []*MovingAddressStatus `locationName:"movingAddressStatusSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeMovingAddressesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMovingAddressesOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeNatGateways. +type DescribeNatGatewaysInput struct { + _ struct{} `type:"structure"` + + // One or more filters. + // + // nat-gateway-id - The ID of the NAT gateway. 
+ // + // state - The state of the NAT gateway (pending | failed | available | + // deleting | deleted). + // + // subnet-id - The ID of the subnet in which the NAT gateway resides. + // + // vpc-id - The ID of the VPC in which the NAT gateway resides. + Filter []*Filter `locationNameList:"Filter" type:"list"` + + // The maximum number of items to return for this request. The request returns + // a token that you can specify in a subsequent call to get the next set of + // results. + // + // Constraint: If the value specified is greater than 1000, we return only + // 1000 items. + MaxResults *int64 `type:"integer"` + + // One or more NAT gateway IDs. + NatGatewayIds []*string `locationName:"NatGatewayId" locationNameList:"item" type:"list"` + + // The token to retrieve the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeNatGatewaysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNatGatewaysInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeNatGateways. +type DescribeNatGatewaysOutput struct { + _ struct{} `type:"structure"` + + // Information about the NAT gateways. + NatGateways []*NatGateway `locationName:"natGatewaySet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeNatGatewaysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNatGatewaysOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeNetworkAcls. +type DescribeNetworkAclsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // association.association-id - The ID of an association ID for the ACL. + // + // association.network-acl-id - The ID of the network ACL involved in the + // association. + // + // association.subnet-id - The ID of the subnet involved in the association. + // + // default - Indicates whether the ACL is the default network ACL for the + // VPC. + // + // entry.cidr - The CIDR range specified in the entry. + // + // entry.egress - Indicates whether the entry applies to egress traffic. + // + // entry.icmp.code - The ICMP code specified in the entry, if any. + // + // entry.icmp.type - The ICMP type specified in the entry, if any. + // + // entry.port-range.from - The start of the port range specified in the + // entry. + // + // entry.port-range.to - The end of the port range specified in the entry. + // + // entry.protocol - The protocol specified in the entry (tcp | udp | icmp + // or a protocol number). + // + // entry.rule-action - Allows or denies the matching traffic (allow | deny). + // + // entry.rule-number - The number of an entry (in other words, rule) in + // the ACL's set of entries. + // + // network-acl-id - The ID of the network ACL. 
+ // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter + // is independent of the tag-key filter. + // + // vpc-id - The ID of the VPC for the network ACL. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more network ACL IDs. + // + // Default: Describes all your network ACLs. + NetworkAclIds []*string `locationName:"NetworkAclId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeNetworkAclsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNetworkAclsInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeNetworkAcls. +type DescribeNetworkAclsOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more network ACLs. + NetworkAcls []*NetworkAcl `locationName:"networkAclSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeNetworkAclsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNetworkAclsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeNetworkInterfaceAttribute. +type DescribeNetworkInterfaceAttributeInput struct { + _ struct{} `type:"structure"` + + // The attribute of the network interface. + Attribute *string `locationName:"attribute" type:"string" enum:"NetworkInterfaceAttribute"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeNetworkInterfaceAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNetworkInterfaceAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeNetworkInterfaceAttributeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeNetworkInterfaceAttributeInput"} + if s.NetworkInterfaceId == nil { + invalidParams.Add(request.NewErrParamRequired("NetworkInterfaceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of DescribeNetworkInterfaceAttribute. +type DescribeNetworkInterfaceAttributeOutput struct { + _ struct{} `type:"structure"` + + // The attachment (if any) of the network interface. 
+ Attachment *NetworkInterfaceAttachment `locationName:"attachment" type:"structure"` + + // The description of the network interface. + Description *AttributeValue `locationName:"description" type:"structure"` + + // The security groups associated with the network interface. + Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // Indicates whether source/destination checking is enabled. + SourceDestCheck *AttributeBooleanValue `locationName:"sourceDestCheck" type:"structure"` +} + +// String returns the string representation +func (s DescribeNetworkInterfaceAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNetworkInterfaceAttributeOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeNetworkInterfaces. +type DescribeNetworkInterfacesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // addresses.private-ip-address - The private IP addresses associated with + // the network interface. + // + // addresses.primary - Whether the private IP address is the primary IP + // address associated with the network interface. + // + // addresses.association.public-ip - The association ID returned when the + // network interface was associated with the Elastic IP address. + // + // addresses.association.owner-id - The owner ID of the addresses associated + // with the network interface. + // + // association.association-id - The association ID returned when the network + // interface was associated with an IP address. + // + // association.allocation-id - The allocation ID returned when you allocated + // the Elastic IP address for your network interface. + // + // association.ip-owner-id - The owner of the Elastic IP address associated + // with the network interface. + // + // association.public-ip - The address of the Elastic IP address bound to + // the network interface. + // + // association.public-dns-name - The public DNS name for the network interface. + // + // attachment.attachment-id - The ID of the interface attachment. + // + // attachment.attach-time - The time that the network interface was attached + // to an instance. + // + // attachment.delete-on-termination - Indicates whether the attachment is + // deleted when an instance is terminated. + // + // attachment.device-index - The device index to which the network interface + // is attached. + // + // attachment.instance-id - The ID of the instance to which the network + // interface is attached. + // + // attachment.instance-owner-id - The owner ID of the instance to which + // the network interface is attached. + // + // attachment.nat-gateway-id - The ID of the NAT gateway to which the network + // interface is attached. + // + // attachment.status - The status of the attachment (attaching | attached + // | detaching | detached). + // + // availability-zone - The Availability Zone of the network interface. + // + // description - The description of the network interface.
+ // + // group-id - The ID of a security group associated with the network interface. + // + // group-name - The name of a security group associated with the network + // interface. + // + // mac-address - The MAC address of the network interface. + // + // network-interface-id - The ID of the network interface. + // + // owner-id - The AWS account ID of the network interface owner. + // + // private-ip-address - The private IP address or addresses of the network + // interface. + // + // private-dns-name - The private DNS name of the network interface. + // + // requester-id - The ID of the entity that launched the instance on your + // behalf (for example, AWS Management Console, Auto Scaling, and so on). + // + // requester-managed - Indicates whether the network interface is being + // managed by an AWS service (for example, AWS Management Console, Auto Scaling, + // and so on). + // + // source-dest-check - Indicates whether the network interface performs + // source/destination checking. A value of true means checking is enabled, and + // false means checking is disabled. The value must be false for the network + // interface to perform network address translation (NAT) in your VPC. + // + // status - The status of the network interface. If the network interface + // is not attached to an instance, the status is available; if a network interface + // is attached to an instance, the status is in-use. + // + // subnet-id - The ID of the subnet for the network interface. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter + // is independent of the tag-key filter. + // + // vpc-id - The ID of the VPC for the network interface. + Filters []*Filter `locationName:"filter" locationNameList:"Filter" type:"list"` + + // One or more network interface IDs. + // + // Default: Describes all your network interfaces. + NetworkInterfaceIds []*string `locationName:"NetworkInterfaceId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeNetworkInterfacesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNetworkInterfacesInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeNetworkInterfaces. +type DescribeNetworkInterfacesOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more network interfaces. + NetworkInterfaces []*NetworkInterface `locationName:"networkInterfaceSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeNetworkInterfacesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNetworkInterfacesOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribePlacementGroups.
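+// +// A usage sketch (illustrative; assumes an *EC2 client named svc): filtering +// placement groups by state through the Filters field documented below: +// +// out, err := svc.DescribePlacementGroups(&ec2.DescribePlacementGroupsInput{ +// Filters: []*ec2.Filter{{ +// Name: aws.String("state"), +// Values: []*string{aws.String("available")}, +// }}, +// }) +// if err != nil { +// log.Fatal(err) +// } +// for _, pg := range out.PlacementGroups { +// fmt.Println(aws.StringValue(pg.GroupName)) +// } +//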
+type DescribePlacementGroupsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // group-name - The name of the placement group. + // + // state - The state of the placement group (pending | available | deleting + // | deleted). + // + // strategy - The strategy of the placement group (cluster). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more placement group names. + // + // Default: Describes all your placement groups, or only those otherwise specified. + GroupNames []*string `locationName:"groupName" type:"list"` +} + +// String returns the string representation +func (s DescribePlacementGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePlacementGroupsInput) GoString() string { + return s.String() +} + +// Contains the output of DescribePlacementGroups. +type DescribePlacementGroupsOutput struct { + _ struct{} `type:"structure"` + + // One or more placement groups. + PlacementGroups []*PlacementGroup `locationName:"placementGroupSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribePlacementGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePlacementGroupsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribePrefixLists. +type DescribePrefixListsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // prefix-list-id: The ID of a prefix list. + // + // prefix-list-name: The name of a prefix list. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of items to return for this request. The request returns + // a token that you can specify in a subsequent call to get the next set of + // results. + // + // Constraint: If the value specified is greater than 1000, we return only + // 1000 items. + MaxResults *int64 `type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a prior call.) + NextToken *string `type:"string"` + + // One or more prefix list IDs. + PrefixListIds []*string `locationName:"PrefixListId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribePrefixListsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePrefixListsInput) GoString() string { + return s.String() +} + +// Contains the output of DescribePrefixLists. +type DescribePrefixListsOutput struct { + _ struct{} `type:"structure"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. 
+ NextToken *string `locationName:"nextToken" type:"string"` + + // All available prefix lists. + PrefixLists []*PrefixList `locationName:"prefixListSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribePrefixListsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePrefixListsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeRegions. +type DescribeRegionsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // endpoint - The endpoint of the region (for example, ec2.us-east-1.amazonaws.com). + // + // region-name - The name of the region (for example, us-east-1). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The names of one or more regions. + RegionNames []*string `locationName:"RegionName" locationNameList:"RegionName" type:"list"` +} + +// String returns the string representation +func (s DescribeRegionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRegionsInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeRegions. +type DescribeRegionsOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more regions. + Regions []*Region `locationName:"regionInfo" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeRegionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRegionsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeReservedInstances. +type DescribeReservedInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // availability-zone - The Availability Zone where the Reserved Instance + // can be used. + // + // duration - The duration of the Reserved Instance (one year or three years), + // in seconds (31536000 | 94608000). + // + // end - The time when the Reserved Instance expires (for example, 2015-08-07T11:54:42.000Z). + // + // fixed-price - The purchase price of the Reserved Instance (for example, + // 9800.0). + // + // instance-type - The instance type that is covered by the reservation. + // + // product-description - The Reserved Instance product platform description. 
+ // Instances that include (Amazon VPC) in the product platform description will + // only be displayed to EC2-Classic account holders and are for use with Amazon + // VPC (Linux/UNIX | Linux/UNIX (Amazon VPC) | SUSE Linux | SUSE Linux (Amazon + // VPC) | Red Hat Enterprise Linux | Red Hat Enterprise Linux (Amazon VPC) | + // Windows | Windows (Amazon VPC) | Windows with SQL Server Standard | Windows + // with SQL Server Standard (Amazon VPC) | Windows with SQL Server Web | Windows + // with SQL Server Web (Amazon VPC) | Windows with SQL Server Enterprise | Windows + // with SQL Server Enterprise (Amazon VPC)). + // + // reserved-instances-id - The ID of the Reserved Instance. + // + // start - The time at which the Reserved Instance purchase request was + // placed (for example, 2014-08-07T11:54:42.000Z). + // + // state - The state of the Reserved Instance (payment-pending | active + // | payment-failed | retired). + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter + // is independent of the tag-key filter. + // + // usage-price - The usage price of the Reserved Instance, per hour (for + // example, 0.84). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The Reserved Instance offering type. If you are using tools that predate + // the 2011-11-01 API version, you only have access to the Medium Utilization + // Reserved Instance offering type. + OfferingType *string `locationName:"offeringType" type:"string" enum:"OfferingTypeValues"` + + // One or more Reserved Instance IDs. + // + // Default: Describes all your Reserved Instances, or only those otherwise + // specified. + ReservedInstancesIds []*string `locationName:"ReservedInstancesId" locationNameList:"ReservedInstancesId" type:"list"` +} + +// String returns the string representation +func (s DescribeReservedInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedInstancesInput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeReservedInstancesListings. +type DescribeReservedInstancesListingsInput struct { + _ struct{} `type:"structure"` + + // One or more filters. + // + // reserved-instances-id - The ID of the Reserved Instances. + // + // reserved-instances-listing-id - The ID of the Reserved Instances listing. + // + // status - The status of the Reserved Instance listing (pending | active + // | cancelled | closed). + // + // status-message - The reason for the status. + Filters []*Filter `locationName:"filters" locationNameList:"Filter" type:"list"` + + // One or more Reserved Instance IDs. + ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"` + + // One or more Reserved Instance listing IDs. 
+ ReservedInstancesListingId *string `locationName:"reservedInstancesListingId" type:"string"` +} + +// String returns the string representation +func (s DescribeReservedInstancesListingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedInstancesListingsInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeReservedInstancesListings. +type DescribeReservedInstancesListingsOutput struct { + _ struct{} `type:"structure"` + + // Information about the Reserved Instance listing. + ReservedInstancesListings []*ReservedInstancesListing `locationName:"reservedInstancesListingsSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeReservedInstancesListingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedInstancesListingsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeReservedInstancesModifications. +type DescribeReservedInstancesModificationsInput struct { + _ struct{} `type:"structure"` + + // One or more filters. + // + // client-token - The idempotency token for the modification request. + // + // create-date - The time when the modification request was created. + // + // effective-date - The time when the modification becomes effective. + // + // modification-result.reserved-instances-id - The ID for the Reserved Instances + // created as part of the modification request. This ID is only available when + // the status of the modification is fulfilled. + // + // modification-result.target-configuration.availability-zone - The Availability + // Zone for the new Reserved Instances. + // + // modification-result.target-configuration.instance-count - The number + // of new Reserved Instances. + // + // modification-result.target-configuration.instance-type - The instance + // type of the new Reserved Instances. + // + // modification-result.target-configuration.platform - The network platform + // of the new Reserved Instances (EC2-Classic | EC2-VPC). + // + // reserved-instances-id - The ID of the Reserved Instances modified. + // + // reserved-instances-modification-id - The ID of the modification request. + // + // status - The status of the Reserved Instances modification request (processing + // | fulfilled | failed). + // + // status-message - The reason for the status. + // + // update-date - The time when the modification request was last updated. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The token to retrieve the next page of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // IDs for the submitted modification request. + ReservedInstancesModificationIds []*string `locationName:"ReservedInstancesModificationId" locationNameList:"ReservedInstancesModificationId" type:"list"` +} + +// String returns the string representation +func (s DescribeReservedInstancesModificationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedInstancesModificationsInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeReservedInstancesModifications. +type DescribeReservedInstancesModificationsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. 
This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The Reserved Instance modification information. + ReservedInstancesModifications []*ReservedInstancesModification `locationName:"reservedInstancesModificationsSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeReservedInstancesModificationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedInstancesModificationsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeReservedInstancesOfferings. +type DescribeReservedInstancesOfferingsInput struct { + _ struct{} `type:"structure"` + + // The Availability Zone in which the Reserved Instance can be used. + AvailabilityZone *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // availability-zone - The Availability Zone where the Reserved Instance + // can be used. + // + // duration - The duration of the Reserved Instance (for example, one year + // or three years), in seconds (31536000 | 94608000). + // + // fixed-price - The purchase price of the Reserved Instance (for example, + // 9800.0). + // + // instance-type - The instance type that is covered by the reservation. + // + // marketplace - Set to true to show only Reserved Instance Marketplace + // offerings. When this filter is not used, which is the default behavior, all + // offerings from both AWS and the Reserved Instance Marketplace are listed. + // + // product-description - The Reserved Instance product platform description. + // Instances that include (Amazon VPC) in the product platform description will + // only be displayed to EC2-Classic account holders and are for use with Amazon + // VPC. (Linux/UNIX | Linux/UNIX (Amazon VPC) | SUSE Linux | SUSE Linux (Amazon + // VPC) | Red Hat Enterprise Linux | Red Hat Enterprise Linux (Amazon VPC) | + // Windows | Windows (Amazon VPC) | Windows with SQL Server Standard | Windows + // with SQL Server Standard (Amazon VPC) | Windows with SQL Server Web | Windows + // with SQL Server Web (Amazon VPC) | Windows with SQL Server Enterprise | Windows + // with SQL Server Enterprise (Amazon VPC)) + // + // reserved-instances-offering-id - The Reserved Instances offering ID. + // + // usage-price - The usage price of the Reserved Instance, per hour (for + // example, 0.84). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // Include Reserved Instance Marketplace offerings in the response. + IncludeMarketplace *bool `type:"boolean"` + + // The tenancy of the instances covered by the reservation. A Reserved Instance + // with a tenancy of dedicated is applied to instances that run in a VPC on + // single-tenant hardware (i.e., Dedicated Instances). + // + // Default: default + InstanceTenancy *string `locationName:"instanceTenancy" type:"string" enum:"Tenancy"` + + // The instance type that the reservation will cover (for example, m1.small). 
+ // For more information, see Instance Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) + // in the Amazon Elastic Compute Cloud User Guide. + InstanceType *string `type:"string" enum:"InstanceType"` + + // The maximum duration (in seconds) to filter when searching for offerings. + // + // Default: 94608000 (3 years) + MaxDuration *int64 `type:"long"` + + // The maximum number of instances to filter when searching for offerings. + // + // Default: 20 + MaxInstanceCount *int64 `type:"integer"` + + // The maximum number of results to return for the request in a single page. + // The remaining results of the initial request can be seen by sending another + // request with the returned NextToken value. The maximum is 100. + // + // Default: 100 + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The minimum duration (in seconds) to filter when searching for offerings. + // + // Default: 2592000 (1 month) + MinDuration *int64 `type:"long"` + + // The token to retrieve the next page of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // The Reserved Instance offering type. If you are using tools that predate + // the 2011-11-01 API version, you only have access to the Medium Utilization + // Reserved Instance offering type. + OfferingType *string `locationName:"offeringType" type:"string" enum:"OfferingTypeValues"` + + // The Reserved Instance product platform description. Instances that include + // (Amazon VPC) in the description are for use with Amazon VPC. + ProductDescription *string `type:"string" enum:"RIProductDescription"` + + // One or more Reserved Instances offering IDs. + ReservedInstancesOfferingIds []*string `locationName:"ReservedInstancesOfferingId" type:"list"` +} + +// String returns the string representation +func (s DescribeReservedInstancesOfferingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedInstancesOfferingsInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeReservedInstancesOfferings. +type DescribeReservedInstancesOfferingsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // A list of Reserved Instances offerings. + ReservedInstancesOfferings []*ReservedInstancesOffering `locationName:"reservedInstancesOfferingsSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeReservedInstancesOfferingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedInstancesOfferingsOutput) GoString() string { + return s.String() +} + +// Contains the output for DescribeReservedInstances. +type DescribeReservedInstancesOutput struct { + _ struct{} `type:"structure"` + + // A list of Reserved Instances. + ReservedInstances []*ReservedInstances `locationName:"reservedInstancesSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeReservedInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedInstancesOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeRouteTables. 
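+//
+// For example (an illustrative sketch, not part of the generated code; it
+// assumes an initialized *EC2 client named svc and a hypothetical VPC ID),
+// the main route table of a VPC can be found with two of the filters documented
+// below:
+//
+//	out, err := svc.DescribeRouteTables(&ec2.DescribeRouteTablesInput{
+//		Filters: []*ec2.Filter{
+//			{Name: aws.String("vpc-id"), Values: []*string{aws.String("vpc-example")}},
+//			{Name: aws.String("association.main"), Values: []*string{aws.String("true")}},
+//		},
+//	})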
+type DescribeRouteTablesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // association.route-table-association-id - The ID of an association ID + // for the route table. + // + // association.route-table-id - The ID of the route table involved in the + // association. + // + // association.subnet-id - The ID of the subnet involved in the association. + // + // association.main - Indicates whether the route table is the main route + // table for the VPC (true | false). + // + // route-table-id - The ID of the route table. + // + // route.destination-cidr-block - The CIDR range specified in a route in + // the table. + // + // route.destination-prefix-list-id - The ID (prefix) of the AWS service + // specified in a route in the table. + // + // route.gateway-id - The ID of a gateway specified in a route in the table. + // + // route.instance-id - The ID of an instance specified in a route in the + // table. + // + // route.nat-gateway-id - The ID of a NAT gateway. + // + // route.origin - Describes how the route was created. CreateRouteTable + // indicates that the route was automatically created when the route table was + // created; CreateRoute indicates that the route was manually added to the route + // table; EnableVgwRoutePropagation indicates that the route was propagated + // by route propagation. + // + // route.state - The state of a route in the route table (active | blackhole). + // The blackhole state indicates that the route's target isn't available (for + // example, the specified gateway isn't attached to the VPC, the specified NAT + // instance has been terminated, and so on). + // + // route.vpc-peering-connection-id - The ID of a VPC peering connection + // specified in a route in the table. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter + // is independent of the tag-key filter. + // + // vpc-id - The ID of the VPC for the route table. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more route table IDs. + // + // Default: Describes all your route tables. + RouteTableIds []*string `locationName:"RouteTableId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeRouteTablesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRouteTablesInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeRouteTables. +type DescribeRouteTablesOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more route tables. 
+ RouteTables []*RouteTable `locationName:"routeTableSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeRouteTablesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRouteTablesOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeScheduledInstanceAvailability. +type DescribeScheduledInstanceAvailabilityInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // availability-zone - The Availability Zone (for example, us-west-2a). + // + // instance-type - The instance type (for example, c4.large). + // + // network-platform - The network platform (EC2-Classic or EC2-VPC). + // + // platform - The platform (Linux/UNIX or Windows). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The time period for the first schedule to start. + FirstSlotStartTimeRange *SlotDateTimeRangeRequest `type:"structure" required:"true"` + + // The maximum number of results to return in a single call. This value can + // be between 5 and 300. The default value is 300. To retrieve the remaining + // results, make another call with the returned NextToken value. + MaxResults *int64 `type:"integer"` + + // The maximum available duration, in hours. This value must be greater than + // MinSlotDurationInHours and less than 1,720. + MaxSlotDurationInHours *int64 `type:"integer"` + + // The minimum available duration, in hours. The minimum required duration is + // 1,200 hours per year. For example, the minimum daily schedule is 4 hours, + // the minimum weekly schedule is 24 hours, and the minimum monthly schedule + // is 100 hours. + MinSlotDurationInHours *int64 `type:"integer"` + + // The token for the next set of results. + NextToken *string `type:"string"` + + // The schedule recurrence. + Recurrence *ScheduledInstanceRecurrenceRequest `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DescribeScheduledInstanceAvailabilityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScheduledInstanceAvailabilityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeScheduledInstanceAvailabilityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeScheduledInstanceAvailabilityInput"} + if s.FirstSlotStartTimeRange == nil { + invalidParams.Add(request.NewErrParamRequired("FirstSlotStartTimeRange")) + } + if s.Recurrence == nil { + invalidParams.Add(request.NewErrParamRequired("Recurrence")) + } + if s.FirstSlotStartTimeRange != nil { + if err := s.FirstSlotStartTimeRange.Validate(); err != nil { + invalidParams.AddNested("FirstSlotStartTimeRange", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of DescribeScheduledInstanceAvailability. 
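+//
+// For example (an illustrative sketch, not part of the generated code; it
+// assumes an initialized *EC2 client named svc and hypothetical schedule
+// values), the two required input fields checked by Validate above must be
+// set, and the available slots come back in ScheduledInstanceAvailabilitySet:
+//
+//	out, err := svc.DescribeScheduledInstanceAvailability(&ec2.DescribeScheduledInstanceAvailabilityInput{
+//		FirstSlotStartTimeRange: &ec2.SlotDateTimeRangeRequest{
+//			EarliestTime: aws.Time(time.Now()),
+//			LatestTime:   aws.Time(time.Now().AddDate(0, 0, 7)),
+//		},
+//		Recurrence: &ec2.ScheduledInstanceRecurrenceRequest{
+//			Frequency: aws.String("Weekly"),
+//			Interval:  aws.Int64(1),
+//		},
+//	})
+//	if err == nil {
+//		for _, a := range out.ScheduledInstanceAvailabilitySet {
+//			fmt.Println(aws.StringValue(a.PurchaseToken))
+//		}
+//	}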
+type DescribeScheduledInstanceAvailabilityOutput struct { + _ struct{} `type:"structure"` + + // The token required to retrieve the next set of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the available Scheduled Instances. + ScheduledInstanceAvailabilitySet []*ScheduledInstanceAvailability `locationName:"scheduledInstanceAvailabilitySet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeScheduledInstanceAvailabilityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScheduledInstanceAvailabilityOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeScheduledInstances. +type DescribeScheduledInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // availability-zone - The Availability Zone (for example, us-west-2a). + // + // instance-type - The instance type (for example, c4.large). + // + // network-platform - The network platform (EC2-Classic or EC2-VPC). + // + // platform - The platform (Linux/UNIX or Windows). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return in a single call. This value can + // be between 5 and 300. The default value is 100. To retrieve the remaining + // results, make another call with the returned NextToken value. + MaxResults *int64 `type:"integer"` + + // The token for the next set of results. + NextToken *string `type:"string"` + + // One or more Scheduled Instance IDs. + ScheduledInstanceIds []*string `locationName:"ScheduledInstanceId" locationNameList:"ScheduledInstanceId" type:"list"` + + // The time period for the first schedule to start. + SlotStartTimeRange *SlotStartTimeRangeRequest `type:"structure"` +} + +// String returns the string representation +func (s DescribeScheduledInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScheduledInstancesInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeScheduledInstances. +type DescribeScheduledInstancesOutput struct { + _ struct{} `type:"structure"` + + // The token required to retrieve the next set of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the Scheduled Instances. + ScheduledInstanceSet []*ScheduledInstance `locationName:"scheduledInstanceSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeScheduledInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScheduledInstancesOutput) GoString() string { + return s.String() +} + +type DescribeSecurityGroupReferencesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the operation, without + // actually making the request, and provides an error response. 
If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more security group IDs in your account. + GroupId []*string `locationNameList:"item" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeSecurityGroupReferencesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSecurityGroupReferencesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeSecurityGroupReferencesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeSecurityGroupReferencesInput"} + if s.GroupId == nil { + invalidParams.Add(request.NewErrParamRequired("GroupId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeSecurityGroupReferencesOutput struct { + _ struct{} `type:"structure"` + + // Information about the VPCs with the referencing security groups. + SecurityGroupReferenceSet []*SecurityGroupReference `locationName:"securityGroupReferenceSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeSecurityGroupReferencesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSecurityGroupReferencesOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeSecurityGroups. +type DescribeSecurityGroupsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. If using multiple filters for rules, the results include + // security groups for which any combination of rules - not necessarily a single + // rule - match all filters. + // + // description - The description of the security group. + // + // egress.ip-permission.prefix-list-id - The ID (prefix) of the AWS service + // to which the security group allows access. + // + // group-id - The ID of the security group. + // + // group-name - The name of the security group. + // + // ip-permission.cidr - A CIDR range that has been granted permission. + // + // ip-permission.from-port - The start of port range for the TCP and UDP + // protocols, or an ICMP type number. + // + // ip-permission.group-id - The ID of a security group that has been granted + // permission. + // + // ip-permission.group-name - The name of a security group that has been + // granted permission. + // + // ip-permission.protocol - The IP protocol for the permission (tcp | udp + // | icmp or a protocol number). + // + // ip-permission.to-port - The end of port range for the TCP and UDP protocols, + // or an ICMP code. + // + // ip-permission.user-id - The ID of an AWS account that has been granted + // permission. + // + // owner-id - The AWS account ID of the owner of the security group. + // + // tag-key - The key of a tag assigned to the security group. + // + // tag-value - The value of a tag assigned to the security group. + // + // vpc-id - The ID of the VPC specified when the security group was created. 
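+	//
+	// For example (an illustrative sketch, not part of the generated code; it
+	// assumes an initialized *EC2 client named svc and a hypothetical group
+	// name), describing one group by name:
+	//
+	//	out, err := svc.DescribeSecurityGroups(&ec2.DescribeSecurityGroupsInput{
+	//		Filters: []*ec2.Filter{{
+	//			Name:   aws.String("group-name"),
+	//			Values: []*string{aws.String("my-security-group")},
+	//		}},
+	//	})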
+ Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more security group IDs. Required for security groups in a nondefault + // VPC. + // + // Default: Describes all your security groups. + GroupIds []*string `locationName:"GroupId" locationNameList:"groupId" type:"list"` + + // [EC2-Classic and default VPC only] One or more security group names. You + // can specify either the security group name or the security group ID. For + // security groups in a nondefault VPC, use the group-name filter to describe + // security groups by name. + // + // Default: Describes all your security groups. + GroupNames []*string `locationName:"GroupName" locationNameList:"GroupName" type:"list"` +} + +// String returns the string representation +func (s DescribeSecurityGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSecurityGroupsInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeSecurityGroups. +type DescribeSecurityGroupsOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more security groups. + SecurityGroups []*SecurityGroup `locationName:"securityGroupInfo" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeSecurityGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSecurityGroupsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeSnapshotAttribute. +type DescribeSnapshotAttributeInput struct { + _ struct{} `type:"structure"` + + // The snapshot attribute you would like to view. + Attribute *string `type:"string" required:"true" enum:"SnapshotAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the EBS snapshot. + SnapshotId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeSnapshotAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSnapshotAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeSnapshotAttributeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeSnapshotAttributeInput"} + if s.Attribute == nil { + invalidParams.Add(request.NewErrParamRequired("Attribute")) + } + if s.SnapshotId == nil { + invalidParams.Add(request.NewErrParamRequired("SnapshotId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of DescribeSnapshotAttribute. +type DescribeSnapshotAttributeOutput struct { + _ struct{} `type:"structure"` + + // A list of permissions for creating volumes from the snapshot. + CreateVolumePermissions []*CreateVolumePermission `locationName:"createVolumePermission" locationNameList:"item" type:"list"` + + // A list of product codes. + ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"` + + // The ID of the EBS snapshot. 
+ SnapshotId *string `locationName:"snapshotId" type:"string"` +} + +// String returns the string representation +func (s DescribeSnapshotAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSnapshotAttributeOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeSnapshots. +type DescribeSnapshotsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // description - A description of the snapshot. + // + // owner-alias - The AWS account alias (for example, amazon) that owns the + // snapshot. + // + // owner-id - The ID of the AWS account that owns the snapshot. + // + // progress - The progress of the snapshot, as a percentage (for example, + // 80%). + // + // snapshot-id - The snapshot ID. + // + // start-time - The time stamp when the snapshot was initiated. + // + // status - The status of the snapshot (pending | completed | error). + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter + // is independent of the tag-key filter. + // + // volume-id - The ID of the volume the snapshot is for. + // + // volume-size - The size of the volume, in GiB. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of snapshot results returned by DescribeSnapshots in paginated + // output. When this parameter is used, DescribeSnapshots only returns MaxResults + // results in a single page along with a NextToken response element. The remaining + // results of the initial request can be seen by sending another DescribeSnapshots + // request with the returned NextToken value. This value can be between 5 and + // 1000; if MaxResults is given a value larger than 1000, only 1000 results + // are returned. If this parameter is not used, then DescribeSnapshots returns + // all results. You cannot specify this parameter and the snapshot IDs parameter + // in the same request. + MaxResults *int64 `type:"integer"` + + // The NextToken value returned from a previous paginated DescribeSnapshots + // request where MaxResults was used and the results exceeded the value of that + // parameter. Pagination continues from the end of the previous results that + // returned the NextToken value. This value is null when there are no more results + // to return. + NextToken *string `type:"string"` + + // Returns the snapshots owned by the specified owner. Multiple owners can be + // specified. 
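+	//
+	// For example (an illustrative sketch, not part of the generated code; it
+	// assumes an initialized *EC2 client named svc), the special owner value
+	// "self" pages through the calling account's snapshots with MaxResults and
+	// NextToken:
+	//
+	//	input := &ec2.DescribeSnapshotsInput{
+	//		OwnerIds:   []*string{aws.String("self")},
+	//		MaxResults: aws.Int64(500),
+	//	}
+	//	for {
+	//		out, err := svc.DescribeSnapshots(input)
+	//		if err != nil {
+	//			log.Fatal(err)
+	//		}
+	//		for _, snap := range out.Snapshots {
+	//			fmt.Println(aws.StringValue(snap.SnapshotId))
+	//		}
+	//		if out.NextToken == nil {
+	//			break
+	//		}
+	//		input.NextToken = out.NextToken
+	//	}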
+	OwnerIds []*string `locationName:"Owner" locationNameList:"Owner" type:"list"`
+
+	// One or more AWS account IDs that can create volumes from the snapshot.
+	RestorableByUserIds []*string `locationName:"RestorableBy" type:"list"`
+
+	// One or more snapshot IDs.
+	//
+	// Default: Describes snapshots for which you have launch permissions.
+	SnapshotIds []*string `locationName:"SnapshotId" locationNameList:"SnapshotId" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeSnapshotsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeSnapshotsInput) GoString() string {
+	return s.String()
+}
+
+// Contains the output of DescribeSnapshots.
+type DescribeSnapshotsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The NextToken value to include in a future DescribeSnapshots request. When
+	// the results of a DescribeSnapshots request exceed MaxResults, this value
+	// can be used to retrieve the next page of results. This value is null when
+	// there are no more results to return.
+	NextToken *string `locationName:"nextToken" type:"string"`
+
+	// Information about the snapshots.
+	Snapshots []*Snapshot `locationName:"snapshotSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeSnapshotsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeSnapshotsOutput) GoString() string {
+	return s.String()
+}
+
+// Contains the parameters for DescribeSpotDatafeedSubscription.
+type DescribeSpotDatafeedSubscriptionInput struct {
+	_ struct{} `type:"structure"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `locationName:"dryRun" type:"boolean"`
+}
+
+// String returns the string representation
+func (s DescribeSpotDatafeedSubscriptionInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeSpotDatafeedSubscriptionInput) GoString() string {
+	return s.String()
+}
+
+// Contains the output of DescribeSpotDatafeedSubscription.
+type DescribeSpotDatafeedSubscriptionOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The Spot instance data feed subscription.
+	SpotDatafeedSubscription *SpotDatafeedSubscription `locationName:"spotDatafeedSubscription" type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeSpotDatafeedSubscriptionOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeSpotDatafeedSubscriptionOutput) GoString() string {
+	return s.String()
+}
+
+// Contains the parameters for DescribeSpotFleetInstances.
+type DescribeSpotFleetInstancesInput struct {
+	_ struct{} `type:"structure"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+	// The maximum number of results to return in a single call. Specify a value
+	// between 1 and 1000. The default value is 1000.
To retrieve the remaining + // results, make another call with the returned NextToken value. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The token for the next set of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // The ID of the Spot fleet request. + SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeSpotFleetInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotFleetInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeSpotFleetInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeSpotFleetInstancesInput"} + if s.SpotFleetRequestId == nil { + invalidParams.Add(request.NewErrParamRequired("SpotFleetRequestId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of DescribeSpotFleetInstances. +type DescribeSpotFleetInstancesOutput struct { + _ struct{} `type:"structure"` + + // The running instances. Note that this list is refreshed periodically and + // might be out of date. + ActiveInstances []*ActiveInstance `locationName:"activeInstanceSet" locationNameList:"item" type:"list" required:"true"` + + // The token required to retrieve the next set of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The ID of the Spot fleet request. + SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeSpotFleetInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotFleetInstancesOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeSpotFleetRequestHistory. +type DescribeSpotFleetRequestHistoryInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The type of events to describe. By default, all events are described. + EventType *string `locationName:"eventType" type:"string" enum:"EventType"` + + // The maximum number of results to return in a single call. Specify a value + // between 1 and 1000. The default value is 1000. To retrieve the remaining + // results, make another call with the returned NextToken value. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The token for the next set of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // The ID of the Spot fleet request. + SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"` + + // The starting date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). 
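+	//
+	// For example (an illustrative sketch, not part of the generated code; it
+	// assumes an initialized *EC2 client named svc and a hypothetical fleet
+	// request ID), fetching the last day of events:
+	//
+	//	out, err := svc.DescribeSpotFleetRequestHistory(&ec2.DescribeSpotFleetRequestHistoryInput{
+	//		SpotFleetRequestId: aws.String("sfr-example"),
+	//		StartTime:          aws.Time(time.Now().Add(-24 * time.Hour)),
+	//	})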
+ StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601" required:"true"` +} + +// String returns the string representation +func (s DescribeSpotFleetRequestHistoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotFleetRequestHistoryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeSpotFleetRequestHistoryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeSpotFleetRequestHistoryInput"} + if s.SpotFleetRequestId == nil { + invalidParams.Add(request.NewErrParamRequired("SpotFleetRequestId")) + } + if s.StartTime == nil { + invalidParams.Add(request.NewErrParamRequired("StartTime")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of DescribeSpotFleetRequestHistory. +type DescribeSpotFleetRequestHistoryOutput struct { + _ struct{} `type:"structure"` + + // Information about the events in the history of the Spot fleet request. + HistoryRecords []*HistoryRecord `locationName:"historyRecordSet" locationNameList:"item" type:"list" required:"true"` + + // The last date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + // All records up to this time were retrieved. + // + // If nextToken indicates that there are more results, this value is not present. + LastEvaluatedTime *time.Time `locationName:"lastEvaluatedTime" type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The token required to retrieve the next set of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The ID of the Spot fleet request. + SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"` + + // The starting date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601" required:"true"` +} + +// String returns the string representation +func (s DescribeSpotFleetRequestHistoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotFleetRequestHistoryOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeSpotFleetRequests. +type DescribeSpotFleetRequestsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The maximum number of results to return in a single call. Specify a value + // between 1 and 1000. The default value is 1000. To retrieve the remaining + // results, make another call with the returned NextToken value. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The token for the next set of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // The IDs of the Spot fleet requests. 
+	SpotFleetRequestIds []*string `locationName:"spotFleetRequestId" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeSpotFleetRequestsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeSpotFleetRequestsInput) GoString() string {
+	return s.String()
+}
+
+// Contains the output of DescribeSpotFleetRequests.
+type DescribeSpotFleetRequestsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The token required to retrieve the next set of results. This value is null
+	// when there are no more results to return.
+	NextToken *string `locationName:"nextToken" type:"string"`
+
+	// Information about the configuration of your Spot fleet.
+	SpotFleetRequestConfigs []*SpotFleetRequestConfig `locationName:"spotFleetRequestConfigSet" locationNameList:"item" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeSpotFleetRequestsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeSpotFleetRequestsOutput) GoString() string {
+	return s.String()
+}
+
+// Contains the parameters for DescribeSpotInstanceRequests.
+type DescribeSpotInstanceRequestsInput struct {
+	_ struct{} `type:"structure"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+	// One or more filters.
+	//
+	// availability-zone-group - The Availability Zone group.
+	//
+	// create-time - The time stamp when the Spot instance request was created.
+	//
+	// fault-code - The fault code related to the request.
+	//
+	// fault-message - The fault message related to the request.
+	//
+	// instance-id - The ID of the instance that fulfilled the request.
+	//
+	// launch-group - The Spot instance launch group.
+	//
+	// launch.block-device-mapping.delete-on-termination - Indicates whether
+	// the Amazon EBS volume is deleted on instance termination.
+	//
+	// launch.block-device-mapping.device-name - The device name for the Amazon
+	// EBS volume (for example, /dev/sdh).
+	//
+	// launch.block-device-mapping.snapshot-id - The ID of the snapshot used
+	// for the Amazon EBS volume.
+	//
+	// launch.block-device-mapping.volume-size - The size of the Amazon EBS
+	// volume, in GiB.
+	//
+	// launch.block-device-mapping.volume-type - The type of the Amazon EBS
+	// volume: gp2 for General Purpose SSD, io1 for Provisioned IOPS SSD, st1 for
+	// Throughput Optimized HDD, sc1 for Cold HDD, or standard for Magnetic.
+	//
+	// launch.group-id - The security group for the instance.
+	//
+	// launch.image-id - The ID of the AMI.
+	//
+	// launch.instance-type - The type of instance (for example, m3.medium).
+	//
+	// launch.kernel-id - The kernel ID.
+	//
+	// launch.key-name - The name of the key pair the instance launched with.
+	//
+	// launch.monitoring-enabled - Whether monitoring is enabled for the Spot
+	// instance.
+	//
+	// launch.ramdisk-id - The RAM disk ID.
+	//
+	// network-interface.network-interface-id - The ID of the network interface.
+	//
+	// network-interface.device-index - The index of the device for the network
+	// interface attachment on the instance.
+ // + // network-interface.subnet-id - The ID of the subnet for the instance. + // + // network-interface.description - A description of the network interface. + // + // network-interface.private-ip-address - The primary private IP address + // of the network interface. + // + // network-interface.delete-on-termination - Indicates whether the network + // interface is deleted when the instance is terminated. + // + // network-interface.group-id - The ID of the security group associated + // with the network interface. + // + // network-interface.group-name - The name of the security group associated + // with the network interface. + // + // network-interface.addresses.primary - Indicates whether the IP address + // is the primary private IP address. + // + // product-description - The product description associated with the instance + // (Linux/UNIX | Windows). + // + // spot-instance-request-id - The Spot instance request ID. + // + // spot-price - The maximum hourly price for any Spot instance launched + // to fulfill the request. + // + // state - The state of the Spot instance request (open | active | closed + // | cancelled | failed). Spot bid status information can help you track your + // Amazon EC2 Spot instance requests. For more information, see Spot Bid Status + // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html) + // in the Amazon Elastic Compute Cloud User Guide. + // + // status-code - The short code describing the most recent evaluation of + // your Spot instance request. + // + // status-message - The message explaining the status of the Spot instance + // request. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter + // is independent of the tag-key filter. + // + // type - The type of Spot instance request (one-time | persistent). + // + // launched-availability-zone - The Availability Zone in which the bid is + // launched. + // + // valid-from - The start date of the request. + // + // valid-until - The end date of the request. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more Spot instance request IDs. + SpotInstanceRequestIds []*string `locationName:"SpotInstanceRequestId" locationNameList:"SpotInstanceRequestId" type:"list"` +} + +// String returns the string representation +func (s DescribeSpotInstanceRequestsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotInstanceRequestsInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeSpotInstanceRequests. +type DescribeSpotInstanceRequestsOutput struct { + _ struct{} `type:"structure"` + + // One or more Spot instance requests. 
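+	//
+	// For example (an illustrative sketch, not part of the generated code; it
+	// assumes an initialized *EC2 client named svc), open requests selected with
+	// the state filter documented on the input type land in this field:
+	//
+	//	out, err := svc.DescribeSpotInstanceRequests(&ec2.DescribeSpotInstanceRequestsInput{
+	//		Filters: []*ec2.Filter{{
+	//			Name:   aws.String("state"),
+	//			Values: []*string{aws.String("open")},
+	//		}},
+	//	})
+	//	if err == nil {
+	//		for _, r := range out.SpotInstanceRequests {
+	//			fmt.Println(aws.StringValue(r.SpotInstanceRequestId))
+	//		}
+	//	}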
+ SpotInstanceRequests []*SpotInstanceRequest `locationName:"spotInstanceRequestSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeSpotInstanceRequestsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotInstanceRequestsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeSpotPriceHistory. +type DescribeSpotPriceHistoryInput struct { + _ struct{} `type:"structure"` + + // Filters the results by the specified Availability Zone. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The date and time, up to the current date, from which to stop retrieving + // the price history data, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + EndTime *time.Time `locationName:"endTime" type:"timestamp" timestampFormat:"iso8601"` + + // One or more filters. + // + // availability-zone - The Availability Zone for which prices should be + // returned. + // + // instance-type - The type of instance (for example, m3.medium). + // + // product-description - The product description for the Spot price (Linux/UNIX + // | SUSE Linux | Windows | Linux/UNIX (Amazon VPC) | SUSE Linux (Amazon VPC) + // | Windows (Amazon VPC)). + // + // spot-price - The Spot price. The value must match exactly (or use wildcards; + // greater than or less than comparison is not supported). + // + // timestamp - The timestamp of the Spot price history, in UTC format (for + // example, YYYY-MM-DDTHH:MM:SSZ). You can use wildcards (* and ?). Greater + // than or less than comparison is not supported. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // Filters the results by the specified instance types. + InstanceTypes []*string `locationName:"InstanceType" type:"list"` + + // The maximum number of results to return in a single call. Specify a value + // between 1 and 1000. The default value is 1000. To retrieve the remaining + // results, make another call with the returned NextToken value. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The token for the next set of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // Filters the results by the specified basic product descriptions. + ProductDescriptions []*string `locationName:"ProductDescription" type:"list"` + + // The date and time, up to the past 90 days, from which to start retrieving + // the price history data, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s DescribeSpotPriceHistoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotPriceHistoryInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeSpotPriceHistory. +type DescribeSpotPriceHistoryOutput struct { + _ struct{} `type:"structure"` + + // The token required to retrieve the next set of results. This value is null + // when there are no more results to return. 
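+ //
+ // Pagination sketch (editor's illustration; svc is an assumed *ec2.EC2
+ // client): loop until NextToken comes back empty, or use the generated
+ // DescribeSpotPriceHistoryPages helper instead.
+ //
+ // in := &ec2.DescribeSpotPriceHistoryInput{MaxResults: aws.Int64(1000)}
+ // for {
+ // out, err := svc.DescribeSpotPriceHistory(in)
+ // if err != nil {
+ // break
+ // }
+ // // ... consume out.SpotPriceHistory ...
+ // if aws.StringValue(out.NextToken) == "" {
+ // break
+ // }
+ // in.NextToken = out.NextToken
+ // }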
+ NextToken *string `locationName:"nextToken" type:"string"` + + // The historical Spot prices. + SpotPriceHistory []*SpotPrice `locationName:"spotPriceHistorySet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeSpotPriceHistoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotPriceHistoryOutput) GoString() string { + return s.String() +} + +type DescribeStaleSecurityGroupsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the operation, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The maximum number of items to return for this request. The request returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a prior call.) + NextToken *string `min:"1" type:"string"` + + // The ID of the VPC. + VpcId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeStaleSecurityGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStaleSecurityGroupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeStaleSecurityGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeStaleSecurityGroupsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeStaleSecurityGroupsOutput struct { + _ struct{} `type:"structure"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the stale security groups. + StaleSecurityGroupSet []*StaleSecurityGroup `locationName:"staleSecurityGroupSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeStaleSecurityGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStaleSecurityGroupsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeSubnets. +type DescribeSubnetsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // availabilityZone - The Availability Zone for the subnet. You can also + // use availability-zone as the filter name. 
+ // + // available-ip-address-count - The number of IP addresses in the subnet + // that are available. + // + // cidrBlock - The CIDR block of the subnet. The CIDR block you specify + // must exactly match the subnet's CIDR block for information to be returned + // for the subnet. You can also use cidr or cidr-block as the filter names. + // + // defaultForAz - Indicates whether this is the default subnet for the Availability + // Zone. You can also use default-for-az as the filter name. + // + // state - The state of the subnet (pending | available). + // + // subnet-id - The ID of the subnet. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter + // is independent of the tag-key filter. + // + // vpc-id - The ID of the VPC for the subnet. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more subnet IDs. + // + // Default: Describes all your subnets. + SubnetIds []*string `locationName:"SubnetId" locationNameList:"SubnetId" type:"list"` +} + +// String returns the string representation +func (s DescribeSubnetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSubnetsInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeSubnets. +type DescribeSubnetsOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more subnets. + Subnets []*Subnet `locationName:"subnetSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeSubnetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSubnetsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeTags. +type DescribeTagsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // key - The tag key. + // + // resource-id - The resource ID. + // + // resource-type - The resource type (customer-gateway | dhcp-options | + // image | instance | internet-gateway | network-acl | network-interface | reserved-instances + // | route-table | security-group | snapshot | spot-instances-request | subnet + // | volume | vpc | vpn-connection | vpn-gateway). + // + // value - The tag value. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return in a single call. This value can + // be between 5 and 1000. To retrieve the remaining results, make another call + // with the returned NextToken value. 
+ MaxResults *int64 `locationName:"maxResults" type:"integer"`
+
+ // The token to retrieve the next page of results.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeTagsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeTagsInput) GoString() string {
+ return s.String()
+}
+
+// Contains the output of DescribeTags.
+type DescribeTagsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The token to use to retrieve the next page of results. This value is null
+ // when there are no more results to return.
+ NextToken *string `locationName:"nextToken" type:"string"`
+
+ // A list of tags.
+ Tags []*TagDescription `locationName:"tagSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeTagsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeTagsOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the parameters for DescribeVolumeAttribute.
+type DescribeVolumeAttributeInput struct {
+ _ struct{} `type:"structure"`
+
+ // The volume attribute.
+ Attribute *string `type:"string" enum:"VolumeAttributeName"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+ // The ID of the volume.
+ VolumeId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeVolumeAttributeInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeVolumeAttributeInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeVolumeAttributeInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeVolumeAttributeInput"}
+ if s.VolumeId == nil {
+ invalidParams.Add(request.NewErrParamRequired("VolumeId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Contains the output of DescribeVolumeAttribute.
+type DescribeVolumeAttributeOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The state of the autoEnableIO attribute.
+ AutoEnableIO *AttributeBooleanValue `locationName:"autoEnableIO" type:"structure"`
+
+ // A list of product codes.
+ ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"`
+
+ // The ID of the volume.
+ VolumeId *string `locationName:"volumeId" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeVolumeAttributeOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeVolumeAttributeOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the parameters for DescribeVolumeStatus.
+type DescribeVolumeStatusInput struct {
+ _ struct{} `type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
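+ //
+ // Dry-run sketch (editor's illustration; svc is an assumed *ec2.EC2 client
+ // and awserr is the SDK's aws/awserr package): a successful permission
+ // check surfaces as a DryRunOperation error.
+ //
+ // _, err := svc.DescribeVolumeStatus(&ec2.DescribeVolumeStatusInput{
+ // DryRun: aws.Bool(true),
+ // })
+ // if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "DryRunOperation" {
+ // // credentials are authorized to make this call
+ // }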
+ DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // action.code - The action code for the event (for example, enable-volume-io). + // + // action.description - A description of the action. + // + // action.event-id - The event ID associated with the action. + // + // availability-zone - The Availability Zone of the instance. + // + // event.description - A description of the event. + // + // event.event-id - The event ID. + // + // event.event-type - The event type (for io-enabled: passed | failed; for + // io-performance: io-performance:degraded | io-performance:severely-degraded + // | io-performance:stalled). + // + // event.not-after - The latest end time for the event. + // + // event.not-before - The earliest start time for the event. + // + // volume-status.details-name - The cause for volume-status.status (io-enabled + // | io-performance). + // + // volume-status.details-status - The status of volume-status.details-name + // (for io-enabled: passed | failed; for io-performance: normal | degraded | + // severely-degraded | stalled). + // + // volume-status.status - The status of the volume (ok | impaired | warning + // | insufficient-data). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of volume results returned by DescribeVolumeStatus in + // paginated output. When this parameter is used, the request only returns MaxResults + // results in a single page along with a NextToken response element. The remaining + // results of the initial request can be seen by sending another request with + // the returned NextToken value. This value can be between 5 and 1000; if MaxResults + // is given a value larger than 1000, only 1000 results are returned. If this + // parameter is not used, then DescribeVolumeStatus returns all results. You + // cannot specify this parameter and the volume IDs parameter in the same request. + MaxResults *int64 `type:"integer"` + + // The NextToken value to include in a future DescribeVolumeStatus request. + // When the results of the request exceed MaxResults, this value can be used + // to retrieve the next page of results. This value is null when there are no + // more results to return. + NextToken *string `type:"string"` + + // One or more volume IDs. + // + // Default: Describes all your volumes. + VolumeIds []*string `locationName:"VolumeId" locationNameList:"VolumeId" type:"list"` +} + +// String returns the string representation +func (s DescribeVolumeStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVolumeStatusInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeVolumeStatus. +type DescribeVolumeStatusOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // A list of volumes. + VolumeStatuses []*VolumeStatusItem `locationName:"volumeStatusSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVolumeStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVolumeStatusOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeVolumes. 
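+//
+// Editor's sketch (not part of the generated code; svc is an assumed
+// *ec2.EC2 client): page through all gp2 volumes with the generated
+// DescribeVolumesPages helper and the volume-type filter listed below.
+//
+// err := svc.DescribeVolumesPages(&ec2.DescribeVolumesInput{
+// Filters: []*ec2.Filter{{
+// Name: aws.String("volume-type"),
+// Values: []*string{aws.String("gp2")},
+// }},
+// }, func(page *ec2.DescribeVolumesOutput, lastPage bool) bool {
+// // ... consume page.Volumes ...
+// return true // return false to stop paging early
+// })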
+type DescribeVolumesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // attachment.attach-time - The time stamp when the attachment initiated. + // + // attachment.delete-on-termination - Whether the volume is deleted on instance + // termination. + // + // attachment.device - The device name that is exposed to the instance (for + // example, /dev/sda1). + // + // attachment.instance-id - The ID of the instance the volume is attached + // to. + // + // attachment.status - The attachment state (attaching | attached | detaching + // | detached). + // + // availability-zone - The Availability Zone in which the volume was created. + // + // create-time - The time stamp when the volume was created. + // + // encrypted - The encryption status of the volume. + // + // size - The size of the volume, in GiB. + // + // snapshot-id - The snapshot from which the volume was created. + // + // status - The status of the volume (creating | available | in-use | deleting + // | deleted | error). + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter + // is independent of the tag-key filter. + // + // volume-id - The volume ID. + // + // volume-type - The Amazon EBS volume type. This can be gp2 for General + // Purpose SSD, io1 for Provisioned IOPS SSD, st1 for Throughput Optimized HDD, + // sc1 for Cold HDD, or standard for Magnetic volumes. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of volume results returned by DescribeVolumes in paginated + // output. When this parameter is used, DescribeVolumes only returns MaxResults + // results in a single page along with a NextToken response element. The remaining + // results of the initial request can be seen by sending another DescribeVolumes + // request with the returned NextToken value. This value can be between 5 and + // 1000; if MaxResults is given a value larger than 1000, only 1000 results + // are returned. If this parameter is not used, then DescribeVolumes returns + // all results. You cannot specify this parameter and the volume IDs parameter + // in the same request. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The NextToken value returned from a previous paginated DescribeVolumes request + // where MaxResults was used and the results exceeded the value of that parameter. + // Pagination continues from the end of the previous results that returned the + // NextToken value. This value is null when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // One or more volume IDs. 
+ VolumeIds []*string `locationName:"VolumeId" locationNameList:"VolumeId" type:"list"` +} + +// String returns the string representation +func (s DescribeVolumesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVolumesInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeVolumes. +type DescribeVolumesOutput struct { + _ struct{} `type:"structure"` + + // The NextToken value to include in a future DescribeVolumes request. When + // the results of a DescribeVolumes request exceed MaxResults, this value can + // be used to retrieve the next page of results. This value is null when there + // are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the volumes. + Volumes []*Volume `locationName:"volumeSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVolumesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVolumesOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeVpcAttribute. +type DescribeVpcAttributeInput struct { + _ struct{} `type:"structure"` + + // The VPC attribute. + Attribute *string `type:"string" required:"true" enum:"VpcAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. + VpcId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeVpcAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeVpcAttributeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeVpcAttributeInput"} + if s.Attribute == nil { + invalidParams.Add(request.NewErrParamRequired("Attribute")) + } + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of DescribeVpcAttribute. +type DescribeVpcAttributeOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether the instances launched in the VPC get DNS hostnames. If + // this attribute is true, instances in the VPC get DNS hostnames; otherwise, + // they do not. + EnableDnsHostnames *AttributeBooleanValue `locationName:"enableDnsHostnames" type:"structure"` + + // Indicates whether DNS resolution is enabled for the VPC. If this attribute + // is true, the Amazon DNS server resolves DNS hostnames for your instances + // to their corresponding IP addresses; otherwise, it does not. + EnableDnsSupport *AttributeBooleanValue `locationName:"enableDnsSupport" type:"structure"` + + // The ID of the VPC. 
+ VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s DescribeVpcAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcAttributeOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeVpcClassicLinkDnsSupport. +type DescribeVpcClassicLinkDnsSupportInput struct { + _ struct{} `type:"structure"` + + // The maximum number of items to return for this request. The request returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `locationName:"maxResults" min:"5" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a prior call.) + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // One or more VPC IDs. + VpcIds []*string `locationNameList:"VpcId" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcClassicLinkDnsSupportInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcClassicLinkDnsSupportInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeVpcClassicLinkDnsSupportInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeVpcClassicLinkDnsSupportInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of DescribeVpcClassicLinkDnsSupport. +type DescribeVpcClassicLinkDnsSupportOutput struct { + _ struct{} `type:"structure"` + + // The token to use when requesting the next set of items. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // Information about the ClassicLink DNS support status of the VPCs. + Vpcs []*ClassicLinkDnsSupport `locationName:"vpcs" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcClassicLinkDnsSupportOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcClassicLinkDnsSupportOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeVpcClassicLink. +type DescribeVpcClassicLinkInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // is-classic-link-enabled - Whether the VPC is enabled for ClassicLink + // (true | false). + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. 
For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter + // is independent of the tag-key filter. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more VPCs for which you want to describe the ClassicLink status. + VpcIds []*string `locationName:"VpcId" locationNameList:"VpcId" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcClassicLinkInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcClassicLinkInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeVpcClassicLink. +type DescribeVpcClassicLinkOutput struct { + _ struct{} `type:"structure"` + + // The ClassicLink status of one or more VPCs. + Vpcs []*VpcClassicLink `locationName:"vpcSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcClassicLinkOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcClassicLinkOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeVpcEndpointServices. +type DescribeVpcEndpointServicesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The maximum number of items to return for this request. The request returns + // a token that you can specify in a subsequent call to get the next set of + // results. + // + // Constraint: If the value is greater than 1000, we return only 1000 items. + MaxResults *int64 `type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a prior call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeVpcEndpointServicesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcEndpointServicesInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeVpcEndpointServices. +type DescribeVpcEndpointServicesOutput struct { + _ struct{} `type:"structure"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `locationName:"nextToken" type:"string"` + + // A list of supported AWS services. + ServiceNames []*string `locationName:"serviceNameSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcEndpointServicesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcEndpointServicesOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeVpcEndpoints. 
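+//
+// Editor's sketch (not part of the generated code; svc is an assumed
+// *ec2.EC2 client and vpc-0example a placeholder ID): restrict the listing
+// to one VPC via the vpc-id filter documented below.
+//
+// out, err := svc.DescribeVpcEndpoints(&ec2.DescribeVpcEndpointsInput{
+// Filters: []*ec2.Filter{{
+// Name: aws.String("vpc-id"),
+// Values: []*string{aws.String("vpc-0example")},
+// }},
+// })
+// if err == nil {
+// // out.VpcEndpoints holds the endpoints in that VPC
+// }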
+type DescribeVpcEndpointsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // service-name: The name of the AWS service. + // + // vpc-id: The ID of the VPC in which the endpoint resides. + // + // vpc-endpoint-id: The ID of the endpoint. + // + // vpc-endpoint-state: The state of the endpoint. (pending | available | + // deleting | deleted) + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of items to return for this request. The request returns + // a token that you can specify in a subsequent call to get the next set of + // results. + // + // Constraint: If the value is greater than 1000, we return only 1000 items. + MaxResults *int64 `type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a prior call.) + NextToken *string `type:"string"` + + // One or more endpoint IDs. + VpcEndpointIds []*string `locationName:"VpcEndpointId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcEndpointsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcEndpointsInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeVpcEndpoints. +type DescribeVpcEndpointsOutput struct { + _ struct{} `type:"structure"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the endpoints. + VpcEndpoints []*VpcEndpoint `locationName:"vpcEndpointSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcEndpointsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcEndpointsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeVpcPeeringConnections. +type DescribeVpcPeeringConnectionsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // accepter-vpc-info.cidr-block - The CIDR block of the peer VPC. + // + // accepter-vpc-info.owner-id - The AWS account ID of the owner of the peer + // VPC. + // + // accepter-vpc-info.vpc-id - The ID of the peer VPC. + // + // expiration-time - The expiration date and time for the VPC peering connection. + // + // requester-vpc-info.cidr-block - The CIDR block of the requester's VPC. + // + // requester-vpc-info.owner-id - The AWS account ID of the owner of the + // requester VPC. + // + // requester-vpc-info.vpc-id - The ID of the requester VPC. + // + // status-code - The status of the VPC peering connection (pending-acceptance + // | failed | expired | provisioning | active | deleted | rejected). 
+ // + // status-message - A message that provides more information about the status + // of the VPC peering connection, if applicable. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter + // is independent of the tag-key filter. + // + // vpc-peering-connection-id - The ID of the VPC peering connection. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more VPC peering connection IDs. + // + // Default: Describes all your VPC peering connections. + VpcPeeringConnectionIds []*string `locationName:"VpcPeeringConnectionId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcPeeringConnectionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcPeeringConnectionsInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeVpcPeeringConnections. +type DescribeVpcPeeringConnectionsOutput struct { + _ struct{} `type:"structure"` + + // Information about the VPC peering connections. + VpcPeeringConnections []*VpcPeeringConnection `locationName:"vpcPeeringConnectionSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcPeeringConnectionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcPeeringConnectionsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeVpcs. +type DescribeVpcsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // cidr - The CIDR block of the VPC. The CIDR block you specify must exactly + // match the VPC's CIDR block for information to be returned for the VPC. Must + // contain the slash followed by one or two digits (for example, /28). + // + // dhcp-options-id - The ID of a set of DHCP options. + // + // isDefault - Indicates whether the VPC is the default VPC. + // + // state - The state of the VPC (pending | available). + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. 
+ // + // tag-value - The value of a tag assigned to the resource. This filter + // is independent of the tag-key filter. + // + // vpc-id - The ID of the VPC. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more VPC IDs. + // + // Default: Describes all your VPCs. + VpcIds []*string `locationName:"VpcId" locationNameList:"VpcId" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcsInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeVpcs. +type DescribeVpcsOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more VPCs. + Vpcs []*Vpc `locationName:"vpcSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeVpnConnections. +type DescribeVpnConnectionsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // customer-gateway-configuration - The configuration information for the + // customer gateway. + // + // customer-gateway-id - The ID of a customer gateway associated with the + // VPN connection. + // + // state - The state of the VPN connection (pending | available | deleting + // | deleted). + // + // option.static-routes-only - Indicates whether the connection has static + // routes only. Used for devices that do not support Border Gateway Protocol + // (BGP). + // + // route.destination-cidr-block - The destination CIDR block. This corresponds + // to the subnet used in a customer data center. + // + // bgp-asn - The BGP Autonomous System Number (ASN) associated with a BGP + // device. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter + // is independent of the tag-key filter. + // + // type - The type of VPN connection. Currently the only supported type + // is ipsec.1. + // + // vpn-connection-id - The ID of the VPN connection. + // + // vpn-gateway-id - The ID of a virtual private gateway associated with + // the VPN connection. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more VPN connection IDs. + // + // Default: Describes your VPN connections. 
+ VpnConnectionIds []*string `locationName:"VpnConnectionId" locationNameList:"VpnConnectionId" type:"list"` +} + +// String returns the string representation +func (s DescribeVpnConnectionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpnConnectionsInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeVpnConnections. +type DescribeVpnConnectionsOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more VPN connections. + VpnConnections []*VpnConnection `locationName:"vpnConnectionSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpnConnectionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpnConnectionsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeVpnGateways. +type DescribeVpnGatewaysInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // attachment.state - The current state of the attachment between the gateway + // and the VPC (attaching | attached | detaching | detached). + // + // attachment.vpc-id - The ID of an attached VPC. + // + // availability-zone - The Availability Zone for the virtual private gateway + // (if applicable). + // + // state - The state of the virtual private gateway (pending | available + // | deleting | deleted). + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter + // is independent of the tag-key filter. + // + // type - The type of virtual private gateway. Currently the only supported + // type is ipsec.1. + // + // vpn-gateway-id - The ID of the virtual private gateway. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more virtual private gateway IDs. + // + // Default: Describes all your virtual private gateways. + VpnGatewayIds []*string `locationName:"VpnGatewayId" locationNameList:"VpnGatewayId" type:"list"` +} + +// String returns the string representation +func (s DescribeVpnGatewaysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpnGatewaysInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeVpnGateways. +type DescribeVpnGatewaysOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more virtual private gateways. 
+ VpnGateways []*VpnGateway `locationName:"vpnGatewaySet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpnGatewaysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpnGatewaysOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DetachClassicLinkVpc. +type DetachClassicLinkVpcInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance to unlink from the VPC. + InstanceId *string `locationName:"instanceId" type:"string" required:"true"` + + // The ID of the VPC to which the instance is linked. + VpcId *string `locationName:"vpcId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DetachClassicLinkVpcInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachClassicLinkVpcInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DetachClassicLinkVpcInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DetachClassicLinkVpcInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of DetachClassicLinkVpc. +type DetachClassicLinkVpcOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s DetachClassicLinkVpcOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachClassicLinkVpcOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DetachInternetGateway. +type DetachInternetGatewayInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the Internet gateway. + InternetGatewayId *string `locationName:"internetGatewayId" type:"string" required:"true"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DetachInternetGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachInternetGatewayInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
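+//
+// The SDK's core request handlers invoke Validate before a request is sent,
+// but it can also be called directly. Editor's sketch (vpc-0example is a
+// placeholder ID):
+//
+// in := &ec2.DetachInternetGatewayInput{VpcId: aws.String("vpc-0example")}
+// if err := in.Validate(); err != nil {
+// // err is a request.ErrInvalidParams reporting the missing
+// // InternetGatewayId parameter
+// }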
+func (s *DetachInternetGatewayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DetachInternetGatewayInput"} + if s.InternetGatewayId == nil { + invalidParams.Add(request.NewErrParamRequired("InternetGatewayId")) + } + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DetachInternetGatewayOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DetachInternetGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachInternetGatewayOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DetachNetworkInterface. +type DetachNetworkInterfaceInput struct { + _ struct{} `type:"structure"` + + // The ID of the attachment. + AttachmentId *string `locationName:"attachmentId" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Specifies whether to force a detachment. + Force *bool `locationName:"force" type:"boolean"` +} + +// String returns the string representation +func (s DetachNetworkInterfaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachNetworkInterfaceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DetachNetworkInterfaceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DetachNetworkInterfaceInput"} + if s.AttachmentId == nil { + invalidParams.Add(request.NewErrParamRequired("AttachmentId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DetachNetworkInterfaceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DetachNetworkInterfaceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachNetworkInterfaceOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DetachVolume. +type DetachVolumeInput struct { + _ struct{} `type:"structure"` + + // The device name. + Device *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Forces detachment if the previous detachment attempt did not occur cleanly + // (for example, logging into an instance, unmounting the volume, and detaching + // normally). This option can lead to data loss or a corrupted file system. + // Use this option only as a last resort to detach a volume from a failed instance. + // The instance won't have an opportunity to flush file system caches or file + // system metadata. If you use this option, you must perform file system check + // and repair procedures. + Force *bool `type:"boolean"` + + // The ID of the instance. 
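+ //
+ // Last-resort sketch (editor's illustration; svc is an assumed *ec2.EC2
+ // client, vol-0example a placeholder ID): force-detach a stuck volume,
+ // accepting the data-loss caveat documented for Force above.
+ //
+ // _, err := svc.DetachVolume(&ec2.DetachVolumeInput{
+ // VolumeId: aws.String("vol-0example"),
+ // Force: aws.Bool(true),
+ // })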
+ InstanceId *string `type:"string"` + + // The ID of the volume. + VolumeId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DetachVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachVolumeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DetachVolumeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DetachVolumeInput"} + if s.VolumeId == nil { + invalidParams.Add(request.NewErrParamRequired("VolumeId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the parameters for DetachVpnGateway. +type DetachVpnGatewayInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. + VpcId *string `type:"string" required:"true"` + + // The ID of the virtual private gateway. + VpnGatewayId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DetachVpnGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachVpnGatewayInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DetachVpnGatewayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DetachVpnGatewayInput"} + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + if s.VpnGatewayId == nil { + invalidParams.Add(request.NewErrParamRequired("VpnGatewayId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DetachVpnGatewayOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DetachVpnGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachVpnGatewayOutput) GoString() string { + return s.String() +} + +// Describes a DHCP configuration option. +type DhcpConfiguration struct { + _ struct{} `type:"structure"` + + // The name of a DHCP option. + Key *string `locationName:"key" type:"string"` + + // One or more values for the DHCP option. + Values []*AttributeValue `locationName:"valueSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DhcpConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DhcpConfiguration) GoString() string { + return s.String() +} + +// Describes a set of DHCP options. +type DhcpOptions struct { + _ struct{} `type:"structure"` + + // One or more DHCP options in the set. + DhcpConfigurations []*DhcpConfiguration `locationName:"dhcpConfigurationSet" locationNameList:"item" type:"list"` + + // The ID of the set of DHCP options. + DhcpOptionsId *string `locationName:"dhcpOptionsId" type:"string"` + + // Any tags assigned to the DHCP options set. 
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DhcpOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DhcpOptions) GoString() string { + return s.String() +} + +// Contains the parameters for DisableVgwRoutePropagation. +type DisableVgwRoutePropagationInput struct { + _ struct{} `type:"structure"` + + // The ID of the virtual private gateway. + GatewayId *string `type:"string" required:"true"` + + // The ID of the route table. + RouteTableId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DisableVgwRoutePropagationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableVgwRoutePropagationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DisableVgwRoutePropagationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisableVgwRoutePropagationInput"} + if s.GatewayId == nil { + invalidParams.Add(request.NewErrParamRequired("GatewayId")) + } + if s.RouteTableId == nil { + invalidParams.Add(request.NewErrParamRequired("RouteTableId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DisableVgwRoutePropagationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisableVgwRoutePropagationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableVgwRoutePropagationOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DisableVpcClassicLinkDnsSupport. +type DisableVpcClassicLinkDnsSupportInput struct { + _ struct{} `type:"structure"` + + // The ID of the VPC. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s DisableVpcClassicLinkDnsSupportInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableVpcClassicLinkDnsSupportInput) GoString() string { + return s.String() +} + +// Contains the output of DisableVpcClassicLinkDnsSupport. +type DisableVpcClassicLinkDnsSupportOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s DisableVpcClassicLinkDnsSupportOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableVpcClassicLinkDnsSupportOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DisableVpcClassicLink. +type DisableVpcClassicLinkInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. 
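+ //
+ // Editor's sketch (svc is an assumed *ec2.EC2 client, vpc-0example a
+ // placeholder ID): success is reported through the Return flag on the
+ // output.
+ //
+ // out, err := svc.DisableVpcClassicLink(&ec2.DisableVpcClassicLinkInput{
+ // VpcId: aws.String("vpc-0example"),
+ // })
+ // if err == nil && aws.BoolValue(out.Return) {
+ // // ClassicLink is now disabled for the VPC
+ // }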
+ VpcId *string `locationName:"vpcId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DisableVpcClassicLinkInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableVpcClassicLinkInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DisableVpcClassicLinkInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisableVpcClassicLinkInput"} + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of DisableVpcClassicLink. +type DisableVpcClassicLinkOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s DisableVpcClassicLinkOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableVpcClassicLinkOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DisassociateAddress. +type DisassociateAddressInput struct { + _ struct{} `type:"structure"` + + // [EC2-VPC] The association ID. Required for EC2-VPC. + AssociationId *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // [EC2-Classic] The Elastic IP address. Required for EC2-Classic. + PublicIp *string `type:"string"` +} + +// String returns the string representation +func (s DisassociateAddressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateAddressInput) GoString() string { + return s.String() +} + +type DisassociateAddressOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisassociateAddressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateAddressOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DisassociateRouteTable. +type DisassociateRouteTableInput struct { + _ struct{} `type:"structure"` + + // The association ID representing the current association between the route + // table and subnet. + AssociationId *string `locationName:"associationId" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` +} + +// String returns the string representation +func (s DisassociateRouteTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateRouteTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
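+// Validation runs automatically when a request is sent, but callers can also
+// invoke it directly before building a request. Illustrative sketch
+// (editorial addition, not generated documentation); the association ID is a
+// placeholder:
+//
+//    params := &ec2.DisassociateRouteTableInput{
+//        AssociationId: aws.String("rtbassoc-0example"),
+//    }
+//    if err := params.Validate(); err != nil {
+//        // Handle the missing-parameter error before calling the API.
+//    }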
+func (s *DisassociateRouteTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisassociateRouteTableInput"} + if s.AssociationId == nil { + invalidParams.Add(request.NewErrParamRequired("AssociationId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DisassociateRouteTableOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisassociateRouteTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateRouteTableOutput) GoString() string { + return s.String() +} + +// Describes a disk image. +type DiskImage struct { + _ struct{} `type:"structure"` + + // A description of the disk image. + Description *string `type:"string"` + + // Information about the disk image. + Image *DiskImageDetail `type:"structure"` + + // Information about the volume. + Volume *VolumeDetail `type:"structure"` +} + +// String returns the string representation +func (s DiskImage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DiskImage) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DiskImage) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DiskImage"} + if s.Image != nil { + if err := s.Image.Validate(); err != nil { + invalidParams.AddNested("Image", err.(request.ErrInvalidParams)) + } + } + if s.Volume != nil { + if err := s.Volume.Validate(); err != nil { + invalidParams.AddNested("Volume", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Describes a disk image. +type DiskImageDescription struct { + _ struct{} `type:"structure"` + + // The checksum computed for the disk image. + Checksum *string `locationName:"checksum" type:"string"` + + // The disk image format. + Format *string `locationName:"format" type:"string" required:"true" enum:"DiskImageFormat"` + + // A presigned URL for the import manifest stored in Amazon S3. For information + // about creating a presigned URL for an Amazon S3 object, read the "Query String + // Request Authentication Alternative" section of the Authenticating REST Requests + // (http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) + // topic in the Amazon Simple Storage Service Developer Guide. + // + // For information about the import manifest referenced by this API action, + // see VM Import Manifest (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/manifest.html). + ImportManifestUrl *string `locationName:"importManifestUrl" type:"string" required:"true"` + + // The size of the disk image, in GiB. + Size *int64 `locationName:"size" type:"long" required:"true"` +} + +// String returns the string representation +func (s DiskImageDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DiskImageDescription) GoString() string { + return s.String() +} + +// Describes a disk image. +type DiskImageDetail struct { + _ struct{} `type:"structure"` + + // The size of the disk image, in GiB. + Bytes *int64 `locationName:"bytes" type:"long" required:"true"` + + // The disk image format. 
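+ //
+ // Illustrative sketch (editorial addition, not generated documentation):
+ // populating the three required DiskImageDetail fields. All values are
+ // placeholders; the manifest URL must be a presigned Amazon S3 URL as
+ // described below.
+ //
+ //    detail := &ec2.DiskImageDetail{
+ //        Bytes:             aws.Int64(8589934592),
+ //        Format:            aws.String("VMDK"),
+ //        ImportManifestUrl: aws.String("https://bucket.s3.amazonaws.com/manifest.xml?AWSAccessKeyId=..."),
+ //    }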
+ Format *string `locationName:"format" type:"string" required:"true" enum:"DiskImageFormat"`
+
+ // A presigned URL for the import manifest stored in Amazon S3. For information
+ // about creating a presigned URL for an Amazon S3 object, read the "Query String
+ // Request Authentication Alternative" section of the Authenticating REST Requests
+ // (http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html)
+ // topic in the Amazon Simple Storage Service Developer Guide.
+ //
+ // For information about the import manifest referenced by this API action,
+ // see VM Import Manifest (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/manifest.html).
+ ImportManifestUrl *string `locationName:"importManifestUrl" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DiskImageDetail) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DiskImageDetail) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DiskImageDetail) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DiskImageDetail"}
+ if s.Bytes == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bytes"))
+ }
+ if s.Format == nil {
+ invalidParams.Add(request.NewErrParamRequired("Format"))
+ }
+ if s.ImportManifestUrl == nil {
+ invalidParams.Add(request.NewErrParamRequired("ImportManifestUrl"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Describes a disk image volume.
+type DiskImageVolumeDescription struct {
+ _ struct{} `type:"structure"`
+
+ // The volume identifier.
+ Id *string `locationName:"id" type:"string" required:"true"`
+
+ // The size of the volume, in GiB.
+ Size *int64 `locationName:"size" type:"long"`
+}
+
+// String returns the string representation
+func (s DiskImageVolumeDescription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DiskImageVolumeDescription) GoString() string {
+ return s.String()
+}
+
+// Describes a block device for an EBS volume.
+type EbsBlockDevice struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether the EBS volume is deleted on instance termination.
+ DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"`
+
+ // Indicates whether the EBS volume is encrypted. Encrypted Amazon EBS volumes
+ // may only be attached to instances that support Amazon EBS encryption.
+ Encrypted *bool `locationName:"encrypted" type:"boolean"`
+
+ // The number of I/O operations per second (IOPS) that the volume supports.
+ // For io1, this represents the number of IOPS that are provisioned for the
+ // volume. For gp2, this represents the baseline performance of the volume and
+ // the rate at which the volume accumulates I/O credits for bursting. For more
+ // information about General Purpose SSD baseline performance, I/O credits,
+ // and bursting, see Amazon EBS Volume Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html)
+ // in the Amazon Elastic Compute Cloud User Guide.
+ //
+ // Constraint: Range is 100-20000 IOPS for io1 volumes and 100-10000 IOPS for
+ // gp2 volumes.
+ //
+ // Condition: This parameter is required for requests to create io1 volumes;
+ // it is not used in requests to create gp2, st1, sc1, or standard volumes.
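+ //
+ // Illustrative sketch (editorial addition, not generated documentation):
+ // a block device mapping for a Provisioned IOPS (io1) volume that stays
+ // within the documented constraint. All values are placeholders; assumes
+ // the aws and ec2 packages are imported.
+ //
+ //    mapping := &ec2.BlockDeviceMapping{
+ //        DeviceName: aws.String("/dev/sdf"),
+ //        Ebs: &ec2.EbsBlockDevice{
+ //            VolumeType:          aws.String("io1"),
+ //            Iops:                aws.Int64(1000), // within the documented 100-20000 io1 range
+ //            VolumeSize:          aws.Int64(100),
+ //            DeleteOnTermination: aws.Bool(true),
+ //        },
+ //    }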
+ Iops *int64 `locationName:"iops" type:"integer"`
+
+ // The ID of the snapshot.
+ SnapshotId *string `locationName:"snapshotId" type:"string"`
+
+ // The size of the volume, in GiB.
+ //
+ // Constraints: 1-16384 for General Purpose SSD (gp2), 4-16384 for Provisioned
+ // IOPS SSD (io1), 500-16384 for Throughput Optimized HDD (st1), 500-16384 for
+ // Cold HDD (sc1), and 1-1024 for Magnetic (standard) volumes. If you specify
+ // a snapshot, the volume size must be equal to or larger than the snapshot
+ // size.
+ //
+ // Default: If you're creating the volume from a snapshot and don't specify
+ // a volume size, the default is the snapshot size.
+ VolumeSize *int64 `locationName:"volumeSize" type:"integer"`
+
+ // The volume type: gp2, io1, st1, sc1, or standard.
+ //
+ // Default: standard
+ VolumeType *string `locationName:"volumeType" type:"string" enum:"VolumeType"`
+}
+
+// String returns the string representation
+func (s EbsBlockDevice) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EbsBlockDevice) GoString() string {
+ return s.String()
+}
+
+// Describes a parameter used to set up an EBS volume in a block device mapping.
+type EbsInstanceBlockDevice struct {
+ _ struct{} `type:"structure"`
+
+ // The time stamp when the attachment was initiated.
+ AttachTime *time.Time `locationName:"attachTime" type:"timestamp" timestampFormat:"iso8601"`
+
+ // Indicates whether the volume is deleted on instance termination.
+ DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"`
+
+ // The attachment state.
+ Status *string `locationName:"status" type:"string" enum:"AttachmentStatus"`
+
+ // The ID of the EBS volume.
+ VolumeId *string `locationName:"volumeId" type:"string"`
+}
+
+// String returns the string representation
+func (s EbsInstanceBlockDevice) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EbsInstanceBlockDevice) GoString() string {
+ return s.String()
+}
+
+// Describes information used to set up an EBS volume specified in a block device
+// mapping.
+type EbsInstanceBlockDeviceSpecification struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether the volume is deleted on instance termination.
+ DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"`
+
+ // The ID of the EBS volume.
+ VolumeId *string `locationName:"volumeId" type:"string"`
+}
+
+// String returns the string representation
+func (s EbsInstanceBlockDeviceSpecification) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EbsInstanceBlockDeviceSpecification) GoString() string {
+ return s.String()
+}
+
+// Contains the parameters for EnableVgwRoutePropagation.
+type EnableVgwRoutePropagationInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the virtual private gateway.
+ GatewayId *string `type:"string" required:"true"`
+
+ // The ID of the route table.
+ RouteTableId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s EnableVgwRoutePropagationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EnableVgwRoutePropagationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *EnableVgwRoutePropagationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EnableVgwRoutePropagationInput"} + if s.GatewayId == nil { + invalidParams.Add(request.NewErrParamRequired("GatewayId")) + } + if s.RouteTableId == nil { + invalidParams.Add(request.NewErrParamRequired("RouteTableId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type EnableVgwRoutePropagationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s EnableVgwRoutePropagationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableVgwRoutePropagationOutput) GoString() string { + return s.String() +} + +// Contains the parameters for EnableVolumeIO. +type EnableVolumeIOInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the volume. + VolumeId *string `locationName:"volumeId" type:"string" required:"true"` +} + +// String returns the string representation +func (s EnableVolumeIOInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableVolumeIOInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *EnableVolumeIOInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EnableVolumeIOInput"} + if s.VolumeId == nil { + invalidParams.Add(request.NewErrParamRequired("VolumeId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type EnableVolumeIOOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s EnableVolumeIOOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableVolumeIOOutput) GoString() string { + return s.String() +} + +// Contains the parameters for EnableVpcClassicLinkDnsSupport. +type EnableVpcClassicLinkDnsSupportInput struct { + _ struct{} `type:"structure"` + + // The ID of the VPC. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s EnableVpcClassicLinkDnsSupportInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableVpcClassicLinkDnsSupportInput) GoString() string { + return s.String() +} + +// Contains the output of EnableVpcClassicLinkDnsSupport. +type EnableVpcClassicLinkDnsSupportOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s EnableVpcClassicLinkDnsSupportOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableVpcClassicLinkDnsSupportOutput) GoString() string { + return s.String() +} + +// Contains the parameters for EnableVpcClassicLink. 
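+//
+// Illustrative sketch (editorial addition, not generated documentation):
+// using DryRun to check permissions without making the change. Assumes an
+// initialized *EC2 client named svc and the aws and awserr packages; the
+// VPC ID is a placeholder.
+//
+//    _, err := svc.EnableVpcClassicLink(&ec2.EnableVpcClassicLinkInput{
+//        DryRun: aws.Bool(true),
+//        VpcId:  aws.String("vpc-0example"),
+//    })
+//    if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "DryRunOperation" {
+//        // The caller is authorized to make the real request.
+//    }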
+type EnableVpcClassicLinkInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string" required:"true"` +} + +// String returns the string representation +func (s EnableVpcClassicLinkInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableVpcClassicLinkInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *EnableVpcClassicLinkInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EnableVpcClassicLinkInput"} + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of EnableVpcClassicLink. +type EnableVpcClassicLinkOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s EnableVpcClassicLinkOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableVpcClassicLinkOutput) GoString() string { + return s.String() +} + +// Describes a Spot fleet event. +type EventInformation struct { + _ struct{} `type:"structure"` + + // The description of the event. + EventDescription *string `locationName:"eventDescription" type:"string"` + + // The event. + // + // The following are the error events. + // + // iamFleetRoleInvalid - The Spot fleet did not have the required permissions + // either to launch or terminate an instance. + // + // launchSpecTemporarilyBlacklisted - The configuration is not valid and + // several attempts to launch instances have failed. For more information, see + // the description of the event. + // + // spotFleetRequestConfigurationInvalid - The configuration is not valid. + // For more information, see the description of the event. + // + // spotInstanceCountLimitExceeded - You've reached the limit on the number + // of Spot instances that you can launch. + // + // The following are the fleetRequestChange events. + // + // active - The Spot fleet has been validated and Amazon EC2 is attempting + // to maintain the target number of running Spot instances. + // + // cancelled - The Spot fleet is canceled and has no running Spot instances. + // The Spot fleet will be deleted two days after its instances were terminated. + // + // cancelled_running - The Spot fleet is canceled and will not launch additional + // Spot instances, but its existing Spot instances continue to run until they + // are interrupted or terminated. + // + // cancelled_terminating - The Spot fleet is canceled and its Spot instances + // are terminating. + // + // expired - The Spot fleet request has expired. A subsequent event indicates + // that the instances were terminated, if the request was created with TerminateInstancesWithExpiration + // set. + // + // modify_in_progress - A request to modify the Spot fleet request was accepted + // and is in progress. 
+ //
+ // modify_successful - The Spot fleet request was modified.
+ //
+ // price_update - The bid price for a launch configuration was adjusted
+ // because it was too high. This change is permanent.
+ //
+ // submitted - The Spot fleet request is being evaluated and Amazon EC2
+ // is preparing to launch the target number of Spot instances.
+ //
+ // The following are the instanceChange events.
+ //
+ // launched - A bid was fulfilled and a new instance was launched.
+ //
+ // terminated - An instance was terminated by the user.
+ EventSubType *string `locationName:"eventSubType" type:"string"`
+
+ // The ID of the instance. This information is available only for instanceChange
+ // events.
+ InstanceId *string `locationName:"instanceId" type:"string"`
+}
+
+// String returns the string representation
+func (s EventInformation) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EventInformation) GoString() string {
+ return s.String()
+}
+
+// Describes an instance export task.
+type ExportTask struct {
+ _ struct{} `type:"structure"`
+
+ // A description of the resource being exported.
+ Description *string `locationName:"description" type:"string"`
+
+ // The ID of the export task.
+ ExportTaskId *string `locationName:"exportTaskId" type:"string"`
+
+ // Information about the export task.
+ ExportToS3Task *ExportToS3Task `locationName:"exportToS3" type:"structure"`
+
+ // Information about the instance to export.
+ InstanceExportDetails *InstanceExportDetails `locationName:"instanceExport" type:"structure"`
+
+ // The state of the export task.
+ State *string `locationName:"state" type:"string" enum:"ExportTaskState"`
+
+ // The status message related to the export task.
+ StatusMessage *string `locationName:"statusMessage" type:"string"`
+}
+
+// String returns the string representation
+func (s ExportTask) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ExportTask) GoString() string {
+ return s.String()
+}
+
+// Describes the format and location for an instance export task.
+type ExportToS3Task struct {
+ _ struct{} `type:"structure"`
+
+ // The container format used to combine disk images with metadata (such as OVF).
+ // If absent, only the disk image is exported.
+ ContainerFormat *string `locationName:"containerFormat" type:"string" enum:"ContainerFormat"`
+
+ // The format for the exported image.
+ DiskImageFormat *string `locationName:"diskImageFormat" type:"string" enum:"DiskImageFormat"`
+
+ // The S3 bucket for the destination image. The destination bucket must exist
+ // and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com.
+ S3Bucket *string `locationName:"s3Bucket" type:"string"`
+
+ // The S3 object key where the exported image is written.
+ S3Key *string `locationName:"s3Key" type:"string"`
+}
+
+// String returns the string representation
+func (s ExportToS3Task) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ExportToS3Task) GoString() string {
+ return s.String()
+}
+
+// Describes an instance export task.
+type ExportToS3TaskSpecification struct {
+ _ struct{} `type:"structure"`
+
+ // The container format used to combine disk images with metadata (such as OVF).
+ // If absent, only the disk image is exported.
+ ContainerFormat *string `locationName:"containerFormat" type:"string" enum:"ContainerFormat"`
+
+ // The format for the exported image.
+ DiskImageFormat *string `locationName:"diskImageFormat" type:"string" enum:"DiskImageFormat"`
+
+ // The S3 bucket for the destination image. The destination bucket must exist
+ // and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com.
+ S3Bucket *string `locationName:"s3Bucket" type:"string"`
+
+ // The image is written to a single object in the S3 bucket at the S3 key s3prefix
+ // + exportTaskId + '.' + diskImageFormat.
+ S3Prefix *string `locationName:"s3Prefix" type:"string"`
+}
+
+// String returns the string representation
+func (s ExportToS3TaskSpecification) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ExportToS3TaskSpecification) GoString() string {
+ return s.String()
+}
+
+// A filter name and value pair that is used to return a more specific list
+// of results. Filters can be used to match a set of resources by various criteria,
+// such as tags, attributes, or IDs.
+type Filter struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the filter. Filter names are case-sensitive.
+ Name *string `type:"string"`
+
+ // One or more filter values. Filter values are case-sensitive.
+ Values []*string `locationName:"Value" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s Filter) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Filter) GoString() string {
+ return s.String()
+}
+
+// Describes a flow log.
+type FlowLog struct {
+ _ struct{} `type:"structure"`
+
+ // The date and time the flow log was created.
+ CreationTime *time.Time `locationName:"creationTime" type:"timestamp" timestampFormat:"iso8601"`
+
+ // Information about the error that occurred. Rate limited indicates that CloudWatch
+ // Logs throttling has been applied for one or more network interfaces, or that
+ // you've reached the limit on the number of CloudWatch Logs log groups that
+ // you can create. Access error indicates that the IAM role associated with
+ // the flow log does not have sufficient permissions to publish to CloudWatch
+ // Logs. Unknown error indicates an internal error.
+ DeliverLogsErrorMessage *string `locationName:"deliverLogsErrorMessage" type:"string"`
+
+ // The ARN of the IAM role that posts logs to CloudWatch Logs.
+ DeliverLogsPermissionArn *string `locationName:"deliverLogsPermissionArn" type:"string"`
+
+ // The status of the logs delivery (SUCCESS | FAILED).
+ DeliverLogsStatus *string `locationName:"deliverLogsStatus" type:"string"`
+
+ // The flow log ID.
+ FlowLogId *string `locationName:"flowLogId" type:"string"`
+
+ // The status of the flow log (ACTIVE).
+ FlowLogStatus *string `locationName:"flowLogStatus" type:"string"`
+
+ // The name of the flow log group.
+ LogGroupName *string `locationName:"logGroupName" type:"string"`
+
+ // The ID of the resource on which the flow log was created.
+ ResourceId *string `locationName:"resourceId" type:"string"`
+
+ // The type of traffic captured for the flow log.
+ TrafficType *string `locationName:"trafficType" type:"string" enum:"TrafficType"`
+}
+
+// String returns the string representation
+func (s FlowLog) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s FlowLog) GoString() string {
+ return s.String()
+}
+
+// Contains the parameters for GetConsoleOutput.
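+//
+// Illustrative sketch (editorial addition, not generated documentation):
+// fetching and decoding the console output, which the API returns
+// Base64-encoded. Assumes an initialized *EC2 client named svc and the
+// encoding/base64 and fmt packages; the instance ID is a placeholder.
+//
+//    out, err := svc.GetConsoleOutput(&ec2.GetConsoleOutputInput{
+//        InstanceId: aws.String("i-0example"),
+//    })
+//    if err == nil && out.Output != nil {
+//        text, _ := base64.StdEncoding.DecodeString(*out.Output)
+//        fmt.Printf("%s\n", text)
+//    }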
+type GetConsoleOutputInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance. + InstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetConsoleOutputInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetConsoleOutputInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetConsoleOutputInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetConsoleOutputInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of GetConsoleOutput. +type GetConsoleOutputOutput struct { + _ struct{} `type:"structure"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The console output, Base64-encoded. If using a command line tool, the tool + // decodes the output for you. + Output *string `locationName:"output" type:"string"` + + // The time the output was last updated. + Timestamp *time.Time `locationName:"timestamp" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s GetConsoleOutputOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetConsoleOutputOutput) GoString() string { + return s.String() +} + +// Contains the parameters for the request. +type GetConsoleScreenshotInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the instance. + InstanceId *string `type:"string" required:"true"` + + // When set to true, acts as keystroke input and wakes up an instance that's + // in standby or "sleep" mode. + WakeUp *bool `type:"boolean"` +} + +// String returns the string representation +func (s GetConsoleScreenshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetConsoleScreenshotInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetConsoleScreenshotInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetConsoleScreenshotInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of the request. +type GetConsoleScreenshotOutput struct { + _ struct{} `type:"structure"` + + // The data that comprises the image. + ImageData *string `locationName:"imageData" type:"string"` + + // The ID of the instance. 
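+ //
+ // Illustrative sketch (editorial addition, not generated documentation):
+ // persisting the screenshot carried in ImageData, given a
+ // GetConsoleScreenshot response named shot and assuming (editorial
+ // assumption) that the image data is Base64-encoded like console output.
+ // Requires the encoding/base64 and io/ioutil packages; the file name is a
+ // placeholder.
+ //
+ //    if shot.ImageData != nil {
+ //        img, err := base64.StdEncoding.DecodeString(*shot.ImageData)
+ //        if err == nil {
+ //            _ = ioutil.WriteFile("screenshot.jpg", img, 0644)
+ //        }
+ //    }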
+ InstanceId *string `locationName:"instanceId" type:"string"` +} + +// String returns the string representation +func (s GetConsoleScreenshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetConsoleScreenshotOutput) GoString() string { + return s.String() +} + +// Contains the parameters for GetPasswordData. +type GetPasswordDataInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the Windows instance. + InstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetPasswordDataInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPasswordDataInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetPasswordDataInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetPasswordDataInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of GetPasswordData. +type GetPasswordDataOutput struct { + _ struct{} `type:"structure"` + + // The ID of the Windows instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The password of the instance. + PasswordData *string `locationName:"passwordData" type:"string"` + + // The time the data was last updated. + Timestamp *time.Time `locationName:"timestamp" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s GetPasswordDataOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPasswordDataOutput) GoString() string { + return s.String() +} + +// Describes a security group. +type GroupIdentifier struct { + _ struct{} `type:"structure"` + + // The ID of the security group. + GroupId *string `locationName:"groupId" type:"string"` + + // The name of the security group. + GroupName *string `locationName:"groupName" type:"string"` +} + +// String returns the string representation +func (s GroupIdentifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GroupIdentifier) GoString() string { + return s.String() +} + +// Describes an event in the history of the Spot fleet request. +type HistoryRecord struct { + _ struct{} `type:"structure"` + + // Information about the event. + EventInformation *EventInformation `locationName:"eventInformation" type:"structure" required:"true"` + + // The event type. + // + // error - Indicates an error with the Spot fleet request. + // + // fleetRequestChange - Indicates a change in the status or configuration + // of the Spot fleet request. + // + // instanceChange - Indicates that an instance was launched or terminated. + EventType *string `locationName:"eventType" type:"string" required:"true" enum:"EventType"` + + // The date and time of the event, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). 
+ Timestamp *time.Time `locationName:"timestamp" type:"timestamp" timestampFormat:"iso8601" required:"true"`
+}
+
+// String returns the string representation
+func (s HistoryRecord) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HistoryRecord) GoString() string {
+ return s.String()
+}
+
+// Describes the properties of the Dedicated host.
+type Host struct {
+ _ struct{} `type:"structure"`
+
+ // Whether auto-placement is on or off.
+ AutoPlacement *string `locationName:"autoPlacement" type:"string" enum:"AutoPlacement"`
+
+ // The Availability Zone of the Dedicated host.
+ AvailabilityZone *string `locationName:"availabilityZone" type:"string"`
+
+ // The number of new instances that can be launched onto the Dedicated host.
+ AvailableCapacity *AvailableCapacity `locationName:"availableCapacity" type:"structure"`
+
+ // Unique, case-sensitive identifier you provide to ensure idempotency of the
+ // request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html)
+ // in the Amazon Elastic Compute Cloud User Guide.
+ ClientToken *string `locationName:"clientToken" type:"string"`
+
+ // The ID of the Dedicated host.
+ HostId *string `locationName:"hostId" type:"string"`
+
+ // The hardware specifications of the Dedicated host.
+ HostProperties *HostProperties `locationName:"hostProperties" type:"structure"`
+
+ // The reservation ID of the Dedicated host. This returns a null response if
+ // the Dedicated host doesn't have an associated reservation.
+ HostReservationId *string `locationName:"hostReservationId" type:"string"`
+
+ // The IDs and instance types of the instances currently running on the Dedicated
+ // host.
+ Instances []*HostInstance `locationName:"instances" locationNameList:"item" type:"list"`
+
+ // The Dedicated host's state.
+ State *string `locationName:"state" type:"string" enum:"AllocationState"`
+}
+
+// String returns the string representation
+func (s Host) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Host) GoString() string {
+ return s.String()
+}
+
+// Describes an instance running on a Dedicated host.
+type HostInstance struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the instance running on the Dedicated host.
+ InstanceId *string `locationName:"instanceId" type:"string"`
+
+ // The instance type size (for example, m3.medium) of the running instance.
+ InstanceType *string `locationName:"instanceType" type:"string"`
+}
+
+// String returns the string representation
+func (s HostInstance) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HostInstance) GoString() string {
+ return s.String()
+}
+
+// Describes properties of a Dedicated host.
+type HostProperties struct {
+ _ struct{} `type:"structure"`
+
+ // The number of cores on the Dedicated host.
+ Cores *int64 `locationName:"cores" type:"integer"`
+
+ // The instance type size that the Dedicated host supports (for example, m3.medium).
+ InstanceType *string `locationName:"instanceType" type:"string"`
+
+ // The number of sockets on the Dedicated host.
+ Sockets *int64 `locationName:"sockets" type:"integer"`
+
+ // The number of vCPUs on the Dedicated host.
+ TotalVCpus *int64 `locationName:"totalVCpus" type:"integer"`
+}
+
+// String returns the string representation
+func (s HostProperties) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HostProperties) GoString() string {
+ return s.String()
+}
+
+// Describes an IAM instance profile.
+type IamInstanceProfile struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the instance profile.
+ Arn *string `locationName:"arn" type:"string"`
+
+ // The ID of the instance profile.
+ Id *string `locationName:"id" type:"string"`
+}
+
+// String returns the string representation
+func (s IamInstanceProfile) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s IamInstanceProfile) GoString() string {
+ return s.String()
+}
+
+// Describes an IAM instance profile.
+type IamInstanceProfileSpecification struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the instance profile.
+ Arn *string `locationName:"arn" type:"string"`
+
+ // The name of the instance profile.
+ Name *string `locationName:"name" type:"string"`
+}
+
+// String returns the string representation
+func (s IamInstanceProfileSpecification) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s IamInstanceProfileSpecification) GoString() string {
+ return s.String()
+}
+
+// Describes the ICMP type and code.
+type IcmpTypeCode struct {
+ _ struct{} `type:"structure"`
+
+ // The ICMP code. A value of -1 means all codes for the specified ICMP type.
+ Code *int64 `locationName:"code" type:"integer"`
+
+ // The ICMP type. A value of -1 means all types.
+ Type *int64 `locationName:"type" type:"integer"`
+}
+
+// String returns the string representation
+func (s IcmpTypeCode) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s IcmpTypeCode) GoString() string {
+ return s.String()
+}
+
+// Describes the ID format for a resource.
+type IdFormat struct {
+ _ struct{} `type:"structure"`
+
+ // The date in UTC at which you are permanently switched over to using longer
+ // IDs. If a deadline is not yet available for this resource type, this field
+ // is not returned.
+ Deadline *time.Time `locationName:"deadline" type:"timestamp" timestampFormat:"iso8601"`
+
+ // The type of resource.
+ Resource *string `locationName:"resource" type:"string"`
+
+ // Indicates whether longer IDs (17-character IDs) are enabled for the resource.
+ UseLongIds *bool `locationName:"useLongIds" type:"boolean"`
+}
+
+// String returns the string representation
+func (s IdFormat) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s IdFormat) GoString() string {
+ return s.String()
+}
+
+// Describes an image.
+type Image struct {
+ _ struct{} `type:"structure"`
+
+ // The architecture of the image.
+ Architecture *string `locationName:"architecture" type:"string" enum:"ArchitectureValues"`
+
+ // Any block device mapping entries.
+ BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"`
+
+ // The date and time the image was created.
+ CreationDate *string `locationName:"creationDate" type:"string"`
+
+ // The description of the AMI that was provided during image creation.
+ Description *string `locationName:"description" type:"string"` + + // Specifies whether enhanced networking with ENA is enabled. + EnaSupport *bool `locationName:"enaSupport" type:"boolean"` + + // The hypervisor type of the image. + Hypervisor *string `locationName:"hypervisor" type:"string" enum:"HypervisorType"` + + // The ID of the AMI. + ImageId *string `locationName:"imageId" type:"string"` + + // The location of the AMI. + ImageLocation *string `locationName:"imageLocation" type:"string"` + + // The AWS account alias (for example, amazon, self) or the AWS account ID of + // the AMI owner. + ImageOwnerAlias *string `locationName:"imageOwnerAlias" type:"string"` + + // The type of image. + ImageType *string `locationName:"imageType" type:"string" enum:"ImageTypeValues"` + + // The kernel associated with the image, if any. Only applicable for machine + // images. + KernelId *string `locationName:"kernelId" type:"string"` + + // The name of the AMI that was provided during image creation. + Name *string `locationName:"name" type:"string"` + + // The AWS account ID of the image owner. + OwnerId *string `locationName:"imageOwnerId" type:"string"` + + // The value is Windows for Windows AMIs; otherwise blank. + Platform *string `locationName:"platform" type:"string" enum:"PlatformValues"` + + // Any product codes associated with the AMI. + ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"` + + // Indicates whether the image has public launch permissions. The value is true + // if this image has public launch permissions or false if it has only implicit + // and explicit launch permissions. + Public *bool `locationName:"isPublic" type:"boolean"` + + // The RAM disk associated with the image, if any. Only applicable for machine + // images. + RamdiskId *string `locationName:"ramdiskId" type:"string"` + + // The device name of the root device (for example, /dev/sda1 or /dev/xvda). + RootDeviceName *string `locationName:"rootDeviceName" type:"string"` + + // The type of root device used by the AMI. The AMI can use an EBS volume or + // an instance store volume. + RootDeviceType *string `locationName:"rootDeviceType" type:"string" enum:"DeviceType"` + + // Specifies whether enhanced networking with the Intel 82599 Virtual Function + // interface is enabled. + SriovNetSupport *string `locationName:"sriovNetSupport" type:"string"` + + // The current state of the AMI. If the state is available, the image is successfully + // registered and can be used to launch an instance. + State *string `locationName:"imageState" type:"string" enum:"ImageState"` + + // The reason for the state change. + StateReason *StateReason `locationName:"stateReason" type:"structure"` + + // Any tags assigned to the image. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The type of virtualization of the AMI. + VirtualizationType *string `locationName:"virtualizationType" type:"string" enum:"VirtualizationType"` +} + +// String returns the string representation +func (s Image) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Image) GoString() string { + return s.String() +} + +// Describes the disk container object for an import image task. +type ImageDiskContainer struct { + _ struct{} `type:"structure"` + + // The description of the disk image. + Description *string `type:"string"` + + // The block device mapping for the disk. 
+ DeviceName *string `type:"string"`
+
+ // The format of the disk image being imported.
+ //
+ // Valid values: RAW | VHD | VMDK | OVA
+ Format *string `type:"string"`
+
+ // The ID of the EBS snapshot to be used for importing the snapshot.
+ SnapshotId *string `type:"string"`
+
+ // The URL to the Amazon S3-based disk image being imported. The URL can either
+ // be an https URL (https://..) or an Amazon S3 URL (s3://..).
+ Url *string `type:"string"`
+
+ // The S3 bucket for the disk image.
+ UserBucket *UserBucket `type:"structure"`
+}
+
+// String returns the string representation
+func (s ImageDiskContainer) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ImageDiskContainer) GoString() string {
+ return s.String()
+}
+
+// Contains the parameters for ImportImage.
+type ImportImageInput struct {
+ _ struct{} `type:"structure"`
+
+ // The architecture of the virtual machine.
+ //
+ // Valid values: i386 | x86_64
+ Architecture *string `type:"string"`
+
+ // The client-specific data.
+ ClientData *ClientData `type:"structure"`
+
+ // The token to enable idempotency for VM import requests.
+ ClientToken *string `type:"string"`
+
+ // A description string for the import image task.
+ Description *string `type:"string"`
+
+ // Information about the disk containers.
+ DiskContainers []*ImageDiskContainer `locationName:"DiskContainer" locationNameList:"item" type:"list"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The target hypervisor platform.
+ //
+ // Valid values: xen
+ Hypervisor *string `type:"string"`
+
+ // The license type to be used for the Amazon Machine Image (AMI) after importing.
+ //
+ // Note: You may only use BYOL if you have existing licenses with rights to
+ // use these licenses in a third party cloud like AWS. For more information,
+ // see VM Import/Export Prerequisites (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/VMImportPrerequisites.html)
+ // in the Amazon Elastic Compute Cloud User Guide.
+ //
+ // Valid values: AWS | BYOL
+ LicenseType *string `type:"string"`
+
+ // The operating system of the virtual machine.
+ //
+ // Valid values: Windows | Linux
+ Platform *string `type:"string"`
+
+ // The name of the role to use when not using the default role, 'vmimport'.
+ RoleName *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ImportImageInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ImportImageInput) GoString() string {
+ return s.String()
+}
+
+// Contains the output for ImportImage.
+type ImportImageOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The architecture of the virtual machine.
+ Architecture *string `locationName:"architecture" type:"string"`
+
+ // A description of the import task.
+ Description *string `locationName:"description" type:"string"`
+
+ // The target hypervisor of the import task.
+ Hypervisor *string `locationName:"hypervisor" type:"string"`
+
+ // The ID of the Amazon Machine Image (AMI) created by the import task.
+ ImageId *string `locationName:"imageId" type:"string"`
+
+ // The task ID of the import image task.
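+ // The task ID can be passed to DescribeImportImageTasks to poll the task's
+ // progress. Illustrative sketch (editorial addition, not generated
+ // documentation), assuming an initialized *EC2 client named svc and an
+ // ImportImage response named out:
+ //
+ //    tasks, err := svc.DescribeImportImageTasks(&ec2.DescribeImportImageTasksInput{
+ //        ImportTaskIds: []*string{out.ImportTaskId},
+ //    })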
+ ImportTaskId *string `locationName:"importTaskId" type:"string"`
+
+ // The license type of the virtual machine.
+ LicenseType *string `locationName:"licenseType" type:"string"`
+
+ // The operating system of the virtual machine.
+ Platform *string `locationName:"platform" type:"string"`
+
+ // The progress of the task.
+ Progress *string `locationName:"progress" type:"string"`
+
+ // Information about the snapshots.
+ SnapshotDetails []*SnapshotDetail `locationName:"snapshotDetailSet" locationNameList:"item" type:"list"`
+
+ // A brief status of the task.
+ Status *string `locationName:"status" type:"string"`
+
+ // A detailed status message of the import task.
+ StatusMessage *string `locationName:"statusMessage" type:"string"`
+}
+
+// String returns the string representation
+func (s ImportImageOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ImportImageOutput) GoString() string {
+ return s.String()
+}
+
+// Describes an import image task.
+type ImportImageTask struct {
+ _ struct{} `type:"structure"`
+
+ // The architecture of the virtual machine.
+ //
+ // Valid values: i386 | x86_64
+ Architecture *string `locationName:"architecture" type:"string"`
+
+ // A description of the import task.
+ Description *string `locationName:"description" type:"string"`
+
+ // The target hypervisor for the import task.
+ //
+ // Valid values: xen
+ Hypervisor *string `locationName:"hypervisor" type:"string"`
+
+ // The ID of the Amazon Machine Image (AMI) of the imported virtual machine.
+ ImageId *string `locationName:"imageId" type:"string"`
+
+ // The ID of the import image task.
+ ImportTaskId *string `locationName:"importTaskId" type:"string"`
+
+ // The license type of the virtual machine.
+ LicenseType *string `locationName:"licenseType" type:"string"`
+
+ // The operating system of the virtual machine.
+ Platform *string `locationName:"platform" type:"string"`
+
+ // The percentage of progress of the import image task.
+ Progress *string `locationName:"progress" type:"string"`
+
+ // Information about the snapshots.
+ SnapshotDetails []*SnapshotDetail `locationName:"snapshotDetailSet" locationNameList:"item" type:"list"`
+
+ // A brief status for the import image task.
+ Status *string `locationName:"status" type:"string"`
+
+ // A descriptive status message for the import image task.
+ StatusMessage *string `locationName:"statusMessage" type:"string"`
+}
+
+// String returns the string representation
+func (s ImportImageTask) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ImportImageTask) GoString() string {
+ return s.String()
+}
+
+// Contains the parameters for ImportInstance.
+type ImportInstanceInput struct {
+ _ struct{} `type:"structure"`
+
+ // A description for the instance being imported.
+ Description *string `locationName:"description" type:"string"`
+
+ // The disk image.
+ DiskImages []*DiskImage `locationName:"diskImage" type:"list"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+ // The launch specification.
+ LaunchSpecification *ImportInstanceLaunchSpecification `locationName:"launchSpecification" type:"structure"`
+
+ // The instance operating system.
+ Platform *string `locationName:"platform" type:"string" required:"true" enum:"PlatformValues"` +} + +// String returns the string representation +func (s ImportInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ImportInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ImportInstanceInput"} + if s.Platform == nil { + invalidParams.Add(request.NewErrParamRequired("Platform")) + } + if s.DiskImages != nil { + for i, v := range s.DiskImages { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DiskImages", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Describes the launch specification for VM import. +type ImportInstanceLaunchSpecification struct { + _ struct{} `type:"structure"` + + // Reserved. + AdditionalInfo *string `locationName:"additionalInfo" type:"string"` + + // The architecture of the instance. + Architecture *string `locationName:"architecture" type:"string" enum:"ArchitectureValues"` + + // One or more security group IDs. + GroupIds []*string `locationName:"GroupId" locationNameList:"SecurityGroupId" type:"list"` + + // One or more security group names. + GroupNames []*string `locationName:"GroupName" locationNameList:"SecurityGroup" type:"list"` + + // Indicates whether an instance stops or terminates when you initiate shutdown + // from the instance (using the operating system command for system shutdown). + InstanceInitiatedShutdownBehavior *string `locationName:"instanceInitiatedShutdownBehavior" type:"string" enum:"ShutdownBehavior"` + + // The instance type. For more information about the instance types that you + // can import, see Before You Get Started (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/VMImportPrerequisites.html) + // in the Amazon Elastic Compute Cloud User Guide. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // Indicates whether monitoring is enabled. + Monitoring *bool `locationName:"monitoring" type:"boolean"` + + // The placement information for the instance. + Placement *Placement `locationName:"placement" type:"structure"` + + // [EC2-VPC] An available IP address from the IP address range of the subnet. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // [EC2-VPC] The ID of the subnet in which to launch the instance. + SubnetId *string `locationName:"subnetId" type:"string"` + + // The user data to make available to the instance. If you are using an AWS + // SDK or command line tool, Base64-encoding is performed for you, and you can + // load the text from a file. Otherwise, you must provide Base64-encoded text. + UserData *UserData `locationName:"userData" type:"structure"` +} + +// String returns the string representation +func (s ImportInstanceLaunchSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportInstanceLaunchSpecification) GoString() string { + return s.String() +} + +// Contains the output for ImportInstance. +type ImportInstanceOutput struct { + _ struct{} `type:"structure"` + + // Information about the conversion task. 
+ ConversionTask *ConversionTask `locationName:"conversionTask" type:"structure"` +} + +// String returns the string representation +func (s ImportInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportInstanceOutput) GoString() string { + return s.String() +} + +// Describes an import instance task. +type ImportInstanceTaskDetails struct { + _ struct{} `type:"structure"` + + // A description of the task. + Description *string `locationName:"description" type:"string"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The instance operating system. + Platform *string `locationName:"platform" type:"string" enum:"PlatformValues"` + + // One or more volumes. + Volumes []*ImportInstanceVolumeDetailItem `locationName:"volumes" locationNameList:"item" type:"list" required:"true"` +} + +// String returns the string representation +func (s ImportInstanceTaskDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportInstanceTaskDetails) GoString() string { + return s.String() +} + +// Describes an import volume task. +type ImportInstanceVolumeDetailItem struct { + _ struct{} `type:"structure"` + + // The Availability Zone where the resulting instance will reside. + AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"` + + // The number of bytes converted so far. + BytesConverted *int64 `locationName:"bytesConverted" type:"long" required:"true"` + + // A description of the task. + Description *string `locationName:"description" type:"string"` + + // The image. + Image *DiskImageDescription `locationName:"image" type:"structure" required:"true"` + + // The status of the import of this particular disk image. + Status *string `locationName:"status" type:"string" required:"true"` + + // The status information or errors related to the disk image. + StatusMessage *string `locationName:"statusMessage" type:"string"` + + // The volume. + Volume *DiskImageVolumeDescription `locationName:"volume" type:"structure" required:"true"` +} + +// String returns the string representation +func (s ImportInstanceVolumeDetailItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportInstanceVolumeDetailItem) GoString() string { + return s.String() +} + +// Contains the parameters for ImportKeyPair. +type ImportKeyPairInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // A unique name for the key pair. + KeyName *string `locationName:"keyName" type:"string" required:"true"` + + // The public key. For API calls, the text must be base64-encoded. For command + // line tools, base64 encoding is performed for you. + // + // PublicKeyMaterial is automatically base64 encoded/decoded by the SDK. 
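+ //
+ // Illustrative sketch (editorial addition, not generated documentation):
+ // because the SDK handles the base64 step, the raw public key text is
+ // passed as bytes. Assumes an initialized *EC2 client named svc; the key
+ // name and material are placeholders.
+ //
+ //    _, err := svc.ImportKeyPair(&ec2.ImportKeyPairInput{
+ //        KeyName:           aws.String("my-key"),
+ //        PublicKeyMaterial: []byte("ssh-rsa AAAA... user@example"),
+ //    })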
+ PublicKeyMaterial []byte `locationName:"publicKeyMaterial" type:"blob" required:"true"` +} + +// String returns the string representation +func (s ImportKeyPairInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportKeyPairInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ImportKeyPairInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ImportKeyPairInput"} + if s.KeyName == nil { + invalidParams.Add(request.NewErrParamRequired("KeyName")) + } + if s.PublicKeyMaterial == nil { + invalidParams.Add(request.NewErrParamRequired("PublicKeyMaterial")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of ImportKeyPair. +type ImportKeyPairOutput struct { + _ struct{} `type:"structure"` + + // The MD5 public key fingerprint as specified in section 4 of RFC 4716. + KeyFingerprint *string `locationName:"keyFingerprint" type:"string"` + + // The key pair name you provided. + KeyName *string `locationName:"keyName" type:"string"` +} + +// String returns the string representation +func (s ImportKeyPairOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportKeyPairOutput) GoString() string { + return s.String() +} + +// Contains the parameters for ImportSnapshot. +type ImportSnapshotInput struct { + _ struct{} `type:"structure"` + + // The client-specific data. + ClientData *ClientData `type:"structure"` + + // Token to enable idempotency for VM import requests. + ClientToken *string `type:"string"` + + // The description string for the import snapshot task. + Description *string `type:"string"` + + // Information about the disk container. + DiskContainer *SnapshotDiskContainer `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The name of the role to use when not using the default role, 'vmimport'. + RoleName *string `type:"string"` +} + +// String returns the string representation +func (s ImportSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportSnapshotInput) GoString() string { + return s.String() +} + +// Contains the output for ImportSnapshot. +type ImportSnapshotOutput struct { + _ struct{} `type:"structure"` + + // A description of the import snapshot task. + Description *string `locationName:"description" type:"string"` + + // The ID of the import snapshot task. + ImportTaskId *string `locationName:"importTaskId" type:"string"` + + // Information about the import snapshot task. + SnapshotTaskDetail *SnapshotTaskDetail `locationName:"snapshotTaskDetail" type:"structure"` +} + +// String returns the string representation +func (s ImportSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportSnapshotOutput) GoString() string { + return s.String() +} + +// Describes an import snapshot task. +type ImportSnapshotTask struct { + _ struct{} `type:"structure"` + + // A description of the import snapshot task. 
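+ //
+ // Editor's note: a minimal, hedged sketch of starting an import snapshot
+ // task (not part of the generated SDK; the bucket, key, and disk format
+ // below are hypothetical):
+ //
+ //   out, err := svc.ImportSnapshot(&ec2.ImportSnapshotInput{
+ //       Description: aws.String("web server boot disk"),
+ //       DiskContainer: &ec2.SnapshotDiskContainer{
+ //           Format: aws.String("VMDK"),
+ //           UserBucket: &ec2.UserBucket{
+ //               S3Bucket: aws.String("my-import-bucket"),
+ //               S3Key:    aws.String("disks/web.vmdk"),
+ //           },
+ //       },
+ //   })
+ //   // poll the task via DescribeImportSnapshotTasks using out.ImportTaskId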
+ Description *string `locationName:"description" type:"string"` + + // The ID of the import snapshot task. + ImportTaskId *string `locationName:"importTaskId" type:"string"` + + // Describes an import snapshot task. + SnapshotTaskDetail *SnapshotTaskDetail `locationName:"snapshotTaskDetail" type:"structure"` +} + +// String returns the string representation +func (s ImportSnapshotTask) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportSnapshotTask) GoString() string { + return s.String() +} + +// Contains the parameters for ImportVolume. +type ImportVolumeInput struct { + _ struct{} `type:"structure"` + + // The Availability Zone for the resulting EBS volume. + AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"` + + // A description of the volume. + Description *string `locationName:"description" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The disk image. + Image *DiskImageDetail `locationName:"image" type:"structure" required:"true"` + + // The volume size. + Volume *VolumeDetail `locationName:"volume" type:"structure" required:"true"` +} + +// String returns the string representation +func (s ImportVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportVolumeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ImportVolumeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ImportVolumeInput"} + if s.AvailabilityZone == nil { + invalidParams.Add(request.NewErrParamRequired("AvailabilityZone")) + } + if s.Image == nil { + invalidParams.Add(request.NewErrParamRequired("Image")) + } + if s.Volume == nil { + invalidParams.Add(request.NewErrParamRequired("Volume")) + } + if s.Image != nil { + if err := s.Image.Validate(); err != nil { + invalidParams.AddNested("Image", err.(request.ErrInvalidParams)) + } + } + if s.Volume != nil { + if err := s.Volume.Validate(); err != nil { + invalidParams.AddNested("Volume", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output for ImportVolume. +type ImportVolumeOutput struct { + _ struct{} `type:"structure"` + + // Information about the conversion task. + ConversionTask *ConversionTask `locationName:"conversionTask" type:"structure"` +} + +// String returns the string representation +func (s ImportVolumeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportVolumeOutput) GoString() string { + return s.String() +} + +// Describes an import volume task. +type ImportVolumeTaskDetails struct { + _ struct{} `type:"structure"` + + // The Availability Zone where the resulting volume will reside. + AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"` + + // The number of bytes converted so far. + BytesConverted *int64 `locationName:"bytesConverted" type:"long" required:"true"` + + // The description you provided when starting the import volume task. 
+ Description *string `locationName:"description" type:"string"` + + // The image. + Image *DiskImageDescription `locationName:"image" type:"structure" required:"true"` + + // The volume. + Volume *DiskImageVolumeDescription `locationName:"volume" type:"structure" required:"true"` +} + +// String returns the string representation +func (s ImportVolumeTaskDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportVolumeTaskDetails) GoString() string { + return s.String() +} + +// Describes an instance. +type Instance struct { + _ struct{} `type:"structure"` + + // The AMI launch index, which can be used to find this instance in the launch + // group. + AmiLaunchIndex *int64 `locationName:"amiLaunchIndex" type:"integer"` + + // The architecture of the image. + Architecture *string `locationName:"architecture" type:"string" enum:"ArchitectureValues"` + + // Any block device mapping entries for the instance. + BlockDeviceMappings []*InstanceBlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` + + // The idempotency token you provided when you launched the instance, if applicable. + ClientToken *string `locationName:"clientToken" type:"string"` + + // Indicates whether the instance is optimized for EBS I/O. This optimization + // provides dedicated throughput to Amazon EBS and an optimized configuration + // stack to provide optimal I/O performance. This optimization isn't available + // with all instance types. Additional usage charges apply when using an EBS + // Optimized instance. + EbsOptimized *bool `locationName:"ebsOptimized" type:"boolean"` + + // Specifies whether enhanced networking with ENA is enabled. + EnaSupport *bool `locationName:"enaSupport" type:"boolean"` + + // The hypervisor type of the instance. + Hypervisor *string `locationName:"hypervisor" type:"string" enum:"HypervisorType"` + + // The IAM instance profile associated with the instance, if applicable. + IamInstanceProfile *IamInstanceProfile `locationName:"iamInstanceProfile" type:"structure"` + + // The ID of the AMI used to launch the instance. + ImageId *string `locationName:"imageId" type:"string"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // Indicates whether this is a Spot instance or a Scheduled Instance. + InstanceLifecycle *string `locationName:"instanceLifecycle" type:"string" enum:"InstanceLifecycleType"` + + // The instance type. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // The kernel associated with this instance, if applicable. + KernelId *string `locationName:"kernelId" type:"string"` + + // The name of the key pair, if this instance was launched with an associated + // key pair. + KeyName *string `locationName:"keyName" type:"string"` + + // The time the instance was launched. + LaunchTime *time.Time `locationName:"launchTime" type:"timestamp" timestampFormat:"iso8601"` + + // The monitoring information for the instance. + Monitoring *Monitoring `locationName:"monitoring" type:"structure"` + + // [EC2-VPC] One or more network interfaces for the instance. + NetworkInterfaces []*InstanceNetworkInterface `locationName:"networkInterfaceSet" locationNameList:"item" type:"list"` + + // The location where the instance launched, if applicable. + Placement *Placement `locationName:"placement" type:"structure"` + + // The value is Windows for Windows instances; otherwise blank. 
+ Platform *string `locationName:"platform" type:"string" enum:"PlatformValues"` + + // The private DNS name assigned to the instance. This DNS name can only be + // used inside the Amazon EC2 network. This name is not available until the + // instance enters the running state. For EC2-VPC, this name is only available + // if you've enabled DNS hostnames for your VPC. + PrivateDnsName *string `locationName:"privateDnsName" type:"string"` + + // The private IP address assigned to the instance. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // The product codes attached to this instance, if applicable. + ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"` + + // The public DNS name assigned to the instance. This name is not available + // until the instance enters the running state. For EC2-VPC, this name is only + // available if you've enabled DNS hostnames for your VPC. + PublicDnsName *string `locationName:"dnsName" type:"string"` + + // The public IP address assigned to the instance, if applicable. + PublicIpAddress *string `locationName:"ipAddress" type:"string"` + + // The RAM disk associated with this instance, if applicable. + RamdiskId *string `locationName:"ramdiskId" type:"string"` + + // The root device name (for example, /dev/sda1 or /dev/xvda). + RootDeviceName *string `locationName:"rootDeviceName" type:"string"` + + // The root device type used by the AMI. The AMI can use an EBS volume or an + // instance store volume. + RootDeviceType *string `locationName:"rootDeviceType" type:"string" enum:"DeviceType"` + + // One or more security groups for the instance. + SecurityGroups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // Specifies whether to enable an instance launched in a VPC to perform NAT. + // This controls whether source/destination checking is enabled on the instance. + // A value of true means checking is enabled, and false means checking is disabled. + // The value must be false for the instance to perform NAT. For more information, + // see NAT Instances (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_NAT_Instance.html) + // in the Amazon Virtual Private Cloud User Guide. + SourceDestCheck *bool `locationName:"sourceDestCheck" type:"boolean"` + + // If the request is a Spot instance request, the ID of the request. + SpotInstanceRequestId *string `locationName:"spotInstanceRequestId" type:"string"` + + // Specifies whether enhanced networking with the Intel 82599 Virtual Function + // interface is enabled. + SriovNetSupport *string `locationName:"sriovNetSupport" type:"string"` + + // The current state of the instance. + State *InstanceState `locationName:"instanceState" type:"structure"` + + // The reason for the most recent state transition. + StateReason *StateReason `locationName:"stateReason" type:"structure"` + + // The reason for the most recent state transition. This might be an empty string. + StateTransitionReason *string `locationName:"reason" type:"string"` + + // [EC2-VPC] The ID of the subnet in which the instance is running. + SubnetId *string `locationName:"subnetId" type:"string"` + + // Any tags assigned to the instance. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The virtualization type of the instance. + VirtualizationType *string `locationName:"virtualizationType" type:"string" enum:"VirtualizationType"` + + // [EC2-VPC] The ID of the VPC in which the instance is running. 
+ VpcId *string `locationName:"vpcId" type:"string"`
+}
+
+// String returns the string representation
+func (s Instance) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Instance) GoString() string {
+ return s.String()
+}
+
+// Describes a block device mapping.
+type InstanceBlockDeviceMapping struct {
+ _ struct{} `type:"structure"`
+
+ // The device name exposed to the instance (for example, /dev/sdh or xvdh).
+ DeviceName *string `locationName:"deviceName" type:"string"`
+
+ // Parameters used to automatically set up EBS volumes when the instance is
+ // launched.
+ Ebs *EbsInstanceBlockDevice `locationName:"ebs" type:"structure"`
+}
+
+// String returns the string representation
+func (s InstanceBlockDeviceMapping) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceBlockDeviceMapping) GoString() string {
+ return s.String()
+}
+
+// Describes a block device mapping entry.
+type InstanceBlockDeviceMappingSpecification struct {
+ _ struct{} `type:"structure"`
+
+ // The device name exposed to the instance (for example, /dev/sdh or xvdh).
+ DeviceName *string `locationName:"deviceName" type:"string"`
+
+ // Parameters used to automatically set up EBS volumes when the instance is
+ // launched.
+ Ebs *EbsInstanceBlockDeviceSpecification `locationName:"ebs" type:"structure"`
+
+ // Suppresses the specified device included in the block device mapping.
+ NoDevice *string `locationName:"noDevice" type:"string"`
+
+ // The virtual device name.
+ VirtualName *string `locationName:"virtualName" type:"string"`
+}
+
+// String returns the string representation
+func (s InstanceBlockDeviceMappingSpecification) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceBlockDeviceMappingSpecification) GoString() string {
+ return s.String()
+}
+
+// Information about the instance type that the Dedicated host supports.
+type InstanceCapacity struct {
+ _ struct{} `type:"structure"`
+
+ // The number of instances that can still be launched onto the Dedicated host.
+ AvailableCapacity *int64 `locationName:"availableCapacity" type:"integer"`
+
+ // The instance type size supported by the Dedicated host.
+ InstanceType *string `locationName:"instanceType" type:"string"`
+
+ // The total number of instances that can be launched onto the Dedicated host.
+ TotalCapacity *int64 `locationName:"totalCapacity" type:"integer"`
+}
+
+// String returns the string representation
+func (s InstanceCapacity) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceCapacity) GoString() string {
+ return s.String()
+}
+
+// Describes a Reserved Instance listing state.
+type InstanceCount struct {
+ _ struct{} `type:"structure"`
+
+ // The number of listed Reserved Instances in the state specified by State.
+ InstanceCount *int64 `locationName:"instanceCount" type:"integer"`
+
+ // The states of the listed Reserved Instances.
+ State *string `locationName:"state" type:"string" enum:"ListingState"`
+}
+
+// String returns the string representation
+func (s InstanceCount) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceCount) GoString() string {
+ return s.String()
+}
+
+// Describes an instance to export.
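+//
+// Editor's note: every field on the Instance type above is a pointer and may
+// be nil in a response; a minimal sketch of nil-safe access with the SDK's
+// aws helpers (resv is a hypothetical *ec2.Reservation from DescribeInstances):
+//
+//   for _, inst := range resv.Instances {
+//       fmt.Printf("%s %s %s\n",
+//           aws.StringValue(inst.InstanceId),
+//           aws.StringValue(inst.InstanceType),
+//           aws.TimeValue(inst.LaunchTime).Format(time.RFC3339))
+//   }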
+type InstanceExportDetails struct { + _ struct{} `type:"structure"` + + // The ID of the resource being exported. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The target virtualization environment. + TargetEnvironment *string `locationName:"targetEnvironment" type:"string" enum:"ExportEnvironment"` +} + +// String returns the string representation +func (s InstanceExportDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceExportDetails) GoString() string { + return s.String() +} + +// Describes the monitoring information of the instance. +type InstanceMonitoring struct { + _ struct{} `type:"structure"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The monitoring information. + Monitoring *Monitoring `locationName:"monitoring" type:"structure"` +} + +// String returns the string representation +func (s InstanceMonitoring) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceMonitoring) GoString() string { + return s.String() +} + +// Describes a network interface. +type InstanceNetworkInterface struct { + _ struct{} `type:"structure"` + + // The association information for an Elastic IP associated with the network + // interface. + Association *InstanceNetworkInterfaceAssociation `locationName:"association" type:"structure"` + + // The network interface attachment. + Attachment *InstanceNetworkInterfaceAttachment `locationName:"attachment" type:"structure"` + + // The description. + Description *string `locationName:"description" type:"string"` + + // One or more security groups. + Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // The MAC address. + MacAddress *string `locationName:"macAddress" type:"string"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // The ID of the AWS account that created the network interface. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The private DNS name. + PrivateDnsName *string `locationName:"privateDnsName" type:"string"` + + // The IP address of the network interface within the subnet. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // The private IP addresses associated with the network interface. + PrivateIpAddresses []*InstancePrivateIpAddress `locationName:"privateIpAddressesSet" locationNameList:"item" type:"list"` + + // Indicates whether to validate network traffic to or from this network interface. + SourceDestCheck *bool `locationName:"sourceDestCheck" type:"boolean"` + + // The status of the network interface. + Status *string `locationName:"status" type:"string" enum:"NetworkInterfaceStatus"` + + // The ID of the subnet. + SubnetId *string `locationName:"subnetId" type:"string"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s InstanceNetworkInterface) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceNetworkInterface) GoString() string { + return s.String() +} + +// Describes association information for an Elastic IP address. +type InstanceNetworkInterfaceAssociation struct { + _ struct{} `type:"structure"` + + // The ID of the owner of the Elastic IP address. 
+ IpOwnerId *string `locationName:"ipOwnerId" type:"string"` + + // The public DNS name. + PublicDnsName *string `locationName:"publicDnsName" type:"string"` + + // The public IP address or Elastic IP address bound to the network interface. + PublicIp *string `locationName:"publicIp" type:"string"` +} + +// String returns the string representation +func (s InstanceNetworkInterfaceAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceNetworkInterfaceAssociation) GoString() string { + return s.String() +} + +// Describes a network interface attachment. +type InstanceNetworkInterfaceAttachment struct { + _ struct{} `type:"structure"` + + // The time stamp when the attachment initiated. + AttachTime *time.Time `locationName:"attachTime" type:"timestamp" timestampFormat:"iso8601"` + + // The ID of the network interface attachment. + AttachmentId *string `locationName:"attachmentId" type:"string"` + + // Indicates whether the network interface is deleted when the instance is terminated. + DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` + + // The index of the device on the instance for the network interface attachment. + DeviceIndex *int64 `locationName:"deviceIndex" type:"integer"` + + // The attachment state. + Status *string `locationName:"status" type:"string" enum:"AttachmentStatus"` +} + +// String returns the string representation +func (s InstanceNetworkInterfaceAttachment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceNetworkInterfaceAttachment) GoString() string { + return s.String() +} + +// Describes a network interface. +type InstanceNetworkInterfaceSpecification struct { + _ struct{} `type:"structure"` + + // Indicates whether to assign a public IP address to an instance you launch + // in a VPC. The public IP address can only be assigned to a network interface + // for eth0, and can only be assigned to a new network interface, not an existing + // one. You cannot specify more than one network interface in the request. If + // launching into a default subnet, the default value is true. + AssociatePublicIpAddress *bool `locationName:"associatePublicIpAddress" type:"boolean"` + + // If set to true, the interface is deleted when the instance is terminated. + // You can specify true only if creating a new network interface when launching + // an instance. + DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` + + // The description of the network interface. Applies only if creating a network + // interface when launching an instance. + Description *string `locationName:"description" type:"string"` + + // The index of the device on the instance for the network interface attachment. + // If you are specifying a network interface in a RunInstances request, you + // must provide the device index. + DeviceIndex *int64 `locationName:"deviceIndex" type:"integer"` + + // The IDs of the security groups for the network interface. Applies only if + // creating a network interface when launching an instance. + Groups []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // The private IP address of the network interface. Applies only if creating + // a network interface when launching an instance. 
+ PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"`
+
+ // One or more private IP addresses to assign to the network interface. Only
+ // one private IP address can be designated as primary.
+ PrivateIpAddresses []*PrivateIpAddressSpecification `locationName:"privateIpAddressesSet" queryName:"PrivateIpAddresses" locationNameList:"item" type:"list"`
+
+ // The number of secondary private IP addresses. You can't specify this option
+ // and specify more than one private IP address using the private IP addresses
+ // option.
+ SecondaryPrivateIpAddressCount *int64 `locationName:"secondaryPrivateIpAddressCount" type:"integer"`
+
+ // The ID of the subnet associated with the network interface. Applies only
+ // if creating a network interface when launching an instance.
+ SubnetId *string `locationName:"subnetId" type:"string"`
+}
+
+// String returns the string representation
+func (s InstanceNetworkInterfaceSpecification) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceNetworkInterfaceSpecification) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InstanceNetworkInterfaceSpecification) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "InstanceNetworkInterfaceSpecification"}
+ if s.PrivateIpAddresses != nil {
+ for i, v := range s.PrivateIpAddresses {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PrivateIpAddresses", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Describes a private IP address.
+type InstancePrivateIpAddress struct {
+ _ struct{} `type:"structure"`
+
+ // The association information for an Elastic IP address for the network interface.
+ Association *InstanceNetworkInterfaceAssociation `locationName:"association" type:"structure"`
+
+ // Indicates whether this IP address is the primary private IP address of the
+ // network interface.
+ Primary *bool `locationName:"primary" type:"boolean"`
+
+ // The private DNS name.
+ PrivateDnsName *string `locationName:"privateDnsName" type:"string"`
+
+ // The private IP address of the network interface.
+ PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"`
+}
+
+// String returns the string representation
+func (s InstancePrivateIpAddress) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstancePrivateIpAddress) GoString() string {
+ return s.String()
+}
+
+// Describes the current state of the instance.
+type InstanceState struct {
+ _ struct{} `type:"structure"`
+
+ // The low byte represents the state. The high byte is an opaque internal value
+ // and should be ignored.
+ //
+ // 0 : pending
+ //
+ // 16 : running
+ //
+ // 32 : shutting-down
+ //
+ // 48 : terminated
+ //
+ // 64 : stopping
+ //
+ // 80 : stopped
+ Code *int64 `locationName:"code" type:"integer"`
+
+ // The current state of the instance.
+ Name *string `locationName:"name" type:"string" enum:"InstanceStateName"`
+}
+
+// String returns the string representation
+func (s InstanceState) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceState) GoString() string {
+ return s.String()
+}
+
+// Describes an instance state change.
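+//
+// Editor's note: per the InstanceState docs above, only the low byte of Code
+// is meaningful; a minimal sketch of checking it (state is a hypothetical
+// *ec2.InstanceState from a describe call):
+//
+//   if aws.Int64Value(state.Code)&0xff == 16 { // 16 : running
+//       // instance is running
+//   }
+//   // comparing the Name enum is usually simpler:
+//   if aws.StringValue(state.Name) == ec2.InstanceStateNameRunning {
+//       // instance is running
+//   }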
+type InstanceStateChange struct { + _ struct{} `type:"structure"` + + // The current state of the instance. + CurrentState *InstanceState `locationName:"currentState" type:"structure"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The previous state of the instance. + PreviousState *InstanceState `locationName:"previousState" type:"structure"` +} + +// String returns the string representation +func (s InstanceStateChange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceStateChange) GoString() string { + return s.String() +} + +// Describes the status of an instance. +type InstanceStatus struct { + _ struct{} `type:"structure"` + + // The Availability Zone of the instance. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // Any scheduled events associated with the instance. + Events []*InstanceStatusEvent `locationName:"eventsSet" locationNameList:"item" type:"list"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The intended state of the instance. DescribeInstanceStatus requires that + // an instance be in the running state. + InstanceState *InstanceState `locationName:"instanceState" type:"structure"` + + // Reports impaired functionality that stems from issues internal to the instance, + // such as impaired reachability. + InstanceStatus *InstanceStatusSummary `locationName:"instanceStatus" type:"structure"` + + // Reports impaired functionality that stems from issues related to the systems + // that support an instance, such as hardware failures and network connectivity + // problems. + SystemStatus *InstanceStatusSummary `locationName:"systemStatus" type:"structure"` +} + +// String returns the string representation +func (s InstanceStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceStatus) GoString() string { + return s.String() +} + +// Describes the instance status. +type InstanceStatusDetails struct { + _ struct{} `type:"structure"` + + // The time when a status check failed. For an instance that was launched and + // impaired, this is the time when the instance was launched. + ImpairedSince *time.Time `locationName:"impairedSince" type:"timestamp" timestampFormat:"iso8601"` + + // The type of instance status. + Name *string `locationName:"name" type:"string" enum:"StatusName"` + + // The status. + Status *string `locationName:"status" type:"string" enum:"StatusType"` +} + +// String returns the string representation +func (s InstanceStatusDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceStatusDetails) GoString() string { + return s.String() +} + +// Describes a scheduled event for an instance. +type InstanceStatusEvent struct { + _ struct{} `type:"structure"` + + // The event code. + Code *string `locationName:"code" type:"string" enum:"EventCode"` + + // A description of the event. + // + // After a scheduled event is completed, it can still be described for up to + // a week. If the event has been completed, this description starts with the + // following text: [Completed]. + Description *string `locationName:"description" type:"string"` + + // The latest scheduled end time for the event. 
+ NotAfter *time.Time `locationName:"notAfter" type:"timestamp" timestampFormat:"iso8601"` + + // The earliest scheduled start time for the event. + NotBefore *time.Time `locationName:"notBefore" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s InstanceStatusEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceStatusEvent) GoString() string { + return s.String() +} + +// Describes the status of an instance. +type InstanceStatusSummary struct { + _ struct{} `type:"structure"` + + // The system instance health or application instance health. + Details []*InstanceStatusDetails `locationName:"details" locationNameList:"item" type:"list"` + + // The status. + Status *string `locationName:"status" type:"string" enum:"SummaryStatus"` +} + +// String returns the string representation +func (s InstanceStatusSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceStatusSummary) GoString() string { + return s.String() +} + +// Describes an Internet gateway. +type InternetGateway struct { + _ struct{} `type:"structure"` + + // Any VPCs attached to the Internet gateway. + Attachments []*InternetGatewayAttachment `locationName:"attachmentSet" locationNameList:"item" type:"list"` + + // The ID of the Internet gateway. + InternetGatewayId *string `locationName:"internetGatewayId" type:"string"` + + // Any tags assigned to the Internet gateway. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s InternetGateway) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InternetGateway) GoString() string { + return s.String() +} + +// Describes the attachment of a VPC to an Internet gateway. +type InternetGatewayAttachment struct { + _ struct{} `type:"structure"` + + // The current state of the attachment. + State *string `locationName:"state" type:"string" enum:"AttachmentStatus"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s InternetGatewayAttachment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InternetGatewayAttachment) GoString() string { + return s.String() +} + +// Describes a security group rule. +type IpPermission struct { + _ struct{} `type:"structure"` + + // The start of port range for the TCP and UDP protocols, or an ICMP type number. + // A value of -1 indicates all ICMP types. + FromPort *int64 `locationName:"fromPort" type:"integer"` + + // The IP protocol name (for tcp, udp, and icmp) or number (see Protocol Numbers + // (http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml)). + // + // [EC2-VPC only] When you authorize or revoke security group rules, you can + // use -1 to specify all. + IpProtocol *string `locationName:"ipProtocol" type:"string"` + + // One or more IP ranges. + IpRanges []*IpRange `locationName:"ipRanges" locationNameList:"item" type:"list"` + + // (Valid for AuthorizeSecurityGroupEgress, RevokeSecurityGroupEgress and DescribeSecurityGroups + // only) One or more prefix list IDs for an AWS service. In an AuthorizeSecurityGroupEgress + // request, this is the AWS service that you want to access through a VPC endpoint + // from instances associated with the security group. 
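+ //
+ // Editor's note: a minimal sketch of using IpPermission to open SSH from a
+ // CIDR range (not part of the generated SDK; the group ID and CIDR are
+ // hypothetical):
+ //
+ //   _, err := svc.AuthorizeSecurityGroupIngress(&ec2.AuthorizeSecurityGroupIngressInput{
+ //       GroupId: aws.String("sg-1234abcd"),
+ //       IpPermissions: []*ec2.IpPermission{{
+ //           IpProtocol: aws.String("tcp"),
+ //           FromPort:   aws.Int64(22),
+ //           ToPort:     aws.Int64(22),
+ //           IpRanges:   []*ec2.IpRange{{CidrIp: aws.String("203.0.113.0/24")}},
+ //       }},
+ //   })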
+ PrefixListIds []*PrefixListId `locationName:"prefixListIds" locationNameList:"item" type:"list"` + + // The end of port range for the TCP and UDP protocols, or an ICMP code. A value + // of -1 indicates all ICMP codes for the specified ICMP type. + ToPort *int64 `locationName:"toPort" type:"integer"` + + // One or more security group and AWS account ID pairs. + UserIdGroupPairs []*UserIdGroupPair `locationName:"groups" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s IpPermission) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IpPermission) GoString() string { + return s.String() +} + +// Describes an IP range. +type IpRange struct { + _ struct{} `type:"structure"` + + // The CIDR range. You can either specify a CIDR range or a source security + // group, not both. + CidrIp *string `locationName:"cidrIp" type:"string"` +} + +// String returns the string representation +func (s IpRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IpRange) GoString() string { + return s.String() +} + +// Describes a key pair. +type KeyPairInfo struct { + _ struct{} `type:"structure"` + + // If you used CreateKeyPair to create the key pair, this is the SHA-1 digest + // of the DER encoded private key. If you used ImportKeyPair to provide AWS + // the public key, this is the MD5 public key fingerprint as specified in section + // 4 of RFC4716. + KeyFingerprint *string `locationName:"keyFingerprint" type:"string"` + + // The name of the key pair. + KeyName *string `locationName:"keyName" type:"string"` +} + +// String returns the string representation +func (s KeyPairInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KeyPairInfo) GoString() string { + return s.String() +} + +// Describes a launch permission. +type LaunchPermission struct { + _ struct{} `type:"structure"` + + // The name of the group. + Group *string `locationName:"group" type:"string" enum:"PermissionGroup"` + + // The AWS account ID. + UserId *string `locationName:"userId" type:"string"` +} + +// String returns the string representation +func (s LaunchPermission) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchPermission) GoString() string { + return s.String() +} + +// Describes a launch permission modification. +type LaunchPermissionModifications struct { + _ struct{} `type:"structure"` + + // The AWS account ID to add to the list of launch permissions for the AMI. + Add []*LaunchPermission `locationNameList:"item" type:"list"` + + // The AWS account ID to remove from the list of launch permissions for the + // AMI. + Remove []*LaunchPermission `locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s LaunchPermissionModifications) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchPermissionModifications) GoString() string { + return s.String() +} + +// Describes the launch specification for an instance. +type LaunchSpecification struct { + _ struct{} `type:"structure"` + + // Deprecated. + AddressingType *string `locationName:"addressingType" type:"string"` + + // One or more block device mapping entries. 
+ // + // Although you can specify encrypted EBS volumes in this block device mapping + // for your Spot Instances, these volumes are not encrypted. + BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` + + // Indicates whether the instance is optimized for EBS I/O. This optimization + // provides dedicated throughput to Amazon EBS and an optimized configuration + // stack to provide optimal EBS I/O performance. This optimization isn't available + // with all instance types. Additional usage charges apply when using an EBS + // Optimized instance. + // + // Default: false + EbsOptimized *bool `locationName:"ebsOptimized" type:"boolean"` + + // The IAM instance profile. + IamInstanceProfile *IamInstanceProfileSpecification `locationName:"iamInstanceProfile" type:"structure"` + + // The ID of the AMI. + ImageId *string `locationName:"imageId" type:"string"` + + // The instance type. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // The ID of the kernel. + KernelId *string `locationName:"kernelId" type:"string"` + + // The name of the key pair. + KeyName *string `locationName:"keyName" type:"string"` + + // Describes the monitoring for the instance. + Monitoring *RunInstancesMonitoringEnabled `locationName:"monitoring" type:"structure"` + + // One or more network interfaces. + NetworkInterfaces []*InstanceNetworkInterfaceSpecification `locationName:"networkInterfaceSet" locationNameList:"item" type:"list"` + + // The placement information for the instance. + Placement *SpotPlacement `locationName:"placement" type:"structure"` + + // The ID of the RAM disk. + RamdiskId *string `locationName:"ramdiskId" type:"string"` + + // One or more security groups. When requesting instances in a VPC, you must + // specify the IDs of the security groups. When requesting instances in EC2-Classic, + // you can specify the names or the IDs of the security groups. + SecurityGroups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // The ID of the subnet in which to launch the instance. + SubnetId *string `locationName:"subnetId" type:"string"` + + // The user data to make available to the instances. If you are using an AWS + // SDK or command line tool, Base64-encoding is performed for you, and you can + // load the text from a file. Otherwise, you must provide Base64-encoded text. + UserData *string `locationName:"userData" type:"string"` +} + +// String returns the string representation +func (s LaunchSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchSpecification) GoString() string { + return s.String() +} + +// Contains the parameters for ModifyHosts. +type ModifyHostsInput struct { + _ struct{} `type:"structure"` + + // Specify whether to enable or disable auto-placement. + AutoPlacement *string `locationName:"autoPlacement" type:"string" required:"true" enum:"AutoPlacement"` + + // The host IDs of the Dedicated hosts you want to modify. + HostIds []*string `locationName:"hostId" locationNameList:"item" type:"list" required:"true"` +} + +// String returns the string representation +func (s ModifyHostsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyHostsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
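+//
+// Editor's note: a minimal ModifyHosts sketch (the host ID is hypothetical);
+// both required fields above must be set or this Validate call fails before
+// the request is sent:
+//
+//   out, err := svc.ModifyHosts(&ec2.ModifyHostsInput{
+//       AutoPlacement: aws.String(ec2.AutoPlacementOff),
+//       HostIds:       []*string{aws.String("h-0123456789abcdef0")},
+//   })
+//   // out.Successful and out.Unsuccessful report per-host results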
+func (s *ModifyHostsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ModifyHostsInput"}
+ if s.AutoPlacement == nil {
+ invalidParams.Add(request.NewErrParamRequired("AutoPlacement"))
+ }
+ if s.HostIds == nil {
+ invalidParams.Add(request.NewErrParamRequired("HostIds"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Contains the output of ModifyHosts.
+type ModifyHostsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The IDs of the Dedicated hosts that were successfully modified.
+ Successful []*string `locationName:"successful" locationNameList:"item" type:"list"`
+
+ // The IDs of the Dedicated hosts that could not be modified. Check whether
+ // the setting you requested can be used.
+ Unsuccessful []*UnsuccessfulItem `locationName:"unsuccessful" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s ModifyHostsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyHostsOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the parameters of ModifyIdFormat.
+type ModifyIdFormatInput struct {
+ _ struct{} `type:"structure"`
+
+ // The type of resource.
+ Resource *string `type:"string" required:"true"`
+
+ // Indicates whether the resource should use longer IDs (17-character IDs).
+ UseLongIds *bool `type:"boolean" required:"true"`
+}
+
+// String returns the string representation
+func (s ModifyIdFormatInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyIdFormatInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ModifyIdFormatInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ModifyIdFormatInput"}
+ if s.Resource == nil {
+ invalidParams.Add(request.NewErrParamRequired("Resource"))
+ }
+ if s.UseLongIds == nil {
+ invalidParams.Add(request.NewErrParamRequired("UseLongIds"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type ModifyIdFormatOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s ModifyIdFormatOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyIdFormatOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the parameters of ModifyIdentityIdFormat.
+type ModifyIdentityIdFormatInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN of the principal, which can be an IAM user, IAM role, or the root
+ // user.
+ PrincipalArn *string `locationName:"principalArn" type:"string" required:"true"`
+
+ // The type of resource.
+ Resource *string `locationName:"resource" type:"string" required:"true"`
+
+ // Indicates whether the resource should use longer IDs (17-character IDs).
+ UseLongIds *bool `locationName:"useLongIds" type:"boolean" required:"true"`
+}
+
+// String returns the string representation
+func (s ModifyIdentityIdFormatInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyIdentityIdFormatInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
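+//
+// Editor's note: a minimal ModifyIdentityIdFormat sketch (the account ID and
+// ARN are hypothetical); it opts a single IAM principal into 17-character
+// instance IDs:
+//
+//   _, err := svc.ModifyIdentityIdFormat(&ec2.ModifyIdentityIdFormatInput{
+//       PrincipalArn: aws.String("arn:aws:iam::123456789012:user/alice"),
+//       Resource:     aws.String("instance"),
+//       UseLongIds:   aws.Bool(true),
+//   })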
+func (s *ModifyIdentityIdFormatInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyIdentityIdFormatInput"} + if s.PrincipalArn == nil { + invalidParams.Add(request.NewErrParamRequired("PrincipalArn")) + } + if s.Resource == nil { + invalidParams.Add(request.NewErrParamRequired("Resource")) + } + if s.UseLongIds == nil { + invalidParams.Add(request.NewErrParamRequired("UseLongIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ModifyIdentityIdFormatOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ModifyIdentityIdFormatOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyIdentityIdFormatOutput) GoString() string { + return s.String() +} + +// Contains the parameters for ModifyImageAttribute. +type ModifyImageAttributeInput struct { + _ struct{} `type:"structure"` + + // The name of the attribute to modify. + Attribute *string `type:"string"` + + // A description for the AMI. + Description *AttributeValue `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the AMI. + ImageId *string `type:"string" required:"true"` + + // A launch permission modification. + LaunchPermission *LaunchPermissionModifications `type:"structure"` + + // The operation type. + OperationType *string `type:"string" enum:"OperationType"` + + // One or more product codes. After you add a product code to an AMI, it can't + // be removed. This is only valid when modifying the productCodes attribute. + ProductCodes []*string `locationName:"ProductCode" locationNameList:"ProductCode" type:"list"` + + // One or more user groups. This is only valid when modifying the launchPermission + // attribute. + UserGroups []*string `locationName:"UserGroup" locationNameList:"UserGroup" type:"list"` + + // One or more AWS account IDs. This is only valid when modifying the launchPermission + // attribute. + UserIds []*string `locationName:"UserId" locationNameList:"UserId" type:"list"` + + // The value of the attribute being modified. This is only valid when modifying + // the description attribute. + Value *string `type:"string"` +} + +// String returns the string representation +func (s ModifyImageAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyImageAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
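+//
+// Editor's note: a minimal sketch of sharing an AMI with another account via
+// the launchPermission attribute (the AMI and account IDs are hypothetical):
+//
+//   _, err := svc.ModifyImageAttribute(&ec2.ModifyImageAttributeInput{
+//       ImageId: aws.String("ami-0123abcd"),
+//       LaunchPermission: &ec2.LaunchPermissionModifications{
+//           Add: []*ec2.LaunchPermission{{UserId: aws.String("123456789012")}},
+//       },
+//   })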
+func (s *ModifyImageAttributeInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ModifyImageAttributeInput"}
+ if s.ImageId == nil {
+ invalidParams.Add(request.NewErrParamRequired("ImageId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type ModifyImageAttributeOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s ModifyImageAttributeOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyImageAttributeOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the parameters for ModifyInstanceAttribute.
+type ModifyInstanceAttributeInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the attribute.
+ Attribute *string `locationName:"attribute" type:"string" enum:"InstanceAttributeName"`
+
+ // Modifies the DeleteOnTermination attribute for volumes that are currently
+ // attached. The volume must be owned by the caller. If no value is specified
+ // for DeleteOnTermination, the default is true and the volume is deleted when
+ // the instance is terminated.
+ //
+ // To add instance store volumes to an Amazon EBS-backed instance, you must
+ // add them when you launch the instance. For more information, see Updating
+ // the Block Device Mapping when Launching an Instance (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html#Using_OverridingAMIBDM)
+ // in the Amazon Elastic Compute Cloud User Guide.
+ BlockDeviceMappings []*InstanceBlockDeviceMappingSpecification `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"`
+
+ // If the value is true, you can't terminate the instance using the Amazon EC2
+ // console, CLI, or API; otherwise, you can. You cannot use this parameter for
+ // Spot Instances.
+ DisableApiTermination *AttributeBooleanValue `locationName:"disableApiTermination" type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+ // Specifies whether the instance is optimized for EBS I/O. This optimization
+ // provides dedicated throughput to Amazon EBS and an optimized configuration
+ // stack to provide optimal EBS I/O performance. This optimization isn't available
+ // with all instance types. Additional usage charges apply when using an EBS
+ // Optimized instance.
+ EbsOptimized *AttributeBooleanValue `locationName:"ebsOptimized" type:"structure"`
+
+ // Set to true to enable enhanced networking with ENA for the instance.
+ //
+ // This option is supported only for HVM instances. Specifying this option
+ // with a PV instance can make it unreachable.
+ EnaSupport *AttributeBooleanValue `locationName:"enaSupport" type:"structure"`
+
+ // [EC2-VPC] Changes the security groups of the instance. You must specify at
+ // least one security group, even if it's just the default security group for
+ // the VPC. You must specify the security group ID, not the security group name.
+ Groups []*string `locationName:"GroupId" locationNameList:"groupId" type:"list"`
+
+ // The ID of the instance.
+ InstanceId *string `locationName:"instanceId" type:"string" required:"true"` + + // Specifies whether an instance stops or terminates when you initiate shutdown + // from the instance (using the operating system command for system shutdown). + InstanceInitiatedShutdownBehavior *AttributeValue `locationName:"instanceInitiatedShutdownBehavior" type:"structure"` + + // Changes the instance type to the specified value. For more information, see + // Instance Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). + // If the instance type is not valid, the error returned is InvalidInstanceAttributeValue. + InstanceType *AttributeValue `locationName:"instanceType" type:"structure"` + + // Changes the instance's kernel to the specified value. We recommend that you + // use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB + // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedKernels.html). + Kernel *AttributeValue `locationName:"kernel" type:"structure"` + + // Changes the instance's RAM disk to the specified value. We recommend that + // you use PV-GRUB instead of kernels and RAM disks. For more information, see + // PV-GRUB (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedKernels.html). + Ramdisk *AttributeValue `locationName:"ramdisk" type:"structure"` + + // Specifies whether source/destination checking is enabled. A value of true + // means that checking is enabled, and false means checking is disabled. This + // value must be false for a NAT instance to perform NAT. + SourceDestCheck *AttributeBooleanValue `type:"structure"` + + // Set to simple to enable enhanced networking with the Intel 82599 Virtual + // Function interface for the instance. + // + // There is no way to disable enhanced networking with the Intel 82599 Virtual + // Function interface at this time. + // + // This option is supported only for HVM instances. Specifying this option + // with a PV instance can make it unreachable. + SriovNetSupport *AttributeValue `locationName:"sriovNetSupport" type:"structure"` + + // Changes the instance's user data to the specified value. If you are using + // an AWS SDK or command line tool, Base64-encoding is performed for you, and + // you can load the text from a file. Otherwise, you must provide Base64-encoded + // text. + UserData *BlobAttributeValue `locationName:"userData" type:"structure"` + + // A new value for the attribute. Use only with the kernel, ramdisk, userData, + // disableApiTermination, or instanceInitiatedShutdownBehavior attribute. + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s ModifyInstanceAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyInstanceAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
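+//
+// Editor's note: a minimal sketch of disabling source/destination checking so
+// an instance can perform NAT (the instance ID is hypothetical):
+//
+//   _, err := svc.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{
+//       InstanceId:      aws.String("i-0123456789abcdef0"),
+//       SourceDestCheck: &ec2.AttributeBooleanValue{Value: aws.Bool(false)},
+//   })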
+func (s *ModifyInstanceAttributeInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ModifyInstanceAttributeInput"}
+ if s.InstanceId == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type ModifyInstanceAttributeOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s ModifyInstanceAttributeOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyInstanceAttributeOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the parameters for ModifyInstancePlacement.
+type ModifyInstancePlacementInput struct {
+ _ struct{} `type:"structure"`
+
+ // The new affinity setting for the instance.
+ Affinity *string `locationName:"affinity" type:"string" enum:"Affinity"`
+
+ // The ID of the Dedicated host that the instance will have affinity with.
+ HostId *string `locationName:"hostId" type:"string"`
+
+ // The ID of the instance that you are modifying.
+ InstanceId *string `locationName:"instanceId" type:"string" required:"true"`
+
+ // The tenancy of the instance that you are modifying.
+ Tenancy *string `locationName:"tenancy" type:"string" enum:"HostTenancy"`
+}
+
+// String returns the string representation
+func (s ModifyInstancePlacementInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyInstancePlacementInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ModifyInstancePlacementInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ModifyInstancePlacementInput"}
+ if s.InstanceId == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Contains the output of ModifyInstancePlacement.
+type ModifyInstancePlacementOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Returns true if the request succeeds; otherwise, it returns an error.
+ Return *bool `locationName:"return" type:"boolean"`
+}
+
+// String returns the string representation
+func (s ModifyInstancePlacementOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyInstancePlacementOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the parameters for ModifyNetworkInterfaceAttribute.
+type ModifyNetworkInterfaceAttributeInput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the interface attachment. If modifying the 'delete on termination'
+ // attribute, you must specify the ID of the interface attachment.
+ Attachment *NetworkInterfaceAttachmentChanges `locationName:"attachment" type:"structure"`
+
+ // A description for the network interface.
+ Description *AttributeValue `locationName:"description" type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+ // Changes the security groups for the network interface. The new set of groups
+ // you specify replaces the current set.
You must specify at least one group, + // even if it's just the default security group in the VPC. You must specify + // the ID of the security group, not the name. + Groups []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"` + + // Indicates whether source/destination checking is enabled. A value of true + // means checking is enabled, and false means checking is disabled. This value + // must be false for a NAT instance to perform NAT. For more information, see + // NAT Instances (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_NAT_Instance.html) + // in the Amazon Virtual Private Cloud User Guide. + SourceDestCheck *AttributeBooleanValue `locationName:"sourceDestCheck" type:"structure"` +} + +// String returns the string representation +func (s ModifyNetworkInterfaceAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyNetworkInterfaceAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyNetworkInterfaceAttributeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyNetworkInterfaceAttributeInput"} + if s.NetworkInterfaceId == nil { + invalidParams.Add(request.NewErrParamRequired("NetworkInterfaceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ModifyNetworkInterfaceAttributeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ModifyNetworkInterfaceAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyNetworkInterfaceAttributeOutput) GoString() string { + return s.String() +} + +// Contains the parameters for ModifyReservedInstances. +type ModifyReservedInstancesInput struct { + _ struct{} `type:"structure"` + + // A unique, case-sensitive token you provide to ensure idempotency of your + // modification request. For more information, see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `locationName:"clientToken" type:"string"` + + // The IDs of the Reserved Instances to modify. + ReservedInstancesIds []*string `locationName:"ReservedInstancesId" locationNameList:"ReservedInstancesId" type:"list" required:"true"` + + // The configuration settings for the Reserved Instances to modify. + TargetConfigurations []*ReservedInstancesConfiguration `locationName:"ReservedInstancesConfigurationSetItemType" locationNameList:"item" type:"list" required:"true"` +} + +// String returns the string representation +func (s ModifyReservedInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyReservedInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
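+//
+// A hedged sketch (the Reserved Instance ID and target configuration are
+// placeholders): both required list fields must be non-nil to pass validation.
+//
+//    input := &ec2.ModifyReservedInstancesInput{
+//        ReservedInstancesIds: []*string{aws.String("f127bd27-example")},
+//        TargetConfigurations: []*ec2.ReservedInstancesConfiguration{
+//            {InstanceCount: aws.Int64(1)},
+//        },
+//    }
+//    if err := input.Validate(); err != nil {
+//        // ReservedInstancesIds or TargetConfigurations was missing.
+//    }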
+func (s *ModifyReservedInstancesInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ModifyReservedInstancesInput"}
+	if s.ReservedInstancesIds == nil {
+		invalidParams.Add(request.NewErrParamRequired("ReservedInstancesIds"))
+	}
+	if s.TargetConfigurations == nil {
+		invalidParams.Add(request.NewErrParamRequired("TargetConfigurations"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Contains the output of ModifyReservedInstances.
+type ModifyReservedInstancesOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The ID for the modification.
+	ReservedInstancesModificationId *string `locationName:"reservedInstancesModificationId" type:"string"`
+}
+
+// String returns the string representation
+func (s ModifyReservedInstancesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyReservedInstancesOutput) GoString() string {
+	return s.String()
+}
+
+// Contains the parameters for ModifySnapshotAttribute.
+type ModifySnapshotAttributeInput struct {
+	_ struct{} `type:"structure"`
+
+	// The snapshot attribute to modify.
+	//
+	// Only volume creation permissions may be modified at the customer level.
+	Attribute *string `type:"string" enum:"SnapshotAttributeName"`
+
+	// A JSON representation of the snapshot attribute modification.
+	CreateVolumePermission *CreateVolumePermissionModifications `type:"structure"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+	// The group to modify for the snapshot.
+	GroupNames []*string `locationName:"UserGroup" locationNameList:"GroupName" type:"list"`
+
+	// The type of operation to perform on the attribute.
+	OperationType *string `type:"string" enum:"OperationType"`
+
+	// The ID of the snapshot.
+	SnapshotId *string `type:"string" required:"true"`
+
+	// The account ID to modify for the snapshot.
+	UserIds []*string `locationName:"UserId" locationNameList:"UserId" type:"list"`
+}
+
+// String returns the string representation
+func (s ModifySnapshotAttributeInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifySnapshotAttributeInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ModifySnapshotAttributeInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ModifySnapshotAttributeInput"}
+	if s.SnapshotId == nil {
+		invalidParams.Add(request.NewErrParamRequired("SnapshotId"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type ModifySnapshotAttributeOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s ModifySnapshotAttributeOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifySnapshotAttributeOutput) GoString() string {
+	return s.String()
+}
+
+// Contains the parameters for ModifySpotFleetRequest.
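+//
+// For example (a sketch; the request ID is a placeholder), raising the target
+// capacity of an existing fleet:
+//
+//    input := &ec2.ModifySpotFleetRequestInput{
+//        SpotFleetRequestId: aws.String("sfr-73fbd2ce-example"),
+//        TargetCapacity:     aws.Int64(20),
+//    }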
+type ModifySpotFleetRequestInput struct {
+	_ struct{} `type:"structure"`
+
+	// Indicates whether running Spot instances should be terminated if the target
+	// capacity of the Spot fleet request is decreased below the current size of
+	// the Spot fleet.
+	ExcessCapacityTerminationPolicy *string `locationName:"excessCapacityTerminationPolicy" type:"string" enum:"ExcessCapacityTerminationPolicy"`
+
+	// The ID of the Spot fleet request.
+	SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"`
+
+	// The size of the fleet.
+	TargetCapacity *int64 `locationName:"targetCapacity" type:"integer"`
+}
+
+// String returns the string representation
+func (s ModifySpotFleetRequestInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifySpotFleetRequestInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ModifySpotFleetRequestInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ModifySpotFleetRequestInput"}
+	if s.SpotFleetRequestId == nil {
+		invalidParams.Add(request.NewErrParamRequired("SpotFleetRequestId"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Contains the output of ModifySpotFleetRequest.
+type ModifySpotFleetRequestOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Is true if the request succeeds, and an error otherwise.
+	Return *bool `locationName:"return" type:"boolean"`
+}
+
+// String returns the string representation
+func (s ModifySpotFleetRequestOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifySpotFleetRequestOutput) GoString() string {
+	return s.String()
+}
+
+// Contains the parameters for ModifySubnetAttribute.
+type ModifySubnetAttributeInput struct {
+	_ struct{} `type:"structure"`
+
+	// Specify true to indicate that instances launched into the specified subnet
+	// should be assigned a public IP address.
+	MapPublicIpOnLaunch *AttributeBooleanValue `type:"structure"`
+
+	// The ID of the subnet.
+	SubnetId *string `locationName:"subnetId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ModifySubnetAttributeInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifySubnetAttributeInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ModifySubnetAttributeInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ModifySubnetAttributeInput"}
+	if s.SubnetId == nil {
+		invalidParams.Add(request.NewErrParamRequired("SubnetId"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type ModifySubnetAttributeOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s ModifySubnetAttributeOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifySubnetAttributeOutput) GoString() string {
+	return s.String()
+}
+
+// Contains the parameters for ModifyVolumeAttribute.
+type ModifyVolumeAttributeInput struct {
+	_ struct{} `type:"structure"`
+
+	// Indicates whether the volume should be auto-enabled for I/O operations.
+ AutoEnableIO *AttributeBooleanValue `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the volume. + VolumeId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyVolumeAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVolumeAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyVolumeAttributeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyVolumeAttributeInput"} + if s.VolumeId == nil { + invalidParams.Add(request.NewErrParamRequired("VolumeId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ModifyVolumeAttributeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ModifyVolumeAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVolumeAttributeOutput) GoString() string { + return s.String() +} + +// Contains the parameters for ModifyVpcAttribute. +type ModifyVpcAttributeInput struct { + _ struct{} `type:"structure"` + + // Indicates whether the instances launched in the VPC get DNS hostnames. If + // enabled, instances in the VPC get DNS hostnames; otherwise, they do not. + // + // You cannot modify the DNS resolution and DNS hostnames attributes in the + // same request. Use separate requests for each attribute. You can only enable + // DNS hostnames if you've enabled DNS support. + EnableDnsHostnames *AttributeBooleanValue `type:"structure"` + + // Indicates whether the DNS resolution is supported for the VPC. If enabled, + // queries to the Amazon provided DNS server at the 169.254.169.253 IP address, + // or the reserved IP address at the base of the VPC network range "plus two" + // will succeed. If disabled, the Amazon provided DNS service in the VPC that + // resolves public DNS hostnames to IP addresses is not enabled. + // + // You cannot modify the DNS resolution and DNS hostnames attributes in the + // same request. Use separate requests for each attribute. + EnableDnsSupport *AttributeBooleanValue `type:"structure"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyVpcAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpcAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
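+//
+// Because the DNS attributes cannot be combined in one request (see the field
+// documentation above), an illustrative call sets exactly one of them; the VPC
+// ID is a placeholder:
+//
+//    input := &ec2.ModifyVpcAttributeInput{
+//        VpcId:              aws.String("vpc-0a1b2c3d"),
+//        EnableDnsHostnames: &ec2.AttributeBooleanValue{Value: aws.Bool(true)},
+//    }
+//    if err := input.Validate(); err != nil {
+//        // VpcId is the only field checked here.
+//    }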
+func (s *ModifyVpcAttributeInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ModifyVpcAttributeInput"}
+	if s.VpcId == nil {
+		invalidParams.Add(request.NewErrParamRequired("VpcId"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type ModifyVpcAttributeOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s ModifyVpcAttributeOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyVpcAttributeOutput) GoString() string {
+	return s.String()
+}
+
+// Contains the parameters for ModifyVpcEndpoint.
+type ModifyVpcEndpointInput struct {
+	_ struct{} `type:"structure"`
+
+	// One or more route table IDs to associate with the endpoint.
+	AddRouteTableIds []*string `locationName:"AddRouteTableId" locationNameList:"item" type:"list"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `type:"boolean"`
+
+	// A policy document to attach to the endpoint. The policy must be in valid
+	// JSON format.
+	PolicyDocument *string `type:"string"`
+
+	// One or more route table IDs to disassociate from the endpoint.
+	RemoveRouteTableIds []*string `locationName:"RemoveRouteTableId" locationNameList:"item" type:"list"`
+
+	// Specify true to reset the policy document to the default policy. The default
+	// policy allows access to the service.
+	ResetPolicy *bool `type:"boolean"`
+
+	// The ID of the endpoint.
+	VpcEndpointId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ModifyVpcEndpointInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyVpcEndpointInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ModifyVpcEndpointInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ModifyVpcEndpointInput"}
+	if s.VpcEndpointId == nil {
+		invalidParams.Add(request.NewErrParamRequired("VpcEndpointId"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Contains the output of ModifyVpcEndpoint.
+type ModifyVpcEndpointOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Returns true if the request succeeds; otherwise, it returns an error.
+	Return *bool `locationName:"return" type:"boolean"`
+}
+
+// String returns the string representation
+func (s ModifyVpcEndpointOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyVpcEndpointOutput) GoString() string {
+	return s.String()
+}
+
+type ModifyVpcPeeringConnectionOptionsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The VPC peering connection options for the accepter VPC.
+	AccepterPeeringConnectionOptions *PeeringConnectionOptionsRequest `type:"structure"`
+
+	// Checks whether you have the required permissions for the operation, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `type:"boolean"`
+
+	// The VPC peering connection options for the requester VPC.
+ RequesterPeeringConnectionOptions *PeeringConnectionOptionsRequest `type:"structure"` + + // The ID of the VPC peering connection. + VpcPeeringConnectionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyVpcPeeringConnectionOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpcPeeringConnectionOptionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyVpcPeeringConnectionOptionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyVpcPeeringConnectionOptionsInput"} + if s.VpcPeeringConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcPeeringConnectionId")) + } + if s.AccepterPeeringConnectionOptions != nil { + if err := s.AccepterPeeringConnectionOptions.Validate(); err != nil { + invalidParams.AddNested("AccepterPeeringConnectionOptions", err.(request.ErrInvalidParams)) + } + } + if s.RequesterPeeringConnectionOptions != nil { + if err := s.RequesterPeeringConnectionOptions.Validate(); err != nil { + invalidParams.AddNested("RequesterPeeringConnectionOptions", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ModifyVpcPeeringConnectionOptionsOutput struct { + _ struct{} `type:"structure"` + + // Information about the VPC peering connection options for the accepter VPC. + AccepterPeeringConnectionOptions *PeeringConnectionOptions `locationName:"accepterPeeringConnectionOptions" type:"structure"` + + // Information about the VPC peering connection options for the requester VPC. + RequesterPeeringConnectionOptions *PeeringConnectionOptions `locationName:"requesterPeeringConnectionOptions" type:"structure"` +} + +// String returns the string representation +func (s ModifyVpcPeeringConnectionOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpcPeeringConnectionOptionsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for MonitorInstances. +type MonitorInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more instance IDs. + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list" required:"true"` +} + +// String returns the string representation +func (s MonitorInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MonitorInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MonitorInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MonitorInstancesInput"} + if s.InstanceIds == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of MonitorInstances. 
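+//
+// A sketch of reading this output, assuming svc is an *ec2.EC2 client and the
+// instance ID is a placeholder:
+//
+//    out, err := svc.MonitorInstances(&ec2.MonitorInstancesInput{
+//        InstanceIds: []*string{aws.String("i-1234567890abcdef0")},
+//    })
+//    if err == nil {
+//        for _, m := range out.InstanceMonitorings {
+//            fmt.Println(aws.StringValue(m.InstanceId), m.Monitoring)
+//        }
+//    }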
+type MonitorInstancesOutput struct { + _ struct{} `type:"structure"` + + // Monitoring information for one or more instances. + InstanceMonitorings []*InstanceMonitoring `locationName:"instancesSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s MonitorInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MonitorInstancesOutput) GoString() string { + return s.String() +} + +// Describes the monitoring for the instance. +type Monitoring struct { + _ struct{} `type:"structure"` + + // Indicates whether monitoring is enabled for the instance. + State *string `locationName:"state" type:"string" enum:"MonitoringState"` +} + +// String returns the string representation +func (s Monitoring) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Monitoring) GoString() string { + return s.String() +} + +// Contains the parameters for MoveAddressToVpc. +type MoveAddressToVpcInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The Elastic IP address. + PublicIp *string `locationName:"publicIp" type:"string" required:"true"` +} + +// String returns the string representation +func (s MoveAddressToVpcInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MoveAddressToVpcInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MoveAddressToVpcInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MoveAddressToVpcInput"} + if s.PublicIp == nil { + invalidParams.Add(request.NewErrParamRequired("PublicIp")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of MoveAddressToVpc. +type MoveAddressToVpcOutput struct { + _ struct{} `type:"structure"` + + // The allocation ID for the Elastic IP address. + AllocationId *string `locationName:"allocationId" type:"string"` + + // The status of the move of the IP address. + Status *string `locationName:"status" type:"string" enum:"Status"` +} + +// String returns the string representation +func (s MoveAddressToVpcOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MoveAddressToVpcOutput) GoString() string { + return s.String() +} + +// Describes the status of a moving Elastic IP address. +type MovingAddressStatus struct { + _ struct{} `type:"structure"` + + // The status of the Elastic IP address that's being moved to the EC2-VPC platform, + // or restored to the EC2-Classic platform. + MoveStatus *string `locationName:"moveStatus" type:"string" enum:"MoveStatus"` + + // The Elastic IP address. + PublicIp *string `locationName:"publicIp" type:"string"` +} + +// String returns the string representation +func (s MovingAddressStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MovingAddressStatus) GoString() string { + return s.String() +} + +// Describes a NAT gateway. 
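+//
+// One illustrative way to consume this structure after a DescribeNatGateways
+// call (svc is an assumed *ec2.EC2 client):
+//
+//    out, err := svc.DescribeNatGateways(&ec2.DescribeNatGatewaysInput{})
+//    if err == nil {
+//        for _, gw := range out.NatGateways {
+//            if aws.StringValue(gw.State) == "failed" {
+//                fmt.Println(aws.StringValue(gw.FailureCode),
+//                    aws.StringValue(gw.FailureMessage))
+//            }
+//        }
+//    }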
+type NatGateway struct { + _ struct{} `type:"structure"` + + // The date and time the NAT gateway was created. + CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"iso8601"` + + // The date and time the NAT gateway was deleted, if applicable. + DeleteTime *time.Time `locationName:"deleteTime" type:"timestamp" timestampFormat:"iso8601"` + + // If the NAT gateway could not be created, specifies the error code for the + // failure. (InsufficientFreeAddressesInSubnet | Gateway.NotAttached | InvalidAllocationID.NotFound + // | Resource.AlreadyAssociated | InternalError | InvalidSubnetID.NotFound) + FailureCode *string `locationName:"failureCode" type:"string"` + + // If the NAT gateway could not be created, specifies the error message for + // the failure, that corresponds to the error code. + // + // For InsufficientFreeAddressesInSubnet: "Subnet has insufficient free addresses + // to create this NAT gateway" + // + // For Gateway.NotAttached: "Network vpc-xxxxxxxx has no Internet gateway + // attached" + // + // For InvalidAllocationID.NotFound: "Elastic IP address eipalloc-xxxxxxxx + // could not be associated with this NAT gateway" + // + // For Resource.AlreadyAssociated: "Elastic IP address eipalloc-xxxxxxxx + // is already associated" + // + // For InternalError: "Network interface eni-xxxxxxxx, created and used internally + // by this NAT gateway is in an invalid state. Please try again." + // + // For InvalidSubnetID.NotFound: "The specified subnet subnet-xxxxxxxx does + // not exist or could not be found." + FailureMessage *string `locationName:"failureMessage" type:"string"` + + // Information about the IP addresses and network interface associated with + // the NAT gateway. + NatGatewayAddresses []*NatGatewayAddress `locationName:"natGatewayAddressSet" locationNameList:"item" type:"list"` + + // The ID of the NAT gateway. + NatGatewayId *string `locationName:"natGatewayId" type:"string"` + + // Reserved. If you need to sustain traffic greater than the documented limits + // (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html), + // contact us through the Support Center (https://console.aws.amazon.com/support/home?). + ProvisionedBandwidth *ProvisionedBandwidth `locationName:"provisionedBandwidth" type:"structure"` + + // The state of the NAT gateway. + // + // pending: The NAT gateway is being created and is not ready to process + // traffic. + // + // failed: The NAT gateway could not be created. Check the failureCode and + // failureMessage fields for the reason. + // + // available: The NAT gateway is able to process traffic. This status remains + // until you delete the NAT gateway, and does not indicate the health of the + // NAT gateway. + // + // deleting: The NAT gateway is in the process of being terminated and may + // still be processing traffic. + // + // deleted: The NAT gateway has been terminated and is no longer processing + // traffic. + State *string `locationName:"state" type:"string" enum:"NatGatewayState"` + + // The ID of the subnet in which the NAT gateway is located. + SubnetId *string `locationName:"subnetId" type:"string"` + + // The ID of the VPC in which the NAT gateway is located. 
+ VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s NatGateway) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NatGateway) GoString() string { + return s.String() +} + +// Describes the IP addresses and network interface associated with a NAT gateway. +type NatGatewayAddress struct { + _ struct{} `type:"structure"` + + // The allocation ID of the Elastic IP address that's associated with the NAT + // gateway. + AllocationId *string `locationName:"allocationId" type:"string"` + + // The ID of the network interface associated with the NAT gateway. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // The private IP address associated with the Elastic IP address. + PrivateIp *string `locationName:"privateIp" type:"string"` + + // The Elastic IP address associated with the NAT gateway. + PublicIp *string `locationName:"publicIp" type:"string"` +} + +// String returns the string representation +func (s NatGatewayAddress) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NatGatewayAddress) GoString() string { + return s.String() +} + +// Describes a network ACL. +type NetworkAcl struct { + _ struct{} `type:"structure"` + + // Any associations between the network ACL and one or more subnets + Associations []*NetworkAclAssociation `locationName:"associationSet" locationNameList:"item" type:"list"` + + // One or more entries (rules) in the network ACL. + Entries []*NetworkAclEntry `locationName:"entrySet" locationNameList:"item" type:"list"` + + // Indicates whether this is the default network ACL for the VPC. + IsDefault *bool `locationName:"default" type:"boolean"` + + // The ID of the network ACL. + NetworkAclId *string `locationName:"networkAclId" type:"string"` + + // Any tags assigned to the network ACL. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC for the network ACL. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s NetworkAcl) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkAcl) GoString() string { + return s.String() +} + +// Describes an association between a network ACL and a subnet. +type NetworkAclAssociation struct { + _ struct{} `type:"structure"` + + // The ID of the association between a network ACL and a subnet. + NetworkAclAssociationId *string `locationName:"networkAclAssociationId" type:"string"` + + // The ID of the network ACL. + NetworkAclId *string `locationName:"networkAclId" type:"string"` + + // The ID of the subnet. + SubnetId *string `locationName:"subnetId" type:"string"` +} + +// String returns the string representation +func (s NetworkAclAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkAclAssociation) GoString() string { + return s.String() +} + +// Describes an entry in a network ACL. +type NetworkAclEntry struct { + _ struct{} `type:"structure"` + + // The network range to allow or deny, in CIDR notation. + CidrBlock *string `locationName:"cidrBlock" type:"string"` + + // Indicates whether the rule is an egress rule (applied to traffic leaving + // the subnet). + Egress *bool `locationName:"egress" type:"boolean"` + + // ICMP protocol: The ICMP type and code. 
+ IcmpTypeCode *IcmpTypeCode `locationName:"icmpTypeCode" type:"structure"` + + // TCP or UDP protocols: The range of ports the rule applies to. + PortRange *PortRange `locationName:"portRange" type:"structure"` + + // The protocol. A value of -1 means all protocols. + Protocol *string `locationName:"protocol" type:"string"` + + // Indicates whether to allow or deny the traffic that matches the rule. + RuleAction *string `locationName:"ruleAction" type:"string" enum:"RuleAction"` + + // The rule number for the entry. ACL entries are processed in ascending order + // by rule number. + RuleNumber *int64 `locationName:"ruleNumber" type:"integer"` +} + +// String returns the string representation +func (s NetworkAclEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkAclEntry) GoString() string { + return s.String() +} + +// Describes a network interface. +type NetworkInterface struct { + _ struct{} `type:"structure"` + + // The association information for an Elastic IP associated with the network + // interface. + Association *NetworkInterfaceAssociation `locationName:"association" type:"structure"` + + // The network interface attachment. + Attachment *NetworkInterfaceAttachment `locationName:"attachment" type:"structure"` + + // The Availability Zone. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // A description. + Description *string `locationName:"description" type:"string"` + + // Any security groups for the network interface. + Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // The type of interface. + InterfaceType *string `locationName:"interfaceType" type:"string" enum:"NetworkInterfaceType"` + + // The MAC address. + MacAddress *string `locationName:"macAddress" type:"string"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // The AWS account ID of the owner of the network interface. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The private DNS name. + PrivateDnsName *string `locationName:"privateDnsName" type:"string"` + + // The IP address of the network interface within the subnet. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // The private IP addresses associated with the network interface. + PrivateIpAddresses []*NetworkInterfacePrivateIpAddress `locationName:"privateIpAddressesSet" locationNameList:"item" type:"list"` + + // The ID of the entity that launched the instance on your behalf (for example, + // AWS Management Console or Auto Scaling). + RequesterId *string `locationName:"requesterId" type:"string"` + + // Indicates whether the network interface is being managed by AWS. + RequesterManaged *bool `locationName:"requesterManaged" type:"boolean"` + + // Indicates whether traffic to or from the instance is validated. + SourceDestCheck *bool `locationName:"sourceDestCheck" type:"boolean"` + + // The status of the network interface. + Status *string `locationName:"status" type:"string" enum:"NetworkInterfaceStatus"` + + // The ID of the subnet. + SubnetId *string `locationName:"subnetId" type:"string"` + + // Any tags assigned to the network interface. + TagSet []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC. 
+ VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s NetworkInterface) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkInterface) GoString() string { + return s.String() +} + +// Describes association information for an Elastic IP address. +type NetworkInterfaceAssociation struct { + _ struct{} `type:"structure"` + + // The allocation ID. + AllocationId *string `locationName:"allocationId" type:"string"` + + // The association ID. + AssociationId *string `locationName:"associationId" type:"string"` + + // The ID of the Elastic IP address owner. + IpOwnerId *string `locationName:"ipOwnerId" type:"string"` + + // The public DNS name. + PublicDnsName *string `locationName:"publicDnsName" type:"string"` + + // The address of the Elastic IP address bound to the network interface. + PublicIp *string `locationName:"publicIp" type:"string"` +} + +// String returns the string representation +func (s NetworkInterfaceAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkInterfaceAssociation) GoString() string { + return s.String() +} + +// Describes a network interface attachment. +type NetworkInterfaceAttachment struct { + _ struct{} `type:"structure"` + + // The timestamp indicating when the attachment initiated. + AttachTime *time.Time `locationName:"attachTime" type:"timestamp" timestampFormat:"iso8601"` + + // The ID of the network interface attachment. + AttachmentId *string `locationName:"attachmentId" type:"string"` + + // Indicates whether the network interface is deleted when the instance is terminated. + DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` + + // The device index of the network interface attachment on the instance. + DeviceIndex *int64 `locationName:"deviceIndex" type:"integer"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The AWS account ID of the owner of the instance. + InstanceOwnerId *string `locationName:"instanceOwnerId" type:"string"` + + // The attachment state. + Status *string `locationName:"status" type:"string" enum:"AttachmentStatus"` +} + +// String returns the string representation +func (s NetworkInterfaceAttachment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkInterfaceAttachment) GoString() string { + return s.String() +} + +// Describes an attachment change. +type NetworkInterfaceAttachmentChanges struct { + _ struct{} `type:"structure"` + + // The ID of the network interface attachment. + AttachmentId *string `locationName:"attachmentId" type:"string"` + + // Indicates whether the network interface is deleted when the instance is terminated. + DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` +} + +// String returns the string representation +func (s NetworkInterfaceAttachmentChanges) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkInterfaceAttachmentChanges) GoString() string { + return s.String() +} + +// Describes the private IP address of a network interface. +type NetworkInterfacePrivateIpAddress struct { + _ struct{} `type:"structure"` + + // The association information for an Elastic IP address associated with the + // network interface. 
+ Association *NetworkInterfaceAssociation `locationName:"association" type:"structure"` + + // Indicates whether this IP address is the primary private IP address of the + // network interface. + Primary *bool `locationName:"primary" type:"boolean"` + + // The private DNS name. + PrivateDnsName *string `locationName:"privateDnsName" type:"string"` + + // The private IP address. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` +} + +// String returns the string representation +func (s NetworkInterfacePrivateIpAddress) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkInterfacePrivateIpAddress) GoString() string { + return s.String() +} + +type NewDhcpConfiguration struct { + _ struct{} `type:"structure"` + + Key *string `locationName:"key" type:"string"` + + Values []*string `locationName:"Value" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s NewDhcpConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NewDhcpConfiguration) GoString() string { + return s.String() +} + +// Describes the VPC peering connection options. +type PeeringConnectionOptions struct { + _ struct{} `type:"structure"` + + // If true, enables outbound communication from an EC2-Classic instance that's + // linked to a local VPC via ClassicLink to instances in a peer VPC. + AllowEgressFromLocalClassicLinkToRemoteVpc *bool `locationName:"allowEgressFromLocalClassicLinkToRemoteVpc" type:"boolean"` + + // If true, enables outbound communication from instances in a local VPC to + // an EC2-Classic instance that's linked to a peer VPC via ClassicLink. + AllowEgressFromLocalVpcToRemoteClassicLink *bool `locationName:"allowEgressFromLocalVpcToRemoteClassicLink" type:"boolean"` +} + +// String returns the string representation +func (s PeeringConnectionOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PeeringConnectionOptions) GoString() string { + return s.String() +} + +// The VPC peering connection options. +type PeeringConnectionOptionsRequest struct { + _ struct{} `type:"structure"` + + // If true, enables outbound communication from an EC2-Classic instance that's + // linked to a local VPC via ClassicLink to instances in a peer VPC. + AllowEgressFromLocalClassicLinkToRemoteVpc *bool `type:"boolean" required:"true"` + + // If true, enables outbound communication from instances in a local VPC to + // an EC2-Classic instance that's linked to a peer VPC via ClassicLink. + AllowEgressFromLocalVpcToRemoteClassicLink *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s PeeringConnectionOptionsRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PeeringConnectionOptionsRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
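+//
+// Both boolean fields are required, so a sketch that passes validation sets
+// each one explicitly via aws.Bool:
+//
+//    opts := &ec2.PeeringConnectionOptionsRequest{
+//        AllowEgressFromLocalClassicLinkToRemoteVpc: aws.Bool(false),
+//        AllowEgressFromLocalVpcToRemoteClassicLink: aws.Bool(true),
+//    }
+//    if err := opts.Validate(); err != nil {
+//        // one of the required booleans was left nil.
+//    }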
+func (s *PeeringConnectionOptionsRequest) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PeeringConnectionOptionsRequest"}
+	if s.AllowEgressFromLocalClassicLinkToRemoteVpc == nil {
+		invalidParams.Add(request.NewErrParamRequired("AllowEgressFromLocalClassicLinkToRemoteVpc"))
+	}
+	if s.AllowEgressFromLocalVpcToRemoteClassicLink == nil {
+		invalidParams.Add(request.NewErrParamRequired("AllowEgressFromLocalVpcToRemoteClassicLink"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Describes the placement for the instance.
+type Placement struct {
+	_ struct{} `type:"structure"`
+
+	// The affinity setting for the instance on the Dedicated host. This parameter
+	// is not supported for the ImportInstance command.
+	Affinity *string `locationName:"affinity" type:"string"`
+
+	// The Availability Zone of the instance.
+	AvailabilityZone *string `locationName:"availabilityZone" type:"string"`
+
+	// The name of the placement group the instance is in (for cluster compute instances).
+	GroupName *string `locationName:"groupName" type:"string"`
+
+	// The ID of the Dedicated host on which the instance resides. This parameter
+	// is not supported for the ImportInstance command.
+	HostId *string `locationName:"hostId" type:"string"`
+
+	// The tenancy of the instance (if the instance is running in a VPC). An instance
+	// with a tenancy of dedicated runs on single-tenant hardware. The host tenancy
+	// is not supported for the ImportInstance command.
+	Tenancy *string `locationName:"tenancy" type:"string" enum:"Tenancy"`
+}
+
+// String returns the string representation
+func (s Placement) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Placement) GoString() string {
+	return s.String()
+}
+
+// Describes a placement group.
+type PlacementGroup struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the placement group.
+	GroupName *string `locationName:"groupName" type:"string"`
+
+	// The state of the placement group.
+	State *string `locationName:"state" type:"string" enum:"PlacementGroupState"`
+
+	// The placement strategy.
+	Strategy *string `locationName:"strategy" type:"string" enum:"PlacementStrategy"`
+}
+
+// String returns the string representation
+func (s PlacementGroup) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PlacementGroup) GoString() string {
+	return s.String()
+}
+
+// Describes a range of ports.
+type PortRange struct {
+	_ struct{} `type:"structure"`
+
+	// The first port in the range.
+	From *int64 `locationName:"from" type:"integer"`
+
+	// The last port in the range.
+	To *int64 `locationName:"to" type:"integer"`
+}
+
+// String returns the string representation
+func (s PortRange) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PortRange) GoString() string {
+	return s.String()
+}
+
+// Describes prefixes for AWS services.
+type PrefixList struct {
+	_ struct{} `type:"structure"`
+
+	// The IP address range of the AWS service.
+	Cidrs []*string `locationName:"cidrSet" locationNameList:"item" type:"list"`
+
+	// The ID of the prefix.
+	PrefixListId *string `locationName:"prefixListId" type:"string"`
+
+	// The name of the prefix.
+ PrefixListName *string `locationName:"prefixListName" type:"string"` +} + +// String returns the string representation +func (s PrefixList) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PrefixList) GoString() string { + return s.String() +} + +// The ID of the prefix. +type PrefixListId struct { + _ struct{} `type:"structure"` + + // The ID of the prefix. + PrefixListId *string `locationName:"prefixListId" type:"string"` +} + +// String returns the string representation +func (s PrefixListId) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PrefixListId) GoString() string { + return s.String() +} + +// Describes the price for a Reserved Instance. +type PriceSchedule struct { + _ struct{} `type:"structure"` + + // The current price schedule, as determined by the term remaining for the Reserved + // Instance in the listing. + // + // A specific price schedule is always in effect, but only one price schedule + // can be active at any time. Take, for example, a Reserved Instance listing + // that has five months remaining in its term. When you specify price schedules + // for five months and two months, this means that schedule 1, covering the + // first three months of the remaining term, will be active during months 5, + // 4, and 3. Then schedule 2, covering the last two months of the term, will + // be active for months 2 and 1. + Active *bool `locationName:"active" type:"boolean"` + + // The currency for transacting the Reserved Instance resale. At this time, + // the only supported currency is USD. + CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"` + + // The fixed price for the term. + Price *float64 `locationName:"price" type:"double"` + + // The number of months remaining in the reservation. For example, 2 is the + // second to the last month before the capacity reservation expires. + Term *int64 `locationName:"term" type:"long"` +} + +// String returns the string representation +func (s PriceSchedule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PriceSchedule) GoString() string { + return s.String() +} + +// Describes the price for a Reserved Instance. +type PriceScheduleSpecification struct { + _ struct{} `type:"structure"` + + // The currency for transacting the Reserved Instance resale. At this time, + // the only supported currency is USD. + CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"` + + // The fixed price for the term. + Price *float64 `locationName:"price" type:"double"` + + // The number of months remaining in the reservation. For example, 2 is the + // second to the last month before the capacity reservation expires. + Term *int64 `locationName:"term" type:"long"` +} + +// String returns the string representation +func (s PriceScheduleSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PriceScheduleSpecification) GoString() string { + return s.String() +} + +// Describes a Reserved Instance offering. +type PricingDetail struct { + _ struct{} `type:"structure"` + + // The number of reservations available for the price. + Count *int64 `locationName:"count" type:"integer"` + + // The price per instance. 
+ Price *float64 `locationName:"price" type:"double"` +} + +// String returns the string representation +func (s PricingDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PricingDetail) GoString() string { + return s.String() +} + +// Describes a secondary private IP address for a network interface. +type PrivateIpAddressSpecification struct { + _ struct{} `type:"structure"` + + // Indicates whether the private IP address is the primary private IP address. + // Only one IP address can be designated as primary. + Primary *bool `locationName:"primary" type:"boolean"` + + // The private IP addresses. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string" required:"true"` +} + +// String returns the string representation +func (s PrivateIpAddressSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PrivateIpAddressSpecification) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PrivateIpAddressSpecification) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PrivateIpAddressSpecification"} + if s.PrivateIpAddress == nil { + invalidParams.Add(request.NewErrParamRequired("PrivateIpAddress")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Describes a product code. +type ProductCode struct { + _ struct{} `type:"structure"` + + // The product code. + ProductCodeId *string `locationName:"productCode" type:"string"` + + // The type of product code. + ProductCodeType *string `locationName:"type" type:"string" enum:"ProductCodeValues"` +} + +// String returns the string representation +func (s ProductCode) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProductCode) GoString() string { + return s.String() +} + +// Describes a virtual private gateway propagating route. +type PropagatingVgw struct { + _ struct{} `type:"structure"` + + // The ID of the virtual private gateway (VGW). + GatewayId *string `locationName:"gatewayId" type:"string"` +} + +// String returns the string representation +func (s PropagatingVgw) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PropagatingVgw) GoString() string { + return s.String() +} + +// Reserved. If you need to sustain traffic greater than the documented limits +// (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html), +// contact us through the Support Center (https://console.aws.amazon.com/support/home?). +type ProvisionedBandwidth struct { + _ struct{} `type:"structure"` + + // Reserved. If you need to sustain traffic greater than the documented limits + // (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html), + // contact us through the Support Center (https://console.aws.amazon.com/support/home?). + ProvisionTime *time.Time `locationName:"provisionTime" type:"timestamp" timestampFormat:"iso8601"` + + // Reserved. If you need to sustain traffic greater than the documented limits + // (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html), + // contact us through the Support Center (https://console.aws.amazon.com/support/home?). + Provisioned *string `locationName:"provisioned" type:"string"` + + // Reserved. 
If you need to sustain traffic greater than the documented limits + // (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html), + // contact us through the Support Center (https://console.aws.amazon.com/support/home?). + RequestTime *time.Time `locationName:"requestTime" type:"timestamp" timestampFormat:"iso8601"` + + // Reserved. If you need to sustain traffic greater than the documented limits + // (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html), + // contact us through the Support Center (https://console.aws.amazon.com/support/home?). + Requested *string `locationName:"requested" type:"string"` + + // Reserved. If you need to sustain traffic greater than the documented limits + // (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html), + // contact us through the Support Center (https://console.aws.amazon.com/support/home?). + Status *string `locationName:"status" type:"string"` +} + +// String returns the string representation +func (s ProvisionedBandwidth) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProvisionedBandwidth) GoString() string { + return s.String() +} + +// Describes a request to purchase Scheduled Instances. +type PurchaseRequest struct { + _ struct{} `type:"structure"` + + // The number of instances. + InstanceCount *int64 `type:"integer" required:"true"` + + // The purchase token. + PurchaseToken *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s PurchaseRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PurchaseRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PurchaseRequest"} + if s.InstanceCount == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceCount")) + } + if s.PurchaseToken == nil { + invalidParams.Add(request.NewErrParamRequired("PurchaseToken")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the parameters for PurchaseReservedInstancesOffering. +type PurchaseReservedInstancesOfferingInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The number of Reserved Instances to purchase. + InstanceCount *int64 `type:"integer" required:"true"` + + // Specified for Reserved Instance Marketplace offerings to limit the total + // order and ensure that the Reserved Instances are not purchased at unexpected + // prices. + LimitPrice *ReservedInstanceLimitPrice `locationName:"limitPrice" type:"structure"` + + // The ID of the Reserved Instance offering to purchase. 
+ ReservedInstancesOfferingId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s PurchaseReservedInstancesOfferingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseReservedInstancesOfferingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PurchaseReservedInstancesOfferingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PurchaseReservedInstancesOfferingInput"} + if s.InstanceCount == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceCount")) + } + if s.ReservedInstancesOfferingId == nil { + invalidParams.Add(request.NewErrParamRequired("ReservedInstancesOfferingId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of PurchaseReservedInstancesOffering. +type PurchaseReservedInstancesOfferingOutput struct { + _ struct{} `type:"structure"` + + // The IDs of the purchased Reserved Instances. + ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"` +} + +// String returns the string representation +func (s PurchaseReservedInstancesOfferingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseReservedInstancesOfferingOutput) GoString() string { + return s.String() +} + +// Contains the parameters for PurchaseScheduledInstances. +type PurchaseScheduledInstancesInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that ensures the idempotency of the request. + // For more information, see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `type:"string" idempotencyToken:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more purchase requests. + PurchaseRequests []*PurchaseRequest `locationName:"PurchaseRequest" locationNameList:"PurchaseRequest" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s PurchaseScheduledInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseScheduledInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PurchaseScheduledInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PurchaseScheduledInstancesInput"} + if s.PurchaseRequests == nil { + invalidParams.Add(request.NewErrParamRequired("PurchaseRequests")) + } + if s.PurchaseRequests != nil && len(s.PurchaseRequests) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PurchaseRequests", 1)) + } + if s.PurchaseRequests != nil { + for i, v := range s.PurchaseRequests { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PurchaseRequests", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of PurchaseScheduledInstances. 
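+//
+// A sketch of iterating the purchased instances in this output (out is an
+// assumed *ec2.PurchaseScheduledInstancesOutput):
+//
+//    for _, si := range out.ScheduledInstanceSet {
+//        fmt.Println(aws.StringValue(si.ScheduledInstanceId))
+//    }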
+type PurchaseScheduledInstancesOutput struct { + _ struct{} `type:"structure"` + + // Information about the Scheduled Instances. + ScheduledInstanceSet []*ScheduledInstance `locationName:"scheduledInstanceSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s PurchaseScheduledInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseScheduledInstancesOutput) GoString() string { + return s.String() +} + +// Contains the parameters for RebootInstances. +type RebootInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more instance IDs. + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list" required:"true"` +} + +// String returns the string representation +func (s RebootInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebootInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RebootInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RebootInstancesInput"} + if s.InstanceIds == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RebootInstancesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RebootInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebootInstancesOutput) GoString() string { + return s.String() +} + +// Describes a recurring charge. +type RecurringCharge struct { + _ struct{} `type:"structure"` + + // The amount of the recurring charge. + Amount *float64 `locationName:"amount" type:"double"` + + // The frequency of the recurring charge. + Frequency *string `locationName:"frequency" type:"string" enum:"RecurringChargeFrequency"` +} + +// String returns the string representation +func (s RecurringCharge) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecurringCharge) GoString() string { + return s.String() +} + +// Describes a region. +type Region struct { + _ struct{} `type:"structure"` + + // The region service endpoint. + Endpoint *string `locationName:"regionEndpoint" type:"string"` + + // The name of the region. + RegionName *string `locationName:"regionName" type:"string"` +} + +// String returns the string representation +func (s Region) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Region) GoString() string { + return s.String() +} + +// Contains the parameters for RegisterImage. +type RegisterImageInput struct { + _ struct{} `type:"structure"` + + // The architecture of the AMI. + // + // Default: For Amazon EBS-backed AMIs, i386. For instance store-backed AMIs, + // the architecture specified in the manifest file. 
+ Architecture *string `locationName:"architecture" type:"string" enum:"ArchitectureValues"`
+
+ // One or more block device mapping entries.
+ BlockDeviceMappings []*BlockDeviceMapping `locationName:"BlockDeviceMapping" locationNameList:"BlockDeviceMapping" type:"list"`
+
+ // A description for your AMI.
+ Description *string `locationName:"description" type:"string"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+ // Set to true to enable enhanced networking with ENA for the AMI and any instances
+ // that you launch from the AMI.
+ //
+ // This option is supported only for HVM AMIs. Specifying this option with
+ // a PV AMI can make instances launched from the AMI unreachable.
+ EnaSupport *bool `locationName:"enaSupport" type:"boolean"`
+
+ // The full path to your AMI manifest in Amazon S3 storage.
+ ImageLocation *string `type:"string"`
+
+ // The ID of the kernel.
+ KernelId *string `locationName:"kernelId" type:"string"`
+
+ // A name for your AMI.
+ //
+ // Constraints: 3-128 alphanumeric characters, parentheses (()), square brackets
+ // ([]), spaces ( ), periods (.), slashes (/), dashes (-), single quotes ('),
+ // at-signs (@), or underscores (_)
+ Name *string `locationName:"name" type:"string" required:"true"`
+
+ // The ID of the RAM disk.
+ RamdiskId *string `locationName:"ramdiskId" type:"string"`
+
+ // The name of the root device (for example, /dev/sda1, or /dev/xvda).
+ RootDeviceName *string `locationName:"rootDeviceName" type:"string"`
+
+ // Set to simple to enable enhanced networking with the Intel 82599 Virtual
+ // Function interface for the AMI and any instances that you launch from the
+ // AMI.
+ //
+ // There is no way to disable sriovNetSupport at this time.
+ //
+ // This option is supported only for HVM AMIs. Specifying this option with
+ // a PV AMI can make instances launched from the AMI unreachable.
+ SriovNetSupport *string `locationName:"sriovNetSupport" type:"string"`
+
+ // The type of virtualization.
+ //
+ // Default: paravirtual
+ VirtualizationType *string `locationName:"virtualizationType" type:"string"`
+}
+
+// String returns the string representation
+func (s RegisterImageInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterImageInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RegisterImageInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RegisterImageInput"}
+ if s.Name == nil {
+ invalidParams.Add(request.NewErrParamRequired("Name"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Contains the output of RegisterImage.
+type RegisterImageOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the newly registered AMI.
+ ImageId *string `locationName:"imageId" type:"string"`
+}
+
+// String returns the string representation
+func (s RegisterImageOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterImageOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the parameters for RejectVpcPeeringConnection.
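+//
+// For illustration, rejecting a pending peering request might look like this
+// (the connection ID is hypothetical, and svc is an *ec2.EC2 client):
+//
+//    _, err := svc.RejectVpcPeeringConnection(&ec2.RejectVpcPeeringConnectionInput{
+//        VpcPeeringConnectionId: aws.String("pcx-1a2b3c4d"),
+//    })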
+type RejectVpcPeeringConnectionInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC peering connection. + VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s RejectVpcPeeringConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RejectVpcPeeringConnectionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RejectVpcPeeringConnectionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RejectVpcPeeringConnectionInput"} + if s.VpcPeeringConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcPeeringConnectionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of RejectVpcPeeringConnection. +type RejectVpcPeeringConnectionOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s RejectVpcPeeringConnectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RejectVpcPeeringConnectionOutput) GoString() string { + return s.String() +} + +// Contains the parameters for ReleaseAddress. +type ReleaseAddressInput struct { + _ struct{} `type:"structure"` + + // [EC2-VPC] The allocation ID. Required for EC2-VPC. + AllocationId *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // [EC2-Classic] The Elastic IP address. Required for EC2-Classic. + PublicIp *string `type:"string"` +} + +// String returns the string representation +func (s ReleaseAddressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReleaseAddressInput) GoString() string { + return s.String() +} + +type ReleaseAddressOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ReleaseAddressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReleaseAddressOutput) GoString() string { + return s.String() +} + +// Contains the parameters for ReleaseHosts. +type ReleaseHostsInput struct { + _ struct{} `type:"structure"` + + // The IDs of the Dedicated hosts you want to release. 
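+	//
+	// For example (hypothetical Dedicated Host ID):
+	//
+	//    HostIds: []*string{aws.String("h-0123456789abcdef0")}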
+ HostIds []*string `locationName:"hostId" locationNameList:"item" type:"list" required:"true"` +} + +// String returns the string representation +func (s ReleaseHostsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReleaseHostsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReleaseHostsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReleaseHostsInput"} + if s.HostIds == nil { + invalidParams.Add(request.NewErrParamRequired("HostIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of ReleaseHosts. +type ReleaseHostsOutput struct { + _ struct{} `type:"structure"` + + // The IDs of the Dedicated hosts that were successfully released. + Successful []*string `locationName:"successful" locationNameList:"item" type:"list"` + + // The IDs of the Dedicated hosts that could not be released, including an error + // message. + Unsuccessful []*UnsuccessfulItem `locationName:"unsuccessful" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s ReleaseHostsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReleaseHostsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for ReplaceNetworkAclAssociation. +type ReplaceNetworkAclAssociationInput struct { + _ struct{} `type:"structure"` + + // The ID of the current association between the original network ACL and the + // subnet. + AssociationId *string `locationName:"associationId" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the new network ACL to associate with the subnet. + NetworkAclId *string `locationName:"networkAclId" type:"string" required:"true"` +} + +// String returns the string representation +func (s ReplaceNetworkAclAssociationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplaceNetworkAclAssociationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplaceNetworkAclAssociationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplaceNetworkAclAssociationInput"} + if s.AssociationId == nil { + invalidParams.Add(request.NewErrParamRequired("AssociationId")) + } + if s.NetworkAclId == nil { + invalidParams.Add(request.NewErrParamRequired("NetworkAclId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of ReplaceNetworkAclAssociation. +type ReplaceNetworkAclAssociationOutput struct { + _ struct{} `type:"structure"` + + // The ID of the new association. 
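+	//
+	// A sketch of reading it back after a successful call (the IDs are
+	// hypothetical, and svc is an *ec2.EC2 client):
+	//
+	//    out, err := svc.ReplaceNetworkAclAssociation(&ec2.ReplaceNetworkAclAssociationInput{
+	//        AssociationId: aws.String("aclassoc-e5b95c8c"),
+	//        NetworkAclId:  aws.String("acl-5fb85d36"),
+	//    })
+	//    if err == nil {
+	//        fmt.Println(aws.StringValue(out.NewAssociationId))
+	//    }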
+ NewAssociationId *string `locationName:"newAssociationId" type:"string"` +} + +// String returns the string representation +func (s ReplaceNetworkAclAssociationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplaceNetworkAclAssociationOutput) GoString() string { + return s.String() +} + +// Contains the parameters for ReplaceNetworkAclEntry. +type ReplaceNetworkAclEntryInput struct { + _ struct{} `type:"structure"` + + // The network range to allow or deny, in CIDR notation. + CidrBlock *string `locationName:"cidrBlock" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Indicates whether to replace the egress rule. + // + // Default: If no value is specified, we replace the ingress rule. + Egress *bool `locationName:"egress" type:"boolean" required:"true"` + + // ICMP protocol: The ICMP type and code. Required if specifying 1 (ICMP) for + // the protocol. + IcmpTypeCode *IcmpTypeCode `locationName:"Icmp" type:"structure"` + + // The ID of the ACL. + NetworkAclId *string `locationName:"networkAclId" type:"string" required:"true"` + + // TCP or UDP protocols: The range of ports the rule applies to. Required if + // specifying 6 (TCP) or 17 (UDP) for the protocol. + PortRange *PortRange `locationName:"portRange" type:"structure"` + + // The IP protocol. You can specify all or -1 to mean all protocols. + Protocol *string `locationName:"protocol" type:"string" required:"true"` + + // Indicates whether to allow or deny the traffic that matches the rule. + RuleAction *string `locationName:"ruleAction" type:"string" required:"true" enum:"RuleAction"` + + // The rule number of the entry to replace. + RuleNumber *int64 `locationName:"ruleNumber" type:"integer" required:"true"` +} + +// String returns the string representation +func (s ReplaceNetworkAclEntryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplaceNetworkAclEntryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
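+//
+// The SDK's request handlers call Validate automatically before a request is
+// sent; a manual pre-flight check is also possible:
+//
+//    if err := params.Validate(); err != nil {
+//        // inspect the aggregated request.ErrInvalidParams before calling the API
+//    }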
+func (s *ReplaceNetworkAclEntryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplaceNetworkAclEntryInput"} + if s.CidrBlock == nil { + invalidParams.Add(request.NewErrParamRequired("CidrBlock")) + } + if s.Egress == nil { + invalidParams.Add(request.NewErrParamRequired("Egress")) + } + if s.NetworkAclId == nil { + invalidParams.Add(request.NewErrParamRequired("NetworkAclId")) + } + if s.Protocol == nil { + invalidParams.Add(request.NewErrParamRequired("Protocol")) + } + if s.RuleAction == nil { + invalidParams.Add(request.NewErrParamRequired("RuleAction")) + } + if s.RuleNumber == nil { + invalidParams.Add(request.NewErrParamRequired("RuleNumber")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ReplaceNetworkAclEntryOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ReplaceNetworkAclEntryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplaceNetworkAclEntryOutput) GoString() string { + return s.String() +} + +// Contains the parameters for ReplaceRoute. +type ReplaceRouteInput struct { + _ struct{} `type:"structure"` + + // The CIDR address block used for the destination match. The value you provide + // must match the CIDR of an existing route in the table. + DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of an Internet gateway or virtual private gateway. + GatewayId *string `locationName:"gatewayId" type:"string"` + + // The ID of a NAT instance in your VPC. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The ID of a NAT gateway. + NatGatewayId *string `locationName:"natGatewayId" type:"string"` + + // The ID of a network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // The ID of the route table. + RouteTableId *string `locationName:"routeTableId" type:"string" required:"true"` + + // The ID of a VPC peering connection. + VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"` +} + +// String returns the string representation +func (s ReplaceRouteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplaceRouteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ReplaceRouteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplaceRouteInput"} + if s.DestinationCidrBlock == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationCidrBlock")) + } + if s.RouteTableId == nil { + invalidParams.Add(request.NewErrParamRequired("RouteTableId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ReplaceRouteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ReplaceRouteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplaceRouteOutput) GoString() string { + return s.String() +} + +// Contains the parameters for ReplaceRouteTableAssociation. +type ReplaceRouteTableAssociationInput struct { + _ struct{} `type:"structure"` + + // The association ID. + AssociationId *string `locationName:"associationId" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the new route table to associate with the subnet. + RouteTableId *string `locationName:"routeTableId" type:"string" required:"true"` +} + +// String returns the string representation +func (s ReplaceRouteTableAssociationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplaceRouteTableAssociationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplaceRouteTableAssociationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplaceRouteTableAssociationInput"} + if s.AssociationId == nil { + invalidParams.Add(request.NewErrParamRequired("AssociationId")) + } + if s.RouteTableId == nil { + invalidParams.Add(request.NewErrParamRequired("RouteTableId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of ReplaceRouteTableAssociation. +type ReplaceRouteTableAssociationOutput struct { + _ struct{} `type:"structure"` + + // The ID of the new association. + NewAssociationId *string `locationName:"newAssociationId" type:"string"` +} + +// String returns the string representation +func (s ReplaceRouteTableAssociationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplaceRouteTableAssociationOutput) GoString() string { + return s.String() +} + +// Contains the parameters for ReportInstanceStatus. +type ReportInstanceStatusInput struct { + _ struct{} `type:"structure"` + + // Descriptive text about the health state of your instance. + Description *string `locationName:"description" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The time at which the reported instance health state ended. 
+ EndTime *time.Time `locationName:"endTime" type:"timestamp" timestampFormat:"iso8601"`
+
+ // One or more instances.
+ Instances []*string `locationName:"instanceId" locationNameList:"InstanceId" type:"list" required:"true"`
+
+ // One or more reason codes that describe the health state of your instance.
+ //
+ // instance-stuck-in-state: My instance is stuck in a state.
+ //
+ // unresponsive: My instance is unresponsive.
+ //
+ // not-accepting-credentials: My instance is not accepting my credentials.
+ //
+ // password-not-available: A password is not available for my instance.
+ //
+ // performance-network: My instance is experiencing performance problems
+ // which I believe are network related.
+ //
+ // performance-instance-store: My instance is experiencing performance problems
+ // which I believe are related to the instance stores.
+ //
+ // performance-ebs-volume: My instance is experiencing performance problems
+ // which I believe are related to an EBS volume.
+ //
+ // performance-other: My instance is experiencing performance problems.
+ //
+ // other: [explain using the description parameter]
+ ReasonCodes []*string `locationName:"reasonCode" locationNameList:"item" type:"list" required:"true"`
+
+ // The time at which the reported instance health state began.
+ StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601"`
+
+ // The status of all instances listed.
+ Status *string `locationName:"status" type:"string" required:"true" enum:"ReportStatusType"`
+}
+
+// String returns the string representation
+func (s ReportInstanceStatusInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReportInstanceStatusInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReportInstanceStatusInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ReportInstanceStatusInput"}
+ if s.Instances == nil {
+ invalidParams.Add(request.NewErrParamRequired("Instances"))
+ }
+ if s.ReasonCodes == nil {
+ invalidParams.Add(request.NewErrParamRequired("ReasonCodes"))
+ }
+ if s.Status == nil {
+ invalidParams.Add(request.NewErrParamRequired("Status"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type ReportInstanceStatusOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s ReportInstanceStatusOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReportInstanceStatusOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the parameters for RequestSpotFleet.
+type RequestSpotFleetInput struct {
+ _ struct{} `type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+ // The configuration for the Spot fleet request.
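+	//
+	// A skeletal configuration might look like this (the role ARN and AMI ID
+	// are hypothetical):
+	//
+	//    SpotFleetRequestConfig: &ec2.SpotFleetRequestConfigData{
+	//        IamFleetRole:   aws.String("arn:aws:iam::123456789012:role/fleet-role-example"),
+	//        SpotPrice:      aws.String("0.05"),
+	//        TargetCapacity: aws.Int64(2),
+	//        LaunchSpecifications: []*ec2.SpotFleetLaunchSpecification{{
+	//            ImageId:      aws.String("ami-example12345"),
+	//            InstanceType: aws.String("m3.medium"),
+	//        }},
+	//    },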
+ SpotFleetRequestConfig *SpotFleetRequestConfigData `locationName:"spotFleetRequestConfig" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s RequestSpotFleetInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RequestSpotFleetInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RequestSpotFleetInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RequestSpotFleetInput"}
+ if s.SpotFleetRequestConfig == nil {
+ invalidParams.Add(request.NewErrParamRequired("SpotFleetRequestConfig"))
+ }
+ if s.SpotFleetRequestConfig != nil {
+ if err := s.SpotFleetRequestConfig.Validate(); err != nil {
+ invalidParams.AddNested("SpotFleetRequestConfig", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Contains the output of RequestSpotFleet.
+type RequestSpotFleetOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the Spot fleet request.
+ SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RequestSpotFleetOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RequestSpotFleetOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the parameters for RequestSpotInstances.
+type RequestSpotInstancesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The user-specified name for a logical grouping of bids.
+ //
+ // When you specify an Availability Zone group in a Spot Instance request,
+ // all Spot instances in the request are launched in the same Availability Zone.
+ // Instance proximity is maintained with this parameter, but the choice of Availability
+ // Zone is not. The group applies only to bids for Spot Instances of the same
+ // instance type. Any additional Spot instance requests that are specified with
+ // the same Availability Zone group name are launched in that same Availability
+ // Zone, as long as at least one instance from the group is still active.
+ //
+ // If there is no active instance running in the Availability Zone group that
+ // you specify for a new Spot instance request (all instances are terminated,
+ // the bid has expired, or the bid falls below the current market price), then
+ // Amazon EC2 launches the instance in any Availability Zone where the constraint
+ // can be met. Consequently, the subsequent set of Spot instances could be placed
+ // in a different zone from the original request, even if you specified the same
+ // Availability Zone group.
+ //
+ // Default: Instances are launched in any available Availability Zone.
+ AvailabilityZoneGroup *string `locationName:"availabilityZoneGroup" type:"string"`
+
+ // The required duration for the Spot instances (also known as Spot blocks),
+ // in minutes. This value must be a multiple of 60 (60, 120, 180, 240, 300,
+ // or 360).
+ //
+ // The duration period starts as soon as your Spot instance receives its instance
+ // ID. At the end of the duration period, Amazon EC2 marks the Spot instance
+ // for termination and provides a Spot instance termination notice, which gives
+ // the instance a two-minute warning before it terminates.
+ //
+ // Note that you can't specify an Availability Zone group or a launch group
+ // if you specify a duration.
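+	//
+	// For example, a two-hour Spot block:
+	//
+	//    BlockDurationMinutes: aws.Int64(120)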
+ BlockDurationMinutes *int64 `locationName:"blockDurationMinutes" type:"integer"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html) + // in the Amazon Elastic Compute Cloud User Guide. + ClientToken *string `locationName:"clientToken" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The maximum number of Spot instances to launch. + // + // Default: 1 + InstanceCount *int64 `locationName:"instanceCount" type:"integer"` + + // The instance launch group. Launch groups are Spot instances that launch together + // and terminate together. + // + // Default: Instances are launched and terminated individually + LaunchGroup *string `locationName:"launchGroup" type:"string"` + + // Describes the launch specification for an instance. + LaunchSpecification *RequestSpotLaunchSpecification `type:"structure"` + + // The maximum hourly price (bid) for any Spot instance launched to fulfill + // the request. + SpotPrice *string `locationName:"spotPrice" type:"string" required:"true"` + + // The Spot instance request type. + // + // Default: one-time + Type *string `locationName:"type" type:"string" enum:"SpotInstanceType"` + + // The start date of the request. If this is a one-time request, the request + // becomes active at this date and time and remains active until all instances + // launch, the request expires, or the request is canceled. If the request is + // persistent, the request becomes active at this date and time and remains + // active until it expires or is canceled. + // + // Default: The request is effective indefinitely. + ValidFrom *time.Time `locationName:"validFrom" type:"timestamp" timestampFormat:"iso8601"` + + // The end date of the request. If this is a one-time request, the request remains + // active until all instances launch, the request is canceled, or this date + // is reached. If the request is persistent, it remains active until it is canceled + // or this date and time is reached. + // + // Default: The request is effective indefinitely. + ValidUntil *time.Time `locationName:"validUntil" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s RequestSpotInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestSpotInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RequestSpotInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RequestSpotInstancesInput"} + if s.SpotPrice == nil { + invalidParams.Add(request.NewErrParamRequired("SpotPrice")) + } + if s.LaunchSpecification != nil { + if err := s.LaunchSpecification.Validate(); err != nil { + invalidParams.AddNested("LaunchSpecification", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of RequestSpotInstances. 
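+//
+// A minimal sketch of the corresponding request (the AMI ID is hypothetical,
+// and svc is an *ec2.EC2 client):
+//
+//    out, err := svc.RequestSpotInstances(&ec2.RequestSpotInstancesInput{
+//        SpotPrice:     aws.String("0.03"),
+//        InstanceCount: aws.Int64(1),
+//        LaunchSpecification: &ec2.RequestSpotLaunchSpecification{
+//            ImageId:      aws.String("ami-example12345"),
+//            InstanceType: aws.String("m3.medium"),
+//        },
+//    })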
+type RequestSpotInstancesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // One or more Spot instance requests.
+ SpotInstanceRequests []*SpotInstanceRequest `locationName:"spotInstanceRequestSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s RequestSpotInstancesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RequestSpotInstancesOutput) GoString() string {
+ return s.String()
+}
+
+// Describes the launch specification for an instance.
+type RequestSpotLaunchSpecification struct {
+ _ struct{} `type:"structure"`
+
+ // Deprecated.
+ AddressingType *string `locationName:"addressingType" type:"string"`
+
+ // One or more block device mapping entries.
+ //
+ // Although you can specify encrypted EBS volumes in this block device mapping
+ // for your Spot Instances, these volumes are not encrypted.
+ BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"`
+
+ // Indicates whether the instance is optimized for EBS I/O. This optimization
+ // provides dedicated throughput to Amazon EBS and an optimized configuration
+ // stack to provide optimal EBS I/O performance. This optimization isn't available
+ // with all instance types. Additional usage charges apply when using an EBS
+ // Optimized instance.
+ //
+ // Default: false
+ EbsOptimized *bool `locationName:"ebsOptimized" type:"boolean"`
+
+ // The IAM instance profile.
+ IamInstanceProfile *IamInstanceProfileSpecification `locationName:"iamInstanceProfile" type:"structure"`
+
+ // The ID of the AMI.
+ ImageId *string `locationName:"imageId" type:"string"`
+
+ // The instance type.
+ InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"`
+
+ // The ID of the kernel.
+ KernelId *string `locationName:"kernelId" type:"string"`
+
+ // The name of the key pair.
+ KeyName *string `locationName:"keyName" type:"string"`
+
+ // Describes the monitoring for the instance.
+ Monitoring *RunInstancesMonitoringEnabled `locationName:"monitoring" type:"structure"`
+
+ // One or more network interfaces.
+ NetworkInterfaces []*InstanceNetworkInterfaceSpecification `locationName:"NetworkInterface" locationNameList:"item" type:"list"`
+
+ // The placement information for the instance.
+ Placement *SpotPlacement `locationName:"placement" type:"structure"`
+
+ // The ID of the RAM disk.
+ RamdiskId *string `locationName:"ramdiskId" type:"string"`
+
+ // One or more security group IDs.
+ SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"item" type:"list"`
+
+ // [EC2-Classic, default VPC] One or more security group names.
+ SecurityGroups []*string `locationName:"SecurityGroup" locationNameList:"item" type:"list"`
+
+ // The ID of the subnet in which to launch the instance.
+ SubnetId *string `locationName:"subnetId" type:"string"`
+
+ // The user data to make available to the instances. If you are using an AWS
+ // SDK or command line tool, Base64-encoding is performed for you, and you can
+ // load the text from a file. Otherwise, you must provide Base64-encoded text.
+ UserData *string `locationName:"userData" type:"string"`
+}
+
+// String returns the string representation
+func (s RequestSpotLaunchSpecification) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RequestSpotLaunchSpecification) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RequestSpotLaunchSpecification) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RequestSpotLaunchSpecification"} + if s.Monitoring != nil { + if err := s.Monitoring.Validate(); err != nil { + invalidParams.AddNested("Monitoring", err.(request.ErrInvalidParams)) + } + } + if s.NetworkInterfaces != nil { + for i, v := range s.NetworkInterfaces { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "NetworkInterfaces", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Describes a reservation. +type Reservation struct { + _ struct{} `type:"structure"` + + // [EC2-Classic only] One or more security groups. + Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // One or more instances. + Instances []*Instance `locationName:"instancesSet" locationNameList:"item" type:"list"` + + // The ID of the AWS account that owns the reservation. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The ID of the requester that launched the instances on your behalf (for example, + // AWS Management Console or Auto Scaling). + RequesterId *string `locationName:"requesterId" type:"string"` + + // The ID of the reservation. + ReservationId *string `locationName:"reservationId" type:"string"` +} + +// String returns the string representation +func (s Reservation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Reservation) GoString() string { + return s.String() +} + +// Describes the limit price of a Reserved Instance offering. +type ReservedInstanceLimitPrice struct { + _ struct{} `type:"structure"` + + // Used for Reserved Instance Marketplace offerings. Specifies the limit price + // on the total order (instanceCount * price). + Amount *float64 `locationName:"amount" type:"double"` + + // The currency in which the limitPrice amount is specified. At this time, the + // only supported currency is USD. + CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"` +} + +// String returns the string representation +func (s ReservedInstanceLimitPrice) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedInstanceLimitPrice) GoString() string { + return s.String() +} + +// Describes a Reserved Instance. +type ReservedInstances struct { + _ struct{} `type:"structure"` + + // The Availability Zone in which the Reserved Instance can be used. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The currency of the Reserved Instance. It's specified using ISO 4217 standard + // currency codes. At this time, the only supported currency is USD. + CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"` + + // The duration of the Reserved Instance, in seconds. + Duration *int64 `locationName:"duration" type:"long"` + + // The time when the Reserved Instance expires. + End *time.Time `locationName:"end" type:"timestamp" timestampFormat:"iso8601"` + + // The purchase price of the Reserved Instance. + FixedPrice *float64 `locationName:"fixedPrice" type:"float"` + + // The number of reservations purchased. + InstanceCount *int64 `locationName:"instanceCount" type:"integer"` + + // The tenancy of the instance. 
+ InstanceTenancy *string `locationName:"instanceTenancy" type:"string" enum:"Tenancy"` + + // The instance type on which the Reserved Instance can be used. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // The Reserved Instance offering type. + OfferingType *string `locationName:"offeringType" type:"string" enum:"OfferingTypeValues"` + + // The Reserved Instance product platform description. + ProductDescription *string `locationName:"productDescription" type:"string" enum:"RIProductDescription"` + + // The recurring charge tag assigned to the resource. + RecurringCharges []*RecurringCharge `locationName:"recurringCharges" locationNameList:"item" type:"list"` + + // The ID of the Reserved Instance. + ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"` + + // The date and time the Reserved Instance started. + Start *time.Time `locationName:"start" type:"timestamp" timestampFormat:"iso8601"` + + // The state of the Reserved Instance purchase. + State *string `locationName:"state" type:"string" enum:"ReservedInstanceState"` + + // Any tags assigned to the resource. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The usage price of the Reserved Instance, per hour. + UsagePrice *float64 `locationName:"usagePrice" type:"float"` +} + +// String returns the string representation +func (s ReservedInstances) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedInstances) GoString() string { + return s.String() +} + +// Describes the configuration settings for the modified Reserved Instances. +type ReservedInstancesConfiguration struct { + _ struct{} `type:"structure"` + + // The Availability Zone for the modified Reserved Instances. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The number of modified Reserved Instances. + InstanceCount *int64 `locationName:"instanceCount" type:"integer"` + + // The instance type for the modified Reserved Instances. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // The network platform of the modified Reserved Instances, which is either + // EC2-Classic or EC2-VPC. + Platform *string `locationName:"platform" type:"string"` +} + +// String returns the string representation +func (s ReservedInstancesConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedInstancesConfiguration) GoString() string { + return s.String() +} + +// Describes the ID of a Reserved Instance. +type ReservedInstancesId struct { + _ struct{} `type:"structure"` + + // The ID of the Reserved Instance. + ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"` +} + +// String returns the string representation +func (s ReservedInstancesId) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedInstancesId) GoString() string { + return s.String() +} + +// Describes a Reserved Instance listing. +type ReservedInstancesListing struct { + _ struct{} `type:"structure"` + + // A unique, case-sensitive key supplied by the client to ensure that the request + // is idempotent. For more information, see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `locationName:"clientToken" type:"string"` + + // The time the listing was created. 
+ CreateDate *time.Time `locationName:"createDate" type:"timestamp" timestampFormat:"iso8601"`
+
+ // The number of instances in this state.
+ InstanceCounts []*InstanceCount `locationName:"instanceCounts" locationNameList:"item" type:"list"`
+
+ // The price of the Reserved Instance listing.
+ PriceSchedules []*PriceSchedule `locationName:"priceSchedules" locationNameList:"item" type:"list"`
+
+ // The ID of the Reserved Instance.
+ ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"`
+
+ // The ID of the Reserved Instance listing.
+ ReservedInstancesListingId *string `locationName:"reservedInstancesListingId" type:"string"`
+
+ // The status of the Reserved Instance listing.
+ Status *string `locationName:"status" type:"string" enum:"ListingStatus"`
+
+ // The reason for the current status of the Reserved Instance listing. The response
+ // can be blank.
+ StatusMessage *string `locationName:"statusMessage" type:"string"`
+
+ // Any tags assigned to the resource.
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
+
+ // The last modified timestamp of the listing.
+ UpdateDate *time.Time `locationName:"updateDate" type:"timestamp" timestampFormat:"iso8601"`
+}
+
+// String returns the string representation
+func (s ReservedInstancesListing) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReservedInstancesListing) GoString() string {
+ return s.String()
+}
+
+// Describes a Reserved Instance modification.
+type ReservedInstancesModification struct {
+ _ struct{} `type:"structure"`
+
+ // A unique, case-sensitive key supplied by the client to ensure that the request
+ // is idempotent. For more information, see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
+ ClientToken *string `locationName:"clientToken" type:"string"`
+
+ // The time when the modification request was created.
+ CreateDate *time.Time `locationName:"createDate" type:"timestamp" timestampFormat:"iso8601"`
+
+ // The time for the modification to become effective.
+ EffectiveDate *time.Time `locationName:"effectiveDate" type:"timestamp" timestampFormat:"iso8601"`
+
+ // Contains target configurations along with their corresponding new Reserved
+ // Instance IDs.
+ ModificationResults []*ReservedInstancesModificationResult `locationName:"modificationResultSet" locationNameList:"item" type:"list"`
+
+ // The IDs of one or more Reserved Instances.
+ ReservedInstancesIds []*ReservedInstancesId `locationName:"reservedInstancesSet" locationNameList:"item" type:"list"`
+
+ // A unique ID for the Reserved Instance modification.
+ ReservedInstancesModificationId *string `locationName:"reservedInstancesModificationId" type:"string"`
+
+ // The status of the Reserved Instances modification request.
+ Status *string `locationName:"status" type:"string"`
+
+ // The reason for the status.
+ StatusMessage *string `locationName:"statusMessage" type:"string"`
+
+ // The time when the modification request was last updated.
+ UpdateDate *time.Time `locationName:"updateDate" type:"timestamp" timestampFormat:"iso8601"`
+}
+
+// String returns the string representation
+func (s ReservedInstancesModification) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReservedInstancesModification) GoString() string {
+ return s.String()
+}
+
+// Describes the modification request(s).
+type ReservedInstancesModificationResult struct { + _ struct{} `type:"structure"` + + // The ID for the Reserved Instances that were created as part of the modification + // request. This field is only available when the modification is fulfilled. + ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"` + + // The target Reserved Instances configurations supplied as part of the modification + // request. + TargetConfiguration *ReservedInstancesConfiguration `locationName:"targetConfiguration" type:"structure"` +} + +// String returns the string representation +func (s ReservedInstancesModificationResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedInstancesModificationResult) GoString() string { + return s.String() +} + +// Describes a Reserved Instance offering. +type ReservedInstancesOffering struct { + _ struct{} `type:"structure"` + + // The Availability Zone in which the Reserved Instance can be used. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The currency of the Reserved Instance offering you are purchasing. It's specified + // using ISO 4217 standard currency codes. At this time, the only supported + // currency is USD. + CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"` + + // The duration of the Reserved Instance, in seconds. + Duration *int64 `locationName:"duration" type:"long"` + + // The purchase price of the Reserved Instance. + FixedPrice *float64 `locationName:"fixedPrice" type:"float"` + + // The tenancy of the instance. + InstanceTenancy *string `locationName:"instanceTenancy" type:"string" enum:"Tenancy"` + + // The instance type on which the Reserved Instance can be used. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // Indicates whether the offering is available through the Reserved Instance + // Marketplace (resale) or AWS. If it's a Reserved Instance Marketplace offering, + // this is true. + Marketplace *bool `locationName:"marketplace" type:"boolean"` + + // The Reserved Instance offering type. + OfferingType *string `locationName:"offeringType" type:"string" enum:"OfferingTypeValues"` + + // The pricing details of the Reserved Instance offering. + PricingDetails []*PricingDetail `locationName:"pricingDetailsSet" locationNameList:"item" type:"list"` + + // The Reserved Instance product platform description. + ProductDescription *string `locationName:"productDescription" type:"string" enum:"RIProductDescription"` + + // The recurring charge tag assigned to the resource. + RecurringCharges []*RecurringCharge `locationName:"recurringCharges" locationNameList:"item" type:"list"` + + // The ID of the Reserved Instance offering. + ReservedInstancesOfferingId *string `locationName:"reservedInstancesOfferingId" type:"string"` + + // The usage price of the Reserved Instance, per hour. + UsagePrice *float64 `locationName:"usagePrice" type:"float"` +} + +// String returns the string representation +func (s ReservedInstancesOffering) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedInstancesOffering) GoString() string { + return s.String() +} + +// Contains the parameters for ResetImageAttribute. +type ResetImageAttributeInput struct { + _ struct{} `type:"structure"` + + // The attribute to reset (currently you can only reset the launch permission + // attribute). 
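+	//
+	// For example:
+	//
+	//    Attribute: aws.String(ec2.ResetImageAttributeNameLaunchPermission)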
+ Attribute *string `type:"string" required:"true" enum:"ResetImageAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the AMI. + ImageId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ResetImageAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetImageAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ResetImageAttributeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResetImageAttributeInput"} + if s.Attribute == nil { + invalidParams.Add(request.NewErrParamRequired("Attribute")) + } + if s.ImageId == nil { + invalidParams.Add(request.NewErrParamRequired("ImageId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ResetImageAttributeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ResetImageAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetImageAttributeOutput) GoString() string { + return s.String() +} + +// Contains the parameters for ResetInstanceAttribute. +type ResetInstanceAttributeInput struct { + _ struct{} `type:"structure"` + + // The attribute to reset. + // + // You can only reset the following attributes: kernel | ramdisk | sourceDestCheck. + // To change an instance attribute, use ModifyInstanceAttribute. + Attribute *string `locationName:"attribute" type:"string" required:"true" enum:"InstanceAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string" required:"true"` +} + +// String returns the string representation +func (s ResetInstanceAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetInstanceAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ResetInstanceAttributeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResetInstanceAttributeInput"} + if s.Attribute == nil { + invalidParams.Add(request.NewErrParamRequired("Attribute")) + } + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ResetInstanceAttributeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ResetInstanceAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetInstanceAttributeOutput) GoString() string { + return s.String() +} + +// Contains the parameters for ResetNetworkInterfaceAttribute. +type ResetNetworkInterfaceAttributeInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"` + + // The source/destination checking attribute. Resets the value to true. + SourceDestCheck *string `locationName:"sourceDestCheck" type:"string"` +} + +// String returns the string representation +func (s ResetNetworkInterfaceAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetNetworkInterfaceAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ResetNetworkInterfaceAttributeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResetNetworkInterfaceAttributeInput"} + if s.NetworkInterfaceId == nil { + invalidParams.Add(request.NewErrParamRequired("NetworkInterfaceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ResetNetworkInterfaceAttributeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ResetNetworkInterfaceAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetNetworkInterfaceAttributeOutput) GoString() string { + return s.String() +} + +// Contains the parameters for ResetSnapshotAttribute. +type ResetSnapshotAttributeInput struct { + _ struct{} `type:"structure"` + + // The attribute to reset. Currently, only the attribute for permission to create + // volumes can be reset. + Attribute *string `type:"string" required:"true" enum:"SnapshotAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the snapshot. 
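+	//
+	// A sketch of a complete call (the snapshot ID is hypothetical, and svc
+	// is an *ec2.EC2 client):
+	//
+	//    _, err := svc.ResetSnapshotAttribute(&ec2.ResetSnapshotAttributeInput{
+	//        Attribute:  aws.String(ec2.SnapshotAttributeNameCreateVolumePermission),
+	//        SnapshotId: aws.String("snap-1234567890abcdef0"),
+	//    })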
+ SnapshotId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ResetSnapshotAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetSnapshotAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ResetSnapshotAttributeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResetSnapshotAttributeInput"} + if s.Attribute == nil { + invalidParams.Add(request.NewErrParamRequired("Attribute")) + } + if s.SnapshotId == nil { + invalidParams.Add(request.NewErrParamRequired("SnapshotId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ResetSnapshotAttributeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ResetSnapshotAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetSnapshotAttributeOutput) GoString() string { + return s.String() +} + +// Contains the parameters for RestoreAddressToClassic. +type RestoreAddressToClassicInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The Elastic IP address. + PublicIp *string `locationName:"publicIp" type:"string" required:"true"` +} + +// String returns the string representation +func (s RestoreAddressToClassicInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreAddressToClassicInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RestoreAddressToClassicInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreAddressToClassicInput"} + if s.PublicIp == nil { + invalidParams.Add(request.NewErrParamRequired("PublicIp")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of RestoreAddressToClassic. +type RestoreAddressToClassicOutput struct { + _ struct{} `type:"structure"` + + // The Elastic IP address. + PublicIp *string `locationName:"publicIp" type:"string"` + + // The move status for the IP address. + Status *string `locationName:"status" type:"string" enum:"Status"` +} + +// String returns the string representation +func (s RestoreAddressToClassicOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreAddressToClassicOutput) GoString() string { + return s.String() +} + +// Contains the parameters for RevokeSecurityGroupEgress. +type RevokeSecurityGroupEgressInput struct { + _ struct{} `type:"structure"` + + // The CIDR IP address range. We recommend that you specify the CIDR range in + // a set of IP permissions instead. + CidrIp *string `locationName:"cidrIp" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. 
Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+ // The start of port range for the TCP and UDP protocols, or an ICMP type number.
+ // We recommend that you specify the port range in a set of IP permissions instead.
+ FromPort *int64 `locationName:"fromPort" type:"integer"`
+
+ // The ID of the security group.
+ GroupId *string `locationName:"groupId" type:"string" required:"true"`
+
+ // A set of IP permissions. You can't specify a destination security group and
+ // a CIDR IP address range.
+ IpPermissions []*IpPermission `locationName:"ipPermissions" locationNameList:"item" type:"list"`
+
+ // The IP protocol name or number. We recommend that you specify the protocol
+ // in a set of IP permissions instead.
+ IpProtocol *string `locationName:"ipProtocol" type:"string"`
+
+ // The name of a destination security group. To revoke outbound access to a
+ // destination security group, we recommend that you use a set of IP permissions
+ // instead.
+ SourceSecurityGroupName *string `locationName:"sourceSecurityGroupName" type:"string"`
+
+ // The AWS account number for a destination security group. To revoke outbound
+ // access to a destination security group, we recommend that you use a set of
+ // IP permissions instead.
+ SourceSecurityGroupOwnerId *string `locationName:"sourceSecurityGroupOwnerId" type:"string"`
+
+ // The end of port range for the TCP and UDP protocols, or an ICMP code number.
+ // We recommend that you specify the port range in a set of IP permissions instead.
+ ToPort *int64 `locationName:"toPort" type:"integer"`
+}
+
+// String returns the string representation
+func (s RevokeSecurityGroupEgressInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RevokeSecurityGroupEgressInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RevokeSecurityGroupEgressInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RevokeSecurityGroupEgressInput"}
+ if s.GroupId == nil {
+ invalidParams.Add(request.NewErrParamRequired("GroupId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type RevokeSecurityGroupEgressOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s RevokeSecurityGroupEgressOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RevokeSecurityGroupEgressOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the parameters for RevokeSecurityGroupIngress.
+type RevokeSecurityGroupIngressInput struct {
+ _ struct{} `type:"structure"`
+
+ // The CIDR IP address range. You can't specify this parameter when specifying
+ // a source security group.
+ CidrIp *string `type:"string"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+ // The start of port range for the TCP and UDP protocols, or an ICMP type number.
+ // For the ICMP type number, use -1 to specify all ICMP types.
+ FromPort *int64 `type:"integer"`
+
+ // The ID of the security group. Required for a security group in a nondefault
+ // VPC.
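+ //
+ // Editor's note: a hedged sketch of revoking a single ingress rule by group
+ // ID, not part of the generated AWS docs (the group ID and CIDR are
+ // placeholders; svc is assumed to be an *ec2.EC2 client):
+ //
+ //    _, err := svc.RevokeSecurityGroupIngress(&ec2.RevokeSecurityGroupIngressInput{
+ //        GroupId:    aws.String("sg-12345678"),
+ //        IpProtocol: aws.String("tcp"),
+ //        FromPort:   aws.Int64(22),
+ //        ToPort:     aws.Int64(22),
+ //        CidrIp:     aws.String("203.0.113.0/24"),
+ //    })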
+ GroupId *string `type:"string"` + + // [EC2-Classic, default VPC] The name of the security group. + GroupName *string `type:"string"` + + // A set of IP permissions. You can't specify a source security group and a + // CIDR IP address range. + IpPermissions []*IpPermission `locationNameList:"item" type:"list"` + + // The IP protocol name (tcp, udp, icmp) or number (see Protocol Numbers (http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml)). + // Use -1 to specify all. + IpProtocol *string `type:"string"` + + // [EC2-Classic, default VPC] The name of the source security group. You can't + // specify this parameter in combination with the following parameters: the + // CIDR IP address range, the start of the port range, the IP protocol, and + // the end of the port range. For EC2-VPC, the source security group must be + // in the same VPC. To revoke a specific rule for an IP protocol and port range, + // use a set of IP permissions instead. + SourceSecurityGroupName *string `type:"string"` + + // [EC2-Classic] The AWS account ID of the source security group, if the source + // security group is in a different account. You can't specify this parameter + // in combination with the following parameters: the CIDR IP address range, + // the IP protocol, the start of the port range, and the end of the port range. + // To revoke a specific rule for an IP protocol and port range, use a set of + // IP permissions instead. + SourceSecurityGroupOwnerId *string `type:"string"` + + // The end of port range for the TCP and UDP protocols, or an ICMP code number. + // For the ICMP code number, use -1 to specify all ICMP codes for the ICMP type. + ToPort *int64 `type:"integer"` +} + +// String returns the string representation +func (s RevokeSecurityGroupIngressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevokeSecurityGroupIngressInput) GoString() string { + return s.String() +} + +type RevokeSecurityGroupIngressOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RevokeSecurityGroupIngressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevokeSecurityGroupIngressOutput) GoString() string { + return s.String() +} + +// Describes a route in a route table. +type Route struct { + _ struct{} `type:"structure"` + + // The CIDR block used for the destination match. + DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string"` + + // The prefix of the AWS service. + DestinationPrefixListId *string `locationName:"destinationPrefixListId" type:"string"` + + // The ID of a gateway attached to your VPC. + GatewayId *string `locationName:"gatewayId" type:"string"` + + // The ID of a NAT instance in your VPC. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The AWS account ID of the owner of the instance. + InstanceOwnerId *string `locationName:"instanceOwnerId" type:"string"` + + // The ID of a NAT gateway. + NatGatewayId *string `locationName:"natGatewayId" type:"string"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // Describes how the route was created. + // + // CreateRouteTable - The route was automatically created when the route + // table was created. + // + // CreateRoute - The route was manually added to the route table. 
+ // + // EnableVgwRoutePropagation - The route was propagated by route propagation. + Origin *string `locationName:"origin" type:"string" enum:"RouteOrigin"` + + // The state of the route. The blackhole state indicates that the route's target + // isn't available (for example, the specified gateway isn't attached to the + // VPC, or the specified NAT instance has been terminated). + State *string `locationName:"state" type:"string" enum:"RouteState"` + + // The ID of the VPC peering connection. + VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"` +} + +// String returns the string representation +func (s Route) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Route) GoString() string { + return s.String() +} + +// Describes a route table. +type RouteTable struct { + _ struct{} `type:"structure"` + + // The associations between the route table and one or more subnets. + Associations []*RouteTableAssociation `locationName:"associationSet" locationNameList:"item" type:"list"` + + // Any virtual private gateway (VGW) propagating routes. + PropagatingVgws []*PropagatingVgw `locationName:"propagatingVgwSet" locationNameList:"item" type:"list"` + + // The ID of the route table. + RouteTableId *string `locationName:"routeTableId" type:"string"` + + // The routes in the route table. + Routes []*Route `locationName:"routeSet" locationNameList:"item" type:"list"` + + // Any tags assigned to the route table. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s RouteTable) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RouteTable) GoString() string { + return s.String() +} + +// Describes an association between a route table and a subnet. +type RouteTableAssociation struct { + _ struct{} `type:"structure"` + + // Indicates whether this is the main route table. + Main *bool `locationName:"main" type:"boolean"` + + // The ID of the association between a route table and a subnet. + RouteTableAssociationId *string `locationName:"routeTableAssociationId" type:"string"` + + // The ID of the route table. + RouteTableId *string `locationName:"routeTableId" type:"string"` + + // The ID of the subnet. A subnet ID is not returned for an implicit association. + SubnetId *string `locationName:"subnetId" type:"string"` +} + +// String returns the string representation +func (s RouteTableAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RouteTableAssociation) GoString() string { + return s.String() +} + +// Contains the parameters for RunInstances. +type RunInstancesInput struct { + _ struct{} `type:"structure"` + + // Reserved. + AdditionalInfo *string `locationName:"additionalInfo" type:"string"` + + // The block device mapping. + BlockDeviceMappings []*BlockDeviceMapping `locationName:"BlockDeviceMapping" locationNameList:"BlockDeviceMapping" type:"list"` + + // Unique, case-sensitive identifier you provide to ensure the idempotency of + // the request. For more information, see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). 
+ // + // Constraints: Maximum 64 ASCII characters + ClientToken *string `locationName:"clientToken" type:"string"` + + // If you set this parameter to true, you can't terminate the instance using + // the Amazon EC2 console, CLI, or API; otherwise, you can. If you set this + // parameter to true and then later want to be able to terminate the instance, + // you must first change the value of the disableApiTermination attribute to + // false using ModifyInstanceAttribute. Alternatively, if you set InstanceInitiatedShutdownBehavior + // to terminate, you can terminate the instance by running the shutdown command + // from the instance. + // + // Default: false + DisableApiTermination *bool `locationName:"disableApiTermination" type:"boolean"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Indicates whether the instance is optimized for EBS I/O. This optimization + // provides dedicated throughput to Amazon EBS and an optimized configuration + // stack to provide optimal EBS I/O performance. This optimization isn't available + // with all instance types. Additional usage charges apply when using an EBS-optimized + // instance. + // + // Default: false + EbsOptimized *bool `locationName:"ebsOptimized" type:"boolean"` + + // The IAM instance profile. + IamInstanceProfile *IamInstanceProfileSpecification `locationName:"iamInstanceProfile" type:"structure"` + + // The ID of the AMI, which you can get by calling DescribeImages. + ImageId *string `type:"string" required:"true"` + + // Indicates whether an instance stops or terminates when you initiate shutdown + // from the instance (using the operating system command for system shutdown). + // + // Default: stop + InstanceInitiatedShutdownBehavior *string `locationName:"instanceInitiatedShutdownBehavior" type:"string" enum:"ShutdownBehavior"` + + // The instance type. For more information, see Instance Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) + // in the Amazon Elastic Compute Cloud User Guide. + // + // Default: m1.small + InstanceType *string `type:"string" enum:"InstanceType"` + + // The ID of the kernel. + // + // We recommend that you use PV-GRUB instead of kernels and RAM disks. For + // more information, see PV-GRUB (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html) + // in the Amazon Elastic Compute Cloud User Guide. + KernelId *string `type:"string"` + + // The name of the key pair. You can create a key pair using CreateKeyPair or + // ImportKeyPair. + // + // If you do not specify a key pair, you can't connect to the instance unless + // you choose an AMI that is configured to allow users another way to log in. + KeyName *string `type:"string"` + + // The maximum number of instances to launch. If you specify more instances + // than Amazon EC2 can launch in the target Availability Zone, Amazon EC2 launches + // the largest possible number of instances above MinCount. + // + // Constraints: Between 1 and the maximum number you're allowed for the specified + // instance type. 
For more information about the default limits, and how to
+ // request an increase, see How many instances can I run in Amazon EC2 (http://aws.amazon.com/ec2/faqs/#How_many_instances_can_I_run_in_Amazon_EC2)
+ // in the Amazon EC2 FAQ.
+ MaxCount *int64 `type:"integer" required:"true"`
+
+ // The minimum number of instances to launch. If you specify a minimum that
+ // is more instances than Amazon EC2 can launch in the target Availability Zone,
+ // Amazon EC2 launches no instances.
+ //
+ // Constraints: Between 1 and the maximum number you're allowed for the specified
+ // instance type. For more information about the default limits, and how to
+ // request an increase, see How many instances can I run in Amazon EC2 (http://aws.amazon.com/ec2/faqs/#How_many_instances_can_I_run_in_Amazon_EC2)
+ // in the Amazon EC2 FAQ.
+ MinCount *int64 `type:"integer" required:"true"`
+
+ // The monitoring for the instance.
+ Monitoring *RunInstancesMonitoringEnabled `type:"structure"`
+
+ // One or more network interfaces.
+ NetworkInterfaces []*InstanceNetworkInterfaceSpecification `locationName:"networkInterface" locationNameList:"item" type:"list"`
+
+ // The placement for the instance.
+ Placement *Placement `type:"structure"`
+
+ // [EC2-VPC] The primary IP address. You must specify a value from the IP address
+ // range of the subnet.
+ //
+ // Only one private IP address can be designated as primary. Therefore, you
+ // can't specify this parameter if PrivateIpAddresses.n.Primary is set to true
+ // and PrivateIpAddresses.n.PrivateIpAddress is set to an IP address.
+ //
+ // Default: We select an IP address from the IP address range of the subnet.
+ PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"`
+
+ // The ID of the RAM disk.
+ //
+ // We recommend that you use PV-GRUB instead of kernels and RAM disks. For
+ // more information, see PV-GRUB (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html)
+ // in the Amazon Elastic Compute Cloud User Guide.
+ RamdiskId *string `type:"string"`
+
+ // One or more security group IDs. You can create a security group using CreateSecurityGroup.
+ //
+ // Default: Amazon EC2 uses the default security group.
+ SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"`
+
+ // [EC2-Classic, default VPC] One or more security group names. For a nondefault
+ // VPC, you must use security group IDs instead.
+ //
+ // Default: Amazon EC2 uses the default security group.
+ SecurityGroups []*string `locationName:"SecurityGroup" locationNameList:"SecurityGroup" type:"list"`
+
+ // [EC2-VPC] The ID of the subnet to launch the instance into.
+ SubnetId *string `type:"string"`
+
+ // The user data to make available to the instance. For more information, see
+ // Running Commands on Your Linux Instance at Launch (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html)
+ // (Linux) and Adding User Data (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2-instance-metadata.html#instancedata-add-user-data)
+ // (Windows). If you are using an AWS SDK or command line tool, Base64-encoding
+ // is performed for you, and you can load the text from a file. Otherwise, you
+ // must provide Base64-encoded text.
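+ //
+ // Editor's note: a hedged sketch of encoding user data explicitly with the
+ // Go standard library, not part of the generated AWS docs (the script is a
+ // placeholder; input is assumed to be a *ec2.RunInstancesInput):
+ //
+ //    script := "#!/bin/bash\nyum update -y\n"
+ //    input.UserData = aws.String(base64.StdEncoding.EncodeToString([]byte(script)))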
+ UserData *string `type:"string"` +} + +// String returns the string representation +func (s RunInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RunInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RunInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RunInstancesInput"} + if s.ImageId == nil { + invalidParams.Add(request.NewErrParamRequired("ImageId")) + } + if s.MaxCount == nil { + invalidParams.Add(request.NewErrParamRequired("MaxCount")) + } + if s.MinCount == nil { + invalidParams.Add(request.NewErrParamRequired("MinCount")) + } + if s.Monitoring != nil { + if err := s.Monitoring.Validate(); err != nil { + invalidParams.AddNested("Monitoring", err.(request.ErrInvalidParams)) + } + } + if s.NetworkInterfaces != nil { + for i, v := range s.NetworkInterfaces { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "NetworkInterfaces", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Describes the monitoring for the instance. +type RunInstancesMonitoringEnabled struct { + _ struct{} `type:"structure"` + + // Indicates whether monitoring is enabled for the instance. + Enabled *bool `locationName:"enabled" type:"boolean" required:"true"` +} + +// String returns the string representation +func (s RunInstancesMonitoringEnabled) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RunInstancesMonitoringEnabled) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RunInstancesMonitoringEnabled) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RunInstancesMonitoringEnabled"} + if s.Enabled == nil { + invalidParams.Add(request.NewErrParamRequired("Enabled")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the parameters for RunScheduledInstances. +type RunScheduledInstancesInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that ensures the idempotency of the request. + // For more information, see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `type:"string" idempotencyToken:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The number of instances. + // + // Default: 1 + InstanceCount *int64 `type:"integer"` + + // The launch specification. + LaunchSpecification *ScheduledInstancesLaunchSpecification `type:"structure" required:"true"` + + // The Scheduled Instance ID. 
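+ //
+ // Editor's note: a hedged sketch of a minimal RunScheduledInstances call,
+ // not part of the generated AWS docs (all IDs are placeholders; svc is
+ // assumed to be an *ec2.EC2 client):
+ //
+ //    out, err := svc.RunScheduledInstances(&ec2.RunScheduledInstancesInput{
+ //        InstanceCount:       aws.Int64(1),
+ //        ScheduledInstanceId: aws.String("sci-1234567890abcdef0"),
+ //        LaunchSpecification: &ec2.ScheduledInstancesLaunchSpecification{
+ //            ImageId:  aws.String("ami-12345678"),
+ //            SubnetId: aws.String("subnet-12345678"),
+ //        },
+ //    })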
+ ScheduledInstanceId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RunScheduledInstancesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RunScheduledInstancesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RunScheduledInstancesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RunScheduledInstancesInput"}
+ if s.LaunchSpecification == nil {
+ invalidParams.Add(request.NewErrParamRequired("LaunchSpecification"))
+ }
+ if s.ScheduledInstanceId == nil {
+ invalidParams.Add(request.NewErrParamRequired("ScheduledInstanceId"))
+ }
+ if s.LaunchSpecification != nil {
+ if err := s.LaunchSpecification.Validate(); err != nil {
+ invalidParams.AddNested("LaunchSpecification", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Contains the output of RunScheduledInstances.
+type RunScheduledInstancesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The IDs of the newly launched instances.
+ InstanceIdSet []*string `locationName:"instanceIdSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s RunScheduledInstancesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RunScheduledInstancesOutput) GoString() string {
+ return s.String()
+}
+
+// Describes the storage parameters for Amazon S3 buckets for an instance store-backed
+// AMI.
+type S3Storage struct {
+ _ struct{} `type:"structure"`
+
+ // The access key ID of the owner of the bucket. Before you specify a value
+ // for your access key ID, review and follow the guidance in Best Practices
+ // for Managing AWS Access Keys (http://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html).
+ AWSAccessKeyId *string `type:"string"`
+
+ // The bucket in which to store the AMI. You can specify a bucket that you already
+ // own or a new bucket that Amazon EC2 creates on your behalf. If you specify
+ // a bucket that belongs to someone else, Amazon EC2 returns an error.
+ Bucket *string `locationName:"bucket" type:"string"`
+
+ // The beginning of the file name of the AMI.
+ Prefix *string `locationName:"prefix" type:"string"`
+
+ // An Amazon S3 upload policy that gives Amazon EC2 permission to upload items
+ // into Amazon S3 on your behalf.
+ //
+ // UploadPolicy is automatically base64 encoded/decoded by the SDK.
+ UploadPolicy []byte `locationName:"uploadPolicy" type:"blob"`
+
+ // The signature of the JSON document.
+ UploadPolicySignature *string `locationName:"uploadPolicySignature" type:"string"`
+}
+
+// String returns the string representation
+func (s S3Storage) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s S3Storage) GoString() string {
+ return s.String()
+}
+
+// Describes a Scheduled Instance.
+type ScheduledInstance struct {
+ _ struct{} `type:"structure"`
+
+ // The Availability Zone.
+ AvailabilityZone *string `locationName:"availabilityZone" type:"string"`
+
+ // The date when the Scheduled Instance was purchased.
+ CreateDate *time.Time `locationName:"createDate" type:"timestamp" timestampFormat:"iso8601"`
+
+ // The hourly price for a single instance.
+ HourlyPrice *string `locationName:"hourlyPrice" type:"string"` + + // The number of instances. + InstanceCount *int64 `locationName:"instanceCount" type:"integer"` + + // The instance type. + InstanceType *string `locationName:"instanceType" type:"string"` + + // The network platform (EC2-Classic or EC2-VPC). + NetworkPlatform *string `locationName:"networkPlatform" type:"string"` + + // The time for the next schedule to start. + NextSlotStartTime *time.Time `locationName:"nextSlotStartTime" type:"timestamp" timestampFormat:"iso8601"` + + // The platform (Linux/UNIX or Windows). + Platform *string `locationName:"platform" type:"string"` + + // The time that the previous schedule ended or will end. + PreviousSlotEndTime *time.Time `locationName:"previousSlotEndTime" type:"timestamp" timestampFormat:"iso8601"` + + // The schedule recurrence. + Recurrence *ScheduledInstanceRecurrence `locationName:"recurrence" type:"structure"` + + // The Scheduled Instance ID. + ScheduledInstanceId *string `locationName:"scheduledInstanceId" type:"string"` + + // The number of hours in the schedule. + SlotDurationInHours *int64 `locationName:"slotDurationInHours" type:"integer"` + + // The end date for the Scheduled Instance. + TermEndDate *time.Time `locationName:"termEndDate" type:"timestamp" timestampFormat:"iso8601"` + + // The start date for the Scheduled Instance. + TermStartDate *time.Time `locationName:"termStartDate" type:"timestamp" timestampFormat:"iso8601"` + + // The total number of hours for a single instance for the entire term. + TotalScheduledInstanceHours *int64 `locationName:"totalScheduledInstanceHours" type:"integer"` +} + +// String returns the string representation +func (s ScheduledInstance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstance) GoString() string { + return s.String() +} + +// Describes a schedule that is available for your Scheduled Instances. +type ScheduledInstanceAvailability struct { + _ struct{} `type:"structure"` + + // The Availability Zone. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The number of available instances. + AvailableInstanceCount *int64 `locationName:"availableInstanceCount" type:"integer"` + + // The time period for the first schedule to start. + FirstSlotStartTime *time.Time `locationName:"firstSlotStartTime" type:"timestamp" timestampFormat:"iso8601"` + + // The hourly price for a single instance. + HourlyPrice *string `locationName:"hourlyPrice" type:"string"` + + // The instance type. You can specify one of the C3, C4, M4, or R3 instance + // types. + InstanceType *string `locationName:"instanceType" type:"string"` + + // The maximum term. The only possible value is 365 days. + MaxTermDurationInDays *int64 `locationName:"maxTermDurationInDays" type:"integer"` + + // The minimum term. The only possible value is 365 days. + MinTermDurationInDays *int64 `locationName:"minTermDurationInDays" type:"integer"` + + // The network platform (EC2-Classic or EC2-VPC). + NetworkPlatform *string `locationName:"networkPlatform" type:"string"` + + // The platform (Linux/UNIX or Windows). + Platform *string `locationName:"platform" type:"string"` + + // The purchase token. This token expires in two hours. + PurchaseToken *string `locationName:"purchaseToken" type:"string"` + + // The schedule recurrence. + Recurrence *ScheduledInstanceRecurrence `locationName:"recurrence" type:"structure"` + + // The number of hours in the schedule. 
+ SlotDurationInHours *int64 `locationName:"slotDurationInHours" type:"integer"` + + // The total number of hours for a single instance for the entire term. + TotalScheduledInstanceHours *int64 `locationName:"totalScheduledInstanceHours" type:"integer"` +} + +// String returns the string representation +func (s ScheduledInstanceAvailability) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstanceAvailability) GoString() string { + return s.String() +} + +// Describes the recurring schedule for a Scheduled Instance. +type ScheduledInstanceRecurrence struct { + _ struct{} `type:"structure"` + + // The frequency (Daily, Weekly, or Monthly). + Frequency *string `locationName:"frequency" type:"string"` + + // The interval quantity. The interval unit depends on the value of frequency. + // For example, every 2 weeks or every 2 months. + Interval *int64 `locationName:"interval" type:"integer"` + + // The days. For a monthly schedule, this is one or more days of the month (1-31). + // For a weekly schedule, this is one or more days of the week (1-7, where 1 + // is Sunday). + OccurrenceDaySet []*int64 `locationName:"occurrenceDaySet" locationNameList:"item" type:"list"` + + // Indicates whether the occurrence is relative to the end of the specified + // week or month. + OccurrenceRelativeToEnd *bool `locationName:"occurrenceRelativeToEnd" type:"boolean"` + + // The unit for occurrenceDaySet (DayOfWeek or DayOfMonth). + OccurrenceUnit *string `locationName:"occurrenceUnit" type:"string"` +} + +// String returns the string representation +func (s ScheduledInstanceRecurrence) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstanceRecurrence) GoString() string { + return s.String() +} + +// Describes the recurring schedule for a Scheduled Instance. +type ScheduledInstanceRecurrenceRequest struct { + _ struct{} `type:"structure"` + + // The frequency (Daily, Weekly, or Monthly). + Frequency *string `type:"string"` + + // The interval quantity. The interval unit depends on the value of Frequency. + // For example, every 2 weeks or every 2 months. + Interval *int64 `type:"integer"` + + // The days. For a monthly schedule, this is one or more days of the month (1-31). + // For a weekly schedule, this is one or more days of the week (1-7, where 1 + // is Sunday). You can't specify this value with a daily schedule. If the occurrence + // is relative to the end of the month, you can specify only a single day. + OccurrenceDays []*int64 `locationName:"OccurrenceDay" locationNameList:"OccurenceDay" type:"list"` + + // Indicates whether the occurrence is relative to the end of the specified + // week or month. You can't specify this value with a daily schedule. + OccurrenceRelativeToEnd *bool `type:"boolean"` + + // The unit for OccurrenceDays (DayOfWeek or DayOfMonth). This value is required + // for a monthly schedule. You can't specify DayOfWeek with a weekly schedule. + // You can't specify this value with a daily schedule. + OccurrenceUnit *string `type:"string"` +} + +// String returns the string representation +func (s ScheduledInstanceRecurrenceRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstanceRecurrenceRequest) GoString() string { + return s.String() +} + +// Describes a block device mapping for a Scheduled Instance. 
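+//
+// Editor's note: a hedged construction sketch, not part of the generated AWS
+// docs (device name and sizes are placeholders):
+//
+//    mapping := &ec2.ScheduledInstancesBlockDeviceMapping{
+//        DeviceName: aws.String("/dev/sdb"),
+//        Ebs: &ec2.ScheduledInstancesEbs{
+//            DeleteOnTermination: aws.Bool(true),
+//            VolumeSize:          aws.Int64(100),
+//            VolumeType:          aws.String("gp2"),
+//        },
+//    }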
+type ScheduledInstancesBlockDeviceMapping struct {
+ _ struct{} `type:"structure"`
+
+ // The device name exposed to the instance (for example, /dev/sdh or xvdh).
+ DeviceName *string `type:"string"`
+
+ // Parameters used to set up EBS volumes automatically when the instance is
+ // launched.
+ Ebs *ScheduledInstancesEbs `type:"structure"`
+
+ // Suppresses the specified device included in the block device mapping of the
+ // AMI.
+ NoDevice *string `type:"string"`
+
+ // The virtual device name (ephemeralN). Instance store volumes are numbered
+ // starting from 0. An instance type with two available instance store volumes
+ // can specify mappings for ephemeral0 and ephemeral1. The number of available
+ // instance store volumes depends on the instance type. After you connect to
+ // the instance, you must mount the volume.
+ //
+ // Constraints: For M3 instances, you must specify instance store volumes in
+ // the block device mapping for the instance. When you launch an M3 instance,
+ // we ignore any instance store volumes specified in the block device mapping
+ // for the AMI.
+ VirtualName *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ScheduledInstancesBlockDeviceMapping) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ScheduledInstancesBlockDeviceMapping) GoString() string {
+ return s.String()
+}
+
+// Describes an EBS volume for a Scheduled Instance.
+type ScheduledInstancesEbs struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether the volume is deleted on instance termination.
+ DeleteOnTermination *bool `type:"boolean"`
+
+ // Indicates whether the volume is encrypted. You can attach encrypted volumes
+ // only to instances that support them.
+ Encrypted *bool `type:"boolean"`
+
+ // The number of I/O operations per second (IOPS) that the volume supports.
+ // For io1 volumes, this represents the number of IOPS that are provisioned
+ // for the volume. For gp2 volumes, this represents the baseline performance
+ // of the volume and the rate at which the volume accumulates I/O credits for
+ // bursting. For more information about gp2 baseline performance, I/O credits,
+ // and bursting, see Amazon EBS Volume Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html)
+ // in the Amazon Elastic Compute Cloud User Guide.
+ //
+ // Constraint: Range is 100-20000 IOPS for io1 volumes and 100-10000 IOPS for
+ // gp2 volumes.
+ //
+ // Condition: This parameter is required for requests to create io1 volumes;
+ // it is not used in requests to create gp2, st1, sc1, or standard volumes.
+ Iops *int64 `type:"integer"`
+
+ // The ID of the snapshot.
+ SnapshotId *string `type:"string"`
+
+ // The size of the volume, in GiB.
+ //
+ // Default: If you're creating the volume from a snapshot and don't specify
+ // a volume size, the default is the snapshot size.
+ VolumeSize *int64 `type:"integer"`
+
+ // The volume type. gp2 for General Purpose SSD, io1 for Provisioned IOPS SSD,
+ // st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard for Magnetic.
+ //
+ // Default: standard
+ VolumeType *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ScheduledInstancesEbs) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ScheduledInstancesEbs) GoString() string {
+ return s.String()
+}
+
+// Describes an IAM instance profile for a Scheduled Instance.
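+//
+// Editor's note: a hedged sketch, not part of the generated AWS docs (the
+// ARN is a placeholder; either Arn or Name identifies the profile):
+//
+//    profile := &ec2.ScheduledInstancesIamInstanceProfile{
+//        Arn: aws.String("arn:aws:iam::123456789012:instance-profile/my-profile"),
+//    }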
+type ScheduledInstancesIamInstanceProfile struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). + Arn *string `type:"string"` + + // The name. + Name *string `type:"string"` +} + +// String returns the string representation +func (s ScheduledInstancesIamInstanceProfile) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstancesIamInstanceProfile) GoString() string { + return s.String() +} + +// Describes the launch specification for a Scheduled Instance. +// +// If you are launching the Scheduled Instance in EC2-VPC, you must specify +// the ID of the subnet. You can specify the subnet using either SubnetId or +// NetworkInterface. +type ScheduledInstancesLaunchSpecification struct { + _ struct{} `type:"structure"` + + // One or more block device mapping entries. + BlockDeviceMappings []*ScheduledInstancesBlockDeviceMapping `locationName:"BlockDeviceMapping" locationNameList:"BlockDeviceMapping" type:"list"` + + // Indicates whether the instances are optimized for EBS I/O. This optimization + // provides dedicated throughput to Amazon EBS and an optimized configuration + // stack to provide optimal EBS I/O performance. This optimization isn't available + // with all instance types. Additional usage charges apply when using an EBS-optimized + // instance. + // + // Default: false + EbsOptimized *bool `type:"boolean"` + + // The IAM instance profile. + IamInstanceProfile *ScheduledInstancesIamInstanceProfile `type:"structure"` + + // The ID of the Amazon Machine Image (AMI). + ImageId *string `type:"string" required:"true"` + + // The instance type. + InstanceType *string `type:"string"` + + // The ID of the kernel. + KernelId *string `type:"string"` + + // The name of the key pair. + KeyName *string `type:"string"` + + // Enable or disable monitoring for the instances. + Monitoring *ScheduledInstancesMonitoring `type:"structure"` + + // One or more network interfaces. + NetworkInterfaces []*ScheduledInstancesNetworkInterface `locationName:"NetworkInterface" locationNameList:"NetworkInterface" type:"list"` + + // The placement information. + Placement *ScheduledInstancesPlacement `type:"structure"` + + // The ID of the RAM disk. + RamdiskId *string `type:"string"` + + // The IDs of one or more security groups. + SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"` + + // The ID of the subnet in which to launch the instances. + SubnetId *string `type:"string"` + + // The base64-encoded MIME user data. + UserData *string `type:"string"` +} + +// String returns the string representation +func (s ScheduledInstancesLaunchSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstancesLaunchSpecification) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ScheduledInstancesLaunchSpecification) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ScheduledInstancesLaunchSpecification"} + if s.ImageId == nil { + invalidParams.Add(request.NewErrParamRequired("ImageId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Describes whether monitoring is enabled for a Scheduled Instance. +type ScheduledInstancesMonitoring struct { + _ struct{} `type:"structure"` + + // Indicates whether monitoring is enabled. 
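+ //
+ // Editor's note: a hedged sketch of enabling monitoring in a launch
+ // specification, not part of the generated AWS docs (spec is assumed to be
+ // a *ec2.ScheduledInstancesLaunchSpecification):
+ //
+ //    spec.Monitoring = &ec2.ScheduledInstancesMonitoring{Enabled: aws.Bool(true)}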
+ Enabled *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s ScheduledInstancesMonitoring) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ScheduledInstancesMonitoring) GoString() string {
+ return s.String()
+}
+
+// Describes a network interface for a Scheduled Instance.
+type ScheduledInstancesNetworkInterface struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether to assign a public IP address to instances launched in
+ // a VPC. The public IP address can only be assigned to a network interface
+ // for eth0, and can only be assigned to a new network interface, not an existing
+ // one. You cannot specify more than one network interface in the request. If
+ // launching into a default subnet, the default value is true.
+ AssociatePublicIpAddress *bool `type:"boolean"`
+
+ // Indicates whether to delete the interface when the instance is terminated.
+ DeleteOnTermination *bool `type:"boolean"`
+
+ // The description.
+ Description *string `type:"string"`
+
+ // The index of the device for the network interface attachment.
+ DeviceIndex *int64 `type:"integer"`
+
+ // The IDs of one or more security groups.
+ Groups []*string `locationName:"Group" locationNameList:"SecurityGroupId" type:"list"`
+
+ // The ID of the network interface.
+ NetworkInterfaceId *string `type:"string"`
+
+ // The IP address of the network interface within the subnet.
+ PrivateIpAddress *string `type:"string"`
+
+ // The private IP addresses.
+ PrivateIpAddressConfigs []*ScheduledInstancesPrivateIpAddressConfig `locationName:"PrivateIpAddressConfig" locationNameList:"PrivateIpAddressConfigSet" type:"list"`
+
+ // The number of secondary private IP addresses.
+ SecondaryPrivateIpAddressCount *int64 `type:"integer"`
+
+ // The ID of the subnet.
+ SubnetId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ScheduledInstancesNetworkInterface) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ScheduledInstancesNetworkInterface) GoString() string {
+ return s.String()
+}
+
+// Describes the placement for a Scheduled Instance.
+type ScheduledInstancesPlacement struct {
+ _ struct{} `type:"structure"`
+
+ // The Availability Zone.
+ AvailabilityZone *string `type:"string"`
+
+ // The name of the placement group.
+ GroupName *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ScheduledInstancesPlacement) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ScheduledInstancesPlacement) GoString() string {
+ return s.String()
+}
+
+// Describes a private IP address for a Scheduled Instance.
+type ScheduledInstancesPrivateIpAddressConfig struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether this is a primary IP address. Otherwise, this is a secondary
+ // IP address.
+ Primary *bool `type:"boolean"`
+
+ // The IP address.
+ PrivateIpAddress *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ScheduledInstancesPrivateIpAddressConfig) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ScheduledInstancesPrivateIpAddressConfig) GoString() string {
+ return s.String()
+}
+
+// Describes a security group.
+type SecurityGroup struct {
+ _ struct{} `type:"structure"`
+
+ // A description of the security group.
+ Description *string `locationName:"groupDescription" type:"string"` + + // The ID of the security group. + GroupId *string `locationName:"groupId" type:"string"` + + // The name of the security group. + GroupName *string `locationName:"groupName" type:"string"` + + // One or more inbound rules associated with the security group. + IpPermissions []*IpPermission `locationName:"ipPermissions" locationNameList:"item" type:"list"` + + // [EC2-VPC] One or more outbound rules associated with the security group. + IpPermissionsEgress []*IpPermission `locationName:"ipPermissionsEgress" locationNameList:"item" type:"list"` + + // The AWS account ID of the owner of the security group. + OwnerId *string `locationName:"ownerId" type:"string"` + + // Any tags assigned to the security group. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // [EC2-VPC] The ID of the VPC for the security group. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s SecurityGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SecurityGroup) GoString() string { + return s.String() +} + +// Describes a VPC with a security group that references your security group. +type SecurityGroupReference struct { + _ struct{} `type:"structure"` + + // The ID of your security group. + GroupId *string `locationName:"groupId" type:"string" required:"true"` + + // The ID of the VPC with the referencing security group. + ReferencingVpcId *string `locationName:"referencingVpcId" type:"string" required:"true"` + + // The ID of the VPC peering connection. + VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"` +} + +// String returns the string representation +func (s SecurityGroupReference) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SecurityGroupReference) GoString() string { + return s.String() +} + +// Describes the time period for a Scheduled Instance to start its first schedule. +// The time period must span less than one day. +type SlotDateTimeRangeRequest struct { + _ struct{} `type:"structure"` + + // The earliest date and time, in UTC, for the Scheduled Instance to start. + EarliestTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The latest date and time, in UTC, for the Scheduled Instance to start. This + // value must be later than or equal to the earliest date and at most three + // months in the future. + LatestTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` +} + +// String returns the string representation +func (s SlotDateTimeRangeRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SlotDateTimeRangeRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SlotDateTimeRangeRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SlotDateTimeRangeRequest"} + if s.EarliestTime == nil { + invalidParams.Add(request.NewErrParamRequired("EarliestTime")) + } + if s.LatestTime == nil { + invalidParams.Add(request.NewErrParamRequired("LatestTime")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Describes the time period for a Scheduled Instance to start its first schedule. 
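+//
+// Editor's note: a hedged sketch of a one-week start window beginning now,
+// not part of the generated AWS docs:
+//
+//    window := &ec2.SlotStartTimeRangeRequest{
+//        EarliestTime: aws.Time(time.Now()),
+//        LatestTime:   aws.Time(time.Now().Add(7 * 24 * time.Hour)),
+//    }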
+type SlotStartTimeRangeRequest struct { + _ struct{} `type:"structure"` + + // The earliest date and time, in UTC, for the Scheduled Instance to start. + EarliestTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The latest date and time, in UTC, for the Scheduled Instance to start. + LatestTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s SlotStartTimeRangeRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SlotStartTimeRangeRequest) GoString() string { + return s.String() +} + +// Describes a snapshot. +type Snapshot struct { + _ struct{} `type:"structure"` + + // The data encryption key identifier for the snapshot. This value is a unique + // identifier that corresponds to the data encryption key that was used to encrypt + // the original volume or snapshot copy. Because data encryption keys are inherited + // by volumes created from snapshots, and vice versa, if snapshots share the + // same data encryption key identifier, then they belong to the same volume/snapshot + // lineage. This parameter is only returned by the DescribeSnapshots API operation. + DataEncryptionKeyId *string `locationName:"dataEncryptionKeyId" type:"string"` + + // The description for the snapshot. + Description *string `locationName:"description" type:"string"` + + // Indicates whether the snapshot is encrypted. + Encrypted *bool `locationName:"encrypted" type:"boolean"` + + // The full ARN of the AWS Key Management Service (AWS KMS) customer master + // key (CMK) that was used to protect the volume encryption key for the parent + // volume. + KmsKeyId *string `locationName:"kmsKeyId" type:"string"` + + // The AWS account alias (for example, amazon, self) or AWS account ID that + // owns the snapshot. + OwnerAlias *string `locationName:"ownerAlias" type:"string"` + + // The AWS account ID of the EBS snapshot owner. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The progress of the snapshot, as a percentage. + Progress *string `locationName:"progress" type:"string"` + + // The ID of the snapshot. Each snapshot receives a unique identifier when it + // is created. + SnapshotId *string `locationName:"snapshotId" type:"string"` + + // The time stamp when the snapshot was initiated. + StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601"` + + // The snapshot state. + State *string `locationName:"status" type:"string" enum:"SnapshotState"` + + // Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy + // operation fails (for example, if the proper AWS Key Management Service (AWS + // KMS) permissions are not obtained) this field displays error state details + // to help you diagnose why the error occurred. This parameter is only returned + // by the DescribeSnapshots API operation. + StateMessage *string `locationName:"statusMessage" type:"string"` + + // Any tags assigned to the snapshot. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the volume that was used to create the snapshot. + VolumeId *string `locationName:"volumeId" type:"string"` + + // The size of the volume, in GiB. 
+ VolumeSize *int64 `locationName:"volumeSize" type:"integer"`
+}
+
+// String returns the string representation
+func (s Snapshot) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Snapshot) GoString() string {
+ return s.String()
+}
+
+// Describes the snapshot created from the imported disk.
+type SnapshotDetail struct {
+ _ struct{} `type:"structure"`
+
+ // A description for the snapshot.
+ Description *string `locationName:"description" type:"string"`
+
+ // The block device mapping for the snapshot.
+ DeviceName *string `locationName:"deviceName" type:"string"`
+
+ // The size of the disk in the snapshot, in GiB.
+ DiskImageSize *float64 `locationName:"diskImageSize" type:"double"`
+
+ // The format of the disk image from which the snapshot is created.
+ Format *string `locationName:"format" type:"string"`
+
+ // The percentage of progress for the task.
+ Progress *string `locationName:"progress" type:"string"`
+
+ // The snapshot ID of the disk being imported.
+ SnapshotId *string `locationName:"snapshotId" type:"string"`
+
+ // A brief status of the snapshot creation.
+ Status *string `locationName:"status" type:"string"`
+
+ // A detailed status message for the snapshot creation.
+ StatusMessage *string `locationName:"statusMessage" type:"string"`
+
+ // The URL used to access the disk image.
+ Url *string `locationName:"url" type:"string"`
+
+ // The S3 bucket for the disk image.
+ UserBucket *UserBucketDetails `locationName:"userBucket" type:"structure"`
+}
+
+// String returns the string representation
+func (s SnapshotDetail) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SnapshotDetail) GoString() string {
+ return s.String()
+}
+
+// The disk container object for the import snapshot request.
+type SnapshotDiskContainer struct {
+ _ struct{} `type:"structure"`
+
+ // The description of the disk image being imported.
+ Description *string `type:"string"`
+
+ // The format of the disk image being imported.
+ //
+ // Valid values: RAW | VHD | VMDK | OVA
+ Format *string `type:"string"`
+
+ // The URL to the Amazon S3-based disk image being imported. It can either be
+ // an https URL (https://..) or an Amazon S3 URL (s3://..).
+ Url *string `type:"string"`
+
+ // The S3 bucket for the disk image.
+ UserBucket *UserBucket `type:"structure"`
+}
+
+// String returns the string representation
+func (s SnapshotDiskContainer) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SnapshotDiskContainer) GoString() string {
+ return s.String()
+}
+
+// Details about the import snapshot task.
+type SnapshotTaskDetail struct {
+ _ struct{} `type:"structure"`
+
+ // The description of the snapshot.
+ Description *string `locationName:"description" type:"string"`
+
+ // The size of the disk in the snapshot, in GiB.
+ DiskImageSize *float64 `locationName:"diskImageSize" type:"double"`
+
+ // The format of the disk image from which the snapshot is created.
+ Format *string `locationName:"format" type:"string"`
+
+ // The percentage of completion for the import snapshot task.
+ Progress *string `locationName:"progress" type:"string"`
+
+ // The snapshot ID of the disk being imported.
+ SnapshotId *string `locationName:"snapshotId" type:"string"`
+
+ // A brief status for the import snapshot task.
+ Status *string `locationName:"status" type:"string"`
+
+ // A detailed status message for the import snapshot task.
+ StatusMessage *string `locationName:"statusMessage" type:"string"`
+
+ // The URL of the disk image from which the snapshot is created.
+ Url *string `locationName:"url" type:"string"`
+
+ // The S3 bucket for the disk image.
+ UserBucket *UserBucketDetails `locationName:"userBucket" type:"structure"`
+}
+
+// String returns the string representation
+func (s SnapshotTaskDetail) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SnapshotTaskDetail) GoString() string {
+ return s.String()
+}
+
+// Describes the data feed for a Spot instance.
+type SpotDatafeedSubscription struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon S3 bucket where the Spot instance data feed is located.
+ Bucket *string `locationName:"bucket" type:"string"`
+
+ // The fault codes for the Spot instance request, if any.
+ Fault *SpotInstanceStateFault `locationName:"fault" type:"structure"`
+
+ // The AWS account ID of the account.
+ OwnerId *string `locationName:"ownerId" type:"string"`
+
+ // The prefix that is prepended to data feed files.
+ Prefix *string `locationName:"prefix" type:"string"`
+
+ // The state of the Spot instance data feed subscription.
+ State *string `locationName:"state" type:"string" enum:"DatafeedSubscriptionState"`
+}
+
+// String returns the string representation
+func (s SpotDatafeedSubscription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SpotDatafeedSubscription) GoString() string {
+ return s.String()
+}
+
+// Describes the launch specification for one or more Spot instances.
+type SpotFleetLaunchSpecification struct {
+ _ struct{} `type:"structure"`
+
+ // Deprecated.
+ AddressingType *string `locationName:"addressingType" type:"string"`
+
+ // One or more block device mapping entries.
+ BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"`
+
+ // Indicates whether the instances are optimized for EBS I/O. This optimization
+ // provides dedicated throughput to Amazon EBS and an optimized configuration
+ // stack to provide optimal EBS I/O performance. This optimization isn't available
+ // with all instance types. Additional usage charges apply when using an EBS-optimized
+ // instance.
+ //
+ // Default: false
+ EbsOptimized *bool `locationName:"ebsOptimized" type:"boolean"`
+
+ // The IAM instance profile.
+ IamInstanceProfile *IamInstanceProfileSpecification `locationName:"iamInstanceProfile" type:"structure"`
+
+ // The ID of the AMI.
+ ImageId *string `locationName:"imageId" type:"string"`
+
+ // The instance type.
+ InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"`
+
+ // The ID of the kernel.
+ KernelId *string `locationName:"kernelId" type:"string"`
+
+ // The name of the key pair.
+ KeyName *string `locationName:"keyName" type:"string"`
+
+ // Enable or disable monitoring for the instances.
+ Monitoring *SpotFleetMonitoring `locationName:"monitoring" type:"structure"`
+
+ // One or more network interfaces.
+ NetworkInterfaces []*InstanceNetworkInterfaceSpecification `locationName:"networkInterfaceSet" locationNameList:"item" type:"list"`
+
+ // The placement information.
+ Placement *SpotPlacement `locationName:"placement" type:"structure"`
+
+ // The ID of the RAM disk.
+ RamdiskId *string `locationName:"ramdiskId" type:"string"`
+
+ // One or more security groups. When requesting instances in a VPC, you must
+ // specify the IDs of the security groups.
When requesting instances in EC2-Classic,
+ // you can specify the names or the IDs of the security groups.
+ SecurityGroups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"`
+
+ // The bid price per unit hour for the specified instance type. If this value
+ // is not specified, the default is the Spot bid price specified for the fleet.
+ // To determine the bid price per unit hour, divide the Spot bid price by the
+ // value of WeightedCapacity.
+ SpotPrice *string `locationName:"spotPrice" type:"string"`
+
+ // The ID of the subnet in which to launch the instances. To specify multiple
+ // subnets, separate them using commas; for example, "subnet-a61dafcf, subnet-65ea5f08".
+ SubnetId *string `locationName:"subnetId" type:"string"`
+
+ // The user data to make available to the instances. If you are using an AWS
+ // SDK or command line tool, Base64-encoding is performed for you, and you can
+ // load the text from a file. Otherwise, you must provide Base64-encoded text.
+ UserData *string `locationName:"userData" type:"string"`
+
+ // The number of units provided by the specified instance type. These are the
+ // same units that you chose to set the target capacity in terms of (instances
+ // or a performance characteristic such as vCPUs, memory, or I/O).
+ //
+ // If the target capacity divided by this value is not a whole number, we round
+ // the number of instances to the next whole number. If this value is not specified,
+ // the default is 1.
+ WeightedCapacity *float64 `locationName:"weightedCapacity" type:"double"`
+}
+
+// String returns the string representation
+func (s SpotFleetLaunchSpecification) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SpotFleetLaunchSpecification) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SpotFleetLaunchSpecification) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "SpotFleetLaunchSpecification"}
+ if s.NetworkInterfaces != nil {
+ for i, v := range s.NetworkInterfaces {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "NetworkInterfaces", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Describes whether monitoring is enabled.
+type SpotFleetMonitoring struct {
+ _ struct{} `type:"structure"`
+
+ // Enables monitoring for the instance.
+ //
+ // Default: false
+ Enabled *bool `locationName:"enabled" type:"boolean"`
+}
+
+// String returns the string representation
+func (s SpotFleetMonitoring) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SpotFleetMonitoring) GoString() string {
+ return s.String()
+}
+
+// Describes a Spot fleet request.
+type SpotFleetRequestConfig struct {
+ _ struct{} `type:"structure"`
+
+ // The creation date and time of the request.
+ CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"iso8601" required:"true"`
+
+ // Information about the configuration of the Spot fleet request.
+ SpotFleetRequestConfig *SpotFleetRequestConfigData `locationName:"spotFleetRequestConfig" type:"structure" required:"true"`
+
+ // The ID of the Spot fleet request.
+ SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"`
+
+ // The state of the Spot fleet request.
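+ //
+ // Editor's note: a hedged sketch of checking this state against the generated
+ // BatchState constants, not part of the generated AWS docs (cfg is assumed
+ // to be a *ec2.SpotFleetRequestConfig):
+ //
+ //    if aws.StringValue(cfg.SpotFleetRequestState) == ec2.BatchStateActive {
+ //        // the fleet is active
+ //    }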
+ SpotFleetRequestState *string `locationName:"spotFleetRequestState" type:"string" required:"true" enum:"BatchState"` +} + +// String returns the string representation +func (s SpotFleetRequestConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotFleetRequestConfig) GoString() string { + return s.String() +} + +// Describes the configuration of a Spot fleet request. +type SpotFleetRequestConfigData struct { + _ struct{} `type:"structure"` + + // Indicates how to allocate the target capacity across the Spot pools specified + // by the Spot fleet request. The default is lowestPrice. + AllocationStrategy *string `locationName:"allocationStrategy" type:"string" enum:"AllocationStrategy"` + + // A unique, case-sensitive identifier you provide to ensure idempotency of + // your listings. This helps avoid duplicate listings. For more information, + // see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `locationName:"clientToken" type:"string"` + + // Indicates whether running Spot instances should be terminated if the target + // capacity of the Spot fleet request is decreased below the current size of + // the Spot fleet. + ExcessCapacityTerminationPolicy *string `locationName:"excessCapacityTerminationPolicy" type:"string" enum:"ExcessCapacityTerminationPolicy"` + + // The number of units fulfilled by this request compared to the set target + // capacity. + FulfilledCapacity *float64 `locationName:"fulfilledCapacity" type:"double"` + + // Grants the Spot fleet permission to terminate Spot instances on your behalf + // when you cancel its Spot fleet request using CancelSpotFleetRequests or when + // the Spot fleet request expires, if you set terminateInstancesWithExpiration. + IamFleetRole *string `locationName:"iamFleetRole" type:"string" required:"true"` + + // Information about the launch specifications for the Spot fleet request. + LaunchSpecifications []*SpotFleetLaunchSpecification `locationName:"launchSpecifications" locationNameList:"item" min:"1" type:"list" required:"true"` + + // The bid price per unit hour. + SpotPrice *string `locationName:"spotPrice" type:"string" required:"true"` + + // The number of units to request. You can choose to set the target capacity + // in terms of instances or a performance characteristic that is important to + // your application workload, such as vCPUs, memory, or I/O. + TargetCapacity *int64 `locationName:"targetCapacity" type:"integer" required:"true"` + + // Indicates whether running Spot instances should be terminated when the Spot + // fleet request expires. + TerminateInstancesWithExpiration *bool `locationName:"terminateInstancesWithExpiration" type:"boolean"` + + // The type of request. Indicates whether the fleet will only request the target + // capacity or also attempt to maintain it. When you request a certain target + // capacity, the fleet will only place the required bids. It will not attempt + // to replenish Spot instances if capacity is diminished, nor will it submit + // bids in alternative Spot pools if capacity is not available. When you want + // to maintain a certain target capacity, fleet will place the required bids + // to meet this target capacity. It will also automatically replenish any interrupted + // instances. Default: maintain. 
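+ //
+ // A minimal editor's sketch of a configuration that passes Validate; the
+ // role ARN, AMI ID, price, and capacity are illustrative assumptions:
+ //
+ //    data := &ec2.SpotFleetRequestConfigData{
+ //        IamFleetRole:   aws.String("arn:aws:iam::111122223333:role/my-fleet-role"),
+ //        SpotPrice:      aws.String("0.05"),
+ //        TargetCapacity: aws.Int64(2),
+ //        Type:           aws.String(ec2.FleetTypeMaintain),
+ //        LaunchSpecifications: []*ec2.SpotFleetLaunchSpecification{{
+ //            ImageId:      aws.String("ami-12345678"),
+ //            InstanceType: aws.String("m3.medium"),
+ //        }},
+ //    }
+ //    err := data.Validate() // nil: all four required fields are set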
+ Type *string `locationName:"type" type:"string" enum:"FleetType"` + + // The start date and time of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + // The default is to start fulfilling the request immediately. + ValidFrom *time.Time `locationName:"validFrom" type:"timestamp" timestampFormat:"iso8601"` + + // The end date and time of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + // At this point, no new Spot instance requests are placed or enabled to fulfill + // the request. + ValidUntil *time.Time `locationName:"validUntil" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s SpotFleetRequestConfigData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotFleetRequestConfigData) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SpotFleetRequestConfigData) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SpotFleetRequestConfigData"} + if s.IamFleetRole == nil { + invalidParams.Add(request.NewErrParamRequired("IamFleetRole")) + } + if s.LaunchSpecifications == nil { + invalidParams.Add(request.NewErrParamRequired("LaunchSpecifications")) + } + if s.LaunchSpecifications != nil && len(s.LaunchSpecifications) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LaunchSpecifications", 1)) + } + if s.SpotPrice == nil { + invalidParams.Add(request.NewErrParamRequired("SpotPrice")) + } + if s.TargetCapacity == nil { + invalidParams.Add(request.NewErrParamRequired("TargetCapacity")) + } + if s.LaunchSpecifications != nil { + for i, v := range s.LaunchSpecifications { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LaunchSpecifications", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Describes a Spot instance request. +type SpotInstanceRequest struct { + _ struct{} `type:"structure"` + + // If you specified a duration and your Spot instance request was fulfilled, + // this is the fixed hourly price in effect for the Spot instance while it runs. + ActualBlockHourlyPrice *string `locationName:"actualBlockHourlyPrice" type:"string"` + + // The Availability Zone group. If you specify the same Availability Zone group + // for all Spot instance requests, all Spot instances are launched in the same + // Availability Zone. + AvailabilityZoneGroup *string `locationName:"availabilityZoneGroup" type:"string"` + + // The duration for the Spot instance, in minutes. + BlockDurationMinutes *int64 `locationName:"blockDurationMinutes" type:"integer"` + + // The date and time when the Spot instance request was created, in UTC format + // (for example, YYYY-MM-DDTHH:MM:SSZ). + CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"iso8601"` + + // The fault codes for the Spot instance request, if any. + Fault *SpotInstanceStateFault `locationName:"fault" type:"structure"` + + // The instance ID, if an instance has been launched to fulfill the Spot instance + // request. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The instance launch group. Launch groups are Spot instances that launch together + // and terminate together. + LaunchGroup *string `locationName:"launchGroup" type:"string"` + + // Additional information for launching instances. 
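+ //
+ // An editor's sketch (the req variable is assumed to come from a
+ // DescribeSpotInstanceRequests response) of inspecting a request:
+ //
+ //    if aws.StringValue(req.State) == ec2.SpotInstanceStateOpen &&
+ //        req.Status != nil {
+ //        fmt.Println(aws.StringValue(req.Status.Code))
+ //    }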
+ LaunchSpecification *LaunchSpecification `locationName:"launchSpecification" type:"structure"` + + // The Availability Zone in which the bid is launched. + LaunchedAvailabilityZone *string `locationName:"launchedAvailabilityZone" type:"string"` + + // The product description associated with the Spot instance. + ProductDescription *string `locationName:"productDescription" type:"string" enum:"RIProductDescription"` + + // The ID of the Spot instance request. + SpotInstanceRequestId *string `locationName:"spotInstanceRequestId" type:"string"` + + // The maximum hourly price (bid) for the Spot instance launched to fulfill + // the request. + SpotPrice *string `locationName:"spotPrice" type:"string"` + + // The state of the Spot instance request. Spot bid status information can help + // you track your Spot instance requests. For more information, see Spot Bid + // Status (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html) + // in the Amazon Elastic Compute Cloud User Guide. + State *string `locationName:"state" type:"string" enum:"SpotInstanceState"` + + // The status code and status message describing the Spot instance request. + Status *SpotInstanceStatus `locationName:"status" type:"structure"` + + // Any tags assigned to the resource. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The Spot instance request type. + Type *string `locationName:"type" type:"string" enum:"SpotInstanceType"` + + // The start date of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + // The request becomes active at this date and time. + ValidFrom *time.Time `locationName:"validFrom" type:"timestamp" timestampFormat:"iso8601"` + + // The end date of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + // If this is a one-time request, it remains active until all instances launch, + // the request is canceled, or this date is reached. If the request is persistent, + // it remains active until it is canceled or this date is reached. + ValidUntil *time.Time `locationName:"validUntil" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s SpotInstanceRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotInstanceRequest) GoString() string { + return s.String() +} + +// Describes a Spot instance state change. +type SpotInstanceStateFault struct { + _ struct{} `type:"structure"` + + // The reason code for the Spot instance state change. + Code *string `locationName:"code" type:"string"` + + // The message for the Spot instance state change. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s SpotInstanceStateFault) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotInstanceStateFault) GoString() string { + return s.String() +} + +// Describes the status of a Spot instance request. +type SpotInstanceStatus struct { + _ struct{} `type:"structure"` + + // The status code. For a list of status codes, see Spot Bid Status Codes (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html#spot-instance-bid-status-understand) + // in the Amazon Elastic Compute Cloud User Guide. + Code *string `locationName:"code" type:"string"` + + // The description for the status code. 
+ Message *string `locationName:"message" type:"string"` + + // The date and time of the most recent status update, in UTC format (for example, + // YYYY-MM-DDTHH:MM:SSZ). + UpdateTime *time.Time `locationName:"updateTime" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s SpotInstanceStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotInstanceStatus) GoString() string { + return s.String() +} + +// Describes Spot instance placement. +type SpotPlacement struct { + _ struct{} `type:"structure"` + + // The Availability Zone. + // + // [Spot fleet only] To specify multiple Availability Zones, separate them + // using commas; for example, "us-west-2a, us-west-2b". + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The name of the placement group (for cluster instances). + GroupName *string `locationName:"groupName" type:"string"` +} + +// String returns the string representation +func (s SpotPlacement) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotPlacement) GoString() string { + return s.String() +} + +// Describes the maximum hourly price (bid) for any Spot instance launched to +// fulfill the request. +type SpotPrice struct { + _ struct{} `type:"structure"` + + // The Availability Zone. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The instance type. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // A general description of the AMI. + ProductDescription *string `locationName:"productDescription" type:"string" enum:"RIProductDescription"` + + // The maximum price (bid) that you are willing to pay for a Spot instance. + SpotPrice *string `locationName:"spotPrice" type:"string"` + + // The date and time the request was created, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + Timestamp *time.Time `locationName:"timestamp" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s SpotPrice) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotPrice) GoString() string { + return s.String() +} + +// Describes a stale rule in a security group. +type StaleIpPermission struct { + _ struct{} `type:"structure"` + + // The start of the port range for the TCP and UDP protocols, or an ICMP type + // number. A value of -1 indicates all ICMP types. + FromPort *int64 `locationName:"fromPort" type:"integer"` + + // The IP protocol name (for tcp, udp, and icmp) or number (see Protocol Numbers) + // (http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml). + IpProtocol *string `locationName:"ipProtocol" type:"string"` + + // One or more IP ranges. Not applicable for stale security group rules. + IpRanges []*string `locationName:"ipRanges" locationNameList:"item" type:"list"` + + // One or more prefix list IDs for an AWS service. Not applicable for stale + // security group rules. + PrefixListIds []*string `locationName:"prefixListIds" locationNameList:"item" type:"list"` + + // The end of the port range for the TCP and UDP protocols, or an ICMP type + // number. A value of -1 indicates all ICMP types. + ToPort *int64 `locationName:"toPort" type:"integer"` + + // One or more security group pairs. 
Returns the ID of the referenced security + // group and VPC, and the ID and status of the VPC peering connection. + UserIdGroupPairs []*UserIdGroupPair `locationName:"groups" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s StaleIpPermission) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StaleIpPermission) GoString() string { + return s.String() +} + +// Describes a stale security group (a security group that contains stale rules). +type StaleSecurityGroup struct { + _ struct{} `type:"structure"` + + // The description of the security group. + Description *string `locationName:"description" type:"string"` + + // The ID of the security group. + GroupId *string `locationName:"groupId" type:"string" required:"true"` + + // The name of the security group. + GroupName *string `locationName:"groupName" type:"string"` + + // Information about the stale inbound rules in the security group. + StaleIpPermissions []*StaleIpPermission `locationName:"staleIpPermissions" locationNameList:"item" type:"list"` + + // Information about the stale outbound rules in the security group. + StaleIpPermissionsEgress []*StaleIpPermission `locationName:"staleIpPermissionsEgress" locationNameList:"item" type:"list"` + + // The ID of the VPC for the security group. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s StaleSecurityGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StaleSecurityGroup) GoString() string { + return s.String() +} + +// Contains the parameters for StartInstances. +type StartInstancesInput struct { + _ struct{} `type:"structure"` + + // Reserved. + AdditionalInfo *string `locationName:"additionalInfo" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more instance IDs. + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list" required:"true"` +} + +// String returns the string representation +func (s StartInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartInstancesInput"} + if s.InstanceIds == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of StartInstances. +type StartInstancesOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more started instances. + StartingInstances []*InstanceStateChange `locationName:"instancesSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s StartInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartInstancesOutput) GoString() string { + return s.String() +} + +// Describes a state change. 
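+//
+// An editor's sketch (the inst variable is an assumed *ec2.Instance) of
+// surfacing the reason for a terminated instance:
+//
+//    if inst.StateReason != nil {
+//        log.Printf("%s: %s",
+//            aws.StringValue(inst.StateReason.Code),
+//            aws.StringValue(inst.StateReason.Message))
+//    }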
+type StateReason struct { + _ struct{} `type:"structure"` + + // The reason code for the state change. + Code *string `locationName:"code" type:"string"` + + // The message for the state change. + // + // Server.SpotInstanceTermination: A Spot instance was terminated due to + // an increase in the market price. + // + // Server.InternalError: An internal error occurred during instance launch, + // resulting in termination. + // + // Server.InsufficientInstanceCapacity: There was insufficient instance + // capacity to satisfy the launch request. + // + // Client.InternalError: A client error caused the instance to terminate + // on launch. + // + // Client.InstanceInitiatedShutdown: The instance was shut down using the + // shutdown -h command from the instance. + // + // Client.UserInitiatedShutdown: The instance was shut down using the Amazon + // EC2 API. + // + // Client.VolumeLimitExceeded: The limit on the number of EBS volumes or + // total storage was exceeded. Decrease usage or request an increase in your + // limits. + // + // Client.InvalidSnapshot.NotFound: The specified snapshot was not found. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s StateReason) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StateReason) GoString() string { + return s.String() +} + +// Contains the parameters for StopInstances. +type StopInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Forces the instances to stop. The instances do not have an opportunity to + // flush file system caches or file system metadata. If you use this option, + // you must perform file system check and repair procedures. This option is + // not recommended for Windows instances. + // + // Default: false + Force *bool `locationName:"force" type:"boolean"` + + // One or more instance IDs. + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list" required:"true"` +} + +// String returns the string representation +func (s StopInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StopInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StopInstancesInput"} + if s.InstanceIds == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of StopInstances. +type StopInstancesOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more stopped instances. 
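+ //
+ // A brief editor's sketch (client and instance ID are placeholders) of
+ // reading these state transitions after a StopInstances call:
+ //
+ //    out, err := svc.StopInstances(&ec2.StopInstancesInput{
+ //        InstanceIds: []*string{aws.String("i-1234567a")},
+ //    })
+ //    if err == nil {
+ //        for _, c := range out.StoppingInstances {
+ //            fmt.Println(aws.StringValue(c.InstanceId),
+ //                aws.StringValue(c.CurrentState.Name))
+ //        }
+ //    }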
+ StoppingInstances []*InstanceStateChange `locationName:"instancesSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s StopInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopInstancesOutput) GoString() string { + return s.String() +} + +// Describes the storage location for an instance store-backed AMI. +type Storage struct { + _ struct{} `type:"structure"` + + // An Amazon S3 storage location. + S3 *S3Storage `type:"structure"` +} + +// String returns the string representation +func (s Storage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Storage) GoString() string { + return s.String() +} + +// Describes a subnet. +type Subnet struct { + _ struct{} `type:"structure"` + + // The Availability Zone of the subnet. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The number of unused IP addresses in the subnet. Note that the IP addresses + // for any stopped instances are considered unavailable. + AvailableIpAddressCount *int64 `locationName:"availableIpAddressCount" type:"integer"` + + // The CIDR block assigned to the subnet. + CidrBlock *string `locationName:"cidrBlock" type:"string"` + + // Indicates whether this is the default subnet for the Availability Zone. + DefaultForAz *bool `locationName:"defaultForAz" type:"boolean"` + + // Indicates whether instances launched in this subnet receive a public IP address. + MapPublicIpOnLaunch *bool `locationName:"mapPublicIpOnLaunch" type:"boolean"` + + // The current state of the subnet. + State *string `locationName:"state" type:"string" enum:"SubnetState"` + + // The ID of the subnet. + SubnetId *string `locationName:"subnetId" type:"string"` + + // Any tags assigned to the subnet. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC the subnet is in. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s Subnet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Subnet) GoString() string { + return s.String() +} + +// Describes a tag. +type Tag struct { + _ struct{} `type:"structure"` + + // The key of the tag. + // + // Constraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode + // characters. May not begin with aws: + Key *string `locationName:"key" type:"string"` + + // The value of the tag. + // + // Constraints: Tag values are case-sensitive and accept a maximum of 255 Unicode + // characters. + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Describes a tag. +type TagDescription struct { + _ struct{} `type:"structure"` + + // The tag key. + Key *string `locationName:"key" type:"string"` + + // The ID of the resource. For example, ami-1a2b3c4d. + ResourceId *string `locationName:"resourceId" type:"string"` + + // The resource type. + ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` + + // The tag value. 
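+ //
+ // An editor's sketch (the resource ID is a placeholder) of applying a tag
+ // within the constraints documented on Tag:
+ //
+ //    _, err := svc.CreateTags(&ec2.CreateTagsInput{
+ //        Resources: []*string{aws.String("ami-1a2b3c4d")},
+ //        Tags: []*ec2.Tag{{
+ //            Key:   aws.String("Name"),
+ //            Value: aws.String("web-server"),
+ //        }},
+ //    })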
+ Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s TagDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagDescription) GoString() string { + return s.String() +} + +// Contains the parameters for TerminateInstances. +type TerminateInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more instance IDs. + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list" required:"true"` +} + +// String returns the string representation +func (s TerminateInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TerminateInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TerminateInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TerminateInstancesInput"} + if s.InstanceIds == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of TerminateInstances. +type TerminateInstancesOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more terminated instances. + TerminatingInstances []*InstanceStateChange `locationName:"instancesSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s TerminateInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TerminateInstancesOutput) GoString() string { + return s.String() +} + +// Contains the parameters for UnassignPrivateIpAddresses. +type UnassignPrivateIpAddressesInput struct { + _ struct{} `type:"structure"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"` + + // The secondary private IP addresses to unassign from the network interface. + // You can specify this option multiple times to unassign more than one IP address. + PrivateIpAddresses []*string `locationName:"privateIpAddress" locationNameList:"PrivateIpAddress" type:"list" required:"true"` +} + +// String returns the string representation +func (s UnassignPrivateIpAddressesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnassignPrivateIpAddressesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
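+//
+// An editor's sketch (values are placeholders) of the client-side check this
+// enables before any request is sent:
+//
+//    in := &ec2.UnassignPrivateIpAddressesInput{
+//        NetworkInterfaceId: aws.String("eni-0abc1234"),
+//        PrivateIpAddresses: []*string{aws.String("10.0.0.42")},
+//    }
+//    err := in.Validate() // nil: both required fields are set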
+func (s *UnassignPrivateIpAddressesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UnassignPrivateIpAddressesInput"} + if s.NetworkInterfaceId == nil { + invalidParams.Add(request.NewErrParamRequired("NetworkInterfaceId")) + } + if s.PrivateIpAddresses == nil { + invalidParams.Add(request.NewErrParamRequired("PrivateIpAddresses")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UnassignPrivateIpAddressesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UnassignPrivateIpAddressesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnassignPrivateIpAddressesOutput) GoString() string { + return s.String() +} + +// Contains the parameters for UnmonitorInstances. +type UnmonitorInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more instance IDs. + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list" required:"true"` +} + +// String returns the string representation +func (s UnmonitorInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnmonitorInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UnmonitorInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UnmonitorInstancesInput"} + if s.InstanceIds == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of UnmonitorInstances. +type UnmonitorInstancesOutput struct { + _ struct{} `type:"structure"` + + // Monitoring information for one or more instances. + InstanceMonitorings []*InstanceMonitoring `locationName:"instancesSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s UnmonitorInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnmonitorInstancesOutput) GoString() string { + return s.String() +} + +// Information about items that were not successfully processed in a batch call. +type UnsuccessfulItem struct { + _ struct{} `type:"structure"` + + // Information about the error. + Error *UnsuccessfulItemError `locationName:"error" type:"structure" required:"true"` + + // The ID of the resource. + ResourceId *string `locationName:"resourceId" type:"string"` +} + +// String returns the string representation +func (s UnsuccessfulItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnsuccessfulItem) GoString() string { + return s.String() +} + +// Information about the error that occurred. For more information about errors, +// see Error Codes (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html). +type UnsuccessfulItemError struct { + _ struct{} `type:"structure"` + + // The error code. 
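+ //
+ // An editor's sketch of reporting per-item failures; the out variable and
+ // its Unsuccessful field follow batch responses such as DeleteFlowLogsOutput:
+ //
+ //    for _, item := range out.Unsuccessful {
+ //        log.Printf("%s failed: %s (%s)",
+ //            aws.StringValue(item.ResourceId),
+ //            aws.StringValue(item.Error.Message),
+ //            aws.StringValue(item.Error.Code))
+ //    }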
+ Code *string `locationName:"code" type:"string" required:"true"` + + // The error message accompanying the error code. + Message *string `locationName:"message" type:"string" required:"true"` +} + +// String returns the string representation +func (s UnsuccessfulItemError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnsuccessfulItemError) GoString() string { + return s.String() +} + +// Describes the S3 bucket for the disk image. +type UserBucket struct { + _ struct{} `type:"structure"` + + // The name of the S3 bucket where the disk image is located. + S3Bucket *string `type:"string"` + + // The file name of the disk image. + S3Key *string `type:"string"` +} + +// String returns the string representation +func (s UserBucket) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UserBucket) GoString() string { + return s.String() +} + +// Describes the S3 bucket for the disk image. +type UserBucketDetails struct { + _ struct{} `type:"structure"` + + // The S3 bucket from which the disk image was created. + S3Bucket *string `locationName:"s3Bucket" type:"string"` + + // The file name of the disk image. + S3Key *string `locationName:"s3Key" type:"string"` +} + +// String returns the string representation +func (s UserBucketDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UserBucketDetails) GoString() string { + return s.String() +} + +// Describes the user data for an instance. +type UserData struct { + _ struct{} `type:"structure"` + + // The user data. If you are using an AWS SDK or command line tool, Base64-encoding + // is performed for you, and you can load the text from a file. Otherwise, you + // must provide Base64-encoded text. + Data *string `locationName:"data" type:"string"` +} + +// String returns the string representation +func (s UserData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UserData) GoString() string { + return s.String() +} + +// Describes a security group and AWS account ID pair. +type UserIdGroupPair struct { + _ struct{} `type:"structure"` + + // The ID of the security group. + GroupId *string `locationName:"groupId" type:"string"` + + // The name of the security group. In a request, use this parameter for a security + // group in EC2-Classic or a default VPC only. For a security group in a nondefault + // VPC, use the security group ID. + GroupName *string `locationName:"groupName" type:"string"` + + // The status of a VPC peering connection, if applicable. + PeeringStatus *string `locationName:"peeringStatus" type:"string"` + + // The ID of an AWS account. For a referenced security group in another VPC, + // the account ID of the referenced security group is returned. + // + // [EC2-Classic] Required when adding or removing rules that reference a security + // group in another AWS account. + UserId *string `locationName:"userId" type:"string"` + + // The ID of the VPC for the referenced security group, if applicable. + VpcId *string `locationName:"vpcId" type:"string"` + + // The ID of the VPC peering connection, if applicable. 
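+ //
+ // An editor's sketch (group IDs are placeholders) of referencing another
+ // security group through a UserIdGroupPair in an ingress rule:
+ //
+ //    _, err := svc.AuthorizeSecurityGroupIngress(&ec2.AuthorizeSecurityGroupIngressInput{
+ //        GroupId: aws.String("sg-11111111"),
+ //        IpPermissions: []*ec2.IpPermission{{
+ //            IpProtocol: aws.String("tcp"),
+ //            FromPort:   aws.Int64(5432),
+ //            ToPort:     aws.Int64(5432),
+ //            UserIdGroupPairs: []*ec2.UserIdGroupPair{{
+ //                GroupId: aws.String("sg-22222222"),
+ //            }},
+ //        }},
+ //    })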
+ VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"` +} + +// String returns the string representation +func (s UserIdGroupPair) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UserIdGroupPair) GoString() string { + return s.String() +} + +// Describes telemetry for a VPN tunnel. +type VgwTelemetry struct { + _ struct{} `type:"structure"` + + // The number of accepted routes. + AcceptedRouteCount *int64 `locationName:"acceptedRouteCount" type:"integer"` + + // The date and time of the last change in status. + LastStatusChange *time.Time `locationName:"lastStatusChange" type:"timestamp" timestampFormat:"iso8601"` + + // The Internet-routable IP address of the virtual private gateway's outside + // interface. + OutsideIpAddress *string `locationName:"outsideIpAddress" type:"string"` + + // The status of the VPN tunnel. + Status *string `locationName:"status" type:"string" enum:"TelemetryStatus"` + + // If an error occurs, a description of the error. + StatusMessage *string `locationName:"statusMessage" type:"string"` +} + +// String returns the string representation +func (s VgwTelemetry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VgwTelemetry) GoString() string { + return s.String() +} + +// Describes a volume. +type Volume struct { + _ struct{} `type:"structure"` + + // Information about the volume attachments. + Attachments []*VolumeAttachment `locationName:"attachmentSet" locationNameList:"item" type:"list"` + + // The Availability Zone for the volume. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The time stamp when volume creation was initiated. + CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"iso8601"` + + // Indicates whether the volume will be encrypted. + Encrypted *bool `locationName:"encrypted" type:"boolean"` + + // The number of I/O operations per second (IOPS) that the volume supports. + // For Provisioned IOPS SSD volumes, this represents the number of IOPS that + // are provisioned for the volume. For General Purpose SSD volumes, this represents + // the baseline performance of the volume and the rate at which the volume accumulates + // I/O credits for bursting. For more information on General Purpose SSD baseline + // performance, I/O credits, and bursting, see Amazon EBS Volume Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) + // in the Amazon Elastic Compute Cloud User Guide. + // + // Constraint: Range is 100-20000 IOPS for io1 volumes and 100-10000 IOPS for + // gp2 volumes. + // + // Condition: This parameter is required for requests to create io1 volumes; + // it is not used in requests to create gp2, st1, sc1, or standard volumes. + Iops *int64 `locationName:"iops" type:"integer"` + + // The full ARN of the AWS Key Management Service (AWS KMS) customer master + // key (CMK) that was used to protect the volume encryption key for the volume. + KmsKeyId *string `locationName:"kmsKeyId" type:"string"` + + // The size of the volume, in GiBs. + Size *int64 `locationName:"size" type:"integer"` + + // The snapshot from which the volume was created, if applicable. + SnapshotId *string `locationName:"snapshotId" type:"string"` + + // The volume state. + State *string `locationName:"status" type:"string" enum:"VolumeState"` + + // Any tags assigned to the volume. 
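+ //
+ // Per the Iops condition above, an editor's sketch (zone, size, and IOPS
+ // are illustrative) of creating an io1 volume:
+ //
+ //    vol, err := svc.CreateVolume(&ec2.CreateVolumeInput{
+ //        AvailabilityZone: aws.String("us-west-2a"),
+ //        VolumeType:       aws.String(ec2.VolumeTypeIo1),
+ //        Size:             aws.Int64(100),
+ //        Iops:             aws.Int64(1000),
+ //    })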
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the volume. + VolumeId *string `locationName:"volumeId" type:"string"` + + // The volume type. This can be gp2 for General Purpose SSD, io1 for Provisioned + // IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard + // for Magnetic volumes. + VolumeType *string `locationName:"volumeType" type:"string" enum:"VolumeType"` +} + +// String returns the string representation +func (s Volume) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Volume) GoString() string { + return s.String() +} + +// Describes volume attachment details. +type VolumeAttachment struct { + _ struct{} `type:"structure"` + + // The time stamp when the attachment initiated. + AttachTime *time.Time `locationName:"attachTime" type:"timestamp" timestampFormat:"iso8601"` + + // Indicates whether the EBS volume is deleted on instance termination. + DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` + + // The device name. + Device *string `locationName:"device" type:"string"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The attachment state of the volume. + State *string `locationName:"status" type:"string" enum:"VolumeAttachmentState"` + + // The ID of the volume. + VolumeId *string `locationName:"volumeId" type:"string"` +} + +// String returns the string representation +func (s VolumeAttachment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeAttachment) GoString() string { + return s.String() +} + +// Describes an EBS volume. +type VolumeDetail struct { + _ struct{} `type:"structure"` + + // The size of the volume, in GiB. + Size *int64 `locationName:"size" type:"long" required:"true"` +} + +// String returns the string representation +func (s VolumeDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeDetail) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *VolumeDetail) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VolumeDetail"} + if s.Size == nil { + invalidParams.Add(request.NewErrParamRequired("Size")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Describes a volume status operation code. +type VolumeStatusAction struct { + _ struct{} `type:"structure"` + + // The code identifying the operation, for example, enable-volume-io. + Code *string `locationName:"code" type:"string"` + + // A description of the operation. + Description *string `locationName:"description" type:"string"` + + // The ID of the event associated with this operation. + EventId *string `locationName:"eventId" type:"string"` + + // The event type associated with this operation. + EventType *string `locationName:"eventType" type:"string"` +} + +// String returns the string representation +func (s VolumeStatusAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeStatusAction) GoString() string { + return s.String() +} + +// Describes a volume status. +type VolumeStatusDetails struct { + _ struct{} `type:"structure"` + + // The name of the volume status. 
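+ //
+ // An editor's sketch (the out variable is an assumed DescribeVolumeStatus
+ // response) of walking down to these details:
+ //
+ //    for _, item := range out.VolumeStatuses {
+ //        if item.VolumeStatus == nil {
+ //            continue
+ //        }
+ //        for _, d := range item.VolumeStatus.Details {
+ //            fmt.Println(aws.StringValue(d.Name), aws.StringValue(d.Status))
+ //        }
+ //    }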
+ Name *string `locationName:"name" type:"string" enum:"VolumeStatusName"` + + // The intended status of the volume status. + Status *string `locationName:"status" type:"string"` +} + +// String returns the string representation +func (s VolumeStatusDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeStatusDetails) GoString() string { + return s.String() +} + +// Describes a volume status event. +type VolumeStatusEvent struct { + _ struct{} `type:"structure"` + + // A description of the event. + Description *string `locationName:"description" type:"string"` + + // The ID of this event. + EventId *string `locationName:"eventId" type:"string"` + + // The type of this event. + EventType *string `locationName:"eventType" type:"string"` + + // The latest end time of the event. + NotAfter *time.Time `locationName:"notAfter" type:"timestamp" timestampFormat:"iso8601"` + + // The earliest start time of the event. + NotBefore *time.Time `locationName:"notBefore" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s VolumeStatusEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeStatusEvent) GoString() string { + return s.String() +} + +// Describes the status of a volume. +type VolumeStatusInfo struct { + _ struct{} `type:"structure"` + + // The details of the volume status. + Details []*VolumeStatusDetails `locationName:"details" locationNameList:"item" type:"list"` + + // The status of the volume. + Status *string `locationName:"status" type:"string" enum:"VolumeStatusInfoStatus"` +} + +// String returns the string representation +func (s VolumeStatusInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeStatusInfo) GoString() string { + return s.String() +} + +// Describes the volume status. +type VolumeStatusItem struct { + _ struct{} `type:"structure"` + + // The details of the operation. + Actions []*VolumeStatusAction `locationName:"actionsSet" locationNameList:"item" type:"list"` + + // The Availability Zone of the volume. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // A list of events associated with the volume. + Events []*VolumeStatusEvent `locationName:"eventsSet" locationNameList:"item" type:"list"` + + // The volume ID. + VolumeId *string `locationName:"volumeId" type:"string"` + + // The volume status. + VolumeStatus *VolumeStatusInfo `locationName:"volumeStatus" type:"structure"` +} + +// String returns the string representation +func (s VolumeStatusItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeStatusItem) GoString() string { + return s.String() +} + +// Describes a VPC. +type Vpc struct { + _ struct{} `type:"structure"` + + // The CIDR block for the VPC. + CidrBlock *string `locationName:"cidrBlock" type:"string"` + + // The ID of the set of DHCP options you've associated with the VPC (or default + // if the default options are associated with the VPC). + DhcpOptionsId *string `locationName:"dhcpOptionsId" type:"string"` + + // The allowed tenancy of instances launched into the VPC. + InstanceTenancy *string `locationName:"instanceTenancy" type:"string" enum:"Tenancy"` + + // Indicates whether the VPC is the default VPC. + IsDefault *bool `locationName:"isDefault" type:"boolean"` + + // The current state of the VPC. 
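+ //
+ // An editor's sketch (the out variable is an assumed DescribeVpcs response)
+ // of picking out the default VPC:
+ //
+ //    for _, v := range out.Vpcs {
+ //        if aws.BoolValue(v.IsDefault) {
+ //            fmt.Println(aws.StringValue(v.VpcId))
+ //        }
+ //    }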
+ State *string `locationName:"state" type:"string" enum:"VpcState"` + + // Any tags assigned to the VPC. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s Vpc) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Vpc) GoString() string { + return s.String() +} + +// Describes an attachment between a virtual private gateway and a VPC. +type VpcAttachment struct { + _ struct{} `type:"structure"` + + // The current state of the attachment. + State *string `locationName:"state" type:"string" enum:"AttachmentStatus"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s VpcAttachment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcAttachment) GoString() string { + return s.String() +} + +// Describes whether a VPC is enabled for ClassicLink. +type VpcClassicLink struct { + _ struct{} `type:"structure"` + + // Indicates whether the VPC is enabled for ClassicLink. + ClassicLinkEnabled *bool `locationName:"classicLinkEnabled" type:"boolean"` + + // Any tags assigned to the VPC. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s VpcClassicLink) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcClassicLink) GoString() string { + return s.String() +} + +// Describes a VPC endpoint. +type VpcEndpoint struct { + _ struct{} `type:"structure"` + + // The date and time the VPC endpoint was created. + CreationTimestamp *time.Time `locationName:"creationTimestamp" type:"timestamp" timestampFormat:"iso8601"` + + // The policy document associated with the endpoint. + PolicyDocument *string `locationName:"policyDocument" type:"string"` + + // One or more route tables associated with the endpoint. + RouteTableIds []*string `locationName:"routeTableIdSet" locationNameList:"item" type:"list"` + + // The name of the AWS service to which the endpoint is associated. + ServiceName *string `locationName:"serviceName" type:"string"` + + // The state of the VPC endpoint. + State *string `locationName:"state" type:"string" enum:"State"` + + // The ID of the VPC endpoint. + VpcEndpointId *string `locationName:"vpcEndpointId" type:"string"` + + // The ID of the VPC to which the endpoint is associated. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s VpcEndpoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcEndpoint) GoString() string { + return s.String() +} + +// Describes a VPC peering connection. +type VpcPeeringConnection struct { + _ struct{} `type:"structure"` + + // Information about the accepter VPC. CIDR block information is not returned + // when creating a VPC peering connection, or when describing a VPC peering + // connection that's in the initiating-request or pending-acceptance state. + AccepterVpcInfo *VpcPeeringConnectionVpcInfo `locationName:"accepterVpcInfo" type:"structure"` + + // The time that an unaccepted VPC peering connection will expire. 
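+ //
+ // An editor's sketch (the pcx variable is assumed) of checking whether a
+ // pending request has already lapsed:
+ //
+ //    if pcx.ExpirationTime != nil &&
+ //        aws.TimeValue(pcx.ExpirationTime).Before(time.Now()) {
+ //        // the peering request expired before it was accepted
+ //    }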
+ ExpirationTime *time.Time `locationName:"expirationTime" type:"timestamp" timestampFormat:"iso8601"` + + // Information about the requester VPC. + RequesterVpcInfo *VpcPeeringConnectionVpcInfo `locationName:"requesterVpcInfo" type:"structure"` + + // The status of the VPC peering connection. + Status *VpcPeeringConnectionStateReason `locationName:"status" type:"structure"` + + // Any tags assigned to the resource. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC peering connection. + VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"` +} + +// String returns the string representation +func (s VpcPeeringConnection) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcPeeringConnection) GoString() string { + return s.String() +} + +// Describes the VPC peering connection options. +type VpcPeeringConnectionOptionsDescription struct { + _ struct{} `type:"structure"` + + // Indicates whether a local ClassicLink connection can communicate with the + // peer VPC over the VPC peering connection. + AllowEgressFromLocalClassicLinkToRemoteVpc *bool `locationName:"allowEgressFromLocalClassicLinkToRemoteVpc" type:"boolean"` + + // Indicates whether a local VPC can communicate with a ClassicLink connection + // in the peer VPC over the VPC peering connection. + AllowEgressFromLocalVpcToRemoteClassicLink *bool `locationName:"allowEgressFromLocalVpcToRemoteClassicLink" type:"boolean"` +} + +// String returns the string representation +func (s VpcPeeringConnectionOptionsDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcPeeringConnectionOptionsDescription) GoString() string { + return s.String() +} + +// Describes the status of a VPC peering connection. +type VpcPeeringConnectionStateReason struct { + _ struct{} `type:"structure"` + + // The status of the VPC peering connection. + Code *string `locationName:"code" type:"string" enum:"VpcPeeringConnectionStateReasonCode"` + + // A message that provides more information about the status, if applicable. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s VpcPeeringConnectionStateReason) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcPeeringConnectionStateReason) GoString() string { + return s.String() +} + +// Describes a VPC in a VPC peering connection. +type VpcPeeringConnectionVpcInfo struct { + _ struct{} `type:"structure"` + + // The CIDR block for the VPC. + CidrBlock *string `locationName:"cidrBlock" type:"string"` + + // The AWS account ID of the VPC owner. + OwnerId *string `locationName:"ownerId" type:"string"` + + // Information about the VPC peering connection options for the accepter or + // requester VPC. + PeeringOptions *VpcPeeringConnectionOptionsDescription `locationName:"peeringOptions" type:"structure"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s VpcPeeringConnectionVpcInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcPeeringConnectionVpcInfo) GoString() string { + return s.String() +} + +// Describes a VPN connection. 
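+//
+// An editor's sketch (the vpn variable is assumed) of counting tunnels that
+// report UP through the VgwTelemetry field:
+//
+//    up := 0
+//    for _, t := range vpn.VgwTelemetry {
+//        if aws.StringValue(t.Status) == ec2.TelemetryStatusUp {
+//            up++
+//        }
+//    }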
+type VpnConnection struct { + _ struct{} `type:"structure"` + + // The configuration information for the VPN connection's customer gateway (in + // the native XML format). This element is always present in the CreateVpnConnection + // response; however, it's present in the DescribeVpnConnections response only + // if the VPN connection is in the pending or available state. + CustomerGatewayConfiguration *string `locationName:"customerGatewayConfiguration" type:"string"` + + // The ID of the customer gateway at your end of the VPN connection. + CustomerGatewayId *string `locationName:"customerGatewayId" type:"string"` + + // The VPN connection options. + Options *VpnConnectionOptions `locationName:"options" type:"structure"` + + // The static routes associated with the VPN connection. + Routes []*VpnStaticRoute `locationName:"routes" locationNameList:"item" type:"list"` + + // The current state of the VPN connection. + State *string `locationName:"state" type:"string" enum:"VpnState"` + + // Any tags assigned to the VPN connection. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The type of VPN connection. + Type *string `locationName:"type" type:"string" enum:"GatewayType"` + + // Information about the VPN tunnel. + VgwTelemetry []*VgwTelemetry `locationName:"vgwTelemetry" locationNameList:"item" type:"list"` + + // The ID of the VPN connection. + VpnConnectionId *string `locationName:"vpnConnectionId" type:"string"` + + // The ID of the virtual private gateway at the AWS side of the VPN connection. + VpnGatewayId *string `locationName:"vpnGatewayId" type:"string"` +} + +// String returns the string representation +func (s VpnConnection) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpnConnection) GoString() string { + return s.String() +} + +// Describes VPN connection options. +type VpnConnectionOptions struct { + _ struct{} `type:"structure"` + + // Indicates whether the VPN connection uses static routes only. Static routes + // must be used for devices that don't support BGP. + StaticRoutesOnly *bool `locationName:"staticRoutesOnly" type:"boolean"` +} + +// String returns the string representation +func (s VpnConnectionOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpnConnectionOptions) GoString() string { + return s.String() +} + +// Describes VPN connection options. +type VpnConnectionOptionsSpecification struct { + _ struct{} `type:"structure"` + + // Indicates whether the VPN connection uses static routes only. Static routes + // must be used for devices that don't support BGP. + StaticRoutesOnly *bool `locationName:"staticRoutesOnly" type:"boolean"` +} + +// String returns the string representation +func (s VpnConnectionOptionsSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpnConnectionOptionsSpecification) GoString() string { + return s.String() +} + +// Describes a virtual private gateway. +type VpnGateway struct { + _ struct{} `type:"structure"` + + // The Availability Zone where the virtual private gateway was created, if applicable. + // This field may be empty or not returned. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The current state of the virtual private gateway. + State *string `locationName:"state" type:"string" enum:"VpnState"` + + // Any tags assigned to the virtual private gateway. 
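+ //
+ // Tying the VPN types together, an editor's sketch (gateway IDs are
+ // placeholders) of requesting a static-routes-only connection for a
+ // device without BGP support:
+ //
+ //    _, err := svc.CreateVpnConnection(&ec2.CreateVpnConnectionInput{
+ //        Type:              aws.String("ipsec.1"),
+ //        CustomerGatewayId: aws.String("cgw-0abc1234"),
+ //        VpnGatewayId:      aws.String("vgw-0abc1234"),
+ //        Options: &ec2.VpnConnectionOptionsSpecification{
+ //            StaticRoutesOnly: aws.Bool(true),
+ //        },
+ //    })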
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The type of VPN connection the virtual private gateway supports. + Type *string `locationName:"type" type:"string" enum:"GatewayType"` + + // Any VPCs attached to the virtual private gateway. + VpcAttachments []*VpcAttachment `locationName:"attachments" locationNameList:"item" type:"list"` + + // The ID of the virtual private gateway. + VpnGatewayId *string `locationName:"vpnGatewayId" type:"string"` +} + +// String returns the string representation +func (s VpnGateway) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpnGateway) GoString() string { + return s.String() +} + +// Describes a static route for a VPN connection. +type VpnStaticRoute struct { + _ struct{} `type:"structure"` + + // The CIDR block associated with the local subnet of the customer data center. + DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string"` + + // Indicates how the routes were provided. + Source *string `locationName:"source" type:"string" enum:"VpnStaticRouteSource"` + + // The current state of the static route. + State *string `locationName:"state" type:"string" enum:"VpnState"` +} + +// String returns the string representation +func (s VpnStaticRoute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpnStaticRoute) GoString() string { + return s.String() +} + +const ( + // @enum AccountAttributeName + AccountAttributeNameSupportedPlatforms = "supported-platforms" + // @enum AccountAttributeName + AccountAttributeNameDefaultVpc = "default-vpc" +) + +const ( + // @enum Affinity + AffinityDefault = "default" + // @enum Affinity + AffinityHost = "host" +) + +const ( + // @enum AllocationState + AllocationStateAvailable = "available" + // @enum AllocationState + AllocationStateUnderAssessment = "under-assessment" + // @enum AllocationState + AllocationStatePermanentFailure = "permanent-failure" + // @enum AllocationState + AllocationStateReleased = "released" + // @enum AllocationState + AllocationStateReleasedPermanentFailure = "released-permanent-failure" +) + +const ( + // @enum AllocationStrategy + AllocationStrategyLowestPrice = "lowestPrice" + // @enum AllocationStrategy + AllocationStrategyDiversified = "diversified" +) + +const ( + // @enum ArchitectureValues + ArchitectureValuesI386 = "i386" + // @enum ArchitectureValues + ArchitectureValuesX8664 = "x86_64" +) + +const ( + // @enum AttachmentStatus + AttachmentStatusAttaching = "attaching" + // @enum AttachmentStatus + AttachmentStatusAttached = "attached" + // @enum AttachmentStatus + AttachmentStatusDetaching = "detaching" + // @enum AttachmentStatus + AttachmentStatusDetached = "detached" +) + +const ( + // @enum AutoPlacement + AutoPlacementOn = "on" + // @enum AutoPlacement + AutoPlacementOff = "off" +) + +const ( + // @enum AvailabilityZoneState + AvailabilityZoneStateAvailable = "available" + // @enum AvailabilityZoneState + AvailabilityZoneStateInformation = "information" + // @enum AvailabilityZoneState + AvailabilityZoneStateImpaired = "impaired" + // @enum AvailabilityZoneState + AvailabilityZoneStateUnavailable = "unavailable" +) + +const ( + // @enum BatchState + BatchStateSubmitted = "submitted" + // @enum BatchState + BatchStateActive = "active" + // @enum BatchState + BatchStateCancelled = "cancelled" + // @enum BatchState + BatchStateFailed = "failed" + // @enum BatchState + BatchStateCancelledRunning = 
"cancelled_running" + // @enum BatchState + BatchStateCancelledTerminating = "cancelled_terminating" + // @enum BatchState + BatchStateModifying = "modifying" +) + +const ( + // @enum BundleTaskState + BundleTaskStatePending = "pending" + // @enum BundleTaskState + BundleTaskStateWaitingForShutdown = "waiting-for-shutdown" + // @enum BundleTaskState + BundleTaskStateBundling = "bundling" + // @enum BundleTaskState + BundleTaskStateStoring = "storing" + // @enum BundleTaskState + BundleTaskStateCancelling = "cancelling" + // @enum BundleTaskState + BundleTaskStateComplete = "complete" + // @enum BundleTaskState + BundleTaskStateFailed = "failed" +) + +const ( + // @enum CancelBatchErrorCode + CancelBatchErrorCodeFleetRequestIdDoesNotExist = "fleetRequestIdDoesNotExist" + // @enum CancelBatchErrorCode + CancelBatchErrorCodeFleetRequestIdMalformed = "fleetRequestIdMalformed" + // @enum CancelBatchErrorCode + CancelBatchErrorCodeFleetRequestNotInCancellableState = "fleetRequestNotInCancellableState" + // @enum CancelBatchErrorCode + CancelBatchErrorCodeUnexpectedError = "unexpectedError" +) + +const ( + // @enum CancelSpotInstanceRequestState + CancelSpotInstanceRequestStateActive = "active" + // @enum CancelSpotInstanceRequestState + CancelSpotInstanceRequestStateOpen = "open" + // @enum CancelSpotInstanceRequestState + CancelSpotInstanceRequestStateClosed = "closed" + // @enum CancelSpotInstanceRequestState + CancelSpotInstanceRequestStateCancelled = "cancelled" + // @enum CancelSpotInstanceRequestState + CancelSpotInstanceRequestStateCompleted = "completed" +) + +const ( + // @enum ContainerFormat + ContainerFormatOva = "ova" +) + +const ( + // @enum ConversionTaskState + ConversionTaskStateActive = "active" + // @enum ConversionTaskState + ConversionTaskStateCancelling = "cancelling" + // @enum ConversionTaskState + ConversionTaskStateCancelled = "cancelled" + // @enum ConversionTaskState + ConversionTaskStateCompleted = "completed" +) + +const ( + // @enum CurrencyCodeValues + CurrencyCodeValuesUsd = "USD" +) + +const ( + // @enum DatafeedSubscriptionState + DatafeedSubscriptionStateActive = "Active" + // @enum DatafeedSubscriptionState + DatafeedSubscriptionStateInactive = "Inactive" +) + +const ( + // @enum DeviceType + DeviceTypeEbs = "ebs" + // @enum DeviceType + DeviceTypeInstanceStore = "instance-store" +) + +const ( + // @enum DiskImageFormat + DiskImageFormatVmdk = "VMDK" + // @enum DiskImageFormat + DiskImageFormatRaw = "RAW" + // @enum DiskImageFormat + DiskImageFormatVhd = "VHD" +) + +const ( + // @enum DomainType + DomainTypeVpc = "vpc" + // @enum DomainType + DomainTypeStandard = "standard" +) + +const ( + // @enum EventCode + EventCodeInstanceReboot = "instance-reboot" + // @enum EventCode + EventCodeSystemReboot = "system-reboot" + // @enum EventCode + EventCodeSystemMaintenance = "system-maintenance" + // @enum EventCode + EventCodeInstanceRetirement = "instance-retirement" + // @enum EventCode + EventCodeInstanceStop = "instance-stop" +) + +const ( + // @enum EventType + EventTypeInstanceChange = "instanceChange" + // @enum EventType + EventTypeFleetRequestChange = "fleetRequestChange" + // @enum EventType + EventTypeError = "error" +) + +const ( + // @enum ExcessCapacityTerminationPolicy + ExcessCapacityTerminationPolicyNoTermination = "noTermination" + // @enum ExcessCapacityTerminationPolicy + ExcessCapacityTerminationPolicyDefault = "default" +) + +const ( + // @enum ExportEnvironment + ExportEnvironmentCitrix = "citrix" + // @enum ExportEnvironment + 
ExportEnvironmentVmware = "vmware" + // @enum ExportEnvironment + ExportEnvironmentMicrosoft = "microsoft" +) + +const ( + // @enum ExportTaskState + ExportTaskStateActive = "active" + // @enum ExportTaskState + ExportTaskStateCancelling = "cancelling" + // @enum ExportTaskState + ExportTaskStateCancelled = "cancelled" + // @enum ExportTaskState + ExportTaskStateCompleted = "completed" +) + +const ( + // @enum FleetType + FleetTypeRequest = "request" + // @enum FleetType + FleetTypeMaintain = "maintain" +) + +const ( + // @enum FlowLogsResourceType + FlowLogsResourceTypeVpc = "VPC" + // @enum FlowLogsResourceType + FlowLogsResourceTypeSubnet = "Subnet" + // @enum FlowLogsResourceType + FlowLogsResourceTypeNetworkInterface = "NetworkInterface" +) + +const ( + // @enum GatewayType + GatewayTypeIpsec1 = "ipsec.1" +) + +const ( + // @enum HostTenancy + HostTenancyDedicated = "dedicated" + // @enum HostTenancy + HostTenancyHost = "host" +) + +const ( + // @enum HypervisorType + HypervisorTypeOvm = "ovm" + // @enum HypervisorType + HypervisorTypeXen = "xen" +) + +const ( + // @enum ImageAttributeName + ImageAttributeNameDescription = "description" + // @enum ImageAttributeName + ImageAttributeNameKernel = "kernel" + // @enum ImageAttributeName + ImageAttributeNameRamdisk = "ramdisk" + // @enum ImageAttributeName + ImageAttributeNameLaunchPermission = "launchPermission" + // @enum ImageAttributeName + ImageAttributeNameProductCodes = "productCodes" + // @enum ImageAttributeName + ImageAttributeNameBlockDeviceMapping = "blockDeviceMapping" + // @enum ImageAttributeName + ImageAttributeNameSriovNetSupport = "sriovNetSupport" +) + +const ( + // @enum ImageState + ImageStatePending = "pending" + // @enum ImageState + ImageStateAvailable = "available" + // @enum ImageState + ImageStateInvalid = "invalid" + // @enum ImageState + ImageStateDeregistered = "deregistered" + // @enum ImageState + ImageStateTransient = "transient" + // @enum ImageState + ImageStateFailed = "failed" + // @enum ImageState + ImageStateError = "error" +) + +const ( + // @enum ImageTypeValues + ImageTypeValuesMachine = "machine" + // @enum ImageTypeValues + ImageTypeValuesKernel = "kernel" + // @enum ImageTypeValues + ImageTypeValuesRamdisk = "ramdisk" +) + +const ( + // @enum InstanceAttributeName + InstanceAttributeNameInstanceType = "instanceType" + // @enum InstanceAttributeName + InstanceAttributeNameKernel = "kernel" + // @enum InstanceAttributeName + InstanceAttributeNameRamdisk = "ramdisk" + // @enum InstanceAttributeName + InstanceAttributeNameUserData = "userData" + // @enum InstanceAttributeName + InstanceAttributeNameDisableApiTermination = "disableApiTermination" + // @enum InstanceAttributeName + InstanceAttributeNameInstanceInitiatedShutdownBehavior = "instanceInitiatedShutdownBehavior" + // @enum InstanceAttributeName + InstanceAttributeNameRootDeviceName = "rootDeviceName" + // @enum InstanceAttributeName + InstanceAttributeNameBlockDeviceMapping = "blockDeviceMapping" + // @enum InstanceAttributeName + InstanceAttributeNameProductCodes = "productCodes" + // @enum InstanceAttributeName + InstanceAttributeNameSourceDestCheck = "sourceDestCheck" + // @enum InstanceAttributeName + InstanceAttributeNameGroupSet = "groupSet" + // @enum InstanceAttributeName + InstanceAttributeNameEbsOptimized = "ebsOptimized" + // @enum InstanceAttributeName + InstanceAttributeNameSriovNetSupport = "sriovNetSupport" + // @enum InstanceAttributeName + InstanceAttributeNameEnaSupport = "enaSupport" +) + +const ( + // @enum 
InstanceLifecycleType + InstanceLifecycleTypeSpot = "spot" + // @enum InstanceLifecycleType + InstanceLifecycleTypeScheduled = "scheduled" +) + +const ( + // @enum InstanceStateName + InstanceStateNamePending = "pending" + // @enum InstanceStateName + InstanceStateNameRunning = "running" + // @enum InstanceStateName + InstanceStateNameShuttingDown = "shutting-down" + // @enum InstanceStateName + InstanceStateNameTerminated = "terminated" + // @enum InstanceStateName + InstanceStateNameStopping = "stopping" + // @enum InstanceStateName + InstanceStateNameStopped = "stopped" +) + +const ( + // @enum InstanceType + InstanceTypeT1Micro = "t1.micro" + // @enum InstanceType + InstanceTypeT2Nano = "t2.nano" + // @enum InstanceType + InstanceTypeT2Micro = "t2.micro" + // @enum InstanceType + InstanceTypeT2Small = "t2.small" + // @enum InstanceType + InstanceTypeT2Medium = "t2.medium" + // @enum InstanceType + InstanceTypeT2Large = "t2.large" + // @enum InstanceType + InstanceTypeM1Small = "m1.small" + // @enum InstanceType + InstanceTypeM1Medium = "m1.medium" + // @enum InstanceType + InstanceTypeM1Large = "m1.large" + // @enum InstanceType + InstanceTypeM1Xlarge = "m1.xlarge" + // @enum InstanceType + InstanceTypeM3Medium = "m3.medium" + // @enum InstanceType + InstanceTypeM3Large = "m3.large" + // @enum InstanceType + InstanceTypeM3Xlarge = "m3.xlarge" + // @enum InstanceType + InstanceTypeM32xlarge = "m3.2xlarge" + // @enum InstanceType + InstanceTypeM4Large = "m4.large" + // @enum InstanceType + InstanceTypeM4Xlarge = "m4.xlarge" + // @enum InstanceType + InstanceTypeM42xlarge = "m4.2xlarge" + // @enum InstanceType + InstanceTypeM44xlarge = "m4.4xlarge" + // @enum InstanceType + InstanceTypeM410xlarge = "m4.10xlarge" + // @enum InstanceType + InstanceTypeM2Xlarge = "m2.xlarge" + // @enum InstanceType + InstanceTypeM22xlarge = "m2.2xlarge" + // @enum InstanceType + InstanceTypeM24xlarge = "m2.4xlarge" + // @enum InstanceType + InstanceTypeCr18xlarge = "cr1.8xlarge" + // @enum InstanceType + InstanceTypeR3Large = "r3.large" + // @enum InstanceType + InstanceTypeR3Xlarge = "r3.xlarge" + // @enum InstanceType + InstanceTypeR32xlarge = "r3.2xlarge" + // @enum InstanceType + InstanceTypeR34xlarge = "r3.4xlarge" + // @enum InstanceType + InstanceTypeR38xlarge = "r3.8xlarge" + // @enum InstanceType + InstanceTypeX14xlarge = "x1.4xlarge" + // @enum InstanceType + InstanceTypeX18xlarge = "x1.8xlarge" + // @enum InstanceType + InstanceTypeX116xlarge = "x1.16xlarge" + // @enum InstanceType + InstanceTypeX132xlarge = "x1.32xlarge" + // @enum InstanceType + InstanceTypeI2Xlarge = "i2.xlarge" + // @enum InstanceType + InstanceTypeI22xlarge = "i2.2xlarge" + // @enum InstanceType + InstanceTypeI24xlarge = "i2.4xlarge" + // @enum InstanceType + InstanceTypeI28xlarge = "i2.8xlarge" + // @enum InstanceType + InstanceTypeHi14xlarge = "hi1.4xlarge" + // @enum InstanceType + InstanceTypeHs18xlarge = "hs1.8xlarge" + // @enum InstanceType + InstanceTypeC1Medium = "c1.medium" + // @enum InstanceType + InstanceTypeC1Xlarge = "c1.xlarge" + // @enum InstanceType + InstanceTypeC3Large = "c3.large" + // @enum InstanceType + InstanceTypeC3Xlarge = "c3.xlarge" + // @enum InstanceType + InstanceTypeC32xlarge = "c3.2xlarge" + // @enum InstanceType + InstanceTypeC34xlarge = "c3.4xlarge" + // @enum InstanceType + InstanceTypeC38xlarge = "c3.8xlarge" + // @enum InstanceType + InstanceTypeC4Large = "c4.large" + // @enum InstanceType + InstanceTypeC4Xlarge = "c4.xlarge" + // @enum InstanceType + InstanceTypeC42xlarge = "c4.2xlarge" 
+ // @enum InstanceType + InstanceTypeC44xlarge = "c4.4xlarge" + // @enum InstanceType + InstanceTypeC48xlarge = "c4.8xlarge" + // @enum InstanceType + InstanceTypeCc14xlarge = "cc1.4xlarge" + // @enum InstanceType + InstanceTypeCc28xlarge = "cc2.8xlarge" + // @enum InstanceType + InstanceTypeG22xlarge = "g2.2xlarge" + // @enum InstanceType + InstanceTypeG28xlarge = "g2.8xlarge" + // @enum InstanceType + InstanceTypeCg14xlarge = "cg1.4xlarge" + // @enum InstanceType + InstanceTypeD2Xlarge = "d2.xlarge" + // @enum InstanceType + InstanceTypeD22xlarge = "d2.2xlarge" + // @enum InstanceType + InstanceTypeD24xlarge = "d2.4xlarge" + // @enum InstanceType + InstanceTypeD28xlarge = "d2.8xlarge" +) + +const ( + // @enum ListingState + ListingStateAvailable = "available" + // @enum ListingState + ListingStateSold = "sold" + // @enum ListingState + ListingStateCancelled = "cancelled" + // @enum ListingState + ListingStatePending = "pending" +) + +const ( + // @enum ListingStatus + ListingStatusActive = "active" + // @enum ListingStatus + ListingStatusPending = "pending" + // @enum ListingStatus + ListingStatusCancelled = "cancelled" + // @enum ListingStatus + ListingStatusClosed = "closed" +) + +const ( + // @enum MonitoringState + MonitoringStateDisabled = "disabled" + // @enum MonitoringState + MonitoringStateDisabling = "disabling" + // @enum MonitoringState + MonitoringStateEnabled = "enabled" + // @enum MonitoringState + MonitoringStatePending = "pending" +) + +const ( + // @enum MoveStatus + MoveStatusMovingToVpc = "movingToVpc" + // @enum MoveStatus + MoveStatusRestoringToClassic = "restoringToClassic" +) + +const ( + // @enum NatGatewayState + NatGatewayStatePending = "pending" + // @enum NatGatewayState + NatGatewayStateFailed = "failed" + // @enum NatGatewayState + NatGatewayStateAvailable = "available" + // @enum NatGatewayState + NatGatewayStateDeleting = "deleting" + // @enum NatGatewayState + NatGatewayStateDeleted = "deleted" +) + +const ( + // @enum NetworkInterfaceAttribute + NetworkInterfaceAttributeDescription = "description" + // @enum NetworkInterfaceAttribute + NetworkInterfaceAttributeGroupSet = "groupSet" + // @enum NetworkInterfaceAttribute + NetworkInterfaceAttributeSourceDestCheck = "sourceDestCheck" + // @enum NetworkInterfaceAttribute + NetworkInterfaceAttributeAttachment = "attachment" +) + +const ( + // @enum NetworkInterfaceStatus + NetworkInterfaceStatusAvailable = "available" + // @enum NetworkInterfaceStatus + NetworkInterfaceStatusAttaching = "attaching" + // @enum NetworkInterfaceStatus + NetworkInterfaceStatusInUse = "in-use" + // @enum NetworkInterfaceStatus + NetworkInterfaceStatusDetaching = "detaching" +) + +const ( + // @enum NetworkInterfaceType + NetworkInterfaceTypeInterface = "interface" + // @enum NetworkInterfaceType + NetworkInterfaceTypeNatGateway = "natGateway" +) + +const ( + // @enum OfferingTypeValues + OfferingTypeValuesHeavyUtilization = "Heavy Utilization" + // @enum OfferingTypeValues + OfferingTypeValuesMediumUtilization = "Medium Utilization" + // @enum OfferingTypeValues + OfferingTypeValuesLightUtilization = "Light Utilization" + // @enum OfferingTypeValues + OfferingTypeValuesNoUpfront = "No Upfront" + // @enum OfferingTypeValues + OfferingTypeValuesPartialUpfront = "Partial Upfront" + // @enum OfferingTypeValues + OfferingTypeValuesAllUpfront = "All Upfront" +) + +const ( + // @enum OperationType + OperationTypeAdd = "add" + // @enum OperationType + OperationTypeRemove = "remove" +) + +const ( + // @enum PermissionGroup + 
PermissionGroupAll = "all" +) + +const ( + // @enum PlacementGroupState + PlacementGroupStatePending = "pending" + // @enum PlacementGroupState + PlacementGroupStateAvailable = "available" + // @enum PlacementGroupState + PlacementGroupStateDeleting = "deleting" + // @enum PlacementGroupState + PlacementGroupStateDeleted = "deleted" +) + +const ( + // @enum PlacementStrategy + PlacementStrategyCluster = "cluster" +) + +const ( + // @enum PlatformValues + PlatformValuesWindows = "Windows" +) + +const ( + // @enum ProductCodeValues + ProductCodeValuesDevpay = "devpay" + // @enum ProductCodeValues + ProductCodeValuesMarketplace = "marketplace" +) + +const ( + // @enum RIProductDescription + RIProductDescriptionLinuxUnix = "Linux/UNIX" + // @enum RIProductDescription + RIProductDescriptionLinuxUnixamazonVpc = "Linux/UNIX (Amazon VPC)" + // @enum RIProductDescription + RIProductDescriptionWindows = "Windows" + // @enum RIProductDescription + RIProductDescriptionWindowsAmazonVpc = "Windows (Amazon VPC)" +) + +const ( + // @enum RecurringChargeFrequency + RecurringChargeFrequencyHourly = "Hourly" +) + +const ( + // @enum ReportInstanceReasonCodes + ReportInstanceReasonCodesInstanceStuckInState = "instance-stuck-in-state" + // @enum ReportInstanceReasonCodes + ReportInstanceReasonCodesUnresponsive = "unresponsive" + // @enum ReportInstanceReasonCodes + ReportInstanceReasonCodesNotAcceptingCredentials = "not-accepting-credentials" + // @enum ReportInstanceReasonCodes + ReportInstanceReasonCodesPasswordNotAvailable = "password-not-available" + // @enum ReportInstanceReasonCodes + ReportInstanceReasonCodesPerformanceNetwork = "performance-network" + // @enum ReportInstanceReasonCodes + ReportInstanceReasonCodesPerformanceInstanceStore = "performance-instance-store" + // @enum ReportInstanceReasonCodes + ReportInstanceReasonCodesPerformanceEbsVolume = "performance-ebs-volume" + // @enum ReportInstanceReasonCodes + ReportInstanceReasonCodesPerformanceOther = "performance-other" + // @enum ReportInstanceReasonCodes + ReportInstanceReasonCodesOther = "other" +) + +const ( + // @enum ReportStatusType + ReportStatusTypeOk = "ok" + // @enum ReportStatusType + ReportStatusTypeImpaired = "impaired" +) + +const ( + // @enum ReservedInstanceState + ReservedInstanceStatePaymentPending = "payment-pending" + // @enum ReservedInstanceState + ReservedInstanceStateActive = "active" + // @enum ReservedInstanceState + ReservedInstanceStatePaymentFailed = "payment-failed" + // @enum ReservedInstanceState + ReservedInstanceStateRetired = "retired" +) + +const ( + // @enum ResetImageAttributeName + ResetImageAttributeNameLaunchPermission = "launchPermission" +) + +const ( + // @enum ResourceType + ResourceTypeCustomerGateway = "customer-gateway" + // @enum ResourceType + ResourceTypeDhcpOptions = "dhcp-options" + // @enum ResourceType + ResourceTypeImage = "image" + // @enum ResourceType + ResourceTypeInstance = "instance" + // @enum ResourceType + ResourceTypeInternetGateway = "internet-gateway" + // @enum ResourceType + ResourceTypeNetworkAcl = "network-acl" + // @enum ResourceType + ResourceTypeNetworkInterface = "network-interface" + // @enum ResourceType + ResourceTypeReservedInstances = "reserved-instances" + // @enum ResourceType + ResourceTypeRouteTable = "route-table" + // @enum ResourceType + ResourceTypeSnapshot = "snapshot" + // @enum ResourceType + ResourceTypeSpotInstancesRequest = "spot-instances-request" + // @enum ResourceType + ResourceTypeSubnet = "subnet" + // @enum ResourceType + 
ResourceTypeSecurityGroup = "security-group" + // @enum ResourceType + ResourceTypeVolume = "volume" + // @enum ResourceType + ResourceTypeVpc = "vpc" + // @enum ResourceType + ResourceTypeVpnConnection = "vpn-connection" + // @enum ResourceType + ResourceTypeVpnGateway = "vpn-gateway" +) + +const ( + // @enum RouteOrigin + RouteOriginCreateRouteTable = "CreateRouteTable" + // @enum RouteOrigin + RouteOriginCreateRoute = "CreateRoute" + // @enum RouteOrigin + RouteOriginEnableVgwRoutePropagation = "EnableVgwRoutePropagation" +) + +const ( + // @enum RouteState + RouteStateActive = "active" + // @enum RouteState + RouteStateBlackhole = "blackhole" +) + +const ( + // @enum RuleAction + RuleActionAllow = "allow" + // @enum RuleAction + RuleActionDeny = "deny" +) + +const ( + // @enum ShutdownBehavior + ShutdownBehaviorStop = "stop" + // @enum ShutdownBehavior + ShutdownBehaviorTerminate = "terminate" +) + +const ( + // @enum SnapshotAttributeName + SnapshotAttributeNameProductCodes = "productCodes" + // @enum SnapshotAttributeName + SnapshotAttributeNameCreateVolumePermission = "createVolumePermission" +) + +const ( + // @enum SnapshotState + SnapshotStatePending = "pending" + // @enum SnapshotState + SnapshotStateCompleted = "completed" + // @enum SnapshotState + SnapshotStateError = "error" +) + +const ( + // @enum SpotInstanceState + SpotInstanceStateOpen = "open" + // @enum SpotInstanceState + SpotInstanceStateActive = "active" + // @enum SpotInstanceState + SpotInstanceStateClosed = "closed" + // @enum SpotInstanceState + SpotInstanceStateCancelled = "cancelled" + // @enum SpotInstanceState + SpotInstanceStateFailed = "failed" +) + +const ( + // @enum SpotInstanceType + SpotInstanceTypeOneTime = "one-time" + // @enum SpotInstanceType + SpotInstanceTypePersistent = "persistent" +) + +const ( + // @enum State + StatePending = "Pending" + // @enum State + StateAvailable = "Available" + // @enum State + StateDeleting = "Deleting" + // @enum State + StateDeleted = "Deleted" +) + +const ( + // @enum Status + StatusMoveInProgress = "MoveInProgress" + // @enum Status + StatusInVpc = "InVpc" + // @enum Status + StatusInClassic = "InClassic" +) + +const ( + // @enum StatusName + StatusNameReachability = "reachability" +) + +const ( + // @enum StatusType + StatusTypePassed = "passed" + // @enum StatusType + StatusTypeFailed = "failed" + // @enum StatusType + StatusTypeInsufficientData = "insufficient-data" + // @enum StatusType + StatusTypeInitializing = "initializing" +) + +const ( + // @enum SubnetState + SubnetStatePending = "pending" + // @enum SubnetState + SubnetStateAvailable = "available" +) + +const ( + // @enum SummaryStatus + SummaryStatusOk = "ok" + // @enum SummaryStatus + SummaryStatusImpaired = "impaired" + // @enum SummaryStatus + SummaryStatusInsufficientData = "insufficient-data" + // @enum SummaryStatus + SummaryStatusNotApplicable = "not-applicable" + // @enum SummaryStatus + SummaryStatusInitializing = "initializing" +) + +const ( + // @enum TelemetryStatus + TelemetryStatusUp = "UP" + // @enum TelemetryStatus + TelemetryStatusDown = "DOWN" +) + +const ( + // @enum Tenancy + TenancyDefault = "default" + // @enum Tenancy + TenancyDedicated = "dedicated" + // @enum Tenancy + TenancyHost = "host" +) + +const ( + // @enum TrafficType + TrafficTypeAccept = "ACCEPT" + // @enum TrafficType + TrafficTypeReject = "REJECT" + // @enum TrafficType + TrafficTypeAll = "ALL" +) + +const ( + // @enum VirtualizationType + VirtualizationTypeHvm = "hvm" + // @enum VirtualizationType + 
VirtualizationTypeParavirtual = "paravirtual" +) + +const ( + // @enum VolumeAttachmentState + VolumeAttachmentStateAttaching = "attaching" + // @enum VolumeAttachmentState + VolumeAttachmentStateAttached = "attached" + // @enum VolumeAttachmentState + VolumeAttachmentStateDetaching = "detaching" + // @enum VolumeAttachmentState + VolumeAttachmentStateDetached = "detached" +) + +const ( + // @enum VolumeAttributeName + VolumeAttributeNameAutoEnableIo = "autoEnableIO" + // @enum VolumeAttributeName + VolumeAttributeNameProductCodes = "productCodes" +) + +const ( + // @enum VolumeState + VolumeStateCreating = "creating" + // @enum VolumeState + VolumeStateAvailable = "available" + // @enum VolumeState + VolumeStateInUse = "in-use" + // @enum VolumeState + VolumeStateDeleting = "deleting" + // @enum VolumeState + VolumeStateDeleted = "deleted" + // @enum VolumeState + VolumeStateError = "error" +) + +const ( + // @enum VolumeStatusInfoStatus + VolumeStatusInfoStatusOk = "ok" + // @enum VolumeStatusInfoStatus + VolumeStatusInfoStatusImpaired = "impaired" + // @enum VolumeStatusInfoStatus + VolumeStatusInfoStatusInsufficientData = "insufficient-data" +) + +const ( + // @enum VolumeStatusName + VolumeStatusNameIoEnabled = "io-enabled" + // @enum VolumeStatusName + VolumeStatusNameIoPerformance = "io-performance" +) + +const ( + // @enum VolumeType + VolumeTypeStandard = "standard" + // @enum VolumeType + VolumeTypeIo1 = "io1" + // @enum VolumeType + VolumeTypeGp2 = "gp2" + // @enum VolumeType + VolumeTypeSc1 = "sc1" + // @enum VolumeType + VolumeTypeSt1 = "st1" +) + +const ( + // @enum VpcAttributeName + VpcAttributeNameEnableDnsSupport = "enableDnsSupport" + // @enum VpcAttributeName + VpcAttributeNameEnableDnsHostnames = "enableDnsHostnames" +) + +const ( + // @enum VpcPeeringConnectionStateReasonCode + VpcPeeringConnectionStateReasonCodeInitiatingRequest = "initiating-request" + // @enum VpcPeeringConnectionStateReasonCode + VpcPeeringConnectionStateReasonCodePendingAcceptance = "pending-acceptance" + // @enum VpcPeeringConnectionStateReasonCode + VpcPeeringConnectionStateReasonCodeActive = "active" + // @enum VpcPeeringConnectionStateReasonCode + VpcPeeringConnectionStateReasonCodeDeleted = "deleted" + // @enum VpcPeeringConnectionStateReasonCode + VpcPeeringConnectionStateReasonCodeRejected = "rejected" + // @enum VpcPeeringConnectionStateReasonCode + VpcPeeringConnectionStateReasonCodeFailed = "failed" + // @enum VpcPeeringConnectionStateReasonCode + VpcPeeringConnectionStateReasonCodeExpired = "expired" + // @enum VpcPeeringConnectionStateReasonCode + VpcPeeringConnectionStateReasonCodeProvisioning = "provisioning" + // @enum VpcPeeringConnectionStateReasonCode + VpcPeeringConnectionStateReasonCodeDeleting = "deleting" +) + +const ( + // @enum VpcState + VpcStatePending = "pending" + // @enum VpcState + VpcStateAvailable = "available" +) + +const ( + // @enum VpnState + VpnStatePending = "pending" + // @enum VpnState + VpnStateAvailable = "available" + // @enum VpnState + VpnStateDeleting = "deleting" + // @enum VpnState + VpnStateDeleted = "deleted" +) + +const ( + // @enum VpnStaticRouteSource + VpnStaticRouteSourceStatic = "Static" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go new file mode 100644 index 000000000..9e94fe671 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go @@ -0,0 +1,55 @@ +package ec2 + +import ( + "time" + + 
"github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/endpoints" +) + +func init() { + initRequest = func(r *request.Request) { + if r.Operation.Name == opCopySnapshot { // fill the PresignedURL parameter + r.Handlers.Build.PushFront(fillPresignedURL) + } + } +} + +func fillPresignedURL(r *request.Request) { + if !r.ParamsFilled() { + return + } + + origParams := r.Params.(*CopySnapshotInput) + + // Stop if PresignedURL/DestinationRegion is set + if origParams.PresignedUrl != nil || origParams.DestinationRegion != nil { + return + } + + origParams.DestinationRegion = r.Config.Region + newParams := awsutil.CopyOf(r.Params).(*CopySnapshotInput) + + // Create a new request based on the existing request. We will use this to + // presign the CopySnapshot request against the source region. + cfg := r.Config.Copy(aws.NewConfig(). + WithEndpoint(""). + WithRegion(aws.StringValue(origParams.SourceRegion))) + + clientInfo := r.ClientInfo + clientInfo.Endpoint, clientInfo.SigningRegion = endpoints.EndpointForRegion( + clientInfo.ServiceName, aws.StringValue(cfg.Region), aws.BoolValue(cfg.DisableSSL)) + + // Presign a CopySnapshot request with modified params + req := request.New(*cfg, clientInfo, r.Handlers, r.Retryer, r.Operation, newParams, r.Data) + url, err := req.Presign(5 * time.Minute) // 5 minutes should be enough. + if err != nil { // bubble error back up to original request + r.Error = err + return + } + + // We have our URL, set it on params + origParams.PresignedUrl = &url +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations_test.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations_test.go new file mode 100644 index 000000000..195d9b55b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations_test.go @@ -0,0 +1,35 @@ +package ec2_test + +import ( + "io/ioutil" + "net/url" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/stretchr/testify/assert" +) + +func TestCopySnapshotPresignedURL(t *testing.T) { + svc := ec2.New(unit.Session, &aws.Config{Region: aws.String("us-west-2")}) + + assert.NotPanics(t, func() { + // Doesn't panic on nil input + req, _ := svc.CopySnapshotRequest(nil) + req.Sign() + }) + + req, _ := svc.CopySnapshotRequest(&ec2.CopySnapshotInput{ + SourceRegion: aws.String("us-west-1"), + SourceSnapshotId: aws.String("snap-id"), + }) + req.Sign() + + b, _ := ioutil.ReadAll(req.HTTPRequest.Body) + q, _ := url.ParseQuery(string(b)) + u, _ := url.QueryUnescape(q.Get("PresignedUrl")) + assert.Equal(t, "us-west-2", q.Get("DestinationRegion")) + assert.Equal(t, "us-west-1", q.Get("SourceRegion")) + assert.Regexp(t, `^https://ec2\.us-west-1\.amazonaws\.com/.+&DestinationRegion=us-west-2`, u) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/ec2iface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/ec2iface/interface.go new file mode 100644 index 000000000..ae63fa722 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/ec2iface/interface.go @@ -0,0 +1,858 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package ec2iface provides an interface for the Amazon Elastic Compute Cloud. +package ec2iface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/ec2" +) + +// EC2API is the interface type for ec2.EC2. 
+type EC2API interface {
+	AcceptVpcPeeringConnectionRequest(*ec2.AcceptVpcPeeringConnectionInput) (*request.Request, *ec2.AcceptVpcPeeringConnectionOutput)
+
+	AcceptVpcPeeringConnection(*ec2.AcceptVpcPeeringConnectionInput) (*ec2.AcceptVpcPeeringConnectionOutput, error)
+
+	AllocateAddressRequest(*ec2.AllocateAddressInput) (*request.Request, *ec2.AllocateAddressOutput)
+
+	AllocateAddress(*ec2.AllocateAddressInput) (*ec2.AllocateAddressOutput, error)
+
+	AllocateHostsRequest(*ec2.AllocateHostsInput) (*request.Request, *ec2.AllocateHostsOutput)
+
+	AllocateHosts(*ec2.AllocateHostsInput) (*ec2.AllocateHostsOutput, error)
+
+	AssignPrivateIpAddressesRequest(*ec2.AssignPrivateIpAddressesInput) (*request.Request, *ec2.AssignPrivateIpAddressesOutput)
+
+	AssignPrivateIpAddresses(*ec2.AssignPrivateIpAddressesInput) (*ec2.AssignPrivateIpAddressesOutput, error)
+
+	AssociateAddressRequest(*ec2.AssociateAddressInput) (*request.Request, *ec2.AssociateAddressOutput)
+
+	AssociateAddress(*ec2.AssociateAddressInput) (*ec2.AssociateAddressOutput, error)
+
+	AssociateDhcpOptionsRequest(*ec2.AssociateDhcpOptionsInput) (*request.Request, *ec2.AssociateDhcpOptionsOutput)
+
+	AssociateDhcpOptions(*ec2.AssociateDhcpOptionsInput) (*ec2.AssociateDhcpOptionsOutput, error)
+
+	AssociateRouteTableRequest(*ec2.AssociateRouteTableInput) (*request.Request, *ec2.AssociateRouteTableOutput)
+
+	AssociateRouteTable(*ec2.AssociateRouteTableInput) (*ec2.AssociateRouteTableOutput, error)
+
+	AttachClassicLinkVpcRequest(*ec2.AttachClassicLinkVpcInput) (*request.Request, *ec2.AttachClassicLinkVpcOutput)
+
+	AttachClassicLinkVpc(*ec2.AttachClassicLinkVpcInput) (*ec2.AttachClassicLinkVpcOutput, error)
+
+	AttachInternetGatewayRequest(*ec2.AttachInternetGatewayInput) (*request.Request, *ec2.AttachInternetGatewayOutput)
+
+	AttachInternetGateway(*ec2.AttachInternetGatewayInput) (*ec2.AttachInternetGatewayOutput, error)
+
+	AttachNetworkInterfaceRequest(*ec2.AttachNetworkInterfaceInput) (*request.Request, *ec2.AttachNetworkInterfaceOutput)
+
+	AttachNetworkInterface(*ec2.AttachNetworkInterfaceInput) (*ec2.AttachNetworkInterfaceOutput, error)
+
+	AttachVolumeRequest(*ec2.AttachVolumeInput) (*request.Request, *ec2.VolumeAttachment)
+
+	AttachVolume(*ec2.AttachVolumeInput) (*ec2.VolumeAttachment, error)
+
+	AttachVpnGatewayRequest(*ec2.AttachVpnGatewayInput) (*request.Request, *ec2.AttachVpnGatewayOutput)
+
+	AttachVpnGateway(*ec2.AttachVpnGatewayInput) (*ec2.AttachVpnGatewayOutput, error)
+
+	AuthorizeSecurityGroupEgressRequest(*ec2.AuthorizeSecurityGroupEgressInput) (*request.Request, *ec2.AuthorizeSecurityGroupEgressOutput)
+
+	AuthorizeSecurityGroupEgress(*ec2.AuthorizeSecurityGroupEgressInput) (*ec2.AuthorizeSecurityGroupEgressOutput, error)
+
+	AuthorizeSecurityGroupIngressRequest(*ec2.AuthorizeSecurityGroupIngressInput) (*request.Request, *ec2.AuthorizeSecurityGroupIngressOutput)
+
+	AuthorizeSecurityGroupIngress(*ec2.AuthorizeSecurityGroupIngressInput) (*ec2.AuthorizeSecurityGroupIngressOutput, error)
+
+	BundleInstanceRequest(*ec2.BundleInstanceInput) (*request.Request, *ec2.BundleInstanceOutput)
+
+	BundleInstance(*ec2.BundleInstanceInput) (*ec2.BundleInstanceOutput, error)
+
+	CancelBundleTaskRequest(*ec2.CancelBundleTaskInput) (*request.Request, *ec2.CancelBundleTaskOutput)
+
+	CancelBundleTask(*ec2.CancelBundleTaskInput) (*ec2.CancelBundleTaskOutput, error)
+
+	CancelConversionTaskRequest(*ec2.CancelConversionTaskInput) (*request.Request, *ec2.CancelConversionTaskOutput)
+
+	CancelConversionTask(*ec2.CancelConversionTaskInput) (*ec2.CancelConversionTaskOutput, error)
+
+	CancelExportTaskRequest(*ec2.CancelExportTaskInput) (*request.Request, *ec2.CancelExportTaskOutput)
+
+	CancelExportTask(*ec2.CancelExportTaskInput) (*ec2.CancelExportTaskOutput, error)
+
+	CancelImportTaskRequest(*ec2.CancelImportTaskInput) (*request.Request, *ec2.CancelImportTaskOutput)
+
+	CancelImportTask(*ec2.CancelImportTaskInput) (*ec2.CancelImportTaskOutput, error)
+
+	CancelReservedInstancesListingRequest(*ec2.CancelReservedInstancesListingInput) (*request.Request, *ec2.CancelReservedInstancesListingOutput)
+
+	CancelReservedInstancesListing(*ec2.CancelReservedInstancesListingInput) (*ec2.CancelReservedInstancesListingOutput, error)
+
+	CancelSpotFleetRequestsRequest(*ec2.CancelSpotFleetRequestsInput) (*request.Request, *ec2.CancelSpotFleetRequestsOutput)
+
+	CancelSpotFleetRequests(*ec2.CancelSpotFleetRequestsInput) (*ec2.CancelSpotFleetRequestsOutput, error)
+
+	CancelSpotInstanceRequestsRequest(*ec2.CancelSpotInstanceRequestsInput) (*request.Request, *ec2.CancelSpotInstanceRequestsOutput)
+
+	CancelSpotInstanceRequests(*ec2.CancelSpotInstanceRequestsInput) (*ec2.CancelSpotInstanceRequestsOutput, error)
+
+	ConfirmProductInstanceRequest(*ec2.ConfirmProductInstanceInput) (*request.Request, *ec2.ConfirmProductInstanceOutput)
+
+	ConfirmProductInstance(*ec2.ConfirmProductInstanceInput) (*ec2.ConfirmProductInstanceOutput, error)
+
+	CopyImageRequest(*ec2.CopyImageInput) (*request.Request, *ec2.CopyImageOutput)
+
+	CopyImage(*ec2.CopyImageInput) (*ec2.CopyImageOutput, error)
+
+	CopySnapshotRequest(*ec2.CopySnapshotInput) (*request.Request, *ec2.CopySnapshotOutput)
+
+	CopySnapshot(*ec2.CopySnapshotInput) (*ec2.CopySnapshotOutput, error)
+
+	CreateCustomerGatewayRequest(*ec2.CreateCustomerGatewayInput) (*request.Request, *ec2.CreateCustomerGatewayOutput)
+
+	CreateCustomerGateway(*ec2.CreateCustomerGatewayInput) (*ec2.CreateCustomerGatewayOutput, error)
+
+	CreateDhcpOptionsRequest(*ec2.CreateDhcpOptionsInput) (*request.Request, *ec2.CreateDhcpOptionsOutput)
+
+	CreateDhcpOptions(*ec2.CreateDhcpOptionsInput) (*ec2.CreateDhcpOptionsOutput, error)
+
+	CreateFlowLogsRequest(*ec2.CreateFlowLogsInput) (*request.Request, *ec2.CreateFlowLogsOutput)
+
+	CreateFlowLogs(*ec2.CreateFlowLogsInput) (*ec2.CreateFlowLogsOutput, error)
+
+	CreateImageRequest(*ec2.CreateImageInput) (*request.Request, *ec2.CreateImageOutput)
+
+	CreateImage(*ec2.CreateImageInput) (*ec2.CreateImageOutput, error)
+
+	CreateInstanceExportTaskRequest(*ec2.CreateInstanceExportTaskInput) (*request.Request, *ec2.CreateInstanceExportTaskOutput)
+
+	CreateInstanceExportTask(*ec2.CreateInstanceExportTaskInput) (*ec2.CreateInstanceExportTaskOutput, error)
+
+	CreateInternetGatewayRequest(*ec2.CreateInternetGatewayInput) (*request.Request, *ec2.CreateInternetGatewayOutput)
+
+	CreateInternetGateway(*ec2.CreateInternetGatewayInput) (*ec2.CreateInternetGatewayOutput, error)
+
+	CreateKeyPairRequest(*ec2.CreateKeyPairInput) (*request.Request, *ec2.CreateKeyPairOutput)
+
+	CreateKeyPair(*ec2.CreateKeyPairInput) (*ec2.CreateKeyPairOutput, error)
+
+	CreateNatGatewayRequest(*ec2.CreateNatGatewayInput) (*request.Request, *ec2.CreateNatGatewayOutput)
+
+	CreateNatGateway(*ec2.CreateNatGatewayInput) (*ec2.CreateNatGatewayOutput, error)
+
+	CreateNetworkAclRequest(*ec2.CreateNetworkAclInput) (*request.Request, *ec2.CreateNetworkAclOutput)
+
+	CreateNetworkAcl(*ec2.CreateNetworkAclInput) (*ec2.CreateNetworkAclOutput, error)
+
+	CreateNetworkAclEntryRequest(*ec2.CreateNetworkAclEntryInput) (*request.Request, *ec2.CreateNetworkAclEntryOutput)
+
+	CreateNetworkAclEntry(*ec2.CreateNetworkAclEntryInput) (*ec2.CreateNetworkAclEntryOutput, error)
+
+	CreateNetworkInterfaceRequest(*ec2.CreateNetworkInterfaceInput) (*request.Request, *ec2.CreateNetworkInterfaceOutput)
+
+	CreateNetworkInterface(*ec2.CreateNetworkInterfaceInput) (*ec2.CreateNetworkInterfaceOutput, error)
+
+	CreatePlacementGroupRequest(*ec2.CreatePlacementGroupInput) (*request.Request, *ec2.CreatePlacementGroupOutput)
+
+	CreatePlacementGroup(*ec2.CreatePlacementGroupInput) (*ec2.CreatePlacementGroupOutput, error)
+
+	CreateReservedInstancesListingRequest(*ec2.CreateReservedInstancesListingInput) (*request.Request, *ec2.CreateReservedInstancesListingOutput)
+
+	CreateReservedInstancesListing(*ec2.CreateReservedInstancesListingInput) (*ec2.CreateReservedInstancesListingOutput, error)
+
+	CreateRouteRequest(*ec2.CreateRouteInput) (*request.Request, *ec2.CreateRouteOutput)
+
+	CreateRoute(*ec2.CreateRouteInput) (*ec2.CreateRouteOutput, error)
+
+	CreateRouteTableRequest(*ec2.CreateRouteTableInput) (*request.Request, *ec2.CreateRouteTableOutput)
+
+	CreateRouteTable(*ec2.CreateRouteTableInput) (*ec2.CreateRouteTableOutput, error)
+
+	CreateSecurityGroupRequest(*ec2.CreateSecurityGroupInput) (*request.Request, *ec2.CreateSecurityGroupOutput)
+
+	CreateSecurityGroup(*ec2.CreateSecurityGroupInput) (*ec2.CreateSecurityGroupOutput, error)
+
+	CreateSnapshotRequest(*ec2.CreateSnapshotInput) (*request.Request, *ec2.Snapshot)
+
+	CreateSnapshot(*ec2.CreateSnapshotInput) (*ec2.Snapshot, error)
+
+	CreateSpotDatafeedSubscriptionRequest(*ec2.CreateSpotDatafeedSubscriptionInput) (*request.Request, *ec2.CreateSpotDatafeedSubscriptionOutput)
+
+	CreateSpotDatafeedSubscription(*ec2.CreateSpotDatafeedSubscriptionInput) (*ec2.CreateSpotDatafeedSubscriptionOutput, error)
+
+	CreateSubnetRequest(*ec2.CreateSubnetInput) (*request.Request, *ec2.CreateSubnetOutput)
+
+	CreateSubnet(*ec2.CreateSubnetInput) (*ec2.CreateSubnetOutput, error)
+
+	CreateTagsRequest(*ec2.CreateTagsInput) (*request.Request, *ec2.CreateTagsOutput)
+
+	CreateTags(*ec2.CreateTagsInput) (*ec2.CreateTagsOutput, error)
+
+	CreateVolumeRequest(*ec2.CreateVolumeInput) (*request.Request, *ec2.Volume)
+
+	CreateVolume(*ec2.CreateVolumeInput) (*ec2.Volume, error)
+
+	CreateVpcRequest(*ec2.CreateVpcInput) (*request.Request, *ec2.CreateVpcOutput)
+
+	CreateVpc(*ec2.CreateVpcInput) (*ec2.CreateVpcOutput, error)
+
+	CreateVpcEndpointRequest(*ec2.CreateVpcEndpointInput) (*request.Request, *ec2.CreateVpcEndpointOutput)
+
+	CreateVpcEndpoint(*ec2.CreateVpcEndpointInput) (*ec2.CreateVpcEndpointOutput, error)
+
+	CreateVpcPeeringConnectionRequest(*ec2.CreateVpcPeeringConnectionInput) (*request.Request, *ec2.CreateVpcPeeringConnectionOutput)
+
+	CreateVpcPeeringConnection(*ec2.CreateVpcPeeringConnectionInput) (*ec2.CreateVpcPeeringConnectionOutput, error)
+
+	CreateVpnConnectionRequest(*ec2.CreateVpnConnectionInput) (*request.Request, *ec2.CreateVpnConnectionOutput)
+
+	CreateVpnConnection(*ec2.CreateVpnConnectionInput) (*ec2.CreateVpnConnectionOutput, error)
+
+	CreateVpnConnectionRouteRequest(*ec2.CreateVpnConnectionRouteInput) (*request.Request, *ec2.CreateVpnConnectionRouteOutput)
+
+	CreateVpnConnectionRoute(*ec2.CreateVpnConnectionRouteInput) (*ec2.CreateVpnConnectionRouteOutput, error)
+
+	CreateVpnGatewayRequest(*ec2.CreateVpnGatewayInput) (*request.Request, *ec2.CreateVpnGatewayOutput)
+
+	CreateVpnGateway(*ec2.CreateVpnGatewayInput) (*ec2.CreateVpnGatewayOutput, error)
+
+	DeleteCustomerGatewayRequest(*ec2.DeleteCustomerGatewayInput) (*request.Request, *ec2.DeleteCustomerGatewayOutput)
+
+	DeleteCustomerGateway(*ec2.DeleteCustomerGatewayInput) (*ec2.DeleteCustomerGatewayOutput, error)
+
+	DeleteDhcpOptionsRequest(*ec2.DeleteDhcpOptionsInput) (*request.Request, *ec2.DeleteDhcpOptionsOutput)
+
+	DeleteDhcpOptions(*ec2.DeleteDhcpOptionsInput) (*ec2.DeleteDhcpOptionsOutput, error)
+
+	DeleteFlowLogsRequest(*ec2.DeleteFlowLogsInput) (*request.Request, *ec2.DeleteFlowLogsOutput)
+
+	DeleteFlowLogs(*ec2.DeleteFlowLogsInput) (*ec2.DeleteFlowLogsOutput, error)
+
+	DeleteInternetGatewayRequest(*ec2.DeleteInternetGatewayInput) (*request.Request, *ec2.DeleteInternetGatewayOutput)
+
+	DeleteInternetGateway(*ec2.DeleteInternetGatewayInput) (*ec2.DeleteInternetGatewayOutput, error)
+
+	DeleteKeyPairRequest(*ec2.DeleteKeyPairInput) (*request.Request, *ec2.DeleteKeyPairOutput)
+
+	DeleteKeyPair(*ec2.DeleteKeyPairInput) (*ec2.DeleteKeyPairOutput, error)
+
+	DeleteNatGatewayRequest(*ec2.DeleteNatGatewayInput) (*request.Request, *ec2.DeleteNatGatewayOutput)
+
+	DeleteNatGateway(*ec2.DeleteNatGatewayInput) (*ec2.DeleteNatGatewayOutput, error)
+
+	DeleteNetworkAclRequest(*ec2.DeleteNetworkAclInput) (*request.Request, *ec2.DeleteNetworkAclOutput)
+
+	DeleteNetworkAcl(*ec2.DeleteNetworkAclInput) (*ec2.DeleteNetworkAclOutput, error)
+
+	DeleteNetworkAclEntryRequest(*ec2.DeleteNetworkAclEntryInput) (*request.Request, *ec2.DeleteNetworkAclEntryOutput)
+
+	DeleteNetworkAclEntry(*ec2.DeleteNetworkAclEntryInput) (*ec2.DeleteNetworkAclEntryOutput, error)
+
+	DeleteNetworkInterfaceRequest(*ec2.DeleteNetworkInterfaceInput) (*request.Request, *ec2.DeleteNetworkInterfaceOutput)
+
+	DeleteNetworkInterface(*ec2.DeleteNetworkInterfaceInput) (*ec2.DeleteNetworkInterfaceOutput, error)
+
+	DeletePlacementGroupRequest(*ec2.DeletePlacementGroupInput) (*request.Request, *ec2.DeletePlacementGroupOutput)
+
+	DeletePlacementGroup(*ec2.DeletePlacementGroupInput) (*ec2.DeletePlacementGroupOutput, error)
+
+	DeleteRouteRequest(*ec2.DeleteRouteInput) (*request.Request, *ec2.DeleteRouteOutput)
+
+	DeleteRoute(*ec2.DeleteRouteInput) (*ec2.DeleteRouteOutput, error)
+
+	DeleteRouteTableRequest(*ec2.DeleteRouteTableInput) (*request.Request, *ec2.DeleteRouteTableOutput)
+
+	DeleteRouteTable(*ec2.DeleteRouteTableInput) (*ec2.DeleteRouteTableOutput, error)
+
+	DeleteSecurityGroupRequest(*ec2.DeleteSecurityGroupInput) (*request.Request, *ec2.DeleteSecurityGroupOutput)
+
+	DeleteSecurityGroup(*ec2.DeleteSecurityGroupInput) (*ec2.DeleteSecurityGroupOutput, error)
+
+	DeleteSnapshotRequest(*ec2.DeleteSnapshotInput) (*request.Request, *ec2.DeleteSnapshotOutput)
+
+	DeleteSnapshot(*ec2.DeleteSnapshotInput) (*ec2.DeleteSnapshotOutput, error)
+
+	DeleteSpotDatafeedSubscriptionRequest(*ec2.DeleteSpotDatafeedSubscriptionInput) (*request.Request, *ec2.DeleteSpotDatafeedSubscriptionOutput)
+
+	DeleteSpotDatafeedSubscription(*ec2.DeleteSpotDatafeedSubscriptionInput) (*ec2.DeleteSpotDatafeedSubscriptionOutput, error)
+
+	DeleteSubnetRequest(*ec2.DeleteSubnetInput) (*request.Request, *ec2.DeleteSubnetOutput)
+
+	DeleteSubnet(*ec2.DeleteSubnetInput) (*ec2.DeleteSubnetOutput, error)
+
+	DeleteTagsRequest(*ec2.DeleteTagsInput) (*request.Request, *ec2.DeleteTagsOutput)
+
+	DeleteTags(*ec2.DeleteTagsInput) (*ec2.DeleteTagsOutput, error)
+
+	DeleteVolumeRequest(*ec2.DeleteVolumeInput) (*request.Request, *ec2.DeleteVolumeOutput)
+
+	DeleteVolume(*ec2.DeleteVolumeInput) (*ec2.DeleteVolumeOutput, error)
+
+	DeleteVpcRequest(*ec2.DeleteVpcInput) (*request.Request, *ec2.DeleteVpcOutput)
+
+	DeleteVpc(*ec2.DeleteVpcInput) (*ec2.DeleteVpcOutput, error)
+
+	DeleteVpcEndpointsRequest(*ec2.DeleteVpcEndpointsInput) (*request.Request, *ec2.DeleteVpcEndpointsOutput)
+
+	DeleteVpcEndpoints(*ec2.DeleteVpcEndpointsInput) (*ec2.DeleteVpcEndpointsOutput, error)
+
+	DeleteVpcPeeringConnectionRequest(*ec2.DeleteVpcPeeringConnectionInput) (*request.Request, *ec2.DeleteVpcPeeringConnectionOutput)
+
+	DeleteVpcPeeringConnection(*ec2.DeleteVpcPeeringConnectionInput) (*ec2.DeleteVpcPeeringConnectionOutput, error)
+
+	DeleteVpnConnectionRequest(*ec2.DeleteVpnConnectionInput) (*request.Request, *ec2.DeleteVpnConnectionOutput)
+
+	DeleteVpnConnection(*ec2.DeleteVpnConnectionInput) (*ec2.DeleteVpnConnectionOutput, error)
+
+	DeleteVpnConnectionRouteRequest(*ec2.DeleteVpnConnectionRouteInput) (*request.Request, *ec2.DeleteVpnConnectionRouteOutput)
+
+	DeleteVpnConnectionRoute(*ec2.DeleteVpnConnectionRouteInput) (*ec2.DeleteVpnConnectionRouteOutput, error)
+
+	DeleteVpnGatewayRequest(*ec2.DeleteVpnGatewayInput) (*request.Request, *ec2.DeleteVpnGatewayOutput)
+
+	DeleteVpnGateway(*ec2.DeleteVpnGatewayInput) (*ec2.DeleteVpnGatewayOutput, error)
+
+	DeregisterImageRequest(*ec2.DeregisterImageInput) (*request.Request, *ec2.DeregisterImageOutput)
+
+	DeregisterImage(*ec2.DeregisterImageInput) (*ec2.DeregisterImageOutput, error)
+
+	DescribeAccountAttributesRequest(*ec2.DescribeAccountAttributesInput) (*request.Request, *ec2.DescribeAccountAttributesOutput)
+
+	DescribeAccountAttributes(*ec2.DescribeAccountAttributesInput) (*ec2.DescribeAccountAttributesOutput, error)
+
+	DescribeAddressesRequest(*ec2.DescribeAddressesInput) (*request.Request, *ec2.DescribeAddressesOutput)
+
+	DescribeAddresses(*ec2.DescribeAddressesInput) (*ec2.DescribeAddressesOutput, error)
+
+	DescribeAvailabilityZonesRequest(*ec2.DescribeAvailabilityZonesInput) (*request.Request, *ec2.DescribeAvailabilityZonesOutput)
+
+	DescribeAvailabilityZones(*ec2.DescribeAvailabilityZonesInput) (*ec2.DescribeAvailabilityZonesOutput, error)
+
+	DescribeBundleTasksRequest(*ec2.DescribeBundleTasksInput) (*request.Request, *ec2.DescribeBundleTasksOutput)
+
+	DescribeBundleTasks(*ec2.DescribeBundleTasksInput) (*ec2.DescribeBundleTasksOutput, error)
+
+	DescribeClassicLinkInstancesRequest(*ec2.DescribeClassicLinkInstancesInput) (*request.Request, *ec2.DescribeClassicLinkInstancesOutput)
+
+	DescribeClassicLinkInstances(*ec2.DescribeClassicLinkInstancesInput) (*ec2.DescribeClassicLinkInstancesOutput, error)
+
+	DescribeConversionTasksRequest(*ec2.DescribeConversionTasksInput) (*request.Request, *ec2.DescribeConversionTasksOutput)
+
+	DescribeConversionTasks(*ec2.DescribeConversionTasksInput) (*ec2.DescribeConversionTasksOutput, error)
+
+	DescribeCustomerGatewaysRequest(*ec2.DescribeCustomerGatewaysInput) (*request.Request, *ec2.DescribeCustomerGatewaysOutput)
+
+	DescribeCustomerGateways(*ec2.DescribeCustomerGatewaysInput) (*ec2.DescribeCustomerGatewaysOutput, error)
+
+	DescribeDhcpOptionsRequest(*ec2.DescribeDhcpOptionsInput) (*request.Request, *ec2.DescribeDhcpOptionsOutput)
+
+	DescribeDhcpOptions(*ec2.DescribeDhcpOptionsInput) (*ec2.DescribeDhcpOptionsOutput, error)
+
+	DescribeExportTasksRequest(*ec2.DescribeExportTasksInput) (*request.Request, *ec2.DescribeExportTasksOutput)
+
+	DescribeExportTasks(*ec2.DescribeExportTasksInput) (*ec2.DescribeExportTasksOutput, error)
+
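+	// Editor's note (a sketch, not generated code): each operation exposes a
+	// plain form and a *Request form that returns the underlying
+	// request.Request for manual signing or sending; list operations such as
+	// DescribeInstances also get a *Pages form that walks every result page:
+	//
+	//	err := svc.DescribeInstancesPages(&ec2.DescribeInstancesInput{},
+	//		func(page *ec2.DescribeInstancesOutput, lastPage bool) bool {
+	//			fmt.Println(len(page.Reservations)) // consume one page
+	//			return !lastPage                    // returning false stops paging
+	//		})
+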
+	DescribeFlowLogsRequest(*ec2.DescribeFlowLogsInput) (*request.Request, *ec2.DescribeFlowLogsOutput)
+
+	DescribeFlowLogs(*ec2.DescribeFlowLogsInput) (*ec2.DescribeFlowLogsOutput, error)
+
+	DescribeHostsRequest(*ec2.DescribeHostsInput) (*request.Request, *ec2.DescribeHostsOutput)
+
+	DescribeHosts(*ec2.DescribeHostsInput) (*ec2.DescribeHostsOutput, error)
+
+	DescribeIdFormatRequest(*ec2.DescribeIdFormatInput) (*request.Request, *ec2.DescribeIdFormatOutput)
+
+	DescribeIdFormat(*ec2.DescribeIdFormatInput) (*ec2.DescribeIdFormatOutput, error)
+
+	DescribeIdentityIdFormatRequest(*ec2.DescribeIdentityIdFormatInput) (*request.Request, *ec2.DescribeIdentityIdFormatOutput)
+
+	DescribeIdentityIdFormat(*ec2.DescribeIdentityIdFormatInput) (*ec2.DescribeIdentityIdFormatOutput, error)
+
+	DescribeImageAttributeRequest(*ec2.DescribeImageAttributeInput) (*request.Request, *ec2.DescribeImageAttributeOutput)
+
+	DescribeImageAttribute(*ec2.DescribeImageAttributeInput) (*ec2.DescribeImageAttributeOutput, error)
+
+	DescribeImagesRequest(*ec2.DescribeImagesInput) (*request.Request, *ec2.DescribeImagesOutput)
+
+	DescribeImages(*ec2.DescribeImagesInput) (*ec2.DescribeImagesOutput, error)
+
+	DescribeImportImageTasksRequest(*ec2.DescribeImportImageTasksInput) (*request.Request, *ec2.DescribeImportImageTasksOutput)
+
+	DescribeImportImageTasks(*ec2.DescribeImportImageTasksInput) (*ec2.DescribeImportImageTasksOutput, error)
+
+	DescribeImportSnapshotTasksRequest(*ec2.DescribeImportSnapshotTasksInput) (*request.Request, *ec2.DescribeImportSnapshotTasksOutput)
+
+	DescribeImportSnapshotTasks(*ec2.DescribeImportSnapshotTasksInput) (*ec2.DescribeImportSnapshotTasksOutput, error)
+
+	DescribeInstanceAttributeRequest(*ec2.DescribeInstanceAttributeInput) (*request.Request, *ec2.DescribeInstanceAttributeOutput)
+
+	DescribeInstanceAttribute(*ec2.DescribeInstanceAttributeInput) (*ec2.DescribeInstanceAttributeOutput, error)
+
+	DescribeInstanceStatusRequest(*ec2.DescribeInstanceStatusInput) (*request.Request, *ec2.DescribeInstanceStatusOutput)
+
+	DescribeInstanceStatus(*ec2.DescribeInstanceStatusInput) (*ec2.DescribeInstanceStatusOutput, error)
+
+	DescribeInstanceStatusPages(*ec2.DescribeInstanceStatusInput, func(*ec2.DescribeInstanceStatusOutput, bool) bool) error
+
+	DescribeInstancesRequest(*ec2.DescribeInstancesInput) (*request.Request, *ec2.DescribeInstancesOutput)
+
+	DescribeInstances(*ec2.DescribeInstancesInput) (*ec2.DescribeInstancesOutput, error)
+
+	DescribeInstancesPages(*ec2.DescribeInstancesInput, func(*ec2.DescribeInstancesOutput, bool) bool) error
+
+	DescribeInternetGatewaysRequest(*ec2.DescribeInternetGatewaysInput) (*request.Request, *ec2.DescribeInternetGatewaysOutput)
+
+	DescribeInternetGateways(*ec2.DescribeInternetGatewaysInput) (*ec2.DescribeInternetGatewaysOutput, error)
+
+	DescribeKeyPairsRequest(*ec2.DescribeKeyPairsInput) (*request.Request, *ec2.DescribeKeyPairsOutput)
+
+	DescribeKeyPairs(*ec2.DescribeKeyPairsInput) (*ec2.DescribeKeyPairsOutput, error)
+
+	DescribeMovingAddressesRequest(*ec2.DescribeMovingAddressesInput) (*request.Request, *ec2.DescribeMovingAddressesOutput)
+
+	DescribeMovingAddresses(*ec2.DescribeMovingAddressesInput) (*ec2.DescribeMovingAddressesOutput, error)
+
+	DescribeNatGatewaysRequest(*ec2.DescribeNatGatewaysInput) (*request.Request, *ec2.DescribeNatGatewaysOutput)
+
+	DescribeNatGateways(*ec2.DescribeNatGatewaysInput) (*ec2.DescribeNatGatewaysOutput, error)
+
+	DescribeNetworkAclsRequest(*ec2.DescribeNetworkAclsInput) (*request.Request, *ec2.DescribeNetworkAclsOutput)
+
+	DescribeNetworkAcls(*ec2.DescribeNetworkAclsInput) (*ec2.DescribeNetworkAclsOutput, error)
+
+	DescribeNetworkInterfaceAttributeRequest(*ec2.DescribeNetworkInterfaceAttributeInput) (*request.Request, *ec2.DescribeNetworkInterfaceAttributeOutput)
+
+	DescribeNetworkInterfaceAttribute(*ec2.DescribeNetworkInterfaceAttributeInput) (*ec2.DescribeNetworkInterfaceAttributeOutput, error)
+
+	DescribeNetworkInterfacesRequest(*ec2.DescribeNetworkInterfacesInput) (*request.Request, *ec2.DescribeNetworkInterfacesOutput)
+
+	DescribeNetworkInterfaces(*ec2.DescribeNetworkInterfacesInput) (*ec2.DescribeNetworkInterfacesOutput, error)
+
+	DescribePlacementGroupsRequest(*ec2.DescribePlacementGroupsInput) (*request.Request, *ec2.DescribePlacementGroupsOutput)
+
+	DescribePlacementGroups(*ec2.DescribePlacementGroupsInput) (*ec2.DescribePlacementGroupsOutput, error)
+
+	DescribePrefixListsRequest(*ec2.DescribePrefixListsInput) (*request.Request, *ec2.DescribePrefixListsOutput)
+
+	DescribePrefixLists(*ec2.DescribePrefixListsInput) (*ec2.DescribePrefixListsOutput, error)
+
+	DescribeRegionsRequest(*ec2.DescribeRegionsInput) (*request.Request, *ec2.DescribeRegionsOutput)
+
+	DescribeRegions(*ec2.DescribeRegionsInput) (*ec2.DescribeRegionsOutput, error)
+
+	DescribeReservedInstancesRequest(*ec2.DescribeReservedInstancesInput) (*request.Request, *ec2.DescribeReservedInstancesOutput)
+
+	DescribeReservedInstances(*ec2.DescribeReservedInstancesInput) (*ec2.DescribeReservedInstancesOutput, error)
+
+	DescribeReservedInstancesListingsRequest(*ec2.DescribeReservedInstancesListingsInput) (*request.Request, *ec2.DescribeReservedInstancesListingsOutput)
+
+	DescribeReservedInstancesListings(*ec2.DescribeReservedInstancesListingsInput) (*ec2.DescribeReservedInstancesListingsOutput, error)
+
+	DescribeReservedInstancesModificationsRequest(*ec2.DescribeReservedInstancesModificationsInput) (*request.Request, *ec2.DescribeReservedInstancesModificationsOutput)
+
+	DescribeReservedInstancesModifications(*ec2.DescribeReservedInstancesModificationsInput) (*ec2.DescribeReservedInstancesModificationsOutput, error)
+
+	DescribeReservedInstancesModificationsPages(*ec2.DescribeReservedInstancesModificationsInput, func(*ec2.DescribeReservedInstancesModificationsOutput, bool) bool) error
+
+	DescribeReservedInstancesOfferingsRequest(*ec2.DescribeReservedInstancesOfferingsInput) (*request.Request, *ec2.DescribeReservedInstancesOfferingsOutput)
+
+	DescribeReservedInstancesOfferings(*ec2.DescribeReservedInstancesOfferingsInput) (*ec2.DescribeReservedInstancesOfferingsOutput, error)
+
+	DescribeReservedInstancesOfferingsPages(*ec2.DescribeReservedInstancesOfferingsInput, func(*ec2.DescribeReservedInstancesOfferingsOutput, bool) bool) error
+
+	DescribeRouteTablesRequest(*ec2.DescribeRouteTablesInput) (*request.Request, *ec2.DescribeRouteTablesOutput)
+
+	DescribeRouteTables(*ec2.DescribeRouteTablesInput) (*ec2.DescribeRouteTablesOutput, error)
+
+	DescribeScheduledInstanceAvailabilityRequest(*ec2.DescribeScheduledInstanceAvailabilityInput) (*request.Request, *ec2.DescribeScheduledInstanceAvailabilityOutput)
+
+	DescribeScheduledInstanceAvailability(*ec2.DescribeScheduledInstanceAvailabilityInput) (*ec2.DescribeScheduledInstanceAvailabilityOutput, error)
+
+	DescribeScheduledInstancesRequest(*ec2.DescribeScheduledInstancesInput) (*request.Request, *ec2.DescribeScheduledInstancesOutput)
+
+	DescribeScheduledInstances(*ec2.DescribeScheduledInstancesInput) (*ec2.DescribeScheduledInstancesOutput, error)
+
+	DescribeSecurityGroupReferencesRequest(*ec2.DescribeSecurityGroupReferencesInput) (*request.Request, *ec2.DescribeSecurityGroupReferencesOutput)
+
+	DescribeSecurityGroupReferences(*ec2.DescribeSecurityGroupReferencesInput) (*ec2.DescribeSecurityGroupReferencesOutput, error)
+
+	DescribeSecurityGroupsRequest(*ec2.DescribeSecurityGroupsInput) (*request.Request, *ec2.DescribeSecurityGroupsOutput)
+
+	DescribeSecurityGroups(*ec2.DescribeSecurityGroupsInput) (*ec2.DescribeSecurityGroupsOutput, error)
+
+	DescribeSnapshotAttributeRequest(*ec2.DescribeSnapshotAttributeInput) (*request.Request, *ec2.DescribeSnapshotAttributeOutput)
+
+	DescribeSnapshotAttribute(*ec2.DescribeSnapshotAttributeInput) (*ec2.DescribeSnapshotAttributeOutput, error)
+
+	DescribeSnapshotsRequest(*ec2.DescribeSnapshotsInput) (*request.Request, *ec2.DescribeSnapshotsOutput)
+
+	DescribeSnapshots(*ec2.DescribeSnapshotsInput) (*ec2.DescribeSnapshotsOutput, error)
+
+	DescribeSnapshotsPages(*ec2.DescribeSnapshotsInput, func(*ec2.DescribeSnapshotsOutput, bool) bool) error
+
+	DescribeSpotDatafeedSubscriptionRequest(*ec2.DescribeSpotDatafeedSubscriptionInput) (*request.Request, *ec2.DescribeSpotDatafeedSubscriptionOutput)
+
+	DescribeSpotDatafeedSubscription(*ec2.DescribeSpotDatafeedSubscriptionInput) (*ec2.DescribeSpotDatafeedSubscriptionOutput, error)
+
+	DescribeSpotFleetInstancesRequest(*ec2.DescribeSpotFleetInstancesInput) (*request.Request, *ec2.DescribeSpotFleetInstancesOutput)
+
+	DescribeSpotFleetInstances(*ec2.DescribeSpotFleetInstancesInput) (*ec2.DescribeSpotFleetInstancesOutput, error)
+
+	DescribeSpotFleetRequestHistoryRequest(*ec2.DescribeSpotFleetRequestHistoryInput) (*request.Request, *ec2.DescribeSpotFleetRequestHistoryOutput)
+
+	DescribeSpotFleetRequestHistory(*ec2.DescribeSpotFleetRequestHistoryInput) (*ec2.DescribeSpotFleetRequestHistoryOutput, error)
+
+	DescribeSpotFleetRequestsRequest(*ec2.DescribeSpotFleetRequestsInput) (*request.Request, *ec2.DescribeSpotFleetRequestsOutput)
+
+	DescribeSpotFleetRequests(*ec2.DescribeSpotFleetRequestsInput) (*ec2.DescribeSpotFleetRequestsOutput, error)
+
+	DescribeSpotFleetRequestsPages(*ec2.DescribeSpotFleetRequestsInput, func(*ec2.DescribeSpotFleetRequestsOutput, bool) bool) error
+
+	DescribeSpotInstanceRequestsRequest(*ec2.DescribeSpotInstanceRequestsInput) (*request.Request, *ec2.DescribeSpotInstanceRequestsOutput)
+
+	DescribeSpotInstanceRequests(*ec2.DescribeSpotInstanceRequestsInput) (*ec2.DescribeSpotInstanceRequestsOutput, error)
+
+	DescribeSpotPriceHistoryRequest(*ec2.DescribeSpotPriceHistoryInput) (*request.Request, *ec2.DescribeSpotPriceHistoryOutput)
+
+	DescribeSpotPriceHistory(*ec2.DescribeSpotPriceHistoryInput) (*ec2.DescribeSpotPriceHistoryOutput, error)
+
+	DescribeSpotPriceHistoryPages(*ec2.DescribeSpotPriceHistoryInput, func(*ec2.DescribeSpotPriceHistoryOutput, bool) bool) error
+
+	DescribeStaleSecurityGroupsRequest(*ec2.DescribeStaleSecurityGroupsInput) (*request.Request, *ec2.DescribeStaleSecurityGroupsOutput)
+
+	DescribeStaleSecurityGroups(*ec2.DescribeStaleSecurityGroupsInput) (*ec2.DescribeStaleSecurityGroupsOutput, error)
+
+	DescribeSubnetsRequest(*ec2.DescribeSubnetsInput) (*request.Request, *ec2.DescribeSubnetsOutput)
+
+	DescribeSubnets(*ec2.DescribeSubnetsInput) (*ec2.DescribeSubnetsOutput, error)
+
+	DescribeTagsRequest(*ec2.DescribeTagsInput) (*request.Request, *ec2.DescribeTagsOutput)
+
+	DescribeTags(*ec2.DescribeTagsInput) (*ec2.DescribeTagsOutput, error)
+
+	DescribeTagsPages(*ec2.DescribeTagsInput, func(*ec2.DescribeTagsOutput, bool) bool) error
+
+	DescribeVolumeAttributeRequest(*ec2.DescribeVolumeAttributeInput) (*request.Request, *ec2.DescribeVolumeAttributeOutput)
+
+	DescribeVolumeAttribute(*ec2.DescribeVolumeAttributeInput) (*ec2.DescribeVolumeAttributeOutput, error)
+
+	DescribeVolumeStatusRequest(*ec2.DescribeVolumeStatusInput) (*request.Request, *ec2.DescribeVolumeStatusOutput)
+
+	DescribeVolumeStatus(*ec2.DescribeVolumeStatusInput) (*ec2.DescribeVolumeStatusOutput, error)
+
+	DescribeVolumeStatusPages(*ec2.DescribeVolumeStatusInput, func(*ec2.DescribeVolumeStatusOutput, bool) bool) error
+
+	DescribeVolumesRequest(*ec2.DescribeVolumesInput) (*request.Request, *ec2.DescribeVolumesOutput)
+
+	DescribeVolumes(*ec2.DescribeVolumesInput) (*ec2.DescribeVolumesOutput, error)
+
+	DescribeVolumesPages(*ec2.DescribeVolumesInput, func(*ec2.DescribeVolumesOutput, bool) bool) error
+
+	DescribeVpcAttributeRequest(*ec2.DescribeVpcAttributeInput) (*request.Request, *ec2.DescribeVpcAttributeOutput)
+
+	DescribeVpcAttribute(*ec2.DescribeVpcAttributeInput) (*ec2.DescribeVpcAttributeOutput, error)
+
+	DescribeVpcClassicLinkRequest(*ec2.DescribeVpcClassicLinkInput) (*request.Request, *ec2.DescribeVpcClassicLinkOutput)
+
+	DescribeVpcClassicLink(*ec2.DescribeVpcClassicLinkInput) (*ec2.DescribeVpcClassicLinkOutput, error)
+
+	DescribeVpcClassicLinkDnsSupportRequest(*ec2.DescribeVpcClassicLinkDnsSupportInput) (*request.Request, *ec2.DescribeVpcClassicLinkDnsSupportOutput)
+
+	DescribeVpcClassicLinkDnsSupport(*ec2.DescribeVpcClassicLinkDnsSupportInput) (*ec2.DescribeVpcClassicLinkDnsSupportOutput, error)
+
+	DescribeVpcEndpointServicesRequest(*ec2.DescribeVpcEndpointServicesInput) (*request.Request, *ec2.DescribeVpcEndpointServicesOutput)
+
+	DescribeVpcEndpointServices(*ec2.DescribeVpcEndpointServicesInput) (*ec2.DescribeVpcEndpointServicesOutput, error)
+
+	DescribeVpcEndpointsRequest(*ec2.DescribeVpcEndpointsInput) (*request.Request, *ec2.DescribeVpcEndpointsOutput)
+
+	DescribeVpcEndpoints(*ec2.DescribeVpcEndpointsInput) (*ec2.DescribeVpcEndpointsOutput, error)
+
+	DescribeVpcPeeringConnectionsRequest(*ec2.DescribeVpcPeeringConnectionsInput) (*request.Request, *ec2.DescribeVpcPeeringConnectionsOutput)
+
+	DescribeVpcPeeringConnections(*ec2.DescribeVpcPeeringConnectionsInput) (*ec2.DescribeVpcPeeringConnectionsOutput, error)
+
+	DescribeVpcsRequest(*ec2.DescribeVpcsInput) (*request.Request, *ec2.DescribeVpcsOutput)
+
+	DescribeVpcs(*ec2.DescribeVpcsInput) (*ec2.DescribeVpcsOutput, error)
+
+	DescribeVpnConnectionsRequest(*ec2.DescribeVpnConnectionsInput) (*request.Request, *ec2.DescribeVpnConnectionsOutput)
+
+	DescribeVpnConnections(*ec2.DescribeVpnConnectionsInput) (*ec2.DescribeVpnConnectionsOutput, error)
+
+	DescribeVpnGatewaysRequest(*ec2.DescribeVpnGatewaysInput) (*request.Request, *ec2.DescribeVpnGatewaysOutput)
+
+	DescribeVpnGateways(*ec2.DescribeVpnGatewaysInput) (*ec2.DescribeVpnGatewaysOutput, error)
+
+	DetachClassicLinkVpcRequest(*ec2.DetachClassicLinkVpcInput) (*request.Request, *ec2.DetachClassicLinkVpcOutput)
+
+	DetachClassicLinkVpc(*ec2.DetachClassicLinkVpcInput) (*ec2.DetachClassicLinkVpcOutput, error)
+
+	DetachInternetGatewayRequest(*ec2.DetachInternetGatewayInput) (*request.Request, *ec2.DetachInternetGatewayOutput)
+
+	DetachInternetGateway(*ec2.DetachInternetGatewayInput) (*ec2.DetachInternetGatewayOutput, error)
+
+	DetachNetworkInterfaceRequest(*ec2.DetachNetworkInterfaceInput) (*request.Request, *ec2.DetachNetworkInterfaceOutput)
+
+	DetachNetworkInterface(*ec2.DetachNetworkInterfaceInput) (*ec2.DetachNetworkInterfaceOutput, error)
+
+	DetachVolumeRequest(*ec2.DetachVolumeInput) (*request.Request, *ec2.VolumeAttachment)
+
+	DetachVolume(*ec2.DetachVolumeInput) (*ec2.VolumeAttachment, error)
+
+	DetachVpnGatewayRequest(*ec2.DetachVpnGatewayInput) (*request.Request, *ec2.DetachVpnGatewayOutput)
+
+	DetachVpnGateway(*ec2.DetachVpnGatewayInput) (*ec2.DetachVpnGatewayOutput, error)
+
+	DisableVgwRoutePropagationRequest(*ec2.DisableVgwRoutePropagationInput) (*request.Request, *ec2.DisableVgwRoutePropagationOutput)
+
+	DisableVgwRoutePropagation(*ec2.DisableVgwRoutePropagationInput) (*ec2.DisableVgwRoutePropagationOutput, error)
+
+	DisableVpcClassicLinkRequest(*ec2.DisableVpcClassicLinkInput) (*request.Request, *ec2.DisableVpcClassicLinkOutput)
+
+	DisableVpcClassicLink(*ec2.DisableVpcClassicLinkInput) (*ec2.DisableVpcClassicLinkOutput, error)
+
+	DisableVpcClassicLinkDnsSupportRequest(*ec2.DisableVpcClassicLinkDnsSupportInput) (*request.Request, *ec2.DisableVpcClassicLinkDnsSupportOutput)
+
+	DisableVpcClassicLinkDnsSupport(*ec2.DisableVpcClassicLinkDnsSupportInput) (*ec2.DisableVpcClassicLinkDnsSupportOutput, error)
+
+	DisassociateAddressRequest(*ec2.DisassociateAddressInput) (*request.Request, *ec2.DisassociateAddressOutput)
+
+	DisassociateAddress(*ec2.DisassociateAddressInput) (*ec2.DisassociateAddressOutput, error)
+
+	DisassociateRouteTableRequest(*ec2.DisassociateRouteTableInput) (*request.Request, *ec2.DisassociateRouteTableOutput)
+
+	DisassociateRouteTable(*ec2.DisassociateRouteTableInput) (*ec2.DisassociateRouteTableOutput, error)
+
+	EnableVgwRoutePropagationRequest(*ec2.EnableVgwRoutePropagationInput) (*request.Request, *ec2.EnableVgwRoutePropagationOutput)
+
+	EnableVgwRoutePropagation(*ec2.EnableVgwRoutePropagationInput) (*ec2.EnableVgwRoutePropagationOutput, error)
+
+	EnableVolumeIORequest(*ec2.EnableVolumeIOInput) (*request.Request, *ec2.EnableVolumeIOOutput)
+
+	EnableVolumeIO(*ec2.EnableVolumeIOInput) (*ec2.EnableVolumeIOOutput, error)
+
+	EnableVpcClassicLinkRequest(*ec2.EnableVpcClassicLinkInput) (*request.Request, *ec2.EnableVpcClassicLinkOutput)
+
+	EnableVpcClassicLink(*ec2.EnableVpcClassicLinkInput) (*ec2.EnableVpcClassicLinkOutput, error)
+
+	EnableVpcClassicLinkDnsSupportRequest(*ec2.EnableVpcClassicLinkDnsSupportInput) (*request.Request, *ec2.EnableVpcClassicLinkDnsSupportOutput)
+
+	EnableVpcClassicLinkDnsSupport(*ec2.EnableVpcClassicLinkDnsSupportInput) (*ec2.EnableVpcClassicLinkDnsSupportOutput, error)
+
+	GetConsoleOutputRequest(*ec2.GetConsoleOutputInput) (*request.Request, *ec2.GetConsoleOutputOutput)
+
+	GetConsoleOutput(*ec2.GetConsoleOutputInput) (*ec2.GetConsoleOutputOutput, error)
+
+	GetConsoleScreenshotRequest(*ec2.GetConsoleScreenshotInput) (*request.Request, *ec2.GetConsoleScreenshotOutput)
+
+	GetConsoleScreenshot(*ec2.GetConsoleScreenshotInput) (*ec2.GetConsoleScreenshotOutput, error)
+
+	GetPasswordDataRequest(*ec2.GetPasswordDataInput) (*request.Request, *ec2.GetPasswordDataOutput)
+
+	GetPasswordData(*ec2.GetPasswordDataInput) (*ec2.GetPasswordDataOutput, error)
+
+	ImportImageRequest(*ec2.ImportImageInput) (*request.Request, *ec2.ImportImageOutput)
+
+	ImportImage(*ec2.ImportImageInput) (*ec2.ImportImageOutput, error)
+
+	ImportInstanceRequest(*ec2.ImportInstanceInput) (*request.Request, *ec2.ImportInstanceOutput)
+
+	ImportInstance(*ec2.ImportInstanceInput) (*ec2.ImportInstanceOutput, error)
+
+	ImportKeyPairRequest(*ec2.ImportKeyPairInput) (*request.Request, *ec2.ImportKeyPairOutput)
+
+	ImportKeyPair(*ec2.ImportKeyPairInput) (*ec2.ImportKeyPairOutput, error)
+
+	ImportSnapshotRequest(*ec2.ImportSnapshotInput) (*request.Request, *ec2.ImportSnapshotOutput)
+
+	ImportSnapshot(*ec2.ImportSnapshotInput) (*ec2.ImportSnapshotOutput, error)
+
+	ImportVolumeRequest(*ec2.ImportVolumeInput) (*request.Request, *ec2.ImportVolumeOutput)
+
+	ImportVolume(*ec2.ImportVolumeInput) (*ec2.ImportVolumeOutput, error)
+
+	ModifyHostsRequest(*ec2.ModifyHostsInput) (*request.Request, *ec2.ModifyHostsOutput)
+
+	ModifyHosts(*ec2.ModifyHostsInput) (*ec2.ModifyHostsOutput, error)
+
+	ModifyIdFormatRequest(*ec2.ModifyIdFormatInput) (*request.Request, *ec2.ModifyIdFormatOutput)
+
+	ModifyIdFormat(*ec2.ModifyIdFormatInput) (*ec2.ModifyIdFormatOutput, error)
+
+	ModifyIdentityIdFormatRequest(*ec2.ModifyIdentityIdFormatInput) (*request.Request, *ec2.ModifyIdentityIdFormatOutput)
+
+	ModifyIdentityIdFormat(*ec2.ModifyIdentityIdFormatInput) (*ec2.ModifyIdentityIdFormatOutput, error)
+
+	ModifyImageAttributeRequest(*ec2.ModifyImageAttributeInput) (*request.Request, *ec2.ModifyImageAttributeOutput)
+
+	ModifyImageAttribute(*ec2.ModifyImageAttributeInput) (*ec2.ModifyImageAttributeOutput, error)
+
+	ModifyInstanceAttributeRequest(*ec2.ModifyInstanceAttributeInput) (*request.Request, *ec2.ModifyInstanceAttributeOutput)
+
+	ModifyInstanceAttribute(*ec2.ModifyInstanceAttributeInput) (*ec2.ModifyInstanceAttributeOutput, error)
+
+	ModifyInstancePlacementRequest(*ec2.ModifyInstancePlacementInput) (*request.Request, *ec2.ModifyInstancePlacementOutput)
+
+	ModifyInstancePlacement(*ec2.ModifyInstancePlacementInput) (*ec2.ModifyInstancePlacementOutput, error)
+
+	ModifyNetworkInterfaceAttributeRequest(*ec2.ModifyNetworkInterfaceAttributeInput) (*request.Request, *ec2.ModifyNetworkInterfaceAttributeOutput)
+
+	ModifyNetworkInterfaceAttribute(*ec2.ModifyNetworkInterfaceAttributeInput) (*ec2.ModifyNetworkInterfaceAttributeOutput, error)
+
+	ModifyReservedInstancesRequest(*ec2.ModifyReservedInstancesInput) (*request.Request, *ec2.ModifyReservedInstancesOutput)
+
+	ModifyReservedInstances(*ec2.ModifyReservedInstancesInput) (*ec2.ModifyReservedInstancesOutput, error)
+
+	ModifySnapshotAttributeRequest(*ec2.ModifySnapshotAttributeInput) (*request.Request, *ec2.ModifySnapshotAttributeOutput)
+
+	ModifySnapshotAttribute(*ec2.ModifySnapshotAttributeInput) (*ec2.ModifySnapshotAttributeOutput, error)
+
+	ModifySpotFleetRequestRequest(*ec2.ModifySpotFleetRequestInput) (*request.Request, *ec2.ModifySpotFleetRequestOutput)
+
+	ModifySpotFleetRequest(*ec2.ModifySpotFleetRequestInput) (*ec2.ModifySpotFleetRequestOutput, error)
+
+	ModifySubnetAttributeRequest(*ec2.ModifySubnetAttributeInput) (*request.Request, *ec2.ModifySubnetAttributeOutput)
+
+	ModifySubnetAttribute(*ec2.ModifySubnetAttributeInput) (*ec2.ModifySubnetAttributeOutput, error)
+
+	ModifyVolumeAttributeRequest(*ec2.ModifyVolumeAttributeInput) (*request.Request, *ec2.ModifyVolumeAttributeOutput)
+
+	ModifyVolumeAttribute(*ec2.ModifyVolumeAttributeInput) (*ec2.ModifyVolumeAttributeOutput, error)
+
+	ModifyVpcAttributeRequest(*ec2.ModifyVpcAttributeInput) (*request.Request, *ec2.ModifyVpcAttributeOutput)
+
+	ModifyVpcAttribute(*ec2.ModifyVpcAttributeInput) (*ec2.ModifyVpcAttributeOutput, error)
+
+	ModifyVpcEndpointRequest(*ec2.ModifyVpcEndpointInput) (*request.Request, *ec2.ModifyVpcEndpointOutput)
+
+	ModifyVpcEndpoint(*ec2.ModifyVpcEndpointInput) (*ec2.ModifyVpcEndpointOutput, error)
+
ModifyVpcPeeringConnectionOptionsRequest(*ec2.ModifyVpcPeeringConnectionOptionsInput) (*request.Request, *ec2.ModifyVpcPeeringConnectionOptionsOutput) + + ModifyVpcPeeringConnectionOptions(*ec2.ModifyVpcPeeringConnectionOptionsInput) (*ec2.ModifyVpcPeeringConnectionOptionsOutput, error) + + MonitorInstancesRequest(*ec2.MonitorInstancesInput) (*request.Request, *ec2.MonitorInstancesOutput) + + MonitorInstances(*ec2.MonitorInstancesInput) (*ec2.MonitorInstancesOutput, error) + + MoveAddressToVpcRequest(*ec2.MoveAddressToVpcInput) (*request.Request, *ec2.MoveAddressToVpcOutput) + + MoveAddressToVpc(*ec2.MoveAddressToVpcInput) (*ec2.MoveAddressToVpcOutput, error) + + PurchaseReservedInstancesOfferingRequest(*ec2.PurchaseReservedInstancesOfferingInput) (*request.Request, *ec2.PurchaseReservedInstancesOfferingOutput) + + PurchaseReservedInstancesOffering(*ec2.PurchaseReservedInstancesOfferingInput) (*ec2.PurchaseReservedInstancesOfferingOutput, error) + + PurchaseScheduledInstancesRequest(*ec2.PurchaseScheduledInstancesInput) (*request.Request, *ec2.PurchaseScheduledInstancesOutput) + + PurchaseScheduledInstances(*ec2.PurchaseScheduledInstancesInput) (*ec2.PurchaseScheduledInstancesOutput, error) + + RebootInstancesRequest(*ec2.RebootInstancesInput) (*request.Request, *ec2.RebootInstancesOutput) + + RebootInstances(*ec2.RebootInstancesInput) (*ec2.RebootInstancesOutput, error) + + RegisterImageRequest(*ec2.RegisterImageInput) (*request.Request, *ec2.RegisterImageOutput) + + RegisterImage(*ec2.RegisterImageInput) (*ec2.RegisterImageOutput, error) + + RejectVpcPeeringConnectionRequest(*ec2.RejectVpcPeeringConnectionInput) (*request.Request, *ec2.RejectVpcPeeringConnectionOutput) + + RejectVpcPeeringConnection(*ec2.RejectVpcPeeringConnectionInput) (*ec2.RejectVpcPeeringConnectionOutput, error) + + ReleaseAddressRequest(*ec2.ReleaseAddressInput) (*request.Request, *ec2.ReleaseAddressOutput) + + ReleaseAddress(*ec2.ReleaseAddressInput) (*ec2.ReleaseAddressOutput, error) + + ReleaseHostsRequest(*ec2.ReleaseHostsInput) (*request.Request, *ec2.ReleaseHostsOutput) + + ReleaseHosts(*ec2.ReleaseHostsInput) (*ec2.ReleaseHostsOutput, error) + + ReplaceNetworkAclAssociationRequest(*ec2.ReplaceNetworkAclAssociationInput) (*request.Request, *ec2.ReplaceNetworkAclAssociationOutput) + + ReplaceNetworkAclAssociation(*ec2.ReplaceNetworkAclAssociationInput) (*ec2.ReplaceNetworkAclAssociationOutput, error) + + ReplaceNetworkAclEntryRequest(*ec2.ReplaceNetworkAclEntryInput) (*request.Request, *ec2.ReplaceNetworkAclEntryOutput) + + ReplaceNetworkAclEntry(*ec2.ReplaceNetworkAclEntryInput) (*ec2.ReplaceNetworkAclEntryOutput, error) + + ReplaceRouteRequest(*ec2.ReplaceRouteInput) (*request.Request, *ec2.ReplaceRouteOutput) + + ReplaceRoute(*ec2.ReplaceRouteInput) (*ec2.ReplaceRouteOutput, error) + + ReplaceRouteTableAssociationRequest(*ec2.ReplaceRouteTableAssociationInput) (*request.Request, *ec2.ReplaceRouteTableAssociationOutput) + + ReplaceRouteTableAssociation(*ec2.ReplaceRouteTableAssociationInput) (*ec2.ReplaceRouteTableAssociationOutput, error) + + ReportInstanceStatusRequest(*ec2.ReportInstanceStatusInput) (*request.Request, *ec2.ReportInstanceStatusOutput) + + ReportInstanceStatus(*ec2.ReportInstanceStatusInput) (*ec2.ReportInstanceStatusOutput, error) + + RequestSpotFleetRequest(*ec2.RequestSpotFleetInput) (*request.Request, *ec2.RequestSpotFleetOutput) + + RequestSpotFleet(*ec2.RequestSpotFleetInput) (*ec2.RequestSpotFleetOutput, error) + + RequestSpotInstancesRequest(*ec2.RequestSpotInstancesInput) 
(*request.Request, *ec2.RequestSpotInstancesOutput) + + RequestSpotInstances(*ec2.RequestSpotInstancesInput) (*ec2.RequestSpotInstancesOutput, error) + + ResetImageAttributeRequest(*ec2.ResetImageAttributeInput) (*request.Request, *ec2.ResetImageAttributeOutput) + + ResetImageAttribute(*ec2.ResetImageAttributeInput) (*ec2.ResetImageAttributeOutput, error) + + ResetInstanceAttributeRequest(*ec2.ResetInstanceAttributeInput) (*request.Request, *ec2.ResetInstanceAttributeOutput) + + ResetInstanceAttribute(*ec2.ResetInstanceAttributeInput) (*ec2.ResetInstanceAttributeOutput, error) + + ResetNetworkInterfaceAttributeRequest(*ec2.ResetNetworkInterfaceAttributeInput) (*request.Request, *ec2.ResetNetworkInterfaceAttributeOutput) + + ResetNetworkInterfaceAttribute(*ec2.ResetNetworkInterfaceAttributeInput) (*ec2.ResetNetworkInterfaceAttributeOutput, error) + + ResetSnapshotAttributeRequest(*ec2.ResetSnapshotAttributeInput) (*request.Request, *ec2.ResetSnapshotAttributeOutput) + + ResetSnapshotAttribute(*ec2.ResetSnapshotAttributeInput) (*ec2.ResetSnapshotAttributeOutput, error) + + RestoreAddressToClassicRequest(*ec2.RestoreAddressToClassicInput) (*request.Request, *ec2.RestoreAddressToClassicOutput) + + RestoreAddressToClassic(*ec2.RestoreAddressToClassicInput) (*ec2.RestoreAddressToClassicOutput, error) + + RevokeSecurityGroupEgressRequest(*ec2.RevokeSecurityGroupEgressInput) (*request.Request, *ec2.RevokeSecurityGroupEgressOutput) + + RevokeSecurityGroupEgress(*ec2.RevokeSecurityGroupEgressInput) (*ec2.RevokeSecurityGroupEgressOutput, error) + + RevokeSecurityGroupIngressRequest(*ec2.RevokeSecurityGroupIngressInput) (*request.Request, *ec2.RevokeSecurityGroupIngressOutput) + + RevokeSecurityGroupIngress(*ec2.RevokeSecurityGroupIngressInput) (*ec2.RevokeSecurityGroupIngressOutput, error) + + RunInstancesRequest(*ec2.RunInstancesInput) (*request.Request, *ec2.Reservation) + + RunInstances(*ec2.RunInstancesInput) (*ec2.Reservation, error) + + RunScheduledInstancesRequest(*ec2.RunScheduledInstancesInput) (*request.Request, *ec2.RunScheduledInstancesOutput) + + RunScheduledInstances(*ec2.RunScheduledInstancesInput) (*ec2.RunScheduledInstancesOutput, error) + + StartInstancesRequest(*ec2.StartInstancesInput) (*request.Request, *ec2.StartInstancesOutput) + + StartInstances(*ec2.StartInstancesInput) (*ec2.StartInstancesOutput, error) + + StopInstancesRequest(*ec2.StopInstancesInput) (*request.Request, *ec2.StopInstancesOutput) + + StopInstances(*ec2.StopInstancesInput) (*ec2.StopInstancesOutput, error) + + TerminateInstancesRequest(*ec2.TerminateInstancesInput) (*request.Request, *ec2.TerminateInstancesOutput) + + TerminateInstances(*ec2.TerminateInstancesInput) (*ec2.TerminateInstancesOutput, error) + + UnassignPrivateIpAddressesRequest(*ec2.UnassignPrivateIpAddressesInput) (*request.Request, *ec2.UnassignPrivateIpAddressesOutput) + + UnassignPrivateIpAddresses(*ec2.UnassignPrivateIpAddressesInput) (*ec2.UnassignPrivateIpAddressesOutput, error) + + UnmonitorInstancesRequest(*ec2.UnmonitorInstancesInput) (*request.Request, *ec2.UnmonitorInstancesOutput) + + UnmonitorInstances(*ec2.UnmonitorInstancesInput) (*ec2.UnmonitorInstancesOutput, error) +} + +var _ EC2API = (*ec2.EC2)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/examples_test.go new file mode 100644 index 000000000..7e39c218a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/examples_test.go @@ -0,0 +1,5848 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. 
DO NOT EDIT. + +package ec2_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/ec2" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleEC2_AcceptVpcPeeringConnection() { + svc := ec2.New(session.New()) + + params := &ec2.AcceptVpcPeeringConnectionInput{ + DryRun: aws.Bool(true), + VpcPeeringConnectionId: aws.String("String"), + } + resp, err := svc.AcceptVpcPeeringConnection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AllocateAddress() { + svc := ec2.New(session.New()) + + params := &ec2.AllocateAddressInput{ + Domain: aws.String("DomainType"), + DryRun: aws.Bool(true), + } + resp, err := svc.AllocateAddress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AllocateHosts() { + svc := ec2.New(session.New()) + + params := &ec2.AllocateHostsInput{ + AvailabilityZone: aws.String("String"), // Required + InstanceType: aws.String("String"), // Required + Quantity: aws.Int64(1), // Required + AutoPlacement: aws.String("AutoPlacement"), + ClientToken: aws.String("String"), + } + resp, err := svc.AllocateHosts(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AssignPrivateIpAddresses() { + svc := ec2.New(session.New()) + + params := &ec2.AssignPrivateIpAddressesInput{ + NetworkInterfaceId: aws.String("String"), // Required + AllowReassignment: aws.Bool(true), + PrivateIpAddresses: []*string{ + aws.String("String"), // Required + // More values... + }, + SecondaryPrivateIpAddressCount: aws.Int64(1), + } + resp, err := svc.AssignPrivateIpAddresses(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AssociateAddress() { + svc := ec2.New(session.New()) + + params := &ec2.AssociateAddressInput{ + AllocationId: aws.String("String"), + AllowReassociation: aws.Bool(true), + DryRun: aws.Bool(true), + InstanceId: aws.String("String"), + NetworkInterfaceId: aws.String("String"), + PrivateIpAddress: aws.String("String"), + PublicIp: aws.String("String"), + } + resp, err := svc.AssociateAddress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AssociateDhcpOptions() { + svc := ec2.New(session.New()) + + params := &ec2.AssociateDhcpOptionsInput{ + DhcpOptionsId: aws.String("String"), // Required + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.AssociateDhcpOptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_AssociateRouteTable() { + svc := ec2.New(session.New()) + + params := &ec2.AssociateRouteTableInput{ + RouteTableId: aws.String("String"), // Required + SubnetId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.AssociateRouteTable(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AttachClassicLinkVpc() { + svc := ec2.New(session.New()) + + params := &ec2.AttachClassicLinkVpcInput{ + Groups: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + InstanceId: aws.String("String"), // Required + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.AttachClassicLinkVpc(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AttachInternetGateway() { + svc := ec2.New(session.New()) + + params := &ec2.AttachInternetGatewayInput{ + InternetGatewayId: aws.String("String"), // Required + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.AttachInternetGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AttachNetworkInterface() { + svc := ec2.New(session.New()) + + params := &ec2.AttachNetworkInterfaceInput{ + DeviceIndex: aws.Int64(1), // Required + InstanceId: aws.String("String"), // Required + NetworkInterfaceId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.AttachNetworkInterface(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AttachVolume() { + svc := ec2.New(session.New()) + + params := &ec2.AttachVolumeInput{ + Device: aws.String("String"), // Required + InstanceId: aws.String("String"), // Required + VolumeId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.AttachVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AttachVpnGateway() { + svc := ec2.New(session.New()) + + params := &ec2.AttachVpnGatewayInput{ + VpcId: aws.String("String"), // Required + VpnGatewayId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.AttachVpnGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_AuthorizeSecurityGroupEgress() { + svc := ec2.New(session.New()) + + params := &ec2.AuthorizeSecurityGroupEgressInput{ + GroupId: aws.String("String"), // Required + CidrIp: aws.String("String"), + DryRun: aws.Bool(true), + FromPort: aws.Int64(1), + IpPermissions: []*ec2.IpPermission{ + { // Required + FromPort: aws.Int64(1), + IpProtocol: aws.String("String"), + IpRanges: []*ec2.IpRange{ + { // Required + CidrIp: aws.String("String"), + }, + // More values... + }, + PrefixListIds: []*ec2.PrefixListId{ + { // Required + PrefixListId: aws.String("String"), + }, + // More values... + }, + ToPort: aws.Int64(1), + UserIdGroupPairs: []*ec2.UserIdGroupPair{ + { // Required + GroupId: aws.String("String"), + GroupName: aws.String("String"), + PeeringStatus: aws.String("String"), + UserId: aws.String("String"), + VpcId: aws.String("String"), + VpcPeeringConnectionId: aws.String("String"), + }, + // More values... + }, + }, + // More values... + }, + IpProtocol: aws.String("String"), + SourceSecurityGroupName: aws.String("String"), + SourceSecurityGroupOwnerId: aws.String("String"), + ToPort: aws.Int64(1), + } + resp, err := svc.AuthorizeSecurityGroupEgress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AuthorizeSecurityGroupIngress() { + svc := ec2.New(session.New()) + + params := &ec2.AuthorizeSecurityGroupIngressInput{ + CidrIp: aws.String("String"), + DryRun: aws.Bool(true), + FromPort: aws.Int64(1), + GroupId: aws.String("String"), + GroupName: aws.String("String"), + IpPermissions: []*ec2.IpPermission{ + { // Required + FromPort: aws.Int64(1), + IpProtocol: aws.String("String"), + IpRanges: []*ec2.IpRange{ + { // Required + CidrIp: aws.String("String"), + }, + // More values... + }, + PrefixListIds: []*ec2.PrefixListId{ + { // Required + PrefixListId: aws.String("String"), + }, + // More values... + }, + ToPort: aws.Int64(1), + UserIdGroupPairs: []*ec2.UserIdGroupPair{ + { // Required + GroupId: aws.String("String"), + GroupName: aws.String("String"), + PeeringStatus: aws.String("String"), + UserId: aws.String("String"), + VpcId: aws.String("String"), + VpcPeeringConnectionId: aws.String("String"), + }, + // More values... + }, + }, + // More values... + }, + IpProtocol: aws.String("String"), + SourceSecurityGroupName: aws.String("String"), + SourceSecurityGroupOwnerId: aws.String("String"), + ToPort: aws.Int64(1), + } + resp, err := svc.AuthorizeSecurityGroupIngress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_BundleInstance() { + svc := ec2.New(session.New()) + + params := &ec2.BundleInstanceInput{ + InstanceId: aws.String("String"), // Required + Storage: &ec2.Storage{ // Required + S3: &ec2.S3Storage{ + AWSAccessKeyId: aws.String("String"), + Bucket: aws.String("String"), + Prefix: aws.String("String"), + UploadPolicy: []byte("PAYLOAD"), + UploadPolicySignature: aws.String("String"), + }, + }, + DryRun: aws.Bool(true), + } + resp, err := svc.BundleInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_CancelBundleTask() { + svc := ec2.New(session.New()) + + params := &ec2.CancelBundleTaskInput{ + BundleId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.CancelBundleTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CancelConversionTask() { + svc := ec2.New(session.New()) + + params := &ec2.CancelConversionTaskInput{ + ConversionTaskId: aws.String("String"), // Required + DryRun: aws.Bool(true), + ReasonMessage: aws.String("String"), + } + resp, err := svc.CancelConversionTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CancelExportTask() { + svc := ec2.New(session.New()) + + params := &ec2.CancelExportTaskInput{ + ExportTaskId: aws.String("String"), // Required + } + resp, err := svc.CancelExportTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CancelImportTask() { + svc := ec2.New(session.New()) + + params := &ec2.CancelImportTaskInput{ + CancelReason: aws.String("String"), + DryRun: aws.Bool(true), + ImportTaskId: aws.String("String"), + } + resp, err := svc.CancelImportTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CancelReservedInstancesListing() { + svc := ec2.New(session.New()) + + params := &ec2.CancelReservedInstancesListingInput{ + ReservedInstancesListingId: aws.String("String"), // Required + } + resp, err := svc.CancelReservedInstancesListing(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CancelSpotFleetRequests() { + svc := ec2.New(session.New()) + + params := &ec2.CancelSpotFleetRequestsInput{ + SpotFleetRequestIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + TerminateInstances: aws.Bool(true), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.CancelSpotFleetRequests(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CancelSpotInstanceRequests() { + svc := ec2.New(session.New()) + + params := &ec2.CancelSpotInstanceRequestsInput{ + SpotInstanceRequestIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + } + resp, err := svc.CancelSpotInstanceRequests(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_ConfirmProductInstance() { + svc := ec2.New(session.New()) + + params := &ec2.ConfirmProductInstanceInput{ + InstanceId: aws.String("String"), // Required + ProductCode: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.ConfirmProductInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CopyImage() { + svc := ec2.New(session.New()) + + params := &ec2.CopyImageInput{ + Name: aws.String("String"), // Required + SourceImageId: aws.String("String"), // Required + SourceRegion: aws.String("String"), // Required + ClientToken: aws.String("String"), + Description: aws.String("String"), + DryRun: aws.Bool(true), + Encrypted: aws.Bool(true), + KmsKeyId: aws.String("String"), + } + resp, err := svc.CopyImage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CopySnapshot() { + svc := ec2.New(session.New()) + + params := &ec2.CopySnapshotInput{ + SourceRegion: aws.String("String"), // Required + SourceSnapshotId: aws.String("String"), // Required + Description: aws.String("String"), + DestinationRegion: aws.String("String"), + DryRun: aws.Bool(true), + Encrypted: aws.Bool(true), + KmsKeyId: aws.String("String"), + PresignedUrl: aws.String("String"), + } + resp, err := svc.CopySnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateCustomerGateway() { + svc := ec2.New(session.New()) + + params := &ec2.CreateCustomerGatewayInput{ + BgpAsn: aws.Int64(1), // Required + PublicIp: aws.String("String"), // Required + Type: aws.String("GatewayType"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.CreateCustomerGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateDhcpOptions() { + svc := ec2.New(session.New()) + + params := &ec2.CreateDhcpOptionsInput{ + DhcpConfigurations: []*ec2.NewDhcpConfiguration{ // Required + { // Required + Key: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + DryRun: aws.Bool(true), + } + resp, err := svc.CreateDhcpOptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateFlowLogs() { + svc := ec2.New(session.New()) + + params := &ec2.CreateFlowLogsInput{ + DeliverLogsPermissionArn: aws.String("String"), // Required + LogGroupName: aws.String("String"), // Required + ResourceIds: []*string{ // Required + aws.String("String"), // Required + // More values... 
+ }, + ResourceType: aws.String("FlowLogsResourceType"), // Required + TrafficType: aws.String("TrafficType"), // Required + ClientToken: aws.String("String"), + } + resp, err := svc.CreateFlowLogs(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateImage() { + svc := ec2.New(session.New()) + + params := &ec2.CreateImageInput{ + InstanceId: aws.String("String"), // Required + Name: aws.String("String"), // Required + BlockDeviceMappings: []*ec2.BlockDeviceMapping{ + { // Required + DeviceName: aws.String("String"), + Ebs: &ec2.EbsBlockDevice{ + DeleteOnTermination: aws.Bool(true), + Encrypted: aws.Bool(true), + Iops: aws.Int64(1), + SnapshotId: aws.String("String"), + VolumeSize: aws.Int64(1), + VolumeType: aws.String("VolumeType"), + }, + NoDevice: aws.String("String"), + VirtualName: aws.String("String"), + }, + // More values... + }, + Description: aws.String("String"), + DryRun: aws.Bool(true), + NoReboot: aws.Bool(true), + } + resp, err := svc.CreateImage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateInstanceExportTask() { + svc := ec2.New(session.New()) + + params := &ec2.CreateInstanceExportTaskInput{ + InstanceId: aws.String("String"), // Required + Description: aws.String("String"), + ExportToS3Task: &ec2.ExportToS3TaskSpecification{ + ContainerFormat: aws.String("ContainerFormat"), + DiskImageFormat: aws.String("DiskImageFormat"), + S3Bucket: aws.String("String"), + S3Prefix: aws.String("String"), + }, + TargetEnvironment: aws.String("ExportEnvironment"), + } + resp, err := svc.CreateInstanceExportTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateInternetGateway() { + svc := ec2.New(session.New()) + + params := &ec2.CreateInternetGatewayInput{ + DryRun: aws.Bool(true), + } + resp, err := svc.CreateInternetGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateKeyPair() { + svc := ec2.New(session.New()) + + params := &ec2.CreateKeyPairInput{ + KeyName: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.CreateKeyPair(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateNatGateway() { + svc := ec2.New(session.New()) + + params := &ec2.CreateNatGatewayInput{ + AllocationId: aws.String("String"), // Required + SubnetId: aws.String("String"), // Required + ClientToken: aws.String("String"), + } + resp, err := svc.CreateNatGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_CreateNetworkAcl() { + svc := ec2.New(session.New()) + + params := &ec2.CreateNetworkAclInput{ + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.CreateNetworkAcl(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateNetworkAclEntry() { + svc := ec2.New(session.New()) + + params := &ec2.CreateNetworkAclEntryInput{ + CidrBlock: aws.String("String"), // Required + Egress: aws.Bool(true), // Required + NetworkAclId: aws.String("String"), // Required + Protocol: aws.String("String"), // Required + RuleAction: aws.String("RuleAction"), // Required + RuleNumber: aws.Int64(1), // Required + DryRun: aws.Bool(true), + IcmpTypeCode: &ec2.IcmpTypeCode{ + Code: aws.Int64(1), + Type: aws.Int64(1), + }, + PortRange: &ec2.PortRange{ + From: aws.Int64(1), + To: aws.Int64(1), + }, + } + resp, err := svc.CreateNetworkAclEntry(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateNetworkInterface() { + svc := ec2.New(session.New()) + + params := &ec2.CreateNetworkInterfaceInput{ + SubnetId: aws.String("String"), // Required + Description: aws.String("String"), + DryRun: aws.Bool(true), + Groups: []*string{ + aws.String("String"), // Required + // More values... + }, + PrivateIpAddress: aws.String("String"), + PrivateIpAddresses: []*ec2.PrivateIpAddressSpecification{ + { // Required + PrivateIpAddress: aws.String("String"), // Required + Primary: aws.Bool(true), + }, + // More values... + }, + SecondaryPrivateIpAddressCount: aws.Int64(1), + } + resp, err := svc.CreateNetworkInterface(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreatePlacementGroup() { + svc := ec2.New(session.New()) + + params := &ec2.CreatePlacementGroupInput{ + GroupName: aws.String("String"), // Required + Strategy: aws.String("PlacementStrategy"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.CreatePlacementGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateReservedInstancesListing() { + svc := ec2.New(session.New()) + + params := &ec2.CreateReservedInstancesListingInput{ + ClientToken: aws.String("String"), // Required + InstanceCount: aws.Int64(1), // Required + PriceSchedules: []*ec2.PriceScheduleSpecification{ // Required + { // Required + CurrencyCode: aws.String("CurrencyCodeValues"), + Price: aws.Float64(1.0), + Term: aws.Int64(1), + }, + // More values... + }, + ReservedInstancesId: aws.String("String"), // Required + } + resp, err := svc.CreateReservedInstancesListing(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_CreateRoute() { + svc := ec2.New(session.New()) + + params := &ec2.CreateRouteInput{ + DestinationCidrBlock: aws.String("String"), // Required + RouteTableId: aws.String("String"), // Required + DryRun: aws.Bool(true), + GatewayId: aws.String("String"), + InstanceId: aws.String("String"), + NatGatewayId: aws.String("String"), + NetworkInterfaceId: aws.String("String"), + VpcPeeringConnectionId: aws.String("String"), + } + resp, err := svc.CreateRoute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateRouteTable() { + svc := ec2.New(session.New()) + + params := &ec2.CreateRouteTableInput{ + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.CreateRouteTable(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateSecurityGroup() { + svc := ec2.New(session.New()) + + params := &ec2.CreateSecurityGroupInput{ + Description: aws.String("String"), // Required + GroupName: aws.String("String"), // Required + DryRun: aws.Bool(true), + VpcId: aws.String("String"), + } + resp, err := svc.CreateSecurityGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateSnapshot() { + svc := ec2.New(session.New()) + + params := &ec2.CreateSnapshotInput{ + VolumeId: aws.String("String"), // Required + Description: aws.String("String"), + DryRun: aws.Bool(true), + } + resp, err := svc.CreateSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateSpotDatafeedSubscription() { + svc := ec2.New(session.New()) + + params := &ec2.CreateSpotDatafeedSubscriptionInput{ + Bucket: aws.String("String"), // Required + DryRun: aws.Bool(true), + Prefix: aws.String("String"), + } + resp, err := svc.CreateSpotDatafeedSubscription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateSubnet() { + svc := ec2.New(session.New()) + + params := &ec2.CreateSubnetInput{ + CidrBlock: aws.String("String"), // Required + VpcId: aws.String("String"), // Required + AvailabilityZone: aws.String("String"), + DryRun: aws.Bool(true), + } + resp, err := svc.CreateSubnet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateTags() { + svc := ec2.New(session.New()) + + params := &ec2.CreateTagsInput{ + Resources: []*string{ // Required + aws.String("String"), // Required + // More values... 
+ }, + Tags: []*ec2.Tag{ // Required + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + DryRun: aws.Bool(true), + } + resp, err := svc.CreateTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateVolume() { + svc := ec2.New(session.New()) + + params := &ec2.CreateVolumeInput{ + AvailabilityZone: aws.String("String"), // Required + DryRun: aws.Bool(true), + Encrypted: aws.Bool(true), + Iops: aws.Int64(1), + KmsKeyId: aws.String("String"), + Size: aws.Int64(1), + SnapshotId: aws.String("String"), + VolumeType: aws.String("VolumeType"), + } + resp, err := svc.CreateVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateVpc() { + svc := ec2.New(session.New()) + + params := &ec2.CreateVpcInput{ + CidrBlock: aws.String("String"), // Required + DryRun: aws.Bool(true), + InstanceTenancy: aws.String("Tenancy"), + } + resp, err := svc.CreateVpc(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateVpcEndpoint() { + svc := ec2.New(session.New()) + + params := &ec2.CreateVpcEndpointInput{ + ServiceName: aws.String("String"), // Required + VpcId: aws.String("String"), // Required + ClientToken: aws.String("String"), + DryRun: aws.Bool(true), + PolicyDocument: aws.String("String"), + RouteTableIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.CreateVpcEndpoint(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateVpcPeeringConnection() { + svc := ec2.New(session.New()) + + params := &ec2.CreateVpcPeeringConnectionInput{ + DryRun: aws.Bool(true), + PeerOwnerId: aws.String("String"), + PeerVpcId: aws.String("String"), + VpcId: aws.String("String"), + } + resp, err := svc.CreateVpcPeeringConnection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateVpnConnection() { + svc := ec2.New(session.New()) + + params := &ec2.CreateVpnConnectionInput{ + CustomerGatewayId: aws.String("String"), // Required + Type: aws.String("String"), // Required + VpnGatewayId: aws.String("String"), // Required + DryRun: aws.Bool(true), + Options: &ec2.VpnConnectionOptionsSpecification{ + StaticRoutesOnly: aws.Bool(true), + }, + } + resp, err := svc.CreateVpnConnection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_CreateVpnConnectionRoute() { + svc := ec2.New(session.New()) + + params := &ec2.CreateVpnConnectionRouteInput{ + DestinationCidrBlock: aws.String("String"), // Required + VpnConnectionId: aws.String("String"), // Required + } + resp, err := svc.CreateVpnConnectionRoute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateVpnGateway() { + svc := ec2.New(session.New()) + + params := &ec2.CreateVpnGatewayInput{ + Type: aws.String("GatewayType"), // Required + AvailabilityZone: aws.String("String"), + DryRun: aws.Bool(true), + } + resp, err := svc.CreateVpnGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteCustomerGateway() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteCustomerGatewayInput{ + CustomerGatewayId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteCustomerGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteDhcpOptions() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteDhcpOptionsInput{ + DhcpOptionsId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteDhcpOptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteFlowLogs() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteFlowLogsInput{ + FlowLogIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DeleteFlowLogs(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteInternetGateway() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteInternetGatewayInput{ + InternetGatewayId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteInternetGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteKeyPair() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteKeyPairInput{ + KeyName: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteKeyPair(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_DeleteNatGateway() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteNatGatewayInput{ + NatGatewayId: aws.String("String"), // Required + } + resp, err := svc.DeleteNatGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteNetworkAcl() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteNetworkAclInput{ + NetworkAclId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteNetworkAcl(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteNetworkAclEntry() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteNetworkAclEntryInput{ + Egress: aws.Bool(true), // Required + NetworkAclId: aws.String("String"), // Required + RuleNumber: aws.Int64(1), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteNetworkAclEntry(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteNetworkInterface() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteNetworkInterfaceInput{ + NetworkInterfaceId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteNetworkInterface(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeletePlacementGroup() { + svc := ec2.New(session.New()) + + params := &ec2.DeletePlacementGroupInput{ + GroupName: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeletePlacementGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteRoute() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteRouteInput{ + DestinationCidrBlock: aws.String("String"), // Required + RouteTableId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteRoute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteRouteTable() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteRouteTableInput{ + RouteTableId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteRouteTable(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_DeleteSecurityGroup() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteSecurityGroupInput{ + DryRun: aws.Bool(true), + GroupId: aws.String("String"), + GroupName: aws.String("String"), + } + resp, err := svc.DeleteSecurityGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteSnapshot() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteSnapshotInput{ + SnapshotId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteSpotDatafeedSubscription() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteSpotDatafeedSubscriptionInput{ + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteSpotDatafeedSubscription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteSubnet() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteSubnetInput{ + SubnetId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteSubnet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteTags() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteTagsInput{ + Resources: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + Tags: []*ec2.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.DeleteTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteVolume() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteVolumeInput{ + VolumeId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteVpc() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteVpcInput{ + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteVpc(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteVpcEndpoints() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteVpcEndpointsInput{ + VpcEndpointIds: []*string{ // Required + aws.String("String"), // Required + // More values... 
+ }, + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteVpcEndpoints(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteVpcPeeringConnection() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteVpcPeeringConnectionInput{ + VpcPeeringConnectionId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteVpcPeeringConnection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteVpnConnection() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteVpnConnectionInput{ + VpnConnectionId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteVpnConnection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteVpnConnectionRoute() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteVpnConnectionRouteInput{ + DestinationCidrBlock: aws.String("String"), // Required + VpnConnectionId: aws.String("String"), // Required + } + resp, err := svc.DeleteVpnConnectionRoute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteVpnGateway() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteVpnGatewayInput{ + VpnGatewayId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteVpnGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeregisterImage() { + svc := ec2.New(session.New()) + + params := &ec2.DeregisterImageInput{ + ImageId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeregisterImage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeAccountAttributes() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeAccountAttributesInput{ + AttributeNames: []*string{ + aws.String("AccountAttributeName"), // Required + // More values... + }, + DryRun: aws.Bool(true), + } + resp, err := svc.DescribeAccountAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeAddresses() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeAddressesInput{ + AllocationIds: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + PublicIps: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeAddresses(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeAvailabilityZones() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeAvailabilityZonesInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + ZoneNames: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeAvailabilityZones(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeBundleTasks() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeBundleTasksInput{ + BundleIds: []*string{ + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + } + resp, err := svc.DescribeBundleTasks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeClassicLinkInstances() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeClassicLinkInstancesInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + InstanceIds: []*string{ + aws.String("String"), // Required + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeClassicLinkInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeConversionTasks() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeConversionTasksInput{ + ConversionTaskIds: []*string{ + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + } + resp, err := svc.DescribeConversionTasks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
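+	// (The plain Println below yields a readable dump because the SDK's
+	// generated types implement String() via awsutil.Prettify.)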
+ fmt.Println(resp) +} + +func ExampleEC2_DescribeCustomerGateways() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeCustomerGatewaysInput{ + CustomerGatewayIds: []*string{ + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + } + resp, err := svc.DescribeCustomerGateways(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeDhcpOptions() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeDhcpOptionsInput{ + DhcpOptionsIds: []*string{ + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + } + resp, err := svc.DescribeDhcpOptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeExportTasks() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeExportTasksInput{ + ExportTaskIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeExportTasks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeFlowLogs() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeFlowLogsInput{ + Filter: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + FlowLogIds: []*string{ + aws.String("String"), // Required + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeFlowLogs(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeHosts() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeHostsInput{ + Filter: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + HostIds: []*string{ + aws.String("String"), // Required + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeHosts(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
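+	// (This API pages: when resp.NextToken is non-nil there are more
+	// results. A minimal follow-up loop, sketched for this SDK version:
+	//
+	//	for resp.NextToken != nil {
+	//		params.NextToken = resp.NextToken
+	//		if resp, err = svc.DescribeHosts(params); err != nil {
+	//			break
+	//		}
+	//		fmt.Println(resp)
+	//	}
+	// )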
+ fmt.Println(resp) +} + +func ExampleEC2_DescribeIdFormat() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeIdFormatInput{ + Resource: aws.String("String"), + } + resp, err := svc.DescribeIdFormat(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeIdentityIdFormat() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeIdentityIdFormatInput{ + PrincipalArn: aws.String("String"), // Required + Resource: aws.String("String"), + } + resp, err := svc.DescribeIdentityIdFormat(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeImageAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeImageAttributeInput{ + Attribute: aws.String("ImageAttributeName"), // Required + ImageId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DescribeImageAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeImages() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeImagesInput{ + DryRun: aws.Bool(true), + ExecutableUsers: []*string{ + aws.String("String"), // Required + // More values... + }, + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + ImageIds: []*string{ + aws.String("String"), // Required + // More values... + }, + Owners: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeImages(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeImportImageTasks() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeImportImageTasksInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + ImportTaskIds: []*string{ + aws.String("String"), // Required + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeImportImageTasks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeImportSnapshotTasks() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeImportSnapshotTasksInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + ImportTaskIds: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeImportSnapshotTasks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeInstanceAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeInstanceAttributeInput{ + Attribute: aws.String("InstanceAttributeName"), // Required + InstanceId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DescribeInstanceAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeInstanceStatus() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeInstanceStatusInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + IncludeAllInstances: aws.Bool(true), + InstanceIds: []*string{ + aws.String("String"), // Required + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeInstanceStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeInstances() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeInstancesInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + InstanceIds: []*string{ + aws.String("String"), // Required + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeInternetGateways() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeInternetGatewaysInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + InternetGatewayIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeInternetGateways(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeKeyPairs() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeKeyPairsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + KeyNames: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + } + resp, err := svc.DescribeKeyPairs(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeMovingAddresses() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeMovingAddressesInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + PublicIps: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeMovingAddresses(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeNatGateways() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeNatGatewaysInput{ + Filter: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + MaxResults: aws.Int64(1), + NatGatewayIds: []*string{ + aws.String("String"), // Required + // More values... + }, + NextToken: aws.String("String"), + } + resp, err := svc.DescribeNatGateways(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeNetworkAcls() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeNetworkAclsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + NetworkAclIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeNetworkAcls(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeNetworkInterfaceAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeNetworkInterfaceAttributeInput{ + NetworkInterfaceId: aws.String("String"), // Required + Attribute: aws.String("NetworkInterfaceAttribute"), + DryRun: aws.Bool(true), + } + resp, err := svc.DescribeNetworkInterfaceAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeNetworkInterfaces() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeNetworkInterfacesInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + NetworkInterfaceIds: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + } + resp, err := svc.DescribeNetworkInterfaces(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribePlacementGroups() { + svc := ec2.New(session.New()) + + params := &ec2.DescribePlacementGroupsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + GroupNames: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribePlacementGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribePrefixLists() { + svc := ec2.New(session.New()) + + params := &ec2.DescribePrefixListsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + PrefixListIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribePrefixLists(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeRegions() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeRegionsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + RegionNames: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeRegions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeReservedInstances() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeReservedInstancesInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + OfferingType: aws.String("OfferingTypeValues"), + ReservedInstancesIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeReservedInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeReservedInstancesListings() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeReservedInstancesListingsInput{ + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... 
+ }, + ReservedInstancesId: aws.String("String"), + ReservedInstancesListingId: aws.String("String"), + } + resp, err := svc.DescribeReservedInstancesListings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeReservedInstancesModifications() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeReservedInstancesModificationsInput{ + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + NextToken: aws.String("String"), + ReservedInstancesModificationIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeReservedInstancesModifications(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeReservedInstancesOfferings() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeReservedInstancesOfferingsInput{ + AvailabilityZone: aws.String("String"), + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + IncludeMarketplace: aws.Bool(true), + InstanceTenancy: aws.String("Tenancy"), + InstanceType: aws.String("InstanceType"), + MaxDuration: aws.Int64(1), + MaxInstanceCount: aws.Int64(1), + MaxResults: aws.Int64(1), + MinDuration: aws.Int64(1), + NextToken: aws.String("String"), + OfferingType: aws.String("OfferingTypeValues"), + ProductDescription: aws.String("RIProductDescription"), + ReservedInstancesOfferingIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeReservedInstancesOfferings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeRouteTables() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeRouteTablesInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + RouteTableIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeRouteTables(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeScheduledInstanceAvailability() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeScheduledInstanceAvailabilityInput{ + FirstSlotStartTimeRange: &ec2.SlotDateTimeRangeRequest{ // Required + EarliestTime: aws.Time(time.Now()), // Required + LatestTime: aws.Time(time.Now()), // Required + }, + Recurrence: &ec2.ScheduledInstanceRecurrenceRequest{ // Required + Frequency: aws.String("String"), + Interval: aws.Int64(1), + OccurrenceDays: []*int64{ + aws.Int64(1), // Required + // More values... 
+ }, + OccurrenceRelativeToEnd: aws.Bool(true), + OccurrenceUnit: aws.String("String"), + }, + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + MaxResults: aws.Int64(1), + MaxSlotDurationInHours: aws.Int64(1), + MinSlotDurationInHours: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeScheduledInstanceAvailability(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeScheduledInstances() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeScheduledInstancesInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + ScheduledInstanceIds: []*string{ + aws.String("String"), // Required + // More values... + }, + SlotStartTimeRange: &ec2.SlotStartTimeRangeRequest{ + EarliestTime: aws.Time(time.Now()), + LatestTime: aws.Time(time.Now()), + }, + } + resp, err := svc.DescribeScheduledInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeSecurityGroupReferences() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSecurityGroupReferencesInput{ + GroupId: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + } + resp, err := svc.DescribeSecurityGroupReferences(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeSecurityGroups() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSecurityGroupsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + GroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + GroupNames: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeSecurityGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeSnapshotAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSnapshotAttributeInput{ + Attribute: aws.String("SnapshotAttributeName"), // Required + SnapshotId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DescribeSnapshotAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
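+	// (Note: with DryRun set above, EC2 reports a successful permission
+	// check as an error whose Code() is "DryRunOperation", so this example
+	// would normally take the error branch rather than print resp.)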
+ fmt.Println(resp) +} + +func ExampleEC2_DescribeSnapshots() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSnapshotsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + OwnerIds: []*string{ + aws.String("String"), // Required + // More values... + }, + RestorableByUserIds: []*string{ + aws.String("String"), // Required + // More values... + }, + SnapshotIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeSnapshots(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeSpotDatafeedSubscription() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSpotDatafeedSubscriptionInput{ + DryRun: aws.Bool(true), + } + resp, err := svc.DescribeSpotDatafeedSubscription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeSpotFleetInstances() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSpotFleetInstancesInput{ + SpotFleetRequestId: aws.String("String"), // Required + DryRun: aws.Bool(true), + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeSpotFleetInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeSpotFleetRequestHistory() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSpotFleetRequestHistoryInput{ + SpotFleetRequestId: aws.String("String"), // Required + StartTime: aws.Time(time.Now()), // Required + DryRun: aws.Bool(true), + EventType: aws.String("EventType"), + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeSpotFleetRequestHistory(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeSpotFleetRequests() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSpotFleetRequestsInput{ + DryRun: aws.Bool(true), + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + SpotFleetRequestIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeSpotFleetRequests(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeSpotInstanceRequests() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSpotInstanceRequestsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... 
+ }, + SpotInstanceRequestIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeSpotInstanceRequests(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeSpotPriceHistory() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSpotPriceHistoryInput{ + AvailabilityZone: aws.String("String"), + DryRun: aws.Bool(true), + EndTime: aws.Time(time.Now()), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + InstanceTypes: []*string{ + aws.String("InstanceType"), // Required + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + ProductDescriptions: []*string{ + aws.String("String"), // Required + // More values... + }, + StartTime: aws.Time(time.Now()), + } + resp, err := svc.DescribeSpotPriceHistory(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeStaleSecurityGroups() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeStaleSecurityGroupsInput{ + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + MaxResults: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.DescribeStaleSecurityGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeSubnets() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSubnetsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + SubnetIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeSubnets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeTags() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeTagsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
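+	// (The "String" filter name above is a placeholder; a real DescribeTags
+	// call would use a documented filter such as "resource-id",
+	// "resource-type", "key", or "value".)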
+ fmt.Println(resp) +} + +func ExampleEC2_DescribeVolumeAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVolumeAttributeInput{ + VolumeId: aws.String("String"), // Required + Attribute: aws.String("VolumeAttributeName"), + DryRun: aws.Bool(true), + } + resp, err := svc.DescribeVolumeAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVolumeStatus() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVolumeStatusInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + VolumeIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeVolumeStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVolumes() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVolumesInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + VolumeIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeVolumes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVpcAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVpcAttributeInput{ + Attribute: aws.String("VpcAttributeName"), // Required + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DescribeVpcAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVpcClassicLink() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVpcClassicLinkInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + VpcIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeVpcClassicLink(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVpcClassicLinkDnsSupport() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVpcClassicLinkDnsSupportInput{ + MaxResults: aws.Int64(1), + NextToken: aws.String("NextToken"), + VpcIds: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + } + resp, err := svc.DescribeVpcClassicLinkDnsSupport(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVpcEndpointServices() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVpcEndpointServicesInput{ + DryRun: aws.Bool(true), + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeVpcEndpointServices(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVpcEndpoints() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVpcEndpointsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + VpcEndpointIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeVpcEndpoints(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVpcPeeringConnections() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVpcPeeringConnectionsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + VpcPeeringConnectionIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeVpcPeeringConnections(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVpcs() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVpcsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + VpcIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeVpcs(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVpnConnections() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVpnConnectionsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + VpnConnectionIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeVpnConnections(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
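+		// A minimal sketch of that cast, assuming the usual
+		// github.com/aws/aws-sdk-go/aws/awserr import:
+		//
+		//	if aerr, ok := err.(awserr.Error); ok {
+		//		fmt.Println(aerr.Code(), aerr.Message())
+		//	}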
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVpnGateways() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVpnGatewaysInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + VpnGatewayIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeVpnGateways(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DetachClassicLinkVpc() { + svc := ec2.New(session.New()) + + params := &ec2.DetachClassicLinkVpcInput{ + InstanceId: aws.String("String"), // Required + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DetachClassicLinkVpc(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DetachInternetGateway() { + svc := ec2.New(session.New()) + + params := &ec2.DetachInternetGatewayInput{ + InternetGatewayId: aws.String("String"), // Required + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DetachInternetGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DetachNetworkInterface() { + svc := ec2.New(session.New()) + + params := &ec2.DetachNetworkInterfaceInput{ + AttachmentId: aws.String("String"), // Required + DryRun: aws.Bool(true), + Force: aws.Bool(true), + } + resp, err := svc.DetachNetworkInterface(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DetachVolume() { + svc := ec2.New(session.New()) + + params := &ec2.DetachVolumeInput{ + VolumeId: aws.String("String"), // Required + Device: aws.String("String"), + DryRun: aws.Bool(true), + Force: aws.Bool(true), + InstanceId: aws.String("String"), + } + resp, err := svc.DetachVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DetachVpnGateway() { + svc := ec2.New(session.New()) + + params := &ec2.DetachVpnGatewayInput{ + VpcId: aws.String("String"), // Required + VpnGatewayId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DetachVpnGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
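+	// (session.New, used throughout these examples, was later deprecated in
+	// favor of session.NewSession / session.Must, which surface configuration
+	// errors instead of silently deferring them; the generated examples
+	// predate that change.)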
+ fmt.Println(resp) +} + +func ExampleEC2_DisableVgwRoutePropagation() { + svc := ec2.New(session.New()) + + params := &ec2.DisableVgwRoutePropagationInput{ + GatewayId: aws.String("String"), // Required + RouteTableId: aws.String("String"), // Required + } + resp, err := svc.DisableVgwRoutePropagation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DisableVpcClassicLink() { + svc := ec2.New(session.New()) + + params := &ec2.DisableVpcClassicLinkInput{ + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DisableVpcClassicLink(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DisableVpcClassicLinkDnsSupport() { + svc := ec2.New(session.New()) + + params := &ec2.DisableVpcClassicLinkDnsSupportInput{ + VpcId: aws.String("String"), + } + resp, err := svc.DisableVpcClassicLinkDnsSupport(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DisassociateAddress() { + svc := ec2.New(session.New()) + + params := &ec2.DisassociateAddressInput{ + AssociationId: aws.String("String"), + DryRun: aws.Bool(true), + PublicIp: aws.String("String"), + } + resp, err := svc.DisassociateAddress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DisassociateRouteTable() { + svc := ec2.New(session.New()) + + params := &ec2.DisassociateRouteTableInput{ + AssociationId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DisassociateRouteTable(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_EnableVgwRoutePropagation() { + svc := ec2.New(session.New()) + + params := &ec2.EnableVgwRoutePropagationInput{ + GatewayId: aws.String("String"), // Required + RouteTableId: aws.String("String"), // Required + } + resp, err := svc.EnableVgwRoutePropagation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_EnableVolumeIO() { + svc := ec2.New(session.New()) + + params := &ec2.EnableVolumeIOInput{ + VolumeId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.EnableVolumeIO(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
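+	// (EnableVolumeIO returns no data fields in this SDK version, so the
+	// print below is expected to show an empty struct; success is simply
+	// err == nil.)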
+ fmt.Println(resp) +} + +func ExampleEC2_EnableVpcClassicLink() { + svc := ec2.New(session.New()) + + params := &ec2.EnableVpcClassicLinkInput{ + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.EnableVpcClassicLink(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_EnableVpcClassicLinkDnsSupport() { + svc := ec2.New(session.New()) + + params := &ec2.EnableVpcClassicLinkDnsSupportInput{ + VpcId: aws.String("String"), + } + resp, err := svc.EnableVpcClassicLinkDnsSupport(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_GetConsoleOutput() { + svc := ec2.New(session.New()) + + params := &ec2.GetConsoleOutputInput{ + InstanceId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.GetConsoleOutput(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_GetConsoleScreenshot() { + svc := ec2.New(session.New()) + + params := &ec2.GetConsoleScreenshotInput{ + InstanceId: aws.String("String"), // Required + DryRun: aws.Bool(true), + WakeUp: aws.Bool(true), + } + resp, err := svc.GetConsoleScreenshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_GetPasswordData() { + svc := ec2.New(session.New()) + + params := &ec2.GetPasswordDataInput{ + InstanceId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.GetPasswordData(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ImportImage() { + svc := ec2.New(session.New()) + + params := &ec2.ImportImageInput{ + Architecture: aws.String("String"), + ClientData: &ec2.ClientData{ + Comment: aws.String("String"), + UploadEnd: aws.Time(time.Now()), + UploadSize: aws.Float64(1.0), + UploadStart: aws.Time(time.Now()), + }, + ClientToken: aws.String("String"), + Description: aws.String("String"), + DiskContainers: []*ec2.ImageDiskContainer{ + { // Required + Description: aws.String("String"), + DeviceName: aws.String("String"), + Format: aws.String("String"), + SnapshotId: aws.String("String"), + Url: aws.String("String"), + UserBucket: &ec2.UserBucket{ + S3Bucket: aws.String("String"), + S3Key: aws.String("String"), + }, + }, + // More values... + }, + DryRun: aws.Bool(true), + Hypervisor: aws.String("String"), + LicenseType: aws.String("String"), + Platform: aws.String("String"), + RoleName: aws.String("String"), + } + resp, err := svc.ImportImage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
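+	// (In each disk container above, Url and UserBucket are alternative ways
+	// to locate the disk image, either a direct URL or an S3 object; a real
+	// request would set one of them, not both placeholders.)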
+ fmt.Println(resp) +} + +func ExampleEC2_ImportInstance() { + svc := ec2.New(session.New()) + + params := &ec2.ImportInstanceInput{ + Platform: aws.String("PlatformValues"), // Required + Description: aws.String("String"), + DiskImages: []*ec2.DiskImage{ + { // Required + Description: aws.String("String"), + Image: &ec2.DiskImageDetail{ + Bytes: aws.Int64(1), // Required + Format: aws.String("DiskImageFormat"), // Required + ImportManifestUrl: aws.String("String"), // Required + }, + Volume: &ec2.VolumeDetail{ + Size: aws.Int64(1), // Required + }, + }, + // More values... + }, + DryRun: aws.Bool(true), + LaunchSpecification: &ec2.ImportInstanceLaunchSpecification{ + AdditionalInfo: aws.String("String"), + Architecture: aws.String("ArchitectureValues"), + GroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + GroupNames: []*string{ + aws.String("String"), // Required + // More values... + }, + InstanceInitiatedShutdownBehavior: aws.String("ShutdownBehavior"), + InstanceType: aws.String("InstanceType"), + Monitoring: aws.Bool(true), + Placement: &ec2.Placement{ + Affinity: aws.String("String"), + AvailabilityZone: aws.String("String"), + GroupName: aws.String("String"), + HostId: aws.String("String"), + Tenancy: aws.String("Tenancy"), + }, + PrivateIpAddress: aws.String("String"), + SubnetId: aws.String("String"), + UserData: &ec2.UserData{ + Data: aws.String("String"), + }, + }, + } + resp, err := svc.ImportInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ImportKeyPair() { + svc := ec2.New(session.New()) + + params := &ec2.ImportKeyPairInput{ + KeyName: aws.String("String"), // Required + PublicKeyMaterial: []byte("PAYLOAD"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.ImportKeyPair(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ImportSnapshot() { + svc := ec2.New(session.New()) + + params := &ec2.ImportSnapshotInput{ + ClientData: &ec2.ClientData{ + Comment: aws.String("String"), + UploadEnd: aws.Time(time.Now()), + UploadSize: aws.Float64(1.0), + UploadStart: aws.Time(time.Now()), + }, + ClientToken: aws.String("String"), + Description: aws.String("String"), + DiskContainer: &ec2.SnapshotDiskContainer{ + Description: aws.String("String"), + Format: aws.String("String"), + Url: aws.String("String"), + UserBucket: &ec2.UserBucket{ + S3Bucket: aws.String("String"), + S3Key: aws.String("String"), + }, + }, + DryRun: aws.Bool(true), + RoleName: aws.String("String"), + } + resp, err := svc.ImportSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
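+	// (ClientToken above is the standard AWS idempotency token: retrying the
+	// request with the same value will not start a duplicate import.)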
+ fmt.Println(resp) +} + +func ExampleEC2_ImportVolume() { + svc := ec2.New(session.New()) + + params := &ec2.ImportVolumeInput{ + AvailabilityZone: aws.String("String"), // Required + Image: &ec2.DiskImageDetail{ // Required + Bytes: aws.Int64(1), // Required + Format: aws.String("DiskImageFormat"), // Required + ImportManifestUrl: aws.String("String"), // Required + }, + Volume: &ec2.VolumeDetail{ // Required + Size: aws.Int64(1), // Required + }, + Description: aws.String("String"), + DryRun: aws.Bool(true), + } + resp, err := svc.ImportVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifyHosts() { + svc := ec2.New(session.New()) + + params := &ec2.ModifyHostsInput{ + AutoPlacement: aws.String("AutoPlacement"), // Required + HostIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.ModifyHosts(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifyIdFormat() { + svc := ec2.New(session.New()) + + params := &ec2.ModifyIdFormatInput{ + Resource: aws.String("String"), // Required + UseLongIds: aws.Bool(true), // Required + } + resp, err := svc.ModifyIdFormat(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifyIdentityIdFormat() { + svc := ec2.New(session.New()) + + params := &ec2.ModifyIdentityIdFormatInput{ + PrincipalArn: aws.String("String"), // Required + Resource: aws.String("String"), // Required + UseLongIds: aws.Bool(true), // Required + } + resp, err := svc.ModifyIdentityIdFormat(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifyImageAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ModifyImageAttributeInput{ + ImageId: aws.String("String"), // Required + Attribute: aws.String("String"), + Description: &ec2.AttributeValue{ + Value: aws.String("String"), + }, + DryRun: aws.Bool(true), + LaunchPermission: &ec2.LaunchPermissionModifications{ + Add: []*ec2.LaunchPermission{ + { // Required + Group: aws.String("PermissionGroup"), + UserId: aws.String("String"), + }, + // More values... + }, + Remove: []*ec2.LaunchPermission{ + { // Required + Group: aws.String("PermissionGroup"), + UserId: aws.String("String"), + }, + // More values... + }, + }, + OperationType: aws.String("OperationType"), + ProductCodes: []*string{ + aws.String("String"), // Required + // More values... + }, + UserGroups: []*string{ + aws.String("String"), // Required + // More values... + }, + UserIds: []*string{ + aws.String("String"), // Required + // More values... + }, + Value: aws.String("String"), + } + resp, err := svc.ModifyImageAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
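+	// (Attribute selects which of the fields above takes effect; e.g.
+	// "launchPermission" pairs with the LaunchPermission add/remove lists
+	// and "description" with Description. The rest are shown only for
+	// completeness of the generated example.)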
+ fmt.Println(resp) +} + +func ExampleEC2_ModifyInstanceAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ModifyInstanceAttributeInput{ + InstanceId: aws.String("String"), // Required + Attribute: aws.String("InstanceAttributeName"), + BlockDeviceMappings: []*ec2.InstanceBlockDeviceMappingSpecification{ + { // Required + DeviceName: aws.String("String"), + Ebs: &ec2.EbsInstanceBlockDeviceSpecification{ + DeleteOnTermination: aws.Bool(true), + VolumeId: aws.String("String"), + }, + NoDevice: aws.String("String"), + VirtualName: aws.String("String"), + }, + // More values... + }, + DisableApiTermination: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + DryRun: aws.Bool(true), + EbsOptimized: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + EnaSupport: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + Groups: []*string{ + aws.String("String"), // Required + // More values... + }, + InstanceInitiatedShutdownBehavior: &ec2.AttributeValue{ + Value: aws.String("String"), + }, + InstanceType: &ec2.AttributeValue{ + Value: aws.String("String"), + }, + Kernel: &ec2.AttributeValue{ + Value: aws.String("String"), + }, + Ramdisk: &ec2.AttributeValue{ + Value: aws.String("String"), + }, + SourceDestCheck: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + SriovNetSupport: &ec2.AttributeValue{ + Value: aws.String("String"), + }, + UserData: &ec2.BlobAttributeValue{ + Value: []byte("PAYLOAD"), + }, + Value: aws.String("String"), + } + resp, err := svc.ModifyInstanceAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifyInstancePlacement() { + svc := ec2.New(session.New()) + + params := &ec2.ModifyInstancePlacementInput{ + InstanceId: aws.String("String"), // Required + Affinity: aws.String("Affinity"), + HostId: aws.String("String"), + Tenancy: aws.String("HostTenancy"), + } + resp, err := svc.ModifyInstancePlacement(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifyNetworkInterfaceAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ModifyNetworkInterfaceAttributeInput{ + NetworkInterfaceId: aws.String("String"), // Required + Attachment: &ec2.NetworkInterfaceAttachmentChanges{ + AttachmentId: aws.String("String"), + DeleteOnTermination: aws.Bool(true), + }, + Description: &ec2.AttributeValue{ + Value: aws.String("String"), + }, + DryRun: aws.Bool(true), + Groups: []*string{ + aws.String("String"), // Required + // More values... + }, + SourceDestCheck: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + } + resp, err := svc.ModifyNetworkInterfaceAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifyReservedInstances() { + svc := ec2.New(session.New()) + + params := &ec2.ModifyReservedInstancesInput{ + ReservedInstancesIds: []*string{ // Required + aws.String("String"), // Required + // More values... 
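+ // (Here, as throughout these generated examples, "String" is a
+ // placeholder; a real call needs actual values such as Reserved
+ // Instance IDs.)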
+ }, + TargetConfigurations: []*ec2.ReservedInstancesConfiguration{ // Required + { // Required + AvailabilityZone: aws.String("String"), + InstanceCount: aws.Int64(1), + InstanceType: aws.String("InstanceType"), + Platform: aws.String("String"), + }, + // More values... + }, + ClientToken: aws.String("String"), + } + resp, err := svc.ModifyReservedInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifySnapshotAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ModifySnapshotAttributeInput{ + SnapshotId: aws.String("String"), // Required + Attribute: aws.String("SnapshotAttributeName"), + CreateVolumePermission: &ec2.CreateVolumePermissionModifications{ + Add: []*ec2.CreateVolumePermission{ + { // Required + Group: aws.String("PermissionGroup"), + UserId: aws.String("String"), + }, + // More values... + }, + Remove: []*ec2.CreateVolumePermission{ + { // Required + Group: aws.String("PermissionGroup"), + UserId: aws.String("String"), + }, + // More values... + }, + }, + DryRun: aws.Bool(true), + GroupNames: []*string{ + aws.String("String"), // Required + // More values... + }, + OperationType: aws.String("OperationType"), + UserIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.ModifySnapshotAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifySpotFleetRequest() { + svc := ec2.New(session.New()) + + params := &ec2.ModifySpotFleetRequestInput{ + SpotFleetRequestId: aws.String("String"), // Required + ExcessCapacityTerminationPolicy: aws.String("ExcessCapacityTerminationPolicy"), + TargetCapacity: aws.Int64(1), + } + resp, err := svc.ModifySpotFleetRequest(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifySubnetAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ModifySubnetAttributeInput{ + SubnetId: aws.String("String"), // Required + MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + } + resp, err := svc.ModifySubnetAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifyVolumeAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ModifyVolumeAttributeInput{ + VolumeId: aws.String("String"), // Required + AutoEnableIO: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + DryRun: aws.Bool(true), + } + resp, err := svc.ModifyVolumeAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_ModifyVpcAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ModifyVpcAttributeInput{ + VpcId: aws.String("String"), // Required + EnableDnsHostnames: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + EnableDnsSupport: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + } + resp, err := svc.ModifyVpcAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifyVpcEndpoint() { + svc := ec2.New(session.New()) + + params := &ec2.ModifyVpcEndpointInput{ + VpcEndpointId: aws.String("String"), // Required + AddRouteTableIds: []*string{ + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + PolicyDocument: aws.String("String"), + RemoveRouteTableIds: []*string{ + aws.String("String"), // Required + // More values... + }, + ResetPolicy: aws.Bool(true), + } + resp, err := svc.ModifyVpcEndpoint(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifyVpcPeeringConnectionOptions() { + svc := ec2.New(session.New()) + + params := &ec2.ModifyVpcPeeringConnectionOptionsInput{ + VpcPeeringConnectionId: aws.String("String"), // Required + AccepterPeeringConnectionOptions: &ec2.PeeringConnectionOptionsRequest{ + AllowEgressFromLocalClassicLinkToRemoteVpc: aws.Bool(true), // Required + AllowEgressFromLocalVpcToRemoteClassicLink: aws.Bool(true), // Required + }, + DryRun: aws.Bool(true), + RequesterPeeringConnectionOptions: &ec2.PeeringConnectionOptionsRequest{ + AllowEgressFromLocalClassicLinkToRemoteVpc: aws.Bool(true), // Required + AllowEgressFromLocalVpcToRemoteClassicLink: aws.Bool(true), // Required + }, + } + resp, err := svc.ModifyVpcPeeringConnectionOptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_MonitorInstances() { + svc := ec2.New(session.New()) + + params := &ec2.MonitorInstancesInput{ + InstanceIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + } + resp, err := svc.MonitorInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_MoveAddressToVpc() { + svc := ec2.New(session.New()) + + params := &ec2.MoveAddressToVpcInput{ + PublicIp: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.MoveAddressToVpc(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_PurchaseReservedInstancesOffering() { + svc := ec2.New(session.New()) + + params := &ec2.PurchaseReservedInstancesOfferingInput{ + InstanceCount: aws.Int64(1), // Required + ReservedInstancesOfferingId: aws.String("String"), // Required + DryRun: aws.Bool(true), + LimitPrice: &ec2.ReservedInstanceLimitPrice{ + Amount: aws.Float64(1.0), + CurrencyCode: aws.String("CurrencyCodeValues"), + }, + } + resp, err := svc.PurchaseReservedInstancesOffering(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_PurchaseScheduledInstances() { + svc := ec2.New(session.New()) + + params := &ec2.PurchaseScheduledInstancesInput{ + PurchaseRequests: []*ec2.PurchaseRequest{ // Required + { // Required + InstanceCount: aws.Int64(1), // Required + PurchaseToken: aws.String("String"), // Required + }, + // More values... + }, + ClientToken: aws.String("String"), + DryRun: aws.Bool(true), + } + resp, err := svc.PurchaseScheduledInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_RebootInstances() { + svc := ec2.New(session.New()) + + params := &ec2.RebootInstancesInput{ + InstanceIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + } + resp, err := svc.RebootInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_RegisterImage() { + svc := ec2.New(session.New()) + + params := &ec2.RegisterImageInput{ + Name: aws.String("String"), // Required + Architecture: aws.String("ArchitectureValues"), + BlockDeviceMappings: []*ec2.BlockDeviceMapping{ + { // Required + DeviceName: aws.String("String"), + Ebs: &ec2.EbsBlockDevice{ + DeleteOnTermination: aws.Bool(true), + Encrypted: aws.Bool(true), + Iops: aws.Int64(1), + SnapshotId: aws.String("String"), + VolumeSize: aws.Int64(1), + VolumeType: aws.String("VolumeType"), + }, + NoDevice: aws.String("String"), + VirtualName: aws.String("String"), + }, + // More values... + }, + Description: aws.String("String"), + DryRun: aws.Bool(true), + EnaSupport: aws.Bool(true), + ImageLocation: aws.String("String"), + KernelId: aws.String("String"), + RamdiskId: aws.String("String"), + RootDeviceName: aws.String("String"), + SriovNetSupport: aws.String("String"), + VirtualizationType: aws.String("String"), + } + resp, err := svc.RegisterImage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_RejectVpcPeeringConnection() { + svc := ec2.New(session.New()) + + params := &ec2.RejectVpcPeeringConnectionInput{ + VpcPeeringConnectionId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.RejectVpcPeeringConnection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ReleaseAddress() { + svc := ec2.New(session.New()) + + params := &ec2.ReleaseAddressInput{ + AllocationId: aws.String("String"), + DryRun: aws.Bool(true), + PublicIp: aws.String("String"), + } + resp, err := svc.ReleaseAddress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ReleaseHosts() { + svc := ec2.New(session.New()) + + params := &ec2.ReleaseHostsInput{ + HostIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.ReleaseHosts(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ReplaceNetworkAclAssociation() { + svc := ec2.New(session.New()) + + params := &ec2.ReplaceNetworkAclAssociationInput{ + AssociationId: aws.String("String"), // Required + NetworkAclId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.ReplaceNetworkAclAssociation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ReplaceNetworkAclEntry() { + svc := ec2.New(session.New()) + + params := &ec2.ReplaceNetworkAclEntryInput{ + CidrBlock: aws.String("String"), // Required + Egress: aws.Bool(true), // Required + NetworkAclId: aws.String("String"), // Required + Protocol: aws.String("String"), // Required + RuleAction: aws.String("RuleAction"), // Required + RuleNumber: aws.Int64(1), // Required + DryRun: aws.Bool(true), + IcmpTypeCode: &ec2.IcmpTypeCode{ + Code: aws.Int64(1), + Type: aws.Int64(1), + }, + PortRange: &ec2.PortRange{ + From: aws.Int64(1), + To: aws.Int64(1), + }, + } + resp, err := svc.ReplaceNetworkAclEntry(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ReplaceRoute() { + svc := ec2.New(session.New()) + + params := &ec2.ReplaceRouteInput{ + DestinationCidrBlock: aws.String("String"), // Required + RouteTableId: aws.String("String"), // Required + DryRun: aws.Bool(true), + GatewayId: aws.String("String"), + InstanceId: aws.String("String"), + NatGatewayId: aws.String("String"), + NetworkInterfaceId: aws.String("String"), + VpcPeeringConnectionId: aws.String("String"), + } + resp, err := svc.ReplaceRoute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_ReplaceRouteTableAssociation() { + svc := ec2.New(session.New()) + + params := &ec2.ReplaceRouteTableAssociationInput{ + AssociationId: aws.String("String"), // Required + RouteTableId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.ReplaceRouteTableAssociation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ReportInstanceStatus() { + svc := ec2.New(session.New()) + + params := &ec2.ReportInstanceStatusInput{ + Instances: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + ReasonCodes: []*string{ // Required + aws.String("ReportInstanceReasonCodes"), // Required + // More values... + }, + Status: aws.String("ReportStatusType"), // Required + Description: aws.String("String"), + DryRun: aws.Bool(true), + EndTime: aws.Time(time.Now()), + StartTime: aws.Time(time.Now()), + } + resp, err := svc.ReportInstanceStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_RequestSpotFleet() { + svc := ec2.New(session.New()) + + params := &ec2.RequestSpotFleetInput{ + SpotFleetRequestConfig: &ec2.SpotFleetRequestConfigData{ // Required + IamFleetRole: aws.String("String"), // Required + LaunchSpecifications: []*ec2.SpotFleetLaunchSpecification{ // Required + { // Required + AddressingType: aws.String("String"), + BlockDeviceMappings: []*ec2.BlockDeviceMapping{ + { // Required + DeviceName: aws.String("String"), + Ebs: &ec2.EbsBlockDevice{ + DeleteOnTermination: aws.Bool(true), + Encrypted: aws.Bool(true), + Iops: aws.Int64(1), + SnapshotId: aws.String("String"), + VolumeSize: aws.Int64(1), + VolumeType: aws.String("VolumeType"), + }, + NoDevice: aws.String("String"), + VirtualName: aws.String("String"), + }, + // More values... + }, + EbsOptimized: aws.Bool(true), + IamInstanceProfile: &ec2.IamInstanceProfileSpecification{ + Arn: aws.String("String"), + Name: aws.String("String"), + }, + ImageId: aws.String("String"), + InstanceType: aws.String("InstanceType"), + KernelId: aws.String("String"), + KeyName: aws.String("String"), + Monitoring: &ec2.SpotFleetMonitoring{ + Enabled: aws.Bool(true), + }, + NetworkInterfaces: []*ec2.InstanceNetworkInterfaceSpecification{ + { // Required + AssociatePublicIpAddress: aws.Bool(true), + DeleteOnTermination: aws.Bool(true), + Description: aws.String("String"), + DeviceIndex: aws.Int64(1), + Groups: []*string{ + aws.String("String"), // Required + // More values... + }, + NetworkInterfaceId: aws.String("String"), + PrivateIpAddress: aws.String("String"), + PrivateIpAddresses: []*ec2.PrivateIpAddressSpecification{ + { // Required + PrivateIpAddress: aws.String("String"), // Required + Primary: aws.Bool(true), + }, + // More values... + }, + SecondaryPrivateIpAddressCount: aws.Int64(1), + SubnetId: aws.String("String"), + }, + // More values... + }, + Placement: &ec2.SpotPlacement{ + AvailabilityZone: aws.String("String"), + GroupName: aws.String("String"), + }, + RamdiskId: aws.String("String"), + SecurityGroups: []*ec2.GroupIdentifier{ + { // Required + GroupId: aws.String("String"), + GroupName: aws.String("String"), + }, + // More values... 
+ }, + SpotPrice: aws.String("String"), + SubnetId: aws.String("String"), + UserData: aws.String("String"), + WeightedCapacity: aws.Float64(1.0), + }, + // More values... + }, + SpotPrice: aws.String("String"), // Required + TargetCapacity: aws.Int64(1), // Required + AllocationStrategy: aws.String("AllocationStrategy"), + ClientToken: aws.String("String"), + ExcessCapacityTerminationPolicy: aws.String("ExcessCapacityTerminationPolicy"), + FulfilledCapacity: aws.Float64(1.0), + TerminateInstancesWithExpiration: aws.Bool(true), + Type: aws.String("FleetType"), + ValidFrom: aws.Time(time.Now()), + ValidUntil: aws.Time(time.Now()), + }, + DryRun: aws.Bool(true), + } + resp, err := svc.RequestSpotFleet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_RequestSpotInstances() { + svc := ec2.New(session.New()) + + params := &ec2.RequestSpotInstancesInput{ + SpotPrice: aws.String("String"), // Required + AvailabilityZoneGroup: aws.String("String"), + BlockDurationMinutes: aws.Int64(1), + ClientToken: aws.String("String"), + DryRun: aws.Bool(true), + InstanceCount: aws.Int64(1), + LaunchGroup: aws.String("String"), + LaunchSpecification: &ec2.RequestSpotLaunchSpecification{ + AddressingType: aws.String("String"), + BlockDeviceMappings: []*ec2.BlockDeviceMapping{ + { // Required + DeviceName: aws.String("String"), + Ebs: &ec2.EbsBlockDevice{ + DeleteOnTermination: aws.Bool(true), + Encrypted: aws.Bool(true), + Iops: aws.Int64(1), + SnapshotId: aws.String("String"), + VolumeSize: aws.Int64(1), + VolumeType: aws.String("VolumeType"), + }, + NoDevice: aws.String("String"), + VirtualName: aws.String("String"), + }, + // More values... + }, + EbsOptimized: aws.Bool(true), + IamInstanceProfile: &ec2.IamInstanceProfileSpecification{ + Arn: aws.String("String"), + Name: aws.String("String"), + }, + ImageId: aws.String("String"), + InstanceType: aws.String("InstanceType"), + KernelId: aws.String("String"), + KeyName: aws.String("String"), + Monitoring: &ec2.RunInstancesMonitoringEnabled{ + Enabled: aws.Bool(true), // Required + }, + NetworkInterfaces: []*ec2.InstanceNetworkInterfaceSpecification{ + { // Required + AssociatePublicIpAddress: aws.Bool(true), + DeleteOnTermination: aws.Bool(true), + Description: aws.String("String"), + DeviceIndex: aws.Int64(1), + Groups: []*string{ + aws.String("String"), // Required + // More values... + }, + NetworkInterfaceId: aws.String("String"), + PrivateIpAddress: aws.String("String"), + PrivateIpAddresses: []*ec2.PrivateIpAddressSpecification{ + { // Required + PrivateIpAddress: aws.String("String"), // Required + Primary: aws.Bool(true), + }, + // More values... + }, + SecondaryPrivateIpAddressCount: aws.Int64(1), + SubnetId: aws.String("String"), + }, + // More values... + }, + Placement: &ec2.SpotPlacement{ + AvailabilityZone: aws.String("String"), + GroupName: aws.String("String"), + }, + RamdiskId: aws.String("String"), + SecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + SecurityGroups: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + SubnetId: aws.String("String"), + UserData: aws.String("String"), + }, + Type: aws.String("SpotInstanceType"), + ValidFrom: aws.Time(time.Now()), + ValidUntil: aws.Time(time.Now()), + } + resp, err := svc.RequestSpotInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ResetImageAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ResetImageAttributeInput{ + Attribute: aws.String("ResetImageAttributeName"), // Required + ImageId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.ResetImageAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ResetInstanceAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ResetInstanceAttributeInput{ + Attribute: aws.String("InstanceAttributeName"), // Required + InstanceId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.ResetInstanceAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ResetNetworkInterfaceAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ResetNetworkInterfaceAttributeInput{ + NetworkInterfaceId: aws.String("String"), // Required + DryRun: aws.Bool(true), + SourceDestCheck: aws.String("String"), + } + resp, err := svc.ResetNetworkInterfaceAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ResetSnapshotAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ResetSnapshotAttributeInput{ + Attribute: aws.String("SnapshotAttributeName"), // Required + SnapshotId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.ResetSnapshotAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_RestoreAddressToClassic() { + svc := ec2.New(session.New()) + + params := &ec2.RestoreAddressToClassicInput{ + PublicIp: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.RestoreAddressToClassic(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_RevokeSecurityGroupEgress() { + svc := ec2.New(session.New()) + + params := &ec2.RevokeSecurityGroupEgressInput{ + GroupId: aws.String("String"), // Required + CidrIp: aws.String("String"), + DryRun: aws.Bool(true), + FromPort: aws.Int64(1), + IpPermissions: []*ec2.IpPermission{ + { // Required + FromPort: aws.Int64(1), + IpProtocol: aws.String("String"), + IpRanges: []*ec2.IpRange{ + { // Required + CidrIp: aws.String("String"), + }, + // More values... 
+ }, + PrefixListIds: []*ec2.PrefixListId{ + { // Required + PrefixListId: aws.String("String"), + }, + // More values... + }, + ToPort: aws.Int64(1), + UserIdGroupPairs: []*ec2.UserIdGroupPair{ + { // Required + GroupId: aws.String("String"), + GroupName: aws.String("String"), + PeeringStatus: aws.String("String"), + UserId: aws.String("String"), + VpcId: aws.String("String"), + VpcPeeringConnectionId: aws.String("String"), + }, + // More values... + }, + }, + // More values... + }, + IpProtocol: aws.String("String"), + SourceSecurityGroupName: aws.String("String"), + SourceSecurityGroupOwnerId: aws.String("String"), + ToPort: aws.Int64(1), + } + resp, err := svc.RevokeSecurityGroupEgress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_RevokeSecurityGroupIngress() { + svc := ec2.New(session.New()) + + params := &ec2.RevokeSecurityGroupIngressInput{ + CidrIp: aws.String("String"), + DryRun: aws.Bool(true), + FromPort: aws.Int64(1), + GroupId: aws.String("String"), + GroupName: aws.String("String"), + IpPermissions: []*ec2.IpPermission{ + { // Required + FromPort: aws.Int64(1), + IpProtocol: aws.String("String"), + IpRanges: []*ec2.IpRange{ + { // Required + CidrIp: aws.String("String"), + }, + // More values... + }, + PrefixListIds: []*ec2.PrefixListId{ + { // Required + PrefixListId: aws.String("String"), + }, + // More values... + }, + ToPort: aws.Int64(1), + UserIdGroupPairs: []*ec2.UserIdGroupPair{ + { // Required + GroupId: aws.String("String"), + GroupName: aws.String("String"), + PeeringStatus: aws.String("String"), + UserId: aws.String("String"), + VpcId: aws.String("String"), + VpcPeeringConnectionId: aws.String("String"), + }, + // More values... + }, + }, + // More values... + }, + IpProtocol: aws.String("String"), + SourceSecurityGroupName: aws.String("String"), + SourceSecurityGroupOwnerId: aws.String("String"), + ToPort: aws.Int64(1), + } + resp, err := svc.RevokeSecurityGroupIngress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_RunInstances() { + svc := ec2.New(session.New()) + + params := &ec2.RunInstancesInput{ + ImageId: aws.String("String"), // Required + MaxCount: aws.Int64(1), // Required + MinCount: aws.Int64(1), // Required + AdditionalInfo: aws.String("String"), + BlockDeviceMappings: []*ec2.BlockDeviceMapping{ + { // Required + DeviceName: aws.String("String"), + Ebs: &ec2.EbsBlockDevice{ + DeleteOnTermination: aws.Bool(true), + Encrypted: aws.Bool(true), + Iops: aws.Int64(1), + SnapshotId: aws.String("String"), + VolumeSize: aws.Int64(1), + VolumeType: aws.String("VolumeType"), + }, + NoDevice: aws.String("String"), + VirtualName: aws.String("String"), + }, + // More values... 
+ }, + ClientToken: aws.String("String"), + DisableApiTermination: aws.Bool(true), + DryRun: aws.Bool(true), + EbsOptimized: aws.Bool(true), + IamInstanceProfile: &ec2.IamInstanceProfileSpecification{ + Arn: aws.String("String"), + Name: aws.String("String"), + }, + InstanceInitiatedShutdownBehavior: aws.String("ShutdownBehavior"), + InstanceType: aws.String("InstanceType"), + KernelId: aws.String("String"), + KeyName: aws.String("String"), + Monitoring: &ec2.RunInstancesMonitoringEnabled{ + Enabled: aws.Bool(true), // Required + }, + NetworkInterfaces: []*ec2.InstanceNetworkInterfaceSpecification{ + { // Required + AssociatePublicIpAddress: aws.Bool(true), + DeleteOnTermination: aws.Bool(true), + Description: aws.String("String"), + DeviceIndex: aws.Int64(1), + Groups: []*string{ + aws.String("String"), // Required + // More values... + }, + NetworkInterfaceId: aws.String("String"), + PrivateIpAddress: aws.String("String"), + PrivateIpAddresses: []*ec2.PrivateIpAddressSpecification{ + { // Required + PrivateIpAddress: aws.String("String"), // Required + Primary: aws.Bool(true), + }, + // More values... + }, + SecondaryPrivateIpAddressCount: aws.Int64(1), + SubnetId: aws.String("String"), + }, + // More values... + }, + Placement: &ec2.Placement{ + Affinity: aws.String("String"), + AvailabilityZone: aws.String("String"), + GroupName: aws.String("String"), + HostId: aws.String("String"), + Tenancy: aws.String("Tenancy"), + }, + PrivateIpAddress: aws.String("String"), + RamdiskId: aws.String("String"), + SecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + SecurityGroups: []*string{ + aws.String("String"), // Required + // More values... + }, + SubnetId: aws.String("String"), + UserData: aws.String("String"), + } + resp, err := svc.RunInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_RunScheduledInstances() { + svc := ec2.New(session.New()) + + params := &ec2.RunScheduledInstancesInput{ + LaunchSpecification: &ec2.ScheduledInstancesLaunchSpecification{ // Required + ImageId: aws.String("String"), // Required + BlockDeviceMappings: []*ec2.ScheduledInstancesBlockDeviceMapping{ + { // Required + DeviceName: aws.String("String"), + Ebs: &ec2.ScheduledInstancesEbs{ + DeleteOnTermination: aws.Bool(true), + Encrypted: aws.Bool(true), + Iops: aws.Int64(1), + SnapshotId: aws.String("String"), + VolumeSize: aws.Int64(1), + VolumeType: aws.String("String"), + }, + NoDevice: aws.String("String"), + VirtualName: aws.String("String"), + }, + // More values... + }, + EbsOptimized: aws.Bool(true), + IamInstanceProfile: &ec2.ScheduledInstancesIamInstanceProfile{ + Arn: aws.String("String"), + Name: aws.String("String"), + }, + InstanceType: aws.String("String"), + KernelId: aws.String("String"), + KeyName: aws.String("String"), + Monitoring: &ec2.ScheduledInstancesMonitoring{ + Enabled: aws.Bool(true), + }, + NetworkInterfaces: []*ec2.ScheduledInstancesNetworkInterface{ + { // Required + AssociatePublicIpAddress: aws.Bool(true), + DeleteOnTermination: aws.Bool(true), + Description: aws.String("String"), + DeviceIndex: aws.Int64(1), + Groups: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + NetworkInterfaceId: aws.String("String"), + PrivateIpAddress: aws.String("String"), + PrivateIpAddressConfigs: []*ec2.ScheduledInstancesPrivateIpAddressConfig{ + { // Required + Primary: aws.Bool(true), + PrivateIpAddress: aws.String("String"), + }, + // More values... + }, + SecondaryPrivateIpAddressCount: aws.Int64(1), + SubnetId: aws.String("String"), + }, + // More values... + }, + Placement: &ec2.ScheduledInstancesPlacement{ + AvailabilityZone: aws.String("String"), + GroupName: aws.String("String"), + }, + RamdiskId: aws.String("String"), + SecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + SubnetId: aws.String("String"), + UserData: aws.String("String"), + }, + ScheduledInstanceId: aws.String("String"), // Required + ClientToken: aws.String("String"), + DryRun: aws.Bool(true), + InstanceCount: aws.Int64(1), + } + resp, err := svc.RunScheduledInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_StartInstances() { + svc := ec2.New(session.New()) + + params := &ec2.StartInstancesInput{ + InstanceIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + AdditionalInfo: aws.String("String"), + DryRun: aws.Bool(true), + } + resp, err := svc.StartInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_StopInstances() { + svc := ec2.New(session.New()) + + params := &ec2.StopInstancesInput{ + InstanceIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + Force: aws.Bool(true), + } + resp, err := svc.StopInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_TerminateInstances() { + svc := ec2.New(session.New()) + + params := &ec2.TerminateInstancesInput{ + InstanceIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + } + resp, err := svc.TerminateInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_UnassignPrivateIpAddresses() { + svc := ec2.New(session.New()) + + params := &ec2.UnassignPrivateIpAddressesInput{ + NetworkInterfaceId: aws.String("String"), // Required + PrivateIpAddresses: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.UnassignPrivateIpAddresses(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_UnmonitorInstances() { + svc := ec2.New(session.New()) + + params := &ec2.UnmonitorInstancesInput{ + InstanceIds: []*string{ // Required + aws.String("String"), // Required + // More values... 
+ },
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.UnmonitorInstances(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go
new file mode 100644
index 000000000..4e6fa4cd7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go
@@ -0,0 +1,89 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package ec2
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol/ec2query"
+)
+
+// Amazon Elastic Compute Cloud (Amazon EC2) provides resizable computing capacity
+// in the Amazon Web Services (AWS) cloud. Using Amazon EC2 eliminates your
+// need to invest in hardware up front, so you can develop and deploy applications
+// faster.
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type EC2 struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "ec2"
+
+// New creates a new instance of the EC2 client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an EC2 client from just a session.
+// svc := ec2.New(mySession)
+//
+// // Create an EC2 client with additional configuration
+// svc := ec2.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2 {
+ c := p.ClientConfig(ServiceName, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *EC2 {
+ svc := &EC2{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2016-04-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+ svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler)
+ svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler)
+ svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler)
+ svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler)
+
+ // Run custom client initialization if present
+ if initClient != nil {
+ initClient(svc.Client)
+ }
+
+ return svc
+}
+
+// newRequest creates a new request for an EC2 operation and runs any
+// custom request initialization.
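+// Service customizations in this package may assign the package-level
+// initClient and initRequest hooks (nil by default) to adjust the client
+// or individual requests during construction.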
+func (c *EC2) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go new file mode 100644 index 000000000..bee4a057f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go @@ -0,0 +1,907 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package ec2 + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *EC2) WaitUntilBundleTaskComplete(input *DescribeBundleTasksInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeBundleTasks", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "BundleTasks[].State", + Expected: "complete", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "BundleTasks[].State", + Expected: "failed", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilConversionTaskCancelled(input *DescribeConversionTasksInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeConversionTasks", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "ConversionTasks[].State", + Expected: "cancelled", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilConversionTaskCompleted(input *DescribeConversionTasksInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeConversionTasks", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "ConversionTasks[].State", + Expected: "completed", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "ConversionTasks[].State", + Expected: "cancelled", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "ConversionTasks[].State", + Expected: "cancelling", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilConversionTaskDeleted(input *DescribeConversionTasksInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeConversionTasks", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "ConversionTasks[].State", + Expected: "deleted", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilCustomerGatewayAvailable(input *DescribeCustomerGatewaysInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeCustomerGateways", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "CustomerGateways[].State", + Expected: "available", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "CustomerGateways[].State", + Expected: "deleted", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "CustomerGateways[].State", + Expected: "deleting", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilExportTaskCancelled(input 
*DescribeExportTasksInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeExportTasks", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "ExportTasks[].State", + Expected: "cancelled", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilExportTaskCompleted(input *DescribeExportTasksInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeExportTasks", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "ExportTasks[].State", + Expected: "completed", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilImageAvailable(input *DescribeImagesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeImages", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Images[].State", + Expected: "available", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Images[].State", + Expected: "failed", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilImageExists(input *DescribeImagesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeImages", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "path", + Argument: "length(Images[]) > `0`", + Expected: true, + }, + { + State: "retry", + Matcher: "error", + Argument: "", + Expected: "InvalidAMIID.NotFound", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilInstanceExists(input *DescribeInstancesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstances", + Delay: 5, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "path", + Argument: "length(Reservations[]) > `0`", + Expected: true, + }, + { + State: "retry", + Matcher: "error", + Argument: "", + Expected: "InvalidInstanceID.NotFound", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilInstanceRunning(input *DescribeInstancesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstances", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Reservations[].Instances[].State.Name", + Expected: "running", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Reservations[].Instances[].State.Name", + Expected: "shutting-down", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Reservations[].Instances[].State.Name", + Expected: "terminated", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Reservations[].Instances[].State.Name", + Expected: "stopping", + }, + { + State: "retry", + Matcher: "error", + Argument: "", + Expected: "InvalidInstanceID.NotFound", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilInstanceStatusOk(input *DescribeInstanceStatusInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstanceStatus", + Delay: 15, + MaxAttempts: 40, + Acceptors: 
[]waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "InstanceStatuses[].InstanceStatus.Status", + Expected: "ok", + }, + { + State: "retry", + Matcher: "error", + Argument: "", + Expected: "InvalidInstanceID.NotFound", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilInstanceStopped(input *DescribeInstancesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstances", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Reservations[].Instances[].State.Name", + Expected: "stopped", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Reservations[].Instances[].State.Name", + Expected: "pending", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Reservations[].Instances[].State.Name", + Expected: "terminated", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilInstanceTerminated(input *DescribeInstancesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstances", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Reservations[].Instances[].State.Name", + Expected: "terminated", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Reservations[].Instances[].State.Name", + Expected: "pending", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Reservations[].Instances[].State.Name", + Expected: "stopping", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilKeyPairExists(input *DescribeKeyPairsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeKeyPairs", + Delay: 5, + MaxAttempts: 6, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "length(KeyPairs[].KeyName) > `0`", + Expected: true, + }, + { + State: "retry", + Matcher: "error", + Argument: "", + Expected: "InvalidKeyPair.NotFound", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilNatGatewayAvailable(input *DescribeNatGatewaysInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeNatGateways", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "NatGateways[].State", + Expected: "available", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "NatGateways[].State", + Expected: "failed", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "NatGateways[].State", + Expected: "deleting", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "NatGateways[].State", + Expected: "deleted", + }, + { + State: "retry", + Matcher: "error", + Argument: "", + Expected: "NatGatewayNotFound", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilNetworkInterfaceAvailable(input *DescribeNetworkInterfacesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeNetworkInterfaces", + Delay: 20, + MaxAttempts: 10, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "NetworkInterfaces[].Status", + Expected: "available", + }, + { + State: "failure", 
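+ // Note: this waiter treats the NotFound error below as a terminal
+ // "failure", unlike the instance and VPC waiters, which "retry" on
+ // their NotFound errors.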
+ Matcher: "error", + Argument: "", + Expected: "InvalidNetworkInterfaceID.NotFound", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilPasswordDataAvailable(input *GetPasswordDataInput) error { + waiterCfg := waiter.Config{ + Operation: "GetPasswordData", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "path", + Argument: "length(PasswordData) > `0`", + Expected: true, + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilSnapshotCompleted(input *DescribeSnapshotsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeSnapshots", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Snapshots[].State", + Expected: "completed", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilSpotInstanceRequestFulfilled(input *DescribeSpotInstanceRequestsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeSpotInstanceRequests", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "SpotInstanceRequests[].Status.Code", + Expected: "fulfilled", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "SpotInstanceRequests[].Status.Code", + Expected: "schedule-expired", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "SpotInstanceRequests[].Status.Code", + Expected: "canceled-before-fulfillment", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "SpotInstanceRequests[].Status.Code", + Expected: "bad-parameters", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "SpotInstanceRequests[].Status.Code", + Expected: "system-error", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilSubnetAvailable(input *DescribeSubnetsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeSubnets", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Subnets[].State", + Expected: "available", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilSystemStatusOk(input *DescribeInstanceStatusInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstanceStatus", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "InstanceStatuses[].SystemStatus.Status", + Expected: "ok", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilVolumeAvailable(input *DescribeVolumesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeVolumes", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Volumes[].State", + Expected: "available", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Volumes[].State", + Expected: "deleted", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilVolumeDeleted(input 
*DescribeVolumesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeVolumes", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Volumes[].State", + Expected: "deleted", + }, + { + State: "success", + Matcher: "error", + Argument: "", + Expected: "InvalidVolume.NotFound", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilVolumeInUse(input *DescribeVolumesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeVolumes", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Volumes[].State", + Expected: "in-use", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Volumes[].State", + Expected: "deleted", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilVpcAvailable(input *DescribeVpcsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeVpcs", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Vpcs[].State", + Expected: "available", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilVpcExists(input *DescribeVpcsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeVpcs", + Delay: 1, + MaxAttempts: 5, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 200, + }, + { + State: "retry", + Matcher: "error", + Argument: "", + Expected: "InvalidVpcID.NotFound", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilVpcPeeringConnectionExists(input *DescribeVpcPeeringConnectionsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeVpcPeeringConnections", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 200, + }, + { + State: "retry", + Matcher: "error", + Argument: "", + Expected: "InvalidVpcPeeringConnectionID.NotFound", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilVpnConnectionAvailable(input *DescribeVpnConnectionsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeVpnConnections", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "VpnConnections[].State", + Expected: "available", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "VpnConnections[].State", + Expected: "deleting", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "VpnConnections[].State", + Expected: "deleted", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilVpnConnectionDeleted(input *DescribeVpnConnectionsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeVpnConnections", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "VpnConnections[].State", + Expected: "deleted", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "VpnConnections[].State", 
+ Expected: "pending", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go b/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go new file mode 100644 index 000000000..aa162c0be --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go @@ -0,0 +1,2079 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package ecr provides a client for Amazon EC2 Container Registry. +package ecr + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opBatchCheckLayerAvailability = "BatchCheckLayerAvailability" + +// BatchCheckLayerAvailabilityRequest generates a "aws/request.Request" representing the +// client's request for the BatchCheckLayerAvailability operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the BatchCheckLayerAvailability method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the BatchCheckLayerAvailabilityRequest method. +// req, resp := client.BatchCheckLayerAvailabilityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ECR) BatchCheckLayerAvailabilityRequest(input *BatchCheckLayerAvailabilityInput) (req *request.Request, output *BatchCheckLayerAvailabilityOutput) { + op := &request.Operation{ + Name: opBatchCheckLayerAvailability, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchCheckLayerAvailabilityInput{} + } + + req = c.newRequest(op, input, output) + output = &BatchCheckLayerAvailabilityOutput{} + req.Data = output + return +} + +// Check the availability of multiple image layers in a specified registry and +// repository. +// +// This operation is used by the Amazon ECR proxy, and it is not intended +// for general use by customers. Use the docker CLI to pull, tag, and push images. +func (c *ECR) BatchCheckLayerAvailability(input *BatchCheckLayerAvailabilityInput) (*BatchCheckLayerAvailabilityOutput, error) { + req, out := c.BatchCheckLayerAvailabilityRequest(input) + err := req.Send() + return out, err +} + +const opBatchDeleteImage = "BatchDeleteImage" + +// BatchDeleteImageRequest generates a "aws/request.Request" representing the +// client's request for the BatchDeleteImage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the BatchDeleteImage method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the BatchDeleteImageRequest method. 
+// req, resp := client.BatchDeleteImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ECR) BatchDeleteImageRequest(input *BatchDeleteImageInput) (req *request.Request, output *BatchDeleteImageOutput) { + op := &request.Operation{ + Name: opBatchDeleteImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchDeleteImageInput{} + } + + req = c.newRequest(op, input, output) + output = &BatchDeleteImageOutput{} + req.Data = output + return +} + +// Deletes a list of specified images within a specified repository. Images +// are specified with either imageTag or imageDigest. +func (c *ECR) BatchDeleteImage(input *BatchDeleteImageInput) (*BatchDeleteImageOutput, error) { + req, out := c.BatchDeleteImageRequest(input) + err := req.Send() + return out, err +} + +const opBatchGetImage = "BatchGetImage" + +// BatchGetImageRequest generates a "aws/request.Request" representing the +// client's request for the BatchGetImage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the BatchGetImage method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the BatchGetImageRequest method. +// req, resp := client.BatchGetImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ECR) BatchGetImageRequest(input *BatchGetImageInput) (req *request.Request, output *BatchGetImageOutput) { + op := &request.Operation{ + Name: opBatchGetImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchGetImageInput{} + } + + req = c.newRequest(op, input, output) + output = &BatchGetImageOutput{} + req.Data = output + return +} + +// Gets detailed information for specified images within a specified repository. +// Images are specified with either imageTag or imageDigest. +func (c *ECR) BatchGetImage(input *BatchGetImageInput) (*BatchGetImageOutput, error) { + req, out := c.BatchGetImageRequest(input) + err := req.Send() + return out, err +} + +const opCompleteLayerUpload = "CompleteLayerUpload" + +// CompleteLayerUploadRequest generates a "aws/request.Request" representing the +// client's request for the CompleteLayerUpload operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CompleteLayerUpload method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CompleteLayerUploadRequest method. 
+// req, resp := client.CompleteLayerUploadRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+//     fmt.Println(resp)
+// }
+//
+func (c *ECR) CompleteLayerUploadRequest(input *CompleteLayerUploadInput) (req *request.Request, output *CompleteLayerUploadOutput) {
+	op := &request.Operation{
+		Name:       opCompleteLayerUpload,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CompleteLayerUploadInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CompleteLayerUploadOutput{}
+	req.Data = output
+	return
+}
+
+// Inform Amazon ECR that the image layer upload for a specified registry,
+// repository name, and upload ID has completed. You can optionally provide a
+// sha256 digest of the image layer for data validation purposes.
+//
+// This operation is used by the Amazon ECR proxy, and it is not intended
+// for general use by customers. Use the docker CLI to pull, tag, and push images.
+func (c *ECR) CompleteLayerUpload(input *CompleteLayerUploadInput) (*CompleteLayerUploadOutput, error) {
+	req, out := c.CompleteLayerUploadRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateRepository = "CreateRepository"
+
+// CreateRepositoryRequest generates a "aws/request.Request" representing the
+// client's request for the CreateRepository operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CreateRepository method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the CreateRepositoryRequest method.
+// req, resp := client.CreateRepositoryRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+//     fmt.Println(resp)
+// }
+//
+func (c *ECR) CreateRepositoryRequest(input *CreateRepositoryInput) (req *request.Request, output *CreateRepositoryOutput) {
+	op := &request.Operation{
+		Name:       opCreateRepository,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateRepositoryInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CreateRepositoryOutput{}
+	req.Data = output
+	return
+}
+
+// Creates an image repository.
+func (c *ECR) CreateRepository(input *CreateRepositoryInput) (*CreateRepositoryOutput, error) {
+	req, out := c.CreateRepositoryRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteRepository = "DeleteRepository"
+
+// DeleteRepositoryRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteRepository operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DeleteRepository method directly
+// instead.
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteRepositoryRequest method. +// req, resp := client.DeleteRepositoryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ECR) DeleteRepositoryRequest(input *DeleteRepositoryInput) (req *request.Request, output *DeleteRepositoryOutput) { + op := &request.Operation{ + Name: opDeleteRepository, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRepositoryInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteRepositoryOutput{} + req.Data = output + return +} + +// Deletes an existing image repository. If a repository contains images, you +// must use the force option to delete it. +func (c *ECR) DeleteRepository(input *DeleteRepositoryInput) (*DeleteRepositoryOutput, error) { + req, out := c.DeleteRepositoryRequest(input) + err := req.Send() + return out, err +} + +const opDeleteRepositoryPolicy = "DeleteRepositoryPolicy" + +// DeleteRepositoryPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRepositoryPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteRepositoryPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteRepositoryPolicyRequest method. +// req, resp := client.DeleteRepositoryPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ECR) DeleteRepositoryPolicyRequest(input *DeleteRepositoryPolicyInput) (req *request.Request, output *DeleteRepositoryPolicyOutput) { + op := &request.Operation{ + Name: opDeleteRepositoryPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRepositoryPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteRepositoryPolicyOutput{} + req.Data = output + return +} + +// Deletes the repository policy from a specified repository. +func (c *ECR) DeleteRepositoryPolicy(input *DeleteRepositoryPolicyInput) (*DeleteRepositoryPolicyOutput, error) { + req, out := c.DeleteRepositoryPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDescribeRepositories = "DescribeRepositories" + +// DescribeRepositoriesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeRepositories operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeRepositories method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeRepositoriesRequest method. +// req, resp := client.DescribeRepositoriesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ECR) DescribeRepositoriesRequest(input *DescribeRepositoriesInput) (req *request.Request, output *DescribeRepositoriesOutput) { + op := &request.Operation{ + Name: opDescribeRepositories, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeRepositoriesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeRepositoriesOutput{} + req.Data = output + return +} + +// Describes image repositories in a registry. +func (c *ECR) DescribeRepositories(input *DescribeRepositoriesInput) (*DescribeRepositoriesOutput, error) { + req, out := c.DescribeRepositoriesRequest(input) + err := req.Send() + return out, err +} + +const opGetAuthorizationToken = "GetAuthorizationToken" + +// GetAuthorizationTokenRequest generates a "aws/request.Request" representing the +// client's request for the GetAuthorizationToken operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetAuthorizationToken method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetAuthorizationTokenRequest method. +// req, resp := client.GetAuthorizationTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ECR) GetAuthorizationTokenRequest(input *GetAuthorizationTokenInput) (req *request.Request, output *GetAuthorizationTokenOutput) { + op := &request.Operation{ + Name: opGetAuthorizationToken, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAuthorizationTokenInput{} + } + + req = c.newRequest(op, input, output) + output = &GetAuthorizationTokenOutput{} + req.Data = output + return +} + +// Retrieves a token that is valid for a specified registry for 12 hours. This +// command allows you to use the docker CLI to push and pull images with Amazon +// ECR. If you do not specify a registry, the default registry is assumed. +// +// The authorizationToken returned for each registry specified is a base64 +// encoded string that can be decoded and used in a docker login command to +// authenticate to a registry. The AWS CLI offers an aws ecr get-login command +// that simplifies the login process. +func (c *ECR) GetAuthorizationToken(input *GetAuthorizationTokenInput) (*GetAuthorizationTokenOutput, error) { + req, out := c.GetAuthorizationTokenRequest(input) + err := req.Send() + return out, err +} + +const opGetDownloadUrlForLayer = "GetDownloadUrlForLayer" + +// GetDownloadUrlForLayerRequest generates a "aws/request.Request" representing the +// client's request for the GetDownloadUrlForLayer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetDownloadUrlForLayer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetDownloadUrlForLayerRequest method. +// req, resp := client.GetDownloadUrlForLayerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ECR) GetDownloadUrlForLayerRequest(input *GetDownloadUrlForLayerInput) (req *request.Request, output *GetDownloadUrlForLayerOutput) { + op := &request.Operation{ + Name: opGetDownloadUrlForLayer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDownloadUrlForLayerInput{} + } + + req = c.newRequest(op, input, output) + output = &GetDownloadUrlForLayerOutput{} + req.Data = output + return +} + +// Retrieves the pre-signed Amazon S3 download URL corresponding to an image +// layer. You can only get URLs for image layers that are referenced in an image. +// +// This operation is used by the Amazon ECR proxy, and it is not intended +// for general use by customers. Use the docker CLI to pull, tag, and push images. +func (c *ECR) GetDownloadUrlForLayer(input *GetDownloadUrlForLayerInput) (*GetDownloadUrlForLayerOutput, error) { + req, out := c.GetDownloadUrlForLayerRequest(input) + err := req.Send() + return out, err +} + +const opGetRepositoryPolicy = "GetRepositoryPolicy" + +// GetRepositoryPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetRepositoryPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetRepositoryPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetRepositoryPolicyRequest method. +// req, resp := client.GetRepositoryPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ECR) GetRepositoryPolicyRequest(input *GetRepositoryPolicyInput) (req *request.Request, output *GetRepositoryPolicyOutput) { + op := &request.Operation{ + Name: opGetRepositoryPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetRepositoryPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetRepositoryPolicyOutput{} + req.Data = output + return +} + +// Retrieves the repository policy for a specified repository. 
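+//
+// // Example (an illustrative sketch, not generated documentation): fetch a
+// // policy, assuming an *ECR client named svc, the aws and fmt packages, and
+// // a hypothetical repository named "my-web-app".
+// resp, err := svc.GetRepositoryPolicy(&GetRepositoryPolicyInput{
+//     RepositoryName: aws.String("my-web-app"),
+// })
+// if err == nil {
+//     fmt.Println(*resp.PolicyText) // the policy document as JSON text
+// }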
+func (c *ECR) GetRepositoryPolicy(input *GetRepositoryPolicyInput) (*GetRepositoryPolicyOutput, error) { + req, out := c.GetRepositoryPolicyRequest(input) + err := req.Send() + return out, err +} + +const opInitiateLayerUpload = "InitiateLayerUpload" + +// InitiateLayerUploadRequest generates a "aws/request.Request" representing the +// client's request for the InitiateLayerUpload operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InitiateLayerUpload method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InitiateLayerUploadRequest method. +// req, resp := client.InitiateLayerUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ECR) InitiateLayerUploadRequest(input *InitiateLayerUploadInput) (req *request.Request, output *InitiateLayerUploadOutput) { + op := &request.Operation{ + Name: opInitiateLayerUpload, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InitiateLayerUploadInput{} + } + + req = c.newRequest(op, input, output) + output = &InitiateLayerUploadOutput{} + req.Data = output + return +} + +// Notify Amazon ECR that you intend to upload an image layer. +// +// This operation is used by the Amazon ECR proxy, and it is not intended +// for general use by customers. Use the docker CLI to pull, tag, and push images. +func (c *ECR) InitiateLayerUpload(input *InitiateLayerUploadInput) (*InitiateLayerUploadOutput, error) { + req, out := c.InitiateLayerUploadRequest(input) + err := req.Send() + return out, err +} + +const opListImages = "ListImages" + +// ListImagesRequest generates a "aws/request.Request" representing the +// client's request for the ListImages operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListImages method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListImagesRequest method. +// req, resp := client.ListImagesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ECR) ListImagesRequest(input *ListImagesInput) (req *request.Request, output *ListImagesOutput) { + op := &request.Operation{ + Name: opListImages, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListImagesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListImagesOutput{} + req.Data = output + return +} + +// Lists all the image IDs for a given repository. 
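+//
+// // Example (an illustrative sketch, not generated documentation): drain
+// // every page of image IDs, assuming an *ECR client named svc and a
+// // hypothetical repository named "my-web-app".
+// input := &ListImagesInput{RepositoryName: aws.String("my-web-app")}
+// for {
+//     resp, err := svc.ListImages(input)
+//     if err != nil {
+//         break
+//     }
+//     // ... collect resp.ImageIds ...
+//     if resp.NextToken == nil {
+//         break // a nil nextToken means there are no more pages
+//     }
+//     input.NextToken = resp.NextToken
+// }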
+func (c *ECR) ListImages(input *ListImagesInput) (*ListImagesOutput, error) { + req, out := c.ListImagesRequest(input) + err := req.Send() + return out, err +} + +const opPutImage = "PutImage" + +// PutImageRequest generates a "aws/request.Request" representing the +// client's request for the PutImage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutImage method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutImageRequest method. +// req, resp := client.PutImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ECR) PutImageRequest(input *PutImageInput) (req *request.Request, output *PutImageOutput) { + op := &request.Operation{ + Name: opPutImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutImageInput{} + } + + req = c.newRequest(op, input, output) + output = &PutImageOutput{} + req.Data = output + return +} + +// Creates or updates the image manifest associated with an image. +// +// This operation is used by the Amazon ECR proxy, and it is not intended +// for general use by customers. Use the docker CLI to pull, tag, and push images. +func (c *ECR) PutImage(input *PutImageInput) (*PutImageOutput, error) { + req, out := c.PutImageRequest(input) + err := req.Send() + return out, err +} + +const opSetRepositoryPolicy = "SetRepositoryPolicy" + +// SetRepositoryPolicyRequest generates a "aws/request.Request" representing the +// client's request for the SetRepositoryPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetRepositoryPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetRepositoryPolicyRequest method. +// req, resp := client.SetRepositoryPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ECR) SetRepositoryPolicyRequest(input *SetRepositoryPolicyInput) (req *request.Request, output *SetRepositoryPolicyOutput) { + op := &request.Operation{ + Name: opSetRepositoryPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetRepositoryPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &SetRepositoryPolicyOutput{} + req.Data = output + return +} + +// Applies a repository policy on a specified repository to control access permissions. 
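+//
+// // Example (an illustrative sketch, not generated documentation): grant
+// // pull access to another account, assuming an *ECR client named svc; the
+// // policy document and account ID below are hypothetical placeholders.
+// policy := `{
+//     "Version": "2008-10-17",
+//     "Statement": [{
+//         "Sid": "AllowPull",
+//         "Effect": "Allow",
+//         "Principal": {"AWS": "arn:aws:iam::123456789012:root"},
+//         "Action": ["ecr:GetDownloadUrlForLayer", "ecr:BatchGetImage"]
+//     }]
+// }`
+// _, err := svc.SetRepositoryPolicy(&SetRepositoryPolicyInput{
+//     RepositoryName: aws.String("my-web-app"),
+//     PolicyText:     aws.String(policy),
+// })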
+func (c *ECR) SetRepositoryPolicy(input *SetRepositoryPolicyInput) (*SetRepositoryPolicyOutput, error) {
+	req, out := c.SetRepositoryPolicyRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opUploadLayerPart = "UploadLayerPart"
+
+// UploadLayerPartRequest generates a "aws/request.Request" representing the
+// client's request for the UploadLayerPart operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the UploadLayerPart method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the UploadLayerPartRequest method.
+// req, resp := client.UploadLayerPartRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+//     fmt.Println(resp)
+// }
+//
+func (c *ECR) UploadLayerPartRequest(input *UploadLayerPartInput) (req *request.Request, output *UploadLayerPartOutput) {
+	op := &request.Operation{
+		Name:       opUploadLayerPart,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UploadLayerPartInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &UploadLayerPartOutput{}
+	req.Data = output
+	return
+}
+
+// Uploads an image layer part to Amazon ECR.
+//
+// This operation is used by the Amazon ECR proxy, and it is not intended
+// for general use by customers. Use the docker CLI to pull, tag, and push images.
+func (c *ECR) UploadLayerPart(input *UploadLayerPartInput) (*UploadLayerPartOutput, error) {
+	req, out := c.UploadLayerPartRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// An object representing authorization data for an Amazon ECR registry.
+type AuthorizationData struct {
+	_ struct{} `type:"structure"`
+
+	// A base64-encoded string that contains authorization data for the specified
+	// Amazon ECR registry. When the string is decoded, it is presented in the format
+	// user:password for private registry authentication using docker login.
+	AuthorizationToken *string `locationName:"authorizationToken" type:"string"`
+
+	// The Unix time in seconds and milliseconds when the authorization token expires.
+	// Authorization tokens are valid for 12 hours.
+	ExpiresAt *time.Time `locationName:"expiresAt" type:"timestamp" timestampFormat:"unix"`
+
+	// The registry URL to use for this authorization token in a docker login command.
+	// The Amazon ECR registry URL format is https://aws_account_id.dkr.ecr.region.amazonaws.com.
+	// For example, https://012345678910.dkr.ecr.us-east-1.amazonaws.com.
+	ProxyEndpoint *string `locationName:"proxyEndpoint" type:"string"`
+}
+
+// String returns the string representation
+func (s AuthorizationData) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AuthorizationData) GoString() string {
+	return s.String()
+}
+
+type BatchCheckLayerAvailabilityInput struct {
+	_ struct{} `type:"structure"`
+
+	// The digests of the image layers to check.
+ LayerDigests []*string `locationName:"layerDigests" min:"1" type:"list" required:"true"` + + // The AWS account ID associated with the registry that contains the image layers + // to check. If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository that is associated with the image layers to check. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s BatchCheckLayerAvailabilityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchCheckLayerAvailabilityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchCheckLayerAvailabilityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchCheckLayerAvailabilityInput"} + if s.LayerDigests == nil { + invalidParams.Add(request.NewErrParamRequired("LayerDigests")) + } + if s.LayerDigests != nil && len(s.LayerDigests) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LayerDigests", 1)) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type BatchCheckLayerAvailabilityOutput struct { + _ struct{} `type:"structure"` + + // Any failures associated with the call. + Failures []*LayerFailure `locationName:"failures" type:"list"` + + // A list of image layer objects corresponding to the image layer references + // in the request. + Layers []*Layer `locationName:"layers" type:"list"` +} + +// String returns the string representation +func (s BatchCheckLayerAvailabilityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchCheckLayerAvailabilityOutput) GoString() string { + return s.String() +} + +// Deletes specified images within a specified repository. Images are specified +// with either the imageTag or imageDigest. +type BatchDeleteImageInput struct { + _ struct{} `type:"structure"` + + // A list of image ID references that correspond to images to delete. The format + // of the imageIds reference is imageTag=tag or imageDigest=digest. + ImageIds []*ImageIdentifier `locationName:"imageIds" min:"1" type:"list" required:"true"` + + // The AWS account ID associated with the registry that contains the image to + // delete. If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository that contains the image to delete. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s BatchDeleteImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchDeleteImageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
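+// The SDK also runs this validation automatically when a request is sent, so
+// calling Validate directly is optional.
+//
+// // Example (an illustrative sketch with placeholder values):
+// input := &BatchDeleteImageInput{
+//     RepositoryName: aws.String("my-web-app"),
+//     ImageIds:       []*ImageIdentifier{{ImageTag: aws.String("latest")}},
+// }
+// if err := input.Validate(); err != nil {
+//     // reports, e.g., a missing RepositoryName or an empty ImageIds list
+// }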
+func (s *BatchDeleteImageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchDeleteImageInput"} + if s.ImageIds == nil { + invalidParams.Add(request.NewErrParamRequired("ImageIds")) + } + if s.ImageIds != nil && len(s.ImageIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ImageIds", 1)) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type BatchDeleteImageOutput struct { + _ struct{} `type:"structure"` + + // Any failures associated with the call. + Failures []*ImageFailure `locationName:"failures" type:"list"` + + // The image IDs of the deleted images. + ImageIds []*ImageIdentifier `locationName:"imageIds" min:"1" type:"list"` +} + +// String returns the string representation +func (s BatchDeleteImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchDeleteImageOutput) GoString() string { + return s.String() +} + +type BatchGetImageInput struct { + _ struct{} `type:"structure"` + + // A list of image ID references that correspond to images to describe. The + // format of the imageIds reference is imageTag=tag or imageDigest=digest. + ImageIds []*ImageIdentifier `locationName:"imageIds" min:"1" type:"list" required:"true"` + + // The AWS account ID associated with the registry that contains the images + // to describe. If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository that contains the images to describe. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s BatchGetImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetImageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchGetImageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchGetImageInput"} + if s.ImageIds == nil { + invalidParams.Add(request.NewErrParamRequired("ImageIds")) + } + if s.ImageIds != nil && len(s.ImageIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ImageIds", 1)) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type BatchGetImageOutput struct { + _ struct{} `type:"structure"` + + // Any failures associated with the call. + Failures []*ImageFailure `locationName:"failures" type:"list"` + + // A list of image objects corresponding to the image references in the request. 
+ Images []*Image `locationName:"images" type:"list"` +} + +// String returns the string representation +func (s BatchGetImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetImageOutput) GoString() string { + return s.String() +} + +type CompleteLayerUploadInput struct { + _ struct{} `type:"structure"` + + // The sha256 digest of the image layer. + LayerDigests []*string `locationName:"layerDigests" min:"1" type:"list" required:"true"` + + // The AWS account ID associated with the registry to which to upload layers. + // If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository to associate with the image layer. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` + + // The upload ID from a previous InitiateLayerUpload operation to associate + // with the image layer. + UploadId *string `locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CompleteLayerUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteLayerUploadInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CompleteLayerUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CompleteLayerUploadInput"} + if s.LayerDigests == nil { + invalidParams.Add(request.NewErrParamRequired("LayerDigests")) + } + if s.LayerDigests != nil && len(s.LayerDigests) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LayerDigests", 1)) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CompleteLayerUploadOutput struct { + _ struct{} `type:"structure"` + + // The sha256 digest of the image layer. + LayerDigest *string `locationName:"layerDigest" type:"string"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` + + // The upload ID associated with the layer. + UploadId *string `locationName:"uploadId" type:"string"` +} + +// String returns the string representation +func (s CompleteLayerUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteLayerUploadOutput) GoString() string { + return s.String() +} + +type CreateRepositoryInput struct { + _ struct{} `type:"structure"` + + // The name to use for the repository. The repository name may be specified + // on its own (such as nginx-web-app) or it can be prepended with a namespace + // to group the repository into a category (such as project-a/nginx-web-app). 
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateRepositoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRepositoryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateRepositoryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateRepositoryInput"} + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateRepositoryOutput struct { + _ struct{} `type:"structure"` + + // Object representing a repository. + Repository *Repository `locationName:"repository" type:"structure"` +} + +// String returns the string representation +func (s CreateRepositoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRepositoryOutput) GoString() string { + return s.String() +} + +type DeleteRepositoryInput struct { + _ struct{} `type:"structure"` + + // Force the deletion of the repository if it contains images. + Force *bool `locationName:"force" type:"boolean"` + + // The AWS account ID associated with the registry that contains the repository + // to delete. If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository to delete. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRepositoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRepositoryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteRepositoryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteRepositoryInput"} + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteRepositoryOutput struct { + _ struct{} `type:"structure"` + + // Object representing a repository. + Repository *Repository `locationName:"repository" type:"structure"` +} + +// String returns the string representation +func (s DeleteRepositoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRepositoryOutput) GoString() string { + return s.String() +} + +type DeleteRepositoryPolicyInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID associated with the registry that contains the repository + // policy to delete. If you do not specify a registry, the default registry + // is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository that is associated with the repository policy + // to delete. 
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRepositoryPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRepositoryPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteRepositoryPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteRepositoryPolicyInput"} + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteRepositoryPolicyOutput struct { + _ struct{} `type:"structure"` + + // The JSON repository policy that was deleted from the repository. + PolicyText *string `locationName:"policyText" type:"string"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s DeleteRepositoryPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRepositoryPolicyOutput) GoString() string { + return s.String() +} + +type DescribeRepositoriesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of repository results returned by DescribeRepositories + // in paginated output. When this parameter is used, DescribeRepositories only + // returns maxResults results in a single page along with a nextToken response + // element. The remaining results of the initial request can be seen by sending + // another DescribeRepositories request with the returned nextToken value. This + // value can be between 1 and 100. If this parameter is not used, then DescribeRepositories + // returns up to 100 results and a nextToken value, if applicable. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // The nextToken value returned from a previous paginated DescribeRepositories + // request where maxResults was used and the results exceeded the value of that + // parameter. Pagination continues from the end of the previous results that + // returned the nextToken value. This value is null when there are no more results + // to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The AWS account ID associated with the registry that contains the repositories + // to be described. If you do not specify a registry, the default registry is + // assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // A list of repositories to describe. If this parameter is omitted, then all + // repositories in a registry are described. + RepositoryNames []*string `locationName:"repositoryNames" min:"1" type:"list"` +} + +// String returns the string representation +func (s DescribeRepositoriesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRepositoriesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeRepositoriesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeRepositoriesInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.RepositoryNames != nil && len(s.RepositoryNames) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryNames", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeRepositoriesOutput struct { + _ struct{} `type:"structure"` + + // The nextToken value to include in a future DescribeRepositories request. + // When the results of a DescribeRepositories request exceed maxResults, this + // value can be used to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // A list of repository objects corresponding to valid repositories. + Repositories []*Repository `locationName:"repositories" type:"list"` +} + +// String returns the string representation +func (s DescribeRepositoriesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRepositoriesOutput) GoString() string { + return s.String() +} + +type GetAuthorizationTokenInput struct { + _ struct{} `type:"structure"` + + // A list of AWS account IDs that are associated with the registries for which + // to get authorization tokens. If you do not specify a registry, the default + // registry is assumed. + RegistryIds []*string `locationName:"registryIds" min:"1" type:"list"` +} + +// String returns the string representation +func (s GetAuthorizationTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAuthorizationTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetAuthorizationTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAuthorizationTokenInput"} + if s.RegistryIds != nil && len(s.RegistryIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RegistryIds", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetAuthorizationTokenOutput struct { + _ struct{} `type:"structure"` + + // A list of authorization token data objects that correspond to the registryIds + // values in the request. + AuthorizationData []*AuthorizationData `locationName:"authorizationData" type:"list"` +} + +// String returns the string representation +func (s GetAuthorizationTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAuthorizationTokenOutput) GoString() string { + return s.String() +} + +type GetDownloadUrlForLayerInput struct { + _ struct{} `type:"structure"` + + // The digest of the image layer to download. + LayerDigest *string `locationName:"layerDigest" type:"string" required:"true"` + + // The AWS account ID associated with the registry that contains the image layer + // to download. If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository that is associated with the image layer to download. 
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetDownloadUrlForLayerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDownloadUrlForLayerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetDownloadUrlForLayerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDownloadUrlForLayerInput"} + if s.LayerDigest == nil { + invalidParams.Add(request.NewErrParamRequired("LayerDigest")) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetDownloadUrlForLayerOutput struct { + _ struct{} `type:"structure"` + + // The pre-signed Amazon S3 download URL for the requested layer. + DownloadUrl *string `locationName:"downloadUrl" type:"string"` + + // The digest of the image layer to download. + LayerDigest *string `locationName:"layerDigest" type:"string"` +} + +// String returns the string representation +func (s GetDownloadUrlForLayerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDownloadUrlForLayerOutput) GoString() string { + return s.String() +} + +type GetRepositoryPolicyInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID associated with the registry that contains the repository. + // If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository whose policy you want to retrieve. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRepositoryPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRepositoryPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetRepositoryPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetRepositoryPolicyInput"} + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetRepositoryPolicyOutput struct { + _ struct{} `type:"structure"` + + // The JSON repository policy text associated with the repository. + PolicyText *string `locationName:"policyText" type:"string"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. 
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s GetRepositoryPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRepositoryPolicyOutput) GoString() string { + return s.String() +} + +// Object representing an image. +type Image struct { + _ struct{} `type:"structure"` + + // An object containing the image tag and image digest associated with an image. + ImageId *ImageIdentifier `locationName:"imageId" type:"structure"` + + // The image manifest associated with the image. + ImageManifest *string `locationName:"imageManifest" type:"string"` + + // The AWS account ID associated with the registry containing the image. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository associated with the image. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s Image) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Image) GoString() string { + return s.String() +} + +type ImageFailure struct { + _ struct{} `type:"structure"` + + // The code associated with the failure. + FailureCode *string `locationName:"failureCode" type:"string" enum:"ImageFailureCode"` + + // The reason for the failure. + FailureReason *string `locationName:"failureReason" type:"string"` + + // The image ID associated with the failure. + ImageId *ImageIdentifier `locationName:"imageId" type:"structure"` +} + +// String returns the string representation +func (s ImageFailure) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageFailure) GoString() string { + return s.String() +} + +type ImageIdentifier struct { + _ struct{} `type:"structure"` + + // The sha256 digest of the image manifest. + ImageDigest *string `locationName:"imageDigest" type:"string"` + + // The tag used for the image. + ImageTag *string `locationName:"imageTag" type:"string"` +} + +// String returns the string representation +func (s ImageIdentifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageIdentifier) GoString() string { + return s.String() +} + +type InitiateLayerUploadInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID associated with the registry that you intend to upload + // layers to. If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository that you intend to upload layers to. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s InitiateLayerUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InitiateLayerUploadInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *InitiateLayerUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InitiateLayerUploadInput"} + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type InitiateLayerUploadOutput struct { + _ struct{} `type:"structure"` + + // The size, in bytes, that Amazon ECR expects future layer part uploads to + // be. + PartSize *int64 `locationName:"partSize" type:"long"` + + // The upload ID for the layer upload. This parameter is passed to further UploadLayerPart + // and CompleteLayerUpload operations. + UploadId *string `locationName:"uploadId" type:"string"` +} + +// String returns the string representation +func (s InitiateLayerUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InitiateLayerUploadOutput) GoString() string { + return s.String() +} + +type Layer struct { + _ struct{} `type:"structure"` + + // The availability status of the image layer. Valid values are AVAILABLE and + // UNAVAILABLE. + LayerAvailability *string `locationName:"layerAvailability" type:"string" enum:"LayerAvailability"` + + // The sha256 digest of the image layer. + LayerDigest *string `locationName:"layerDigest" type:"string"` + + // The size, in bytes, of the image layer. + LayerSize *int64 `locationName:"layerSize" type:"long"` +} + +// String returns the string representation +func (s Layer) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Layer) GoString() string { + return s.String() +} + +type LayerFailure struct { + _ struct{} `type:"structure"` + + // The failure code associated with the failure. + FailureCode *string `locationName:"failureCode" type:"string" enum:"LayerFailureCode"` + + // The reason for the failure. + FailureReason *string `locationName:"failureReason" type:"string"` + + // The layer digest associated with the failure. + LayerDigest *string `locationName:"layerDigest" type:"string"` +} + +// String returns the string representation +func (s LayerFailure) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LayerFailure) GoString() string { + return s.String() +} + +type ListImagesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of image results returned by ListImages in paginated output. + // When this parameter is used, ListImages only returns maxResults results in + // a single page along with a nextToken response element. The remaining results + // of the initial request can be seen by sending another ListImages request + // with the returned nextToken value. This value can be between 1 and 100. If + // this parameter is not used, then ListImages returns up to 100 results and + // a nextToken value, if applicable. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // The nextToken value returned from a previous paginated ListImages request + // where maxResults was used and the results exceeded the value of that parameter. + // Pagination continues from the end of the previous results that returned the + // nextToken value. This value is null when there are no more results to return. 
+ NextToken *string `locationName:"nextToken" type:"string"` + + // The AWS account ID associated with the registry that contains the repository + // to list images in. If you do not specify a registry, the default registry + // is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository whose image IDs are to be listed. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListImagesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListImagesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListImagesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListImagesInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListImagesOutput struct { + _ struct{} `type:"structure"` + + // The list of image IDs for the requested repository. + ImageIds []*ImageIdentifier `locationName:"imageIds" min:"1" type:"list"` + + // The nextToken value to include in a future ListImages request. When the results + // of a ListImages request exceed maxResults, this value can be used to retrieve + // the next page of results. This value is null when there are no more results + // to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListImagesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListImagesOutput) GoString() string { + return s.String() +} + +type PutImageInput struct { + _ struct{} `type:"structure"` + + // The image manifest corresponding to the image to be uploaded. + ImageManifest *string `locationName:"imageManifest" type:"string" required:"true"` + + // The AWS account ID associated with the registry that contains the repository + // in which to put the image. If you do not specify a registry, the default + // registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository in which to put the image. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutImageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutImageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutImageInput"} + if s.ImageManifest == nil { + invalidParams.Add(request.NewErrParamRequired("ImageManifest")) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutImageOutput struct { + _ struct{} `type:"structure"` + + // Details of the image uploaded. + Image *Image `locationName:"image" type:"structure"` +} + +// String returns the string representation +func (s PutImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutImageOutput) GoString() string { + return s.String() +} + +// Object representing a repository. +type Repository struct { + _ struct{} `type:"structure"` + + // The AWS account ID associated with the registry that contains the repository. + RegistryId *string `locationName:"registryId" type:"string"` + + // The Amazon Resource Name (ARN) that identifies the repository. The ARN contains + // the arn:aws:ecr namespace, followed by the region of the repository, the + // AWS account ID of the repository owner, the repository namespace, and then + // the repository name. For example, arn:aws:ecr:region:012345678910:repository/test. + RepositoryArn *string `locationName:"repositoryArn" type:"string"` + + // The name of the repository. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` + + // The URI for the repository. You can use this URI for Docker push and pull + // operations. + RepositoryUri *string `locationName:"repositoryUri" type:"string"` +} + +// String returns the string representation +func (s Repository) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Repository) GoString() string { + return s.String() +} + +type SetRepositoryPolicyInput struct { + _ struct{} `type:"structure"` + + // If the policy you are attempting to set on a repository policy would prevent + // you from setting another policy in the future, you must force the SetRepositoryPolicy + // operation. This is intended to prevent accidental repository lock outs. + Force *bool `locationName:"force" type:"boolean"` + + // The JSON repository policy text to apply to the repository. + PolicyText *string `locationName:"policyText" type:"string" required:"true"` + + // The AWS account ID associated with the registry that contains the repository. + // If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository to receive the policy. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s SetRepositoryPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetRepositoryPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *SetRepositoryPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetRepositoryPolicyInput"} + if s.PolicyText == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyText")) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type SetRepositoryPolicyOutput struct { + _ struct{} `type:"structure"` + + // The JSON repository policy text applied to the repository. + PolicyText *string `locationName:"policyText" type:"string"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s SetRepositoryPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetRepositoryPolicyOutput) GoString() string { + return s.String() +} + +type UploadLayerPartInput struct { + _ struct{} `type:"structure"` + + // The base64-encoded layer part payload. + // + // LayerPartBlob is automatically base64 encoded/decoded by the SDK. + LayerPartBlob []byte `locationName:"layerPartBlob" type:"blob" required:"true"` + + // The integer value of the first byte of the layer part. + PartFirstByte *int64 `locationName:"partFirstByte" type:"long" required:"true"` + + // The integer value of the last byte of the layer part. + PartLastByte *int64 `locationName:"partLastByte" type:"long" required:"true"` + + // The AWS account ID associated with the registry that you are uploading layer + // parts to. If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository that you are uploading layer parts to. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` + + // The upload ID from a previous InitiateLayerUpload operation to associate + // with the layer part upload. + UploadId *string `locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s UploadLayerPartInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadLayerPartInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
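+//
+// Validate enforces the required fields above. In the normal flow they come
+// from an earlier InitiateLayerUpload call: its UploadId is echoed back here,
+// and its PartSize hints how the layer blob should be chunked. A rough sketch,
+// with error handling elided and "svc", "layerPart", and "my-repo" as
+// placeholders:
+//
+//    init, _ := svc.InitiateLayerUpload(&InitiateLayerUploadInput{
+//        RepositoryName: aws.String("my-repo"),
+//    })
+//    svc.UploadLayerPart(&UploadLayerPartInput{
+//        RepositoryName: aws.String("my-repo"),
+//        UploadId:       init.UploadId,
+//        PartFirstByte:  aws.Int64(0),
+//        PartLastByte:   aws.Int64(int64(len(layerPart) - 1)),
+//        LayerPartBlob:  layerPart,
+//    })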
+func (s *UploadLayerPartInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UploadLayerPartInput"} + if s.LayerPartBlob == nil { + invalidParams.Add(request.NewErrParamRequired("LayerPartBlob")) + } + if s.PartFirstByte == nil { + invalidParams.Add(request.NewErrParamRequired("PartFirstByte")) + } + if s.PartLastByte == nil { + invalidParams.Add(request.NewErrParamRequired("PartLastByte")) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UploadLayerPartOutput struct { + _ struct{} `type:"structure"` + + // The integer value of the last byte received in the request. + LastByteReceived *int64 `locationName:"lastByteReceived" type:"long"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` + + // The upload ID associated with the request. + UploadId *string `locationName:"uploadId" type:"string"` +} + +// String returns the string representation +func (s UploadLayerPartOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadLayerPartOutput) GoString() string { + return s.String() +} + +const ( + // @enum ImageFailureCode + ImageFailureCodeInvalidImageDigest = "InvalidImageDigest" + // @enum ImageFailureCode + ImageFailureCodeInvalidImageTag = "InvalidImageTag" + // @enum ImageFailureCode + ImageFailureCodeImageTagDoesNotMatchDigest = "ImageTagDoesNotMatchDigest" + // @enum ImageFailureCode + ImageFailureCodeImageNotFound = "ImageNotFound" + // @enum ImageFailureCode + ImageFailureCodeMissingDigestAndTag = "MissingDigestAndTag" +) + +const ( + // @enum LayerAvailability + LayerAvailabilityAvailable = "AVAILABLE" + // @enum LayerAvailability + LayerAvailabilityUnavailable = "UNAVAILABLE" +) + +const ( + // @enum LayerFailureCode + LayerFailureCodeInvalidLayerDigest = "InvalidLayerDigest" + // @enum LayerFailureCode + LayerFailureCodeMissingLayerDigest = "MissingLayerDigest" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecr/ecriface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/ecr/ecriface/interface.go new file mode 100644 index 000000000..f98fda647 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ecr/ecriface/interface.go @@ -0,0 +1,78 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package ecriface provides an interface for the Amazon EC2 Container Registry. +package ecriface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/ecr" +) + +// ECRAPI is the interface type for ecr.ECR. 
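+//
+// Code written against this interface rather than the concrete *ecr.ECR can
+// be unit tested with a stub. A minimal sketch (mockECR is a hypothetical
+// test-local type; any method the test does not override would panic if
+// called):
+//
+//    type mockECR struct {
+//        ecriface.ECRAPI
+//    }
+//
+//    func (m mockECR) ListImages(*ecr.ListImagesInput) (*ecr.ListImagesOutput, error) {
+//        return &ecr.ListImagesOutput{}, nil
+//    }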
+type ECRAPI interface { + BatchCheckLayerAvailabilityRequest(*ecr.BatchCheckLayerAvailabilityInput) (*request.Request, *ecr.BatchCheckLayerAvailabilityOutput) + + BatchCheckLayerAvailability(*ecr.BatchCheckLayerAvailabilityInput) (*ecr.BatchCheckLayerAvailabilityOutput, error) + + BatchDeleteImageRequest(*ecr.BatchDeleteImageInput) (*request.Request, *ecr.BatchDeleteImageOutput) + + BatchDeleteImage(*ecr.BatchDeleteImageInput) (*ecr.BatchDeleteImageOutput, error) + + BatchGetImageRequest(*ecr.BatchGetImageInput) (*request.Request, *ecr.BatchGetImageOutput) + + BatchGetImage(*ecr.BatchGetImageInput) (*ecr.BatchGetImageOutput, error) + + CompleteLayerUploadRequest(*ecr.CompleteLayerUploadInput) (*request.Request, *ecr.CompleteLayerUploadOutput) + + CompleteLayerUpload(*ecr.CompleteLayerUploadInput) (*ecr.CompleteLayerUploadOutput, error) + + CreateRepositoryRequest(*ecr.CreateRepositoryInput) (*request.Request, *ecr.CreateRepositoryOutput) + + CreateRepository(*ecr.CreateRepositoryInput) (*ecr.CreateRepositoryOutput, error) + + DeleteRepositoryRequest(*ecr.DeleteRepositoryInput) (*request.Request, *ecr.DeleteRepositoryOutput) + + DeleteRepository(*ecr.DeleteRepositoryInput) (*ecr.DeleteRepositoryOutput, error) + + DeleteRepositoryPolicyRequest(*ecr.DeleteRepositoryPolicyInput) (*request.Request, *ecr.DeleteRepositoryPolicyOutput) + + DeleteRepositoryPolicy(*ecr.DeleteRepositoryPolicyInput) (*ecr.DeleteRepositoryPolicyOutput, error) + + DescribeRepositoriesRequest(*ecr.DescribeRepositoriesInput) (*request.Request, *ecr.DescribeRepositoriesOutput) + + DescribeRepositories(*ecr.DescribeRepositoriesInput) (*ecr.DescribeRepositoriesOutput, error) + + GetAuthorizationTokenRequest(*ecr.GetAuthorizationTokenInput) (*request.Request, *ecr.GetAuthorizationTokenOutput) + + GetAuthorizationToken(*ecr.GetAuthorizationTokenInput) (*ecr.GetAuthorizationTokenOutput, error) + + GetDownloadUrlForLayerRequest(*ecr.GetDownloadUrlForLayerInput) (*request.Request, *ecr.GetDownloadUrlForLayerOutput) + + GetDownloadUrlForLayer(*ecr.GetDownloadUrlForLayerInput) (*ecr.GetDownloadUrlForLayerOutput, error) + + GetRepositoryPolicyRequest(*ecr.GetRepositoryPolicyInput) (*request.Request, *ecr.GetRepositoryPolicyOutput) + + GetRepositoryPolicy(*ecr.GetRepositoryPolicyInput) (*ecr.GetRepositoryPolicyOutput, error) + + InitiateLayerUploadRequest(*ecr.InitiateLayerUploadInput) (*request.Request, *ecr.InitiateLayerUploadOutput) + + InitiateLayerUpload(*ecr.InitiateLayerUploadInput) (*ecr.InitiateLayerUploadOutput, error) + + ListImagesRequest(*ecr.ListImagesInput) (*request.Request, *ecr.ListImagesOutput) + + ListImages(*ecr.ListImagesInput) (*ecr.ListImagesOutput, error) + + PutImageRequest(*ecr.PutImageInput) (*request.Request, *ecr.PutImageOutput) + + PutImage(*ecr.PutImageInput) (*ecr.PutImageOutput, error) + + SetRepositoryPolicyRequest(*ecr.SetRepositoryPolicyInput) (*request.Request, *ecr.SetRepositoryPolicyOutput) + + SetRepositoryPolicy(*ecr.SetRepositoryPolicyInput) (*ecr.SetRepositoryPolicyOutput, error) + + UploadLayerPartRequest(*ecr.UploadLayerPartInput) (*request.Request, *ecr.UploadLayerPartOutput) + + UploadLayerPart(*ecr.UploadLayerPartInput) (*ecr.UploadLayerPartOutput, error) +} + +var _ ECRAPI = (*ecr.ECR)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecr/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/ecr/examples_test.go new file mode 100644 index 000000000..95e088bae --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ecr/examples_test.go @@ -0,0 +1,376 @@ +// THIS 
FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package ecr_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/ecr" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleECR_BatchCheckLayerAvailability() { + svc := ecr.New(session.New()) + + params := &ecr.BatchCheckLayerAvailabilityInput{ + LayerDigests: []*string{ // Required + aws.String("BatchedOperationLayerDigest"), // Required + // More values... + }, + RepositoryName: aws.String("RepositoryName"), // Required + RegistryId: aws.String("RegistryId"), + } + resp, err := svc.BatchCheckLayerAvailability(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECR_BatchDeleteImage() { + svc := ecr.New(session.New()) + + params := &ecr.BatchDeleteImageInput{ + ImageIds: []*ecr.ImageIdentifier{ // Required + { // Required + ImageDigest: aws.String("ImageDigest"), + ImageTag: aws.String("ImageTag"), + }, + // More values... + }, + RepositoryName: aws.String("RepositoryName"), // Required + RegistryId: aws.String("RegistryId"), + } + resp, err := svc.BatchDeleteImage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECR_BatchGetImage() { + svc := ecr.New(session.New()) + + params := &ecr.BatchGetImageInput{ + ImageIds: []*ecr.ImageIdentifier{ // Required + { // Required + ImageDigest: aws.String("ImageDigest"), + ImageTag: aws.String("ImageTag"), + }, + // More values... + }, + RepositoryName: aws.String("RepositoryName"), // Required + RegistryId: aws.String("RegistryId"), + } + resp, err := svc.BatchGetImage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECR_CompleteLayerUpload() { + svc := ecr.New(session.New()) + + params := &ecr.CompleteLayerUploadInput{ + LayerDigests: []*string{ // Required + aws.String("LayerDigest"), // Required + // More values... + }, + RepositoryName: aws.String("RepositoryName"), // Required + UploadId: aws.String("UploadId"), // Required + RegistryId: aws.String("RegistryId"), + } + resp, err := svc.CompleteLayerUpload(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECR_CreateRepository() { + svc := ecr.New(session.New()) + + params := &ecr.CreateRepositoryInput{ + RepositoryName: aws.String("RepositoryName"), // Required + } + resp, err := svc.CreateRepository(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleECR_DeleteRepository() { + svc := ecr.New(session.New()) + + params := &ecr.DeleteRepositoryInput{ + RepositoryName: aws.String("RepositoryName"), // Required + Force: aws.Bool(true), + RegistryId: aws.String("RegistryId"), + } + resp, err := svc.DeleteRepository(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECR_DeleteRepositoryPolicy() { + svc := ecr.New(session.New()) + + params := &ecr.DeleteRepositoryPolicyInput{ + RepositoryName: aws.String("RepositoryName"), // Required + RegistryId: aws.String("RegistryId"), + } + resp, err := svc.DeleteRepositoryPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECR_DescribeRepositories() { + svc := ecr.New(session.New()) + + params := &ecr.DescribeRepositoriesInput{ + MaxResults: aws.Int64(1), + NextToken: aws.String("NextToken"), + RegistryId: aws.String("RegistryId"), + RepositoryNames: []*string{ + aws.String("RepositoryName"), // Required + // More values... + }, + } + resp, err := svc.DescribeRepositories(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECR_GetAuthorizationToken() { + svc := ecr.New(session.New()) + + params := &ecr.GetAuthorizationTokenInput{ + RegistryIds: []*string{ + aws.String("RegistryId"), // Required + // More values... + }, + } + resp, err := svc.GetAuthorizationToken(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECR_GetDownloadUrlForLayer() { + svc := ecr.New(session.New()) + + params := &ecr.GetDownloadUrlForLayerInput{ + LayerDigest: aws.String("LayerDigest"), // Required + RepositoryName: aws.String("RepositoryName"), // Required + RegistryId: aws.String("RegistryId"), + } + resp, err := svc.GetDownloadUrlForLayer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECR_GetRepositoryPolicy() { + svc := ecr.New(session.New()) + + params := &ecr.GetRepositoryPolicyInput{ + RepositoryName: aws.String("RepositoryName"), // Required + RegistryId: aws.String("RegistryId"), + } + resp, err := svc.GetRepositoryPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECR_InitiateLayerUpload() { + svc := ecr.New(session.New()) + + params := &ecr.InitiateLayerUploadInput{ + RepositoryName: aws.String("RepositoryName"), // Required + RegistryId: aws.String("RegistryId"), + } + resp, err := svc.InitiateLayerUpload(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECR_ListImages() { + svc := ecr.New(session.New()) + + params := &ecr.ListImagesInput{ + RepositoryName: aws.String("RepositoryName"), // Required + MaxResults: aws.Int64(1), + NextToken: aws.String("NextToken"), + RegistryId: aws.String("RegistryId"), + } + resp, err := svc.ListImages(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECR_PutImage() { + svc := ecr.New(session.New()) + + params := &ecr.PutImageInput{ + ImageManifest: aws.String("ImageManifest"), // Required + RepositoryName: aws.String("RepositoryName"), // Required + RegistryId: aws.String("RegistryId"), + } + resp, err := svc.PutImage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECR_SetRepositoryPolicy() { + svc := ecr.New(session.New()) + + params := &ecr.SetRepositoryPolicyInput{ + PolicyText: aws.String("RepositoryPolicyText"), // Required + RepositoryName: aws.String("RepositoryName"), // Required + Force: aws.Bool(true), + RegistryId: aws.String("RegistryId"), + } + resp, err := svc.SetRepositoryPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECR_UploadLayerPart() { + svc := ecr.New(session.New()) + + params := &ecr.UploadLayerPartInput{ + LayerPartBlob: []byte("PAYLOAD"), // Required + PartFirstByte: aws.Int64(1), // Required + PartLastByte: aws.Int64(1), // Required + RepositoryName: aws.String("RepositoryName"), // Required + UploadId: aws.String("UploadId"), // Required + RegistryId: aws.String("RegistryId"), + } + resp, err := svc.UploadLayerPart(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecr/service.go b/vendor/github.com/aws/aws-sdk-go/service/ecr/service.go new file mode 100644 index 000000000..53e3c57e3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ecr/service.go @@ -0,0 +1,93 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package ecr + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// Amazon EC2 Container Registry (Amazon ECR) is a managed AWS Docker registry +// service. Customers can use the familiar Docker CLI to push, pull, and manage +// images. Amazon ECR provides a secure, scalable, and reliable registry. Amazon +// ECR supports private Docker repositories with resource-based permissions +// using AWS IAM so that specific users or Amazon EC2 instances can access repositories +// and images. Developers can use the Docker CLI to author and manage images. +//The service client's operations are safe to be used concurrently. 
+// It is not safe to mutate any of the client's properties though. +type ECR struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "ecr" + +// New creates a new instance of the ECR client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a ECR client from just a session. +// svc := ecr.New(mySession) +// +// // Create a ECR client with additional configuration +// svc := ecr.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *ECR { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *ECR { + svc := &ECR{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-09-21", + JSONVersion: "1.1", + TargetPrefix: "AmazonEC2ContainerRegistry_V20150921", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a ECR operation and runs any +// custom request initialization. +func (c *ECR) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecs/api.go b/vendor/github.com/aws/aws-sdk-go/service/ecs/api.go new file mode 100644 index 000000000..9ca22f2bd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ecs/api.go @@ -0,0 +1,4742 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package ecs provides a client for Amazon EC2 Container Service. +package ecs + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opCreateCluster = "CreateCluster" + +// CreateClusterRequest generates a "aws/request.Request" representing the +// client's request for the CreateCluster operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateCluster method directly +// instead. 
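+//
+// As a sketch of that handler-injection path (the logging body is purely
+// illustrative):
+//
+//    req, resp := client.CreateClusterRequest(params)
+//    req.Handlers.Send.PushFront(func(r *request.Request) {
+//        fmt.Println("about to send", r.Operation.Name)
+//    })
+//    err := req.Send()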
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateClusterRequest method. +// req, resp := client.CreateClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ECS) CreateClusterRequest(input *CreateClusterInput) (req *request.Request, output *CreateClusterOutput) { + op := &request.Operation{ + Name: opCreateCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateClusterInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateClusterOutput{} + req.Data = output + return +} + +// Creates a new Amazon ECS cluster. By default, your account receives a default +// cluster when you launch your first container instance. However, you can create +// your own cluster with a unique name with the CreateCluster action. +func (c *ECS) CreateCluster(input *CreateClusterInput) (*CreateClusterOutput, error) { + req, out := c.CreateClusterRequest(input) + err := req.Send() + return out, err +} + +const opCreateService = "CreateService" + +// CreateServiceRequest generates a "aws/request.Request" representing the +// client's request for the CreateService operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateService method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateServiceRequest method. +// req, resp := client.CreateServiceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ECS) CreateServiceRequest(input *CreateServiceInput) (req *request.Request, output *CreateServiceOutput) { + op := &request.Operation{ + Name: opCreateService, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateServiceInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateServiceOutput{} + req.Data = output + return +} + +// Runs and maintains a desired number of tasks from a specified task definition. +// If the number of tasks running in a service drops below desiredCount, Amazon +// ECS spawns another instantiation of the task in the specified cluster. To +// update an existing service, see UpdateService. +// +// In addition to maintaining the desired count of tasks in your service, you +// can optionally run your service behind a load balancer. The load balancer +// distributes traffic across the tasks that are associated with the service. +// +// You can optionally specify a deployment configuration for your service. +// During a deployment (which is triggered by changing the task definition of +// a service with an UpdateService operation), the service scheduler uses the +// minimumHealthyPercent and maximumPercent parameters to determine the deployment +// strategy. +// +// If the minimumHealthyPercent is below 100%, the scheduler can ignore the +// desiredCount temporarily during a deployment. 
For example, if your service +// has a desiredCount of four tasks, a minimumHealthyPercent of 50% allows the +// scheduler to stop two existing tasks before starting two new tasks. Tasks +// for services that do not use a load balancer are considered healthy if they +// are in the RUNNING state; tasks for services that do use a load balancer +// are considered healthy if they are in the RUNNING state and the container +// instance it is hosted on is reported as healthy by the load balancer. The +// default value for minimumHealthyPercent is 50% in the console and 100% for +// the AWS CLI, the AWS SDKs, and the APIs. +// +// The maximumPercent parameter represents an upper limit on the number of +// running tasks during a deployment, which enables you to define the deployment +// batch size. For example, if your service has a desiredCount of four tasks, +// a maximumPercent value of 200% starts four new tasks before stopping the +// four older tasks (provided that the cluster resources required to do this +// are available). The default value for maximumPercent is 200%. +// +// When the service scheduler launches new tasks, it attempts to balance them +// across the Availability Zones in your cluster with the following logic: +// +// Determine which of the container instances in your cluster can support +// your service's task definition (for example, they have the required CPU, +// memory, ports, and container instance attributes). +// +// Sort the valid container instances by the fewest number of running tasks +// for this service in the same Availability Zone as the instance. For example, +// if zone A has one running service task and zones B and C each have zero, +// valid container instances in either zone B or C are considered optimal for +// placement. +// +// Place the new service task on a valid container instance in an optimal +// Availability Zone (based on the previous steps), favoring container instances +// with the fewest number of running tasks for this service. +func (c *ECS) CreateService(input *CreateServiceInput) (*CreateServiceOutput, error) { + req, out := c.CreateServiceRequest(input) + err := req.Send() + return out, err +} + +const opDeleteCluster = "DeleteCluster" + +// DeleteClusterRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCluster operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteCluster method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteClusterRequest method. 
+// req, resp := client.DeleteClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ECS) DeleteClusterRequest(input *DeleteClusterInput) (req *request.Request, output *DeleteClusterOutput) { + op := &request.Operation{ + Name: opDeleteCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteClusterInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteClusterOutput{} + req.Data = output + return +} + +// Deletes the specified cluster. You must deregister all container instances +// from this cluster before you may delete it. You can list the container instances +// in a cluster with ListContainerInstances and deregister them with DeregisterContainerInstance. +func (c *ECS) DeleteCluster(input *DeleteClusterInput) (*DeleteClusterOutput, error) { + req, out := c.DeleteClusterRequest(input) + err := req.Send() + return out, err +} + +const opDeleteService = "DeleteService" + +// DeleteServiceRequest generates a "aws/request.Request" representing the +// client's request for the DeleteService operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteService method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteServiceRequest method. +// req, resp := client.DeleteServiceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ECS) DeleteServiceRequest(input *DeleteServiceInput) (req *request.Request, output *DeleteServiceOutput) { + op := &request.Operation{ + Name: opDeleteService, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteServiceInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteServiceOutput{} + req.Data = output + return +} + +// Deletes a specified service within a cluster. You can delete a service if +// you have no running tasks in it and the desired task count is zero. If the +// service is actively maintaining tasks, you cannot delete it, and you must +// update the service to a desired task count of zero. For more information, +// see UpdateService. +// +// When you delete a service, if there are still running tasks that require +// cleanup, the service status moves from ACTIVE to DRAINING, and the service +// is no longer visible in the console or in ListServices API operations. After +// the tasks have stopped, then the service status moves from DRAINING to INACTIVE. +// Services in the DRAINING or INACTIVE status can still be viewed with DescribeServices +// API operations; however, in the future, INACTIVE services may be cleaned +// up and purged from Amazon ECS record keeping, and DescribeServices API operations +// on those services will return a ServiceNotFoundException error. 
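+//
+// A typical teardown therefore scales the service to zero before deleting it.
+// A sketch, with error handling elided; "svc" is an ECS client, and "default"
+// and "my-service" are placeholder names:
+//
+//    svc.UpdateService(&UpdateServiceInput{
+//        Cluster:      aws.String("default"),
+//        Service:      aws.String("my-service"),
+//        DesiredCount: aws.Int64(0),
+//    })
+//    svc.DeleteService(&DeleteServiceInput{
+//        Cluster: aws.String("default"),
+//        Service: aws.String("my-service"),
+//    })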
+func (c *ECS) DeleteService(input *DeleteServiceInput) (*DeleteServiceOutput, error) { + req, out := c.DeleteServiceRequest(input) + err := req.Send() + return out, err +} + +const opDeregisterContainerInstance = "DeregisterContainerInstance" + +// DeregisterContainerInstanceRequest generates a "aws/request.Request" representing the +// client's request for the DeregisterContainerInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeregisterContainerInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeregisterContainerInstanceRequest method. +// req, resp := client.DeregisterContainerInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ECS) DeregisterContainerInstanceRequest(input *DeregisterContainerInstanceInput) (req *request.Request, output *DeregisterContainerInstanceOutput) { + op := &request.Operation{ + Name: opDeregisterContainerInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterContainerInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &DeregisterContainerInstanceOutput{} + req.Data = output + return +} + +// Deregisters an Amazon ECS container instance from the specified cluster. +// This instance is no longer available to run tasks. +// +// If you intend to use the container instance for some other purpose after +// deregistration, you should stop all of the tasks running on the container +// instance before deregistration to avoid any orphaned tasks from consuming +// resources. +// +// Deregistering a container instance removes the instance from a cluster, +// but it does not terminate the EC2 instance; if you are finished using the +// instance, be sure to terminate it in the Amazon EC2 console to stop billing. +// +// If you terminate a running container instance with a connected Amazon ECS +// container agent, the agent automatically deregisters the instance from your +// cluster (stopped container instances or instances with disconnected agents +// are not automatically deregistered when terminated). +func (c *ECS) DeregisterContainerInstance(input *DeregisterContainerInstanceInput) (*DeregisterContainerInstanceOutput, error) { + req, out := c.DeregisterContainerInstanceRequest(input) + err := req.Send() + return out, err +} + +const opDeregisterTaskDefinition = "DeregisterTaskDefinition" + +// DeregisterTaskDefinitionRequest generates a "aws/request.Request" representing the +// client's request for the DeregisterTaskDefinition operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the DeregisterTaskDefinition method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeregisterTaskDefinitionRequest method. +// req, resp := client.DeregisterTaskDefinitionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ECS) DeregisterTaskDefinitionRequest(input *DeregisterTaskDefinitionInput) (req *request.Request, output *DeregisterTaskDefinitionOutput) { + op := &request.Operation{ + Name: opDeregisterTaskDefinition, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterTaskDefinitionInput{} + } + + req = c.newRequest(op, input, output) + output = &DeregisterTaskDefinitionOutput{} + req.Data = output + return +} + +// Deregisters the specified task definition by family and revision. Upon deregistration, +// the task definition is marked as INACTIVE. Existing tasks and services that +// reference an INACTIVE task definition continue to run without disruption. +// Existing services that reference an INACTIVE task definition can still scale +// up or down by modifying the service's desired count. +// +// You cannot use an INACTIVE task definition to run new tasks or create new +// services, and you cannot update an existing service to reference an INACTIVE +// task definition (although there may be up to a 10 minute window following +// deregistration where these restrictions have not yet taken effect). +func (c *ECS) DeregisterTaskDefinition(input *DeregisterTaskDefinitionInput) (*DeregisterTaskDefinitionOutput, error) { + req, out := c.DeregisterTaskDefinitionRequest(input) + err := req.Send() + return out, err +} + +const opDescribeClusters = "DescribeClusters" + +// DescribeClustersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeClusters operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeClusters method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeClustersRequest method. +// req, resp := client.DescribeClustersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ECS) DescribeClustersRequest(input *DescribeClustersInput) (req *request.Request, output *DescribeClustersOutput) { + op := &request.Operation{ + Name: opDescribeClusters, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeClustersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeClustersOutput{} + req.Data = output + return +} + +// Describes one or more of your clusters. 
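+//
+// For example, to look up a single cluster by name (sketch; "default" is a
+// placeholder and "svc" an ECS client):
+//
+//    resp, err := svc.DescribeClusters(&DescribeClustersInput{
+//        Clusters: []*string{aws.String("default")},
+//    })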
+func (c *ECS) DescribeClusters(input *DescribeClustersInput) (*DescribeClustersOutput, error) { + req, out := c.DescribeClustersRequest(input) + err := req.Send() + return out, err +} + +const opDescribeContainerInstances = "DescribeContainerInstances" + +// DescribeContainerInstancesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeContainerInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeContainerInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeContainerInstancesRequest method. +// req, resp := client.DescribeContainerInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ECS) DescribeContainerInstancesRequest(input *DescribeContainerInstancesInput) (req *request.Request, output *DescribeContainerInstancesOutput) { + op := &request.Operation{ + Name: opDescribeContainerInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeContainerInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeContainerInstancesOutput{} + req.Data = output + return +} + +// Describes Amazon EC2 Container Service container instances. Returns metadata +// about registered and remaining resources on each container instance requested. +func (c *ECS) DescribeContainerInstances(input *DescribeContainerInstancesInput) (*DescribeContainerInstancesOutput, error) { + req, out := c.DescribeContainerInstancesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeServices = "DescribeServices" + +// DescribeServicesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeServices operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeServices method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeServicesRequest method. 
+// req, resp := client.DescribeServicesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ECS) DescribeServicesRequest(input *DescribeServicesInput) (req *request.Request, output *DescribeServicesOutput) { + op := &request.Operation{ + Name: opDescribeServices, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeServicesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeServicesOutput{} + req.Data = output + return +} + +// Describes the specified services running in your cluster. +func (c *ECS) DescribeServices(input *DescribeServicesInput) (*DescribeServicesOutput, error) { + req, out := c.DescribeServicesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeTaskDefinition = "DescribeTaskDefinition" + +// DescribeTaskDefinitionRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTaskDefinition operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeTaskDefinition method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeTaskDefinitionRequest method. +// req, resp := client.DescribeTaskDefinitionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ECS) DescribeTaskDefinitionRequest(input *DescribeTaskDefinitionInput) (req *request.Request, output *DescribeTaskDefinitionOutput) { + op := &request.Operation{ + Name: opDescribeTaskDefinition, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTaskDefinitionInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTaskDefinitionOutput{} + req.Data = output + return +} + +// Describes a task definition. You can specify a family and revision to find +// information about a specific task definition, or you can simply specify the +// family to find the latest ACTIVE revision in that family. +// +// You can only describe INACTIVE task definitions while an active task or +// service references them. +func (c *ECS) DescribeTaskDefinition(input *DescribeTaskDefinitionInput) (*DescribeTaskDefinitionOutput, error) { + req, out := c.DescribeTaskDefinitionRequest(input) + err := req.Send() + return out, err +} + +const opDescribeTasks = "DescribeTasks" + +// DescribeTasksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTasks operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeTasks method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeTasksRequest method. +// req, resp := client.DescribeTasksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ECS) DescribeTasksRequest(input *DescribeTasksInput) (req *request.Request, output *DescribeTasksOutput) { + op := &request.Operation{ + Name: opDescribeTasks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTasksInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTasksOutput{} + req.Data = output + return +} + +// Describes a specified task or tasks. +func (c *ECS) DescribeTasks(input *DescribeTasksInput) (*DescribeTasksOutput, error) { + req, out := c.DescribeTasksRequest(input) + err := req.Send() + return out, err +} + +const opDiscoverPollEndpoint = "DiscoverPollEndpoint" + +// DiscoverPollEndpointRequest generates a "aws/request.Request" representing the +// client's request for the DiscoverPollEndpoint operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DiscoverPollEndpoint method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DiscoverPollEndpointRequest method. +// req, resp := client.DiscoverPollEndpointRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ECS) DiscoverPollEndpointRequest(input *DiscoverPollEndpointInput) (req *request.Request, output *DiscoverPollEndpointOutput) { + op := &request.Operation{ + Name: opDiscoverPollEndpoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DiscoverPollEndpointInput{} + } + + req = c.newRequest(op, input, output) + output = &DiscoverPollEndpointOutput{} + req.Data = output + return +} + +// This action is only used by the Amazon EC2 Container Service agent, and it +// is not intended for use outside of the agent. +// +// Returns an endpoint for the Amazon EC2 Container Service agent to poll +// for updates. +func (c *ECS) DiscoverPollEndpoint(input *DiscoverPollEndpointInput) (*DiscoverPollEndpointOutput, error) { + req, out := c.DiscoverPollEndpointRequest(input) + err := req.Send() + return out, err +} + +const opListClusters = "ListClusters" + +// ListClustersRequest generates a "aws/request.Request" representing the +// client's request for the ListClusters operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListClusters method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListClustersRequest method. +// req, resp := client.ListClustersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ECS) ListClustersRequest(input *ListClustersInput) (req *request.Request, output *ListClustersOutput) { + op := &request.Operation{ + Name: opListClusters, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListClustersInput{} + } + + req = c.newRequest(op, input, output) + output = &ListClustersOutput{} + req.Data = output + return +} + +// Returns a list of existing clusters. +func (c *ECS) ListClusters(input *ListClustersInput) (*ListClustersOutput, error) { + req, out := c.ListClustersRequest(input) + err := req.Send() + return out, err +} + +// ListClustersPages iterates over the pages of a ListClusters operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListClusters method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListClusters operation. +// pageNum := 0 +// err := client.ListClustersPages(params, +// func(page *ListClustersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ECS) ListClustersPages(input *ListClustersInput, fn func(p *ListClustersOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListClustersRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListClustersOutput), lastPage) + }) +} + +const opListContainerInstances = "ListContainerInstances" + +// ListContainerInstancesRequest generates a "aws/request.Request" representing the +// client's request for the ListContainerInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListContainerInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListContainerInstancesRequest method. 
+// req, resp := client.ListContainerInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ECS) ListContainerInstancesRequest(input *ListContainerInstancesInput) (req *request.Request, output *ListContainerInstancesOutput) { + op := &request.Operation{ + Name: opListContainerInstances, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListContainerInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListContainerInstancesOutput{} + req.Data = output + return +} + +// Returns a list of container instances in a specified cluster. +func (c *ECS) ListContainerInstances(input *ListContainerInstancesInput) (*ListContainerInstancesOutput, error) { + req, out := c.ListContainerInstancesRequest(input) + err := req.Send() + return out, err +} + +// ListContainerInstancesPages iterates over the pages of a ListContainerInstances operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListContainerInstances method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListContainerInstances operation. +// pageNum := 0 +// err := client.ListContainerInstancesPages(params, +// func(page *ListContainerInstancesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ECS) ListContainerInstancesPages(input *ListContainerInstancesInput, fn func(p *ListContainerInstancesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListContainerInstancesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListContainerInstancesOutput), lastPage) + }) +} + +const opListServices = "ListServices" + +// ListServicesRequest generates a "aws/request.Request" representing the +// client's request for the ListServices operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListServices method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListServicesRequest method. 
+// req, resp := client.ListServicesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ECS) ListServicesRequest(input *ListServicesInput) (req *request.Request, output *ListServicesOutput) { + op := &request.Operation{ + Name: opListServices, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListServicesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListServicesOutput{} + req.Data = output + return +} + +// Lists the services that are running in a specified cluster. +func (c *ECS) ListServices(input *ListServicesInput) (*ListServicesOutput, error) { + req, out := c.ListServicesRequest(input) + err := req.Send() + return out, err +} + +// ListServicesPages iterates over the pages of a ListServices operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListServices method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListServices operation. +// pageNum := 0 +// err := client.ListServicesPages(params, +// func(page *ListServicesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ECS) ListServicesPages(input *ListServicesInput, fn func(p *ListServicesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListServicesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListServicesOutput), lastPage) + }) +} + +const opListTaskDefinitionFamilies = "ListTaskDefinitionFamilies" + +// ListTaskDefinitionFamiliesRequest generates a "aws/request.Request" representing the +// client's request for the ListTaskDefinitionFamilies operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTaskDefinitionFamilies method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTaskDefinitionFamiliesRequest method. 
+// req, resp := client.ListTaskDefinitionFamiliesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ECS) ListTaskDefinitionFamiliesRequest(input *ListTaskDefinitionFamiliesInput) (req *request.Request, output *ListTaskDefinitionFamiliesOutput) { + op := &request.Operation{ + Name: opListTaskDefinitionFamilies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTaskDefinitionFamiliesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTaskDefinitionFamiliesOutput{} + req.Data = output + return +} + +// Returns a list of task definition families that are registered to your account +// (which may include task definition families that no longer have any ACTIVE +// task definition revisions). +// +// You can filter out task definition families that do not contain any ACTIVE +// task definition revisions by setting the status parameter to ACTIVE. You +// can also filter the results with the familyPrefix parameter. +func (c *ECS) ListTaskDefinitionFamilies(input *ListTaskDefinitionFamiliesInput) (*ListTaskDefinitionFamiliesOutput, error) { + req, out := c.ListTaskDefinitionFamiliesRequest(input) + err := req.Send() + return out, err +} + +// ListTaskDefinitionFamiliesPages iterates over the pages of a ListTaskDefinitionFamilies operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListTaskDefinitionFamilies method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListTaskDefinitionFamilies operation. +// pageNum := 0 +// err := client.ListTaskDefinitionFamiliesPages(params, +// func(page *ListTaskDefinitionFamiliesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ECS) ListTaskDefinitionFamiliesPages(input *ListTaskDefinitionFamiliesInput, fn func(p *ListTaskDefinitionFamiliesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListTaskDefinitionFamiliesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListTaskDefinitionFamiliesOutput), lastPage) + }) +} + +const opListTaskDefinitions = "ListTaskDefinitions" + +// ListTaskDefinitionsRequest generates a "aws/request.Request" representing the +// client's request for the ListTaskDefinitions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTaskDefinitions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTaskDefinitionsRequest method. 
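+// // params is a *ecs.ListTaskDefinitionsInput; as described below, the
+// // familyPrefix and status fields narrow the listing, and
+// // ListTaskDefinitionsPages iterates the paginated results.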
+// req, resp := client.ListTaskDefinitionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ECS) ListTaskDefinitionsRequest(input *ListTaskDefinitionsInput) (req *request.Request, output *ListTaskDefinitionsOutput) { + op := &request.Operation{ + Name: opListTaskDefinitions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTaskDefinitionsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTaskDefinitionsOutput{} + req.Data = output + return +} + +// Returns a list of task definitions that are registered to your account. You +// can filter the results by family name with the familyPrefix parameter or +// by status with the status parameter. +func (c *ECS) ListTaskDefinitions(input *ListTaskDefinitionsInput) (*ListTaskDefinitionsOutput, error) { + req, out := c.ListTaskDefinitionsRequest(input) + err := req.Send() + return out, err +} + +// ListTaskDefinitionsPages iterates over the pages of a ListTaskDefinitions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListTaskDefinitions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListTaskDefinitions operation. +// pageNum := 0 +// err := client.ListTaskDefinitionsPages(params, +// func(page *ListTaskDefinitionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ECS) ListTaskDefinitionsPages(input *ListTaskDefinitionsInput, fn func(p *ListTaskDefinitionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListTaskDefinitionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListTaskDefinitionsOutput), lastPage) + }) +} + +const opListTasks = "ListTasks" + +// ListTasksRequest generates a "aws/request.Request" representing the +// client's request for the ListTasks operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTasks method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTasksRequest method. 
+// req, resp := client.ListTasksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ECS) ListTasksRequest(input *ListTasksInput) (req *request.Request, output *ListTasksOutput) { + op := &request.Operation{ + Name: opListTasks, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTasksInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTasksOutput{} + req.Data = output + return +} + +// Returns a list of tasks for a specified cluster. You can filter the results +// by family name, by a particular container instance, or by the desired status +// of the task with the family, containerInstance, and desiredStatus parameters. +func (c *ECS) ListTasks(input *ListTasksInput) (*ListTasksOutput, error) { + req, out := c.ListTasksRequest(input) + err := req.Send() + return out, err +} + +// ListTasksPages iterates over the pages of a ListTasks operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListTasks method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListTasks operation. +// pageNum := 0 +// err := client.ListTasksPages(params, +// func(page *ListTasksOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ECS) ListTasksPages(input *ListTasksInput, fn func(p *ListTasksOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListTasksRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListTasksOutput), lastPage) + }) +} + +const opRegisterContainerInstance = "RegisterContainerInstance" + +// RegisterContainerInstanceRequest generates a "aws/request.Request" representing the +// client's request for the RegisterContainerInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RegisterContainerInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RegisterContainerInstanceRequest method. 
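+// // As noted below, this operation is reserved for the Amazon ECS container
+// // agent, so typical applications should never need to construct this
+// // request themselves.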
+// req, resp := client.RegisterContainerInstanceRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *ECS) RegisterContainerInstanceRequest(input *RegisterContainerInstanceInput) (req *request.Request, output *RegisterContainerInstanceOutput) {
+ op := &request.Operation{
+ Name: opRegisterContainerInstance,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &RegisterContainerInstanceInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &RegisterContainerInstanceOutput{}
+ req.Data = output
+ return
+}
+
+// This action is only used by the Amazon EC2 Container Service agent, and it
+// is not intended for use outside of the agent.
+//
+// Registers an EC2 instance into the specified cluster. This instance becomes
+// available to place containers on.
+func (c *ECS) RegisterContainerInstance(input *RegisterContainerInstanceInput) (*RegisterContainerInstanceOutput, error) {
+ req, out := c.RegisterContainerInstanceRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opRegisterTaskDefinition = "RegisterTaskDefinition"
+
+// RegisterTaskDefinitionRequest generates a "aws/request.Request" representing the
+// client's request for the RegisterTaskDefinition operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the RegisterTaskDefinition method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the RegisterTaskDefinitionRequest method.
+// req, resp := client.RegisterTaskDefinitionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *ECS) RegisterTaskDefinitionRequest(input *RegisterTaskDefinitionInput) (req *request.Request, output *RegisterTaskDefinitionOutput) {
+ op := &request.Operation{
+ Name: opRegisterTaskDefinition,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &RegisterTaskDefinitionInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &RegisterTaskDefinitionOutput{}
+ req.Data = output
+ return
+}
+
+// Registers a new task definition from the supplied family and containerDefinitions.
+// Optionally, you can add data volumes to your containers with the volumes
+// parameter. For more information about task definition parameters and defaults,
+// see Amazon ECS Task Definitions (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html)
+// in the Amazon EC2 Container Service Developer Guide.
+func (c *ECS) RegisterTaskDefinition(input *RegisterTaskDefinitionInput) (*RegisterTaskDefinitionOutput, error) {
+ req, out := c.RegisterTaskDefinitionRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opRunTask = "RunTask"
+
+// RunTaskRequest generates a "aws/request.Request" representing the
+// client's request for the RunTask operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RunTask method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RunTaskRequest method. +// req, resp := client.RunTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ECS) RunTaskRequest(input *RunTaskInput) (req *request.Request, output *RunTaskOutput) { + op := &request.Operation{ + Name: opRunTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RunTaskInput{} + } + + req = c.newRequest(op, input, output) + output = &RunTaskOutput{} + req.Data = output + return +} + +// Start a task using random placement and the default Amazon ECS scheduler. +// To use your own scheduler or place a task on a specific container instance, +// use StartTask instead. +// +// The count parameter is limited to 10 tasks per call. +func (c *ECS) RunTask(input *RunTaskInput) (*RunTaskOutput, error) { + req, out := c.RunTaskRequest(input) + err := req.Send() + return out, err +} + +const opStartTask = "StartTask" + +// StartTaskRequest generates a "aws/request.Request" representing the +// client's request for the StartTask operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the StartTask method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the StartTaskRequest method. +// req, resp := client.StartTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ECS) StartTaskRequest(input *StartTaskInput) (req *request.Request, output *StartTaskOutput) { + op := &request.Operation{ + Name: opStartTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartTaskInput{} + } + + req = c.newRequest(op, input, output) + output = &StartTaskOutput{} + req.Data = output + return +} + +// Starts a new task from the specified task definition on the specified container +// instance or instances. To use the default Amazon ECS scheduler to place your +// task, use RunTask instead. +// +// The list of container instances to start tasks on is limited to 10. +func (c *ECS) StartTask(input *StartTaskInput) (*StartTaskOutput, error) { + req, out := c.StartTaskRequest(input) + err := req.Send() + return out, err +} + +const opStopTask = "StopTask" + +// StopTaskRequest generates a "aws/request.Request" representing the +// client's request for the StopTask operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the StopTask method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the StopTaskRequest method. +// req, resp := client.StopTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ECS) StopTaskRequest(input *StopTaskInput) (req *request.Request, output *StopTaskOutput) { + op := &request.Operation{ + Name: opStopTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopTaskInput{} + } + + req = c.newRequest(op, input, output) + output = &StopTaskOutput{} + req.Data = output + return +} + +// Stops a running task. +// +// When StopTask is called on a task, the equivalent of docker stop is issued +// to the containers running in the task. This results in a SIGTERM and a 30-second +// timeout, after which SIGKILL is sent and the containers are forcibly stopped. +// If the container handles the SIGTERM gracefully and exits within 30 seconds +// from receiving it, no SIGKILL is sent. +func (c *ECS) StopTask(input *StopTaskInput) (*StopTaskOutput, error) { + req, out := c.StopTaskRequest(input) + err := req.Send() + return out, err +} + +const opSubmitContainerStateChange = "SubmitContainerStateChange" + +// SubmitContainerStateChangeRequest generates a "aws/request.Request" representing the +// client's request for the SubmitContainerStateChange operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SubmitContainerStateChange method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SubmitContainerStateChangeRequest method. +// req, resp := client.SubmitContainerStateChangeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ECS) SubmitContainerStateChangeRequest(input *SubmitContainerStateChangeInput) (req *request.Request, output *SubmitContainerStateChangeOutput) { + op := &request.Operation{ + Name: opSubmitContainerStateChange, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SubmitContainerStateChangeInput{} + } + + req = c.newRequest(op, input, output) + output = &SubmitContainerStateChangeOutput{} + req.Data = output + return +} + +// This action is only used by the Amazon EC2 Container Service agent, and it +// is not intended for use outside of the agent. +// +// Sent to acknowledge that a container changed states. 
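+//
+// Because this operation is reserved for the agent, application code should
+// not normally call it; the agent reports container state transitions (for
+// example, the lastStatus and exitCode values surfaced on the Container type)
+// on your behalf.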
+func (c *ECS) SubmitContainerStateChange(input *SubmitContainerStateChangeInput) (*SubmitContainerStateChangeOutput, error) { + req, out := c.SubmitContainerStateChangeRequest(input) + err := req.Send() + return out, err +} + +const opSubmitTaskStateChange = "SubmitTaskStateChange" + +// SubmitTaskStateChangeRequest generates a "aws/request.Request" representing the +// client's request for the SubmitTaskStateChange operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SubmitTaskStateChange method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SubmitTaskStateChangeRequest method. +// req, resp := client.SubmitTaskStateChangeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ECS) SubmitTaskStateChangeRequest(input *SubmitTaskStateChangeInput) (req *request.Request, output *SubmitTaskStateChangeOutput) { + op := &request.Operation{ + Name: opSubmitTaskStateChange, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SubmitTaskStateChangeInput{} + } + + req = c.newRequest(op, input, output) + output = &SubmitTaskStateChangeOutput{} + req.Data = output + return +} + +// This action is only used by the Amazon EC2 Container Service agent, and it +// is not intended for use outside of the agent. +// +// Sent to acknowledge that a task changed states. +func (c *ECS) SubmitTaskStateChange(input *SubmitTaskStateChangeInput) (*SubmitTaskStateChangeOutput, error) { + req, out := c.SubmitTaskStateChangeRequest(input) + err := req.Send() + return out, err +} + +const opUpdateContainerAgent = "UpdateContainerAgent" + +// UpdateContainerAgentRequest generates a "aws/request.Request" representing the +// client's request for the UpdateContainerAgent operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateContainerAgent method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateContainerAgentRequest method. 
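+// // params is a *ecs.UpdateContainerAgentInput identifying the container
+// // instance to update (and, optionally, its cluster); note the agent
+// // requirements described below (the Amazon ECS-optimized AMI, or Amazon
+// // Linux with the ecs-init service).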
+// req, resp := client.UpdateContainerAgentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ECS) UpdateContainerAgentRequest(input *UpdateContainerAgentInput) (req *request.Request, output *UpdateContainerAgentOutput) { + op := &request.Operation{ + Name: opUpdateContainerAgent, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateContainerAgentInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateContainerAgentOutput{} + req.Data = output + return +} + +// Updates the Amazon ECS container agent on a specified container instance. +// Updating the Amazon ECS container agent does not interrupt running tasks +// or services on the container instance. The process for updating the agent +// differs depending on whether your container instance was launched with the +// Amazon ECS-optimized AMI or another operating system. +// +// UpdateContainerAgent requires the Amazon ECS-optimized AMI or Amazon Linux +// with the ecs-init service installed and running. For help updating the Amazon +// ECS container agent on other operating systems, see Manually Updating the +// Amazon ECS Container Agent (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html#manually_update_agent) +// in the Amazon EC2 Container Service Developer Guide. +func (c *ECS) UpdateContainerAgent(input *UpdateContainerAgentInput) (*UpdateContainerAgentOutput, error) { + req, out := c.UpdateContainerAgentRequest(input) + err := req.Send() + return out, err +} + +const opUpdateService = "UpdateService" + +// UpdateServiceRequest generates a "aws/request.Request" representing the +// client's request for the UpdateService operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateService method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateServiceRequest method. +// req, resp := client.UpdateServiceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ECS) UpdateServiceRequest(input *UpdateServiceInput) (req *request.Request, output *UpdateServiceOutput) { + op := &request.Operation{ + Name: opUpdateService, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateServiceInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateServiceOutput{} + req.Data = output + return +} + +// Modifies the desired count, deployment configuration, or task definition +// used in a service. +// +// You can add to or subtract from the number of instantiations of a task definition +// in a service by specifying the cluster that the service is running in and +// a new desiredCount parameter. +// +// You can use UpdateService to modify your task definition and deploy a new +// version of your service. +// +// You can also update the deployment configuration of a service. 
When a deployment
+// is triggered by updating the task definition of a service, the service scheduler
+// uses the deployment configuration parameters, minimumHealthyPercent and maximumPercent,
+// to determine the deployment strategy.
+//
+// If the minimumHealthyPercent is below 100%, the scheduler can ignore the
+// desiredCount temporarily during a deployment. For example, if your service
+// has a desiredCount of four tasks, a minimumHealthyPercent of 50% allows the
+// scheduler to stop two existing tasks before starting two new tasks. Tasks
+// for services that do not use a load balancer are considered healthy if they
+// are in the RUNNING state; tasks for services that do use a load balancer
+// are considered healthy if they are in the RUNNING state and the container
+// instances they are hosted on are reported as healthy by the load balancer.
+//
+// The maximumPercent parameter represents an upper limit on the number of
+// running tasks during a deployment, which enables you to define the deployment
+// batch size. For example, if your service has a desiredCount of four tasks,
+// a maximumPercent value of 200% starts four new tasks before stopping the
+// four older tasks (provided that the cluster resources required to do this
+// are available).
+//
+// When UpdateService stops a task during a deployment, the equivalent of docker
+// stop is issued to the containers running in the task. This results in a SIGTERM
+// and a 30-second timeout, after which SIGKILL is sent and the containers are
+// forcibly stopped. If the container handles the SIGTERM gracefully and exits
+// within 30 seconds from receiving it, no SIGKILL is sent.
+//
+// When the service scheduler launches new tasks, it attempts to balance them
+// across the Availability Zones in your cluster with the following logic:
+//
+// Determine which of the container instances in your cluster can support
+// your service's task definition (for example, they have the required CPU,
+// memory, ports, and container instance attributes).
+//
+// Sort the valid container instances by the fewest number of running tasks
+// for this service in the same Availability Zone as the instance. For example,
+// if zone A has one running service task and zones B and C each have zero,
+// valid container instances in either zone B or C are considered optimal for
+// placement.
+//
+// Place the new service task on a valid container instance in an optimal
+// Availability Zone (based on the previous steps), favoring container instances
+// with the fewest number of running tasks for this service.
+func (c *ECS) UpdateService(input *UpdateServiceInput) (*UpdateServiceOutput, error) {
+ req, out := c.UpdateServiceRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// The attributes applicable to a container instance when it is registered.
+type Attribute struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the container instance attribute.
+ Name *string `locationName:"name" type:"string" required:"true"`
+
+ // The value of the container instance attribute (at this time, the value here
+ // is Null, but this could change in future revisions for expandability).
+ Value *string `locationName:"value" type:"string"`
+}
+
+// String returns the string representation
+func (s Attribute) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Attribute) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
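+//
+// A minimal sketch of calling it directly (the SDK also runs this validation
+// automatically before a request is sent):
+//
+// attr := &ecs.Attribute{} // Name is required, so Validate returns an error
+// if err := attr.Validate(); err != nil {
+// fmt.Println(err)
+// }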
+func (s *Attribute) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Attribute"}
+ if s.Name == nil {
+ invalidParams.Add(request.NewErrParamRequired("Name"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// A regional grouping of one or more container instances on which you can run
+// task requests. Each account receives a default cluster the first time you
+// use the Amazon ECS service, but you may also create other clusters. Clusters
+// may contain more than one instance type simultaneously.
+type Cluster struct {
+ _ struct{} `type:"structure"`
+
+ // The number of services that are running on the cluster in an ACTIVE state.
+ // You can view these services with ListServices.
+ ActiveServicesCount *int64 `locationName:"activeServicesCount" type:"integer"`
+
+ // The Amazon Resource Name (ARN) that identifies the cluster. The ARN contains
+ // the arn:aws:ecs namespace, followed by the region of the cluster, the AWS
+ // account ID of the cluster owner, the cluster namespace, and then the cluster
+ // name. For example, arn:aws:ecs:region:012345678910:cluster/test.
+ ClusterArn *string `locationName:"clusterArn" type:"string"`
+
+ // A user-generated string that you use to identify your cluster.
+ ClusterName *string `locationName:"clusterName" type:"string"`
+
+ // The number of tasks in the cluster that are in the PENDING state.
+ PendingTasksCount *int64 `locationName:"pendingTasksCount" type:"integer"`
+
+ // The number of container instances registered into the cluster.
+ RegisteredContainerInstancesCount *int64 `locationName:"registeredContainerInstancesCount" type:"integer"`
+
+ // The number of tasks in the cluster that are in the RUNNING state.
+ RunningTasksCount *int64 `locationName:"runningTasksCount" type:"integer"`
+
+ // The status of the cluster. The valid values are ACTIVE or INACTIVE. ACTIVE
+ // indicates that you can register container instances with the cluster and
+ // the associated instances can accept tasks.
+ Status *string `locationName:"status" type:"string"`
+}
+
+// String returns the string representation
+func (s Cluster) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Cluster) GoString() string {
+ return s.String()
+}
+
+// A Docker container that is part of a task.
+type Container struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the container.
+ ContainerArn *string `locationName:"containerArn" type:"string"`
+
+ // The exit code returned from the container.
+ ExitCode *int64 `locationName:"exitCode" type:"integer"`
+
+ // The last known status of the container.
+ LastStatus *string `locationName:"lastStatus" type:"string"`
+
+ // The name of the container.
+ Name *string `locationName:"name" type:"string"`
+
+ // The network bindings associated with the container.
+ NetworkBindings []*NetworkBinding `locationName:"networkBindings" type:"list"`
+
+ // A short (255 max characters) human-readable string to provide additional
+ // detail about a running or stopped container.
+ Reason *string `locationName:"reason" type:"string"`
+
+ // The Amazon Resource Name (ARN) of the task.
+ TaskArn *string `locationName:"taskArn" type:"string"` +} + +// String returns the string representation +func (s Container) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Container) GoString() string { + return s.String() +} + +// Container definitions are used in task definitions to describe the different +// containers that are launched as part of a task. +type ContainerDefinition struct { + _ struct{} `type:"structure"` + + // The command that is passed to the container. This parameter maps to Cmd in + // the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the COMMAND parameter to docker run (https://docs.docker.com/reference/commandline/run/). + // For more information, see https://docs.docker.com/reference/builder/#cmd + // (https://docs.docker.com/reference/builder/#cmd). + Command []*string `locationName:"command" type:"list"` + + // The number of cpu units reserved for the container. A container instance + // has 1,024 cpu units for every CPU core. This parameter specifies the minimum + // amount of CPU to reserve for a container, and containers share unallocated + // CPU units with other containers on the instance with the same ratio as their + // allocated amount. This parameter maps to CpuShares in the Create a container + // (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --cpu-shares option to docker run (https://docs.docker.com/reference/commandline/run/). + // + // You can determine the number of CPU units that are available per EC2 instance + // type by multiplying the vCPUs listed for that instance type on the Amazon + // EC2 Instances (http://aws.amazon.com/ec2/instance-types/) detail page by + // 1,024. + // + // For example, if you run a single-container task on a single-core instance + // type with 512 CPU units specified for that container, and that is the only + // task running on the container instance, that container could use the full + // 1,024 CPU unit share at any given time. However, if you launched another + // copy of the same task on that container instance, each task would be guaranteed + // a minimum of 512 CPU units when needed, and each container could float to + // higher CPU usage if the other container was not using it, but if both tasks + // were 100% active all of the time, they would be limited to 512 CPU units. + // + // The Docker daemon on the container instance uses the CPU value to calculate + // the relative CPU share ratios for running containers. For more information, + // see CPU share constraint (https://docs.docker.com/reference/run/#cpu-share-constraint) + // in the Docker documentation. The minimum valid CPU share value that the Linux + // kernel allows is 2; however, the CPU parameter is not required, and you can + // use CPU values below 2 in your container definitions. For CPU values below + // 2 (including null), the behavior varies based on your Amazon ECS container + // agent version: + // + // Agent versions less than or equal to 1.1.0: Null and zero CPU values are + // passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU + // values of 1 are passed to Docker as 1, which the Linux kernel converts to + // 2 CPU shares. 
+ // + // Agent versions greater than or equal to 1.2.0: Null, zero, and CPU values + // of 1 are passed to Docker as 2. + Cpu *int64 `locationName:"cpu" type:"integer"` + + // When this parameter is true, networking is disabled within the container. + // This parameter maps to NetworkDisabled in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/). + DisableNetworking *bool `locationName:"disableNetworking" type:"boolean"` + + // A list of DNS search domains that are presented to the container. This parameter + // maps to DnsSearch in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --dns-search option to docker run (https://docs.docker.com/reference/commandline/run/). + DnsSearchDomains []*string `locationName:"dnsSearchDomains" type:"list"` + + // A list of DNS servers that are presented to the container. This parameter + // maps to Dns in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --dns option to docker run (https://docs.docker.com/reference/commandline/run/). + DnsServers []*string `locationName:"dnsServers" type:"list"` + + // A key/value map of labels to add to the container. This parameter maps to + // Labels in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --label option to docker run (https://docs.docker.com/reference/commandline/run/). + // This parameter requires version 1.18 of the Docker Remote API or greater + // on your container instance. To check the Docker Remote API version on your + // container instance, log into your container instance and run the following + // command: sudo docker version | grep "Server API version" + DockerLabels map[string]*string `locationName:"dockerLabels" type:"map"` + + // A list of strings to provide custom labels for SELinux and AppArmor multi-level + // security systems. This parameter maps to SecurityOpt in the Create a container + // (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --security-opt option to docker run (https://docs.docker.com/reference/commandline/run/). + // + // The Amazon ECS container agent running on a container instance must register + // with the ECS_SELINUX_CAPABLE=true or ECS_APPARMOR_CAPABLE=true environment + // variables before containers placed on that instance can use these security + // options. For more information, see Amazon ECS Container Agent Configuration + // (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) + // in the Amazon EC2 Container Service Developer Guide. + DockerSecurityOptions []*string `locationName:"dockerSecurityOptions" type:"list"` + + // Early versions of the Amazon ECS container agent do not properly handle entryPoint + // parameters. 
If you have problems using entryPoint, update your container
+ // agent or enter your commands and arguments as command array items instead.
+ //
+ // The entry point that is passed to the container. This parameter maps to
+ // Entrypoint in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container)
+ // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/)
+ // and the --entrypoint option to docker run (https://docs.docker.com/reference/commandline/run/).
+ // For more information, see https://docs.docker.com/reference/builder/#entrypoint
+ // (https://docs.docker.com/reference/builder/#entrypoint).
+ EntryPoint []*string `locationName:"entryPoint" type:"list"`
+
+ // The environment variables to pass to a container. This parameter maps to
+ // Env in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container)
+ // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/)
+ // and the --env option to docker run (https://docs.docker.com/reference/commandline/run/).
+ //
+ // We do not recommend using plain text environment variables for sensitive
+ // information, such as credential data.
+ Environment []*KeyValuePair `locationName:"environment" type:"list"`
+
+ // If the essential parameter of a container is marked as true, and that container
+ // fails or stops for any reason, all other containers that are part of the
+ // task are stopped. If the essential parameter of a container is marked as
+ // false, then its failure does not affect the rest of the containers in a task.
+ // If this parameter is omitted, a container is assumed to be essential.
+ //
+ // All tasks must have at least one essential container. If you have an application
+ // that is composed of multiple containers, you should group containers that
+ // are used for a common purpose into components, and separate the different
+ // components into multiple task definitions. For more information, see Application
+ // Architecture (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/application_architecture.html)
+ // in the Amazon EC2 Container Service Developer Guide.
+ Essential *bool `locationName:"essential" type:"boolean"`
+
+ // A list of hostnames and IP address mappings to append to the /etc/hosts file
+ // on the container. This parameter maps to ExtraHosts in the Create a container
+ // (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container)
+ // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/)
+ // and the --add-host option to docker run (https://docs.docker.com/reference/commandline/run/).
+ ExtraHosts []*HostEntry `locationName:"extraHosts" type:"list"`
+
+ // The hostname to use for your container. This parameter maps to Hostname in
+ // the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container)
+ // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/)
+ // and the --hostname option to docker run (https://docs.docker.com/reference/commandline/run/).
+ Hostname *string `locationName:"hostname" type:"string"`
+
+ // The image used to start a container. This string is passed directly to the
+ // Docker daemon. Images in the Docker Hub registry are available by default.
+ // Other repositories are specified with repository-url/image:tag.
Up to 255 + // letters (uppercase and lowercase), numbers, hyphens, underscores, colons, + // periods, forward slashes, and number signs are allowed. This parameter maps + // to Image in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the IMAGE parameter of docker run (https://docs.docker.com/reference/commandline/run/). + // + // Images in official repositories on Docker Hub use a single name (for example, + // ubuntu or mongo). + // + // Images in other repositories on Docker Hub are qualified with an organization + // name (for example, amazon/amazon-ecs-agent). + // + // Images in other online repositories are qualified further by a domain name + // (for example, quay.io/assemblyline/ubuntu). + Image *string `locationName:"image" type:"string"` + + // The link parameter allows containers to communicate with each other without + // the need for port mappings, using the name parameter and optionally, an alias + // for the link. This construct is analogous to name:alias in Docker links. + // Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores + // are allowed for each name and alias. For more information on linking Docker + // containers, see https://docs.docker.com/userguide/dockerlinks/ (https://docs.docker.com/userguide/dockerlinks/). + // This parameter maps to Links in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --link option to docker run (https://docs.docker.com/reference/commandline/run/). + // + // Containers that are collocated on a single container instance may be able + // to communicate with each other without requiring links or host port mappings. + // Network isolation is achieved on the container instance using security groups + // and VPC settings. + Links []*string `locationName:"links" type:"list"` + + // The log configuration specification for the container. This parameter maps + // to LogConfig in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --log-driver option to docker run (https://docs.docker.com/reference/commandline/run/). + // By default, containers use the same logging driver that the Docker daemon + // uses; however the container may use a different logging driver than the Docker + // daemon by specifying a log driver with this parameter in the container definition. + // To use a different logging driver for a container, the log system must be + // configured properly on the container instance (or on a different log server + // for remote logging options). For more information on the options for different + // supported log drivers, see Configure logging drivers (https://docs.docker.com/engine/admin/logging/overview/) + // in the Docker documentation. + // + // Amazon ECS currently supports a subset of the logging drivers available + // to the Docker daemon (shown in the LogConfiguration data type). Currently + // unsupported log drivers may be available in future releases of the Amazon + // ECS container agent. 
+ // + // This parameter requires version 1.18 of the Docker Remote API or greater + // on your container instance. To check the Docker Remote API version on your + // container instance, log into your container instance and run the following + // command: sudo docker version | grep "Server API version" + // + // The Amazon ECS container agent running on a container instance must register + // the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS + // environment variable before containers placed on that instance can use these + // log configuration options. For more information, see Amazon ECS Container + // Agent Configuration (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) + // in the Amazon EC2 Container Service Developer Guide. + LogConfiguration *LogConfiguration `locationName:"logConfiguration" type:"structure"` + + // The number of MiB of memory to reserve for the container. You must specify + // a non-zero integer for this parameter; the Docker daemon reserves a minimum + // of 4 MiB of memory for a container, so you should not specify fewer than + // 4 MiB of memory for your containers. If your container attempts to exceed + // the memory allocated here, the container is killed. This parameter maps to + // Memory in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --memory option to docker run (https://docs.docker.com/reference/commandline/run/). + Memory *int64 `locationName:"memory" type:"integer"` + + // The mount points for data volumes in your container. This parameter maps + // to Volumes in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --volume option to docker run (https://docs.docker.com/reference/commandline/run/). + MountPoints []*MountPoint `locationName:"mountPoints" type:"list"` + + // The name of a container. If you are linking multiple containers together + // in a task definition, the name of one container can be entered in the links + // of another container to connect the containers. Up to 255 letters (uppercase + // and lowercase), numbers, hyphens, and underscores are allowed. This parameter + // maps to name in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --name option to docker run (https://docs.docker.com/reference/commandline/run/). + Name *string `locationName:"name" type:"string"` + + // The list of port mappings for the container. Port mappings allow containers + // to access ports on the host container instance to send or receive traffic. + // This parameter maps to PortBindings in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --publish option to docker run (https://docs.docker.com/reference/commandline/run/). 
+ //
+ // After a task reaches the RUNNING status, manual and automatic host and
+ // container port assignments are visible in the Network Bindings section of
+ // a container description of a selected task in the Amazon ECS console, or
+ // the networkBindings section of DescribeTasks responses.
+ PortMappings []*PortMapping `locationName:"portMappings" type:"list"`
+
+ // When this parameter is true, the container is given elevated privileges on
+ // the host container instance (similar to the root user). This parameter maps
+ // to Privileged in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container)
+ // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/)
+ // and the --privileged option to docker run (https://docs.docker.com/reference/commandline/run/).
+ Privileged *bool `locationName:"privileged" type:"boolean"`
+
+ // When this parameter is true, the container is given read-only access to its
+ // root file system. This parameter maps to ReadonlyRootfs in the Create a container
+ // (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container)
+ // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/)
+ // and the --read-only option to docker run.
+ ReadonlyRootFilesystem *bool `locationName:"readonlyRootFilesystem" type:"boolean"`
+
+ // A list of ulimits to set in the container. This parameter maps to Ulimits
+ // in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container)
+ // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/)
+ // and the --ulimit option to docker run (https://docs.docker.com/reference/commandline/run/).
+ // Valid naming values are displayed in the Ulimit data type. This parameter
+ // requires version 1.18 of the Docker Remote API or greater on your container
+ // instance. To check the Docker Remote API version on your container instance,
+ // log into your container instance and run the following command: sudo docker
+ // version | grep "Server API version"
+ Ulimits []*Ulimit `locationName:"ulimits" type:"list"`
+
+ // The user name to use inside the container. This parameter maps to User in
+ // the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container)
+ // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/)
+ // and the --user option to docker run (https://docs.docker.com/reference/commandline/run/).
+ User *string `locationName:"user" type:"string"`
+
+ // Data volumes to mount from another container. This parameter maps to VolumesFrom
+ // in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container)
+ // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/)
+ // and the --volumes-from option to docker run (https://docs.docker.com/reference/commandline/run/).
+ VolumesFrom []*VolumeFrom `locationName:"volumesFrom" type:"list"`
+
+ // The working directory in which to run commands inside the container.
This + // parameter maps to WorkingDir in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --workdir option to docker run (https://docs.docker.com/reference/commandline/run/). + WorkingDirectory *string `locationName:"workingDirectory" type:"string"` +} + +// String returns the string representation +func (s ContainerDefinition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ContainerDefinition) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ContainerDefinition) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ContainerDefinition"} + if s.ExtraHosts != nil { + for i, v := range s.ExtraHosts { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ExtraHosts", i), err.(request.ErrInvalidParams)) + } + } + } + if s.LogConfiguration != nil { + if err := s.LogConfiguration.Validate(); err != nil { + invalidParams.AddNested("LogConfiguration", err.(request.ErrInvalidParams)) + } + } + if s.Ulimits != nil { + for i, v := range s.Ulimits { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Ulimits", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An EC2 instance that is running the Amazon ECS agent and has been registered +// with a cluster. +type ContainerInstance struct { + _ struct{} `type:"structure"` + + // This parameter returns true if the agent is actually connected to Amazon + // ECS. Registered instances with an agent that may be unhealthy or stopped + // return false, and instances without a connected agent cannot accept placement + // requests. + AgentConnected *bool `locationName:"agentConnected" type:"boolean"` + + // The status of the most recent agent update. If an update has never been requested, + // this value is NULL. + AgentUpdateStatus *string `locationName:"agentUpdateStatus" type:"string" enum:"AgentUpdateStatus"` + + // The attributes set for the container instance by the Amazon ECS container + // agent at instance registration. + Attributes []*Attribute `locationName:"attributes" type:"list"` + + // The Amazon Resource Name (ARN) of the container instance. The ARN contains + // the arn:aws:ecs namespace, followed by the region of the container instance, + // the AWS account ID of the container instance owner, the container-instance + // namespace, and then the container instance ID. For example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID + // . + ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"` + + // The EC2 instance ID of the container instance. + Ec2InstanceId *string `locationName:"ec2InstanceId" type:"string"` + + // The number of tasks on the container instance that are in the PENDING status. + PendingTasksCount *int64 `locationName:"pendingTasksCount" type:"integer"` + + // The registered resources on the container instance that are in use by current + // tasks. + RegisteredResources []*Resource `locationName:"registeredResources" type:"list"` + + // The remaining resources of the container instance that are available for + // new tasks. 
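+ //
+ // For illustration, a minimal sketch of reading these values (ci is assumed
+ // to be a *ecs.ContainerInstance from a DescribeContainerInstances response;
+ // resource names reported by the service are typically CPU, MEMORY, PORTS,
+ // and PORTS_UDP):
+ //
+ //    for _, r := range ci.RemainingResources {
+ //        fmt.Printf("%s: %d\n", aws.StringValue(r.Name), aws.Int64Value(r.IntegerValue))
+ //    }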
+ RemainingResources []*Resource `locationName:"remainingResources" type:"list"` + + // The number of tasks on the container instance that are in the RUNNING status. + RunningTasksCount *int64 `locationName:"runningTasksCount" type:"integer"` + + // The status of the container instance. The valid values are ACTIVE or INACTIVE. + // ACTIVE indicates that the container instance can accept tasks. + Status *string `locationName:"status" type:"string"` + + // The version information for the Amazon ECS container agent and Docker daemon + // running on the container instance. + VersionInfo *VersionInfo `locationName:"versionInfo" type:"structure"` +} + +// String returns the string representation +func (s ContainerInstance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ContainerInstance) GoString() string { + return s.String() +} + +// The overrides that should be sent to a container. +type ContainerOverride struct { + _ struct{} `type:"structure"` + + // The command to send to the container that overrides the default command from + // the Docker image or the task definition. + Command []*string `locationName:"command" type:"list"` + + // The environment variables to send to the container. You can add new environment + // variables, which are added to the container at launch, or you can override + // the existing environment variables from the Docker image or the task definition. + Environment []*KeyValuePair `locationName:"environment" type:"list"` + + // The name of the container that receives the override. + Name *string `locationName:"name" type:"string"` +} + +// String returns the string representation +func (s ContainerOverride) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ContainerOverride) GoString() string { + return s.String() +} + +type CreateClusterInput struct { + _ struct{} `type:"structure"` + + // The name of your cluster. If you do not specify a name for your cluster, + // you create a cluster named default. Up to 255 letters (uppercase and lowercase), + // numbers, hyphens, and underscores are allowed. + ClusterName *string `locationName:"clusterName" type:"string"` +} + +// String returns the string representation +func (s CreateClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateClusterInput) GoString() string { + return s.String() +} + +type CreateClusterOutput struct { + _ struct{} `type:"structure"` + + // The full description of your new cluster. + Cluster *Cluster `locationName:"cluster" type:"structure"` +} + +// String returns the string representation +func (s CreateClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateClusterOutput) GoString() string { + return s.String() +} + +type CreateServiceInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier you provide to ensure the idempotency of + // the request. Up to 32 ASCII characters are allowed. + ClientToken *string `locationName:"clientToken" type:"string"` + + // The short name or full Amazon Resource Name (ARN) of the cluster on which + // to run your service. If you do not specify a cluster, the default cluster + // is assumed. 
+ Cluster *string `locationName:"cluster" type:"string"` + + // Optional deployment parameters that control how many tasks run during the + // deployment and the ordering of stopping and starting tasks. + DeploymentConfiguration *DeploymentConfiguration `locationName:"deploymentConfiguration" type:"structure"` + + // The number of instantiations of the specified task definition to place and + // keep running on your cluster. + DesiredCount *int64 `locationName:"desiredCount" type:"integer" required:"true"` + + // A list of load balancer objects, containing the load balancer name, the container + // name (as it appears in a container definition), and the container port to + // access from the load balancer. + LoadBalancers []*LoadBalancer `locationName:"loadBalancers" type:"list"` + + // The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon + // ECS to make calls to your load balancer on your behalf. This parameter is + // required if you are using a load balancer with your service. If you specify + // the role parameter, you must also specify a load balancer object with the + // loadBalancers parameter. + // + // If your specified role has a path other than /, then you must either specify + // the full role ARN (this is recommended) or prefix the role name with the + // path. For example, if a role with the name bar has a path of /foo/ then you + // would specify /foo/bar as the role name. For more information, see Friendly + // Names and Paths (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-friendly-names) + // in the IAM User Guide. + Role *string `locationName:"role" type:"string"` + + // The name of your service. Up to 255 letters (uppercase and lowercase), numbers, + // hyphens, and underscores are allowed. Service names must be unique within + // a cluster, but you can have similarly named services in multiple clusters + // within a region or across multiple regions. + ServiceName *string `locationName:"serviceName" type:"string" required:"true"` + + // The family and revision (family:revision) or full Amazon Resource Name (ARN) + // of the task definition to run in your service. If a revision is not specified, + // the latest ACTIVE revision is used. + TaskDefinition *string `locationName:"taskDefinition" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateServiceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateServiceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateServiceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateServiceInput"} + if s.DesiredCount == nil { + invalidParams.Add(request.NewErrParamRequired("DesiredCount")) + } + if s.ServiceName == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceName")) + } + if s.TaskDefinition == nil { + invalidParams.Add(request.NewErrParamRequired("TaskDefinition")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateServiceOutput struct { + _ struct{} `type:"structure"` + + // The full description of your service following the create call. 
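+ //
+ // For illustration, a minimal sketch of the call that produces this
+ // description (svc is assumed to be an initialized *ecs.ECS client and the
+ // names are example values; DesiredCount, ServiceName, and TaskDefinition
+ // are the fields required by Validate above):
+ //
+ //    out, err := svc.CreateService(&ecs.CreateServiceInput{
+ //        Cluster:        aws.String("default"),
+ //        ServiceName:    aws.String("web"),
+ //        TaskDefinition: aws.String("web:1"),
+ //        DesiredCount:   aws.Int64(2),
+ //    })
+ //    if err == nil {
+ //        fmt.Println(aws.StringValue(out.Service.ServiceArn))
+ //    }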
+ Service *Service `locationName:"service" type:"structure"` +} + +// String returns the string representation +func (s CreateServiceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateServiceOutput) GoString() string { + return s.String() +} + +type DeleteClusterInput struct { + _ struct{} `type:"structure"` + + // The short name or full Amazon Resource Name (ARN) of the cluster to delete. + Cluster *string `locationName:"cluster" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteClusterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteClusterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteClusterInput"} + if s.Cluster == nil { + invalidParams.Add(request.NewErrParamRequired("Cluster")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteClusterOutput struct { + _ struct{} `type:"structure"` + + // The full description of the deleted cluster. + Cluster *Cluster `locationName:"cluster" type:"structure"` +} + +// String returns the string representation +func (s DeleteClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteClusterOutput) GoString() string { + return s.String() +} + +type DeleteServiceInput struct { + _ struct{} `type:"structure"` + + // The name of the cluster that hosts the service to delete. If you do not specify + // a cluster, the default cluster is assumed. + Cluster *string `locationName:"cluster" type:"string"` + + // The name of the service to delete. + Service *string `locationName:"service" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteServiceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteServiceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteServiceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteServiceInput"} + if s.Service == nil { + invalidParams.Add(request.NewErrParamRequired("Service")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteServiceOutput struct { + _ struct{} `type:"structure"` + + // The full description of the deleted service. + Service *Service `locationName:"service" type:"structure"` +} + +// String returns the string representation +func (s DeleteServiceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteServiceOutput) GoString() string { + return s.String() +} + +// The details of an Amazon ECS service deployment. +type Deployment struct { + _ struct{} `type:"structure"` + + // The Unix time in seconds and milliseconds when the service was created. + CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unix"` + + // The most recent desired count of tasks that was specified for the service + // to deploy or maintain. + DesiredCount *int64 `locationName:"desiredCount" type:"integer"` + + // The ID of the deployment. 
+ Id *string `locationName:"id" type:"string"`
+
+ // The number of tasks in the deployment that are in the PENDING status.
+ PendingCount *int64 `locationName:"pendingCount" type:"integer"`
+
+ // The number of tasks in the deployment that are in the RUNNING status.
+ RunningCount *int64 `locationName:"runningCount" type:"integer"`
+
+ // The status of the deployment. Valid values are PRIMARY (for the most recent
+ // deployment), ACTIVE (for previous deployments that still have tasks running,
+ // but are being replaced with the PRIMARY deployment), and INACTIVE (for deployments
+ // that have been completely replaced).
+ Status *string `locationName:"status" type:"string"`
+
+ // The most recent task definition that was specified for the service to use.
+ TaskDefinition *string `locationName:"taskDefinition" type:"string"`
+
+ // The Unix time in seconds and milliseconds when the service was last updated.
+ UpdatedAt *time.Time `locationName:"updatedAt" type:"timestamp" timestampFormat:"unix"`
+}
+
+// String returns the string representation
+func (s Deployment) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Deployment) GoString() string {
+ return s.String()
+}
+
+// Optional deployment parameters that control how many tasks run during the
+// deployment and the ordering of stopping and starting tasks.
+type DeploymentConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // The upper limit (as a percentage of the service's desiredCount) of the number
+ // of running tasks that can be running in a service during a deployment. The
+ // maximum number of tasks during a deployment is the desiredCount multiplied
+ // by the maximumPercent/100, rounded down to the nearest integer value. For
+ // example, a desiredCount of three and a maximumPercent of 150 allow at most
+ // four running tasks (3 x 1.5 = 4.5, rounded down).
+ MaximumPercent *int64 `locationName:"maximumPercent" type:"integer"`
+
+ // The lower limit (as a percentage of the service's desiredCount) of the number
+ // of running tasks that must remain running and healthy in a service during
+ // a deployment. The minimum healthy tasks during a deployment is the desiredCount
+ // multiplied by the minimumHealthyPercent/100, rounded up to the nearest integer
+ // value. For example, a desiredCount of three and a minimumHealthyPercent of
+ // 50 require at least two healthy tasks (3 x 0.5 = 1.5, rounded up).
+ MinimumHealthyPercent *int64 `locationName:"minimumHealthyPercent" type:"integer"`
+}
+
+// String returns the string representation
+func (s DeploymentConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeploymentConfiguration) GoString() string {
+ return s.String()
+}
+
+type DeregisterContainerInstanceInput struct {
+ _ struct{} `type:"structure"`
+
+ // The short name or full Amazon Resource Name (ARN) of the cluster that hosts
+ // the container instance to deregister. If you do not specify a cluster, the
+ // default cluster is assumed.
+ Cluster *string `locationName:"cluster" type:"string"`
+
+ // The container instance ID or full Amazon Resource Name (ARN) of the container
+ // instance to deregister. The ARN contains the arn:aws:ecs namespace, followed
+ // by the region of the container instance, the AWS account ID of the container
+ // instance owner, the container-instance namespace, and then the container
+ // instance ID. For example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID
+ // .
+ ContainerInstance *string `locationName:"containerInstance" type:"string" required:"true"`
+
+ // Forces the deregistration of the container instance.
If you have tasks running
+ // on the container instance when you deregister it with the force option, these
+ // tasks remain running and they continue to pass Elastic Load Balancing load
+ // balancer health checks until you terminate the instance or the tasks stop
+ // through some other means, but they are orphaned (no longer monitored or accounted
+ // for by Amazon ECS). If an orphaned task on your container instance is part
+ // of an Amazon ECS service, then the service scheduler starts another copy
+ // of that task, on a different container instance if possible.
+ Force *bool `locationName:"force" type:"boolean"`
+}
+
+// String returns the string representation
+func (s DeregisterContainerInstanceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeregisterContainerInstanceInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeregisterContainerInstanceInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeregisterContainerInstanceInput"}
+ if s.ContainerInstance == nil {
+ invalidParams.Add(request.NewErrParamRequired("ContainerInstance"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type DeregisterContainerInstanceOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An EC2 instance that is running the Amazon ECS agent and has been registered
+ // with a cluster.
+ ContainerInstance *ContainerInstance `locationName:"containerInstance" type:"structure"`
+}
+
+// String returns the string representation
+func (s DeregisterContainerInstanceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeregisterContainerInstanceOutput) GoString() string {
+ return s.String()
+}
+
+type DeregisterTaskDefinitionInput struct {
+ _ struct{} `type:"structure"`
+
+ // The family and revision (family:revision) or full Amazon Resource Name (ARN)
+ // of the task definition to deregister. You must specify a revision.
+ TaskDefinition *string `locationName:"taskDefinition" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeregisterTaskDefinitionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeregisterTaskDefinitionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeregisterTaskDefinitionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeregisterTaskDefinitionInput"}
+ if s.TaskDefinition == nil {
+ invalidParams.Add(request.NewErrParamRequired("TaskDefinition"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type DeregisterTaskDefinitionOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The full description of the deregistered task definition.
+ TaskDefinition *TaskDefinition `locationName:"taskDefinition" type:"structure"`
+}
+
+// String returns the string representation
+func (s DeregisterTaskDefinitionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeregisterTaskDefinitionOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeClustersInput struct {
+ _ struct{} `type:"structure"`
+
+ // A space-separated list of cluster names or full cluster Amazon Resource Name
+ // (ARN) entries.
If you do not specify a cluster, the default cluster is assumed. + Clusters []*string `locationName:"clusters" type:"list"` +} + +// String returns the string representation +func (s DescribeClustersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClustersInput) GoString() string { + return s.String() +} + +type DescribeClustersOutput struct { + _ struct{} `type:"structure"` + + // The list of clusters. + Clusters []*Cluster `locationName:"clusters" type:"list"` + + // Any failures associated with the call. + Failures []*Failure `locationName:"failures" type:"list"` +} + +// String returns the string representation +func (s DescribeClustersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClustersOutput) GoString() string { + return s.String() +} + +type DescribeContainerInstancesInput struct { + _ struct{} `type:"structure"` + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts + // the container instances to describe. If you do not specify a cluster, the + // default cluster is assumed. + Cluster *string `locationName:"cluster" type:"string"` + + // A space-separated list of container instance IDs or full Amazon Resource + // Name (ARN) entries. + ContainerInstances []*string `locationName:"containerInstances" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeContainerInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeContainerInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeContainerInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeContainerInstancesInput"} + if s.ContainerInstances == nil { + invalidParams.Add(request.NewErrParamRequired("ContainerInstances")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeContainerInstancesOutput struct { + _ struct{} `type:"structure"` + + // The list of container instances. + ContainerInstances []*ContainerInstance `locationName:"containerInstances" type:"list"` + + // Any failures associated with the call. + Failures []*Failure `locationName:"failures" type:"list"` +} + +// String returns the string representation +func (s DescribeContainerInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeContainerInstancesOutput) GoString() string { + return s.String() +} + +type DescribeServicesInput struct { + _ struct{} `type:"structure"` + + // The name of the cluster that hosts the service to describe. If you do not + // specify a cluster, the default cluster is assumed. + Cluster *string `locationName:"cluster" type:"string"` + + // A list of services to describe. + Services []*string `locationName:"services" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeServicesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeServicesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeServicesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeServicesInput"} + if s.Services == nil { + invalidParams.Add(request.NewErrParamRequired("Services")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeServicesOutput struct { + _ struct{} `type:"structure"` + + // Any failures associated with the call. + Failures []*Failure `locationName:"failures" type:"list"` + + // The list of services described. + Services []*Service `locationName:"services" type:"list"` +} + +// String returns the string representation +func (s DescribeServicesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeServicesOutput) GoString() string { + return s.String() +} + +type DescribeTaskDefinitionInput struct { + _ struct{} `type:"structure"` + + // The family for the latest ACTIVE revision, family and revision (family:revision) + // for a specific revision in the family, or full Amazon Resource Name (ARN) + // of the task definition to describe. + TaskDefinition *string `locationName:"taskDefinition" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeTaskDefinitionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTaskDefinitionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTaskDefinitionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTaskDefinitionInput"} + if s.TaskDefinition == nil { + invalidParams.Add(request.NewErrParamRequired("TaskDefinition")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeTaskDefinitionOutput struct { + _ struct{} `type:"structure"` + + // The full task definition description. + TaskDefinition *TaskDefinition `locationName:"taskDefinition" type:"structure"` +} + +// String returns the string representation +func (s DescribeTaskDefinitionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTaskDefinitionOutput) GoString() string { + return s.String() +} + +type DescribeTasksInput struct { + _ struct{} `type:"structure"` + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts + // the task to describe. If you do not specify a cluster, the default cluster + // is assumed. + Cluster *string `locationName:"cluster" type:"string"` + + // A space-separated list of task IDs or full Amazon Resource Name (ARN) entries. + Tasks []*string `locationName:"tasks" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTasksInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTasksInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTasksInput"} + if s.Tasks == nil { + invalidParams.Add(request.NewErrParamRequired("Tasks")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeTasksOutput struct { + _ struct{} `type:"structure"` + + // Any failures associated with the call. 
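+ //
+ // For illustration, a minimal sketch of checking this list (out is assumed
+ // to be a *ecs.DescribeTasksOutput); per-resource problems are reported here
+ // rather than through the call's error value:
+ //
+ //    for _, f := range out.Failures {
+ //        log.Printf("%s: %s", aws.StringValue(f.Arn), aws.StringValue(f.Reason))
+ //    }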
+ Failures []*Failure `locationName:"failures" type:"list"` + + // The list of tasks. + Tasks []*Task `locationName:"tasks" type:"list"` +} + +// String returns the string representation +func (s DescribeTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTasksOutput) GoString() string { + return s.String() +} + +type DiscoverPollEndpointInput struct { + _ struct{} `type:"structure"` + + // The cluster that the container instance belongs to. + Cluster *string `locationName:"cluster" type:"string"` + + // The container instance ID or full Amazon Resource Name (ARN) of the container + // instance. The ARN contains the arn:aws:ecs namespace, followed by the region + // of the container instance, the AWS account ID of the container instance owner, + // the container-instance namespace, and then the container instance ID. For + // example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID + // . + ContainerInstance *string `locationName:"containerInstance" type:"string"` +} + +// String returns the string representation +func (s DiscoverPollEndpointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DiscoverPollEndpointInput) GoString() string { + return s.String() +} + +type DiscoverPollEndpointOutput struct { + _ struct{} `type:"structure"` + + // The endpoint for the Amazon ECS agent to poll. + Endpoint *string `locationName:"endpoint" type:"string"` + + // The telemetry endpoint for the Amazon ECS agent. + TelemetryEndpoint *string `locationName:"telemetryEndpoint" type:"string"` +} + +// String returns the string representation +func (s DiscoverPollEndpointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DiscoverPollEndpointOutput) GoString() string { + return s.String() +} + +// A failed resource. +type Failure struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the failed resource. + Arn *string `locationName:"arn" type:"string"` + + // The reason for the failure. + Reason *string `locationName:"reason" type:"string"` +} + +// String returns the string representation +func (s Failure) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Failure) GoString() string { + return s.String() +} + +// Hostnames and IP address entries that are added to the /etc/hosts file of +// a container via the extraHosts parameter of its ContainerDefinition. +type HostEntry struct { + _ struct{} `type:"structure"` + + // The hostname to use in the /etc/hosts entry. + Hostname *string `locationName:"hostname" type:"string" required:"true"` + + // The IP address to use in the /etc/hosts entry. + IpAddress *string `locationName:"ipAddress" type:"string" required:"true"` +} + +// String returns the string representation +func (s HostEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HostEntry) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *HostEntry) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HostEntry"} + if s.Hostname == nil { + invalidParams.Add(request.NewErrParamRequired("Hostname")) + } + if s.IpAddress == nil { + invalidParams.Add(request.NewErrParamRequired("IpAddress")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Details on a container instance host volume. +type HostVolumeProperties struct { + _ struct{} `type:"structure"` + + // The path on the host container instance that is presented to the container. + // If this parameter is empty, then the Docker daemon has assigned a host path + // for you. If the host parameter contains a sourcePath file location, then + // the data volume persists at the specified location on the host container + // instance until you delete it manually. If the sourcePath value does not exist + // on the host container instance, the Docker daemon creates it. If the location + // does exist, the contents of the source path folder are exported. + SourcePath *string `locationName:"sourcePath" type:"string"` +} + +// String returns the string representation +func (s HostVolumeProperties) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HostVolumeProperties) GoString() string { + return s.String() +} + +// A key and value pair object. +type KeyValuePair struct { + _ struct{} `type:"structure"` + + // The name of the key value pair. For environment variables, this is the name + // of the environment variable. + Name *string `locationName:"name" type:"string"` + + // The value of the key value pair. For environment variables, this is the value + // of the environment variable. + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s KeyValuePair) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KeyValuePair) GoString() string { + return s.String() +} + +type ListClustersInput struct { + _ struct{} `type:"structure"` + + // The maximum number of cluster results returned by ListClusters in paginated + // output. When this parameter is used, ListClusters only returns maxResults + // results in a single page along with a nextToken response element. The remaining + // results of the initial request can be seen by sending another ListClusters + // request with the returned nextToken value. This value can be between 1 and + // 100. If this parameter is not used, then ListClusters returns up to 100 results + // and a nextToken value if applicable. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The nextToken value returned from a previous paginated ListClusters request + // where maxResults was used and the results exceeded the value of that parameter. + // Pagination continues from the end of the previous results that returned the + // nextToken value. This value is null when there are no more results to return. + // + // This token should be treated as an opaque identifier that is only used + // to retrieve the next items in a list and not for other programmatic purposes. 
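+ //
+ // For illustration, a minimal sketch of a manual pagination loop driven by
+ // this token (svc is assumed to be an initialized *ecs.ECS client inside a
+ // function that returns an error):
+ //
+ //    var arns []*string
+ //    input := &ecs.ListClustersInput{}
+ //    for {
+ //        out, err := svc.ListClusters(input)
+ //        if err != nil {
+ //            return err
+ //        }
+ //        arns = append(arns, out.ClusterArns...)
+ //        if out.NextToken == nil {
+ //            break
+ //        }
+ //        input.NextToken = out.NextToken
+ //    }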
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListClustersInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListClustersInput) GoString() string {
+ return s.String()
+}
+
+type ListClustersOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The list of full Amazon Resource Name (ARN) entries for each cluster associated
+ // with your account.
+ ClusterArns []*string `locationName:"clusterArns" type:"list"`
+
+ // The nextToken value to include in a future ListClusters request. When the
+ // results of a ListClusters request exceed maxResults, this value can be used
+ // to retrieve the next page of results. This value is null when there are no
+ // more results to return.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListClustersOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListClustersOutput) GoString() string {
+ return s.String()
+}
+
+type ListContainerInstancesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The short name or full Amazon Resource Name (ARN) of the cluster that hosts
+ // the container instances to list. If you do not specify a cluster, the default
+ // cluster is assumed.
+ Cluster *string `locationName:"cluster" type:"string"`
+
+ // The maximum number of container instance results returned by ListContainerInstances
+ // in paginated output. When this parameter is used, ListContainerInstances
+ // only returns maxResults results in a single page along with a nextToken response
+ // element. The remaining results of the initial request can be seen by sending
+ // another ListContainerInstances request with the returned nextToken value.
+ // This value can be between 1 and 100. If this parameter is not used, then
+ // ListContainerInstances returns up to 100 results and a nextToken value if
+ // applicable.
+ MaxResults *int64 `locationName:"maxResults" type:"integer"`
+
+ // The nextToken value returned from a previous paginated ListContainerInstances
+ // request where maxResults was used and the results exceeded the value of that
+ // parameter. Pagination continues from the end of the previous results that
+ // returned the nextToken value. This value is null when there are no more results
+ // to return.
+ //
+ // This token should be treated as an opaque identifier that is only used
+ // to retrieve the next items in a list and not for other programmatic purposes.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListContainerInstancesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListContainerInstancesInput) GoString() string {
+ return s.String()
+}
+
+type ListContainerInstancesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The list of container instances with full Amazon Resource Name (ARN) entries
+ // for each container instance associated with the specified cluster.
+ ContainerInstanceArns []*string `locationName:"containerInstanceArns" type:"list"`
+
+ // The nextToken value to include in a future ListContainerInstances request.
+ // When the results of a ListContainerInstances request exceed maxResults, this
+ // value can be used to retrieve the next page of results.
This value is null
+ // when there are no more results to return.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListContainerInstancesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListContainerInstancesOutput) GoString() string {
+ return s.String()
+}
+
+type ListServicesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The short name or full Amazon Resource Name (ARN) of the cluster that hosts
+ // the services to list. If you do not specify a cluster, the default cluster
+ // is assumed.
+ Cluster *string `locationName:"cluster" type:"string"`
+
+ // The maximum number of service results returned by ListServices
+ // in paginated output. When this parameter is used, ListServices only returns
+ // maxResults results in a single page along with a nextToken response element.
+ // The remaining results of the initial request can be seen by sending another
+ // ListServices request with the returned nextToken value. This value can be
+ // between 1 and 10. If this parameter is not used, then ListServices returns
+ // up to 10 results and a nextToken value if applicable.
+ MaxResults *int64 `locationName:"maxResults" type:"integer"`
+
+ // The nextToken value returned from a previous paginated ListServices request
+ // where maxResults was used and the results exceeded the value of that parameter.
+ // Pagination continues from the end of the previous results that returned the
+ // nextToken value. This value is null when there are no more results to return.
+ //
+ // This token should be treated as an opaque identifier that is only used
+ // to retrieve the next items in a list and not for other programmatic purposes.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListServicesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListServicesInput) GoString() string {
+ return s.String()
+}
+
+type ListServicesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The nextToken value to include in a future ListServices request. When the
+ // results of a ListServices request exceed maxResults, this value can be used
+ // to retrieve the next page of results. This value is null when there are no
+ // more results to return.
+ NextToken *string `locationName:"nextToken" type:"string"`
+
+ // The list of full Amazon Resource Name (ARN) entries for each service associated
+ // with the specified cluster.
+ ServiceArns []*string `locationName:"serviceArns" type:"list"`
+}
+
+// String returns the string representation
+func (s ListServicesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListServicesOutput) GoString() string {
+ return s.String()
+}
+
+type ListTaskDefinitionFamiliesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The familyPrefix is a string that is used to filter the results of ListTaskDefinitionFamilies.
+ // If you specify a familyPrefix, only task definition family names that begin
+ // with the familyPrefix string are returned.
+ FamilyPrefix *string `locationName:"familyPrefix" type:"string"`
+
+ // The maximum number of task definition family results returned by ListTaskDefinitionFamilies
+ // in paginated output.
When this parameter is used, ListTaskDefinitionFamilies only
+ // returns maxResults results in a single page along with a nextToken response
+ // element. The remaining results of the initial request can be seen by sending
+ // another ListTaskDefinitionFamilies request with the returned nextToken value.
+ // This value can be between 1 and 100. If this parameter is not used, then
+ // ListTaskDefinitionFamilies returns up to 100 results and a nextToken value
+ // if applicable.
+ MaxResults *int64 `locationName:"maxResults" type:"integer"`
+
+ // The nextToken value returned from a previous paginated ListTaskDefinitionFamilies
+ // request where maxResults was used and the results exceeded the value of that
+ // parameter. Pagination continues from the end of the previous results that
+ // returned the nextToken value. This value is null when there are no more results
+ // to return.
+ //
+ // This token should be treated as an opaque identifier that is only used
+ // to retrieve the next items in a list and not for other programmatic purposes.
+ NextToken *string `locationName:"nextToken" type:"string"`
+
+ // The task definition family status with which to filter the ListTaskDefinitionFamilies
+ // results. By default, both ACTIVE and INACTIVE task definition families are
+ // listed. If this parameter is set to ACTIVE, only task definition families
+ // that have an ACTIVE task definition revision are returned. If this parameter
+ // is set to INACTIVE, only task definition families that do not have any ACTIVE
+ // task definition revisions are returned. If you paginate the resulting output,
+ // be sure to keep the status value constant in each subsequent request.
+ Status *string `locationName:"status" type:"string" enum:"TaskDefinitionFamilyStatus"`
+}
+
+// String returns the string representation
+func (s ListTaskDefinitionFamiliesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTaskDefinitionFamiliesInput) GoString() string {
+ return s.String()
+}
+
+type ListTaskDefinitionFamiliesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The list of task definition family names that match the ListTaskDefinitionFamilies
+ // request.
+ Families []*string `locationName:"families" type:"list"`
+
+ // The nextToken value to include in a future ListTaskDefinitionFamilies request.
+ // When the results of a ListTaskDefinitionFamilies request exceed maxResults,
+ // this value can be used to retrieve the next page of results. This value is
+ // null when there are no more results to return.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListTaskDefinitionFamiliesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTaskDefinitionFamiliesOutput) GoString() string {
+ return s.String()
+}
+
+type ListTaskDefinitionsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The full family name with which to filter the ListTaskDefinitions results.
+ // Specifying a familyPrefix limits the listed task definitions to task definition
+ // revisions that belong to that family.
+ FamilyPrefix *string `locationName:"familyPrefix" type:"string"`
+
+ // The maximum number of task definition results returned by ListTaskDefinitions
+ // in paginated output. When this parameter is used, ListTaskDefinitions only
+ // returns maxResults results in a single page along with a nextToken response
+ // element.
The remaining results of the initial request can be seen by sending
+ // another ListTaskDefinitions request with the returned nextToken value. This
+ // value can be between 1 and 100. If this parameter is not used, then ListTaskDefinitions
+ // returns up to 100 results and a nextToken value if applicable.
+ MaxResults *int64 `locationName:"maxResults" type:"integer"`
+
+ // The nextToken value returned from a previous paginated ListTaskDefinitions
+ // request where maxResults was used and the results exceeded the value of that
+ // parameter. Pagination continues from the end of the previous results that
+ // returned the nextToken value. This value is null when there are no more results
+ // to return.
+ //
+ // This token should be treated as an opaque identifier that is only used
+ // to retrieve the next items in a list and not for other programmatic purposes.
+ NextToken *string `locationName:"nextToken" type:"string"`
+
+ // The order in which to sort the results. Valid values are ASC and DESC. By
+ // default (ASC), task definitions are listed lexicographically by family name
+ // and in ascending numerical order by revision so that the newest task definitions
+ // in a family are listed last. Setting this parameter to DESC reverses the
+ // sort order on family name and revision so that the newest task definitions
+ // in a family are listed first.
+ Sort *string `locationName:"sort" type:"string" enum:"SortOrder"`
+
+ // The task definition status with which to filter the ListTaskDefinitions results.
+ // By default, only ACTIVE task definitions are listed. By setting this parameter
+ // to INACTIVE, you can view task definitions that are INACTIVE as long as an
+ // active task or service still references them. If you paginate the resulting
+ // output, be sure to keep the status value constant in each subsequent request.
+ Status *string `locationName:"status" type:"string" enum:"TaskDefinitionStatus"`
+}
+
+// String returns the string representation
+func (s ListTaskDefinitionsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTaskDefinitionsInput) GoString() string {
+ return s.String()
+}
+
+type ListTaskDefinitionsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The nextToken value to include in a future ListTaskDefinitions request. When
+ // the results of a ListTaskDefinitions request exceed maxResults, this value
+ // can be used to retrieve the next page of results. This value is null when
+ // there are no more results to return.
+ NextToken *string `locationName:"nextToken" type:"string"`
+
+ // The list of task definition Amazon Resource Name (ARN) entries for the ListTaskDefinitions
+ // request.
+ TaskDefinitionArns []*string `locationName:"taskDefinitionArns" type:"list"`
+}
+
+// String returns the string representation
+func (s ListTaskDefinitionsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTaskDefinitionsOutput) GoString() string {
+ return s.String()
+}
+
+type ListTasksInput struct {
+ _ struct{} `type:"structure"`
+
+ // The short name or full Amazon Resource Name (ARN) of the cluster that hosts
+ // the tasks to list. If you do not specify a cluster, the default cluster is
+ // assumed.
+ Cluster *string `locationName:"cluster" type:"string"`
+
+ // The container instance ID or full Amazon Resource Name (ARN) of the container
+ // instance with which to filter the ListTasks results.
Specifying a containerInstance + // limits the results to tasks that belong to that container instance. + ContainerInstance *string `locationName:"containerInstance" type:"string"` + + // The task status with which to filter the ListTasks results. Specifying a + // desiredStatus of STOPPED limits the results to tasks that are in the STOPPED + // status, which can be useful for debugging tasks that are not starting properly + // or have died or finished. The default status filter is RUNNING. + DesiredStatus *string `locationName:"desiredStatus" type:"string" enum:"DesiredStatus"` + + // The name of the family with which to filter the ListTasks results. Specifying + // a family limits the results to tasks that belong to that family. + Family *string `locationName:"family" type:"string"` + + // The maximum number of task results returned by ListTasks in paginated output. + // When this parameter is used, ListTasks only returns maxResults results in + // a single page along with a nextToken response element. The remaining results + // of the initial request can be seen by sending another ListTasks request with + // the returned nextToken value. This value can be between 1 and 100. If this + // parameter is not used, then ListTasks returns up to 100 results and a nextToken + // value if applicable. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The nextToken value returned from a previous paginated ListTasks request + // where maxResults was used and the results exceeded the value of that parameter. + // Pagination continues from the end of the previous results that returned the + // nextToken value. This value is null when there are no more results to return. + // + // This token should be treated as an opaque identifier that is only used + // to retrieve the next items in a list and not for other programmatic purposes. + NextToken *string `locationName:"nextToken" type:"string"` + + // The name of the service with which to filter the ListTasks results. Specifying + // a serviceName limits the results to tasks that belong to that service. + ServiceName *string `locationName:"serviceName" type:"string"` + + // The startedBy value with which to filter the task results. Specifying a startedBy + // value limits the results to tasks that were started with that value. + StartedBy *string `locationName:"startedBy" type:"string"` +} + +// String returns the string representation +func (s ListTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTasksInput) GoString() string { + return s.String() +} + +type ListTasksOutput struct { + _ struct{} `type:"structure"` + + // The nextToken value to include in a future ListTasks request. When the results + // of a ListTasks request exceed maxResults, this value can be used to retrieve + // the next page of results. This value is null when there are no more results + // to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The list of task Amazon Resource Name (ARN) entries for the ListTasks request. + TaskArns []*string `locationName:"taskArns" type:"list"` +} + +// String returns the string representation +func (s ListTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTasksOutput) GoString() string { + return s.String() +} + +// Details on a load balancer that is used with a service. 
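+//
+// For illustration, a minimal sketch of an entry as it might be passed in
+// CreateServiceInput.LoadBalancers (the names and port are example values;
+// ContainerPort must match a containerPort in the task definition):
+//
+//    lb := &ecs.LoadBalancer{
+//        LoadBalancerName: aws.String("my-elb"),
+//        ContainerName:    aws.String("web"),
+//        ContainerPort:    aws.Int64(80),
+//    }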
+type LoadBalancer struct { + _ struct{} `type:"structure"` + + // The name of the container (as it appears in a container definition) to associate + // with the load balancer. + ContainerName *string `locationName:"containerName" type:"string"` + + // The port on the container to associate with the load balancer. This port + // must correspond to a containerPort in the service's task definition. Your + // container instances must allow ingress traffic on the hostPort of the port + // mapping. + ContainerPort *int64 `locationName:"containerPort" type:"integer"` + + // The name of the load balancer. + LoadBalancerName *string `locationName:"loadBalancerName" type:"string"` +} + +// String returns the string representation +func (s LoadBalancer) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoadBalancer) GoString() string { + return s.String() +} + +// Log configuration options to send to a custom log driver for the container. +type LogConfiguration struct { + _ struct{} `type:"structure"` + + // The log driver to use for the container. The valid values listed for this + // parameter are log drivers that the Amazon ECS container agent can communicate + // with by default. + // + // If you have a custom driver that is not listed above that you would like + // to work with the Amazon ECS container agent, you can fork the Amazon ECS + // container agent project that is available on GitHub (https://github.com/aws/amazon-ecs-agent) + // and customize it to work with that driver. We encourage you to submit pull + // requests for changes that you would like to have included. However, Amazon + // Web Services does not currently provide support for running modified copies + // of this software. + // + // This parameter requires version 1.18 of the Docker Remote API or greater + // on your container instance. To check the Docker Remote API version on your + // container instance, log into your container instance and run the following + // command: sudo docker version | grep "Server API version" + LogDriver *string `locationName:"logDriver" type:"string" required:"true" enum:"LogDriver"` + + // The configuration options to send to the log driver. This parameter requires + // version 1.19 of the Docker Remote API or greater on your container instance. + // To check the Docker Remote API version on your container instance, log into + // your container instance and run the following command: sudo docker version + // | grep "Server API version" + Options map[string]*string `locationName:"options" type:"map"` +} + +// String returns the string representation +func (s LogConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LogConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LogConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LogConfiguration"} + if s.LogDriver == nil { + invalidParams.Add(request.NewErrParamRequired("LogDriver")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Details on a volume mount point that is used in a container definition. +type MountPoint struct { + _ struct{} `type:"structure"` + + // The path on the container to mount the host volume at. 
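+ //
+ // For illustration, a minimal sketch of a mount point that references a
+ // volume named in the task definition (the names and path are example
+ // values):
+ //
+ //    mp := &ecs.MountPoint{
+ //        SourceVolume:  aws.String("data"),
+ //        ContainerPath: aws.String("/var/lib/data"),
+ //        ReadOnly:      aws.Bool(false),
+ //    }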
+	ContainerPath *string `locationName:"containerPath" type:"string"`
+
+	// If this value is true, the container has read-only access to the volume.
+	// If this value is false, then the container can write to the volume. The default
+	// value is false.
+	ReadOnly *bool `locationName:"readOnly" type:"boolean"`
+
+	// The name of the volume to mount.
+	SourceVolume *string `locationName:"sourceVolume" type:"string"`
+}
+
+// String returns the string representation
+func (s MountPoint) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s MountPoint) GoString() string {
+	return s.String()
+}
+
+// Details on the network bindings between a container and its host container
+// instance. After a task reaches the RUNNING status, manual and automatic host
+// and container port assignments are visible in the networkBindings section
+// of DescribeTasks API responses.
+type NetworkBinding struct {
+	_ struct{} `type:"structure"`
+
+	// The IP address that the container is bound to on the container instance.
+	BindIP *string `locationName:"bindIP" type:"string"`
+
+	// The port number on the container that is used with the network binding.
+	ContainerPort *int64 `locationName:"containerPort" type:"integer"`
+
+	// The port number on the host that is used with the network binding.
+	HostPort *int64 `locationName:"hostPort" type:"integer"`
+
+	// The protocol used for the network binding.
+	Protocol *string `locationName:"protocol" type:"string" enum:"TransportProtocol"`
+}
+
+// String returns the string representation
+func (s NetworkBinding) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s NetworkBinding) GoString() string {
+	return s.String()
+}
+
+// Port mappings allow containers to access ports on the host container instance
+// to send or receive traffic. Port mappings are specified as part of the container
+// definition. After a task reaches the RUNNING status, manual and automatic
+// host and container port assignments are visible in the networkBindings section
+// of DescribeTasks API responses.
+type PortMapping struct {
+	_ struct{} `type:"structure"`
+
+	// The port number on the container that is bound to the user-specified or automatically
+	// assigned host port. If you specify a container port and not a host port,
+	// your container automatically receives a host port in the ephemeral port range
+	// (for more information, see hostPort). Port mappings that are automatically
+	// assigned in this way do not count toward the 100 reserved ports limit of
+	// a container instance.
+	ContainerPort *int64 `locationName:"containerPort" type:"integer"`
+
+	// The port number on the container instance to reserve for your container.
+	// You can specify a non-reserved host port for your container port mapping,
+	// or you can omit the hostPort (or set it to 0) while specifying a containerPort
+	// and your container automatically receives a port in the ephemeral port range
+	// for your container instance operating system and Docker version.
+	//
+	// The default ephemeral port range is 49153 to 65535, and this range is used
+	// for Docker versions prior to 1.6.0. For Docker version 1.6.0 and later, the
+	// Docker daemon tries to read the ephemeral port range from /proc/sys/net/ipv4/ip_local_port_range;
+	// if this kernel parameter is unavailable, the default ephemeral port range
+	// is used.
+	// You should not attempt to specify a host port in the ephemeral port
+	// range, because these are reserved for automatic assignment. In general, ports
+	// below 32768 are outside of the ephemeral port range.
+	//
+	// The default reserved ports are 22 for SSH, the Docker ports 2375 and 2376,
+	// and the Amazon ECS container agent port 51678. Any host port that was previously
+	// specified in a running task is also reserved while the task is running (after
+	// a task stops, the host port is released). The current reserved ports are displayed
+	// in the remainingResources of DescribeContainerInstances output, and a container
+	// instance may have up to 100 reserved ports at a time, including the default
+	// reserved ports (automatically assigned ports do not count toward the 100
+	// reserved ports limit).
+	HostPort *int64 `locationName:"hostPort" type:"integer"`
+
+	// The protocol used for the port mapping. Valid values are tcp and udp. The
+	// default is tcp.
+	Protocol *string `locationName:"protocol" type:"string" enum:"TransportProtocol"`
+}
+
+// String returns the string representation
+func (s PortMapping) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PortMapping) GoString() string {
+	return s.String()
+}
+
+type RegisterContainerInstanceInput struct {
+	_ struct{} `type:"structure"`
+
+	// The container instance attributes that this container instance supports.
+	Attributes []*Attribute `locationName:"attributes" type:"list"`
+
+	// The short name or full Amazon Resource Name (ARN) of the cluster with which
+	// to register your container instance. If you do not specify a cluster, the
+	// default cluster is assumed.
+	Cluster *string `locationName:"cluster" type:"string"`
+
+	// The Amazon Resource Name (ARN) of the container instance (if it was previously
+	// registered).
+	ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"`
+
+	// The instance identity document for the EC2 instance to register. This document
+	// can be found by running the following command from the instance: curl http://169.254.169.254/latest/dynamic/instance-identity/document/
+	InstanceIdentityDocument *string `locationName:"instanceIdentityDocument" type:"string"`
+
+	// The instance identity document signature for the EC2 instance to register.
+	// This signature can be found by running the following command from the instance:
+	// curl http://169.254.169.254/latest/dynamic/instance-identity/signature/
+	InstanceIdentityDocumentSignature *string `locationName:"instanceIdentityDocumentSignature" type:"string"`
+
+	// The resources available on the instance.
+	TotalResources []*Resource `locationName:"totalResources" type:"list"`
+
+	// The version information for the Amazon ECS container agent and Docker daemon
+	// running on the container instance.
+	VersionInfo *VersionInfo `locationName:"versionInfo" type:"structure"`
+}
+
+// String returns the string representation
+func (s RegisterContainerInstanceInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterContainerInstanceInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
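+// For example (an illustrative sketch, not generated documentation), a caller
+// can check an input locally before sending the request:
+//
+//	in := &ecs.RegisterContainerInstanceInput{}
+//	if err := in.Validate(); err != nil {
+//	    fmt.Println(err) // reports any missing or invalid parameters
+//	}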
+func (s *RegisterContainerInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RegisterContainerInstanceInput"} + if s.Attributes != nil { + for i, v := range s.Attributes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Attributes", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RegisterContainerInstanceOutput struct { + _ struct{} `type:"structure"` + + // An EC2 instance that is running the Amazon ECS agent and has been registered + // with a cluster. + ContainerInstance *ContainerInstance `locationName:"containerInstance" type:"structure"` +} + +// String returns the string representation +func (s RegisterContainerInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterContainerInstanceOutput) GoString() string { + return s.String() +} + +type RegisterTaskDefinitionInput struct { + _ struct{} `type:"structure"` + + // A list of container definitions in JSON format that describe the different + // containers that make up your task. + ContainerDefinitions []*ContainerDefinition `locationName:"containerDefinitions" type:"list" required:"true"` + + // You must specify a family for a task definition, which allows you to track + // multiple versions of the same task definition. The family is used as a name + // for your task definition. Up to 255 letters (uppercase and lowercase), numbers, + // hyphens, and underscores are allowed. + Family *string `locationName:"family" type:"string" required:"true"` + + // A list of volume definitions in JSON format that containers in your task + // may use. + Volumes []*Volume `locationName:"volumes" type:"list"` +} + +// String returns the string representation +func (s RegisterTaskDefinitionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterTaskDefinitionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RegisterTaskDefinitionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RegisterTaskDefinitionInput"} + if s.ContainerDefinitions == nil { + invalidParams.Add(request.NewErrParamRequired("ContainerDefinitions")) + } + if s.Family == nil { + invalidParams.Add(request.NewErrParamRequired("Family")) + } + if s.ContainerDefinitions != nil { + for i, v := range s.ContainerDefinitions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ContainerDefinitions", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RegisterTaskDefinitionOutput struct { + _ struct{} `type:"structure"` + + // The full description of the registered task definition. + TaskDefinition *TaskDefinition `locationName:"taskDefinition" type:"structure"` +} + +// String returns the string representation +func (s RegisterTaskDefinitionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterTaskDefinitionOutput) GoString() string { + return s.String() +} + +// Describes the resources available for a container instance. 
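+// For example (illustrative values, not generated documentation), the default
+// PORTS resource of a container instance is reported as a string set:
+//
+//	ecs.Resource{
+//	    Name:           aws.String("PORTS"),
+//	    Type:           aws.String("STRINGSET"),
+//	    StringSetValue: []*string{aws.String("22"), aws.String("2376"), aws.String("51678")},
+//	}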
+type Resource struct {
+	_ struct{} `type:"structure"`
+
+	// When the doubleValue type is set, the value of the resource must be a double
+	// precision floating-point type.
+	DoubleValue *float64 `locationName:"doubleValue" type:"double"`
+
+	// When the integerValue type is set, the value of the resource must be an integer.
+	IntegerValue *int64 `locationName:"integerValue" type:"integer"`
+
+	// When the longValue type is set, the value of the resource must be a long
+	// integer type.
+	LongValue *int64 `locationName:"longValue" type:"long"`
+
+	// The name of the resource, such as CPU, MEMORY, PORTS, or a user-defined resource.
+	Name *string `locationName:"name" type:"string"`
+
+	// When the stringSetValue type is set, the value of the resource must be a
+	// string type.
+	StringSetValue []*string `locationName:"stringSetValue" type:"list"`
+
+	// The type of the resource, such as INTEGER, DOUBLE, LONG, or STRINGSET.
+	Type *string `locationName:"type" type:"string"`
+}
+
+// String returns the string representation
+func (s Resource) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Resource) GoString() string {
+	return s.String()
+}
+
+type RunTaskInput struct {
+	_ struct{} `type:"structure"`
+
+	// The short name or full Amazon Resource Name (ARN) of the cluster on which
+	// to run your task. If you do not specify a cluster, the default cluster is
+	// assumed.
+	Cluster *string `locationName:"cluster" type:"string"`
+
+	// The number of instantiations of the specified task to place on your cluster.
+	//
+	// The count parameter is limited to 10 tasks per call.
+	Count *int64 `locationName:"count" type:"integer"`
+
+	// A list of container overrides in JSON format that specify the name of a container
+	// in the specified task definition and the overrides it should receive. You
+	// can override the default command for a container (that is specified in the
+	// task definition or Docker image) with a command override. You can also override
+	// existing environment variables (that are specified in the task definition
+	// or Docker image) on a container or add new environment variables to it with
+	// an environment override.
+	//
+	// A total of 8192 characters are allowed for overrides. This limit includes
+	// the JSON formatting characters of the override structure.
+	Overrides *TaskOverride `locationName:"overrides" type:"structure"`
+
+	// An optional tag specified when a task is started. For example, if you automatically
+	// trigger a task to run a batch process job, you could apply a unique identifier
+	// for that job to your task with the startedBy parameter. You can then identify
+	// which tasks belong to that job by filtering the results of a ListTasks call
+	// with the startedBy value. Up to 36 letters (uppercase and lowercase), numbers,
+	// hyphens, and underscores are allowed.
+	//
+	// If a task is started by an Amazon ECS service, then the startedBy parameter
+	// contains the deployment ID of the service that starts it.
+	StartedBy *string `locationName:"startedBy" type:"string"`
+
+	// The family and revision (family:revision) or full Amazon Resource Name (ARN)
+	// of the task definition to run. If a revision is not specified, the latest
+	// ACTIVE revision is used.
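+	// For example, "my-family:3" (an illustrative name) runs revision 3 of that
+	// family, while "my-family" alone runs its latest ACTIVE revision.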
+	TaskDefinition *string `locationName:"taskDefinition" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RunTaskInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RunTaskInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RunTaskInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "RunTaskInput"}
+	if s.TaskDefinition == nil {
+		invalidParams.Add(request.NewErrParamRequired("TaskDefinition"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type RunTaskOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Any failures associated with the call.
+	Failures []*Failure `locationName:"failures" type:"list"`
+
+	// A full description of the tasks that were run. Each task that was successfully
+	// placed on your cluster is described here.
+	Tasks []*Task `locationName:"tasks" type:"list"`
+}
+
+// String returns the string representation
+func (s RunTaskOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RunTaskOutput) GoString() string {
+	return s.String()
+}
+
+// Details on a service within a cluster.
+type Service struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) of the cluster that hosts the service.
+	ClusterArn *string `locationName:"clusterArn" type:"string"`
+
+	// The Unix time in seconds and milliseconds when the service was created.
+	CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unix"`
+
+	// Optional deployment parameters that control how many tasks run during the
+	// deployment and the ordering of stopping and starting tasks.
+	DeploymentConfiguration *DeploymentConfiguration `locationName:"deploymentConfiguration" type:"structure"`
+
+	// The current state of deployments for the service.
+	Deployments []*Deployment `locationName:"deployments" type:"list"`
+
+	// The desired number of instantiations of the task definition to keep running
+	// on the service. This value is specified when the service is created with
+	// CreateService, and it can be modified with UpdateService.
+	DesiredCount *int64 `locationName:"desiredCount" type:"integer"`
+
+	// The event stream for your service. A maximum of 100 of the latest events
+	// are displayed.
+	Events []*ServiceEvent `locationName:"events" type:"list"`
+
+	// A list of load balancer objects, containing the load balancer name, the container
+	// name (as it appears in a container definition), and the container port to
+	// access from the load balancer.
+	LoadBalancers []*LoadBalancer `locationName:"loadBalancers" type:"list"`
+
+	// The number of tasks in the cluster that are in the PENDING state.
+	PendingCount *int64 `locationName:"pendingCount" type:"integer"`
+
+	// The Amazon Resource Name (ARN) of the IAM role associated with the service
+	// that allows the Amazon ECS container agent to register container instances
+	// with a load balancer.
+	RoleArn *string `locationName:"roleArn" type:"string"`
+
+	// The number of tasks in the cluster that are in the RUNNING state.
+	RunningCount *int64 `locationName:"runningCount" type:"integer"`
+
+	// The Amazon Resource Name (ARN) that identifies the service.
+	// The ARN contains the arn:aws:ecs namespace, followed by the region of the
+	// service, the AWS account ID of the service owner, the service namespace,
+	// and then the service name. For example, arn:aws:ecs:region:012345678910:service/my-service.
+	ServiceArn *string `locationName:"serviceArn" type:"string"`
+
+	// The name of your service. Up to 255 letters (uppercase and lowercase), numbers,
+	// hyphens, and underscores are allowed. Service names must be unique within
+	// a cluster, but you can have similarly named services in multiple clusters
+	// within a region or across multiple regions.
+	ServiceName *string `locationName:"serviceName" type:"string"`
+
+	// The status of the service. The valid values are ACTIVE, DRAINING, or INACTIVE.
+	Status *string `locationName:"status" type:"string"`
+
+	// The task definition to use for tasks in the service. This value is specified
+	// when the service is created with CreateService, and it can be modified with
+	// UpdateService.
+	TaskDefinition *string `locationName:"taskDefinition" type:"string"`
+}
+
+// String returns the string representation
+func (s Service) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Service) GoString() string {
+	return s.String()
+}
+
+// Details on an event associated with a service.
+type ServiceEvent struct {
+	_ struct{} `type:"structure"`
+
+	// The Unix time in seconds and milliseconds when the event was triggered.
+	CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unix"`
+
+	// The ID string of the event.
+	Id *string `locationName:"id" type:"string"`
+
+	// The event message.
+	Message *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation
+func (s ServiceEvent) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ServiceEvent) GoString() string {
+	return s.String()
+}
+
+type StartTaskInput struct {
+	_ struct{} `type:"structure"`
+
+	// The short name or full Amazon Resource Name (ARN) of the cluster on which
+	// to start your task. If you do not specify a cluster, the default cluster
+	// is assumed.
+	Cluster *string `locationName:"cluster" type:"string"`
+
+	// The container instance IDs or full Amazon Resource Name (ARN) entries for
+	// the container instances on which you would like to place your task.
+	//
+	// The list of container instances to start tasks on is limited to 10.
+	ContainerInstances []*string `locationName:"containerInstances" type:"list" required:"true"`
+
+	// A list of container overrides in JSON format that specify the name of a container
+	// in the specified task definition and the overrides it should receive. You
+	// can override the default command for a container (that is specified in the
+	// task definition or Docker image) with a command override. You can also override
+	// existing environment variables (that are specified in the task definition
+	// or Docker image) on a container or add new environment variables to it with
+	// an environment override.
+	//
+	// A total of 8192 characters are allowed for overrides. This limit includes
+	// the JSON formatting characters of the override structure.
+	Overrides *TaskOverride `locationName:"overrides" type:"structure"`
+
+	// An optional tag specified when a task is started.
+	// For example, if you automatically trigger a task to run a batch process
+	// job, you could apply a unique identifier for that job to your task with
+	// the startedBy parameter. You can then identify which tasks belong to that
+	// job by filtering the results of a ListTasks call with the startedBy value.
+	// Up to 36 letters (uppercase and lowercase), numbers, hyphens, and underscores
+	// are allowed.
+	//
+	// If a task is started by an Amazon ECS service, then the startedBy parameter
+	// contains the deployment ID of the service that starts it.
+	StartedBy *string `locationName:"startedBy" type:"string"`
+
+	// The family and revision (family:revision) or full Amazon Resource Name (ARN)
+	// of the task definition to start. If a revision is not specified, the latest
+	// ACTIVE revision is used.
+	TaskDefinition *string `locationName:"taskDefinition" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s StartTaskInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StartTaskInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *StartTaskInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "StartTaskInput"}
+	if s.ContainerInstances == nil {
+		invalidParams.Add(request.NewErrParamRequired("ContainerInstances"))
+	}
+	if s.TaskDefinition == nil {
+		invalidParams.Add(request.NewErrParamRequired("TaskDefinition"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type StartTaskOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Any failures associated with the call.
+	Failures []*Failure `locationName:"failures" type:"list"`
+
+	// A full description of the tasks that were started. Each task that was successfully
+	// placed on your container instances is described here.
+	Tasks []*Task `locationName:"tasks" type:"list"`
+}
+
+// String returns the string representation
+func (s StartTaskOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StartTaskOutput) GoString() string {
+	return s.String()
+}
+
+type StopTaskInput struct {
+	_ struct{} `type:"structure"`
+
+	// The short name or full Amazon Resource Name (ARN) of the cluster that hosts
+	// the task to stop. If you do not specify a cluster, the default cluster is
+	// assumed.
+	Cluster *string `locationName:"cluster" type:"string"`
+
+	// An optional message specified when a task is stopped. For example, if you
+	// are using a custom scheduler, you can use this parameter to specify the reason
+	// for stopping the task, and the message will appear in subsequent DescribeTasks
+	// API operations on this task. Up to 255 characters are allowed in this message.
+	Reason *string `locationName:"reason" type:"string"`
+
+	// The task ID or full Amazon Resource Name (ARN) entry of the task to stop.
+	Task *string `locationName:"task" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s StopTaskInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StopTaskInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *StopTaskInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StopTaskInput"} + if s.Task == nil { + invalidParams.Add(request.NewErrParamRequired("Task")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type StopTaskOutput struct { + _ struct{} `type:"structure"` + + // Details on a task in a cluster. + Task *Task `locationName:"task" type:"structure"` +} + +// String returns the string representation +func (s StopTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopTaskOutput) GoString() string { + return s.String() +} + +type SubmitContainerStateChangeInput struct { + _ struct{} `type:"structure"` + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts + // the container. + Cluster *string `locationName:"cluster" type:"string"` + + // The name of the container. + ContainerName *string `locationName:"containerName" type:"string"` + + // The exit code returned for the state change request. + ExitCode *int64 `locationName:"exitCode" type:"integer"` + + // The network bindings of the container. + NetworkBindings []*NetworkBinding `locationName:"networkBindings" type:"list"` + + // The reason for the state change request. + Reason *string `locationName:"reason" type:"string"` + + // The status of the state change request. + Status *string `locationName:"status" type:"string"` + + // The task ID or full Amazon Resource Name (ARN) of the task that hosts the + // container. + Task *string `locationName:"task" type:"string"` +} + +// String returns the string representation +func (s SubmitContainerStateChangeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SubmitContainerStateChangeInput) GoString() string { + return s.String() +} + +type SubmitContainerStateChangeOutput struct { + _ struct{} `type:"structure"` + + // Acknowledgement of the state change. + Acknowledgment *string `locationName:"acknowledgment" type:"string"` +} + +// String returns the string representation +func (s SubmitContainerStateChangeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SubmitContainerStateChangeOutput) GoString() string { + return s.String() +} + +type SubmitTaskStateChangeInput struct { + _ struct{} `type:"structure"` + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts + // the task. + Cluster *string `locationName:"cluster" type:"string"` + + // The reason for the state change request. + Reason *string `locationName:"reason" type:"string"` + + // The status of the state change request. + Status *string `locationName:"status" type:"string"` + + // The task ID or full Amazon Resource Name (ARN) of the task in the state change + // request. + Task *string `locationName:"task" type:"string"` +} + +// String returns the string representation +func (s SubmitTaskStateChangeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SubmitTaskStateChangeInput) GoString() string { + return s.String() +} + +type SubmitTaskStateChangeOutput struct { + _ struct{} `type:"structure"` + + // Acknowledgement of the state change. 
+	Acknowledgment *string `locationName:"acknowledgment" type:"string"`
+}
+
+// String returns the string representation
+func (s SubmitTaskStateChangeOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SubmitTaskStateChangeOutput) GoString() string {
+	return s.String()
+}
+
+// Details on a task in a cluster.
+type Task struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) of the cluster that hosts the task.
+	ClusterArn *string `locationName:"clusterArn" type:"string"`
+
+	// The Amazon Resource Name (ARN) of the container instance that hosts the task.
+	ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"`
+
+	// The containers associated with the task.
+	Containers []*Container `locationName:"containers" type:"list"`
+
+	// The Unix time in seconds and milliseconds when the task was created (the
+	// task entered the PENDING state).
+	CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unix"`
+
+	// The desired status of the task.
+	DesiredStatus *string `locationName:"desiredStatus" type:"string"`
+
+	// The last known status of the task.
+	LastStatus *string `locationName:"lastStatus" type:"string"`
+
+	// One or more container overrides.
+	Overrides *TaskOverride `locationName:"overrides" type:"structure"`
+
+	// The Unix time in seconds and milliseconds when the task was started (the
+	// task transitioned from the PENDING state to the RUNNING state).
+	StartedAt *time.Time `locationName:"startedAt" type:"timestamp" timestampFormat:"unix"`
+
+	// The tag specified when a task is started. If the task is started by an Amazon
+	// ECS service, then the startedBy parameter contains the deployment ID of the
+	// service that starts it.
+	StartedBy *string `locationName:"startedBy" type:"string"`
+
+	// The Unix time in seconds and milliseconds when the task was stopped (the
+	// task transitioned from the RUNNING state to the STOPPED state).
+	StoppedAt *time.Time `locationName:"stoppedAt" type:"timestamp" timestampFormat:"unix"`
+
+	// The reason the task was stopped.
+	StoppedReason *string `locationName:"stoppedReason" type:"string"`
+
+	// The Amazon Resource Name (ARN) of the task.
+	TaskArn *string `locationName:"taskArn" type:"string"`
+
+	// The Amazon Resource Name (ARN) of the task definition that creates the task.
+	TaskDefinitionArn *string `locationName:"taskDefinitionArn" type:"string"`
+}
+
+// String returns the string representation
+func (s Task) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Task) GoString() string {
+	return s.String()
+}
+
+// Details of a task definition.
+type TaskDefinition struct {
+	_ struct{} `type:"structure"`
+
+	// A list of container definitions in JSON format that describe the different
+	// containers that make up your task. For more information about container definition
+	// parameters and defaults, see Amazon ECS Task Definitions (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html)
+	// in the Amazon EC2 Container Service Developer Guide.
+	ContainerDefinitions []*ContainerDefinition `locationName:"containerDefinitions" type:"list"`
+
+	// The family of your task definition, used as the definition name.
+	Family *string `locationName:"family" type:"string"`
+
+	// The container instance attributes required by your task.
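+	// For example, a task whose containers use the awslogs log driver requires
+	// instances that report the com.amazonaws.ecs.capability.logging-driver.awslogs
+	// attribute (an illustrative case; attribute names are reported by the ECS agent).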
+	RequiresAttributes []*Attribute `locationName:"requiresAttributes" type:"list"`
+
+	// The revision of the task in a particular family. The revision is a version
+	// number of a task definition in a family. When you register a task definition
+	// for the first time, the revision is 1; each time you register a new revision
+	// of a task definition in the same family, the revision value always increases
+	// by one (even if you have deregistered previous revisions in this family).
+	Revision *int64 `locationName:"revision" type:"integer"`
+
+	// The status of the task definition.
+	Status *string `locationName:"status" type:"string" enum:"TaskDefinitionStatus"`
+
+	// The full Amazon Resource Name (ARN) of the task definition.
+	TaskDefinitionArn *string `locationName:"taskDefinitionArn" type:"string"`
+
+	// The list of volumes in a task. For more information about volume definition
+	// parameters and defaults, see Amazon ECS Task Definitions (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html)
+	// in the Amazon EC2 Container Service Developer Guide.
+	Volumes []*Volume `locationName:"volumes" type:"list"`
+}
+
+// String returns the string representation
+func (s TaskDefinition) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TaskDefinition) GoString() string {
+	return s.String()
+}
+
+// The overrides associated with a task.
+type TaskOverride struct {
+	_ struct{} `type:"structure"`
+
+	// One or more container overrides sent to a task.
+	ContainerOverrides []*ContainerOverride `locationName:"containerOverrides" type:"list"`
+}
+
+// String returns the string representation
+func (s TaskOverride) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TaskOverride) GoString() string {
+	return s.String()
+}
+
+// The ulimit settings to pass to the container.
+type Ulimit struct {
+	_ struct{} `type:"structure"`
+
+	// The hard limit for the ulimit type.
+	HardLimit *int64 `locationName:"hardLimit" type:"integer" required:"true"`
+
+	// The type of the ulimit.
+	Name *string `locationName:"name" type:"string" required:"true" enum:"UlimitName"`
+
+	// The soft limit for the ulimit type.
+	SoftLimit *int64 `locationName:"softLimit" type:"integer" required:"true"`
+}
+
+// String returns the string representation
+func (s Ulimit) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Ulimit) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Ulimit) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "Ulimit"}
+	if s.HardLimit == nil {
+		invalidParams.Add(request.NewErrParamRequired("HardLimit"))
+	}
+	if s.Name == nil {
+		invalidParams.Add(request.NewErrParamRequired("Name"))
+	}
+	if s.SoftLimit == nil {
+		invalidParams.Add(request.NewErrParamRequired("SoftLimit"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type UpdateContainerAgentInput struct {
+	_ struct{} `type:"structure"`
+
+	// The short name or full Amazon Resource Name (ARN) of the cluster that your
+	// container instance is running on. If you do not specify a cluster, the default
+	// cluster is assumed.
+	Cluster *string `locationName:"cluster" type:"string"`
+
+	// The container instance ID or full Amazon Resource Name (ARN) entry for
+	// the container instance on which you would like to update the Amazon ECS container
+	// agent.
+	ContainerInstance *string `locationName:"containerInstance" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateContainerAgentInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateContainerAgentInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateContainerAgentInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "UpdateContainerAgentInput"}
+	if s.ContainerInstance == nil {
+		invalidParams.Add(request.NewErrParamRequired("ContainerInstance"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type UpdateContainerAgentOutput struct {
+	_ struct{} `type:"structure"`
+
+	// An EC2 instance that is running the Amazon ECS agent and has been registered
+	// with a cluster.
+	ContainerInstance *ContainerInstance `locationName:"containerInstance" type:"structure"`
+}
+
+// String returns the string representation
+func (s UpdateContainerAgentOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateContainerAgentOutput) GoString() string {
+	return s.String()
+}
+
+type UpdateServiceInput struct {
+	_ struct{} `type:"structure"`
+
+	// The short name or full Amazon Resource Name (ARN) of the cluster that your
+	// service is running on. If you do not specify a cluster, the default cluster
+	// is assumed.
+	Cluster *string `locationName:"cluster" type:"string"`
+
+	// Optional deployment parameters that control how many tasks run during the
+	// deployment and the ordering of stopping and starting tasks.
+	DeploymentConfiguration *DeploymentConfiguration `locationName:"deploymentConfiguration" type:"structure"`
+
+	// The number of instantiations of the task to place and keep running in your
+	// service.
+	DesiredCount *int64 `locationName:"desiredCount" type:"integer"`
+
+	// The name of the service to update.
+	Service *string `locationName:"service" type:"string" required:"true"`
+
+	// The family and revision (family:revision) or full Amazon Resource Name (ARN)
+	// of the task definition to run in your service. If a revision is not specified,
+	// the latest ACTIVE revision is used. If you modify the task definition with
+	// UpdateService, Amazon ECS spawns a task with the new version of the task
+	// definition and then stops an old task after the new version is running.
+	TaskDefinition *string `locationName:"taskDefinition" type:"string"`
+}
+
+// String returns the string representation
+func (s UpdateServiceInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateServiceInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateServiceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateServiceInput"} + if s.Service == nil { + invalidParams.Add(request.NewErrParamRequired("Service")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateServiceOutput struct { + _ struct{} `type:"structure"` + + // The full description of your service following the update call. + Service *Service `locationName:"service" type:"structure"` +} + +// String returns the string representation +func (s UpdateServiceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateServiceOutput) GoString() string { + return s.String() +} + +// The Docker and Amazon ECS container agent version information about a container +// instance. +type VersionInfo struct { + _ struct{} `type:"structure"` + + // The Git commit hash for the Amazon ECS container agent build on the amazon-ecs-agent + // (https://github.com/aws/amazon-ecs-agent/commits/master) GitHub repository. + AgentHash *string `locationName:"agentHash" type:"string"` + + // The version number of the Amazon ECS container agent. + AgentVersion *string `locationName:"agentVersion" type:"string"` + + // The Docker version running on the container instance. + DockerVersion *string `locationName:"dockerVersion" type:"string"` +} + +// String returns the string representation +func (s VersionInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VersionInfo) GoString() string { + return s.String() +} + +// A data volume used in a task definition. +type Volume struct { + _ struct{} `type:"structure"` + + // The contents of the host parameter determine whether your data volume persists + // on the host container instance and where it is stored. If the host parameter + // is empty, then the Docker daemon assigns a host path for your data volume, + // but the data is not guaranteed to persist after the containers associated + // with it stop running. + Host *HostVolumeProperties `locationName:"host" type:"structure"` + + // The name of the volume. Up to 255 letters (uppercase and lowercase), numbers, + // hyphens, and underscores are allowed. This name is referenced in the sourceVolume + // parameter of container definition mountPoints. + Name *string `locationName:"name" type:"string"` +} + +// String returns the string representation +func (s Volume) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Volume) GoString() string { + return s.String() +} + +// Details on a data volume from another container. +type VolumeFrom struct { + _ struct{} `type:"structure"` + + // If this value is true, the container has read-only access to the volume. + // If this value is false, then the container can write to the volume. The default + // value is false. + ReadOnly *bool `locationName:"readOnly" type:"boolean"` + + // The name of the container to mount volumes from. 
+ SourceContainer *string `locationName:"sourceContainer" type:"string"` +} + +// String returns the string representation +func (s VolumeFrom) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeFrom) GoString() string { + return s.String() +} + +const ( + // @enum AgentUpdateStatus + AgentUpdateStatusPending = "PENDING" + // @enum AgentUpdateStatus + AgentUpdateStatusStaging = "STAGING" + // @enum AgentUpdateStatus + AgentUpdateStatusStaged = "STAGED" + // @enum AgentUpdateStatus + AgentUpdateStatusUpdating = "UPDATING" + // @enum AgentUpdateStatus + AgentUpdateStatusUpdated = "UPDATED" + // @enum AgentUpdateStatus + AgentUpdateStatusFailed = "FAILED" +) + +const ( + // @enum DesiredStatus + DesiredStatusRunning = "RUNNING" + // @enum DesiredStatus + DesiredStatusPending = "PENDING" + // @enum DesiredStatus + DesiredStatusStopped = "STOPPED" +) + +const ( + // @enum LogDriver + LogDriverJsonFile = "json-file" + // @enum LogDriver + LogDriverSyslog = "syslog" + // @enum LogDriver + LogDriverJournald = "journald" + // @enum LogDriver + LogDriverGelf = "gelf" + // @enum LogDriver + LogDriverFluentd = "fluentd" + // @enum LogDriver + LogDriverAwslogs = "awslogs" +) + +const ( + // @enum SortOrder + SortOrderAsc = "ASC" + // @enum SortOrder + SortOrderDesc = "DESC" +) + +const ( + // @enum TaskDefinitionFamilyStatus + TaskDefinitionFamilyStatusActive = "ACTIVE" + // @enum TaskDefinitionFamilyStatus + TaskDefinitionFamilyStatusInactive = "INACTIVE" + // @enum TaskDefinitionFamilyStatus + TaskDefinitionFamilyStatusAll = "ALL" +) + +const ( + // @enum TaskDefinitionStatus + TaskDefinitionStatusActive = "ACTIVE" + // @enum TaskDefinitionStatus + TaskDefinitionStatusInactive = "INACTIVE" +) + +const ( + // @enum TransportProtocol + TransportProtocolTcp = "tcp" + // @enum TransportProtocol + TransportProtocolUdp = "udp" +) + +const ( + // @enum UlimitName + UlimitNameCore = "core" + // @enum UlimitName + UlimitNameCpu = "cpu" + // @enum UlimitName + UlimitNameData = "data" + // @enum UlimitName + UlimitNameFsize = "fsize" + // @enum UlimitName + UlimitNameLocks = "locks" + // @enum UlimitName + UlimitNameMemlock = "memlock" + // @enum UlimitName + UlimitNameMsgqueue = "msgqueue" + // @enum UlimitName + UlimitNameNice = "nice" + // @enum UlimitName + UlimitNameNofile = "nofile" + // @enum UlimitName + UlimitNameNproc = "nproc" + // @enum UlimitName + UlimitNameRss = "rss" + // @enum UlimitName + UlimitNameRtprio = "rtprio" + // @enum UlimitName + UlimitNameRttime = "rttime" + // @enum UlimitName + UlimitNameSigpending = "sigpending" + // @enum UlimitName + UlimitNameStack = "stack" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecs/ecsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/ecs/ecsiface/interface.go new file mode 100644 index 000000000..58a4aecef --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ecs/ecsiface/interface.go @@ -0,0 +1,134 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package ecsiface provides an interface for the Amazon EC2 Container Service. +package ecsiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/ecs" +) + +// ECSAPI is the interface type for ecs.ECS. 
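+// A sketch of a common use (assumed test code, not part of the generated
+// package): embed ECSAPI in a mock so a test stubs only the methods it calls.
+//
+//	type mockECS struct {
+//	    ecsiface.ECSAPI
+//	    out *ecs.ListTasksOutput
+//	}
+//
+//	func (m *mockECS) ListTasks(in *ecs.ListTasksInput) (*ecs.ListTasksOutput, error) {
+//	    return m.out, nil
+//	}
+//
+// Code written against ECSAPI instead of the concrete *ecs.ECS type can then
+// be exercised with mockECS in unit tests.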
+type ECSAPI interface { + CreateClusterRequest(*ecs.CreateClusterInput) (*request.Request, *ecs.CreateClusterOutput) + + CreateCluster(*ecs.CreateClusterInput) (*ecs.CreateClusterOutput, error) + + CreateServiceRequest(*ecs.CreateServiceInput) (*request.Request, *ecs.CreateServiceOutput) + + CreateService(*ecs.CreateServiceInput) (*ecs.CreateServiceOutput, error) + + DeleteClusterRequest(*ecs.DeleteClusterInput) (*request.Request, *ecs.DeleteClusterOutput) + + DeleteCluster(*ecs.DeleteClusterInput) (*ecs.DeleteClusterOutput, error) + + DeleteServiceRequest(*ecs.DeleteServiceInput) (*request.Request, *ecs.DeleteServiceOutput) + + DeleteService(*ecs.DeleteServiceInput) (*ecs.DeleteServiceOutput, error) + + DeregisterContainerInstanceRequest(*ecs.DeregisterContainerInstanceInput) (*request.Request, *ecs.DeregisterContainerInstanceOutput) + + DeregisterContainerInstance(*ecs.DeregisterContainerInstanceInput) (*ecs.DeregisterContainerInstanceOutput, error) + + DeregisterTaskDefinitionRequest(*ecs.DeregisterTaskDefinitionInput) (*request.Request, *ecs.DeregisterTaskDefinitionOutput) + + DeregisterTaskDefinition(*ecs.DeregisterTaskDefinitionInput) (*ecs.DeregisterTaskDefinitionOutput, error) + + DescribeClustersRequest(*ecs.DescribeClustersInput) (*request.Request, *ecs.DescribeClustersOutput) + + DescribeClusters(*ecs.DescribeClustersInput) (*ecs.DescribeClustersOutput, error) + + DescribeContainerInstancesRequest(*ecs.DescribeContainerInstancesInput) (*request.Request, *ecs.DescribeContainerInstancesOutput) + + DescribeContainerInstances(*ecs.DescribeContainerInstancesInput) (*ecs.DescribeContainerInstancesOutput, error) + + DescribeServicesRequest(*ecs.DescribeServicesInput) (*request.Request, *ecs.DescribeServicesOutput) + + DescribeServices(*ecs.DescribeServicesInput) (*ecs.DescribeServicesOutput, error) + + DescribeTaskDefinitionRequest(*ecs.DescribeTaskDefinitionInput) (*request.Request, *ecs.DescribeTaskDefinitionOutput) + + DescribeTaskDefinition(*ecs.DescribeTaskDefinitionInput) (*ecs.DescribeTaskDefinitionOutput, error) + + DescribeTasksRequest(*ecs.DescribeTasksInput) (*request.Request, *ecs.DescribeTasksOutput) + + DescribeTasks(*ecs.DescribeTasksInput) (*ecs.DescribeTasksOutput, error) + + DiscoverPollEndpointRequest(*ecs.DiscoverPollEndpointInput) (*request.Request, *ecs.DiscoverPollEndpointOutput) + + DiscoverPollEndpoint(*ecs.DiscoverPollEndpointInput) (*ecs.DiscoverPollEndpointOutput, error) + + ListClustersRequest(*ecs.ListClustersInput) (*request.Request, *ecs.ListClustersOutput) + + ListClusters(*ecs.ListClustersInput) (*ecs.ListClustersOutput, error) + + ListClustersPages(*ecs.ListClustersInput, func(*ecs.ListClustersOutput, bool) bool) error + + ListContainerInstancesRequest(*ecs.ListContainerInstancesInput) (*request.Request, *ecs.ListContainerInstancesOutput) + + ListContainerInstances(*ecs.ListContainerInstancesInput) (*ecs.ListContainerInstancesOutput, error) + + ListContainerInstancesPages(*ecs.ListContainerInstancesInput, func(*ecs.ListContainerInstancesOutput, bool) bool) error + + ListServicesRequest(*ecs.ListServicesInput) (*request.Request, *ecs.ListServicesOutput) + + ListServices(*ecs.ListServicesInput) (*ecs.ListServicesOutput, error) + + ListServicesPages(*ecs.ListServicesInput, func(*ecs.ListServicesOutput, bool) bool) error + + ListTaskDefinitionFamiliesRequest(*ecs.ListTaskDefinitionFamiliesInput) (*request.Request, *ecs.ListTaskDefinitionFamiliesOutput) + + ListTaskDefinitionFamilies(*ecs.ListTaskDefinitionFamiliesInput) 
(*ecs.ListTaskDefinitionFamiliesOutput, error) + + ListTaskDefinitionFamiliesPages(*ecs.ListTaskDefinitionFamiliesInput, func(*ecs.ListTaskDefinitionFamiliesOutput, bool) bool) error + + ListTaskDefinitionsRequest(*ecs.ListTaskDefinitionsInput) (*request.Request, *ecs.ListTaskDefinitionsOutput) + + ListTaskDefinitions(*ecs.ListTaskDefinitionsInput) (*ecs.ListTaskDefinitionsOutput, error) + + ListTaskDefinitionsPages(*ecs.ListTaskDefinitionsInput, func(*ecs.ListTaskDefinitionsOutput, bool) bool) error + + ListTasksRequest(*ecs.ListTasksInput) (*request.Request, *ecs.ListTasksOutput) + + ListTasks(*ecs.ListTasksInput) (*ecs.ListTasksOutput, error) + + ListTasksPages(*ecs.ListTasksInput, func(*ecs.ListTasksOutput, bool) bool) error + + RegisterContainerInstanceRequest(*ecs.RegisterContainerInstanceInput) (*request.Request, *ecs.RegisterContainerInstanceOutput) + + RegisterContainerInstance(*ecs.RegisterContainerInstanceInput) (*ecs.RegisterContainerInstanceOutput, error) + + RegisterTaskDefinitionRequest(*ecs.RegisterTaskDefinitionInput) (*request.Request, *ecs.RegisterTaskDefinitionOutput) + + RegisterTaskDefinition(*ecs.RegisterTaskDefinitionInput) (*ecs.RegisterTaskDefinitionOutput, error) + + RunTaskRequest(*ecs.RunTaskInput) (*request.Request, *ecs.RunTaskOutput) + + RunTask(*ecs.RunTaskInput) (*ecs.RunTaskOutput, error) + + StartTaskRequest(*ecs.StartTaskInput) (*request.Request, *ecs.StartTaskOutput) + + StartTask(*ecs.StartTaskInput) (*ecs.StartTaskOutput, error) + + StopTaskRequest(*ecs.StopTaskInput) (*request.Request, *ecs.StopTaskOutput) + + StopTask(*ecs.StopTaskInput) (*ecs.StopTaskOutput, error) + + SubmitContainerStateChangeRequest(*ecs.SubmitContainerStateChangeInput) (*request.Request, *ecs.SubmitContainerStateChangeOutput) + + SubmitContainerStateChange(*ecs.SubmitContainerStateChangeInput) (*ecs.SubmitContainerStateChangeOutput, error) + + SubmitTaskStateChangeRequest(*ecs.SubmitTaskStateChangeInput) (*request.Request, *ecs.SubmitTaskStateChangeOutput) + + SubmitTaskStateChange(*ecs.SubmitTaskStateChangeInput) (*ecs.SubmitTaskStateChangeOutput, error) + + UpdateContainerAgentRequest(*ecs.UpdateContainerAgentInput) (*request.Request, *ecs.UpdateContainerAgentOutput) + + UpdateContainerAgent(*ecs.UpdateContainerAgentInput) (*ecs.UpdateContainerAgentOutput, error) + + UpdateServiceRequest(*ecs.UpdateServiceInput) (*request.Request, *ecs.UpdateServiceOutput) + + UpdateService(*ecs.UpdateServiceInput) (*ecs.UpdateServiceOutput, error) +} + +var _ ECSAPI = (*ecs.ECS)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecs/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/ecs/examples_test.go new file mode 100644 index 000000000..95bb09383 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ecs/examples_test.go @@ -0,0 +1,792 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package ecs_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/ecs" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleECS_CreateCluster() { + svc := ecs.New(session.New()) + + params := &ecs.CreateClusterInput{ + ClusterName: aws.String("String"), + } + resp, err := svc.CreateCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleECS_CreateService() { + svc := ecs.New(session.New()) + + params := &ecs.CreateServiceInput{ + DesiredCount: aws.Int64(1), // Required + ServiceName: aws.String("String"), // Required + TaskDefinition: aws.String("String"), // Required + ClientToken: aws.String("String"), + Cluster: aws.String("String"), + DeploymentConfiguration: &ecs.DeploymentConfiguration{ + MaximumPercent: aws.Int64(1), + MinimumHealthyPercent: aws.Int64(1), + }, + LoadBalancers: []*ecs.LoadBalancer{ + { // Required + ContainerName: aws.String("String"), + ContainerPort: aws.Int64(1), + LoadBalancerName: aws.String("String"), + }, + // More values... + }, + Role: aws.String("String"), + } + resp, err := svc.CreateService(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_DeleteCluster() { + svc := ecs.New(session.New()) + + params := &ecs.DeleteClusterInput{ + Cluster: aws.String("String"), // Required + } + resp, err := svc.DeleteCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_DeleteService() { + svc := ecs.New(session.New()) + + params := &ecs.DeleteServiceInput{ + Service: aws.String("String"), // Required + Cluster: aws.String("String"), + } + resp, err := svc.DeleteService(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_DeregisterContainerInstance() { + svc := ecs.New(session.New()) + + params := &ecs.DeregisterContainerInstanceInput{ + ContainerInstance: aws.String("String"), // Required + Cluster: aws.String("String"), + Force: aws.Bool(true), + } + resp, err := svc.DeregisterContainerInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_DeregisterTaskDefinition() { + svc := ecs.New(session.New()) + + params := &ecs.DeregisterTaskDefinitionInput{ + TaskDefinition: aws.String("String"), // Required + } + resp, err := svc.DeregisterTaskDefinition(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_DescribeClusters() { + svc := ecs.New(session.New()) + + params := &ecs.DescribeClustersInput{ + Clusters: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeClusters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_DescribeContainerInstances() { + svc := ecs.New(session.New()) + + params := &ecs.DescribeContainerInstancesInput{ + ContainerInstances: []*string{ // Required + aws.String("String"), // Required + // More values... 
+ }, + Cluster: aws.String("String"), + } + resp, err := svc.DescribeContainerInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_DescribeServices() { + svc := ecs.New(session.New()) + + params := &ecs.DescribeServicesInput{ + Services: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + Cluster: aws.String("String"), + } + resp, err := svc.DescribeServices(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_DescribeTaskDefinition() { + svc := ecs.New(session.New()) + + params := &ecs.DescribeTaskDefinitionInput{ + TaskDefinition: aws.String("String"), // Required + } + resp, err := svc.DescribeTaskDefinition(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_DescribeTasks() { + svc := ecs.New(session.New()) + + params := &ecs.DescribeTasksInput{ + Tasks: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + Cluster: aws.String("String"), + } + resp, err := svc.DescribeTasks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_DiscoverPollEndpoint() { + svc := ecs.New(session.New()) + + params := &ecs.DiscoverPollEndpointInput{ + Cluster: aws.String("String"), + ContainerInstance: aws.String("String"), + } + resp, err := svc.DiscoverPollEndpoint(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_ListClusters() { + svc := ecs.New(session.New()) + + params := &ecs.ListClustersInput{ + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.ListClusters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_ListContainerInstances() { + svc := ecs.New(session.New()) + + params := &ecs.ListContainerInstancesInput{ + Cluster: aws.String("String"), + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.ListContainerInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_ListServices() { + svc := ecs.New(session.New()) + + params := &ecs.ListServicesInput{ + Cluster: aws.String("String"), + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.ListServices(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
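+		// For example (an illustrative sketch; it assumes adding
+		// "github.com/aws/aws-sdk-go/aws/awserr" to the imports):
+		//
+		//	if aerr, ok := err.(awserr.Error); ok {
+		//	    fmt.Println(aerr.Code(), aerr.Message())
+		//	}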
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_ListTaskDefinitionFamilies() { + svc := ecs.New(session.New()) + + params := &ecs.ListTaskDefinitionFamiliesInput{ + FamilyPrefix: aws.String("String"), + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + Status: aws.String("TaskDefinitionFamilyStatus"), + } + resp, err := svc.ListTaskDefinitionFamilies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_ListTaskDefinitions() { + svc := ecs.New(session.New()) + + params := &ecs.ListTaskDefinitionsInput{ + FamilyPrefix: aws.String("String"), + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + Sort: aws.String("SortOrder"), + Status: aws.String("TaskDefinitionStatus"), + } + resp, err := svc.ListTaskDefinitions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_ListTasks() { + svc := ecs.New(session.New()) + + params := &ecs.ListTasksInput{ + Cluster: aws.String("String"), + ContainerInstance: aws.String("String"), + DesiredStatus: aws.String("DesiredStatus"), + Family: aws.String("String"), + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + ServiceName: aws.String("String"), + StartedBy: aws.String("String"), + } + resp, err := svc.ListTasks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_RegisterContainerInstance() { + svc := ecs.New(session.New()) + + params := &ecs.RegisterContainerInstanceInput{ + Attributes: []*ecs.Attribute{ + { // Required + Name: aws.String("String"), // Required + Value: aws.String("String"), + }, + // More values... + }, + Cluster: aws.String("String"), + ContainerInstanceArn: aws.String("String"), + InstanceIdentityDocument: aws.String("String"), + InstanceIdentityDocumentSignature: aws.String("String"), + TotalResources: []*ecs.Resource{ + { // Required + DoubleValue: aws.Float64(1.0), + IntegerValue: aws.Int64(1), + LongValue: aws.Int64(1), + Name: aws.String("String"), + StringSetValue: []*string{ + aws.String("String"), // Required + // More values... + }, + Type: aws.String("String"), + }, + // More values... + }, + VersionInfo: &ecs.VersionInfo{ + AgentHash: aws.String("String"), + AgentVersion: aws.String("String"), + DockerVersion: aws.String("String"), + }, + } + resp, err := svc.RegisterContainerInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_RegisterTaskDefinition() { + svc := ecs.New(session.New()) + + params := &ecs.RegisterTaskDefinitionInput{ + ContainerDefinitions: []*ecs.ContainerDefinition{ // Required + { // Required + Command: []*string{ + aws.String("String"), // Required + // More values... + }, + Cpu: aws.Int64(1), + DisableNetworking: aws.Bool(true), + DnsSearchDomains: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + DnsServers: []*string{ + aws.String("String"), // Required + // More values... + }, + DockerLabels: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + DockerSecurityOptions: []*string{ + aws.String("String"), // Required + // More values... + }, + EntryPoint: []*string{ + aws.String("String"), // Required + // More values... + }, + Environment: []*ecs.KeyValuePair{ + { // Required + Name: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + Essential: aws.Bool(true), + ExtraHosts: []*ecs.HostEntry{ + { // Required + Hostname: aws.String("String"), // Required + IpAddress: aws.String("String"), // Required + }, + // More values... + }, + Hostname: aws.String("String"), + Image: aws.String("String"), + Links: []*string{ + aws.String("String"), // Required + // More values... + }, + LogConfiguration: &ecs.LogConfiguration{ + LogDriver: aws.String("LogDriver"), // Required + Options: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + }, + Memory: aws.Int64(1), + MountPoints: []*ecs.MountPoint{ + { // Required + ContainerPath: aws.String("String"), + ReadOnly: aws.Bool(true), + SourceVolume: aws.String("String"), + }, + // More values... + }, + Name: aws.String("String"), + PortMappings: []*ecs.PortMapping{ + { // Required + ContainerPort: aws.Int64(1), + HostPort: aws.Int64(1), + Protocol: aws.String("TransportProtocol"), + }, + // More values... + }, + Privileged: aws.Bool(true), + ReadonlyRootFilesystem: aws.Bool(true), + Ulimits: []*ecs.Ulimit{ + { // Required + HardLimit: aws.Int64(1), // Required + Name: aws.String("UlimitName"), // Required + SoftLimit: aws.Int64(1), // Required + }, + // More values... + }, + User: aws.String("String"), + VolumesFrom: []*ecs.VolumeFrom{ + { // Required + ReadOnly: aws.Bool(true), + SourceContainer: aws.String("String"), + }, + // More values... + }, + WorkingDirectory: aws.String("String"), + }, + // More values... + }, + Family: aws.String("String"), // Required + Volumes: []*ecs.Volume{ + { // Required + Host: &ecs.HostVolumeProperties{ + SourcePath: aws.String("String"), + }, + Name: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.RegisterTaskDefinition(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_RunTask() { + svc := ecs.New(session.New()) + + params := &ecs.RunTaskInput{ + TaskDefinition: aws.String("String"), // Required + Cluster: aws.String("String"), + Count: aws.Int64(1), + Overrides: &ecs.TaskOverride{ + ContainerOverrides: []*ecs.ContainerOverride{ + { // Required + Command: []*string{ + aws.String("String"), // Required + // More values... + }, + Environment: []*ecs.KeyValuePair{ + { // Required + Name: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + Name: aws.String("String"), + }, + // More values... + }, + }, + StartedBy: aws.String("String"), + } + resp, err := svc.RunTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleECS_StartTask() { + svc := ecs.New(session.New()) + + params := &ecs.StartTaskInput{ + ContainerInstances: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + TaskDefinition: aws.String("String"), // Required + Cluster: aws.String("String"), + Overrides: &ecs.TaskOverride{ + ContainerOverrides: []*ecs.ContainerOverride{ + { // Required + Command: []*string{ + aws.String("String"), // Required + // More values... + }, + Environment: []*ecs.KeyValuePair{ + { // Required + Name: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + Name: aws.String("String"), + }, + // More values... + }, + }, + StartedBy: aws.String("String"), + } + resp, err := svc.StartTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_StopTask() { + svc := ecs.New(session.New()) + + params := &ecs.StopTaskInput{ + Task: aws.String("String"), // Required + Cluster: aws.String("String"), + Reason: aws.String("String"), + } + resp, err := svc.StopTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_SubmitContainerStateChange() { + svc := ecs.New(session.New()) + + params := &ecs.SubmitContainerStateChangeInput{ + Cluster: aws.String("String"), + ContainerName: aws.String("String"), + ExitCode: aws.Int64(1), + NetworkBindings: []*ecs.NetworkBinding{ + { // Required + BindIP: aws.String("String"), + ContainerPort: aws.Int64(1), + HostPort: aws.Int64(1), + Protocol: aws.String("TransportProtocol"), + }, + // More values... + }, + Reason: aws.String("String"), + Status: aws.String("String"), + Task: aws.String("String"), + } + resp, err := svc.SubmitContainerStateChange(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_SubmitTaskStateChange() { + svc := ecs.New(session.New()) + + params := &ecs.SubmitTaskStateChangeInput{ + Cluster: aws.String("String"), + Reason: aws.String("String"), + Status: aws.String("String"), + Task: aws.String("String"), + } + resp, err := svc.SubmitTaskStateChange(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_UpdateContainerAgent() { + svc := ecs.New(session.New()) + + params := &ecs.UpdateContainerAgentInput{ + ContainerInstance: aws.String("String"), // Required + Cluster: aws.String("String"), + } + resp, err := svc.UpdateContainerAgent(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+	fmt.Println(resp)
+}
+
+func ExampleECS_UpdateService() {
+	svc := ecs.New(session.New())
+
+	params := &ecs.UpdateServiceInput{
+		Service: aws.String("String"), // Required
+		Cluster: aws.String("String"),
+		DeploymentConfiguration: &ecs.DeploymentConfiguration{
+			MaximumPercent:        aws.Int64(1),
+			MinimumHealthyPercent: aws.Int64(1),
+		},
+		DesiredCount:   aws.Int64(1),
+		TaskDefinition: aws.String("String"),
+	}
+	resp, err := svc.UpdateService(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecs/service.go b/vendor/github.com/aws/aws-sdk-go/service/ecs/service.go
new file mode 100644
index 000000000..fe99d735a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/ecs/service.go
@@ -0,0 +1,99 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package ecs
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+)
+
+// Amazon EC2 Container Service (Amazon ECS) is a highly scalable, fast, container
+// management service that makes it easy to run, stop, and manage Docker containers
+// on a cluster of EC2 instances. Amazon ECS lets you launch and stop container-enabled
+// applications with simple API calls, allows you to get the state of your cluster
+// from a centralized service, and gives you access to many familiar Amazon
+// EC2 features like security groups, Amazon EBS volumes, and IAM roles.
+//
+// You can use Amazon ECS to schedule the placement of containers across your
+// cluster based on your resource needs, isolation policies, and availability
+// requirements. Amazon EC2 Container Service eliminates the need for you to
+// operate your own cluster management and configuration management systems
+// or worry about scaling your management infrastructure.
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type ECS struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "ecs"
+
+// New creates a new instance of the ECS client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//    // Create a ECS client from just a session.
+//    svc := ecs.New(mySession)
+//
+//    // Create a ECS client with additional configuration
+//    svc := ecs.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *ECS {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
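+//
+// The handler lists wired up below can also be extended after construction;
+// a minimal sketch, assuming an existing session (the log line is purely
+// illustrative):
+//
+//    svc := ecs.New(mySession)
+//    svc.Handlers.Send.PushFront(func(r *request.Request) {
+//        fmt.Println("sending", r.Operation.Name) // runs before each request is sent
+//    })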
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *ECS { + svc := &ECS{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-11-13", + JSONVersion: "1.1", + TargetPrefix: "AmazonEC2ContainerServiceV20141113", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a ECS operation and runs any +// custom request initialization. +func (c *ECS) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecs/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/ecs/waiters.go new file mode 100644 index 000000000..155f7c098 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ecs/waiters.go @@ -0,0 +1,135 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package ecs + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *ECS) WaitUntilServicesInactive(input *DescribeServicesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeServices", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "failure", + Matcher: "pathAny", + Argument: "failures[].reason", + Expected: "MISSING", + }, + { + State: "success", + Matcher: "pathAny", + Argument: "services[].status", + Expected: "INACTIVE", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *ECS) WaitUntilServicesStable(input *DescribeServicesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeServices", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "failure", + Matcher: "pathAny", + Argument: "failures[].reason", + Expected: "MISSING", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "services[].status", + Expected: "DRAINING", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "services[].status", + Expected: "INACTIVE", + }, + { + State: "success", + Matcher: "path", + Argument: "services | [@[?length(deployments)!=`1`], @[?desiredCount!=runningCount]][] | length(@) == `0`", + Expected: true, + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *ECS) WaitUntilTasksRunning(input *DescribeTasksInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeTasks", + Delay: 6, + MaxAttempts: 100, + Acceptors: []waiter.WaitAcceptor{ + { + State: "failure", + Matcher: "pathAny", + Argument: "tasks[].lastStatus", + Expected: "STOPPED", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "failures[].reason", + Expected: "MISSING", + }, + { + State: "success", + Matcher: "pathAll", + Argument: "tasks[].lastStatus", + Expected: "RUNNING", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: 
waiterCfg,
+	}
+	return w.Wait()
+}
+
+func (c *ECS) WaitUntilTasksStopped(input *DescribeTasksInput) error {
+	waiterCfg := waiter.Config{
+		Operation:   "DescribeTasks",
+		Delay:       6,
+		MaxAttempts: 100,
+		Acceptors: []waiter.WaitAcceptor{
+			{
+				State:    "success",
+				Matcher:  "pathAll",
+				Argument: "tasks[].lastStatus",
+				Expected: "STOPPED",
+			},
+		},
+	}
+
+	w := waiter.Waiter{
+		Client: c,
+		Input:  input,
+		Config: waiterCfg,
+	}
+	return w.Wait()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/efs/api.go b/vendor/github.com/aws/aws-sdk-go/service/efs/api.go
new file mode 100644
index 000000000..1245ef45d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/efs/api.go
@@ -0,0 +1,1575 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package efs provides a client for Amazon Elastic File System.
+package efs
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol"
+	"github.com/aws/aws-sdk-go/private/protocol/restjson"
+)
+
+const opCreateFileSystem = "CreateFileSystem"
+
+// CreateFileSystemRequest generates a "aws/request.Request" representing the
+// client's request for the CreateFileSystem operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CreateFileSystem method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the CreateFileSystemRequest method.
+//    req, resp := client.CreateFileSystemRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *EFS) CreateFileSystemRequest(input *CreateFileSystemInput) (req *request.Request, output *FileSystemDescription) {
+	op := &request.Operation{
+		Name:       opCreateFileSystem,
+		HTTPMethod: "POST",
+		HTTPPath:   "/2015-02-01/file-systems",
+	}
+
+	if input == nil {
+		input = &CreateFileSystemInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &FileSystemDescription{}
+	req.Data = output
+	return
+}
+
+// Creates a new, empty file system. The operation requires a creation token
+// in the request that Amazon EFS uses to ensure idempotent creation (calling
+// the operation with the same creation token has no effect). If a file system
+// does not currently exist that is owned by the caller's AWS account with the
+// specified creation token, this operation does the following:
+//
+// Creates a new, empty file system. The file system will have an Amazon
+// EFS assigned ID, and an initial lifecycle state creating.
+//
+// Returns with the description of the created file system.
+//
+// Otherwise, this operation returns a FileSystemAlreadyExists error with
+// the ID of the existing file system.
+//
+// For basic use cases, you can use a randomly generated UUID for the creation
+// token.
+//
+// The idempotent operation allows you to retry a CreateFileSystem call without
+// risk of creating an extra file system. This can happen when an initial call
+// fails in a way that leaves it uncertain whether or not a file system was
+// actually created. An example might be that a transport-level timeout occurred
+// or your connection was reset. As long as you use the same creation token,
+// if the initial call had succeeded in creating a file system, the client can
+// learn of its existence from the FileSystemAlreadyExists error.
+//
+// The CreateFileSystem call returns while the file system's lifecycle state
+// is still creating. You can check the file system creation status by calling
+// the DescribeFileSystems operation, which among other things returns the file
+// system state.
+//
+// This operation also takes an optional PerformanceMode parameter that you
+// choose for your file system. We recommend generalPurpose performance mode
+// for most file systems. File systems using the maxIO performance mode can
+// scale to higher levels of aggregate throughput and operations per second
+// with a tradeoff of slightly higher latencies for most file operations. The
+// performance mode can't be changed after the file system has been created.
+// For more information, see Amazon EFS: Performance Modes (http://docs.aws.amazon.com/efs/latest/ug/performance.html#performancemodes.html).
+//
+// After the file system is fully created, Amazon EFS sets its lifecycle state
+// to available, at which point you can create one or more mount targets for
+// the file system in your VPC. For more information, see CreateMountTarget.
+// You mount your Amazon EFS file system on an EC2 instance in your VPC via
+// the mount target. For more information, see Amazon EFS: How it Works (http://docs.aws.amazon.com/efs/latest/ug/how-it-works.html).
+//
+// This operation requires permissions for the elasticfilesystem:CreateFileSystem
+// action.
+func (c *EFS) CreateFileSystem(input *CreateFileSystemInput) (*FileSystemDescription, error) {
+	req, out := c.CreateFileSystemRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateMountTarget = "CreateMountTarget"
+
+// CreateMountTargetRequest generates a "aws/request.Request" representing the
+// client's request for the CreateMountTarget operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CreateMountTarget method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the CreateMountTargetRequest method.
+//    req, resp := client.CreateMountTargetRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *EFS) CreateMountTargetRequest(input *CreateMountTargetInput) (req *request.Request, output *MountTargetDescription) {
+	op := &request.Operation{
+		Name:       opCreateMountTarget,
+		HTTPMethod: "POST",
+		HTTPPath:   "/2015-02-01/mount-targets",
+	}
+
+	if input == nil {
+		input = &CreateMountTargetInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &MountTargetDescription{}
+	req.Data = output
+	return
+}
+
+// Creates a mount target for a file system. You can then mount the file system
+// on EC2 instances via the mount target.
+//
+// You can create one mount target in each Availability Zone in your VPC. All
+// EC2 instances in a VPC within a given Availability Zone share a single mount
+// target for a given file system. If you have multiple subnets in an Availability
+// Zone, you create a mount target in one of the subnets. EC2 instances do not
+// need to be in the same subnet as the mount target in order to access their
+// file system. For more information, see Amazon EFS: How it Works (http://docs.aws.amazon.com/efs/latest/ug/how-it-works.html).
+//
+// In the request, you also specify a file system ID for which you are creating
+// the mount target, and the file system's lifecycle state must be available.
+// For more information, see DescribeFileSystems.
+//
+// In the request, you also provide a subnet ID, which determines the following:
+//
+// VPC in which Amazon EFS creates the mount target
+//
+// Availability Zone in which Amazon EFS creates the mount target
+//
+// IP address range from which Amazon EFS selects the IP address of the mount
+// target (if you don't specify an IP address in the request)
+//
+// After creating the mount target, Amazon EFS returns a response that includes
+// a MountTargetId and an IpAddress. You use this IP address when mounting the
+// file system in an EC2 instance. You can also use the mount target's DNS name
+// when mounting the file system. The EC2 instance on which you mount the file
+// system via the mount target can resolve the mount target's DNS name to its
+// IP address. For more information, see How it Works: Implementation Overview
+// (http://docs.aws.amazon.com/efs/latest/ug/how-it-works.html#how-it-works-implementation).
+//
+// Note that you can create mount targets for a file system in only one VPC,
+// and there can be only one mount target per Availability Zone. That is, if
+// the file system already has one or more mount targets created for it, the
+// subnet specified in the request to add another mount target must meet the
+// following requirements:
+//
+// Must belong to the same VPC as the subnets of the existing mount targets
+//
+// Must not be in the same Availability Zone as any of the subnets of the
+// existing mount targets
+//
+// If the request satisfies the requirements, Amazon EFS does the following:
+//
+// Creates a new mount target in the specified subnet.
+//
+// Also creates a new network interface in the subnet as follows:
+//
+// If the request provides an IpAddress, Amazon EFS assigns that IP address
+// to the network interface. Otherwise, Amazon EFS assigns a free address in
+// the subnet (in the same way that the Amazon EC2 CreateNetworkInterface call
+// does when a request does not specify a primary private IP address).
+//
+// If the request provides SecurityGroups, this network interface is associated
+// with those security groups. Otherwise, it belongs to the default security
+// group for the subnet's VPC.
+//
+// Assigns the description Mount target fsmt-id for file system fs-id where
+// fsmt-id is the mount target ID, and fs-id is the FileSystemId.
+//
+// Sets the requesterManaged property of the network interface to true, and
+// the requesterId value to EFS.
+//
+// Each Amazon EFS mount target has one corresponding requester-managed EC2
+// network interface.
After the network interface is created, Amazon EFS sets +// the NetworkInterfaceId field in the mount target's description to the network +// interface ID, and the IpAddress field to its address. If network interface +// creation fails, the entire CreateMountTarget operation fails. +// +// The CreateMountTarget call returns only after creating the network interface, +// but while the mount target state is still creating. You can check the mount +// target creation status by calling the DescribeFileSystems operation, which +// among other things returns the mount target state. +// +// We recommend you create a mount target in each of the Availability Zones. +// There are cost considerations for using a file system in an Availability +// Zone through a mount target created in another Availability Zone. For more +// information, see Amazon EFS (http://aws.amazon.com/efs/). In addition, by +// always using a mount target local to the instance's Availability Zone, you +// eliminate a partial failure scenario. If the Availability Zone in which your +// mount target is created goes down, then you won't be able to access your +// file system through that mount target. +// +// This operation requires permissions for the following action on the file +// system: +// +// elasticfilesystem:CreateMountTarget +// +// This operation also requires permissions for the following Amazon EC2 +// actions: +// +// ec2:DescribeSubnets +// +// ec2:DescribeNetworkInterfaces +// +// ec2:CreateNetworkInterface +func (c *EFS) CreateMountTarget(input *CreateMountTargetInput) (*MountTargetDescription, error) { + req, out := c.CreateMountTargetRequest(input) + err := req.Send() + return out, err +} + +const opCreateTags = "CreateTags" + +// CreateTagsRequest generates a "aws/request.Request" representing the +// client's request for the CreateTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateTagsRequest method. +// req, resp := client.CreateTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EFS) CreateTagsRequest(input *CreateTagsInput) (req *request.Request, output *CreateTagsOutput) { + op := &request.Operation{ + Name: opCreateTags, + HTTPMethod: "POST", + HTTPPath: "/2015-02-01/create-tags/{FileSystemId}", + } + + if input == nil { + input = &CreateTagsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CreateTagsOutput{} + req.Data = output + return +} + +// Creates or overwrites tags associated with a file system. Each tag is a key-value +// pair. If a tag key specified in the request already exists on the file system, +// this operation overwrites its value with the value provided in the request. 
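+//
+// For example, tagging a file system with a Name key (a minimal sketch; the
+// session and file system ID are illustrative):
+//
+//    svc := efs.New(session.New())
+//    _, err := svc.CreateTags(&efs.CreateTagsInput{
+//        FileSystemId: aws.String("fs-12345678"),
+//        Tags: []*efs.Tag{
+//            {Key: aws.String("Name"), Value: aws.String("my-file-system")},
+//        },
+//    })
+//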
+// If you add the Name tag to your file system, Amazon EFS returns it in the
+// response to the DescribeFileSystems operation.
+//
+// This operation requires permission for the elasticfilesystem:CreateTags
+// action.
+func (c *EFS) CreateTags(input *CreateTagsInput) (*CreateTagsOutput, error) {
+	req, out := c.CreateTagsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteFileSystem = "DeleteFileSystem"
+
+// DeleteFileSystemRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteFileSystem operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DeleteFileSystem method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DeleteFileSystemRequest method.
+//    req, resp := client.DeleteFileSystemRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *EFS) DeleteFileSystemRequest(input *DeleteFileSystemInput) (req *request.Request, output *DeleteFileSystemOutput) {
+	op := &request.Operation{
+		Name:       opDeleteFileSystem,
+		HTTPMethod: "DELETE",
+		HTTPPath:   "/2015-02-01/file-systems/{FileSystemId}",
+	}
+
+	if input == nil {
+		input = &DeleteFileSystemInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	output = &DeleteFileSystemOutput{}
+	req.Data = output
+	return
+}
+
+// Deletes a file system, permanently severing access to its contents. Upon
+// return, the file system no longer exists and you can't access any contents
+// of the deleted file system.
+//
+// You can't delete a file system that is in use. That is, if the file system
+// has any mount targets, you must first delete them. For more information,
+// see DescribeMountTargets and DeleteMountTarget.
+//
+// The DeleteFileSystem call returns while the file system state is still
+// deleting. You can check the file system deletion status by calling the DescribeFileSystems
+// operation, which returns a list of file systems in your account. If you pass
+// the file system ID or creation token for the deleted file system, DescribeFileSystems
+// returns a 404 FileSystemNotFound error.
+//
+// This operation requires permissions for the elasticfilesystem:DeleteFileSystem
+// action.
+func (c *EFS) DeleteFileSystem(input *DeleteFileSystemInput) (*DeleteFileSystemOutput, error) {
+	req, out := c.DeleteFileSystemRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteMountTarget = "DeleteMountTarget"
+
+// DeleteMountTargetRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteMountTarget operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DeleteMountTarget method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DeleteMountTargetRequest method.
+//    req, resp := client.DeleteMountTargetRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *EFS) DeleteMountTargetRequest(input *DeleteMountTargetInput) (req *request.Request, output *DeleteMountTargetOutput) {
+	op := &request.Operation{
+		Name:       opDeleteMountTarget,
+		HTTPMethod: "DELETE",
+		HTTPPath:   "/2015-02-01/mount-targets/{MountTargetId}",
+	}
+
+	if input == nil {
+		input = &DeleteMountTargetInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	output = &DeleteMountTargetOutput{}
+	req.Data = output
+	return
+}
+
+// Deletes the specified mount target.
+//
+// This operation forcibly breaks any mounts of the file system via the mount
+// target that is being deleted, which might disrupt instances or applications
+// using those mounts. To avoid applications getting cut off abruptly, you might
+// consider unmounting any mounts of the mount target, if feasible. The operation
+// also deletes the associated network interface. Uncommitted writes may be
+// lost, but breaking a mount target using this operation does not corrupt the
+// file system itself. The file system you created remains. You can mount the
+// file system on an EC2 instance in your VPC via another mount target.
+//
+// This operation requires permissions for the following action on the file
+// system:
+//
+// elasticfilesystem:DeleteMountTarget
+//
+// The DeleteMountTarget call returns while the mount target state is still
+// deleting. You can check the mount target deletion status by calling the DescribeMountTargets
+// operation, which returns a list of mount target descriptions for the given
+// file system.
+//
+// The operation also requires permissions for the following Amazon EC2 action
+// on the mount target's network interface:
+//
+// ec2:DeleteNetworkInterface
+func (c *EFS) DeleteMountTarget(input *DeleteMountTargetInput) (*DeleteMountTargetOutput, error) {
+	req, out := c.DeleteMountTargetRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteTags = "DeleteTags"
+
+// DeleteTagsRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteTags operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DeleteTags method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DeleteTagsRequest method.
+// req, resp := client.DeleteTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EFS) DeleteTagsRequest(input *DeleteTagsInput) (req *request.Request, output *DeleteTagsOutput) { + op := &request.Operation{ + Name: opDeleteTags, + HTTPMethod: "POST", + HTTPPath: "/2015-02-01/delete-tags/{FileSystemId}", + } + + if input == nil { + input = &DeleteTagsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteTagsOutput{} + req.Data = output + return +} + +// Deletes the specified tags from a file system. If the DeleteTags request +// includes a tag key that does not exist, Amazon EFS ignores it and doesn't +// cause an error. For more information about tags and related restrictions, +// see Tag Restrictions (http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) +// in the AWS Billing and Cost Management User Guide. +// +// This operation requires permissions for the elasticfilesystem:DeleteTags +// action. +func (c *EFS) DeleteTags(input *DeleteTagsInput) (*DeleteTagsOutput, error) { + req, out := c.DeleteTagsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeFileSystems = "DescribeFileSystems" + +// DescribeFileSystemsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeFileSystems operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeFileSystems method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeFileSystemsRequest method. +// req, resp := client.DescribeFileSystemsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EFS) DescribeFileSystemsRequest(input *DescribeFileSystemsInput) (req *request.Request, output *DescribeFileSystemsOutput) { + op := &request.Operation{ + Name: opDescribeFileSystems, + HTTPMethod: "GET", + HTTPPath: "/2015-02-01/file-systems", + } + + if input == nil { + input = &DescribeFileSystemsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeFileSystemsOutput{} + req.Data = output + return +} + +// Returns the description of a specific Amazon EFS file system if either the +// file system CreationToken or the FileSystemId is provided. Otherwise, it +// returns descriptions of all file systems owned by the caller's AWS account +// in the AWS Region of the endpoint that you're calling. +// +// When retrieving all file system descriptions, you can optionally specify +// the MaxItems parameter to limit the number of descriptions in a response. +// If more file system descriptions remain, Amazon EFS returns a NextMarker, +// an opaque token, in the response. In this case, you should send a subsequent +// request with the Marker request parameter set to the value of NextMarker. 
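+//
+// A minimal pagination sketch along those lines (error handling elided; svc
+// is assumed to be an existing *EFS client):
+//
+//    params := &efs.DescribeFileSystemsInput{}
+//    for {
+//        page, _ := svc.DescribeFileSystems(params)
+//        fmt.Println(page.FileSystems) // one page of file system descriptions
+//        if page.NextMarker == nil {
+//            break
+//        }
+//        params.Marker = page.NextMarker
+//    }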
+// +// To retrieve a list of your file system descriptions, this operation is used +// in an iterative process, where DescribeFileSystems is called first without +// the Marker and then the operation continues to call it with the Marker parameter +// set to the value of the NextMarker from the previous response until the response +// has no NextMarker. +// +// The implementation may return fewer than MaxItems file system descriptions +// while still including a NextMarker value. +// +// The order of file systems returned in the response of one DescribeFileSystems +// call and the order of file systems returned across the responses of a multi-call +// iteration is unspecified. +// +// This operation requires permissions for the elasticfilesystem:DescribeFileSystems +// action. +func (c *EFS) DescribeFileSystems(input *DescribeFileSystemsInput) (*DescribeFileSystemsOutput, error) { + req, out := c.DescribeFileSystemsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeMountTargetSecurityGroups = "DescribeMountTargetSecurityGroups" + +// DescribeMountTargetSecurityGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeMountTargetSecurityGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeMountTargetSecurityGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeMountTargetSecurityGroupsRequest method. +// req, resp := client.DescribeMountTargetSecurityGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EFS) DescribeMountTargetSecurityGroupsRequest(input *DescribeMountTargetSecurityGroupsInput) (req *request.Request, output *DescribeMountTargetSecurityGroupsOutput) { + op := &request.Operation{ + Name: opDescribeMountTargetSecurityGroups, + HTTPMethod: "GET", + HTTPPath: "/2015-02-01/mount-targets/{MountTargetId}/security-groups", + } + + if input == nil { + input = &DescribeMountTargetSecurityGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeMountTargetSecurityGroupsOutput{} + req.Data = output + return +} + +// Returns the security groups currently in effect for a mount target. This +// operation requires that the network interface of the mount target has been +// created and the lifecycle state of the mount target is not deleted. +// +// This operation requires permissions for the following actions: +// +// elasticfilesystem:DescribeMountTargetSecurityGroups action on the mount +// target's file system. +// +// ec2:DescribeNetworkInterfaceAttribute action on the mount target's network +// interface. 
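+//
+// A minimal usage sketch (the mount target ID is illustrative):
+//
+//    resp, err := svc.DescribeMountTargetSecurityGroups(&efs.DescribeMountTargetSecurityGroupsInput{
+//        MountTargetId: aws.String("fsmt-12345678"),
+//    })
+//    if err == nil {
+//        fmt.Println(resp.SecurityGroups)
+//    }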
+func (c *EFS) DescribeMountTargetSecurityGroups(input *DescribeMountTargetSecurityGroupsInput) (*DescribeMountTargetSecurityGroupsOutput, error) { + req, out := c.DescribeMountTargetSecurityGroupsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeMountTargets = "DescribeMountTargets" + +// DescribeMountTargetsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeMountTargets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeMountTargets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeMountTargetsRequest method. +// req, resp := client.DescribeMountTargetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EFS) DescribeMountTargetsRequest(input *DescribeMountTargetsInput) (req *request.Request, output *DescribeMountTargetsOutput) { + op := &request.Operation{ + Name: opDescribeMountTargets, + HTTPMethod: "GET", + HTTPPath: "/2015-02-01/mount-targets", + } + + if input == nil { + input = &DescribeMountTargetsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeMountTargetsOutput{} + req.Data = output + return +} + +// Returns the descriptions of all the current mount targets, or a specific +// mount target, for a file system. When requesting all of the current mount +// targets, the order of mount targets returned in the response is unspecified. +// +// This operation requires permissions for the elasticfilesystem:DescribeMountTargets +// action, on either the file system ID that you specify in FileSystemId, or +// on the file system of the mount target that you specify in MountTargetId. +func (c *EFS) DescribeMountTargets(input *DescribeMountTargetsInput) (*DescribeMountTargetsOutput, error) { + req, out := c.DescribeMountTargetsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeTags = "DescribeTags" + +// DescribeTagsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeTagsRequest method. 
+// req, resp := client.DescribeTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EFS) DescribeTagsRequest(input *DescribeTagsInput) (req *request.Request, output *DescribeTagsOutput) { + op := &request.Operation{ + Name: opDescribeTags, + HTTPMethod: "GET", + HTTPPath: "/2015-02-01/tags/{FileSystemId}/", + } + + if input == nil { + input = &DescribeTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTagsOutput{} + req.Data = output + return +} + +// Returns the tags associated with a file system. The order of tags returned +// in the response of one DescribeTags call and the order of tags returned across +// the responses of a multi-call iteration (when using pagination) is unspecified. +// +// This operation requires permissions for the elasticfilesystem:DescribeTags +// action. +func (c *EFS) DescribeTags(input *DescribeTagsInput) (*DescribeTagsOutput, error) { + req, out := c.DescribeTagsRequest(input) + err := req.Send() + return out, err +} + +const opModifyMountTargetSecurityGroups = "ModifyMountTargetSecurityGroups" + +// ModifyMountTargetSecurityGroupsRequest generates a "aws/request.Request" representing the +// client's request for the ModifyMountTargetSecurityGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyMountTargetSecurityGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyMountTargetSecurityGroupsRequest method. +// req, resp := client.ModifyMountTargetSecurityGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EFS) ModifyMountTargetSecurityGroupsRequest(input *ModifyMountTargetSecurityGroupsInput) (req *request.Request, output *ModifyMountTargetSecurityGroupsOutput) { + op := &request.Operation{ + Name: opModifyMountTargetSecurityGroups, + HTTPMethod: "PUT", + HTTPPath: "/2015-02-01/mount-targets/{MountTargetId}/security-groups", + } + + if input == nil { + input = &ModifyMountTargetSecurityGroupsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ModifyMountTargetSecurityGroupsOutput{} + req.Data = output + return +} + +// Modifies the set of security groups in effect for a mount target. +// +// When you create a mount target, Amazon EFS also creates a new network interface. +// For more information, see CreateMountTarget. This operation replaces the +// security groups in effect for the network interface associated with a mount +// target, with the SecurityGroups provided in the request. This operation requires +// that the network interface of the mount target has been created and the lifecycle +// state of the mount target is not deleted. 
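+//
+// For example, replacing the groups in effect with a single group (the IDs
+// here are illustrative):
+//
+//    _, err := svc.ModifyMountTargetSecurityGroups(&efs.ModifyMountTargetSecurityGroupsInput{
+//        MountTargetId:  aws.String("fsmt-12345678"),
+//        SecurityGroups: []*string{aws.String("sg-12345678")},
+//    })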
+//
+// The operation requires permissions for the following actions:
+//
+// elasticfilesystem:ModifyMountTargetSecurityGroups action on the mount
+// target's file system.
+//
+// ec2:ModifyNetworkInterfaceAttribute action on the mount target's network
+// interface.
+func (c *EFS) ModifyMountTargetSecurityGroups(input *ModifyMountTargetSecurityGroupsInput) (*ModifyMountTargetSecurityGroupsOutput, error) {
+	req, out := c.ModifyMountTargetSecurityGroupsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+type CreateFileSystemInput struct {
+	_ struct{} `type:"structure"`
+
+	// String of up to 64 ASCII characters. Amazon EFS uses this to ensure idempotent
+	// creation.
+	CreationToken *string `min:"1" type:"string" required:"true"`
+
+	// The PerformanceMode of the file system. We recommend generalPurpose performance
+	// mode for most file systems. File systems using the maxIO performance mode
+	// can scale to higher levels of aggregate throughput and operations per second
+	// with a tradeoff of slightly higher latencies for most file operations. This
+	// can't be changed after the file system has been created.
+	PerformanceMode *string `type:"string" enum:"PerformanceMode"`
+}
+
+// String returns the string representation
+func (s CreateFileSystemInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateFileSystemInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateFileSystemInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateFileSystemInput"}
+	if s.CreationToken == nil {
+		invalidParams.Add(request.NewErrParamRequired("CreationToken"))
+	}
+	if s.CreationToken != nil && len(*s.CreationToken) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("CreationToken", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type CreateMountTargetInput struct {
+	_ struct{} `type:"structure"`
+
+	// ID of the file system for which to create the mount target.
+	FileSystemId *string `type:"string" required:"true"`
+
+	// Valid IPv4 address within the address range of the specified subnet.
+	IpAddress *string `type:"string"`
+
+	// Up to five VPC security group IDs, of the form sg-xxxxxxxx. These must be
+	// for the same VPC as the subnet specified.
+	SecurityGroups []*string `type:"list"`
+
+	// ID of the subnet to add the mount target in.
+	SubnetId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateMountTargetInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateMountTargetInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateMountTargetInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateMountTargetInput"}
+	if s.FileSystemId == nil {
+		invalidParams.Add(request.NewErrParamRequired("FileSystemId"))
+	}
+	if s.SubnetId == nil {
+		invalidParams.Add(request.NewErrParamRequired("SubnetId"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type CreateTagsInput struct {
+	_ struct{} `type:"structure"`
+
+	// ID of the file system whose tags you want to modify (String). This operation
+	// modifies the tags only, not the file system.
+ FileSystemId *string `location:"uri" locationName:"FileSystemId" type:"string" required:"true"` + + // Array of Tag objects to add. Each Tag object is a key-value pair. + Tags []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s CreateTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTagsInput"} + if s.FileSystemId == nil { + invalidParams.Add(request.NewErrParamRequired("FileSystemId")) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTagsOutput) GoString() string { + return s.String() +} + +type DeleteFileSystemInput struct { + _ struct{} `type:"structure"` + + // ID of the file system you want to delete. + FileSystemId *string `location:"uri" locationName:"FileSystemId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteFileSystemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFileSystemInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteFileSystemInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteFileSystemInput"} + if s.FileSystemId == nil { + invalidParams.Add(request.NewErrParamRequired("FileSystemId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteFileSystemOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteFileSystemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFileSystemOutput) GoString() string { + return s.String() +} + +type DeleteMountTargetInput struct { + _ struct{} `type:"structure"` + + // ID of the mount target to delete (String). + MountTargetId *string `location:"uri" locationName:"MountTargetId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteMountTargetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMountTargetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
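+//
+// A minimal sketch of the usual pattern, checking an input before sending it
+// (the empty input here is only illustrative and fails validation):
+//
+//    input := &efs.DeleteMountTargetInput{}
+//    if err := input.Validate(); err != nil {
+//        fmt.Println(err) // aggregated request.ErrInvalidParams
+//    }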
+func (s *DeleteMountTargetInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteMountTargetInput"}
+	if s.MountTargetId == nil {
+		invalidParams.Add(request.NewErrParamRequired("MountTargetId"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type DeleteMountTargetOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteMountTargetOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteMountTargetOutput) GoString() string {
+	return s.String()
+}
+
+type DeleteTagsInput struct {
+	_ struct{} `type:"structure"`
+
+	// ID of the file system whose tags you want to delete (String).
+	FileSystemId *string `location:"uri" locationName:"FileSystemId" type:"string" required:"true"`
+
+	// List of tag keys to delete.
+	TagKeys []*string `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteTagsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteTagsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteTagsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteTagsInput"}
+	if s.FileSystemId == nil {
+		invalidParams.Add(request.NewErrParamRequired("FileSystemId"))
+	}
+	if s.TagKeys == nil {
+		invalidParams.Add(request.NewErrParamRequired("TagKeys"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type DeleteTagsOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteTagsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteTagsOutput) GoString() string {
+	return s.String()
+}
+
+type DescribeFileSystemsInput struct {
+	_ struct{} `type:"structure"`
+
+	// (Optional) Restricts the list to the file system with this creation token
+	// (String). You specify a creation token when you create an Amazon EFS file
+	// system.
+	CreationToken *string `location:"querystring" locationName:"CreationToken" min:"1" type:"string"`
+
+	// (Optional) ID of the file system whose description you want to retrieve (String).
+	FileSystemId *string `location:"querystring" locationName:"FileSystemId" type:"string"`
+
+	// (Optional) Opaque pagination token returned from a previous DescribeFileSystems
+	// operation (String). If present, it specifies to continue the list from where
+	// the previous call left off.
+	Marker *string `location:"querystring" locationName:"Marker" type:"string"`
+
+	// (Optional) Specifies the maximum number of file systems to return in the
+	// response (integer). This parameter value must be greater than 0. The number
+	// of items that Amazon EFS returns is the minimum of the MaxItems parameter
+	// specified in the request and the service's internal maximum number of items
+	// per page.
+	MaxItems *int64 `location:"querystring" locationName:"MaxItems" min:"1" type:"integer"`
+}
+
+// String returns the string representation
+func (s DescribeFileSystemsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeFileSystemsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
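+// Validation also runs automatically before the SDK sends a request, so calling
+// Validate directly is only useful for checking parameters up front. A minimal
+// sketch (the zero MaxItems value is deliberately invalid here):
+//
+//    in := &DescribeFileSystemsInput{MaxItems: aws.Int64(0)}
+//    if err := in.Validate(); err != nil {
+//        fmt.Println(err) // reports the MaxItems minimum-value violation
+//    }
+//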
+func (s *DescribeFileSystemsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DescribeFileSystemsInput"}
+	if s.CreationToken != nil && len(*s.CreationToken) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("CreationToken", 1))
+	}
+	if s.MaxItems != nil && *s.MaxItems < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type DescribeFileSystemsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Array of file system descriptions.
+	FileSystems []*FileSystemDescription `type:"list"`
+
+	// Present if provided by caller in the request (String).
+	Marker *string `type:"string"`
+
+	// Present if there are more file systems than returned in the response (String).
+	// You can use the NextMarker in the subsequent request to fetch the descriptions.
+	NextMarker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeFileSystemsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeFileSystemsOutput) GoString() string {
+	return s.String()
+}
+
+type DescribeMountTargetSecurityGroupsInput struct {
+	_ struct{} `type:"structure"`
+
+	// ID of the mount target whose security groups you want to retrieve.
+	MountTargetId *string `location:"uri" locationName:"MountTargetId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeMountTargetSecurityGroupsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeMountTargetSecurityGroupsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeMountTargetSecurityGroupsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DescribeMountTargetSecurityGroupsInput"}
+	if s.MountTargetId == nil {
+		invalidParams.Add(request.NewErrParamRequired("MountTargetId"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type DescribeMountTargetSecurityGroupsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Array of security groups.
+	SecurityGroups []*string `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeMountTargetSecurityGroupsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeMountTargetSecurityGroupsOutput) GoString() string {
+	return s.String()
+}
+
+type DescribeMountTargetsInput struct {
+	_ struct{} `type:"structure"`
+
+	// (Optional) ID of the file system whose mount targets you want to list (String).
+	// It must be included in your request if MountTargetId is not included.
+	FileSystemId *string `location:"querystring" locationName:"FileSystemId" type:"string"`
+
+	// (Optional) Opaque pagination token returned from a previous DescribeMountTargets
+	// operation (String). If present, it specifies to continue the list from where
+	// the previous call left off.
+	Marker *string `location:"querystring" locationName:"Marker" type:"string"`
+
+	// (Optional) Maximum number of mount targets to return in the response. It
+	// must be an integer with a value greater than zero.
+	MaxItems *int64 `location:"querystring" locationName:"MaxItems" min:"1" type:"integer"`
+
+	// (Optional) ID of the mount target whose description you want to retrieve (String).
+	// It must be included in your request if FileSystemId is not included.
+	MountTargetId *string `location:"querystring" locationName:"MountTargetId" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeMountTargetsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeMountTargetsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeMountTargetsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DescribeMountTargetsInput"}
+	if s.MaxItems != nil && *s.MaxItems < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type DescribeMountTargetsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// If the request included the Marker, the response returns that value in this
+	// field.
+	Marker *string `type:"string"`
+
+	// Returns the file system's mount targets as an array of MountTargetDescription
+	// objects.
+	MountTargets []*MountTargetDescription `type:"list"`
+
+	// If a value is present, there are more mount targets to return. In a subsequent
+	// request, you can provide Marker in your request with this value to retrieve
+	// the next set of mount targets.
+	NextMarker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeMountTargetsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeMountTargetsOutput) GoString() string {
+	return s.String()
+}
+
+type DescribeTagsInput struct {
+	_ struct{} `type:"structure"`
+
+	// ID of the file system whose tag set you want to retrieve.
+	FileSystemId *string `location:"uri" locationName:"FileSystemId" type:"string" required:"true"`
+
+	// (Optional) Opaque pagination token returned from a previous DescribeTags
+	// operation (String). If present, it specifies to continue the list from where
+	// the previous call left off.
+	Marker *string `location:"querystring" locationName:"Marker" type:"string"`
+
+	// (Optional) Maximum number of file system tags to return in the response.
+	// It must be an integer with a value greater than zero.
+	MaxItems *int64 `location:"querystring" locationName:"MaxItems" min:"1" type:"integer"`
+}
+
+// String returns the string representation
+func (s DescribeTagsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeTagsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeTagsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DescribeTagsInput"}
+	if s.FileSystemId == nil {
+		invalidParams.Add(request.NewErrParamRequired("FileSystemId"))
+	}
+	if s.MaxItems != nil && *s.MaxItems < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type DescribeTagsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// If the request included a Marker, the response returns that value in this
+	// field.
+	Marker *string `type:"string"`
+
+	// If a value is present, there are more tags to return. In a subsequent request,
+	// you can provide the value of NextMarker as the value of the Marker parameter
+	// in your next request to retrieve the next set of tags.
+	NextMarker *string `type:"string"`
+
+	// Returns tags associated with the file system as an array of Tag objects.
+	Tags []*Tag `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeTagsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeTagsOutput) GoString() string {
+	return s.String()
+}
+
+// Description of the file system.
+type FileSystemDescription struct {
+	_ struct{} `type:"structure"`
+
+	// Time that the file system was created, in seconds (since 1970-01-01T00:00:00Z).
+	CreationTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"`
+
+	// Opaque string specified in the request.
+	CreationToken *string `min:"1" type:"string" required:"true"`
+
+	// ID of the file system, assigned by Amazon EFS.
+	FileSystemId *string `type:"string" required:"true"`
+
+	// Lifecycle phase of the file system.
+	LifeCycleState *string `type:"string" required:"true" enum:"LifeCycleState"`
+
+	// You can add tags to a file system, including a Name tag. For more information,
+	// see CreateTags. If the file system has a Name tag, Amazon EFS returns the
+	// value in this field.
+	Name *string `type:"string"`
+
+	// Current number of mount targets that the file system has. For more information,
+	// see CreateMountTarget.
+	NumberOfMountTargets *int64 `type:"integer" required:"true"`
+
+	// AWS account that created the file system. If the file system was created
+	// by an IAM user, the parent account to which the user belongs is the owner.
+	OwnerId *string `type:"string" required:"true"`
+
+	// The PerformanceMode of the file system.
+	PerformanceMode *string `type:"string" required:"true" enum:"PerformanceMode"`
+
+	// Latest known metered size (in bytes) of data stored in the file system, in
+	// its Value field, and the time at which that size was determined in its Timestamp
+	// field. The Timestamp value is the integer number of seconds since 1970-01-01T00:00:00Z.
+	// Note that the value does not represent the size of a consistent snapshot
+	// of the file system, but it is eventually consistent when there are no writes
+	// to the file system. That is, the value will represent the actual size only
+	// if the file system is not modified for a period longer than a couple of hours.
+	// Otherwise, the value is not the exact size the file system was at any instant
+	// in time.
+	SizeInBytes *FileSystemSize `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s FileSystemDescription) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s FileSystemDescription) GoString() string {
+	return s.String()
+}
+
+// Latest known metered size (in bytes) of data stored in the file system, in
+// its Value field, and the time at which that size was determined in its Timestamp
+// field. Note that the value does not represent the size of a consistent snapshot
+// of the file system, but it is eventually consistent when there are no writes
+// to the file system. That is, the value will represent the actual size only
+// if the file system is not modified for a period longer than a couple of hours.
+// Otherwise, the value is not necessarily the exact size the file system was +// at any instant in time. +type FileSystemSize struct { + _ struct{} `type:"structure"` + + // Time at which the size of data, returned in the Value field, was determined. + // The value is the integer number of seconds since 1970-01-01T00:00:00Z. + Timestamp *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Latest known metered size (in bytes) of data stored in the file system. + Value *int64 `type:"long" required:"true"` +} + +// String returns the string representation +func (s FileSystemSize) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FileSystemSize) GoString() string { + return s.String() +} + +type ModifyMountTargetSecurityGroupsInput struct { + _ struct{} `type:"structure"` + + // ID of the mount target whose security groups you want to modify. + MountTargetId *string `location:"uri" locationName:"MountTargetId" type:"string" required:"true"` + + // Array of up to five VPC security group IDs. + SecurityGroups []*string `type:"list"` +} + +// String returns the string representation +func (s ModifyMountTargetSecurityGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyMountTargetSecurityGroupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyMountTargetSecurityGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyMountTargetSecurityGroupsInput"} + if s.MountTargetId == nil { + invalidParams.Add(request.NewErrParamRequired("MountTargetId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ModifyMountTargetSecurityGroupsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ModifyMountTargetSecurityGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyMountTargetSecurityGroupsOutput) GoString() string { + return s.String() +} + +// Provides a description of a mount target. +type MountTargetDescription struct { + _ struct{} `type:"structure"` + + // ID of the file system for which the mount target is intended. + FileSystemId *string `type:"string" required:"true"` + + // Address at which the file system may be mounted via the mount target. + IpAddress *string `type:"string"` + + // Lifecycle state of the mount target. + LifeCycleState *string `type:"string" required:"true" enum:"LifeCycleState"` + + // System-assigned mount target ID. + MountTargetId *string `type:"string" required:"true"` + + // ID of the network interface that Amazon EFS created when it created the mount + // target. + NetworkInterfaceId *string `type:"string"` + + // AWS account ID that owns the resource. + OwnerId *string `type:"string"` + + // ID of the mount target's subnet. + SubnetId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s MountTargetDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MountTargetDescription) GoString() string { + return s.String() +} + +// A tag is a key-value pair. Allowed characters: letters, whitespace, and numbers, +// representable in UTF-8, and the following characters: + - = . 
_ : / +type Tag struct { + _ struct{} `type:"structure"` + + // Tag key (String). The key can't start with aws:. + Key *string `min:"1" type:"string" required:"true"` + + // Value of the tag key. + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +const ( + // @enum LifeCycleState + LifeCycleStateCreating = "creating" + // @enum LifeCycleState + LifeCycleStateAvailable = "available" + // @enum LifeCycleState + LifeCycleStateDeleting = "deleting" + // @enum LifeCycleState + LifeCycleStateDeleted = "deleted" +) + +const ( + // @enum PerformanceMode + PerformanceModeGeneralPurpose = "generalPurpose" + // @enum PerformanceMode + PerformanceModeMaxIo = "maxIO" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/efs/efsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/efs/efsiface/interface.go new file mode 100644 index 000000000..53542f18d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/efs/efsiface/interface.go @@ -0,0 +1,58 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package efsiface provides an interface for the Amazon Elastic File System. +package efsiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/efs" +) + +// EFSAPI is the interface type for efs.EFS. 
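+//
+// A common reason to depend on this interface instead of *efs.EFS directly is
+// unit testing: embed EFSAPI in a stub and override only the methods the test
+// needs. A minimal sketch of a test double in a consuming package (mockEFS and
+// its canned response are hypothetical, not part of this package):
+//
+//    type mockEFS struct {
+//        efsiface.EFSAPI
+//    }
+//
+//    func (m *mockEFS) DescribeFileSystems(in *efs.DescribeFileSystemsInput) (*efs.DescribeFileSystemsOutput, error) {
+//        return &efs.DescribeFileSystemsOutput{}, nil
+//    }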
+type EFSAPI interface { + CreateFileSystemRequest(*efs.CreateFileSystemInput) (*request.Request, *efs.FileSystemDescription) + + CreateFileSystem(*efs.CreateFileSystemInput) (*efs.FileSystemDescription, error) + + CreateMountTargetRequest(*efs.CreateMountTargetInput) (*request.Request, *efs.MountTargetDescription) + + CreateMountTarget(*efs.CreateMountTargetInput) (*efs.MountTargetDescription, error) + + CreateTagsRequest(*efs.CreateTagsInput) (*request.Request, *efs.CreateTagsOutput) + + CreateTags(*efs.CreateTagsInput) (*efs.CreateTagsOutput, error) + + DeleteFileSystemRequest(*efs.DeleteFileSystemInput) (*request.Request, *efs.DeleteFileSystemOutput) + + DeleteFileSystem(*efs.DeleteFileSystemInput) (*efs.DeleteFileSystemOutput, error) + + DeleteMountTargetRequest(*efs.DeleteMountTargetInput) (*request.Request, *efs.DeleteMountTargetOutput) + + DeleteMountTarget(*efs.DeleteMountTargetInput) (*efs.DeleteMountTargetOutput, error) + + DeleteTagsRequest(*efs.DeleteTagsInput) (*request.Request, *efs.DeleteTagsOutput) + + DeleteTags(*efs.DeleteTagsInput) (*efs.DeleteTagsOutput, error) + + DescribeFileSystemsRequest(*efs.DescribeFileSystemsInput) (*request.Request, *efs.DescribeFileSystemsOutput) + + DescribeFileSystems(*efs.DescribeFileSystemsInput) (*efs.DescribeFileSystemsOutput, error) + + DescribeMountTargetSecurityGroupsRequest(*efs.DescribeMountTargetSecurityGroupsInput) (*request.Request, *efs.DescribeMountTargetSecurityGroupsOutput) + + DescribeMountTargetSecurityGroups(*efs.DescribeMountTargetSecurityGroupsInput) (*efs.DescribeMountTargetSecurityGroupsOutput, error) + + DescribeMountTargetsRequest(*efs.DescribeMountTargetsInput) (*request.Request, *efs.DescribeMountTargetsOutput) + + DescribeMountTargets(*efs.DescribeMountTargetsInput) (*efs.DescribeMountTargetsOutput, error) + + DescribeTagsRequest(*efs.DescribeTagsInput) (*request.Request, *efs.DescribeTagsOutput) + + DescribeTags(*efs.DescribeTagsInput) (*efs.DescribeTagsOutput, error) + + ModifyMountTargetSecurityGroupsRequest(*efs.ModifyMountTargetSecurityGroupsInput) (*request.Request, *efs.ModifyMountTargetSecurityGroupsOutput) + + ModifyMountTargetSecurityGroups(*efs.ModifyMountTargetSecurityGroupsInput) (*efs.ModifyMountTargetSecurityGroupsOutput, error) +} + +var _ EFSAPI = (*efs.EFS)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/efs/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/efs/examples_test.go new file mode 100644 index 000000000..72e705e15 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/efs/examples_test.go @@ -0,0 +1,255 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package efs_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/efs" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleEFS_CreateFileSystem() { + svc := efs.New(session.New()) + + params := &efs.CreateFileSystemInput{ + CreationToken: aws.String("CreationToken"), // Required + PerformanceMode: aws.String("PerformanceMode"), + } + resp, err := svc.CreateFileSystem(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEFS_CreateMountTarget() { + svc := efs.New(session.New()) + + params := &efs.CreateMountTargetInput{ + FileSystemId: aws.String("FileSystemId"), // Required + SubnetId: aws.String("SubnetId"), // Required + IpAddress: aws.String("IpAddress"), + SecurityGroups: []*string{ + aws.String("SecurityGroup"), // Required + // More values... + }, + } + resp, err := svc.CreateMountTarget(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEFS_CreateTags() { + svc := efs.New(session.New()) + + params := &efs.CreateTagsInput{ + FileSystemId: aws.String("FileSystemId"), // Required + Tags: []*efs.Tag{ // Required + { // Required + Key: aws.String("TagKey"), // Required + Value: aws.String("TagValue"), // Required + }, + // More values... + }, + } + resp, err := svc.CreateTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEFS_DeleteFileSystem() { + svc := efs.New(session.New()) + + params := &efs.DeleteFileSystemInput{ + FileSystemId: aws.String("FileSystemId"), // Required + } + resp, err := svc.DeleteFileSystem(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEFS_DeleteMountTarget() { + svc := efs.New(session.New()) + + params := &efs.DeleteMountTargetInput{ + MountTargetId: aws.String("MountTargetId"), // Required + } + resp, err := svc.DeleteMountTarget(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEFS_DeleteTags() { + svc := efs.New(session.New()) + + params := &efs.DeleteTagsInput{ + FileSystemId: aws.String("FileSystemId"), // Required + TagKeys: []*string{ // Required + aws.String("TagKey"), // Required + // More values... + }, + } + resp, err := svc.DeleteTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEFS_DescribeFileSystems() { + svc := efs.New(session.New()) + + params := &efs.DescribeFileSystemsInput{ + CreationToken: aws.String("CreationToken"), + FileSystemId: aws.String("FileSystemId"), + Marker: aws.String("Marker"), + MaxItems: aws.Int64(1), + } + resp, err := svc.DescribeFileSystems(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEFS_DescribeMountTargetSecurityGroups() { + svc := efs.New(session.New()) + + params := &efs.DescribeMountTargetSecurityGroupsInput{ + MountTargetId: aws.String("MountTargetId"), // Required + } + resp, err := svc.DescribeMountTargetSecurityGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleEFS_DescribeMountTargets() {
+	svc := efs.New(session.New())
+
+	params := &efs.DescribeMountTargetsInput{
+		FileSystemId:  aws.String("FileSystemId"),
+		Marker:        aws.String("Marker"),
+		MaxItems:      aws.Int64(1),
+		MountTargetId: aws.String("MountTargetId"),
+	}
+	resp, err := svc.DescribeMountTargets(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleEFS_DescribeTags() {
+	svc := efs.New(session.New())
+
+	params := &efs.DescribeTagsInput{
+		FileSystemId: aws.String("FileSystemId"), // Required
+		Marker:       aws.String("Marker"),
+		MaxItems:     aws.Int64(1),
+	}
+	resp, err := svc.DescribeTags(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleEFS_ModifyMountTargetSecurityGroups() {
+	svc := efs.New(session.New())
+
+	params := &efs.ModifyMountTargetSecurityGroupsInput{
+		MountTargetId: aws.String("MountTargetId"), // Required
+		SecurityGroups: []*string{
+			aws.String("SecurityGroup"), // Required
+			// More values...
+		},
+	}
+	resp, err := svc.ModifyMountTargetSecurityGroups(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/efs/service.go b/vendor/github.com/aws/aws-sdk-go/service/efs/service.go
new file mode 100644
index 000000000..de3a48ac3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/efs/service.go
@@ -0,0 +1,85 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package efs
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/restjson"
+)
+
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type EFS struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "elasticfilesystem"
+
+// New creates a new instance of the EFS client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create an EFS client from just a session.
+//     svc := efs.New(mySession)
+//
+//     // Create an EFS client with additional configuration
+//     svc := efs.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *EFS {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *EFS { + svc := &EFS{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-02-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a EFS operation and runs any +// custom request initialization. +func (c *EFS) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticache/api.go b/vendor/github.com/aws/aws-sdk-go/service/elasticache/api.go new file mode 100644 index 000000000..f1d9a70f1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticache/api.go @@ -0,0 +1,6837 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package elasticache provides a client for Amazon ElastiCache. +package elasticache + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +const opAddTagsToResource = "AddTagsToResource" + +// AddTagsToResourceRequest generates a "aws/request.Request" representing the +// client's request for the AddTagsToResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddTagsToResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddTagsToResourceRequest method. +// req, resp := client.AddTagsToResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElastiCache) AddTagsToResourceRequest(input *AddTagsToResourceInput) (req *request.Request, output *TagListMessage) { + op := &request.Operation{ + Name: opAddTagsToResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddTagsToResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &TagListMessage{} + req.Data = output + return +} + +// The AddTagsToResource action adds up to 10 cost allocation tags to the named +// resource. A cost allocation tag is a key-value pair where the key and value +// are case-sensitive. Cost allocation tags can be used to categorize and track +// your AWS costs. 
+// +// When you apply tags to your ElastiCache resources, AWS generates a cost +// allocation report as a comma-separated value (CSV) file with your usage and +// costs aggregated by your tags. You can apply tags that represent business +// categories (such as cost centers, application names, or owners) to organize +// your costs across multiple services. For more information, see Using Cost +// Allocation Tags in Amazon ElastiCache (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/Tagging.html) +// in the ElastiCache User Guide. +func (c *ElastiCache) AddTagsToResource(input *AddTagsToResourceInput) (*TagListMessage, error) { + req, out := c.AddTagsToResourceRequest(input) + err := req.Send() + return out, err +} + +const opAuthorizeCacheSecurityGroupIngress = "AuthorizeCacheSecurityGroupIngress" + +// AuthorizeCacheSecurityGroupIngressRequest generates a "aws/request.Request" representing the +// client's request for the AuthorizeCacheSecurityGroupIngress operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AuthorizeCacheSecurityGroupIngress method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AuthorizeCacheSecurityGroupIngressRequest method. +// req, resp := client.AuthorizeCacheSecurityGroupIngressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElastiCache) AuthorizeCacheSecurityGroupIngressRequest(input *AuthorizeCacheSecurityGroupIngressInput) (req *request.Request, output *AuthorizeCacheSecurityGroupIngressOutput) { + op := &request.Operation{ + Name: opAuthorizeCacheSecurityGroupIngress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AuthorizeCacheSecurityGroupIngressInput{} + } + + req = c.newRequest(op, input, output) + output = &AuthorizeCacheSecurityGroupIngressOutput{} + req.Data = output + return +} + +// The AuthorizeCacheSecurityGroupIngress action allows network ingress to a +// cache security group. Applications using ElastiCache must be running on Amazon +// EC2, and Amazon EC2 security groups are used as the authorization mechanism. +// +// You cannot authorize ingress from an Amazon EC2 security group in one region +// to an ElastiCache cluster in another region. +func (c *ElastiCache) AuthorizeCacheSecurityGroupIngress(input *AuthorizeCacheSecurityGroupIngressInput) (*AuthorizeCacheSecurityGroupIngressOutput, error) { + req, out := c.AuthorizeCacheSecurityGroupIngressRequest(input) + err := req.Send() + return out, err +} + +const opCopySnapshot = "CopySnapshot" + +// CopySnapshotRequest generates a "aws/request.Request" representing the +// client's request for the CopySnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CopySnapshot method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the CopySnapshotRequest method.
+//    req, resp := client.CopySnapshotRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *ElastiCache) CopySnapshotRequest(input *CopySnapshotInput) (req *request.Request, output *CopySnapshotOutput) {
+	op := &request.Operation{
+		Name:       opCopySnapshot,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CopySnapshotInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CopySnapshotOutput{}
+	req.Data = output
+	return
+}
+
+// The CopySnapshot action makes a copy of an existing snapshot.
+//
+// Users or groups that have permissions to use the CopySnapshot API can create
+// their own Amazon S3 buckets and copy snapshots to them. To control access to
+// your snapshots, use an IAM policy to control who has the ability to use the
+// CopySnapshot API. For more information about using IAM to control the use
+// of ElastiCache APIs, see Exporting Snapshots (http://docs.aws.amazon.com/ElastiCache/latest/Snapshots.Exporting.html)
+// and Authentication & Access Control (http://docs.aws.amazon.com/ElastiCache/latest/IAM.html).
+//
+// Error Message: The authenticated user does not have sufficient permissions
+// to perform the desired activity.
+//
+// Solution: Contact your system administrator to get the needed permissions.
+func (c *ElastiCache) CopySnapshot(input *CopySnapshotInput) (*CopySnapshotOutput, error) {
+	req, out := c.CopySnapshotRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateCacheCluster = "CreateCacheCluster"
+
+// CreateCacheClusterRequest generates a "aws/request.Request" representing the
+// client's request for the CreateCacheCluster operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CreateCacheCluster method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the CreateCacheClusterRequest method.
+// req, resp := client.CreateCacheClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElastiCache) CreateCacheClusterRequest(input *CreateCacheClusterInput) (req *request.Request, output *CreateCacheClusterOutput) { + op := &request.Operation{ + Name: opCreateCacheCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateCacheClusterInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateCacheClusterOutput{} + req.Data = output + return +} + +// The CreateCacheCluster action creates a cache cluster. All nodes in the cache +// cluster run the same protocol-compliant cache engine software, either Memcached +// or Redis. +func (c *ElastiCache) CreateCacheCluster(input *CreateCacheClusterInput) (*CreateCacheClusterOutput, error) { + req, out := c.CreateCacheClusterRequest(input) + err := req.Send() + return out, err +} + +const opCreateCacheParameterGroup = "CreateCacheParameterGroup" + +// CreateCacheParameterGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateCacheParameterGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateCacheParameterGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateCacheParameterGroupRequest method. +// req, resp := client.CreateCacheParameterGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElastiCache) CreateCacheParameterGroupRequest(input *CreateCacheParameterGroupInput) (req *request.Request, output *CreateCacheParameterGroupOutput) { + op := &request.Operation{ + Name: opCreateCacheParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateCacheParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateCacheParameterGroupOutput{} + req.Data = output + return +} + +// The CreateCacheParameterGroup action creates a new cache parameter group. +// A cache parameter group is a collection of parameters that you apply to all +// of the nodes in a cache cluster. +func (c *ElastiCache) CreateCacheParameterGroup(input *CreateCacheParameterGroupInput) (*CreateCacheParameterGroupOutput, error) { + req, out := c.CreateCacheParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateCacheSecurityGroup = "CreateCacheSecurityGroup" + +// CreateCacheSecurityGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateCacheSecurityGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If
+// you just want the service response, call the CreateCacheSecurityGroup method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the CreateCacheSecurityGroupRequest method.
+//    req, resp := client.CreateCacheSecurityGroupRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *ElastiCache) CreateCacheSecurityGroupRequest(input *CreateCacheSecurityGroupInput) (req *request.Request, output *CreateCacheSecurityGroupOutput) {
+	op := &request.Operation{
+		Name:       opCreateCacheSecurityGroup,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateCacheSecurityGroupInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CreateCacheSecurityGroupOutput{}
+	req.Data = output
+	return
+}
+
+// The CreateCacheSecurityGroup action creates a new cache security group. Use
+// a cache security group to control access to one or more cache clusters.
+//
+// Cache security groups are only used when you are creating a cache cluster
+// outside of an Amazon Virtual Private Cloud (VPC). If you are creating a cache
+// cluster inside of a VPC, use a cache subnet group instead. For more information,
+// see CreateCacheSubnetGroup (http://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_CreateCacheSubnetGroup.html).
+func (c *ElastiCache) CreateCacheSecurityGroup(input *CreateCacheSecurityGroupInput) (*CreateCacheSecurityGroupOutput, error) {
+	req, out := c.CreateCacheSecurityGroupRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateCacheSubnetGroup = "CreateCacheSubnetGroup"
+
+// CreateCacheSubnetGroupRequest generates a "aws/request.Request" representing the
+// client's request for the CreateCacheSubnetGroup operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CreateCacheSubnetGroup method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the CreateCacheSubnetGroupRequest method.
+//    req, resp := client.CreateCacheSubnetGroupRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *ElastiCache) CreateCacheSubnetGroupRequest(input *CreateCacheSubnetGroupInput) (req *request.Request, output *CreateCacheSubnetGroupOutput) {
+	op := &request.Operation{
+		Name:       opCreateCacheSubnetGroup,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateCacheSubnetGroupInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CreateCacheSubnetGroupOutput{}
+	req.Data = output
+	return
+}
+
+// The CreateCacheSubnetGroup action creates a new cache subnet group.
+//
+// Use this operation only when you are creating a cluster in an Amazon Virtual
+// Private Cloud (VPC).
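+//
+// A minimal usage sketch (the group name, description, and subnet ID below are
+// placeholders):
+//
+//    svc := elasticache.New(session.New())
+//
+//    params := &elasticache.CreateCacheSubnetGroupInput{
+//        CacheSubnetGroupDescription: aws.String("example subnet group"),
+//        CacheSubnetGroupName:        aws.String("my-subnet-group"),
+//        SubnetIds: []*string{
+//            aws.String("subnet-12345678"),
+//        },
+//    }
+//    resp, err := svc.CreateCacheSubnetGroup(params)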
+func (c *ElastiCache) CreateCacheSubnetGroup(input *CreateCacheSubnetGroupInput) (*CreateCacheSubnetGroupOutput, error) {
+	req, out := c.CreateCacheSubnetGroupRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateReplicationGroup = "CreateReplicationGroup"
+
+// CreateReplicationGroupRequest generates a "aws/request.Request" representing the
+// client's request for the CreateReplicationGroup operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CreateReplicationGroup method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the CreateReplicationGroupRequest method.
+//    req, resp := client.CreateReplicationGroupRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *ElastiCache) CreateReplicationGroupRequest(input *CreateReplicationGroupInput) (req *request.Request, output *CreateReplicationGroupOutput) {
+	op := &request.Operation{
+		Name:       opCreateReplicationGroup,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateReplicationGroupInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CreateReplicationGroupOutput{}
+	req.Data = output
+	return
+}
+
+// The CreateReplicationGroup action creates a replication group. A replication
+// group is a collection of cache clusters, where one of the cache clusters
+// is a read/write primary and the others are read-only replicas. Writes to
+// the primary are automatically propagated to the replicas.
+//
+// When you create a replication group, you must specify an existing cache
+// cluster that is in the primary role. When the replication group has been
+// successfully created, you can add one or more read replicas to it, up to
+// a total of five read replicas.
+//
+// This action is valid only for Redis.
+func (c *ElastiCache) CreateReplicationGroup(input *CreateReplicationGroupInput) (*CreateReplicationGroupOutput, error) {
+	req, out := c.CreateReplicationGroupRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateSnapshot = "CreateSnapshot"
+
+// CreateSnapshotRequest generates a "aws/request.Request" representing the
+// client's request for the CreateSnapshot operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CreateSnapshot method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the CreateSnapshotRequest method.
+// req, resp := client.CreateSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElastiCache) CreateSnapshotRequest(input *CreateSnapshotInput) (req *request.Request, output *CreateSnapshotOutput) { + op := &request.Operation{ + Name: opCreateSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateSnapshotOutput{} + req.Data = output + return +} + +// The CreateSnapshot action creates a copy of an entire cache cluster at a +// specific moment in time. +func (c *ElastiCache) CreateSnapshot(input *CreateSnapshotInput) (*CreateSnapshotOutput, error) { + req, out := c.CreateSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opDeleteCacheCluster = "DeleteCacheCluster" + +// DeleteCacheClusterRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCacheCluster operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteCacheCluster method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteCacheClusterRequest method. +// req, resp := client.DeleteCacheClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElastiCache) DeleteCacheClusterRequest(input *DeleteCacheClusterInput) (req *request.Request, output *DeleteCacheClusterOutput) { + op := &request.Operation{ + Name: opDeleteCacheCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteCacheClusterInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteCacheClusterOutput{} + req.Data = output + return +} + +// The DeleteCacheCluster action deletes a previously provisioned cache cluster. +// DeleteCacheCluster deletes all associated cache nodes, node endpoints and +// the cache cluster itself. When you receive a successful response from this +// action, Amazon ElastiCache immediately begins deleting the cache cluster; +// you cannot cancel or revert this action. +// +// This API cannot be used to delete a cache cluster that is the last read +// replica of a replication group that has Multi-AZ mode enabled. +func (c *ElastiCache) DeleteCacheCluster(input *DeleteCacheClusterInput) (*DeleteCacheClusterOutput, error) { + req, out := c.DeleteCacheClusterRequest(input) + err := req.Send() + return out, err +} + +const opDeleteCacheParameterGroup = "DeleteCacheParameterGroup" + +// DeleteCacheParameterGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCacheParameterGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteCacheParameterGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteCacheParameterGroupRequest method. +// req, resp := client.DeleteCacheParameterGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElastiCache) DeleteCacheParameterGroupRequest(input *DeleteCacheParameterGroupInput) (req *request.Request, output *DeleteCacheParameterGroupOutput) { + op := &request.Operation{ + Name: opDeleteCacheParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteCacheParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteCacheParameterGroupOutput{} + req.Data = output + return +} + +// The DeleteCacheParameterGroup action deletes the specified cache parameter +// group. You cannot delete a cache parameter group if it is associated with +// any cache clusters. +func (c *ElastiCache) DeleteCacheParameterGroup(input *DeleteCacheParameterGroupInput) (*DeleteCacheParameterGroupOutput, error) { + req, out := c.DeleteCacheParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteCacheSecurityGroup = "DeleteCacheSecurityGroup" + +// DeleteCacheSecurityGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCacheSecurityGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteCacheSecurityGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteCacheSecurityGroupRequest method. +// req, resp := client.DeleteCacheSecurityGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElastiCache) DeleteCacheSecurityGroupRequest(input *DeleteCacheSecurityGroupInput) (req *request.Request, output *DeleteCacheSecurityGroupOutput) { + op := &request.Operation{ + Name: opDeleteCacheSecurityGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteCacheSecurityGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteCacheSecurityGroupOutput{} + req.Data = output + return +} + +// The DeleteCacheSecurityGroup action deletes a cache security group. 
+// +// You cannot delete a cache security group if it is associated with any cache +// clusters. +func (c *ElastiCache) DeleteCacheSecurityGroup(input *DeleteCacheSecurityGroupInput) (*DeleteCacheSecurityGroupOutput, error) { + req, out := c.DeleteCacheSecurityGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteCacheSubnetGroup = "DeleteCacheSubnetGroup" + +// DeleteCacheSubnetGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCacheSubnetGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteCacheSubnetGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteCacheSubnetGroupRequest method. +// req, resp := client.DeleteCacheSubnetGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElastiCache) DeleteCacheSubnetGroupRequest(input *DeleteCacheSubnetGroupInput) (req *request.Request, output *DeleteCacheSubnetGroupOutput) { + op := &request.Operation{ + Name: opDeleteCacheSubnetGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteCacheSubnetGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteCacheSubnetGroupOutput{} + req.Data = output + return +} + +// The DeleteCacheSubnetGroup action deletes a cache subnet group. +// +// You cannot delete a cache subnet group if it is associated with any cache +// clusters. +func (c *ElastiCache) DeleteCacheSubnetGroup(input *DeleteCacheSubnetGroupInput) (*DeleteCacheSubnetGroupOutput, error) { + req, out := c.DeleteCacheSubnetGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteReplicationGroup = "DeleteReplicationGroup" + +// DeleteReplicationGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteReplicationGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteReplicationGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteReplicationGroupRequest method. 
+// req, resp := client.DeleteReplicationGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElastiCache) DeleteReplicationGroupRequest(input *DeleteReplicationGroupInput) (req *request.Request, output *DeleteReplicationGroupOutput) { + op := &request.Operation{ + Name: opDeleteReplicationGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteReplicationGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteReplicationGroupOutput{} + req.Data = output + return +} + +// The DeleteReplicationGroup action deletes an existing replication group. +// By default, this action deletes the entire replication group, including the +// primary cluster and all of the read replicas. You can optionally delete only +// the read replicas, while retaining the primary cluster. +// +// When you receive a successful response from this action, Amazon ElastiCache +// immediately begins deleting the selected resources; you cannot cancel or +// revert this action. +func (c *ElastiCache) DeleteReplicationGroup(input *DeleteReplicationGroupInput) (*DeleteReplicationGroupOutput, error) { + req, out := c.DeleteReplicationGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSnapshot = "DeleteSnapshot" + +// DeleteSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteSnapshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteSnapshotRequest method. +// req, resp := client.DeleteSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElastiCache) DeleteSnapshotRequest(input *DeleteSnapshotInput) (req *request.Request, output *DeleteSnapshotOutput) { + op := &request.Operation{ + Name: opDeleteSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteSnapshotOutput{} + req.Data = output + return +} + +// The DeleteSnapshot action deletes an existing snapshot. When you receive +// a successful response from this action, ElastiCache immediately begins deleting +// the snapshot; you cannot cancel or revert this action. +func (c *ElastiCache) DeleteSnapshot(input *DeleteSnapshotInput) (*DeleteSnapshotOutput, error) { + req, out := c.DeleteSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opDescribeCacheClusters = "DescribeCacheClusters" + +// DescribeCacheClustersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCacheClusters operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeCacheClusters method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DescribeCacheClustersRequest method.
+//    req, resp := client.DescribeCacheClustersRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *ElastiCache) DescribeCacheClustersRequest(input *DescribeCacheClustersInput) (req *request.Request, output *DescribeCacheClustersOutput) {
+	op := &request.Operation{
+		Name:       opDescribeCacheClusters,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"Marker"},
+			OutputTokens:    []string{"Marker"},
+			LimitToken:      "MaxRecords",
+			TruncationToken: "",
+		},
+	}
+
+	if input == nil {
+		input = &DescribeCacheClustersInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeCacheClustersOutput{}
+	req.Data = output
+	return
+}
+
+// The DescribeCacheClusters action returns information about all provisioned
+// cache clusters if no cache cluster identifier is specified, or about a specific
+// cache cluster if a cache cluster identifier is supplied.
+//
+// By default, abbreviated information about the cache cluster(s) will be
+// returned. You can use the optional ShowDetails flag to retrieve detailed
+// information about the cache nodes associated with the cache clusters. These
+// details include the DNS address and port for the cache node endpoint.
+//
+// If the cluster is in the CREATING state, only cluster-level information
+// will be displayed until all of the nodes are successfully provisioned.
+//
+// If the cluster is in the DELETING state, only cluster-level information
+// will be displayed.
+//
+// If cache nodes are currently being added to the cache cluster, node endpoint
+// information and creation time for the additional nodes will not be displayed
+// until they are completely provisioned. When the cache cluster state is available,
+// the cluster is ready for use.
+//
+// If cache nodes are currently being removed from the cache cluster, no endpoint
+// information for the removed nodes is displayed.
+func (c *ElastiCache) DescribeCacheClusters(input *DescribeCacheClustersInput) (*DescribeCacheClustersOutput, error) {
+	req, out := c.DescribeCacheClustersRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// DescribeCacheClustersPages iterates over the pages of a DescribeCacheClusters operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See DescribeCacheClusters method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
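+//
+// As a fuller sketch (illustrative only, not generated code: the session setup,
+// region, and variable names are assumptions, and the implied imports are the
+// aws, aws/session, and service/elasticache packages of this SDK), a caller
+// could collect every cluster identifier across all pages like so:
+//
+//    sess := session.New(&aws.Config{Region: aws.String("us-west-2")})
+//    client := elasticache.New(sess)
+//
+//    var ids []string
+//    err := client.DescribeCacheClustersPages(&elasticache.DescribeCacheClustersInput{},
+//        func(page *elasticache.DescribeCacheClustersOutput, lastPage bool) bool {
+//            for _, cc := range page.CacheClusters {
+//                ids = append(ids, *cc.CacheClusterId)
+//            }
+//            return true // keep going; iteration ends after the last page
+//        })
+//
+//    // Example iterating over at most 3 pages of a DescribeCacheClusters operation.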
+// pageNum := 0 +// err := client.DescribeCacheClustersPages(params, +// func(page *DescribeCacheClustersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ElastiCache) DescribeCacheClustersPages(input *DescribeCacheClustersInput, fn func(p *DescribeCacheClustersOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeCacheClustersRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeCacheClustersOutput), lastPage) + }) +} + +const opDescribeCacheEngineVersions = "DescribeCacheEngineVersions" + +// DescribeCacheEngineVersionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCacheEngineVersions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeCacheEngineVersions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeCacheEngineVersionsRequest method. +// req, resp := client.DescribeCacheEngineVersionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElastiCache) DescribeCacheEngineVersionsRequest(input *DescribeCacheEngineVersionsInput) (req *request.Request, output *DescribeCacheEngineVersionsOutput) { + op := &request.Operation{ + Name: opDescribeCacheEngineVersions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeCacheEngineVersionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCacheEngineVersionsOutput{} + req.Data = output + return +} + +// The DescribeCacheEngineVersions action returns a list of the available cache +// engines and their versions. +func (c *ElastiCache) DescribeCacheEngineVersions(input *DescribeCacheEngineVersionsInput) (*DescribeCacheEngineVersionsOutput, error) { + req, out := c.DescribeCacheEngineVersionsRequest(input) + err := req.Send() + return out, err +} + +// DescribeCacheEngineVersionsPages iterates over the pages of a DescribeCacheEngineVersions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeCacheEngineVersions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeCacheEngineVersions operation. 
+// pageNum := 0 +// err := client.DescribeCacheEngineVersionsPages(params, +// func(page *DescribeCacheEngineVersionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ElastiCache) DescribeCacheEngineVersionsPages(input *DescribeCacheEngineVersionsInput, fn func(p *DescribeCacheEngineVersionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeCacheEngineVersionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeCacheEngineVersionsOutput), lastPage) + }) +} + +const opDescribeCacheParameterGroups = "DescribeCacheParameterGroups" + +// DescribeCacheParameterGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCacheParameterGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeCacheParameterGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeCacheParameterGroupsRequest method. +// req, resp := client.DescribeCacheParameterGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElastiCache) DescribeCacheParameterGroupsRequest(input *DescribeCacheParameterGroupsInput) (req *request.Request, output *DescribeCacheParameterGroupsOutput) { + op := &request.Operation{ + Name: opDescribeCacheParameterGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeCacheParameterGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCacheParameterGroupsOutput{} + req.Data = output + return +} + +// The DescribeCacheParameterGroups action returns a list of cache parameter +// group descriptions. If a cache parameter group name is specified, the list +// will contain only the descriptions for that group. +func (c *ElastiCache) DescribeCacheParameterGroups(input *DescribeCacheParameterGroupsInput) (*DescribeCacheParameterGroupsOutput, error) { + req, out := c.DescribeCacheParameterGroupsRequest(input) + err := req.Send() + return out, err +} + +// DescribeCacheParameterGroupsPages iterates over the pages of a DescribeCacheParameterGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeCacheParameterGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeCacheParameterGroups operation. 
+// pageNum := 0 +// err := client.DescribeCacheParameterGroupsPages(params, +// func(page *DescribeCacheParameterGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ElastiCache) DescribeCacheParameterGroupsPages(input *DescribeCacheParameterGroupsInput, fn func(p *DescribeCacheParameterGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeCacheParameterGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeCacheParameterGroupsOutput), lastPage) + }) +} + +const opDescribeCacheParameters = "DescribeCacheParameters" + +// DescribeCacheParametersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCacheParameters operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeCacheParameters method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeCacheParametersRequest method. +// req, resp := client.DescribeCacheParametersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElastiCache) DescribeCacheParametersRequest(input *DescribeCacheParametersInput) (req *request.Request, output *DescribeCacheParametersOutput) { + op := &request.Operation{ + Name: opDescribeCacheParameters, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeCacheParametersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCacheParametersOutput{} + req.Data = output + return +} + +// The DescribeCacheParameters action returns the detailed parameter list for +// a particular cache parameter group. +func (c *ElastiCache) DescribeCacheParameters(input *DescribeCacheParametersInput) (*DescribeCacheParametersOutput, error) { + req, out := c.DescribeCacheParametersRequest(input) + err := req.Send() + return out, err +} + +// DescribeCacheParametersPages iterates over the pages of a DescribeCacheParameters operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeCacheParameters method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeCacheParameters operation. 
+// pageNum := 0 +// err := client.DescribeCacheParametersPages(params, +// func(page *DescribeCacheParametersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ElastiCache) DescribeCacheParametersPages(input *DescribeCacheParametersInput, fn func(p *DescribeCacheParametersOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeCacheParametersRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeCacheParametersOutput), lastPage) + }) +} + +const opDescribeCacheSecurityGroups = "DescribeCacheSecurityGroups" + +// DescribeCacheSecurityGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCacheSecurityGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeCacheSecurityGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeCacheSecurityGroupsRequest method. +// req, resp := client.DescribeCacheSecurityGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElastiCache) DescribeCacheSecurityGroupsRequest(input *DescribeCacheSecurityGroupsInput) (req *request.Request, output *DescribeCacheSecurityGroupsOutput) { + op := &request.Operation{ + Name: opDescribeCacheSecurityGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeCacheSecurityGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCacheSecurityGroupsOutput{} + req.Data = output + return +} + +// The DescribeCacheSecurityGroups action returns a list of cache security group +// descriptions. If a cache security group name is specified, the list will +// contain only the description of that group. +func (c *ElastiCache) DescribeCacheSecurityGroups(input *DescribeCacheSecurityGroupsInput) (*DescribeCacheSecurityGroupsOutput, error) { + req, out := c.DescribeCacheSecurityGroupsRequest(input) + err := req.Send() + return out, err +} + +// DescribeCacheSecurityGroupsPages iterates over the pages of a DescribeCacheSecurityGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeCacheSecurityGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeCacheSecurityGroups operation. 
+// pageNum := 0 +// err := client.DescribeCacheSecurityGroupsPages(params, +// func(page *DescribeCacheSecurityGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ElastiCache) DescribeCacheSecurityGroupsPages(input *DescribeCacheSecurityGroupsInput, fn func(p *DescribeCacheSecurityGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeCacheSecurityGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeCacheSecurityGroupsOutput), lastPage) + }) +} + +const opDescribeCacheSubnetGroups = "DescribeCacheSubnetGroups" + +// DescribeCacheSubnetGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCacheSubnetGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeCacheSubnetGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeCacheSubnetGroupsRequest method. +// req, resp := client.DescribeCacheSubnetGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElastiCache) DescribeCacheSubnetGroupsRequest(input *DescribeCacheSubnetGroupsInput) (req *request.Request, output *DescribeCacheSubnetGroupsOutput) { + op := &request.Operation{ + Name: opDescribeCacheSubnetGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeCacheSubnetGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCacheSubnetGroupsOutput{} + req.Data = output + return +} + +// The DescribeCacheSubnetGroups action returns a list of cache subnet group +// descriptions. If a subnet group name is specified, the list will contain +// only the description of that group. +func (c *ElastiCache) DescribeCacheSubnetGroups(input *DescribeCacheSubnetGroupsInput) (*DescribeCacheSubnetGroupsOutput, error) { + req, out := c.DescribeCacheSubnetGroupsRequest(input) + err := req.Send() + return out, err +} + +// DescribeCacheSubnetGroupsPages iterates over the pages of a DescribeCacheSubnetGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeCacheSubnetGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeCacheSubnetGroups operation. 
+// pageNum := 0 +// err := client.DescribeCacheSubnetGroupsPages(params, +// func(page *DescribeCacheSubnetGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ElastiCache) DescribeCacheSubnetGroupsPages(input *DescribeCacheSubnetGroupsInput, fn func(p *DescribeCacheSubnetGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeCacheSubnetGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeCacheSubnetGroupsOutput), lastPage) + }) +} + +const opDescribeEngineDefaultParameters = "DescribeEngineDefaultParameters" + +// DescribeEngineDefaultParametersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEngineDefaultParameters operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeEngineDefaultParameters method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeEngineDefaultParametersRequest method. +// req, resp := client.DescribeEngineDefaultParametersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElastiCache) DescribeEngineDefaultParametersRequest(input *DescribeEngineDefaultParametersInput) (req *request.Request, output *DescribeEngineDefaultParametersOutput) { + op := &request.Operation{ + Name: opDescribeEngineDefaultParameters, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"EngineDefaults.Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeEngineDefaultParametersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeEngineDefaultParametersOutput{} + req.Data = output + return +} + +// The DescribeEngineDefaultParameters action returns the default engine and +// system parameter information for the specified cache engine. +func (c *ElastiCache) DescribeEngineDefaultParameters(input *DescribeEngineDefaultParametersInput) (*DescribeEngineDefaultParametersOutput, error) { + req, out := c.DescribeEngineDefaultParametersRequest(input) + err := req.Send() + return out, err +} + +// DescribeEngineDefaultParametersPages iterates over the pages of a DescribeEngineDefaultParameters operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeEngineDefaultParameters method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeEngineDefaultParameters operation. 
+// pageNum := 0 +// err := client.DescribeEngineDefaultParametersPages(params, +// func(page *DescribeEngineDefaultParametersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ElastiCache) DescribeEngineDefaultParametersPages(input *DescribeEngineDefaultParametersInput, fn func(p *DescribeEngineDefaultParametersOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeEngineDefaultParametersRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeEngineDefaultParametersOutput), lastPage) + }) +} + +const opDescribeEvents = "DescribeEvents" + +// DescribeEventsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEvents operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeEvents method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeEventsRequest method. +// req, resp := client.DescribeEventsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElastiCache) DescribeEventsRequest(input *DescribeEventsInput) (req *request.Request, output *DescribeEventsOutput) { + op := &request.Operation{ + Name: opDescribeEvents, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeEventsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeEventsOutput{} + req.Data = output + return +} + +// The DescribeEvents action returns events related to cache clusters, cache +// security groups, and cache parameter groups. You can obtain events specific +// to a particular cache cluster, cache security group, or cache parameter group +// by providing the name as a parameter. +// +// By default, only the events occurring within the last hour are returned; +// however, you can retrieve up to 14 days' worth of events if necessary. +func (c *ElastiCache) DescribeEvents(input *DescribeEventsInput) (*DescribeEventsOutput, error) { + req, out := c.DescribeEventsRequest(input) + err := req.Send() + return out, err +} + +// DescribeEventsPages iterates over the pages of a DescribeEvents operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeEvents method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeEvents operation. 
+// pageNum := 0 +// err := client.DescribeEventsPages(params, +// func(page *DescribeEventsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ElastiCache) DescribeEventsPages(input *DescribeEventsInput, fn func(p *DescribeEventsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeEventsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeEventsOutput), lastPage) + }) +} + +const opDescribeReplicationGroups = "DescribeReplicationGroups" + +// DescribeReplicationGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReplicationGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeReplicationGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeReplicationGroupsRequest method. +// req, resp := client.DescribeReplicationGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElastiCache) DescribeReplicationGroupsRequest(input *DescribeReplicationGroupsInput) (req *request.Request, output *DescribeReplicationGroupsOutput) { + op := &request.Operation{ + Name: opDescribeReplicationGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeReplicationGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReplicationGroupsOutput{} + req.Data = output + return +} + +// The DescribeReplicationGroups action returns information about a particular +// replication group. If no identifier is specified, DescribeReplicationGroups +// returns information about all replication groups. +func (c *ElastiCache) DescribeReplicationGroups(input *DescribeReplicationGroupsInput) (*DescribeReplicationGroupsOutput, error) { + req, out := c.DescribeReplicationGroupsRequest(input) + err := req.Send() + return out, err +} + +// DescribeReplicationGroupsPages iterates over the pages of a DescribeReplicationGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeReplicationGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeReplicationGroups operation. 
+// pageNum := 0 +// err := client.DescribeReplicationGroupsPages(params, +// func(page *DescribeReplicationGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ElastiCache) DescribeReplicationGroupsPages(input *DescribeReplicationGroupsInput, fn func(p *DescribeReplicationGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeReplicationGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeReplicationGroupsOutput), lastPage) + }) +} + +const opDescribeReservedCacheNodes = "DescribeReservedCacheNodes" + +// DescribeReservedCacheNodesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReservedCacheNodes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeReservedCacheNodes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeReservedCacheNodesRequest method. +// req, resp := client.DescribeReservedCacheNodesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElastiCache) DescribeReservedCacheNodesRequest(input *DescribeReservedCacheNodesInput) (req *request.Request, output *DescribeReservedCacheNodesOutput) { + op := &request.Operation{ + Name: opDescribeReservedCacheNodes, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeReservedCacheNodesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReservedCacheNodesOutput{} + req.Data = output + return +} + +// The DescribeReservedCacheNodes action returns information about reserved +// cache nodes for this account, or about a specified reserved cache node. +func (c *ElastiCache) DescribeReservedCacheNodes(input *DescribeReservedCacheNodesInput) (*DescribeReservedCacheNodesOutput, error) { + req, out := c.DescribeReservedCacheNodesRequest(input) + err := req.Send() + return out, err +} + +// DescribeReservedCacheNodesPages iterates over the pages of a DescribeReservedCacheNodes operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeReservedCacheNodes method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeReservedCacheNodes operation. 
+// pageNum := 0 +// err := client.DescribeReservedCacheNodesPages(params, +// func(page *DescribeReservedCacheNodesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ElastiCache) DescribeReservedCacheNodesPages(input *DescribeReservedCacheNodesInput, fn func(p *DescribeReservedCacheNodesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeReservedCacheNodesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeReservedCacheNodesOutput), lastPage) + }) +} + +const opDescribeReservedCacheNodesOfferings = "DescribeReservedCacheNodesOfferings" + +// DescribeReservedCacheNodesOfferingsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReservedCacheNodesOfferings operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeReservedCacheNodesOfferings method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeReservedCacheNodesOfferingsRequest method. +// req, resp := client.DescribeReservedCacheNodesOfferingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElastiCache) DescribeReservedCacheNodesOfferingsRequest(input *DescribeReservedCacheNodesOfferingsInput) (req *request.Request, output *DescribeReservedCacheNodesOfferingsOutput) { + op := &request.Operation{ + Name: opDescribeReservedCacheNodesOfferings, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeReservedCacheNodesOfferingsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReservedCacheNodesOfferingsOutput{} + req.Data = output + return +} + +// The DescribeReservedCacheNodesOfferings action lists available reserved cache +// node offerings. +func (c *ElastiCache) DescribeReservedCacheNodesOfferings(input *DescribeReservedCacheNodesOfferingsInput) (*DescribeReservedCacheNodesOfferingsOutput, error) { + req, out := c.DescribeReservedCacheNodesOfferingsRequest(input) + err := req.Send() + return out, err +} + +// DescribeReservedCacheNodesOfferingsPages iterates over the pages of a DescribeReservedCacheNodesOfferings operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeReservedCacheNodesOfferings method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeReservedCacheNodesOfferings operation. 
+// pageNum := 0 +// err := client.DescribeReservedCacheNodesOfferingsPages(params, +// func(page *DescribeReservedCacheNodesOfferingsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ElastiCache) DescribeReservedCacheNodesOfferingsPages(input *DescribeReservedCacheNodesOfferingsInput, fn func(p *DescribeReservedCacheNodesOfferingsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeReservedCacheNodesOfferingsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeReservedCacheNodesOfferingsOutput), lastPage) + }) +} + +const opDescribeSnapshots = "DescribeSnapshots" + +// DescribeSnapshotsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSnapshots operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeSnapshots method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeSnapshotsRequest method. +// req, resp := client.DescribeSnapshotsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElastiCache) DescribeSnapshotsRequest(input *DescribeSnapshotsInput) (req *request.Request, output *DescribeSnapshotsOutput) { + op := &request.Operation{ + Name: opDescribeSnapshots, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeSnapshotsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSnapshotsOutput{} + req.Data = output + return +} + +// The DescribeSnapshots action returns information about cache cluster snapshots. +// By default, DescribeSnapshots lists all of your snapshots; it can optionally +// describe a single snapshot, or just the snapshots associated with a particular +// cache cluster. +func (c *ElastiCache) DescribeSnapshots(input *DescribeSnapshotsInput) (*DescribeSnapshotsOutput, error) { + req, out := c.DescribeSnapshotsRequest(input) + err := req.Send() + return out, err +} + +// DescribeSnapshotsPages iterates over the pages of a DescribeSnapshots operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeSnapshots method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeSnapshots operation. 
+// pageNum := 0 +// err := client.DescribeSnapshotsPages(params, +// func(page *DescribeSnapshotsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ElastiCache) DescribeSnapshotsPages(input *DescribeSnapshotsInput, fn func(p *DescribeSnapshotsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeSnapshotsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeSnapshotsOutput), lastPage) + }) +} + +const opListAllowedNodeTypeModifications = "ListAllowedNodeTypeModifications" + +// ListAllowedNodeTypeModificationsRequest generates a "aws/request.Request" representing the +// client's request for the ListAllowedNodeTypeModifications operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListAllowedNodeTypeModifications method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListAllowedNodeTypeModificationsRequest method. +// req, resp := client.ListAllowedNodeTypeModificationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElastiCache) ListAllowedNodeTypeModificationsRequest(input *ListAllowedNodeTypeModificationsInput) (req *request.Request, output *ListAllowedNodeTypeModificationsOutput) { + op := &request.Operation{ + Name: opListAllowedNodeTypeModifications, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListAllowedNodeTypeModificationsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListAllowedNodeTypeModificationsOutput{} + req.Data = output + return +} + +// The ListAllowedNodeTypeModifications action lists all available node types +// that you can scale your Redis cluster's or replication group's current node +// type up to. +// +// When you use the ModifyCacheCluster or ModifyReplicationGroup APIs to scale +// up your cluster or replication group, the value of the CacheNodeType parameter +// must be one of the node types returned by this action. +func (c *ElastiCache) ListAllowedNodeTypeModifications(input *ListAllowedNodeTypeModificationsInput) (*ListAllowedNodeTypeModificationsOutput, error) { + req, out := c.ListAllowedNodeTypeModificationsRequest(input) + err := req.Send() + return out, err +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the ListTagsForResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElastiCache) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *TagListMessage) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &TagListMessage{} + req.Data = output + return +} + +// The ListTagsForResource action lists all cost allocation tags currently on +// the named resource. A cost allocation tag is a key-value pair where the key +// is case-sensitive and the value is optional. Cost allocation tags can be +// used to categorize and track your AWS costs. +// +// You can have a maximum of 10 cost allocation tags on an ElastiCache resource. +// For more information, see Using Cost Allocation Tags in Amazon ElastiCache +// (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/BestPractices.html). +func (c *ElastiCache) ListTagsForResource(input *ListTagsForResourceInput) (*TagListMessage, error) { + req, out := c.ListTagsForResourceRequest(input) + err := req.Send() + return out, err +} + +const opModifyCacheCluster = "ModifyCacheCluster" + +// ModifyCacheClusterRequest generates a "aws/request.Request" representing the +// client's request for the ModifyCacheCluster operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyCacheCluster method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyCacheClusterRequest method. +// req, resp := client.ModifyCacheClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElastiCache) ModifyCacheClusterRequest(input *ModifyCacheClusterInput) (req *request.Request, output *ModifyCacheClusterOutput) { + op := &request.Operation{ + Name: opModifyCacheCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyCacheClusterInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyCacheClusterOutput{} + req.Data = output + return +} + +// The ModifyCacheCluster action modifies the settings for a cache cluster. +// You can use this action to change one or more cluster configuration parameters +// by specifying the parameters and the new values. 
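+//
+// As an illustrative sketch only (the cluster identifier and values are made
+// up; aws.String, aws.Int64, and aws.Bool are the SDK's pointer helpers from
+// the aws package), scaling a cluster to four nodes and applying the change
+// immediately might look like:
+//
+//    out, err := client.ModifyCacheCluster(&ModifyCacheClusterInput{
+//        CacheClusterId:   aws.String("my-memcached"), // hypothetical cluster ID
+//        NumCacheNodes:    aws.Int64(4),
+//        ApplyImmediately: aws.Bool(true),
+//    })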
+func (c *ElastiCache) ModifyCacheCluster(input *ModifyCacheClusterInput) (*ModifyCacheClusterOutput, error) {
+	req, out := c.ModifyCacheClusterRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opModifyCacheParameterGroup = "ModifyCacheParameterGroup"
+
+// ModifyCacheParameterGroupRequest generates a "aws/request.Request" representing the
+// client's request for the ModifyCacheParameterGroup operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ModifyCacheParameterGroup method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the ModifyCacheParameterGroupRequest method.
+//    req, resp := client.ModifyCacheParameterGroupRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *ElastiCache) ModifyCacheParameterGroupRequest(input *ModifyCacheParameterGroupInput) (req *request.Request, output *CacheParameterGroupNameMessage) {
+	op := &request.Operation{
+		Name:       opModifyCacheParameterGroup,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &ModifyCacheParameterGroupInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CacheParameterGroupNameMessage{}
+	req.Data = output
+	return
+}
+
+// The ModifyCacheParameterGroup action modifies the parameters of a cache parameter
+// group. You can modify up to 20 parameters in a single request by submitting
+// a list of parameter name and value pairs.
+func (c *ElastiCache) ModifyCacheParameterGroup(input *ModifyCacheParameterGroupInput) (*CacheParameterGroupNameMessage, error) {
+	req, out := c.ModifyCacheParameterGroupRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opModifyCacheSubnetGroup = "ModifyCacheSubnetGroup"
+
+// ModifyCacheSubnetGroupRequest generates a "aws/request.Request" representing the
+// client's request for the ModifyCacheSubnetGroup operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ModifyCacheSubnetGroup method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the ModifyCacheSubnetGroupRequest method.
+// req, resp := client.ModifyCacheSubnetGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElastiCache) ModifyCacheSubnetGroupRequest(input *ModifyCacheSubnetGroupInput) (req *request.Request, output *ModifyCacheSubnetGroupOutput) { + op := &request.Operation{ + Name: opModifyCacheSubnetGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyCacheSubnetGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyCacheSubnetGroupOutput{} + req.Data = output + return +} + +// The ModifyCacheSubnetGroup action modifies an existing cache subnet group. +func (c *ElastiCache) ModifyCacheSubnetGroup(input *ModifyCacheSubnetGroupInput) (*ModifyCacheSubnetGroupOutput, error) { + req, out := c.ModifyCacheSubnetGroupRequest(input) + err := req.Send() + return out, err +} + +const opModifyReplicationGroup = "ModifyReplicationGroup" + +// ModifyReplicationGroupRequest generates a "aws/request.Request" representing the +// client's request for the ModifyReplicationGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyReplicationGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyReplicationGroupRequest method. +// req, resp := client.ModifyReplicationGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElastiCache) ModifyReplicationGroupRequest(input *ModifyReplicationGroupInput) (req *request.Request, output *ModifyReplicationGroupOutput) { + op := &request.Operation{ + Name: opModifyReplicationGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyReplicationGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyReplicationGroupOutput{} + req.Data = output + return +} + +// The ModifyReplicationGroup action modifies the settings for a replication +// group. +func (c *ElastiCache) ModifyReplicationGroup(input *ModifyReplicationGroupInput) (*ModifyReplicationGroupOutput, error) { + req, out := c.ModifyReplicationGroupRequest(input) + err := req.Send() + return out, err +} + +const opPurchaseReservedCacheNodesOffering = "PurchaseReservedCacheNodesOffering" + +// PurchaseReservedCacheNodesOfferingRequest generates a "aws/request.Request" representing the +// client's request for the PurchaseReservedCacheNodesOffering operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PurchaseReservedCacheNodesOffering method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PurchaseReservedCacheNodesOfferingRequest method. +// req, resp := client.PurchaseReservedCacheNodesOfferingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElastiCache) PurchaseReservedCacheNodesOfferingRequest(input *PurchaseReservedCacheNodesOfferingInput) (req *request.Request, output *PurchaseReservedCacheNodesOfferingOutput) { + op := &request.Operation{ + Name: opPurchaseReservedCacheNodesOffering, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PurchaseReservedCacheNodesOfferingInput{} + } + + req = c.newRequest(op, input, output) + output = &PurchaseReservedCacheNodesOfferingOutput{} + req.Data = output + return +} + +// The PurchaseReservedCacheNodesOffering action allows you to purchase a reserved +// cache node offering. +func (c *ElastiCache) PurchaseReservedCacheNodesOffering(input *PurchaseReservedCacheNodesOfferingInput) (*PurchaseReservedCacheNodesOfferingOutput, error) { + req, out := c.PurchaseReservedCacheNodesOfferingRequest(input) + err := req.Send() + return out, err +} + +const opRebootCacheCluster = "RebootCacheCluster" + +// RebootCacheClusterRequest generates a "aws/request.Request" representing the +// client's request for the RebootCacheCluster operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RebootCacheCluster method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RebootCacheClusterRequest method. +// req, resp := client.RebootCacheClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElastiCache) RebootCacheClusterRequest(input *RebootCacheClusterInput) (req *request.Request, output *RebootCacheClusterOutput) { + op := &request.Operation{ + Name: opRebootCacheCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RebootCacheClusterInput{} + } + + req = c.newRequest(op, input, output) + output = &RebootCacheClusterOutput{} + req.Data = output + return +} + +// The RebootCacheCluster action reboots some, or all, of the cache nodes within +// a provisioned cache cluster. This API will apply any modified cache parameter +// groups to the cache cluster. The reboot action takes place as soon as possible, +// and results in a momentary outage to the cache cluster. During the reboot, +// the cache cluster status is set to REBOOTING. +// +// The reboot causes the contents of the cache (for each cache node being rebooted) +// to be lost. +// +// When the reboot is complete, a cache cluster event is created. 
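+//
+// As an illustrative sketch only (the identifiers are made up), rebooting a
+// single node within a cluster might look like:
+//
+//    out, err := client.RebootCacheCluster(&RebootCacheClusterInput{
+//        CacheClusterId:       aws.String("my-cluster"),
+//        CacheNodeIdsToReboot: []*string{aws.String("0001")},
+//    })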
+func (c *ElastiCache) RebootCacheCluster(input *RebootCacheClusterInput) (*RebootCacheClusterOutput, error) { + req, out := c.RebootCacheClusterRequest(input) + err := req.Send() + return out, err +} + +const opRemoveTagsFromResource = "RemoveTagsFromResource" + +// RemoveTagsFromResourceRequest generates a "aws/request.Request" representing the +// client's request for the RemoveTagsFromResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RemoveTagsFromResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RemoveTagsFromResourceRequest method. +// req, resp := client.RemoveTagsFromResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElastiCache) RemoveTagsFromResourceRequest(input *RemoveTagsFromResourceInput) (req *request.Request, output *TagListMessage) { + op := &request.Operation{ + Name: opRemoveTagsFromResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveTagsFromResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &TagListMessage{} + req.Data = output + return +} + +// The RemoveTagsFromResource action removes the tags identified by the TagKeys +// list from the named resource. +func (c *ElastiCache) RemoveTagsFromResource(input *RemoveTagsFromResourceInput) (*TagListMessage, error) { + req, out := c.RemoveTagsFromResourceRequest(input) + err := req.Send() + return out, err +} + +const opResetCacheParameterGroup = "ResetCacheParameterGroup" + +// ResetCacheParameterGroupRequest generates a "aws/request.Request" representing the +// client's request for the ResetCacheParameterGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ResetCacheParameterGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ResetCacheParameterGroupRequest method. 
+// req, resp := client.ResetCacheParameterGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElastiCache) ResetCacheParameterGroupRequest(input *ResetCacheParameterGroupInput) (req *request.Request, output *CacheParameterGroupNameMessage) { + op := &request.Operation{ + Name: opResetCacheParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetCacheParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CacheParameterGroupNameMessage{} + req.Data = output + return +} + +// The ResetCacheParameterGroup action modifies the parameters of a cache parameter +// group to the engine or system default value. You can reset specific parameters +// by submitting a list of parameter names. To reset the entire cache parameter +// group, specify the ResetAllParameters and CacheParameterGroupName parameters. +func (c *ElastiCache) ResetCacheParameterGroup(input *ResetCacheParameterGroupInput) (*CacheParameterGroupNameMessage, error) { + req, out := c.ResetCacheParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opRevokeCacheSecurityGroupIngress = "RevokeCacheSecurityGroupIngress" + +// RevokeCacheSecurityGroupIngressRequest generates a "aws/request.Request" representing the +// client's request for the RevokeCacheSecurityGroupIngress operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RevokeCacheSecurityGroupIngress method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RevokeCacheSecurityGroupIngressRequest method. +// req, resp := client.RevokeCacheSecurityGroupIngressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElastiCache) RevokeCacheSecurityGroupIngressRequest(input *RevokeCacheSecurityGroupIngressInput) (req *request.Request, output *RevokeCacheSecurityGroupIngressOutput) { + op := &request.Operation{ + Name: opRevokeCacheSecurityGroupIngress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RevokeCacheSecurityGroupIngressInput{} + } + + req = c.newRequest(op, input, output) + output = &RevokeCacheSecurityGroupIngressOutput{} + req.Data = output + return +} + +// The RevokeCacheSecurityGroupIngress action revokes ingress from a cache security +// group. Use this action to disallow access from an Amazon EC2 security group +// that had been previously authorized. +func (c *ElastiCache) RevokeCacheSecurityGroupIngress(input *RevokeCacheSecurityGroupIngressInput) (*RevokeCacheSecurityGroupIngressOutput, error) { + req, out := c.RevokeCacheSecurityGroupIngressRequest(input) + err := req.Send() + return out, err +} + +// Represents the input of an AddTagsToResource action. 
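+//
+// A minimal sketch of populating this input (the ARN and tag values are
+// illustrative assumptions; Tag carries Key and Value string pointers):
+//
+//    input := &elasticache.AddTagsToResourceInput{
+//        ResourceName: aws.String("arn:aws:elasticache:us-west-2:0123456789:cluster:myCluster"),
+//        Tags: []*elasticache.Tag{
+//            {Key: aws.String("Project"), Value: aws.String("demo")},
+//        },
+//    }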
+type AddTagsToResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource to which the tags are to be + // added, for example arn:aws:elasticache:us-west-2:0123456789:cluster:myCluster + // or arn:aws:elasticache:us-west-2:0123456789:snapshot:mySnapshot. + // + // For more information on ARNs, go to Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + ResourceName *string `type:"string" required:"true"` + + // A list of cost allocation tags to be added to this resource. A tag is a key-value + // pair. A tag key must be accompanied by a tag value. + Tags []*Tag `locationNameList:"Tag" type:"list" required:"true"` +} + +// String returns the string representation +func (s AddTagsToResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsToResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddTagsToResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddTagsToResourceInput"} + if s.ResourceName == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceName")) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the input of an AuthorizeCacheSecurityGroupIngress action. +type AuthorizeCacheSecurityGroupIngressInput struct { + _ struct{} `type:"structure"` + + // The cache security group which will allow network ingress. + CacheSecurityGroupName *string `type:"string" required:"true"` + + // The Amazon EC2 security group to be authorized for ingress to the cache security + // group. + EC2SecurityGroupName *string `type:"string" required:"true"` + + // The AWS account number of the Amazon EC2 security group owner. Note that + // this is not the same thing as an AWS access key ID - you must provide a valid + // AWS account number for this parameter. + EC2SecurityGroupOwnerId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AuthorizeCacheSecurityGroupIngressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuthorizeCacheSecurityGroupIngressInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
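+//
+// Validation also runs automatically when a request is sent; calling it by hand
+// is useful for failing fast. A minimal sketch, assuming s is an already-populated
+// input value:
+//
+//    if err := s.Validate(); err != nil {
+//        fmt.Println(err) // a request.ErrInvalidParams listing each missing field
+//    }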
+func (s *AuthorizeCacheSecurityGroupIngressInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AuthorizeCacheSecurityGroupIngressInput"} + if s.CacheSecurityGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("CacheSecurityGroupName")) + } + if s.EC2SecurityGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("EC2SecurityGroupName")) + } + if s.EC2SecurityGroupOwnerId == nil { + invalidParams.Add(request.NewErrParamRequired("EC2SecurityGroupOwnerId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type AuthorizeCacheSecurityGroupIngressOutput struct { + _ struct{} `type:"structure"` + + // Represents the output of one of the following actions: + // + // AuthorizeCacheSecurityGroupIngress + // + // CreateCacheSecurityGroup + // + // RevokeCacheSecurityGroupIngress + CacheSecurityGroup *CacheSecurityGroup `type:"structure"` +} + +// String returns the string representation +func (s AuthorizeCacheSecurityGroupIngressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuthorizeCacheSecurityGroupIngressOutput) GoString() string { + return s.String() +} + +// Describes an Availability Zone in which the cache cluster is launched. +type AvailabilityZone struct { + _ struct{} `type:"structure"` + + // The name of the Availability Zone. + Name *string `type:"string"` +} + +// String returns the string representation +func (s AvailabilityZone) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AvailabilityZone) GoString() string { + return s.String() +} + +// Contains all of the attributes of a specific cache cluster. +type CacheCluster struct { + _ struct{} `type:"structure"` + + // This parameter is currently disabled. + AutoMinorVersionUpgrade *bool `type:"boolean"` + + // The date and time when the cache cluster was created. + CacheClusterCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The user-supplied identifier of the cache cluster. This identifier is a unique + // key that identifies a cache cluster. + CacheClusterId *string `type:"string"` + + // The current state of this cache cluster, one of the following values: available, + // creating, deleted, deleting, incompatible-network, modifying, rebooting cache + // cluster nodes, restore-failed, or snapshotting. + CacheClusterStatus *string `type:"string"` + + // The name of the compute and memory capacity node type for the cache cluster. + // + // Valid node types are as follows: + // + // General purpose: + // + // Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, + // cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge + // + // Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, + // cache.m1.large, cache.m1.xlarge + // + // Compute optimized: cache.c1.xlarge + // + // Memory optimized: + // + // Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, + // cache.r3.4xlarge, cache.r3.8xlarge + // + // Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // + // Notes: + // + // All t2 instances are created in an Amazon Virtual Private Cloud (VPC). + // + // Redis backup/restore is not supported for t2 instances. + // + // Redis Append-only files (AOF) functionality is not supported for t1 or + // t2 instances. 
+ // + // For a complete listing of cache node types and specifications, see Amazon + // ElastiCache Product Features and Details (http://aws.amazon.com/elasticache/details) + // and Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#CacheParameterGroups.Memcached.NodeSpecific) + // or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#CacheParameterGroups.Redis.NodeSpecific). + CacheNodeType *string `type:"string"` + + // A list of cache nodes that are members of the cache cluster. + CacheNodes []*CacheNode `locationNameList:"CacheNode" type:"list"` + + // The status of the cache parameter group. + CacheParameterGroup *CacheParameterGroupStatus `type:"structure"` + + // A list of cache security group elements, composed of name and status sub-elements. + CacheSecurityGroups []*CacheSecurityGroupMembership `locationNameList:"CacheSecurityGroup" type:"list"` + + // The name of the cache subnet group associated with the cache cluster. + CacheSubnetGroupName *string `type:"string"` + + // The URL of the web page where you can download the latest ElastiCache client + // library. + ClientDownloadLandingPage *string `type:"string"` + + // Represents the information required for client programs to connect to a cache + // node. + ConfigurationEndpoint *Endpoint `type:"structure"` + + // The name of the cache engine (memcached or redis) to be used for this cache + // cluster. + Engine *string `type:"string"` + + // The version of the cache engine that is used in this cache cluster. + EngineVersion *string `type:"string"` + + // Describes a notification topic and its status. Notification topics are used + // for publishing ElastiCache events to subscribers using Amazon Simple Notification + // Service (SNS). + NotificationConfiguration *NotificationConfiguration `type:"structure"` + + // The number of cache nodes in the cache cluster. + // + // For clusters running Redis, this value must be 1. For clusters running Memcached, + // this value must be between 1 and 20. + NumCacheNodes *int64 `type:"integer"` + + // A group of settings that will be applied to the cache cluster in the future, + // or that are currently being applied. + PendingModifiedValues *PendingModifiedValues `type:"structure"` + + // The name of the Availability Zone in which the cache cluster is located or + // "Multiple" if the cache nodes are located in different Availability Zones. + PreferredAvailabilityZone *string `type:"string"` + + // Specifies the weekly time range during which maintenance on the cache cluster + // is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi + // (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid + // values for ddd are: + // + // sun + // + // mon + // + // tue + // + // wed + // + // thu + // + // fri + // + // sat + // + // Example: sun:05:00-sun:09:00 + PreferredMaintenanceWindow *string `type:"string"` + + // The replication group to which this cache cluster belongs. If this field + // is empty, the cache cluster is not associated with any replication group. + ReplicationGroupId *string `type:"string"` + + // A list of VPC Security Groups associated with the cache cluster. + SecurityGroups []*SecurityGroupMembership `type:"list"` + + // The number of days for which ElastiCache will retain automatic cache cluster + // snapshots before deleting them. 
For example, if you set SnapshotRetentionLimit + // to 5, then a snapshot that was taken today will be retained for 5 days before + // being deleted. + // + // If the value of SnapshotRetentionLimit is set to zero (0), backups are + // turned off. + SnapshotRetentionLimit *int64 `type:"integer"` + + // The daily time range (in UTC) during which ElastiCache will begin taking + // a daily snapshot of your cache cluster. + // + // Example: 05:00-09:00 + SnapshotWindow *string `type:"string"` +} + +// String returns the string representation +func (s CacheCluster) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheCluster) GoString() string { + return s.String() +} + +// Provides all of the details about a particular cache engine version. +type CacheEngineVersion struct { + _ struct{} `type:"structure"` + + // The description of the cache engine. + CacheEngineDescription *string `type:"string"` + + // The description of the cache engine version. + CacheEngineVersionDescription *string `type:"string"` + + // The name of the cache parameter group family associated with this cache engine. + CacheParameterGroupFamily *string `type:"string"` + + // The name of the cache engine. + Engine *string `type:"string"` + + // The version number of the cache engine. + EngineVersion *string `type:"string"` +} + +// String returns the string representation +func (s CacheEngineVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheEngineVersion) GoString() string { + return s.String() +} + +// Represents an individual cache node within a cache cluster. Each cache node +// runs its own instance of the cluster's protocol-compliant caching software +// - either Memcached or Redis. +// +// Valid node types are as follows: +// +// General purpose: +// +// Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, +// cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge +// +// Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, +// cache.m1.large, cache.m1.xlarge +// +// Compute optimized: cache.c1.xlarge +// +// Memory optimized: +// +// Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, +// cache.r3.4xlarge, cache.r3.8xlarge +// +// Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge +// +// Notes: +// +// All t2 instances are created in an Amazon Virtual Private Cloud (VPC). +// +// Redis backup/restore is not supported for t2 instances. +// +// Redis Append-only files (AOF) functionality is not supported for t1 or +// t2 instances. +// +// For a complete listing of cache node types and specifications, see Amazon +// ElastiCache Product Features and Details (http://aws.amazon.com/elasticache/details) +// and either Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#CacheParameterGroups.Memcached.NodeSpecific) +// or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#CacheParameterGroups.Redis.NodeSpecific). +type CacheNode struct { + _ struct{} `type:"structure"` + + // The date and time when the cache node was created. + CacheNodeCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The cache node identifier. A node ID is a numeric identifier (0001, 0002, + // etc.). 
The combination of cluster ID and node ID uniquely identifies every + // cache node used in a customer's AWS account. + CacheNodeId *string `type:"string"` + + // The current state of this cache node. + CacheNodeStatus *string `type:"string"` + + // The Availability Zone where this node was created and now resides. + CustomerAvailabilityZone *string `type:"string"` + + // The hostname for connecting to this cache node. + Endpoint *Endpoint `type:"structure"` + + // The status of the parameter group applied to this cache node. + ParameterGroupStatus *string `type:"string"` + + // The ID of the primary node to which this read replica node is synchronized. + // If this field is empty, then this node is not associated with a primary cache + // cluster. + SourceCacheNodeId *string `type:"string"` +} + +// String returns the string representation +func (s CacheNode) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheNode) GoString() string { + return s.String() +} + +// A parameter that has a different value for each cache node type it is applied +// to. For example, in a Redis cache cluster, a cache.m1.large cache node type +// would have a larger maxmemory value than a cache.m1.small type. +type CacheNodeTypeSpecificParameter struct { + _ struct{} `type:"structure"` + + // The valid range of values for the parameter. + AllowedValues *string `type:"string"` + + // A list of cache node types and their corresponding values for this parameter. + CacheNodeTypeSpecificValues []*CacheNodeTypeSpecificValue `locationNameList:"CacheNodeTypeSpecificValue" type:"list"` + + // ChangeType indicates whether a change to the parameter will be applied immediately + // or requires a reboot for the change to be applied. You can force a reboot + // or wait until the next maintenance window's reboot. For more information, + // see Rebooting a Cluster (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/Clusters.Rebooting.html). + ChangeType *string `type:"string" enum:"ChangeType"` + + // The valid data type for the parameter. + DataType *string `type:"string"` + + // A description of the parameter. + Description *string `type:"string"` + + // Indicates whether (true) or not (false) the parameter can be modified. Some + // parameters have security or operational implications that prevent them from + // being changed. + IsModifiable *bool `type:"boolean"` + + // The earliest cache engine version to which the parameter can apply. + MinimumEngineVersion *string `type:"string"` + + // The name of the parameter. + ParameterName *string `type:"string"` + + // The source of the parameter value. + Source *string `type:"string"` +} + +// String returns the string representation +func (s CacheNodeTypeSpecificParameter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheNodeTypeSpecificParameter) GoString() string { + return s.String() +} + +// A value that applies only to a certain cache node type. +type CacheNodeTypeSpecificValue struct { + _ struct{} `type:"structure"` + + // The cache node type for which this value applies. + CacheNodeType *string `type:"string"` + + // The value for the cache node type. 
+ Value *string `type:"string"` +} + +// String returns the string representation +func (s CacheNodeTypeSpecificValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheNodeTypeSpecificValue) GoString() string { + return s.String() +} + +// Represents the output of a CreateCacheParameterGroup action. +type CacheParameterGroup struct { + _ struct{} `type:"structure"` + + // The name of the cache parameter group family that this cache parameter group + // is compatible with. + CacheParameterGroupFamily *string `type:"string"` + + // The name of the cache parameter group. + CacheParameterGroupName *string `type:"string"` + + // The description for this cache parameter group. + Description *string `type:"string"` +} + +// String returns the string representation +func (s CacheParameterGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheParameterGroup) GoString() string { + return s.String() +} + +// Represents the output of one of the following actions: +// +// ModifyCacheParameterGroup +// +// ResetCacheParameterGroup +type CacheParameterGroupNameMessage struct { + _ struct{} `type:"structure"` + + // The name of the cache parameter group. + CacheParameterGroupName *string `type:"string"` +} + +// String returns the string representation +func (s CacheParameterGroupNameMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheParameterGroupNameMessage) GoString() string { + return s.String() +} + +// The status of the cache parameter group. +type CacheParameterGroupStatus struct { + _ struct{} `type:"structure"` + + // A list of the cache node IDs which need to be rebooted for parameter changes + // to be applied. A node ID is a numeric identifier (0001, 0002, etc.). + CacheNodeIdsToReboot []*string `locationNameList:"CacheNodeId" type:"list"` + + // The name of the cache parameter group. + CacheParameterGroupName *string `type:"string"` + + // The status of parameter updates. + ParameterApplyStatus *string `type:"string"` +} + +// String returns the string representation +func (s CacheParameterGroupStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheParameterGroupStatus) GoString() string { + return s.String() +} + +// Represents the output of one of the following actions: +// +// AuthorizeCacheSecurityGroupIngress +// +// CreateCacheSecurityGroup +// +// RevokeCacheSecurityGroupIngress +type CacheSecurityGroup struct { + _ struct{} `type:"structure"` + + // The name of the cache security group. + CacheSecurityGroupName *string `type:"string"` + + // The description of the cache security group. + Description *string `type:"string"` + + // A list of Amazon EC2 security groups that are associated with this cache + // security group. + EC2SecurityGroups []*EC2SecurityGroup `locationNameList:"EC2SecurityGroup" type:"list"` + + // The AWS account ID of the cache security group owner. + OwnerId *string `type:"string"` +} + +// String returns the string representation +func (s CacheSecurityGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheSecurityGroup) GoString() string { + return s.String() +} + +// Represents a cache cluster's status within a particular cache security group. 
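+//
+// For example (an illustrative sketch, assuming a *CacheCluster value named
+// cluster), these memberships appear on CacheCluster.CacheSecurityGroups and
+// can be ranged over:
+//
+//    for _, m := range cluster.CacheSecurityGroups {
+//        fmt.Println(aws.StringValue(m.CacheSecurityGroupName), aws.StringValue(m.Status))
+//    }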
+type CacheSecurityGroupMembership struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the cache security group.
+ CacheSecurityGroupName *string `type:"string"`
+
+ // The membership status in the cache security group. The status changes when
+ // a cache security group is modified, or when the cache security groups assigned
+ // to a cache cluster are modified.
+ Status *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CacheSecurityGroupMembership) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CacheSecurityGroupMembership) GoString() string {
+ return s.String()
+}
+
+// Represents the output of one of the following actions:
+//
+// CreateCacheSubnetGroup
+//
+// ModifyCacheSubnetGroup
+type CacheSubnetGroup struct {
+ _ struct{} `type:"structure"`
+
+ // The description of the cache subnet group.
+ CacheSubnetGroupDescription *string `type:"string"`
+
+ // The name of the cache subnet group.
+ CacheSubnetGroupName *string `type:"string"`
+
+ // A list of subnets associated with the cache subnet group.
+ Subnets []*Subnet `locationNameList:"Subnet" type:"list"`
+
+ // The Amazon Virtual Private Cloud identifier (VPC ID) of the cache subnet
+ // group.
+ VpcId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CacheSubnetGroup) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CacheSubnetGroup) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a CopySnapshot action.
+type CopySnapshotInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of an existing snapshot from which to make a copy.
+ SourceSnapshotName *string `type:"string" required:"true"`
+
+ // The Amazon S3 bucket to which the snapshot will be exported. This parameter
+ // is used only when exporting a snapshot for external access.
+ //
+ // When using this parameter to export a snapshot, be sure Amazon ElastiCache
+ // has the needed permissions to this S3 bucket. For more information, see Step
+ // 2: Grant ElastiCache Access to Your Amazon S3 Bucket (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/Snapshots.Exporting.html#Snapshots.Exporting.GrantAccess)
+ // in the Amazon ElastiCache User Guide.
+ //
+ // Error Messages:
+ //
+ // You could receive one of the following error messages.
+ //
+ // Error Message: ElastiCache has not been granted READ permissions %s
+ // on the S3 Bucket.
+ //
+ // Solution: Add List and Read permissions on the bucket.
+ //
+ // Error Message: ElastiCache has not been granted WRITE permissions %s
+ // on the S3 Bucket.
+ //
+ // Solution: Add Upload/Delete permissions on the bucket.
+ //
+ // Error Message: ElastiCache has not been granted READ_ACP permissions
+ // %s on the S3 Bucket.
+ //
+ // Solution: Add View Permissions on the bucket.
+ //
+ // Error Message: The S3 bucket %s is outside of the region.
+ //
+ // Solution: Before exporting your snapshot, create a new Amazon S3 bucket
+ // in the same region as your snapshot. For more information, see Step 1: Create
+ // an Amazon S3 Bucket (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/Snapshots.Exporting.html#Snapshots.Exporting.CreateBucket).
+ //
+ // Error Message: The S3 bucket %s does not exist.
+ //
+ // Solution: Create an Amazon S3 bucket in the same region as your snapshot.
+ // For more information, see Step 1: Create an Amazon S3 Bucket (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/Snapshots.Exporting.html#Snapshots.Exporting.CreateBucket).
+ //
+ // Error Message: The S3 bucket %s is not owned by the authenticated user.
+ //
+ // Solution: Create an Amazon S3 bucket in the same region as your snapshot.
+ // For more information, see Step 1: Create an Amazon S3 Bucket (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/Snapshots.Exporting.html#Snapshots.Exporting.CreateBucket).
+ //
+ // Error Message: The authenticated user does not have sufficient permissions
+ // to perform the desired activity.
+ //
+ // Solution: Contact your system administrator to get the needed permissions.
+ //
+ // For more information, see Exporting a Snapshot (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/Snapshots.Exporting.html)
+ // in the Amazon ElastiCache User Guide.
+ TargetBucket *string `type:"string"`
+
+ // A name for the snapshot copy. ElastiCache does not permit overwriting a snapshot;
+ // therefore, this name must be unique within its context - ElastiCache or an
+ // Amazon S3 bucket if exporting.
+ //
+ // Error Message: The S3 bucket %s already contains an object with key %s.
+ //
+ // Solution: Give the TargetSnapshotName a new and unique value. If exporting
+ // a snapshot, you could alternatively create a new Amazon S3 bucket and use
+ // this same value for TargetSnapshotName.
+ TargetSnapshotName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CopySnapshotInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CopySnapshotInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CopySnapshotInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CopySnapshotInput"}
+ if s.SourceSnapshotName == nil {
+ invalidParams.Add(request.NewErrParamRequired("SourceSnapshotName"))
+ }
+ if s.TargetSnapshotName == nil {
+ invalidParams.Add(request.NewErrParamRequired("TargetSnapshotName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type CopySnapshotOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Represents a copy of an entire cache cluster as of the time when the snapshot
+ // was taken.
+ Snapshot *Snapshot `type:"structure"`
+}
+
+// String returns the string representation
+func (s CopySnapshotOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CopySnapshotOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a CreateCacheCluster action.
+type CreateCacheClusterInput struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether the nodes in this Memcached node group are created in a
+ // single Availability Zone or created across multiple Availability Zones in
+ // the cluster's region.
+ //
+ // This parameter is only supported for Memcached cache clusters.
+ //
+ // If the AZMode and PreferredAvailabilityZones are not specified, ElastiCache
+ // assumes single-az mode.
+ AZMode *string `type:"string" enum:"AZMode"`
+
+ // This parameter is currently disabled.
+ AutoMinorVersionUpgrade *bool `type:"boolean"`
+
+ // The node group identifier. This parameter is stored as a lowercase string.
+ // + // Constraints: + // + // A name must contain from 1 to 20 alphanumeric characters or hyphens. + // + // The first character must be a letter. + // + // A name cannot end with a hyphen or contain two consecutive hyphens. + CacheClusterId *string `type:"string" required:"true"` + + // The compute and memory capacity of the nodes in the node group. + // + // Valid node types are as follows: + // + // General purpose: + // + // Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, + // cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge + // + // Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, + // cache.m1.large, cache.m1.xlarge + // + // Compute optimized: cache.c1.xlarge + // + // Memory optimized: + // + // Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, + // cache.r3.4xlarge, cache.r3.8xlarge + // + // Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // + // Notes: + // + // All t2 instances are created in an Amazon Virtual Private Cloud (VPC). + // + // Redis backup/restore is not supported for t2 instances. + // + // Redis Append-only files (AOF) functionality is not supported for t1 or + // t2 instances. + // + // For a complete listing of cache node types and specifications, see Amazon + // ElastiCache Product Features and Details (http://aws.amazon.com/elasticache/details) + // and Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#CacheParameterGroups.Memcached.NodeSpecific) + // or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#CacheParameterGroups.Redis.NodeSpecific). + CacheNodeType *string `type:"string"` + + // The name of the parameter group to associate with this cache cluster. If + // this argument is omitted, the default parameter group for the specified engine + // is used. + CacheParameterGroupName *string `type:"string"` + + // A list of security group names to associate with this cache cluster. + // + // Use this parameter only when you are creating a cache cluster outside of + // an Amazon Virtual Private Cloud (VPC). + CacheSecurityGroupNames []*string `locationNameList:"CacheSecurityGroupName" type:"list"` + + // The name of the subnet group to be used for the cache cluster. + // + // Use this parameter only when you are creating a cache cluster in an Amazon + // Virtual Private Cloud (VPC). + CacheSubnetGroupName *string `type:"string"` + + // The name of the cache engine to be used for this cache cluster. + // + // Valid values for this parameter are: + // + // memcached | redis + Engine *string `type:"string"` + + // The version number of the cache engine to be used for this cache cluster. + // To view the supported cache engine versions, use the DescribeCacheEngineVersions + // action. + // + // Important: You can upgrade to a newer engine version (see Selecting a Cache + // Engine and Version (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/SelectEngine.html#VersionManagement)), + // but you cannot downgrade to an earlier engine version. If you want to use + // an earlier engine version, you must delete the existing cache cluster or + // replication group and create it anew with the earlier engine version. 
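+ //
+ // A minimal sketch of listing the available versions (illustrative only;
+ // "client" is an assumed ElastiCache client value):
+ //
+ //    out, err := client.DescribeCacheEngineVersions(&elasticache.DescribeCacheEngineVersionsInput{
+ //        Engine: aws.String("redis"),
+ //    })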
+ EngineVersion *string `type:"string"` + + // The Amazon Resource Name (ARN) of the Amazon Simple Notification Service + // (SNS) topic to which notifications will be sent. + // + // The Amazon SNS topic owner must be the same as the cache cluster owner. + NotificationTopicArn *string `type:"string"` + + // The initial number of cache nodes that the cache cluster will have. + // + // For clusters running Redis, this value must be 1. For clusters running Memcached, + // this value must be between 1 and 20. + // + // If you need more than 20 nodes for your Memcached cluster, please fill out + // the ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request/ + // (http://aws.amazon.com/contact-us/elasticache-node-limit-request/). + NumCacheNodes *int64 `type:"integer"` + + // The port number on which each of the cache nodes will accept connections. + Port *int64 `type:"integer"` + + // The EC2 Availability Zone in which the cache cluster will be created. + // + // All nodes belonging to this Memcached cache cluster are placed in the preferred + // Availability Zone. If you want to create your nodes across multiple Availability + // Zones, use PreferredAvailabilityZones. + // + // Default: System chosen Availability Zone. + PreferredAvailabilityZone *string `type:"string"` + + // A list of the Availability Zones in which cache nodes will be created. The + // order of the zones in the list is not important. + // + // This option is only supported on Memcached. + // + // If you are creating your cache cluster in an Amazon VPC (recommended) you + // can only locate nodes in Availability Zones that are associated with the + // subnets in the selected subnet group. + // + // The number of Availability Zones listed must equal the value of NumCacheNodes. + // + // If you want all the nodes in the same Availability Zone, use PreferredAvailabilityZone + // instead, or repeat the Availability Zone multiple times in the list. + // + // Default: System chosen Availability Zones. + // + // Example: One Memcached node in each of three different Availability Zones: + // PreferredAvailabilityZones.member.1=us-west-2a&PreferredAvailabilityZones.member.2=us-west-2b&PreferredAvailabilityZones.member.3=us-west-2c + // + // Example: All three Memcached nodes in one Availability Zone: PreferredAvailabilityZones.member.1=us-west-2a&PreferredAvailabilityZones.member.2=us-west-2a&PreferredAvailabilityZones.member.3=us-west-2a + PreferredAvailabilityZones []*string `locationNameList:"PreferredAvailabilityZone" type:"list"` + + // Specifies the weekly time range during which maintenance on the cache cluster + // is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi + // (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid + // values for ddd are: + // + // sun + // + // mon + // + // tue + // + // wed + // + // thu + // + // fri + // + // sat + // + // Example: sun:05:00-sun:09:00 + PreferredMaintenanceWindow *string `type:"string"` + + // The ID of the replication group to which this cache cluster should belong. + // If this parameter is specified, the cache cluster will be added to the specified + // replication group as a read replica; otherwise, the cache cluster will be + // a standalone primary that is not part of any replication group. 
+ // + // If the specified replication group is Multi-AZ enabled and the availability + // zone is not specified, the cache cluster will be created in availability + // zones that provide the best spread of read replicas across availability zones. + // + // This parameter is only valid if the Engine parameter is redis. + ReplicationGroupId *string `type:"string"` + + // One or more VPC security groups associated with the cache cluster. + // + // Use this parameter only when you are creating a cache cluster in an Amazon + // Virtual Private Cloud (VPC). + SecurityGroupIds []*string `locationNameList:"SecurityGroupId" type:"list"` + + // A single-element string list containing an Amazon Resource Name (ARN) that + // uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot + // file will be used to populate the node group. The Amazon S3 object name in + // the ARN cannot contain any commas. + // + // This parameter is only valid if the Engine parameter is redis. + // + // Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb + SnapshotArns []*string `locationNameList:"SnapshotArn" type:"list"` + + // The name of a snapshot from which to restore data into the new node group. + // The snapshot status changes to restoring while the new node group is being + // created. + // + // This parameter is only valid if the Engine parameter is redis. + SnapshotName *string `type:"string"` + + // The number of days for which ElastiCache will retain automatic snapshots + // before deleting them. For example, if you set SnapshotRetentionLimit to 5, + // then a snapshot that was taken today will be retained for 5 days before being + // deleted. + // + // This parameter is only valid if the Engine parameter is redis. + // + // Default: 0 (i.e., automatic backups are disabled for this cache cluster). + SnapshotRetentionLimit *int64 `type:"integer"` + + // The daily time range (in UTC) during which ElastiCache will begin taking + // a daily snapshot of your node group. + // + // Example: 05:00-09:00 + // + // If you do not specify this parameter, then ElastiCache will automatically + // choose an appropriate time range. + // + // Note: This parameter is only valid if the Engine parameter is redis. + SnapshotWindow *string `type:"string"` + + // A list of cost allocation tags to be added to this resource. A tag is a key-value + // pair. A tag key must be accompanied by a tag value. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateCacheClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCacheClusterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateCacheClusterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateCacheClusterInput"} + if s.CacheClusterId == nil { + invalidParams.Add(request.NewErrParamRequired("CacheClusterId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateCacheClusterOutput struct { + _ struct{} `type:"structure"` + + // Contains all of the attributes of a specific cache cluster. 
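+ //
+ // A minimal sketch of reading it back after a create call (illustrative;
+ // "client" and "input" are assumed values):
+ //
+ //    out, err := client.CreateCacheCluster(input)
+ //    if err == nil && out.CacheCluster != nil {
+ //        fmt.Println(aws.StringValue(out.CacheCluster.CacheClusterStatus))
+ //    }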
+ CacheCluster *CacheCluster `type:"structure"` +} + +// String returns the string representation +func (s CreateCacheClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCacheClusterOutput) GoString() string { + return s.String() +} + +// Represents the input of a CreateCacheParameterGroup action. +type CreateCacheParameterGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the cache parameter group family the cache parameter group can + // be used with. + // + // Valid values are: memcached1.4 | redis2.6 | redis2.8 + CacheParameterGroupFamily *string `type:"string" required:"true"` + + // A user-specified name for the cache parameter group. + CacheParameterGroupName *string `type:"string" required:"true"` + + // A user-specified description for the cache parameter group. + Description *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateCacheParameterGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCacheParameterGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateCacheParameterGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateCacheParameterGroupInput"} + if s.CacheParameterGroupFamily == nil { + invalidParams.Add(request.NewErrParamRequired("CacheParameterGroupFamily")) + } + if s.CacheParameterGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("CacheParameterGroupName")) + } + if s.Description == nil { + invalidParams.Add(request.NewErrParamRequired("Description")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateCacheParameterGroupOutput struct { + _ struct{} `type:"structure"` + + // Represents the output of a CreateCacheParameterGroup action. + CacheParameterGroup *CacheParameterGroup `type:"structure"` +} + +// String returns the string representation +func (s CreateCacheParameterGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCacheParameterGroupOutput) GoString() string { + return s.String() +} + +// Represents the input of a CreateCacheSecurityGroup action. +type CreateCacheSecurityGroupInput struct { + _ struct{} `type:"structure"` + + // A name for the cache security group. This value is stored as a lowercase + // string. + // + // Constraints: Must contain no more than 255 alphanumeric characters. Cannot + // be the word "Default". + // + // Example: mysecuritygroup + CacheSecurityGroupName *string `type:"string" required:"true"` + + // A description for the cache security group. + Description *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateCacheSecurityGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCacheSecurityGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateCacheSecurityGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateCacheSecurityGroupInput"} + if s.CacheSecurityGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("CacheSecurityGroupName")) + } + if s.Description == nil { + invalidParams.Add(request.NewErrParamRequired("Description")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateCacheSecurityGroupOutput struct { + _ struct{} `type:"structure"` + + // Represents the output of one of the following actions: + // + // AuthorizeCacheSecurityGroupIngress + // + // CreateCacheSecurityGroup + // + // RevokeCacheSecurityGroupIngress + CacheSecurityGroup *CacheSecurityGroup `type:"structure"` +} + +// String returns the string representation +func (s CreateCacheSecurityGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCacheSecurityGroupOutput) GoString() string { + return s.String() +} + +// Represents the input of a CreateCacheSubnetGroup action. +type CreateCacheSubnetGroupInput struct { + _ struct{} `type:"structure"` + + // A description for the cache subnet group. + CacheSubnetGroupDescription *string `type:"string" required:"true"` + + // A name for the cache subnet group. This value is stored as a lowercase string. + // + // Constraints: Must contain no more than 255 alphanumeric characters or hyphens. + // + // Example: mysubnetgroup + CacheSubnetGroupName *string `type:"string" required:"true"` + + // A list of VPC subnet IDs for the cache subnet group. + SubnetIds []*string `locationNameList:"SubnetIdentifier" type:"list" required:"true"` +} + +// String returns the string representation +func (s CreateCacheSubnetGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCacheSubnetGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateCacheSubnetGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateCacheSubnetGroupInput"} + if s.CacheSubnetGroupDescription == nil { + invalidParams.Add(request.NewErrParamRequired("CacheSubnetGroupDescription")) + } + if s.CacheSubnetGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("CacheSubnetGroupName")) + } + if s.SubnetIds == nil { + invalidParams.Add(request.NewErrParamRequired("SubnetIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateCacheSubnetGroupOutput struct { + _ struct{} `type:"structure"` + + // Represents the output of one of the following actions: + // + // CreateCacheSubnetGroup + // + // ModifyCacheSubnetGroup + CacheSubnetGroup *CacheSubnetGroup `type:"structure"` +} + +// String returns the string representation +func (s CreateCacheSubnetGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCacheSubnetGroupOutput) GoString() string { + return s.String() +} + +// Represents the input of a CreateReplicationGroup action. +type CreateReplicationGroupInput struct { + _ struct{} `type:"structure"` + + // This parameter is currently disabled. + AutoMinorVersionUpgrade *bool `type:"boolean"` + + // Specifies whether a read-only replica will be automatically promoted to read/write + // primary if the existing primary fails. 
+ //
+ // If true, Multi-AZ is enabled for this replication group. If false, Multi-AZ
+ // is disabled for this replication group.
+ //
+ // Default: false
+ //
+ // ElastiCache Multi-AZ replication groups are not supported on:
+ //
+ // Redis versions earlier than 2.8.6.
+ //
+ // T1 and T2 cache node types.
+ AutomaticFailoverEnabled *bool `type:"boolean"`
+
+ // The compute and memory capacity of the nodes in the node group.
+ //
+ // Valid node types are as follows:
+ //
+ // General purpose:
+ //
+ // Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium,
+ // cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge
+ //
+ // Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium,
+ // cache.m1.large, cache.m1.xlarge
+ //
+ // Compute optimized: cache.c1.xlarge
+ //
+ // Memory optimized:
+ //
+ // Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge,
+ // cache.r3.4xlarge, cache.r3.8xlarge
+ //
+ // Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge
+ //
+ // Notes:
+ //
+ // All t2 instances are created in an Amazon Virtual Private Cloud (VPC).
+ //
+ // Redis backup/restore is not supported for t2 instances.
+ //
+ // Redis Append-only files (AOF) functionality is not supported for t1 or
+ // t2 instances.
+ //
+ // For a complete listing of cache node types and specifications, see Amazon
+ // ElastiCache Product Features and Details (http://aws.amazon.com/elasticache/details)
+ // and Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#CacheParameterGroups.Memcached.NodeSpecific)
+ // or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#CacheParameterGroups.Redis.NodeSpecific).
+ CacheNodeType *string `type:"string"`
+
+ // The name of the parameter group to associate with this replication group.
+ // If this argument is omitted, the default cache parameter group for the specified
+ // engine is used.
+ CacheParameterGroupName *string `type:"string"`
+
+ // A list of cache security group names to associate with this replication group.
+ CacheSecurityGroupNames []*string `locationNameList:"CacheSecurityGroupName" type:"list"`
+
+ // The name of the cache subnet group to be used for the replication group.
+ CacheSubnetGroupName *string `type:"string"`
+
+ // The name of the cache engine to be used for the cache clusters in this replication
+ // group.
+ //
+ // Default: redis
+ Engine *string `type:"string"`
+
+ // The version number of the cache engine to be used for the cache clusters
+ // in this replication group. To view the supported cache engine versions, use
+ // the DescribeCacheEngineVersions action.
+ //
+ // Important: You can upgrade to a newer engine version (see Selecting a Cache
+ // Engine and Version (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/SelectEngine.html#VersionManagement)
+ // in the ElastiCache User Guide), but you cannot downgrade to an earlier engine
+ // version. If you want to use an earlier engine version, you must delete the
+ // existing cache cluster or replication group and create it anew with the earlier
+ // engine version.
+ EngineVersion *string `type:"string"`
+
+ // The Amazon Resource Name (ARN) of the Amazon Simple Notification Service
+ // (SNS) topic to which notifications will be sent.
+ //
+ // The Amazon SNS topic owner must be the same as the cache cluster owner.
+ NotificationTopicArn *string `type:"string"`
+
+ // The number of cache clusters this replication group will initially have.
+ //
+ // If Multi-AZ is enabled, the value of this parameter must be at least 2.
+ //
+ // The maximum permitted value for NumCacheClusters is 6 (primary plus 5 replicas).
+ // If you need to exceed this limit, please fill out the ElastiCache Limit Increase
+ // Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request
+ // (http://aws.amazon.com/contact-us/elasticache-node-limit-request).
+ NumCacheClusters *int64 `type:"integer"`
+
+ // The port number on which each member of the replication group will accept
+ // connections.
+ Port *int64 `type:"integer"`
+
+ // A list of EC2 availability zones in which the replication group's cache clusters
+ // will be created. The order of the availability zones in the list is not important.
+ //
+ // If you are creating your replication group in an Amazon VPC (recommended),
+ // you can only locate cache clusters in availability zones associated with
+ // the subnets in the selected subnet group.
+ //
+ // The number of availability zones listed must equal the value of NumCacheClusters.
+ //
+ // Default: system chosen availability zones.
+ //
+ // Example: One Redis cache cluster in each of three availability zones.
+ //
+ // PreferredAvailabilityZones.member.1=us-west-2a PreferredAvailabilityZones.member.2=us-west-2b
+ // PreferredAvailabilityZones.member.3=us-west-2c
+ PreferredCacheClusterAZs []*string `locationNameList:"AvailabilityZone" type:"list"`
+
+ // Specifies the weekly time range during which maintenance on the cache cluster
+ // is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi
+ // (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid
+ // values for ddd are:
+ //
+ // sun
+ //
+ // mon
+ //
+ // tue
+ //
+ // wed
+ //
+ // thu
+ //
+ // fri
+ //
+ // sat
+ //
+ // Example: sun:05:00-sun:09:00
+ PreferredMaintenanceWindow *string `type:"string"`
+
+ // The identifier of the cache cluster that will serve as the primary for this
+ // replication group. This cache cluster must already exist and have a status
+ // of available.
+ //
+ // This parameter is not required if NumCacheClusters is specified.
+ PrimaryClusterId *string `type:"string"`
+
+ // A user-created description for the replication group.
+ ReplicationGroupDescription *string `type:"string" required:"true"`
+
+ // The replication group identifier. This parameter is stored as a lowercase
+ // string.
+ //
+ // Constraints:
+ //
+ // A name must contain from 1 to 20 alphanumeric characters or hyphens.
+ //
+ // The first character must be a letter.
+ //
+ // A name cannot end with a hyphen or contain two consecutive hyphens.
+ ReplicationGroupId *string `type:"string" required:"true"`
+
+ // One or more Amazon VPC security groups associated with this replication group.
+ //
+ // Use this parameter only when you are creating a replication group in an
+ // Amazon Virtual Private Cloud (VPC).
+ SecurityGroupIds []*string `locationNameList:"SecurityGroupId" type:"list"`
+
+ // A single-element string list containing an Amazon Resource Name (ARN) that
+ // uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot
+ // file will be used to populate the node group. The Amazon S3 object name in
+ // the ARN cannot contain any commas.
+ // + // This parameter is only valid if the Engine parameter is redis. + // + // Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb + SnapshotArns []*string `locationNameList:"SnapshotArn" type:"list"` + + // The name of a snapshot from which to restore data into the new node group. + // The snapshot status changes to restoring while the new node group is being + // created. + // + // This parameter is only valid if the Engine parameter is redis. + SnapshotName *string `type:"string"` + + // The number of days for which ElastiCache will retain automatic snapshots + // before deleting them. For example, if you set SnapshotRetentionLimit to 5, + // then a snapshot that was taken today will be retained for 5 days before being + // deleted. + // + // This parameter is only valid if the Engine parameter is redis. + // + // Default: 0 (i.e., automatic backups are disabled for this cache cluster). + SnapshotRetentionLimit *int64 `type:"integer"` + + // The daily time range (in UTC) during which ElastiCache will begin taking + // a daily snapshot of your node group. + // + // Example: 05:00-09:00 + // + // If you do not specify this parameter, then ElastiCache will automatically + // choose an appropriate time range. + // + // This parameter is only valid if the Engine parameter is redis. + SnapshotWindow *string `type:"string"` + + // A list of cost allocation tags to be added to this resource. A tag is a key-value + // pair. A tag key must be accompanied by a tag value. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateReplicationGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReplicationGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateReplicationGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateReplicationGroupInput"} + if s.ReplicationGroupDescription == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationGroupDescription")) + } + if s.ReplicationGroupId == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationGroupId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateReplicationGroupOutput struct { + _ struct{} `type:"structure"` + + // Contains all of the attributes of a specific replication group. + ReplicationGroup *ReplicationGroup `type:"structure"` +} + +// String returns the string representation +func (s CreateReplicationGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReplicationGroupOutput) GoString() string { + return s.String() +} + +// Represents the input of a CreateSnapshot action. +type CreateSnapshotInput struct { + _ struct{} `type:"structure"` + + // The identifier of an existing cache cluster. The snapshot will be created + // from this cache cluster. + CacheClusterId *string `type:"string" required:"true"` + + // A name for the snapshot being created. 
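+ //
+ // A minimal sketch of the whole call (the identifier values are illustrative
+ // assumptions):
+ //
+ //    out, err := client.CreateSnapshot(&elasticache.CreateSnapshotInput{
+ //        CacheClusterId: aws.String("my-cluster"),
+ //        SnapshotName:   aws.String("my-cluster-snap-1"),
+ //    })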
+    SnapshotName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateSnapshotInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateSnapshotInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateSnapshotInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "CreateSnapshotInput"}
+    if s.CacheClusterId == nil {
+        invalidParams.Add(request.NewErrParamRequired("CacheClusterId"))
+    }
+    if s.SnapshotName == nil {
+        invalidParams.Add(request.NewErrParamRequired("SnapshotName"))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+type CreateSnapshotOutput struct {
+    _ struct{} `type:"structure"`
+
+    // Represents a copy of an entire cache cluster as of the time when the snapshot
+    // was taken.
+    Snapshot *Snapshot `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateSnapshotOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateSnapshotOutput) GoString() string {
+    return s.String()
+}
+
+// Represents the input of a DeleteCacheCluster action.
+type DeleteCacheClusterInput struct {
+    _ struct{} `type:"structure"`
+
+    // The cache cluster identifier for the cluster to be deleted. This parameter
+    // is not case sensitive.
+    CacheClusterId *string `type:"string" required:"true"`
+
+    // The user-supplied name of a final cache cluster snapshot. This is the unique
+    // name that identifies the snapshot. ElastiCache creates the snapshot, and
+    // then deletes the cache cluster immediately afterward.
+    FinalSnapshotIdentifier *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteCacheClusterInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteCacheClusterInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteCacheClusterInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "DeleteCacheClusterInput"}
+    if s.CacheClusterId == nil {
+        invalidParams.Add(request.NewErrParamRequired("CacheClusterId"))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+type DeleteCacheClusterOutput struct {
+    _ struct{} `type:"structure"`
+
+    // Contains all of the attributes of a specific cache cluster.
+    CacheCluster *CacheCluster `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteCacheClusterOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteCacheClusterOutput) GoString() string {
+    return s.String()
+}
+
+// Represents the input of a DeleteCacheParameterGroup action.
+type DeleteCacheParameterGroupInput struct {
+    _ struct{} `type:"structure"`
+
+    // The name of the cache parameter group to delete.
+    //
+    // The specified cache parameter group must not be associated with any cache
+    // clusters.
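+    //
+    // A caller-side sketch (illustrative only; svc is an assumed
+    // *elasticache.ElastiCache client and the group name is hypothetical):
+    //
+    //    _, err := svc.DeleteCacheParameterGroup(&elasticache.DeleteCacheParameterGroupInput{
+    //        CacheParameterGroupName: aws.String("my-params"), // hypothetical group name
+    //    })
+    //    if err != nil {
+    //        log.Println(err)
+    //    }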
+ CacheParameterGroupName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteCacheParameterGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCacheParameterGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteCacheParameterGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteCacheParameterGroupInput"} + if s.CacheParameterGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("CacheParameterGroupName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteCacheParameterGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteCacheParameterGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCacheParameterGroupOutput) GoString() string { + return s.String() +} + +// Represents the input of a DeleteCacheSecurityGroup action. +type DeleteCacheSecurityGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the cache security group to delete. + // + // You cannot delete the default security group. + CacheSecurityGroupName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteCacheSecurityGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCacheSecurityGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteCacheSecurityGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteCacheSecurityGroupInput"} + if s.CacheSecurityGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("CacheSecurityGroupName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteCacheSecurityGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteCacheSecurityGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCacheSecurityGroupOutput) GoString() string { + return s.String() +} + +// Represents the input of a DeleteCacheSubnetGroup action. +type DeleteCacheSubnetGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the cache subnet group to delete. + // + // Constraints: Must contain no more than 255 alphanumeric characters or hyphens. + CacheSubnetGroupName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteCacheSubnetGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCacheSubnetGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteCacheSubnetGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteCacheSubnetGroupInput"} + if s.CacheSubnetGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("CacheSubnetGroupName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteCacheSubnetGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteCacheSubnetGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCacheSubnetGroupOutput) GoString() string { + return s.String() +} + +// Represents the input of a DeleteReplicationGroup action. +type DeleteReplicationGroupInput struct { + _ struct{} `type:"structure"` + + // The name of a final node group snapshot. ElastiCache creates the snapshot + // from the primary node in the cluster, rather than one of the replicas; this + // is to ensure that it captures the freshest data. After the final snapshot + // is taken, the cluster is immediately deleted. + FinalSnapshotIdentifier *string `type:"string"` + + // The identifier for the cluster to be deleted. This parameter is not case + // sensitive. + ReplicationGroupId *string `type:"string" required:"true"` + + // If set to true, all of the read replicas will be deleted, but the primary + // node will be retained. + RetainPrimaryCluster *bool `type:"boolean"` +} + +// String returns the string representation +func (s DeleteReplicationGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReplicationGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteReplicationGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteReplicationGroupInput"} + if s.ReplicationGroupId == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationGroupId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteReplicationGroupOutput struct { + _ struct{} `type:"structure"` + + // Contains all of the attributes of a specific replication group. + ReplicationGroup *ReplicationGroup `type:"structure"` +} + +// String returns the string representation +func (s DeleteReplicationGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReplicationGroupOutput) GoString() string { + return s.String() +} + +// Represents the input of a DeleteSnapshot action. +type DeleteSnapshotInput struct { + _ struct{} `type:"structure"` + + // The name of the snapshot to be deleted. + SnapshotName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSnapshotInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteSnapshotInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteSnapshotInput"} + if s.SnapshotName == nil { + invalidParams.Add(request.NewErrParamRequired("SnapshotName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteSnapshotOutput struct { + _ struct{} `type:"structure"` + + // Represents a copy of an entire cache cluster as of the time when the snapshot + // was taken. + Snapshot *Snapshot `type:"structure"` +} + +// String returns the string representation +func (s DeleteSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSnapshotOutput) GoString() string { + return s.String() +} + +// Represents the input of a DescribeCacheClusters action. +type DescribeCacheClustersInput struct { + _ struct{} `type:"structure"` + + // The user-supplied cluster identifier. If this parameter is specified, only + // information about that specific cache cluster is returned. This parameter + // isn't case sensitive. + CacheClusterId *string `type:"string"` + + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this action. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a marker is included in the response + // so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: minimum 20; maximum 100. + MaxRecords *int64 `type:"integer"` + + // An optional flag that can be included in the DescribeCacheCluster request + // to retrieve information about the individual cache nodes. + ShowCacheNodeInfo *bool `type:"boolean"` +} + +// String returns the string representation +func (s DescribeCacheClustersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCacheClustersInput) GoString() string { + return s.String() +} + +// Represents the output of a DescribeCacheClusters action. +type DescribeCacheClustersOutput struct { + _ struct{} `type:"structure"` + + // A list of cache clusters. Each item in the list contains detailed information + // about one cache cluster. + CacheClusters []*CacheCluster `locationNameList:"CacheCluster" type:"list"` + + // Provides an identifier to allow retrieval of paginated results. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeCacheClustersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCacheClustersOutput) GoString() string { + return s.String() +} + +// Represents the input of a DescribeCacheEngineVersions action. +type DescribeCacheEngineVersionsInput struct { + _ struct{} `type:"structure"` + + // The name of a specific cache parameter group family to return details for. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + CacheParameterGroupFamily *string `type:"string"` + + // If true, specifies that only the default version of the specified engine + // or engine and major version combination is to be returned. 
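+    //
+    // A usage sketch (illustrative only; svc is an assumed *elasticache.ElastiCache
+    // client): list only the default Redis engine version.
+    //
+    //    out, err := svc.DescribeCacheEngineVersions(&elasticache.DescribeCacheEngineVersionsInput{
+    //        Engine:      aws.String("redis"),
+    //        DefaultOnly: aws.Bool(true),
+    //    })
+    //    if err == nil {
+    //        for _, v := range out.CacheEngineVersions {
+    //            log.Println(aws.StringValue(v.EngineVersion))
+    //        }
+    //    }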
+ DefaultOnly *bool `type:"boolean"` + + // The cache engine to return. Valid values: memcached | redis + Engine *string `type:"string"` + + // The cache engine version to return. + // + // Example: 1.4.14 + EngineVersion *string `type:"string"` + + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this action. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a marker is included in the response + // so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: minimum 20; maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeCacheEngineVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCacheEngineVersionsInput) GoString() string { + return s.String() +} + +// Represents the output of a DescribeCacheEngineVersions action. +type DescribeCacheEngineVersionsOutput struct { + _ struct{} `type:"structure"` + + // A list of cache engine version details. Each element in the list contains + // detailed information about one cache engine version. + CacheEngineVersions []*CacheEngineVersion `locationNameList:"CacheEngineVersion" type:"list"` + + // Provides an identifier to allow retrieval of paginated results. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeCacheEngineVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCacheEngineVersionsOutput) GoString() string { + return s.String() +} + +// Represents the input of a DescribeCacheParameterGroups action. +type DescribeCacheParameterGroupsInput struct { + _ struct{} `type:"structure"` + + // The name of a specific cache parameter group to return details for. + CacheParameterGroupName *string `type:"string"` + + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this action. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a marker is included in the response + // so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: minimum 20; maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeCacheParameterGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCacheParameterGroupsInput) GoString() string { + return s.String() +} + +// Represents the output of a DescribeCacheParameterGroups action. +type DescribeCacheParameterGroupsOutput struct { + _ struct{} `type:"structure"` + + // A list of cache parameter groups. Each element in the list contains detailed + // information about one cache parameter group. + CacheParameterGroups []*CacheParameterGroup `locationNameList:"CacheParameterGroup" type:"list"` + + // Provides an identifier to allow retrieval of paginated results. 
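+    //
+    // A pagination sketch (illustrative only; svc is an assumed
+    // *elasticache.ElastiCache client): keep feeding the returned Marker back
+    // into the request until no marker comes back.
+    //
+    //    in := &elasticache.DescribeCacheParameterGroupsInput{}
+    //    for {
+    //        out, err := svc.DescribeCacheParameterGroups(in)
+    //        if err != nil {
+    //            break
+    //        }
+    //        log.Println(len(out.CacheParameterGroups), "groups in this page")
+    //        if out.Marker == nil {
+    //            break
+    //        }
+    //        in.Marker = out.Marker
+    //    }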
+    Marker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeCacheParameterGroupsOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeCacheParameterGroupsOutput) GoString() string {
+    return s.String()
+}
+
+// Represents the input of a DescribeCacheParameters action.
+type DescribeCacheParametersInput struct {
+    _ struct{} `type:"structure"`
+
+    // The name of a specific cache parameter group to return details for.
+    CacheParameterGroupName *string `type:"string" required:"true"`
+
+    // An optional marker returned from a prior request. Use this marker for pagination
+    // of results from this action. If this parameter is specified, the response
+    // includes only records beyond the marker, up to the value specified by MaxRecords.
+    Marker *string `type:"string"`
+
+    // The maximum number of records to include in the response. If more records
+    // exist than the specified MaxRecords value, a marker is included in the response
+    // so that the remaining results can be retrieved.
+    //
+    // Default: 100
+    //
+    // Constraints: minimum 20; maximum 100.
+    MaxRecords *int64 `type:"integer"`
+
+    // The parameter types to return.
+    //
+    // Valid values: user | system | engine-default
+    Source *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeCacheParametersInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeCacheParametersInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeCacheParametersInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "DescribeCacheParametersInput"}
+    if s.CacheParameterGroupName == nil {
+        invalidParams.Add(request.NewErrParamRequired("CacheParameterGroupName"))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// Represents the output of a DescribeCacheParameters action.
+type DescribeCacheParametersOutput struct {
+    _ struct{} `type:"structure"`
+
+    // A list of parameters specific to a particular cache node type. Each element
+    // in the list contains detailed information about one parameter.
+    CacheNodeTypeSpecificParameters []*CacheNodeTypeSpecificParameter `locationNameList:"CacheNodeTypeSpecificParameter" type:"list"`
+
+    // Provides an identifier to allow retrieval of paginated results.
+    Marker *string `type:"string"`
+
+    // A list of Parameter instances.
+    Parameters []*Parameter `locationNameList:"Parameter" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeCacheParametersOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeCacheParametersOutput) GoString() string {
+    return s.String()
+}
+
+// Represents the input of a DescribeCacheSecurityGroups action.
+type DescribeCacheSecurityGroupsInput struct {
+    _ struct{} `type:"structure"`
+
+    // The name of the cache security group to return details for.
+    CacheSecurityGroupName *string `type:"string"`
+
+    // An optional marker returned from a prior request. Use this marker for pagination
+    // of results from this action. If this parameter is specified, the response
+    // includes only records beyond the marker, up to the value specified by MaxRecords.
+    Marker *string `type:"string"`
+
+    // The maximum number of records to include in the response.
If more records + // exist than the specified MaxRecords value, a marker is included in the response + // so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: minimum 20; maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeCacheSecurityGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCacheSecurityGroupsInput) GoString() string { + return s.String() +} + +// Represents the output of a DescribeCacheSecurityGroups action. +type DescribeCacheSecurityGroupsOutput struct { + _ struct{} `type:"structure"` + + // A list of cache security groups. Each element in the list contains detailed + // information about one group. + CacheSecurityGroups []*CacheSecurityGroup `locationNameList:"CacheSecurityGroup" type:"list"` + + // Provides an identifier to allow retrieval of paginated results. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeCacheSecurityGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCacheSecurityGroupsOutput) GoString() string { + return s.String() +} + +// Represents the input of a DescribeCacheSubnetGroups action. +type DescribeCacheSubnetGroupsInput struct { + _ struct{} `type:"structure"` + + // The name of the cache subnet group to return details for. + CacheSubnetGroupName *string `type:"string"` + + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this action. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a marker is included in the response + // so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: minimum 20; maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeCacheSubnetGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCacheSubnetGroupsInput) GoString() string { + return s.String() +} + +// Represents the output of a DescribeCacheSubnetGroups action. +type DescribeCacheSubnetGroupsOutput struct { + _ struct{} `type:"structure"` + + // A list of cache subnet groups. Each element in the list contains detailed + // information about one group. + CacheSubnetGroups []*CacheSubnetGroup `locationNameList:"CacheSubnetGroup" type:"list"` + + // Provides an identifier to allow retrieval of paginated results. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeCacheSubnetGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCacheSubnetGroupsOutput) GoString() string { + return s.String() +} + +// Represents the input of a DescribeEngineDefaultParameters action. +type DescribeEngineDefaultParametersInput struct { + _ struct{} `type:"structure"` + + // The name of the cache parameter group family. Valid values are: memcached1.4 + // | redis2.6 | redis2.8 + CacheParameterGroupFamily *string `type:"string" required:"true"` + + // An optional marker returned from a prior request. 
Use this marker for pagination + // of results from this action. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a marker is included in the response + // so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: minimum 20; maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeEngineDefaultParametersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEngineDefaultParametersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeEngineDefaultParametersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeEngineDefaultParametersInput"} + if s.CacheParameterGroupFamily == nil { + invalidParams.Add(request.NewErrParamRequired("CacheParameterGroupFamily")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeEngineDefaultParametersOutput struct { + _ struct{} `type:"structure"` + + // Represents the output of a DescribeEngineDefaultParameters action. + EngineDefaults *EngineDefaults `type:"structure"` +} + +// String returns the string representation +func (s DescribeEngineDefaultParametersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEngineDefaultParametersOutput) GoString() string { + return s.String() +} + +// Represents the input of a DescribeEvents action. +type DescribeEventsInput struct { + _ struct{} `type:"structure"` + + // The number of minutes' worth of events to retrieve. + Duration *int64 `type:"integer"` + + // The end of the time interval for which to retrieve events, specified in ISO + // 8601 format. + EndTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this action. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a marker is included in the response + // so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: minimum 20; maximum 100. + MaxRecords *int64 `type:"integer"` + + // The identifier of the event source for which events will be returned. If + // not specified, then all sources are included in the response. + SourceIdentifier *string `type:"string"` + + // The event source to retrieve events for. If no value is specified, all events + // are returned. + // + // Valid values are: cache-cluster | cache-parameter-group | cache-security-group + // | cache-subnet-group + SourceType *string `type:"string" enum:"SourceType"` + + // The beginning of the time interval to retrieve events for, specified in ISO + // 8601 format. 
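+    //
+    // A usage sketch (illustrative only; svc is an assumed *elasticache.ElastiCache
+    // client): fetch the last hour of cache-cluster events.
+    //
+    //    out, err := svc.DescribeEvents(&elasticache.DescribeEventsInput{
+    //        SourceType: aws.String("cache-cluster"),
+    //        StartTime:  aws.Time(time.Now().Add(-time.Hour)),
+    //        EndTime:    aws.Time(time.Now()),
+    //    })
+    //    if err == nil {
+    //        for _, e := range out.Events {
+    //            log.Println(aws.StringValue(e.Message))
+    //        }
+    //    }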
+ StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s DescribeEventsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventsInput) GoString() string { + return s.String() +} + +// Represents the output of a DescribeEvents action. +type DescribeEventsOutput struct { + _ struct{} `type:"structure"` + + // A list of events. Each element in the list contains detailed information + // about one event. + Events []*Event `locationNameList:"Event" type:"list"` + + // Provides an identifier to allow retrieval of paginated results. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeEventsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventsOutput) GoString() string { + return s.String() +} + +// Represents the input of a DescribeReplicationGroups action. +type DescribeReplicationGroupsInput struct { + _ struct{} `type:"structure"` + + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this action. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a marker is included in the response + // so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: minimum 20; maximum 100. + MaxRecords *int64 `type:"integer"` + + // The identifier for the replication group to be described. This parameter + // is not case sensitive. + // + // If you do not specify this parameter, information about all replication + // groups is returned. + ReplicationGroupId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeReplicationGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReplicationGroupsInput) GoString() string { + return s.String() +} + +// Represents the output of a DescribeReplicationGroups action. +type DescribeReplicationGroupsOutput struct { + _ struct{} `type:"structure"` + + // Provides an identifier to allow retrieval of paginated results. + Marker *string `type:"string"` + + // A list of replication groups. Each item in the list contains detailed information + // about one replication group. + ReplicationGroups []*ReplicationGroup `locationNameList:"ReplicationGroup" type:"list"` +} + +// String returns the string representation +func (s DescribeReplicationGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReplicationGroupsOutput) GoString() string { + return s.String() +} + +// Represents the input of a DescribeReservedCacheNodes action. +type DescribeReservedCacheNodesInput struct { + _ struct{} `type:"structure"` + + // The cache node type filter value. Use this parameter to show only those reservations + // matching the specified cache node type. 
+ // + // Valid node types are as follows: + // + // General purpose: + // + // Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, + // cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge + // + // Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, + // cache.m1.large, cache.m1.xlarge + // + // Compute optimized: cache.c1.xlarge + // + // Memory optimized: + // + // Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, + // cache.r3.4xlarge, cache.r3.8xlarge + // + // Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // + // Notes: + // + // All t2 instances are created in an Amazon Virtual Private Cloud (VPC). + // + // Redis backup/restore is not supported for t2 instances. + // + // Redis Append-only files (AOF) functionality is not supported for t1 or + // t2 instances. + // + // For a complete listing of cache node types and specifications, see Amazon + // ElastiCache Product Features and Details (http://aws.amazon.com/elasticache/details) + // and Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#CacheParameterGroups.Memcached.NodeSpecific) + // or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#CacheParameterGroups.Redis.NodeSpecific). + CacheNodeType *string `type:"string"` + + // The duration filter value, specified in years or seconds. Use this parameter + // to show only reservations for this duration. + // + // Valid Values: 1 | 3 | 31536000 | 94608000 + Duration *string `type:"string"` + + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this action. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a marker is included in the response + // so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: minimum 20; maximum 100. + MaxRecords *int64 `type:"integer"` + + // The offering type filter value. Use this parameter to show only the available + // offerings matching the specified offering type. + // + // Valid values: "Light Utilization"|"Medium Utilization"|"Heavy Utilization" + OfferingType *string `type:"string"` + + // The product description filter value. Use this parameter to show only those + // reservations matching the specified product description. + ProductDescription *string `type:"string"` + + // The reserved cache node identifier filter value. Use this parameter to show + // only the reservation that matches the specified reservation ID. + ReservedCacheNodeId *string `type:"string"` + + // The offering identifier filter value. Use this parameter to show only purchased + // reservations matching the specified offering identifier. + ReservedCacheNodesOfferingId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeReservedCacheNodesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedCacheNodesInput) GoString() string { + return s.String() +} + +// Represents the input of a DescribeReservedCacheNodesOfferings action. 
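+//
+// A usage sketch (illustrative only; svc is an assumed *elasticache.ElastiCache
+// client): list one-year offerings.
+//
+//    out, err := svc.DescribeReservedCacheNodesOfferings(&elasticache.DescribeReservedCacheNodesOfferingsInput{
+//        Duration: aws.String("1"),
+//    })
+//    if err == nil {
+//        for _, o := range out.ReservedCacheNodesOfferings {
+//            log.Println(aws.StringValue(o.ReservedCacheNodesOfferingId))
+//        }
+//    }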
+type DescribeReservedCacheNodesOfferingsInput struct { + _ struct{} `type:"structure"` + + // The cache node type filter value. Use this parameter to show only the available + // offerings matching the specified cache node type. + // + // Valid node types are as follows: + // + // General purpose: + // + // Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, + // cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge + // + // Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, + // cache.m1.large, cache.m1.xlarge + // + // Compute optimized: cache.c1.xlarge + // + // Memory optimized: + // + // Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, + // cache.r3.4xlarge, cache.r3.8xlarge + // + // Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // + // Notes: + // + // All t2 instances are created in an Amazon Virtual Private Cloud (VPC). + // + // Redis backup/restore is not supported for t2 instances. + // + // Redis Append-only files (AOF) functionality is not supported for t1 or + // t2 instances. + // + // For a complete listing of cache node types and specifications, see Amazon + // ElastiCache Product Features and Details (http://aws.amazon.com/elasticache/details) + // and Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#CacheParameterGroups.Memcached.NodeSpecific) + // or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#CacheParameterGroups.Redis.NodeSpecific). + CacheNodeType *string `type:"string"` + + // Duration filter value, specified in years or seconds. Use this parameter + // to show only reservations for a given duration. + // + // Valid Values: 1 | 3 | 31536000 | 94608000 + Duration *string `type:"string"` + + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this action. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a marker is included in the response + // so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: minimum 20; maximum 100. + MaxRecords *int64 `type:"integer"` + + // The offering type filter value. Use this parameter to show only the available + // offerings matching the specified offering type. + // + // Valid Values: "Light Utilization"|"Medium Utilization"|"Heavy Utilization" + OfferingType *string `type:"string"` + + // The product description filter value. Use this parameter to show only the + // available offerings matching the specified product description. + ProductDescription *string `type:"string"` + + // The offering identifier filter value. Use this parameter to show only the + // available offering that matches the specified reservation identifier. 
+    //
+    // Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706
+    ReservedCacheNodesOfferingId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeReservedCacheNodesOfferingsInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeReservedCacheNodesOfferingsInput) GoString() string {
+    return s.String()
+}
+
+// Represents the output of a DescribeReservedCacheNodesOfferings action.
+type DescribeReservedCacheNodesOfferingsOutput struct {
+    _ struct{} `type:"structure"`
+
+    // Provides an identifier to allow retrieval of paginated results.
+    Marker *string `type:"string"`
+
+    // A list of reserved cache node offerings. Each element in the list contains
+    // detailed information about one offering.
+    ReservedCacheNodesOfferings []*ReservedCacheNodesOffering `locationNameList:"ReservedCacheNodesOffering" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeReservedCacheNodesOfferingsOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeReservedCacheNodesOfferingsOutput) GoString() string {
+    return s.String()
+}
+
+// Represents the output of a DescribeReservedCacheNodes action.
+type DescribeReservedCacheNodesOutput struct {
+    _ struct{} `type:"structure"`
+
+    // Provides an identifier to allow retrieval of paginated results.
+    Marker *string `type:"string"`
+
+    // A list of reserved cache nodes. Each element in the list contains detailed
+    // information about one node.
+    ReservedCacheNodes []*ReservedCacheNode `locationNameList:"ReservedCacheNode" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeReservedCacheNodesOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeReservedCacheNodesOutput) GoString() string {
+    return s.String()
+}
+
+// Represents the input of a DescribeSnapshots action.
+type DescribeSnapshotsInput struct {
+    _ struct{} `type:"structure"`
+
+    // A user-supplied cluster identifier. If this parameter is specified, only
+    // snapshots associated with that specific cache cluster will be described.
+    CacheClusterId *string `type:"string"`
+
+    // An optional marker returned from a prior request. Use this marker for pagination
+    // of results from this action. If this parameter is specified, the response
+    // includes only records beyond the marker, up to the value specified by MaxRecords.
+    Marker *string `type:"string"`
+
+    // The maximum number of records to include in the response. If more records
+    // exist than the specified MaxRecords value, a marker is included in the response
+    // so that the remaining results can be retrieved.
+    //
+    // Default: 50
+    //
+    // Constraints: minimum 20; maximum 50.
+    MaxRecords *int64 `type:"integer"`
+
+    // A user-supplied name of the snapshot. If this parameter is specified, only
+    // this snapshot will be described.
+    SnapshotName *string `type:"string"`
+
+    // If set to system, the output shows snapshots that were automatically created
+    // by ElastiCache. If set to user, the output shows snapshots that were manually
+    // created. If omitted, the output shows both automatically and manually created
+    // snapshots.
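+    //
+    // A usage sketch (illustrative only; svc is an assumed *elasticache.ElastiCache
+    // client): list only manually created snapshots.
+    //
+    //    out, err := svc.DescribeSnapshots(&elasticache.DescribeSnapshotsInput{
+    //        SnapshotSource: aws.String("user"),
+    //    })
+    //    if err == nil {
+    //        for _, s := range out.Snapshots {
+    //            log.Println(aws.StringValue(s.SnapshotName))
+    //        }
+    //    }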
+ SnapshotSource *string `type:"string"` +} + +// String returns the string representation +func (s DescribeSnapshotsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSnapshotsInput) GoString() string { + return s.String() +} + +// Represents the output of a DescribeSnapshots action. +type DescribeSnapshotsOutput struct { + _ struct{} `type:"structure"` + + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this action. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // A list of snapshots. Each item in the list contains detailed information + // about one snapshot. + Snapshots []*Snapshot `locationNameList:"Snapshot" type:"list"` +} + +// String returns the string representation +func (s DescribeSnapshotsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSnapshotsOutput) GoString() string { + return s.String() +} + +// Provides ownership and status information for an Amazon EC2 security group. +type EC2SecurityGroup struct { + _ struct{} `type:"structure"` + + // The name of the Amazon EC2 security group. + EC2SecurityGroupName *string `type:"string"` + + // The AWS account ID of the Amazon EC2 security group owner. + EC2SecurityGroupOwnerId *string `type:"string"` + + // The status of the Amazon EC2 security group. + Status *string `type:"string"` +} + +// String returns the string representation +func (s EC2SecurityGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EC2SecurityGroup) GoString() string { + return s.String() +} + +// Represents the information required for client programs to connect to a cache +// node. +type Endpoint struct { + _ struct{} `type:"structure"` + + // The DNS hostname of the cache node. + Address *string `type:"string"` + + // The port number that the cache engine is listening on. + Port *int64 `type:"integer"` +} + +// String returns the string representation +func (s Endpoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Endpoint) GoString() string { + return s.String() +} + +// Represents the output of a DescribeEngineDefaultParameters action. +type EngineDefaults struct { + _ struct{} `type:"structure"` + + // A list of parameters specific to a particular cache node type. Each element + // in the list contains detailed information about one parameter. + CacheNodeTypeSpecificParameters []*CacheNodeTypeSpecificParameter `locationNameList:"CacheNodeTypeSpecificParameter" type:"list"` + + // Specifies the name of the cache parameter group family to which the engine + // default parameters apply. + CacheParameterGroupFamily *string `type:"string"` + + // Provides an identifier to allow retrieval of paginated results. + Marker *string `type:"string"` + + // Contains a list of engine default parameters. + Parameters []*Parameter `locationNameList:"Parameter" type:"list"` +} + +// String returns the string representation +func (s EngineDefaults) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EngineDefaults) GoString() string { + return s.String() +} + +// Represents a single occurrence of something interesting within the system. 
+// Some examples of events are creating a cache cluster, adding or removing
+// a cache node, or rebooting a node.
+type Event struct {
+    _ struct{} `type:"structure"`
+
+    // The date and time when the event occurred.
+    Date *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+    // The text of the event.
+    Message *string `type:"string"`
+
+    // The identifier for the source of the event. For example, if the event occurred
+    // at the cache cluster level, the identifier would be the name of the cache
+    // cluster.
+    SourceIdentifier *string `type:"string"`
+
+    // Specifies the origin of this event - a cache cluster, a parameter group,
+    // a security group, etc.
+    SourceType *string `type:"string" enum:"SourceType"`
+}
+
+// String returns the string representation
+func (s Event) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Event) GoString() string {
+    return s.String()
+}
+
+// The input parameters for the ListAllowedNodeTypeModifications action.
+type ListAllowedNodeTypeModificationsInput struct {
+    _ struct{} `type:"structure"`
+
+    // The name of the cache cluster you want to scale up to a larger node instance
+    // type. ElastiCache uses the cluster id to identify the current node type of
+    // this cluster and from that to create a list of node types you can scale
+    // up to.
+    //
+    // You must provide a value for either the CacheClusterId or the ReplicationGroupId.
+    CacheClusterId *string `type:"string"`
+
+    // The name of the replication group you want to scale up to a larger node type.
+    // ElastiCache uses the replication group id to identify the current node type
+    // being used by this replication group, and from that to create a list of node
+    // types you can scale up to.
+    //
+    // You must provide a value for either the CacheClusterId or the ReplicationGroupId.
+    ReplicationGroupId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListAllowedNodeTypeModificationsInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListAllowedNodeTypeModificationsInput) GoString() string {
+    return s.String()
+}
+
+// Represents the allowed node types you can use to modify your cache cluster
+// or replication group.
+type ListAllowedNodeTypeModificationsOutput struct {
+    _ struct{} `type:"structure"`
+
+    // A string list, each element of which specifies a cache node type which you
+    // can use to scale your cache cluster or replication group.
+    //
+    // When scaling up a Redis cluster or replication group using ModifyCacheCluster
+    // or ModifyReplicationGroup, use a value from this list for the CacheNodeType
+    // parameter.
+    ScaleUpModifications []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s ListAllowedNodeTypeModificationsOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListAllowedNodeTypeModificationsOutput) GoString() string {
+    return s.String()
+}
+
+// The input parameters for the ListTagsForResource action.
+type ListTagsForResourceInput struct {
+    _ struct{} `type:"structure"`
+
+    // The Amazon Resource Name (ARN) of the resource for which you want the list
+    // of tags, for example arn:aws:elasticache:us-west-2:0123456789:cluster:myCluster
+    // or arn:aws:elasticache:us-west-2:0123456789:snapshot:mySnapshot.
+    //
+    // For more information on ARNs, go to Amazon Resource Names (ARNs) and AWS
+    // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+    ResourceName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListTagsForResourceInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTagsForResourceInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListTagsForResourceInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"}
+    if s.ResourceName == nil {
+        invalidParams.Add(request.NewErrParamRequired("ResourceName"))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// Represents the input of a ModifyCacheCluster action.
+type ModifyCacheClusterInput struct {
+    _ struct{} `type:"structure"`
+
+    // Specifies whether the new nodes in this Memcached cache cluster are all created
+    // in a single Availability Zone or created across multiple Availability Zones.
+    //
+    // Valid values: single-az | cross-az.
+    //
+    // This option is only supported for Memcached cache clusters.
+    //
+    // You cannot specify single-az if the Memcached cache cluster already has
+    // cache nodes in different Availability Zones. If cross-az is specified, existing
+    // Memcached nodes remain in their current Availability Zone.
+    //
+    // Only newly created nodes will be located in different Availability Zones.
+    // For instructions on how to move existing Memcached nodes to different Availability
+    // Zones, see the Availability Zone Considerations section of Cache Node Considerations
+    // for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheNode.Memcached.html).
+    AZMode *string `type:"string" enum:"AZMode"`
+
+    // If true, this parameter causes the modifications in this request and any
+    // pending modifications to be applied, asynchronously and as soon as possible,
+    // regardless of the PreferredMaintenanceWindow setting for the cache cluster.
+    //
+    // If false, then changes to the cache cluster are applied on the next maintenance
+    // reboot, or the next failure reboot, whichever occurs first.
+    //
+    // If you perform a ModifyCacheCluster before a pending modification is applied,
+    // the pending modification is replaced by the newer modification.
+    //
+    // Valid values: true | false
+    //
+    // Default: false
+    ApplyImmediately *bool `type:"boolean"`
+
+    // This parameter is currently disabled.
+    AutoMinorVersionUpgrade *bool `type:"boolean"`
+
+    // The cache cluster identifier. This value is stored as a lowercase string.
+    CacheClusterId *string `type:"string" required:"true"`
+
+    // A list of cache node IDs to be removed. A node ID is a numeric identifier
+    // (0001, 0002, etc.). This parameter is only valid when NumCacheNodes is less
+    // than the existing number of cache nodes. The number of cache node IDs supplied
+    // in this parameter must match the difference between the existing number of
+    // cache nodes in the cluster or pending cache nodes, whichever is greater,
+    // and the value of NumCacheNodes in the request.
+    //
+    // For example: If you have 3 active cache nodes, 7 pending cache nodes, and
+    // the number of cache nodes in this ModifyCacheCluster call is 5, you must list
+    // 2 (7 - 5) cache node IDs to remove.
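+    //
+    // A worked sketch of that arithmetic (illustrative only; svc is an assumed
+    // *elasticache.ElastiCache client and the cluster ID is hypothetical):
+    // going from 7 pending nodes down to 5 means naming the 2 node IDs to drop.
+    //
+    //    _, err := svc.ModifyCacheCluster(&elasticache.ModifyCacheClusterInput{
+    //        CacheClusterId:       aws.String("my-memcached"), // hypothetical cluster ID
+    //        NumCacheNodes:        aws.Int64(5),
+    //        CacheNodeIdsToRemove: []*string{aws.String("0006"), aws.String("0007")},
+    //        ApplyImmediately:     aws.Bool(true),
+    //    })
+    //    if err != nil {
+    //        log.Println(err)
+    //    }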
+    CacheNodeIdsToRemove []*string `locationNameList:"CacheNodeId" type:"list"`
+
+    // A valid cache node type that you want to scale this cache cluster to. The
+    // value of this parameter must be one of the ScaleUpModifications values returned
+    // by the ListAllowedNodeTypeModifications action.
+    CacheNodeType *string `type:"string"`
+
+    // The name of the cache parameter group to apply to this cache cluster. This
+    // change is asynchronously applied as soon as possible for parameters when
+    // the ApplyImmediately parameter is specified as true for this request.
+    CacheParameterGroupName *string `type:"string"`
+
+    // A list of cache security group names to authorize on this cache cluster.
+    // This change is asynchronously applied as soon as possible.
+    //
+    // This parameter can be used only with clusters that are created outside of
+    // an Amazon Virtual Private Cloud (VPC).
+    //
+    // Constraints: Must contain no more than 255 alphanumeric characters. Must
+    // not be "Default".
+    CacheSecurityGroupNames []*string `locationNameList:"CacheSecurityGroupName" type:"list"`
+
+    // The upgraded version of the cache engine to be run on the cache nodes.
+    //
+    // Important: You can upgrade to a newer engine version (see Selecting a Cache
+    // Engine and Version (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/SelectEngine.html#VersionManagement)),
+    // but you cannot downgrade to an earlier engine version. If you want to use
+    // an earlier engine version, you must delete the existing cache cluster and
+    // create it anew with the earlier engine version.
+    EngineVersion *string `type:"string"`
+
+    // The list of Availability Zones where the new Memcached cache nodes will be
+    // created.
+    //
+    // This parameter is only valid when NumCacheNodes in the request is greater
+    // than the sum of the number of active cache nodes and the number of cache
+    // nodes pending creation (which may be zero). The number of Availability Zones
+    // supplied in this list must match the cache nodes being added in this request.
+    //
+    // This option is only supported on Memcached clusters.
+    //
+    // Scenarios:
+    //
+    // Scenario 1: You have 3 active nodes and wish to add 2 nodes. Specify
+    // NumCacheNodes=5 (3 + 2) and optionally specify two Availability Zones for
+    // the two new nodes.
+    //
+    // Scenario 2: You have 3 active nodes and 2 nodes pending creation (from
+    // the scenario 1 call) and want to add 1 more node. Specify NumCacheNodes=6
+    // ((3 + 2) + 1) and optionally specify an Availability Zone for the new node.
+    //
+    // Scenario 3: You want to cancel all pending actions. Specify NumCacheNodes=3
+    // to cancel all pending actions.
+    //
+    // The Availability Zone placement of nodes pending creation cannot be modified.
+    // If you wish to cancel any nodes pending creation, add 0 nodes by setting
+    // NumCacheNodes to the number of current nodes.
+    //
+    // If cross-az is specified, existing Memcached nodes remain in their current
+    // Availability Zone. Only newly created nodes can be located in different Availability
+    // Zones. For guidance on how to move existing Memcached nodes to different
+    // Availability Zones, see the Availability Zone Considerations section of Cache
+    // Node Considerations for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheNode.Memcached.html).
+    //
+    // Impact of new add/remove requests upon pending requests
+    //
+    // Scenario-1
+    //
+    // Pending Action: Delete
+    //
+    // New Request: Delete
+    //
+    // Result: The new delete, pending or immediate, replaces the pending delete.
+    //
+    // Scenario-2
+    //
+    // Pending Action: Delete
+    //
+    // New Request: Create
+    //
+    // Result: The new create, pending or immediate, replaces the pending delete.
+    //
+    // Scenario-3
+    //
+    // Pending Action: Create
+    //
+    // New Request: Delete
+    //
+    // Result: The new delete, pending or immediate, replaces the pending create.
+    //
+    // Scenario-4
+    //
+    // Pending Action: Create
+    //
+    // New Request: Create
+    //
+    // Result: The new create is added to the pending create.
+    //
+    // Important: If the new create request is Apply Immediately - Yes, all creates
+    // are performed immediately. If the new create request is Apply Immediately
+    // - No, all creates are pending.
+    //
+    // Example:
+    //
+    // NewAvailabilityZones.member.1=us-west-2a&NewAvailabilityZones.member.2=us-west-2b&NewAvailabilityZones.member.3=us-west-2c
+    NewAvailabilityZones []*string `locationNameList:"PreferredAvailabilityZone" type:"list"`
+
+    // The Amazon Resource Name (ARN) of the Amazon SNS topic to which notifications
+    // will be sent.
+    //
+    // The Amazon SNS topic owner must be the same as the cache cluster owner.
+    NotificationTopicArn *string `type:"string"`
+
+    // The status of the Amazon SNS notification topic. Notifications are sent only
+    // if the status is active.
+    //
+    // Valid values: active | inactive
+    NotificationTopicStatus *string `type:"string"`
+
+    // The number of cache nodes that the cache cluster should have. If the value
+    // for NumCacheNodes is greater than the sum of the number of current cache
+    // nodes and the number of cache nodes pending creation (which may be zero),
+    // then more nodes will be added. If the value is less than the number of existing
+    // cache nodes, then nodes will be removed. If the value is equal to the number
+    // of current cache nodes, then any pending add or remove requests are canceled.
+    //
+    // If you are removing cache nodes, you must use the CacheNodeIdsToRemove parameter
+    // to provide the IDs of the specific cache nodes to remove.
+    //
+    // For clusters running Redis, this value must be 1. For clusters running Memcached,
+    // this value must be between 1 and 20.
+    //
+    // Adding or removing Memcached cache nodes can be applied immediately or
+    // as a pending action. See ApplyImmediately.
+    //
+    // A pending action to modify the number of cache nodes in a cluster during
+    // its maintenance window, whether by adding or removing nodes in accordance
+    // with the scale out architecture, is not queued. The customer's latest request
+    // to add or remove nodes to the cluster overrides any previous pending actions
+    // to modify the number of cache nodes in the cluster. For example, a request
+    // to remove 2 nodes would override a previous pending action to remove 3 nodes.
+    // Similarly, a request to add 2 nodes would override a previous pending action
+    // to remove 3 nodes and vice versa. As Memcached cache nodes may now be provisioned
+    // in different Availability Zones with flexible cache node placement, a request
+    // to add nodes does not automatically override a previous pending action to
+    // add nodes. The customer can modify the previous pending action to add more
+    // nodes or explicitly cancel the pending request and retry the new request.
+ // To cancel pending actions to modify the number of cache nodes in a cluster, + // use the ModifyCacheCluster request and set NumCacheNodes equal to the number + // of cache nodes currently in the cache cluster. + NumCacheNodes *int64 `type:"integer"` + + // Specifies the weekly time range during which maintenance on the cache cluster + // is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi + // (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid + // values for ddd are: + // + // sun + // + // mon + // + // tue + // + // wed + // + // thu + // + // fri + // + // sat + // + // Example: sun:05:00-sun:09:00 + PreferredMaintenanceWindow *string `type:"string"` + + // Specifies the VPC Security Groups associated with the cache cluster. + // + // This parameter can be used only with clusters that are created in an Amazon + // Virtual Private Cloud (VPC). + SecurityGroupIds []*string `locationNameList:"SecurityGroupId" type:"list"` + + // The number of days for which ElastiCache will retain automatic cache cluster + // snapshots before deleting them. For example, if you set SnapshotRetentionLimit + // to 5, then a snapshot that was taken today will be retained for 5 days before + // being deleted. + // + // If the value of SnapshotRetentionLimit is set to zero (0), backups are + // turned off. + SnapshotRetentionLimit *int64 `type:"integer"` + + // The daily time range (in UTC) during which ElastiCache will begin taking + // a daily snapshot of your cache cluster. + SnapshotWindow *string `type:"string"` +} + +// String returns the string representation +func (s ModifyCacheClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyCacheClusterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyCacheClusterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyCacheClusterInput"} + if s.CacheClusterId == nil { + invalidParams.Add(request.NewErrParamRequired("CacheClusterId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ModifyCacheClusterOutput struct { + _ struct{} `type:"structure"` + + // Contains all of the attributes of a specific cache cluster. + CacheCluster *CacheCluster `type:"structure"` +} + +// String returns the string representation +func (s ModifyCacheClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyCacheClusterOutput) GoString() string { + return s.String() +} + +// Represents the input of a ModifyCacheParameterGroup action. +type ModifyCacheParameterGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the cache parameter group to modify. + CacheParameterGroupName *string `type:"string" required:"true"` + + // An array of parameter names and values for the parameter update. You must + // supply at least one parameter name and value; subsequent arguments are optional. + // A maximum of 20 parameters may be modified per request. 
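The add/remove scenarios documented above map directly onto a ModifyCacheCluster call. Below is a minimal sketch of Scenario 1, assuming a Memcached cluster with 3 active nodes; the cluster ID and Availability Zone names are illustrative, not taken from this patch:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.New())

	// Scenario 1: 3 active nodes plus 2 new ones => NumCacheNodes = 5.
	// The Availability Zones are optional, but if supplied their count
	// must match the number of nodes being added.
	resp, err := svc.ModifyCacheCluster(&elasticache.ModifyCacheClusterInput{
		CacheClusterId: aws.String("my-memcached-cluster"), // illustrative ID
		NumCacheNodes:  aws.Int64(5),
		NewAvailabilityZones: []*string{
			aws.String("us-west-2a"),
			aws.String("us-west-2b"),
		},
		ApplyImmediately: aws.Bool(true),
	})
	if err != nil {
		fmt.Println(err.Error())
		return
	}
	fmt.Println(resp)

	// Per Scenario 3, setting NumCacheNodes back to the current node
	// count (3 here) would instead cancel all pending add/remove actions.
}
```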
+ ParameterNameValues []*ParameterNameValue `locationNameList:"ParameterNameValue" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s ModifyCacheParameterGroupInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyCacheParameterGroupInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ModifyCacheParameterGroupInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ModifyCacheParameterGroupInput"}
+ if s.CacheParameterGroupName == nil {
+ invalidParams.Add(request.NewErrParamRequired("CacheParameterGroupName"))
+ }
+ if s.ParameterNameValues == nil {
+ invalidParams.Add(request.NewErrParamRequired("ParameterNameValues"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the input of a ModifyCacheSubnetGroup action.
+type ModifyCacheSubnetGroupInput struct {
+ _ struct{} `type:"structure"`
+
+ // A description for the cache subnet group.
+ CacheSubnetGroupDescription *string `type:"string"`
+
+ // The name for the cache subnet group. This value is stored as a lowercase
+ // string.
+ //
+ // Constraints: Must contain no more than 255 alphanumeric characters or hyphens.
+ //
+ // Example: mysubnetgroup
+ CacheSubnetGroupName *string `type:"string" required:"true"`
+
+ // The EC2 subnet IDs for the cache subnet group.
+ SubnetIds []*string `locationNameList:"SubnetIdentifier" type:"list"`
+}
+
+// String returns the string representation
+func (s ModifyCacheSubnetGroupInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyCacheSubnetGroupInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ModifyCacheSubnetGroupInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ModifyCacheSubnetGroupInput"}
+ if s.CacheSubnetGroupName == nil {
+ invalidParams.Add(request.NewErrParamRequired("CacheSubnetGroupName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type ModifyCacheSubnetGroupOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Represents the output of one of the following actions:
+ //
+ //   CreateCacheSubnetGroup
+ //
+ //   ModifyCacheSubnetGroup
+ CacheSubnetGroup *CacheSubnetGroup `type:"structure"`
+}
+
+// String returns the string representation
+func (s ModifyCacheSubnetGroupOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyCacheSubnetGroupOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a ModifyReplicationGroup action.
+type ModifyReplicationGroupInput struct {
+ _ struct{} `type:"structure"`
+
+ // If true, this parameter causes the modifications in this request and any
+ // pending modifications to be applied, asynchronously and as soon as possible,
+ // regardless of the PreferredMaintenanceWindow setting for the replication
+ // group.
+ //
+ // If false, then changes to the nodes in the replication group are applied
+ // on the next maintenance reboot, or the next failure reboot, whichever occurs
+ // first.
+ //
+ // Valid values: true | false
+ //
+ // Default: false
+ ApplyImmediately *bool `type:"boolean"`
+
+ // This parameter is currently disabled.
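Before continuing with the replication-group input, here is a minimal sketch of a ModifyCacheParameterGroup call using the input type defined above. The group name and the memcached parameter are illustrative assumptions; per the documentation above, at most 20 parameters can be modified per request:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.New())

	// ModifyCacheParameterGroup returns a CacheParameterGroupNameMessage
	// naming the modified group.
	resp, err := svc.ModifyCacheParameterGroup(&elasticache.ModifyCacheParameterGroupInput{
		CacheParameterGroupName: aws.String("my-memcached-params"), // illustrative name
		ParameterNameValues: []*elasticache.ParameterNameValue{
			{
				ParameterName:  aws.String("chunk_size_growth_factor"), // illustrative parameter
				ParameterValue: aws.String("1.25"),
			},
		},
	})
	if err != nil {
		fmt.Println(err.Error())
		return
	}
	fmt.Println(resp)
}
```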
+ AutoMinorVersionUpgrade *bool `type:"boolean"` + + // Whether a read replica will be automatically promoted to read/write primary + // if the existing primary encounters a failure. + // + // Valid values: true | false + // + // ElastiCache Multi-AZ replication groups are not supported on: + // + // Redis versions earlier than 2.8.6. + // + // T1 and T2 cache node types. + AutomaticFailoverEnabled *bool `type:"boolean"` + + // A valid cache node type that you want to scale this replication group to. + // The value of this parameter must be one of the ScaleUpModifications values + // returned by the ListAllowedCacheNodeTypeModification action. + CacheNodeType *string `type:"string"` + + // The name of the cache parameter group to apply to all of the clusters in + // this replication group. This change is asynchronously applied as soon as + // possible for parameters when the ApplyImmediately parameter is specified + // as true for this request. + CacheParameterGroupName *string `type:"string"` + + // A list of cache security group names to authorize for the clusters in this + // replication group. This change is asynchronously applied as soon as possible. + // + // This parameter can be used only with replication group containing cache + // clusters running outside of an Amazon Virtual Private Cloud (VPC). + // + // Constraints: Must contain no more than 255 alphanumeric characters. Must + // not be "Default". + CacheSecurityGroupNames []*string `locationNameList:"CacheSecurityGroupName" type:"list"` + + // The upgraded version of the cache engine to be run on the cache clusters + // in the replication group. + // + // Important: You can upgrade to a newer engine version (see Selecting a Cache + // Engine and Version (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/SelectEngine.html#VersionManagement)), + // but you cannot downgrade to an earlier engine version. If you want to use + // an earlier engine version, you must delete the existing replication group + // and create it anew with the earlier engine version. + EngineVersion *string `type:"string"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic to which notifications + // will be sent. + // + // The Amazon SNS topic owner must be same as the replication group owner. + NotificationTopicArn *string `type:"string"` + + // The status of the Amazon SNS notification topic for the replication group. + // Notifications are sent only if the status is active. + // + // Valid values: active | inactive + NotificationTopicStatus *string `type:"string"` + + // Specifies the weekly time range during which maintenance on the cache cluster + // is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi + // (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid + // values for ddd are: + // + // sun + // + // mon + // + // tue + // + // wed + // + // thu + // + // fri + // + // sat + // + // Example: sun:05:00-sun:09:00 + PreferredMaintenanceWindow *string `type:"string"` + + // If this parameter is specified, ElastiCache will promote the specified cluster + // in the specified replication group to the primary role. The nodes of all + // other clusters in the replication group will be read replicas. + PrimaryClusterId *string `type:"string"` + + // A description for the replication group. Maximum length is 255 characters. + ReplicationGroupDescription *string `type:"string"` + + // The identifier of the replication group to modify. 
+ ReplicationGroupId *string `type:"string" required:"true"` + + // Specifies the VPC Security Groups associated with the cache clusters in the + // replication group. + // + // This parameter can be used only with replication group containing cache + // clusters running in an Amazon Virtual Private Cloud (VPC). + SecurityGroupIds []*string `locationNameList:"SecurityGroupId" type:"list"` + + // The number of days for which ElastiCache will retain automatic node group + // snapshots before deleting them. For example, if you set SnapshotRetentionLimit + // to 5, then a snapshot that was taken today will be retained for 5 days before + // being deleted. + // + // Important If the value of SnapshotRetentionLimit is set to zero (0), backups + // are turned off. + SnapshotRetentionLimit *int64 `type:"integer"` + + // The daily time range (in UTC) during which ElastiCache will begin taking + // a daily snapshot of the node group specified by SnapshottingClusterId. + // + // Example: 05:00-09:00 + // + // If you do not specify this parameter, then ElastiCache will automatically + // choose an appropriate time range. + SnapshotWindow *string `type:"string"` + + // The cache cluster ID that will be used as the daily snapshot source for the + // replication group. + SnapshottingClusterId *string `type:"string"` +} + +// String returns the string representation +func (s ModifyReplicationGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyReplicationGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyReplicationGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyReplicationGroupInput"} + if s.ReplicationGroupId == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationGroupId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ModifyReplicationGroupOutput struct { + _ struct{} `type:"structure"` + + // Contains all of the attributes of a specific replication group. + ReplicationGroup *ReplicationGroup `type:"structure"` +} + +// String returns the string representation +func (s ModifyReplicationGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyReplicationGroupOutput) GoString() string { + return s.String() +} + +// Represents a collection of cache nodes in a replication group. +type NodeGroup struct { + _ struct{} `type:"structure"` + + // The identifier for the node group. A replication group contains only one + // node group; therefore, the node group ID is 0001. + NodeGroupId *string `type:"string"` + + // A list containing information about individual nodes within the node group. + NodeGroupMembers []*NodeGroupMember `locationNameList:"NodeGroupMember" type:"list"` + + // Represents the information required for client programs to connect to a cache + // node. + PrimaryEndpoint *Endpoint `type:"structure"` + + // The current state of this replication group - creating, available, etc. + Status *string `type:"string"` +} + +// String returns the string representation +func (s NodeGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NodeGroup) GoString() string { + return s.String() +} + +// Represents a single node within a node group. 
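A sketch of the PrimaryClusterId promotion described in the input above; the replication group and cluster IDs are illustrative assumptions:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.New())

	// Promote a specific cluster to the primary role; the other clusters
	// in the group become read replicas.
	resp, err := svc.ModifyReplicationGroup(&elasticache.ModifyReplicationGroupInput{
		ReplicationGroupId: aws.String("my-repl-group"),  // required; illustrative ID
		PrimaryClusterId:   aws.String("my-cluster-002"), // illustrative cluster ID
		ApplyImmediately:   aws.Bool(true),               // do not wait for the maintenance window
	})
	if err != nil {
		fmt.Println(err.Error())
		return
	}
	fmt.Println(resp)
}
```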
+type NodeGroupMember struct { + _ struct{} `type:"structure"` + + // The ID of the cache cluster to which the node belongs. + CacheClusterId *string `type:"string"` + + // The ID of the node within its cache cluster. A node ID is a numeric identifier + // (0001, 0002, etc.). + CacheNodeId *string `type:"string"` + + // The role that is currently assigned to the node - primary or replica. + CurrentRole *string `type:"string"` + + // The name of the Availability Zone in which the node is located. + PreferredAvailabilityZone *string `type:"string"` + + // Represents the information required for client programs to connect to a cache + // node. + ReadEndpoint *Endpoint `type:"structure"` +} + +// String returns the string representation +func (s NodeGroupMember) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NodeGroupMember) GoString() string { + return s.String() +} + +// Represents an individual cache node in a snapshot of a cache cluster. +type NodeSnapshot struct { + _ struct{} `type:"structure"` + + // The date and time when the cache node was created in the source cache cluster. + CacheNodeCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The cache node identifier for the node in the source cache cluster. + CacheNodeId *string `type:"string"` + + // The size of the cache on the source cache node. + CacheSize *string `type:"string"` + + // The date and time when the source node's metadata and cache data set was + // obtained for the snapshot. + SnapshotCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s NodeSnapshot) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NodeSnapshot) GoString() string { + return s.String() +} + +// Describes a notification topic and its status. Notification topics are used +// for publishing ElastiCache events to subscribers using Amazon Simple Notification +// Service (SNS). +type NotificationConfiguration struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) that identifies the topic. + TopicArn *string `type:"string"` + + // The current state of the topic. + TopicStatus *string `type:"string"` +} + +// String returns the string representation +func (s NotificationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfiguration) GoString() string { + return s.String() +} + +// Describes an individual setting that controls some aspect of ElastiCache +// behavior. +type Parameter struct { + _ struct{} `type:"structure"` + + // The valid range of values for the parameter. + AllowedValues *string `type:"string"` + + // ChangeType indicates whether a change to the parameter will be applied immediately + // or requires a reboot for the change to be applied. You can force a reboot + // or wait until the next maintenance window's reboot. For more information, + // see Rebooting a Cluster (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/Clusters.Rebooting.html). + ChangeType *string `type:"string" enum:"ChangeType"` + + // The valid data type for the parameter. + DataType *string `type:"string"` + + // A description of the parameter. + Description *string `type:"string"` + + // Indicates whether (true) or not (false) the parameter can be modified. 
Some
+ // parameters have security or operational implications that prevent them from
+ // being changed.
+ IsModifiable *bool `type:"boolean"`
+
+ // The earliest cache engine version to which the parameter can apply.
+ MinimumEngineVersion *string `type:"string"`
+
+ // The name of the parameter.
+ ParameterName *string `type:"string"`
+
+ // The value of the parameter.
+ ParameterValue *string `type:"string"`
+
+ // The source of the parameter.
+ Source *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Parameter) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Parameter) GoString() string {
+ return s.String()
+}
+
+// Describes a name-value pair that is used to update the value of a parameter.
+type ParameterNameValue struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the parameter.
+ ParameterName *string `type:"string"`
+
+ // The value of the parameter.
+ ParameterValue *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ParameterNameValue) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ParameterNameValue) GoString() string {
+ return s.String()
+}
+
+// A group of settings that will be applied to the cache cluster in the future,
+// or that are currently being applied.
+type PendingModifiedValues struct {
+ _ struct{} `type:"structure"`
+
+ // A list of cache node IDs that are being removed (or will be removed) from
+ // the cache cluster. A node ID is a numeric identifier (0001, 0002, etc.).
+ CacheNodeIdsToRemove []*string `locationNameList:"CacheNodeId" type:"list"`
+
+ // The cache node type that this cache cluster or replication group will be
+ // scaled to.
+ CacheNodeType *string `type:"string"`
+
+ // The new cache engine version that the cache cluster will run.
+ EngineVersion *string `type:"string"`
+
+ // The new number of cache nodes for the cache cluster.
+ //
+ // For clusters running Redis, this value must be 1. For clusters running Memcached,
+ // this value must be between 1 and 20.
+ NumCacheNodes *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s PendingModifiedValues) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PendingModifiedValues) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a PurchaseReservedCacheNodesOffering action.
+type PurchaseReservedCacheNodesOfferingInput struct {
+ _ struct{} `type:"structure"`
+
+ // The number of cache node instances to reserve.
+ //
+ // Default: 1
+ CacheNodeCount *int64 `type:"integer"`
+
+ // A customer-specified identifier to track this reservation.
+ //
+ // The Reserved Cache Node ID is a unique customer-specified identifier to
+ // track this reservation. If this parameter is not specified, ElastiCache automatically
+ // generates an identifier for the reservation.
+ //
+ // Example: myreservationID
+ ReservedCacheNodeId *string `type:"string"`
+
+ // The ID of the reserved cache node offering to purchase.
+ //
+ // Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706
+ ReservedCacheNodesOfferingId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s PurchaseReservedCacheNodesOfferingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PurchaseReservedCacheNodesOfferingInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PurchaseReservedCacheNodesOfferingInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PurchaseReservedCacheNodesOfferingInput"}
+ if s.ReservedCacheNodesOfferingId == nil {
+ invalidParams.Add(request.NewErrParamRequired("ReservedCacheNodesOfferingId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type PurchaseReservedCacheNodesOfferingOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Represents the output of a PurchaseReservedCacheNodesOffering action.
+ ReservedCacheNode *ReservedCacheNode `type:"structure"`
+}
+
+// String returns the string representation
+func (s PurchaseReservedCacheNodesOfferingOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PurchaseReservedCacheNodesOfferingOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a RebootCacheCluster action.
+type RebootCacheClusterInput struct {
+ _ struct{} `type:"structure"`
+
+ // The cache cluster identifier. This parameter is stored as a lowercase string.
+ CacheClusterId *string `type:"string" required:"true"`
+
+ // A list of cache node IDs to reboot. A node ID is a numeric identifier (0001,
+ // 0002, etc.). To reboot an entire cache cluster, specify all of the cache
+ // node IDs.
+ CacheNodeIdsToReboot []*string `locationNameList:"CacheNodeId" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s RebootCacheClusterInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RebootCacheClusterInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RebootCacheClusterInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RebootCacheClusterInput"}
+ if s.CacheClusterId == nil {
+ invalidParams.Add(request.NewErrParamRequired("CacheClusterId"))
+ }
+ if s.CacheNodeIdsToReboot == nil {
+ invalidParams.Add(request.NewErrParamRequired("CacheNodeIdsToReboot"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type RebootCacheClusterOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Contains all of the attributes of a specific cache cluster.
+ CacheCluster *CacheCluster `type:"structure"`
+}
+
+// String returns the string representation
+func (s RebootCacheClusterOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RebootCacheClusterOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the specific price and frequency of a recurring charge for a reserved
+// cache node, or for a reserved cache node offering.
+type RecurringCharge struct {
+ _ struct{} `type:"structure"`
+
+ // The monetary amount of the recurring charge.
+ RecurringChargeAmount *float64 `type:"double"`
+
+ // The frequency of the recurring charge.
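Returning to the RebootCacheClusterInput defined above: both of its fields are required, and rebooting the whole cluster means listing every node ID. A minimal sketch with illustrative IDs:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.New())

	// Node IDs are numeric identifiers such as "0001"; listing all of
	// them reboots the entire cache cluster.
	resp, err := svc.RebootCacheCluster(&elasticache.RebootCacheClusterInput{
		CacheClusterId:       aws.String("my-memcached-cluster"), // illustrative ID
		CacheNodeIdsToReboot: []*string{aws.String("0001"), aws.String("0002")},
	})
	if err != nil {
		fmt.Println(err.Error())
		return
	}
	fmt.Println(resp)
}
```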
+ RecurringChargeFrequency *string `type:"string"` +} + +// String returns the string representation +func (s RecurringCharge) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecurringCharge) GoString() string { + return s.String() +} + +// Represents the input of a RemoveTagsFromResource action. +type RemoveTagsFromResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource from which you want the tags + // removed, for example arn:aws:elasticache:us-west-2:0123456789:cluster:myCluster + // or arn:aws:elasticache:us-west-2:0123456789:snapshot:mySnapshot. + // + // For more information on ARNs, go to Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + ResourceName *string `type:"string" required:"true"` + + // A list of TagKeys identifying the tags you want removed from the named resource. + // For example, TagKeys.member.1=Region removes the cost allocation tag with + // the key name Region from the resource named by the ResourceName parameter. + TagKeys []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s RemoveTagsFromResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsFromResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RemoveTagsFromResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemoveTagsFromResourceInput"} + if s.ResourceName == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceName")) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains all of the attributes of a specific replication group. +type ReplicationGroup struct { + _ struct{} `type:"structure"` + + // Indicates the status of Multi-AZ for this replication group. + // + // ElastiCache Multi-AZ replication groups are not supported on: + // + // Redis versions earlier than 2.8.6. + // + // T1 and T2 cache node types. + AutomaticFailover *string `type:"string" enum:"AutomaticFailoverStatus"` + + // The description of the replication group. + Description *string `type:"string"` + + // The names of all the cache clusters that are part of this replication group. + MemberClusters []*string `locationNameList:"ClusterId" type:"list"` + + // A single element list with information about the nodes in the replication + // group. + NodeGroups []*NodeGroup `locationNameList:"NodeGroup" type:"list"` + + // A group of settings to be applied to the replication group, either immediately + // or during the next maintenance window. + PendingModifiedValues *ReplicationGroupPendingModifiedValues `type:"structure"` + + // The identifier for the replication group. + ReplicationGroupId *string `type:"string"` + + // The cache cluster ID that is used as the daily snapshot source for the replication + // group. + SnapshottingClusterId *string `type:"string"` + + // The current state of this replication group - creating, available, etc. 
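The RemoveTagsFromResource input above pairs a resource ARN with the tag keys to delete. A minimal sketch reusing the ARN format and the Region key from the field documentation (account ID and cluster name are the documentation's placeholders):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.New())

	// Removes the cost allocation tag with the key "Region"; the call
	// returns the remaining tags as a TagListMessage.
	resp, err := svc.RemoveTagsFromResource(&elasticache.RemoveTagsFromResourceInput{
		ResourceName: aws.String("arn:aws:elasticache:us-west-2:0123456789:cluster:myCluster"),
		TagKeys:      []*string{aws.String("Region")},
	})
	if err != nil {
		fmt.Println(err.Error())
		return
	}
	fmt.Println(resp)
}
```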
+ Status *string `type:"string"` +} + +// String returns the string representation +func (s ReplicationGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationGroup) GoString() string { + return s.String() +} + +// The settings to be applied to the replication group, either immediately or +// during the next maintenance window. +type ReplicationGroupPendingModifiedValues struct { + _ struct{} `type:"structure"` + + // Indicates the status of Multi-AZ for this replication group. + // + // ElastiCache Multi-AZ replication groups are not supported on: + // + // Redis versions earlier than 2.8.6. + // + // T1 and T2 cache node types. + AutomaticFailoverStatus *string `type:"string" enum:"PendingAutomaticFailoverStatus"` + + // The primary cluster ID which will be applied immediately (if --apply-immediately + // was specified), or during the next maintenance window. + PrimaryClusterId *string `type:"string"` +} + +// String returns the string representation +func (s ReplicationGroupPendingModifiedValues) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationGroupPendingModifiedValues) GoString() string { + return s.String() +} + +// Represents the output of a PurchaseReservedCacheNodesOffering action. +type ReservedCacheNode struct { + _ struct{} `type:"structure"` + + // The number of cache nodes that have been reserved. + CacheNodeCount *int64 `type:"integer"` + + // The cache node type for the reserved cache nodes. + // + // Valid node types are as follows: + // + // General purpose: + // + // Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, + // cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge + // + // Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, + // cache.m1.large, cache.m1.xlarge + // + // Compute optimized: cache.c1.xlarge + // + // Memory optimized: + // + // Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, + // cache.r3.4xlarge, cache.r3.8xlarge + // + // Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // + // Notes: + // + // All t2 instances are created in an Amazon Virtual Private Cloud (VPC). + // + // Redis backup/restore is not supported for t2 instances. + // + // Redis Append-only files (AOF) functionality is not supported for t1 or + // t2 instances. + // + // For a complete listing of cache node types and specifications, see Amazon + // ElastiCache Product Features and Details (http://aws.amazon.com/elasticache/details) + // and Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#CacheParameterGroups.Memcached.NodeSpecific) + // or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#CacheParameterGroups.Redis.NodeSpecific). + CacheNodeType *string `type:"string"` + + // The duration of the reservation in seconds. + Duration *int64 `type:"integer"` + + // The fixed price charged for this reserved cache node. + FixedPrice *float64 `type:"double"` + + // The offering type of this reserved cache node. + OfferingType *string `type:"string"` + + // The description of the reserved cache node. + ProductDescription *string `type:"string"` + + // The recurring price charged to run this reserved cache node. 
+ RecurringCharges []*RecurringCharge `locationNameList:"RecurringCharge" type:"list"`
+
+ // The unique identifier for the reservation.
+ ReservedCacheNodeId *string `type:"string"`
+
+ // The offering identifier.
+ ReservedCacheNodesOfferingId *string `type:"string"`
+
+ // The time the reservation started.
+ StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // The state of the reserved cache node.
+ State *string `type:"string"`
+
+ // The hourly price charged for this reserved cache node.
+ UsagePrice *float64 `type:"double"`
+}
+
+// String returns the string representation
+func (s ReservedCacheNode) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReservedCacheNode) GoString() string {
+ return s.String()
+}
+
+// Describes all of the attributes of a reserved cache node offering.
+type ReservedCacheNodesOffering struct {
+ _ struct{} `type:"structure"`
+
+ // The cache node type for the reserved cache node.
+ //
+ // Valid node types are as follows:
+ //
+ // General purpose:
+ //
+ // Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium,
+ // cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge
+ //
+ // Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium,
+ // cache.m1.large, cache.m1.xlarge
+ //
+ // Compute optimized: cache.c1.xlarge
+ //
+ // Memory optimized:
+ //
+ // Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge,
+ // cache.r3.4xlarge, cache.r3.8xlarge
+ //
+ // Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge
+ //
+ // Notes:
+ //
+ // All t2 instances are created in an Amazon Virtual Private Cloud (VPC).
+ //
+ // Redis backup/restore is not supported for t2 instances.
+ //
+ // Redis Append-only files (AOF) functionality is not supported for t1 or
+ // t2 instances.
+ //
+ // For a complete listing of cache node types and specifications, see Amazon
+ // ElastiCache Product Features and Details (http://aws.amazon.com/elasticache/details)
+ // and Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#CacheParameterGroups.Memcached.NodeSpecific)
+ // or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#CacheParameterGroups.Redis.NodeSpecific).
+ CacheNodeType *string `type:"string"`
+
+ // The duration of the offering, in seconds.
+ Duration *int64 `type:"integer"`
+
+ // The fixed price charged for this offering.
+ FixedPrice *float64 `type:"double"`
+
+ // The offering type.
+ OfferingType *string `type:"string"`
+
+ // The cache engine used by the offering.
+ ProductDescription *string `type:"string"`
+
+ // The recurring price charged to run this reserved cache node.
+ RecurringCharges []*RecurringCharge `locationNameList:"RecurringCharge" type:"list"`
+
+ // A unique identifier for the reserved cache node offering.
+ ReservedCacheNodesOfferingId *string `type:"string"`
+
+ // The hourly price charged for this offering.
+ UsagePrice *float64 `type:"double"`
+}
+
+// String returns the string representation
+func (s ReservedCacheNodesOffering) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReservedCacheNodesOffering) GoString() string {
+ return s.String()
+}
+
+// Represents the input of a ResetCacheParameterGroup action.
+type ResetCacheParameterGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the cache parameter group to reset. + CacheParameterGroupName *string `type:"string" required:"true"` + + // An array of parameter names to reset to their default values. If ResetAllParameters + // is false, you must specify the name of at least one parameter to reset. + ParameterNameValues []*ParameterNameValue `locationNameList:"ParameterNameValue" type:"list"` + + // If true, all parameters in the cache parameter group will be reset to their + // default values. If false, only the parameters listed by ParameterNameValues + // are reset to their default values. + // + // Valid values: true | false + ResetAllParameters *bool `type:"boolean"` +} + +// String returns the string representation +func (s ResetCacheParameterGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetCacheParameterGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ResetCacheParameterGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResetCacheParameterGroupInput"} + if s.CacheParameterGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("CacheParameterGroupName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the input of a RevokeCacheSecurityGroupIngress action. +type RevokeCacheSecurityGroupIngressInput struct { + _ struct{} `type:"structure"` + + // The name of the cache security group to revoke ingress from. + CacheSecurityGroupName *string `type:"string" required:"true"` + + // The name of the Amazon EC2 security group to revoke access from. + EC2SecurityGroupName *string `type:"string" required:"true"` + + // The AWS account number of the Amazon EC2 security group owner. Note that + // this is not the same thing as an AWS access key ID - you must provide a valid + // AWS account number for this parameter. + EC2SecurityGroupOwnerId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RevokeCacheSecurityGroupIngressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevokeCacheSecurityGroupIngressInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
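A sketch of the two reset modes described above: set ResetAllParameters to true to reset the whole group, or leave it false and enumerate ParameterNameValues. The group name is an illustrative assumption:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.New())

	// Reset every parameter in the group to its default value; the call
	// returns a CacheParameterGroupNameMessage naming the modified group.
	resp, err := svc.ResetCacheParameterGroup(&elasticache.ResetCacheParameterGroupInput{
		CacheParameterGroupName: aws.String("my-memcached-params"), // illustrative name
		ResetAllParameters:      aws.Bool(true),
	})
	if err != nil {
		fmt.Println(err.Error())
		return
	}
	fmt.Println(resp)
}
```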
+func (s *RevokeCacheSecurityGroupIngressInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RevokeCacheSecurityGroupIngressInput"} + if s.CacheSecurityGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("CacheSecurityGroupName")) + } + if s.EC2SecurityGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("EC2SecurityGroupName")) + } + if s.EC2SecurityGroupOwnerId == nil { + invalidParams.Add(request.NewErrParamRequired("EC2SecurityGroupOwnerId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RevokeCacheSecurityGroupIngressOutput struct { + _ struct{} `type:"structure"` + + // Represents the output of one of the following actions: + // + // AuthorizeCacheSecurityGroupIngress + // + // CreateCacheSecurityGroup + // + // RevokeCacheSecurityGroupIngress + CacheSecurityGroup *CacheSecurityGroup `type:"structure"` +} + +// String returns the string representation +func (s RevokeCacheSecurityGroupIngressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevokeCacheSecurityGroupIngressOutput) GoString() string { + return s.String() +} + +// Represents a single cache security group and its status. +type SecurityGroupMembership struct { + _ struct{} `type:"structure"` + + // The identifier of the cache security group. + SecurityGroupId *string `type:"string"` + + // The status of the cache security group membership. The status changes whenever + // a cache security group is modified, or when the cache security groups assigned + // to a cache cluster are modified. + Status *string `type:"string"` +} + +// String returns the string representation +func (s SecurityGroupMembership) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SecurityGroupMembership) GoString() string { + return s.String() +} + +// Represents a copy of an entire cache cluster as of the time when the snapshot +// was taken. +type Snapshot struct { + _ struct{} `type:"structure"` + + // This parameter is currently disabled. + AutoMinorVersionUpgrade *bool `type:"boolean"` + + // The date and time when the source cache cluster was created. + CacheClusterCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The user-supplied identifier of the source cache cluster. + CacheClusterId *string `type:"string"` + + // The name of the compute and memory capacity node type for the source cache + // cluster. + // + // Valid node types are as follows: + // + // General purpose: + // + // Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, + // cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge + // + // Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, + // cache.m1.large, cache.m1.xlarge + // + // Compute optimized: cache.c1.xlarge + // + // Memory optimized: + // + // Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, + // cache.r3.4xlarge, cache.r3.8xlarge + // + // Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // + // Notes: + // + // All t2 instances are created in an Amazon Virtual Private Cloud (VPC). + // + // Redis backup/restore is not supported for t2 instances. + // + // Redis Append-only files (AOF) functionality is not supported for t1 or + // t2 instances. 
+ //
+ // For a complete listing of cache node types and specifications, see Amazon
+ // ElastiCache Product Features and Details (http://aws.amazon.com/elasticache/details)
+ // and Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#CacheParameterGroups.Memcached.NodeSpecific)
+ // or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#CacheParameterGroups.Redis.NodeSpecific).
+ CacheNodeType *string `type:"string"`
+
+ // The cache parameter group that is associated with the source cache cluster.
+ CacheParameterGroupName *string `type:"string"`
+
+ // The name of the cache subnet group associated with the source cache cluster.
+ CacheSubnetGroupName *string `type:"string"`
+
+ // The name of the cache engine (memcached or redis) used by the source cache
+ // cluster.
+ Engine *string `type:"string"`
+
+ // The version of the cache engine that is used by the source cache
+ // cluster.
+ EngineVersion *string `type:"string"`
+
+ // A list of the cache nodes in the source cache cluster.
+ NodeSnapshots []*NodeSnapshot `locationNameList:"NodeSnapshot" type:"list"`
+
+ // The number of cache nodes in the source cache cluster.
+ //
+ // For clusters running Redis, this value must be 1. For clusters running Memcached,
+ // this value must be between 1 and 20.
+ NumCacheNodes *int64 `type:"integer"`
+
+ // The port number used by each cache node in the source cache cluster.
+ Port *int64 `type:"integer"`
+
+ // The name of the Availability Zone in which the source cache cluster is located.
+ PreferredAvailabilityZone *string `type:"string"`
+
+ // Specifies the weekly time range during which maintenance on the cache cluster
+ // is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi
+ // (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid
+ // values for ddd are:
+ //
+ // sun
+ //
+ // mon
+ //
+ // tue
+ //
+ // wed
+ //
+ // thu
+ //
+ // fri
+ //
+ // sat
+ //
+ // Example: sun:05:00-sun:09:00
+ PreferredMaintenanceWindow *string `type:"string"`
+
+ // The name of a snapshot. For an automatic snapshot, the name is system-generated;
+ // for a manual snapshot, this is the user-provided name.
+ SnapshotName *string `type:"string"`
+
+ // For an automatic snapshot, the number of days for which ElastiCache will
+ // retain the snapshot before deleting it.
+ //
+ // For manual snapshots, this field reflects the SnapshotRetentionLimit for
+ // the source cache cluster when the snapshot was created. This field is otherwise
+ // ignored: Manual snapshots do not expire, and can only be deleted using the
+ // DeleteSnapshot action.
+ //
+ // Important If the value of SnapshotRetentionLimit is set to zero (0), backups
+ // are turned off.
+ SnapshotRetentionLimit *int64 `type:"integer"`
+
+ // Indicates whether the snapshot is from an automatic backup (automated) or
+ // was created manually (manual).
+ SnapshotSource *string `type:"string"`
+
+ // The status of the snapshot. Valid values: creating | available | restoring
+ // | copying | deleting.
+ SnapshotStatus *string `type:"string"`
+
+ // The daily time range during which ElastiCache takes daily snapshots of the
+ // source cache cluster.
+ SnapshotWindow *string `type:"string"`
+
+ // The Amazon Resource Name (ARN) for the topic used by the source cache cluster
+ // for publishing notifications.
+ TopicArn *string `type:"string"` + + // The Amazon Virtual Private Cloud identifier (VPC ID) of the cache subnet + // group for the source cache cluster. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s Snapshot) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Snapshot) GoString() string { + return s.String() +} + +// Represents the subnet associated with a cache cluster. This parameter refers +// to subnets defined in Amazon Virtual Private Cloud (Amazon VPC) and used +// with ElastiCache. +type Subnet struct { + _ struct{} `type:"structure"` + + // The Availability Zone associated with the subnet. + SubnetAvailabilityZone *AvailabilityZone `type:"structure"` + + // The unique identifier for the subnet. + SubnetIdentifier *string `type:"string"` +} + +// String returns the string representation +func (s Subnet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Subnet) GoString() string { + return s.String() +} + +// A cost allocation Tag that can be added to an ElastiCache cluster or replication +// group. Tags are composed of a Key/Value pair. A tag with a null Value is +// permitted. +type Tag struct { + _ struct{} `type:"structure"` + + // The key for the tag. + Key *string `type:"string"` + + // The tag's value. May not be null. + Value *string `type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Represents the output from the AddTagsToResource, ListTagsOnResource, and +// RemoveTagsFromResource actions. +type TagListMessage struct { + _ struct{} `type:"structure"` + + // A list of cost allocation tags as key-value pairs. 
+ TagList []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s TagListMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagListMessage) GoString() string { + return s.String() +} + +const ( + // @enum AZMode + AZModeSingleAz = "single-az" + // @enum AZMode + AZModeCrossAz = "cross-az" +) + +const ( + // @enum AutomaticFailoverStatus + AutomaticFailoverStatusEnabled = "enabled" + // @enum AutomaticFailoverStatus + AutomaticFailoverStatusDisabled = "disabled" + // @enum AutomaticFailoverStatus + AutomaticFailoverStatusEnabling = "enabling" + // @enum AutomaticFailoverStatus + AutomaticFailoverStatusDisabling = "disabling" +) + +const ( + // @enum ChangeType + ChangeTypeImmediate = "immediate" + // @enum ChangeType + ChangeTypeRequiresReboot = "requires-reboot" +) + +const ( + // @enum PendingAutomaticFailoverStatus + PendingAutomaticFailoverStatusEnabled = "enabled" + // @enum PendingAutomaticFailoverStatus + PendingAutomaticFailoverStatusDisabled = "disabled" +) + +const ( + // @enum SourceType + SourceTypeCacheCluster = "cache-cluster" + // @enum SourceType + SourceTypeCacheParameterGroup = "cache-parameter-group" + // @enum SourceType + SourceTypeCacheSecurityGroup = "cache-security-group" + // @enum SourceType + SourceTypeCacheSubnetGroup = "cache-subnet-group" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticache/elasticacheiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/elasticache/elasticacheiface/interface.go new file mode 100644 index 000000000..67b4a93fe --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticache/elasticacheiface/interface.go @@ -0,0 +1,190 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package elasticacheiface provides an interface for the Amazon ElastiCache. +package elasticacheiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/elasticache" +) + +// ElastiCacheAPI is the interface type for elasticache.ElastiCache. 
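The interface declared just below mirrors every method of elasticache.ElastiCache, so application code can depend on ElastiCacheAPI and tests can substitute a stub. A sketch of that pattern; the helper, the mock type, and the status value are illustrative, not part of the generated package:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/elasticache"
	"github.com/aws/aws-sdk-go/service/elasticache/elasticacheiface"
)

// clusterStatus depends on the interface rather than the concrete client.
func clusterStatus(api elasticacheiface.ElastiCacheAPI, id string) (string, error) {
	out, err := api.DescribeCacheClusters(&elasticache.DescribeCacheClustersInput{
		CacheClusterId: aws.String(id),
	})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.CacheClusters[0].CacheClusterStatus), nil
}

// mockElastiCache embeds the interface and overrides only what a test
// needs; calling any other method panics, which keeps the stub honest.
type mockElastiCache struct {
	elasticacheiface.ElastiCacheAPI
}

func (m *mockElastiCache) DescribeCacheClusters(*elasticache.DescribeCacheClustersInput) (*elasticache.DescribeCacheClustersOutput, error) {
	return &elasticache.DescribeCacheClustersOutput{
		CacheClusters: []*elasticache.CacheCluster{
			{CacheClusterStatus: aws.String("available")},
		},
	}, nil
}

func main() {
	status, _ := clusterStatus(&mockElastiCache{}, "my-cluster")
	fmt.Println(status) // available
}
```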
+type ElastiCacheAPI interface { + AddTagsToResourceRequest(*elasticache.AddTagsToResourceInput) (*request.Request, *elasticache.TagListMessage) + + AddTagsToResource(*elasticache.AddTagsToResourceInput) (*elasticache.TagListMessage, error) + + AuthorizeCacheSecurityGroupIngressRequest(*elasticache.AuthorizeCacheSecurityGroupIngressInput) (*request.Request, *elasticache.AuthorizeCacheSecurityGroupIngressOutput) + + AuthorizeCacheSecurityGroupIngress(*elasticache.AuthorizeCacheSecurityGroupIngressInput) (*elasticache.AuthorizeCacheSecurityGroupIngressOutput, error) + + CopySnapshotRequest(*elasticache.CopySnapshotInput) (*request.Request, *elasticache.CopySnapshotOutput) + + CopySnapshot(*elasticache.CopySnapshotInput) (*elasticache.CopySnapshotOutput, error) + + CreateCacheClusterRequest(*elasticache.CreateCacheClusterInput) (*request.Request, *elasticache.CreateCacheClusterOutput) + + CreateCacheCluster(*elasticache.CreateCacheClusterInput) (*elasticache.CreateCacheClusterOutput, error) + + CreateCacheParameterGroupRequest(*elasticache.CreateCacheParameterGroupInput) (*request.Request, *elasticache.CreateCacheParameterGroupOutput) + + CreateCacheParameterGroup(*elasticache.CreateCacheParameterGroupInput) (*elasticache.CreateCacheParameterGroupOutput, error) + + CreateCacheSecurityGroupRequest(*elasticache.CreateCacheSecurityGroupInput) (*request.Request, *elasticache.CreateCacheSecurityGroupOutput) + + CreateCacheSecurityGroup(*elasticache.CreateCacheSecurityGroupInput) (*elasticache.CreateCacheSecurityGroupOutput, error) + + CreateCacheSubnetGroupRequest(*elasticache.CreateCacheSubnetGroupInput) (*request.Request, *elasticache.CreateCacheSubnetGroupOutput) + + CreateCacheSubnetGroup(*elasticache.CreateCacheSubnetGroupInput) (*elasticache.CreateCacheSubnetGroupOutput, error) + + CreateReplicationGroupRequest(*elasticache.CreateReplicationGroupInput) (*request.Request, *elasticache.CreateReplicationGroupOutput) + + CreateReplicationGroup(*elasticache.CreateReplicationGroupInput) (*elasticache.CreateReplicationGroupOutput, error) + + CreateSnapshotRequest(*elasticache.CreateSnapshotInput) (*request.Request, *elasticache.CreateSnapshotOutput) + + CreateSnapshot(*elasticache.CreateSnapshotInput) (*elasticache.CreateSnapshotOutput, error) + + DeleteCacheClusterRequest(*elasticache.DeleteCacheClusterInput) (*request.Request, *elasticache.DeleteCacheClusterOutput) + + DeleteCacheCluster(*elasticache.DeleteCacheClusterInput) (*elasticache.DeleteCacheClusterOutput, error) + + DeleteCacheParameterGroupRequest(*elasticache.DeleteCacheParameterGroupInput) (*request.Request, *elasticache.DeleteCacheParameterGroupOutput) + + DeleteCacheParameterGroup(*elasticache.DeleteCacheParameterGroupInput) (*elasticache.DeleteCacheParameterGroupOutput, error) + + DeleteCacheSecurityGroupRequest(*elasticache.DeleteCacheSecurityGroupInput) (*request.Request, *elasticache.DeleteCacheSecurityGroupOutput) + + DeleteCacheSecurityGroup(*elasticache.DeleteCacheSecurityGroupInput) (*elasticache.DeleteCacheSecurityGroupOutput, error) + + DeleteCacheSubnetGroupRequest(*elasticache.DeleteCacheSubnetGroupInput) (*request.Request, *elasticache.DeleteCacheSubnetGroupOutput) + + DeleteCacheSubnetGroup(*elasticache.DeleteCacheSubnetGroupInput) (*elasticache.DeleteCacheSubnetGroupOutput, error) + + DeleteReplicationGroupRequest(*elasticache.DeleteReplicationGroupInput) (*request.Request, *elasticache.DeleteReplicationGroupOutput) + + DeleteReplicationGroup(*elasticache.DeleteReplicationGroupInput) 
(*elasticache.DeleteReplicationGroupOutput, error) + + DeleteSnapshotRequest(*elasticache.DeleteSnapshotInput) (*request.Request, *elasticache.DeleteSnapshotOutput) + + DeleteSnapshot(*elasticache.DeleteSnapshotInput) (*elasticache.DeleteSnapshotOutput, error) + + DescribeCacheClustersRequest(*elasticache.DescribeCacheClustersInput) (*request.Request, *elasticache.DescribeCacheClustersOutput) + + DescribeCacheClusters(*elasticache.DescribeCacheClustersInput) (*elasticache.DescribeCacheClustersOutput, error) + + DescribeCacheClustersPages(*elasticache.DescribeCacheClustersInput, func(*elasticache.DescribeCacheClustersOutput, bool) bool) error + + DescribeCacheEngineVersionsRequest(*elasticache.DescribeCacheEngineVersionsInput) (*request.Request, *elasticache.DescribeCacheEngineVersionsOutput) + + DescribeCacheEngineVersions(*elasticache.DescribeCacheEngineVersionsInput) (*elasticache.DescribeCacheEngineVersionsOutput, error) + + DescribeCacheEngineVersionsPages(*elasticache.DescribeCacheEngineVersionsInput, func(*elasticache.DescribeCacheEngineVersionsOutput, bool) bool) error + + DescribeCacheParameterGroupsRequest(*elasticache.DescribeCacheParameterGroupsInput) (*request.Request, *elasticache.DescribeCacheParameterGroupsOutput) + + DescribeCacheParameterGroups(*elasticache.DescribeCacheParameterGroupsInput) (*elasticache.DescribeCacheParameterGroupsOutput, error) + + DescribeCacheParameterGroupsPages(*elasticache.DescribeCacheParameterGroupsInput, func(*elasticache.DescribeCacheParameterGroupsOutput, bool) bool) error + + DescribeCacheParametersRequest(*elasticache.DescribeCacheParametersInput) (*request.Request, *elasticache.DescribeCacheParametersOutput) + + DescribeCacheParameters(*elasticache.DescribeCacheParametersInput) (*elasticache.DescribeCacheParametersOutput, error) + + DescribeCacheParametersPages(*elasticache.DescribeCacheParametersInput, func(*elasticache.DescribeCacheParametersOutput, bool) bool) error + + DescribeCacheSecurityGroupsRequest(*elasticache.DescribeCacheSecurityGroupsInput) (*request.Request, *elasticache.DescribeCacheSecurityGroupsOutput) + + DescribeCacheSecurityGroups(*elasticache.DescribeCacheSecurityGroupsInput) (*elasticache.DescribeCacheSecurityGroupsOutput, error) + + DescribeCacheSecurityGroupsPages(*elasticache.DescribeCacheSecurityGroupsInput, func(*elasticache.DescribeCacheSecurityGroupsOutput, bool) bool) error + + DescribeCacheSubnetGroupsRequest(*elasticache.DescribeCacheSubnetGroupsInput) (*request.Request, *elasticache.DescribeCacheSubnetGroupsOutput) + + DescribeCacheSubnetGroups(*elasticache.DescribeCacheSubnetGroupsInput) (*elasticache.DescribeCacheSubnetGroupsOutput, error) + + DescribeCacheSubnetGroupsPages(*elasticache.DescribeCacheSubnetGroupsInput, func(*elasticache.DescribeCacheSubnetGroupsOutput, bool) bool) error + + DescribeEngineDefaultParametersRequest(*elasticache.DescribeEngineDefaultParametersInput) (*request.Request, *elasticache.DescribeEngineDefaultParametersOutput) + + DescribeEngineDefaultParameters(*elasticache.DescribeEngineDefaultParametersInput) (*elasticache.DescribeEngineDefaultParametersOutput, error) + + DescribeEngineDefaultParametersPages(*elasticache.DescribeEngineDefaultParametersInput, func(*elasticache.DescribeEngineDefaultParametersOutput, bool) bool) error + + DescribeEventsRequest(*elasticache.DescribeEventsInput) (*request.Request, *elasticache.DescribeEventsOutput) + + DescribeEvents(*elasticache.DescribeEventsInput) (*elasticache.DescribeEventsOutput, error) + + 
DescribeEventsPages(*elasticache.DescribeEventsInput, func(*elasticache.DescribeEventsOutput, bool) bool) error + + DescribeReplicationGroupsRequest(*elasticache.DescribeReplicationGroupsInput) (*request.Request, *elasticache.DescribeReplicationGroupsOutput) + + DescribeReplicationGroups(*elasticache.DescribeReplicationGroupsInput) (*elasticache.DescribeReplicationGroupsOutput, error) + + DescribeReplicationGroupsPages(*elasticache.DescribeReplicationGroupsInput, func(*elasticache.DescribeReplicationGroupsOutput, bool) bool) error + + DescribeReservedCacheNodesRequest(*elasticache.DescribeReservedCacheNodesInput) (*request.Request, *elasticache.DescribeReservedCacheNodesOutput) + + DescribeReservedCacheNodes(*elasticache.DescribeReservedCacheNodesInput) (*elasticache.DescribeReservedCacheNodesOutput, error) + + DescribeReservedCacheNodesPages(*elasticache.DescribeReservedCacheNodesInput, func(*elasticache.DescribeReservedCacheNodesOutput, bool) bool) error + + DescribeReservedCacheNodesOfferingsRequest(*elasticache.DescribeReservedCacheNodesOfferingsInput) (*request.Request, *elasticache.DescribeReservedCacheNodesOfferingsOutput) + + DescribeReservedCacheNodesOfferings(*elasticache.DescribeReservedCacheNodesOfferingsInput) (*elasticache.DescribeReservedCacheNodesOfferingsOutput, error) + + DescribeReservedCacheNodesOfferingsPages(*elasticache.DescribeReservedCacheNodesOfferingsInput, func(*elasticache.DescribeReservedCacheNodesOfferingsOutput, bool) bool) error + + DescribeSnapshotsRequest(*elasticache.DescribeSnapshotsInput) (*request.Request, *elasticache.DescribeSnapshotsOutput) + + DescribeSnapshots(*elasticache.DescribeSnapshotsInput) (*elasticache.DescribeSnapshotsOutput, error) + + DescribeSnapshotsPages(*elasticache.DescribeSnapshotsInput, func(*elasticache.DescribeSnapshotsOutput, bool) bool) error + + ListAllowedNodeTypeModificationsRequest(*elasticache.ListAllowedNodeTypeModificationsInput) (*request.Request, *elasticache.ListAllowedNodeTypeModificationsOutput) + + ListAllowedNodeTypeModifications(*elasticache.ListAllowedNodeTypeModificationsInput) (*elasticache.ListAllowedNodeTypeModificationsOutput, error) + + ListTagsForResourceRequest(*elasticache.ListTagsForResourceInput) (*request.Request, *elasticache.TagListMessage) + + ListTagsForResource(*elasticache.ListTagsForResourceInput) (*elasticache.TagListMessage, error) + + ModifyCacheClusterRequest(*elasticache.ModifyCacheClusterInput) (*request.Request, *elasticache.ModifyCacheClusterOutput) + + ModifyCacheCluster(*elasticache.ModifyCacheClusterInput) (*elasticache.ModifyCacheClusterOutput, error) + + ModifyCacheParameterGroupRequest(*elasticache.ModifyCacheParameterGroupInput) (*request.Request, *elasticache.CacheParameterGroupNameMessage) + + ModifyCacheParameterGroup(*elasticache.ModifyCacheParameterGroupInput) (*elasticache.CacheParameterGroupNameMessage, error) + + ModifyCacheSubnetGroupRequest(*elasticache.ModifyCacheSubnetGroupInput) (*request.Request, *elasticache.ModifyCacheSubnetGroupOutput) + + ModifyCacheSubnetGroup(*elasticache.ModifyCacheSubnetGroupInput) (*elasticache.ModifyCacheSubnetGroupOutput, error) + + ModifyReplicationGroupRequest(*elasticache.ModifyReplicationGroupInput) (*request.Request, *elasticache.ModifyReplicationGroupOutput) + + ModifyReplicationGroup(*elasticache.ModifyReplicationGroupInput) (*elasticache.ModifyReplicationGroupOutput, error) + + PurchaseReservedCacheNodesOfferingRequest(*elasticache.PurchaseReservedCacheNodesOfferingInput) (*request.Request, 
*elasticache.PurchaseReservedCacheNodesOfferingOutput) + + PurchaseReservedCacheNodesOffering(*elasticache.PurchaseReservedCacheNodesOfferingInput) (*elasticache.PurchaseReservedCacheNodesOfferingOutput, error) + + RebootCacheClusterRequest(*elasticache.RebootCacheClusterInput) (*request.Request, *elasticache.RebootCacheClusterOutput) + + RebootCacheCluster(*elasticache.RebootCacheClusterInput) (*elasticache.RebootCacheClusterOutput, error) + + RemoveTagsFromResourceRequest(*elasticache.RemoveTagsFromResourceInput) (*request.Request, *elasticache.TagListMessage) + + RemoveTagsFromResource(*elasticache.RemoveTagsFromResourceInput) (*elasticache.TagListMessage, error) + + ResetCacheParameterGroupRequest(*elasticache.ResetCacheParameterGroupInput) (*request.Request, *elasticache.CacheParameterGroupNameMessage) + + ResetCacheParameterGroup(*elasticache.ResetCacheParameterGroupInput) (*elasticache.CacheParameterGroupNameMessage, error) + + RevokeCacheSecurityGroupIngressRequest(*elasticache.RevokeCacheSecurityGroupIngressInput) (*request.Request, *elasticache.RevokeCacheSecurityGroupIngressOutput) + + RevokeCacheSecurityGroupIngress(*elasticache.RevokeCacheSecurityGroupIngressInput) (*elasticache.RevokeCacheSecurityGroupIngressOutput, error) +} + +var _ ElastiCacheAPI = (*elasticache.ElastiCache)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticache/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/elasticache/examples_test.go new file mode 100644 index 000000000..3f5bd7a44 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticache/examples_test.go @@ -0,0 +1,966 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package elasticache_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/elasticache" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleElastiCache_AddTagsToResource() { + svc := elasticache.New(session.New()) + + params := &elasticache.AddTagsToResourceInput{ + ResourceName: aws.String("String"), // Required + Tags: []*elasticache.Tag{ // Required + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.AddTagsToResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_AuthorizeCacheSecurityGroupIngress() { + svc := elasticache.New(session.New()) + + params := &elasticache.AuthorizeCacheSecurityGroupIngressInput{ + CacheSecurityGroupName: aws.String("String"), // Required + EC2SecurityGroupName: aws.String("String"), // Required + EC2SecurityGroupOwnerId: aws.String("String"), // Required + } + resp, err := svc.AuthorizeCacheSecurityGroupIngress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleElastiCache_CopySnapshot() { + svc := elasticache.New(session.New()) + + params := &elasticache.CopySnapshotInput{ + SourceSnapshotName: aws.String("String"), // Required + TargetSnapshotName: aws.String("String"), // Required + TargetBucket: aws.String("String"), + } + resp, err := svc.CopySnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_CreateCacheCluster() { + svc := elasticache.New(session.New()) + + params := &elasticache.CreateCacheClusterInput{ + CacheClusterId: aws.String("String"), // Required + AZMode: aws.String("AZMode"), + AutoMinorVersionUpgrade: aws.Bool(true), + CacheNodeType: aws.String("String"), + CacheParameterGroupName: aws.String("String"), + CacheSecurityGroupNames: []*string{ + aws.String("String"), // Required + // More values... + }, + CacheSubnetGroupName: aws.String("String"), + Engine: aws.String("String"), + EngineVersion: aws.String("String"), + NotificationTopicArn: aws.String("String"), + NumCacheNodes: aws.Int64(1), + Port: aws.Int64(1), + PreferredAvailabilityZone: aws.String("String"), + PreferredAvailabilityZones: []*string{ + aws.String("String"), // Required + // More values... + }, + PreferredMaintenanceWindow: aws.String("String"), + ReplicationGroupId: aws.String("String"), + SecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + SnapshotArns: []*string{ + aws.String("String"), // Required + // More values... + }, + SnapshotName: aws.String("String"), + SnapshotRetentionLimit: aws.Int64(1), + SnapshotWindow: aws.String("String"), + Tags: []*elasticache.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateCacheCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_CreateCacheParameterGroup() { + svc := elasticache.New(session.New()) + + params := &elasticache.CreateCacheParameterGroupInput{ + CacheParameterGroupFamily: aws.String("String"), // Required + CacheParameterGroupName: aws.String("String"), // Required + Description: aws.String("String"), // Required + } + resp, err := svc.CreateCacheParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_CreateCacheSecurityGroup() { + svc := elasticache.New(session.New()) + + params := &elasticache.CreateCacheSecurityGroupInput{ + CacheSecurityGroupName: aws.String("String"), // Required + Description: aws.String("String"), // Required + } + resp, err := svc.CreateCacheSecurityGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleElastiCache_CreateCacheSubnetGroup() { + svc := elasticache.New(session.New()) + + params := &elasticache.CreateCacheSubnetGroupInput{ + CacheSubnetGroupDescription: aws.String("String"), // Required + CacheSubnetGroupName: aws.String("String"), // Required + SubnetIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.CreateCacheSubnetGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_CreateReplicationGroup() { + svc := elasticache.New(session.New()) + + params := &elasticache.CreateReplicationGroupInput{ + ReplicationGroupDescription: aws.String("String"), // Required + ReplicationGroupId: aws.String("String"), // Required + AutoMinorVersionUpgrade: aws.Bool(true), + AutomaticFailoverEnabled: aws.Bool(true), + CacheNodeType: aws.String("String"), + CacheParameterGroupName: aws.String("String"), + CacheSecurityGroupNames: []*string{ + aws.String("String"), // Required + // More values... + }, + CacheSubnetGroupName: aws.String("String"), + Engine: aws.String("String"), + EngineVersion: aws.String("String"), + NotificationTopicArn: aws.String("String"), + NumCacheClusters: aws.Int64(1), + Port: aws.Int64(1), + PreferredCacheClusterAZs: []*string{ + aws.String("String"), // Required + // More values... + }, + PreferredMaintenanceWindow: aws.String("String"), + PrimaryClusterId: aws.String("String"), + SecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + SnapshotArns: []*string{ + aws.String("String"), // Required + // More values... + }, + SnapshotName: aws.String("String"), + SnapshotRetentionLimit: aws.Int64(1), + SnapshotWindow: aws.String("String"), + Tags: []*elasticache.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateReplicationGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_CreateSnapshot() { + svc := elasticache.New(session.New()) + + params := &elasticache.CreateSnapshotInput{ + CacheClusterId: aws.String("String"), // Required + SnapshotName: aws.String("String"), // Required + } + resp, err := svc.CreateSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DeleteCacheCluster() { + svc := elasticache.New(session.New()) + + params := &elasticache.DeleteCacheClusterInput{ + CacheClusterId: aws.String("String"), // Required + FinalSnapshotIdentifier: aws.String("String"), + } + resp, err := svc.DeleteCacheCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleElastiCache_DeleteCacheParameterGroup() { + svc := elasticache.New(session.New()) + + params := &elasticache.DeleteCacheParameterGroupInput{ + CacheParameterGroupName: aws.String("String"), // Required + } + resp, err := svc.DeleteCacheParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DeleteCacheSecurityGroup() { + svc := elasticache.New(session.New()) + + params := &elasticache.DeleteCacheSecurityGroupInput{ + CacheSecurityGroupName: aws.String("String"), // Required + } + resp, err := svc.DeleteCacheSecurityGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DeleteCacheSubnetGroup() { + svc := elasticache.New(session.New()) + + params := &elasticache.DeleteCacheSubnetGroupInput{ + CacheSubnetGroupName: aws.String("String"), // Required + } + resp, err := svc.DeleteCacheSubnetGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DeleteReplicationGroup() { + svc := elasticache.New(session.New()) + + params := &elasticache.DeleteReplicationGroupInput{ + ReplicationGroupId: aws.String("String"), // Required + FinalSnapshotIdentifier: aws.String("String"), + RetainPrimaryCluster: aws.Bool(true), + } + resp, err := svc.DeleteReplicationGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DeleteSnapshot() { + svc := elasticache.New(session.New()) + + params := &elasticache.DeleteSnapshotInput{ + SnapshotName: aws.String("String"), // Required + } + resp, err := svc.DeleteSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DescribeCacheClusters() { + svc := elasticache.New(session.New()) + + params := &elasticache.DescribeCacheClustersInput{ + CacheClusterId: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + ShowCacheNodeInfo: aws.Bool(true), + } + resp, err := svc.DescribeCacheClusters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleElastiCache_DescribeCacheEngineVersions() { + svc := elasticache.New(session.New()) + + params := &elasticache.DescribeCacheEngineVersionsInput{ + CacheParameterGroupFamily: aws.String("String"), + DefaultOnly: aws.Bool(true), + Engine: aws.String("String"), + EngineVersion: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeCacheEngineVersions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DescribeCacheParameterGroups() { + svc := elasticache.New(session.New()) + + params := &elasticache.DescribeCacheParameterGroupsInput{ + CacheParameterGroupName: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeCacheParameterGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DescribeCacheParameters() { + svc := elasticache.New(session.New()) + + params := &elasticache.DescribeCacheParametersInput{ + CacheParameterGroupName: aws.String("String"), // Required + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + Source: aws.String("String"), + } + resp, err := svc.DescribeCacheParameters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DescribeCacheSecurityGroups() { + svc := elasticache.New(session.New()) + + params := &elasticache.DescribeCacheSecurityGroupsInput{ + CacheSecurityGroupName: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeCacheSecurityGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DescribeCacheSubnetGroups() { + svc := elasticache.New(session.New()) + + params := &elasticache.DescribeCacheSubnetGroupsInput{ + CacheSubnetGroupName: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeCacheSubnetGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DescribeEngineDefaultParameters() { + svc := elasticache.New(session.New()) + + params := &elasticache.DescribeEngineDefaultParametersInput{ + CacheParameterGroupFamily: aws.String("String"), // Required + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeEngineDefaultParameters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleElastiCache_DescribeEvents() { + svc := elasticache.New(session.New()) + + params := &elasticache.DescribeEventsInput{ + Duration: aws.Int64(1), + EndTime: aws.Time(time.Now()), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + SourceIdentifier: aws.String("String"), + SourceType: aws.String("SourceType"), + StartTime: aws.Time(time.Now()), + } + resp, err := svc.DescribeEvents(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DescribeReplicationGroups() { + svc := elasticache.New(session.New()) + + params := &elasticache.DescribeReplicationGroupsInput{ + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + ReplicationGroupId: aws.String("String"), + } + resp, err := svc.DescribeReplicationGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DescribeReservedCacheNodes() { + svc := elasticache.New(session.New()) + + params := &elasticache.DescribeReservedCacheNodesInput{ + CacheNodeType: aws.String("String"), + Duration: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + OfferingType: aws.String("String"), + ProductDescription: aws.String("String"), + ReservedCacheNodeId: aws.String("String"), + ReservedCacheNodesOfferingId: aws.String("String"), + } + resp, err := svc.DescribeReservedCacheNodes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DescribeReservedCacheNodesOfferings() { + svc := elasticache.New(session.New()) + + params := &elasticache.DescribeReservedCacheNodesOfferingsInput{ + CacheNodeType: aws.String("String"), + Duration: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + OfferingType: aws.String("String"), + ProductDescription: aws.String("String"), + ReservedCacheNodesOfferingId: aws.String("String"), + } + resp, err := svc.DescribeReservedCacheNodesOfferings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DescribeSnapshots() { + svc := elasticache.New(session.New()) + + params := &elasticache.DescribeSnapshotsInput{ + CacheClusterId: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + SnapshotName: aws.String("String"), + SnapshotSource: aws.String("String"), + } + resp, err := svc.DescribeSnapshots(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleElastiCache_ListAllowedNodeTypeModifications() { + svc := elasticache.New(session.New()) + + params := &elasticache.ListAllowedNodeTypeModificationsInput{ + CacheClusterId: aws.String("String"), + ReplicationGroupId: aws.String("String"), + } + resp, err := svc.ListAllowedNodeTypeModifications(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_ListTagsForResource() { + svc := elasticache.New(session.New()) + + params := &elasticache.ListTagsForResourceInput{ + ResourceName: aws.String("String"), // Required + } + resp, err := svc.ListTagsForResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_ModifyCacheCluster() { + svc := elasticache.New(session.New()) + + params := &elasticache.ModifyCacheClusterInput{ + CacheClusterId: aws.String("String"), // Required + AZMode: aws.String("AZMode"), + ApplyImmediately: aws.Bool(true), + AutoMinorVersionUpgrade: aws.Bool(true), + CacheNodeIdsToRemove: []*string{ + aws.String("String"), // Required + // More values... + }, + CacheNodeType: aws.String("String"), + CacheParameterGroupName: aws.String("String"), + CacheSecurityGroupNames: []*string{ + aws.String("String"), // Required + // More values... + }, + EngineVersion: aws.String("String"), + NewAvailabilityZones: []*string{ + aws.String("String"), // Required + // More values... + }, + NotificationTopicArn: aws.String("String"), + NotificationTopicStatus: aws.String("String"), + NumCacheNodes: aws.Int64(1), + PreferredMaintenanceWindow: aws.String("String"), + SecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + SnapshotRetentionLimit: aws.Int64(1), + SnapshotWindow: aws.String("String"), + } + resp, err := svc.ModifyCacheCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_ModifyCacheParameterGroup() { + svc := elasticache.New(session.New()) + + params := &elasticache.ModifyCacheParameterGroupInput{ + CacheParameterGroupName: aws.String("String"), // Required + ParameterNameValues: []*elasticache.ParameterNameValue{ // Required + { // Required + ParameterName: aws.String("String"), + ParameterValue: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.ModifyCacheParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_ModifyCacheSubnetGroup() { + svc := elasticache.New(session.New()) + + params := &elasticache.ModifyCacheSubnetGroupInput{ + CacheSubnetGroupName: aws.String("String"), // Required + CacheSubnetGroupDescription: aws.String("String"), + SubnetIds: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + } + resp, err := svc.ModifyCacheSubnetGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_ModifyReplicationGroup() { + svc := elasticache.New(session.New()) + + params := &elasticache.ModifyReplicationGroupInput{ + ReplicationGroupId: aws.String("String"), // Required + ApplyImmediately: aws.Bool(true), + AutoMinorVersionUpgrade: aws.Bool(true), + AutomaticFailoverEnabled: aws.Bool(true), + CacheNodeType: aws.String("String"), + CacheParameterGroupName: aws.String("String"), + CacheSecurityGroupNames: []*string{ + aws.String("String"), // Required + // More values... + }, + EngineVersion: aws.String("String"), + NotificationTopicArn: aws.String("String"), + NotificationTopicStatus: aws.String("String"), + PreferredMaintenanceWindow: aws.String("String"), + PrimaryClusterId: aws.String("String"), + ReplicationGroupDescription: aws.String("String"), + SecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + SnapshotRetentionLimit: aws.Int64(1), + SnapshotWindow: aws.String("String"), + SnapshottingClusterId: aws.String("String"), + } + resp, err := svc.ModifyReplicationGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_PurchaseReservedCacheNodesOffering() { + svc := elasticache.New(session.New()) + + params := &elasticache.PurchaseReservedCacheNodesOfferingInput{ + ReservedCacheNodesOfferingId: aws.String("String"), // Required + CacheNodeCount: aws.Int64(1), + ReservedCacheNodeId: aws.String("String"), + } + resp, err := svc.PurchaseReservedCacheNodesOffering(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_RebootCacheCluster() { + svc := elasticache.New(session.New()) + + params := &elasticache.RebootCacheClusterInput{ + CacheClusterId: aws.String("String"), // Required + CacheNodeIdsToReboot: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.RebootCacheCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_RemoveTagsFromResource() { + svc := elasticache.New(session.New()) + + params := &elasticache.RemoveTagsFromResourceInput{ + ResourceName: aws.String("String"), // Required + TagKeys: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.RemoveTagsFromResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+	fmt.Println(resp)
+}
+
+func ExampleElastiCache_ResetCacheParameterGroup() {
+	svc := elasticache.New(session.New())
+
+	params := &elasticache.ResetCacheParameterGroupInput{
+		CacheParameterGroupName: aws.String("String"), // Required
+		ParameterNameValues: []*elasticache.ParameterNameValue{
+			{ // Required
+				ParameterName:  aws.String("String"),
+				ParameterValue: aws.String("String"),
+			},
+			// More values...
+		},
+		ResetAllParameters: aws.Bool(true),
+	}
+	resp, err := svc.ResetCacheParameterGroup(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleElastiCache_RevokeCacheSecurityGroupIngress() {
+	svc := elasticache.New(session.New())
+
+	params := &elasticache.RevokeCacheSecurityGroupIngressInput{
+		CacheSecurityGroupName:  aws.String("String"), // Required
+		EC2SecurityGroupName:    aws.String("String"), // Required
+		EC2SecurityGroupOwnerId: aws.String("String"), // Required
+	}
+	resp, err := svc.RevokeCacheSecurityGroupIngress(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticache/service.go b/vendor/github.com/aws/aws-sdk-go/service/elasticache/service.go
new file mode 100644
index 000000000..c13234d73
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/elasticache/service.go
@@ -0,0 +1,96 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package elasticache
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/query"
+)
+
+// Amazon ElastiCache is a web service that makes it easier to set up, operate,
+// and scale a distributed cache in the cloud.
+//
+// With ElastiCache, customers gain all of the benefits of a high-performance,
+// in-memory cache with far less of the administrative burden of launching and
+// managing a distributed cache. The service makes setup, scaling, and cluster
+// failure handling much simpler than in a self-managed cache deployment.
+//
+// In addition, through integration with Amazon CloudWatch, customers get enhanced
+// visibility into the key performance statistics associated with their cache
+// and can receive alarms if a part of their cache runs hot.
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type ElastiCache struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "elasticache"
+
+// New creates a new instance of the ElastiCache client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create an ElastiCache client from just a session.
+//     svc := elasticache.New(mySession)
+//
+//     // Create an ElastiCache client with additional configuration
+//     svc := elasticache.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *ElastiCache {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *ElastiCache {
+	svc := &ElastiCache{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   ServiceName,
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "2015-02-02",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+	svc.Handlers.Build.PushBackNamed(query.BuildHandler)
+	svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler)
+	svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler)
+	svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for an ElastiCache operation and runs any
+// custom request initialization.
+func (c *ElastiCache) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	// Run custom request initialization if present
+	if initRequest != nil {
+		initRequest(req)
+	}
+
+	return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticache/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/elasticache/waiters.go
new file mode 100644
index 000000000..0f594a65f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/elasticache/waiters.go
@@ -0,0 +1,183 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
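+//
+// The waiters below poll DescribeCacheClusters or DescribeReplicationGroups
+// on a fixed schedule: Delay is in seconds, so Delay: 15 with MaxAttempts: 40
+// bounds a wait at roughly ten minutes of polling. Each acceptor compares the
+// JMESPath Argument in the response against Expected to decide success or
+// failure. A minimal, hand-written sketch (not generated code), assuming svc
+// is an *ElastiCache client and "my-cluster" is an illustrative cluster ID:
+//
+//	input := &DescribeCacheClustersInput{
+//		CacheClusterId: aws.String("my-cluster"), // hypothetical ID
+//	}
+//	if err := svc.WaitUntilCacheClusterAvailable(input); err != nil {
+//		fmt.Println("cluster did not become available:", err)
+//	}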
+ +package elasticache + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *ElastiCache) WaitUntilCacheClusterAvailable(input *DescribeCacheClustersInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeCacheClusters", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "CacheClusters[].CacheClusterStatus", + Expected: "available", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "CacheClusters[].CacheClusterStatus", + Expected: "deleted", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "CacheClusters[].CacheClusterStatus", + Expected: "deleting", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "CacheClusters[].CacheClusterStatus", + Expected: "incompatible-network", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "CacheClusters[].CacheClusterStatus", + Expected: "restore-failed", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *ElastiCache) WaitUntilCacheClusterDeleted(input *DescribeCacheClustersInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeCacheClusters", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "CacheClusters[].CacheClusterStatus", + Expected: "deleted", + }, + { + State: "success", + Matcher: "error", + Argument: "", + Expected: "CacheClusterNotFound", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "CacheClusters[].CacheClusterStatus", + Expected: "available", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "CacheClusters[].CacheClusterStatus", + Expected: "creating", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "CacheClusters[].CacheClusterStatus", + Expected: "incompatible-network", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "CacheClusters[].CacheClusterStatus", + Expected: "modifying", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "CacheClusters[].CacheClusterStatus", + Expected: "restore-failed", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "CacheClusters[].CacheClusterStatus", + Expected: "snapshotting", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *ElastiCache) WaitUntilReplicationGroupAvailable(input *DescribeReplicationGroupsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeReplicationGroups", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "ReplicationGroups[].Status", + Expected: "available", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "ReplicationGroups[].Status", + Expected: "deleted", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *ElastiCache) WaitUntilReplicationGroupDeleted(input *DescribeReplicationGroupsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeReplicationGroups", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "ReplicationGroups[].Status", + Expected: "deleted", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "ReplicationGroups[].Status", + Expected: "available", + }, + { + State: "success", + Matcher: "error", + Argument: "", + Expected: 
"ReplicationGroupNotFoundFault", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/api.go b/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/api.go new file mode 100644 index 000000000..4f2deb7c2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/api.go @@ -0,0 +1,5781 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package elasticbeanstalk provides a client for AWS Elastic Beanstalk. +package elasticbeanstalk + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +const opAbortEnvironmentUpdate = "AbortEnvironmentUpdate" + +// AbortEnvironmentUpdateRequest generates a "aws/request.Request" representing the +// client's request for the AbortEnvironmentUpdate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AbortEnvironmentUpdate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AbortEnvironmentUpdateRequest method. +// req, resp := client.AbortEnvironmentUpdateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticBeanstalk) AbortEnvironmentUpdateRequest(input *AbortEnvironmentUpdateInput) (req *request.Request, output *AbortEnvironmentUpdateOutput) { + op := &request.Operation{ + Name: opAbortEnvironmentUpdate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AbortEnvironmentUpdateInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AbortEnvironmentUpdateOutput{} + req.Data = output + return +} + +// Cancels in-progress environment configuration update or application version +// deployment. +func (c *ElasticBeanstalk) AbortEnvironmentUpdate(input *AbortEnvironmentUpdateInput) (*AbortEnvironmentUpdateOutput, error) { + req, out := c.AbortEnvironmentUpdateRequest(input) + err := req.Send() + return out, err +} + +const opApplyEnvironmentManagedAction = "ApplyEnvironmentManagedAction" + +// ApplyEnvironmentManagedActionRequest generates a "aws/request.Request" representing the +// client's request for the ApplyEnvironmentManagedAction operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ApplyEnvironmentManagedAction method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ApplyEnvironmentManagedActionRequest method. +// req, resp := client.ApplyEnvironmentManagedActionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticBeanstalk) ApplyEnvironmentManagedActionRequest(input *ApplyEnvironmentManagedActionInput) (req *request.Request, output *ApplyEnvironmentManagedActionOutput) { + op := &request.Operation{ + Name: opApplyEnvironmentManagedAction, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ApplyEnvironmentManagedActionInput{} + } + + req = c.newRequest(op, input, output) + output = &ApplyEnvironmentManagedActionOutput{} + req.Data = output + return +} + +// Applies a scheduled managed action immediately. A managed action can be applied +// only if its status is Scheduled. Get the status and action ID of a managed +// action with DescribeEnvironmentManagedActions. +func (c *ElasticBeanstalk) ApplyEnvironmentManagedAction(input *ApplyEnvironmentManagedActionInput) (*ApplyEnvironmentManagedActionOutput, error) { + req, out := c.ApplyEnvironmentManagedActionRequest(input) + err := req.Send() + return out, err +} + +const opCheckDNSAvailability = "CheckDNSAvailability" + +// CheckDNSAvailabilityRequest generates a "aws/request.Request" representing the +// client's request for the CheckDNSAvailability operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CheckDNSAvailability method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CheckDNSAvailabilityRequest method. +// req, resp := client.CheckDNSAvailabilityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticBeanstalk) CheckDNSAvailabilityRequest(input *CheckDNSAvailabilityInput) (req *request.Request, output *CheckDNSAvailabilityOutput) { + op := &request.Operation{ + Name: opCheckDNSAvailability, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CheckDNSAvailabilityInput{} + } + + req = c.newRequest(op, input, output) + output = &CheckDNSAvailabilityOutput{} + req.Data = output + return +} + +// Checks if the specified CNAME is available. +func (c *ElasticBeanstalk) CheckDNSAvailability(input *CheckDNSAvailabilityInput) (*CheckDNSAvailabilityOutput, error) { + req, out := c.CheckDNSAvailabilityRequest(input) + err := req.Send() + return out, err +} + +const opComposeEnvironments = "ComposeEnvironments" + +// ComposeEnvironmentsRequest generates a "aws/request.Request" representing the +// client's request for the ComposeEnvironments operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ComposeEnvironments method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the ComposeEnvironmentsRequest method.
+//    req, resp := client.ComposeEnvironmentsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *ElasticBeanstalk) ComposeEnvironmentsRequest(input *ComposeEnvironmentsInput) (req *request.Request, output *EnvironmentDescriptionsMessage) {
+	op := &request.Operation{
+		Name:       opComposeEnvironments,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &ComposeEnvironmentsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &EnvironmentDescriptionsMessage{}
+	req.Data = output
+	return
+}
+
+// Creates or updates a group of environments that each run a separate component
+// of a single application. Takes a list of version labels that specify application
+// source bundles for each of the environments to create or update. The name
+// of each environment and other required information must be included in the
+// source bundles in an environment manifest named env.yaml. See Compose Environments
+// (http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/environment-mgmt-compose.html)
+// for details.
+func (c *ElasticBeanstalk) ComposeEnvironments(input *ComposeEnvironmentsInput) (*EnvironmentDescriptionsMessage, error) {
+	req, out := c.ComposeEnvironmentsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateApplication = "CreateApplication"
+
+// CreateApplicationRequest generates a "aws/request.Request" representing the
+// client's request for the CreateApplication operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CreateApplication method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the CreateApplicationRequest method.
+//    req, resp := client.CreateApplicationRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *ElasticBeanstalk) CreateApplicationRequest(input *CreateApplicationInput) (req *request.Request, output *ApplicationDescriptionMessage) {
+	op := &request.Operation{
+		Name:       opCreateApplication,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateApplicationInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ApplicationDescriptionMessage{}
+	req.Data = output
+	return
+}
+
+// Creates an application that has one configuration template named default
+// and no application versions.
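+//
+// A minimal, hand-written calling sketch (the application name is
+// illustrative; error handling is reduced to a print):
+//
+//	svc := elasticbeanstalk.New(session.New())
+//	resp, err := svc.CreateApplication(&elasticbeanstalk.CreateApplicationInput{
+//		ApplicationName: aws.String("my-app"), // hypothetical name
+//	})
+//	if err != nil {
+//		fmt.Println(err.Error())
+//		return
+//	}
+//	fmt.Println(resp)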
+func (c *ElasticBeanstalk) CreateApplication(input *CreateApplicationInput) (*ApplicationDescriptionMessage, error) { + req, out := c.CreateApplicationRequest(input) + err := req.Send() + return out, err +} + +const opCreateApplicationVersion = "CreateApplicationVersion" + +// CreateApplicationVersionRequest generates a "aws/request.Request" representing the +// client's request for the CreateApplicationVersion operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateApplicationVersion method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateApplicationVersionRequest method. +// req, resp := client.CreateApplicationVersionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticBeanstalk) CreateApplicationVersionRequest(input *CreateApplicationVersionInput) (req *request.Request, output *ApplicationVersionDescriptionMessage) { + op := &request.Operation{ + Name: opCreateApplicationVersion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateApplicationVersionInput{} + } + + req = c.newRequest(op, input, output) + output = &ApplicationVersionDescriptionMessage{} + req.Data = output + return +} + +// Creates an application version for the specified application. +// +// Once you create an application version with a specified Amazon S3 bucket +// and key location, you cannot change that Amazon S3 location. If you change +// the Amazon S3 location, you receive an exception when you attempt to launch +// an environment from the application version. +func (c *ElasticBeanstalk) CreateApplicationVersion(input *CreateApplicationVersionInput) (*ApplicationVersionDescriptionMessage, error) { + req, out := c.CreateApplicationVersionRequest(input) + err := req.Send() + return out, err +} + +const opCreateConfigurationTemplate = "CreateConfigurationTemplate" + +// CreateConfigurationTemplateRequest generates a "aws/request.Request" representing the +// client's request for the CreateConfigurationTemplate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateConfigurationTemplate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateConfigurationTemplateRequest method. 
+// req, resp := client.CreateConfigurationTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticBeanstalk) CreateConfigurationTemplateRequest(input *CreateConfigurationTemplateInput) (req *request.Request, output *ConfigurationSettingsDescription) { + op := &request.Operation{ + Name: opCreateConfigurationTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateConfigurationTemplateInput{} + } + + req = c.newRequest(op, input, output) + output = &ConfigurationSettingsDescription{} + req.Data = output + return +} + +// Creates a configuration template. Templates are associated with a specific +// application and are used to deploy different versions of the application +// with the same configuration settings. +// +// Related Topics +// +// DescribeConfigurationOptions DescribeConfigurationSettings ListAvailableSolutionStacks +func (c *ElasticBeanstalk) CreateConfigurationTemplate(input *CreateConfigurationTemplateInput) (*ConfigurationSettingsDescription, error) { + req, out := c.CreateConfigurationTemplateRequest(input) + err := req.Send() + return out, err +} + +const opCreateEnvironment = "CreateEnvironment" + +// CreateEnvironmentRequest generates a "aws/request.Request" representing the +// client's request for the CreateEnvironment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateEnvironment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateEnvironmentRequest method. +// req, resp := client.CreateEnvironmentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticBeanstalk) CreateEnvironmentRequest(input *CreateEnvironmentInput) (req *request.Request, output *EnvironmentDescription) { + op := &request.Operation{ + Name: opCreateEnvironment, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateEnvironmentInput{} + } + + req = c.newRequest(op, input, output) + output = &EnvironmentDescription{} + req.Data = output + return +} + +// Launches an environment for the specified application using the specified +// configuration. +func (c *ElasticBeanstalk) CreateEnvironment(input *CreateEnvironmentInput) (*EnvironmentDescription, error) { + req, out := c.CreateEnvironmentRequest(input) + err := req.Send() + return out, err +} + +const opCreateStorageLocation = "CreateStorageLocation" + +// CreateStorageLocationRequest generates a "aws/request.Request" representing the +// client's request for the CreateStorageLocation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the CreateStorageLocation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateStorageLocationRequest method. +// req, resp := client.CreateStorageLocationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticBeanstalk) CreateStorageLocationRequest(input *CreateStorageLocationInput) (req *request.Request, output *CreateStorageLocationOutput) { + op := &request.Operation{ + Name: opCreateStorageLocation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateStorageLocationInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateStorageLocationOutput{} + req.Data = output + return +} + +// Creates the Amazon S3 storage location for the account. +// +// This location is used to store user log files. +func (c *ElasticBeanstalk) CreateStorageLocation(input *CreateStorageLocationInput) (*CreateStorageLocationOutput, error) { + req, out := c.CreateStorageLocationRequest(input) + err := req.Send() + return out, err +} + +const opDeleteApplication = "DeleteApplication" + +// DeleteApplicationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteApplication operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteApplication method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteApplicationRequest method. +// req, resp := client.DeleteApplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticBeanstalk) DeleteApplicationRequest(input *DeleteApplicationInput) (req *request.Request, output *DeleteApplicationOutput) { + op := &request.Operation{ + Name: opDeleteApplication, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteApplicationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteApplicationOutput{} + req.Data = output + return +} + +// Deletes the specified application along with all associated versions and +// configurations. The application versions will not be deleted from your Amazon +// S3 bucket. +// +// You cannot delete an application that has a running environment. +func (c *ElasticBeanstalk) DeleteApplication(input *DeleteApplicationInput) (*DeleteApplicationOutput, error) { + req, out := c.DeleteApplicationRequest(input) + err := req.Send() + return out, err +} + +const opDeleteApplicationVersion = "DeleteApplicationVersion" + +// DeleteApplicationVersionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteApplicationVersion operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteApplicationVersion method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteApplicationVersionRequest method. +// req, resp := client.DeleteApplicationVersionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticBeanstalk) DeleteApplicationVersionRequest(input *DeleteApplicationVersionInput) (req *request.Request, output *DeleteApplicationVersionOutput) { + op := &request.Operation{ + Name: opDeleteApplicationVersion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteApplicationVersionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteApplicationVersionOutput{} + req.Data = output + return +} + +// Deletes the specified version from the specified application. +// +// You cannot delete an application version that is associated with a running +// environment. +func (c *ElasticBeanstalk) DeleteApplicationVersion(input *DeleteApplicationVersionInput) (*DeleteApplicationVersionOutput, error) { + req, out := c.DeleteApplicationVersionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteConfigurationTemplate = "DeleteConfigurationTemplate" + +// DeleteConfigurationTemplateRequest generates a "aws/request.Request" representing the +// client's request for the DeleteConfigurationTemplate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteConfigurationTemplate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteConfigurationTemplateRequest method. 
+// req, resp := client.DeleteConfigurationTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticBeanstalk) DeleteConfigurationTemplateRequest(input *DeleteConfigurationTemplateInput) (req *request.Request, output *DeleteConfigurationTemplateOutput) { + op := &request.Operation{ + Name: opDeleteConfigurationTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteConfigurationTemplateInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteConfigurationTemplateOutput{} + req.Data = output + return +} + +// Deletes the specified configuration template. +// +// When you launch an environment using a configuration template, the environment +// gets a copy of the template. You can delete or modify the environment's copy +// of the template without affecting the running environment. +func (c *ElasticBeanstalk) DeleteConfigurationTemplate(input *DeleteConfigurationTemplateInput) (*DeleteConfigurationTemplateOutput, error) { + req, out := c.DeleteConfigurationTemplateRequest(input) + err := req.Send() + return out, err +} + +const opDeleteEnvironmentConfiguration = "DeleteEnvironmentConfiguration" + +// DeleteEnvironmentConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteEnvironmentConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteEnvironmentConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteEnvironmentConfigurationRequest method. +// req, resp := client.DeleteEnvironmentConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticBeanstalk) DeleteEnvironmentConfigurationRequest(input *DeleteEnvironmentConfigurationInput) (req *request.Request, output *DeleteEnvironmentConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteEnvironmentConfiguration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteEnvironmentConfigurationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteEnvironmentConfigurationOutput{} + req.Data = output + return +} + +// Deletes the draft configuration associated with the running environment. +// +// Updating a running environment with any configuration changes creates a +// draft configuration set. You can get the draft configuration using DescribeConfigurationSettings +// while the update is in progress or if the update fails. The DeploymentStatus +// for the draft configuration indicates whether the deployment is in process +// or has failed. 
The draft configuration remains in existence until it is deleted +// with this action. +func (c *ElasticBeanstalk) DeleteEnvironmentConfiguration(input *DeleteEnvironmentConfigurationInput) (*DeleteEnvironmentConfigurationOutput, error) { + req, out := c.DeleteEnvironmentConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opDescribeApplicationVersions = "DescribeApplicationVersions" + +// DescribeApplicationVersionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeApplicationVersions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeApplicationVersions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeApplicationVersionsRequest method. +// req, resp := client.DescribeApplicationVersionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticBeanstalk) DescribeApplicationVersionsRequest(input *DescribeApplicationVersionsInput) (req *request.Request, output *DescribeApplicationVersionsOutput) { + op := &request.Operation{ + Name: opDescribeApplicationVersions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeApplicationVersionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeApplicationVersionsOutput{} + req.Data = output + return +} + +// Retrieve a list of application versions stored in your AWS Elastic Beanstalk +// storage bucket. +func (c *ElasticBeanstalk) DescribeApplicationVersions(input *DescribeApplicationVersionsInput) (*DescribeApplicationVersionsOutput, error) { + req, out := c.DescribeApplicationVersionsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeApplications = "DescribeApplications" + +// DescribeApplicationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeApplications operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeApplications method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeApplicationsRequest method. 
+//    req, resp := client.DescribeApplicationsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *ElasticBeanstalk) DescribeApplicationsRequest(input *DescribeApplicationsInput) (req *request.Request, output *DescribeApplicationsOutput) {
+	op := &request.Operation{
+		Name:       opDescribeApplications,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeApplicationsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeApplicationsOutput{}
+	req.Data = output
+	return
+}
+
+// Returns the descriptions of existing applications.
+func (c *ElasticBeanstalk) DescribeApplications(input *DescribeApplicationsInput) (*DescribeApplicationsOutput, error) {
+	req, out := c.DescribeApplicationsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeConfigurationOptions = "DescribeConfigurationOptions"
+
+// DescribeConfigurationOptionsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeConfigurationOptions operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeConfigurationOptions method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DescribeConfigurationOptionsRequest method.
+//    req, resp := client.DescribeConfigurationOptionsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *ElasticBeanstalk) DescribeConfigurationOptionsRequest(input *DescribeConfigurationOptionsInput) (req *request.Request, output *DescribeConfigurationOptionsOutput) {
+	op := &request.Operation{
+		Name:       opDescribeConfigurationOptions,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeConfigurationOptionsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeConfigurationOptionsOutput{}
+	req.Data = output
+	return
+}
+
+// Describes the configuration options that are used in a particular configuration
+// template or environment, or that a specified solution stack defines. The
+// description includes the values the options accept, their default values, and
+// an indication of the required action on a running environment if an option
+// value is changed.
+func (c *ElasticBeanstalk) DescribeConfigurationOptions(input *DescribeConfigurationOptionsInput) (*DescribeConfigurationOptionsOutput, error) {
+	req, out := c.DescribeConfigurationOptionsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeConfigurationSettings = "DescribeConfigurationSettings"
+
+// DescribeConfigurationSettingsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeConfigurationSettings operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
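+//
+// The examples in this file assume an already-constructed service client named
+// "client". As a rough sketch only (the region and construction details below
+// are illustrative assumptions, not part of this package), such a client might
+// be built from a session:
+//
+//    sess := session.New(&aws.Config{Region: aws.String("us-west-2")})
+//    client := elasticbeanstalk.New(sess)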
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeConfigurationSettings method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeConfigurationSettingsRequest method. +// req, resp := client.DescribeConfigurationSettingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticBeanstalk) DescribeConfigurationSettingsRequest(input *DescribeConfigurationSettingsInput) (req *request.Request, output *DescribeConfigurationSettingsOutput) { + op := &request.Operation{ + Name: opDescribeConfigurationSettings, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeConfigurationSettingsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeConfigurationSettingsOutput{} + req.Data = output + return +} + +// Returns a description of the settings for the specified configuration set, +// that is, either a configuration template or the configuration set associated +// with a running environment. +// +// When describing the settings for the configuration set associated with +// a running environment, it is possible to receive two sets of setting descriptions. +// One is the deployed configuration set, and the other is a draft configuration +// of an environment that is either in the process of deployment or that failed +// to deploy. +// +// Related Topics +// +// DeleteEnvironmentConfiguration +func (c *ElasticBeanstalk) DescribeConfigurationSettings(input *DescribeConfigurationSettingsInput) (*DescribeConfigurationSettingsOutput, error) { + req, out := c.DescribeConfigurationSettingsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeEnvironmentHealth = "DescribeEnvironmentHealth" + +// DescribeEnvironmentHealthRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEnvironmentHealth operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeEnvironmentHealth method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeEnvironmentHealthRequest method. 
+// req, resp := client.DescribeEnvironmentHealthRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticBeanstalk) DescribeEnvironmentHealthRequest(input *DescribeEnvironmentHealthInput) (req *request.Request, output *DescribeEnvironmentHealthOutput) { + op := &request.Operation{ + Name: opDescribeEnvironmentHealth, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeEnvironmentHealthInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeEnvironmentHealthOutput{} + req.Data = output + return +} + +// Returns information about the overall health of the specified environment. +// The DescribeEnvironmentHealth operation is only available with AWS Elastic +// Beanstalk Enhanced Health. +func (c *ElasticBeanstalk) DescribeEnvironmentHealth(input *DescribeEnvironmentHealthInput) (*DescribeEnvironmentHealthOutput, error) { + req, out := c.DescribeEnvironmentHealthRequest(input) + err := req.Send() + return out, err +} + +const opDescribeEnvironmentManagedActionHistory = "DescribeEnvironmentManagedActionHistory" + +// DescribeEnvironmentManagedActionHistoryRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEnvironmentManagedActionHistory operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeEnvironmentManagedActionHistory method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeEnvironmentManagedActionHistoryRequest method. +// req, resp := client.DescribeEnvironmentManagedActionHistoryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticBeanstalk) DescribeEnvironmentManagedActionHistoryRequest(input *DescribeEnvironmentManagedActionHistoryInput) (req *request.Request, output *DescribeEnvironmentManagedActionHistoryOutput) { + op := &request.Operation{ + Name: opDescribeEnvironmentManagedActionHistory, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeEnvironmentManagedActionHistoryInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeEnvironmentManagedActionHistoryOutput{} + req.Data = output + return +} + +// Lists an environment's completed and failed managed actions. +func (c *ElasticBeanstalk) DescribeEnvironmentManagedActionHistory(input *DescribeEnvironmentManagedActionHistoryInput) (*DescribeEnvironmentManagedActionHistoryOutput, error) { + req, out := c.DescribeEnvironmentManagedActionHistoryRequest(input) + err := req.Send() + return out, err +} + +const opDescribeEnvironmentManagedActions = "DescribeEnvironmentManagedActions" + +// DescribeEnvironmentManagedActionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEnvironmentManagedActions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
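+//
+// When Send returns an error, the error can be inspected for a service error
+// code. A minimal, illustrative sketch (the handling shown is an assumption,
+// not generated API documentation), using the SDK's awserr package:
+//
+//    if err := req.Send(); err != nil {
+//        if aerr, ok := err.(awserr.Error); ok {
+//            fmt.Println(aerr.Code(), aerr.Message())
+//        }
+//    }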
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeEnvironmentManagedActions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeEnvironmentManagedActionsRequest method. +// req, resp := client.DescribeEnvironmentManagedActionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticBeanstalk) DescribeEnvironmentManagedActionsRequest(input *DescribeEnvironmentManagedActionsInput) (req *request.Request, output *DescribeEnvironmentManagedActionsOutput) { + op := &request.Operation{ + Name: opDescribeEnvironmentManagedActions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeEnvironmentManagedActionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeEnvironmentManagedActionsOutput{} + req.Data = output + return +} + +// Lists an environment's upcoming and in-progress managed actions. +func (c *ElasticBeanstalk) DescribeEnvironmentManagedActions(input *DescribeEnvironmentManagedActionsInput) (*DescribeEnvironmentManagedActionsOutput, error) { + req, out := c.DescribeEnvironmentManagedActionsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeEnvironmentResources = "DescribeEnvironmentResources" + +// DescribeEnvironmentResourcesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEnvironmentResources operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeEnvironmentResources method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeEnvironmentResourcesRequest method. +// req, resp := client.DescribeEnvironmentResourcesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticBeanstalk) DescribeEnvironmentResourcesRequest(input *DescribeEnvironmentResourcesInput) (req *request.Request, output *DescribeEnvironmentResourcesOutput) { + op := &request.Operation{ + Name: opDescribeEnvironmentResources, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeEnvironmentResourcesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeEnvironmentResourcesOutput{} + req.Data = output + return +} + +// Returns AWS resources for this environment. 
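+//
+// As a brief sketch of the handler-injection pattern described above (the
+// logging handler here is a hypothetical example, not part of this package):
+//
+//    req, resp := client.DescribeEnvironmentResourcesRequest(params)
+//    req.Handlers.Send.PushBack(func(r *request.Request) {
+//        fmt.Println("sending", r.Operation.Name) // custom lifecycle logic
+//    })
+//    if err := req.Send(); err == nil {
+//        fmt.Println(resp)
+//    }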
+func (c *ElasticBeanstalk) DescribeEnvironmentResources(input *DescribeEnvironmentResourcesInput) (*DescribeEnvironmentResourcesOutput, error) { + req, out := c.DescribeEnvironmentResourcesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeEnvironments = "DescribeEnvironments" + +// DescribeEnvironmentsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEnvironments operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeEnvironments method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeEnvironmentsRequest method. +// req, resp := client.DescribeEnvironmentsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticBeanstalk) DescribeEnvironmentsRequest(input *DescribeEnvironmentsInput) (req *request.Request, output *EnvironmentDescriptionsMessage) { + op := &request.Operation{ + Name: opDescribeEnvironments, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeEnvironmentsInput{} + } + + req = c.newRequest(op, input, output) + output = &EnvironmentDescriptionsMessage{} + req.Data = output + return +} + +// Returns descriptions for existing environments. +func (c *ElasticBeanstalk) DescribeEnvironments(input *DescribeEnvironmentsInput) (*EnvironmentDescriptionsMessage, error) { + req, out := c.DescribeEnvironmentsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeEvents = "DescribeEvents" + +// DescribeEventsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEvents operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeEvents method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeEventsRequest method. 
+//    req, resp := client.DescribeEventsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *ElasticBeanstalk) DescribeEventsRequest(input *DescribeEventsInput) (req *request.Request, output *DescribeEventsOutput) {
+	op := &request.Operation{
+		Name:       opDescribeEvents,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"NextToken"},
+			OutputTokens:    []string{"NextToken"},
+			LimitToken:      "MaxRecords",
+			TruncationToken: "",
+		},
+	}
+
+	if input == nil {
+		input = &DescribeEventsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeEventsOutput{}
+	req.Data = output
+	return
+}
+
+// Returns a list of event descriptions matching criteria up to the last 6 weeks.
+//
+// This action returns the most recent 1,000 events from the specified NextToken.
+func (c *ElasticBeanstalk) DescribeEvents(input *DescribeEventsInput) (*DescribeEventsOutput, error) {
+	req, out := c.DescribeEventsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// DescribeEventsPages iterates over the pages of a DescribeEvents operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See DescribeEvents method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+//    // Example iterating over at most 3 pages of a DescribeEvents operation.
+//    pageNum := 0
+//    err := client.DescribeEventsPages(params,
+//        func(page *DescribeEventsOutput, lastPage bool) bool {
+//            pageNum++
+//            fmt.Println(page)
+//            return pageNum <= 3
+//        })
+//
+func (c *ElasticBeanstalk) DescribeEventsPages(input *DescribeEventsInput, fn func(p *DescribeEventsOutput, lastPage bool) (shouldContinue bool)) error {
+	page, _ := c.DescribeEventsRequest(input)
+	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+	return page.EachPage(func(p interface{}, lastPage bool) bool {
+		return fn(p.(*DescribeEventsOutput), lastPage)
+	})
+}
+
+const opDescribeInstancesHealth = "DescribeInstancesHealth"
+
+// DescribeInstancesHealthRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeInstancesHealth operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeInstancesHealth method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DescribeInstancesHealthRequest method.
+// req, resp := client.DescribeInstancesHealthRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticBeanstalk) DescribeInstancesHealthRequest(input *DescribeInstancesHealthInput) (req *request.Request, output *DescribeInstancesHealthOutput) { + op := &request.Operation{ + Name: opDescribeInstancesHealth, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeInstancesHealthInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeInstancesHealthOutput{} + req.Data = output + return +} + +// Returns more detailed information about the health of the specified instances +// (for example, CPU utilization, load average, and causes). The DescribeInstancesHealth +// operation is only available with AWS Elastic Beanstalk Enhanced Health. +func (c *ElasticBeanstalk) DescribeInstancesHealth(input *DescribeInstancesHealthInput) (*DescribeInstancesHealthOutput, error) { + req, out := c.DescribeInstancesHealthRequest(input) + err := req.Send() + return out, err +} + +const opListAvailableSolutionStacks = "ListAvailableSolutionStacks" + +// ListAvailableSolutionStacksRequest generates a "aws/request.Request" representing the +// client's request for the ListAvailableSolutionStacks operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListAvailableSolutionStacks method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListAvailableSolutionStacksRequest method. +// req, resp := client.ListAvailableSolutionStacksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticBeanstalk) ListAvailableSolutionStacksRequest(input *ListAvailableSolutionStacksInput) (req *request.Request, output *ListAvailableSolutionStacksOutput) { + op := &request.Operation{ + Name: opListAvailableSolutionStacks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListAvailableSolutionStacksInput{} + } + + req = c.newRequest(op, input, output) + output = &ListAvailableSolutionStacksOutput{} + req.Data = output + return +} + +// Returns a list of the available solution stack names. +func (c *ElasticBeanstalk) ListAvailableSolutionStacks(input *ListAvailableSolutionStacksInput) (*ListAvailableSolutionStacksOutput, error) { + req, out := c.ListAvailableSolutionStacksRequest(input) + err := req.Send() + return out, err +} + +const opRebuildEnvironment = "RebuildEnvironment" + +// RebuildEnvironmentRequest generates a "aws/request.Request" representing the +// client's request for the RebuildEnvironment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the RebuildEnvironment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RebuildEnvironmentRequest method. +// req, resp := client.RebuildEnvironmentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticBeanstalk) RebuildEnvironmentRequest(input *RebuildEnvironmentInput) (req *request.Request, output *RebuildEnvironmentOutput) { + op := &request.Operation{ + Name: opRebuildEnvironment, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RebuildEnvironmentInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RebuildEnvironmentOutput{} + req.Data = output + return +} + +// Deletes and recreates all of the AWS resources (for example: the Auto Scaling +// group, load balancer, etc.) for a specified environment and forces a restart. +func (c *ElasticBeanstalk) RebuildEnvironment(input *RebuildEnvironmentInput) (*RebuildEnvironmentOutput, error) { + req, out := c.RebuildEnvironmentRequest(input) + err := req.Send() + return out, err +} + +const opRequestEnvironmentInfo = "RequestEnvironmentInfo" + +// RequestEnvironmentInfoRequest generates a "aws/request.Request" representing the +// client's request for the RequestEnvironmentInfo operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RequestEnvironmentInfo method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RequestEnvironmentInfoRequest method. +// req, resp := client.RequestEnvironmentInfoRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticBeanstalk) RequestEnvironmentInfoRequest(input *RequestEnvironmentInfoInput) (req *request.Request, output *RequestEnvironmentInfoOutput) { + op := &request.Operation{ + Name: opRequestEnvironmentInfo, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RequestEnvironmentInfoInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RequestEnvironmentInfoOutput{} + req.Data = output + return +} + +// Initiates a request to compile the specified type of information of the deployed +// environment. +// +// Setting the InfoType to tail compiles the last lines from the application +// server log files of every Amazon EC2 instance in your environment. +// +// Setting the InfoType to bundle compresses the application server log files +// for every Amazon EC2 instance into a .zip file. Legacy and .NET containers +// do not support bundle logs. +// +// Use RetrieveEnvironmentInfo to obtain the set of logs. 
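+//
+// A minimal sketch of the two-step flow (the field values are illustrative
+// assumptions, and RetrieveEnvironmentInfo may need to be retried until the
+// compiled logs are ready):
+//
+//    params := &elasticbeanstalk.RequestEnvironmentInfoInput{
+//        EnvironmentName: aws.String("my-env"),
+//        InfoType:        aws.String("tail"),
+//    }
+//    if _, err := client.RequestEnvironmentInfo(params); err == nil {
+//        // ... later, once the logs have been compiled ...
+//        out, _ := client.RetrieveEnvironmentInfo(&elasticbeanstalk.RetrieveEnvironmentInfoInput{
+//            EnvironmentName: aws.String("my-env"),
+//            InfoType:        aws.String("tail"),
+//        })
+//        fmt.Println(out)
+//    }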
+// +// Related Topics +// +// RetrieveEnvironmentInfo +func (c *ElasticBeanstalk) RequestEnvironmentInfo(input *RequestEnvironmentInfoInput) (*RequestEnvironmentInfoOutput, error) { + req, out := c.RequestEnvironmentInfoRequest(input) + err := req.Send() + return out, err +} + +const opRestartAppServer = "RestartAppServer" + +// RestartAppServerRequest generates a "aws/request.Request" representing the +// client's request for the RestartAppServer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RestartAppServer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RestartAppServerRequest method. +// req, resp := client.RestartAppServerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticBeanstalk) RestartAppServerRequest(input *RestartAppServerInput) (req *request.Request, output *RestartAppServerOutput) { + op := &request.Operation{ + Name: opRestartAppServer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RestartAppServerInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RestartAppServerOutput{} + req.Data = output + return +} + +// Causes the environment to restart the application container server running +// on each Amazon EC2 instance. +func (c *ElasticBeanstalk) RestartAppServer(input *RestartAppServerInput) (*RestartAppServerOutput, error) { + req, out := c.RestartAppServerRequest(input) + err := req.Send() + return out, err +} + +const opRetrieveEnvironmentInfo = "RetrieveEnvironmentInfo" + +// RetrieveEnvironmentInfoRequest generates a "aws/request.Request" representing the +// client's request for the RetrieveEnvironmentInfo operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RetrieveEnvironmentInfo method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RetrieveEnvironmentInfoRequest method. 
+// req, resp := client.RetrieveEnvironmentInfoRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticBeanstalk) RetrieveEnvironmentInfoRequest(input *RetrieveEnvironmentInfoInput) (req *request.Request, output *RetrieveEnvironmentInfoOutput) { + op := &request.Operation{ + Name: opRetrieveEnvironmentInfo, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RetrieveEnvironmentInfoInput{} + } + + req = c.newRequest(op, input, output) + output = &RetrieveEnvironmentInfoOutput{} + req.Data = output + return +} + +// Retrieves the compiled information from a RequestEnvironmentInfo request. +// +// Related Topics +// +// RequestEnvironmentInfo +func (c *ElasticBeanstalk) RetrieveEnvironmentInfo(input *RetrieveEnvironmentInfoInput) (*RetrieveEnvironmentInfoOutput, error) { + req, out := c.RetrieveEnvironmentInfoRequest(input) + err := req.Send() + return out, err +} + +const opSwapEnvironmentCNAMEs = "SwapEnvironmentCNAMEs" + +// SwapEnvironmentCNAMEsRequest generates a "aws/request.Request" representing the +// client's request for the SwapEnvironmentCNAMEs operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SwapEnvironmentCNAMEs method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SwapEnvironmentCNAMEsRequest method. +// req, resp := client.SwapEnvironmentCNAMEsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticBeanstalk) SwapEnvironmentCNAMEsRequest(input *SwapEnvironmentCNAMEsInput) (req *request.Request, output *SwapEnvironmentCNAMEsOutput) { + op := &request.Operation{ + Name: opSwapEnvironmentCNAMEs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SwapEnvironmentCNAMEsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SwapEnvironmentCNAMEsOutput{} + req.Data = output + return +} + +// Swaps the CNAMEs of two environments. +func (c *ElasticBeanstalk) SwapEnvironmentCNAMEs(input *SwapEnvironmentCNAMEsInput) (*SwapEnvironmentCNAMEsOutput, error) { + req, out := c.SwapEnvironmentCNAMEsRequest(input) + err := req.Send() + return out, err +} + +const opTerminateEnvironment = "TerminateEnvironment" + +// TerminateEnvironmentRequest generates a "aws/request.Request" representing the +// client's request for the TerminateEnvironment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the TerminateEnvironment method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the TerminateEnvironmentRequest method. +// req, resp := client.TerminateEnvironmentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticBeanstalk) TerminateEnvironmentRequest(input *TerminateEnvironmentInput) (req *request.Request, output *EnvironmentDescription) { + op := &request.Operation{ + Name: opTerminateEnvironment, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TerminateEnvironmentInput{} + } + + req = c.newRequest(op, input, output) + output = &EnvironmentDescription{} + req.Data = output + return +} + +// Terminates the specified environment. +func (c *ElasticBeanstalk) TerminateEnvironment(input *TerminateEnvironmentInput) (*EnvironmentDescription, error) { + req, out := c.TerminateEnvironmentRequest(input) + err := req.Send() + return out, err +} + +const opUpdateApplication = "UpdateApplication" + +// UpdateApplicationRequest generates a "aws/request.Request" representing the +// client's request for the UpdateApplication operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateApplication method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateApplicationRequest method. +// req, resp := client.UpdateApplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticBeanstalk) UpdateApplicationRequest(input *UpdateApplicationInput) (req *request.Request, output *ApplicationDescriptionMessage) { + op := &request.Operation{ + Name: opUpdateApplication, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateApplicationInput{} + } + + req = c.newRequest(op, input, output) + output = &ApplicationDescriptionMessage{} + req.Data = output + return +} + +// Updates the specified application to have the specified properties. +// +// If a property (for example, description) is not provided, the value remains +// unchanged. To clear these properties, specify an empty string. +func (c *ElasticBeanstalk) UpdateApplication(input *UpdateApplicationInput) (*ApplicationDescriptionMessage, error) { + req, out := c.UpdateApplicationRequest(input) + err := req.Send() + return out, err +} + +const opUpdateApplicationVersion = "UpdateApplicationVersion" + +// UpdateApplicationVersionRequest generates a "aws/request.Request" representing the +// client's request for the UpdateApplicationVersion operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the UpdateApplicationVersion method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateApplicationVersionRequest method. +// req, resp := client.UpdateApplicationVersionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticBeanstalk) UpdateApplicationVersionRequest(input *UpdateApplicationVersionInput) (req *request.Request, output *ApplicationVersionDescriptionMessage) { + op := &request.Operation{ + Name: opUpdateApplicationVersion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateApplicationVersionInput{} + } + + req = c.newRequest(op, input, output) + output = &ApplicationVersionDescriptionMessage{} + req.Data = output + return +} + +// Updates the specified application version to have the specified properties. +// +// If a property (for example, description) is not provided, the value remains +// unchanged. To clear properties, specify an empty string. +func (c *ElasticBeanstalk) UpdateApplicationVersion(input *UpdateApplicationVersionInput) (*ApplicationVersionDescriptionMessage, error) { + req, out := c.UpdateApplicationVersionRequest(input) + err := req.Send() + return out, err +} + +const opUpdateConfigurationTemplate = "UpdateConfigurationTemplate" + +// UpdateConfigurationTemplateRequest generates a "aws/request.Request" representing the +// client's request for the UpdateConfigurationTemplate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateConfigurationTemplate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateConfigurationTemplateRequest method. +// req, resp := client.UpdateConfigurationTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticBeanstalk) UpdateConfigurationTemplateRequest(input *UpdateConfigurationTemplateInput) (req *request.Request, output *ConfigurationSettingsDescription) { + op := &request.Operation{ + Name: opUpdateConfigurationTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateConfigurationTemplateInput{} + } + + req = c.newRequest(op, input, output) + output = &ConfigurationSettingsDescription{} + req.Data = output + return +} + +// Updates the specified configuration template to have the specified properties +// or configuration option values. +// +// If a property (for example, ApplicationName) is not provided, its value +// remains unchanged. To clear such properties, specify an empty string. 
Related +// Topics +// +// DescribeConfigurationOptions +func (c *ElasticBeanstalk) UpdateConfigurationTemplate(input *UpdateConfigurationTemplateInput) (*ConfigurationSettingsDescription, error) { + req, out := c.UpdateConfigurationTemplateRequest(input) + err := req.Send() + return out, err +} + +const opUpdateEnvironment = "UpdateEnvironment" + +// UpdateEnvironmentRequest generates a "aws/request.Request" representing the +// client's request for the UpdateEnvironment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateEnvironment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateEnvironmentRequest method. +// req, resp := client.UpdateEnvironmentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticBeanstalk) UpdateEnvironmentRequest(input *UpdateEnvironmentInput) (req *request.Request, output *EnvironmentDescription) { + op := &request.Operation{ + Name: opUpdateEnvironment, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateEnvironmentInput{} + } + + req = c.newRequest(op, input, output) + output = &EnvironmentDescription{} + req.Data = output + return +} + +// Updates the environment description, deploys a new application version, updates +// the configuration settings to an entirely new configuration template, or +// updates select configuration option values in the running environment. +// +// Attempting to update both the release and configuration is not allowed +// and AWS Elastic Beanstalk returns an InvalidParameterCombination error. +// +// When updating the configuration settings to a new template or individual +// settings, a draft configuration is created and DescribeConfigurationSettings +// for this environment returns two setting descriptions with different DeploymentStatus +// values. +func (c *ElasticBeanstalk) UpdateEnvironment(input *UpdateEnvironmentInput) (*EnvironmentDescription, error) { + req, out := c.UpdateEnvironmentRequest(input) + err := req.Send() + return out, err +} + +const opValidateConfigurationSettings = "ValidateConfigurationSettings" + +// ValidateConfigurationSettingsRequest generates a "aws/request.Request" representing the +// client's request for the ValidateConfigurationSettings operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ValidateConfigurationSettings method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ValidateConfigurationSettingsRequest method. 
+// req, resp := client.ValidateConfigurationSettingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticBeanstalk) ValidateConfigurationSettingsRequest(input *ValidateConfigurationSettingsInput) (req *request.Request, output *ValidateConfigurationSettingsOutput) { + op := &request.Operation{ + Name: opValidateConfigurationSettings, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ValidateConfigurationSettingsInput{} + } + + req = c.newRequest(op, input, output) + output = &ValidateConfigurationSettingsOutput{} + req.Data = output + return +} + +// Takes a set of configuration settings and either a configuration template +// or environment, and determines whether those values are valid. +// +// This action returns a list of messages indicating any errors or warnings +// associated with the selection of option values. +func (c *ElasticBeanstalk) ValidateConfigurationSettings(input *ValidateConfigurationSettingsInput) (*ValidateConfigurationSettingsOutput, error) { + req, out := c.ValidateConfigurationSettingsRequest(input) + err := req.Send() + return out, err +} + +type AbortEnvironmentUpdateInput struct { + _ struct{} `type:"structure"` + + // This specifies the ID of the environment with the in-progress update that + // you want to cancel. + EnvironmentId *string `type:"string"` + + // This specifies the name of the environment with the in-progress update that + // you want to cancel. + EnvironmentName *string `min:"4" type:"string"` +} + +// String returns the string representation +func (s AbortEnvironmentUpdateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortEnvironmentUpdateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AbortEnvironmentUpdateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AbortEnvironmentUpdateInput"} + if s.EnvironmentName != nil && len(*s.EnvironmentName) < 4 { + invalidParams.Add(request.NewErrParamMinLen("EnvironmentName", 4)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type AbortEnvironmentUpdateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AbortEnvironmentUpdateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortEnvironmentUpdateOutput) GoString() string { + return s.String() +} + +// Describes the properties of an application. +type ApplicationDescription struct { + _ struct{} `type:"structure"` + + // The name of the application. + ApplicationName *string `min:"1" type:"string"` + + // The names of the configuration templates associated with this application. + ConfigurationTemplates []*string `type:"list"` + + // The date when the application was created. + DateCreated *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The date when the application was last modified. + DateUpdated *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // User-defined description of the application. + Description *string `type:"string"` + + // The names of the versions for this application. 
+	Versions []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s ApplicationDescription) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ApplicationDescription) GoString() string {
+	return s.String()
+}
+
+// Result message containing a single description of an application.
+type ApplicationDescriptionMessage struct {
+	_ struct{} `type:"structure"`
+
+	// The ApplicationDescription of the application.
+	Application *ApplicationDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s ApplicationDescriptionMessage) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ApplicationDescriptionMessage) GoString() string {
+	return s.String()
+}
+
+// Represents the application metrics for a specified environment.
+type ApplicationMetrics struct {
+	_ struct{} `type:"structure"`
+
+	// The amount of time that the metrics cover (usually 10 seconds). For example,
+	// you might have 5 requests (request_count) within the most recent time slice
+	// of 10 seconds (duration).
+	Duration *int64 `type:"integer"`
+
+	// Represents the average latency for the slowest X percent of requests over
+	// the last 10 seconds. Latencies are in seconds with one millisecond resolution.
+	Latency *Latency `type:"structure"`
+
+	// Average number of requests handled by the web server per second over the
+	// last 10 seconds.
+	RequestCount *int64 `type:"integer"`
+
+	// Represents the percentage of requests over the last 10 seconds that resulted
+	// in each type of status code response.
+	StatusCodes *StatusCodes `type:"structure"`
+}
+
+// String returns the string representation
+func (s ApplicationMetrics) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ApplicationMetrics) GoString() string {
+	return s.String()
+}
+
+// Describes the properties of an application version.
+type ApplicationVersionDescription struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the application associated with this release.
+	ApplicationName *string `min:"1" type:"string"`
+
+	// The creation date of the application version.
+	DateCreated *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+	// The last modified date of the application version.
+	DateUpdated *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+	// The description of this application version.
+	Description *string `type:"string"`
+
+	// The location where the source bundle is located for this version.
+	SourceBundle *S3Location `type:"structure"`
+
+	// The processing status of the application version.
+	Status *string `type:"string" enum:"ApplicationVersionStatus"`
+
+	// A label uniquely identifying the version for the associated application.
+	VersionLabel *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ApplicationVersionDescription) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ApplicationVersionDescription) GoString() string {
+	return s.String()
+}
+
+// Result message wrapping a single description of an application version.
+type ApplicationVersionDescriptionMessage struct {
+	_ struct{} `type:"structure"`
+
+	// The ApplicationVersionDescription of the application version.
+	ApplicationVersion *ApplicationVersionDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s ApplicationVersionDescriptionMessage) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ApplicationVersionDescriptionMessage) GoString() string {
+	return s.String()
+}
+
+// Request to execute a scheduled managed action immediately.
+type ApplyEnvironmentManagedActionInput struct {
+	_ struct{} `type:"structure"`
+
+	// The action ID of the scheduled managed action to execute.
+	ActionId *string `type:"string" required:"true"`
+
+	// The environment ID of the target environment.
+	EnvironmentId *string `type:"string"`
+
+	// The name of the target environment.
+	EnvironmentName *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ApplyEnvironmentManagedActionInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ApplyEnvironmentManagedActionInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ApplyEnvironmentManagedActionInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ApplyEnvironmentManagedActionInput"}
+	if s.ActionId == nil {
+		invalidParams.Add(request.NewErrParamRequired("ActionId"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The result message containing information about the managed action.
+type ApplyEnvironmentManagedActionOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A description of the managed action.
+	ActionDescription *string `type:"string"`
+
+	// The action ID of the managed action.
+	ActionId *string `type:"string"`
+
+	// The type of managed action.
+	ActionType *string `type:"string" enum:"ActionType"`
+
+	// The status of the managed action.
+	Status *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ApplyEnvironmentManagedActionOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ApplyEnvironmentManagedActionOutput) GoString() string {
+	return s.String()
+}
+
+// Describes an Auto Scaling group.
+type AutoScalingGroup struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the AutoScalingGroup.
+	Name *string `type:"string"`
+}
+
+// String returns the string representation
+func (s AutoScalingGroup) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AutoScalingGroup) GoString() string {
+	return s.String()
+}
+
+// Represents CPU utilization information from the specified instance that belongs
+// to the AWS Elastic Beanstalk environment. Use the instanceId property to
+// specify the application instance for which you'd like to return data.
+type CPUUtilization struct {
+	_ struct{} `type:"structure"`
+
+	// Percentage of time that the CPU has spent in the I/O Wait state over the
+	// last 10 seconds.
+	IOWait *float64 `type:"double"`
+
+	// Percentage of time that the CPU has spent in the IRQ state over the last
+	// 10 seconds.
+	IRQ *float64 `type:"double"`
+
+	// Percentage of time that the CPU has spent in the Idle state over the last
+	// 10 seconds.
+	Idle *float64 `type:"double"`
+
+	// Percentage of time that the CPU has spent in the Nice state over the last
+	// 10 seconds.
+	Nice *float64 `type:"double"`
+
+	// Percentage of time that the CPU has spent in the SoftIRQ state over the last
+	// 10 seconds.
+	SoftIRQ *float64 `type:"double"`
+
+	// Percentage of time that the CPU has spent in the System state over the last
+	// 10 seconds.
+	System *float64 `type:"double"`
+
+	// Percentage of time that the CPU has spent in the User state over the last
+	// 10 seconds.
+	User *float64 `type:"double"`
+}
+
+// String returns the string representation
+func (s CPUUtilization) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CPUUtilization) GoString() string {
+	return s.String()
+}
+
+// Request to check the availability of a CNAME.
+type CheckDNSAvailabilityInput struct {
+	_ struct{} `type:"structure"`
+
+	// The prefix used when this CNAME is reserved.
+	CNAMEPrefix *string `min:"4" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CheckDNSAvailabilityInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CheckDNSAvailabilityInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CheckDNSAvailabilityInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CheckDNSAvailabilityInput"}
+	if s.CNAMEPrefix == nil {
+		invalidParams.Add(request.NewErrParamRequired("CNAMEPrefix"))
+	}
+	if s.CNAMEPrefix != nil && len(*s.CNAMEPrefix) < 4 {
+		invalidParams.Add(request.NewErrParamMinLen("CNAMEPrefix", 4))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Indicates if the specified CNAME is available.
+type CheckDNSAvailabilityOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Indicates if the specified CNAME is available:
+	//
+	// true : The CNAME is available. false : The CNAME is not available.
+	Available *bool `type:"boolean"`
+
+	// The fully qualified CNAME to reserve when CreateEnvironment is called with
+	// the provided prefix.
+	FullyQualifiedCNAME *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s CheckDNSAvailabilityOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CheckDNSAvailabilityOutput) GoString() string {
+	return s.String()
+}
+
+// Request to create or update a group of environments.
+type ComposeEnvironmentsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the application to which the specified source bundles belong.
+	ApplicationName *string `min:"1" type:"string"`
+
+	// The name of the group to which the target environments belong. Specify a
+	// group name only if the environment name defined in each target environment's
+	// manifest ends with a + (plus) character. See Environment Manifest (env.yaml)
+	// (http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/environment-cfg-manifest.html)
+	// for details.
+	GroupName *string `min:"1" type:"string"`
+
+	// A list of version labels, specifying one or more application source bundles
+	// that belong to the target application. Each source bundle must include an
+	// environment manifest that specifies the name of the environment and the name
+	// of the solution stack to use, and optionally can specify environment links
+	// to create.
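+	//
+	// An illustrative request (editor's sketch, not generated SDK code; the
+	// names are hypothetical and aws.String is the pointer helper from the
+	// aws package):
+	//
+	//	in := &ComposeEnvironmentsInput{
+	//		ApplicationName: aws.String("my-app"),
+	//		GroupName:       aws.String("dev"),
+	//		VersionLabels:   []*string{aws.String("front-v1"), aws.String("worker-v1")},
+	//	}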
+	VersionLabels []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s ComposeEnvironmentsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ComposeEnvironmentsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ComposeEnvironmentsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ComposeEnvironmentsInput"}
+	if s.ApplicationName != nil && len(*s.ApplicationName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ApplicationName", 1))
+	}
+	if s.GroupName != nil && len(*s.GroupName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("GroupName", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Describes the possible values for a configuration option.
+type ConfigurationOptionDescription struct {
+	_ struct{} `type:"structure"`
+
+	// An indication of which action is required if the value for this configuration
+	// option changes:
+	//
+	// NoInterruption : There is no interruption to the environment or application
+	// availability. RestartEnvironment : The environment is entirely restarted,
+	// all AWS resources are deleted and recreated, and the environment is unavailable
+	// during the process. RestartApplicationServer : The environment is available
+	// the entire time. However, a short application outage occurs when the application
+	// servers on the running Amazon EC2 instances are restarted.
+	ChangeSeverity *string `type:"string"`
+
+	// The default value for this configuration option.
+	DefaultValue *string `type:"string"`
+
+	// If specified, the configuration option must be a string value no longer than
+	// this value.
+	MaxLength *int64 `type:"integer"`
+
+	// If specified, the configuration option must be a numeric value less than
+	// this value.
+	MaxValue *int64 `type:"integer"`
+
+	// If specified, the configuration option must be a numeric value greater than
+	// this value.
+	MinValue *int64 `type:"integer"`
+
+	// The name of the configuration option.
+	Name *string `type:"string"`
+
+	// A unique namespace identifying the option's associated AWS resource.
+	Namespace *string `type:"string"`
+
+	// If specified, the configuration option must be a string value that satisfies
+	// this regular expression.
+	Regex *OptionRestrictionRegex `type:"structure"`
+
+	// An indication of whether the user defined this configuration option:
+	//
+	// true : This configuration option was defined by the user. It is a valid
+	// choice for specifying this as an Option to Remove when updating configuration
+	// settings.
+	//
+	// false : This configuration was not defined by the user. Constraint:
+	// You can remove only UserDefined options from a configuration.
+	//
+	// Valid Values: true | false
+	UserDefined *bool `type:"boolean"`
+
+	// If specified, values for the configuration option are selected from this
+	// list.
+	ValueOptions []*string `type:"list"`
+
+	// An indication of which type of values this option has and whether it is allowable
+	// to select one or more than one of the possible values:
+	//
+	// Scalar : Values for this option are a single selection from the possible
+	// values, or an unformatted string, or numeric value governed by the MIN/MAX/Regex
+	// constraints. List : Values for this option are multiple selections from
+	// the possible values.
Boolean : Values for this option are either true or + // false . Json : Values for this option are a JSON representation of a ConfigDocument. + ValueType *string `type:"string" enum:"ConfigurationOptionValueType"` +} + +// String returns the string representation +func (s ConfigurationOptionDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfigurationOptionDescription) GoString() string { + return s.String() +} + +// A specification identifying an individual configuration option along with +// its current value. For a list of possible option values, go to Option Values +// (http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/command-options.html) +// in the AWS Elastic Beanstalk Developer Guide. +type ConfigurationOptionSetting struct { + _ struct{} `type:"structure"` + + // A unique namespace identifying the option's associated AWS resource. + Namespace *string `type:"string"` + + // The name of the configuration option. + OptionName *string `type:"string"` + + // A unique resource name for a time-based scaling configuration option. + ResourceName *string `min:"1" type:"string"` + + // The current value for the configuration option. + Value *string `type:"string"` +} + +// String returns the string representation +func (s ConfigurationOptionSetting) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfigurationOptionSetting) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ConfigurationOptionSetting) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ConfigurationOptionSetting"} + if s.ResourceName != nil && len(*s.ResourceName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Describes the settings for a configuration set. +type ConfigurationSettingsDescription struct { + _ struct{} `type:"structure"` + + // The name of the application associated with this configuration set. + ApplicationName *string `min:"1" type:"string"` + + // The date (in UTC time) when this configuration set was created. + DateCreated *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The date (in UTC time) when this configuration set was last modified. + DateUpdated *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // If this configuration set is associated with an environment, the DeploymentStatus + // parameter indicates the deployment status of this configuration set: + // + // null: This configuration is not associated with a running environment. + // pending: This is a draft configuration that is not deployed to the associated + // environment but is in the process of deploying. deployed: This is the configuration + // that is currently deployed to the associated running environment. failed: + // This is a draft configuration that failed to successfully deploy. + DeploymentStatus *string `type:"string" enum:"ConfigurationDeploymentStatus"` + + // Describes this configuration set. + Description *string `type:"string"` + + // If not null, the name of the environment for this configuration set. + EnvironmentName *string `min:"4" type:"string"` + + // A list of the configuration options and their values in this configuration + // set. 
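+	//
+	// Each element pairs a namespace with an option name and value. An
+	// illustrative entry (editor's sketch, not generated SDK code; the
+	// namespace and values are examples, not defaults):
+	//
+	//	&ConfigurationOptionSetting{
+	//		Namespace:  aws.String("aws:autoscaling:asg"),
+	//		OptionName: aws.String("MinSize"),
+	//		Value:      aws.String("2"),
+	//	}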
+	OptionSettings []*ConfigurationOptionSetting `type:"list"`
+
+	// The name of the solution stack this configuration set uses.
+	SolutionStackName *string `type:"string"`
+
+	// If not null, the name of the configuration template for this configuration
+	// set.
+	TemplateName *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ConfigurationSettingsDescription) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ConfigurationSettingsDescription) GoString() string {
+	return s.String()
+}
+
+// Request to create an application.
+type CreateApplicationInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the application.
+	//
+	// Constraint: This name must be unique within your account. If the specified
+	// name already exists, the action returns an InvalidParameterValue error.
+	ApplicationName *string `min:"1" type:"string" required:"true"`
+
+	// Describes the application.
+	Description *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CreateApplicationInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateApplicationInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateApplicationInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateApplicationInput"}
+	if s.ApplicationName == nil {
+		invalidParams.Add(request.NewErrParamRequired("ApplicationName"))
+	}
+	if s.ApplicationName != nil && len(*s.ApplicationName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ApplicationName", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type CreateApplicationVersionInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the application. If no application is found with this name, and
+	// AutoCreateApplication is false, returns an InvalidParameterValue error.
+	ApplicationName *string `min:"1" type:"string" required:"true"`
+
+	// Determines how the system behaves if the specified application for this version
+	// does not already exist:
+	//
+	// true : Automatically creates the specified application for this release
+	// if it does not already exist. false : Throws an InvalidParameterValue error
+	// if the specified application for this release does not already exist. Default:
+	// false
+	//
+	// Valid Values: true | false
+	AutoCreateApplication *bool `type:"boolean"`
+
+	// Describes this version.
+	Description *string `type:"string"`
+
+	// Preprocesses and validates the environment manifest and configuration files
+	// in the source bundle. Validating configuration files can identify issues
+	// prior to deploying the application version to an environment.
+	Process *bool `type:"boolean"`
+
+	// The Amazon S3 bucket and key that identify the location of the source bundle
+	// for this version.
+	//
+	// If data found at the Amazon S3 location exceeds the maximum allowed source
+	// bundle size, AWS Elastic Beanstalk returns an InvalidParameterValue error.
+	// The maximum size allowed is 512 MB.
+	//
+	// Default: If not specified, AWS Elastic Beanstalk uses a sample application.
+	// If only partially specified (for example, a bucket is provided but not the
+	// key) or if no data is found at the Amazon S3 location, AWS Elastic Beanstalk
+	// returns an InvalidParameterCombination error.
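+	//
+	// An illustrative value (editor's sketch, not generated SDK code; the
+	// bucket and key are hypothetical):
+	//
+	//	SourceBundle: &S3Location{
+	//		S3Bucket: aws.String("my-bucket"),
+	//		S3Key:    aws.String("releases/my-app-v1.zip"),
+	//	}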
+	SourceBundle *S3Location `type:"structure"`
+
+	// A label identifying this version.
+	//
+	// Constraint: Must be unique per application. If an application version already
+	// exists with this label for the specified application, AWS Elastic Beanstalk
+	// returns an InvalidParameterValue error.
+	VersionLabel *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateApplicationVersionInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateApplicationVersionInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateApplicationVersionInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateApplicationVersionInput"}
+	if s.ApplicationName == nil {
+		invalidParams.Add(request.NewErrParamRequired("ApplicationName"))
+	}
+	if s.ApplicationName != nil && len(*s.ApplicationName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ApplicationName", 1))
+	}
+	if s.VersionLabel == nil {
+		invalidParams.Add(request.NewErrParamRequired("VersionLabel"))
+	}
+	if s.VersionLabel != nil && len(*s.VersionLabel) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("VersionLabel", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Request to create a configuration template.
+type CreateConfigurationTemplateInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the application to associate with this configuration template.
+	// If no application is found with this name, AWS Elastic Beanstalk returns
+	// an InvalidParameterValue error.
+	ApplicationName *string `min:"1" type:"string" required:"true"`
+
+	// Describes this configuration.
+	Description *string `type:"string"`
+
+	// The ID of the environment used with this configuration template.
+	EnvironmentId *string `type:"string"`
+
+	// If specified, AWS Elastic Beanstalk sets the specified configuration option
+	// to the requested value. The new value overrides the value obtained from the
+	// solution stack or the source configuration template.
+	OptionSettings []*ConfigurationOptionSetting `type:"list"`
+
+	// The name of the solution stack used by this configuration. The solution stack
+	// specifies the operating system, architecture, and application server for
+	// a configuration template. It determines the set of configuration options
+	// as well as the possible and default values.
+	//
+	// Use ListAvailableSolutionStacks to obtain a list of available solution
+	// stacks.
+	//
+	// A solution stack name or a source configuration parameter must be specified,
+	// otherwise AWS Elastic Beanstalk returns an InvalidParameterValue error.
+	//
+	// If a solution stack name is not specified and the source configuration
+	// parameter is specified, AWS Elastic Beanstalk uses the same solution stack
+	// as the source configuration template.
+	SolutionStackName *string `type:"string"`
+
+	// If specified, AWS Elastic Beanstalk uses the configuration values from the
+	// specified configuration template to create a new configuration.
+	//
+	// Values specified in the OptionSettings parameter of this call override
+	// any values obtained from the SourceConfiguration.
+	//
+	// If no configuration template is found, returns an InvalidParameterValue
+	// error.
+ // + // Constraint: If both the solution stack name parameter and the source configuration + // parameters are specified, the solution stack of the source configuration + // template must match the specified solution stack name or else AWS Elastic + // Beanstalk returns an InvalidParameterCombination error. + SourceConfiguration *SourceConfiguration `type:"structure"` + + // The name of the configuration template. + // + // Constraint: This name must be unique per application. + // + // Default: If a configuration template already exists with this name, AWS + // Elastic Beanstalk returns an InvalidParameterValue error. + TemplateName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateConfigurationTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateConfigurationTemplateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateConfigurationTemplateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateConfigurationTemplateInput"} + if s.ApplicationName == nil { + invalidParams.Add(request.NewErrParamRequired("ApplicationName")) + } + if s.ApplicationName != nil && len(*s.ApplicationName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationName", 1)) + } + if s.TemplateName == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateName")) + } + if s.TemplateName != nil && len(*s.TemplateName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TemplateName", 1)) + } + if s.OptionSettings != nil { + for i, v := range s.OptionSettings { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "OptionSettings", i), err.(request.ErrInvalidParams)) + } + } + } + if s.SourceConfiguration != nil { + if err := s.SourceConfiguration.Validate(); err != nil { + invalidParams.AddNested("SourceConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateEnvironmentInput struct { + _ struct{} `type:"structure"` + + // The name of the application that contains the version to be deployed. + // + // If no application is found with this name, CreateEnvironment returns an + // InvalidParameterValue error. + ApplicationName *string `min:"1" type:"string" required:"true"` + + // If specified, the environment attempts to use this value as the prefix for + // the CNAME. If not specified, the CNAME is generated automatically by appending + // a random alphanumeric string to the environment name. + CNAMEPrefix *string `min:"4" type:"string"` + + // Describes this environment. + Description *string `type:"string"` + + // A unique name for the deployment environment. Used in the application URL. + // + // Constraint: Must be from 4 to 40 characters in length. The name can contain + // only letters, numbers, and hyphens. It cannot start or end with a hyphen. + // This name must be unique in your account. If the specified name already exists, + // AWS Elastic Beanstalk returns an InvalidParameterValue error. + // + // Default: If the CNAME parameter is not specified, the environment name becomes + // part of the CNAME, and therefore part of the visible URL for your application. + EnvironmentName *string `min:"4" type:"string"` + + // The name of the group to which the target environment belongs. 
Specify a
+	// group name only if the environment's name is specified in an environment
+	// manifest and not with the environment name parameter. See Environment Manifest
+	// (env.yaml) (http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/environment-cfg-manifest.html)
+	// for details.
+	GroupName *string `min:"1" type:"string"`
+
+	// If specified, AWS Elastic Beanstalk sets the specified configuration options
+	// to the requested value in the configuration set for the new environment.
+	// These override the values obtained from the solution stack or the configuration
+	// template.
+	OptionSettings []*ConfigurationOptionSetting `type:"list"`
+
+	// A list of custom user-defined configuration options to remove from the configuration
+	// set for this new environment.
+	OptionsToRemove []*OptionSpecification `type:"list"`
+
+	// This is an alternative to specifying a template name. If specified, AWS Elastic
+	// Beanstalk sets the configuration values to the default values associated
+	// with the specified solution stack.
+	//
+	// Condition: You must specify either this or a TemplateName, but not both.
+	// If you specify both, AWS Elastic Beanstalk returns an InvalidParameterCombination
+	// error. If you do not specify either, AWS Elastic Beanstalk returns a MissingRequiredParameter
+	// error.
+	SolutionStackName *string `type:"string"`
+
+	// This specifies the tags applied to resources in the environment.
+	Tags []*Tag `type:"list"`
+
+	// The name of the configuration template to use in deployment. If no configuration
+	// template is found with this name, AWS Elastic Beanstalk returns an InvalidParameterValue
+	// error.
+	//
+	// Condition: You must specify either this parameter or a SolutionStackName,
+	// but not both. If you specify both, AWS Elastic Beanstalk returns an InvalidParameterCombination
+	// error. If you do not specify either, AWS Elastic Beanstalk returns a MissingRequiredParameter
+	// error.
+	TemplateName *string `min:"1" type:"string"`
+
+	// This specifies the tier to use for creating this environment.
+	Tier *EnvironmentTier `type:"structure"`
+
+	// The name of the application version to deploy.
+	//
+	// If the specified application has no associated application versions, AWS
+	// Elastic Beanstalk CreateEnvironment returns an InvalidParameterValue error.
+	//
+	// Default: If not specified, AWS Elastic Beanstalk attempts to launch the
+	// sample application in the container.
+	VersionLabel *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateEnvironmentInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateEnvironmentInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateEnvironmentInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateEnvironmentInput"}
+	if s.ApplicationName == nil {
+		invalidParams.Add(request.NewErrParamRequired("ApplicationName"))
+	}
+	if s.ApplicationName != nil && len(*s.ApplicationName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ApplicationName", 1))
+	}
+	if s.CNAMEPrefix != nil && len(*s.CNAMEPrefix) < 4 {
+		invalidParams.Add(request.NewErrParamMinLen("CNAMEPrefix", 4))
+	}
+	if s.EnvironmentName != nil && len(*s.EnvironmentName) < 4 {
+		invalidParams.Add(request.NewErrParamMinLen("EnvironmentName", 4))
+	}
+	if s.GroupName != nil && len(*s.GroupName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("GroupName", 1))
+	}
+	if s.TemplateName != nil && len(*s.TemplateName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("TemplateName", 1))
+	}
+	if s.VersionLabel != nil && len(*s.VersionLabel) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("VersionLabel", 1))
+	}
+	if s.OptionSettings != nil {
+		for i, v := range s.OptionSettings {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "OptionSettings", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+	if s.OptionsToRemove != nil {
+		for i, v := range s.OptionsToRemove {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "OptionsToRemove", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+	if s.Tags != nil {
+		for i, v := range s.Tags {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type CreateStorageLocationInput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateStorageLocationInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateStorageLocationInput) GoString() string {
+	return s.String()
+}
+
+// Results of a CreateStorageLocation call.
+type CreateStorageLocationOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the Amazon S3 bucket created.
+	S3Bucket *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CreateStorageLocationOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateStorageLocationOutput) GoString() string {
+	return s.String()
+}
+
+// Request to delete an application.
+type DeleteApplicationInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the application to delete.
+	ApplicationName *string `min:"1" type:"string" required:"true"`
+
+	// When set to true, running environments will be terminated before deleting
+	// the application.
+	TerminateEnvByForce *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s DeleteApplicationInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteApplicationInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
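+// Validation normally happens automatically: the SDK's parameter-validation
+// handler calls Validate before each request is sent, so callers rarely need
+// to invoke it by hand. An illustrative manual check (editor's sketch, not
+// generated SDK code):
+//
+//	in := &DeleteApplicationInput{}
+//	if err := in.Validate(); err != nil {
+//		// the missing required ApplicationName is reported here
+//	}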
+func (s *DeleteApplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteApplicationInput"} + if s.ApplicationName == nil { + invalidParams.Add(request.NewErrParamRequired("ApplicationName")) + } + if s.ApplicationName != nil && len(*s.ApplicationName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteApplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteApplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteApplicationOutput) GoString() string { + return s.String() +} + +// Request to delete an application version. +type DeleteApplicationVersionInput struct { + _ struct{} `type:"structure"` + + // The name of the application to delete releases from. + ApplicationName *string `min:"1" type:"string" required:"true"` + + // Indicates whether to delete the associated source bundle from Amazon S3: + // + // true: An attempt is made to delete the associated Amazon S3 source bundle + // specified at time of creation. false: No action is taken on the Amazon + // S3 source bundle specified at time of creation. Valid Values: true | false + DeleteSourceBundle *bool `type:"boolean"` + + // The label of the version to delete. + VersionLabel *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteApplicationVersionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteApplicationVersionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteApplicationVersionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteApplicationVersionInput"} + if s.ApplicationName == nil { + invalidParams.Add(request.NewErrParamRequired("ApplicationName")) + } + if s.ApplicationName != nil && len(*s.ApplicationName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationName", 1)) + } + if s.VersionLabel == nil { + invalidParams.Add(request.NewErrParamRequired("VersionLabel")) + } + if s.VersionLabel != nil && len(*s.VersionLabel) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VersionLabel", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteApplicationVersionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteApplicationVersionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteApplicationVersionOutput) GoString() string { + return s.String() +} + +// Request to delete a configuration template. +type DeleteConfigurationTemplateInput struct { + _ struct{} `type:"structure"` + + // The name of the application to delete the configuration template from. + ApplicationName *string `min:"1" type:"string" required:"true"` + + // The name of the configuration template to delete. 
+ TemplateName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteConfigurationTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteConfigurationTemplateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteConfigurationTemplateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteConfigurationTemplateInput"} + if s.ApplicationName == nil { + invalidParams.Add(request.NewErrParamRequired("ApplicationName")) + } + if s.ApplicationName != nil && len(*s.ApplicationName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationName", 1)) + } + if s.TemplateName == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateName")) + } + if s.TemplateName != nil && len(*s.TemplateName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TemplateName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteConfigurationTemplateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteConfigurationTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteConfigurationTemplateOutput) GoString() string { + return s.String() +} + +// Request to delete a draft environment configuration. +type DeleteEnvironmentConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the application the environment is associated with. + ApplicationName *string `min:"1" type:"string" required:"true"` + + // The name of the environment to delete the draft configuration from. + EnvironmentName *string `min:"4" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteEnvironmentConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteEnvironmentConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteEnvironmentConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteEnvironmentConfigurationInput"} + if s.ApplicationName == nil { + invalidParams.Add(request.NewErrParamRequired("ApplicationName")) + } + if s.ApplicationName != nil && len(*s.ApplicationName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationName", 1)) + } + if s.EnvironmentName == nil { + invalidParams.Add(request.NewErrParamRequired("EnvironmentName")) + } + if s.EnvironmentName != nil && len(*s.EnvironmentName) < 4 { + invalidParams.Add(request.NewErrParamMinLen("EnvironmentName", 4)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteEnvironmentConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteEnvironmentConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteEnvironmentConfigurationOutput) GoString() string { + return s.String() +} + +// Information about an application version deployment. +type Deployment struct { + _ struct{} `type:"structure"` + + // The ID of the deployment. 
This number increases by one each time that you
+	// deploy source code or change instance configuration settings.
+	DeploymentId *int64 `type:"long"`
+
+	// For in-progress deployments, the time that the deployment started.
+	//
+	// For completed deployments, the time that the deployment ended.
+	DeploymentTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+	// The status of the deployment:
+	//
+	// In Progress : The deployment is in progress. Deployed : The deployment
+	// succeeded. Failed : The deployment failed.
+	Status *string `type:"string"`
+
+	// The version label of the application version in the deployment.
+	VersionLabel *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Deployment) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Deployment) GoString() string {
+	return s.String()
+}
+
+// Request to describe application versions.
+type DescribeApplicationVersionsInput struct {
+	_ struct{} `type:"structure"`
+
+	// If specified, AWS Elastic Beanstalk restricts the returned descriptions to
+	// only include ones that are associated with the specified application.
+	ApplicationName *string `min:"1" type:"string"`
+
+	// If specified, restricts the returned descriptions to only include ones that
+	// have the specified version labels.
+	VersionLabels []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeApplicationVersionsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeApplicationVersionsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeApplicationVersionsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DescribeApplicationVersionsInput"}
+	if s.ApplicationName != nil && len(*s.ApplicationName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ApplicationName", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Result message wrapping a list of application version descriptions.
+type DescribeApplicationVersionsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// List of ApplicationVersionDescription objects sorted by order of creation.
+	ApplicationVersions []*ApplicationVersionDescription `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeApplicationVersionsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeApplicationVersionsOutput) GoString() string {
+	return s.String()
+}
+
+// Request to describe one or more applications.
+type DescribeApplicationsInput struct {
+	_ struct{} `type:"structure"`
+
+	// If specified, AWS Elastic Beanstalk restricts the returned descriptions to
+	// only include those with the specified names.
+	ApplicationNames []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeApplicationsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeApplicationsInput) GoString() string {
+	return s.String()
+}
+
+// Result message containing a list of application descriptions.
+type DescribeApplicationsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// This parameter contains a list of ApplicationDescription.
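+	//
+	// Illustrative iteration over the result (editor's sketch, not generated
+	// SDK code; aws.StringValue is the nil-safe dereference helper from the
+	// aws package):
+	//
+	//	for _, app := range out.Applications {
+	//		fmt.Println(aws.StringValue(app.ApplicationName))
+	//	}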
+	Applications []*ApplicationDescription `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeApplicationsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeApplicationsOutput) GoString() string {
+	return s.String()
+}
+
+// Request to describe the configuration options of a configuration template
+// or environment.
+type DescribeConfigurationOptionsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the application associated with the configuration template or
+	// environment. Only needed if you want to describe the configuration options
+	// associated with either the configuration template or environment.
+	ApplicationName *string `min:"1" type:"string"`
+
+	// The name of the environment whose configuration options you want to describe.
+	EnvironmentName *string `min:"4" type:"string"`
+
+	// If specified, restricts the descriptions to only the specified options.
+	Options []*OptionSpecification `type:"list"`
+
+	// The name of the solution stack whose configuration options you want to describe.
+	SolutionStackName *string `type:"string"`
+
+	// The name of the configuration template whose configuration options you want
+	// to describe.
+	TemplateName *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeConfigurationOptionsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeConfigurationOptionsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeConfigurationOptionsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DescribeConfigurationOptionsInput"}
+	if s.ApplicationName != nil && len(*s.ApplicationName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ApplicationName", 1))
+	}
+	if s.EnvironmentName != nil && len(*s.EnvironmentName) < 4 {
+		invalidParams.Add(request.NewErrParamMinLen("EnvironmentName", 4))
+	}
+	if s.TemplateName != nil && len(*s.TemplateName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("TemplateName", 1))
+	}
+	if s.Options != nil {
+		for i, v := range s.Options {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Options", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Describes the settings for a specified configuration set.
+type DescribeConfigurationOptionsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of ConfigurationOptionDescription.
+	Options []*ConfigurationOptionDescription `type:"list"`
+
+	// The name of the solution stack these configuration options belong to.
+	SolutionStackName *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeConfigurationOptionsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeConfigurationOptionsOutput) GoString() string {
+	return s.String()
+}
+
+// Request to describe the configuration settings for a specified solution
+// stack or configuration template.
+type DescribeConfigurationSettingsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The application for the environment or configuration template.
+	ApplicationName *string `min:"1" type:"string" required:"true"`
+
+	// The name of the environment to describe.
+	//
+	// Condition: You must specify either this or a TemplateName, but not both.
+	// If you specify both, AWS Elastic Beanstalk returns an InvalidParameterCombination
+	// error. If you do not specify either, AWS Elastic Beanstalk returns a MissingRequiredParameter
+	// error.
+	EnvironmentName *string `min:"4" type:"string"`
+
+	// The name of the configuration template to describe.
+	//
+	// Condition: You must specify either this parameter or an EnvironmentName,
+	// but not both. If you specify both, AWS Elastic Beanstalk returns an InvalidParameterCombination
+	// error. If you do not specify either, AWS Elastic Beanstalk returns a MissingRequiredParameter
+	// error.
+	TemplateName *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeConfigurationSettingsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeConfigurationSettingsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeConfigurationSettingsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DescribeConfigurationSettingsInput"}
+	if s.ApplicationName == nil {
+		invalidParams.Add(request.NewErrParamRequired("ApplicationName"))
+	}
+	if s.ApplicationName != nil && len(*s.ApplicationName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ApplicationName", 1))
+	}
+	if s.EnvironmentName != nil && len(*s.EnvironmentName) < 4 {
+		invalidParams.Add(request.NewErrParamMinLen("EnvironmentName", 4))
+	}
+	if s.TemplateName != nil && len(*s.TemplateName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("TemplateName", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The results from a request to describe the configuration settings of an
+// environment.
+type DescribeConfigurationSettingsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of ConfigurationSettingsDescription.
+	ConfigurationSettings []*ConfigurationSettingsDescription `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeConfigurationSettingsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeConfigurationSettingsOutput) GoString() string {
+	return s.String()
+}
+
+// Request to retrieve information about the health of an environment.
+type DescribeEnvironmentHealthInput struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies the response elements you wish to receive. If no attribute names
+	// are specified, AWS Elastic Beanstalk only returns the name of the environment.
+	AttributeNames []*string `type:"list"`
+
+	// Specifies the AWS Elastic Beanstalk environment ID.
+	//
+	// Condition: You must specify either this or an EnvironmentName, or both.
+	// If you do not specify either, AWS Elastic Beanstalk returns a MissingRequiredParameter
+	// error.
+	EnvironmentId *string `type:"string"`
+
+	// Specifies the AWS Elastic Beanstalk environment name.
+	//
+	// Condition: You must specify either this or an EnvironmentId, or both. If
+	// you do not specify either, AWS Elastic Beanstalk returns a MissingRequiredParameter
+	// error.
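+	//
+	// An illustrative request (editor's sketch, not generated SDK code; the
+	// environment name is hypothetical, and passing "All" as an attribute
+	// name assumes the service accepts it to request every element):
+	//
+	//	in := &DescribeEnvironmentHealthInput{
+	//		EnvironmentName: aws.String("my-env"),
+	//		AttributeNames:  []*string{aws.String("All")},
+	//	}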
+	EnvironmentName *string `min:"4" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeEnvironmentHealthInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeEnvironmentHealthInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeEnvironmentHealthInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DescribeEnvironmentHealthInput"}
+	if s.EnvironmentName != nil && len(*s.EnvironmentName) < 4 {
+		invalidParams.Add(request.NewErrParamMinLen("EnvironmentName", 4))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Health details for an AWS Elastic Beanstalk environment.
+type DescribeEnvironmentHealthOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Represents the application metrics for a specified environment.
+	ApplicationMetrics *ApplicationMetrics `type:"structure"`
+
+	// Returns potential causes for the reported status.
+	Causes []*string `type:"list"`
+
+	// Returns the color indicator that provides information about the health of
+	// the environment. For more information, see Health Colors and Statuses (http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/health-enhanced-status.html).
+	Color *string `type:"string"`
+
+	// The AWS Elastic Beanstalk environment name.
+	EnvironmentName *string `min:"4" type:"string"`
+
+	// Contains the response body with information about the health of the environment.
+	HealthStatus *string `type:"string"`
+
+	// Represents summary information about the health of an instance. For more
+	// information, see Health Colors and Statuses (http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/health-enhanced-status.html).
+	InstancesHealth *InstanceHealthSummary `type:"structure"`
+
+	// The date and time the information was last refreshed.
+	RefreshedAt *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+	// Returns the health status value of the environment. For more information,
+	// see Health Colors and Statuses (http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/health-enhanced-status.html).
+	Status *string `type:"string" enum:"EnvironmentHealth"`
+}
+
+// String returns the string representation
+func (s DescribeEnvironmentHealthOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeEnvironmentHealthOutput) GoString() string {
+	return s.String()
+}
+
+// Request to list completed and failed managed actions.
+type DescribeEnvironmentManagedActionHistoryInput struct {
+	_ struct{} `type:"structure"`
+
+	// The environment ID of the target environment.
+	EnvironmentId *string `type:"string"`
+
+	// The name of the target environment.
+	EnvironmentName *string `min:"4" type:"string"`
+
+	// The maximum number of items to return for a single request.
+	MaxItems *int64 `type:"integer"`
+
+	// The pagination token returned by a previous request.
+	NextToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeEnvironmentManagedActionHistoryInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeEnvironmentManagedActionHistoryInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeEnvironmentManagedActionHistoryInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DescribeEnvironmentManagedActionHistoryInput"}
+	if s.EnvironmentName != nil && len(*s.EnvironmentName) < 4 {
+		invalidParams.Add(request.NewErrParamMinLen("EnvironmentName", 4))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// A result message containing a list of completed and failed managed actions.
+type DescribeEnvironmentManagedActionHistoryOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of completed and failed managed actions.
+	ManagedActionHistoryItems []*ManagedActionHistoryItem `min:"1" type:"list"`
+
+	// A pagination token that you pass to DescribeEnvironmentManagedActionHistory
+	// to get the next page of results.
+	NextToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeEnvironmentManagedActionHistoryOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeEnvironmentManagedActionHistoryOutput) GoString() string {
+	return s.String()
+}
+
+// Request to list an environment's upcoming and in-progress managed actions.
+type DescribeEnvironmentManagedActionsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The environment ID of the target environment.
+	EnvironmentId *string `type:"string"`
+
+	// The name of the target environment.
+	EnvironmentName *string `type:"string"`
+
+	// To show only actions with a particular status, specify a status.
+	Status *string `type:"string" enum:"ActionStatus"`
+}
+
+// String returns the string representation
+func (s DescribeEnvironmentManagedActionsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeEnvironmentManagedActionsInput) GoString() string {
+	return s.String()
+}
+
+// The result message containing a list of managed actions.
+type DescribeEnvironmentManagedActionsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of upcoming and in-progress managed actions.
+	ManagedActions []*ManagedAction `min:"1" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeEnvironmentManagedActionsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeEnvironmentManagedActionsOutput) GoString() string {
+	return s.String()
+}
+
+// Request to describe the resources in an environment.
+type DescribeEnvironmentResourcesInput struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the environment for which to retrieve AWS resource usage data.
+	//
+	// Condition: You must specify either this or an EnvironmentName, or both.
+	// If you do not specify either, AWS Elastic Beanstalk returns a MissingRequiredParameter
+	// error.
+	EnvironmentId *string `type:"string"`
+
+	// The name of the environment for which to retrieve AWS resource usage data.
+	//
+	// Condition: You must specify either this or an EnvironmentId, or both. If
+	// you do not specify either, AWS Elastic Beanstalk returns a MissingRequiredParameter
+	// error.
+	EnvironmentName *string `min:"4" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeEnvironmentResourcesInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeEnvironmentResourcesInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeEnvironmentResourcesInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DescribeEnvironmentResourcesInput"}
+	if s.EnvironmentName != nil && len(*s.EnvironmentName) < 4 {
+		invalidParams.Add(request.NewErrParamMinLen("EnvironmentName", 4))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Result message containing a list of environment resource descriptions.
+type DescribeEnvironmentResourcesOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The EnvironmentResourceDescription for the environment.
+	EnvironmentResources *EnvironmentResourceDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeEnvironmentResourcesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeEnvironmentResourcesOutput) GoString() string {
+	return s.String()
+}
+
+// Request to describe one or more environments.
+type DescribeEnvironmentsInput struct {
+	_ struct{} `type:"structure"`
+
+	// If specified, AWS Elastic Beanstalk restricts the returned descriptions to
+	// include only those that are associated with this application.
+	ApplicationName *string `min:"1" type:"string"`
+
+	// If specified, AWS Elastic Beanstalk restricts the returned descriptions to
+	// include only those that have the specified IDs.
+	EnvironmentIds []*string `type:"list"`
+
+	// If specified, AWS Elastic Beanstalk restricts the returned descriptions to
+	// include only those that have the specified names.
+	EnvironmentNames []*string `type:"list"`
+
+	// Indicates whether to include deleted environments:
+	//
+	// true: Environments that have been deleted after IncludedDeletedBackTo are
+	// displayed.
+	//
+	// false: Do not include deleted environments.
+	IncludeDeleted *bool `type:"boolean"`
+
+	// If specified when IncludeDeleted is set to true, then environments deleted
+	// after this date are displayed.
+	IncludedDeletedBackTo *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+	// If specified, AWS Elastic Beanstalk restricts the returned descriptions to
+	// include only those that are associated with this application version.
+	VersionLabel *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeEnvironmentsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeEnvironmentsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeEnvironmentsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DescribeEnvironmentsInput"}
+	if s.ApplicationName != nil && len(*s.ApplicationName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ApplicationName", 1))
+	}
+	if s.VersionLabel != nil && len(*s.VersionLabel) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("VersionLabel", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Request to retrieve a list of events for an environment.
+type DescribeEventsInput struct {
+	_ struct{} `type:"structure"`
+
+	// If specified, AWS Elastic Beanstalk restricts the returned descriptions to
+	// include only those associated with this application.
+	ApplicationName *string `min:"1" type:"string"`
+
+	// If specified, AWS Elastic Beanstalk restricts the returned descriptions to
+	// those that occur up to, but not including, the EndTime.
+	EndTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+	// If specified, AWS Elastic Beanstalk restricts the returned descriptions to
+	// those associated with this environment.
+	EnvironmentId *string `type:"string"`
+
+	// If specified, AWS Elastic Beanstalk restricts the returned descriptions to
+	// those associated with this environment.
+	EnvironmentName *string `min:"4" type:"string"`
+
+	// Specifies the maximum number of events that can be returned, beginning with
+	// the most recent event.
+	MaxRecords *int64 `min:"1" type:"integer"`
+
+	// Pagination token. If specified, the next batch of events is returned.
+	NextToken *string `type:"string"`
+
+	// If specified, AWS Elastic Beanstalk restricts the described events to include
+	// only those associated with this request ID.
+	RequestId *string `type:"string"`
+
+	// If specified, limits the events returned from this call to include only those
+	// with the specified severity or higher.
+	Severity *string `type:"string" enum:"EventSeverity"`
+
+	// If specified, AWS Elastic Beanstalk restricts the returned descriptions to
+	// those that occur on or after this time.
+	StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+	// If specified, AWS Elastic Beanstalk restricts the returned descriptions to
+	// those that are associated with this environment configuration.
+	TemplateName *string `min:"1" type:"string"`
+
+	// If specified, AWS Elastic Beanstalk restricts the returned descriptions to
+	// those associated with this application version.
+	VersionLabel *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeEventsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeEventsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeEventsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeEventsInput"} + if s.ApplicationName != nil && len(*s.ApplicationName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationName", 1)) + } + if s.EnvironmentName != nil && len(*s.EnvironmentName) < 4 { + invalidParams.Add(request.NewErrParamMinLen("EnvironmentName", 4)) + } + if s.MaxRecords != nil && *s.MaxRecords < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxRecords", 1)) + } + if s.TemplateName != nil && len(*s.TemplateName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TemplateName", 1)) + } + if s.VersionLabel != nil && len(*s.VersionLabel) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VersionLabel", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Result message wrapping a list of event descriptions. +type DescribeEventsOutput struct { + _ struct{} `type:"structure"` + + // A list of EventDescription. + Events []*EventDescription `type:"list"` + + // If returned, this indicates that there are more results to obtain. Use this + // token in the next DescribeEvents call to get the next batch of events. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeEventsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventsOutput) GoString() string { + return s.String() +} + +// See the example below to learn how to create a request body. +type DescribeInstancesHealthInput struct { + _ struct{} `type:"structure"` + + // Specifies the response elements you wish to receive. If no attribute names + // are specified, AWS Elastic Beanstalk only returns a list of instances. + AttributeNames []*string `type:"list"` + + // Specifies the AWS Elastic Beanstalk environment ID. + EnvironmentId *string `type:"string"` + + // Specifies the AWS Elastic Beanstalk environment name. + EnvironmentName *string `min:"4" type:"string"` + + // Specifies the next token of the request. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeInstancesHealthInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstancesHealthInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeInstancesHealthInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeInstancesHealthInput"} + if s.EnvironmentName != nil && len(*s.EnvironmentName) < 4 { + invalidParams.Add(request.NewErrParamMinLen("EnvironmentName", 4)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// See the example below for a sample response. +type DescribeInstancesHealthOutput struct { + _ struct{} `type:"structure"` + + // Contains the response body with information about the health of the instance. + InstanceHealthList []*SingleInstanceHealth `type:"list"` + + // The next token. + NextToken *string `min:"1" type:"string"` + + // The date and time the information was last refreshed. 
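+	//
+	// Illustrative pagination loop driven by the NextToken above (editor's
+	// sketch, not generated SDK code; svc is an assumed service client and
+	// the environment name is hypothetical):
+	//
+	//	in := &DescribeInstancesHealthInput{EnvironmentName: aws.String("my-env")}
+	//	for {
+	//		out, err := svc.DescribeInstancesHealth(in)
+	//		if err != nil {
+	//			break // handle the error in real code
+	//		}
+	//		// ...consume out.InstanceHealthList...
+	//		if out.NextToken == nil {
+	//			break
+	//		}
+	//		in.NextToken = out.NextToken
+	//	}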
+ RefreshedAt *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+}
+
+// String returns the string representation
+func (s DescribeInstancesHealthOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeInstancesHealthOutput) GoString() string {
+ return s.String()
+}
+
+// Describes the properties of an environment.
+type EnvironmentDescription struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates if there is an in-progress environment configuration update or
+ // application version deployment that you can cancel.
+ //
+ // true: There is an update in progress.
+ //
+ // false: There are no updates currently in progress.
+ AbortableOperationInProgress *bool `type:"boolean"`
+
+ // The name of the application associated with this environment.
+ ApplicationName *string `min:"1" type:"string"`
+
+ // The URL to the CNAME for this environment.
+ CNAME *string `min:"1" type:"string"`
+
+ // The creation date for this environment.
+ DateCreated *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // The last modified date for this environment.
+ DateUpdated *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // Describes this environment.
+ Description *string `type:"string"`
+
+ // For load-balanced, autoscaling environments, the URL to the LoadBalancer.
+ // For single-instance environments, the IP address of the instance.
+ EndpointURL *string `type:"string"`
+
+ // The ID of this environment.
+ EnvironmentId *string `type:"string"`
+
+ // A list of links to other environments in the same group.
+ EnvironmentLinks []*EnvironmentLink `type:"list"`
+
+ // The name of this environment.
+ EnvironmentName *string `min:"4" type:"string"`
+
+ // Describes the health status of the environment. AWS Elastic Beanstalk indicates
+ // the failure levels for a running environment:
+ //
+ // Red: Indicates the environment is not responsive. Occurs when three or
+ // more consecutive failures occur for an environment.
+ //
+ // Yellow: Indicates that something is wrong. Occurs when two consecutive
+ // failures occur for an environment.
+ //
+ // Green: Indicates the environment is healthy and fully functional.
+ //
+ // Grey: Default health for a new environment. The environment is not fully
+ // launched and health checks have not started, or health checks are suspended
+ // during an UpdateEnvironment or RestartEnvironment request.
+ //
+ // Default: Grey
+ Health *string `type:"string" enum:"EnvironmentHealth"`
+
+ // Returns the health status of the application running in your environment.
+ // For more information, see Health Colors and Statuses (http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/health-enhanced-status.html).
+ HealthStatus *string `type:"string" enum:"EnvironmentHealthStatus"`
+
+ // The description of the AWS resources used by this environment.
+ Resources *EnvironmentResourcesDescription `type:"structure"`
+
+ // The name of the SolutionStack deployed with this environment.
+ SolutionStackName *string `type:"string"`
+
+ // The current operational status of the environment:
+ //
+ // Launching: Environment is in the process of initial deployment.
+ //
+ // Updating: Environment is in the process of updating its configuration settings
+ // or application version.
+ //
+ // Ready: Environment is available to have an action performed on it, such
+ // as update or terminate.
+ //
+ // Terminating: Environment is in the shut-down process.
+ //
+ // Terminated: Environment is not running.
+ Status *string `type:"string" enum:"EnvironmentStatus"` + + // The name of the configuration template used to originally launch this environment. + TemplateName *string `min:"1" type:"string"` + + // Describes the current tier of this environment. + Tier *EnvironmentTier `type:"structure"` + + // The application version deployed in this environment. + VersionLabel *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s EnvironmentDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnvironmentDescription) GoString() string { + return s.String() +} + +// Result message containing a list of environment descriptions. +type EnvironmentDescriptionsMessage struct { + _ struct{} `type:"structure"` + + // Returns an EnvironmentDescription list. + Environments []*EnvironmentDescription `type:"list"` +} + +// String returns the string representation +func (s EnvironmentDescriptionsMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnvironmentDescriptionsMessage) GoString() string { + return s.String() +} + +// The information retrieved from the Amazon EC2 instances. +type EnvironmentInfoDescription struct { + _ struct{} `type:"structure"` + + // The Amazon EC2 Instance ID for this information. + Ec2InstanceId *string `type:"string"` + + // The type of information retrieved. + InfoType *string `type:"string" enum:"EnvironmentInfoType"` + + // The retrieved information. + Message *string `type:"string"` + + // The time stamp when this information was retrieved. + SampleTimestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s EnvironmentInfoDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnvironmentInfoDescription) GoString() string { + return s.String() +} + +// A link to another environment, defined in the environment's manifest. Links +// provide connection information in system properties that can be used to connect +// to another environment in the same group. See Environment Manifest (env.yaml) +// (http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/environment-cfg-manifest.html) +// for details. +type EnvironmentLink struct { + _ struct{} `type:"structure"` + + // The name of the linked environment (the dependency). + EnvironmentName *string `type:"string"` + + // The name of the link. + LinkName *string `type:"string"` +} + +// String returns the string representation +func (s EnvironmentLink) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnvironmentLink) GoString() string { + return s.String() +} + +// Describes the AWS resources in use by this environment. This data is live. +type EnvironmentResourceDescription struct { + _ struct{} `type:"structure"` + + // The AutoScalingGroups used by this environment. + AutoScalingGroups []*AutoScalingGroup `type:"list"` + + // The name of the environment. + EnvironmentName *string `min:"4" type:"string"` + + // The Amazon EC2 instances used by this environment. + Instances []*Instance `type:"list"` + + // The Auto Scaling launch configurations in use by this environment. + LaunchConfigurations []*LaunchConfiguration `type:"list"` + + // The LoadBalancers in use by this environment. + LoadBalancers []*LoadBalancer `type:"list"` + + // The queues used by this environment. 
+ Queues []*Queue `type:"list"` + + // The AutoScaling triggers in use by this environment. + Triggers []*Trigger `type:"list"` +} + +// String returns the string representation +func (s EnvironmentResourceDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnvironmentResourceDescription) GoString() string { + return s.String() +} + +// Describes the AWS resources in use by this environment. This data is not +// live data. +type EnvironmentResourcesDescription struct { + _ struct{} `type:"structure"` + + // Describes the LoadBalancer. + LoadBalancer *LoadBalancerDescription `type:"structure"` +} + +// String returns the string representation +func (s EnvironmentResourcesDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnvironmentResourcesDescription) GoString() string { + return s.String() +} + +// Describes the properties of an environment tier +type EnvironmentTier struct { + _ struct{} `type:"structure"` + + // The name of this environment tier. + Name *string `type:"string"` + + // The type of this environment tier. + Type *string `type:"string"` + + // The version of this environment tier. + Version *string `type:"string"` +} + +// String returns the string representation +func (s EnvironmentTier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnvironmentTier) GoString() string { + return s.String() +} + +// Describes an event. +type EventDescription struct { + _ struct{} `type:"structure"` + + // The application associated with the event. + ApplicationName *string `min:"1" type:"string"` + + // The name of the environment associated with this event. + EnvironmentName *string `min:"4" type:"string"` + + // The date when the event occurred. + EventDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The event message. + Message *string `type:"string"` + + // The web service request ID for the activity of this event. + RequestId *string `type:"string"` + + // The severity level of this event. + Severity *string `type:"string" enum:"EventSeverity"` + + // The name of the configuration associated with this event. + TemplateName *string `min:"1" type:"string"` + + // The release label for the application version associated with this event. + VersionLabel *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s EventDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EventDescription) GoString() string { + return s.String() +} + +// The description of an Amazon EC2 instance. +type Instance struct { + _ struct{} `type:"structure"` + + // The ID of the Amazon EC2 instance. + Id *string `type:"string"` +} + +// String returns the string representation +func (s Instance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Instance) GoString() string { + return s.String() +} + +// Represents summary information about the health of an instance. For more +// information, see Health Colors and Statuses (http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/health-enhanced-status.html). +type InstanceHealthSummary struct { + _ struct{} `type:"structure"` + + // Red. The health agent is reporting a high number of request failures or other + // issues for an instance or environment. + Degraded *int64 `type:"integer"` + + // Green. 
An operation is in progress on an instance. + Info *int64 `type:"integer"` + + // Grey. AWS Elastic Beanstalk and the health agent are reporting no data on + // an instance. + NoData *int64 `type:"integer"` + + // Green. An instance is passing health checks and the health agent is not reporting + // any problems. + Ok *int64 `type:"integer"` + + // Grey. An operation is in progress on an instance within the command timeout. + Pending *int64 `type:"integer"` + + // Red. The health agent is reporting a very high number of request failures + // or other issues for an instance or environment. + Severe *int64 `type:"integer"` + + // Grey. AWS Elastic Beanstalk and the health agent are reporting an insufficient + // amount of data on an instance. + Unknown *int64 `type:"integer"` + + // Yellow. The health agent is reporting a moderate number of request failures + // or other issues for an instance or environment. + Warning *int64 `type:"integer"` +} + +// String returns the string representation +func (s InstanceHealthSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceHealthSummary) GoString() string { + return s.String() +} + +// Represents the average latency for the slowest X percent of requests over +// the last 10 seconds. +type Latency struct { + _ struct{} `type:"structure"` + + // The average latency for the slowest 90 percent of requests over the last + // 10 seconds. + P10 *float64 `type:"double"` + + // The average latency for the slowest 50 percent of requests over the last + // 10 seconds. + P50 *float64 `type:"double"` + + // The average latency for the slowest 25 percent of requests over the last + // 10 seconds. + P75 *float64 `type:"double"` + + // The average latency for the slowest 15 percent of requests over the last + // 10 seconds. + P85 *float64 `type:"double"` + + // The average latency for the slowest 10 percent of requests over the last + // 10 seconds. + P90 *float64 `type:"double"` + + // The average latency for the slowest 5 percent of requests over the last 10 + // seconds. + P95 *float64 `type:"double"` + + // The average latency for the slowest 1 percent of requests over the last 10 + // seconds. + P99 *float64 `type:"double"` + + // The average latency for the slowest 0.1 percent of requests over the last + // 10 seconds. + P999 *float64 `type:"double"` +} + +// String returns the string representation +func (s Latency) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Latency) GoString() string { + return s.String() +} + +// Describes an Auto Scaling launch configuration. +type LaunchConfiguration struct { + _ struct{} `type:"structure"` + + // The name of the launch configuration. + Name *string `type:"string"` +} + +// String returns the string representation +func (s LaunchConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchConfiguration) GoString() string { + return s.String() +} + +type ListAvailableSolutionStacksInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ListAvailableSolutionStacksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAvailableSolutionStacksInput) GoString() string { + return s.String() +} + +// A list of available AWS Elastic Beanstalk solution stacks. 
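+//
+// Illustrative usage (a sketch, not part of the generated code; "svc" is an
+// assumed *elasticbeanstalk.ElasticBeanstalk client):
+//
+//    out, err := svc.ListAvailableSolutionStacks(&elasticbeanstalk.ListAvailableSolutionStacksInput{})
+//    if err != nil {
+//        return err
+//    }
+//    for _, name := range out.SolutionStacks {
+//        fmt.Println(aws.StringValue(name)) // each entry is a *string
+//    }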
+type ListAvailableSolutionStacksOutput struct { + _ struct{} `type:"structure"` + + // A list of available solution stacks and their SolutionStackDescription. + SolutionStackDetails []*SolutionStackDescription `type:"list"` + + // A list of available solution stacks. + SolutionStacks []*string `type:"list"` +} + +// String returns the string representation +func (s ListAvailableSolutionStacksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAvailableSolutionStacksOutput) GoString() string { + return s.String() +} + +// Describes the properties of a Listener for the LoadBalancer. +type Listener struct { + _ struct{} `type:"structure"` + + // The port that is used by the Listener. + Port *int64 `type:"integer"` + + // The protocol that is used by the Listener. + Protocol *string `type:"string"` +} + +// String returns the string representation +func (s Listener) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Listener) GoString() string { + return s.String() +} + +// Describes a LoadBalancer. +type LoadBalancer struct { + _ struct{} `type:"structure"` + + // The name of the LoadBalancer. + Name *string `type:"string"` +} + +// String returns the string representation +func (s LoadBalancer) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoadBalancer) GoString() string { + return s.String() +} + +// Describes the details of a LoadBalancer. +type LoadBalancerDescription struct { + _ struct{} `type:"structure"` + + // The domain name of the LoadBalancer. + Domain *string `type:"string"` + + // A list of Listeners used by the LoadBalancer. + Listeners []*Listener `type:"list"` + + // The name of the LoadBalancer. + LoadBalancerName *string `type:"string"` +} + +// String returns the string representation +func (s LoadBalancerDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoadBalancerDescription) GoString() string { + return s.String() +} + +// The record of an upcoming or in-progress managed action. +type ManagedAction struct { + _ struct{} `type:"structure"` + + // A description of the managed action. + ActionDescription *string `type:"string"` + + // A unique identifier for the managed action. + ActionId *string `type:"string"` + + // The type of managed action. + ActionType *string `type:"string" enum:"ActionType"` + + // The status of the managed action. If the action is Scheduled, you can apply + // it immediately with ApplyEnvironmentManagedAction. + Status *string `type:"string" enum:"ActionStatus"` + + // The start time of the maintenance window in which the managed action will + // execute. + WindowStartTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s ManagedAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ManagedAction) GoString() string { + return s.String() +} + +// The record of a completed or failed managed action. +type ManagedActionHistoryItem struct { + _ struct{} `type:"structure"` + + // A description of the managed action. + ActionDescription *string `type:"string"` + + // A unique identifier for the managed action. + ActionId *string `type:"string"` + + // The type of the managed action. 
+ ActionType *string `type:"string" enum:"ActionType"` + + // The date and time that the action started executing. + ExecutedTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // If the action failed, a description of the failure. + FailureDescription *string `type:"string"` + + // If the action failed, the type of failure. + FailureType *string `type:"string" enum:"FailureType"` + + // The date and time that the action finished executing. + FinishedTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The status of the action. + Status *string `type:"string" enum:"ActionHistoryStatus"` +} + +// String returns the string representation +func (s ManagedActionHistoryItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ManagedActionHistoryItem) GoString() string { + return s.String() +} + +// A regular expression representing a restriction on a string configuration +// option value. +type OptionRestrictionRegex struct { + _ struct{} `type:"structure"` + + // A unique name representing this regular expression. + Label *string `type:"string"` + + // The regular expression pattern that a string configuration option value with + // this restriction must match. + Pattern *string `type:"string"` +} + +// String returns the string representation +func (s OptionRestrictionRegex) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OptionRestrictionRegex) GoString() string { + return s.String() +} + +// A specification identifying an individual configuration option. +type OptionSpecification struct { + _ struct{} `type:"structure"` + + // A unique namespace identifying the option's associated AWS resource. + Namespace *string `type:"string"` + + // The name of the configuration option. + OptionName *string `type:"string"` + + // A unique resource name for a time-based scaling configuration option. + ResourceName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s OptionSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OptionSpecification) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OptionSpecification) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OptionSpecification"} + if s.ResourceName != nil && len(*s.ResourceName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Describes a queue. +type Queue struct { + _ struct{} `type:"structure"` + + // The name of the queue. + Name *string `type:"string"` + + // The URL of the queue. + URL *string `type:"string"` +} + +// String returns the string representation +func (s Queue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Queue) GoString() string { + return s.String() +} + +type RebuildEnvironmentInput struct { + _ struct{} `type:"structure"` + + // The ID of the environment to rebuild. + // + // Condition: You must specify either this or an EnvironmentName, or both. + // If you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter + // error. + EnvironmentId *string `type:"string"` + + // The name of the environment to rebuild. 
+ // + // Condition: You must specify either this or an EnvironmentId, or both. If + // you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter + // error. + EnvironmentName *string `min:"4" type:"string"` +} + +// String returns the string representation +func (s RebuildEnvironmentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebuildEnvironmentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RebuildEnvironmentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RebuildEnvironmentInput"} + if s.EnvironmentName != nil && len(*s.EnvironmentName) < 4 { + invalidParams.Add(request.NewErrParamMinLen("EnvironmentName", 4)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RebuildEnvironmentOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RebuildEnvironmentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebuildEnvironmentOutput) GoString() string { + return s.String() +} + +// Request to retrieve logs from an environment and store them in your Elastic +// Beanstalk storage bucket. +type RequestEnvironmentInfoInput struct { + _ struct{} `type:"structure"` + + // The ID of the environment of the requested data. + // + // If no such environment is found, RequestEnvironmentInfo returns an InvalidParameterValue + // error. + // + // Condition: You must specify either this or an EnvironmentName, or both. + // If you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter + // error. + EnvironmentId *string `type:"string"` + + // The name of the environment of the requested data. + // + // If no such environment is found, RequestEnvironmentInfo returns an InvalidParameterValue + // error. + // + // Condition: You must specify either this or an EnvironmentId, or both. If + // you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter + // error. + EnvironmentName *string `min:"4" type:"string"` + + // The type of information to request. + InfoType *string `type:"string" required:"true" enum:"EnvironmentInfoType"` +} + +// String returns the string representation +func (s RequestEnvironmentInfoInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestEnvironmentInfoInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
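+//
+// A hedged sketch of the two-step log-retrieval flow this input takes part in
+// (RequestEnvironmentInfo gathers the data, RetrieveEnvironmentInfo downloads
+// it); "svc" and the environment name are assumptions:
+//
+//    in := &elasticbeanstalk.RequestEnvironmentInfoInput{
+//        EnvironmentName: aws.String("my-env"),
+//        InfoType:        aws.String(elasticbeanstalk.EnvironmentInfoTypeTail),
+//    }
+//    if err := in.Validate(); err != nil {
+//        return err // catches the 4-character minimum and the required InfoType
+//    }
+//    if _, err := svc.RequestEnvironmentInfo(in); err != nil {
+//        return err
+//    }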
+func (s *RequestEnvironmentInfoInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RequestEnvironmentInfoInput"} + if s.EnvironmentName != nil && len(*s.EnvironmentName) < 4 { + invalidParams.Add(request.NewErrParamMinLen("EnvironmentName", 4)) + } + if s.InfoType == nil { + invalidParams.Add(request.NewErrParamRequired("InfoType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RequestEnvironmentInfoOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RequestEnvironmentInfoOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestEnvironmentInfoOutput) GoString() string { + return s.String() +} + +type RestartAppServerInput struct { + _ struct{} `type:"structure"` + + // The ID of the environment to restart the server for. + // + // Condition: You must specify either this or an EnvironmentName, or both. + // If you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter + // error. + EnvironmentId *string `type:"string"` + + // The name of the environment to restart the server for. + // + // Condition: You must specify either this or an EnvironmentId, or both. If + // you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter + // error. + EnvironmentName *string `min:"4" type:"string"` +} + +// String returns the string representation +func (s RestartAppServerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestartAppServerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RestartAppServerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestartAppServerInput"} + if s.EnvironmentName != nil && len(*s.EnvironmentName) < 4 { + invalidParams.Add(request.NewErrParamMinLen("EnvironmentName", 4)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RestartAppServerOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RestartAppServerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestartAppServerOutput) GoString() string { + return s.String() +} + +// Request to download logs retrieved with RequestEnvironmentInfo. +type RetrieveEnvironmentInfoInput struct { + _ struct{} `type:"structure"` + + // The ID of the data's environment. + // + // If no such environment is found, returns an InvalidParameterValue error. + // + // Condition: You must specify either this or an EnvironmentName, or both. + // If you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter + // error. + EnvironmentId *string `type:"string"` + + // The name of the data's environment. + // + // If no such environment is found, returns an InvalidParameterValue error. + // + // Condition: You must specify either this or an EnvironmentId, or both. If + // you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter + // error. + EnvironmentName *string `min:"4" type:"string"` + + // The type of information to retrieve. 
+ InfoType *string `type:"string" required:"true" enum:"EnvironmentInfoType"` +} + +// String returns the string representation +func (s RetrieveEnvironmentInfoInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RetrieveEnvironmentInfoInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RetrieveEnvironmentInfoInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RetrieveEnvironmentInfoInput"} + if s.EnvironmentName != nil && len(*s.EnvironmentName) < 4 { + invalidParams.Add(request.NewErrParamMinLen("EnvironmentName", 4)) + } + if s.InfoType == nil { + invalidParams.Add(request.NewErrParamRequired("InfoType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Result message containing a description of the requested environment info. +type RetrieveEnvironmentInfoOutput struct { + _ struct{} `type:"structure"` + + // The EnvironmentInfoDescription of the environment. + EnvironmentInfo []*EnvironmentInfoDescription `type:"list"` +} + +// String returns the string representation +func (s RetrieveEnvironmentInfoOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RetrieveEnvironmentInfoOutput) GoString() string { + return s.String() +} + +// A specification of a location in Amazon S3. +type S3Location struct { + _ struct{} `type:"structure"` + + // The Amazon S3 bucket where the data is located. + S3Bucket *string `type:"string"` + + // The Amazon S3 key where the data is located. + S3Key *string `type:"string"` +} + +// String returns the string representation +func (s S3Location) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3Location) GoString() string { + return s.String() +} + +// Represents health information from the specified instance that belongs to +// the AWS Elastic Beanstalk environment. Use the InstanceId property to specify +// the application instance for which you'd like to return data. +type SingleInstanceHealth struct { + _ struct{} `type:"structure"` + + // Represents the application metrics for a specified environment. + ApplicationMetrics *ApplicationMetrics `type:"structure"` + + // The availability zone in which the instance runs. + AvailabilityZone *string `type:"string"` + + // Represents the causes, which provide more information about the current health + // status. + Causes []*string `type:"list"` + + // Represents the color indicator that gives you information about the health + // of the EC2 instance. For more information, see Health Colors and Statuses + // (http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/health-enhanced-status.html). + Color *string `type:"string"` + + // Information about the most recent deployment to an instance. + Deployment *Deployment `type:"structure"` + + // Returns the health status of the specified instance. For more information, + // see Health Colors and Statuses (http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/health-enhanced-status.html). + HealthStatus *string `type:"string"` + + // The ID of the Amazon EC2 instance. + InstanceId *string `min:"1" type:"string"` + + // The instance's type. + InstanceType *string `type:"string"` + + // The time at which the EC2 instance was launched. 
+ LaunchedAt *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Represents CPU utilization and load average information for applications + // running in the specified environment. + System *SystemStatus `type:"structure"` +} + +// String returns the string representation +func (s SingleInstanceHealth) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SingleInstanceHealth) GoString() string { + return s.String() +} + +// Describes the solution stack. +type SolutionStackDescription struct { + _ struct{} `type:"structure"` + + // The permitted file types allowed for a solution stack. + PermittedFileTypes []*string `type:"list"` + + // The name of the solution stack. + SolutionStackName *string `type:"string"` +} + +// String returns the string representation +func (s SolutionStackDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SolutionStackDescription) GoString() string { + return s.String() +} + +// A specification for an environment configuration +type SourceConfiguration struct { + _ struct{} `type:"structure"` + + // The name of the application associated with the configuration. + ApplicationName *string `min:"1" type:"string"` + + // The name of the configuration template. + TemplateName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s SourceConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SourceConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SourceConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SourceConfiguration"} + if s.ApplicationName != nil && len(*s.ApplicationName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationName", 1)) + } + if s.TemplateName != nil && len(*s.TemplateName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TemplateName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the percentage of requests over the last 10 seconds that resulted +// in each type of status code response. For more information, see Status Code +// Definitions (http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html). +type StatusCodes struct { + _ struct{} `type:"structure"` + + // The percentage of requests over the last 10 seconds that resulted in a 2xx + // (200, 201, etc.) status code. + Status2xx *int64 `type:"integer"` + + // The percentage of requests over the last 10 seconds that resulted in a 3xx + // (300, 301, etc.) status code. + Status3xx *int64 `type:"integer"` + + // The percentage of requests over the last 10 seconds that resulted in a 4xx + // (400, 401, etc.) status code. + Status4xx *int64 `type:"integer"` + + // The percentage of requests over the last 10 seconds that resulted in a 5xx + // (500, 501, etc.) status code. + Status5xx *int64 `type:"integer"` +} + +// String returns the string representation +func (s StatusCodes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StatusCodes) GoString() string { + return s.String() +} + +// Swaps the CNAMEs of two environments. +type SwapEnvironmentCNAMEsInput struct { + _ struct{} `type:"structure"` + + // The ID of the destination environment. 
+ // + // Condition: You must specify at least the DestinationEnvironmentID or the + // DestinationEnvironmentName. You may also specify both. You must specify the + // SourceEnvironmentId with the DestinationEnvironmentId. + DestinationEnvironmentId *string `type:"string"` + + // The name of the destination environment. + // + // Condition: You must specify at least the DestinationEnvironmentID or the + // DestinationEnvironmentName. You may also specify both. You must specify the + // SourceEnvironmentName with the DestinationEnvironmentName. + DestinationEnvironmentName *string `min:"4" type:"string"` + + // The ID of the source environment. + // + // Condition: You must specify at least the SourceEnvironmentID or the SourceEnvironmentName. + // You may also specify both. If you specify the SourceEnvironmentId, you must + // specify the DestinationEnvironmentId. + SourceEnvironmentId *string `type:"string"` + + // The name of the source environment. + // + // Condition: You must specify at least the SourceEnvironmentID or the SourceEnvironmentName. + // You may also specify both. If you specify the SourceEnvironmentName, you + // must specify the DestinationEnvironmentName. + SourceEnvironmentName *string `min:"4" type:"string"` +} + +// String returns the string representation +func (s SwapEnvironmentCNAMEsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SwapEnvironmentCNAMEsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SwapEnvironmentCNAMEsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SwapEnvironmentCNAMEsInput"} + if s.DestinationEnvironmentName != nil && len(*s.DestinationEnvironmentName) < 4 { + invalidParams.Add(request.NewErrParamMinLen("DestinationEnvironmentName", 4)) + } + if s.SourceEnvironmentName != nil && len(*s.SourceEnvironmentName) < 4 { + invalidParams.Add(request.NewErrParamMinLen("SourceEnvironmentName", 4)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type SwapEnvironmentCNAMEsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SwapEnvironmentCNAMEsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SwapEnvironmentCNAMEsOutput) GoString() string { + return s.String() +} + +// Represents CPU utilization and load average information for applications +// running in the specified environment. +type SystemStatus struct { + _ struct{} `type:"structure"` + + // Represents CPU utilization information from the specified instance that belongs + // to the AWS Elastic Beanstalk environment. Use the instanceId property to + // specify the application instance for which you'd like to return data. + CPUUtilization *CPUUtilization `type:"structure"` + + // Load average in the last 1-minute and 5-minute periods. For more information, + // see Operating System Metrics (http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/health-enhanced-metrics.html#health-enhanced-metrics-os). + LoadAverage []*float64 `type:"list"` +} + +// String returns the string representation +func (s SystemStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SystemStatus) GoString() string { + return s.String() +} + +// Describes a tag applied to a resource in an environment. 
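+//
+// A minimal illustrative construction (the key and value are assumptions);
+// both fields carry a minimum length of 1, which Validate enforces client-side:
+//
+//    tag := &elasticbeanstalk.Tag{
+//        Key:   aws.String("stage"),
+//        Value: aws.String("prod"),
+//    }
+//    if err := tag.Validate(); err != nil {
+//        return err
+//    }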
+type Tag struct {
+ _ struct{} `type:"structure"`
+
+ // The key of the tag.
+ Key *string `min:"1" type:"string"`
+
+ // The value of the tag.
+ Value *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s Tag) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Tag) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Tag) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Tag"}
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.Value != nil && len(*s.Value) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Value", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Request to terminate an environment.
+type TerminateEnvironmentInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the environment to terminate.
+ //
+ // Condition: You must specify either this or an EnvironmentName, or both.
+ // If you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter
+ // error.
+ EnvironmentId *string `type:"string"`
+
+ // The name of the environment to terminate.
+ //
+ // Condition: You must specify either this or an EnvironmentId, or both. If
+ // you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter
+ // error.
+ EnvironmentName *string `min:"4" type:"string"`
+
+ // Terminates the target environment even if another environment in the same
+ // group is dependent on it.
+ ForceTerminate *bool `type:"boolean"`
+
+ // Indicates whether the associated AWS resources should shut down when the
+ // environment is terminated:
+ //
+ // true: The specified environment as well as the associated AWS resources,
+ // such as Auto Scaling group and LoadBalancer, are terminated.
+ //
+ // false: AWS Elastic Beanstalk resource management is removed from the environment,
+ // but the AWS resources continue to operate. For more information, see the
+ // AWS Elastic Beanstalk User Guide. (http://docs.aws.amazon.com/elasticbeanstalk/latest/ug/)
+ //
+ // Default: true
+ //
+ // Valid Values: true | false
+ TerminateResources *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s TerminateEnvironmentInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TerminateEnvironmentInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *TerminateEnvironmentInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "TerminateEnvironmentInput"}
+ if s.EnvironmentName != nil && len(*s.EnvironmentName) < 4 {
+ invalidParams.Add(request.NewErrParamMinLen("EnvironmentName", 4))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Describes a trigger.
+type Trigger struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the trigger.
+ Name *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Trigger) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Trigger) GoString() string {
+ return s.String()
+}
+
+// Request to update an application.
+type UpdateApplicationInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the application to update. If no such application is found,
+ // UpdateApplication returns an InvalidParameterValue error.
+ ApplicationName *string `min:"1" type:"string" required:"true"`
+
+ // A new description for the application.
+ //
+ // Default: If not specified, AWS Elastic Beanstalk does not update the description.
+ Description *string `type:"string"`
+}
+
+// String returns the string representation
+func (s UpdateApplicationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateApplicationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateApplicationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UpdateApplicationInput"}
+ if s.ApplicationName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ApplicationName"))
+ }
+ if s.ApplicationName != nil && len(*s.ApplicationName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ApplicationName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type UpdateApplicationVersionInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the application associated with this version.
+ //
+ // If no application is found with this name, UpdateApplicationVersion returns
+ // an InvalidParameterValue error.
+ ApplicationName *string `min:"1" type:"string" required:"true"`
+
+ // A new description for this release.
+ Description *string `type:"string"`
+
+ // The name of the version to update.
+ //
+ // If no application version is found with this label, UpdateApplicationVersion
+ // returns an InvalidParameterValue error.
+ VersionLabel *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateApplicationVersionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateApplicationVersionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateApplicationVersionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UpdateApplicationVersionInput"}
+ if s.ApplicationName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ApplicationName"))
+ }
+ if s.ApplicationName != nil && len(*s.ApplicationName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ApplicationName", 1))
+ }
+ if s.VersionLabel == nil {
+ invalidParams.Add(request.NewErrParamRequired("VersionLabel"))
+ }
+ if s.VersionLabel != nil && len(*s.VersionLabel) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("VersionLabel", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Request to update a configuration template.
+type UpdateConfigurationTemplateInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the application associated with the configuration template to
+ // update.
+ //
+ // If no application is found with this name, UpdateConfigurationTemplate
+ // returns an InvalidParameterValue error.
+ ApplicationName *string `min:"1" type:"string" required:"true"`
+
+ // A new description for the configuration.
+ Description *string `type:"string"`
+
+ // A list of configuration option settings to update with the new specified
+ // option value.
+ OptionSettings []*ConfigurationOptionSetting `type:"list"` + + // A list of configuration options to remove from the configuration set. + // + // Constraint: You can remove only UserDefined configuration options. + OptionsToRemove []*OptionSpecification `type:"list"` + + // The name of the configuration template to update. + // + // If no configuration template is found with this name, UpdateConfigurationTemplate + // returns an InvalidParameterValue error. + TemplateName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateConfigurationTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateConfigurationTemplateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateConfigurationTemplateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateConfigurationTemplateInput"} + if s.ApplicationName == nil { + invalidParams.Add(request.NewErrParamRequired("ApplicationName")) + } + if s.ApplicationName != nil && len(*s.ApplicationName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationName", 1)) + } + if s.TemplateName == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateName")) + } + if s.TemplateName != nil && len(*s.TemplateName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TemplateName", 1)) + } + if s.OptionSettings != nil { + for i, v := range s.OptionSettings { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "OptionSettings", i), err.(request.ErrInvalidParams)) + } + } + } + if s.OptionsToRemove != nil { + for i, v := range s.OptionsToRemove { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "OptionsToRemove", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Request to update an environment. +type UpdateEnvironmentInput struct { + _ struct{} `type:"structure"` + + // The name of the application with which the environment is associated. + ApplicationName *string `min:"1" type:"string"` + + // If this parameter is specified, AWS Elastic Beanstalk updates the description + // of this environment. + Description *string `type:"string"` + + // The ID of the environment to update. + // + // If no environment with this ID exists, AWS Elastic Beanstalk returns an + // InvalidParameterValue error. + // + // Condition: You must specify either this or an EnvironmentName, or both. + // If you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter + // error. + EnvironmentId *string `type:"string"` + + // The name of the environment to update. If no environment with this name exists, + // AWS Elastic Beanstalk returns an InvalidParameterValue error. + // + // Condition: You must specify either this or an EnvironmentId, or both. If + // you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter + // error. + EnvironmentName *string `min:"4" type:"string"` + + // The name of the group to which the target environment belongs. Specify a + // group name only if the environment's name is specified in an environment + // manifest and not with the environment name or environment ID parameters. 
+ // See Environment Manifest (env.yaml) (http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/environment-cfg-manifest.html)
+ // for details.
+ GroupName *string `min:"1" type:"string"`
+
+ // If specified, AWS Elastic Beanstalk updates the configuration set associated
+ // with the running environment and sets the specified configuration options
+ // to the requested value.
+ OptionSettings []*ConfigurationOptionSetting `type:"list"`
+
+ // A list of custom user-defined configuration options to remove from the configuration
+ // set for this environment.
+ OptionsToRemove []*OptionSpecification `type:"list"`
+
+ // The platform version that the environment will run after the update.
+ SolutionStackName *string `type:"string"`
+
+ // If this parameter is specified, AWS Elastic Beanstalk deploys this configuration
+ // template to the environment. If no such configuration template is found,
+ // AWS Elastic Beanstalk returns an InvalidParameterValue error.
+ TemplateName *string `min:"1" type:"string"`
+
+ // The tier to use to update the environment.
+ //
+ // Condition: At this time, if you change the tier version, name, or type,
+ // AWS Elastic Beanstalk returns an InvalidParameterValue error.
+ Tier *EnvironmentTier `type:"structure"`
+
+ // If this parameter is specified, AWS Elastic Beanstalk deploys the named application
+ // version to the environment. If no such application version is found, AWS
+ // Elastic Beanstalk returns an InvalidParameterValue error.
+ VersionLabel *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s UpdateEnvironmentInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateEnvironmentInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateEnvironmentInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UpdateEnvironmentInput"}
+ if s.ApplicationName != nil && len(*s.ApplicationName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ApplicationName", 1))
+ }
+ if s.EnvironmentName != nil && len(*s.EnvironmentName) < 4 {
+ invalidParams.Add(request.NewErrParamMinLen("EnvironmentName", 4))
+ }
+ if s.GroupName != nil && len(*s.GroupName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("GroupName", 1))
+ }
+ if s.TemplateName != nil && len(*s.TemplateName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("TemplateName", 1))
+ }
+ if s.VersionLabel != nil && len(*s.VersionLabel) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("VersionLabel", 1))
+ }
+ if s.OptionSettings != nil {
+ for i, v := range s.OptionSettings {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "OptionSettings", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+ if s.OptionsToRemove != nil {
+ for i, v := range s.OptionsToRemove {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "OptionsToRemove", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Request to validate configuration settings against a configuration template
+// or environment.
+type ValidateConfigurationSettingsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the application that the configuration template or environment
+ // belongs to.
+ ApplicationName *string `min:"1" type:"string" required:"true"`
+
+ // The name of the environment to validate the settings against.
+ //
+ // Condition: You cannot specify both this and a configuration template name.
+ EnvironmentName *string `min:"4" type:"string"`
+
+ // A list of the options and desired values to evaluate.
+ OptionSettings []*ConfigurationOptionSetting `type:"list" required:"true"`
+
+ // The name of the configuration template to validate the settings against.
+ //
+ // Condition: You cannot specify both this and an environment name.
+ TemplateName *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ValidateConfigurationSettingsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ValidateConfigurationSettingsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ValidateConfigurationSettingsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ValidateConfigurationSettingsInput"}
+ if s.ApplicationName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ApplicationName"))
+ }
+ if s.ApplicationName != nil && len(*s.ApplicationName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ApplicationName", 1))
+ }
+ if s.EnvironmentName != nil && len(*s.EnvironmentName) < 4 {
+ invalidParams.Add(request.NewErrParamMinLen("EnvironmentName", 4))
+ }
+ if s.OptionSettings == nil {
+ invalidParams.Add(request.NewErrParamRequired("OptionSettings"))
+ }
+ if s.TemplateName != nil && len(*s.TemplateName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("TemplateName", 1))
+ }
+ if s.OptionSettings != nil {
+ for i, v := range s.OptionSettings {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "OptionSettings", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Provides a list of validation messages.
+type ValidateConfigurationSettingsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of ValidationMessage.
+ Messages []*ValidationMessage `type:"list"`
+}
+
+// String returns the string representation
+func (s ValidateConfigurationSettingsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ValidateConfigurationSettingsOutput) GoString() string {
+ return s.String()
+}
+
+// An error or warning for a desired configuration option value.
+type ValidationMessage struct {
+ _ struct{} `type:"structure"`
+
+ // A message describing the error or warning.
+ Message *string `type:"string"`
+
+ // The namespace to which the option belongs.
+ Namespace *string `type:"string"`
+
+ // The name of the option.
+ OptionName *string `type:"string"`
+
+ // An indication of the severity of this message:
+ //
+ // error: This message indicates that this is not a valid setting for an option.
+ //
+ // warning: This message is providing information you should take into account.
+ Severity *string `type:"string" enum:"ValidationSeverity"` +} + +// String returns the string representation +func (s ValidationMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ValidationMessage) GoString() string { + return s.String() +} + +const ( + // @enum ActionHistoryStatus + ActionHistoryStatusCompleted = "Completed" + // @enum ActionHistoryStatus + ActionHistoryStatusFailed = "Failed" + // @enum ActionHistoryStatus + ActionHistoryStatusUnknown = "Unknown" +) + +const ( + // @enum ActionStatus + ActionStatusScheduled = "Scheduled" + // @enum ActionStatus + ActionStatusPending = "Pending" + // @enum ActionStatus + ActionStatusRunning = "Running" + // @enum ActionStatus + ActionStatusUnknown = "Unknown" +) + +const ( + // @enum ActionType + ActionTypeInstanceRefresh = "InstanceRefresh" + // @enum ActionType + ActionTypePlatformUpdate = "PlatformUpdate" + // @enum ActionType + ActionTypeUnknown = "Unknown" +) + +const ( + // @enum ApplicationVersionStatus + ApplicationVersionStatusProcessed = "Processed" + // @enum ApplicationVersionStatus + ApplicationVersionStatusUnprocessed = "Unprocessed" + // @enum ApplicationVersionStatus + ApplicationVersionStatusFailed = "Failed" + // @enum ApplicationVersionStatus + ApplicationVersionStatusProcessing = "Processing" +) + +const ( + // @enum ConfigurationDeploymentStatus + ConfigurationDeploymentStatusDeployed = "deployed" + // @enum ConfigurationDeploymentStatus + ConfigurationDeploymentStatusPending = "pending" + // @enum ConfigurationDeploymentStatus + ConfigurationDeploymentStatusFailed = "failed" +) + +const ( + // @enum ConfigurationOptionValueType + ConfigurationOptionValueTypeScalar = "Scalar" + // @enum ConfigurationOptionValueType + ConfigurationOptionValueTypeList = "List" +) + +const ( + // @enum EnvironmentHealth + EnvironmentHealthGreen = "Green" + // @enum EnvironmentHealth + EnvironmentHealthYellow = "Yellow" + // @enum EnvironmentHealth + EnvironmentHealthRed = "Red" + // @enum EnvironmentHealth + EnvironmentHealthGrey = "Grey" +) + +const ( + // @enum EnvironmentHealthAttribute + EnvironmentHealthAttributeStatus = "Status" + // @enum EnvironmentHealthAttribute + EnvironmentHealthAttributeColor = "Color" + // @enum EnvironmentHealthAttribute + EnvironmentHealthAttributeCauses = "Causes" + // @enum EnvironmentHealthAttribute + EnvironmentHealthAttributeApplicationMetrics = "ApplicationMetrics" + // @enum EnvironmentHealthAttribute + EnvironmentHealthAttributeInstancesHealth = "InstancesHealth" + // @enum EnvironmentHealthAttribute + EnvironmentHealthAttributeAll = "All" + // @enum EnvironmentHealthAttribute + EnvironmentHealthAttributeHealthStatus = "HealthStatus" + // @enum EnvironmentHealthAttribute + EnvironmentHealthAttributeRefreshedAt = "RefreshedAt" +) + +const ( + // @enum EnvironmentHealthStatus + EnvironmentHealthStatusNoData = "NoData" + // @enum EnvironmentHealthStatus + EnvironmentHealthStatusUnknown = "Unknown" + // @enum EnvironmentHealthStatus + EnvironmentHealthStatusPending = "Pending" + // @enum EnvironmentHealthStatus + EnvironmentHealthStatusOk = "Ok" + // @enum EnvironmentHealthStatus + EnvironmentHealthStatusInfo = "Info" + // @enum EnvironmentHealthStatus + EnvironmentHealthStatusWarning = "Warning" + // @enum EnvironmentHealthStatus + EnvironmentHealthStatusDegraded = "Degraded" + // @enum EnvironmentHealthStatus + EnvironmentHealthStatusSevere = "Severe" +) + +const ( + // @enum EnvironmentInfoType + EnvironmentInfoTypeTail = "tail" + 
// @enum EnvironmentInfoType + EnvironmentInfoTypeBundle = "bundle" +) + +const ( + // @enum EnvironmentStatus + EnvironmentStatusLaunching = "Launching" + // @enum EnvironmentStatus + EnvironmentStatusUpdating = "Updating" + // @enum EnvironmentStatus + EnvironmentStatusReady = "Ready" + // @enum EnvironmentStatus + EnvironmentStatusTerminating = "Terminating" + // @enum EnvironmentStatus + EnvironmentStatusTerminated = "Terminated" +) + +const ( + // @enum EventSeverity + EventSeverityTrace = "TRACE" + // @enum EventSeverity + EventSeverityDebug = "DEBUG" + // @enum EventSeverity + EventSeverityInfo = "INFO" + // @enum EventSeverity + EventSeverityWarn = "WARN" + // @enum EventSeverity + EventSeverityError = "ERROR" + // @enum EventSeverity + EventSeverityFatal = "FATAL" +) + +const ( + // @enum FailureType + FailureTypeUpdateCancelled = "UpdateCancelled" + // @enum FailureType + FailureTypeCancellationFailed = "CancellationFailed" + // @enum FailureType + FailureTypeRollbackFailed = "RollbackFailed" + // @enum FailureType + FailureTypeRollbackSuccessful = "RollbackSuccessful" + // @enum FailureType + FailureTypeInternalFailure = "InternalFailure" + // @enum FailureType + FailureTypeInvalidEnvironmentState = "InvalidEnvironmentState" + // @enum FailureType + FailureTypePermissionsError = "PermissionsError" +) + +const ( + // @enum InstancesHealthAttribute + InstancesHealthAttributeHealthStatus = "HealthStatus" + // @enum InstancesHealthAttribute + InstancesHealthAttributeColor = "Color" + // @enum InstancesHealthAttribute + InstancesHealthAttributeCauses = "Causes" + // @enum InstancesHealthAttribute + InstancesHealthAttributeApplicationMetrics = "ApplicationMetrics" + // @enum InstancesHealthAttribute + InstancesHealthAttributeRefreshedAt = "RefreshedAt" + // @enum InstancesHealthAttribute + InstancesHealthAttributeLaunchedAt = "LaunchedAt" + // @enum InstancesHealthAttribute + InstancesHealthAttributeSystem = "System" + // @enum InstancesHealthAttribute + InstancesHealthAttributeDeployment = "Deployment" + // @enum InstancesHealthAttribute + InstancesHealthAttributeAvailabilityZone = "AvailabilityZone" + // @enum InstancesHealthAttribute + InstancesHealthAttributeInstanceType = "InstanceType" + // @enum InstancesHealthAttribute + InstancesHealthAttributeAll = "All" +) + +const ( + // @enum ValidationSeverity + ValidationSeverityError = "error" + // @enum ValidationSeverity + ValidationSeverityWarning = "warning" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/elasticbeanstalkiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/elasticbeanstalkiface/interface.go new file mode 100644 index 000000000..5bb773757 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/elasticbeanstalkiface/interface.go @@ -0,0 +1,160 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package elasticbeanstalkiface provides an interface for the AWS Elastic Beanstalk. +package elasticbeanstalkiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/elasticbeanstalk" +) + +// ElasticBeanstalkAPI is the interface type for elasticbeanstalk.ElasticBeanstalk. 
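+// The sketch below is illustrative and not part of the generated file: because
+// the service client satisfies this interface, a test can embed the interface
+// in a stub and override only the methods it exercises (mockEBClient and its
+// canned response are hypothetical names). Any method the stub does not
+// override will panic if called, which keeps test doubles small.
+//
+//    type mockEBClient struct {
+//        elasticbeanstalkiface.ElasticBeanstalkAPI
+//    }
+//
+//    func (m *mockEBClient) DescribeApplications(in *elasticbeanstalk.DescribeApplicationsInput) (*elasticbeanstalk.DescribeApplicationsOutput, error) {
+//        return &elasticbeanstalk.DescribeApplicationsOutput{}, nil // canned empty response
+//    }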
+type ElasticBeanstalkAPI interface { + AbortEnvironmentUpdateRequest(*elasticbeanstalk.AbortEnvironmentUpdateInput) (*request.Request, *elasticbeanstalk.AbortEnvironmentUpdateOutput) + + AbortEnvironmentUpdate(*elasticbeanstalk.AbortEnvironmentUpdateInput) (*elasticbeanstalk.AbortEnvironmentUpdateOutput, error) + + ApplyEnvironmentManagedActionRequest(*elasticbeanstalk.ApplyEnvironmentManagedActionInput) (*request.Request, *elasticbeanstalk.ApplyEnvironmentManagedActionOutput) + + ApplyEnvironmentManagedAction(*elasticbeanstalk.ApplyEnvironmentManagedActionInput) (*elasticbeanstalk.ApplyEnvironmentManagedActionOutput, error) + + CheckDNSAvailabilityRequest(*elasticbeanstalk.CheckDNSAvailabilityInput) (*request.Request, *elasticbeanstalk.CheckDNSAvailabilityOutput) + + CheckDNSAvailability(*elasticbeanstalk.CheckDNSAvailabilityInput) (*elasticbeanstalk.CheckDNSAvailabilityOutput, error) + + ComposeEnvironmentsRequest(*elasticbeanstalk.ComposeEnvironmentsInput) (*request.Request, *elasticbeanstalk.EnvironmentDescriptionsMessage) + + ComposeEnvironments(*elasticbeanstalk.ComposeEnvironmentsInput) (*elasticbeanstalk.EnvironmentDescriptionsMessage, error) + + CreateApplicationRequest(*elasticbeanstalk.CreateApplicationInput) (*request.Request, *elasticbeanstalk.ApplicationDescriptionMessage) + + CreateApplication(*elasticbeanstalk.CreateApplicationInput) (*elasticbeanstalk.ApplicationDescriptionMessage, error) + + CreateApplicationVersionRequest(*elasticbeanstalk.CreateApplicationVersionInput) (*request.Request, *elasticbeanstalk.ApplicationVersionDescriptionMessage) + + CreateApplicationVersion(*elasticbeanstalk.CreateApplicationVersionInput) (*elasticbeanstalk.ApplicationVersionDescriptionMessage, error) + + CreateConfigurationTemplateRequest(*elasticbeanstalk.CreateConfigurationTemplateInput) (*request.Request, *elasticbeanstalk.ConfigurationSettingsDescription) + + CreateConfigurationTemplate(*elasticbeanstalk.CreateConfigurationTemplateInput) (*elasticbeanstalk.ConfigurationSettingsDescription, error) + + CreateEnvironmentRequest(*elasticbeanstalk.CreateEnvironmentInput) (*request.Request, *elasticbeanstalk.EnvironmentDescription) + + CreateEnvironment(*elasticbeanstalk.CreateEnvironmentInput) (*elasticbeanstalk.EnvironmentDescription, error) + + CreateStorageLocationRequest(*elasticbeanstalk.CreateStorageLocationInput) (*request.Request, *elasticbeanstalk.CreateStorageLocationOutput) + + CreateStorageLocation(*elasticbeanstalk.CreateStorageLocationInput) (*elasticbeanstalk.CreateStorageLocationOutput, error) + + DeleteApplicationRequest(*elasticbeanstalk.DeleteApplicationInput) (*request.Request, *elasticbeanstalk.DeleteApplicationOutput) + + DeleteApplication(*elasticbeanstalk.DeleteApplicationInput) (*elasticbeanstalk.DeleteApplicationOutput, error) + + DeleteApplicationVersionRequest(*elasticbeanstalk.DeleteApplicationVersionInput) (*request.Request, *elasticbeanstalk.DeleteApplicationVersionOutput) + + DeleteApplicationVersion(*elasticbeanstalk.DeleteApplicationVersionInput) (*elasticbeanstalk.DeleteApplicationVersionOutput, error) + + DeleteConfigurationTemplateRequest(*elasticbeanstalk.DeleteConfigurationTemplateInput) (*request.Request, *elasticbeanstalk.DeleteConfigurationTemplateOutput) + + DeleteConfigurationTemplate(*elasticbeanstalk.DeleteConfigurationTemplateInput) (*elasticbeanstalk.DeleteConfigurationTemplateOutput, error) + + DeleteEnvironmentConfigurationRequest(*elasticbeanstalk.DeleteEnvironmentConfigurationInput) (*request.Request, 
*elasticbeanstalk.DeleteEnvironmentConfigurationOutput) + + DeleteEnvironmentConfiguration(*elasticbeanstalk.DeleteEnvironmentConfigurationInput) (*elasticbeanstalk.DeleteEnvironmentConfigurationOutput, error) + + DescribeApplicationVersionsRequest(*elasticbeanstalk.DescribeApplicationVersionsInput) (*request.Request, *elasticbeanstalk.DescribeApplicationVersionsOutput) + + DescribeApplicationVersions(*elasticbeanstalk.DescribeApplicationVersionsInput) (*elasticbeanstalk.DescribeApplicationVersionsOutput, error) + + DescribeApplicationsRequest(*elasticbeanstalk.DescribeApplicationsInput) (*request.Request, *elasticbeanstalk.DescribeApplicationsOutput) + + DescribeApplications(*elasticbeanstalk.DescribeApplicationsInput) (*elasticbeanstalk.DescribeApplicationsOutput, error) + + DescribeConfigurationOptionsRequest(*elasticbeanstalk.DescribeConfigurationOptionsInput) (*request.Request, *elasticbeanstalk.DescribeConfigurationOptionsOutput) + + DescribeConfigurationOptions(*elasticbeanstalk.DescribeConfigurationOptionsInput) (*elasticbeanstalk.DescribeConfigurationOptionsOutput, error) + + DescribeConfigurationSettingsRequest(*elasticbeanstalk.DescribeConfigurationSettingsInput) (*request.Request, *elasticbeanstalk.DescribeConfigurationSettingsOutput) + + DescribeConfigurationSettings(*elasticbeanstalk.DescribeConfigurationSettingsInput) (*elasticbeanstalk.DescribeConfigurationSettingsOutput, error) + + DescribeEnvironmentHealthRequest(*elasticbeanstalk.DescribeEnvironmentHealthInput) (*request.Request, *elasticbeanstalk.DescribeEnvironmentHealthOutput) + + DescribeEnvironmentHealth(*elasticbeanstalk.DescribeEnvironmentHealthInput) (*elasticbeanstalk.DescribeEnvironmentHealthOutput, error) + + DescribeEnvironmentManagedActionHistoryRequest(*elasticbeanstalk.DescribeEnvironmentManagedActionHistoryInput) (*request.Request, *elasticbeanstalk.DescribeEnvironmentManagedActionHistoryOutput) + + DescribeEnvironmentManagedActionHistory(*elasticbeanstalk.DescribeEnvironmentManagedActionHistoryInput) (*elasticbeanstalk.DescribeEnvironmentManagedActionHistoryOutput, error) + + DescribeEnvironmentManagedActionsRequest(*elasticbeanstalk.DescribeEnvironmentManagedActionsInput) (*request.Request, *elasticbeanstalk.DescribeEnvironmentManagedActionsOutput) + + DescribeEnvironmentManagedActions(*elasticbeanstalk.DescribeEnvironmentManagedActionsInput) (*elasticbeanstalk.DescribeEnvironmentManagedActionsOutput, error) + + DescribeEnvironmentResourcesRequest(*elasticbeanstalk.DescribeEnvironmentResourcesInput) (*request.Request, *elasticbeanstalk.DescribeEnvironmentResourcesOutput) + + DescribeEnvironmentResources(*elasticbeanstalk.DescribeEnvironmentResourcesInput) (*elasticbeanstalk.DescribeEnvironmentResourcesOutput, error) + + DescribeEnvironmentsRequest(*elasticbeanstalk.DescribeEnvironmentsInput) (*request.Request, *elasticbeanstalk.EnvironmentDescriptionsMessage) + + DescribeEnvironments(*elasticbeanstalk.DescribeEnvironmentsInput) (*elasticbeanstalk.EnvironmentDescriptionsMessage, error) + + DescribeEventsRequest(*elasticbeanstalk.DescribeEventsInput) (*request.Request, *elasticbeanstalk.DescribeEventsOutput) + + DescribeEvents(*elasticbeanstalk.DescribeEventsInput) (*elasticbeanstalk.DescribeEventsOutput, error) + + DescribeEventsPages(*elasticbeanstalk.DescribeEventsInput, func(*elasticbeanstalk.DescribeEventsOutput, bool) bool) error + + DescribeInstancesHealthRequest(*elasticbeanstalk.DescribeInstancesHealthInput) (*request.Request, *elasticbeanstalk.DescribeInstancesHealthOutput) + + 
DescribeInstancesHealth(*elasticbeanstalk.DescribeInstancesHealthInput) (*elasticbeanstalk.DescribeInstancesHealthOutput, error) + + ListAvailableSolutionStacksRequest(*elasticbeanstalk.ListAvailableSolutionStacksInput) (*request.Request, *elasticbeanstalk.ListAvailableSolutionStacksOutput) + + ListAvailableSolutionStacks(*elasticbeanstalk.ListAvailableSolutionStacksInput) (*elasticbeanstalk.ListAvailableSolutionStacksOutput, error) + + RebuildEnvironmentRequest(*elasticbeanstalk.RebuildEnvironmentInput) (*request.Request, *elasticbeanstalk.RebuildEnvironmentOutput) + + RebuildEnvironment(*elasticbeanstalk.RebuildEnvironmentInput) (*elasticbeanstalk.RebuildEnvironmentOutput, error) + + RequestEnvironmentInfoRequest(*elasticbeanstalk.RequestEnvironmentInfoInput) (*request.Request, *elasticbeanstalk.RequestEnvironmentInfoOutput) + + RequestEnvironmentInfo(*elasticbeanstalk.RequestEnvironmentInfoInput) (*elasticbeanstalk.RequestEnvironmentInfoOutput, error) + + RestartAppServerRequest(*elasticbeanstalk.RestartAppServerInput) (*request.Request, *elasticbeanstalk.RestartAppServerOutput) + + RestartAppServer(*elasticbeanstalk.RestartAppServerInput) (*elasticbeanstalk.RestartAppServerOutput, error) + + RetrieveEnvironmentInfoRequest(*elasticbeanstalk.RetrieveEnvironmentInfoInput) (*request.Request, *elasticbeanstalk.RetrieveEnvironmentInfoOutput) + + RetrieveEnvironmentInfo(*elasticbeanstalk.RetrieveEnvironmentInfoInput) (*elasticbeanstalk.RetrieveEnvironmentInfoOutput, error) + + SwapEnvironmentCNAMEsRequest(*elasticbeanstalk.SwapEnvironmentCNAMEsInput) (*request.Request, *elasticbeanstalk.SwapEnvironmentCNAMEsOutput) + + SwapEnvironmentCNAMEs(*elasticbeanstalk.SwapEnvironmentCNAMEsInput) (*elasticbeanstalk.SwapEnvironmentCNAMEsOutput, error) + + TerminateEnvironmentRequest(*elasticbeanstalk.TerminateEnvironmentInput) (*request.Request, *elasticbeanstalk.EnvironmentDescription) + + TerminateEnvironment(*elasticbeanstalk.TerminateEnvironmentInput) (*elasticbeanstalk.EnvironmentDescription, error) + + UpdateApplicationRequest(*elasticbeanstalk.UpdateApplicationInput) (*request.Request, *elasticbeanstalk.ApplicationDescriptionMessage) + + UpdateApplication(*elasticbeanstalk.UpdateApplicationInput) (*elasticbeanstalk.ApplicationDescriptionMessage, error) + + UpdateApplicationVersionRequest(*elasticbeanstalk.UpdateApplicationVersionInput) (*request.Request, *elasticbeanstalk.ApplicationVersionDescriptionMessage) + + UpdateApplicationVersion(*elasticbeanstalk.UpdateApplicationVersionInput) (*elasticbeanstalk.ApplicationVersionDescriptionMessage, error) + + UpdateConfigurationTemplateRequest(*elasticbeanstalk.UpdateConfigurationTemplateInput) (*request.Request, *elasticbeanstalk.ConfigurationSettingsDescription) + + UpdateConfigurationTemplate(*elasticbeanstalk.UpdateConfigurationTemplateInput) (*elasticbeanstalk.ConfigurationSettingsDescription, error) + + UpdateEnvironmentRequest(*elasticbeanstalk.UpdateEnvironmentInput) (*request.Request, *elasticbeanstalk.EnvironmentDescription) + + UpdateEnvironment(*elasticbeanstalk.UpdateEnvironmentInput) (*elasticbeanstalk.EnvironmentDescription, error) + + ValidateConfigurationSettingsRequest(*elasticbeanstalk.ValidateConfigurationSettingsInput) (*request.Request, *elasticbeanstalk.ValidateConfigurationSettingsOutput) + + ValidateConfigurationSettings(*elasticbeanstalk.ValidateConfigurationSettingsInput) (*elasticbeanstalk.ValidateConfigurationSettingsOutput, error) +} + +var _ ElasticBeanstalkAPI = (*elasticbeanstalk.ElasticBeanstalk)(nil) diff --git 
a/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/examples_test.go new file mode 100644 index 000000000..9a6a634c2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/examples_test.go @@ -0,0 +1,903 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package elasticbeanstalk_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/elasticbeanstalk" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleElasticBeanstalk_AbortEnvironmentUpdate() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.AbortEnvironmentUpdateInput{ + EnvironmentId: aws.String("EnvironmentId"), + EnvironmentName: aws.String("EnvironmentName"), + } + resp, err := svc.AbortEnvironmentUpdate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_ApplyEnvironmentManagedAction() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.ApplyEnvironmentManagedActionInput{ + ActionId: aws.String("String"), // Required + EnvironmentId: aws.String("String"), + EnvironmentName: aws.String("String"), + } + resp, err := svc.ApplyEnvironmentManagedAction(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_CheckDNSAvailability() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.CheckDNSAvailabilityInput{ + CNAMEPrefix: aws.String("DNSCnamePrefix"), // Required + } + resp, err := svc.CheckDNSAvailability(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_ComposeEnvironments() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.ComposeEnvironmentsInput{ + ApplicationName: aws.String("ApplicationName"), + GroupName: aws.String("GroupName"), + VersionLabels: []*string{ + aws.String("VersionLabel"), // Required + // More values... + }, + } + resp, err := svc.ComposeEnvironments(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_CreateApplication() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.CreateApplicationInput{ + ApplicationName: aws.String("ApplicationName"), // Required + Description: aws.String("Description"), + } + resp, err := svc.CreateApplication(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleElasticBeanstalk_CreateApplicationVersion() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.CreateApplicationVersionInput{ + ApplicationName: aws.String("ApplicationName"), // Required + VersionLabel: aws.String("VersionLabel"), // Required + AutoCreateApplication: aws.Bool(true), + Description: aws.String("Description"), + Process: aws.Bool(true), + SourceBundle: &elasticbeanstalk.S3Location{ + S3Bucket: aws.String("S3Bucket"), + S3Key: aws.String("S3Key"), + }, + } + resp, err := svc.CreateApplicationVersion(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_CreateConfigurationTemplate() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.CreateConfigurationTemplateInput{ + ApplicationName: aws.String("ApplicationName"), // Required + TemplateName: aws.String("ConfigurationTemplateName"), // Required + Description: aws.String("Description"), + EnvironmentId: aws.String("EnvironmentId"), + OptionSettings: []*elasticbeanstalk.ConfigurationOptionSetting{ + { // Required + Namespace: aws.String("OptionNamespace"), + OptionName: aws.String("ConfigurationOptionName"), + ResourceName: aws.String("ResourceName"), + Value: aws.String("ConfigurationOptionValue"), + }, + // More values... + }, + SolutionStackName: aws.String("SolutionStackName"), + SourceConfiguration: &elasticbeanstalk.SourceConfiguration{ + ApplicationName: aws.String("ApplicationName"), + TemplateName: aws.String("ConfigurationTemplateName"), + }, + } + resp, err := svc.CreateConfigurationTemplate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_CreateEnvironment() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.CreateEnvironmentInput{ + ApplicationName: aws.String("ApplicationName"), // Required + CNAMEPrefix: aws.String("DNSCnamePrefix"), + Description: aws.String("Description"), + EnvironmentName: aws.String("EnvironmentName"), + GroupName: aws.String("GroupName"), + OptionSettings: []*elasticbeanstalk.ConfigurationOptionSetting{ + { // Required + Namespace: aws.String("OptionNamespace"), + OptionName: aws.String("ConfigurationOptionName"), + ResourceName: aws.String("ResourceName"), + Value: aws.String("ConfigurationOptionValue"), + }, + // More values... + }, + OptionsToRemove: []*elasticbeanstalk.OptionSpecification{ + { // Required + Namespace: aws.String("OptionNamespace"), + OptionName: aws.String("ConfigurationOptionName"), + ResourceName: aws.String("ResourceName"), + }, + // More values... + }, + SolutionStackName: aws.String("SolutionStackName"), + Tags: []*elasticbeanstalk.Tag{ + { // Required + Key: aws.String("TagKey"), + Value: aws.String("TagValue"), + }, + // More values... + }, + TemplateName: aws.String("ConfigurationTemplateName"), + Tier: &elasticbeanstalk.EnvironmentTier{ + Name: aws.String("String"), + Type: aws.String("String"), + Version: aws.String("String"), + }, + VersionLabel: aws.String("VersionLabel"), + } + resp, err := svc.CreateEnvironment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_CreateStorageLocation() { + svc := elasticbeanstalk.New(session.New()) + + var params *elasticbeanstalk.CreateStorageLocationInput + resp, err := svc.CreateStorageLocation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_DeleteApplication() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.DeleteApplicationInput{ + ApplicationName: aws.String("ApplicationName"), // Required + TerminateEnvByForce: aws.Bool(true), + } + resp, err := svc.DeleteApplication(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_DeleteApplicationVersion() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.DeleteApplicationVersionInput{ + ApplicationName: aws.String("ApplicationName"), // Required + VersionLabel: aws.String("VersionLabel"), // Required + DeleteSourceBundle: aws.Bool(true), + } + resp, err := svc.DeleteApplicationVersion(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_DeleteConfigurationTemplate() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.DeleteConfigurationTemplateInput{ + ApplicationName: aws.String("ApplicationName"), // Required + TemplateName: aws.String("ConfigurationTemplateName"), // Required + } + resp, err := svc.DeleteConfigurationTemplate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_DeleteEnvironmentConfiguration() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.DeleteEnvironmentConfigurationInput{ + ApplicationName: aws.String("ApplicationName"), // Required + EnvironmentName: aws.String("EnvironmentName"), // Required + } + resp, err := svc.DeleteEnvironmentConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_DescribeApplicationVersions() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.DescribeApplicationVersionsInput{ + ApplicationName: aws.String("ApplicationName"), + VersionLabels: []*string{ + aws.String("VersionLabel"), // Required + // More values... + }, + } + resp, err := svc.DescribeApplicationVersions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleElasticBeanstalk_DescribeApplications() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.DescribeApplicationsInput{ + ApplicationNames: []*string{ + aws.String("ApplicationName"), // Required + // More values... + }, + } + resp, err := svc.DescribeApplications(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_DescribeConfigurationOptions() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.DescribeConfigurationOptionsInput{ + ApplicationName: aws.String("ApplicationName"), + EnvironmentName: aws.String("EnvironmentName"), + Options: []*elasticbeanstalk.OptionSpecification{ + { // Required + Namespace: aws.String("OptionNamespace"), + OptionName: aws.String("ConfigurationOptionName"), + ResourceName: aws.String("ResourceName"), + }, + // More values... + }, + SolutionStackName: aws.String("SolutionStackName"), + TemplateName: aws.String("ConfigurationTemplateName"), + } + resp, err := svc.DescribeConfigurationOptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_DescribeConfigurationSettings() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.DescribeConfigurationSettingsInput{ + ApplicationName: aws.String("ApplicationName"), // Required + EnvironmentName: aws.String("EnvironmentName"), + TemplateName: aws.String("ConfigurationTemplateName"), + } + resp, err := svc.DescribeConfigurationSettings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_DescribeEnvironmentHealth() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.DescribeEnvironmentHealthInput{ + AttributeNames: []*string{ + aws.String("EnvironmentHealthAttribute"), // Required + // More values... + }, + EnvironmentId: aws.String("EnvironmentId"), + EnvironmentName: aws.String("EnvironmentName"), + } + resp, err := svc.DescribeEnvironmentHealth(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_DescribeEnvironmentManagedActionHistory() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.DescribeEnvironmentManagedActionHistoryInput{ + EnvironmentId: aws.String("EnvironmentId"), + EnvironmentName: aws.String("EnvironmentName"), + MaxItems: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeEnvironmentManagedActionHistory(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleElasticBeanstalk_DescribeEnvironmentManagedActions() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.DescribeEnvironmentManagedActionsInput{ + EnvironmentId: aws.String("String"), + EnvironmentName: aws.String("String"), + Status: aws.String("ActionStatus"), + } + resp, err := svc.DescribeEnvironmentManagedActions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_DescribeEnvironmentResources() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.DescribeEnvironmentResourcesInput{ + EnvironmentId: aws.String("EnvironmentId"), + EnvironmentName: aws.String("EnvironmentName"), + } + resp, err := svc.DescribeEnvironmentResources(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_DescribeEnvironments() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.DescribeEnvironmentsInput{ + ApplicationName: aws.String("ApplicationName"), + EnvironmentIds: []*string{ + aws.String("EnvironmentId"), // Required + // More values... + }, + EnvironmentNames: []*string{ + aws.String("EnvironmentName"), // Required + // More values... + }, + IncludeDeleted: aws.Bool(true), + IncludedDeletedBackTo: aws.Time(time.Now()), + VersionLabel: aws.String("VersionLabel"), + } + resp, err := svc.DescribeEnvironments(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_DescribeEvents() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.DescribeEventsInput{ + ApplicationName: aws.String("ApplicationName"), + EndTime: aws.Time(time.Now()), + EnvironmentId: aws.String("EnvironmentId"), + EnvironmentName: aws.String("EnvironmentName"), + MaxRecords: aws.Int64(1), + NextToken: aws.String("Token"), + RequestId: aws.String("RequestId"), + Severity: aws.String("EventSeverity"), + StartTime: aws.Time(time.Now()), + TemplateName: aws.String("ConfigurationTemplateName"), + VersionLabel: aws.String("VersionLabel"), + } + resp, err := svc.DescribeEvents(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_DescribeInstancesHealth() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.DescribeInstancesHealthInput{ + AttributeNames: []*string{ + aws.String("InstancesHealthAttribute"), // Required + // More values... + }, + EnvironmentId: aws.String("EnvironmentId"), + EnvironmentName: aws.String("EnvironmentName"), + NextToken: aws.String("NextToken"), + } + resp, err := svc.DescribeInstancesHealth(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleElasticBeanstalk_ListAvailableSolutionStacks() { + svc := elasticbeanstalk.New(session.New()) + + var params *elasticbeanstalk.ListAvailableSolutionStacksInput + resp, err := svc.ListAvailableSolutionStacks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_RebuildEnvironment() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.RebuildEnvironmentInput{ + EnvironmentId: aws.String("EnvironmentId"), + EnvironmentName: aws.String("EnvironmentName"), + } + resp, err := svc.RebuildEnvironment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_RequestEnvironmentInfo() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.RequestEnvironmentInfoInput{ + InfoType: aws.String("EnvironmentInfoType"), // Required + EnvironmentId: aws.String("EnvironmentId"), + EnvironmentName: aws.String("EnvironmentName"), + } + resp, err := svc.RequestEnvironmentInfo(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_RestartAppServer() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.RestartAppServerInput{ + EnvironmentId: aws.String("EnvironmentId"), + EnvironmentName: aws.String("EnvironmentName"), + } + resp, err := svc.RestartAppServer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_RetrieveEnvironmentInfo() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.RetrieveEnvironmentInfoInput{ + InfoType: aws.String("EnvironmentInfoType"), // Required + EnvironmentId: aws.String("EnvironmentId"), + EnvironmentName: aws.String("EnvironmentName"), + } + resp, err := svc.RetrieveEnvironmentInfo(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_SwapEnvironmentCNAMEs() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.SwapEnvironmentCNAMEsInput{ + DestinationEnvironmentId: aws.String("EnvironmentId"), + DestinationEnvironmentName: aws.String("EnvironmentName"), + SourceEnvironmentId: aws.String("EnvironmentId"), + SourceEnvironmentName: aws.String("EnvironmentName"), + } + resp, err := svc.SwapEnvironmentCNAMEs(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleElasticBeanstalk_TerminateEnvironment() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.TerminateEnvironmentInput{ + EnvironmentId: aws.String("EnvironmentId"), + EnvironmentName: aws.String("EnvironmentName"), + ForceTerminate: aws.Bool(true), + TerminateResources: aws.Bool(true), + } + resp, err := svc.TerminateEnvironment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_UpdateApplication() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.UpdateApplicationInput{ + ApplicationName: aws.String("ApplicationName"), // Required + Description: aws.String("Description"), + } + resp, err := svc.UpdateApplication(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_UpdateApplicationVersion() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.UpdateApplicationVersionInput{ + ApplicationName: aws.String("ApplicationName"), // Required + VersionLabel: aws.String("VersionLabel"), // Required + Description: aws.String("Description"), + } + resp, err := svc.UpdateApplicationVersion(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_UpdateConfigurationTemplate() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.UpdateConfigurationTemplateInput{ + ApplicationName: aws.String("ApplicationName"), // Required + TemplateName: aws.String("ConfigurationTemplateName"), // Required + Description: aws.String("Description"), + OptionSettings: []*elasticbeanstalk.ConfigurationOptionSetting{ + { // Required + Namespace: aws.String("OptionNamespace"), + OptionName: aws.String("ConfigurationOptionName"), + ResourceName: aws.String("ResourceName"), + Value: aws.String("ConfigurationOptionValue"), + }, + // More values... + }, + OptionsToRemove: []*elasticbeanstalk.OptionSpecification{ + { // Required + Namespace: aws.String("OptionNamespace"), + OptionName: aws.String("ConfigurationOptionName"), + ResourceName: aws.String("ResourceName"), + }, + // More values... + }, + } + resp, err := svc.UpdateConfigurationTemplate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleElasticBeanstalk_UpdateEnvironment() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.UpdateEnvironmentInput{ + ApplicationName: aws.String("ApplicationName"), + Description: aws.String("Description"), + EnvironmentId: aws.String("EnvironmentId"), + EnvironmentName: aws.String("EnvironmentName"), + GroupName: aws.String("GroupName"), + OptionSettings: []*elasticbeanstalk.ConfigurationOptionSetting{ + { // Required + Namespace: aws.String("OptionNamespace"), + OptionName: aws.String("ConfigurationOptionName"), + ResourceName: aws.String("ResourceName"), + Value: aws.String("ConfigurationOptionValue"), + }, + // More values... + }, + OptionsToRemove: []*elasticbeanstalk.OptionSpecification{ + { // Required + Namespace: aws.String("OptionNamespace"), + OptionName: aws.String("ConfigurationOptionName"), + ResourceName: aws.String("ResourceName"), + }, + // More values... + }, + SolutionStackName: aws.String("SolutionStackName"), + TemplateName: aws.String("ConfigurationTemplateName"), + Tier: &elasticbeanstalk.EnvironmentTier{ + Name: aws.String("String"), + Type: aws.String("String"), + Version: aws.String("String"), + }, + VersionLabel: aws.String("VersionLabel"), + } + resp, err := svc.UpdateEnvironment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_ValidateConfigurationSettings() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.ValidateConfigurationSettingsInput{ + ApplicationName: aws.String("ApplicationName"), // Required + OptionSettings: []*elasticbeanstalk.ConfigurationOptionSetting{ // Required + { // Required + Namespace: aws.String("OptionNamespace"), + OptionName: aws.String("ConfigurationOptionName"), + ResourceName: aws.String("ResourceName"), + Value: aws.String("ConfigurationOptionValue"), + }, + // More values... + }, + EnvironmentName: aws.String("EnvironmentName"), + TemplateName: aws.String("ConfigurationTemplateName"), + } + resp, err := svc.ValidateConfigurationSettings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/service.go b/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/service.go new file mode 100644 index 000000000..795a6fef1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/service.go @@ -0,0 +1,102 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package elasticbeanstalk + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +// AWS Elastic Beanstalk makes it easy for you to create, deploy, and manage +// scalable, fault-tolerant applications running on the Amazon Web Services +// cloud. +// +// For more information about this product, go to the AWS Elastic Beanstalk +// (http://aws.amazon.com/elasticbeanstalk/) details page. 
The location of the
+// latest AWS Elastic Beanstalk WSDL is http://elasticbeanstalk.s3.amazonaws.com/doc/2010-12-01/AWSElasticBeanstalk.wsdl
+// (http://elasticbeanstalk.s3.amazonaws.com/doc/2010-12-01/AWSElasticBeanstalk.wsdl).
+// To install the Software Development Kits (SDKs), Integrated Development Environment
+// (IDE) Toolkits, and command line tools that enable you to access the API,
+// go to Tools for Amazon Web Services (https://aws.amazon.com/tools/).
+//
+// Endpoints
+//
+// For a list of region-specific endpoints that AWS Elastic Beanstalk supports,
+// go to Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#elasticbeanstalk_region)
+// in the Amazon Web Services Glossary.
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type ElasticBeanstalk struct {
+    *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "elasticbeanstalk"
+
+// New creates a new instance of the ElasticBeanstalk client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create a ElasticBeanstalk client from just a session.
+//     svc := elasticbeanstalk.New(mySession)
+//
+//     // Create a ElasticBeanstalk client with additional configuration
+//     svc := elasticbeanstalk.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *ElasticBeanstalk {
+    c := p.ClientConfig(ServiceName, cfgs...)
+    return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *ElasticBeanstalk {
+    svc := &ElasticBeanstalk{
+        Client: client.New(
+            cfg,
+            metadata.ClientInfo{
+                ServiceName:   ServiceName,
+                SigningRegion: signingRegion,
+                Endpoint:      endpoint,
+                APIVersion:    "2010-12-01",
+            },
+            handlers,
+        ),
+    }
+
+    // Handlers
+    svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+    svc.Handlers.Build.PushBackNamed(query.BuildHandler)
+    svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler)
+    svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler)
+    svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler)
+
+    // Run custom client initialization if present
+    if initClient != nil {
+        initClient(svc.Client)
+    }
+
+    return svc
+}
+
+// newRequest creates a new request for a ElasticBeanstalk operation and runs any
+// custom request initialization.
+func (c *ElasticBeanstalk) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+    req := c.NewRequest(op, params, data)
+
+    // Run custom request initialization if present
+    if initRequest != nil {
+        initRequest(req)
+    }
+
+    return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/api.go b/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/api.go
new file mode 100644
index 000000000..df36f509f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/api.go
@@ -0,0 +1,1527 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
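+// An aside on the pattern shared by the client above and the one below: every
+// generated client exposes the request.Handlers lists wired up in newClient, so
+// callers can append their own lifecycle steps. A minimal sketch (the logging
+// callback is illustrative, not part of the SDK):
+//
+//    svc := elasticbeanstalk.New(session.New())
+//    svc.Handlers.Send.PushFront(func(r *request.Request) {
+//        fmt.Println("sending", r.Operation.Name) // runs before each API call
+//    })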
+
+// Package elasticsearchservice provides a client for Amazon Elasticsearch Service.
+package elasticsearchservice
+
+import (
+    "fmt"
+    "time"
+
+    "github.com/aws/aws-sdk-go/aws/awsutil"
+    "github.com/aws/aws-sdk-go/aws/request"
+    "github.com/aws/aws-sdk-go/private/protocol"
+    "github.com/aws/aws-sdk-go/private/protocol/restjson"
+)
+
+const opAddTags = "AddTags"
+
+// AddTagsRequest generates a "aws/request.Request" representing the
+// client's request for the AddTags operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the AddTags method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the AddTagsRequest method.
+//    req, resp := client.AddTagsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *ElasticsearchService) AddTagsRequest(input *AddTagsInput) (req *request.Request, output *AddTagsOutput) {
+    op := &request.Operation{
+        Name:       opAddTags,
+        HTTPMethod: "POST",
+        HTTPPath:   "/2015-01-01/tags",
+    }
+
+    if input == nil {
+        input = &AddTagsInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler)
+    req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+    output = &AddTagsOutput{}
+    req.Data = output
+    return
+}
+
+// Attaches tags to an existing Elasticsearch domain. Tags are a set of case-sensitive
+// key value pairs. An Elasticsearch domain may have up to 10 tags. See Tagging
+// Amazon Elasticsearch Service Domains for more information.
+// (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-managedomains.html#es-managedomains-awsresorcetagging)
+func (c *ElasticsearchService) AddTags(input *AddTagsInput) (*AddTagsOutput, error) {
+    req, out := c.AddTagsRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opCreateElasticsearchDomain = "CreateElasticsearchDomain"
+
+// CreateElasticsearchDomainRequest generates a "aws/request.Request" representing the
+// client's request for the CreateElasticsearchDomain operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CreateElasticsearchDomain method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the CreateElasticsearchDomainRequest method.
+//    req, resp := client.CreateElasticsearchDomainRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *ElasticsearchService) CreateElasticsearchDomainRequest(input *CreateElasticsearchDomainInput) (req *request.Request, output *CreateElasticsearchDomainOutput) {
+    op := &request.Operation{
+        Name:       opCreateElasticsearchDomain,
+        HTTPMethod: "POST",
+        HTTPPath:   "/2015-01-01/es/domain",
+    }
+
+    if input == nil {
+        input = &CreateElasticsearchDomainInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &CreateElasticsearchDomainOutput{}
+    req.Data = output
+    return
+}
+
+// Creates a new Elasticsearch domain. For more information, see Creating Elasticsearch
+// Domains (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomains)
+// in the Amazon Elasticsearch Service Developer Guide.
+func (c *ElasticsearchService) CreateElasticsearchDomain(input *CreateElasticsearchDomainInput) (*CreateElasticsearchDomainOutput, error) {
+    req, out := c.CreateElasticsearchDomainRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opDeleteElasticsearchDomain = "DeleteElasticsearchDomain"
+
+// DeleteElasticsearchDomainRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteElasticsearchDomain operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DeleteElasticsearchDomain method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DeleteElasticsearchDomainRequest method.
+//    req, resp := client.DeleteElasticsearchDomainRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *ElasticsearchService) DeleteElasticsearchDomainRequest(input *DeleteElasticsearchDomainInput) (req *request.Request, output *DeleteElasticsearchDomainOutput) {
+    op := &request.Operation{
+        Name:       opDeleteElasticsearchDomain,
+        HTTPMethod: "DELETE",
+        HTTPPath:   "/2015-01-01/es/domain/{DomainName}",
+    }
+
+    if input == nil {
+        input = &DeleteElasticsearchDomainInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &DeleteElasticsearchDomainOutput{}
+    req.Data = output
+    return
+}
+
+// Permanently deletes the specified Elasticsearch domain and all of its data.
+// Once a domain is deleted, it cannot be recovered.
+func (c *ElasticsearchService) DeleteElasticsearchDomain(input *DeleteElasticsearchDomainInput) (*DeleteElasticsearchDomainOutput, error) {
+    req, out := c.DeleteElasticsearchDomainRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opDescribeElasticsearchDomain = "DescribeElasticsearchDomain"
+
+// DescribeElasticsearchDomainRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeElasticsearchDomain operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeElasticsearchDomain method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeElasticsearchDomainRequest method. +// req, resp := client.DescribeElasticsearchDomainRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticsearchService) DescribeElasticsearchDomainRequest(input *DescribeElasticsearchDomainInput) (req *request.Request, output *DescribeElasticsearchDomainOutput) { + op := &request.Operation{ + Name: opDescribeElasticsearchDomain, + HTTPMethod: "GET", + HTTPPath: "/2015-01-01/es/domain/{DomainName}", + } + + if input == nil { + input = &DescribeElasticsearchDomainInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeElasticsearchDomainOutput{} + req.Data = output + return +} + +// Returns domain configuration information about the specified Elasticsearch +// domain, including the domain ID, domain endpoint, and domain ARN. +func (c *ElasticsearchService) DescribeElasticsearchDomain(input *DescribeElasticsearchDomainInput) (*DescribeElasticsearchDomainOutput, error) { + req, out := c.DescribeElasticsearchDomainRequest(input) + err := req.Send() + return out, err +} + +const opDescribeElasticsearchDomainConfig = "DescribeElasticsearchDomainConfig" + +// DescribeElasticsearchDomainConfigRequest generates a "aws/request.Request" representing the +// client's request for the DescribeElasticsearchDomainConfig operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeElasticsearchDomainConfig method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeElasticsearchDomainConfigRequest method. +// req, resp := client.DescribeElasticsearchDomainConfigRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticsearchService) DescribeElasticsearchDomainConfigRequest(input *DescribeElasticsearchDomainConfigInput) (req *request.Request, output *DescribeElasticsearchDomainConfigOutput) { + op := &request.Operation{ + Name: opDescribeElasticsearchDomainConfig, + HTTPMethod: "GET", + HTTPPath: "/2015-01-01/es/domain/{DomainName}/config", + } + + if input == nil { + input = &DescribeElasticsearchDomainConfigInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeElasticsearchDomainConfigOutput{} + req.Data = output + return +} + +// Provides cluster configuration information about the specified Elasticsearch +// domain, such as the state, creation date, update version, and update date +// for cluster options. 
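+// A sketch of the two-step pattern the request documentation above describes
+// (the domain name and header value are illustrative, not SDK constants):
+// build the request, inject custom pre-send logic, then call Send yourself.
+//
+//    req, resp := svc.DescribeElasticsearchDomainConfigRequest(&elasticsearchservice.DescribeElasticsearchDomainConfigInput{
+//        DomainName: aws.String("my-domain"), // hypothetical domain
+//    })
+//    req.HTTPRequest.Header.Set("X-Debug-Trace", "1") // custom lifecycle step
+//    if err := req.Send(); err == nil {
+//        fmt.Println(resp.DomainConfig)
+//    }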
+func (c *ElasticsearchService) DescribeElasticsearchDomainConfig(input *DescribeElasticsearchDomainConfigInput) (*DescribeElasticsearchDomainConfigOutput, error) { + req, out := c.DescribeElasticsearchDomainConfigRequest(input) + err := req.Send() + return out, err +} + +const opDescribeElasticsearchDomains = "DescribeElasticsearchDomains" + +// DescribeElasticsearchDomainsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeElasticsearchDomains operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeElasticsearchDomains method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeElasticsearchDomainsRequest method. +// req, resp := client.DescribeElasticsearchDomainsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticsearchService) DescribeElasticsearchDomainsRequest(input *DescribeElasticsearchDomainsInput) (req *request.Request, output *DescribeElasticsearchDomainsOutput) { + op := &request.Operation{ + Name: opDescribeElasticsearchDomains, + HTTPMethod: "POST", + HTTPPath: "/2015-01-01/es/domain-info", + } + + if input == nil { + input = &DescribeElasticsearchDomainsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeElasticsearchDomainsOutput{} + req.Data = output + return +} + +// Returns domain configuration information about the specified Elasticsearch +// domains, including the domain ID, domain endpoint, and domain ARN. +func (c *ElasticsearchService) DescribeElasticsearchDomains(input *DescribeElasticsearchDomainsInput) (*DescribeElasticsearchDomainsOutput, error) { + req, out := c.DescribeElasticsearchDomainsRequest(input) + err := req.Send() + return out, err +} + +const opListDomainNames = "ListDomainNames" + +// ListDomainNamesRequest generates a "aws/request.Request" representing the +// client's request for the ListDomainNames operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListDomainNames method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListDomainNamesRequest method. 
+// req, resp := client.ListDomainNamesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *ElasticsearchService) ListDomainNamesRequest(input *ListDomainNamesInput) (req *request.Request, output *ListDomainNamesOutput) {
+ op := &request.Operation{
+ Name: opListDomainNames,
+ HTTPMethod: "GET",
+ HTTPPath: "/2015-01-01/domain",
+ }
+
+ if input == nil {
+ input = &ListDomainNamesInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ListDomainNamesOutput{}
+ req.Data = output
+ return
+}
+
+// Returns the names of all Elasticsearch domains owned by the current user's
+// account.
+func (c *ElasticsearchService) ListDomainNames(input *ListDomainNamesInput) (*ListDomainNamesOutput, error) {
+ req, out := c.ListDomainNamesRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opListTags = "ListTags"
+
+// ListTagsRequest generates a "aws/request.Request" representing the
+// client's request for the ListTags operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ListTags method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the ListTagsRequest method.
+// req, resp := client.ListTagsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *ElasticsearchService) ListTagsRequest(input *ListTagsInput) (req *request.Request, output *ListTagsOutput) {
+ op := &request.Operation{
+ Name: opListTags,
+ HTTPMethod: "GET",
+ HTTPPath: "/2015-01-01/tags/",
+ }
+
+ if input == nil {
+ input = &ListTagsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ListTagsOutput{}
+ req.Data = output
+ return
+}
+
+// Returns all tags for the given Elasticsearch domain.
+func (c *ElasticsearchService) ListTags(input *ListTagsInput) (*ListTagsOutput, error) {
+ req, out := c.ListTagsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opRemoveTags = "RemoveTags"
+
+// RemoveTagsRequest generates a "aws/request.Request" representing the
+// client's request for the RemoveTags operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the RemoveTags method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the RemoveTagsRequest method.
+// req, resp := client.RemoveTagsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *ElasticsearchService) RemoveTagsRequest(input *RemoveTagsInput) (req *request.Request, output *RemoveTagsOutput) {
+ op := &request.Operation{
+ Name: opRemoveTags,
+ HTTPMethod: "POST",
+ HTTPPath: "/2015-01-01/tags-removal",
+ }
+
+ if input == nil {
+ input = &RemoveTagsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &RemoveTagsOutput{}
+ req.Data = output
+ return
+}
+
+// Removes the specified set of tags from the specified Elasticsearch domain.
+func (c *ElasticsearchService) RemoveTags(input *RemoveTagsInput) (*RemoveTagsOutput, error) {
+ req, out := c.RemoveTagsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opUpdateElasticsearchDomainConfig = "UpdateElasticsearchDomainConfig"
+
+// UpdateElasticsearchDomainConfigRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateElasticsearchDomainConfig operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the UpdateElasticsearchDomainConfig method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the UpdateElasticsearchDomainConfigRequest method.
+// req, resp := client.UpdateElasticsearchDomainConfigRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *ElasticsearchService) UpdateElasticsearchDomainConfigRequest(input *UpdateElasticsearchDomainConfigInput) (req *request.Request, output *UpdateElasticsearchDomainConfigOutput) {
+ op := &request.Operation{
+ Name: opUpdateElasticsearchDomainConfig,
+ HTTPMethod: "POST",
+ HTTPPath: "/2015-01-01/es/domain/{DomainName}/config",
+ }
+
+ if input == nil {
+ input = &UpdateElasticsearchDomainConfigInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &UpdateElasticsearchDomainConfigOutput{}
+ req.Data = output
+ return
+}
+
+// Modifies the cluster configuration of the specified Elasticsearch domain,
+// such as setting the instance type and the number of instances.
+func (c *ElasticsearchService) UpdateElasticsearchDomainConfig(input *UpdateElasticsearchDomainConfigInput) (*UpdateElasticsearchDomainConfigOutput, error) {
+ req, out := c.UpdateElasticsearchDomainConfigRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// The configured access rules for the domain's document and search endpoints,
+// and the current status of those rules.
+type AccessPoliciesStatus struct {
+ _ struct{} `type:"structure"`
+
+ // The access policy configured for the Elasticsearch domain. Access policies
+ // may be resource-based, IP-based, or IAM-based. See Configuring Access Policies
+ // (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-access-policies)
+ // for more information.
+ Options *string `type:"string" required:"true"`
+
+ // The status of the access policy for the Elasticsearch domain. See OptionStatus
+ // for the status information that's included.
+ Status *OptionStatus `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s AccessPoliciesStatus) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AccessPoliciesStatus) GoString() string {
+ return s.String()
+}
+
+// Container for the parameters to the AddTags operation. Specify the tags that
+// you want to attach to the Elasticsearch domain.
+type AddTagsInput struct {
+ _ struct{} `type:"structure"`
+
+ // Specify the ARN for which you want to add the tags.
+ ARN *string `type:"string" required:"true"`
+
+ // List of Tag objects that need to be added for the Elasticsearch domain.
+ TagList []*Tag `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s AddTagsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddTagsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AddTagsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AddTagsInput"}
+ if s.ARN == nil {
+ invalidParams.Add(request.NewErrParamRequired("ARN"))
+ }
+ if s.TagList == nil {
+ invalidParams.Add(request.NewErrParamRequired("TagList"))
+ }
+ if s.TagList != nil {
+ for i, v := range s.TagList {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TagList", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type AddTagsOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s AddTagsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddTagsOutput) GoString() string {
+ return s.String()
+}
+
+// Status of the advanced options for the specified Elasticsearch domain. Currently,
+// the following advanced options are available:
+//
+//   - Option to allow references to indices in an HTTP request body. Must be
+//     false when configuring access to individual sub-resources. By default,
+//     the value is true.
+//
+//   - Option to specify the percentage of heap space that is allocated to
+//     field data. By default, this setting is unbounded.
+//
+// For more information, see Configuring Advanced Options
+// (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-advanced-options).
+type AdvancedOptionsStatus struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the status of advanced options for the specified Elasticsearch
+ // domain.
+ Options map[string]*string `type:"map" required:"true"`
+
+ // Specifies the status of OptionStatus for advanced options for the specified
+ // Elasticsearch domain.
+ Status *OptionStatus `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s AdvancedOptionsStatus) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AdvancedOptionsStatus) GoString() string {
+ return s.String()
+}
+
+type CreateElasticsearchDomainInput struct {
+ _ struct{} `type:"structure"`
+
+ // IAM access policy as a JSON-formatted string.
+ AccessPolicies *string `type:"string"`
+
+ // Option to allow references to indices in an HTTP request body. Must be false
+ // when configuring access to individual sub-resources. By default, the value
+ // is true. See Configuring Advanced Options
+ // (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-advanced-options)
+ // for more information.
+ AdvancedOptions map[string]*string `type:"map"`
+
+ // The name of the Elasticsearch domain that you are creating. Domain names
+ // are unique across the domains owned by an account within an AWS region. Domain
+ // names must start with a letter or number and can contain the following characters:
+ // a-z (lowercase), 0-9, and - (hyphen).
+ DomainName *string `min:"3" type:"string" required:"true"`
+
+ // Options to enable, disable, and specify the type and size of EBS storage volumes.
+ EBSOptions *EBSOptions `type:"structure"`
+
+ // Configuration options for an Elasticsearch domain. Specifies the instance
+ // type and number of instances in the domain cluster.
+ ElasticsearchClusterConfig *ElasticsearchClusterConfig `type:"structure"`
+
+ // Option to set the time, in UTC format, of the daily automated snapshot. Default
+ // value is 0 hours.
+ SnapshotOptions *SnapshotOptions `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateElasticsearchDomainInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateElasticsearchDomainInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateElasticsearchDomainInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateElasticsearchDomainInput"}
+ if s.DomainName == nil {
+ invalidParams.Add(request.NewErrParamRequired("DomainName"))
+ }
+ if s.DomainName != nil && len(*s.DomainName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("DomainName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// The result of a CreateElasticsearchDomain operation. Contains the status
+// of the newly created Elasticsearch domain.
+type CreateElasticsearchDomainOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The status of the newly created Elasticsearch domain.
+ DomainStatus *ElasticsearchDomainStatus `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateElasticsearchDomainOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateElasticsearchDomainOutput) GoString() string {
+ return s.String()
+}
+
+// Container for the parameters to the DeleteElasticsearchDomain operation.
+// Specifies the name of the Elasticsearch domain that you want to delete.
+type DeleteElasticsearchDomainInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the Elasticsearch domain that you want to permanently delete.
+ DomainName *string `location:"uri" locationName:"DomainName" min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteElasticsearchDomainInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteElasticsearchDomainInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteElasticsearchDomainInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteElasticsearchDomainInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("DomainName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of a DeleteElasticsearchDomain request. Contains the status of +// the pending deletion, or no status if the domain and all of its resources +// have been deleted. +type DeleteElasticsearchDomainOutput struct { + _ struct{} `type:"structure"` + + // The status of the Elasticsearch domain being deleted. + DomainStatus *ElasticsearchDomainStatus `type:"structure"` +} + +// String returns the string representation +func (s DeleteElasticsearchDomainOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteElasticsearchDomainOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DescribeElasticsearchDomainConfig operation. +// Specifies the domain name for which you want configuration information. +type DescribeElasticsearchDomainConfigInput struct { + _ struct{} `type:"structure"` + + // The Elasticsearch domain that you want to get information about. + DomainName *string `location:"uri" locationName:"DomainName" min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeElasticsearchDomainConfigInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeElasticsearchDomainConfigInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeElasticsearchDomainConfigInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeElasticsearchDomainConfigInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("DomainName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of a DescribeElasticsearchDomainConfig request. Contains the configuration +// information of the requested domain. +type DescribeElasticsearchDomainConfigOutput struct { + _ struct{} `type:"structure"` + + // The configuration information of the domain requested in the DescribeElasticsearchDomainConfig + // request. 
+ DomainConfig *ElasticsearchDomainConfig `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DescribeElasticsearchDomainConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeElasticsearchDomainConfigOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DescribeElasticsearchDomain operation. +type DescribeElasticsearchDomainInput struct { + _ struct{} `type:"structure"` + + // The name of the Elasticsearch domain for which you want information. + DomainName *string `location:"uri" locationName:"DomainName" min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeElasticsearchDomainInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeElasticsearchDomainInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeElasticsearchDomainInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeElasticsearchDomainInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("DomainName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of a DescribeElasticsearchDomain request. Contains the status +// of the domain specified in the request. +type DescribeElasticsearchDomainOutput struct { + _ struct{} `type:"structure"` + + // The current status of the Elasticsearch domain. + DomainStatus *ElasticsearchDomainStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DescribeElasticsearchDomainOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeElasticsearchDomainOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DescribeElasticsearchDomains operation. +// By default, the API returns the status of all Elasticsearch domains. +type DescribeElasticsearchDomainsInput struct { + _ struct{} `type:"structure"` + + // The Elasticsearch domains for which you want information. + DomainNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeElasticsearchDomainsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeElasticsearchDomainsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeElasticsearchDomainsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeElasticsearchDomainsInput"} + if s.DomainNames == nil { + invalidParams.Add(request.NewErrParamRequired("DomainNames")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of a DescribeElasticsearchDomains request. Contains the status +// of the specified domains or all domains owned by the account. +type DescribeElasticsearchDomainsOutput struct { + _ struct{} `type:"structure"` + + // The status of the domains requested in the DescribeElasticsearchDomains request. 
+ DomainStatusList []*ElasticsearchDomainStatus `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeElasticsearchDomainsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeElasticsearchDomainsOutput) GoString() string {
+ return s.String()
+}
+
+type DomainInfo struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the DomainName.
+ DomainName *string `min:"3" type:"string"`
+}
+
+// String returns the string representation
+func (s DomainInfo) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DomainInfo) GoString() string {
+ return s.String()
+}
+
+// Options to enable, disable, and specify the properties of EBS storage volumes.
+// For more information, see Configuring EBS-based Storage
+// (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-ebs).
+type EBSOptions struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether EBS-based storage is enabled.
+ EBSEnabled *bool `type:"boolean"`
+
+ // Specifies the IOPS for a Provisioned IOPS EBS volume (SSD).
+ Iops *int64 `type:"integer"`
+
+ // Integer to specify the size of an EBS volume.
+ VolumeSize *int64 `type:"integer"`
+
+ // Specifies the volume type for EBS-based storage.
+ VolumeType *string `type:"string" enum:"VolumeType"`
+}
+
+// String returns the string representation
+func (s EBSOptions) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EBSOptions) GoString() string {
+ return s.String()
+}
+
+// Status of the EBS options for the specified Elasticsearch domain.
+type EBSOptionsStatus struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the EBS options for the specified Elasticsearch domain.
+ Options *EBSOptions `type:"structure" required:"true"`
+
+ // Specifies the status of the EBS options for the specified Elasticsearch domain.
+ Status *OptionStatus `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s EBSOptionsStatus) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EBSOptionsStatus) GoString() string {
+ return s.String()
+}
+
+// Specifies the configuration for the domain cluster, such as the type and
+// number of instances.
+type ElasticsearchClusterConfig struct {
+ _ struct{} `type:"structure"`
+
+ // Total number of dedicated master nodes, active and on standby, for the cluster.
+ DedicatedMasterCount *int64 `type:"integer"`
+
+ // A boolean value to indicate whether a dedicated master node is enabled. See
+ // About Dedicated Master Nodes
+ // (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-managedomains.html#es-managedomains-dedicatedmasternodes)
+ // for more information.
+ DedicatedMasterEnabled *bool `type:"boolean"`
+
+ // The instance type for a dedicated master node.
+ DedicatedMasterType *string `type:"string" enum:"ESPartitionInstanceType"`
+
+ // The number of instances in the specified domain cluster.
+ InstanceCount *int64 `type:"integer"`
+
+ // The instance type for an Elasticsearch cluster.
+ InstanceType *string `type:"string" enum:"ESPartitionInstanceType"`
+
+ // A boolean value to indicate whether zone awareness is enabled. See About
+ // Zone Awareness
+ // (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-managedomains.html#es-managedomains-zoneawareness)
+ // for more information.
+ ZoneAwarenessEnabled *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s ElasticsearchClusterConfig) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ElasticsearchClusterConfig) GoString() string {
+ return s.String()
+}
+
+// Specifies the configuration status for the specified Elasticsearch domain.
+type ElasticsearchClusterConfigStatus struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the cluster configuration for the specified Elasticsearch domain.
+ Options *ElasticsearchClusterConfig `type:"structure" required:"true"`
+
+ // Specifies the status of the configuration for the specified Elasticsearch
+ // domain.
+ Status *OptionStatus `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s ElasticsearchClusterConfigStatus) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ElasticsearchClusterConfigStatus) GoString() string {
+ return s.String()
+}
+
+// The configuration of an Elasticsearch domain.
+type ElasticsearchDomainConfig struct {
+ _ struct{} `type:"structure"`
+
+ // IAM access policy as a JSON-formatted string.
+ AccessPolicies *AccessPoliciesStatus `type:"structure"`
+
+ // Specifies the AdvancedOptions for the domain. See Configuring Advanced Options
+ // (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-advanced-options)
+ // for more information.
+ AdvancedOptions *AdvancedOptionsStatus `type:"structure"`
+
+ // Specifies the EBSOptions for the Elasticsearch domain.
+ EBSOptions *EBSOptionsStatus `type:"structure"`
+
+ // Specifies the ElasticsearchClusterConfig for the Elasticsearch domain.
+ ElasticsearchClusterConfig *ElasticsearchClusterConfigStatus `type:"structure"`
+
+ // Specifies the SnapshotOptions for the Elasticsearch domain.
+ SnapshotOptions *SnapshotOptionsStatus `type:"structure"`
+}
+
+// String returns the string representation
+func (s ElasticsearchDomainConfig) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ElasticsearchDomainConfig) GoString() string {
+ return s.String()
+}
+
+// The current status of an Elasticsearch domain.
+type ElasticsearchDomainStatus struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of an Elasticsearch domain. See Identifiers
+ // for IAM Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/index.html?Using_Identifiers.html)
+ // in Using AWS Identity and Access Management for more information.
+ ARN *string `type:"string" required:"true"`
+
+ // IAM access policy as a JSON-formatted string.
+ AccessPolicies *string `type:"string"`
+
+ // Specifies the status of the AdvancedOptions.
+ AdvancedOptions map[string]*string `type:"map"`
+
+ // The domain creation status. True if the creation of an Elasticsearch domain
+ // is complete. False if domain creation is still in progress.
+ Created *bool `type:"boolean"`
+
+ // The domain deletion status. True if a delete request has been received for
+ // the domain but resource cleanup is still in progress. False if the domain
+ // has not been deleted. Once domain deletion is complete, the status of the
+ // domain is no longer returned.
+ Deleted *bool `type:"boolean"`
+
+ // The unique identifier for the specified Elasticsearch domain.
+ DomainId *string `min:"1" type:"string" required:"true"`
+
+ // The name of an Elasticsearch domain. Domain names are unique across the domains
+ // owned by an account within an AWS region. Domain names start with a letter
+ // or number and can contain the following characters: a-z (lowercase), 0-9,
+ // and - (hyphen).
+ DomainName *string `min:"3" type:"string" required:"true"`
+
+ // The EBSOptions for the specified domain. See Configuring EBS-based Storage
+ // (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-ebs)
+ // for more information.
+ EBSOptions *EBSOptions `type:"structure"`
+
+ // The type and number of instances in the domain cluster.
+ ElasticsearchClusterConfig *ElasticsearchClusterConfig `type:"structure" required:"true"`
+
+ // The Elasticsearch domain endpoint that you use to submit index and search
+ // requests.
+ Endpoint *string `type:"string"`
+
+ // The status of the Elasticsearch domain configuration. True if Amazon Elasticsearch
+ // Service is processing configuration changes. False if the configuration is
+ // active.
+ Processing *bool `type:"boolean"`
+
+ // Specifies the status of the SnapshotOptions.
+ SnapshotOptions *SnapshotOptions `type:"structure"`
+}
+
+// String returns the string representation
+func (s ElasticsearchDomainStatus) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ElasticsearchDomainStatus) GoString() string {
+ return s.String()
+}
+
+type ListDomainNamesInput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s ListDomainNamesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListDomainNamesInput) GoString() string {
+ return s.String()
+}
+
+// The result of a ListDomainNames operation. Contains the names of all Elasticsearch
+// domains owned by this account.
+type ListDomainNamesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // List of Elasticsearch domain names.
+ DomainNames []*DomainInfo `type:"list"`
+}
+
+// String returns the string representation
+func (s ListDomainNamesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListDomainNamesOutput) GoString() string {
+ return s.String()
+}
+
+// Container for the parameters to the ListTags operation. Specify the ARN of
+// the Elasticsearch domain whose attached tags you want to view.
+type ListTagsInput struct {
+ _ struct{} `type:"structure"`
+
+ // Specify the ARN of the Elasticsearch domain whose attached tags you want
+ // to view.
+ ARN *string `location:"querystring" locationName:"arn" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListTagsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTagsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
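+//
+// The SDK's parameter-validation handlers are expected to run this check
+// automatically before a request is sent; a minimal sketch of calling it
+// directly as a pre-flight check (illustrative only):
+//
+//    in := &ListTagsInput{} // the required ARN is deliberately left unset
+//    if err := in.Validate(); err != nil {
+//        fmt.Println(err) // reports the missing required ARN parameter
+//    }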
+func (s *ListTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsInput"} + if s.ARN == nil { + invalidParams.Add(request.NewErrParamRequired("ARN")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of a ListTags operation. Contains tags for all requested Elasticsearch +// domains. +type ListTagsOutput struct { + _ struct{} `type:"structure"` + + // List of Tag for the requested Elasticsearch domain. + TagList []*Tag `type:"list"` +} + +// String returns the string representation +func (s ListTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsOutput) GoString() string { + return s.String() +} + +// Provides the current status of the entity. +type OptionStatus struct { + _ struct{} `type:"structure"` + + // Timestamp which tells the creation date for the entity. + CreationDate *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` + + // Indicates whether the Elasticsearch domain is being deleted. + PendingDeletion *bool `type:"boolean"` + + // Provides the OptionState for the Elasticsearch domain. + State *string `type:"string" required:"true" enum:"OptionState"` + + // Timestamp which tells the last updated time for the entity. + UpdateDate *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` + + // Specifies the latest version for the entity. + UpdateVersion *int64 `type:"integer"` +} + +// String returns the string representation +func (s OptionStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OptionStatus) GoString() string { + return s.String() +} + +// Container for the parameters to the RemoveTags operation. Specify the ARN +// for the Elasticsearch domain from which you want to remove the specified +// TagKey. +type RemoveTagsInput struct { + _ struct{} `type:"structure"` + + // Specifies the ARN for the Elasticsearch domain from which you want to delete + // the specified tags. + ARN *string `type:"string" required:"true"` + + // Specifies the TagKey list which you want to remove from the Elasticsearch + // domain. + TagKeys []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s RemoveTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RemoveTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemoveTagsInput"} + if s.ARN == nil { + invalidParams.Add(request.NewErrParamRequired("ARN")) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RemoveTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsOutput) GoString() string { + return s.String() +} + +// Specifies the time, in UTC format, when the service takes a daily automated +// snapshot of the specified Elasticsearch domain. Default value is 0 hours. 
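+//
+// For example, a domain whose daily snapshot should be taken at 03:00 UTC
+// (the hour value here is illustrative) could be configured with:
+//
+//    opts := &SnapshotOptions{
+//        AutomatedSnapshotStartHour: aws.Int64(3),
+//    }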
+type SnapshotOptions struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the time, in UTC format, when the service takes a daily automated
+ // snapshot of the specified Elasticsearch domain. Default value is 0 hours.
+ AutomatedSnapshotStartHour *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s SnapshotOptions) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SnapshotOptions) GoString() string {
+ return s.String()
+}
+
+// Status of a daily automated snapshot.
+type SnapshotOptionsStatus struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the daily snapshot options specified for the Elasticsearch domain.
+ Options *SnapshotOptions `type:"structure" required:"true"`
+
+ // Specifies the status of a daily automated snapshot.
+ Status *OptionStatus `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s SnapshotOptionsStatus) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SnapshotOptionsStatus) GoString() string {
+ return s.String()
+}
+
+// Specifies a key-value pair for a resource tag.
+type Tag struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the TagKey, the name of the tag. Tag keys must be unique for the
+ // Elasticsearch domain to which they are attached.
+ Key *string `min:"1" type:"string" required:"true"`
+
+ // Specifies the TagValue, the value assigned to the corresponding tag key.
+ // Tag values can be null and do not have to be unique in a tag set. For example,
+ // you can have a key-value pair in a tag set of project : Trinity and cost-center
+ // : Trinity.
+ Value *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s Tag) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Tag) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Tag) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Tag"}
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.Value == nil {
+ invalidParams.Add(request.NewErrParamRequired("Value"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Container for the parameters to the UpdateElasticsearchDomainConfig operation.
+// Specifies the type and number of instances in the domain cluster.
+type UpdateElasticsearchDomainConfigInput struct {
+ _ struct{} `type:"structure"`
+
+ // IAM access policy as a JSON-formatted string.
+ AccessPolicies *string `type:"string"`
+
+ // Modifies the advanced option to allow references to indices in an HTTP request
+ // body. Must be false when configuring access to individual sub-resources.
+ // By default, the value is true. See Configuring Advanced Options
+ // (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-advanced-options)
+ // for more information.
+ AdvancedOptions map[string]*string `type:"map"`
+
+ // The name of the Elasticsearch domain that you are updating.
+ DomainName *string `location:"uri" locationName:"DomainName" min:"3" type:"string" required:"true"`
+
+ // Specify the type and size of the EBS volume that you want to use.
+ EBSOptions *EBSOptions `type:"structure"` + + // The type and number of instances to instantiate for the domain cluster. + ElasticsearchClusterConfig *ElasticsearchClusterConfig `type:"structure"` + + // Option to set the time, in UTC format, for the daily automated snapshot. + // Default value is 0 hours. + SnapshotOptions *SnapshotOptions `type:"structure"` +} + +// String returns the string representation +func (s UpdateElasticsearchDomainConfigInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateElasticsearchDomainConfigInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateElasticsearchDomainConfigInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateElasticsearchDomainConfigInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("DomainName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of an UpdateElasticsearchDomain request. Contains the status of +// the Elasticsearch domain being updated. +type UpdateElasticsearchDomainConfigOutput struct { + _ struct{} `type:"structure"` + + // The status of the updated Elasticsearch domain. + DomainConfig *ElasticsearchDomainConfig `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateElasticsearchDomainConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateElasticsearchDomainConfigOutput) GoString() string { + return s.String() +} + +const ( + // @enum ESPartitionInstanceType + ESPartitionInstanceTypeM3MediumElasticsearch = "m3.medium.elasticsearch" + // @enum ESPartitionInstanceType + ESPartitionInstanceTypeM3LargeElasticsearch = "m3.large.elasticsearch" + // @enum ESPartitionInstanceType + ESPartitionInstanceTypeM3XlargeElasticsearch = "m3.xlarge.elasticsearch" + // @enum ESPartitionInstanceType + ESPartitionInstanceTypeM32xlargeElasticsearch = "m3.2xlarge.elasticsearch" + // @enum ESPartitionInstanceType + ESPartitionInstanceTypeT2MicroElasticsearch = "t2.micro.elasticsearch" + // @enum ESPartitionInstanceType + ESPartitionInstanceTypeT2SmallElasticsearch = "t2.small.elasticsearch" + // @enum ESPartitionInstanceType + ESPartitionInstanceTypeT2MediumElasticsearch = "t2.medium.elasticsearch" + // @enum ESPartitionInstanceType + ESPartitionInstanceTypeR3LargeElasticsearch = "r3.large.elasticsearch" + // @enum ESPartitionInstanceType + ESPartitionInstanceTypeR3XlargeElasticsearch = "r3.xlarge.elasticsearch" + // @enum ESPartitionInstanceType + ESPartitionInstanceTypeR32xlargeElasticsearch = "r3.2xlarge.elasticsearch" + // @enum ESPartitionInstanceType + ESPartitionInstanceTypeR34xlargeElasticsearch = "r3.4xlarge.elasticsearch" + // @enum ESPartitionInstanceType + ESPartitionInstanceTypeR38xlargeElasticsearch = "r3.8xlarge.elasticsearch" + // @enum ESPartitionInstanceType + ESPartitionInstanceTypeI2XlargeElasticsearch = "i2.xlarge.elasticsearch" + // @enum ESPartitionInstanceType + ESPartitionInstanceTypeI22xlargeElasticsearch = "i2.2xlarge.elasticsearch" +) + +// The state of a requested change. One of the following: +// +// Processing: The request change is still in-process. 
Active: The request
+// change is processed and deployed to the Elasticsearch domain.
+const (
+ // @enum OptionState
+ OptionStateRequiresIndexDocuments = "RequiresIndexDocuments"
+ // @enum OptionState
+ OptionStateProcessing = "Processing"
+ // @enum OptionState
+ OptionStateActive = "Active"
+)
+
+// The type of EBS volume, standard, gp2, or io1. See Configuring EBS-based
+// Storage (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-ebs)
+// for more information.
+const (
+ // @enum VolumeType
+ VolumeTypeStandard = "standard"
+ // @enum VolumeType
+ VolumeTypeGp2 = "gp2"
+ // @enum VolumeType
+ VolumeTypeIo1 = "io1"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/elasticsearchserviceiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/elasticsearchserviceiface/interface.go
new file mode 100644
index 000000000..974a57852
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/elasticsearchserviceiface/interface.go
@@ -0,0 +1,54 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package elasticsearchserviceiface provides an interface for the Amazon Elasticsearch Service.
+package elasticsearchserviceiface
+
+import (
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/service/elasticsearchservice"
+)
+
+// ElasticsearchServiceAPI is the interface type for elasticsearchservice.ElasticsearchService.
+type ElasticsearchServiceAPI interface {
+ AddTagsRequest(*elasticsearchservice.AddTagsInput) (*request.Request, *elasticsearchservice.AddTagsOutput)
+
+ AddTags(*elasticsearchservice.AddTagsInput) (*elasticsearchservice.AddTagsOutput, error)
+
+ CreateElasticsearchDomainRequest(*elasticsearchservice.CreateElasticsearchDomainInput) (*request.Request, *elasticsearchservice.CreateElasticsearchDomainOutput)
+
+ CreateElasticsearchDomain(*elasticsearchservice.CreateElasticsearchDomainInput) (*elasticsearchservice.CreateElasticsearchDomainOutput, error)
+
+ DeleteElasticsearchDomainRequest(*elasticsearchservice.DeleteElasticsearchDomainInput) (*request.Request, *elasticsearchservice.DeleteElasticsearchDomainOutput)
+
+ DeleteElasticsearchDomain(*elasticsearchservice.DeleteElasticsearchDomainInput) (*elasticsearchservice.DeleteElasticsearchDomainOutput, error)
+
+ DescribeElasticsearchDomainRequest(*elasticsearchservice.DescribeElasticsearchDomainInput) (*request.Request, *elasticsearchservice.DescribeElasticsearchDomainOutput)
+
+ DescribeElasticsearchDomain(*elasticsearchservice.DescribeElasticsearchDomainInput) (*elasticsearchservice.DescribeElasticsearchDomainOutput, error)
+
+ DescribeElasticsearchDomainConfigRequest(*elasticsearchservice.DescribeElasticsearchDomainConfigInput) (*request.Request, *elasticsearchservice.DescribeElasticsearchDomainConfigOutput)
+
+ DescribeElasticsearchDomainConfig(*elasticsearchservice.DescribeElasticsearchDomainConfigInput) (*elasticsearchservice.DescribeElasticsearchDomainConfigOutput, error)
+
+ DescribeElasticsearchDomainsRequest(*elasticsearchservice.DescribeElasticsearchDomainsInput) (*request.Request, *elasticsearchservice.DescribeElasticsearchDomainsOutput)
+
+ DescribeElasticsearchDomains(*elasticsearchservice.DescribeElasticsearchDomainsInput) (*elasticsearchservice.DescribeElasticsearchDomainsOutput, error)
+
+ ListDomainNamesRequest(*elasticsearchservice.ListDomainNamesInput) (*request.Request, *elasticsearchservice.ListDomainNamesOutput)
+
+ ListDomainNames(*elasticsearchservice.ListDomainNamesInput) (*elasticsearchservice.ListDomainNamesOutput, error)
+
+ ListTagsRequest(*elasticsearchservice.ListTagsInput) (*request.Request, *elasticsearchservice.ListTagsOutput)
+
+ ListTags(*elasticsearchservice.ListTagsInput) (*elasticsearchservice.ListTagsOutput, error)
+
+ RemoveTagsRequest(*elasticsearchservice.RemoveTagsInput) (*request.Request, *elasticsearchservice.RemoveTagsOutput)
+
+ RemoveTags(*elasticsearchservice.RemoveTagsInput) (*elasticsearchservice.RemoveTagsOutput, error)
+
+ UpdateElasticsearchDomainConfigRequest(*elasticsearchservice.UpdateElasticsearchDomainConfigInput) (*request.Request, *elasticsearchservice.UpdateElasticsearchDomainConfigOutput)
+
+ UpdateElasticsearchDomainConfig(*elasticsearchservice.UpdateElasticsearchDomainConfigInput) (*elasticsearchservice.UpdateElasticsearchDomainConfigOutput, error)
+}
+
+var _ ElasticsearchServiceAPI = (*elasticsearchservice.ElasticsearchService)(nil)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/examples_test.go
new file mode 100644
index 000000000..8ca0ea7fc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/examples_test.go
@@ -0,0 +1,262 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package elasticsearchservice_test
+
+import (
+ "bytes"
+ "fmt"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/elasticsearchservice"
+)
+
+var _ time.Duration
+var _ bytes.Buffer
+
+func ExampleElasticsearchService_AddTags() {
+ svc := elasticsearchservice.New(session.New())
+
+ params := &elasticsearchservice.AddTagsInput{
+ ARN: aws.String("ARN"), // Required
+ TagList: []*elasticsearchservice.Tag{ // Required
+ { // Required
+ Key: aws.String("TagKey"), // Required
+ Value: aws.String("TagValue"), // Required
+ },
+ // More values...
+ },
+ }
+ resp, err := svc.AddTags(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleElasticsearchService_CreateElasticsearchDomain() {
+ svc := elasticsearchservice.New(session.New())
+
+ params := &elasticsearchservice.CreateElasticsearchDomainInput{
+ DomainName: aws.String("DomainName"), // Required
+ AccessPolicies: aws.String("PolicyDocument"),
+ AdvancedOptions: map[string]*string{
+ "Key": aws.String("String"), // Required
+ // More values...
+ },
+ EBSOptions: &elasticsearchservice.EBSOptions{
+ EBSEnabled: aws.Bool(true),
+ Iops: aws.Int64(1),
+ VolumeSize: aws.Int64(1),
+ VolumeType: aws.String("VolumeType"),
+ },
+ ElasticsearchClusterConfig: &elasticsearchservice.ElasticsearchClusterConfig{
+ DedicatedMasterCount: aws.Int64(1),
+ DedicatedMasterEnabled: aws.Bool(true),
+ DedicatedMasterType: aws.String("ESPartitionInstanceType"),
+ InstanceCount: aws.Int64(1),
+ InstanceType: aws.String("ESPartitionInstanceType"),
+ ZoneAwarenessEnabled: aws.Bool(true),
+ },
+ SnapshotOptions: &elasticsearchservice.SnapshotOptions{
+ AutomatedSnapshotStartHour: aws.Int64(1),
+ },
+ }
+ resp, err := svc.CreateElasticsearchDomain(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp) +} + +func ExampleElasticsearchService_DeleteElasticsearchDomain() { + svc := elasticsearchservice.New(session.New()) + + params := &elasticsearchservice.DeleteElasticsearchDomainInput{ + DomainName: aws.String("DomainName"), // Required + } + resp, err := svc.DeleteElasticsearchDomain(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticsearchService_DescribeElasticsearchDomain() { + svc := elasticsearchservice.New(session.New()) + + params := &elasticsearchservice.DescribeElasticsearchDomainInput{ + DomainName: aws.String("DomainName"), // Required + } + resp, err := svc.DescribeElasticsearchDomain(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticsearchService_DescribeElasticsearchDomainConfig() { + svc := elasticsearchservice.New(session.New()) + + params := &elasticsearchservice.DescribeElasticsearchDomainConfigInput{ + DomainName: aws.String("DomainName"), // Required + } + resp, err := svc.DescribeElasticsearchDomainConfig(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticsearchService_DescribeElasticsearchDomains() { + svc := elasticsearchservice.New(session.New()) + + params := &elasticsearchservice.DescribeElasticsearchDomainsInput{ + DomainNames: []*string{ // Required + aws.String("DomainName"), // Required + // More values... + }, + } + resp, err := svc.DescribeElasticsearchDomains(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticsearchService_ListDomainNames() { + svc := elasticsearchservice.New(session.New()) + + var params *elasticsearchservice.ListDomainNamesInput + resp, err := svc.ListDomainNames(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticsearchService_ListTags() { + svc := elasticsearchservice.New(session.New()) + + params := &elasticsearchservice.ListTagsInput{ + ARN: aws.String("ARN"), // Required + } + resp, err := svc.ListTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticsearchService_RemoveTags() { + svc := elasticsearchservice.New(session.New()) + + params := &elasticsearchservice.RemoveTagsInput{ + ARN: aws.String("ARN"), // Required + TagKeys: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.RemoveTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp)
+}
+
+func ExampleElasticsearchService_UpdateElasticsearchDomainConfig() {
+ svc := elasticsearchservice.New(session.New())
+
+ params := &elasticsearchservice.UpdateElasticsearchDomainConfigInput{
+ DomainName: aws.String("DomainName"), // Required
+ AccessPolicies: aws.String("PolicyDocument"),
+ AdvancedOptions: map[string]*string{
+ "Key": aws.String("String"), // Required
+ // More values...
+ },
+ EBSOptions: &elasticsearchservice.EBSOptions{
+ EBSEnabled: aws.Bool(true),
+ Iops: aws.Int64(1),
+ VolumeSize: aws.Int64(1),
+ VolumeType: aws.String("VolumeType"),
+ },
+ ElasticsearchClusterConfig: &elasticsearchservice.ElasticsearchClusterConfig{
+ DedicatedMasterCount: aws.Int64(1),
+ DedicatedMasterEnabled: aws.Bool(true),
+ DedicatedMasterType: aws.String("ESPartitionInstanceType"),
+ InstanceCount: aws.Int64(1),
+ InstanceType: aws.String("ESPartitionInstanceType"),
+ ZoneAwarenessEnabled: aws.Bool(true),
+ },
+ SnapshotOptions: &elasticsearchservice.SnapshotOptions{
+ AutomatedSnapshotStartHour: aws.Int64(1),
+ },
+ }
+ resp, err := svc.UpdateElasticsearchDomainConfig(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/service.go b/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/service.go
new file mode 100644
index 000000000..60f3971a9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/service.go
@@ -0,0 +1,92 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package elasticsearchservice
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol/restjson"
+)
+
+// Use the Amazon Elasticsearch configuration API to create, configure, and
+// manage Elasticsearch domains.
+//
+// The endpoint for configuration service requests is region-specific: es.region.amazonaws.com.
+// For example, es.us-east-1.amazonaws.com. For a current list of supported
+// regions and endpoints, see Regions and Endpoints
+// (http://docs.aws.amazon.com/general/latest/gr/rande.html#cloudsearch_region).
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type ElasticsearchService struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "es"
+
+// New creates a new instance of the ElasticsearchService client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an ElasticsearchService client from just a session.
+// svc := elasticsearchservice.New(mySession)
+//
+// // Create an ElasticsearchService client with additional configuration
+// svc := elasticsearchservice.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *ElasticsearchService {
+ c := p.ClientConfig(ServiceName, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *ElasticsearchService {
+ svc := &ElasticsearchService{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2015-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+ svc.Handlers.Build.PushBackNamed(restjson.BuildHandler)
+ svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler)
+ svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler)
+ svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler)
+
+ // Run custom client initialization if present
+ if initClient != nil {
+ initClient(svc.Client)
+ }
+
+ return svc
+}
+
+// newRequest creates a new request for an ElasticsearchService operation and runs any
+// custom request initialization.
+func (c *ElasticsearchService) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ // Run custom request initialization if present
+ if initRequest != nil {
+ initRequest(req)
+ }
+
+ return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/api.go b/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/api.go
new file mode 100644
index 000000000..af931b4f1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/api.go
@@ -0,0 +1,4944 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package elastictranscoder provides a client for Amazon Elastic Transcoder.
+package elastictranscoder
+
+import (
+ "fmt"
+
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+const opCancelJob = "CancelJob"
+
+// CancelJobRequest generates a "aws/request.Request" representing the
+// client's request for the CancelJob operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CancelJob method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the CancelJobRequest method.
+// req, resp := client.CancelJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticTranscoder) CancelJobRequest(input *CancelJobInput) (req *request.Request, output *CancelJobOutput) { + op := &request.Operation{ + Name: opCancelJob, + HTTPMethod: "DELETE", + HTTPPath: "/2012-09-25/jobs/{Id}", + } + + if input == nil { + input = &CancelJobInput{} + } + + req = c.newRequest(op, input, output) + output = &CancelJobOutput{} + req.Data = output + return +} + +// The CancelJob operation cancels an unfinished job. +// +// You can only cancel a job that has a status of Submitted. To prevent a pipeline +// from starting to process a job while you're getting the job identifier, use +// UpdatePipelineStatus to temporarily pause the pipeline. +func (c *ElasticTranscoder) CancelJob(input *CancelJobInput) (*CancelJobOutput, error) { + req, out := c.CancelJobRequest(input) + err := req.Send() + return out, err +} + +const opCreateJob = "CreateJob" + +// CreateJobRequest generates a "aws/request.Request" representing the +// client's request for the CreateJob operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateJob method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateJobRequest method. +// req, resp := client.CreateJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticTranscoder) CreateJobRequest(input *CreateJobInput) (req *request.Request, output *CreateJobResponse) { + op := &request.Operation{ + Name: opCreateJob, + HTTPMethod: "POST", + HTTPPath: "/2012-09-25/jobs", + } + + if input == nil { + input = &CreateJobInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateJobResponse{} + req.Data = output + return +} + +// When you create a job, Elastic Transcoder returns JSON data that includes +// the values that you specified plus information about the job that is created. +// +// If you have specified more than one output for your jobs (for example, one +// output for the Kindle Fire and another output for the Apple iPhone 4s), you +// currently must use the Elastic Transcoder API to list the jobs (as opposed +// to the AWS Console). +func (c *ElasticTranscoder) CreateJob(input *CreateJobInput) (*CreateJobResponse, error) { + req, out := c.CreateJobRequest(input) + err := req.Send() + return out, err +} + +const opCreatePipeline = "CreatePipeline" + +// CreatePipelineRequest generates a "aws/request.Request" representing the +// client's request for the CreatePipeline operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the CreatePipeline method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreatePipelineRequest method. +// req, resp := client.CreatePipelineRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticTranscoder) CreatePipelineRequest(input *CreatePipelineInput) (req *request.Request, output *CreatePipelineOutput) { + op := &request.Operation{ + Name: opCreatePipeline, + HTTPMethod: "POST", + HTTPPath: "/2012-09-25/pipelines", + } + + if input == nil { + input = &CreatePipelineInput{} + } + + req = c.newRequest(op, input, output) + output = &CreatePipelineOutput{} + req.Data = output + return +} + +// The CreatePipeline operation creates a pipeline with settings that you specify. +func (c *ElasticTranscoder) CreatePipeline(input *CreatePipelineInput) (*CreatePipelineOutput, error) { + req, out := c.CreatePipelineRequest(input) + err := req.Send() + return out, err +} + +const opCreatePreset = "CreatePreset" + +// CreatePresetRequest generates a "aws/request.Request" representing the +// client's request for the CreatePreset operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreatePreset method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreatePresetRequest method. +// req, resp := client.CreatePresetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticTranscoder) CreatePresetRequest(input *CreatePresetInput) (req *request.Request, output *CreatePresetOutput) { + op := &request.Operation{ + Name: opCreatePreset, + HTTPMethod: "POST", + HTTPPath: "/2012-09-25/presets", + } + + if input == nil { + input = &CreatePresetInput{} + } + + req = c.newRequest(op, input, output) + output = &CreatePresetOutput{} + req.Data = output + return +} + +// The CreatePreset operation creates a preset with settings that you specify. +// +// Elastic Transcoder checks the CreatePreset settings to ensure that they +// meet Elastic Transcoder requirements and to determine whether they comply +// with H.264 standards. If your settings are not valid for Elastic Transcoder, +// Elastic Transcoder returns an HTTP 400 response (ValidationException) and +// does not create the preset. If the settings are valid for Elastic Transcoder +// but aren't strictly compliant with the H.264 standard, Elastic Transcoder +// creates the preset and returns a warning message in the response. This helps +// you determine whether your settings comply with the H.264 standard while +// giving you greater flexibility with respect to the video that Elastic Transcoder +// produces. Elastic Transcoder uses the H.264 video-compression format. 
For +// more information, see the International Telecommunication Union publication +// Recommendation ITU-T H.264: Advanced video coding for generic audiovisual +// services. +func (c *ElasticTranscoder) CreatePreset(input *CreatePresetInput) (*CreatePresetOutput, error) { + req, out := c.CreatePresetRequest(input) + err := req.Send() + return out, err +} + +const opDeletePipeline = "DeletePipeline" + +// DeletePipelineRequest generates a "aws/request.Request" representing the +// client's request for the DeletePipeline operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeletePipeline method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeletePipelineRequest method. +// req, resp := client.DeletePipelineRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticTranscoder) DeletePipelineRequest(input *DeletePipelineInput) (req *request.Request, output *DeletePipelineOutput) { + op := &request.Operation{ + Name: opDeletePipeline, + HTTPMethod: "DELETE", + HTTPPath: "/2012-09-25/pipelines/{Id}", + } + + if input == nil { + input = &DeletePipelineInput{} + } + + req = c.newRequest(op, input, output) + output = &DeletePipelineOutput{} + req.Data = output + return +} + +// The DeletePipeline operation removes a pipeline. +// +// You can only delete a pipeline that has never been used or that is not +// currently in use (doesn't contain any active jobs). If the pipeline is currently +// in use, DeletePipeline returns an error. +func (c *ElasticTranscoder) DeletePipeline(input *DeletePipelineInput) (*DeletePipelineOutput, error) { + req, out := c.DeletePipelineRequest(input) + err := req.Send() + return out, err +} + +const opDeletePreset = "DeletePreset" + +// DeletePresetRequest generates a "aws/request.Request" representing the +// client's request for the DeletePreset operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeletePreset method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeletePresetRequest method. 
+// req, resp := client.DeletePresetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticTranscoder) DeletePresetRequest(input *DeletePresetInput) (req *request.Request, output *DeletePresetOutput) { + op := &request.Operation{ + Name: opDeletePreset, + HTTPMethod: "DELETE", + HTTPPath: "/2012-09-25/presets/{Id}", + } + + if input == nil { + input = &DeletePresetInput{} + } + + req = c.newRequest(op, input, output) + output = &DeletePresetOutput{} + req.Data = output + return +} + +// The DeletePreset operation removes a preset that you've added in an AWS region. +// +// You can't delete the default presets that are included with Elastic Transcoder. +func (c *ElasticTranscoder) DeletePreset(input *DeletePresetInput) (*DeletePresetOutput, error) { + req, out := c.DeletePresetRequest(input) + err := req.Send() + return out, err +} + +const opListJobsByPipeline = "ListJobsByPipeline" + +// ListJobsByPipelineRequest generates a "aws/request.Request" representing the +// client's request for the ListJobsByPipeline operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListJobsByPipeline method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListJobsByPipelineRequest method. +// req, resp := client.ListJobsByPipelineRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticTranscoder) ListJobsByPipelineRequest(input *ListJobsByPipelineInput) (req *request.Request, output *ListJobsByPipelineOutput) { + op := &request.Operation{ + Name: opListJobsByPipeline, + HTTPMethod: "GET", + HTTPPath: "/2012-09-25/jobsByPipeline/{PipelineId}", + Paginator: &request.Paginator{ + InputTokens: []string{"PageToken"}, + OutputTokens: []string{"NextPageToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListJobsByPipelineInput{} + } + + req = c.newRequest(op, input, output) + output = &ListJobsByPipelineOutput{} + req.Data = output + return +} + +// The ListJobsByPipeline operation gets a list of the jobs currently in a pipeline. +// +// Elastic Transcoder returns all of the jobs currently in the specified pipeline. +// The response body contains one element for each job that satisfies the search +// criteria. +func (c *ElasticTranscoder) ListJobsByPipeline(input *ListJobsByPipelineInput) (*ListJobsByPipelineOutput, error) { + req, out := c.ListJobsByPipelineRequest(input) + err := req.Send() + return out, err +} + +// ListJobsByPipelinePages iterates over the pages of a ListJobsByPipeline operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListJobsByPipeline method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListJobsByPipeline operation. 
+// pageNum := 0 +// err := client.ListJobsByPipelinePages(params, +// func(page *ListJobsByPipelineOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ElasticTranscoder) ListJobsByPipelinePages(input *ListJobsByPipelineInput, fn func(p *ListJobsByPipelineOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListJobsByPipelineRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListJobsByPipelineOutput), lastPage) + }) +} + +const opListJobsByStatus = "ListJobsByStatus" + +// ListJobsByStatusRequest generates a "aws/request.Request" representing the +// client's request for the ListJobsByStatus operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListJobsByStatus method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListJobsByStatusRequest method. +// req, resp := client.ListJobsByStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticTranscoder) ListJobsByStatusRequest(input *ListJobsByStatusInput) (req *request.Request, output *ListJobsByStatusOutput) { + op := &request.Operation{ + Name: opListJobsByStatus, + HTTPMethod: "GET", + HTTPPath: "/2012-09-25/jobsByStatus/{Status}", + Paginator: &request.Paginator{ + InputTokens: []string{"PageToken"}, + OutputTokens: []string{"NextPageToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListJobsByStatusInput{} + } + + req = c.newRequest(op, input, output) + output = &ListJobsByStatusOutput{} + req.Data = output + return +} + +// The ListJobsByStatus operation gets a list of jobs that have a specified +// status. The response body contains one element for each job that satisfies +// the search criteria. +func (c *ElasticTranscoder) ListJobsByStatus(input *ListJobsByStatusInput) (*ListJobsByStatusOutput, error) { + req, out := c.ListJobsByStatusRequest(input) + err := req.Send() + return out, err +} + +// ListJobsByStatusPages iterates over the pages of a ListJobsByStatus operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListJobsByStatus method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListJobsByStatus operation. 
+// pageNum := 0 +// err := client.ListJobsByStatusPages(params, +// func(page *ListJobsByStatusOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ElasticTranscoder) ListJobsByStatusPages(input *ListJobsByStatusInput, fn func(p *ListJobsByStatusOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListJobsByStatusRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListJobsByStatusOutput), lastPage) + }) +} + +const opListPipelines = "ListPipelines" + +// ListPipelinesRequest generates a "aws/request.Request" representing the +// client's request for the ListPipelines operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListPipelines method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListPipelinesRequest method. +// req, resp := client.ListPipelinesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticTranscoder) ListPipelinesRequest(input *ListPipelinesInput) (req *request.Request, output *ListPipelinesOutput) { + op := &request.Operation{ + Name: opListPipelines, + HTTPMethod: "GET", + HTTPPath: "/2012-09-25/pipelines", + Paginator: &request.Paginator{ + InputTokens: []string{"PageToken"}, + OutputTokens: []string{"NextPageToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListPipelinesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListPipelinesOutput{} + req.Data = output + return +} + +// The ListPipelines operation gets a list of the pipelines associated with +// the current AWS account. +func (c *ElasticTranscoder) ListPipelines(input *ListPipelinesInput) (*ListPipelinesOutput, error) { + req, out := c.ListPipelinesRequest(input) + err := req.Send() + return out, err +} + +// ListPipelinesPages iterates over the pages of a ListPipelines operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListPipelines method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListPipelines operation. 
+// pageNum := 0 +// err := client.ListPipelinesPages(params, +// func(page *ListPipelinesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ElasticTranscoder) ListPipelinesPages(input *ListPipelinesInput, fn func(p *ListPipelinesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListPipelinesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListPipelinesOutput), lastPage) + }) +} + +const opListPresets = "ListPresets" + +// ListPresetsRequest generates a "aws/request.Request" representing the +// client's request for the ListPresets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListPresets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListPresetsRequest method. +// req, resp := client.ListPresetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticTranscoder) ListPresetsRequest(input *ListPresetsInput) (req *request.Request, output *ListPresetsOutput) { + op := &request.Operation{ + Name: opListPresets, + HTTPMethod: "GET", + HTTPPath: "/2012-09-25/presets", + Paginator: &request.Paginator{ + InputTokens: []string{"PageToken"}, + OutputTokens: []string{"NextPageToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListPresetsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListPresetsOutput{} + req.Data = output + return +} + +// The ListPresets operation gets a list of the default presets included with +// Elastic Transcoder and the presets that you've added in an AWS region. +func (c *ElasticTranscoder) ListPresets(input *ListPresetsInput) (*ListPresetsOutput, error) { + req, out := c.ListPresetsRequest(input) + err := req.Send() + return out, err +} + +// ListPresetsPages iterates over the pages of a ListPresets operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListPresets method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListPresets operation. 
+// pageNum := 0 +// err := client.ListPresetsPages(params, +// func(page *ListPresetsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ElasticTranscoder) ListPresetsPages(input *ListPresetsInput, fn func(p *ListPresetsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListPresetsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListPresetsOutput), lastPage) + }) +} + +const opReadJob = "ReadJob" + +// ReadJobRequest generates a "aws/request.Request" representing the +// client's request for the ReadJob operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ReadJob method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ReadJobRequest method. +// req, resp := client.ReadJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticTranscoder) ReadJobRequest(input *ReadJobInput) (req *request.Request, output *ReadJobOutput) { + op := &request.Operation{ + Name: opReadJob, + HTTPMethod: "GET", + HTTPPath: "/2012-09-25/jobs/{Id}", + } + + if input == nil { + input = &ReadJobInput{} + } + + req = c.newRequest(op, input, output) + output = &ReadJobOutput{} + req.Data = output + return +} + +// The ReadJob operation returns detailed information about a job. +func (c *ElasticTranscoder) ReadJob(input *ReadJobInput) (*ReadJobOutput, error) { + req, out := c.ReadJobRequest(input) + err := req.Send() + return out, err +} + +const opReadPipeline = "ReadPipeline" + +// ReadPipelineRequest generates a "aws/request.Request" representing the +// client's request for the ReadPipeline operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ReadPipeline method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ReadPipelineRequest method. 
+// req, resp := client.ReadPipelineRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticTranscoder) ReadPipelineRequest(input *ReadPipelineInput) (req *request.Request, output *ReadPipelineOutput) { + op := &request.Operation{ + Name: opReadPipeline, + HTTPMethod: "GET", + HTTPPath: "/2012-09-25/pipelines/{Id}", + } + + if input == nil { + input = &ReadPipelineInput{} + } + + req = c.newRequest(op, input, output) + output = &ReadPipelineOutput{} + req.Data = output + return +} + +// The ReadPipeline operation gets detailed information about a pipeline. +func (c *ElasticTranscoder) ReadPipeline(input *ReadPipelineInput) (*ReadPipelineOutput, error) { + req, out := c.ReadPipelineRequest(input) + err := req.Send() + return out, err +} + +const opReadPreset = "ReadPreset" + +// ReadPresetRequest generates a "aws/request.Request" representing the +// client's request for the ReadPreset operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ReadPreset method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ReadPresetRequest method. +// req, resp := client.ReadPresetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticTranscoder) ReadPresetRequest(input *ReadPresetInput) (req *request.Request, output *ReadPresetOutput) { + op := &request.Operation{ + Name: opReadPreset, + HTTPMethod: "GET", + HTTPPath: "/2012-09-25/presets/{Id}", + } + + if input == nil { + input = &ReadPresetInput{} + } + + req = c.newRequest(op, input, output) + output = &ReadPresetOutput{} + req.Data = output + return +} + +// The ReadPreset operation gets detailed information about a preset. +func (c *ElasticTranscoder) ReadPreset(input *ReadPresetInput) (*ReadPresetOutput, error) { + req, out := c.ReadPresetRequest(input) + err := req.Send() + return out, err +} + +const opTestRole = "TestRole" + +// TestRoleRequest generates a "aws/request.Request" representing the +// client's request for the TestRole operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the TestRole method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the TestRoleRequest method. 
+// req, resp := client.TestRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticTranscoder) TestRoleRequest(input *TestRoleInput) (req *request.Request, output *TestRoleOutput) { + op := &request.Operation{ + Name: opTestRole, + HTTPMethod: "POST", + HTTPPath: "/2012-09-25/roleTests", + } + + if input == nil { + input = &TestRoleInput{} + } + + req = c.newRequest(op, input, output) + output = &TestRoleOutput{} + req.Data = output + return +} + +// The TestRole operation tests the IAM role used to create the pipeline. +// +// The TestRole action lets you determine whether the IAM role you are using +// has sufficient permissions to let Elastic Transcoder perform tasks associated +// with the transcoding process. The action attempts to assume the specified +// IAM role, checks read access to the input and output buckets, and tries to +// send a test notification to Amazon SNS topics that you specify. +func (c *ElasticTranscoder) TestRole(input *TestRoleInput) (*TestRoleOutput, error) { + req, out := c.TestRoleRequest(input) + err := req.Send() + return out, err +} + +const opUpdatePipeline = "UpdatePipeline" + +// UpdatePipelineRequest generates a "aws/request.Request" representing the +// client's request for the UpdatePipeline operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdatePipeline method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdatePipelineRequest method. +// req, resp := client.UpdatePipelineRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticTranscoder) UpdatePipelineRequest(input *UpdatePipelineInput) (req *request.Request, output *UpdatePipelineOutput) { + op := &request.Operation{ + Name: opUpdatePipeline, + HTTPMethod: "PUT", + HTTPPath: "/2012-09-25/pipelines/{Id}", + } + + if input == nil { + input = &UpdatePipelineInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdatePipelineOutput{} + req.Data = output + return +} + +// Use the UpdatePipeline operation to update settings for a pipeline. When +// you change pipeline settings, your changes take effect immediately. Jobs +// that you have already submitted and that Elastic Transcoder has not started +// to process are affected in addition to jobs that you submit after you change +// settings. +func (c *ElasticTranscoder) UpdatePipeline(input *UpdatePipelineInput) (*UpdatePipelineOutput, error) { + req, out := c.UpdatePipelineRequest(input) + err := req.Send() + return out, err +} + +const opUpdatePipelineNotifications = "UpdatePipelineNotifications" + +// UpdatePipelineNotificationsRequest generates a "aws/request.Request" representing the +// client's request for the UpdatePipelineNotifications operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
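+//
+// // Editor's note: a hedged sketch, not generated documentation, of
+// // injecting a custom handler through the request object; the handler
+// // body is illustrative and assumes the aws/request package.
+// req, resp := client.UpdatePipelineNotificationsRequest(params)
+// req.Handlers.Send.PushFront(func(r *request.Request) {
+// fmt.Println("sending", r.Operation.Name) // runs just before the HTTP call
+// })
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }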
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdatePipelineNotifications method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdatePipelineNotificationsRequest method. +// req, resp := client.UpdatePipelineNotificationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticTranscoder) UpdatePipelineNotificationsRequest(input *UpdatePipelineNotificationsInput) (req *request.Request, output *UpdatePipelineNotificationsOutput) { + op := &request.Operation{ + Name: opUpdatePipelineNotifications, + HTTPMethod: "POST", + HTTPPath: "/2012-09-25/pipelines/{Id}/notifications", + } + + if input == nil { + input = &UpdatePipelineNotificationsInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdatePipelineNotificationsOutput{} + req.Data = output + return +} + +// With the UpdatePipelineNotifications operation, you can update Amazon Simple +// Notification Service (Amazon SNS) notifications for a pipeline. +// +// When you update notifications for a pipeline, Elastic Transcoder returns +// the values that you specified in the request. +func (c *ElasticTranscoder) UpdatePipelineNotifications(input *UpdatePipelineNotificationsInput) (*UpdatePipelineNotificationsOutput, error) { + req, out := c.UpdatePipelineNotificationsRequest(input) + err := req.Send() + return out, err +} + +const opUpdatePipelineStatus = "UpdatePipelineStatus" + +// UpdatePipelineStatusRequest generates a "aws/request.Request" representing the +// client's request for the UpdatePipelineStatus operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdatePipelineStatus method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdatePipelineStatusRequest method. +// req, resp := client.UpdatePipelineStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ElasticTranscoder) UpdatePipelineStatusRequest(input *UpdatePipelineStatusInput) (req *request.Request, output *UpdatePipelineStatusOutput) { + op := &request.Operation{ + Name: opUpdatePipelineStatus, + HTTPMethod: "POST", + HTTPPath: "/2012-09-25/pipelines/{Id}/status", + } + + if input == nil { + input = &UpdatePipelineStatusInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdatePipelineStatusOutput{} + req.Data = output + return +} + +// The UpdatePipelineStatus operation pauses or reactivates a pipeline, so that +// the pipeline stops or restarts the processing of jobs. +// +// Changing the pipeline status is useful if you want to cancel one or more +// jobs. 
You can't cancel jobs after Elastic Transcoder has started processing +// them; if you pause the pipeline to which you submitted the jobs, you have +// more time to get the job IDs for the jobs that you want to cancel, and to +// send a CancelJob request. +func (c *ElasticTranscoder) UpdatePipelineStatus(input *UpdatePipelineStatusInput) (*UpdatePipelineStatusOutput, error) { + req, out := c.UpdatePipelineStatusRequest(input) + err := req.Send() + return out, err +} + +// The file to be used as album art. There can be multiple artworks associated +// with an audio file, to a maximum of 20. +// +// To remove artwork or leave the artwork empty, you can either set Artwork +// to null, or set the Merge Policy to "Replace" and use an empty Artwork array. +// +// To pass through existing artwork unchanged, set the Merge Policy to "Prepend", +// "Append", or "Fallback", and use an empty Artwork array. +type Artwork struct { + _ struct{} `type:"structure"` + + // The format of album art, if any. Valid formats are .jpg and .png. + AlbumArtFormat *string `type:"string"` + + // The encryption settings, if any, that you want Elastic Transcoder to apply + // to your artwork. + Encryption *Encryption `type:"structure"` + + // The name of the file to be used as album art. To determine which Amazon S3 + // bucket contains the specified file, Elastic Transcoder checks the pipeline + // specified by PipelineId; the InputBucket object in that pipeline identifies + // the bucket. + // + // If the file name includes a prefix, for example, cooking/pie.jpg, include + // the prefix in the key. If the file isn't in the specified bucket, Elastic + // Transcoder returns an error. + InputKey *string `min:"1" type:"string"` + + // The maximum height of the output album art in pixels. If you specify auto, + // Elastic Transcoder uses 600 as the default value. If you specify a numeric + // value, enter an even integer between 32 and 3072, inclusive. + MaxHeight *string `type:"string"` + + // The maximum width of the output album art in pixels. If you specify auto, + // Elastic Transcoder uses 600 as the default value. If you specify a numeric + // value, enter an even integer between 32 and 4096, inclusive. + MaxWidth *string `type:"string"` + + // When you set PaddingPolicy to Pad, Elastic Transcoder may add white bars + // to the top and bottom and/or left and right sides of the output album art + // to make the total size of the output art match the values that you specified + // for MaxWidth and MaxHeight. + PaddingPolicy *string `type:"string"` + + // Specify one of the following values to control scaling of the output album + // art: + // + // Fit: Elastic Transcoder scales the output art so it matches the value + // that you specified in either MaxWidth or MaxHeight without exceeding the + // other value. Fill: Elastic Transcoder scales the output art so it matches + // the value that you specified in either MaxWidth or MaxHeight and matches + // or exceeds the other value. Elastic Transcoder centers the output art and + // then crops it in the dimension (if any) that exceeds the maximum value. + // Stretch: Elastic Transcoder stretches the output art to match the values + // that you specified for MaxWidth and MaxHeight. If the relative proportions + // of the input art and the output art are different, the output art will be + // distorted. Keep: Elastic Transcoder does not scale the output art. 
If either
+ // dimension of the input art exceeds the values that you specified for MaxWidth
+ // and MaxHeight, Elastic Transcoder crops the output art. ShrinkToFit: Elastic
+ // Transcoder scales the output art down so that its dimensions match the values
+ // that you specified for at least one of MaxWidth and MaxHeight without exceeding
+ // either value. If you specify this option, Elastic Transcoder does not scale
+ // the art up. ShrinkToFill: Elastic Transcoder scales the output art down so
+ // that its dimensions match the values that you specified for at least one
+ // of MaxWidth and MaxHeight without dropping below either value. If you specify
+ // this option, Elastic Transcoder does not scale the art up.
+ SizingPolicy *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Artwork) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Artwork) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Artwork) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Artwork"}
+ if s.InputKey != nil && len(*s.InputKey) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("InputKey", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Options associated with your audio codec.
+type AudioCodecOptions struct {
+ _ struct{} `type:"structure"`
+
+ // You can only choose an audio bit depth when you specify flac or pcm for the
+ // value of Audio:Codec.
+ //
+ // The bit depth of a sample is how many bits of information are included in
+ // the audio samples. The higher the bit depth, the better the audio, but the
+ // larger the file.
+ //
+ // Valid values are 16 and 24.
+ //
+ // The most common bit depth is 24.
+ BitDepth *string `type:"string"`
+
+ // You can only choose an audio bit order when you specify pcm for the value
+ // of Audio:Codec.
+ //
+ // The order the bits of a PCM sample are stored in.
+ //
+ // The supported value is LittleEndian.
+ BitOrder *string `type:"string"`
+
+ // You can only choose an audio profile when you specify AAC for the value of
+ // Audio:Codec.
+ //
+ // Specify the AAC profile for the output file. Elastic Transcoder supports
+ // the following profiles:
+ //
+ // auto: If you specify auto, Elastic Transcoder will select the profile
+ // based on the bit rate selected for the output file. AAC-LC: The most common
+ // AAC profile. Use for bit rates larger than 64 kbps. HE-AAC: Not supported
+ // on some older players and devices. Use for bit rates between 40 and 80 kbps.
+ // HE-AACv2: Not supported on some players and devices. Use for bit rates less
+ // than 48 kbps. All outputs in a Smooth playlist must have the same value
+ // for Profile.
+ //
+ // If you created any presets before AAC profiles were added, Elastic Transcoder
+ // automatically updated your presets to use AAC-LC. You can change the value
+ // as required.
+ Profile *string `type:"string"`
+
+ // You can only choose whether an audio sample is signed when you specify pcm
+ // for the value of Audio:Codec.
+ //
+ // Whether audio samples are represented with negative and positive numbers
+ // (signed) or only positive numbers (unsigned).
+ //
+ // The supported value is Signed.
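+ //
+ // Editor's note: a hedged sketch, not generated documentation, of a PCM
+ // configuration combining these options; it assumes the helper package
+ // github.com/aws/aws-sdk-go/aws:
+ //
+ // opts := &elastictranscoder.AudioCodecOptions{
+ // BitDepth: aws.String("16"),
+ // BitOrder: aws.String("LittleEndian"),
+ // Signed: aws.String("Signed"),
+ // }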
+ Signed *string `type:"string"` +} + +// String returns the string representation +func (s AudioCodecOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AudioCodecOptions) GoString() string { + return s.String() +} + +// Parameters required for transcoding audio. +type AudioParameters struct { + _ struct{} `type:"structure"` + + // The method of organizing audio channels and tracks. Use Audio:Channels to + // specify the number of channels in your output, and Audio:AudioPackingMode + // to specify the number of tracks and their relation to the channels. If you + // do not specify an Audio:AudioPackingMode, Elastic Transcoder uses SingleTrack. + // + // The following values are valid: + // + // SingleTrack, OneChannelPerTrack, and OneChannelPerTrackWithMosTo8Tracks + // + // When you specify SingleTrack, Elastic Transcoder creates a single track + // for your output. The track can have up to eight channels. Use SingleTrack + // for all non-mxf containers. + // + // The outputs of SingleTrack for a specific channel value and inputs are as + // follows: + // + // 0 channels with any input: Audio omitted from the output 1, 2, or auto + // channels with no audio input: Audio omitted from the output 1 channel + // with any input with audio: One track with one channel, downmixed if necessary + // 2 channels with one track with one channel: One track with two identical + // channels 2 or auto channels with two tracks with one channel each: One + // track with two channels 2 or auto channels with one track with two channels: + // One track with two channels 2 channels with one track with multiple channels: + // One track with two channels auto channels with one track with one channel: + // One track with one channel auto channels with one track with multiple channels: + // One track with multiple channels When you specify OneChannelPerTrack, Elastic + // Transcoder creates a new track for every channel in your output. Your output + // can have up to eight single-channel tracks. + // + // The outputs of OneChannelPerTrack for a specific channel value and inputs + // are as follows: + // + // 0 channels with any input: Audio omitted from the output 1, 2, or auto + // channels with no audio input: Audio omitted from the output 1 channel + // with any input with audio: One track with one channel, downmixed if necessary + // 2 channels with one track with one channel: Two tracks with one identical + // channel each 2 or auto channels with two tracks with one channel each: + // Two tracks with one channel each 2 or auto channels with one track with + // two channels: Two tracks with one channel each 2 channels with one track + // with multiple channels: Two tracks with one channel each auto channels + // with one track with one channel: One track with one channel auto channels + // with one track with multiple channels: Up to eight tracks with one channel + // each When you specify OneChannelPerTrackWithMosTo8Tracks, Elastic Transcoder + // creates eight single-channel tracks for your output. All tracks that do not + // contain audio data from an input channel are MOS, or Mit Out Sound, tracks. 
+ // + // The outputs of OneChannelPerTrackWithMosTo8Tracks for a specific channel + // value and inputs are as follows: + // + // 0 channels with any input: Audio omitted from the output 1, 2, or auto + // channels with no audio input: Audio omitted from the output 1 channel + // with any input with audio: One track with one channel, downmixed if necessary, + // plus six MOS tracks 2 channels with one track with one channel: Two tracks + // with one identical channel each, plus six MOS tracks 2 or auto channels + // with two tracks with one channel each: Two tracks with one channel each, + // plus six MOS tracks 2 or auto channels with one track with two channels: + // Two tracks with one channel each, plus six MOS tracks 2 channels with one + // track with multiple channels: Two tracks with one channel each, plus six + // MOS tracks auto channels with one track with one channel: One track with + // one channel, plus seven MOS tracks auto channels with one track with multiple + // channels: Up to eight tracks with one channel each, plus MOS tracks until + // there are eight tracks in all + AudioPackingMode *string `type:"string"` + + // The bit rate of the audio stream in the output file, in kilobits/second. + // Enter an integer between 64 and 320, inclusive. + BitRate *string `type:"string"` + + // The number of audio channels in the output file. The following values are + // valid: + // + // auto, 0, 1, 2 + // + // One channel carries the information played by a single speaker. For example, + // a stereo track with two channels sends one channel to the left speaker, and + // the other channel to the right speaker. The output channels are organized + // into tracks. If you want Elastic Transcoder to automatically detect the number + // of audio channels in the input file and use that value for the output file, + // select auto. + // + // The output of a specific channel value and inputs are as follows: + // + // auto channel specified, with any input: Pass through up to eight input + // channels. 0 channels specified, with any input: Audio omitted from the output. + // 1 channel specified, with at least one input channel: Mono sound. 2 channels + // specified, with any input: Two identical mono channels or stereo. For more + // information about tracks, see Audio:AudioPackingMode. For more information + // about how Elastic Transcoder organizes channels and tracks, see Audio:AudioPackingMode. + Channels *string `type:"string"` + + // The audio codec for the output file. Valid values include aac, flac, mp2, + // mp3, pcm, and vorbis. + Codec *string `type:"string"` + + // If you specified AAC for Audio:Codec, this is the AAC compression profile + // to use. Valid values include: + // + // auto, AAC-LC, HE-AAC, HE-AACv2 + // + // If you specify auto, Elastic Transcoder chooses a profile based on the bit + // rate of the output file. + CodecOptions *AudioCodecOptions `type:"structure"` + + // The sample rate of the audio stream in the output file, in Hertz. Valid values + // include: + // + // auto, 22050, 32000, 44100, 48000, 96000 + // + // If you specify auto, Elastic Transcoder automatically detects the sample + // rate. + SampleRate *string `type:"string"` +} + +// String returns the string representation +func (s AudioParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AudioParameters) GoString() string { + return s.String() +} + +// The CancelJobRequest structure. 
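+//
+// Editor's note: a hedged sketch, not generated documentation; the job ID
+// and the svc client are hypothetical:
+//
+// _, err := svc.CancelJob(&elastictranscoder.CancelJobInput{
+// Id: aws.String("3333333333333-abcde3"),
+// })
+// if err != nil {
+// fmt.Println(err.Error())
+// }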
+type CancelJobInput struct { + _ struct{} `type:"structure"` + + // The identifier of the job that you want to cancel. + // + // To get a list of the jobs (including their jobId) that have a status of + // Submitted, use the ListJobsByStatus API action. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CancelJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CancelJobInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The response body contains a JSON object. If the job is successfully canceled, +// the value of Success is true. +type CancelJobOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CancelJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelJobOutput) GoString() string { + return s.String() +} + +// The file format of the output captions. If you leave this value blank, Elastic +// Transcoder returns an error. +type CaptionFormat struct { + _ struct{} `type:"structure"` + + // The encryption settings, if any, that you want Elastic Transcoder to apply + // to your caption formats. + Encryption *Encryption `type:"structure"` + + // The format you specify determines whether Elastic Transcoder generates an + // embedded or sidecar caption for this output. + // + // Valid Embedded Caption Formats: + // + // for FLAC: None + // + // For MP3: None + // + // For MP4: mov-text + // + // For MPEG-TS: None + // + // For ogg: None + // + // For webm: None + // + // Valid Sidecar Caption Formats: Elastic Transcoder supports dfxp (first + // div element only), scc, srt, and webvtt. If you want ttml or smpte-tt compatible + // captions, specify dfxp as your output format. + // + // For FMP4: dfxp + // + // Non-FMP4 outputs: All sidecar types + // + // fmp4 captions have an extension of .ismt + Format *string `type:"string"` + + // The prefix for caption filenames, in the form description-{language}, where: + // + // description is a description of the video. {language} is a literal value + // that Elastic Transcoder replaces with the two- or three-letter code for the + // language of the caption in the output file names. If you don't include {language} + // in the file name pattern, Elastic Transcoder automatically appends "{language}" + // to the value that you specify for the description. In addition, Elastic Transcoder + // automatically appends the count to the end of the segment files. + // + // For example, suppose you're transcoding into srt format. When you enter + // "Sydney-{language}-sunrise", and the language of the captions is English + // (en), the name of the first caption file will be Sydney-en-sunrise00000.srt. 
+ Pattern *string `type:"string"` +} + +// String returns the string representation +func (s CaptionFormat) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CaptionFormat) GoString() string { + return s.String() +} + +// A source file for the input sidecar captions used during the transcoding +// process. +type CaptionSource struct { + _ struct{} `type:"structure"` + + // The encryption settings, if any, that you want Elastic Transcoder to apply + // to your caption sources. + Encryption *Encryption `type:"structure"` + + // The name of the sidecar caption file that you want Elastic Transcoder to + // include in the output file. + Key *string `min:"1" type:"string"` + + // The label of the caption shown in the player when choosing a language. We + // recommend that you put the caption language name here, in the language of + // the captions. + Label *string `min:"1" type:"string"` + + // A string that specifies the language of the caption. Specify this as one + // of: + // + // 2-character ISO 639-1 code + // + // 3-character ISO 639-2 code + // + // For more information on ISO language codes and language names, see the + // List of ISO 639-1 codes. + Language *string `min:"1" type:"string"` + + // For clip generation or captions that do not start at the same time as the + // associated video file, the TimeOffset tells Elastic Transcoder how much of + // the video to encode before including captions. + // + // Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss. + TimeOffset *string `type:"string"` +} + +// String returns the string representation +func (s CaptionSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CaptionSource) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CaptionSource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CaptionSource"} + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Label != nil && len(*s.Label) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Label", 1)) + } + if s.Language != nil && len(*s.Language) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Language", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The captions to be created, if any. +type Captions struct { + _ struct{} `type:"structure"` + + // The array of file formats for the output captions. If you leave this value + // blank, Elastic Transcoder returns an error. + CaptionFormats []*CaptionFormat `type:"list"` + + // Source files for the input sidecar captions used during the transcoding process. + // To omit all sidecar captions, leave CaptionSources blank. + CaptionSources []*CaptionSource `type:"list"` + + // A policy that determines how Elastic Transcoder handles the existence of + // multiple captions. + // + // MergeOverride: Elastic Transcoder transcodes both embedded and sidecar + // captions into outputs. If captions for a language are embedded in the input + // file and also appear in a sidecar file, Elastic Transcoder uses the sidecar + // captions and ignores the embedded captions for that language. + // + // MergeRetain: Elastic Transcoder transcodes both embedded and sidecar captions + // into outputs. 
If captions for a language are embedded in the input file and + // also appear in a sidecar file, Elastic Transcoder uses the embedded captions + // and ignores the sidecar captions for that language. If CaptionSources is + // empty, Elastic Transcoder omits all sidecar captions from the output files. + // + // Override: Elastic Transcoder transcodes only the sidecar captions that you + // specify in CaptionSources. + // + // MergePolicy cannot be null. + MergePolicy *string `type:"string"` +} + +// String returns the string representation +func (s Captions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Captions) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Captions) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Captions"} + if s.CaptionSources != nil { + for i, v := range s.CaptionSources { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CaptionSources", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Settings for one clip in a composition. All jobs in a playlist must have +// the same clip settings. +type Clip struct { + _ struct{} `type:"structure"` + + // Settings that determine when a clip begins and how long it lasts. + TimeSpan *TimeSpan `type:"structure"` +} + +// String returns the string representation +func (s Clip) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Clip) GoString() string { + return s.String() +} + +// The CreateJobRequest structure. +type CreateJobInput struct { + _ struct{} `type:"structure"` + + // A section of the request body that provides information about the file that + // is being transcoded. + Input *JobInput `type:"structure" required:"true"` + + // The CreateJobOutput structure. + Output *CreateJobOutput `type:"structure"` + + // The value, if any, that you want Elastic Transcoder to prepend to the names + // of all files that this job creates, including output files, thumbnails, and + // playlists. + OutputKeyPrefix *string `min:"1" type:"string"` + + // A section of the request body that provides information about the transcoded + // (target) files. We recommend that you use the Outputs syntax instead of the + // Output syntax. + Outputs []*CreateJobOutput `type:"list"` + + // The Id of the pipeline that you want Elastic Transcoder to use for transcoding. + // The pipeline determines several settings, including the Amazon S3 bucket + // from which Elastic Transcoder gets the files to transcode and the bucket + // into which Elastic Transcoder puts the transcoded files. + PipelineId *string `type:"string" required:"true"` + + // If you specify a preset in PresetId for which the value of Container is fmp4 + // (Fragmented MP4) or ts (MPEG-TS), Playlists contains information about the + // master playlists that you want Elastic Transcoder to create. + // + // The maximum number of master playlists in a job is 30. + Playlists []*CreateJobPlaylist `type:"list"` + + // User-defined metadata that you want to associate with an Elastic Transcoder + // job. You specify metadata in key/value pairs, and you can add up to 10 key/value + // pairs per job. Elastic Transcoder does not guarantee that key/value pairs + // will be returned in the same order in which you specify them. 
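+ //
+ // Editor's note: a hedged illustration, not generated documentation; the
+ // keys and values are hypothetical:
+ //
+ // UserMetadata: map[string]*string{
+ // "project": aws.String("cooking-show"),
+ // "episode": aws.String("42"),
+ // },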
+ UserMetadata map[string]*string `type:"map"` +} + +// String returns the string representation +func (s CreateJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateJobInput"} + if s.Input == nil { + invalidParams.Add(request.NewErrParamRequired("Input")) + } + if s.OutputKeyPrefix != nil && len(*s.OutputKeyPrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("OutputKeyPrefix", 1)) + } + if s.PipelineId == nil { + invalidParams.Add(request.NewErrParamRequired("PipelineId")) + } + if s.Input != nil { + if err := s.Input.Validate(); err != nil { + invalidParams.AddNested("Input", err.(request.ErrInvalidParams)) + } + } + if s.Output != nil { + if err := s.Output.Validate(); err != nil { + invalidParams.AddNested("Output", err.(request.ErrInvalidParams)) + } + } + if s.Outputs != nil { + for i, v := range s.Outputs { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Outputs", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Playlists != nil { + for i, v := range s.Playlists { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Playlists", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The CreateJobOutput structure. +type CreateJobOutput struct { + _ struct{} `type:"structure"` + + // Information about the album art that you want Elastic Transcoder to add to + // the file during transcoding. You can specify up to twenty album artworks + // for each output. Settings for each artwork must be defined in the job for + // the current output. + AlbumArt *JobAlbumArt `type:"structure"` + + // You can configure Elastic Transcoder to transcode captions, or subtitles, + // from one format to another. All captions must be in UTF-8. Elastic Transcoder + // supports two types of captions: + // + // Embedded: Embedded captions are included in the same file as the audio + // and video. Elastic Transcoder supports only one embedded caption per language, + // to a maximum of 300 embedded captions per file. + // + // Valid input values include: CEA-608 (EIA-608, first non-empty channel only), + // CEA-708 (EIA-708, first non-empty channel only), and mov-text + // + // Valid outputs include: mov-text + // + // Elastic Transcoder supports a maximum of one embedded format per output. + // + // Sidecar: Sidecar captions are kept in a separate metadata file from the + // audio and video data. Sidecar captions require a player that is capable of + // understanding the relationship between the video file and the sidecar file. + // Elastic Transcoder supports only one sidecar caption per language, to a maximum + // of 20 sidecar captions per file. + // + // Valid input values include: dfxp (first div element only), ebu-tt, scc, + // smpt, srt, ttml (first div element only), and webvtt + // + // Valid outputs include: dfxp (first div element only), scc, srt, and webvtt. + // + // If you want ttml or smpte-tt compatible captions, specify dfxp as your + // output format. 
+ //
+ // Elastic Transcoder does not support OCR (Optical Character Recognition),
+ // does not accept pictures as a valid input for captions, and is not available
+ // for audio-only transcoding. Elastic Transcoder does not preserve text formatting
+ // (for example, italics) during the transcoding process.
+ //
+ // To remove captions or leave the captions empty, set Captions to null. To
+ // pass through existing captions unchanged, set the MergePolicy to MergeRetain,
+ // and pass in a null CaptionSources array.
+ //
+ // For more information on embedded files, see the Subtitles Wikipedia page.
+ //
+ // For more information on sidecar files, see the Extensible Metadata Platform
+ // and Sidecar file Wikipedia pages.
+ Captions *Captions `type:"structure"`
+
+ // You can create an output file that contains an excerpt from the input file.
+ // This excerpt, called a clip, can come from the beginning, middle, or end
+ // of the file. The Composition object contains settings for the clips that
+ // make up an output file. For the current release, you can only specify settings
+ // for a single clip per output file. The Composition object cannot be null.
+ Composition []*Clip `type:"list"`
+
+ // You can specify encryption settings for any output files that you want to
+ // use for a transcoding job. This includes the output file and any watermarks,
+ // thumbnails, album art, or captions that you want to use. You must specify
+ // encryption settings for each file individually.
+ Encryption *Encryption `type:"structure"`
+
+ // The name to assign to the transcoded file. Elastic Transcoder saves the file
+ // in the Amazon S3 bucket specified by the OutputBucket object in the pipeline
+ // that is specified by the pipeline ID. If a file with the specified name already
+ // exists in the output bucket, the job fails.
+ Key *string `min:"1" type:"string"`
+
+ // The Id of the preset to use for this job. The preset determines the audio,
+ // video, and thumbnail settings that Elastic Transcoder uses for transcoding.
+ PresetId *string `type:"string"`
+
+ // The number of degrees clockwise by which you want Elastic Transcoder to rotate
+ // the output relative to the input. Enter one of the following values: auto,
+ // 0, 90, 180, 270. The value auto generally works only if the file that you're
+ // transcoding contains rotation metadata.
+ Rotate *string `type:"string"`
+
+ // (Outputs in Fragmented MP4 or MPEG-TS format only.) If you specify a preset
+ // in PresetId for which the value of Container is fmp4 (Fragmented MP4) or
+ // ts (MPEG-TS), SegmentDuration is the target maximum duration of each segment
+ // in seconds. For HLSv3 format playlists, each media segment is stored in a
+ // separate .ts file. For HLSv4 and Smooth playlists, all media segments for
+ // an output are stored in a single file. Each segment is approximately the
+ // length of the SegmentDuration, though individual segments might be shorter
+ // or longer.
+ //
+ // The range of valid values is 1 to 60 seconds. If the duration of the video
+ // is not evenly divisible by SegmentDuration, the duration of the last segment
+ // is the remainder of total length/SegmentDuration.
+ //
+ // Elastic Transcoder creates an output-specific playlist for each HLS output
+ // that you specify in OutputKeys. To add an output to the master playlist
+ // for this job, include it in the OutputKeys of the associated playlist.
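Pulling the Captions and SegmentDuration descriptions above into one place, here is a hedged sketch of a single HLS output with one sidecar caption source; the preset ID and keys are placeholders, and the CaptionFormat type with its Format field is defined elsewhere in this file:

    output := &elastictranscoder.CreateJobOutput{
        Key:             aws.String("hls/lasagna-1m"),       // hypothetical output key
        PresetId:        aws.String("1351620000001-200035"), // placeholder; use a real fmp4/ts preset ID
        SegmentDuration: aws.String("10"),                   // 1-60 seconds, fmp4/ts presets only
        Captions: &elastictranscoder.Captions{
            MergePolicy: aws.String("MergeRetain"),
            CaptionSources: []*elastictranscoder.CaptionSource{{
                Key:      aws.String("captions/lasagna-en.srt"), // sidecar file in the input bucket
                Label:    aws.String("English"),
                Language: aws.String("en"), // 2-character ISO 639-1 code
            }},
            CaptionFormats: []*elastictranscoder.CaptionFormat{{
                Format: aws.String("webvtt"), // one of the valid sidecar output formats
            }},
        },
    }
    _ = output // append to CreateJobInput.Outputs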
+ SegmentDuration *string `type:"string"` + + // The encryption settings, if any, that you want Elastic Transcoder to apply + // to your thumbnail. + ThumbnailEncryption *Encryption `type:"structure"` + + // Whether you want Elastic Transcoder to create thumbnails for your videos + // and, if so, how you want Elastic Transcoder to name the files. + // + // If you don't want Elastic Transcoder to create thumbnails, specify "". + // + // If you do want Elastic Transcoder to create thumbnails, specify the information + // that you want to include in the file name for each thumbnail. You can specify + // the following values in any sequence: + // + // {count} (Required): If you want to create thumbnails, you must include + // {count} in the ThumbnailPattern object. Wherever you specify {count}, Elastic + // Transcoder adds a five-digit sequence number (beginning with 00001) to thumbnail + // file names. The number indicates where a given thumbnail appears in the sequence + // of thumbnails for a transcoded file. + // + // If you specify a literal value and/or {resolution} but you omit {count}, + // Elastic Transcoder returns a validation error and does not create the job. + // Literal values (Optional): You can specify literal values anywhere in + // the ThumbnailPattern object. For example, you can include them as a file + // name prefix or as a delimiter between {resolution} and {count}. + // + // {resolution} (Optional): If you want Elastic Transcoder to include the + // resolution in the file name, include {resolution} in the ThumbnailPattern + // object. + // + // When creating thumbnails, Elastic Transcoder automatically saves the files + // in the format (.jpg or .png) that appears in the preset that you specified + // in the PresetID value of CreateJobOutput. Elastic Transcoder also appends + // the applicable file name extension. + ThumbnailPattern *string `type:"string"` + + // Information about the watermarks that you want Elastic Transcoder to add + // to the video during transcoding. You can specify up to four watermarks for + // each output. Settings for each watermark must be defined in the preset for + // the current output. + Watermarks []*JobWatermark `type:"list"` +} + +// String returns the string representation +func (s CreateJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateJobOutput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateJobOutput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateJobOutput"} + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.AlbumArt != nil { + if err := s.AlbumArt.Validate(); err != nil { + invalidParams.AddNested("AlbumArt", err.(request.ErrInvalidParams)) + } + } + if s.Captions != nil { + if err := s.Captions.Validate(); err != nil { + invalidParams.AddNested("Captions", err.(request.ErrInvalidParams)) + } + } + if s.Watermarks != nil { + for i, v := range s.Watermarks { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Watermarks", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Information about the master playlist. +type CreateJobPlaylist struct { + _ struct{} `type:"structure"` + + // The format of the output playlist. 
Valid formats include HLSv3, HLSv4, and + // Smooth. + Format *string `type:"string"` + + // The HLS content protection settings, if any, that you want Elastic Transcoder + // to apply to the output files associated with this playlist. + HlsContentProtection *HlsContentProtection `type:"structure"` + + // The name that you want Elastic Transcoder to assign to the master playlist, + // for example, nyc-vacation.m3u8. If the name includes a / character, the section + // of the name before the last / must be identical for all Name objects. If + // you create more than one master playlist, the values of all Name objects + // must be unique. + // + // Note: Elastic Transcoder automatically appends the relevant file extension + // to the file name (.m3u8 for HLSv3 and HLSv4 playlists, and .ism and .ismc + // for Smooth playlists). If you include a file extension in Name, the file + // name will have two extensions. + Name *string `min:"1" type:"string"` + + // For each output in this job that you want to include in a master playlist, + // the value of the Outputs:Key object. + // + // If your output is not HLS or does not have a segment duration set, the + // name of the output file is a concatenation of OutputKeyPrefix and Outputs:Key: + // + // OutputKeyPrefixOutputs:Key + // + // If your output is HLSv3 and has a segment duration set, or is not included + // in a playlist, Elastic Transcoder creates an output playlist file with a + // file extension of .m3u8, and a series of .ts files that include a five-digit + // sequential counter beginning with 00000: + // + // OutputKeyPrefixOutputs:Key.m3u8 + // + // OutputKeyPrefixOutputs:Key00000.ts + // + // If your output is HLSv4, has a segment duration set, and is included in + // an HLSv4 playlist, Elastic Transcoder creates an output playlist file with + // a file extension of _v4.m3u8. If the output is video, Elastic Transcoder + // also creates an output file with an extension of _iframe.m3u8: + // + // OutputKeyPrefixOutputs:Key_v4.m3u8 + // + // OutputKeyPrefixOutputs:Key_iframe.m3u8 + // + // OutputKeyPrefixOutputs:Key.ts + // + // Elastic Transcoder automatically appends the relevant file extension to + // the file name. If you include a file extension in Output Key, the file name + // will have two extensions. + // + // If you include more than one output in a playlist, any segment duration + // settings, clip settings, or caption settings must be the same for all outputs + // in the playlist. For Smooth playlists, the Audio:Profile, Video:Profile, + // and Video:FrameRate to Video:KeyframesMaxDist ratio must be the same for + // all outputs. + OutputKeys []*string `type:"list"` + + // The DRM settings, if any, that you want Elastic Transcoder to apply to the + // output files associated with this playlist. + PlayReadyDrm *PlayReadyDrm `type:"structure"` +} + +// String returns the string representation +func (s CreateJobPlaylist) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateJobPlaylist) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateJobPlaylist) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateJobPlaylist"} + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.PlayReadyDrm != nil { + if err := s.PlayReadyDrm.Validate(); err != nil { + invalidParams.AddNested("PlayReadyDrm", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The CreateJobResponse structure. +type CreateJobResponse struct { + _ struct{} `type:"structure"` + + // A section of the response body that provides information about the job that + // is created. + Job *Job `type:"structure"` +} + +// String returns the string representation +func (s CreateJobResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateJobResponse) GoString() string { + return s.String() +} + +// The CreatePipelineRequest structure. +type CreatePipelineInput struct { + _ struct{} `type:"structure"` + + // The AWS Key Management Service (AWS KMS) key that you want to use with this + // pipeline. + // + // If you use either S3 or S3-AWS-KMS as your Encryption:Mode, you don't need + // to provide a key with your job because a default key, known as an AWS-KMS + // key, is created for you automatically. You need to provide an AWS-KMS key + // only if you want to use a non-default AWS-KMS key, or if you are using an + // Encryption:Mode of AES-PKCS7, AES-CTR, or AES-GCM. + AwsKmsKeyArn *string `type:"string"` + + // The optional ContentConfig object specifies information about the Amazon + // S3 bucket in which you want Elastic Transcoder to save transcoded files and + // playlists: which bucket to use, which users you want to have access to the + // files, the type of access you want users to have, and the storage class that + // you want to assign to the files. + // + // If you specify values for ContentConfig, you must also specify values for + // ThumbnailConfig. + // + // If you specify values for ContentConfig and ThumbnailConfig, omit the OutputBucket + // object. + // + // Bucket: The Amazon S3 bucket in which you want Elastic Transcoder to save + // transcoded files and playlists. Permissions (Optional): The Permissions + // object specifies which users you want to have access to transcoded files + // and the type of access you want them to have. You can grant permissions to + // a maximum of 30 users and/or predefined Amazon S3 groups. Grantee Type: + // Specify the type of value that appears in the Grantee object: Canonical: + // The value in the Grantee object is either the canonical user ID for an AWS + // account or an origin access identity for an Amazon CloudFront distribution. + // For more information about canonical user IDs, see Access Control List (ACL) + // Overview in the Amazon Simple Storage Service Developer Guide. For more information + // about using CloudFront origin access identities to require that users use + // CloudFront URLs instead of Amazon S3 URLs, see Using an Origin Access Identity + // to Restrict Access to Your Amazon S3 Content. A canonical user ID is not + // the same as an AWS account number. Email: The value in the Grantee object + // is the registered email address of an AWS account. Group: The value in the + // Grantee object is one of the following predefined Amazon S3 groups: AllUsers, + // AuthenticatedUsers, or LogDelivery. 
Grantee: The AWS user or group that + // you want to have access to transcoded files and playlists. To identify the + // user or group, you can specify the canonical user ID for an AWS account, + // an origin access identity for a CloudFront distribution, the registered email + // address of an AWS account, or a predefined Amazon S3 group Access: The + // permission that you want to give to the AWS user that you specified in Grantee. + // Permissions are granted on the files that Elastic Transcoder adds to the + // bucket, including playlists and video files. Valid values include: READ: + // The grantee can read the objects and metadata for objects that Elastic Transcoder + // adds to the Amazon S3 bucket. READ_ACP: The grantee can read the object + // ACL for objects that Elastic Transcoder adds to the Amazon S3 bucket. WRITE_ACP: + // The grantee can write the ACL for the objects that Elastic Transcoder adds + // to the Amazon S3 bucket. FULL_CONTROL: The grantee has READ, READ_ACP, and + // WRITE_ACP permissions for the objects that Elastic Transcoder adds to the + // Amazon S3 bucket. StorageClass: The Amazon S3 storage class, Standard + // or ReducedRedundancy, that you want Elastic Transcoder to assign to the video + // files and playlists that it stores in your Amazon S3 bucket. + ContentConfig *PipelineOutputConfig `type:"structure"` + + // The Amazon S3 bucket in which you saved the media files that you want to + // transcode. + InputBucket *string `type:"string" required:"true"` + + // The name of the pipeline. We recommend that the name be unique within the + // AWS account, but uniqueness is not enforced. + // + // Constraints: Maximum 40 characters. + Name *string `min:"1" type:"string" required:"true"` + + // The Amazon Simple Notification Service (Amazon SNS) topic that you want to + // notify to report job status. + // + // To receive notifications, you must also subscribe to the new topic in the + // Amazon SNS console. Progressing: The topic ARN for the Amazon Simple Notification + // Service (Amazon SNS) topic that you want to notify when Elastic Transcoder + // has started to process a job in this pipeline. This is the ARN that Amazon + // SNS returned when you created the topic. For more information, see Create + // a Topic in the Amazon Simple Notification Service Developer Guide. Completed: + // The topic ARN for the Amazon SNS topic that you want to notify when Elastic + // Transcoder has finished processing a job in this pipeline. This is the ARN + // that Amazon SNS returned when you created the topic. Warning: The topic + // ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder + // encounters a warning condition while processing a job in this pipeline. This + // is the ARN that Amazon SNS returned when you created the topic. Error: The + // topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder + // encounters an error condition while processing a job in this pipeline. This + // is the ARN that Amazon SNS returned when you created the topic. + Notifications *Notifications `type:"structure"` + + // The Amazon S3 bucket in which you want Elastic Transcoder to save the transcoded + // files. (Use this, or use ContentConfig:Bucket plus ThumbnailConfig:Bucket.) + // + // Specify this value when all of the following are true: You want to save + // transcoded files, thumbnails (if any), and playlists (if any) together in + // one bucket. 
+ // You do not want to specify the users or groups who have access
+ // to the transcoded files, thumbnails, and playlists. You do not want to specify
+ // the permissions that Elastic Transcoder grants to the files. When Elastic
+ // Transcoder saves files in OutputBucket, it grants full control over the files
+ // only to the AWS account that owns the role that is specified by Role. You
+ // want to associate the transcoded files and thumbnails with the Amazon S3
+ // Standard storage class.
+ //
+ // If you want to save transcoded files and playlists in one bucket and thumbnails
+ // in another bucket, to specify which users can access the transcoded files or
+ // the permissions the users have, or to change the Amazon S3 storage class, omit
+ // OutputBucket and specify values for ContentConfig and ThumbnailConfig instead.
+ OutputBucket *string `type:"string"`
+
+ // The IAM Amazon Resource Name (ARN) for the role that you want Elastic Transcoder
+ // to use to create the pipeline.
+ Role *string `type:"string" required:"true"`
+
+ // The ThumbnailConfig object specifies several values, including the Amazon
+ // S3 bucket in which you want Elastic Transcoder to save thumbnail files, which
+ // users you want to have access to the files, the type of access you want users
+ // to have, and the storage class that you want to assign to the files.
+ //
+ // If you specify values for ContentConfig, you must also specify values for
+ // ThumbnailConfig even if you don't want to create thumbnails.
+ //
+ // If you specify values for ContentConfig and ThumbnailConfig, omit the OutputBucket
+ // object.
+ //
+ // Bucket: The Amazon S3 bucket in which you want Elastic Transcoder to save
+ // thumbnail files. Permissions (Optional): The Permissions object specifies
+ // which users and/or predefined Amazon S3 groups you want to have access to
+ // thumbnail files, and the type of access you want them to have. You can grant
+ // permissions to a maximum of 30 users and/or predefined Amazon S3 groups.
+ // GranteeType: Specify the type of value that appears in the Grantee object:
+ // Canonical: The value in the Grantee object is either the canonical user
+ // ID for an AWS account or an origin access identity for an Amazon CloudFront
+ // distribution. A canonical user ID is not the same as an AWS account number.
+ // Email: The value in the Grantee object is the registered email address
+ // of an AWS account. Group: The value in the Grantee object is one of the
+ // following predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, or LogDelivery.
+ // Grantee: The AWS user or group that you want to have access to thumbnail
+ // files. To identify the user or group, you can specify the canonical user
+ // ID for an AWS account, an origin access identity for a CloudFront distribution,
+ // the registered email address of an AWS account, or a predefined Amazon S3
+ // group. Access: The permission that you want to give to the AWS user that
+ // you specified in Grantee. Permissions are granted on the thumbnail files
+ // that Elastic Transcoder adds to the bucket. Valid values include: READ:
+ // The grantee can read the thumbnails and metadata for objects that Elastic
+ // Transcoder adds to the Amazon S3 bucket. READ_ACP: The grantee can read
+ // the object ACL for thumbnails that Elastic Transcoder adds to the Amazon
+ // S3 bucket. WRITE_ACP: The grantee can write the ACL for the thumbnails
+ // that Elastic Transcoder adds to the Amazon S3 bucket.
FULL_CONTROL: The + // grantee has READ, READ_ACP, and WRITE_ACP permissions for the thumbnails + // that Elastic Transcoder adds to the Amazon S3 bucket. StorageClass: The + // Amazon S3 storage class, Standard or ReducedRedundancy, that you want Elastic + // Transcoder to assign to the thumbnails that it stores in your Amazon S3 bucket. + ThumbnailConfig *PipelineOutputConfig `type:"structure"` +} + +// String returns the string representation +func (s CreatePipelineInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePipelineInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreatePipelineInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreatePipelineInput"} + if s.InputBucket == nil { + invalidParams.Add(request.NewErrParamRequired("InputBucket")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Role == nil { + invalidParams.Add(request.NewErrParamRequired("Role")) + } + if s.ContentConfig != nil { + if err := s.ContentConfig.Validate(); err != nil { + invalidParams.AddNested("ContentConfig", err.(request.ErrInvalidParams)) + } + } + if s.ThumbnailConfig != nil { + if err := s.ThumbnailConfig.Validate(); err != nil { + invalidParams.AddNested("ThumbnailConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// When you create a pipeline, Elastic Transcoder returns the values that you +// specified in the request. +type CreatePipelineOutput struct { + _ struct{} `type:"structure"` + + // A section of the response body that provides information about the pipeline + // that is created. + Pipeline *Pipeline `type:"structure"` + + // Elastic Transcoder returns a warning if the resources used by your pipeline + // are not in the same region as the pipeline. + // + // Using resources in the same region, such as your Amazon S3 buckets, Amazon + // SNS notification topics, and AWS KMS key, reduces processing time and prevents + // cross-regional charges. + Warnings []*Warning `type:"list"` +} + +// String returns the string representation +func (s CreatePipelineOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePipelineOutput) GoString() string { + return s.String() +} + +// The CreatePresetRequest structure. +type CreatePresetInput struct { + _ struct{} `type:"structure"` + + // A section of the request body that specifies the audio parameters. + Audio *AudioParameters `type:"structure"` + + // The container type for the output file. Valid values include flac, flv, fmp4, + // gif, mp3, mp4, mpg, mxf, oga, ogg, ts, and webm. + Container *string `type:"string" required:"true"` + + // A description of the preset. + Description *string `type:"string"` + + // The name of the preset. We recommend that the name be unique within the AWS + // account, but uniqueness is not enforced. + Name *string `min:"1" type:"string" required:"true"` + + // A section of the request body that specifies the thumbnail parameters, if + // any. + Thumbnails *Thumbnails `type:"structure"` + + // A section of the request body that specifies the video parameters. 
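Tying the CreatePipelineInput fields above together, a minimal sketch of the simple configuration that uses OutputBucket rather than ContentConfig and ThumbnailConfig. It assumes the usual aws, session, elastictranscoder, and log imports; the region, bucket names, and role ARN are placeholders:

    sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
    svc := elastictranscoder.New(sess)
    resp, err := svc.CreatePipeline(&elastictranscoder.CreatePipelineInput{
        Name:         aws.String("my-transcoding-pipeline"), // maximum 40 characters
        InputBucket:  aws.String("my-input-bucket"),         // hypothetical
        OutputBucket: aws.String("my-output-bucket"),        // or use ContentConfig + ThumbnailConfig
        Role:         aws.String("arn:aws:iam::123456789012:role/Elastic_Transcoder_Default_Role"),
    })
    if err != nil {
        log.Fatal(err)
    }
    for _, w := range resp.Warnings { // e.g. cross-region SNS topics or buckets
        log.Printf("warning %s: %s", aws.StringValue(w.Code), aws.StringValue(w.Message))
    }

The Warning type with its Code and Message fields is defined elsewhere in this file.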
+ Video *VideoParameters `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreatePresetInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreatePresetInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreatePresetInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreatePresetInput"}
+ if s.Container == nil {
+ invalidParams.Add(request.NewErrParamRequired("Container"))
+ }
+ if s.Name == nil {
+ invalidParams.Add(request.NewErrParamRequired("Name"))
+ }
+ if s.Name != nil && len(*s.Name) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Name", 1))
+ }
+ if s.Video != nil {
+ if err := s.Video.Validate(); err != nil {
+ invalidParams.AddNested("Video", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// The CreatePresetResponse structure.
+type CreatePresetOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A section of the response body that provides information about the preset
+ // that is created.
+ Preset *Preset `type:"structure"`
+
+ // If the preset settings don't comply with the standards for the video codec
+ // but Elastic Transcoder created the preset, this message explains the reason
+ // the preset settings don't meet the standard. Elastic Transcoder created the
+ // preset because the settings might produce acceptable output.
+ Warning *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CreatePresetOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreatePresetOutput) GoString() string {
+ return s.String()
+}
+
+// The DeletePipelineRequest structure.
+type DeletePipelineInput struct {
+ _ struct{} `type:"structure"`
+
+ // The identifier of the pipeline that you want to delete.
+ Id *string `location:"uri" locationName:"Id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeletePipelineInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeletePipelineInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeletePipelineInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeletePipelineInput"}
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// The DeletePipelineResponse structure.
+type DeletePipelineOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeletePipelineOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeletePipelineOutput) GoString() string {
+ return s.String()
+}
+
+// The DeletePresetRequest structure.
+type DeletePresetInput struct {
+ _ struct{} `type:"structure"`
+
+ // The identifier of the preset that you want to delete.
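A short sketch of the two delete operations defined above, reusing the svc client from the earlier pipeline sketch; both IDs are hypothetical, and the service refuses to delete a pipeline that is currently in use:

    if _, err := svc.DeletePipeline(&elastictranscoder.DeletePipelineInput{
        Id: aws.String("1111111111111-abcde1"), // hypothetical pipeline ID
    }); err != nil {
        log.Printf("delete pipeline: %v", err)
    }
    if _, err := svc.DeletePreset(&elastictranscoder.DeletePresetInput{
        Id: aws.String("1351620000001-900000"), // hypothetical preset ID
    }); err != nil {
        log.Printf("delete preset: %v", err)
    }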
+ Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePresetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePresetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeletePresetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeletePresetInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The DeletePresetResponse structure. +type DeletePresetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePresetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePresetOutput) GoString() string { + return s.String() +} + +// The detected properties of the input file. Elastic Transcoder identifies +// these values from the input file. +type DetectedProperties struct { + _ struct{} `type:"structure"` + + // The detected duration of the input file, in milliseconds. + DurationMillis *int64 `type:"long"` + + // The detected file size of the input file, in bytes. + FileSize *int64 `type:"long"` + + // The detected frame rate of the input file, in frames per second. + FrameRate *string `type:"string"` + + // The detected height of the input file, in pixels. + Height *int64 `type:"integer"` + + // The detected width of the input file, in pixels. + Width *int64 `type:"integer"` +} + +// String returns the string representation +func (s DetectedProperties) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetectedProperties) GoString() string { + return s.String() +} + +// The encryption settings, if any, that are used for decrypting your input +// files or encrypting your output files. If your input file is encrypted, you +// must specify the mode that Elastic Transcoder will use to decrypt your file, +// otherwise you must specify the mode you want Elastic Transcoder to use to +// encrypt your output files. +type Encryption struct { + _ struct{} `type:"structure"` + + // The series of random bits created by a random bit generator, unique for every + // encryption operation, that you used to encrypt your input files or that you + // want Elastic Transcoder to use to encrypt your output files. The initialization + // vector must be base64-encoded, and it must be exactly 16 bytes long before + // being base64-encoded. + InitializationVector *string `type:"string"` + + // The data encryption key that you want Elastic Transcoder to use to encrypt + // your output file, or that was used to encrypt your input file. The key must + // be base64-encoded and it must be one of the following bit lengths before + // being base64-encoded: + // + // 128, 192, or 256. + // + // The key must also be encrypted by using the Amazon Key Management Service. + Key *string `type:"string"` + + // The MD5 digest of the key that you used to encrypt your input file, or that + // you want Elastic Transcoder to use to encrypt your output file. Elastic Transcoder + // uses the key digest as a checksum to make sure your key was not corrupted + // in transit. The key MD5 must be base64-encoded, and it must be exactly 16 + // bytes long before being base64-encoded. 
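The KeyMd5 value described above is just the MD5 digest of the raw (pre-base64) key, itself base64-encoded. A self-contained sketch that derives both strings for a 128-bit key; note that the service additionally requires the key to be encrypted with AWS KMS before you submit it, which this sketch does not do:

    package main

    import (
        "crypto/md5"
        "crypto/rand"
        "encoding/base64"
        "fmt"
    )

    func main() {
        key := make([]byte, 16) // 128-bit data key; 192- and 256-bit keys are also allowed
        if _, err := rand.Read(key); err != nil {
            panic(err)
        }
        digest := md5.Sum(key) // 16-byte MD5 of the raw key, per the KeyMd5 requirement
        fmt.Println("Key:   ", base64.StdEncoding.EncodeToString(key))
        fmt.Println("KeyMd5:", base64.StdEncoding.EncodeToString(digest[:]))
    }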
+ KeyMd5 *string `type:"string"`
+
+ // The specific server-side encryption mode that you want Elastic Transcoder
+ // to use when decrypting your input files or encrypting your output files.
+ // Elastic Transcoder supports the following options:
+ //
+ // S3: Amazon S3 creates and manages the keys used for encrypting your files.
+ //
+ // S3-AWS-KMS: Amazon S3 calls the AWS Key Management Service, which creates
+ // and manages the keys that are used for encrypting your files. If you specify
+ // S3-AWS-KMS and you don't want to use the default key, you must add the AWS-KMS
+ // key that you want to use to your pipeline.
+ //
+ // AES-CBC-PKCS7: A padded cipher-block mode of operation originally used for
+ // HLS files.
+ //
+ // AES-CTR: AES Counter Mode.
+ //
+ // AES-GCM: AES Galois Counter Mode, a mode of operation that is an authenticated
+ // encryption format, meaning that a file, key, or initialization vector that
+ // has been tampered with will fail the decryption process.
+ //
+ // For all three AES options, you must provide the following settings, which
+ // must be base64-encoded:
+ //
+ // Key
+ //
+ // Key MD5
+ //
+ // Initialization Vector
+ //
+ // For the AES modes, your private encryption keys and your unencrypted data
+ // are never stored by AWS; therefore, it is important that you safely manage
+ // your encryption keys. If you lose them, you won't be able to decrypt your
+ // data.
+ Mode *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Encryption) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Encryption) GoString() string {
+ return s.String()
+}
+
+// The HLS content protection settings, if any, that you want Elastic Transcoder
+// to apply to your output files.
+type HlsContentProtection struct {
+ _ struct{} `type:"structure"`
+
+ // If Elastic Transcoder is generating your key for you, you must leave this
+ // field blank.
+ //
+ // The series of random bits created by a random bit generator, unique for
+ // every encryption operation, that you want Elastic Transcoder to use to encrypt
+ // your output files. The initialization vector must be base64-encoded, and
+ // it must be exactly 16 bytes before being base64-encoded.
+ InitializationVector *string `type:"string"`
+
+ // If you want Elastic Transcoder to generate a key for you, leave this field
+ // blank.
+ //
+ // If you choose to supply your own key, you must encrypt the key by using
+ // AWS KMS. The key must be base64-encoded, and it must be one of the following
+ // bit lengths before being base64-encoded:
+ //
+ // 128, 192, or 256.
+ Key *string `type:"string"`
+
+ // If Elastic Transcoder is generating your key for you, you must leave this
+ // field blank.
+ //
+ // The MD5 digest of the key that you want Elastic Transcoder to use to encrypt
+ // your output file, and that you want Elastic Transcoder to use as a checksum
+ // to make sure your key was not corrupted in transit. The key MD5 must be base64-encoded,
+ // and it must be exactly 16 bytes before being base64-encoded.
+ KeyMd5 *string `type:"string"`
+
+ // Specify whether you want Elastic Transcoder to write your HLS license key
+ // to an Amazon S3 bucket. If you choose WithVariantPlaylists, LicenseAcquisitionUrl
+ // must be left blank and Elastic Transcoder writes your data key into the same
+ // bucket as the associated playlist.
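Given the fields above, the simplest HLS protection setup is to let Elastic Transcoder generate and store the data key itself; a sketch, with the method and storage-policy strings taken from the surrounding comments:

    protection := &elastictranscoder.HlsContentProtection{
        Method:           aws.String("aes-128"),              // the only documented value
        KeyStoragePolicy: aws.String("WithVariantPlaylists"), // key is written next to the playlist
        // Key, KeyMd5, and InitializationVector stay unset so the service generates them.
    }
    playlist := &elastictranscoder.CreateJobPlaylist{
        Format:               aws.String("HLSv3"),
        Name:                 aws.String("nyc-vacation"), // extension is appended automatically
        HlsContentProtection: protection,
    }
    _ = playlist // append to CreateJobInput.Playlists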
+ KeyStoragePolicy *string `type:"string"`
+
+ // The location of the license key required to decrypt your HLS playlist. The
+ // URL must be an absolute path, and is referenced in the URI attribute of the
+ // EXT-X-KEY metadata tag in the playlist file.
+ LicenseAcquisitionUrl *string `type:"string"`
+
+ // The content protection method for your output. The only valid value is: aes-128.
+ //
+ // This value will be written into the method attribute of the EXT-X-KEY metadata
+ // tag in the output playlist.
+ Method *string `type:"string"`
+}
+
+// String returns the string representation
+func (s HlsContentProtection) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HlsContentProtection) GoString() string {
+ return s.String()
+}
+
+// A section of the response body that provides information about the job that
+// is created.
+type Job struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) for the job.
+ Arn *string `type:"string"`
+
+ // The identifier that Elastic Transcoder assigned to the job. You use this
+ // value to get settings for the job or to delete the job.
+ Id *string `type:"string"`
+
+ // A section of the request or response body that provides information about
+ // the file that is being transcoded.
+ Input *JobInput `type:"structure"`
+
+ // If you specified one output for a job, information about that output. If
+ // you specified multiple outputs for a job, the Output object lists information
+ // about the first output. This duplicates the information that is listed for
+ // the first output in the Outputs object.
+ //
+ // We recommend that you use Outputs instead. A section of the request or response
+ // body that provides information about the transcoded (target) file.
+ Output *JobOutput `type:"structure"`
+
+ // The value, if any, that you want Elastic Transcoder to prepend to the names
+ // of all files that this job creates, including output files, thumbnails, and
+ // playlists. We recommend that you add a / or some other delimiter to the end
+ // of the OutputKeyPrefix.
+ OutputKeyPrefix *string `min:"1" type:"string"`
+
+ // Information about the output files. We recommend that you use the Outputs
+ // syntax for all jobs, even when you want Elastic Transcoder to transcode a
+ // file into only one format. Do not use both the Outputs and Output syntaxes
+ // in the same request. You can create a maximum of 30 outputs per job.
+ //
+ // If you specify more than one output for a job, Elastic Transcoder creates
+ // the files for each output in the order in which you specify them in the job.
+ Outputs []*JobOutput `type:"list"`
+
+ // The Id of the pipeline that you want Elastic Transcoder to use for transcoding.
+ // The pipeline determines several settings, including the Amazon S3 bucket
+ // from which Elastic Transcoder gets the files to transcode and the bucket
+ // into which Elastic Transcoder puts the transcoded files.
+ PipelineId *string `type:"string"`
+
+ // Outputs in Fragmented MP4 or MPEG-TS format only. If you specify a preset
+ // in PresetId for which the value of Container is fmp4 (Fragmented MP4) or
+ // ts (MPEG-TS), Playlists contains information about the master playlists that
+ // you want Elastic Transcoder to create.
+ //
+ // The maximum number of master playlists in a job is 30.
+ Playlists []*Playlist `type:"list"`
+
+ // The status of the job: Submitted, Progressing, Complete, Canceled, or Error.
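The terminal statuses listed above make a simple polling loop possible; a sketch built on the service's ReadJob operation (defined elsewhere in this file), assuming the time and log imports and the svc client from the earlier sketches. SNS notifications, configured on the pipeline, are the usual alternative to polling:

    id := aws.String("3333333333333-abcde3") // hypothetical job ID returned by CreateJob
    for {
        resp, err := svc.ReadJob(&elastictranscoder.ReadJobInput{Id: id})
        if err != nil {
            log.Fatal(err)
        }
        status := aws.StringValue(resp.Job.Status)
        if status == "Complete" || status == "Error" || status == "Canceled" {
            log.Printf("job finished with status %s", status)
            break
        }
        time.Sleep(30 * time.Second)
    }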
+ Status *string `type:"string"` + + // Details about the timing of a job. + Timing *Timing `type:"structure"` + + // User-defined metadata that you want to associate with an Elastic Transcoder + // job. You specify metadata in key/value pairs, and you can add up to 10 key/value + // pairs per job. Elastic Transcoder does not guarantee that key/value pairs + // will be returned in the same order in which you specify them. + // + // Metadata keys and values must use characters from the following list: + // + // 0-9 + // + // A-Z and a-z + // + // Space + // + // The following symbols: _.:/=+-%@ + UserMetadata map[string]*string `type:"map"` +} + +// String returns the string representation +func (s Job) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Job) GoString() string { + return s.String() +} + +// The .jpg or .png file associated with an audio file. +type JobAlbumArt struct { + _ struct{} `type:"structure"` + + // The file to be used as album art. There can be multiple artworks associated + // with an audio file, to a maximum of 20. Valid formats are .jpg and .png + Artwork []*Artwork `type:"list"` + + // A policy that determines how Elastic Transcoder will handle the existence + // of multiple album artwork files. + // + // Replace: The specified album art will replace any existing album art. + // Prepend: The specified album art will be placed in front of any existing + // album art. Append: The specified album art will be placed after any existing + // album art. Fallback: If the original input file contains artwork, Elastic + // Transcoder will use that artwork for the output. If the original input does + // not contain artwork, Elastic Transcoder will use the specified album art + // file. + MergePolicy *string `type:"string"` +} + +// String returns the string representation +func (s JobAlbumArt) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s JobAlbumArt) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *JobAlbumArt) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "JobAlbumArt"} + if s.Artwork != nil { + for i, v := range s.Artwork { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Artwork", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Information about the file that you're transcoding. +type JobInput struct { + _ struct{} `type:"structure"` + + // The aspect ratio of the input file. If you want Elastic Transcoder to automatically + // detect the aspect ratio of the input file, specify auto. If you want to specify + // the aspect ratio for the output file, enter one of the following values: + // + // 1:1, 4:3, 3:2, 16:9 + // + // If you specify a value other than auto, Elastic Transcoder disables automatic + // detection of the aspect ratio. + AspectRatio *string `type:"string"` + + // The container type for the input file. If you want Elastic Transcoder to + // automatically detect the container type of the input file, specify auto. 
+ // If you want to specify the container type for the input file, enter one of
+ // the following values:
+ //
+ // 3gp, aac, asf, avi, divx, flv, m4a, mkv, mov, mp3, mp4, mpeg, mpeg-ps,
+ // mpeg-ts, mxf, ogg, vob, wav, webm
+ Container *string `type:"string"`
+
+ // The detected properties of the input file.
+ DetectedProperties *DetectedProperties `type:"structure"`
+
+ // The encryption settings, if any, that are used for decrypting your input
+ // files. If your input file is encrypted, you must specify the mode that Elastic
+ // Transcoder will use to decrypt your file.
+ Encryption *Encryption `type:"structure"`
+
+ // The frame rate of the input file. If you want Elastic Transcoder to automatically
+ // detect the frame rate of the input file, specify auto. If you want to specify
+ // the frame rate for the input file, enter one of the following values:
+ //
+ // 10, 15, 23.97, 24, 25, 29.97, 30, 60
+ //
+ // If you specify a value other than auto, Elastic Transcoder disables automatic
+ // detection of the frame rate.
+ FrameRate *string `type:"string"`
+
+ // Whether the input file is interlaced. If you want Elastic Transcoder to automatically
+ // detect whether the input file is interlaced, specify auto. If you want to
+ // specify whether the input file is interlaced, enter one of the following
+ // values:
+ //
+ // true, false
+ //
+ // If you specify a value other than auto, Elastic Transcoder disables automatic
+ // detection of interlacing.
+ Interlaced *string `type:"string"`
+
+ // The name of the file to transcode. Elsewhere in the body of the JSON block
+ // is the ID of the pipeline to use for processing the job. The InputBucket
+ // object in that pipeline tells Elastic Transcoder which Amazon S3 bucket to
+ // get the file from.
+ //
+ // If the file name includes a prefix, such as cooking/lasagna.mpg, include
+ // the prefix in the key. If the file isn't in the specified bucket, Elastic
+ // Transcoder returns an error.
+ Key *string `min:"1" type:"string"`
+
+ // This value must be auto, which causes Elastic Transcoder to automatically
+ // detect the resolution of the input file.
+ Resolution *string `type:"string"`
+}
+
+// String returns the string representation
+func (s JobInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s JobInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *JobInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "JobInput"}
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// We recommend that you use Outputs instead. If you specified one output for
+// a job, information about that output. If you specified multiple outputs for
+// a job, the Output object lists information about the first output. This duplicates
+// the information that is listed for the first output in the Outputs object.
+type JobOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The album art to be associated with the output file, if any.
+ AlbumArt *JobAlbumArt `type:"structure"`
+
+ // If Elastic Transcoder used a preset with a ColorSpaceConversionMode to transcode
+ // the output file, the AppliedColorSpaceConversion parameter shows the conversion
+ // used.
If no ColorSpaceConversionMode was defined in the preset, this parameter + // will not be included in the job response. + AppliedColorSpaceConversion *string `type:"string"` + + // You can configure Elastic Transcoder to transcode captions, or subtitles, + // from one format to another. All captions must be in UTF-8. Elastic Transcoder + // supports two types of captions: + // + // Embedded: Embedded captions are included in the same file as the audio + // and video. Elastic Transcoder supports only one embedded caption per language, + // to a maximum of 300 embedded captions per file. + // + // Valid input values include: CEA-608 (EIA-608, first non-empty channel only), + // CEA-708 (EIA-708, first non-empty channel only), and mov-text + // + // Valid outputs include: mov-text + // + // Elastic Transcoder supports a maximum of one embedded format per output. + // + // Sidecar: Sidecar captions are kept in a separate metadata file from the + // audio and video data. Sidecar captions require a player that is capable of + // understanding the relationship between the video file and the sidecar file. + // Elastic Transcoder supports only one sidecar caption per language, to a maximum + // of 20 sidecar captions per file. + // + // Valid input values include: dfxp (first div element only), ebu-tt, scc, + // smpt, srt, ttml (first div element only), and webvtt + // + // Valid outputs include: dfxp (first div element only), scc, srt, and webvtt. + // + // If you want ttml or smpte-tt compatible captions, specify dfxp as your + // output format. + // + // Elastic Transcoder does not support OCR (Optical Character Recognition), + // does not accept pictures as a valid input for captions, and is not available + // for audio-only transcoding. Elastic Transcoder does not preserve text formatting + // (for example, italics) during the transcoding process. + // + // To remove captions or leave the captions empty, set Captions to null. To + // pass through existing captions unchanged, set the MergePolicy to MergeRetain, + // and pass in a null CaptionSources array. + // + // For more information on embedded files, see the Subtitles Wikipedia page. + // + // For more information on sidecar files, see the Extensible Metadata Platform + // and Sidecar file Wikipedia pages. + Captions *Captions `type:"structure"` + + // You can create an output file that contains an excerpt from the input file. + // This excerpt, called a clip, can come from the beginning, middle, or end + // of the file. The Composition object contains settings for the clips that + // make up an output file. For the current release, you can only specify settings + // for a single clip per output file. The Composition object cannot be null. + Composition []*Clip `type:"list"` + + // Duration of the output file, in seconds. + Duration *int64 `type:"long"` + + // Duration of the output file, in milliseconds. + DurationMillis *int64 `type:"long"` + + // The encryption settings, if any, that you want Elastic Transcoder to apply + // to your output files. If you choose to use encryption, you must specify a + // mode to use. If you choose not to use encryption, Elastic Transcoder will + // write an unencrypted file to your Amazon S3 bucket. + Encryption *Encryption `type:"structure"` + + // File size of the output file, in bytes. + FileSize *int64 `type:"long"` + + // Frame rate of the output file, in frames per second. + FrameRate *string `type:"string"` + + // Height of the output file, in pixels. 
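Once a job finishes, the per-output fields documented here can be read back. A hedged sketch that logs each output's dimensions, duration, size, and status, given a *elastictranscoder.Job such as the one returned by ReadJob in the earlier sketch:

    func logOutputs(job *elastictranscoder.Job) {
        for _, out := range job.Outputs {
            log.Printf("output %s (%s): %dx%d px, %d s, %d bytes",
                aws.StringValue(out.Id),
                aws.StringValue(out.Status),
                aws.Int64Value(out.Width),
                aws.Int64Value(out.Height),
                aws.Int64Value(out.Duration),
                aws.Int64Value(out.FileSize))
            if detail := aws.StringValue(out.StatusDetail); detail != "" {
                log.Printf("  detail: %s", detail)
            }
        }
    }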
+ Height *int64 `type:"integer"`
+
+ // A sequential counter, starting with 1, that identifies an output among the
+ // outputs from the current job. In the Output syntax, this value is always
+ // 1.
+ Id *string `type:"string"`
+
+ // The name to assign to the transcoded file. Elastic Transcoder saves the file
+ // in the Amazon S3 bucket specified by the OutputBucket object in the pipeline
+ // that is specified by the pipeline ID.
+ Key *string `min:"1" type:"string"`
+
+ // The value of the Id object for the preset that you want to use for this job.
+ // The preset determines the audio, video, and thumbnail settings that Elastic
+ // Transcoder uses for transcoding. To use a preset that you created, specify
+ // the preset ID that Elastic Transcoder returned in the response when you created
+ // the preset. You can also use the Elastic Transcoder system presets, which
+ // you can get with ListPresets.
+ PresetId *string `type:"string"`
+
+ // The number of degrees clockwise by which you want Elastic Transcoder to rotate
+ // the output relative to the input. Enter one of the following values:
+ //
+ // auto, 0, 90, 180, 270
+ //
+ // The value auto generally works only if the file that you're transcoding
+ // contains rotation metadata.
+ Rotate *string `type:"string"`
+
+ // (Outputs in Fragmented MP4 or MPEG-TS format only.) If you specify a preset
+ // in PresetId for which the value of Container is fmp4 (Fragmented MP4) or
+ // ts (MPEG-TS), SegmentDuration is the target maximum duration of each segment
+ // in seconds. For HLSv3 format playlists, each media segment is stored in a
+ // separate .ts file. For HLSv4 and Smooth playlists, all media segments for
+ // an output are stored in a single file. Each segment is approximately the
+ // length of the SegmentDuration, though individual segments might be shorter
+ // or longer.
+ //
+ // The range of valid values is 1 to 60 seconds. If the duration of the video
+ // is not evenly divisible by SegmentDuration, the duration of the last segment
+ // is the remainder of total length/SegmentDuration.
+ //
+ // Elastic Transcoder creates an output-specific playlist for each HLS output
+ // that you specify in OutputKeys. To add an output to the master playlist
+ // for this job, include it in the OutputKeys of the associated playlist.
+ SegmentDuration *string `type:"string"`
+
+ // The status of one output in a job. If you specified only one output for the
+ // job, Outputs:Status is always the same as Job:Status. If you specified more
+ // than one output: Job:Status and Outputs:Status for all of the outputs is
+ // Submitted until Elastic Transcoder starts to process the first output. When
+ // Elastic Transcoder starts to process the first output, Outputs:Status for
+ // that output and Job:Status both change to Progressing. For each output, the
+ // value of Outputs:Status remains Submitted until Elastic Transcoder starts
+ // to process the output. Job:Status remains Progressing until all of the outputs
+ // reach a terminal status, either Complete or Error. When all of the outputs
+ // reach a terminal status, Job:Status changes to Complete only if Outputs:Status
+ // for all of the outputs is Complete. If Outputs:Status for one or more outputs
+ // is Error, the terminal status for Job:Status is also Error. The value of
+ // Status is one of the following: Submitted, Progressing, Complete, Canceled,
+ // or Error.
+ Status *string `type:"string"`
+
+ // Information that further explains Status.
+ StatusDetail *string `type:"string"` + + // The encryption settings, if any, that you want Elastic Transcoder to apply + // to your thumbnail. + ThumbnailEncryption *Encryption `type:"structure"` + + // Whether you want Elastic Transcoder to create thumbnails for your videos + // and, if so, how you want Elastic Transcoder to name the files. + // + // If you don't want Elastic Transcoder to create thumbnails, specify "". + // + // If you do want Elastic Transcoder to create thumbnails, specify the information + // that you want to include in the file name for each thumbnail. You can specify + // the following values in any sequence: + // + // {count} (Required): If you want to create thumbnails, you must include + // {count} in the ThumbnailPattern object. Wherever you specify {count}, Elastic + // Transcoder adds a five-digit sequence number (beginning with 00001) to thumbnail + // file names. The number indicates where a given thumbnail appears in the sequence + // of thumbnails for a transcoded file. + // + // If you specify a literal value and/or {resolution} but you omit {count}, + // Elastic Transcoder returns a validation error and does not create the job. + // Literal values (Optional): You can specify literal values anywhere in + // the ThumbnailPattern object. For example, you can include them as a file + // name prefix or as a delimiter between {resolution} and {count}. + // + // {resolution} (Optional): If you want Elastic Transcoder to include the + // resolution in the file name, include {resolution} in the ThumbnailPattern + // object. + // + // When creating thumbnails, Elastic Transcoder automatically saves the files + // in the format (.jpg or .png) that appears in the preset that you specified + // in the PresetID value of CreateJobOutput. Elastic Transcoder also appends + // the applicable file name extension. + ThumbnailPattern *string `type:"string"` + + // Information about the watermarks that you want Elastic Transcoder to add + // to the video during transcoding. You can specify up to four watermarks for + // each output. Settings for each watermark must be defined in the preset that + // you specify in Preset for the current output. + // + // Watermarks are added to the output video in the sequence in which you list + // them in the job output—the first watermark in the list is added to the output + // video first, the second watermark in the list is added next, and so on. As + // a result, if the settings in a preset cause Elastic Transcoder to place all + // watermarks in the same location, the second watermark that you add will cover + // the first one, the third one will cover the second, and the fourth one will + // cover the third. + Watermarks []*JobWatermark `type:"list"` + + // Specifies the width of the output file in pixels. + Width *int64 `type:"integer"` +} + +// String returns the string representation +func (s JobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s JobOutput) GoString() string { + return s.String() +} + +// Watermarks can be in .png or .jpg format. If you want to display a watermark +// that is not rectangular, use the .png format, which supports transparency. +type JobWatermark struct { + _ struct{} `type:"structure"` + + // The encryption settings, if any, that you want Elastic Transcoder to apply + // to your watermarks. + Encryption *Encryption `type:"structure"` + + // The name of the .png or .jpg file that you want to use for the watermark. 
+ // To determine which Amazon S3 bucket contains the specified file, Elastic + // Transcoder checks the pipeline specified by Pipeline; the Input Bucket object + // in that pipeline identifies the bucket. + // + // If the file name includes a prefix, for example, logos/128x64.png, include + // the prefix in the key. If the file isn't in the specified bucket, Elastic + // Transcoder returns an error. + InputKey *string `min:"1" type:"string"` + + // The ID of the watermark settings that Elastic Transcoder uses to add watermarks + // to the video during transcoding. The settings are in the preset specified + // by Preset for the current output. In that preset, the value of Watermarks + // Id tells Elastic Transcoder which settings to use. + PresetWatermarkId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s JobWatermark) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s JobWatermark) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *JobWatermark) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "JobWatermark"} + if s.InputKey != nil && len(*s.InputKey) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InputKey", 1)) + } + if s.PresetWatermarkId != nil && len(*s.PresetWatermarkId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PresetWatermarkId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The ListJobsByPipelineRequest structure. +type ListJobsByPipelineInput struct { + _ struct{} `type:"structure"` + + // To list jobs in chronological order by the date and time that they were submitted, + // enter true. To list jobs in reverse chronological order, enter false. + Ascending *string `location:"querystring" locationName:"Ascending" type:"string"` + + // When Elastic Transcoder returns more than one page of results, use pageToken + // in subsequent GET requests to get each successive page of results. + PageToken *string `location:"querystring" locationName:"PageToken" type:"string"` + + // The ID of the pipeline for which you want to get job information. + PipelineId *string `location:"uri" locationName:"PipelineId" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListJobsByPipelineInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListJobsByPipelineInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListJobsByPipelineInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListJobsByPipelineInput"} + if s.PipelineId == nil { + invalidParams.Add(request.NewErrParamRequired("PipelineId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The ListJobsByPipelineResponse structure. +type ListJobsByPipelineOutput struct { + _ struct{} `type:"structure"` + + // An array of Job objects that are in the specified pipeline. + Jobs []*Job `type:"list"` + + // A value that you use to access the second and subsequent pages of results, + // if any. When the jobs in the specified pipeline fit on one page or when you've + // reached the last page of results, the value of NextPageToken is null. 
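+ //
+ // A minimal pagination sketch (illustrative only; assumes a configured
+ // *ElasticTranscoder client named svc and a pipeline ID in pipelineId):
+ //
+ //    var jobs []*elastictranscoder.Job
+ //    input := &elastictranscoder.ListJobsByPipelineInput{PipelineId: aws.String(pipelineId)}
+ //    for {
+ //        page, err := svc.ListJobsByPipeline(input)
+ //        if err != nil {
+ //            return err
+ //        }
+ //        jobs = append(jobs, page.Jobs...)
+ //        if page.NextPageToken == nil { // nil marks the last page
+ //            break
+ //        }
+ //        input.PageToken = page.NextPageToken
+ //    }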
+ NextPageToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListJobsByPipelineOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListJobsByPipelineOutput) GoString() string {
+ return s.String()
+}
+
+// The ListJobsByStatusRequest structure.
+type ListJobsByStatusInput struct {
+ _ struct{} `type:"structure"`
+
+ // To list jobs in chronological order by the date and time that they were submitted,
+ // enter true. To list jobs in reverse chronological order, enter false.
+ Ascending *string `location:"querystring" locationName:"Ascending" type:"string"`
+
+ // When Elastic Transcoder returns more than one page of results, use pageToken
+ // in subsequent GET requests to get each successive page of results.
+ PageToken *string `location:"querystring" locationName:"PageToken" type:"string"`
+
+ // To get information about all of the jobs associated with the current AWS
+ // account that have a given status, specify the following status: Submitted,
+ // Progressing, Complete, Canceled, or Error.
+ Status *string `location:"uri" locationName:"Status" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListJobsByStatusInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListJobsByStatusInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListJobsByStatusInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListJobsByStatusInput"}
+ if s.Status == nil {
+ invalidParams.Add(request.NewErrParamRequired("Status"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// The ListJobsByStatusResponse structure.
+type ListJobsByStatusOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of Job objects that have the specified status.
+ Jobs []*Job `type:"list"`
+
+ // A value that you use to access the second and subsequent pages of results,
+ // if any. When the jobs that have the specified status fit on one page or when
+ // you've reached the last page of results, the value of NextPageToken is null.
+ NextPageToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListJobsByStatusOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListJobsByStatusOutput) GoString() string {
+ return s.String()
+}
+
+// The ListPipelinesRequest structure.
+type ListPipelinesInput struct {
+ _ struct{} `type:"structure"`
+
+ // To list pipelines in chronological order by the date and time that they were
+ // created, enter true. To list pipelines in reverse chronological order, enter
+ // false.
+ Ascending *string `location:"querystring" locationName:"Ascending" type:"string"`
+
+ // When Elastic Transcoder returns more than one page of results, use pageToken
+ // in subsequent GET requests to get each successive page of results.
+ PageToken *string `location:"querystring" locationName:"PageToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListPipelinesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListPipelinesInput) GoString() string {
+ return s.String()
+}
+
+// A list of the pipelines associated with the current AWS account.
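+//
+// A short usage sketch (illustrative only; assumes a configured
+// *ElasticTranscoder client named svc):
+//
+//    resp, err := svc.ListPipelines(&elastictranscoder.ListPipelinesInput{})
+//    if err != nil {
+//        return err
+//    }
+//    for _, p := range resp.Pipelines {
+//        fmt.Println(aws.StringValue(p.Id), aws.StringValue(p.Name))
+//    }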
+type ListPipelinesOutput struct { + _ struct{} `type:"structure"` + + // A value that you use to access the second and subsequent pages of results, + // if any. When the pipelines fit on one page or when you've reached the last + // page of results, the value of NextPageToken is null. + NextPageToken *string `type:"string"` + + // An array of Pipeline objects. + Pipelines []*Pipeline `type:"list"` +} + +// String returns the string representation +func (s ListPipelinesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPipelinesOutput) GoString() string { + return s.String() +} + +// The ListPresetsRequest structure. +type ListPresetsInput struct { + _ struct{} `type:"structure"` + + // To list presets in chronological order by the date and time that they were + // created, enter true. To list presets in reverse chronological order, enter + // false. + Ascending *string `location:"querystring" locationName:"Ascending" type:"string"` + + // When Elastic Transcoder returns more than one page of results, use pageToken + // in subsequent GET requests to get each successive page of results. + PageToken *string `location:"querystring" locationName:"PageToken" type:"string"` +} + +// String returns the string representation +func (s ListPresetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPresetsInput) GoString() string { + return s.String() +} + +// The ListPresetsResponse structure. +type ListPresetsOutput struct { + _ struct{} `type:"structure"` + + // A value that you use to access the second and subsequent pages of results, + // if any. When the presets fit on one page or when you've reached the last + // page of results, the value of NextPageToken is null. + NextPageToken *string `type:"string"` + + // An array of Preset objects. + Presets []*Preset `type:"list"` +} + +// String returns the string representation +func (s ListPresetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPresetsOutput) GoString() string { + return s.String() +} + +// The Amazon Simple Notification Service (Amazon SNS) topic or topics to notify +// in order to report job status. +// +// To receive notifications, you must also subscribe to the new topic in the +// Amazon SNS console. +type Notifications struct { + _ struct{} `type:"structure"` + + // The Amazon SNS topic that you want to notify when Elastic Transcoder has + // finished processing the job. + Completed *string `type:"string"` + + // The Amazon SNS topic that you want to notify when Elastic Transcoder encounters + // an error condition. + Error *string `type:"string"` + + // The Amazon Simple Notification Service (Amazon SNS) topic that you want to + // notify when Elastic Transcoder has started to process the job. + Progressing *string `type:"string"` + + // The Amazon SNS topic that you want to notify when Elastic Transcoder encounters + // a warning condition. + Warning *string `type:"string"` +} + +// String returns the string representation +func (s Notifications) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Notifications) GoString() string { + return s.String() +} + +// The Permission structure. +type Permission struct { + _ struct{} `type:"structure"` + + // The permission that you want to give to the AWS user that is listed in Grantee. 
+ // Valid values include: READ: The grantee can read the thumbnails and metadata + // for thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. READ_ACP: + // The grantee can read the object ACL for thumbnails that Elastic Transcoder + // adds to the Amazon S3 bucket. WRITE_ACP: The grantee can write the ACL for + // the thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. FULL_CONTROL: + // The grantee has READ, READ_ACP, and WRITE_ACP permissions for the thumbnails + // that Elastic Transcoder adds to the Amazon S3 bucket. + Access []*string `type:"list"` + + // The AWS user or group that you want to have access to transcoded files and + // playlists. To identify the user or group, you can specify the canonical user + // ID for an AWS account, an origin access identity for a CloudFront distribution, + // the registered email address of an AWS account, or a predefined Amazon S3 + // group. + Grantee *string `min:"1" type:"string"` + + // The type of value that appears in the Grantee object: Canonical: Either + // the canonical user ID for an AWS account or an origin access identity for + // an Amazon CloudFront distribution. A canonical user ID is not the same as + // an AWS account number. Email: The registered email address of an AWS account. + // Group: One of the following predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, + // or LogDelivery. + GranteeType *string `type:"string"` +} + +// String returns the string representation +func (s Permission) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Permission) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Permission) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Permission"} + if s.Grantee != nil && len(*s.Grantee) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Grantee", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The pipeline (queue) that is used to manage jobs. +type Pipeline struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) for the pipeline. + Arn *string `type:"string"` + + // The AWS Key Management Service (AWS KMS) key that you want to use with this + // pipeline. + // + // If you use either S3 or S3-AWS-KMS as your Encryption:Mode, you don't need + // to provide a key with your job because a default key, known as an AWS-KMS + // key, is created for you automatically. You need to provide an AWS-KMS key + // only if you want to use a non-default AWS-KMS key, or if you are using an + // Encryption:Mode of AES-PKCS7, AES-CTR, or AES-GCM. + AwsKmsKeyArn *string `type:"string"` + + // Information about the Amazon S3 bucket in which you want Elastic Transcoder + // to save transcoded files and playlists. Either you specify both ContentConfig + // and ThumbnailConfig, or you specify OutputBucket. + // + // Bucket: The Amazon S3 bucket in which you want Elastic Transcoder to save + // transcoded files and playlists. Permissions: A list of the users and/or + // predefined Amazon S3 groups you want to have access to transcoded files and + // playlists, and the type of access that you want them to have. GranteeType: + // The type of value that appears in the Grantee object: Canonical: Either + // the canonical user ID for an AWS account or an origin access identity for + // an Amazon CloudFront distribution. 
Email: The registered email address of + // an AWS account. Group: One of the following predefined Amazon S3 groups: + // AllUsers, AuthenticatedUsers, or LogDelivery. Grantee: The AWS user or + // group that you want to have access to transcoded files and playlists. Access: + // The permission that you want to give to the AWS user that is listed in Grantee. + // Valid values include: READ: The grantee can read the objects and metadata + // for objects that Elastic Transcoder adds to the Amazon S3 bucket. READ_ACP: + // The grantee can read the object ACL for objects that Elastic Transcoder adds + // to the Amazon S3 bucket. WRITE_ACP: The grantee can write the ACL for the + // objects that Elastic Transcoder adds to the Amazon S3 bucket. FULL_CONTROL: + // The grantee has READ, READ_ACP, and WRITE_ACP permissions for the objects + // that Elastic Transcoder adds to the Amazon S3 bucket. StorageClass: + // The Amazon S3 storage class, Standard or ReducedRedundancy, that you want + // Elastic Transcoder to assign to the video files and playlists that it stores + // in your Amazon S3 bucket. + ContentConfig *PipelineOutputConfig `type:"structure"` + + // The identifier for the pipeline. You use this value to identify the pipeline + // in which you want to perform a variety of operations, such as creating a + // job or a preset. + Id *string `type:"string"` + + // The Amazon S3 bucket from which Elastic Transcoder gets media files for transcoding + // and the graphics files, if any, that you want to use for watermarks. + InputBucket *string `type:"string"` + + // The name of the pipeline. We recommend that the name be unique within the + // AWS account, but uniqueness is not enforced. + // + // Constraints: Maximum 40 characters + Name *string `min:"1" type:"string"` + + // The Amazon Simple Notification Service (Amazon SNS) topic that you want to + // notify to report job status. + // + // To receive notifications, you must also subscribe to the new topic in the + // Amazon SNS console. Progressing (optional): The Amazon Simple Notification + // Service (Amazon SNS) topic that you want to notify when Elastic Transcoder + // has started to process the job. Completed (optional): The Amazon SNS topic + // that you want to notify when Elastic Transcoder has finished processing the + // job. Warning (optional): The Amazon SNS topic that you want to notify when + // Elastic Transcoder encounters a warning condition. Error (optional): The + // Amazon SNS topic that you want to notify when Elastic Transcoder encounters + // an error condition. + Notifications *Notifications `type:"structure"` + + // The Amazon S3 bucket in which you want Elastic Transcoder to save transcoded + // files, thumbnails, and playlists. Either you specify this value, or you specify + // both ContentConfig and ThumbnailConfig. + OutputBucket *string `type:"string"` + + // The IAM Amazon Resource Name (ARN) for the role that Elastic Transcoder uses + // to transcode jobs for this pipeline. + Role *string `type:"string"` + + // The current status of the pipeline: + // + // Active: The pipeline is processing jobs. Paused: The pipeline is not + // currently processing jobs. + Status *string `type:"string"` + + // Information about the Amazon S3 bucket in which you want Elastic Transcoder + // to save thumbnail files. Either you specify both ContentConfig and ThumbnailConfig, + // or you specify OutputBucket. + // + // Bucket: The Amazon S3 bucket in which you want Elastic Transcoder to save + // thumbnail files. 
Permissions: A list of the users and/or predefined Amazon + // S3 groups you want to have access to thumbnail files, and the type of access + // that you want them to have. GranteeType: The type of value that appears + // in the Grantee object: Canonical: Either the canonical user ID for an AWS + // account or an origin access identity for an Amazon CloudFront distribution. + // A canonical user ID is not the same as an AWS account number. Email: The + // registered email address of an AWS account. Group: One of the following + // predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, or LogDelivery. + // Grantee: The AWS user or group that you want to have access to thumbnail + // files. Access: The permission that you want to give to the AWS user that + // is listed in Grantee. Valid values include: READ: The grantee can read + // the thumbnails and metadata for thumbnails that Elastic Transcoder adds to + // the Amazon S3 bucket. READ_ACP: The grantee can read the object ACL for + // thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. WRITE_ACP: + // The grantee can write the ACL for the thumbnails that Elastic Transcoder + // adds to the Amazon S3 bucket. FULL_CONTROL: The grantee has READ, READ_ACP, + // and WRITE_ACP permissions for the thumbnails that Elastic Transcoder adds + // to the Amazon S3 bucket. StorageClass: The Amazon S3 storage class, + // Standard or ReducedRedundancy, that you want Elastic Transcoder to assign + // to the thumbnails that it stores in your Amazon S3 bucket. + ThumbnailConfig *PipelineOutputConfig `type:"structure"` +} + +// String returns the string representation +func (s Pipeline) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Pipeline) GoString() string { + return s.String() +} + +// The PipelineOutputConfig structure. +type PipelineOutputConfig struct { + _ struct{} `type:"structure"` + + // The Amazon S3 bucket in which you want Elastic Transcoder to save the transcoded + // files. Specify this value when all of the following are true: You want to + // save transcoded files, thumbnails (if any), and playlists (if any) together + // in one bucket. You do not want to specify the users or groups who have access + // to the transcoded files, thumbnails, and playlists. You do not want to specify + // the permissions that Elastic Transcoder grants to the files. You want to + // associate the transcoded files and thumbnails with the Amazon S3 Standard + // storage class. If you want to save transcoded files and playlists in one + // bucket and thumbnails in another bucket, specify which users can access the + // transcoded files or the permissions the users have, or change the Amazon + // S3 storage class, omit OutputBucket and specify values for ContentConfig + // and ThumbnailConfig instead. + Bucket *string `type:"string"` + + // Optional. The Permissions object specifies which users and/or predefined + // Amazon S3 groups you want to have access to transcoded files and playlists, + // and the type of access you want them to have. You can grant permissions to + // a maximum of 30 users and/or predefined Amazon S3 groups. + // + // If you include Permissions, Elastic Transcoder grants only the permissions + // that you specify. It does not grant full permissions to the owner of the + // role specified by Role. If you want that user to have full control, you must + // explicitly grant full control to the user. 
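+ //
+ // For example (an illustrative sketch; the value strings follow the service's
+ // GranteeType and AccessControl types), a grant that gives the predefined
+ // AllUsers group read access looks like:
+ //
+ //    Permissions: []*elastictranscoder.Permission{{
+ //        GranteeType: aws.String("Group"),
+ //        Grantee:     aws.String("AllUsers"),
+ //        Access:      []*string{aws.String("Read")},
+ //    }},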
+ // + // If you omit Permissions, Elastic Transcoder grants full control over the + // transcoded files and playlists to the owner of the role specified by Role, + // and grants no other permissions to any other user or group. + Permissions []*Permission `type:"list"` + + // The Amazon S3 storage class, Standard or ReducedRedundancy, that you want + // Elastic Transcoder to assign to the video files and playlists that it stores + // in your Amazon S3 bucket. + StorageClass *string `type:"string"` +} + +// String returns the string representation +func (s PipelineOutputConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PipelineOutputConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PipelineOutputConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PipelineOutputConfig"} + if s.Permissions != nil { + for i, v := range s.Permissions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Permissions", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The PlayReady DRM settings, if any, that you want Elastic Transcoder to apply +// to the output files associated with this playlist. +// +// PlayReady DRM encrypts your media files using AES-CTR encryption. +// +// If you use DRM for an HLSv3 playlist, your outputs must have a master playlist. +type PlayReadyDrm struct { + _ struct{} `type:"structure"` + + // The type of DRM, if any, that you want Elastic Transcoder to apply to the + // output files associated with this playlist. + Format *string `type:"string"` + + // The series of random bits created by a random bit generator, unique for every + // encryption operation, that you want Elastic Transcoder to use to encrypt + // your files. The initialization vector must be base64-encoded, and it must + // be exactly 8 bytes long before being base64-encoded. If no initialization + // vector is provided, Elastic Transcoder generates one for you. + InitializationVector *string `type:"string"` + + // The DRM key for your file, provided by your DRM license provider. The key + // must be base64-encoded, and it must be one of the following bit lengths before + // being base64-encoded: + // + // 128, 192, or 256. + // + // The key must also be encrypted by using AWS KMS. + Key *string `type:"string"` + + // The ID for your DRM key, so that your DRM license provider knows which key + // to provide. + // + // The key ID must be provided in big endian, and Elastic Transcoder will convert + // it to little endian before inserting it into the PlayReady DRM headers. If + // you are unsure whether your license server provides your key ID in big or + // little endian, check with your DRM provider. + KeyId *string `type:"string"` + + // The MD5 digest of the key used for DRM on your file, and that you want Elastic + // Transcoder to use as a checksum to make sure your key was not corrupted in + // transit. The key MD5 must be base64-encoded, and it must be exactly 16 bytes + // before being base64-encoded. + KeyMd5 *string `type:"string"` + + // The location of the license key required to play DRM content. The URL must + // be an absolute path, and is referenced by the PlayReady header. 
The PlayReady + // header is referenced in the protection header of the client manifest for + // Smooth Streaming outputs, and in the EXT-X-DXDRM and EXT-XDXDRMINFO metadata + // tags for HLS playlist outputs. An example URL looks like this: https://www.example.com/exampleKey/ + LicenseAcquisitionUrl *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PlayReadyDrm) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PlayReadyDrm) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PlayReadyDrm) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PlayReadyDrm"} + if s.LicenseAcquisitionUrl != nil && len(*s.LicenseAcquisitionUrl) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LicenseAcquisitionUrl", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Use Only for Fragmented MP4 or MPEG-TS Outputs. If you specify a preset for +// which the value of Container is fmp4 (Fragmented MP4) or ts (MPEG-TS), Playlists +// contains information about the master playlists that you want Elastic Transcoder +// to create. We recommend that you create only one master playlist per output +// format. The maximum number of master playlists in a job is 30. +type Playlist struct { + _ struct{} `type:"structure"` + + // The format of the output playlist. Valid formats include HLSv3, HLSv4, and + // Smooth. + Format *string `type:"string"` + + // The HLS content protection settings, if any, that you want Elastic Transcoder + // to apply to the output files associated with this playlist. + HlsContentProtection *HlsContentProtection `type:"structure"` + + // The name that you want Elastic Transcoder to assign to the master playlist, + // for example, nyc-vacation.m3u8. If the name includes a / character, the section + // of the name before the last / must be identical for all Name objects. If + // you create more than one master playlist, the values of all Name objects + // must be unique. + // + // Note: Elastic Transcoder automatically appends the relevant file extension + // to the file name (.m3u8 for HLSv3 and HLSv4 playlists, and .ism and .ismc + // for Smooth playlists). If you include a file extension in Name, the file + // name will have two extensions. + Name *string `min:"1" type:"string"` + + // For each output in this job that you want to include in a master playlist, + // the value of the Outputs:Key object. + // + // If your output is not HLS or does not have a segment duration set, the + // name of the output file is a concatenation of OutputKeyPrefix and Outputs:Key: + // + // OutputKeyPrefixOutputs:Key + // + // If your output is HLSv3 and has a segment duration set, or is not included + // in a playlist, Elastic Transcoder creates an output playlist file with a + // file extension of .m3u8, and a series of .ts files that include a five-digit + // sequential counter beginning with 00000: + // + // OutputKeyPrefixOutputs:Key.m3u8 + // + // OutputKeyPrefixOutputs:Key00000.ts + // + // If your output is HLSv4, has a segment duration set, and is included in + // an HLSv4 playlist, Elastic Transcoder creates an output playlist file with + // a file extension of _v4.m3u8. 
If the output is video, Elastic Transcoder + // also creates an output file with an extension of _iframe.m3u8: + // + // OutputKeyPrefixOutputs:Key_v4.m3u8 + // + // OutputKeyPrefixOutputs:Key_iframe.m3u8 + // + // OutputKeyPrefixOutputs:Key.ts + // + // Elastic Transcoder automatically appends the relevant file extension to + // the file name. If you include a file extension in Output Key, the file name + // will have two extensions. + // + // If you include more than one output in a playlist, any segment duration + // settings, clip settings, or caption settings must be the same for all outputs + // in the playlist. For Smooth playlists, the Audio:Profile, Video:Profile, + // and Video:FrameRate to Video:KeyframesMaxDist ratio must be the same for + // all outputs. + OutputKeys []*string `type:"list"` + + // The DRM settings, if any, that you want Elastic Transcoder to apply to the + // output files associated with this playlist. + PlayReadyDrm *PlayReadyDrm `type:"structure"` + + // The status of the job with which the playlist is associated. + Status *string `type:"string"` + + // Information that further explains the status. + StatusDetail *string `type:"string"` +} + +// String returns the string representation +func (s Playlist) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Playlist) GoString() string { + return s.String() +} + +// Presets are templates that contain most of the settings for transcoding media +// files from one format to another. Elastic Transcoder includes some default +// presets for common formats, for example, several iPod and iPhone versions. +// You can also create your own presets for formats that aren't included among +// the default presets. You specify which preset you want to use when you create +// a job. +type Preset struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) for the preset. + Arn *string `type:"string"` + + // A section of the response body that provides information about the audio + // preset values. + Audio *AudioParameters `type:"structure"` + + // The container type for the output file. Valid values include flac, flv, fmp4, + // gif, mp3, mp4, mpg, mxf, oga, ogg, ts, and webm. + Container *string `type:"string"` + + // A description of the preset. + Description *string `type:"string"` + + // Identifier for the new preset. You use this value to get settings for the + // preset or to delete it. + Id *string `type:"string"` + + // The name of the preset. + Name *string `min:"1" type:"string"` + + // A section of the response body that provides information about the thumbnail + // preset values, if any. + Thumbnails *Thumbnails `type:"structure"` + + // Whether the preset is a default preset provided by Elastic Transcoder (System) + // or a preset that you have defined (Custom). + Type *string `type:"string"` + + // A section of the response body that provides information about the video + // preset values. + Video *VideoParameters `type:"structure"` +} + +// String returns the string representation +func (s Preset) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Preset) GoString() string { + return s.String() +} + +// Settings for the size, location, and opacity of graphics that you want Elastic +// Transcoder to overlay over videos that are transcoded using this preset. +// You can specify settings for up to four watermarks. 
Watermarks appear in +// the specified size and location, and with the specified opacity for the duration +// of the transcoded video. +// +// Watermarks can be in .png or .jpg format. If you want to display a watermark +// that is not rectangular, use the .png format, which supports transparency. +// +// When you create a job that uses this preset, you specify the .png or .jpg +// graphics that you want Elastic Transcoder to include in the transcoded videos. +// You can specify fewer graphics in the job than you specify watermark settings +// in the preset, which allows you to use the same preset for up to four watermarks +// that have different dimensions. +type PresetWatermark struct { + _ struct{} `type:"structure"` + + // The horizontal position of the watermark unless you specify a non-zero value + // for HorizontalOffset: Left: The left edge of the watermark is aligned with + // the left border of the video. Right: The right edge of the watermark is + // aligned with the right border of the video. Center: The watermark is centered + // between the left and right borders. + HorizontalAlign *string `type:"string"` + + // The amount by which you want the horizontal position of the watermark to + // be offset from the position specified by HorizontalAlign: number of pixels + // (px): The minimum value is 0 pixels, and the maximum value is the value of + // MaxWidth. integer percentage (%): The range of valid values is 0 to 100. + // For example, if you specify Left for HorizontalAlign and 5px for HorizontalOffset, + // the left side of the watermark appears 5 pixels from the left border of the + // output video. + // + // HorizontalOffset is only valid when the value of HorizontalAlign is Left + // or Right. If you specify an offset that causes the watermark to extend beyond + // the left or right border and Elastic Transcoder has not added black bars, + // the watermark is cropped. If Elastic Transcoder has added black bars, the + // watermark extends into the black bars. If the watermark extends beyond the + // black bars, it is cropped. + // + // Use the value of Target to specify whether you want to include the black + // bars that are added by Elastic Transcoder, if any, in the offset calculation. + HorizontalOffset *string `type:"string"` + + // A unique identifier for the settings for one watermark. The value of Id can + // be up to 40 characters long. + Id *string `min:"1" type:"string"` + + // The maximum height of the watermark in one of the following formats: number + // of pixels (px): The minimum value is 16 pixels, and the maximum value is + // the value of MaxHeight. integer percentage (%): The range of valid values + // is 0 to 100. Use the value of Target to specify whether you want Elastic + // Transcoder to include the black bars that are added by Elastic Transcoder, + // if any, in the calculation. If you specify the value in pixels, it must + // be less than or equal to the value of MaxHeight. + MaxHeight *string `type:"string"` + + // The maximum width of the watermark in one of the following formats: number + // of pixels (px): The minimum value is 16 pixels, and the maximum value is + // the value of MaxWidth. integer percentage (%): The range of valid values + // is 0 to 100. Use the value of Target to specify whether you want Elastic + // Transcoder to include the black bars that are added by Elastic Transcoder, + // if any, in the calculation. If you specify the value in pixels, it must be + // less than or equal to the value of MaxWidth. 
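+ //
+ // For example (illustrative values only), a watermark that is capped at 10%
+ // of the frame in each dimension and never scaled up might use:
+ //
+ //    MaxWidth:     aws.String("10%"),
+ //    MaxHeight:    aws.String("10%"),
+ //    SizingPolicy: aws.String("ShrinkToFit"),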
+ MaxWidth *string `type:"string"`
+
+ // A percentage that indicates how much you want a watermark to obscure the
+ // video in the location where it appears. Valid values are 0 (the watermark
+ // is invisible) to 100 (the watermark completely obscures the video in the
+ // specified location). The datatype of Opacity is float.
+ //
+ // Elastic Transcoder supports transparent .png graphics. If you use a transparent
+ // .png, the transparent portion of the video appears as if you had specified
+ // a value of 0 for Opacity. The .jpg file format doesn't support transparency.
+ Opacity *string `type:"string"`
+
+ // A value that controls scaling of the watermark: Fit: Elastic Transcoder
+ // scales the watermark so it matches the value that you specified in either
+ // MaxWidth or MaxHeight without exceeding the other value. Stretch: Elastic
+ // Transcoder stretches the watermark to match the values that you specified
+ // for MaxWidth and MaxHeight. If the relative proportions of the watermark
+ // and the values of MaxWidth and MaxHeight are different, the watermark will
+ // be distorted. ShrinkToFit: Elastic Transcoder scales the watermark down
+ // so that its dimensions match the values that you specified for at least one
+ // of MaxWidth and MaxHeight without exceeding either value. If you specify
+ // this option, Elastic Transcoder does not scale the watermark up.
+ SizingPolicy *string `type:"string"`
+
+ // A value that determines how Elastic Transcoder interprets values that you
+ // specified for HorizontalOffset, VerticalOffset, MaxWidth, and MaxHeight:
+ // Content: HorizontalOffset and VerticalOffset values are calculated based
+ // on the borders of the video excluding black bars added by Elastic Transcoder,
+ // if any. In addition, MaxWidth and MaxHeight, if specified as a percentage,
+ // are calculated based on the borders of the video excluding black bars added
+ // by Elastic Transcoder, if any. Frame: HorizontalOffset and VerticalOffset
+ // values are calculated based on the borders of the video including black bars
+ // added by Elastic Transcoder, if any. In addition, MaxWidth and MaxHeight,
+ // if specified as a percentage, are calculated based on the borders of the
+ // video including black bars added by Elastic Transcoder, if any.
+ Target *string `type:"string"`
+
+ // The vertical position of the watermark unless you specify a non-zero value
+ // for VerticalOffset: Top: The top edge of the watermark is aligned with
+ // the top border of the video. Bottom: The bottom edge of the watermark is
+ // aligned with the bottom border of the video. Center: The watermark is centered
+ // between the top and bottom borders.
+ VerticalAlign *string `type:"string"`
+
+ // The amount by which you want the vertical position of the
+ // watermark to be offset from the position specified by VerticalAlign: number
+ // of pixels (px): The minimum value is 0 pixels, and the maximum value is the
+ // value of MaxHeight. integer percentage (%): The range of valid values is
+ // 0 to 100. For example, if you specify Top for VerticalAlign and 5px for
+ // VerticalOffset, the top of the watermark appears 5 pixels from the top border
+ // of the output video.
+ //
+ // VerticalOffset is only valid when the value of VerticalAlign is Top or Bottom.
+ //
+ // If you specify an offset that causes the watermark to extend beyond the
+ // top or bottom border and Elastic Transcoder has not added black bars, the
+ // watermark is cropped.
If Elastic Transcoder has added black bars, the watermark + // extends into the black bars. If the watermark extends beyond the black bars, + // it is cropped. + // + // Use the value of Target to specify whether you want Elastic Transcoder to + // include the black bars that are added by Elastic Transcoder, if any, in the + // offset calculation. + VerticalOffset *string `type:"string"` +} + +// String returns the string representation +func (s PresetWatermark) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PresetWatermark) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PresetWatermark) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PresetWatermark"} + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The ReadJobRequest structure. +type ReadJobInput struct { + _ struct{} `type:"structure"` + + // The identifier of the job for which you want to get detailed information. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s ReadJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReadJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReadJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReadJobInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The ReadJobResponse structure. +type ReadJobOutput struct { + _ struct{} `type:"structure"` + + // A section of the response body that provides information about the job. + Job *Job `type:"structure"` +} + +// String returns the string representation +func (s ReadJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReadJobOutput) GoString() string { + return s.String() +} + +// The ReadPipelineRequest structure. +type ReadPipelineInput struct { + _ struct{} `type:"structure"` + + // The identifier of the pipeline to read. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s ReadPipelineInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReadPipelineInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReadPipelineInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReadPipelineInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The ReadPipelineResponse structure. +type ReadPipelineOutput struct { + _ struct{} `type:"structure"` + + // A section of the response body that provides information about the pipeline. + Pipeline *Pipeline `type:"structure"` + + // Elastic Transcoder returns a warning if the resources used by your pipeline + // are not in the same region as the pipeline. 
+ // + // Using resources in the same region, such as your Amazon S3 buckets, Amazon + // SNS notification topics, and AWS KMS key, reduces processing time and prevents + // cross-regional charges. + Warnings []*Warning `type:"list"` +} + +// String returns the string representation +func (s ReadPipelineOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReadPipelineOutput) GoString() string { + return s.String() +} + +// The ReadPresetRequest structure. +type ReadPresetInput struct { + _ struct{} `type:"structure"` + + // The identifier of the preset for which you want to get detailed information. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s ReadPresetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReadPresetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReadPresetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReadPresetInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The ReadPresetResponse structure. +type ReadPresetOutput struct { + _ struct{} `type:"structure"` + + // A section of the response body that provides information about the preset. + Preset *Preset `type:"structure"` +} + +// String returns the string representation +func (s ReadPresetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReadPresetOutput) GoString() string { + return s.String() +} + +// The TestRoleRequest structure. +type TestRoleInput struct { + _ struct{} `type:"structure"` + + // The Amazon S3 bucket that contains media files to be transcoded. The action + // attempts to read from this bucket. + InputBucket *string `type:"string" required:"true"` + + // The Amazon S3 bucket that Elastic Transcoder will write transcoded media + // files to. The action attempts to read from this bucket. + OutputBucket *string `type:"string" required:"true"` + + // The IAM Amazon Resource Name (ARN) for the role that you want Elastic Transcoder + // to test. + Role *string `type:"string" required:"true"` + + // The ARNs of one or more Amazon Simple Notification Service (Amazon SNS) topics + // that you want the action to send a test notification to. + Topics []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s TestRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TestRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TestRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TestRoleInput"} + if s.InputBucket == nil { + invalidParams.Add(request.NewErrParamRequired("InputBucket")) + } + if s.OutputBucket == nil { + invalidParams.Add(request.NewErrParamRequired("OutputBucket")) + } + if s.Role == nil { + invalidParams.Add(request.NewErrParamRequired("Role")) + } + if s.Topics == nil { + invalidParams.Add(request.NewErrParamRequired("Topics")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The TestRoleResponse structure. 
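+//
+// A minimal TestRole sketch (illustrative bucket, role, and topic names;
+// assumes a configured *ElasticTranscoder client named svc):
+//
+//    resp, err := svc.TestRole(&elastictranscoder.TestRoleInput{
+//        InputBucket:  aws.String("example-input-bucket"),
+//        OutputBucket: aws.String("example-output-bucket"),
+//        Role:         aws.String("arn:aws:iam::111122223333:role/Elastic_Transcoder_Default_Role"),
+//        Topics:       []*string{aws.String("arn:aws:sns:us-east-1:111122223333:transcoder-test")},
+//    })
+//    if err != nil {
+//        return err
+//    }
+//    if aws.StringValue(resp.Success) != "true" { // Success is a string, not a bool
+//        log.Println("role test failed:", aws.StringValueSlice(resp.Messages))
+//    }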
+type TestRoleOutput struct { + _ struct{} `type:"structure"` + + // If the Success element contains false, this value is an array of one or more + // error messages that were generated during the test process. + Messages []*string `type:"list"` + + // If the operation is successful, this value is true; otherwise, the value + // is false. + Success *string `type:"string"` +} + +// String returns the string representation +func (s TestRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TestRoleOutput) GoString() string { + return s.String() +} + +// Thumbnails for videos. +type Thumbnails struct { + _ struct{} `type:"structure"` + + // To better control resolution and aspect ratio of thumbnails, we recommend + // that you use the values MaxWidth, MaxHeight, SizingPolicy, and PaddingPolicy + // instead of Resolution and AspectRatio. The two groups of settings are mutually + // exclusive. Do not use them together. + // + // The aspect ratio of thumbnails. Valid values include: + // + // auto, 1:1, 4:3, 3:2, 16:9 + // + // If you specify auto, Elastic Transcoder tries to preserve the aspect ratio + // of the video in the output file. + AspectRatio *string `type:"string"` + + // The format of thumbnails, if any. Valid values are jpg and png. + // + // You specify whether you want Elastic Transcoder to create thumbnails when + // you create a job. + Format *string `type:"string"` + + // The approximate number of seconds between thumbnails. Specify an integer + // value. + Interval *string `type:"string"` + + // The maximum height of thumbnails in pixels. If you specify auto, Elastic + // Transcoder uses 1080 (Full HD) as the default value. If you specify a numeric + // value, enter an even integer between 32 and 3072. + MaxHeight *string `type:"string"` + + // The maximum width of thumbnails in pixels. If you specify auto, Elastic Transcoder + // uses 1920 (Full HD) as the default value. If you specify a numeric value, + // enter an even integer between 32 and 4096. + MaxWidth *string `type:"string"` + + // When you set PaddingPolicy to Pad, Elastic Transcoder may add black bars + // to the top and bottom and/or left and right sides of thumbnails to make the + // total size of the thumbnails match the values that you specified for thumbnail + // MaxWidth and MaxHeight settings. + PaddingPolicy *string `type:"string"` + + // To better control resolution and aspect ratio of thumbnails, we recommend + // that you use the values MaxWidth, MaxHeight, SizingPolicy, and PaddingPolicy + // instead of Resolution and AspectRatio. The two groups of settings are mutually + // exclusive. Do not use them together. + // + // The width and height of thumbnail files in pixels. Specify a value in the + // format width x height where both values are even integers. The values cannot + // exceed the width and height that you specified in the Video:Resolution object. + Resolution *string `type:"string"` + + // Specify one of the following values to control scaling of thumbnails: + // + // Fit: Elastic Transcoder scales thumbnails so they match the value that + // you specified in thumbnail MaxWidth or MaxHeight settings without exceeding + // the other value. Fill: Elastic Transcoder scales thumbnails so they match + // the value that you specified in thumbnail MaxWidth or MaxHeight settings + // and matches or exceeds the other value. 
Elastic Transcoder centers the image + // in thumbnails and then crops in the dimension (if any) that exceeds the maximum + // value. Stretch: Elastic Transcoder stretches thumbnails to match the values + // that you specified for thumbnail MaxWidth and MaxHeight settings. If the + // relative proportions of the input video and thumbnails are different, the + // thumbnails will be distorted. Keep: Elastic Transcoder does not scale thumbnails. + // If either dimension of the input video exceeds the values that you specified + // for thumbnail MaxWidth and MaxHeight settings, Elastic Transcoder crops the + // thumbnails. ShrinkToFit: Elastic Transcoder scales thumbnails down so that + // their dimensions match the values that you specified for at least one of + // thumbnail MaxWidth and MaxHeight without exceeding either value. If you specify + // this option, Elastic Transcoder does not scale thumbnails up. ShrinkToFill: + // Elastic Transcoder scales thumbnails down so that their dimensions match + // the values that you specified for at least one of MaxWidth and MaxHeight + // without dropping below either value. If you specify this option, Elastic + // Transcoder does not scale thumbnails up. + SizingPolicy *string `type:"string"` +} + +// String returns the string representation +func (s Thumbnails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Thumbnails) GoString() string { + return s.String() +} + +// Settings that determine when a clip begins and how long it lasts. +type TimeSpan struct { + _ struct{} `type:"structure"` + + // The duration of the clip. The format can be either HH:mm:ss.SSS (maximum + // value: 23:59:59.999; SSS is thousandths of a second) or sssss.SSS (maximum + // value: 86399.999). If you don't specify a value, Elastic Transcoder creates + // an output file from StartTime to the end of the file. + // + // If you specify a value longer than the duration of the input file, Elastic + // Transcoder transcodes the file and returns a warning message. + Duration *string `type:"string"` + + // The place in the input file where you want a clip to start. The format can + // be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS is thousandths of + // a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a + // value, Elastic Transcoder starts at the beginning of the input file. + StartTime *string `type:"string"` +} + +// String returns the string representation +func (s TimeSpan) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TimeSpan) GoString() string { + return s.String() +} + +// Details about the timing of a job. +type Timing struct { + _ struct{} `type:"structure"` + + // The time the job finished transcoding, in epoch milliseconds. + FinishTimeMillis *int64 `type:"long"` + + // The time the job began transcoding, in epoch milliseconds. + StartTimeMillis *int64 `type:"long"` + + // The time the job was submitted to Elastic Transcoder, in epoch milliseconds. + SubmitTimeMillis *int64 `type:"long"` +} + +// String returns the string representation +func (s Timing) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Timing) GoString() string { + return s.String() +} + +// The UpdatePipelineRequest structure. +type UpdatePipelineInput struct { + _ struct{} `type:"structure"` + + // The AWS Key Management Service (AWS KMS) key that you want to use with this + // pipeline. 
+ // + // If you use either S3 or S3-AWS-KMS as your Encryption:Mode, you don't need + // to provide a key with your job because a default key, known as an AWS-KMS + // key, is created for you automatically. You need to provide an AWS-KMS key + // only if you want to use a non-default AWS-KMS key, or if you are using an + // Encryption:Mode of AES-PKCS7, AES-CTR, or AES-GCM. + AwsKmsKeyArn *string `type:"string"` + + // The optional ContentConfig object specifies information about the Amazon + // S3 bucket in which you want Elastic Transcoder to save transcoded files and + // playlists: which bucket to use, which users you want to have access to the + // files, the type of access you want users to have, and the storage class that + // you want to assign to the files. + // + // If you specify values for ContentConfig, you must also specify values for + // ThumbnailConfig. + // + // If you specify values for ContentConfig and ThumbnailConfig, omit the OutputBucket + // object. + // + // Bucket: The Amazon S3 bucket in which you want Elastic Transcoder to save + // transcoded files and playlists. Permissions (Optional): The Permissions + // object specifies which users you want to have access to transcoded files + // and the type of access you want them to have. You can grant permissions to + // a maximum of 30 users and/or predefined Amazon S3 groups. Grantee Type: + // Specify the type of value that appears in the Grantee object: Canonical: + // The value in the Grantee object is either the canonical user ID for an AWS + // account or an origin access identity for an Amazon CloudFront distribution. + // For more information about canonical user IDs, see Access Control List (ACL) + // Overview in the Amazon Simple Storage Service Developer Guide. For more information + // about using CloudFront origin access identities to require that users use + // CloudFront URLs instead of Amazon S3 URLs, see Using an Origin Access Identity + // to Restrict Access to Your Amazon S3 Content. A canonical user ID is not + // the same as an AWS account number. Email: The value in the Grantee object + // is the registered email address of an AWS account. Group: The value in the + // Grantee object is one of the following predefined Amazon S3 groups: AllUsers, + // AuthenticatedUsers, or LogDelivery. Grantee: The AWS user or group that + // you want to have access to transcoded files and playlists. To identify the + // user or group, you can specify the canonical user ID for an AWS account, + // an origin access identity for a CloudFront distribution, the registered email + // address of an AWS account, or a predefined Amazon S3 group Access: The + // permission that you want to give to the AWS user that you specified in Grantee. + // Permissions are granted on the files that Elastic Transcoder adds to the + // bucket, including playlists and video files. Valid values include: READ: + // The grantee can read the objects and metadata for objects that Elastic Transcoder + // adds to the Amazon S3 bucket. READ_ACP: The grantee can read the object + // ACL for objects that Elastic Transcoder adds to the Amazon S3 bucket. WRITE_ACP: + // The grantee can write the ACL for the objects that Elastic Transcoder adds + // to the Amazon S3 bucket. FULL_CONTROL: The grantee has READ, READ_ACP, and + // WRITE_ACP permissions for the objects that Elastic Transcoder adds to the + // Amazon S3 bucket. 
StorageClass: The Amazon S3 storage class, Standard + // or ReducedRedundancy, that you want Elastic Transcoder to assign to the video + // files and playlists that it stores in your Amazon S3 bucket. + ContentConfig *PipelineOutputConfig `type:"structure"` + + // The ID of the pipeline that you want to update. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The Amazon S3 bucket in which you saved the media files that you want to + // transcode and the graphics that you want to use as watermarks. + InputBucket *string `type:"string"` + + // The name of the pipeline. We recommend that the name be unique within the + // AWS account, but uniqueness is not enforced. + // + // Constraints: Maximum 40 characters + Name *string `min:"1" type:"string"` + + // The Amazon Simple Notification Service (Amazon SNS) topic or topics to notify + // in order to report job status. + // + // To receive notifications, you must also subscribe to the new topic in the + // Amazon SNS console. + Notifications *Notifications `type:"structure"` + + // The IAM Amazon Resource Name (ARN) for the role that you want Elastic Transcoder + // to use to transcode jobs for this pipeline. + Role *string `type:"string"` + + // The ThumbnailConfig object specifies several values, including the Amazon + // S3 bucket in which you want Elastic Transcoder to save thumbnail files, which + // users you want to have access to the files, the type of access you want users + // to have, and the storage class that you want to assign to the files. + // + // If you specify values for ContentConfig, you must also specify values for + // ThumbnailConfig even if you don't want to create thumbnails. + // + // If you specify values for ContentConfig and ThumbnailConfig, omit the OutputBucket + // object. + // + // Bucket: The Amazon S3 bucket in which you want Elastic Transcoder to save + // thumbnail files. Permissions (Optional): The Permissions object specifies + // which users and/or predefined Amazon S3 groups you want to have access to + // thumbnail files, and the type of access you want them to have. You can grant + // permissions to a maximum of 30 users and/or predefined Amazon S3 groups. + // GranteeType: Specify the type of value that appears in the Grantee object: + // Canonical: The value in the Grantee object is either the canonical user + // ID for an AWS account or an origin access identity for an Amazon CloudFront + // distribution. A canonical user ID is not the same as an AWS account number. + // Email: The value in the Grantee object is the registered email address + // of an AWS account. Group: The value in the Grantee object is one of the + // following predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, or LogDelivery. + // Grantee: The AWS user or group that you want to have access to thumbnail + // files. To identify the user or group, you can specify the canonical user + // ID for an AWS account, an origin access identity for a CloudFront distribution, + // the registered email address of an AWS account, or a predefined Amazon S3 + // group. Access: The permission that you want to give to the AWS user that + // you specified in Grantee. Permissions are granted on the thumbnail files + // that Elastic Transcoder adds to the bucket. Valid values include: READ: + // The grantee can read the thumbnails and metadata for objects that Elastic + // Transcoder adds to the Amazon S3 bucket. 
+ //
+ // READ_ACP: The grantee can read the object ACL for thumbnails that Elastic
+ // Transcoder adds to the Amazon S3 bucket.
+ //
+ // WRITE_ACP: The grantee can write the ACL for the thumbnails that Elastic
+ // Transcoder adds to the Amazon S3 bucket.
+ //
+ // FULL_CONTROL: The grantee has READ, READ_ACP, and WRITE_ACP permissions
+ // for the thumbnails that Elastic Transcoder adds to the Amazon S3 bucket.
+ //
+ // StorageClass: The Amazon S3 storage class, Standard or ReducedRedundancy,
+ // that you want Elastic Transcoder to assign to the thumbnails that it stores
+ // in your Amazon S3 bucket.
+ ThumbnailConfig *PipelineOutputConfig `type:"structure"`
+}
+
+// String returns the string representation
+func (s UpdatePipelineInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdatePipelineInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdatePipelineInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UpdatePipelineInput"}
+ if s.Id == nil {
+  invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+ if s.Name != nil && len(*s.Name) < 1 {
+  invalidParams.Add(request.NewErrParamMinLen("Name", 1))
+ }
+ if s.ContentConfig != nil {
+  if err := s.ContentConfig.Validate(); err != nil {
+   invalidParams.AddNested("ContentConfig", err.(request.ErrInvalidParams))
+  }
+ }
+ if s.ThumbnailConfig != nil {
+  if err := s.ThumbnailConfig.Validate(); err != nil {
+   invalidParams.AddNested("ThumbnailConfig", err.(request.ErrInvalidParams))
+  }
+ }
+
+ if invalidParams.Len() > 0 {
+  return invalidParams
+ }
+ return nil
+}
+
+// The UpdatePipelineNotificationsRequest structure.
+type UpdatePipelineNotificationsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The identifier of the pipeline for which you want to change notification
+ // settings.
+ Id *string `location:"uri" locationName:"Id" type:"string" required:"true"`
+
+ // The topic ARN for the Amazon Simple Notification Service (Amazon SNS) topic
+ // that you want to notify to report job status.
+ //
+ // To receive notifications, you must also subscribe to the new topic in the
+ // Amazon SNS console.
+ //
+ // Progressing: The topic ARN for the Amazon Simple Notification Service (Amazon
+ // SNS) topic that you want to notify when Elastic Transcoder has started to
+ // process jobs that are added to this pipeline. This is the ARN that Amazon
+ // SNS returned when you created the topic.
+ //
+ // Completed: The topic ARN for the Amazon SNS topic that you want to notify
+ // when Elastic Transcoder has finished processing a job. This is the ARN that
+ // Amazon SNS returned when you created the topic.
+ //
+ // Warning: The topic ARN for the Amazon SNS topic that you want to notify
+ // when Elastic Transcoder encounters a warning condition. This is the ARN that
+ // Amazon SNS returned when you created the topic.
+ //
+ // Error: The topic ARN for the Amazon SNS topic that you want to notify when
+ // Elastic Transcoder encounters an error condition. This is the ARN that Amazon
+ // SNS returned when you created the topic.
+ Notifications *Notifications `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdatePipelineNotificationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdatePipelineNotificationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdatePipelineNotificationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdatePipelineNotificationsInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Notifications == nil { + invalidParams.Add(request.NewErrParamRequired("Notifications")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The UpdatePipelineNotificationsResponse structure. +type UpdatePipelineNotificationsOutput struct { + _ struct{} `type:"structure"` + + // A section of the response body that provides information about the pipeline. + Pipeline *Pipeline `type:"structure"` +} + +// String returns the string representation +func (s UpdatePipelineNotificationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdatePipelineNotificationsOutput) GoString() string { + return s.String() +} + +// When you update a pipeline, Elastic Transcoder returns the values that you +// specified in the request. +type UpdatePipelineOutput struct { + _ struct{} `type:"structure"` + + // The pipeline (queue) that is used to manage jobs. + Pipeline *Pipeline `type:"structure"` + + // Elastic Transcoder returns a warning if the resources used by your pipeline + // are not in the same region as the pipeline. + // + // Using resources in the same region, such as your Amazon S3 buckets, Amazon + // SNS notification topics, and AWS KMS key, reduces processing time and prevents + // cross-regional charges. + Warnings []*Warning `type:"list"` +} + +// String returns the string representation +func (s UpdatePipelineOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdatePipelineOutput) GoString() string { + return s.String() +} + +// The UpdatePipelineStatusRequest structure. +type UpdatePipelineStatusInput struct { + _ struct{} `type:"structure"` + + // The identifier of the pipeline to update. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The desired status of the pipeline: + // + // Active: The pipeline is processing jobs. Paused: The pipeline is not + // currently processing jobs. + Status *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdatePipelineStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdatePipelineStatusInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
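+// Note: the SDK's core request handlers run this validation automatically
+// before a request is sent (see aws/corehandlers/param_validator.go), so a
+// missing Id or Status surfaces as a request.ErrInvalidParams error without
+// a round trip to the service. The same applies to the other Validate methods
+// in this file.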
+func (s *UpdatePipelineStatusInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UpdatePipelineStatusInput"}
+ if s.Id == nil {
+  invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+ if s.Status == nil {
+  invalidParams.Add(request.NewErrParamRequired("Status"))
+ }
+
+ if invalidParams.Len() > 0 {
+  return invalidParams
+ }
+ return nil
+}
+
+// When you update status for a pipeline, Elastic Transcoder returns the values
+// that you specified in the request.
+type UpdatePipelineStatusOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A section of the response body that provides information about the pipeline.
+ Pipeline *Pipeline `type:"structure"`
+}
+
+// String returns the string representation
+func (s UpdatePipelineStatusOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdatePipelineStatusOutput) GoString() string {
+ return s.String()
+}
+
+// The VideoParameters structure.
+type VideoParameters struct {
+ _ struct{} `type:"structure"`
+
+ // To better control resolution and aspect ratio of output videos, we recommend
+ // that you use the values MaxWidth, MaxHeight, SizingPolicy, PaddingPolicy,
+ // and DisplayAspectRatio instead of Resolution and AspectRatio. The two groups
+ // of settings are mutually exclusive. Do not use them together.
+ //
+ // The display aspect ratio of the video in the output file. Valid values
+ // include:
+ //
+ // auto, 1:1, 4:3, 3:2, 16:9
+ //
+ // If you specify auto, Elastic Transcoder tries to preserve the aspect ratio
+ // of the input file.
+ //
+ // If you specify an aspect ratio for the output file that differs from aspect
+ // ratio of the input file, Elastic Transcoder adds pillarboxing (black bars
+ // on the sides) or letterboxing (black bars on the top and bottom) to maintain
+ // the aspect ratio of the active region of the video.
+ AspectRatio *string `type:"string"`
+
+ // The bit rate of the video stream in the output file, in kilobits/second.
+ // Valid values depend on the values of Level and Profile. If you specify auto,
+ // Elastic Transcoder uses the detected bit rate of the input source. If you
+ // specify a value other than auto, we recommend that you specify a value less
+ // than or equal to the maximum H.264-compliant value listed for your level
+ // and profile:
+ //
+ // Level - Maximum video bit rate in kilobits/second (baseline and main Profile)
+ // : maximum video bit rate in kilobits/second (high Profile)
+ //
+ // 1 - 64 : 80
+ // 1b - 128 : 160
+ // 1.1 - 192 : 240
+ // 1.2 - 384 : 480
+ // 1.3 - 768 : 960
+ // 2 - 2000 : 2500
+ // 3 - 10000 : 12500
+ // 3.1 - 14000 : 17500
+ // 3.2 - 20000 : 25000
+ // 4 - 20000 : 25000
+ // 4.1 - 50000 : 62500
+ BitRate *string `type:"string"`
+
+ // The video codec for the output file. Valid values include gif, H.264, mpeg2,
+ // and vp8. You can only specify vp8 when the container type is webm, gif when
+ // the container type is gif, and mpeg2 when the container type is mpg.
+ Codec *string `type:"string"`
+
+ // Profile (H.264/VP8 Only)
+ //
+ // The H.264 profile that you want to use for the output file. Elastic Transcoder
+ // supports the following profiles:
+ //
+ // baseline: The profile most commonly used for videoconferencing and for
+ // mobile applications.
+ //
+ // main: The profile used for standard-definition digital TV broadcasts.
+ //
+ // high: The profile used for high-definition digital TV broadcasts and for
+ // Blu-ray discs.
+ //
+ // Level (H.264 Only)
+ //
+ // The H.264 level that you want to use for the output file. Elastic Transcoder
+ // supports the following levels:
+ //
+ // 1, 1b, 1.1, 1.2, 1.3, 2, 2.1, 2.2, 3, 3.1, 3.2, 4, 4.1
+ //
+ // MaxReferenceFrames (H.264 Only)
+ //
+ // Applicable only when the value of Video:Codec is H.264. The maximum number
+ // of previously decoded frames to use as a reference for decoding future frames.
+ // Valid values are integers 0 through 16, but we recommend that you not use
+ // a value greater than the following:
+ //
+ // Min(Floor(Maximum decoded picture buffer in macroblocks * 256 / (Width
+ // in pixels * Height in pixels)), 16)
+ //
+ // where Width in pixels and Height in pixels represent either MaxWidth and
+ // MaxHeight, or Resolution. Maximum decoded picture buffer in macroblocks depends
+ // on the value of the Level object. See the list below. (A macroblock is a
+ // block of pixels measuring 16x16.)
+ //
+ // 1 - 396
+ // 1b - 396
+ // 1.1 - 900
+ // 1.2 - 2376
+ // 1.3 - 2376
+ // 2 - 2376
+ // 2.1 - 4752
+ // 2.2 - 8100
+ // 3 - 8100
+ // 3.1 - 18000
+ // 3.2 - 20480
+ // 4 - 32768
+ // 4.1 - 32768
+ //
+ // For example, at level 4.1 with MaxWidth 1920 and MaxHeight 1080, the recommended
+ // maximum is Min(Floor(32768 * 256 / (1920 * 1080)), 16) = 4.
+ //
+ // MaxBitRate (Optional, H.264/MPEG2/VP8 only)
+ //
+ // The maximum number of bits per second in a video buffer; the size of the
+ // buffer is specified by BufferSize. Specify a value between 16 and 62,500.
+ // You can reduce the bandwidth required to stream a video by reducing the maximum
+ // bit rate, but this also reduces the quality of the video.
+ //
+ // BufferSize (Optional, H.264/MPEG2/VP8 only)
+ //
+ // The maximum number of bits in any x seconds of the output video. This window
+ // is commonly 10 seconds, the standard segment duration when you're using FMP4
+ // or MPEG-TS for the container type of the output video. Specify an integer
+ // greater than 0. If you specify MaxBitRate and omit BufferSize, Elastic Transcoder
+ // sets BufferSize to 10 times the value of MaxBitRate.
+ //
+ // InterlacedMode (Optional, H.264/MPEG2 Only)
+ //
+ // The interlace mode for the output video.
+ //
+ // Interlaced video is used to double the perceived frame rate for a video
+ // by interlacing two fields (one field on every other line, the other field
+ // on the other lines) so that the human eye registers multiple pictures per
+ // frame. Interlacing reduces the bandwidth required for transmitting a video,
+ // but can result in blurred images and flickering.
+ //
+ // Valid values include Progressive (no interlacing, top to bottom), TopFirst
+ // (top field first), BottomFirst (bottom field first), and Auto.
+ //
+ // If InterlacedMode is not specified, Elastic Transcoder uses Progressive for
+ // the output. If Auto is specified, Elastic Transcoder interlaces the output.
+ //
+ // ColorSpaceConversionMode (Optional, H.264/MPEG2 Only)
+ //
+ // The color space conversion Elastic Transcoder applies to the output video.
+ // Color spaces are the algorithms used by the computer to store information
+ // about how to render color. Bt.601 is the standard for standard definition
+ // video, while Bt.709 is the standard for high definition video.
+ //
+ // Valid values include None, Bt709toBt601, Bt601toBt709, and Auto.
+ //
+ // If you chose Auto for ColorSpaceConversionMode and your output is interlaced,
+ // your frame rate is one of 23.97, 24, 25, 29.97, 50, or 60, your SegmentDuration
+ // is null, and you are using one of the resolution changes from the list below,
+ // Elastic Transcoder applies the following color space conversions:
+ //
+ // Standard to HD, 720x480 to 1920x1080 - Elastic Transcoder applies Bt601ToBt709
+ // Standard to HD, 720x576 to 1920x1080 - Elastic Transcoder applies Bt601ToBt709
+ // HD to Standard, 1920x1080 to 720x480 - Elastic Transcoder applies Bt709ToBt601
+ // HD to Standard, 1920x1080 to 720x576 - Elastic Transcoder applies Bt709ToBt601
+ //
+ // Elastic Transcoder may change the behavior of the ColorSpaceConversionMode
+ // Auto mode in the future. All outputs in a playlist must use the same ColorSpaceConversionMode.
+ //
+ // If you do not specify a ColorSpaceConversionMode, Elastic Transcoder does
+ // not change the color space of a file. If you are unsure what ColorSpaceConversionMode
+ // was applied to your output file, you can check the AppliedColorSpaceConversion
+ // parameter included in your job response. If your job does not have an AppliedColorSpaceConversion
+ // in its response, no ColorSpaceConversionMode was applied.
+ //
+ // ChromaSubsampling
+ //
+ // The sampling pattern for the chroma (color) channels of the output video.
+ // Valid values include yuv420p and yuv422p.
+ //
+ // yuv420p samples the chroma information of every other horizontal and every
+ // other vertical line; yuv422p samples the color information of every horizontal
+ // line and every other vertical line.
+ //
+ // LoopCount (Gif Only)
+ //
+ // The number of times you want the output gif to loop. Valid values include
+ // Infinite and integers between 0 and 100, inclusive.
+ CodecOptions map[string]*string `type:"map"`
+
+ // The value that Elastic Transcoder adds to the metadata in the output file.
+ DisplayAspectRatio *string `type:"string"`
+
+ // Applicable only when the value of Video:Codec is one of H.264, MPEG2, or
+ // VP8.
+ //
+ // Whether to use a fixed value for FixedGOP. Valid values are true and false:
+ //
+ // true: Elastic Transcoder uses the value of KeyframesMaxDist for the distance
+ // between key frames (the number of frames in a group of pictures, or GOP).
+ //
+ // false: The distance between key frames can vary.
+ //
+ // FixedGOP must be set to true for fmp4 containers.
+ FixedGOP *string `type:"string"`
+
+ // The frames per second for the video stream in the output file. Valid values
+ // include:
+ //
+ // auto, 10, 15, 23.97, 24, 25, 29.97, 30, 60
+ //
+ // If you specify auto, Elastic Transcoder uses the detected frame rate of
+ // the input source. If you specify a frame rate, we recommend that you perform
+ // the following calculation:
+ //
+ // Frame rate = maximum recommended decoding speed in luma samples/second
+ // / (width in pixels * height in pixels)
+ //
+ // where:
+ //
+ // width in pixels and height in pixels represent the Resolution of the output
+ // video.
+ //
+ // maximum recommended decoding speed in Luma samples/second is less than
+ // or equal to the maximum value listed in the following table, based on the
+ // value that you specified for Level.
+ // The maximum recommended decoding speed in Luma samples/second for each level
+ // is described in the following list (Level - Decoding speed):
+ //
+ // 1 - 380160
+ // 1b - 380160
+ // 1.1 - 76800
+ // 1.2 - 1536000
+ // 1.3 - 3041280
+ // 2 - 3041280
+ // 2.1 - 5068800
+ // 2.2 - 5184000
+ // 3 - 10368000
+ // 3.1 - 27648000
+ // 3.2 - 55296000
+ // 4 - 62914560
+ // 4.1 - 62914560
+ FrameRate *string `type:"string"`
+
+ // Applicable only when the value of Video:Codec is one of H.264, MPEG2, or
+ // VP8.
+ //
+ // The maximum number of frames between key frames. Key frames are fully encoded
+ // frames; the frames between key frames are encoded based, in part, on the
+ // content of the key frames. The value is an integer formatted as a string;
+ // valid values are between 1 (every frame is a key frame) and 100000, inclusive.
+ // A higher value results in higher compression but may also discernibly decrease
+ // video quality.
+ //
+ // For Smooth outputs, the FrameRate must have a constant ratio to the KeyframesMaxDist.
+ // This allows Smooth playlists to switch between different quality levels while
+ // the file is being played.
+ //
+ // For example, an input file can have a FrameRate of 30 with a KeyframesMaxDist
+ // of 90. The output file then needs to have a ratio of 1:3. Valid outputs would
+ // have FrameRate of 30, 25, and 10, and KeyframesMaxDist of 90, 75, and 30,
+ // respectively.
+ //
+ // Alternately, this can be achieved by setting FrameRate to auto and having
+ // the same values for MaxFrameRate and KeyframesMaxDist.
+ KeyframesMaxDist *string `type:"string"`
+
+ // If you specify auto for FrameRate, Elastic Transcoder uses the frame rate
+ // of the input video for the frame rate of the output video. Specify the maximum
+ // frame rate that you want Elastic Transcoder to use when the frame rate of
+ // the input video is greater than the desired maximum frame rate of the output
+ // video. Valid values include: 10, 15, 23.97, 24, 25, 29.97, 30, 60.
+ MaxFrameRate *string `type:"string"`
+
+ // The maximum height of the output video in pixels. If you specify auto, Elastic
+ // Transcoder uses 1080 (Full HD) as the default value. If you specify a numeric
+ // value, enter an even integer between 96 and 3072.
+ MaxHeight *string `type:"string"`
+
+ // The maximum width of the output video in pixels. If you specify auto, Elastic
+ // Transcoder uses 1920 (Full HD) as the default value. If you specify a numeric
+ // value, enter an even integer between 128 and 4096.
+ MaxWidth *string `type:"string"`
+
+ // When you set PaddingPolicy to Pad, Elastic Transcoder may add black bars
+ // to the top and bottom and/or left and right sides of the output video to
+ // make the total size of the output video match the values that you specified
+ // for MaxWidth and MaxHeight.
+ PaddingPolicy *string `type:"string"`
+
+ // To better control resolution and aspect ratio of output videos, we recommend
+ // that you use the values MaxWidth, MaxHeight, SizingPolicy, PaddingPolicy,
+ // and DisplayAspectRatio instead of Resolution and AspectRatio. The two groups
+ // of settings are mutually exclusive. Do not use them together.
+ //
+ // The width and height of the video in the output file, in pixels. Valid
+ // values are auto and width x height:
+ //
+ // auto: Elastic Transcoder attempts to preserve the width and height of
+ // the input file, subject to the following rules.
+ //
+ // width x height: The width and height of the output video in pixels.
+ //
+ // Note the following about specifying the width and height:
+ //
+ // The width must be an even integer between 128 and 4096, inclusive.
+ //
+ // The height must be an even integer between 96 and 3072, inclusive.
+ //
+ // If you specify a resolution that is less than the resolution of the input
+ // file, Elastic Transcoder rescales the output file to the lower resolution.
+ //
+ // If you specify a resolution that is greater than the resolution of the
+ // input file, Elastic Transcoder rescales the output to the higher resolution.
+ //
+ // We recommend that you specify a resolution for which the product of width
+ // and height is less than or equal to the applicable value in the following
+ // list (Level - Max width x height value):
+ //
+ // 1 - 25344
+ // 1b - 25344
+ // 1.1 - 101376
+ // 1.2 - 101376
+ // 1.3 - 101376
+ // 2 - 101376
+ // 2.1 - 202752
+ // 2.2 - 404720
+ // 3 - 404720
+ // 3.1 - 921600
+ // 3.2 - 1310720
+ // 4 - 2097152
+ // 4.1 - 2097152
+ Resolution *string `type:"string"`
+
+ // Specify one of the following values to control scaling of the output video:
+ //
+ // Fit: Elastic Transcoder scales the output video so it matches the value
+ // that you specified in either MaxWidth or MaxHeight without exceeding the
+ // other value.
+ //
+ // Fill: Elastic Transcoder scales the output video so it matches the value
+ // that you specified in either MaxWidth or MaxHeight and matches or exceeds
+ // the other value. Elastic Transcoder centers the output video and then crops
+ // it in the dimension (if any) that exceeds the maximum value.
+ //
+ // Stretch: Elastic Transcoder stretches the output video to match the values
+ // that you specified for MaxWidth and MaxHeight. If the relative proportions
+ // of the input video and the output video are different, the output video will
+ // be distorted.
+ //
+ // Keep: Elastic Transcoder does not scale the output video. If either dimension
+ // of the input video exceeds the values that you specified for MaxWidth and
+ // MaxHeight, Elastic Transcoder crops the output video.
+ //
+ // ShrinkToFit: Elastic Transcoder scales the output video down so that its
+ // dimensions match the values that you specified for at least one of MaxWidth
+ // and MaxHeight without exceeding either value. If you specify this option,
+ // Elastic Transcoder does not scale the video up.
+ //
+ // ShrinkToFill: Elastic Transcoder scales the output video down so that its
+ // dimensions match the values that you specified for at least one of MaxWidth
+ // and MaxHeight without dropping below either value. If you specify this option,
+ // Elastic Transcoder does not scale the video up.
+ SizingPolicy *string `type:"string"`
+
+ // Settings for the size, location, and opacity of graphics that you want Elastic
+ // Transcoder to overlay over videos that are transcoded using this preset.
+ // You can specify settings for up to four watermarks. Watermarks appear in
+ // the specified size and location, and with the specified opacity for the duration
+ // of the transcoded video.
+ //
+ // Watermarks can be in .png or .jpg format. If you want to display a watermark
+ // that is not rectangular, use the .png format, which supports transparency.
+ //
+ // When you create a job that uses this preset, you specify the .png or .jpg
+ // graphics that you want Elastic Transcoder to include in the transcoded videos.
+ // You can specify fewer graphics in the job than you specify watermark settings
+ // in the preset, which allows you to use the same preset for up to four watermarks
+ // that have different dimensions.
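+ //
+ // A minimal illustrative entry, written as a caller might construct it (the
+ // values are placeholders, not service defaults):
+ //
+ //  PresetWatermark{
+ //   Id:              aws.String("BottomRight"),
+ //   MaxWidth:        aws.String("10%"),
+ //   MaxHeight:       aws.String("10%"),
+ //   SizingPolicy:    aws.String("ShrinkToFit"),
+ //   HorizontalAlign: aws.String("Right"),
+ //   VerticalAlign:   aws.String("Bottom"),
+ //   Opacity:         aws.String("50"),
+ //  }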
+ Watermarks []*PresetWatermark `type:"list"` +} + +// String returns the string representation +func (s VideoParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VideoParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *VideoParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VideoParameters"} + if s.Watermarks != nil { + for i, v := range s.Watermarks { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Watermarks", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Elastic Transcoder returns a warning if the resources used by your pipeline +// are not in the same region as the pipeline. +// +// Using resources in the same region, such as your Amazon S3 buckets, Amazon +// SNS notification topics, and AWS KMS key, reduces processing time and prevents +// cross-regional charges. +type Warning struct { + _ struct{} `type:"structure"` + + // The code of the cross-regional warning. + Code *string `type:"string"` + + // The message explaining what resources are in a different region from the + // pipeline. + // + // Note: AWS KMS keys must be in the same region as the pipeline. + Message *string `type:"string"` +} + +// String returns the string representation +func (s Warning) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Warning) GoString() string { + return s.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/elastictranscoderiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/elastictranscoderiface/interface.go new file mode 100644 index 000000000..c1526c5ee --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/elastictranscoderiface/interface.go @@ -0,0 +1,90 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package elastictranscoderiface provides an interface for the Amazon Elastic Transcoder. +package elastictranscoderiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/elastictranscoder" +) + +// ElasticTranscoderAPI is the interface type for elastictranscoder.ElasticTranscoder. 
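+//
+// The interface lets callers decouple their code from the concrete client,
+// which is handy for unit tests. A minimal sketch (mockETClient is a
+// hypothetical test type, not part of the SDK):
+//
+//  type mockETClient struct {
+//   elastictranscoderiface.ElasticTranscoderAPI
+//  }
+//
+//  func (m *mockETClient) ReadJob(in *elastictranscoder.ReadJobInput) (*elastictranscoder.ReadJobOutput, error) {
+//   // Return canned data instead of calling the service.
+//   return &elastictranscoder.ReadJobOutput{}, nil
+//  }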
+type ElasticTranscoderAPI interface { + CancelJobRequest(*elastictranscoder.CancelJobInput) (*request.Request, *elastictranscoder.CancelJobOutput) + + CancelJob(*elastictranscoder.CancelJobInput) (*elastictranscoder.CancelJobOutput, error) + + CreateJobRequest(*elastictranscoder.CreateJobInput) (*request.Request, *elastictranscoder.CreateJobResponse) + + CreateJob(*elastictranscoder.CreateJobInput) (*elastictranscoder.CreateJobResponse, error) + + CreatePipelineRequest(*elastictranscoder.CreatePipelineInput) (*request.Request, *elastictranscoder.CreatePipelineOutput) + + CreatePipeline(*elastictranscoder.CreatePipelineInput) (*elastictranscoder.CreatePipelineOutput, error) + + CreatePresetRequest(*elastictranscoder.CreatePresetInput) (*request.Request, *elastictranscoder.CreatePresetOutput) + + CreatePreset(*elastictranscoder.CreatePresetInput) (*elastictranscoder.CreatePresetOutput, error) + + DeletePipelineRequest(*elastictranscoder.DeletePipelineInput) (*request.Request, *elastictranscoder.DeletePipelineOutput) + + DeletePipeline(*elastictranscoder.DeletePipelineInput) (*elastictranscoder.DeletePipelineOutput, error) + + DeletePresetRequest(*elastictranscoder.DeletePresetInput) (*request.Request, *elastictranscoder.DeletePresetOutput) + + DeletePreset(*elastictranscoder.DeletePresetInput) (*elastictranscoder.DeletePresetOutput, error) + + ListJobsByPipelineRequest(*elastictranscoder.ListJobsByPipelineInput) (*request.Request, *elastictranscoder.ListJobsByPipelineOutput) + + ListJobsByPipeline(*elastictranscoder.ListJobsByPipelineInput) (*elastictranscoder.ListJobsByPipelineOutput, error) + + ListJobsByPipelinePages(*elastictranscoder.ListJobsByPipelineInput, func(*elastictranscoder.ListJobsByPipelineOutput, bool) bool) error + + ListJobsByStatusRequest(*elastictranscoder.ListJobsByStatusInput) (*request.Request, *elastictranscoder.ListJobsByStatusOutput) + + ListJobsByStatus(*elastictranscoder.ListJobsByStatusInput) (*elastictranscoder.ListJobsByStatusOutput, error) + + ListJobsByStatusPages(*elastictranscoder.ListJobsByStatusInput, func(*elastictranscoder.ListJobsByStatusOutput, bool) bool) error + + ListPipelinesRequest(*elastictranscoder.ListPipelinesInput) (*request.Request, *elastictranscoder.ListPipelinesOutput) + + ListPipelines(*elastictranscoder.ListPipelinesInput) (*elastictranscoder.ListPipelinesOutput, error) + + ListPipelinesPages(*elastictranscoder.ListPipelinesInput, func(*elastictranscoder.ListPipelinesOutput, bool) bool) error + + ListPresetsRequest(*elastictranscoder.ListPresetsInput) (*request.Request, *elastictranscoder.ListPresetsOutput) + + ListPresets(*elastictranscoder.ListPresetsInput) (*elastictranscoder.ListPresetsOutput, error) + + ListPresetsPages(*elastictranscoder.ListPresetsInput, func(*elastictranscoder.ListPresetsOutput, bool) bool) error + + ReadJobRequest(*elastictranscoder.ReadJobInput) (*request.Request, *elastictranscoder.ReadJobOutput) + + ReadJob(*elastictranscoder.ReadJobInput) (*elastictranscoder.ReadJobOutput, error) + + ReadPipelineRequest(*elastictranscoder.ReadPipelineInput) (*request.Request, *elastictranscoder.ReadPipelineOutput) + + ReadPipeline(*elastictranscoder.ReadPipelineInput) (*elastictranscoder.ReadPipelineOutput, error) + + ReadPresetRequest(*elastictranscoder.ReadPresetInput) (*request.Request, *elastictranscoder.ReadPresetOutput) + + ReadPreset(*elastictranscoder.ReadPresetInput) (*elastictranscoder.ReadPresetOutput, error) + + TestRoleRequest(*elastictranscoder.TestRoleInput) (*request.Request, 
*elastictranscoder.TestRoleOutput) + + TestRole(*elastictranscoder.TestRoleInput) (*elastictranscoder.TestRoleOutput, error) + + UpdatePipelineRequest(*elastictranscoder.UpdatePipelineInput) (*request.Request, *elastictranscoder.UpdatePipelineOutput) + + UpdatePipeline(*elastictranscoder.UpdatePipelineInput) (*elastictranscoder.UpdatePipelineOutput, error) + + UpdatePipelineNotificationsRequest(*elastictranscoder.UpdatePipelineNotificationsInput) (*request.Request, *elastictranscoder.UpdatePipelineNotificationsOutput) + + UpdatePipelineNotifications(*elastictranscoder.UpdatePipelineNotificationsInput) (*elastictranscoder.UpdatePipelineNotificationsOutput, error) + + UpdatePipelineStatusRequest(*elastictranscoder.UpdatePipelineStatusInput) (*request.Request, *elastictranscoder.UpdatePipelineStatusOutput) + + UpdatePipelineStatus(*elastictranscoder.UpdatePipelineStatusInput) (*elastictranscoder.UpdatePipelineStatusOutput, error) +} + +var _ ElasticTranscoderAPI = (*elastictranscoder.ElasticTranscoder)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/examples_test.go new file mode 100644 index 000000000..c9f63bca5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/examples_test.go @@ -0,0 +1,737 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package elastictranscoder_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/elastictranscoder" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleElasticTranscoder_CancelJob() { + svc := elastictranscoder.New(session.New()) + + params := &elastictranscoder.CancelJobInput{ + Id: aws.String("Id"), // Required + } + resp, err := svc.CancelJob(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleElasticTranscoder_CreateJob() { + svc := elastictranscoder.New(session.New()) + + params := &elastictranscoder.CreateJobInput{ + Input: &elastictranscoder.JobInput{ // Required + AspectRatio: aws.String("AspectRatio"), + Container: aws.String("JobContainer"), + DetectedProperties: &elastictranscoder.DetectedProperties{ + DurationMillis: aws.Int64(1), + FileSize: aws.Int64(1), + FrameRate: aws.String("FloatString"), + Height: aws.Int64(1), + Width: aws.Int64(1), + }, + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + FrameRate: aws.String("FrameRate"), + Interlaced: aws.String("Interlaced"), + Key: aws.String("Key"), + Resolution: aws.String("Resolution"), + }, + PipelineId: aws.String("Id"), // Required + Output: &elastictranscoder.CreateJobOutput{ + AlbumArt: &elastictranscoder.JobAlbumArt{ + Artwork: []*elastictranscoder.Artwork{ + { // Required + AlbumArtFormat: aws.String("JpgOrPng"), + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + InputKey: aws.String("WatermarkKey"), + MaxHeight: aws.String("DigitsOrAuto"), + MaxWidth: aws.String("DigitsOrAuto"), + PaddingPolicy: aws.String("PaddingPolicy"), + SizingPolicy: aws.String("SizingPolicy"), + }, + // More values... + }, + MergePolicy: aws.String("MergePolicy"), + }, + Captions: &elastictranscoder.Captions{ + CaptionFormats: []*elastictranscoder.CaptionFormat{ + { // Required + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + Format: aws.String("CaptionFormatFormat"), + Pattern: aws.String("CaptionFormatPattern"), + }, + // More values... + }, + CaptionSources: []*elastictranscoder.CaptionSource{ + { // Required + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + Key: aws.String("Key"), + Label: aws.String("Name"), + Language: aws.String("Key"), + TimeOffset: aws.String("TimeOffset"), + }, + // More values... + }, + MergePolicy: aws.String("CaptionMergePolicy"), + }, + Composition: []*elastictranscoder.Clip{ + { // Required + TimeSpan: &elastictranscoder.TimeSpan{ + Duration: aws.String("Time"), + StartTime: aws.String("Time"), + }, + }, + // More values... 
+ }, + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + Key: aws.String("Key"), + PresetId: aws.String("Id"), + Rotate: aws.String("Rotate"), + SegmentDuration: aws.String("FloatString"), + ThumbnailEncryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + ThumbnailPattern: aws.String("ThumbnailPattern"), + Watermarks: []*elastictranscoder.JobWatermark{ + { // Required + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + InputKey: aws.String("WatermarkKey"), + PresetWatermarkId: aws.String("PresetWatermarkId"), + }, + // More values... + }, + }, + OutputKeyPrefix: aws.String("Key"), + Outputs: []*elastictranscoder.CreateJobOutput{ + { // Required + AlbumArt: &elastictranscoder.JobAlbumArt{ + Artwork: []*elastictranscoder.Artwork{ + { // Required + AlbumArtFormat: aws.String("JpgOrPng"), + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + InputKey: aws.String("WatermarkKey"), + MaxHeight: aws.String("DigitsOrAuto"), + MaxWidth: aws.String("DigitsOrAuto"), + PaddingPolicy: aws.String("PaddingPolicy"), + SizingPolicy: aws.String("SizingPolicy"), + }, + // More values... + }, + MergePolicy: aws.String("MergePolicy"), + }, + Captions: &elastictranscoder.Captions{ + CaptionFormats: []*elastictranscoder.CaptionFormat{ + { // Required + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + Format: aws.String("CaptionFormatFormat"), + Pattern: aws.String("CaptionFormatPattern"), + }, + // More values... + }, + CaptionSources: []*elastictranscoder.CaptionSource{ + { // Required + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + Key: aws.String("Key"), + Label: aws.String("Name"), + Language: aws.String("Key"), + TimeOffset: aws.String("TimeOffset"), + }, + // More values... + }, + MergePolicy: aws.String("CaptionMergePolicy"), + }, + Composition: []*elastictranscoder.Clip{ + { // Required + TimeSpan: &elastictranscoder.TimeSpan{ + Duration: aws.String("Time"), + StartTime: aws.String("Time"), + }, + }, + // More values... 
+ }, + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + Key: aws.String("Key"), + PresetId: aws.String("Id"), + Rotate: aws.String("Rotate"), + SegmentDuration: aws.String("FloatString"), + ThumbnailEncryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + ThumbnailPattern: aws.String("ThumbnailPattern"), + Watermarks: []*elastictranscoder.JobWatermark{ + { // Required + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + InputKey: aws.String("WatermarkKey"), + PresetWatermarkId: aws.String("PresetWatermarkId"), + }, + // More values... + }, + }, + // More values... + }, + Playlists: []*elastictranscoder.CreateJobPlaylist{ + { // Required + Format: aws.String("PlaylistFormat"), + HlsContentProtection: &elastictranscoder.HlsContentProtection{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + KeyStoragePolicy: aws.String("KeyStoragePolicy"), + LicenseAcquisitionUrl: aws.String("ZeroTo512String"), + Method: aws.String("HlsContentProtectionMethod"), + }, + Name: aws.String("Filename"), + OutputKeys: []*string{ + aws.String("Key"), // Required + // More values... + }, + PlayReadyDrm: &elastictranscoder.PlayReadyDrm{ + Format: aws.String("PlayReadyDrmFormatString"), + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("NonEmptyBase64EncodedString"), + KeyId: aws.String("KeyIdGuid"), + KeyMd5: aws.String("NonEmptyBase64EncodedString"), + LicenseAcquisitionUrl: aws.String("OneTo512String"), + }, + }, + // More values... + }, + UserMetadata: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.CreateJob(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticTranscoder_CreatePipeline() { + svc := elastictranscoder.New(session.New()) + + params := &elastictranscoder.CreatePipelineInput{ + InputBucket: aws.String("BucketName"), // Required + Name: aws.String("Name"), // Required + Role: aws.String("Role"), // Required + AwsKmsKeyArn: aws.String("KeyArn"), + ContentConfig: &elastictranscoder.PipelineOutputConfig{ + Bucket: aws.String("BucketName"), + Permissions: []*elastictranscoder.Permission{ + { // Required + Access: []*string{ + aws.String("AccessControl"), // Required + // More values... + }, + Grantee: aws.String("Grantee"), + GranteeType: aws.String("GranteeType"), + }, + // More values... 
+ }, + StorageClass: aws.String("StorageClass"), + }, + Notifications: &elastictranscoder.Notifications{ + Completed: aws.String("SnsTopic"), + Error: aws.String("SnsTopic"), + Progressing: aws.String("SnsTopic"), + Warning: aws.String("SnsTopic"), + }, + OutputBucket: aws.String("BucketName"), + ThumbnailConfig: &elastictranscoder.PipelineOutputConfig{ + Bucket: aws.String("BucketName"), + Permissions: []*elastictranscoder.Permission{ + { // Required + Access: []*string{ + aws.String("AccessControl"), // Required + // More values... + }, + Grantee: aws.String("Grantee"), + GranteeType: aws.String("GranteeType"), + }, + // More values... + }, + StorageClass: aws.String("StorageClass"), + }, + } + resp, err := svc.CreatePipeline(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticTranscoder_CreatePreset() { + svc := elastictranscoder.New(session.New()) + + params := &elastictranscoder.CreatePresetInput{ + Container: aws.String("PresetContainer"), // Required + Name: aws.String("Name"), // Required + Audio: &elastictranscoder.AudioParameters{ + AudioPackingMode: aws.String("AudioPackingMode"), + BitRate: aws.String("AudioBitRate"), + Channels: aws.String("AudioChannels"), + Codec: aws.String("AudioCodec"), + CodecOptions: &elastictranscoder.AudioCodecOptions{ + BitDepth: aws.String("AudioBitDepth"), + BitOrder: aws.String("AudioBitOrder"), + Profile: aws.String("AudioCodecProfile"), + Signed: aws.String("AudioSigned"), + }, + SampleRate: aws.String("AudioSampleRate"), + }, + Description: aws.String("Description"), + Thumbnails: &elastictranscoder.Thumbnails{ + AspectRatio: aws.String("AspectRatio"), + Format: aws.String("JpgOrPng"), + Interval: aws.String("Digits"), + MaxHeight: aws.String("DigitsOrAuto"), + MaxWidth: aws.String("DigitsOrAuto"), + PaddingPolicy: aws.String("PaddingPolicy"), + Resolution: aws.String("ThumbnailResolution"), + SizingPolicy: aws.String("SizingPolicy"), + }, + Video: &elastictranscoder.VideoParameters{ + AspectRatio: aws.String("AspectRatio"), + BitRate: aws.String("VideoBitRate"), + Codec: aws.String("VideoCodec"), + CodecOptions: map[string]*string{ + "Key": aws.String("CodecOption"), // Required + // More values... + }, + DisplayAspectRatio: aws.String("AspectRatio"), + FixedGOP: aws.String("FixedGOP"), + FrameRate: aws.String("FrameRate"), + KeyframesMaxDist: aws.String("KeyframesMaxDist"), + MaxFrameRate: aws.String("MaxFrameRate"), + MaxHeight: aws.String("DigitsOrAuto"), + MaxWidth: aws.String("DigitsOrAuto"), + PaddingPolicy: aws.String("PaddingPolicy"), + Resolution: aws.String("Resolution"), + SizingPolicy: aws.String("SizingPolicy"), + Watermarks: []*elastictranscoder.PresetWatermark{ + { // Required + HorizontalAlign: aws.String("HorizontalAlign"), + HorizontalOffset: aws.String("PixelsOrPercent"), + Id: aws.String("PresetWatermarkId"), + MaxHeight: aws.String("PixelsOrPercent"), + MaxWidth: aws.String("PixelsOrPercent"), + Opacity: aws.String("Opacity"), + SizingPolicy: aws.String("WatermarkSizingPolicy"), + Target: aws.String("Target"), + VerticalAlign: aws.String("VerticalAlign"), + VerticalOffset: aws.String("PixelsOrPercent"), + }, + // More values... + }, + }, + } + resp, err := svc.CreatePreset(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticTranscoder_DeletePipeline() { + svc := elastictranscoder.New(session.New()) + + params := &elastictranscoder.DeletePipelineInput{ + Id: aws.String("Id"), // Required + } + resp, err := svc.DeletePipeline(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticTranscoder_DeletePreset() { + svc := elastictranscoder.New(session.New()) + + params := &elastictranscoder.DeletePresetInput{ + Id: aws.String("Id"), // Required + } + resp, err := svc.DeletePreset(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticTranscoder_ListJobsByPipeline() { + svc := elastictranscoder.New(session.New()) + + params := &elastictranscoder.ListJobsByPipelineInput{ + PipelineId: aws.String("Id"), // Required + Ascending: aws.String("Ascending"), + PageToken: aws.String("Id"), + } + resp, err := svc.ListJobsByPipeline(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticTranscoder_ListJobsByStatus() { + svc := elastictranscoder.New(session.New()) + + params := &elastictranscoder.ListJobsByStatusInput{ + Status: aws.String("JobStatus"), // Required + Ascending: aws.String("Ascending"), + PageToken: aws.String("Id"), + } + resp, err := svc.ListJobsByStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticTranscoder_ListPipelines() { + svc := elastictranscoder.New(session.New()) + + params := &elastictranscoder.ListPipelinesInput{ + Ascending: aws.String("Ascending"), + PageToken: aws.String("Id"), + } + resp, err := svc.ListPipelines(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticTranscoder_ListPresets() { + svc := elastictranscoder.New(session.New()) + + params := &elastictranscoder.ListPresetsInput{ + Ascending: aws.String("Ascending"), + PageToken: aws.String("Id"), + } + resp, err := svc.ListPresets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticTranscoder_ReadJob() { + svc := elastictranscoder.New(session.New()) + + params := &elastictranscoder.ReadJobInput{ + Id: aws.String("Id"), // Required + } + resp, err := svc.ReadJob(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleElasticTranscoder_ReadPipeline() { + svc := elastictranscoder.New(session.New()) + + params := &elastictranscoder.ReadPipelineInput{ + Id: aws.String("Id"), // Required + } + resp, err := svc.ReadPipeline(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticTranscoder_ReadPreset() { + svc := elastictranscoder.New(session.New()) + + params := &elastictranscoder.ReadPresetInput{ + Id: aws.String("Id"), // Required + } + resp, err := svc.ReadPreset(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticTranscoder_TestRole() { + svc := elastictranscoder.New(session.New()) + + params := &elastictranscoder.TestRoleInput{ + InputBucket: aws.String("BucketName"), // Required + OutputBucket: aws.String("BucketName"), // Required + Role: aws.String("Role"), // Required + Topics: []*string{ // Required + aws.String("SnsTopic"), // Required + // More values... + }, + } + resp, err := svc.TestRole(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticTranscoder_UpdatePipeline() { + svc := elastictranscoder.New(session.New()) + + params := &elastictranscoder.UpdatePipelineInput{ + Id: aws.String("Id"), // Required + AwsKmsKeyArn: aws.String("KeyArn"), + ContentConfig: &elastictranscoder.PipelineOutputConfig{ + Bucket: aws.String("BucketName"), + Permissions: []*elastictranscoder.Permission{ + { // Required + Access: []*string{ + aws.String("AccessControl"), // Required + // More values... + }, + Grantee: aws.String("Grantee"), + GranteeType: aws.String("GranteeType"), + }, + // More values... + }, + StorageClass: aws.String("StorageClass"), + }, + InputBucket: aws.String("BucketName"), + Name: aws.String("Name"), + Notifications: &elastictranscoder.Notifications{ + Completed: aws.String("SnsTopic"), + Error: aws.String("SnsTopic"), + Progressing: aws.String("SnsTopic"), + Warning: aws.String("SnsTopic"), + }, + Role: aws.String("Role"), + ThumbnailConfig: &elastictranscoder.PipelineOutputConfig{ + Bucket: aws.String("BucketName"), + Permissions: []*elastictranscoder.Permission{ + { // Required + Access: []*string{ + aws.String("AccessControl"), // Required + // More values... + }, + Grantee: aws.String("Grantee"), + GranteeType: aws.String("GranteeType"), + }, + // More values... + }, + StorageClass: aws.String("StorageClass"), + }, + } + resp, err := svc.UpdatePipeline(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp)
+}
+
+func ExampleElasticTranscoder_UpdatePipelineNotifications() {
+ svc := elastictranscoder.New(session.New())
+
+ params := &elastictranscoder.UpdatePipelineNotificationsInput{
+  Id: aws.String("Id"), // Required
+  Notifications: &elastictranscoder.Notifications{ // Required
+   Completed:   aws.String("SnsTopic"),
+   Error:       aws.String("SnsTopic"),
+   Progressing: aws.String("SnsTopic"),
+   Warning:     aws.String("SnsTopic"),
+  },
+ }
+ resp, err := svc.UpdatePipelineNotifications(params)
+
+ if err != nil {
+  // Print the error, cast err to awserr.Error to get the Code and
+  // Message from an error.
+  fmt.Println(err.Error())
+  return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleElasticTranscoder_UpdatePipelineStatus() {
+ svc := elastictranscoder.New(session.New())
+
+ params := &elastictranscoder.UpdatePipelineStatusInput{
+  Id:     aws.String("Id"),             // Required
+  Status: aws.String("PipelineStatus"), // Required
+ }
+ resp, err := svc.UpdatePipelineStatus(params)
+
+ if err != nil {
+  // Print the error, cast err to awserr.Error to get the Code and
+  // Message from an error.
+  fmt.Println(err.Error())
+  return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/service.go b/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/service.go
new file mode 100644
index 000000000..c23818a23
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/service.go
@@ -0,0 +1,86 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package elastictranscoder
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol/restjson"
+)
+
+// The AWS Elastic Transcoder Service.
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type ElasticTranscoder struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "elastictranscoder"
+
+// New creates a new instance of the ElasticTranscoder client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create a ElasticTranscoder client from just a session.
+//     svc := elastictranscoder.New(mySession)
+//
+//     // Create a ElasticTranscoder client with additional configuration
+//     svc := elastictranscoder.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *ElasticTranscoder {
+ c := p.ClientConfig(ServiceName, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
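+// It wires the SigV4 signing handler and the REST-JSON protocol handlers into
+// the client's request pipeline, as shown below; every request later built by
+// newRequest flows through this handler chain when it is sent.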
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *ElasticTranscoder { + svc := &ElasticTranscoder{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2012-09-25", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a ElasticTranscoder operation and runs any +// custom request initialization. +func (c *ElasticTranscoder) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/waiters.go new file mode 100644 index 000000000..8f5fcff03 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/waiters.go @@ -0,0 +1,42 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package elastictranscoder + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *ElasticTranscoder) WaitUntilJobComplete(input *ReadJobInput) error { + waiterCfg := waiter.Config{ + Operation: "ReadJob", + Delay: 30, + MaxAttempts: 120, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "path", + Argument: "Job.Status", + Expected: "Complete", + }, + { + State: "failure", + Matcher: "path", + Argument: "Job.Status", + Expected: "Canceled", + }, + { + State: "failure", + Matcher: "path", + Argument: "Job.Status", + Expected: "Error", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/elb/api.go b/vendor/github.com/aws/aws-sdk-go/service/elb/api.go new file mode 100644 index 000000000..46c912729 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/elb/api.go @@ -0,0 +1,4051 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package elb provides a client for Elastic Load Balancing. +package elb + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAddTags = "AddTags" + +// AddTagsRequest generates a "aws/request.Request" representing the +// client's request for the AddTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddTagsRequest method. 
+// req, resp := client.AddTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ELB) AddTagsRequest(input *AddTagsInput) (req *request.Request, output *AddTagsOutput) { + op := &request.Operation{ + Name: opAddTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &AddTagsOutput{} + req.Data = output + return +} + +// Adds the specified tags to the specified load balancer. Each load balancer +// can have a maximum of 10 tags. +// +// Each tag consists of a key and an optional value. If a tag with the same +// key is already associated with the load balancer, AddTags updates its value. +// +// For more information, see Tag Your Load Balancer (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/add-remove-tags.html) +// in the Elastic Load Balancing Developer Guide. +func (c *ELB) AddTags(input *AddTagsInput) (*AddTagsOutput, error) { + req, out := c.AddTagsRequest(input) + err := req.Send() + return out, err +} + +const opApplySecurityGroupsToLoadBalancer = "ApplySecurityGroupsToLoadBalancer" + +// ApplySecurityGroupsToLoadBalancerRequest generates a "aws/request.Request" representing the +// client's request for the ApplySecurityGroupsToLoadBalancer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ApplySecurityGroupsToLoadBalancer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ApplySecurityGroupsToLoadBalancerRequest method. +// req, resp := client.ApplySecurityGroupsToLoadBalancerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ELB) ApplySecurityGroupsToLoadBalancerRequest(input *ApplySecurityGroupsToLoadBalancerInput) (req *request.Request, output *ApplySecurityGroupsToLoadBalancerOutput) { + op := &request.Operation{ + Name: opApplySecurityGroupsToLoadBalancer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ApplySecurityGroupsToLoadBalancerInput{} + } + + req = c.newRequest(op, input, output) + output = &ApplySecurityGroupsToLoadBalancerOutput{} + req.Data = output + return +} + +// Associates one or more security groups with your load balancer in a virtual +// private cloud (VPC). The specified security groups override the previously +// associated security groups. +// +// For more information, see Security Groups for Load Balancers in a VPC (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-security-groups.html#elb-vpc-security-groups) +// in the Elastic Load Balancing Developer Guide. 
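As a minimal, self-contained usage sketch of the one-shot wrapper defined just below (this is not part of the generated file; the session setup, load balancer name, and security group ID are illustrative placeholders):

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/elb"
    )

    func main() {
    	svc := elb.New(session.New())

    	// Both identifiers below are placeholders, not values from this patch.
    	resp, err := svc.ApplySecurityGroupsToLoadBalancer(&elb.ApplySecurityGroupsToLoadBalancerInput{
    		LoadBalancerName: aws.String("my-load-balancer"),
    		SecurityGroups:   []*string{aws.String("sg-123456")},
    	})
    	if err != nil {
    		fmt.Println(err.Error())
    		return
    	}
    	// On success, resp echoes the security groups now associated.
    	fmt.Println(resp)
    }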
+func (c *ELB) ApplySecurityGroupsToLoadBalancer(input *ApplySecurityGroupsToLoadBalancerInput) (*ApplySecurityGroupsToLoadBalancerOutput, error) { + req, out := c.ApplySecurityGroupsToLoadBalancerRequest(input) + err := req.Send() + return out, err +} + +const opAttachLoadBalancerToSubnets = "AttachLoadBalancerToSubnets" + +// AttachLoadBalancerToSubnetsRequest generates a "aws/request.Request" representing the +// client's request for the AttachLoadBalancerToSubnets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AttachLoadBalancerToSubnets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AttachLoadBalancerToSubnetsRequest method. +// req, resp := client.AttachLoadBalancerToSubnetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ELB) AttachLoadBalancerToSubnetsRequest(input *AttachLoadBalancerToSubnetsInput) (req *request.Request, output *AttachLoadBalancerToSubnetsOutput) { + op := &request.Operation{ + Name: opAttachLoadBalancerToSubnets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachLoadBalancerToSubnetsInput{} + } + + req = c.newRequest(op, input, output) + output = &AttachLoadBalancerToSubnetsOutput{} + req.Data = output + return +} + +// Adds one or more subnets to the set of configured subnets for the specified +// load balancer. +// +// The load balancer evenly distributes requests across all registered subnets. +// For more information, see Add or Remove Subnets for Your Load Balancer in +// a VPC (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-manage-subnets.html) +// in the Elastic Load Balancing Developer Guide. +func (c *ELB) AttachLoadBalancerToSubnets(input *AttachLoadBalancerToSubnetsInput) (*AttachLoadBalancerToSubnetsOutput, error) { + req, out := c.AttachLoadBalancerToSubnetsRequest(input) + err := req.Send() + return out, err +} + +const opConfigureHealthCheck = "ConfigureHealthCheck" + +// ConfigureHealthCheckRequest generates a "aws/request.Request" representing the +// client's request for the ConfigureHealthCheck operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ConfigureHealthCheck method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ConfigureHealthCheckRequest method. 
+// req, resp := client.ConfigureHealthCheckRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ELB) ConfigureHealthCheckRequest(input *ConfigureHealthCheckInput) (req *request.Request, output *ConfigureHealthCheckOutput) { + op := &request.Operation{ + Name: opConfigureHealthCheck, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ConfigureHealthCheckInput{} + } + + req = c.newRequest(op, input, output) + output = &ConfigureHealthCheckOutput{} + req.Data = output + return +} + +// Specifies the health check settings to use when evaluating the health state +// of your back-end instances. +// +// For more information, see Configure Health Checks (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-healthchecks.html) +// in the Elastic Load Balancing Developer Guide. +func (c *ELB) ConfigureHealthCheck(input *ConfigureHealthCheckInput) (*ConfigureHealthCheckOutput, error) { + req, out := c.ConfigureHealthCheckRequest(input) + err := req.Send() + return out, err +} + +const opCreateAppCookieStickinessPolicy = "CreateAppCookieStickinessPolicy" + +// CreateAppCookieStickinessPolicyRequest generates a "aws/request.Request" representing the +// client's request for the CreateAppCookieStickinessPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateAppCookieStickinessPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateAppCookieStickinessPolicyRequest method. +// req, resp := client.CreateAppCookieStickinessPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ELB) CreateAppCookieStickinessPolicyRequest(input *CreateAppCookieStickinessPolicyInput) (req *request.Request, output *CreateAppCookieStickinessPolicyOutput) { + op := &request.Operation{ + Name: opCreateAppCookieStickinessPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateAppCookieStickinessPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateAppCookieStickinessPolicyOutput{} + req.Data = output + return +} + +// Generates a stickiness policy with sticky session lifetimes that follow that +// of an application-generated cookie. This policy can be associated only with +// HTTP/HTTPS listeners. +// +// This policy is similar to the policy created by CreateLBCookieStickinessPolicy, +// except that the lifetime of the special Elastic Load Balancing cookie, AWSELB, +// follows the lifetime of the application-generated cookie specified in the +// policy configuration. The load balancer only inserts a new stickiness cookie +// when the application response includes a new application cookie. +// +// If the application cookie is explicitly removed or expires, the session +// stops being sticky until a new application cookie is issued. 
+// +// For more information, see Application-Controlled Session Stickiness (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-sticky-sessions.html#enable-sticky-sessions-application) +// in the Elastic Load Balancing Developer Guide. +func (c *ELB) CreateAppCookieStickinessPolicy(input *CreateAppCookieStickinessPolicyInput) (*CreateAppCookieStickinessPolicyOutput, error) { + req, out := c.CreateAppCookieStickinessPolicyRequest(input) + err := req.Send() + return out, err +} + +const opCreateLBCookieStickinessPolicy = "CreateLBCookieStickinessPolicy" + +// CreateLBCookieStickinessPolicyRequest generates a "aws/request.Request" representing the +// client's request for the CreateLBCookieStickinessPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateLBCookieStickinessPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateLBCookieStickinessPolicyRequest method. +// req, resp := client.CreateLBCookieStickinessPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ELB) CreateLBCookieStickinessPolicyRequest(input *CreateLBCookieStickinessPolicyInput) (req *request.Request, output *CreateLBCookieStickinessPolicyOutput) { + op := &request.Operation{ + Name: opCreateLBCookieStickinessPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLBCookieStickinessPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateLBCookieStickinessPolicyOutput{} + req.Data = output + return +} + +// Generates a stickiness policy with sticky session lifetimes controlled by +// the lifetime of the browser (user-agent) or a specified expiration period. +// This policy can be associated only with HTTP/HTTPS listeners. +// +// When a load balancer implements this policy, the load balancer uses a special +// cookie to track the back-end server instance for each request. When the load +// balancer receives a request, it first checks to see if this cookie is present +// in the request. If so, the load balancer sends the request to the application +// server specified in the cookie. If not, the load balancer sends the request +// to a server that is chosen based on the existing load-balancing algorithm. +// +// A cookie is inserted into the response for binding subsequent requests from +// the same user to that server. The validity of the cookie is based on the +// cookie expiration time, which is specified in the policy configuration. +// +// For more information, see Duration-Based Session Stickiness (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-sticky-sessions.html#enable-sticky-sessions-duration) +// in the Elastic Load Balancing Developer Guide. 
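A hedged sketch of creating the duration-based stickiness policy described above; the names and the 300-second period are placeholders. Per the description, omitting CookieExpirationPeriod ties the cookie lifetime to the browser session instead, and the policy still has to be enabled on a listener with SetLoadBalancerPoliciesOfListener (defined later in this file):

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/elb"
    )

    func main() {
    	svc := elb.New(session.New())

    	// Placeholder names; CookieExpirationPeriod is in seconds.
    	_, err := svc.CreateLBCookieStickinessPolicy(&elb.CreateLBCookieStickinessPolicyInput{
    		LoadBalancerName:       aws.String("my-load-balancer"),
    		PolicyName:             aws.String("my-duration-policy"),
    		CookieExpirationPeriod: aws.Int64(300),
    	})
    	if err != nil {
    		fmt.Println(err.Error())
    		return
    	}
    	fmt.Println("stickiness policy created")
    }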
+func (c *ELB) CreateLBCookieStickinessPolicy(input *CreateLBCookieStickinessPolicyInput) (*CreateLBCookieStickinessPolicyOutput, error) { + req, out := c.CreateLBCookieStickinessPolicyRequest(input) + err := req.Send() + return out, err +} + +const opCreateLoadBalancer = "CreateLoadBalancer" + +// CreateLoadBalancerRequest generates a "aws/request.Request" representing the +// client's request for the CreateLoadBalancer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateLoadBalancer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateLoadBalancerRequest method. +// req, resp := client.CreateLoadBalancerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ELB) CreateLoadBalancerRequest(input *CreateLoadBalancerInput) (req *request.Request, output *CreateLoadBalancerOutput) { + op := &request.Operation{ + Name: opCreateLoadBalancer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLoadBalancerInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateLoadBalancerOutput{} + req.Data = output + return +} + +// Creates a load balancer. +// +// If the call completes successfully, a new load balancer is created with +// a unique Domain Name Service (DNS) name. The load balancer receives incoming +// traffic and routes it to the registered instances. For more information, +// see How Elastic Load Balancing Works (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/how-elb-works.html) +// in the Elastic Load Balancing Developer Guide. +// +// You can create up to 20 load balancers per region per account. You can request +// an increase for the number of load balancers for your account. For more information, +// see Elastic Load Balancing Limits (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-limits.html) +// in the Elastic Load Balancing Developer Guide. +func (c *ELB) CreateLoadBalancer(input *CreateLoadBalancerInput) (*CreateLoadBalancerOutput, error) { + req, out := c.CreateLoadBalancerRequest(input) + err := req.Send() + return out, err +} + +const opCreateLoadBalancerListeners = "CreateLoadBalancerListeners" + +// CreateLoadBalancerListenersRequest generates a "aws/request.Request" representing the +// client's request for the CreateLoadBalancerListeners operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateLoadBalancerListeners method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the CreateLoadBalancerListenersRequest method. +// req, resp := client.CreateLoadBalancerListenersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ELB) CreateLoadBalancerListenersRequest(input *CreateLoadBalancerListenersInput) (req *request.Request, output *CreateLoadBalancerListenersOutput) { + op := &request.Operation{ + Name: opCreateLoadBalancerListeners, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLoadBalancerListenersInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateLoadBalancerListenersOutput{} + req.Data = output + return +} + +// Creates one or more listeners for the specified load balancer. If a listener +// with the specified port does not already exist, it is created; otherwise, +// the properties of the new listener must match the properties of the existing +// listener. +// +// For more information, see Add a Listener to Your Load Balancer (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/us-add-listener.html) +// in the Elastic Load Balancing Developer Guide. +func (c *ELB) CreateLoadBalancerListeners(input *CreateLoadBalancerListenersInput) (*CreateLoadBalancerListenersOutput, error) { + req, out := c.CreateLoadBalancerListenersRequest(input) + err := req.Send() + return out, err +} + +const opCreateLoadBalancerPolicy = "CreateLoadBalancerPolicy" + +// CreateLoadBalancerPolicyRequest generates a "aws/request.Request" representing the +// client's request for the CreateLoadBalancerPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateLoadBalancerPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateLoadBalancerPolicyRequest method. +// req, resp := client.CreateLoadBalancerPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ELB) CreateLoadBalancerPolicyRequest(input *CreateLoadBalancerPolicyInput) (req *request.Request, output *CreateLoadBalancerPolicyOutput) { + op := &request.Operation{ + Name: opCreateLoadBalancerPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLoadBalancerPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateLoadBalancerPolicyOutput{} + req.Data = output + return +} + +// Creates a policy with the specified attributes for the specified load balancer. +// +// Policies are settings that are saved for your load balancer and that can +// be applied to the front-end listener or the back-end application server, +// depending on the policy type. 
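To make the attribute mechanism described above concrete, here is a sketch under the assumption that ProxyProtocolPolicyType (one policy type AWS documents for Classic load balancers, taking a single boolean ProxyProtocol attribute) is available; the load balancer and policy names are placeholders. The created policy is inert until attached, e.g. with SetLoadBalancerPoliciesForBackendServer later in this file:

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/elb"
    )

    func main() {
    	svc := elb.New(session.New())

    	// Placeholder load balancer and policy names.
    	_, err := svc.CreateLoadBalancerPolicy(&elb.CreateLoadBalancerPolicyInput{
    		LoadBalancerName: aws.String("my-load-balancer"),
    		PolicyName:       aws.String("my-proxy-protocol-policy"),
    		PolicyTypeName:   aws.String("ProxyProtocolPolicyType"),
    		PolicyAttributes: []*elb.PolicyAttribute{
    			{
    				AttributeName:  aws.String("ProxyProtocol"),
    				AttributeValue: aws.String("true"),
    			},
    		},
    	})
    	if err != nil {
    		fmt.Println(err.Error())
    		return
    	}
    	fmt.Println("policy created")
    }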
+func (c *ELB) CreateLoadBalancerPolicy(input *CreateLoadBalancerPolicyInput) (*CreateLoadBalancerPolicyOutput, error) {
+ req, out := c.CreateLoadBalancerPolicyRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteLoadBalancer = "DeleteLoadBalancer"
+
+// DeleteLoadBalancerRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteLoadBalancer operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DeleteLoadBalancer method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DeleteLoadBalancerRequest method.
+// req, resp := client.DeleteLoadBalancerRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *ELB) DeleteLoadBalancerRequest(input *DeleteLoadBalancerInput) (req *request.Request, output *DeleteLoadBalancerOutput) {
+ op := &request.Operation{
+ Name: opDeleteLoadBalancer,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteLoadBalancerInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DeleteLoadBalancerOutput{}
+ req.Data = output
+ return
+}
+
+// Deletes the specified load balancer.
+//
+// If you are attempting to recreate a load balancer, you must reconfigure
+// all settings. The DNS name associated with a deleted load balancer is no
+// longer usable. The name and associated DNS record of the deleted load balancer
+// no longer exist, and traffic sent to any of its IP addresses is no longer
+// delivered to back-end instances.
+//
+// If the load balancer does not exist or has already been deleted, the call
+// to DeleteLoadBalancer still succeeds.
+func (c *ELB) DeleteLoadBalancer(input *DeleteLoadBalancerInput) (*DeleteLoadBalancerOutput, error) {
+ req, out := c.DeleteLoadBalancerRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteLoadBalancerListeners = "DeleteLoadBalancerListeners"
+
+// DeleteLoadBalancerListenersRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteLoadBalancerListeners operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DeleteLoadBalancerListeners method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DeleteLoadBalancerListenersRequest method.
+// req, resp := client.DeleteLoadBalancerListenersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ELB) DeleteLoadBalancerListenersRequest(input *DeleteLoadBalancerListenersInput) (req *request.Request, output *DeleteLoadBalancerListenersOutput) { + op := &request.Operation{ + Name: opDeleteLoadBalancerListeners, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLoadBalancerListenersInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteLoadBalancerListenersOutput{} + req.Data = output + return +} + +// Deletes the specified listeners from the specified load balancer. +func (c *ELB) DeleteLoadBalancerListeners(input *DeleteLoadBalancerListenersInput) (*DeleteLoadBalancerListenersOutput, error) { + req, out := c.DeleteLoadBalancerListenersRequest(input) + err := req.Send() + return out, err +} + +const opDeleteLoadBalancerPolicy = "DeleteLoadBalancerPolicy" + +// DeleteLoadBalancerPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteLoadBalancerPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteLoadBalancerPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteLoadBalancerPolicyRequest method. +// req, resp := client.DeleteLoadBalancerPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ELB) DeleteLoadBalancerPolicyRequest(input *DeleteLoadBalancerPolicyInput) (req *request.Request, output *DeleteLoadBalancerPolicyOutput) { + op := &request.Operation{ + Name: opDeleteLoadBalancerPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLoadBalancerPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteLoadBalancerPolicyOutput{} + req.Data = output + return +} + +// Deletes the specified policy from the specified load balancer. This policy +// must not be enabled for any listeners. +func (c *ELB) DeleteLoadBalancerPolicy(input *DeleteLoadBalancerPolicyInput) (*DeleteLoadBalancerPolicyOutput, error) { + req, out := c.DeleteLoadBalancerPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeregisterInstancesFromLoadBalancer = "DeregisterInstancesFromLoadBalancer" + +// DeregisterInstancesFromLoadBalancerRequest generates a "aws/request.Request" representing the +// client's request for the DeregisterInstancesFromLoadBalancer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeregisterInstancesFromLoadBalancer method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeregisterInstancesFromLoadBalancerRequest method. +// req, resp := client.DeregisterInstancesFromLoadBalancerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ELB) DeregisterInstancesFromLoadBalancerRequest(input *DeregisterInstancesFromLoadBalancerInput) (req *request.Request, output *DeregisterInstancesFromLoadBalancerOutput) { + op := &request.Operation{ + Name: opDeregisterInstancesFromLoadBalancer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterInstancesFromLoadBalancerInput{} + } + + req = c.newRequest(op, input, output) + output = &DeregisterInstancesFromLoadBalancerOutput{} + req.Data = output + return +} + +// Deregisters the specified instances from the specified load balancer. After +// the instance is deregistered, it no longer receives traffic from the load +// balancer. +// +// You can use DescribeLoadBalancers to verify that the instance is deregistered +// from the load balancer. +// +// For more information, see Deregister and Register Amazon EC2 Instances (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/US_DeReg_Reg_Instances.html) +// in the Elastic Load Balancing Developer Guide. +func (c *ELB) DeregisterInstancesFromLoadBalancer(input *DeregisterInstancesFromLoadBalancerInput) (*DeregisterInstancesFromLoadBalancerOutput, error) { + req, out := c.DeregisterInstancesFromLoadBalancerRequest(input) + err := req.Send() + return out, err +} + +const opDescribeInstanceHealth = "DescribeInstanceHealth" + +// DescribeInstanceHealthRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInstanceHealth operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeInstanceHealth method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeInstanceHealthRequest method. +// req, resp := client.DescribeInstanceHealthRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ELB) DescribeInstanceHealthRequest(input *DescribeInstanceHealthInput) (req *request.Request, output *DescribeInstanceHealthOutput) { + op := &request.Operation{ + Name: opDescribeInstanceHealth, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeInstanceHealthInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeInstanceHealthOutput{} + req.Data = output + return +} + +// Describes the state of the specified instances with respect to the specified +// load balancer. If no instances are specified, the call describes the state +// of all instances that are currently registered with the load balancer. If +// instances are specified, their state is returned even if they are no longer +// registered with the load balancer. 
The state of terminated instances is not +// returned. +func (c *ELB) DescribeInstanceHealth(input *DescribeInstanceHealthInput) (*DescribeInstanceHealthOutput, error) { + req, out := c.DescribeInstanceHealthRequest(input) + err := req.Send() + return out, err +} + +const opDescribeLoadBalancerAttributes = "DescribeLoadBalancerAttributes" + +// DescribeLoadBalancerAttributesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLoadBalancerAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeLoadBalancerAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeLoadBalancerAttributesRequest method. +// req, resp := client.DescribeLoadBalancerAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ELB) DescribeLoadBalancerAttributesRequest(input *DescribeLoadBalancerAttributesInput) (req *request.Request, output *DescribeLoadBalancerAttributesOutput) { + op := &request.Operation{ + Name: opDescribeLoadBalancerAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeLoadBalancerAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeLoadBalancerAttributesOutput{} + req.Data = output + return +} + +// Describes the attributes for the specified load balancer. +func (c *ELB) DescribeLoadBalancerAttributes(input *DescribeLoadBalancerAttributesInput) (*DescribeLoadBalancerAttributesOutput, error) { + req, out := c.DescribeLoadBalancerAttributesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeLoadBalancerPolicies = "DescribeLoadBalancerPolicies" + +// DescribeLoadBalancerPoliciesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLoadBalancerPolicies operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeLoadBalancerPolicies method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeLoadBalancerPoliciesRequest method. 
+// req, resp := client.DescribeLoadBalancerPoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ELB) DescribeLoadBalancerPoliciesRequest(input *DescribeLoadBalancerPoliciesInput) (req *request.Request, output *DescribeLoadBalancerPoliciesOutput) { + op := &request.Operation{ + Name: opDescribeLoadBalancerPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeLoadBalancerPoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeLoadBalancerPoliciesOutput{} + req.Data = output + return +} + +// Describes the specified policies. +// +// If you specify a load balancer name, the action returns the descriptions +// of all policies created for the load balancer. If you specify a policy name +// associated with your load balancer, the action returns the description of +// that policy. If you don't specify a load balancer name, the action returns +// descriptions of the specified sample policies, or descriptions of all sample +// policies. The names of the sample policies have the ELBSample- prefix. +func (c *ELB) DescribeLoadBalancerPolicies(input *DescribeLoadBalancerPoliciesInput) (*DescribeLoadBalancerPoliciesOutput, error) { + req, out := c.DescribeLoadBalancerPoliciesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeLoadBalancerPolicyTypes = "DescribeLoadBalancerPolicyTypes" + +// DescribeLoadBalancerPolicyTypesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLoadBalancerPolicyTypes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeLoadBalancerPolicyTypes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeLoadBalancerPolicyTypesRequest method. +// req, resp := client.DescribeLoadBalancerPolicyTypesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ELB) DescribeLoadBalancerPolicyTypesRequest(input *DescribeLoadBalancerPolicyTypesInput) (req *request.Request, output *DescribeLoadBalancerPolicyTypesOutput) { + op := &request.Operation{ + Name: opDescribeLoadBalancerPolicyTypes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeLoadBalancerPolicyTypesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeLoadBalancerPolicyTypesOutput{} + req.Data = output + return +} + +// Describes the specified load balancer policy types. +// +// You can use these policy types with CreateLoadBalancerPolicy to create policy +// configurations for a load balancer. 
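A minimal sketch of listing the available policy types before building a CreateLoadBalancerPolicy call, assuming an empty input requests every policy type (field names follow the generated structs later in this file); the session setup is a placeholder:

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/elb"
    )

    func main() {
    	svc := elb.New(session.New())

    	// An empty input asks for all policy types.
    	resp, err := svc.DescribeLoadBalancerPolicyTypes(&elb.DescribeLoadBalancerPolicyTypesInput{})
    	if err != nil {
    		fmt.Println(err.Error())
    		return
    	}
    	for _, t := range resp.PolicyTypeDescriptions {
    		fmt.Println(aws.StringValue(t.PolicyTypeName))
    	}
    }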
+func (c *ELB) DescribeLoadBalancerPolicyTypes(input *DescribeLoadBalancerPolicyTypesInput) (*DescribeLoadBalancerPolicyTypesOutput, error) {
+ req, out := c.DescribeLoadBalancerPolicyTypesRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDescribeLoadBalancers = "DescribeLoadBalancers"
+
+// DescribeLoadBalancersRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeLoadBalancers operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeLoadBalancers method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DescribeLoadBalancersRequest method.
+// req, resp := client.DescribeLoadBalancersRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *ELB) DescribeLoadBalancersRequest(input *DescribeLoadBalancersInput) (req *request.Request, output *DescribeLoadBalancersOutput) {
+ op := &request.Operation{
+ Name: opDescribeLoadBalancers,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"Marker"},
+ OutputTokens: []string{"NextMarker"},
+ LimitToken: "",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &DescribeLoadBalancersInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DescribeLoadBalancersOutput{}
+ req.Data = output
+ return
+}
+
+// Describes the specified load balancers. If no load balancers are specified,
+// the call describes all of your load balancers.
+func (c *ELB) DescribeLoadBalancers(input *DescribeLoadBalancersInput) (*DescribeLoadBalancersOutput, error) {
+ req, out := c.DescribeLoadBalancersRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// DescribeLoadBalancersPages iterates over the pages of a DescribeLoadBalancers operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See DescribeLoadBalancers method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a DescribeLoadBalancers operation.
+// pageNum := 0
+// err := client.DescribeLoadBalancersPages(params,
+// func(page *DescribeLoadBalancersOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *ELB) DescribeLoadBalancersPages(input *DescribeLoadBalancersInput, fn func(p *DescribeLoadBalancersOutput, lastPage bool) (shouldContinue bool)) error {
+ page, _ := c.DescribeLoadBalancersRequest(input)
+ page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
+ return fn(p.(*DescribeLoadBalancersOutput), lastPage)
+ })
+}
+
+const opDescribeTags = "DescribeTags"
+
+// DescribeTagsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeTags operation.
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeTagsRequest method. +// req, resp := client.DescribeTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ELB) DescribeTagsRequest(input *DescribeTagsInput) (req *request.Request, output *DescribeTagsOutput) { + op := &request.Operation{ + Name: opDescribeTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTagsOutput{} + req.Data = output + return +} + +// Describes the tags associated with the specified load balancers. +func (c *ELB) DescribeTags(input *DescribeTagsInput) (*DescribeTagsOutput, error) { + req, out := c.DescribeTagsRequest(input) + err := req.Send() + return out, err +} + +const opDetachLoadBalancerFromSubnets = "DetachLoadBalancerFromSubnets" + +// DetachLoadBalancerFromSubnetsRequest generates a "aws/request.Request" representing the +// client's request for the DetachLoadBalancerFromSubnets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DetachLoadBalancerFromSubnets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DetachLoadBalancerFromSubnetsRequest method. +// req, resp := client.DetachLoadBalancerFromSubnetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ELB) DetachLoadBalancerFromSubnetsRequest(input *DetachLoadBalancerFromSubnetsInput) (req *request.Request, output *DetachLoadBalancerFromSubnetsOutput) { + op := &request.Operation{ + Name: opDetachLoadBalancerFromSubnets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachLoadBalancerFromSubnetsInput{} + } + + req = c.newRequest(op, input, output) + output = &DetachLoadBalancerFromSubnetsOutput{} + req.Data = output + return +} + +// Removes the specified subnets from the set of configured subnets for the +// load balancer. +// +// After a subnet is removed, all EC2 instances registered with the load balancer +// in the removed subnet go into the OutOfService state. Then, the load balancer +// balances the traffic among the remaining routable subnets. 
+func (c *ELB) DetachLoadBalancerFromSubnets(input *DetachLoadBalancerFromSubnetsInput) (*DetachLoadBalancerFromSubnetsOutput, error) { + req, out := c.DetachLoadBalancerFromSubnetsRequest(input) + err := req.Send() + return out, err +} + +const opDisableAvailabilityZonesForLoadBalancer = "DisableAvailabilityZonesForLoadBalancer" + +// DisableAvailabilityZonesForLoadBalancerRequest generates a "aws/request.Request" representing the +// client's request for the DisableAvailabilityZonesForLoadBalancer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DisableAvailabilityZonesForLoadBalancer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DisableAvailabilityZonesForLoadBalancerRequest method. +// req, resp := client.DisableAvailabilityZonesForLoadBalancerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ELB) DisableAvailabilityZonesForLoadBalancerRequest(input *DisableAvailabilityZonesForLoadBalancerInput) (req *request.Request, output *DisableAvailabilityZonesForLoadBalancerOutput) { + op := &request.Operation{ + Name: opDisableAvailabilityZonesForLoadBalancer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableAvailabilityZonesForLoadBalancerInput{} + } + + req = c.newRequest(op, input, output) + output = &DisableAvailabilityZonesForLoadBalancerOutput{} + req.Data = output + return +} + +// Removes the specified Availability Zones from the set of Availability Zones +// for the specified load balancer. +// +// There must be at least one Availability Zone registered with a load balancer +// at all times. After an Availability Zone is removed, all instances registered +// with the load balancer that are in the removed Availability Zone go into +// the OutOfService state. Then, the load balancer attempts to equally balance +// the traffic among its remaining Availability Zones. +// +// For more information, see Disable an Availability Zone from a Load-Balanced +// Application (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/US_ShrinkLBApp04.html) +// in the Elastic Load Balancing Developer Guide. +func (c *ELB) DisableAvailabilityZonesForLoadBalancer(input *DisableAvailabilityZonesForLoadBalancerInput) (*DisableAvailabilityZonesForLoadBalancerOutput, error) { + req, out := c.DisableAvailabilityZonesForLoadBalancerRequest(input) + err := req.Send() + return out, err +} + +const opEnableAvailabilityZonesForLoadBalancer = "EnableAvailabilityZonesForLoadBalancer" + +// EnableAvailabilityZonesForLoadBalancerRequest generates a "aws/request.Request" representing the +// client's request for the EnableAvailabilityZonesForLoadBalancer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the EnableAvailabilityZonesForLoadBalancer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EnableAvailabilityZonesForLoadBalancerRequest method. +// req, resp := client.EnableAvailabilityZonesForLoadBalancerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ELB) EnableAvailabilityZonesForLoadBalancerRequest(input *EnableAvailabilityZonesForLoadBalancerInput) (req *request.Request, output *EnableAvailabilityZonesForLoadBalancerOutput) { + op := &request.Operation{ + Name: opEnableAvailabilityZonesForLoadBalancer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableAvailabilityZonesForLoadBalancerInput{} + } + + req = c.newRequest(op, input, output) + output = &EnableAvailabilityZonesForLoadBalancerOutput{} + req.Data = output + return +} + +// Adds the specified Availability Zones to the set of Availability Zones for +// the specified load balancer. +// +// The load balancer evenly distributes requests across all its registered +// Availability Zones that contain instances. +// +// For more information, see Add Availability Zone (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/US_AddLBAvailabilityZone.html) +// in the Elastic Load Balancing Developer Guide. +func (c *ELB) EnableAvailabilityZonesForLoadBalancer(input *EnableAvailabilityZonesForLoadBalancerInput) (*EnableAvailabilityZonesForLoadBalancerOutput, error) { + req, out := c.EnableAvailabilityZonesForLoadBalancerRequest(input) + err := req.Send() + return out, err +} + +const opModifyLoadBalancerAttributes = "ModifyLoadBalancerAttributes" + +// ModifyLoadBalancerAttributesRequest generates a "aws/request.Request" representing the +// client's request for the ModifyLoadBalancerAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyLoadBalancerAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyLoadBalancerAttributesRequest method. 
+// req, resp := client.ModifyLoadBalancerAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ELB) ModifyLoadBalancerAttributesRequest(input *ModifyLoadBalancerAttributesInput) (req *request.Request, output *ModifyLoadBalancerAttributesOutput) { + op := &request.Operation{ + Name: opModifyLoadBalancerAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyLoadBalancerAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyLoadBalancerAttributesOutput{} + req.Data = output + return +} + +// Modifies the attributes of the specified load balancer. +// +// You can modify the load balancer attributes, such as AccessLogs, ConnectionDraining, +// and CrossZoneLoadBalancing by either enabling or disabling them. Or, you +// can modify the load balancer attribute ConnectionSettings by specifying an +// idle connection timeout value for your load balancer. +// +// For more information, see the following in the Elastic Load Balancing Developer +// Guide: +// +// Cross-Zone Load Balancing (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/TerminologyandKeyConcepts.html#request-routing) +// Connection Draining (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/TerminologyandKeyConcepts.html#conn-drain) +// Access Logs (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/access-log-collection.html) +// Idle Connection Timeout (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/TerminologyandKeyConcepts.html#idle-timeout) +func (c *ELB) ModifyLoadBalancerAttributes(input *ModifyLoadBalancerAttributesInput) (*ModifyLoadBalancerAttributesOutput, error) { + req, out := c.ModifyLoadBalancerAttributesRequest(input) + err := req.Send() + return out, err +} + +const opRegisterInstancesWithLoadBalancer = "RegisterInstancesWithLoadBalancer" + +// RegisterInstancesWithLoadBalancerRequest generates a "aws/request.Request" representing the +// client's request for the RegisterInstancesWithLoadBalancer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RegisterInstancesWithLoadBalancer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RegisterInstancesWithLoadBalancerRequest method. 
+// req, resp := client.RegisterInstancesWithLoadBalancerRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *ELB) RegisterInstancesWithLoadBalancerRequest(input *RegisterInstancesWithLoadBalancerInput) (req *request.Request, output *RegisterInstancesWithLoadBalancerOutput) {
+ op := &request.Operation{
+ Name: opRegisterInstancesWithLoadBalancer,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &RegisterInstancesWithLoadBalancerInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &RegisterInstancesWithLoadBalancerOutput{}
+ req.Data = output
+ return
+}
+
+// Adds the specified instances to the specified load balancer.
+//
+// The instance must be a running instance in the same network as the load
+// balancer (EC2-Classic or the same VPC). If you have EC2-Classic instances
+// and a load balancer in a VPC with ClassicLink enabled, you can link the EC2-Classic
+// instances to that VPC and then register the linked EC2-Classic instances
+// with the load balancer in the VPC.
+//
+// Note that RegisterInstancesWithLoadBalancer completes when the request has
+// been registered. Instance registration takes a little time to complete. To
+// check the state of the registered instances, use DescribeLoadBalancers or
+// DescribeInstanceHealth.
+//
+// After the instance is registered, it starts receiving traffic and requests
+// from the load balancer. Any instance that is not in one of the Availability
+// Zones registered for the load balancer is moved to the OutOfService state.
+// If an Availability Zone is added to the load balancer later, any instances
+// registered with the load balancer move to the InService state.
+//
+// If you stop an instance registered with a load balancer and then start it,
+// the IP addresses associated with the instance change. Elastic Load Balancing
+// cannot recognize the new IP address, which prevents it from routing traffic
+// to the instance. We recommend that you use the following sequence: stop
+// the instance, deregister the instance, start the instance, and then register
+// the instance. To deregister instances from a load balancer, use DeregisterInstancesFromLoadBalancer.
+//
+// For more information, see Deregister and Register EC2 Instances (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/US_DeReg_Reg_Instances.html)
+// in the Elastic Load Balancing Developer Guide.
+func (c *ELB) RegisterInstancesWithLoadBalancer(input *RegisterInstancesWithLoadBalancerInput) (*RegisterInstancesWithLoadBalancerOutput, error) {
+ req, out := c.RegisterInstancesWithLoadBalancerRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opRemoveTags = "RemoveTags"
+
+// RemoveTagsRequest generates a "aws/request.Request" representing the
+// client's request for the RemoveTags operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the RemoveTags method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+// +// // Example sending a request using the RemoveTagsRequest method. +// req, resp := client.RemoveTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ELB) RemoveTagsRequest(input *RemoveTagsInput) (req *request.Request, output *RemoveTagsOutput) { + op := &request.Operation{ + Name: opRemoveTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &RemoveTagsOutput{} + req.Data = output + return +} + +// Removes one or more tags from the specified load balancer. +func (c *ELB) RemoveTags(input *RemoveTagsInput) (*RemoveTagsOutput, error) { + req, out := c.RemoveTagsRequest(input) + err := req.Send() + return out, err +} + +const opSetLoadBalancerListenerSSLCertificate = "SetLoadBalancerListenerSSLCertificate" + +// SetLoadBalancerListenerSSLCertificateRequest generates a "aws/request.Request" representing the +// client's request for the SetLoadBalancerListenerSSLCertificate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetLoadBalancerListenerSSLCertificate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetLoadBalancerListenerSSLCertificateRequest method. +// req, resp := client.SetLoadBalancerListenerSSLCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ELB) SetLoadBalancerListenerSSLCertificateRequest(input *SetLoadBalancerListenerSSLCertificateInput) (req *request.Request, output *SetLoadBalancerListenerSSLCertificateOutput) { + op := &request.Operation{ + Name: opSetLoadBalancerListenerSSLCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetLoadBalancerListenerSSLCertificateInput{} + } + + req = c.newRequest(op, input, output) + output = &SetLoadBalancerListenerSSLCertificateOutput{} + req.Data = output + return +} + +// Sets the certificate that terminates the specified listener's SSL connections. +// The specified certificate replaces any prior certificate that was used on +// the same load balancer and port. +// +// For more information about updating your SSL certificate, see Updating an +// SSL Certificate for a Load Balancer (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/US_UpdatingLoadBalancerSSL.html) +// in the Elastic Load Balancing Developer Guide. +func (c *ELB) SetLoadBalancerListenerSSLCertificate(input *SetLoadBalancerListenerSSLCertificateInput) (*SetLoadBalancerListenerSSLCertificateOutput, error) { + req, out := c.SetLoadBalancerListenerSSLCertificateRequest(input) + err := req.Send() + return out, err +} + +const opSetLoadBalancerPoliciesForBackendServer = "SetLoadBalancerPoliciesForBackendServer" + +// SetLoadBalancerPoliciesForBackendServerRequest generates a "aws/request.Request" representing the +// client's request for the SetLoadBalancerPoliciesForBackendServer operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetLoadBalancerPoliciesForBackendServer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetLoadBalancerPoliciesForBackendServerRequest method. +// req, resp := client.SetLoadBalancerPoliciesForBackendServerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ELB) SetLoadBalancerPoliciesForBackendServerRequest(input *SetLoadBalancerPoliciesForBackendServerInput) (req *request.Request, output *SetLoadBalancerPoliciesForBackendServerOutput) { + op := &request.Operation{ + Name: opSetLoadBalancerPoliciesForBackendServer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetLoadBalancerPoliciesForBackendServerInput{} + } + + req = c.newRequest(op, input, output) + output = &SetLoadBalancerPoliciesForBackendServerOutput{} + req.Data = output + return +} + +// Replaces the set of policies associated with the specified port on which +// the back-end server is listening with a new set of policies. At this time, +// only the back-end server authentication policy type can be applied to the +// back-end ports; this policy type is composed of multiple public key policies. +// +// Each time you use SetLoadBalancerPoliciesForBackendServer to enable the +// policies, use the PolicyNames parameter to list the policies that you want +// to enable. +// +// You can use DescribeLoadBalancers or DescribeLoadBalancerPolicies to verify +// that the policy is associated with the back-end server. +func (c *ELB) SetLoadBalancerPoliciesForBackendServer(input *SetLoadBalancerPoliciesForBackendServerInput) (*SetLoadBalancerPoliciesForBackendServerOutput, error) { + req, out := c.SetLoadBalancerPoliciesForBackendServerRequest(input) + err := req.Send() + return out, err +} + +const opSetLoadBalancerPoliciesOfListener = "SetLoadBalancerPoliciesOfListener" + +// SetLoadBalancerPoliciesOfListenerRequest generates a "aws/request.Request" representing the +// client's request for the SetLoadBalancerPoliciesOfListener operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetLoadBalancerPoliciesOfListener method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetLoadBalancerPoliciesOfListenerRequest method. 
+// req, resp := client.SetLoadBalancerPoliciesOfListenerRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+//     fmt.Println(resp)
+// }
+//
+func (c *ELB) SetLoadBalancerPoliciesOfListenerRequest(input *SetLoadBalancerPoliciesOfListenerInput) (req *request.Request, output *SetLoadBalancerPoliciesOfListenerOutput) {
+	op := &request.Operation{
+		Name:       opSetLoadBalancerPoliciesOfListener,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &SetLoadBalancerPoliciesOfListenerInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &SetLoadBalancerPoliciesOfListenerOutput{}
+	req.Data = output
+	return
+}
+
+// Associates, updates, or disables a policy with a listener for the specified
+// load balancer. You can associate multiple policies with a listener.
+func (c *ELB) SetLoadBalancerPoliciesOfListener(input *SetLoadBalancerPoliciesOfListenerInput) (*SetLoadBalancerPoliciesOfListenerOutput, error) {
+	req, out := c.SetLoadBalancerPoliciesOfListenerRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// Information about the AccessLog attribute.
+type AccessLog struct {
+	_ struct{} `type:"structure"`
+
+	// The interval for publishing the access logs. You can specify an interval
+	// of either 5 minutes or 60 minutes.
+	//
+	// Default: 60 minutes
+	EmitInterval *int64 `type:"integer"`
+
+	// Specifies whether access logging is enabled for the load balancer.
+	Enabled *bool `type:"boolean" required:"true"`
+
+	// The name of the Amazon S3 bucket where the access logs are stored.
+	S3BucketName *string `type:"string"`
+
+	// The logical hierarchy you created for your Amazon S3 bucket, for example
+	// my-bucket-prefix/prod. If the prefix is not provided, the log is placed at
+	// the root level of the bucket.
+	S3BucketPrefix *string `type:"string"`
+}
+
+// String returns the string representation
+func (s AccessLog) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AccessLog) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AccessLog) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "AccessLog"}
+	if s.Enabled == nil {
+		invalidParams.Add(request.NewErrParamRequired("Enabled"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type AddTagsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the load balancer. You can specify one load balancer only.
+	LoadBalancerNames []*string `type:"list" required:"true"`
+
+	// The tags.
+	Tags []*Tag `min:"1" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s AddTagsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddTagsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
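+//
+// For illustration only, a minimal hedged sketch of tagging a load balancer
+// (svc is an assumed, already-constructed *ELB client; the load balancer name
+// and tag values are hypothetical). Validation runs via the SDK's parameter
+// validation handler as part of Send:
+//
+//    params := &AddTagsInput{
+//        LoadBalancerNames: []*string{aws.String("my-load-balancer")},
+//        Tags: []*Tag{
+//            {Key: aws.String("environment"), Value: aws.String("production")},
+//        },
+//    }
+//    resp, err := svc.AddTags(params)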
+func (s *AddTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddTagsInput"} + if s.LoadBalancerNames == nil { + invalidParams.Add(request.NewErrParamRequired("LoadBalancerNames")) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type AddTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsOutput) GoString() string { + return s.String() +} + +// This data type is reserved. +type AdditionalAttribute struct { + _ struct{} `type:"structure"` + + // This parameter is reserved. + Key *string `type:"string"` + + // This parameter is reserved. + Value *string `type:"string"` +} + +// String returns the string representation +func (s AdditionalAttribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdditionalAttribute) GoString() string { + return s.String() +} + +// Information about a policy for application-controlled session stickiness. +type AppCookieStickinessPolicy struct { + _ struct{} `type:"structure"` + + // The name of the application cookie used for stickiness. + CookieName *string `type:"string"` + + // The mnemonic name for the policy being created. The name must be unique within + // a set of policies for this load balancer. + PolicyName *string `type:"string"` +} + +// String returns the string representation +func (s AppCookieStickinessPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AppCookieStickinessPolicy) GoString() string { + return s.String() +} + +type ApplySecurityGroupsToLoadBalancerInput struct { + _ struct{} `type:"structure"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` + + // The IDs of the security groups to associate with the load balancer. Note + // that you cannot specify the name of the security group. + SecurityGroups []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s ApplySecurityGroupsToLoadBalancerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ApplySecurityGroupsToLoadBalancerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ApplySecurityGroupsToLoadBalancerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ApplySecurityGroupsToLoadBalancerInput"} + if s.LoadBalancerName == nil { + invalidParams.Add(request.NewErrParamRequired("LoadBalancerName")) + } + if s.SecurityGroups == nil { + invalidParams.Add(request.NewErrParamRequired("SecurityGroups")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ApplySecurityGroupsToLoadBalancerOutput struct { + _ struct{} `type:"structure"` + + // The IDs of the security groups associated with the load balancer. 
+ SecurityGroups []*string `type:"list"` +} + +// String returns the string representation +func (s ApplySecurityGroupsToLoadBalancerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ApplySecurityGroupsToLoadBalancerOutput) GoString() string { + return s.String() +} + +type AttachLoadBalancerToSubnetsInput struct { + _ struct{} `type:"structure"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` + + // The IDs of the subnets to add for the load balancer. You can add only one + // subnet per Availability Zone. + Subnets []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s AttachLoadBalancerToSubnetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachLoadBalancerToSubnetsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AttachLoadBalancerToSubnetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AttachLoadBalancerToSubnetsInput"} + if s.LoadBalancerName == nil { + invalidParams.Add(request.NewErrParamRequired("LoadBalancerName")) + } + if s.Subnets == nil { + invalidParams.Add(request.NewErrParamRequired("Subnets")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type AttachLoadBalancerToSubnetsOutput struct { + _ struct{} `type:"structure"` + + // The IDs of the subnets attached to the load balancer. + Subnets []*string `type:"list"` +} + +// String returns the string representation +func (s AttachLoadBalancerToSubnetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachLoadBalancerToSubnetsOutput) GoString() string { + return s.String() +} + +// Information about the configuration of a back-end server. +type BackendServerDescription struct { + _ struct{} `type:"structure"` + + // The port on which the back-end server is listening. + InstancePort *int64 `min:"1" type:"integer"` + + // The names of the policies enabled for the back-end server. + PolicyNames []*string `type:"list"` +} + +// String returns the string representation +func (s BackendServerDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BackendServerDescription) GoString() string { + return s.String() +} + +type ConfigureHealthCheckInput struct { + _ struct{} `type:"structure"` + + // The configuration information for the new health check. + HealthCheck *HealthCheck `type:"structure" required:"true"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ConfigureHealthCheckInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfigureHealthCheckInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
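+//
+// A short, hedged sketch of configuring a health check (svc and the load
+// balancer name are assumptions; see the HealthCheck type below for the
+// Target format):
+//
+//    resp, err := svc.ConfigureHealthCheck(&ConfigureHealthCheckInput{
+//        LoadBalancerName: aws.String("my-load-balancer"),
+//        HealthCheck: &HealthCheck{
+//            Target:             aws.String("HTTP:80/ping"),
+//            Interval:           aws.Int64(30),
+//            Timeout:            aws.Int64(5), // must be less than Interval
+//            HealthyThreshold:   aws.Int64(2),
+//            UnhealthyThreshold: aws.Int64(2),
+//        },
+//    })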
+func (s *ConfigureHealthCheckInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ConfigureHealthCheckInput"} + if s.HealthCheck == nil { + invalidParams.Add(request.NewErrParamRequired("HealthCheck")) + } + if s.LoadBalancerName == nil { + invalidParams.Add(request.NewErrParamRequired("LoadBalancerName")) + } + if s.HealthCheck != nil { + if err := s.HealthCheck.Validate(); err != nil { + invalidParams.AddNested("HealthCheck", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ConfigureHealthCheckOutput struct { + _ struct{} `type:"structure"` + + // The updated health check. + HealthCheck *HealthCheck `type:"structure"` +} + +// String returns the string representation +func (s ConfigureHealthCheckOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfigureHealthCheckOutput) GoString() string { + return s.String() +} + +// Information about the ConnectionDraining attribute. +type ConnectionDraining struct { + _ struct{} `type:"structure"` + + // Specifies whether connection draining is enabled for the load balancer. + Enabled *bool `type:"boolean" required:"true"` + + // The maximum time, in seconds, to keep the existing connections open before + // deregistering the instances. + Timeout *int64 `type:"integer"` +} + +// String returns the string representation +func (s ConnectionDraining) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConnectionDraining) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ConnectionDraining) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ConnectionDraining"} + if s.Enabled == nil { + invalidParams.Add(request.NewErrParamRequired("Enabled")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Information about the ConnectionSettings attribute. +type ConnectionSettings struct { + _ struct{} `type:"structure"` + + // The time, in seconds, that the connection is allowed to be idle (no data + // has been sent over the connection) before it is closed by the load balancer. + IdleTimeout *int64 `min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s ConnectionSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConnectionSettings) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ConnectionSettings) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ConnectionSettings"} + if s.IdleTimeout == nil { + invalidParams.Add(request.NewErrParamRequired("IdleTimeout")) + } + if s.IdleTimeout != nil && *s.IdleTimeout < 1 { + invalidParams.Add(request.NewErrParamMinValue("IdleTimeout", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateAppCookieStickinessPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the application cookie used for stickiness. + CookieName *string `type:"string" required:"true"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` + + // The name of the policy being created. Policy names must consist of alphanumeric + // characters and dashes (-). 
This name must be unique within the set of policies + // for this load balancer. + PolicyName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateAppCookieStickinessPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAppCookieStickinessPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateAppCookieStickinessPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateAppCookieStickinessPolicyInput"} + if s.CookieName == nil { + invalidParams.Add(request.NewErrParamRequired("CookieName")) + } + if s.LoadBalancerName == nil { + invalidParams.Add(request.NewErrParamRequired("LoadBalancerName")) + } + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateAppCookieStickinessPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateAppCookieStickinessPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAppCookieStickinessPolicyOutput) GoString() string { + return s.String() +} + +type CreateLBCookieStickinessPolicyInput struct { + _ struct{} `type:"structure"` + + // The time period, in seconds, after which the cookie should be considered + // stale. If you do not specify this parameter, the sticky session lasts for + // the duration of the browser session. + CookieExpirationPeriod *int64 `type:"long"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` + + // The name of the policy being created. Policy names must consist of alphanumeric + // characters and dashes (-). This name must be unique within the set of policies + // for this load balancer. + PolicyName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateLBCookieStickinessPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLBCookieStickinessPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateLBCookieStickinessPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateLBCookieStickinessPolicyInput"} + if s.LoadBalancerName == nil { + invalidParams.Add(request.NewErrParamRequired("LoadBalancerName")) + } + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateLBCookieStickinessPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateLBCookieStickinessPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLBCookieStickinessPolicyOutput) GoString() string { + return s.String() +} + +type CreateLoadBalancerInput struct { + _ struct{} `type:"structure"` + + // One or more Availability Zones from the same region as the load balancer. + // Traffic is equally distributed across all specified Availability Zones. + // + // You must specify at least one Availability Zone. 
+ // + // You can add more Availability Zones after you create the load balancer using + // EnableAvailabilityZonesForLoadBalancer. + AvailabilityZones []*string `type:"list"` + + // The listeners. + // + // For more information, see Listeners for Your Load Balancer (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-listener-config.html) + // in the Elastic Load Balancing Developer Guide. + Listeners []*Listener `type:"list" required:"true"` + + // The name of the load balancer. + // + // This name must be unique within your set of load balancers for the region, + // must have a maximum of 32 characters, must contain only alphanumeric characters + // or hyphens, and cannot begin or end with a hyphen. + LoadBalancerName *string `type:"string" required:"true"` + + // The type of a load balancer. Valid only for load balancers in a VPC. + // + // By default, Elastic Load Balancing creates an Internet-facing load balancer + // with a publicly resolvable DNS name, which resolves to public IP addresses. + // For more information about Internet-facing and Internal load balancers, see + // Internet-facing and Internal Load Balancers (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/vpc-loadbalancer-types.html) + // in the Elastic Load Balancing Developer Guide. + // + // Specify internal to create an internal load balancer with a DNS name that + // resolves to private IP addresses. + Scheme *string `type:"string"` + + // The IDs of the security groups to assign to the load balancer. + SecurityGroups []*string `type:"list"` + + // The IDs of the subnets in your VPC to attach to the load balancer. Specify + // one subnet per Availability Zone specified in AvailabilityZones. + Subnets []*string `type:"list"` + + // A list of tags to assign to the load balancer. + // + // For more information about tagging your load balancer, see Tagging (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/TerminologyandKeyConcepts.html#tagging-elb) + // in the Elastic Load Balancing Developer Guide. + Tags []*Tag `min:"1" type:"list"` +} + +// String returns the string representation +func (s CreateLoadBalancerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLoadBalancerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateLoadBalancerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateLoadBalancerInput"} + if s.Listeners == nil { + invalidParams.Add(request.NewErrParamRequired("Listeners")) + } + if s.LoadBalancerName == nil { + invalidParams.Add(request.NewErrParamRequired("LoadBalancerName")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.Listeners != nil { + for i, v := range s.Listeners { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Listeners", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateLoadBalancerListenersInput struct { + _ struct{} `type:"structure"` + + // The listeners. 
+ Listeners []*Listener `type:"list" required:"true"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateLoadBalancerListenersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLoadBalancerListenersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateLoadBalancerListenersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateLoadBalancerListenersInput"} + if s.Listeners == nil { + invalidParams.Add(request.NewErrParamRequired("Listeners")) + } + if s.LoadBalancerName == nil { + invalidParams.Add(request.NewErrParamRequired("LoadBalancerName")) + } + if s.Listeners != nil { + for i, v := range s.Listeners { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Listeners", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateLoadBalancerListenersOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateLoadBalancerListenersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLoadBalancerListenersOutput) GoString() string { + return s.String() +} + +type CreateLoadBalancerOutput struct { + _ struct{} `type:"structure"` + + // The DNS name of the load balancer. + DNSName *string `type:"string"` +} + +// String returns the string representation +func (s CreateLoadBalancerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLoadBalancerOutput) GoString() string { + return s.String() +} + +type CreateLoadBalancerPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` + + // The attributes for the policy. + PolicyAttributes []*PolicyAttribute `type:"list"` + + // The name of the load balancer policy to be created. This name must be unique + // within the set of policies for this load balancer. + PolicyName *string `type:"string" required:"true"` + + // The name of the base policy type. To get the list of policy types, use DescribeLoadBalancerPolicyTypes. + PolicyTypeName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateLoadBalancerPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLoadBalancerPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
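+//
+// As a hedged example (svc and the names are assumptions), creating a proxy
+// protocol policy; "ProxyProtocolPolicyType" is a standard Elastic Load
+// Balancing policy type discoverable via DescribeLoadBalancerPolicyTypes:
+//
+//    resp, err := svc.CreateLoadBalancerPolicy(&CreateLoadBalancerPolicyInput{
+//        LoadBalancerName: aws.String("my-load-balancer"),
+//        PolicyName:       aws.String("my-ProxyProtocol-policy"),
+//        PolicyTypeName:   aws.String("ProxyProtocolPolicyType"),
+//        PolicyAttributes: []*PolicyAttribute{
+//            {AttributeName: aws.String("ProxyProtocol"), AttributeValue: aws.String("true")},
+//        },
+//    })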
+func (s *CreateLoadBalancerPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateLoadBalancerPolicyInput"} + if s.LoadBalancerName == nil { + invalidParams.Add(request.NewErrParamRequired("LoadBalancerName")) + } + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyTypeName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyTypeName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateLoadBalancerPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateLoadBalancerPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLoadBalancerPolicyOutput) GoString() string { + return s.String() +} + +// Information about the CrossZoneLoadBalancing attribute. +type CrossZoneLoadBalancing struct { + _ struct{} `type:"structure"` + + // Specifies whether cross-zone load balancing is enabled for the load balancer. + Enabled *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s CrossZoneLoadBalancing) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CrossZoneLoadBalancing) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CrossZoneLoadBalancing) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CrossZoneLoadBalancing"} + if s.Enabled == nil { + invalidParams.Add(request.NewErrParamRequired("Enabled")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteLoadBalancerInput struct { + _ struct{} `type:"structure"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLoadBalancerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLoadBalancerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteLoadBalancerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteLoadBalancerInput"} + if s.LoadBalancerName == nil { + invalidParams.Add(request.NewErrParamRequired("LoadBalancerName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteLoadBalancerListenersInput struct { + _ struct{} `type:"structure"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` + + // The client port numbers of the listeners. + LoadBalancerPorts []*int64 `type:"list" required:"true"` +} + +// String returns the string representation +func (s DeleteLoadBalancerListenersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLoadBalancerListenersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
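+//
+// A minimal, hedged sketch of deleting a listener (svc is an assumed client;
+// the name and port are hypothetical):
+//
+//    resp, err := svc.DeleteLoadBalancerListeners(&DeleteLoadBalancerListenersInput{
+//        LoadBalancerName:  aws.String("my-load-balancer"),
+//        LoadBalancerPorts: []*int64{aws.Int64(80)},
+//    })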
+func (s *DeleteLoadBalancerListenersInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteLoadBalancerListenersInput"}
+	if s.LoadBalancerName == nil {
+		invalidParams.Add(request.NewErrParamRequired("LoadBalancerName"))
+	}
+	if s.LoadBalancerPorts == nil {
+		invalidParams.Add(request.NewErrParamRequired("LoadBalancerPorts"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type DeleteLoadBalancerListenersOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteLoadBalancerListenersOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteLoadBalancerListenersOutput) GoString() string {
+	return s.String()
+}
+
+type DeleteLoadBalancerOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteLoadBalancerOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteLoadBalancerOutput) GoString() string {
+	return s.String()
+}
+
+// Contains the parameters for DeleteLoadBalancerPolicy.
+type DeleteLoadBalancerPolicyInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the load balancer.
+	LoadBalancerName *string `type:"string" required:"true"`
+
+	// The name of the policy.
+	PolicyName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteLoadBalancerPolicyInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteLoadBalancerPolicyInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteLoadBalancerPolicyInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteLoadBalancerPolicyInput"}
+	if s.LoadBalancerName == nil {
+		invalidParams.Add(request.NewErrParamRequired("LoadBalancerName"))
+	}
+	if s.PolicyName == nil {
+		invalidParams.Add(request.NewErrParamRequired("PolicyName"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type DeleteLoadBalancerPolicyOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteLoadBalancerPolicyOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteLoadBalancerPolicyOutput) GoString() string {
+	return s.String()
+}
+
+type DeregisterInstancesFromLoadBalancerInput struct {
+	_ struct{} `type:"structure"`
+
+	// The IDs of the instances.
+	Instances []*Instance `type:"list" required:"true"`
+
+	// The name of the load balancer.
+	LoadBalancerName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeregisterInstancesFromLoadBalancerInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeregisterInstancesFromLoadBalancerInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
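+//
+// A hedged sketch of the deregister step from the stop/deregister/start/register
+// sequence recommended in the RegisterInstancesWithLoadBalancer documentation
+// (svc and the instance ID are hypothetical):
+//
+//    resp, err := svc.DeregisterInstancesFromLoadBalancer(&DeregisterInstancesFromLoadBalancerInput{
+//        LoadBalancerName: aws.String("my-load-balancer"),
+//        Instances:        []*Instance{{InstanceId: aws.String("i-0123456789abcdef0")}},
+//    })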
+func (s *DeregisterInstancesFromLoadBalancerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeregisterInstancesFromLoadBalancerInput"} + if s.Instances == nil { + invalidParams.Add(request.NewErrParamRequired("Instances")) + } + if s.LoadBalancerName == nil { + invalidParams.Add(request.NewErrParamRequired("LoadBalancerName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeregisterInstancesFromLoadBalancerOutput struct { + _ struct{} `type:"structure"` + + // The remaining instances registered with the load balancer. + Instances []*Instance `type:"list"` +} + +// String returns the string representation +func (s DeregisterInstancesFromLoadBalancerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterInstancesFromLoadBalancerOutput) GoString() string { + return s.String() +} + +type DescribeInstanceHealthInput struct { + _ struct{} `type:"structure"` + + // The IDs of the instances. + Instances []*Instance `type:"list"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeInstanceHealthInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceHealthInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeInstanceHealthInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeInstanceHealthInput"} + if s.LoadBalancerName == nil { + invalidParams.Add(request.NewErrParamRequired("LoadBalancerName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeInstanceHealthOutput struct { + _ struct{} `type:"structure"` + + // Information about the health of the instances. + InstanceStates []*InstanceState `type:"list"` +} + +// String returns the string representation +func (s DescribeInstanceHealthOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceHealthOutput) GoString() string { + return s.String() +} + +type DescribeLoadBalancerAttributesInput struct { + _ struct{} `type:"structure"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeLoadBalancerAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBalancerAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeLoadBalancerAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeLoadBalancerAttributesInput"} + if s.LoadBalancerName == nil { + invalidParams.Add(request.NewErrParamRequired("LoadBalancerName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeLoadBalancerAttributesOutput struct { + _ struct{} `type:"structure"` + + // Information about the load balancer attributes. 
+ LoadBalancerAttributes *LoadBalancerAttributes `type:"structure"` +} + +// String returns the string representation +func (s DescribeLoadBalancerAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBalancerAttributesOutput) GoString() string { + return s.String() +} + +type DescribeLoadBalancerPoliciesInput struct { + _ struct{} `type:"structure"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string"` + + // The names of the policies. + PolicyNames []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeLoadBalancerPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBalancerPoliciesInput) GoString() string { + return s.String() +} + +type DescribeLoadBalancerPoliciesOutput struct { + _ struct{} `type:"structure"` + + // Information about the policies. + PolicyDescriptions []*PolicyDescription `type:"list"` +} + +// String returns the string representation +func (s DescribeLoadBalancerPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBalancerPoliciesOutput) GoString() string { + return s.String() +} + +type DescribeLoadBalancerPolicyTypesInput struct { + _ struct{} `type:"structure"` + + // The names of the policy types. If no names are specified, describes all policy + // types defined by Elastic Load Balancing. + PolicyTypeNames []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeLoadBalancerPolicyTypesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBalancerPolicyTypesInput) GoString() string { + return s.String() +} + +type DescribeLoadBalancerPolicyTypesOutput struct { + _ struct{} `type:"structure"` + + // Information about the policy types. + PolicyTypeDescriptions []*PolicyTypeDescription `type:"list"` +} + +// String returns the string representation +func (s DescribeLoadBalancerPolicyTypesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBalancerPolicyTypesOutput) GoString() string { + return s.String() +} + +type DescribeLoadBalancersInput struct { + _ struct{} `type:"structure"` + + // The names of the load balancers. + LoadBalancerNames []*string `type:"list"` + + // The marker for the next set of results. (You received this marker from a + // previous call.) + Marker *string `type:"string"` + + // The maximum number of results to return with this call (a number from 1 to + // 400). The default is 400. + PageSize *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s DescribeLoadBalancersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBalancersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
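+//
+// A hedged sketch of paging through results with Marker/NextMarker (svc is
+// an assumed client; PageSize may be 1-400):
+//
+//    params := &DescribeLoadBalancersInput{PageSize: aws.Int64(100)}
+//    for {
+//        resp, err := svc.DescribeLoadBalancers(params)
+//        if err != nil {
+//            break
+//        }
+//        // ... use resp.LoadBalancerDescriptions ...
+//        if resp.NextMarker == nil || *resp.NextMarker == "" {
+//            break // no additional results
+//        }
+//        params.Marker = resp.NextMarker
+//    }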
+func (s *DescribeLoadBalancersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeLoadBalancersInput"} + if s.PageSize != nil && *s.PageSize < 1 { + invalidParams.Add(request.NewErrParamMinValue("PageSize", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeLoadBalancersOutput struct { + _ struct{} `type:"structure"` + + // Information about the load balancers. + LoadBalancerDescriptions []*LoadBalancerDescription `type:"list"` + + // The marker to use when requesting the next set of results. If there are no + // additional results, the string is empty. + NextMarker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeLoadBalancersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBalancersOutput) GoString() string { + return s.String() +} + +type DescribeTagsInput struct { + _ struct{} `type:"structure"` + + // The names of the load balancers. + LoadBalancerNames []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTagsInput"} + if s.LoadBalancerNames == nil { + invalidParams.Add(request.NewErrParamRequired("LoadBalancerNames")) + } + if s.LoadBalancerNames != nil && len(s.LoadBalancerNames) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LoadBalancerNames", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeTagsOutput struct { + _ struct{} `type:"structure"` + + // Information about the tags. + TagDescriptions []*TagDescription `type:"list"` +} + +// String returns the string representation +func (s DescribeTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTagsOutput) GoString() string { + return s.String() +} + +type DetachLoadBalancerFromSubnetsInput struct { + _ struct{} `type:"structure"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` + + // The IDs of the subnets. + Subnets []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s DetachLoadBalancerFromSubnetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachLoadBalancerFromSubnetsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DetachLoadBalancerFromSubnetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DetachLoadBalancerFromSubnetsInput"} + if s.LoadBalancerName == nil { + invalidParams.Add(request.NewErrParamRequired("LoadBalancerName")) + } + if s.Subnets == nil { + invalidParams.Add(request.NewErrParamRequired("Subnets")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DetachLoadBalancerFromSubnetsOutput struct { + _ struct{} `type:"structure"` + + // The IDs of the remaining subnets for the load balancer. 
+ Subnets []*string `type:"list"` +} + +// String returns the string representation +func (s DetachLoadBalancerFromSubnetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachLoadBalancerFromSubnetsOutput) GoString() string { + return s.String() +} + +type DisableAvailabilityZonesForLoadBalancerInput struct { + _ struct{} `type:"structure"` + + // The Availability Zones. + AvailabilityZones []*string `type:"list" required:"true"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DisableAvailabilityZonesForLoadBalancerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableAvailabilityZonesForLoadBalancerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DisableAvailabilityZonesForLoadBalancerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisableAvailabilityZonesForLoadBalancerInput"} + if s.AvailabilityZones == nil { + invalidParams.Add(request.NewErrParamRequired("AvailabilityZones")) + } + if s.LoadBalancerName == nil { + invalidParams.Add(request.NewErrParamRequired("LoadBalancerName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DisableAvailabilityZonesForLoadBalancerOutput struct { + _ struct{} `type:"structure"` + + // The remaining Availability Zones for the load balancer. + AvailabilityZones []*string `type:"list"` +} + +// String returns the string representation +func (s DisableAvailabilityZonesForLoadBalancerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableAvailabilityZonesForLoadBalancerOutput) GoString() string { + return s.String() +} + +type EnableAvailabilityZonesForLoadBalancerInput struct { + _ struct{} `type:"structure"` + + // The Availability Zones. These must be in the same region as the load balancer. + AvailabilityZones []*string `type:"list" required:"true"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s EnableAvailabilityZonesForLoadBalancerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableAvailabilityZonesForLoadBalancerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *EnableAvailabilityZonesForLoadBalancerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EnableAvailabilityZonesForLoadBalancerInput"} + if s.AvailabilityZones == nil { + invalidParams.Add(request.NewErrParamRequired("AvailabilityZones")) + } + if s.LoadBalancerName == nil { + invalidParams.Add(request.NewErrParamRequired("LoadBalancerName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type EnableAvailabilityZonesForLoadBalancerOutput struct { + _ struct{} `type:"structure"` + + // The updated list of Availability Zones for the load balancer. 
+	AvailabilityZones []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s EnableAvailabilityZonesForLoadBalancerOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EnableAvailabilityZonesForLoadBalancerOutput) GoString() string {
+	return s.String()
+}
+
+// Information about a health check.
+type HealthCheck struct {
+	_ struct{} `type:"structure"`
+
+	// The number of consecutive health check successes required before moving
+	// the instance to the Healthy state.
+	HealthyThreshold *int64 `min:"2" type:"integer" required:"true"`
+
+	// The approximate interval, in seconds, between health checks of an individual
+	// instance.
+	Interval *int64 `min:"1" type:"integer" required:"true"`
+
+	// The instance being checked. The protocol is either TCP, HTTP, HTTPS, or SSL.
+	// The range of valid ports is one (1) through 65535.
+	//
+	// TCP is the default, specified as a TCP:port pair, for example "TCP:5000".
+	// In this case, a health check simply attempts to open a TCP connection to
+	// the instance on the specified port. Failure to connect within the configured
+	// timeout is considered unhealthy.
+	//
+	// SSL is also specified as an SSL:port pair, for example "SSL:5000".
+	//
+	// For HTTP/HTTPS, you must include a ping path in the string. HTTP is specified
+	// as an HTTP:port/PathToPing grouping, for example "HTTP:80/weather/us/wa/seattle".
+	// In this case, an HTTP GET request is issued to the instance on the given port
+	// and path. Any answer other than "200 OK" within the timeout period is considered
+	// unhealthy.
+	//
+	// The total length of the HTTP ping target must be 1024 16-bit Unicode characters
+	// or less.
+	Target *string `type:"string" required:"true"`
+
+	// The amount of time, in seconds, during which no response means a failed health
+	// check.
+	//
+	// This value must be less than the Interval value.
+	Timeout *int64 `min:"1" type:"integer" required:"true"`
+
+	// The number of consecutive health check failures required before moving the
+	// instance to the Unhealthy state.
+	UnhealthyThreshold *int64 `min:"2" type:"integer" required:"true"`
+}
+
+// String returns the string representation
+func (s HealthCheck) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HealthCheck) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
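+//
+// For illustration, a hedged HealthCheck literal that satisfies these
+// constraints (values are hypothetical; both thresholds have a minimum of 2,
+// and the service additionally requires Timeout to stay below Interval):
+//
+//    hc := &HealthCheck{
+//        Target:             aws.String("TCP:5000"),
+//        Interval:           aws.Int64(10),
+//        Timeout:            aws.Int64(5),
+//        HealthyThreshold:   aws.Int64(3),
+//        UnhealthyThreshold: aws.Int64(2),
+//    }
+//    err := hc.Validate() // returns nil for the values above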
+func (s *HealthCheck) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HealthCheck"} + if s.HealthyThreshold == nil { + invalidParams.Add(request.NewErrParamRequired("HealthyThreshold")) + } + if s.HealthyThreshold != nil && *s.HealthyThreshold < 2 { + invalidParams.Add(request.NewErrParamMinValue("HealthyThreshold", 2)) + } + if s.Interval == nil { + invalidParams.Add(request.NewErrParamRequired("Interval")) + } + if s.Interval != nil && *s.Interval < 1 { + invalidParams.Add(request.NewErrParamMinValue("Interval", 1)) + } + if s.Target == nil { + invalidParams.Add(request.NewErrParamRequired("Target")) + } + if s.Timeout == nil { + invalidParams.Add(request.NewErrParamRequired("Timeout")) + } + if s.Timeout != nil && *s.Timeout < 1 { + invalidParams.Add(request.NewErrParamMinValue("Timeout", 1)) + } + if s.UnhealthyThreshold == nil { + invalidParams.Add(request.NewErrParamRequired("UnhealthyThreshold")) + } + if s.UnhealthyThreshold != nil && *s.UnhealthyThreshold < 2 { + invalidParams.Add(request.NewErrParamMinValue("UnhealthyThreshold", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The ID of a back-end instance. +type Instance struct { + _ struct{} `type:"structure"` + + // The ID of the instance. + InstanceId *string `type:"string"` +} + +// String returns the string representation +func (s Instance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Instance) GoString() string { + return s.String() +} + +// Information about the state of a back-end instance. +type InstanceState struct { + _ struct{} `type:"structure"` + + // A description of the instance state. This string can contain one or more + // of the following messages. + // + // N/A + // + // A transient error occurred. Please try again later. + // + // Instance has failed at least the UnhealthyThreshold number of health checks + // consecutively. + // + // Instance has not passed the configured HealthyThreshold number of health + // checks consecutively. + // + // Instance registration is still in progress. + // + // Instance is in the EC2 Availability Zone for which LoadBalancer is not + // configured to route traffic to. + // + // Instance is not currently registered with the LoadBalancer. + // + // Instance deregistration currently in progress. + // + // Disable Availability Zone is currently in progress. + // + // Instance is in pending state. + // + // Instance is in stopped state. + // + // Instance is in terminated state. + Description *string `type:"string"` + + // The ID of the instance. + InstanceId *string `type:"string"` + + // Information about the cause of OutOfService instances. Specifically, whether + // the cause is Elastic Load Balancing or the instance. + // + // Valid values: ELB | Instance | N/A + ReasonCode *string `type:"string"` + + // The current state of the instance. + // + // Valid values: InService | OutOfService | Unknown + State *string `type:"string"` +} + +// String returns the string representation +func (s InstanceState) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceState) GoString() string { + return s.String() +} + +// Information about a policy for duration-based session stickiness. +type LBCookieStickinessPolicy struct { + _ struct{} `type:"structure"` + + // The time period, in seconds, after which the cookie should be considered + // stale. 
If this parameter is not specified, the sticky session lasts for
+	// the duration of the browser session.
+	CookieExpirationPeriod *int64 `type:"long"`
+
+	// The name for the policy being created. The name must be unique within the
+	// set of policies for this load balancer.
+	PolicyName *string `type:"string"`
+}
+
+// String returns the string representation
+func (s LBCookieStickinessPolicy) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LBCookieStickinessPolicy) GoString() string {
+	return s.String()
+}
+
+// Information about a listener.
+//
+// For information about the protocols and the ports supported by Elastic Load
+// Balancing, see Listener Configurations for Elastic Load Balancing (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-listener-config.html)
+// in the Elastic Load Balancing Developer Guide.
+type Listener struct {
+	_ struct{} `type:"structure"`
+
+	// The port on which the instance is listening.
+	InstancePort *int64 `min:"1" type:"integer" required:"true"`
+
+	// The protocol to use for routing traffic to back-end instances: HTTP, HTTPS,
+	// TCP, or SSL.
+	//
+	// If the front-end protocol is HTTP or HTTPS, InstanceProtocol must be HTTP
+	// or HTTPS. If the front-end protocol is TCP or SSL, InstanceProtocol must
+	// be TCP or SSL.
+	//
+	// If there is another listener with the same InstancePort whose InstanceProtocol
+	// is secure (HTTPS or SSL), the listener's InstanceProtocol must also be secure.
+	//
+	// If there is another listener with the same InstancePort whose InstanceProtocol
+	// is HTTP or TCP, the listener's InstanceProtocol must be HTTP or TCP.
+	InstanceProtocol *string `type:"string"`
+
+	// The port on which the load balancer is listening. On EC2-VPC, you can specify
+	// any port from the range 1-65535. On EC2-Classic, you can specify any port
+	// from the following list: 25, 80, 443, 465, 587, 1024-65535.
+	LoadBalancerPort *int64 `type:"integer" required:"true"`
+
+	// The load balancer transport protocol to use for routing: HTTP, HTTPS, TCP,
+	// or SSL.
+	Protocol *string `type:"string" required:"true"`
+
+	// The Amazon Resource Name (ARN) of the server certificate.
+	SSLCertificateId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Listener) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Listener) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Listener) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "Listener"}
+	if s.InstancePort == nil {
+		invalidParams.Add(request.NewErrParamRequired("InstancePort"))
+	}
+	if s.InstancePort != nil && *s.InstancePort < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("InstancePort", 1))
+	}
+	if s.LoadBalancerPort == nil {
+		invalidParams.Add(request.NewErrParamRequired("LoadBalancerPort"))
+	}
+	if s.Protocol == nil {
+		invalidParams.Add(request.NewErrParamRequired("Protocol"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The policies enabled for a listener.
+type ListenerDescription struct {
+	_ struct{} `type:"structure"`
+
+	// Information about a listener.
+ // + // For information about the protocols and the ports supported by Elastic Load + // Balancing, see Listener Configurations for Elastic Load Balancing (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-listener-config.html) + // in the Elastic Load Balancing Developer Guide. + Listener *Listener `type:"structure"` + + // The policies. If there are no policies enabled, the list is empty. + PolicyNames []*string `type:"list"` +} + +// String returns the string representation +func (s ListenerDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListenerDescription) GoString() string { + return s.String() +} + +// The attributes for a load balancer. +type LoadBalancerAttributes struct { + _ struct{} `type:"structure"` + + // If enabled, the load balancer captures detailed information of all requests + // and delivers the information to the Amazon S3 bucket that you specify. + // + // For more information, see Enable Access Logs (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/enable-access-logs.html) + // in the Elastic Load Balancing Developer Guide. + AccessLog *AccessLog `type:"structure"` + + // This parameter is reserved. + AdditionalAttributes []*AdditionalAttribute `type:"list"` + + // If enabled, the load balancer allows existing requests to complete before + // the load balancer shifts traffic away from a deregistered or unhealthy back-end + // instance. + // + // For more information, see Enable Connection Draining (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/config-conn-drain.html) + // in the Elastic Load Balancing Developer Guide. + ConnectionDraining *ConnectionDraining `type:"structure"` + + // If enabled, the load balancer allows the connections to remain idle (no data + // is sent over the connection) for the specified duration. + // + // By default, Elastic Load Balancing maintains a 60-second idle connection + // timeout for both front-end and back-end connections of your load balancer. + // For more information, see Configure Idle Connection Timeout (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/config-idle-timeout.html) + // in the Elastic Load Balancing Developer Guide. + ConnectionSettings *ConnectionSettings `type:"structure"` + + // If enabled, the load balancer routes the request traffic evenly across all + // back-end instances regardless of the Availability Zones. + // + // For more information, see Enable Cross-Zone Load Balancing (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/enable-disable-crosszone-lb.html) + // in the Elastic Load Balancing Developer Guide. + CrossZoneLoadBalancing *CrossZoneLoadBalancing `type:"structure"` +} + +// String returns the string representation +func (s LoadBalancerAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoadBalancerAttributes) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
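+//
+// A hedged sketch of enabling cross-zone load balancing and a custom idle
+// timeout through ModifyLoadBalancerAttributes (svc and the load balancer
+// name are assumptions):
+//
+//    resp, err := svc.ModifyLoadBalancerAttributes(&ModifyLoadBalancerAttributesInput{
+//        LoadBalancerName: aws.String("my-load-balancer"),
+//        LoadBalancerAttributes: &LoadBalancerAttributes{
+//            CrossZoneLoadBalancing: &CrossZoneLoadBalancing{Enabled: aws.Bool(true)},
+//            ConnectionSettings:     &ConnectionSettings{IdleTimeout: aws.Int64(120)},
+//        },
+//    })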
+func (s *LoadBalancerAttributes) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "LoadBalancerAttributes"}
+	if s.AccessLog != nil {
+		if err := s.AccessLog.Validate(); err != nil {
+			invalidParams.AddNested("AccessLog", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.ConnectionDraining != nil {
+		if err := s.ConnectionDraining.Validate(); err != nil {
+			invalidParams.AddNested("ConnectionDraining", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.ConnectionSettings != nil {
+		if err := s.ConnectionSettings.Validate(); err != nil {
+			invalidParams.AddNested("ConnectionSettings", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.CrossZoneLoadBalancing != nil {
+		if err := s.CrossZoneLoadBalancing.Validate(); err != nil {
+			invalidParams.AddNested("CrossZoneLoadBalancing", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Information about a load balancer.
+type LoadBalancerDescription struct {
+	_ struct{} `type:"structure"`
+
+	// The Availability Zones for the load balancer.
+	AvailabilityZones []*string `type:"list"`
+
+	// Information about the back-end servers.
+	BackendServerDescriptions []*BackendServerDescription `type:"list"`
+
+	// The Amazon Route 53 hosted zone associated with the load balancer.
+	//
+	// For more information, see Using Domain Names With Elastic Load Balancing
+	// (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/using-domain-names-with-elb.html)
+	// in the Elastic Load Balancing Developer Guide.
+	CanonicalHostedZoneName *string `type:"string"`
+
+	// The ID of the Amazon Route 53 hosted zone associated with the load balancer.
+	CanonicalHostedZoneNameID *string `type:"string"`
+
+	// The date and time the load balancer was created.
+	CreatedTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+	// The external DNS name of the load balancer.
+	DNSName *string `type:"string"`
+
+	// Information about the health checks conducted on the load balancer.
+	HealthCheck *HealthCheck `type:"structure"`
+
+	// The IDs of the instances for the load balancer.
+	Instances []*Instance `type:"list"`
+
+	// The listeners for the load balancer.
+	ListenerDescriptions []*ListenerDescription `type:"list"`
+
+	// The name of the load balancer.
+	LoadBalancerName *string `type:"string"`
+
+	// The policies defined for the load balancer.
+	Policies *Policies `type:"structure"`
+
+	// The type of load balancer. Valid only for load balancers in a VPC.
+	//
+	// If Scheme is internet-facing, the load balancer has a public DNS name that
+	// resolves to a public IP address.
+	//
+	// If Scheme is internal, the load balancer has a public DNS name that resolves
+	// to a private IP address.
+	Scheme *string `type:"string"`
+
+	// The security groups for the load balancer. Valid only for load balancers
+	// in a VPC.
+	SecurityGroups []*string `type:"list"`
+
+	// The security group that you can use as part of your inbound rules for your
+	// load balancer's back-end application instances. To allow traffic only from
+	// load balancers, add a security group rule to your back-end instance that
+	// specifies this source security group as the inbound source.
+	SourceSecurityGroup *SourceSecurityGroup `type:"structure"`
+
+	// The IDs of the subnets for the load balancer.
+	Subnets []*string `type:"list"`
+
+	// The ID of the VPC for the load balancer.
+ VPCId *string `type:"string"` +} + +// String returns the string representation +func (s LoadBalancerDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoadBalancerDescription) GoString() string { + return s.String() +} + +type ModifyLoadBalancerAttributesInput struct { + _ struct{} `type:"structure"` + + // The attributes of the load balancer. + LoadBalancerAttributes *LoadBalancerAttributes `type:"structure" required:"true"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyLoadBalancerAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyLoadBalancerAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyLoadBalancerAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyLoadBalancerAttributesInput"} + if s.LoadBalancerAttributes == nil { + invalidParams.Add(request.NewErrParamRequired("LoadBalancerAttributes")) + } + if s.LoadBalancerName == nil { + invalidParams.Add(request.NewErrParamRequired("LoadBalancerName")) + } + if s.LoadBalancerAttributes != nil { + if err := s.LoadBalancerAttributes.Validate(); err != nil { + invalidParams.AddNested("LoadBalancerAttributes", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ModifyLoadBalancerAttributesOutput struct { + _ struct{} `type:"structure"` + + // The attributes for a load balancer. + LoadBalancerAttributes *LoadBalancerAttributes `type:"structure"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string"` +} + +// String returns the string representation +func (s ModifyLoadBalancerAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyLoadBalancerAttributesOutput) GoString() string { + return s.String() +} + +// The policies for a load balancer. +type Policies struct { + _ struct{} `type:"structure"` + + // The stickiness policies created using CreateAppCookieStickinessPolicy. + AppCookieStickinessPolicies []*AppCookieStickinessPolicy `type:"list"` + + // The stickiness policies created using CreateLBCookieStickinessPolicy. + LBCookieStickinessPolicies []*LBCookieStickinessPolicy `type:"list"` + + // The policies other than the stickiness policies. + OtherPolicies []*string `type:"list"` +} + +// String returns the string representation +func (s Policies) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Policies) GoString() string { + return s.String() +} + +// Information about a policy attribute. +type PolicyAttribute struct { + _ struct{} `type:"structure"` + + // The name of the attribute. + AttributeName *string `type:"string"` + + // The value of the attribute. + AttributeValue *string `type:"string"` +} + +// String returns the string representation +func (s PolicyAttribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyAttribute) GoString() string { + return s.String() +} + +// Information about a policy attribute. +type PolicyAttributeDescription struct { + _ struct{} `type:"structure"` + + // The name of the attribute. 
+	AttributeName *string `type:"string"`
+
+	// The value of the attribute.
+	AttributeValue *string `type:"string"`
+}
+
+// String returns the string representation
+func (s PolicyAttributeDescription) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PolicyAttributeDescription) GoString() string {
+	return s.String()
+}
+
+// Information about a policy attribute type.
+type PolicyAttributeTypeDescription struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the attribute.
+	AttributeName *string `type:"string"`
+
+	// The type of the attribute. For example, Boolean or Integer.
+	AttributeType *string `type:"string"`
+
+	// The cardinality of the attribute.
+	//
+	// Valid values:
+	//
+	//    ONE(1) : Single value required
+	//    ZERO_OR_ONE(0..1) : Up to one value can be supplied
+	//    ZERO_OR_MORE(0..*) : Optional. Multiple values are allowed
+	//    ONE_OR_MORE(1..*) : Required. Multiple values are allowed
+	Cardinality *string `type:"string"`
+
+	// The default value of the attribute, if applicable.
+	DefaultValue *string `type:"string"`
+
+	// A description of the attribute.
+	Description *string `type:"string"`
+}
+
+// String returns the string representation
+func (s PolicyAttributeTypeDescription) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PolicyAttributeTypeDescription) GoString() string {
+	return s.String()
+}
+
+// Information about a policy.
+type PolicyDescription struct {
+	_ struct{} `type:"structure"`
+
+	// The policy attributes.
+	PolicyAttributeDescriptions []*PolicyAttributeDescription `type:"list"`
+
+	// The name of the policy.
+	PolicyName *string `type:"string"`
+
+	// The name of the policy type.
+	PolicyTypeName *string `type:"string"`
+}
+
+// String returns the string representation
+func (s PolicyDescription) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PolicyDescription) GoString() string {
+	return s.String()
+}
+
+// Information about a policy type.
+type PolicyTypeDescription struct {
+	_ struct{} `type:"structure"`
+
+	// A description of the policy type.
+	Description *string `type:"string"`
+
+	// The description of the policy attributes associated with the policies defined
+	// by Elastic Load Balancing.
+	PolicyAttributeTypeDescriptions []*PolicyAttributeTypeDescription `type:"list"`
+
+	// The name of the policy type.
+	PolicyTypeName *string `type:"string"`
+}
+
+// String returns the string representation
+func (s PolicyTypeDescription) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PolicyTypeDescription) GoString() string {
+	return s.String()
+}
+
+type RegisterInstancesWithLoadBalancerInput struct {
+	_ struct{} `type:"structure"`
+
+	// The IDs of the instances.
+	Instances []*Instance `type:"list" required:"true"`
+
+	// The name of the load balancer.
+	LoadBalancerName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RegisterInstancesWithLoadBalancerInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterInstancesWithLoadBalancerInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
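+//
+// A minimal sketch (the load balancer name is a placeholder): both fields
+// are required, so omitting Instances makes Validate fail:
+//
+//    input := &RegisterInstancesWithLoadBalancerInput{
+//        LoadBalancerName: aws.String("my-load-balancer"),
+//    }
+//    err := input.Validate() // non-nil: Instances is required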
+func (s *RegisterInstancesWithLoadBalancerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RegisterInstancesWithLoadBalancerInput"} + if s.Instances == nil { + invalidParams.Add(request.NewErrParamRequired("Instances")) + } + if s.LoadBalancerName == nil { + invalidParams.Add(request.NewErrParamRequired("LoadBalancerName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RegisterInstancesWithLoadBalancerOutput struct { + _ struct{} `type:"structure"` + + // The updated list of instances for the load balancer. + Instances []*Instance `type:"list"` +} + +// String returns the string representation +func (s RegisterInstancesWithLoadBalancerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterInstancesWithLoadBalancerOutput) GoString() string { + return s.String() +} + +type RemoveTagsInput struct { + _ struct{} `type:"structure"` + + // The name of the load balancer. You can specify a maximum of one load balancer + // name. + LoadBalancerNames []*string `type:"list" required:"true"` + + // The list of tag keys to remove. + Tags []*TagKeyOnly `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s RemoveTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RemoveTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemoveTagsInput"} + if s.LoadBalancerNames == nil { + invalidParams.Add(request.NewErrParamRequired("LoadBalancerNames")) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RemoveTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsOutput) GoString() string { + return s.String() +} + +type SetLoadBalancerListenerSSLCertificateInput struct { + _ struct{} `type:"structure"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` + + // The port that uses the specified SSL certificate. + LoadBalancerPort *int64 `type:"integer" required:"true"` + + // The Amazon Resource Name (ARN) of the SSL certificate. + SSLCertificateId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetLoadBalancerListenerSSLCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetLoadBalancerListenerSSLCertificateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
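+//
+// A minimal sketch (the name and port are placeholders): all three fields are
+// required, so omitting SSLCertificateId makes Validate return an error:
+//
+//    input := &SetLoadBalancerListenerSSLCertificateInput{
+//        LoadBalancerName: aws.String("my-load-balancer"),
+//        LoadBalancerPort: aws.Int64(443),
+//    }
+//    err := input.Validate() // non-nil: SSLCertificateId is required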
+func (s *SetLoadBalancerListenerSSLCertificateInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "SetLoadBalancerListenerSSLCertificateInput"}
+	if s.LoadBalancerName == nil {
+		invalidParams.Add(request.NewErrParamRequired("LoadBalancerName"))
+	}
+	if s.LoadBalancerPort == nil {
+		invalidParams.Add(request.NewErrParamRequired("LoadBalancerPort"))
+	}
+	if s.SSLCertificateId == nil {
+		invalidParams.Add(request.NewErrParamRequired("SSLCertificateId"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type SetLoadBalancerListenerSSLCertificateOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s SetLoadBalancerListenerSSLCertificateOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetLoadBalancerListenerSSLCertificateOutput) GoString() string {
+	return s.String()
+}
+
+type SetLoadBalancerPoliciesForBackendServerInput struct {
+	_ struct{} `type:"structure"`
+
+	// The port number associated with the back-end server.
+	InstancePort *int64 `type:"integer" required:"true"`
+
+	// The name of the load balancer.
+	LoadBalancerName *string `type:"string" required:"true"`
+
+	// The names of the policies. If the list is empty, then all current policies
+	// are removed from the back-end server.
+	PolicyNames []*string `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s SetLoadBalancerPoliciesForBackendServerInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetLoadBalancerPoliciesForBackendServerInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SetLoadBalancerPoliciesForBackendServerInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "SetLoadBalancerPoliciesForBackendServerInput"}
+	if s.InstancePort == nil {
+		invalidParams.Add(request.NewErrParamRequired("InstancePort"))
+	}
+	if s.LoadBalancerName == nil {
+		invalidParams.Add(request.NewErrParamRequired("LoadBalancerName"))
+	}
+	if s.PolicyNames == nil {
+		invalidParams.Add(request.NewErrParamRequired("PolicyNames"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type SetLoadBalancerPoliciesForBackendServerOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s SetLoadBalancerPoliciesForBackendServerOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetLoadBalancerPoliciesForBackendServerOutput) GoString() string {
+	return s.String()
+}
+
+type SetLoadBalancerPoliciesOfListenerInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the load balancer.
+	LoadBalancerName *string `type:"string" required:"true"`
+
+	// The external port of the load balancer for the policy.
+	LoadBalancerPort *int64 `type:"integer" required:"true"`
+
+	// The names of the policies. If the list is empty, the current policy is removed
+	// from the listener.
+ PolicyNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s SetLoadBalancerPoliciesOfListenerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetLoadBalancerPoliciesOfListenerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetLoadBalancerPoliciesOfListenerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetLoadBalancerPoliciesOfListenerInput"} + if s.LoadBalancerName == nil { + invalidParams.Add(request.NewErrParamRequired("LoadBalancerName")) + } + if s.LoadBalancerPort == nil { + invalidParams.Add(request.NewErrParamRequired("LoadBalancerPort")) + } + if s.PolicyNames == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyNames")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type SetLoadBalancerPoliciesOfListenerOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetLoadBalancerPoliciesOfListenerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetLoadBalancerPoliciesOfListenerOutput) GoString() string { + return s.String() +} + +// Information about a source security group. +type SourceSecurityGroup struct { + _ struct{} `type:"structure"` + + // The name of the security group. + GroupName *string `type:"string"` + + // The owner of the security group. + OwnerAlias *string `type:"string"` +} + +// String returns the string representation +func (s SourceSecurityGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SourceSecurityGroup) GoString() string { + return s.String() +} + +// Information about a tag. +type Tag struct { + _ struct{} `type:"structure"` + + // The key of the tag. + Key *string `min:"1" type:"string" required:"true"` + + // The value of the tag. + Value *string `type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The tags associated with a load balancer. +type TagDescription struct { + _ struct{} `type:"structure"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string"` + + // The tags. + Tags []*Tag `min:"1" type:"list"` +} + +// String returns the string representation +func (s TagDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagDescription) GoString() string { + return s.String() +} + +// The key of a tag. +type TagKeyOnly struct { + _ struct{} `type:"structure"` + + // The name of the key. 
+	Key *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s TagKeyOnly) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TagKeyOnly) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *TagKeyOnly) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "TagKeyOnly"}
+	if s.Key != nil && len(*s.Key) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/elb/elbiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/elb/elbiface/interface.go
new file mode 100644
index 000000000..5674133f7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/elb/elbiface/interface.go
@@ -0,0 +1,128 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package elbiface provides an interface for Elastic Load Balancing.
+package elbiface
+
+import (
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/service/elb"
+)
+
+// ELBAPI is the interface type for elb.ELB.
+type ELBAPI interface {
+	AddTagsRequest(*elb.AddTagsInput) (*request.Request, *elb.AddTagsOutput)
+
+	AddTags(*elb.AddTagsInput) (*elb.AddTagsOutput, error)
+
+	ApplySecurityGroupsToLoadBalancerRequest(*elb.ApplySecurityGroupsToLoadBalancerInput) (*request.Request, *elb.ApplySecurityGroupsToLoadBalancerOutput)
+
+	ApplySecurityGroupsToLoadBalancer(*elb.ApplySecurityGroupsToLoadBalancerInput) (*elb.ApplySecurityGroupsToLoadBalancerOutput, error)
+
+	AttachLoadBalancerToSubnetsRequest(*elb.AttachLoadBalancerToSubnetsInput) (*request.Request, *elb.AttachLoadBalancerToSubnetsOutput)
+
+	AttachLoadBalancerToSubnets(*elb.AttachLoadBalancerToSubnetsInput) (*elb.AttachLoadBalancerToSubnetsOutput, error)
+
+	ConfigureHealthCheckRequest(*elb.ConfigureHealthCheckInput) (*request.Request, *elb.ConfigureHealthCheckOutput)
+
+	ConfigureHealthCheck(*elb.ConfigureHealthCheckInput) (*elb.ConfigureHealthCheckOutput, error)
+
+	CreateAppCookieStickinessPolicyRequest(*elb.CreateAppCookieStickinessPolicyInput) (*request.Request, *elb.CreateAppCookieStickinessPolicyOutput)
+
+	CreateAppCookieStickinessPolicy(*elb.CreateAppCookieStickinessPolicyInput) (*elb.CreateAppCookieStickinessPolicyOutput, error)
+
+	CreateLBCookieStickinessPolicyRequest(*elb.CreateLBCookieStickinessPolicyInput) (*request.Request, *elb.CreateLBCookieStickinessPolicyOutput)
+
+	CreateLBCookieStickinessPolicy(*elb.CreateLBCookieStickinessPolicyInput) (*elb.CreateLBCookieStickinessPolicyOutput, error)
+
+	CreateLoadBalancerRequest(*elb.CreateLoadBalancerInput) (*request.Request, *elb.CreateLoadBalancerOutput)
+
+	CreateLoadBalancer(*elb.CreateLoadBalancerInput) (*elb.CreateLoadBalancerOutput, error)
+
+	CreateLoadBalancerListenersRequest(*elb.CreateLoadBalancerListenersInput) (*request.Request, *elb.CreateLoadBalancerListenersOutput)
+
+	CreateLoadBalancerListeners(*elb.CreateLoadBalancerListenersInput) (*elb.CreateLoadBalancerListenersOutput, error)
+
+	CreateLoadBalancerPolicyRequest(*elb.CreateLoadBalancerPolicyInput) (*request.Request, *elb.CreateLoadBalancerPolicyOutput)
+
+	CreateLoadBalancerPolicy(*elb.CreateLoadBalancerPolicyInput) (*elb.CreateLoadBalancerPolicyOutput, error)
+
+	DeleteLoadBalancerRequest(*elb.DeleteLoadBalancerInput) (*request.Request, *elb.DeleteLoadBalancerOutput)
+
+	
DeleteLoadBalancer(*elb.DeleteLoadBalancerInput) (*elb.DeleteLoadBalancerOutput, error) + + DeleteLoadBalancerListenersRequest(*elb.DeleteLoadBalancerListenersInput) (*request.Request, *elb.DeleteLoadBalancerListenersOutput) + + DeleteLoadBalancerListeners(*elb.DeleteLoadBalancerListenersInput) (*elb.DeleteLoadBalancerListenersOutput, error) + + DeleteLoadBalancerPolicyRequest(*elb.DeleteLoadBalancerPolicyInput) (*request.Request, *elb.DeleteLoadBalancerPolicyOutput) + + DeleteLoadBalancerPolicy(*elb.DeleteLoadBalancerPolicyInput) (*elb.DeleteLoadBalancerPolicyOutput, error) + + DeregisterInstancesFromLoadBalancerRequest(*elb.DeregisterInstancesFromLoadBalancerInput) (*request.Request, *elb.DeregisterInstancesFromLoadBalancerOutput) + + DeregisterInstancesFromLoadBalancer(*elb.DeregisterInstancesFromLoadBalancerInput) (*elb.DeregisterInstancesFromLoadBalancerOutput, error) + + DescribeInstanceHealthRequest(*elb.DescribeInstanceHealthInput) (*request.Request, *elb.DescribeInstanceHealthOutput) + + DescribeInstanceHealth(*elb.DescribeInstanceHealthInput) (*elb.DescribeInstanceHealthOutput, error) + + DescribeLoadBalancerAttributesRequest(*elb.DescribeLoadBalancerAttributesInput) (*request.Request, *elb.DescribeLoadBalancerAttributesOutput) + + DescribeLoadBalancerAttributes(*elb.DescribeLoadBalancerAttributesInput) (*elb.DescribeLoadBalancerAttributesOutput, error) + + DescribeLoadBalancerPoliciesRequest(*elb.DescribeLoadBalancerPoliciesInput) (*request.Request, *elb.DescribeLoadBalancerPoliciesOutput) + + DescribeLoadBalancerPolicies(*elb.DescribeLoadBalancerPoliciesInput) (*elb.DescribeLoadBalancerPoliciesOutput, error) + + DescribeLoadBalancerPolicyTypesRequest(*elb.DescribeLoadBalancerPolicyTypesInput) (*request.Request, *elb.DescribeLoadBalancerPolicyTypesOutput) + + DescribeLoadBalancerPolicyTypes(*elb.DescribeLoadBalancerPolicyTypesInput) (*elb.DescribeLoadBalancerPolicyTypesOutput, error) + + DescribeLoadBalancersRequest(*elb.DescribeLoadBalancersInput) (*request.Request, *elb.DescribeLoadBalancersOutput) + + DescribeLoadBalancers(*elb.DescribeLoadBalancersInput) (*elb.DescribeLoadBalancersOutput, error) + + DescribeLoadBalancersPages(*elb.DescribeLoadBalancersInput, func(*elb.DescribeLoadBalancersOutput, bool) bool) error + + DescribeTagsRequest(*elb.DescribeTagsInput) (*request.Request, *elb.DescribeTagsOutput) + + DescribeTags(*elb.DescribeTagsInput) (*elb.DescribeTagsOutput, error) + + DetachLoadBalancerFromSubnetsRequest(*elb.DetachLoadBalancerFromSubnetsInput) (*request.Request, *elb.DetachLoadBalancerFromSubnetsOutput) + + DetachLoadBalancerFromSubnets(*elb.DetachLoadBalancerFromSubnetsInput) (*elb.DetachLoadBalancerFromSubnetsOutput, error) + + DisableAvailabilityZonesForLoadBalancerRequest(*elb.DisableAvailabilityZonesForLoadBalancerInput) (*request.Request, *elb.DisableAvailabilityZonesForLoadBalancerOutput) + + DisableAvailabilityZonesForLoadBalancer(*elb.DisableAvailabilityZonesForLoadBalancerInput) (*elb.DisableAvailabilityZonesForLoadBalancerOutput, error) + + EnableAvailabilityZonesForLoadBalancerRequest(*elb.EnableAvailabilityZonesForLoadBalancerInput) (*request.Request, *elb.EnableAvailabilityZonesForLoadBalancerOutput) + + EnableAvailabilityZonesForLoadBalancer(*elb.EnableAvailabilityZonesForLoadBalancerInput) (*elb.EnableAvailabilityZonesForLoadBalancerOutput, error) + + ModifyLoadBalancerAttributesRequest(*elb.ModifyLoadBalancerAttributesInput) (*request.Request, *elb.ModifyLoadBalancerAttributesOutput) + + 
ModifyLoadBalancerAttributes(*elb.ModifyLoadBalancerAttributesInput) (*elb.ModifyLoadBalancerAttributesOutput, error) + + RegisterInstancesWithLoadBalancerRequest(*elb.RegisterInstancesWithLoadBalancerInput) (*request.Request, *elb.RegisterInstancesWithLoadBalancerOutput) + + RegisterInstancesWithLoadBalancer(*elb.RegisterInstancesWithLoadBalancerInput) (*elb.RegisterInstancesWithLoadBalancerOutput, error) + + RemoveTagsRequest(*elb.RemoveTagsInput) (*request.Request, *elb.RemoveTagsOutput) + + RemoveTags(*elb.RemoveTagsInput) (*elb.RemoveTagsOutput, error) + + SetLoadBalancerListenerSSLCertificateRequest(*elb.SetLoadBalancerListenerSSLCertificateInput) (*request.Request, *elb.SetLoadBalancerListenerSSLCertificateOutput) + + SetLoadBalancerListenerSSLCertificate(*elb.SetLoadBalancerListenerSSLCertificateInput) (*elb.SetLoadBalancerListenerSSLCertificateOutput, error) + + SetLoadBalancerPoliciesForBackendServerRequest(*elb.SetLoadBalancerPoliciesForBackendServerInput) (*request.Request, *elb.SetLoadBalancerPoliciesForBackendServerOutput) + + SetLoadBalancerPoliciesForBackendServer(*elb.SetLoadBalancerPoliciesForBackendServerInput) (*elb.SetLoadBalancerPoliciesForBackendServerOutput, error) + + SetLoadBalancerPoliciesOfListenerRequest(*elb.SetLoadBalancerPoliciesOfListenerInput) (*request.Request, *elb.SetLoadBalancerPoliciesOfListenerOutput) + + SetLoadBalancerPoliciesOfListener(*elb.SetLoadBalancerPoliciesOfListenerInput) (*elb.SetLoadBalancerPoliciesOfListenerOutput, error) +} + +var _ ELBAPI = (*elb.ELB)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/elb/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/elb/examples_test.go new file mode 100644 index 000000000..d1da802e6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/elb/examples_test.go @@ -0,0 +1,722 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package elb_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/elb" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleELB_AddTags() { + svc := elb.New(session.New()) + + params := &elb.AddTagsInput{ + LoadBalancerNames: []*string{ // Required + aws.String("AccessPointName"), // Required + // More values... + }, + Tags: []*elb.Tag{ // Required + { // Required + Key: aws.String("TagKey"), // Required + Value: aws.String("TagValue"), + }, + // More values... + }, + } + resp, err := svc.AddTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_ApplySecurityGroupsToLoadBalancer() { + svc := elb.New(session.New()) + + params := &elb.ApplySecurityGroupsToLoadBalancerInput{ + LoadBalancerName: aws.String("AccessPointName"), // Required + SecurityGroups: []*string{ // Required + aws.String("SecurityGroupId"), // Required + // More values... + }, + } + resp, err := svc.ApplySecurityGroupsToLoadBalancer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleELB_AttachLoadBalancerToSubnets() { + svc := elb.New(session.New()) + + params := &elb.AttachLoadBalancerToSubnetsInput{ + LoadBalancerName: aws.String("AccessPointName"), // Required + Subnets: []*string{ // Required + aws.String("SubnetId"), // Required + // More values... + }, + } + resp, err := svc.AttachLoadBalancerToSubnets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_ConfigureHealthCheck() { + svc := elb.New(session.New()) + + params := &elb.ConfigureHealthCheckInput{ + HealthCheck: &elb.HealthCheck{ // Required + HealthyThreshold: aws.Int64(1), // Required + Interval: aws.Int64(1), // Required + Target: aws.String("HealthCheckTarget"), // Required + Timeout: aws.Int64(1), // Required + UnhealthyThreshold: aws.Int64(1), // Required + }, + LoadBalancerName: aws.String("AccessPointName"), // Required + } + resp, err := svc.ConfigureHealthCheck(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_CreateAppCookieStickinessPolicy() { + svc := elb.New(session.New()) + + params := &elb.CreateAppCookieStickinessPolicyInput{ + CookieName: aws.String("CookieName"), // Required + LoadBalancerName: aws.String("AccessPointName"), // Required + PolicyName: aws.String("PolicyName"), // Required + } + resp, err := svc.CreateAppCookieStickinessPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_CreateLBCookieStickinessPolicy() { + svc := elb.New(session.New()) + + params := &elb.CreateLBCookieStickinessPolicyInput{ + LoadBalancerName: aws.String("AccessPointName"), // Required + PolicyName: aws.String("PolicyName"), // Required + CookieExpirationPeriod: aws.Int64(1), + } + resp, err := svc.CreateLBCookieStickinessPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_CreateLoadBalancer() { + svc := elb.New(session.New()) + + params := &elb.CreateLoadBalancerInput{ + Listeners: []*elb.Listener{ // Required + { // Required + InstancePort: aws.Int64(1), // Required + LoadBalancerPort: aws.Int64(1), // Required + Protocol: aws.String("Protocol"), // Required + InstanceProtocol: aws.String("Protocol"), + SSLCertificateId: aws.String("SSLCertificateId"), + }, + // More values... + }, + LoadBalancerName: aws.String("AccessPointName"), // Required + AvailabilityZones: []*string{ + aws.String("AvailabilityZone"), // Required + // More values... + }, + Scheme: aws.String("LoadBalancerScheme"), + SecurityGroups: []*string{ + aws.String("SecurityGroupId"), // Required + // More values... + }, + Subnets: []*string{ + aws.String("SubnetId"), // Required + // More values... + }, + Tags: []*elb.Tag{ + { // Required + Key: aws.String("TagKey"), // Required + Value: aws.String("TagValue"), + }, + // More values... 
+ }, + } + resp, err := svc.CreateLoadBalancer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_CreateLoadBalancerListeners() { + svc := elb.New(session.New()) + + params := &elb.CreateLoadBalancerListenersInput{ + Listeners: []*elb.Listener{ // Required + { // Required + InstancePort: aws.Int64(1), // Required + LoadBalancerPort: aws.Int64(1), // Required + Protocol: aws.String("Protocol"), // Required + InstanceProtocol: aws.String("Protocol"), + SSLCertificateId: aws.String("SSLCertificateId"), + }, + // More values... + }, + LoadBalancerName: aws.String("AccessPointName"), // Required + } + resp, err := svc.CreateLoadBalancerListeners(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_CreateLoadBalancerPolicy() { + svc := elb.New(session.New()) + + params := &elb.CreateLoadBalancerPolicyInput{ + LoadBalancerName: aws.String("AccessPointName"), // Required + PolicyName: aws.String("PolicyName"), // Required + PolicyTypeName: aws.String("PolicyTypeName"), // Required + PolicyAttributes: []*elb.PolicyAttribute{ + { // Required + AttributeName: aws.String("AttributeName"), + AttributeValue: aws.String("AttributeValue"), + }, + // More values... + }, + } + resp, err := svc.CreateLoadBalancerPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_DeleteLoadBalancer() { + svc := elb.New(session.New()) + + params := &elb.DeleteLoadBalancerInput{ + LoadBalancerName: aws.String("AccessPointName"), // Required + } + resp, err := svc.DeleteLoadBalancer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_DeleteLoadBalancerListeners() { + svc := elb.New(session.New()) + + params := &elb.DeleteLoadBalancerListenersInput{ + LoadBalancerName: aws.String("AccessPointName"), // Required + LoadBalancerPorts: []*int64{ // Required + aws.Int64(1), // Required + // More values... + }, + } + resp, err := svc.DeleteLoadBalancerListeners(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_DeleteLoadBalancerPolicy() { + svc := elb.New(session.New()) + + params := &elb.DeleteLoadBalancerPolicyInput{ + LoadBalancerName: aws.String("AccessPointName"), // Required + PolicyName: aws.String("PolicyName"), // Required + } + resp, err := svc.DeleteLoadBalancerPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleELB_DeregisterInstancesFromLoadBalancer() { + svc := elb.New(session.New()) + + params := &elb.DeregisterInstancesFromLoadBalancerInput{ + Instances: []*elb.Instance{ // Required + { // Required + InstanceId: aws.String("InstanceId"), + }, + // More values... + }, + LoadBalancerName: aws.String("AccessPointName"), // Required + } + resp, err := svc.DeregisterInstancesFromLoadBalancer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_DescribeInstanceHealth() { + svc := elb.New(session.New()) + + params := &elb.DescribeInstanceHealthInput{ + LoadBalancerName: aws.String("AccessPointName"), // Required + Instances: []*elb.Instance{ + { // Required + InstanceId: aws.String("InstanceId"), + }, + // More values... + }, + } + resp, err := svc.DescribeInstanceHealth(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_DescribeLoadBalancerAttributes() { + svc := elb.New(session.New()) + + params := &elb.DescribeLoadBalancerAttributesInput{ + LoadBalancerName: aws.String("AccessPointName"), // Required + } + resp, err := svc.DescribeLoadBalancerAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_DescribeLoadBalancerPolicies() { + svc := elb.New(session.New()) + + params := &elb.DescribeLoadBalancerPoliciesInput{ + LoadBalancerName: aws.String("AccessPointName"), + PolicyNames: []*string{ + aws.String("PolicyName"), // Required + // More values... + }, + } + resp, err := svc.DescribeLoadBalancerPolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_DescribeLoadBalancerPolicyTypes() { + svc := elb.New(session.New()) + + params := &elb.DescribeLoadBalancerPolicyTypesInput{ + PolicyTypeNames: []*string{ + aws.String("PolicyTypeName"), // Required + // More values... + }, + } + resp, err := svc.DescribeLoadBalancerPolicyTypes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_DescribeLoadBalancers() { + svc := elb.New(session.New()) + + params := &elb.DescribeLoadBalancersInput{ + LoadBalancerNames: []*string{ + aws.String("AccessPointName"), // Required + // More values... + }, + Marker: aws.String("Marker"), + PageSize: aws.Int64(1), + } + resp, err := svc.DescribeLoadBalancers(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleELB_DescribeTags() { + svc := elb.New(session.New()) + + params := &elb.DescribeTagsInput{ + LoadBalancerNames: []*string{ // Required + aws.String("AccessPointName"), // Required + // More values... + }, + } + resp, err := svc.DescribeTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_DetachLoadBalancerFromSubnets() { + svc := elb.New(session.New()) + + params := &elb.DetachLoadBalancerFromSubnetsInput{ + LoadBalancerName: aws.String("AccessPointName"), // Required + Subnets: []*string{ // Required + aws.String("SubnetId"), // Required + // More values... + }, + } + resp, err := svc.DetachLoadBalancerFromSubnets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_DisableAvailabilityZonesForLoadBalancer() { + svc := elb.New(session.New()) + + params := &elb.DisableAvailabilityZonesForLoadBalancerInput{ + AvailabilityZones: []*string{ // Required + aws.String("AvailabilityZone"), // Required + // More values... + }, + LoadBalancerName: aws.String("AccessPointName"), // Required + } + resp, err := svc.DisableAvailabilityZonesForLoadBalancer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_EnableAvailabilityZonesForLoadBalancer() { + svc := elb.New(session.New()) + + params := &elb.EnableAvailabilityZonesForLoadBalancerInput{ + AvailabilityZones: []*string{ // Required + aws.String("AvailabilityZone"), // Required + // More values... + }, + LoadBalancerName: aws.String("AccessPointName"), // Required + } + resp, err := svc.EnableAvailabilityZonesForLoadBalancer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_ModifyLoadBalancerAttributes() { + svc := elb.New(session.New()) + + params := &elb.ModifyLoadBalancerAttributesInput{ + LoadBalancerAttributes: &elb.LoadBalancerAttributes{ // Required + AccessLog: &elb.AccessLog{ + Enabled: aws.Bool(true), // Required + EmitInterval: aws.Int64(1), + S3BucketName: aws.String("S3BucketName"), + S3BucketPrefix: aws.String("AccessLogPrefix"), + }, + AdditionalAttributes: []*elb.AdditionalAttribute{ + { // Required + Key: aws.String("StringVal"), + Value: aws.String("StringVal"), + }, + // More values... + }, + ConnectionDraining: &elb.ConnectionDraining{ + Enabled: aws.Bool(true), // Required + Timeout: aws.Int64(1), + }, + ConnectionSettings: &elb.ConnectionSettings{ + IdleTimeout: aws.Int64(1), // Required + }, + CrossZoneLoadBalancing: &elb.CrossZoneLoadBalancing{ + Enabled: aws.Bool(true), // Required + }, + }, + LoadBalancerName: aws.String("AccessPointName"), // Required + } + resp, err := svc.ModifyLoadBalancerAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleELB_RegisterInstancesWithLoadBalancer() { + svc := elb.New(session.New()) + + params := &elb.RegisterInstancesWithLoadBalancerInput{ + Instances: []*elb.Instance{ // Required + { // Required + InstanceId: aws.String("InstanceId"), + }, + // More values... + }, + LoadBalancerName: aws.String("AccessPointName"), // Required + } + resp, err := svc.RegisterInstancesWithLoadBalancer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_RemoveTags() { + svc := elb.New(session.New()) + + params := &elb.RemoveTagsInput{ + LoadBalancerNames: []*string{ // Required + aws.String("AccessPointName"), // Required + // More values... + }, + Tags: []*elb.TagKeyOnly{ // Required + { // Required + Key: aws.String("TagKey"), + }, + // More values... + }, + } + resp, err := svc.RemoveTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_SetLoadBalancerListenerSSLCertificate() { + svc := elb.New(session.New()) + + params := &elb.SetLoadBalancerListenerSSLCertificateInput{ + LoadBalancerName: aws.String("AccessPointName"), // Required + LoadBalancerPort: aws.Int64(1), // Required + SSLCertificateId: aws.String("SSLCertificateId"), // Required + } + resp, err := svc.SetLoadBalancerListenerSSLCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_SetLoadBalancerPoliciesForBackendServer() { + svc := elb.New(session.New()) + + params := &elb.SetLoadBalancerPoliciesForBackendServerInput{ + InstancePort: aws.Int64(1), // Required + LoadBalancerName: aws.String("AccessPointName"), // Required + PolicyNames: []*string{ // Required + aws.String("PolicyName"), // Required + // More values... + }, + } + resp, err := svc.SetLoadBalancerPoliciesForBackendServer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_SetLoadBalancerPoliciesOfListener() { + svc := elb.New(session.New()) + + params := &elb.SetLoadBalancerPoliciesOfListenerInput{ + LoadBalancerName: aws.String("AccessPointName"), // Required + LoadBalancerPort: aws.Int64(1), // Required + PolicyNames: []*string{ // Required + aws.String("PolicyName"), // Required + // More values... + }, + } + resp, err := svc.SetLoadBalancerPoliciesOfListener(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/elb/service.go b/vendor/github.com/aws/aws-sdk-go/service/elb/service.go new file mode 100644 index 000000000..3dc5c73fb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/elb/service.go @@ -0,0 +1,98 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+
+package elb
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/query"
+)
+
+// Elastic Load Balancing distributes incoming traffic across your EC2 instances.
+//
+// For information about the features of Elastic Load Balancing, see What Is
+// Elastic Load Balancing? (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elastic-load-balancing.html)
+// in the Elastic Load Balancing Developer Guide.
+//
+// For information about the AWS regions supported by Elastic Load Balancing,
+// see Regions and Endpoints - Elastic Load Balancing (http://docs.aws.amazon.com/general/latest/gr/rande.html#elb_region)
+// in the Amazon Web Services General Reference.
+//
+// All Elastic Load Balancing operations are idempotent, which means that they
+// complete at most one time. If you repeat an operation, it succeeds with a
+// 200 OK response code.
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type ELB struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "elasticloadbalancing"
+
+// New creates a new instance of the ELB client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create an ELB client from just a session.
+//     svc := elb.New(mySession)
+//
+//     // Create an ELB client with additional configuration
+//     svc := elb.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *ELB {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *ELB {
+	svc := &ELB{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   ServiceName,
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "2012-06-01",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+	svc.Handlers.Build.PushBackNamed(query.BuildHandler)
+	svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler)
+	svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler)
+	svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for an ELB operation and runs any
+// custom request initialization.
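+//
+// This hook is internal to the package; external callers who need similar
+// cross-cutting behavior can attach handlers to the returned client instead.
+// A sketch (the handler body is illustrative):
+//
+//    svc := elb.New(mySession)
+//    svc.Handlers.Send.PushFront(func(r *request.Request) {
+//        // inspect or log r before the request is sent
+//    })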
+func (c *ELB) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/elb/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/elb/waiters.go new file mode 100644 index 000000000..b1c9a526a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/elb/waiters.go @@ -0,0 +1,82 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package elb + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *ELB) WaitUntilAnyInstanceInService(input *DescribeInstanceHealthInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstanceHealth", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAny", + Argument: "InstanceStates[].State", + Expected: "InService", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *ELB) WaitUntilInstanceDeregistered(input *DescribeInstanceHealthInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstanceHealth", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "InstanceStates[].State", + Expected: "OutOfService", + }, + { + State: "success", + Matcher: "error", + Argument: "", + Expected: "InvalidInstance", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *ELB) WaitUntilInstanceInService(input *DescribeInstanceHealthInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstanceHealth", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "InstanceStates[].State", + Expected: "InService", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/emr/api.go b/vendor/github.com/aws/aws-sdk-go/service/emr/api.go new file mode 100644 index 000000000..bacf653be --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/emr/api.go @@ -0,0 +1,4103 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package emr provides a client for Amazon Elastic MapReduce. +package emr + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opAddInstanceGroups = "AddInstanceGroups" + +// AddInstanceGroupsRequest generates a "aws/request.Request" representing the +// client's request for the AddInstanceGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddInstanceGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the AddInstanceGroupsRequest method. +// req, resp := client.AddInstanceGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EMR) AddInstanceGroupsRequest(input *AddInstanceGroupsInput) (req *request.Request, output *AddInstanceGroupsOutput) { + op := &request.Operation{ + Name: opAddInstanceGroups, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddInstanceGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &AddInstanceGroupsOutput{} + req.Data = output + return +} + +// AddInstanceGroups adds an instance group to a running cluster. +func (c *EMR) AddInstanceGroups(input *AddInstanceGroupsInput) (*AddInstanceGroupsOutput, error) { + req, out := c.AddInstanceGroupsRequest(input) + err := req.Send() + return out, err +} + +const opAddJobFlowSteps = "AddJobFlowSteps" + +// AddJobFlowStepsRequest generates a "aws/request.Request" representing the +// client's request for the AddJobFlowSteps operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddJobFlowSteps method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddJobFlowStepsRequest method. +// req, resp := client.AddJobFlowStepsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EMR) AddJobFlowStepsRequest(input *AddJobFlowStepsInput) (req *request.Request, output *AddJobFlowStepsOutput) { + op := &request.Operation{ + Name: opAddJobFlowSteps, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddJobFlowStepsInput{} + } + + req = c.newRequest(op, input, output) + output = &AddJobFlowStepsOutput{} + req.Data = output + return +} + +// AddJobFlowSteps adds new steps to a running job flow. A maximum of 256 steps +// are allowed in each job flow. +// +// If your job flow is long-running (such as a Hive data warehouse) or complex, +// you may require more than 256 steps to process your data. You can bypass +// the 256-step limitation in various ways, including using the SSH shell to +// connect to the master node and submitting queries directly to the software +// running on the master node, such as Hive and Hadoop. For more information +// on how to do this, go to Add More than 256 Steps to a Job Flow (http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/AddMoreThan256Steps.html) +// in the Amazon Elastic MapReduce Developer's Guide. +// +// A step specifies the location of a JAR file stored either on the master +// node of the job flow or in Amazon S3. Each step is performed by the main +// function of the main class of the JAR file. The main class can be specified +// either in the manifest of the JAR or by using the MainFunction parameter +// of the step. +// +// Elastic MapReduce executes each step in the order listed. 
For a step to +// be considered complete, the main function must exit with a zero exit code +// and all Hadoop jobs started while the step was running must have completed +// and run successfully. +// +// You can only add steps to a job flow that is in one of the following states: +// STARTING, BOOTSTRAPPING, RUNNING, or WAITING. +func (c *EMR) AddJobFlowSteps(input *AddJobFlowStepsInput) (*AddJobFlowStepsOutput, error) { + req, out := c.AddJobFlowStepsRequest(input) + err := req.Send() + return out, err +} + +const opAddTags = "AddTags" + +// AddTagsRequest generates a "aws/request.Request" representing the +// client's request for the AddTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddTagsRequest method. +// req, resp := client.AddTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EMR) AddTagsRequest(input *AddTagsInput) (req *request.Request, output *AddTagsOutput) { + op := &request.Operation{ + Name: opAddTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &AddTagsOutput{} + req.Data = output + return +} + +// Adds tags to an Amazon EMR resource. Tags make it easier to associate clusters +// in various ways, such as grouping clusters to track your Amazon EMR resource +// allocation costs. For more information, see Tagging Amazon EMR Resources +// (http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/emr-plan-tags.html). +func (c *EMR) AddTags(input *AddTagsInput) (*AddTagsOutput, error) { + req, out := c.AddTagsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeCluster = "DescribeCluster" + +// DescribeClusterRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCluster operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeCluster method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeClusterRequest method. 
+// req, resp := client.DescribeClusterRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *EMR) DescribeClusterRequest(input *DescribeClusterInput) (req *request.Request, output *DescribeClusterOutput) {
+ op := &request.Operation{
+ Name: opDescribeCluster,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DescribeClusterInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DescribeClusterOutput{}
+ req.Data = output
+ return
+}
+
+// Provides cluster-level details including status, hardware and software configuration,
+// VPC settings, and so on. For information about the cluster steps, see ListSteps.
+func (c *EMR) DescribeCluster(input *DescribeClusterInput) (*DescribeClusterOutput, error) {
+ req, out := c.DescribeClusterRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDescribeJobFlows = "DescribeJobFlows"
+
+// DescribeJobFlowsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeJobFlows operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeJobFlows method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DescribeJobFlowsRequest method.
+// req, resp := client.DescribeJobFlowsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *EMR) DescribeJobFlowsRequest(input *DescribeJobFlowsInput) (req *request.Request, output *DescribeJobFlowsOutput) {
+ if c.Client.Config.Logger != nil {
+ c.Client.Config.Logger.Log("This operation, DescribeJobFlows, has been deprecated")
+ }
+ op := &request.Operation{
+ Name: opDescribeJobFlows,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DescribeJobFlowsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DescribeJobFlowsOutput{}
+ req.Data = output
+ return
+}
+
+// This API is deprecated and will eventually be removed. We recommend you use
+// ListClusters, DescribeCluster, ListSteps, ListInstanceGroups and ListBootstrapActions
+// instead.
+//
+// DescribeJobFlows returns a list of job flows that match all of the supplied
+// parameters. The parameters can include a list of job flow IDs, job flow states,
+// and restrictions on job flow creation date and time.
+//
+// Regardless of supplied parameters, only job flows created within the last
+// two months are returned.
+//
+// If no parameters are supplied, then job flows matching either of the following
+// criteria are returned:
+//
+//   * Job flows created and completed in the last two weeks
+//
+//   * Job flows created within the last two months that are in one of the
+//     following states: RUNNING, WAITING, SHUTTING_DOWN, STARTING
+//
+// Amazon Elastic MapReduce can return a maximum of 512 job flow descriptions.
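+//
+// As an illustrative sketch only (not one of the generated examples), a caller
+// could filter on the job flow states listed above, using the aws package's
+// String helper to build the input:
+//
+// resp, err := client.DescribeJobFlows(&emr.DescribeJobFlowsInput{
+//     JobFlowStates: []*string{aws.String("RUNNING"), aws.String("WAITING")},
+// })
+// if err == nil { // resp.JobFlows is now filled
+//     fmt.Println(resp)
+// }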
+func (c *EMR) DescribeJobFlows(input *DescribeJobFlowsInput) (*DescribeJobFlowsOutput, error) { + req, out := c.DescribeJobFlowsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeStep = "DescribeStep" + +// DescribeStepRequest generates a "aws/request.Request" representing the +// client's request for the DescribeStep operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeStep method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeStepRequest method. +// req, resp := client.DescribeStepRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EMR) DescribeStepRequest(input *DescribeStepInput) (req *request.Request, output *DescribeStepOutput) { + op := &request.Operation{ + Name: opDescribeStep, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeStepInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeStepOutput{} + req.Data = output + return +} + +// Provides more detail about the cluster step. +func (c *EMR) DescribeStep(input *DescribeStepInput) (*DescribeStepOutput, error) { + req, out := c.DescribeStepRequest(input) + err := req.Send() + return out, err +} + +const opListBootstrapActions = "ListBootstrapActions" + +// ListBootstrapActionsRequest generates a "aws/request.Request" representing the +// client's request for the ListBootstrapActions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListBootstrapActions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListBootstrapActionsRequest method. +// req, resp := client.ListBootstrapActionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EMR) ListBootstrapActionsRequest(input *ListBootstrapActionsInput) (req *request.Request, output *ListBootstrapActionsOutput) { + op := &request.Operation{ + Name: opListBootstrapActions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListBootstrapActionsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListBootstrapActionsOutput{} + req.Data = output + return +} + +// Provides information about the bootstrap actions associated with a cluster. 
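+//
+// A minimal sketch of a call (the cluster ID is a placeholder, and the ClusterId
+// field name is assumed to match the other cluster-scoped inputs in this file):
+//
+// resp, err := client.ListBootstrapActions(&emr.ListBootstrapActionsInput{
+//     ClusterId: aws.String("j-XXXXXXXXXXXXX"),
+// })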
+func (c *EMR) ListBootstrapActions(input *ListBootstrapActionsInput) (*ListBootstrapActionsOutput, error) { + req, out := c.ListBootstrapActionsRequest(input) + err := req.Send() + return out, err +} + +// ListBootstrapActionsPages iterates over the pages of a ListBootstrapActions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListBootstrapActions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListBootstrapActions operation. +// pageNum := 0 +// err := client.ListBootstrapActionsPages(params, +// func(page *ListBootstrapActionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EMR) ListBootstrapActionsPages(input *ListBootstrapActionsInput, fn func(p *ListBootstrapActionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListBootstrapActionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListBootstrapActionsOutput), lastPage) + }) +} + +const opListClusters = "ListClusters" + +// ListClustersRequest generates a "aws/request.Request" representing the +// client's request for the ListClusters operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListClusters method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListClustersRequest method. +// req, resp := client.ListClustersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EMR) ListClustersRequest(input *ListClustersInput) (req *request.Request, output *ListClustersOutput) { + op := &request.Operation{ + Name: opListClusters, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListClustersInput{} + } + + req = c.newRequest(op, input, output) + output = &ListClustersOutput{} + req.Data = output + return +} + +// Provides the status of all clusters visible to this AWS account. Allows you +// to filter the list of clusters based on certain criteria; for example, filtering +// by cluster creation date and time or by status. This call returns a maximum +// of 50 clusters per call, but returns a marker to track the paging of the +// cluster list across multiple ListClusters calls. +func (c *EMR) ListClusters(input *ListClustersInput) (*ListClustersOutput, error) { + req, out := c.ListClustersRequest(input) + err := req.Send() + return out, err +} + +// ListClustersPages iterates over the pages of a ListClusters operation, +// calling the "fn" function with the response data for each page. 
To stop +// iterating, return false from the fn function. +// +// See ListClusters method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListClusters operation. +// pageNum := 0 +// err := client.ListClustersPages(params, +// func(page *ListClustersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EMR) ListClustersPages(input *ListClustersInput, fn func(p *ListClustersOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListClustersRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListClustersOutput), lastPage) + }) +} + +const opListInstanceGroups = "ListInstanceGroups" + +// ListInstanceGroupsRequest generates a "aws/request.Request" representing the +// client's request for the ListInstanceGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListInstanceGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListInstanceGroupsRequest method. +// req, resp := client.ListInstanceGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EMR) ListInstanceGroupsRequest(input *ListInstanceGroupsInput) (req *request.Request, output *ListInstanceGroupsOutput) { + op := &request.Operation{ + Name: opListInstanceGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListInstanceGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListInstanceGroupsOutput{} + req.Data = output + return +} + +// Provides all available details about the instance groups in a cluster. +func (c *EMR) ListInstanceGroups(input *ListInstanceGroupsInput) (*ListInstanceGroupsOutput, error) { + req, out := c.ListInstanceGroupsRequest(input) + err := req.Send() + return out, err +} + +// ListInstanceGroupsPages iterates over the pages of a ListInstanceGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListInstanceGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListInstanceGroups operation. 
+// pageNum := 0 +// err := client.ListInstanceGroupsPages(params, +// func(page *ListInstanceGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EMR) ListInstanceGroupsPages(input *ListInstanceGroupsInput, fn func(p *ListInstanceGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListInstanceGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListInstanceGroupsOutput), lastPage) + }) +} + +const opListInstances = "ListInstances" + +// ListInstancesRequest generates a "aws/request.Request" representing the +// client's request for the ListInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListInstancesRequest method. +// req, resp := client.ListInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EMR) ListInstancesRequest(input *ListInstancesInput) (req *request.Request, output *ListInstancesOutput) { + op := &request.Operation{ + Name: opListInstances, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListInstancesOutput{} + req.Data = output + return +} + +// Provides information about the cluster instances that Amazon EMR provisions +// on behalf of a user when it creates the cluster. For example, this operation +// indicates when the EC2 instances reach the Ready state, when instances become +// available to Amazon EMR to use for jobs, and the IP addresses for cluster +// instances, etc. +func (c *EMR) ListInstances(input *ListInstancesInput) (*ListInstancesOutput, error) { + req, out := c.ListInstancesRequest(input) + err := req.Send() + return out, err +} + +// ListInstancesPages iterates over the pages of a ListInstances operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListInstances method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListInstances operation. 
+// pageNum := 0 +// err := client.ListInstancesPages(params, +// func(page *ListInstancesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EMR) ListInstancesPages(input *ListInstancesInput, fn func(p *ListInstancesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListInstancesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListInstancesOutput), lastPage) + }) +} + +const opListSteps = "ListSteps" + +// ListStepsRequest generates a "aws/request.Request" representing the +// client's request for the ListSteps operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListSteps method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListStepsRequest method. +// req, resp := client.ListStepsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EMR) ListStepsRequest(input *ListStepsInput) (req *request.Request, output *ListStepsOutput) { + op := &request.Operation{ + Name: opListSteps, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListStepsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListStepsOutput{} + req.Data = output + return +} + +// Provides a list of steps for the cluster. +func (c *EMR) ListSteps(input *ListStepsInput) (*ListStepsOutput, error) { + req, out := c.ListStepsRequest(input) + err := req.Send() + return out, err +} + +// ListStepsPages iterates over the pages of a ListSteps operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListSteps method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListSteps operation. +// pageNum := 0 +// err := client.ListStepsPages(params, +// func(page *ListStepsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EMR) ListStepsPages(input *ListStepsInput, fn func(p *ListStepsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListStepsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListStepsOutput), lastPage) + }) +} + +const opModifyInstanceGroups = "ModifyInstanceGroups" + +// ModifyInstanceGroupsRequest generates a "aws/request.Request" representing the +// client's request for the ModifyInstanceGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyInstanceGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyInstanceGroupsRequest method. +// req, resp := client.ModifyInstanceGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EMR) ModifyInstanceGroupsRequest(input *ModifyInstanceGroupsInput) (req *request.Request, output *ModifyInstanceGroupsOutput) { + op := &request.Operation{ + Name: opModifyInstanceGroups, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyInstanceGroupsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ModifyInstanceGroupsOutput{} + req.Data = output + return +} + +// ModifyInstanceGroups modifies the number of nodes and configuration settings +// of an instance group. The input parameters include the new target instance +// count for the group and the instance group ID. The call will either succeed +// or fail atomically. +func (c *EMR) ModifyInstanceGroups(input *ModifyInstanceGroupsInput) (*ModifyInstanceGroupsOutput, error) { + req, out := c.ModifyInstanceGroupsRequest(input) + err := req.Send() + return out, err +} + +const opRemoveTags = "RemoveTags" + +// RemoveTagsRequest generates a "aws/request.Request" representing the +// client's request for the RemoveTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RemoveTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RemoveTagsRequest method. +// req, resp := client.RemoveTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EMR) RemoveTagsRequest(input *RemoveTagsInput) (req *request.Request, output *RemoveTagsOutput) { + op := &request.Operation{ + Name: opRemoveTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &RemoveTagsOutput{} + req.Data = output + return +} + +// Removes tags from an Amazon EMR resource. Tags make it easier to associate +// clusters in various ways, such as grouping clusters to track your Amazon +// EMR resource allocation costs. For more information, see Tagging Amazon EMR +// Resources (http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/emr-plan-tags.html). 
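+//
+// For example, removing the stack tag with value Prod from a cluster might look
+// like the following sketch, where the cluster ID is a placeholder and the
+// TagKeys field name is an assumption about this SDK's RemoveTagsInput:
+//
+// resp, err := client.RemoveTags(&emr.RemoveTagsInput{
+//     ResourceId: aws.String("j-XXXXXXXXXXXXX"),
+//     TagKeys:    []*string{aws.String("stack")},
+// })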
+func (c *EMR) RemoveTags(input *RemoveTagsInput) (*RemoveTagsOutput, error) {
+ req, out := c.RemoveTagsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opRunJobFlow = "RunJobFlow"
+
+// RunJobFlowRequest generates a "aws/request.Request" representing the
+// client's request for the RunJobFlow operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the RunJobFlow method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the RunJobFlowRequest method.
+// req, resp := client.RunJobFlowRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *EMR) RunJobFlowRequest(input *RunJobFlowInput) (req *request.Request, output *RunJobFlowOutput) {
+ op := &request.Operation{
+ Name: opRunJobFlow,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &RunJobFlowInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &RunJobFlowOutput{}
+ req.Data = output
+ return
+}
+
+// RunJobFlow creates and starts running a new job flow. The job flow will run
+// the steps specified. Once the job flow completes, the cluster is stopped
+// and the HDFS partition is lost. To prevent loss of data, configure the last
+// step of the job flow to store results in Amazon S3. If the JobFlowInstancesConfig
+// KeepJobFlowAliveWhenNoSteps parameter is set to TRUE, the job flow will transition
+// to the WAITING state rather than shutting down once the steps have completed.
+//
+// For additional protection, you can set the JobFlowInstancesConfig TerminationProtected
+// parameter to TRUE to lock the job flow and prevent it from being terminated
+// by API call, user intervention, or in the event of a job flow error.
+//
+// A maximum of 256 steps are allowed in each job flow.
+//
+// If your job flow is long-running (such as a Hive data warehouse) or complex,
+// you may require more than 256 steps to process your data. You can bypass
+// the 256-step limitation in various ways, including using the SSH shell to
+// connect to the master node and submitting queries directly to the software
+// running on the master node, such as Hive and Hadoop. For more information
+// on how to do this, go to Add More than 256 Steps to a Job Flow (http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/AddMoreThan256Steps.html)
+// in the Amazon Elastic MapReduce Developer's Guide.
+//
+// For long-running job flows, we recommend that you periodically store your
+// results.
+func (c *EMR) RunJobFlow(input *RunJobFlowInput) (*RunJobFlowOutput, error) {
+ req, out := c.RunJobFlowRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opSetTerminationProtection = "SetTerminationProtection"
+
+// SetTerminationProtectionRequest generates a "aws/request.Request" representing the
+// client's request for the SetTerminationProtection operation.
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetTerminationProtection method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetTerminationProtectionRequest method. +// req, resp := client.SetTerminationProtectionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EMR) SetTerminationProtectionRequest(input *SetTerminationProtectionInput) (req *request.Request, output *SetTerminationProtectionOutput) { + op := &request.Operation{ + Name: opSetTerminationProtection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetTerminationProtectionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetTerminationProtectionOutput{} + req.Data = output + return +} + +// SetTerminationProtection locks a job flow so the Amazon EC2 instances in +// the cluster cannot be terminated by user intervention, an API call, or in +// the event of a job-flow error. The cluster still terminates upon successful +// completion of the job flow. Calling SetTerminationProtection on a job flow +// is analogous to calling the Amazon EC2 DisableAPITermination API on all of +// the EC2 instances in a cluster. +// +// SetTerminationProtection is used to prevent accidental termination of a +// job flow and to ensure that in the event of an error, the instances will +// persist so you can recover any data stored in their ephemeral instance storage. +// +// To terminate a job flow that has been locked by setting SetTerminationProtection +// to true, you must first unlock the job flow by a subsequent call to SetTerminationProtection +// in which you set the value to false. +// +// For more information, go to Protecting a Job Flow from Termination (http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/UsingEMR_TerminationProtection.html) +// in the Amazon Elastic MapReduce Developer's Guide. +func (c *EMR) SetTerminationProtection(input *SetTerminationProtectionInput) (*SetTerminationProtectionOutput, error) { + req, out := c.SetTerminationProtectionRequest(input) + err := req.Send() + return out, err +} + +const opSetVisibleToAllUsers = "SetVisibleToAllUsers" + +// SetVisibleToAllUsersRequest generates a "aws/request.Request" representing the +// client's request for the SetVisibleToAllUsers operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetVisibleToAllUsers method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetVisibleToAllUsersRequest method. +// req, resp := client.SetVisibleToAllUsersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EMR) SetVisibleToAllUsersRequest(input *SetVisibleToAllUsersInput) (req *request.Request, output *SetVisibleToAllUsersOutput) { + op := &request.Operation{ + Name: opSetVisibleToAllUsers, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetVisibleToAllUsersInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetVisibleToAllUsersOutput{} + req.Data = output + return +} + +// Sets whether all AWS Identity and Access Management (IAM) users under your +// account can access the specified job flows. This action works on running +// job flows. You can also set the visibility of a job flow when you launch +// it using the VisibleToAllUsers parameter of RunJobFlow. The SetVisibleToAllUsers +// action can be called only by an IAM user who created the job flow or the +// AWS account that owns the job flow. +func (c *EMR) SetVisibleToAllUsers(input *SetVisibleToAllUsersInput) (*SetVisibleToAllUsersOutput, error) { + req, out := c.SetVisibleToAllUsersRequest(input) + err := req.Send() + return out, err +} + +const opTerminateJobFlows = "TerminateJobFlows" + +// TerminateJobFlowsRequest generates a "aws/request.Request" representing the +// client's request for the TerminateJobFlows operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the TerminateJobFlows method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the TerminateJobFlowsRequest method. +// req, resp := client.TerminateJobFlowsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EMR) TerminateJobFlowsRequest(input *TerminateJobFlowsInput) (req *request.Request, output *TerminateJobFlowsOutput) { + op := &request.Operation{ + Name: opTerminateJobFlows, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TerminateJobFlowsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &TerminateJobFlowsOutput{} + req.Data = output + return +} + +// TerminateJobFlows shuts a list of job flows down. When a job flow is shut +// down, any step not yet completed is canceled and the EC2 instances on which +// the job flow is running are stopped. Any log files not already saved are +// uploaded to Amazon S3 if a LogUri was specified when the job flow was created. +// +// The maximum number of JobFlows allowed is 10. The call to TerminateJobFlows +// is asynchronous. 
Depending on the configuration of the job flow, it may take +// up to 5-20 minutes for the job flow to completely terminate and release allocated +// resources, such as Amazon EC2 instances. +func (c *EMR) TerminateJobFlows(input *TerminateJobFlowsInput) (*TerminateJobFlowsOutput, error) { + req, out := c.TerminateJobFlowsRequest(input) + err := req.Send() + return out, err +} + +// Input to an AddInstanceGroups call. +type AddInstanceGroupsInput struct { + _ struct{} `type:"structure"` + + // Instance Groups to add. + InstanceGroups []*InstanceGroupConfig `type:"list" required:"true"` + + // Job flow in which to add the instance groups. + JobFlowId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AddInstanceGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddInstanceGroupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddInstanceGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddInstanceGroupsInput"} + if s.InstanceGroups == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceGroups")) + } + if s.JobFlowId == nil { + invalidParams.Add(request.NewErrParamRequired("JobFlowId")) + } + if s.InstanceGroups != nil { + for i, v := range s.InstanceGroups { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InstanceGroups", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Output from an AddInstanceGroups call. +type AddInstanceGroupsOutput struct { + _ struct{} `type:"structure"` + + // Instance group IDs of the newly created instance groups. + InstanceGroupIds []*string `type:"list"` + + // The job flow ID in which the instance groups are added. + JobFlowId *string `type:"string"` +} + +// String returns the string representation +func (s AddInstanceGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddInstanceGroupsOutput) GoString() string { + return s.String() +} + +// The input argument to the AddJobFlowSteps operation. +type AddJobFlowStepsInput struct { + _ struct{} `type:"structure"` + + // A string that uniquely identifies the job flow. This identifier is returned + // by RunJobFlow and can also be obtained from ListClusters. + JobFlowId *string `type:"string" required:"true"` + + // A list of StepConfig to be executed by the job flow. + Steps []*StepConfig `type:"list" required:"true"` +} + +// String returns the string representation +func (s AddJobFlowStepsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddJobFlowStepsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
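+// Validation also runs automatically as part of the request lifecycle before
+// the request is sent, so calling it directly is optional; a hedged sketch:
+//
+// if err := input.Validate(); err != nil {
+//     return err // a request.ErrInvalidParams naming each missing field
+// }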
+func (s *AddJobFlowStepsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AddJobFlowStepsInput"}
+ if s.JobFlowId == nil {
+ invalidParams.Add(request.NewErrParamRequired("JobFlowId"))
+ }
+ if s.Steps == nil {
+ invalidParams.Add(request.NewErrParamRequired("Steps"))
+ }
+ if s.Steps != nil {
+ for i, v := range s.Steps {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Steps", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// The output for the AddJobFlowSteps operation.
+type AddJobFlowStepsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The identifiers of the list of steps added to the job flow.
+ StepIds []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s AddJobFlowStepsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddJobFlowStepsOutput) GoString() string {
+ return s.String()
+}
+
+// This input identifies a cluster and a list of tags to attach.
+type AddTagsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon EMR resource identifier to which tags will be added. This value
+ // must be a cluster identifier.
+ ResourceId *string `type:"string" required:"true"`
+
+ // A list of tags to associate with a cluster and propagate to Amazon EC2 instances.
+ // Tags are user-defined key/value pairs that consist of a required key string
+ // with a maximum of 128 characters, and an optional value string with a maximum
+ // of 256 characters.
+ Tags []*Tag `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s AddTagsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddTagsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AddTagsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AddTagsInput"}
+ if s.ResourceId == nil {
+ invalidParams.Add(request.NewErrParamRequired("ResourceId"))
+ }
+ if s.Tags == nil {
+ invalidParams.Add(request.NewErrParamRequired("Tags"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// This output indicates the result of adding tags to a resource.
+type AddTagsOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s AddTagsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddTagsOutput) GoString() string {
+ return s.String()
+}
+
+// An application is any Amazon or third-party software that you can add to
+// the cluster. This structure contains a list of strings that indicates the
+// software to use with the cluster and accepts a user argument list. Amazon
+// EMR accepts and forwards the argument list to the corresponding installation
+// script as bootstrap action argument. For more information, see Launch a Job
+// Flow on the MapR Distribution for Hadoop (http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/emr-mapr.html).
+// Currently supported values are:
+//
+//   * "mapr-m3" - launch the job flow using MapR M3 Edition.
+//
+//   * "mapr-m5" - launch the job flow using MapR M5 Edition.
+//
+//   * "mapr" with the user arguments specifying "--edition,m3" or "--edition,m5"
+//     - launch the job flow using MapR M3 or M5 Edition, respectively.
+//
+// In Amazon EMR releases 4.0 and greater, the only accepted parameter is the
+// application name. To pass arguments to applications, you supply a configuration
+// for each application.
+type Application struct {
+ _ struct{} `type:"structure"`
+
+ // This option is for advanced users only. This is meta information about third-party
+ // applications that third-party vendors use for testing purposes.
+ AdditionalInfo map[string]*string `type:"map"`
+
+ // Arguments for Amazon EMR to pass to the application.
+ Args []*string `type:"list"`
+
+ // The name of the application.
+ Name *string `type:"string"`
+
+ // The version of the application.
+ Version *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Application) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Application) GoString() string {
+ return s.String()
+}
+
+// Configuration of a bootstrap action.
+type BootstrapActionConfig struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the bootstrap action.
+ Name *string `type:"string" required:"true"`
+
+ // The script run by the bootstrap action.
+ ScriptBootstrapAction *ScriptBootstrapActionConfig `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s BootstrapActionConfig) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BootstrapActionConfig) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *BootstrapActionConfig) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "BootstrapActionConfig"}
+ if s.Name == nil {
+ invalidParams.Add(request.NewErrParamRequired("Name"))
+ }
+ if s.ScriptBootstrapAction == nil {
+ invalidParams.Add(request.NewErrParamRequired("ScriptBootstrapAction"))
+ }
+ if s.ScriptBootstrapAction != nil {
+ if err := s.ScriptBootstrapAction.Validate(); err != nil {
+ invalidParams.AddNested("ScriptBootstrapAction", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Reports the configuration of a bootstrap action in a job flow.
+type BootstrapActionDetail struct {
+ _ struct{} `type:"structure"`
+
+ // A description of the bootstrap action.
+ BootstrapActionConfig *BootstrapActionConfig `type:"structure"`
+}
+
+// String returns the string representation
+func (s BootstrapActionDetail) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BootstrapActionDetail) GoString() string {
+ return s.String()
+}
+
+// The detailed description of the cluster.
+type Cluster struct {
+ _ struct{} `type:"structure"`
+
+ // The applications installed on this cluster.
+ Applications []*Application `type:"list"`
+
+ // Specifies whether the cluster should terminate after completing all steps.
+ AutoTerminate *bool `type:"boolean"`
+
+ // Amazon EMR releases 4.x or later.
+ //
+ // The list of Configurations supplied to the EMR cluster.
+ Configurations []*Configuration `type:"list"`
+
+ // Provides information about the EC2 instances in a cluster grouped by category.
+ // For example, key name, subnet ID, IAM instance profile, and so on.
+ Ec2InstanceAttributes *Ec2InstanceAttributes `type:"structure"`
+
+ // The unique identifier for the cluster.
+ Id *string `type:"string"`
+
+ // The path to the Amazon S3 location where logs for this cluster are stored.
+ LogUri *string `type:"string"`
+
+ // The public DNS name of the master EC2 instance.
+ MasterPublicDnsName *string `type:"string"`
+
+ // The name of the cluster.
+ Name *string `type:"string"`
+
+ // An approximation of the cost of the job flow, represented in m1.small/hours.
+ // This value is incremented one time for every hour an m1.small instance runs.
+ // Larger instances are weighted more, so an EC2 instance that is roughly four
+ // times more expensive would result in the normalized instance hours being
+ // incremented by four. This result is only an approximation and does not reflect
+ // the actual billing rate.
+ NormalizedInstanceHours *int64 `type:"integer"`
+
+ // The release label for the Amazon EMR release. For Amazon EMR 3.x and 2.x
+ // AMIs, use amiVersion instead of ReleaseLabel.
+ ReleaseLabel *string `type:"string"`
+
+ // The AMI version requested for this cluster.
+ RequestedAmiVersion *string `type:"string"`
+
+ // The AMI version running on this cluster.
+ RunningAmiVersion *string `type:"string"`
+
+ // The IAM role that will be assumed by the Amazon EMR service to access AWS
+ // resources on your behalf.
+ ServiceRole *string `type:"string"`
+
+ // The current status details about the cluster.
+ Status *ClusterStatus `type:"structure"`
+
+ // A list of tags associated with a cluster.
+ Tags []*Tag `type:"list"`
+
+ // Indicates whether Amazon EMR will lock the cluster to prevent the EC2 instances
+ // from being terminated by an API call or user intervention, or in the event
+ // of a cluster error.
+ TerminationProtected *bool `type:"boolean"`
+
+ // Indicates whether the job flow is visible to all IAM users of the AWS account
+ // associated with the job flow. If this value is set to true, all IAM users
+ // of that AWS account can view and manage the job flow if they have the proper
+ // policy permissions set. If this value is false, only the IAM user that created
+ // the cluster can view and manage it. This value can be changed using the SetVisibleToAllUsers
+ // action.
+ VisibleToAllUsers *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s Cluster) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Cluster) GoString() string {
+ return s.String()
+}
+
+// The reason that the cluster changed to its current state.
+type ClusterStateChangeReason struct {
+ _ struct{} `type:"structure"`
+
+ // The programmatic code for the state change reason.
+ Code *string `type:"string" enum:"ClusterStateChangeReasonCode"`
+
+ // The descriptive message for the state change reason.
+ Message *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ClusterStateChangeReason) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ClusterStateChangeReason) GoString() string {
+ return s.String()
+}
+
+// The detailed status of the cluster.
+type ClusterStatus struct {
+ _ struct{} `type:"structure"`
+
+ // The current state of the cluster.
+ State *string `type:"string" enum:"ClusterState"`
+
+ // The reason for the cluster status change.
+ StateChangeReason *ClusterStateChangeReason `type:"structure"`
+
+ // A timeline that represents the status of a cluster over the lifetime of the
+ // cluster.
+ Timeline *ClusterTimeline `type:"structure"`
+}
+
+// String returns the string representation
+func (s ClusterStatus) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ClusterStatus) GoString() string {
+ return s.String()
+}
+
+// The summary description of the cluster.
+type ClusterSummary struct {
+ _ struct{} `type:"structure"`
+
+ // The unique identifier for the cluster.
+ Id *string `type:"string"`
+
+ // The name of the cluster.
+ Name *string `type:"string"`
+
+ // An approximation of the cost of the job flow, represented in m1.small/hours.
+ // This value is incremented one time for every hour an m1.small instance runs.
+ // Larger instances are weighted more, so an EC2 instance that is roughly four
+ // times more expensive would result in the normalized instance hours being
+ // incremented by four. This result is only an approximation and does not reflect
+ // the actual billing rate.
+ NormalizedInstanceHours *int64 `type:"integer"`
+
+ // The details about the current status of the cluster.
+ Status *ClusterStatus `type:"structure"`
+}
+
+// String returns the string representation
+func (s ClusterSummary) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ClusterSummary) GoString() string {
+ return s.String()
+}
+
+// Represents the timeline of the cluster's lifecycle.
+type ClusterTimeline struct {
+ _ struct{} `type:"structure"`
+
+ // The creation date and time of the cluster.
+ CreationDateTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The date and time when the cluster was terminated.
+ EndDateTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The date and time when the cluster was ready to execute steps.
+ ReadyDateTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+}
+
+// String returns the string representation
+func (s ClusterTimeline) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ClusterTimeline) GoString() string {
+ return s.String()
+}
+
+// An entity describing an executable that runs on a cluster.
+type Command struct {
+ _ struct{} `type:"structure"`
+
+ // Arguments for Amazon EMR to pass to the command for execution.
+ Args []*string `type:"list"`
+
+ // The name of the command.
+ Name *string `type:"string"`
+
+ // The Amazon S3 location of the command script.
+ ScriptPath *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Command) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Command) GoString() string {
+ return s.String()
+}
+
+// Amazon EMR releases 4.x or later.
+//
+// Specifies a hardware and software configuration of the EMR cluster. This
+// includes configurations for applications and software bundled with Amazon
+// EMR. The Configuration object is a JSON object which is defined by a classification
+// and a set of properties. Configurations can be nested, so a configuration
+// may have its own Configuration objects listed.
+type Configuration struct {
+ _ struct{} `type:"structure"`
+
+ // The classification of a configuration. For more information, see Amazon EMR
+ // Configurations (http://docs.aws.amazon.com/ElasticMapReduce/latest/API/EmrConfigurations.html).
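+ // Typical classification values include names such as "hadoop-env" or
+ // "spark-defaults"; these are illustrative examples, not an exhaustive list.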
+ Classification *string `type:"string"` + + // A list of configurations you apply to this configuration object. + Configurations []*Configuration `type:"list"` + + // A set of properties supplied to the Configuration object. + Properties map[string]*string `type:"map"` +} + +// String returns the string representation +func (s Configuration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Configuration) GoString() string { + return s.String() +} + +// This input determines which cluster to describe. +type DescribeClusterInput struct { + _ struct{} `type:"structure"` + + // The identifier of the cluster to describe. + ClusterId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClusterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeClusterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeClusterInput"} + if s.ClusterId == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// This output contains the description of the cluster. +type DescribeClusterOutput struct { + _ struct{} `type:"structure"` + + // This output contains the details for the requested cluster. + Cluster *Cluster `type:"structure"` +} + +// String returns the string representation +func (s DescribeClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClusterOutput) GoString() string { + return s.String() +} + +// The input for the DescribeJobFlows operation. +type DescribeJobFlowsInput struct { + _ struct{} `type:"structure"` + + // Return only job flows created after this date and time. + CreatedAfter *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Return only job flows created before this date and time. + CreatedBefore *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Return only job flows whose job flow ID is contained in this list. + JobFlowIds []*string `type:"list"` + + // Return only job flows whose state is contained in this list. + JobFlowStates []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeJobFlowsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeJobFlowsInput) GoString() string { + return s.String() +} + +// The output for the DescribeJobFlows operation. +type DescribeJobFlowsOutput struct { + _ struct{} `type:"structure"` + + // A list of job flows matching the parameters supplied. + JobFlows []*JobFlowDetail `type:"list"` +} + +// String returns the string representation +func (s DescribeJobFlowsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeJobFlowsOutput) GoString() string { + return s.String() +} + +// This input determines which step to describe. +type DescribeStepInput struct { + _ struct{} `type:"structure"` + + // The identifier of the cluster with steps to describe. + ClusterId *string `type:"string" required:"true"` + + // The identifier of the step to describe. 
+ StepId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeStepInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeStepInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeStepInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeStepInput"}
+ if s.ClusterId == nil {
+ invalidParams.Add(request.NewErrParamRequired("ClusterId"))
+ }
+ if s.StepId == nil {
+ invalidParams.Add(request.NewErrParamRequired("StepId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// This output contains the description of the cluster step.
+type DescribeStepOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The step details for the requested step identifier.
+ Step *Step `type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeStepOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeStepOutput) GoString() string {
+ return s.String()
+}
+
+// Configuration of a requested EBS block device associated with the instance
+// group.
+type EbsBlockDevice struct {
+ _ struct{} `type:"structure"`
+
+ // The device name that is exposed to the instance, such as /dev/sdh.
+ Device *string `type:"string"`
+
+ // EBS volume specifications such as volume type, IOPS, and size (GiB) that will
+ // be requested for the EBS volume attached to an EC2 instance in the cluster.
+ VolumeSpecification *VolumeSpecification `type:"structure"`
+}
+
+// String returns the string representation
+func (s EbsBlockDevice) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EbsBlockDevice) GoString() string {
+ return s.String()
+}
+
+// Configuration of a requested EBS block device associated with the instance
+// group, including the number of volumes that will be associated with every
+// instance.
+type EbsBlockDeviceConfig struct {
+ _ struct{} `type:"structure"`
+
+ // EBS volume specifications such as volume type, IOPS, and size (GiB) that will
+ // be requested for the EBS volume attached to an EC2 instance in the cluster.
+ VolumeSpecification *VolumeSpecification `type:"structure" required:"true"`
+
+ // The number of EBS volumes with a specific volume configuration that will be
+ // associated with every instance in the instance group.
+ VolumesPerInstance *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s EbsBlockDeviceConfig) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EbsBlockDeviceConfig) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
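+//
+// For illustration only (an editorial sketch, not part of the generated API),
+// a minimally valid EbsBlockDeviceConfig supplies the required VolumeSpecification;
+// the volume type and sizes below are placeholder values:
+//
+//     cfg := &emr.EbsBlockDeviceConfig{
+//         VolumeSpecification: &emr.VolumeSpecification{
+//             VolumeType: aws.String("gp2"),
+//             SizeInGB:   aws.Int64(100),
+//         },
+//         VolumesPerInstance: aws.Int64(1),
+//     }
+//     if err := cfg.Validate(); err != nil {
+//         // a missing VolumeSpecification would be reported here
+//     }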
+func (s *EbsBlockDeviceConfig) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "EbsBlockDeviceConfig"}
+ if s.VolumeSpecification == nil {
+ invalidParams.Add(request.NewErrParamRequired("VolumeSpecification"))
+ }
+ if s.VolumeSpecification != nil {
+ if err := s.VolumeSpecification.Validate(); err != nil {
+ invalidParams.AddNested("VolumeSpecification", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// The Amazon EBS configuration of a cluster instance.
+type EbsConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // An array of Amazon EBS volume specifications attached to a cluster instance.
+ EbsBlockDeviceConfigs []*EbsBlockDeviceConfig `type:"list"`
+
+ // Indicates whether an Amazon EBS volume is EBS-optimized.
+ EbsOptimized *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s EbsConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EbsConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *EbsConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "EbsConfiguration"}
+ if s.EbsBlockDeviceConfigs != nil {
+ for i, v := range s.EbsBlockDeviceConfigs {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "EbsBlockDeviceConfigs", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// An EBS block device that's attached to an EC2 instance.
+type EbsVolume struct {
+ _ struct{} `type:"structure"`
+
+ // The device name that is exposed to the instance, such as /dev/sdh.
+ Device *string `type:"string"`
+
+ // The volume identifier of the EBS volume.
+ VolumeId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s EbsVolume) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EbsVolume) GoString() string {
+ return s.String()
+}
+
+// Provides information about the EC2 instances in a cluster grouped by category.
+// For example, key name, subnet ID, IAM instance profile, and so on.
+type Ec2InstanceAttributes struct {
+ _ struct{} `type:"structure"`
+
+ // A list of additional Amazon EC2 security group IDs for the master node.
+ AdditionalMasterSecurityGroups []*string `type:"list"`
+
+ // A list of additional Amazon EC2 security group IDs for the slave nodes.
+ AdditionalSlaveSecurityGroups []*string `type:"list"`
+
+ // The Availability Zone in which the cluster will run.
+ Ec2AvailabilityZone *string `type:"string"`
+
+ // The name of the Amazon EC2 key pair to use when connecting with SSH into
+ // the master node as a user named "hadoop".
+ Ec2KeyName *string `type:"string"`
+
+ // To launch the job flow in Amazon VPC, set this parameter to the identifier
+ // of the Amazon VPC subnet where you want the job flow to launch. If you do
+ // not specify this value, the job flow is launched in the normal AWS cloud,
+ // outside of a VPC.
+ //
+ // Amazon VPC currently does not support cluster compute quadruple extra large
+ // (cc1.4xlarge) instances. Thus, you cannot specify the cc1.4xlarge instance
+ // type for nodes of a job flow launched in a VPC.
+ Ec2SubnetId *string `type:"string"`
+
+ // The identifier of the Amazon EC2 security group for the master node.
+ EmrManagedMasterSecurityGroup *string `type:"string"`
+
+ // The identifier of the Amazon EC2 security group for the slave nodes.
+ EmrManagedSlaveSecurityGroup *string `type:"string"`
+
+ // The IAM role that was specified when the job flow was launched. The EC2 instances
+ // of the job flow assume this role.
+ IamInstanceProfile *string `type:"string"`
+
+ // The identifier of the Amazon EC2 security group for the Amazon EMR service
+ // to access clusters in VPC private subnets.
+ ServiceAccessSecurityGroup *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Ec2InstanceAttributes) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Ec2InstanceAttributes) GoString() string {
+ return s.String()
+}
+
+// A job flow step consisting of a JAR file whose main function will be executed.
+// The main function submits a job for Hadoop to execute and waits for the job
+// to finish or fail.
+type HadoopJarStepConfig struct {
+ _ struct{} `type:"structure"`
+
+ // A list of command line arguments passed to the JAR file's main function when
+ // executed.
+ Args []*string `type:"list"`
+
+ // A path to a JAR file run during the step.
+ Jar *string `type:"string" required:"true"`
+
+ // The name of the main class in the specified Java file. If not specified,
+ // the JAR file should specify a Main-Class in its manifest file.
+ MainClass *string `type:"string"`
+
+ // A list of Java properties that are set when the step runs. You can use these
+ // properties to pass key-value pairs to your main function.
+ Properties []*KeyValue `type:"list"`
+}
+
+// String returns the string representation
+func (s HadoopJarStepConfig) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HadoopJarStepConfig) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *HadoopJarStepConfig) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "HadoopJarStepConfig"}
+ if s.Jar == nil {
+ invalidParams.Add(request.NewErrParamRequired("Jar"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// A cluster step consisting of a JAR file whose main function will be executed.
+// The main function submits a job for Hadoop to execute and waits for the job
+// to finish or fail.
+type HadoopStepConfig struct {
+ _ struct{} `type:"structure"`
+
+ // The list of command line arguments to pass to the JAR file's main function
+ // for execution.
+ Args []*string `type:"list"`
+
+ // The path to the JAR file that runs during the step.
+ Jar *string `type:"string"`
+
+ // The name of the main class in the specified Java file. If not specified,
+ // the JAR file should specify a main class in its manifest file.
+ MainClass *string `type:"string"`
+
+ // The list of Java properties that are set when the step runs. You can use
+ // these properties to pass key-value pairs to your main function.
+ Properties map[string]*string `type:"map"`
+}
+
+// String returns the string representation
+func (s HadoopStepConfig) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HadoopStepConfig) GoString() string {
+ return s.String()
+}
+
+// Represents an EC2 instance provisioned as part of a cluster.
+type Instance struct {
+ _ struct{} `type:"structure"`
+
+ // The list of EBS volumes that are attached to this instance.
+ EbsVolumes []*EbsVolume `type:"list"`
+
+ // The unique identifier of the instance in Amazon EC2.
+ Ec2InstanceId *string `type:"string"`
+
+ // The unique identifier for the instance in Amazon EMR.
+ Id *string `type:"string"`
+
+ // The identifier of the instance group to which this instance belongs.
+ InstanceGroupId *string `type:"string"`
+
+ // The private DNS name of the instance.
+ PrivateDnsName *string `type:"string"`
+
+ // The private IP address of the instance.
+ PrivateIpAddress *string `type:"string"`
+
+ // The public DNS name of the instance.
+ PublicDnsName *string `type:"string"`
+
+ // The public IP address of the instance.
+ PublicIpAddress *string `type:"string"`
+
+ // The current status of the instance.
+ Status *InstanceStatus `type:"structure"`
+}
+
+// String returns the string representation
+func (s Instance) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Instance) GoString() string {
+ return s.String()
+}
+
+// This entity represents an instance group, which is a group of instances that
+// have a common purpose. For example, the CORE instance group is used for HDFS.
+type InstanceGroup struct {
+ _ struct{} `type:"structure"`
+
+ // The bid price for each EC2 instance in the instance group when launching
+ // nodes as Spot Instances, expressed in USD.
+ BidPrice *string `type:"string"`
+
+ // Amazon EMR releases 4.x or later.
+ //
+ // The list of configurations supplied for an EMR cluster instance group. You
+ // can specify a separate configuration for each instance group (master, core,
+ // and task).
+ Configurations []*Configuration `type:"list"`
+
+ // The EBS block devices that are mapped to this instance group.
+ EbsBlockDevices []*EbsBlockDevice `type:"list"`
+
+ // Indicates whether the instance group is EBS-optimized. An Amazon EBS-optimized
+ // instance uses an optimized configuration stack and provides additional, dedicated
+ // capacity for Amazon EBS I/O.
+ EbsOptimized *bool `type:"boolean"`
+
+ // The identifier of the instance group.
+ Id *string `type:"string"`
+
+ // The type of the instance group. Valid values are MASTER, CORE, or TASK.
+ InstanceGroupType *string `type:"string" enum:"InstanceGroupType"`
+
+ // The EC2 instance type for all instances in the instance group.
+ InstanceType *string `min:"1" type:"string"`
+
+ // The marketplace to provision instances for this group. Valid values are ON_DEMAND
+ // or SPOT.
+ Market *string `type:"string" enum:"MarketType"`
+
+ // The name of the instance group.
+ Name *string `type:"string"`
+
+ // The target number of instances for the instance group.
+ RequestedInstanceCount *int64 `type:"integer"`
+
+ // The number of instances currently running in this instance group.
+ RunningInstanceCount *int64 `type:"integer"`
+
+ // Policy for customizing shrink operations.
+ ShrinkPolicy *ShrinkPolicy `type:"structure"`
+
+ // The current status of the instance group.
+ Status *InstanceGroupStatus `type:"structure"`
+}
+
+// String returns the string representation
+func (s InstanceGroup) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceGroup) GoString() string {
+ return s.String()
+}
+
+// Configuration defining a new instance group.
+type InstanceGroupConfig struct {
+ _ struct{} `type:"structure"`
+
+ // Bid price for each Amazon EC2 instance in the instance group when launching
+ // nodes as Spot Instances, expressed in USD.
+ BidPrice *string `type:"string"`
+
+ // Amazon EMR releases 4.x or later.
+ //
+ // The list of configurations supplied for an EMR cluster instance group.
You
+ // can specify a separate configuration for each instance group (master, core,
+ // and task).
+ Configurations []*Configuration `type:"list"`
+
+ // EBS configurations that will be attached to each Amazon EC2 instance in the
+ // instance group.
+ EbsConfiguration *EbsConfiguration `type:"structure"`
+
+ // Target number of instances for the instance group.
+ InstanceCount *int64 `type:"integer" required:"true"`
+
+ // The role of the instance group in the cluster.
+ InstanceRole *string `type:"string" required:"true" enum:"InstanceRoleType"`
+
+ // The Amazon EC2 instance type for all instances in the instance group.
+ InstanceType *string `min:"1" type:"string" required:"true"`
+
+ // Market type of the Amazon EC2 instances used to create a cluster node.
+ Market *string `type:"string" enum:"MarketType"`
+
+ // Friendly name given to the instance group.
+ Name *string `type:"string"`
+}
+
+// String returns the string representation
+func (s InstanceGroupConfig) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceGroupConfig) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InstanceGroupConfig) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "InstanceGroupConfig"}
+ if s.InstanceCount == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceCount"))
+ }
+ if s.InstanceRole == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceRole"))
+ }
+ if s.InstanceType == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceType"))
+ }
+ if s.InstanceType != nil && len(*s.InstanceType) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("InstanceType", 1))
+ }
+ if s.EbsConfiguration != nil {
+ if err := s.EbsConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("EbsConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Detailed information about an instance group.
+type InstanceGroupDetail struct {
+ _ struct{} `type:"structure"`
+
+ // Bid price for EC2 instances when launching nodes as Spot Instances, expressed
+ // in USD.
+ BidPrice *string `type:"string"`
+
+ // The date/time the instance group was created.
+ CreationDateTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"`
+
+ // The date/time the instance group was terminated.
+ EndDateTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // Unique identifier for the instance group.
+ InstanceGroupId *string `type:"string"`
+
+ // Target number of instances to run in the instance group.
+ InstanceRequestCount *int64 `type:"integer" required:"true"`
+
+ // Instance group role in the cluster.
+ InstanceRole *string `type:"string" required:"true" enum:"InstanceRoleType"`
+
+ // Actual count of running instances.
+ InstanceRunningCount *int64 `type:"integer" required:"true"`
+
+ // Amazon EC2 instance type.
+ InstanceType *string `min:"1" type:"string" required:"true"`
+
+ // Details regarding the state of the instance group.
+ LastStateChangeReason *string `type:"string"`
+
+ // Market type of the Amazon EC2 instances used to create a cluster node.
+ Market *string `type:"string" required:"true" enum:"MarketType"`
+
+ // Friendly name for the instance group.
+ Name *string `type:"string"`
+
+ // The date/time the instance group was available to the cluster.
+ ReadyDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The date/time the instance group was started. + StartDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // State of instance group. The following values are deprecated: STARTING, TERMINATED, + // and FAILED. + State *string `type:"string" required:"true" enum:"InstanceGroupState"` +} + +// String returns the string representation +func (s InstanceGroupDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceGroupDetail) GoString() string { + return s.String() +} + +// Modify an instance group size. +type InstanceGroupModifyConfig struct { + _ struct{} `type:"structure"` + + // The EC2 InstanceIds to terminate. Once you terminate the instances, the instance + // group will not return to its original requested size. + EC2InstanceIdsToTerminate []*string `type:"list"` + + // Target size for the instance group. + InstanceCount *int64 `type:"integer"` + + // Unique ID of the instance group to expand or shrink. + InstanceGroupId *string `type:"string" required:"true"` + + // Policy for customizing shrink operations. + ShrinkPolicy *ShrinkPolicy `type:"structure"` +} + +// String returns the string representation +func (s InstanceGroupModifyConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceGroupModifyConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InstanceGroupModifyConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InstanceGroupModifyConfig"} + if s.InstanceGroupId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceGroupId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The status change reason details for the instance group. +type InstanceGroupStateChangeReason struct { + _ struct{} `type:"structure"` + + // The programmable code for the state change reason. + Code *string `type:"string" enum:"InstanceGroupStateChangeReasonCode"` + + // The status change reason description. + Message *string `type:"string"` +} + +// String returns the string representation +func (s InstanceGroupStateChangeReason) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceGroupStateChangeReason) GoString() string { + return s.String() +} + +// The details of the instance group status. +type InstanceGroupStatus struct { + _ struct{} `type:"structure"` + + // The current state of the instance group. + State *string `type:"string" enum:"InstanceGroupState"` + + // The status change reason details for the instance group. + StateChangeReason *InstanceGroupStateChangeReason `type:"structure"` + + // The timeline of the instance group status over time. + Timeline *InstanceGroupTimeline `type:"structure"` +} + +// String returns the string representation +func (s InstanceGroupStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceGroupStatus) GoString() string { + return s.String() +} + +// The timeline of the instance group lifecycle. +type InstanceGroupTimeline struct { + _ struct{} `type:"structure"` + + // The creation date and time of the instance group. + CreationDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The date and time when the instance group terminated. 
+ EndDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The date and time when the instance group became ready to perform tasks. + ReadyDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s InstanceGroupTimeline) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceGroupTimeline) GoString() string { + return s.String() +} + +// Custom policy for requesting termination protection or termination of specific +// instances when shrinking an instance group. +type InstanceResizePolicy struct { + _ struct{} `type:"structure"` + + // Decommissioning timeout override for the specific list of instances to be + // terminated. + InstanceTerminationTimeout *int64 `type:"integer"` + + // Specific list of instances to be protected when shrinking an instance group. + InstancesToProtect []*string `type:"list"` + + // Specific list of instances to be terminated when shrinking an instance group. + InstancesToTerminate []*string `type:"list"` +} + +// String returns the string representation +func (s InstanceResizePolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceResizePolicy) GoString() string { + return s.String() +} + +// The details of the status change reason for the instance. +type InstanceStateChangeReason struct { + _ struct{} `type:"structure"` + + // The programmable code for the state change reason. + Code *string `type:"string" enum:"InstanceStateChangeReasonCode"` + + // The status change reason description. + Message *string `type:"string"` +} + +// String returns the string representation +func (s InstanceStateChangeReason) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceStateChangeReason) GoString() string { + return s.String() +} + +// The instance status details. +type InstanceStatus struct { + _ struct{} `type:"structure"` + + // The current state of the instance. + State *string `type:"string" enum:"InstanceState"` + + // The details of the status change reason for the instance. + StateChangeReason *InstanceStateChangeReason `type:"structure"` + + // The timeline of the instance status over time. + Timeline *InstanceTimeline `type:"structure"` +} + +// String returns the string representation +func (s InstanceStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceStatus) GoString() string { + return s.String() +} + +// The timeline of the instance lifecycle. +type InstanceTimeline struct { + _ struct{} `type:"structure"` + + // The creation date and time of the instance. + CreationDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The date and time when the instance was terminated. + EndDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The date and time when the instance was ready to perform tasks. + ReadyDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s InstanceTimeline) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceTimeline) GoString() string { + return s.String() +} + +// A description of a job flow. +type JobFlowDetail struct { + _ struct{} `type:"structure"` + + // The version of the AMI used to initialize Amazon EC2 instances in the job + // flow. 
For a list of AMI versions currently supported by Amazon Elastic MapReduce,
+ // go to AMI Versions Supported in Elastic MapReduce (http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/EnvironmentConfig_AMIVersion.html#ami-versions-supported)
+ // in the Amazon Elastic MapReduce Developer Guide.
+ AmiVersion *string `type:"string"`
+
+ // A list of the bootstrap actions run by the job flow.
+ BootstrapActions []*BootstrapActionDetail `type:"list"`
+
+ // Describes the execution status of the job flow.
+ ExecutionStatusDetail *JobFlowExecutionStatusDetail `type:"structure" required:"true"`
+
+ // Describes the Amazon EC2 instances of the job flow.
+ Instances *JobFlowInstancesDetail `type:"structure" required:"true"`
+
+ // The job flow identifier.
+ JobFlowId *string `type:"string" required:"true"`
+
+ // The IAM role that was specified when the job flow was launched. The EC2 instances
+ // of the job flow assume this role.
+ JobFlowRole *string `type:"string"`
+
+ // The location in Amazon S3 where log files for the job are stored.
+ LogUri *string `type:"string"`
+
+ // The name of the job flow.
+ Name *string `type:"string" required:"true"`
+
+ // The IAM role that will be assumed by the Amazon EMR service to access AWS
+ // resources on your behalf.
+ ServiceRole *string `type:"string"`
+
+ // A list of steps run by the job flow.
+ Steps []*StepDetail `type:"list"`
+
+ // A list of strings set by third-party software when the job flow is launched.
+ // If you are not using third-party software to manage the job flow, this value
+ // is empty.
+ SupportedProducts []*string `type:"list"`
+
+ // Specifies whether the job flow is visible to all IAM users of the AWS account
+ // associated with the job flow. If this value is set to true, all IAM users
+ // of that AWS account can view and (if they have the proper policy permissions
+ // set) manage the job flow. If it is set to false, only the IAM user that created
+ // the job flow can view and manage it. This value can be changed using the
+ // SetVisibleToAllUsers action.
+ VisibleToAllUsers *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s JobFlowDetail) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s JobFlowDetail) GoString() string {
+ return s.String()
+}
+
+// Describes the status of the job flow.
+type JobFlowExecutionStatusDetail struct {
+ _ struct{} `type:"structure"`
+
+ // The creation date and time of the job flow.
+ CreationDateTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"`
+
+ // The completion date and time of the job flow.
+ EndDateTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // Description of the last state change of the job flow.
+ LastStateChangeReason *string `type:"string"`
+
+ // The date and time when the job flow was ready to start running bootstrap
+ // actions.
+ ReadyDateTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The start date and time of the job flow.
+ StartDateTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The state of the job flow.
+ State *string `type:"string" required:"true" enum:"JobFlowExecutionState"`
+}
+
+// String returns the string representation
+func (s JobFlowExecutionStatusDetail) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s JobFlowExecutionStatusDetail) GoString() string {
+ return s.String()
+}
+
+// A description of the Amazon EC2 instances running the job flow. A valid JobFlowInstancesConfig
+// must contain at least InstanceGroups, which is the recommended configuration.
+// However, a valid alternative is to have MasterInstanceType, SlaveInstanceType,
+// and InstanceCount (all three must be present).
+type JobFlowInstancesConfig struct {
+ _ struct{} `type:"structure"`
+
+ // A list of additional Amazon EC2 security group IDs for the master node.
+ AdditionalMasterSecurityGroups []*string `type:"list"`
+
+ // A list of additional Amazon EC2 security group IDs for the slave nodes.
+ AdditionalSlaveSecurityGroups []*string `type:"list"`
+
+ // The name of the Amazon EC2 key pair that can be used to ssh to the master
+ // node as the user called "hadoop."
+ Ec2KeyName *string `type:"string"`
+
+ // To launch the job flow in Amazon Virtual Private Cloud (Amazon VPC), set
+ // this parameter to the identifier of the Amazon VPC subnet where you want
+ // the job flow to launch. If you do not specify this value, the job flow is
+ // launched in the normal Amazon Web Services cloud, outside of an Amazon VPC.
+ //
+ // Amazon VPC currently does not support cluster compute quadruple extra large
+ // (cc1.4xlarge) instances. Thus, you cannot specify the cc1.4xlarge instance
+ // type for nodes of a job flow launched in an Amazon VPC.
+ Ec2SubnetId *string `type:"string"`
+
+ // The identifier of the Amazon EC2 security group for the master node.
+ EmrManagedMasterSecurityGroup *string `type:"string"`
+
+ // The identifier of the Amazon EC2 security group for the slave nodes.
+ EmrManagedSlaveSecurityGroup *string `type:"string"`
+
+ // The Hadoop version for the job flow. Valid inputs are "0.18" (deprecated),
+ // "0.20" (deprecated), "0.20.205" (deprecated), "1.0.3", "2.2.0", or "2.4.0".
+ // If you do not set this value, the default of 0.18 is used, unless the AmiVersion
+ // parameter is set in the RunJobFlow call, in which case the default version
+ // of Hadoop for that AMI version is used.
+ HadoopVersion *string `type:"string"`
+
+ // The number of Amazon EC2 instances used to execute the job flow.
+ InstanceCount *int64 `type:"integer"`
+
+ // Configuration for the job flow's instance groups.
+ InstanceGroups []*InstanceGroupConfig `type:"list"`
+
+ // Specifies whether the job flow should be kept alive after completing all
+ // steps.
+ KeepJobFlowAliveWhenNoSteps *bool `type:"boolean"`
+
+ // The EC2 instance type of the master node.
+ MasterInstanceType *string `min:"1" type:"string"`
+
+ // The Availability Zone the job flow will run in.
+ Placement *PlacementType `type:"structure"`
+
+ // The identifier of the Amazon EC2 security group for the Amazon EMR service
+ // to access clusters in VPC private subnets.
+ ServiceAccessSecurityGroup *string `type:"string"`
+
+ // The EC2 instance type of the slave nodes.
+ SlaveInstanceType *string `min:"1" type:"string"`
+
+ // Specifies whether to lock the job flow to prevent the Amazon EC2 instances
+ // from being terminated by API call, user intervention, or in the event of
+ // a job flow error.
+ TerminationProtected *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s JobFlowInstancesConfig) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s JobFlowInstancesConfig) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *JobFlowInstancesConfig) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "JobFlowInstancesConfig"}
+ if s.MasterInstanceType != nil && len(*s.MasterInstanceType) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("MasterInstanceType", 1))
+ }
+ if s.SlaveInstanceType != nil && len(*s.SlaveInstanceType) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("SlaveInstanceType", 1))
+ }
+ if s.InstanceGroups != nil {
+ for i, v := range s.InstanceGroups {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InstanceGroups", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+ if s.Placement != nil {
+ if err := s.Placement.Validate(); err != nil {
+ invalidParams.AddNested("Placement", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Specify the type of Amazon EC2 instances to run the job flow on.
+type JobFlowInstancesDetail struct {
+ _ struct{} `type:"structure"`
+
+ // The name of an Amazon EC2 key pair that can be used to ssh to the master
+ // node of the job flow.
+ Ec2KeyName *string `type:"string"`
+
+ // For job flows launched within Amazon Virtual Private Cloud, this value specifies
+ // the identifier of the subnet where the job flow was launched.
+ Ec2SubnetId *string `type:"string"`
+
+ // The Hadoop version for the job flow.
+ HadoopVersion *string `type:"string"`
+
+ // The number of Amazon EC2 instances in the cluster. If the value is 1, the
+ // same instance serves as both the master and slave node. If the value is greater
+ // than 1, one instance is the master node and all others are slave nodes.
+ InstanceCount *int64 `type:"integer" required:"true"`
+
+ // Details about the job flow's instance groups.
+ InstanceGroups []*InstanceGroupDetail `type:"list"`
+
+ // Specifies whether the job flow should terminate after completing all steps.
+ KeepJobFlowAliveWhenNoSteps *bool `type:"boolean"`
+
+ // The Amazon EC2 instance identifier of the master node.
+ MasterInstanceId *string `type:"string"`
+
+ // The Amazon EC2 master node instance type.
+ MasterInstanceType *string `min:"1" type:"string" required:"true"`
+
+ // The DNS name of the master node.
+ MasterPublicDnsName *string `type:"string"`
+
+ // An approximation of the cost of the job flow, represented in m1.small/hours.
+ // This value is incremented once for every hour an m1.small runs. Larger instances
+ // are weighted more, so an Amazon EC2 instance that is roughly four times more
+ // expensive would result in the normalized instance hours being incremented
+ // by four. This result is only an approximation and does not reflect the actual
+ // billing rate.
+ NormalizedInstanceHours *int64 `type:"integer"`
+
+ // The Amazon EC2 Availability Zone for the job flow.
+ Placement *PlacementType `type:"structure"`
+
+ // The Amazon EC2 slave node instance type.
+ SlaveInstanceType *string `min:"1" type:"string" required:"true"`
+
+ // Specifies whether the Amazon EC2 instances in the cluster are protected from
+ // termination by API calls, user intervention, or in the event of a job flow
+ // error.
+ TerminationProtected *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s JobFlowInstancesDetail) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s JobFlowInstancesDetail) GoString() string {
+ return s.String()
+}
+
+// A key-value pair.
+type KeyValue struct {
+ _ struct{} `type:"structure"`
+
+ // The unique identifier of a key-value pair.
+ Key *string `type:"string"`
+
+ // The value part of the identified key.
+ Value *string `type:"string"`
+}
+
+// String returns the string representation
+func (s KeyValue) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s KeyValue) GoString() string {
+ return s.String()
+}
+
+// This input determines which bootstrap actions to retrieve.
+type ListBootstrapActionsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The cluster identifier for the bootstrap actions to list.
+ ClusterId *string `type:"string" required:"true"`
+
+ // The pagination token that indicates the next set of results to retrieve.
+ Marker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListBootstrapActionsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBootstrapActionsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListBootstrapActionsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListBootstrapActionsInput"}
+ if s.ClusterId == nil {
+ invalidParams.Add(request.NewErrParamRequired("ClusterId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// This output contains the bootstrap actions detail.
+type ListBootstrapActionsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The bootstrap actions associated with the cluster.
+ BootstrapActions []*Command `type:"list"`
+
+ // The pagination token that indicates the next set of results to retrieve.
+ Marker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListBootstrapActionsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBootstrapActionsOutput) GoString() string {
+ return s.String()
+}
+
+// This input determines how the ListClusters action filters the list of clusters
+// that it returns.
+type ListClustersInput struct {
+ _ struct{} `type:"structure"`
+
+ // The cluster state filters to apply when listing clusters.
+ ClusterStates []*string `type:"list"`
+
+ // The creation date and time beginning value filter for listing clusters.
+ CreatedAfter *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The creation date and time end value filter for listing clusters.
+ CreatedBefore *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The pagination token that indicates the next set of results to retrieve.
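+//
+// Usage sketch (an editorial addition, not generated code; svc is assumed to
+// be an *emr.EMR client): callers typically leave Marker unset on the first
+// ListClusters request and feed each response's Marker back into the next
+// request until it is nil, for example:
+//
+//     var marker *string
+//     for {
+//         out, err := svc.ListClusters(&emr.ListClustersInput{Marker: marker})
+//         if err != nil {
+//             break
+//         }
+//         // consume out.Clusters here
+//         if out.Marker == nil {
+//             break
+//         }
+//         marker = out.Marker
+//     }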
+ Marker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListClustersInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListClustersInput) GoString() string {
+ return s.String()
+}
+
+// This contains a ClusterSummaryList with the cluster details; for example,
+// the cluster IDs, names, and status.
+type ListClustersOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The list of clusters for the account based on the given filters.
+ Clusters []*ClusterSummary `type:"list"`
+
+ // The pagination token that indicates the next set of results to retrieve.
+ Marker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListClustersOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListClustersOutput) GoString() string {
+ return s.String()
+}
+
+// This input determines which instance groups to retrieve.
+type ListInstanceGroupsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The identifier of the cluster for which to list the instance groups.
+ ClusterId *string `type:"string" required:"true"`
+
+ // The pagination token that indicates the next set of results to retrieve.
+ Marker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListInstanceGroupsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListInstanceGroupsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListInstanceGroupsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListInstanceGroupsInput"}
+ if s.ClusterId == nil {
+ invalidParams.Add(request.NewErrParamRequired("ClusterId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// This output contains the list of instance groups for the cluster.
+type ListInstanceGroupsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The list of instance groups for the cluster and given filters.
+ InstanceGroups []*InstanceGroup `type:"list"`
+
+ // The pagination token that indicates the next set of results to retrieve.
+ Marker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListInstanceGroupsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListInstanceGroupsOutput) GoString() string {
+ return s.String()
+}
+
+// This input determines which instances to list.
+type ListInstancesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The identifier of the cluster for which to list the instances.
+ ClusterId *string `type:"string" required:"true"`
+
+ // The identifier of the instance group for which to list the instances.
+ InstanceGroupId *string `type:"string"`
+
+ // The type of instance group for which to list the instances.
+ InstanceGroupTypes []*string `type:"list"`
+
+ // A list of instance states that will filter the instances returned with this
+ // request.
+ InstanceStates []*string `type:"list"`
+
+ // The pagination token that indicates the next set of results to retrieve.
+ Marker *string `type:"string"` +} + +// String returns the string representation +func (s ListInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListInstancesInput"} + if s.ClusterId == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// This output contains the list of instances. +type ListInstancesOutput struct { + _ struct{} `type:"structure"` + + // The list of instances for the cluster and given filters. + Instances []*Instance `type:"list"` + + // The pagination token that indicates the next set of results to retrieve. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s ListInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListInstancesOutput) GoString() string { + return s.String() +} + +// This input determines which steps to list. +type ListStepsInput struct { + _ struct{} `type:"structure"` + + // The identifier of the cluster for which to list the steps. + ClusterId *string `type:"string" required:"true"` + + // The pagination token that indicates the next set of results to retrieve. + Marker *string `type:"string"` + + // The filter to limit the step list based on the identifier of the steps. + StepIds []*string `type:"list"` + + // The filter to limit the step list based on certain states. + StepStates []*string `type:"list"` +} + +// String returns the string representation +func (s ListStepsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListStepsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListStepsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListStepsInput"} + if s.ClusterId == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// This output contains the list of steps returned in reverse order. This means +// that the last step is the first element in the list. +type ListStepsOutput struct { + _ struct{} `type:"structure"` + + // The pagination token that indicates the next set of results to retrieve. + Marker *string `type:"string"` + + // The filtered list of steps for the cluster. + Steps []*StepSummary `type:"list"` +} + +// String returns the string representation +func (s ListStepsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListStepsOutput) GoString() string { + return s.String() +} + +// Change the size of some instance groups. +type ModifyInstanceGroupsInput struct { + _ struct{} `type:"structure"` + + // Instance groups to change. 
+ InstanceGroups []*InstanceGroupModifyConfig `type:"list"` +} + +// String returns the string representation +func (s ModifyInstanceGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyInstanceGroupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyInstanceGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyInstanceGroupsInput"} + if s.InstanceGroups != nil { + for i, v := range s.InstanceGroups { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InstanceGroups", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ModifyInstanceGroupsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ModifyInstanceGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyInstanceGroupsOutput) GoString() string { + return s.String() +} + +// The Amazon EC2 location for the job flow. +type PlacementType struct { + _ struct{} `type:"structure"` + + // The Amazon EC2 Availability Zone for the job flow. + AvailabilityZone *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s PlacementType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PlacementType) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PlacementType) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PlacementType"} + if s.AvailabilityZone == nil { + invalidParams.Add(request.NewErrParamRequired("AvailabilityZone")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// This input identifies a cluster and a list of tags to remove. +type RemoveTagsInput struct { + _ struct{} `type:"structure"` + + // The Amazon EMR resource identifier from which tags will be removed. This + // value must be a cluster identifier. + ResourceId *string `type:"string" required:"true"` + + // A list of tag keys to remove from a resource. + TagKeys []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s RemoveTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RemoveTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemoveTagsInput"} + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// This output indicates the result of removing tags from a resource. 
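+//
+// A brief usage sketch (an editorial addition, not generated code; svc is
+// assumed to be an *emr.EMR client, and the resource identifier and tag key
+// below are placeholders):
+//
+//     _, err := svc.RemoveTags(&emr.RemoveTagsInput{
+//         ResourceId: aws.String("j-XXXXXXXXXXXXX"),
+//         TagKeys:    []*string{aws.String("environment")},
+//     })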
+type RemoveTagsOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s RemoveTagsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RemoveTagsOutput) GoString() string {
+ return s.String()
+}
+
+// Input to the RunJobFlow operation.
+type RunJobFlowInput struct {
+ _ struct{} `type:"structure"`
+
+ // A JSON string for selecting additional features.
+ AdditionalInfo *string `type:"string"`
+
+ // For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and greater,
+ // use ReleaseLabel.
+ //
+ // The version of the Amazon Machine Image (AMI) to use when launching Amazon
+ // EC2 instances in the job flow. The following values are valid:
+ //
+ // The version number of the AMI to use, for example, "2.0." If the AMI supports
+ // multiple versions of Hadoop (for example, AMI 1.0 supports both Hadoop 0.18
+ // and 0.20), you can use the JobFlowInstancesConfig HadoopVersion parameter
+ // to modify the version of Hadoop from the defaults shown above.
+ //
+ // For details about the AMI versions currently supported by Amazon Elastic
+ // MapReduce, go to AMI Versions Supported in Elastic MapReduce (http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/EnvironmentConfig_AMIVersion.html#ami-versions-supported)
+ // in the Amazon Elastic MapReduce Developer's Guide.
+ AmiVersion *string `type:"string"`
+
+ // Amazon EMR releases 4.x or later.
+ //
+ // A list of applications for the cluster. Valid values are: "Hadoop", "Hive",
+ // "Mahout", "Pig", and "Spark." They are case insensitive.
+ Applications []*Application `type:"list"`
+
+ // A list of bootstrap actions that will be run before Hadoop is started on
+ // the cluster nodes.
+ BootstrapActions []*BootstrapActionConfig `type:"list"`
+
+ // Amazon EMR releases 4.x or later.
+ //
+ // The list of configurations supplied for the EMR cluster you are creating.
+ Configurations []*Configuration `type:"list"`
+
+ // A specification of the number and type of Amazon EC2 instances on which to
+ // run the job flow.
+ Instances *JobFlowInstancesConfig `type:"structure" required:"true"`
+
+ // Also called instance profile and EC2 role. An IAM role for an EMR cluster.
+ // The EC2 instances of the cluster assume this role. The default role is EMR_EC2_DefaultRole.
+ // In order to use the default role, you must have already created it using
+ // the CLI or console.
+ JobFlowRole *string `type:"string"`
+
+ // The location in Amazon S3 to write the log files of the job flow. If a value
+ // is not provided, logs are not created.
+ LogUri *string `type:"string"`
+
+ // The name of the job flow.
+ Name *string `type:"string" required:"true"`
+
+ // For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and greater,
+ // use Applications.
+ //
+ // A list of strings that indicates third-party software to use with the job
+ // flow that accepts a user argument list. EMR accepts and forwards the argument
+ // list to the corresponding installation script as bootstrap action arguments.
+ // For more information, see Launch a Job Flow on the MapR Distribution for
+ // Hadoop (http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/emr-mapr.html).
+ // Currently supported values are:
+ //
+ //    * "mapr-m3" - launch the cluster using MapR M3 Edition.
+ //    * "mapr-m5" - launch the cluster using MapR M5 Edition.
+ //    * "mapr" with the user arguments specifying "--edition,m3" or "--edition,m5"
+ //      - launch the job flow using MapR M3 or M5 Edition, respectively.
+ //    * "mapr-m7" - launch the cluster using MapR M7 Edition.
+ //    * "hunk" - launch the cluster with the Hunk Big Data Analytics Platform.
+ //    * "hue" - launch the cluster with Hue installed.
+ //    * "spark" - launch the cluster with Apache Spark installed.
+ //    * "ganglia" - launch the cluster with the Ganglia Monitoring System installed.
+ NewSupportedProducts []*SupportedProductConfig `type:"list"`
+
+ // Amazon EMR releases 4.x or later.
+ //
+ // The release label for the Amazon EMR release. For Amazon EMR 3.x and 2.x
+ // AMIs, use AmiVersion instead of ReleaseLabel.
+ ReleaseLabel *string `type:"string"`
+
+ // The IAM role that will be assumed by the Amazon EMR service to access AWS
+ // resources on your behalf.
+ ServiceRole *string `type:"string"`
+
+ // A list of steps to be executed by the job flow.
+ Steps []*StepConfig `type:"list"`
+
+ // For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and greater,
+ // use Applications.
+ //
+ // A list of strings that indicates third-party software to use with the job
+ // flow. For more information, go to Use Third Party Applications with Amazon
+ // EMR (http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/emr-supported-products.html).
+ // Currently supported values are:
+ //
+ //    * "mapr-m3" - launch the job flow using MapR M3 Edition.
+ //    * "mapr-m5" - launch the job flow using MapR M5 Edition.
+ SupportedProducts []*string `type:"list"`
+
+ // A list of tags to associate with a cluster and propagate to Amazon EC2 instances.
+ Tags []*Tag `type:"list"`
+
+ // Whether the job flow is visible to all IAM users of the AWS account associated
+ // with the job flow. If this value is set to true, all IAM users of that AWS
+ // account can view and (if they have the proper policy permissions set) manage
+ // the job flow. If it is set to false, only the IAM user that created the job
+ // flow can view and manage it.
+ VisibleToAllUsers *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s RunJobFlowInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RunJobFlowInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RunJobFlowInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RunJobFlowInput"}
+ if s.Instances == nil {
+ invalidParams.Add(request.NewErrParamRequired("Instances"))
+ }
+ if s.Name == nil {
+ invalidParams.Add(request.NewErrParamRequired("Name"))
+ }
+ if s.BootstrapActions != nil {
+ for i, v := range s.BootstrapActions {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "BootstrapActions", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+ if s.Instances != nil {
+ if err := s.Instances.Validate(); err != nil {
+ invalidParams.AddNested("Instances", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.Steps != nil {
+ for i, v := range s.Steps {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Steps", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// The result of the RunJobFlow operation.
+type RunJobFlowOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A unique identifier for the job flow.
+ JobFlowId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s RunJobFlowOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RunJobFlowOutput) GoString() string {
+ return s.String()
+}
+
+// Configuration of the script to run during a bootstrap action.
+type ScriptBootstrapActionConfig struct {
+ _ struct{} `type:"structure"`
+
+ // A list of command line arguments to pass to the bootstrap action script.
+ Args []*string `type:"list"`
+
+ // Location of the script to run during a bootstrap action. Can be either a
+ // location in Amazon S3 or on a local file system.
+ Path *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ScriptBootstrapActionConfig) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ScriptBootstrapActionConfig) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ScriptBootstrapActionConfig) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ScriptBootstrapActionConfig"}
+ if s.Path == nil {
+ invalidParams.Add(request.NewErrParamRequired("Path"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// The input argument to the SetTerminationProtection operation.
+type SetTerminationProtectionInput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of strings that uniquely identify the job flows to protect. This identifier
+ // is returned by RunJobFlow and can also be obtained from DescribeJobFlows.
+ JobFlowIds []*string `type:"list" required:"true"`
+
+ // A Boolean that indicates whether to protect the job flow and prevent the
+ // Amazon EC2 instances in the cluster from shutting down due to API calls,
+ // user intervention, or job-flow error.
+ TerminationProtected *bool `type:"boolean" required:"true"`
+}
+
+// String returns the string representation
+func (s SetTerminationProtectionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetTerminationProtectionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SetTerminationProtectionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "SetTerminationProtectionInput"}
+ if s.JobFlowIds == nil {
+ invalidParams.Add(request.NewErrParamRequired("JobFlowIds"))
+ }
+ if s.TerminationProtected == nil {
+ invalidParams.Add(request.NewErrParamRequired("TerminationProtected"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type SetTerminationProtectionOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s SetTerminationProtectionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetTerminationProtectionOutput) GoString() string {
+ return s.String()
+}
+
+// The input to the SetVisibleToAllUsers action.
+type SetVisibleToAllUsersInput struct {
+ _ struct{} `type:"structure"`
+
+ // Identifiers of the job flows to receive the new visibility setting.
+ JobFlowIds []*string `type:"list" required:"true"` + + // Whether the specified job flows are visible to all IAM users of the AWS account + // associated with the job flow. If this value is set to True, all IAM users + // of that AWS account can view and, if they have the proper IAM policy permissions + // set, manage the job flows. If it is set to False, only the IAM user that + // created a job flow can view and manage it. + VisibleToAllUsers *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s SetVisibleToAllUsersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetVisibleToAllUsersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetVisibleToAllUsersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetVisibleToAllUsersInput"} + if s.JobFlowIds == nil { + invalidParams.Add(request.NewErrParamRequired("JobFlowIds")) + } + if s.VisibleToAllUsers == nil { + invalidParams.Add(request.NewErrParamRequired("VisibleToAllUsers")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type SetVisibleToAllUsersOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetVisibleToAllUsersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetVisibleToAllUsersOutput) GoString() string { + return s.String() +} + +// Policy for customizing shrink operations. Allows configuration of decommissioning +// timeout and targeted instance shrinking. +type ShrinkPolicy struct { + _ struct{} `type:"structure"` + + // The desired timeout for decommissioning an instance. Overrides the default + // YARN decommissioning timeout. + DecommissionTimeout *int64 `type:"integer"` + + // Custom policy for requesting termination protection or termination of specific + // instances when shrinking an instance group. + InstanceResizePolicy *InstanceResizePolicy `type:"structure"` +} + +// String returns the string representation +func (s ShrinkPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ShrinkPolicy) GoString() string { + return s.String() +} + +// This represents a step in a cluster. +type Step struct { + _ struct{} `type:"structure"` + + // This specifies what action to take when the cluster step fails. Possible + // values are TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE. + ActionOnFailure *string `type:"string" enum:"ActionOnFailure"` + + // The Hadoop job configuration of the cluster step. + Config *HadoopStepConfig `type:"structure"` + + // The identifier of the cluster step. + Id *string `type:"string"` + + // The name of the cluster step. + Name *string `type:"string"` + + // The current execution status details of the cluster step. + Status *StepStatus `type:"structure"` +} + +// String returns the string representation +func (s Step) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Step) GoString() string { + return s.String() +} + +// Specification of a job flow step. +type StepConfig struct { + _ struct{} `type:"structure"` + + // The action to take if the job flow step fails. + ActionOnFailure *string `type:"string" enum:"ActionOnFailure"` + + // The JAR file used for the job flow step. 
+ HadoopJarStep *HadoopJarStepConfig `type:"structure" required:"true"` + + // The name of the job flow step. + Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s StepConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StepConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StepConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StepConfig"} + if s.HadoopJarStep == nil { + invalidParams.Add(request.NewErrParamRequired("HadoopJarStep")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.HadoopJarStep != nil { + if err := s.HadoopJarStep.Validate(); err != nil { + invalidParams.AddNested("HadoopJarStep", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Combines the execution state and configuration of a step. +type StepDetail struct { + _ struct{} `type:"structure"` + + // The description of the step status. + ExecutionStatusDetail *StepExecutionStatusDetail `type:"structure" required:"true"` + + // The step configuration. + StepConfig *StepConfig `type:"structure" required:"true"` +} + +// String returns the string representation +func (s StepDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StepDetail) GoString() string { + return s.String() +} + +// The execution state of a step. +type StepExecutionStatusDetail struct { + _ struct{} `type:"structure"` + + // The creation date and time of the step. + CreationDateTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` + + // The completion date and time of the step. + EndDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // A description of the step's current state. + LastStateChangeReason *string `type:"string"` + + // The start date and time of the step. + StartDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The state of the job flow step. + State *string `type:"string" required:"true" enum:"StepExecutionState"` +} + +// String returns the string representation +func (s StepExecutionStatusDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StepExecutionStatusDetail) GoString() string { + return s.String() +} + +// The details of the step state change reason. +type StepStateChangeReason struct { + _ struct{} `type:"structure"` + + // The programmable code for the state change reason. Note: Currently, the service + // provides no code for the state change. + Code *string `type:"string" enum:"StepStateChangeReasonCode"` + + // The descriptive message for the state change reason. + Message *string `type:"string"` +} + +// String returns the string representation +func (s StepStateChangeReason) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StepStateChangeReason) GoString() string { + return s.String() +} + +// The execution status details of the cluster step. +type StepStatus struct { + _ struct{} `type:"structure"` + + // The execution state of the cluster step. + State *string `type:"string" enum:"StepState"` + + // The reason for the step execution status change. 
+ StateChangeReason *StepStateChangeReason `type:"structure"` + + // The timeline of the cluster step status over time. + Timeline *StepTimeline `type:"structure"` +} + +// String returns the string representation +func (s StepStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StepStatus) GoString() string { + return s.String() +} + +// The summary of the cluster step. +type StepSummary struct { + _ struct{} `type:"structure"` + + // This specifies what action to take when the cluster step fails. Possible + // values are TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE. + ActionOnFailure *string `type:"string" enum:"ActionOnFailure"` + + // The Hadoop job configuration of the cluster step. + Config *HadoopStepConfig `type:"structure"` + + // The identifier of the cluster step. + Id *string `type:"string"` + + // The name of the cluster step. + Name *string `type:"string"` + + // The current execution status details of the cluster step. + Status *StepStatus `type:"structure"` +} + +// String returns the string representation +func (s StepSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StepSummary) GoString() string { + return s.String() +} + +// The timeline of the cluster step lifecycle. +type StepTimeline struct { + _ struct{} `type:"structure"` + + // The date and time when the cluster step was created. + CreationDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The date and time when the cluster step execution completed or failed. + EndDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The date and time when the cluster step execution started. + StartDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s StepTimeline) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StepTimeline) GoString() string { + return s.String() +} + +// The list of supported product configurations which allow user-supplied arguments. +// EMR accepts these arguments and forwards them to the corresponding installation +// script as bootstrap action arguments. +type SupportedProductConfig struct { + _ struct{} `type:"structure"` + + // The list of user-supplied arguments. + Args []*string `type:"list"` + + // The name of the product configuration. + Name *string `type:"string"` +} + +// String returns the string representation +func (s SupportedProductConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SupportedProductConfig) GoString() string { + return s.String() +} + +// A key/value pair containing user-defined metadata that you can associate +// with an Amazon EMR resource. Tags make it easier to associate clusters in +// various ways, such as grouping clusters to track your Amazon EMR resource +// allocation costs. For more information, see Tagging Amazon EMR Resources +// (http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/emr-plan-tags.html). +type Tag struct { + _ struct{} `type:"structure"` + + // A user-defined key, which is the minimum required information for a valid + // tag. For more information, see Tagging Amazon EMR Resources (http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/emr-plan-tags.html). + Key *string `type:"string"` + + // A user-defined value, which is optional in a tag.
For more information, see + // Tagging Amazon EMR Resources (http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/emr-plan-tags.html). + Value *string `type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Input to the TerminateJobFlows operation. +type TerminateJobFlowsInput struct { + _ struct{} `type:"structure"` + + // A list of job flows to be shut down. + JobFlowIds []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s TerminateJobFlowsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TerminateJobFlowsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TerminateJobFlowsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TerminateJobFlowsInput"} + if s.JobFlowIds == nil { + invalidParams.Add(request.NewErrParamRequired("JobFlowIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type TerminateJobFlowsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TerminateJobFlowsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TerminateJobFlowsOutput) GoString() string { + return s.String() +} + +// EBS volume specifications such as volume type, IOPS, and size (GiB) that will +// be requested for the EBS volume attached to an EC2 instance in the cluster. +type VolumeSpecification struct { + _ struct{} `type:"structure"` + + // The number of I/O operations per second (IOPS) that the volume supports. + Iops *int64 `type:"integer"` + + // The volume size, in gibibytes (GiB). This can be a number from 1 – 1024. + // If the volume type is EBS-optimized, the minimum value is 10. + SizeInGB *int64 `type:"integer" required:"true"` + + // The volume type. Volume types supported are gp2, io1, standard. + VolumeType *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s VolumeSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeSpecification) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid.
+func (s *VolumeSpecification) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VolumeSpecification"} + if s.SizeInGB == nil { + invalidParams.Add(request.NewErrParamRequired("SizeInGB")) + } + if s.VolumeType == nil { + invalidParams.Add(request.NewErrParamRequired("VolumeType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +const ( + // @enum ActionOnFailure + ActionOnFailureTerminateJobFlow = "TERMINATE_JOB_FLOW" + // @enum ActionOnFailure + ActionOnFailureTerminateCluster = "TERMINATE_CLUSTER" + // @enum ActionOnFailure + ActionOnFailureCancelAndWait = "CANCEL_AND_WAIT" + // @enum ActionOnFailure + ActionOnFailureContinue = "CONTINUE" +) + +const ( + // @enum ClusterState + ClusterStateStarting = "STARTING" + // @enum ClusterState + ClusterStateBootstrapping = "BOOTSTRAPPING" + // @enum ClusterState + ClusterStateRunning = "RUNNING" + // @enum ClusterState + ClusterStateWaiting = "WAITING" + // @enum ClusterState + ClusterStateTerminating = "TERMINATING" + // @enum ClusterState + ClusterStateTerminated = "TERMINATED" + // @enum ClusterState + ClusterStateTerminatedWithErrors = "TERMINATED_WITH_ERRORS" +) + +const ( + // @enum ClusterStateChangeReasonCode + ClusterStateChangeReasonCodeInternalError = "INTERNAL_ERROR" + // @enum ClusterStateChangeReasonCode + ClusterStateChangeReasonCodeValidationError = "VALIDATION_ERROR" + // @enum ClusterStateChangeReasonCode + ClusterStateChangeReasonCodeInstanceFailure = "INSTANCE_FAILURE" + // @enum ClusterStateChangeReasonCode + ClusterStateChangeReasonCodeBootstrapFailure = "BOOTSTRAP_FAILURE" + // @enum ClusterStateChangeReasonCode + ClusterStateChangeReasonCodeUserRequest = "USER_REQUEST" + // @enum ClusterStateChangeReasonCode + ClusterStateChangeReasonCodeStepFailure = "STEP_FAILURE" + // @enum ClusterStateChangeReasonCode + ClusterStateChangeReasonCodeAllStepsCompleted = "ALL_STEPS_COMPLETED" +) + +const ( + // @enum InstanceGroupState + InstanceGroupStateProvisioning = "PROVISIONING" + // @enum InstanceGroupState + InstanceGroupStateBootstrapping = "BOOTSTRAPPING" + // @enum InstanceGroupState + InstanceGroupStateRunning = "RUNNING" + // @enum InstanceGroupState + InstanceGroupStateResizing = "RESIZING" + // @enum InstanceGroupState + InstanceGroupStateSuspended = "SUSPENDED" + // @enum InstanceGroupState + InstanceGroupStateTerminating = "TERMINATING" + // @enum InstanceGroupState + InstanceGroupStateTerminated = "TERMINATED" + // @enum InstanceGroupState + InstanceGroupStateArrested = "ARRESTED" + // @enum InstanceGroupState + InstanceGroupStateShuttingDown = "SHUTTING_DOWN" + // @enum InstanceGroupState + InstanceGroupStateEnded = "ENDED" +) + +const ( + // @enum InstanceGroupStateChangeReasonCode + InstanceGroupStateChangeReasonCodeInternalError = "INTERNAL_ERROR" + // @enum InstanceGroupStateChangeReasonCode + InstanceGroupStateChangeReasonCodeValidationError = "VALIDATION_ERROR" + // @enum InstanceGroupStateChangeReasonCode + InstanceGroupStateChangeReasonCodeInstanceFailure = "INSTANCE_FAILURE" + // @enum InstanceGroupStateChangeReasonCode + InstanceGroupStateChangeReasonCodeClusterTerminated = "CLUSTER_TERMINATED" +) + +const ( + // @enum InstanceGroupType + InstanceGroupTypeMaster = "MASTER" + // @enum InstanceGroupType + InstanceGroupTypeCore = "CORE" + // @enum InstanceGroupType + InstanceGroupTypeTask = "TASK" +) + +const ( + // @enum InstanceRoleType + InstanceRoleTypeMaster = "MASTER" + // @enum InstanceRoleType + InstanceRoleTypeCore = "CORE" + // @enum 
InstanceRoleType + InstanceRoleTypeTask = "TASK" +) + +const ( + // @enum InstanceState + InstanceStateAwaitingFulfillment = "AWAITING_FULFILLMENT" + // @enum InstanceState + InstanceStateProvisioning = "PROVISIONING" + // @enum InstanceState + InstanceStateBootstrapping = "BOOTSTRAPPING" + // @enum InstanceState + InstanceStateRunning = "RUNNING" + // @enum InstanceState + InstanceStateTerminated = "TERMINATED" +) + +const ( + // @enum InstanceStateChangeReasonCode + InstanceStateChangeReasonCodeInternalError = "INTERNAL_ERROR" + // @enum InstanceStateChangeReasonCode + InstanceStateChangeReasonCodeValidationError = "VALIDATION_ERROR" + // @enum InstanceStateChangeReasonCode + InstanceStateChangeReasonCodeInstanceFailure = "INSTANCE_FAILURE" + // @enum InstanceStateChangeReasonCode + InstanceStateChangeReasonCodeBootstrapFailure = "BOOTSTRAP_FAILURE" + // @enum InstanceStateChangeReasonCode + InstanceStateChangeReasonCodeClusterTerminated = "CLUSTER_TERMINATED" +) + +const ( + // @enum JobFlowExecutionState + JobFlowExecutionStateStarting = "STARTING" + // @enum JobFlowExecutionState + JobFlowExecutionStateBootstrapping = "BOOTSTRAPPING" + // @enum JobFlowExecutionState + JobFlowExecutionStateRunning = "RUNNING" + // @enum JobFlowExecutionState + JobFlowExecutionStateWaiting = "WAITING" + // @enum JobFlowExecutionState + JobFlowExecutionStateShuttingDown = "SHUTTING_DOWN" + // @enum JobFlowExecutionState + JobFlowExecutionStateTerminated = "TERMINATED" + // @enum JobFlowExecutionState + JobFlowExecutionStateCompleted = "COMPLETED" + // @enum JobFlowExecutionState + JobFlowExecutionStateFailed = "FAILED" +) + +const ( + // @enum MarketType + MarketTypeOnDemand = "ON_DEMAND" + // @enum MarketType + MarketTypeSpot = "SPOT" +) + +const ( + // @enum StepExecutionState + StepExecutionStatePending = "PENDING" + // @enum StepExecutionState + StepExecutionStateRunning = "RUNNING" + // @enum StepExecutionState + StepExecutionStateContinue = "CONTINUE" + // @enum StepExecutionState + StepExecutionStateCompleted = "COMPLETED" + // @enum StepExecutionState + StepExecutionStateCancelled = "CANCELLED" + // @enum StepExecutionState + StepExecutionStateFailed = "FAILED" + // @enum StepExecutionState + StepExecutionStateInterrupted = "INTERRUPTED" +) + +const ( + // @enum StepState + StepStatePending = "PENDING" + // @enum StepState + StepStateRunning = "RUNNING" + // @enum StepState + StepStateCompleted = "COMPLETED" + // @enum StepState + StepStateCancelled = "CANCELLED" + // @enum StepState + StepStateFailed = "FAILED" + // @enum StepState + StepStateInterrupted = "INTERRUPTED" +) + +const ( + // @enum StepStateChangeReasonCode + StepStateChangeReasonCodeNone = "NONE" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/emr/emriface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/emr/emriface/interface.go new file mode 100644 index 000000000..201478bdb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/emr/emriface/interface.go @@ -0,0 +1,92 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package emriface provides an interface for the Amazon Elastic MapReduce service. +package emriface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/emr" +) + +// EMRAPI is the interface type for emr.EMR.
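+//
+// Because every service operation appears on this interface, code can depend
+// on emriface.EMRAPI rather than the concrete *emr.EMR and substitute a test
+// double. A minimal illustrative sketch (mockEMR is a hypothetical test type,
+// not part of this package):
+//
+//    type mockEMR struct {
+//        emriface.EMRAPI
+//    }
+//
+//    func (m *mockEMR) DescribeCluster(in *emr.DescribeClusterInput) (*emr.DescribeClusterOutput, error) {
+//        // Return canned data instead of calling the service.
+//        return &emr.DescribeClusterOutput{}, nil
+//    }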
+type EMRAPI interface { + AddInstanceGroupsRequest(*emr.AddInstanceGroupsInput) (*request.Request, *emr.AddInstanceGroupsOutput) + + AddInstanceGroups(*emr.AddInstanceGroupsInput) (*emr.AddInstanceGroupsOutput, error) + + AddJobFlowStepsRequest(*emr.AddJobFlowStepsInput) (*request.Request, *emr.AddJobFlowStepsOutput) + + AddJobFlowSteps(*emr.AddJobFlowStepsInput) (*emr.AddJobFlowStepsOutput, error) + + AddTagsRequest(*emr.AddTagsInput) (*request.Request, *emr.AddTagsOutput) + + AddTags(*emr.AddTagsInput) (*emr.AddTagsOutput, error) + + DescribeClusterRequest(*emr.DescribeClusterInput) (*request.Request, *emr.DescribeClusterOutput) + + DescribeCluster(*emr.DescribeClusterInput) (*emr.DescribeClusterOutput, error) + + DescribeJobFlowsRequest(*emr.DescribeJobFlowsInput) (*request.Request, *emr.DescribeJobFlowsOutput) + + DescribeJobFlows(*emr.DescribeJobFlowsInput) (*emr.DescribeJobFlowsOutput, error) + + DescribeStepRequest(*emr.DescribeStepInput) (*request.Request, *emr.DescribeStepOutput) + + DescribeStep(*emr.DescribeStepInput) (*emr.DescribeStepOutput, error) + + ListBootstrapActionsRequest(*emr.ListBootstrapActionsInput) (*request.Request, *emr.ListBootstrapActionsOutput) + + ListBootstrapActions(*emr.ListBootstrapActionsInput) (*emr.ListBootstrapActionsOutput, error) + + ListBootstrapActionsPages(*emr.ListBootstrapActionsInput, func(*emr.ListBootstrapActionsOutput, bool) bool) error + + ListClustersRequest(*emr.ListClustersInput) (*request.Request, *emr.ListClustersOutput) + + ListClusters(*emr.ListClustersInput) (*emr.ListClustersOutput, error) + + ListClustersPages(*emr.ListClustersInput, func(*emr.ListClustersOutput, bool) bool) error + + ListInstanceGroupsRequest(*emr.ListInstanceGroupsInput) (*request.Request, *emr.ListInstanceGroupsOutput) + + ListInstanceGroups(*emr.ListInstanceGroupsInput) (*emr.ListInstanceGroupsOutput, error) + + ListInstanceGroupsPages(*emr.ListInstanceGroupsInput, func(*emr.ListInstanceGroupsOutput, bool) bool) error + + ListInstancesRequest(*emr.ListInstancesInput) (*request.Request, *emr.ListInstancesOutput) + + ListInstances(*emr.ListInstancesInput) (*emr.ListInstancesOutput, error) + + ListInstancesPages(*emr.ListInstancesInput, func(*emr.ListInstancesOutput, bool) bool) error + + ListStepsRequest(*emr.ListStepsInput) (*request.Request, *emr.ListStepsOutput) + + ListSteps(*emr.ListStepsInput) (*emr.ListStepsOutput, error) + + ListStepsPages(*emr.ListStepsInput, func(*emr.ListStepsOutput, bool) bool) error + + ModifyInstanceGroupsRequest(*emr.ModifyInstanceGroupsInput) (*request.Request, *emr.ModifyInstanceGroupsOutput) + + ModifyInstanceGroups(*emr.ModifyInstanceGroupsInput) (*emr.ModifyInstanceGroupsOutput, error) + + RemoveTagsRequest(*emr.RemoveTagsInput) (*request.Request, *emr.RemoveTagsOutput) + + RemoveTags(*emr.RemoveTagsInput) (*emr.RemoveTagsOutput, error) + + RunJobFlowRequest(*emr.RunJobFlowInput) (*request.Request, *emr.RunJobFlowOutput) + + RunJobFlow(*emr.RunJobFlowInput) (*emr.RunJobFlowOutput, error) + + SetTerminationProtectionRequest(*emr.SetTerminationProtectionInput) (*request.Request, *emr.SetTerminationProtectionOutput) + + SetTerminationProtection(*emr.SetTerminationProtectionInput) (*emr.SetTerminationProtectionOutput, error) + + SetVisibleToAllUsersRequest(*emr.SetVisibleToAllUsersInput) (*request.Request, *emr.SetVisibleToAllUsersOutput) + + SetVisibleToAllUsers(*emr.SetVisibleToAllUsersInput) (*emr.SetVisibleToAllUsersOutput, error) + + TerminateJobFlowsRequest(*emr.TerminateJobFlowsInput) (*request.Request, 
*emr.TerminateJobFlowsOutput) + + TerminateJobFlows(*emr.TerminateJobFlowsInput) (*emr.TerminateJobFlowsOutput, error) +} + +var _ EMRAPI = (*emr.EMR)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/emr/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/emr/examples_test.go new file mode 100644 index 000000000..d19414034 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/emr/examples_test.go @@ -0,0 +1,635 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package emr_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/emr" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleEMR_AddInstanceGroups() { + svc := emr.New(session.New()) + + params := &emr.AddInstanceGroupsInput{ + InstanceGroups: []*emr.InstanceGroupConfig{ // Required + { // Required + InstanceCount: aws.Int64(1), // Required + InstanceRole: aws.String("InstanceRoleType"), // Required + InstanceType: aws.String("InstanceType"), // Required + BidPrice: aws.String("XmlStringMaxLen256"), + Configurations: []*emr.Configuration{ + { // Required + Classification: aws.String("String"), + Configurations: []*emr.Configuration{ + // Recursive values... + }, + Properties: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + EbsConfiguration: &emr.EbsConfiguration{ + EbsBlockDeviceConfigs: []*emr.EbsBlockDeviceConfig{ + { // Required + VolumeSpecification: &emr.VolumeSpecification{ // Required + SizeInGB: aws.Int64(1), // Required + VolumeType: aws.String("String"), // Required + Iops: aws.Int64(1), + }, + VolumesPerInstance: aws.Int64(1), + }, + // More values... + }, + EbsOptimized: aws.Bool(true), + }, + Market: aws.String("MarketType"), + Name: aws.String("XmlStringMaxLen256"), + }, + // More values... + }, + JobFlowId: aws.String("XmlStringMaxLen256"), // Required + } + resp, err := svc.AddInstanceGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEMR_AddJobFlowSteps() { + svc := emr.New(session.New()) + + params := &emr.AddJobFlowStepsInput{ + JobFlowId: aws.String("XmlStringMaxLen256"), // Required + Steps: []*emr.StepConfig{ // Required + { // Required + HadoopJarStep: &emr.HadoopJarStepConfig{ // Required + Jar: aws.String("XmlString"), // Required + Args: []*string{ + aws.String("XmlString"), // Required + // More values... + }, + MainClass: aws.String("XmlString"), + Properties: []*emr.KeyValue{ + { // Required + Key: aws.String("XmlString"), + Value: aws.String("XmlString"), + }, + // More values... + }, + }, + Name: aws.String("XmlStringMaxLen256"), // Required + ActionOnFailure: aws.String("ActionOnFailure"), + }, + // More values... + }, + } + resp, err := svc.AddJobFlowSteps(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEMR_AddTags() { + svc := emr.New(session.New()) + + params := &emr.AddTagsInput{ + ResourceId: aws.String("ResourceId"), // Required + Tags: []*emr.Tag{ // Required + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... 
+ }, + } + resp, err := svc.AddTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEMR_DescribeCluster() { + svc := emr.New(session.New()) + + params := &emr.DescribeClusterInput{ + ClusterId: aws.String("ClusterId"), // Required + } + resp, err := svc.DescribeCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEMR_DescribeJobFlows() { + svc := emr.New(session.New()) + + params := &emr.DescribeJobFlowsInput{ + CreatedAfter: aws.Time(time.Now()), + CreatedBefore: aws.Time(time.Now()), + JobFlowIds: []*string{ + aws.String("XmlString"), // Required + // More values... + }, + JobFlowStates: []*string{ + aws.String("JobFlowExecutionState"), // Required + // More values... + }, + } + resp, err := svc.DescribeJobFlows(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEMR_DescribeStep() { + svc := emr.New(session.New()) + + params := &emr.DescribeStepInput{ + ClusterId: aws.String("ClusterId"), // Required + StepId: aws.String("StepId"), // Required + } + resp, err := svc.DescribeStep(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEMR_ListBootstrapActions() { + svc := emr.New(session.New()) + + params := &emr.ListBootstrapActionsInput{ + ClusterId: aws.String("ClusterId"), // Required + Marker: aws.String("Marker"), + } + resp, err := svc.ListBootstrapActions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEMR_ListClusters() { + svc := emr.New(session.New()) + + params := &emr.ListClustersInput{ + ClusterStates: []*string{ + aws.String("ClusterState"), // Required + // More values... + }, + CreatedAfter: aws.Time(time.Now()), + CreatedBefore: aws.Time(time.Now()), + Marker: aws.String("Marker"), + } + resp, err := svc.ListClusters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEMR_ListInstanceGroups() { + svc := emr.New(session.New()) + + params := &emr.ListInstanceGroupsInput{ + ClusterId: aws.String("ClusterId"), // Required + Marker: aws.String("Marker"), + } + resp, err := svc.ListInstanceGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEMR_ListInstances() { + svc := emr.New(session.New()) + + params := &emr.ListInstancesInput{ + ClusterId: aws.String("ClusterId"), // Required + InstanceGroupId: aws.String("InstanceGroupId"), + InstanceGroupTypes: []*string{ + aws.String("InstanceGroupType"), // Required + // More values... + }, + InstanceStates: []*string{ + aws.String("InstanceState"), // Required + // More values... + }, + Marker: aws.String("Marker"), + } + resp, err := svc.ListInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEMR_ListSteps() { + svc := emr.New(session.New()) + + params := &emr.ListStepsInput{ + ClusterId: aws.String("ClusterId"), // Required + Marker: aws.String("Marker"), + StepIds: []*string{ + aws.String("XmlString"), // Required + // More values... + }, + StepStates: []*string{ + aws.String("StepState"), // Required + // More values... + }, + } + resp, err := svc.ListSteps(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEMR_ModifyInstanceGroups() { + svc := emr.New(session.New()) + + params := &emr.ModifyInstanceGroupsInput{ + InstanceGroups: []*emr.InstanceGroupModifyConfig{ + { // Required + InstanceGroupId: aws.String("XmlStringMaxLen256"), // Required + EC2InstanceIdsToTerminate: []*string{ + aws.String("InstanceId"), // Required + // More values... + }, + InstanceCount: aws.Int64(1), + ShrinkPolicy: &emr.ShrinkPolicy{ + DecommissionTimeout: aws.Int64(1), + InstanceResizePolicy: &emr.InstanceResizePolicy{ + InstanceTerminationTimeout: aws.Int64(1), + InstancesToProtect: []*string{ + aws.String("InstanceId"), // Required + // More values... + }, + InstancesToTerminate: []*string{ + aws.String("InstanceId"), // Required + // More values... + }, + }, + }, + }, + // More values... + }, + } + resp, err := svc.ModifyInstanceGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEMR_RemoveTags() { + svc := emr.New(session.New()) + + params := &emr.RemoveTagsInput{ + ResourceId: aws.String("ResourceId"), // Required + TagKeys: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.RemoveTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEMR_RunJobFlow() { + svc := emr.New(session.New()) + + params := &emr.RunJobFlowInput{ + Instances: &emr.JobFlowInstancesConfig{ // Required + AdditionalMasterSecurityGroups: []*string{ + aws.String("XmlStringMaxLen256"), // Required + // More values... + }, + AdditionalSlaveSecurityGroups: []*string{ + aws.String("XmlStringMaxLen256"), // Required + // More values... 
+ }, + Ec2KeyName: aws.String("XmlStringMaxLen256"), + Ec2SubnetId: aws.String("XmlStringMaxLen256"), + EmrManagedMasterSecurityGroup: aws.String("XmlStringMaxLen256"), + EmrManagedSlaveSecurityGroup: aws.String("XmlStringMaxLen256"), + HadoopVersion: aws.String("XmlStringMaxLen256"), + InstanceCount: aws.Int64(1), + InstanceGroups: []*emr.InstanceGroupConfig{ + { // Required + InstanceCount: aws.Int64(1), // Required + InstanceRole: aws.String("InstanceRoleType"), // Required + InstanceType: aws.String("InstanceType"), // Required + BidPrice: aws.String("XmlStringMaxLen256"), + Configurations: []*emr.Configuration{ + { // Required + Classification: aws.String("String"), + Configurations: []*emr.Configuration{ + // Recursive values... + }, + Properties: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + EbsConfiguration: &emr.EbsConfiguration{ + EbsBlockDeviceConfigs: []*emr.EbsBlockDeviceConfig{ + { // Required + VolumeSpecification: &emr.VolumeSpecification{ // Required + SizeInGB: aws.Int64(1), // Required + VolumeType: aws.String("String"), // Required + Iops: aws.Int64(1), + }, + VolumesPerInstance: aws.Int64(1), + }, + // More values... + }, + EbsOptimized: aws.Bool(true), + }, + Market: aws.String("MarketType"), + Name: aws.String("XmlStringMaxLen256"), + }, + // More values... + }, + KeepJobFlowAliveWhenNoSteps: aws.Bool(true), + MasterInstanceType: aws.String("InstanceType"), + Placement: &emr.PlacementType{ + AvailabilityZone: aws.String("XmlString"), // Required + }, + ServiceAccessSecurityGroup: aws.String("XmlStringMaxLen256"), + SlaveInstanceType: aws.String("InstanceType"), + TerminationProtected: aws.Bool(true), + }, + Name: aws.String("XmlStringMaxLen256"), // Required + AdditionalInfo: aws.String("XmlString"), + AmiVersion: aws.String("XmlStringMaxLen256"), + Applications: []*emr.Application{ + { // Required + AdditionalInfo: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + Args: []*string{ + aws.String("String"), // Required + // More values... + }, + Name: aws.String("String"), + Version: aws.String("String"), + }, + // More values... + }, + BootstrapActions: []*emr.BootstrapActionConfig{ + { // Required + Name: aws.String("XmlStringMaxLen256"), // Required + ScriptBootstrapAction: &emr.ScriptBootstrapActionConfig{ // Required + Path: aws.String("XmlString"), // Required + Args: []*string{ + aws.String("XmlString"), // Required + // More values... + }, + }, + }, + // More values... + }, + Configurations: []*emr.Configuration{ + { // Required + Classification: aws.String("String"), + Configurations: []*emr.Configuration{ + // Recursive values... + }, + Properties: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + JobFlowRole: aws.String("XmlString"), + LogUri: aws.String("XmlString"), + NewSupportedProducts: []*emr.SupportedProductConfig{ + { // Required + Args: []*string{ + aws.String("XmlString"), // Required + // More values... + }, + Name: aws.String("XmlStringMaxLen256"), + }, + // More values... + }, + ReleaseLabel: aws.String("XmlStringMaxLen256"), + ServiceRole: aws.String("XmlString"), + Steps: []*emr.StepConfig{ + { // Required + HadoopJarStep: &emr.HadoopJarStepConfig{ // Required + Jar: aws.String("XmlString"), // Required + Args: []*string{ + aws.String("XmlString"), // Required + // More values... 
+ }, + MainClass: aws.String("XmlString"), + Properties: []*emr.KeyValue{ + { // Required + Key: aws.String("XmlString"), + Value: aws.String("XmlString"), + }, + // More values... + }, + }, + Name: aws.String("XmlStringMaxLen256"), // Required + ActionOnFailure: aws.String("ActionOnFailure"), + }, + // More values... + }, + SupportedProducts: []*string{ + aws.String("XmlStringMaxLen256"), // Required + // More values... + }, + Tags: []*emr.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + VisibleToAllUsers: aws.Bool(true), + } + resp, err := svc.RunJobFlow(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEMR_SetTerminationProtection() { + svc := emr.New(session.New()) + + params := &emr.SetTerminationProtectionInput{ + JobFlowIds: []*string{ // Required + aws.String("XmlString"), // Required + // More values... + }, + TerminationProtected: aws.Bool(true), // Required + } + resp, err := svc.SetTerminationProtection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEMR_SetVisibleToAllUsers() { + svc := emr.New(session.New()) + + params := &emr.SetVisibleToAllUsersInput{ + JobFlowIds: []*string{ // Required + aws.String("XmlString"), // Required + // More values... + }, + VisibleToAllUsers: aws.Bool(true), // Required + } + resp, err := svc.SetVisibleToAllUsers(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEMR_TerminateJobFlows() { + svc := emr.New(session.New()) + + params := &emr.TerminateJobFlowsInput{ + JobFlowIds: []*string{ // Required + aws.String("XmlString"), // Required + // More values... + }, + } + resp, err := svc.TerminateJobFlows(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/emr/service.go b/vendor/github.com/aws/aws-sdk-go/service/emr/service.go new file mode 100644 index 000000000..755e1699a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/emr/service.go @@ -0,0 +1,92 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package emr + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// Amazon Elastic MapReduce (Amazon EMR) is a web service that makes it easy +// to process large amounts of data efficiently. Amazon EMR uses Hadoop processing +// combined with several AWS products to do tasks such as web indexing, data +// mining, log file analysis, machine learning, scientific simulation, and data +// warehousing. +// The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though.
+type EMR struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "elasticmapreduce" + +// New creates a new instance of the EMR client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create an EMR client from just a session. +// svc := emr.New(mySession) +// +// // Create an EMR client with additional configuration +// svc := emr.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *EMR { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *EMR { + svc := &EMR{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2009-03-31", + JSONVersion: "1.1", + TargetPrefix: "ElasticMapReduce", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for an EMR operation and runs any +// custom request initialization. +func (c *EMR) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/emr/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/emr/waiters.go new file mode 100644 index 000000000..07c30c5cc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/emr/waiters.go @@ -0,0 +1,89 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+ +package emr + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *EMR) WaitUntilClusterRunning(input *DescribeClusterInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeCluster", + Delay: 30, + MaxAttempts: 60, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "path", + Argument: "Cluster.Status.State", + Expected: "RUNNING", + }, + { + State: "success", + Matcher: "path", + Argument: "Cluster.Status.State", + Expected: "WAITING", + }, + { + State: "failure", + Matcher: "path", + Argument: "Cluster.Status.State", + Expected: "TERMINATING", + }, + { + State: "failure", + Matcher: "path", + Argument: "Cluster.Status.State", + Expected: "TERMINATED", + }, + { + State: "failure", + Matcher: "path", + Argument: "Cluster.Status.State", + Expected: "TERMINATED_WITH_ERRORS", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EMR) WaitUntilStepComplete(input *DescribeStepInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeStep", + Delay: 30, + MaxAttempts: 60, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "path", + Argument: "Step.Status.State", + Expected: "COMPLETED", + }, + { + State: "failure", + Matcher: "path", + Argument: "Step.Status.State", + Expected: "FAILED", + }, + { + State: "failure", + Matcher: "path", + Argument: "Step.Status.State", + Expected: "CANCELLED", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/firehose/api.go b/vendor/github.com/aws/aws-sdk-go/service/firehose/api.go new file mode 100644 index 000000000..e9eb2fdee --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/firehose/api.go @@ -0,0 +1,2132 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package firehose provides a client for Amazon Kinesis Firehose. +package firehose + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opCreateDeliveryStream = "CreateDeliveryStream" + +// CreateDeliveryStreamRequest generates a "aws/request.Request" representing the +// client's request for the CreateDeliveryStream operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDeliveryStream method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDeliveryStreamRequest method. 
+// req, resp := client.CreateDeliveryStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Firehose) CreateDeliveryStreamRequest(input *CreateDeliveryStreamInput) (req *request.Request, output *CreateDeliveryStreamOutput) { + op := &request.Operation{ + Name: opCreateDeliveryStream, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDeliveryStreamInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDeliveryStreamOutput{} + req.Data = output + return +} + +// Creates a delivery stream. +// +// CreateDeliveryStream is an asynchronous operation that immediately returns. +// The initial status of the delivery stream is CREATING. After the delivery +// stream is created, its status is ACTIVE and it now accepts data. Attempts +// to send data to a delivery stream that is not in the ACTIVE state cause an +// exception. To check the state of a delivery stream, use DescribeDeliveryStream. +// +// The name of a delivery stream identifies it. You can't have two delivery +// streams with the same name in the same region. Two delivery streams in different +// AWS accounts or different regions in the same AWS account can have the same +// name. +// +// By default, you can create up to 20 delivery streams per region. +// +// A delivery stream can only be configured with a single destination: Amazon +// S3, Amazon Elasticsearch Service, or Amazon Redshift. For correct CreateDeliveryStream +// request syntax, specify only one destination configuration parameter: either +// S3DestinationConfiguration, ElasticsearchDestinationConfiguration, or RedshiftDestinationConfiguration. +// +// As part of S3DestinationConfiguration, optional values BufferingHints, EncryptionConfiguration, +// and CompressionFormat can be provided. By default, if no BufferingHints value +// is provided, Firehose buffers data up to 5 MB or for 5 minutes, whichever +// condition is satisfied first. Note that BufferingHints is a hint, so there +// are some cases where the service cannot adhere to these conditions strictly; +// for example, record boundaries are such that the size is a little over or +// under the configured buffering size. By default, no encryption is performed. +// We strongly recommend that you enable encryption to ensure secure data storage +// in Amazon S3. +// +// A few notes about RedshiftDestinationConfiguration: +// +// An Amazon Redshift destination requires an S3 bucket as an intermediate location, +// as Firehose first delivers data to S3 and then uses COPY syntax to load data +// into an Amazon Redshift table. This is specified in the RedshiftDestinationConfiguration.S3Configuration +// parameter element. +// +// The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationConfiguration.S3Configuration +// because the Amazon Redshift COPY operation that reads from the S3 bucket +// doesn't support these compression formats. +// +// We strongly recommend that the username and password provided are used +// exclusively for Firehose purposes, and that the permissions for the account +// are restricted to Amazon Redshift INSERT permissions. +// +// Firehose assumes the IAM role that is configured as part of destinations. +// The IAM role should allow the Firehose principal to assume the role, and +// the role should have permissions that allow the service to deliver the data.
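+//
+// As a minimal illustrative sketch of an S3-only stream (the stream name,
+// bucket ARN, and role ARN below are placeholder values, not defaults of
+// this API):
+//
+//    resp, err := svc.CreateDeliveryStream(&firehose.CreateDeliveryStreamInput{
+//        DeliveryStreamName: aws.String("example-stream"),
+//        S3DestinationConfiguration: &firehose.S3DestinationConfiguration{
+//            BucketARN: aws.String("arn:aws:s3:::example-bucket"),
+//            RoleARN:   aws.String("arn:aws:iam::111122223333:role/example-firehose-role"),
+//        },
+//    })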
+// For more information, see Amazon S3 Bucket Access (http://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3) +// in the Amazon Kinesis Firehose Developer Guide. +func (c *Firehose) CreateDeliveryStream(input *CreateDeliveryStreamInput) (*CreateDeliveryStreamOutput, error) { + req, out := c.CreateDeliveryStreamRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDeliveryStream = "DeleteDeliveryStream" + +// DeleteDeliveryStreamRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDeliveryStream operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDeliveryStream method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteDeliveryStreamRequest method. +// req, resp := client.DeleteDeliveryStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Firehose) DeleteDeliveryStreamRequest(input *DeleteDeliveryStreamInput) (req *request.Request, output *DeleteDeliveryStreamOutput) { + op := &request.Operation{ + Name: opDeleteDeliveryStream, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDeliveryStreamInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteDeliveryStreamOutput{} + req.Data = output + return +} + +// Deletes a delivery stream and its data. +// +// You can delete a delivery stream only if it is in the ACTIVE or DELETING state, +// and not in the CREATING state. While the deletion request is in process, +// the delivery stream is in the DELETING state. +// +// To check the state of a delivery stream, use DescribeDeliveryStream. +// +// While the delivery stream is in the DELETING state, the service may continue to +// accept the records, but the service doesn't make any guarantees with respect +// to delivering the data. Therefore, as a best practice, you should first stop +// any applications that are sending records before deleting a delivery stream. +func (c *Firehose) DeleteDeliveryStream(input *DeleteDeliveryStreamInput) (*DeleteDeliveryStreamOutput, error) { + req, out := c.DeleteDeliveryStreamRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDeliveryStream = "DescribeDeliveryStream" + +// DescribeDeliveryStreamRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDeliveryStream operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDeliveryStream method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request.
+// +// // Example sending a request using the DescribeDeliveryStreamRequest method. +// req, resp := client.DescribeDeliveryStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Firehose) DescribeDeliveryStreamRequest(input *DescribeDeliveryStreamInput) (req *request.Request, output *DescribeDeliveryStreamOutput) { + op := &request.Operation{ + Name: opDescribeDeliveryStream, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDeliveryStreamInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDeliveryStreamOutput{} + req.Data = output + return +} + +// Describes the specified delivery stream and gets the status. For example, +// after your delivery stream is created, call DescribeDeliveryStream to see +// if the delivery stream is ACTIVE and therefore ready for data to be sent +// to it. +func (c *Firehose) DescribeDeliveryStream(input *DescribeDeliveryStreamInput) (*DescribeDeliveryStreamOutput, error) { + req, out := c.DescribeDeliveryStreamRequest(input) + err := req.Send() + return out, err +} + +const opListDeliveryStreams = "ListDeliveryStreams" + +// ListDeliveryStreamsRequest generates a "aws/request.Request" representing the +// client's request for the ListDeliveryStreams operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListDeliveryStreams method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListDeliveryStreamsRequest method. +// req, resp := client.ListDeliveryStreamsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Firehose) ListDeliveryStreamsRequest(input *ListDeliveryStreamsInput) (req *request.Request, output *ListDeliveryStreamsOutput) { + op := &request.Operation{ + Name: opListDeliveryStreams, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListDeliveryStreamsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListDeliveryStreamsOutput{} + req.Data = output + return +} + +// Lists your delivery streams. +// +// The number of delivery streams might be too large to return using a single +// call to ListDeliveryStreams. You can limit the number of delivery streams +// returned, using the Limit parameter. To determine whether there are more +// delivery streams to list, check the value of HasMoreDeliveryStreams in the +// output. If there are more delivery streams to list, you can request them +// by specifying the name of the last delivery stream returned in the call in +// the ExclusiveStartDeliveryStreamName parameter of a subsequent call. +func (c *Firehose) ListDeliveryStreams(input *ListDeliveryStreamsInput) (*ListDeliveryStreamsOutput, error) { + req, out := c.ListDeliveryStreamsRequest(input) + err := req.Send() + return out, err +} + +const opPutRecord = "PutRecord" + +// PutRecordRequest generates a "aws/request.Request" representing the +// client's request for the PutRecord operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutRecord method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutRecordRequest method. +// req, resp := client.PutRecordRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Firehose) PutRecordRequest(input *PutRecordInput) (req *request.Request, output *PutRecordOutput) { + op := &request.Operation{ + Name: opPutRecord, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutRecordInput{} + } + + req = c.newRequest(op, input, output) + output = &PutRecordOutput{} + req.Data = output + return +} + +// Writes a single data record into an Amazon Kinesis Firehose delivery stream. +// To write multiple data records into a delivery stream, use PutRecordBatch. +// Applications using these operations are referred to as producers. +// +// By default, each delivery stream can take in up to 2,000 transactions per +// second, 5,000 records per second, or 5 MB per second. Note that if you use +// PutRecord and PutRecordBatch, the limits are an aggregate across these two +// operations for each delivery stream. For more information about limits and +// how to request an increase, see Amazon Kinesis Firehose Limits (http://docs.aws.amazon.com/firehose/latest/dev/limits.html). +// +// You must specify the name of the delivery stream and the data record when +// using PutRecord. The data record consists of a data blob that can be up to +// 1,000 KB in size, and any kind of data, for example, a segment from a log +// file, geographic location data, web site clickstream data, etc. +// +// Firehose buffers records before delivering them to the destination. To disambiguate +// the data blobs at the destination, a common solution is to use delimiters +// in the data, such as a newline (\n) or some other character unique within +// the data. This allows the consumer application(s) to parse individual data +// items when reading the data from the destination. +// +// The PutRecord operation returns a RecordId, which is a unique string assigned +// to each record. Producer applications can use this ID for purposes such as +// auditability and investigation. +// +// If the PutRecord operation throws a ServiceUnavailableException, back off +// and retry. If the exception persists, it is possible that the throughput +// limits have been exceeded for the delivery stream. +// +// Data records sent to Firehose are stored for 24 hours from the time they +// are added to a delivery stream as it attempts to send the records to the +// destination. If the destination is unreachable for more than 24 hours, the +// data is no longer available. 
+func (c *Firehose) PutRecord(input *PutRecordInput) (*PutRecordOutput, error) {
+	req, out := c.PutRecordRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opPutRecordBatch = "PutRecordBatch"
+
+// PutRecordBatchRequest generates a "aws/request.Request" representing the
+// client's request for the PutRecordBatch operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutRecordBatch method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the PutRecordBatchRequest method.
+//    req, resp := client.PutRecordBatchRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Firehose) PutRecordBatchRequest(input *PutRecordBatchInput) (req *request.Request, output *PutRecordBatchOutput) {
+	op := &request.Operation{
+		Name:       opPutRecordBatch,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &PutRecordBatchInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &PutRecordBatchOutput{}
+	req.Data = output
+	return
+}
+
+// Writes multiple data records into a delivery stream in a single call, which
+// can achieve higher throughput per producer than when writing single records.
+// To write single data records into a delivery stream, use PutRecord. Applications
+// using these operations are referred to as producers.
+//
+// Each PutRecordBatch request supports up to 500 records. Each record in the
+// request can be as large as 1,000 KB (before base64 encoding), up to a limit
+// of 4 MB for the entire request. By default, each delivery stream can take
+// in up to 2,000 transactions per second, 5,000 records per second, or 5 MB
+// per second. Note that if you use PutRecord and PutRecordBatch, the limits
+// are an aggregate across these two operations for each delivery stream. For
+// more information about limits and how to request an increase, see Amazon
+// Kinesis Firehose Limits (http://docs.aws.amazon.com/firehose/latest/dev/limits.html).
+//
+// You must specify the name of the delivery stream and the data record when
+// using PutRecordBatch. The data record consists of a data blob that can be
+// up to 1,000 KB in size, and any kind of data, for example, a segment from
+// a log file, geographic location data, web site clickstream data, and so on.
+//
+// Firehose buffers records before delivering them to the destination. To disambiguate
+// the data blobs at the destination, a common solution is to use delimiters
+// in the data, such as a newline (\n) or some other character unique within
+// the data. This allows the consumer application(s) to parse individual data
+// items when reading the data from the destination.
+//
+// The PutRecordBatch response includes a count of any failed records, FailedPutCount,
+// and an array of responses, RequestResponses. The FailedPutCount value is
+// a count of records that failed. Each entry in the RequestResponses array
+// gives additional information about the processed record.
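+//
+// A non-normative sketch of the selective-retry guidance expanded on just
+// below (svc, streamName, and records are assumed, illustrative identifiers
+// for a configured *firehose.Firehose, a stream name, and a []*firehose.Record):
+//
+//    out, err := svc.PutRecordBatch(&firehose.PutRecordBatchInput{
+//        DeliveryStreamName: aws.String(streamName),
+//        Records:            records,
+//    })
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    if aws.Int64Value(out.FailedPutCount) > 0 {
+//        // RequestResponses lines up one-to-one with the request records,
+//        // so collect only the entries that carry an ErrorCode and resend
+//        // just those records (with backoff) rather than the whole batch.
+//        var retry []*firehose.Record
+//        for i, entry := range out.RequestResponses {
+//            if entry.ErrorCode != nil {
+//                retry = append(retry, records[i])
+//            }
+//        }
+//        records = retry
+//    }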
+// Each entry in RequestResponses directly correlates with a record in the
+// request array using the same ordering, from the top to the bottom of the
+// request and response. RequestResponses always includes the same number of
+// records as the request array. RequestResponses includes both successfully
+// and unsuccessfully processed records. Firehose attempts to process all
+// records in each PutRecordBatch request. A single record failure does not
+// stop the processing of subsequent records.
+//
+// A successfully processed record includes a RecordId value, which is a unique
+// identifier for the record. An unsuccessfully processed record includes
+// ErrorCode and ErrorMessage values. ErrorCode reflects the type of error and
+// is one of the following values: ServiceUnavailable or InternalFailure. ErrorMessage
+// provides more detailed information about the error.
+//
+// If FailedPutCount is greater than 0 (zero), retry the request. A retry of
+// the entire batch of records is possible; however, we strongly recommend that
+// you inspect the entire response and resend only those records that failed
+// processing. This minimizes duplicate records and also reduces the total bytes
+// sent (and corresponding charges).
+//
+// If the PutRecordBatch operation throws a ServiceUnavailableException, back
+// off and retry. If the exception persists, it is possible that the throughput
+// limits have been exceeded for the delivery stream.
+//
+// Data records sent to Firehose are stored for 24 hours from the time they
+// are added to a delivery stream as it attempts to send the records to the
+// destination. If the destination is unreachable for more than 24 hours, the
+// data is no longer available.
+func (c *Firehose) PutRecordBatch(input *PutRecordBatchInput) (*PutRecordBatchOutput, error) {
+	req, out := c.PutRecordBatchRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opUpdateDestination = "UpdateDestination"
+
+// UpdateDestinationRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateDestination operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the UpdateDestination method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the UpdateDestinationRequest method.
+//    req, resp := client.UpdateDestinationRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Firehose) UpdateDestinationRequest(input *UpdateDestinationInput) (req *request.Request, output *UpdateDestinationOutput) {
+	op := &request.Operation{
+		Name:       opUpdateDestination,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UpdateDestinationInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &UpdateDestinationOutput{}
+	req.Data = output
+	return
+}
+
+// Updates the specified destination of the specified delivery stream. Note:
+// Switching between Elasticsearch and other services is not supported.
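+//
+// A minimal, non-normative sketch of the read-modify-write flow described in
+// the rest of this comment: fetch the current VersionId with DescribeDeliveryStream,
+// then pass it back as CurrentDeliveryStreamVersionId (svc and streamName are
+// assumed, illustrative identifiers; the new prefix value is hypothetical, and
+// the stream is assumed to have at least one destination):
+//
+//    desc, err := svc.DescribeDeliveryStream(&firehose.DescribeDeliveryStreamInput{
+//        DeliveryStreamName: aws.String(streamName),
+//    })
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    d := desc.DeliveryStreamDescription
+//    _, err = svc.UpdateDestination(&firehose.UpdateDestinationInput{
+//        DeliveryStreamName:             aws.String(streamName),
+//        CurrentDeliveryStreamVersionId: d.VersionId,
+//        DestinationId:                  d.Destinations[0].DestinationId,
+//        S3DestinationUpdate: &firehose.S3DestinationUpdate{
+//            Prefix: aws.String("events/"), // hypothetical new S3 prefix
+//        },
+//    })
+//    if err != nil {
+//        log.Fatal(err)
+//    }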
+// For an Elasticsearch destination, you can only update an existing Elasticsearch
+// destination with this operation.
+//
+// This operation can be used to change the destination type (for example,
+// to replace the Amazon S3 destination with Amazon Redshift) or change the
+// parameters associated with a given destination (for example, to change the
+// bucket name of the Amazon S3 destination). The update may not occur immediately.
+// The target delivery stream remains active while the configurations are updated,
+// so data writes to the delivery stream can continue during this process. The
+// updated configurations are normally effective within a few minutes.
+//
+// If the destination type is the same, Firehose merges the configuration parameters
+// specified in the UpdateDestination request with the destination configuration
+// that already exists on the delivery stream. If any of the parameters are
+// not specified in the update request, then the existing configuration parameters
+// are retained. For example, in the Amazon S3 destination, if EncryptionConfiguration
+// is not specified then the existing EncryptionConfiguration is maintained
+// on the destination.
+//
+// If the destination type is not the same, for example, changing the destination
+// from Amazon S3 to Amazon Redshift, Firehose does not merge any parameters.
+// In this case, all parameters must be specified.
+//
+// Firehose uses the CurrentDeliveryStreamVersionId to avoid race conditions
+// and conflicting merges. This is a required field in every request and the
+// service only updates the configuration if the existing configuration matches
+// the VersionId. After the update is applied successfully, the VersionId is
+// updated, which can be retrieved with the DescribeDeliveryStream operation.
+// The new VersionId should be used to set CurrentDeliveryStreamVersionId in
+// the next UpdateDestination operation.
+func (c *Firehose) UpdateDestination(input *UpdateDestinationInput) (*UpdateDestinationOutput, error) {
+	req, out := c.UpdateDestinationRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// Describes hints for the buffering to perform before delivering data to the
+// destination. Please note that these options are treated as hints, and therefore
+// Firehose may choose to use different values when it is optimal.
+type BufferingHints struct {
+	_ struct{} `type:"structure"`
+
+	// Buffer incoming data for the specified period of time, in seconds, before
+	// delivering it to the destination. The default value is 300.
+	IntervalInSeconds *int64 `min:"60" type:"integer"`
+
+	// Buffer incoming data to the specified size, in MBs, before delivering it
+	// to the destination. The default value is 5.
+	//
+	// We recommend setting SizeInMBs to a value greater than the amount of data
+	// you typically ingest into the delivery stream in 10 seconds. For example,
+	// if you typically ingest data at 1 MB/sec, set SizeInMBs to be 10 MB or higher.
+	SizeInMBs *int64 `min:"1" type:"integer"`
+}
+
+// String returns the string representation
+func (s BufferingHints) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BufferingHints) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
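+//
+// For instance (an illustrative, non-normative construction that follows the
+// 1 MB/sec sizing guidance above), hints that pass this validation:
+//
+//    hints := &firehose.BufferingHints{
+//        IntervalInSeconds: aws.Int64(300),
+//        SizeInMBs:         aws.Int64(10), // ~10 seconds of ingest at 1 MB/sec
+//    }
+//    if err := hints.Validate(); err != nil {
+//        log.Fatal(err)
+//    }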
+func (s *BufferingHints) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "BufferingHints"}
+	if s.IntervalInSeconds != nil && *s.IntervalInSeconds < 60 {
+		invalidParams.Add(request.NewErrParamMinValue("IntervalInSeconds", 60))
+	}
+	if s.SizeInMBs != nil && *s.SizeInMBs < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("SizeInMBs", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Describes CloudWatch logging options for your delivery stream.
+type CloudWatchLoggingOptions struct {
+	_ struct{} `type:"structure"`
+
+	// Enables or disables CloudWatch logging.
+	Enabled *bool `type:"boolean"`
+
+	// The CloudWatch group name for logging. This value is required if Enabled
+	// is true.
+	LogGroupName *string `type:"string"`
+
+	// The CloudWatch log stream name for logging. This value is required if Enabled
+	// is true.
+	LogStreamName *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CloudWatchLoggingOptions) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CloudWatchLoggingOptions) GoString() string {
+	return s.String()
+}
+
+// Describes a COPY command for Amazon Redshift.
+type CopyCommand struct {
+	_ struct{} `type:"structure"`
+
+	// Optional parameters to use with the Amazon Redshift COPY command. For more
+	// information, see the "Optional Parameters" section of Amazon Redshift COPY
+	// command (http://docs.aws.amazon.com/redshift/latest/dg/r_COPY.html). Some
+	// possible examples that would apply to Firehose are as follows.
+	//
+	// delimiter '\t' lzop; - fields are delimited with "\t" (TAB character) and
+	// compressed using lzop.
+	//
+	// delimiter '|' - fields are delimited with "|" (this is the default delimiter).
+	//
+	// delimiter '|' escape - the delimiter should be escaped.
+	//
+	// fixedwidth 'venueid:3,venuename:25,venuecity:12,venuestate:2,venueseats:6'
+	// - fields are fixed width in the source, with each width specified after every
+	// column in the table.
+	//
+	// JSON 's3://mybucket/jsonpaths.txt' - data is in JSON format, and the path
+	// specified is the format of the data.
+	//
+	// For more examples, see Amazon Redshift COPY command examples (http://docs.aws.amazon.com/redshift/latest/dg/r_COPY_command_examples.html).
+	CopyOptions *string `type:"string"`
+
+	// A comma-separated list of column names.
+	DataTableColumns *string `type:"string"`
+
+	// The name of the target table. The table must already exist in the database.
+	DataTableName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CopyCommand) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CopyCommand) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CopyCommand) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CopyCommand"}
+	if s.DataTableName == nil {
+		invalidParams.Add(request.NewErrParamRequired("DataTableName"))
+	}
+	if s.DataTableName != nil && len(*s.DataTableName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("DataTableName", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Contains the parameters for CreateDeliveryStream.
+type CreateDeliveryStreamInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the delivery stream.
+ DeliveryStreamName *string `min:"1" type:"string" required:"true"` + + // The destination in Amazon ES. This value cannot be specified if Amazon S3 + // or Amazon Redshift is the desired destination (see restrictions listed above). + ElasticsearchDestinationConfiguration *ElasticsearchDestinationConfiguration `type:"structure"` + + // The destination in Amazon Redshift. This value cannot be specified if Amazon + // S3 or Amazon Elasticsearch is the desired destination (see restrictions listed + // above). + RedshiftDestinationConfiguration *RedshiftDestinationConfiguration `type:"structure"` + + // The destination in Amazon S3. This value must be specified if ElasticsearchDestinationConfiguration + // or RedshiftDestinationConfiguration is specified (see restrictions listed + // above). + S3DestinationConfiguration *S3DestinationConfiguration `type:"structure"` +} + +// String returns the string representation +func (s CreateDeliveryStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDeliveryStreamInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDeliveryStreamInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDeliveryStreamInput"} + if s.DeliveryStreamName == nil { + invalidParams.Add(request.NewErrParamRequired("DeliveryStreamName")) + } + if s.DeliveryStreamName != nil && len(*s.DeliveryStreamName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DeliveryStreamName", 1)) + } + if s.ElasticsearchDestinationConfiguration != nil { + if err := s.ElasticsearchDestinationConfiguration.Validate(); err != nil { + invalidParams.AddNested("ElasticsearchDestinationConfiguration", err.(request.ErrInvalidParams)) + } + } + if s.RedshiftDestinationConfiguration != nil { + if err := s.RedshiftDestinationConfiguration.Validate(); err != nil { + invalidParams.AddNested("RedshiftDestinationConfiguration", err.(request.ErrInvalidParams)) + } + } + if s.S3DestinationConfiguration != nil { + if err := s.S3DestinationConfiguration.Validate(); err != nil { + invalidParams.AddNested("S3DestinationConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of CreateDeliveryStream. +type CreateDeliveryStreamOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the delivery stream. + DeliveryStreamARN *string `type:"string"` +} + +// String returns the string representation +func (s CreateDeliveryStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDeliveryStreamOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DeleteDeliveryStream. +type DeleteDeliveryStreamInput struct { + _ struct{} `type:"structure"` + + // The name of the delivery stream. + DeliveryStreamName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDeliveryStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDeliveryStreamInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
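+//
+// A hedged sketch (all values here are hypothetical placeholders; svc is an
+// assumed, configured *firehose.Firehose) showing client-side validation of a
+// CreateDeliveryStream request before it is sent:
+//
+//    input := &firehose.CreateDeliveryStreamInput{
+//        DeliveryStreamName: aws.String("example-stream"),
+//        S3DestinationConfiguration: &firehose.S3DestinationConfiguration{
+//            BucketARN:         aws.String("arn:aws:s3:::example-firehose-bucket"),
+//            RoleARN:           aws.String("arn:aws:iam::123456789012:role/example-role"),
+//            CompressionFormat: aws.String("GZIP"),
+//        },
+//    }
+//    if err := input.Validate(); err != nil {
+//        log.Fatal(err) // catches missing or too-short required fields locally
+//    }
+//    if _, err := svc.CreateDeliveryStream(input); err != nil {
+//        log.Fatal(err)
+//    }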
+func (s *DeleteDeliveryStreamInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDeliveryStreamInput"} + if s.DeliveryStreamName == nil { + invalidParams.Add(request.NewErrParamRequired("DeliveryStreamName")) + } + if s.DeliveryStreamName != nil && len(*s.DeliveryStreamName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DeliveryStreamName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of DeleteDeliveryStream. +type DeleteDeliveryStreamOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDeliveryStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDeliveryStreamOutput) GoString() string { + return s.String() +} + +// Contains information about a delivery stream. +type DeliveryStreamDescription struct { + _ struct{} `type:"structure"` + + // The date and time that the delivery stream was created. + CreateTimestamp *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The Amazon Resource Name (ARN) of the delivery stream. + DeliveryStreamARN *string `type:"string" required:"true"` + + // The name of the delivery stream. + DeliveryStreamName *string `min:"1" type:"string" required:"true"` + + // The status of the delivery stream. + DeliveryStreamStatus *string `type:"string" required:"true" enum:"DeliveryStreamStatus"` + + // The destinations. + Destinations []*DestinationDescription `type:"list" required:"true"` + + // Indicates whether there are more destinations available to list. + HasMoreDestinations *bool `type:"boolean" required:"true"` + + // The date and time that the delivery stream was last updated. + LastUpdateTimestamp *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Used when calling the UpdateDestination operation. Each time the destination + // is updated for the delivery stream, the VersionId is changed, and the current + // VersionId is required when updating the destination. This is so that the + // service knows it is applying the changes to the correct version of the delivery + // stream. + VersionId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeliveryStreamDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeliveryStreamDescription) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeDeliveryStream. +type DescribeDeliveryStreamInput struct { + _ struct{} `type:"structure"` + + // The name of the delivery stream. + DeliveryStreamName *string `min:"1" type:"string" required:"true"` + + // Specifies the destination ID to start returning the destination information. + // Currently Firehose supports one destination per delivery stream. + ExclusiveStartDestinationId *string `min:"1" type:"string"` + + // The limit on the number of destinations to return. Currently, you can have + // one destination per delivery stream. + Limit *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s DescribeDeliveryStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDeliveryStreamInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeDeliveryStreamInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDeliveryStreamInput"} + if s.DeliveryStreamName == nil { + invalidParams.Add(request.NewErrParamRequired("DeliveryStreamName")) + } + if s.DeliveryStreamName != nil && len(*s.DeliveryStreamName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DeliveryStreamName", 1)) + } + if s.ExclusiveStartDestinationId != nil && len(*s.ExclusiveStartDestinationId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ExclusiveStartDestinationId", 1)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of DescribeDeliveryStream. +type DescribeDeliveryStreamOutput struct { + _ struct{} `type:"structure"` + + // Information about the delivery stream. + DeliveryStreamDescription *DeliveryStreamDescription `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DescribeDeliveryStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDeliveryStreamOutput) GoString() string { + return s.String() +} + +// Describes the destination for a delivery stream. +type DestinationDescription struct { + _ struct{} `type:"structure"` + + // The ID of the destination. + DestinationId *string `min:"1" type:"string" required:"true"` + + // The destination in Amazon ES. + ElasticsearchDestinationDescription *ElasticsearchDestinationDescription `type:"structure"` + + // The destination in Amazon Redshift. + RedshiftDestinationDescription *RedshiftDestinationDescription `type:"structure"` + + // The Amazon S3 destination. + S3DestinationDescription *S3DestinationDescription `type:"structure"` +} + +// String returns the string representation +func (s DestinationDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DestinationDescription) GoString() string { + return s.String() +} + +// Describes the buffering to perform before delivering data to the Amazon ES +// destination. +type ElasticsearchBufferingHints struct { + _ struct{} `type:"structure"` + + // Buffer incoming data for the specified period of time, in seconds, before + // delivering it to the destination. The default value is 300 (5 minutes). + IntervalInSeconds *int64 `min:"60" type:"integer"` + + // Buffer incoming data to the specified size, in MBs, before delivering it + // to the destination. The default value is 5. + // + // We recommend setting SizeInMBs to a value greater than the amount of data + // you typically ingest into the delivery stream in 10 seconds. For example, + // if you typically ingest data at 1 MB/sec, set SizeInMBs to be 10 MB or higher. + SizeInMBs *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s ElasticsearchBufferingHints) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ElasticsearchBufferingHints) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ElasticsearchBufferingHints) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ElasticsearchBufferingHints"} + if s.IntervalInSeconds != nil && *s.IntervalInSeconds < 60 { + invalidParams.Add(request.NewErrParamMinValue("IntervalInSeconds", 60)) + } + if s.SizeInMBs != nil && *s.SizeInMBs < 1 { + invalidParams.Add(request.NewErrParamMinValue("SizeInMBs", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Describes the configuration of a destination in Amazon ES. +type ElasticsearchDestinationConfiguration struct { + _ struct{} `type:"structure"` + + // Buffering options. If no value is specified, ElasticsearchBufferingHints + // object default values are used. + BufferingHints *ElasticsearchBufferingHints `type:"structure"` + + // Describes CloudWatch logging options for your delivery stream. + CloudWatchLoggingOptions *CloudWatchLoggingOptions `type:"structure"` + + // The ARN of the Amazon ES domain. The IAM role must have permission for DescribeElasticsearchDomain, + // DescribeElasticsearchDomains , and DescribeElasticsearchDomainConfig after + // assuming RoleARN. + DomainARN *string `min:"1" type:"string" required:"true"` + + // The Elasticsearch index name. + IndexName *string `min:"1" type:"string" required:"true"` + + // The Elasticsearch index rotation period. Index rotation appends a timestamp + // to the IndexName to facilitate expiration of old data. For more information, + // see Index Rotation for Amazon Elasticsearch Service Destination (http://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html#es-index-rotation). + // Default value is OneDay. + IndexRotationPeriod *string `type:"string" enum:"ElasticsearchIndexRotationPeriod"` + + // Configures retry behavior in the event that Firehose is unable to deliver + // documents to Amazon ES. Default value is 300 (5 minutes). + RetryOptions *ElasticsearchRetryOptions `type:"structure"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon + // ES Configuration API and for indexing documents. For more information, see + // Amazon S3 Bucket Access (http://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3). + RoleARN *string `min:"1" type:"string" required:"true"` + + // Defines how documents should be delivered to Amazon S3. When set to FailedDocumentsOnly, + // Firehose writes any documents that could not be indexed to the configured + // Amazon S3 destination, with elasticsearch-failed/ appended to the key prefix. + // When set to AllDocuments, Firehose delivers all incoming records to Amazon + // S3, and also writes failed documents with elasticsearch-failed/ appended + // to the prefix. For more information, see Amazon S3 Backup for Amazon Elasticsearch + // Service Destination (http://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html#es-s3-backup). + // Default value is FailedDocumentsOnly. + S3BackupMode *string `type:"string" enum:"ElasticsearchS3BackupMode"` + + // Describes the configuration of a destination in Amazon S3. + S3Configuration *S3DestinationConfiguration `type:"structure" required:"true"` + + // The Elasticsearch type name. 
+ TypeName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ElasticsearchDestinationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ElasticsearchDestinationConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ElasticsearchDestinationConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ElasticsearchDestinationConfiguration"} + if s.DomainARN == nil { + invalidParams.Add(request.NewErrParamRequired("DomainARN")) + } + if s.DomainARN != nil && len(*s.DomainARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DomainARN", 1)) + } + if s.IndexName == nil { + invalidParams.Add(request.NewErrParamRequired("IndexName")) + } + if s.IndexName != nil && len(*s.IndexName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 1)) + } + if s.RoleARN == nil { + invalidParams.Add(request.NewErrParamRequired("RoleARN")) + } + if s.RoleARN != nil && len(*s.RoleARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleARN", 1)) + } + if s.S3Configuration == nil { + invalidParams.Add(request.NewErrParamRequired("S3Configuration")) + } + if s.TypeName == nil { + invalidParams.Add(request.NewErrParamRequired("TypeName")) + } + if s.TypeName != nil && len(*s.TypeName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TypeName", 1)) + } + if s.BufferingHints != nil { + if err := s.BufferingHints.Validate(); err != nil { + invalidParams.AddNested("BufferingHints", err.(request.ErrInvalidParams)) + } + } + if s.S3Configuration != nil { + if err := s.S3Configuration.Validate(); err != nil { + invalidParams.AddNested("S3Configuration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The destination description in Amazon ES. +type ElasticsearchDestinationDescription struct { + _ struct{} `type:"structure"` + + // Buffering options. + BufferingHints *ElasticsearchBufferingHints `type:"structure"` + + // CloudWatch logging options. + CloudWatchLoggingOptions *CloudWatchLoggingOptions `type:"structure"` + + // The ARN of the Amazon ES domain. + DomainARN *string `min:"1" type:"string"` + + // The Elasticsearch index name. + IndexName *string `min:"1" type:"string"` + + // The Elasticsearch index rotation period + IndexRotationPeriod *string `type:"string" enum:"ElasticsearchIndexRotationPeriod"` + + // Elasticsearch retry options. + RetryOptions *ElasticsearchRetryOptions `type:"structure"` + + // The ARN of the AWS credentials. + RoleARN *string `min:"1" type:"string"` + + // Amazon S3 backup mode. + S3BackupMode *string `type:"string" enum:"ElasticsearchS3BackupMode"` + + // Describes a destination in Amazon S3. + S3DestinationDescription *S3DestinationDescription `type:"structure"` + + // The Elasticsearch type name. + TypeName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ElasticsearchDestinationDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ElasticsearchDestinationDescription) GoString() string { + return s.String() +} + +// Describes an update for a destination in Amazon ES. +type ElasticsearchDestinationUpdate struct { + _ struct{} `type:"structure"` + + // Buffering options. 
If no value is specified, ElasticsearchBufferingHints + // object default values are used. + BufferingHints *ElasticsearchBufferingHints `type:"structure"` + + // Describes CloudWatch logging options for your delivery stream. + CloudWatchLoggingOptions *CloudWatchLoggingOptions `type:"structure"` + + // The ARN of the Amazon ES domain. The IAM role must have permission for DescribeElasticsearchDomain, + // DescribeElasticsearchDomains , and DescribeElasticsearchDomainConfig after + // assuming RoleARN. + DomainARN *string `min:"1" type:"string"` + + // The Elasticsearch index name. + IndexName *string `min:"1" type:"string"` + + // The Elasticsearch index rotation period. Index rotation appends a timestamp + // to the IndexName to facilitate the expiration of old data. For more information, + // see Index Rotation for Amazon Elasticsearch Service Destination (http://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html#es-index-rotation). + // Default value is OneDay. + IndexRotationPeriod *string `type:"string" enum:"ElasticsearchIndexRotationPeriod"` + + // Configures retry behavior in the event that Firehose is unable to deliver + // documents to Amazon ES. Default value is 300 (5 minutes). + RetryOptions *ElasticsearchRetryOptions `type:"structure"` + + // The ARN of the IAM role to be assumed by Firehose for calling the Amazon + // ES Configuration API and for indexing documents. For more information, see + // Amazon S3 Bucket Access (http://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3). + RoleARN *string `min:"1" type:"string"` + + // Describes an update for a destination in Amazon S3. + S3Update *S3DestinationUpdate `type:"structure"` + + // The Elasticsearch type name. + TypeName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ElasticsearchDestinationUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ElasticsearchDestinationUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ElasticsearchDestinationUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ElasticsearchDestinationUpdate"} + if s.DomainARN != nil && len(*s.DomainARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DomainARN", 1)) + } + if s.IndexName != nil && len(*s.IndexName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 1)) + } + if s.RoleARN != nil && len(*s.RoleARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleARN", 1)) + } + if s.TypeName != nil && len(*s.TypeName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TypeName", 1)) + } + if s.BufferingHints != nil { + if err := s.BufferingHints.Validate(); err != nil { + invalidParams.AddNested("BufferingHints", err.(request.ErrInvalidParams)) + } + } + if s.S3Update != nil { + if err := s.S3Update.Validate(); err != nil { + invalidParams.AddNested("S3Update", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Configures retry behavior in the event that Firehose is unable to deliver +// documents to Amazon ES. +type ElasticsearchRetryOptions struct { + _ struct{} `type:"structure"` + + // After an initial failure to deliver to Amazon ES, the total amount of time + // during which Firehose re-attempts delivery (including the first attempt). 
+ // After this time has elapsed, the failed documents are written to Amazon S3. + // Default value is 300 seconds (5 minutes). A value of 0 (zero) results in + // no retries. + DurationInSeconds *int64 `type:"integer"` +} + +// String returns the string representation +func (s ElasticsearchRetryOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ElasticsearchRetryOptions) GoString() string { + return s.String() +} + +// Describes the encryption for a destination in Amazon S3. +type EncryptionConfiguration struct { + _ struct{} `type:"structure"` + + // The encryption key. + KMSEncryptionConfig *KMSEncryptionConfig `type:"structure"` + + // Specifically override existing encryption information to ensure no encryption + // is used. + NoEncryptionConfig *string `type:"string" enum:"NoEncryptionConfig"` +} + +// String returns the string representation +func (s EncryptionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EncryptionConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *EncryptionConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EncryptionConfiguration"} + if s.KMSEncryptionConfig != nil { + if err := s.KMSEncryptionConfig.Validate(); err != nil { + invalidParams.AddNested("KMSEncryptionConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Describes an encryption key for a destination in Amazon S3. +type KMSEncryptionConfig struct { + _ struct{} `type:"structure"` + + // The ARN of the encryption key. Must belong to the same region as the destination + // Amazon S3 bucket. + AWSKMSKeyARN *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s KMSEncryptionConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KMSEncryptionConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *KMSEncryptionConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "KMSEncryptionConfig"} + if s.AWSKMSKeyARN == nil { + invalidParams.Add(request.NewErrParamRequired("AWSKMSKeyARN")) + } + if s.AWSKMSKeyARN != nil && len(*s.AWSKMSKeyARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AWSKMSKeyARN", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the parameters for ListDeliveryStreams. +type ListDeliveryStreamsInput struct { + _ struct{} `type:"structure"` + + // The name of the delivery stream to start the list with. + ExclusiveStartDeliveryStreamName *string `min:"1" type:"string"` + + // The maximum number of delivery streams to list. + Limit *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s ListDeliveryStreamsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDeliveryStreamsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
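+//
+// For example (a non-normative sketch; the key ARN is a hypothetical
+// placeholder), requesting SSE-KMS for the S3 destination and validating the
+// configuration locally with the EncryptionConfiguration type defined above:
+//
+//    enc := &firehose.EncryptionConfiguration{
+//        KMSEncryptionConfig: &firehose.KMSEncryptionConfig{
+//            AWSKMSKeyARN: aws.String("arn:aws:kms:us-east-1:123456789012:key/example"),
+//        },
+//    }
+//    if err := enc.Validate(); err != nil {
+//        log.Fatal(err)
+//    }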
+func (s *ListDeliveryStreamsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListDeliveryStreamsInput"} + if s.ExclusiveStartDeliveryStreamName != nil && len(*s.ExclusiveStartDeliveryStreamName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ExclusiveStartDeliveryStreamName", 1)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of ListDeliveryStreams. +type ListDeliveryStreamsOutput struct { + _ struct{} `type:"structure"` + + // The names of the delivery streams. + DeliveryStreamNames []*string `type:"list" required:"true"` + + // Indicates whether there are more delivery streams available to list. + HasMoreDeliveryStreams *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s ListDeliveryStreamsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDeliveryStreamsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for PutRecordBatch. +type PutRecordBatchInput struct { + _ struct{} `type:"structure"` + + // The name of the delivery stream. + DeliveryStreamName *string `min:"1" type:"string" required:"true"` + + // One or more records. + Records []*Record `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s PutRecordBatchInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRecordBatchInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutRecordBatchInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutRecordBatchInput"} + if s.DeliveryStreamName == nil { + invalidParams.Add(request.NewErrParamRequired("DeliveryStreamName")) + } + if s.DeliveryStreamName != nil && len(*s.DeliveryStreamName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DeliveryStreamName", 1)) + } + if s.Records == nil { + invalidParams.Add(request.NewErrParamRequired("Records")) + } + if s.Records != nil && len(s.Records) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Records", 1)) + } + if s.Records != nil { + for i, v := range s.Records { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Records", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of PutRecordBatch. +type PutRecordBatchOutput struct { + _ struct{} `type:"structure"` + + // The number of unsuccessfully written records. + FailedPutCount *int64 `type:"integer" required:"true"` + + // The results for the individual records. The index of each element matches + // the same index in which records were sent. + RequestResponses []*PutRecordBatchResponseEntry `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s PutRecordBatchOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRecordBatchOutput) GoString() string { + return s.String() +} + +// Contains the result for an individual record from a PutRecordBatch request. +// If the record is successfully added to your delivery stream, it receives +// a record ID. 
If the record fails to be added to your delivery stream, the +// result includes an error code and an error message. +type PutRecordBatchResponseEntry struct { + _ struct{} `type:"structure"` + + // The error code for an individual record result. + ErrorCode *string `type:"string"` + + // The error message for an individual record result. + ErrorMessage *string `type:"string"` + + // The ID of the record. + RecordId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PutRecordBatchResponseEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRecordBatchResponseEntry) GoString() string { + return s.String() +} + +// Contains the parameters for PutRecord. +type PutRecordInput struct { + _ struct{} `type:"structure"` + + // The name of the delivery stream. + DeliveryStreamName *string `min:"1" type:"string" required:"true"` + + // The record. + Record *Record `type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutRecordInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRecordInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutRecordInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutRecordInput"} + if s.DeliveryStreamName == nil { + invalidParams.Add(request.NewErrParamRequired("DeliveryStreamName")) + } + if s.DeliveryStreamName != nil && len(*s.DeliveryStreamName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DeliveryStreamName", 1)) + } + if s.Record == nil { + invalidParams.Add(request.NewErrParamRequired("Record")) + } + if s.Record != nil { + if err := s.Record.Validate(); err != nil { + invalidParams.AddNested("Record", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of PutRecord. +type PutRecordOutput struct { + _ struct{} `type:"structure"` + + // The ID of the record. + RecordId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutRecordOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRecordOutput) GoString() string { + return s.String() +} + +// The unit of data in a delivery stream. +type Record struct { + _ struct{} `type:"structure"` + + // The data blob, which is base64-encoded when the blob is serialized. The maximum + // size of the data blob, before base64-encoding, is 1,000 KB. + // + // Data is automatically base64 encoded/decoded by the SDK. + Data []byte `type:"blob" required:"true"` +} + +// String returns the string representation +func (s Record) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Record) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Record) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Record"} + if s.Data == nil { + invalidParams.Add(request.NewErrParamRequired("Data")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Describes the configuration of a destination in Amazon Redshift. 
+type RedshiftDestinationConfiguration struct { + _ struct{} `type:"structure"` + + // Describes CloudWatch logging options for your delivery stream. + CloudWatchLoggingOptions *CloudWatchLoggingOptions `type:"structure"` + + // The database connection string. + ClusterJDBCURL *string `min:"1" type:"string" required:"true"` + + // The COPY command. + CopyCommand *CopyCommand `type:"structure" required:"true"` + + // The user password. + Password *string `min:"6" type:"string" required:"true"` + + // Configures retry behavior in the event that Firehose is unable to deliver + // documents to Amazon Redshift. Default value is 3600 (60 minutes). + RetryOptions *RedshiftRetryOptions `type:"structure"` + + // The ARN of the AWS credentials. + RoleARN *string `min:"1" type:"string" required:"true"` + + // The S3 configuration for the intermediate location from which Amazon Redshift + // obtains data. Restrictions are described in the topic for CreateDeliveryStream. + // + // The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationConfiguration.S3Configuration + // because the Amazon Redshift COPY operation that reads from the S3 bucket + // doesn't support these compression formats. + S3Configuration *S3DestinationConfiguration `type:"structure" required:"true"` + + // The name of the user. + Username *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RedshiftDestinationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RedshiftDestinationConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RedshiftDestinationConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RedshiftDestinationConfiguration"} + if s.ClusterJDBCURL == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterJDBCURL")) + } + if s.ClusterJDBCURL != nil && len(*s.ClusterJDBCURL) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClusterJDBCURL", 1)) + } + if s.CopyCommand == nil { + invalidParams.Add(request.NewErrParamRequired("CopyCommand")) + } + if s.Password == nil { + invalidParams.Add(request.NewErrParamRequired("Password")) + } + if s.Password != nil && len(*s.Password) < 6 { + invalidParams.Add(request.NewErrParamMinLen("Password", 6)) + } + if s.RoleARN == nil { + invalidParams.Add(request.NewErrParamRequired("RoleARN")) + } + if s.RoleARN != nil && len(*s.RoleARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleARN", 1)) + } + if s.S3Configuration == nil { + invalidParams.Add(request.NewErrParamRequired("S3Configuration")) + } + if s.Username == nil { + invalidParams.Add(request.NewErrParamRequired("Username")) + } + if s.Username != nil && len(*s.Username) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Username", 1)) + } + if s.CopyCommand != nil { + if err := s.CopyCommand.Validate(); err != nil { + invalidParams.AddNested("CopyCommand", err.(request.ErrInvalidParams)) + } + } + if s.S3Configuration != nil { + if err := s.S3Configuration.Validate(); err != nil { + invalidParams.AddNested("S3Configuration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Describes a destination in Amazon Redshift. +type RedshiftDestinationDescription struct { + _ struct{} `type:"structure"` + + // Describes CloudWatch logging options for your delivery stream. 
+ CloudWatchLoggingOptions *CloudWatchLoggingOptions `type:"structure"` + + // The database connection string. + ClusterJDBCURL *string `min:"1" type:"string" required:"true"` + + // The COPY command. + CopyCommand *CopyCommand `type:"structure" required:"true"` + + // Configures retry behavior in the event that Firehose is unable to deliver + // documents to Amazon Redshift. Default value is 3600 (60 minutes). + RetryOptions *RedshiftRetryOptions `type:"structure"` + + // The ARN of the AWS credentials. + RoleARN *string `min:"1" type:"string" required:"true"` + + // The Amazon S3 destination. + S3DestinationDescription *S3DestinationDescription `type:"structure" required:"true"` + + // The name of the user. + Username *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RedshiftDestinationDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RedshiftDestinationDescription) GoString() string { + return s.String() +} + +// Describes an update for a destination in Amazon Redshift. +type RedshiftDestinationUpdate struct { + _ struct{} `type:"structure"` + + // Describes CloudWatch logging options for your delivery stream. + CloudWatchLoggingOptions *CloudWatchLoggingOptions `type:"structure"` + + // The database connection string. + ClusterJDBCURL *string `min:"1" type:"string"` + + // The COPY command. + CopyCommand *CopyCommand `type:"structure"` + + // The user password. + Password *string `min:"6" type:"string"` + + // Configures retry behavior in the event that Firehose is unable to deliver + // documents to Amazon Redshift. Default value is 3600 (60 minutes). + RetryOptions *RedshiftRetryOptions `type:"structure"` + + // The ARN of the AWS credentials. + RoleARN *string `min:"1" type:"string"` + + // The Amazon S3 destination. + // + // The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationUpdate.S3Update + // because the Amazon Redshift COPY operation that reads from the S3 bucket + // doesn't support these compression formats. + S3Update *S3DestinationUpdate `type:"structure"` + + // The name of the user. + Username *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s RedshiftDestinationUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RedshiftDestinationUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
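+//
+// An illustrative, non-normative sketch (the table name and tab-delimiter
+// choice are assumptions, not requirements) of a Redshift destination update
+// that passes this validation:
+//
+//    upd := &firehose.RedshiftDestinationUpdate{
+//        CopyCommand: &firehose.CopyCommand{
+//            DataTableName: aws.String("events"),
+//            CopyOptions:   aws.String("delimiter '\\t'"), // backslash kept so Redshift sees \t
+//        },
+//    }
+//    if err := upd.Validate(); err != nil {
+//        log.Fatal(err)
+//    }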
+func (s *RedshiftDestinationUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RedshiftDestinationUpdate"} + if s.ClusterJDBCURL != nil && len(*s.ClusterJDBCURL) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClusterJDBCURL", 1)) + } + if s.Password != nil && len(*s.Password) < 6 { + invalidParams.Add(request.NewErrParamMinLen("Password", 6)) + } + if s.RoleARN != nil && len(*s.RoleARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleARN", 1)) + } + if s.Username != nil && len(*s.Username) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Username", 1)) + } + if s.CopyCommand != nil { + if err := s.CopyCommand.Validate(); err != nil { + invalidParams.AddNested("CopyCommand", err.(request.ErrInvalidParams)) + } + } + if s.S3Update != nil { + if err := s.S3Update.Validate(); err != nil { + invalidParams.AddNested("S3Update", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Configures retry behavior in the event that Firehose is unable to deliver +// documents to Amazon Redshift. +type RedshiftRetryOptions struct { + _ struct{} `type:"structure"` + + // The length of time during which Firehose retries delivery after a failure, + // starting from the initial request and including the first attempt. The default + // value is 3600 seconds (60 minutes). Firehose does not retry if the value + // of DurationInSeconds is 0 (zero) or if the first delivery attempt takes longer + // than the current value. + DurationInSeconds *int64 `type:"integer"` +} + +// String returns the string representation +func (s RedshiftRetryOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RedshiftRetryOptions) GoString() string { + return s.String() +} + +// Describes the configuration of a destination in Amazon S3. +type S3DestinationConfiguration struct { + _ struct{} `type:"structure"` + + // The ARN of the S3 bucket. + BucketARN *string `min:"1" type:"string" required:"true"` + + // The buffering option. If no value is specified, BufferingHints object default + // values are used. + BufferingHints *BufferingHints `type:"structure"` + + // Describes CloudWatch logging options for your delivery stream. + CloudWatchLoggingOptions *CloudWatchLoggingOptions `type:"structure"` + + // The compression format. If no value is specified, the default is UNCOMPRESSED. + // + // The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift + // destinations because they are not supported by the Amazon Redshift COPY operation + // that reads from the S3 bucket. + CompressionFormat *string `type:"string" enum:"CompressionFormat"` + + // The encryption configuration. If no value is specified, the default is no + // encryption. + EncryptionConfiguration *EncryptionConfiguration `type:"structure"` + + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered + // S3 files. You can specify an extra prefix to be added in front of the time + // format prefix. Note that if the prefix ends with a slash, it appears as a + // folder in the S3 bucket. For more information, see Amazon S3 Object Name + // Format (http://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html) + // in the Amazon Kinesis Firehose Developer Guide (http://docs.aws.amazon.com/firehose/latest/dev/). + Prefix *string `type:"string"` + + // The ARN of the AWS credentials. 
+ RoleARN *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s S3DestinationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3DestinationConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *S3DestinationConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "S3DestinationConfiguration"} + if s.BucketARN == nil { + invalidParams.Add(request.NewErrParamRequired("BucketARN")) + } + if s.BucketARN != nil && len(*s.BucketARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("BucketARN", 1)) + } + if s.RoleARN == nil { + invalidParams.Add(request.NewErrParamRequired("RoleARN")) + } + if s.RoleARN != nil && len(*s.RoleARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleARN", 1)) + } + if s.BufferingHints != nil { + if err := s.BufferingHints.Validate(); err != nil { + invalidParams.AddNested("BufferingHints", err.(request.ErrInvalidParams)) + } + } + if s.EncryptionConfiguration != nil { + if err := s.EncryptionConfiguration.Validate(); err != nil { + invalidParams.AddNested("EncryptionConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Describes a destination in Amazon S3. +type S3DestinationDescription struct { + _ struct{} `type:"structure"` + + // The ARN of the S3 bucket. + BucketARN *string `min:"1" type:"string" required:"true"` + + // The buffering option. If no value is specified, BufferingHints object default + // values are used. + BufferingHints *BufferingHints `type:"structure" required:"true"` + + // Describes CloudWatch logging options for your delivery stream. + CloudWatchLoggingOptions *CloudWatchLoggingOptions `type:"structure"` + + // The compression format. If no value is specified, the default is NOCOMPRESSION. + CompressionFormat *string `type:"string" required:"true" enum:"CompressionFormat"` + + // The encryption configuration. If no value is specified, the default is no + // encryption. + EncryptionConfiguration *EncryptionConfiguration `type:"structure" required:"true"` + + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered + // S3 files. You can specify an extra prefix to be added in front of the time + // format prefix. Note that if the prefix ends with a slash, it appears as a + // folder in the S3 bucket. For more information, see Amazon S3 Object Name + // Format (http://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html) + // in the Amazon Kinesis Firehose Developer Guide (http://docs.aws.amazon.com/firehose/latest/dev/). + Prefix *string `type:"string"` + + // The ARN of the AWS credentials. + RoleARN *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s S3DestinationDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3DestinationDescription) GoString() string { + return s.String() +} + +// Describes an update for a destination in Amazon S3. +type S3DestinationUpdate struct { + _ struct{} `type:"structure"` + + // The ARN of the S3 bucket. + BucketARN *string `min:"1" type:"string"` + + // The buffering option. If no value is specified, BufferingHints object default + // values are used. 
+ BufferingHints *BufferingHints `type:"structure"`
+
+ // Describes CloudWatch logging options for your delivery stream.
+ CloudWatchLoggingOptions *CloudWatchLoggingOptions `type:"structure"`
+
+ // The compression format. If no value is specified, the default is UNCOMPRESSED.
+ //
+ // The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift
+ // destinations because they are not supported by the Amazon Redshift COPY operation
+ // that reads from the S3 bucket.
+ CompressionFormat *string `type:"string" enum:"CompressionFormat"`
+
+ // The encryption configuration. If no value is specified, the default is no
+ // encryption.
+ EncryptionConfiguration *EncryptionConfiguration `type:"structure"`
+
+ // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered
+ // S3 files. You can specify an extra prefix to be added in front of the time
+ // format prefix. Note that if the prefix ends with a slash, it appears as a
+ // folder in the S3 bucket. For more information, see Amazon S3 Object Name
+ // Format (http://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html)
+ // in the Amazon Kinesis Firehose Developer Guide (http://docs.aws.amazon.com/firehose/latest/dev/).
+ Prefix *string `type:"string"`
+
+ // The ARN of the AWS credentials.
+ RoleARN *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s S3DestinationUpdate) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s S3DestinationUpdate) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *S3DestinationUpdate) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "S3DestinationUpdate"}
+ if s.BucketARN != nil && len(*s.BucketARN) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("BucketARN", 1))
+ }
+ if s.RoleARN != nil && len(*s.RoleARN) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("RoleARN", 1))
+ }
+ if s.BufferingHints != nil {
+ if err := s.BufferingHints.Validate(); err != nil {
+ invalidParams.AddNested("BufferingHints", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.EncryptionConfiguration != nil {
+ if err := s.EncryptionConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("EncryptionConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Contains the parameters for UpdateDestination.
+type UpdateDestinationInput struct {
+ _ struct{} `type:"structure"`
+
+ // Obtain this value from the VersionId field of the DeliveryStreamDescription
+ // returned by the DescribeDeliveryStream operation. This value is required,
+ // and helps the service to perform conditional operations. For example, if
+ // there is an interleaving update and this value is null, then the update
+ // destination fails. After the update is successful, the VersionId value is
+ // updated. The service then performs a merge of the old configuration with
+ // the new configuration.
+ CurrentDeliveryStreamVersionId *string `min:"1" type:"string" required:"true"`
+
+ // The name of the delivery stream.
+ DeliveryStreamName *string `min:"1" type:"string" required:"true"`
+
+ // The ID of the destination.
+ DestinationId *string `min:"1" type:"string" required:"true"`
+
+ // Describes an update for a destination in Amazon ES.
+ ElasticsearchDestinationUpdate *ElasticsearchDestinationUpdate `type:"structure"`
+
+ // Describes an update for a destination in Amazon Redshift.
+ RedshiftDestinationUpdate *RedshiftDestinationUpdate `type:"structure"` + + // Describes an update for a destination in Amazon S3. + S3DestinationUpdate *S3DestinationUpdate `type:"structure"` +} + +// String returns the string representation +func (s UpdateDestinationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDestinationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateDestinationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateDestinationInput"} + if s.CurrentDeliveryStreamVersionId == nil { + invalidParams.Add(request.NewErrParamRequired("CurrentDeliveryStreamVersionId")) + } + if s.CurrentDeliveryStreamVersionId != nil && len(*s.CurrentDeliveryStreamVersionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CurrentDeliveryStreamVersionId", 1)) + } + if s.DeliveryStreamName == nil { + invalidParams.Add(request.NewErrParamRequired("DeliveryStreamName")) + } + if s.DeliveryStreamName != nil && len(*s.DeliveryStreamName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DeliveryStreamName", 1)) + } + if s.DestinationId == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationId")) + } + if s.DestinationId != nil && len(*s.DestinationId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DestinationId", 1)) + } + if s.ElasticsearchDestinationUpdate != nil { + if err := s.ElasticsearchDestinationUpdate.Validate(); err != nil { + invalidParams.AddNested("ElasticsearchDestinationUpdate", err.(request.ErrInvalidParams)) + } + } + if s.RedshiftDestinationUpdate != nil { + if err := s.RedshiftDestinationUpdate.Validate(); err != nil { + invalidParams.AddNested("RedshiftDestinationUpdate", err.(request.ErrInvalidParams)) + } + } + if s.S3DestinationUpdate != nil { + if err := s.S3DestinationUpdate.Validate(); err != nil { + invalidParams.AddNested("S3DestinationUpdate", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of UpdateDestination. 
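+// The struct carries no fields; a nil error from UpdateDestination indicates
+// the update was accepted.
+//
+// A minimal sketch of the version-checked flow described on UpdateDestinationInput,
+// assuming an existing *firehose.Firehose client svc; the stream name and prefix
+// are illustrative:
+//
+//    desc, err := svc.DescribeDeliveryStream(&firehose.DescribeDeliveryStreamInput{
+//        DeliveryStreamName: aws.String("example-stream"),
+//    })
+//    if err != nil {
+//        return
+//    }
+//    stream := desc.DeliveryStreamDescription
+//    _, err = svc.UpdateDestination(&firehose.UpdateDestinationInput{
+//        CurrentDeliveryStreamVersionId: stream.VersionId,
+//        DeliveryStreamName:             aws.String("example-stream"),
+//        DestinationId:                  stream.Destinations[0].DestinationId,
+//        S3DestinationUpdate: &firehose.S3DestinationUpdate{
+//            Prefix: aws.String("example-prefix/"),
+//        },
+//    })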
+type UpdateDestinationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateDestinationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDestinationOutput) GoString() string { + return s.String() +} + +const ( + // @enum CompressionFormat + CompressionFormatUncompressed = "UNCOMPRESSED" + // @enum CompressionFormat + CompressionFormatGzip = "GZIP" + // @enum CompressionFormat + CompressionFormatZip = "ZIP" + // @enum CompressionFormat + CompressionFormatSnappy = "Snappy" +) + +const ( + // @enum DeliveryStreamStatus + DeliveryStreamStatusCreating = "CREATING" + // @enum DeliveryStreamStatus + DeliveryStreamStatusDeleting = "DELETING" + // @enum DeliveryStreamStatus + DeliveryStreamStatusActive = "ACTIVE" +) + +const ( + // @enum ElasticsearchIndexRotationPeriod + ElasticsearchIndexRotationPeriodNoRotation = "NoRotation" + // @enum ElasticsearchIndexRotationPeriod + ElasticsearchIndexRotationPeriodOneHour = "OneHour" + // @enum ElasticsearchIndexRotationPeriod + ElasticsearchIndexRotationPeriodOneDay = "OneDay" + // @enum ElasticsearchIndexRotationPeriod + ElasticsearchIndexRotationPeriodOneWeek = "OneWeek" + // @enum ElasticsearchIndexRotationPeriod + ElasticsearchIndexRotationPeriodOneMonth = "OneMonth" +) + +const ( + // @enum ElasticsearchS3BackupMode + ElasticsearchS3BackupModeFailedDocumentsOnly = "FailedDocumentsOnly" + // @enum ElasticsearchS3BackupMode + ElasticsearchS3BackupModeAllDocuments = "AllDocuments" +) + +const ( + // @enum NoEncryptionConfig + NoEncryptionConfigNoEncryption = "NoEncryption" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/firehose/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/firehose/examples_test.go new file mode 100644 index 000000000..9c84657db --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/firehose/examples_test.go @@ -0,0 +1,366 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package firehose_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/firehose" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleFirehose_CreateDeliveryStream() { + svc := firehose.New(session.New()) + + params := &firehose.CreateDeliveryStreamInput{ + DeliveryStreamName: aws.String("DeliveryStreamName"), // Required + ElasticsearchDestinationConfiguration: &firehose.ElasticsearchDestinationConfiguration{ + DomainARN: aws.String("ElasticsearchDomainARN"), // Required + IndexName: aws.String("ElasticsearchIndexName"), // Required + RoleARN: aws.String("RoleARN"), // Required + S3Configuration: &firehose.S3DestinationConfiguration{ // Required + BucketARN: aws.String("BucketARN"), // Required + RoleARN: aws.String("RoleARN"), // Required + BufferingHints: &firehose.BufferingHints{ + IntervalInSeconds: aws.Int64(1), + SizeInMBs: aws.Int64(1), + }, + CloudWatchLoggingOptions: &firehose.CloudWatchLoggingOptions{ + Enabled: aws.Bool(true), + LogGroupName: aws.String("LogGroupName"), + LogStreamName: aws.String("LogStreamName"), + }, + CompressionFormat: aws.String("CompressionFormat"), + EncryptionConfiguration: &firehose.EncryptionConfiguration{ + KMSEncryptionConfig: &firehose.KMSEncryptionConfig{ + AWSKMSKeyARN: aws.String("AWSKMSKeyARN"), // Required + }, + NoEncryptionConfig: aws.String("NoEncryptionConfig"), + }, + Prefix: aws.String("Prefix"), + }, + TypeName: aws.String("ElasticsearchTypeName"), // Required + BufferingHints: &firehose.ElasticsearchBufferingHints{ + IntervalInSeconds: aws.Int64(1), + SizeInMBs: aws.Int64(1), + }, + CloudWatchLoggingOptions: &firehose.CloudWatchLoggingOptions{ + Enabled: aws.Bool(true), + LogGroupName: aws.String("LogGroupName"), + LogStreamName: aws.String("LogStreamName"), + }, + IndexRotationPeriod: aws.String("ElasticsearchIndexRotationPeriod"), + RetryOptions: &firehose.ElasticsearchRetryOptions{ + DurationInSeconds: aws.Int64(1), + }, + S3BackupMode: aws.String("ElasticsearchS3BackupMode"), + }, + RedshiftDestinationConfiguration: &firehose.RedshiftDestinationConfiguration{ + ClusterJDBCURL: aws.String("ClusterJDBCURL"), // Required + CopyCommand: &firehose.CopyCommand{ // Required + DataTableName: aws.String("DataTableName"), // Required + CopyOptions: aws.String("CopyOptions"), + DataTableColumns: aws.String("DataTableColumns"), + }, + Password: aws.String("Password"), // Required + RoleARN: aws.String("RoleARN"), // Required + S3Configuration: &firehose.S3DestinationConfiguration{ // Required + BucketARN: aws.String("BucketARN"), // Required + RoleARN: aws.String("RoleARN"), // Required + BufferingHints: &firehose.BufferingHints{ + IntervalInSeconds: aws.Int64(1), + SizeInMBs: aws.Int64(1), + }, + CloudWatchLoggingOptions: &firehose.CloudWatchLoggingOptions{ + Enabled: aws.Bool(true), + LogGroupName: aws.String("LogGroupName"), + LogStreamName: aws.String("LogStreamName"), + }, + CompressionFormat: aws.String("CompressionFormat"), + EncryptionConfiguration: &firehose.EncryptionConfiguration{ + KMSEncryptionConfig: &firehose.KMSEncryptionConfig{ + AWSKMSKeyARN: aws.String("AWSKMSKeyARN"), // Required + }, + NoEncryptionConfig: aws.String("NoEncryptionConfig"), + }, + Prefix: aws.String("Prefix"), + }, + Username: aws.String("Username"), // Required + CloudWatchLoggingOptions: &firehose.CloudWatchLoggingOptions{ + Enabled: aws.Bool(true), + LogGroupName: aws.String("LogGroupName"), + LogStreamName: aws.String("LogStreamName"), + 
}, + RetryOptions: &firehose.RedshiftRetryOptions{ + DurationInSeconds: aws.Int64(1), + }, + }, + S3DestinationConfiguration: &firehose.S3DestinationConfiguration{ + BucketARN: aws.String("BucketARN"), // Required + RoleARN: aws.String("RoleARN"), // Required + BufferingHints: &firehose.BufferingHints{ + IntervalInSeconds: aws.Int64(1), + SizeInMBs: aws.Int64(1), + }, + CloudWatchLoggingOptions: &firehose.CloudWatchLoggingOptions{ + Enabled: aws.Bool(true), + LogGroupName: aws.String("LogGroupName"), + LogStreamName: aws.String("LogStreamName"), + }, + CompressionFormat: aws.String("CompressionFormat"), + EncryptionConfiguration: &firehose.EncryptionConfiguration{ + KMSEncryptionConfig: &firehose.KMSEncryptionConfig{ + AWSKMSKeyARN: aws.String("AWSKMSKeyARN"), // Required + }, + NoEncryptionConfig: aws.String("NoEncryptionConfig"), + }, + Prefix: aws.String("Prefix"), + }, + } + resp, err := svc.CreateDeliveryStream(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleFirehose_DeleteDeliveryStream() { + svc := firehose.New(session.New()) + + params := &firehose.DeleteDeliveryStreamInput{ + DeliveryStreamName: aws.String("DeliveryStreamName"), // Required + } + resp, err := svc.DeleteDeliveryStream(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleFirehose_DescribeDeliveryStream() { + svc := firehose.New(session.New()) + + params := &firehose.DescribeDeliveryStreamInput{ + DeliveryStreamName: aws.String("DeliveryStreamName"), // Required + ExclusiveStartDestinationId: aws.String("DestinationId"), + Limit: aws.Int64(1), + } + resp, err := svc.DescribeDeliveryStream(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleFirehose_ListDeliveryStreams() { + svc := firehose.New(session.New()) + + params := &firehose.ListDeliveryStreamsInput{ + ExclusiveStartDeliveryStreamName: aws.String("DeliveryStreamName"), + Limit: aws.Int64(1), + } + resp, err := svc.ListDeliveryStreams(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleFirehose_PutRecord() { + svc := firehose.New(session.New()) + + params := &firehose.PutRecordInput{ + DeliveryStreamName: aws.String("DeliveryStreamName"), // Required + Record: &firehose.Record{ // Required + Data: []byte("PAYLOAD"), // Required + }, + } + resp, err := svc.PutRecord(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleFirehose_PutRecordBatch() { + svc := firehose.New(session.New()) + + params := &firehose.PutRecordBatchInput{ + DeliveryStreamName: aws.String("DeliveryStreamName"), // Required + Records: []*firehose.Record{ // Required + { // Required + Data: []byte("PAYLOAD"), // Required + }, + // More values... 
+ }, + } + resp, err := svc.PutRecordBatch(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleFirehose_UpdateDestination() { + svc := firehose.New(session.New()) + + params := &firehose.UpdateDestinationInput{ + CurrentDeliveryStreamVersionId: aws.String("DeliveryStreamVersionId"), // Required + DeliveryStreamName: aws.String("DeliveryStreamName"), // Required + DestinationId: aws.String("DestinationId"), // Required + ElasticsearchDestinationUpdate: &firehose.ElasticsearchDestinationUpdate{ + BufferingHints: &firehose.ElasticsearchBufferingHints{ + IntervalInSeconds: aws.Int64(1), + SizeInMBs: aws.Int64(1), + }, + CloudWatchLoggingOptions: &firehose.CloudWatchLoggingOptions{ + Enabled: aws.Bool(true), + LogGroupName: aws.String("LogGroupName"), + LogStreamName: aws.String("LogStreamName"), + }, + DomainARN: aws.String("ElasticsearchDomainARN"), + IndexName: aws.String("ElasticsearchIndexName"), + IndexRotationPeriod: aws.String("ElasticsearchIndexRotationPeriod"), + RetryOptions: &firehose.ElasticsearchRetryOptions{ + DurationInSeconds: aws.Int64(1), + }, + RoleARN: aws.String("RoleARN"), + S3Update: &firehose.S3DestinationUpdate{ + BucketARN: aws.String("BucketARN"), + BufferingHints: &firehose.BufferingHints{ + IntervalInSeconds: aws.Int64(1), + SizeInMBs: aws.Int64(1), + }, + CloudWatchLoggingOptions: &firehose.CloudWatchLoggingOptions{ + Enabled: aws.Bool(true), + LogGroupName: aws.String("LogGroupName"), + LogStreamName: aws.String("LogStreamName"), + }, + CompressionFormat: aws.String("CompressionFormat"), + EncryptionConfiguration: &firehose.EncryptionConfiguration{ + KMSEncryptionConfig: &firehose.KMSEncryptionConfig{ + AWSKMSKeyARN: aws.String("AWSKMSKeyARN"), // Required + }, + NoEncryptionConfig: aws.String("NoEncryptionConfig"), + }, + Prefix: aws.String("Prefix"), + RoleARN: aws.String("RoleARN"), + }, + TypeName: aws.String("ElasticsearchTypeName"), + }, + RedshiftDestinationUpdate: &firehose.RedshiftDestinationUpdate{ + CloudWatchLoggingOptions: &firehose.CloudWatchLoggingOptions{ + Enabled: aws.Bool(true), + LogGroupName: aws.String("LogGroupName"), + LogStreamName: aws.String("LogStreamName"), + }, + ClusterJDBCURL: aws.String("ClusterJDBCURL"), + CopyCommand: &firehose.CopyCommand{ + DataTableName: aws.String("DataTableName"), // Required + CopyOptions: aws.String("CopyOptions"), + DataTableColumns: aws.String("DataTableColumns"), + }, + Password: aws.String("Password"), + RetryOptions: &firehose.RedshiftRetryOptions{ + DurationInSeconds: aws.Int64(1), + }, + RoleARN: aws.String("RoleARN"), + S3Update: &firehose.S3DestinationUpdate{ + BucketARN: aws.String("BucketARN"), + BufferingHints: &firehose.BufferingHints{ + IntervalInSeconds: aws.Int64(1), + SizeInMBs: aws.Int64(1), + }, + CloudWatchLoggingOptions: &firehose.CloudWatchLoggingOptions{ + Enabled: aws.Bool(true), + LogGroupName: aws.String("LogGroupName"), + LogStreamName: aws.String("LogStreamName"), + }, + CompressionFormat: aws.String("CompressionFormat"), + EncryptionConfiguration: &firehose.EncryptionConfiguration{ + KMSEncryptionConfig: &firehose.KMSEncryptionConfig{ + AWSKMSKeyARN: aws.String("AWSKMSKeyARN"), // Required + }, + NoEncryptionConfig: aws.String("NoEncryptionConfig"), + }, + Prefix: aws.String("Prefix"), + RoleARN: aws.String("RoleARN"), + }, + Username: aws.String("Username"), + }, + S3DestinationUpdate: 
&firehose.S3DestinationUpdate{ + BucketARN: aws.String("BucketARN"), + BufferingHints: &firehose.BufferingHints{ + IntervalInSeconds: aws.Int64(1), + SizeInMBs: aws.Int64(1), + }, + CloudWatchLoggingOptions: &firehose.CloudWatchLoggingOptions{ + Enabled: aws.Bool(true), + LogGroupName: aws.String("LogGroupName"), + LogStreamName: aws.String("LogStreamName"), + }, + CompressionFormat: aws.String("CompressionFormat"), + EncryptionConfiguration: &firehose.EncryptionConfiguration{ + KMSEncryptionConfig: &firehose.KMSEncryptionConfig{ + AWSKMSKeyARN: aws.String("AWSKMSKeyARN"), // Required + }, + NoEncryptionConfig: aws.String("NoEncryptionConfig"), + }, + Prefix: aws.String("Prefix"), + RoleARN: aws.String("RoleARN"), + }, + } + resp, err := svc.UpdateDestination(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/firehose/firehoseiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/firehose/firehoseiface/interface.go new file mode 100644 index 000000000..0c18342d0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/firehose/firehoseiface/interface.go @@ -0,0 +1,42 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package firehoseiface provides an interface for the Amazon Kinesis Firehose. +package firehoseiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/firehose" +) + +// FirehoseAPI is the interface type for firehose.Firehose. +type FirehoseAPI interface { + CreateDeliveryStreamRequest(*firehose.CreateDeliveryStreamInput) (*request.Request, *firehose.CreateDeliveryStreamOutput) + + CreateDeliveryStream(*firehose.CreateDeliveryStreamInput) (*firehose.CreateDeliveryStreamOutput, error) + + DeleteDeliveryStreamRequest(*firehose.DeleteDeliveryStreamInput) (*request.Request, *firehose.DeleteDeliveryStreamOutput) + + DeleteDeliveryStream(*firehose.DeleteDeliveryStreamInput) (*firehose.DeleteDeliveryStreamOutput, error) + + DescribeDeliveryStreamRequest(*firehose.DescribeDeliveryStreamInput) (*request.Request, *firehose.DescribeDeliveryStreamOutput) + + DescribeDeliveryStream(*firehose.DescribeDeliveryStreamInput) (*firehose.DescribeDeliveryStreamOutput, error) + + ListDeliveryStreamsRequest(*firehose.ListDeliveryStreamsInput) (*request.Request, *firehose.ListDeliveryStreamsOutput) + + ListDeliveryStreams(*firehose.ListDeliveryStreamsInput) (*firehose.ListDeliveryStreamsOutput, error) + + PutRecordRequest(*firehose.PutRecordInput) (*request.Request, *firehose.PutRecordOutput) + + PutRecord(*firehose.PutRecordInput) (*firehose.PutRecordOutput, error) + + PutRecordBatchRequest(*firehose.PutRecordBatchInput) (*request.Request, *firehose.PutRecordBatchOutput) + + PutRecordBatch(*firehose.PutRecordBatchInput) (*firehose.PutRecordBatchOutput, error) + + UpdateDestinationRequest(*firehose.UpdateDestinationInput) (*request.Request, *firehose.UpdateDestinationOutput) + + UpdateDestination(*firehose.UpdateDestinationInput) (*firehose.UpdateDestinationOutput, error) +} + +var _ FirehoseAPI = (*firehose.Firehose)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/firehose/service.go b/vendor/github.com/aws/aws-sdk-go/service/firehose/service.go new file mode 100644 index 000000000..7fd7f37ad --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/firehose/service.go @@ -0,0 +1,90 @@ +// THIS FILE 
IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package firehose
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+)
+
+// Amazon Kinesis Firehose is a fully-managed service that delivers real-time
+// streaming data to destinations such as Amazon Simple Storage Service (Amazon
+// S3), Amazon Elasticsearch Service (Amazon ES), and Amazon Redshift.
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type Firehose struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "firehose"
+
+// New creates a new instance of the Firehose client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a Firehose client from just a session.
+// svc := firehose.New(mySession)
+//
+// // Create a Firehose client with additional configuration
+// svc := firehose.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *Firehose {
+ c := p.ClientConfig(ServiceName, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *Firehose {
+ svc := &Firehose{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2015-08-04",
+ JSONVersion: "1.1",
+ TargetPrefix: "Firehose_20150804",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+ svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler)
+ svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler)
+ svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler)
+ svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler)
+
+ // Run custom client initialization if present
+ if initClient != nil {
+ initClient(svc.Client)
+ }
+
+ return svc
+}
+
+// newRequest creates a new request for a Firehose operation and runs any
+// custom request initialization.
+func (c *Firehose) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ // Run custom request initialization if present
+ if initRequest != nil {
+ initRequest(req)
+ }
+
+ return req
+} diff --git a/vendor/github.com/aws/aws-sdk-go/service/gamelift/api.go b/vendor/github.com/aws/aws-sdk-go/service/gamelift/api.go new file mode 100644 index 000000000..22b6ecceb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/gamelift/api.go @@ -0,0 +1,5683 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package gamelift provides a client for Amazon GameLift.
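+//
+// A minimal client-construction sketch, assuming an existing session; the
+// region shown is illustrative:
+//
+//    sess := session.New(&aws.Config{Region: aws.String("us-west-2")})
+//    svc := gamelift.New(sess)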
+package gamelift + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opCreateAlias = "CreateAlias" + +// CreateAliasRequest generates a "aws/request.Request" representing the +// client's request for the CreateAlias operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateAlias method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateAliasRequest method. +// req, resp := client.CreateAliasRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *GameLift) CreateAliasRequest(input *CreateAliasInput) (req *request.Request, output *CreateAliasOutput) { + op := &request.Operation{ + Name: opCreateAlias, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateAliasInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateAliasOutput{} + req.Data = output + return +} + +// Creates an alias for a fleet. You can use an alias to anonymize your fleet +// by referencing an alias instead of a specific fleet when you create game +// sessions. Amazon GameLift supports two types of routing strategies for aliases: +// simple and terminal. Use a simple alias to point to an active fleet. Use +// a terminal alias to display a message to incoming traffic instead of routing +// players to an active fleet. This option is useful when a game server is no +// longer supported but you want to provide better messaging than a standard +// 404 error. +// +// To create a fleet alias, specify an alias name, routing strategy, and optional +// description. If successful, a new alias record is returned, including an +// alias ID, which you can reference when creating a game session. To reassign +// the alias to another fleet ID, call UpdateAlias. +func (c *GameLift) CreateAlias(input *CreateAliasInput) (*CreateAliasOutput, error) { + req, out := c.CreateAliasRequest(input) + err := req.Send() + return out, err +} + +const opCreateBuild = "CreateBuild" + +// CreateBuildRequest generates a "aws/request.Request" representing the +// client's request for the CreateBuild operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateBuild method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateBuildRequest method. 
+// req, resp := client.CreateBuildRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *GameLift) CreateBuildRequest(input *CreateBuildInput) (req *request.Request, output *CreateBuildOutput) { + op := &request.Operation{ + Name: opCreateBuild, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateBuildInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateBuildOutput{} + req.Data = output + return +} + +// Initializes a new build record and generates information required to upload +// a game build to Amazon GameLift. Once the build record has been created and +// is in an INITIALIZED state, you can upload your game build. +// +// Do not use this API action unless you are using your own Amazon Simple +// Storage Service (Amazon S3) client and need to manually upload your build +// files. Instead, to create a build, use the CLI command upload-build, which +// creates a new build record and uploads the build files in one step. (See +// the Amazon GameLift Developer Guide (http://docs.aws.amazon.com/gamelift/latest/developerguide/) +// for more details on the CLI and the upload process.) +// +// To create a new build, optionally specify a build name and version. This +// metadata is stored with other properties in the build record and is displayed +// in the GameLift console (it is not visible to players). If successful, this +// action returns the newly created build record along with the Amazon S3 storage +// location and AWS account credentials. Use the location and credentials to +// upload your game build. +func (c *GameLift) CreateBuild(input *CreateBuildInput) (*CreateBuildOutput, error) { + req, out := c.CreateBuildRequest(input) + err := req.Send() + return out, err +} + +const opCreateFleet = "CreateFleet" + +// CreateFleetRequest generates a "aws/request.Request" representing the +// client's request for the CreateFleet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateFleet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateFleetRequest method. +// req, resp := client.CreateFleetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *GameLift) CreateFleetRequest(input *CreateFleetInput) (req *request.Request, output *CreateFleetOutput) { + op := &request.Operation{ + Name: opCreateFleet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateFleetInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateFleetOutput{} + req.Data = output + return +} + +// Creates a new fleet to run your game servers. A fleet is a set of Amazon +// Elastic Compute Cloud (Amazon EC2) instances, each of which can run multiple +// server processes to host game sessions. 
You configure a fleet to create instances
+// with certain hardware specifications (see Amazon EC2 Instance Types (https://aws.amazon.com/ec2/instance-types/)
+// for more information), and deploy a specified game build to each instance.
+// A newly created fleet passes through several states; once it reaches the
+// ACTIVE state, it can begin hosting game sessions.
+//
+// To create a new fleet, provide a fleet name, an EC2 instance type, and a
+// build ID of the game build to deploy. You can also configure the new fleet
+// with the following settings: (1) a runtime configuration describing what
+// server processes to run on each instance in the fleet (required to create
+// fleet), (2) access permissions for inbound traffic, (3) fleet-wide game session
+// protection, and (4) the location of default log files for GameLift to upload
+// and store.
+//
+// If the CreateFleet call is successful, Amazon GameLift performs the following
+// tasks:
+//
+//   - Creates a fleet record and sets the state to NEW (followed by other
+//     states as the fleet is activated).
+//   - Sets the fleet's capacity to 1 "desired", which causes GameLift to start
+//     one new EC2 instance.
+//   - Starts launching server processes on the instance. If the fleet is
+//     configured to run multiple server processes per instance, GameLift
+//     staggers each launch by a few seconds.
+//   - Begins writing events to the fleet event log, which can be accessed in
+//     the GameLift console.
+//   - Sets the fleet's status to ACTIVE once one server process in the fleet
+//     is ready to host a game session.
+//
+// After a fleet is created, use the following actions to change fleet properties
+// and configuration:
+//
+//   - UpdateFleetAttributes -- Update fleet metadata, including name and description.
+//   - UpdateFleetCapacity -- Increase or decrease the number of instances you
+//     want the fleet to maintain.
+//   - UpdateFleetPortSettings -- Change the IP address and port ranges that
+//     allow access to incoming traffic.
+//   - UpdateRuntimeConfiguration -- Change how server processes are launched
+//     in the fleet, including launch path, launch parameters, and the number
+//     of concurrent processes.
+func (c *GameLift) CreateFleet(input *CreateFleetInput) (*CreateFleetOutput, error) {
+ req, out := c.CreateFleetRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opCreateGameSession = "CreateGameSession"
+
+// CreateGameSessionRequest generates a "aws/request.Request" representing the
+// client's request for the CreateGameSession operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CreateGameSession method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the CreateGameSessionRequest method.
+// req, resp := client.CreateGameSessionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *GameLift) CreateGameSessionRequest(input *CreateGameSessionInput) (req *request.Request, output *CreateGameSessionOutput) { + op := &request.Operation{ + Name: opCreateGameSession, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateGameSessionInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateGameSessionOutput{} + req.Data = output + return +} + +// Creates a multiplayer game session for players. This action creates a game +// session record and assigns the new session to an instance in the specified +// fleet, which initializes a new server process to host the game session. A +// fleet must be in an ACTIVE state before a game session can be created in +// it. +// +// To create a game session, specify either a fleet ID or an alias ID and indicate +// the maximum number of players the game session allows. You can also provide +// a name and a set of properties for your game (optional). If successful, a +// GameSession object is returned containing session properties, including an +// IP address. By default, newly created game sessions are set to accept adding +// any new players to the game session. Use UpdateGameSession to change the +// creation policy. +func (c *GameLift) CreateGameSession(input *CreateGameSessionInput) (*CreateGameSessionOutput, error) { + req, out := c.CreateGameSessionRequest(input) + err := req.Send() + return out, err +} + +const opCreatePlayerSession = "CreatePlayerSession" + +// CreatePlayerSessionRequest generates a "aws/request.Request" representing the +// client's request for the CreatePlayerSession operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreatePlayerSession method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreatePlayerSessionRequest method. +// req, resp := client.CreatePlayerSessionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *GameLift) CreatePlayerSessionRequest(input *CreatePlayerSessionInput) (req *request.Request, output *CreatePlayerSessionOutput) { + op := &request.Operation{ + Name: opCreatePlayerSession, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreatePlayerSessionInput{} + } + + req = c.newRequest(op, input, output) + output = &CreatePlayerSessionOutput{} + req.Data = output + return +} + +// Adds a player to a game session and creates a player session record. A game +// session must be in an ACTIVE state, have a creation policy of ALLOW_ALL, +// and have an open player slot before players can be added to the session. +// +// To create a player session, specify a game session ID and player ID. If +// successful, the player is added to the game session and a new PlayerSession +// object is returned. 
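+//
+// A minimal sketch, assuming an existing client svc; the IDs are illustrative:
+//
+//    resp, err := svc.CreatePlayerSession(&gamelift.CreatePlayerSessionInput{
+//        GameSessionId: aws.String("gsess-example"),
+//        PlayerId:      aws.String("player-1"),
+//    })
+//    if err == nil {
+//        fmt.Println(resp.PlayerSession)
+//    }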
+func (c *GameLift) CreatePlayerSession(input *CreatePlayerSessionInput) (*CreatePlayerSessionOutput, error) { + req, out := c.CreatePlayerSessionRequest(input) + err := req.Send() + return out, err +} + +const opCreatePlayerSessions = "CreatePlayerSessions" + +// CreatePlayerSessionsRequest generates a "aws/request.Request" representing the +// client's request for the CreatePlayerSessions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreatePlayerSessions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreatePlayerSessionsRequest method. +// req, resp := client.CreatePlayerSessionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *GameLift) CreatePlayerSessionsRequest(input *CreatePlayerSessionsInput) (req *request.Request, output *CreatePlayerSessionsOutput) { + op := &request.Operation{ + Name: opCreatePlayerSessions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreatePlayerSessionsInput{} + } + + req = c.newRequest(op, input, output) + output = &CreatePlayerSessionsOutput{} + req.Data = output + return +} + +// Adds a group of players to a game session. Similar to CreatePlayerSession, +// this action allows you to add multiple players in a single call, which is +// useful for games that provide party and/or matchmaking features. A game session +// must be in an ACTIVE state, have a creation policy of ALLOW_ALL, and have +// an open player slot before players can be added to the session. +// +// To create player sessions, specify a game session ID and a list of player +// IDs. If successful, the players are added to the game session and a set of +// new PlayerSession objects is returned. +func (c *GameLift) CreatePlayerSessions(input *CreatePlayerSessionsInput) (*CreatePlayerSessionsOutput, error) { + req, out := c.CreatePlayerSessionsRequest(input) + err := req.Send() + return out, err +} + +const opDeleteAlias = "DeleteAlias" + +// DeleteAliasRequest generates a "aws/request.Request" representing the +// client's request for the DeleteAlias operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteAlias method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteAliasRequest method. 
+// req, resp := client.DeleteAliasRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *GameLift) DeleteAliasRequest(input *DeleteAliasInput) (req *request.Request, output *DeleteAliasOutput) { + op := &request.Operation{ + Name: opDeleteAlias, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAliasInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteAliasOutput{} + req.Data = output + return +} + +// Deletes an alias. This action removes all record of the alias; game clients +// attempting to access a server process using the deleted alias receive an +// error. To delete an alias, specify the alias ID to be deleted. +func (c *GameLift) DeleteAlias(input *DeleteAliasInput) (*DeleteAliasOutput, error) { + req, out := c.DeleteAliasRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBuild = "DeleteBuild" + +// DeleteBuildRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBuild operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBuild method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBuildRequest method. +// req, resp := client.DeleteBuildRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *GameLift) DeleteBuildRequest(input *DeleteBuildInput) (req *request.Request, output *DeleteBuildOutput) { + op := &request.Operation{ + Name: opDeleteBuild, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteBuildInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteBuildOutput{} + req.Data = output + return +} + +// Deletes a build. This action permanently deletes the build record and any +// uploaded build files. +// +// To delete a build, specify its ID. Deleting a build does not affect the +// status of any active fleets using the build, but you can no longer create +// new fleets with the deleted build. +func (c *GameLift) DeleteBuild(input *DeleteBuildInput) (*DeleteBuildOutput, error) { + req, out := c.DeleteBuildRequest(input) + err := req.Send() + return out, err +} + +const opDeleteFleet = "DeleteFleet" + +// DeleteFleetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteFleet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the DeleteFleet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteFleetRequest method. +// req, resp := client.DeleteFleetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *GameLift) DeleteFleetRequest(input *DeleteFleetInput) (req *request.Request, output *DeleteFleetOutput) { + op := &request.Operation{ + Name: opDeleteFleet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteFleetInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteFleetOutput{} + req.Data = output + return +} + +// Deletes everything related to a fleet. Before deleting a fleet, you must +// set the fleet's desired capacity to zero. See UpdateFleetCapacity. +// +// This action removes the fleet's resources and the fleet record. Once a fleet +// is deleted, you can no longer use that fleet. +func (c *GameLift) DeleteFleet(input *DeleteFleetInput) (*DeleteFleetOutput, error) { + req, out := c.DeleteFleetRequest(input) + err := req.Send() + return out, err +} + +const opDeleteScalingPolicy = "DeleteScalingPolicy" + +// DeleteScalingPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteScalingPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteScalingPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteScalingPolicyRequest method. +// req, resp := client.DeleteScalingPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *GameLift) DeleteScalingPolicyRequest(input *DeleteScalingPolicyInput) (req *request.Request, output *DeleteScalingPolicyOutput) { + op := &request.Operation{ + Name: opDeleteScalingPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteScalingPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteScalingPolicyOutput{} + req.Data = output + return +} + +// Deletes a fleet scaling policy. This action means that the policy is no longer +// in force and removes all record of it. To delete a scaling policy, specify +// both the scaling policy name and the fleet ID it is associated with. 
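+//
+// A minimal sketch, assuming an existing client svc; the policy name and fleet
+// ID are illustrative:
+//
+//    _, err := svc.DeleteScalingPolicy(&gamelift.DeleteScalingPolicyInput{
+//        FleetId: aws.String("fleet-example"),
+//        Name:    aws.String("scale-on-player-load"),
+//    })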
+func (c *GameLift) DeleteScalingPolicy(input *DeleteScalingPolicyInput) (*DeleteScalingPolicyOutput, error) { + req, out := c.DeleteScalingPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAlias = "DescribeAlias" + +// DescribeAliasRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAlias operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeAlias method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeAliasRequest method. +// req, resp := client.DescribeAliasRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *GameLift) DescribeAliasRequest(input *DescribeAliasInput) (req *request.Request, output *DescribeAliasOutput) { + op := &request.Operation{ + Name: opDescribeAlias, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAliasInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAliasOutput{} + req.Data = output + return +} + +// Retrieves properties for a specified alias. To get the alias, specify an +// alias ID. If successful, an Alias object is returned. +func (c *GameLift) DescribeAlias(input *DescribeAliasInput) (*DescribeAliasOutput, error) { + req, out := c.DescribeAliasRequest(input) + err := req.Send() + return out, err +} + +const opDescribeBuild = "DescribeBuild" + +// DescribeBuildRequest generates a "aws/request.Request" representing the +// client's request for the DescribeBuild operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeBuild method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeBuildRequest method. +// req, resp := client.DescribeBuildRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *GameLift) DescribeBuildRequest(input *DescribeBuildInput) (req *request.Request, output *DescribeBuildOutput) { + op := &request.Operation{ + Name: opDescribeBuild, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeBuildInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeBuildOutput{} + req.Data = output + return +} + +// Retrieves properties for a build. To get a build record, specify a build +// ID. If successful, an object containing the build properties is returned. 
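+//
+// A minimal sketch, assuming an existing client svc; the build ID is illustrative:
+//
+//    resp, err := svc.DescribeBuild(&gamelift.DescribeBuildInput{
+//        BuildId: aws.String("build-example"),
+//    })
+//    if err == nil {
+//        fmt.Println(resp.Build)
+//    }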
+func (c *GameLift) DescribeBuild(input *DescribeBuildInput) (*DescribeBuildOutput, error) {
+ req, out := c.DescribeBuildRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDescribeEC2InstanceLimits = "DescribeEC2InstanceLimits"
+
+// DescribeEC2InstanceLimitsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeEC2InstanceLimits operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeEC2InstanceLimits method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DescribeEC2InstanceLimitsRequest method.
+// req, resp := client.DescribeEC2InstanceLimitsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *GameLift) DescribeEC2InstanceLimitsRequest(input *DescribeEC2InstanceLimitsInput) (req *request.Request, output *DescribeEC2InstanceLimitsOutput) {
+ op := &request.Operation{
+ Name: opDescribeEC2InstanceLimits,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DescribeEC2InstanceLimitsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DescribeEC2InstanceLimitsOutput{}
+ req.Data = output
+ return
+}
+
+// Retrieves the following information for the specified EC2 instance type:
+//
+//   - maximum number of instances allowed per AWS account (service limit)
+//   - current usage level for the AWS account
+//
+// Service limits vary depending on region. Available regions for GameLift
+// can be found in the AWS Management Console for GameLift (see the drop-down
+// list in the upper right corner).
+func (c *GameLift) DescribeEC2InstanceLimits(input *DescribeEC2InstanceLimitsInput) (*DescribeEC2InstanceLimitsOutput, error) {
+ req, out := c.DescribeEC2InstanceLimitsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDescribeFleetAttributes = "DescribeFleetAttributes"
+
+// DescribeFleetAttributesRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeFleetAttributes operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeFleetAttributes method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DescribeFleetAttributesRequest method.
+// req, resp := client.DescribeFleetAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *GameLift) DescribeFleetAttributesRequest(input *DescribeFleetAttributesInput) (req *request.Request, output *DescribeFleetAttributesOutput) { + op := &request.Operation{ + Name: opDescribeFleetAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeFleetAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeFleetAttributesOutput{} + req.Data = output + return +} + +// Retrieves fleet properties, including metadata, status, and configuration, +// for one or more fleets. You can request attributes for all fleets, or specify +// a list of one or more fleet IDs. When requesting multiple fleets, use the +// pagination parameters to retrieve results as a set of sequential pages. If +// successful, a FleetAttributes object is returned for each requested fleet +// ID. When specifying a list of fleet IDs, attribute objects are returned only +// for fleets that currently exist. +// +// Some API actions may limit the number of fleet IDs allowed in one request. +// If a request exceeds this limit, the request fails and the error message +// includes the maximum allowed. +func (c *GameLift) DescribeFleetAttributes(input *DescribeFleetAttributesInput) (*DescribeFleetAttributesOutput, error) { + req, out := c.DescribeFleetAttributesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeFleetCapacity = "DescribeFleetCapacity" + +// DescribeFleetCapacityRequest generates a "aws/request.Request" representing the +// client's request for the DescribeFleetCapacity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeFleetCapacity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeFleetCapacityRequest method. +// req, resp := client.DescribeFleetCapacityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *GameLift) DescribeFleetCapacityRequest(input *DescribeFleetCapacityInput) (req *request.Request, output *DescribeFleetCapacityOutput) { + op := &request.Operation{ + Name: opDescribeFleetCapacity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeFleetCapacityInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeFleetCapacityOutput{} + req.Data = output + return +} + +// Retrieves the current status of fleet capacity for one or more fleets. This +// information includes the number of instances that have been requested for +// the fleet and the number currently active. You can request capacity for all +// fleets, or specify a list of one or more fleet IDs. When requesting multiple +// fleets, use the pagination parameters to retrieve results as a set of sequential +// pages. If successful, a FleetCapacity object is returned for each requested +// fleet ID. 
When specifying a list of fleet IDs, attribute objects are returned
+// only for fleets that currently exist.
+//
+// Some API actions may limit the number of fleet IDs allowed in one request.
+// If a request exceeds this limit, the request fails and the error message
+// includes the maximum allowed.
+func (c *GameLift) DescribeFleetCapacity(input *DescribeFleetCapacityInput) (*DescribeFleetCapacityOutput, error) {
+	req, out := c.DescribeFleetCapacityRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeFleetEvents = "DescribeFleetEvents"
+
+// DescribeFleetEventsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeFleetEvents operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeFleetEvents method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DescribeFleetEventsRequest method.
+//    req, resp := client.DescribeFleetEventsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *GameLift) DescribeFleetEventsRequest(input *DescribeFleetEventsInput) (req *request.Request, output *DescribeFleetEventsOutput) {
+	op := &request.Operation{
+		Name:       opDescribeFleetEvents,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeFleetEventsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeFleetEventsOutput{}
+	req.Data = output
+	return
+}
+
+// Retrieves entries from the specified fleet's event log. You can specify a
+// time range to limit the result set. Use the pagination parameters to retrieve
+// results as a set of sequential pages. If successful, a collection of event
+// log entries matching the request is returned.
+func (c *GameLift) DescribeFleetEvents(input *DescribeFleetEventsInput) (*DescribeFleetEventsOutput, error) {
+	req, out := c.DescribeFleetEventsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeFleetPortSettings = "DescribeFleetPortSettings"
+
+// DescribeFleetPortSettingsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeFleetPortSettings operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeFleetPortSettings method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DescribeFleetPortSettingsRequest method.
+// req, resp := client.DescribeFleetPortSettingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *GameLift) DescribeFleetPortSettingsRequest(input *DescribeFleetPortSettingsInput) (req *request.Request, output *DescribeFleetPortSettingsOutput) { + op := &request.Operation{ + Name: opDescribeFleetPortSettings, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeFleetPortSettingsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeFleetPortSettingsOutput{} + req.Data = output + return +} + +// Retrieves the inbound connection permissions for a fleet. Connection permissions +// include a range of IP addresses and port settings that incoming traffic can +// use to access server processes in the fleet. To get a fleet's inbound connection +// permissions, specify a fleet ID. If successful, a collection of IpPermission +// objects is returned for the requested fleet ID. If the requested fleet has +// been deleted, the result set is empty. +func (c *GameLift) DescribeFleetPortSettings(input *DescribeFleetPortSettingsInput) (*DescribeFleetPortSettingsOutput, error) { + req, out := c.DescribeFleetPortSettingsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeFleetUtilization = "DescribeFleetUtilization" + +// DescribeFleetUtilizationRequest generates a "aws/request.Request" representing the +// client's request for the DescribeFleetUtilization operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeFleetUtilization method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeFleetUtilizationRequest method. +// req, resp := client.DescribeFleetUtilizationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *GameLift) DescribeFleetUtilizationRequest(input *DescribeFleetUtilizationInput) (req *request.Request, output *DescribeFleetUtilizationOutput) { + op := &request.Operation{ + Name: opDescribeFleetUtilization, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeFleetUtilizationInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeFleetUtilizationOutput{} + req.Data = output + return +} + +// Retrieves utilization statistics for one or more fleets. You can request +// utilization data for all fleets, or specify a list of one or more fleet IDs. +// When requesting multiple fleets, use the pagination parameters to retrieve +// results as a set of sequential pages. If successful, a FleetUtilization object +// is returned for each requested fleet ID. When specifying a list of fleet +// IDs, utilization objects are returned only for fleets that currently exist. +// +// Some API actions may limit the number of fleet IDs allowed in one request. +// If a request exceeds this limit, the request fails and the error message +// includes the maximum allowed. 
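+//
+// As a minimal sketch of the pagination flow described above (illustrative
+// only; the Limit/NextToken fields and the FleetUtilization result field are
+// assumed from this API's pagination conventions):
+//
+//    input := &gamelift.DescribeFleetUtilizationInput{Limit: aws.Int64(10)}
+//    for {
+//        page, err := client.DescribeFleetUtilization(input)
+//        if err != nil {
+//            break // handle the error appropriately in real code
+//        }
+//        for _, u := range page.FleetUtilization {
+//            fmt.Println(u) // one FleetUtilization object per requested fleet
+//        }
+//        if page.NextToken == nil {
+//            break // no more pages
+//        }
+//        input.NextToken = page.NextToken
+//    }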
+func (c *GameLift) DescribeFleetUtilization(input *DescribeFleetUtilizationInput) (*DescribeFleetUtilizationOutput, error) { + req, out := c.DescribeFleetUtilizationRequest(input) + err := req.Send() + return out, err +} + +const opDescribeGameSessionDetails = "DescribeGameSessionDetails" + +// DescribeGameSessionDetailsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeGameSessionDetails operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeGameSessionDetails method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeGameSessionDetailsRequest method. +// req, resp := client.DescribeGameSessionDetailsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *GameLift) DescribeGameSessionDetailsRequest(input *DescribeGameSessionDetailsInput) (req *request.Request, output *DescribeGameSessionDetailsOutput) { + op := &request.Operation{ + Name: opDescribeGameSessionDetails, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeGameSessionDetailsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeGameSessionDetailsOutput{} + req.Data = output + return +} + +// Retrieves properties, including the protection policy in force, for one or +// more game sessions. This action can be used in several ways: (1) provide +// a GameSessionId to request details for a specific game session; (2) provide +// either a FleetId or an AliasId to request properties for all game sessions +// running on a fleet. +// +// To get game session record(s), specify just one of the following: game session +// ID, fleet ID, or alias ID. You can filter this request by game session status. +// Use the pagination parameters to retrieve results as a set of sequential +// pages. If successful, a GameSessionDetail object is returned for each session +// matching the request. +func (c *GameLift) DescribeGameSessionDetails(input *DescribeGameSessionDetailsInput) (*DescribeGameSessionDetailsOutput, error) { + req, out := c.DescribeGameSessionDetailsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeGameSessions = "DescribeGameSessions" + +// DescribeGameSessionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeGameSessions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeGameSessions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the DescribeGameSessionsRequest method. +// req, resp := client.DescribeGameSessionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *GameLift) DescribeGameSessionsRequest(input *DescribeGameSessionsInput) (req *request.Request, output *DescribeGameSessionsOutput) { + op := &request.Operation{ + Name: opDescribeGameSessions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeGameSessionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeGameSessionsOutput{} + req.Data = output + return +} + +// Retrieves properties for one or more game sessions. This action can be used +// in several ways: (1) provide a GameSessionId to request properties for a +// specific game session; (2) provide a FleetId or an AliasId to request properties +// for all game sessions running on a fleet. +// +// To get game session record(s), specify just one of the following: game session +// ID, fleet ID, or alias ID. You can filter this request by game session status. +// Use the pagination parameters to retrieve results as a set of sequential +// pages. If successful, a GameSession object is returned for each session matching +// the request. +func (c *GameLift) DescribeGameSessions(input *DescribeGameSessionsInput) (*DescribeGameSessionsOutput, error) { + req, out := c.DescribeGameSessionsRequest(input) + err := req.Send() + return out, err +} + +const opDescribePlayerSessions = "DescribePlayerSessions" + +// DescribePlayerSessionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribePlayerSessions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribePlayerSessions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribePlayerSessionsRequest method. +// req, resp := client.DescribePlayerSessionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *GameLift) DescribePlayerSessionsRequest(input *DescribePlayerSessionsInput) (req *request.Request, output *DescribePlayerSessionsOutput) { + op := &request.Operation{ + Name: opDescribePlayerSessions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribePlayerSessionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribePlayerSessionsOutput{} + req.Data = output + return +} + +// Retrieves properties for one or more player sessions. This action can be +// used in several ways: (1) provide a PlayerSessionId parameter to request +// properties for a specific player session; (2) provide a GameSessionId parameter +// to request properties for all player sessions in the specified game session; +// (3) provide a PlayerId parameter to request properties for all player sessions +// of a specified player. 
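+//
+// As a minimal sketch of option (3) above (illustrative only; the player ID
+// is hypothetical and the PlayerSessions result field is assumed):
+//
+//    out, err := client.DescribePlayerSessions(&gamelift.DescribePlayerSessionsInput{
+//        PlayerId: aws.String("player-1234"), // hypothetical player ID
+//    })
+//    if err == nil {
+//        fmt.Println(out.PlayerSessions)
+//    }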
+//
+// To get player session record(s), specify only one of the following: a player
+// session ID, a game session ID, or a player ID. You can filter this request
+// by player session status. Use the pagination parameters to retrieve results
+// as a set of sequential pages. If successful, a PlayerSession object is returned
+// for each session matching the request.
+func (c *GameLift) DescribePlayerSessions(input *DescribePlayerSessionsInput) (*DescribePlayerSessionsOutput, error) {
+	req, out := c.DescribePlayerSessionsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeRuntimeConfiguration = "DescribeRuntimeConfiguration"
+
+// DescribeRuntimeConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeRuntimeConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeRuntimeConfiguration method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DescribeRuntimeConfigurationRequest method.
+//    req, resp := client.DescribeRuntimeConfigurationRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *GameLift) DescribeRuntimeConfigurationRequest(input *DescribeRuntimeConfigurationInput) (req *request.Request, output *DescribeRuntimeConfigurationOutput) {
+	op := &request.Operation{
+		Name:       opDescribeRuntimeConfiguration,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeRuntimeConfigurationInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeRuntimeConfigurationOutput{}
+	req.Data = output
+	return
+}
+
+// Retrieves the current runtime configuration for the specified fleet. The
+// runtime configuration tells GameLift how to launch server processes on instances
+// in the fleet.
+func (c *GameLift) DescribeRuntimeConfiguration(input *DescribeRuntimeConfigurationInput) (*DescribeRuntimeConfigurationOutput, error) {
+	req, out := c.DescribeRuntimeConfigurationRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeScalingPolicies = "DescribeScalingPolicies"
+
+// DescribeScalingPoliciesRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeScalingPolicies operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeScalingPolicies method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DescribeScalingPoliciesRequest method.
+//    req, resp := client.DescribeScalingPoliciesRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *GameLift) DescribeScalingPoliciesRequest(input *DescribeScalingPoliciesInput) (req *request.Request, output *DescribeScalingPoliciesOutput) {
+	op := &request.Operation{
+		Name:       opDescribeScalingPolicies,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeScalingPoliciesInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeScalingPoliciesOutput{}
+	req.Data = output
+	return
+}
+
+// Retrieves all scaling policies applied to a fleet.
+//
+// To get a fleet's scaling policies, specify the fleet ID. You can filter
+// this request by policy status, such as to retrieve only active scaling policies.
+// Use the pagination parameters to retrieve results as a set of sequential
+// pages. If successful, a set of ScalingPolicy objects is returned for the fleet.
+func (c *GameLift) DescribeScalingPolicies(input *DescribeScalingPoliciesInput) (*DescribeScalingPoliciesOutput, error) {
+	req, out := c.DescribeScalingPoliciesRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opGetGameSessionLogUrl = "GetGameSessionLogUrl"
+
+// GetGameSessionLogUrlRequest generates a "aws/request.Request" representing the
+// client's request for the GetGameSessionLogUrl operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the GetGameSessionLogUrl method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the GetGameSessionLogUrlRequest method.
+//    req, resp := client.GetGameSessionLogUrlRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *GameLift) GetGameSessionLogUrlRequest(input *GetGameSessionLogUrlInput) (req *request.Request, output *GetGameSessionLogUrlOutput) {
+	op := &request.Operation{
+		Name:       opGetGameSessionLogUrl,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetGameSessionLogUrlInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &GetGameSessionLogUrlOutput{}
+	req.Data = output
+	return
+}
+
+// Retrieves the location of stored game session logs for a specified game session.
+// When a game session is terminated, Amazon GameLift automatically stores the
+// logs in Amazon S3. Use the URL returned by this action to download the logs.
+//
+// See the AWS Service Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_gamelift)
+// page for maximum log file sizes. Log files that exceed this limit are not
+// saved.
+func (c *GameLift) GetGameSessionLogUrl(input *GetGameSessionLogUrlInput) (*GetGameSessionLogUrlOutput, error) {
+	req, out := c.GetGameSessionLogUrlRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opListAliases = "ListAliases"
+
+// ListAliasesRequest generates a "aws/request.Request" representing the
+// client's request for the ListAliases operation.
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListAliases method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListAliasesRequest method. +// req, resp := client.ListAliasesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *GameLift) ListAliasesRequest(input *ListAliasesInput) (req *request.Request, output *ListAliasesOutput) { + op := &request.Operation{ + Name: opListAliases, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListAliasesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListAliasesOutput{} + req.Data = output + return +} + +// Retrieves a collection of alias records for this AWS account. You can filter +// the result set by alias name and/or routing strategy type. Use the pagination +// parameters to retrieve results in sequential pages. +// +// Aliases are not listed in any particular order. +func (c *GameLift) ListAliases(input *ListAliasesInput) (*ListAliasesOutput, error) { + req, out := c.ListAliasesRequest(input) + err := req.Send() + return out, err +} + +const opListBuilds = "ListBuilds" + +// ListBuildsRequest generates a "aws/request.Request" representing the +// client's request for the ListBuilds operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListBuilds method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListBuildsRequest method. +// req, resp := client.ListBuildsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *GameLift) ListBuildsRequest(input *ListBuildsInput) (req *request.Request, output *ListBuildsOutput) { + op := &request.Operation{ + Name: opListBuilds, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListBuildsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListBuildsOutput{} + req.Data = output + return +} + +// Retrieves build records for all builds associated with the AWS account in +// use. You can limit results to builds in a specific state using the Status +// parameter. Use the pagination parameters to retrieve results in a set of +// sequential pages. +// +// Build records are not listed in any particular order. 
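+//
+// As a minimal sketch of filtering by build state (illustrative only; READY
+// is one of the BuildStatus enum values documented on the Build type, and the
+// Builds result field is assumed):
+//
+//    out, err := client.ListBuilds(&gamelift.ListBuildsInput{
+//        Status: aws.String("READY"),
+//    })
+//    if err == nil {
+//        fmt.Println(out.Builds)
+//    }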
+func (c *GameLift) ListBuilds(input *ListBuildsInput) (*ListBuildsOutput, error) { + req, out := c.ListBuildsRequest(input) + err := req.Send() + return out, err +} + +const opListFleets = "ListFleets" + +// ListFleetsRequest generates a "aws/request.Request" representing the +// client's request for the ListFleets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListFleets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListFleetsRequest method. +// req, resp := client.ListFleetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *GameLift) ListFleetsRequest(input *ListFleetsInput) (req *request.Request, output *ListFleetsOutput) { + op := &request.Operation{ + Name: opListFleets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListFleetsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListFleetsOutput{} + req.Data = output + return +} + +// Retrieves a collection of fleet records for this AWS account. You can filter +// the result set by build ID. Use the pagination parameters to retrieve results +// in sequential pages. +// +// Fleet records are not listed in any particular order. +func (c *GameLift) ListFleets(input *ListFleetsInput) (*ListFleetsOutput, error) { + req, out := c.ListFleetsRequest(input) + err := req.Send() + return out, err +} + +const opPutScalingPolicy = "PutScalingPolicy" + +// PutScalingPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutScalingPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutScalingPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutScalingPolicyRequest method. +// req, resp := client.PutScalingPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *GameLift) PutScalingPolicyRequest(input *PutScalingPolicyInput) (req *request.Request, output *PutScalingPolicyOutput) { + op := &request.Operation{ + Name: opPutScalingPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutScalingPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &PutScalingPolicyOutput{} + req.Data = output + return +} + +// Creates or updates a scaling policy for a fleet. An active scaling policy +// prompts Amazon GameLift to track a certain metric for a fleet and automatically +// change the fleet's capacity in specific circumstances. 
Each scaling policy +// contains one rule statement. Fleets can have multiple scaling policies in +// force simultaneously. +// +// A scaling policy rule statement has the following structure: +// +// If [MetricName] is [ComparisonOperator] [Threshold] for [EvaluationPeriods] +// minutes, then [ScalingAdjustmentType] to/by [ScalingAdjustment]. +// +// For example, this policy: "If the number of idle instances exceeds 20 for +// more than 15 minutes, then reduce the fleet capacity by 10 instances" could +// be implemented as the following rule statement: +// +// If [IdleInstances] is [GreaterThanOrEqualToThreshold] [20] for [15] minutes, +// then [ChangeInCapacity] by [-10]. +// +// To create or update a scaling policy, specify a unique combination of name +// and fleet ID, and set the rule values. All parameters for this action are +// required. If successful, the policy name is returned. Scaling policies cannot +// be suspended or made inactive. To stop enforcing a scaling policy, call DeleteScalingPolicy. +func (c *GameLift) PutScalingPolicy(input *PutScalingPolicyInput) (*PutScalingPolicyOutput, error) { + req, out := c.PutScalingPolicyRequest(input) + err := req.Send() + return out, err +} + +const opRequestUploadCredentials = "RequestUploadCredentials" + +// RequestUploadCredentialsRequest generates a "aws/request.Request" representing the +// client's request for the RequestUploadCredentials operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RequestUploadCredentials method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RequestUploadCredentialsRequest method. +// req, resp := client.RequestUploadCredentialsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *GameLift) RequestUploadCredentialsRequest(input *RequestUploadCredentialsInput) (req *request.Request, output *RequestUploadCredentialsOutput) { + op := &request.Operation{ + Name: opRequestUploadCredentials, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RequestUploadCredentialsInput{} + } + + req = c.newRequest(op, input, output) + output = &RequestUploadCredentialsOutput{} + req.Data = output + return +} + +// Retrieves a fresh set of upload credentials and the assigned Amazon S3 storage +// location for a specific build. Valid credentials are required to upload your +// game build files to Amazon S3. +// +// Call this action only if you need credentials for a build created with +// CreateBuild. This is a rare situation; in most cases, builds are created +// using the CLI command upload-build, which creates a build record and also +// uploads build files. +// +// Upload credentials are returned when you create the build, but they have +// a limited lifespan. You can get fresh credentials and use them to re-upload +// game files until the state of that build changes to READY. Once this happens, +// you must create a brand new build. 
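+//
+// As a minimal sketch of refreshing credentials for an existing build
+// (illustrative only; the build ID is hypothetical, and the UploadCredentials
+// result field is assumed to mirror CreateBuildOutput):
+//
+//    out, err := client.RequestUploadCredentials(&gamelift.RequestUploadCredentialsInput{
+//        BuildId: aws.String("build-2222"), // hypothetical build ID
+//    })
+//    if err == nil {
+//        fmt.Println(out.UploadCredentials) // fresh AwsCredentials for the upload
+//    }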
+func (c *GameLift) RequestUploadCredentials(input *RequestUploadCredentialsInput) (*RequestUploadCredentialsOutput, error) { + req, out := c.RequestUploadCredentialsRequest(input) + err := req.Send() + return out, err +} + +const opResolveAlias = "ResolveAlias" + +// ResolveAliasRequest generates a "aws/request.Request" representing the +// client's request for the ResolveAlias operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ResolveAlias method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ResolveAliasRequest method. +// req, resp := client.ResolveAliasRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *GameLift) ResolveAliasRequest(input *ResolveAliasInput) (req *request.Request, output *ResolveAliasOutput) { + op := &request.Operation{ + Name: opResolveAlias, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResolveAliasInput{} + } + + req = c.newRequest(op, input, output) + output = &ResolveAliasOutput{} + req.Data = output + return +} + +// Retrieves the fleet ID that a specified alias is currently pointing to. +func (c *GameLift) ResolveAlias(input *ResolveAliasInput) (*ResolveAliasOutput, error) { + req, out := c.ResolveAliasRequest(input) + err := req.Send() + return out, err +} + +const opUpdateAlias = "UpdateAlias" + +// UpdateAliasRequest generates a "aws/request.Request" representing the +// client's request for the UpdateAlias operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateAlias method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateAliasRequest method. +// req, resp := client.UpdateAliasRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *GameLift) UpdateAliasRequest(input *UpdateAliasInput) (req *request.Request, output *UpdateAliasOutput) { + op := &request.Operation{ + Name: opUpdateAlias, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateAliasInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateAliasOutput{} + req.Data = output + return +} + +// Updates properties for an alias. To update properties, specify the alias +// ID to be updated and provide the information to be changed. To reassign an +// alias to another fleet, provide an updated routing strategy. If successful, +// the updated alias record is returned. 
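+//
+// As a minimal sketch of reassigning an alias to another fleet (illustrative
+// only; the IDs are hypothetical and SIMPLE is assumed as the routing strategy
+// type value):
+//
+//    out, err := client.UpdateAlias(&gamelift.UpdateAliasInput{
+//        AliasId: aws.String("alias-1111"), // hypothetical alias ID
+//        RoutingStrategy: &gamelift.RoutingStrategy{
+//            Type:    aws.String("SIMPLE"),
+//            FleetId: aws.String("fleet-2222"), // hypothetical fleet ID
+//        },
+//    })
+//    if err == nil {
+//        fmt.Println(out.Alias) // the updated alias record
+//    }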
+func (c *GameLift) UpdateAlias(input *UpdateAliasInput) (*UpdateAliasOutput, error) { + req, out := c.UpdateAliasRequest(input) + err := req.Send() + return out, err +} + +const opUpdateBuild = "UpdateBuild" + +// UpdateBuildRequest generates a "aws/request.Request" representing the +// client's request for the UpdateBuild operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateBuild method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateBuildRequest method. +// req, resp := client.UpdateBuildRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *GameLift) UpdateBuildRequest(input *UpdateBuildInput) (req *request.Request, output *UpdateBuildOutput) { + op := &request.Operation{ + Name: opUpdateBuild, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateBuildInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateBuildOutput{} + req.Data = output + return +} + +// Updates metadata in a build record, including the build name and version. +// To update the metadata, specify the build ID to update and provide the new +// values. If successful, a build object containing the updated metadata is +// returned. +func (c *GameLift) UpdateBuild(input *UpdateBuildInput) (*UpdateBuildOutput, error) { + req, out := c.UpdateBuildRequest(input) + err := req.Send() + return out, err +} + +const opUpdateFleetAttributes = "UpdateFleetAttributes" + +// UpdateFleetAttributesRequest generates a "aws/request.Request" representing the +// client's request for the UpdateFleetAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateFleetAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateFleetAttributesRequest method. +// req, resp := client.UpdateFleetAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *GameLift) UpdateFleetAttributesRequest(input *UpdateFleetAttributesInput) (req *request.Request, output *UpdateFleetAttributesOutput) { + op := &request.Operation{ + Name: opUpdateFleetAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateFleetAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateFleetAttributesOutput{} + req.Data = output + return +} + +// Updates fleet properties, including name and description, for a fleet. 
To +// update metadata, specify the fleet ID and the property values you want to +// change. If successful, the fleet ID for the updated fleet is returned. +func (c *GameLift) UpdateFleetAttributes(input *UpdateFleetAttributesInput) (*UpdateFleetAttributesOutput, error) { + req, out := c.UpdateFleetAttributesRequest(input) + err := req.Send() + return out, err +} + +const opUpdateFleetCapacity = "UpdateFleetCapacity" + +// UpdateFleetCapacityRequest generates a "aws/request.Request" representing the +// client's request for the UpdateFleetCapacity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateFleetCapacity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateFleetCapacityRequest method. +// req, resp := client.UpdateFleetCapacityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *GameLift) UpdateFleetCapacityRequest(input *UpdateFleetCapacityInput) (req *request.Request, output *UpdateFleetCapacityOutput) { + op := &request.Operation{ + Name: opUpdateFleetCapacity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateFleetCapacityInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateFleetCapacityOutput{} + req.Data = output + return +} + +// Updates capacity settings for a fleet. Use this action to specify the number +// of EC2 instances (hosts) that you want this fleet to contain. Before calling +// this action, you may want to call DescribeEC2InstanceLimits to get the maximum +// capacity based on the fleet's EC2 instance type. +// +// If you're using autoscaling (see PutScalingPolicy), you may want to specify +// a minimum and/or maximum capacity. If you don't provide these, autoscaling +// can set capacity anywhere between zero and the service limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_gamelift). +// +// To update fleet capacity, specify the fleet ID and the number of instances +// you want the fleet to host. If successful, Amazon GameLift starts or terminates +// instances so that the fleet's active instance count matches the desired instance +// count. You can view a fleet's current capacity information by calling DescribeFleetCapacity. +// If the desired instance count is higher than the instance type's limit, the +// "Limit Exceeded" exception occurs. +func (c *GameLift) UpdateFleetCapacity(input *UpdateFleetCapacityInput) (*UpdateFleetCapacityOutput, error) { + req, out := c.UpdateFleetCapacityRequest(input) + err := req.Send() + return out, err +} + +const opUpdateFleetPortSettings = "UpdateFleetPortSettings" + +// UpdateFleetPortSettingsRequest generates a "aws/request.Request" representing the +// client's request for the UpdateFleetPortSettings operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateFleetPortSettings method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateFleetPortSettingsRequest method. +// req, resp := client.UpdateFleetPortSettingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *GameLift) UpdateFleetPortSettingsRequest(input *UpdateFleetPortSettingsInput) (req *request.Request, output *UpdateFleetPortSettingsOutput) { + op := &request.Operation{ + Name: opUpdateFleetPortSettings, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateFleetPortSettingsInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateFleetPortSettingsOutput{} + req.Data = output + return +} + +// Updates port settings for a fleet. To update settings, specify the fleet +// ID to be updated and list the permissions you want to update. List the permissions +// you want to add in InboundPermissionAuthorizations, and permissions you want +// to remove in InboundPermissionRevocations. Permissions to be removed must +// match existing fleet permissions. If successful, the fleet ID for the updated +// fleet is returned. +func (c *GameLift) UpdateFleetPortSettings(input *UpdateFleetPortSettingsInput) (*UpdateFleetPortSettingsOutput, error) { + req, out := c.UpdateFleetPortSettingsRequest(input) + err := req.Send() + return out, err +} + +const opUpdateGameSession = "UpdateGameSession" + +// UpdateGameSessionRequest generates a "aws/request.Request" representing the +// client's request for the UpdateGameSession operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateGameSession method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateGameSessionRequest method. +// req, resp := client.UpdateGameSessionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *GameLift) UpdateGameSessionRequest(input *UpdateGameSessionInput) (req *request.Request, output *UpdateGameSessionOutput) { + op := &request.Operation{ + Name: opUpdateGameSession, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateGameSessionInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateGameSessionOutput{} + req.Data = output + return +} + +// Updates game session properties. 
This includes the session name, maximum +// player count, protection policy, which controls whether or not an active +// game session can be terminated during a scale-down event, and the player +// session creation policy, which controls whether or not new players can join +// the session. To update a game session, specify the game session ID and the +// values you want to change. If successful, an updated GameSession object is +// returned. +func (c *GameLift) UpdateGameSession(input *UpdateGameSessionInput) (*UpdateGameSessionOutput, error) { + req, out := c.UpdateGameSessionRequest(input) + err := req.Send() + return out, err +} + +const opUpdateRuntimeConfiguration = "UpdateRuntimeConfiguration" + +// UpdateRuntimeConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the UpdateRuntimeConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateRuntimeConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateRuntimeConfigurationRequest method. +// req, resp := client.UpdateRuntimeConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *GameLift) UpdateRuntimeConfigurationRequest(input *UpdateRuntimeConfigurationInput) (req *request.Request, output *UpdateRuntimeConfigurationOutput) { + op := &request.Operation{ + Name: opUpdateRuntimeConfiguration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateRuntimeConfigurationInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateRuntimeConfigurationOutput{} + req.Data = output + return +} + +// Updates the current runtime configuration for the specified fleet, which +// tells GameLift how to launch server processes on instances in the fleet. +// You can update a fleet's runtime configuration at any time after the fleet +// is created; it does not need to be in an ACTIVE state. +// +// To update runtime configuration, specify the fleet ID and provide a RuntimeConfiguration +// object with the updated collection of server process configurations. +// +// Each instance in a GameLift fleet checks regularly for an updated runtime +// configuration and changes how it launches server processes to comply with +// the latest version. Existing server processes are not affected by the update; +// they continue to run until they end, while GameLift simply adds new server +// processes to fit the current runtime configuration. As a result, the runtime +// configuration changes are applied gradually as existing processes shut down +// and new processes are launched in GameLift's normal process recycling activity. +func (c *GameLift) UpdateRuntimeConfiguration(input *UpdateRuntimeConfigurationInput) (*UpdateRuntimeConfigurationOutput, error) { + req, out := c.UpdateRuntimeConfigurationRequest(input) + err := req.Send() + return out, err +} + +// Properties describing a fleet alias. 
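+//
+// All fields are pointers, so reads should be nil-safe; a short sketch using
+// the SDK's aws.StringValue dereference helper:
+//
+//    fmt.Println(aws.StringValue(alias.Name))
+//    if alias.RoutingStrategy != nil {
+//        fmt.Println(aws.StringValue(alias.RoutingStrategy.FleetId))
+//    }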
+type Alias struct {
+	_ struct{} `type:"structure"`
+
+	// Unique identifier for a fleet alias.
+	AliasId *string `type:"string"`
+
+	// Time stamp indicating when this object was created. Format is an integer
+	// representing the number of seconds since the Unix epoch (Unix time).
+	CreationTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+	// Human-readable description of an alias.
+	Description *string `type:"string"`
+
+	// Time stamp indicating when this object was last modified. Format is an integer
+	// representing the number of seconds since the Unix epoch (Unix time).
+	LastUpdatedTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+	// Descriptive label associated with an alias. Alias names do not need to be
+	// unique.
+	Name *string `type:"string"`
+
+	// Routing configuration for a fleet alias.
+	RoutingStrategy *RoutingStrategy `type:"structure"`
+}
+
+// String returns the string representation
+func (s Alias) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Alias) GoString() string {
+	return s.String()
+}
+
+// AWS access credentials required to upload game build files to Amazon GameLift.
+// These credentials are generated with CreateBuild, and are valid for a limited
+// time. If they expire before you upload your game build, get a new set by
+// calling RequestUploadCredentials.
+type AwsCredentials struct {
+	_ struct{} `type:"structure"`
+
+	// Access key for an AWS account.
+	AccessKeyId *string `min:"1" type:"string"`
+
+	// Secret key for an AWS account.
+	SecretAccessKey *string `min:"1" type:"string"`
+
+	// Token specific to a build ID.
+	SessionToken *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s AwsCredentials) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AwsCredentials) GoString() string {
+	return s.String()
+}
+
+// Properties describing a game build.
+type Build struct {
+	_ struct{} `type:"structure"`
+
+	// Unique identifier for a build.
+	BuildId *string `type:"string"`
+
+	// Time stamp indicating when this object was created. Format is an integer
+	// representing the number of seconds since the Unix epoch (Unix time).
+	CreationTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+	// Descriptive label associated with a build. Build names do not need to be
+	// unique. It can be set using CreateBuild or UpdateBuild.
+	Name *string `type:"string"`
+
+	// File size of the uploaded game build, expressed in bytes. When the build
+	// state is INITIALIZED, this value is 0.
+	SizeOnDisk *int64 `min:"1" type:"long"`
+
+	// Current status of the build. Possible build states include the following:
+	//
+	//    * INITIALIZED – A new build has been defined, but no files have been
+	//    uploaded. You cannot create fleets for builds that are in this state.
+	//    When a build is successfully created, the build state is set to this
+	//    value.
+	//
+	//    * READY – The game build has been successfully uploaded. You can now
+	//    create new fleets for this build.
+	//
+	//    * FAILED – The game build upload failed. You cannot create new fleets
+	//    for this build.
+	Status *string `type:"string" enum:"BuildStatus"`
+
+	// Version associated with this build. Version strings do not need to be unique
+	// to a build. This value can be set using CreateBuild or UpdateBuild.
+ Version *string `type:"string"` +} + +// String returns the string representation +func (s Build) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Build) GoString() string { + return s.String() +} + +// Represents the input for a request action. +type CreateAliasInput struct { + _ struct{} `type:"structure"` + + // Human-readable description of an alias. + Description *string `min:"1" type:"string"` + + // Descriptive label associated with an alias. Alias names do not need to be + // unique. + Name *string `min:"1" type:"string" required:"true"` + + // Object specifying the fleet and routing type to use for the alias. + RoutingStrategy *RoutingStrategy `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateAliasInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAliasInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateAliasInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateAliasInput"} + if s.Description != nil && len(*s.Description) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Description", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.RoutingStrategy == nil { + invalidParams.Add(request.NewErrParamRequired("RoutingStrategy")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the returned data in response to a request action. +type CreateAliasOutput struct { + _ struct{} `type:"structure"` + + // Object containing the newly created alias record. + Alias *Alias `type:"structure"` +} + +// String returns the string representation +func (s CreateAliasOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAliasOutput) GoString() string { + return s.String() +} + +// Represents the input for a request action. +type CreateBuildInput struct { + _ struct{} `type:"structure"` + + // Descriptive label associated with a build. Build names do not need to be + // unique. A build name can be changed later using UpdateBuild. + Name *string `min:"1" type:"string"` + + // Location in Amazon Simple Storage Service (Amazon S3) where a build's files + // are stored. This location is assigned in response to a CreateBuild call, + // and is always in the same region as the service used to create the build. + // For more details see the Amazon S3 documentation (http://aws.amazon.com/documentation/s3/). + StorageLocation *S3Location `type:"structure"` + + // Version associated with this build. Version strings do not need to be unique + // to a build. A build version can be changed later using UpdateBuild. + Version *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateBuildInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBuildInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
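+//
+// A short usage sketch (illustrative only; the build name is hypothetical):
+//
+//    in := &gamelift.CreateBuildInput{Name: aws.String("my-build")}
+//    if err := in.Validate(); err != nil {
+//        fmt.Println(err) // reports required-field and min-length violations
+//    }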
+func (s *CreateBuildInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateBuildInput"} + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Version != nil && len(*s.Version) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Version", 1)) + } + if s.StorageLocation != nil { + if err := s.StorageLocation.Validate(); err != nil { + invalidParams.AddNested("StorageLocation", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the returned data in response to a request action. +type CreateBuildOutput struct { + _ struct{} `type:"structure"` + + // Set of properties for the newly created build. + Build *Build `type:"structure"` + + // Amazon S3 path and key, identifying where the game build files are stored. + StorageLocation *S3Location `type:"structure"` + + // AWS credentials required when uploading a game build to the storage location. + // These credentials have a limited lifespan and are valid only for the build + // they were issued for. If you need to get fresh credentials, call RequestUploadCredentials. + UploadCredentials *AwsCredentials `type:"structure"` +} + +// String returns the string representation +func (s CreateBuildOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBuildOutput) GoString() string { + return s.String() +} + +// Represents the input for a request action. +type CreateFleetInput struct { + _ struct{} `type:"structure"` + + // Unique identifier of the build to be deployed on the new fleet. The build + // must have been successfully uploaded to GameLift and be in a READY state. + // This fleet setting cannot be changed once the fleet is created. + BuildId *string `type:"string" required:"true"` + + // Human-readable description of a fleet. + Description *string `min:"1" type:"string"` + + // Range of IP addresses and port settings that permit inbound traffic to access + // server processes running on the fleet. If no inbound permissions are set, + // including both IP address range and port range, the server processes in the + // fleet cannot accept connections. You can specify one or more sets of permissions + // for a fleet. + EC2InboundPermissions []*IpPermission `type:"list"` + + // Name of an EC2 instance type that is supported in Amazon GameLift. A fleet + // instance type determines the computing resources of each instance in the + // fleet, including CPU, memory, storage, and networking capacity. GameLift + // supports the following EC2 instance types. See Amazon EC2 Instance Types + // (https://aws.amazon.com/ec2/instance-types/) for detailed descriptions. + EC2InstanceType *string `type:"string" required:"true" enum:"EC2InstanceType"` + + // Location of default log files. When a server process is shut down, Amazon + // GameLift captures and stores any log files in this location. These logs are + // in addition to game session logs; see more on game session logs in the Amazon + // GameLift Developer Guide (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-api-server-code). + // If no default log path for a fleet is specified, GameLift will automatically + // upload logs stored on each instance at C:\game\logs. Use the GameLift console + // to access stored logs. + LogPaths []*string `type:"list"` + + // Descriptive label associated with a fleet. 
Fleet names do not need to be + // unique. + Name *string `min:"1" type:"string" required:"true"` + + // Game session protection policy to apply to all instances in this fleet. If + // this parameter is not set, instances in this fleet default to no protection. + // You can change a fleet's protection policy using UpdateFleetAttributes, but + // this change will only affect sessions created after the policy change. You + // can also set protection for individual instances using UpdateGameSession. + // NoProtection – The game session can be terminated during a scale-down event. + // FullProtection – If the game session is in an ACTIVE status, it cannot be + // terminated during a scale-down event. + NewGameSessionProtectionPolicy *string `type:"string" enum:"ProtectionPolicy"` + + // Instructions for launching server processes on each instance in the fleet. + // The runtime configuration for a fleet has a collection of server process + // configurations, one for each type of server process to run on an instance. + // A server process configuration specifies the location of the server executable, + // launch parameters, and the number of concurrent processes with that configuration + // to maintain on each instance. A CreateFleet request must include a runtime + // configuration with at least one server process configuration; otherwise the + // request will fail with an invalid request exception. (This parameter replaces + // the parameters ServerLaunchPath and ServerLaunchParameters; requests that + // contain values for these parameters instead of a runtime configuration will + // continue to work.) + RuntimeConfiguration *RuntimeConfiguration `type:"structure"` + + // This parameter is no longer used. Instead, specify server launch parameters + // in the RuntimeConfiguration parameter. (Requests that specify a server launch + // path and launch parameters instead of a runtime configuration will continue + // to work.) + ServerLaunchParameters *string `min:"1" type:"string"` + + // This parameter is no longer used. Instead, specify a server launch path using + // the RuntimeConfiguration parameter. (Requests that specify a server launch + // path and launch parameters instead of a runtime configuration will continue + // to work.) + ServerLaunchPath *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateFleetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFleetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
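+//
+// Editor's note: a hedged sketch of the required fields, not part of the
+// generated code; the ServerProcess field names and the instance type value
+// are assumptions based on the RuntimeConfiguration description above:
+//
+//	input := &gamelift.CreateFleetInput{
+//		BuildId:         aws.String("build-id"),
+//		EC2InstanceType: aws.String("c4.large"),
+//		Name:            aws.String("my-fleet"),
+//		RuntimeConfiguration: &gamelift.RuntimeConfiguration{
+//			ServerProcesses: []*gamelift.ServerProcess{{
+//				LaunchPath:           aws.String("C:\\game\\server.exe"),
+//				ConcurrentExecutions: aws.Int64(1),
+//			}},
+//		},
+//	}
+//	if err := input.Validate(); err != nil { /* handle invalid parameters */ }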
+func (s *CreateFleetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateFleetInput"} + if s.BuildId == nil { + invalidParams.Add(request.NewErrParamRequired("BuildId")) + } + if s.Description != nil && len(*s.Description) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Description", 1)) + } + if s.EC2InstanceType == nil { + invalidParams.Add(request.NewErrParamRequired("EC2InstanceType")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.ServerLaunchParameters != nil && len(*s.ServerLaunchParameters) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ServerLaunchParameters", 1)) + } + if s.ServerLaunchPath != nil && len(*s.ServerLaunchPath) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ServerLaunchPath", 1)) + } + if s.EC2InboundPermissions != nil { + for i, v := range s.EC2InboundPermissions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "EC2InboundPermissions", i), err.(request.ErrInvalidParams)) + } + } + } + if s.RuntimeConfiguration != nil { + if err := s.RuntimeConfiguration.Validate(); err != nil { + invalidParams.AddNested("RuntimeConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the returned data in response to a request action. +type CreateFleetOutput struct { + _ struct{} `type:"structure"` + + // Properties for the newly created fleet. + FleetAttributes *FleetAttributes `type:"structure"` +} + +// String returns the string representation +func (s CreateFleetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFleetOutput) GoString() string { + return s.String() +} + +// Represents the input for a request action. +type CreateGameSessionInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for a fleet alias. Each request must reference either a + // fleet ID or alias ID, but not both. + AliasId *string `type:"string"` + + // Unique identifier for a fleet. Each request must reference either a fleet + // ID or alias ID, but not both. + FleetId *string `type:"string"` + + // Set of properties used to administer a game session. These properties are + // passed to the server process hosting it. + GameProperties []*GameProperty `type:"list"` + + // Maximum number of players that can be connected simultaneously to the game + // session. + MaximumPlayerSessionCount *int64 `type:"integer" required:"true"` + + // Descriptive label associated with a game session. Session names do not need + // to be unique. + Name *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateGameSessionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateGameSessionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
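+//
+// Editor's note: a minimal sketch, not part of the generated code. Exactly
+// one of FleetId or AliasId is set, per the field docs above:
+//
+//	input := &gamelift.CreateGameSessionInput{
+//		FleetId:                   aws.String("fleet-id"),
+//		MaximumPlayerSessionCount: aws.Int64(16),
+//		Name:                      aws.String("match-1"),
+//	}
+//	if err := input.Validate(); err != nil { /* handle invalid parameters */ }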
+func (s *CreateGameSessionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateGameSessionInput"} + if s.MaximumPlayerSessionCount == nil { + invalidParams.Add(request.NewErrParamRequired("MaximumPlayerSessionCount")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.GameProperties != nil { + for i, v := range s.GameProperties { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GameProperties", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the returned data in response to a request action. +type CreateGameSessionOutput struct { + _ struct{} `type:"structure"` + + // Object containing the newly created game session record. + GameSession *GameSession `type:"structure"` +} + +// String returns the string representation +func (s CreateGameSessionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateGameSessionOutput) GoString() string { + return s.String() +} + +// Represents the input for a request action. +type CreatePlayerSessionInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for a game session. Specify the game session you want to + // add a player to. + GameSessionId *string `type:"string" required:"true"` + + // Unique identifier for the player to be added. + PlayerId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreatePlayerSessionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePlayerSessionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreatePlayerSessionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreatePlayerSessionInput"} + if s.GameSessionId == nil { + invalidParams.Add(request.NewErrParamRequired("GameSessionId")) + } + if s.PlayerId == nil { + invalidParams.Add(request.NewErrParamRequired("PlayerId")) + } + if s.PlayerId != nil && len(*s.PlayerId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PlayerId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the returned data in response to a request action. +type CreatePlayerSessionOutput struct { + _ struct{} `type:"structure"` + + // Object containing the newly created player session record. + PlayerSession *PlayerSession `type:"structure"` +} + +// String returns the string representation +func (s CreatePlayerSessionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePlayerSessionOutput) GoString() string { + return s.String() +} + +// Represents the input for a request action. +type CreatePlayerSessionsInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for a game session. + GameSessionId *string `type:"string" required:"true"` + + // List of unique identifiers for the players to be added. 
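+ // (Editor's note: a minimal sketch, not part of the generated code, assuming
+ // a client value svc:
+ //
+ //	_, err := svc.CreatePlayerSessions(&gamelift.CreatePlayerSessionsInput{
+ //		GameSessionId: aws.String("game-session-id"),
+ //		PlayerIds:     []*string{aws.String("p1"), aws.String("p2")},
+ //	})
+ //
+ // The list must contain at least one ID, as Validate below enforces.)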
+ PlayerIds []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s CreatePlayerSessionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePlayerSessionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreatePlayerSessionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreatePlayerSessionsInput"} + if s.GameSessionId == nil { + invalidParams.Add(request.NewErrParamRequired("GameSessionId")) + } + if s.PlayerIds == nil { + invalidParams.Add(request.NewErrParamRequired("PlayerIds")) + } + if s.PlayerIds != nil && len(s.PlayerIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PlayerIds", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the returned data in response to a request action. +type CreatePlayerSessionsOutput struct { + _ struct{} `type:"structure"` + + // Collection of player session objects created for the added players. + PlayerSessions []*PlayerSession `type:"list"` +} + +// String returns the string representation +func (s CreatePlayerSessionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePlayerSessionsOutput) GoString() string { + return s.String() +} + +// Represents the input for a request action. +type DeleteAliasInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for a fleet alias. Specify the alias you want to delete. + AliasId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteAliasInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAliasInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteAliasInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteAliasInput"} + if s.AliasId == nil { + invalidParams.Add(request.NewErrParamRequired("AliasId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteAliasOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAliasOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAliasOutput) GoString() string { + return s.String() +} + +// Represents the input for a request action. +type DeleteBuildInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for the build you want to delete. + BuildId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBuildInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBuildInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
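+//
+// Editor's note: a minimal sketch, not part of the generated code, assuming
+// a client value svc:
+//
+//	_, err := svc.DeleteBuild(&gamelift.DeleteBuildInput{
+//		BuildId: aws.String("build-id"),
+//	}) // Validate runs as part of the request; a nil BuildId fails here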
+func (s *DeleteBuildInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBuildInput"} + if s.BuildId == nil { + invalidParams.Add(request.NewErrParamRequired("BuildId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteBuildOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBuildOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBuildOutput) GoString() string { + return s.String() +} + +// Represents the input for a request action. +type DeleteFleetInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for the fleet you want to delete. + FleetId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteFleetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFleetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteFleetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteFleetInput"} + if s.FleetId == nil { + invalidParams.Add(request.NewErrParamRequired("FleetId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteFleetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteFleetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFleetOutput) GoString() string { + return s.String() +} + +// Represents the input for a request action. +type DeleteScalingPolicyInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for a fleet. + FleetId *string `type:"string" required:"true"` + + // Descriptive label associated with a scaling policy. Policy names do not need + // to be unique. + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteScalingPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteScalingPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteScalingPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteScalingPolicyInput"} + if s.FleetId == nil { + invalidParams.Add(request.NewErrParamRequired("FleetId")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteScalingPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteScalingPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteScalingPolicyOutput) GoString() string { + return s.String() +} + +// Represents the input for a request action. +type DescribeAliasInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for a fleet alias. Specify the alias you want to retrieve. 
+ AliasId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeAliasInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAliasInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeAliasInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeAliasInput"} + if s.AliasId == nil { + invalidParams.Add(request.NewErrParamRequired("AliasId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the returned data in response to a request action. +type DescribeAliasOutput struct { + _ struct{} `type:"structure"` + + // Object containing the requested alias. + Alias *Alias `type:"structure"` +} + +// String returns the string representation +func (s DescribeAliasOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAliasOutput) GoString() string { + return s.String() +} + +// Represents the input for a request action. +type DescribeBuildInput struct { + _ struct{} `type:"structure"` + + // Unique identifier of the build that you want to retrieve properties for. + BuildId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeBuildInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeBuildInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeBuildInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeBuildInput"} + if s.BuildId == nil { + invalidParams.Add(request.NewErrParamRequired("BuildId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the returned data in response to a request action. +type DescribeBuildOutput struct { + _ struct{} `type:"structure"` + + // Set of properties describing the requested build. + Build *Build `type:"structure"` +} + +// String returns the string representation +func (s DescribeBuildOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeBuildOutput) GoString() string { + return s.String() +} + +// Represents the input for a request action. +type DescribeEC2InstanceLimitsInput struct { + _ struct{} `type:"structure"` + + // Name of an EC2 instance type that is supported in Amazon GameLift. A fleet + // instance type determines the computing resources of each instance in the + // fleet, including CPU, memory, storage, and networking capacity. GameLift + // supports the following EC2 instance types. See Amazon EC2 Instance Types + // (https://aws.amazon.com/ec2/instance-types/) for detailed descriptions. Leave + // this parameter blank to retrieve limits for all types. + EC2InstanceType *string `type:"string" enum:"EC2InstanceType"` +} + +// String returns the string representation +func (s DescribeEC2InstanceLimitsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEC2InstanceLimitsInput) GoString() string { + return s.String() +} + +// Represents the returned data in response to a request action. 
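+//
+// Editor's note: a hedged sketch of how this output is typically obtained,
+// not part of the generated code:
+//
+//	out, err := svc.DescribeEC2InstanceLimits(&gamelift.DescribeEC2InstanceLimitsInput{})
+//	if err == nil {
+//		for _, l := range out.EC2InstanceLimits {
+//			fmt.Println(aws.StringValue(l.EC2InstanceType), aws.Int64Value(l.InstanceLimit))
+//		}
+//	}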
+type DescribeEC2InstanceLimitsOutput struct { + _ struct{} `type:"structure"` + + // Object containing the maximum number of instances for the specified instance + // type. + EC2InstanceLimits []*EC2InstanceLimit `type:"list"` +} + +// String returns the string representation +func (s DescribeEC2InstanceLimitsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEC2InstanceLimitsOutput) GoString() string { + return s.String() +} + +// Represents the input for a request action. +type DescribeFleetAttributesInput struct { + _ struct{} `type:"structure"` + + // Unique identifiers for the fleet(s) that you want to retrieve attributes + // for. To request attributes for all fleets, leave this parameter empty. + FleetIds []*string `min:"1" type:"list"` + + // Maximum number of results to return. Use this parameter with NextToken to + // get results as a set of sequential pages. This parameter is ignored when + // the request specifies one or a list of fleet IDs. + Limit *int64 `min:"1" type:"integer"` + + // Token indicating the start of the next sequential page of results. Use the + // token that is returned with a previous call to this action. To specify the + // start of the result set, do not specify a value. This parameter is ignored + // when the request specifies one or a list of fleet IDs. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeFleetAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFleetAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeFleetAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeFleetAttributesInput"} + if s.FleetIds != nil && len(s.FleetIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FleetIds", 1)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the returned data in response to a request action. +type DescribeFleetAttributesOutput struct { + _ struct{} `type:"structure"` + + // Collection of objects containing attribute metadata for each requested fleet + // ID. + FleetAttributes []*FleetAttributes `type:"list"` + + // Token indicating where to resume retrieving results on the next call to this + // action. If no token is returned, these results represent the end of the list. + // + // If a request has a limit that exactly matches the number of remaining results, + // a token is returned even though there are no more results to retrieve. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeFleetAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFleetAttributesOutput) GoString() string { + return s.String() +} + +// Represents the input for a request action. +type DescribeFleetCapacityInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for the fleet(s) you want to retrieve capacity information + // for. 
To request capacity information for all fleets, leave this parameter + // empty. + FleetIds []*string `min:"1" type:"list"` + + // Maximum number of results to return. Use this parameter with NextToken to + // get results as a set of sequential pages. This parameter is ignored when + // the request specifies one or a list of fleet IDs. + Limit *int64 `min:"1" type:"integer"` + + // Token indicating the start of the next sequential page of results. Use the + // token that is returned with a previous call to this action. To specify the + // start of the result set, do not specify a value. This parameter is ignored + // when the request specifies one or a list of fleet IDs. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeFleetCapacityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFleetCapacityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeFleetCapacityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeFleetCapacityInput"} + if s.FleetIds != nil && len(s.FleetIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FleetIds", 1)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the returned data in response to a request action. +type DescribeFleetCapacityOutput struct { + _ struct{} `type:"structure"` + + // Collection of objects containing capacity information for each requested + // fleet ID. Leave this parameter empty to retrieve capacity information for + // all fleets. + FleetCapacity []*FleetCapacity `type:"list"` + + // Token indicating where to resume retrieving results on the next call to this + // action. If no token is returned, these results represent the end of the list. + // + // If a request has a limit that exactly matches the number of remaining results, + // a token is returned even though there are no more results to retrieve. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeFleetCapacityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFleetCapacityOutput) GoString() string { + return s.String() +} + +// Represents the input for a request action. +type DescribeFleetEventsInput struct { + _ struct{} `type:"structure"` + + // Most recent date to retrieve event logs for. If no end time is specified, + // this call returns entries from the specified start time up to the present. + // Format is an integer representing the number of seconds since the Unix epoch + // (Unix time). + EndTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Unique identifier for the fleet to get event logs for. + FleetId *string `type:"string" required:"true"` + + // Maximum number of results to return. Use this parameter with NextToken to + // get results as a set of sequential pages. + Limit *int64 `min:"1" type:"integer"` + + // Token indicating the start of the next sequential page of results. Use the + // token that is returned with a previous call to this action. 
To specify the + // start of the result set, do not specify a value. + NextToken *string `min:"1" type:"string"` + + // Earliest date to retrieve event logs for. If no start time is specified, + // this call returns entries starting from when the fleet was created to the + // specified end time. Format is an integer representing the number of seconds + // since the Unix epoch (Unix time). + StartTime *time.Time `type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s DescribeFleetEventsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFleetEventsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeFleetEventsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeFleetEventsInput"} + if s.FleetId == nil { + invalidParams.Add(request.NewErrParamRequired("FleetId")) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the returned data in response to a request action. +type DescribeFleetEventsOutput struct { + _ struct{} `type:"structure"` + + // Collection of objects containing event log entries for the specified fleet. + Events []*Event `type:"list"` + + // Token indicating where to resume retrieving results on the next call to this + // action. If no token is returned, these results represent the end of the list. + // + // If a request has a limit that exactly matches the number of remaining results, + // a token is returned even though there are no more results to retrieve. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeFleetEventsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFleetEventsOutput) GoString() string { + return s.String() +} + +// Represents the input for a request action. +type DescribeFleetPortSettingsInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for the fleet you want to retrieve port settings for. + FleetId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeFleetPortSettingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFleetPortSettingsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeFleetPortSettingsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeFleetPortSettingsInput"} + if s.FleetId == nil { + invalidParams.Add(request.NewErrParamRequired("FleetId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the returned data in response to a request action. +type DescribeFleetPortSettingsOutput struct { + _ struct{} `type:"structure"` + + // Object containing port settings for the requested fleet ID. 
+ InboundPermissions []*IpPermission `type:"list"` +} + +// String returns the string representation +func (s DescribeFleetPortSettingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFleetPortSettingsOutput) GoString() string { + return s.String() +} + +// Represents the input for a request action. +type DescribeFleetUtilizationInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for the fleet(s) you want to retrieve utilization data + // for. To request utilization data for all fleets, leave this parameter empty. + FleetIds []*string `min:"1" type:"list"` + + // Maximum number of results to return. Use this parameter with NextToken to + // get results as a set of sequential pages. This parameter is ignored when + // the request specifies one or a list of fleet IDs. + Limit *int64 `min:"1" type:"integer"` + + // Token indicating the start of the next sequential page of results. Use the + // token that is returned with a previous call to this action. To specify the + // start of the result set, do not specify a value. This parameter is ignored + // when the request specifies one or a list of fleet IDs. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeFleetUtilizationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFleetUtilizationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeFleetUtilizationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeFleetUtilizationInput"} + if s.FleetIds != nil && len(s.FleetIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FleetIds", 1)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the returned data in response to a request action. +type DescribeFleetUtilizationOutput struct { + _ struct{} `type:"structure"` + + // Collection of objects containing utilization information for each requested + // fleet ID. + FleetUtilization []*FleetUtilization `type:"list"` + + // Token indicating where to resume retrieving results on the next call to this + // action. If no token is returned, these results represent the end of the list. + // + // If a request has a limit that exactly matches the number of remaining results, + // a token is returned even though there are no more results to retrieve. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeFleetUtilizationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFleetUtilizationOutput) GoString() string { + return s.String() +} + +// Represents the input for a request action. +type DescribeGameSessionDetailsInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for a fleet alias. Specify an alias to retrieve information + // on all game sessions active on the fleet. + AliasId *string `type:"string"` + + // Unique identifier for a fleet. Specify a fleet to retrieve information on + // all game sessions active on the fleet. 
+ FleetId *string `type:"string"` + + // Unique identifier for a game session. Specify the game session to retrieve + // information on. + GameSessionId *string `type:"string"` + + // Maximum number of results to return. Use this parameter with NextToken to + // get results as a set of sequential pages. + Limit *int64 `min:"1" type:"integer"` + + // Token indicating the start of the next sequential page of results. Use the + // token that is returned with a previous call to this action. To specify the + // start of the result set, do not specify a value. + NextToken *string `min:"1" type:"string"` + + // Game session status to filter results on. Possible game session states include + // ACTIVE, TERMINATED, ACTIVATING and TERMINATING (the last two are transitory). + StatusFilter *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeGameSessionDetailsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeGameSessionDetailsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeGameSessionDetailsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeGameSessionDetailsInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.StatusFilter != nil && len(*s.StatusFilter) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StatusFilter", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the returned data in response to a request action. +type DescribeGameSessionDetailsOutput struct { + _ struct{} `type:"structure"` + + // Collection of objects containing game session properties and the protection + // policy currently in force for each session matching the request. + GameSessionDetails []*GameSessionDetail `type:"list"` + + // Token indicating where to resume retrieving results on the next call to this + // action. If no token is returned, these results represent the end of the list. + // + // If a request has a limit that exactly matches the number of remaining results, + // a token is returned even though there are no more results to retrieve. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeGameSessionDetailsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeGameSessionDetailsOutput) GoString() string { + return s.String() +} + +// Represents the input for a request action. +type DescribeGameSessionsInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for a fleet alias. Specify an alias to retrieve information + // on all game sessions active on the fleet. + AliasId *string `type:"string"` + + // Unique identifier for a fleet. Specify a fleet to retrieve information on + // all game sessions active on the fleet. + FleetId *string `type:"string"` + + // Unique identifier for a game session. Specify the game session to retrieve + // information on. + GameSessionId *string `type:"string"` + + // Maximum number of results to return. Use this parameter with NextToken to + // get results as a set of sequential pages. 
+ Limit *int64 `min:"1" type:"integer"` + + // Token indicating the start of the next sequential page of results. Use the + // token that is returned with a previous call to this action. To specify the + // start of the result set, do not specify a value. + NextToken *string `min:"1" type:"string"` + + // Game session status to filter results on. Possible game session states include + // ACTIVE, TERMINATED, ACTIVATING, and TERMINATING (the last two are transitory). + StatusFilter *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeGameSessionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeGameSessionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeGameSessionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeGameSessionsInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.StatusFilter != nil && len(*s.StatusFilter) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StatusFilter", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the returned data in response to a request action. +type DescribeGameSessionsOutput struct { + _ struct{} `type:"structure"` + + // Collection of objects containing game session properties for each session + // matching the request. + GameSessions []*GameSession `type:"list"` + + // Token indicating where to resume retrieving results on the next call to this + // action. If no token is returned, these results represent the end of the list. + // + // If a request has a limit that exactly matches the number of remaining results, + // a token is returned even though there are no more results to retrieve. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeGameSessionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeGameSessionsOutput) GoString() string { + return s.String() +} + +// Represents the input for a request action. +type DescribePlayerSessionsInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for a game session. + GameSessionId *string `type:"string"` + + // Maximum number of results to return. Use this parameter with NextToken to + // get results as a set of sequential pages. If a player session ID is specified, + // this parameter is ignored. + Limit *int64 `min:"1" type:"integer"` + + // Token indicating the start of the next sequential page of results. Use the + // token that is returned with a previous call to this action. To specify the + // start of the result set, do not specify a value. If a player session ID is + // specified, this parameter is ignored. + NextToken *string `min:"1" type:"string"` + + // Unique identifier for a player. + PlayerId *string `min:"1" type:"string"` + + // Unique identifier for a player session. + PlayerSessionId *string `type:"string"` + + // Player session status to filter results on. 
+ Possible player session states
+ // include the following: RESERVED – The player session request has been received,
+ // but the player has not yet connected to the server process and/or been validated.
+ // ACTIVE – The player has been validated by the server process and is currently
+ // connected. COMPLETED – The player connection has been dropped. TIMEDOUT – A
+ // player session request was received, but the player did not connect and/or
+ // was not validated within the time-out limit (60 seconds).
+ PlayerSessionStatusFilter *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribePlayerSessionsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribePlayerSessionsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribePlayerSessionsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribePlayerSessionsInput"}
+ if s.Limit != nil && *s.Limit < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
+ }
+ if s.NextToken != nil && len(*s.NextToken) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
+ }
+ if s.PlayerId != nil && len(*s.PlayerId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("PlayerId", 1))
+ }
+ if s.PlayerSessionStatusFilter != nil && len(*s.PlayerSessionStatusFilter) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("PlayerSessionStatusFilter", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the returned data in response to a request action.
+type DescribePlayerSessionsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Token indicating where to resume retrieving results on the next call to this
+ // action. If no token is returned, these results represent the end of the list.
+ //
+ // If a request has a limit that exactly matches the number of remaining results,
+ // a token is returned even though there are no more results to retrieve.
+ NextToken *string `min:"1" type:"string"`
+
+ // Collection of objects containing properties for each player session that
+ // matches the request.
+ PlayerSessions []*PlayerSession `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribePlayerSessionsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribePlayerSessionsOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the input for a request action.
+type DescribeRuntimeConfigurationInput struct {
+ _ struct{} `type:"structure"`
+
+ // Unique identifier of the fleet to get the runtime configuration for.
+ FleetId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeRuntimeConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeRuntimeConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
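+//
+// Editor's note: a minimal sketch, not part of the generated code, assuming
+// a client value svc:
+//
+//	out, err := svc.DescribeRuntimeConfiguration(&gamelift.DescribeRuntimeConfigurationInput{
+//		FleetId: aws.String("fleet-id"),
+//	})
+//	// on success, out.RuntimeConfiguration lists the fleet's server processes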
+func (s *DescribeRuntimeConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeRuntimeConfigurationInput"}
+ if s.FleetId == nil {
+ invalidParams.Add(request.NewErrParamRequired("FleetId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the returned data in response to a request action.
+type DescribeRuntimeConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Instructions describing how server processes should be launched and maintained
+ // on each instance in the fleet.
+ RuntimeConfiguration *RuntimeConfiguration `type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeRuntimeConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeRuntimeConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the input for a request action.
+type DescribeScalingPoliciesInput struct {
+ _ struct{} `type:"structure"`
+
+ // Unique identifier for a fleet. Specify the fleet to retrieve scaling policies
+ // for.
+ FleetId *string `type:"string" required:"true"`
+
+ // Maximum number of results to return. Use this parameter with NextToken to
+ // get results as a set of sequential pages.
+ Limit *int64 `min:"1" type:"integer"`
+
+ // Token indicating the start of the next sequential page of results. Use the
+ // token that is returned with a previous call to this action. To specify the
+ // start of the result set, do not specify a value.
+ NextToken *string `min:"1" type:"string"`
+
+ // Scaling policy status to filter results on. A scaling policy is only in force
+ // when in an ACTIVE state. ACTIVE – The scaling policy is currently in force.
+ // UPDATEREQUESTED – A request to update the scaling policy has been received.
+ // UPDATING – A change is being made to the scaling policy. DELETEREQUESTED
+ // – A request to delete the scaling policy has been received. DELETING – The
+ // scaling policy is being deleted. DELETED – The scaling policy has been deleted.
+ // ERROR – An error occurred in creating the policy. It should be removed and
+ // recreated.
+ StatusFilter *string `type:"string" enum:"ScalingStatusType"`
+}
+
+// String returns the string representation
+func (s DescribeScalingPoliciesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeScalingPoliciesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeScalingPoliciesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeScalingPoliciesInput"}
+ if s.FleetId == nil {
+ invalidParams.Add(request.NewErrParamRequired("FleetId"))
+ }
+ if s.Limit != nil && *s.Limit < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
+ }
+ if s.NextToken != nil && len(*s.NextToken) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the returned data in response to a request action.
+type DescribeScalingPoliciesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Token indicating where to resume retrieving results on the next call to this
+ // action. If no token is returned, these results represent the end of the list.
+ // + // If a request has a limit that exactly matches the number of remaining results, + // a token is returned even though there are no more results to retrieve. + NextToken *string `min:"1" type:"string"` + + // Collection of objects containing the scaling policies matching the request. + ScalingPolicies []*ScalingPolicy `type:"list"` +} + +// String returns the string representation +func (s DescribeScalingPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScalingPoliciesOutput) GoString() string { + return s.String() +} + +// Current status of fleet capacity. The number of active instances should match +// or be in the process of matching the number of desired instances. Pending +// and terminating counts are non-zero only if fleet capacity is adjusting to +// an UpdateFleetCapacity request, or if access to resources is temporarily +// affected. +type EC2InstanceCounts struct { + _ struct{} `type:"structure"` + + // Actual number of active instances in the fleet. + ACTIVE *int64 `type:"integer"` + + // Ideal number of active instances in the fleet. + DESIRED *int64 `type:"integer"` + + // Number of active instances in the fleet that are not currently hosting a + // game session. + IDLE *int64 `type:"integer"` + + // Maximum value allowed for the fleet's instance count. + MAXIMUM *int64 `type:"integer"` + + // Minimum value allowed for the fleet's instance count. + MINIMUM *int64 `type:"integer"` + + // Number of instances in the fleet that are starting but not yet active. + PENDING *int64 `type:"integer"` + + // Number of instances in the fleet that are no longer active but haven't yet + // been terminated. + TERMINATING *int64 `type:"integer"` +} + +// String returns the string representation +func (s EC2InstanceCounts) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EC2InstanceCounts) GoString() string { + return s.String() +} + +// Maximum number of instances allowed based on the Amazon Elastic Compute Cloud +// (Amazon EC2) instance type. Instance limits can be retrieved by calling DescribeEC2InstanceLimits. +type EC2InstanceLimit struct { + _ struct{} `type:"structure"` + + // Number of instances of the specified type that are currently in use by this + // AWS account. + CurrentInstances *int64 `type:"integer"` + + // Name of an EC2 instance type that is supported in Amazon GameLift. A fleet + // instance type determines the computing resources of each instance in the + // fleet, including CPU, memory, storage, and networking capacity. GameLift + // supports the following EC2 instance types. See Amazon EC2 Instance Types + // (https://aws.amazon.com/ec2/instance-types/) for detailed descriptions. + EC2InstanceType *string `type:"string" enum:"EC2InstanceType"` + + // Number of instances allowed. + InstanceLimit *int64 `type:"integer"` +} + +// String returns the string representation +func (s EC2InstanceLimit) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EC2InstanceLimit) GoString() string { + return s.String() +} + +// Log entry describing an event involving an Amazon GameLift resource (such +// as a fleet). +type Event struct { + _ struct{} `type:"structure"` + + // Type of event being logged. + EventCode *string `type:"string" enum:"EventCode"` + + // Unique identifier for a fleet event. 
+ EventId *string `min:"1" type:"string"`
+
+ // Time stamp indicating when this event occurred. Format is an integer representing
+ // the number of seconds since the Unix epoch (Unix time).
+ EventTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // Additional information related to the event.
+ Message *string `min:"1" type:"string"`
+
+ // Unique identifier for the resource, such as a fleet ID.
+ ResourceId *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s Event) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Event) GoString() string {
+ return s.String()
+}
+
+// General properties describing a fleet.
+type FleetAttributes struct {
+ _ struct{} `type:"structure"`
+
+ // Unique identifier for a build.
+ BuildId *string `type:"string"`
+
+ // Time stamp indicating when this object was created. Format is an integer
+ // representing the number of seconds since the Unix epoch (Unix time).
+ CreationTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // Human-readable description of the fleet.
+ Description *string `min:"1" type:"string"`
+
+ // Unique identifier for a fleet.
+ FleetId *string `type:"string"`
+
+ // Location of default log files. When a server process is shut down, Amazon
+ // GameLift captures and stores any log files in this location. These logs are
+ // in addition to game session logs; see more on game session logs in the Amazon
+ // GameLift Developer Guide (http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-api-server-code).
+ // If no default log path for a fleet is specified, GameLift will automatically
+ // upload logs stored on each instance at C:\game\logs. Use the GameLift console
+ // to access stored logs.
+ LogPaths []*string `type:"list"`
+
+ // Descriptive label associated with a fleet. Fleet names do not need to be
+ // unique.
+ Name *string `min:"1" type:"string"`
+
+ // Type of game session protection to set for all new instances started in the
+ // fleet. NoProtection – The game session can be terminated during a scale-down
+ // event. FullProtection – If the game session is in an ACTIVE status, it cannot
+ // be terminated during a scale-down event.
+ NewGameSessionProtectionPolicy *string `type:"string" enum:"ProtectionPolicy"`
+
+ // Deprecated. Server launch parameters are now specified using a RuntimeConfiguration
+ // object.
+ ServerLaunchParameters *string `min:"1" type:"string"`
+
+ // Deprecated. Server launch paths are now set using a RuntimeConfiguration
+ // object.
+ ServerLaunchPath *string `min:"1" type:"string"`
+
+ // Current status of the fleet. Possible fleet states include the following:
+ // NEW – A new fleet has been defined and desired instances is set to 1. DOWNLOADING/VALIDATING/BUILDING/ACTIVATING
+ // – GameLift is setting up the new fleet, creating new instances with the game
+ // build and starting server processes. ACTIVE – Hosts can now accept game sessions.
+ // ERROR – An error occurred when downloading, validating, building, or activating
+ // the fleet. DELETING – Hosts are responding to a delete fleet request. TERMINATED
+ // – The fleet no longer exists.
+ Status *string `type:"string" enum:"FleetStatus"`
+
+ // Time stamp indicating when this fleet was terminated. Format is an integer
+ // representing the number of seconds since the Unix epoch (Unix time).
+ TerminationTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+}
+
+// String returns the string representation
+func (s FleetAttributes) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s FleetAttributes) GoString() string {
+ return s.String()
+}
+
+// Information about the fleet's capacity. Fleet capacity is measured in EC2
+// instances. By default, new fleets have a capacity of one instance, but can
+// be updated as needed. The maximum number of instances for a fleet is determined
+// by the fleet's instance type.
+type FleetCapacity struct {
+ _ struct{} `type:"structure"`
+
+ // Unique identifier for a fleet.
+ FleetId *string `type:"string"`
+
+ // Current status of fleet capacity.
+ InstanceCounts *EC2InstanceCounts `type:"structure"`
+
+ // Name of an EC2 instance type that is supported in Amazon GameLift. A fleet
+ // instance type determines the computing resources of each instance in the
+ // fleet, including CPU, memory, storage, and networking capacity. GameLift
+ // supports the following EC2 instance types. See Amazon EC2 Instance Types
+ // (https://aws.amazon.com/ec2/instance-types/) for detailed descriptions.
+ InstanceType *string `type:"string" enum:"EC2InstanceType"`
+}
+
+// String returns the string representation
+func (s FleetCapacity) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s FleetCapacity) GoString() string {
+ return s.String()
+}
+
+// Current status of fleet utilization, including the number of game and player
+// sessions being hosted.
+type FleetUtilization struct {
+ _ struct{} `type:"structure"`
+
+ // Number of active game sessions currently being hosted on all instances in
+ // the fleet.
+ ActiveGameSessionCount *int64 `type:"integer"`
+
+ // Number of server processes in an ACTIVE state currently running across all
+ // instances in the fleet.
+ ActiveServerProcessCount *int64 `type:"integer"`
+
+ // Number of active player sessions currently being hosted on all instances
+ // in the fleet.
+ CurrentPlayerSessionCount *int64 `type:"integer"`
+
+ // Unique identifier for a fleet.
+ FleetId *string `type:"string"`
+
+ // Maximum players allowed across all game sessions currently being hosted on
+ // all instances in the fleet.
+ MaximumPlayerSessionCount *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s FleetUtilization) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s FleetUtilization) GoString() string {
+ return s.String()
+}
+
+// Set of key-value pairs containing information a server process requires to
+// set up a game session. This object allows you to pass in any set of data
+// needed for your game. For more information, see the Amazon GameLift Developer
+// Guide (http://docs.aws.amazon.com/gamelift/latest/developerguide/).
+type GameProperty struct {
+ _ struct{} `type:"structure"`
+
+ Key *string `type:"string" required:"true"`
+
+ Value *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GameProperty) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GameProperty) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
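+//
+// Editor's note: a minimal sketch, not part of the generated code; both Key
+// and Value are required, as the checks below enforce:
+//
+//	p := &gamelift.GameProperty{
+//		Key:   aws.String("gameMode"),
+//		Value: aws.String("deathmatch"),
+//	}
+//	if err := p.Validate(); err != nil { /* handle missing Key or Value */ }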
+func (s *GameProperty) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GameProperty"}
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Value == nil {
+ invalidParams.Add(request.NewErrParamRequired("Value"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Properties describing a game session.
+type GameSession struct {
+ _ struct{} `type:"structure"`
+
+ // Time stamp indicating when this object was created. Format is an integer
+ // representing the number of seconds since the Unix epoch (Unix time).
+ CreationTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // Number of players currently in the game session.
+ CurrentPlayerSessionCount *int64 `type:"integer"`
+
+ // Unique identifier for a fleet.
+ FleetId *string `type:"string"`
+
+ // Set of custom properties for the game session.
+ GameProperties []*GameProperty `type:"list"`
+
+ // Unique identifier for a game session.
+ GameSessionId *string `type:"string"`
+
+ // IP address of the game session. To connect to a GameLift server process,
+ // an app needs both the IP address and port number.
+ IpAddress *string `type:"string"`
+
+ // Maximum number of players allowed in the game session.
+ MaximumPlayerSessionCount *int64 `type:"integer"`
+
+ // Descriptive label associated with a game session. Session names do not need
+ // to be unique.
+ Name *string `min:"1" type:"string"`
+
+ // Indicates whether or not the game session is accepting new players.
+ PlayerSessionCreationPolicy *string `type:"string" enum:"PlayerSessionCreationPolicy"`
+
+ // Port number for the game session. To connect to a GameLift server process,
+ // an app needs both the IP address and port number.
+ Port *int64 `min:"1025" type:"integer"`
+
+ // Current status of the game session. A game session must be in an ACTIVE state
+ // to have player sessions.
+ Status *string `type:"string" enum:"GameSessionStatus"`
+
+ // Time stamp indicating when this game session was terminated. Format is an
+ // integer representing the number of seconds since the Unix epoch (Unix time).
+ TerminationTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+}
+
+// String returns the string representation
+func (s GameSession) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GameSession) GoString() string {
+ return s.String()
+}
+
+// A game session's properties and the protection policy currently in force.
+type GameSessionDetail struct {
+ _ struct{} `type:"structure"`
+
+ // Properties describing a game session.
+ GameSession *GameSession `type:"structure"`
+
+ // Current status of protection for the game session. NoProtection – The game
+ // session can be terminated during a scale-down event. FullProtection – If
+ // the game session is in an ACTIVE status, it cannot be terminated during a
+ // scale-down event.
+ ProtectionPolicy *string `type:"string" enum:"ProtectionPolicy"`
+}
+
+// String returns the string representation
+func (s GameSessionDetail) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GameSessionDetail) GoString() string {
+ return s.String()
+}
+
+// Represents the input for a request action.
+type GetGameSessionLogUrlInput struct {
+ _ struct{} `type:"structure"`
+
+ // Unique identifier for a game session. Specify the game session you want to
+ // get logs for.
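+ //
+ // A retrieval sketch (the client value svc and the ID are assumptions; see
+ // the generated examples at the end of this file for client setup):
+ //
+ //	resp, err := svc.GetGameSessionLogUrl(&gamelift.GetGameSessionLogUrlInput{
+ //		GameSessionId: aws.String("game-session-id"),
+ //	})
+ //	// On success, resp.PreSignedUrl can be fetched with any HTTP client
+ //	// before the link expires.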
+ GameSessionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetGameSessionLogUrlInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetGameSessionLogUrlInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetGameSessionLogUrlInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetGameSessionLogUrlInput"} + if s.GameSessionId == nil { + invalidParams.Add(request.NewErrParamRequired("GameSessionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the returned data in response to a request action. +type GetGameSessionLogUrlOutput struct { + _ struct{} `type:"structure"` + + // Location of the requested game session logs, available for download. + PreSignedUrl *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetGameSessionLogUrlOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetGameSessionLogUrlOutput) GoString() string { + return s.String() +} + +// A range of IP addresses and port settings that allow inbound traffic to connect +// to server processes on GameLift. Each game session hosted on a fleet is assigned +// a unique combination of IP address and port number, which must fall into +// the fleet's allowed ranges. This combination is included in the GameSession +// object. +type IpPermission struct { + _ struct{} `type:"structure"` + + // Starting value for a range of allowed port numbers. + FromPort *int64 `min:"1025" type:"integer" required:"true"` + + // Range of allowed IP addresses. This value must be expressed in CIDR notation + // (https://tools.ietf.org/id/cidr). Example: "000.000.000.000/[subnet mask]" + // or optionally the shortened version "0.0.0.0/[subnet mask]". + IpRange *string `type:"string" required:"true"` + + // Network communication protocol used by the fleet. + Protocol *string `type:"string" required:"true" enum:"IpProtocol"` + + // Ending value for a range of allowed port numbers. Port numbers are end-inclusive. + // This value must be higher than FromPort. + ToPort *int64 `min:"1025" type:"integer" required:"true"` +} + +// String returns the string representation +func (s IpPermission) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IpPermission) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IpPermission) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IpPermission"} + if s.FromPort == nil { + invalidParams.Add(request.NewErrParamRequired("FromPort")) + } + if s.FromPort != nil && *s.FromPort < 1025 { + invalidParams.Add(request.NewErrParamMinValue("FromPort", 1025)) + } + if s.IpRange == nil { + invalidParams.Add(request.NewErrParamRequired("IpRange")) + } + if s.Protocol == nil { + invalidParams.Add(request.NewErrParamRequired("Protocol")) + } + if s.ToPort == nil { + invalidParams.Add(request.NewErrParamRequired("ToPort")) + } + if s.ToPort != nil && *s.ToPort < 1025 { + invalidParams.Add(request.NewErrParamMinValue("ToPort", 1025)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the input for a request action. 
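+//
+// The Limit/NextToken fields below drive pagination. A minimal paging sketch
+// (the client value svc is an assumption, created as in the generated examples
+// at the end of this file):
+//
+//	params := &gamelift.ListAliasesInput{Limit: aws.Int64(20)}
+//	for {
+//		resp, err := svc.ListAliases(params)
+//		if err != nil {
+//			fmt.Println(err.Error())
+//			return
+//		}
+//		fmt.Println(resp.Aliases)
+//		if resp.NextToken == nil {
+//			break // no token means the end of the list
+//		}
+//		params.NextToken = resp.NextToken
+//	}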
+type ListAliasesInput struct {
+ _ struct{} `type:"structure"`
+
+ // Maximum number of results to return. Use this parameter with NextToken to
+ // get results as a set of sequential pages.
+ Limit *int64 `min:"1" type:"integer"`
+
+ // Descriptive label associated with an alias. Alias names do not need to be
+ // unique.
+ Name *string `min:"1" type:"string"`
+
+ // Token indicating the start of the next sequential page of results. Use the
+ // token that is returned with a previous call to this action. To specify the
+ // start of the result set, do not specify a value.
+ NextToken *string `min:"1" type:"string"`
+
+ // Type of routing to filter results on. Use this parameter to retrieve only
+ // aliases of a certain type. To retrieve all aliases, leave this parameter
+ // empty. Possible routing types include the following: SIMPLE – The alias resolves
+ // to one specific fleet. Use this type when routing to active fleets. TERMINAL
+ // – The alias does not resolve to a fleet but instead can be used to display
+ // a message to the user. A terminal alias throws a TerminalRoutingStrategyException
+ // with the RoutingStrategy message embedded.
+ RoutingStrategyType *string `type:"string" enum:"RoutingStrategyType"`
+}
+
+// String returns the string representation
+func (s ListAliasesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListAliasesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListAliasesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListAliasesInput"}
+ if s.Limit != nil && *s.Limit < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
+ }
+ if s.Name != nil && len(*s.Name) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Name", 1))
+ }
+ if s.NextToken != nil && len(*s.NextToken) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the returned data in response to a request action.
+type ListAliasesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Collection of alias records that match the list request.
+ Aliases []*Alias `type:"list"`
+
+ // Token indicating where to resume retrieving results on the next call to this
+ // action. If no token is returned, these results represent the end of the list.
+ //
+ // If a request has a limit that exactly matches the number of remaining results,
+ // a token is returned even though there are no more results to retrieve.
+ NextToken *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ListAliasesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListAliasesOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the input for a request action.
+type ListBuildsInput struct {
+ _ struct{} `type:"structure"`
+
+ // Maximum number of results to return. Use this parameter with NextToken to
+ // get results as a set of sequential pages.
+ Limit *int64 `min:"1" type:"integer"`
+
+ // Token indicating the start of the next sequential page of results. Use the
+ // token that is returned with a previous call to this action. To specify the
+ // start of the result set, do not specify a value.
+ NextToken *string `min:"1" type:"string"`
+
+ // Build state to filter results by. To retrieve all builds, leave this parameter
+ // empty. Possible build states include the following: INITIALIZED – A new build
+ // has been defined, but no files have been uploaded. You cannot create fleets
+ // for builds that are in this state. When a build is successfully created,
+ // the build state is set to this value. READY – The game build has been successfully
+ // uploaded. You can now create new fleets for this build. FAILED – The game
+ // build upload failed. You cannot create new fleets for this build.
+ Status *string `type:"string" enum:"BuildStatus"`
+}
+
+// String returns the string representation
+func (s ListBuildsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBuildsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListBuildsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListBuildsInput"}
+ if s.Limit != nil && *s.Limit < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
+ }
+ if s.NextToken != nil && len(*s.NextToken) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the returned data in response to a request action.
+type ListBuildsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Collection of build records that match the request.
+ Builds []*Build `type:"list"`
+
+ // Token indicating where to resume retrieving results on the next call to this
+ // action. If no token is returned, these results represent the end of the list.
+ //
+ // If a request has a limit that exactly matches the number of remaining results,
+ // a token is returned even though there are no more results to retrieve.
+ NextToken *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ListBuildsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBuildsOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the input for a request action.
+type ListFleetsInput struct {
+ _ struct{} `type:"structure"`
+
+ // Unique identifier of the build to return fleets for. Use this parameter to
+ // return only fleets using the specified build. To retrieve all fleets, leave
+ // this parameter empty.
+ BuildId *string `type:"string"`
+
+ // Maximum number of results to return. Use this parameter with NextToken to
+ // get results as a set of sequential pages.
+ Limit *int64 `min:"1" type:"integer"`
+
+ // Token indicating the start of the next sequential page of results. Use the
+ // token that is returned with a previous call to this action. To specify the
+ // start of the result set, do not specify a value.
+ NextToken *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ListFleetsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListFleetsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListFleetsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListFleetsInput"}
+ if s.Limit != nil && *s.Limit < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
+ }
+ if s.NextToken != nil && len(*s.NextToken) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the returned data in response to a request action.
+type ListFleetsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Set of fleet IDs matching the list request. You can retrieve additional information
+ // about all returned fleets by passing this result set to a call to DescribeFleetAttributes,
+ // DescribeFleetCapacity, and DescribeFleetUtilization.
+ FleetIds []*string `min:"1" type:"list"`
+
+ // Token indicating where to resume retrieving results on the next call to this
+ // action. If no token is returned, these results represent the end of the list.
+ //
+ // If a request has a limit that exactly matches the number of remaining results,
+ // a token is returned even though there are no more results to retrieve.
+ NextToken *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ListFleetsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListFleetsOutput) GoString() string {
+ return s.String()
+}
+
+// Properties describing a player session.
+type PlayerSession struct {
+ _ struct{} `type:"structure"`
+
+ // Time stamp indicating when this object was created. Format is an integer
+ // representing the number of seconds since the Unix epoch (Unix time).
+ CreationTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // Unique identifier for a fleet.
+ FleetId *string `type:"string"`
+
+ // Unique identifier for a game session.
+ GameSessionId *string `type:"string"`
+
+ // Game session IP address. All player sessions reference the game session location.
+ IpAddress *string `type:"string"`
+
+ // Unique identifier for a player.
+ PlayerId *string `min:"1" type:"string"`
+
+ // Unique identifier for a player session.
+ PlayerSessionId *string `type:"string"`
+
+ // Port number for the game session. To connect to a GameLift server process,
+ // an app needs both the IP address and port number.
+ Port *int64 `min:"1025" type:"integer"`
+
+ // Current status of the player session. Possible player session states include
+ // the following: RESERVED – The player session request has been received, but
+ // the player has not yet connected to the server process and/or been validated.
+ // ACTIVE – The player has been validated by the server process and is currently
+ // connected. COMPLETED – The player connection has been dropped. TIMEDOUT – A
+ // player session request was received, but the player did not connect and/or
+ // was not validated within the time-out limit (60 seconds).
+ Status *string `type:"string" enum:"PlayerSessionStatus"`
+
+ // Time stamp indicating when this player session was terminated. Format is an
+ // integer representing the number of seconds since the Unix epoch (Unix time).
+ TerminationTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+}
+
+// String returns the string representation
+func (s PlayerSession) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PlayerSession) GoString() string {
+ return s.String()
+}
+
+// Represents the input for a request action.
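+//
+// A policy sketch that adds one instance whenever available player session
+// slots stay below 50 for five minutes (svc and the fleet ID are assumptions;
+// the enum strings match the constants at the end of this file):
+//
+//	_, err := svc.PutScalingPolicy(&gamelift.PutScalingPolicyInput{
+//		Name:                  aws.String("scale-up-on-low-availability"),
+//		FleetId:               aws.String("fleet-id"),
+//		MetricName:            aws.String("AvailablePlayerSessions"),
+//		ComparisonOperator:    aws.String("LessThanThreshold"),
+//		Threshold:             aws.Float64(50),
+//		EvaluationPeriods:     aws.Int64(5),
+//		ScalingAdjustmentType: aws.String("ChangeInCapacity"),
+//		ScalingAdjustment:     aws.Int64(1),
+//	})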
+type PutScalingPolicyInput struct { + _ struct{} `type:"structure"` + + // Comparison operator to use when measuring the metric against the threshold + // value. + ComparisonOperator *string `type:"string" required:"true" enum:"ComparisonOperatorType"` + + // Length of time (in minutes) the metric must be at or beyond the threshold + // before a scaling event is triggered. + EvaluationPeriods *int64 `min:"1" type:"integer" required:"true"` + + // Unique identity for the fleet to scale with this policy. + FleetId *string `type:"string" required:"true"` + + // Name of the Amazon GameLift-defined metric that is used to trigger an adjustment. + // ActivatingGameSessions – number of game sessions in the process of being + // created (game session status = ACTIVATING). ActiveGameSessions – number + // of game sessions currently running (game session status = ACTIVE). CurrentPlayerSessions + // – number of active or reserved player sessions (player session status = ACTIVE + // or RESERVED). AvailablePlayerSessions – number of player session slots + // currently available in active game sessions across the fleet, calculated + // by subtracting a game session's current player session count from its maximum + // player session count. This number includes game sessions that are not currently + // accepting players (game session PlayerSessionCreationPolicy = DENY_ALL). + // ActiveInstances – number of instances currently running a game session. + // IdleInstances – number of instances not currently running a game session. + MetricName *string `type:"string" required:"true" enum:"MetricName"` + + // Descriptive label associated with a scaling policy. Policy names do not need + // to be unique. A fleet can have only one scaling policy with the same name. + Name *string `min:"1" type:"string" required:"true"` + + // Amount of adjustment to make, based on the scaling adjustment type. + ScalingAdjustment *int64 `type:"integer" required:"true"` + + // Type of adjustment to make to a fleet's instance count (see FleetCapacity): + // ChangeInCapacity – add (or subtract) the scaling adjustment value from + // the current instance count. Positive values scale up while negative values + // scale down. ExactCapacity – set the instance count to the scaling adjustment + // value. PercentChangeInCapacity – increase or reduce the current instance + // count by the scaling adjustment, read as a percentage. Positive values scale + // up while negative values scale down; for example, a value of "-10" scales + // the fleet down by 10%. + ScalingAdjustmentType *string `type:"string" required:"true" enum:"ScalingAdjustmentType"` + + // Metric value used to trigger a scaling event. + Threshold *float64 `type:"double" required:"true"` +} + +// String returns the string representation +func (s PutScalingPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutScalingPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutScalingPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutScalingPolicyInput"} + if s.ComparisonOperator == nil { + invalidParams.Add(request.NewErrParamRequired("ComparisonOperator")) + } + if s.EvaluationPeriods == nil { + invalidParams.Add(request.NewErrParamRequired("EvaluationPeriods")) + } + if s.EvaluationPeriods != nil && *s.EvaluationPeriods < 1 { + invalidParams.Add(request.NewErrParamMinValue("EvaluationPeriods", 1)) + } + if s.FleetId == nil { + invalidParams.Add(request.NewErrParamRequired("FleetId")) + } + if s.MetricName == nil { + invalidParams.Add(request.NewErrParamRequired("MetricName")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.ScalingAdjustment == nil { + invalidParams.Add(request.NewErrParamRequired("ScalingAdjustment")) + } + if s.ScalingAdjustmentType == nil { + invalidParams.Add(request.NewErrParamRequired("ScalingAdjustmentType")) + } + if s.Threshold == nil { + invalidParams.Add(request.NewErrParamRequired("Threshold")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the returned data in response to a request action. +type PutScalingPolicyOutput struct { + _ struct{} `type:"structure"` + + // Descriptive label associated with a scaling policy. Policy names do not need + // to be unique. + Name *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PutScalingPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutScalingPolicyOutput) GoString() string { + return s.String() +} + +// Represents the input for a request action. +type RequestUploadCredentialsInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for the build you want to get credentials for. + BuildId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RequestUploadCredentialsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestUploadCredentialsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RequestUploadCredentialsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RequestUploadCredentialsInput"} + if s.BuildId == nil { + invalidParams.Add(request.NewErrParamRequired("BuildId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the returned data in response to a request action. +type RequestUploadCredentialsOutput struct { + _ struct{} `type:"structure"` + + // Amazon S3 path and key, identifying where the game build files are stored. + StorageLocation *S3Location `type:"structure"` + + // AWS credentials required when uploading a game build to the storage location. + // These credentials have a limited lifespan and are valid only for the build + // they were issued for. + UploadCredentials *AwsCredentials `type:"structure"` +} + +// String returns the string representation +func (s RequestUploadCredentialsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestUploadCredentialsOutput) GoString() string { + return s.String() +} + +// Represents the input for a request action. 
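+//
+// A resolution sketch (svc and the alias ID are assumptions):
+//
+//	resp, err := svc.ResolveAlias(&gamelift.ResolveAliasInput{
+//		AliasId: aws.String("alias-id"),
+//	})
+//	if err == nil && resp.FleetId != nil {
+//		fmt.Println(*resp.FleetId) // the fleet the alias currently points to
+//	}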
+type ResolveAliasInput struct {
+ _ struct{} `type:"structure"`
+
+ // Unique identifier for the alias you want to resolve.
+ AliasId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ResolveAliasInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ResolveAliasInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ResolveAliasInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ResolveAliasInput"}
+ if s.AliasId == nil {
+ invalidParams.Add(request.NewErrParamRequired("AliasId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the returned data in response to a request action.
+type ResolveAliasOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Fleet ID associated with the requested alias.
+ FleetId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ResolveAliasOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ResolveAliasOutput) GoString() string {
+ return s.String()
+}
+
+// Routing configuration for a fleet alias.
+type RoutingStrategy struct {
+ _ struct{} `type:"structure"`
+
+ // Unique identifier for a fleet.
+ FleetId *string `type:"string"`
+
+ // Message text to be used with a terminal routing strategy.
+ Message *string `type:"string"`
+
+ // Type of routing strategy. Possible routing types include the following: SIMPLE
+ // – The alias resolves to one specific fleet. Use this type when routing to
+ // active fleets. TERMINAL – The alias does not resolve to a fleet but instead
+ // can be used to display a message to the user. A terminal alias throws a TerminalRoutingStrategyException
+ // with the RoutingStrategy message embedded.
+ Type *string `type:"string" enum:"RoutingStrategyType"`
+}
+
+// String returns the string representation
+func (s RoutingStrategy) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RoutingStrategy) GoString() string {
+ return s.String()
+}
+
+// Collection of server process configurations that describe what processes
+// should be run on each instance in a fleet. An instance can launch and maintain
+// multiple server processes based on the runtime configuration; it regularly
+// checks for an updated runtime configuration and starts new server processes
+// to match the latest version.
+//
+// The key purpose of a runtime configuration with multiple server process
+// configurations is to be able to run more than one kind of game server in
+// a single fleet. You can include configurations for more than one server
+// executable in order to run two or more different programs on the same instance.
+// This option might be useful, for example, to run more than one version of
+// your game server on the same fleet. Another option is to specify configurations
+// for the same server executable but with different launch parameters.
+//
+// A GameLift instance is limited to 50 processes running simultaneously. To
+// calculate the total number of processes specified in a runtime configuration,
+// add the values of the ConcurrentExecutions parameter for each ServerProcess
+// object in the runtime configuration.
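+//
+// For example, a sketch of a configuration that stays under the 50-process
+// limit (paths and counts are illustrative):
+//
+//	rc := &gamelift.RuntimeConfiguration{
+//		ServerProcesses: []*gamelift.ServerProcess{
+//			{LaunchPath: aws.String("C:\\game\\Brawl\\server.exe"), ConcurrentExecutions: aws.Int64(10)},
+//			{LaunchPath: aws.String("C:\\game\\Brawl\\server.exe"), Parameters: aws.String("-debug"), ConcurrentExecutions: aws.Int64(35)},
+//		},
+//	}
+//	// Total concurrent processes: 10 + 35 = 45, within the 50-process limit.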
+type RuntimeConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Collection of server process configurations describing what server processes
+ // to run on each instance in a fleet.
+ ServerProcesses []*ServerProcess `min:"1" type:"list"`
+}
+
+// String returns the string representation
+func (s RuntimeConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RuntimeConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RuntimeConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RuntimeConfiguration"}
+ if s.ServerProcesses != nil && len(s.ServerProcesses) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ServerProcesses", 1))
+ }
+ if s.ServerProcesses != nil {
+ for i, v := range s.ServerProcesses {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ServerProcesses", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Location in Amazon Simple Storage Service (Amazon S3) where a build's files
+// are stored. This location is assigned in response to a CreateBuild call,
+// and is always in the same region as the service used to create the build.
+// For more details see the Amazon S3 documentation (http://aws.amazon.com/documentation/s3/).
+type S3Location struct {
+ _ struct{} `type:"structure"`
+
+ // Amazon S3 bucket identifier.
+ Bucket *string `min:"1" type:"string"`
+
+ // Amazon S3 bucket key.
+ Key *string `min:"1" type:"string"`
+
+ // Amazon Resource Name (ARN) for the cross-account access role that allows GameLift
+ // access to the S3 bucket.
+ RoleArn *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s S3Location) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s S3Location) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *S3Location) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "S3Location"}
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.RoleArn != nil && len(*s.RoleArn) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("RoleArn", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Rule that controls how a fleet is scaled. Scaling policies are uniquely identified
+// by the combination of name and fleet ID.
+type ScalingPolicy struct {
+ _ struct{} `type:"structure"`
+
+ // Comparison operator to use when measuring a metric against the threshold
+ // value.
+ ComparisonOperator *string `type:"string" enum:"ComparisonOperatorType"`
+
+ // Length of time (in minutes) the metric must be at or beyond the threshold
+ // before a scaling event is triggered.
+ EvaluationPeriods *int64 `min:"1" type:"integer"`
+
+ // Unique identity for the fleet associated with this scaling policy.
+ FleetId *string `type:"string"`
+
+ // Name of the GameLift-defined metric that is used to trigger an adjustment.
+ // ActivatingGameSessions – number of game sessions in the process of being
+ // created (game session status = ACTIVATING). ActiveGameSessions – number
+ // of game sessions currently running (game session status = ACTIVE). CurrentPlayerSessions
+ // – number of active or reserved player sessions (player session status = ACTIVE
+ // or RESERVED). AvailablePlayerSessions – number of player session slots
+ // currently available in active game sessions across the fleet, calculated
+ // by subtracting a game session's current player session count from its maximum
+ // player session count. This number does include game sessions that are not
+ // currently accepting players (game session PlayerSessionCreationPolicy = DENY_ALL).
+ // ActiveInstances – number of instances currently running a game session.
+ // IdleInstances – number of instances not currently running a game session.
+ MetricName *string `type:"string" enum:"MetricName"`
+
+ // Descriptive label associated with a scaling policy. Policy names do not need
+ // to be unique.
+ Name *string `min:"1" type:"string"`
+
+ // Amount of adjustment to make, based on the scaling adjustment type.
+ ScalingAdjustment *int64 `type:"integer"`
+
+ // Type of adjustment to make to a fleet's instance count (see FleetCapacity):
+ // ChangeInCapacity – add (or subtract) the scaling adjustment value from
+ // the current instance count. Positive values scale up while negative values
+ // scale down. ExactCapacity – set the instance count to the scaling adjustment
+ // value. PercentChangeInCapacity – increase or reduce the current instance
+ // count by the scaling adjustment, read as a percentage. Positive values scale
+ // up while negative values scale down.
+ ScalingAdjustmentType *string `type:"string" enum:"ScalingAdjustmentType"`
+
+ // Current status of the scaling policy. The scaling policy is only in force
+ // when in an ACTIVE state. ACTIVE – The scaling policy is currently in force.
+ // UPDATE_REQUESTED – A request to update the scaling policy has been received.
+ // UPDATING – A change is being made to the scaling policy. DELETE_REQUESTED
+ // – A request to delete the scaling policy has been received. DELETING – The
+ // scaling policy is being deleted. DELETED – The scaling policy has been deleted.
+ // ERROR – An error occurred in creating the policy. It should be removed and
+ // recreated.
+ Status *string `type:"string" enum:"ScalingStatusType"`
+
+ // Metric value used to trigger a scaling event.
+ Threshold *float64 `type:"double"`
+}
+
+// String returns the string representation
+func (s ScalingPolicy) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ScalingPolicy) GoString() string {
+ return s.String()
+}
+
+// A set of instructions for launching server processes on each instance in
+// a fleet. Each instruction set identifies the location of the server executable,
+// optional launch parameters, and the number of server processes with this
+// configuration to maintain concurrently on the instance. Server process configurations
+// make up a fleet's RuntimeConfiguration.
+type ServerProcess struct {
+ _ struct{} `type:"structure"`
+
+ // Number of server processes using this configuration to run concurrently on
+ // an instance.
+ ConcurrentExecutions *int64 `min:"1" type:"integer" required:"true"`
+
+ // Location in the game build of the server executable. All game builds are
+ // installed on instances at the root C:\game\..., so an executable file located
+ // at MyGame\latest\server.exe has a launch path of "C:\game\MyGame\latest\server.exe".
+ LaunchPath *string `min:"1" type:"string" required:"true"` + + // Optional list of parameters to pass to the server executable on launch. + Parameters *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ServerProcess) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServerProcess) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ServerProcess) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ServerProcess"} + if s.ConcurrentExecutions == nil { + invalidParams.Add(request.NewErrParamRequired("ConcurrentExecutions")) + } + if s.ConcurrentExecutions != nil && *s.ConcurrentExecutions < 1 { + invalidParams.Add(request.NewErrParamMinValue("ConcurrentExecutions", 1)) + } + if s.LaunchPath == nil { + invalidParams.Add(request.NewErrParamRequired("LaunchPath")) + } + if s.LaunchPath != nil && len(*s.LaunchPath) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LaunchPath", 1)) + } + if s.Parameters != nil && len(*s.Parameters) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Parameters", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the input for a request action. +type UpdateAliasInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for a fleet alias. Specify the alias you want to update. + AliasId *string `type:"string" required:"true"` + + // Human-readable description of an alias. + Description *string `min:"1" type:"string"` + + // Descriptive label associated with an alias. Alias names do not need to be + // unique. + Name *string `min:"1" type:"string"` + + // Object specifying the fleet and routing type to use for the alias. + RoutingStrategy *RoutingStrategy `type:"structure"` +} + +// String returns the string representation +func (s UpdateAliasInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAliasInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateAliasInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateAliasInput"} + if s.AliasId == nil { + invalidParams.Add(request.NewErrParamRequired("AliasId")) + } + if s.Description != nil && len(*s.Description) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Description", 1)) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the returned data in response to a request action. +type UpdateAliasOutput struct { + _ struct{} `type:"structure"` + + // Object containing the updated alias configuration. + Alias *Alias `type:"structure"` +} + +// String returns the string representation +func (s UpdateAliasOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAliasOutput) GoString() string { + return s.String() +} + +// Represents the input for a request action. +type UpdateBuildInput struct { + _ struct{} `type:"structure"` + + // Unique identifier of the build you want to update. + BuildId *string `type:"string" required:"true"` + + // Descriptive label associated with a build. Build names do not need to be + // unique. 
+ Name *string `min:"1" type:"string"` + + // Version associated with this build. Version strings do not need to be unique + // to a build. + Version *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UpdateBuildInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateBuildInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateBuildInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateBuildInput"} + if s.BuildId == nil { + invalidParams.Add(request.NewErrParamRequired("BuildId")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Version != nil && len(*s.Version) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Version", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the returned data in response to a request action. +type UpdateBuildOutput struct { + _ struct{} `type:"structure"` + + // Object containing the updated build record. + Build *Build `type:"structure"` +} + +// String returns the string representation +func (s UpdateBuildOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateBuildOutput) GoString() string { + return s.String() +} + +// Represents the input for a request action. +type UpdateFleetAttributesInput struct { + _ struct{} `type:"structure"` + + // Human-readable description of a fleet. + Description *string `min:"1" type:"string"` + + // Unique identifier for the fleet you want to update attribute metadata for. + FleetId *string `type:"string" required:"true"` + + // Descriptive label associated with a fleet. Fleet names do not need to be + // unique. + Name *string `min:"1" type:"string"` + + // Game session protection policy to apply to all new instances created in this + // fleet. Instances that already exist are not affected. You can set protection + // for individual instances using UpdateGameSession. NoProtection – The game + // session can be terminated during a scale-down event. FullProtection – If + // the game session is in an ACTIVE status, it cannot be terminated during a + // scale-down event. + NewGameSessionProtectionPolicy *string `type:"string" enum:"ProtectionPolicy"` +} + +// String returns the string representation +func (s UpdateFleetAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateFleetAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateFleetAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateFleetAttributesInput"} + if s.Description != nil && len(*s.Description) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Description", 1)) + } + if s.FleetId == nil { + invalidParams.Add(request.NewErrParamRequired("FleetId")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the returned data in response to a request action. +type UpdateFleetAttributesOutput struct { + _ struct{} `type:"structure"` + + // Unique identifier for the updated fleet. 
+ FleetId *string `type:"string"` +} + +// String returns the string representation +func (s UpdateFleetAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateFleetAttributesOutput) GoString() string { + return s.String() +} + +// Represents the input for a request action. +type UpdateFleetCapacityInput struct { + _ struct{} `type:"structure"` + + // Number of EC2 instances you want this fleet to host. + DesiredInstances *int64 `type:"integer"` + + // Unique identifier for the fleet you want to update capacity for. + FleetId *string `type:"string" required:"true"` + + // Maximum value allowed for the fleet's instance count. Default if not set + // is 1. + MaxSize *int64 `type:"integer"` + + // Minimum value allowed for the fleet's instance count. Default if not set + // is 0. + MinSize *int64 `type:"integer"` +} + +// String returns the string representation +func (s UpdateFleetCapacityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateFleetCapacityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateFleetCapacityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateFleetCapacityInput"} + if s.FleetId == nil { + invalidParams.Add(request.NewErrParamRequired("FleetId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the returned data in response to a request action. +type UpdateFleetCapacityOutput struct { + _ struct{} `type:"structure"` + + // Unique identifier for the updated fleet. + FleetId *string `type:"string"` +} + +// String returns the string representation +func (s UpdateFleetCapacityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateFleetCapacityOutput) GoString() string { + return s.String() +} + +// Represents the input for a request action. +type UpdateFleetPortSettingsInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for the fleet you want to update port settings for. + FleetId *string `type:"string" required:"true"` + + // Collection of port settings to be added to the fleet record. + InboundPermissionAuthorizations []*IpPermission `type:"list"` + + // Collection of port settings to be removed from the fleet record. + InboundPermissionRevocations []*IpPermission `type:"list"` +} + +// String returns the string representation +func (s UpdateFleetPortSettingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateFleetPortSettingsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
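+//
+// A sketch that opens a UDP port range while revoking nothing (svc, the fleet
+// ID, and the CIDR block are assumptions):
+//
+//	_, err := svc.UpdateFleetPortSettings(&gamelift.UpdateFleetPortSettingsInput{
+//		FleetId: aws.String("fleet-id"),
+//		InboundPermissionAuthorizations: []*gamelift.IpPermission{{
+//			FromPort: aws.Int64(33435),
+//			ToPort:   aws.Int64(33535),
+//			IpRange:  aws.String("203.0.113.0/24"),
+//			Protocol: aws.String("UDP"),
+//		}},
+//	})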
+func (s *UpdateFleetPortSettingsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateFleetPortSettingsInput"} + if s.FleetId == nil { + invalidParams.Add(request.NewErrParamRequired("FleetId")) + } + if s.InboundPermissionAuthorizations != nil { + for i, v := range s.InboundPermissionAuthorizations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InboundPermissionAuthorizations", i), err.(request.ErrInvalidParams)) + } + } + } + if s.InboundPermissionRevocations != nil { + for i, v := range s.InboundPermissionRevocations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InboundPermissionRevocations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the returned data in response to a request action. +type UpdateFleetPortSettingsOutput struct { + _ struct{} `type:"structure"` + + // Unique identifier for the updated fleet. + FleetId *string `type:"string"` +} + +// String returns the string representation +func (s UpdateFleetPortSettingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateFleetPortSettingsOutput) GoString() string { + return s.String() +} + +// Represents the input for a request action. +type UpdateGameSessionInput struct { + _ struct{} `type:"structure"` + + // Unique identifier for a game session. Specify the game session you want to + // update. + GameSessionId *string `type:"string" required:"true"` + + // Maximum number of players that can be simultaneously connected to the game + // session. + MaximumPlayerSessionCount *int64 `type:"integer"` + + // Descriptive label associated with a game session. Session names do not need + // to be unique. + Name *string `min:"1" type:"string"` + + // Policy determining whether or not the game session accepts new players. + PlayerSessionCreationPolicy *string `type:"string" enum:"PlayerSessionCreationPolicy"` + + // Game session protection policy to apply to this game session only. NoProtection + // – The game session can be terminated during a scale-down event. FullProtection + // – If the game session is in an ACTIVE status, it cannot be terminated during + // a scale-down event. + ProtectionPolicy *string `type:"string" enum:"ProtectionPolicy"` +} + +// String returns the string representation +func (s UpdateGameSessionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateGameSessionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateGameSessionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateGameSessionInput"} + if s.GameSessionId == nil { + invalidParams.Add(request.NewErrParamRequired("GameSessionId")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the returned data in response to a request action. +type UpdateGameSessionOutput struct { + _ struct{} `type:"structure"` + + // Object containing the updated game session metadata. 
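+ //
+ // For instance, a request that stops new players from joining an in-progress
+ // session (svc and the session ID are assumptions) returns the updated object
+ // here:
+ //
+ //	resp, err := svc.UpdateGameSession(&gamelift.UpdateGameSessionInput{
+ //		GameSessionId:               aws.String("game-session-id"),
+ //		PlayerSessionCreationPolicy: aws.String("DENY_ALL"),
+ //	})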
+ GameSession *GameSession `type:"structure"` +} + +// String returns the string representation +func (s UpdateGameSessionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateGameSessionOutput) GoString() string { + return s.String() +} + +// Represents the input for a request action. +type UpdateRuntimeConfigurationInput struct { + _ struct{} `type:"structure"` + + // Unique identifier of the fleet to update runtime configuration for. + FleetId *string `type:"string" required:"true"` + + // Instructions for launching server processes on each instance in the fleet. + // The runtime configuration for a fleet has a collection of server process + // configurations, one for each type of server process to run on an instance. + // A server process configuration specifies the location of the server executable, + // launch parameters, and the number of concurrent processes with that configuration + // to maintain on each instance. + RuntimeConfiguration *RuntimeConfiguration `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateRuntimeConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateRuntimeConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateRuntimeConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateRuntimeConfigurationInput"} + if s.FleetId == nil { + invalidParams.Add(request.NewErrParamRequired("FleetId")) + } + if s.RuntimeConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("RuntimeConfiguration")) + } + if s.RuntimeConfiguration != nil { + if err := s.RuntimeConfiguration.Validate(); err != nil { + invalidParams.AddNested("RuntimeConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the returned data in response to a request action. +type UpdateRuntimeConfigurationOutput struct { + _ struct{} `type:"structure"` + + // The runtime configuration currently in force. If the update was successful, + // this object matches the one in the request. 
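+ //
+ // A round-trip sketch (svc and the fleet ID are assumptions; rc is a
+ // RuntimeConfiguration value such as the one sketched earlier in this file):
+ //
+ //	resp, err := svc.UpdateRuntimeConfiguration(&gamelift.UpdateRuntimeConfigurationInput{
+ //		FleetId:              aws.String("fleet-id"),
+ //		RuntimeConfiguration: rc,
+ //	})
+ //	// resp.RuntimeConfiguration echoes the accepted configuration.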
+ RuntimeConfiguration *RuntimeConfiguration `type:"structure"` +} + +// String returns the string representation +func (s UpdateRuntimeConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateRuntimeConfigurationOutput) GoString() string { + return s.String() +} + +const ( + // @enum BuildStatus + BuildStatusInitialized = "INITIALIZED" + // @enum BuildStatus + BuildStatusReady = "READY" + // @enum BuildStatus + BuildStatusFailed = "FAILED" +) + +const ( + // @enum ComparisonOperatorType + ComparisonOperatorTypeGreaterThanOrEqualToThreshold = "GreaterThanOrEqualToThreshold" + // @enum ComparisonOperatorType + ComparisonOperatorTypeGreaterThanThreshold = "GreaterThanThreshold" + // @enum ComparisonOperatorType + ComparisonOperatorTypeLessThanThreshold = "LessThanThreshold" + // @enum ComparisonOperatorType + ComparisonOperatorTypeLessThanOrEqualToThreshold = "LessThanOrEqualToThreshold" +) + +const ( + // @enum EC2InstanceType + EC2InstanceTypeT2Micro = "t2.micro" + // @enum EC2InstanceType + EC2InstanceTypeT2Small = "t2.small" + // @enum EC2InstanceType + EC2InstanceTypeT2Medium = "t2.medium" + // @enum EC2InstanceType + EC2InstanceTypeT2Large = "t2.large" + // @enum EC2InstanceType + EC2InstanceTypeC3Large = "c3.large" + // @enum EC2InstanceType + EC2InstanceTypeC3Xlarge = "c3.xlarge" + // @enum EC2InstanceType + EC2InstanceTypeC32xlarge = "c3.2xlarge" + // @enum EC2InstanceType + EC2InstanceTypeC34xlarge = "c3.4xlarge" + // @enum EC2InstanceType + EC2InstanceTypeC38xlarge = "c3.8xlarge" + // @enum EC2InstanceType + EC2InstanceTypeC4Large = "c4.large" + // @enum EC2InstanceType + EC2InstanceTypeC4Xlarge = "c4.xlarge" + // @enum EC2InstanceType + EC2InstanceTypeC42xlarge = "c4.2xlarge" + // @enum EC2InstanceType + EC2InstanceTypeC44xlarge = "c4.4xlarge" + // @enum EC2InstanceType + EC2InstanceTypeC48xlarge = "c4.8xlarge" + // @enum EC2InstanceType + EC2InstanceTypeR3Large = "r3.large" + // @enum EC2InstanceType + EC2InstanceTypeR3Xlarge = "r3.xlarge" + // @enum EC2InstanceType + EC2InstanceTypeR32xlarge = "r3.2xlarge" + // @enum EC2InstanceType + EC2InstanceTypeR34xlarge = "r3.4xlarge" + // @enum EC2InstanceType + EC2InstanceTypeR38xlarge = "r3.8xlarge" + // @enum EC2InstanceType + EC2InstanceTypeM3Medium = "m3.medium" + // @enum EC2InstanceType + EC2InstanceTypeM3Large = "m3.large" + // @enum EC2InstanceType + EC2InstanceTypeM3Xlarge = "m3.xlarge" + // @enum EC2InstanceType + EC2InstanceTypeM32xlarge = "m3.2xlarge" + // @enum EC2InstanceType + EC2InstanceTypeM4Large = "m4.large" + // @enum EC2InstanceType + EC2InstanceTypeM4Xlarge = "m4.xlarge" + // @enum EC2InstanceType + EC2InstanceTypeM42xlarge = "m4.2xlarge" + // @enum EC2InstanceType + EC2InstanceTypeM44xlarge = "m4.4xlarge" + // @enum EC2InstanceType + EC2InstanceTypeM410xlarge = "m4.10xlarge" +) + +const ( + // @enum EventCode + EventCodeGenericEvent = "GENERIC_EVENT" + // @enum EventCode + EventCodeFleetCreated = "FLEET_CREATED" + // @enum EventCode + EventCodeFleetDeleted = "FLEET_DELETED" + // @enum EventCode + EventCodeFleetScalingEvent = "FLEET_SCALING_EVENT" + // @enum EventCode + EventCodeFleetStateDownloading = "FLEET_STATE_DOWNLOADING" + // @enum EventCode + EventCodeFleetStateValidating = "FLEET_STATE_VALIDATING" + // @enum EventCode + EventCodeFleetStateBuilding = "FLEET_STATE_BUILDING" + // @enum EventCode + EventCodeFleetStateActivating = "FLEET_STATE_ACTIVATING" + // @enum EventCode + EventCodeFleetStateActive = "FLEET_STATE_ACTIVE" + // 
@enum EventCode + EventCodeFleetStateError = "FLEET_STATE_ERROR" + // @enum EventCode + EventCodeFleetInitializationFailed = "FLEET_INITIALIZATION_FAILED" + // @enum EventCode + EventCodeFleetBinaryDownloadFailed = "FLEET_BINARY_DOWNLOAD_FAILED" + // @enum EventCode + EventCodeFleetValidationLaunchPathNotFound = "FLEET_VALIDATION_LAUNCH_PATH_NOT_FOUND" + // @enum EventCode + EventCodeFleetValidationExecutableRuntimeFailure = "FLEET_VALIDATION_EXECUTABLE_RUNTIME_FAILURE" + // @enum EventCode + EventCodeFleetValidationTimedOut = "FLEET_VALIDATION_TIMED_OUT" + // @enum EventCode + EventCodeFleetActivationFailed = "FLEET_ACTIVATION_FAILED" + // @enum EventCode + EventCodeFleetActivationFailedNoInstances = "FLEET_ACTIVATION_FAILED_NO_INSTANCES" + // @enum EventCode + EventCodeFleetNewGameSessionProtectionPolicyUpdated = "FLEET_NEW_GAME_SESSION_PROTECTION_POLICY_UPDATED" +) + +const ( + // @enum FleetStatus + FleetStatusNew = "NEW" + // @enum FleetStatus + FleetStatusDownloading = "DOWNLOADING" + // @enum FleetStatus + FleetStatusValidating = "VALIDATING" + // @enum FleetStatus + FleetStatusBuilding = "BUILDING" + // @enum FleetStatus + FleetStatusActivating = "ACTIVATING" + // @enum FleetStatus + FleetStatusActive = "ACTIVE" + // @enum FleetStatus + FleetStatusDeleting = "DELETING" + // @enum FleetStatus + FleetStatusError = "ERROR" + // @enum FleetStatus + FleetStatusTerminated = "TERMINATED" +) + +const ( + // @enum GameSessionStatus + GameSessionStatusActive = "ACTIVE" + // @enum GameSessionStatus + GameSessionStatusActivating = "ACTIVATING" + // @enum GameSessionStatus + GameSessionStatusTerminated = "TERMINATED" + // @enum GameSessionStatus + GameSessionStatusTerminating = "TERMINATING" +) + +const ( + // @enum IpProtocol + IpProtocolTcp = "TCP" + // @enum IpProtocol + IpProtocolUdp = "UDP" +) + +const ( + // @enum MetricName + MetricNameActivatingGameSessions = "ActivatingGameSessions" + // @enum MetricName + MetricNameActiveGameSessions = "ActiveGameSessions" + // @enum MetricName + MetricNameActiveInstances = "ActiveInstances" + // @enum MetricName + MetricNameAvailablePlayerSessions = "AvailablePlayerSessions" + // @enum MetricName + MetricNameCurrentPlayerSessions = "CurrentPlayerSessions" + // @enum MetricName + MetricNameIdleInstances = "IdleInstances" +) + +const ( + // @enum PlayerSessionCreationPolicy + PlayerSessionCreationPolicyAcceptAll = "ACCEPT_ALL" + // @enum PlayerSessionCreationPolicy + PlayerSessionCreationPolicyDenyAll = "DENY_ALL" +) + +const ( + // @enum PlayerSessionStatus + PlayerSessionStatusReserved = "RESERVED" + // @enum PlayerSessionStatus + PlayerSessionStatusActive = "ACTIVE" + // @enum PlayerSessionStatus + PlayerSessionStatusCompleted = "COMPLETED" + // @enum PlayerSessionStatus + PlayerSessionStatusTimedout = "TIMEDOUT" +) + +const ( + // @enum ProtectionPolicy + ProtectionPolicyNoProtection = "NoProtection" + // @enum ProtectionPolicy + ProtectionPolicyFullProtection = "FullProtection" +) + +const ( + // @enum RoutingStrategyType + RoutingStrategyTypeSimple = "SIMPLE" + // @enum RoutingStrategyType + RoutingStrategyTypeTerminal = "TERMINAL" +) + +const ( + // @enum ScalingAdjustmentType + ScalingAdjustmentTypeChangeInCapacity = "ChangeInCapacity" + // @enum ScalingAdjustmentType + ScalingAdjustmentTypeExactCapacity = "ExactCapacity" + // @enum ScalingAdjustmentType + ScalingAdjustmentTypePercentChangeInCapacity = "PercentChangeInCapacity" +) + +const ( + // @enum ScalingStatusType + ScalingStatusTypeActive = "ACTIVE" + // @enum ScalingStatusType + 
ScalingStatusTypeUpdateRequested = "UPDATE_REQUESTED" + // @enum ScalingStatusType + ScalingStatusTypeUpdating = "UPDATING" + // @enum ScalingStatusType + ScalingStatusTypeDeleteRequested = "DELETE_REQUESTED" + // @enum ScalingStatusType + ScalingStatusTypeDeleting = "DELETING" + // @enum ScalingStatusType + ScalingStatusTypeDeleted = "DELETED" + // @enum ScalingStatusType + ScalingStatusTypeError = "ERROR" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/gamelift/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/gamelift/examples_test.go new file mode 100644 index 000000000..af2b15bd1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/gamelift/examples_test.go @@ -0,0 +1,874 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package gamelift_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/gamelift" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleGameLift_CreateAlias() { + svc := gamelift.New(session.New()) + + params := &gamelift.CreateAliasInput{ + Name: aws.String("NonZeroAndMaxString"), // Required + RoutingStrategy: &gamelift.RoutingStrategy{ // Required + FleetId: aws.String("FleetId"), + Message: aws.String("FreeText"), + Type: aws.String("RoutingStrategyType"), + }, + Description: aws.String("NonZeroAndMaxString"), + } + resp, err := svc.CreateAlias(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGameLift_CreateBuild() { + svc := gamelift.New(session.New()) + + params := &gamelift.CreateBuildInput{ + Name: aws.String("NonZeroAndMaxString"), + StorageLocation: &gamelift.S3Location{ + Bucket: aws.String("NonEmptyString"), + Key: aws.String("NonEmptyString"), + RoleArn: aws.String("NonEmptyString"), + }, + Version: aws.String("NonZeroAndMaxString"), + } + resp, err := svc.CreateBuild(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGameLift_CreateFleet() { + svc := gamelift.New(session.New()) + + params := &gamelift.CreateFleetInput{ + BuildId: aws.String("BuildId"), // Required + EC2InstanceType: aws.String("EC2InstanceType"), // Required + Name: aws.String("NonZeroAndMaxString"), // Required + Description: aws.String("NonZeroAndMaxString"), + EC2InboundPermissions: []*gamelift.IpPermission{ + { // Required + FromPort: aws.Int64(1), // Required + IpRange: aws.String("NonBlankString"), // Required + Protocol: aws.String("IpProtocol"), // Required + ToPort: aws.Int64(1), // Required + }, + // More values... + }, + LogPaths: []*string{ + aws.String("NonZeroAndMaxString"), // Required + // More values... + }, + NewGameSessionProtectionPolicy: aws.String("ProtectionPolicy"), + RuntimeConfiguration: &gamelift.RuntimeConfiguration{ + ServerProcesses: []*gamelift.ServerProcess{ + { // Required + ConcurrentExecutions: aws.Int64(1), // Required + LaunchPath: aws.String("NonZeroAndMaxString"), // Required + Parameters: aws.String("NonZeroAndMaxString"), + }, + // More values... 
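+			// Note: quoted values like "FleetId" and "EC2InstanceType" in these
+			// generated examples are placeholders named after their model types;
+			// a real call would pass actual resource IDs and the enum constants
+			// defined in api.go, for example
+			// EC2InstanceType: aws.String(gamelift.EC2InstanceTypeC4Large).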
+ }, + }, + ServerLaunchParameters: aws.String("NonZeroAndMaxString"), + ServerLaunchPath: aws.String("NonZeroAndMaxString"), + } + resp, err := svc.CreateFleet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGameLift_CreateGameSession() { + svc := gamelift.New(session.New()) + + params := &gamelift.CreateGameSessionInput{ + MaximumPlayerSessionCount: aws.Int64(1), // Required + AliasId: aws.String("AliasId"), + FleetId: aws.String("FleetId"), + GameProperties: []*gamelift.GameProperty{ + { // Required + Key: aws.String("GamePropertyKey"), // Required + Value: aws.String("GamePropertyValue"), // Required + }, + // More values... + }, + Name: aws.String("NonZeroAndMaxString"), + } + resp, err := svc.CreateGameSession(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGameLift_CreatePlayerSession() { + svc := gamelift.New(session.New()) + + params := &gamelift.CreatePlayerSessionInput{ + GameSessionId: aws.String("GameSessionId"), // Required + PlayerId: aws.String("NonZeroAndMaxString"), // Required + } + resp, err := svc.CreatePlayerSession(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGameLift_CreatePlayerSessions() { + svc := gamelift.New(session.New()) + + params := &gamelift.CreatePlayerSessionsInput{ + GameSessionId: aws.String("GameSessionId"), // Required + PlayerIds: []*string{ // Required + aws.String("NonZeroAndMaxString"), // Required + // More values... + }, + } + resp, err := svc.CreatePlayerSessions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGameLift_DeleteAlias() { + svc := gamelift.New(session.New()) + + params := &gamelift.DeleteAliasInput{ + AliasId: aws.String("AliasId"), // Required + } + resp, err := svc.DeleteAlias(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGameLift_DeleteBuild() { + svc := gamelift.New(session.New()) + + params := &gamelift.DeleteBuildInput{ + BuildId: aws.String("BuildId"), // Required + } + resp, err := svc.DeleteBuild(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGameLift_DeleteFleet() { + svc := gamelift.New(session.New()) + + params := &gamelift.DeleteFleetInput{ + FleetId: aws.String("FleetId"), // Required + } + resp, err := svc.DeleteFleet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleGameLift_DeleteScalingPolicy() { + svc := gamelift.New(session.New()) + + params := &gamelift.DeleteScalingPolicyInput{ + FleetId: aws.String("FleetId"), // Required + Name: aws.String("NonZeroAndMaxString"), // Required + } + resp, err := svc.DeleteScalingPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGameLift_DescribeAlias() { + svc := gamelift.New(session.New()) + + params := &gamelift.DescribeAliasInput{ + AliasId: aws.String("AliasId"), // Required + } + resp, err := svc.DescribeAlias(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGameLift_DescribeBuild() { + svc := gamelift.New(session.New()) + + params := &gamelift.DescribeBuildInput{ + BuildId: aws.String("BuildId"), // Required + } + resp, err := svc.DescribeBuild(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGameLift_DescribeEC2InstanceLimits() { + svc := gamelift.New(session.New()) + + params := &gamelift.DescribeEC2InstanceLimitsInput{ + EC2InstanceType: aws.String("EC2InstanceType"), + } + resp, err := svc.DescribeEC2InstanceLimits(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGameLift_DescribeFleetAttributes() { + svc := gamelift.New(session.New()) + + params := &gamelift.DescribeFleetAttributesInput{ + FleetIds: []*string{ + aws.String("FleetId"), // Required + // More values... + }, + Limit: aws.Int64(1), + NextToken: aws.String("NonZeroAndMaxString"), + } + resp, err := svc.DescribeFleetAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGameLift_DescribeFleetCapacity() { + svc := gamelift.New(session.New()) + + params := &gamelift.DescribeFleetCapacityInput{ + FleetIds: []*string{ + aws.String("FleetId"), // Required + // More values... + }, + Limit: aws.Int64(1), + NextToken: aws.String("NonZeroAndMaxString"), + } + resp, err := svc.DescribeFleetCapacity(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGameLift_DescribeFleetEvents() { + svc := gamelift.New(session.New()) + + params := &gamelift.DescribeFleetEventsInput{ + FleetId: aws.String("FleetId"), // Required + EndTime: aws.Time(time.Now()), + Limit: aws.Int64(1), + NextToken: aws.String("NonZeroAndMaxString"), + StartTime: aws.Time(time.Now()), + } + resp, err := svc.DescribeFleetEvents(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGameLift_DescribeFleetPortSettings() { + svc := gamelift.New(session.New()) + + params := &gamelift.DescribeFleetPortSettingsInput{ + FleetId: aws.String("FleetId"), // Required + } + resp, err := svc.DescribeFleetPortSettings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGameLift_DescribeFleetUtilization() { + svc := gamelift.New(session.New()) + + params := &gamelift.DescribeFleetUtilizationInput{ + FleetIds: []*string{ + aws.String("FleetId"), // Required + // More values... + }, + Limit: aws.Int64(1), + NextToken: aws.String("NonZeroAndMaxString"), + } + resp, err := svc.DescribeFleetUtilization(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGameLift_DescribeGameSessionDetails() { + svc := gamelift.New(session.New()) + + params := &gamelift.DescribeGameSessionDetailsInput{ + AliasId: aws.String("AliasId"), + FleetId: aws.String("FleetId"), + GameSessionId: aws.String("GameSessionId"), + Limit: aws.Int64(1), + NextToken: aws.String("NonZeroAndMaxString"), + StatusFilter: aws.String("NonZeroAndMaxString"), + } + resp, err := svc.DescribeGameSessionDetails(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGameLift_DescribeGameSessions() { + svc := gamelift.New(session.New()) + + params := &gamelift.DescribeGameSessionsInput{ + AliasId: aws.String("AliasId"), + FleetId: aws.String("FleetId"), + GameSessionId: aws.String("GameSessionId"), + Limit: aws.Int64(1), + NextToken: aws.String("NonZeroAndMaxString"), + StatusFilter: aws.String("NonZeroAndMaxString"), + } + resp, err := svc.DescribeGameSessions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGameLift_DescribePlayerSessions() { + svc := gamelift.New(session.New()) + + params := &gamelift.DescribePlayerSessionsInput{ + GameSessionId: aws.String("GameSessionId"), + Limit: aws.Int64(1), + NextToken: aws.String("NonZeroAndMaxString"), + PlayerId: aws.String("NonZeroAndMaxString"), + PlayerSessionId: aws.String("PlayerSessionId"), + PlayerSessionStatusFilter: aws.String("NonZeroAndMaxString"), + } + resp, err := svc.DescribePlayerSessions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGameLift_DescribeRuntimeConfiguration() { + svc := gamelift.New(session.New()) + + params := &gamelift.DescribeRuntimeConfigurationInput{ + FleetId: aws.String("FleetId"), // Required + } + resp, err := svc.DescribeRuntimeConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGameLift_DescribeScalingPolicies() { + svc := gamelift.New(session.New()) + + params := &gamelift.DescribeScalingPoliciesInput{ + FleetId: aws.String("FleetId"), // Required + Limit: aws.Int64(1), + NextToken: aws.String("NonZeroAndMaxString"), + StatusFilter: aws.String("ScalingStatusType"), + } + resp, err := svc.DescribeScalingPolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGameLift_GetGameSessionLogUrl() { + svc := gamelift.New(session.New()) + + params := &gamelift.GetGameSessionLogUrlInput{ + GameSessionId: aws.String("GameSessionId"), // Required + } + resp, err := svc.GetGameSessionLogUrl(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGameLift_ListAliases() { + svc := gamelift.New(session.New()) + + params := &gamelift.ListAliasesInput{ + Limit: aws.Int64(1), + Name: aws.String("NonEmptyString"), + NextToken: aws.String("NonEmptyString"), + RoutingStrategyType: aws.String("RoutingStrategyType"), + } + resp, err := svc.ListAliases(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGameLift_ListBuilds() { + svc := gamelift.New(session.New()) + + params := &gamelift.ListBuildsInput{ + Limit: aws.Int64(1), + NextToken: aws.String("NonEmptyString"), + Status: aws.String("BuildStatus"), + } + resp, err := svc.ListBuilds(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGameLift_ListFleets() { + svc := gamelift.New(session.New()) + + params := &gamelift.ListFleetsInput{ + BuildId: aws.String("BuildId"), + Limit: aws.Int64(1), + NextToken: aws.String("NonZeroAndMaxString"), + } + resp, err := svc.ListFleets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGameLift_PutScalingPolicy() { + svc := gamelift.New(session.New()) + + params := &gamelift.PutScalingPolicyInput{ + ComparisonOperator: aws.String("ComparisonOperatorType"), // Required + EvaluationPeriods: aws.Int64(1), // Required + FleetId: aws.String("FleetId"), // Required + MetricName: aws.String("MetricName"), // Required + Name: aws.String("NonZeroAndMaxString"), // Required + ScalingAdjustment: aws.Int64(1), // Required + ScalingAdjustmentType: aws.String("ScalingAdjustmentType"), // Required + Threshold: aws.Float64(1.0), // Required + } + resp, err := svc.PutScalingPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleGameLift_RequestUploadCredentials() { + svc := gamelift.New(session.New()) + + params := &gamelift.RequestUploadCredentialsInput{ + BuildId: aws.String("BuildId"), // Required + } + resp, err := svc.RequestUploadCredentials(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGameLift_ResolveAlias() { + svc := gamelift.New(session.New()) + + params := &gamelift.ResolveAliasInput{ + AliasId: aws.String("AliasId"), // Required + } + resp, err := svc.ResolveAlias(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGameLift_UpdateAlias() { + svc := gamelift.New(session.New()) + + params := &gamelift.UpdateAliasInput{ + AliasId: aws.String("AliasId"), // Required + Description: aws.String("NonZeroAndMaxString"), + Name: aws.String("NonZeroAndMaxString"), + RoutingStrategy: &gamelift.RoutingStrategy{ + FleetId: aws.String("FleetId"), + Message: aws.String("FreeText"), + Type: aws.String("RoutingStrategyType"), + }, + } + resp, err := svc.UpdateAlias(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGameLift_UpdateBuild() { + svc := gamelift.New(session.New()) + + params := &gamelift.UpdateBuildInput{ + BuildId: aws.String("BuildId"), // Required + Name: aws.String("NonZeroAndMaxString"), + Version: aws.String("NonZeroAndMaxString"), + } + resp, err := svc.UpdateBuild(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGameLift_UpdateFleetAttributes() { + svc := gamelift.New(session.New()) + + params := &gamelift.UpdateFleetAttributesInput{ + FleetId: aws.String("FleetId"), // Required + Description: aws.String("NonZeroAndMaxString"), + Name: aws.String("NonZeroAndMaxString"), + NewGameSessionProtectionPolicy: aws.String("ProtectionPolicy"), + } + resp, err := svc.UpdateFleetAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGameLift_UpdateFleetCapacity() { + svc := gamelift.New(session.New()) + + params := &gamelift.UpdateFleetCapacityInput{ + FleetId: aws.String("FleetId"), // Required + DesiredInstances: aws.Int64(1), + MaxSize: aws.Int64(1), + MinSize: aws.Int64(1), + } + resp, err := svc.UpdateFleetCapacity(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleGameLift_UpdateFleetPortSettings() { + svc := gamelift.New(session.New()) + + params := &gamelift.UpdateFleetPortSettingsInput{ + FleetId: aws.String("FleetId"), // Required + InboundPermissionAuthorizations: []*gamelift.IpPermission{ + { // Required + FromPort: aws.Int64(1), // Required + IpRange: aws.String("NonBlankString"), // Required + Protocol: aws.String("IpProtocol"), // Required + ToPort: aws.Int64(1), // Required + }, + // More values... + }, + InboundPermissionRevocations: []*gamelift.IpPermission{ + { // Required + FromPort: aws.Int64(1), // Required + IpRange: aws.String("NonBlankString"), // Required + Protocol: aws.String("IpProtocol"), // Required + ToPort: aws.Int64(1), // Required + }, + // More values... + }, + } + resp, err := svc.UpdateFleetPortSettings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGameLift_UpdateGameSession() { + svc := gamelift.New(session.New()) + + params := &gamelift.UpdateGameSessionInput{ + GameSessionId: aws.String("GameSessionId"), // Required + MaximumPlayerSessionCount: aws.Int64(1), + Name: aws.String("NonZeroAndMaxString"), + PlayerSessionCreationPolicy: aws.String("PlayerSessionCreationPolicy"), + ProtectionPolicy: aws.String("ProtectionPolicy"), + } + resp, err := svc.UpdateGameSession(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGameLift_UpdateRuntimeConfiguration() { + svc := gamelift.New(session.New()) + + params := &gamelift.UpdateRuntimeConfigurationInput{ + FleetId: aws.String("FleetId"), // Required + RuntimeConfiguration: &gamelift.RuntimeConfiguration{ // Required + ServerProcesses: []*gamelift.ServerProcess{ + { // Required + ConcurrentExecutions: aws.Int64(1), // Required + LaunchPath: aws.String("NonZeroAndMaxString"), // Required + Parameters: aws.String("NonZeroAndMaxString"), + }, + // More values... + }, + }, + } + resp, err := svc.UpdateRuntimeConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/gamelift/gameliftiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/gamelift/gameliftiface/interface.go new file mode 100644 index 000000000..b3fb52fb3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/gamelift/gameliftiface/interface.go @@ -0,0 +1,162 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package gameliftiface provides an interface for the Amazon GameLift. +package gameliftiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/gamelift" +) + +// GameLiftAPI is the interface type for gamelift.GameLift. 
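+//
+// The interface is also convenient for mocking the client in unit tests. A
+// minimal sketch (myFunc and mockGameLiftClient are illustrative names, not
+// part of the generated package):
+//
+//    func myFunc(svc gameliftiface.GameLiftAPI) error {
+//        // Any value satisfying GameLiftAPI, real client or mock, works here.
+//        _, err := svc.ListFleets(&gamelift.ListFleetsInput{})
+//        return err
+//    }
+//
+//    type mockGameLiftClient struct {
+//        gameliftiface.GameLiftAPI
+//    }
+//
+//    func (m *mockGameLiftClient) ListFleets(in *gamelift.ListFleetsInput) (*gamelift.ListFleetsOutput, error) {
+//        // Return fixed data so tests never hit the real service.
+//        return &gamelift.ListFleetsOutput{}, nil
+//    }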
+type GameLiftAPI interface { + CreateAliasRequest(*gamelift.CreateAliasInput) (*request.Request, *gamelift.CreateAliasOutput) + + CreateAlias(*gamelift.CreateAliasInput) (*gamelift.CreateAliasOutput, error) + + CreateBuildRequest(*gamelift.CreateBuildInput) (*request.Request, *gamelift.CreateBuildOutput) + + CreateBuild(*gamelift.CreateBuildInput) (*gamelift.CreateBuildOutput, error) + + CreateFleetRequest(*gamelift.CreateFleetInput) (*request.Request, *gamelift.CreateFleetOutput) + + CreateFleet(*gamelift.CreateFleetInput) (*gamelift.CreateFleetOutput, error) + + CreateGameSessionRequest(*gamelift.CreateGameSessionInput) (*request.Request, *gamelift.CreateGameSessionOutput) + + CreateGameSession(*gamelift.CreateGameSessionInput) (*gamelift.CreateGameSessionOutput, error) + + CreatePlayerSessionRequest(*gamelift.CreatePlayerSessionInput) (*request.Request, *gamelift.CreatePlayerSessionOutput) + + CreatePlayerSession(*gamelift.CreatePlayerSessionInput) (*gamelift.CreatePlayerSessionOutput, error) + + CreatePlayerSessionsRequest(*gamelift.CreatePlayerSessionsInput) (*request.Request, *gamelift.CreatePlayerSessionsOutput) + + CreatePlayerSessions(*gamelift.CreatePlayerSessionsInput) (*gamelift.CreatePlayerSessionsOutput, error) + + DeleteAliasRequest(*gamelift.DeleteAliasInput) (*request.Request, *gamelift.DeleteAliasOutput) + + DeleteAlias(*gamelift.DeleteAliasInput) (*gamelift.DeleteAliasOutput, error) + + DeleteBuildRequest(*gamelift.DeleteBuildInput) (*request.Request, *gamelift.DeleteBuildOutput) + + DeleteBuild(*gamelift.DeleteBuildInput) (*gamelift.DeleteBuildOutput, error) + + DeleteFleetRequest(*gamelift.DeleteFleetInput) (*request.Request, *gamelift.DeleteFleetOutput) + + DeleteFleet(*gamelift.DeleteFleetInput) (*gamelift.DeleteFleetOutput, error) + + DeleteScalingPolicyRequest(*gamelift.DeleteScalingPolicyInput) (*request.Request, *gamelift.DeleteScalingPolicyOutput) + + DeleteScalingPolicy(*gamelift.DeleteScalingPolicyInput) (*gamelift.DeleteScalingPolicyOutput, error) + + DescribeAliasRequest(*gamelift.DescribeAliasInput) (*request.Request, *gamelift.DescribeAliasOutput) + + DescribeAlias(*gamelift.DescribeAliasInput) (*gamelift.DescribeAliasOutput, error) + + DescribeBuildRequest(*gamelift.DescribeBuildInput) (*request.Request, *gamelift.DescribeBuildOutput) + + DescribeBuild(*gamelift.DescribeBuildInput) (*gamelift.DescribeBuildOutput, error) + + DescribeEC2InstanceLimitsRequest(*gamelift.DescribeEC2InstanceLimitsInput) (*request.Request, *gamelift.DescribeEC2InstanceLimitsOutput) + + DescribeEC2InstanceLimits(*gamelift.DescribeEC2InstanceLimitsInput) (*gamelift.DescribeEC2InstanceLimitsOutput, error) + + DescribeFleetAttributesRequest(*gamelift.DescribeFleetAttributesInput) (*request.Request, *gamelift.DescribeFleetAttributesOutput) + + DescribeFleetAttributes(*gamelift.DescribeFleetAttributesInput) (*gamelift.DescribeFleetAttributesOutput, error) + + DescribeFleetCapacityRequest(*gamelift.DescribeFleetCapacityInput) (*request.Request, *gamelift.DescribeFleetCapacityOutput) + + DescribeFleetCapacity(*gamelift.DescribeFleetCapacityInput) (*gamelift.DescribeFleetCapacityOutput, error) + + DescribeFleetEventsRequest(*gamelift.DescribeFleetEventsInput) (*request.Request, *gamelift.DescribeFleetEventsOutput) + + DescribeFleetEvents(*gamelift.DescribeFleetEventsInput) (*gamelift.DescribeFleetEventsOutput, error) + + DescribeFleetPortSettingsRequest(*gamelift.DescribeFleetPortSettingsInput) (*request.Request, *gamelift.DescribeFleetPortSettingsOutput) + + 
DescribeFleetPortSettings(*gamelift.DescribeFleetPortSettingsInput) (*gamelift.DescribeFleetPortSettingsOutput, error) + + DescribeFleetUtilizationRequest(*gamelift.DescribeFleetUtilizationInput) (*request.Request, *gamelift.DescribeFleetUtilizationOutput) + + DescribeFleetUtilization(*gamelift.DescribeFleetUtilizationInput) (*gamelift.DescribeFleetUtilizationOutput, error) + + DescribeGameSessionDetailsRequest(*gamelift.DescribeGameSessionDetailsInput) (*request.Request, *gamelift.DescribeGameSessionDetailsOutput) + + DescribeGameSessionDetails(*gamelift.DescribeGameSessionDetailsInput) (*gamelift.DescribeGameSessionDetailsOutput, error) + + DescribeGameSessionsRequest(*gamelift.DescribeGameSessionsInput) (*request.Request, *gamelift.DescribeGameSessionsOutput) + + DescribeGameSessions(*gamelift.DescribeGameSessionsInput) (*gamelift.DescribeGameSessionsOutput, error) + + DescribePlayerSessionsRequest(*gamelift.DescribePlayerSessionsInput) (*request.Request, *gamelift.DescribePlayerSessionsOutput) + + DescribePlayerSessions(*gamelift.DescribePlayerSessionsInput) (*gamelift.DescribePlayerSessionsOutput, error) + + DescribeRuntimeConfigurationRequest(*gamelift.DescribeRuntimeConfigurationInput) (*request.Request, *gamelift.DescribeRuntimeConfigurationOutput) + + DescribeRuntimeConfiguration(*gamelift.DescribeRuntimeConfigurationInput) (*gamelift.DescribeRuntimeConfigurationOutput, error) + + DescribeScalingPoliciesRequest(*gamelift.DescribeScalingPoliciesInput) (*request.Request, *gamelift.DescribeScalingPoliciesOutput) + + DescribeScalingPolicies(*gamelift.DescribeScalingPoliciesInput) (*gamelift.DescribeScalingPoliciesOutput, error) + + GetGameSessionLogUrlRequest(*gamelift.GetGameSessionLogUrlInput) (*request.Request, *gamelift.GetGameSessionLogUrlOutput) + + GetGameSessionLogUrl(*gamelift.GetGameSessionLogUrlInput) (*gamelift.GetGameSessionLogUrlOutput, error) + + ListAliasesRequest(*gamelift.ListAliasesInput) (*request.Request, *gamelift.ListAliasesOutput) + + ListAliases(*gamelift.ListAliasesInput) (*gamelift.ListAliasesOutput, error) + + ListBuildsRequest(*gamelift.ListBuildsInput) (*request.Request, *gamelift.ListBuildsOutput) + + ListBuilds(*gamelift.ListBuildsInput) (*gamelift.ListBuildsOutput, error) + + ListFleetsRequest(*gamelift.ListFleetsInput) (*request.Request, *gamelift.ListFleetsOutput) + + ListFleets(*gamelift.ListFleetsInput) (*gamelift.ListFleetsOutput, error) + + PutScalingPolicyRequest(*gamelift.PutScalingPolicyInput) (*request.Request, *gamelift.PutScalingPolicyOutput) + + PutScalingPolicy(*gamelift.PutScalingPolicyInput) (*gamelift.PutScalingPolicyOutput, error) + + RequestUploadCredentialsRequest(*gamelift.RequestUploadCredentialsInput) (*request.Request, *gamelift.RequestUploadCredentialsOutput) + + RequestUploadCredentials(*gamelift.RequestUploadCredentialsInput) (*gamelift.RequestUploadCredentialsOutput, error) + + ResolveAliasRequest(*gamelift.ResolveAliasInput) (*request.Request, *gamelift.ResolveAliasOutput) + + ResolveAlias(*gamelift.ResolveAliasInput) (*gamelift.ResolveAliasOutput, error) + + UpdateAliasRequest(*gamelift.UpdateAliasInput) (*request.Request, *gamelift.UpdateAliasOutput) + + UpdateAlias(*gamelift.UpdateAliasInput) (*gamelift.UpdateAliasOutput, error) + + UpdateBuildRequest(*gamelift.UpdateBuildInput) (*request.Request, *gamelift.UpdateBuildOutput) + + UpdateBuild(*gamelift.UpdateBuildInput) (*gamelift.UpdateBuildOutput, error) + + UpdateFleetAttributesRequest(*gamelift.UpdateFleetAttributesInput) (*request.Request, 
*gamelift.UpdateFleetAttributesOutput)
+
+	UpdateFleetAttributes(*gamelift.UpdateFleetAttributesInput) (*gamelift.UpdateFleetAttributesOutput, error)
+
+	UpdateFleetCapacityRequest(*gamelift.UpdateFleetCapacityInput) (*request.Request, *gamelift.UpdateFleetCapacityOutput)
+
+	UpdateFleetCapacity(*gamelift.UpdateFleetCapacityInput) (*gamelift.UpdateFleetCapacityOutput, error)
+
+	UpdateFleetPortSettingsRequest(*gamelift.UpdateFleetPortSettingsInput) (*request.Request, *gamelift.UpdateFleetPortSettingsOutput)
+
+	UpdateFleetPortSettings(*gamelift.UpdateFleetPortSettingsInput) (*gamelift.UpdateFleetPortSettingsOutput, error)
+
+	UpdateGameSessionRequest(*gamelift.UpdateGameSessionInput) (*request.Request, *gamelift.UpdateGameSessionOutput)
+
+	UpdateGameSession(*gamelift.UpdateGameSessionInput) (*gamelift.UpdateGameSessionOutput, error)
+
+	UpdateRuntimeConfigurationRequest(*gamelift.UpdateRuntimeConfigurationInput) (*request.Request, *gamelift.UpdateRuntimeConfigurationOutput)
+
+	UpdateRuntimeConfiguration(*gamelift.UpdateRuntimeConfigurationInput) (*gamelift.UpdateRuntimeConfigurationOutput, error)
+}
+
+var _ GameLiftAPI = (*gamelift.GameLift)(nil)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/gamelift/service.go b/vendor/github.com/aws/aws-sdk-go/service/gamelift/service.go
new file mode 100644
index 000000000..8a80e08e7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/gamelift/service.go
@@ -0,0 +1,133 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package gamelift
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+)
+
+// Welcome to the Amazon GameLift API Reference. Amazon GameLift is a managed
+// Amazon Web Services (AWS) service for developers who need a scalable, server-based
+// solution for multiplayer games. Amazon GameLift provides setup and deployment
+// of game servers, and handles infrastructure scaling and session management.
+//
+// This reference describes the low-level service API for GameLift. You can
+// call this API directly or use the AWS SDK (https://aws.amazon.com/tools/)
+// for your preferred language. The AWS SDK includes a set of high-level GameLift
+// actions for managing multiplayer game sessions. Alternatively, you can use
+// the AWS command-line interface (https://aws.amazon.com/cli/) (CLI) tool,
+// which includes commands for GameLift. For administrative actions, you can
+// also use the Amazon GameLift console.
+//
+// More Resources
+//
+//  * Amazon GameLift Developer Guide (http://docs.aws.amazon.com/gamelift/latest/developerguide/):
+//    Learn more about GameLift features and how to use them
+//  * Lumberyard and GameLift Tutorials (https://gamedev.amazon.com/forums/tutorials):
+//    Get started fast with walkthroughs and sample projects
+//  * GameDev Blog (https://aws.amazon.com/blogs/gamedev/): Stay up to date
+//    with new features and techniques
+//  * GameDev Forums (https://gamedev.amazon.com/forums/spaces/123/gamelift-discussion.html):
+//    Connect with the GameDev community
+//
+// Manage Games and Players Through GameLift
+//
+// Call these actions from your game clients and/or services to create and
+// manage multiplayer game sessions and player sessions.
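+//
+// A minimal sketch of the calling pattern (assuming a configured session; the
+// fleet ID below is illustrative, not a real resource):
+//
+//    svc := gamelift.New(session.New())
+//    resp, err := svc.CreateGameSession(&gamelift.CreateGameSessionInput{
+//        FleetId:                   aws.String("fleet-1234"),
+//        MaximumPlayerSessionCount: aws.Int64(8),
+//    })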
+//
+// Game sessions:
+//
+//  * CreateGameSession
+//  * DescribeGameSessions
+//  * DescribeGameSessionDetails
+//  * UpdateGameSession
+//
+// Player sessions:
+//
+//  * CreatePlayerSession
+//  * CreatePlayerSessions
+//  * DescribePlayerSessions
+//
+// Other actions:
+//
+//  * GetGameSessionLogUrl
+//
+// Set Up and Manage Game Servers
+//
+// Use these administrative actions to configure GameLift to host your game
+// servers. When setting up GameLift, you'll need to (1) configure a build for
+// your game and upload build files, and (2) set up one or more fleets to host
+// game sessions. Once you've created and activated a fleet, you can assign
+// aliases to it, scale capacity, track performance and utilization, etc.
+//
+//  * Manage your builds: ListBuilds, CreateBuild, DescribeBuild, UpdateBuild,
+//    DeleteBuild, RequestUploadCredentials
+//  * Manage your fleets: ListFleets, CreateFleet
+//  * Describe fleets: DescribeFleetAttributes, DescribeFleetCapacity,
+//    DescribeFleetPortSettings, DescribeFleetUtilization, DescribeEC2InstanceLimits,
+//    DescribeFleetEvents, DescribeRuntimeConfiguration
+//  * Update fleets: UpdateFleetAttributes, UpdateFleetCapacity, UpdateFleetPortSettings,
+//    UpdateRuntimeConfiguration, DeleteFleet
+//  * Manage fleet aliases: ListAliases, CreateAlias, DescribeAlias, UpdateAlias,
+//    DeleteAlias, ResolveAlias
+//  * Manage autoscaling: PutScalingPolicy, DescribeScalingPolicies, DeleteScalingPolicy
+//
+// To view changes to the API, see the GameLift Document History (http://docs.aws.amazon.com/gamelift/latest/developerguide/doc-history.html)
+// page.
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type GameLift struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "gamelift"
+
+// New creates a new instance of the GameLift client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create a GameLift client from just a session.
+//     svc := gamelift.New(mySession)
+//
+//     // Create a GameLift client with additional configuration
+//     svc := gamelift.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *GameLift {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *GameLift { + svc := &GameLift{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-10-01", + JSONVersion: "1.1", + TargetPrefix: "GameLift", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a GameLift operation and runs any +// custom request initialization. +func (c *GameLift) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/generate.go b/vendor/github.com/aws/aws-sdk-go/service/generate.go new file mode 100644 index 000000000..9cf991e69 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/generate.go @@ -0,0 +1,5 @@ +// Package service contains automatically generated AWS clients. +package service + +//go:generate go run ../private/model/cli/gen-api/main.go -path=../service ../models/apis/*/*/api-2.json +//go:generate gofmt -s -w ../service diff --git a/vendor/github.com/aws/aws-sdk-go/service/glacier/api.go b/vendor/github.com/aws/aws-sdk-go/service/glacier/api.go new file mode 100644 index 000000000..5ef73b115 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/glacier/api.go @@ -0,0 +1,4804 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package glacier provides a client for Amazon Glacier. +package glacier + +import ( + "io" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restjson" +) + +const opAbortMultipartUpload = "AbortMultipartUpload" + +// AbortMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the AbortMultipartUpload operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AbortMultipartUpload method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AbortMultipartUploadRequest method. 
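+//    // Here "client" is assumed to be a *glacier.Glacier created earlier,
+//    // e.g. client := glacier.New(session.New()), and "params" a populated
+//    // *glacier.AbortMultipartUploadInput.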
+// req, resp := client.AbortMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Glacier) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) { + op := &request.Operation{ + Name: opAbortMultipartUpload, + HTTPMethod: "DELETE", + HTTPPath: "/{accountId}/vaults/{vaultName}/multipart-uploads/{uploadId}", + } + + if input == nil { + input = &AbortMultipartUploadInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AbortMultipartUploadOutput{} + req.Data = output + return +} + +// This operation aborts a multipart upload identified by the upload ID. +// +// After the Abort Multipart Upload request succeeds, you cannot upload any +// more parts to the multipart upload or complete the multipart upload. Aborting +// a completed upload fails. However, aborting an already-aborted upload will +// succeed, for a short time. For more information about uploading a part and +// completing a multipart upload, see UploadMultipartPart and CompleteMultipartUpload. +// +// This operation is idempotent. +// +// An AWS account has full permission to perform all operations (actions). +// However, AWS Identity and Access Management (IAM) users don't have any permissions +// by default. You must grant them explicit permission to perform specific actions. +// For more information, see Access Control Using AWS Identity and Access Management +// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// +// For conceptual information and underlying REST API, go to Working with +// Archives in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-archives.html) +// and Abort Multipart Upload (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-abort-upload.html) +// in the Amazon Glacier Developer Guide. +func (c *Glacier) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) { + req, out := c.AbortMultipartUploadRequest(input) + err := req.Send() + return out, err +} + +const opAbortVaultLock = "AbortVaultLock" + +// AbortVaultLockRequest generates a "aws/request.Request" representing the +// client's request for the AbortVaultLock operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AbortVaultLock method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AbortVaultLockRequest method. 
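+//    // A successful call returns an empty *glacier.AbortVaultLockOutput (the
+//    // operation has no response body, as the DiscardBody handler below
+//    // reflects), so callers typically inspect only err.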
+// req, resp := client.AbortVaultLockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Glacier) AbortVaultLockRequest(input *AbortVaultLockInput) (req *request.Request, output *AbortVaultLockOutput) { + op := &request.Operation{ + Name: opAbortVaultLock, + HTTPMethod: "DELETE", + HTTPPath: "/{accountId}/vaults/{vaultName}/lock-policy", + } + + if input == nil { + input = &AbortVaultLockInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AbortVaultLockOutput{} + req.Data = output + return +} + +// This operation aborts the vault locking process if the vault lock is not +// in the Locked state. If the vault lock is in the Locked state when this operation +// is requested, the operation returns an AccessDeniedException error. Aborting +// the vault locking process removes the vault lock policy from the specified +// vault. +// +// A vault lock is put into the InProgress state by calling InitiateVaultLock. +// A vault lock is put into the Locked state by calling CompleteVaultLock. You +// can get the state of a vault lock by calling GetVaultLock. For more information +// about the vault locking process, see Amazon Glacier Vault Lock (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html). +// For more information about vault lock policies, see Amazon Glacier Access +// Control with Vault Lock Policies (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock-policy.html). +// +// This operation is idempotent. You can successfully invoke this operation +// multiple times, if the vault lock is in the InProgress state or if there +// is no policy associated with the vault. +func (c *Glacier) AbortVaultLock(input *AbortVaultLockInput) (*AbortVaultLockOutput, error) { + req, out := c.AbortVaultLockRequest(input) + err := req.Send() + return out, err +} + +const opAddTagsToVault = "AddTagsToVault" + +// AddTagsToVaultRequest generates a "aws/request.Request" representing the +// client's request for the AddTagsToVault operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddTagsToVault method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddTagsToVaultRequest method. 
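+//    // For this operation "params" is a *glacier.AddTagsToVaultInput; its Tags
+//    // field is assumed to be a map[string]*string of tag keys to values, per
+//    // the key/value model described above.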
+// req, resp := client.AddTagsToVaultRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Glacier) AddTagsToVaultRequest(input *AddTagsToVaultInput) (req *request.Request, output *AddTagsToVaultOutput) { + op := &request.Operation{ + Name: opAddTagsToVault, + HTTPMethod: "POST", + HTTPPath: "/{accountId}/vaults/{vaultName}/tags?operation=add", + } + + if input == nil { + input = &AddTagsToVaultInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AddTagsToVaultOutput{} + req.Data = output + return +} + +// This operation adds the specified tags to a vault. Each tag is composed of +// a key and a value. Each vault can have up to 10 tags. If your request would +// cause the tag limit for the vault to be exceeded, the operation throws the +// LimitExceededException error. If a tag already exists on the vault under +// a specified key, the existing key value will be overwritten. For more information +// about tags, see Tagging Amazon Glacier Resources (http://docs.aws.amazon.com/amazonglacier/latest/dev/tagging.html). +func (c *Glacier) AddTagsToVault(input *AddTagsToVaultInput) (*AddTagsToVaultOutput, error) { + req, out := c.AddTagsToVaultRequest(input) + err := req.Send() + return out, err +} + +const opCompleteMultipartUpload = "CompleteMultipartUpload" + +// CompleteMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the CompleteMultipartUpload operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CompleteMultipartUpload method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CompleteMultipartUploadRequest method. +// req, resp := client.CompleteMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Glacier) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *ArchiveCreationOutput) { + op := &request.Operation{ + Name: opCompleteMultipartUpload, + HTTPMethod: "POST", + HTTPPath: "/{accountId}/vaults/{vaultName}/multipart-uploads/{uploadId}", + } + + if input == nil { + input = &CompleteMultipartUploadInput{} + } + + req = c.newRequest(op, input, output) + output = &ArchiveCreationOutput{} + req.Data = output + return +} + +// You call this operation to inform Amazon Glacier that all the archive parts +// have been uploaded and that Amazon Glacier can now assemble the archive from +// the uploaded parts. After assembling and saving the archive to the vault, +// Amazon Glacier returns the URI path of the newly created archive resource. +// Using the URI path, you can then access the archive. After you upload an +// archive, you should save the archive ID returned to retrieve the archive +// at a later point. 
You can also get the vault inventory to obtain a list of
+// archive IDs in a vault. For more information, see InitiateJob.
+//
+// In the request, you must include the computed SHA256 tree hash of the entire
+// archive you have uploaded. For information about computing a SHA256 tree
+// hash, see Computing Checksums (http://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html).
+// On the server side, Amazon Glacier also constructs the SHA256 tree hash of
+// the assembled archive. If the values match, Amazon Glacier saves the archive
+// to the vault; otherwise, it returns an error, and the operation fails. The
+// ListParts operation returns a list of parts uploaded for a specific multipart
+// upload. It includes checksum information for each uploaded part that can
+// be used to debug a bad checksum issue.
+//
+// Additionally, Amazon Glacier also checks for any missing content ranges
+// when assembling the archive; if missing content ranges are found, Amazon
+// Glacier returns an error and the operation fails.
+//
+// Complete Multipart Upload is an idempotent operation. After your first successful
+// complete multipart upload, if you call the operation again within a short
+// period, the operation will succeed and return the same archive ID. This is
+// useful in the event you experience a network issue that causes an aborted
+// connection or receive a 500 server error, in which case you can repeat your
+// Complete Multipart Upload request and get the same archive ID without creating
+// duplicate archives. Note, however, that after the multipart upload completes,
+// you cannot call the List Parts operation and the multipart upload will not
+// appear in the List Multipart Uploads response, even if idempotent complete
+// is possible.
+//
+// An AWS account has full permission to perform all operations (actions).
+// However, AWS Identity and Access Management (IAM) users don't have any permissions
+// by default. You must grant them explicit permission to perform specific actions.
+// For more information, see Access Control Using AWS Identity and Access Management
+// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
+//
+// For conceptual information and underlying REST API, go to Uploading Large
+// Archives in Parts (Multipart Upload) (http://docs.aws.amazon.com/amazonglacier/latest/dev/uploading-archive-mpu.html)
+// and Complete Multipart Upload (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-complete-upload.html)
+// in the Amazon Glacier Developer Guide.
+func (c *Glacier) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*ArchiveCreationOutput, error) {
+	req, out := c.CompleteMultipartUploadRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCompleteVaultLock = "CompleteVaultLock"
+
+// CompleteVaultLockRequest generates a "aws/request.Request" representing the
+// client's request for the CompleteVaultLock operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CompleteVaultLock method directly
+// instead.
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CompleteVaultLockRequest method. +// req, resp := client.CompleteVaultLockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Glacier) CompleteVaultLockRequest(input *CompleteVaultLockInput) (req *request.Request, output *CompleteVaultLockOutput) { + op := &request.Operation{ + Name: opCompleteVaultLock, + HTTPMethod: "POST", + HTTPPath: "/{accountId}/vaults/{vaultName}/lock-policy/{lockId}", + } + + if input == nil { + input = &CompleteVaultLockInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CompleteVaultLockOutput{} + req.Data = output + return +} + +// This operation completes the vault locking process by transitioning the vault +// lock from the InProgress state to the Locked state, which causes the vault +// lock policy to become unchangeable. A vault lock is put into the InProgress +// state by calling InitiateVaultLock. You can obtain the state of the vault +// lock by calling GetVaultLock. For more information about the vault locking +// process, Amazon Glacier Vault Lock (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html). +// +// This operation is idempotent. This request is always successful if the vault +// lock is in the Locked state and the provided lock ID matches the lock ID +// originally used to lock the vault. +// +// If an invalid lock ID is passed in the request when the vault lock is in +// the Locked state, the operation returns an AccessDeniedException error. If +// an invalid lock ID is passed in the request when the vault lock is in the +// InProgress state, the operation throws an InvalidParameter error. +func (c *Glacier) CompleteVaultLock(input *CompleteVaultLockInput) (*CompleteVaultLockOutput, error) { + req, out := c.CompleteVaultLockRequest(input) + err := req.Send() + return out, err +} + +const opCreateVault = "CreateVault" + +// CreateVaultRequest generates a "aws/request.Request" representing the +// client's request for the CreateVault operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateVault method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateVaultRequest method. 
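+//    // For CreateVault, "params" is a *glacier.CreateVaultInput carrying the
+//    // URI path values AccountId and VaultName; per the Glacier API, AccountId
+//    // may be "-" to stand for the account that signs the request.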
+// req, resp := client.CreateVaultRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Glacier) CreateVaultRequest(input *CreateVaultInput) (req *request.Request, output *CreateVaultOutput) { + op := &request.Operation{ + Name: opCreateVault, + HTTPMethod: "PUT", + HTTPPath: "/{accountId}/vaults/{vaultName}", + } + + if input == nil { + input = &CreateVaultInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateVaultOutput{} + req.Data = output + return +} + +// This operation creates a new vault with the specified name. The name of the +// vault must be unique within a region for an AWS account. You can create up +// to 1,000 vaults per account. If you need to create more vaults, contact Amazon +// Glacier. +// +// You must use the following guidelines when naming a vault. +// +// Names can be between 1 and 255 characters long. +// +// Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), +// and '.' (period). +// +// This operation is idempotent. +// +// An AWS account has full permission to perform all operations (actions). +// However, AWS Identity and Access Management (IAM) users don't have any permissions +// by default. You must grant them explicit permission to perform specific actions. +// For more information, see Access Control Using AWS Identity and Access Management +// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// +// For conceptual information and underlying REST API, go to Creating a Vault +// in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/creating-vaults.html) +// and Create Vault (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-put.html) +// in the Amazon Glacier Developer Guide. +func (c *Glacier) CreateVault(input *CreateVaultInput) (*CreateVaultOutput, error) { + req, out := c.CreateVaultRequest(input) + err := req.Send() + return out, err +} + +const opDeleteArchive = "DeleteArchive" + +// DeleteArchiveRequest generates a "aws/request.Request" representing the +// client's request for the DeleteArchive operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteArchive method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteArchiveRequest method. 
+//    req, resp := client.DeleteArchiveRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Glacier) DeleteArchiveRequest(input *DeleteArchiveInput) (req *request.Request, output *DeleteArchiveOutput) {
+	op := &request.Operation{
+		Name:       opDeleteArchive,
+		HTTPMethod: "DELETE",
+		HTTPPath:   "/{accountId}/vaults/{vaultName}/archives/{archiveId}",
+	}
+
+	if input == nil {
+		input = &DeleteArchiveInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	output = &DeleteArchiveOutput{}
+	req.Data = output
+	return
+}
+
+// This operation deletes an archive from a vault. Subsequent requests to initiate
+// a retrieval of this archive will fail. Archive retrievals that are in progress
+// for this archive ID may or may not succeed according to the following scenarios:
+//
+//    * If the archive retrieval job is actively preparing the data for download
+//    when Amazon Glacier receives the delete archive request, the archival
+//    retrieval operation might fail.
+//
+//    * If the archive retrieval job has successfully prepared the archive for
+//    download when Amazon Glacier receives the delete archive request, you
+//    will be able to download the output.
+//
+// This operation is idempotent. Attempting to delete an already-deleted archive
+// does not result in an error.
+//
+// An AWS account has full permission to perform all operations (actions).
+// However, AWS Identity and Access Management (IAM) users don't have any permissions
+// by default. You must grant them explicit permission to perform specific actions.
+// For more information, see Access Control Using AWS Identity and Access Management
+// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
+//
+// For conceptual information and underlying REST API, go to Deleting an Archive
+// in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/deleting-an-archive.html)
+// and Delete Archive (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-archive-delete.html)
+// in the Amazon Glacier Developer Guide.
+func (c *Glacier) DeleteArchive(input *DeleteArchiveInput) (*DeleteArchiveOutput, error) {
+	req, out := c.DeleteArchiveRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteVault = "DeleteVault"
+
+// DeleteVaultRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteVault operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DeleteVault method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DeleteVaultRequest method.
+// req, resp := client.DeleteVaultRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Glacier) DeleteVaultRequest(input *DeleteVaultInput) (req *request.Request, output *DeleteVaultOutput) { + op := &request.Operation{ + Name: opDeleteVault, + HTTPMethod: "DELETE", + HTTPPath: "/{accountId}/vaults/{vaultName}", + } + + if input == nil { + input = &DeleteVaultInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteVaultOutput{} + req.Data = output + return +} + +// This operation deletes a vault. Amazon Glacier will delete a vault only if +// there are no archives in the vault as of the last inventory and there have +// been no writes to the vault since the last inventory. If either of these +// conditions is not satisfied, the vault deletion fails (that is, the vault +// is not removed) and Amazon Glacier returns an error. You can use DescribeVault +// to return the number of archives in a vault, and you can use Initiate a Job +// (POST jobs) (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-initiate-job-post.html) +// to initiate a new inventory retrieval for a vault. The inventory contains +// the archive IDs you use to delete archives using Delete Archive (DELETE archive) +// (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-archive-delete.html). +// +// This operation is idempotent. +// +// An AWS account has full permission to perform all operations (actions). +// However, AWS Identity and Access Management (IAM) users don't have any permissions +// by default. You must grant them explicit permission to perform specific actions. +// For more information, see Access Control Using AWS Identity and Access Management +// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// +// For conceptual information and underlying REST API, go to Deleting a Vault +// in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/deleting-vaults.html) +// and Delete Vault (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-delete.html) +// in the Amazon Glacier Developer Guide. +func (c *Glacier) DeleteVault(input *DeleteVaultInput) (*DeleteVaultOutput, error) { + req, out := c.DeleteVaultRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVaultAccessPolicy = "DeleteVaultAccessPolicy" + +// DeleteVaultAccessPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteVaultAccessPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteVaultAccessPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteVaultAccessPolicyRequest method. 
+// req, resp := client.DeleteVaultAccessPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Glacier) DeleteVaultAccessPolicyRequest(input *DeleteVaultAccessPolicyInput) (req *request.Request, output *DeleteVaultAccessPolicyOutput) { + op := &request.Operation{ + Name: opDeleteVaultAccessPolicy, + HTTPMethod: "DELETE", + HTTPPath: "/{accountId}/vaults/{vaultName}/access-policy", + } + + if input == nil { + input = &DeleteVaultAccessPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteVaultAccessPolicyOutput{} + req.Data = output + return +} + +// This operation deletes the access policy associated with the specified vault. +// The operation is eventually consistent; that is, it might take some time +// for Amazon Glacier to completely remove the access policy, and you might +// still see the effect of the policy for a short time after you send the delete +// request. +// +// This operation is idempotent. You can invoke delete multiple times, even +// if there is no policy associated with the vault. For more information about +// vault access policies, see Amazon Glacier Access Control with Vault Access +// Policies (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-access-policy.html). +func (c *Glacier) DeleteVaultAccessPolicy(input *DeleteVaultAccessPolicyInput) (*DeleteVaultAccessPolicyOutput, error) { + req, out := c.DeleteVaultAccessPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVaultNotifications = "DeleteVaultNotifications" + +// DeleteVaultNotificationsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteVaultNotifications operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteVaultNotifications method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteVaultNotificationsRequest method. +// req, resp := client.DeleteVaultNotificationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Glacier) DeleteVaultNotificationsRequest(input *DeleteVaultNotificationsInput) (req *request.Request, output *DeleteVaultNotificationsOutput) { + op := &request.Operation{ + Name: opDeleteVaultNotifications, + HTTPMethod: "DELETE", + HTTPPath: "/{accountId}/vaults/{vaultName}/notification-configuration", + } + + if input == nil { + input = &DeleteVaultNotificationsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteVaultNotificationsOutput{} + req.Data = output + return +} + +// This operation deletes the notification configuration set for a vault. 
+// The operation is eventually consistent; that is, it might take some time for
+// Amazon Glacier to completely disable the notifications, and you might still
+// receive some notifications for a short time after you send the delete request.
+//
+// An AWS account has full permission to perform all operations (actions).
+// However, AWS Identity and Access Management (IAM) users don't have any permissions
+// by default. You must grant them explicit permission to perform specific actions.
+// For more information, see Access Control Using AWS Identity and Access Management
+// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
+//
+// For conceptual information and underlying REST API, go to Configuring Vault
+// Notifications in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/configuring-notifications.html)
+// and Delete Vault Notification Configuration (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-notifications-delete.html)
+// in the Amazon Glacier Developer Guide.
+func (c *Glacier) DeleteVaultNotifications(input *DeleteVaultNotificationsInput) (*DeleteVaultNotificationsOutput, error) {
+	req, out := c.DeleteVaultNotificationsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeJob = "DescribeJob"
+
+// DescribeJobRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeJob operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeJob method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DescribeJobRequest method.
+//    req, resp := client.DescribeJobRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Glacier) DescribeJobRequest(input *DescribeJobInput) (req *request.Request, output *JobDescription) {
+	op := &request.Operation{
+		Name:       opDescribeJob,
+		HTTPMethod: "GET",
+		HTTPPath:   "/{accountId}/vaults/{vaultName}/jobs/{jobId}",
+	}
+
+	if input == nil {
+		input = &DescribeJobInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &JobDescription{}
+	req.Data = output
+	return
+}
+
+// This operation returns information about a job you previously initiated,
+// including the job initiation date, the user who initiated the job, the job
+// status code/message, and the Amazon SNS topic to notify after Amazon Glacier
+// completes the job. For more information about initiating a job, see InitiateJob.
+//
+// This operation enables you to check the status of your job. However, it
+// is strongly recommended that you set up an Amazon SNS topic and specify it
+// in your initiate job request so that Amazon Glacier can notify the topic
+// after it completes the job.
+//
+// A job ID will not expire for at least 24 hours after Amazon Glacier completes
+// the job.
+//
+// An AWS account has full permission to perform all operations (actions).
+// However, AWS Identity and Access Management (IAM) users don't have any permissions
+// by default.
+// You must grant them explicit permission to perform specific actions.
+// For more information, see Access Control Using AWS Identity and Access Management
+// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
+//
+// For information about the underlying REST API, go to Describe Job (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-describe-job-get.html)
+// in the Amazon Glacier Developer Guide.
+func (c *Glacier) DescribeJob(input *DescribeJobInput) (*JobDescription, error) {
+	req, out := c.DescribeJobRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeVault = "DescribeVault"
+
+// DescribeVaultRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeVault operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeVault method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DescribeVaultRequest method.
+//    req, resp := client.DescribeVaultRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Glacier) DescribeVaultRequest(input *DescribeVaultInput) (req *request.Request, output *DescribeVaultOutput) {
+	op := &request.Operation{
+		Name:       opDescribeVault,
+		HTTPMethod: "GET",
+		HTTPPath:   "/{accountId}/vaults/{vaultName}",
+	}
+
+	if input == nil {
+		input = &DescribeVaultInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeVaultOutput{}
+	req.Data = output
+	return
+}
+
+// This operation returns information about a vault, including the vault's Amazon
+// Resource Name (ARN), the date the vault was created, the number of archives
+// it contains, and the total size of all the archives in the vault. The number
+// of archives and their total size are as of the last inventory generation.
+// This means that if you add or remove an archive from a vault, and then immediately
+// use Describe Vault, the change in contents will not be immediately reflected.
+// If you want to retrieve the latest inventory of the vault, use InitiateJob.
+// Amazon Glacier generates vault inventories approximately daily. For more
+// information, see Downloading a Vault Inventory in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-inventory.html).
+//
+// An AWS account has full permission to perform all operations (actions).
+// However, AWS Identity and Access Management (IAM) users don't have any permissions
+// by default. You must grant them explicit permission to perform specific actions.
+// For more information, see Access Control Using AWS Identity and Access Management
+// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
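+//
+// For illustration only, a minimal sketch of calling DescribeVault directly,
+// assuming svc is an already-initialized *Glacier client and "examplevault"
+// is a placeholder vault name. Passing "-" as the account ID targets the
+// account that owns the credentials making the request.
+//
+//    out, err := svc.DescribeVault(&glacier.DescribeVaultInput{
+//        AccountId: aws.String("-"),
+//        VaultName: aws.String("examplevault"),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.VaultARN),
+//            aws.Int64Value(out.NumberOfArchives),
+//            aws.Int64Value(out.SizeInBytes))
+//    }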
+// +// For conceptual information and underlying REST API, go to Retrieving Vault +// Metadata in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/retrieving-vault-info.html) +// and Describe Vault (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-get.html) +// in the Amazon Glacier Developer Guide. +func (c *Glacier) DescribeVault(input *DescribeVaultInput) (*DescribeVaultOutput, error) { + req, out := c.DescribeVaultRequest(input) + err := req.Send() + return out, err +} + +const opGetDataRetrievalPolicy = "GetDataRetrievalPolicy" + +// GetDataRetrievalPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetDataRetrievalPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetDataRetrievalPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetDataRetrievalPolicyRequest method. +// req, resp := client.GetDataRetrievalPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Glacier) GetDataRetrievalPolicyRequest(input *GetDataRetrievalPolicyInput) (req *request.Request, output *GetDataRetrievalPolicyOutput) { + op := &request.Operation{ + Name: opGetDataRetrievalPolicy, + HTTPMethod: "GET", + HTTPPath: "/{accountId}/policies/data-retrieval", + } + + if input == nil { + input = &GetDataRetrievalPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetDataRetrievalPolicyOutput{} + req.Data = output + return +} + +// This operation returns the current data retrieval policy for the account +// and region specified in the GET request. For more information about data +// retrieval policies, see Amazon Glacier Data Retrieval Policies (http://docs.aws.amazon.com/amazonglacier/latest/dev/data-retrieval-policy.html). +func (c *Glacier) GetDataRetrievalPolicy(input *GetDataRetrievalPolicyInput) (*GetDataRetrievalPolicyOutput, error) { + req, out := c.GetDataRetrievalPolicyRequest(input) + err := req.Send() + return out, err +} + +const opGetJobOutput = "GetJobOutput" + +// GetJobOutputRequest generates a "aws/request.Request" representing the +// client's request for the GetJobOutput operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetJobOutput method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetJobOutputRequest method. 
+//    req, resp := client.GetJobOutputRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Glacier) GetJobOutputRequest(input *GetJobOutputInput) (req *request.Request, output *GetJobOutputOutput) {
+	op := &request.Operation{
+		Name:       opGetJobOutput,
+		HTTPMethod: "GET",
+		HTTPPath:   "/{accountId}/vaults/{vaultName}/jobs/{jobId}/output",
+	}
+
+	if input == nil {
+		input = &GetJobOutputInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &GetJobOutputOutput{}
+	req.Data = output
+	return
+}
+
+// This operation downloads the output of the job you initiated using InitiateJob.
+// Depending on the job type you specified when you initiated the job, the output
+// will be either the content of an archive or a vault inventory.
+//
+// A job ID will not expire for at least 24 hours after Amazon Glacier completes
+// the job. That is, you can download the job output within the 24 hour period
+// after Amazon Glacier completes the job.
+//
+// If the job output is large, then you can use the Range request header to
+// retrieve a portion of the output. This allows you to download the entire
+// output in smaller chunks of bytes. For example, suppose you have 1 GB of
+// job output you want to download and you decide to download 128 MB chunks
+// of data at a time, which is a total of eight Get Job Output requests. You
+// use the following process to download the job output:
+//
+//    1. Download a 128 MB chunk of output by specifying the appropriate byte
+//    range using the Range header.
+//
+//    2. Along with the data, the response includes a SHA256 tree hash of the
+//    payload. You compute the checksum of the payload on the client and compare
+//    it with the checksum you received in the response to ensure you received
+//    all the expected data.
+//
+//    3. Repeat steps 1 and 2 for all the eight 128 MB chunks of output data,
+//    each time specifying the appropriate byte range.
+//
+//    4. After downloading all the parts of the job output, you have a list
+//    of eight checksum values. Compute the tree hash of these values to find
+//    the checksum of the entire output. Using the DescribeJob API, obtain job
+//    information of the job that provided you the output. The response includes
+//    the checksum of the entire archive stored in Amazon Glacier. You compare
+//    this value with the checksum you computed to ensure you have downloaded
+//    the entire archive content with no errors.
+//
+// An AWS account has full permission to perform all operations (actions).
+// However, AWS Identity and Access Management (IAM) users don't have any permissions
+// by default. You must grant them explicit permission to perform specific actions.
+// For more information, see Access Control Using AWS Identity and Access Management
+// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
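+//
+// As an illustration only, a ranged download of the first 128 MB chunk might
+// look like the following sketch, where svc is an already-initialized *Glacier
+// client and jobId holds the ID returned by InitiateJob (placeholder names):
+//
+//    out, err := svc.GetJobOutput(&glacier.GetJobOutputInput{
+//        AccountId: aws.String("-"),
+//        VaultName: aws.String("examplevault"),
+//        JobId:     aws.String(jobId),
+//        Range:     aws.String("bytes=0-134217727"), // first 128 MB, inclusive
+//    })
+//    if err == nil {
+//        defer out.Body.Close()
+//        // Read out.Body, then verify the bytes against the SHA256 tree
+//        // hash in aws.StringValue(out.Checksum).
+//    }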
+//
+// For conceptual information and the underlying REST API, go to Downloading
+// a Vault Inventory (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-inventory.html),
+// Downloading an Archive (http://docs.aws.amazon.com/amazonglacier/latest/dev/downloading-an-archive.html),
+// and Get Job Output (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-job-output-get.html).
+func (c *Glacier) GetJobOutput(input *GetJobOutputInput) (*GetJobOutputOutput, error) {
+	req, out := c.GetJobOutputRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opGetVaultAccessPolicy = "GetVaultAccessPolicy"
+
+// GetVaultAccessPolicyRequest generates a "aws/request.Request" representing the
+// client's request for the GetVaultAccessPolicy operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the GetVaultAccessPolicy method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the GetVaultAccessPolicyRequest method.
+//    req, resp := client.GetVaultAccessPolicyRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Glacier) GetVaultAccessPolicyRequest(input *GetVaultAccessPolicyInput) (req *request.Request, output *GetVaultAccessPolicyOutput) {
+	op := &request.Operation{
+		Name:       opGetVaultAccessPolicy,
+		HTTPMethod: "GET",
+		HTTPPath:   "/{accountId}/vaults/{vaultName}/access-policy",
+	}
+
+	if input == nil {
+		input = &GetVaultAccessPolicyInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &GetVaultAccessPolicyOutput{}
+	req.Data = output
+	return
+}
+
+// This operation retrieves the access-policy subresource set on the vault;
+// for more information on setting this subresource, see Set Vault Access Policy
+// (PUT access-policy) (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-SetVaultAccessPolicy.html).
+// If there is no access policy set on the vault, the operation returns a 404
+// Not Found error. For more information about vault access policies, see Amazon
+// Glacier Access Control with Vault Access Policies (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-access-policy.html).
+func (c *Glacier) GetVaultAccessPolicy(input *GetVaultAccessPolicyInput) (*GetVaultAccessPolicyOutput, error) {
+	req, out := c.GetVaultAccessPolicyRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opGetVaultLock = "GetVaultLock"
+
+// GetVaultLockRequest generates a "aws/request.Request" representing the
+// client's request for the GetVaultLock operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the GetVaultLock method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the GetVaultLockRequest method.
+//    req, resp := client.GetVaultLockRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Glacier) GetVaultLockRequest(input *GetVaultLockInput) (req *request.Request, output *GetVaultLockOutput) {
+	op := &request.Operation{
+		Name:       opGetVaultLock,
+		HTTPMethod: "GET",
+		HTTPPath:   "/{accountId}/vaults/{vaultName}/lock-policy",
+	}
+
+	if input == nil {
+		input = &GetVaultLockInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &GetVaultLockOutput{}
+	req.Data = output
+	return
+}
+
+// This operation retrieves the following attributes from the lock-policy subresource
+// set on the specified vault:
+//
+//    * The vault lock policy set on the vault.
+//
+//    * The state of the vault lock, which is either InProgress or Locked.
+//
+//    * When the lock ID expires. The lock ID is used to complete the vault
+//    locking process.
+//
+//    * When the vault lock was initiated and put into the InProgress state.
+//
+// A vault lock is put into the InProgress state by calling InitiateVaultLock.
+// A vault lock is put into the Locked state by calling CompleteVaultLock. You
+// can abort the vault locking process by calling AbortVaultLock. For more information
+// about the vault locking process, see Amazon Glacier Vault Lock (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html).
+//
+// If there is no vault lock policy set on the vault, the operation returns
+// a 404 Not Found error. For more information about vault lock policies, see
+// Amazon Glacier Access Control with Vault Lock Policies (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock-policy.html).
+func (c *Glacier) GetVaultLock(input *GetVaultLockInput) (*GetVaultLockOutput, error) {
+	req, out := c.GetVaultLockRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opGetVaultNotifications = "GetVaultNotifications"
+
+// GetVaultNotificationsRequest generates a "aws/request.Request" representing the
+// client's request for the GetVaultNotifications operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the GetVaultNotifications method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the GetVaultNotificationsRequest method.
+// req, resp := client.GetVaultNotificationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Glacier) GetVaultNotificationsRequest(input *GetVaultNotificationsInput) (req *request.Request, output *GetVaultNotificationsOutput) { + op := &request.Operation{ + Name: opGetVaultNotifications, + HTTPMethod: "GET", + HTTPPath: "/{accountId}/vaults/{vaultName}/notification-configuration", + } + + if input == nil { + input = &GetVaultNotificationsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetVaultNotificationsOutput{} + req.Data = output + return +} + +// This operation retrieves the notification-configuration subresource of the +// specified vault. +// +// For information about setting a notification configuration on a vault, see +// SetVaultNotifications. If a notification configuration for a vault is not +// set, the operation returns a 404 Not Found error. For more information about +// vault notifications, see Configuring Vault Notifications in Amazon Glacier +// (http://docs.aws.amazon.com/amazonglacier/latest/dev/configuring-notifications.html). +// +// An AWS account has full permission to perform all operations (actions). +// However, AWS Identity and Access Management (IAM) users don't have any permissions +// by default. You must grant them explicit permission to perform specific actions. +// For more information, see Access Control Using AWS Identity and Access Management +// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// +// For conceptual information and underlying REST API, go to Configuring Vault +// Notifications in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/configuring-notifications.html) +// and Get Vault Notification Configuration (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-notifications-get.html) +// in the Amazon Glacier Developer Guide. +func (c *Glacier) GetVaultNotifications(input *GetVaultNotificationsInput) (*GetVaultNotificationsOutput, error) { + req, out := c.GetVaultNotificationsRequest(input) + err := req.Send() + return out, err +} + +const opInitiateJob = "InitiateJob" + +// InitiateJobRequest generates a "aws/request.Request" representing the +// client's request for the InitiateJob operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InitiateJob method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InitiateJobRequest method. 
+//    req, resp := client.InitiateJobRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Glacier) InitiateJobRequest(input *InitiateJobInput) (req *request.Request, output *InitiateJobOutput) {
+	op := &request.Operation{
+		Name:       opInitiateJob,
+		HTTPMethod: "POST",
+		HTTPPath:   "/{accountId}/vaults/{vaultName}/jobs",
+	}
+
+	if input == nil {
+		input = &InitiateJobInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &InitiateJobOutput{}
+	req.Data = output
+	return
+}
+
+// This operation initiates a job of the specified type. In this release, you
+// can initiate a job to retrieve either an archive or a vault inventory (a
+// list of archives in a vault).
+//
+// Retrieving data from Amazon Glacier is a two-step process:
+//
+//    1. Initiate a retrieval job.
+//
+//    A data retrieval policy can cause your initiate retrieval job request
+//    to fail with a PolicyEnforcedException exception. For more information
+//    about data retrieval policies, see Amazon Glacier Data Retrieval Policies
+//    (http://docs.aws.amazon.com/amazonglacier/latest/dev/data-retrieval-policy.html).
+//    For more information about the PolicyEnforcedException exception, see
+//    Error Responses (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-error-responses.html).
+//
+//    2. After the job completes, download the bytes.
+//
+// The retrieval request is executed asynchronously. When you initiate a retrieval
+// job, Amazon Glacier creates a job and returns a job ID in the response. When
+// Amazon Glacier completes the job, you can get the job output (archive or
+// inventory data). For information about getting job output, see the GetJobOutput
+// operation.
+//
+// The job must complete before you can get its output. To determine when a
+// job is complete, you have the following options:
+//
+//    * Use Amazon SNS Notification. You can specify an Amazon Simple Notification
+//    Service (Amazon SNS) topic to which Amazon Glacier can post a notification
+//    after the job is completed. You can specify an SNS topic per job request.
+//    The notification is sent only after Amazon Glacier completes the job. In
+//    addition to specifying an SNS topic per job request, you can configure
+//    vault notifications for a vault so that job notifications are always sent.
+//    For more information, see SetVaultNotifications.
+//
+//    * Get job details. You can make a DescribeJob request to obtain job status
+//    information while a job is in progress. However, it is more efficient to
+//    use an Amazon SNS notification to determine when a job is complete.
+//
+// The information you get via notification is the same as what you get by
+// calling DescribeJob.
+//
+// If, for a specific event, you add both a notification configuration on
+// the vault and specify an SNS topic in your initiate job request, Amazon
+// Glacier sends both notifications. For more information, see SetVaultNotifications.
+//
+// An AWS account has full permission to perform all operations (actions).
+// However, AWS Identity and Access Management (IAM) users don't have any permissions
+// by default. You must grant them explicit permission to perform specific actions.
+// For more information, see Access Control Using AWS Identity and Access Management
+// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
+//
+// About the Vault Inventory
+//
+// Amazon Glacier prepares an inventory for each vault periodically, every
+// 24 hours.
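+//
+// For illustration only, initiating an inventory-retrieval job might look
+// like the following sketch, where svc is an already-initialized *Glacier
+// client and "examplevault" is a placeholder vault name:
+//
+//    out, err := svc.InitiateJob(&glacier.InitiateJobInput{
+//        AccountId: aws.String("-"),
+//        VaultName: aws.String("examplevault"),
+//        JobParameters: &glacier.JobParameters{
+//            Type:   aws.String("inventory-retrieval"),
+//            Format: aws.String("JSON"),
+//        },
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.JobId)) // poll with DescribeJob
+//    }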
+// When you initiate a job for a vault inventory, Amazon Glacier returns
+// the last inventory for the vault. The inventory data you get might be up
+// to a day or two days old. Also, the initiate inventory job might take some
+// time to complete before you can download the vault inventory. So you should
+// not retrieve a vault inventory for each vault operation. However, in
+// some scenarios, you might find the vault inventory useful. For example, when
+// you upload an archive, you can provide an archive description but not an
+// archive name. Amazon Glacier provides you a unique archive ID, an opaque
+// string of characters. So, you might maintain your own database that maps
+// archive names to their corresponding Amazon Glacier assigned archive IDs.
+// You might find the vault inventory useful in the event you need to reconcile
+// information in your database with the actual vault inventory.
+//
+// Range Inventory Retrieval
+//
+// You can limit the number of inventory items retrieved by filtering on the
+// archive creation date or by setting a limit.
+//
+// Filtering by Archive Creation Date
+//
+// You can retrieve inventory items for archives created between StartDate
+// and EndDate by specifying values for these parameters in the InitiateJob
+// request. Archives created on or after the StartDate and before the EndDate
+// will be returned. If you only provide the StartDate without the EndDate,
+// you will retrieve the inventory for all archives created on or after the
+// StartDate. If you only provide the EndDate without the StartDate, you will
+// get back the inventory for all archives created before the EndDate.
+//
+// Limiting Inventory Items per Retrieval
+//
+// You can limit the number of inventory items returned by setting the Limit
+// parameter in the InitiateJob request. The inventory job output will contain
+// inventory items up to the specified Limit. If there are more inventory items
+// available, the result is paginated. After a job is complete, you can use the
+// DescribeJob operation to get a marker that you use in a subsequent InitiateJob
+// request. The marker will indicate the starting point to retrieve the next
+// set of inventory items. You can page through your entire inventory by repeatedly
+// making InitiateJob requests with the marker from the previous DescribeJob
+// output, until you get a marker from DescribeJob that returns null, indicating
+// that there are no more inventory items available.
+//
+// You can use the Limit parameter together with the date range parameters.
+//
+// About Ranged Archive Retrieval
+//
+// You can initiate an archive retrieval for the whole archive or a range
+// of the archive. For a ranged archive retrieval, you specify the byte range
+// to return. The range specified must be megabyte (MB) aligned; that is, the
+// range start value must be divisible by 1 MB, and the range end value plus
+// 1 must be divisible by 1 MB or equal the end of the archive. If the ranged
+// archive retrieval is not megabyte aligned, this operation returns a 400
+// response. Furthermore, to ensure you get checksum values for data you download
+// using the Get Job Output API, the range must be tree hash aligned.
+//
+// An AWS account has full permission to perform all operations (actions).
+// However, AWS Identity and Access Management (IAM) users don't have any permissions
+// by default. You must grant them explicit permission to perform specific actions.
+// For more information, see Access Control Using AWS Identity and Access Management
+// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
+//
+// For conceptual information and the underlying REST API, go to Initiate a
+// Job (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-initiate-job-post.html)
+// and Downloading a Vault Inventory (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-inventory.html).
+func (c *Glacier) InitiateJob(input *InitiateJobInput) (*InitiateJobOutput, error) {
+	req, out := c.InitiateJobRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opInitiateMultipartUpload = "InitiateMultipartUpload"
+
+// InitiateMultipartUploadRequest generates a "aws/request.Request" representing the
+// client's request for the InitiateMultipartUpload operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the InitiateMultipartUpload method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the InitiateMultipartUploadRequest method.
+//    req, resp := client.InitiateMultipartUploadRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Glacier) InitiateMultipartUploadRequest(input *InitiateMultipartUploadInput) (req *request.Request, output *InitiateMultipartUploadOutput) {
+	op := &request.Operation{
+		Name:       opInitiateMultipartUpload,
+		HTTPMethod: "POST",
+		HTTPPath:   "/{accountId}/vaults/{vaultName}/multipart-uploads",
+	}
+
+	if input == nil {
+		input = &InitiateMultipartUploadInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &InitiateMultipartUploadOutput{}
+	req.Data = output
+	return
+}
+
+// This operation initiates a multipart upload. Amazon Glacier creates a multipart
+// upload resource and returns its ID in the response. The multipart upload
+// ID is used in subsequent requests to upload parts of an archive (see UploadMultipartPart).
+//
+// When you initiate a multipart upload, you specify the part size in number
+// of bytes. The part size must be a megabyte (1024 KB) multiplied by a power
+// of 2. For example, 1048576 (1 MB), 2097152 (2 MB), 4194304 (4 MB), 8388608
+// (8 MB), and so on. The minimum allowable part size is 1 MB, and the maximum
+// is 4 GB.
+//
+// Every part you upload to this resource (see UploadMultipartPart), except
+// the last one, must have the same size. The last one can be the same size
+// or smaller. For example, suppose you want to upload a 16.2 MB file. If you
+// initiate the multipart upload with a part size of 4 MB, you will upload four
+// parts of 4 MB each and one part of 0.2 MB.
+//
+// You don't need to know the size of the archive when you start a multipart
+// upload because Amazon Glacier does not require you to specify the overall
+// archive size.
+//
+// After you complete the multipart upload, Amazon Glacier removes the multipart
+// upload resource referenced by the ID.
+// Amazon Glacier also removes the multipart
+// upload resource if you cancel the multipart upload, or it may be removed
+// if there is no activity for a period of 24 hours.
+//
+// An AWS account has full permission to perform all operations (actions).
+// However, AWS Identity and Access Management (IAM) users don't have any permissions
+// by default. You must grant them explicit permission to perform specific actions.
+// For more information, see Access Control Using AWS Identity and Access Management
+// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
+//
+// For conceptual information and underlying REST API, go to Uploading Large
+// Archives in Parts (Multipart Upload) (http://docs.aws.amazon.com/amazonglacier/latest/dev/uploading-archive-mpu.html)
+// and Initiate Multipart Upload (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-initiate-upload.html)
+// in the Amazon Glacier Developer Guide.
+func (c *Glacier) InitiateMultipartUpload(input *InitiateMultipartUploadInput) (*InitiateMultipartUploadOutput, error) {
+	req, out := c.InitiateMultipartUploadRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opInitiateVaultLock = "InitiateVaultLock"
+
+// InitiateVaultLockRequest generates a "aws/request.Request" representing the
+// client's request for the InitiateVaultLock operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the InitiateVaultLock method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the InitiateVaultLockRequest method.
+//    req, resp := client.InitiateVaultLockRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Glacier) InitiateVaultLockRequest(input *InitiateVaultLockInput) (req *request.Request, output *InitiateVaultLockOutput) {
+	op := &request.Operation{
+		Name:       opInitiateVaultLock,
+		HTTPMethod: "POST",
+		HTTPPath:   "/{accountId}/vaults/{vaultName}/lock-policy",
+	}
+
+	if input == nil {
+		input = &InitiateVaultLockInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &InitiateVaultLockOutput{}
+	req.Data = output
+	return
+}
+
+// This operation initiates the vault locking process by doing the following:
+//
+//    * Installing a vault lock policy on the specified vault.
+//
+//    * Setting the lock state of vault lock to InProgress.
+//
+//    * Returning a lock ID, which is used to complete the vault locking process.
+//
+// You can set one vault lock policy for each vault, and this policy can
+// be up to 20 KB in size. For more information about vault lock policies, see
+// Amazon Glacier Access Control with Vault Lock Policies (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock-policy.html).
+//
+// You must complete the vault locking process within 24 hours after the vault
+// lock enters the InProgress state. After the 24 hour window ends, the lock
+// ID expires, the vault automatically exits the InProgress state, and the vault
+// lock policy is removed from the vault.
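+//
+// For illustration only, the two-step locking flow might look like the following
+// sketch, where svc is an already-initialized *Glacier client and policyJSON
+// holds a vault lock policy document (placeholder names):
+//
+//    lock, err := svc.InitiateVaultLock(&glacier.InitiateVaultLockInput{
+//        AccountId: aws.String("-"),
+//        VaultName: aws.String("examplevault"),
+//        Policy:    &glacier.VaultLockPolicy{Policy: aws.String(policyJSON)},
+//    })
+//    if err == nil {
+//        // Test the policy while the lock is InProgress, then finalize it:
+//        _, err = svc.CompleteVaultLock(&glacier.CompleteVaultLockInput{
+//            AccountId: aws.String("-"),
+//            VaultName: aws.String("examplevault"),
+//            LockId:    lock.LockId,
+//        })
+//    }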
+// You call CompleteVaultLock to complete
+// the vault locking process by setting the state of the vault lock to Locked.
+//
+// After a vault lock is in the Locked state, you cannot initiate a new vault
+// lock for the vault.
+//
+// You can abort the vault locking process by calling AbortVaultLock. You can
+// get the state of the vault lock by calling GetVaultLock. For more information
+// about the vault locking process, see Amazon Glacier Vault Lock (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html).
+//
+// If this operation is called when the vault lock is in the InProgress state,
+// the operation returns an AccessDeniedException error. When the vault lock
+// is in the InProgress state, you must call AbortVaultLock before you can initiate
+// a new vault lock policy.
+func (c *Glacier) InitiateVaultLock(input *InitiateVaultLockInput) (*InitiateVaultLockOutput, error) {
+	req, out := c.InitiateVaultLockRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opListJobs = "ListJobs"
+
+// ListJobsRequest generates a "aws/request.Request" representing the
+// client's request for the ListJobs operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ListJobs method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the ListJobsRequest method.
+//    req, resp := client.ListJobsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Glacier) ListJobsRequest(input *ListJobsInput) (req *request.Request, output *ListJobsOutput) {
+	op := &request.Operation{
+		Name:       opListJobs,
+		HTTPMethod: "GET",
+		HTTPPath:   "/{accountId}/vaults/{vaultName}/jobs",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"marker"},
+			OutputTokens:    []string{"Marker"},
+			LimitToken:      "limit",
+			TruncationToken: "",
+		},
+	}
+
+	if input == nil {
+		input = &ListJobsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ListJobsOutput{}
+	req.Data = output
+	return
+}
+
+// This operation lists jobs for a vault, including jobs that are in progress
+// and jobs that have recently finished.
+//
+// Amazon Glacier retains recently completed jobs for a period before deleting
+// them; however, it eventually removes completed jobs. The output of completed
+// jobs can be retrieved. Retaining completed jobs for a period of time after
+// they have completed enables you to get a job output in the event you miss
+// the job completion notification or your first attempt to download it fails.
+// For example, suppose you start an archive retrieval job to download an archive.
+// After the job completes, you start to download the archive but encounter
+// a network error. In this scenario, you can retry and download the archive
+// while the job exists.
+//
+// To retrieve an archive or retrieve a vault inventory from Amazon Glacier,
+// you first initiate a job, and after the job completes, you download the data.
+// For an archive retrieval, the output is the archive data, and for an inventory
+// retrieval, it is the inventory list. The List Jobs operation returns a list
+// of these jobs sorted by job initiation time.
+//
+// This List Jobs operation supports pagination. By default, this operation
+// returns up to 1,000 jobs in the response. You should always check the response
+// for a marker at which to continue the list; if there are no more items, the
+// marker is null. To return a list of jobs that begins at a specific job, set
+// the marker request parameter to the value you obtained from a previous List
+// Jobs request. You can also limit the number of jobs returned in the response
+// by specifying the limit parameter in the request.
+//
+// Additionally, you can filter the jobs list returned by specifying optional
+// statuscode (InProgress, Succeeded, or Failed) and completed (true, false)
+// parameters. The statuscode allows you to specify that only jobs that match
+// a specified status are returned. The completed parameter allows you to specify
+// that only jobs in a specific completion state are returned.
+//
+// An AWS account has full permission to perform all operations (actions).
+// However, AWS Identity and Access Management (IAM) users don't have any permissions
+// by default. You must grant them explicit permission to perform specific actions.
+// For more information, see Access Control Using AWS Identity and Access Management
+// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
+//
+// For the underlying REST API, go to List Jobs (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-jobs-get.html).
+func (c *Glacier) ListJobs(input *ListJobsInput) (*ListJobsOutput, error) {
+	req, out := c.ListJobsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// ListJobsPages iterates over the pages of a ListJobs operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListJobs method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+//    // Example iterating over at most 3 pages of a ListJobs operation.
+//    pageNum := 0
+//    err := client.ListJobsPages(params,
+//        func(page *ListJobsOutput, lastPage bool) bool {
+//            pageNum++
+//            fmt.Println(page)
+//            return pageNum <= 3
+//        })
+//
+func (c *Glacier) ListJobsPages(input *ListJobsInput, fn func(p *ListJobsOutput, lastPage bool) (shouldContinue bool)) error {
+	page, _ := c.ListJobsRequest(input)
+	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+	return page.EachPage(func(p interface{}, lastPage bool) bool {
+		return fn(p.(*ListJobsOutput), lastPage)
+	})
+}
+
+const opListMultipartUploads = "ListMultipartUploads"
+
+// ListMultipartUploadsRequest generates a "aws/request.Request" representing the
+// client's request for the ListMultipartUploads operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ListMultipartUploads method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the ListMultipartUploadsRequest method.
+//    req, resp := client.ListMultipartUploadsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Glacier) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) {
+	op := &request.Operation{
+		Name:       opListMultipartUploads,
+		HTTPMethod: "GET",
+		HTTPPath:   "/{accountId}/vaults/{vaultName}/multipart-uploads",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"marker"},
+			OutputTokens:    []string{"Marker"},
+			LimitToken:      "limit",
+			TruncationToken: "",
+		},
+	}
+
+	if input == nil {
+		input = &ListMultipartUploadsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ListMultipartUploadsOutput{}
+	req.Data = output
+	return
+}
+
+// This operation lists in-progress multipart uploads for the specified vault.
+// An in-progress multipart upload is a multipart upload that has been initiated
+// by an InitiateMultipartUpload request, but has not yet been completed or
+// aborted. The list returned in the List Multipart Uploads response has no
+// guaranteed order.
+//
+// The List Multipart Uploads operation supports pagination. By default, this
+// operation returns up to 1,000 multipart uploads in the response. You should
+// always check the response for a marker at which to continue the list; if
+// there are no more items, the marker is null. To return a list of multipart
+// uploads that begins at a specific upload, set the marker request parameter
+// to the value you obtained from a previous List Multipart Uploads request.
+// You can also limit the number of uploads returned in the response by specifying
+// the limit parameter in the request.
+//
+// Note the difference between this operation and listing parts (ListParts).
+// The List Multipart Uploads operation lists all multipart uploads for a vault
+// and does not require a multipart upload ID. The List Parts operation requires
+// a multipart upload ID since parts are associated with a single upload.
+//
+// An AWS account has full permission to perform all operations (actions).
+// However, AWS Identity and Access Management (IAM) users don't have any permissions
+// by default. You must grant them explicit permission to perform specific actions.
+// For more information, see Access Control Using AWS Identity and Access Management
+// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
+//
+// For conceptual information and the underlying REST API, go to Working with
+// Archives in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-archives.html)
+// and List Multipart Uploads (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-list-uploads.html)
+// in the Amazon Glacier Developer Guide.
+func (c *Glacier) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) {
+	req, out := c.ListMultipartUploadsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// ListMultipartUploadsPages iterates over the pages of a ListMultipartUploads operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListMultipartUploads method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a ListMultipartUploads operation.
+// pageNum := 0
+// err := client.ListMultipartUploadsPages(params,
+// func(page *ListMultipartUploadsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *Glacier) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(p *ListMultipartUploadsOutput, lastPage bool) (shouldContinue bool)) error {
+ page, _ := c.ListMultipartUploadsRequest(input)
+ page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
+ return fn(p.(*ListMultipartUploadsOutput), lastPage)
+ })
+}
+
+const opListParts = "ListParts"
+
+// ListPartsRequest generates a "aws/request.Request" representing the
+// client's request for the ListParts operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ListParts method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the ListPartsRequest method.
+// req, resp := client.ListPartsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *Glacier) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) {
+ op := &request.Operation{
+ Name: opListParts,
+ HTTPMethod: "GET",
+ HTTPPath: "/{accountId}/vaults/{vaultName}/multipart-uploads/{uploadId}",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"marker"},
+ OutputTokens: []string{"Marker"},
+ LimitToken: "limit",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &ListPartsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ListPartsOutput{}
+ req.Data = output
+ return
+}
+
+// This operation lists the parts of an archive that have been uploaded in a
+// specific multipart upload. You can make this request at any time during an
+// in-progress multipart upload before you complete the upload (see CompleteMultipartUpload).
+// List Parts returns an error for completed uploads. The list returned in the
+// List Parts response is sorted by part range.
+//
+// The List Parts operation supports pagination. By default, this operation
+// returns up to 1,000 uploaded parts in the response. You should always check
+// the response for a marker at which to continue the list; if there are no
+// more items, the marker is null. To return a list of parts that begins at a
+// specific part, set the marker request parameter to the value you obtained
+// from a previous List Parts request. You can also limit the number of parts
+// returned in the response by specifying the limit parameter in the request.
+//
+// An AWS account has full permission to perform all operations (actions).
+// However, AWS Identity and Access Management (IAM) users don't have any permissions
+// by default. You must grant them explicit permission to perform specific actions.
+// For more information, see Access Control Using AWS Identity and Access Management +// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// +// For conceptual information and the underlying REST API, go to Working with +// Archives in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-archives.html) +// and List Parts (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-list-parts.html) +// in the Amazon Glacier Developer Guide. +func (c *Glacier) ListParts(input *ListPartsInput) (*ListPartsOutput, error) { + req, out := c.ListPartsRequest(input) + err := req.Send() + return out, err +} + +// ListPartsPages iterates over the pages of a ListParts operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListParts method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListParts operation. +// pageNum := 0 +// err := client.ListPartsPages(params, +// func(page *ListPartsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Glacier) ListPartsPages(input *ListPartsInput, fn func(p *ListPartsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListPartsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListPartsOutput), lastPage) + }) +} + +const opListTagsForVault = "ListTagsForVault" + +// ListTagsForVaultRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForVault operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTagsForVault method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTagsForVaultRequest method. +// req, resp := client.ListTagsForVaultRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Glacier) ListTagsForVaultRequest(input *ListTagsForVaultInput) (req *request.Request, output *ListTagsForVaultOutput) { + op := &request.Operation{ + Name: opListTagsForVault, + HTTPMethod: "GET", + HTTPPath: "/{accountId}/vaults/{vaultName}/tags", + } + + if input == nil { + input = &ListTagsForVaultInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTagsForVaultOutput{} + req.Data = output + return +} + +// This operation lists all the tags attached to a vault. The operation returns +// an empty map if there are no tags. For more information about tags, see Tagging +// Amazon Glacier Resources (http://docs.aws.amazon.com/amazonglacier/latest/dev/tagging.html). 
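+//
+// // A hedged usage sketch; the vault name is illustrative.
+// out, err := client.ListTagsForVault(&glacier.ListTagsForVaultInput{
+// AccountId: aws.String("-"),
+// VaultName: aws.String("examplevault"),
+// })
+// if err == nil {
+// for k, v := range out.Tags { // empty map when the vault has no tags
+// fmt.Println(k, aws.StringValue(v))
+// }
+// }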
+func (c *Glacier) ListTagsForVault(input *ListTagsForVaultInput) (*ListTagsForVaultOutput, error) { + req, out := c.ListTagsForVaultRequest(input) + err := req.Send() + return out, err +} + +const opListVaults = "ListVaults" + +// ListVaultsRequest generates a "aws/request.Request" representing the +// client's request for the ListVaults operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListVaults method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListVaultsRequest method. +// req, resp := client.ListVaultsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Glacier) ListVaultsRequest(input *ListVaultsInput) (req *request.Request, output *ListVaultsOutput) { + op := &request.Operation{ + Name: opListVaults, + HTTPMethod: "GET", + HTTPPath: "/{accountId}/vaults", + Paginator: &request.Paginator{ + InputTokens: []string{"marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListVaultsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListVaultsOutput{} + req.Data = output + return +} + +// This operation lists all vaults owned by the calling user's account. The +// list returned in the response is ASCII-sorted by vault name. +// +// By default, this operation returns up to 1,000 items. If there are more +// vaults to list, the response marker field contains the vault Amazon Resource +// Name (ARN) at which to continue the list with a new List Vaults request; +// otherwise, the marker field is null. To return a list of vaults that begins +// at a specific vault, set the marker request parameter to the vault ARN you +// obtained from a previous List Vaults request. You can also limit the number +// of vaults returned in the response by specifying the limit parameter in the +// request. +// +// An AWS account has full permission to perform all operations (actions). +// However, AWS Identity and Access Management (IAM) users don't have any permissions +// by default. You must grant them explicit permission to perform specific actions. +// For more information, see Access Control Using AWS Identity and Access Management +// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// +// For conceptual information and underlying REST API, go to Retrieving Vault +// Metadata in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/retrieving-vault-info.html) +// and List Vaults (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vaults-get.html) +// in the Amazon Glacier Developer Guide. +func (c *Glacier) ListVaults(input *ListVaultsInput) (*ListVaultsOutput, error) { + req, out := c.ListVaultsRequest(input) + err := req.Send() + return out, err +} + +// ListVaultsPages iterates over the pages of a ListVaults operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. 
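+//
+// // A hedged variant of the example below: collecting every vault name across
+// // all pages via the VaultList field of ListVaultsOutput.
+// var names []string
+// err := client.ListVaultsPages(params,
+// func(page *glacier.ListVaultsOutput, lastPage bool) bool {
+// for _, v := range page.VaultList {
+// names = append(names, aws.StringValue(v.VaultName))
+// }
+// return true // keep iterating until the last page
+// })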
+// +// See ListVaults method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListVaults operation. +// pageNum := 0 +// err := client.ListVaultsPages(params, +// func(page *ListVaultsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Glacier) ListVaultsPages(input *ListVaultsInput, fn func(p *ListVaultsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListVaultsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListVaultsOutput), lastPage) + }) +} + +const opRemoveTagsFromVault = "RemoveTagsFromVault" + +// RemoveTagsFromVaultRequest generates a "aws/request.Request" representing the +// client's request for the RemoveTagsFromVault operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RemoveTagsFromVault method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RemoveTagsFromVaultRequest method. +// req, resp := client.RemoveTagsFromVaultRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Glacier) RemoveTagsFromVaultRequest(input *RemoveTagsFromVaultInput) (req *request.Request, output *RemoveTagsFromVaultOutput) { + op := &request.Operation{ + Name: opRemoveTagsFromVault, + HTTPMethod: "POST", + HTTPPath: "/{accountId}/vaults/{vaultName}/tags?operation=remove", + } + + if input == nil { + input = &RemoveTagsFromVaultInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RemoveTagsFromVaultOutput{} + req.Data = output + return +} + +// This operation removes one or more tags from the set of tags attached to +// a vault. For more information about tags, see Tagging Amazon Glacier Resources +// (http://docs.aws.amazon.com/amazonglacier/latest/dev/tagging.html). This +// operation is idempotent. The operation will be successful, even if there +// are no tags attached to the vault. +func (c *Glacier) RemoveTagsFromVault(input *RemoveTagsFromVaultInput) (*RemoveTagsFromVaultOutput, error) { + req, out := c.RemoveTagsFromVaultRequest(input) + err := req.Send() + return out, err +} + +const opSetDataRetrievalPolicy = "SetDataRetrievalPolicy" + +// SetDataRetrievalPolicyRequest generates a "aws/request.Request" representing the +// client's request for the SetDataRetrievalPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetDataRetrievalPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetDataRetrievalPolicyRequest method. +// req, resp := client.SetDataRetrievalPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Glacier) SetDataRetrievalPolicyRequest(input *SetDataRetrievalPolicyInput) (req *request.Request, output *SetDataRetrievalPolicyOutput) { + op := &request.Operation{ + Name: opSetDataRetrievalPolicy, + HTTPMethod: "PUT", + HTTPPath: "/{accountId}/policies/data-retrieval", + } + + if input == nil { + input = &SetDataRetrievalPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetDataRetrievalPolicyOutput{} + req.Data = output + return +} + +// This operation sets and then enacts a data retrieval policy in the region +// specified in the PUT request. You can set one policy per region for an AWS +// account. The policy is enacted within a few minutes of a successful PUT operation. +// +// The set policy operation does not affect retrieval jobs that were in progress +// before the policy was enacted. For more information about data retrieval +// policies, see Amazon Glacier Data Retrieval Policies (http://docs.aws.amazon.com/amazonglacier/latest/dev/data-retrieval-policy.html). +func (c *Glacier) SetDataRetrievalPolicy(input *SetDataRetrievalPolicyInput) (*SetDataRetrievalPolicyOutput, error) { + req, out := c.SetDataRetrievalPolicyRequest(input) + err := req.Send() + return out, err +} + +const opSetVaultAccessPolicy = "SetVaultAccessPolicy" + +// SetVaultAccessPolicyRequest generates a "aws/request.Request" representing the +// client's request for the SetVaultAccessPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetVaultAccessPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetVaultAccessPolicyRequest method. 
+// req, resp := client.SetVaultAccessPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Glacier) SetVaultAccessPolicyRequest(input *SetVaultAccessPolicyInput) (req *request.Request, output *SetVaultAccessPolicyOutput) { + op := &request.Operation{ + Name: opSetVaultAccessPolicy, + HTTPMethod: "PUT", + HTTPPath: "/{accountId}/vaults/{vaultName}/access-policy", + } + + if input == nil { + input = &SetVaultAccessPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetVaultAccessPolicyOutput{} + req.Data = output + return +} + +// This operation configures an access policy for a vault and will overwrite +// an existing policy. To configure a vault access policy, send a PUT request +// to the access-policy subresource of the vault. An access policy is specific +// to a vault and is also called a vault subresource. You can set one access +// policy per vault and the policy can be up to 20 KB in size. For more information +// about vault access policies, see Amazon Glacier Access Control with Vault +// Access Policies (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-access-policy.html). +func (c *Glacier) SetVaultAccessPolicy(input *SetVaultAccessPolicyInput) (*SetVaultAccessPolicyOutput, error) { + req, out := c.SetVaultAccessPolicyRequest(input) + err := req.Send() + return out, err +} + +const opSetVaultNotifications = "SetVaultNotifications" + +// SetVaultNotificationsRequest generates a "aws/request.Request" representing the +// client's request for the SetVaultNotifications operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetVaultNotifications method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetVaultNotificationsRequest method. +// req, resp := client.SetVaultNotificationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Glacier) SetVaultNotificationsRequest(input *SetVaultNotificationsInput) (req *request.Request, output *SetVaultNotificationsOutput) { + op := &request.Operation{ + Name: opSetVaultNotifications, + HTTPMethod: "PUT", + HTTPPath: "/{accountId}/vaults/{vaultName}/notification-configuration", + } + + if input == nil { + input = &SetVaultNotificationsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetVaultNotificationsOutput{} + req.Data = output + return +} + +// This operation configures notifications that will be sent when specific events +// happen to a vault. By default, you don't get any notifications. +// +// To configure vault notifications, send a PUT request to the notification-configuration +// subresource of the vault. 
+// The request should include a JSON document that
+// provides an Amazon SNS topic and specific events for which you want Amazon
+// Glacier to send notifications to the topic.
+//
+// Amazon SNS topics must grant permission to the vault to be allowed to publish
+// notifications to the topic. You can configure a vault to publish a notification
+// for the following vault events:
+//
+// ArchiveRetrievalCompleted: This event occurs when a job that was initiated
+// for an archive retrieval is completed (InitiateJob). The status of the completed
+// job can be "Succeeded" or "Failed". The notification sent to the SNS topic
+// is the same output as returned from DescribeJob.
+//
+// InventoryRetrievalCompleted: This event occurs when a job that was initiated
+// for an inventory retrieval is completed (InitiateJob). The status of the
+// completed job can be "Succeeded" or "Failed". The notification sent to the
+// SNS topic is the same output as returned from DescribeJob.
+//
+// An AWS account has full permission to perform
+// all operations (actions). However, AWS Identity and Access Management (IAM)
+// users don't have any permissions by default. You must grant them explicit
+// permission to perform specific actions. For more information, see Access
+// Control Using AWS Identity and Access Management (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
+//
+// For conceptual information and underlying REST API, go to Configuring Vault
+// Notifications in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/configuring-notifications.html)
+// and Set Vault Notification Configuration (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-notifications-put.html)
+// in the Amazon Glacier Developer Guide.
+func (c *Glacier) SetVaultNotifications(input *SetVaultNotificationsInput) (*SetVaultNotificationsOutput, error) {
+ req, out := c.SetVaultNotificationsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opUploadArchive = "UploadArchive"
+
+// UploadArchiveRequest generates a "aws/request.Request" representing the
+// client's request for the UploadArchive operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the UploadArchive method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the UploadArchiveRequest method.
+// req, resp := client.UploadArchiveRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *Glacier) UploadArchiveRequest(input *UploadArchiveInput) (req *request.Request, output *ArchiveCreationOutput) {
+ op := &request.Operation{
+ Name: opUploadArchive,
+ HTTPMethod: "POST",
+ HTTPPath: "/{accountId}/vaults/{vaultName}/archives",
+ }
+
+ if input == nil {
+ input = &UploadArchiveInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ArchiveCreationOutput{}
+ req.Data = output
+ return
+}
+
+// This operation adds an archive to a vault.
+// This is a synchronous operation, and for a successful upload, your data is
+// durably persisted. Amazon Glacier returns the archive ID in the x-amz-archive-id
+// header of the response.
+//
+// You must use the archive ID to access your data in Amazon Glacier. After
+// you upload an archive, you should save the archive ID returned so that you
+// can retrieve or delete the archive later. Besides saving the archive ID,
+// you can also index it and give it a friendly name to allow for better searching.
+// You can also use the optional archive description field to specify how the
+// archive is referred to in an external index of archives, such as you might
+// create in Amazon DynamoDB. You can also get the vault inventory to obtain
+// a list of archive IDs in a vault. For more information, see InitiateJob.
+//
+// You must provide a SHA256 tree hash of the data you are uploading. For information
+// about computing a SHA256 tree hash, see Computing Checksums (http://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html).
+//
+// You can optionally specify an archive description of up to 1,024 printable
+// ASCII characters. You can get the archive description when you either retrieve
+// the archive or get the vault inventory. For more information, see InitiateJob.
+// Amazon Glacier does not interpret the description in any way. An archive
+// description does not need to be unique. You cannot use the description to
+// retrieve or sort the archive list.
+//
+// Archives are immutable. After you upload an archive, you cannot edit the
+// archive or its description.
+//
+// An AWS account has full permission to perform all operations (actions).
+// However, AWS Identity and Access Management (IAM) users don't have any permissions
+// by default. You must grant them explicit permission to perform specific actions.
+// For more information, see Access Control Using AWS Identity and Access Management
+// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
+//
+// For conceptual information and underlying REST API, go to Uploading an
+// Archive in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/uploading-an-archive.html)
+// and Upload Archive (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-archive-post.html)
+// in the Amazon Glacier Developer Guide.
+func (c *Glacier) UploadArchive(input *UploadArchiveInput) (*ArchiveCreationOutput, error) {
+ req, out := c.UploadArchiveRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opUploadMultipartPart = "UploadMultipartPart"
+
+// UploadMultipartPartRequest generates a "aws/request.Request" representing the
+// client's request for the UploadMultipartPart operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the UploadMultipartPart method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the UploadMultipartPartRequest method.
+// req, resp := client.UploadMultipartPartRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *Glacier) UploadMultipartPartRequest(input *UploadMultipartPartInput) (req *request.Request, output *UploadMultipartPartOutput) {
+ op := &request.Operation{
+ Name: opUploadMultipartPart,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{accountId}/vaults/{vaultName}/multipart-uploads/{uploadId}",
+ }
+
+ if input == nil {
+ input = &UploadMultipartPartInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &UploadMultipartPartOutput{}
+ req.Data = output
+ return
+}
+
+// This operation uploads a part of an archive. You can upload archive parts
+// in any order. You can also upload them in parallel. You can upload up to
+// 10,000 parts for a multipart upload.
+//
+// Amazon Glacier rejects your upload part request if any of the following
+// conditions is true:
+//
+// SHA256 tree hash does not match: To ensure that part data is not corrupted
+// in transmission, you compute a SHA256 tree hash of the part and include it
+// in your request. Upon receiving the part data, Amazon Glacier also computes
+// a SHA256 tree hash. If these hash values don't match, the operation fails.
+// For information about computing a SHA256 tree hash, see Computing Checksums
+// (http://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html).
+//
+// Part size does not match: The size of each part except the last must match
+// the size specified in the corresponding InitiateMultipartUpload request.
+// The size of the last part must be the same size as, or smaller than, the
+// specified size.
+//
+// If you upload a part whose size is smaller than the part size you specified
+// in your initiate multipart upload request and that part is not the last part,
+// then the upload part request will succeed. However, the subsequent Complete
+// Multipart Upload request will fail.
+//
+// Range does not align: The byte range value in the request does not align
+// with the part size specified in the corresponding initiate request. For example,
+// if you specify a part size of 4194304 bytes (4 MB), then 0 to 4194303 bytes
+// (4 MB - 1) and 4194304 (4 MB) to 8388607 (8 MB - 1) are valid part ranges.
+// However, if you set a range value of 2 MB to 6 MB, the range does not align
+// with the part size and the upload will fail.
+//
+// This operation is idempotent.
+// If you upload the same part multiple times, the data included in the most
+// recent request overwrites the previously uploaded data.
+//
+// An AWS account has full permission to perform all operations (actions).
+// However, AWS Identity and Access Management (IAM) users don't have any permissions
+// by default. You must grant them explicit permission to perform specific actions.
+// For more information, see Access Control Using AWS Identity and Access Management
+// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
+//
+// For conceptual information and underlying REST API, go to Uploading Large
+// Archives in Parts (Multipart Upload) (http://docs.aws.amazon.com/amazonglacier/latest/dev/uploading-archive-mpu.html)
+// and Upload Part (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-upload-part.html)
+// in the Amazon Glacier Developer Guide.
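+//
+// // A hedged sketch of the range alignment described above; partSize and i
+// // are illustrative, and partSize must match the value given to
+// // InitiateMultipartUpload.
+// const partSize = 4194304 // 4 MB
+// start := int64(i) * partSize
+// end := start + partSize - 1 // part 0 covers bytes 0-4194303
+// contentRange := fmt.Sprintf("bytes %d-%d/*", start, end)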
+func (c *Glacier) UploadMultipartPart(input *UploadMultipartPartInput) (*UploadMultipartPartOutput, error) {
+ req, out := c.UploadMultipartPartRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// Provides options to abort a multipart upload identified by the upload ID.
+//
+// For information about the underlying REST API, go to Abort Multipart Upload
+// (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-abort-upload.html).
+// For conceptual information, go to Working with Archives in Amazon Glacier
+// (http://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-archives.html).
+type AbortMultipartUploadInput struct {
+ _ struct{} `type:"structure"`
+
+ // The AccountId value is the AWS account ID of the account that owns the vault.
+ // You can either specify an AWS account ID or optionally a single '-'
+ // (hyphen), in which case Amazon Glacier uses the AWS account ID associated
+ // with the credentials used to sign the request. If you use an account ID,
+ // do not include any hyphens ('-') in the ID.
+ AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+ // The upload ID of the multipart upload to delete.
+ UploadId *string `location:"uri" locationName:"uploadId" type:"string" required:"true"`
+
+ // The name of the vault.
+ VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AbortMultipartUploadInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AbortMultipartUploadInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AbortMultipartUploadInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AbortMultipartUploadInput"}
+ if s.AccountId == nil {
+ invalidParams.Add(request.NewErrParamRequired("AccountId"))
+ }
+ if s.UploadId == nil {
+ invalidParams.Add(request.NewErrParamRequired("UploadId"))
+ }
+ if s.VaultName == nil {
+ invalidParams.Add(request.NewErrParamRequired("VaultName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type AbortMultipartUploadOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s AbortMultipartUploadOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AbortMultipartUploadOutput) GoString() string {
+ return s.String()
+}
+
+// The input values for AbortVaultLock.
+type AbortVaultLockInput struct {
+ _ struct{} `type:"structure"`
+
+ // The AccountId value is the AWS account ID. This value must match the AWS
+ // account ID associated with the credentials used to sign the request. You
+ // can either specify an AWS account ID or optionally a single '-' (hyphen),
+ // in which case Amazon Glacier uses the AWS account ID associated with the
+ // credentials used to sign the request. If you specify your account ID, do
+ // not include any hyphens ('-') in the ID.
+ AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+ // The name of the vault.
+ VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AbortVaultLockInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AbortVaultLockInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AbortVaultLockInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AbortVaultLockInput"}
+ if s.AccountId == nil {
+ invalidParams.Add(request.NewErrParamRequired("AccountId"))
+ }
+ if s.VaultName == nil {
+ invalidParams.Add(request.NewErrParamRequired("VaultName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type AbortVaultLockOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s AbortVaultLockOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AbortVaultLockOutput) GoString() string {
+ return s.String()
+}
+
+// The input values for AddTagsToVault.
+type AddTagsToVaultInput struct {
+ _ struct{} `type:"structure"`
+
+ // The AccountId value is the AWS account ID of the account that owns the vault.
+ // You can either specify an AWS account ID or optionally a single '-'
+ // (hyphen), in which case Amazon Glacier uses the AWS account ID associated
+ // with the credentials used to sign the request. If you use an account ID,
+ // do not include any hyphens ('-') in the ID.
+ AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+ // The tags to add to the vault. Each tag is composed of a key and a value.
+ // The value can be an empty string.
+ Tags map[string]*string `type:"map"`
+
+ // The name of the vault.
+ VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AddTagsToVaultInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddTagsToVaultInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AddTagsToVaultInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AddTagsToVaultInput"}
+ if s.AccountId == nil {
+ invalidParams.Add(request.NewErrParamRequired("AccountId"))
+ }
+ if s.VaultName == nil {
+ invalidParams.Add(request.NewErrParamRequired("VaultName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type AddTagsToVaultOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s AddTagsToVaultOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddTagsToVaultOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the Amazon Glacier response to your request.
+//
+// For information about the underlying REST API, go to Upload Archive (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-archive-post.html).
+// For conceptual information, go to Working with Archives in Amazon Glacier
+// (http://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-archives.html).
+type ArchiveCreationOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the archive. This value is also included as part of the location.
+ ArchiveId *string `location:"header" locationName:"x-amz-archive-id" type:"string"`
+
+ // The checksum of the archive computed by Amazon Glacier.
+ Checksum *string `location:"header" locationName:"x-amz-sha256-tree-hash" type:"string"`
+
+ // The relative URI path of the newly added archive resource.
+ Location *string `location:"header" locationName:"Location" type:"string"`
+}
+
+// String returns the string representation
+func (s ArchiveCreationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ArchiveCreationOutput) GoString() string {
+ return s.String()
+}
+
+// Provides options to complete a multipart upload operation. This informs Amazon
+// Glacier that all the archive parts have been uploaded and Amazon Glacier
+// can now assemble the archive from the uploaded parts. After assembling and
+// saving the archive to the vault, Amazon Glacier returns the URI path of the
+// newly created archive resource.
+type CompleteMultipartUploadInput struct {
+ _ struct{} `type:"structure"`
+
+ // The AccountId value is the AWS account ID of the account that owns the vault.
+ // You can either specify an AWS account ID or optionally a single '-'
+ // (hyphen), in which case Amazon Glacier uses the AWS account ID associated
+ // with the credentials used to sign the request. If you use an account ID,
+ // do not include any hyphens ('-') in the ID.
+ AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+ // The total size, in bytes, of the entire archive. This value should be the
+ // sum of all the sizes of the individual parts that you uploaded.
+ ArchiveSize *string `location:"header" locationName:"x-amz-archive-size" type:"string"`
+
+ // The SHA256 tree hash of the entire archive. It is the tree hash of the
+ // SHA256 tree hashes of the individual parts. If the value you specify in the
+ // request does not match the SHA256 tree hash of the final assembled archive
+ // as computed by Amazon Glacier, Amazon Glacier returns an error and the request
+ // fails.
+ Checksum *string `location:"header" locationName:"x-amz-sha256-tree-hash" type:"string"`
+
+ // The upload ID of the multipart upload.
+ UploadId *string `location:"uri" locationName:"uploadId" type:"string" required:"true"`
+
+ // The name of the vault.
+ VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CompleteMultipartUploadInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CompleteMultipartUploadInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CompleteMultipartUploadInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CompleteMultipartUploadInput"}
+ if s.AccountId == nil {
+ invalidParams.Add(request.NewErrParamRequired("AccountId"))
+ }
+ if s.UploadId == nil {
+ invalidParams.Add(request.NewErrParamRequired("UploadId"))
+ }
+ if s.VaultName == nil {
+ invalidParams.Add(request.NewErrParamRequired("VaultName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// The input values for CompleteVaultLock.
+type CompleteVaultLockInput struct {
+ _ struct{} `type:"structure"`
+
+ // The AccountId value is the AWS account ID. This value must match the AWS
+ // account ID associated with the credentials used to sign the request. You
+ // can either specify an AWS account ID or optionally a single '-' (hyphen),
+ // in which case Amazon Glacier uses the AWS account ID associated with the
+ // credentials used to sign the request. If you specify your account ID, do
+ // not include any hyphens ('-') in the ID.
+ AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+ // The lockId value is the lock ID obtained from an InitiateVaultLock request.
+ LockId *string `location:"uri" locationName:"lockId" type:"string" required:"true"`
+
+ // The name of the vault.
+ VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CompleteVaultLockInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CompleteVaultLockInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CompleteVaultLockInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CompleteVaultLockInput"}
+ if s.AccountId == nil {
+ invalidParams.Add(request.NewErrParamRequired("AccountId"))
+ }
+ if s.LockId == nil {
+ invalidParams.Add(request.NewErrParamRequired("LockId"))
+ }
+ if s.VaultName == nil {
+ invalidParams.Add(request.NewErrParamRequired("VaultName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type CompleteVaultLockOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s CompleteVaultLockOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CompleteVaultLockOutput) GoString() string {
+ return s.String()
+}
+
+// Provides options to create a vault.
+type CreateVaultInput struct {
+ _ struct{} `type:"structure"`
+
+ // The AccountId value is the AWS account ID. This value must match the AWS
+ // account ID associated with the credentials used to sign the request. You
+ // can either specify an AWS account ID or optionally a single '-' (hyphen),
+ // in which case Amazon Glacier uses the AWS account ID associated with the
+ // credentials used to sign the request. If you specify your account ID, do
+ // not include any hyphens ('-') in the ID.
+ AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+ // The name of the vault.
+ VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateVaultInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateVaultInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateVaultInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateVaultInput"}
+ if s.AccountId == nil {
+ invalidParams.Add(request.NewErrParamRequired("AccountId"))
+ }
+ if s.VaultName == nil {
+ invalidParams.Add(request.NewErrParamRequired("VaultName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Contains the Amazon Glacier response to your request.
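+//
+// // A hedged usage sketch; the vault name is illustrative.
+// out, err := client.CreateVault(&glacier.CreateVaultInput{
+// AccountId: aws.String("-"),
+// VaultName: aws.String("examplevault"),
+// })
+// if err == nil {
+// fmt.Println(aws.StringValue(out.Location)) // URI of the new vault
+// }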
+type CreateVaultOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The URI of the vault that was created.
+ Location *string `location:"header" locationName:"Location" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateVaultOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateVaultOutput) GoString() string {
+ return s.String()
+}
+
+// Data retrieval policy.
+type DataRetrievalPolicy struct {
+ _ struct{} `type:"structure"`
+
+ // The policy rule. Although this is a list type, currently there must be only
+ // one rule, which contains a Strategy field and optionally a BytesPerHour field.
+ Rules []*DataRetrievalRule `type:"list"`
+}
+
+// String returns the string representation
+func (s DataRetrievalPolicy) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DataRetrievalPolicy) GoString() string {
+ return s.String()
+}
+
+// Data retrieval policy rule.
+type DataRetrievalRule struct {
+ _ struct{} `type:"structure"`
+
+ // The maximum number of bytes that can be retrieved in an hour.
+ //
+ // This field is required only if the value of the Strategy field is BytesPerHour.
+ // Your PUT operation will be rejected if the Strategy field is not set to BytesPerHour
+ // and you set this field.
+ BytesPerHour *int64 `type:"long"`
+
+ // The type of data retrieval policy to set.
+ //
+ // Valid values: BytesPerHour|FreeTier|None
+ Strategy *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DataRetrievalRule) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DataRetrievalRule) GoString() string {
+ return s.String()
+}
+
+// Provides options for deleting an archive from an Amazon Glacier vault.
+type DeleteArchiveInput struct {
+ _ struct{} `type:"structure"`
+
+ // The AccountId value is the AWS account ID of the account that owns the vault.
+ // You can either specify an AWS account ID or optionally a single '-'
+ // (hyphen), in which case Amazon Glacier uses the AWS account ID associated
+ // with the credentials used to sign the request. If you use an account ID,
+ // do not include any hyphens ('-') in the ID.
+ AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+ // The ID of the archive to delete.
+ ArchiveId *string `location:"uri" locationName:"archiveId" type:"string" required:"true"`
+
+ // The name of the vault.
+ VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteArchiveInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteArchiveInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
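+//
+// // A hedged sketch: catching missing required fields before sending; the
+// // partially filled input is illustrative.
+// in := &glacier.DeleteArchiveInput{AccountId: aws.String("-")}
+// if err := in.Validate(); err != nil {
+// fmt.Println(err) // reports the missing ArchiveId and VaultName
+// }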
+func (s *DeleteArchiveInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteArchiveInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.ArchiveId == nil { + invalidParams.Add(request.NewErrParamRequired("ArchiveId")) + } + if s.VaultName == nil { + invalidParams.Add(request.NewErrParamRequired("VaultName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteArchiveOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteArchiveOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteArchiveOutput) GoString() string { + return s.String() +} + +// DeleteVaultAccessPolicy input. +type DeleteVaultAccessPolicyInput struct { + _ struct{} `type:"structure"` + + // The AccountId value is the AWS account ID of the account that owns the vault. + // You can either specify an AWS account ID or optionally a single apos-apos + // (hyphen), in which case Amazon Glacier uses the AWS account ID associated + // with the credentials used to sign the request. If you use an account ID, + // do not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The name of the vault. + VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVaultAccessPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVaultAccessPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteVaultAccessPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteVaultAccessPolicyInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.VaultName == nil { + invalidParams.Add(request.NewErrParamRequired("VaultName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteVaultAccessPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteVaultAccessPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVaultAccessPolicyOutput) GoString() string { + return s.String() +} + +// Provides options for deleting a vault from Amazon Glacier. +type DeleteVaultInput struct { + _ struct{} `type:"structure"` + + // The AccountId value is the AWS account ID of the account that owns the vault. + // You can either specify an AWS account ID or optionally a single apos-apos + // (hyphen), in which case Amazon Glacier uses the AWS account ID associated + // with the credentials used to sign the request. If you use an account ID, + // do not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The name of the vault. 
+ VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVaultInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVaultInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteVaultInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteVaultInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.VaultName == nil { + invalidParams.Add(request.NewErrParamRequired("VaultName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Provides options for deleting a vault notification configuration from an +// Amazon Glacier vault. +type DeleteVaultNotificationsInput struct { + _ struct{} `type:"structure"` + + // The AccountId value is the AWS account ID of the account that owns the vault. + // You can either specify an AWS account ID or optionally a single apos-apos + // (hyphen), in which case Amazon Glacier uses the AWS account ID associated + // with the credentials used to sign the request. If you use an account ID, + // do not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The name of the vault. + VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVaultNotificationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVaultNotificationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteVaultNotificationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteVaultNotificationsInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.VaultName == nil { + invalidParams.Add(request.NewErrParamRequired("VaultName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteVaultNotificationsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteVaultNotificationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVaultNotificationsOutput) GoString() string { + return s.String() +} + +type DeleteVaultOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteVaultOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVaultOutput) GoString() string { + return s.String() +} + +// Provides options for retrieving a job description. +type DescribeJobInput struct { + _ struct{} `type:"structure"` + + // The AccountId value is the AWS account ID of the account that owns the vault. + // You can either specify an AWS account ID or optionally a single apos-apos + // (hyphen), in which case Amazon Glacier uses the AWS account ID associated + // with the credentials used to sign the request. If you use an account ID, + // do not include any hyphens (apos-apos) in the ID. 
+ AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The ID of the job to describe. + JobId *string `location:"uri" locationName:"jobId" type:"string" required:"true"` + + // The name of the vault. + VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeJobInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.JobId == nil { + invalidParams.Add(request.NewErrParamRequired("JobId")) + } + if s.VaultName == nil { + invalidParams.Add(request.NewErrParamRequired("VaultName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Provides options for retrieving metadata for a specific vault in Amazon Glacier. +type DescribeVaultInput struct { + _ struct{} `type:"structure"` + + // The AccountId value is the AWS account ID of the account that owns the vault. + // You can either specify an AWS account ID or optionally a single apos-apos + // (hyphen), in which case Amazon Glacier uses the AWS account ID associated + // with the credentials used to sign the request. If you use an account ID, + // do not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The name of the vault. + VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeVaultInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVaultInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeVaultInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeVaultInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.VaultName == nil { + invalidParams.Add(request.NewErrParamRequired("VaultName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the Amazon Glacier response to your request. +type DescribeVaultOutput struct { + _ struct{} `type:"structure"` + + // The UTC date when the vault was created. A string representation of ISO 8601 + // date format, for example, "2012-03-20T17:03:43.221Z". + CreationDate *string `type:"string"` + + // The UTC date when Amazon Glacier completed the last vault inventory. A string + // representation of ISO 8601 date format, for example, "2012-03-20T17:03:43.221Z". + LastInventoryDate *string `type:"string"` + + // The number of archives in the vault as of the last inventory date. This field + // will return null if an inventory has not yet run on the vault, for example, + // if you just created the vault. + NumberOfArchives *int64 `type:"long"` + + // Total size, in bytes, of the archives in the vault as of the last inventory + // date. 
This field will return null if an inventory has not yet run on the + // vault, for example, if you just created the vault. + SizeInBytes *int64 `type:"long"` + + // The Amazon Resource Name (ARN) of the vault. + VaultARN *string `type:"string"` + + // The name of the vault. + VaultName *string `type:"string"` +} + +// String returns the string representation +func (s DescribeVaultOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVaultOutput) GoString() string { + return s.String() +} + +// Input for GetDataRetrievalPolicy. +type GetDataRetrievalPolicyInput struct { + _ struct{} `type:"structure"` + + // The AccountId value is the AWS account ID. This value must match the AWS + // account ID associated with the credentials used to sign the request. You + // can either specify an AWS account ID or optionally a single apos-apos (hyphen), + // in which case Amazon Glacier uses the AWS account ID associated with the + // credentials used to sign the request. If you specify your account ID, do + // not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetDataRetrievalPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDataRetrievalPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetDataRetrievalPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDataRetrievalPolicyInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the Amazon Glacier response to the GetDataRetrievalPolicy request. +type GetDataRetrievalPolicyOutput struct { + _ struct{} `type:"structure"` + + // Contains the returned data retrieval policy in JSON format. + Policy *DataRetrievalPolicy `type:"structure"` +} + +// String returns the string representation +func (s GetDataRetrievalPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDataRetrievalPolicyOutput) GoString() string { + return s.String() +} + +// Provides options for downloading output of an Amazon Glacier job. +type GetJobOutputInput struct { + _ struct{} `type:"structure"` + + // The AccountId value is the AWS account ID of the account that owns the vault. + // You can either specify an AWS account ID or optionally a single apos-apos + // (hyphen), in which case Amazon Glacier uses the AWS account ID associated + // with the credentials used to sign the request. If you use an account ID, + // do not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The job ID whose data is downloaded. + JobId *string `location:"uri" locationName:"jobId" type:"string" required:"true"` + + // The range of bytes to retrieve from the output. For example, if you want + // to download the first 1,048,576 bytes, specify "Range: bytes=0-1048575". + // By default, this operation downloads the entire output. + Range *string `location:"header" locationName:"Range" type:"string"` + + // The name of the vault. 
+ VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetJobOutputInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetJobOutputInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetJobOutputInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetJobOutputInput"}
+ if s.AccountId == nil {
+ invalidParams.Add(request.NewErrParamRequired("AccountId"))
+ }
+ if s.JobId == nil {
+ invalidParams.Add(request.NewErrParamRequired("JobId"))
+ }
+ if s.VaultName == nil {
+ invalidParams.Add(request.NewErrParamRequired("VaultName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Contains the Amazon Glacier response to your request.
+type GetJobOutputOutput struct {
+ _ struct{} `type:"structure" payload:"Body"`
+
+ // Indicates the range units accepted. For more information, see RFC 2616 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html).
+ AcceptRanges *string `location:"header" locationName:"Accept-Ranges" type:"string"`
+
+ // The description of an archive.
+ ArchiveDescription *string `location:"header" locationName:"x-amz-archive-description" type:"string"`
+
+ // The job data, either archive data or inventory data.
+ Body io.ReadCloser `locationName:"body" type:"blob"`
+
+ // The checksum of the data in the response. This header is returned only when
+ // retrieving the output for an archive retrieval job, and only under the following
+ // conditions:
+ //
+ //    * You get the entire range of the archive.
+ //
+ //    * You request a range of the archive that starts and ends on a multiple
+ //    of 1 MB. For example, if you have a 3.1 MB archive and you specify a range
+ //    that starts at 1 MB and ends at 2 MB, then the x-amz-sha256-tree-hash
+ //    is returned as a response header.
+ //
+ //    * You request a range of the archive that starts on a multiple of 1 MB
+ //    and goes to the end of the archive. For example, if you have a 3.1 MB
+ //    archive and you specify a range that starts at 2 MB and ends at 3.1 MB
+ //    (the end of the archive), then the x-amz-sha256-tree-hash is returned
+ //    as a response header.
+ Checksum *string `location:"header" locationName:"x-amz-sha256-tree-hash" type:"string"`
+
+ // The range of bytes returned by Amazon Glacier. If only partial output is
+ // downloaded, the response provides the range of bytes Amazon Glacier returned.
+ // For example, bytes 0-1048575/8388608 returns the first 1 MB from 8 MB.
+ ContentRange *string `location:"header" locationName:"Content-Range" type:"string"`
+
+ // The Content-Type depends on whether the job output is an archive or a vault
+ // inventory. For archive data, the Content-Type is application/octet-stream.
+ // For vault inventory, if you requested CSV format when you initiated the job,
+ // the Content-Type is text/csv. Otherwise, by default, vault inventory is returned
+ // as JSON, and the Content-Type is application/json.
+ ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+ // The HTTP response code for a job output request. The value depends on whether
+ // a range was specified in the request.
+ Status *int64 `location:"statusCode" locationName:"status" type:"integer"` +} + +// String returns the string representation +func (s GetJobOutputOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetJobOutputOutput) GoString() string { + return s.String() +} + +// Input for GetVaultAccessPolicy. +type GetVaultAccessPolicyInput struct { + _ struct{} `type:"structure"` + + // The AccountId value is the AWS account ID of the account that owns the vault. + // You can either specify an AWS account ID or optionally a single apos-apos + // (hyphen), in which case Amazon Glacier uses the AWS account ID associated + // with the credentials used to sign the request. If you use an account ID, + // do not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The name of the vault. + VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetVaultAccessPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetVaultAccessPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetVaultAccessPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetVaultAccessPolicyInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.VaultName == nil { + invalidParams.Add(request.NewErrParamRequired("VaultName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Output for GetVaultAccessPolicy. +type GetVaultAccessPolicyOutput struct { + _ struct{} `type:"structure" payload:"Policy"` + + // Contains the returned vault access policy as a JSON string. + Policy *VaultAccessPolicy `locationName:"policy" type:"structure"` +} + +// String returns the string representation +func (s GetVaultAccessPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetVaultAccessPolicyOutput) GoString() string { + return s.String() +} + +// The input values for GetVaultLock. +type GetVaultLockInput struct { + _ struct{} `type:"structure"` + + // The AccountId value is the AWS account ID of the account that owns the vault. + // You can either specify an AWS account ID or optionally a single apos-apos + // (hyphen), in which case Amazon Glacier uses the AWS account ID associated + // with the credentials used to sign the request. If you use an account ID, + // do not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The name of the vault. + VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetVaultLockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetVaultLockInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
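+// Validate is also run by the SDK's parameter-validation request handlers
+// before a request is sent, but it can be invoked directly. Editor's note:
+// a minimal sketch (not part of the generated upstream file):
+//
+//	input := &glacier.GetVaultLockInput{VaultName: aws.String("examplevault")}
+//	err := input.Validate() // reports AccountId as a missing required parameter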
+func (s *GetVaultLockInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetVaultLockInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.VaultName == nil { + invalidParams.Add(request.NewErrParamRequired("VaultName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the Amazon Glacier response to your request. +type GetVaultLockOutput struct { + _ struct{} `type:"structure"` + + // The UTC date and time at which the vault lock was put into the InProgress + // state. + CreationDate *string `type:"string"` + + // The UTC date and time at which the lock ID expires. This value can be null + // if the vault lock is in a Locked state. + ExpirationDate *string `type:"string"` + + // The vault lock policy as a JSON string, which uses "\" as an escape character. + Policy *string `type:"string"` + + // The state of the vault lock. InProgress or Locked. + State *string `type:"string"` +} + +// String returns the string representation +func (s GetVaultLockOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetVaultLockOutput) GoString() string { + return s.String() +} + +// Provides options for retrieving the notification configuration set on an +// Amazon Glacier vault. +type GetVaultNotificationsInput struct { + _ struct{} `type:"structure"` + + // The AccountId value is the AWS account ID of the account that owns the vault. + // You can either specify an AWS account ID or optionally a single apos-apos + // (hyphen), in which case Amazon Glacier uses the AWS account ID associated + // with the credentials used to sign the request. If you use an account ID, + // do not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The name of the vault. + VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetVaultNotificationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetVaultNotificationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetVaultNotificationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetVaultNotificationsInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.VaultName == nil { + invalidParams.Add(request.NewErrParamRequired("VaultName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the Amazon Glacier response to your request. +type GetVaultNotificationsOutput struct { + _ struct{} `type:"structure" payload:"VaultNotificationConfig"` + + // Returns the notification configuration set on the vault. + VaultNotificationConfig *VaultNotificationConfig `locationName:"vaultNotificationConfig" type:"structure"` +} + +// String returns the string representation +func (s GetVaultNotificationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetVaultNotificationsOutput) GoString() string { + return s.String() +} + +// Provides options for initiating an Amazon Glacier job. 
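+//
+// Editor's note: a minimal sketch (not part of the generated upstream file)
+// of initiating an inventory-retrieval job, assuming an existing
+// *glacier.Glacier client named svc and a hypothetical vault name:
+//
+//	out, err := svc.InitiateJob(&glacier.InitiateJobInput{
+//	    VaultName: aws.String("examplevault"),
+//	    JobParameters: &glacier.JobParameters{
+//	        Type:   aws.String("inventory-retrieval"),
+//	        Format: aws.String("JSON"),
+//	    },
+//	})
+//	// On success, out.JobId identifies the job to poll with DescribeJob.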
+type InitiateJobInput struct {
+ _ struct{} `type:"structure" payload:"JobParameters"`
+
+ // The AccountId value is the AWS account ID of the account that owns the vault.
+ // You can either specify an AWS account ID or optionally a single '-' (hyphen),
+ // in which case Amazon Glacier uses the AWS account ID associated with the
+ // credentials used to sign the request. If you use an account ID, do not include
+ // any hyphens ('-') in the ID.
+ AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+ // Provides options for specifying job information.
+ JobParameters *JobParameters `locationName:"jobParameters" type:"structure"`
+
+ // The name of the vault.
+ VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s InitiateJobInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InitiateJobInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InitiateJobInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "InitiateJobInput"}
+ if s.AccountId == nil {
+ invalidParams.Add(request.NewErrParamRequired("AccountId"))
+ }
+ if s.VaultName == nil {
+ invalidParams.Add(request.NewErrParamRequired("VaultName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Contains the Amazon Glacier response to your request.
+type InitiateJobOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the job.
+ JobId *string `location:"header" locationName:"x-amz-job-id" type:"string"`
+
+ // The relative URI path of the job.
+ Location *string `location:"header" locationName:"Location" type:"string"`
+}
+
+// String returns the string representation
+func (s InitiateJobOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InitiateJobOutput) GoString() string {
+ return s.String()
+}
+
+// Provides options for initiating a multipart upload to an Amazon Glacier vault.
+type InitiateMultipartUploadInput struct {
+ _ struct{} `type:"structure"`
+
+ // The AccountId value is the AWS account ID of the account that owns the vault.
+ // You can either specify an AWS account ID or optionally a single '-' (hyphen),
+ // in which case Amazon Glacier uses the AWS account ID associated with the
+ // credentials used to sign the request. If you use an account ID, do not include
+ // any hyphens ('-') in the ID.
+ AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+ // The archive description that you are uploading in parts.
+ ArchiveDescription *string `location:"header" locationName:"x-amz-archive-description" type:"string"`
+
+ // The size of each part except the last, in bytes. The last part can be smaller
+ // than this part size. The part size must be a megabyte (1024 KB) multiplied
+ // by a power of 2, for example 1048576 (1 MB), 2097152 (2 MB), 4194304 (4 MB),
+ // 8388608 (8 MB), and so on. The minimum allowable part size is 1 MB, and the
+ // maximum is 4 GB (4096 MB).
+ PartSize *string `location:"header" locationName:"x-amz-part-size" type:"string"`
+
+ // The name of the vault.
+ VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s InitiateMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InitiateMultipartUploadInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InitiateMultipartUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InitiateMultipartUploadInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.VaultName == nil { + invalidParams.Add(request.NewErrParamRequired("VaultName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The Amazon Glacier response to your request. +type InitiateMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + // The relative URI path of the multipart upload ID Amazon Glacier created. + Location *string `location:"header" locationName:"Location" type:"string"` + + // The ID of the multipart upload. This value is also included as part of the + // location. + UploadId *string `location:"header" locationName:"x-amz-multipart-upload-id" type:"string"` +} + +// String returns the string representation +func (s InitiateMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InitiateMultipartUploadOutput) GoString() string { + return s.String() +} + +// The input values for InitiateVaultLock. +type InitiateVaultLockInput struct { + _ struct{} `type:"structure" payload:"Policy"` + + // The AccountId value is the AWS account ID. This value must match the AWS + // account ID associated with the credentials used to sign the request. You + // can either specify an AWS account ID or optionally a single apos-apos (hyphen), + // in which case Amazon Glacier uses the AWS account ID associated with the + // credentials used to sign the request. If you specify your account ID, do + // not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The vault lock policy as a JSON string, which uses "\" as an escape character. + Policy *VaultLockPolicy `locationName:"policy" type:"structure"` + + // The name of the vault. + VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s InitiateVaultLockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InitiateVaultLockInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InitiateVaultLockInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InitiateVaultLockInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.VaultName == nil { + invalidParams.Add(request.NewErrParamRequired("VaultName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the Amazon Glacier response to your request. +type InitiateVaultLockOutput struct { + _ struct{} `type:"structure"` + + // The lock ID, which is used to complete the vault locking process. 
+ LockId *string `location:"header" locationName:"x-amz-lock-id" type:"string"`
+}
+
+// String returns the string representation
+func (s InitiateVaultLockOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InitiateVaultLockOutput) GoString() string {
+ return s.String()
+}
+
+// Describes the options for a range inventory retrieval job.
+type InventoryRetrievalJobDescription struct {
+ _ struct{} `type:"structure"`
+
+ // The end of the date range in UTC for vault inventory retrieval that includes
+ // archives created before this date. A string representation of ISO 8601 date
+ // format, for example, 2013-03-20T17:03:43Z.
+ EndDate *string `type:"string"`
+
+ // The output format for the vault inventory list, which is set by the InitiateJob
+ // request when initiating a job to retrieve a vault inventory. Valid values
+ // are "CSV" and "JSON".
+ Format *string `type:"string"`
+
+ // Specifies the maximum number of inventory items returned per vault inventory
+ // retrieval request. This limit is set when initiating the job with the InitiateJob
+ // request.
+ Limit *string `type:"string"`
+
+ // An opaque string that represents where to continue pagination of the vault
+ // inventory retrieval results. You use the marker in a new InitiateJob request
+ // to obtain additional inventory items. If there are no more inventory items,
+ // this value is null. For more information, see Range Inventory Retrieval
+ // (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-initiate-job-post.html#api-initiate-job-post-vault-inventory-list-filtering).
+ Marker *string `type:"string"`
+
+ // The start of the date range in UTC for vault inventory retrieval that includes
+ // archives created on or after this date. A string representation of ISO 8601
+ // date format, for example, 2013-03-20T17:03:43Z.
+ StartDate *string `type:"string"`
+}
+
+// String returns the string representation
+func (s InventoryRetrievalJobDescription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InventoryRetrievalJobDescription) GoString() string {
+ return s.String()
+}
+
+// Provides options for specifying a range inventory retrieval job.
+type InventoryRetrievalJobInput struct {
+ _ struct{} `type:"structure"`
+
+ // The end of the date range in UTC for vault inventory retrieval that includes
+ // archives created before this date. A string representation of ISO 8601 date
+ // format, for example, 2013-03-20T17:03:43Z.
+ EndDate *string `type:"string"`
+
+ // Specifies the maximum number of inventory items returned per vault inventory
+ // retrieval request. Valid values are greater than or equal to 1.
+ Limit *string `type:"string"`
+
+ // An opaque string that represents where to continue pagination of the vault
+ // inventory retrieval results. You use the marker in a new InitiateJob request
+ // to obtain additional inventory items. If there are no more inventory items,
+ // this value is null.
+ Marker *string `type:"string"`
+
+ // The start of the date range in UTC for vault inventory retrieval that includes
+ // archives created on or after this date. A string representation of ISO 8601
+ // date format, for example, 2013-03-20T17:03:43Z.
+ StartDate *string `type:"string"`
+}
+
+// String returns the string representation
+func (s InventoryRetrievalJobInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InventoryRetrievalJobInput) GoString() string {
+ return s.String()
+}
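+
+// Editor's note: a minimal sketch (not part of the generated upstream file)
+// of paginating a range inventory retrieval. Each InitiateJob call passes the
+// Marker returned in the previous job's description until it comes back null.
+// svc, vault, and marker are hypothetical names:
+//
+//	out, err := svc.InitiateJob(&glacier.InitiateJobInput{
+//	    VaultName: aws.String(vault),
+//	    JobParameters: &glacier.JobParameters{
+//	        Type: aws.String("inventory-retrieval"),
+//	        InventoryRetrievalParameters: &glacier.InventoryRetrievalJobInput{
+//	            Limit:  aws.String("1000"),
+//	            Marker: marker, // nil on the first request
+//	        },
+//	    },
+//	})
+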
+// Describes an Amazon Glacier job.
+type JobDescription struct {
+ _ struct{} `type:"structure"`
+
+ // The job type. It is either ArchiveRetrieval or InventoryRetrieval.
+ Action *string `type:"string" enum:"ActionCode"`
+
+ // For an ArchiveRetrieval job, this is the archive ID requested for download.
+ // Otherwise, this field is null.
+ ArchiveId *string `type:"string"`
+
+ // The SHA256 tree hash of the entire archive for an archive retrieval. For
+ // inventory retrieval jobs, this field is null.
+ ArchiveSHA256TreeHash *string `type:"string"`
+
+ // For an ArchiveRetrieval job, this is the size in bytes of the archive being
+ // requested for download. For the InventoryRetrieval job, the value is null.
+ ArchiveSizeInBytes *int64 `type:"long"`
+
+ // The job status. When a job is completed, you get the job's output.
+ Completed *bool `type:"boolean"`
+
+ // The UTC time that the archive retrieval request completed. While the job
+ // is in progress, the value will be null.
+ CompletionDate *string `type:"string"`
+
+ // The UTC date when the job was created. A string representation of ISO 8601
+ // date format, for example, "2012-03-20T17:03:43.221Z".
+ CreationDate *string `type:"string"`
+
+ // Parameters used for range inventory retrieval.
+ InventoryRetrievalParameters *InventoryRetrievalJobDescription `type:"structure"`
+
+ // For an InventoryRetrieval job, this is the size in bytes of the inventory
+ // requested for download. For the ArchiveRetrieval job, the value is null.
+ InventorySizeInBytes *int64 `type:"long"`
+
+ // The job description you provided when you initiated the job.
+ JobDescription *string `type:"string"`
+
+ // An opaque string that identifies an Amazon Glacier job.
+ JobId *string `type:"string"`
+
+ // The retrieved byte range for archive retrieval jobs in the form "StartByteValue-EndByteValue".
+ // If no range was specified in the archive retrieval, then the whole archive
+ // is retrieved and StartByteValue equals 0 and EndByteValue equals the size
+ // of the archive minus 1. For inventory retrieval jobs this field is null.
+ RetrievalByteRange *string `type:"string"`
+
+ // For an ArchiveRetrieval job, it is the checksum of the archive. Otherwise,
+ // the value is null.
+ //
+ // The SHA256 tree hash value for the requested range of an archive. If the
+ // InitiateJob request for an archive specified a tree-hash aligned range,
+ // then this field returns a value.
+ //
+ // For the specific case when the whole archive is retrieved, this value is
+ // the same as the ArchiveSHA256TreeHash value.
+ //
+ // This field is null in the following situations:
+ //
+ //    * Archive retrieval jobs that specify a range that is not tree-hash aligned.
+ //
+ //    * Archive retrieval jobs that specify a range that is equal to the whole
+ //    archive, when the job status is InProgress.
+ //
+ //    * Inventory jobs.
+ SHA256TreeHash *string `type:"string"`
+
+ // An Amazon Simple Notification Service (Amazon SNS) topic that receives notification.
+ SNSTopic *string `type:"string"`
+
+ // The status code can be InProgress, Succeeded, or Failed, and indicates the
+ // status of the job.
+ StatusCode *string `type:"string" enum:"StatusCode"`
+
+ // A friendly message that describes the job status.
+ StatusMessage *string `type:"string"`
+
+ // The Amazon Resource Name (ARN) of the vault from which the archive retrieval
+ // was requested.
+ VaultARN *string `type:"string"`
+}
+
+// String returns the string representation
+func (s JobDescription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s JobDescription) GoString() string {
+ return s.String()
+}
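+
+// Editor's note: a minimal sketch (not part of the generated upstream file)
+// of JobParameters for retrieving the first 2 MB of an archive. The range is
+// megabyte aligned as required: 0 is divisible by 1 MB, and 2097151 plus 1 is
+// 2*1024*1024. archiveID is a hypothetical variable:
+//
+//	params := &glacier.JobParameters{
+//	    Type:               aws.String("archive-retrieval"),
+//	    ArchiveId:          aws.String(archiveID),
+//	    RetrievalByteRange: aws.String("0-2097151"),
+//	}
+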
+// Provides options for defining a job.
+type JobParameters struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the archive that you want to retrieve. This field is required only
+ // if Type is set to archive-retrieval. An error occurs if you specify this
+ // request parameter for an inventory retrieval job request.
+ ArchiveId *string `type:"string"`
+
+ // The optional description for the job. The description must be less than or
+ // equal to 1,024 bytes. The allowable characters are 7-bit ASCII without control
+ // codes, specifically ASCII values 32-126 decimal or 0x20-0x7E hexadecimal.
+ Description *string `type:"string"`
+
+ // When initiating a job to retrieve a vault inventory, you can optionally add
+ // this parameter to your request to specify the output format. If you are initiating
+ // an inventory job and do not specify a Format field, JSON is the default format.
+ // Valid values are "CSV" and "JSON".
+ Format *string `type:"string"`
+
+ // Input parameters used for range inventory retrieval.
+ InventoryRetrievalParameters *InventoryRetrievalJobInput `type:"structure"`
+
+ // The byte range to retrieve for an archive retrieval, in the form "StartByteValue-EndByteValue".
+ // If not specified, the whole archive is retrieved. If specified, the byte
+ // range must be megabyte (1024*1024) aligned, which means that StartByteValue
+ // must be divisible by 1 MB and EndByteValue plus 1 must be divisible by 1
+ // MB or be the end of the archive specified as the archive byte size value
+ // minus 1. If RetrievalByteRange is not megabyte aligned, this operation returns
+ // a 400 response.
+ //
+ // An error occurs if you specify this field for an inventory retrieval job
+ // request.
+ RetrievalByteRange *string `type:"string"`
+
+ // The Amazon SNS topic ARN to which Amazon Glacier sends a notification when
+ // the job is completed and the output is ready for you to download. The specified
+ // topic publishes the notification to its subscribers. The SNS topic must exist.
+ SNSTopic *string `type:"string"`
+
+ // The job type. You can initiate a job to retrieve an archive or get an inventory
+ // of a vault. Valid values are "archive-retrieval" and "inventory-retrieval".
+ Type *string `type:"string"`
+}
+
+// String returns the string representation
+func (s JobParameters) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s JobParameters) GoString() string {
+ return s.String()
+}
+
+// Provides options for retrieving a job list for an Amazon Glacier vault.
+type ListJobsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The AccountId value is the AWS account ID of the account that owns the vault.
+ // You can either specify an AWS account ID or optionally a single '-' (hyphen),
+ // in which case Amazon Glacier uses the AWS account ID associated with the
+ // credentials used to sign the request. If you use an account ID, do not include
+ // any hyphens ('-') in the ID.
+ AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+ // Specifies the state of the jobs to return. You can specify true or false.
+ Completed *string `location:"querystring" locationName:"completed" type:"string"`
+
+ // Specifies that the response be limited to the specified number of items or
+ // fewer. If not specified, the List Jobs operation returns up to 1,000 jobs.
+ Limit *string `location:"querystring" locationName:"limit" type:"string"`
+
+ // An opaque string used for pagination. This value specifies the job at which
+ // the listing of jobs should begin. Get the marker value from a previous List
+ // Jobs response. You need only include the marker if you are continuing the
+ // pagination of results started in a previous List Jobs request.
+ Marker *string `location:"querystring" locationName:"marker" type:"string"`
+
+ // Specifies the type of job status to return. You can specify the following
+ // values: "InProgress", "Succeeded", or "Failed".
+ Statuscode *string `location:"querystring" locationName:"statuscode" type:"string"`
+
+ // The name of the vault.
+ VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListJobsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListJobsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListJobsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListJobsInput"}
+ if s.AccountId == nil {
+ invalidParams.Add(request.NewErrParamRequired("AccountId"))
+ }
+ if s.VaultName == nil {
+ invalidParams.Add(request.NewErrParamRequired("VaultName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Contains the Amazon Glacier response to your request.
+type ListJobsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of job objects. Each job object contains metadata describing the job.
+ JobList []*JobDescription `type:"list"`
+
+ // An opaque string that represents where to continue pagination of the results.
+ // You use this value in a new List Jobs request to obtain more jobs in the
+ // list. If there are no more jobs, this value is null.
+ Marker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListJobsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListJobsOutput) GoString() string {
+ return s.String()
+}
+
+// Provides options for retrieving a list of in-progress multipart uploads for
+// an Amazon Glacier vault.
+type ListMultipartUploadsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The AccountId value is the AWS account ID of the account that owns the vault.
+ // You can either specify an AWS account ID or optionally a single '-' (hyphen),
+ // in which case Amazon Glacier uses the AWS account ID associated with the
+ // credentials used to sign the request. If you use an account ID, do not include
+ // any hyphens ('-') in the ID.
+ AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+ // Specifies the maximum number of uploads returned in the response body. If
+ // this value is not specified, the List Uploads operation returns up to 1,000
+ // uploads.
+ Limit *string `location:"querystring" locationName:"limit" type:"string"`
+
+ // An opaque string used for pagination. This value specifies the upload at
+ // which the listing of uploads should begin. Get the marker value from a previous
+ // List Uploads response. You need only include the marker if you are continuing
+ // the pagination of results started in a previous List Uploads request.
+ Marker *string `location:"querystring" locationName:"marker" type:"string"`
+
+ // The name of the vault.
+ VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListMultipartUploadsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListMultipartUploadsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListMultipartUploadsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListMultipartUploadsInput"}
+ if s.AccountId == nil {
+ invalidParams.Add(request.NewErrParamRequired("AccountId"))
+ }
+ if s.VaultName == nil {
+ invalidParams.Add(request.NewErrParamRequired("VaultName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Contains the Amazon Glacier response to your request.
+type ListMultipartUploadsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An opaque string that represents where to continue pagination of the results.
+ // You use the marker in a new List Multipart Uploads request to obtain more
+ // uploads in the list. If there are no more uploads, this value is null.
+ Marker *string `type:"string"`
+
+ // A list of in-progress multipart uploads.
+ UploadsList []*UploadListElement `type:"list"`
+}
+
+// String returns the string representation
+func (s ListMultipartUploadsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListMultipartUploadsOutput) GoString() string {
+ return s.String()
+}
+
+// Provides options for retrieving a list of parts of an archive that have been
+// uploaded in a specific multipart upload.
+type ListPartsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The AccountId value is the AWS account ID of the account that owns the vault.
+ // You can either specify an AWS account ID or optionally a single '-' (hyphen),
+ // in which case Amazon Glacier uses the AWS account ID associated with the
+ // credentials used to sign the request. If you use an account ID, do not include
+ // any hyphens ('-') in the ID.
+ AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"`
+
+ // Specifies the maximum number of parts returned in the response body. If this
+ // value is not specified, the List Parts operation returns up to 1,000 parts.
+ Limit *string `location:"querystring" locationName:"limit" type:"string"`
+
+ // An opaque string used for pagination. This value specifies the part at which
+ // the listing of parts should begin. Get the marker value from a previous List
+ // Parts response. You need only include the marker if you are continuing the
+ // pagination of results started in a previous List Parts request.
+ Marker *string `location:"querystring" locationName:"marker" type:"string"`
+
+ // The upload ID of the multipart upload.
+ UploadId *string `location:"uri" locationName:"uploadId" type:"string" required:"true"` + + // The name of the vault. + VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListPartsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPartsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListPartsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListPartsInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + if s.VaultName == nil { + invalidParams.Add(request.NewErrParamRequired("VaultName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the Amazon Glacier response to your request. +type ListPartsOutput struct { + _ struct{} `type:"structure"` + + // The description of the archive that was specified in the Initiate Multipart + // Upload request. + ArchiveDescription *string `type:"string"` + + // The UTC time at which the multipart upload was initiated. + CreationDate *string `type:"string"` + + // An opaque string that represents where to continue pagination of the results. + // You use the marker in a new List Parts request to obtain more jobs in the + // list. If there are no more parts, this value is null. + Marker *string `type:"string"` + + // The ID of the upload to which the parts are associated. + MultipartUploadId *string `type:"string"` + + // The part size in bytes. + PartSizeInBytes *int64 `type:"long"` + + // A list of the part sizes of the multipart upload. + Parts []*PartListElement `type:"list"` + + // The Amazon Resource Name (ARN) of the vault to which the multipart upload + // was initiated. + VaultARN *string `type:"string"` +} + +// String returns the string representation +func (s ListPartsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPartsOutput) GoString() string { + return s.String() +} + +// The input value for ListTagsForVaultInput. +type ListTagsForVaultInput struct { + _ struct{} `type:"structure"` + + // The AccountId value is the AWS account ID of the account that owns the vault. + // You can either specify an AWS account ID or optionally a single apos-apos + // (hyphen), in which case Amazon Glacier uses the AWS account ID associated + // with the credentials used to sign the request. If you use an account ID, + // do not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The name of the vault. + VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForVaultInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForVaultInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListTagsForVaultInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForVaultInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.VaultName == nil { + invalidParams.Add(request.NewErrParamRequired("VaultName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the Amazon Glacier response to your request. +type ListTagsForVaultOutput struct { + _ struct{} `type:"structure"` + + // The tags attached to the vault. Each tag is composed of a key and a value. + Tags map[string]*string `type:"map"` +} + +// String returns the string representation +func (s ListTagsForVaultOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForVaultOutput) GoString() string { + return s.String() +} + +// Provides options to retrieve the vault list owned by the calling user's account. +// The list provides metadata information for each vault. +type ListVaultsInput struct { + _ struct{} `type:"structure"` + + // The AccountId value is the AWS account ID. This value must match the AWS + // account ID associated with the credentials used to sign the request. You + // can either specify an AWS account ID or optionally a single apos-apos (hyphen), + // in which case Amazon Glacier uses the AWS account ID associated with the + // credentials used to sign the request. If you specify your account ID, do + // not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The maximum number of items returned in the response. If you don't specify + // a value, the List Vaults operation returns up to 1,000 items. + Limit *string `location:"querystring" locationName:"limit" type:"string"` + + // A string used for pagination. The marker specifies the vault ARN after which + // the listing of vaults should begin. + Marker *string `location:"querystring" locationName:"marker" type:"string"` +} + +// String returns the string representation +func (s ListVaultsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListVaultsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListVaultsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListVaultsInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the Amazon Glacier response to your request. +type ListVaultsOutput struct { + _ struct{} `type:"structure"` + + // The vault ARN at which to continue pagination of the results. You use the + // marker in another List Vaults request to obtain more vaults in the list. + Marker *string `type:"string"` + + // List of vaults. + VaultList []*DescribeVaultOutput `type:"list"` +} + +// String returns the string representation +func (s ListVaultsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListVaultsOutput) GoString() string { + return s.String() +} + +// A list of the part sizes of the multipart upload. +type PartListElement struct { + _ struct{} `type:"structure"` + + // The byte range of a part, inclusive of the upper value of the range. 
+ RangeInBytes *string `type:"string"` + + // The SHA256 tree hash value that Amazon Glacier calculated for the part. This + // field is never null. + SHA256TreeHash *string `type:"string"` +} + +// String returns the string representation +func (s PartListElement) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PartListElement) GoString() string { + return s.String() +} + +// The input value for RemoveTagsFromVaultInput. +type RemoveTagsFromVaultInput struct { + _ struct{} `type:"structure"` + + // The AccountId value is the AWS account ID of the account that owns the vault. + // You can either specify an AWS account ID or optionally a single apos-apos + // (hyphen), in which case Amazon Glacier uses the AWS account ID associated + // with the credentials used to sign the request. If you use an account ID, + // do not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // A list of tag keys. Each corresponding tag is removed from the vault. + TagKeys []*string `type:"list"` + + // The name of the vault. + VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s RemoveTagsFromVaultInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsFromVaultInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RemoveTagsFromVaultInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemoveTagsFromVaultInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.VaultName == nil { + invalidParams.Add(request.NewErrParamRequired("VaultName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RemoveTagsFromVaultOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveTagsFromVaultOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsFromVaultOutput) GoString() string { + return s.String() +} + +// SetDataRetrievalPolicy input. +type SetDataRetrievalPolicyInput struct { + _ struct{} `type:"structure"` + + // The AccountId value is the AWS account ID. This value must match the AWS + // account ID associated with the credentials used to sign the request. You + // can either specify an AWS account ID or optionally a single apos-apos (hyphen), + // in which case Amazon Glacier uses the AWS account ID associated with the + // credentials used to sign the request. If you specify your account ID, do + // not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The data retrieval policy in JSON format. + Policy *DataRetrievalPolicy `type:"structure"` +} + +// String returns the string representation +func (s SetDataRetrievalPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetDataRetrievalPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *SetDataRetrievalPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetDataRetrievalPolicyInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type SetDataRetrievalPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetDataRetrievalPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetDataRetrievalPolicyOutput) GoString() string { + return s.String() +} + +// SetVaultAccessPolicy input. +type SetVaultAccessPolicyInput struct { + _ struct{} `type:"structure" payload:"Policy"` + + // The AccountId value is the AWS account ID of the account that owns the vault. + // You can either specify an AWS account ID or optionally a single apos-apos + // (hyphen), in which case Amazon Glacier uses the AWS account ID associated + // with the credentials used to sign the request. If you use an account ID, + // do not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The vault access policy as a JSON string. + Policy *VaultAccessPolicy `locationName:"policy" type:"structure"` + + // The name of the vault. + VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s SetVaultAccessPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetVaultAccessPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetVaultAccessPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetVaultAccessPolicyInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.VaultName == nil { + invalidParams.Add(request.NewErrParamRequired("VaultName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type SetVaultAccessPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetVaultAccessPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetVaultAccessPolicyOutput) GoString() string { + return s.String() +} + +// Provides options to configure notifications that will be sent when specific +// events happen to a vault. +type SetVaultNotificationsInput struct { + _ struct{} `type:"structure" payload:"VaultNotificationConfig"` + + // The AccountId value is the AWS account ID of the account that owns the vault. + // You can either specify an AWS account ID or optionally a single apos-apos + // (hyphen), in which case Amazon Glacier uses the AWS account ID associated + // with the credentials used to sign the request. If you use an account ID, + // do not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The name of the vault. + VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` + + // Provides options for specifying notification configuration. 
+ VaultNotificationConfig *VaultNotificationConfig `locationName:"vaultNotificationConfig" type:"structure"` +} + +// String returns the string representation +func (s SetVaultNotificationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetVaultNotificationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetVaultNotificationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetVaultNotificationsInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.VaultName == nil { + invalidParams.Add(request.NewErrParamRequired("VaultName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type SetVaultNotificationsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetVaultNotificationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetVaultNotificationsOutput) GoString() string { + return s.String() +} + +// Provides options to add an archive to a vault. +type UploadArchiveInput struct { + _ struct{} `type:"structure" payload:"Body"` + + // The AccountId value is the AWS account ID of the account that owns the vault. + // You can either specify an AWS account ID or optionally a single apos-apos + // (hyphen), in which case Amazon Glacier uses the AWS account ID associated + // with the credentials used to sign the request. If you use an account ID, + // do not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The optional description of the archive you are uploading. + ArchiveDescription *string `location:"header" locationName:"x-amz-archive-description" type:"string"` + + // The data to upload. + Body io.ReadSeeker `locationName:"body" type:"blob"` + + // The SHA256 tree hash of the data being uploaded. + Checksum *string `location:"header" locationName:"x-amz-sha256-tree-hash" type:"string"` + + // The name of the vault. + VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s UploadArchiveInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadArchiveInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UploadArchiveInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UploadArchiveInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.VaultName == nil { + invalidParams.Add(request.NewErrParamRequired("VaultName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A list of in-progress multipart uploads for a vault. +type UploadListElement struct { + _ struct{} `type:"structure"` + + // The description of the archive that was specified in the Initiate Multipart + // Upload request. + ArchiveDescription *string `type:"string"` + + // The UTC time at which the multipart upload was initiated. + CreationDate *string `type:"string"` + + // The ID of a multipart upload. 
+ MultipartUploadId *string `type:"string"` + + // The part size, in bytes, specified in the Initiate Multipart Upload request. + // This is the size of all the parts in the upload except the last part, which + // may be smaller than this size. + PartSizeInBytes *int64 `type:"long"` + + // The Amazon Resource Name (ARN) of the vault that contains the archive. + VaultARN *string `type:"string"` +} + +// String returns the string representation +func (s UploadListElement) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadListElement) GoString() string { + return s.String() +} + +// Provides options to upload a part of an archive in a multipart upload operation. +type UploadMultipartPartInput struct { + _ struct{} `type:"structure" payload:"Body"` + + // The AccountId value is the AWS account ID of the account that owns the vault. + // You can either specify an AWS account ID or optionally a single apos-apos + // (hyphen), in which case Amazon Glacier uses the AWS account ID associated + // with the credentials used to sign the request. If you use an account ID, + // do not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The data to upload. + Body io.ReadSeeker `locationName:"body" type:"blob"` + + // The SHA256 tree hash of the data being uploaded. + Checksum *string `location:"header" locationName:"x-amz-sha256-tree-hash" type:"string"` + + // Identifies the range of bytes in the assembled archive that will be uploaded + // in this part. Amazon Glacier uses this information to assemble the archive + // in the proper sequence. The format of this header follows RFC 2616. An example + // header is Content-Range:bytes 0-4194303/*. + Range *string `location:"header" locationName:"Content-Range" type:"string"` + + // The upload ID of the multipart upload. + UploadId *string `location:"uri" locationName:"uploadId" type:"string" required:"true"` + + // The name of the vault. + VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s UploadMultipartPartInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadMultipartPartInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UploadMultipartPartInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UploadMultipartPartInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + if s.VaultName == nil { + invalidParams.Add(request.NewErrParamRequired("VaultName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the Amazon Glacier response to your request. +type UploadMultipartPartOutput struct { + _ struct{} `type:"structure"` + + // The SHA256 tree hash that Amazon Glacier computed for the uploaded part. 
+ Checksum *string `location:"header" locationName:"x-amz-sha256-tree-hash" type:"string"`
+}
+
+// String returns the string representation
+func (s UploadMultipartPartOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UploadMultipartPartOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the vault access policy.
+type VaultAccessPolicy struct {
+ _ struct{} `type:"structure"`
+
+ // The vault access policy.
+ Policy *string `type:"string"`
+}
+
+// String returns the string representation
+func (s VaultAccessPolicy) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s VaultAccessPolicy) GoString() string {
+ return s.String()
+}
+
+// Contains the vault lock policy.
+type VaultLockPolicy struct {
+ _ struct{} `type:"structure"`
+
+ // The vault lock policy.
+ Policy *string `type:"string"`
+}
+
+// String returns the string representation
+func (s VaultLockPolicy) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s VaultLockPolicy) GoString() string {
+ return s.String()
+}
+
+// Represents a vault's notification configuration.
+type VaultNotificationConfig struct {
+ _ struct{} `type:"structure"`
+
+ // A list of one or more events for which Amazon Glacier will send a notification
+ // to the specified Amazon SNS topic.
+ Events []*string `type:"list"`
+
+ // The Amazon Simple Notification Service (Amazon SNS) topic Amazon Resource
+ // Name (ARN).
+ SNSTopic *string `type:"string"`
+}
+
+// String returns the string representation
+func (s VaultNotificationConfig) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s VaultNotificationConfig) GoString() string {
+ return s.String()
+}
+
+const (
+ // @enum ActionCode
+ ActionCodeArchiveRetrieval = "ArchiveRetrieval"
+ // @enum ActionCode
+ ActionCodeInventoryRetrieval = "InventoryRetrieval"
+)
+
+const (
+ // @enum StatusCode
+ StatusCodeInProgress = "InProgress"
+ // @enum StatusCode
+ StatusCodeSucceeded = "Succeeded"
+ // @enum StatusCode
+ StatusCodeFailed = "Failed"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/glacier/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/glacier/customizations.go
new file mode 100644
index 000000000..613606019
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/glacier/customizations.go
@@ -0,0 +1,54 @@
+package glacier
+
+import (
+ "encoding/hex"
+ "reflect"
+
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+var (
+ defaultAccountID = "-"
+)
+
+// Install the Glacier customizations on every request created by the client.
+func init() {
+ initRequest = func(r *request.Request) {
+ r.Handlers.Validate.PushFront(addAccountID)
+ r.Handlers.Validate.PushFront(copyParams) // pushed to the front last, so it runs first
+ r.Handlers.Build.PushBack(addChecksum)
+ r.Handlers.Build.PushBack(addAPIVersion)
+ }
+}
+
+// copyParams replaces r.Params with a deep copy so the handlers below can
+// mutate the parameters without side effects on the caller's input struct.
+func copyParams(r *request.Request) {
+ r.Params = awsutil.CopyOf(r.Params)
+}
+
+// addAccountID fills in the default account ID ("-", meaning the account of
+// the signing credentials) when the caller leaves AccountId unset.
+func addAccountID(r *request.Request) {
+ if !r.ParamsFilled() {
+ return
+ }
+
+ v := reflect.Indirect(reflect.ValueOf(r.Params))
+ if f := v.FieldByName("AccountId"); f.IsNil() {
+ f.Set(reflect.ValueOf(&defaultAccountID))
+ }
+}
+
+// addChecksum computes the SHA256 tree hash and linear hash of the request
+// body and sets the corresponding headers, unless the caller already supplied
+// a tree hash.
+func addChecksum(r *request.Request) {
+ if r.Body == nil || r.HTTPRequest.Header.Get("X-Amz-Sha256-Tree-Hash") != "" {
+ return
+ }
+
+ h := ComputeHashes(r.Body)
+ hstr := hex.EncodeToString(h.TreeHash)
+ r.HTTPRequest.Header.Set("X-Amz-Sha256-Tree-Hash", hstr)
+
+ hLstr :=
hex.EncodeToString(h.LinearHash) + r.HTTPRequest.Header.Set("X-Amz-Content-Sha256", hLstr) +} + +func addAPIVersion(r *request.Request) { + r.HTTPRequest.Header.Set("X-Amz-Glacier-Version", r.ClientInfo.APIVersion) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/glacier/customizations_test.go b/vendor/github.com/aws/aws-sdk-go/service/glacier/customizations_test.go new file mode 100644 index 000000000..15b7a8ec6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/glacier/customizations_test.go @@ -0,0 +1,90 @@ +// +build !integration + +package glacier_test + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/glacier" +) + +var ( + payloadBuf = func() *bytes.Reader { + buf := make([]byte, 5767168) // 5.5MB buffer + for i := range buf { + buf[i] = '0' // Fill with zero characters + } + return bytes.NewReader(buf) + }() + + svc = glacier.New(unit.Session) +) + +func TestCustomizations(t *testing.T) { + req, _ := svc.UploadArchiveRequest(&glacier.UploadArchiveInput{ + VaultName: aws.String("vault"), + Body: payloadBuf, + }) + err := req.Build() + assert.NoError(t, err) + + // Sets API version + assert.Equal(t, req.ClientInfo.APIVersion, req.HTTPRequest.Header.Get("x-amz-glacier-version")) + + // Sets Account ID + v, _ := awsutil.ValuesAtPath(req.Params, "AccountId") + assert.Equal(t, "-", *(v[0].(*string))) + + // Computes checksums + linear := "68aff0c5a91aa0491752bfb96e3fef33eb74953804f6a2f7b708d5bcefa8ff6b" + tree := "154e26c78fd74d0c2c9b3cc4644191619dc4f2cd539ae2a74d5fd07957a3ee6a" + assert.Equal(t, linear, req.HTTPRequest.Header.Get("x-amz-content-sha256")) + assert.Equal(t, tree, req.HTTPRequest.Header.Get("x-amz-sha256-tree-hash")) +} + +func TestShortcircuitTreehash(t *testing.T) { + req, _ := svc.UploadArchiveRequest(&glacier.UploadArchiveInput{ + VaultName: aws.String("vault"), + Body: payloadBuf, + Checksum: aws.String("000"), + }) + err := req.Build() + assert.NoError(t, err) + + assert.Equal(t, "000", req.HTTPRequest.Header.Get("x-amz-sha256-tree-hash")) +} + +func TestFillAccountIDWithNilStruct(t *testing.T) { + req, _ := svc.ListVaultsRequest(nil) + err := req.Build() + assert.NoError(t, err) + + empty := "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + + // Sets Account ID + v, _ := awsutil.ValuesAtPath(req.Params, "AccountId") + assert.Equal(t, "-", *(v[0].(*string))) + + // Does not set tree hash + assert.Equal(t, empty, req.HTTPRequest.Header.Get("x-amz-content-sha256")) + assert.Equal(t, "", req.HTTPRequest.Header.Get("x-amz-sha256-tree-hash")) +} + +func TestHashOnce(t *testing.T) { + req, _ := svc.UploadArchiveRequest(&glacier.UploadArchiveInput{ + VaultName: aws.String("vault"), + Body: payloadBuf, + }) + req.HTTPRequest.Header.Set("X-Amz-Sha256-Tree-Hash", "0") + + err := req.Build() + assert.NoError(t, err) + + assert.Equal(t, "0", req.HTTPRequest.Header.Get("x-amz-sha256-tree-hash")) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/glacier/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/glacier/examples_test.go new file mode 100644 index 000000000..9384d3dae --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/glacier/examples_test.go @@ -0,0 +1,706 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
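// Not part of the generated patch: a minimal editorial sketch of how the
// customizations above fit together. AccountId can be left unset, because the
// addAccountID validate handler defaults it to "-", and Checksum can be left
// unset, because the addChecksum build handler computes the SHA256 tree-hash
// headers from the body. The vault name "examplevault" and the use of the
// default session are assumptions for illustration only.
//
//	package main
//
//	import (
//		"bytes"
//		"fmt"
//
//		"github.com/aws/aws-sdk-go/aws"
//		"github.com/aws/aws-sdk-go/aws/session"
//		"github.com/aws/aws-sdk-go/service/glacier"
//	)
//
//	func main() {
//		svc := glacier.New(session.New())
//
//		resp, err := svc.UploadArchive(&glacier.UploadArchiveInput{
//			// AccountId omitted: addAccountID fills in "-".
//			VaultName: aws.String("examplevault"), // assumed vault name
//			Body:      bytes.NewReader([]byte("PAYLOAD")),
//			// Checksum omitted: addChecksum computes the tree hash.
//		})
//		if err != nil {
//			fmt.Println(err)
//			return
//		}
//		fmt.Println(aws.StringValue(resp.ArchiveId))
//	}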
+ +package glacier_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/glacier" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleGlacier_AbortMultipartUpload() { + svc := glacier.New(session.New()) + + params := &glacier.AbortMultipartUploadInput{ + AccountId: aws.String("string"), // Required + UploadId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.AbortMultipartUpload(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_AbortVaultLock() { + svc := glacier.New(session.New()) + + params := &glacier.AbortVaultLockInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.AbortVaultLock(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_AddTagsToVault() { + svc := glacier.New(session.New()) + + params := &glacier.AddTagsToVaultInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + Tags: map[string]*string{ + "Key": aws.String("TagValue"), // Required + // More values... + }, + } + resp, err := svc.AddTagsToVault(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_CompleteMultipartUpload() { + svc := glacier.New(session.New()) + + params := &glacier.CompleteMultipartUploadInput{ + AccountId: aws.String("string"), // Required + UploadId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + ArchiveSize: aws.String("string"), + Checksum: aws.String("string"), + } + resp, err := svc.CompleteMultipartUpload(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_CompleteVaultLock() { + svc := glacier.New(session.New()) + + params := &glacier.CompleteVaultLockInput{ + AccountId: aws.String("string"), // Required + LockId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.CompleteVaultLock(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_CreateVault() { + svc := glacier.New(session.New()) + + params := &glacier.CreateVaultInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.CreateVault(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleGlacier_DeleteArchive() { + svc := glacier.New(session.New()) + + params := &glacier.DeleteArchiveInput{ + AccountId: aws.String("string"), // Required + ArchiveId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.DeleteArchive(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_DeleteVault() { + svc := glacier.New(session.New()) + + params := &glacier.DeleteVaultInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.DeleteVault(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_DeleteVaultAccessPolicy() { + svc := glacier.New(session.New()) + + params := &glacier.DeleteVaultAccessPolicyInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.DeleteVaultAccessPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_DeleteVaultNotifications() { + svc := glacier.New(session.New()) + + params := &glacier.DeleteVaultNotificationsInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.DeleteVaultNotifications(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_DescribeJob() { + svc := glacier.New(session.New()) + + params := &glacier.DescribeJobInput{ + AccountId: aws.String("string"), // Required + JobId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.DescribeJob(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_DescribeVault() { + svc := glacier.New(session.New()) + + params := &glacier.DescribeVaultInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.DescribeVault(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_GetDataRetrievalPolicy() { + svc := glacier.New(session.New()) + + params := &glacier.GetDataRetrievalPolicyInput{ + AccountId: aws.String("string"), // Required + } + resp, err := svc.GetDataRetrievalPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleGlacier_GetJobOutput() { + svc := glacier.New(session.New()) + + params := &glacier.GetJobOutputInput{ + AccountId: aws.String("string"), // Required + JobId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + Range: aws.String("string"), + } + resp, err := svc.GetJobOutput(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_GetVaultAccessPolicy() { + svc := glacier.New(session.New()) + + params := &glacier.GetVaultAccessPolicyInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.GetVaultAccessPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_GetVaultLock() { + svc := glacier.New(session.New()) + + params := &glacier.GetVaultLockInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.GetVaultLock(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_GetVaultNotifications() { + svc := glacier.New(session.New()) + + params := &glacier.GetVaultNotificationsInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.GetVaultNotifications(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_InitiateJob() { + svc := glacier.New(session.New()) + + params := &glacier.InitiateJobInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + JobParameters: &glacier.JobParameters{ + ArchiveId: aws.String("string"), + Description: aws.String("string"), + Format: aws.String("string"), + InventoryRetrievalParameters: &glacier.InventoryRetrievalJobInput{ + EndDate: aws.String("string"), + Limit: aws.String("string"), + Marker: aws.String("string"), + StartDate: aws.String("string"), + }, + RetrievalByteRange: aws.String("string"), + SNSTopic: aws.String("string"), + Type: aws.String("string"), + }, + } + resp, err := svc.InitiateJob(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_InitiateMultipartUpload() { + svc := glacier.New(session.New()) + + params := &glacier.InitiateMultipartUploadInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + ArchiveDescription: aws.String("string"), + PartSize: aws.String("string"), + } + resp, err := svc.InitiateMultipartUpload(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleGlacier_InitiateVaultLock() { + svc := glacier.New(session.New()) + + params := &glacier.InitiateVaultLockInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + Policy: &glacier.VaultLockPolicy{ + Policy: aws.String("string"), + }, + } + resp, err := svc.InitiateVaultLock(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_ListJobs() { + svc := glacier.New(session.New()) + + params := &glacier.ListJobsInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + Completed: aws.String("string"), + Limit: aws.String("string"), + Marker: aws.String("string"), + Statuscode: aws.String("string"), + } + resp, err := svc.ListJobs(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_ListMultipartUploads() { + svc := glacier.New(session.New()) + + params := &glacier.ListMultipartUploadsInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + Limit: aws.String("string"), + Marker: aws.String("string"), + } + resp, err := svc.ListMultipartUploads(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_ListParts() { + svc := glacier.New(session.New()) + + params := &glacier.ListPartsInput{ + AccountId: aws.String("string"), // Required + UploadId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + Limit: aws.String("string"), + Marker: aws.String("string"), + } + resp, err := svc.ListParts(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_ListTagsForVault() { + svc := glacier.New(session.New()) + + params := &glacier.ListTagsForVaultInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.ListTagsForVault(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_ListVaults() { + svc := glacier.New(session.New()) + + params := &glacier.ListVaultsInput{ + AccountId: aws.String("string"), // Required + Limit: aws.String("string"), + Marker: aws.String("string"), + } + resp, err := svc.ListVaults(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleGlacier_RemoveTagsFromVault() { + svc := glacier.New(session.New()) + + params := &glacier.RemoveTagsFromVaultInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + TagKeys: []*string{ + aws.String("string"), // Required + // More values... + }, + } + resp, err := svc.RemoveTagsFromVault(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_SetDataRetrievalPolicy() { + svc := glacier.New(session.New()) + + params := &glacier.SetDataRetrievalPolicyInput{ + AccountId: aws.String("string"), // Required + Policy: &glacier.DataRetrievalPolicy{ + Rules: []*glacier.DataRetrievalRule{ + { // Required + BytesPerHour: aws.Int64(1), + Strategy: aws.String("string"), + }, + // More values... + }, + }, + } + resp, err := svc.SetDataRetrievalPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_SetVaultAccessPolicy() { + svc := glacier.New(session.New()) + + params := &glacier.SetVaultAccessPolicyInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + Policy: &glacier.VaultAccessPolicy{ + Policy: aws.String("string"), + }, + } + resp, err := svc.SetVaultAccessPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_SetVaultNotifications() { + svc := glacier.New(session.New()) + + params := &glacier.SetVaultNotificationsInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + VaultNotificationConfig: &glacier.VaultNotificationConfig{ + Events: []*string{ + aws.String("string"), // Required + // More values... + }, + SNSTopic: aws.String("string"), + }, + } + resp, err := svc.SetVaultNotifications(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_UploadArchive() { + svc := glacier.New(session.New()) + + params := &glacier.UploadArchiveInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + ArchiveDescription: aws.String("string"), + Body: bytes.NewReader([]byte("PAYLOAD")), + Checksum: aws.String("string"), + } + resp, err := svc.UploadArchive(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleGlacier_UploadMultipartPart() { + svc := glacier.New(session.New()) + + params := &glacier.UploadMultipartPartInput{ + AccountId: aws.String("string"), // Required + UploadId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + Body: bytes.NewReader([]byte("PAYLOAD")), + Checksum: aws.String("string"), + Range: aws.String("string"), + } + resp, err := svc.UploadMultipartPart(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/glacier/glacieriface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/glacier/glacieriface/interface.go new file mode 100644 index 000000000..7b2f76798 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/glacier/glacieriface/interface.go @@ -0,0 +1,146 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package glacieriface provides an interface for the Amazon Glacier. +package glacieriface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/glacier" +) + +// GlacierAPI is the interface type for glacier.Glacier. +type GlacierAPI interface { + AbortMultipartUploadRequest(*glacier.AbortMultipartUploadInput) (*request.Request, *glacier.AbortMultipartUploadOutput) + + AbortMultipartUpload(*glacier.AbortMultipartUploadInput) (*glacier.AbortMultipartUploadOutput, error) + + AbortVaultLockRequest(*glacier.AbortVaultLockInput) (*request.Request, *glacier.AbortVaultLockOutput) + + AbortVaultLock(*glacier.AbortVaultLockInput) (*glacier.AbortVaultLockOutput, error) + + AddTagsToVaultRequest(*glacier.AddTagsToVaultInput) (*request.Request, *glacier.AddTagsToVaultOutput) + + AddTagsToVault(*glacier.AddTagsToVaultInput) (*glacier.AddTagsToVaultOutput, error) + + CompleteMultipartUploadRequest(*glacier.CompleteMultipartUploadInput) (*request.Request, *glacier.ArchiveCreationOutput) + + CompleteMultipartUpload(*glacier.CompleteMultipartUploadInput) (*glacier.ArchiveCreationOutput, error) + + CompleteVaultLockRequest(*glacier.CompleteVaultLockInput) (*request.Request, *glacier.CompleteVaultLockOutput) + + CompleteVaultLock(*glacier.CompleteVaultLockInput) (*glacier.CompleteVaultLockOutput, error) + + CreateVaultRequest(*glacier.CreateVaultInput) (*request.Request, *glacier.CreateVaultOutput) + + CreateVault(*glacier.CreateVaultInput) (*glacier.CreateVaultOutput, error) + + DeleteArchiveRequest(*glacier.DeleteArchiveInput) (*request.Request, *glacier.DeleteArchiveOutput) + + DeleteArchive(*glacier.DeleteArchiveInput) (*glacier.DeleteArchiveOutput, error) + + DeleteVaultRequest(*glacier.DeleteVaultInput) (*request.Request, *glacier.DeleteVaultOutput) + + DeleteVault(*glacier.DeleteVaultInput) (*glacier.DeleteVaultOutput, error) + + DeleteVaultAccessPolicyRequest(*glacier.DeleteVaultAccessPolicyInput) (*request.Request, *glacier.DeleteVaultAccessPolicyOutput) + + DeleteVaultAccessPolicy(*glacier.DeleteVaultAccessPolicyInput) (*glacier.DeleteVaultAccessPolicyOutput, error) + + DeleteVaultNotificationsRequest(*glacier.DeleteVaultNotificationsInput) (*request.Request, *glacier.DeleteVaultNotificationsOutput) + + DeleteVaultNotifications(*glacier.DeleteVaultNotificationsInput) (*glacier.DeleteVaultNotificationsOutput, error) + + DescribeJobRequest(*glacier.DescribeJobInput) (*request.Request, *glacier.JobDescription) + + 
DescribeJob(*glacier.DescribeJobInput) (*glacier.JobDescription, error) + + DescribeVaultRequest(*glacier.DescribeVaultInput) (*request.Request, *glacier.DescribeVaultOutput) + + DescribeVault(*glacier.DescribeVaultInput) (*glacier.DescribeVaultOutput, error) + + GetDataRetrievalPolicyRequest(*glacier.GetDataRetrievalPolicyInput) (*request.Request, *glacier.GetDataRetrievalPolicyOutput) + + GetDataRetrievalPolicy(*glacier.GetDataRetrievalPolicyInput) (*glacier.GetDataRetrievalPolicyOutput, error) + + GetJobOutputRequest(*glacier.GetJobOutputInput) (*request.Request, *glacier.GetJobOutputOutput) + + GetJobOutput(*glacier.GetJobOutputInput) (*glacier.GetJobOutputOutput, error) + + GetVaultAccessPolicyRequest(*glacier.GetVaultAccessPolicyInput) (*request.Request, *glacier.GetVaultAccessPolicyOutput) + + GetVaultAccessPolicy(*glacier.GetVaultAccessPolicyInput) (*glacier.GetVaultAccessPolicyOutput, error) + + GetVaultLockRequest(*glacier.GetVaultLockInput) (*request.Request, *glacier.GetVaultLockOutput) + + GetVaultLock(*glacier.GetVaultLockInput) (*glacier.GetVaultLockOutput, error) + + GetVaultNotificationsRequest(*glacier.GetVaultNotificationsInput) (*request.Request, *glacier.GetVaultNotificationsOutput) + + GetVaultNotifications(*glacier.GetVaultNotificationsInput) (*glacier.GetVaultNotificationsOutput, error) + + InitiateJobRequest(*glacier.InitiateJobInput) (*request.Request, *glacier.InitiateJobOutput) + + InitiateJob(*glacier.InitiateJobInput) (*glacier.InitiateJobOutput, error) + + InitiateMultipartUploadRequest(*glacier.InitiateMultipartUploadInput) (*request.Request, *glacier.InitiateMultipartUploadOutput) + + InitiateMultipartUpload(*glacier.InitiateMultipartUploadInput) (*glacier.InitiateMultipartUploadOutput, error) + + InitiateVaultLockRequest(*glacier.InitiateVaultLockInput) (*request.Request, *glacier.InitiateVaultLockOutput) + + InitiateVaultLock(*glacier.InitiateVaultLockInput) (*glacier.InitiateVaultLockOutput, error) + + ListJobsRequest(*glacier.ListJobsInput) (*request.Request, *glacier.ListJobsOutput) + + ListJobs(*glacier.ListJobsInput) (*glacier.ListJobsOutput, error) + + ListJobsPages(*glacier.ListJobsInput, func(*glacier.ListJobsOutput, bool) bool) error + + ListMultipartUploadsRequest(*glacier.ListMultipartUploadsInput) (*request.Request, *glacier.ListMultipartUploadsOutput) + + ListMultipartUploads(*glacier.ListMultipartUploadsInput) (*glacier.ListMultipartUploadsOutput, error) + + ListMultipartUploadsPages(*glacier.ListMultipartUploadsInput, func(*glacier.ListMultipartUploadsOutput, bool) bool) error + + ListPartsRequest(*glacier.ListPartsInput) (*request.Request, *glacier.ListPartsOutput) + + ListParts(*glacier.ListPartsInput) (*glacier.ListPartsOutput, error) + + ListPartsPages(*glacier.ListPartsInput, func(*glacier.ListPartsOutput, bool) bool) error + + ListTagsForVaultRequest(*glacier.ListTagsForVaultInput) (*request.Request, *glacier.ListTagsForVaultOutput) + + ListTagsForVault(*glacier.ListTagsForVaultInput) (*glacier.ListTagsForVaultOutput, error) + + ListVaultsRequest(*glacier.ListVaultsInput) (*request.Request, *glacier.ListVaultsOutput) + + ListVaults(*glacier.ListVaultsInput) (*glacier.ListVaultsOutput, error) + + ListVaultsPages(*glacier.ListVaultsInput, func(*glacier.ListVaultsOutput, bool) bool) error + + RemoveTagsFromVaultRequest(*glacier.RemoveTagsFromVaultInput) (*request.Request, *glacier.RemoveTagsFromVaultOutput) + + RemoveTagsFromVault(*glacier.RemoveTagsFromVaultInput) (*glacier.RemoveTagsFromVaultOutput, error) + + 
SetDataRetrievalPolicyRequest(*glacier.SetDataRetrievalPolicyInput) (*request.Request, *glacier.SetDataRetrievalPolicyOutput) + + SetDataRetrievalPolicy(*glacier.SetDataRetrievalPolicyInput) (*glacier.SetDataRetrievalPolicyOutput, error) + + SetVaultAccessPolicyRequest(*glacier.SetVaultAccessPolicyInput) (*request.Request, *glacier.SetVaultAccessPolicyOutput) + + SetVaultAccessPolicy(*glacier.SetVaultAccessPolicyInput) (*glacier.SetVaultAccessPolicyOutput, error) + + SetVaultNotificationsRequest(*glacier.SetVaultNotificationsInput) (*request.Request, *glacier.SetVaultNotificationsOutput) + + SetVaultNotifications(*glacier.SetVaultNotificationsInput) (*glacier.SetVaultNotificationsOutput, error) + + UploadArchiveRequest(*glacier.UploadArchiveInput) (*request.Request, *glacier.ArchiveCreationOutput) + + UploadArchive(*glacier.UploadArchiveInput) (*glacier.ArchiveCreationOutput, error) + + UploadMultipartPartRequest(*glacier.UploadMultipartPartInput) (*request.Request, *glacier.UploadMultipartPartOutput) + + UploadMultipartPart(*glacier.UploadMultipartPartInput) (*glacier.UploadMultipartPartOutput, error) +} + +var _ GlacierAPI = (*glacier.Glacier)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/glacier/service.go b/vendor/github.com/aws/aws-sdk-go/service/glacier/service.go new file mode 100644 index 000000000..a318653d0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/glacier/service.go @@ -0,0 +1,116 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package glacier + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/restjson" +) + +// Amazon Glacier is a storage solution for "cold data." +// +// Amazon Glacier is an extremely low-cost storage service that provides secure, +// durable, and easy-to-use storage for data backup and archival. With Amazon +// Glacier, customers can store their data cost effectively for months, years, +// or decades. Amazon Glacier also enables customers to offload the administrative +// burdens of operating and scaling storage to AWS, so they don't have to worry +// about capacity planning, hardware provisioning, data replication, hardware +// failure and recovery, or time-consuming hardware migrations. +// +// Amazon Glacier is a great storage choice when low storage cost is paramount, +// your data is rarely retrieved, and retrieval latency of several hours is +// acceptable. If your application requires fast or frequent access to your +// data, consider using Amazon S3. For more information, go to Amazon Simple +// Storage Service (Amazon S3) (http://aws.amazon.com/s3/). +// +// You can store any kind of data in any format. There is no maximum limit +// on the total amount of data you can store in Amazon Glacier. +// +// If you are a first-time user of Amazon Glacier, we recommend that you begin +// by reading the following sections in the Amazon Glacier Developer Guide: +// +// What is Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/introduction.html) +// - This section of the Developer Guide describes the underlying data model, +// the operations it supports, and the AWS SDKs that you can use to interact +// with the service. 
+//
+// Getting Started with Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/amazon-glacier-getting-started.html)
+// - The Getting Started section walks you through the process of creating a
+// vault, uploading archives, creating jobs to download archives, retrieving
+// the job output, and deleting archives.
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type Glacier struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "glacier"
+
+// New creates a new instance of the Glacier client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a Glacier client from just a session.
+// svc := glacier.New(mySession)
+//
+// // Create a Glacier client with additional configuration
+// svc := glacier.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *Glacier {
+ c := p.ClientConfig(ServiceName, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *Glacier {
+ svc := &Glacier{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2012-06-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+ svc.Handlers.Build.PushBackNamed(restjson.BuildHandler)
+ svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler)
+ svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler)
+ svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler)
+
+ // Run custom client initialization if present
+ if initClient != nil {
+ initClient(svc.Client)
+ }
+
+ return svc
+}
+
+// newRequest creates a new request for a Glacier operation and runs any
+// custom request initialization.
+func (c *Glacier) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ // Run custom request initialization if present
+ if initRequest != nil {
+ initRequest(req)
+ }
+
+ return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/glacier/treehash.go b/vendor/github.com/aws/aws-sdk-go/service/glacier/treehash.go
new file mode 100644
index 000000000..dac44baf5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/glacier/treehash.go
@@ -0,0 +1,71 @@
+package glacier
+
+import (
+ "crypto/sha256"
+ "io"
+)
+
+const bufsize = 1024 * 1024
+
+// Hash contains information about the tree-hash and linear hash of a
+// Glacier payload. This structure is generated by ComputeHashes().
+type Hash struct {
+ TreeHash []byte
+ LinearHash []byte
+}
+
+// ComputeHashes computes the tree-hash and linear hash of a seekable reader r.
+func ComputeHashes(r io.ReadSeeker) Hash {
+ r.Seek(0, 0) // Rewind to the start so the whole stream is read
+ defer r.Seek(0, 0) // Rewind stream at end
+
+ buf := make([]byte, bufsize)
+ hashes := [][]byte{}
+ hsh := sha256.New()
+
+ for {
+ // Build leaf nodes in 1MB chunks
+ n, err := io.ReadAtLeast(r, buf, bufsize)
+ if n == 0 {
+ break
+ }
+
+ tmpHash := sha256.Sum256(buf[:n])
+ hashes = append(hashes, tmpHash[:])
+ hsh.Write(buf[:n]) // Track linear hash while we're at it
+
+ if err != nil {
+ break // This is the last chunk
+ }
+ }
+
+ return Hash{
+ LinearHash: hsh.Sum(nil),
+ TreeHash: buildHashTree(hashes),
+ }
+}
+
+// buildHashTree builds a hash tree root node given a set of hashes.
+func buildHashTree(hashes [][]byte) []byte {
+ if len(hashes) == 0 {
+ return nil
+ }
+
+ // Repeatedly hash adjacent pairs, carrying an odd trailing node up
+ // unchanged, until a single root hash remains.
+ for len(hashes) > 1 {
+ tmpHashes := [][]byte{}
+
+ for i := 0; i < len(hashes); i += 2 {
+ if i+1 < len(hashes) {
+ tmpHash := append(append([]byte{}, hashes[i]...), hashes[i+1]...)
+ tmpSum := sha256.Sum256(tmpHash)
+ tmpHashes = append(tmpHashes, tmpSum[:])
+ } else {
+ tmpHashes = append(tmpHashes, hashes[i])
+ }
+ }
+
+ hashes = tmpHashes
+ }
+
+ return hashes[0]
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/glacier/treehash_test.go b/vendor/github.com/aws/aws-sdk-go/service/glacier/treehash_test.go
new file mode 100644
index 000000000..970a083b9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/glacier/treehash_test.go
@@ -0,0 +1,28 @@
+package glacier_test
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/aws/aws-sdk-go/service/glacier"
+)
+
+func ExampleComputeHashes() {
+ buf := make([]byte, 5767168) // 5.5MB buffer
+ for i := range buf {
+ buf[i] = '0' // Fill with zero characters
+ }
+
+ r := bytes.NewReader(buf)
+ h := glacier.ComputeHashes(r)
+ n, _ := r.Seek(0, 1) // Check position after checksumming
+
+ fmt.Printf("linear: %x\n", h.LinearHash)
+ fmt.Printf("tree: %x\n", h.TreeHash)
+ fmt.Printf("pos: %d\n", n)
+
+ // Output:
+ // linear: 68aff0c5a91aa0491752bfb96e3fef33eb74953804f6a2f7b708d5bcefa8ff6b
+ // tree: 154e26c78fd74d0c2c9b3cc4644191619dc4f2cd539ae2a74d5fd07957a3ee6a
+ // pos: 0
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/glacier/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/glacier/waiters.go
new file mode 100644
index 000000000..e6fbedfa1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/glacier/waiters.go
@@ -0,0 +1,65 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
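// Not part of the generated patch: a minimal editorial sketch of the waiters
// defined below. WaitUntilVaultExists polls DescribeVault every 3 seconds for
// up to 15 attempts, succeeding on an HTTP 200 and retrying while the service
// returns ResourceNotFoundException; WaitUntilVaultNotExists inverts those
// acceptors. The vault name "examplevault" is an assumption for illustration.
//
//	package main
//
//	import (
//		"fmt"
//
//		"github.com/aws/aws-sdk-go/aws"
//		"github.com/aws/aws-sdk-go/aws/session"
//		"github.com/aws/aws-sdk-go/service/glacier"
//	)
//
//	func main() {
//		svc := glacier.New(session.New())
//		name := aws.String("examplevault") // assumed vault name
//
//		if _, err := svc.CreateVault(&glacier.CreateVaultInput{VaultName: name}); err != nil {
//			fmt.Println(err)
//			return
//		}
//
//		// Blocks until DescribeVault succeeds or the waiter gives up.
//		if err := svc.WaitUntilVaultExists(&glacier.DescribeVaultInput{VaultName: name}); err != nil {
//			fmt.Println("vault not visible:", err)
//			return
//		}
//		fmt.Println("vault ready")
//	}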
+ +package glacier + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *Glacier) WaitUntilVaultExists(input *DescribeVaultInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeVault", + Delay: 3, + MaxAttempts: 15, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 200, + }, + { + State: "retry", + Matcher: "error", + Argument: "", + Expected: "ResourceNotFoundException", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *Glacier) WaitUntilVaultNotExists(input *DescribeVaultInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeVault", + Delay: 3, + MaxAttempts: 15, + Acceptors: []waiter.WaitAcceptor{ + { + State: "retry", + Matcher: "status", + Argument: "", + Expected: 200, + }, + { + State: "success", + Matcher: "error", + Argument: "", + Expected: "ResourceNotFoundException", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/iam/api.go b/vendor/github.com/aws/aws-sdk-go/service/iam/api.go new file mode 100644 index 000000000..35efd0338 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/iam/api.go @@ -0,0 +1,17020 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package iam provides a client for AWS Identity and Access Management. +package iam + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +const opAddClientIDToOpenIDConnectProvider = "AddClientIDToOpenIDConnectProvider" + +// AddClientIDToOpenIDConnectProviderRequest generates a "aws/request.Request" representing the +// client's request for the AddClientIDToOpenIDConnectProvider operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddClientIDToOpenIDConnectProvider method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddClientIDToOpenIDConnectProviderRequest method. 
+// req, resp := client.AddClientIDToOpenIDConnectProviderRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) AddClientIDToOpenIDConnectProviderRequest(input *AddClientIDToOpenIDConnectProviderInput) (req *request.Request, output *AddClientIDToOpenIDConnectProviderOutput) { + op := &request.Operation{ + Name: opAddClientIDToOpenIDConnectProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddClientIDToOpenIDConnectProviderInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AddClientIDToOpenIDConnectProviderOutput{} + req.Data = output + return +} + +// Adds a new client ID (also known as audience) to the list of client IDs already +// registered for the specified IAM OpenID Connect (OIDC) provider resource. +// +// This action is idempotent; it does not fail or return an error if you add +// an existing client ID to the provider. +func (c *IAM) AddClientIDToOpenIDConnectProvider(input *AddClientIDToOpenIDConnectProviderInput) (*AddClientIDToOpenIDConnectProviderOutput, error) { + req, out := c.AddClientIDToOpenIDConnectProviderRequest(input) + err := req.Send() + return out, err +} + +const opAddRoleToInstanceProfile = "AddRoleToInstanceProfile" + +// AddRoleToInstanceProfileRequest generates a "aws/request.Request" representing the +// client's request for the AddRoleToInstanceProfile operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddRoleToInstanceProfile method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddRoleToInstanceProfileRequest method. +// req, resp := client.AddRoleToInstanceProfileRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) AddRoleToInstanceProfileRequest(input *AddRoleToInstanceProfileInput) (req *request.Request, output *AddRoleToInstanceProfileOutput) { + op := &request.Operation{ + Name: opAddRoleToInstanceProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddRoleToInstanceProfileInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AddRoleToInstanceProfileOutput{} + req.Data = output + return +} + +// Adds the specified IAM role to the specified instance profile. +// +// The caller of this API must be granted the PassRole permission on the IAM +// role by a permission policy. +// +// For more information about roles, go to Working with Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html). +// For more information about instance profiles, go to About Instance Profiles +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html). 
+func (c *IAM) AddRoleToInstanceProfile(input *AddRoleToInstanceProfileInput) (*AddRoleToInstanceProfileOutput, error) { + req, out := c.AddRoleToInstanceProfileRequest(input) + err := req.Send() + return out, err +} + +const opAddUserToGroup = "AddUserToGroup" + +// AddUserToGroupRequest generates a "aws/request.Request" representing the +// client's request for the AddUserToGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddUserToGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddUserToGroupRequest method. +// req, resp := client.AddUserToGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) AddUserToGroupRequest(input *AddUserToGroupInput) (req *request.Request, output *AddUserToGroupOutput) { + op := &request.Operation{ + Name: opAddUserToGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddUserToGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AddUserToGroupOutput{} + req.Data = output + return +} + +// Adds the specified user to the specified group. +func (c *IAM) AddUserToGroup(input *AddUserToGroupInput) (*AddUserToGroupOutput, error) { + req, out := c.AddUserToGroupRequest(input) + err := req.Send() + return out, err +} + +const opAttachGroupPolicy = "AttachGroupPolicy" + +// AttachGroupPolicyRequest generates a "aws/request.Request" representing the +// client's request for the AttachGroupPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AttachGroupPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AttachGroupPolicyRequest method. 
+// req, resp := client.AttachGroupPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) AttachGroupPolicyRequest(input *AttachGroupPolicyInput) (req *request.Request, output *AttachGroupPolicyOutput) { + op := &request.Operation{ + Name: opAttachGroupPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachGroupPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AttachGroupPolicyOutput{} + req.Data = output + return +} + +// Attaches the specified managed policy to the specified IAM group. +// +// You use this API to attach a managed policy to a group. To embed an inline +// policy in a group, use PutGroupPolicy. +// +// For more information about policies, see Managed Policies and Inline Policies +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) AttachGroupPolicy(input *AttachGroupPolicyInput) (*AttachGroupPolicyOutput, error) { + req, out := c.AttachGroupPolicyRequest(input) + err := req.Send() + return out, err +} + +const opAttachRolePolicy = "AttachRolePolicy" + +// AttachRolePolicyRequest generates a "aws/request.Request" representing the +// client's request for the AttachRolePolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AttachRolePolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AttachRolePolicyRequest method. +// req, resp := client.AttachRolePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) AttachRolePolicyRequest(input *AttachRolePolicyInput) (req *request.Request, output *AttachRolePolicyOutput) { + op := &request.Operation{ + Name: opAttachRolePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachRolePolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AttachRolePolicyOutput{} + req.Data = output + return +} + +// Attaches the specified managed policy to the specified IAM role. +// +// When you attach a managed policy to a role, the managed policy becomes part +// of the role's permission (access) policy. You cannot use a managed policy +// as the role's trust policy. The role's trust policy is created at the same +// time as the role, using CreateRole. You can update a role's trust policy +// using UpdateAssumeRolePolicy. +// +// Use this API to attach a managed policy to a role. To embed an inline policy +// in a role, use PutRolePolicy. For more information about policies, see Managed +// Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. 
+func (c *IAM) AttachRolePolicy(input *AttachRolePolicyInput) (*AttachRolePolicyOutput, error) { + req, out := c.AttachRolePolicyRequest(input) + err := req.Send() + return out, err +} + +const opAttachUserPolicy = "AttachUserPolicy" + +// AttachUserPolicyRequest generates a "aws/request.Request" representing the +// client's request for the AttachUserPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AttachUserPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AttachUserPolicyRequest method. +// req, resp := client.AttachUserPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) AttachUserPolicyRequest(input *AttachUserPolicyInput) (req *request.Request, output *AttachUserPolicyOutput) { + op := &request.Operation{ + Name: opAttachUserPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachUserPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AttachUserPolicyOutput{} + req.Data = output + return +} + +// Attaches the specified managed policy to the specified user. +// +// You use this API to attach a managed policy to a user. To embed an inline +// policy in a user, use PutUserPolicy. +// +// For more information about policies, see Managed Policies and Inline Policies +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) AttachUserPolicy(input *AttachUserPolicyInput) (*AttachUserPolicyOutput, error) { + req, out := c.AttachUserPolicyRequest(input) + err := req.Send() + return out, err +} + +const opChangePassword = "ChangePassword" + +// ChangePasswordRequest generates a "aws/request.Request" representing the +// client's request for the ChangePassword operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ChangePassword method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ChangePasswordRequest method. 
+// req, resp := client.ChangePasswordRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) ChangePasswordRequest(input *ChangePasswordInput) (req *request.Request, output *ChangePasswordOutput) { + op := &request.Operation{ + Name: opChangePassword, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ChangePasswordInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ChangePasswordOutput{} + req.Data = output + return +} + +// Changes the password of the IAM user who is calling this action. The root +// account password is not affected by this action. +// +// To change the password for a different user, see UpdateLoginProfile. For +// more information about modifying passwords, see Managing Passwords (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_ManagingLogins.html) +// in the IAM User Guide. +func (c *IAM) ChangePassword(input *ChangePasswordInput) (*ChangePasswordOutput, error) { + req, out := c.ChangePasswordRequest(input) + err := req.Send() + return out, err +} + +const opCreateAccessKey = "CreateAccessKey" + +// CreateAccessKeyRequest generates a "aws/request.Request" representing the +// client's request for the CreateAccessKey operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateAccessKey method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateAccessKeyRequest method. +// req, resp := client.CreateAccessKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) CreateAccessKeyRequest(input *CreateAccessKeyInput) (req *request.Request, output *CreateAccessKeyOutput) { + op := &request.Operation{ + Name: opCreateAccessKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateAccessKeyInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateAccessKeyOutput{} + req.Data = output + return +} + +// Creates a new AWS secret access key and corresponding AWS access key ID for +// the specified user. The default status for new keys is Active. +// +// If you do not specify a user name, IAM determines the user name implicitly +// based on the AWS access key ID signing the request. Because this action works +// for access keys under the AWS account, you can use this action to manage +// root credentials even if the AWS account has no associated users. +// +// For information about limits on the number of keys you can create, see +// Limitations on IAM Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. +// +// To ensure the security of your AWS account, the secret access key is accessible +// only during key and user creation. You must save the key (for example, in +// a text file) if you want to be able to access it again. 
If a secret key is +// lost, you can delete the access keys for the associated user and then create +// new keys. +func (c *IAM) CreateAccessKey(input *CreateAccessKeyInput) (*CreateAccessKeyOutput, error) { + req, out := c.CreateAccessKeyRequest(input) + err := req.Send() + return out, err +} + +const opCreateAccountAlias = "CreateAccountAlias" + +// CreateAccountAliasRequest generates a "aws/request.Request" representing the +// client's request for the CreateAccountAlias operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateAccountAlias method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateAccountAliasRequest method. +// req, resp := client.CreateAccountAliasRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) CreateAccountAliasRequest(input *CreateAccountAliasInput) (req *request.Request, output *CreateAccountAliasOutput) { + op := &request.Operation{ + Name: opCreateAccountAlias, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateAccountAliasInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CreateAccountAliasOutput{} + req.Data = output + return +} + +// Creates an alias for your AWS account. For information about using an AWS +// account alias, see Using an Alias for Your AWS Account ID (http://docs.aws.amazon.com/IAM/latest/UserGuide/AccountAlias.html) +// in the IAM User Guide. +func (c *IAM) CreateAccountAlias(input *CreateAccountAliasInput) (*CreateAccountAliasOutput, error) { + req, out := c.CreateAccountAliasRequest(input) + err := req.Send() + return out, err +} + +const opCreateGroup = "CreateGroup" + +// CreateGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateGroupRequest method. 
+// req, resp := client.CreateGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) CreateGroupRequest(input *CreateGroupInput) (req *request.Request, output *CreateGroupOutput) { + op := &request.Operation{ + Name: opCreateGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateGroupOutput{} + req.Data = output + return +} + +// Creates a new group. +// +// For information about the number of groups you can create, see Limitations +// on IAM Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. +func (c *IAM) CreateGroup(input *CreateGroupInput) (*CreateGroupOutput, error) { + req, out := c.CreateGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateInstanceProfile = "CreateInstanceProfile" + +// CreateInstanceProfileRequest generates a "aws/request.Request" representing the +// client's request for the CreateInstanceProfile operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateInstanceProfile method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateInstanceProfileRequest method. +// req, resp := client.CreateInstanceProfileRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) CreateInstanceProfileRequest(input *CreateInstanceProfileInput) (req *request.Request, output *CreateInstanceProfileOutput) { + op := &request.Operation{ + Name: opCreateInstanceProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateInstanceProfileInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateInstanceProfileOutput{} + req.Data = output + return +} + +// Creates a new instance profile. For information about instance profiles, +// go to About Instance Profiles (http://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html). +// +// For information about the number of instance profiles you can create, see +// Limitations on IAM Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. +func (c *IAM) CreateInstanceProfile(input *CreateInstanceProfileInput) (*CreateInstanceProfileOutput, error) { + req, out := c.CreateInstanceProfileRequest(input) + err := req.Send() + return out, err +} + +const opCreateLoginProfile = "CreateLoginProfile" + +// CreateLoginProfileRequest generates a "aws/request.Request" representing the +// client's request for the CreateLoginProfile operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
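+//
+//    // Illustrative sketch (not part of the generated API) of the
+//    // request-object pattern described above: push a custom handler onto
+//    // the request before Send. "svc" and "params" are placeholders.
+//    req, out := svc.CreateLoginProfileRequest(params)
+//    req.Handlers.Send.PushBack(func(r *request.Request) {
+//        fmt.Println("sending", r.Operation.Name) // custom lifecycle hook
+//    })
+//    if err := req.Send(); err == nil {
+//        fmt.Println(out)
+//    }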
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateLoginProfile method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateLoginProfileRequest method. +// req, resp := client.CreateLoginProfileRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) CreateLoginProfileRequest(input *CreateLoginProfileInput) (req *request.Request, output *CreateLoginProfileOutput) { + op := &request.Operation{ + Name: opCreateLoginProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLoginProfileInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateLoginProfileOutput{} + req.Data = output + return +} + +// Creates a password for the specified user, giving the user the ability to +// access AWS services through the AWS Management Console. For more information +// about managing passwords, see Managing Passwords (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_ManagingLogins.html) +// in the IAM User Guide. +func (c *IAM) CreateLoginProfile(input *CreateLoginProfileInput) (*CreateLoginProfileOutput, error) { + req, out := c.CreateLoginProfileRequest(input) + err := req.Send() + return out, err +} + +const opCreateOpenIDConnectProvider = "CreateOpenIDConnectProvider" + +// CreateOpenIDConnectProviderRequest generates a "aws/request.Request" representing the +// client's request for the CreateOpenIDConnectProvider operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateOpenIDConnectProvider method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateOpenIDConnectProviderRequest method. +// req, resp := client.CreateOpenIDConnectProviderRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) CreateOpenIDConnectProviderRequest(input *CreateOpenIDConnectProviderInput) (req *request.Request, output *CreateOpenIDConnectProviderOutput) { + op := &request.Operation{ + Name: opCreateOpenIDConnectProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateOpenIDConnectProviderInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateOpenIDConnectProviderOutput{} + req.Data = output + return +} + +// Creates an IAM entity to describe an identity provider (IdP) that supports +// OpenID Connect (OIDC) (http://openid.net/connect/). +// +// The OIDC provider that you create with this operation can be used as a principal +// in a role's trust policy to establish a trust relationship between AWS and +// the OIDC provider. 
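+//
+//    // Illustrative sketch (not part of the generated API) of supplying the
+//    // URL, client IDs, and thumbprints described below; all values are
+//    // placeholders and "svc" is an existing *IAM client.
+//    out, err := svc.CreateOpenIDConnectProvider(&iam.CreateOpenIDConnectProviderInput{
+//        Url:            aws.String("https://example.oidc-provider.com"),
+//        ClientIDList:   []*string{aws.String("my-application-id")},
+//        ThumbprintList: []*string{aws.String("3768084dfb3d2b68b7897bf5f565da8efEXAMPLE")},
+//    })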
+// +// When you create the IAM OIDC provider, you specify the URL of the OIDC identity +// provider (IdP) to trust, a list of client IDs (also known as audiences) that +// identify the application or applications that are allowed to authenticate +// using the OIDC provider, and a list of thumbprints of the server certificate(s) +// that the IdP uses. You get all of this information from the OIDC IdP that +// you want to use for access to AWS. +// +// Because trust for the OIDC provider is ultimately derived from the IAM +// provider that this action creates, it is a best practice to limit access +// to the CreateOpenIDConnectProvider action to highly-privileged users. +func (c *IAM) CreateOpenIDConnectProvider(input *CreateOpenIDConnectProviderInput) (*CreateOpenIDConnectProviderOutput, error) { + req, out := c.CreateOpenIDConnectProviderRequest(input) + err := req.Send() + return out, err +} + +const opCreatePolicy = "CreatePolicy" + +// CreatePolicyRequest generates a "aws/request.Request" representing the +// client's request for the CreatePolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreatePolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreatePolicyRequest method. +// req, resp := client.CreatePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) CreatePolicyRequest(input *CreatePolicyInput) (req *request.Request, output *CreatePolicyOutput) { + op := &request.Operation{ + Name: opCreatePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreatePolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &CreatePolicyOutput{} + req.Data = output + return +} + +// Creates a new managed policy for your AWS account. +// +// This operation creates a policy version with a version identifier of v1 +// and sets v1 as the policy's default version. For more information about policy +// versions, see Versioning for Managed Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) +// in the IAM User Guide. +// +// For more information about managed policies in general, see Managed Policies +// and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) CreatePolicy(input *CreatePolicyInput) (*CreatePolicyOutput, error) { + req, out := c.CreatePolicyRequest(input) + err := req.Send() + return out, err +} + +const opCreatePolicyVersion = "CreatePolicyVersion" + +// CreatePolicyVersionRequest generates a "aws/request.Request" representing the +// client's request for the CreatePolicyVersion operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
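+//
+//    // Illustrative sketch (not part of the generated API) for the
+//    // CreatePolicy operation documented above; the policy name and
+//    // document are placeholders.
+//    doc := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:ListAllMyBuckets","Resource":"*"}]}`
+//    out, err := svc.CreatePolicy(&iam.CreatePolicyInput{
+//        PolicyName:     aws.String("ExampleListBuckets"),
+//        PolicyDocument: aws.String(doc),
+//    })
+//    if err == nil {
+//        fmt.Println(*out.Policy.Arn) // the new policy's default version is v1
+//    }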
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreatePolicyVersion method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreatePolicyVersionRequest method. +// req, resp := client.CreatePolicyVersionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) CreatePolicyVersionRequest(input *CreatePolicyVersionInput) (req *request.Request, output *CreatePolicyVersionOutput) { + op := &request.Operation{ + Name: opCreatePolicyVersion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreatePolicyVersionInput{} + } + + req = c.newRequest(op, input, output) + output = &CreatePolicyVersionOutput{} + req.Data = output + return +} + +// Creates a new version of the specified managed policy. To update a managed +// policy, you create a new policy version. A managed policy can have up to +// five versions. If the policy has five versions, you must delete an existing +// version using DeletePolicyVersion before you create a new version. +// +// Optionally, you can set the new version as the policy's default version. +// The default version is the version that is in effect for the IAM users, groups, +// and roles to which the policy is attached. +// +// For more information about managed policy versions, see Versioning for Managed +// Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) +// in the IAM User Guide. +func (c *IAM) CreatePolicyVersion(input *CreatePolicyVersionInput) (*CreatePolicyVersionOutput, error) { + req, out := c.CreatePolicyVersionRequest(input) + err := req.Send() + return out, err +} + +const opCreateRole = "CreateRole" + +// CreateRoleRequest generates a "aws/request.Request" representing the +// client's request for the CreateRole operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateRole method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateRoleRequest method. +// req, resp := client.CreateRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) CreateRoleRequest(input *CreateRoleInput) (req *request.Request, output *CreateRoleOutput) { + op := &request.Operation{ + Name: opCreateRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateRoleInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateRoleOutput{} + req.Data = output + return +} + +// Creates a new role for your AWS account. 
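+//
+//    // Illustrative sketch (not part of the generated API): create a role
+//    // whose trust policy lets Amazon EC2 assume it; names are placeholders.
+//    trust := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"Service":"ec2.amazonaws.com"},"Action":"sts:AssumeRole"}]}`
+//    out, err := svc.CreateRole(&iam.CreateRoleInput{
+//        RoleName:                 aws.String("example-ec2-role"),
+//        AssumeRolePolicyDocument: aws.String(trust),
+//    })
+//    if err == nil {
+//        fmt.Println(*out.Role.Arn)
+//    }
+//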
+// For more information about roles,
+// go to Working with Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html).
+// For information about limitations on role names and the number of roles you
+// can create, go to Limitations on IAM Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html)
+// in the IAM User Guide.
+func (c *IAM) CreateRole(input *CreateRoleInput) (*CreateRoleOutput, error) {
+	req, out := c.CreateRoleRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateSAMLProvider = "CreateSAMLProvider"
+
+// CreateSAMLProviderRequest generates a "aws/request.Request" representing the
+// client's request for the CreateSAMLProvider operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CreateSAMLProvider method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the CreateSAMLProviderRequest method.
+//    req, resp := client.CreateSAMLProviderRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *IAM) CreateSAMLProviderRequest(input *CreateSAMLProviderInput) (req *request.Request, output *CreateSAMLProviderOutput) {
+	op := &request.Operation{
+		Name:       opCreateSAMLProvider,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateSAMLProviderInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CreateSAMLProviderOutput{}
+	req.Data = output
+	return
+}
+
+// Creates an IAM resource that describes an identity provider (IdP) that supports
+// SAML 2.0.
+//
+// The SAML provider resource that you create with this operation can be used
+// as a principal in an IAM role's trust policy to enable federated users who
+// sign in using the SAML IdP to assume the role. You can create an IAM role
+// that supports Web-based single sign-on (SSO) to the AWS Management Console
+// or one that supports API access to AWS.
+//
+// When you create the SAML provider resource, you upload a SAML metadata
+// document that you get from your IdP and that includes the issuer's name,
+// expiration information, and keys that can be used to validate the SAML authentication
+// response (assertions) that the IdP sends. You must generate the metadata
+// document using the identity management software that is used as your organization's
+// IdP.
+//
+// This operation requires Signature Version 4 (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html).
+//
+// For more information, see Enabling SAML 2.0 Federated Users to Access
+// the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-saml.html)
+// and About SAML 2.0-based Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html)
+// in the IAM User Guide.
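+//
+//    // Illustrative sketch (not part of the generated API): the metadata
+//    // document is the XML obtained from your IdP; the name and
+//    // "metadataXML" are placeholders.
+//    out, err := svc.CreateSAMLProvider(&iam.CreateSAMLProviderInput{
+//        Name:                 aws.String("example-idp"),
+//        SAMLMetadataDocument: aws.String(metadataXML),
+//    })
+//    if err == nil {
+//        fmt.Println(*out.SAMLProviderArn)
+//    }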
+func (c *IAM) CreateSAMLProvider(input *CreateSAMLProviderInput) (*CreateSAMLProviderOutput, error) { + req, out := c.CreateSAMLProviderRequest(input) + err := req.Send() + return out, err +} + +const opCreateUser = "CreateUser" + +// CreateUserRequest generates a "aws/request.Request" representing the +// client's request for the CreateUser operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateUser method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateUserRequest method. +// req, resp := client.CreateUserRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) CreateUserRequest(input *CreateUserInput) (req *request.Request, output *CreateUserOutput) { + op := &request.Operation{ + Name: opCreateUser, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateUserInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateUserOutput{} + req.Data = output + return +} + +// Creates a new IAM user for your AWS account. +// +// For information about limitations on the number of IAM users you can create, +// see Limitations on IAM Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. +func (c *IAM) CreateUser(input *CreateUserInput) (*CreateUserOutput, error) { + req, out := c.CreateUserRequest(input) + err := req.Send() + return out, err +} + +const opCreateVirtualMFADevice = "CreateVirtualMFADevice" + +// CreateVirtualMFADeviceRequest generates a "aws/request.Request" representing the +// client's request for the CreateVirtualMFADevice operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateVirtualMFADevice method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateVirtualMFADeviceRequest method. +// req, resp := client.CreateVirtualMFADeviceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) CreateVirtualMFADeviceRequest(input *CreateVirtualMFADeviceInput) (req *request.Request, output *CreateVirtualMFADeviceOutput) { + op := &request.Operation{ + Name: opCreateVirtualMFADevice, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateVirtualMFADeviceInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateVirtualMFADeviceOutput{} + req.Data = output + return +} + +// Creates a new virtual MFA device for the AWS account. 
After creating the +// virtual MFA, use EnableMFADevice to attach the MFA device to an IAM user. +// For more information about creating and working with virtual MFA devices, +// go to Using a Virtual MFA Device (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_VirtualMFA.html) +// in the IAM User Guide. +// +// For information about limits on the number of MFA devices you can create, +// see Limitations on Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. +// +// The seed information contained in the QR code and the Base32 string should +// be treated like any other secret access information, such as your AWS access +// keys or your passwords. After you provision your virtual device, you should +// ensure that the information is destroyed following secure procedures. +func (c *IAM) CreateVirtualMFADevice(input *CreateVirtualMFADeviceInput) (*CreateVirtualMFADeviceOutput, error) { + req, out := c.CreateVirtualMFADeviceRequest(input) + err := req.Send() + return out, err +} + +const opDeactivateMFADevice = "DeactivateMFADevice" + +// DeactivateMFADeviceRequest generates a "aws/request.Request" representing the +// client's request for the DeactivateMFADevice operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeactivateMFADevice method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeactivateMFADeviceRequest method. +// req, resp := client.DeactivateMFADeviceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) DeactivateMFADeviceRequest(input *DeactivateMFADeviceInput) (req *request.Request, output *DeactivateMFADeviceOutput) { + op := &request.Operation{ + Name: opDeactivateMFADevice, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeactivateMFADeviceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeactivateMFADeviceOutput{} + req.Data = output + return +} + +// Deactivates the specified MFA device and removes it from association with +// the user name for which it was originally enabled. +// +// For more information about creating and working with virtual MFA devices, +// go to Using a Virtual MFA Device (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_VirtualMFA.html) +// in the IAM User Guide. +func (c *IAM) DeactivateMFADevice(input *DeactivateMFADeviceInput) (*DeactivateMFADeviceOutput, error) { + req, out := c.DeactivateMFADeviceRequest(input) + err := req.Send() + return out, err +} + +const opDeleteAccessKey = "DeleteAccessKey" + +// DeleteAccessKeyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteAccessKey operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
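+//
+//    // Illustrative sketch (not part of the generated API) for the
+//    // DeactivateMFADevice operation documented above; for a virtual device
+//    // the serial number is its ARN. Values are placeholders.
+//    _, err := svc.DeactivateMFADevice(&iam.DeactivateMFADeviceInput{
+//        UserName:     aws.String("example-user"),
+//        SerialNumber: aws.String("arn:aws:iam::123456789012:mfa/example-user"),
+//    })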
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteAccessKey method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteAccessKeyRequest method. +// req, resp := client.DeleteAccessKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) DeleteAccessKeyRequest(input *DeleteAccessKeyInput) (req *request.Request, output *DeleteAccessKeyOutput) { + op := &request.Operation{ + Name: opDeleteAccessKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAccessKeyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteAccessKeyOutput{} + req.Data = output + return +} + +// Deletes the access key pair associated with the specified IAM user. +// +// If you do not specify a user name, IAM determines the user name implicitly +// based on the AWS access key ID signing the request. Because this action works +// for access keys under the AWS account, you can use this action to manage +// root credentials even if the AWS account has no associated users. +func (c *IAM) DeleteAccessKey(input *DeleteAccessKeyInput) (*DeleteAccessKeyOutput, error) { + req, out := c.DeleteAccessKeyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteAccountAlias = "DeleteAccountAlias" + +// DeleteAccountAliasRequest generates a "aws/request.Request" representing the +// client's request for the DeleteAccountAlias operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteAccountAlias method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteAccountAliasRequest method. +// req, resp := client.DeleteAccountAliasRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) DeleteAccountAliasRequest(input *DeleteAccountAliasInput) (req *request.Request, output *DeleteAccountAliasOutput) { + op := &request.Operation{ + Name: opDeleteAccountAlias, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAccountAliasInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteAccountAliasOutput{} + req.Data = output + return +} + +// Deletes the specified AWS account alias. 
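+//
+//    // Illustrative sketch (not part of the generated API); the alias is a
+//    // placeholder and "svc" is an existing *IAM client.
+//    _, err := svc.DeleteAccountAlias(&iam.DeleteAccountAliasInput{
+//        AccountAlias: aws.String("my-company-alias"),
+//    })
+//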
For information about using an AWS +// account alias, see Using an Alias for Your AWS Account ID (http://docs.aws.amazon.com/IAM/latest/UserGuide/AccountAlias.html) +// in the IAM User Guide. +func (c *IAM) DeleteAccountAlias(input *DeleteAccountAliasInput) (*DeleteAccountAliasOutput, error) { + req, out := c.DeleteAccountAliasRequest(input) + err := req.Send() + return out, err +} + +const opDeleteAccountPasswordPolicy = "DeleteAccountPasswordPolicy" + +// DeleteAccountPasswordPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteAccountPasswordPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteAccountPasswordPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteAccountPasswordPolicyRequest method. +// req, resp := client.DeleteAccountPasswordPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) DeleteAccountPasswordPolicyRequest(input *DeleteAccountPasswordPolicyInput) (req *request.Request, output *DeleteAccountPasswordPolicyOutput) { + op := &request.Operation{ + Name: opDeleteAccountPasswordPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAccountPasswordPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteAccountPasswordPolicyOutput{} + req.Data = output + return +} + +// Deletes the password policy for the AWS account. There are no parameters. +func (c *IAM) DeleteAccountPasswordPolicy(input *DeleteAccountPasswordPolicyInput) (*DeleteAccountPasswordPolicyOutput, error) { + req, out := c.DeleteAccountPasswordPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteGroup = "DeleteGroup" + +// DeleteGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteGroupRequest method. 
+// req, resp := client.DeleteGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) DeleteGroupRequest(input *DeleteGroupInput) (req *request.Request, output *DeleteGroupOutput) { + op := &request.Operation{ + Name: opDeleteGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteGroupOutput{} + req.Data = output + return +} + +// Deletes the specified IAM group. The group must not contain any users or +// have any attached policies. +func (c *IAM) DeleteGroup(input *DeleteGroupInput) (*DeleteGroupOutput, error) { + req, out := c.DeleteGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteGroupPolicy = "DeleteGroupPolicy" + +// DeleteGroupPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteGroupPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteGroupPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteGroupPolicyRequest method. +// req, resp := client.DeleteGroupPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) DeleteGroupPolicyRequest(input *DeleteGroupPolicyInput) (req *request.Request, output *DeleteGroupPolicyOutput) { + op := &request.Operation{ + Name: opDeleteGroupPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteGroupPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteGroupPolicyOutput{} + req.Data = output + return +} + +// Deletes the specified inline policy that is embedded in the specified IAM +// group. +// +// A group can also have managed policies attached to it. To detach a managed +// policy from a group, use DetachGroupPolicy. For more information about policies, +// refer to Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) DeleteGroupPolicy(input *DeleteGroupPolicyInput) (*DeleteGroupPolicyOutput, error) { + req, out := c.DeleteGroupPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteInstanceProfile = "DeleteInstanceProfile" + +// DeleteInstanceProfileRequest generates a "aws/request.Request" representing the +// client's request for the DeleteInstanceProfile operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
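+//
+//    // Illustrative sketch (not part of the generated API) for the
+//    // DeleteGroupPolicy operation documented above: remove an inline policy
+//    // from a group (use DetachGroupPolicy for managed policies). Names are
+//    // placeholders.
+//    _, err := svc.DeleteGroupPolicy(&iam.DeleteGroupPolicyInput{
+//        GroupName:  aws.String("example-group"),
+//        PolicyName: aws.String("example-inline-policy"),
+//    })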
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteInstanceProfile method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteInstanceProfileRequest method. +// req, resp := client.DeleteInstanceProfileRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) DeleteInstanceProfileRequest(input *DeleteInstanceProfileInput) (req *request.Request, output *DeleteInstanceProfileOutput) { + op := &request.Operation{ + Name: opDeleteInstanceProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteInstanceProfileInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteInstanceProfileOutput{} + req.Data = output + return +} + +// Deletes the specified instance profile. The instance profile must not have +// an associated role. +// +// Make sure you do not have any Amazon EC2 instances running with the instance +// profile you are about to delete. Deleting a role or instance profile that +// is associated with a running instance will break any applications running +// on the instance. +// +// For more information about instance profiles, go to About Instance Profiles +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html). +func (c *IAM) DeleteInstanceProfile(input *DeleteInstanceProfileInput) (*DeleteInstanceProfileOutput, error) { + req, out := c.DeleteInstanceProfileRequest(input) + err := req.Send() + return out, err +} + +const opDeleteLoginProfile = "DeleteLoginProfile" + +// DeleteLoginProfileRequest generates a "aws/request.Request" representing the +// client's request for the DeleteLoginProfile operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteLoginProfile method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteLoginProfileRequest method. 
+// req, resp := client.DeleteLoginProfileRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) DeleteLoginProfileRequest(input *DeleteLoginProfileInput) (req *request.Request, output *DeleteLoginProfileOutput) { + op := &request.Operation{ + Name: opDeleteLoginProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLoginProfileInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteLoginProfileOutput{} + req.Data = output + return +} + +// Deletes the password for the specified IAM user, which terminates the user's +// ability to access AWS services through the AWS Management Console. +// +// Deleting a user's password does not prevent a user from accessing AWS +// through the command line interface or the API. To prevent all user access +// you must also either make any access keys inactive or delete them. For more +// information about making keys inactive or deleting them, see UpdateAccessKey +// and DeleteAccessKey. +func (c *IAM) DeleteLoginProfile(input *DeleteLoginProfileInput) (*DeleteLoginProfileOutput, error) { + req, out := c.DeleteLoginProfileRequest(input) + err := req.Send() + return out, err +} + +const opDeleteOpenIDConnectProvider = "DeleteOpenIDConnectProvider" + +// DeleteOpenIDConnectProviderRequest generates a "aws/request.Request" representing the +// client's request for the DeleteOpenIDConnectProvider operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteOpenIDConnectProvider method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteOpenIDConnectProviderRequest method. +// req, resp := client.DeleteOpenIDConnectProviderRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) DeleteOpenIDConnectProviderRequest(input *DeleteOpenIDConnectProviderInput) (req *request.Request, output *DeleteOpenIDConnectProviderOutput) { + op := &request.Operation{ + Name: opDeleteOpenIDConnectProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteOpenIDConnectProviderInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteOpenIDConnectProviderOutput{} + req.Data = output + return +} + +// Deletes an OpenID Connect identity provider (IdP) resource object in IAM. +// +// Deleting an IAM OIDC provider resource does not update any roles that reference +// the provider as a principal in their trust policies. Any attempt to assume +// a role that references a deleted provider fails. +// +// This action is idempotent; it does not fail or return an error if you call +// the action for a provider that does not exist. 
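+//
+//    // Illustrative sketch (not part of the generated API); the ARN is a
+//    // placeholder.
+//    _, err := svc.DeleteOpenIDConnectProvider(&iam.DeleteOpenIDConnectProviderInput{
+//        OpenIDConnectProviderArn: aws.String("arn:aws:iam::123456789012:oidc-provider/example.oidc-provider.com"),
+//    })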
+func (c *IAM) DeleteOpenIDConnectProvider(input *DeleteOpenIDConnectProviderInput) (*DeleteOpenIDConnectProviderOutput, error) { + req, out := c.DeleteOpenIDConnectProviderRequest(input) + err := req.Send() + return out, err +} + +const opDeletePolicy = "DeletePolicy" + +// DeletePolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeletePolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeletePolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeletePolicyRequest method. +// req, resp := client.DeletePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) DeletePolicyRequest(input *DeletePolicyInput) (req *request.Request, output *DeletePolicyOutput) { + op := &request.Operation{ + Name: opDeletePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeletePolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeletePolicyOutput{} + req.Data = output + return +} + +// Deletes the specified managed policy. +// +// Before you can delete a managed policy, you must first detach the policy +// from all users, groups, and roles that it is attached to, and you must delete +// all of the policy's versions. The following steps describe the process for +// deleting a managed policy: +// +// Detach the policy from all users, groups, and roles that the policy is +// attached to, using the DetachUserPolicy, DetachGroupPolicy, or DetachRolePolicy +// APIs. To list all the users, groups, and roles that a policy is attached +// to, use ListEntitiesForPolicy. +// +// Delete all versions of the policy using DeletePolicyVersion. To list the +// policy's versions, use ListPolicyVersions. You cannot use DeletePolicyVersion +// to delete the version that is marked as the default version. You delete the +// policy's default version in the next step of the process. +// +// Delete the policy (this automatically deletes the policy's default version) +// using this API. +// +// For information about managed policies, see Managed Policies and Inline +// Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) DeletePolicy(input *DeletePolicyInput) (*DeletePolicyOutput, error) { + req, out := c.DeletePolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeletePolicyVersion = "DeletePolicyVersion" + +// DeletePolicyVersionRequest generates a "aws/request.Request" representing the +// client's request for the DeletePolicyVersion operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
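+//
+//    // Illustrative sketch (not part of the generated API) of the multi-step
+//    // DeletePolicy process documented above: detach the policy everywhere,
+//    // delete its non-default versions, then delete the policy itself.
+//    // "arn" is a placeholder *string; error handling is elided for brevity.
+//    ents, _ := svc.ListEntitiesForPolicy(&iam.ListEntitiesForPolicyInput{PolicyArn: arn})
+//    for _, u := range ents.PolicyUsers {
+//        svc.DetachUserPolicy(&iam.DetachUserPolicyInput{UserName: u.UserName, PolicyArn: arn})
+//    }
+//    // ...do the same for ents.PolicyGroups and ents.PolicyRoles...
+//    vers, _ := svc.ListPolicyVersions(&iam.ListPolicyVersionsInput{PolicyArn: arn})
+//    for _, v := range vers.Versions {
+//        if !*v.IsDefaultVersion {
+//            svc.DeletePolicyVersion(&iam.DeletePolicyVersionInput{PolicyArn: arn, VersionId: v.VersionId})
+//        }
+//    }
+//    _, err := svc.DeletePolicy(&iam.DeletePolicyInput{PolicyArn: arn})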
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeletePolicyVersion method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeletePolicyVersionRequest method. +// req, resp := client.DeletePolicyVersionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) DeletePolicyVersionRequest(input *DeletePolicyVersionInput) (req *request.Request, output *DeletePolicyVersionOutput) { + op := &request.Operation{ + Name: opDeletePolicyVersion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeletePolicyVersionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeletePolicyVersionOutput{} + req.Data = output + return +} + +// Deletes the specified version from the specified managed policy. +// +// You cannot delete the default version from a policy using this API. To delete +// the default version from a policy, use DeletePolicy. To find out which version +// of a policy is marked as the default version, use ListPolicyVersions. +// +// For information about versions for managed policies, see Versioning for +// Managed Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) +// in the IAM User Guide. +func (c *IAM) DeletePolicyVersion(input *DeletePolicyVersionInput) (*DeletePolicyVersionOutput, error) { + req, out := c.DeletePolicyVersionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteRole = "DeleteRole" + +// DeleteRoleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRole operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteRole method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteRoleRequest method. +// req, resp := client.DeleteRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) DeleteRoleRequest(input *DeleteRoleInput) (req *request.Request, output *DeleteRoleOutput) { + op := &request.Operation{ + Name: opDeleteRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRoleInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteRoleOutput{} + req.Data = output + return +} + +// Deletes the specified role. The role must not have any policies attached. 
+// For more information about roles, go to Working with Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html). +// +// Make sure you do not have any Amazon EC2 instances running with the role +// you are about to delete. Deleting a role or instance profile that is associated +// with a running instance will break any applications running on the instance. +func (c *IAM) DeleteRole(input *DeleteRoleInput) (*DeleteRoleOutput, error) { + req, out := c.DeleteRoleRequest(input) + err := req.Send() + return out, err +} + +const opDeleteRolePolicy = "DeleteRolePolicy" + +// DeleteRolePolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRolePolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteRolePolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteRolePolicyRequest method. +// req, resp := client.DeleteRolePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) DeleteRolePolicyRequest(input *DeleteRolePolicyInput) (req *request.Request, output *DeleteRolePolicyOutput) { + op := &request.Operation{ + Name: opDeleteRolePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRolePolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteRolePolicyOutput{} + req.Data = output + return +} + +// Deletes the specified inline policy that is embedded in the specified IAM +// role. +// +// A role can also have managed policies attached to it. To detach a managed +// policy from a role, use DetachRolePolicy. For more information about policies, +// refer to Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) DeleteRolePolicy(input *DeleteRolePolicyInput) (*DeleteRolePolicyOutput, error) { + req, out := c.DeleteRolePolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSAMLProvider = "DeleteSAMLProvider" + +// DeleteSAMLProviderRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSAMLProvider operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteSAMLProvider method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteSAMLProviderRequest method. 
+// req, resp := client.DeleteSAMLProviderRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) DeleteSAMLProviderRequest(input *DeleteSAMLProviderInput) (req *request.Request, output *DeleteSAMLProviderOutput) { + op := &request.Operation{ + Name: opDeleteSAMLProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSAMLProviderInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteSAMLProviderOutput{} + req.Data = output + return +} + +// Deletes a SAML provider resource in IAM. +// +// Deleting the provider resource from IAM does not update any roles that reference +// the SAML provider resource's ARN as a principal in their trust policies. +// Any attempt to assume a role that references a non-existent provider resource +// ARN fails. +// +// This operation requires Signature Version 4 (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). +func (c *IAM) DeleteSAMLProvider(input *DeleteSAMLProviderInput) (*DeleteSAMLProviderOutput, error) { + req, out := c.DeleteSAMLProviderRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSSHPublicKey = "DeleteSSHPublicKey" + +// DeleteSSHPublicKeyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSSHPublicKey operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteSSHPublicKey method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteSSHPublicKeyRequest method. +// req, resp := client.DeleteSSHPublicKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) DeleteSSHPublicKeyRequest(input *DeleteSSHPublicKeyInput) (req *request.Request, output *DeleteSSHPublicKeyOutput) { + op := &request.Operation{ + Name: opDeleteSSHPublicKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSSHPublicKeyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteSSHPublicKeyOutput{} + req.Data = output + return +} + +// Deletes the specified SSH public key. +// +// The SSH public key deleted by this action is used only for authenticating +// the associated IAM user to an AWS CodeCommit repository. For more information +// about using SSH keys to authenticate to an AWS CodeCommit repository, see +// Set up AWS CodeCommit for SSH Connections (http://docs.aws.amazon.com/codecommit/latest/userguide/setting-up-credentials-ssh.html) +// in the AWS CodeCommit User Guide. 
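+//
+//    // Illustrative sketch (not part of the generated API); both values are
+//    // placeholders.
+//    _, err := svc.DeleteSSHPublicKey(&iam.DeleteSSHPublicKeyInput{
+//        UserName:       aws.String("example-user"),
+//        SSHPublicKeyId: aws.String("APKAEIBAERJR2EXAMPLE"),
+//    })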
+func (c *IAM) DeleteSSHPublicKey(input *DeleteSSHPublicKeyInput) (*DeleteSSHPublicKeyOutput, error) { + req, out := c.DeleteSSHPublicKeyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteServerCertificate = "DeleteServerCertificate" + +// DeleteServerCertificateRequest generates a "aws/request.Request" representing the +// client's request for the DeleteServerCertificate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteServerCertificate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteServerCertificateRequest method. +// req, resp := client.DeleteServerCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) DeleteServerCertificateRequest(input *DeleteServerCertificateInput) (req *request.Request, output *DeleteServerCertificateOutput) { + op := &request.Operation{ + Name: opDeleteServerCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteServerCertificateInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteServerCertificateOutput{} + req.Data = output + return +} + +// Deletes the specified server certificate. +// +// For more information about working with server certificates, including a +// list of AWS services that can use the server certificates that you manage +// with IAM, go to Working with Server Certificates (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html) +// in the IAM User Guide. +// +// If you are using a server certificate with Elastic Load Balancing, deleting +// the certificate could have implications for your application. If Elastic +// Load Balancing doesn't detect the deletion of bound certificates, it may +// continue to use the certificates. This could cause Elastic Load Balancing +// to stop accepting traffic. We recommend that you remove the reference to +// the certificate from Elastic Load Balancing before using this command to +// delete the certificate. For more information, go to DeleteLoadBalancerListeners +// (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/APIReference/API_DeleteLoadBalancerListeners.html) +// in the Elastic Load Balancing API Reference. +func (c *IAM) DeleteServerCertificate(input *DeleteServerCertificateInput) (*DeleteServerCertificateOutput, error) { + req, out := c.DeleteServerCertificateRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSigningCertificate = "DeleteSigningCertificate" + +// DeleteSigningCertificateRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSigningCertificate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteSigningCertificate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteSigningCertificateRequest method. +// req, resp := client.DeleteSigningCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) DeleteSigningCertificateRequest(input *DeleteSigningCertificateInput) (req *request.Request, output *DeleteSigningCertificateOutput) { + op := &request.Operation{ + Name: opDeleteSigningCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSigningCertificateInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteSigningCertificateOutput{} + req.Data = output + return +} + +// Deletes a signing certificate associated with the specified IAM user. +// +// If you do not specify a user name, IAM determines the user name implicitly +// based on the AWS access key ID signing the request. Because this action works +// for access keys under the AWS account, you can use this action to manage +// root credentials even if the AWS account has no associated IAM users. +func (c *IAM) DeleteSigningCertificate(input *DeleteSigningCertificateInput) (*DeleteSigningCertificateOutput, error) { + req, out := c.DeleteSigningCertificateRequest(input) + err := req.Send() + return out, err +} + +const opDeleteUser = "DeleteUser" + +// DeleteUserRequest generates a "aws/request.Request" representing the +// client's request for the DeleteUser operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteUser method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteUserRequest method. +// req, resp := client.DeleteUserRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) DeleteUserRequest(input *DeleteUserInput) (req *request.Request, output *DeleteUserOutput) { + op := &request.Operation{ + Name: opDeleteUser, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteUserInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteUserOutput{} + req.Data = output + return +} + +// Deletes the specified IAM user. The user must not belong to any groups or +// have any access keys, signing certificates, or attached policies. 
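+//
+// A minimal, hypothetical sketch of handling the precondition above (it assumes
+// an initialized IAM client "client" and the "aws" and "awserr" packages; the
+// user name is illustrative). IAM rejects deletion of a user that still has
+// attached resources with a "DeleteConflict" error code:
+//
+// _, err := client.DeleteUser(&iam.DeleteUserInput{UserName: aws.String("example-user")})
+// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "DeleteConflict" {
+// // remove the user's keys, policies, and group memberships, then retry
+// }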
+func (c *IAM) DeleteUser(input *DeleteUserInput) (*DeleteUserOutput, error) { + req, out := c.DeleteUserRequest(input) + err := req.Send() + return out, err +} + +const opDeleteUserPolicy = "DeleteUserPolicy" + +// DeleteUserPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteUserPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteUserPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteUserPolicyRequest method. +// req, resp := client.DeleteUserPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) DeleteUserPolicyRequest(input *DeleteUserPolicyInput) (req *request.Request, output *DeleteUserPolicyOutput) { + op := &request.Operation{ + Name: opDeleteUserPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteUserPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteUserPolicyOutput{} + req.Data = output + return +} + +// Deletes the specified inline policy that is embedded in the specified IAM +// user. +// +// A user can also have managed policies attached to it. To detach a managed +// policy from a user, use DetachUserPolicy. For more information about policies, +// refer to Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) DeleteUserPolicy(input *DeleteUserPolicyInput) (*DeleteUserPolicyOutput, error) { + req, out := c.DeleteUserPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVirtualMFADevice = "DeleteVirtualMFADevice" + +// DeleteVirtualMFADeviceRequest generates a "aws/request.Request" representing the +// client's request for the DeleteVirtualMFADevice operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteVirtualMFADevice method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteVirtualMFADeviceRequest method. 
+// req, resp := client.DeleteVirtualMFADeviceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) DeleteVirtualMFADeviceRequest(input *DeleteVirtualMFADeviceInput) (req *request.Request, output *DeleteVirtualMFADeviceOutput) { + op := &request.Operation{ + Name: opDeleteVirtualMFADevice, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVirtualMFADeviceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteVirtualMFADeviceOutput{} + req.Data = output + return +} + +// Deletes a virtual MFA device. +// +// You must deactivate a user's virtual MFA device before you can delete +// it. For information about deactivating MFA devices, see DeactivateMFADevice. +func (c *IAM) DeleteVirtualMFADevice(input *DeleteVirtualMFADeviceInput) (*DeleteVirtualMFADeviceOutput, error) { + req, out := c.DeleteVirtualMFADeviceRequest(input) + err := req.Send() + return out, err +} + +const opDetachGroupPolicy = "DetachGroupPolicy" + +// DetachGroupPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DetachGroupPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DetachGroupPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DetachGroupPolicyRequest method. +// req, resp := client.DetachGroupPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) DetachGroupPolicyRequest(input *DetachGroupPolicyInput) (req *request.Request, output *DetachGroupPolicyOutput) { + op := &request.Operation{ + Name: opDetachGroupPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachGroupPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DetachGroupPolicyOutput{} + req.Data = output + return +} + +// Removes the specified managed policy from the specified IAM group. +// +// A group can also have inline policies embedded with it. To delete an inline +// policy, use the DeleteGroupPolicy API. For information about policies, see +// Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) DetachGroupPolicy(input *DetachGroupPolicyInput) (*DetachGroupPolicyOutput, error) { + req, out := c.DetachGroupPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDetachRolePolicy = "DetachRolePolicy" + +// DetachRolePolicyRequest generates a "aws/request.Request" representing the +// client's request for the DetachRolePolicy operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DetachRolePolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DetachRolePolicyRequest method. +// req, resp := client.DetachRolePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) DetachRolePolicyRequest(input *DetachRolePolicyInput) (req *request.Request, output *DetachRolePolicyOutput) { + op := &request.Operation{ + Name: opDetachRolePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachRolePolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DetachRolePolicyOutput{} + req.Data = output + return +} + +// Removes the specified managed policy from the specified role. +// +// A role can also have inline policies embedded with it. To delete an inline +// policy, use the DeleteRolePolicy API. For information about policies, see +// Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) DetachRolePolicy(input *DetachRolePolicyInput) (*DetachRolePolicyOutput, error) { + req, out := c.DetachRolePolicyRequest(input) + err := req.Send() + return out, err +} + +const opDetachUserPolicy = "DetachUserPolicy" + +// DetachUserPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DetachUserPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DetachUserPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DetachUserPolicyRequest method. +// req, resp := client.DetachUserPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) DetachUserPolicyRequest(input *DetachUserPolicyInput) (req *request.Request, output *DetachUserPolicyOutput) { + op := &request.Operation{ + Name: opDetachUserPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachUserPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DetachUserPolicyOutput{} + req.Data = output + return +} + +// Removes the specified managed policy from the specified user. 
+// +// A user can also have inline policies embedded with it. To delete an inline +// policy, use the DeleteUserPolicy API. For information about policies, see +// Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) DetachUserPolicy(input *DetachUserPolicyInput) (*DetachUserPolicyOutput, error) { + req, out := c.DetachUserPolicyRequest(input) + err := req.Send() + return out, err +} + +const opEnableMFADevice = "EnableMFADevice" + +// EnableMFADeviceRequest generates a "aws/request.Request" representing the +// client's request for the EnableMFADevice operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the EnableMFADevice method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EnableMFADeviceRequest method. +// req, resp := client.EnableMFADeviceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) EnableMFADeviceRequest(input *EnableMFADeviceInput) (req *request.Request, output *EnableMFADeviceOutput) { + op := &request.Operation{ + Name: opEnableMFADevice, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableMFADeviceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &EnableMFADeviceOutput{} + req.Data = output + return +} + +// Enables the specified MFA device and associates it with the specified IAM +// user. When enabled, the MFA device is required for every subsequent login +// by the IAM user associated with the device. +func (c *IAM) EnableMFADevice(input *EnableMFADeviceInput) (*EnableMFADeviceOutput, error) { + req, out := c.EnableMFADeviceRequest(input) + err := req.Send() + return out, err +} + +const opGenerateCredentialReport = "GenerateCredentialReport" + +// GenerateCredentialReportRequest generates a "aws/request.Request" representing the +// client's request for the GenerateCredentialReport operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GenerateCredentialReport method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GenerateCredentialReportRequest method. 
+// req, resp := client.GenerateCredentialReportRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) GenerateCredentialReportRequest(input *GenerateCredentialReportInput) (req *request.Request, output *GenerateCredentialReportOutput) { + op := &request.Operation{ + Name: opGenerateCredentialReport, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GenerateCredentialReportInput{} + } + + req = c.newRequest(op, input, output) + output = &GenerateCredentialReportOutput{} + req.Data = output + return +} + +// Generates a credential report for the AWS account. For more information about +// the credential report, see Getting Credential Reports (http://docs.aws.amazon.com/IAM/latest/UserGuide/credential-reports.html) +// in the IAM User Guide. +func (c *IAM) GenerateCredentialReport(input *GenerateCredentialReportInput) (*GenerateCredentialReportOutput, error) { + req, out := c.GenerateCredentialReportRequest(input) + err := req.Send() + return out, err +} + +const opGetAccessKeyLastUsed = "GetAccessKeyLastUsed" + +// GetAccessKeyLastUsedRequest generates a "aws/request.Request" representing the +// client's request for the GetAccessKeyLastUsed operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetAccessKeyLastUsed method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetAccessKeyLastUsedRequest method. +// req, resp := client.GetAccessKeyLastUsedRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) GetAccessKeyLastUsedRequest(input *GetAccessKeyLastUsedInput) (req *request.Request, output *GetAccessKeyLastUsedOutput) { + op := &request.Operation{ + Name: opGetAccessKeyLastUsed, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAccessKeyLastUsedInput{} + } + + req = c.newRequest(op, input, output) + output = &GetAccessKeyLastUsedOutput{} + req.Data = output + return +} + +// Retrieves information about when the specified access key was last used. +// The information includes the date and time of last use, along with the AWS +// service and region that were specified in the last request made with that +// key. +func (c *IAM) GetAccessKeyLastUsed(input *GetAccessKeyLastUsedInput) (*GetAccessKeyLastUsedOutput, error) { + req, out := c.GetAccessKeyLastUsedRequest(input) + err := req.Send() + return out, err +} + +const opGetAccountAuthorizationDetails = "GetAccountAuthorizationDetails" + +// GetAccountAuthorizationDetailsRequest generates a "aws/request.Request" representing the +// client's request for the GetAccountAuthorizationDetails operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetAccountAuthorizationDetails method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetAccountAuthorizationDetailsRequest method. +// req, resp := client.GetAccountAuthorizationDetailsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) GetAccountAuthorizationDetailsRequest(input *GetAccountAuthorizationDetailsInput) (req *request.Request, output *GetAccountAuthorizationDetailsOutput) { + op := &request.Operation{ + Name: opGetAccountAuthorizationDetails, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &GetAccountAuthorizationDetailsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetAccountAuthorizationDetailsOutput{} + req.Data = output + return +} + +// Retrieves information about all IAM users, groups, roles, and policies in +// your AWS account, including their relationships to one another. Use this +// API to obtain a snapshot of the configuration of IAM permissions (users, +// groups, roles, and policies) in your account. +// +// You can optionally filter the results using the Filter parameter. You can +// paginate the results using the MaxItems and Marker parameters. +func (c *IAM) GetAccountAuthorizationDetails(input *GetAccountAuthorizationDetailsInput) (*GetAccountAuthorizationDetailsOutput, error) { + req, out := c.GetAccountAuthorizationDetailsRequest(input) + err := req.Send() + return out, err +} + +// GetAccountAuthorizationDetailsPages iterates over the pages of a GetAccountAuthorizationDetails operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetAccountAuthorizationDetails method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetAccountAuthorizationDetails operation. +// pageNum := 0 +// err := client.GetAccountAuthorizationDetailsPages(params, +// func(page *GetAccountAuthorizationDetailsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) GetAccountAuthorizationDetailsPages(input *GetAccountAuthorizationDetailsInput, fn func(p *GetAccountAuthorizationDetailsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.GetAccountAuthorizationDetailsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*GetAccountAuthorizationDetailsOutput), lastPage) + }) +} + +const opGetAccountPasswordPolicy = "GetAccountPasswordPolicy" + +// GetAccountPasswordPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetAccountPasswordPolicy operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetAccountPasswordPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetAccountPasswordPolicyRequest method. +// req, resp := client.GetAccountPasswordPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) GetAccountPasswordPolicyRequest(input *GetAccountPasswordPolicyInput) (req *request.Request, output *GetAccountPasswordPolicyOutput) { + op := &request.Operation{ + Name: opGetAccountPasswordPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAccountPasswordPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetAccountPasswordPolicyOutput{} + req.Data = output + return +} + +// Retrieves the password policy for the AWS account. For more information about +// using a password policy, go to Managing an IAM Password Policy (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_ManagingPasswordPolicies.html). +func (c *IAM) GetAccountPasswordPolicy(input *GetAccountPasswordPolicyInput) (*GetAccountPasswordPolicyOutput, error) { + req, out := c.GetAccountPasswordPolicyRequest(input) + err := req.Send() + return out, err +} + +const opGetAccountSummary = "GetAccountSummary" + +// GetAccountSummaryRequest generates a "aws/request.Request" representing the +// client's request for the GetAccountSummary operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetAccountSummary method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetAccountSummaryRequest method. +// req, resp := client.GetAccountSummaryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) GetAccountSummaryRequest(input *GetAccountSummaryInput) (req *request.Request, output *GetAccountSummaryOutput) { + op := &request.Operation{ + Name: opGetAccountSummary, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAccountSummaryInput{} + } + + req = c.newRequest(op, input, output) + output = &GetAccountSummaryOutput{} + req.Data = output + return +} + +// Retrieves information about IAM entity usage and IAM quotas in the AWS account. +// +// For information about limitations on IAM entities, see Limitations on IAM +// Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. 
+func (c *IAM) GetAccountSummary(input *GetAccountSummaryInput) (*GetAccountSummaryOutput, error) {
+ req, out := c.GetAccountSummaryRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetContextKeysForCustomPolicy = "GetContextKeysForCustomPolicy"
+
+// GetContextKeysForCustomPolicyRequest generates a "aws/request.Request" representing the
+// client's request for the GetContextKeysForCustomPolicy operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the GetContextKeysForCustomPolicy method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the GetContextKeysForCustomPolicyRequest method.
+// req, resp := client.GetContextKeysForCustomPolicyRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *IAM) GetContextKeysForCustomPolicyRequest(input *GetContextKeysForCustomPolicyInput) (req *request.Request, output *GetContextKeysForPolicyResponse) {
+ op := &request.Operation{
+ Name: opGetContextKeysForCustomPolicy,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetContextKeysForCustomPolicyInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetContextKeysForPolicyResponse{}
+ req.Data = output
+ return
+}
+
+// Gets a list of all of the context keys referenced in the input policies.
+// The policies are supplied as a list of one or more strings. To get the context
+// keys from policies associated with an IAM user, group, or role, use GetContextKeysForPrincipalPolicy.
+//
+// Context keys are variables maintained by AWS and its services that provide
+// details about the context of an API query request, and can be evaluated by
+// testing against a value specified in an IAM policy. Use GetContextKeysForCustomPolicy
+// to understand what key names and values you must supply when you call SimulateCustomPolicy.
+// Note that all parameters are shown in unencoded form here for clarity, but
+// must be URL encoded to be included as a part of a real HTTP request.
+func (c *IAM) GetContextKeysForCustomPolicy(input *GetContextKeysForCustomPolicyInput) (*GetContextKeysForPolicyResponse, error) {
+ req, out := c.GetContextKeysForCustomPolicyRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetContextKeysForPrincipalPolicy = "GetContextKeysForPrincipalPolicy"
+
+// GetContextKeysForPrincipalPolicyRequest generates a "aws/request.Request" representing the
+// client's request for the GetContextKeysForPrincipalPolicy operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the GetContextKeysForPrincipalPolicy method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the GetContextKeysForPrincipalPolicyRequest method.
+// req, resp := client.GetContextKeysForPrincipalPolicyRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *IAM) GetContextKeysForPrincipalPolicyRequest(input *GetContextKeysForPrincipalPolicyInput) (req *request.Request, output *GetContextKeysForPolicyResponse) {
+ op := &request.Operation{
+ Name: opGetContextKeysForPrincipalPolicy,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetContextKeysForPrincipalPolicyInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetContextKeysForPolicyResponse{}
+ req.Data = output
+ return
+}
+
+// Gets a list of all of the context keys referenced in all of the IAM policies
+// attached to the specified IAM entity. The entity can be an IAM user, group,
+// or role. If you specify a user, then the request also includes all of the
+// policies attached to groups that the user is a member of.
+//
+// You can optionally include a list of one or more additional policies, specified
+// as strings. If you want to include only a list of policies by string, use
+// GetContextKeysForCustomPolicy instead.
+//
+// Note: This API discloses information about the permissions granted to other
+// users. If you do not want users to see other users' permissions, then consider
+// allowing them to use GetContextKeysForCustomPolicy instead.
+//
+// Context keys are variables maintained by AWS and its services that provide
+// details about the context of an API query request, and can be evaluated by
+// testing against a value in an IAM policy. Use GetContextKeysForPrincipalPolicy
+// to understand what key names and values you must supply when you call SimulatePrincipalPolicy.
+func (c *IAM) GetContextKeysForPrincipalPolicy(input *GetContextKeysForPrincipalPolicyInput) (*GetContextKeysForPolicyResponse, error) {
+ req, out := c.GetContextKeysForPrincipalPolicyRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetCredentialReport = "GetCredentialReport"
+
+// GetCredentialReportRequest generates a "aws/request.Request" representing the
+// client's request for the GetCredentialReport operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the GetCredentialReport method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the GetCredentialReportRequest method.
+// req, resp := client.GetCredentialReportRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) GetCredentialReportRequest(input *GetCredentialReportInput) (req *request.Request, output *GetCredentialReportOutput) { + op := &request.Operation{ + Name: opGetCredentialReport, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetCredentialReportInput{} + } + + req = c.newRequest(op, input, output) + output = &GetCredentialReportOutput{} + req.Data = output + return +} + +// Retrieves a credential report for the AWS account. For more information about +// the credential report, see Getting Credential Reports (http://docs.aws.amazon.com/IAM/latest/UserGuide/credential-reports.html) +// in the IAM User Guide. +func (c *IAM) GetCredentialReport(input *GetCredentialReportInput) (*GetCredentialReportOutput, error) { + req, out := c.GetCredentialReportRequest(input) + err := req.Send() + return out, err +} + +const opGetGroup = "GetGroup" + +// GetGroupRequest generates a "aws/request.Request" representing the +// client's request for the GetGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetGroupRequest method. +// req, resp := client.GetGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) GetGroupRequest(input *GetGroupInput) (req *request.Request, output *GetGroupOutput) { + op := &request.Operation{ + Name: opGetGroup, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &GetGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &GetGroupOutput{} + req.Data = output + return +} + +// Returns a list of IAM users that are in the specified IAM group. You can +// paginate the results using the MaxItems and Marker parameters. +func (c *IAM) GetGroup(input *GetGroupInput) (*GetGroupOutput, error) { + req, out := c.GetGroupRequest(input) + err := req.Send() + return out, err +} + +// GetGroupPages iterates over the pages of a GetGroup operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetGroup method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetGroup operation. 
+// pageNum := 0 +// err := client.GetGroupPages(params, +// func(page *GetGroupOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) GetGroupPages(input *GetGroupInput, fn func(p *GetGroupOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.GetGroupRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*GetGroupOutput), lastPage) + }) +} + +const opGetGroupPolicy = "GetGroupPolicy" + +// GetGroupPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetGroupPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetGroupPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetGroupPolicyRequest method. +// req, resp := client.GetGroupPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) GetGroupPolicyRequest(input *GetGroupPolicyInput) (req *request.Request, output *GetGroupPolicyOutput) { + op := &request.Operation{ + Name: opGetGroupPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetGroupPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetGroupPolicyOutput{} + req.Data = output + return +} + +// Retrieves the specified inline policy document that is embedded in the specified +// IAM group. +// +// Policies returned by this API are URL-encoded compliant with RFC 3986 (https://tools.ietf.org/html/rfc3986). +// You can use a URL decoding method to convert the policy back to plain JSON +// text. For example, if you use Java, you can use the decode method of the +// java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs +// provide similar functionality. +// +// An IAM group can also have managed policies attached to it. To retrieve +// a managed policy document that is attached to a group, use GetPolicy to determine +// the policy's default version, then use GetPolicyVersion to retrieve the policy +// document. +// +// For more information about policies, see Managed Policies and Inline Policies +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) GetGroupPolicy(input *GetGroupPolicyInput) (*GetGroupPolicyOutput, error) { + req, out := c.GetGroupPolicyRequest(input) + err := req.Send() + return out, err +} + +const opGetInstanceProfile = "GetInstanceProfile" + +// GetInstanceProfileRequest generates a "aws/request.Request" representing the +// client's request for the GetInstanceProfile operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetInstanceProfile method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetInstanceProfileRequest method. +// req, resp := client.GetInstanceProfileRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) GetInstanceProfileRequest(input *GetInstanceProfileInput) (req *request.Request, output *GetInstanceProfileOutput) { + op := &request.Operation{ + Name: opGetInstanceProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetInstanceProfileInput{} + } + + req = c.newRequest(op, input, output) + output = &GetInstanceProfileOutput{} + req.Data = output + return +} + +// Retrieves information about the specified instance profile, including the +// instance profile's path, GUID, ARN, and role. For more information about +// instance profiles, see About Instance Profiles (http://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html) +// in the IAM User Guide. +func (c *IAM) GetInstanceProfile(input *GetInstanceProfileInput) (*GetInstanceProfileOutput, error) { + req, out := c.GetInstanceProfileRequest(input) + err := req.Send() + return out, err +} + +const opGetLoginProfile = "GetLoginProfile" + +// GetLoginProfileRequest generates a "aws/request.Request" representing the +// client's request for the GetLoginProfile operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetLoginProfile method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetLoginProfileRequest method. +// req, resp := client.GetLoginProfileRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) GetLoginProfileRequest(input *GetLoginProfileInput) (req *request.Request, output *GetLoginProfileOutput) { + op := &request.Operation{ + Name: opGetLoginProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetLoginProfileInput{} + } + + req = c.newRequest(op, input, output) + output = &GetLoginProfileOutput{} + req.Data = output + return +} + +// Retrieves the user name and password-creation date for the specified IAM +// user. If the user has not been assigned a password, the action returns a +// 404 (NoSuchEntity) error. 
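+//
+// A minimal, hypothetical sketch of distinguishing the 404 case above from other
+// failures (it assumes an initialized IAM client "client" and the "aws" and
+// "awserr" packages; the user name is illustrative):
+//
+// _, err := client.GetLoginProfile(&iam.GetLoginProfileInput{UserName: aws.String("example-user")})
+// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NoSuchEntity" {
+// // the user has no console password
+// }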
+func (c *IAM) GetLoginProfile(input *GetLoginProfileInput) (*GetLoginProfileOutput, error) { + req, out := c.GetLoginProfileRequest(input) + err := req.Send() + return out, err +} + +const opGetOpenIDConnectProvider = "GetOpenIDConnectProvider" + +// GetOpenIDConnectProviderRequest generates a "aws/request.Request" representing the +// client's request for the GetOpenIDConnectProvider operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetOpenIDConnectProvider method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetOpenIDConnectProviderRequest method. +// req, resp := client.GetOpenIDConnectProviderRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) GetOpenIDConnectProviderRequest(input *GetOpenIDConnectProviderInput) (req *request.Request, output *GetOpenIDConnectProviderOutput) { + op := &request.Operation{ + Name: opGetOpenIDConnectProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetOpenIDConnectProviderInput{} + } + + req = c.newRequest(op, input, output) + output = &GetOpenIDConnectProviderOutput{} + req.Data = output + return +} + +// Returns information about the specified OpenID Connect (OIDC) provider resource +// object in IAM. +func (c *IAM) GetOpenIDConnectProvider(input *GetOpenIDConnectProviderInput) (*GetOpenIDConnectProviderOutput, error) { + req, out := c.GetOpenIDConnectProviderRequest(input) + err := req.Send() + return out, err +} + +const opGetPolicy = "GetPolicy" + +// GetPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetPolicyRequest method. +// req, resp := client.GetPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) GetPolicyRequest(input *GetPolicyInput) (req *request.Request, output *GetPolicyOutput) { + op := &request.Operation{ + Name: opGetPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetPolicyOutput{} + req.Data = output + return +} + +// Retrieves information about the specified managed policy, including the policy's +// default version and the total number of IAM users, groups, and roles to which +// the policy is attached. 
To retrieve the list of the specific users, groups, +// and roles that the policy is attached to, use the ListEntitiesForPolicy API. +// This API returns metadata about the policy. To retrieve the actual policy +// document for a specific version of the policy, use GetPolicyVersion. +// +// This API retrieves information about managed policies. To retrieve information +// about an inline policy that is embedded with an IAM user, group, or role, +// use the GetUserPolicy, GetGroupPolicy, or GetRolePolicy API. +// +// For more information about policies, see Managed Policies and Inline Policies +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) GetPolicy(input *GetPolicyInput) (*GetPolicyOutput, error) { + req, out := c.GetPolicyRequest(input) + err := req.Send() + return out, err +} + +const opGetPolicyVersion = "GetPolicyVersion" + +// GetPolicyVersionRequest generates a "aws/request.Request" representing the +// client's request for the GetPolicyVersion operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetPolicyVersion method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetPolicyVersionRequest method. +// req, resp := client.GetPolicyVersionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) GetPolicyVersionRequest(input *GetPolicyVersionInput) (req *request.Request, output *GetPolicyVersionOutput) { + op := &request.Operation{ + Name: opGetPolicyVersion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetPolicyVersionInput{} + } + + req = c.newRequest(op, input, output) + output = &GetPolicyVersionOutput{} + req.Data = output + return +} + +// Retrieves information about the specified version of the specified managed +// policy, including the policy document. +// +// Policies returned by this API are URL-encoded compliant with RFC 3986 (https://tools.ietf.org/html/rfc3986). +// You can use a URL decoding method to convert the policy back to plain JSON +// text. For example, if you use Java, you can use the decode method of the +// java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs +// provide similar functionality. +// +// To list the available versions for a policy, use ListPolicyVersions. +// +// This API retrieves information about managed policies. To retrieve information +// about an inline policy that is embedded in a user, group, or role, use the +// GetUserPolicy, GetGroupPolicy, or GetRolePolicy API. +// +// For more information about the types of policies, see Managed Policies and +// Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// For more information about managed policy versions, see Versioning for Managed +// Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) +// in the IAM User Guide. 
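+//
+// A minimal, hypothetical sketch of the decoding step described above, in Go
+// rather than Java (it assumes an initialized IAM client "client" and the "aws",
+// "fmt", and "net/url" packages; the ARN and version values are illustrative):
+//
+// out, err := client.GetPolicyVersion(&iam.GetPolicyVersionInput{
+// PolicyArn: aws.String("arn:aws:iam::123456789012:policy/example"), // hypothetical
+// VersionId: aws.String("v1"),
+// })
+// if err == nil {
+// doc, _ := url.QueryUnescape(aws.StringValue(out.PolicyVersion.Document))
+// fmt.Println(doc) // the policy document as plain JSON text
+// }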
+func (c *IAM) GetPolicyVersion(input *GetPolicyVersionInput) (*GetPolicyVersionOutput, error) { + req, out := c.GetPolicyVersionRequest(input) + err := req.Send() + return out, err +} + +const opGetRole = "GetRole" + +// GetRoleRequest generates a "aws/request.Request" representing the +// client's request for the GetRole operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetRole method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetRoleRequest method. +// req, resp := client.GetRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) GetRoleRequest(input *GetRoleInput) (req *request.Request, output *GetRoleOutput) { + op := &request.Operation{ + Name: opGetRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetRoleInput{} + } + + req = c.newRequest(op, input, output) + output = &GetRoleOutput{} + req.Data = output + return +} + +// Retrieves information about the specified role, including the role's path, +// GUID, ARN, and the role's trust policy that grants permission to assume the +// role. For more information about roles, see Working with Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html). +// +// Policies returned by this API are URL-encoded compliant with RFC 3986 (https://tools.ietf.org/html/rfc3986). +// You can use a URL decoding method to convert the policy back to plain JSON +// text. For example, if you use Java, you can use the decode method of the +// java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs +// provide similar functionality. +func (c *IAM) GetRole(input *GetRoleInput) (*GetRoleOutput, error) { + req, out := c.GetRoleRequest(input) + err := req.Send() + return out, err +} + +const opGetRolePolicy = "GetRolePolicy" + +// GetRolePolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetRolePolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetRolePolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetRolePolicyRequest method. 
+// req, resp := client.GetRolePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) GetRolePolicyRequest(input *GetRolePolicyInput) (req *request.Request, output *GetRolePolicyOutput) { + op := &request.Operation{ + Name: opGetRolePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetRolePolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetRolePolicyOutput{} + req.Data = output + return +} + +// Retrieves the specified inline policy document that is embedded with the +// specified IAM role. +// +// Policies returned by this API are URL-encoded compliant with RFC 3986 (https://tools.ietf.org/html/rfc3986). +// You can use a URL decoding method to convert the policy back to plain JSON +// text. For example, if you use Java, you can use the decode method of the +// java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs +// provide similar functionality. +// +// An IAM role can also have managed policies attached to it. To retrieve +// a managed policy document that is attached to a role, use GetPolicy to determine +// the policy's default version, then use GetPolicyVersion to retrieve the policy +// document. +// +// For more information about policies, see Managed Policies and Inline Policies +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// For more information about roles, see Using Roles to Delegate Permissions +// and Federate Identities (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html). +func (c *IAM) GetRolePolicy(input *GetRolePolicyInput) (*GetRolePolicyOutput, error) { + req, out := c.GetRolePolicyRequest(input) + err := req.Send() + return out, err +} + +const opGetSAMLProvider = "GetSAMLProvider" + +// GetSAMLProviderRequest generates a "aws/request.Request" representing the +// client's request for the GetSAMLProvider operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetSAMLProvider method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetSAMLProviderRequest method. +// req, resp := client.GetSAMLProviderRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) GetSAMLProviderRequest(input *GetSAMLProviderInput) (req *request.Request, output *GetSAMLProviderOutput) { + op := &request.Operation{ + Name: opGetSAMLProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSAMLProviderInput{} + } + + req = c.newRequest(op, input, output) + output = &GetSAMLProviderOutput{} + req.Data = output + return +} + +// Returns the SAML provider metadocument that was uploaded when the IAM SAML +// provider resource object was created or updated. +// +// This operation requires Signature Version 4 (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). 
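+//
+// A minimal sketch (an IAM client is assumed in "svc" and a SAML provider
+// ARN in "arn"; both are placeholders, and error handling is elided):
+//
+//    out, err := svc.GetSAMLProvider(&GetSAMLProviderInput{
+//        SAMLProviderArn: arn,
+//    })
+//    if err == nil {
+//        fmt.Println(*out.SAMLMetadataDocument) // XML metadata document
+//    }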
+func (c *IAM) GetSAMLProvider(input *GetSAMLProviderInput) (*GetSAMLProviderOutput, error) { + req, out := c.GetSAMLProviderRequest(input) + err := req.Send() + return out, err +} + +const opGetSSHPublicKey = "GetSSHPublicKey" + +// GetSSHPublicKeyRequest generates a "aws/request.Request" representing the +// client's request for the GetSSHPublicKey operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetSSHPublicKey method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetSSHPublicKeyRequest method. +// req, resp := client.GetSSHPublicKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) GetSSHPublicKeyRequest(input *GetSSHPublicKeyInput) (req *request.Request, output *GetSSHPublicKeyOutput) { + op := &request.Operation{ + Name: opGetSSHPublicKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSSHPublicKeyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetSSHPublicKeyOutput{} + req.Data = output + return +} + +// Retrieves the specified SSH public key, including metadata about the key. +// +// The SSH public key retrieved by this action is used only for authenticating +// the associated IAM user to an AWS CodeCommit repository. For more information +// about using SSH keys to authenticate to an AWS CodeCommit repository, see +// Set up AWS CodeCommit for SSH Connections (http://docs.aws.amazon.com/codecommit/latest/userguide/setting-up-credentials-ssh.html) +// in the AWS CodeCommit User Guide. +func (c *IAM) GetSSHPublicKey(input *GetSSHPublicKeyInput) (*GetSSHPublicKeyOutput, error) { + req, out := c.GetSSHPublicKeyRequest(input) + err := req.Send() + return out, err +} + +const opGetServerCertificate = "GetServerCertificate" + +// GetServerCertificateRequest generates a "aws/request.Request" representing the +// client's request for the GetServerCertificate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetServerCertificate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetServerCertificateRequest method. 
+// req, resp := client.GetServerCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) GetServerCertificateRequest(input *GetServerCertificateInput) (req *request.Request, output *GetServerCertificateOutput) { + op := &request.Operation{ + Name: opGetServerCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetServerCertificateInput{} + } + + req = c.newRequest(op, input, output) + output = &GetServerCertificateOutput{} + req.Data = output + return +} + +// Retrieves information about the specified server certificate stored in IAM. +// +// For more information about working with server certificates, including a +// list of AWS services that can use the server certificates that you manage +// with IAM, go to Working with Server Certificates (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html) +// in the IAM User Guide. +func (c *IAM) GetServerCertificate(input *GetServerCertificateInput) (*GetServerCertificateOutput, error) { + req, out := c.GetServerCertificateRequest(input) + err := req.Send() + return out, err +} + +const opGetUser = "GetUser" + +// GetUserRequest generates a "aws/request.Request" representing the +// client's request for the GetUser operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetUser method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetUserRequest method. +// req, resp := client.GetUserRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) GetUserRequest(input *GetUserInput) (req *request.Request, output *GetUserOutput) { + op := &request.Operation{ + Name: opGetUser, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetUserInput{} + } + + req = c.newRequest(op, input, output) + output = &GetUserOutput{} + req.Data = output + return +} + +// Retrieves information about the specified IAM user, including the user's +// creation date, path, unique ID, and ARN. +// +// If you do not specify a user name, IAM determines the user name implicitly +// based on the AWS access key ID used to sign the request to this API. +func (c *IAM) GetUser(input *GetUserInput) (*GetUserOutput, error) { + req, out := c.GetUserRequest(input) + err := req.Send() + return out, err +} + +const opGetUserPolicy = "GetUserPolicy" + +// GetUserPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetUserPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetUserPolicy method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetUserPolicyRequest method. +// req, resp := client.GetUserPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) GetUserPolicyRequest(input *GetUserPolicyInput) (req *request.Request, output *GetUserPolicyOutput) { + op := &request.Operation{ + Name: opGetUserPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetUserPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetUserPolicyOutput{} + req.Data = output + return +} + +// Retrieves the specified inline policy document that is embedded in the specified +// IAM user. +// +// Policies returned by this API are URL-encoded compliant with RFC 3986 (https://tools.ietf.org/html/rfc3986). +// You can use a URL decoding method to convert the policy back to plain JSON +// text. For example, if you use Java, you can use the decode method of the +// java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs +// provide similar functionality. +// +// An IAM user can also have managed policies attached to it. To retrieve +// a managed policy document that is attached to a user, use GetPolicy to determine +// the policy's default version, then use GetPolicyVersion to retrieve the policy +// document. +// +// For more information about policies, see Managed Policies and Inline Policies +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) GetUserPolicy(input *GetUserPolicyInput) (*GetUserPolicyOutput, error) { + req, out := c.GetUserPolicyRequest(input) + err := req.Send() + return out, err +} + +const opListAccessKeys = "ListAccessKeys" + +// ListAccessKeysRequest generates a "aws/request.Request" representing the +// client's request for the ListAccessKeys operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListAccessKeys method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListAccessKeysRequest method. +// req, resp := client.ListAccessKeysRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) ListAccessKeysRequest(input *ListAccessKeysInput) (req *request.Request, output *ListAccessKeysOutput) { + op := &request.Operation{ + Name: opListAccessKeys, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListAccessKeysInput{} + } + + req = c.newRequest(op, input, output) + output = &ListAccessKeysOutput{} + req.Data = output + return +} + +// Returns information about the access key IDs associated with the specified +// IAM user. If there are none, the action returns an empty list. 
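+//
+// As a hedged sketch (an IAM client is assumed in "svc"; "alice" is a
+// hypothetical user name), the returned AccessKeyMetadata entries can be
+// inspected directly:
+//
+//    out, err := svc.ListAccessKeys(&ListAccessKeysInput{
+//        UserName: aws.String("alice"),
+//    })
+//    if err == nil {
+//        for _, md := range out.AccessKeyMetadata {
+//            fmt.Println(*md.AccessKeyId, *md.Status)
+//        }
+//    }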
+// +// Although each user is limited to a small number of keys, you can still paginate +// the results using the MaxItems and Marker parameters. +// +// If the UserName field is not specified, the UserName is determined implicitly +// based on the AWS access key ID used to sign the request. Because this action +// works for access keys under the AWS account, you can use this action to manage +// root credentials even if the AWS account has no associated users. +// +// To ensure the security of your AWS account, the secret access key is accessible +// only during key and user creation. +func (c *IAM) ListAccessKeys(input *ListAccessKeysInput) (*ListAccessKeysOutput, error) { + req, out := c.ListAccessKeysRequest(input) + err := req.Send() + return out, err +} + +// ListAccessKeysPages iterates over the pages of a ListAccessKeys operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAccessKeys method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAccessKeys operation. +// pageNum := 0 +// err := client.ListAccessKeysPages(params, +// func(page *ListAccessKeysOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) ListAccessKeysPages(input *ListAccessKeysInput, fn func(p *ListAccessKeysOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListAccessKeysRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListAccessKeysOutput), lastPage) + }) +} + +const opListAccountAliases = "ListAccountAliases" + +// ListAccountAliasesRequest generates a "aws/request.Request" representing the +// client's request for the ListAccountAliases operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListAccountAliases method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListAccountAliasesRequest method. +// req, resp := client.ListAccountAliasesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) ListAccountAliasesRequest(input *ListAccountAliasesInput) (req *request.Request, output *ListAccountAliasesOutput) { + op := &request.Operation{ + Name: opListAccountAliases, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListAccountAliasesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListAccountAliasesOutput{} + req.Data = output + return +} + +// Lists the account alias associated with the AWS account (Note: you can have +// only one). 
For information about using an AWS account alias, see Using an +// Alias for Your AWS Account ID (http://docs.aws.amazon.com/IAM/latest/UserGuide/AccountAlias.html) +// in the IAM User Guide. +func (c *IAM) ListAccountAliases(input *ListAccountAliasesInput) (*ListAccountAliasesOutput, error) { + req, out := c.ListAccountAliasesRequest(input) + err := req.Send() + return out, err +} + +// ListAccountAliasesPages iterates over the pages of a ListAccountAliases operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAccountAliases method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAccountAliases operation. +// pageNum := 0 +// err := client.ListAccountAliasesPages(params, +// func(page *ListAccountAliasesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) ListAccountAliasesPages(input *ListAccountAliasesInput, fn func(p *ListAccountAliasesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListAccountAliasesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListAccountAliasesOutput), lastPage) + }) +} + +const opListAttachedGroupPolicies = "ListAttachedGroupPolicies" + +// ListAttachedGroupPoliciesRequest generates a "aws/request.Request" representing the +// client's request for the ListAttachedGroupPolicies operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListAttachedGroupPolicies method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListAttachedGroupPoliciesRequest method. +// req, resp := client.ListAttachedGroupPoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) ListAttachedGroupPoliciesRequest(input *ListAttachedGroupPoliciesInput) (req *request.Request, output *ListAttachedGroupPoliciesOutput) { + op := &request.Operation{ + Name: opListAttachedGroupPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListAttachedGroupPoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListAttachedGroupPoliciesOutput{} + req.Data = output + return +} + +// Lists all managed policies that are attached to the specified IAM group. +// +// An IAM group can also have inline policies embedded with it. To list the +// inline policies for a group, use the ListGroupPolicies API. 
For information +// about policies, see Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// You can paginate the results using the MaxItems and Marker parameters. You +// can use the PathPrefix parameter to limit the list of policies to only those +// matching the specified path prefix. If there are no policies attached to +// the specified group (or none that match the specified path prefix), the action +// returns an empty list. +func (c *IAM) ListAttachedGroupPolicies(input *ListAttachedGroupPoliciesInput) (*ListAttachedGroupPoliciesOutput, error) { + req, out := c.ListAttachedGroupPoliciesRequest(input) + err := req.Send() + return out, err +} + +// ListAttachedGroupPoliciesPages iterates over the pages of a ListAttachedGroupPolicies operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAttachedGroupPolicies method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAttachedGroupPolicies operation. +// pageNum := 0 +// err := client.ListAttachedGroupPoliciesPages(params, +// func(page *ListAttachedGroupPoliciesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) ListAttachedGroupPoliciesPages(input *ListAttachedGroupPoliciesInput, fn func(p *ListAttachedGroupPoliciesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListAttachedGroupPoliciesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListAttachedGroupPoliciesOutput), lastPage) + }) +} + +const opListAttachedRolePolicies = "ListAttachedRolePolicies" + +// ListAttachedRolePoliciesRequest generates a "aws/request.Request" representing the +// client's request for the ListAttachedRolePolicies operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListAttachedRolePolicies method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListAttachedRolePoliciesRequest method. 
+// req, resp := client.ListAttachedRolePoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) ListAttachedRolePoliciesRequest(input *ListAttachedRolePoliciesInput) (req *request.Request, output *ListAttachedRolePoliciesOutput) { + op := &request.Operation{ + Name: opListAttachedRolePolicies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListAttachedRolePoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListAttachedRolePoliciesOutput{} + req.Data = output + return +} + +// Lists all managed policies that are attached to the specified IAM role. +// +// An IAM role can also have inline policies embedded with it. To list the +// inline policies for a role, use the ListRolePolicies API. For information +// about policies, see Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// You can paginate the results using the MaxItems and Marker parameters. You +// can use the PathPrefix parameter to limit the list of policies to only those +// matching the specified path prefix. If there are no policies attached to +// the specified role (or none that match the specified path prefix), the action +// returns an empty list. +func (c *IAM) ListAttachedRolePolicies(input *ListAttachedRolePoliciesInput) (*ListAttachedRolePoliciesOutput, error) { + req, out := c.ListAttachedRolePoliciesRequest(input) + err := req.Send() + return out, err +} + +// ListAttachedRolePoliciesPages iterates over the pages of a ListAttachedRolePolicies operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAttachedRolePolicies method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAttachedRolePolicies operation. +// pageNum := 0 +// err := client.ListAttachedRolePoliciesPages(params, +// func(page *ListAttachedRolePoliciesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) ListAttachedRolePoliciesPages(input *ListAttachedRolePoliciesInput, fn func(p *ListAttachedRolePoliciesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListAttachedRolePoliciesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListAttachedRolePoliciesOutput), lastPage) + }) +} + +const opListAttachedUserPolicies = "ListAttachedUserPolicies" + +// ListAttachedUserPoliciesRequest generates a "aws/request.Request" representing the +// client's request for the ListAttachedUserPolicies operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the ListAttachedUserPolicies method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListAttachedUserPoliciesRequest method. +// req, resp := client.ListAttachedUserPoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) ListAttachedUserPoliciesRequest(input *ListAttachedUserPoliciesInput) (req *request.Request, output *ListAttachedUserPoliciesOutput) { + op := &request.Operation{ + Name: opListAttachedUserPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListAttachedUserPoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListAttachedUserPoliciesOutput{} + req.Data = output + return +} + +// Lists all managed policies that are attached to the specified IAM user. +// +// An IAM user can also have inline policies embedded with it. To list the +// inline policies for a user, use the ListUserPolicies API. For information +// about policies, see Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// You can paginate the results using the MaxItems and Marker parameters. You +// can use the PathPrefix parameter to limit the list of policies to only those +// matching the specified path prefix. If there are no policies attached to +// the specified user (or none that match the specified path prefix), the action +// returns an empty list. +func (c *IAM) ListAttachedUserPolicies(input *ListAttachedUserPoliciesInput) (*ListAttachedUserPoliciesOutput, error) { + req, out := c.ListAttachedUserPoliciesRequest(input) + err := req.Send() + return out, err +} + +// ListAttachedUserPoliciesPages iterates over the pages of a ListAttachedUserPolicies operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAttachedUserPolicies method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAttachedUserPolicies operation. +// pageNum := 0 +// err := client.ListAttachedUserPoliciesPages(params, +// func(page *ListAttachedUserPoliciesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) ListAttachedUserPoliciesPages(input *ListAttachedUserPoliciesInput, fn func(p *ListAttachedUserPoliciesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListAttachedUserPoliciesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListAttachedUserPoliciesOutput), lastPage) + }) +} + +const opListEntitiesForPolicy = "ListEntitiesForPolicy" + +// ListEntitiesForPolicyRequest generates a "aws/request.Request" representing the +// client's request for the ListEntitiesForPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListEntitiesForPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListEntitiesForPolicyRequest method. +// req, resp := client.ListEntitiesForPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) ListEntitiesForPolicyRequest(input *ListEntitiesForPolicyInput) (req *request.Request, output *ListEntitiesForPolicyOutput) { + op := &request.Operation{ + Name: opListEntitiesForPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListEntitiesForPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &ListEntitiesForPolicyOutput{} + req.Data = output + return +} + +// Lists all IAM users, groups, and roles that the specified managed policy +// is attached to. +// +// You can use the optional EntityFilter parameter to limit the results to +// a particular type of entity (users, groups, or roles). For example, to list +// only the roles that are attached to the specified policy, set EntityFilter +// to Role. +// +// You can paginate the results using the MaxItems and Marker parameters. +func (c *IAM) ListEntitiesForPolicy(input *ListEntitiesForPolicyInput) (*ListEntitiesForPolicyOutput, error) { + req, out := c.ListEntitiesForPolicyRequest(input) + err := req.Send() + return out, err +} + +// ListEntitiesForPolicyPages iterates over the pages of a ListEntitiesForPolicy operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListEntitiesForPolicy method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListEntitiesForPolicy operation. +// pageNum := 0 +// err := client.ListEntitiesForPolicyPages(params, +// func(page *ListEntitiesForPolicyOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) ListEntitiesForPolicyPages(input *ListEntitiesForPolicyInput, fn func(p *ListEntitiesForPolicyOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListEntitiesForPolicyRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListEntitiesForPolicyOutput), lastPage) + }) +} + +const opListGroupPolicies = "ListGroupPolicies" + +// ListGroupPoliciesRequest generates a "aws/request.Request" representing the +// client's request for the ListGroupPolicies operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListGroupPolicies method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListGroupPoliciesRequest method. +// req, resp := client.ListGroupPoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) ListGroupPoliciesRequest(input *ListGroupPoliciesInput) (req *request.Request, output *ListGroupPoliciesOutput) { + op := &request.Operation{ + Name: opListGroupPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListGroupPoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListGroupPoliciesOutput{} + req.Data = output + return +} + +// Lists the names of the inline policies that are embedded in the specified +// IAM group. +// +// An IAM group can also have managed policies attached to it. To list the +// managed policies that are attached to a group, use ListAttachedGroupPolicies. +// For more information about policies, see Managed Policies and Inline Policies +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// You can paginate the results using the MaxItems and Marker parameters. If +// there are no inline policies embedded with the specified group, the action +// returns an empty list. +func (c *IAM) ListGroupPolicies(input *ListGroupPoliciesInput) (*ListGroupPoliciesOutput, error) { + req, out := c.ListGroupPoliciesRequest(input) + err := req.Send() + return out, err +} + +// ListGroupPoliciesPages iterates over the pages of a ListGroupPolicies operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListGroupPolicies method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListGroupPolicies operation. +// pageNum := 0 +// err := client.ListGroupPoliciesPages(params, +// func(page *ListGroupPoliciesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) ListGroupPoliciesPages(input *ListGroupPoliciesInput, fn func(p *ListGroupPoliciesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListGroupPoliciesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListGroupPoliciesOutput), lastPage) + }) +} + +const opListGroups = "ListGroups" + +// ListGroupsRequest generates a "aws/request.Request" representing the +// client's request for the ListGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListGroupsRequest method. +// req, resp := client.ListGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) ListGroupsRequest(input *ListGroupsInput) (req *request.Request, output *ListGroupsOutput) { + op := &request.Operation{ + Name: opListGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListGroupsOutput{} + req.Data = output + return +} + +// Lists the IAM groups that have the specified path prefix. +// +// You can paginate the results using the MaxItems and Marker parameters. +func (c *IAM) ListGroups(input *ListGroupsInput) (*ListGroupsOutput, error) { + req, out := c.ListGroupsRequest(input) + err := req.Send() + return out, err +} + +// ListGroupsPages iterates over the pages of a ListGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListGroups operation. +// pageNum := 0 +// err := client.ListGroupsPages(params, +// func(page *ListGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) ListGroupsPages(input *ListGroupsInput, fn func(p *ListGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListGroupsOutput), lastPage) + }) +} + +const opListGroupsForUser = "ListGroupsForUser" + +// ListGroupsForUserRequest generates a "aws/request.Request" representing the +// client's request for the ListGroupsForUser operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListGroupsForUser method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListGroupsForUserRequest method. 
+// req, resp := client.ListGroupsForUserRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) ListGroupsForUserRequest(input *ListGroupsForUserInput) (req *request.Request, output *ListGroupsForUserOutput) { + op := &request.Operation{ + Name: opListGroupsForUser, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListGroupsForUserInput{} + } + + req = c.newRequest(op, input, output) + output = &ListGroupsForUserOutput{} + req.Data = output + return +} + +// Lists the IAM groups that the specified IAM user belongs to. +// +// You can paginate the results using the MaxItems and Marker parameters. +func (c *IAM) ListGroupsForUser(input *ListGroupsForUserInput) (*ListGroupsForUserOutput, error) { + req, out := c.ListGroupsForUserRequest(input) + err := req.Send() + return out, err +} + +// ListGroupsForUserPages iterates over the pages of a ListGroupsForUser operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListGroupsForUser method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListGroupsForUser operation. +// pageNum := 0 +// err := client.ListGroupsForUserPages(params, +// func(page *ListGroupsForUserOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) ListGroupsForUserPages(input *ListGroupsForUserInput, fn func(p *ListGroupsForUserOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListGroupsForUserRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListGroupsForUserOutput), lastPage) + }) +} + +const opListInstanceProfiles = "ListInstanceProfiles" + +// ListInstanceProfilesRequest generates a "aws/request.Request" representing the +// client's request for the ListInstanceProfiles operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListInstanceProfiles method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListInstanceProfilesRequest method. 
+// req, resp := client.ListInstanceProfilesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) ListInstanceProfilesRequest(input *ListInstanceProfilesInput) (req *request.Request, output *ListInstanceProfilesOutput) { + op := &request.Operation{ + Name: opListInstanceProfiles, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListInstanceProfilesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListInstanceProfilesOutput{} + req.Data = output + return +} + +// Lists the instance profiles that have the specified path prefix. If there +// are none, the action returns an empty list. For more information about instance +// profiles, go to About Instance Profiles (http://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html). +// +// You can paginate the results using the MaxItems and Marker parameters. +func (c *IAM) ListInstanceProfiles(input *ListInstanceProfilesInput) (*ListInstanceProfilesOutput, error) { + req, out := c.ListInstanceProfilesRequest(input) + err := req.Send() + return out, err +} + +// ListInstanceProfilesPages iterates over the pages of a ListInstanceProfiles operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListInstanceProfiles method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListInstanceProfiles operation. +// pageNum := 0 +// err := client.ListInstanceProfilesPages(params, +// func(page *ListInstanceProfilesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) ListInstanceProfilesPages(input *ListInstanceProfilesInput, fn func(p *ListInstanceProfilesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListInstanceProfilesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListInstanceProfilesOutput), lastPage) + }) +} + +const opListInstanceProfilesForRole = "ListInstanceProfilesForRole" + +// ListInstanceProfilesForRoleRequest generates a "aws/request.Request" representing the +// client's request for the ListInstanceProfilesForRole operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListInstanceProfilesForRole method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListInstanceProfilesForRoleRequest method. 
+// req, resp := client.ListInstanceProfilesForRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) ListInstanceProfilesForRoleRequest(input *ListInstanceProfilesForRoleInput) (req *request.Request, output *ListInstanceProfilesForRoleOutput) { + op := &request.Operation{ + Name: opListInstanceProfilesForRole, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListInstanceProfilesForRoleInput{} + } + + req = c.newRequest(op, input, output) + output = &ListInstanceProfilesForRoleOutput{} + req.Data = output + return +} + +// Lists the instance profiles that have the specified associated IAM role. +// If there are none, the action returns an empty list. For more information +// about instance profiles, go to About Instance Profiles (http://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html). +// +// You can paginate the results using the MaxItems and Marker parameters. +func (c *IAM) ListInstanceProfilesForRole(input *ListInstanceProfilesForRoleInput) (*ListInstanceProfilesForRoleOutput, error) { + req, out := c.ListInstanceProfilesForRoleRequest(input) + err := req.Send() + return out, err +} + +// ListInstanceProfilesForRolePages iterates over the pages of a ListInstanceProfilesForRole operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListInstanceProfilesForRole method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListInstanceProfilesForRole operation. +// pageNum := 0 +// err := client.ListInstanceProfilesForRolePages(params, +// func(page *ListInstanceProfilesForRoleOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) ListInstanceProfilesForRolePages(input *ListInstanceProfilesForRoleInput, fn func(p *ListInstanceProfilesForRoleOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListInstanceProfilesForRoleRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListInstanceProfilesForRoleOutput), lastPage) + }) +} + +const opListMFADevices = "ListMFADevices" + +// ListMFADevicesRequest generates a "aws/request.Request" representing the +// client's request for the ListMFADevices operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListMFADevices method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListMFADevicesRequest method. 
+// req, resp := client.ListMFADevicesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) ListMFADevicesRequest(input *ListMFADevicesInput) (req *request.Request, output *ListMFADevicesOutput) { + op := &request.Operation{ + Name: opListMFADevices, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListMFADevicesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListMFADevicesOutput{} + req.Data = output + return +} + +// Lists the MFA devices for an IAM user. If the request includes an IAM user +// name, then this action lists all the MFA devices associated with the specified +// user. If you do not specify a user name, IAM determines the user name implicitly +// based on the AWS access key ID used to sign the request for this API. +// +// You can paginate the results using the MaxItems and Marker parameters. +func (c *IAM) ListMFADevices(input *ListMFADevicesInput) (*ListMFADevicesOutput, error) { + req, out := c.ListMFADevicesRequest(input) + err := req.Send() + return out, err +} + +// ListMFADevicesPages iterates over the pages of a ListMFADevices operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListMFADevices method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListMFADevices operation. +// pageNum := 0 +// err := client.ListMFADevicesPages(params, +// func(page *ListMFADevicesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) ListMFADevicesPages(input *ListMFADevicesInput, fn func(p *ListMFADevicesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListMFADevicesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListMFADevicesOutput), lastPage) + }) +} + +const opListOpenIDConnectProviders = "ListOpenIDConnectProviders" + +// ListOpenIDConnectProvidersRequest generates a "aws/request.Request" representing the +// client's request for the ListOpenIDConnectProviders operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListOpenIDConnectProviders method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListOpenIDConnectProvidersRequest method. 
+// req, resp := client.ListOpenIDConnectProvidersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) ListOpenIDConnectProvidersRequest(input *ListOpenIDConnectProvidersInput) (req *request.Request, output *ListOpenIDConnectProvidersOutput) { + op := &request.Operation{ + Name: opListOpenIDConnectProviders, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListOpenIDConnectProvidersInput{} + } + + req = c.newRequest(op, input, output) + output = &ListOpenIDConnectProvidersOutput{} + req.Data = output + return +} + +// Lists information about the IAM OpenID Connect (OIDC) provider resource objects +// defined in the AWS account. +func (c *IAM) ListOpenIDConnectProviders(input *ListOpenIDConnectProvidersInput) (*ListOpenIDConnectProvidersOutput, error) { + req, out := c.ListOpenIDConnectProvidersRequest(input) + err := req.Send() + return out, err +} + +const opListPolicies = "ListPolicies" + +// ListPoliciesRequest generates a "aws/request.Request" representing the +// client's request for the ListPolicies operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListPolicies method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListPoliciesRequest method. +// req, resp := client.ListPoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) ListPoliciesRequest(input *ListPoliciesInput) (req *request.Request, output *ListPoliciesOutput) { + op := &request.Operation{ + Name: opListPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListPoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListPoliciesOutput{} + req.Data = output + return +} + +// Lists all the managed policies that are available in your AWS account, including +// your own customer-defined managed policies and all AWS managed policies. +// +// You can filter the list of policies that is returned using the optional +// OnlyAttached, Scope, and PathPrefix parameters. For example, to list only +// the customer managed policies in your AWS account, set Scope to Local. To +// list only AWS managed policies, set Scope to AWS. +// +// You can paginate the results using the MaxItems and Marker parameters. +// +// For more information about managed policies, see Managed Policies and Inline +// Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) ListPolicies(input *ListPoliciesInput) (*ListPoliciesOutput, error) { + req, out := c.ListPoliciesRequest(input) + err := req.Send() + return out, err +} + +// ListPoliciesPages iterates over the pages of a ListPolicies operation, +// calling the "fn" function with the response data for each page. 
To stop +// iterating, return false from the fn function. +// +// See ListPolicies method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListPolicies operation. +// pageNum := 0 +// err := client.ListPoliciesPages(params, +// func(page *ListPoliciesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) ListPoliciesPages(input *ListPoliciesInput, fn func(p *ListPoliciesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListPoliciesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListPoliciesOutput), lastPage) + }) +} + +const opListPolicyVersions = "ListPolicyVersions" + +// ListPolicyVersionsRequest generates a "aws/request.Request" representing the +// client's request for the ListPolicyVersions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListPolicyVersions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListPolicyVersionsRequest method. +// req, resp := client.ListPolicyVersionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) ListPolicyVersionsRequest(input *ListPolicyVersionsInput) (req *request.Request, output *ListPolicyVersionsOutput) { + op := &request.Operation{ + Name: opListPolicyVersions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListPolicyVersionsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListPolicyVersionsOutput{} + req.Data = output + return +} + +// Lists information about the versions of the specified managed policy, including +// the version that is currently set as the policy's default version. +// +// For more information about managed policies, see Managed Policies and Inline +// Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) ListPolicyVersions(input *ListPolicyVersionsInput) (*ListPolicyVersionsOutput, error) { + req, out := c.ListPolicyVersionsRequest(input) + err := req.Send() + return out, err +} + +// ListPolicyVersionsPages iterates over the pages of a ListPolicyVersions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListPolicyVersions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListPolicyVersions operation. 
+// pageNum := 0 +// err := client.ListPolicyVersionsPages(params, +// func(page *ListPolicyVersionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) ListPolicyVersionsPages(input *ListPolicyVersionsInput, fn func(p *ListPolicyVersionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListPolicyVersionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListPolicyVersionsOutput), lastPage) + }) +} + +const opListRolePolicies = "ListRolePolicies" + +// ListRolePoliciesRequest generates a "aws/request.Request" representing the +// client's request for the ListRolePolicies operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListRolePolicies method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListRolePoliciesRequest method. +// req, resp := client.ListRolePoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) ListRolePoliciesRequest(input *ListRolePoliciesInput) (req *request.Request, output *ListRolePoliciesOutput) { + op := &request.Operation{ + Name: opListRolePolicies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListRolePoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListRolePoliciesOutput{} + req.Data = output + return +} + +// Lists the names of the inline policies that are embedded in the specified +// IAM role. +// +// An IAM role can also have managed policies attached to it. To list the managed +// policies that are attached to a role, use ListAttachedRolePolicies. For more +// information about policies, see Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// You can paginate the results using the MaxItems and Marker parameters. If +// there are no inline policies embedded with the specified role, the action +// returns an empty list. +func (c *IAM) ListRolePolicies(input *ListRolePoliciesInput) (*ListRolePoliciesOutput, error) { + req, out := c.ListRolePoliciesRequest(input) + err := req.Send() + return out, err +} + +// ListRolePoliciesPages iterates over the pages of a ListRolePolicies operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListRolePolicies method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListRolePolicies operation. 
+// pageNum := 0 +// err := client.ListRolePoliciesPages(params, +// func(page *ListRolePoliciesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) ListRolePoliciesPages(input *ListRolePoliciesInput, fn func(p *ListRolePoliciesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListRolePoliciesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListRolePoliciesOutput), lastPage) + }) +} + +const opListRoles = "ListRoles" + +// ListRolesRequest generates a "aws/request.Request" representing the +// client's request for the ListRoles operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListRoles method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListRolesRequest method. +// req, resp := client.ListRolesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) ListRolesRequest(input *ListRolesInput) (req *request.Request, output *ListRolesOutput) { + op := &request.Operation{ + Name: opListRoles, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListRolesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListRolesOutput{} + req.Data = output + return +} + +// Lists the IAM roles that have the specified path prefix. If there are none, +// the action returns an empty list. For more information about roles, go to +// Working with Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html). +// +// You can paginate the results using the MaxItems and Marker parameters. +func (c *IAM) ListRoles(input *ListRolesInput) (*ListRolesOutput, error) { + req, out := c.ListRolesRequest(input) + err := req.Send() + return out, err +} + +// ListRolesPages iterates over the pages of a ListRoles operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListRoles method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListRoles operation. 
+// pageNum := 0 +// err := client.ListRolesPages(params, +// func(page *ListRolesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) ListRolesPages(input *ListRolesInput, fn func(p *ListRolesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListRolesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListRolesOutput), lastPage) + }) +} + +const opListSAMLProviders = "ListSAMLProviders" + +// ListSAMLProvidersRequest generates a "aws/request.Request" representing the +// client's request for the ListSAMLProviders operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListSAMLProviders method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListSAMLProvidersRequest method. +// req, resp := client.ListSAMLProvidersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) ListSAMLProvidersRequest(input *ListSAMLProvidersInput) (req *request.Request, output *ListSAMLProvidersOutput) { + op := &request.Operation{ + Name: opListSAMLProviders, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListSAMLProvidersInput{} + } + + req = c.newRequest(op, input, output) + output = &ListSAMLProvidersOutput{} + req.Data = output + return +} + +// Lists the SAML provider resource objects defined in IAM in the account. +// +// This operation requires Signature Version 4 (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). +func (c *IAM) ListSAMLProviders(input *ListSAMLProvidersInput) (*ListSAMLProvidersOutput, error) { + req, out := c.ListSAMLProvidersRequest(input) + err := req.Send() + return out, err +} + +const opListSSHPublicKeys = "ListSSHPublicKeys" + +// ListSSHPublicKeysRequest generates a "aws/request.Request" representing the +// client's request for the ListSSHPublicKeys operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListSSHPublicKeys method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListSSHPublicKeysRequest method. 
+// req, resp := client.ListSSHPublicKeysRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) ListSSHPublicKeysRequest(input *ListSSHPublicKeysInput) (req *request.Request, output *ListSSHPublicKeysOutput) { + op := &request.Operation{ + Name: opListSSHPublicKeys, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListSSHPublicKeysInput{} + } + + req = c.newRequest(op, input, output) + output = &ListSSHPublicKeysOutput{} + req.Data = output + return +} + +// Returns information about the SSH public keys associated with the specified +// IAM user. If there are none, the action returns an empty list. +// +// The SSH public keys returned by this action are used only for authenticating +// the IAM user to an AWS CodeCommit repository. For more information about +// using SSH keys to authenticate to an AWS CodeCommit repository, see Set up +// AWS CodeCommit for SSH Connections (http://docs.aws.amazon.com/codecommit/latest/userguide/setting-up-credentials-ssh.html) +// in the AWS CodeCommit User Guide. +// +// Although each user is limited to a small number of keys, you can still paginate +// the results using the MaxItems and Marker parameters. +func (c *IAM) ListSSHPublicKeys(input *ListSSHPublicKeysInput) (*ListSSHPublicKeysOutput, error) { + req, out := c.ListSSHPublicKeysRequest(input) + err := req.Send() + return out, err +} + +// ListSSHPublicKeysPages iterates over the pages of a ListSSHPublicKeys operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListSSHPublicKeys method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListSSHPublicKeys operation. +// pageNum := 0 +// err := client.ListSSHPublicKeysPages(params, +// func(page *ListSSHPublicKeysOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) ListSSHPublicKeysPages(input *ListSSHPublicKeysInput, fn func(p *ListSSHPublicKeysOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListSSHPublicKeysRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListSSHPublicKeysOutput), lastPage) + }) +} + +const opListServerCertificates = "ListServerCertificates" + +// ListServerCertificatesRequest generates a "aws/request.Request" representing the +// client's request for the ListServerCertificates operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListServerCertificates method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
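+//
+// A hypothetical sketch (assuming the same client and params values used in
+// the example below): the handler lists on the returned request are the hook
+// for that kind of custom lifecycle logic, e.g. inspecting the outgoing HTTP
+// request just before it is sent:
+//
+//    req, resp := client.ListServerCertificatesRequest(params)
+//    req.Handlers.Send.PushFront(func(r *request.Request) {
+//        fmt.Println("about to call", r.HTTPRequest.URL) // made-up logging
+//    })
+//    if err := req.Send(); err == nil {
+//        fmt.Println(resp)
+//    }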
+//
+//    // Example sending a request using the ListServerCertificatesRequest method.
+//    req, resp := client.ListServerCertificatesRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *IAM) ListServerCertificatesRequest(input *ListServerCertificatesInput) (req *request.Request, output *ListServerCertificatesOutput) {
+	op := &request.Operation{
+		Name:       opListServerCertificates,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"Marker"},
+			OutputTokens:    []string{"Marker"},
+			LimitToken:      "MaxItems",
+			TruncationToken: "IsTruncated",
+		},
+	}
+
+	if input == nil {
+		input = &ListServerCertificatesInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ListServerCertificatesOutput{}
+	req.Data = output
+	return
+}
+
+// Lists the server certificates stored in IAM that have the specified path
+// prefix. If none exist, the action returns an empty list.
+//
+// You can paginate the results using the MaxItems and Marker parameters.
+//
+// For more information about working with server certificates, including a
+// list of AWS services that can use the server certificates that you manage
+// with IAM, go to Working with Server Certificates (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html)
+// in the IAM User Guide.
+func (c *IAM) ListServerCertificates(input *ListServerCertificatesInput) (*ListServerCertificatesOutput, error) {
+	req, out := c.ListServerCertificatesRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// ListServerCertificatesPages iterates over the pages of a ListServerCertificates operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListServerCertificates method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+//    // Example iterating over at most 3 pages of a ListServerCertificates operation.
+//    pageNum := 0
+//    err := client.ListServerCertificatesPages(params,
+//        func(page *ListServerCertificatesOutput, lastPage bool) bool {
+//            pageNum++
+//            fmt.Println(page)
+//            return pageNum <= 3
+//        })
+//
+func (c *IAM) ListServerCertificatesPages(input *ListServerCertificatesInput, fn func(p *ListServerCertificatesOutput, lastPage bool) (shouldContinue bool)) error {
+	page, _ := c.ListServerCertificatesRequest(input)
+	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+	return page.EachPage(func(p interface{}, lastPage bool) bool {
+		return fn(p.(*ListServerCertificatesOutput), lastPage)
+	})
+}
+
+const opListSigningCertificates = "ListSigningCertificates"
+
+// ListSigningCertificatesRequest generates a "aws/request.Request" representing the
+// client's request for the ListSigningCertificates operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ListSigningCertificates method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the ListSigningCertificatesRequest method.
+//    req, resp := client.ListSigningCertificatesRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *IAM) ListSigningCertificatesRequest(input *ListSigningCertificatesInput) (req *request.Request, output *ListSigningCertificatesOutput) {
+	op := &request.Operation{
+		Name:       opListSigningCertificates,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"Marker"},
+			OutputTokens:    []string{"Marker"},
+			LimitToken:      "MaxItems",
+			TruncationToken: "IsTruncated",
+		},
+	}
+
+	if input == nil {
+		input = &ListSigningCertificatesInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ListSigningCertificatesOutput{}
+	req.Data = output
+	return
+}
+
+// Returns information about the signing certificates associated with the specified
+// IAM user. If there are none, the action returns an empty list.
+//
+// Although each user is limited to a small number of signing certificates,
+// you can still paginate the results using the MaxItems and Marker parameters.
+//
+// If the UserName field is not specified, the user name is determined implicitly
+// based on the AWS access key ID used to sign the request for this API. Because
+// this action works for access keys under the AWS account, you can use this
+// action to manage root credentials even if the AWS account has no associated
+// users.
+func (c *IAM) ListSigningCertificates(input *ListSigningCertificatesInput) (*ListSigningCertificatesOutput, error) {
+	req, out := c.ListSigningCertificatesRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// ListSigningCertificatesPages iterates over the pages of a ListSigningCertificates operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListSigningCertificates method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+//    // Example iterating over at most 3 pages of a ListSigningCertificates operation.
+//    pageNum := 0
+//    err := client.ListSigningCertificatesPages(params,
+//        func(page *ListSigningCertificatesOutput, lastPage bool) bool {
+//            pageNum++
+//            fmt.Println(page)
+//            return pageNum <= 3
+//        })
+//
+func (c *IAM) ListSigningCertificatesPages(input *ListSigningCertificatesInput, fn func(p *ListSigningCertificatesOutput, lastPage bool) (shouldContinue bool)) error {
+	page, _ := c.ListSigningCertificatesRequest(input)
+	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+	return page.EachPage(func(p interface{}, lastPage bool) bool {
+		return fn(p.(*ListSigningCertificatesOutput), lastPage)
+	})
+}
+
+const opListUserPolicies = "ListUserPolicies"
+
+// ListUserPoliciesRequest generates a "aws/request.Request" representing the
+// client's request for the ListUserPolicies operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ListUserPolicies method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the ListUserPoliciesRequest method.
+//    req, resp := client.ListUserPoliciesRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *IAM) ListUserPoliciesRequest(input *ListUserPoliciesInput) (req *request.Request, output *ListUserPoliciesOutput) {
+	op := &request.Operation{
+		Name:       opListUserPolicies,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"Marker"},
+			OutputTokens:    []string{"Marker"},
+			LimitToken:      "MaxItems",
+			TruncationToken: "IsTruncated",
+		},
+	}
+
+	if input == nil {
+		input = &ListUserPoliciesInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ListUserPoliciesOutput{}
+	req.Data = output
+	return
+}
+
+// Lists the names of the inline policies embedded in the specified IAM user.
+//
+// An IAM user can also have managed policies attached to it. To list the managed
+// policies that are attached to a user, use ListAttachedUserPolicies. For more
+// information about policies, see Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html)
+// in the IAM User Guide.
+//
+// You can paginate the results using the MaxItems and Marker parameters. If
+// there are no inline policies embedded with the specified user, the action
+// returns an empty list.
+func (c *IAM) ListUserPolicies(input *ListUserPoliciesInput) (*ListUserPoliciesOutput, error) {
+	req, out := c.ListUserPoliciesRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// ListUserPoliciesPages iterates over the pages of a ListUserPolicies operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListUserPolicies method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+//    // Example iterating over at most 3 pages of a ListUserPolicies operation.
+//    pageNum := 0
+//    err := client.ListUserPoliciesPages(params,
+//        func(page *ListUserPoliciesOutput, lastPage bool) bool {
+//            pageNum++
+//            fmt.Println(page)
+//            return pageNum <= 3
+//        })
+//
+func (c *IAM) ListUserPoliciesPages(input *ListUserPoliciesInput, fn func(p *ListUserPoliciesOutput, lastPage bool) (shouldContinue bool)) error {
+	page, _ := c.ListUserPoliciesRequest(input)
+	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+	return page.EachPage(func(p interface{}, lastPage bool) bool {
+		return fn(p.(*ListUserPoliciesOutput), lastPage)
+	})
+}
+
+const opListUsers = "ListUsers"
+
+// ListUsersRequest generates a "aws/request.Request" representing the
+// client's request for the ListUsers operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ListUsers method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
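+//
+// A hypothetical sketch (assuming a client value as in the example below):
+// the Marker and IsTruncated fields described above can also drive pagination
+// by hand; the ListUsersPages helper further below automates the same loop:
+//
+//    input := &ListUsersInput{}
+//    for {
+//        out, err := client.ListUsers(input)
+//        if err != nil {
+//            break
+//        }
+//        fmt.Println(out.Users)
+//        if out.IsTruncated == nil || !*out.IsTruncated {
+//            break
+//        }
+//        input.Marker = out.Marker
+//    }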
+//
+//    // Example sending a request using the ListUsersRequest method.
+//    req, resp := client.ListUsersRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *IAM) ListUsersRequest(input *ListUsersInput) (req *request.Request, output *ListUsersOutput) {
+	op := &request.Operation{
+		Name:       opListUsers,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"Marker"},
+			OutputTokens:    []string{"Marker"},
+			LimitToken:      "MaxItems",
+			TruncationToken: "IsTruncated",
+		},
+	}
+
+	if input == nil {
+		input = &ListUsersInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ListUsersOutput{}
+	req.Data = output
+	return
+}
+
+// Lists the IAM users that have the specified path prefix. If no path prefix
+// is specified, the action returns all users in the AWS account. If there are
+// none, the action returns an empty list.
+//
+// You can paginate the results using the MaxItems and Marker parameters.
+func (c *IAM) ListUsers(input *ListUsersInput) (*ListUsersOutput, error) {
+	req, out := c.ListUsersRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// ListUsersPages iterates over the pages of a ListUsers operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListUsers method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+//    // Example iterating over at most 3 pages of a ListUsers operation.
+//    pageNum := 0
+//    err := client.ListUsersPages(params,
+//        func(page *ListUsersOutput, lastPage bool) bool {
+//            pageNum++
+//            fmt.Println(page)
+//            return pageNum <= 3
+//        })
+//
+func (c *IAM) ListUsersPages(input *ListUsersInput, fn func(p *ListUsersOutput, lastPage bool) (shouldContinue bool)) error {
+	page, _ := c.ListUsersRequest(input)
+	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+	return page.EachPage(func(p interface{}, lastPage bool) bool {
+		return fn(p.(*ListUsersOutput), lastPage)
+	})
+}
+
+const opListVirtualMFADevices = "ListVirtualMFADevices"
+
+// ListVirtualMFADevicesRequest generates a "aws/request.Request" representing the
+// client's request for the ListVirtualMFADevices operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ListVirtualMFADevices method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the ListVirtualMFADevicesRequest method.
+//    req, resp := client.ListVirtualMFADevicesRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *IAM) ListVirtualMFADevicesRequest(input *ListVirtualMFADevicesInput) (req *request.Request, output *ListVirtualMFADevicesOutput) {
+	op := &request.Operation{
+		Name:       opListVirtualMFADevices,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"Marker"},
+			OutputTokens:    []string{"Marker"},
+			LimitToken:      "MaxItems",
+			TruncationToken: "IsTruncated",
+		},
+	}
+
+	if input == nil {
+		input = &ListVirtualMFADevicesInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ListVirtualMFADevicesOutput{}
+	req.Data = output
+	return
+}
+
+// Lists the virtual MFA devices defined in the AWS account by assignment status.
+// If you do not specify an assignment status, the action returns a list of
+// all virtual MFA devices. Assignment status can be Assigned, Unassigned, or
+// Any.
+//
+// You can paginate the results using the MaxItems and Marker parameters.
+func (c *IAM) ListVirtualMFADevices(input *ListVirtualMFADevicesInput) (*ListVirtualMFADevicesOutput, error) {
+	req, out := c.ListVirtualMFADevicesRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// ListVirtualMFADevicesPages iterates over the pages of a ListVirtualMFADevices operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListVirtualMFADevices method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+//    // Example iterating over at most 3 pages of a ListVirtualMFADevices operation.
+//    pageNum := 0
+//    err := client.ListVirtualMFADevicesPages(params,
+//        func(page *ListVirtualMFADevicesOutput, lastPage bool) bool {
+//            pageNum++
+//            fmt.Println(page)
+//            return pageNum <= 3
+//        })
+//
+func (c *IAM) ListVirtualMFADevicesPages(input *ListVirtualMFADevicesInput, fn func(p *ListVirtualMFADevicesOutput, lastPage bool) (shouldContinue bool)) error {
+	page, _ := c.ListVirtualMFADevicesRequest(input)
+	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+	return page.EachPage(func(p interface{}, lastPage bool) bool {
+		return fn(p.(*ListVirtualMFADevicesOutput), lastPage)
+	})
+}
+
+const opPutGroupPolicy = "PutGroupPolicy"
+
+// PutGroupPolicyRequest generates a "aws/request.Request" representing the
+// client's request for the PutGroupPolicy operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutGroupPolicy method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the PutGroupPolicyRequest method.
+//    req, resp := client.PutGroupPolicyRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *IAM) PutGroupPolicyRequest(input *PutGroupPolicyInput) (req *request.Request, output *PutGroupPolicyOutput) {
+	op := &request.Operation{
+		Name:       opPutGroupPolicy,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &PutGroupPolicyInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	output = &PutGroupPolicyOutput{}
+	req.Data = output
+	return
+}
+
+// Adds or updates an inline policy document that is embedded in the specified
+// IAM group.
+//
+// A group can also have managed policies attached to it. To attach a managed
+// policy to a group, use AttachGroupPolicy. To create a new managed policy,
+// use CreatePolicy. For information about policies, see Managed Policies and
+// Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html)
+// in the IAM User Guide.
+//
+// For information about limits on the number of inline policies that you can
+// embed in a group, see Limitations on IAM Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html)
+// in the IAM User Guide.
+//
+// Because policy documents can be large, you should use POST rather than
+// GET when calling PutGroupPolicy. For general information about using the
+// Query API with IAM, go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
+// in the IAM User Guide.
+func (c *IAM) PutGroupPolicy(input *PutGroupPolicyInput) (*PutGroupPolicyOutput, error) {
+	req, out := c.PutGroupPolicyRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opPutRolePolicy = "PutRolePolicy"
+
+// PutRolePolicyRequest generates a "aws/request.Request" representing the
+// client's request for the PutRolePolicy operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutRolePolicy method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the PutRolePolicyRequest method.
+//    req, resp := client.PutRolePolicyRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *IAM) PutRolePolicyRequest(input *PutRolePolicyInput) (req *request.Request, output *PutRolePolicyOutput) {
+	op := &request.Operation{
+		Name:       opPutRolePolicy,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &PutRolePolicyInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	output = &PutRolePolicyOutput{}
+	req.Data = output
+	return
+}
+
+// Adds or updates an inline policy document that is embedded in the specified
+// IAM role.
+//
+// When you embed an inline policy in a role, the inline policy is used as
+// part of the role's access (permissions) policy. The role's trust policy is
+// created at the same time as the role, using CreateRole. You can update a
+// role's trust policy using UpdateAssumeRolePolicy. For more information about
+// IAM roles, go to Using Roles to Delegate Permissions and Federate Identities
+// (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html).
+//
+// A role can also have a managed policy attached to it. To attach a managed
+// policy to a role, use AttachRolePolicy. To create a new managed policy, use
+// CreatePolicy. For information about policies, see Managed Policies and Inline
+// Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html)
+// in the IAM User Guide.
+//
+// For information about limits on the number of inline policies that you can
+// embed with a role, see Limitations on IAM Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html)
+// in the IAM User Guide.
+//
+// Because policy documents can be large, you should use POST rather than
+// GET when calling PutRolePolicy. For general information about using the Query
+// API with IAM, go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
+// in the IAM User Guide.
+func (c *IAM) PutRolePolicy(input *PutRolePolicyInput) (*PutRolePolicyOutput, error) {
+	req, out := c.PutRolePolicyRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opPutUserPolicy = "PutUserPolicy"
+
+// PutUserPolicyRequest generates a "aws/request.Request" representing the
+// client's request for the PutUserPolicy operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutUserPolicy method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the PutUserPolicyRequest method.
+//    req, resp := client.PutUserPolicyRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *IAM) PutUserPolicyRequest(input *PutUserPolicyInput) (req *request.Request, output *PutUserPolicyOutput) {
+	op := &request.Operation{
+		Name:       opPutUserPolicy,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &PutUserPolicyInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	output = &PutUserPolicyOutput{}
+	req.Data = output
+	return
+}
+
+// Adds or updates an inline policy document that is embedded in the specified
+// IAM user.
+//
+// An IAM user can also have a managed policy attached to it. To attach a managed
+// policy to a user, use AttachUserPolicy. To create a new managed policy, use
+// CreatePolicy. For information about policies, see Managed Policies and Inline
+// Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html)
+// in the IAM User Guide.
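+//
+// A hypothetical sketch (the user name, policy name, and policy document are
+// made-up values): the inline policy is passed as a JSON document string in
+// the request input, along these lines:
+//
+//    doc := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:ListBucket","Resource":"*"}]}`
+//    _, err := client.PutUserPolicy(&PutUserPolicyInput{
+//        UserName:       aws.String("alice"),
+//        PolicyName:     aws.String("s3-list"),
+//        PolicyDocument: aws.String(doc),
+//    })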
+//
+// For information about limits on the number of inline policies that you can
+// embed in a user, see Limitations on IAM Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html)
+// in the IAM User Guide.
+//
+// Because policy documents can be large, you should use POST rather than
+// GET when calling PutUserPolicy. For general information about using the Query
+// API with IAM, go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
+// in the IAM User Guide.
+func (c *IAM) PutUserPolicy(input *PutUserPolicyInput) (*PutUserPolicyOutput, error) {
+	req, out := c.PutUserPolicyRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opRemoveClientIDFromOpenIDConnectProvider = "RemoveClientIDFromOpenIDConnectProvider"
+
+// RemoveClientIDFromOpenIDConnectProviderRequest generates a "aws/request.Request" representing the
+// client's request for the RemoveClientIDFromOpenIDConnectProvider operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the RemoveClientIDFromOpenIDConnectProvider method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the RemoveClientIDFromOpenIDConnectProviderRequest method.
+//    req, resp := client.RemoveClientIDFromOpenIDConnectProviderRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *IAM) RemoveClientIDFromOpenIDConnectProviderRequest(input *RemoveClientIDFromOpenIDConnectProviderInput) (req *request.Request, output *RemoveClientIDFromOpenIDConnectProviderOutput) {
+	op := &request.Operation{
+		Name:       opRemoveClientIDFromOpenIDConnectProvider,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &RemoveClientIDFromOpenIDConnectProviderInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	output = &RemoveClientIDFromOpenIDConnectProviderOutput{}
+	req.Data = output
+	return
+}
+
+// Removes the specified client ID (also known as audience) from the list of
+// client IDs registered for the specified IAM OpenID Connect (OIDC) provider
+// resource object.
+//
+// This action is idempotent; it does not fail or return an error if you try
+// to remove a client ID that does not exist.
+func (c *IAM) RemoveClientIDFromOpenIDConnectProvider(input *RemoveClientIDFromOpenIDConnectProviderInput) (*RemoveClientIDFromOpenIDConnectProviderOutput, error) {
+	req, out := c.RemoveClientIDFromOpenIDConnectProviderRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opRemoveRoleFromInstanceProfile = "RemoveRoleFromInstanceProfile"
+
+// RemoveRoleFromInstanceProfileRequest generates a "aws/request.Request" representing the
+// client's request for the RemoveRoleFromInstanceProfile operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the RemoveRoleFromInstanceProfile method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the RemoveRoleFromInstanceProfileRequest method.
+//    req, resp := client.RemoveRoleFromInstanceProfileRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *IAM) RemoveRoleFromInstanceProfileRequest(input *RemoveRoleFromInstanceProfileInput) (req *request.Request, output *RemoveRoleFromInstanceProfileOutput) {
+	op := &request.Operation{
+		Name:       opRemoveRoleFromInstanceProfile,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &RemoveRoleFromInstanceProfileInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	output = &RemoveRoleFromInstanceProfileOutput{}
+	req.Data = output
+	return
+}
+
+// Removes the specified IAM role from the specified EC2 instance profile.
+//
+// Make sure you do not have any Amazon EC2 instances running with the role
+// you are about to remove from the instance profile. Removing a role from an
+// instance profile that is associated with a running instance breaks any applications
+// running on the instance.
+//
+// For more information about IAM roles, go to Working with Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html).
+// For more information about instance profiles, go to About Instance Profiles
+// (http://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html).
+func (c *IAM) RemoveRoleFromInstanceProfile(input *RemoveRoleFromInstanceProfileInput) (*RemoveRoleFromInstanceProfileOutput, error) {
+	req, out := c.RemoveRoleFromInstanceProfileRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opRemoveUserFromGroup = "RemoveUserFromGroup"
+
+// RemoveUserFromGroupRequest generates a "aws/request.Request" representing the
+// client's request for the RemoveUserFromGroup operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the RemoveUserFromGroup method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the RemoveUserFromGroupRequest method.
+//    req, resp := client.RemoveUserFromGroupRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *IAM) RemoveUserFromGroupRequest(input *RemoveUserFromGroupInput) (req *request.Request, output *RemoveUserFromGroupOutput) {
+	op := &request.Operation{
+		Name:       opRemoveUserFromGroup,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &RemoveUserFromGroupInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	output = &RemoveUserFromGroupOutput{}
+	req.Data = output
+	return
+}
+
+// Removes the specified user from the specified group.
+func (c *IAM) RemoveUserFromGroup(input *RemoveUserFromGroupInput) (*RemoveUserFromGroupOutput, error) {
+	req, out := c.RemoveUserFromGroupRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opResyncMFADevice = "ResyncMFADevice"
+
+// ResyncMFADeviceRequest generates a "aws/request.Request" representing the
+// client's request for the ResyncMFADevice operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ResyncMFADevice method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the ResyncMFADeviceRequest method.
+//    req, resp := client.ResyncMFADeviceRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *IAM) ResyncMFADeviceRequest(input *ResyncMFADeviceInput) (req *request.Request, output *ResyncMFADeviceOutput) {
+	op := &request.Operation{
+		Name:       opResyncMFADevice,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &ResyncMFADeviceInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	output = &ResyncMFADeviceOutput{}
+	req.Data = output
+	return
+}
+
+// Synchronizes the specified MFA device with its IAM resource object on the
+// AWS servers.
+//
+// For more information about creating and working with virtual MFA devices,
+// go to Using a Virtual MFA Device (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_VirtualMFA.html)
+// in the IAM User Guide.
+func (c *IAM) ResyncMFADevice(input *ResyncMFADeviceInput) (*ResyncMFADeviceOutput, error) {
+	req, out := c.ResyncMFADeviceRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opSetDefaultPolicyVersion = "SetDefaultPolicyVersion"
+
+// SetDefaultPolicyVersionRequest generates a "aws/request.Request" representing the
+// client's request for the SetDefaultPolicyVersion operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
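+//
+// A hypothetical usage sketch (the policy ARN and version ID are made-up
+// values); promoting a different version to the default takes effect for
+// every entity the policy is attached to:
+//
+//    _, err := client.SetDefaultPolicyVersion(&SetDefaultPolicyVersionInput{
+//        PolicyArn: aws.String("arn:aws:iam::123456789012:policy/example"),
+//        VersionId: aws.String("v2"),
+//    })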
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the SetDefaultPolicyVersion method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the SetDefaultPolicyVersionRequest method.
+//    req, resp := client.SetDefaultPolicyVersionRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *IAM) SetDefaultPolicyVersionRequest(input *SetDefaultPolicyVersionInput) (req *request.Request, output *SetDefaultPolicyVersionOutput) {
+	op := &request.Operation{
+		Name:       opSetDefaultPolicyVersion,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &SetDefaultPolicyVersionInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	output = &SetDefaultPolicyVersionOutput{}
+	req.Data = output
+	return
+}
+
+// Sets the specified version of the specified policy as the policy's default
+// (operative) version.
+//
+// This action affects all users, groups, and roles that the policy is attached
+// to. To list the users, groups, and roles that the policy is attached to,
+// use the ListEntitiesForPolicy API.
+//
+// For information about managed policies, see Managed Policies and Inline
+// Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html)
+// in the IAM User Guide.
+func (c *IAM) SetDefaultPolicyVersion(input *SetDefaultPolicyVersionInput) (*SetDefaultPolicyVersionOutput, error) {
+	req, out := c.SetDefaultPolicyVersionRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opSimulateCustomPolicy = "SimulateCustomPolicy"
+
+// SimulateCustomPolicyRequest generates a "aws/request.Request" representing the
+// client's request for the SimulateCustomPolicy operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the SimulateCustomPolicy method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the SimulateCustomPolicyRequest method.
+//    req, resp := client.SimulateCustomPolicyRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *IAM) SimulateCustomPolicyRequest(input *SimulateCustomPolicyInput) (req *request.Request, output *SimulatePolicyResponse) {
+	op := &request.Operation{
+		Name:       opSimulateCustomPolicy,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"Marker"},
+			OutputTokens:    []string{"Marker"},
+			LimitToken:      "MaxItems",
+			TruncationToken: "IsTruncated",
+		},
+	}
+
+	if input == nil {
+		input = &SimulateCustomPolicyInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &SimulatePolicyResponse{}
+	req.Data = output
+	return
+}
+
+// Simulates how a set of IAM policies and optionally a resource-based policy
+// works with a list of API actions and AWS resources to determine the policies'
+// effective permissions. The policies are provided as strings.
+//
+// The simulation does not perform the API actions; it only checks the authorization
+// to determine if the simulated policies allow or deny the actions.
+//
+// If you want to simulate existing policies attached to an IAM user, group,
+// or role, use SimulatePrincipalPolicy instead.
+//
+// Context keys are variables maintained by AWS and its services that provide
+// details about the context of an API query request. You can use the Condition
+// element of an IAM policy to evaluate context keys. To get the list of context
+// keys that the policies require for correct simulation, use GetContextKeysForCustomPolicy.
+//
+// If the output is long, you can use the MaxItems and Marker parameters to
+// paginate the results.
+func (c *IAM) SimulateCustomPolicy(input *SimulateCustomPolicyInput) (*SimulatePolicyResponse, error) {
+	req, out := c.SimulateCustomPolicyRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// SimulateCustomPolicyPages iterates over the pages of a SimulateCustomPolicy operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See SimulateCustomPolicy method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+//    // Example iterating over at most 3 pages of a SimulateCustomPolicy operation.
+//    pageNum := 0
+//    err := client.SimulateCustomPolicyPages(params,
+//        func(page *SimulatePolicyResponse, lastPage bool) bool {
+//            pageNum++
+//            fmt.Println(page)
+//            return pageNum <= 3
+//        })
+//
+func (c *IAM) SimulateCustomPolicyPages(input *SimulateCustomPolicyInput, fn func(p *SimulatePolicyResponse, lastPage bool) (shouldContinue bool)) error {
+	page, _ := c.SimulateCustomPolicyRequest(input)
+	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+	return page.EachPage(func(p interface{}, lastPage bool) bool {
+		return fn(p.(*SimulatePolicyResponse), lastPage)
+	})
+}
+
+const opSimulatePrincipalPolicy = "SimulatePrincipalPolicy"
+
+// SimulatePrincipalPolicyRequest generates a "aws/request.Request" representing the
+// client's request for the SimulatePrincipalPolicy operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the SimulatePrincipalPolicy method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the SimulatePrincipalPolicyRequest method.
+//    req, resp := client.SimulatePrincipalPolicyRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *IAM) SimulatePrincipalPolicyRequest(input *SimulatePrincipalPolicyInput) (req *request.Request, output *SimulatePolicyResponse) {
+	op := &request.Operation{
+		Name:       opSimulatePrincipalPolicy,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"Marker"},
+			OutputTokens:    []string{"Marker"},
+			LimitToken:      "MaxItems",
+			TruncationToken: "IsTruncated",
+		},
+	}
+
+	if input == nil {
+		input = &SimulatePrincipalPolicyInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &SimulatePolicyResponse{}
+	req.Data = output
+	return
+}
+
+// Simulates how a set of IAM policies attached to an IAM entity works with a
+// list of API actions and AWS resources to determine the policies' effective
+// permissions. The entity can be an IAM user, group, or role. If you specify
+// a user, then the simulation also includes all of the policies that are attached
+// to groups that the user belongs to.
+//
+// You can optionally include a list of one or more additional policies specified
+// as strings to include in the simulation. If you want to simulate only policies
+// specified as strings, use SimulateCustomPolicy instead.
+//
+// You can also optionally include one resource-based policy to be evaluated
+// with each of the resources included in the simulation.
+//
+// The simulation does not perform the API actions; it only checks the authorization
+// to determine if the simulated policies allow or deny the actions.
+//
+// Note: This API discloses information about the permissions granted to other
+// users. If you do not want users to see other users' permissions, then consider
+// allowing them to use SimulateCustomPolicy instead.
+//
+// Context keys are variables maintained by AWS and its services that provide
+// details about the context of an API query request. You can use the Condition
+// element of an IAM policy to evaluate context keys. To get the list of context
+// keys that the policies require for correct simulation, use GetContextKeysForPrincipalPolicy.
+//
+// If the output is long, you can use the MaxItems and Marker parameters to
+// paginate the results.
+func (c *IAM) SimulatePrincipalPolicy(input *SimulatePrincipalPolicyInput) (*SimulatePolicyResponse, error) {
+	req, out := c.SimulatePrincipalPolicyRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// SimulatePrincipalPolicyPages iterates over the pages of a SimulatePrincipalPolicy operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See SimulatePrincipalPolicy method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
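+//
+// As a hedged sketch of the call itself (the ARN and action name are made-up
+// values), a minimal simulation request and a walk over its results could
+// look along these lines:
+//
+//    out, err := client.SimulatePrincipalPolicy(&SimulatePrincipalPolicyInput{
+//        PolicySourceArn: aws.String("arn:aws:iam::123456789012:user/alice"),
+//        ActionNames:     []*string{aws.String("s3:ListBucket")},
+//    })
+//    if err == nil {
+//        for _, r := range out.EvaluationResults {
+//            fmt.Println(*r.EvalActionName, *r.EvalDecision)
+//        }
+//    }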
+//
+// // Example iterating over at most 3 pages of a SimulatePrincipalPolicy operation.
+// pageNum := 0
+// err := client.SimulatePrincipalPolicyPages(params,
+// func(page *SimulatePolicyResponse, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *IAM) SimulatePrincipalPolicyPages(input *SimulatePrincipalPolicyInput, fn func(p *SimulatePolicyResponse, lastPage bool) (shouldContinue bool)) error {
+ page, _ := c.SimulatePrincipalPolicyRequest(input)
+ page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
+ return fn(p.(*SimulatePolicyResponse), lastPage)
+ })
+}
+
+const opUpdateAccessKey = "UpdateAccessKey"
+
+// UpdateAccessKeyRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateAccessKey operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the UpdateAccessKey method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the UpdateAccessKeyRequest method.
+// req, resp := client.UpdateAccessKeyRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *IAM) UpdateAccessKeyRequest(input *UpdateAccessKeyInput) (req *request.Request, output *UpdateAccessKeyOutput) {
+ op := &request.Operation{
+ Name: opUpdateAccessKey,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &UpdateAccessKeyInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &UpdateAccessKeyOutput{}
+ req.Data = output
+ return
+}
+
+// Changes the status of the specified access key from Active to Inactive, or
+// vice versa. This action can be used to disable a user's key as part of a
+// key rotation workflow.
+//
+// If the UserName field is not specified, the UserName is determined implicitly
+// based on the AWS access key ID used to sign the request. Because this action
+// works for access keys under the AWS account, you can use this action to manage
+// root credentials even if the AWS account has no associated users.
+//
+// For information about rotating keys, see Managing Keys and Certificates
+// (http://docs.aws.amazon.com/IAM/latest/UserGuide/ManagingCredentials.html)
+// in the IAM User Guide.
+func (c *IAM) UpdateAccessKey(input *UpdateAccessKeyInput) (*UpdateAccessKeyOutput, error) {
+ req, out := c.UpdateAccessKeyRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opUpdateAccountPasswordPolicy = "UpdateAccountPasswordPolicy"
+
+// UpdateAccountPasswordPolicyRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateAccountPasswordPolicy operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateAccountPasswordPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateAccountPasswordPolicyRequest method. +// req, resp := client.UpdateAccountPasswordPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) UpdateAccountPasswordPolicyRequest(input *UpdateAccountPasswordPolicyInput) (req *request.Request, output *UpdateAccountPasswordPolicyOutput) { + op := &request.Operation{ + Name: opUpdateAccountPasswordPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateAccountPasswordPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateAccountPasswordPolicyOutput{} + req.Data = output + return +} + +// Updates the password policy settings for the AWS account. +// +// This action does not support partial updates. No parameters are required, +// but if you do not specify a parameter, that parameter's value reverts to +// its default value. See the Request Parameters section for each parameter's +// default value. +// +// For more information about using a password policy, see Managing an IAM +// Password Policy (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_ManagingPasswordPolicies.html) +// in the IAM User Guide. +func (c *IAM) UpdateAccountPasswordPolicy(input *UpdateAccountPasswordPolicyInput) (*UpdateAccountPasswordPolicyOutput, error) { + req, out := c.UpdateAccountPasswordPolicyRequest(input) + err := req.Send() + return out, err +} + +const opUpdateAssumeRolePolicy = "UpdateAssumeRolePolicy" + +// UpdateAssumeRolePolicyRequest generates a "aws/request.Request" representing the +// client's request for the UpdateAssumeRolePolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateAssumeRolePolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateAssumeRolePolicyRequest method. 
+// req, resp := client.UpdateAssumeRolePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) UpdateAssumeRolePolicyRequest(input *UpdateAssumeRolePolicyInput) (req *request.Request, output *UpdateAssumeRolePolicyOutput) { + op := &request.Operation{ + Name: opUpdateAssumeRolePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateAssumeRolePolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateAssumeRolePolicyOutput{} + req.Data = output + return +} + +// Updates the policy that grants an IAM entity permission to assume a role. +// This is typically referred to as the "role trust policy". For more information +// about roles, go to Using Roles to Delegate Permissions and Federate Identities +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html). +func (c *IAM) UpdateAssumeRolePolicy(input *UpdateAssumeRolePolicyInput) (*UpdateAssumeRolePolicyOutput, error) { + req, out := c.UpdateAssumeRolePolicyRequest(input) + err := req.Send() + return out, err +} + +const opUpdateGroup = "UpdateGroup" + +// UpdateGroupRequest generates a "aws/request.Request" representing the +// client's request for the UpdateGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateGroupRequest method. +// req, resp := client.UpdateGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) UpdateGroupRequest(input *UpdateGroupInput) (req *request.Request, output *UpdateGroupOutput) { + op := &request.Operation{ + Name: opUpdateGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateGroupOutput{} + req.Data = output + return +} + +// Updates the name and/or the path of the specified IAM group. +// +// You should understand the implications of changing a group's path or name. +// For more information, see Renaming Users and Groups (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_WorkingWithGroupsAndUsers.html) +// in the IAM User Guide. +// +// To change an IAM group name the requester must have appropriate permissions +// on both the source object and the target object. For example, to change "Managers" +// to "MGRs", the entity making the request must have permission on both "Managers" +// and "MGRs", or must have permission on all (*). For more information about +// permissions, see Permissions and Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/PermissionsAndPolicies.html). 
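+//
+// A minimal usage sketch (not part of the generated documentation): renaming
+// a group with UpdateGroup. The group names and the configured session are
+// illustrative assumptions.
+//
+// svc := iam.New(session.New())
+// _, err := svc.UpdateGroup(&iam.UpdateGroupInput{
+// GroupName: aws.String("Managers"), // existing name, assumed
+// NewGroupName: aws.String("MGRs"), // new name, assumed
+// })
+// if err != nil {
+// fmt.Println(err)
+// }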
+func (c *IAM) UpdateGroup(input *UpdateGroupInput) (*UpdateGroupOutput, error) { + req, out := c.UpdateGroupRequest(input) + err := req.Send() + return out, err +} + +const opUpdateLoginProfile = "UpdateLoginProfile" + +// UpdateLoginProfileRequest generates a "aws/request.Request" representing the +// client's request for the UpdateLoginProfile operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateLoginProfile method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateLoginProfileRequest method. +// req, resp := client.UpdateLoginProfileRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) UpdateLoginProfileRequest(input *UpdateLoginProfileInput) (req *request.Request, output *UpdateLoginProfileOutput) { + op := &request.Operation{ + Name: opUpdateLoginProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateLoginProfileInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateLoginProfileOutput{} + req.Data = output + return +} + +// Changes the password for the specified IAM user. +// +// IAM users can change their own passwords by calling ChangePassword. For +// more information about modifying passwords, see Managing Passwords (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_ManagingLogins.html) +// in the IAM User Guide. +func (c *IAM) UpdateLoginProfile(input *UpdateLoginProfileInput) (*UpdateLoginProfileOutput, error) { + req, out := c.UpdateLoginProfileRequest(input) + err := req.Send() + return out, err +} + +const opUpdateOpenIDConnectProviderThumbprint = "UpdateOpenIDConnectProviderThumbprint" + +// UpdateOpenIDConnectProviderThumbprintRequest generates a "aws/request.Request" representing the +// client's request for the UpdateOpenIDConnectProviderThumbprint operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateOpenIDConnectProviderThumbprint method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateOpenIDConnectProviderThumbprintRequest method. 
+// req, resp := client.UpdateOpenIDConnectProviderThumbprintRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *IAM) UpdateOpenIDConnectProviderThumbprintRequest(input *UpdateOpenIDConnectProviderThumbprintInput) (req *request.Request, output *UpdateOpenIDConnectProviderThumbprintOutput) {
+ op := &request.Operation{
+ Name: opUpdateOpenIDConnectProviderThumbprint,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &UpdateOpenIDConnectProviderThumbprintInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &UpdateOpenIDConnectProviderThumbprintOutput{}
+ req.Data = output
+ return
+}
+
+// Replaces the existing list of server certificate thumbprints associated with
+// an OpenID Connect (OIDC) provider resource object with a new list of thumbprints.
+//
+// The list that you pass with this action completely replaces the existing
+// list of thumbprints. (The lists are not merged.)
+//
+// Typically, you need to update a thumbprint only when the identity provider's
+// certificate changes, which occurs rarely. However, if the provider's certificate
+// does change, any attempt to assume an IAM role that specifies the OIDC provider
+// as a principal fails until the certificate thumbprint is updated.
+//
+// Because trust for the OIDC provider is ultimately derived from the provider's
+// certificate and is validated by the thumbprint, it is a best practice to
+// limit access to the UpdateOpenIDConnectProviderThumbprint action to highly
+// privileged users.
+func (c *IAM) UpdateOpenIDConnectProviderThumbprint(input *UpdateOpenIDConnectProviderThumbprintInput) (*UpdateOpenIDConnectProviderThumbprintOutput, error) {
+ req, out := c.UpdateOpenIDConnectProviderThumbprintRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opUpdateSAMLProvider = "UpdateSAMLProvider"
+
+// UpdateSAMLProviderRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateSAMLProvider operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the UpdateSAMLProvider method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the UpdateSAMLProviderRequest method.
+// req, resp := client.UpdateSAMLProviderRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *IAM) UpdateSAMLProviderRequest(input *UpdateSAMLProviderInput) (req *request.Request, output *UpdateSAMLProviderOutput) {
+ op := &request.Operation{
+ Name: opUpdateSAMLProvider,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &UpdateSAMLProviderInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &UpdateSAMLProviderOutput{}
+ req.Data = output
+ return
+}
+
+// Updates the metadata document for an existing SAML provider resource object.
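+//
+// A minimal usage sketch (not part of the generated documentation): the ARN
+// is illustrative and metadataXML is assumed to hold the identity provider's
+// new metadata document.
+//
+// svc := iam.New(session.New())
+// resp, err := svc.UpdateSAMLProvider(&iam.UpdateSAMLProviderInput{
+// SAMLMetadataDocument: aws.String(metadataXML),
+// SAMLProviderArn: aws.String("arn:aws:iam::123456789012:saml-provider/MyProvider"),
+// })
+// if err == nil {
+// fmt.Println(resp.SAMLProviderArn)
+// }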
+//
+// This operation requires Signature Version 4 (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html).
+func (c *IAM) UpdateSAMLProvider(input *UpdateSAMLProviderInput) (*UpdateSAMLProviderOutput, error) {
+ req, out := c.UpdateSAMLProviderRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opUpdateSSHPublicKey = "UpdateSSHPublicKey"
+
+// UpdateSSHPublicKeyRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateSSHPublicKey operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the UpdateSSHPublicKey method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the UpdateSSHPublicKeyRequest method.
+// req, resp := client.UpdateSSHPublicKeyRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *IAM) UpdateSSHPublicKeyRequest(input *UpdateSSHPublicKeyInput) (req *request.Request, output *UpdateSSHPublicKeyOutput) {
+ op := &request.Operation{
+ Name: opUpdateSSHPublicKey,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &UpdateSSHPublicKeyInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &UpdateSSHPublicKeyOutput{}
+ req.Data = output
+ return
+}
+
+// Sets the status of an IAM user's SSH public key to active or inactive. SSH
+// public keys that are inactive cannot be used for authentication. This action
+// can be used to disable a user's SSH public key as part of a key rotation
+// workflow.
+//
+// The SSH public key affected by this action is used only for authenticating
+// the associated IAM user to an AWS CodeCommit repository. For more information
+// about using SSH keys to authenticate to an AWS CodeCommit repository, see
+// Set up AWS CodeCommit for SSH Connections (http://docs.aws.amazon.com/codecommit/latest/userguide/setting-up-credentials-ssh.html)
+// in the AWS CodeCommit User Guide.
+func (c *IAM) UpdateSSHPublicKey(input *UpdateSSHPublicKeyInput) (*UpdateSSHPublicKeyOutput, error) {
+ req, out := c.UpdateSSHPublicKeyRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opUpdateServerCertificate = "UpdateServerCertificate"
+
+// UpdateServerCertificateRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateServerCertificate operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the UpdateServerCertificate method directly
+// instead.
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateServerCertificateRequest method. +// req, resp := client.UpdateServerCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) UpdateServerCertificateRequest(input *UpdateServerCertificateInput) (req *request.Request, output *UpdateServerCertificateOutput) { + op := &request.Operation{ + Name: opUpdateServerCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateServerCertificateInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateServerCertificateOutput{} + req.Data = output + return +} + +// Updates the name and/or the path of the specified server certificate stored +// in IAM. +// +// For more information about working with server certificates, including a +// list of AWS services that can use the server certificates that you manage +// with IAM, go to Working with Server Certificates (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html) +// in the IAM User Guide. +// +// You should understand the implications of changing a server certificate's +// path or name. For more information, see Renaming a Server Certificate (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs_manage.html#RenamingServerCerts) +// in the IAM User Guide. +// +// To change a server certificate name the requester must have appropriate +// permissions on both the source object and the target object. For example, +// to change the name from "ProductionCert" to "ProdCert", the entity making +// the request must have permission on "ProductionCert" and "ProdCert", or must +// have permission on all (*). For more information about permissions, see Access +// Management (http://docs.aws.amazon.com/IAM/latest/UserGuide/access.html) +// in the IAM User Guide. +func (c *IAM) UpdateServerCertificate(input *UpdateServerCertificateInput) (*UpdateServerCertificateOutput, error) { + req, out := c.UpdateServerCertificateRequest(input) + err := req.Send() + return out, err +} + +const opUpdateSigningCertificate = "UpdateSigningCertificate" + +// UpdateSigningCertificateRequest generates a "aws/request.Request" representing the +// client's request for the UpdateSigningCertificate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateSigningCertificate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateSigningCertificateRequest method. 
+// req, resp := client.UpdateSigningCertificateRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *IAM) UpdateSigningCertificateRequest(input *UpdateSigningCertificateInput) (req *request.Request, output *UpdateSigningCertificateOutput) {
+ op := &request.Operation{
+ Name: opUpdateSigningCertificate,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &UpdateSigningCertificateInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &UpdateSigningCertificateOutput{}
+ req.Data = output
+ return
+}
+
+// Changes the status of the specified user signing certificate from active
+// to disabled, or vice versa. This action can be used to disable an IAM user's
+// signing certificate as part of a certificate rotation workflow.
+//
+// If the UserName field is not specified, the UserName is determined implicitly
+// based on the AWS access key ID used to sign the request. Because this action
+// works for access keys under the AWS account, you can use this action to manage
+// root credentials even if the AWS account has no associated users.
+func (c *IAM) UpdateSigningCertificate(input *UpdateSigningCertificateInput) (*UpdateSigningCertificateOutput, error) {
+ req, out := c.UpdateSigningCertificateRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opUpdateUser = "UpdateUser"
+
+// UpdateUserRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateUser operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the UpdateUser method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the UpdateUserRequest method.
+// req, resp := client.UpdateUserRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *IAM) UpdateUserRequest(input *UpdateUserInput) (req *request.Request, output *UpdateUserOutput) {
+ op := &request.Operation{
+ Name: opUpdateUser,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &UpdateUserInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &UpdateUserOutput{}
+ req.Data = output
+ return
+}
+
+// Updates the name and/or the path of the specified IAM user.
+//
+// You should understand the implications of changing an IAM user's path
+// or name. For more information, see Renaming an IAM User (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_manage.html#id_users_renaming)
+// and Renaming an IAM Group (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_groups_manage_rename.html)
+// in the IAM User Guide.
+//
+// To change a user name the requester must have appropriate permissions
+// on both the source object and the target object.
For example, to change Bob +// to Robert, the entity making the request must have permission on Bob and +// Robert, or must have permission on all (*). For more information about permissions, +// see Permissions and Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/PermissionsAndPolicies.html). +func (c *IAM) UpdateUser(input *UpdateUserInput) (*UpdateUserOutput, error) { + req, out := c.UpdateUserRequest(input) + err := req.Send() + return out, err +} + +const opUploadSSHPublicKey = "UploadSSHPublicKey" + +// UploadSSHPublicKeyRequest generates a "aws/request.Request" representing the +// client's request for the UploadSSHPublicKey operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UploadSSHPublicKey method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UploadSSHPublicKeyRequest method. +// req, resp := client.UploadSSHPublicKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) UploadSSHPublicKeyRequest(input *UploadSSHPublicKeyInput) (req *request.Request, output *UploadSSHPublicKeyOutput) { + op := &request.Operation{ + Name: opUploadSSHPublicKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UploadSSHPublicKeyInput{} + } + + req = c.newRequest(op, input, output) + output = &UploadSSHPublicKeyOutput{} + req.Data = output + return +} + +// Uploads an SSH public key and associates it with the specified IAM user. +// +// The SSH public key uploaded by this action can be used only for authenticating +// the associated IAM user to an AWS CodeCommit repository. For more information +// about using SSH keys to authenticate to an AWS CodeCommit repository, see +// Set up AWS CodeCommit for SSH Connections (http://docs.aws.amazon.com/codecommit/latest/userguide/setting-up-credentials-ssh.html) +// in the AWS CodeCommit User Guide. +func (c *IAM) UploadSSHPublicKey(input *UploadSSHPublicKeyInput) (*UploadSSHPublicKeyOutput, error) { + req, out := c.UploadSSHPublicKeyRequest(input) + err := req.Send() + return out, err +} + +const opUploadServerCertificate = "UploadServerCertificate" + +// UploadServerCertificateRequest generates a "aws/request.Request" representing the +// client's request for the UploadServerCertificate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UploadServerCertificate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UploadServerCertificateRequest method. 
+// req, resp := client.UploadServerCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IAM) UploadServerCertificateRequest(input *UploadServerCertificateInput) (req *request.Request, output *UploadServerCertificateOutput) { + op := &request.Operation{ + Name: opUploadServerCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UploadServerCertificateInput{} + } + + req = c.newRequest(op, input, output) + output = &UploadServerCertificateOutput{} + req.Data = output + return +} + +// Uploads a server certificate entity for the AWS account. The server certificate +// entity includes a public key certificate, a private key, and an optional +// certificate chain, which should all be PEM-encoded. +// +// For more information about working with server certificates, including a +// list of AWS services that can use the server certificates that you manage +// with IAM, go to Working with Server Certificates (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html) +// in the IAM User Guide. +// +// For information about the number of server certificates you can upload, +// see Limitations on IAM Entities and Objects (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html) +// in the IAM User Guide. +// +// Because the body of the public key certificate, private key, and the certificate +// chain can be large, you should use POST rather than GET when calling UploadServerCertificate. +// For information about setting up signatures and authorization through the +// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) +// in the AWS General Reference. For general information about using the Query +// API with IAM, go to Calling the API by Making HTTP Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/programming.html) +// in the IAM User Guide. +func (c *IAM) UploadServerCertificate(input *UploadServerCertificateInput) (*UploadServerCertificateOutput, error) { + req, out := c.UploadServerCertificateRequest(input) + err := req.Send() + return out, err +} + +const opUploadSigningCertificate = "UploadSigningCertificate" + +// UploadSigningCertificateRequest generates a "aws/request.Request" representing the +// client's request for the UploadSigningCertificate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UploadSigningCertificate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UploadSigningCertificateRequest method. 
+// req, resp := client.UploadSigningCertificateRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *IAM) UploadSigningCertificateRequest(input *UploadSigningCertificateInput) (req *request.Request, output *UploadSigningCertificateOutput) {
+ op := &request.Operation{
+ Name: opUploadSigningCertificate,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &UploadSigningCertificateInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &UploadSigningCertificateOutput{}
+ req.Data = output
+ return
+}
+
+// Uploads an X.509 signing certificate and associates it with the specified
+// IAM user. Some AWS services use X.509 signing certificates to validate requests
+// that are signed with a corresponding private key. When you upload the certificate,
+// its default status is Active.
+//
+// If the UserName field is not specified, the IAM user name is determined
+// implicitly based on the AWS access key ID used to sign the request. Because
+// this action works for access keys under the AWS account, you can use this
+// action to manage root credentials even if the AWS account has no associated
+// users.
+//
+// Because the body of an X.509 certificate can be large, you should use POST
+// rather than GET when calling UploadSigningCertificate. For information about
+// setting up signatures and authorization through the API, go to Signing AWS
+// API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html)
+// in the AWS General Reference. For general information about using the Query
+// API with IAM, go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
+// in the IAM User Guide.
+func (c *IAM) UploadSigningCertificate(input *UploadSigningCertificateInput) (*UploadSigningCertificateOutput, error) {
+ req, out := c.UploadSigningCertificateRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// Contains information about an AWS access key.
+//
+// This data type is used as a response element in the CreateAccessKey and
+// ListAccessKeys actions.
+//
+// The SecretAccessKey value is returned only in response to CreateAccessKey.
+// You can get a secret access key only when you first create an access key;
+// you cannot recover the secret access key later. If you lose a secret access
+// key, you must create a new access key.
+type AccessKey struct {
+ _ struct{} `type:"structure"`
+
+ // The ID for this access key.
+ AccessKeyId *string `min:"16" type:"string" required:"true"`
+
+ // The date when the access key was created.
+ CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // The secret key used to sign requests.
+ SecretAccessKey *string `type:"string" required:"true"`
+
+ // The status of the access key. Active means the key is valid for API calls,
+ // while Inactive means it is not.
+ Status *string `type:"string" required:"true" enum:"statusType"`
+
+ // The name of the IAM user that the access key is associated with.
+ UserName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AccessKey) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AccessKey) GoString() string {
+ return s.String()
+}
+
+// Contains information about the last time an AWS access key was used.
+//
+// This data type is used as a response element in the GetAccessKeyLastUsed
+// action.
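+//
+// A minimal usage sketch (not part of the generated documentation): reading
+// this element from a GetAccessKeyLastUsed response. The access key ID and
+// the configured session are illustrative assumptions.
+//
+// svc := iam.New(session.New())
+// resp, err := svc.GetAccessKeyLastUsed(&iam.GetAccessKeyLastUsedInput{
+// AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"),
+// })
+// if err == nil {
+// fmt.Println(resp.AccessKeyLastUsed)
+// }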
+type AccessKeyLastUsed struct {
+ _ struct{} `type:"structure"`
+
+ // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601),
+ // when the access key was most recently used. This field is null when:
+ //
+ // The user does not have an access key.
+ //
+ // An access key exists but has never been used, at least not since IAM started
+ // tracking this information on April 22nd, 2015.
+ //
+ // There is no sign-in data associated with the user.
+ LastUsedDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
+
+ // The AWS region where this access key was most recently used. This field is
+ // null when:
+ //
+ // The user does not have an access key.
+ //
+ // An access key exists but has never been used, at least not since IAM started
+ // tracking this information on April 22nd, 2015.
+ //
+ // There is no sign-in data associated with the user.
+ //
+ // For more information about AWS regions, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html)
+ // in the Amazon Web Services General Reference.
+ Region *string `type:"string" required:"true"`
+
+ // The name of the AWS service with which this access key was most recently
+ // used. This field is null when:
+ //
+ // The user does not have an access key.
+ //
+ // An access key exists but has never been used, at least not since IAM started
+ // tracking this information on April 22nd, 2015.
+ //
+ // There is no sign-in data associated with the user.
+ ServiceName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AccessKeyLastUsed) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AccessKeyLastUsed) GoString() string {
+ return s.String()
+}
+
+// Contains information about an AWS access key, without its secret key.
+//
+// This data type is used as a response element in the ListAccessKeys action.
+type AccessKeyMetadata struct {
+ _ struct{} `type:"structure"`
+
+ // The ID for this access key.
+ AccessKeyId *string `min:"16" type:"string"`
+
+ // The date when the access key was created.
+ CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // The status of the access key. Active means the key is valid for API calls;
+ // Inactive means it is not.
+ Status *string `type:"string" enum:"statusType"`
+
+ // The name of the IAM user that the key is associated with.
+ UserName *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s AccessKeyMetadata) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AccessKeyMetadata) GoString() string {
+ return s.String()
+}
+
+type AddClientIDToOpenIDConnectProviderInput struct {
+ _ struct{} `type:"structure"`
+
+ // The client ID (also known as audience) to add to the IAM OpenID Connect provider
+ // resource.
+ ClientID *string `min:"1" type:"string" required:"true"`
+
+ // The Amazon Resource Name (ARN) of the IAM OpenID Connect (OIDC) provider
+ // resource to add the client ID to. You can get a list of OIDC provider ARNs
+ // by using the ListOpenIDConnectProviders action.
+ OpenIDConnectProviderArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s AddClientIDToOpenIDConnectProviderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddClientIDToOpenIDConnectProviderInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddClientIDToOpenIDConnectProviderInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddClientIDToOpenIDConnectProviderInput"} + if s.ClientID == nil { + invalidParams.Add(request.NewErrParamRequired("ClientID")) + } + if s.ClientID != nil && len(*s.ClientID) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientID", 1)) + } + if s.OpenIDConnectProviderArn == nil { + invalidParams.Add(request.NewErrParamRequired("OpenIDConnectProviderArn")) + } + if s.OpenIDConnectProviderArn != nil && len(*s.OpenIDConnectProviderArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("OpenIDConnectProviderArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type AddClientIDToOpenIDConnectProviderOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddClientIDToOpenIDConnectProviderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddClientIDToOpenIDConnectProviderOutput) GoString() string { + return s.String() +} + +type AddRoleToInstanceProfileInput struct { + _ struct{} `type:"structure"` + + // The name of the instance profile to update. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + InstanceProfileName *string `min:"1" type:"string" required:"true"` + + // The name of the role to add. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AddRoleToInstanceProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddRoleToInstanceProfileInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AddRoleToInstanceProfileInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddRoleToInstanceProfileInput"} + if s.InstanceProfileName == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceProfileName")) + } + if s.InstanceProfileName != nil && len(*s.InstanceProfileName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InstanceProfileName", 1)) + } + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type AddRoleToInstanceProfileOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddRoleToInstanceProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddRoleToInstanceProfileOutput) GoString() string { + return s.String() +} + +type AddUserToGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the group to update. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + GroupName *string `min:"1" type:"string" required:"true"` + + // The name of the user to add. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AddUserToGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddUserToGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddUserToGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddUserToGroupInput"} + if s.GroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GroupName")) + } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type AddUserToGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddUserToGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddUserToGroupOutput) GoString() string { + return s.String() +} + +type AttachGroupPolicyInput struct { + _ struct{} `type:"structure"` + + // The name (friendly name, not ARN) of the group to attach the policy to. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. 
You can also include any of the following characters: =,.@- + GroupName *string `min:"1" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the IAM policy you want to attach. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachGroupPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachGroupPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AttachGroupPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AttachGroupPolicyInput"} + if s.GroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GroupName")) + } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) + } + if s.PolicyArn == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyArn")) + } + if s.PolicyArn != nil && len(*s.PolicyArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PolicyArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type AttachGroupPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AttachGroupPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachGroupPolicyOutput) GoString() string { + return s.String() +} + +type AttachRolePolicyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM policy you want to attach. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string" required:"true"` + + // The name (friendly name, not ARN) of the role to attach the policy to. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachRolePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachRolePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AttachRolePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AttachRolePolicyInput"} + if s.PolicyArn == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyArn")) + } + if s.PolicyArn != nil && len(*s.PolicyArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PolicyArn", 20)) + } + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type AttachRolePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AttachRolePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachRolePolicyOutput) GoString() string { + return s.String() +} + +type AttachUserPolicyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM policy you want to attach. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string" required:"true"` + + // The name (friendly name, not ARN) of the IAM user to attach the policy to. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachUserPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachUserPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AttachUserPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AttachUserPolicyInput"} + if s.PolicyArn == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyArn")) + } + if s.PolicyArn != nil && len(*s.PolicyArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PolicyArn", 20)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type AttachUserPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AttachUserPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachUserPolicyOutput) GoString() string { + return s.String() +} + +// Contains information about an attached policy. +// +// An attached policy is a managed policy that has been attached to a user, +// group, or role. This data type is used as a response element in the ListAttachedGroupPolicies, +// ListAttachedRolePolicies, ListAttachedUserPolicies, and GetAccountAuthorizationDetails +// actions. 
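+//
+// A minimal usage sketch (not part of the generated documentation): reading
+// AttachedPolicy elements from a ListAttachedUserPolicies response. The user
+// name and the configured session are illustrative assumptions.
+//
+// svc := iam.New(session.New())
+// resp, err := svc.ListAttachedUserPolicies(&iam.ListAttachedUserPoliciesInput{
+// UserName: aws.String("Alice"),
+// })
+// if err == nil {
+// for _, p := range resp.AttachedPolicies {
+// fmt.Println(*p.PolicyName, *p.PolicyArn)
+// }
+// }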
+// +// For more information about managed policies, refer to Managed Policies and +// Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the Using IAM guide. +type AttachedPolicy struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string"` + + // The friendly name of the attached policy. + PolicyName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s AttachedPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachedPolicy) GoString() string { + return s.String() +} + +type ChangePasswordInput struct { + _ struct{} `type:"structure"` + + // The new password. The new password must conform to the AWS account's password + // policy, if one exists. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of almost any printable ASCII character + // from the space (\u0020) through the end of the ASCII character range (\u00FF). + // You can also include the tab (\u0009), line feed (\u000A), and carriage return + // (\u000D) characters. Although any of these characters are valid in a password, + // note that many tools, such as the AWS Management Console, might restrict + // the ability to enter certain characters because they have special meaning + // within that tool. + NewPassword *string `min:"1" type:"string" required:"true"` + + // The IAM user's current password. + OldPassword *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ChangePasswordInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangePasswordInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ChangePasswordInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ChangePasswordInput"} + if s.NewPassword == nil { + invalidParams.Add(request.NewErrParamRequired("NewPassword")) + } + if s.NewPassword != nil && len(*s.NewPassword) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NewPassword", 1)) + } + if s.OldPassword == nil { + invalidParams.Add(request.NewErrParamRequired("OldPassword")) + } + if s.OldPassword != nil && len(*s.OldPassword) < 1 { + invalidParams.Add(request.NewErrParamMinLen("OldPassword", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ChangePasswordOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ChangePasswordOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangePasswordOutput) GoString() string { + return s.String() +} + +// Contains information about a condition context key. It includes the name +// of the key and specifies the value (or values, if the context key supports +// multiple values) to use in the simulation. This information is used when +// evaluating the Condition elements of the input policies. 
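+//
+// A minimal usage sketch (not part of the generated documentation): building
+// a ContextEntry for a simulation. The key, type, and value are illustrative;
+// "ip" is one of the ContextKeyTypeEnum values.
+//
+// entry := &iam.ContextEntry{
+// ContextKeyName: aws.String("aws:SourceIp"),
+// ContextKeyType: aws.String("ip"),
+// ContextKeyValues: []*string{aws.String("192.0.2.0")},
+// }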
+//
+// This data type is used as an input parameter to SimulateCustomPolicy and
+// SimulatePrincipalPolicy.
+type ContextEntry struct {
+ _ struct{} `type:"structure"`
+
+ // The full name of a condition context key, including the service prefix. For
+ // example, aws:SourceIp or s3:VersionId.
+ ContextKeyName *string `min:"5" type:"string"`
+
+ // The data type of the value (or values) specified in the ContextKeyValues
+ // parameter.
+ ContextKeyType *string `type:"string" enum:"ContextKeyTypeEnum"`
+
+ // The value (or values, if the condition context key supports multiple values)
+ // to provide to the simulation for use when the key is referenced by a Condition
+ // element in an input policy.
+ ContextKeyValues []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s ContextEntry) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ContextEntry) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ContextEntry) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ContextEntry"}
+ if s.ContextKeyName != nil && len(*s.ContextKeyName) < 5 {
+ invalidParams.Add(request.NewErrParamMinLen("ContextKeyName", 5))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type CreateAccessKeyInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the IAM user that the new key will belong to.
+ //
+ // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is
+ // a string of characters consisting of upper and lowercase alphanumeric characters
+ // with no spaces. You can also include any of the following characters: =,.@-
+ UserName *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateAccessKeyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateAccessKeyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateAccessKeyInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateAccessKeyInput"}
+ if s.UserName != nil && len(*s.UserName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("UserName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Contains the response to a successful CreateAccessKey request.
+type CreateAccessKeyOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A structure with details about the access key.
+ AccessKey *AccessKey `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateAccessKeyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateAccessKeyOutput) GoString() string {
+ return s.String()
+}
+
+type CreateAccountAliasInput struct {
+ _ struct{} `type:"structure"`
+
+ // The account alias to create.
+ //
+ // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is
+ // a string of characters consisting of lowercase letters, digits, and dashes.
+ // You cannot start or finish with a dash, nor can you have two dashes in a
+ // row.
+ AccountAlias *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateAccountAliasInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAccountAliasInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateAccountAliasInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateAccountAliasInput"} + if s.AccountAlias == nil { + invalidParams.Add(request.NewErrParamRequired("AccountAlias")) + } + if s.AccountAlias != nil && len(*s.AccountAlias) < 3 { + invalidParams.Add(request.NewErrParamMinLen("AccountAlias", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateAccountAliasOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateAccountAliasOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAccountAliasOutput) GoString() string { + return s.String() +} + +type CreateGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the group to create. Do not include the path in this value. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + GroupName *string `min:"1" type:"string" required:"true"` + + // The path to the group. For more information about paths, see IAM Identifiers + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the IAM User Guide. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/). + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes, containing any + // ASCII character from the ! (\u0021) thru the DEL character (\u007F), including + // most punctuation characters, digits, and upper and lowercased letters. + Path *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateGroupInput"} + if s.GroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GroupName")) + } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) + } + if s.Path != nil && len(*s.Path) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Path", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful CreateGroup request. +type CreateGroupOutput struct { + _ struct{} `type:"structure"` + + // A structure containing details about the new group. 
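+ //
+ // Sketch of reading this field after a successful call (svc is an assumed
+ // *iam.IAM client; "Admins" is an illustrative group name):
+ //
+ //    resp, err := svc.CreateGroup(&iam.CreateGroupInput{
+ //        GroupName: aws.String("Admins"),
+ //    })
+ //    if err == nil {
+ //        fmt.Println(aws.StringValue(resp.Group.Arn))
+ //    }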
+ Group *Group `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateGroupOutput) GoString() string { + return s.String() +} + +type CreateInstanceProfileInput struct { + _ struct{} `type:"structure"` + + // The name of the instance profile to create. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + InstanceProfileName *string `min:"1" type:"string" required:"true"` + + // The path to the instance profile. For more information about paths, see IAM + // Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the IAM User Guide. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/). + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes, containing any + // ASCII character from the ! (\u0021) thru the DEL character (\u007F), including + // most punctuation characters, digits, and upper and lowercased letters. + Path *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateInstanceProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInstanceProfileInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateInstanceProfileInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateInstanceProfileInput"} + if s.InstanceProfileName == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceProfileName")) + } + if s.InstanceProfileName != nil && len(*s.InstanceProfileName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InstanceProfileName", 1)) + } + if s.Path != nil && len(*s.Path) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Path", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful CreateInstanceProfile request. +type CreateInstanceProfileOutput struct { + _ struct{} `type:"structure"` + + // A structure containing details about the new instance profile. + InstanceProfile *InstanceProfile `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateInstanceProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInstanceProfileOutput) GoString() string { + return s.String() +} + +type CreateLoginProfileInput struct { + _ struct{} `type:"structure"` + + // The new password for the user. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of almost any printable ASCII character + // from the space (\u0020) through the end of the ASCII character range (\u00FF). + // You can also include the tab (\u0009), line feed (\u000A), and carriage return + // (\u000D) characters. 
Although any of these characters are valid in a password, + // note that many tools, such as the AWS Management Console, might restrict + // the ability to enter certain characters because they have special meaning + // within that tool. + Password *string `min:"1" type:"string" required:"true"` + + // Specifies whether the user is required to set a new password on next sign-in. + PasswordResetRequired *bool `type:"boolean"` + + // The name of the IAM user to create a password for. The user must already + // exist. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateLoginProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLoginProfileInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateLoginProfileInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateLoginProfileInput"} + if s.Password == nil { + invalidParams.Add(request.NewErrParamRequired("Password")) + } + if s.Password != nil && len(*s.Password) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Password", 1)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful CreateLoginProfile request. +type CreateLoginProfileOutput struct { + _ struct{} `type:"structure"` + + // A structure containing the user name and password create date. + LoginProfile *LoginProfile `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateLoginProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLoginProfileOutput) GoString() string { + return s.String() +} + +type CreateOpenIDConnectProviderInput struct { + _ struct{} `type:"structure"` + + // A list of client IDs (also known as audiences). When a mobile or web app + // registers with an OpenID Connect provider, they establish a value that identifies + // the application. (This is the value that's sent as the client_id parameter + // on OAuth requests.) + // + // You can register multiple client IDs with the same provider. For example, + // you might have multiple applications that use the same OIDC provider. You + // cannot register more than 100 client IDs with a single IAM OIDC provider. + // + // There is no defined format for a client ID. The CreateOpenIDConnectProviderRequest + // action accepts client IDs up to 255 characters long. + ClientIDList []*string `type:"list"` + + // A list of server certificate thumbprints for the OpenID Connect (OIDC) identity + // provider's server certificate(s). Typically this list includes only one entry. + // However, IAM lets you have up to five thumbprints for an OIDC provider. This + // lets you maintain multiple thumbprints if the identity provider is rotating + // certificates. 
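+ //
+ // A call sketch (every value below, including the svc client and the
+ // thumbprint, is an illustrative assumption):
+ //
+ //    _, err := svc.CreateOpenIDConnectProvider(&iam.CreateOpenIDConnectProviderInput{
+ //        Url:            aws.String("https://server.example.org"),
+ //        ClientIDList:   []*string{aws.String("my-application-id")},
+ //        ThumbprintList: []*string{aws.String("3768084dfb3d2b68b7897bf5f565da8efEXAMPLE")},
+ //    })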
+ // + // The server certificate thumbprint is the hex-encoded SHA-1 hash value of + // the X.509 certificate used by the domain where the OpenID Connect provider + // makes its keys available. It is always a 40-character string. + // + // You must provide at least one thumbprint when creating an IAM OIDC provider. + // For example, if the OIDC provider is server.example.com and the provider + // stores its keys at "https://keys.server.example.com/openid-connect", the + // thumbprint string would be the hex-encoded SHA-1 hash value of the certificate + // used by https://keys.server.example.com. + // + // For more information about obtaining the OIDC provider's thumbprint, see + // Obtaining the Thumbprint for an OpenID Connect Provider (http://docs.aws.amazon.com/IAM/latest/UserGuide/identity-providers-oidc-obtain-thumbprint.html) + // in the IAM User Guide. + ThumbprintList []*string `type:"list" required:"true"` + + // The URL of the identity provider. The URL must begin with "https://" and + // should correspond to the iss claim in the provider's OpenID Connect ID tokens. + // Per the OIDC standard, path components are allowed but query parameters are + // not. Typically the URL consists of only a host name, like "https://server.example.org" + // or "https://example.com". + // + // You cannot register the same provider multiple times in a single AWS account. + // If you try to submit a URL that has already been used for an OpenID Connect + // provider in the AWS account, you will get an error. + Url *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateOpenIDConnectProviderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateOpenIDConnectProviderInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateOpenIDConnectProviderInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateOpenIDConnectProviderInput"} + if s.ThumbprintList == nil { + invalidParams.Add(request.NewErrParamRequired("ThumbprintList")) + } + if s.Url == nil { + invalidParams.Add(request.NewErrParamRequired("Url")) + } + if s.Url != nil && len(*s.Url) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Url", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful CreateOpenIDConnectProvider request. +type CreateOpenIDConnectProviderOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the new IAM OpenID Connect provider that + // is created. For more information, see OpenIDConnectProviderListEntry. + OpenIDConnectProviderArn *string `min:"20" type:"string"` +} + +// String returns the string representation +func (s CreateOpenIDConnectProviderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateOpenIDConnectProviderOutput) GoString() string { + return s.String() +} + +type CreatePolicyInput struct { + _ struct{} `type:"structure"` + + // A friendly description of the policy. + // + // Typically used to store information about the permissions defined in the + // policy. For example, "Grants access to production DynamoDB tables." + // + // The policy description is immutable. After a value is assigned, it cannot + // be changed. + Description *string `type:"string"` + + // The path for the policy. 
+ // + // For more information about paths, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the IAM User Guide. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/). + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes, containing any + // ASCII character from the ! (\u0021) thru the DEL character (\u007F), including + // most punctuation characters, digits, and upper and lowercased letters. + Path *string `type:"string"` + + // The JSON policy document that you want to use as the content for the new + // policy. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of any printable ASCII character ranging + // from the space character (\u0020) through end of the ASCII character range + // (\u00FF). It also includes the special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D). + PolicyDocument *string `min:"1" type:"string" required:"true"` + + // The friendly name of the policy. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + PolicyName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreatePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreatePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreatePolicyInput"} + if s.PolicyDocument == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyDocument")) + } + if s.PolicyDocument != nil && len(*s.PolicyDocument) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyDocument", 1)) + } + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful CreatePolicy request. +type CreatePolicyOutput struct { + _ struct{} `type:"structure"` + + // A structure containing details about the new policy. + Policy *Policy `type:"structure"` +} + +// String returns the string representation +func (s CreatePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePolicyOutput) GoString() string { + return s.String() +} + +type CreatePolicyVersionInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM policy to which you want to add + // a new version. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. 
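+ //
+ // A minimal sketch of adding a new default version (the ARN, document, and
+ // svc client below are illustrative assumptions):
+ //
+ //    doc := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:ListAllMyBuckets","Resource":"*"}]}`
+ //    _, err := svc.CreatePolicyVersion(&iam.CreatePolicyVersionInput{
+ //        PolicyArn:      aws.String("arn:aws:iam::123456789012:policy/ExamplePolicy"),
+ //        PolicyDocument: aws.String(doc),
+ //        SetAsDefault:   aws.Bool(true),
+ //    })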
+ PolicyArn *string `min:"20" type:"string" required:"true"` + + // The JSON policy document that you want to use as the content for this new + // version of the policy. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of any printable ASCII character ranging + // from the space character (\u0020) through end of the ASCII character range + // (\u00FF). It also includes the special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D). + PolicyDocument *string `min:"1" type:"string" required:"true"` + + // Specifies whether to set this version as the policy's default version. + // + // When this parameter is true, the new policy version becomes the operative + // version; that is, the version that is in effect for the IAM users, groups, + // and roles that the policy is attached to. + // + // For more information about managed policy versions, see Versioning for Managed + // Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) + // in the IAM User Guide. + SetAsDefault *bool `type:"boolean"` +} + +// String returns the string representation +func (s CreatePolicyVersionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePolicyVersionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreatePolicyVersionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreatePolicyVersionInput"} + if s.PolicyArn == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyArn")) + } + if s.PolicyArn != nil && len(*s.PolicyArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PolicyArn", 20)) + } + if s.PolicyDocument == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyDocument")) + } + if s.PolicyDocument != nil && len(*s.PolicyDocument) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyDocument", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful CreatePolicyVersion request. +type CreatePolicyVersionOutput struct { + _ struct{} `type:"structure"` + + // A structure containing details about the new policy version. + PolicyVersion *PolicyVersion `type:"structure"` +} + +// String returns the string representation +func (s CreatePolicyVersionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePolicyVersionOutput) GoString() string { + return s.String() +} + +type CreateRoleInput struct { + _ struct{} `type:"structure"` + + // The trust relationship policy document that grants an entity permission to + // assume the role. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of any printable ASCII character ranging + // from the space character (\u0020) through end of the ASCII character range + // (\u00FF). It also includes the special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D). + AssumeRolePolicyDocument *string `min:"1" type:"string" required:"true"` + + // The path to the role. For more information about paths, see IAM Identifiers + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the IAM User Guide. + // + // This parameter is optional. 
If it is not included, it defaults to a slash + // (/). + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes, containing any + // ASCII character from the ! (\u0021) thru the DEL character (\u007F), including + // most punctuation characters, digits, and upper and lowercased letters. + Path *string `min:"1" type:"string"` + + // The name of the role to create. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateRoleInput"} + if s.AssumeRolePolicyDocument == nil { + invalidParams.Add(request.NewErrParamRequired("AssumeRolePolicyDocument")) + } + if s.AssumeRolePolicyDocument != nil && len(*s.AssumeRolePolicyDocument) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AssumeRolePolicyDocument", 1)) + } + if s.Path != nil && len(*s.Path) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Path", 1)) + } + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful CreateRole request. +type CreateRoleOutput struct { + _ struct{} `type:"structure"` + + // A structure containing details about the new role. + Role *Role `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRoleOutput) GoString() string { + return s.String() +} + +type CreateSAMLProviderInput struct { + _ struct{} `type:"structure"` + + // The name of the provider to create. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + Name *string `min:"1" type:"string" required:"true"` + + // An XML document generated by an identity provider (IdP) that supports SAML + // 2.0. The document includes the issuer's name, expiration information, and + // keys that can be used to validate the SAML authentication response (assertions) + // that are received from the IdP. You must generate the metadata document using + // the identity management software that is used as your organization's IdP. 
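+ //
+ // A call sketch (the file name, provider name, and svc client are
+ // illustrative assumptions):
+ //
+ //    metadata, _ := ioutil.ReadFile("idp-metadata.xml")
+ //    _, err := svc.CreateSAMLProvider(&iam.CreateSAMLProviderInput{
+ //        Name:                 aws.String("MyIdP"),
+ //        SAMLMetadataDocument: aws.String(string(metadata)),
+ //    })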
+ // + // For more information, see About SAML 2.0-based Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) + // in the IAM User Guide + SAMLMetadataDocument *string `min:"1000" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateSAMLProviderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSAMLProviderInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateSAMLProviderInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateSAMLProviderInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.SAMLMetadataDocument == nil { + invalidParams.Add(request.NewErrParamRequired("SAMLMetadataDocument")) + } + if s.SAMLMetadataDocument != nil && len(*s.SAMLMetadataDocument) < 1000 { + invalidParams.Add(request.NewErrParamMinLen("SAMLMetadataDocument", 1000)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful CreateSAMLProvider request. +type CreateSAMLProviderOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the new SAML provider resource in IAM. + SAMLProviderArn *string `min:"20" type:"string"` +} + +// String returns the string representation +func (s CreateSAMLProviderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSAMLProviderOutput) GoString() string { + return s.String() +} + +type CreateUserInput struct { + _ struct{} `type:"structure"` + + // The path for the user name. For more information about paths, see IAM Identifiers + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the IAM User Guide. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/). + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes, containing any + // ASCII character from the ! (\u0021) thru the DEL character (\u007F), including + // most punctuation characters, digits, and upper and lowercased letters. + Path *string `min:"1" type:"string"` + + // The name of the user to create. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateUserInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateUserInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateUserInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateUserInput"} + if s.Path != nil && len(*s.Path) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Path", 1)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful CreateUser request. +type CreateUserOutput struct { + _ struct{} `type:"structure"` + + // A structure with details about the new IAM user. + User *User `type:"structure"` +} + +// String returns the string representation +func (s CreateUserOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateUserOutput) GoString() string { + return s.String() +} + +type CreateVirtualMFADeviceInput struct { + _ struct{} `type:"structure"` + + // The path for the virtual MFA device. For more information about paths, see + // IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the IAM User Guide. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/). + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes, containing any + // ASCII character from the ! (\u0021) thru the DEL character (\u007F), including + // most punctuation characters, digits, and upper and lowercased letters. + Path *string `min:"1" type:"string"` + + // The name of the virtual MFA device. Use with path to uniquely identify a + // virtual MFA device. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + VirtualMFADeviceName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateVirtualMFADeviceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVirtualMFADeviceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateVirtualMFADeviceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateVirtualMFADeviceInput"} + if s.Path != nil && len(*s.Path) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Path", 1)) + } + if s.VirtualMFADeviceName == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualMFADeviceName")) + } + if s.VirtualMFADeviceName != nil && len(*s.VirtualMFADeviceName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VirtualMFADeviceName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful CreateVirtualMFADevice request. +type CreateVirtualMFADeviceOutput struct { + _ struct{} `type:"structure"` + + // A structure containing details about the new virtual MFA device. 
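+ //
+ // Sketch of creating a device and reading its seed (svc and the device name
+ // are illustrative assumptions):
+ //
+ //    resp, err := svc.CreateVirtualMFADevice(&iam.CreateVirtualMFADeviceInput{
+ //        VirtualMFADeviceName: aws.String("example-device"),
+ //    })
+ //    if err == nil {
+ //        fmt.Printf("%s\n", resp.VirtualMFADevice.Base32StringSeed)
+ //    }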
+ VirtualMFADevice *VirtualMFADevice `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateVirtualMFADeviceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVirtualMFADeviceOutput) GoString() string { + return s.String() +} + +type DeactivateMFADeviceInput struct { + _ struct{} `type:"structure"` + + // The serial number that uniquely identifies the MFA device. For virtual MFA + // devices, the serial number is the device ARN. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =/:,.@- + SerialNumber *string `min:"9" type:"string" required:"true"` + + // The name of the user whose MFA device you want to deactivate. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeactivateMFADeviceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeactivateMFADeviceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeactivateMFADeviceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeactivateMFADeviceInput"} + if s.SerialNumber == nil { + invalidParams.Add(request.NewErrParamRequired("SerialNumber")) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeactivateMFADeviceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeactivateMFADeviceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeactivateMFADeviceOutput) GoString() string { + return s.String() +} + +type DeleteAccessKeyInput struct { + _ struct{} `type:"structure"` + + // The access key ID for the access key ID and secret access key you want to + // delete. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters that can consist of any upper or lowercased letter + // or digit. + AccessKeyId *string `min:"16" type:"string" required:"true"` + + // The name of the user whose access key pair you want to delete. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. 
You can also include any of the following characters: =,.@- + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeleteAccessKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAccessKeyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteAccessKeyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteAccessKeyInput"} + if s.AccessKeyId == nil { + invalidParams.Add(request.NewErrParamRequired("AccessKeyId")) + } + if s.AccessKeyId != nil && len(*s.AccessKeyId) < 16 { + invalidParams.Add(request.NewErrParamMinLen("AccessKeyId", 16)) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteAccessKeyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAccessKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAccessKeyOutput) GoString() string { + return s.String() +} + +type DeleteAccountAliasInput struct { + _ struct{} `type:"structure"` + + // The name of the account alias to delete. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of lowercase letters, digits, and dashes. + // You cannot start or finish with a dash, nor can you have two dashes in a + // row. + AccountAlias *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteAccountAliasInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAccountAliasInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteAccountAliasInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteAccountAliasInput"} + if s.AccountAlias == nil { + invalidParams.Add(request.NewErrParamRequired("AccountAlias")) + } + if s.AccountAlias != nil && len(*s.AccountAlias) < 3 { + invalidParams.Add(request.NewErrParamMinLen("AccountAlias", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteAccountAliasOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAccountAliasOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAccountAliasOutput) GoString() string { + return s.String() +} + +type DeleteAccountPasswordPolicyInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAccountPasswordPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAccountPasswordPolicyInput) GoString() string { + return s.String() +} + +type DeleteAccountPasswordPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAccountPasswordPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAccountPasswordPolicyOutput) GoString() string { + return s.String() +} + +type DeleteGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the IAM group to delete. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + GroupName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteGroupInput"} + if s.GroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GroupName")) + } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteGroupOutput) GoString() string { + return s.String() +} + +type DeleteGroupPolicyInput struct { + _ struct{} `type:"structure"` + + // The name (friendly name, not ARN) identifying the group that the policy is + // embedded in. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + GroupName *string `min:"1" type:"string" required:"true"` + + // The name identifying the policy document to delete. 
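+ //
+ // A call sketch (the group and policy names, and the svc client, are
+ // illustrative assumptions):
+ //
+ //    _, err := svc.DeleteGroupPolicy(&iam.DeleteGroupPolicyInput{
+ //        GroupName:  aws.String("Admins"),
+ //        PolicyName: aws.String("ExamplePolicy"),
+ //    })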
+ // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + PolicyName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteGroupPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteGroupPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteGroupPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteGroupPolicyInput"} + if s.GroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GroupName")) + } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) + } + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteGroupPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteGroupPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteGroupPolicyOutput) GoString() string { + return s.String() +} + +type DeleteInstanceProfileInput struct { + _ struct{} `type:"structure"` + + // The name of the instance profile to delete. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + InstanceProfileName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteInstanceProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInstanceProfileInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteInstanceProfileInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteInstanceProfileInput"} + if s.InstanceProfileName == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceProfileName")) + } + if s.InstanceProfileName != nil && len(*s.InstanceProfileName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InstanceProfileName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteInstanceProfileOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteInstanceProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInstanceProfileOutput) GoString() string { + return s.String() +} + +type DeleteLoginProfileInput struct { + _ struct{} `type:"structure"` + + // The name of the user whose password you want to delete. 
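+ //
+ // A call sketch (the user name and svc client are illustrative assumptions):
+ //
+ //    _, err := svc.DeleteLoginProfile(&iam.DeleteLoginProfileInput{
+ //        UserName: aws.String("example-user"),
+ //    })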
+ // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLoginProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLoginProfileInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteLoginProfileInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteLoginProfileInput"} + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteLoginProfileOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteLoginProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLoginProfileOutput) GoString() string { + return s.String() +} + +type DeleteOpenIDConnectProviderInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM OpenID Connect provider resource + // object to delete. You can get a list of OpenID Connect provider resource + // ARNs by using the ListOpenIDConnectProviders action. + OpenIDConnectProviderArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteOpenIDConnectProviderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteOpenIDConnectProviderInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteOpenIDConnectProviderInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteOpenIDConnectProviderInput"} + if s.OpenIDConnectProviderArn == nil { + invalidParams.Add(request.NewErrParamRequired("OpenIDConnectProviderArn")) + } + if s.OpenIDConnectProviderArn != nil && len(*s.OpenIDConnectProviderArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("OpenIDConnectProviderArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteOpenIDConnectProviderOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteOpenIDConnectProviderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteOpenIDConnectProviderOutput) GoString() string { + return s.String() +} + +type DeletePolicyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM policy you want to delete. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. 
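+ //
+ // A managed policy must be detached from all users, groups, and roles, and
+ // its non-default versions deleted, before it can be deleted. A call sketch
+ // (the ARN and svc client are illustrative assumptions):
+ //
+ //    _, err := svc.DeletePolicy(&iam.DeletePolicyInput{
+ //        PolicyArn: aws.String("arn:aws:iam::123456789012:policy/ExamplePolicy"),
+ //    })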
+ PolicyArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeletePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeletePolicyInput"} + if s.PolicyArn == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyArn")) + } + if s.PolicyArn != nil && len(*s.PolicyArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PolicyArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeletePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePolicyOutput) GoString() string { + return s.String() +} + +type DeletePolicyVersionInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM policy from which you want to delete + // a version. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string" required:"true"` + + // The policy version to delete. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters that consists of the lowercase letter 'v' followed + // by one or two digits, and optionally followed by a period '.' and a string + // of letters and digits. + // + // For more information about managed policy versions, see Versioning for Managed + // Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) + // in the IAM User Guide. + VersionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePolicyVersionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePolicyVersionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeletePolicyVersionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeletePolicyVersionInput"} + if s.PolicyArn == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyArn")) + } + if s.PolicyArn != nil && len(*s.PolicyArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PolicyArn", 20)) + } + if s.VersionId == nil { + invalidParams.Add(request.NewErrParamRequired("VersionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeletePolicyVersionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePolicyVersionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePolicyVersionOutput) GoString() string { + return s.String() +} + +type DeleteRoleInput struct { + _ struct{} `type:"structure"` + + // The name of the role to delete. 
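+ //
+ // A call sketch (the role name and svc client are illustrative assumptions;
+ // the role must have no attached policies or instance profiles when deleted):
+ //
+ //    _, err := svc.DeleteRole(&iam.DeleteRoleInput{
+ //        RoleName: aws.String("example-role"),
+ //    })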
+ // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteRoleInput"} + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteRoleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRoleOutput) GoString() string { + return s.String() +} + +type DeleteRolePolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the inline policy to delete from the specified IAM role. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + PolicyName *string `min:"1" type:"string" required:"true"` + + // The name (friendly name, not ARN) identifying the role that the policy is + // embedded in. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRolePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRolePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteRolePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteRolePolicyInput"} + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteRolePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRolePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRolePolicyOutput) GoString() string { + return s.String() +} + +type DeleteSAMLProviderInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the SAML provider to delete. + SAMLProviderArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSAMLProviderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSAMLProviderInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteSAMLProviderInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteSAMLProviderInput"} + if s.SAMLProviderArn == nil { + invalidParams.Add(request.NewErrParamRequired("SAMLProviderArn")) + } + if s.SAMLProviderArn != nil && len(*s.SAMLProviderArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("SAMLProviderArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteSAMLProviderOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteSAMLProviderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSAMLProviderOutput) GoString() string { + return s.String() +} + +type DeleteSSHPublicKeyInput struct { + _ struct{} `type:"structure"` + + // The unique identifier for the SSH public key. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters that can consist of any upper or lowercased letter + // or digit. + SSHPublicKeyId *string `min:"20" type:"string" required:"true"` + + // The name of the IAM user associated with the SSH public key. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSSHPublicKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSSHPublicKeyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteSSHPublicKeyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteSSHPublicKeyInput"} + if s.SSHPublicKeyId == nil { + invalidParams.Add(request.NewErrParamRequired("SSHPublicKeyId")) + } + if s.SSHPublicKeyId != nil && len(*s.SSHPublicKeyId) < 20 { + invalidParams.Add(request.NewErrParamMinLen("SSHPublicKeyId", 20)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteSSHPublicKeyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteSSHPublicKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSSHPublicKeyOutput) GoString() string { + return s.String() +} + +type DeleteServerCertificateInput struct { + _ struct{} `type:"structure"` + + // The name of the server certificate you want to delete. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + ServerCertificateName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteServerCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteServerCertificateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteServerCertificateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteServerCertificateInput"} + if s.ServerCertificateName == nil { + invalidParams.Add(request.NewErrParamRequired("ServerCertificateName")) + } + if s.ServerCertificateName != nil && len(*s.ServerCertificateName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ServerCertificateName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteServerCertificateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteServerCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteServerCertificateOutput) GoString() string { + return s.String() +} + +type DeleteSigningCertificateInput struct { + _ struct{} `type:"structure"` + + // The ID of the signing certificate to delete. + // + // The format of this parameter, as described by its regex (http://wikipedia.org/wiki/regex) + // pattern, is a string of characters that can be upper- or lower-cased letters + // or digits. + CertificateId *string `min:"24" type:"string" required:"true"` + + // The name of the user the signing certificate belongs to. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. 
You can also include any of the following characters: =,.@- + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeleteSigningCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSigningCertificateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteSigningCertificateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteSigningCertificateInput"} + if s.CertificateId == nil { + invalidParams.Add(request.NewErrParamRequired("CertificateId")) + } + if s.CertificateId != nil && len(*s.CertificateId) < 24 { + invalidParams.Add(request.NewErrParamMinLen("CertificateId", 24)) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteSigningCertificateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteSigningCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSigningCertificateOutput) GoString() string { + return s.String() +} + +type DeleteUserInput struct { + _ struct{} `type:"structure"` + + // The name of the user to delete. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteUserInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUserInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteUserInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteUserInput"} + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteUserOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteUserOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUserOutput) GoString() string { + return s.String() +} + +type DeleteUserPolicyInput struct { + _ struct{} `type:"structure"` + + // The name identifying the policy document to delete. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + PolicyName *string `min:"1" type:"string" required:"true"` + + // The name (friendly name, not ARN) identifying the user that the policy is + // embedded in. 
+ // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteUserPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUserPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteUserPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteUserPolicyInput"} + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteUserPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteUserPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUserPolicyOutput) GoString() string { + return s.String() +} + +type DeleteVirtualMFADeviceInput struct { + _ struct{} `type:"structure"` + + // The serial number that uniquely identifies the MFA device. For virtual MFA + // devices, the serial number is the same as the ARN. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =/:,.@- + SerialNumber *string `min:"9" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVirtualMFADeviceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVirtualMFADeviceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteVirtualMFADeviceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteVirtualMFADeviceInput"} + if s.SerialNumber == nil { + invalidParams.Add(request.NewErrParamRequired("SerialNumber")) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteVirtualMFADeviceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteVirtualMFADeviceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVirtualMFADeviceOutput) GoString() string { + return s.String() +} + +type DetachGroupPolicyInput struct { + _ struct{} `type:"structure"` + + // The name (friendly name, not ARN) of the IAM group to detach the policy from. 
+ // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + GroupName *string `min:"1" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the IAM policy you want to detach. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s DetachGroupPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachGroupPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DetachGroupPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DetachGroupPolicyInput"} + if s.GroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GroupName")) + } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) + } + if s.PolicyArn == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyArn")) + } + if s.PolicyArn != nil && len(*s.PolicyArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PolicyArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DetachGroupPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DetachGroupPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachGroupPolicyOutput) GoString() string { + return s.String() +} + +type DetachRolePolicyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM policy you want to detach. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string" required:"true"` + + // The name (friendly name, not ARN) of the IAM role to detach the policy from. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DetachRolePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachRolePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
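+//
+// A minimal usage sketch (assuming an *iam.IAM client named svc; the policy
+// ARN and role name below are hypothetical):
+//
+//    _, err := svc.DetachRolePolicy(&iam.DetachRolePolicyInput{
+//        PolicyArn: aws.String("arn:aws:iam::123456789012:policy/ExamplePolicy"),
+//        RoleName:  aws.String("ExampleRole"),
+//    })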
+func (s *DetachRolePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DetachRolePolicyInput"} + if s.PolicyArn == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyArn")) + } + if s.PolicyArn != nil && len(*s.PolicyArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PolicyArn", 20)) + } + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DetachRolePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DetachRolePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachRolePolicyOutput) GoString() string { + return s.String() +} + +type DetachUserPolicyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM policy you want to detach. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string" required:"true"` + + // The name (friendly name, not ARN) of the IAM user to detach the policy from. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DetachUserPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachUserPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DetachUserPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DetachUserPolicyInput"} + if s.PolicyArn == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyArn")) + } + if s.PolicyArn != nil && len(*s.PolicyArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PolicyArn", 20)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DetachUserPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DetachUserPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachUserPolicyOutput) GoString() string { + return s.String() +} + +type EnableMFADeviceInput struct { + _ struct{} `type:"structure"` + + // An authentication code emitted by the device. + // + // The format for this parameter is a string of 6 digits. + AuthenticationCode1 *string `min:"6" type:"string" required:"true"` + + // A subsequent authentication code emitted by the device. + // + // The format for this parameter is a string of 6 digits. 
+ AuthenticationCode2 *string `min:"6" type:"string" required:"true"`
+
+ // The serial number that uniquely identifies the MFA device. For virtual MFA
+ // devices, the serial number is the device ARN.
+ //
+ // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is
+ // a string of characters consisting of upper and lowercase alphanumeric characters
+ // with no spaces. You can also include any of the following characters: =/:,.@-
+ SerialNumber *string `min:"9" type:"string" required:"true"`
+
+ // The name of the IAM user for whom you want to enable the MFA device.
+ //
+ // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is
+ // a string of characters consisting of upper and lowercase alphanumeric characters
+ // with no spaces. You can also include any of the following characters: =,.@-
+ UserName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s EnableMFADeviceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EnableMFADeviceInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *EnableMFADeviceInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "EnableMFADeviceInput"}
+ if s.AuthenticationCode1 == nil {
+ invalidParams.Add(request.NewErrParamRequired("AuthenticationCode1"))
+ }
+ if s.AuthenticationCode1 != nil && len(*s.AuthenticationCode1) < 6 {
+ invalidParams.Add(request.NewErrParamMinLen("AuthenticationCode1", 6))
+ }
+ if s.AuthenticationCode2 == nil {
+ invalidParams.Add(request.NewErrParamRequired("AuthenticationCode2"))
+ }
+ if s.AuthenticationCode2 != nil && len(*s.AuthenticationCode2) < 6 {
+ invalidParams.Add(request.NewErrParamMinLen("AuthenticationCode2", 6))
+ }
+ if s.SerialNumber == nil {
+ invalidParams.Add(request.NewErrParamRequired("SerialNumber"))
+ }
+ if s.SerialNumber != nil && len(*s.SerialNumber) < 9 {
+ invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9))
+ }
+ if s.UserName == nil {
+ invalidParams.Add(request.NewErrParamRequired("UserName"))
+ }
+ if s.UserName != nil && len(*s.UserName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("UserName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type EnableMFADeviceOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s EnableMFADeviceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EnableMFADeviceOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the results of a simulation.
+//
+// This data type is used by the return parameter of SimulateCustomPolicy
+// and SimulatePrincipalPolicy.
+type EvaluationResult struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the API action tested on the indicated resource.
+ EvalActionName *string `min:"3" type:"string" required:"true"`
+
+ // The result of the simulation.
+ EvalDecision *string `type:"string" required:"true" enum:"PolicyEvaluationDecisionType"`
+
+ // Additional details about the results of the evaluation decision. When there
+ // are both IAM policies and resource policies, this parameter explains how
+ // each set of policies contributes to the final evaluation decision. When simulating
+ // cross-account access to a resource, both the resource-based policy and the
+ // caller's IAM policy must grant access. See How IAM Roles Differ from Resource-based
+ // Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_compare-resource-policies.html)
+ EvalDecisionDetails map[string]*string `type:"map"`
+
+ // The ARN of the resource that the indicated API action was tested on.
+ EvalResourceName *string `min:"1" type:"string"`
+
+ // A list of the statements in the input policies that determine the result
+ // for this scenario. Remember that even if multiple statements allow the action
+ // on the resource, if only one statement denies that action, then the explicit
+ // deny overrides any allow, and the deny statement is the only entry included
+ // in the result.
+ MatchedStatements []*Statement `type:"list"`
+
+ // A list of context keys that are required by the included input policies but
+ // that were not provided by one of the input parameters. This list is used
+ // when the resource in a simulation is "*", either explicitly, or when the
+ // ResourceArns parameter is blank. If you include a list of resources, then any
+ // missing context values are instead included under the ResourceSpecificResults
+ // section. To discover the context keys used by a set of policies, you can
+ // call GetContextKeysForCustomPolicy or GetContextKeysForPrincipalPolicy.
+ MissingContextValues []*string `type:"list"`
+
+ // The individual results of the simulation of the API action specified in EvalActionName
+ // on each resource.
+ ResourceSpecificResults []*ResourceSpecificResult `type:"list"`
+}
+
+// String returns the string representation
+func (s EvaluationResult) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EvaluationResult) GoString() string {
+ return s.String()
+}
+
+type GenerateCredentialReportInput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s GenerateCredentialReportInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GenerateCredentialReportInput) GoString() string {
+ return s.String()
+}
+
+// Contains the response to a successful GenerateCredentialReport request.
+type GenerateCredentialReportOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the credential report.
+ Description *string `type:"string"`
+
+ // Information about the state of the credential report.
+ State *string `type:"string" enum:"ReportStateType"`
+}
+
+// String returns the string representation
+func (s GenerateCredentialReportOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GenerateCredentialReportOutput) GoString() string {
+ return s.String()
+}
+
+type GetAccessKeyLastUsedInput struct {
+ _ struct{} `type:"structure"`
+
+ // The identifier of an access key.
+ //
+ // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is
+ // a string of characters that can consist of any upper or lowercased letter
+ // or digit.
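+ //
+ // A minimal usage sketch (assuming an *iam.IAM client named svc; the key ID
+ // is AWS's documented example value, not a real credential):
+ //
+ //    out, err := svc.GetAccessKeyLastUsed(&iam.GetAccessKeyLastUsedInput{
+ //        AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"),
+ //    })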
+ AccessKeyId *string `min:"16" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetAccessKeyLastUsedInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccessKeyLastUsedInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetAccessKeyLastUsedInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAccessKeyLastUsedInput"} + if s.AccessKeyId == nil { + invalidParams.Add(request.NewErrParamRequired("AccessKeyId")) + } + if s.AccessKeyId != nil && len(*s.AccessKeyId) < 16 { + invalidParams.Add(request.NewErrParamMinLen("AccessKeyId", 16)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetAccessKeyLastUsed request. It is +// also returned as a member of the AccessKeyMetaData structure returned by +// the ListAccessKeys action. +type GetAccessKeyLastUsedOutput struct { + _ struct{} `type:"structure"` + + // Contains information about the last time the access key was used. + AccessKeyLastUsed *AccessKeyLastUsed `type:"structure"` + + // The name of the AWS IAM user that owns this access key. + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetAccessKeyLastUsedOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccessKeyLastUsedOutput) GoString() string { + return s.String() +} + +type GetAccountAuthorizationDetailsInput struct { + _ struct{} `type:"structure"` + + // A list of entity types used to filter the results. Only the entities that + // match the types you specify are included in the output. Use the value LocalManagedPolicy + // to include customer managed policies. + // + // The format for this parameter is a comma-separated (if more than one) list + // of strings. Each string value in the list must be one of the valid values + // listed below. + Filter []*string `type:"list"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s GetAccountAuthorizationDetailsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccountAuthorizationDetailsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
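+//
+// A minimal pagination sketch (assuming an *iam.IAM client named svc), driving
+// Marker and IsTruncated by hand; the SDK's GetAccountAuthorizationDetailsPages
+// helper wraps the same loop:
+//
+//    in := &iam.GetAccountAuthorizationDetailsInput{MaxItems: aws.Int64(100)}
+//    for {
+//        out, err := svc.GetAccountAuthorizationDetails(in)
+//        if err != nil {
+//            return err
+//        }
+//        // consume out.UserDetailList, out.GroupDetailList, out.RoleDetailList, out.Policies
+//        if out.IsTruncated == nil || !*out.IsTruncated {
+//            break
+//        }
+//        in.Marker = out.Marker
+//    }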
+func (s *GetAccountAuthorizationDetailsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAccountAuthorizationDetailsInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetAccountAuthorizationDetails request. +type GetAccountAuthorizationDetailsOutput struct { + _ struct{} `type:"structure"` + + // A list containing information about IAM groups. + GroupDetailList []*GroupDetail `type:"list"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` + + // A list containing information about managed policies. + Policies []*ManagedPolicyDetail `type:"list"` + + // A list containing information about IAM roles. + RoleDetailList []*RoleDetail `type:"list"` + + // A list containing information about IAM users. + UserDetailList []*UserDetail `type:"list"` +} + +// String returns the string representation +func (s GetAccountAuthorizationDetailsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccountAuthorizationDetailsOutput) GoString() string { + return s.String() +} + +type GetAccountPasswordPolicyInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetAccountPasswordPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccountPasswordPolicyInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetAccountPasswordPolicy request. +type GetAccountPasswordPolicyOutput struct { + _ struct{} `type:"structure"` + + // Contains information about the account password policy. + // + // This data type is used as a response element in the GetAccountPasswordPolicy + // action. + PasswordPolicy *PasswordPolicy `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetAccountPasswordPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccountPasswordPolicyOutput) GoString() string { + return s.String() +} + +type GetAccountSummaryInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetAccountSummaryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccountSummaryInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetAccountSummary request. 
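+//
+// A minimal sketch of reading the summary (assuming an *iam.IAM client named
+// svc; "Users" is one of the summary map keys):
+//
+//    out, err := svc.GetAccountSummary(&iam.GetAccountSummaryInput{})
+//    if err == nil {
+//        if n, ok := out.SummaryMap["Users"]; ok {
+//            fmt.Printf("IAM users: %d\n", *n)
+//        }
+//    }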
+type GetAccountSummaryOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A set of key-value pairs containing information about IAM entity usage and
+ // IAM quotas.
+ SummaryMap map[string]*int64 `type:"map"`
+}
+
+// String returns the string representation
+func (s GetAccountSummaryOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetAccountSummaryOutput) GoString() string {
+ return s.String()
+}
+
+type GetContextKeysForCustomPolicyInput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of policies for which you want the list of context keys referenced
+ // in those policies. Each document is specified as a string containing the
+ // complete, valid JSON text of an IAM policy.
+ //
+ // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is
+ // a string of characters consisting of any printable ASCII character ranging
+ // from the space character (\u0020) through the end of the ASCII character range
+ // (\u00FF). It also includes the special characters tab (\u0009), line feed
+ // (\u000A), and carriage return (\u000D).
+ PolicyInputList []*string `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s GetContextKeysForCustomPolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetContextKeysForCustomPolicyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetContextKeysForCustomPolicyInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetContextKeysForCustomPolicyInput"}
+ if s.PolicyInputList == nil {
+ invalidParams.Add(request.NewErrParamRequired("PolicyInputList"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Contains the response to a successful GetContextKeysForPrincipalPolicy or
+// GetContextKeysForCustomPolicy request.
+type GetContextKeysForPolicyResponse struct {
+ _ struct{} `type:"structure"`
+
+ // The list of context keys that are referenced in the input policies.
+ ContextKeyNames []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s GetContextKeysForPolicyResponse) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetContextKeysForPolicyResponse) GoString() string {
+ return s.String()
+}
+
+type GetContextKeysForPrincipalPolicyInput struct {
+ _ struct{} `type:"structure"`
+
+ // An optional list of additional policies for which you want the list of context
+ // keys that are referenced.
+ //
+ // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is
+ // a string of characters consisting of any printable ASCII character ranging
+ // from the space character (\u0020) through the end of the ASCII character range
+ // (\u00FF). It also includes the special characters tab (\u0009), line feed
+ // (\u000A), and carriage return (\u000D).
+ PolicyInputList []*string `type:"list"`
+
+ // The ARN of a user, group, or role whose policies contain the context keys
+ // that you want listed. If you specify a user, the list includes context keys
+ // that are found in all policies attached to the user as well as to all groups
+ // that the user is a member of. If you pick a group or a role, then it includes
+ // only those context keys that are found in policies attached to that entity.
+ // Note that all parameters are shown in unencoded form here for clarity, but
+ // must be URL encoded to be included as a part of a real HTTP request.
+ //
+ // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS
+ // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+ // in the AWS General Reference.
+ PolicySourceArn *string `min:"20" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetContextKeysForPrincipalPolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetContextKeysForPrincipalPolicyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetContextKeysForPrincipalPolicyInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetContextKeysForPrincipalPolicyInput"}
+ if s.PolicySourceArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("PolicySourceArn"))
+ }
+ if s.PolicySourceArn != nil && len(*s.PolicySourceArn) < 20 {
+ invalidParams.Add(request.NewErrParamMinLen("PolicySourceArn", 20))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type GetCredentialReportInput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetCredentialReportInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetCredentialReportInput) GoString() string {
+ return s.String()
+}
+
+// Contains the response to a successful GetCredentialReport request.
+type GetCredentialReportOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Contains the credential report. The report is Base64-encoded.
+ //
+ // Content is automatically base64 encoded/decoded by the SDK.
+ Content []byte `type:"blob"`
+
+ // The date and time when the credential report was created, in ISO 8601 date-time
+ // format (http://www.iso.org/iso/iso8601).
+ GeneratedTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // The format (MIME type) of the credential report.
+ ReportFormat *string `type:"string" enum:"ReportFormatType"`
+}
+
+// String returns the string representation
+func (s GetCredentialReportOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetCredentialReportOutput) GoString() string {
+ return s.String()
+}
+
+type GetGroupInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the group.
+ //
+ // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is
+ // a string of characters consisting of upper and lowercase alphanumeric characters
+ // with no spaces. You can also include any of the following characters: =,.@-
+ GroupName *string `min:"1" type:"string" required:"true"`
+
+ // Use this parameter only when paginating results and only after you receive
+ // a response indicating that the results are truncated. Set it to the value
+ // of the Marker element in the response that you received to indicate where
+ // the next call should start.
+ Marker *string `min:"1" type:"string"`
+
+ // Use this only when paginating results to indicate the maximum number of items
+ // you want in the response. If additional items exist beyond the maximum you
+ // specify, the IsTruncated response element is true.
+ //
+ // This parameter is optional. If you do not include it, it defaults to 100.
+ // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s GetGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetGroupInput"} + if s.GroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GroupName")) + } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) + } + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetGroup request. +type GetGroupOutput struct { + _ struct{} `type:"structure"` + + // A structure that contains details about the group. + Group *Group `type:"structure" required:"true"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` + + // A list of users in the group. + Users []*User `type:"list" required:"true"` +} + +// String returns the string representation +func (s GetGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetGroupOutput) GoString() string { + return s.String() +} + +type GetGroupPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the group the policy is associated with. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + GroupName *string `min:"1" type:"string" required:"true"` + + // The name of the policy document to get. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. 
You can also include any of the following characters: =,.@- + PolicyName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetGroupPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetGroupPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetGroupPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetGroupPolicyInput"} + if s.GroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GroupName")) + } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) + } + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetGroupPolicy request. +type GetGroupPolicyOutput struct { + _ struct{} `type:"structure"` + + // The group the policy is associated with. + GroupName *string `min:"1" type:"string" required:"true"` + + // The policy document. + PolicyDocument *string `min:"1" type:"string" required:"true"` + + // The name of the policy. + PolicyName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetGroupPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetGroupPolicyOutput) GoString() string { + return s.String() +} + +type GetInstanceProfileInput struct { + _ struct{} `type:"structure"` + + // The name of the instance profile to get information about. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + InstanceProfileName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetInstanceProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetInstanceProfileInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetInstanceProfileInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetInstanceProfileInput"} + if s.InstanceProfileName == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceProfileName")) + } + if s.InstanceProfileName != nil && len(*s.InstanceProfileName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InstanceProfileName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetInstanceProfile request. +type GetInstanceProfileOutput struct { + _ struct{} `type:"structure"` + + // A structure containing details about the instance profile. 
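+ //
+ // A minimal retrieval sketch (assuming an *iam.IAM client named svc; the
+ // profile name is hypothetical):
+ //
+ //    out, err := svc.GetInstanceProfile(&iam.GetInstanceProfileInput{
+ //        InstanceProfileName: aws.String("Webserver"),
+ //    })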
+ InstanceProfile *InstanceProfile `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetInstanceProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetInstanceProfileOutput) GoString() string { + return s.String() +} + +type GetLoginProfileInput struct { + _ struct{} `type:"structure"` + + // The name of the user whose login profile you want to retrieve. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetLoginProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetLoginProfileInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetLoginProfileInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetLoginProfileInput"} + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetLoginProfile request. +type GetLoginProfileOutput struct { + _ struct{} `type:"structure"` + + // A structure containing the user name and password create date for the user. + LoginProfile *LoginProfile `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetLoginProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetLoginProfileOutput) GoString() string { + return s.String() +} + +type GetOpenIDConnectProviderInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the OIDC provider resource object in IAM + // to get information for. You can get a list of OIDC provider resource ARNs + // by using the ListOpenIDConnectProviders action. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + OpenIDConnectProviderArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetOpenIDConnectProviderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOpenIDConnectProviderInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
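+//
+// A minimal usage sketch (assuming an *iam.IAM client named svc; the provider
+// ARN is hypothetical):
+//
+//    out, err := svc.GetOpenIDConnectProvider(&iam.GetOpenIDConnectProviderInput{
+//        OpenIDConnectProviderArn: aws.String("arn:aws:iam::123456789012:oidc-provider/server.example.com"),
+//    })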
+func (s *GetOpenIDConnectProviderInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetOpenIDConnectProviderInput"} + if s.OpenIDConnectProviderArn == nil { + invalidParams.Add(request.NewErrParamRequired("OpenIDConnectProviderArn")) + } + if s.OpenIDConnectProviderArn != nil && len(*s.OpenIDConnectProviderArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("OpenIDConnectProviderArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetOpenIDConnectProvider request. +type GetOpenIDConnectProviderOutput struct { + _ struct{} `type:"structure"` + + // A list of client IDs (also known as audiences) that are associated with the + // specified IAM OIDC provider resource object. For more information, see CreateOpenIDConnectProvider. + ClientIDList []*string `type:"list"` + + // The date and time when the IAM OIDC provider resource object was created + // in the AWS account. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // A list of certificate thumbprints that are associated with the specified + // IAM OIDC provider resource object. For more information, see CreateOpenIDConnectProvider. + ThumbprintList []*string `type:"list"` + + // The URL that the IAM OIDC provider resource object is associated with. For + // more information, see CreateOpenIDConnectProvider. + Url *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetOpenIDConnectProviderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOpenIDConnectProviderOutput) GoString() string { + return s.String() +} + +type GetPolicyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the managed policy that you want information + // about. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetPolicyInput"} + if s.PolicyArn == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyArn")) + } + if s.PolicyArn != nil && len(*s.PolicyArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PolicyArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetPolicy request. +type GetPolicyOutput struct { + _ struct{} `type:"structure"` + + // A structure containing details about the policy. + Policy *Policy `type:"structure"` +} + +// String returns the string representation +func (s GetPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPolicyOutput) GoString() string { + return s.String() +} + +type GetPolicyVersionInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the managed policy that you want information + // about. 
+ // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string" required:"true"` + + // Identifies the policy version to retrieve. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters that consists of the lowercase letter 'v' followed + // by one or two digits, and optionally followed by a period '.' and a string + // of letters and digits. + VersionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetPolicyVersionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPolicyVersionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetPolicyVersionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetPolicyVersionInput"} + if s.PolicyArn == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyArn")) + } + if s.PolicyArn != nil && len(*s.PolicyArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PolicyArn", 20)) + } + if s.VersionId == nil { + invalidParams.Add(request.NewErrParamRequired("VersionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetPolicyVersion request. +type GetPolicyVersionOutput struct { + _ struct{} `type:"structure"` + + // A structure containing details about the policy version. + PolicyVersion *PolicyVersion `type:"structure"` +} + +// String returns the string representation +func (s GetPolicyVersionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPolicyVersionOutput) GoString() string { + return s.String() +} + +type GetRoleInput struct { + _ struct{} `type:"structure"` + + // The name of the IAM role to get information about. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetRoleInput"} + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetRole request. +type GetRoleOutput struct { + _ struct{} `type:"structure"` + + // A structure containing details about the IAM role. 
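+ //
+ // A minimal retrieval sketch (assuming an *iam.IAM client named svc; the role
+ // name is hypothetical):
+ //
+ //    out, err := svc.GetRole(&iam.GetRoleInput{RoleName: aws.String("ExampleRole")})
+ //    if err == nil {
+ //        fmt.Println(*out.Role.Arn)
+ //    }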
+ Role *Role `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRoleOutput) GoString() string { + return s.String() +} + +type GetRolePolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the policy document to get. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + PolicyName *string `min:"1" type:"string" required:"true"` + + // The name of the role associated with the policy. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRolePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRolePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetRolePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetRolePolicyInput"} + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetRolePolicy request. +type GetRolePolicyOutput struct { + _ struct{} `type:"structure"` + + // The policy document. + PolicyDocument *string `min:"1" type:"string" required:"true"` + + // The name of the policy. + PolicyName *string `min:"1" type:"string" required:"true"` + + // The role the policy is associated with. + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRolePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRolePolicyOutput) GoString() string { + return s.String() +} + +type GetSAMLProviderInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the SAML provider resource object in IAM + // to get information about. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + SAMLProviderArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetSAMLProviderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSAMLProviderInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
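+//
+// A minimal usage sketch (assuming an *iam.IAM client named svc; the provider
+// ARN is hypothetical):
+//
+//    out, err := svc.GetSAMLProvider(&iam.GetSAMLProviderInput{
+//        SAMLProviderArn: aws.String("arn:aws:iam::123456789012:saml-provider/ExampleProvider"),
+//    })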
+func (s *GetSAMLProviderInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSAMLProviderInput"} + if s.SAMLProviderArn == nil { + invalidParams.Add(request.NewErrParamRequired("SAMLProviderArn")) + } + if s.SAMLProviderArn != nil && len(*s.SAMLProviderArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("SAMLProviderArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetSAMLProvider request. +type GetSAMLProviderOutput struct { + _ struct{} `type:"structure"` + + // The date and time when the SAML provider was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The XML metadata document that includes information about an identity provider. + SAMLMetadataDocument *string `min:"1000" type:"string"` + + // The expiration date and time for the SAML provider. + ValidUntil *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s GetSAMLProviderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSAMLProviderOutput) GoString() string { + return s.String() +} + +type GetSSHPublicKeyInput struct { + _ struct{} `type:"structure"` + + // Specifies the public key encoding format to use in the response. To retrieve + // the public key in ssh-rsa format, use SSH. To retrieve the public key in + // PEM format, use PEM. + Encoding *string `type:"string" required:"true" enum:"encodingType"` + + // The unique identifier for the SSH public key. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters that can consist of any upper or lowercased letter + // or digit. + SSHPublicKeyId *string `min:"20" type:"string" required:"true"` + + // The name of the IAM user associated with the SSH public key. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetSSHPublicKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSSHPublicKeyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetSSHPublicKeyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSSHPublicKeyInput"} + if s.Encoding == nil { + invalidParams.Add(request.NewErrParamRequired("Encoding")) + } + if s.SSHPublicKeyId == nil { + invalidParams.Add(request.NewErrParamRequired("SSHPublicKeyId")) + } + if s.SSHPublicKeyId != nil && len(*s.SSHPublicKeyId) < 20 { + invalidParams.Add(request.NewErrParamMinLen("SSHPublicKeyId", 20)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetSSHPublicKey request. +type GetSSHPublicKeyOutput struct { + _ struct{} `type:"structure"` + + // A structure containing details about the SSH public key. 
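+ //
+ // A minimal retrieval sketch (assuming an *iam.IAM client named svc; the key
+ // ID and user name are hypothetical). EncodingTypeSsh is the SDK constant for
+ // the "SSH" encoding:
+ //
+ //    out, err := svc.GetSSHPublicKey(&iam.GetSSHPublicKeyInput{
+ //        Encoding:       aws.String(iam.EncodingTypeSsh),
+ //        SSHPublicKeyId: aws.String("APKAEIBAERJR2EXAMPLE"), // hypothetical key ID
+ //        UserName:       aws.String("Bob"),                  // hypothetical user
+ //    })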
+ SSHPublicKey *SSHPublicKey `type:"structure"` +} + +// String returns the string representation +func (s GetSSHPublicKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSSHPublicKeyOutput) GoString() string { + return s.String() +} + +type GetServerCertificateInput struct { + _ struct{} `type:"structure"` + + // The name of the server certificate you want to retrieve information about. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + ServerCertificateName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetServerCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetServerCertificateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetServerCertificateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetServerCertificateInput"} + if s.ServerCertificateName == nil { + invalidParams.Add(request.NewErrParamRequired("ServerCertificateName")) + } + if s.ServerCertificateName != nil && len(*s.ServerCertificateName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ServerCertificateName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetServerCertificate request. +type GetServerCertificateOutput struct { + _ struct{} `type:"structure"` + + // A structure containing details about the server certificate. + ServerCertificate *ServerCertificate `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetServerCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetServerCertificateOutput) GoString() string { + return s.String() +} + +type GetUserInput struct { + _ struct{} `type:"structure"` + + // The name of the user to get information about. + // + // This parameter is optional. If it is not included, it defaults to the user + // making the request. The regex pattern (http://wikipedia.org/wiki/regex) for + // this parameter is a string of characters consisting of upper and lowercase + // alphanumeric characters with no spaces. You can also include any of the following + // characters: =,.@- + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetUserInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetUserInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetUserInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetUserInput"} + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetUser request. +type GetUserOutput struct { + _ struct{} `type:"structure"` + + // A structure containing details about the IAM user. 
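+	//
+	// As a minimal sketch ("svc" is assumed to be an *iam.IAM client), omitting
+	// UserName returns the user making the request, per the note on GetUserInput:
+	//
+	//	out, err := svc.GetUser(&iam.GetUserInput{})
+	//	if err == nil {
+	//		fmt.Println(*out.User.Arn)
+	//	}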
+ User *User `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetUserOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetUserOutput) GoString() string { + return s.String() +} + +type GetUserPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the policy document to get. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + PolicyName *string `min:"1" type:"string" required:"true"` + + // The name of the user who the policy is associated with. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetUserPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetUserPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetUserPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetUserPolicyInput"} + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetUserPolicy request. +type GetUserPolicyOutput struct { + _ struct{} `type:"structure"` + + // The policy document. + PolicyDocument *string `min:"1" type:"string" required:"true"` + + // The name of the policy. + PolicyName *string `min:"1" type:"string" required:"true"` + + // The user the policy is associated with. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetUserPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetUserPolicyOutput) GoString() string { + return s.String() +} + +// Contains information about an IAM group entity. +// +// This data type is used as a response element in the following actions: +// +// CreateGroup +// +// GetGroup +// +// ListGroups +type Group struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) specifying the group. For more information + // about ARNs and how to use them in policies, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Arn *string `min:"20" type:"string" required:"true"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the group was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The stable and unique string identifying the group. 
+	// For more information about IDs, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html)
+	// in the Using IAM guide.
+	GroupId *string `min:"16" type:"string" required:"true"`
+
+	// The friendly name that identifies the group.
+	GroupName *string `min:"1" type:"string" required:"true"`
+
+	// The path to the group. For more information about paths, see IAM Identifiers
+	// (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html)
+	// in the Using IAM guide.
+	Path *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s Group) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Group) GoString() string {
+	return s.String()
+}
+
+// Contains information about an IAM group, including all of the group's policies.
+//
+// This data type is used as a response element in the GetAccountAuthorizationDetails
+// action.
+type GroupDetail struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources.
+	//
+	// For more information about ARNs, go to Amazon Resource Names (ARNs) and
+	// AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+	// in the AWS General Reference.
+	Arn *string `min:"20" type:"string"`
+
+	// A list of the managed policies attached to the group.
+	AttachedManagedPolicies []*AttachedPolicy `type:"list"`
+
+	// The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601),
+	// when the group was created.
+	CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+	// The stable and unique string identifying the group. For more information
+	// about IDs, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html)
+	// in the Using IAM guide.
+	GroupId *string `min:"16" type:"string"`
+
+	// The friendly name that identifies the group.
+	GroupName *string `min:"1" type:"string"`
+
+	// A list of the inline policies embedded in the group.
+	GroupPolicyList []*PolicyDetail `type:"list"`
+
+	// The path to the group. For more information about paths, see IAM Identifiers
+	// (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html)
+	// in the Using IAM guide.
+	Path *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s GroupDetail) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GroupDetail) GoString() string {
+	return s.String()
+}
+
+// Contains information about an instance profile.
+//
+// This data type is used as a response element in the following actions:
+//
+// CreateInstanceProfile
+//
+// GetInstanceProfile
+//
+// ListInstanceProfiles
+//
+// ListInstanceProfilesForRole
+type InstanceProfile struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) specifying the instance profile. For more
+	// information about ARNs and how to use them in policies, see IAM Identifiers
+	// (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html)
+	// in the Using IAM guide.
+	Arn *string `min:"20" type:"string" required:"true"`
+
+	// The date when the instance profile was created.
+	CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
+
+	// The stable and unique string identifying the instance profile. For more
+	// information about IDs, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html)
+	// in the Using IAM guide.
+	InstanceProfileId *string `min:"16" type:"string" required:"true"`
+
+	// The name identifying the instance profile.
+	InstanceProfileName *string `min:"1" type:"string" required:"true"`
+
+	// The path to the instance profile. For more information about paths, see IAM
+	// Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html)
+	// in the Using IAM guide.
+	Path *string `min:"1" type:"string" required:"true"`
+
+	// The role associated with the instance profile.
+	Roles []*Role `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s InstanceProfile) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceProfile) GoString() string {
+	return s.String()
+}
+
+type ListAccessKeysInput struct {
+	_ struct{} `type:"structure"`
+
+	// Use this parameter only when paginating results and only after you receive
+	// a response indicating that the results are truncated. Set it to the value
+	// of the Marker element in the response that you received to indicate where
+	// the next call should start.
+	Marker *string `min:"1" type:"string"`
+
+	// Use this only when paginating results to indicate the maximum number of items
+	// you want in the response. If additional items exist beyond the maximum you
+	// specify, the IsTruncated response element is true.
+	//
+	// This parameter is optional. If you do not include it, it defaults to 100.
+	// Note that IAM might return fewer results, even when there are more results
+	// available. In that case, the IsTruncated response element returns true and
+	// Marker contains a value to include in the subsequent call that tells the
+	// service where to continue from.
+	MaxItems *int64 `min:"1" type:"integer"`
+
+	// The name of the user.
+	//
+	// The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is
+	// a string of characters consisting of upper and lowercase alphanumeric characters
+	// with no spaces. You can also include any of the following characters: =,.@-
+	UserName *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ListAccessKeysInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListAccessKeysInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListAccessKeysInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListAccessKeysInput"}
+	if s.Marker != nil && len(*s.Marker) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Marker", 1))
+	}
+	if s.MaxItems != nil && *s.MaxItems < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1))
+	}
+	if s.UserName != nil && len(*s.UserName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("UserName", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Contains the response to a successful ListAccessKeys request.
+type ListAccessKeysOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of objects containing metadata about the access keys.
+	AccessKeyMetadata []*AccessKeyMetadata `type:"list" required:"true"`
+
+	// A flag that indicates whether there are more items to return. If your results
+	// were truncated, you can make a subsequent pagination request using the Marker
+	// request parameter to retrieve more items. Note that IAM might return fewer
+	// than the MaxItems number of results even when there are more results available.
+	// We recommend that you check IsTruncated after every call to ensure that you
+	// receive all of your results.
+	IsTruncated *bool `type:"boolean"`
+
+	// When IsTruncated is true, this element is present and contains the value
+	// to use for the Marker parameter in a subsequent pagination request.
+	Marker *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ListAccessKeysOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListAccessKeysOutput) GoString() string {
+	return s.String()
+}
+
+type ListAccountAliasesInput struct {
+	_ struct{} `type:"structure"`
+
+	// Use this parameter only when paginating results and only after you receive
+	// a response indicating that the results are truncated. Set it to the value
+	// of the Marker element in the response that you received to indicate where
+	// the next call should start.
+	Marker *string `min:"1" type:"string"`
+
+	// Use this only when paginating results to indicate the maximum number of items
+	// you want in the response. If additional items exist beyond the maximum you
+	// specify, the IsTruncated response element is true.
+	//
+	// This parameter is optional. If you do not include it, it defaults to 100.
+	// Note that IAM might return fewer results, even when there are more results
+	// available. In that case, the IsTruncated response element returns true and
+	// Marker contains a value to include in the subsequent call that tells the
+	// service where to continue from.
+	MaxItems *int64 `min:"1" type:"integer"`
+}
+
+// String returns the string representation
+func (s ListAccountAliasesInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListAccountAliasesInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListAccountAliasesInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListAccountAliasesInput"}
+	if s.Marker != nil && len(*s.Marker) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Marker", 1))
+	}
+	if s.MaxItems != nil && *s.MaxItems < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Contains the response to a successful ListAccountAliases request.
+type ListAccountAliasesOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of aliases associated with the account. AWS supports only one alias
+	// per account.
+	AccountAliases []*string `type:"list" required:"true"`
+
+	// A flag that indicates whether there are more items to return. If your results
+	// were truncated, you can make a subsequent pagination request using the Marker
+	// request parameter to retrieve more items. Note that IAM might return fewer
+	// than the MaxItems number of results even when there are more results available.
+	// We recommend that you check IsTruncated after every call to ensure that you
+	// receive all of your results.
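+	//
+	// As a minimal sketch of the pagination loop described above ("svc" is
+	// assumed to be an *iam.IAM client):
+	//
+	//	in := &iam.ListAccountAliasesInput{}
+	//	for {
+	//		out, err := svc.ListAccountAliases(in)
+	//		if err != nil {
+	//			break // real code should handle the error
+	//		}
+	//		for _, alias := range out.AccountAliases {
+	//			fmt.Println(*alias)
+	//		}
+	//		if out.IsTruncated == nil || !*out.IsTruncated {
+	//			break
+	//		}
+	//		in.Marker = out.Marker
+	//	}
+	//
+	// The generated ListAccountAliasesPages helper wraps this same loop.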
+ IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListAccountAliasesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAccountAliasesOutput) GoString() string { + return s.String() +} + +type ListAttachedGroupPoliciesInput struct { + _ struct{} `type:"structure"` + + // The name (friendly name, not ARN) of the group to list attached policies + // for. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + GroupName *string `min:"1" type:"string" required:"true"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The path prefix for filtering the results. This parameter is optional. If + // it is not included, it defaults to a slash (/), listing all policies. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes, containing any + // ASCII character from the ! (\u0021) thru the DEL character (\u007F), including + // most punctuation characters, digits, and upper and lowercased letters. + PathPrefix *string `type:"string"` +} + +// String returns the string representation +func (s ListAttachedGroupPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAttachedGroupPoliciesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
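+//
+// A rough sketch of inspecting the aggregated validation error (the error
+// returned here should also satisfy the awserr.Error interface, assuming the
+// awserr package is imported):
+//
+//	in := &iam.ListAttachedGroupPoliciesInput{} // GroupName is missing
+//	if err := in.Validate(); err != nil {
+//		if aerr, ok := err.(awserr.Error); ok {
+//			fmt.Println(aerr.Code(), aerr.Message())
+//		}
+//	}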
+func (s *ListAttachedGroupPoliciesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAttachedGroupPoliciesInput"} + if s.GroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GroupName")) + } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) + } + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful ListAttachedGroupPolicies request. +type ListAttachedGroupPoliciesOutput struct { + _ struct{} `type:"structure"` + + // A list of the attached policies. + AttachedPolicies []*AttachedPolicy `type:"list"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListAttachedGroupPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAttachedGroupPoliciesOutput) GoString() string { + return s.String() +} + +type ListAttachedRolePoliciesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The path prefix for filtering the results. This parameter is optional. If + // it is not included, it defaults to a slash (/), listing all policies. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes, containing any + // ASCII character from the ! (\u0021) thru the DEL character (\u007F), including + // most punctuation characters, digits, and upper and lowercased letters. + PathPrefix *string `type:"string"` + + // The name (friendly name, not ARN) of the role to list attached policies for. 
+ // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListAttachedRolePoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAttachedRolePoliciesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListAttachedRolePoliciesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAttachedRolePoliciesInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful ListAttachedRolePolicies request. +type ListAttachedRolePoliciesOutput struct { + _ struct{} `type:"structure"` + + // A list of the attached policies. + AttachedPolicies []*AttachedPolicy `type:"list"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListAttachedRolePoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAttachedRolePoliciesOutput) GoString() string { + return s.String() +} + +type ListAttachedUserPoliciesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. 
+ MaxItems *int64 `min:"1" type:"integer"` + + // The path prefix for filtering the results. This parameter is optional. If + // it is not included, it defaults to a slash (/), listing all policies. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes, containing any + // ASCII character from the ! (\u0021) thru the DEL character (\u007F), including + // most punctuation characters, digits, and upper and lowercased letters. + PathPrefix *string `type:"string"` + + // The name (friendly name, not ARN) of the user to list attached policies for. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListAttachedUserPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAttachedUserPoliciesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListAttachedUserPoliciesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAttachedUserPoliciesInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful ListAttachedUserPolicies request. +type ListAttachedUserPoliciesOutput struct { + _ struct{} `type:"structure"` + + // A list of the attached policies. + AttachedPolicies []*AttachedPolicy `type:"list"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListAttachedUserPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAttachedUserPoliciesOutput) GoString() string { + return s.String() +} + +type ListEntitiesForPolicyInput struct { + _ struct{} `type:"structure"` + + // The entity type to use for filtering the results. + // + // For example, when EntityFilter is Role, only the roles that are attached + // to the specified policy are returned. This parameter is optional. 
+	// If it is not included, all attached entities (users, groups, and roles)
+	// are returned. The argument for this parameter must be one of the valid
+	// values listed below.
+	EntityFilter *string `type:"string" enum:"EntityType"`
+
+	// Use this parameter only when paginating results and only after you receive
+	// a response indicating that the results are truncated. Set it to the value
+	// of the Marker element in the response that you received to indicate where
+	// the next call should start.
+	Marker *string `min:"1" type:"string"`
+
+	// Use this only when paginating results to indicate the maximum number of items
+	// you want in the response. If additional items exist beyond the maximum you
+	// specify, the IsTruncated response element is true.
+	//
+	// This parameter is optional. If you do not include it, it defaults to 100.
+	// Note that IAM might return fewer results, even when there are more results
+	// available. In that case, the IsTruncated response element returns true and
+	// Marker contains a value to include in the subsequent call that tells the
+	// service where to continue from.
+	MaxItems *int64 `min:"1" type:"integer"`
+
+	// The path prefix for filtering the results. This parameter is optional. If
+	// it is not included, it defaults to a slash (/), listing all entities.
+	//
+	// The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is
+	// a string of characters consisting of either a forward slash (/) by itself
+	// or a string that must begin and end with forward slashes, containing any
+	// ASCII character from the ! (\u0021) thru the DEL character (\u007F), including
+	// most punctuation characters, digits, and upper and lowercased letters.
+	PathPrefix *string `min:"1" type:"string"`
+
+	// The Amazon Resource Name (ARN) of the IAM policy for which you want to list
+	// the attached entities.
+	//
+	// For more information about ARNs, see Amazon Resource Names (ARNs) and AWS
+	// Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+	// in the AWS General Reference.
+	PolicyArn *string `min:"20" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListEntitiesForPolicyInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListEntitiesForPolicyInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListEntitiesForPolicyInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListEntitiesForPolicyInput"}
+	if s.Marker != nil && len(*s.Marker) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Marker", 1))
+	}
+	if s.MaxItems != nil && *s.MaxItems < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1))
+	}
+	if s.PathPrefix != nil && len(*s.PathPrefix) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("PathPrefix", 1))
+	}
+	if s.PolicyArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("PolicyArn"))
+	}
+	if s.PolicyArn != nil && len(*s.PolicyArn) < 20 {
+		invalidParams.Add(request.NewErrParamMinLen("PolicyArn", 20))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Contains the response to a successful ListEntitiesForPolicy request.
+type ListEntitiesForPolicyOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A flag that indicates whether there are more items to return. If your results
+	// were truncated, you can make a subsequent pagination request using the Marker
+	// request parameter to retrieve more items. Note that IAM might return fewer
+	// than the MaxItems number of results even when there are more results available.
+	// We recommend that you check IsTruncated after every call to ensure that you
+	// receive all of your results.
+	IsTruncated *bool `type:"boolean"`
+
+	// When IsTruncated is true, this element is present and contains the value
+	// to use for the Marker parameter in a subsequent pagination request.
+	Marker *string `min:"1" type:"string"`
+
+	// A list of IAM groups that the policy is attached to.
+	PolicyGroups []*PolicyGroup `type:"list"`
+
+	// A list of IAM roles that the policy is attached to.
+	PolicyRoles []*PolicyRole `type:"list"`
+
+	// A list of IAM users that the policy is attached to.
+	PolicyUsers []*PolicyUser `type:"list"`
+}
+
+// String returns the string representation
+func (s ListEntitiesForPolicyOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListEntitiesForPolicyOutput) GoString() string {
+	return s.String()
+}
+
+type ListGroupPoliciesInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the group to list policies for.
+	//
+	// The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is
+	// a string of characters consisting of upper and lowercase alphanumeric characters
+	// with no spaces. You can also include any of the following characters: =,.@-
+	GroupName *string `min:"1" type:"string" required:"true"`
+
+	// Use this parameter only when paginating results and only after you receive
+	// a response indicating that the results are truncated. Set it to the value
+	// of the Marker element in the response that you received to indicate where
+	// the next call should start.
+	Marker *string `min:"1" type:"string"`
+
+	// Use this only when paginating results to indicate the maximum number of items
+	// you want in the response. If additional items exist beyond the maximum you
+	// specify, the IsTruncated response element is true.
+	//
+	// This parameter is optional. If you do not include it, it defaults to 100.
+	// Note that IAM might return fewer results, even when there are more results
+	// available. In that case, the IsTruncated response element returns true and
+	// Marker contains a value to include in the subsequent call that tells the
+	// service where to continue from.
+	MaxItems *int64 `min:"1" type:"integer"`
+}
+
+// String returns the string representation
+func (s ListGroupPoliciesInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListGroupPoliciesInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
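+//
+// For context, a minimal sketch of the surrounding call ("svc" is assumed to
+// be an *iam.IAM client); each returned name can then be passed to
+// GetGroupPolicy to fetch the policy document itself:
+//
+//	out, err := svc.ListGroupPolicies(&iam.ListGroupPoliciesInput{
+//		GroupName: aws.String("Admins"),
+//	})
+//	if err == nil {
+//		for _, name := range out.PolicyNames {
+//			fmt.Println(*name)
+//		}
+//	}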
+func (s *ListGroupPoliciesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListGroupPoliciesInput"} + if s.GroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GroupName")) + } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) + } + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful ListGroupPolicies request. +type ListGroupPoliciesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` + + // A list of policy names. + PolicyNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListGroupPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGroupPoliciesOutput) GoString() string { + return s.String() +} + +type ListGroupsForUserInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The name of the user to list groups for. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListGroupsForUserInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGroupsForUserInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListGroupsForUserInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListGroupsForUserInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful ListGroupsForUser request. +type ListGroupsForUserOutput struct { + _ struct{} `type:"structure"` + + // A list of groups. + Groups []*Group `type:"list" required:"true"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListGroupsForUserOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGroupsForUserOutput) GoString() string { + return s.String() +} + +type ListGroupsInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The path prefix for filtering the results. For example, the prefix /division_abc/subdivision_xyz/ + // gets all groups whose path starts with /division_abc/subdivision_xyz/. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/), listing all groups. The regex pattern (http://wikipedia.org/wiki/regex) + // for this parameter is a string of characters consisting of either a forward + // slash (/) by itself or a string that must begin and end with forward slashes, + // containing any ASCII character from the ! (\u0021) thru the DEL character + // (\u007F), including most punctuation characters, digits, and upper and lowercased + // letters. 
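+	//
+	// A minimal sketch using the example prefix above ("svc" is assumed to be
+	// an *iam.IAM client):
+	//
+	//	out, err := svc.ListGroups(&iam.ListGroupsInput{
+	//		PathPrefix: aws.String("/division_abc/subdivision_xyz/"),
+	//	})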
+ PathPrefix *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGroupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListGroupsInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.PathPrefix != nil && len(*s.PathPrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PathPrefix", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful ListGroups request. +type ListGroupsOutput struct { + _ struct{} `type:"structure"` + + // A list of groups. + Groups []*Group `type:"list" required:"true"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGroupsOutput) GoString() string { + return s.String() +} + +type ListInstanceProfilesForRoleInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The name of the role to list instance profiles for. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. 
+	// You can also include any of the following characters: =,.@-
+	RoleName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListInstanceProfilesForRoleInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListInstanceProfilesForRoleInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListInstanceProfilesForRoleInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListInstanceProfilesForRoleInput"}
+	if s.Marker != nil && len(*s.Marker) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Marker", 1))
+	}
+	if s.MaxItems != nil && *s.MaxItems < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1))
+	}
+	if s.RoleName == nil {
+		invalidParams.Add(request.NewErrParamRequired("RoleName"))
+	}
+	if s.RoleName != nil && len(*s.RoleName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("RoleName", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Contains the response to a successful ListInstanceProfilesForRole request.
+type ListInstanceProfilesForRoleOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of instance profiles.
+	InstanceProfiles []*InstanceProfile `type:"list" required:"true"`
+
+	// A flag that indicates whether there are more items to return. If your results
+	// were truncated, you can make a subsequent pagination request using the Marker
+	// request parameter to retrieve more items. Note that IAM might return fewer
+	// than the MaxItems number of results even when there are more results available.
+	// We recommend that you check IsTruncated after every call to ensure that you
+	// receive all of your results.
+	IsTruncated *bool `type:"boolean"`
+
+	// When IsTruncated is true, this element is present and contains the value
+	// to use for the Marker parameter in a subsequent pagination request.
+	Marker *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ListInstanceProfilesForRoleOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListInstanceProfilesForRoleOutput) GoString() string {
+	return s.String()
+}
+
+type ListInstanceProfilesInput struct {
+	_ struct{} `type:"structure"`
+
+	// Use this parameter only when paginating results and only after you receive
+	// a response indicating that the results are truncated. Set it to the value
+	// of the Marker element in the response that you received to indicate where
+	// the next call should start.
+	Marker *string `min:"1" type:"string"`
+
+	// Use this only when paginating results to indicate the maximum number of items
+	// you want in the response. If additional items exist beyond the maximum you
+	// specify, the IsTruncated response element is true.
+	//
+	// This parameter is optional. If you do not include it, it defaults to 100.
+	// Note that IAM might return fewer results, even when there are more results
+	// available. In that case, the IsTruncated response element returns true and
+	// Marker contains a value to include in the subsequent call that tells the
+	// service where to continue from.
+	MaxItems *int64 `min:"1" type:"integer"`
+
+	// The path prefix for filtering the results. For example, the prefix /application_abc/component_xyz/
+	// gets all instance profiles whose path starts with /application_abc/component_xyz/.
+	//
+	// This parameter is optional. If it is not included, it defaults to a slash
+	// (/), listing all instance profiles. The regex pattern (http://wikipedia.org/wiki/regex)
+	// for this parameter is a string of characters consisting of either a forward
+	// slash (/) by itself or a string that must begin and end with forward slashes,
+	// containing any ASCII character from the ! (\u0021) thru the DEL character
+	// (\u007F), including most punctuation characters, digits, and upper and lowercased
+	// letters.
+	PathPrefix *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ListInstanceProfilesInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListInstanceProfilesInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListInstanceProfilesInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListInstanceProfilesInput"}
+	if s.Marker != nil && len(*s.Marker) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Marker", 1))
+	}
+	if s.MaxItems != nil && *s.MaxItems < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1))
+	}
+	if s.PathPrefix != nil && len(*s.PathPrefix) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("PathPrefix", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Contains the response to a successful ListInstanceProfiles request.
+type ListInstanceProfilesOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of instance profiles.
+	InstanceProfiles []*InstanceProfile `type:"list" required:"true"`
+
+	// A flag that indicates whether there are more items to return. If your results
+	// were truncated, you can make a subsequent pagination request using the Marker
+	// request parameter to retrieve more items. Note that IAM might return fewer
+	// than the MaxItems number of results even when there are more results available.
+	// We recommend that you check IsTruncated after every call to ensure that you
+	// receive all of your results.
+	IsTruncated *bool `type:"boolean"`
+
+	// When IsTruncated is true, this element is present and contains the value
+	// to use for the Marker parameter in a subsequent pagination request.
+	Marker *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ListInstanceProfilesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListInstanceProfilesOutput) GoString() string {
+	return s.String()
+}
+
+type ListMFADevicesInput struct {
+	_ struct{} `type:"structure"`
+
+	// Use this parameter only when paginating results and only after you receive
+	// a response indicating that the results are truncated. Set it to the value
+	// of the Marker element in the response that you received to indicate where
+	// the next call should start.
+	Marker *string `min:"1" type:"string"`
+
+	// Use this only when paginating results to indicate the maximum number of items
+	// you want in the response. If additional items exist beyond the maximum you
+	// specify, the IsTruncated response element is true.
+	//
+	// This parameter is optional. If you do not include it, it defaults to 100.
+ // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The name of the user whose MFA devices you want to list. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListMFADevicesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMFADevicesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListMFADevicesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListMFADevicesInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful ListMFADevices request. +type ListMFADevicesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // A list of MFA devices. + MFADevices []*MFADevice `type:"list" required:"true"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListMFADevicesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMFADevicesOutput) GoString() string { + return s.String() +} + +type ListOpenIDConnectProvidersInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ListOpenIDConnectProvidersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListOpenIDConnectProvidersInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListOpenIDConnectProviders request. +type ListOpenIDConnectProvidersOutput struct { + _ struct{} `type:"structure"` + + // The list of IAM OIDC provider resource objects defined in the AWS account. 
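+	//
+	// A minimal sketch ("svc" is assumed to be an *iam.IAM client); the
+	// operation takes no parameters beyond the empty input struct:
+	//
+	//	out, err := svc.ListOpenIDConnectProviders(&iam.ListOpenIDConnectProvidersInput{})
+	//	if err == nil {
+	//		for _, p := range out.OpenIDConnectProviderList {
+	//			fmt.Println(*p.Arn)
+	//		}
+	//	}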
+ OpenIDConnectProviderList []*OpenIDConnectProviderListEntry `type:"list"` +} + +// String returns the string representation +func (s ListOpenIDConnectProvidersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListOpenIDConnectProvidersOutput) GoString() string { + return s.String() +} + +type ListPoliciesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // A flag to filter the results to only the attached policies. + // + // When OnlyAttached is true, the returned list contains only the policies + // that are attached to an IAM user, group, or role. When OnlyAttached is false, + // or when the parameter is not included, all policies are returned. + OnlyAttached *bool `type:"boolean"` + + // The path prefix for filtering the results. This parameter is optional. If + // it is not included, it defaults to a slash (/), listing all policies. The + // regex pattern (http://wikipedia.org/wiki/regex) for this parameter is a string + // of characters consisting of either a forward slash (/) by itself or a string + // that must begin and end with forward slashes, containing any ASCII character + // from the ! (\u0021) thru the DEL character (\u007F), including most punctuation + // characters, digits, and upper and lowercased letters. + PathPrefix *string `type:"string"` + + // The scope to use for filtering the results. + // + // To list only AWS managed policies, set Scope to AWS. To list only the customer + // managed policies in your AWS account, set Scope to Local. + // + // This parameter is optional. If it is not included, or if it is set to All, + // all policies are returned. + Scope *string `type:"string" enum:"policyScopeType"` +} + +// String returns the string representation +func (s ListPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPoliciesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListPoliciesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListPoliciesInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful ListPolicies request. 
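+//
+// A hypothetical usage sketch (not part of the generated API; assumes a
+// configured *iam.IAM client named svc): the IsTruncated and Marker fields
+// below drive pagination, so a caller might loop like this:
+//
+//	params := &iam.ListPoliciesInput{Scope: aws.String("Local")}
+//	for {
+//		page, err := svc.ListPolicies(params)
+//		if err != nil {
+//			break // handle the error properly in real code
+//		}
+//		for _, p := range page.Policies {
+//			fmt.Println(aws.StringValue(p.PolicyName))
+//		}
+//		if !aws.BoolValue(page.IsTruncated) {
+//			break
+//		}
+//		params.Marker = page.Marker // resume where the previous call stopped
+//	}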
+type ListPoliciesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` + + // A list of policies. + Policies []*Policy `type:"list"` +} + +// String returns the string representation +func (s ListPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPoliciesOutput) GoString() string { + return s.String() +} + +type ListPolicyVersionsInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The Amazon Resource Name (ARN) of the IAM policy for which you want the versions. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListPolicyVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPolicyVersionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListPolicyVersionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListPolicyVersionsInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.PolicyArn == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyArn")) + } + if s.PolicyArn != nil && len(*s.PolicyArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PolicyArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful ListPolicyVersions request. 
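+//
+// A hypothetical sketch (assumes a configured *iam.IAM client named svc and
+// a policyArn variable): per the PolicyVersion documentation later in this
+// file, the Versions list carries metadata only, and the full Document text
+// comes back from GetPolicyVersion:
+//
+//	out, err := svc.ListPolicyVersions(&iam.ListPolicyVersionsInput{
+//		PolicyArn: aws.String(policyArn),
+//	})
+//	if err == nil {
+//		for _, v := range out.Versions {
+//			if aws.BoolValue(v.IsDefaultVersion) {
+//				// Fetch the full document for the default version.
+//				dv, _ := svc.GetPolicyVersion(&iam.GetPolicyVersionInput{
+//					PolicyArn: aws.String(policyArn),
+//					VersionId: v.VersionId,
+//				})
+//				_ = dv // dv.PolicyVersion.Document holds the policy JSON
+//			}
+//		}
+//	}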
+type ListPolicyVersionsOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` + + // A list of policy versions. + // + // For more information about managed policy versions, see Versioning for Managed + // Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) + // in the IAM User Guide. + Versions []*PolicyVersion `type:"list"` +} + +// String returns the string representation +func (s ListPolicyVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPolicyVersionsOutput) GoString() string { + return s.String() +} + +type ListRolePoliciesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The name of the role to list policies for. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListRolePoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRolePoliciesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
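+//
+// The SDK's core handlers call Validate automatically before a request is
+// sent, but it can also be invoked directly. A minimal sketch:
+//
+//	in := &iam.ListRolePoliciesInput{RoleName: aws.String("")}
+//	err := in.Validate() // non-nil: RoleName is set but below its 1-character minimum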
+func (s *ListRolePoliciesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListRolePoliciesInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful ListRolePolicies request. +type ListRolePoliciesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` + + // A list of policy names. + PolicyNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListRolePoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRolePoliciesOutput) GoString() string { + return s.String() +} + +type ListRolesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The path prefix for filtering the results. For example, the prefix /application_abc/component_xyz/ + // gets all roles whose path starts with /application_abc/component_xyz/. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/), listing all roles. The regex pattern (http://wikipedia.org/wiki/regex) + // for this parameter is a string of characters consisting of either a forward + // slash (/) by itself or a string that must begin and end with forward slashes, + // containing any ASCII character from the ! (\u0021) thru the DEL character + // (\u007F), including most punctuation characters, digits, and upper and lowercased + // letters. 
+ PathPrefix *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListRolesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRolesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListRolesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListRolesInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.PathPrefix != nil && len(*s.PathPrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PathPrefix", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful ListRoles request. +type ListRolesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` + + // A list of roles. + Roles []*Role `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListRolesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRolesOutput) GoString() string { + return s.String() +} + +type ListSAMLProvidersInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ListSAMLProvidersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSAMLProvidersInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListSAMLProviders request. +type ListSAMLProvidersOutput struct { + _ struct{} `type:"structure"` + + // The list of SAML provider resource objects defined in IAM for this AWS account. + SAMLProviderList []*SAMLProviderListEntry `type:"list"` +} + +// String returns the string representation +func (s ListSAMLProvidersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSAMLProvidersOutput) GoString() string { + return s.String() +} + +type ListSSHPublicKeysInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. 
If you do not include it, it defaults to 100.
+	// Note that IAM might return fewer results, even when there are more results
+	// available. In that case, the IsTruncated response element returns true and
+	// Marker contains a value to include in the subsequent call that tells the
+	// service where to continue from.
+	MaxItems *int64 `min:"1" type:"integer"`
+
+	// The name of the IAM user to list SSH public keys for. If none is specified,
+	// the UserName field is determined implicitly based on the AWS access key used
+	// to sign the request.
+	//
+	// The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is
+	// a string of characters consisting of upper and lowercase alphanumeric characters
+	// with no spaces. You can also include any of the following characters: =,.@-
+	UserName *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ListSSHPublicKeysInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListSSHPublicKeysInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListSSHPublicKeysInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListSSHPublicKeysInput"}
+	if s.Marker != nil && len(*s.Marker) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Marker", 1))
+	}
+	if s.MaxItems != nil && *s.MaxItems < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1))
+	}
+	if s.UserName != nil && len(*s.UserName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("UserName", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Contains the response to a successful ListSSHPublicKeys request.
+type ListSSHPublicKeysOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A flag that indicates whether there are more items to return. If your results
+	// were truncated, you can make a subsequent pagination request using the Marker
+	// request parameter to retrieve more items. Note that IAM might return fewer
+	// than the MaxItems number of results even when there are more results available.
+	// We recommend that you check IsTruncated after every call to ensure that you
+	// receive all of your results.
+	IsTruncated *bool `type:"boolean"`
+
+	// When IsTruncated is true, this element is present and contains the value
+	// to use for the Marker parameter in a subsequent pagination request.
+	Marker *string `min:"1" type:"string"`
+
+	// A list of the SSH public keys assigned to the IAM user.
+	SSHPublicKeys []*SSHPublicKeyMetadata `type:"list"`
+}
+
+// String returns the string representation
+func (s ListSSHPublicKeysOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListSSHPublicKeysOutput) GoString() string {
+	return s.String()
+}
+
+type ListServerCertificatesInput struct {
+	_ struct{} `type:"structure"`
+
+	// Use this parameter only when paginating results and only after you receive
+	// a response indicating that the results are truncated. Set it to the value
+	// of the Marker element in the response that you received to indicate where
+	// the next call should start.
+	Marker *string `min:"1" type:"string"`
+
+	// Use this only when paginating results to indicate the maximum number of items
+	// you want in the response. If additional items exist beyond the maximum you
+	// specify, the IsTruncated response element is true.
+ // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The path prefix for filtering the results. For example: /company/servercerts + // would get all server certificates for which the path starts with /company/servercerts. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/), listing all server certificates. The regex pattern (http://wikipedia.org/wiki/regex) + // for this parameter is a string of characters consisting of either a forward + // slash (/) by itself or a string that must begin and end with forward slashes, + // containing any ASCII character from the ! (\u0021) thru the DEL character + // (\u007F), including most punctuation characters, digits, and upper and lowercased + // letters. + PathPrefix *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListServerCertificatesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListServerCertificatesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListServerCertificatesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListServerCertificatesInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.PathPrefix != nil && len(*s.PathPrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PathPrefix", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful ListServerCertificates request. +type ListServerCertificatesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` + + // A list of server certificates. + ServerCertificateMetadataList []*ServerCertificateMetadata `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListServerCertificatesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListServerCertificatesOutput) GoString() string { + return s.String() +} + +type ListSigningCertificatesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. 
Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The name of the IAM user whose signing certificates you want to examine. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListSigningCertificatesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSigningCertificatesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListSigningCertificatesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListSigningCertificatesInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful ListSigningCertificates request. +type ListSigningCertificatesOutput struct { + _ struct{} `type:"structure"` + + // A list of the user's signing certificate information. + Certificates []*SigningCertificate `type:"list" required:"true"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListSigningCertificatesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSigningCertificatesOutput) GoString() string { + return s.String() +} + +type ListUserPoliciesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. 
Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The name of the user to list policies for. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListUserPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListUserPoliciesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListUserPoliciesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListUserPoliciesInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful ListUserPolicies request. +type ListUserPoliciesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` + + // A list of policy names. + PolicyNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListUserPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListUserPoliciesOutput) GoString() string { + return s.String() +} + +type ListUsersInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. 
Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The path prefix for filtering the results. For example: /division_abc/subdivision_xyz/, + // which would get all user names whose path starts with /division_abc/subdivision_xyz/. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/), listing all user names. The regex pattern (http://wikipedia.org/wiki/regex) + // for this parameter is a string of characters consisting of either a forward + // slash (/) by itself or a string that must begin and end with forward slashes, + // containing any ASCII character from the ! (\u0021) thru the DEL character + // (\u007F), including most punctuation characters, digits, and upper and lowercased + // letters. + PathPrefix *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListUsersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListUsersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListUsersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListUsersInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.PathPrefix != nil && len(*s.PathPrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PathPrefix", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful ListUsers request. +type ListUsersOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` + + // A list of users. 
+	Users []*User `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s ListUsersOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListUsersOutput) GoString() string {
+	return s.String()
+}
+
+type ListVirtualMFADevicesInput struct {
+	_ struct{} `type:"structure"`
+
+	// The status (Unassigned or Assigned) of the devices to list. If you do not
+	// specify an AssignmentStatus, the action defaults to Any, which lists both
+	// assigned and unassigned virtual MFA devices.
+	AssignmentStatus *string `type:"string" enum:"assignmentStatusType"`
+
+	// Use this parameter only when paginating results and only after you receive
+	// a response indicating that the results are truncated. Set it to the value
+	// of the Marker element in the response that you received to indicate where
+	// the next call should start.
+	Marker *string `min:"1" type:"string"`
+
+	// Use this only when paginating results to indicate the maximum number of items
+	// you want in the response. If additional items exist beyond the maximum you
+	// specify, the IsTruncated response element is true.
+	//
+	// This parameter is optional. If you do not include it, it defaults to 100.
+	// Note that IAM might return fewer results, even when there are more results
+	// available. In that case, the IsTruncated response element returns true and
+	// Marker contains a value to include in the subsequent call that tells the
+	// service where to continue from.
+	MaxItems *int64 `min:"1" type:"integer"`
+}
+
+// String returns the string representation
+func (s ListVirtualMFADevicesInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListVirtualMFADevicesInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListVirtualMFADevicesInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListVirtualMFADevicesInput"}
+	if s.Marker != nil && len(*s.Marker) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Marker", 1))
+	}
+	if s.MaxItems != nil && *s.MaxItems < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Contains the response to a successful ListVirtualMFADevices request.
+type ListVirtualMFADevicesOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A flag that indicates whether there are more items to return. If your results
+	// were truncated, you can make a subsequent pagination request using the Marker
+	// request parameter to retrieve more items. Note that IAM might return fewer
+	// than the MaxItems number of results even when there are more results available.
+	// We recommend that you check IsTruncated after every call to ensure that you
+	// receive all of your results.
+	IsTruncated *bool `type:"boolean"`
+
+	// When IsTruncated is true, this element is present and contains the value
+	// to use for the Marker parameter in a subsequent pagination request.
+	Marker *string `min:"1" type:"string"`
+
+	// The list of virtual MFA devices in the current account that match the AssignmentStatus
+	// value that was passed in the request.
+ VirtualMFADevices []*VirtualMFADevice `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListVirtualMFADevicesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListVirtualMFADevicesOutput) GoString() string { + return s.String() +} + +// Contains the user name and password create date for a user. +// +// This data type is used as a response element in the CreateLoginProfile +// and GetLoginProfile actions. +type LoginProfile struct { + _ struct{} `type:"structure"` + + // The date when the password for the user was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // Specifies whether the user is required to set a new password on next sign-in. + PasswordResetRequired *bool `type:"boolean"` + + // The name of the user, which can be used for signing in to the AWS Management + // Console. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s LoginProfile) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoginProfile) GoString() string { + return s.String() +} + +// Contains information about an MFA device. +// +// This data type is used as a response element in the ListMFADevices action. +type MFADevice struct { + _ struct{} `type:"structure"` + + // The date when the MFA device was enabled for the user. + EnableDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The serial number that uniquely identifies the MFA device. For virtual MFA + // devices, the serial number is the device ARN. + SerialNumber *string `min:"9" type:"string" required:"true"` + + // The user with whom the MFA device is associated. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s MFADevice) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MFADevice) GoString() string { + return s.String() +} + +// Contains information about a managed policy, including the policy's ARN, +// versions, and the number of principal entities (users, groups, and roles) +// that the policy is attached to. +// +// This data type is used as a response element in the GetAccountAuthorizationDetails +// action. +// +// For more information about managed policies, see Managed Policies and Inline +// Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the Using IAM guide. +type ManagedPolicyDetail struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + Arn *string `min:"20" type:"string"` + + // The number of principal entities (users, groups, and roles) that the policy + // is attached to. + AttachmentCount *int64 `type:"integer"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the policy was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The identifier for the version of the policy that is set as the default (operative) + // version. 
+	//
+	// For more information about policy versions, see Versioning for Managed Policies
+	// (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html)
+	// in the Using IAM guide.
+	DefaultVersionId *string `type:"string"`
+
+	// A friendly description of the policy.
+	Description *string `type:"string"`
+
+	// Specifies whether the policy can be attached to an IAM user, group, or role.
+	IsAttachable *bool `type:"boolean"`
+
+	// The path to the policy.
+	//
+	// For more information about paths, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html)
+	// in the Using IAM guide.
+	Path *string `type:"string"`
+
+	// The stable and unique string identifying the policy.
+	//
+	// For more information about IDs, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html)
+	// in the Using IAM guide.
+	PolicyId *string `min:"16" type:"string"`
+
+	// The friendly name (not ARN) identifying the policy.
+	PolicyName *string `min:"1" type:"string"`
+
+	// A list containing information about the versions of the policy.
+	PolicyVersionList []*PolicyVersion `type:"list"`
+
+	// The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601),
+	// when the policy was last updated.
+	//
+	// When a policy has only one version, this field contains the date and time
+	// when the policy was created. When a policy has more than one version, this
+	// field contains the date and time when the most recent policy version was
+	// created.
+	UpdateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+}
+
+// String returns the string representation
+func (s ManagedPolicyDetail) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ManagedPolicyDetail) GoString() string {
+	return s.String()
+}
+
+// Contains the Amazon Resource Name (ARN) for an IAM OpenID Connect provider.
+type OpenIDConnectProviderListEntry struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources.
+	//
+	// For more information about ARNs, go to Amazon Resource Names (ARNs) and
+	// AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+	// in the AWS General Reference.
+	Arn *string `min:"20" type:"string"`
+}
+
+// String returns the string representation
+func (s OpenIDConnectProviderListEntry) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s OpenIDConnectProviderListEntry) GoString() string {
+	return s.String()
+}
+
+// Contains information about the account password policy.
+//
+// This data type is used as a response element in the GetAccountPasswordPolicy
+// action.
+type PasswordPolicy struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies whether IAM users are allowed to change their own password.
+	AllowUsersToChangePassword *bool `type:"boolean"`
+
+	// Indicates whether passwords in the account expire. Returns true if MaxPasswordAge
+	// contains a value greater than 0. Returns false if MaxPasswordAge is 0
+	// or not present.
+	ExpirePasswords *bool `type:"boolean"`
+
+	// Specifies whether IAM users are prevented from setting a new password after
+	// their password has expired.
+	HardExpiry *bool `type:"boolean"`
+
+	// The number of days that an IAM user password is valid.
+ MaxPasswordAge *int64 `min:"1" type:"integer"` + + // Minimum length to require for IAM user passwords. + MinimumPasswordLength *int64 `min:"6" type:"integer"` + + // Specifies the number of previous passwords that IAM users are prevented from + // reusing. + PasswordReusePrevention *int64 `min:"1" type:"integer"` + + // Specifies whether to require lowercase characters for IAM user passwords. + RequireLowercaseCharacters *bool `type:"boolean"` + + // Specifies whether to require numbers for IAM user passwords. + RequireNumbers *bool `type:"boolean"` + + // Specifies whether to require symbols for IAM user passwords. + RequireSymbols *bool `type:"boolean"` + + // Specifies whether to require uppercase characters for IAM user passwords. + RequireUppercaseCharacters *bool `type:"boolean"` +} + +// String returns the string representation +func (s PasswordPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PasswordPolicy) GoString() string { + return s.String() +} + +// Contains information about a managed policy. +// +// This data type is used as a response element in the CreatePolicy, GetPolicy, +// and ListPolicies actions. +// +// For more information about managed policies, refer to Managed Policies and +// Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the Using IAM guide. +type Policy struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + Arn *string `min:"20" type:"string"` + + // The number of entities (users, groups, and roles) that the policy is attached + // to. + AttachmentCount *int64 `type:"integer"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the policy was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The identifier for the version of the policy that is set as the default version. + DefaultVersionId *string `type:"string"` + + // A friendly description of the policy. + // + // This element is included in the response to the GetPolicy operation. It + // is not included in the response to the ListPolicies operation. + Description *string `type:"string"` + + // Specifies whether the policy can be attached to an IAM user, group, or role. + IsAttachable *bool `type:"boolean"` + + // The path to the policy. + // + // For more information about paths, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Path *string `type:"string"` + + // The stable and unique string identifying the policy. + // + // For more information about IDs, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + PolicyId *string `min:"16" type:"string"` + + // The friendly name (not ARN) identifying the policy. + PolicyName *string `min:"1" type:"string"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the policy was last updated. + // + // When a policy has only one version, this field contains the date and time + // when the policy was created. 
When a policy has more than one version, this + // field contains the date and time when the most recent policy version was + // created. + UpdateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s Policy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Policy) GoString() string { + return s.String() +} + +// Contains information about an IAM policy, including the policy document. +// +// This data type is used as a response element in the GetAccountAuthorizationDetails +// action. +type PolicyDetail struct { + _ struct{} `type:"structure"` + + // The policy document. + PolicyDocument *string `min:"1" type:"string"` + + // The name of the policy. + PolicyName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PolicyDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyDetail) GoString() string { + return s.String() +} + +// Contains information about a group that a managed policy is attached to. +// +// This data type is used as a response element in the ListEntitiesForPolicy +// action. +// +// For more information about managed policies, refer to Managed Policies and +// Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the Using IAM guide. +type PolicyGroup struct { + _ struct{} `type:"structure"` + + // The stable and unique string identifying the group. For more information + // about IDs, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. + GroupId *string `min:"16" type:"string"` + + // The name (friendly name, not ARN) identifying the group. + GroupName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PolicyGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyGroup) GoString() string { + return s.String() +} + +// Contains information about a role that a managed policy is attached to. +// +// This data type is used as a response element in the ListEntitiesForPolicy +// action. +// +// For more information about managed policies, refer to Managed Policies and +// Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the Using IAM guide. +type PolicyRole struct { + _ struct{} `type:"structure"` + + // The stable and unique string identifying the role. For more information about + // IDs, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. + RoleId *string `min:"16" type:"string"` + + // The name (friendly name, not ARN) identifying the role. + RoleName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PolicyRole) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyRole) GoString() string { + return s.String() +} + +// Contains information about a user that a managed policy is attached to. +// +// This data type is used as a response element in the ListEntitiesForPolicy +// action. +// +// For more information about managed policies, refer to Managed Policies and +// Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the Using IAM guide. 
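+//
+// A hypothetical sketch (assumes a configured *iam.IAM client named svc and
+// a policyArn variable): PolicyUser values typically arrive in a
+// ListEntitiesForPolicy response:
+//
+//	out, err := svc.ListEntitiesForPolicy(&iam.ListEntitiesForPolicyInput{
+//		PolicyArn: aws.String(policyArn),
+//	})
+//	if err == nil {
+//		for _, u := range out.PolicyUsers {
+//			fmt.Println(aws.StringValue(u.UserName))
+//		}
+//	}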
+type PolicyUser struct { + _ struct{} `type:"structure"` + + // The stable and unique string identifying the user. For more information about + // IDs, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. + UserId *string `min:"16" type:"string"` + + // The name (friendly name, not ARN) identifying the user. + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PolicyUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyUser) GoString() string { + return s.String() +} + +// Contains information about a version of a managed policy. +// +// This data type is used as a response element in the CreatePolicyVersion, +// GetPolicyVersion, ListPolicyVersions, and GetAccountAuthorizationDetails +// actions. +// +// For more information about managed policies, refer to Managed Policies and +// Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the Using IAM guide. +type PolicyVersion struct { + _ struct{} `type:"structure"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the policy version was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The policy document. + // + // The policy document is returned in the response to the GetPolicyVersion + // and GetAccountAuthorizationDetails operations. It is not returned in the + // response to the CreatePolicyVersion or ListPolicyVersions operations. + Document *string `min:"1" type:"string"` + + // Specifies whether the policy version is set as the policy's default version. + IsDefaultVersion *bool `type:"boolean"` + + // The identifier for the policy version. + // + // Policy version identifiers always begin with v (always lowercase). When + // a policy is created, the first policy version is v1. + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s PolicyVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyVersion) GoString() string { + return s.String() +} + +// Contains the row and column of a location of a Statement element in a policy +// document. +// +// This data type is used as a member of the Statement type. +type Position struct { + _ struct{} `type:"structure"` + + // The column in the line containing the specified position in the document. + Column *int64 `type:"integer"` + + // The line containing the specified position in the document. + Line *int64 `type:"integer"` +} + +// String returns the string representation +func (s Position) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Position) GoString() string { + return s.String() +} + +type PutGroupPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the group to associate the policy with. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + GroupName *string `min:"1" type:"string" required:"true"` + + // The policy document. 
+	//
+	// The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is
+	// a string of characters consisting of any printable ASCII character ranging
+	// from the space character (\u0020) through the end of the ASCII character range
+	// (\u00FF). It also includes the special characters tab (\u0009), line feed
+	// (\u000A), and carriage return (\u000D).
+	PolicyDocument *string `min:"1" type:"string" required:"true"`
+
+	// The name of the policy document.
+	//
+	// The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is
+	// a string of characters consisting of upper and lowercase alphanumeric characters
+	// with no spaces. You can also include any of the following characters: =,.@-
+	PolicyName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s PutGroupPolicyInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutGroupPolicyInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutGroupPolicyInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PutGroupPolicyInput"}
+	if s.GroupName == nil {
+		invalidParams.Add(request.NewErrParamRequired("GroupName"))
+	}
+	if s.GroupName != nil && len(*s.GroupName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("GroupName", 1))
+	}
+	if s.PolicyDocument == nil {
+		invalidParams.Add(request.NewErrParamRequired("PolicyDocument"))
+	}
+	if s.PolicyDocument != nil && len(*s.PolicyDocument) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("PolicyDocument", 1))
+	}
+	if s.PolicyName == nil {
+		invalidParams.Add(request.NewErrParamRequired("PolicyName"))
+	}
+	if s.PolicyName != nil && len(*s.PolicyName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type PutGroupPolicyOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutGroupPolicyOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutGroupPolicyOutput) GoString() string {
+	return s.String()
+}
+
+type PutRolePolicyInput struct {
+	_ struct{} `type:"structure"`
+
+	// The policy document.
+	//
+	// The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is
+	// a string of characters consisting of any printable ASCII character ranging
+	// from the space character (\u0020) through the end of the ASCII character range
+	// (\u00FF). It also includes the special characters tab (\u0009), line feed
+	// (\u000A), and carriage return (\u000D).
+	PolicyDocument *string `min:"1" type:"string" required:"true"`
+
+	// The name of the policy document.
+	//
+	// The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is
+	// a string of characters consisting of upper and lowercase alphanumeric characters
+	// with no spaces. You can also include any of the following characters: =,.@-
+	PolicyName *string `min:"1" type:"string" required:"true"`
+
+	// The name of the role to associate the policy with.
+	//
+	// The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is
+	// a string of characters consisting of upper and lowercase alphanumeric characters
+	// with no spaces. You can also include any of the following characters: =,.@-
You can also include any of the following characters: =,.@- + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutRolePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRolePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutRolePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutRolePolicyInput"} + if s.PolicyDocument == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyDocument")) + } + if s.PolicyDocument != nil && len(*s.PolicyDocument) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyDocument", 1)) + } + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutRolePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutRolePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRolePolicyOutput) GoString() string { + return s.String() +} + +type PutUserPolicyInput struct { + _ struct{} `type:"structure"` + + // The policy document. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of any printable ASCII character ranging + // from the space character (\u0020) through end of the ASCII character range + // (\u00FF). It also includes the special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D). + PolicyDocument *string `min:"1" type:"string" required:"true"` + + // The name of the policy document. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + PolicyName *string `min:"1" type:"string" required:"true"` + + // The name of the user to associate the policy with. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutUserPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutUserPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
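+//
+// As a hedged, illustrative sketch only (the variable names and values below
+// are assumptions, not part of the generated API), Validate can be run before
+// sending a request to surface parameter problems without a network call:
+//
+//    params := &iam.PutUserPolicyInput{
+//        PolicyDocument: aws.String(policyJSON), // complete JSON policy text, assumed defined elsewhere
+//        PolicyName:     aws.String("ExamplePolicy"),
+//        UserName:       aws.String("ExampleUser"),
+//    }
+//    if err := params.Validate(); err != nil {
+//        // a required field is missing or below its minimum length
+//    }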
+func (s *PutUserPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutUserPolicyInput"} + if s.PolicyDocument == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyDocument")) + } + if s.PolicyDocument != nil && len(*s.PolicyDocument) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyDocument", 1)) + } + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutUserPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutUserPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutUserPolicyOutput) GoString() string { + return s.String() +} + +type RemoveClientIDFromOpenIDConnectProviderInput struct { + _ struct{} `type:"structure"` + + // The client ID (also known as audience) to remove from the IAM OIDC provider + // resource. For more information about client IDs, see CreateOpenIDConnectProvider. + ClientID *string `min:"1" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the IAM OIDC provider resource to remove + // the client ID from. You can get a list of OIDC provider ARNs by using the + // ListOpenIDConnectProviders action. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + OpenIDConnectProviderArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s RemoveClientIDFromOpenIDConnectProviderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveClientIDFromOpenIDConnectProviderInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
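+//
+// Note that failures accumulate rather than short-circuit: validating an
+// empty input reports every problem at once. A small sketch (assumed usage,
+// not generated output):
+//
+//    in := &iam.RemoveClientIDFromOpenIDConnectProviderInput{}
+//    err := in.Validate()
+//    // err lists both ClientID and OpenIDConnectProviderArn as required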
+func (s *RemoveClientIDFromOpenIDConnectProviderInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemoveClientIDFromOpenIDConnectProviderInput"} + if s.ClientID == nil { + invalidParams.Add(request.NewErrParamRequired("ClientID")) + } + if s.ClientID != nil && len(*s.ClientID) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientID", 1)) + } + if s.OpenIDConnectProviderArn == nil { + invalidParams.Add(request.NewErrParamRequired("OpenIDConnectProviderArn")) + } + if s.OpenIDConnectProviderArn != nil && len(*s.OpenIDConnectProviderArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("OpenIDConnectProviderArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RemoveClientIDFromOpenIDConnectProviderOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveClientIDFromOpenIDConnectProviderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveClientIDFromOpenIDConnectProviderOutput) GoString() string { + return s.String() +} + +type RemoveRoleFromInstanceProfileInput struct { + _ struct{} `type:"structure"` + + // The name of the instance profile to update. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + InstanceProfileName *string `min:"1" type:"string" required:"true"` + + // The name of the role to remove. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RemoveRoleFromInstanceProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveRoleFromInstanceProfileInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RemoveRoleFromInstanceProfileInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemoveRoleFromInstanceProfileInput"} + if s.InstanceProfileName == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceProfileName")) + } + if s.InstanceProfileName != nil && len(*s.InstanceProfileName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InstanceProfileName", 1)) + } + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RemoveRoleFromInstanceProfileOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveRoleFromInstanceProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveRoleFromInstanceProfileOutput) GoString() string { + return s.String() +} + +type RemoveUserFromGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the group to update. 
+ // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + GroupName *string `min:"1" type:"string" required:"true"` + + // The name of the user to remove. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RemoveUserFromGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveUserFromGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RemoveUserFromGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemoveUserFromGroupInput"} + if s.GroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GroupName")) + } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RemoveUserFromGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveUserFromGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveUserFromGroupOutput) GoString() string { + return s.String() +} + +// Contains the result of the simulation of a single API action call on a single +// resource. +// +// This data type is used by a member of the EvaluationResult data type. +type ResourceSpecificResult struct { + _ struct{} `type:"structure"` + + // Additional details about the results of the evaluation decision. When there + // are both IAM policies and resource policies, this parameter explains how + // each set of policies contributes to the final evaluation decision. When simulating + // cross-account access to a resource, both the resource-based policy and the + // caller's IAM policy must grant access. + EvalDecisionDetails map[string]*string `type:"map"` + + // The result of the simulation of the simulated API action on the resource + // specified in EvalResourceName. + EvalResourceDecision *string `type:"string" required:"true" enum:"PolicyEvaluationDecisionType"` + + // The name of the simulated resource, in Amazon Resource Name (ARN) format. + EvalResourceName *string `min:"1" type:"string" required:"true"` + + // A list of the statements in the input policies that determine the result + // for this part of the simulation. Remember that even if multiple statements + // allow the action on the resource, if any statement denies that action, then + // the explicit deny overrides any allow, and the deny statement is the only + // entry included in the result. + MatchedStatements []*Statement `type:"list"` + + // A list of context keys that are required by the included input policies but + // that were not provided by one of the input parameters. 
This list is used + // when a list of ARNs is included in the ResourceArns parameter instead of + // "*". If you do not specify individual resources, by setting ResourceArns + // to "*" or by not including the ResourceArns parameter, then any missing context + // values are instead included under the EvaluationResults section. To discover + // the context keys used by a set of policies, you can call GetContextKeysForCustomPolicy + // or GetContextKeysForPrincipalPolicy. + MissingContextValues []*string `type:"list"` +} + +// String returns the string representation +func (s ResourceSpecificResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceSpecificResult) GoString() string { + return s.String() +} + +type ResyncMFADeviceInput struct { + _ struct{} `type:"structure"` + + // An authentication code emitted by the device. + // + // The format for this parameter is a sequence of six digits. + AuthenticationCode1 *string `min:"6" type:"string" required:"true"` + + // A subsequent authentication code emitted by the device. + // + // The format for this parameter is a sequence of six digits. + AuthenticationCode2 *string `min:"6" type:"string" required:"true"` + + // Serial number that uniquely identifies the MFA device. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + SerialNumber *string `min:"9" type:"string" required:"true"` + + // The name of the user whose MFA device you want to resynchronize. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ResyncMFADeviceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResyncMFADeviceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
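+//
+// For illustration (all values below are placeholders, not real credentials),
+// a resync request supplies two consecutive six-digit codes from the device:
+//
+//    in := &iam.ResyncMFADeviceInput{
+//        AuthenticationCode1: aws.String("123456"),
+//        AuthenticationCode2: aws.String("654321"),
+//        SerialNumber:        aws.String("GAHT12345678"),
+//        UserName:            aws.String("ExampleUser"),
+//    }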
+func (s *ResyncMFADeviceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResyncMFADeviceInput"} + if s.AuthenticationCode1 == nil { + invalidParams.Add(request.NewErrParamRequired("AuthenticationCode1")) + } + if s.AuthenticationCode1 != nil && len(*s.AuthenticationCode1) < 6 { + invalidParams.Add(request.NewErrParamMinLen("AuthenticationCode1", 6)) + } + if s.AuthenticationCode2 == nil { + invalidParams.Add(request.NewErrParamRequired("AuthenticationCode2")) + } + if s.AuthenticationCode2 != nil && len(*s.AuthenticationCode2) < 6 { + invalidParams.Add(request.NewErrParamMinLen("AuthenticationCode2", 6)) + } + if s.SerialNumber == nil { + invalidParams.Add(request.NewErrParamRequired("SerialNumber")) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ResyncMFADeviceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ResyncMFADeviceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResyncMFADeviceOutput) GoString() string { + return s.String() +} + +// Contains information about an IAM role. +// +// This data type is used as a response element in the following actions: +// +// CreateRole +// +// GetRole +// +// ListRoles +type Role struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) specifying the role. For more information + // about ARNs and how to use them in policies, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Arn *string `min:"20" type:"string" required:"true"` + + // The policy that grants an entity permission to assume the role. + AssumeRolePolicyDocument *string `min:"1" type:"string"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the role was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The path to the role. For more information about paths, see IAM Identifiers + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Path *string `min:"1" type:"string" required:"true"` + + // The stable and unique string identifying the role. For more information about + // IDs, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + RoleId *string `min:"16" type:"string" required:"true"` + + // The friendly name that identifies the role. + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s Role) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Role) GoString() string { + return s.String() +} + +// Contains information about an IAM role, including all of the role's policies. +// +// This data type is used as a response element in the GetAccountAuthorizationDetails +// action. +type RoleDetail struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. 
+ // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + Arn *string `min:"20" type:"string"` + + // The trust policy that grants permission to assume the role. + AssumeRolePolicyDocument *string `min:"1" type:"string"` + + // A list of managed policies attached to the role. These policies are the role's + // access (permissions) policies. + AttachedManagedPolicies []*AttachedPolicy `type:"list"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the role was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Contains a list of instance profiles. + InstanceProfileList []*InstanceProfile `type:"list"` + + // The path to the role. For more information about paths, see IAM Identifiers + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Path *string `min:"1" type:"string"` + + // The stable and unique string identifying the role. For more information about + // IDs, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + RoleId *string `min:"16" type:"string"` + + // The friendly name that identifies the role. + RoleName *string `min:"1" type:"string"` + + // A list of inline policies embedded in the role. These policies are the role's + // access (permissions) policies. + RolePolicyList []*PolicyDetail `type:"list"` +} + +// String returns the string representation +func (s RoleDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RoleDetail) GoString() string { + return s.String() +} + +// Contains the list of SAML providers for this account. +type SAMLProviderListEntry struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the SAML provider. + Arn *string `min:"20" type:"string"` + + // The date and time when the SAML provider was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The expiration date and time for the SAML provider. + ValidUntil *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s SAMLProviderListEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SAMLProviderListEntry) GoString() string { + return s.String() +} + +// Contains information about an SSH public key. +// +// This data type is used as a response element in the GetSSHPublicKey and +// UploadSSHPublicKey actions. +type SSHPublicKey struct { + _ struct{} `type:"structure"` + + // The MD5 message digest of the SSH public key. + Fingerprint *string `min:"48" type:"string" required:"true"` + + // The SSH public key. + SSHPublicKeyBody *string `min:"1" type:"string" required:"true"` + + // The unique identifier for the SSH public key. + SSHPublicKeyId *string `min:"20" type:"string" required:"true"` + + // The status of the SSH public key. Active means the key can be used for authentication + // with an AWS CodeCommit repository. Inactive means the key cannot be used. + Status *string `type:"string" required:"true" enum:"statusType"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the SSH public key was uploaded. 
+ UploadDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The name of the IAM user associated with the SSH public key. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s SSHPublicKey) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SSHPublicKey) GoString() string { + return s.String() +} + +// Contains information about an SSH public key, without the key's body or fingerprint. +// +// This data type is used as a response element in the ListSSHPublicKeys action. +type SSHPublicKeyMetadata struct { + _ struct{} `type:"structure"` + + // The unique identifier for the SSH public key. + SSHPublicKeyId *string `min:"20" type:"string" required:"true"` + + // The status of the SSH public key. Active means the key can be used for authentication + // with an AWS CodeCommit repository. Inactive means the key cannot be used. + Status *string `type:"string" required:"true" enum:"statusType"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the SSH public key was uploaded. + UploadDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The name of the IAM user associated with the SSH public key. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s SSHPublicKeyMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SSHPublicKeyMetadata) GoString() string { + return s.String() +} + +// Contains information about a server certificate. +// +// This data type is used as a response element in the GetServerCertificate +// action. +type ServerCertificate struct { + _ struct{} `type:"structure"` + + // The contents of the public key certificate. + CertificateBody *string `min:"1" type:"string" required:"true"` + + // The contents of the public key certificate chain. + CertificateChain *string `min:"1" type:"string"` + + // The meta information of the server certificate, such as its name, path, ID, + // and ARN. + ServerCertificateMetadata *ServerCertificateMetadata `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ServerCertificate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServerCertificate) GoString() string { + return s.String() +} + +// Contains information about a server certificate without its certificate body, +// certificate chain, and private key. +// +// This data type is used as a response element in the UploadServerCertificate +// and ListServerCertificates actions. +type ServerCertificateMetadata struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) specifying the server certificate. For more + // information about ARNs and how to use them in policies, see IAM Identifiers + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Arn *string `min:"20" type:"string" required:"true"` + + // The date on which the certificate is set to expire. + Expiration *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The path to the server certificate. For more information about paths, see + // IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. 
+ Path *string `min:"1" type:"string" required:"true"` + + // The stable and unique string identifying the server certificate. For more + // information about IDs, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + ServerCertificateId *string `min:"16" type:"string" required:"true"` + + // The name that identifies the server certificate. + ServerCertificateName *string `min:"1" type:"string" required:"true"` + + // The date when the server certificate was uploaded. + UploadDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s ServerCertificateMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServerCertificateMetadata) GoString() string { + return s.String() +} + +type SetDefaultPolicyVersionInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM policy whose default version you + // want to set. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string" required:"true"` + + // The version of the policy to set as the default (operative) version. + // + // For more information about managed policy versions, see Versioning for Managed + // Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) + // in the IAM User Guide. + VersionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetDefaultPolicyVersionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetDefaultPolicyVersionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetDefaultPolicyVersionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetDefaultPolicyVersionInput"} + if s.PolicyArn == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyArn")) + } + if s.PolicyArn != nil && len(*s.PolicyArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PolicyArn", 20)) + } + if s.VersionId == nil { + invalidParams.Add(request.NewErrParamRequired("VersionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type SetDefaultPolicyVersionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetDefaultPolicyVersionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetDefaultPolicyVersionOutput) GoString() string { + return s.String() +} + +// Contains information about an X.509 signing certificate. +// +// This data type is used as a response element in the UploadSigningCertificate +// and ListSigningCertificates actions. +type SigningCertificate struct { + _ struct{} `type:"structure"` + + // The contents of the signing certificate. + CertificateBody *string `min:"1" type:"string" required:"true"` + + // The ID for the signing certificate. + CertificateId *string `min:"24" type:"string" required:"true"` + + // The status of the signing certificate. Active means the key is valid for + // API calls, while Inactive means it is not. 
+ Status *string `type:"string" required:"true" enum:"statusType"` + + // The date when the signing certificate was uploaded. + UploadDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The name of the user the signing certificate is associated with. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s SigningCertificate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SigningCertificate) GoString() string { + return s.String() +} + +type SimulateCustomPolicyInput struct { + _ struct{} `type:"structure"` + + // A list of names of API actions to evaluate in the simulation. Each action + // is evaluated against each resource. Each action must include the service + // identifier, such as iam:CreateUser. + ActionNames []*string `type:"list" required:"true"` + + // The ARN of the IAM user that you want to use as the simulated caller of the + // APIs. CallerArn is required if you include a ResourcePolicy so that the policy's + // Principal element has a value to use in evaluating the policy. + // + // You can specify only the ARN of an IAM user. You cannot specify the ARN + // of an assumed role, federated user, or a service principal. + CallerArn *string `min:"1" type:"string"` + + // A list of context keys and corresponding values for the simulation to use. + // Whenever a context key is evaluated in one of the simulated IAM permission + // policies, the corresponding value is supplied. + ContextEntries []*ContextEntry `type:"list"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // A list of policy documents to include in the simulation. Each document is + // specified as a string containing the complete, valid JSON text of an IAM + // policy. Do not include any resource-based policies in this parameter. Any + // resource-based policy must be submitted with the ResourcePolicy parameter. + // The policies cannot be "scope-down" policies, such as you could include in + // a call to GetFederationToken (http://docs.aws.amazon.com/IAM/latest/APIReference/API_GetFederationToken.html) + // or one of the AssumeRole (http://docs.aws.amazon.com/IAM/latest/APIReference/API_AssumeRole.html) + // APIs to restrict what a user can do while using the temporary credentials. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of any printable ASCII character ranging + // from the space character (\u0020) through end of the ASCII character range + // (\u00FF). 
It also includes the special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D). + PolicyInputList []*string `type:"list" required:"true"` + + // A list of ARNs of AWS resources to include in the simulation. If this parameter + // is not provided then the value defaults to * (all resources). Each API in + // the ActionNames parameter is evaluated for each resource in this list. The + // simulation determines the access result (allowed or denied) of each combination + // and reports it in the response. + // + // The simulation does not automatically retrieve policies for the specified + // resources. If you want to include a resource policy in the simulation, then + // you must include the policy as a string in the ResourcePolicy parameter. + // + // If you include a ResourcePolicy, then it must be applicable to all of the + // resources included in the simulation or you receive an invalid input error. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + ResourceArns []*string `type:"list"` + + // Specifies the type of simulation to run. Different APIs that support resource-based + // policies require different combinations of resources. By specifying the type + // of simulation to run, you enable the policy simulator to enforce the presence + // of the required resources to ensure reliable simulation results. If your + // simulation does not match one of the following scenarios, then you can omit + // this parameter. The following list shows each of the supported scenario values + // and the resources that you must define to run the simulation. + // + // Each of the EC2 scenarios requires that you specify instance, image, and + // security-group resources. If your scenario includes an EBS volume, then you + // must specify that volume as a resource. If the EC2 scenario includes VPC, + // then you must supply the network-interface resource. If it includes an IP + // subnet, then you must specify the subnet resource. For more information on + // the EC2 scenario options, see Supported Platforms (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-platforms.html) + // in the AWS EC2 User Guide. + // + // EC2-Classic-InstanceStore + // + // instance, image, security-group + // + // EC2-Classic-EBS + // + // instance, image, security-group, volume + // + // EC2-VPC-InstanceStore + // + // instance, image, security-group, network-interface + // + // EC2-VPC-InstanceStore-Subnet + // + // instance, image, security-group, network-interface, subnet + // + // EC2-VPC-EBS + // + // instance, image, security-group, network-interface, volume + // + // EC2-VPC-EBS-Subnet + // + // instance, image, security-group, network-interface, subnet, volume + ResourceHandlingOption *string `min:"1" type:"string"` + + // An AWS account ID that specifies the owner of any simulated resource that + // does not identify its owner in the resource ARN, such as an S3 bucket or + // object. If ResourceOwner is specified, it is also used as the account owner + // of any ResourcePolicy included in the simulation. If the ResourceOwner parameter + // is not specified, then the owner of the resources and the resource policy + // defaults to the account of the identity provided in CallerArn. 
This parameter + // is required only if you specify a resource-based policy and account that + // owns the resource is different from the account that owns the simulated calling + // user CallerArn. + ResourceOwner *string `min:"1" type:"string"` + + // A resource-based policy to include in the simulation provided as a string. + // Each resource in the simulation is treated as if it had this policy attached. + // You can include only one resource-based policy in a simulation. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of any printable ASCII character ranging + // from the space character (\u0020) through end of the ASCII character range + // (\u00FF). It also includes the special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D). + ResourcePolicy *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s SimulateCustomPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SimulateCustomPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SimulateCustomPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SimulateCustomPolicyInput"} + if s.ActionNames == nil { + invalidParams.Add(request.NewErrParamRequired("ActionNames")) + } + if s.CallerArn != nil && len(*s.CallerArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CallerArn", 1)) + } + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.PolicyInputList == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyInputList")) + } + if s.ResourceHandlingOption != nil && len(*s.ResourceHandlingOption) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceHandlingOption", 1)) + } + if s.ResourceOwner != nil && len(*s.ResourceOwner) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceOwner", 1)) + } + if s.ResourcePolicy != nil && len(*s.ResourcePolicy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourcePolicy", 1)) + } + if s.ContextEntries != nil { + for i, v := range s.ContextEntries { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ContextEntries", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful SimulatePrincipalPolicy or SimulateCustomPolicy +// request. +type SimulatePolicyResponse struct { + _ struct{} `type:"structure"` + + // The results of the simulation. + EvaluationResults []*EvaluationResult `type:"list"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. 
+ IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s SimulatePolicyResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SimulatePolicyResponse) GoString() string { + return s.String() +} + +type SimulatePrincipalPolicyInput struct { + _ struct{} `type:"structure"` + + // A list of names of API actions to evaluate in the simulation. Each action + // is evaluated for each resource. Each action must include the service identifier, + // such as iam:CreateUser. + ActionNames []*string `type:"list" required:"true"` + + // The ARN of the IAM user that you want to specify as the simulated caller + // of the APIs. If you do not specify a CallerArn, it defaults to the ARN of + // the user that you specify in PolicySourceArn, if you specified a user. If + // you include both a PolicySourceArn (for example, arn:aws:iam::123456789012:user/David) + // and a CallerArn (for example, arn:aws:iam::123456789012:user/Bob), the result + // is that you simulate calling the APIs as Bob, as if Bob had David's policies. + // + // You can specify only the ARN of an IAM user. You cannot specify the ARN + // of an assumed role, federated user, or a service principal. + // + // CallerArn is required if you include a ResourcePolicy and the PolicySourceArn + // is not the ARN for an IAM user. This is required so that the resource-based + // policy's Principal element has a value to use in evaluating the policy. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + CallerArn *string `min:"1" type:"string"` + + // A list of context keys and corresponding values for the simulation to use. + // Whenever a context key is evaluated in one of the simulated IAM permission + // policies, the corresponding value is supplied. + ContextEntries []*ContextEntry `type:"list"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // An optional list of additional policy documents to include in the simulation. + // Each document is specified as a string containing the complete, valid JSON + // text of an IAM policy. 
+ // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of any printable ASCII character ranging + // from the space character (\u0020) through end of the ASCII character range + // (\u00FF). It also includes the special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D). + PolicyInputList []*string `type:"list"` + + // The Amazon Resource Name (ARN) of a user, group, or role whose policies you + // want to include in the simulation. If you specify a user, group, or role, + // the simulation includes all policies that are associated with that entity. + // If you specify a user, the simulation also includes all policies that are + // attached to any groups the user belongs to. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicySourceArn *string `min:"20" type:"string" required:"true"` + + // A list of ARNs of AWS resources to include in the simulation. If this parameter + // is not provided then the value defaults to * (all resources). Each API in + // the ActionNames parameter is evaluated for each resource in this list. The + // simulation determines the access result (allowed or denied) of each combination + // and reports it in the response. + // + // The simulation does not automatically retrieve policies for the specified + // resources. If you want to include a resource policy in the simulation, then + // you must include the policy as a string in the ResourcePolicy parameter. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + ResourceArns []*string `type:"list"` + + // Specifies the type of simulation to run. Different APIs that support resource-based + // policies require different combinations of resources. By specifying the type + // of simulation to run, you enable the policy simulator to enforce the presence + // of the required resources to ensure reliable simulation results. If your + // simulation does not match one of the following scenarios, then you can omit + // this parameter. The following list shows each of the supported scenario values + // and the resources that you must define to run the simulation. + // + // Each of the EC2 scenarios requires that you specify instance, image, and + // security-group resources. If your scenario includes an EBS volume, then you + // must specify that volume as a resource. If the EC2 scenario includes VPC, + // then you must supply the network-interface resource. If it includes an IP + // subnet, then you must specify the subnet resource. For more information on + // the EC2 scenario options, see Supported Platforms (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-platforms.html) + // in the AWS EC2 User Guide. 
+ // + // EC2-Classic-InstanceStore + // + // instance, image, security-group + // + // EC2-Classic-EBS + // + // instance, image, security-group, volume + // + // EC2-VPC-InstanceStore + // + // instance, image, security-group, network-interface + // + // EC2-VPC-InstanceStore-Subnet + // + // instance, image, security-group, network-interface, subnet + // + // EC2-VPC-EBS + // + // instance, image, security-group, network-interface, volume + // + // EC2-VPC-EBS-Subnet + // + // instance, image, security-group, network-interface, subnet, volume + ResourceHandlingOption *string `min:"1" type:"string"` + + // An AWS account ID that specifies the owner of any simulated resource that + // does not identify its owner in the resource ARN, such as an S3 bucket or + // object. If ResourceOwner is specified, it is also used as the account owner + // of any ResourcePolicy included in the simulation. If the ResourceOwner parameter + // is not specified, then the owner of the resources and the resource policy + // defaults to the account of the identity provided in CallerArn. This parameter + // is required only if you specify a resource-based policy and account that + // owns the resource is different from the account that owns the simulated calling + // user CallerArn. + ResourceOwner *string `min:"1" type:"string"` + + // A resource-based policy to include in the simulation provided as a string. + // Each resource in the simulation is treated as if it had this policy attached. + // You can include only one resource-based policy in a simulation. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of any printable ASCII character ranging + // from the space character (\u0020) through end of the ASCII character range + // (\u00FF). It also includes the special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D). + ResourcePolicy *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s SimulatePrincipalPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SimulatePrincipalPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
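+//
+// Because simulation results can be truncated, callers typically loop,
+// feeding the response Marker back into the request until IsTruncated is
+// false. A minimal sketch, assuming an IAM service client named svc created
+// elsewhere (svc is a hypothetical name, as is the ARN):
+//
+//    in := &iam.SimulatePrincipalPolicyInput{
+//        ActionNames:     []*string{aws.String("iam:CreateUser")},
+//        PolicySourceArn: aws.String("arn:aws:iam::123456789012:user/David"),
+//    }
+//    for {
+//        out, err := svc.SimulatePrincipalPolicy(in)
+//        if err != nil {
+//            break // handle the error
+//        }
+//        // inspect out.EvaluationResults here
+//        if out.IsTruncated == nil || !*out.IsTruncated {
+//            break
+//        }
+//        in.Marker = out.Marker
+//    }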
+func (s *SimulatePrincipalPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SimulatePrincipalPolicyInput"} + if s.ActionNames == nil { + invalidParams.Add(request.NewErrParamRequired("ActionNames")) + } + if s.CallerArn != nil && len(*s.CallerArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CallerArn", 1)) + } + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.PolicySourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("PolicySourceArn")) + } + if s.PolicySourceArn != nil && len(*s.PolicySourceArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PolicySourceArn", 20)) + } + if s.ResourceHandlingOption != nil && len(*s.ResourceHandlingOption) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceHandlingOption", 1)) + } + if s.ResourceOwner != nil && len(*s.ResourceOwner) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceOwner", 1)) + } + if s.ResourcePolicy != nil && len(*s.ResourcePolicy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourcePolicy", 1)) + } + if s.ContextEntries != nil { + for i, v := range s.ContextEntries { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ContextEntries", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains a reference to a Statement element in a policy document that determines +// the result of the simulation. +// +// This data type is used by the MatchedStatements member of the EvaluationResult +// type. +type Statement struct { + _ struct{} `type:"structure"` + + // The row and column of the end of a Statement in an IAM policy. + EndPosition *Position `type:"structure"` + + // The identifier of the policy that was provided as an input. + SourcePolicyId *string `type:"string"` + + // The type of the policy. + SourcePolicyType *string `type:"string" enum:"PolicySourceType"` + + // The row and column of the beginning of the Statement in an IAM policy. + StartPosition *Position `type:"structure"` +} + +// String returns the string representation +func (s Statement) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Statement) GoString() string { + return s.String() +} + +type UpdateAccessKeyInput struct { + _ struct{} `type:"structure"` + + // The access key ID of the secret access key you want to update. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters that can consist of any upper or lowercased letter + // or digit. + AccessKeyId *string `min:"16" type:"string" required:"true"` + + // The status you want to assign to the secret access key. Active means the + // key can be used for API calls to AWS, while Inactive means the key cannot + // be used. + Status *string `type:"string" required:"true" enum:"statusType"` + + // The name of the user whose key you want to update. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. 
You can also include any of the following characters: =,.@- + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UpdateAccessKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAccessKeyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateAccessKeyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateAccessKeyInput"} + if s.AccessKeyId == nil { + invalidParams.Add(request.NewErrParamRequired("AccessKeyId")) + } + if s.AccessKeyId != nil && len(*s.AccessKeyId) < 16 { + invalidParams.Add(request.NewErrParamMinLen("AccessKeyId", 16)) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateAccessKeyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateAccessKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAccessKeyOutput) GoString() string { + return s.String() +} + +type UpdateAccountPasswordPolicyInput struct { + _ struct{} `type:"structure"` + + // Allows all IAM users in your account to use the AWS Management Console to + // change their own passwords. For more information, see Letting IAM Users Change + // Their Own Passwords (http://docs.aws.amazon.com/IAM/latest/UserGuide/HowToPwdIAMUser.html) + // in the IAM User Guide. + // + // Default value: false + AllowUsersToChangePassword *bool `type:"boolean"` + + // Prevents IAM users from setting a new password after their password has expired. + // + // Default value: false + HardExpiry *bool `type:"boolean"` + + // The number of days that an IAM user password is valid. The default value + // of 0 means IAM user passwords never expire. + // + // Default value: 0 + MaxPasswordAge *int64 `min:"1" type:"integer"` + + // The minimum number of characters allowed in an IAM user password. + // + // Default value: 6 + MinimumPasswordLength *int64 `min:"6" type:"integer"` + + // Specifies the number of previous passwords that IAM users are prevented from + // reusing. The default value of 0 means IAM users are not prevented from reusing + // previous passwords. + // + // Default value: 0 + PasswordReusePrevention *int64 `min:"1" type:"integer"` + + // Specifies whether IAM user passwords must contain at least one lowercase + // character from the ISO basic Latin alphabet (a to z). + // + // Default value: false + RequireLowercaseCharacters *bool `type:"boolean"` + + // Specifies whether IAM user passwords must contain at least one numeric character + // (0 to 9). + // + // Default value: false + RequireNumbers *bool `type:"boolean"` + + // Specifies whether IAM user passwords must contain at least one of the following + // non-alphanumeric characters: + // + // ! @ # $ % ^ & * ( ) _ + - = [ ] { } | ' + // + // Default value: false + RequireSymbols *bool `type:"boolean"` + + // Specifies whether IAM user passwords must contain at least one uppercase + // character from the ISO basic Latin alphabet (A to Z). 
+ // + // Default value: false + RequireUppercaseCharacters *bool `type:"boolean"` +} + +// String returns the string representation +func (s UpdateAccountPasswordPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAccountPasswordPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateAccountPasswordPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateAccountPasswordPolicyInput"} + if s.MaxPasswordAge != nil && *s.MaxPasswordAge < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxPasswordAge", 1)) + } + if s.MinimumPasswordLength != nil && *s.MinimumPasswordLength < 6 { + invalidParams.Add(request.NewErrParamMinValue("MinimumPasswordLength", 6)) + } + if s.PasswordReusePrevention != nil && *s.PasswordReusePrevention < 1 { + invalidParams.Add(request.NewErrParamMinValue("PasswordReusePrevention", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateAccountPasswordPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateAccountPasswordPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAccountPasswordPolicyOutput) GoString() string { + return s.String() +} + +type UpdateAssumeRolePolicyInput struct { + _ struct{} `type:"structure"` + + // The policy that grants an entity permission to assume the role. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of any printable ASCII character ranging + // from the space character (\u0020) through end of the ASCII character range + // (\u00FF). It also includes the special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D). + PolicyDocument *string `min:"1" type:"string" required:"true"` + + // The name of the role to update with the new policy. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateAssumeRolePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAssumeRolePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
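+//
+// An illustrative sketch (the trust policy shown is an assumed example, not
+// mandated by this API): the document is passed as a JSON string, so any
+// valid trust policy can be supplied.
+//
+//    trustPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow",
+//        "Principal":{"Service":"ec2.amazonaws.com"},"Action":"sts:AssumeRole"}]}`
+//    in := &iam.UpdateAssumeRolePolicyInput{
+//        PolicyDocument: aws.String(trustPolicy),
+//        RoleName:       aws.String("ExampleRole"),
+//    }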
+func (s *UpdateAssumeRolePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateAssumeRolePolicyInput"} + if s.PolicyDocument == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyDocument")) + } + if s.PolicyDocument != nil && len(*s.PolicyDocument) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyDocument", 1)) + } + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateAssumeRolePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateAssumeRolePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAssumeRolePolicyOutput) GoString() string { + return s.String() +} + +type UpdateGroupInput struct { + _ struct{} `type:"structure"` + + // Name of the IAM group to update. If you're changing the name of the group, + // this is the original name. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + GroupName *string `min:"1" type:"string" required:"true"` + + // New name for the IAM group. Only include this if changing the group's name. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + NewGroupName *string `min:"1" type:"string"` + + // New path for the IAM group. Only include this if changing the group's path. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes, containing any + // ASCII character from the ! (\u0021) thru the DEL character (\u007F), including + // most punctuation characters, digits, and upper and lowercased letters. + NewPath *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UpdateGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
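+//
+// A usage sketch for renaming a group (illustrative only; the group names
+// are placeholders):
+//
+//    svc := iam.New(session.New())
+//    _, err := svc.UpdateGroup(&iam.UpdateGroupInput{
+//        GroupName:    aws.String("Developers"),
+//        NewGroupName: aws.String("Engineering"),
+//    })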
+func (s *UpdateGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateGroupInput"} + if s.GroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GroupName")) + } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) + } + if s.NewGroupName != nil && len(*s.NewGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NewGroupName", 1)) + } + if s.NewPath != nil && len(*s.NewPath) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NewPath", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateGroupOutput) GoString() string { + return s.String() +} + +type UpdateLoginProfileInput struct { + _ struct{} `type:"structure"` + + // The new password for the specified IAM user. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of any printable ASCII character ranging + // from the space character (\u0020) through end of the ASCII character range + // (\u00FF). It also includes the special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D). However, the format can be further + // restricted by the account administrator by setting a password policy on the + // AWS account. For more information, see UpdateAccountPasswordPolicy. + Password *string `min:"1" type:"string"` + + // Allows this new password to be used only once by requiring the specified + // IAM user to set a new password on next sign-in. + PasswordResetRequired *bool `type:"boolean"` + + // The name of the user whose password you want to update. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateLoginProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateLoginProfileInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
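+//
+// A usage sketch for issuing a temporary console password that must be
+// changed at next sign-in (illustrative only; the values are placeholders):
+//
+//    svc := iam.New(session.New())
+//    _, err := svc.UpdateLoginProfile(&iam.UpdateLoginProfileInput{
+//        UserName:              aws.String("alice"),
+//        Password:              aws.String("N3w-Temp-Passw0rd"),
+//        PasswordResetRequired: aws.Bool(true),
+//    })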
+func (s *UpdateLoginProfileInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateLoginProfileInput"} + if s.Password != nil && len(*s.Password) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Password", 1)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateLoginProfileOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateLoginProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateLoginProfileOutput) GoString() string { + return s.String() +} + +type UpdateOpenIDConnectProviderThumbprintInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM OIDC provider resource object for + // which you want to update the thumbprint. You can get a list of OIDC provider + // ARNs by using the ListOpenIDConnectProviders action. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + OpenIDConnectProviderArn *string `min:"20" type:"string" required:"true"` + + // A list of certificate thumbprints that are associated with the specified + // IAM OpenID Connect provider. For more information, see CreateOpenIDConnectProvider. + ThumbprintList []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s UpdateOpenIDConnectProviderThumbprintInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateOpenIDConnectProviderThumbprintInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateOpenIDConnectProviderThumbprintInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateOpenIDConnectProviderThumbprintInput"} + if s.OpenIDConnectProviderArn == nil { + invalidParams.Add(request.NewErrParamRequired("OpenIDConnectProviderArn")) + } + if s.OpenIDConnectProviderArn != nil && len(*s.OpenIDConnectProviderArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("OpenIDConnectProviderArn", 20)) + } + if s.ThumbprintList == nil { + invalidParams.Add(request.NewErrParamRequired("ThumbprintList")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateOpenIDConnectProviderThumbprintOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateOpenIDConnectProviderThumbprintOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateOpenIDConnectProviderThumbprintOutput) GoString() string { + return s.String() +} + +type UpdateSAMLProviderInput struct { + _ struct{} `type:"structure"` + + // An XML document generated by an identity provider (IdP) that supports SAML + // 2.0. The document includes the issuer's name, expiration information, and + // keys that can be used to validate the SAML authentication response (assertions) + // that are received from the IdP. 
You must generate the metadata document using + // the identity management software that is used as your organization's IdP. + SAMLMetadataDocument *string `min:"1000" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the SAML provider to update. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + SAMLProviderArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateSAMLProviderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSAMLProviderInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateSAMLProviderInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateSAMLProviderInput"} + if s.SAMLMetadataDocument == nil { + invalidParams.Add(request.NewErrParamRequired("SAMLMetadataDocument")) + } + if s.SAMLMetadataDocument != nil && len(*s.SAMLMetadataDocument) < 1000 { + invalidParams.Add(request.NewErrParamMinLen("SAMLMetadataDocument", 1000)) + } + if s.SAMLProviderArn == nil { + invalidParams.Add(request.NewErrParamRequired("SAMLProviderArn")) + } + if s.SAMLProviderArn != nil && len(*s.SAMLProviderArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("SAMLProviderArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful UpdateSAMLProvider request. +type UpdateSAMLProviderOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the SAML provider that was updated. + SAMLProviderArn *string `min:"20" type:"string"` +} + +// String returns the string representation +func (s UpdateSAMLProviderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSAMLProviderOutput) GoString() string { + return s.String() +} + +type UpdateSSHPublicKeyInput struct { + _ struct{} `type:"structure"` + + // The unique identifier for the SSH public key. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters that can consist of any upper or lowercased letter + // or digit. + SSHPublicKeyId *string `min:"20" type:"string" required:"true"` + + // The status to assign to the SSH public key. Active means the key can be used + // for authentication with an AWS CodeCommit repository. Inactive means the + // key cannot be used. + Status *string `type:"string" required:"true" enum:"statusType"` + + // The name of the IAM user associated with the SSH public key. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateSSHPublicKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSSHPublicKeyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
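+//
+// A usage sketch for deactivating an SSH public key, using the statusType
+// constants defined at the bottom of this file (illustrative only; the key
+// ID is a placeholder):
+//
+//    svc := iam.New(session.New())
+//    _, err := svc.UpdateSSHPublicKey(&iam.UpdateSSHPublicKeyInput{
+//        SSHPublicKeyId: aws.String("APKAEXAMPLEEXAMPLE12"),
+//        Status:         aws.String(iam.StatusTypeInactive),
+//        UserName:       aws.String("alice"),
+//    })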
+func (s *UpdateSSHPublicKeyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateSSHPublicKeyInput"} + if s.SSHPublicKeyId == nil { + invalidParams.Add(request.NewErrParamRequired("SSHPublicKeyId")) + } + if s.SSHPublicKeyId != nil && len(*s.SSHPublicKeyId) < 20 { + invalidParams.Add(request.NewErrParamMinLen("SSHPublicKeyId", 20)) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateSSHPublicKeyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateSSHPublicKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSSHPublicKeyOutput) GoString() string { + return s.String() +} + +type UpdateServerCertificateInput struct { + _ struct{} `type:"structure"` + + // The new path for the server certificate. Include this only if you are updating + // the server certificate's path. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes, containing any + // ASCII character from the ! (\u0021) thru the DEL character (\u007F), including + // most punctuation characters, digits, and upper and lowercased letters. + NewPath *string `min:"1" type:"string"` + + // The new name for the server certificate. Include this only if you are updating + // the server certificate's name. The name of the certificate cannot contain + // any spaces. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + NewServerCertificateName *string `min:"1" type:"string"` + + // The name of the server certificate that you want to update. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + ServerCertificateName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateServerCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateServerCertificateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
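+//
+// A usage sketch for renaming a server certificate (illustrative only; the
+// names are placeholders). Only ServerCertificateName is required, but the
+// call has no effect unless NewPath or NewServerCertificateName is supplied:
+//
+//    svc := iam.New(session.New())
+//    _, err := svc.UpdateServerCertificate(&iam.UpdateServerCertificateInput{
+//        ServerCertificateName:    aws.String("ProdCert"),
+//        NewServerCertificateName: aws.String("ProdCert-2017"),
+//    })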
+func (s *UpdateServerCertificateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateServerCertificateInput"} + if s.NewPath != nil && len(*s.NewPath) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NewPath", 1)) + } + if s.NewServerCertificateName != nil && len(*s.NewServerCertificateName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NewServerCertificateName", 1)) + } + if s.ServerCertificateName == nil { + invalidParams.Add(request.NewErrParamRequired("ServerCertificateName")) + } + if s.ServerCertificateName != nil && len(*s.ServerCertificateName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ServerCertificateName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateServerCertificateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateServerCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateServerCertificateOutput) GoString() string { + return s.String() +} + +type UpdateSigningCertificateInput struct { + _ struct{} `type:"structure"` + + // The ID of the signing certificate you want to update. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters that can consist of any upper or lowercased letter + // or digit. + CertificateId *string `min:"24" type:"string" required:"true"` + + // The status you want to assign to the certificate. Active means the certificate + // can be used for API calls to AWS, while Inactive means the certificate cannot + // be used. + Status *string `type:"string" required:"true" enum:"statusType"` + + // The name of the IAM user the signing certificate belongs to. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UpdateSigningCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSigningCertificateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
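+//
+// Note that client-side validation only checks presence and minimum length;
+// a sketch of deactivating a signing certificate (illustrative only; the
+// certificate ID is a placeholder):
+//
+//    svc := iam.New(session.New())
+//    _, err := svc.UpdateSigningCertificate(&iam.UpdateSigningCertificateInput{
+//        CertificateId: aws.String("TA7EXAMPLECERTIFICATEID0"),
+//        Status:        aws.String(iam.StatusTypeInactive),
+//    })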
+func (s *UpdateSigningCertificateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateSigningCertificateInput"} + if s.CertificateId == nil { + invalidParams.Add(request.NewErrParamRequired("CertificateId")) + } + if s.CertificateId != nil && len(*s.CertificateId) < 24 { + invalidParams.Add(request.NewErrParamMinLen("CertificateId", 24)) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateSigningCertificateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateSigningCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSigningCertificateOutput) GoString() string { + return s.String() +} + +type UpdateUserInput struct { + _ struct{} `type:"structure"` + + // New path for the IAM user. Include this parameter only if you're changing + // the user's path. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes, containing any + // ASCII character from the ! (\u0021) thru the DEL character (\u007F), including + // most punctuation characters, digits, and upper and lowercased letters. + NewPath *string `min:"1" type:"string"` + + // New name for the user. Include this parameter only if you're changing the + // user's name. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + NewUserName *string `min:"1" type:"string"` + + // Name of the user to update. If you're changing the name of the user, this + // is the original user name. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateUserInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateUserInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
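+//
+// A usage sketch for renaming a user (illustrative only; the user names are
+// placeholders):
+//
+//    svc := iam.New(session.New())
+//    _, err := svc.UpdateUser(&iam.UpdateUserInput{
+//        UserName:    aws.String("bob"),
+//        NewUserName: aws.String("robert"),
+//    })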
+func (s *UpdateUserInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateUserInput"} + if s.NewPath != nil && len(*s.NewPath) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NewPath", 1)) + } + if s.NewUserName != nil && len(*s.NewUserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NewUserName", 1)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateUserOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateUserOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateUserOutput) GoString() string { + return s.String() +} + +type UploadSSHPublicKeyInput struct { + _ struct{} `type:"structure"` + + // The SSH public key. The public key must be encoded in ssh-rsa format or PEM + // format. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of any printable ASCII character ranging + // from the space character (\u0020) through end of the ASCII character range + // (\u00FF). It also includes the special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D). + SSHPublicKeyBody *string `min:"1" type:"string" required:"true"` + + // The name of the IAM user to associate the SSH public key with. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UploadSSHPublicKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadSSHPublicKeyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UploadSSHPublicKeyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UploadSSHPublicKeyInput"} + if s.SSHPublicKeyBody == nil { + invalidParams.Add(request.NewErrParamRequired("SSHPublicKeyBody")) + } + if s.SSHPublicKeyBody != nil && len(*s.SSHPublicKeyBody) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SSHPublicKeyBody", 1)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful UploadSSHPublicKey request. +type UploadSSHPublicKeyOutput struct { + _ struct{} `type:"structure"` + + // Contains information about the SSH public key. 
+ SSHPublicKey *SSHPublicKey `type:"structure"` +} + +// String returns the string representation +func (s UploadSSHPublicKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadSSHPublicKeyOutput) GoString() string { + return s.String() +} + +type UploadServerCertificateInput struct { + _ struct{} `type:"structure"` + + // The contents of the public key certificate in PEM-encoded format. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of any printable ASCII character ranging + // from the space character (\u0020) through end of the ASCII character range + // (\u00FF). It also includes the special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D). + CertificateBody *string `min:"1" type:"string" required:"true"` + + // The contents of the certificate chain. This is typically a concatenation + // of the PEM-encoded public key certificates of the chain. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of any printable ASCII character ranging + // from the space character (\u0020) through end of the ASCII character range + // (\u00FF). It also includes the special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D). + CertificateChain *string `min:"1" type:"string"` + + // The path for the server certificate. For more information about paths, see + // IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the IAM User Guide. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/). The regex pattern (http://wikipedia.org/wiki/regex) for this parameter + // is a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes, containing any + // ASCII character from the ! (\u0021) thru the DEL character (\u007F), including + // most punctuation characters, digits, and upper and lowercased letters. + // + // If you are uploading a server certificate specifically for use with Amazon + // CloudFront distributions, you must specify a path using the Path parameter. + // The path must begin with /cloudfront and must include a trailing slash (for + // example, /cloudfront/test/). + Path *string `min:"1" type:"string"` + + // The contents of the private key in PEM-encoded format. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of any printable ASCII character ranging + // from the space character (\u0020) through end of the ASCII character range + // (\u00FF). It also includes the special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D). + PrivateKey *string `min:"1" type:"string" required:"true"` + + // The name for the server certificate. Do not include the path in this value. + // The name of the certificate cannot contain any spaces. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces.
You can also include any of the following characters: =,.@- + ServerCertificateName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UploadServerCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadServerCertificateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UploadServerCertificateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UploadServerCertificateInput"} + if s.CertificateBody == nil { + invalidParams.Add(request.NewErrParamRequired("CertificateBody")) + } + if s.CertificateBody != nil && len(*s.CertificateBody) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CertificateBody", 1)) + } + if s.CertificateChain != nil && len(*s.CertificateChain) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CertificateChain", 1)) + } + if s.Path != nil && len(*s.Path) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Path", 1)) + } + if s.PrivateKey == nil { + invalidParams.Add(request.NewErrParamRequired("PrivateKey")) + } + if s.PrivateKey != nil && len(*s.PrivateKey) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PrivateKey", 1)) + } + if s.ServerCertificateName == nil { + invalidParams.Add(request.NewErrParamRequired("ServerCertificateName")) + } + if s.ServerCertificateName != nil && len(*s.ServerCertificateName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ServerCertificateName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful UploadServerCertificate request. +type UploadServerCertificateOutput struct { + _ struct{} `type:"structure"` + + // The meta information of the uploaded server certificate without its certificate + // body, certificate chain, and private key. + ServerCertificateMetadata *ServerCertificateMetadata `type:"structure"` +} + +// String returns the string representation +func (s UploadServerCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadServerCertificateOutput) GoString() string { + return s.String() +} + +type UploadSigningCertificateInput struct { + _ struct{} `type:"structure"` + + // The contents of the signing certificate. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of any printable ASCII character ranging + // from the space character (\u0020) through end of the ASCII character range + // (\u00FF). It also includes the special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D). + CertificateBody *string `min:"1" type:"string" required:"true"` + + // The name of the user the signing certificate is for. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. 
You can also include any of the following characters: =,.@- + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UploadSigningCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadSigningCertificateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UploadSigningCertificateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UploadSigningCertificateInput"} + if s.CertificateBody == nil { + invalidParams.Add(request.NewErrParamRequired("CertificateBody")) + } + if s.CertificateBody != nil && len(*s.CertificateBody) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CertificateBody", 1)) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful UploadSigningCertificate request. +type UploadSigningCertificateOutput struct { + _ struct{} `type:"structure"` + + // Information about the certificate. + Certificate *SigningCertificate `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UploadSigningCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadSigningCertificateOutput) GoString() string { + return s.String() +} + +// Contains information about an IAM user entity. +// +// This data type is used as a response element in the following actions: +// +// CreateUser +// +// GetUser +// +// ListUsers +type User struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) that identifies the user. For more information + // about ARNs and how to use ARNs in policies, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Arn *string `min:"20" type:"string" required:"true"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the user was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the user's password was last used to sign in to an AWS website. For + // a list of AWS websites that capture a user's last sign-in time, see the Credential + // Reports (http://docs.aws.amazon.com/IAM/latest/UserGuide/credential-reports.html) + // topic in the Using IAM guide. If a password is used more than once in a five-minute + // span, only the first use is returned in this field. This field is null (not + // present) when: + // + // The user does not have a password + // + // The password exists but has never been used (at least not since IAM started + // tracking this information on October 20th, 2014) + // + // There is no sign-in data associated with the user + // + // This value is returned only in the GetUser and ListUsers actions. + PasswordLastUsed *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The path to the user. For more information about paths, see IAM Identifiers + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Path *string `min:"1" type:"string" required:"true"` + + // The stable and unique string identifying the user.
For more information about + // IDs, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + UserId *string `min:"16" type:"string" required:"true"` + + // The friendly name identifying the user. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s User) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s User) GoString() string { + return s.String() +} + +// Contains information about an IAM user, including all the user's policies +// and all the IAM groups the user is in. +// +// This data type is used as a response element in the GetAccountAuthorizationDetails +// action. +type UserDetail struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + Arn *string `min:"20" type:"string"` + + // A list of the managed policies attached to the user. + AttachedManagedPolicies []*AttachedPolicy `type:"list"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the user was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // A list of IAM groups that the user is in. + GroupList []*string `type:"list"` + + // The path to the user. For more information about paths, see IAM Identifiers + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Path *string `min:"1" type:"string"` + + // The stable and unique string identifying the user. For more information about + // IDs, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + UserId *string `min:"16" type:"string"` + + // The friendly name identifying the user. + UserName *string `min:"1" type:"string"` + + // A list of the inline policies embedded in the user. + UserPolicyList []*PolicyDetail `type:"list"` +} + +// String returns the string representation +func (s UserDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UserDetail) GoString() string { + return s.String() +} + +// Contains information about a virtual MFA device. +type VirtualMFADevice struct { + _ struct{} `type:"structure"` + + // The Base32 seed defined as specified in RFC3548 (http://www.ietf.org/rfc/rfc3548.txt). + // The Base32StringSeed is Base64-encoded. + // + // Base32StringSeed is automatically base64 encoded/decoded by the SDK. + Base32StringSeed []byte `type:"blob"` + + // The date and time on which the virtual MFA device was enabled. + EnableDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // A QR code PNG image that encodes otpauth://totp/$virtualMFADeviceName@$AccountName?secret=$Base32String + // where $virtualMFADeviceName is one of the create call arguments, AccountName + // is the user name if set (otherwise, the account ID), and Base32String + // is the seed in Base32 format. The Base32String value is Base64-encoded. + // + // QRCodePNG is automatically base64 encoded/decoded by the SDK. + QRCodePNG []byte `type:"blob"` + + // The serial number associated with VirtualMFADevice.
+ SerialNumber *string `min:"9" type:"string" required:"true"` + + // Contains information about an IAM user entity. + // + // This data type is used as a response element in the following actions: + // + // CreateUser + // + // GetUser + // + // ListUsers + User *User `type:"structure"` +} + +// String returns the string representation +func (s VirtualMFADevice) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VirtualMFADevice) GoString() string { + return s.String() +} + +const ( + // @enum ContextKeyTypeEnum + ContextKeyTypeEnumString = "string" + // @enum ContextKeyTypeEnum + ContextKeyTypeEnumStringList = "stringList" + // @enum ContextKeyTypeEnum + ContextKeyTypeEnumNumeric = "numeric" + // @enum ContextKeyTypeEnum + ContextKeyTypeEnumNumericList = "numericList" + // @enum ContextKeyTypeEnum + ContextKeyTypeEnumBoolean = "boolean" + // @enum ContextKeyTypeEnum + ContextKeyTypeEnumBooleanList = "booleanList" + // @enum ContextKeyTypeEnum + ContextKeyTypeEnumIp = "ip" + // @enum ContextKeyTypeEnum + ContextKeyTypeEnumIpList = "ipList" + // @enum ContextKeyTypeEnum + ContextKeyTypeEnumBinary = "binary" + // @enum ContextKeyTypeEnum + ContextKeyTypeEnumBinaryList = "binaryList" + // @enum ContextKeyTypeEnum + ContextKeyTypeEnumDate = "date" + // @enum ContextKeyTypeEnum + ContextKeyTypeEnumDateList = "dateList" +) + +const ( + // @enum EntityType + EntityTypeUser = "User" + // @enum EntityType + EntityTypeRole = "Role" + // @enum EntityType + EntityTypeGroup = "Group" + // @enum EntityType + EntityTypeLocalManagedPolicy = "LocalManagedPolicy" + // @enum EntityType + EntityTypeAwsmanagedPolicy = "AWSManagedPolicy" +) + +const ( + // @enum PolicyEvaluationDecisionType + PolicyEvaluationDecisionTypeAllowed = "allowed" + // @enum PolicyEvaluationDecisionType + PolicyEvaluationDecisionTypeExplicitDeny = "explicitDeny" + // @enum PolicyEvaluationDecisionType + PolicyEvaluationDecisionTypeImplicitDeny = "implicitDeny" +) + +const ( + // @enum PolicySourceType + PolicySourceTypeUser = "user" + // @enum PolicySourceType + PolicySourceTypeGroup = "group" + // @enum PolicySourceType + PolicySourceTypeRole = "role" + // @enum PolicySourceType + PolicySourceTypeAwsManaged = "aws-managed" + // @enum PolicySourceType + PolicySourceTypeUserManaged = "user-managed" + // @enum PolicySourceType + PolicySourceTypeResource = "resource" + // @enum PolicySourceType + PolicySourceTypeNone = "none" +) + +const ( + // @enum ReportFormatType + ReportFormatTypeTextCsv = "text/csv" +) + +const ( + // @enum ReportStateType + ReportStateTypeStarted = "STARTED" + // @enum ReportStateType + ReportStateTypeInprogress = "INPROGRESS" + // @enum ReportStateType + ReportStateTypeComplete = "COMPLETE" +) + +const ( + // @enum assignmentStatusType + AssignmentStatusTypeAssigned = "Assigned" + // @enum assignmentStatusType + AssignmentStatusTypeUnassigned = "Unassigned" + // @enum assignmentStatusType + AssignmentStatusTypeAny = "Any" +) + +const ( + // @enum encodingType + EncodingTypeSsh = "SSH" + // @enum encodingType + EncodingTypePem = "PEM" +) + +const ( + // @enum policyScopeType + PolicyScopeTypeAll = "All" + // @enum policyScopeType + PolicyScopeTypeAws = "AWS" + // @enum policyScopeType + PolicyScopeTypeLocal = "Local" +) + +const ( + // @enum statusType + StatusTypeActive = "Active" + // @enum statusType + StatusTypeInactive = "Inactive" +) + +const ( + // @enum summaryKeyType + SummaryKeyTypeUsers = "Users" + // @enum summaryKeyType + 
SummaryKeyTypeUsersQuota = "UsersQuota" + // @enum summaryKeyType + SummaryKeyTypeGroups = "Groups" + // @enum summaryKeyType + SummaryKeyTypeGroupsQuota = "GroupsQuota" + // @enum summaryKeyType + SummaryKeyTypeServerCertificates = "ServerCertificates" + // @enum summaryKeyType + SummaryKeyTypeServerCertificatesQuota = "ServerCertificatesQuota" + // @enum summaryKeyType + SummaryKeyTypeUserPolicySizeQuota = "UserPolicySizeQuota" + // @enum summaryKeyType + SummaryKeyTypeGroupPolicySizeQuota = "GroupPolicySizeQuota" + // @enum summaryKeyType + SummaryKeyTypeGroupsPerUserQuota = "GroupsPerUserQuota" + // @enum summaryKeyType + SummaryKeyTypeSigningCertificatesPerUserQuota = "SigningCertificatesPerUserQuota" + // @enum summaryKeyType + SummaryKeyTypeAccessKeysPerUserQuota = "AccessKeysPerUserQuota" + // @enum summaryKeyType + SummaryKeyTypeMfadevices = "MFADevices" + // @enum summaryKeyType + SummaryKeyTypeMfadevicesInUse = "MFADevicesInUse" + // @enum summaryKeyType + SummaryKeyTypeAccountMfaenabled = "AccountMFAEnabled" + // @enum summaryKeyType + SummaryKeyTypeAccountAccessKeysPresent = "AccountAccessKeysPresent" + // @enum summaryKeyType + SummaryKeyTypeAccountSigningCertificatesPresent = "AccountSigningCertificatesPresent" + // @enum summaryKeyType + SummaryKeyTypeAttachedPoliciesPerGroupQuota = "AttachedPoliciesPerGroupQuota" + // @enum summaryKeyType + SummaryKeyTypeAttachedPoliciesPerRoleQuota = "AttachedPoliciesPerRoleQuota" + // @enum summaryKeyType + SummaryKeyTypeAttachedPoliciesPerUserQuota = "AttachedPoliciesPerUserQuota" + // @enum summaryKeyType + SummaryKeyTypePolicies = "Policies" + // @enum summaryKeyType + SummaryKeyTypePoliciesQuota = "PoliciesQuota" + // @enum summaryKeyType + SummaryKeyTypePolicySizeQuota = "PolicySizeQuota" + // @enum summaryKeyType + SummaryKeyTypePolicyVersionsInUse = "PolicyVersionsInUse" + // @enum summaryKeyType + SummaryKeyTypePolicyVersionsInUseQuota = "PolicyVersionsInUseQuota" + // @enum summaryKeyType + SummaryKeyTypeVersionsPerPolicyQuota = "VersionsPerPolicyQuota" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/iam/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/iam/examples_test.go new file mode 100644 index 000000000..1aebec8cd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/iam/examples_test.go @@ -0,0 +1,2366 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package iam_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/iam" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleIAM_AddClientIDToOpenIDConnectProvider() { + svc := iam.New(session.New()) + + params := &iam.AddClientIDToOpenIDConnectProviderInput{ + ClientID: aws.String("clientIDType"), // Required + OpenIDConnectProviderArn: aws.String("arnType"), // Required + } + resp, err := svc.AddClientIDToOpenIDConnectProvider(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
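+	// (AddClientIDToOpenIDConnectProviderOutput has no fields, so this prints
+	// an empty struct; success is indicated by err == nil.)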
+ fmt.Println(resp) +} + +func ExampleIAM_AddRoleToInstanceProfile() { + svc := iam.New(session.New()) + + params := &iam.AddRoleToInstanceProfileInput{ + InstanceProfileName: aws.String("instanceProfileNameType"), // Required + RoleName: aws.String("roleNameType"), // Required + } + resp, err := svc.AddRoleToInstanceProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_AddUserToGroup() { + svc := iam.New(session.New()) + + params := &iam.AddUserToGroupInput{ + GroupName: aws.String("groupNameType"), // Required + UserName: aws.String("existingUserNameType"), // Required + } + resp, err := svc.AddUserToGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_AttachGroupPolicy() { + svc := iam.New(session.New()) + + params := &iam.AttachGroupPolicyInput{ + GroupName: aws.String("groupNameType"), // Required + PolicyArn: aws.String("arnType"), // Required + } + resp, err := svc.AttachGroupPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_AttachRolePolicy() { + svc := iam.New(session.New()) + + params := &iam.AttachRolePolicyInput{ + PolicyArn: aws.String("arnType"), // Required + RoleName: aws.String("roleNameType"), // Required + } + resp, err := svc.AttachRolePolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_AttachUserPolicy() { + svc := iam.New(session.New()) + + params := &iam.AttachUserPolicyInput{ + PolicyArn: aws.String("arnType"), // Required + UserName: aws.String("userNameType"), // Required + } + resp, err := svc.AttachUserPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ChangePassword() { + svc := iam.New(session.New()) + + params := &iam.ChangePasswordInput{ + NewPassword: aws.String("passwordType"), // Required + OldPassword: aws.String("passwordType"), // Required + } + resp, err := svc.ChangePassword(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_CreateAccessKey() { + svc := iam.New(session.New()) + + params := &iam.CreateAccessKeyInput{ + UserName: aws.String("existingUserNameType"), + } + resp, err := svc.CreateAccessKey(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
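+	// (resp.AccessKey.SecretAccessKey is returned only by this call and cannot
+	// be retrieved again later, so store it securely.)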
+ fmt.Println(resp) +} + +func ExampleIAM_CreateAccountAlias() { + svc := iam.New(session.New()) + + params := &iam.CreateAccountAliasInput{ + AccountAlias: aws.String("accountAliasType"), // Required + } + resp, err := svc.CreateAccountAlias(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_CreateGroup() { + svc := iam.New(session.New()) + + params := &iam.CreateGroupInput{ + GroupName: aws.String("groupNameType"), // Required + Path: aws.String("pathType"), + } + resp, err := svc.CreateGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_CreateInstanceProfile() { + svc := iam.New(session.New()) + + params := &iam.CreateInstanceProfileInput{ + InstanceProfileName: aws.String("instanceProfileNameType"), // Required + Path: aws.String("pathType"), + } + resp, err := svc.CreateInstanceProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_CreateLoginProfile() { + svc := iam.New(session.New()) + + params := &iam.CreateLoginProfileInput{ + Password: aws.String("passwordType"), // Required + UserName: aws.String("userNameType"), // Required + PasswordResetRequired: aws.Bool(true), + } + resp, err := svc.CreateLoginProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_CreateOpenIDConnectProvider() { + svc := iam.New(session.New()) + + params := &iam.CreateOpenIDConnectProviderInput{ + ThumbprintList: []*string{ // Required + aws.String("thumbprintType"), // Required + // More values... + }, + Url: aws.String("OpenIDConnectProviderUrlType"), // Required + ClientIDList: []*string{ + aws.String("clientIDType"), // Required + // More values... + }, + } + resp, err := svc.CreateOpenIDConnectProvider(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_CreatePolicy() { + svc := iam.New(session.New()) + + params := &iam.CreatePolicyInput{ + PolicyDocument: aws.String("policyDocumentType"), // Required + PolicyName: aws.String("policyNameType"), // Required + Description: aws.String("policyDescriptionType"), + Path: aws.String("policyPathType"), + } + resp, err := svc.CreatePolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
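+	// (resp.Policy.Arn identifies the new managed policy; it is the value the
+	// Attach*Policy operations expect as PolicyArn.)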
+ fmt.Println(resp) +} + +func ExampleIAM_CreatePolicyVersion() { + svc := iam.New(session.New()) + + params := &iam.CreatePolicyVersionInput{ + PolicyArn: aws.String("arnType"), // Required + PolicyDocument: aws.String("policyDocumentType"), // Required + SetAsDefault: aws.Bool(true), + } + resp, err := svc.CreatePolicyVersion(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_CreateRole() { + svc := iam.New(session.New()) + + params := &iam.CreateRoleInput{ + AssumeRolePolicyDocument: aws.String("policyDocumentType"), // Required + RoleName: aws.String("roleNameType"), // Required + Path: aws.String("pathType"), + } + resp, err := svc.CreateRole(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_CreateSAMLProvider() { + svc := iam.New(session.New()) + + params := &iam.CreateSAMLProviderInput{ + Name: aws.String("SAMLProviderNameType"), // Required + SAMLMetadataDocument: aws.String("SAMLMetadataDocumentType"), // Required + } + resp, err := svc.CreateSAMLProvider(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_CreateUser() { + svc := iam.New(session.New()) + + params := &iam.CreateUserInput{ + UserName: aws.String("userNameType"), // Required + Path: aws.String("pathType"), + } + resp, err := svc.CreateUser(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_CreateVirtualMFADevice() { + svc := iam.New(session.New()) + + params := &iam.CreateVirtualMFADeviceInput{ + VirtualMFADeviceName: aws.String("virtualMFADeviceName"), // Required + Path: aws.String("pathType"), + } + resp, err := svc.CreateVirtualMFADevice(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeactivateMFADevice() { + svc := iam.New(session.New()) + + params := &iam.DeactivateMFADeviceInput{ + SerialNumber: aws.String("serialNumberType"), // Required + UserName: aws.String("existingUserNameType"), // Required + } + resp, err := svc.DeactivateMFADevice(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteAccessKey() { + svc := iam.New(session.New()) + + params := &iam.DeleteAccessKeyInput{ + AccessKeyId: aws.String("accessKeyIdType"), // Required + UserName: aws.String("existingUserNameType"), + } + resp, err := svc.DeleteAccessKey(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_DeleteAccountAlias() { + svc := iam.New(session.New()) + + params := &iam.DeleteAccountAliasInput{ + AccountAlias: aws.String("accountAliasType"), // Required + } + resp, err := svc.DeleteAccountAlias(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteAccountPasswordPolicy() { + svc := iam.New(session.New()) + + var params *iam.DeleteAccountPasswordPolicyInput + resp, err := svc.DeleteAccountPasswordPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteGroup() { + svc := iam.New(session.New()) + + params := &iam.DeleteGroupInput{ + GroupName: aws.String("groupNameType"), // Required + } + resp, err := svc.DeleteGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteGroupPolicy() { + svc := iam.New(session.New()) + + params := &iam.DeleteGroupPolicyInput{ + GroupName: aws.String("groupNameType"), // Required + PolicyName: aws.String("policyNameType"), // Required + } + resp, err := svc.DeleteGroupPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteInstanceProfile() { + svc := iam.New(session.New()) + + params := &iam.DeleteInstanceProfileInput{ + InstanceProfileName: aws.String("instanceProfileNameType"), // Required + } + resp, err := svc.DeleteInstanceProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteLoginProfile() { + svc := iam.New(session.New()) + + params := &iam.DeleteLoginProfileInput{ + UserName: aws.String("userNameType"), // Required + } + resp, err := svc.DeleteLoginProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteOpenIDConnectProvider() { + svc := iam.New(session.New()) + + params := &iam.DeleteOpenIDConnectProviderInput{ + OpenIDConnectProviderArn: aws.String("arnType"), // Required + } + resp, err := svc.DeleteOpenIDConnectProvider(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeletePolicy() { + svc := iam.New(session.New()) + + params := &iam.DeletePolicyInput{ + PolicyArn: aws.String("arnType"), // Required + } + resp, err := svc.DeletePolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeletePolicyVersion() { + svc := iam.New(session.New()) + + params := &iam.DeletePolicyVersionInput{ + PolicyArn: aws.String("arnType"), // Required + VersionId: aws.String("policyVersionIdType"), // Required + } + resp, err := svc.DeletePolicyVersion(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteRole() { + svc := iam.New(session.New()) + + params := &iam.DeleteRoleInput{ + RoleName: aws.String("roleNameType"), // Required + } + resp, err := svc.DeleteRole(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteRolePolicy() { + svc := iam.New(session.New()) + + params := &iam.DeleteRolePolicyInput{ + PolicyName: aws.String("policyNameType"), // Required + RoleName: aws.String("roleNameType"), // Required + } + resp, err := svc.DeleteRolePolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteSAMLProvider() { + svc := iam.New(session.New()) + + params := &iam.DeleteSAMLProviderInput{ + SAMLProviderArn: aws.String("arnType"), // Required + } + resp, err := svc.DeleteSAMLProvider(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteSSHPublicKey() { + svc := iam.New(session.New()) + + params := &iam.DeleteSSHPublicKeyInput{ + SSHPublicKeyId: aws.String("publicKeyIdType"), // Required + UserName: aws.String("userNameType"), // Required + } + resp, err := svc.DeleteSSHPublicKey(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteServerCertificate() { + svc := iam.New(session.New()) + + params := &iam.DeleteServerCertificateInput{ + ServerCertificateName: aws.String("serverCertificateNameType"), // Required + } + resp, err := svc.DeleteServerCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteSigningCertificate() { + svc := iam.New(session.New()) + + params := &iam.DeleteSigningCertificateInput{ + CertificateId: aws.String("certificateIdType"), // Required + UserName: aws.String("existingUserNameType"), + } + resp, err := svc.DeleteSigningCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_DeleteUser() { + svc := iam.New(session.New()) + + params := &iam.DeleteUserInput{ + UserName: aws.String("existingUserNameType"), // Required + } + resp, err := svc.DeleteUser(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteUserPolicy() { + svc := iam.New(session.New()) + + params := &iam.DeleteUserPolicyInput{ + PolicyName: aws.String("policyNameType"), // Required + UserName: aws.String("existingUserNameType"), // Required + } + resp, err := svc.DeleteUserPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteVirtualMFADevice() { + svc := iam.New(session.New()) + + params := &iam.DeleteVirtualMFADeviceInput{ + SerialNumber: aws.String("serialNumberType"), // Required + } + resp, err := svc.DeleteVirtualMFADevice(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DetachGroupPolicy() { + svc := iam.New(session.New()) + + params := &iam.DetachGroupPolicyInput{ + GroupName: aws.String("groupNameType"), // Required + PolicyArn: aws.String("arnType"), // Required + } + resp, err := svc.DetachGroupPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DetachRolePolicy() { + svc := iam.New(session.New()) + + params := &iam.DetachRolePolicyInput{ + PolicyArn: aws.String("arnType"), // Required + RoleName: aws.String("roleNameType"), // Required + } + resp, err := svc.DetachRolePolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DetachUserPolicy() { + svc := iam.New(session.New()) + + params := &iam.DetachUserPolicyInput{ + PolicyArn: aws.String("arnType"), // Required + UserName: aws.String("userNameType"), // Required + } + resp, err := svc.DetachUserPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_EnableMFADevice() { + svc := iam.New(session.New()) + + params := &iam.EnableMFADeviceInput{ + AuthenticationCode1: aws.String("authenticationCodeType"), // Required + AuthenticationCode2: aws.String("authenticationCodeType"), // Required + SerialNumber: aws.String("serialNumberType"), // Required + UserName: aws.String("existingUserNameType"), // Required + } + resp, err := svc.EnableMFADevice(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_GenerateCredentialReport() { + svc := iam.New(session.New()) + + var params *iam.GenerateCredentialReportInput + resp, err := svc.GenerateCredentialReport(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetAccessKeyLastUsed() { + svc := iam.New(session.New()) + + params := &iam.GetAccessKeyLastUsedInput{ + AccessKeyId: aws.String("accessKeyIdType"), // Required + } + resp, err := svc.GetAccessKeyLastUsed(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetAccountAuthorizationDetails() { + svc := iam.New(session.New()) + + params := &iam.GetAccountAuthorizationDetailsInput{ + Filter: []*string{ + aws.String("EntityType"), // Required + // More values... + }, + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + } + resp, err := svc.GetAccountAuthorizationDetails(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetAccountPasswordPolicy() { + svc := iam.New(session.New()) + + var params *iam.GetAccountPasswordPolicyInput + resp, err := svc.GetAccountPasswordPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetAccountSummary() { + svc := iam.New(session.New()) + + var params *iam.GetAccountSummaryInput + resp, err := svc.GetAccountSummary(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetContextKeysForCustomPolicy() { + svc := iam.New(session.New()) + + params := &iam.GetContextKeysForCustomPolicyInput{ + PolicyInputList: []*string{ // Required + aws.String("policyDocumentType"), // Required + // More values... + }, + } + resp, err := svc.GetContextKeysForCustomPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetContextKeysForPrincipalPolicy() { + svc := iam.New(session.New()) + + params := &iam.GetContextKeysForPrincipalPolicyInput{ + PolicySourceArn: aws.String("arnType"), // Required + PolicyInputList: []*string{ + aws.String("policyDocumentType"), // Required + // More values... + }, + } + resp, err := svc.GetContextKeysForPrincipalPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_GetCredentialReport() { + svc := iam.New(session.New()) + + var params *iam.GetCredentialReportInput + resp, err := svc.GetCredentialReport(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetGroup() { + svc := iam.New(session.New()) + + params := &iam.GetGroupInput{ + GroupName: aws.String("groupNameType"), // Required + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + } + resp, err := svc.GetGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetGroupPolicy() { + svc := iam.New(session.New()) + + params := &iam.GetGroupPolicyInput{ + GroupName: aws.String("groupNameType"), // Required + PolicyName: aws.String("policyNameType"), // Required + } + resp, err := svc.GetGroupPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetInstanceProfile() { + svc := iam.New(session.New()) + + params := &iam.GetInstanceProfileInput{ + InstanceProfileName: aws.String("instanceProfileNameType"), // Required + } + resp, err := svc.GetInstanceProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetLoginProfile() { + svc := iam.New(session.New()) + + params := &iam.GetLoginProfileInput{ + UserName: aws.String("userNameType"), // Required + } + resp, err := svc.GetLoginProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetOpenIDConnectProvider() { + svc := iam.New(session.New()) + + params := &iam.GetOpenIDConnectProviderInput{ + OpenIDConnectProviderArn: aws.String("arnType"), // Required + } + resp, err := svc.GetOpenIDConnectProvider(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetPolicy() { + svc := iam.New(session.New()) + + params := &iam.GetPolicyInput{ + PolicyArn: aws.String("arnType"), // Required + } + resp, err := svc.GetPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetPolicyVersion() { + svc := iam.New(session.New()) + + params := &iam.GetPolicyVersionInput{ + PolicyArn: aws.String("arnType"), // Required + VersionId: aws.String("policyVersionIdType"), // Required + } + resp, err := svc.GetPolicyVersion(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetRole() { + svc := iam.New(session.New()) + + params := &iam.GetRoleInput{ + RoleName: aws.String("roleNameType"), // Required + } + resp, err := svc.GetRole(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetRolePolicy() { + svc := iam.New(session.New()) + + params := &iam.GetRolePolicyInput{ + PolicyName: aws.String("policyNameType"), // Required + RoleName: aws.String("roleNameType"), // Required + } + resp, err := svc.GetRolePolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetSAMLProvider() { + svc := iam.New(session.New()) + + params := &iam.GetSAMLProviderInput{ + SAMLProviderArn: aws.String("arnType"), // Required + } + resp, err := svc.GetSAMLProvider(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetSSHPublicKey() { + svc := iam.New(session.New()) + + params := &iam.GetSSHPublicKeyInput{ + Encoding: aws.String("encodingType"), // Required + SSHPublicKeyId: aws.String("publicKeyIdType"), // Required + UserName: aws.String("userNameType"), // Required + } + resp, err := svc.GetSSHPublicKey(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetServerCertificate() { + svc := iam.New(session.New()) + + params := &iam.GetServerCertificateInput{ + ServerCertificateName: aws.String("serverCertificateNameType"), // Required + } + resp, err := svc.GetServerCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetUser() { + svc := iam.New(session.New()) + + params := &iam.GetUserInput{ + UserName: aws.String("existingUserNameType"), + } + resp, err := svc.GetUser(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetUserPolicy() { + svc := iam.New(session.New()) + + params := &iam.GetUserPolicyInput{ + PolicyName: aws.String("policyNameType"), // Required + UserName: aws.String("existingUserNameType"), // Required + } + resp, err := svc.GetUserPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
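+ // Note: per the IAM API reference, the PolicyDocument returned here is
+ // URL-encoded (RFC 3986); a sketch of decoding it with the standard
+ // library, kept as a comment since net/url is not imported here:
+ //
+ //	doc, err := url.QueryUnescape(*resp.PolicyDocument)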
+ fmt.Println(resp) +} + +func ExampleIAM_ListAccessKeys() { + svc := iam.New(session.New()) + + params := &iam.ListAccessKeysInput{ + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + UserName: aws.String("existingUserNameType"), + } + resp, err := svc.ListAccessKeys(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListAccountAliases() { + svc := iam.New(session.New()) + + params := &iam.ListAccountAliasesInput{ + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListAccountAliases(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListAttachedGroupPolicies() { + svc := iam.New(session.New()) + + params := &iam.ListAttachedGroupPoliciesInput{ + GroupName: aws.String("groupNameType"), // Required + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + PathPrefix: aws.String("policyPathType"), + } + resp, err := svc.ListAttachedGroupPolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListAttachedRolePolicies() { + svc := iam.New(session.New()) + + params := &iam.ListAttachedRolePoliciesInput{ + RoleName: aws.String("roleNameType"), // Required + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + PathPrefix: aws.String("policyPathType"), + } + resp, err := svc.ListAttachedRolePolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListAttachedUserPolicies() { + svc := iam.New(session.New()) + + params := &iam.ListAttachedUserPoliciesInput{ + UserName: aws.String("userNameType"), // Required + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + PathPrefix: aws.String("policyPathType"), + } + resp, err := svc.ListAttachedUserPolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListEntitiesForPolicy() { + svc := iam.New(session.New()) + + params := &iam.ListEntitiesForPolicyInput{ + PolicyArn: aws.String("arnType"), // Required + EntityFilter: aws.String("EntityType"), + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + PathPrefix: aws.String("pathType"), + } + resp, err := svc.ListEntitiesForPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_ListGroupPolicies() { + svc := iam.New(session.New()) + + params := &iam.ListGroupPoliciesInput{ + GroupName: aws.String("groupNameType"), // Required + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListGroupPolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListGroups() { + svc := iam.New(session.New()) + + params := &iam.ListGroupsInput{ + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + PathPrefix: aws.String("pathPrefixType"), + } + resp, err := svc.ListGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListGroupsForUser() { + svc := iam.New(session.New()) + + params := &iam.ListGroupsForUserInput{ + UserName: aws.String("existingUserNameType"), // Required + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListGroupsForUser(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListInstanceProfiles() { + svc := iam.New(session.New()) + + params := &iam.ListInstanceProfilesInput{ + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + PathPrefix: aws.String("pathPrefixType"), + } + resp, err := svc.ListInstanceProfiles(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListInstanceProfilesForRole() { + svc := iam.New(session.New()) + + params := &iam.ListInstanceProfilesForRoleInput{ + RoleName: aws.String("roleNameType"), // Required + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListInstanceProfilesForRole(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListMFADevices() { + svc := iam.New(session.New()) + + params := &iam.ListMFADevicesInput{ + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + UserName: aws.String("existingUserNameType"), + } + resp, err := svc.ListMFADevices(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListOpenIDConnectProviders() { + svc := iam.New(session.New()) + + var params *iam.ListOpenIDConnectProvidersInput + resp, err := svc.ListOpenIDConnectProviders(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
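+ // Note: the nil *iam.ListOpenIDConnectProvidersInput is deliberate;
+ // operations whose inputs have no required members accept a nil input,
+ // and the generated request constructor substitutes an empty struct.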
+ fmt.Println(resp) +} + +func ExampleIAM_ListPolicies() { + svc := iam.New(session.New()) + + params := &iam.ListPoliciesInput{ + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + OnlyAttached: aws.Bool(true), + PathPrefix: aws.String("policyPathType"), + Scope: aws.String("policyScopeType"), + } + resp, err := svc.ListPolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListPolicyVersions() { + svc := iam.New(session.New()) + + params := &iam.ListPolicyVersionsInput{ + PolicyArn: aws.String("arnType"), // Required + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListPolicyVersions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListRolePolicies() { + svc := iam.New(session.New()) + + params := &iam.ListRolePoliciesInput{ + RoleName: aws.String("roleNameType"), // Required + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListRolePolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListRoles() { + svc := iam.New(session.New()) + + params := &iam.ListRolesInput{ + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + PathPrefix: aws.String("pathPrefixType"), + } + resp, err := svc.ListRoles(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListSAMLProviders() { + svc := iam.New(session.New()) + + var params *iam.ListSAMLProvidersInput + resp, err := svc.ListSAMLProviders(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListSSHPublicKeys() { + svc := iam.New(session.New()) + + params := &iam.ListSSHPublicKeysInput{ + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + UserName: aws.String("userNameType"), + } + resp, err := svc.ListSSHPublicKeys(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListServerCertificates() { + svc := iam.New(session.New()) + + params := &iam.ListServerCertificatesInput{ + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + PathPrefix: aws.String("pathPrefixType"), + } + resp, err := svc.ListServerCertificates(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
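+ // For large result sets the SDK also generates a paginated variant of
+ // this operation (see ListServerCertificatesPages in iamiface below);
+ // a brief sketch of its callback style:
+ //
+ //	err := svc.ListServerCertificatesPages(params,
+ //		func(page *iam.ListServerCertificatesOutput, lastPage bool) bool {
+ //			fmt.Println(page)
+ //			return !lastPage // true keeps paging; stop after the last page
+ //		})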
+ fmt.Println(resp) +} + +func ExampleIAM_ListSigningCertificates() { + svc := iam.New(session.New()) + + params := &iam.ListSigningCertificatesInput{ + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + UserName: aws.String("existingUserNameType"), + } + resp, err := svc.ListSigningCertificates(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListUserPolicies() { + svc := iam.New(session.New()) + + params := &iam.ListUserPoliciesInput{ + UserName: aws.String("existingUserNameType"), // Required + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListUserPolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListUsers() { + svc := iam.New(session.New()) + + params := &iam.ListUsersInput{ + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + PathPrefix: aws.String("pathPrefixType"), + } + resp, err := svc.ListUsers(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListVirtualMFADevices() { + svc := iam.New(session.New()) + + params := &iam.ListVirtualMFADevicesInput{ + AssignmentStatus: aws.String("assignmentStatusType"), + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListVirtualMFADevices(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_PutGroupPolicy() { + svc := iam.New(session.New()) + + params := &iam.PutGroupPolicyInput{ + GroupName: aws.String("groupNameType"), // Required + PolicyDocument: aws.String("policyDocumentType"), // Required + PolicyName: aws.String("policyNameType"), // Required + } + resp, err := svc.PutGroupPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_PutRolePolicy() { + svc := iam.New(session.New()) + + params := &iam.PutRolePolicyInput{ + PolicyDocument: aws.String("policyDocumentType"), // Required + PolicyName: aws.String("policyNameType"), // Required + RoleName: aws.String("roleNameType"), // Required + } + resp, err := svc.PutRolePolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_PutUserPolicy() { + svc := iam.New(session.New()) + + params := &iam.PutUserPolicyInput{ + PolicyDocument: aws.String("policyDocumentType"), // Required + PolicyName: aws.String("policyNameType"), // Required + UserName: aws.String("existingUserNameType"), // Required + } + resp, err := svc.PutUserPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_RemoveClientIDFromOpenIDConnectProvider() { + svc := iam.New(session.New()) + + params := &iam.RemoveClientIDFromOpenIDConnectProviderInput{ + ClientID: aws.String("clientIDType"), // Required + OpenIDConnectProviderArn: aws.String("arnType"), // Required + } + resp, err := svc.RemoveClientIDFromOpenIDConnectProvider(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_RemoveRoleFromInstanceProfile() { + svc := iam.New(session.New()) + + params := &iam.RemoveRoleFromInstanceProfileInput{ + InstanceProfileName: aws.String("instanceProfileNameType"), // Required + RoleName: aws.String("roleNameType"), // Required + } + resp, err := svc.RemoveRoleFromInstanceProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_RemoveUserFromGroup() { + svc := iam.New(session.New()) + + params := &iam.RemoveUserFromGroupInput{ + GroupName: aws.String("groupNameType"), // Required + UserName: aws.String("existingUserNameType"), // Required + } + resp, err := svc.RemoveUserFromGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ResyncMFADevice() { + svc := iam.New(session.New()) + + params := &iam.ResyncMFADeviceInput{ + AuthenticationCode1: aws.String("authenticationCodeType"), // Required + AuthenticationCode2: aws.String("authenticationCodeType"), // Required + SerialNumber: aws.String("serialNumberType"), // Required + UserName: aws.String("existingUserNameType"), // Required + } + resp, err := svc.ResyncMFADevice(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_SetDefaultPolicyVersion() { + svc := iam.New(session.New()) + + params := &iam.SetDefaultPolicyVersionInput{ + PolicyArn: aws.String("arnType"), // Required + VersionId: aws.String("policyVersionIdType"), // Required + } + resp, err := svc.SetDefaultPolicyVersion(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_SimulateCustomPolicy() { + svc := iam.New(session.New()) + + params := &iam.SimulateCustomPolicyInput{ + ActionNames: []*string{ // Required + aws.String("ActionNameType"), // Required + // More values... + }, + PolicyInputList: []*string{ // Required + aws.String("policyDocumentType"), // Required + // More values... + }, + CallerArn: aws.String("ResourceNameType"), + ContextEntries: []*iam.ContextEntry{ + { // Required + ContextKeyName: aws.String("ContextKeyNameType"), + ContextKeyType: aws.String("ContextKeyTypeEnum"), + ContextKeyValues: []*string{ + aws.String("ContextKeyValueType"), // Required + // More values... + }, + }, + // More values... 
+ }, + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + ResourceArns: []*string{ + aws.String("ResourceNameType"), // Required + // More values... + }, + ResourceHandlingOption: aws.String("ResourceHandlingOptionType"), + ResourceOwner: aws.String("ResourceNameType"), + ResourcePolicy: aws.String("policyDocumentType"), + } + resp, err := svc.SimulateCustomPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_SimulatePrincipalPolicy() { + svc := iam.New(session.New()) + + params := &iam.SimulatePrincipalPolicyInput{ + ActionNames: []*string{ // Required + aws.String("ActionNameType"), // Required + // More values... + }, + PolicySourceArn: aws.String("arnType"), // Required + CallerArn: aws.String("ResourceNameType"), + ContextEntries: []*iam.ContextEntry{ + { // Required + ContextKeyName: aws.String("ContextKeyNameType"), + ContextKeyType: aws.String("ContextKeyTypeEnum"), + ContextKeyValues: []*string{ + aws.String("ContextKeyValueType"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + PolicyInputList: []*string{ + aws.String("policyDocumentType"), // Required + // More values... + }, + ResourceArns: []*string{ + aws.String("ResourceNameType"), // Required + // More values... + }, + ResourceHandlingOption: aws.String("ResourceHandlingOptionType"), + ResourceOwner: aws.String("ResourceNameType"), + ResourcePolicy: aws.String("policyDocumentType"), + } + resp, err := svc.SimulatePrincipalPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_UpdateAccessKey() { + svc := iam.New(session.New()) + + params := &iam.UpdateAccessKeyInput{ + AccessKeyId: aws.String("accessKeyIdType"), // Required + Status: aws.String("statusType"), // Required + UserName: aws.String("existingUserNameType"), + } + resp, err := svc.UpdateAccessKey(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_UpdateAccountPasswordPolicy() { + svc := iam.New(session.New()) + + params := &iam.UpdateAccountPasswordPolicyInput{ + AllowUsersToChangePassword: aws.Bool(true), + HardExpiry: aws.Bool(true), + MaxPasswordAge: aws.Int64(1), + MinimumPasswordLength: aws.Int64(1), + PasswordReusePrevention: aws.Int64(1), + RequireLowercaseCharacters: aws.Bool(true), + RequireNumbers: aws.Bool(true), + RequireSymbols: aws.Bool(true), + RequireUppercaseCharacters: aws.Bool(true), + } + resp, err := svc.UpdateAccountPasswordPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_UpdateAssumeRolePolicy() { + svc := iam.New(session.New()) + + params := &iam.UpdateAssumeRolePolicyInput{ + PolicyDocument: aws.String("policyDocumentType"), // Required + RoleName: aws.String("roleNameType"), // Required + } + resp, err := svc.UpdateAssumeRolePolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_UpdateGroup() { + svc := iam.New(session.New()) + + params := &iam.UpdateGroupInput{ + GroupName: aws.String("groupNameType"), // Required + NewGroupName: aws.String("groupNameType"), + NewPath: aws.String("pathType"), + } + resp, err := svc.UpdateGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_UpdateLoginProfile() { + svc := iam.New(session.New()) + + params := &iam.UpdateLoginProfileInput{ + UserName: aws.String("userNameType"), // Required + Password: aws.String("passwordType"), + PasswordResetRequired: aws.Bool(true), + } + resp, err := svc.UpdateLoginProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_UpdateOpenIDConnectProviderThumbprint() { + svc := iam.New(session.New()) + + params := &iam.UpdateOpenIDConnectProviderThumbprintInput{ + OpenIDConnectProviderArn: aws.String("arnType"), // Required + ThumbprintList: []*string{ // Required + aws.String("thumbprintType"), // Required + // More values... + }, + } + resp, err := svc.UpdateOpenIDConnectProviderThumbprint(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_UpdateSAMLProvider() { + svc := iam.New(session.New()) + + params := &iam.UpdateSAMLProviderInput{ + SAMLMetadataDocument: aws.String("SAMLMetadataDocumentType"), // Required + SAMLProviderArn: aws.String("arnType"), // Required + } + resp, err := svc.UpdateSAMLProvider(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_UpdateSSHPublicKey() { + svc := iam.New(session.New()) + + params := &iam.UpdateSSHPublicKeyInput{ + SSHPublicKeyId: aws.String("publicKeyIdType"), // Required + Status: aws.String("statusType"), // Required + UserName: aws.String("userNameType"), // Required + } + resp, err := svc.UpdateSSHPublicKey(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_UpdateServerCertificate() { + svc := iam.New(session.New()) + + params := &iam.UpdateServerCertificateInput{ + ServerCertificateName: aws.String("serverCertificateNameType"), // Required + NewPath: aws.String("pathType"), + NewServerCertificateName: aws.String("serverCertificateNameType"), + } + resp, err := svc.UpdateServerCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_UpdateSigningCertificate() { + svc := iam.New(session.New()) + + params := &iam.UpdateSigningCertificateInput{ + CertificateId: aws.String("certificateIdType"), // Required + Status: aws.String("statusType"), // Required + UserName: aws.String("existingUserNameType"), + } + resp, err := svc.UpdateSigningCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_UpdateUser() { + svc := iam.New(session.New()) + + params := &iam.UpdateUserInput{ + UserName: aws.String("existingUserNameType"), // Required + NewPath: aws.String("pathType"), + NewUserName: aws.String("userNameType"), + } + resp, err := svc.UpdateUser(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_UploadSSHPublicKey() { + svc := iam.New(session.New()) + + params := &iam.UploadSSHPublicKeyInput{ + SSHPublicKeyBody: aws.String("publicKeyMaterialType"), // Required + UserName: aws.String("userNameType"), // Required + } + resp, err := svc.UploadSSHPublicKey(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_UploadServerCertificate() { + svc := iam.New(session.New()) + + params := &iam.UploadServerCertificateInput{ + CertificateBody: aws.String("certificateBodyType"), // Required + PrivateKey: aws.String("privateKeyType"), // Required + ServerCertificateName: aws.String("serverCertificateNameType"), // Required + CertificateChain: aws.String("certificateChainType"), + Path: aws.String("pathType"), + } + resp, err := svc.UploadServerCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_UploadSigningCertificate() { + svc := iam.New(session.New()) + + params := &iam.UploadSigningCertificateInput{ + CertificateBody: aws.String("certificateBodyType"), // Required + UserName: aws.String("existingUserNameType"), + } + resp, err := svc.UploadSigningCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/iam/iamiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/iam/iamiface/interface.go new file mode 100644 index 000000000..03b464c8d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/iam/iamiface/interface.go @@ -0,0 +1,518 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package iamiface provides an interface for the AWS Identity and Access Management service. +package iamiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/iam" +) + +// IAMAPI is the interface type for iam.IAM. +type IAMAPI interface { + AddClientIDToOpenIDConnectProviderRequest(*iam.AddClientIDToOpenIDConnectProviderInput) (*request.Request, *iam.AddClientIDToOpenIDConnectProviderOutput) + + AddClientIDToOpenIDConnectProvider(*iam.AddClientIDToOpenIDConnectProviderInput) (*iam.AddClientIDToOpenIDConnectProviderOutput, error) + + AddRoleToInstanceProfileRequest(*iam.AddRoleToInstanceProfileInput) (*request.Request, *iam.AddRoleToInstanceProfileOutput) + + AddRoleToInstanceProfile(*iam.AddRoleToInstanceProfileInput) (*iam.AddRoleToInstanceProfileOutput, error) + + AddUserToGroupRequest(*iam.AddUserToGroupInput) (*request.Request, *iam.AddUserToGroupOutput) + + AddUserToGroup(*iam.AddUserToGroupInput) (*iam.AddUserToGroupOutput, error) + + AttachGroupPolicyRequest(*iam.AttachGroupPolicyInput) (*request.Request, *iam.AttachGroupPolicyOutput) + + AttachGroupPolicy(*iam.AttachGroupPolicyInput) (*iam.AttachGroupPolicyOutput, error) + + AttachRolePolicyRequest(*iam.AttachRolePolicyInput) (*request.Request, *iam.AttachRolePolicyOutput) + + AttachRolePolicy(*iam.AttachRolePolicyInput) (*iam.AttachRolePolicyOutput, error) + + AttachUserPolicyRequest(*iam.AttachUserPolicyInput) (*request.Request, *iam.AttachUserPolicyOutput) + + AttachUserPolicy(*iam.AttachUserPolicyInput) (*iam.AttachUserPolicyOutput, error) + + ChangePasswordRequest(*iam.ChangePasswordInput) (*request.Request, *iam.ChangePasswordOutput) + + ChangePassword(*iam.ChangePasswordInput) (*iam.ChangePasswordOutput, error) + + CreateAccessKeyRequest(*iam.CreateAccessKeyInput) (*request.Request, *iam.CreateAccessKeyOutput) + + CreateAccessKey(*iam.CreateAccessKeyInput) (*iam.CreateAccessKeyOutput, error) + + CreateAccountAliasRequest(*iam.CreateAccountAliasInput) (*request.Request, *iam.CreateAccountAliasOutput) + + CreateAccountAlias(*iam.CreateAccountAliasInput) (*iam.CreateAccountAliasOutput, error) + + CreateGroupRequest(*iam.CreateGroupInput) (*request.Request, *iam.CreateGroupOutput) + + CreateGroup(*iam.CreateGroupInput) (*iam.CreateGroupOutput, error) + + CreateInstanceProfileRequest(*iam.CreateInstanceProfileInput) (*request.Request, *iam.CreateInstanceProfileOutput) + + CreateInstanceProfile(*iam.CreateInstanceProfileInput) (*iam.CreateInstanceProfileOutput, error) + + CreateLoginProfileRequest(*iam.CreateLoginProfileInput) (*request.Request, *iam.CreateLoginProfileOutput) + + CreateLoginProfile(*iam.CreateLoginProfileInput) (*iam.CreateLoginProfileOutput, error) + + CreateOpenIDConnectProviderRequest(*iam.CreateOpenIDConnectProviderInput) (*request.Request, *iam.CreateOpenIDConnectProviderOutput) + + CreateOpenIDConnectProvider(*iam.CreateOpenIDConnectProviderInput) (*iam.CreateOpenIDConnectProviderOutput, error) + + CreatePolicyRequest(*iam.CreatePolicyInput) (*request.Request, *iam.CreatePolicyOutput) + + CreatePolicy(*iam.CreatePolicyInput) (*iam.CreatePolicyOutput, error) + + 
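+ // Note how each operation appears in two forms: XxxRequest builds a
+ // *request.Request (plus the output value it will later fill) without
+ // sending it, while Xxx builds and sends the request in one call,
+ // returning the output and any error.
+ 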
CreatePolicyVersionRequest(*iam.CreatePolicyVersionInput) (*request.Request, *iam.CreatePolicyVersionOutput) + + CreatePolicyVersion(*iam.CreatePolicyVersionInput) (*iam.CreatePolicyVersionOutput, error) + + CreateRoleRequest(*iam.CreateRoleInput) (*request.Request, *iam.CreateRoleOutput) + + CreateRole(*iam.CreateRoleInput) (*iam.CreateRoleOutput, error) + + CreateSAMLProviderRequest(*iam.CreateSAMLProviderInput) (*request.Request, *iam.CreateSAMLProviderOutput) + + CreateSAMLProvider(*iam.CreateSAMLProviderInput) (*iam.CreateSAMLProviderOutput, error) + + CreateUserRequest(*iam.CreateUserInput) (*request.Request, *iam.CreateUserOutput) + + CreateUser(*iam.CreateUserInput) (*iam.CreateUserOutput, error) + + CreateVirtualMFADeviceRequest(*iam.CreateVirtualMFADeviceInput) (*request.Request, *iam.CreateVirtualMFADeviceOutput) + + CreateVirtualMFADevice(*iam.CreateVirtualMFADeviceInput) (*iam.CreateVirtualMFADeviceOutput, error) + + DeactivateMFADeviceRequest(*iam.DeactivateMFADeviceInput) (*request.Request, *iam.DeactivateMFADeviceOutput) + + DeactivateMFADevice(*iam.DeactivateMFADeviceInput) (*iam.DeactivateMFADeviceOutput, error) + + DeleteAccessKeyRequest(*iam.DeleteAccessKeyInput) (*request.Request, *iam.DeleteAccessKeyOutput) + + DeleteAccessKey(*iam.DeleteAccessKeyInput) (*iam.DeleteAccessKeyOutput, error) + + DeleteAccountAliasRequest(*iam.DeleteAccountAliasInput) (*request.Request, *iam.DeleteAccountAliasOutput) + + DeleteAccountAlias(*iam.DeleteAccountAliasInput) (*iam.DeleteAccountAliasOutput, error) + + DeleteAccountPasswordPolicyRequest(*iam.DeleteAccountPasswordPolicyInput) (*request.Request, *iam.DeleteAccountPasswordPolicyOutput) + + DeleteAccountPasswordPolicy(*iam.DeleteAccountPasswordPolicyInput) (*iam.DeleteAccountPasswordPolicyOutput, error) + + DeleteGroupRequest(*iam.DeleteGroupInput) (*request.Request, *iam.DeleteGroupOutput) + + DeleteGroup(*iam.DeleteGroupInput) (*iam.DeleteGroupOutput, error) + + DeleteGroupPolicyRequest(*iam.DeleteGroupPolicyInput) (*request.Request, *iam.DeleteGroupPolicyOutput) + + DeleteGroupPolicy(*iam.DeleteGroupPolicyInput) (*iam.DeleteGroupPolicyOutput, error) + + DeleteInstanceProfileRequest(*iam.DeleteInstanceProfileInput) (*request.Request, *iam.DeleteInstanceProfileOutput) + + DeleteInstanceProfile(*iam.DeleteInstanceProfileInput) (*iam.DeleteInstanceProfileOutput, error) + + DeleteLoginProfileRequest(*iam.DeleteLoginProfileInput) (*request.Request, *iam.DeleteLoginProfileOutput) + + DeleteLoginProfile(*iam.DeleteLoginProfileInput) (*iam.DeleteLoginProfileOutput, error) + + DeleteOpenIDConnectProviderRequest(*iam.DeleteOpenIDConnectProviderInput) (*request.Request, *iam.DeleteOpenIDConnectProviderOutput) + + DeleteOpenIDConnectProvider(*iam.DeleteOpenIDConnectProviderInput) (*iam.DeleteOpenIDConnectProviderOutput, error) + + DeletePolicyRequest(*iam.DeletePolicyInput) (*request.Request, *iam.DeletePolicyOutput) + + DeletePolicy(*iam.DeletePolicyInput) (*iam.DeletePolicyOutput, error) + + DeletePolicyVersionRequest(*iam.DeletePolicyVersionInput) (*request.Request, *iam.DeletePolicyVersionOutput) + + DeletePolicyVersion(*iam.DeletePolicyVersionInput) (*iam.DeletePolicyVersionOutput, error) + + DeleteRoleRequest(*iam.DeleteRoleInput) (*request.Request, *iam.DeleteRoleOutput) + + DeleteRole(*iam.DeleteRoleInput) (*iam.DeleteRoleOutput, error) + + DeleteRolePolicyRequest(*iam.DeleteRolePolicyInput) (*request.Request, *iam.DeleteRolePolicyOutput) + + DeleteRolePolicy(*iam.DeleteRolePolicyInput) (*iam.DeleteRolePolicyOutput, error) + + 
DeleteSAMLProviderRequest(*iam.DeleteSAMLProviderInput) (*request.Request, *iam.DeleteSAMLProviderOutput) + + DeleteSAMLProvider(*iam.DeleteSAMLProviderInput) (*iam.DeleteSAMLProviderOutput, error) + + DeleteSSHPublicKeyRequest(*iam.DeleteSSHPublicKeyInput) (*request.Request, *iam.DeleteSSHPublicKeyOutput) + + DeleteSSHPublicKey(*iam.DeleteSSHPublicKeyInput) (*iam.DeleteSSHPublicKeyOutput, error) + + DeleteServerCertificateRequest(*iam.DeleteServerCertificateInput) (*request.Request, *iam.DeleteServerCertificateOutput) + + DeleteServerCertificate(*iam.DeleteServerCertificateInput) (*iam.DeleteServerCertificateOutput, error) + + DeleteSigningCertificateRequest(*iam.DeleteSigningCertificateInput) (*request.Request, *iam.DeleteSigningCertificateOutput) + + DeleteSigningCertificate(*iam.DeleteSigningCertificateInput) (*iam.DeleteSigningCertificateOutput, error) + + DeleteUserRequest(*iam.DeleteUserInput) (*request.Request, *iam.DeleteUserOutput) + + DeleteUser(*iam.DeleteUserInput) (*iam.DeleteUserOutput, error) + + DeleteUserPolicyRequest(*iam.DeleteUserPolicyInput) (*request.Request, *iam.DeleteUserPolicyOutput) + + DeleteUserPolicy(*iam.DeleteUserPolicyInput) (*iam.DeleteUserPolicyOutput, error) + + DeleteVirtualMFADeviceRequest(*iam.DeleteVirtualMFADeviceInput) (*request.Request, *iam.DeleteVirtualMFADeviceOutput) + + DeleteVirtualMFADevice(*iam.DeleteVirtualMFADeviceInput) (*iam.DeleteVirtualMFADeviceOutput, error) + + DetachGroupPolicyRequest(*iam.DetachGroupPolicyInput) (*request.Request, *iam.DetachGroupPolicyOutput) + + DetachGroupPolicy(*iam.DetachGroupPolicyInput) (*iam.DetachGroupPolicyOutput, error) + + DetachRolePolicyRequest(*iam.DetachRolePolicyInput) (*request.Request, *iam.DetachRolePolicyOutput) + + DetachRolePolicy(*iam.DetachRolePolicyInput) (*iam.DetachRolePolicyOutput, error) + + DetachUserPolicyRequest(*iam.DetachUserPolicyInput) (*request.Request, *iam.DetachUserPolicyOutput) + + DetachUserPolicy(*iam.DetachUserPolicyInput) (*iam.DetachUserPolicyOutput, error) + + EnableMFADeviceRequest(*iam.EnableMFADeviceInput) (*request.Request, *iam.EnableMFADeviceOutput) + + EnableMFADevice(*iam.EnableMFADeviceInput) (*iam.EnableMFADeviceOutput, error) + + GenerateCredentialReportRequest(*iam.GenerateCredentialReportInput) (*request.Request, *iam.GenerateCredentialReportOutput) + + GenerateCredentialReport(*iam.GenerateCredentialReportInput) (*iam.GenerateCredentialReportOutput, error) + + GetAccessKeyLastUsedRequest(*iam.GetAccessKeyLastUsedInput) (*request.Request, *iam.GetAccessKeyLastUsedOutput) + + GetAccessKeyLastUsed(*iam.GetAccessKeyLastUsedInput) (*iam.GetAccessKeyLastUsedOutput, error) + + GetAccountAuthorizationDetailsRequest(*iam.GetAccountAuthorizationDetailsInput) (*request.Request, *iam.GetAccountAuthorizationDetailsOutput) + + GetAccountAuthorizationDetails(*iam.GetAccountAuthorizationDetailsInput) (*iam.GetAccountAuthorizationDetailsOutput, error) + + GetAccountAuthorizationDetailsPages(*iam.GetAccountAuthorizationDetailsInput, func(*iam.GetAccountAuthorizationDetailsOutput, bool) bool) error + + GetAccountPasswordPolicyRequest(*iam.GetAccountPasswordPolicyInput) (*request.Request, *iam.GetAccountPasswordPolicyOutput) + + GetAccountPasswordPolicy(*iam.GetAccountPasswordPolicyInput) (*iam.GetAccountPasswordPolicyOutput, error) + + GetAccountSummaryRequest(*iam.GetAccountSummaryInput) (*request.Request, *iam.GetAccountSummaryOutput) + + GetAccountSummary(*iam.GetAccountSummaryInput) (*iam.GetAccountSummaryOutput, error) + + 
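+ // A usage sketch (comment only, not part of the generated file): because
+ // IAMAPI mirrors every method of iam.IAM, test code can embed the
+ // interface in a stub and override only the methods a test exercises:
+ //
+ //	type stubIAM struct{ iamiface.IAMAPI }
+ //
+ //	func (s stubIAM) GetUser(in *iam.GetUserInput) (*iam.GetUserOutput, error) {
+ //		return &iam.GetUserOutput{}, nil
+ //	}
+ 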
GetContextKeysForCustomPolicyRequest(*iam.GetContextKeysForCustomPolicyInput) (*request.Request, *iam.GetContextKeysForPolicyResponse) + + GetContextKeysForCustomPolicy(*iam.GetContextKeysForCustomPolicyInput) (*iam.GetContextKeysForPolicyResponse, error) + + GetContextKeysForPrincipalPolicyRequest(*iam.GetContextKeysForPrincipalPolicyInput) (*request.Request, *iam.GetContextKeysForPolicyResponse) + + GetContextKeysForPrincipalPolicy(*iam.GetContextKeysForPrincipalPolicyInput) (*iam.GetContextKeysForPolicyResponse, error) + + GetCredentialReportRequest(*iam.GetCredentialReportInput) (*request.Request, *iam.GetCredentialReportOutput) + + GetCredentialReport(*iam.GetCredentialReportInput) (*iam.GetCredentialReportOutput, error) + + GetGroupRequest(*iam.GetGroupInput) (*request.Request, *iam.GetGroupOutput) + + GetGroup(*iam.GetGroupInput) (*iam.GetGroupOutput, error) + + GetGroupPages(*iam.GetGroupInput, func(*iam.GetGroupOutput, bool) bool) error + + GetGroupPolicyRequest(*iam.GetGroupPolicyInput) (*request.Request, *iam.GetGroupPolicyOutput) + + GetGroupPolicy(*iam.GetGroupPolicyInput) (*iam.GetGroupPolicyOutput, error) + + GetInstanceProfileRequest(*iam.GetInstanceProfileInput) (*request.Request, *iam.GetInstanceProfileOutput) + + GetInstanceProfile(*iam.GetInstanceProfileInput) (*iam.GetInstanceProfileOutput, error) + + GetLoginProfileRequest(*iam.GetLoginProfileInput) (*request.Request, *iam.GetLoginProfileOutput) + + GetLoginProfile(*iam.GetLoginProfileInput) (*iam.GetLoginProfileOutput, error) + + GetOpenIDConnectProviderRequest(*iam.GetOpenIDConnectProviderInput) (*request.Request, *iam.GetOpenIDConnectProviderOutput) + + GetOpenIDConnectProvider(*iam.GetOpenIDConnectProviderInput) (*iam.GetOpenIDConnectProviderOutput, error) + + GetPolicyRequest(*iam.GetPolicyInput) (*request.Request, *iam.GetPolicyOutput) + + GetPolicy(*iam.GetPolicyInput) (*iam.GetPolicyOutput, error) + + GetPolicyVersionRequest(*iam.GetPolicyVersionInput) (*request.Request, *iam.GetPolicyVersionOutput) + + GetPolicyVersion(*iam.GetPolicyVersionInput) (*iam.GetPolicyVersionOutput, error) + + GetRoleRequest(*iam.GetRoleInput) (*request.Request, *iam.GetRoleOutput) + + GetRole(*iam.GetRoleInput) (*iam.GetRoleOutput, error) + + GetRolePolicyRequest(*iam.GetRolePolicyInput) (*request.Request, *iam.GetRolePolicyOutput) + + GetRolePolicy(*iam.GetRolePolicyInput) (*iam.GetRolePolicyOutput, error) + + GetSAMLProviderRequest(*iam.GetSAMLProviderInput) (*request.Request, *iam.GetSAMLProviderOutput) + + GetSAMLProvider(*iam.GetSAMLProviderInput) (*iam.GetSAMLProviderOutput, error) + + GetSSHPublicKeyRequest(*iam.GetSSHPublicKeyInput) (*request.Request, *iam.GetSSHPublicKeyOutput) + + GetSSHPublicKey(*iam.GetSSHPublicKeyInput) (*iam.GetSSHPublicKeyOutput, error) + + GetServerCertificateRequest(*iam.GetServerCertificateInput) (*request.Request, *iam.GetServerCertificateOutput) + + GetServerCertificate(*iam.GetServerCertificateInput) (*iam.GetServerCertificateOutput, error) + + GetUserRequest(*iam.GetUserInput) (*request.Request, *iam.GetUserOutput) + + GetUser(*iam.GetUserInput) (*iam.GetUserOutput, error) + + GetUserPolicyRequest(*iam.GetUserPolicyInput) (*request.Request, *iam.GetUserPolicyOutput) + + GetUserPolicy(*iam.GetUserPolicyInput) (*iam.GetUserPolicyOutput, error) + + ListAccessKeysRequest(*iam.ListAccessKeysInput) (*request.Request, *iam.ListAccessKeysOutput) + + ListAccessKeys(*iam.ListAccessKeysInput) (*iam.ListAccessKeysOutput, error) + + ListAccessKeysPages(*iam.ListAccessKeysInput, 
func(*iam.ListAccessKeysOutput, bool) bool) error + + ListAccountAliasesRequest(*iam.ListAccountAliasesInput) (*request.Request, *iam.ListAccountAliasesOutput) + + ListAccountAliases(*iam.ListAccountAliasesInput) (*iam.ListAccountAliasesOutput, error) + + ListAccountAliasesPages(*iam.ListAccountAliasesInput, func(*iam.ListAccountAliasesOutput, bool) bool) error + + ListAttachedGroupPoliciesRequest(*iam.ListAttachedGroupPoliciesInput) (*request.Request, *iam.ListAttachedGroupPoliciesOutput) + + ListAttachedGroupPolicies(*iam.ListAttachedGroupPoliciesInput) (*iam.ListAttachedGroupPoliciesOutput, error) + + ListAttachedGroupPoliciesPages(*iam.ListAttachedGroupPoliciesInput, func(*iam.ListAttachedGroupPoliciesOutput, bool) bool) error + + ListAttachedRolePoliciesRequest(*iam.ListAttachedRolePoliciesInput) (*request.Request, *iam.ListAttachedRolePoliciesOutput) + + ListAttachedRolePolicies(*iam.ListAttachedRolePoliciesInput) (*iam.ListAttachedRolePoliciesOutput, error) + + ListAttachedRolePoliciesPages(*iam.ListAttachedRolePoliciesInput, func(*iam.ListAttachedRolePoliciesOutput, bool) bool) error + + ListAttachedUserPoliciesRequest(*iam.ListAttachedUserPoliciesInput) (*request.Request, *iam.ListAttachedUserPoliciesOutput) + + ListAttachedUserPolicies(*iam.ListAttachedUserPoliciesInput) (*iam.ListAttachedUserPoliciesOutput, error) + + ListAttachedUserPoliciesPages(*iam.ListAttachedUserPoliciesInput, func(*iam.ListAttachedUserPoliciesOutput, bool) bool) error + + ListEntitiesForPolicyRequest(*iam.ListEntitiesForPolicyInput) (*request.Request, *iam.ListEntitiesForPolicyOutput) + + ListEntitiesForPolicy(*iam.ListEntitiesForPolicyInput) (*iam.ListEntitiesForPolicyOutput, error) + + ListEntitiesForPolicyPages(*iam.ListEntitiesForPolicyInput, func(*iam.ListEntitiesForPolicyOutput, bool) bool) error + + ListGroupPoliciesRequest(*iam.ListGroupPoliciesInput) (*request.Request, *iam.ListGroupPoliciesOutput) + + ListGroupPolicies(*iam.ListGroupPoliciesInput) (*iam.ListGroupPoliciesOutput, error) + + ListGroupPoliciesPages(*iam.ListGroupPoliciesInput, func(*iam.ListGroupPoliciesOutput, bool) bool) error + + ListGroupsRequest(*iam.ListGroupsInput) (*request.Request, *iam.ListGroupsOutput) + + ListGroups(*iam.ListGroupsInput) (*iam.ListGroupsOutput, error) + + ListGroupsPages(*iam.ListGroupsInput, func(*iam.ListGroupsOutput, bool) bool) error + + ListGroupsForUserRequest(*iam.ListGroupsForUserInput) (*request.Request, *iam.ListGroupsForUserOutput) + + ListGroupsForUser(*iam.ListGroupsForUserInput) (*iam.ListGroupsForUserOutput, error) + + ListGroupsForUserPages(*iam.ListGroupsForUserInput, func(*iam.ListGroupsForUserOutput, bool) bool) error + + ListInstanceProfilesRequest(*iam.ListInstanceProfilesInput) (*request.Request, *iam.ListInstanceProfilesOutput) + + ListInstanceProfiles(*iam.ListInstanceProfilesInput) (*iam.ListInstanceProfilesOutput, error) + + ListInstanceProfilesPages(*iam.ListInstanceProfilesInput, func(*iam.ListInstanceProfilesOutput, bool) bool) error + + ListInstanceProfilesForRoleRequest(*iam.ListInstanceProfilesForRoleInput) (*request.Request, *iam.ListInstanceProfilesForRoleOutput) + + ListInstanceProfilesForRole(*iam.ListInstanceProfilesForRoleInput) (*iam.ListInstanceProfilesForRoleOutput, error) + + ListInstanceProfilesForRolePages(*iam.ListInstanceProfilesForRoleInput, func(*iam.ListInstanceProfilesForRoleOutput, bool) bool) error + + ListMFADevicesRequest(*iam.ListMFADevicesInput) (*request.Request, *iam.ListMFADevicesOutput) + + ListMFADevices(*iam.ListMFADevicesInput) 
(*iam.ListMFADevicesOutput, error) + + ListMFADevicesPages(*iam.ListMFADevicesInput, func(*iam.ListMFADevicesOutput, bool) bool) error + + ListOpenIDConnectProvidersRequest(*iam.ListOpenIDConnectProvidersInput) (*request.Request, *iam.ListOpenIDConnectProvidersOutput) + + ListOpenIDConnectProviders(*iam.ListOpenIDConnectProvidersInput) (*iam.ListOpenIDConnectProvidersOutput, error) + + ListPoliciesRequest(*iam.ListPoliciesInput) (*request.Request, *iam.ListPoliciesOutput) + + ListPolicies(*iam.ListPoliciesInput) (*iam.ListPoliciesOutput, error) + + ListPoliciesPages(*iam.ListPoliciesInput, func(*iam.ListPoliciesOutput, bool) bool) error + + ListPolicyVersionsRequest(*iam.ListPolicyVersionsInput) (*request.Request, *iam.ListPolicyVersionsOutput) + + ListPolicyVersions(*iam.ListPolicyVersionsInput) (*iam.ListPolicyVersionsOutput, error) + + ListPolicyVersionsPages(*iam.ListPolicyVersionsInput, func(*iam.ListPolicyVersionsOutput, bool) bool) error + + ListRolePoliciesRequest(*iam.ListRolePoliciesInput) (*request.Request, *iam.ListRolePoliciesOutput) + + ListRolePolicies(*iam.ListRolePoliciesInput) (*iam.ListRolePoliciesOutput, error) + + ListRolePoliciesPages(*iam.ListRolePoliciesInput, func(*iam.ListRolePoliciesOutput, bool) bool) error + + ListRolesRequest(*iam.ListRolesInput) (*request.Request, *iam.ListRolesOutput) + + ListRoles(*iam.ListRolesInput) (*iam.ListRolesOutput, error) + + ListRolesPages(*iam.ListRolesInput, func(*iam.ListRolesOutput, bool) bool) error + + ListSAMLProvidersRequest(*iam.ListSAMLProvidersInput) (*request.Request, *iam.ListSAMLProvidersOutput) + + ListSAMLProviders(*iam.ListSAMLProvidersInput) (*iam.ListSAMLProvidersOutput, error) + + ListSSHPublicKeysRequest(*iam.ListSSHPublicKeysInput) (*request.Request, *iam.ListSSHPublicKeysOutput) + + ListSSHPublicKeys(*iam.ListSSHPublicKeysInput) (*iam.ListSSHPublicKeysOutput, error) + + ListSSHPublicKeysPages(*iam.ListSSHPublicKeysInput, func(*iam.ListSSHPublicKeysOutput, bool) bool) error + + ListServerCertificatesRequest(*iam.ListServerCertificatesInput) (*request.Request, *iam.ListServerCertificatesOutput) + + ListServerCertificates(*iam.ListServerCertificatesInput) (*iam.ListServerCertificatesOutput, error) + + ListServerCertificatesPages(*iam.ListServerCertificatesInput, func(*iam.ListServerCertificatesOutput, bool) bool) error + + ListSigningCertificatesRequest(*iam.ListSigningCertificatesInput) (*request.Request, *iam.ListSigningCertificatesOutput) + + ListSigningCertificates(*iam.ListSigningCertificatesInput) (*iam.ListSigningCertificatesOutput, error) + + ListSigningCertificatesPages(*iam.ListSigningCertificatesInput, func(*iam.ListSigningCertificatesOutput, bool) bool) error + + ListUserPoliciesRequest(*iam.ListUserPoliciesInput) (*request.Request, *iam.ListUserPoliciesOutput) + + ListUserPolicies(*iam.ListUserPoliciesInput) (*iam.ListUserPoliciesOutput, error) + + ListUserPoliciesPages(*iam.ListUserPoliciesInput, func(*iam.ListUserPoliciesOutput, bool) bool) error + + ListUsersRequest(*iam.ListUsersInput) (*request.Request, *iam.ListUsersOutput) + + ListUsers(*iam.ListUsersInput) (*iam.ListUsersOutput, error) + + ListUsersPages(*iam.ListUsersInput, func(*iam.ListUsersOutput, bool) bool) error + + ListVirtualMFADevicesRequest(*iam.ListVirtualMFADevicesInput) (*request.Request, *iam.ListVirtualMFADevicesOutput) + + ListVirtualMFADevices(*iam.ListVirtualMFADevicesInput) (*iam.ListVirtualMFADevicesOutput, error) + + ListVirtualMFADevicesPages(*iam.ListVirtualMFADevicesInput, func(*iam.ListVirtualMFADevicesOutput, 
bool) bool) error + + PutGroupPolicyRequest(*iam.PutGroupPolicyInput) (*request.Request, *iam.PutGroupPolicyOutput) + + PutGroupPolicy(*iam.PutGroupPolicyInput) (*iam.PutGroupPolicyOutput, error) + + PutRolePolicyRequest(*iam.PutRolePolicyInput) (*request.Request, *iam.PutRolePolicyOutput) + + PutRolePolicy(*iam.PutRolePolicyInput) (*iam.PutRolePolicyOutput, error) + + PutUserPolicyRequest(*iam.PutUserPolicyInput) (*request.Request, *iam.PutUserPolicyOutput) + + PutUserPolicy(*iam.PutUserPolicyInput) (*iam.PutUserPolicyOutput, error) + + RemoveClientIDFromOpenIDConnectProviderRequest(*iam.RemoveClientIDFromOpenIDConnectProviderInput) (*request.Request, *iam.RemoveClientIDFromOpenIDConnectProviderOutput) + + RemoveClientIDFromOpenIDConnectProvider(*iam.RemoveClientIDFromOpenIDConnectProviderInput) (*iam.RemoveClientIDFromOpenIDConnectProviderOutput, error) + + RemoveRoleFromInstanceProfileRequest(*iam.RemoveRoleFromInstanceProfileInput) (*request.Request, *iam.RemoveRoleFromInstanceProfileOutput) + + RemoveRoleFromInstanceProfile(*iam.RemoveRoleFromInstanceProfileInput) (*iam.RemoveRoleFromInstanceProfileOutput, error) + + RemoveUserFromGroupRequest(*iam.RemoveUserFromGroupInput) (*request.Request, *iam.RemoveUserFromGroupOutput) + + RemoveUserFromGroup(*iam.RemoveUserFromGroupInput) (*iam.RemoveUserFromGroupOutput, error) + + ResyncMFADeviceRequest(*iam.ResyncMFADeviceInput) (*request.Request, *iam.ResyncMFADeviceOutput) + + ResyncMFADevice(*iam.ResyncMFADeviceInput) (*iam.ResyncMFADeviceOutput, error) + + SetDefaultPolicyVersionRequest(*iam.SetDefaultPolicyVersionInput) (*request.Request, *iam.SetDefaultPolicyVersionOutput) + + SetDefaultPolicyVersion(*iam.SetDefaultPolicyVersionInput) (*iam.SetDefaultPolicyVersionOutput, error) + + SimulateCustomPolicyRequest(*iam.SimulateCustomPolicyInput) (*request.Request, *iam.SimulatePolicyResponse) + + SimulateCustomPolicy(*iam.SimulateCustomPolicyInput) (*iam.SimulatePolicyResponse, error) + + SimulateCustomPolicyPages(*iam.SimulateCustomPolicyInput, func(*iam.SimulatePolicyResponse, bool) bool) error + + SimulatePrincipalPolicyRequest(*iam.SimulatePrincipalPolicyInput) (*request.Request, *iam.SimulatePolicyResponse) + + SimulatePrincipalPolicy(*iam.SimulatePrincipalPolicyInput) (*iam.SimulatePolicyResponse, error) + + SimulatePrincipalPolicyPages(*iam.SimulatePrincipalPolicyInput, func(*iam.SimulatePolicyResponse, bool) bool) error + + UpdateAccessKeyRequest(*iam.UpdateAccessKeyInput) (*request.Request, *iam.UpdateAccessKeyOutput) + + UpdateAccessKey(*iam.UpdateAccessKeyInput) (*iam.UpdateAccessKeyOutput, error) + + UpdateAccountPasswordPolicyRequest(*iam.UpdateAccountPasswordPolicyInput) (*request.Request, *iam.UpdateAccountPasswordPolicyOutput) + + UpdateAccountPasswordPolicy(*iam.UpdateAccountPasswordPolicyInput) (*iam.UpdateAccountPasswordPolicyOutput, error) + + UpdateAssumeRolePolicyRequest(*iam.UpdateAssumeRolePolicyInput) (*request.Request, *iam.UpdateAssumeRolePolicyOutput) + + UpdateAssumeRolePolicy(*iam.UpdateAssumeRolePolicyInput) (*iam.UpdateAssumeRolePolicyOutput, error) + + UpdateGroupRequest(*iam.UpdateGroupInput) (*request.Request, *iam.UpdateGroupOutput) + + UpdateGroup(*iam.UpdateGroupInput) (*iam.UpdateGroupOutput, error) + + UpdateLoginProfileRequest(*iam.UpdateLoginProfileInput) (*request.Request, *iam.UpdateLoginProfileOutput) + + UpdateLoginProfile(*iam.UpdateLoginProfileInput) (*iam.UpdateLoginProfileOutput, error) + + UpdateOpenIDConnectProviderThumbprintRequest(*iam.UpdateOpenIDConnectProviderThumbprintInput) 
(*request.Request, *iam.UpdateOpenIDConnectProviderThumbprintOutput) + + UpdateOpenIDConnectProviderThumbprint(*iam.UpdateOpenIDConnectProviderThumbprintInput) (*iam.UpdateOpenIDConnectProviderThumbprintOutput, error) + + UpdateSAMLProviderRequest(*iam.UpdateSAMLProviderInput) (*request.Request, *iam.UpdateSAMLProviderOutput) + + UpdateSAMLProvider(*iam.UpdateSAMLProviderInput) (*iam.UpdateSAMLProviderOutput, error) + + UpdateSSHPublicKeyRequest(*iam.UpdateSSHPublicKeyInput) (*request.Request, *iam.UpdateSSHPublicKeyOutput) + + UpdateSSHPublicKey(*iam.UpdateSSHPublicKeyInput) (*iam.UpdateSSHPublicKeyOutput, error) + + UpdateServerCertificateRequest(*iam.UpdateServerCertificateInput) (*request.Request, *iam.UpdateServerCertificateOutput) + + UpdateServerCertificate(*iam.UpdateServerCertificateInput) (*iam.UpdateServerCertificateOutput, error) + + UpdateSigningCertificateRequest(*iam.UpdateSigningCertificateInput) (*request.Request, *iam.UpdateSigningCertificateOutput) + + UpdateSigningCertificate(*iam.UpdateSigningCertificateInput) (*iam.UpdateSigningCertificateOutput, error) + + UpdateUserRequest(*iam.UpdateUserInput) (*request.Request, *iam.UpdateUserOutput) + + UpdateUser(*iam.UpdateUserInput) (*iam.UpdateUserOutput, error) + + UploadSSHPublicKeyRequest(*iam.UploadSSHPublicKeyInput) (*request.Request, *iam.UploadSSHPublicKeyOutput) + + UploadSSHPublicKey(*iam.UploadSSHPublicKeyInput) (*iam.UploadSSHPublicKeyOutput, error) + + UploadServerCertificateRequest(*iam.UploadServerCertificateInput) (*request.Request, *iam.UploadServerCertificateOutput) + + UploadServerCertificate(*iam.UploadServerCertificateInput) (*iam.UploadServerCertificateOutput, error) + + UploadSigningCertificateRequest(*iam.UploadSigningCertificateInput) (*request.Request, *iam.UploadSigningCertificateOutput) + + UploadSigningCertificate(*iam.UploadSigningCertificateInput) (*iam.UploadSigningCertificateOutput, error) +} + +var _ IAMAPI = (*iam.IAM)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/iam/service.go b/vendor/github.com/aws/aws-sdk-go/service/iam/service.go new file mode 100644 index 000000000..362916f98 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/iam/service.go @@ -0,0 +1,139 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package iam + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +// AWS Identity and Access Management (IAM) is a web service that you can use +// to manage users and user permissions under your AWS account. This guide provides +// descriptions of IAM actions that you can call programmatically. For general +// information about IAM, see AWS Identity and Access Management (IAM) (http://aws.amazon.com/iam/). +// For the user guide for IAM, see Using IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/). +// +// AWS provides SDKs that consist of libraries and sample code for various +// programming languages and platforms (Java, Ruby, .NET, iOS, Android, etc.). +// The SDKs provide a convenient way to create programmatic access to IAM and +// AWS. For example, the SDKs take care of tasks such as cryptographically signing +// requests (see below), managing errors, and retrying requests automatically. 
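+//
+// As an illustration (editor's sketch; mySession stands for an existing
+// *session.Session), a call such as the following is signed, sent, and
+// retried by the SDK without any hand-written signing code:
+//
+//	svc := iam.New(mySession)
+//	out, err := svc.GetUser(&iam.GetUserInput{}) // with no UserName set, describes the caller
+//	if err == nil {
+//		fmt.Println(aws.StringValue(out.User.UserName))
+//	}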
+// For information about the AWS SDKs, including how to download and install +// them, see the Tools for Amazon Web Services (http://aws.amazon.com/tools/) +// page. +// +// We recommend that you use the AWS SDKs to make programmatic API calls to +// IAM. However, you can also use the IAM Query API to make direct calls to +// the IAM web service. To learn more about the IAM Query API, see Making Query +// Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) +// in the Using IAM guide. IAM supports GET and POST requests for all actions. +// That is, the API does not require you to use GET for some actions and POST +// for others. However, GET requests are subject to the size limitation of a +// URL. Therefore, for operations that require larger sizes, use a POST request. +// +// Signing Requests +// +// Requests must be signed using an access key ID and a secret access key. +// We strongly recommend that you do not use your AWS account access key ID +// and secret access key for everyday work with IAM. You can use the access +// key ID and secret access key for an IAM user or you can use the AWS Security +// Token Service to generate temporary security credentials and use those to +// sign requests. +// +// To sign requests, we recommend that you use Signature Version 4 (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). +// If you have an existing application that uses Signature Version 2, you do +// not have to update it to use Signature Version 4. However, some operations +// now require Signature Version 4. The documentation for operations that require +// version 4 indicates this requirement. +// +// Additional Resources +// +// For more information, see the following: +// +// AWS Security Credentials (http://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html). +// This topic provides general information about the types of credentials used +// for accessing AWS. +// +// IAM Best Practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAMBestPractices.html). +// This topic presents a list of suggestions for using the IAM service to help +// secure your AWS resources. +// +// Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html). +// This set of topics walks you through the process of signing a request using +// an access key ID and secret access key. +// +// The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type IAM struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "iam" + +// New creates a new instance of the IAM client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create an IAM client from just a session. +// svc := iam.New(mySession) +// +// // Create an IAM client with additional configuration +// svc := iam.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *IAM { + c := p.ClientConfig(ServiceName, cfgs...)
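+	// (Editor's note, not part of the generated file) ClientConfig merges the
+	// session's defaults with any per-client aws.Config overrides and resolves
+	// the endpoint and signing region that newClient wires into the client
+	// metadata below.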
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *IAM { + svc := &IAM{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2010-05-08", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a IAM operation and runs any +// custom request initialization. +func (c *IAM) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/iam/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/iam/waiters.go new file mode 100644 index 000000000..b27303052 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/iam/waiters.go @@ -0,0 +1,65 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package iam + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *IAM) WaitUntilInstanceProfileExists(input *GetInstanceProfileInput) error { + waiterCfg := waiter.Config{ + Operation: "GetInstanceProfile", + Delay: 1, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 200, + }, + { + State: "retry", + Matcher: "status", + Argument: "", + Expected: 404, + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *IAM) WaitUntilUserExists(input *GetUserInput) error { + waiterCfg := waiter.Config{ + Operation: "GetUser", + Delay: 1, + MaxAttempts: 20, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 200, + }, + { + State: "retry", + Matcher: "error", + Argument: "", + Expected: "NoSuchEntity", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/inspector/api.go b/vendor/github.com/aws/aws-sdk-go/service/inspector/api.go new file mode 100644 index 000000000..ea22fe1af --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/inspector/api.go @@ -0,0 +1,4836 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package inspector provides a client for Amazon Inspector. +package inspector + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opAddAttributesToFindings = "AddAttributesToFindings" + +// AddAttributesToFindingsRequest generates a "aws/request.Request" representing the +// client's request for the AddAttributesToFindings operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddAttributesToFindings method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddAttributesToFindingsRequest method. +// req, resp := client.AddAttributesToFindingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Inspector) AddAttributesToFindingsRequest(input *AddAttributesToFindingsInput) (req *request.Request, output *AddAttributesToFindingsOutput) { + op := &request.Operation{ + Name: opAddAttributesToFindings, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddAttributesToFindingsInput{} + } + + req = c.newRequest(op, input, output) + output = &AddAttributesToFindingsOutput{} + req.Data = output + return +} + +// Assigns attributes (key and value pairs) to the findings that are specified +// by the ARNs of the findings. +func (c *Inspector) AddAttributesToFindings(input *AddAttributesToFindingsInput) (*AddAttributesToFindingsOutput, error) { + req, out := c.AddAttributesToFindingsRequest(input) + err := req.Send() + return out, err +} + +const opCreateAssessmentTarget = "CreateAssessmentTarget" + +// CreateAssessmentTargetRequest generates a "aws/request.Request" representing the +// client's request for the CreateAssessmentTarget operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateAssessmentTarget method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateAssessmentTargetRequest method. +// req, resp := client.CreateAssessmentTargetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Inspector) CreateAssessmentTargetRequest(input *CreateAssessmentTargetInput) (req *request.Request, output *CreateAssessmentTargetOutput) { + op := &request.Operation{ + Name: opCreateAssessmentTarget, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateAssessmentTargetInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateAssessmentTargetOutput{} + req.Data = output + return +} + +// Creates a new assessment target using the ARN of the resource group that +// is generated by CreateResourceGroup. You can create up to 50 assessment targets +// per AWS account. You can run up to 500 concurrent agents per AWS account. +// For more information, see Amazon Inspector Assessment Targets (http://docs.aws.amazon.com/inspector/latest/userguide/inspector_applications.html). 
+func (c *Inspector) CreateAssessmentTarget(input *CreateAssessmentTargetInput) (*CreateAssessmentTargetOutput, error) { + req, out := c.CreateAssessmentTargetRequest(input) + err := req.Send() + return out, err +} + +const opCreateAssessmentTemplate = "CreateAssessmentTemplate" + +// CreateAssessmentTemplateRequest generates a "aws/request.Request" representing the +// client's request for the CreateAssessmentTemplate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateAssessmentTemplate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateAssessmentTemplateRequest method. +// req, resp := client.CreateAssessmentTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Inspector) CreateAssessmentTemplateRequest(input *CreateAssessmentTemplateInput) (req *request.Request, output *CreateAssessmentTemplateOutput) { + op := &request.Operation{ + Name: opCreateAssessmentTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateAssessmentTemplateInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateAssessmentTemplateOutput{} + req.Data = output + return +} + +// Creates an assessment template for the assessment target that is specified +// by the ARN of the assessment target. +func (c *Inspector) CreateAssessmentTemplate(input *CreateAssessmentTemplateInput) (*CreateAssessmentTemplateOutput, error) { + req, out := c.CreateAssessmentTemplateRequest(input) + err := req.Send() + return out, err +} + +const opCreateResourceGroup = "CreateResourceGroup" + +// CreateResourceGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateResourceGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateResourceGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateResourceGroupRequest method. 
+// req, resp := client.CreateResourceGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Inspector) CreateResourceGroupRequest(input *CreateResourceGroupInput) (req *request.Request, output *CreateResourceGroupOutput) { + op := &request.Operation{ + Name: opCreateResourceGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateResourceGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateResourceGroupOutput{} + req.Data = output + return +} + +// Creates a resource group using the specified set of tags (key and value pairs) +// that are used to select the EC2 instances to be included in an Amazon Inspector +// assessment target. The created resource group is then used to create an Amazon +// Inspector assessment target. For more information, see CreateAssessmentTarget. +func (c *Inspector) CreateResourceGroup(input *CreateResourceGroupInput) (*CreateResourceGroupOutput, error) { + req, out := c.CreateResourceGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteAssessmentRun = "DeleteAssessmentRun" + +// DeleteAssessmentRunRequest generates a "aws/request.Request" representing the +// client's request for the DeleteAssessmentRun operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteAssessmentRun method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteAssessmentRunRequest method. +// req, resp := client.DeleteAssessmentRunRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Inspector) DeleteAssessmentRunRequest(input *DeleteAssessmentRunInput) (req *request.Request, output *DeleteAssessmentRunOutput) { + op := &request.Operation{ + Name: opDeleteAssessmentRun, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAssessmentRunInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteAssessmentRunOutput{} + req.Data = output + return +} + +// Deletes the assessment run that is specified by the ARN of the assessment +// run. +func (c *Inspector) DeleteAssessmentRun(input *DeleteAssessmentRunInput) (*DeleteAssessmentRunOutput, error) { + req, out := c.DeleteAssessmentRunRequest(input) + err := req.Send() + return out, err +} + +const opDeleteAssessmentTarget = "DeleteAssessmentTarget" + +// DeleteAssessmentTargetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteAssessmentTarget operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteAssessmentTarget method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteAssessmentTargetRequest method. +// req, resp := client.DeleteAssessmentTargetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Inspector) DeleteAssessmentTargetRequest(input *DeleteAssessmentTargetInput) (req *request.Request, output *DeleteAssessmentTargetOutput) { + op := &request.Operation{ + Name: opDeleteAssessmentTarget, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAssessmentTargetInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteAssessmentTargetOutput{} + req.Data = output + return +} + +// Deletes the assessment target that is specified by the ARN of the assessment +// target. +func (c *Inspector) DeleteAssessmentTarget(input *DeleteAssessmentTargetInput) (*DeleteAssessmentTargetOutput, error) { + req, out := c.DeleteAssessmentTargetRequest(input) + err := req.Send() + return out, err +} + +const opDeleteAssessmentTemplate = "DeleteAssessmentTemplate" + +// DeleteAssessmentTemplateRequest generates a "aws/request.Request" representing the +// client's request for the DeleteAssessmentTemplate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteAssessmentTemplate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteAssessmentTemplateRequest method. +// req, resp := client.DeleteAssessmentTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Inspector) DeleteAssessmentTemplateRequest(input *DeleteAssessmentTemplateInput) (req *request.Request, output *DeleteAssessmentTemplateOutput) { + op := &request.Operation{ + Name: opDeleteAssessmentTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAssessmentTemplateInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteAssessmentTemplateOutput{} + req.Data = output + return +} + +// Deletes the assessment template that is specified by the ARN of the assessment +// template. 
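+//
+// (Editor's note) As with the other Delete operations in this file, the
+// request constructor above removes jsonrpc.UnmarshalHandler and pushes
+// protocol.UnmarshalDiscardBodyHandler, because these operations return an
+// empty response body that should simply be discarded.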
+func (c *Inspector) DeleteAssessmentTemplate(input *DeleteAssessmentTemplateInput) (*DeleteAssessmentTemplateOutput, error) { + req, out := c.DeleteAssessmentTemplateRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAssessmentRuns = "DescribeAssessmentRuns" + +// DescribeAssessmentRunsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAssessmentRuns operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeAssessmentRuns method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeAssessmentRunsRequest method. +// req, resp := client.DescribeAssessmentRunsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Inspector) DescribeAssessmentRunsRequest(input *DescribeAssessmentRunsInput) (req *request.Request, output *DescribeAssessmentRunsOutput) { + op := &request.Operation{ + Name: opDescribeAssessmentRuns, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAssessmentRunsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAssessmentRunsOutput{} + req.Data = output + return +} + +// Describes the assessment runs that are specified by the ARNs of the assessment +// runs. +func (c *Inspector) DescribeAssessmentRuns(input *DescribeAssessmentRunsInput) (*DescribeAssessmentRunsOutput, error) { + req, out := c.DescribeAssessmentRunsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAssessmentTargets = "DescribeAssessmentTargets" + +// DescribeAssessmentTargetsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAssessmentTargets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeAssessmentTargets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeAssessmentTargetsRequest method. 
+// req, resp := client.DescribeAssessmentTargetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Inspector) DescribeAssessmentTargetsRequest(input *DescribeAssessmentTargetsInput) (req *request.Request, output *DescribeAssessmentTargetsOutput) { + op := &request.Operation{ + Name: opDescribeAssessmentTargets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAssessmentTargetsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAssessmentTargetsOutput{} + req.Data = output + return +} + +// Describes the assessment targets that are specified by the ARNs of the assessment +// targets. +func (c *Inspector) DescribeAssessmentTargets(input *DescribeAssessmentTargetsInput) (*DescribeAssessmentTargetsOutput, error) { + req, out := c.DescribeAssessmentTargetsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAssessmentTemplates = "DescribeAssessmentTemplates" + +// DescribeAssessmentTemplatesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAssessmentTemplates operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeAssessmentTemplates method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeAssessmentTemplatesRequest method. +// req, resp := client.DescribeAssessmentTemplatesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Inspector) DescribeAssessmentTemplatesRequest(input *DescribeAssessmentTemplatesInput) (req *request.Request, output *DescribeAssessmentTemplatesOutput) { + op := &request.Operation{ + Name: opDescribeAssessmentTemplates, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAssessmentTemplatesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAssessmentTemplatesOutput{} + req.Data = output + return +} + +// Describes the assessment templates that are specified by the ARNs of the +// assessment templates. +func (c *Inspector) DescribeAssessmentTemplates(input *DescribeAssessmentTemplatesInput) (*DescribeAssessmentTemplatesOutput, error) { + req, out := c.DescribeAssessmentTemplatesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeCrossAccountAccessRole = "DescribeCrossAccountAccessRole" + +// DescribeCrossAccountAccessRoleRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCrossAccountAccessRole operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the DescribeCrossAccountAccessRole method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeCrossAccountAccessRoleRequest method. +// req, resp := client.DescribeCrossAccountAccessRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Inspector) DescribeCrossAccountAccessRoleRequest(input *DescribeCrossAccountAccessRoleInput) (req *request.Request, output *DescribeCrossAccountAccessRoleOutput) { + op := &request.Operation{ + Name: opDescribeCrossAccountAccessRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeCrossAccountAccessRoleInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCrossAccountAccessRoleOutput{} + req.Data = output + return +} + +// Describes the IAM role that enables Amazon Inspector to access your AWS account. +func (c *Inspector) DescribeCrossAccountAccessRole(input *DescribeCrossAccountAccessRoleInput) (*DescribeCrossAccountAccessRoleOutput, error) { + req, out := c.DescribeCrossAccountAccessRoleRequest(input) + err := req.Send() + return out, err +} + +const opDescribeFindings = "DescribeFindings" + +// DescribeFindingsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeFindings operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeFindings method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeFindingsRequest method. +// req, resp := client.DescribeFindingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Inspector) DescribeFindingsRequest(input *DescribeFindingsInput) (req *request.Request, output *DescribeFindingsOutput) { + op := &request.Operation{ + Name: opDescribeFindings, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeFindingsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeFindingsOutput{} + req.Data = output + return +} + +// Describes the findings that are specified by the ARNs of the findings. +func (c *Inspector) DescribeFindings(input *DescribeFindingsInput) (*DescribeFindingsOutput, error) { + req, out := c.DescribeFindingsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeResourceGroups = "DescribeResourceGroups" + +// DescribeResourceGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeResourceGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
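+//
+// For instance (editor's sketch), a custom handler can be attached to the
+// returned request before Send:
+//
+//	req, resp := client.DescribeResourceGroupsRequest(params)
+//	req.Handlers.Send.PushFront(func(r *request.Request) {
+//		// inspect or mutate r.HTTPRequest before it goes over the wire
+//	})
+//	err := req.Send()
+//	_ = resp // resp is populated once Send returns without error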
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeResourceGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeResourceGroupsRequest method. +// req, resp := client.DescribeResourceGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Inspector) DescribeResourceGroupsRequest(input *DescribeResourceGroupsInput) (req *request.Request, output *DescribeResourceGroupsOutput) { + op := &request.Operation{ + Name: opDescribeResourceGroups, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeResourceGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeResourceGroupsOutput{} + req.Data = output + return +} + +// Describes the resource groups that are specified by the ARNs of the resource +// groups. +func (c *Inspector) DescribeResourceGroups(input *DescribeResourceGroupsInput) (*DescribeResourceGroupsOutput, error) { + req, out := c.DescribeResourceGroupsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeRulesPackages = "DescribeRulesPackages" + +// DescribeRulesPackagesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeRulesPackages operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeRulesPackages method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeRulesPackagesRequest method. +// req, resp := client.DescribeRulesPackagesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Inspector) DescribeRulesPackagesRequest(input *DescribeRulesPackagesInput) (req *request.Request, output *DescribeRulesPackagesOutput) { + op := &request.Operation{ + Name: opDescribeRulesPackages, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeRulesPackagesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeRulesPackagesOutput{} + req.Data = output + return +} + +// Describes the rules packages that are specified by the ARNs of the rules +// packages. +func (c *Inspector) DescribeRulesPackages(input *DescribeRulesPackagesInput) (*DescribeRulesPackagesOutput, error) { + req, out := c.DescribeRulesPackagesRequest(input) + err := req.Send() + return out, err +} + +const opGetTelemetryMetadata = "GetTelemetryMetadata" + +// GetTelemetryMetadataRequest generates a "aws/request.Request" representing the +// client's request for the GetTelemetryMetadata operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetTelemetryMetadata method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetTelemetryMetadataRequest method. +// req, resp := client.GetTelemetryMetadataRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Inspector) GetTelemetryMetadataRequest(input *GetTelemetryMetadataInput) (req *request.Request, output *GetTelemetryMetadataOutput) { + op := &request.Operation{ + Name: opGetTelemetryMetadata, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetTelemetryMetadataInput{} + } + + req = c.newRequest(op, input, output) + output = &GetTelemetryMetadataOutput{} + req.Data = output + return +} + +// Information about the data that is collected for the specified assessment +// run. +func (c *Inspector) GetTelemetryMetadata(input *GetTelemetryMetadataInput) (*GetTelemetryMetadataOutput, error) { + req, out := c.GetTelemetryMetadataRequest(input) + err := req.Send() + return out, err +} + +const opListAssessmentRunAgents = "ListAssessmentRunAgents" + +// ListAssessmentRunAgentsRequest generates a "aws/request.Request" representing the +// client's request for the ListAssessmentRunAgents operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListAssessmentRunAgents method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListAssessmentRunAgentsRequest method. +// req, resp := client.ListAssessmentRunAgentsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Inspector) ListAssessmentRunAgentsRequest(input *ListAssessmentRunAgentsInput) (req *request.Request, output *ListAssessmentRunAgentsOutput) { + op := &request.Operation{ + Name: opListAssessmentRunAgents, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListAssessmentRunAgentsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListAssessmentRunAgentsOutput{} + req.Data = output + return +} + +// Lists the agents of the assessment runs that are specified by the ARNs of +// the assessment runs. 
+func (c *Inspector) ListAssessmentRunAgents(input *ListAssessmentRunAgentsInput) (*ListAssessmentRunAgentsOutput, error) { + req, out := c.ListAssessmentRunAgentsRequest(input) + err := req.Send() + return out, err +} + +const opListAssessmentRuns = "ListAssessmentRuns" + +// ListAssessmentRunsRequest generates a "aws/request.Request" representing the +// client's request for the ListAssessmentRuns operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListAssessmentRuns method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListAssessmentRunsRequest method. +// req, resp := client.ListAssessmentRunsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Inspector) ListAssessmentRunsRequest(input *ListAssessmentRunsInput) (req *request.Request, output *ListAssessmentRunsOutput) { + op := &request.Operation{ + Name: opListAssessmentRuns, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListAssessmentRunsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListAssessmentRunsOutput{} + req.Data = output + return +} + +// Lists the assessment runs that correspond to the assessment templates that +// are specified by the ARNs of the assessment templates. +func (c *Inspector) ListAssessmentRuns(input *ListAssessmentRunsInput) (*ListAssessmentRunsOutput, error) { + req, out := c.ListAssessmentRunsRequest(input) + err := req.Send() + return out, err +} + +const opListAssessmentTargets = "ListAssessmentTargets" + +// ListAssessmentTargetsRequest generates a "aws/request.Request" representing the +// client's request for the ListAssessmentTargets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListAssessmentTargets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListAssessmentTargetsRequest method. +// req, resp := client.ListAssessmentTargetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Inspector) ListAssessmentTargetsRequest(input *ListAssessmentTargetsInput) (req *request.Request, output *ListAssessmentTargetsOutput) { + op := &request.Operation{ + Name: opListAssessmentTargets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListAssessmentTargetsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListAssessmentTargetsOutput{} + req.Data = output + return +} + +// Lists the ARNs of the assessment targets within this AWS account. 
For more +// information about assessment targets, see Amazon Inspector Assessment Targets +// (http://docs.aws.amazon.com/inspector/latest/userguide/inspector_applications.html). +func (c *Inspector) ListAssessmentTargets(input *ListAssessmentTargetsInput) (*ListAssessmentTargetsOutput, error) { + req, out := c.ListAssessmentTargetsRequest(input) + err := req.Send() + return out, err +} + +const opListAssessmentTemplates = "ListAssessmentTemplates" + +// ListAssessmentTemplatesRequest generates a "aws/request.Request" representing the +// client's request for the ListAssessmentTemplates operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListAssessmentTemplates method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListAssessmentTemplatesRequest method. +// req, resp := client.ListAssessmentTemplatesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Inspector) ListAssessmentTemplatesRequest(input *ListAssessmentTemplatesInput) (req *request.Request, output *ListAssessmentTemplatesOutput) { + op := &request.Operation{ + Name: opListAssessmentTemplates, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListAssessmentTemplatesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListAssessmentTemplatesOutput{} + req.Data = output + return +} + +// Lists the assessment templates that correspond to the assessment targets +// that are specified by the ARNs of the assessment targets. +func (c *Inspector) ListAssessmentTemplates(input *ListAssessmentTemplatesInput) (*ListAssessmentTemplatesOutput, error) { + req, out := c.ListAssessmentTemplatesRequest(input) + err := req.Send() + return out, err +} + +const opListEventSubscriptions = "ListEventSubscriptions" + +// ListEventSubscriptionsRequest generates a "aws/request.Request" representing the +// client's request for the ListEventSubscriptions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListEventSubscriptions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListEventSubscriptionsRequest method. 
+// req, resp := client.ListEventSubscriptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Inspector) ListEventSubscriptionsRequest(input *ListEventSubscriptionsInput) (req *request.Request, output *ListEventSubscriptionsOutput) { + op := &request.Operation{ + Name: opListEventSubscriptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListEventSubscriptionsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListEventSubscriptionsOutput{} + req.Data = output + return +} + +// Lists all the event subscriptions for the assessment template that is specified +// by the ARN of the assessment template. For more information, see SubscribeToEvent +// and UnsubscribeFromEvent. +func (c *Inspector) ListEventSubscriptions(input *ListEventSubscriptionsInput) (*ListEventSubscriptionsOutput, error) { + req, out := c.ListEventSubscriptionsRequest(input) + err := req.Send() + return out, err +} + +const opListFindings = "ListFindings" + +// ListFindingsRequest generates a "aws/request.Request" representing the +// client's request for the ListFindings operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListFindings method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListFindingsRequest method. +// req, resp := client.ListFindingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Inspector) ListFindingsRequest(input *ListFindingsInput) (req *request.Request, output *ListFindingsOutput) { + op := &request.Operation{ + Name: opListFindings, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListFindingsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListFindingsOutput{} + req.Data = output + return +} + +// Lists findings that are generated by the assessment runs that are specified +// by the ARNs of the assessment runs. +func (c *Inspector) ListFindings(input *ListFindingsInput) (*ListFindingsOutput, error) { + req, out := c.ListFindingsRequest(input) + err := req.Send() + return out, err +} + +const opListRulesPackages = "ListRulesPackages" + +// ListRulesPackagesRequest generates a "aws/request.Request" representing the +// client's request for the ListRulesPackages operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListRulesPackages method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListRulesPackagesRequest method. 
+// req, resp := client.ListRulesPackagesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Inspector) ListRulesPackagesRequest(input *ListRulesPackagesInput) (req *request.Request, output *ListRulesPackagesOutput) { + op := &request.Operation{ + Name: opListRulesPackages, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListRulesPackagesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListRulesPackagesOutput{} + req.Data = output + return +} + +// Lists all available Amazon Inspector rules packages. +func (c *Inspector) ListRulesPackages(input *ListRulesPackagesInput) (*ListRulesPackagesOutput, error) { + req, out := c.ListRulesPackagesRequest(input) + err := req.Send() + return out, err +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTagsForResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Inspector) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTagsForResourceOutput{} + req.Data = output + return +} + +// Lists all tags associated with an assessment template. +func (c *Inspector) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + err := req.Send() + return out, err +} + +const opPreviewAgents = "PreviewAgents" + +// PreviewAgentsRequest generates a "aws/request.Request" representing the +// client's request for the PreviewAgents operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PreviewAgents method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PreviewAgentsRequest method. 
+// req, resp := client.PreviewAgentsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Inspector) PreviewAgentsRequest(input *PreviewAgentsInput) (req *request.Request, output *PreviewAgentsOutput) { + op := &request.Operation{ + Name: opPreviewAgents, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PreviewAgentsInput{} + } + + req = c.newRequest(op, input, output) + output = &PreviewAgentsOutput{} + req.Data = output + return +} + +// Previews the agents installed on the EC2 instances that are part of the specified +// assessment target. +func (c *Inspector) PreviewAgents(input *PreviewAgentsInput) (*PreviewAgentsOutput, error) { + req, out := c.PreviewAgentsRequest(input) + err := req.Send() + return out, err +} + +const opRegisterCrossAccountAccessRole = "RegisterCrossAccountAccessRole" + +// RegisterCrossAccountAccessRoleRequest generates a "aws/request.Request" representing the +// client's request for the RegisterCrossAccountAccessRole operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RegisterCrossAccountAccessRole method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RegisterCrossAccountAccessRoleRequest method. +// req, resp := client.RegisterCrossAccountAccessRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Inspector) RegisterCrossAccountAccessRoleRequest(input *RegisterCrossAccountAccessRoleInput) (req *request.Request, output *RegisterCrossAccountAccessRoleOutput) { + op := &request.Operation{ + Name: opRegisterCrossAccountAccessRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterCrossAccountAccessRoleInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RegisterCrossAccountAccessRoleOutput{} + req.Data = output + return +} + +// Registers the IAM role that Amazon Inspector uses to list your EC2 instances +// at the start of the assessment run or when you call the PreviewAgents action. +func (c *Inspector) RegisterCrossAccountAccessRole(input *RegisterCrossAccountAccessRoleInput) (*RegisterCrossAccountAccessRoleOutput, error) { + req, out := c.RegisterCrossAccountAccessRoleRequest(input) + err := req.Send() + return out, err +} + +const opRemoveAttributesFromFindings = "RemoveAttributesFromFindings" + +// RemoveAttributesFromFindingsRequest generates a "aws/request.Request" representing the +// client's request for the RemoveAttributesFromFindings operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RemoveAttributesFromFindings method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RemoveAttributesFromFindingsRequest method. +// req, resp := client.RemoveAttributesFromFindingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Inspector) RemoveAttributesFromFindingsRequest(input *RemoveAttributesFromFindingsInput) (req *request.Request, output *RemoveAttributesFromFindingsOutput) { + op := &request.Operation{ + Name: opRemoveAttributesFromFindings, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveAttributesFromFindingsInput{} + } + + req = c.newRequest(op, input, output) + output = &RemoveAttributesFromFindingsOutput{} + req.Data = output + return +} + +// Removes entire attributes (key and value pairs) from the findings that are +// specified by the ARNs of the findings where an attribute with the specified +// key exists. +func (c *Inspector) RemoveAttributesFromFindings(input *RemoveAttributesFromFindingsInput) (*RemoveAttributesFromFindingsOutput, error) { + req, out := c.RemoveAttributesFromFindingsRequest(input) + err := req.Send() + return out, err +} + +const opSetTagsForResource = "SetTagsForResource" + +// SetTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the SetTagsForResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetTagsForResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetTagsForResourceRequest method. +// req, resp := client.SetTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Inspector) SetTagsForResourceRequest(input *SetTagsForResourceInput) (req *request.Request, output *SetTagsForResourceOutput) { + op := &request.Operation{ + Name: opSetTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetTagsForResourceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetTagsForResourceOutput{} + req.Data = output + return +} + +// Sets tags (key and value pairs) to the assessment template that is specified +// by the ARN of the assessment template. 
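+//
+// A minimal sketch of a direct call, assuming an already-configured session;
+// the ARN and tag values below are placeholders, not real resources:
+//
+//    svc := inspector.New(session.New())
+//    _, err := svc.SetTagsForResource(&inspector.SetTagsForResourceInput{
+//        ResourceArn: aws.String("arn:aws:inspector:us-west-2:123456789012:target/0-example/template/0-example"),
+//        Tags: []*inspector.Tag{
+//            {Key: aws.String("Stage"), Value: aws.String("Prod")},
+//        },
+//    })
+//    if err != nil {
+//        fmt.Println(err) // access or not-found errors from the service surface here
+//    }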
+func (c *Inspector) SetTagsForResource(input *SetTagsForResourceInput) (*SetTagsForResourceOutput, error) { + req, out := c.SetTagsForResourceRequest(input) + err := req.Send() + return out, err +} + +const opStartAssessmentRun = "StartAssessmentRun" + +// StartAssessmentRunRequest generates a "aws/request.Request" representing the +// client's request for the StartAssessmentRun operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the StartAssessmentRun method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the StartAssessmentRunRequest method. +// req, resp := client.StartAssessmentRunRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Inspector) StartAssessmentRunRequest(input *StartAssessmentRunInput) (req *request.Request, output *StartAssessmentRunOutput) { + op := &request.Operation{ + Name: opStartAssessmentRun, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartAssessmentRunInput{} + } + + req = c.newRequest(op, input, output) + output = &StartAssessmentRunOutput{} + req.Data = output + return +} + +// Starts the assessment run specified by the ARN of the assessment template. +// For this API to function properly, you must not exceed the limit of running +// up to 500 concurrent agents per AWS account. +func (c *Inspector) StartAssessmentRun(input *StartAssessmentRunInput) (*StartAssessmentRunOutput, error) { + req, out := c.StartAssessmentRunRequest(input) + err := req.Send() + return out, err +} + +const opStopAssessmentRun = "StopAssessmentRun" + +// StopAssessmentRunRequest generates a "aws/request.Request" representing the +// client's request for the StopAssessmentRun operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the StopAssessmentRun method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the StopAssessmentRunRequest method. 
+// req, resp := client.StopAssessmentRunRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Inspector) StopAssessmentRunRequest(input *StopAssessmentRunInput) (req *request.Request, output *StopAssessmentRunOutput) { + op := &request.Operation{ + Name: opStopAssessmentRun, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopAssessmentRunInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &StopAssessmentRunOutput{} + req.Data = output + return +} + +// Stops the assessment run that is specified by the ARN of the assessment run. +func (c *Inspector) StopAssessmentRun(input *StopAssessmentRunInput) (*StopAssessmentRunOutput, error) { + req, out := c.StopAssessmentRunRequest(input) + err := req.Send() + return out, err +} + +const opSubscribeToEvent = "SubscribeToEvent" + +// SubscribeToEventRequest generates a "aws/request.Request" representing the +// client's request for the SubscribeToEvent operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SubscribeToEvent method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SubscribeToEventRequest method. +// req, resp := client.SubscribeToEventRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Inspector) SubscribeToEventRequest(input *SubscribeToEventInput) (req *request.Request, output *SubscribeToEventOutput) { + op := &request.Operation{ + Name: opSubscribeToEvent, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SubscribeToEventInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SubscribeToEventOutput{} + req.Data = output + return +} + +// Enables the process of sending Amazon Simple Notification Service (SNS) notifications +// about a specified event to a specified SNS topic. +func (c *Inspector) SubscribeToEvent(input *SubscribeToEventInput) (*SubscribeToEventOutput, error) { + req, out := c.SubscribeToEventRequest(input) + err := req.Send() + return out, err +} + +const opUnsubscribeFromEvent = "UnsubscribeFromEvent" + +// UnsubscribeFromEventRequest generates a "aws/request.Request" representing the +// client's request for the UnsubscribeFromEvent operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UnsubscribeFromEvent method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UnsubscribeFromEventRequest method. +// req, resp := client.UnsubscribeFromEventRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Inspector) UnsubscribeFromEventRequest(input *UnsubscribeFromEventInput) (req *request.Request, output *UnsubscribeFromEventOutput) { + op := &request.Operation{ + Name: opUnsubscribeFromEvent, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UnsubscribeFromEventInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UnsubscribeFromEventOutput{} + req.Data = output + return +} + +// Disables the process of sending Amazon Simple Notification Service (SNS) +// notifications about a specified event to a specified SNS topic. +func (c *Inspector) UnsubscribeFromEvent(input *UnsubscribeFromEventInput) (*UnsubscribeFromEventOutput, error) { + req, out := c.UnsubscribeFromEventRequest(input) + err := req.Send() + return out, err +} + +const opUpdateAssessmentTarget = "UpdateAssessmentTarget" + +// UpdateAssessmentTargetRequest generates a "aws/request.Request" representing the +// client's request for the UpdateAssessmentTarget operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateAssessmentTarget method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateAssessmentTargetRequest method. +// req, resp := client.UpdateAssessmentTargetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Inspector) UpdateAssessmentTargetRequest(input *UpdateAssessmentTargetInput) (req *request.Request, output *UpdateAssessmentTargetOutput) { + op := &request.Operation{ + Name: opUpdateAssessmentTarget, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateAssessmentTargetInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateAssessmentTargetOutput{} + req.Data = output + return +} + +// Updates the assessment target that is specified by the ARN of the assessment +// target. +func (c *Inspector) UpdateAssessmentTarget(input *UpdateAssessmentTargetInput) (*UpdateAssessmentTargetOutput, error) { + req, out := c.UpdateAssessmentTargetRequest(input) + err := req.Send() + return out, err +} + +type AddAttributesToFindingsInput struct { + _ struct{} `type:"structure"` + + // The array of attributes that you want to assign to specified findings. + Attributes []*Attribute `locationName:"attributes" type:"list" required:"true"` + + // The ARNs that specify the findings that you want to assign attributes to. 
+ FindingArns []*string `locationName:"findingArns" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s AddAttributesToFindingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddAttributesToFindingsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddAttributesToFindingsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddAttributesToFindingsInput"} + if s.Attributes == nil { + invalidParams.Add(request.NewErrParamRequired("Attributes")) + } + if s.FindingArns == nil { + invalidParams.Add(request.NewErrParamRequired("FindingArns")) + } + if s.FindingArns != nil && len(s.FindingArns) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FindingArns", 1)) + } + if s.Attributes != nil { + for i, v := range s.Attributes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Attributes", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type AddAttributesToFindingsOutput struct { + _ struct{} `type:"structure"` + + // Attribute details that cannot be described. An error code is provided for + // each failed item. + FailedItems map[string]*FailedItemDetails `locationName:"failedItems" type:"map" required:"true"` +} + +// String returns the string representation +func (s AddAttributesToFindingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddAttributesToFindingsOutput) GoString() string { + return s.String() +} + +// Used in the exception error that is thrown if you start an assessment run +// for an assessment target that includes an EC2 instance that is already participating +// in another started assessment run. +type AgentAlreadyRunningAssessment struct { + _ struct{} `type:"structure"` + + // ID of the agent that is running on an EC2 instance that is already participating + // in another started assessment run. + AgentId *string `locationName:"agentId" min:"1" type:"string" required:"true"` + + // The ARN of the assessment run that has already been started. + AssessmentRunArn *string `locationName:"assessmentRunArn" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AgentAlreadyRunningAssessment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AgentAlreadyRunningAssessment) GoString() string { + return s.String() +} + +// Contains information about an Amazon Inspector agent. This data type is used +// as a request parameter in the ListAssessmentRunAgents action. +type AgentFilter struct { + _ struct{} `type:"structure"` + + // The detailed health state of the agent. Values can be set to IDLE, RUNNING, + // SHUTDOWN, UNHEALTHY, THROTTLED, and UNKNOWN. + AgentHealthCodes []*string `locationName:"agentHealthCodes" type:"list" required:"true"` + + // The current health state of the agent. Values can be set to HEALTHY or UNHEALTHY. 
+	AgentHealths []*string `locationName:"agentHealths" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s AgentFilter) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AgentFilter) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AgentFilter) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "AgentFilter"}
+	if s.AgentHealthCodes == nil {
+		invalidParams.Add(request.NewErrParamRequired("AgentHealthCodes"))
+	}
+	if s.AgentHealths == nil {
+		invalidParams.Add(request.NewErrParamRequired("AgentHealths"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Used as a response element in the PreviewAgents action.
+type AgentPreview struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the EC2 instance where the agent is installed.
+	AgentId *string `locationName:"agentId" min:"1" type:"string" required:"true"`
+
+	// The Auto Scaling group for the EC2 instance where the agent is installed.
+	AutoScalingGroup *string `locationName:"autoScalingGroup" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s AgentPreview) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AgentPreview) GoString() string {
+	return s.String()
+}
+
+// A snapshot of an Amazon Inspector assessment run that contains the findings
+// of the assessment run.
+//
+// Used as the response element in the DescribeAssessmentRuns action.
+type AssessmentRun struct {
+	_ struct{} `type:"structure"`
+
+	// The ARN of the assessment run.
+	Arn *string `locationName:"arn" min:"1" type:"string" required:"true"`
+
+	// The ARN of the assessment template that is associated with the assessment
+	// run.
+	AssessmentTemplateArn *string `locationName:"assessmentTemplateArn" min:"1" type:"string" required:"true"`
+
+	// The assessment run completion time that corresponds to the rules packages
+	// evaluation completion time or failure.
+	CompletedAt *time.Time `locationName:"completedAt" type:"timestamp" timestampFormat:"unix"`
+
+	// The time when StartAssessmentRun was called.
+	CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unix" required:"true"`
+
+	// A Boolean value (true or false) that specifies whether the process of collecting
+	// data from the agents is completed.
+	DataCollected *bool `locationName:"dataCollected" type:"boolean" required:"true"`
+
+	// The duration of the assessment run.
+	DurationInSeconds *int64 `locationName:"durationInSeconds" min:"180" type:"integer" required:"true"`
+
+	// The auto-generated name for the assessment run.
+	Name *string `locationName:"name" min:"1" type:"string" required:"true"`
+
+	// A list of notifications for the event subscriptions. A notification about
+	// a particular generated finding is added to this list only once.
+	Notifications []*AssessmentRunNotification `locationName:"notifications" type:"list" required:"true"`
+
+	// The rules packages selected for the assessment run.
+	RulesPackageArns []*string `locationName:"rulesPackageArns" min:"1" type:"list" required:"true"`
+
+	// The time when StartAssessmentRun was called.
+	StartedAt *time.Time `locationName:"startedAt" type:"timestamp" timestampFormat:"unix"`
+
+	// The state of the assessment run.
+	State *string `locationName:"state" type:"string" required:"true" enum:"AssessmentRunState"`
+
+	// The last time when the assessment run's state changed.
+	StateChangedAt *time.Time `locationName:"stateChangedAt" type:"timestamp" timestampFormat:"unix" required:"true"`
+
+	// A list of the assessment run state changes.
+	StateChanges []*AssessmentRunStateChange `locationName:"stateChanges" type:"list" required:"true"`
+
+	// The user-defined attributes that are assigned to every generated finding.
+	UserAttributesForFindings []*Attribute `locationName:"userAttributesForFindings" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s AssessmentRun) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssessmentRun) GoString() string {
+	return s.String()
+}
+
+// Contains information about an Amazon Inspector agent. This data type is used
+// as a response element in the ListAssessmentRunAgents action.
+type AssessmentRunAgent struct {
+	_ struct{} `type:"structure"`
+
+	// The current health state of the agent.
+	AgentHealth *string `locationName:"agentHealth" type:"string" required:"true" enum:"AgentHealth"`
+
+	// The detailed health state of the agent.
+	AgentHealthCode *string `locationName:"agentHealthCode" type:"string" required:"true" enum:"AgentHealthCode"`
+
+	// The description for the agent health code.
+	AgentHealthDetails *string `locationName:"agentHealthDetails" type:"string"`
+
+	// The ID of the agent that is installed on the EC2 instance.
+	AgentId *string `locationName:"agentId" min:"1" type:"string" required:"true"`
+
+	// The ARN of the assessment run that is associated with the agent.
+	AssessmentRunArn *string `locationName:"assessmentRunArn" min:"1" type:"string" required:"true"`
+
+	// The Auto Scaling group of the EC2 instance that is specified by the agent
+	// ID.
+	AutoScalingGroup *string `locationName:"autoScalingGroup" min:"1" type:"string"`
+
+	// The Amazon Inspector application data metrics that are collected by the agent.
+	TelemetryMetadata []*TelemetryMetadata `locationName:"telemetryMetadata" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s AssessmentRunAgent) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssessmentRunAgent) GoString() string {
+	return s.String()
+}
+
+// Used as the request parameter in the ListAssessmentRuns action.
+type AssessmentRunFilter struct {
+	_ struct{} `type:"structure"`
+
+	// For a record to match a filter, the value that is specified for this data
+	// type property must inclusively match any value between the specified minimum
+	// and maximum values of the completedAt property of the AssessmentRun data
+	// type.
+	CompletionTimeRange *TimestampRange `locationName:"completionTimeRange" type:"structure"`
+
+	// For a record to match a filter, the value that is specified for this data
+	// type property must inclusively match any value between the specified minimum
+	// and maximum values of the durationInSeconds property of the AssessmentRun
+	// data type.
+	DurationRange *DurationRange `locationName:"durationRange" type:"structure"`
+
+	// For a record to match a filter, an explicit value or a string containing
+	// a wildcard that is specified for this data type property must match the value
+	// of the assessmentRunName property of the AssessmentRun data type.
+ NamePattern *string `locationName:"namePattern" min:"1" type:"string"` + + // For a record to match a filter, the value that is specified for this data + // type property must be contained in the list of values of the rulesPackages + // property of the AssessmentRun data type. + RulesPackageArns []*string `locationName:"rulesPackageArns" type:"list"` + + // For a record to match a filter, the value that is specified for this data + // type property must inclusively match any value between the specified minimum + // and maximum values of the startTime property of the AssessmentRun data type. + StartTimeRange *TimestampRange `locationName:"startTimeRange" type:"structure"` + + // For a record to match a filter, the value that is specified for this data + // type property must match the stateChangedAt property of the AssessmentRun + // data type. + StateChangeTimeRange *TimestampRange `locationName:"stateChangeTimeRange" type:"structure"` + + // For a record to match a filter, one of the values specified for this data + // type property must be the exact match of the value of the assessmentRunState + // property of the AssessmentRun data type. + States []*string `locationName:"states" type:"list"` +} + +// String returns the string representation +func (s AssessmentRunFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssessmentRunFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssessmentRunFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssessmentRunFilter"} + if s.NamePattern != nil && len(*s.NamePattern) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NamePattern", 1)) + } + if s.DurationRange != nil { + if err := s.DurationRange.Validate(); err != nil { + invalidParams.AddNested("DurationRange", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Used as one of the elements of the AssessmentRun data type. +type AssessmentRunNotification struct { + _ struct{} `type:"structure"` + + // The date of the notification. + Date *time.Time `locationName:"date" type:"timestamp" timestampFormat:"unix" required:"true"` + + // The Boolean value that specifies whether the notification represents an error. + Error *bool `locationName:"error" type:"boolean" required:"true"` + + // The event for which a notification is sent. + Event *string `locationName:"event" type:"string" required:"true" enum:"Event"` + + Message *string `locationName:"message" type:"string"` + + // The status code of the SNS notification. + SnsPublishStatusCode *string `locationName:"snsPublishStatusCode" type:"string" enum:"AssessmentRunNotificationSnsStatusCode"` + + // The SNS topic to which the SNS notification is sent. + SnsTopicArn *string `locationName:"snsTopicArn" min:"1" type:"string"` +} + +// String returns the string representation +func (s AssessmentRunNotification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssessmentRunNotification) GoString() string { + return s.String() +} + +// Used as one of the elements of the AssessmentRun data type. +type AssessmentRunStateChange struct { + _ struct{} `type:"structure"` + + // The assessment run state. + State *string `locationName:"state" type:"string" required:"true" enum:"AssessmentRunState"` + + // The last time the assessment run state changed. 
+ StateChangedAt *time.Time `locationName:"stateChangedAt" type:"timestamp" timestampFormat:"unix" required:"true"` +} + +// String returns the string representation +func (s AssessmentRunStateChange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssessmentRunStateChange) GoString() string { + return s.String() +} + +// Contains information about an Amazon Inspector application. This data type +// is used as the response element in the DescribeAssessmentTargets action. +type AssessmentTarget struct { + _ struct{} `type:"structure"` + + // The ARN that specifies the Amazon Inspector assessment target. + Arn *string `locationName:"arn" min:"1" type:"string" required:"true"` + + // The time at which the assessment target is created. + CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unix" required:"true"` + + // The name of the Amazon Inspector assessment target. + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // The ARN that specifies the resource group that is associated with the assessment + // target. + ResourceGroupArn *string `locationName:"resourceGroupArn" min:"1" type:"string" required:"true"` + + // The time at which UpdateAssessmentTarget is called. + UpdatedAt *time.Time `locationName:"updatedAt" type:"timestamp" timestampFormat:"unix" required:"true"` +} + +// String returns the string representation +func (s AssessmentTarget) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssessmentTarget) GoString() string { + return s.String() +} + +// Used as the request parameter in the ListAssessmentTargets action. +type AssessmentTargetFilter struct { + _ struct{} `type:"structure"` + + // For a record to match a filter, an explicit value or a string that contains + // a wildcard that is specified for this data type property must match the value + // of the assessmentTargetName property of the AssessmentTarget data type. + AssessmentTargetNamePattern *string `locationName:"assessmentTargetNamePattern" min:"1" type:"string"` +} + +// String returns the string representation +func (s AssessmentTargetFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssessmentTargetFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssessmentTargetFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssessmentTargetFilter"} + if s.AssessmentTargetNamePattern != nil && len(*s.AssessmentTargetNamePattern) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AssessmentTargetNamePattern", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains information about an Amazon Inspector assessment template. This +// data type is used as the response element in the DescribeAssessmentTemplates +// action. +type AssessmentTemplate struct { + _ struct{} `type:"structure"` + + // The ARN of the assessment template. + Arn *string `locationName:"arn" min:"1" type:"string" required:"true"` + + // The ARN of the assessment target that corresponds to this assessment template. + AssessmentTargetArn *string `locationName:"assessmentTargetArn" min:"1" type:"string" required:"true"` + + // The time at which the assessment template is created. 
+	CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unix" required:"true"`
+
+	// The duration in seconds specified for this assessment template. The default
+	// value is 3600 seconds (one hour). The maximum value is 86400 seconds (one
+	// day).
+	DurationInSeconds *int64 `locationName:"durationInSeconds" min:"180" type:"integer" required:"true"`
+
+	// The name of the assessment template.
+	Name *string `locationName:"name" min:"1" type:"string" required:"true"`
+
+	// The rules packages that are specified for this assessment template.
+	RulesPackageArns []*string `locationName:"rulesPackageArns" type:"list" required:"true"`
+
+	// The user-defined attributes that are assigned to every generated finding
+	// from the assessment run that uses this assessment template.
+	UserAttributesForFindings []*Attribute `locationName:"userAttributesForFindings" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s AssessmentTemplate) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssessmentTemplate) GoString() string {
+	return s.String()
+}
+
+// Used as the request parameter in the ListAssessmentTemplates action.
+type AssessmentTemplateFilter struct {
+	_ struct{} `type:"structure"`
+
+	// For a record to match a filter, the value specified for this data type property
+	// must inclusively match any value between the specified minimum and maximum
+	// values of the durationInSeconds property of the AssessmentTemplate data type.
+	DurationRange *DurationRange `locationName:"durationRange" type:"structure"`
+
+	// For a record to match a filter, an explicit value or a string that contains
+	// a wildcard that is specified for this data type property must match the value
+	// of the assessmentTemplateName property of the AssessmentTemplate data type.
+	NamePattern *string `locationName:"namePattern" min:"1" type:"string"`
+
+	// For a record to match a filter, the values that are specified for this data
+	// type property must be contained in the list of values of the rulesPackageArns
+	// property of the AssessmentTemplate data type.
+	RulesPackageArns []*string `locationName:"rulesPackageArns" type:"list"`
+}
+
+// String returns the string representation
+func (s AssessmentTemplateFilter) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssessmentTemplateFilter) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AssessmentTemplateFilter) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "AssessmentTemplateFilter"}
+	if s.NamePattern != nil && len(*s.NamePattern) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("NamePattern", 1))
+	}
+	if s.DurationRange != nil {
+		if err := s.DurationRange.Validate(); err != nil {
+			invalidParams.AddNested("DurationRange", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// A collection of attributes of the host from which the finding is generated.
+type AssetAttributes struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the agent that is installed on the EC2 instance where the finding
+	// is generated.
+	AgentId *string `locationName:"agentId" min:"1" type:"string"`
+
+	// The ID of the Amazon Machine Image (AMI) that is installed on the EC2 instance
+	// where the finding is generated.
+ AmiId *string `locationName:"amiId" type:"string"` + + // The Auto Scaling group of the EC2 instance where the finding is generated. + AutoScalingGroup *string `locationName:"autoScalingGroup" min:"1" type:"string"` + + // The hostname of the EC2 instance where the finding is generated. + Hostname *string `locationName:"hostname" type:"string"` + + // The list of IP v4 addresses of the EC2 instance where the finding is generated. + Ipv4Addresses []*string `locationName:"ipv4Addresses" type:"list"` + + // The schema version of this data type. + SchemaVersion *int64 `locationName:"schemaVersion" type:"integer" required:"true"` +} + +// String returns the string representation +func (s AssetAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssetAttributes) GoString() string { + return s.String() +} + +// This data type is used as a request parameter in the AddAttributesToFindings +// and CreateAssessmentTemplate actions. +type Attribute struct { + _ struct{} `type:"structure"` + + // The attribute key. + Key *string `locationName:"key" min:"1" type:"string" required:"true"` + + // The value assigned to the attribute key. + Value *string `locationName:"value" min:"1" type:"string"` +} + +// String returns the string representation +func (s Attribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Attribute) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Attribute) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Attribute"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value != nil && len(*s.Value) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Value", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateAssessmentTargetInput struct { + _ struct{} `type:"structure"` + + // The user-defined name that identifies the assessment target that you want + // to create. The name must be unique within the AWS account. + AssessmentTargetName *string `locationName:"assessmentTargetName" min:"1" type:"string" required:"true"` + + // The ARN that specifies the resource group that is used to create the assessment + // target. + ResourceGroupArn *string `locationName:"resourceGroupArn" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateAssessmentTargetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAssessmentTargetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
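+//
+// A short sketch of client-side validation with placeholder values; both
+// required fields are set, so Validate returns nil:
+//
+//    input := &inspector.CreateAssessmentTargetInput{
+//        AssessmentTargetName: aws.String("ExampleTarget"),
+//        ResourceGroupArn:     aws.String("arn:aws:inspector:us-west-2:123456789012:resourcegroup/0-example"),
+//    }
+//    if err := input.Validate(); err != nil {
+//        fmt.Println(err) // omitting either field would surface an ErrInvalidParams here
+//    }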
+func (s *CreateAssessmentTargetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateAssessmentTargetInput"} + if s.AssessmentTargetName == nil { + invalidParams.Add(request.NewErrParamRequired("AssessmentTargetName")) + } + if s.AssessmentTargetName != nil && len(*s.AssessmentTargetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AssessmentTargetName", 1)) + } + if s.ResourceGroupArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceGroupArn")) + } + if s.ResourceGroupArn != nil && len(*s.ResourceGroupArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceGroupArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateAssessmentTargetOutput struct { + _ struct{} `type:"structure"` + + // The ARN that specifies the assessment target that is created. + AssessmentTargetArn *string `locationName:"assessmentTargetArn" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateAssessmentTargetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAssessmentTargetOutput) GoString() string { + return s.String() +} + +type CreateAssessmentTemplateInput struct { + _ struct{} `type:"structure"` + + // The ARN that specifies the assessment target for which you want to create + // the assessment template. + AssessmentTargetArn *string `locationName:"assessmentTargetArn" min:"1" type:"string" required:"true"` + + // The user-defined name that identifies the assessment template that you want + // to create. You can create several assessment templates for an assessment + // target. The names of the assessment templates that correspond to a particular + // assessment target must be unique. + AssessmentTemplateName *string `locationName:"assessmentTemplateName" min:"1" type:"string" required:"true"` + + // The duration of the assessment run in seconds. The default value is 3600 + // seconds (one hour). + DurationInSeconds *int64 `locationName:"durationInSeconds" min:"180" type:"integer" required:"true"` + + // The ARNs that specify the rules packages that you want to attach to the assessment + // template. + RulesPackageArns []*string `locationName:"rulesPackageArns" type:"list" required:"true"` + + // The user-defined attributes that are assigned to every finding that is generated + // by the assessment run that uses this assessment template. + UserAttributesForFindings []*Attribute `locationName:"userAttributesForFindings" type:"list"` +} + +// String returns the string representation +func (s CreateAssessmentTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAssessmentTemplateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
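+//
+// For illustration, with placeholder ARNs: DurationInSeconds has a minimum of
+// 180, so the value below is rejected before any request is sent:
+//
+//    input := &inspector.CreateAssessmentTemplateInput{
+//        AssessmentTargetArn:    aws.String("arn:aws:inspector:us-west-2:123456789012:target/0-example"),
+//        AssessmentTemplateName: aws.String("ExampleTemplate"),
+//        DurationInSeconds:      aws.Int64(60), // below the 180-second minimum
+//        RulesPackageArns:       []*string{aws.String("arn:aws:inspector:us-west-2:123456789012:rulespackage/0-example")},
+//    }
+//    err := input.Validate() // reports a min-value violation for DurationInSeconds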
+func (s *CreateAssessmentTemplateInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateAssessmentTemplateInput"}
+	if s.AssessmentTargetArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("AssessmentTargetArn"))
+	}
+	if s.AssessmentTargetArn != nil && len(*s.AssessmentTargetArn) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("AssessmentTargetArn", 1))
+	}
+	if s.AssessmentTemplateName == nil {
+		invalidParams.Add(request.NewErrParamRequired("AssessmentTemplateName"))
+	}
+	if s.AssessmentTemplateName != nil && len(*s.AssessmentTemplateName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("AssessmentTemplateName", 1))
+	}
+	if s.DurationInSeconds == nil {
+		invalidParams.Add(request.NewErrParamRequired("DurationInSeconds"))
+	}
+	if s.DurationInSeconds != nil && *s.DurationInSeconds < 180 {
+		invalidParams.Add(request.NewErrParamMinValue("DurationInSeconds", 180))
+	}
+	if s.RulesPackageArns == nil {
+		invalidParams.Add(request.NewErrParamRequired("RulesPackageArns"))
+	}
+	if s.UserAttributesForFindings != nil {
+		for i, v := range s.UserAttributesForFindings {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "UserAttributesForFindings", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type CreateAssessmentTemplateOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The ARN that specifies the assessment template that is created.
+	AssessmentTemplateArn *string `locationName:"assessmentTemplateArn" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateAssessmentTemplateOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateAssessmentTemplateOutput) GoString() string {
+	return s.String()
+}
+
+type CreateResourceGroupInput struct {
+	_ struct{} `type:"structure"`
+
+	// A collection of keys and an array of possible values, '[{"key":"key1","values":["Value1","Value2"]},{"key":"Key2","values":["Value3"]}]'.
+	//
+	// For example, '[{"key":"Name","values":["TestEC2Instance"]}]'.
+	ResourceGroupTags []*ResourceGroupTag `locationName:"resourceGroupTags" min:"1" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateResourceGroupInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateResourceGroupInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateResourceGroupInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateResourceGroupInput"}
+	if s.ResourceGroupTags == nil {
+		invalidParams.Add(request.NewErrParamRequired("ResourceGroupTags"))
+	}
+	if s.ResourceGroupTags != nil && len(s.ResourceGroupTags) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ResourceGroupTags", 1))
+	}
+	if s.ResourceGroupTags != nil {
+		for i, v := range s.ResourceGroupTags {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ResourceGroupTags", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type CreateResourceGroupOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The ARN that specifies the resource group that is created.
+ ResourceGroupArn *string `locationName:"resourceGroupArn" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateResourceGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateResourceGroupOutput) GoString() string { + return s.String() +} + +type DeleteAssessmentRunInput struct { + _ struct{} `type:"structure"` + + // The ARN that specifies the assessment run that you want to delete. + AssessmentRunArn *string `locationName:"assessmentRunArn" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteAssessmentRunInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAssessmentRunInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteAssessmentRunInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteAssessmentRunInput"} + if s.AssessmentRunArn == nil { + invalidParams.Add(request.NewErrParamRequired("AssessmentRunArn")) + } + if s.AssessmentRunArn != nil && len(*s.AssessmentRunArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AssessmentRunArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteAssessmentRunOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAssessmentRunOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAssessmentRunOutput) GoString() string { + return s.String() +} + +type DeleteAssessmentTargetInput struct { + _ struct{} `type:"structure"` + + // The ARN that specifies the assessment target that you want to delete. + AssessmentTargetArn *string `locationName:"assessmentTargetArn" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteAssessmentTargetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAssessmentTargetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteAssessmentTargetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteAssessmentTargetInput"} + if s.AssessmentTargetArn == nil { + invalidParams.Add(request.NewErrParamRequired("AssessmentTargetArn")) + } + if s.AssessmentTargetArn != nil && len(*s.AssessmentTargetArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AssessmentTargetArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteAssessmentTargetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAssessmentTargetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAssessmentTargetOutput) GoString() string { + return s.String() +} + +type DeleteAssessmentTemplateInput struct { + _ struct{} `type:"structure"` + + // The ARN that specifies the assessment template that you want to delete. 
+ AssessmentTemplateArn *string `locationName:"assessmentTemplateArn" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteAssessmentTemplateInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteAssessmentTemplateInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteAssessmentTemplateInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteAssessmentTemplateInput"}
+ if s.AssessmentTemplateArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("AssessmentTemplateArn"))
+ }
+ if s.AssessmentTemplateArn != nil && len(*s.AssessmentTemplateArn) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("AssessmentTemplateArn", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type DeleteAssessmentTemplateOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteAssessmentTemplateOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteAssessmentTemplateOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeAssessmentRunsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN that specifies the assessment run that you want to describe.
+ AssessmentRunArns []*string `locationName:"assessmentRunArns" min:"1" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeAssessmentRunsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeAssessmentRunsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeAssessmentRunsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeAssessmentRunsInput"}
+ if s.AssessmentRunArns == nil {
+ invalidParams.Add(request.NewErrParamRequired("AssessmentRunArns"))
+ }
+ if s.AssessmentRunArns != nil && len(s.AssessmentRunArns) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("AssessmentRunArns", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type DescribeAssessmentRunsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the assessment run.
+ AssessmentRuns []*AssessmentRun `locationName:"assessmentRuns" type:"list" required:"true"`
+
+ // Assessment run details that cannot be described. An error code is provided
+ // for each failed item.
+ FailedItems map[string]*FailedItemDetails `locationName:"failedItems" type:"map" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeAssessmentRunsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeAssessmentRunsOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeAssessmentTargetsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ARNs that specify the assessment targets that you want to describe.
+ AssessmentTargetArns []*string `locationName:"assessmentTargetArns" min:"1" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeAssessmentTargetsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeAssessmentTargetsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeAssessmentTargetsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeAssessmentTargetsInput"}
+ if s.AssessmentTargetArns == nil {
+ invalidParams.Add(request.NewErrParamRequired("AssessmentTargetArns"))
+ }
+ if s.AssessmentTargetArns != nil && len(s.AssessmentTargetArns) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("AssessmentTargetArns", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type DescribeAssessmentTargetsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the assessment targets.
+ AssessmentTargets []*AssessmentTarget `locationName:"assessmentTargets" type:"list" required:"true"`
+
+ // Assessment target details that cannot be described. An error code is provided
+ // for each failed item.
+ FailedItems map[string]*FailedItemDetails `locationName:"failedItems" type:"map" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeAssessmentTargetsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeAssessmentTargetsOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeAssessmentTemplatesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ARNs that specify the assessment templates that you want to describe.
+ AssessmentTemplateArns []*string `locationName:"assessmentTemplateArns" min:"1" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeAssessmentTemplatesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeAssessmentTemplatesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeAssessmentTemplatesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeAssessmentTemplatesInput"}
+ if s.AssessmentTemplateArns == nil {
+ invalidParams.Add(request.NewErrParamRequired("AssessmentTemplateArns"))
+ }
+ if s.AssessmentTemplateArns != nil && len(s.AssessmentTemplateArns) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("AssessmentTemplateArns", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type DescribeAssessmentTemplatesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the assessment templates.
+ AssessmentTemplates []*AssessmentTemplate `locationName:"assessmentTemplates" type:"list" required:"true"`
+
+ // Assessment template details that cannot be described. An error code is provided
+ // for each failed item.
+ FailedItems map[string]*FailedItemDetails `locationName:"failedItems" type:"map" required:"true"` +} + +// String returns the string representation +func (s DescribeAssessmentTemplatesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAssessmentTemplatesOutput) GoString() string { + return s.String() +} + +type DescribeCrossAccountAccessRoleInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeCrossAccountAccessRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCrossAccountAccessRoleInput) GoString() string { + return s.String() +} + +type DescribeCrossAccountAccessRoleOutput struct { + _ struct{} `type:"structure"` + + // The date when the cross-account access role was registered. + RegisteredAt *time.Time `locationName:"registeredAt" type:"timestamp" timestampFormat:"unix" required:"true"` + + // The ARN that specifies the IAM role that Amazon Inspector uses to access + // your AWS account. + RoleArn *string `locationName:"roleArn" min:"1" type:"string" required:"true"` + + // A Boolean value that specifies whether the IAM role has the necessary policies + // attached to enable Amazon Inspector to access your AWS account. + Valid *bool `locationName:"valid" type:"boolean" required:"true"` +} + +// String returns the string representation +func (s DescribeCrossAccountAccessRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCrossAccountAccessRoleOutput) GoString() string { + return s.String() +} + +type DescribeFindingsInput struct { + _ struct{} `type:"structure"` + + // The ARN that specifies the finding that you want to describe. + FindingArns []*string `locationName:"findingArns" min:"1" type:"list" required:"true"` + + // The locale into which you want to translate a finding description, recommendation, + // and the short description that identifies the finding. + Locale *string `locationName:"locale" type:"string" enum:"Locale"` +} + +// String returns the string representation +func (s DescribeFindingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFindingsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeFindingsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeFindingsInput"} + if s.FindingArns == nil { + invalidParams.Add(request.NewErrParamRequired("FindingArns")) + } + if s.FindingArns != nil && len(s.FindingArns) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FindingArns", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeFindingsOutput struct { + _ struct{} `type:"structure"` + + // Finding details that cannot be described. An error code is provided for each + // failed item. + FailedItems map[string]*FailedItemDetails `locationName:"failedItems" type:"map" required:"true"` + + // Information about the finding. 
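+ //
+ // A minimal sketch of reading this response (svc is assumed to be a
+ // configured client; the printed fields are illustrative):
+ //
+ //    out, err := svc.DescribeFindings(&DescribeFindingsInput{
+ //        FindingArns: []*string{findingArn},
+ //    })
+ //    if err == nil {
+ //        for _, f := range out.Findings {
+ //            fmt.Println(aws.StringValue(f.Title), aws.StringValue(f.Severity))
+ //        }
+ //        for arn, d := range out.FailedItems {
+ //            fmt.Println("failed:", arn, aws.StringValue(d.FailureCode))
+ //        }
+ //    }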
+ Findings []*Finding `locationName:"findings" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeFindingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFindingsOutput) GoString() string { + return s.String() +} + +type DescribeResourceGroupsInput struct { + _ struct{} `type:"structure"` + + // The ARN that specifies the resource group that you want to describe. + ResourceGroupArns []*string `locationName:"resourceGroupArns" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeResourceGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeResourceGroupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeResourceGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeResourceGroupsInput"} + if s.ResourceGroupArns == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceGroupArns")) + } + if s.ResourceGroupArns != nil && len(s.ResourceGroupArns) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceGroupArns", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeResourceGroupsOutput struct { + _ struct{} `type:"structure"` + + // Resource group details that cannot be described. An error code is provided + // for each failed item. + FailedItems map[string]*FailedItemDetails `locationName:"failedItems" type:"map" required:"true"` + + // Information about a resource group. + ResourceGroups []*ResourceGroup `locationName:"resourceGroups" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeResourceGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeResourceGroupsOutput) GoString() string { + return s.String() +} + +type DescribeRulesPackagesInput struct { + _ struct{} `type:"structure"` + + // The locale that you want to translate a rules package description into. + Locale *string `locationName:"locale" type:"string" enum:"Locale"` + + // The ARN that specifies the rules package that you want to describe. + RulesPackageArns []*string `locationName:"rulesPackageArns" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeRulesPackagesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRulesPackagesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeRulesPackagesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeRulesPackagesInput"} + if s.RulesPackageArns == nil { + invalidParams.Add(request.NewErrParamRequired("RulesPackageArns")) + } + if s.RulesPackageArns != nil && len(s.RulesPackageArns) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RulesPackageArns", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeRulesPackagesOutput struct { + _ struct{} `type:"structure"` + + // Rules package details that cannot be described. An error code is provided + // for each failed item. 
+ FailedItems map[string]*FailedItemDetails `locationName:"failedItems" type:"map" required:"true"`
+
+ // Information about the rules package.
+ RulesPackages []*RulesPackage `locationName:"rulesPackages" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeRulesPackagesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeRulesPackagesOutput) GoString() string {
+ return s.String()
+}
+
+// This data type is used in the AssessmentTemplateFilter data type.
+type DurationRange struct {
+ _ struct{} `type:"structure"`
+
+ // The maximum value of the duration range. Must be less than or equal to 604800
+ // seconds (1 week).
+ MaxSeconds *int64 `locationName:"maxSeconds" min:"180" type:"integer"`
+
+ // The minimum value of the duration range. Must be greater than or equal to
+ // 180 seconds (3 minutes).
+ MinSeconds *int64 `locationName:"minSeconds" min:"180" type:"integer"`
+}
+
+// String returns the string representation
+func (s DurationRange) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DurationRange) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DurationRange) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DurationRange"}
+ if s.MaxSeconds != nil && *s.MaxSeconds < 180 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxSeconds", 180))
+ }
+ if s.MinSeconds != nil && *s.MinSeconds < 180 {
+ invalidParams.Add(request.NewErrParamMinValue("MinSeconds", 180))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// This data type is used in the Subscription data type.
+type EventSubscription struct {
+ _ struct{} `type:"structure"`
+
+ // The event for which Amazon Simple Notification Service (SNS) notifications
+ // are sent.
+ Event *string `locationName:"event" type:"string" required:"true" enum:"Event"`
+
+ // The time at which SubscribeToEvent is called.
+ SubscribedAt *time.Time `locationName:"subscribedAt" type:"timestamp" timestampFormat:"unix" required:"true"`
+}
+
+// String returns the string representation
+func (s EventSubscription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EventSubscription) GoString() string {
+ return s.String()
+}
+
+// Includes details about the failed items.
+type FailedItemDetails struct {
+ _ struct{} `type:"structure"`
+
+ // The status code of a failed item.
+ FailureCode *string `locationName:"failureCode" type:"string" required:"true" enum:"FailedItemErrorCode"`
+
+ // Indicates whether you can immediately retry a request for this item for a
+ // specified resource.
+ Retryable *bool `locationName:"retryable" type:"boolean" required:"true"`
+}
+
+// String returns the string representation
+func (s FailedItemDetails) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s FailedItemDetails) GoString() string {
+ return s.String()
+}
+
+// Contains information about an Amazon Inspector finding. This data type is
+// used as the response element in the DescribeFindings action.
+type Finding struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN that specifies the finding.
+ Arn *string `locationName:"arn" min:"1" type:"string" required:"true"`
+
+ // A collection of attributes of the host from which the finding is generated.
+ AssetAttributes *AssetAttributes `locationName:"assetAttributes" type:"structure"` + + // The type of the host from which the finding is generated. + AssetType *string `locationName:"assetType" type:"string" enum:"AssetType"` + + // The system-defined attributes for the finding. + Attributes []*Attribute `locationName:"attributes" type:"list" required:"true"` + + // This data element is currently not used. + Confidence *int64 `locationName:"confidence" type:"integer"` + + // The time when the finding was generated. + CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unix" required:"true"` + + // The description of the finding. + Description *string `locationName:"description" type:"string"` + + // The ID of the finding. + Id *string `locationName:"id" type:"string"` + + // This data element is currently not used. + IndicatorOfCompromise *bool `locationName:"indicatorOfCompromise" type:"boolean"` + + // The numeric value of the finding severity. + NumericSeverity *float64 `locationName:"numericSeverity" type:"double"` + + // The recommendation for the finding. + Recommendation *string `locationName:"recommendation" type:"string"` + + // The schema version of this data type. + SchemaVersion *int64 `locationName:"schemaVersion" type:"integer"` + + // The data element is set to "Inspector". + Service *string `locationName:"service" type:"string"` + + // This data type is used in the Finding data type. + ServiceAttributes *ServiceAttributes `locationName:"serviceAttributes" type:"structure"` + + // The finding severity. Values can be set to High, Medium, Low, and Informational. + Severity *string `locationName:"severity" type:"string" enum:"Severity"` + + // The name of the finding. + Title *string `locationName:"title" type:"string"` + + // The time when AddAttributesToFindings is called. + UpdatedAt *time.Time `locationName:"updatedAt" type:"timestamp" timestampFormat:"unix" required:"true"` + + // The user-defined attributes that are assigned to the finding. + UserAttributes []*Attribute `locationName:"userAttributes" type:"list" required:"true"` +} + +// String returns the string representation +func (s Finding) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Finding) GoString() string { + return s.String() +} + +// This data type is used as a request parameter in the ListFindings action. +type FindingFilter struct { + _ struct{} `type:"structure"` + + // For a record to match a filter, one of the values that is specified for this + // data type property must be the exact match of the value of the agentId property + // of the Finding data type. + AgentIds []*string `locationName:"agentIds" type:"list"` + + // For a record to match a filter, the list of values that are specified for + // this data type property must be contained in the list of values of the attributes + // property of the Finding data type. + Attributes []*Attribute `locationName:"attributes" type:"list"` + + // For a record to match a filter, one of the values that is specified for this + // data type property must be the exact match of the value of the autoScalingGroup + // property of the Finding data type. + AutoScalingGroups []*string `locationName:"autoScalingGroups" type:"list"` + + // The time range during which the finding is generated. 
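+ //
+ // A minimal sketch of constraining a ListFindings filter to findings created
+ // in the last day (the duration arithmetic is illustrative; aws.Time is the
+ // SDK's *time.Time helper):
+ //
+ //    Filter: &FindingFilter{
+ //        CreationTimeRange: &TimestampRange{
+ //            BeginDate: aws.Time(time.Now().Add(-24 * time.Hour)),
+ //        },
+ //    }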
+ CreationTimeRange *TimestampRange `locationName:"creationTimeRange" type:"structure"` + + // For a record to match a filter, one of the values that is specified for this + // data type property must be the exact match of the value of the ruleName property + // of the Finding data type. + RuleNames []*string `locationName:"ruleNames" type:"list"` + + // For a record to match a filter, one of the values that is specified for this + // data type property must be the exact match of the value of the rulesPackageArn + // property of the Finding data type. + RulesPackageArns []*string `locationName:"rulesPackageArns" type:"list"` + + // For a record to match a filter, one of the values that is specified for this + // data type property must be the exact match of the value of the severity property + // of the Finding data type. + Severities []*string `locationName:"severities" type:"list"` + + // For a record to match a filter, the value that is specified for this data + // type property must be contained in the list of values of the userAttributes + // property of the Finding data type. + UserAttributes []*Attribute `locationName:"userAttributes" type:"list"` +} + +// String returns the string representation +func (s FindingFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FindingFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *FindingFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FindingFilter"} + if s.Attributes != nil { + for i, v := range s.Attributes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Attributes", i), err.(request.ErrInvalidParams)) + } + } + } + if s.UserAttributes != nil { + for i, v := range s.UserAttributes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "UserAttributes", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetTelemetryMetadataInput struct { + _ struct{} `type:"structure"` + + // The ARN that specifies the assessment run that has the telemetry data that + // you want to obtain. + AssessmentRunArn *string `locationName:"assessmentRunArn" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetTelemetryMetadataInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTelemetryMetadataInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetTelemetryMetadataInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetTelemetryMetadataInput"} + if s.AssessmentRunArn == nil { + invalidParams.Add(request.NewErrParamRequired("AssessmentRunArn")) + } + if s.AssessmentRunArn != nil && len(*s.AssessmentRunArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AssessmentRunArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetTelemetryMetadataOutput struct { + _ struct{} `type:"structure"` + + // Telemetry details. 
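+ //
+ // A minimal sketch of reading these details (field names as defined on
+ // TelemetryMetadata later in this file; svc is assumed to be a configured
+ // client):
+ //
+ //    out, err := svc.GetTelemetryMetadata(&GetTelemetryMetadataInput{
+ //        AssessmentRunArn: runArn,
+ //    })
+ //    if err == nil {
+ //        for _, m := range out.TelemetryMetadata {
+ //            fmt.Println(aws.StringValue(m.MessageType), aws.Int64Value(m.Count))
+ //        }
+ //    }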
+ TelemetryMetadata []*TelemetryMetadata `locationName:"telemetryMetadata" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s GetTelemetryMetadataOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetTelemetryMetadataOutput) GoString() string {
+ return s.String()
+}
+
+type ListAssessmentRunAgentsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN that specifies the assessment run whose agents you want to list.
+ AssessmentRunArn *string `locationName:"assessmentRunArn" min:"1" type:"string" required:"true"`
+
+ // You can use this parameter to specify a subset of data to be included in
+ // the action's response.
+ //
+ // For a record to match a filter, all specified filter attributes must match.
+ // When multiple values are specified for a filter attribute, any of the values
+ // can match.
+ Filter *AgentFilter `locationName:"filter" type:"structure"`
+
+ // You can use this parameter to indicate the maximum number of items that you
+ // want in the response. The default value is 10. The maximum value is 500.
+ MaxResults *int64 `locationName:"maxResults" type:"integer"`
+
+ // You can use this parameter when paginating results. Set the value of this
+ // parameter to null on your first call to the ListAssessmentRunAgents action.
+ // Subsequent calls to the action fill nextToken in the request with the value
+ // of NextToken from the previous response to continue listing data.
+ NextToken *string `locationName:"nextToken" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ListAssessmentRunAgentsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListAssessmentRunAgentsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListAssessmentRunAgentsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListAssessmentRunAgentsInput"}
+ if s.AssessmentRunArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("AssessmentRunArn"))
+ }
+ if s.AssessmentRunArn != nil && len(*s.AssessmentRunArn) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("AssessmentRunArn", 1))
+ }
+ if s.NextToken != nil && len(*s.NextToken) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
+ }
+ if s.Filter != nil {
+ if err := s.Filter.Validate(); err != nil {
+ invalidParams.AddNested("Filter", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type ListAssessmentRunAgentsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of the agents returned by the action.
+ AssessmentRunAgents []*AssessmentRunAgent `locationName:"assessmentRunAgents" type:"list" required:"true"`
+
+ // When a response is generated, if there is more data to be listed, this parameter
+ // is present in the response and contains the value to use for the nextToken
+ // parameter in a subsequent pagination request. If there is no more data to
+ // be listed, this parameter is set to null.
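+ //
+ // A minimal pagination sketch under this convention (svc is assumed to be
+ // a configured client; error handling is elided):
+ //
+ //    input := &ListAssessmentRunAgentsInput{AssessmentRunArn: runArn}
+ //    for {
+ //        out, err := svc.ListAssessmentRunAgents(input)
+ //        if err != nil {
+ //            break
+ //        }
+ //        agents = append(agents, out.AssessmentRunAgents...)
+ //        if out.NextToken == nil {
+ //            break
+ //        }
+ //        input.NextToken = out.NextToken
+ //    }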
+ NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s ListAssessmentRunAgentsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAssessmentRunAgentsOutput) GoString() string { + return s.String() +} + +type ListAssessmentRunsInput struct { + _ struct{} `type:"structure"` + + // The ARNs that specify the assessment templates whose assessment runs you + // want to list. + AssessmentTemplateArns []*string `locationName:"assessmentTemplateArns" type:"list"` + + // You can use this parameter to specify a subset of data to be included in + // the action's response. + // + // For a record to match a filter, all specified filter attributes must match. + // When multiple values are specified for a filter attribute, any of the values + // can match. + Filter *AssessmentRunFilter `locationName:"filter" type:"structure"` + + // You can use this parameter to indicate the maximum number of items that you + // want in the response. The default value is 10. The maximum value is 500. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // You can use this parameter when paginating results. Set the value of this + // parameter to null on your first call to the ListAssessmentRuns action. Subsequent + // calls to the action fill nextToken in the request with the value of NextToken + // from the previous response to continue listing data. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s ListAssessmentRunsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAssessmentRunsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListAssessmentRunsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAssessmentRunsInput"} + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListAssessmentRunsOutput struct { + _ struct{} `type:"structure"` + + // A list of ARNs that specifies the assessment runs that are returned by the + // action. + AssessmentRunArns []*string `locationName:"assessmentRunArns" type:"list" required:"true"` + + // When a response is generated, if there is more data to be listed, this parameter + // is present in the response and contains the value to use for the nextToken + // parameter in a subsequent pagination request. If there is no more data to + // be listed, this parameter is set to null. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s ListAssessmentRunsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAssessmentRunsOutput) GoString() string { + return s.String() +} + +type ListAssessmentTargetsInput struct { + _ struct{} `type:"structure"` + + // You can use this parameter to specify a subset of data to be included in + // the action's response. + // + // For a record to match a filter, all specified filter attributes must match. 
+ // When multiple values are specified for a filter attribute, any of the values + // can match. + Filter *AssessmentTargetFilter `locationName:"filter" type:"structure"` + + // You can use this parameter to indicate the maximum number of items you want + // in the response. The default value is 10. The maximum value is 500. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // You can use this parameter when paginating results. Set the value of this + // parameter to null on your first call to the ListAssessmentTargets action. + // Subsequent calls to the action fill nextToken in the request with the value + // of NextToken from the previous response to continue listing data. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s ListAssessmentTargetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAssessmentTargetsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListAssessmentTargetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAssessmentTargetsInput"} + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListAssessmentTargetsOutput struct { + _ struct{} `type:"structure"` + + // A list of ARNs that specifies the assessment targets that are returned by + // the action. + AssessmentTargetArns []*string `locationName:"assessmentTargetArns" type:"list" required:"true"` + + // When a response is generated, if there is more data to be listed, this parameter + // is present in the response and contains the value to use for the nextToken + // parameter in a subsequent pagination request. If there is no more data to + // be listed, this parameter is set to null. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s ListAssessmentTargetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAssessmentTargetsOutput) GoString() string { + return s.String() +} + +type ListAssessmentTemplatesInput struct { + _ struct{} `type:"structure"` + + // A list of ARNs that specifies the assessment targets whose assessment templates + // you want to list. + AssessmentTargetArns []*string `locationName:"assessmentTargetArns" type:"list"` + + // You can use this parameter to specify a subset of data to be included in + // the action's response. + // + // For a record to match a filter, all specified filter attributes must match. + // When multiple values are specified for a filter attribute, any of the values + // can match. + Filter *AssessmentTemplateFilter `locationName:"filter" type:"structure"` + + // You can use this parameter to indicate the maximum number of items you want + // in the response. The default value is 10. The maximum value is 500. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // You can use this parameter when paginating results. Set the value of this + // parameter to null on your first call to the ListAssessmentTemplates action. 
+ // Subsequent calls to the action fill nextToken in the request with the value + // of NextToken from the previous response to continue listing data. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s ListAssessmentTemplatesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAssessmentTemplatesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListAssessmentTemplatesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAssessmentTemplatesInput"} + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListAssessmentTemplatesOutput struct { + _ struct{} `type:"structure"` + + // A list of ARNs that specifies the assessment templates returned by the action. + AssessmentTemplateArns []*string `locationName:"assessmentTemplateArns" type:"list" required:"true"` + + // When a response is generated, if there is more data to be listed, this parameter + // is present in the response and contains the value to use for the nextToken + // parameter in a subsequent pagination request. If there is no more data to + // be listed, this parameter is set to null. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s ListAssessmentTemplatesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAssessmentTemplatesOutput) GoString() string { + return s.String() +} + +type ListEventSubscriptionsInput struct { + _ struct{} `type:"structure"` + + // You can use this parameter to indicate the maximum number of items you want + // in the response. The default value is 10. The maximum value is 500. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // You can use this parameter when paginating results. Set the value of this + // parameter to null on your first call to the ListEventSubscriptions action. + // Subsequent calls to the action fill nextToken in the request with the value + // of NextToken from the previous response to continue listing data. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // The ARN of the assessment template for which you want to list the existing + // event subscriptions. + ResourceArn *string `locationName:"resourceArn" min:"1" type:"string"` +} + +// String returns the string representation +func (s ListEventSubscriptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListEventSubscriptionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
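+//
+// A minimal sketch of the check this enables before any network call (the
+// empty NextToken below is deliberately invalid, since the field has a
+// one-character minimum):
+//
+//    in := &ListEventSubscriptionsInput{NextToken: aws.String("")}
+//    if err := in.Validate(); err != nil {
+//        // err is a request.ErrInvalidParams describing the violation
+//    }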
+func (s *ListEventSubscriptionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListEventSubscriptionsInput"} + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListEventSubscriptionsOutput struct { + _ struct{} `type:"structure"` + + // When a response is generated, if there is more data to be listed, this parameter + // is present in the response and contains the value to use for the nextToken + // parameter in a subsequent pagination request. If there is no more data to + // be listed, this parameter is set to null. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // Details of the returned event subscriptions. + Subscriptions []*Subscription `locationName:"subscriptions" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListEventSubscriptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListEventSubscriptionsOutput) GoString() string { + return s.String() +} + +type ListFindingsInput struct { + _ struct{} `type:"structure"` + + // The ARNs of the assessment runs that generate the findings that you want + // to list. + AssessmentRunArns []*string `locationName:"assessmentRunArns" type:"list"` + + // You can use this parameter to specify a subset of data to be included in + // the action's response. + // + // For a record to match a filter, all specified filter attributes must match. + // When multiple values are specified for a filter attribute, any of the values + // can match. + Filter *FindingFilter `locationName:"filter" type:"structure"` + + // You can use this parameter to indicate the maximum number of items you want + // in the response. The default value is 10. The maximum value is 500. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // You can use this parameter when paginating results. Set the value of this + // parameter to null on your first call to the ListFindings action. Subsequent + // calls to the action fill nextToken in the request with the value of NextToken + // from the previous response to continue listing data. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s ListFindingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListFindingsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListFindingsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListFindingsInput"} + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListFindingsOutput struct { + _ struct{} `type:"structure"` + + // A list of ARNs that specifies the findings returned by the action. 
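+ //
+ // A minimal sketch of producing this list with a severity filter ("High"
+ // is one of the documented Severity values; svc is assumed to be a
+ // configured client):
+ //
+ //    out, err := svc.ListFindings(&ListFindingsInput{
+ //        Filter: &FindingFilter{
+ //            Severities: []*string{aws.String("High")},
+ //        },
+ //    })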
+ FindingArns []*string `locationName:"findingArns" type:"list" required:"true"` + + // When a response is generated, if there is more data to be listed, this parameter + // is present in the response and contains the value to use for the nextToken + // parameter in a subsequent pagination request. If there is no more data to + // be listed, this parameter is set to null. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s ListFindingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListFindingsOutput) GoString() string { + return s.String() +} + +type ListRulesPackagesInput struct { + _ struct{} `type:"structure"` + + // You can use this parameter to indicate the maximum number of items you want + // in the response. The default value is 10. The maximum value is 500. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // You can use this parameter when paginating results. Set the value of this + // parameter to null on your first call to the ListRulesPackages action. Subsequent + // calls to the action fill nextToken in the request with the value of NextToken + // from the previous response to continue listing data. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s ListRulesPackagesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRulesPackagesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListRulesPackagesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListRulesPackagesInput"} + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListRulesPackagesOutput struct { + _ struct{} `type:"structure"` + + // When a response is generated, if there is more data to be listed, this parameter + // is present in the response and contains the value to use for the nextToken + // parameter in a subsequent pagination request. If there is no more data to + // be listed, this parameter is set to null. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // The list of ARNs that specifies the rules packages returned by the action. + RulesPackageArns []*string `locationName:"rulesPackageArns" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListRulesPackagesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRulesPackagesOutput) GoString() string { + return s.String() +} + +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The ARN that specifies the assessment template whose tags you want to list. + ResourceArn *string `locationName:"resourceArn" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // A collection of key and value pairs. + Tags []*Tag `locationName:"tags" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +type PreviewAgentsInput struct { + _ struct{} `type:"structure"` + + // You can use this parameter to indicate the maximum number of items you want + // in the response. The default value is 10. The maximum value is 500. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // You can use this parameter when paginating results. Set the value of this + // parameter to null on your first call to the PreviewAgents action. Subsequent + // calls to the action fill nextToken in the request with the value of NextToken + // from the previous response to continue listing data. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // The ARN of the assessment target whose agents you want to preview. + PreviewAgentsArn *string `locationName:"previewAgentsArn" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PreviewAgentsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PreviewAgentsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PreviewAgentsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PreviewAgentsInput"} + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.PreviewAgentsArn == nil { + invalidParams.Add(request.NewErrParamRequired("PreviewAgentsArn")) + } + if s.PreviewAgentsArn != nil && len(*s.PreviewAgentsArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PreviewAgentsArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PreviewAgentsOutput struct { + _ struct{} `type:"structure"` + + // The resulting list of agents. + AgentPreviews []*AgentPreview `locationName:"agentPreviews" type:"list" required:"true"` + + // When a response is generated, if there is more data to be listed, this parameter + // is present in the response and contains the value to use for the nextToken + // parameter in a subsequent pagination request. If there is no more data to + // be listed, this parameter is set to null. 
+ NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s PreviewAgentsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PreviewAgentsOutput) GoString() string { + return s.String() +} + +type RegisterCrossAccountAccessRoleInput struct { + _ struct{} `type:"structure"` + + // The ARN of the IAM role that Amazon Inspector uses to list your EC2 instances + // during the assessment run or when you call the PreviewAgents action. + RoleArn *string `locationName:"roleArn" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RegisterCrossAccountAccessRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterCrossAccountAccessRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RegisterCrossAccountAccessRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RegisterCrossAccountAccessRoleInput"} + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RegisterCrossAccountAccessRoleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RegisterCrossAccountAccessRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterCrossAccountAccessRoleOutput) GoString() string { + return s.String() +} + +type RemoveAttributesFromFindingsInput struct { + _ struct{} `type:"structure"` + + // The array of attribute keys that you want to remove from specified findings. + AttributeKeys []*string `locationName:"attributeKeys" type:"list" required:"true"` + + // The ARNs that specify the findings that you want to remove attributes from. + FindingArns []*string `locationName:"findingArns" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s RemoveAttributesFromFindingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveAttributesFromFindingsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RemoveAttributesFromFindingsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemoveAttributesFromFindingsInput"} + if s.AttributeKeys == nil { + invalidParams.Add(request.NewErrParamRequired("AttributeKeys")) + } + if s.FindingArns == nil { + invalidParams.Add(request.NewErrParamRequired("FindingArns")) + } + if s.FindingArns != nil && len(s.FindingArns) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FindingArns", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RemoveAttributesFromFindingsOutput struct { + _ struct{} `type:"structure"` + + // Attributes details that cannot be described. An error code is provided for + // each failed item. 
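+ //
+ // A minimal sketch of handling the failed entries (Retryable and
+ // FailureCode come from FailedItemDetails, defined earlier in this file):
+ //
+ //    for arn, d := range out.FailedItems {
+ //        if aws.BoolValue(d.Retryable) {
+ //            retry = append(retry, aws.String(arn))
+ //        }
+ //    }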
+ FailedItems map[string]*FailedItemDetails `locationName:"failedItems" type:"map" required:"true"`
+}
+
+// String returns the string representation
+func (s RemoveAttributesFromFindingsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RemoveAttributesFromFindingsOutput) GoString() string {
+ return s.String()
+}
+
+// Contains information about a resource group. The resource group defines a
+// set of tags that, when queried, identify the AWS resources that make up the
+// assessment target. This data type is used as the response element in the
+// DescribeResourceGroups action.
+type ResourceGroup struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN of the resource group.
+ Arn *string `locationName:"arn" min:"1" type:"string" required:"true"`
+
+ // The time at which the resource group is created.
+ CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unix" required:"true"`
+
+ // The tags (key and value pairs) of the resource group. This data type property
+ // is used in the CreateResourceGroup action.
+ Tags []*ResourceGroupTag `locationName:"tags" min:"1" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s ResourceGroup) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ResourceGroup) GoString() string {
+ return s.String()
+}
+
+// This data type is used as one of the elements of the ResourceGroup data type.
+type ResourceGroupTag struct {
+ _ struct{} `type:"structure"`
+
+ // A tag key.
+ Key *string `locationName:"key" min:"1" type:"string" required:"true"`
+
+ // The value assigned to a tag key.
+ Value *string `locationName:"value" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ResourceGroupTag) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ResourceGroupTag) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ResourceGroupTag) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ResourceGroupTag"}
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.Value != nil && len(*s.Value) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Value", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Contains information about an Amazon Inspector rules package. This data type
+// is used as the response element in the DescribeRulesPackages action.
+type RulesPackage struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN of the rules package.
+ Arn *string `locationName:"arn" min:"1" type:"string" required:"true"`
+
+ // The description of the rules package.
+ Description *string `locationName:"description" type:"string"`
+
+ // The name of the rules package.
+ Name *string `locationName:"name" type:"string" required:"true"`
+
+ // The provider of the rules package.
+ Provider *string `locationName:"provider" type:"string" required:"true"`
+
+ // The version ID of the rules package.
+ Version *string `locationName:"version" type:"string" required:"true"` +} + +// String returns the string representation +func (s RulesPackage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RulesPackage) GoString() string { + return s.String() +} + +// This data type is used in the Finding data type. +type ServiceAttributes struct { + _ struct{} `type:"structure"` + + // The ARN of the assessment run during which the finding is generated. + AssessmentRunArn *string `locationName:"assessmentRunArn" min:"1" type:"string"` + + // The ARN of the rules package that is used to generate the finding. + RulesPackageArn *string `locationName:"rulesPackageArn" min:"1" type:"string"` + + // The schema version of this data type. + SchemaVersion *int64 `locationName:"schemaVersion" type:"integer" required:"true"` +} + +// String returns the string representation +func (s ServiceAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServiceAttributes) GoString() string { + return s.String() +} + +type SetTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The ARN of the assessment template that you want to set tags to. + ResourceArn *string `locationName:"resourceArn" min:"1" type:"string" required:"true"` + + // A collection of key and value pairs that you want to set to the assessment + // template. + Tags []*Tag `locationName:"tags" type:"list"` +} + +// String returns the string representation +func (s SetTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetTagsForResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type SetTagsForResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetTagsForResourceOutput) GoString() string { + return s.String() +} + +type StartAssessmentRunInput struct { + _ struct{} `type:"structure"` + + // You can specify the name for the assessment run, or you can use the auto-generated + // name that is based on the assessment template name. The name must be unique + // for the assessment template. + AssessmentRunName *string `locationName:"assessmentRunName" min:"1" type:"string"` + + // The ARN of the assessment template of the assessment run that you want to + // start. 
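+ //
+ // A minimal sketch of starting a run (the ARN is a hypothetical
+ // placeholder; svc is assumed to be a configured client):
+ //
+ //    out, err := svc.StartAssessmentRun(&StartAssessmentRunInput{
+ //        AssessmentTemplateArn: aws.String("arn:aws:inspector:us-west-2:123456789012:target/0-example/template/0-example"),
+ //    })
+ //    if err == nil {
+ //        fmt.Println(aws.StringValue(out.AssessmentRunArn))
+ //    }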
+ AssessmentTemplateArn *string `locationName:"assessmentTemplateArn" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s StartAssessmentRunInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartAssessmentRunInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartAssessmentRunInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartAssessmentRunInput"} + if s.AssessmentRunName != nil && len(*s.AssessmentRunName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AssessmentRunName", 1)) + } + if s.AssessmentTemplateArn == nil { + invalidParams.Add(request.NewErrParamRequired("AssessmentTemplateArn")) + } + if s.AssessmentTemplateArn != nil && len(*s.AssessmentTemplateArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AssessmentTemplateArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type StartAssessmentRunOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the assessment run that has been started. + AssessmentRunArn *string `locationName:"assessmentRunArn" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s StartAssessmentRunOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartAssessmentRunOutput) GoString() string { + return s.String() +} + +type StopAssessmentRunInput struct { + _ struct{} `type:"structure"` + + // The ARN of the assessment run that you want to stop. + AssessmentRunArn *string `locationName:"assessmentRunArn" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s StopAssessmentRunInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopAssessmentRunInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StopAssessmentRunInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StopAssessmentRunInput"} + if s.AssessmentRunArn == nil { + invalidParams.Add(request.NewErrParamRequired("AssessmentRunArn")) + } + if s.AssessmentRunArn != nil && len(*s.AssessmentRunArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AssessmentRunArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type StopAssessmentRunOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s StopAssessmentRunOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopAssessmentRunOutput) GoString() string { + return s.String() +} + +type SubscribeToEventInput struct { + _ struct{} `type:"structure"` + + // The event for which you want to receive SNS notifications. + Event *string `locationName:"event" type:"string" required:"true" enum:"Event"` + + // The ARN of the assessment template that is used during the event for which + // you want to receive SNS notifications. + ResourceArn *string `locationName:"resourceArn" min:"1" type:"string" required:"true"` + + // The ARN of the SNS topic to which the SNS notifications are sent. 
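+ //
+ // A minimal sketch of creating a subscription (the event name is assumed
+ // to be one of this API's Event enum values; the ARN variables are
+ // hypothetical):
+ //
+ //    _, err := svc.SubscribeToEvent(&SubscribeToEventInput{
+ //        Event:       aws.String("ASSESSMENT_RUN_COMPLETED"),
+ //        ResourceArn: templateArn,
+ //        TopicArn:    topicArn,
+ //    })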
+ TopicArn *string `locationName:"topicArn" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s SubscribeToEventInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SubscribeToEventInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SubscribeToEventInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SubscribeToEventInput"} + if s.Event == nil { + invalidParams.Add(request.NewErrParamRequired("Event")) + } + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.TopicArn == nil { + invalidParams.Add(request.NewErrParamRequired("TopicArn")) + } + if s.TopicArn != nil && len(*s.TopicArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TopicArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type SubscribeToEventOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SubscribeToEventOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SubscribeToEventOutput) GoString() string { + return s.String() +} + +// This data type is used as a response element in the ListEventSubscriptions +// action. +type Subscription struct { + _ struct{} `type:"structure"` + + // The list of existing event subscriptions. + EventSubscriptions []*EventSubscription `locationName:"eventSubscriptions" min:"1" type:"list" required:"true"` + + // The ARN of the assessment template that is used during the event for which + // the SNS notification is sent. + ResourceArn *string `locationName:"resourceArn" min:"1" type:"string" required:"true"` + + // The ARN of the Amazon Simple Notification Service (SNS) topic to which the + // SNS notifications are sent. + TopicArn *string `locationName:"topicArn" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s Subscription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Subscription) GoString() string { + return s.String() +} + +// A key and value pair. This data type is used as a request parameter in the +// SetTagsForResource action and a response element in the ListTagsForResource +// action. +type Tag struct { + _ struct{} `type:"structure"` + + // A tag key. + Key *string `locationName:"key" min:"1" type:"string" required:"true"` + + // A value assigned to a tag key. + Value *string `locationName:"value" min:"1" type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value != nil && len(*s.Value) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Value", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The metadata about the Amazon Inspector application data metrics collected +// by the agent. This data type is used as the response element in the GetTelemetryMetadata +// action. +type TelemetryMetadata struct { + _ struct{} `type:"structure"` + + // The count of messages that the agent sends to the Amazon Inspector service. + Count *int64 `locationName:"count" type:"long" required:"true"` + + // The data size of messages that the agent sends to the Amazon Inspector service. + DataSize *int64 `locationName:"dataSize" type:"long"` + + // A specific type of behavioral data that is collected by the agent. + MessageType *string `locationName:"messageType" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s TelemetryMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TelemetryMetadata) GoString() string { + return s.String() +} + +// This data type is used in the AssessmentRunFilter data type. +type TimestampRange struct { + _ struct{} `type:"structure"` + + // The minimum value of the timestamp range. + BeginDate *time.Time `locationName:"beginDate" type:"timestamp" timestampFormat:"unix"` + + // The maximum value of the timestamp range. + EndDate *time.Time `locationName:"endDate" type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s TimestampRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TimestampRange) GoString() string { + return s.String() +} + +type UnsubscribeFromEventInput struct { + _ struct{} `type:"structure"` + + // The event for which you want to stop receiving SNS notifications. + Event *string `locationName:"event" type:"string" required:"true" enum:"Event"` + + // The ARN of the assessment template that is used during the event for which + // you want to stop receiving SNS notifications. + ResourceArn *string `locationName:"resourceArn" min:"1" type:"string" required:"true"` + + // The ARN of the SNS topic to which SNS notifications are sent. + TopicArn *string `locationName:"topicArn" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UnsubscribeFromEventInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnsubscribeFromEventInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UnsubscribeFromEventInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UnsubscribeFromEventInput"} + if s.Event == nil { + invalidParams.Add(request.NewErrParamRequired("Event")) + } + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.TopicArn == nil { + invalidParams.Add(request.NewErrParamRequired("TopicArn")) + } + if s.TopicArn != nil && len(*s.TopicArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TopicArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UnsubscribeFromEventOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UnsubscribeFromEventOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnsubscribeFromEventOutput) GoString() string { + return s.String() +} + +type UpdateAssessmentTargetInput struct { + _ struct{} `type:"structure"` + + // The ARN of the assessment target that you want to update. + AssessmentTargetArn *string `locationName:"assessmentTargetArn" min:"1" type:"string" required:"true"` + + // The name of the assessment target that you want to update. + AssessmentTargetName *string `locationName:"assessmentTargetName" min:"1" type:"string" required:"true"` + + // The ARN of the resource group that is used to specify the new resource group + // to associate with the assessment target. + ResourceGroupArn *string `locationName:"resourceGroupArn" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateAssessmentTargetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAssessmentTargetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateAssessmentTargetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateAssessmentTargetInput"} + if s.AssessmentTargetArn == nil { + invalidParams.Add(request.NewErrParamRequired("AssessmentTargetArn")) + } + if s.AssessmentTargetArn != nil && len(*s.AssessmentTargetArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AssessmentTargetArn", 1)) + } + if s.AssessmentTargetName == nil { + invalidParams.Add(request.NewErrParamRequired("AssessmentTargetName")) + } + if s.AssessmentTargetName != nil && len(*s.AssessmentTargetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AssessmentTargetName", 1)) + } + if s.ResourceGroupArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceGroupArn")) + } + if s.ResourceGroupArn != nil && len(*s.ResourceGroupArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceGroupArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateAssessmentTargetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateAssessmentTargetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAssessmentTargetOutput) GoString() string { + return s.String() +} + +const ( + // @enum AccessDeniedErrorCode + AccessDeniedErrorCodeAccessDeniedToAssessmentTarget = "ACCESS_DENIED_TO_ASSESSMENT_TARGET" + // @enum AccessDeniedErrorCode + AccessDeniedErrorCodeAccessDeniedToAssessmentTemplate = "ACCESS_DENIED_TO_ASSESSMENT_TEMPLATE" + // @enum AccessDeniedErrorCode + AccessDeniedErrorCodeAccessDeniedToAssessmentRun = "ACCESS_DENIED_TO_ASSESSMENT_RUN" + // @enum AccessDeniedErrorCode + AccessDeniedErrorCodeAccessDeniedToFinding = "ACCESS_DENIED_TO_FINDING" + // @enum AccessDeniedErrorCode + AccessDeniedErrorCodeAccessDeniedToResourceGroup = "ACCESS_DENIED_TO_RESOURCE_GROUP" + // @enum AccessDeniedErrorCode + AccessDeniedErrorCodeAccessDeniedToRulesPackage = "ACCESS_DENIED_TO_RULES_PACKAGE" + // @enum AccessDeniedErrorCode + AccessDeniedErrorCodeAccessDeniedToSnsTopic = "ACCESS_DENIED_TO_SNS_TOPIC" + // @enum AccessDeniedErrorCode + AccessDeniedErrorCodeAccessDeniedToIamRole = "ACCESS_DENIED_TO_IAM_ROLE" +) + +const ( + // @enum AgentHealth + AgentHealthHealthy = "HEALTHY" + // @enum AgentHealth + AgentHealthUnhealthy = "UNHEALTHY" +) + +const ( + // @enum AgentHealthCode + AgentHealthCodeIdle = "IDLE" + // @enum AgentHealthCode + AgentHealthCodeRunning = "RUNNING" + // @enum AgentHealthCode + AgentHealthCodeShutdown = "SHUTDOWN" + // @enum AgentHealthCode + AgentHealthCodeUnhealthy = "UNHEALTHY" + // @enum AgentHealthCode + AgentHealthCodeThrottled = "THROTTLED" + // @enum AgentHealthCode + AgentHealthCodeUnknown = "UNKNOWN" +) + +const ( + // @enum AssessmentRunNotificationSnsStatusCode + AssessmentRunNotificationSnsStatusCodeSuccess = "SUCCESS" + // @enum AssessmentRunNotificationSnsStatusCode + AssessmentRunNotificationSnsStatusCodeTopicDoesNotExist = "TOPIC_DOES_NOT_EXIST" + // @enum AssessmentRunNotificationSnsStatusCode + AssessmentRunNotificationSnsStatusCodeAccessDenied = "ACCESS_DENIED" + // @enum AssessmentRunNotificationSnsStatusCode + AssessmentRunNotificationSnsStatusCodeInternalError = "INTERNAL_ERROR" +) + +const ( + // @enum AssessmentRunState + AssessmentRunStateCreated = "CREATED" + // @enum AssessmentRunState + AssessmentRunStateStartDataCollectionPending = "START_DATA_COLLECTION_PENDING" + // @enum AssessmentRunState + 
AssessmentRunStateStartDataCollectionInProgress = "START_DATA_COLLECTION_IN_PROGRESS" + // @enum AssessmentRunState + AssessmentRunStateCollectingData = "COLLECTING_DATA" + // @enum AssessmentRunState + AssessmentRunStateStopDataCollectionPending = "STOP_DATA_COLLECTION_PENDING" + // @enum AssessmentRunState + AssessmentRunStateDataCollected = "DATA_COLLECTED" + // @enum AssessmentRunState + AssessmentRunStateEvaluatingRules = "EVALUATING_RULES" + // @enum AssessmentRunState + AssessmentRunStateFailed = "FAILED" + // @enum AssessmentRunState + AssessmentRunStateCompleted = "COMPLETED" + // @enum AssessmentRunState + AssessmentRunStateCompletedWithErrors = "COMPLETED_WITH_ERRORS" +) + +const ( + // @enum AssetType + AssetTypeEc2Instance = "ec2-instance" +) + +const ( + // @enum Event + EventAssessmentRunStarted = "ASSESSMENT_RUN_STARTED" + // @enum Event + EventAssessmentRunCompleted = "ASSESSMENT_RUN_COMPLETED" + // @enum Event + EventAssessmentRunStateChanged = "ASSESSMENT_RUN_STATE_CHANGED" + // @enum Event + EventFindingReported = "FINDING_REPORTED" + // @enum Event + EventOther = "OTHER" +) + +const ( + // @enum FailedItemErrorCode + FailedItemErrorCodeInvalidArn = "INVALID_ARN" + // @enum FailedItemErrorCode + FailedItemErrorCodeDuplicateArn = "DUPLICATE_ARN" + // @enum FailedItemErrorCode + FailedItemErrorCodeItemDoesNotExist = "ITEM_DOES_NOT_EXIST" + // @enum FailedItemErrorCode + FailedItemErrorCodeAccessDenied = "ACCESS_DENIED" + // @enum FailedItemErrorCode + FailedItemErrorCodeLimitExceeded = "LIMIT_EXCEEDED" + // @enum FailedItemErrorCode + FailedItemErrorCodeInternalError = "INTERNAL_ERROR" +) + +const ( + // @enum InvalidCrossAccountRoleErrorCode + InvalidCrossAccountRoleErrorCodeRoleDoesNotExistOrInvalidTrustRelationship = "ROLE_DOES_NOT_EXIST_OR_INVALID_TRUST_RELATIONSHIP" + // @enum InvalidCrossAccountRoleErrorCode + InvalidCrossAccountRoleErrorCodeRoleDoesNotHaveCorrectPolicy = "ROLE_DOES_NOT_HAVE_CORRECT_POLICY" +) + +const ( + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidAssessmentTargetArn = "INVALID_ASSESSMENT_TARGET_ARN" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidAssessmentTemplateArn = "INVALID_ASSESSMENT_TEMPLATE_ARN" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidAssessmentRunArn = "INVALID_ASSESSMENT_RUN_ARN" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidFindingArn = "INVALID_FINDING_ARN" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidResourceGroupArn = "INVALID_RESOURCE_GROUP_ARN" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidRulesPackageArn = "INVALID_RULES_PACKAGE_ARN" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidResourceArn = "INVALID_RESOURCE_ARN" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidSnsTopicArn = "INVALID_SNS_TOPIC_ARN" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidIamRoleArn = "INVALID_IAM_ROLE_ARN" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidAssessmentTargetName = "INVALID_ASSESSMENT_TARGET_NAME" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidAssessmentTargetNamePattern = "INVALID_ASSESSMENT_TARGET_NAME_PATTERN" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidAssessmentTemplateName = "INVALID_ASSESSMENT_TEMPLATE_NAME" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidAssessmentTemplateNamePattern = "INVALID_ASSESSMENT_TEMPLATE_NAME_PATTERN" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidAssessmentTemplateDuration = 
"INVALID_ASSESSMENT_TEMPLATE_DURATION" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidAssessmentTemplateDurationRange = "INVALID_ASSESSMENT_TEMPLATE_DURATION_RANGE" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidAssessmentRunDurationRange = "INVALID_ASSESSMENT_RUN_DURATION_RANGE" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidAssessmentRunStartTimeRange = "INVALID_ASSESSMENT_RUN_START_TIME_RANGE" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidAssessmentRunCompletionTimeRange = "INVALID_ASSESSMENT_RUN_COMPLETION_TIME_RANGE" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidAssessmentRunStateChangeTimeRange = "INVALID_ASSESSMENT_RUN_STATE_CHANGE_TIME_RANGE" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidAssessmentRunState = "INVALID_ASSESSMENT_RUN_STATE" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidTag = "INVALID_TAG" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidTagKey = "INVALID_TAG_KEY" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidTagValue = "INVALID_TAG_VALUE" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidResourceGroupTagKey = "INVALID_RESOURCE_GROUP_TAG_KEY" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidResourceGroupTagValue = "INVALID_RESOURCE_GROUP_TAG_VALUE" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidAttribute = "INVALID_ATTRIBUTE" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidUserAttribute = "INVALID_USER_ATTRIBUTE" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidUserAttributeKey = "INVALID_USER_ATTRIBUTE_KEY" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidUserAttributeValue = "INVALID_USER_ATTRIBUTE_VALUE" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidPaginationToken = "INVALID_PAGINATION_TOKEN" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidMaxResults = "INVALID_MAX_RESULTS" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidAgentId = "INVALID_AGENT_ID" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidAutoScalingGroup = "INVALID_AUTO_SCALING_GROUP" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidRuleName = "INVALID_RULE_NAME" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidSeverity = "INVALID_SEVERITY" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidLocale = "INVALID_LOCALE" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidEvent = "INVALID_EVENT" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeAssessmentTargetNameAlreadyTaken = "ASSESSMENT_TARGET_NAME_ALREADY_TAKEN" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeAssessmentTemplateNameAlreadyTaken = "ASSESSMENT_TEMPLATE_NAME_ALREADY_TAKEN" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidNumberOfAssessmentTargetArns = "INVALID_NUMBER_OF_ASSESSMENT_TARGET_ARNS" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidNumberOfAssessmentTemplateArns = "INVALID_NUMBER_OF_ASSESSMENT_TEMPLATE_ARNS" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidNumberOfAssessmentRunArns = "INVALID_NUMBER_OF_ASSESSMENT_RUN_ARNS" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidNumberOfFindingArns = "INVALID_NUMBER_OF_FINDING_ARNS" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidNumberOfResourceGroupArns = "INVALID_NUMBER_OF_RESOURCE_GROUP_ARNS" + // @enum InvalidInputErrorCode + 
InvalidInputErrorCodeInvalidNumberOfRulesPackageArns = "INVALID_NUMBER_OF_RULES_PACKAGE_ARNS" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidNumberOfAssessmentRunStates = "INVALID_NUMBER_OF_ASSESSMENT_RUN_STATES" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidNumberOfTags = "INVALID_NUMBER_OF_TAGS" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidNumberOfResourceGroupTags = "INVALID_NUMBER_OF_RESOURCE_GROUP_TAGS" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidNumberOfAttributes = "INVALID_NUMBER_OF_ATTRIBUTES" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidNumberOfUserAttributes = "INVALID_NUMBER_OF_USER_ATTRIBUTES" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidNumberOfAgentIds = "INVALID_NUMBER_OF_AGENT_IDS" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidNumberOfAutoScalingGroups = "INVALID_NUMBER_OF_AUTO_SCALING_GROUPS" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidNumberOfRuleNames = "INVALID_NUMBER_OF_RULE_NAMES" + // @enum InvalidInputErrorCode + InvalidInputErrorCodeInvalidNumberOfSeverities = "INVALID_NUMBER_OF_SEVERITIES" +) + +const ( + // @enum LimitExceededErrorCode + LimitExceededErrorCodeAssessmentTargetLimitExceeded = "ASSESSMENT_TARGET_LIMIT_EXCEEDED" + // @enum LimitExceededErrorCode + LimitExceededErrorCodeAssessmentTemplateLimitExceeded = "ASSESSMENT_TEMPLATE_LIMIT_EXCEEDED" + // @enum LimitExceededErrorCode + LimitExceededErrorCodeAssessmentRunLimitExceeded = "ASSESSMENT_RUN_LIMIT_EXCEEDED" + // @enum LimitExceededErrorCode + LimitExceededErrorCodeResourceGroupLimitExceeded = "RESOURCE_GROUP_LIMIT_EXCEEDED" + // @enum LimitExceededErrorCode + LimitExceededErrorCodeEventSubscriptionLimitExceeded = "EVENT_SUBSCRIPTION_LIMIT_EXCEEDED" +) + +const ( + // @enum Locale + LocaleEnUs = "EN_US" +) + +const ( + // @enum NoSuchEntityErrorCode + NoSuchEntityErrorCodeAssessmentTargetDoesNotExist = "ASSESSMENT_TARGET_DOES_NOT_EXIST" + // @enum NoSuchEntityErrorCode + NoSuchEntityErrorCodeAssessmentTemplateDoesNotExist = "ASSESSMENT_TEMPLATE_DOES_NOT_EXIST" + // @enum NoSuchEntityErrorCode + NoSuchEntityErrorCodeAssessmentRunDoesNotExist = "ASSESSMENT_RUN_DOES_NOT_EXIST" + // @enum NoSuchEntityErrorCode + NoSuchEntityErrorCodeFindingDoesNotExist = "FINDING_DOES_NOT_EXIST" + // @enum NoSuchEntityErrorCode + NoSuchEntityErrorCodeResourceGroupDoesNotExist = "RESOURCE_GROUP_DOES_NOT_EXIST" + // @enum NoSuchEntityErrorCode + NoSuchEntityErrorCodeRulesPackageDoesNotExist = "RULES_PACKAGE_DOES_NOT_EXIST" + // @enum NoSuchEntityErrorCode + NoSuchEntityErrorCodeSnsTopicDoesNotExist = "SNS_TOPIC_DOES_NOT_EXIST" + // @enum NoSuchEntityErrorCode + NoSuchEntityErrorCodeIamRoleDoesNotExist = "IAM_ROLE_DOES_NOT_EXIST" +) + +const ( + // @enum Severity + SeverityLow = "Low" + // @enum Severity + SeverityMedium = "Medium" + // @enum Severity + SeverityHigh = "High" + // @enum Severity + SeverityInformational = "Informational" + // @enum Severity + SeverityUndefined = "Undefined" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/inspector/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/inspector/examples_test.go new file mode 100644 index 000000000..10180d4d9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/inspector/examples_test.go @@ -0,0 +1,807 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package inspector_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/inspector" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleInspector_AddAttributesToFindings() { + svc := inspector.New(session.New()) + + params := &inspector.AddAttributesToFindingsInput{ + Attributes: []*inspector.Attribute{ // Required + { // Required + Key: aws.String("AttributeKey"), // Required + Value: aws.String("AttributeValue"), + }, + // More values... + }, + FindingArns: []*string{ // Required + aws.String("Arn"), // Required + // More values... + }, + } + resp, err := svc.AddAttributesToFindings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_CreateAssessmentTarget() { + svc := inspector.New(session.New()) + + params := &inspector.CreateAssessmentTargetInput{ + AssessmentTargetName: aws.String("AssessmentTargetName"), // Required + ResourceGroupArn: aws.String("Arn"), // Required + } + resp, err := svc.CreateAssessmentTarget(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_CreateAssessmentTemplate() { + svc := inspector.New(session.New()) + + params := &inspector.CreateAssessmentTemplateInput{ + AssessmentTargetArn: aws.String("Arn"), // Required + AssessmentTemplateName: aws.String("AssessmentTemplateName"), // Required + DurationInSeconds: aws.Int64(1), // Required + RulesPackageArns: []*string{ // Required + aws.String("Arn"), // Required + // More values... + }, + UserAttributesForFindings: []*inspector.Attribute{ + { // Required + Key: aws.String("AttributeKey"), // Required + Value: aws.String("AttributeValue"), + }, + // More values... + }, + } + resp, err := svc.CreateAssessmentTemplate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_CreateResourceGroup() { + svc := inspector.New(session.New()) + + params := &inspector.CreateResourceGroupInput{ + ResourceGroupTags: []*inspector.ResourceGroupTag{ // Required + { // Required + Key: aws.String("TagKey"), // Required + Value: aws.String("TagValue"), + }, + // More values... + }, + } + resp, err := svc.CreateResourceGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_DeleteAssessmentRun() { + svc := inspector.New(session.New()) + + params := &inspector.DeleteAssessmentRunInput{ + AssessmentRunArn: aws.String("Arn"), // Required + } + resp, err := svc.DeleteAssessmentRun(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleInspector_DeleteAssessmentTarget() { + svc := inspector.New(session.New()) + + params := &inspector.DeleteAssessmentTargetInput{ + AssessmentTargetArn: aws.String("Arn"), // Required + } + resp, err := svc.DeleteAssessmentTarget(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_DeleteAssessmentTemplate() { + svc := inspector.New(session.New()) + + params := &inspector.DeleteAssessmentTemplateInput{ + AssessmentTemplateArn: aws.String("Arn"), // Required + } + resp, err := svc.DeleteAssessmentTemplate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_DescribeAssessmentRuns() { + svc := inspector.New(session.New()) + + params := &inspector.DescribeAssessmentRunsInput{ + AssessmentRunArns: []*string{ // Required + aws.String("Arn"), // Required + // More values... + }, + } + resp, err := svc.DescribeAssessmentRuns(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_DescribeAssessmentTargets() { + svc := inspector.New(session.New()) + + params := &inspector.DescribeAssessmentTargetsInput{ + AssessmentTargetArns: []*string{ // Required + aws.String("Arn"), // Required + // More values... + }, + } + resp, err := svc.DescribeAssessmentTargets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_DescribeAssessmentTemplates() { + svc := inspector.New(session.New()) + + params := &inspector.DescribeAssessmentTemplatesInput{ + AssessmentTemplateArns: []*string{ // Required + aws.String("Arn"), // Required + // More values... + }, + } + resp, err := svc.DescribeAssessmentTemplates(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_DescribeCrossAccountAccessRole() { + svc := inspector.New(session.New()) + + var params *inspector.DescribeCrossAccountAccessRoleInput + resp, err := svc.DescribeCrossAccountAccessRole(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_DescribeFindings() { + svc := inspector.New(session.New()) + + params := &inspector.DescribeFindingsInput{ + FindingArns: []*string{ // Required + aws.String("Arn"), // Required + // More values... + }, + Locale: aws.String("Locale"), + } + resp, err := svc.DescribeFindings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleInspector_DescribeResourceGroups() { + svc := inspector.New(session.New()) + + params := &inspector.DescribeResourceGroupsInput{ + ResourceGroupArns: []*string{ // Required + aws.String("Arn"), // Required + // More values... + }, + } + resp, err := svc.DescribeResourceGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_DescribeRulesPackages() { + svc := inspector.New(session.New()) + + params := &inspector.DescribeRulesPackagesInput{ + RulesPackageArns: []*string{ // Required + aws.String("Arn"), // Required + // More values... + }, + Locale: aws.String("Locale"), + } + resp, err := svc.DescribeRulesPackages(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_GetTelemetryMetadata() { + svc := inspector.New(session.New()) + + params := &inspector.GetTelemetryMetadataInput{ + AssessmentRunArn: aws.String("Arn"), // Required + } + resp, err := svc.GetTelemetryMetadata(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_ListAssessmentRunAgents() { + svc := inspector.New(session.New()) + + params := &inspector.ListAssessmentRunAgentsInput{ + AssessmentRunArn: aws.String("Arn"), // Required + Filter: &inspector.AgentFilter{ + AgentHealthCodes: []*string{ // Required + aws.String("AgentHealthCode"), // Required + // More values... + }, + AgentHealths: []*string{ // Required + aws.String("AgentHealth"), // Required + // More values... + }, + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListAssessmentRunAgents(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_ListAssessmentRuns() { + svc := inspector.New(session.New()) + + params := &inspector.ListAssessmentRunsInput{ + AssessmentTemplateArns: []*string{ + aws.String("Arn"), // Required + // More values... + }, + Filter: &inspector.AssessmentRunFilter{ + CompletionTimeRange: &inspector.TimestampRange{ + BeginDate: aws.Time(time.Now()), + EndDate: aws.Time(time.Now()), + }, + DurationRange: &inspector.DurationRange{ + MaxSeconds: aws.Int64(1), + MinSeconds: aws.Int64(1), + }, + NamePattern: aws.String("NamePattern"), + RulesPackageArns: []*string{ + aws.String("Arn"), // Required + // More values... + }, + StartTimeRange: &inspector.TimestampRange{ + BeginDate: aws.Time(time.Now()), + EndDate: aws.Time(time.Now()), + }, + StateChangeTimeRange: &inspector.TimestampRange{ + BeginDate: aws.Time(time.Now()), + EndDate: aws.Time(time.Now()), + }, + States: []*string{ + aws.String("AssessmentRunState"), // Required + // More values... + }, + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListAssessmentRuns(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_ListAssessmentTargets() { + svc := inspector.New(session.New()) + + params := &inspector.ListAssessmentTargetsInput{ + Filter: &inspector.AssessmentTargetFilter{ + AssessmentTargetNamePattern: aws.String("NamePattern"), + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListAssessmentTargets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_ListAssessmentTemplates() { + svc := inspector.New(session.New()) + + params := &inspector.ListAssessmentTemplatesInput{ + AssessmentTargetArns: []*string{ + aws.String("Arn"), // Required + // More values... + }, + Filter: &inspector.AssessmentTemplateFilter{ + DurationRange: &inspector.DurationRange{ + MaxSeconds: aws.Int64(1), + MinSeconds: aws.Int64(1), + }, + NamePattern: aws.String("NamePattern"), + RulesPackageArns: []*string{ + aws.String("Arn"), // Required + // More values... + }, + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListAssessmentTemplates(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_ListEventSubscriptions() { + svc := inspector.New(session.New()) + + params := &inspector.ListEventSubscriptionsInput{ + MaxResults: aws.Int64(1), + NextToken: aws.String("PaginationToken"), + ResourceArn: aws.String("Arn"), + } + resp, err := svc.ListEventSubscriptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_ListFindings() { + svc := inspector.New(session.New()) + + params := &inspector.ListFindingsInput{ + AssessmentRunArns: []*string{ + aws.String("Arn"), // Required + // More values... + }, + Filter: &inspector.FindingFilter{ + AgentIds: []*string{ + aws.String("AgentId"), // Required + // More values... + }, + Attributes: []*inspector.Attribute{ + { // Required + Key: aws.String("AttributeKey"), // Required + Value: aws.String("AttributeValue"), + }, + // More values... + }, + AutoScalingGroups: []*string{ + aws.String("AutoScalingGroup"), // Required + // More values... + }, + CreationTimeRange: &inspector.TimestampRange{ + BeginDate: aws.Time(time.Now()), + EndDate: aws.Time(time.Now()), + }, + RuleNames: []*string{ + aws.String("RuleName"), // Required + // More values... + }, + RulesPackageArns: []*string{ + aws.String("Arn"), // Required + // More values... + }, + Severities: []*string{ + aws.String("Severity"), // Required + // More values... + }, + UserAttributes: []*inspector.Attribute{ + { // Required + Key: aws.String("AttributeKey"), // Required + Value: aws.String("AttributeValue"), + }, + // More values... + }, + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListFindings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_ListRulesPackages() { + svc := inspector.New(session.New()) + + params := &inspector.ListRulesPackagesInput{ + MaxResults: aws.Int64(1), + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListRulesPackages(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_ListTagsForResource() { + svc := inspector.New(session.New()) + + params := &inspector.ListTagsForResourceInput{ + ResourceArn: aws.String("Arn"), // Required + } + resp, err := svc.ListTagsForResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_PreviewAgents() { + svc := inspector.New(session.New()) + + params := &inspector.PreviewAgentsInput{ + PreviewAgentsArn: aws.String("Arn"), // Required + MaxResults: aws.Int64(1), + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.PreviewAgents(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_RegisterCrossAccountAccessRole() { + svc := inspector.New(session.New()) + + params := &inspector.RegisterCrossAccountAccessRoleInput{ + RoleArn: aws.String("Arn"), // Required + } + resp, err := svc.RegisterCrossAccountAccessRole(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_RemoveAttributesFromFindings() { + svc := inspector.New(session.New()) + + params := &inspector.RemoveAttributesFromFindingsInput{ + AttributeKeys: []*string{ // Required + aws.String("AttributeKey"), // Required + // More values... + }, + FindingArns: []*string{ // Required + aws.String("Arn"), // Required + // More values... + }, + } + resp, err := svc.RemoveAttributesFromFindings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_SetTagsForResource() { + svc := inspector.New(session.New()) + + params := &inspector.SetTagsForResourceInput{ + ResourceArn: aws.String("Arn"), // Required + Tags: []*inspector.Tag{ + { // Required + Key: aws.String("TagKey"), // Required + Value: aws.String("TagValue"), + }, + // More values... + }, + } + resp, err := svc.SetTagsForResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleInspector_StartAssessmentRun() { + svc := inspector.New(session.New()) + + params := &inspector.StartAssessmentRunInput{ + AssessmentTemplateArn: aws.String("Arn"), // Required + AssessmentRunName: aws.String("AssessmentRunName"), + } + resp, err := svc.StartAssessmentRun(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_StopAssessmentRun() { + svc := inspector.New(session.New()) + + params := &inspector.StopAssessmentRunInput{ + AssessmentRunArn: aws.String("Arn"), // Required + } + resp, err := svc.StopAssessmentRun(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_SubscribeToEvent() { + svc := inspector.New(session.New()) + + params := &inspector.SubscribeToEventInput{ + Event: aws.String("Event"), // Required + ResourceArn: aws.String("Arn"), // Required + TopicArn: aws.String("Arn"), // Required + } + resp, err := svc.SubscribeToEvent(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_UnsubscribeFromEvent() { + svc := inspector.New(session.New()) + + params := &inspector.UnsubscribeFromEventInput{ + Event: aws.String("Event"), // Required + ResourceArn: aws.String("Arn"), // Required + TopicArn: aws.String("Arn"), // Required + } + resp, err := svc.UnsubscribeFromEvent(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_UpdateAssessmentTarget() { + svc := inspector.New(session.New()) + + params := &inspector.UpdateAssessmentTargetInput{ + AssessmentTargetArn: aws.String("Arn"), // Required + AssessmentTargetName: aws.String("AssessmentTargetName"), // Required + ResourceGroupArn: aws.String("Arn"), // Required + } + resp, err := svc.UpdateAssessmentTarget(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/inspector/inspectoriface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/inspector/inspectoriface/interface.go new file mode 100644 index 000000000..276e6e69c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/inspector/inspectoriface/interface.go @@ -0,0 +1,142 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package inspectoriface provides an interface for the Amazon Inspector. +package inspectoriface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/inspector" +) + +// InspectorAPI is the interface type for inspector.Inspector. 
+type InspectorAPI interface { + AddAttributesToFindingsRequest(*inspector.AddAttributesToFindingsInput) (*request.Request, *inspector.AddAttributesToFindingsOutput) + + AddAttributesToFindings(*inspector.AddAttributesToFindingsInput) (*inspector.AddAttributesToFindingsOutput, error) + + CreateAssessmentTargetRequest(*inspector.CreateAssessmentTargetInput) (*request.Request, *inspector.CreateAssessmentTargetOutput) + + CreateAssessmentTarget(*inspector.CreateAssessmentTargetInput) (*inspector.CreateAssessmentTargetOutput, error) + + CreateAssessmentTemplateRequest(*inspector.CreateAssessmentTemplateInput) (*request.Request, *inspector.CreateAssessmentTemplateOutput) + + CreateAssessmentTemplate(*inspector.CreateAssessmentTemplateInput) (*inspector.CreateAssessmentTemplateOutput, error) + + CreateResourceGroupRequest(*inspector.CreateResourceGroupInput) (*request.Request, *inspector.CreateResourceGroupOutput) + + CreateResourceGroup(*inspector.CreateResourceGroupInput) (*inspector.CreateResourceGroupOutput, error) + + DeleteAssessmentRunRequest(*inspector.DeleteAssessmentRunInput) (*request.Request, *inspector.DeleteAssessmentRunOutput) + + DeleteAssessmentRun(*inspector.DeleteAssessmentRunInput) (*inspector.DeleteAssessmentRunOutput, error) + + DeleteAssessmentTargetRequest(*inspector.DeleteAssessmentTargetInput) (*request.Request, *inspector.DeleteAssessmentTargetOutput) + + DeleteAssessmentTarget(*inspector.DeleteAssessmentTargetInput) (*inspector.DeleteAssessmentTargetOutput, error) + + DeleteAssessmentTemplateRequest(*inspector.DeleteAssessmentTemplateInput) (*request.Request, *inspector.DeleteAssessmentTemplateOutput) + + DeleteAssessmentTemplate(*inspector.DeleteAssessmentTemplateInput) (*inspector.DeleteAssessmentTemplateOutput, error) + + DescribeAssessmentRunsRequest(*inspector.DescribeAssessmentRunsInput) (*request.Request, *inspector.DescribeAssessmentRunsOutput) + + DescribeAssessmentRuns(*inspector.DescribeAssessmentRunsInput) (*inspector.DescribeAssessmentRunsOutput, error) + + DescribeAssessmentTargetsRequest(*inspector.DescribeAssessmentTargetsInput) (*request.Request, *inspector.DescribeAssessmentTargetsOutput) + + DescribeAssessmentTargets(*inspector.DescribeAssessmentTargetsInput) (*inspector.DescribeAssessmentTargetsOutput, error) + + DescribeAssessmentTemplatesRequest(*inspector.DescribeAssessmentTemplatesInput) (*request.Request, *inspector.DescribeAssessmentTemplatesOutput) + + DescribeAssessmentTemplates(*inspector.DescribeAssessmentTemplatesInput) (*inspector.DescribeAssessmentTemplatesOutput, error) + + DescribeCrossAccountAccessRoleRequest(*inspector.DescribeCrossAccountAccessRoleInput) (*request.Request, *inspector.DescribeCrossAccountAccessRoleOutput) + + DescribeCrossAccountAccessRole(*inspector.DescribeCrossAccountAccessRoleInput) (*inspector.DescribeCrossAccountAccessRoleOutput, error) + + DescribeFindingsRequest(*inspector.DescribeFindingsInput) (*request.Request, *inspector.DescribeFindingsOutput) + + DescribeFindings(*inspector.DescribeFindingsInput) (*inspector.DescribeFindingsOutput, error) + + DescribeResourceGroupsRequest(*inspector.DescribeResourceGroupsInput) (*request.Request, *inspector.DescribeResourceGroupsOutput) + + DescribeResourceGroups(*inspector.DescribeResourceGroupsInput) (*inspector.DescribeResourceGroupsOutput, error) + + DescribeRulesPackagesRequest(*inspector.DescribeRulesPackagesInput) (*request.Request, *inspector.DescribeRulesPackagesOutput) + + DescribeRulesPackages(*inspector.DescribeRulesPackagesInput) 
(*inspector.DescribeRulesPackagesOutput, error) + + GetTelemetryMetadataRequest(*inspector.GetTelemetryMetadataInput) (*request.Request, *inspector.GetTelemetryMetadataOutput) + + GetTelemetryMetadata(*inspector.GetTelemetryMetadataInput) (*inspector.GetTelemetryMetadataOutput, error) + + ListAssessmentRunAgentsRequest(*inspector.ListAssessmentRunAgentsInput) (*request.Request, *inspector.ListAssessmentRunAgentsOutput) + + ListAssessmentRunAgents(*inspector.ListAssessmentRunAgentsInput) (*inspector.ListAssessmentRunAgentsOutput, error) + + ListAssessmentRunsRequest(*inspector.ListAssessmentRunsInput) (*request.Request, *inspector.ListAssessmentRunsOutput) + + ListAssessmentRuns(*inspector.ListAssessmentRunsInput) (*inspector.ListAssessmentRunsOutput, error) + + ListAssessmentTargetsRequest(*inspector.ListAssessmentTargetsInput) (*request.Request, *inspector.ListAssessmentTargetsOutput) + + ListAssessmentTargets(*inspector.ListAssessmentTargetsInput) (*inspector.ListAssessmentTargetsOutput, error) + + ListAssessmentTemplatesRequest(*inspector.ListAssessmentTemplatesInput) (*request.Request, *inspector.ListAssessmentTemplatesOutput) + + ListAssessmentTemplates(*inspector.ListAssessmentTemplatesInput) (*inspector.ListAssessmentTemplatesOutput, error) + + ListEventSubscriptionsRequest(*inspector.ListEventSubscriptionsInput) (*request.Request, *inspector.ListEventSubscriptionsOutput) + + ListEventSubscriptions(*inspector.ListEventSubscriptionsInput) (*inspector.ListEventSubscriptionsOutput, error) + + ListFindingsRequest(*inspector.ListFindingsInput) (*request.Request, *inspector.ListFindingsOutput) + + ListFindings(*inspector.ListFindingsInput) (*inspector.ListFindingsOutput, error) + + ListRulesPackagesRequest(*inspector.ListRulesPackagesInput) (*request.Request, *inspector.ListRulesPackagesOutput) + + ListRulesPackages(*inspector.ListRulesPackagesInput) (*inspector.ListRulesPackagesOutput, error) + + ListTagsForResourceRequest(*inspector.ListTagsForResourceInput) (*request.Request, *inspector.ListTagsForResourceOutput) + + ListTagsForResource(*inspector.ListTagsForResourceInput) (*inspector.ListTagsForResourceOutput, error) + + PreviewAgentsRequest(*inspector.PreviewAgentsInput) (*request.Request, *inspector.PreviewAgentsOutput) + + PreviewAgents(*inspector.PreviewAgentsInput) (*inspector.PreviewAgentsOutput, error) + + RegisterCrossAccountAccessRoleRequest(*inspector.RegisterCrossAccountAccessRoleInput) (*request.Request, *inspector.RegisterCrossAccountAccessRoleOutput) + + RegisterCrossAccountAccessRole(*inspector.RegisterCrossAccountAccessRoleInput) (*inspector.RegisterCrossAccountAccessRoleOutput, error) + + RemoveAttributesFromFindingsRequest(*inspector.RemoveAttributesFromFindingsInput) (*request.Request, *inspector.RemoveAttributesFromFindingsOutput) + + RemoveAttributesFromFindings(*inspector.RemoveAttributesFromFindingsInput) (*inspector.RemoveAttributesFromFindingsOutput, error) + + SetTagsForResourceRequest(*inspector.SetTagsForResourceInput) (*request.Request, *inspector.SetTagsForResourceOutput) + + SetTagsForResource(*inspector.SetTagsForResourceInput) (*inspector.SetTagsForResourceOutput, error) + + StartAssessmentRunRequest(*inspector.StartAssessmentRunInput) (*request.Request, *inspector.StartAssessmentRunOutput) + + StartAssessmentRun(*inspector.StartAssessmentRunInput) (*inspector.StartAssessmentRunOutput, error) + + StopAssessmentRunRequest(*inspector.StopAssessmentRunInput) (*request.Request, *inspector.StopAssessmentRunOutput) + + 
StopAssessmentRun(*inspector.StopAssessmentRunInput) (*inspector.StopAssessmentRunOutput, error) + + SubscribeToEventRequest(*inspector.SubscribeToEventInput) (*request.Request, *inspector.SubscribeToEventOutput) + + SubscribeToEvent(*inspector.SubscribeToEventInput) (*inspector.SubscribeToEventOutput, error) + + UnsubscribeFromEventRequest(*inspector.UnsubscribeFromEventInput) (*request.Request, *inspector.UnsubscribeFromEventOutput) + + UnsubscribeFromEvent(*inspector.UnsubscribeFromEventInput) (*inspector.UnsubscribeFromEventOutput, error) + + UpdateAssessmentTargetRequest(*inspector.UpdateAssessmentTargetInput) (*request.Request, *inspector.UpdateAssessmentTargetOutput) + + UpdateAssessmentTarget(*inspector.UpdateAssessmentTargetInput) (*inspector.UpdateAssessmentTargetOutput, error) +} + +var _ InspectorAPI = (*inspector.Inspector)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/inspector/service.go b/vendor/github.com/aws/aws-sdk-go/service/inspector/service.go new file mode 100644 index 000000000..4d099420b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/inspector/service.go @@ -0,0 +1,90 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package inspector + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// Amazon Inspector enables you to analyze the behavior of your AWS resources +// and to identify potential security issues. For more information, see Amazon +// Inspector User Guide (http://docs.aws.amazon.com/inspector/latest/userguide/inspector_introduction.html). +// The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type Inspector struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "inspector" + +// New creates a new instance of the Inspector client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create an Inspector client from just a session. +// svc := inspector.New(mySession) +// +// // Create an Inspector client with additional configuration +// svc := inspector.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *Inspector { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *Inspector { + svc := &Inspector{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2016-02-16", + JSONVersion: "1.1", + TargetPrefix: "InspectorService", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for an Inspector operation and runs any +// custom request initialization. +func (c *Inspector) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/iot/api.go b/vendor/github.com/aws/aws-sdk-go/service/iot/api.go new file mode 100644 index 000000000..5b73fc169 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/iot/api.go @@ -0,0 +1,6558 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package iot provides a client for AWS IoT. +package iot + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restjson" +) + +const opAcceptCertificateTransfer = "AcceptCertificateTransfer" + +// AcceptCertificateTransferRequest generates a "aws/request.Request" representing the +// client's request for the AcceptCertificateTransfer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AcceptCertificateTransfer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AcceptCertificateTransferRequest method. +// req, resp := client.AcceptCertificateTransferRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoT) AcceptCertificateTransferRequest(input *AcceptCertificateTransferInput) (req *request.Request, output *AcceptCertificateTransferOutput) { + op := &request.Operation{ + Name: opAcceptCertificateTransfer, + HTTPMethod: "PATCH", + HTTPPath: "/accept-certificate-transfer/{certificateId}", + } + + if input == nil { + input = &AcceptCertificateTransferInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AcceptCertificateTransferOutput{} + req.Data = output + return +} + +// Accepts a pending certificate transfer.
The default state of the certificate +// is INACTIVE. +// +// To check for pending certificate transfers, call ListCertificates to enumerate +// your certificates. +func (c *IoT) AcceptCertificateTransfer(input *AcceptCertificateTransferInput) (*AcceptCertificateTransferOutput, error) { + req, out := c.AcceptCertificateTransferRequest(input) + err := req.Send() + return out, err +} + +const opAttachPrincipalPolicy = "AttachPrincipalPolicy" + +// AttachPrincipalPolicyRequest generates a "aws/request.Request" representing the +// client's request for the AttachPrincipalPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AttachPrincipalPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AttachPrincipalPolicyRequest method. +// req, resp := client.AttachPrincipalPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoT) AttachPrincipalPolicyRequest(input *AttachPrincipalPolicyInput) (req *request.Request, output *AttachPrincipalPolicyOutput) { + op := &request.Operation{ + Name: opAttachPrincipalPolicy, + HTTPMethod: "PUT", + HTTPPath: "/principal-policies/{policyName}", + } + + if input == nil { + input = &AttachPrincipalPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AttachPrincipalPolicyOutput{} + req.Data = output + return +} + +// Attaches the specified policy to the specified principal (certificate or +// other credential). +func (c *IoT) AttachPrincipalPolicy(input *AttachPrincipalPolicyInput) (*AttachPrincipalPolicyOutput, error) { + req, out := c.AttachPrincipalPolicyRequest(input) + err := req.Send() + return out, err +} + +const opAttachThingPrincipal = "AttachThingPrincipal" + +// AttachThingPrincipalRequest generates a "aws/request.Request" representing the +// client's request for the AttachThingPrincipal operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AttachThingPrincipal method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AttachThingPrincipalRequest method. 
+// req, resp := client.AttachThingPrincipalRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoT) AttachThingPrincipalRequest(input *AttachThingPrincipalInput) (req *request.Request, output *AttachThingPrincipalOutput) { + op := &request.Operation{ + Name: opAttachThingPrincipal, + HTTPMethod: "PUT", + HTTPPath: "/things/{thingName}/principals", + } + + if input == nil { + input = &AttachThingPrincipalInput{} + } + + req = c.newRequest(op, input, output) + output = &AttachThingPrincipalOutput{} + req.Data = output + return +} + +// Attaches the specified principal to the specified thing. +func (c *IoT) AttachThingPrincipal(input *AttachThingPrincipalInput) (*AttachThingPrincipalOutput, error) { + req, out := c.AttachThingPrincipalRequest(input) + err := req.Send() + return out, err +} + +const opCancelCertificateTransfer = "CancelCertificateTransfer" + +// CancelCertificateTransferRequest generates a "aws/request.Request" representing the +// client's request for the CancelCertificateTransfer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CancelCertificateTransfer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CancelCertificateTransferRequest method. +// req, resp := client.CancelCertificateTransferRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoT) CancelCertificateTransferRequest(input *CancelCertificateTransferInput) (req *request.Request, output *CancelCertificateTransferOutput) { + op := &request.Operation{ + Name: opCancelCertificateTransfer, + HTTPMethod: "PATCH", + HTTPPath: "/cancel-certificate-transfer/{certificateId}", + } + + if input == nil { + input = &CancelCertificateTransferInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CancelCertificateTransferOutput{} + req.Data = output + return +} + +// Cancels a pending transfer for the specified certificate. +// +// Note Only the transfer source account can use this operation to cancel a +// transfer. (Transfer destinations can use RejectCertificateTransfer instead.) +// After transfer, AWS IoT returns the certificate to the source account in +// the INACTIVE state. After the destination account has accepted the transfer, +// the transfer cannot be cancelled. +// +// After a certificate transfer is cancelled, the status of the certificate +// changes from PENDING_TRANSFER to INACTIVE. 
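+//
+// A minimal illustrative sketch (an editor's sketch, not generated documentation),
+// assuming client is a configured *IoT; the certificate ID is a placeholder:
+//
+//    _, err := client.CancelCertificateTransfer(&CancelCertificateTransferInput{
+//        CertificateId: aws.String("placeholder-certificate-id"),
+//    })
+//    if err != nil {
+//        fmt.Println("cancel failed:", err)
+//    }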
+func (c *IoT) CancelCertificateTransfer(input *CancelCertificateTransferInput) (*CancelCertificateTransferOutput, error) { + req, out := c.CancelCertificateTransferRequest(input) + err := req.Send() + return out, err +} + +const opCreateCertificateFromCsr = "CreateCertificateFromCsr" + +// CreateCertificateFromCsrRequest generates a "aws/request.Request" representing the +// client's request for the CreateCertificateFromCsr operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateCertificateFromCsr method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateCertificateFromCsrRequest method. +// req, resp := client.CreateCertificateFromCsrRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoT) CreateCertificateFromCsrRequest(input *CreateCertificateFromCsrInput) (req *request.Request, output *CreateCertificateFromCsrOutput) { + op := &request.Operation{ + Name: opCreateCertificateFromCsr, + HTTPMethod: "POST", + HTTPPath: "/certificates", + } + + if input == nil { + input = &CreateCertificateFromCsrInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateCertificateFromCsrOutput{} + req.Data = output + return +} + +// Creates an X.509 certificate using the specified certificate signing request. +// +// Note Reusing the same certificate signing request (CSR) results in a distinct +// certificate. +// +// You can create multiple certificates in a batch by creating a directory, +// copying multiple .csr files into that directory, and then specifying that +// directory on the command line. The following commands show how to create +// a batch of certificates given a batch of CSRs. +// +// Assuming a set of CSRs are located inside of the directory my-csr-directory: +// +// On Linux and OS X, the command is: +// +// $ ls my-csr-directory/ | xargs -I {} aws iot create-certificate-from-csr +// --certificate-signing-request file://my-csr-directory/{} +// +// This command lists all of the CSRs in my-csr-directory and pipes each CSR +// file name to the aws iot create-certificate-from-csr AWS CLI command to create +// a certificate for the corresponding CSR. 
+// +// The aws iot create-certificate-from-csr part of the command can also be +// run in parallel to speed up the certificate creation process: +// +// $ ls my-csr-directory/ | xargs -P 10 -I {} aws iot create-certificate-from-csr +// --certificate-signing-request file://my-csr-directory/{} +// +// On Windows PowerShell, the command to create certificates for all CSRs +// in my-csr-directory is: +// +// > ls -Name my-csr-directory | %{aws iot create-certificate-from-csr --certificate-signing-request +// file://my-csr-directory/$_} +// +// On a Windows command prompt, the command to create certificates for all +// CSRs in my-csr-directory is: +// +// > forfiles /p my-csr-directory /c "cmd /c aws iot create-certificate-from-csr +// --certificate-signing-request file://@path" +func (c *IoT) CreateCertificateFromCsr(input *CreateCertificateFromCsrInput) (*CreateCertificateFromCsrOutput, error) { + req, out := c.CreateCertificateFromCsrRequest(input) + err := req.Send() + return out, err +} + +const opCreateKeysAndCertificate = "CreateKeysAndCertificate" + +// CreateKeysAndCertificateRequest generates a "aws/request.Request" representing the +// client's request for the CreateKeysAndCertificate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateKeysAndCertificate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateKeysAndCertificateRequest method. +// req, resp := client.CreateKeysAndCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoT) CreateKeysAndCertificateRequest(input *CreateKeysAndCertificateInput) (req *request.Request, output *CreateKeysAndCertificateOutput) { + op := &request.Operation{ + Name: opCreateKeysAndCertificate, + HTTPMethod: "POST", + HTTPPath: "/keys-and-certificate", + } + + if input == nil { + input = &CreateKeysAndCertificateInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateKeysAndCertificateOutput{} + req.Data = output + return +} + +// Creates a 2048-bit RSA key pair and issues an X.509 certificate using the +// issued public key. +// +// Note This is the only time AWS IoT issues the private key for this certificate, +// so it is important to keep it in a secure location. +func (c *IoT) CreateKeysAndCertificate(input *CreateKeysAndCertificateInput) (*CreateKeysAndCertificateOutput, error) { + req, out := c.CreateKeysAndCertificateRequest(input) + err := req.Send() + return out, err +} + +const opCreatePolicy = "CreatePolicy" + +// CreatePolicyRequest generates a "aws/request.Request" representing the +// client's request for the CreatePolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the CreatePolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreatePolicyRequest method. +// req, resp := client.CreatePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoT) CreatePolicyRequest(input *CreatePolicyInput) (req *request.Request, output *CreatePolicyOutput) { + op := &request.Operation{ + Name: opCreatePolicy, + HTTPMethod: "POST", + HTTPPath: "/policies/{policyName}", + } + + if input == nil { + input = &CreatePolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &CreatePolicyOutput{} + req.Data = output + return +} + +// Creates an AWS IoT policy. +// +// The created policy is the default version for the policy. This operation +// creates a policy version with a version identifier of 1 and sets 1 as the +// policy's default version. +func (c *IoT) CreatePolicy(input *CreatePolicyInput) (*CreatePolicyOutput, error) { + req, out := c.CreatePolicyRequest(input) + err := req.Send() + return out, err +} + +const opCreatePolicyVersion = "CreatePolicyVersion" + +// CreatePolicyVersionRequest generates a "aws/request.Request" representing the +// client's request for the CreatePolicyVersion operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreatePolicyVersion method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreatePolicyVersionRequest method. +// req, resp := client.CreatePolicyVersionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoT) CreatePolicyVersionRequest(input *CreatePolicyVersionInput) (req *request.Request, output *CreatePolicyVersionOutput) { + op := &request.Operation{ + Name: opCreatePolicyVersion, + HTTPMethod: "POST", + HTTPPath: "/policies/{policyName}/version", + } + + if input == nil { + input = &CreatePolicyVersionInput{} + } + + req = c.newRequest(op, input, output) + output = &CreatePolicyVersionOutput{} + req.Data = output + return +} + +// Creates a new version of the specified AWS IoT policy. To update a policy, +// create a new policy version. A managed policy can have up to five versions. +// If the policy has five versions, you must use DeletePolicyVersion to delete +// an existing version before you create a new one. +// +// Optionally, you can set the new version as the policy's default version. +// The default version is the operative version (that is, the version that is +// in effect for the certificates to which the policy is attached). 
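+//
+// A minimal illustrative sketch (an editor's sketch, not generated documentation),
+// assuming a configured client; the policy name and document are placeholders:
+//
+//    resp, err := client.CreatePolicyVersion(&CreatePolicyVersionInput{
+//        PolicyName:     aws.String("MyIotPolicy"), // placeholder
+//        PolicyDocument: aws.String(`{"Version":"2012-10-17","Statement":[]}`),
+//        SetAsDefault:   aws.Bool(true), // make the new version the operative one
+//    })
+//    if err == nil {
+//        fmt.Println("new default version:", *resp.PolicyVersionId)
+//    }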
+func (c *IoT) CreatePolicyVersion(input *CreatePolicyVersionInput) (*CreatePolicyVersionOutput, error) { + req, out := c.CreatePolicyVersionRequest(input) + err := req.Send() + return out, err +} + +const opCreateThing = "CreateThing" + +// CreateThingRequest generates a "aws/request.Request" representing the +// client's request for the CreateThing operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateThing method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateThingRequest method. +// req, resp := client.CreateThingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoT) CreateThingRequest(input *CreateThingInput) (req *request.Request, output *CreateThingOutput) { + op := &request.Operation{ + Name: opCreateThing, + HTTPMethod: "POST", + HTTPPath: "/things/{thingName}", + } + + if input == nil { + input = &CreateThingInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateThingOutput{} + req.Data = output + return +} + +// Creates a thing in the Thing Registry. +func (c *IoT) CreateThing(input *CreateThingInput) (*CreateThingOutput, error) { + req, out := c.CreateThingRequest(input) + err := req.Send() + return out, err +} + +const opCreateTopicRule = "CreateTopicRule" + +// CreateTopicRuleRequest generates a "aws/request.Request" representing the +// client's request for the CreateTopicRule operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateTopicRule method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateTopicRuleRequest method. +// req, resp := client.CreateTopicRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoT) CreateTopicRuleRequest(input *CreateTopicRuleInput) (req *request.Request, output *CreateTopicRuleOutput) { + op := &request.Operation{ + Name: opCreateTopicRule, + HTTPMethod: "POST", + HTTPPath: "/rules/{ruleName}", + } + + if input == nil { + input = &CreateTopicRuleInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CreateTopicRuleOutput{} + req.Data = output + return +} + +// Creates a rule. Creating rules is an administrator-level action. Any user +// who has permission to create rules will be able to access data processed +// by the rule. 
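+//
+// A minimal illustrative sketch (an editor's sketch, not generated documentation);
+// the rule name, SQL statement, and Lambda function ARN are placeholders:
+//
+//    _, err := client.CreateTopicRule(&CreateTopicRuleInput{
+//        RuleName: aws.String("ForwardTemperature"), // placeholder
+//        TopicRulePayload: &TopicRulePayload{
+//            Sql: aws.String("SELECT * FROM 'sensors/temperature'"),
+//            Actions: []*Action{{
+//                Lambda: &LambdaAction{FunctionArn: aws.String("arn:aws:lambda:us-east-1:123456789012:function:Placeholder")},
+//            }},
+//        },
+//    })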
+func (c *IoT) CreateTopicRule(input *CreateTopicRuleInput) (*CreateTopicRuleOutput, error) { + req, out := c.CreateTopicRuleRequest(input) + err := req.Send() + return out, err +} + +const opDeleteCACertificate = "DeleteCACertificate" + +// DeleteCACertificateRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCACertificate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteCACertificate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteCACertificateRequest method. +// req, resp := client.DeleteCACertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoT) DeleteCACertificateRequest(input *DeleteCACertificateInput) (req *request.Request, output *DeleteCACertificateOutput) { + op := &request.Operation{ + Name: opDeleteCACertificate, + HTTPMethod: "DELETE", + HTTPPath: "/cacertificate/{caCertificateId}", + } + + if input == nil { + input = &DeleteCACertificateInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteCACertificateOutput{} + req.Data = output + return +} + +// Deletes a registered CA certificate. +func (c *IoT) DeleteCACertificate(input *DeleteCACertificateInput) (*DeleteCACertificateOutput, error) { + req, out := c.DeleteCACertificateRequest(input) + err := req.Send() + return out, err +} + +const opDeleteCertificate = "DeleteCertificate" + +// DeleteCertificateRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCertificate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteCertificate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteCertificateRequest method. +// req, resp := client.DeleteCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoT) DeleteCertificateRequest(input *DeleteCertificateInput) (req *request.Request, output *DeleteCertificateOutput) { + op := &request.Operation{ + Name: opDeleteCertificate, + HTTPMethod: "DELETE", + HTTPPath: "/certificates/{certificateId}", + } + + if input == nil { + input = &DeleteCertificateInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteCertificateOutput{} + req.Data = output + return +} + +// Deletes the specified certificate. 
+// +// A certificate cannot be deleted if it has a policy attached to it or if +// its status is set to ACTIVE. To delete a certificate, first use the DetachPrincipalPolicy +// API to detach all policies. Next, use the UpdateCertificate API to set the +// certificate to the INACTIVE status. +func (c *IoT) DeleteCertificate(input *DeleteCertificateInput) (*DeleteCertificateOutput, error) { + req, out := c.DeleteCertificateRequest(input) + err := req.Send() + return out, err +} + +const opDeletePolicy = "DeletePolicy" + +// DeletePolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeletePolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeletePolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeletePolicyRequest method. +// req, resp := client.DeletePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoT) DeletePolicyRequest(input *DeletePolicyInput) (req *request.Request, output *DeletePolicyOutput) { + op := &request.Operation{ + Name: opDeletePolicy, + HTTPMethod: "DELETE", + HTTPPath: "/policies/{policyName}", + } + + if input == nil { + input = &DeletePolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeletePolicyOutput{} + req.Data = output + return +} + +// Deletes the specified policy. +// +// A policy cannot be deleted if it has non-default versions or it is attached +// to any certificate. +// +// To delete a policy, use the DeletePolicyVersion API to delete all non-default +// versions of the policy; use the DetachPrincipalPolicy API to detach the policy +// from any certificate; and then use the DeletePolicy API to delete the policy. +// +// When a policy is deleted using DeletePolicy, its default version is deleted +// with it. +func (c *IoT) DeletePolicy(input *DeletePolicyInput) (*DeletePolicyOutput, error) { + req, out := c.DeletePolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeletePolicyVersion = "DeletePolicyVersion" + +// DeletePolicyVersionRequest generates a "aws/request.Request" representing the +// client's request for the DeletePolicyVersion operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeletePolicyVersion method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeletePolicyVersionRequest method. 
+// req, resp := client.DeletePolicyVersionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoT) DeletePolicyVersionRequest(input *DeletePolicyVersionInput) (req *request.Request, output *DeletePolicyVersionOutput) { + op := &request.Operation{ + Name: opDeletePolicyVersion, + HTTPMethod: "DELETE", + HTTPPath: "/policies/{policyName}/version/{policyVersionId}", + } + + if input == nil { + input = &DeletePolicyVersionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeletePolicyVersionOutput{} + req.Data = output + return +} + +// Deletes the specified version of the specified policy. You cannot delete +// the default version of a policy using this API. To delete the default version +// of a policy, use DeletePolicy. To find out which version of a policy is marked +// as the default version, use ListPolicyVersions. +func (c *IoT) DeletePolicyVersion(input *DeletePolicyVersionInput) (*DeletePolicyVersionOutput, error) { + req, out := c.DeletePolicyVersionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteRegistrationCode = "DeleteRegistrationCode" + +// DeleteRegistrationCodeRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRegistrationCode operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteRegistrationCode method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteRegistrationCodeRequest method. +// req, resp := client.DeleteRegistrationCodeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoT) DeleteRegistrationCodeRequest(input *DeleteRegistrationCodeInput) (req *request.Request, output *DeleteRegistrationCodeOutput) { + op := &request.Operation{ + Name: opDeleteRegistrationCode, + HTTPMethod: "DELETE", + HTTPPath: "/registrationcode", + } + + if input == nil { + input = &DeleteRegistrationCodeInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteRegistrationCodeOutput{} + req.Data = output + return +} + +// Deletes a CA certificate registration code. +func (c *IoT) DeleteRegistrationCode(input *DeleteRegistrationCodeInput) (*DeleteRegistrationCodeOutput, error) { + req, out := c.DeleteRegistrationCodeRequest(input) + err := req.Send() + return out, err +} + +const opDeleteThing = "DeleteThing" + +// DeleteThingRequest generates a "aws/request.Request" representing the +// client's request for the DeleteThing operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteThing method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteThingRequest method. +// req, resp := client.DeleteThingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoT) DeleteThingRequest(input *DeleteThingInput) (req *request.Request, output *DeleteThingOutput) { + op := &request.Operation{ + Name: opDeleteThing, + HTTPMethod: "DELETE", + HTTPPath: "/things/{thingName}", + } + + if input == nil { + input = &DeleteThingInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteThingOutput{} + req.Data = output + return +} + +// Deletes the specified thing from the Thing Registry. +func (c *IoT) DeleteThing(input *DeleteThingInput) (*DeleteThingOutput, error) { + req, out := c.DeleteThingRequest(input) + err := req.Send() + return out, err +} + +const opDeleteTopicRule = "DeleteTopicRule" + +// DeleteTopicRuleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTopicRule operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteTopicRule method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteTopicRuleRequest method. +// req, resp := client.DeleteTopicRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoT) DeleteTopicRuleRequest(input *DeleteTopicRuleInput) (req *request.Request, output *DeleteTopicRuleOutput) { + op := &request.Operation{ + Name: opDeleteTopicRule, + HTTPMethod: "DELETE", + HTTPPath: "/rules/{ruleName}", + } + + if input == nil { + input = &DeleteTopicRuleInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteTopicRuleOutput{} + req.Data = output + return +} + +// Deletes the specified rule. +func (c *IoT) DeleteTopicRule(input *DeleteTopicRuleInput) (*DeleteTopicRuleOutput, error) { + req, out := c.DeleteTopicRuleRequest(input) + err := req.Send() + return out, err +} + +const opDescribeCACertificate = "DescribeCACertificate" + +// DescribeCACertificateRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCACertificate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeCACertificate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeCACertificateRequest method. +// req, resp := client.DescribeCACertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoT) DescribeCACertificateRequest(input *DescribeCACertificateInput) (req *request.Request, output *DescribeCACertificateOutput) { + op := &request.Operation{ + Name: opDescribeCACertificate, + HTTPMethod: "GET", + HTTPPath: "/cacertificate/{caCertificateId}", + } + + if input == nil { + input = &DescribeCACertificateInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCACertificateOutput{} + req.Data = output + return +} + +// Describes a registered CA certificate. +func (c *IoT) DescribeCACertificate(input *DescribeCACertificateInput) (*DescribeCACertificateOutput, error) { + req, out := c.DescribeCACertificateRequest(input) + err := req.Send() + return out, err +} + +const opDescribeCertificate = "DescribeCertificate" + +// DescribeCertificateRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCertificate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeCertificate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeCertificateRequest method. +// req, resp := client.DescribeCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoT) DescribeCertificateRequest(input *DescribeCertificateInput) (req *request.Request, output *DescribeCertificateOutput) { + op := &request.Operation{ + Name: opDescribeCertificate, + HTTPMethod: "GET", + HTTPPath: "/certificates/{certificateId}", + } + + if input == nil { + input = &DescribeCertificateInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCertificateOutput{} + req.Data = output + return +} + +// Gets information about the specified certificate. +func (c *IoT) DescribeCertificate(input *DescribeCertificateInput) (*DescribeCertificateOutput, error) { + req, out := c.DescribeCertificateRequest(input) + err := req.Send() + return out, err +} + +const opDescribeEndpoint = "DescribeEndpoint" + +// DescribeEndpointRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEndpoint operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeEndpoint method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeEndpointRequest method. +// req, resp := client.DescribeEndpointRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoT) DescribeEndpointRequest(input *DescribeEndpointInput) (req *request.Request, output *DescribeEndpointOutput) { + op := &request.Operation{ + Name: opDescribeEndpoint, + HTTPMethod: "GET", + HTTPPath: "/endpoint", + } + + if input == nil { + input = &DescribeEndpointInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeEndpointOutput{} + req.Data = output + return +} + +// Returns a unique endpoint specific to the AWS account making the call. +func (c *IoT) DescribeEndpoint(input *DescribeEndpointInput) (*DescribeEndpointOutput, error) { + req, out := c.DescribeEndpointRequest(input) + err := req.Send() + return out, err +} + +const opDescribeThing = "DescribeThing" + +// DescribeThingRequest generates a "aws/request.Request" representing the +// client's request for the DescribeThing operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeThing method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeThingRequest method. +// req, resp := client.DescribeThingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoT) DescribeThingRequest(input *DescribeThingInput) (req *request.Request, output *DescribeThingOutput) { + op := &request.Operation{ + Name: opDescribeThing, + HTTPMethod: "GET", + HTTPPath: "/things/{thingName}", + } + + if input == nil { + input = &DescribeThingInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeThingOutput{} + req.Data = output + return +} + +// Gets information about the specified thing. +func (c *IoT) DescribeThing(input *DescribeThingInput) (*DescribeThingOutput, error) { + req, out := c.DescribeThingRequest(input) + err := req.Send() + return out, err +} + +const opDetachPrincipalPolicy = "DetachPrincipalPolicy" + +// DetachPrincipalPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DetachPrincipalPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DetachPrincipalPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DetachPrincipalPolicyRequest method. +// req, resp := client.DetachPrincipalPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoT) DetachPrincipalPolicyRequest(input *DetachPrincipalPolicyInput) (req *request.Request, output *DetachPrincipalPolicyOutput) { + op := &request.Operation{ + Name: opDetachPrincipalPolicy, + HTTPMethod: "DELETE", + HTTPPath: "/principal-policies/{policyName}", + } + + if input == nil { + input = &DetachPrincipalPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DetachPrincipalPolicyOutput{} + req.Data = output + return +} + +// Removes the specified policy from the specified certificate. +func (c *IoT) DetachPrincipalPolicy(input *DetachPrincipalPolicyInput) (*DetachPrincipalPolicyOutput, error) { + req, out := c.DetachPrincipalPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDetachThingPrincipal = "DetachThingPrincipal" + +// DetachThingPrincipalRequest generates a "aws/request.Request" representing the +// client's request for the DetachThingPrincipal operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DetachThingPrincipal method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DetachThingPrincipalRequest method. +// req, resp := client.DetachThingPrincipalRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoT) DetachThingPrincipalRequest(input *DetachThingPrincipalInput) (req *request.Request, output *DetachThingPrincipalOutput) { + op := &request.Operation{ + Name: opDetachThingPrincipal, + HTTPMethod: "DELETE", + HTTPPath: "/things/{thingName}/principals", + } + + if input == nil { + input = &DetachThingPrincipalInput{} + } + + req = c.newRequest(op, input, output) + output = &DetachThingPrincipalOutput{} + req.Data = output + return +} + +// Detaches the specified principal from the specified thing. +func (c *IoT) DetachThingPrincipal(input *DetachThingPrincipalInput) (*DetachThingPrincipalOutput, error) { + req, out := c.DetachThingPrincipalRequest(input) + err := req.Send() + return out, err +} + +const opDisableTopicRule = "DisableTopicRule" + +// DisableTopicRuleRequest generates a "aws/request.Request" representing the +// client's request for the DisableTopicRule operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DisableTopicRule method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DisableTopicRuleRequest method. +// req, resp := client.DisableTopicRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoT) DisableTopicRuleRequest(input *DisableTopicRuleInput) (req *request.Request, output *DisableTopicRuleOutput) { + op := &request.Operation{ + Name: opDisableTopicRule, + HTTPMethod: "POST", + HTTPPath: "/rules/{ruleName}/disable", + } + + if input == nil { + input = &DisableTopicRuleInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DisableTopicRuleOutput{} + req.Data = output + return +} + +// Disables the specified rule. +func (c *IoT) DisableTopicRule(input *DisableTopicRuleInput) (*DisableTopicRuleOutput, error) { + req, out := c.DisableTopicRuleRequest(input) + err := req.Send() + return out, err +} + +const opEnableTopicRule = "EnableTopicRule" + +// EnableTopicRuleRequest generates a "aws/request.Request" representing the +// client's request for the EnableTopicRule operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the EnableTopicRule method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EnableTopicRuleRequest method. +// req, resp := client.EnableTopicRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoT) EnableTopicRuleRequest(input *EnableTopicRuleInput) (req *request.Request, output *EnableTopicRuleOutput) { + op := &request.Operation{ + Name: opEnableTopicRule, + HTTPMethod: "POST", + HTTPPath: "/rules/{ruleName}/enable", + } + + if input == nil { + input = &EnableTopicRuleInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &EnableTopicRuleOutput{} + req.Data = output + return +} + +// Enables the specified rule. 
+func (c *IoT) EnableTopicRule(input *EnableTopicRuleInput) (*EnableTopicRuleOutput, error) { + req, out := c.EnableTopicRuleRequest(input) + err := req.Send() + return out, err +} + +const opGetLoggingOptions = "GetLoggingOptions" + +// GetLoggingOptionsRequest generates a "aws/request.Request" representing the +// client's request for the GetLoggingOptions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetLoggingOptions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetLoggingOptionsRequest method. +// req, resp := client.GetLoggingOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoT) GetLoggingOptionsRequest(input *GetLoggingOptionsInput) (req *request.Request, output *GetLoggingOptionsOutput) { + op := &request.Operation{ + Name: opGetLoggingOptions, + HTTPMethod: "GET", + HTTPPath: "/loggingOptions", + } + + if input == nil { + input = &GetLoggingOptionsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetLoggingOptionsOutput{} + req.Data = output + return +} + +// Gets the logging options. +func (c *IoT) GetLoggingOptions(input *GetLoggingOptionsInput) (*GetLoggingOptionsOutput, error) { + req, out := c.GetLoggingOptionsRequest(input) + err := req.Send() + return out, err +} + +const opGetPolicy = "GetPolicy" + +// GetPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetPolicyRequest method. +// req, resp := client.GetPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoT) GetPolicyRequest(input *GetPolicyInput) (req *request.Request, output *GetPolicyOutput) { + op := &request.Operation{ + Name: opGetPolicy, + HTTPMethod: "GET", + HTTPPath: "/policies/{policyName}", + } + + if input == nil { + input = &GetPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetPolicyOutput{} + req.Data = output + return +} + +// Gets information about the specified policy with the policy document of the +// default version. 
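+//
+// A minimal illustrative sketch (an editor's sketch, not generated documentation),
+// printing the default version's document; the policy name is a placeholder:
+//
+//    resp, err := client.GetPolicy(&GetPolicyInput{
+//        PolicyName: aws.String("MyIotPolicy"), // placeholder
+//    })
+//    if err == nil {
+//        fmt.Println(*resp.DefaultVersionId, *resp.PolicyDocument)
+//    }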
+func (c *IoT) GetPolicy(input *GetPolicyInput) (*GetPolicyOutput, error) { + req, out := c.GetPolicyRequest(input) + err := req.Send() + return out, err +} + +const opGetPolicyVersion = "GetPolicyVersion" + +// GetPolicyVersionRequest generates a "aws/request.Request" representing the +// client's request for the GetPolicyVersion operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetPolicyVersion method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetPolicyVersionRequest method. +// req, resp := client.GetPolicyVersionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoT) GetPolicyVersionRequest(input *GetPolicyVersionInput) (req *request.Request, output *GetPolicyVersionOutput) { + op := &request.Operation{ + Name: opGetPolicyVersion, + HTTPMethod: "GET", + HTTPPath: "/policies/{policyName}/version/{policyVersionId}", + } + + if input == nil { + input = &GetPolicyVersionInput{} + } + + req = c.newRequest(op, input, output) + output = &GetPolicyVersionOutput{} + req.Data = output + return +} + +// Gets information about the specified policy version. +func (c *IoT) GetPolicyVersion(input *GetPolicyVersionInput) (*GetPolicyVersionOutput, error) { + req, out := c.GetPolicyVersionRequest(input) + err := req.Send() + return out, err +} + +const opGetRegistrationCode = "GetRegistrationCode" + +// GetRegistrationCodeRequest generates a "aws/request.Request" representing the +// client's request for the GetRegistrationCode operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetRegistrationCode method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetRegistrationCodeRequest method. +// req, resp := client.GetRegistrationCodeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoT) GetRegistrationCodeRequest(input *GetRegistrationCodeInput) (req *request.Request, output *GetRegistrationCodeOutput) { + op := &request.Operation{ + Name: opGetRegistrationCode, + HTTPMethod: "GET", + HTTPPath: "/registrationcode", + } + + if input == nil { + input = &GetRegistrationCodeInput{} + } + + req = c.newRequest(op, input, output) + output = &GetRegistrationCodeOutput{} + req.Data = output + return +} + +// Gets a registration code used to register a CA certificate with AWS IoT. 
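+//
+// A minimal illustrative sketch (an editor's sketch, not generated documentation);
+// the returned code is typically used as the Common Name of the verification
+// CSR when registering a CA certificate:
+//
+//    resp, err := client.GetRegistrationCode(&GetRegistrationCodeInput{})
+//    if err == nil {
+//        fmt.Println("registration code:", *resp.RegistrationCode)
+//    }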
+func (c *IoT) GetRegistrationCode(input *GetRegistrationCodeInput) (*GetRegistrationCodeOutput, error) { + req, out := c.GetRegistrationCodeRequest(input) + err := req.Send() + return out, err +} + +const opGetTopicRule = "GetTopicRule" + +// GetTopicRuleRequest generates a "aws/request.Request" representing the +// client's request for the GetTopicRule operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetTopicRule method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetTopicRuleRequest method. +// req, resp := client.GetTopicRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoT) GetTopicRuleRequest(input *GetTopicRuleInput) (req *request.Request, output *GetTopicRuleOutput) { + op := &request.Operation{ + Name: opGetTopicRule, + HTTPMethod: "GET", + HTTPPath: "/rules/{ruleName}", + } + + if input == nil { + input = &GetTopicRuleInput{} + } + + req = c.newRequest(op, input, output) + output = &GetTopicRuleOutput{} + req.Data = output + return +} + +// Gets information about the specified rule. +func (c *IoT) GetTopicRule(input *GetTopicRuleInput) (*GetTopicRuleOutput, error) { + req, out := c.GetTopicRuleRequest(input) + err := req.Send() + return out, err +} + +const opListCACertificates = "ListCACertificates" + +// ListCACertificatesRequest generates a "aws/request.Request" representing the +// client's request for the ListCACertificates operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListCACertificates method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListCACertificatesRequest method. +// req, resp := client.ListCACertificatesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoT) ListCACertificatesRequest(input *ListCACertificatesInput) (req *request.Request, output *ListCACertificatesOutput) { + op := &request.Operation{ + Name: opListCACertificates, + HTTPMethod: "GET", + HTTPPath: "/cacertificates", + } + + if input == nil { + input = &ListCACertificatesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListCACertificatesOutput{} + req.Data = output + return +} + +// Lists the CA certificates registered for your AWS account. +// +// The results are paginated with a default page size of 25. You can use the +// returned marker to retrieve additional results. 
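+//
+// A minimal illustrative sketch (an editor's sketch, not generated documentation)
+// of walking every page with the returned marker:
+//
+//    params := &ListCACertificatesInput{}
+//    for {
+//        resp, err := client.ListCACertificates(params)
+//        if err != nil {
+//            break
+//        }
+//        fmt.Println(len(resp.Certificates), "CA certificates on this page")
+//        if resp.NextMarker == nil {
+//            break
+//        }
+//        params.Marker = resp.NextMarker
+//    }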
+func (c *IoT) ListCACertificates(input *ListCACertificatesInput) (*ListCACertificatesOutput, error) { + req, out := c.ListCACertificatesRequest(input) + err := req.Send() + return out, err +} + +const opListCertificates = "ListCertificates" + +// ListCertificatesRequest generates a "aws/request.Request" representing the +// client's request for the ListCertificates operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListCertificates method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListCertificatesRequest method. +// req, resp := client.ListCertificatesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoT) ListCertificatesRequest(input *ListCertificatesInput) (req *request.Request, output *ListCertificatesOutput) { + op := &request.Operation{ + Name: opListCertificates, + HTTPMethod: "GET", + HTTPPath: "/certificates", + } + + if input == nil { + input = &ListCertificatesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListCertificatesOutput{} + req.Data = output + return +} + +// Lists the certificates registered in your AWS account. +// +// The results are paginated with a default page size of 25. You can use the +// returned marker to retrieve additional results. +func (c *IoT) ListCertificates(input *ListCertificatesInput) (*ListCertificatesOutput, error) { + req, out := c.ListCertificatesRequest(input) + err := req.Send() + return out, err +} + +const opListCertificatesByCA = "ListCertificatesByCA" + +// ListCertificatesByCARequest generates a "aws/request.Request" representing the +// client's request for the ListCertificatesByCA operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListCertificatesByCA method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListCertificatesByCARequest method. 
+//    req, resp := client.ListCertificatesByCARequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *IoT) ListCertificatesByCARequest(input *ListCertificatesByCAInput) (req *request.Request, output *ListCertificatesByCAOutput) {
+	op := &request.Operation{
+		Name:       opListCertificatesByCA,
+		HTTPMethod: "GET",
+		HTTPPath:   "/certificates-by-ca/{caCertificateId}",
+	}
+
+	if input == nil {
+		input = &ListCertificatesByCAInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ListCertificatesByCAOutput{}
+	req.Data = output
+	return
+}
+
+// Lists the device certificates signed by the specified CA certificate.
+func (c *IoT) ListCertificatesByCA(input *ListCertificatesByCAInput) (*ListCertificatesByCAOutput, error) {
+	req, out := c.ListCertificatesByCARequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opListPolicies = "ListPolicies"
+
+// ListPoliciesRequest generates a "aws/request.Request" representing the
+// client's request for the ListPolicies operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ListPolicies method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the ListPoliciesRequest method.
+//    req, resp := client.ListPoliciesRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *IoT) ListPoliciesRequest(input *ListPoliciesInput) (req *request.Request, output *ListPoliciesOutput) {
+	op := &request.Operation{
+		Name:       opListPolicies,
+		HTTPMethod: "GET",
+		HTTPPath:   "/policies",
+	}
+
+	if input == nil {
+		input = &ListPoliciesInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ListPoliciesOutput{}
+	req.Data = output
+	return
+}
+
+// Lists your policies.
+func (c *IoT) ListPolicies(input *ListPoliciesInput) (*ListPoliciesOutput, error) {
+	req, out := c.ListPoliciesRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opListPolicyPrincipals = "ListPolicyPrincipals"
+
+// ListPolicyPrincipalsRequest generates a "aws/request.Request" representing the
+// client's request for the ListPolicyPrincipals operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ListPolicyPrincipals method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the ListPolicyPrincipalsRequest method.
+// req, resp := client.ListPolicyPrincipalsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoT) ListPolicyPrincipalsRequest(input *ListPolicyPrincipalsInput) (req *request.Request, output *ListPolicyPrincipalsOutput) { + op := &request.Operation{ + Name: opListPolicyPrincipals, + HTTPMethod: "GET", + HTTPPath: "/policy-principals", + } + + if input == nil { + input = &ListPolicyPrincipalsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListPolicyPrincipalsOutput{} + req.Data = output + return +} + +// Lists the principals associated with the specified policy. +func (c *IoT) ListPolicyPrincipals(input *ListPolicyPrincipalsInput) (*ListPolicyPrincipalsOutput, error) { + req, out := c.ListPolicyPrincipalsRequest(input) + err := req.Send() + return out, err +} + +const opListPolicyVersions = "ListPolicyVersions" + +// ListPolicyVersionsRequest generates a "aws/request.Request" representing the +// client's request for the ListPolicyVersions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListPolicyVersions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListPolicyVersionsRequest method. +// req, resp := client.ListPolicyVersionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoT) ListPolicyVersionsRequest(input *ListPolicyVersionsInput) (req *request.Request, output *ListPolicyVersionsOutput) { + op := &request.Operation{ + Name: opListPolicyVersions, + HTTPMethod: "GET", + HTTPPath: "/policies/{policyName}/version", + } + + if input == nil { + input = &ListPolicyVersionsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListPolicyVersionsOutput{} + req.Data = output + return +} + +// Lists the versions of the specified policy and identifies the default version. +func (c *IoT) ListPolicyVersions(input *ListPolicyVersionsInput) (*ListPolicyVersionsOutput, error) { + req, out := c.ListPolicyVersionsRequest(input) + err := req.Send() + return out, err +} + +const opListPrincipalPolicies = "ListPrincipalPolicies" + +// ListPrincipalPoliciesRequest generates a "aws/request.Request" representing the +// client's request for the ListPrincipalPolicies operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListPrincipalPolicies method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListPrincipalPoliciesRequest method. 
+// req, resp := client.ListPrincipalPoliciesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *IoT) ListPrincipalPoliciesRequest(input *ListPrincipalPoliciesInput) (req *request.Request, output *ListPrincipalPoliciesOutput) {
+ op := &request.Operation{
+ Name: opListPrincipalPolicies,
+ HTTPMethod: "GET",
+ HTTPPath: "/principal-policies",
+ }
+
+ if input == nil {
+ input = &ListPrincipalPoliciesInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ListPrincipalPoliciesOutput{}
+ req.Data = output
+ return
+}
+
+// Lists the policies attached to the specified principal. If you use a Cognito
+// identity, the ID must be in Amazon Cognito Identity format (http://docs.aws.amazon.com/cognitoidentity/latest/APIReference/API_GetCredentialsForIdentity.html#API_GetCredentialsForIdentity_RequestSyntax).
+func (c *IoT) ListPrincipalPolicies(input *ListPrincipalPoliciesInput) (*ListPrincipalPoliciesOutput, error) {
+ req, out := c.ListPrincipalPoliciesRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opListPrincipalThings = "ListPrincipalThings"
+
+// ListPrincipalThingsRequest generates a "aws/request.Request" representing the
+// client's request for the ListPrincipalThings operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ListPrincipalThings method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the ListPrincipalThingsRequest method.
+// req, resp := client.ListPrincipalThingsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *IoT) ListPrincipalThingsRequest(input *ListPrincipalThingsInput) (req *request.Request, output *ListPrincipalThingsOutput) {
+ op := &request.Operation{
+ Name: opListPrincipalThings,
+ HTTPMethod: "GET",
+ HTTPPath: "/principals/things",
+ }
+
+ if input == nil {
+ input = &ListPrincipalThingsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ListPrincipalThingsOutput{}
+ req.Data = output
+ return
+}
+
+// Lists the things associated with the specified principal.
+func (c *IoT) ListPrincipalThings(input *ListPrincipalThingsInput) (*ListPrincipalThingsOutput, error) {
+ req, out := c.ListPrincipalThingsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opListThingPrincipals = "ListThingPrincipals"
+
+// ListThingPrincipalsRequest generates a "aws/request.Request" representing the
+// client's request for the ListThingPrincipals operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ListThingPrincipals method directly
+// instead.
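+//
+// As an illustrative sketch only (not part of the generated documentation),
+// a custom handler could be attached to the returned request to inspect the
+// HTTP call before it is sent:
+//
+// req, resp := client.ListThingPrincipalsRequest(params)
+// req.Handlers.Send.PushFront(func(r *request.Request) {
+// fmt.Println(r.HTTPRequest.URL) // log the target URL before sending
+// })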
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListThingPrincipalsRequest method. +// req, resp := client.ListThingPrincipalsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoT) ListThingPrincipalsRequest(input *ListThingPrincipalsInput) (req *request.Request, output *ListThingPrincipalsOutput) { + op := &request.Operation{ + Name: opListThingPrincipals, + HTTPMethod: "GET", + HTTPPath: "/things/{thingName}/principals", + } + + if input == nil { + input = &ListThingPrincipalsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListThingPrincipalsOutput{} + req.Data = output + return +} + +// Lists the principals associated with the specified thing. +func (c *IoT) ListThingPrincipals(input *ListThingPrincipalsInput) (*ListThingPrincipalsOutput, error) { + req, out := c.ListThingPrincipalsRequest(input) + err := req.Send() + return out, err +} + +const opListThings = "ListThings" + +// ListThingsRequest generates a "aws/request.Request" representing the +// client's request for the ListThings operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListThings method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListThingsRequest method. +// req, resp := client.ListThingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoT) ListThingsRequest(input *ListThingsInput) (req *request.Request, output *ListThingsOutput) { + op := &request.Operation{ + Name: opListThings, + HTTPMethod: "GET", + HTTPPath: "/things", + } + + if input == nil { + input = &ListThingsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListThingsOutput{} + req.Data = output + return +} + +// Lists your things. You can pass an AttributeName or AttributeValue to filter +// your things (for example, "ListThings where AttributeName=Color and AttributeValue=Red"). +func (c *IoT) ListThings(input *ListThingsInput) (*ListThingsOutput, error) { + req, out := c.ListThingsRequest(input) + err := req.Send() + return out, err +} + +const opListTopicRules = "ListTopicRules" + +// ListTopicRulesRequest generates a "aws/request.Request" representing the +// client's request for the ListTopicRules operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTopicRules method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
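+//
+// As a hypothetical usage sketch (the "Topic" filter field on ListTopicRulesInput
+// and the example topic name are assumed here), listing the rules for a single
+// topic could look like:
+//
+// resp, err := client.ListTopicRules(&ListTopicRulesInput{
+// Topic: aws.String("sensors/temperature"),
+// })
+// if err == nil {
+// fmt.Println(resp)
+// }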
+//
+// // Example sending a request using the ListTopicRulesRequest method.
+// req, resp := client.ListTopicRulesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *IoT) ListTopicRulesRequest(input *ListTopicRulesInput) (req *request.Request, output *ListTopicRulesOutput) {
+ op := &request.Operation{
+ Name: opListTopicRules,
+ HTTPMethod: "GET",
+ HTTPPath: "/rules",
+ }
+
+ if input == nil {
+ input = &ListTopicRulesInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ListTopicRulesOutput{}
+ req.Data = output
+ return
+}
+
+// Lists the rules for the specified topic.
+func (c *IoT) ListTopicRules(input *ListTopicRulesInput) (*ListTopicRulesOutput, error) {
+ req, out := c.ListTopicRulesRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opRegisterCACertificate = "RegisterCACertificate"
+
+// RegisterCACertificateRequest generates a "aws/request.Request" representing the
+// client's request for the RegisterCACertificate operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the RegisterCACertificate method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the RegisterCACertificateRequest method.
+// req, resp := client.RegisterCACertificateRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *IoT) RegisterCACertificateRequest(input *RegisterCACertificateInput) (req *request.Request, output *RegisterCACertificateOutput) {
+ op := &request.Operation{
+ Name: opRegisterCACertificate,
+ HTTPMethod: "POST",
+ HTTPPath: "/cacertificate",
+ }
+
+ if input == nil {
+ input = &RegisterCACertificateInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &RegisterCACertificateOutput{}
+ req.Data = output
+ return
+}
+
+// Registers a CA certificate with AWS IoT. This CA certificate can then be
+// used to sign device certificates, which can then be registered with AWS IoT.
+// You can register up to 10 CA certificates per AWS account that have the same
+// subject field and public key. This enables you to have up to 10 certificate
+// authorities sign your device certificates. If you have more than one CA certificate
+// registered, make sure you pass the CA certificate when you register your
+// device certificates with the RegisterCertificate API.
+func (c *IoT) RegisterCACertificate(input *RegisterCACertificateInput) (*RegisterCACertificateOutput, error) {
+ req, out := c.RegisterCACertificateRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opRegisterCertificate = "RegisterCertificate"
+
+// RegisterCertificateRequest generates a "aws/request.Request" representing the
+// client's request for the RegisterCertificate operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
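+//
+// For illustration only: assuming RegisterCertificateInput carries the device
+// certificate PEM and the signing CA certificate PEM (field names and the PEM
+// variables below are hypothetical), registering a CA-signed device certificate
+// might look like:
+//
+// _, err := client.RegisterCertificate(&RegisterCertificateInput{
+// CertificatePem: aws.String(deviceCertPem),
+// CaCertificatePem: aws.String(caCertPem),
+// })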
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RegisterCertificate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RegisterCertificateRequest method. +// req, resp := client.RegisterCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoT) RegisterCertificateRequest(input *RegisterCertificateInput) (req *request.Request, output *RegisterCertificateOutput) { + op := &request.Operation{ + Name: opRegisterCertificate, + HTTPMethod: "POST", + HTTPPath: "/certificate/register", + } + + if input == nil { + input = &RegisterCertificateInput{} + } + + req = c.newRequest(op, input, output) + output = &RegisterCertificateOutput{} + req.Data = output + return +} + +// Registers a device certificate with AWS IoT. If you have more than one CA +// certificate that has the same subject field, you must specify the CA certificate +// that was used to sign the device certificate being registered. +func (c *IoT) RegisterCertificate(input *RegisterCertificateInput) (*RegisterCertificateOutput, error) { + req, out := c.RegisterCertificateRequest(input) + err := req.Send() + return out, err +} + +const opRejectCertificateTransfer = "RejectCertificateTransfer" + +// RejectCertificateTransferRequest generates a "aws/request.Request" representing the +// client's request for the RejectCertificateTransfer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RejectCertificateTransfer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RejectCertificateTransferRequest method. +// req, resp := client.RejectCertificateTransferRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoT) RejectCertificateTransferRequest(input *RejectCertificateTransferInput) (req *request.Request, output *RejectCertificateTransferOutput) { + op := &request.Operation{ + Name: opRejectCertificateTransfer, + HTTPMethod: "PATCH", + HTTPPath: "/reject-certificate-transfer/{certificateId}", + } + + if input == nil { + input = &RejectCertificateTransferInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RejectCertificateTransferOutput{} + req.Data = output + return +} + +// Rejects a pending certificate transfer. After AWS IoT rejects a certificate +// transfer, the certificate status changes from PENDING_TRANSFER to INACTIVE. 
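+//
+// A minimal, hypothetical sketch of rejecting a transfer (the certificate ID
+// below is a placeholder, not a real value):
+//
+// _, err := client.RejectCertificateTransfer(&RejectCertificateTransferInput{
+// CertificateId: aws.String("<64-character-certificate-id>"),
+// })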
+// +// To check for pending certificate transfers, call ListCertificates to enumerate +// your certificates. +// +// This operation can only be called by the transfer destination. After it +// is called, the certificate will be returned to the source's account in the +// INACTIVE state. +func (c *IoT) RejectCertificateTransfer(input *RejectCertificateTransferInput) (*RejectCertificateTransferOutput, error) { + req, out := c.RejectCertificateTransferRequest(input) + err := req.Send() + return out, err +} + +const opReplaceTopicRule = "ReplaceTopicRule" + +// ReplaceTopicRuleRequest generates a "aws/request.Request" representing the +// client's request for the ReplaceTopicRule operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ReplaceTopicRule method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ReplaceTopicRuleRequest method. +// req, resp := client.ReplaceTopicRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoT) ReplaceTopicRuleRequest(input *ReplaceTopicRuleInput) (req *request.Request, output *ReplaceTopicRuleOutput) { + op := &request.Operation{ + Name: opReplaceTopicRule, + HTTPMethod: "PATCH", + HTTPPath: "/rules/{ruleName}", + } + + if input == nil { + input = &ReplaceTopicRuleInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ReplaceTopicRuleOutput{} + req.Data = output + return +} + +// Replaces the specified rule. You must specify all parameters for the new +// rule. Creating rules is an administrator-level action. Any user who has permission +// to create rules will be able to access data processed by the rule. +func (c *IoT) ReplaceTopicRule(input *ReplaceTopicRuleInput) (*ReplaceTopicRuleOutput, error) { + req, out := c.ReplaceTopicRuleRequest(input) + err := req.Send() + return out, err +} + +const opSetDefaultPolicyVersion = "SetDefaultPolicyVersion" + +// SetDefaultPolicyVersionRequest generates a "aws/request.Request" representing the +// client's request for the SetDefaultPolicyVersion operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetDefaultPolicyVersion method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetDefaultPolicyVersionRequest method. 
+// req, resp := client.SetDefaultPolicyVersionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *IoT) SetDefaultPolicyVersionRequest(input *SetDefaultPolicyVersionInput) (req *request.Request, output *SetDefaultPolicyVersionOutput) {
+ op := &request.Operation{
+ Name: opSetDefaultPolicyVersion,
+ HTTPMethod: "PATCH",
+ HTTPPath: "/policies/{policyName}/version/{policyVersionId}",
+ }
+
+ if input == nil {
+ input = &SetDefaultPolicyVersionInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &SetDefaultPolicyVersionOutput{}
+ req.Data = output
+ return
+}
+
+// Sets the specified version of the specified policy as the policy's default
+// (operative) version. This action affects all certificates to which the policy
+// is attached. To list the principals the policy is attached to, use the ListPolicyPrincipals
+// API.
+func (c *IoT) SetDefaultPolicyVersion(input *SetDefaultPolicyVersionInput) (*SetDefaultPolicyVersionOutput, error) {
+ req, out := c.SetDefaultPolicyVersionRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opSetLoggingOptions = "SetLoggingOptions"
+
+// SetLoggingOptionsRequest generates a "aws/request.Request" representing the
+// client's request for the SetLoggingOptions operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the SetLoggingOptions method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the SetLoggingOptionsRequest method.
+// req, resp := client.SetLoggingOptionsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *IoT) SetLoggingOptionsRequest(input *SetLoggingOptionsInput) (req *request.Request, output *SetLoggingOptionsOutput) {
+ op := &request.Operation{
+ Name: opSetLoggingOptions,
+ HTTPMethod: "POST",
+ HTTPPath: "/loggingOptions",
+ }
+
+ if input == nil {
+ input = &SetLoggingOptionsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &SetLoggingOptionsOutput{}
+ req.Data = output
+ return
+}
+
+// Sets the logging options.
+func (c *IoT) SetLoggingOptions(input *SetLoggingOptionsInput) (*SetLoggingOptionsOutput, error) {
+ req, out := c.SetLoggingOptionsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opTransferCertificate = "TransferCertificate"
+
+// TransferCertificateRequest generates a "aws/request.Request" representing the
+// client's request for the TransferCertificate operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
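+//
+// For example (illustrative only), the "output" value is populated once Send
+// completes without error:
+//
+// req, out := client.TransferCertificateRequest(params)
+// if err := req.Send(); err == nil {
+// fmt.Println(out) // out now holds the TransferCertificateOutput
+// }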
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the TransferCertificate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the TransferCertificateRequest method. +// req, resp := client.TransferCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoT) TransferCertificateRequest(input *TransferCertificateInput) (req *request.Request, output *TransferCertificateOutput) { + op := &request.Operation{ + Name: opTransferCertificate, + HTTPMethod: "PATCH", + HTTPPath: "/transfer-certificate/{certificateId}", + } + + if input == nil { + input = &TransferCertificateInput{} + } + + req = c.newRequest(op, input, output) + output = &TransferCertificateOutput{} + req.Data = output + return +} + +// Transfers the specified certificate to the specified AWS account. +// +// You can cancel the transfer until it is acknowledged by the recipient. +// +// No notification is sent to the transfer destination's account. It is up +// to the caller to notify the transfer target. +// +// The certificate being transferred must not be in the ACTIVE state. You can +// use the UpdateCertificate API to deactivate it. +// +// The certificate must not have any policies attached to it. You can use the +// DetachPrincipalPolicy API to detach them. +func (c *IoT) TransferCertificate(input *TransferCertificateInput) (*TransferCertificateOutput, error) { + req, out := c.TransferCertificateRequest(input) + err := req.Send() + return out, err +} + +const opUpdateCACertificate = "UpdateCACertificate" + +// UpdateCACertificateRequest generates a "aws/request.Request" representing the +// client's request for the UpdateCACertificate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateCACertificate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateCACertificateRequest method. 
+// req, resp := client.UpdateCACertificateRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *IoT) UpdateCACertificateRequest(input *UpdateCACertificateInput) (req *request.Request, output *UpdateCACertificateOutput) {
+ op := &request.Operation{
+ Name: opUpdateCACertificate,
+ HTTPMethod: "PUT",
+ HTTPPath: "/cacertificate/{caCertificateId}",
+ }
+
+ if input == nil {
+ input = &UpdateCACertificateInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &UpdateCACertificateOutput{}
+ req.Data = output
+ return
+}
+
+// Updates a registered CA certificate.
+func (c *IoT) UpdateCACertificate(input *UpdateCACertificateInput) (*UpdateCACertificateOutput, error) {
+ req, out := c.UpdateCACertificateRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opUpdateCertificate = "UpdateCertificate"
+
+// UpdateCertificateRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateCertificate operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the UpdateCertificate method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the UpdateCertificateRequest method.
+// req, resp := client.UpdateCertificateRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *IoT) UpdateCertificateRequest(input *UpdateCertificateInput) (req *request.Request, output *UpdateCertificateOutput) {
+ op := &request.Operation{
+ Name: opUpdateCertificate,
+ HTTPMethod: "PUT",
+ HTTPPath: "/certificates/{certificateId}",
+ }
+
+ if input == nil {
+ input = &UpdateCertificateInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &UpdateCertificateOutput{}
+ req.Data = output
+ return
+}
+
+// Updates the status of the specified certificate. This operation is idempotent.
+//
+// Moving a certificate out of the ACTIVE state (including to REVOKED) will not
+// disconnect currently connected devices, but these devices will be unable
+// to reconnect.
+//
+// The ACTIVE state is required to authenticate devices connecting to AWS IoT
+// using a certificate.
+func (c *IoT) UpdateCertificate(input *UpdateCertificateInput) (*UpdateCertificateOutput, error) {
+ req, out := c.UpdateCertificateRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opUpdateThing = "UpdateThing"
+
+// UpdateThingRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateThing operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
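+//
+// As a hypothetical sketch (assuming UpdateThingInput mirrors CreateThingInput
+// with ThingName and AttributePayload fields), updating a thing's attributes
+// might look like:
+//
+// _, err := client.UpdateThing(&UpdateThingInput{
+// ThingName: aws.String("myThing"),
+// AttributePayload: &AttributePayload{
+// Attributes: map[string]*string{"Color": aws.String("Red")},
+// },
+// })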
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the UpdateThing method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the UpdateThingRequest method.
+// req, resp := client.UpdateThingRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *IoT) UpdateThingRequest(input *UpdateThingInput) (req *request.Request, output *UpdateThingOutput) {
+ op := &request.Operation{
+ Name: opUpdateThing,
+ HTTPMethod: "PATCH",
+ HTTPPath: "/things/{thingName}",
+ }
+
+ if input == nil {
+ input = &UpdateThingInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &UpdateThingOutput{}
+ req.Data = output
+ return
+}
+
+// Updates the data for a thing.
+func (c *IoT) UpdateThing(input *UpdateThingInput) (*UpdateThingOutput, error) {
+ req, out := c.UpdateThingRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// The input for the AcceptCertificateTransfer operation.
+type AcceptCertificateTransferInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the certificate.
+ CertificateId *string `location:"uri" locationName:"certificateId" min:"64" type:"string" required:"true"`
+
+ // Specifies whether the certificate is active.
+ SetAsActive *bool `location:"querystring" locationName:"setAsActive" type:"boolean"`
+}
+
+// String returns the string representation
+func (s AcceptCertificateTransferInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AcceptCertificateTransferInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AcceptCertificateTransferInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AcceptCertificateTransferInput"}
+ if s.CertificateId == nil {
+ invalidParams.Add(request.NewErrParamRequired("CertificateId"))
+ }
+ if s.CertificateId != nil && len(*s.CertificateId) < 64 {
+ invalidParams.Add(request.NewErrParamMinLen("CertificateId", 64))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type AcceptCertificateTransferOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s AcceptCertificateTransferOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AcceptCertificateTransferOutput) GoString() string {
+ return s.String()
+}
+
+// Describes the actions associated with a rule.
+type Action struct {
+ _ struct{} `type:"structure"`
+
+ // Change the state of a CloudWatch alarm.
+ CloudwatchAlarm *CloudwatchAlarmAction `locationName:"cloudwatchAlarm" type:"structure"`
+
+ // Capture a CloudWatch metric.
+ CloudwatchMetric *CloudwatchMetricAction `locationName:"cloudwatchMetric" type:"structure"`
+
+ // Write to a DynamoDB table.
+ DynamoDB *DynamoDBAction `locationName:"dynamoDB" type:"structure"`
+
+ // Write data to an Amazon Elasticsearch Service domain.
+ Elasticsearch *ElasticsearchAction `locationName:"elasticsearch" type:"structure"`
+
+ // Write to an Amazon Kinesis Firehose stream.
+ Firehose *FirehoseAction `locationName:"firehose" type:"structure"` + + // Write data to an Amazon Kinesis stream. + Kinesis *KinesisAction `locationName:"kinesis" type:"structure"` + + // Invoke a Lambda function. + Lambda *LambdaAction `locationName:"lambda" type:"structure"` + + // Publish to another MQTT topic. + Republish *RepublishAction `locationName:"republish" type:"structure"` + + // Write to an Amazon S3 bucket. + S3 *S3Action `locationName:"s3" type:"structure"` + + // Publish to an Amazon SNS topic. + Sns *SnsAction `locationName:"sns" type:"structure"` + + // Publish to an Amazon SQS queue. + Sqs *SqsAction `locationName:"sqs" type:"structure"` +} + +// String returns the string representation +func (s Action) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Action) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Action) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Action"} + if s.CloudwatchAlarm != nil { + if err := s.CloudwatchAlarm.Validate(); err != nil { + invalidParams.AddNested("CloudwatchAlarm", err.(request.ErrInvalidParams)) + } + } + if s.CloudwatchMetric != nil { + if err := s.CloudwatchMetric.Validate(); err != nil { + invalidParams.AddNested("CloudwatchMetric", err.(request.ErrInvalidParams)) + } + } + if s.DynamoDB != nil { + if err := s.DynamoDB.Validate(); err != nil { + invalidParams.AddNested("DynamoDB", err.(request.ErrInvalidParams)) + } + } + if s.Elasticsearch != nil { + if err := s.Elasticsearch.Validate(); err != nil { + invalidParams.AddNested("Elasticsearch", err.(request.ErrInvalidParams)) + } + } + if s.Firehose != nil { + if err := s.Firehose.Validate(); err != nil { + invalidParams.AddNested("Firehose", err.(request.ErrInvalidParams)) + } + } + if s.Kinesis != nil { + if err := s.Kinesis.Validate(); err != nil { + invalidParams.AddNested("Kinesis", err.(request.ErrInvalidParams)) + } + } + if s.Lambda != nil { + if err := s.Lambda.Validate(); err != nil { + invalidParams.AddNested("Lambda", err.(request.ErrInvalidParams)) + } + } + if s.Republish != nil { + if err := s.Republish.Validate(); err != nil { + invalidParams.AddNested("Republish", err.(request.ErrInvalidParams)) + } + } + if s.S3 != nil { + if err := s.S3.Validate(); err != nil { + invalidParams.AddNested("S3", err.(request.ErrInvalidParams)) + } + } + if s.Sns != nil { + if err := s.Sns.Validate(); err != nil { + invalidParams.AddNested("Sns", err.(request.ErrInvalidParams)) + } + } + if s.Sqs != nil { + if err := s.Sqs.Validate(); err != nil { + invalidParams.AddNested("Sqs", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The input for the AttachPrincipalPolicy operation. +type AttachPrincipalPolicyInput struct { + _ struct{} `type:"structure"` + + // The policy name. + PolicyName *string `location:"uri" locationName:"policyName" min:"1" type:"string" required:"true"` + + // The principal, which can be a certificate ARN (as returned from the CreateCertificate + // operation) or an Amazon Cognito ID. 
+ Principal *string `location:"header" locationName:"x-amzn-iot-principal" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AttachPrincipalPolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AttachPrincipalPolicyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AttachPrincipalPolicyInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AttachPrincipalPolicyInput"}
+ if s.PolicyName == nil {
+ invalidParams.Add(request.NewErrParamRequired("PolicyName"))
+ }
+ if s.PolicyName != nil && len(*s.PolicyName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1))
+ }
+ if s.Principal == nil {
+ invalidParams.Add(request.NewErrParamRequired("Principal"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type AttachPrincipalPolicyOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s AttachPrincipalPolicyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AttachPrincipalPolicyOutput) GoString() string {
+ return s.String()
+}
+
+// The input for the AttachThingPrincipal operation.
+type AttachThingPrincipalInput struct {
+ _ struct{} `type:"structure"`
+
+ // The principal (certificate or other credential).
+ Principal *string `location:"header" locationName:"x-amzn-principal" type:"string" required:"true"`
+
+ // The name of the thing.
+ ThingName *string `location:"uri" locationName:"thingName" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AttachThingPrincipalInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AttachThingPrincipalInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AttachThingPrincipalInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AttachThingPrincipalInput"}
+ if s.Principal == nil {
+ invalidParams.Add(request.NewErrParamRequired("Principal"))
+ }
+ if s.ThingName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ThingName"))
+ }
+ if s.ThingName != nil && len(*s.ThingName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ThingName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// The output from the AttachThingPrincipal operation.
+type AttachThingPrincipalOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s AttachThingPrincipalOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AttachThingPrincipalOutput) GoString() string {
+ return s.String()
+}
+
+// The attribute payload, a JSON string containing up to three key-value pairs
+// (for example, {\"attributes\":{\"string1\":\"string2\"}}).
+type AttributePayload struct {
+ _ struct{} `type:"structure"`
+
+ // A JSON string containing up to three key-value pairs in JSON format (for example,
+ // {\"attributes\":{\"string1\":\"string2\"}}).
+ Attributes map[string]*string `locationName:"attributes" type:"map"` +} + +// String returns the string representation +func (s AttributePayload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttributePayload) GoString() string { + return s.String() +} + +// A CA certificate. +type CACertificate struct { + _ struct{} `type:"structure"` + + // The ARN of the CA certificate. + CertificateArn *string `locationName:"certificateArn" type:"string"` + + // The ID of the CA certificate. + CertificateId *string `locationName:"certificateId" min:"64" type:"string"` + + // The date the CA certificate was created. + CreationDate *time.Time `locationName:"creationDate" type:"timestamp" timestampFormat:"unix"` + + // The status of the CA certificate. + // + // The status value REGISTER_INACTIVE is deprecated and should not be used. + Status *string `locationName:"status" type:"string" enum:"CACertificateStatus"` +} + +// String returns the string representation +func (s CACertificate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CACertificate) GoString() string { + return s.String() +} + +// Describes a CA certificate. +type CACertificateDescription struct { + _ struct{} `type:"structure"` + + // The CA certificate ARN. + CertificateArn *string `locationName:"certificateArn" type:"string"` + + // The CA certificate ID. + CertificateId *string `locationName:"certificateId" min:"64" type:"string"` + + // The CA certificate data, in PEM format. + CertificatePem *string `locationName:"certificatePem" min:"1" type:"string"` + + // The date the CA certificate was created. + CreationDate *time.Time `locationName:"creationDate" type:"timestamp" timestampFormat:"unix"` + + // The owner of the CA certificate. + OwnedBy *string `locationName:"ownedBy" type:"string"` + + // The status of a CA certificate. + Status *string `locationName:"status" type:"string" enum:"CACertificateStatus"` +} + +// String returns the string representation +func (s CACertificateDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CACertificateDescription) GoString() string { + return s.String() +} + +// The input for the CancelCertificateTransfer operation. +type CancelCertificateTransferInput struct { + _ struct{} `type:"structure"` + + // The ID of the certificate. + CertificateId *string `location:"uri" locationName:"certificateId" min:"64" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelCertificateTransferInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelCertificateTransferInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CancelCertificateTransferInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CancelCertificateTransferInput"} + if s.CertificateId == nil { + invalidParams.Add(request.NewErrParamRequired("CertificateId")) + } + if s.CertificateId != nil && len(*s.CertificateId) < 64 { + invalidParams.Add(request.NewErrParamMinLen("CertificateId", 64)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CancelCertificateTransferOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CancelCertificateTransferOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelCertificateTransferOutput) GoString() string { + return s.String() +} + +// Information about a certificate. +type Certificate struct { + _ struct{} `type:"structure"` + + // The ARN of the certificate. + CertificateArn *string `locationName:"certificateArn" type:"string"` + + // The ID of the certificate. + CertificateId *string `locationName:"certificateId" min:"64" type:"string"` + + // The date and time the certificate was created. + CreationDate *time.Time `locationName:"creationDate" type:"timestamp" timestampFormat:"unix"` + + // The status of the certificate. + // + // The status value REGISTER_INACTIVE is deprecated and should not be used. + Status *string `locationName:"status" type:"string" enum:"CertificateStatus"` +} + +// String returns the string representation +func (s Certificate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Certificate) GoString() string { + return s.String() +} + +// Describes a certificate. +type CertificateDescription struct { + _ struct{} `type:"structure"` + + // The certificate ID of the CA certificate used to sign this certificate. + CaCertificateId *string `locationName:"caCertificateId" min:"64" type:"string"` + + // The ARN of the certificate. + CertificateArn *string `locationName:"certificateArn" type:"string"` + + // The ID of the certificate. + CertificateId *string `locationName:"certificateId" min:"64" type:"string"` + + // The certificate data, in PEM format. + CertificatePem *string `locationName:"certificatePem" min:"1" type:"string"` + + // The date and time the certificate was created. + CreationDate *time.Time `locationName:"creationDate" type:"timestamp" timestampFormat:"unix"` + + // The date and time the certificate was last modified. + LastModifiedDate *time.Time `locationName:"lastModifiedDate" type:"timestamp" timestampFormat:"unix"` + + // The ID of the AWS account that owns the certificate. + OwnedBy *string `locationName:"ownedBy" type:"string"` + + // The ID of the AWS account of the previous owner of the certificate. + PreviousOwnedBy *string `locationName:"previousOwnedBy" type:"string"` + + // The status of the certificate. + Status *string `locationName:"status" type:"string" enum:"CertificateStatus"` + + // The transfer data. + TransferData *TransferData `locationName:"transferData" type:"structure"` +} + +// String returns the string representation +func (s CertificateDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CertificateDescription) GoString() string { + return s.String() +} + +// Describes an action that updates a CloudWatch alarm. +type CloudwatchAlarmAction struct { + _ struct{} `type:"structure"` + + // The CloudWatch alarm name. 
+ AlarmName *string `locationName:"alarmName" type:"string" required:"true"` + + // The IAM role that allows access to the CloudWatch alarm. + RoleArn *string `locationName:"roleArn" type:"string" required:"true"` + + // The reason for the alarm change. + StateReason *string `locationName:"stateReason" type:"string" required:"true"` + + // The value of the alarm state. Acceptable values are: OK, ALARM, INSUFFICIENT_DATA. + StateValue *string `locationName:"stateValue" type:"string" required:"true"` +} + +// String returns the string representation +func (s CloudwatchAlarmAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloudwatchAlarmAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CloudwatchAlarmAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CloudwatchAlarmAction"} + if s.AlarmName == nil { + invalidParams.Add(request.NewErrParamRequired("AlarmName")) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.StateReason == nil { + invalidParams.Add(request.NewErrParamRequired("StateReason")) + } + if s.StateValue == nil { + invalidParams.Add(request.NewErrParamRequired("StateValue")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Describes an action that captures a CloudWatch metric. +type CloudwatchMetricAction struct { + _ struct{} `type:"structure"` + + // The CloudWatch metric name. + MetricName *string `locationName:"metricName" type:"string" required:"true"` + + // The CloudWatch metric namespace name. + MetricNamespace *string `locationName:"metricNamespace" type:"string" required:"true"` + + // An optional Unix timestamp (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#about_timestamp). + MetricTimestamp *string `locationName:"metricTimestamp" type:"string"` + + // The metric unit (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#Unit) + // supported by CloudWatch. + MetricUnit *string `locationName:"metricUnit" type:"string" required:"true"` + + // The CloudWatch metric value. + MetricValue *string `locationName:"metricValue" type:"string" required:"true"` + + // The IAM role that allows access to the CloudWatch metric. + RoleArn *string `locationName:"roleArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s CloudwatchMetricAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloudwatchMetricAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CloudwatchMetricAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CloudwatchMetricAction"} + if s.MetricName == nil { + invalidParams.Add(request.NewErrParamRequired("MetricName")) + } + if s.MetricNamespace == nil { + invalidParams.Add(request.NewErrParamRequired("MetricNamespace")) + } + if s.MetricUnit == nil { + invalidParams.Add(request.NewErrParamRequired("MetricUnit")) + } + if s.MetricValue == nil { + invalidParams.Add(request.NewErrParamRequired("MetricValue")) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The input for the CreateCertificateFromCsr operation. +type CreateCertificateFromCsrInput struct { + _ struct{} `type:"structure"` + + // The certificate signing request (CSR). + CertificateSigningRequest *string `locationName:"certificateSigningRequest" min:"1" type:"string" required:"true"` + + // Specifies whether the certificate is active. + SetAsActive *bool `location:"querystring" locationName:"setAsActive" type:"boolean"` +} + +// String returns the string representation +func (s CreateCertificateFromCsrInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCertificateFromCsrInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateCertificateFromCsrInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateCertificateFromCsrInput"} + if s.CertificateSigningRequest == nil { + invalidParams.Add(request.NewErrParamRequired("CertificateSigningRequest")) + } + if s.CertificateSigningRequest != nil && len(*s.CertificateSigningRequest) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CertificateSigningRequest", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The output from the CreateCertificateFromCsr operation. +type CreateCertificateFromCsrOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the certificate. You can use the ARN as + // a principal for policy operations. + CertificateArn *string `locationName:"certificateArn" type:"string"` + + // The ID of the certificate. Certificate management operations only take a + // certificateId. + CertificateId *string `locationName:"certificateId" min:"64" type:"string"` + + // The certificate data, in PEM format. + CertificatePem *string `locationName:"certificatePem" min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateCertificateFromCsrOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCertificateFromCsrOutput) GoString() string { + return s.String() +} + +// The input for the CreateKeysAndCertificate operation. +type CreateKeysAndCertificateInput struct { + _ struct{} `type:"structure"` + + // Specifies whether the certificate is active. + SetAsActive *bool `location:"querystring" locationName:"setAsActive" type:"boolean"` +} + +// String returns the string representation +func (s CreateKeysAndCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateKeysAndCertificateInput) GoString() string { + return s.String() +} + +// The output of the CreateKeysAndCertificate operation. 
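+//
+// Illustrative note and sketch (not generated documentation): AWS IoT issues
+// the private key only in this response, so callers typically persist the key
+// pair immediately. The fields used below are those defined on this type:
+//
+// out, err := client.CreateKeysAndCertificate(&CreateKeysAndCertificateInput{
+// SetAsActive: aws.Bool(true),
+// })
+// if err == nil {
+// fmt.Println(*out.CertificateId) // out.KeyPair holds the new key material
+// }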
+type CreateKeysAndCertificateOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN of the certificate.
+ CertificateArn *string `locationName:"certificateArn" type:"string"`
+
+ // The ID of the certificate. AWS IoT issues a default subject name for the
+ // certificate (for example, AWS IoT Certificate).
+ CertificateId *string `locationName:"certificateId" min:"64" type:"string"`
+
+ // The certificate data, in PEM format.
+ CertificatePem *string `locationName:"certificatePem" min:"1" type:"string"`
+
+ // The generated key pair.
+ KeyPair *KeyPair `locationName:"keyPair" type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateKeysAndCertificateOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateKeysAndCertificateOutput) GoString() string {
+ return s.String()
+}
+
+// The input for the CreatePolicy operation.
+type CreatePolicyInput struct {
+ _ struct{} `type:"structure"`
+
+ // The JSON document that describes the policy. policyDocument must have a minimum
+ // length of 1, with a maximum length of 2048, excluding whitespace.
+ PolicyDocument *string `locationName:"policyDocument" type:"string" required:"true"`
+
+ // The policy name.
+ PolicyName *string `location:"uri" locationName:"policyName" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreatePolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreatePolicyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreatePolicyInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreatePolicyInput"}
+ if s.PolicyDocument == nil {
+ invalidParams.Add(request.NewErrParamRequired("PolicyDocument"))
+ }
+ if s.PolicyName == nil {
+ invalidParams.Add(request.NewErrParamRequired("PolicyName"))
+ }
+ if s.PolicyName != nil && len(*s.PolicyName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// The output from the CreatePolicy operation.
+type CreatePolicyOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The policy ARN.
+ PolicyArn *string `locationName:"policyArn" type:"string"`
+
+ // The JSON document that describes the policy.
+ PolicyDocument *string `locationName:"policyDocument" type:"string"`
+
+ // The policy name.
+ PolicyName *string `locationName:"policyName" min:"1" type:"string"`
+
+ // The policy version ID.
+ PolicyVersionId *string `locationName:"policyVersionId" type:"string"`
+}
+
+// String returns the string representation
+func (s CreatePolicyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreatePolicyOutput) GoString() string {
+ return s.String()
+}
+
+// The input for the CreatePolicyVersion operation.
+type CreatePolicyVersionInput struct {
+ _ struct{} `type:"structure"`
+
+ // The JSON document that describes the policy. Minimum length of 1. Maximum
+ // length of 2048, excluding whitespace.
+ PolicyDocument *string `locationName:"policyDocument" type:"string" required:"true"`
+
+ // The policy name.
+ PolicyName *string `location:"uri" locationName:"policyName" min:"1" type:"string" required:"true"`
+
+ // Specifies whether the policy version is set as the default. When this parameter
+ // is true, the new policy version becomes the operative version (that is, the
+ // version that is in effect for the certificates to which the policy is attached).
+ SetAsDefault *bool `location:"querystring" locationName:"setAsDefault" type:"boolean"`
+}
+
+// String returns the string representation
+func (s CreatePolicyVersionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreatePolicyVersionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreatePolicyVersionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreatePolicyVersionInput"}
+ if s.PolicyDocument == nil {
+ invalidParams.Add(request.NewErrParamRequired("PolicyDocument"))
+ }
+ if s.PolicyName == nil {
+ invalidParams.Add(request.NewErrParamRequired("PolicyName"))
+ }
+ if s.PolicyName != nil && len(*s.PolicyName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// The output of the CreatePolicyVersion operation.
+type CreatePolicyVersionOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether the policy version is the default.
+ IsDefaultVersion *bool `locationName:"isDefaultVersion" type:"boolean"`
+
+ // The policy ARN.
+ PolicyArn *string `locationName:"policyArn" type:"string"`
+
+ // The JSON document that describes the policy.
+ PolicyDocument *string `locationName:"policyDocument" type:"string"`
+
+ // The policy version ID.
+ PolicyVersionId *string `locationName:"policyVersionId" type:"string"`
+}
+
+// String returns the string representation
+func (s CreatePolicyVersionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreatePolicyVersionOutput) GoString() string {
+ return s.String()
+}
+
+// The input for the CreateThing operation.
+type CreateThingInput struct {
+ _ struct{} `type:"structure"`
+
+ // The attribute payload, which consists of up to 3 name/value pairs in a JSON
+ // document (for example, {\"attributes\":{\"string1\":\"string2\"}}).
+ AttributePayload *AttributePayload `locationName:"attributePayload" type:"structure"`
+
+ // The name of the thing.
+ ThingName *string `location:"uri" locationName:"thingName" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateThingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateThingInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateThingInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateThingInput"}
+ if s.ThingName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ThingName"))
+ }
+ if s.ThingName != nil && len(*s.ThingName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ThingName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// The output of the CreateThing operation.
+type CreateThingOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The thing ARN.
+ ThingArn *string `locationName:"thingArn" type:"string"`
+
+ // The name of the thing.
+ ThingName *string `locationName:"thingName" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateThingOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateThingOutput) GoString() string {
+ return s.String()
+}
+
+// The input for the CreateTopicRule operation.
+type CreateTopicRuleInput struct {
+ _ struct{} `type:"structure" payload:"TopicRulePayload"`
+
+ // The name of the rule.
+ RuleName *string `location:"uri" locationName:"ruleName" min:"1" type:"string" required:"true"`
+
+ // The rule payload.
+ TopicRulePayload *TopicRulePayload `locationName:"topicRulePayload" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateTopicRuleInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateTopicRuleInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateTopicRuleInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateTopicRuleInput"}
+ if s.RuleName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RuleName"))
+ }
+ if s.RuleName != nil && len(*s.RuleName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("RuleName", 1))
+ }
+ if s.TopicRulePayload == nil {
+ invalidParams.Add(request.NewErrParamRequired("TopicRulePayload"))
+ }
+ if s.TopicRulePayload != nil {
+ if err := s.TopicRulePayload.Validate(); err != nil {
+ invalidParams.AddNested("TopicRulePayload", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type CreateTopicRuleOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateTopicRuleOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateTopicRuleOutput) GoString() string {
+ return s.String()
+}
+
+// The input for the DeleteCACertificate operation.
+type DeleteCACertificateInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the certificate to delete.
+ CertificateId *string `location:"uri" locationName:"caCertificateId" min:"64" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteCACertificateInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteCACertificateInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteCACertificateInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteCACertificateInput"}
+ if s.CertificateId == nil {
+ invalidParams.Add(request.NewErrParamRequired("CertificateId"))
+ }
+ if s.CertificateId != nil && len(*s.CertificateId) < 64 {
+ invalidParams.Add(request.NewErrParamMinLen("CertificateId", 64))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
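+
+// Editor's note: a hedged sketch of CreateThing with the attribute-payload
+// JSON documented above; it assumes the AttributePayload type defined
+// elsewhere in this package, and "myThing" is a hypothetical name.
+//
+//    out, err := svc.CreateThing(&iot.CreateThingInput{
+//        ThingName: aws.String("myThing"),
+//        AttributePayload: &iot.AttributePayload{
+//            Attributes: map[string]*string{"string1": aws.String("string2")},
+//        },
+//    })
+
+// The output for the DeleteCACertificate operation.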
+type DeleteCACertificateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteCACertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCACertificateOutput) GoString() string { + return s.String() +} + +// The input for the DeleteCertificate operation. +type DeleteCertificateInput struct { + _ struct{} `type:"structure"` + + // The ID of the certificate. + CertificateId *string `location:"uri" locationName:"certificateId" min:"64" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCertificateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteCertificateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteCertificateInput"} + if s.CertificateId == nil { + invalidParams.Add(request.NewErrParamRequired("CertificateId")) + } + if s.CertificateId != nil && len(*s.CertificateId) < 64 { + invalidParams.Add(request.NewErrParamMinLen("CertificateId", 64)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteCertificateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCertificateOutput) GoString() string { + return s.String() +} + +// The input for the DeletePolicy operation. +type DeletePolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the policy to delete. + PolicyName *string `location:"uri" locationName:"policyName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeletePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeletePolicyInput"} + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeletePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePolicyOutput) GoString() string { + return s.String() +} + +// The input for the DeletePolicyVersion operation. +type DeletePolicyVersionInput struct { + _ struct{} `type:"structure"` + + // The name of the policy. + PolicyName *string `location:"uri" locationName:"policyName" min:"1" type:"string" required:"true"` + + // The policy version ID. 
+ PolicyVersionId *string `location:"uri" locationName:"policyVersionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePolicyVersionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePolicyVersionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeletePolicyVersionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeletePolicyVersionInput"} + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + if s.PolicyVersionId == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyVersionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeletePolicyVersionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePolicyVersionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePolicyVersionOutput) GoString() string { + return s.String() +} + +// The input for the DeleteRegistrationCode operation. +type DeleteRegistrationCodeInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRegistrationCodeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRegistrationCodeInput) GoString() string { + return s.String() +} + +// The output for the DeleteRegistrationCode operation. +type DeleteRegistrationCodeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRegistrationCodeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRegistrationCodeOutput) GoString() string { + return s.String() +} + +// The input for the DeleteThing operation. +type DeleteThingInput struct { + _ struct{} `type:"structure"` + + // The thing name. + ThingName *string `location:"uri" locationName:"thingName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteThingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteThingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteThingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteThingInput"} + if s.ThingName == nil { + invalidParams.Add(request.NewErrParamRequired("ThingName")) + } + if s.ThingName != nil && len(*s.ThingName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ThingName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The output of the DeleteThing operation. +type DeleteThingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteThingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteThingOutput) GoString() string { + return s.String() +} + +// The input for the DeleteTopicRule operation. 
+type DeleteTopicRuleInput struct { + _ struct{} `type:"structure"` + + // The name of the rule. + RuleName *string `location:"uri" locationName:"ruleName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTopicRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTopicRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteTopicRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTopicRuleInput"} + if s.RuleName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleName")) + } + if s.RuleName != nil && len(*s.RuleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RuleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteTopicRuleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteTopicRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTopicRuleOutput) GoString() string { + return s.String() +} + +// The input for the DescribeCACertificate operation. +type DescribeCACertificateInput struct { + _ struct{} `type:"structure"` + + // The CA certificate identifier. + CertificateId *string `location:"uri" locationName:"caCertificateId" min:"64" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeCACertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCACertificateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeCACertificateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeCACertificateInput"} + if s.CertificateId == nil { + invalidParams.Add(request.NewErrParamRequired("CertificateId")) + } + if s.CertificateId != nil && len(*s.CertificateId) < 64 { + invalidParams.Add(request.NewErrParamMinLen("CertificateId", 64)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The output from the DescribeCACertificate operation. +type DescribeCACertificateOutput struct { + _ struct{} `type:"structure"` + + // The CA certificate description. + CertificateDescription *CACertificateDescription `locationName:"certificateDescription" type:"structure"` +} + +// String returns the string representation +func (s DescribeCACertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCACertificateOutput) GoString() string { + return s.String() +} + +// The input for the DescribeCertificate operation. +type DescribeCertificateInput struct { + _ struct{} `type:"structure"` + + // The ID of the certificate. + CertificateId *string `location:"uri" locationName:"certificateId" min:"64" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCertificateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeCertificateInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeCertificateInput"}
+ if s.CertificateId == nil {
+ invalidParams.Add(request.NewErrParamRequired("CertificateId"))
+ }
+ if s.CertificateId != nil && len(*s.CertificateId) < 64 {
+ invalidParams.Add(request.NewErrParamMinLen("CertificateId", 64))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// The output of the DescribeCertificate operation.
+type DescribeCertificateOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The description of the certificate.
+ CertificateDescription *CertificateDescription `locationName:"certificateDescription" type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeCertificateOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeCertificateOutput) GoString() string {
+ return s.String()
+}
+
+// The input for the DescribeEndpoint operation.
+type DescribeEndpointInput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeEndpointInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeEndpointInput) GoString() string {
+ return s.String()
+}
+
+// The output from the DescribeEndpoint operation.
+type DescribeEndpointOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The endpoint. The format of the endpoint is as follows: identifier.iot.region.amazonaws.com.
+ EndpointAddress *string `locationName:"endpointAddress" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeEndpointOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeEndpointOutput) GoString() string {
+ return s.String()
+}
+
+// The input for the DescribeThing operation.
+type DescribeThingInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the thing.
+ ThingName *string `location:"uri" locationName:"thingName" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeThingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeThingInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeThingInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeThingInput"}
+ if s.ThingName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ThingName"))
+ }
+ if s.ThingName != nil && len(*s.ThingName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ThingName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
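+
+// Editor's note: a hedged sketch of DescribeEndpoint, which returns the
+// account-specific data-plane host (identifier.iot.region.amazonaws.com)
+// that MQTT/HTTPS clients should connect to; svc is an *iot.IoT client.
+//
+//    ep, err := svc.DescribeEndpoint(&iot.DescribeEndpointInput{})
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    fmt.Println(aws.StringValue(ep.EndpointAddress))
+
+// The output from the DescribeThing operation.
+type DescribeThingOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The attributes, which are name/value pairs in JSON format (for example:
+ // {\"attributes\":{\"some-name1\":\"some-value1\",\"some-name2\":\"some-value2\",\"some-name3\":\"some-value3\"}})
+ Attributes map[string]*string `locationName:"attributes" type:"map"`
+
+ // The default client ID.
+ DefaultClientId *string `locationName:"defaultClientId" type:"string"`
+
+ // The name of the thing.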
+ ThingName *string `locationName:"thingName" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeThingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeThingOutput) GoString() string { + return s.String() +} + +// The input for the DetachPrincipalPolicy operation. +type DetachPrincipalPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the policy to detach. + PolicyName *string `location:"uri" locationName:"policyName" min:"1" type:"string" required:"true"` + + // The principal. + // + // If the principal is a certificate, specify the certificate ARN. If the principal + // is an Amazon Cognito identity, specify the identity ID. + Principal *string `location:"header" locationName:"x-amzn-iot-principal" type:"string" required:"true"` +} + +// String returns the string representation +func (s DetachPrincipalPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachPrincipalPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DetachPrincipalPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DetachPrincipalPolicyInput"} + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + if s.Principal == nil { + invalidParams.Add(request.NewErrParamRequired("Principal")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DetachPrincipalPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DetachPrincipalPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachPrincipalPolicyOutput) GoString() string { + return s.String() +} + +// The input for the DetachThingPrincipal operation. +type DetachThingPrincipalInput struct { + _ struct{} `type:"structure"` + + // The principal. + // + // If the principal is a certificate, specify the certificate ARN. If the principal + // is an Amazon Cognito identity, specify the identity ID. + Principal *string `location:"header" locationName:"x-amzn-principal" type:"string" required:"true"` + + // The name of the thing. + ThingName *string `location:"uri" locationName:"thingName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DetachThingPrincipalInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachThingPrincipalInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DetachThingPrincipalInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DetachThingPrincipalInput"} + if s.Principal == nil { + invalidParams.Add(request.NewErrParamRequired("Principal")) + } + if s.ThingName == nil { + invalidParams.Add(request.NewErrParamRequired("ThingName")) + } + if s.ThingName != nil && len(*s.ThingName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ThingName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The output from the DetachThingPrincipal operation. 
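+
+// Editor's note: a hedged sketch of DetachPrincipalPolicy with a certificate
+// principal, as described above; the policy name and ARN are hypothetical
+// placeholders (<certificateId> stands for a real 64-character ID).
+//
+//    _, err := svc.DetachPrincipalPolicy(&iot.DetachPrincipalPolicyInput{
+//        PolicyName: aws.String("MyTopicPolicy"),
+//        Principal:  aws.String("arn:aws:iot:us-east-1:123456789012:cert/<certificateId>"),
+//    })
+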
+type DetachThingPrincipalOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DetachThingPrincipalOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DetachThingPrincipalOutput) GoString() string {
+ return s.String()
+}
+
+// The input for the DisableTopicRule operation.
+type DisableTopicRuleInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the rule to disable.
+ RuleName *string `location:"uri" locationName:"ruleName" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DisableTopicRuleInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DisableTopicRuleInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DisableTopicRuleInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DisableTopicRuleInput"}
+ if s.RuleName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RuleName"))
+ }
+ if s.RuleName != nil && len(*s.RuleName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("RuleName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type DisableTopicRuleOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DisableTopicRuleOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DisableTopicRuleOutput) GoString() string {
+ return s.String()
+}
+
+// Describes an action to write to a DynamoDB table.
+//
+// The tableName, hashKeyField, and rangeKeyField values must match the values
+// used when you created the table.
+//
+// The hashKeyValue and rangeKeyValue fields use a substitution template syntax.
+// These templates provide data at runtime. The syntax is as follows: ${sql-expression}.
+//
+// You can specify any valid expression in a WHERE or SELECT clause, including
+// JSON properties, comparisons, calculations, and functions. For example, the
+// following field uses the third level of the topic:
+//
+// "hashKeyValue": "${topic(3)}"
+//
+// The following field uses the timestamp:
+//
+// "rangeKeyValue": "${timestamp()}"
+type DynamoDBAction struct {
+ _ struct{} `type:"structure"`
+
+ // The hash key name.
+ HashKeyField *string `locationName:"hashKeyField" type:"string" required:"true"`
+
+ // The hash key type. Valid values are "STRING" or "NUMBER".
+ HashKeyType *string `locationName:"hashKeyType" type:"string" enum:"DynamoKeyType"`
+
+ // The hash key value.
+ HashKeyValue *string `locationName:"hashKeyValue" type:"string" required:"true"`
+
+ // The type of operation to be performed. This follows the substitution template,
+ // so it can be ${operation}, but the substitution must result in one of the
+ // following: INSERT, UPDATE, or DELETE.
+ Operation *string `locationName:"operation" type:"string"`
+
+ // The action payload. This name can be customized.
+ PayloadField *string `locationName:"payloadField" type:"string"`
+
+ // The range key name.
+ RangeKeyField *string `locationName:"rangeKeyField" type:"string"`
+
+ // The range key type. Valid values are "STRING" or "NUMBER".
+ RangeKeyType *string `locationName:"rangeKeyType" type:"string" enum:"DynamoKeyType"`
+
+ // The range key value.
+ RangeKeyValue *string `locationName:"rangeKeyValue" type:"string"`
+
+ // The ARN of the IAM role that grants access to the DynamoDB table.
+ RoleArn *string `locationName:"roleArn" type:"string" required:"true"`
+
+ // The name of the DynamoDB table.
+ TableName *string `locationName:"tableName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DynamoDBAction) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DynamoDBAction) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DynamoDBAction) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DynamoDBAction"}
+ if s.HashKeyField == nil {
+ invalidParams.Add(request.NewErrParamRequired("HashKeyField"))
+ }
+ if s.HashKeyValue == nil {
+ invalidParams.Add(request.NewErrParamRequired("HashKeyValue"))
+ }
+ if s.RoleArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("RoleArn"))
+ }
+ if s.TableName == nil {
+ invalidParams.Add(request.NewErrParamRequired("TableName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Describes an action that writes data to an Amazon Elasticsearch Service
+// domain.
+type ElasticsearchAction struct {
+ _ struct{} `type:"structure"`
+
+ // The endpoint of your Elasticsearch domain.
+ Endpoint *string `locationName:"endpoint" type:"string" required:"true"`
+
+ // The unique identifier for the document you are storing.
+ Id *string `locationName:"id" type:"string" required:"true"`
+
+ // The Elasticsearch index where you want to store your data.
+ Index *string `locationName:"index" type:"string" required:"true"`
+
+ // The IAM role ARN that has access to Elasticsearch.
+ RoleArn *string `locationName:"roleArn" type:"string" required:"true"`
+
+ // The type of document you are storing.
+ Type *string `locationName:"type" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ElasticsearchAction) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ElasticsearchAction) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ElasticsearchAction) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ElasticsearchAction"}
+ if s.Endpoint == nil {
+ invalidParams.Add(request.NewErrParamRequired("Endpoint"))
+ }
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+ if s.Index == nil {
+ invalidParams.Add(request.NewErrParamRequired("Index"))
+ }
+ if s.RoleArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("RoleArn"))
+ }
+ if s.Type == nil {
+ invalidParams.Add(request.NewErrParamRequired("Type"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
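+
+// Editor's note: a hedged sketch of a DynamoDBAction that uses the
+// substitution templates described above; the table name and role ARN are
+// hypothetical.
+//
+//    act := &iot.DynamoDBAction{
+//        TableName:     aws.String("telemetry"),
+//        RoleArn:       aws.String("arn:aws:iam::123456789012:role/iot-dynamo"),
+//        HashKeyField:  aws.String("device"),
+//        HashKeyValue:  aws.String("${topic(3)}"),    // third topic level
+//        RangeKeyField: aws.String("ts"),
+//        RangeKeyValue: aws.String("${timestamp()}"), // message timestamp
+//    }
+
+// The input for the EnableTopicRule operation.
+type EnableTopicRuleInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the topic rule to enable.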
+ RuleName *string `location:"uri" locationName:"ruleName" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s EnableTopicRuleInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EnableTopicRuleInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *EnableTopicRuleInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "EnableTopicRuleInput"}
+ if s.RuleName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RuleName"))
+ }
+ if s.RuleName != nil && len(*s.RuleName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("RuleName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type EnableTopicRuleOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s EnableTopicRuleOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EnableTopicRuleOutput) GoString() string {
+ return s.String()
+}
+
+// Describes an action that writes data to an Amazon Kinesis Firehose stream.
+type FirehoseAction struct {
+ _ struct{} `type:"structure"`
+
+ // The delivery stream name.
+ DeliveryStreamName *string `locationName:"deliveryStreamName" type:"string" required:"true"`
+
+ // The IAM role that grants access to the Amazon Kinesis Firehose stream.
+ RoleArn *string `locationName:"roleArn" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s FirehoseAction) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s FirehoseAction) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *FirehoseAction) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "FirehoseAction"}
+ if s.DeliveryStreamName == nil {
+ invalidParams.Add(request.NewErrParamRequired("DeliveryStreamName"))
+ }
+ if s.RoleArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("RoleArn"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// The input for the GetLoggingOptions operation.
+type GetLoggingOptionsInput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetLoggingOptionsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetLoggingOptionsInput) GoString() string {
+ return s.String()
+}
+
+// The output from the GetLoggingOptions operation.
+type GetLoggingOptionsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The logging level.
+ LogLevel *string `locationName:"logLevel" type:"string" enum:"LogLevel"`
+
+ // The ARN of the IAM role that grants access.
+ RoleArn *string `locationName:"roleArn" type:"string"`
+}
+
+// String returns the string representation
+func (s GetLoggingOptionsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetLoggingOptionsOutput) GoString() string {
+ return s.String()
+}
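+
+// Editor's note: a hedged sketch wiring a FirehoseAction into a rule via
+// CreateTopicRule; it assumes the TopicRulePayload and Action types defined
+// elsewhere in this package, and all names are hypothetical.
+//
+//    _, err := svc.CreateTopicRule(&iot.CreateTopicRuleInput{
+//        RuleName: aws.String("archiveTelemetry"),
+//        TopicRulePayload: &iot.TopicRulePayload{
+//            Sql: aws.String("SELECT * FROM 'sensors/#'"),
+//            Actions: []*iot.Action{{Firehose: &iot.FirehoseAction{
+//                DeliveryStreamName: aws.String("telemetry-stream"),
+//                RoleArn:            aws.String("arn:aws:iam::123456789012:role/iot-firehose"),
+//            }}},
+//        },
+//    })
+
+// The input for the GetPolicy operation.
+type GetPolicyInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the policy.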
+ PolicyName *string `location:"uri" locationName:"policyName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetPolicyInput"} + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The output from the GetPolicy operation. +type GetPolicyOutput struct { + _ struct{} `type:"structure"` + + // The default policy version ID. + DefaultVersionId *string `locationName:"defaultVersionId" type:"string"` + + // The policy ARN. + PolicyArn *string `locationName:"policyArn" type:"string"` + + // The JSON document that describes the policy. + PolicyDocument *string `locationName:"policyDocument" type:"string"` + + // The policy name. + PolicyName *string `locationName:"policyName" min:"1" type:"string"` +} + +// String returns the string representation +func (s GetPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPolicyOutput) GoString() string { + return s.String() +} + +// The input for the GetPolicyVersion operation. +type GetPolicyVersionInput struct { + _ struct{} `type:"structure"` + + // The name of the policy. + PolicyName *string `location:"uri" locationName:"policyName" min:"1" type:"string" required:"true"` + + // The policy version ID. + PolicyVersionId *string `location:"uri" locationName:"policyVersionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetPolicyVersionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPolicyVersionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetPolicyVersionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetPolicyVersionInput"} + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + if s.PolicyVersionId == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyVersionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The output from the GetPolicyVersion operation. +type GetPolicyVersionOutput struct { + _ struct{} `type:"structure"` + + // Specifies whether the policy version is the default. + IsDefaultVersion *bool `locationName:"isDefaultVersion" type:"boolean"` + + // The policy ARN. + PolicyArn *string `locationName:"policyArn" type:"string"` + + // The JSON document that describes the policy. + PolicyDocument *string `locationName:"policyDocument" type:"string"` + + // The policy name. + PolicyName *string `locationName:"policyName" min:"1" type:"string"` + + // The policy version ID. 
+ PolicyVersionId *string `locationName:"policyVersionId" type:"string"` +} + +// String returns the string representation +func (s GetPolicyVersionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPolicyVersionOutput) GoString() string { + return s.String() +} + +// The input to the GetRegistrationCode operation. +type GetRegistrationCodeInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetRegistrationCodeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRegistrationCodeInput) GoString() string { + return s.String() +} + +// The output from the GetRegistrationCode operation. +type GetRegistrationCodeOutput struct { + _ struct{} `type:"structure"` + + // The CA certificate registration code. + RegistrationCode *string `locationName:"registrationCode" min:"64" type:"string"` +} + +// String returns the string representation +func (s GetRegistrationCodeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRegistrationCodeOutput) GoString() string { + return s.String() +} + +// The input for the GetTopicRule operation. +type GetTopicRuleInput struct { + _ struct{} `type:"structure"` + + // The name of the rule. + RuleName *string `location:"uri" locationName:"ruleName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetTopicRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTopicRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetTopicRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetTopicRuleInput"} + if s.RuleName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleName")) + } + if s.RuleName != nil && len(*s.RuleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RuleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The output from the GetTopicRule operation. +type GetTopicRuleOutput struct { + _ struct{} `type:"structure"` + + // The rule. + Rule *TopicRule `locationName:"rule" type:"structure"` + + // The rule ARN. + RuleArn *string `locationName:"ruleArn" type:"string"` +} + +// String returns the string representation +func (s GetTopicRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTopicRuleOutput) GoString() string { + return s.String() +} + +// Describes a key pair. +type KeyPair struct { + _ struct{} `type:"structure"` + + // The private key. + PrivateKey *string `min:"1" type:"string"` + + // The public key. + PublicKey *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s KeyPair) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KeyPair) GoString() string { + return s.String() +} + +// Describes an action to write data to an Amazon Kinesis stream. +type KinesisAction struct { + _ struct{} `type:"structure"` + + // The partition key. + PartitionKey *string `locationName:"partitionKey" type:"string"` + + // The ARN of the IAM role that grants access to the Amazon Kinesis stream. 
+ RoleArn *string `locationName:"roleArn" type:"string" required:"true"` + + // The name of the Amazon Kinesis stream. + StreamName *string `locationName:"streamName" type:"string" required:"true"` +} + +// String returns the string representation +func (s KinesisAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KinesisAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *KinesisAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "KinesisAction"} + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.StreamName == nil { + invalidParams.Add(request.NewErrParamRequired("StreamName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Describes an action to invoke a Lambda function. +type LambdaAction struct { + _ struct{} `type:"structure"` + + // The ARN of the Lambda function. + FunctionArn *string `locationName:"functionArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s LambdaAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LambdaAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LambdaAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LambdaAction"} + if s.FunctionArn == nil { + invalidParams.Add(request.NewErrParamRequired("FunctionArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Input for the ListCACertificates operation. +type ListCACertificatesInput struct { + _ struct{} `type:"structure"` + + // Determines the order of the results. + AscendingOrder *bool `location:"querystring" locationName:"isAscendingOrder" type:"boolean"` + + // The marker for the next set of results. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // The result page size. + PageSize *int64 `location:"querystring" locationName:"pageSize" min:"1" type:"integer"` +} + +// String returns the string representation +func (s ListCACertificatesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListCACertificatesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListCACertificatesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListCACertificatesInput"} + if s.PageSize != nil && *s.PageSize < 1 { + invalidParams.Add(request.NewErrParamMinValue("PageSize", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The output from the ListCACertificates operation. +type ListCACertificatesOutput struct { + _ struct{} `type:"structure"` + + // The CA certificates registered in your AWS account. + Certificates []*CACertificate `locationName:"certificates" type:"list"` + + // The current position within the list of CA certificates. 
+ NextMarker *string `locationName:"nextMarker" type:"string"`
+}
+
+// String returns the string representation
+func (s ListCACertificatesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListCACertificatesOutput) GoString() string {
+ return s.String()
+}
+
+// The input to the ListCertificatesByCA operation.
+type ListCertificatesByCAInput struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the order for results. If True, the results are returned in ascending
+ // order, based on the creation date.
+ AscendingOrder *bool `location:"querystring" locationName:"isAscendingOrder" type:"boolean"`
+
+ // The ID of the CA certificate. This operation will list all registered device
+ // certificates that were signed by this CA certificate.
+ CaCertificateId *string `location:"uri" locationName:"caCertificateId" min:"64" type:"string" required:"true"`
+
+ // The marker for the next set of results.
+ Marker *string `location:"querystring" locationName:"marker" type:"string"`
+
+ // The result page size.
+ PageSize *int64 `location:"querystring" locationName:"pageSize" min:"1" type:"integer"`
+}
+
+// String returns the string representation
+func (s ListCertificatesByCAInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListCertificatesByCAInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListCertificatesByCAInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListCertificatesByCAInput"}
+ if s.CaCertificateId == nil {
+ invalidParams.Add(request.NewErrParamRequired("CaCertificateId"))
+ }
+ if s.CaCertificateId != nil && len(*s.CaCertificateId) < 64 {
+ invalidParams.Add(request.NewErrParamMinLen("CaCertificateId", 64))
+ }
+ if s.PageSize != nil && *s.PageSize < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("PageSize", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// The output of the ListCertificatesByCA operation.
+type ListCertificatesByCAOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The device certificates signed by the specified CA certificate.
+ Certificates []*Certificate `locationName:"certificates" type:"list"`
+
+ // The marker for the next set of results, or null if there are no additional
+ // results.
+ NextMarker *string `locationName:"nextMarker" type:"string"`
+}
+
+// String returns the string representation
+func (s ListCertificatesByCAOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListCertificatesByCAOutput) GoString() string {
+ return s.String()
+}
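+
+// Editor's note: a hedged sketch of the Marker/NextMarker paging pattern
+// shared by these List operations; caCertID is a hypothetical 64-character
+// CA certificate ID, and the Certificate fields are defined elsewhere in
+// this package.
+//
+//    in := &iot.ListCertificatesByCAInput{CaCertificateId: aws.String(caCertID)}
+//    for {
+//        page, err := svc.ListCertificatesByCA(in)
+//        if err != nil {
+//            log.Fatal(err)
+//        }
+//        for _, c := range page.Certificates {
+//            fmt.Println(aws.StringValue(c.CertificateId))
+//        }
+//        if page.NextMarker == nil {
+//            break // no additional results
+//        }
+//        in.Marker = page.NextMarker
+//    }
+
+// The input for the ListCertificates operation.
+type ListCertificatesInput struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the order for results. If True, the results are returned in ascending
+ // order, based on the creation date.
+ AscendingOrder *bool `location:"querystring" locationName:"isAscendingOrder" type:"boolean"`
+
+ // The marker for the next set of results.
+ Marker *string `location:"querystring" locationName:"marker" type:"string"`
+
+ // The result page size.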
+ PageSize *int64 `location:"querystring" locationName:"pageSize" min:"1" type:"integer"` +} + +// String returns the string representation +func (s ListCertificatesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListCertificatesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListCertificatesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListCertificatesInput"} + if s.PageSize != nil && *s.PageSize < 1 { + invalidParams.Add(request.NewErrParamMinValue("PageSize", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The output of the ListCertificates operation. +type ListCertificatesOutput struct { + _ struct{} `type:"structure"` + + // The descriptions of the certificates. + Certificates []*Certificate `locationName:"certificates" type:"list"` + + // The marker for the next set of results, or null if there are no additional + // results. + NextMarker *string `locationName:"nextMarker" type:"string"` +} + +// String returns the string representation +func (s ListCertificatesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListCertificatesOutput) GoString() string { + return s.String() +} + +// The input for the ListPolicies operation. +type ListPoliciesInput struct { + _ struct{} `type:"structure"` + + // Specifies the order for results. If true, the results are returned in ascending + // creation order. + AscendingOrder *bool `location:"querystring" locationName:"isAscendingOrder" type:"boolean"` + + // The marker for the next set of results. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // The result page size. + PageSize *int64 `location:"querystring" locationName:"pageSize" min:"1" type:"integer"` +} + +// String returns the string representation +func (s ListPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPoliciesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListPoliciesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListPoliciesInput"} + if s.PageSize != nil && *s.PageSize < 1 { + invalidParams.Add(request.NewErrParamMinValue("PageSize", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The output from the ListPolicies operation. +type ListPoliciesOutput struct { + _ struct{} `type:"structure"` + + // The marker for the next set of results, or null if there are no additional + // results. + NextMarker *string `locationName:"nextMarker" type:"string"` + + // The descriptions of the policies. + Policies []*Policy `locationName:"policies" type:"list"` +} + +// String returns the string representation +func (s ListPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPoliciesOutput) GoString() string { + return s.String() +} + +// The input for the ListPolicyPrincipals operation. +type ListPolicyPrincipalsInput struct { + _ struct{} `type:"structure"` + + // Specifies the order for results. If true, the results are returned in ascending + // creation order. 
+ AscendingOrder *bool `location:"querystring" locationName:"isAscendingOrder" type:"boolean"` + + // The marker for the next set of results. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // The result page size. + PageSize *int64 `location:"querystring" locationName:"pageSize" min:"1" type:"integer"` + + // The policy name. + PolicyName *string `location:"header" locationName:"x-amzn-iot-policy" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListPolicyPrincipalsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPolicyPrincipalsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListPolicyPrincipalsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListPolicyPrincipalsInput"} + if s.PageSize != nil && *s.PageSize < 1 { + invalidParams.Add(request.NewErrParamMinValue("PageSize", 1)) + } + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The output from the ListPolicyPrincipals operation. +type ListPolicyPrincipalsOutput struct { + _ struct{} `type:"structure"` + + // The marker for the next set of results, or null if there are no additional + // results. + NextMarker *string `locationName:"nextMarker" type:"string"` + + // The descriptions of the principals. + Principals []*string `locationName:"principals" type:"list"` +} + +// String returns the string representation +func (s ListPolicyPrincipalsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPolicyPrincipalsOutput) GoString() string { + return s.String() +} + +// The input for the ListPolicyVersions operation. +type ListPolicyVersionsInput struct { + _ struct{} `type:"structure"` + + // The policy name. + PolicyName *string `location:"uri" locationName:"policyName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListPolicyVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPolicyVersionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListPolicyVersionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListPolicyVersionsInput"} + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The output from the ListPolicyVersions operation. +type ListPolicyVersionsOutput struct { + _ struct{} `type:"structure"` + + // The policy versions. 
+ PolicyVersions []*PolicyVersion `locationName:"policyVersions" type:"list"` +} + +// String returns the string representation +func (s ListPolicyVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPolicyVersionsOutput) GoString() string { + return s.String() +} + +// The input for the ListPrincipalPolicies operation. +type ListPrincipalPoliciesInput struct { + _ struct{} `type:"structure"` + + // Specifies the order for results. If true, results are returned in ascending + // creation order. + AscendingOrder *bool `location:"querystring" locationName:"isAscendingOrder" type:"boolean"` + + // The marker for the next set of results. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // The result page size. + PageSize *int64 `location:"querystring" locationName:"pageSize" min:"1" type:"integer"` + + // The principal. + Principal *string `location:"header" locationName:"x-amzn-iot-principal" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListPrincipalPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPrincipalPoliciesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListPrincipalPoliciesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListPrincipalPoliciesInput"} + if s.PageSize != nil && *s.PageSize < 1 { + invalidParams.Add(request.NewErrParamMinValue("PageSize", 1)) + } + if s.Principal == nil { + invalidParams.Add(request.NewErrParamRequired("Principal")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The output from the ListPrincipalPolicies operation. +type ListPrincipalPoliciesOutput struct { + _ struct{} `type:"structure"` + + // The marker for the next set of results, or null if there are no additional + // results. + NextMarker *string `locationName:"nextMarker" type:"string"` + + // The policies. + Policies []*Policy `locationName:"policies" type:"list"` +} + +// String returns the string representation +func (s ListPrincipalPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPrincipalPoliciesOutput) GoString() string { + return s.String() +} + +// The input for the ListPrincipalThings operation. +type ListPrincipalThingsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of principals to return. + MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + + // A token used to retrieve the next value. + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` + + // The principal. + Principal *string `location:"header" locationName:"x-amzn-principal" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListPrincipalThingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPrincipalThingsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListPrincipalThingsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListPrincipalThingsInput"}
+ if s.MaxResults != nil && *s.MaxResults < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+ }
+ if s.Principal == nil {
+ invalidParams.Add(request.NewErrParamRequired("Principal"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// The output from the ListPrincipalThings operation.
+type ListPrincipalThingsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A token used to retrieve the next value.
+ NextToken *string `locationName:"nextToken" type:"string"`
+
+ // The things.
+ Things []*string `locationName:"things" type:"list"`
+}
+
+// String returns the string representation
+func (s ListPrincipalThingsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListPrincipalThingsOutput) GoString() string {
+ return s.String()
+}
+
+// The input for the ListThingPrincipals operation.
+type ListThingPrincipalsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the thing.
+ ThingName *string `location:"uri" locationName:"thingName" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListThingPrincipalsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListThingPrincipalsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListThingPrincipalsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListThingPrincipalsInput"}
+ if s.ThingName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ThingName"))
+ }
+ if s.ThingName != nil && len(*s.ThingName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ThingName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// The output from the ListThingPrincipals operation.
+type ListThingPrincipalsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The principals.
+ Principals []*string `locationName:"principals" type:"list"`
+}
+
+// String returns the string representation
+func (s ListThingPrincipalsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListThingPrincipalsOutput) GoString() string {
+ return s.String()
+}
+
+// The input for the ListThings operation.
+type ListThingsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The attribute name.
+ AttributeName *string `location:"querystring" locationName:"attributeName" type:"string"`
+
+ // The attribute value.
+ AttributeValue *string `location:"querystring" locationName:"attributeValue" type:"string"`
+
+ // The maximum number of results.
+ MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"`
+
+ // The token for the next value.
+ NextToken *string `location:"querystring" locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListThingsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListThingsInput) GoString() string {
+ return s.String()
+}
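+
+// Editor's note: a hedged sketch of filtering things by one attribute
+// name/value pair with ListThings; "building"/"b42" are hypothetical.
+//
+//    out, err := svc.ListThings(&iot.ListThingsInput{
+//        AttributeName:  aws.String("building"),
+//        AttributeValue: aws.String("b42"),
+//    })
+
+// Validate inspects the fields of the type to determine if they are valid.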
+
+// The input for the ListThings operation.
+type ListThingsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The attribute name.
+	AttributeName *string `location:"querystring" locationName:"attributeName" type:"string"`
+
+	// The attribute value.
+	AttributeValue *string `location:"querystring" locationName:"attributeValue" type:"string"`
+
+	// The maximum number of results.
+	MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"`
+
+	// The token for the next value.
+	NextToken *string `location:"querystring" locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListThingsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListThingsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListThingsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListThingsInput"}
+	if s.MaxResults != nil && *s.MaxResults < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The output from the ListThings operation.
+type ListThingsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A token used to retrieve the next value.
+	NextToken *string `locationName:"nextToken" type:"string"`
+
+	// The things.
+	Things []*ThingAttribute `locationName:"things" type:"list"`
+}
+
+// String returns the string representation
+func (s ListThingsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListThingsOutput) GoString() string {
+	return s.String()
+}
+
+// The input for the ListTopicRules operation.
+type ListTopicRulesInput struct {
+	_ struct{} `type:"structure"`
+
+	// The maximum number of results to return.
+	MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"`
+
+	// A token used to retrieve the next value.
+	NextToken *string `location:"querystring" locationName:"nextToken" type:"string"`
+
+	// Specifies whether the rule is disabled.
+	RuleDisabled *bool `location:"querystring" locationName:"ruleDisabled" type:"boolean"`
+
+	// The topic.
+	Topic *string `location:"querystring" locationName:"topic" type:"string"`
+}
+
+// String returns the string representation
+func (s ListTopicRulesInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTopicRulesInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListTopicRulesInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListTopicRulesInput"}
+	if s.MaxResults != nil && *s.MaxResults < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The output from the ListTopicRules operation.
+type ListTopicRulesOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A token used to retrieve the next value.
+	NextToken *string `locationName:"nextToken" type:"string"`
+
+	// The rules.
+	Rules []*TopicRuleListItem `locationName:"rules" type:"list"`
+}
+
+// String returns the string representation
+func (s ListTopicRulesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTopicRulesOutput) GoString() string {
+	return s.String()
+}
+
+// Describes the logging options payload.
+type LoggingOptionsPayload struct {
+	_ struct{} `type:"structure"`
+
+	// The logging level.
+	LogLevel *string `locationName:"logLevel" type:"string" enum:"LogLevel"`
+
+	// The ARN of the IAM role that grants access.
+	RoleArn *string `locationName:"roleArn" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s LoggingOptionsPayload) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LoggingOptionsPayload) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LoggingOptionsPayload) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "LoggingOptionsPayload"}
+	if s.RoleArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("RoleArn"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Describes an AWS IoT policy.
+type Policy struct {
+	_ struct{} `type:"structure"`
+
+	// The policy ARN.
+	PolicyArn *string `locationName:"policyArn" type:"string"`
+
+	// The policy name.
+	PolicyName *string `locationName:"policyName" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s Policy) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Policy) GoString() string {
+	return s.String()
+}
+
+// Describes a policy version.
+type PolicyVersion struct {
+	_ struct{} `type:"structure"`
+
+	// The date and time the policy was created.
+	CreateDate *time.Time `locationName:"createDate" type:"timestamp" timestampFormat:"unix"`
+
+	// Specifies whether the policy version is the default.
+	IsDefaultVersion *bool `locationName:"isDefaultVersion" type:"boolean"`
+
+	// The policy version ID.
+	VersionId *string `locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s PolicyVersion) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PolicyVersion) GoString() string {
+	return s.String()
+}
+
+// The input to the RegisterCACertificate operation.
+type RegisterCACertificateInput struct {
+	_ struct{} `type:"structure"`
+
+	// The CA certificate.
+	CaCertificate *string `locationName:"caCertificate" min:"1" type:"string" required:"true"`
+
+	// A boolean value that specifies if the CA certificate is set to active.
+	SetAsActive *bool `location:"querystring" locationName:"setAsActive" type:"boolean"`
+
+	// The private key verification certificate.
+	VerificationCertificate *string `locationName:"verificationCertificate" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RegisterCACertificateInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterCACertificateInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RegisterCACertificateInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "RegisterCACertificateInput"}
+	if s.CaCertificate == nil {
+		invalidParams.Add(request.NewErrParamRequired("CaCertificate"))
+	}
+	if s.CaCertificate != nil && len(*s.CaCertificate) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("CaCertificate", 1))
+	}
+	if s.VerificationCertificate == nil {
+		invalidParams.Add(request.NewErrParamRequired("VerificationCertificate"))
+	}
+	if s.VerificationCertificate != nil && len(*s.VerificationCertificate) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("VerificationCertificate", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The output from the RegisterCACertificate operation.
+type RegisterCACertificateOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The CA certificate ARN.
+	CertificateArn *string `locationName:"certificateArn" type:"string"`
+
+	// The CA certificate identifier.
+	CertificateId *string `locationName:"certificateId" min:"64" type:"string"`
+}
+
+// String returns the string representation
+func (s RegisterCACertificateOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterCACertificateOutput) GoString() string {
+	return s.String()
+}
+
+// The input to the RegisterCertificate operation.
+type RegisterCertificateInput struct {
+	_ struct{} `type:"structure"`
+
+	// The CA certificate used to sign the device certificate being registered.
+	CaCertificatePem *string `locationName:"caCertificatePem" min:"1" type:"string"`
+
+	// The certificate data, in PEM format.
+	CertificatePem *string `locationName:"certificatePem" min:"1" type:"string" required:"true"`
+
+	// A boolean value that specifies if the CA certificate is set to active.
+	SetAsActive *bool `location:"querystring" locationName:"setAsActive" type:"boolean"`
+}
+
+// String returns the string representation
+func (s RegisterCertificateInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterCertificateInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RegisterCertificateInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "RegisterCertificateInput"}
+	if s.CaCertificatePem != nil && len(*s.CaCertificatePem) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("CaCertificatePem", 1))
+	}
+	if s.CertificatePem == nil {
+		invalidParams.Add(request.NewErrParamRequired("CertificatePem"))
+	}
+	if s.CertificatePem != nil && len(*s.CertificatePem) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("CertificatePem", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The output from the RegisterCertificate operation.
+type RegisterCertificateOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The certificate ARN.
+	CertificateArn *string `locationName:"certificateArn" type:"string"`
+
+	// The certificate identifier.
+	CertificateId *string `locationName:"certificateId" min:"64" type:"string"`
+}
+
+// String returns the string representation
+func (s RegisterCertificateOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterCertificateOutput) GoString() string {
+	return s.String()
+}
+
+// The input for the RejectCertificateTransfer operation.
+type RejectCertificateTransferInput struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the certificate.
+	CertificateId *string `location:"uri" locationName:"certificateId" min:"64" type:"string" required:"true"`
+
+	// The reason the certificate transfer was rejected.
+	RejectReason *string `locationName:"rejectReason" type:"string"`
+}
+
+// String returns the string representation
+func (s RejectCertificateTransferInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RejectCertificateTransferInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RejectCertificateTransferInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "RejectCertificateTransferInput"}
+	if s.CertificateId == nil {
+		invalidParams.Add(request.NewErrParamRequired("CertificateId"))
+	}
+	if s.CertificateId != nil && len(*s.CertificateId) < 64 {
+		invalidParams.Add(request.NewErrParamMinLen("CertificateId", 64))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type RejectCertificateTransferOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s RejectCertificateTransferOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RejectCertificateTransferOutput) GoString() string {
+	return s.String()
+}
+
+// The input for the ReplaceTopicRule operation.
+type ReplaceTopicRuleInput struct {
+	_ struct{} `type:"structure" payload:"TopicRulePayload"`
+
+	// The name of the rule.
+	RuleName *string `location:"uri" locationName:"ruleName" min:"1" type:"string" required:"true"`
+
+	// The rule payload.
+	TopicRulePayload *TopicRulePayload `locationName:"topicRulePayload" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s ReplaceTopicRuleInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplaceTopicRuleInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReplaceTopicRuleInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ReplaceTopicRuleInput"}
+	if s.RuleName == nil {
+		invalidParams.Add(request.NewErrParamRequired("RuleName"))
+	}
+	if s.RuleName != nil && len(*s.RuleName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("RuleName", 1))
+	}
+	if s.TopicRulePayload == nil {
+		invalidParams.Add(request.NewErrParamRequired("TopicRulePayload"))
+	}
+	if s.TopicRulePayload != nil {
+		if err := s.TopicRulePayload.Validate(); err != nil {
+			invalidParams.AddNested("TopicRulePayload", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type ReplaceTopicRuleOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s ReplaceTopicRuleOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplaceTopicRuleOutput) GoString() string {
+	return s.String()
+}
+
+// Describes an action to republish to another topic.
+type RepublishAction struct {
+	_ struct{} `type:"structure"`
+
+	// The ARN of the IAM role that grants access.
+	RoleArn *string `locationName:"roleArn" type:"string" required:"true"`
+
+	// The name of the MQTT topic.
+	Topic *string `locationName:"topic" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RepublishAction) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RepublishAction) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RepublishAction) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "RepublishAction"}
+	if s.RoleArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("RoleArn"))
+	}
+	if s.Topic == nil {
+		invalidParams.Add(request.NewErrParamRequired("Topic"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Describes an action to write data to an Amazon S3 bucket.
+type S3Action struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon S3 bucket.
+	BucketName *string `locationName:"bucketName" type:"string" required:"true"`
+
+	// The object key.
+	Key *string `locationName:"key" type:"string" required:"true"`
+
+	// The ARN of the IAM role that grants access.
+	RoleArn *string `locationName:"roleArn" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s S3Action) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s S3Action) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *S3Action) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "S3Action"}
+	if s.BucketName == nil {
+		invalidParams.Add(request.NewErrParamRequired("BucketName"))
+	}
+	if s.Key == nil {
+		invalidParams.Add(request.NewErrParamRequired("Key"))
+	}
+	if s.RoleArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("RoleArn"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The input for the SetDefaultPolicyVersion operation.
+type SetDefaultPolicyVersionInput struct {
+	_ struct{} `type:"structure"`
+
+	// The policy name.
+	PolicyName *string `location:"uri" locationName:"policyName" min:"1" type:"string" required:"true"`
+
+	// The policy version ID.
+	PolicyVersionId *string `location:"uri" locationName:"policyVersionId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s SetDefaultPolicyVersionInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetDefaultPolicyVersionInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SetDefaultPolicyVersionInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "SetDefaultPolicyVersionInput"}
+	if s.PolicyName == nil {
+		invalidParams.Add(request.NewErrParamRequired("PolicyName"))
+	}
+	if s.PolicyName != nil && len(*s.PolicyName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1))
+	}
+	if s.PolicyVersionId == nil {
+		invalidParams.Add(request.NewErrParamRequired("PolicyVersionId"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type SetDefaultPolicyVersionOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s SetDefaultPolicyVersionOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetDefaultPolicyVersionOutput) GoString() string {
+	return s.String()
+}
+
+// The input for the SetLoggingOptions operation.
+type SetLoggingOptionsInput struct {
+	_ struct{} `type:"structure" payload:"LoggingOptionsPayload"`
+
+	// The logging options payload.
+	LoggingOptionsPayload *LoggingOptionsPayload `locationName:"loggingOptionsPayload" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s SetLoggingOptionsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetLoggingOptionsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SetLoggingOptionsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "SetLoggingOptionsInput"}
+	if s.LoggingOptionsPayload == nil {
+		invalidParams.Add(request.NewErrParamRequired("LoggingOptionsPayload"))
+	}
+	if s.LoggingOptionsPayload != nil {
+		if err := s.LoggingOptionsPayload.Validate(); err != nil {
+			invalidParams.AddNested("LoggingOptionsPayload", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type SetLoggingOptionsOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s SetLoggingOptionsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetLoggingOptionsOutput) GoString() string {
+	return s.String()
+}
+
+// Describes an action to publish to an Amazon SNS topic.
+type SnsAction struct {
+	_ struct{} `type:"structure"`
+
+	// The message format of the message to publish. Optional. Accepted values
+	// are "JSON" and "RAW". The default value of the attribute is "RAW". SNS
+	// uses this setting to determine if the payload should be parsed and
+	// relevant platform-specific bits of the payload should be extracted. To
+	// read more about SNS message formats, refer to the official SNS
+	// documentation.
+	MessageFormat *string `locationName:"messageFormat" type:"string" enum:"MessageFormat"`
+
+	// The ARN of the IAM role that grants access.
+	RoleArn *string `locationName:"roleArn" type:"string" required:"true"`
+
+	// The ARN of the SNS topic.
+	TargetArn *string `locationName:"targetArn" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s SnsAction) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SnsAction) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SnsAction) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "SnsAction"}
+	if s.RoleArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("RoleArn"))
+	}
+	if s.TargetArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("TargetArn"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Describes an action to publish data to an Amazon SQS queue.
+type SqsAction struct {
+	_ struct{} `type:"structure"`
+
+	// The URL of the Amazon SQS queue.
+	QueueUrl *string `locationName:"queueUrl" type:"string" required:"true"`
+
+	// The ARN of the IAM role that grants access.
+	RoleArn *string `locationName:"roleArn" type:"string" required:"true"`
+
+	// Specifies whether to use Base64 encoding.
+	UseBase64 *bool `locationName:"useBase64" type:"boolean"`
+}
+
+// String returns the string representation
+func (s SqsAction) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SqsAction) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SqsAction) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "SqsAction"}
+	if s.QueueUrl == nil {
+		invalidParams.Add(request.NewErrParamRequired("QueueUrl"))
+	}
+	if s.RoleArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("RoleArn"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Describes a thing attribute.
+type ThingAttribute struct {
+	_ struct{} `type:"structure"`
+
+	// The attributes.
+	Attributes map[string]*string `locationName:"attributes" type:"map"`
+
+	// The name of the thing.
+	ThingName *string `locationName:"thingName" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ThingAttribute) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ThingAttribute) GoString() string {
+	return s.String()
+}
+
+// Describes a rule.
+type TopicRule struct {
+	_ struct{} `type:"structure"`
+
+	// The actions associated with the rule.
+	Actions []*Action `locationName:"actions" type:"list"`
+
+	// The version of the SQL rules engine to use when evaluating the rule.
+	AwsIotSqlVersion *string `locationName:"awsIotSqlVersion" type:"string"`
+
+	// The date and time the rule was created.
+	CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unix"`
+
+	// The description of the rule.
+	Description *string `locationName:"description" type:"string"`
+
+	// Specifies whether the rule is disabled.
+	RuleDisabled *bool `locationName:"ruleDisabled" type:"boolean"`
+
+	// The name of the rule.
+	RuleName *string `locationName:"ruleName" min:"1" type:"string"`
+
+	// The SQL statement used to query the topic. When using a SQL query with
+	// multiple lines, be sure to escape the newline characters.
+	Sql *string `locationName:"sql" type:"string"`
+}
+
+// String returns the string representation
+func (s TopicRule) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TopicRule) GoString() string {
+	return s.String()
+}
+
+// Describes a rule.
+type TopicRuleListItem struct {
+	_ struct{} `type:"structure"`
+
+	// The date and time the rule was created.
+	CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unix"`
+
+	// The rule ARN.
+	RuleArn *string `locationName:"ruleArn" type:"string"`
+
+	// Specifies whether the rule is disabled.
+	RuleDisabled *bool `locationName:"ruleDisabled" type:"boolean"`
+
+	// The name of the rule.
+	RuleName *string `locationName:"ruleName" min:"1" type:"string"`
+
+	// The pattern for the topic names that apply.
+	TopicPattern *string `locationName:"topicPattern" type:"string"`
+}
+
+// String returns the string representation
+func (s TopicRuleListItem) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TopicRuleListItem) GoString() string {
+	return s.String()
+}
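+
+// A topic rule pairs an SQL statement with the actions to run on matching
+// messages. An illustrative payload (the topic filter, threshold, and ARNs
+// below are made up for the sketch):
+//
+//	payload := &iot.TopicRulePayload{
+//		Sql: aws.String("SELECT * FROM 'sensors/+/temperature' WHERE temp > 60"),
+//		Actions: []*iot.Action{
+//			{Sns: &iot.SnsAction{
+//				RoleArn:   aws.String("arn:aws:iam::123456789012:role/iot-actions"),
+//				TargetArn: aws.String("arn:aws:sns:us-east-1:123456789012:alerts"),
+//			}},
+//		},
+//	}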
+
+// Describes a rule.
+type TopicRulePayload struct {
+	_ struct{} `type:"structure"`
+
+	// The actions associated with the rule.
+	Actions []*Action `locationName:"actions" type:"list" required:"true"`
+
+	// The version of the SQL rules engine to use when evaluating the rule.
+	AwsIotSqlVersion *string `locationName:"awsIotSqlVersion" type:"string"`
+
+	// The description of the rule.
+	Description *string `locationName:"description" type:"string"`
+
+	// Specifies whether the rule is disabled.
+	RuleDisabled *bool `locationName:"ruleDisabled" type:"boolean"`
+
+	// The SQL statement used to query the topic. For more information, see AWS
+	// IoT SQL Reference (http://docs.aws.amazon.com/iot/latest/developerguide/iot-rules.html#aws-iot-sql-reference)
+	// in the AWS IoT Developer Guide.
+	Sql *string `locationName:"sql" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s TopicRulePayload) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TopicRulePayload) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *TopicRulePayload) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "TopicRulePayload"}
+	if s.Actions == nil {
+		invalidParams.Add(request.NewErrParamRequired("Actions"))
+	}
+	if s.Sql == nil {
+		invalidParams.Add(request.NewErrParamRequired("Sql"))
+	}
+	if s.Actions != nil {
+		for i, v := range s.Actions {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Actions", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The input for the TransferCertificate operation.
+type TransferCertificateInput struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the certificate.
+	CertificateId *string `location:"uri" locationName:"certificateId" min:"64" type:"string" required:"true"`
+
+	// The AWS account.
+	TargetAwsAccount *string `location:"querystring" locationName:"targetAwsAccount" type:"string" required:"true"`
+
+	// The transfer message.
+	TransferMessage *string `locationName:"transferMessage" type:"string"`
+}
+
+// String returns the string representation
+func (s TransferCertificateInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TransferCertificateInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *TransferCertificateInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "TransferCertificateInput"}
+	if s.CertificateId == nil {
+		invalidParams.Add(request.NewErrParamRequired("CertificateId"))
+	}
+	if s.CertificateId != nil && len(*s.CertificateId) < 64 {
+		invalidParams.Add(request.NewErrParamMinLen("CertificateId", 64))
+	}
+	if s.TargetAwsAccount == nil {
+		invalidParams.Add(request.NewErrParamRequired("TargetAwsAccount"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
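+
+// A certificate handed to TransferCertificate stays in the PENDING_TRANSFER
+// state until the target account accepts it; until then the source account
+// can cancel the transfer and the target account can reject it, with the
+// dates and reasons recorded in TransferData below.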
+
+// The output from the TransferCertificate operation.
+type TransferCertificateOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The ARN of the certificate.
+	TransferredCertificateArn *string `locationName:"transferredCertificateArn" type:"string"`
+}
+
+// String returns the string representation
+func (s TransferCertificateOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TransferCertificateOutput) GoString() string {
+	return s.String()
+}
+
+// Data used to transfer a certificate to an AWS account.
+type TransferData struct {
+	_ struct{} `type:"structure"`
+
+	// The date the transfer was accepted.
+	AcceptDate *time.Time `locationName:"acceptDate" type:"timestamp" timestampFormat:"unix"`
+
+	// The date the transfer was rejected.
+	RejectDate *time.Time `locationName:"rejectDate" type:"timestamp" timestampFormat:"unix"`
+
+	// The reason why the transfer was rejected.
+	RejectReason *string `locationName:"rejectReason" type:"string"`
+
+	// The date the transfer took place.
+	TransferDate *time.Time `locationName:"transferDate" type:"timestamp" timestampFormat:"unix"`
+
+	// The transfer message.
+	TransferMessage *string `locationName:"transferMessage" type:"string"`
+}
+
+// String returns the string representation
+func (s TransferData) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TransferData) GoString() string {
+	return s.String()
+}
+
+// The input to the UpdateCACertificate operation.
+type UpdateCACertificateInput struct {
+	_ struct{} `type:"structure"`
+
+	// The CA certificate identifier.
+	CertificateId *string `location:"uri" locationName:"caCertificateId" min:"64" type:"string" required:"true"`
+
+	// The updated status of the CA certificate.
+	//
+	// Note: The status value REGISTER_INACTIVE is deprecated and should not be
+	// used.
+	NewStatus *string `location:"querystring" locationName:"newStatus" type:"string" required:"true" enum:"CACertificateStatus"`
+}
+
+// String returns the string representation
+func (s UpdateCACertificateInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateCACertificateInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateCACertificateInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "UpdateCACertificateInput"}
+	if s.CertificateId == nil {
+		invalidParams.Add(request.NewErrParamRequired("CertificateId"))
+	}
+	if s.CertificateId != nil && len(*s.CertificateId) < 64 {
+		invalidParams.Add(request.NewErrParamMinLen("CertificateId", 64))
+	}
+	if s.NewStatus == nil {
+		invalidParams.Add(request.NewErrParamRequired("NewStatus"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type UpdateCACertificateOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s UpdateCACertificateOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateCACertificateOutput) GoString() string {
+	return s.String()
+}
+
+// The input for the UpdateCertificate operation.
+type UpdateCertificateInput struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the certificate.
+	CertificateId *string `location:"uri" locationName:"certificateId" min:"64" type:"string" required:"true"`
+
+	// The new status.
+	//
+	// Note: Setting the status to PENDING_TRANSFER will result in an exception
+	// being thrown. PENDING_TRANSFER is a status used internally by AWS IoT. It
+	// is not intended for developer use.
+	//
+	// Note: The status value REGISTER_INACTIVE is deprecated and should not be
+	// used.
+	NewStatus *string `location:"querystring" locationName:"newStatus" type:"string" required:"true" enum:"CertificateStatus"`
+}
+
+// String returns the string representation
+func (s UpdateCertificateInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateCertificateInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateCertificateInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "UpdateCertificateInput"}
+	if s.CertificateId == nil {
+		invalidParams.Add(request.NewErrParamRequired("CertificateId"))
+	}
+	if s.CertificateId != nil && len(*s.CertificateId) < 64 {
+		invalidParams.Add(request.NewErrParamMinLen("CertificateId", 64))
+	}
+	if s.NewStatus == nil {
+		invalidParams.Add(request.NewErrParamRequired("NewStatus"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type UpdateCertificateOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s UpdateCertificateOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateCertificateOutput) GoString() string {
+	return s.String()
+}
+
+// The input for the UpdateThing operation.
+type UpdateThingInput struct {
+	_ struct{} `type:"structure"`
+
+	// The attribute payload, a JSON string containing up to three key-value pairs
+	// (for example, {\"attributes\":{\"string1\":\"string2\"}}).
+	AttributePayload *AttributePayload `locationName:"attributePayload" type:"structure" required:"true"`
+
+	// The thing name.
+	ThingName *string `location:"uri" locationName:"thingName" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateThingInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateThingInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateThingInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "UpdateThingInput"}
+	if s.AttributePayload == nil {
+		invalidParams.Add(request.NewErrParamRequired("AttributePayload"))
+	}
+	if s.ThingName == nil {
+		invalidParams.Add(request.NewErrParamRequired("ThingName"))
+	}
+	if s.ThingName != nil && len(*s.ThingName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ThingName", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The output from the UpdateThing operation.
+type UpdateThingOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s UpdateThingOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateThingOutput) GoString() string {
+	return s.String()
+}
+
+const (
+	// @enum CACertificateStatus
+	CACertificateStatusActive = "ACTIVE"
+	// @enum CACertificateStatus
+	CACertificateStatusInactive = "INACTIVE"
+)
+
+const (
+	// @enum CertificateStatus
+	CertificateStatusActive = "ACTIVE"
+	// @enum CertificateStatus
+	CertificateStatusInactive = "INACTIVE"
+	// @enum CertificateStatus
+	CertificateStatusRevoked = "REVOKED"
+	// @enum CertificateStatus
+	CertificateStatusPendingTransfer = "PENDING_TRANSFER"
+	// @enum CertificateStatus
+	CertificateStatusRegisterInactive = "REGISTER_INACTIVE"
+)
+
+const (
+	// @enum DynamoKeyType
+	DynamoKeyTypeString = "STRING"
+	// @enum DynamoKeyType
+	DynamoKeyTypeNumber = "NUMBER"
+)
+
+const (
+	// @enum LogLevel
+	LogLevelDebug = "DEBUG"
+	// @enum LogLevel
+	LogLevelInfo = "INFO"
+	// @enum LogLevel
+	LogLevelError = "ERROR"
+	// @enum LogLevel
+	LogLevelWarn = "WARN"
+	// @enum LogLevel
+	LogLevelDisabled = "DISABLED"
+)
+
+const (
+	// @enum MessageFormat
+	MessageFormatRaw = "RAW"
+	// @enum MessageFormat
+	MessageFormatJson = "JSON"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/iot/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/iot/examples_test.go
new file mode 100644
index 000000000..f846db2a8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/iot/examples_test.go
@@ -0,0 +1,1186 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package iot_test
+
+import (
+	"bytes"
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/iot"
+)
+
+var _ time.Duration
+var _ bytes.Buffer
+
+func ExampleIoT_AcceptCertificateTransfer() {
+	svc := iot.New(session.New())
+
+	params := &iot.AcceptCertificateTransferInput{
+		CertificateId: aws.String("CertificateId"), // Required
+		SetAsActive:   aws.Bool(true),
+	}
+	resp, err := svc.AcceptCertificateTransfer(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_AttachPrincipalPolicy() {
+	svc := iot.New(session.New())
+
+	params := &iot.AttachPrincipalPolicyInput{
+		PolicyName: aws.String("PolicyName"), // Required
+		Principal:  aws.String("Principal"), // Required
+	}
+	resp, err := svc.AttachPrincipalPolicy(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
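+
+// Each example below only prints err.Error() on failure. To branch on the
+// service error code instead, err can be cast to the awserr.Error interface;
+// a sketch, not part of the generated examples:
+//
+//	if aerr, ok := err.(awserr.Error); ok {
+//		fmt.Println(aerr.Code(), aerr.Message())
+//	}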
+
+func ExampleIoT_AttachThingPrincipal() {
+	svc := iot.New(session.New())
+
+	params := &iot.AttachThingPrincipalInput{
+		Principal: aws.String("Principal"), // Required
+		ThingName: aws.String("ThingName"), // Required
+	}
+	resp, err := svc.AttachThingPrincipal(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_CancelCertificateTransfer() {
+	svc := iot.New(session.New())
+
+	params := &iot.CancelCertificateTransferInput{
+		CertificateId: aws.String("CertificateId"), // Required
+	}
+	resp, err := svc.CancelCertificateTransfer(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_CreateCertificateFromCsr() {
+	svc := iot.New(session.New())
+
+	params := &iot.CreateCertificateFromCsrInput{
+		CertificateSigningRequest: aws.String("CertificateSigningRequest"), // Required
+		SetAsActive:               aws.Bool(true),
+	}
+	resp, err := svc.CreateCertificateFromCsr(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_CreateKeysAndCertificate() {
+	svc := iot.New(session.New())
+
+	params := &iot.CreateKeysAndCertificateInput{
+		SetAsActive: aws.Bool(true),
+	}
+	resp, err := svc.CreateKeysAndCertificate(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_CreatePolicy() {
+	svc := iot.New(session.New())
+
+	params := &iot.CreatePolicyInput{
+		PolicyDocument: aws.String("PolicyDocument"), // Required
+		PolicyName:     aws.String("PolicyName"),     // Required
+	}
+	resp, err := svc.CreatePolicy(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_CreatePolicyVersion() {
+	svc := iot.New(session.New())
+
+	params := &iot.CreatePolicyVersionInput{
+		PolicyDocument: aws.String("PolicyDocument"), // Required
+		PolicyName:     aws.String("PolicyName"),     // Required
+		SetAsDefault:   aws.Bool(true),
+	}
+	resp, err := svc.CreatePolicyVersion(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_CreateThing() {
+	svc := iot.New(session.New())
+
+	params := &iot.CreateThingInput{
+		ThingName: aws.String("ThingName"), // Required
+		AttributePayload: &iot.AttributePayload{
+			Attributes: map[string]*string{
+				"Key": aws.String("AttributeValue"), // Required
+				// More values...
+			},
+		},
+	}
+	resp, err := svc.CreateThing(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_CreateTopicRule() {
+	svc := iot.New(session.New())
+
+	params := &iot.CreateTopicRuleInput{
+		RuleName: aws.String("RuleName"), // Required
+		TopicRulePayload: &iot.TopicRulePayload{ // Required
+			Actions: []*iot.Action{ // Required
+				{ // Required
+					CloudwatchAlarm: &iot.CloudwatchAlarmAction{
+						AlarmName:   aws.String("AlarmName"),   // Required
+						RoleArn:     aws.String("AwsArn"),      // Required
+						StateReason: aws.String("StateReason"), // Required
+						StateValue:  aws.String("StateValue"),  // Required
+					},
+					CloudwatchMetric: &iot.CloudwatchMetricAction{
+						MetricName:      aws.String("MetricName"),      // Required
+						MetricNamespace: aws.String("MetricNamespace"), // Required
+						MetricUnit:      aws.String("MetricUnit"),      // Required
+						MetricValue:     aws.String("MetricValue"),     // Required
+						RoleArn:         aws.String("AwsArn"),          // Required
+						MetricTimestamp: aws.String("MetricTimestamp"),
+					},
+					DynamoDB: &iot.DynamoDBAction{
+						HashKeyField:  aws.String("HashKeyField"), // Required
+						HashKeyValue:  aws.String("HashKeyValue"), // Required
+						RoleArn:       aws.String("AwsArn"),       // Required
+						TableName:     aws.String("TableName"),    // Required
+						HashKeyType:   aws.String("DynamoKeyType"),
+						Operation:     aws.String("DynamoOperation"),
+						PayloadField:  aws.String("PayloadField"),
+						RangeKeyField: aws.String("RangeKeyField"),
+						RangeKeyType:  aws.String("DynamoKeyType"),
+						RangeKeyValue: aws.String("RangeKeyValue"),
+					},
+					Elasticsearch: &iot.ElasticsearchAction{
+						Endpoint: aws.String("ElasticsearchEndpoint"), // Required
+						Id:       aws.String("ElasticsearchId"),       // Required
+						Index:    aws.String("ElasticsearchIndex"),    // Required
+						RoleArn:  aws.String("AwsArn"),                // Required
+						Type:     aws.String("ElasticsearchType"),     // Required
+					},
+					Firehose: &iot.FirehoseAction{
+						DeliveryStreamName: aws.String("DeliveryStreamName"), // Required
+						RoleArn:            aws.String("AwsArn"),             // Required
+					},
+					Kinesis: &iot.KinesisAction{
+						RoleArn:      aws.String("AwsArn"),     // Required
+						StreamName:   aws.String("StreamName"), // Required
+						PartitionKey: aws.String("PartitionKey"),
+					},
+					Lambda: &iot.LambdaAction{
+						FunctionArn: aws.String("FunctionArn"), // Required
+					},
+					Republish: &iot.RepublishAction{
+						RoleArn: aws.String("AwsArn"),       // Required
+						Topic:   aws.String("TopicPattern"), // Required
+					},
+					S3: &iot.S3Action{
+						BucketName: aws.String("BucketName"), // Required
+						Key:        aws.String("Key"),        // Required
+						RoleArn:    aws.String("AwsArn"),     // Required
+					},
+					Sns: &iot.SnsAction{
+						RoleArn:       aws.String("AwsArn"), // Required
+						TargetArn:     aws.String("AwsArn"), // Required
+						MessageFormat: aws.String("MessageFormat"),
+					},
+					Sqs: &iot.SqsAction{
+						QueueUrl:  aws.String("QueueUrl"), // Required
+						RoleArn:   aws.String("AwsArn"),   // Required
+						UseBase64: aws.Bool(true),
+					},
+				},
+				// More values...
+			},
+			Sql:              aws.String("SQL"), // Required
+			AwsIotSqlVersion: aws.String("AwsIotSqlVersion"),
+			Description:      aws.String("Description"),
+			RuleDisabled:     aws.Bool(true),
+		},
+	}
+	resp, err := svc.CreateTopicRule(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
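+
+// The example above populates every action type at once purely to show the
+// full shape of iot.Action; a real rule would normally set only the action
+// fields it needs (for example just S3, or just Sns) and leave the rest nil.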
+
+func ExampleIoT_DeleteCACertificate() {
+	svc := iot.New(session.New())
+
+	params := &iot.DeleteCACertificateInput{
+		CertificateId: aws.String("CertificateId"), // Required
+	}
+	resp, err := svc.DeleteCACertificate(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_DeleteCertificate() {
+	svc := iot.New(session.New())
+
+	params := &iot.DeleteCertificateInput{
+		CertificateId: aws.String("CertificateId"), // Required
+	}
+	resp, err := svc.DeleteCertificate(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_DeletePolicy() {
+	svc := iot.New(session.New())
+
+	params := &iot.DeletePolicyInput{
+		PolicyName: aws.String("PolicyName"), // Required
+	}
+	resp, err := svc.DeletePolicy(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_DeletePolicyVersion() {
+	svc := iot.New(session.New())
+
+	params := &iot.DeletePolicyVersionInput{
+		PolicyName:      aws.String("PolicyName"),      // Required
+		PolicyVersionId: aws.String("PolicyVersionId"), // Required
+	}
+	resp, err := svc.DeletePolicyVersion(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_DeleteRegistrationCode() {
+	svc := iot.New(session.New())
+
+	var params *iot.DeleteRegistrationCodeInput
+	resp, err := svc.DeleteRegistrationCode(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_DeleteThing() {
+	svc := iot.New(session.New())
+
+	params := &iot.DeleteThingInput{
+		ThingName: aws.String("ThingName"), // Required
+	}
+	resp, err := svc.DeleteThing(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_DeleteTopicRule() {
+	svc := iot.New(session.New())
+
+	params := &iot.DeleteTopicRuleInput{
+		RuleName: aws.String("RuleName"), // Required
+	}
+	resp, err := svc.DeleteTopicRule(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_DescribeCACertificate() {
+	svc := iot.New(session.New())
+
+	params := &iot.DescribeCACertificateInput{
+		CertificateId: aws.String("CertificateId"), // Required
+	}
+	resp, err := svc.DescribeCACertificate(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_DescribeCertificate() {
+	svc := iot.New(session.New())
+
+	params := &iot.DescribeCertificateInput{
+		CertificateId: aws.String("CertificateId"), // Required
+	}
+	resp, err := svc.DescribeCertificate(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_DescribeEndpoint() {
+	svc := iot.New(session.New())
+
+	var params *iot.DescribeEndpointInput
+	resp, err := svc.DescribeEndpoint(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_DescribeThing() {
+	svc := iot.New(session.New())
+
+	params := &iot.DescribeThingInput{
+		ThingName: aws.String("ThingName"), // Required
+	}
+	resp, err := svc.DescribeThing(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_DetachPrincipalPolicy() {
+	svc := iot.New(session.New())
+
+	params := &iot.DetachPrincipalPolicyInput{
+		PolicyName: aws.String("PolicyName"), // Required
+		Principal:  aws.String("Principal"), // Required
+	}
+	resp, err := svc.DetachPrincipalPolicy(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_DetachThingPrincipal() {
+	svc := iot.New(session.New())
+
+	params := &iot.DetachThingPrincipalInput{
+		Principal: aws.String("Principal"), // Required
+		ThingName: aws.String("ThingName"), // Required
+	}
+	resp, err := svc.DetachThingPrincipal(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_DisableTopicRule() {
+	svc := iot.New(session.New())
+
+	params := &iot.DisableTopicRuleInput{
+		RuleName: aws.String("RuleName"), // Required
+	}
+	resp, err := svc.DisableTopicRule(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_EnableTopicRule() {
+	svc := iot.New(session.New())
+
+	params := &iot.EnableTopicRuleInput{
+		RuleName: aws.String("RuleName"), // Required
+	}
+	resp, err := svc.EnableTopicRule(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_GetLoggingOptions() {
+	svc := iot.New(session.New())
+
+	var params *iot.GetLoggingOptionsInput
+	resp, err := svc.GetLoggingOptions(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_GetPolicy() {
+	svc := iot.New(session.New())
+
+	params := &iot.GetPolicyInput{
+		PolicyName: aws.String("PolicyName"), // Required
+	}
+	resp, err := svc.GetPolicy(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_GetPolicyVersion() {
+	svc := iot.New(session.New())
+
+	params := &iot.GetPolicyVersionInput{
+		PolicyName:      aws.String("PolicyName"),      // Required
+		PolicyVersionId: aws.String("PolicyVersionId"), // Required
+	}
+	resp, err := svc.GetPolicyVersion(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_GetRegistrationCode() {
+	svc := iot.New(session.New())
+
+	var params *iot.GetRegistrationCodeInput
+	resp, err := svc.GetRegistrationCode(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_GetTopicRule() {
+	svc := iot.New(session.New())
+
+	params := &iot.GetTopicRuleInput{
+		RuleName: aws.String("RuleName"), // Required
+	}
+	resp, err := svc.GetTopicRule(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_ListCACertificates() {
+	svc := iot.New(session.New())
+
+	params := &iot.ListCACertificatesInput{
+		AscendingOrder: aws.Bool(true),
+		Marker:         aws.String("Marker"),
+		PageSize:       aws.Int64(1),
+	}
+	resp, err := svc.ListCACertificates(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_ListCertificates() {
+	svc := iot.New(session.New())
+
+	params := &iot.ListCertificatesInput{
+		AscendingOrder: aws.Bool(true),
+		Marker:         aws.String("Marker"),
+		PageSize:       aws.Int64(1),
+	}
+	resp, err := svc.ListCertificates(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_ListCertificatesByCA() {
+	svc := iot.New(session.New())
+
+	params := &iot.ListCertificatesByCAInput{
+		CaCertificateId: aws.String("CertificateId"), // Required
+		AscendingOrder:  aws.Bool(true),
+		Marker:          aws.String("Marker"),
+		PageSize:        aws.Int64(1),
+	}
+	resp, err := svc.ListCertificatesByCA(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_ListPolicies() {
+	svc := iot.New(session.New())
+
+	params := &iot.ListPoliciesInput{
+		AscendingOrder: aws.Bool(true),
+		Marker:         aws.String("Marker"),
+		PageSize:       aws.Int64(1),
+	}
+	resp, err := svc.ListPolicies(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_ListPolicyPrincipals() {
+	svc := iot.New(session.New())
+
+	params := &iot.ListPolicyPrincipalsInput{
+		PolicyName:     aws.String("PolicyName"), // Required
+		AscendingOrder: aws.Bool(true),
+		Marker:         aws.String("Marker"),
+		PageSize:       aws.Int64(1),
+	}
+	resp, err := svc.ListPolicyPrincipals(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_ListPolicyVersions() {
+	svc := iot.New(session.New())
+
+	params := &iot.ListPolicyVersionsInput{
+		PolicyName: aws.String("PolicyName"), // Required
+	}
+	resp, err := svc.ListPolicyVersions(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_ListPrincipalPolicies() {
+	svc := iot.New(session.New())
+
+	params := &iot.ListPrincipalPoliciesInput{
+		Principal:      aws.String("Principal"), // Required
+		AscendingOrder: aws.Bool(true),
+		Marker:         aws.String("Marker"),
+		PageSize:       aws.Int64(1),
+	}
+	resp, err := svc.ListPrincipalPolicies(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_ListPrincipalThings() {
+	svc := iot.New(session.New())
+
+	params := &iot.ListPrincipalThingsInput{
+		Principal:  aws.String("Principal"), // Required
+		MaxResults: aws.Int64(1),
+		NextToken:  aws.String("NextToken"),
+	}
+	resp, err := svc.ListPrincipalThings(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_ListThingPrincipals() {
+	svc := iot.New(session.New())
+
+	params := &iot.ListThingPrincipalsInput{
+		ThingName: aws.String("ThingName"), // Required
+	}
+	resp, err := svc.ListThingPrincipals(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_ListThings() {
+	svc := iot.New(session.New())
+
+	params := &iot.ListThingsInput{
+		AttributeName:  aws.String("AttributeName"),
+		AttributeValue: aws.String("AttributeValue"),
+		MaxResults:     aws.Int64(1),
+		NextToken:      aws.String("NextToken"),
+	}
+	resp, err := svc.ListThings(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_ListTopicRules() {
+	svc := iot.New(session.New())
+
+	params := &iot.ListTopicRulesInput{
+		MaxResults:   aws.Int64(1),
+		NextToken:    aws.String("NextToken"),
+		RuleDisabled: aws.Bool(true),
+		Topic:        aws.String("Topic"),
+	}
+	resp, err := svc.ListTopicRules(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_RegisterCACertificate() {
+	svc := iot.New(session.New())
+
+	params := &iot.RegisterCACertificateInput{
+		CaCertificate:           aws.String("CertificatePem"), // Required
+		VerificationCertificate: aws.String("CertificatePem"), // Required
+		SetAsActive:             aws.Bool(true),
+	}
+	resp, err := svc.RegisterCACertificate(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_RegisterCertificate() {
+	svc := iot.New(session.New())
+
+	params := &iot.RegisterCertificateInput{
+		CertificatePem:   aws.String("CertificatePem"), // Required
+		CaCertificatePem: aws.String("CertificatePem"),
+		SetAsActive:      aws.Bool(true),
+	}
+	resp, err := svc.RegisterCertificate(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleIoT_RejectCertificateTransfer() {
+	svc := iot.New(session.New())
+
+	params := &iot.RejectCertificateTransferInput{
+		CertificateId: aws.String("CertificateId"), // Required
+		RejectReason:  aws.String("Message"),
+	}
+	resp, err := svc.RejectCertificateTransfer(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+ fmt.Println(resp) +} + +func ExampleIoT_ReplaceTopicRule() { + svc := iot.New(session.New()) + + params := &iot.ReplaceTopicRuleInput{ + RuleName: aws.String("RuleName"), // Required + TopicRulePayload: &iot.TopicRulePayload{ // Required + Actions: []*iot.Action{ // Required + { // Required + CloudwatchAlarm: &iot.CloudwatchAlarmAction{ + AlarmName: aws.String("AlarmName"), // Required + RoleArn: aws.String("AwsArn"), // Required + StateReason: aws.String("StateReason"), // Required + StateValue: aws.String("StateValue"), // Required + }, + CloudwatchMetric: &iot.CloudwatchMetricAction{ + MetricName: aws.String("MetricName"), // Required + MetricNamespace: aws.String("MetricNamespace"), // Required + MetricUnit: aws.String("MetricUnit"), // Required + MetricValue: aws.String("MetricValue"), // Required + RoleArn: aws.String("AwsArn"), // Required + MetricTimestamp: aws.String("MetricTimestamp"), + }, + DynamoDB: &iot.DynamoDBAction{ + HashKeyField: aws.String("HashKeyField"), // Required + HashKeyValue: aws.String("HashKeyValue"), // Required + RoleArn: aws.String("AwsArn"), // Required + TableName: aws.String("TableName"), // Required + HashKeyType: aws.String("DynamoKeyType"), + Operation: aws.String("DynamoOperation"), + PayloadField: aws.String("PayloadField"), + RangeKeyField: aws.String("RangeKeyField"), + RangeKeyType: aws.String("DynamoKeyType"), + RangeKeyValue: aws.String("RangeKeyValue"), + }, + Elasticsearch: &iot.ElasticsearchAction{ + Endpoint: aws.String("ElasticsearchEndpoint"), // Required + Id: aws.String("ElasticsearchId"), // Required + Index: aws.String("ElasticsearchIndex"), // Required + RoleArn: aws.String("AwsArn"), // Required + Type: aws.String("ElasticsearchType"), // Required + }, + Firehose: &iot.FirehoseAction{ + DeliveryStreamName: aws.String("DeliveryStreamName"), // Required + RoleArn: aws.String("AwsArn"), // Required + }, + Kinesis: &iot.KinesisAction{ + RoleArn: aws.String("AwsArn"), // Required + StreamName: aws.String("StreamName"), // Required + PartitionKey: aws.String("PartitionKey"), + }, + Lambda: &iot.LambdaAction{ + FunctionArn: aws.String("FunctionArn"), // Required + }, + Republish: &iot.RepublishAction{ + RoleArn: aws.String("AwsArn"), // Required + Topic: aws.String("TopicPattern"), // Required + }, + S3: &iot.S3Action{ + BucketName: aws.String("BucketName"), // Required + Key: aws.String("Key"), // Required + RoleArn: aws.String("AwsArn"), // Required + }, + Sns: &iot.SnsAction{ + RoleArn: aws.String("AwsArn"), // Required + TargetArn: aws.String("AwsArn"), // Required + MessageFormat: aws.String("MessageFormat"), + }, + Sqs: &iot.SqsAction{ + QueueUrl: aws.String("QueueUrl"), // Required + RoleArn: aws.String("AwsArn"), // Required + UseBase64: aws.Bool(true), + }, + }, + // More values... + }, + Sql: aws.String("SQL"), // Required + AwsIotSqlVersion: aws.String("AwsIotSqlVersion"), + Description: aws.String("Description"), + RuleDisabled: aws.Bool(true), + }, + } + resp, err := svc.ReplaceTopicRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIoT_SetDefaultPolicyVersion() { + svc := iot.New(session.New()) + + params := &iot.SetDefaultPolicyVersionInput{ + PolicyName: aws.String("PolicyName"), // Required + PolicyVersionId: aws.String("PolicyVersionId"), // Required + } + resp, err := svc.SetDefaultPolicyVersion(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIoT_SetLoggingOptions() { + svc := iot.New(session.New()) + + params := &iot.SetLoggingOptionsInput{ + LoggingOptionsPayload: &iot.LoggingOptionsPayload{ // Required + RoleArn: aws.String("AwsArn"), // Required + LogLevel: aws.String("LogLevel"), + }, + } + resp, err := svc.SetLoggingOptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIoT_TransferCertificate() { + svc := iot.New(session.New()) + + params := &iot.TransferCertificateInput{ + CertificateId: aws.String("CertificateId"), // Required + TargetAwsAccount: aws.String("AwsAccountId"), // Required + TransferMessage: aws.String("Message"), + } + resp, err := svc.TransferCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIoT_UpdateCACertificate() { + svc := iot.New(session.New()) + + params := &iot.UpdateCACertificateInput{ + CertificateId: aws.String("CertificateId"), // Required + NewStatus: aws.String("CACertificateStatus"), // Required + } + resp, err := svc.UpdateCACertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIoT_UpdateCertificate() { + svc := iot.New(session.New()) + + params := &iot.UpdateCertificateInput{ + CertificateId: aws.String("CertificateId"), // Required + NewStatus: aws.String("CertificateStatus"), // Required + } + resp, err := svc.UpdateCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIoT_UpdateThing() { + svc := iot.New(session.New()) + + params := &iot.UpdateThingInput{ + AttributePayload: &iot.AttributePayload{ // Required + Attributes: map[string]*string{ + "Key": aws.String("AttributeValue"), // Required + // More values... + }, + }, + ThingName: aws.String("ThingName"), // Required + } + resp, err := svc.UpdateThing(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/iot/iotiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/iot/iotiface/interface.go new file mode 100644 index 000000000..7b3c81206 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/iot/iotiface/interface.go @@ -0,0 +1,218 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. 
DO NOT EDIT. + +// Package iotiface provides an interface for the AWS IoT. +package iotiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/iot" +) + +// IoTAPI is the interface type for iot.IoT. +type IoTAPI interface { + AcceptCertificateTransferRequest(*iot.AcceptCertificateTransferInput) (*request.Request, *iot.AcceptCertificateTransferOutput) + + AcceptCertificateTransfer(*iot.AcceptCertificateTransferInput) (*iot.AcceptCertificateTransferOutput, error) + + AttachPrincipalPolicyRequest(*iot.AttachPrincipalPolicyInput) (*request.Request, *iot.AttachPrincipalPolicyOutput) + + AttachPrincipalPolicy(*iot.AttachPrincipalPolicyInput) (*iot.AttachPrincipalPolicyOutput, error) + + AttachThingPrincipalRequest(*iot.AttachThingPrincipalInput) (*request.Request, *iot.AttachThingPrincipalOutput) + + AttachThingPrincipal(*iot.AttachThingPrincipalInput) (*iot.AttachThingPrincipalOutput, error) + + CancelCertificateTransferRequest(*iot.CancelCertificateTransferInput) (*request.Request, *iot.CancelCertificateTransferOutput) + + CancelCertificateTransfer(*iot.CancelCertificateTransferInput) (*iot.CancelCertificateTransferOutput, error) + + CreateCertificateFromCsrRequest(*iot.CreateCertificateFromCsrInput) (*request.Request, *iot.CreateCertificateFromCsrOutput) + + CreateCertificateFromCsr(*iot.CreateCertificateFromCsrInput) (*iot.CreateCertificateFromCsrOutput, error) + + CreateKeysAndCertificateRequest(*iot.CreateKeysAndCertificateInput) (*request.Request, *iot.CreateKeysAndCertificateOutput) + + CreateKeysAndCertificate(*iot.CreateKeysAndCertificateInput) (*iot.CreateKeysAndCertificateOutput, error) + + CreatePolicyRequest(*iot.CreatePolicyInput) (*request.Request, *iot.CreatePolicyOutput) + + CreatePolicy(*iot.CreatePolicyInput) (*iot.CreatePolicyOutput, error) + + CreatePolicyVersionRequest(*iot.CreatePolicyVersionInput) (*request.Request, *iot.CreatePolicyVersionOutput) + + CreatePolicyVersion(*iot.CreatePolicyVersionInput) (*iot.CreatePolicyVersionOutput, error) + + CreateThingRequest(*iot.CreateThingInput) (*request.Request, *iot.CreateThingOutput) + + CreateThing(*iot.CreateThingInput) (*iot.CreateThingOutput, error) + + CreateTopicRuleRequest(*iot.CreateTopicRuleInput) (*request.Request, *iot.CreateTopicRuleOutput) + + CreateTopicRule(*iot.CreateTopicRuleInput) (*iot.CreateTopicRuleOutput, error) + + DeleteCACertificateRequest(*iot.DeleteCACertificateInput) (*request.Request, *iot.DeleteCACertificateOutput) + + DeleteCACertificate(*iot.DeleteCACertificateInput) (*iot.DeleteCACertificateOutput, error) + + DeleteCertificateRequest(*iot.DeleteCertificateInput) (*request.Request, *iot.DeleteCertificateOutput) + + DeleteCertificate(*iot.DeleteCertificateInput) (*iot.DeleteCertificateOutput, error) + + DeletePolicyRequest(*iot.DeletePolicyInput) (*request.Request, *iot.DeletePolicyOutput) + + DeletePolicy(*iot.DeletePolicyInput) (*iot.DeletePolicyOutput, error) + + DeletePolicyVersionRequest(*iot.DeletePolicyVersionInput) (*request.Request, *iot.DeletePolicyVersionOutput) + + DeletePolicyVersion(*iot.DeletePolicyVersionInput) (*iot.DeletePolicyVersionOutput, error) + + DeleteRegistrationCodeRequest(*iot.DeleteRegistrationCodeInput) (*request.Request, *iot.DeleteRegistrationCodeOutput) + + DeleteRegistrationCode(*iot.DeleteRegistrationCodeInput) (*iot.DeleteRegistrationCodeOutput, error) + + DeleteThingRequest(*iot.DeleteThingInput) (*request.Request, *iot.DeleteThingOutput) + + DeleteThing(*iot.DeleteThingInput) (*iot.DeleteThingOutput, error) + + 
DeleteTopicRuleRequest(*iot.DeleteTopicRuleInput) (*request.Request, *iot.DeleteTopicRuleOutput) + + DeleteTopicRule(*iot.DeleteTopicRuleInput) (*iot.DeleteTopicRuleOutput, error) + + DescribeCACertificateRequest(*iot.DescribeCACertificateInput) (*request.Request, *iot.DescribeCACertificateOutput) + + DescribeCACertificate(*iot.DescribeCACertificateInput) (*iot.DescribeCACertificateOutput, error) + + DescribeCertificateRequest(*iot.DescribeCertificateInput) (*request.Request, *iot.DescribeCertificateOutput) + + DescribeCertificate(*iot.DescribeCertificateInput) (*iot.DescribeCertificateOutput, error) + + DescribeEndpointRequest(*iot.DescribeEndpointInput) (*request.Request, *iot.DescribeEndpointOutput) + + DescribeEndpoint(*iot.DescribeEndpointInput) (*iot.DescribeEndpointOutput, error) + + DescribeThingRequest(*iot.DescribeThingInput) (*request.Request, *iot.DescribeThingOutput) + + DescribeThing(*iot.DescribeThingInput) (*iot.DescribeThingOutput, error) + + DetachPrincipalPolicyRequest(*iot.DetachPrincipalPolicyInput) (*request.Request, *iot.DetachPrincipalPolicyOutput) + + DetachPrincipalPolicy(*iot.DetachPrincipalPolicyInput) (*iot.DetachPrincipalPolicyOutput, error) + + DetachThingPrincipalRequest(*iot.DetachThingPrincipalInput) (*request.Request, *iot.DetachThingPrincipalOutput) + + DetachThingPrincipal(*iot.DetachThingPrincipalInput) (*iot.DetachThingPrincipalOutput, error) + + DisableTopicRuleRequest(*iot.DisableTopicRuleInput) (*request.Request, *iot.DisableTopicRuleOutput) + + DisableTopicRule(*iot.DisableTopicRuleInput) (*iot.DisableTopicRuleOutput, error) + + EnableTopicRuleRequest(*iot.EnableTopicRuleInput) (*request.Request, *iot.EnableTopicRuleOutput) + + EnableTopicRule(*iot.EnableTopicRuleInput) (*iot.EnableTopicRuleOutput, error) + + GetLoggingOptionsRequest(*iot.GetLoggingOptionsInput) (*request.Request, *iot.GetLoggingOptionsOutput) + + GetLoggingOptions(*iot.GetLoggingOptionsInput) (*iot.GetLoggingOptionsOutput, error) + + GetPolicyRequest(*iot.GetPolicyInput) (*request.Request, *iot.GetPolicyOutput) + + GetPolicy(*iot.GetPolicyInput) (*iot.GetPolicyOutput, error) + + GetPolicyVersionRequest(*iot.GetPolicyVersionInput) (*request.Request, *iot.GetPolicyVersionOutput) + + GetPolicyVersion(*iot.GetPolicyVersionInput) (*iot.GetPolicyVersionOutput, error) + + GetRegistrationCodeRequest(*iot.GetRegistrationCodeInput) (*request.Request, *iot.GetRegistrationCodeOutput) + + GetRegistrationCode(*iot.GetRegistrationCodeInput) (*iot.GetRegistrationCodeOutput, error) + + GetTopicRuleRequest(*iot.GetTopicRuleInput) (*request.Request, *iot.GetTopicRuleOutput) + + GetTopicRule(*iot.GetTopicRuleInput) (*iot.GetTopicRuleOutput, error) + + ListCACertificatesRequest(*iot.ListCACertificatesInput) (*request.Request, *iot.ListCACertificatesOutput) + + ListCACertificates(*iot.ListCACertificatesInput) (*iot.ListCACertificatesOutput, error) + + ListCertificatesRequest(*iot.ListCertificatesInput) (*request.Request, *iot.ListCertificatesOutput) + + ListCertificates(*iot.ListCertificatesInput) (*iot.ListCertificatesOutput, error) + + ListCertificatesByCARequest(*iot.ListCertificatesByCAInput) (*request.Request, *iot.ListCertificatesByCAOutput) + + ListCertificatesByCA(*iot.ListCertificatesByCAInput) (*iot.ListCertificatesByCAOutput, error) + + ListPoliciesRequest(*iot.ListPoliciesInput) (*request.Request, *iot.ListPoliciesOutput) + + ListPolicies(*iot.ListPoliciesInput) (*iot.ListPoliciesOutput, error) + + ListPolicyPrincipalsRequest(*iot.ListPolicyPrincipalsInput) (*request.Request, 
*iot.ListPolicyPrincipalsOutput) + + ListPolicyPrincipals(*iot.ListPolicyPrincipalsInput) (*iot.ListPolicyPrincipalsOutput, error) + + ListPolicyVersionsRequest(*iot.ListPolicyVersionsInput) (*request.Request, *iot.ListPolicyVersionsOutput) + + ListPolicyVersions(*iot.ListPolicyVersionsInput) (*iot.ListPolicyVersionsOutput, error) + + ListPrincipalPoliciesRequest(*iot.ListPrincipalPoliciesInput) (*request.Request, *iot.ListPrincipalPoliciesOutput) + + ListPrincipalPolicies(*iot.ListPrincipalPoliciesInput) (*iot.ListPrincipalPoliciesOutput, error) + + ListPrincipalThingsRequest(*iot.ListPrincipalThingsInput) (*request.Request, *iot.ListPrincipalThingsOutput) + + ListPrincipalThings(*iot.ListPrincipalThingsInput) (*iot.ListPrincipalThingsOutput, error) + + ListThingPrincipalsRequest(*iot.ListThingPrincipalsInput) (*request.Request, *iot.ListThingPrincipalsOutput) + + ListThingPrincipals(*iot.ListThingPrincipalsInput) (*iot.ListThingPrincipalsOutput, error) + + ListThingsRequest(*iot.ListThingsInput) (*request.Request, *iot.ListThingsOutput) + + ListThings(*iot.ListThingsInput) (*iot.ListThingsOutput, error) + + ListTopicRulesRequest(*iot.ListTopicRulesInput) (*request.Request, *iot.ListTopicRulesOutput) + + ListTopicRules(*iot.ListTopicRulesInput) (*iot.ListTopicRulesOutput, error) + + RegisterCACertificateRequest(*iot.RegisterCACertificateInput) (*request.Request, *iot.RegisterCACertificateOutput) + + RegisterCACertificate(*iot.RegisterCACertificateInput) (*iot.RegisterCACertificateOutput, error) + + RegisterCertificateRequest(*iot.RegisterCertificateInput) (*request.Request, *iot.RegisterCertificateOutput) + + RegisterCertificate(*iot.RegisterCertificateInput) (*iot.RegisterCertificateOutput, error) + + RejectCertificateTransferRequest(*iot.RejectCertificateTransferInput) (*request.Request, *iot.RejectCertificateTransferOutput) + + RejectCertificateTransfer(*iot.RejectCertificateTransferInput) (*iot.RejectCertificateTransferOutput, error) + + ReplaceTopicRuleRequest(*iot.ReplaceTopicRuleInput) (*request.Request, *iot.ReplaceTopicRuleOutput) + + ReplaceTopicRule(*iot.ReplaceTopicRuleInput) (*iot.ReplaceTopicRuleOutput, error) + + SetDefaultPolicyVersionRequest(*iot.SetDefaultPolicyVersionInput) (*request.Request, *iot.SetDefaultPolicyVersionOutput) + + SetDefaultPolicyVersion(*iot.SetDefaultPolicyVersionInput) (*iot.SetDefaultPolicyVersionOutput, error) + + SetLoggingOptionsRequest(*iot.SetLoggingOptionsInput) (*request.Request, *iot.SetLoggingOptionsOutput) + + SetLoggingOptions(*iot.SetLoggingOptionsInput) (*iot.SetLoggingOptionsOutput, error) + + TransferCertificateRequest(*iot.TransferCertificateInput) (*request.Request, *iot.TransferCertificateOutput) + + TransferCertificate(*iot.TransferCertificateInput) (*iot.TransferCertificateOutput, error) + + UpdateCACertificateRequest(*iot.UpdateCACertificateInput) (*request.Request, *iot.UpdateCACertificateOutput) + + UpdateCACertificate(*iot.UpdateCACertificateInput) (*iot.UpdateCACertificateOutput, error) + + UpdateCertificateRequest(*iot.UpdateCertificateInput) (*request.Request, *iot.UpdateCertificateOutput) + + UpdateCertificate(*iot.UpdateCertificateInput) (*iot.UpdateCertificateOutput, error) + + UpdateThingRequest(*iot.UpdateThingInput) (*request.Request, *iot.UpdateThingOutput) + + UpdateThing(*iot.UpdateThingInput) (*iot.UpdateThingOutput, error) +} + +var _ IoTAPI = (*iot.IoT)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/iot/service.go b/vendor/github.com/aws/aws-sdk-go/service/iot/service.go new file mode 100644 index 
000000000..38869db5f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/iot/service.go
@@ -0,0 +1,94 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package iot
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/restjson"
+)
+
+// AWS IoT provides secure, bi-directional communication between Internet-connected
+// things (such as sensors, actuators, embedded devices, or smart appliances)
+// and the AWS cloud. You can discover your custom IoT-Data endpoint to communicate
+// with, configure rules for data processing and integration with other services,
+// organize resources associated with each thing (Thing Registry), configure
+// logging, and create and manage policies and credentials to authenticate things.
+//
+// For more information about how AWS IoT works, see the Developer Guide (http://docs.aws.amazon.com/iot/latest/developerguide/aws-iot-how-it-works.html).
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type IoT struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "iot"
+
+// New creates a new instance of the IoT client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an IoT client from just a session.
+// svc := iot.New(mySession)
+//
+// // Create an IoT client with additional configuration
+// svc := iot.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *IoT {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *IoT {
+	svc := &IoT{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   ServiceName,
+				SigningName:   "execute-api",
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "2015-05-28",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+	svc.Handlers.Build.PushBackNamed(restjson.BuildHandler)
+	svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler)
+	svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler)
+	svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for an IoT operation and runs any
+// custom request initialization.
+func (c *IoT) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/iotdataplane/api.go b/vendor/github.com/aws/aws-sdk-go/service/iotdataplane/api.go new file mode 100644 index 000000000..f30fc62bb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/iotdataplane/api.go @@ -0,0 +1,430 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package iotdataplane provides a client for AWS IoT Data Plane. +package iotdataplane + +import ( + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restjson" +) + +const opDeleteThingShadow = "DeleteThingShadow" + +// DeleteThingShadowRequest generates a "aws/request.Request" representing the +// client's request for the DeleteThingShadow operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteThingShadow method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteThingShadowRequest method. +// req, resp := client.DeleteThingShadowRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoTDataPlane) DeleteThingShadowRequest(input *DeleteThingShadowInput) (req *request.Request, output *DeleteThingShadowOutput) { + op := &request.Operation{ + Name: opDeleteThingShadow, + HTTPMethod: "DELETE", + HTTPPath: "/things/{thingName}/shadow", + } + + if input == nil { + input = &DeleteThingShadowInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteThingShadowOutput{} + req.Data = output + return +} + +// Deletes the thing shadow for the specified thing. +// +// For more information, see DeleteThingShadow (http://docs.aws.amazon.com/iot/latest/developerguide/API_DeleteThingShadow.html) +// in the AWS IoT Developer Guide. +func (c *IoTDataPlane) DeleteThingShadow(input *DeleteThingShadowInput) (*DeleteThingShadowOutput, error) { + req, out := c.DeleteThingShadowRequest(input) + err := req.Send() + return out, err +} + +const opGetThingShadow = "GetThingShadow" + +// GetThingShadowRequest generates a "aws/request.Request" representing the +// client's request for the GetThingShadow operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetThingShadow method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
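+//
+// A handler can be attached to the returned request before "Send" for the
+// custom logic mentioned above; a hedged sketch (the logging body is
+// illustrative, not part of the generated API):
+//
+// req, resp := client.GetThingShadowRequest(params)
+// req.Handlers.Send.PushBack(func(r *request.Request) {
+// // Inspect the outgoing HTTP request before it is sent.
+// fmt.Println(r.HTTPRequest.URL)
+// })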
+// +// // Example sending a request using the GetThingShadowRequest method. +// req, resp := client.GetThingShadowRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoTDataPlane) GetThingShadowRequest(input *GetThingShadowInput) (req *request.Request, output *GetThingShadowOutput) { + op := &request.Operation{ + Name: opGetThingShadow, + HTTPMethod: "GET", + HTTPPath: "/things/{thingName}/shadow", + } + + if input == nil { + input = &GetThingShadowInput{} + } + + req = c.newRequest(op, input, output) + output = &GetThingShadowOutput{} + req.Data = output + return +} + +// Gets the thing shadow for the specified thing. +// +// For more information, see GetThingShadow (http://docs.aws.amazon.com/iot/latest/developerguide/API_GetThingShadow.html) +// in the AWS IoT Developer Guide. +func (c *IoTDataPlane) GetThingShadow(input *GetThingShadowInput) (*GetThingShadowOutput, error) { + req, out := c.GetThingShadowRequest(input) + err := req.Send() + return out, err +} + +const opPublish = "Publish" + +// PublishRequest generates a "aws/request.Request" representing the +// client's request for the Publish operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the Publish method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PublishRequest method. +// req, resp := client.PublishRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoTDataPlane) PublishRequest(input *PublishInput) (req *request.Request, output *PublishOutput) { + op := &request.Operation{ + Name: opPublish, + HTTPMethod: "POST", + HTTPPath: "/topics/{topic}", + } + + if input == nil { + input = &PublishInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PublishOutput{} + req.Data = output + return +} + +// Publishes state information. +// +// For more information, see HTTP Protocol (http://docs.aws.amazon.com/iot/latest/developerguide/protocols.html#http) +// in the AWS IoT Developer Guide. +func (c *IoTDataPlane) Publish(input *PublishInput) (*PublishOutput, error) { + req, out := c.PublishRequest(input) + err := req.Send() + return out, err +} + +const opUpdateThingShadow = "UpdateThingShadow" + +// UpdateThingShadowRequest generates a "aws/request.Request" representing the +// client's request for the UpdateThingShadow operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateThingShadow method directly +// instead. 
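+//
+// For the direct call, a minimal sketch (the thing name and shadow document
+// are illustrative values):
+//
+// out, err := client.UpdateThingShadow(&iotdataplane.UpdateThingShadowInput{
+// ThingName: aws.String("myThing"),
+// Payload: []byte(`{"state":{"desired":{"powered":true}}}`),
+// })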
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateThingShadowRequest method. +// req, resp := client.UpdateThingShadowRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *IoTDataPlane) UpdateThingShadowRequest(input *UpdateThingShadowInput) (req *request.Request, output *UpdateThingShadowOutput) { + op := &request.Operation{ + Name: opUpdateThingShadow, + HTTPMethod: "POST", + HTTPPath: "/things/{thingName}/shadow", + } + + if input == nil { + input = &UpdateThingShadowInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateThingShadowOutput{} + req.Data = output + return +} + +// Updates the thing shadow for the specified thing. +// +// For more information, see UpdateThingShadow (http://docs.aws.amazon.com/iot/latest/developerguide/API_UpdateThingShadow.html) +// in the AWS IoT Developer Guide. +func (c *IoTDataPlane) UpdateThingShadow(input *UpdateThingShadowInput) (*UpdateThingShadowOutput, error) { + req, out := c.UpdateThingShadowRequest(input) + err := req.Send() + return out, err +} + +// The input for the DeleteThingShadow operation. +type DeleteThingShadowInput struct { + _ struct{} `type:"structure"` + + // The name of the thing. + ThingName *string `location:"uri" locationName:"thingName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteThingShadowInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteThingShadowInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteThingShadowInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteThingShadowInput"} + if s.ThingName == nil { + invalidParams.Add(request.NewErrParamRequired("ThingName")) + } + if s.ThingName != nil && len(*s.ThingName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ThingName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The output from the DeleteThingShadow operation. +type DeleteThingShadowOutput struct { + _ struct{} `type:"structure" payload:"Payload"` + + // The state information, in JSON format. + Payload []byte `locationName:"payload" type:"blob" required:"true"` +} + +// String returns the string representation +func (s DeleteThingShadowOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteThingShadowOutput) GoString() string { + return s.String() +} + +// The input for the GetThingShadow operation. +type GetThingShadowInput struct { + _ struct{} `type:"structure"` + + // The name of the thing. + ThingName *string `location:"uri" locationName:"thingName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetThingShadowInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetThingShadowInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
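+// The SDK also runs this validation automatically before a request is sent
+// (unless DisableParamValidation is set in the config), so a direct call is
+// only needed for early checks; a sketch:
+//
+// if err := params.Validate(); err != nil {
+// return err
+// }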
+func (s *GetThingShadowInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetThingShadowInput"} + if s.ThingName == nil { + invalidParams.Add(request.NewErrParamRequired("ThingName")) + } + if s.ThingName != nil && len(*s.ThingName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ThingName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The output from the GetThingShadow operation. +type GetThingShadowOutput struct { + _ struct{} `type:"structure" payload:"Payload"` + + // The state information, in JSON format. + Payload []byte `locationName:"payload" type:"blob"` +} + +// String returns the string representation +func (s GetThingShadowOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetThingShadowOutput) GoString() string { + return s.String() +} + +// The input for the Publish operation. +type PublishInput struct { + _ struct{} `type:"structure" payload:"Payload"` + + // The state information, in JSON format. + Payload []byte `locationName:"payload" type:"blob"` + + // The Quality of Service (QoS) level. + Qos *int64 `location:"querystring" locationName:"qos" type:"integer"` + + // The name of the MQTT topic. + Topic *string `location:"uri" locationName:"topic" type:"string" required:"true"` +} + +// String returns the string representation +func (s PublishInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PublishInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PublishInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PublishInput"} + if s.Topic == nil { + invalidParams.Add(request.NewErrParamRequired("Topic")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PublishOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PublishOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PublishOutput) GoString() string { + return s.String() +} + +// The input for the UpdateThingShadow operation. +type UpdateThingShadowInput struct { + _ struct{} `type:"structure" payload:"Payload"` + + // The state information, in JSON format. + Payload []byte `locationName:"payload" type:"blob" required:"true"` + + // The name of the thing. + ThingName *string `location:"uri" locationName:"thingName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateThingShadowInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateThingShadowInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateThingShadowInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateThingShadowInput"} + if s.Payload == nil { + invalidParams.Add(request.NewErrParamRequired("Payload")) + } + if s.ThingName == nil { + invalidParams.Add(request.NewErrParamRequired("ThingName")) + } + if s.ThingName != nil && len(*s.ThingName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ThingName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The output from the UpdateThingShadow operation. 
+type UpdateThingShadowOutput struct { + _ struct{} `type:"structure" payload:"Payload"` + + // The state information, in JSON format. + Payload []byte `locationName:"payload" type:"blob"` +} + +// String returns the string representation +func (s UpdateThingShadowOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateThingShadowOutput) GoString() string { + return s.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/iotdataplane/customizations_test.go b/vendor/github.com/aws/aws-sdk-go/service/iotdataplane/customizations_test.go new file mode 100644 index 000000000..4bb81a044 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/iotdataplane/customizations_test.go @@ -0,0 +1,52 @@ +package iotdataplane_test + +import ( + "fmt" + "github.com/stretchr/testify/assert" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/iotdataplane" +) + +func TestRequireEndpointIfRegionProvided(t *testing.T) { + svc := iotdataplane.New(unit.Session, &aws.Config{ + Region: aws.String("mock-region"), + DisableParamValidation: aws.Bool(true), + }) + req, _ := svc.GetThingShadowRequest(nil) + err := req.Build() + + assert.Equal(t, "", svc.Endpoint) + assert.Error(t, err) + assert.Equal(t, aws.ErrMissingEndpoint, err) +} + +func TestRequireEndpointIfNoRegionProvided(t *testing.T) { + svc := iotdataplane.New(unit.Session, &aws.Config{ + Region: aws.String(""), + DisableParamValidation: aws.Bool(true), + }) + fmt.Println(svc.ClientInfo.SigningRegion) + + req, _ := svc.GetThingShadowRequest(nil) + err := req.Build() + + assert.Equal(t, "", svc.Endpoint) + assert.Error(t, err) + assert.Equal(t, aws.ErrMissingEndpoint, err) +} + +func TestRequireEndpointUsed(t *testing.T) { + svc := iotdataplane.New(unit.Session, &aws.Config{ + Region: aws.String("mock-region"), + DisableParamValidation: aws.Bool(true), + Endpoint: aws.String("https://endpoint"), + }) + req, _ := svc.GetThingShadowRequest(nil) + err := req.Build() + + assert.Equal(t, "https://endpoint", svc.Endpoint) + assert.NoError(t, err) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/iotdataplane/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/iotdataplane/examples_test.go new file mode 100644 index 000000000..32769aba8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/iotdataplane/examples_test.go @@ -0,0 +1,95 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package iotdataplane_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/iotdataplane" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleIoTDataPlane_DeleteThingShadow() { + svc := iotdataplane.New(session.New()) + + params := &iotdataplane.DeleteThingShadowInput{ + ThingName: aws.String("ThingName"), // Required + } + resp, err := svc.DeleteThingShadow(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIoTDataPlane_GetThingShadow() { + svc := iotdataplane.New(session.New()) + + params := &iotdataplane.GetThingShadowInput{ + ThingName: aws.String("ThingName"), // Required + } + resp, err := svc.GetThingShadow(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIoTDataPlane_Publish() { + svc := iotdataplane.New(session.New()) + + params := &iotdataplane.PublishInput{ + Topic: aws.String("Topic"), // Required + Payload: []byte("PAYLOAD"), + Qos: aws.Int64(1), + } + resp, err := svc.Publish(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIoTDataPlane_UpdateThingShadow() { + svc := iotdataplane.New(session.New()) + + params := &iotdataplane.UpdateThingShadowInput{ + Payload: []byte("PAYLOAD"), // Required + ThingName: aws.String("ThingName"), // Required + } + resp, err := svc.UpdateThingShadow(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/iotdataplane/iotdataplaneiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/iotdataplane/iotdataplaneiface/interface.go new file mode 100644 index 000000000..0351a84f4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/iotdataplane/iotdataplaneiface/interface.go @@ -0,0 +1,30 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package iotdataplaneiface provides an interface for the AWS IoT Data Plane. +package iotdataplaneiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/iotdataplane" +) + +// IoTDataPlaneAPI is the interface type for iotdataplane.IoTDataPlane. +type IoTDataPlaneAPI interface { + DeleteThingShadowRequest(*iotdataplane.DeleteThingShadowInput) (*request.Request, *iotdataplane.DeleteThingShadowOutput) + + DeleteThingShadow(*iotdataplane.DeleteThingShadowInput) (*iotdataplane.DeleteThingShadowOutput, error) + + GetThingShadowRequest(*iotdataplane.GetThingShadowInput) (*request.Request, *iotdataplane.GetThingShadowOutput) + + GetThingShadow(*iotdataplane.GetThingShadowInput) (*iotdataplane.GetThingShadowOutput, error) + + PublishRequest(*iotdataplane.PublishInput) (*request.Request, *iotdataplane.PublishOutput) + + Publish(*iotdataplane.PublishInput) (*iotdataplane.PublishOutput, error) + + UpdateThingShadowRequest(*iotdataplane.UpdateThingShadowInput) (*request.Request, *iotdataplane.UpdateThingShadowOutput) + + UpdateThingShadow(*iotdataplane.UpdateThingShadowInput) (*iotdataplane.UpdateThingShadowOutput, error) +} + +var _ IoTDataPlaneAPI = (*iotdataplane.IoTDataPlane)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/iotdataplane/service.go b/vendor/github.com/aws/aws-sdk-go/service/iotdataplane/service.go new file mode 100644 index 000000000..888b329d0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/iotdataplane/service.go @@ -0,0 +1,92 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+
+package iotdataplane
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/restjson"
+)
+
+// AWS IoT-Data enables secure, bi-directional communication between Internet-connected
+// things (such as sensors, actuators, embedded devices, or smart appliances)
+// and the AWS cloud. It implements a broker for applications and things to
+// publish messages over HTTP (Publish) and retrieve, update, and delete thing
+// shadows. A thing shadow is a persistent representation of your things and
+// their state in the AWS cloud.
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type IoTDataPlane struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "data.iot"
+
+// New creates a new instance of the IoTDataPlane client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an IoTDataPlane client from just a session.
+// svc := iotdataplane.New(mySession)
+//
+// // Create an IoTDataPlane client with additional configuration
+// svc := iotdataplane.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *IoTDataPlane {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *IoTDataPlane {
+	svc := &IoTDataPlane{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   ServiceName,
+				SigningName:   "iotdata",
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "2015-05-28",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+	svc.Handlers.Build.PushBackNamed(restjson.BuildHandler)
+	svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler)
+	svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler)
+	svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for an IoTDataPlane operation and runs any
+// custom request initialization.
+func (c *IoTDataPlane) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	// Run custom request initialization if present
+	if initRequest != nil {
+		initRequest(req)
+	}
+
+	return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/kinesis/api.go b/vendor/github.com/aws/aws-sdk-go/service/kinesis/api.go
new file mode 100644
index 000000000..aa5d71858
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/kinesis/api.go
@@ -0,0 +1,2773 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package kinesis provides a client for Amazon Kinesis.
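+//
+// A minimal usage sketch (the region and stream name are illustrative values):
+//
+// svc := kinesis.New(session.New(), aws.NewConfig().WithRegion("us-west-2"))
+// out, err := svc.DescribeStream(&kinesis.DescribeStreamInput{
+// StreamName: aws.String("my-stream"),
+// })
+// if err == nil {
+// fmt.Println(out)
+// }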
+package kinesis + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opAddTagsToStream = "AddTagsToStream" + +// AddTagsToStreamRequest generates a "aws/request.Request" representing the +// client's request for the AddTagsToStream operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddTagsToStream method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddTagsToStreamRequest method. +// req, resp := client.AddTagsToStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Kinesis) AddTagsToStreamRequest(input *AddTagsToStreamInput) (req *request.Request, output *AddTagsToStreamOutput) { + op := &request.Operation{ + Name: opAddTagsToStream, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddTagsToStreamInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AddTagsToStreamOutput{} + req.Data = output + return +} + +// Adds or updates tags for the specified Amazon Kinesis stream. Each stream +// can have up to 10 tags. +// +// If tags have already been assigned to the stream, AddTagsToStream overwrites +// any existing tags that correspond to the specified tag keys. +func (c *Kinesis) AddTagsToStream(input *AddTagsToStreamInput) (*AddTagsToStreamOutput, error) { + req, out := c.AddTagsToStreamRequest(input) + err := req.Send() + return out, err +} + +const opCreateStream = "CreateStream" + +// CreateStreamRequest generates a "aws/request.Request" representing the +// client's request for the CreateStream operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateStream method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateStreamRequest method. 
+// req, resp := client.CreateStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Kinesis) CreateStreamRequest(input *CreateStreamInput) (req *request.Request, output *CreateStreamOutput) { + op := &request.Operation{ + Name: opCreateStream, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateStreamInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CreateStreamOutput{} + req.Data = output + return +} + +// Creates an Amazon Kinesis stream. A stream captures and transports data records +// that are continuously emitted from different data sources or producers. Scale-out +// within a stream is explicitly supported by means of shards, which are uniquely +// identified groups of data records in a stream. +// +// You specify and control the number of shards that a stream is composed of. +// Each shard can support reads up to 5 transactions per second, up to a maximum +// data read total of 2 MB per second. Each shard can support writes up to 1,000 +// records per second, up to a maximum data write total of 1 MB per second. +// You can add shards to a stream if the amount of data input increases and +// you can remove shards if the amount of data input decreases. +// +// The stream name identifies the stream. The name is scoped to the AWS account +// used by the application. It is also scoped by region. That is, two streams +// in two different accounts can have the same name, and two streams in the +// same account, but in two different regions, can have the same name. +// +// CreateStream is an asynchronous operation. Upon receiving a CreateStream +// request, Amazon Kinesis immediately returns and sets the stream status to +// CREATING. After the stream is created, Amazon Kinesis sets the stream status +// to ACTIVE. You should perform read and write operations only on an ACTIVE +// stream. +// +// You receive a LimitExceededException when making a CreateStream request +// if you try to do one of the following: +// +// Have more than five streams in the CREATING state at any point in time. +// Create more shards than are authorized for your account. For the default +// shard limit for an AWS account, see Streams Limits (http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html) +// in the Amazon Kinesis Streams Developer Guide. If you need to increase this +// limit, contact AWS Support (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html). +// +// You can use DescribeStream to check the stream status, which is returned +// in StreamStatus. +// +// CreateStream has a limit of 5 transactions per second per account. +func (c *Kinesis) CreateStream(input *CreateStreamInput) (*CreateStreamOutput, error) { + req, out := c.CreateStreamRequest(input) + err := req.Send() + return out, err +} + +const opDecreaseStreamRetentionPeriod = "DecreaseStreamRetentionPeriod" + +// DecreaseStreamRetentionPeriodRequest generates a "aws/request.Request" representing the +// client's request for the DecreaseStreamRetentionPeriod operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DecreaseStreamRetentionPeriod method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DecreaseStreamRetentionPeriodRequest method. +// req, resp := client.DecreaseStreamRetentionPeriodRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Kinesis) DecreaseStreamRetentionPeriodRequest(input *DecreaseStreamRetentionPeriodInput) (req *request.Request, output *DecreaseStreamRetentionPeriodOutput) { + op := &request.Operation{ + Name: opDecreaseStreamRetentionPeriod, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DecreaseStreamRetentionPeriodInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DecreaseStreamRetentionPeriodOutput{} + req.Data = output + return +} + +// Decreases the Amazon Kinesis stream's retention period, which is the length +// of time data records are accessible after they are added to the stream. The +// minimum value of a stream's retention period is 24 hours. +// +// This operation may result in lost data. For example, if the stream's retention +// period is 48 hours and is decreased to 24 hours, any data already in the +// stream that is older than 24 hours is inaccessible. +func (c *Kinesis) DecreaseStreamRetentionPeriod(input *DecreaseStreamRetentionPeriodInput) (*DecreaseStreamRetentionPeriodOutput, error) { + req, out := c.DecreaseStreamRetentionPeriodRequest(input) + err := req.Send() + return out, err +} + +const opDeleteStream = "DeleteStream" + +// DeleteStreamRequest generates a "aws/request.Request" representing the +// client's request for the DeleteStream operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteStream method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteStreamRequest method. 
+// req, resp := client.DeleteStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Kinesis) DeleteStreamRequest(input *DeleteStreamInput) (req *request.Request, output *DeleteStreamOutput) { + op := &request.Operation{ + Name: opDeleteStream, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteStreamInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteStreamOutput{} + req.Data = output + return +} + +// Deletes an Amazon Kinesis stream and all its shards and data. You must shut +// down any applications that are operating on the stream before you delete +// the stream. If an application attempts to operate on a deleted stream, it +// will receive the exception ResourceNotFoundException. +// +// If the stream is in the ACTIVE state, you can delete it. After a DeleteStream +// request, the specified stream is in the DELETING state until Amazon Kinesis +// completes the deletion. +// +// Note: Amazon Kinesis might continue to accept data read and write operations, +// such as PutRecord, PutRecords, and GetRecords, on a stream in the DELETING +// state until the stream deletion is complete. +// +// When you delete a stream, any shards in that stream are also deleted, and +// any tags are dissociated from the stream. +// +// You can use the DescribeStream operation to check the state of the stream, +// which is returned in StreamStatus. +// +// DeleteStream has a limit of 5 transactions per second per account. +func (c *Kinesis) DeleteStream(input *DeleteStreamInput) (*DeleteStreamOutput, error) { + req, out := c.DeleteStreamRequest(input) + err := req.Send() + return out, err +} + +const opDescribeStream = "DescribeStream" + +// DescribeStreamRequest generates a "aws/request.Request" representing the +// client's request for the DescribeStream operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeStream method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeStreamRequest method. +// req, resp := client.DescribeStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Kinesis) DescribeStreamRequest(input *DescribeStreamInput) (req *request.Request, output *DescribeStreamOutput) { + op := &request.Operation{ + Name: opDescribeStream, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"ExclusiveStartShardId"}, + OutputTokens: []string{"StreamDescription.Shards[-1].ShardId"}, + LimitToken: "Limit", + TruncationToken: "StreamDescription.HasMoreShards", + }, + } + + if input == nil { + input = &DescribeStreamInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeStreamOutput{} + req.Data = output + return +} + +// Describes the specified Amazon Kinesis stream. 
+//
+// The information about the stream includes its current status, its Amazon
+// Resource Name (ARN), and an array of shard objects. For each shard object,
+// there is information about the hash key and sequence number ranges that the
+// shard spans, and the IDs of any earlier shards that played a role in creating
+// the shard. A sequence number is the identifier associated with every record
+// ingested in the stream. The sequence number is assigned when a record is
+// put into the stream.
+//
+// You can limit the number of returned shards using the Limit parameter. The
+// number of shards in a stream may be too large to return from a single call
+// to DescribeStream. You can detect this by using the HasMoreShards flag in
+// the returned output. HasMoreShards is set to true when there is more data
+// available.
+//
+// DescribeStream is a paginated operation. If there are more shards available,
+// you can request them using the shard ID of the last shard returned. Specify
+// this ID in the ExclusiveStartShardId parameter in a subsequent request to
+// DescribeStream.
+//
+// There are no guarantees about the chronological order of shards returned in
+// DescribeStream results. If you want to process shards in chronological order,
+// use ParentShardId to track lineage to the oldest shard.
+//
+// DescribeStream has a limit of 10 transactions per second per account.
+func (c *Kinesis) DescribeStream(input *DescribeStreamInput) (*DescribeStreamOutput, error) {
+	req, out := c.DescribeStreamRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// DescribeStreamPages iterates over the pages of a DescribeStream operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See DescribeStream method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a DescribeStream operation.
+// pageNum := 0
+// err := client.DescribeStreamPages(params,
+// func(page *DescribeStreamOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *Kinesis) DescribeStreamPages(input *DescribeStreamInput, fn func(p *DescribeStreamOutput, lastPage bool) (shouldContinue bool)) error {
+	page, _ := c.DescribeStreamRequest(input)
+	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+	return page.EachPage(func(p interface{}, lastPage bool) bool {
+		return fn(p.(*DescribeStreamOutput), lastPage)
+	})
+}
+
+const opDisableEnhancedMonitoring = "DisableEnhancedMonitoring"
+
+// DisableEnhancedMonitoringRequest generates a "aws/request.Request" representing the
+// client's request for the DisableEnhancedMonitoring operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DisableEnhancedMonitoring method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DisableEnhancedMonitoringRequest method.
+// req, resp := client.DisableEnhancedMonitoringRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Kinesis) DisableEnhancedMonitoringRequest(input *DisableEnhancedMonitoringInput) (req *request.Request, output *EnhancedMonitoringOutput) { + op := &request.Operation{ + Name: opDisableEnhancedMonitoring, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableEnhancedMonitoringInput{} + } + + req = c.newRequest(op, input, output) + output = &EnhancedMonitoringOutput{} + req.Data = output + return +} + +// Disables enhanced monitoring. +func (c *Kinesis) DisableEnhancedMonitoring(input *DisableEnhancedMonitoringInput) (*EnhancedMonitoringOutput, error) { + req, out := c.DisableEnhancedMonitoringRequest(input) + err := req.Send() + return out, err +} + +const opEnableEnhancedMonitoring = "EnableEnhancedMonitoring" + +// EnableEnhancedMonitoringRequest generates a "aws/request.Request" representing the +// client's request for the EnableEnhancedMonitoring operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the EnableEnhancedMonitoring method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EnableEnhancedMonitoringRequest method. +// req, resp := client.EnableEnhancedMonitoringRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Kinesis) EnableEnhancedMonitoringRequest(input *EnableEnhancedMonitoringInput) (req *request.Request, output *EnhancedMonitoringOutput) { + op := &request.Operation{ + Name: opEnableEnhancedMonitoring, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableEnhancedMonitoringInput{} + } + + req = c.newRequest(op, input, output) + output = &EnhancedMonitoringOutput{} + req.Data = output + return +} + +// Enables enhanced Amazon Kinesis stream monitoring for shard-level metrics. +func (c *Kinesis) EnableEnhancedMonitoring(input *EnableEnhancedMonitoringInput) (*EnhancedMonitoringOutput, error) { + req, out := c.EnableEnhancedMonitoringRequest(input) + err := req.Send() + return out, err +} + +const opGetRecords = "GetRecords" + +// GetRecordsRequest generates a "aws/request.Request" representing the +// client's request for the GetRecords operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetRecords method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetRecordsRequest method. 
+// req, resp := client.GetRecordsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Kinesis) GetRecordsRequest(input *GetRecordsInput) (req *request.Request, output *GetRecordsOutput) { + op := &request.Operation{ + Name: opGetRecords, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetRecordsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetRecordsOutput{} + req.Data = output + return +} + +// Gets data records from an Amazon Kinesis stream's shard. +// +// Specify a shard iterator using the ShardIterator parameter. The shard iterator +// specifies the position in the shard from which you want to start reading +// data records sequentially. If there are no records available in the portion +// of the shard that the iterator points to, GetRecords returns an empty list. +// Note that it might take multiple calls to get to a portion of the shard that +// contains records. +// +// You can scale by provisioning multiple shards per stream while considering +// service limits (for more information, see Streams Limits (http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html) +// in the Amazon Kinesis Streams Developer Guide). Your application should have +// one thread per shard, each reading continuously from its stream. To read +// from a stream continually, call GetRecords in a loop. Use GetShardIterator +// to get the shard iterator to specify in the first GetRecords call. GetRecords +// returns a new shard iterator in NextShardIterator. Specify the shard iterator +// returned in NextShardIterator in subsequent calls to GetRecords. Note that +// if the shard has been closed, the shard iterator can't return more data and +// GetRecords returns null in NextShardIterator. You can terminate the loop +// when the shard is closed, or when the shard iterator reaches the record with +// the sequence number or other attribute that marks it as the last record to +// process. +// +// Each data record can be up to 1 MB in size, and each shard can read up to +// 2 MB per second. You can ensure that your calls don't exceed the maximum +// supported size or throughput by using the Limit parameter to specify the +// maximum number of records that GetRecords can return. Consider your average +// record size when determining this limit. +// +// The size of the data returned by GetRecords varies depending on the utilization +// of the shard. The maximum size of data that GetRecords can return is 10 MB. +// If a call returns this amount of data, subsequent calls made within the next +// 5 seconds throw ProvisionedThroughputExceededException. If there is insufficient +// provisioned throughput on the shard, subsequent calls made within the next +// 1 second throw ProvisionedThroughputExceededException. Note that GetRecords +// won't return any data when it throws an exception. For this reason, we recommend +// that you wait one second between calls to GetRecords; however, it's possible +// that the application will get exceptions for longer than 1 second. +// +// To detect whether the application is falling behind in processing, you can +// use the MillisBehindLatest response attribute. You can also monitor the stream +// using CloudWatch metrics and other mechanisms (see Monitoring (http://docs.aws.amazon.com/kinesis/latest/dev/monitoring.html) +// in the Amazon Kinesis Streams Developer Guide). 
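Editorial note: the read loop the comment above describes (one GetShardIterator call, then chase NextShardIterator with repeated GetRecords calls, pausing about a second between calls) looks roughly like the following sketch. It is not part of the patch; the session configuration, stream name, and shard ID are assumptions, and real code would retry ProvisionedThroughputExceededException instead of exiting:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kinesis"
)

func main() {
	client := kinesis.New(session.Must(session.NewSession()))

	// Get the first iterator; TRIM_HORIZON starts at the oldest record.
	it, err := client.GetShardIterator(&kinesis.GetShardIteratorInput{
		StreamName:        aws.String("my-stream"),            // hypothetical
		ShardId:           aws.String("shardId-000000000000"), // hypothetical
		ShardIteratorType: aws.String("TRIM_HORIZON"),
	})
	if err != nil {
		log.Fatal(err)
	}

	iterator := it.ShardIterator
	for iterator != nil {
		out, err := client.GetRecords(&kinesis.GetRecordsInput{
			ShardIterator: iterator,
			Limit:         aws.Int64(100),
		})
		if err != nil {
			log.Fatal(err) // production code would back off and retry
		}
		for _, r := range out.Records {
			fmt.Printf("%s: %s\n", *r.SequenceNumber, r.Data)
		}
		iterator = out.NextShardIterator // nil once the shard is closed
		time.Sleep(time.Second)          // per the one-second guidance above
	}
}
```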
+// +// Each Amazon Kinesis record includes a value, ApproximateArrivalTimestamp, +// that is set when a stream successfully receives and stores a record. This +// is commonly referred to as a server-side timestamp, whereas a client-side +// timestamp is set when a data producer creates or sends the record to a stream +// (a data producer is any data source putting data records into a stream, for +// example with PutRecords). The timestamp has millisecond precision. There +// are no guarantees about the timestamp accuracy, or that the timestamp is +// always increasing. For example, records in a shard or across a stream might +// have timestamps that are out of order. +func (c *Kinesis) GetRecords(input *GetRecordsInput) (*GetRecordsOutput, error) { + req, out := c.GetRecordsRequest(input) + err := req.Send() + return out, err +} + +const opGetShardIterator = "GetShardIterator" + +// GetShardIteratorRequest generates a "aws/request.Request" representing the +// client's request for the GetShardIterator operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetShardIterator method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetShardIteratorRequest method. +// req, resp := client.GetShardIteratorRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Kinesis) GetShardIteratorRequest(input *GetShardIteratorInput) (req *request.Request, output *GetShardIteratorOutput) { + op := &request.Operation{ + Name: opGetShardIterator, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetShardIteratorInput{} + } + + req = c.newRequest(op, input, output) + output = &GetShardIteratorOutput{} + req.Data = output + return +} + +// Gets an Amazon Kinesis shard iterator. A shard iterator expires five minutes +// after it is returned to the requester. +// +// A shard iterator specifies the shard position from which to start reading +// data records sequentially. The position is specified using the sequence number +// of a data record in a shard. A sequence number is the identifier associated +// with every record ingested in the stream, and is assigned when a record is +// put into the stream. Each stream has one or more shards. +// +// You must specify the shard iterator type. For example, you can set the ShardIteratorType +// parameter to read exactly from the position denoted by a specific sequence +// number by using the AT_SEQUENCE_NUMBER shard iterator type, or right after +// the sequence number by using the AFTER_SEQUENCE_NUMBER shard iterator type, +// using sequence numbers returned by earlier calls to PutRecord, PutRecords, +// GetRecords, or DescribeStream. 
+// In the request, you can specify the shard
+// iterator type AT_TIMESTAMP to read records from an arbitrary point in time,
+// TRIM_HORIZON to cause ShardIterator to point to the last untrimmed record
+// in the shard in the system (the oldest data record in the shard), or LATEST
+// so that you always read the most recent data in the shard.
+//
+// When you read repeatedly from a stream, use a GetShardIterator request to
+// get the first shard iterator for use in your first GetRecords request and
+// for subsequent reads use the shard iterator returned by the GetRecords request
+// in NextShardIterator. A new shard iterator is returned by every GetRecords
+// request in NextShardIterator, which you use in the ShardIterator parameter
+// of the next GetRecords request.
+//
+// If a GetShardIterator request is made too often, you receive a ProvisionedThroughputExceededException.
+// For more information about throughput limits, see GetRecords, and Streams
+// Limits (http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html)
+// in the Amazon Kinesis Streams Developer Guide.
+//
+// If the shard is closed, GetShardIterator returns a valid iterator for the
+// last sequence number of the shard. Note that a shard can be closed as a result
+// of using SplitShard or MergeShards.
+//
+// GetShardIterator has a limit of 5 transactions per second per account per
+// open shard.
+func (c *Kinesis) GetShardIterator(input *GetShardIteratorInput) (*GetShardIteratorOutput, error) {
+	req, out := c.GetShardIteratorRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opIncreaseStreamRetentionPeriod = "IncreaseStreamRetentionPeriod"
+
+// IncreaseStreamRetentionPeriodRequest generates a "aws/request.Request" representing the
+// client's request for the IncreaseStreamRetentionPeriod operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the IncreaseStreamRetentionPeriod method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the IncreaseStreamRetentionPeriodRequest method.
+// req, resp := client.IncreaseStreamRetentionPeriodRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *Kinesis) IncreaseStreamRetentionPeriodRequest(input *IncreaseStreamRetentionPeriodInput) (req *request.Request, output *IncreaseStreamRetentionPeriodOutput) {
+	op := &request.Operation{
+		Name: opIncreaseStreamRetentionPeriod,
+		HTTPMethod: "POST",
+		HTTPPath: "/",
+	}
+
+	if input == nil {
+		input = &IncreaseStreamRetentionPeriodInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	output = &IncreaseStreamRetentionPeriodOutput{}
+	req.Data = output
+	return
+}
+
+// Increases the Amazon Kinesis stream's retention period, which is the length
+// of time data records are accessible after they are added to the stream. The
+// maximum value of a stream's retention period is 168 hours (7 days).
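Editorial note: to make the retention-period operations concrete, here is a minimal sketch of raising the period to the 168-hour maximum (not part of this patch; the session configuration and stream name are assumptions):

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kinesis"
)

func main() {
	client := kinesis.New(session.Must(session.NewSession()))

	// RetentionPeriodHours must be between 24 (the minimum) and 168 hours.
	_, err := client.IncreaseStreamRetentionPeriod(&kinesis.IncreaseStreamRetentionPeriodInput{
		StreamName:           aws.String("my-stream"), // hypothetical
		RetentionPeriodHours: aws.Int64(168),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```

DecreaseStreamRetentionPeriod takes the same input shape, with the new period required to be below the current one.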
+//
+// When you choose a longer stream retention period, this operation increases
+// the time period during which records that have not yet expired are accessible.
+// However, it does not make previously expired data (older than the stream's
+// previous retention period) accessible after the operation has been called.
+// For example, if a stream's retention period is set to 24 hours and is increased
+// to 168 hours, any data that is older than 24 hours remains inaccessible to
+// consumer applications.
+func (c *Kinesis) IncreaseStreamRetentionPeriod(input *IncreaseStreamRetentionPeriodInput) (*IncreaseStreamRetentionPeriodOutput, error) {
+	req, out := c.IncreaseStreamRetentionPeriodRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opListStreams = "ListStreams"
+
+// ListStreamsRequest generates a "aws/request.Request" representing the
+// client's request for the ListStreams operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ListStreams method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the ListStreamsRequest method.
+// req, resp := client.ListStreamsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *Kinesis) ListStreamsRequest(input *ListStreamsInput) (req *request.Request, output *ListStreamsOutput) {
+	op := &request.Operation{
+		Name: opListStreams,
+		HTTPMethod: "POST",
+		HTTPPath: "/",
+		Paginator: &request.Paginator{
+			InputTokens: []string{"ExclusiveStartStreamName"},
+			OutputTokens: []string{"StreamNames[-1]"},
+			LimitToken: "Limit",
+			TruncationToken: "HasMoreStreams",
+		},
+	}
+
+	if input == nil {
+		input = &ListStreamsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ListStreamsOutput{}
+	req.Data = output
+	return
+}
+
+// Lists your Amazon Kinesis streams.
+//
+// The number of streams may be too large to return from a single call to ListStreams.
+// You can limit the number of returned streams using the Limit parameter. If
+// you do not specify a value for the Limit parameter, Amazon Kinesis uses the
+// default limit, which is currently 10.
+//
+// You can detect if there are more streams available to list by using the
+// HasMoreStreams flag from the returned output. If there are more streams available,
+// you can request more streams by using the name of the last stream returned
+// by the ListStreams request in the ExclusiveStartStreamName parameter in a
+// subsequent request to ListStreams. The group of stream names returned by
+// the subsequent request is then added to the list. You can continue this process
+// until all the stream names have been collected in the list.
+//
+// ListStreams has a limit of 5 transactions per second per account.
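Editorial note: since ListStreams is paginated, the generated ListStreamsPages helper (defined just below) can do the ExclusiveStartStreamName bookkeeping. A minimal sketch, not part of this patch, assuming a configured default session:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kinesis"
)

func main() {
	client := kinesis.New(session.Must(session.NewSession()))

	// The callback is invoked once per page; returning true requests the
	// next page, so this walks every stream in the account and region.
	err := client.ListStreamsPages(&kinesis.ListStreamsInput{
		Limit: aws.Int64(10),
	}, func(page *kinesis.ListStreamsOutput, lastPage bool) bool {
		for _, name := range page.StreamNames {
			fmt.Println(*name)
		}
		return !lastPage
	})
	if err != nil {
		log.Fatal(err)
	}
}
```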
+func (c *Kinesis) ListStreams(input *ListStreamsInput) (*ListStreamsOutput, error) { + req, out := c.ListStreamsRequest(input) + err := req.Send() + return out, err +} + +// ListStreamsPages iterates over the pages of a ListStreams operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListStreams method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListStreams operation. +// pageNum := 0 +// err := client.ListStreamsPages(params, +// func(page *ListStreamsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Kinesis) ListStreamsPages(input *ListStreamsInput, fn func(p *ListStreamsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListStreamsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListStreamsOutput), lastPage) + }) +} + +const opListTagsForStream = "ListTagsForStream" + +// ListTagsForStreamRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForStream operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTagsForStream method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTagsForStreamRequest method. +// req, resp := client.ListTagsForStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Kinesis) ListTagsForStreamRequest(input *ListTagsForStreamInput) (req *request.Request, output *ListTagsForStreamOutput) { + op := &request.Operation{ + Name: opListTagsForStream, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForStreamInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTagsForStreamOutput{} + req.Data = output + return +} + +// Lists the tags for the specified Amazon Kinesis stream. +func (c *Kinesis) ListTagsForStream(input *ListTagsForStreamInput) (*ListTagsForStreamOutput, error) { + req, out := c.ListTagsForStreamRequest(input) + err := req.Send() + return out, err +} + +const opMergeShards = "MergeShards" + +// MergeShardsRequest generates a "aws/request.Request" representing the +// client's request for the MergeShards operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the MergeShards method directly +// instead. 
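Editorial note: before continuing with MergeShards below, a corresponding sketch for the tagging read path defined above (not part of this patch; the stream name is hypothetical, and the Tag shape with Key/Value pointer fields is assumed from the rest of this generated API):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kinesis"
)

func main() {
	client := kinesis.New(session.Must(session.NewSession()))

	out, err := client.ListTagsForStream(&kinesis.ListTagsForStreamInput{
		StreamName: aws.String("my-stream"), // hypothetical
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, t := range out.Tags {
		// aws.StringValue safely dereferences possibly-nil *string fields.
		fmt.Printf("%s=%s\n", aws.StringValue(t.Key), aws.StringValue(t.Value))
	}
}
```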
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the MergeShardsRequest method.
+// req, resp := client.MergeShardsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *Kinesis) MergeShardsRequest(input *MergeShardsInput) (req *request.Request, output *MergeShardsOutput) {
+	op := &request.Operation{
+		Name: opMergeShards,
+		HTTPMethod: "POST",
+		HTTPPath: "/",
+	}
+
+	if input == nil {
+		input = &MergeShardsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	output = &MergeShardsOutput{}
+	req.Data = output
+	return
+}
+
+// Merges two adjacent shards in an Amazon Kinesis stream and combines them
+// into a single shard to reduce the stream's capacity to ingest and transport
+// data. Two shards are considered adjacent if the union of the hash key ranges
+// for the two shards form a contiguous set with no gaps. For example, if you
+// have two shards, one with a hash key range of 276...381 and the other with
+// a hash key range of 382...454, then you could merge these two shards into
+// a single shard that would have a hash key range of 276...454. After the merge,
+// the single child shard receives data for all hash key values covered by the
+// two parent shards.
+//
+// MergeShards is called when there is a need to reduce the overall capacity
+// of a stream because of excess capacity that is not being used. You must specify
+// the shard to be merged and the adjacent shard for a stream. For more information
+// about merging shards, see Merge Two Shards (http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-merge.html)
+// in the Amazon Kinesis Streams Developer Guide.
+//
+// If the stream is in the ACTIVE state, you can call MergeShards. If a stream
+// is in the CREATING, UPDATING, or DELETING state, MergeShards returns a ResourceInUseException.
+// If the specified stream does not exist, MergeShards returns a ResourceNotFoundException.
+//
+// You can use DescribeStream to check the state of the stream, which is returned
+// in StreamStatus.
+//
+// MergeShards is an asynchronous operation. Upon receiving a MergeShards request,
+// Amazon Kinesis immediately returns a response and sets the StreamStatus to
+// UPDATING. After the operation is completed, Amazon Kinesis sets the StreamStatus
+// to ACTIVE. Read and write operations continue to work while the stream is
+// in the UPDATING state.
+//
+// You use DescribeStream to determine the shard IDs that are specified in
+// the MergeShards request.
+//
+// If you try to operate on too many streams in parallel using CreateStream,
+// DeleteStream, MergeShards, or SplitShard, you will receive a LimitExceededException.
+//
+// MergeShards has a limit of 5 transactions per second per account.
+func (c *Kinesis) MergeShards(input *MergeShardsInput) (*MergeShardsOutput, error) {
+	req, out := c.MergeShardsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opPutRecord = "PutRecord"
+
+// PutRecordRequest generates a "aws/request.Request" representing the
+// client's request for the PutRecord operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutRecord method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutRecordRequest method. +// req, resp := client.PutRecordRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Kinesis) PutRecordRequest(input *PutRecordInput) (req *request.Request, output *PutRecordOutput) { + op := &request.Operation{ + Name: opPutRecord, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutRecordInput{} + } + + req = c.newRequest(op, input, output) + output = &PutRecordOutput{} + req.Data = output + return +} + +// Writes a single data record into an Amazon Kinesis stream. Call PutRecord +// to send data into the stream for real-time ingestion and subsequent processing, +// one record at a time. Each shard can support writes up to 1,000 records per +// second, up to a maximum data write total of 1 MB per second. +// +// You must specify the name of the stream that captures, stores, and transports +// the data; a partition key; and the data blob itself. +// +// The data blob can be any type of data; for example, a segment from a log +// file, geographic/location data, website clickstream data, and so on. +// +// The partition key is used by Amazon Kinesis to distribute data across shards. +// Amazon Kinesis segregates the data records that belong to a stream into multiple +// shards, using the partition key associated with each data record to determine +// which shard a given data record belongs to. +// +// Partition keys are Unicode strings, with a maximum length limit of 256 characters +// for each key. An MD5 hash function is used to map partition keys to 128-bit +// integer values and to map associated data records to shards using the hash +// key ranges of the shards. You can override hashing the partition key to determine +// the shard by explicitly specifying a hash value using the ExplicitHashKey +// parameter. For more information, see Adding Data to a Stream (http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream) +// in the Amazon Kinesis Streams Developer Guide. +// +// PutRecord returns the shard ID of where the data record was placed and the +// sequence number that was assigned to the data record. +// +// Sequence numbers increase over time and are specific to a shard within a +// stream, not across all shards within a stream. To guarantee strictly increasing +// ordering, write serially to a shard and use the SequenceNumberForOrdering +// parameter. For more information, see Adding Data to a Stream (http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream) +// in the Amazon Kinesis Streams Developer Guide. +// +// If a PutRecord request cannot be processed because of insufficient provisioned +// throughput on the shard involved in the request, PutRecord throws ProvisionedThroughputExceededException. +// +// Data records are accessible for only 24 hours from the time that they are +// added to a stream. 
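Editorial note: the single-record write path described above, as a sketch (not part of this patch; the stream name, partition key, and payload are placeholders). The returned PutRecordOutput carries the ShardId and SequenceNumber mentioned in the comment:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kinesis"
)

func main() {
	client := kinesis.New(session.Must(session.NewSession()))

	// The partition key selects the shard via an MD5 hash of the key.
	out, err := client.PutRecord(&kinesis.PutRecordInput{
		StreamName:   aws.String("my-stream"), // hypothetical
		PartitionKey: aws.String("user-42"),   // hypothetical
		Data:         []byte(`{"event":"click"}`),
	})
	if err != nil {
		log.Fatal(err) // e.g. ProvisionedThroughputExceededException
	}
	fmt.Printf("shard=%s seq=%s\n", *out.ShardId, *out.SequenceNumber)
}
```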
+func (c *Kinesis) PutRecord(input *PutRecordInput) (*PutRecordOutput, error) { + req, out := c.PutRecordRequest(input) + err := req.Send() + return out, err +} + +const opPutRecords = "PutRecords" + +// PutRecordsRequest generates a "aws/request.Request" representing the +// client's request for the PutRecords operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutRecords method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutRecordsRequest method. +// req, resp := client.PutRecordsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Kinesis) PutRecordsRequest(input *PutRecordsInput) (req *request.Request, output *PutRecordsOutput) { + op := &request.Operation{ + Name: opPutRecords, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutRecordsInput{} + } + + req = c.newRequest(op, input, output) + output = &PutRecordsOutput{} + req.Data = output + return +} + +// Writes multiple data records into an Amazon Kinesis stream in a single call +// (also referred to as a PutRecords request). Use this operation to send data +// into the stream for data ingestion and processing. +// +// Each PutRecords request can support up to 500 records. Each record in the +// request can be as large as 1 MB, up to a limit of 5 MB for the entire request, +// including partition keys. Each shard can support writes up to 1,000 records +// per second, up to a maximum data write total of 1 MB per second. +// +// You must specify the name of the stream that captures, stores, and transports +// the data; and an array of request Records, with each record in the array +// requiring a partition key and data blob. The record size limit applies to +// the total size of the partition key and data blob. +// +// The data blob can be any type of data; for example, a segment from a log +// file, geographic/location data, website clickstream data, and so on. +// +// The partition key is used by Amazon Kinesis as input to a hash function +// that maps the partition key and associated data to a specific shard. An MD5 +// hash function is used to map partition keys to 128-bit integer values and +// to map associated data records to shards. As a result of this hashing mechanism, +// all data records with the same partition key map to the same shard within +// the stream. For more information, see Adding Data to a Stream (http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream) +// in the Amazon Kinesis Streams Developer Guide. +// +// Each record in the Records array may include an optional parameter, ExplicitHashKey, +// which overrides the partition key to shard mapping. This parameter allows +// a data producer to determine explicitly the shard where the record is stored. 
+// For more information, see Adding Multiple Records with PutRecords (http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-putrecords) +// in the Amazon Kinesis Streams Developer Guide. +// +// The PutRecords response includes an array of response Records. Each record +// in the response array directly correlates with a record in the request array +// using natural ordering, from the top to the bottom of the request and response. +// The response Records array always includes the same number of records as +// the request array. +// +// The response Records array includes both successfully and unsuccessfully +// processed records. Amazon Kinesis attempts to process all records in each +// PutRecords request. A single record failure does not stop the processing +// of subsequent records. +// +// A successfully-processed record includes ShardId and SequenceNumber values. +// The ShardId parameter identifies the shard in the stream where the record +// is stored. The SequenceNumber parameter is an identifier assigned to the +// put record, unique to all records in the stream. +// +// An unsuccessfully-processed record includes ErrorCode and ErrorMessage values. +// ErrorCode reflects the type of error and can be one of the following values: +// ProvisionedThroughputExceededException or InternalFailure. ErrorMessage provides +// more detailed information about the ProvisionedThroughputExceededException +// exception including the account ID, stream name, and shard ID of the record +// that was throttled. For more information about partially successful responses, +// see Adding Multiple Records with PutRecords (http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-add-data-to-stream.html#kinesis-using-sdk-java-putrecords) +// in the Amazon Kinesis Streams Developer Guide. +// +// By default, data records are accessible for only 24 hours from the time +// that they are added to an Amazon Kinesis stream. This retention period can +// be modified using the DecreaseStreamRetentionPeriod and IncreaseStreamRetentionPeriod +// operations. +func (c *Kinesis) PutRecords(input *PutRecordsInput) (*PutRecordsOutput, error) { + req, out := c.PutRecordsRequest(input) + err := req.Send() + return out, err +} + +const opRemoveTagsFromStream = "RemoveTagsFromStream" + +// RemoveTagsFromStreamRequest generates a "aws/request.Request" representing the +// client's request for the RemoveTagsFromStream operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RemoveTagsFromStream method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RemoveTagsFromStreamRequest method. 
+// req, resp := client.RemoveTagsFromStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Kinesis) RemoveTagsFromStreamRequest(input *RemoveTagsFromStreamInput) (req *request.Request, output *RemoveTagsFromStreamOutput) { + op := &request.Operation{ + Name: opRemoveTagsFromStream, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveTagsFromStreamInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RemoveTagsFromStreamOutput{} + req.Data = output + return +} + +// Removes tags from the specified Amazon Kinesis stream. Removed tags are deleted +// and cannot be recovered after this operation successfully completes. +// +// If you specify a tag that does not exist, it is ignored. +func (c *Kinesis) RemoveTagsFromStream(input *RemoveTagsFromStreamInput) (*RemoveTagsFromStreamOutput, error) { + req, out := c.RemoveTagsFromStreamRequest(input) + err := req.Send() + return out, err +} + +const opSplitShard = "SplitShard" + +// SplitShardRequest generates a "aws/request.Request" representing the +// client's request for the SplitShard operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SplitShard method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SplitShardRequest method. +// req, resp := client.SplitShardRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Kinesis) SplitShardRequest(input *SplitShardInput) (req *request.Request, output *SplitShardOutput) { + op := &request.Operation{ + Name: opSplitShard, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SplitShardInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SplitShardOutput{} + req.Data = output + return +} + +// Splits a shard into two new shards in the Amazon Kinesis stream to increase +// the stream's capacity to ingest and transport data. SplitShard is called +// when there is a need to increase the overall capacity of a stream because +// of an expected increase in the volume of data records being ingested. +// +// You can also use SplitShard when a shard appears to be approaching its maximum +// utilization; for example, the producers sending data into the specific shard +// are suddenly sending more than previously anticipated. You can also call +// SplitShard to increase stream capacity, so that more Amazon Kinesis applications +// can simultaneously read data from the stream for real-time processing. +// +// You must specify the shard to be split and the new hash key, which is the +// position in the shard where the shard gets split in two. 
+// In many cases, the
+// new hash key might simply be the average of the beginning and ending hash
+// key, but it can be any hash key value in the range being mapped into the
+// shard. For more information about splitting shards, see Split a Shard (http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-split.html)
+// in the Amazon Kinesis Streams Developer Guide.
+//
+// You can use DescribeStream to determine the shard ID and hash key values
+// for the ShardToSplit and NewStartingHashKey parameters that are specified
+// in the SplitShard request.
+//
+// SplitShard is an asynchronous operation. Upon receiving a SplitShard request,
+// Amazon Kinesis immediately returns a response and sets the stream status
+// to UPDATING. After the operation is completed, Amazon Kinesis sets the stream
+// status to ACTIVE. Read and write operations continue to work while the stream
+// is in the UPDATING state.
+//
+// You can use DescribeStream to check the status of the stream, which is returned
+// in StreamStatus. If the stream is in the ACTIVE state, you can call SplitShard.
+// If a stream is in the CREATING, UPDATING, or DELETING state, SplitShard returns
+// a ResourceInUseException.
+//
+// If the specified stream does not exist, SplitShard returns a ResourceNotFoundException.
+// If you try to create more shards than are authorized for your account, you
+// receive a LimitExceededException.
+//
+// For the default shard limit for an AWS account, see Streams Limits (http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html)
+// in the Amazon Kinesis Streams Developer Guide. If you need to increase this
+// limit, contact AWS Support (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html).
+//
+// If you try to operate on too many streams simultaneously using CreateStream,
+// DeleteStream, MergeShards, and/or SplitShard, you receive a LimitExceededException.
+//
+// SplitShard has a limit of 5 transactions per second per account.
+func (c *Kinesis) SplitShard(input *SplitShardInput) (*SplitShardOutput, error) {
+	req, out := c.SplitShardRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// Represents the input for AddTagsToStream.
+type AddTagsToStreamInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the stream.
+	StreamName *string `min:"1" type:"string" required:"true"`
+
+	// The set of key-value pairs to use to create the tags.
+	Tags map[string]*string `min:"1" type:"map" required:"true"`
+}
+
+// String returns the string representation
+func (s AddTagsToStreamInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddTagsToStreamInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AddTagsToStreamInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "AddTagsToStreamInput"}
+	if s.StreamName == nil {
+		invalidParams.Add(request.NewErrParamRequired("StreamName"))
+	}
+	if s.StreamName != nil && len(*s.StreamName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("StreamName", 1))
+	}
+	if s.Tags == nil {
+		invalidParams.Add(request.NewErrParamRequired("Tags"))
+	}
+	if s.Tags != nil && len(s.Tags) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Tags", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type AddTagsToStreamOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s AddTagsToStreamOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddTagsToStreamOutput) GoString() string {
+	return s.String()
+}
+
+// Represents the input for CreateStream.
+type CreateStreamInput struct {
+	_ struct{} `type:"structure"`
+
+	// The number of shards that the stream will use. The throughput of the stream
+	// is a function of the number of shards; more shards are required for greater
+	// provisioned throughput.
+	ShardCount *int64 `min:"1" type:"integer" required:"true"`
+
+	// A name to identify the stream. The stream name is scoped to the AWS account
+	// used by the application that creates the stream. It is also scoped by region.
+	// That is, two streams in two different AWS accounts can have the same name,
+	// and two streams in the same AWS account but in two different regions can
+	// have the same name.
+	StreamName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateStreamInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateStreamInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateStreamInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateStreamInput"}
+	if s.ShardCount == nil {
+		invalidParams.Add(request.NewErrParamRequired("ShardCount"))
+	}
+	if s.ShardCount != nil && *s.ShardCount < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("ShardCount", 1))
+	}
+	if s.StreamName == nil {
+		invalidParams.Add(request.NewErrParamRequired("StreamName"))
+	}
+	if s.StreamName != nil && len(*s.StreamName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("StreamName", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type CreateStreamOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateStreamOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateStreamOutput) GoString() string {
+	return s.String()
+}
+
+// Represents the input for DecreaseStreamRetentionPeriod.
+type DecreaseStreamRetentionPeriodInput struct {
+	_ struct{} `type:"structure"`
+
+	// The new retention period of the stream, in hours. Must be less than the current
+	// retention period.
+	RetentionPeriodHours *int64 `min:"24" type:"integer" required:"true"`
+
+	// The name of the stream to modify.
+ StreamName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DecreaseStreamRetentionPeriodInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecreaseStreamRetentionPeriodInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DecreaseStreamRetentionPeriodInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DecreaseStreamRetentionPeriodInput"} + if s.RetentionPeriodHours == nil { + invalidParams.Add(request.NewErrParamRequired("RetentionPeriodHours")) + } + if s.RetentionPeriodHours != nil && *s.RetentionPeriodHours < 24 { + invalidParams.Add(request.NewErrParamMinValue("RetentionPeriodHours", 24)) + } + if s.StreamName == nil { + invalidParams.Add(request.NewErrParamRequired("StreamName")) + } + if s.StreamName != nil && len(*s.StreamName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StreamName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DecreaseStreamRetentionPeriodOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DecreaseStreamRetentionPeriodOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecreaseStreamRetentionPeriodOutput) GoString() string { + return s.String() +} + +// Represents the input for DeleteStream. +type DeleteStreamInput struct { + _ struct{} `type:"structure"` + + // The name of the stream to delete. + StreamName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteStreamInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteStreamInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteStreamInput"} + if s.StreamName == nil { + invalidParams.Add(request.NewErrParamRequired("StreamName")) + } + if s.StreamName != nil && len(*s.StreamName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StreamName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteStreamOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteStreamOutput) GoString() string { + return s.String() +} + +// Represents the input for DescribeStream. +type DescribeStreamInput struct { + _ struct{} `type:"structure"` + + // The shard ID of the shard to start with. + ExclusiveStartShardId *string `min:"1" type:"string"` + + // The maximum number of shards to return. + Limit *int64 `min:"1" type:"integer"` + + // The name of the stream to describe. + StreamName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStreamInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeStreamInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DescribeStreamInput"}
+	if s.ExclusiveStartShardId != nil && len(*s.ExclusiveStartShardId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ExclusiveStartShardId", 1))
+	}
+	if s.Limit != nil && *s.Limit < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
+	}
+	if s.StreamName == nil {
+		invalidParams.Add(request.NewErrParamRequired("StreamName"))
+	}
+	if s.StreamName != nil && len(*s.StreamName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("StreamName", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Represents the output for DescribeStream.
+type DescribeStreamOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The current status of the stream, the stream ARN, an array of shard objects
+	// that comprise the stream, and states whether there are more shards available.
+	StreamDescription *StreamDescription `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeStreamOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeStreamOutput) GoString() string {
+	return s.String()
+}
+
+// Represents the input for DisableEnhancedMonitoring.
+type DisableEnhancedMonitoringInput struct {
+	_ struct{} `type:"structure"`
+
+	// List of shard-level metrics to disable.
+	//
+	// The following are the valid shard-level metrics. The value "ALL" disables
+	// every metric.
+	//
+	//    * IncomingBytes
+	//    * IncomingRecords
+	//    * OutgoingBytes
+	//    * OutgoingRecords
+	//    * WriteProvisionedThroughputExceeded
+	//    * ReadProvisionedThroughputExceeded
+	//    * IteratorAgeMilliseconds
+	//    * ALL
+	//
+	// For more information, see Monitoring the Amazon Kinesis Streams Service with
+	// Amazon CloudWatch (http://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html)
+	// in the Amazon Kinesis Streams Developer Guide.
+	ShardLevelMetrics []*string `min:"1" type:"list" required:"true"`
+
+	// The name of the Amazon Kinesis stream for which to disable enhanced monitoring.
+	StreamName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DisableEnhancedMonitoringInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DisableEnhancedMonitoringInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DisableEnhancedMonitoringInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DisableEnhancedMonitoringInput"}
+	if s.ShardLevelMetrics == nil {
+		invalidParams.Add(request.NewErrParamRequired("ShardLevelMetrics"))
+	}
+	if s.ShardLevelMetrics != nil && len(s.ShardLevelMetrics) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ShardLevelMetrics", 1))
+	}
+	if s.StreamName == nil {
+		invalidParams.Add(request.NewErrParamRequired("StreamName"))
+	}
+	if s.StreamName != nil && len(*s.StreamName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("StreamName", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Represents the input for EnableEnhancedMonitoring.
+type EnableEnhancedMonitoringInput struct {
+	_ struct{} `type:"structure"`
+
+	// List of shard-level metrics to enable.
+	//
+	// The following are the valid shard-level metrics. The value "ALL" enables
+	// every metric.
+	//
+	//    * IncomingBytes
+	//    * IncomingRecords
+	//    * OutgoingBytes
+	//    * OutgoingRecords
+	//    * WriteProvisionedThroughputExceeded
+	//    * ReadProvisionedThroughputExceeded
+	//    * IteratorAgeMilliseconds
+	//    * ALL
+	//
+	// For more information, see Monitoring the Amazon Kinesis Streams Service with
+	// Amazon CloudWatch (http://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html)
+	// in the Amazon Kinesis Streams Developer Guide.
+	ShardLevelMetrics []*string `min:"1" type:"list" required:"true"`
+
+	// The name of the stream for which to enable enhanced monitoring.
+	StreamName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s EnableEnhancedMonitoringInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EnableEnhancedMonitoringInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *EnableEnhancedMonitoringInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "EnableEnhancedMonitoringInput"}
+	if s.ShardLevelMetrics == nil {
+		invalidParams.Add(request.NewErrParamRequired("ShardLevelMetrics"))
+	}
+	if s.ShardLevelMetrics != nil && len(s.ShardLevelMetrics) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ShardLevelMetrics", 1))
+	}
+	if s.StreamName == nil {
+		invalidParams.Add(request.NewErrParamRequired("StreamName"))
+	}
+	if s.StreamName != nil && len(*s.StreamName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("StreamName", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Represents enhanced metrics types.
+type EnhancedMetrics struct {
+	_ struct{} `type:"structure"`
+
+	// List of shard-level metrics.
+	//
+	// The following are the valid shard-level metrics. The value "ALL" enhances
+	// every metric.
+	//
+	//    * IncomingBytes
+	//    * IncomingRecords
+	//    * OutgoingBytes
+	//    * OutgoingRecords
+	//    * WriteProvisionedThroughputExceeded
+	//    * ReadProvisionedThroughputExceeded
+	//    * IteratorAgeMilliseconds
+	//    * ALL
+	//
+	// For more information, see Monitoring the Amazon Kinesis Streams Service with
+	// Amazon CloudWatch (http://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html)
+	// in the Amazon Kinesis Streams Developer Guide.
+	ShardLevelMetrics []*string `min:"1" type:"list"`
+}
+
+// String returns the string representation
+func (s EnhancedMetrics) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EnhancedMetrics) GoString() string {
+	return s.String()
+}
+
+// Represents the output for EnableEnhancedMonitoring and DisableEnhancedMonitoring.
+type EnhancedMonitoringOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Represents the current state of the metrics that are in the enhanced state
+	// before the operation.
+	CurrentShardLevelMetrics []*string `min:"1" type:"list"`
+
+	// Represents the list of all the metrics that would be in the enhanced state
+	// after the operation.
+	DesiredShardLevelMetrics []*string `min:"1" type:"list"`
+
+	// The name of the Amazon Kinesis stream.
+	StreamName *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s EnhancedMonitoringOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EnhancedMonitoringOutput) GoString() string {
+	return s.String()
+}
+
+// Represents the input for GetRecords.
+type GetRecordsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The maximum number of records to return. Specify a value of up to 10,000.
+	// If you specify a value that is greater than 10,000, GetRecords throws InvalidArgumentException.
+	Limit *int64 `min:"1" type:"integer"`
+
+	// The position in the shard from which you want to start sequentially reading
+	// data records. A shard iterator specifies this position using the sequence
+	// number of a data record in the shard.
+	ShardIterator *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetRecordsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetRecordsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetRecordsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetRecordsInput"}
+	if s.Limit != nil && *s.Limit < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
+	}
+	if s.ShardIterator == nil {
+		invalidParams.Add(request.NewErrParamRequired("ShardIterator"))
+	}
+	if s.ShardIterator != nil && len(*s.ShardIterator) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ShardIterator", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Represents the output for GetRecords.
+type GetRecordsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The number of milliseconds the GetRecords response is from the tip of the
+	// stream, indicating how far behind current time the consumer is. A value
+	// of zero indicates record processing is caught up, and there are no new records
+	// to process at this moment.
+	MillisBehindLatest *int64 `type:"long"`
+
+	// The next position in the shard from which to start sequentially reading
+	// data records. If set to null, the shard has been closed and the requested
+	// iterator will not return any more data.
+	NextShardIterator *string `min:"1" type:"string"`
+
+	// The data records retrieved from the shard.
+	Records []*Record `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s GetRecordsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetRecordsOutput) GoString() string {
+	return s.String()
+}
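+
+// Note: GetShardIterator and GetRecords are meant to be used together in a read
+// loop: fetch an iterator once, then keep calling GetRecords with the
+// NextShardIterator from each response until it is nil (the shard is closed).
+// A minimal sketch, not generated code; svc is a *kinesis.Kinesis and the
+// stream and shard names are placeholders:
+//
+//	it, err := svc.GetShardIterator(&kinesis.GetShardIteratorInput{
+//		StreamName:        aws.String("my-stream"),
+//		ShardId:           aws.String("shardId-000000000000"),
+//		ShardIteratorType: aws.String(kinesis.ShardIteratorTypeTrimHorizon),
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	iter := it.ShardIterator
+//	for iter != nil {
+//		out, err := svc.GetRecords(&kinesis.GetRecordsInput{ShardIterator: iter})
+//		if err != nil {
+//			return err // a real consumer backs off and retries throttling errors
+//		}
+//		for _, r := range out.Records {
+//			fmt.Printf("%s: %s\n", *r.SequenceNumber, r.Data)
+//		}
+//		iter = out.NextShardIterator // sleep between calls; iterators expire
+//	}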
+
+// Represents the input for GetShardIterator.
+type GetShardIteratorInput struct {
+	_ struct{} `type:"structure"`
+
+	// The shard ID of the Amazon Kinesis shard to get the iterator for.
+	ShardId *string `min:"1" type:"string" required:"true"`
+
+	// Determines how the shard iterator is used to start reading data records
+	// from the shard.
+	//
+	// The following are the valid Amazon Kinesis shard iterator types:
+	//
+	//    * AT_SEQUENCE_NUMBER - Start reading from the position denoted by a
+	//    specific sequence number, provided in the value StartingSequenceNumber.
+	//
+	//    * AFTER_SEQUENCE_NUMBER - Start reading right after the position denoted
+	//    by a specific sequence number, provided in the value StartingSequenceNumber.
+	//
+	//    * AT_TIMESTAMP - Start reading from the position denoted by a specific
+	//    timestamp, provided in the value Timestamp.
+	//
+	//    * TRIM_HORIZON - Start reading at the last untrimmed record in the shard
+	//    in the system, which is the oldest data record in the shard.
+	//
+	//    * LATEST - Start reading just after the most recent record in the shard,
+	//    so that you always read the most recent data in the shard.
+	ShardIteratorType *string `type:"string" required:"true" enum:"ShardIteratorType"`
+
+	// The sequence number of the data record in the shard from which to start
+	// reading. Used with shard iterator types AT_SEQUENCE_NUMBER and AFTER_SEQUENCE_NUMBER.
+	StartingSequenceNumber *string `type:"string"`
+
+	// The name of the Amazon Kinesis stream.
+	StreamName *string `min:"1" type:"string" required:"true"`
+
+	// The timestamp of the data record from which to start reading. Used with
+	// shard iterator type AT_TIMESTAMP. A timestamp is the Unix epoch date with
+	// precision in milliseconds. For example, 2016-04-04T19:58:46.480-00:00 or
+	// 1459799926.480. If a record with this exact timestamp does not exist, the
+	// iterator returned is for the next (later) record. If the timestamp is older
+	// than the current trim horizon, the iterator returned is for the oldest untrimmed
+	// data record (TRIM_HORIZON).
+	Timestamp *time.Time `type:"timestamp" timestampFormat:"unix"`
+}
+
+// String returns the string representation
+func (s GetShardIteratorInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetShardIteratorInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetShardIteratorInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetShardIteratorInput"}
+	if s.ShardId == nil {
+		invalidParams.Add(request.NewErrParamRequired("ShardId"))
+	}
+	if s.ShardId != nil && len(*s.ShardId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ShardId", 1))
+	}
+	if s.ShardIteratorType == nil {
+		invalidParams.Add(request.NewErrParamRequired("ShardIteratorType"))
+	}
+	if s.StreamName == nil {
+		invalidParams.Add(request.NewErrParamRequired("StreamName"))
+	}
+	if s.StreamName != nil && len(*s.StreamName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("StreamName", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Represents the output for GetShardIterator.
+type GetShardIteratorOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The position in the shard from which to start reading data records sequentially.
+	// A shard iterator specifies this position using the sequence number of a
+	// data record in a shard.
+	ShardIterator *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s GetShardIteratorOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetShardIteratorOutput) GoString() string {
+	return s.String()
+}
+
+// The range of possible hash key values for the shard, which is a set of ordered
+// contiguous positive integers.
+type HashKeyRange struct {
+	_ struct{} `type:"structure"`
+
+	// The ending hash key of the hash key range.
+	EndingHashKey *string `type:"string" required:"true"`
+
+	// The starting hash key of the hash key range.
+	StartingHashKey *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s HashKeyRange) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HashKeyRange) GoString() string {
+	return s.String()
+}
+
+// Represents the input for IncreaseStreamRetentionPeriod.
+type IncreaseStreamRetentionPeriodInput struct { + _ struct{} `type:"structure"` + + // The new retention period of the stream, in hours. Must be more than the current + // retention period. + RetentionPeriodHours *int64 `min:"24" type:"integer" required:"true"` + + // The name of the stream to modify. + StreamName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s IncreaseStreamRetentionPeriodInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IncreaseStreamRetentionPeriodInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IncreaseStreamRetentionPeriodInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IncreaseStreamRetentionPeriodInput"} + if s.RetentionPeriodHours == nil { + invalidParams.Add(request.NewErrParamRequired("RetentionPeriodHours")) + } + if s.RetentionPeriodHours != nil && *s.RetentionPeriodHours < 24 { + invalidParams.Add(request.NewErrParamMinValue("RetentionPeriodHours", 24)) + } + if s.StreamName == nil { + invalidParams.Add(request.NewErrParamRequired("StreamName")) + } + if s.StreamName != nil && len(*s.StreamName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StreamName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type IncreaseStreamRetentionPeriodOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s IncreaseStreamRetentionPeriodOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IncreaseStreamRetentionPeriodOutput) GoString() string { + return s.String() +} + +// Represents the input for ListStreams. +type ListStreamsInput struct { + _ struct{} `type:"structure"` + + // The name of the stream to start the list with. + ExclusiveStartStreamName *string `min:"1" type:"string"` + + // The maximum number of streams to list. + Limit *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s ListStreamsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListStreamsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListStreamsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListStreamsInput"} + if s.ExclusiveStartStreamName != nil && len(*s.ExclusiveStartStreamName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ExclusiveStartStreamName", 1)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output for ListStreams. +type ListStreamsOutput struct { + _ struct{} `type:"structure"` + + // If set to true, there are more streams available to list. + HasMoreStreams *bool `type:"boolean" required:"true"` + + // The names of the streams that are associated with the AWS account making + // the ListStreams request. 
+ StreamNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListStreamsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListStreamsOutput) GoString() string { + return s.String() +} + +// Represents the input for ListTagsForStream. +type ListTagsForStreamInput struct { + _ struct{} `type:"structure"` + + // The key to use as the starting point for the list of tags. If this parameter + // is set, ListTagsForStream gets all tags that occur after ExclusiveStartTagKey. + ExclusiveStartTagKey *string `min:"1" type:"string"` + + // The number of tags to return. If this number is less than the total number + // of tags associated with the stream, HasMoreTags is set to true. To list additional + // tags, set ExclusiveStartTagKey to the last key in the response. + Limit *int64 `min:"1" type:"integer"` + + // The name of the stream. + StreamName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForStreamInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForStreamInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForStreamInput"} + if s.ExclusiveStartTagKey != nil && len(*s.ExclusiveStartTagKey) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ExclusiveStartTagKey", 1)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.StreamName == nil { + invalidParams.Add(request.NewErrParamRequired("StreamName")) + } + if s.StreamName != nil && len(*s.StreamName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StreamName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output for ListTagsForStream. +type ListTagsForStreamOutput struct { + _ struct{} `type:"structure"` + + // If set to true, more tags are available. To request additional tags, set + // ExclusiveStartTagKey to the key of the last tag returned. + HasMoreTags *bool `type:"boolean" required:"true"` + + // A list of tags associated with StreamName, starting with the first tag after + // ExclusiveStartTagKey and up to the specified Limit. + Tags []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListTagsForStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForStreamOutput) GoString() string { + return s.String() +} + +// Represents the input for MergeShards. +type MergeShardsInput struct { + _ struct{} `type:"structure"` + + // The shard ID of the adjacent shard for the merge. + AdjacentShardToMerge *string `min:"1" type:"string" required:"true"` + + // The shard ID of the shard to combine with the adjacent shard for the merge. + ShardToMerge *string `min:"1" type:"string" required:"true"` + + // The name of the stream for the merge. 
+ StreamName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s MergeShardsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MergeShardsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MergeShardsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MergeShardsInput"} + if s.AdjacentShardToMerge == nil { + invalidParams.Add(request.NewErrParamRequired("AdjacentShardToMerge")) + } + if s.AdjacentShardToMerge != nil && len(*s.AdjacentShardToMerge) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AdjacentShardToMerge", 1)) + } + if s.ShardToMerge == nil { + invalidParams.Add(request.NewErrParamRequired("ShardToMerge")) + } + if s.ShardToMerge != nil && len(*s.ShardToMerge) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ShardToMerge", 1)) + } + if s.StreamName == nil { + invalidParams.Add(request.NewErrParamRequired("StreamName")) + } + if s.StreamName != nil && len(*s.StreamName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StreamName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type MergeShardsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s MergeShardsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MergeShardsOutput) GoString() string { + return s.String() +} + +// Represents the input for PutRecord. +type PutRecordInput struct { + _ struct{} `type:"structure"` + + // The data blob to put into the record, which is base64-encoded when the blob + // is serialized. When the data blob (the payload before base64-encoding) is + // added to the partition key size, the total size must not exceed the maximum + // record size (1 MB). + // + // Data is automatically base64 encoded/decoded by the SDK. + Data []byte `type:"blob" required:"true"` + + // The hash value used to explicitly determine the shard the data record is + // assigned to by overriding the partition key hash. + ExplicitHashKey *string `type:"string"` + + // Determines which shard in the stream the data record is assigned to. Partition + // keys are Unicode strings with a maximum length limit of 256 characters for + // each key. Amazon Kinesis uses the partition key as input to a hash function + // that maps the partition key and associated data to a specific shard. Specifically, + // an MD5 hash function is used to map partition keys to 128-bit integer values + // and to map associated data records to shards. As a result of this hashing + // mechanism, all data records with the same partition key map to the same shard + // within the stream. + PartitionKey *string `min:"1" type:"string" required:"true"` + + // Guarantees strictly increasing sequence numbers, for puts from the same client + // and to the same partition key. Usage: set the SequenceNumberForOrdering of + // record n to the sequence number of record n-1 (as returned in the result + // when putting record n-1). If this parameter is not set, records will be coarsely + // ordered based on arrival time. + SequenceNumberForOrdering *string `type:"string"` + + // The name of the stream to put the data record into. 
+ StreamName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutRecordInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRecordInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutRecordInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutRecordInput"} + if s.Data == nil { + invalidParams.Add(request.NewErrParamRequired("Data")) + } + if s.PartitionKey == nil { + invalidParams.Add(request.NewErrParamRequired("PartitionKey")) + } + if s.PartitionKey != nil && len(*s.PartitionKey) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PartitionKey", 1)) + } + if s.StreamName == nil { + invalidParams.Add(request.NewErrParamRequired("StreamName")) + } + if s.StreamName != nil && len(*s.StreamName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StreamName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output for PutRecord. +type PutRecordOutput struct { + _ struct{} `type:"structure"` + + // The sequence number identifier that was assigned to the put data record. + // The sequence number for the record is unique across all records in the stream. + // A sequence number is the identifier associated with every record put into + // the stream. + SequenceNumber *string `type:"string" required:"true"` + + // The shard ID of the shard where the data record was placed. + ShardId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutRecordOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRecordOutput) GoString() string { + return s.String() +} + +// A PutRecords request. +type PutRecordsInput struct { + _ struct{} `type:"structure"` + + // The records associated with the request. + Records []*PutRecordsRequestEntry `min:"1" type:"list" required:"true"` + + // The stream name associated with the request. + StreamName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutRecordsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRecordsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutRecordsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutRecordsInput"} + if s.Records == nil { + invalidParams.Add(request.NewErrParamRequired("Records")) + } + if s.Records != nil && len(s.Records) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Records", 1)) + } + if s.StreamName == nil { + invalidParams.Add(request.NewErrParamRequired("StreamName")) + } + if s.StreamName != nil && len(*s.StreamName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StreamName", 1)) + } + if s.Records != nil { + for i, v := range s.Records { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Records", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// PutRecords results. +type PutRecordsOutput struct { + _ struct{} `type:"structure"` + + // The number of unsuccessfully processed records in a PutRecords request. 
+	FailedRecordCount *int64 `min:"1" type:"integer"`
+
+	// An array of successfully and unsuccessfully processed record results, correlated
+	// with the request by natural ordering. A record that is successfully added
+	// to a stream includes SequenceNumber and ShardId in the result. A record
+	// that fails to be added to a stream includes ErrorCode and ErrorMessage in
+	// the result.
+	Records []*PutRecordsResultEntry `min:"1" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s PutRecordsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutRecordsOutput) GoString() string {
+	return s.String()
+}
+
+// Represents a single record in a PutRecords request.
+type PutRecordsRequestEntry struct {
+	_ struct{} `type:"structure"`
+
+	// The data blob to put into the record, which is base64-encoded when the blob
+	// is serialized. When the data blob (the payload before base64-encoding) is
+	// added to the partition key size, the total size must not exceed the maximum
+	// record size (1 MB).
+	//
+	// Data is automatically base64 encoded/decoded by the SDK.
+	Data []byte `type:"blob" required:"true"`
+
+	// The hash value used to determine explicitly the shard that the data record
+	// is assigned to by overriding the partition key hash.
+	ExplicitHashKey *string `type:"string"`
+
+	// Determines which shard in the stream the data record is assigned to. Partition
+	// keys are Unicode strings with a maximum length limit of 256 characters for
+	// each key. Amazon Kinesis uses the partition key as input to a hash function
+	// that maps the partition key and associated data to a specific shard. Specifically,
+	// an MD5 hash function is used to map partition keys to 128-bit integer values
+	// and to map associated data records to shards. As a result of this hashing
+	// mechanism, all data records with the same partition key map to the same
+	// shard within the stream.
+	PartitionKey *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s PutRecordsRequestEntry) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutRecordsRequestEntry) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutRecordsRequestEntry) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PutRecordsRequestEntry"}
+	if s.Data == nil {
+		invalidParams.Add(request.NewErrParamRequired("Data"))
+	}
+	if s.PartitionKey == nil {
+		invalidParams.Add(request.NewErrParamRequired("PartitionKey"))
+	}
+	if s.PartitionKey != nil && len(*s.PartitionKey) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("PartitionKey", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Represents the result of an individual record from a PutRecords request.
+// A record that is successfully added to a stream includes SequenceNumber and
+// ShardId in the result. A record that fails to be added to the stream includes
+// ErrorCode and ErrorMessage in the result.
+type PutRecordsResultEntry struct {
+	_ struct{} `type:"structure"`
+
+	// The error code for an individual record result. ErrorCodes can be either
+	// ProvisionedThroughputExceededException or InternalFailure.
+	ErrorCode *string `type:"string"`
+
+	// The error message for an individual record result. An ErrorCode value of
+	// ProvisionedThroughputExceededException has an error message that includes
+	// the account ID, stream name, and shard ID. An ErrorCode value of InternalFailure
+	// has the error message "Internal Service Failure".
+	ErrorMessage *string `type:"string"`
+
+	// The sequence number for an individual record result.
+	SequenceNumber *string `type:"string"`
+
+	// The shard ID for an individual record result.
+	ShardId *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s PutRecordsResultEntry) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutRecordsResultEntry) GoString() string {
+	return s.String()
+}
+
+// The unit of data of the Amazon Kinesis stream, which is composed of a sequence
+// number, a partition key, and a data blob.
+type Record struct {
+	_ struct{} `type:"structure"`
+
+	// The approximate time that the record was inserted into the stream.
+	ApproximateArrivalTimestamp *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+	// The data blob. The data in the blob is both opaque and immutable to the
+	// Amazon Kinesis service, which does not inspect, interpret, or change the
+	// data in the blob in any way. When the data blob (the payload before base64-encoding)
+	// is added to the partition key size, the total size must not exceed the maximum
+	// record size (1 MB).
+	//
+	// Data is automatically base64 encoded/decoded by the SDK.
+	Data []byte `type:"blob" required:"true"`
+
+	// Identifies which shard in the stream the data record is assigned to.
+	PartitionKey *string `min:"1" type:"string" required:"true"`
+
+	// The unique identifier of the record in the stream.
+	SequenceNumber *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s Record) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Record) GoString() string {
+	return s.String()
+}
+
+// Represents the input for RemoveTagsFromStream.
+type RemoveTagsFromStreamInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the stream.
+	StreamName *string `min:"1" type:"string" required:"true"`
+
+	// A list of tag keys. Each corresponding tag is removed from the stream.
+	TagKeys []*string `min:"1" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s RemoveTagsFromStreamInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RemoveTagsFromStreamInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
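+
+// Note: a PutRecords call can partially fail; per-record errors surface through
+// the ErrorCode field of PutRecordsResultEntry above rather than through the
+// call's error return. A common (illustrative, not generated) pattern is to
+// re-submit just the failed entries; buildEntries is a hypothetical helper
+// returning []*kinesis.PutRecordsRequestEntry:
+//
+//	entries := buildEntries()
+//	for len(entries) > 0 {
+//		out, err := svc.PutRecords(&kinesis.PutRecordsInput{
+//			Records:    entries,
+//			StreamName: aws.String("my-stream"),
+//		})
+//		if err != nil {
+//			return err
+//		}
+//		var retry []*kinesis.PutRecordsRequestEntry
+//		if aws.Int64Value(out.FailedRecordCount) > 0 {
+//			for i, r := range out.Records { // correlated by natural ordering
+//				if r.ErrorCode != nil { // throttled or internal failure
+//					retry = append(retry, entries[i])
+//				}
+//			}
+//		}
+//		entries = retry // loop again with only the failures; back off in real code
+//	}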
+func (s *RemoveTagsFromStreamInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemoveTagsFromStreamInput"} + if s.StreamName == nil { + invalidParams.Add(request.NewErrParamRequired("StreamName")) + } + if s.StreamName != nil && len(*s.StreamName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StreamName", 1)) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + if s.TagKeys != nil && len(s.TagKeys) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TagKeys", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RemoveTagsFromStreamOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveTagsFromStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsFromStreamOutput) GoString() string { + return s.String() +} + +// The range of possible sequence numbers for the shard. +type SequenceNumberRange struct { + _ struct{} `type:"structure"` + + // The ending sequence number for the range. Shards that are in the OPEN state + // have an ending sequence number of null. + EndingSequenceNumber *string `type:"string"` + + // The starting sequence number for the range. + StartingSequenceNumber *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SequenceNumberRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SequenceNumberRange) GoString() string { + return s.String() +} + +// A uniquely identified group of data records in an Amazon Kinesis stream. +type Shard struct { + _ struct{} `type:"structure"` + + // The shard ID of the shard adjacent to the shard's parent. + AdjacentParentShardId *string `min:"1" type:"string"` + + // The range of possible hash key values for the shard, which is a set of ordered + // contiguous positive integers. + HashKeyRange *HashKeyRange `type:"structure" required:"true"` + + // The shard ID of the shard's parent. + ParentShardId *string `min:"1" type:"string"` + + // The range of possible sequence numbers for the shard. + SequenceNumberRange *SequenceNumberRange `type:"structure" required:"true"` + + // The unique identifier of the shard within the stream. + ShardId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s Shard) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Shard) GoString() string { + return s.String() +} + +// Represents the input for SplitShard. +type SplitShardInput struct { + _ struct{} `type:"structure"` + + // A hash key value for the starting hash key of one of the child shards created + // by the split. The hash key range for a given shard constitutes a set of ordered + // contiguous positive integers. The value for NewStartingHashKey must be in + // the range of hash keys being mapped into the shard. The NewStartingHashKey + // hash key value and all higher hash key values in hash key range are distributed + // to one of the child shards. All the lower hash key values in the range are + // distributed to the other child shard. + NewStartingHashKey *string `type:"string" required:"true"` + + // The shard ID of the shard to split. + ShardToSplit *string `min:"1" type:"string" required:"true"` + + // The name of the stream for the shard split. 
+	StreamName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s SplitShardInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SplitShardInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SplitShardInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "SplitShardInput"}
+	if s.NewStartingHashKey == nil {
+		invalidParams.Add(request.NewErrParamRequired("NewStartingHashKey"))
+	}
+	if s.ShardToSplit == nil {
+		invalidParams.Add(request.NewErrParamRequired("ShardToSplit"))
+	}
+	if s.ShardToSplit != nil && len(*s.ShardToSplit) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ShardToSplit", 1))
+	}
+	if s.StreamName == nil {
+		invalidParams.Add(request.NewErrParamRequired("StreamName"))
+	}
+	if s.StreamName != nil && len(*s.StreamName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("StreamName", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type SplitShardOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s SplitShardOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SplitShardOutput) GoString() string {
+	return s.String()
+}
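+
+// Note: NewStartingHashKey must fall within the parent shard's HashKeyRange;
+// an even two-way split uses the midpoint of that range. Hash keys are the
+// decimal string form of 128-bit integers, so math/big is the natural tool.
+// A sketch, not generated code; shard is a *kinesis.Shard taken from a
+// DescribeStream response and the stream name is a placeholder:
+//
+//	start, _ := new(big.Int).SetString(*shard.HashKeyRange.StartingHashKey, 10)
+//	end, _ := new(big.Int).SetString(*shard.HashKeyRange.EndingHashKey, 10)
+//	mid := new(big.Int).Div(new(big.Int).Add(start, end), big.NewInt(2))
+//	_, err := svc.SplitShard(&kinesis.SplitShardInput{
+//		StreamName:         aws.String("my-stream"),
+//		ShardToSplit:       shard.ShardId,
+//		NewStartingHashKey: aws.String(mid.String()),
+//	})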
+
+// Represents the output for DescribeStream.
+type StreamDescription struct {
+	_ struct{} `type:"structure"`
+
+	// Represents the current enhanced monitoring settings of the stream.
+	EnhancedMonitoring []*EnhancedMetrics `type:"list" required:"true"`
+
+	// If set to true, more shards in the stream are available to describe.
+	HasMoreShards *bool `type:"boolean" required:"true"`
+
+	// The current retention period, in hours.
+	RetentionPeriodHours *int64 `min:"24" type:"integer" required:"true"`
+
+	// The shards that comprise the stream.
+	Shards []*Shard `type:"list" required:"true"`
+
+	// The Amazon Resource Name (ARN) for the stream being described.
+	StreamARN *string `type:"string" required:"true"`
+
+	// The name of the stream being described.
+	StreamName *string `min:"1" type:"string" required:"true"`
+
+	// The current status of the stream being described. The stream status is one
+	// of the following states:
+	//
+	//    * CREATING - The stream is being created. Amazon Kinesis immediately
+	//    returns and sets StreamStatus to CREATING.
+	//
+	//    * DELETING - The stream is being deleted. The specified stream is in
+	//    the DELETING state until Amazon Kinesis completes the deletion.
+	//
+	//    * ACTIVE - The stream exists and is ready for read and write operations
+	//    or deletion. You should perform read and write operations only on an
+	//    ACTIVE stream.
+	//
+	//    * UPDATING - Shards in the stream are being merged or split. Read and
+	//    write operations continue to work while the stream is in the UPDATING
+	//    state.
+	StreamStatus *string `type:"string" required:"true" enum:"StreamStatus"`
+}
+
+// String returns the string representation
+func (s StreamDescription) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StreamDescription) GoString() string {
+	return s.String()
+}
+
+// Metadata assigned to the stream, consisting of a key-value pair.
+type Tag struct {
+	_ struct{} `type:"structure"`
+
+	// A unique identifier for the tag. Maximum length: 128 characters. Valid characters:
+	// Unicode letters, digits, white space, _ . / = + - % @
+	Key *string `min:"1" type:"string" required:"true"`
+
+	// An optional string, typically used to describe or define the tag. Maximum
+	// length: 256 characters. Valid characters: Unicode letters, digits, white
+	// space, _ . / = + - % @
+	Value *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Tag) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Tag) GoString() string {
+	return s.String()
+}
+
+const (
+	// @enum MetricsName
+	MetricsNameIncomingBytes = "IncomingBytes"
+	// @enum MetricsName
+	MetricsNameIncomingRecords = "IncomingRecords"
+	// @enum MetricsName
+	MetricsNameOutgoingBytes = "OutgoingBytes"
+	// @enum MetricsName
+	MetricsNameOutgoingRecords = "OutgoingRecords"
+	// @enum MetricsName
+	MetricsNameWriteProvisionedThroughputExceeded = "WriteProvisionedThroughputExceeded"
+	// @enum MetricsName
+	MetricsNameReadProvisionedThroughputExceeded = "ReadProvisionedThroughputExceeded"
+	// @enum MetricsName
+	MetricsNameIteratorAgeMilliseconds = "IteratorAgeMilliseconds"
+	// @enum MetricsName
+	MetricsNameAll = "ALL"
+)
+
+const (
+	// @enum ShardIteratorType
+	ShardIteratorTypeAtSequenceNumber = "AT_SEQUENCE_NUMBER"
+	// @enum ShardIteratorType
+	ShardIteratorTypeAfterSequenceNumber = "AFTER_SEQUENCE_NUMBER"
+	// @enum ShardIteratorType
+	ShardIteratorTypeTrimHorizon = "TRIM_HORIZON"
+	// @enum ShardIteratorType
+	ShardIteratorTypeLatest = "LATEST"
+	// @enum ShardIteratorType
+	ShardIteratorTypeAtTimestamp = "AT_TIMESTAMP"
+)
+
+const (
+	// @enum StreamStatus
+	StreamStatusCreating = "CREATING"
+	// @enum StreamStatus
+	StreamStatusDeleting = "DELETING"
+	// @enum StreamStatus
+	StreamStatusActive = "ACTIVE"
+	// @enum StreamStatus
+	StreamStatusUpdating = "UPDATING"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/kinesis/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/kinesis/examples_test.go
new file mode 100644
index 000000000..ffbb78bea
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/kinesis/examples_test.go
@@ -0,0 +1,384 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package kinesis_test
+
+import (
+	"bytes"
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/kinesis"
+)
+
+var _ time.Duration
+var _ bytes.Buffer
+
+func ExampleKinesis_AddTagsToStream() {
+	svc := kinesis.New(session.New())
+
+	params := &kinesis.AddTagsToStreamInput{
+		StreamName: aws.String("StreamName"), // Required
+		Tags: map[string]*string{ // Required
+			"Key": aws.String("TagValue"), // Required
+			// More values...
+		},
+	}
+	resp, err := svc.AddTagsToStream(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleKinesis_CreateStream() {
+	svc := kinesis.New(session.New())
+
+	params := &kinesis.CreateStreamInput{
+		ShardCount: aws.Int64(1),             // Required
+		StreamName: aws.String("StreamName"), // Required
+	}
+	resp, err := svc.CreateStream(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+ fmt.Println(resp) +} + +func ExampleKinesis_DecreaseStreamRetentionPeriod() { + svc := kinesis.New(session.New()) + + params := &kinesis.DecreaseStreamRetentionPeriodInput{ + RetentionPeriodHours: aws.Int64(1), // Required + StreamName: aws.String("StreamName"), // Required + } + resp, err := svc.DecreaseStreamRetentionPeriod(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKinesis_DeleteStream() { + svc := kinesis.New(session.New()) + + params := &kinesis.DeleteStreamInput{ + StreamName: aws.String("StreamName"), // Required + } + resp, err := svc.DeleteStream(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKinesis_DescribeStream() { + svc := kinesis.New(session.New()) + + params := &kinesis.DescribeStreamInput{ + StreamName: aws.String("StreamName"), // Required + ExclusiveStartShardId: aws.String("ShardId"), + Limit: aws.Int64(1), + } + resp, err := svc.DescribeStream(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKinesis_DisableEnhancedMonitoring() { + svc := kinesis.New(session.New()) + + params := &kinesis.DisableEnhancedMonitoringInput{ + ShardLevelMetrics: []*string{ // Required + aws.String("MetricsName"), // Required + // More values... + }, + StreamName: aws.String("StreamName"), // Required + } + resp, err := svc.DisableEnhancedMonitoring(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKinesis_EnableEnhancedMonitoring() { + svc := kinesis.New(session.New()) + + params := &kinesis.EnableEnhancedMonitoringInput{ + ShardLevelMetrics: []*string{ // Required + aws.String("MetricsName"), // Required + // More values... + }, + StreamName: aws.String("StreamName"), // Required + } + resp, err := svc.EnableEnhancedMonitoring(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKinesis_GetRecords() { + svc := kinesis.New(session.New()) + + params := &kinesis.GetRecordsInput{ + ShardIterator: aws.String("ShardIterator"), // Required + Limit: aws.Int64(1), + } + resp, err := svc.GetRecords(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleKinesis_GetShardIterator() { + svc := kinesis.New(session.New()) + + params := &kinesis.GetShardIteratorInput{ + ShardId: aws.String("ShardId"), // Required + ShardIteratorType: aws.String("ShardIteratorType"), // Required + StreamName: aws.String("StreamName"), // Required + StartingSequenceNumber: aws.String("SequenceNumber"), + Timestamp: aws.Time(time.Now()), + } + resp, err := svc.GetShardIterator(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKinesis_IncreaseStreamRetentionPeriod() { + svc := kinesis.New(session.New()) + + params := &kinesis.IncreaseStreamRetentionPeriodInput{ + RetentionPeriodHours: aws.Int64(1), // Required + StreamName: aws.String("StreamName"), // Required + } + resp, err := svc.IncreaseStreamRetentionPeriod(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKinesis_ListStreams() { + svc := kinesis.New(session.New()) + + params := &kinesis.ListStreamsInput{ + ExclusiveStartStreamName: aws.String("StreamName"), + Limit: aws.Int64(1), + } + resp, err := svc.ListStreams(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKinesis_ListTagsForStream() { + svc := kinesis.New(session.New()) + + params := &kinesis.ListTagsForStreamInput{ + StreamName: aws.String("StreamName"), // Required + ExclusiveStartTagKey: aws.String("TagKey"), + Limit: aws.Int64(1), + } + resp, err := svc.ListTagsForStream(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKinesis_MergeShards() { + svc := kinesis.New(session.New()) + + params := &kinesis.MergeShardsInput{ + AdjacentShardToMerge: aws.String("ShardId"), // Required + ShardToMerge: aws.String("ShardId"), // Required + StreamName: aws.String("StreamName"), // Required + } + resp, err := svc.MergeShards(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKinesis_PutRecord() { + svc := kinesis.New(session.New()) + + params := &kinesis.PutRecordInput{ + Data: []byte("PAYLOAD"), // Required + PartitionKey: aws.String("PartitionKey"), // Required + StreamName: aws.String("StreamName"), // Required + ExplicitHashKey: aws.String("HashKey"), + SequenceNumberForOrdering: aws.String("SequenceNumber"), + } + resp, err := svc.PutRecord(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleKinesis_PutRecords() { + svc := kinesis.New(session.New()) + + params := &kinesis.PutRecordsInput{ + Records: []*kinesis.PutRecordsRequestEntry{ // Required + { // Required + Data: []byte("PAYLOAD"), // Required + PartitionKey: aws.String("PartitionKey"), // Required + ExplicitHashKey: aws.String("HashKey"), + }, + // More values... + }, + StreamName: aws.String("StreamName"), // Required + } + resp, err := svc.PutRecords(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKinesis_RemoveTagsFromStream() { + svc := kinesis.New(session.New()) + + params := &kinesis.RemoveTagsFromStreamInput{ + StreamName: aws.String("StreamName"), // Required + TagKeys: []*string{ // Required + aws.String("TagKey"), // Required + // More values... + }, + } + resp, err := svc.RemoveTagsFromStream(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKinesis_SplitShard() { + svc := kinesis.New(session.New()) + + params := &kinesis.SplitShardInput{ + NewStartingHashKey: aws.String("HashKey"), // Required + ShardToSplit: aws.String("ShardId"), // Required + StreamName: aws.String("StreamName"), // Required + } + resp, err := svc.SplitShard(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/kinesis/kinesisiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/kinesis/kinesisiface/interface.go new file mode 100644 index 000000000..f0c373aec --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/kinesis/kinesisiface/interface.go @@ -0,0 +1,86 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package kinesisiface provides an interface for the Amazon Kinesis. +package kinesisiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/kinesis" +) + +// KinesisAPI is the interface type for kinesis.Kinesis. 
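+
+// Note: the KinesisAPI interface declared just below lets application code
+// depend on an abstraction rather than the concrete *kinesis.Kinesis client,
+// which makes unit testing straightforward. A hedged sketch of a test double
+// (mockKinesis is a made-up name, not part of this package):
+//
+//	type mockKinesis struct {
+//		kinesisiface.KinesisAPI // embed; override only what the test needs
+//		records []*kinesis.PutRecordsRequestEntry
+//	}
+//
+//	func (m *mockKinesis) PutRecords(in *kinesis.PutRecordsInput) (*kinesis.PutRecordsOutput, error) {
+//		m.records = append(m.records, in.Records...)
+//		return &kinesis.PutRecordsOutput{FailedRecordCount: aws.Int64(0)}, nil
+//	}
+//
+// Production code accepts a kinesisiface.KinesisAPI; tests pass in &mockKinesis{}.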
+type KinesisAPI interface { + AddTagsToStreamRequest(*kinesis.AddTagsToStreamInput) (*request.Request, *kinesis.AddTagsToStreamOutput) + + AddTagsToStream(*kinesis.AddTagsToStreamInput) (*kinesis.AddTagsToStreamOutput, error) + + CreateStreamRequest(*kinesis.CreateStreamInput) (*request.Request, *kinesis.CreateStreamOutput) + + CreateStream(*kinesis.CreateStreamInput) (*kinesis.CreateStreamOutput, error) + + DecreaseStreamRetentionPeriodRequest(*kinesis.DecreaseStreamRetentionPeriodInput) (*request.Request, *kinesis.DecreaseStreamRetentionPeriodOutput) + + DecreaseStreamRetentionPeriod(*kinesis.DecreaseStreamRetentionPeriodInput) (*kinesis.DecreaseStreamRetentionPeriodOutput, error) + + DeleteStreamRequest(*kinesis.DeleteStreamInput) (*request.Request, *kinesis.DeleteStreamOutput) + + DeleteStream(*kinesis.DeleteStreamInput) (*kinesis.DeleteStreamOutput, error) + + DescribeStreamRequest(*kinesis.DescribeStreamInput) (*request.Request, *kinesis.DescribeStreamOutput) + + DescribeStream(*kinesis.DescribeStreamInput) (*kinesis.DescribeStreamOutput, error) + + DescribeStreamPages(*kinesis.DescribeStreamInput, func(*kinesis.DescribeStreamOutput, bool) bool) error + + DisableEnhancedMonitoringRequest(*kinesis.DisableEnhancedMonitoringInput) (*request.Request, *kinesis.EnhancedMonitoringOutput) + + DisableEnhancedMonitoring(*kinesis.DisableEnhancedMonitoringInput) (*kinesis.EnhancedMonitoringOutput, error) + + EnableEnhancedMonitoringRequest(*kinesis.EnableEnhancedMonitoringInput) (*request.Request, *kinesis.EnhancedMonitoringOutput) + + EnableEnhancedMonitoring(*kinesis.EnableEnhancedMonitoringInput) (*kinesis.EnhancedMonitoringOutput, error) + + GetRecordsRequest(*kinesis.GetRecordsInput) (*request.Request, *kinesis.GetRecordsOutput) + + GetRecords(*kinesis.GetRecordsInput) (*kinesis.GetRecordsOutput, error) + + GetShardIteratorRequest(*kinesis.GetShardIteratorInput) (*request.Request, *kinesis.GetShardIteratorOutput) + + GetShardIterator(*kinesis.GetShardIteratorInput) (*kinesis.GetShardIteratorOutput, error) + + IncreaseStreamRetentionPeriodRequest(*kinesis.IncreaseStreamRetentionPeriodInput) (*request.Request, *kinesis.IncreaseStreamRetentionPeriodOutput) + + IncreaseStreamRetentionPeriod(*kinesis.IncreaseStreamRetentionPeriodInput) (*kinesis.IncreaseStreamRetentionPeriodOutput, error) + + ListStreamsRequest(*kinesis.ListStreamsInput) (*request.Request, *kinesis.ListStreamsOutput) + + ListStreams(*kinesis.ListStreamsInput) (*kinesis.ListStreamsOutput, error) + + ListStreamsPages(*kinesis.ListStreamsInput, func(*kinesis.ListStreamsOutput, bool) bool) error + + ListTagsForStreamRequest(*kinesis.ListTagsForStreamInput) (*request.Request, *kinesis.ListTagsForStreamOutput) + + ListTagsForStream(*kinesis.ListTagsForStreamInput) (*kinesis.ListTagsForStreamOutput, error) + + MergeShardsRequest(*kinesis.MergeShardsInput) (*request.Request, *kinesis.MergeShardsOutput) + + MergeShards(*kinesis.MergeShardsInput) (*kinesis.MergeShardsOutput, error) + + PutRecordRequest(*kinesis.PutRecordInput) (*request.Request, *kinesis.PutRecordOutput) + + PutRecord(*kinesis.PutRecordInput) (*kinesis.PutRecordOutput, error) + + PutRecordsRequest(*kinesis.PutRecordsInput) (*request.Request, *kinesis.PutRecordsOutput) + + PutRecords(*kinesis.PutRecordsInput) (*kinesis.PutRecordsOutput, error) + + RemoveTagsFromStreamRequest(*kinesis.RemoveTagsFromStreamInput) (*request.Request, *kinesis.RemoveTagsFromStreamOutput) + + RemoveTagsFromStream(*kinesis.RemoveTagsFromStreamInput) (*kinesis.RemoveTagsFromStreamOutput, error) + 
+	SplitShardRequest(*kinesis.SplitShardInput) (*request.Request, *kinesis.SplitShardOutput)
+
+	SplitShard(*kinesis.SplitShardInput) (*kinesis.SplitShardOutput, error)
+}
+
+var _ KinesisAPI = (*kinesis.Kinesis)(nil)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/kinesis/service.go b/vendor/github.com/aws/aws-sdk-go/service/kinesis/service.go
new file mode 100644
index 000000000..e2fe21e77
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/kinesis/service.go
@@ -0,0 +1,89 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package kinesis
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+)
+
+// Amazon Kinesis Streams is a managed service that scales elastically for real
+// time processing of streaming big data.
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type Kinesis struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "kinesis"
+
+// New creates a new instance of the Kinesis client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create a Kinesis client from just a session.
+//     svc := kinesis.New(mySession)
+//
+//     // Create a Kinesis client with additional configuration
+//     svc := kinesis.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *Kinesis {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *Kinesis {
+	svc := &Kinesis{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   ServiceName,
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "2013-12-02",
+				JSONVersion:   "1.1",
+				TargetPrefix:  "Kinesis_20131202",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+	svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler)
+	svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler)
+	svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler)
+	svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for a Kinesis operation and runs any
+// custom request initialization.
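+
+// Note: CreateStream is asynchronous; a new stream must reach the ACTIVE state
+// before reads and writes succeed. The WaitUntilStreamExists waiter defined in
+// waiters.go below polls DescribeStream (10s delay, up to 18 attempts) until
+// StreamDescription.StreamStatus is "ACTIVE". A minimal caller-side sketch,
+// assuming the aws and session imports used in examples_test.go above:
+//
+//	svc := kinesis.New(session.New())
+//	if _, err := svc.CreateStream(&kinesis.CreateStreamInput{
+//		ShardCount: aws.Int64(1),
+//		StreamName: aws.String("my-stream"),
+//	}); err != nil {
+//		return err
+//	}
+//	if err := svc.WaitUntilStreamExists(&kinesis.DescribeStreamInput{
+//		StreamName: aws.String("my-stream"),
+//	}); err != nil {
+//		return err
+//	}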
+func (c *Kinesis) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/kinesis/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/kinesis/waiters.go new file mode 100644 index 000000000..383a2e0b7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/kinesis/waiters.go @@ -0,0 +1,30 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package kinesis + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *Kinesis) WaitUntilStreamExists(input *DescribeStreamInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeStream", + Delay: 10, + MaxAttempts: 18, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "path", + Argument: "StreamDescription.StreamStatus", + Expected: "ACTIVE", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/kms/api.go b/vendor/github.com/aws/aws-sdk-go/service/kms/api.go new file mode 100644 index 000000000..18055b42d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/kms/api.go @@ -0,0 +1,4049 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package kms provides a client for AWS Key Management Service. +package kms + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opCancelKeyDeletion = "CancelKeyDeletion" + +// CancelKeyDeletionRequest generates a "aws/request.Request" representing the +// client's request for the CancelKeyDeletion operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CancelKeyDeletion method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CancelKeyDeletionRequest method. +// req, resp := client.CancelKeyDeletionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *KMS) CancelKeyDeletionRequest(input *CancelKeyDeletionInput) (req *request.Request, output *CancelKeyDeletionOutput) { + op := &request.Operation{ + Name: opCancelKeyDeletion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelKeyDeletionInput{} + } + + req = c.newRequest(op, input, output) + output = &CancelKeyDeletionOutput{} + req.Data = output + return +} + +// Cancels the deletion of a customer master key (CMK). When this operation +// is successful, the CMK is set to the Disabled state. To enable a CMK, use +// EnableKey. +// +// For more information about scheduling and canceling deletion of a CMK, see +// Deleting Customer Master Keys (http://docs.aws.amazon.com/kms/latest/developerguide/deleting-keys.html) +// in the AWS Key Management Service Developer Guide. 
+func (c *KMS) CancelKeyDeletion(input *CancelKeyDeletionInput) (*CancelKeyDeletionOutput, error) { + req, out := c.CancelKeyDeletionRequest(input) + err := req.Send() + return out, err +} + +const opCreateAlias = "CreateAlias" + +// CreateAliasRequest generates a "aws/request.Request" representing the +// client's request for the CreateAlias operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateAlias method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateAliasRequest method. +// req, resp := client.CreateAliasRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *KMS) CreateAliasRequest(input *CreateAliasInput) (req *request.Request, output *CreateAliasOutput) { + op := &request.Operation{ + Name: opCreateAlias, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateAliasInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CreateAliasOutput{} + req.Data = output + return +} + +// Creates a display name for a customer master key. An alias can be used to +// identify a key and should be unique. The console enforces a one-to-one mapping +// between the alias and a key. An alias name can contain only alphanumeric +// characters, forward slashes (/), underscores (_), and dashes (-). An alias +// must start with the word "alias" followed by a forward slash (alias/). An +// alias that begins with "aws" after the forward slash (alias/aws...) is reserved +// by Amazon Web Services (AWS). +// +// The alias and the key it is mapped to must be in the same AWS account and +// the same region. +// +// To map an alias to a different key, call UpdateAlias. +func (c *KMS) CreateAlias(input *CreateAliasInput) (*CreateAliasOutput, error) { + req, out := c.CreateAliasRequest(input) + err := req.Send() + return out, err +} + +const opCreateGrant = "CreateGrant" + +// CreateGrantRequest generates a "aws/request.Request" representing the +// client's request for the CreateGrant operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateGrant method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateGrantRequest method. 
+// req, resp := client.CreateGrantRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *KMS) CreateGrantRequest(input *CreateGrantInput) (req *request.Request, output *CreateGrantOutput) { + op := &request.Operation{ + Name: opCreateGrant, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateGrantInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateGrantOutput{} + req.Data = output + return +} + +// Adds a grant to a key to specify who can use the key and under what conditions. +// Grants are alternate permission mechanisms to key policies. +// +// For more information about grants, see Grants (http://docs.aws.amazon.com/kms/latest/developerguide/grants.html) +// in the AWS Key Management Service Developer Guide. +func (c *KMS) CreateGrant(input *CreateGrantInput) (*CreateGrantOutput, error) { + req, out := c.CreateGrantRequest(input) + err := req.Send() + return out, err +} + +const opCreateKey = "CreateKey" + +// CreateKeyRequest generates a "aws/request.Request" representing the +// client's request for the CreateKey operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateKey method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateKeyRequest method. +// req, resp := client.CreateKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *KMS) CreateKeyRequest(input *CreateKeyInput) (req *request.Request, output *CreateKeyOutput) { + op := &request.Operation{ + Name: opCreateKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateKeyInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateKeyOutput{} + req.Data = output + return +} + +// Creates a customer master key (CMK). +// +// You can use a CMK to encrypt small amounts of data (4 KiB or less) directly, +// but CMKs are more commonly used to encrypt data encryption keys (DEKs), which +// are used to encrypt raw data. For more information about DEKs and the difference +// between CMKs and DEKs, see the following: +// +// The GenerateDataKey operation +// +// AWS Key Management Service Concepts (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html) +// in the AWS Key Management Service Developer Guide +func (c *KMS) CreateKey(input *CreateKeyInput) (*CreateKeyOutput, error) { + req, out := c.CreateKeyRequest(input) + err := req.Send() + return out, err +} + +const opDecrypt = "Decrypt" + +// DecryptRequest generates a "aws/request.Request" representing the +// client's request for the Decrypt operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the Decrypt method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DecryptRequest method. +// req, resp := client.DecryptRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *KMS) DecryptRequest(input *DecryptInput) (req *request.Request, output *DecryptOutput) { + op := &request.Operation{ + Name: opDecrypt, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DecryptInput{} + } + + req = c.newRequest(op, input, output) + output = &DecryptOutput{} + req.Data = output + return +} + +// Decrypts ciphertext. Ciphertext is plaintext that has been previously encrypted +// by using any of the following functions: +// +// GenerateDataKey +// +// GenerateDataKeyWithoutPlaintext +// +// Encrypt +// +// Note that if a caller has been granted access permissions to all keys +// (through, for example, IAM user policies that grant Decrypt permission on +// all resources), then ciphertext encrypted by using keys in other accounts +// where the key grants access to the caller can be decrypted. To remedy this, +// we recommend that you do not grant Decrypt access in an IAM user policy. +// Instead grant Decrypt access only in key policies. If you must grant Decrypt +// access in an IAM user policy, you should scope the resource to specific keys +// or to specific trusted accounts. +func (c *KMS) Decrypt(input *DecryptInput) (*DecryptOutput, error) { + req, out := c.DecryptRequest(input) + err := req.Send() + return out, err +} + +const opDeleteAlias = "DeleteAlias" + +// DeleteAliasRequest generates a "aws/request.Request" representing the +// client's request for the DeleteAlias operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteAlias method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteAliasRequest method. +// req, resp := client.DeleteAliasRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *KMS) DeleteAliasRequest(input *DeleteAliasInput) (req *request.Request, output *DeleteAliasOutput) { + op := &request.Operation{ + Name: opDeleteAlias, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAliasInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteAliasOutput{} + req.Data = output + return +} + +// Deletes the specified alias. To map an alias to a different key, call UpdateAlias. 
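+//
+// // A minimal usage sketch, assuming a configured session; the alias name
+// // below is a placeholder:
+//
+// svc := kms.New(session.New())
+//
+// _, err := svc.DeleteAlias(&kms.DeleteAliasInput{
+//     AliasName: aws.String("alias/ExampleAlias"),
+// })
+// if err != nil {
+//     log.Fatal(err)
+// }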
+func (c *KMS) DeleteAlias(input *DeleteAliasInput) (*DeleteAliasOutput, error) { + req, out := c.DeleteAliasRequest(input) + err := req.Send() + return out, err +} + +const opDescribeKey = "DescribeKey" + +// DescribeKeyRequest generates a "aws/request.Request" representing the +// client's request for the DescribeKey operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeKey method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeKeyRequest method. +// req, resp := client.DescribeKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *KMS) DescribeKeyRequest(input *DescribeKeyInput) (req *request.Request, output *DescribeKeyOutput) { + op := &request.Operation{ + Name: opDescribeKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeKeyInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeKeyOutput{} + req.Data = output + return +} + +// Provides detailed information about the specified customer master key. +func (c *KMS) DescribeKey(input *DescribeKeyInput) (*DescribeKeyOutput, error) { + req, out := c.DescribeKeyRequest(input) + err := req.Send() + return out, err +} + +const opDisableKey = "DisableKey" + +// DisableKeyRequest generates a "aws/request.Request" representing the +// client's request for the DisableKey operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DisableKey method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DisableKeyRequest method. +// req, resp := client.DisableKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *KMS) DisableKeyRequest(input *DisableKeyInput) (req *request.Request, output *DisableKeyOutput) { + op := &request.Operation{ + Name: opDisableKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableKeyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DisableKeyOutput{} + req.Data = output + return +} + +// Sets the state of a customer master key (CMK) to disabled, thereby preventing +// its use for cryptographic operations. 
For more information about how key +// state affects the use of a CMK, see How Key State Affects the Use of a Customer +// Master Key (http://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) +// in the AWS Key Management Service Developer Guide. +func (c *KMS) DisableKey(input *DisableKeyInput) (*DisableKeyOutput, error) { + req, out := c.DisableKeyRequest(input) + err := req.Send() + return out, err +} + +const opDisableKeyRotation = "DisableKeyRotation" + +// DisableKeyRotationRequest generates a "aws/request.Request" representing the +// client's request for the DisableKeyRotation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DisableKeyRotation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DisableKeyRotationRequest method. +// req, resp := client.DisableKeyRotationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *KMS) DisableKeyRotationRequest(input *DisableKeyRotationInput) (req *request.Request, output *DisableKeyRotationOutput) { + op := &request.Operation{ + Name: opDisableKeyRotation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableKeyRotationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DisableKeyRotationOutput{} + req.Data = output + return +} + +// Disables rotation of the specified key. +func (c *KMS) DisableKeyRotation(input *DisableKeyRotationInput) (*DisableKeyRotationOutput, error) { + req, out := c.DisableKeyRotationRequest(input) + err := req.Send() + return out, err +} + +const opEnableKey = "EnableKey" + +// EnableKeyRequest generates a "aws/request.Request" representing the +// client's request for the EnableKey operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the EnableKey method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EnableKeyRequest method. 
+// req, resp := client.EnableKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *KMS) EnableKeyRequest(input *EnableKeyInput) (req *request.Request, output *EnableKeyOutput) { + op := &request.Operation{ + Name: opEnableKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableKeyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &EnableKeyOutput{} + req.Data = output + return +} + +// Marks a key as enabled, thereby permitting its use. +func (c *KMS) EnableKey(input *EnableKeyInput) (*EnableKeyOutput, error) { + req, out := c.EnableKeyRequest(input) + err := req.Send() + return out, err +} + +const opEnableKeyRotation = "EnableKeyRotation" + +// EnableKeyRotationRequest generates a "aws/request.Request" representing the +// client's request for the EnableKeyRotation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the EnableKeyRotation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EnableKeyRotationRequest method. +// req, resp := client.EnableKeyRotationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *KMS) EnableKeyRotationRequest(input *EnableKeyRotationInput) (req *request.Request, output *EnableKeyRotationOutput) { + op := &request.Operation{ + Name: opEnableKeyRotation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableKeyRotationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &EnableKeyRotationOutput{} + req.Data = output + return +} + +// Enables rotation of the specified customer master key. +func (c *KMS) EnableKeyRotation(input *EnableKeyRotationInput) (*EnableKeyRotationOutput, error) { + req, out := c.EnableKeyRotationRequest(input) + err := req.Send() + return out, err +} + +const opEncrypt = "Encrypt" + +// EncryptRequest generates a "aws/request.Request" representing the +// client's request for the Encrypt operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the Encrypt method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EncryptRequest method. 
+// req, resp := client.EncryptRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *KMS) EncryptRequest(input *EncryptInput) (req *request.Request, output *EncryptOutput) { + op := &request.Operation{ + Name: opEncrypt, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EncryptInput{} + } + + req = c.newRequest(op, input, output) + output = &EncryptOutput{} + req.Data = output + return +} + +// Encrypts plaintext into ciphertext by using a customer master key. The Encrypt +// function has two primary use cases: +// +// You can encrypt up to 4 KB of arbitrary data such as an RSA key, a database +// password, or other sensitive customer information. +// +// If you are moving encrypted data from one region to another, you can use +// this API to encrypt in the new region the plaintext data key that was used +// to encrypt the data in the original region. This provides you with an encrypted +// copy of the data key that can be decrypted in the new region and used there +// to decrypt the encrypted data. +// +// Unless you are moving encrypted data from one region to another, you don't +// use this function to encrypt a generated data key within a region. You retrieve +// data keys already encrypted by calling the GenerateDataKey or GenerateDataKeyWithoutPlaintext +// function. Data keys don't need to be encrypted again by calling Encrypt. +// +// If you want to encrypt data locally in your application, you can use the +// GenerateDataKey function to return a plaintext data encryption key and a +// copy of the key encrypted under the customer master key (CMK) of your choosing. +func (c *KMS) Encrypt(input *EncryptInput) (*EncryptOutput, error) { + req, out := c.EncryptRequest(input) + err := req.Send() + return out, err +} + +const opGenerateDataKey = "GenerateDataKey" + +// GenerateDataKeyRequest generates a "aws/request.Request" representing the +// client's request for the GenerateDataKey operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GenerateDataKey method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GenerateDataKeyRequest method. +// req, resp := client.GenerateDataKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *KMS) GenerateDataKeyRequest(input *GenerateDataKeyInput) (req *request.Request, output *GenerateDataKeyOutput) { + op := &request.Operation{ + Name: opGenerateDataKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GenerateDataKeyInput{} + } + + req = c.newRequest(op, input, output) + output = &GenerateDataKeyOutput{} + req.Data = output + return +} + +// Generates a data key that you can use in your application to locally encrypt +// data. This call returns a plaintext version of the key in the Plaintext field +// of the response object and an encrypted copy of the key in the CiphertextBlob +// field. 
The key is encrypted by using the master key specified by the KeyId +// field. To decrypt the encrypted key, pass it to the Decrypt API. +// +// We recommend that you use the following pattern to locally encrypt data: +// call the GenerateDataKey API, use the key returned in the Plaintext response +// field to locally encrypt data, and then erase the plaintext data key from +// memory. Store the encrypted data key (contained in the CiphertextBlob field) +// alongside of the locally encrypted data. +// +// You should not call the Encrypt function to re-encrypt your data keys within +// a region. GenerateDataKey always returns the data key encrypted and tied +// to the customer master key that will be used to decrypt it. There is no need +// to decrypt it twice. +// +// If you decide to use the optional EncryptionContext parameter, you must +// also store the context in full or at least store enough information along +// with the encrypted data to be able to reconstruct the context when submitting +// the ciphertext to the Decrypt API. It is a good practice to choose a context +// that you can reconstruct on the fly to better secure the ciphertext. For +// more information about how this parameter is used, see Encryption Context +// (http://docs.aws.amazon.com/kms/latest/developerguide/encrypt-context.html). +// +// To decrypt data, pass the encrypted data key to the Decrypt API. Decrypt +// uses the associated master key to decrypt the encrypted data key and returns +// it as plaintext. Use the plaintext data key to locally decrypt your data +// and then erase the key from memory. You must specify the encryption context, +// if any, that you specified when you generated the key. The encryption context +// is logged by CloudTrail, and you can use this log to help track the use of +// particular data. +func (c *KMS) GenerateDataKey(input *GenerateDataKeyInput) (*GenerateDataKeyOutput, error) { + req, out := c.GenerateDataKeyRequest(input) + err := req.Send() + return out, err +} + +const opGenerateDataKeyWithoutPlaintext = "GenerateDataKeyWithoutPlaintext" + +// GenerateDataKeyWithoutPlaintextRequest generates a "aws/request.Request" representing the +// client's request for the GenerateDataKeyWithoutPlaintext operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GenerateDataKeyWithoutPlaintext method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GenerateDataKeyWithoutPlaintextRequest method. 
+// req, resp := client.GenerateDataKeyWithoutPlaintextRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *KMS) GenerateDataKeyWithoutPlaintextRequest(input *GenerateDataKeyWithoutPlaintextInput) (req *request.Request, output *GenerateDataKeyWithoutPlaintextOutput) { + op := &request.Operation{ + Name: opGenerateDataKeyWithoutPlaintext, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GenerateDataKeyWithoutPlaintextInput{} + } + + req = c.newRequest(op, input, output) + output = &GenerateDataKeyWithoutPlaintextOutput{} + req.Data = output + return +} + +// Returns a data key encrypted by a customer master key without the plaintext +// copy of that key. Otherwise, this API functions exactly like GenerateDataKey. +// You can use this API to, for example, satisfy an audit requirement that an +// encrypted key be made available without exposing the plaintext copy of that +// key. +func (c *KMS) GenerateDataKeyWithoutPlaintext(input *GenerateDataKeyWithoutPlaintextInput) (*GenerateDataKeyWithoutPlaintextOutput, error) { + req, out := c.GenerateDataKeyWithoutPlaintextRequest(input) + err := req.Send() + return out, err +} + +const opGenerateRandom = "GenerateRandom" + +// GenerateRandomRequest generates a "aws/request.Request" representing the +// client's request for the GenerateRandom operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GenerateRandom method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GenerateRandomRequest method. +// req, resp := client.GenerateRandomRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *KMS) GenerateRandomRequest(input *GenerateRandomInput) (req *request.Request, output *GenerateRandomOutput) { + op := &request.Operation{ + Name: opGenerateRandom, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GenerateRandomInput{} + } + + req = c.newRequest(op, input, output) + output = &GenerateRandomOutput{} + req.Data = output + return +} + +// Generates an unpredictable byte string. +func (c *KMS) GenerateRandom(input *GenerateRandomInput) (*GenerateRandomOutput, error) { + req, out := c.GenerateRandomRequest(input) + err := req.Send() + return out, err +} + +const opGetKeyPolicy = "GetKeyPolicy" + +// GetKeyPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetKeyPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetKeyPolicy method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetKeyPolicyRequest method. +// req, resp := client.GetKeyPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *KMS) GetKeyPolicyRequest(input *GetKeyPolicyInput) (req *request.Request, output *GetKeyPolicyOutput) { + op := &request.Operation{ + Name: opGetKeyPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetKeyPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetKeyPolicyOutput{} + req.Data = output + return +} + +// Retrieves a policy attached to the specified key. +func (c *KMS) GetKeyPolicy(input *GetKeyPolicyInput) (*GetKeyPolicyOutput, error) { + req, out := c.GetKeyPolicyRequest(input) + err := req.Send() + return out, err +} + +const opGetKeyRotationStatus = "GetKeyRotationStatus" + +// GetKeyRotationStatusRequest generates a "aws/request.Request" representing the +// client's request for the GetKeyRotationStatus operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetKeyRotationStatus method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetKeyRotationStatusRequest method. +// req, resp := client.GetKeyRotationStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *KMS) GetKeyRotationStatusRequest(input *GetKeyRotationStatusInput) (req *request.Request, output *GetKeyRotationStatusOutput) { + op := &request.Operation{ + Name: opGetKeyRotationStatus, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetKeyRotationStatusInput{} + } + + req = c.newRequest(op, input, output) + output = &GetKeyRotationStatusOutput{} + req.Data = output + return +} + +// Retrieves a Boolean value that indicates whether key rotation is enabled +// for the specified key. +func (c *KMS) GetKeyRotationStatus(input *GetKeyRotationStatusInput) (*GetKeyRotationStatusOutput, error) { + req, out := c.GetKeyRotationStatusRequest(input) + err := req.Send() + return out, err +} + +const opListAliases = "ListAliases" + +// ListAliasesRequest generates a "aws/request.Request" representing the +// client's request for the ListAliases operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListAliases method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListAliasesRequest method. 
+// req, resp := client.ListAliasesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *KMS) ListAliasesRequest(input *ListAliasesInput) (req *request.Request, output *ListAliasesOutput) { + op := &request.Operation{ + Name: opListAliases, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker"}, + LimitToken: "Limit", + TruncationToken: "Truncated", + }, + } + + if input == nil { + input = &ListAliasesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListAliasesOutput{} + req.Data = output + return +} + +// Lists all of the key aliases in the account. +func (c *KMS) ListAliases(input *ListAliasesInput) (*ListAliasesOutput, error) { + req, out := c.ListAliasesRequest(input) + err := req.Send() + return out, err +} + +// ListAliasesPages iterates over the pages of a ListAliases operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAliases method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAliases operation. +// pageNum := 0 +// err := client.ListAliasesPages(params, +// func(page *ListAliasesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *KMS) ListAliasesPages(input *ListAliasesInput, fn func(p *ListAliasesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListAliasesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListAliasesOutput), lastPage) + }) +} + +const opListGrants = "ListGrants" + +// ListGrantsRequest generates a "aws/request.Request" representing the +// client's request for the ListGrants operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListGrants method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListGrantsRequest method. +// req, resp := client.ListGrantsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *KMS) ListGrantsRequest(input *ListGrantsInput) (req *request.Request, output *ListGrantsResponse) { + op := &request.Operation{ + Name: opListGrants, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker"}, + LimitToken: "Limit", + TruncationToken: "Truncated", + }, + } + + if input == nil { + input = &ListGrantsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListGrantsResponse{} + req.Data = output + return +} + +// List the grants for a specified key. 
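+//
+// // A minimal usage sketch, assuming a configured session and a placeholder
+// // key ID. ListGrantsPages (below) handles pagination; the Grants and
+// // GrantId field names are assumed from the generated response types:
+//
+// svc := kms.New(session.New())
+//
+// err := svc.ListGrantsPages(&kms.ListGrantsInput{
+//     KeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
+// }, func(page *kms.ListGrantsResponse, lastPage bool) bool {
+//     for _, g := range page.Grants {
+//         fmt.Println(aws.StringValue(g.GrantId))
+//     }
+//     return true // continue through all pages
+// })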
+func (c *KMS) ListGrants(input *ListGrantsInput) (*ListGrantsResponse, error) { + req, out := c.ListGrantsRequest(input) + err := req.Send() + return out, err +} + +// ListGrantsPages iterates over the pages of a ListGrants operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListGrants method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListGrants operation. +// pageNum := 0 +// err := client.ListGrantsPages(params, +// func(page *ListGrantsResponse, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *KMS) ListGrantsPages(input *ListGrantsInput, fn func(p *ListGrantsResponse, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListGrantsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListGrantsResponse), lastPage) + }) +} + +const opListKeyPolicies = "ListKeyPolicies" + +// ListKeyPoliciesRequest generates a "aws/request.Request" representing the +// client's request for the ListKeyPolicies operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListKeyPolicies method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListKeyPoliciesRequest method. +// req, resp := client.ListKeyPoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *KMS) ListKeyPoliciesRequest(input *ListKeyPoliciesInput) (req *request.Request, output *ListKeyPoliciesOutput) { + op := &request.Operation{ + Name: opListKeyPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker"}, + LimitToken: "Limit", + TruncationToken: "Truncated", + }, + } + + if input == nil { + input = &ListKeyPoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListKeyPoliciesOutput{} + req.Data = output + return +} + +// Retrieves a list of policies attached to a key. +func (c *KMS) ListKeyPolicies(input *ListKeyPoliciesInput) (*ListKeyPoliciesOutput, error) { + req, out := c.ListKeyPoliciesRequest(input) + err := req.Send() + return out, err +} + +// ListKeyPoliciesPages iterates over the pages of a ListKeyPolicies operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListKeyPolicies method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListKeyPolicies operation. 
+// pageNum := 0 +// err := client.ListKeyPoliciesPages(params, +// func(page *ListKeyPoliciesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *KMS) ListKeyPoliciesPages(input *ListKeyPoliciesInput, fn func(p *ListKeyPoliciesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListKeyPoliciesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListKeyPoliciesOutput), lastPage) + }) +} + +const opListKeys = "ListKeys" + +// ListKeysRequest generates a "aws/request.Request" representing the +// client's request for the ListKeys operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListKeys method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListKeysRequest method. +// req, resp := client.ListKeysRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *KMS) ListKeysRequest(input *ListKeysInput) (req *request.Request, output *ListKeysOutput) { + op := &request.Operation{ + Name: opListKeys, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker"}, + LimitToken: "Limit", + TruncationToken: "Truncated", + }, + } + + if input == nil { + input = &ListKeysInput{} + } + + req = c.newRequest(op, input, output) + output = &ListKeysOutput{} + req.Data = output + return +} + +// Lists the customer master keys. +func (c *KMS) ListKeys(input *ListKeysInput) (*ListKeysOutput, error) { + req, out := c.ListKeysRequest(input) + err := req.Send() + return out, err +} + +// ListKeysPages iterates over the pages of a ListKeys operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListKeys method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListKeys operation. +// pageNum := 0 +// err := client.ListKeysPages(params, +// func(page *ListKeysOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *KMS) ListKeysPages(input *ListKeysInput, fn func(p *ListKeysOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListKeysRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListKeysOutput), lastPage) + }) +} + +const opListRetirableGrants = "ListRetirableGrants" + +// ListRetirableGrantsRequest generates a "aws/request.Request" representing the +// client's request for the ListRetirableGrants operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListRetirableGrants method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListRetirableGrantsRequest method. +// req, resp := client.ListRetirableGrantsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *KMS) ListRetirableGrantsRequest(input *ListRetirableGrantsInput) (req *request.Request, output *ListGrantsResponse) { + op := &request.Operation{ + Name: opListRetirableGrants, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListRetirableGrantsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListGrantsResponse{} + req.Data = output + return +} + +// Returns a list of all grants for which the grant's RetiringPrincipal matches +// the one specified. +// +// A typical use is to list all grants that you are able to retire. To retire +// a grant, use RetireGrant. +func (c *KMS) ListRetirableGrants(input *ListRetirableGrantsInput) (*ListGrantsResponse, error) { + req, out := c.ListRetirableGrantsRequest(input) + err := req.Send() + return out, err +} + +const opPutKeyPolicy = "PutKeyPolicy" + +// PutKeyPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutKeyPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutKeyPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutKeyPolicyRequest method. +// req, resp := client.PutKeyPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *KMS) PutKeyPolicyRequest(input *PutKeyPolicyInput) (req *request.Request, output *PutKeyPolicyOutput) { + op := &request.Operation{ + Name: opPutKeyPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutKeyPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutKeyPolicyOutput{} + req.Data = output + return +} + +// Attaches a key policy to the specified customer master key (CMK). +// +// For more information about key policies, see Key Policies (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) +// in the AWS Key Management Service Developer Guide. 
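+//
+// // A minimal usage sketch, assuming a configured session, a placeholder key
+// // ID, and a policy document already serialized in policyJSON:
+//
+// svc := kms.New(session.New())
+//
+// _, err := svc.PutKeyPolicy(&kms.PutKeyPolicyInput{
+//     KeyId:      aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
+//     PolicyName: aws.String("default"),
+//     Policy:     aws.String(policyJSON),
+// })
+// if err != nil {
+//     log.Fatal(err)
+// }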
+func (c *KMS) PutKeyPolicy(input *PutKeyPolicyInput) (*PutKeyPolicyOutput, error) { + req, out := c.PutKeyPolicyRequest(input) + err := req.Send() + return out, err +} + +const opReEncrypt = "ReEncrypt" + +// ReEncryptRequest generates a "aws/request.Request" representing the +// client's request for the ReEncrypt operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ReEncrypt method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ReEncryptRequest method. +// req, resp := client.ReEncryptRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *KMS) ReEncryptRequest(input *ReEncryptInput) (req *request.Request, output *ReEncryptOutput) { + op := &request.Operation{ + Name: opReEncrypt, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReEncryptInput{} + } + + req = c.newRequest(op, input, output) + output = &ReEncryptOutput{} + req.Data = output + return +} + +// Encrypts data on the server side with a new customer master key without exposing +// the plaintext of the data on the client side. The data is first decrypted +// and then encrypted. This operation can also be used to change the encryption +// context of a ciphertext. +// +// Unlike other actions, ReEncrypt is authorized twice - once as ReEncryptFrom +// on the source key and once as ReEncryptTo on the destination key. We therefore +// recommend that you include the "action":"kms:ReEncrypt*" statement in your +// key policies to permit re-encryption from or to the key. The statement is +// included automatically when you authorize use of the key through the console +// but must be included manually when you set a policy by using the PutKeyPolicy +// function. +func (c *KMS) ReEncrypt(input *ReEncryptInput) (*ReEncryptOutput, error) { + req, out := c.ReEncryptRequest(input) + err := req.Send() + return out, err +} + +const opRetireGrant = "RetireGrant" + +// RetireGrantRequest generates a "aws/request.Request" representing the +// client's request for the RetireGrant operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RetireGrant method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RetireGrantRequest method. 
+// req, resp := client.RetireGrantRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *KMS) RetireGrantRequest(input *RetireGrantInput) (req *request.Request, output *RetireGrantOutput) {
+ op := &request.Operation{
+ Name: opRetireGrant,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &RetireGrantInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &RetireGrantOutput{}
+ req.Data = output
+ return
+}
+
+// Retires a grant. To clean up, you can retire a grant when you're done
+// using it. You should revoke a grant when you intend to actively deny operations
+// that depend on it. The following are permitted to call this API:
+//
+// The account that created the grant
+//
+// The RetiringPrincipal, if present
+//
+// The GranteePrincipal, if RetireGrant is a grantee operation
+//
+// The grant to retire must be identified by its grant token or by a combination
+// of the key ARN and the grant ID. A grant token is a unique variable-length
+// base64-encoded string. A grant ID is a 64-character unique identifier of
+// a grant. Both are returned by the CreateGrant function.
+func (c *KMS) RetireGrant(input *RetireGrantInput) (*RetireGrantOutput, error) {
+ req, out := c.RetireGrantRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opRevokeGrant = "RevokeGrant"
+
+// RevokeGrantRequest generates a "aws/request.Request" representing the
+// client's request for the RevokeGrant operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the RevokeGrant method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the RevokeGrantRequest method.
+// req, resp := client.RevokeGrantRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *KMS) RevokeGrantRequest(input *RevokeGrantInput) (req *request.Request, output *RevokeGrantOutput) {
+ op := &request.Operation{
+ Name: opRevokeGrant,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &RevokeGrantInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &RevokeGrantOutput{}
+ req.Data = output
+ return
+}
+
+// Revokes a grant. You can revoke a grant to actively deny operations that
+// depend on it.
+func (c *KMS) RevokeGrant(input *RevokeGrantInput) (*RevokeGrantOutput, error) {
+ req, out := c.RevokeGrantRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opScheduleKeyDeletion = "ScheduleKeyDeletion"
+
+// ScheduleKeyDeletionRequest generates a "aws/request.Request" representing the
+// client's request for the ScheduleKeyDeletion operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ScheduleKeyDeletion method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ScheduleKeyDeletionRequest method. +// req, resp := client.ScheduleKeyDeletionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *KMS) ScheduleKeyDeletionRequest(input *ScheduleKeyDeletionInput) (req *request.Request, output *ScheduleKeyDeletionOutput) { + op := &request.Operation{ + Name: opScheduleKeyDeletion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ScheduleKeyDeletionInput{} + } + + req = c.newRequest(op, input, output) + output = &ScheduleKeyDeletionOutput{} + req.Data = output + return +} + +// Schedules the deletion of a customer master key (CMK). You may provide a +// waiting period, specified in days, before deletion occurs. If you do not +// provide a waiting period, the default period of 30 days is used. When this +// operation is successful, the state of the CMK changes to PendingDeletion. +// Before the waiting period ends, you can use CancelKeyDeletion to cancel the +// deletion of the CMK. After the waiting period ends, AWS KMS deletes the CMK +// and all AWS KMS data associated with it, including all aliases that point +// to it. +// +// Deleting a CMK is a destructive and potentially dangerous operation. When +// a CMK is deleted, all data that was encrypted under the CMK is rendered unrecoverable. +// To restrict the use of a CMK without deleting it, use DisableKey. +// +// For more information about scheduling a CMK for deletion, see Deleting +// Customer Master Keys (http://docs.aws.amazon.com/kms/latest/developerguide/deleting-keys.html) +// in the AWS Key Management Service Developer Guide. +func (c *KMS) ScheduleKeyDeletion(input *ScheduleKeyDeletionInput) (*ScheduleKeyDeletionOutput, error) { + req, out := c.ScheduleKeyDeletionRequest(input) + err := req.Send() + return out, err +} + +const opUpdateAlias = "UpdateAlias" + +// UpdateAliasRequest generates a "aws/request.Request" representing the +// client's request for the UpdateAlias operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateAlias method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateAliasRequest method. 
+// req, resp := client.UpdateAliasRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *KMS) UpdateAliasRequest(input *UpdateAliasInput) (req *request.Request, output *UpdateAliasOutput) {
+ op := &request.Operation{
+ Name: opUpdateAlias,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &UpdateAliasInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &UpdateAliasOutput{}
+ req.Data = output
+ return
+}
+
+// Updates an alias to map it to a different key.
+//
+// An alias is not a property of a key. Therefore, an alias can be mapped to
+// and unmapped from an existing key without changing the properties of the
+// key.
+//
+// An alias name can contain only alphanumeric characters, forward slashes
+// (/), underscores (_), and dashes (-). An alias must start with the word "alias"
+// followed by a forward slash (alias/). An alias that begins with "aws" after
+// the forward slash (alias/aws...) is reserved by Amazon Web Services (AWS).
+//
+// The alias and the key it is mapped to must be in the same AWS account and
+// the same region.
+func (c *KMS) UpdateAlias(input *UpdateAliasInput) (*UpdateAliasOutput, error) {
+ req, out := c.UpdateAliasRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opUpdateKeyDescription = "UpdateKeyDescription"
+
+// UpdateKeyDescriptionRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateKeyDescription operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the UpdateKeyDescription method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the UpdateKeyDescriptionRequest method.
+// req, resp := client.UpdateKeyDescriptionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *KMS) UpdateKeyDescriptionRequest(input *UpdateKeyDescriptionInput) (req *request.Request, output *UpdateKeyDescriptionOutput) {
+ op := &request.Operation{
+ Name: opUpdateKeyDescription,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &UpdateKeyDescriptionInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &UpdateKeyDescriptionOutput{}
+ req.Data = output
+ return
+}
+
+// Updates the description of a key.
+func (c *KMS) UpdateKeyDescription(input *UpdateKeyDescriptionInput) (*UpdateKeyDescriptionOutput, error) {
+ req, out := c.UpdateKeyDescriptionRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// Contains information about an alias.
+type AliasListEntry struct {
+ _ struct{} `type:"structure"`
+
+ // String that contains the alias ARN.
+ AliasArn *string `min:"20" type:"string"`
+
+ // String that contains the alias.
+ AliasName *string `min:"1" type:"string"` + + // String that contains the key identifier pointed to by the alias. + TargetKeyId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s AliasListEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AliasListEntry) GoString() string { + return s.String() +} + +type CancelKeyDeletionInput struct { + _ struct{} `type:"structure"` + + // The unique identifier for the customer master key (CMK) for which to cancel + // deletion. + // + // To specify this value, use the unique key ID or the Amazon Resource Name + // (ARN) of the CMK. Examples: + // + // Unique key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To obtain the unique key ID and key ARN for a given CMK, use ListKeys + // or DescribeKey. + KeyId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelKeyDeletionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelKeyDeletionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CancelKeyDeletionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CancelKeyDeletionInput"} + if s.KeyId == nil { + invalidParams.Add(request.NewErrParamRequired("KeyId")) + } + if s.KeyId != nil && len(*s.KeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CancelKeyDeletionOutput struct { + _ struct{} `type:"structure"` + + // The unique identifier of the master key for which deletion is canceled. + KeyId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CancelKeyDeletionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelKeyDeletionOutput) GoString() string { + return s.String() +} + +type CreateAliasInput struct { + _ struct{} `type:"structure"` + + // String that contains the display name. The name must start with the word + // "alias" followed by a forward slash (alias/). Aliases that begin with "alias/AWS" + // are reserved. + AliasName *string `min:"1" type:"string" required:"true"` + + // An identifier of the key for which you are creating the alias. This value + // cannot be another alias but can be a globally unique identifier or a fully + // specified ARN to a key. + // + // Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + TargetKeyId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateAliasInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAliasInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
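+//
+// A minimal usage sketch (illustrative, not generated code; assumes an
+// already-configured *KMS client named "svc" and placeholder identifiers):
+//
+//    input := &kms.CreateAliasInput{
+//        AliasName:   aws.String("alias/my-app-key"),
+//        TargetKeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
+//    }
+//    if err := input.Validate(); err != nil {
+//        // reject invalid parameters before any network call is made
+//    }
+//    _, err := svc.CreateAlias(input)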
+func (s *CreateAliasInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateAliasInput"} + if s.AliasName == nil { + invalidParams.Add(request.NewErrParamRequired("AliasName")) + } + if s.AliasName != nil && len(*s.AliasName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AliasName", 1)) + } + if s.TargetKeyId == nil { + invalidParams.Add(request.NewErrParamRequired("TargetKeyId")) + } + if s.TargetKeyId != nil && len(*s.TargetKeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TargetKeyId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateAliasOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateAliasOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAliasOutput) GoString() string { + return s.String() +} + +type CreateGrantInput struct { + _ struct{} `type:"structure"` + + // The conditions under which the operations permitted by the grant are allowed. + // + // You can use this value to allow the operations permitted by the grant only + // when a specified encryption context is present. For more information, see + // Encryption Context (http://docs.aws.amazon.com/kms/latest/developerguide/encrypt-context.html) + // in the AWS Key Management Service Developer Guide. + Constraints *GrantConstraints `type:"structure"` + + // A list of grant tokens. + // + // For more information, see Grant Tokens (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#grant_token) + // in the AWS Key Management Service Developer Guide. + GrantTokens []*string `type:"list"` + + // The principal that is given permission to perform the operations that the + // grant permits. + // + // To specify the principal, use the Amazon Resource Name (ARN) (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // of an AWS principal. Valid AWS principals include AWS accounts (root), IAM + // users, federated users, and assumed role users. For examples of the ARN syntax + // to use for specifying a principal, see AWS Identity and Access Management + // (IAM) (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam) + // in the Example ARNs section of the AWS General Reference. + GranteePrincipal *string `min:"1" type:"string" required:"true"` + + // The unique identifier for the customer master key (CMK) that the grant applies + // to. + // + // To specify this value, use the globally unique key ID or the Amazon Resource + // Name (ARN) of the key. Examples: + // + // Globally unique key ID: 12345678-1234-1234-1234-123456789012 + // + // Key ARN: arn:aws:kms:us-west-2:123456789012:key/12345678-1234-1234-1234-123456789012 + KeyId *string `min:"1" type:"string" required:"true"` + + // A friendly name for identifying the grant. Use this value to prevent unintended + // creation of duplicate grants when retrying this request. + // + // When this value is absent, all CreateGrant requests result in a new grant + // with a unique GrantId even if all the supplied parameters are identical. + // This can result in unintended duplicates when you retry the CreateGrant request. + // + // When this value is present, you can retry a CreateGrant request with identical + // parameters; if the grant already exists, the original GrantId is returned + // without creating a new grant. 
Note that the returned grant token is unique
+	// with every CreateGrant request, even when a duplicate GrantId is returned.
+	// All grant tokens obtained in this way can be used interchangeably.
+	Name *string `min:"1" type:"string"`
+
+	// A list of operations that the grant permits. The list can contain any combination
+	// of one or more of the following values:
+	//
+	// Decrypt
+	//
+	// Encrypt
+	//
+	// GenerateDataKey
+	//
+	// GenerateDataKeyWithoutPlaintext
+	//
+	// ReEncryptFrom (http://docs.aws.amazon.com/kms/latest/APIReference/API_ReEncrypt.html)
+	//
+	// ReEncryptTo (http://docs.aws.amazon.com/kms/latest/APIReference/API_ReEncrypt.html)
+	//
+	// CreateGrant
+	//
+	// RetireGrant
+	//
+	// DescribeKey
+	Operations []*string `type:"list"`
+
+	// The principal that is given permission to retire the grant by using the
+	// RetireGrant operation.
+	//
+	// To specify the principal, use the Amazon Resource Name (ARN) (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+	// of an AWS principal. Valid AWS principals include AWS accounts (root), IAM
+	// users, federated users, and assumed role users. For examples of the ARN syntax
+	// to use for specifying a principal, see AWS Identity and Access Management
+	// (IAM) (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam)
+	// in the Example ARNs section of the AWS General Reference.
+	RetiringPrincipal *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateGrantInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateGrantInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateGrantInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateGrantInput"}
+	if s.GranteePrincipal == nil {
+		invalidParams.Add(request.NewErrParamRequired("GranteePrincipal"))
+	}
+	if s.GranteePrincipal != nil && len(*s.GranteePrincipal) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("GranteePrincipal", 1))
+	}
+	if s.KeyId == nil {
+		invalidParams.Add(request.NewErrParamRequired("KeyId"))
+	}
+	if s.KeyId != nil && len(*s.KeyId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("KeyId", 1))
+	}
+	if s.Name != nil && len(*s.Name) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
+	}
+	if s.RetiringPrincipal != nil && len(*s.RetiringPrincipal) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("RetiringPrincipal", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type CreateGrantOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The unique identifier for the grant.
+	//
+	// You can use the GrantId in a subsequent RetireGrant or RevokeGrant operation.
+	GrantId *string `min:"1" type:"string"`
+
+	// The grant token.
+	//
+	// For more information, see Grant Tokens (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#grant_token)
+	// in the AWS Key Management Service Developer Guide.
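+	//
+	// (Illustrative sketch: a fresh grant token can be passed in the GrantTokens
+	// field of a follow-up request so the grant takes effect immediately;
+	// "svc", "grantInput", and "blob" are assumed to exist:
+	//
+	//    out, _ := svc.CreateGrant(grantInput)
+	//    dec, _ := svc.Decrypt(&kms.DecryptInput{
+	//        CiphertextBlob: blob,
+	//        GrantTokens:    []*string{out.GrantToken},
+	//    })
+	//    fmt.Println(dec.Plaintext)
+	// )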
+ GrantToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateGrantOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateGrantOutput) GoString() string { + return s.String() +} + +type CreateKeyInput struct { + _ struct{} `type:"structure"` + + // A flag to indicate whether to bypass the key policy lockout safety check. + // + // Setting this value to true increases the likelihood that the CMK becomes + // unmanageable. Do not set this value to true indiscriminately. + // + // For more information, refer to the scenario in the Default Key Policy (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) + // section in the AWS Key Management Service Developer Guide. + // + // Use this parameter only when you include a policy in the request and you + // intend to prevent the principal making the request from making a subsequent + // PutKeyPolicy request on the CMK. + // + // The default value is false. + BypassPolicyLockoutSafetyCheck *bool `type:"boolean"` + + // A description of the CMK. + // + // Use a description that helps you decide whether the CMK is appropriate for + // a task. + Description *string `type:"string"` + + // The intended use of the CMK. + // + // You can use CMKs only for symmetric encryption and decryption. + KeyUsage *string `type:"string" enum:"KeyUsageType"` + + // The key policy to attach to the CMK. + // + // If you specify a key policy, it must meet the following criteria: + // + // It must allow the principal making the CreateKey request to make a subsequent + // PutKeyPolicy request on the CMK. This reduces the likelihood that the CMK + // becomes unmanageable. For more information, refer to the scenario in the + // Default Key Policy (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) + // section in the AWS Key Management Service Developer Guide. + // + // The principal(s) specified in the key policy must exist and be visible + // to AWS KMS. When you create a new AWS principal (for example, an IAM user + // or role), you might need to enforce a delay before specifying the new principal + // in a key policy because the new principal might not immediately be visible + // to AWS KMS. For more information, see Changes that I make are not always + // immediately visible (http://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) + // in the IAM User Guide. + // + // If you do not specify a policy, AWS KMS attaches a default key policy + // to the CMK. For more information, see Default Key Policy (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default) + // in the AWS Key Management Service Developer Guide. + // + // The policy size limit is 32 KiB (32768 bytes). + Policy *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateKeyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
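+//
+// A minimal usage sketch (illustrative, not generated code; "svc" is an
+// assumed, already-configured *KMS client):
+//
+//    out, err := svc.CreateKey(&kms.CreateKeyInput{
+//        Description: aws.String("master key for my-app"), // placeholder
+//    })
+//    if err == nil {
+//        fmt.Println(*out.KeyMetadata.KeyId) // ID of the new CMK
+//    }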
+func (s *CreateKeyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateKeyInput"} + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateKeyOutput struct { + _ struct{} `type:"structure"` + + // Metadata associated with the CMK. + KeyMetadata *KeyMetadata `type:"structure"` +} + +// String returns the string representation +func (s CreateKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateKeyOutput) GoString() string { + return s.String() +} + +type DecryptInput struct { + _ struct{} `type:"structure"` + + // Ciphertext to be decrypted. The blob includes metadata. + // + // CiphertextBlob is automatically base64 encoded/decoded by the SDK. + CiphertextBlob []byte `min:"1" type:"blob" required:"true"` + + // The encryption context. If this was specified in the Encrypt function, it + // must be specified here or the decryption operation will fail. For more information, + // see Encryption Context (http://docs.aws.amazon.com/kms/latest/developerguide/encrypt-context.html). + EncryptionContext map[string]*string `type:"map"` + + // A list of grant tokens. + // + // For more information, see Grant Tokens (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#grant_token) + // in the AWS Key Management Service Developer Guide. + GrantTokens []*string `type:"list"` +} + +// String returns the string representation +func (s DecryptInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecryptInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DecryptInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DecryptInput"} + if s.CiphertextBlob == nil { + invalidParams.Add(request.NewErrParamRequired("CiphertextBlob")) + } + if s.CiphertextBlob != nil && len(s.CiphertextBlob) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CiphertextBlob", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DecryptOutput struct { + _ struct{} `type:"structure"` + + // ARN of the key used to perform the decryption. This value is returned if + // no errors are encountered during the operation. + KeyId *string `min:"1" type:"string"` + + // Decrypted plaintext data. This value may not be returned if the customer + // master key is not available or if you didn't have permission to use it. + // + // Plaintext is automatically base64 encoded/decoded by the SDK. + Plaintext []byte `min:"1" type:"blob"` +} + +// String returns the string representation +func (s DecryptOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecryptOutput) GoString() string { + return s.String() +} + +type DeleteAliasInput struct { + _ struct{} `type:"structure"` + + // The alias to be deleted. The name must start with the word "alias" followed + // by a forward slash (alias/). Aliases that begin with "alias/AWS" are reserved. 
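+	//
+	// (Illustrative sketch with an assumed client "svc" and a placeholder alias:
+	//
+	//    _, err := svc.DeleteAlias(&kms.DeleteAliasInput{
+	//        AliasName: aws.String("alias/my-app-key"),
+	//    })
+	// )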
+ AliasName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteAliasInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAliasInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteAliasInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteAliasInput"} + if s.AliasName == nil { + invalidParams.Add(request.NewErrParamRequired("AliasName")) + } + if s.AliasName != nil && len(*s.AliasName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AliasName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteAliasOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAliasOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAliasOutput) GoString() string { + return s.String() +} + +type DescribeKeyInput struct { + _ struct{} `type:"structure"` + + // A list of grant tokens. + // + // For more information, see Grant Tokens (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#grant_token) + // in the AWS Key Management Service Developer Guide. + GrantTokens []*string `type:"list"` + + // A unique identifier for the customer master key. This value can be a globally + // unique identifier, a fully specified ARN to either an alias or a key, or + // an alias name prefixed by "alias/". + // + // Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // + // Alias ARN Example - arn:aws:kms:us-east-1:123456789012:alias/MyAliasName + // + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + // + // Alias Name Example - alias/MyAliasName + KeyId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeKeyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeKeyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeKeyInput"} + if s.KeyId == nil { + invalidParams.Add(request.NewErrParamRequired("KeyId")) + } + if s.KeyId != nil && len(*s.KeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeKeyOutput struct { + _ struct{} `type:"structure"` + + // Metadata associated with the key. + KeyMetadata *KeyMetadata `type:"structure"` +} + +// String returns the string representation +func (s DescribeKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeKeyOutput) GoString() string { + return s.String() +} + +type DisableKeyInput struct { + _ struct{} `type:"structure"` + + // A unique identifier for the CMK. + // + // Use the CMK's unique identifier or its Amazon Resource Name (ARN). 
For example: + // + // Unique ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + KeyId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DisableKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableKeyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DisableKeyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisableKeyInput"} + if s.KeyId == nil { + invalidParams.Add(request.NewErrParamRequired("KeyId")) + } + if s.KeyId != nil && len(*s.KeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DisableKeyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisableKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableKeyOutput) GoString() string { + return s.String() +} + +type DisableKeyRotationInput struct { + _ struct{} `type:"structure"` + + // A unique identifier for the customer master key. This value can be a globally + // unique identifier or the fully specified ARN to a key. + // + // Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + KeyId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DisableKeyRotationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableKeyRotationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DisableKeyRotationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisableKeyRotationInput"} + if s.KeyId == nil { + invalidParams.Add(request.NewErrParamRequired("KeyId")) + } + if s.KeyId != nil && len(*s.KeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DisableKeyRotationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisableKeyRotationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableKeyRotationOutput) GoString() string { + return s.String() +} + +type EnableKeyInput struct { + _ struct{} `type:"structure"` + + // A unique identifier for the customer master key. This value can be a globally + // unique identifier or the fully specified ARN to a key. + // + // Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + KeyId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s EnableKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableKeyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
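+//
+// A minimal sketch of disabling and re-enabling a CMK (illustrative, not
+// generated code; "svc" and the key ID are placeholders):
+//
+//    keyId := aws.String("1234abcd-12ab-34cd-56ef-1234567890ab")
+//    _, _ = svc.DisableKey(&kms.DisableKeyInput{KeyId: keyId})
+//    _, _ = svc.EnableKey(&kms.EnableKeyInput{KeyId: keyId})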
+func (s *EnableKeyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EnableKeyInput"} + if s.KeyId == nil { + invalidParams.Add(request.NewErrParamRequired("KeyId")) + } + if s.KeyId != nil && len(*s.KeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type EnableKeyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s EnableKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableKeyOutput) GoString() string { + return s.String() +} + +type EnableKeyRotationInput struct { + _ struct{} `type:"structure"` + + // A unique identifier for the customer master key. This value can be a globally + // unique identifier or the fully specified ARN to a key. + // + // Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + KeyId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s EnableKeyRotationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableKeyRotationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *EnableKeyRotationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EnableKeyRotationInput"} + if s.KeyId == nil { + invalidParams.Add(request.NewErrParamRequired("KeyId")) + } + if s.KeyId != nil && len(*s.KeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type EnableKeyRotationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s EnableKeyRotationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableKeyRotationOutput) GoString() string { + return s.String() +} + +type EncryptInput struct { + _ struct{} `type:"structure"` + + // Name/value pair that specifies the encryption context to be used for authenticated + // encryption. If used here, the same value must be supplied to the Decrypt + // API or decryption will fail. For more information, see Encryption Context + // (http://docs.aws.amazon.com/kms/latest/developerguide/encrypt-context.html). + EncryptionContext map[string]*string `type:"map"` + + // A list of grant tokens. + // + // For more information, see Grant Tokens (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#grant_token) + // in the AWS Key Management Service Developer Guide. + GrantTokens []*string `type:"list"` + + // A unique identifier for the customer master key. This value can be a globally + // unique identifier, a fully specified ARN to either an alias or a key, or + // an alias name prefixed by "alias/". + // + // Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // + // Alias ARN Example - arn:aws:kms:us-east-1:123456789012:alias/MyAliasName + // + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + // + // Alias Name Example - alias/MyAliasName + KeyId *string `min:"1" type:"string" required:"true"` + + // Data to be encrypted. 
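+	// (Illustrative sketch of a round trip with a matching encryption context;
+	// "svc" is an assumed client and the alias a placeholder:
+	//
+	//    ctx := map[string]*string{"purpose": aws.String("example")}
+	//    enc, _ := svc.Encrypt(&kms.EncryptInput{
+	//        KeyId:             aws.String("alias/my-app-key"),
+	//        Plaintext:         []byte("sensitive data"),
+	//        EncryptionContext: ctx,
+	//    })
+	//    dec, _ := svc.Decrypt(&kms.DecryptInput{
+	//        CiphertextBlob:    enc.CiphertextBlob,
+	//        EncryptionContext: ctx, // must match, or Decrypt fails
+	//    })
+	//    fmt.Println(string(dec.Plaintext)) // "sensitive data"
+	// )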
+ // + // Plaintext is automatically base64 encoded/decoded by the SDK. + Plaintext []byte `min:"1" type:"blob" required:"true"` +} + +// String returns the string representation +func (s EncryptInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EncryptInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *EncryptInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EncryptInput"} + if s.KeyId == nil { + invalidParams.Add(request.NewErrParamRequired("KeyId")) + } + if s.KeyId != nil && len(*s.KeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) + } + if s.Plaintext == nil { + invalidParams.Add(request.NewErrParamRequired("Plaintext")) + } + if s.Plaintext != nil && len(s.Plaintext) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Plaintext", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type EncryptOutput struct { + _ struct{} `type:"structure"` + + // The encrypted plaintext. If you are using the CLI, the value is Base64 encoded. + // Otherwise, it is not encoded. + // + // CiphertextBlob is automatically base64 encoded/decoded by the SDK. + CiphertextBlob []byte `min:"1" type:"blob"` + + // The ID of the key used during encryption. + KeyId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s EncryptOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EncryptOutput) GoString() string { + return s.String() +} + +type GenerateDataKeyInput struct { + _ struct{} `type:"structure"` + + // Name/value pair that contains additional data to be authenticated during + // the encryption and decryption processes that use the key. This value is logged + // by AWS CloudTrail to provide context around the data encrypted by the key. + EncryptionContext map[string]*string `type:"map"` + + // A list of grant tokens. + // + // For more information, see Grant Tokens (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#grant_token) + // in the AWS Key Management Service Developer Guide. + GrantTokens []*string `type:"list"` + + // A unique identifier for the customer master key. This value can be a globally + // unique identifier, a fully specified ARN to either an alias or a key, or + // an alias name prefixed by "alias/". + // + // Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // + // Alias ARN Example - arn:aws:kms:us-east-1:123456789012:alias/MyAliasName + // + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + // + // Alias Name Example - alias/MyAliasName + KeyId *string `min:"1" type:"string" required:"true"` + + // Value that identifies the encryption algorithm and key size to generate a + // data key for. Currently this can be AES_128 or AES_256. + KeySpec *string `type:"string" enum:"DataKeySpec"` + + // Integer that contains the number of bytes to generate. Common values are + // 128, 256, 512, and 1024. 1024 is the current limit. We recommend that you + // use the KeySpec parameter instead. 
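+	//
+	// (Illustrative sketch of the recommended form, using KeySpec rather than
+	// NumberOfBytes; "svc" and the alias are placeholders:
+	//
+	//    out, err := svc.GenerateDataKey(&kms.GenerateDataKeyInput{
+	//        KeyId:   aws.String("alias/my-app-key"),
+	//        KeySpec: aws.String("AES_256"), // 256-bit data key
+	//    })
+	//    // out.Plaintext: encrypt locally, then wipe from memory.
+	//    // out.CiphertextBlob: store alongside the encrypted data.
+	// )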
+ NumberOfBytes *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s GenerateDataKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GenerateDataKeyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GenerateDataKeyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GenerateDataKeyInput"} + if s.KeyId == nil { + invalidParams.Add(request.NewErrParamRequired("KeyId")) + } + if s.KeyId != nil && len(*s.KeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) + } + if s.NumberOfBytes != nil && *s.NumberOfBytes < 1 { + invalidParams.Add(request.NewErrParamMinValue("NumberOfBytes", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GenerateDataKeyOutput struct { + _ struct{} `type:"structure"` + + // Ciphertext that contains the encrypted data key. You must store the blob + // and enough information to reconstruct the encryption context so that the + // data encrypted by using the key can later be decrypted. You must provide + // both the ciphertext blob and the encryption context to the Decrypt API to + // recover the plaintext data key and decrypt the object. + // + // If you are using the CLI, the value is Base64 encoded. Otherwise, it is + // not encoded. + // + // CiphertextBlob is automatically base64 encoded/decoded by the SDK. + CiphertextBlob []byte `min:"1" type:"blob"` + + // System generated unique identifier of the key to be used to decrypt the encrypted + // copy of the data key. + KeyId *string `min:"1" type:"string"` + + // Plaintext that contains the data key. Use this for encryption and decryption + // and then remove it from memory as soon as possible. + // + // Plaintext is automatically base64 encoded/decoded by the SDK. + Plaintext []byte `min:"1" type:"blob"` +} + +// String returns the string representation +func (s GenerateDataKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GenerateDataKeyOutput) GoString() string { + return s.String() +} + +type GenerateDataKeyWithoutPlaintextInput struct { + _ struct{} `type:"structure"` + + // Name:value pair that contains additional data to be authenticated during + // the encryption and decryption processes. + EncryptionContext map[string]*string `type:"map"` + + // A list of grant tokens. + // + // For more information, see Grant Tokens (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#grant_token) + // in the AWS Key Management Service Developer Guide. + GrantTokens []*string `type:"list"` + + // A unique identifier for the customer master key. This value can be a globally + // unique identifier, a fully specified ARN to either an alias or a key, or + // an alias name prefixed by "alias/". + // + // Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // + // Alias ARN Example - arn:aws:kms:us-east-1:123456789012:alias/MyAliasName + // + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + // + // Alias Name Example - alias/MyAliasName + KeyId *string `min:"1" type:"string" required:"true"` + + // Value that identifies the encryption algorithm and key size. Currently this + // can be AES_128 or AES_256. 
+	KeySpec *string `type:"string" enum:"DataKeySpec"`
+
+	// Integer that contains the number of bytes to generate. Common values are
+	// 128, 256, 512, and 1024; 1024 is the current limit. We recommend that you
+	// use the KeySpec parameter instead.
+	NumberOfBytes *int64 `min:"1" type:"integer"`
+}
+
+// String returns the string representation
+func (s GenerateDataKeyWithoutPlaintextInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GenerateDataKeyWithoutPlaintextInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GenerateDataKeyWithoutPlaintextInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GenerateDataKeyWithoutPlaintextInput"}
+	if s.KeyId == nil {
+		invalidParams.Add(request.NewErrParamRequired("KeyId"))
+	}
+	if s.KeyId != nil && len(*s.KeyId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("KeyId", 1))
+	}
+	if s.NumberOfBytes != nil && *s.NumberOfBytes < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("NumberOfBytes", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type GenerateDataKeyWithoutPlaintextOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Ciphertext that contains the wrapped data key. You must store the blob and
+	// encryption context so that the key can be used in a future decrypt operation.
+	//
+	// If you are using the CLI, the value is Base64 encoded. Otherwise, it is
+	// not encoded.
+	//
+	// CiphertextBlob is automatically base64 encoded/decoded by the SDK.
+	CiphertextBlob []byte `min:"1" type:"blob"`
+
+	// System generated unique identifier of the key to be used to decrypt the encrypted
+	// copy of the data key.
+	KeyId *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s GenerateDataKeyWithoutPlaintextOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GenerateDataKeyWithoutPlaintextOutput) GoString() string {
+	return s.String()
+}
+
+type GenerateRandomInput struct {
+	_ struct{} `type:"structure"`
+
+	// Integer that contains the number of bytes to generate. Common values are
+	// 128, 256, 512, and 1024. The current limit is 1024 bytes.
+	NumberOfBytes *int64 `min:"1" type:"integer"`
+}
+
+// String returns the string representation
+func (s GenerateRandomInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GenerateRandomInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GenerateRandomInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GenerateRandomInput"}
+	if s.NumberOfBytes != nil && *s.NumberOfBytes < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("NumberOfBytes", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type GenerateRandomOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Plaintext that contains the unpredictable byte string.
+	//
+	// Plaintext is automatically base64 encoded/decoded by the SDK.
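+	//
+	// (Illustrative sketch, assuming a configured client "svc":
+	//
+	//    out, err := svc.GenerateRandom(&kms.GenerateRandomInput{
+	//        NumberOfBytes: aws.Int64(32),
+	//    })
+	//    // on success, out.Plaintext holds 32 cryptographically random bytes
+	// )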
+ Plaintext []byte `min:"1" type:"blob"` +} + +// String returns the string representation +func (s GenerateRandomOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GenerateRandomOutput) GoString() string { + return s.String() +} + +type GetKeyPolicyInput struct { + _ struct{} `type:"structure"` + + // A unique identifier for the customer master key. This value can be a globally + // unique identifier or the fully specified ARN to a key. + // + // Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + KeyId *string `min:"1" type:"string" required:"true"` + + // String that contains the name of the policy. Currently, this must be "default". + // Policy names can be discovered by calling ListKeyPolicies. + PolicyName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetKeyPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetKeyPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetKeyPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetKeyPolicyInput"} + if s.KeyId == nil { + invalidParams.Add(request.NewErrParamRequired("KeyId")) + } + if s.KeyId != nil && len(*s.KeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) + } + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetKeyPolicyOutput struct { + _ struct{} `type:"structure"` + + // A policy document in JSON format. + Policy *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetKeyPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetKeyPolicyOutput) GoString() string { + return s.String() +} + +type GetKeyRotationStatusInput struct { + _ struct{} `type:"structure"` + + // A unique identifier for the customer master key. This value can be a globally + // unique identifier or the fully specified ARN to a key. + // + // Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + KeyId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetKeyRotationStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetKeyRotationStatusInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
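+//
+// A minimal usage sketch (illustrative, not generated code; "svc" and the
+// key ID are placeholders):
+//
+//    out, err := svc.GetKeyRotationStatus(&kms.GetKeyRotationStatusInput{
+//        KeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
+//    })
+//    if err == nil && aws.BoolValue(out.KeyRotationEnabled) {
+//        // annual rotation is enabled for this CMK
+//    }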
+func (s *GetKeyRotationStatusInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetKeyRotationStatusInput"} + if s.KeyId == nil { + invalidParams.Add(request.NewErrParamRequired("KeyId")) + } + if s.KeyId != nil && len(*s.KeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetKeyRotationStatusOutput struct { + _ struct{} `type:"structure"` + + // A Boolean value that specifies whether key rotation is enabled. + KeyRotationEnabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s GetKeyRotationStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetKeyRotationStatusOutput) GoString() string { + return s.String() +} + +// A structure for specifying the conditions under which the operations permitted +// by the grant are allowed. +// +// You can use this structure to allow the operations permitted by the grant +// only when a specified encryption context is present. For more information +// about encryption context, see Encryption Context (http://docs.aws.amazon.com/kms/latest/developerguide/encrypt-context.html) +// in the AWS Key Management Service Developer Guide. +type GrantConstraints struct { + _ struct{} `type:"structure"` + + // Contains a list of key-value pairs that must be present in the encryption + // context of a subsequent operation permitted by the grant. When a subsequent + // operation permitted by the grant includes an encryption context that matches + // this list, the grant allows the operation. Otherwise, the operation is not + // allowed. + EncryptionContextEquals map[string]*string `type:"map"` + + // Contains a list of key-value pairs, a subset of which must be present in + // the encryption context of a subsequent operation permitted by the grant. + // When a subsequent operation permitted by the grant includes an encryption + // context that matches this list or is a subset of this list, the grant allows + // the operation. Otherwise, the operation is not allowed. + EncryptionContextSubset map[string]*string `type:"map"` +} + +// String returns the string representation +func (s GrantConstraints) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GrantConstraints) GoString() string { + return s.String() +} + +// Contains information about an entry in a list of grants. +type GrantListEntry struct { + _ struct{} `type:"structure"` + + // The conditions under which the grant's operations are allowed. + Constraints *GrantConstraints `type:"structure"` + + // The date and time when the grant was created. + CreationDate *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The unique identifier for the grant. + GrantId *string `min:"1" type:"string"` + + // The principal that receives the grant's permissions. + GranteePrincipal *string `min:"1" type:"string"` + + // The AWS account under which the grant was issued. + IssuingAccount *string `min:"1" type:"string"` + + // The unique identifier for the customer master key (CMK) to which the grant + // applies. + KeyId *string `min:"1" type:"string"` + + // The friendly name that identifies the grant. If a name was provided in the + // CreateGrant request, that name is returned. Otherwise this value is null. + Name *string `min:"1" type:"string"` + + // The list of operations permitted by the grant. 
+ Operations []*string `type:"list"` + + // The principal that can retire the grant. + RetiringPrincipal *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GrantListEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GrantListEntry) GoString() string { + return s.String() +} + +// Contains information about each entry in the key list. +type KeyListEntry struct { + _ struct{} `type:"structure"` + + // ARN of the key. + KeyArn *string `min:"20" type:"string"` + + // Unique identifier of the key. + KeyId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s KeyListEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KeyListEntry) GoString() string { + return s.String() +} + +// Contains metadata about a customer master key (CMK). +// +// This data type is used as a response element for the CreateKey and DescribeKey +// operations. +type KeyMetadata struct { + _ struct{} `type:"structure"` + + // The twelve-digit account ID of the AWS account that owns the key. + AWSAccountId *string `type:"string"` + + // The Amazon Resource Name (ARN) of the key. For examples, see AWS Key Management + // Service (AWS KMS) (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kms) + // in the Example ARNs section of the AWS General Reference. + Arn *string `min:"20" type:"string"` + + // The date and time when the key was created. + CreationDate *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The date and time after which AWS KMS deletes the customer master key (CMK). + // This value is present only when KeyState is PendingDeletion, otherwise this + // value is null. + DeletionDate *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The friendly description of the key. + Description *string `type:"string"` + + // Specifies whether the key is enabled. When KeyState is Enabled this value + // is true, otherwise it is false. + Enabled *bool `type:"boolean"` + + // The globally unique identifier for the key. + KeyId *string `min:"1" type:"string" required:"true"` + + // The state of the customer master key (CMK). + // + // For more information about how key state affects the use of a CMK, see How + // Key State Affects the Use of a Customer Master Key (http://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) + // in the AWS Key Management Service Developer Guide. + KeyState *string `type:"string" enum:"KeyState"` + + // The cryptographic operations for which you can use the key. Currently the + // only allowed value is ENCRYPT_DECRYPT, which means you can use the key for + // the Encrypt and Decrypt operations. + KeyUsage *string `type:"string" enum:"KeyUsageType"` +} + +// String returns the string representation +func (s KeyMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KeyMetadata) GoString() string { + return s.String() +} + +type ListAliasesInput struct { + _ struct{} `type:"structure"` + + // When paginating results, specify the maximum number of items to return in + // the response. If additional items exist beyond the number you specify, the + // Truncated element in the response is set to true. + // + // This value is optional. If you include a value, it must be between 1 and + // 100, inclusive. If you do not include a value, it defaults to 50. 
+ Limit *int64 `min:"1" type:"integer"` + + // Use this parameter only when paginating results and only in a subsequent + // request after you receive a response with truncated results. Set it to the + // value of NextMarker from the response you just received. + Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListAliasesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAliasesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListAliasesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAliasesInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListAliasesOutput struct { + _ struct{} `type:"structure"` + + // A list of key aliases in the user's account. + Aliases []*AliasListEntry `type:"list"` + + // When Truncated is true, this value is present and contains the value to use + // for the Marker parameter in a subsequent pagination request. + NextMarker *string `min:"1" type:"string"` + + // A flag that indicates whether there are more items in the list. If your results + // were truncated, you can use the Marker parameter to make a subsequent pagination + // request to retrieve more items in the list. + Truncated *bool `type:"boolean"` +} + +// String returns the string representation +func (s ListAliasesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAliasesOutput) GoString() string { + return s.String() +} + +type ListGrantsInput struct { + _ struct{} `type:"structure"` + + // A unique identifier for the customer master key. This value can be a globally + // unique identifier or the fully specified ARN to a key. + // + // Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + KeyId *string `min:"1" type:"string" required:"true"` + + // When paginating results, specify the maximum number of items to return in + // the response. If additional items exist beyond the number you specify, the + // Truncated element in the response is set to true. + // + // This value is optional. If you include a value, it must be between 1 and + // 100, inclusive. If you do not include a value, it defaults to 50. + Limit *int64 `min:"1" type:"integer"` + + // Use this parameter only when paginating results and only in a subsequent + // request after you receive a response with truncated results. Set it to the + // value of NextMarker from the response you just received. + Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListGrantsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGrantsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
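+//
+// A manual pagination sketch using Limit, Marker, and Truncated (illustrative,
+// not generated code; "svc" and the key ID are placeholders):
+//
+//    input := &kms.ListGrantsInput{
+//        KeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
+//        Limit: aws.Int64(50),
+//    }
+//    for {
+//        page, err := svc.ListGrants(input)
+//        if err != nil {
+//            break // handle the error in real code
+//        }
+//        for _, g := range page.Grants {
+//            fmt.Println(aws.StringValue(g.GrantId))
+//        }
+//        if !aws.BoolValue(page.Truncated) {
+//            break
+//        }
+//        input.Marker = page.NextMarker
+//    }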
+func (s *ListGrantsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListGrantsInput"} + if s.KeyId == nil { + invalidParams.Add(request.NewErrParamRequired("KeyId")) + } + if s.KeyId != nil && len(*s.KeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListGrantsResponse struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*GrantListEntry `type:"list"` + + // When Truncated is true, this value is present and contains the value to use + // for the Marker parameter in a subsequent pagination request. + NextMarker *string `min:"1" type:"string"` + + // A flag that indicates whether there are more items in the list. If your results + // were truncated, you can use the Marker parameter to make a subsequent pagination + // request to retrieve more items in the list. + Truncated *bool `type:"boolean"` +} + +// String returns the string representation +func (s ListGrantsResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGrantsResponse) GoString() string { + return s.String() +} + +type ListKeyPoliciesInput struct { + _ struct{} `type:"structure"` + + // A unique identifier for the customer master key. This value can be a globally + // unique identifier, a fully specified ARN to either an alias or a key, or + // an alias name prefixed by "alias/". + // + // Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // + // Alias ARN Example - arn:aws:kms:us-east-1:123456789012:alias/MyAliasName + // + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + // + // Alias Name Example - alias/MyAliasName + KeyId *string `min:"1" type:"string" required:"true"` + + // When paginating results, specify the maximum number of items to return in + // the response. If additional items exist beyond the number you specify, the + // Truncated element in the response is set to true. + // + // This value is optional. If you include a value, it must be between 1 and + // 1000, inclusive. If you do not include a value, it defaults to 100. + // + // Currently only 1 policy can be attached to a key. + Limit *int64 `min:"1" type:"integer"` + + // Use this parameter only when paginating results and only in a subsequent + // request after you receive a response with truncated results. Set it to the + // value of NextMarker from the response you just received. + Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListKeyPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListKeyPoliciesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
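+//
+// A minimal usage sketch (illustrative, not generated code; "svc" and the
+// key ID are placeholders):
+//
+//    keyId := aws.String("1234abcd-12ab-34cd-56ef-1234567890ab")
+//    out, err := svc.ListKeyPolicies(&kms.ListKeyPoliciesInput{KeyId: keyId})
+//    if err == nil && len(out.PolicyNames) > 0 {
+//        pol, _ := svc.GetKeyPolicy(&kms.GetKeyPolicyInput{
+//            KeyId:      keyId,
+//            PolicyName: out.PolicyNames[0], // currently always "default"
+//        })
+//        fmt.Println(aws.StringValue(pol.Policy))
+//    }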
+func (s *ListKeyPoliciesInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListKeyPoliciesInput"}
+	if s.KeyId == nil {
+		invalidParams.Add(request.NewErrParamRequired("KeyId"))
+	}
+	if s.KeyId != nil && len(*s.KeyId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("KeyId", 1))
+	}
+	if s.Limit != nil && *s.Limit < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
+	}
+	if s.Marker != nil && len(*s.Marker) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Marker", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type ListKeyPoliciesOutput struct {
+	_ struct{} `type:"structure"`
+
+	// When Truncated is true, this value is present and contains the value to use
+	// for the Marker parameter in a subsequent pagination request.
+	NextMarker *string `min:"1" type:"string"`
+
+	// A list of policy names. Currently, there is only one policy and it is named
+	// "default".
+	PolicyNames []*string `type:"list"`
+
+	// A flag that indicates whether there are more items in the list. If your results
+	// were truncated, you can use the Marker parameter to make a subsequent pagination
+	// request to retrieve more items in the list.
+	Truncated *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s ListKeyPoliciesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListKeyPoliciesOutput) GoString() string {
+	return s.String()
+}
+
+type ListKeysInput struct {
+	_ struct{} `type:"structure"`
+
+	// When paginating results, specify the maximum number of items to return in
+	// the response. If additional items exist beyond the number you specify, the
+	// Truncated element in the response is set to true.
+	//
+	// This value is optional. If you include a value, it must be between 1 and
+	// 1000, inclusive. If you do not include a value, it defaults to 100.
+	Limit *int64 `min:"1" type:"integer"`
+
+	// Use this parameter only when paginating results and only in a subsequent
+	// request after you receive a response with truncated results. Set it to the
+	// value of NextMarker from the response you just received.
+	Marker *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ListKeysInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListKeysInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListKeysInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListKeysInput"}
+	if s.Limit != nil && *s.Limit < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
+	}
+	if s.Marker != nil && len(*s.Marker) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Marker", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type ListKeysOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of keys.
+	Keys []*KeyListEntry `type:"list"`
+
+	// When Truncated is true, this value is present and contains the value to use
+	// for the Marker parameter in a subsequent pagination request.
+	NextMarker *string `min:"1" type:"string"`
+
+	// A flag that indicates whether there are more items in the list. If your results
+	// were truncated, you can use the Marker parameter to make a subsequent pagination
+	// request to retrieve more items in the list.
+ Truncated *bool `type:"boolean"` +} + +// String returns the string representation +func (s ListKeysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListKeysOutput) GoString() string { + return s.String() +} + +type ListRetirableGrantsInput struct { + _ struct{} `type:"structure"` + + // When paginating results, specify the maximum number of items to return in + // the response. If additional items exist beyond the number you specify, the + // Truncated element in the response is set to true. + // + // This value is optional. If you include a value, it must be between 1 and + // 100, inclusive. If you do not include a value, it defaults to 50. + Limit *int64 `min:"1" type:"integer"` + + // Use this parameter only when paginating results and only in a subsequent + // request after you receive a response with truncated results. Set it to the + // value of NextMarker from the response you just received. + Marker *string `min:"1" type:"string"` + + // The retiring principal for which to list grants. + // + // To specify the retiring principal, use the Amazon Resource Name (ARN) (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // of an AWS principal. Valid AWS principals include AWS accounts (root), IAM + // users, federated users, and assumed role users. For examples of the ARN syntax + // for specifying a principal, see AWS Identity and Access Management (IAM) + // (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam) + // in the Example ARNs section of the Amazon Web Services General Reference. + RetiringPrincipal *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListRetirableGrantsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRetirableGrantsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListRetirableGrantsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListRetirableGrantsInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.RetiringPrincipal == nil { + invalidParams.Add(request.NewErrParamRequired("RetiringPrincipal")) + } + if s.RetiringPrincipal != nil && len(*s.RetiringPrincipal) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RetiringPrincipal", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutKeyPolicyInput struct { + _ struct{} `type:"structure"` + + // A flag to indicate whether to bypass the key policy lockout safety check. + // + // Setting this value to true increases the likelihood that the CMK becomes + // unmanageable. Do not set this value to true indiscriminately. + // + // For more information, refer to the scenario in the Default Key Policy (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) + // section in the AWS Key Management Service Developer Guide. + // + // Use this parameter only when you intend to prevent the principal making + // the request from making a subsequent PutKeyPolicy request on the CMK. + // + // The default value is false. 
+ BypassPolicyLockoutSafetyCheck *bool `type:"boolean"` + + // A unique identifier for the CMK. + // + // Use the CMK's unique identifier or its Amazon Resource Name (ARN). For example: + // + // Unique ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + KeyId *string `min:"1" type:"string" required:"true"` + + // The key policy to attach to the CMK. + // + // The key policy must meet the following criteria: + // + // It must allow the principal making the PutKeyPolicy request to make a + // subsequent PutKeyPolicy request on the CMK. This reduces the likelihood that + // the CMK becomes unmanageable. For more information, refer to the scenario + // in the Default Key Policy (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) + // section in the AWS Key Management Service Developer Guide. + // + // The principal(s) specified in the key policy must exist and be visible + // to AWS KMS. When you create a new AWS principal (for example, an IAM user + // or role), you might need to enforce a delay before specifying the new principal + // in a key policy because the new principal might not immediately be visible + // to AWS KMS. For more information, see Changes that I make are not always + // immediately visible (http://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) + // in the IAM User Guide. + // + // The policy size limit is 32 KiB (32768 bytes). + Policy *string `min:"1" type:"string" required:"true"` + + // The name of the key policy. + // + // This value must be default. + PolicyName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutKeyPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutKeyPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutKeyPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutKeyPolicyInput"} + if s.KeyId == nil { + invalidParams.Add(request.NewErrParamRequired("KeyId")) + } + if s.KeyId != nil && len(*s.KeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) + } + if s.Policy == nil { + invalidParams.Add(request.NewErrParamRequired("Policy")) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutKeyPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutKeyPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutKeyPolicyOutput) GoString() string { + return s.String() +} + +type ReEncryptInput struct { + _ struct{} `type:"structure"` + + // Ciphertext of the data to re-encrypt. + // + // CiphertextBlob is automatically base64 encoded/decoded by the SDK. + CiphertextBlob []byte `min:"1" type:"blob" required:"true"` + + // Encryption context to be used when the data is re-encrypted. 
+ DestinationEncryptionContext map[string]*string `type:"map"` + + // A unique identifier for the customer master key used to re-encrypt the data. + // This value can be a globally unique identifier, a fully specified ARN to + // either an alias or a key, or an alias name prefixed by "alias/". + // + // Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // + // Alias ARN Example - arn:aws:kms:us-east-1:123456789012:alias/MyAliasName + // + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + // + // Alias Name Example - alias/MyAliasName + DestinationKeyId *string `min:"1" type:"string" required:"true"` + + // A list of grant tokens. + // + // For more information, see Grant Tokens (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#grant_token) + // in the AWS Key Management Service Developer Guide. + GrantTokens []*string `type:"list"` + + // Encryption context used to encrypt and decrypt the data specified in the + // CiphertextBlob parameter. + SourceEncryptionContext map[string]*string `type:"map"` +} + +// String returns the string representation +func (s ReEncryptInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReEncryptInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReEncryptInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReEncryptInput"} + if s.CiphertextBlob == nil { + invalidParams.Add(request.NewErrParamRequired("CiphertextBlob")) + } + if s.CiphertextBlob != nil && len(s.CiphertextBlob) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CiphertextBlob", 1)) + } + if s.DestinationKeyId == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationKeyId")) + } + if s.DestinationKeyId != nil && len(*s.DestinationKeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DestinationKeyId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ReEncryptOutput struct { + _ struct{} `type:"structure"` + + // The re-encrypted data. If you are using the CLI, the value is Base64 encoded. + // Otherwise, it is not encoded. + // + // CiphertextBlob is automatically base64 encoded/decoded by the SDK. + CiphertextBlob []byte `min:"1" type:"blob"` + + // Unique identifier of the key used to re-encrypt the data. + KeyId *string `min:"1" type:"string"` + + // Unique identifier of the key used to originally encrypt the data. + SourceKeyId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ReEncryptOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReEncryptOutput) GoString() string { + return s.String() +} + +type RetireGrantInput struct { + _ struct{} `type:"structure"` + + // Unique identifier of the grant to be retired. The grant ID is returned by + // the CreateGrant function. + // + // Grant ID Example - 0123456789012345678901234567890123456789012345678901234567890123 + GrantId *string `min:"1" type:"string"` + + // Token that identifies the grant to be retired. + GrantToken *string `min:"1" type:"string"` + + // A unique identifier for the customer master key associated with the grant. + // This value can be a globally unique identifier or a fully specified ARN of + // the key. 
+ // + // Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + KeyId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s RetireGrantInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RetireGrantInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RetireGrantInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RetireGrantInput"} + if s.GrantId != nil && len(*s.GrantId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GrantId", 1)) + } + if s.GrantToken != nil && len(*s.GrantToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GrantToken", 1)) + } + if s.KeyId != nil && len(*s.KeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RetireGrantOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RetireGrantOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RetireGrantOutput) GoString() string { + return s.String() +} + +type RevokeGrantInput struct { + _ struct{} `type:"structure"` + + // Identifier of the grant to be revoked. + GrantId *string `min:"1" type:"string" required:"true"` + + // A unique identifier for the customer master key associated with the grant. + // This value can be a globally unique identifier or the fully specified ARN + // to a key. + // + // Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + KeyId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RevokeGrantInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevokeGrantInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RevokeGrantInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RevokeGrantInput"} + if s.GrantId == nil { + invalidParams.Add(request.NewErrParamRequired("GrantId")) + } + if s.GrantId != nil && len(*s.GrantId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GrantId", 1)) + } + if s.KeyId == nil { + invalidParams.Add(request.NewErrParamRequired("KeyId")) + } + if s.KeyId != nil && len(*s.KeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RevokeGrantOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RevokeGrantOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevokeGrantOutput) GoString() string { + return s.String() +} + +type ScheduleKeyDeletionInput struct { + _ struct{} `type:"structure"` + + // The unique identifier for the customer master key (CMK) to delete. + // + // To specify this value, use the unique key ID or the Amazon Resource Name + // (ARN) of the CMK. 
Examples: + // + // Unique key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To obtain the unique key ID and key ARN for a given CMK, use ListKeys + // or DescribeKey. + KeyId *string `min:"1" type:"string" required:"true"` + + // The waiting period, specified in number of days. After the waiting period + // ends, AWS KMS deletes the customer master key (CMK). + // + // This value is optional. If you include a value, it must be between 7 and + // 30, inclusive. If you do not include a value, it defaults to 30. + PendingWindowInDays *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s ScheduleKeyDeletionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduleKeyDeletionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ScheduleKeyDeletionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ScheduleKeyDeletionInput"} + if s.KeyId == nil { + invalidParams.Add(request.NewErrParamRequired("KeyId")) + } + if s.KeyId != nil && len(*s.KeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) + } + if s.PendingWindowInDays != nil && *s.PendingWindowInDays < 1 { + invalidParams.Add(request.NewErrParamMinValue("PendingWindowInDays", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ScheduleKeyDeletionOutput struct { + _ struct{} `type:"structure"` + + // The date and time after which AWS KMS deletes the customer master key (CMK). + DeletionDate *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The unique identifier of the customer master key (CMK) for which deletion + // is scheduled. + KeyId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ScheduleKeyDeletionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduleKeyDeletionOutput) GoString() string { + return s.String() +} + +type UpdateAliasInput struct { + _ struct{} `type:"structure"` + + // String that contains the name of the alias to be modified. The name must + // start with the word "alias" followed by a forward slash (alias/). Aliases + // that begin with "alias/aws" are reserved. + AliasName *string `min:"1" type:"string" required:"true"` + + // Unique identifier of the customer master key to be mapped to the alias. This + // value can be a globally unique identifier or the fully specified ARN of a + // key. + // + // Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + // + // You can call ListAliases to verify that the alias is mapped to the correct + // TargetKeyId. + TargetKeyId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateAliasInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAliasInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateAliasInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateAliasInput"} + if s.AliasName == nil { + invalidParams.Add(request.NewErrParamRequired("AliasName")) + } + if s.AliasName != nil && len(*s.AliasName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AliasName", 1)) + } + if s.TargetKeyId == nil { + invalidParams.Add(request.NewErrParamRequired("TargetKeyId")) + } + if s.TargetKeyId != nil && len(*s.TargetKeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TargetKeyId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateAliasOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateAliasOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAliasOutput) GoString() string { + return s.String() +} + +type UpdateKeyDescriptionInput struct { + _ struct{} `type:"structure"` + + // New description for the key. + Description *string `type:"string" required:"true"` + + // A unique identifier for the customer master key. This value can be a globally + // unique identifier or the fully specified ARN to a key. + // + // Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + KeyId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateKeyDescriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateKeyDescriptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateKeyDescriptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateKeyDescriptionInput"} + if s.Description == nil { + invalidParams.Add(request.NewErrParamRequired("Description")) + } + if s.KeyId == nil { + invalidParams.Add(request.NewErrParamRequired("KeyId")) + } + if s.KeyId != nil && len(*s.KeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateKeyDescriptionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateKeyDescriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateKeyDescriptionOutput) GoString() string { + return s.String() +} + +const ( + // @enum DataKeySpec + DataKeySpecAes256 = "AES_256" + // @enum DataKeySpec + DataKeySpecAes128 = "AES_128" +) + +const ( + // @enum GrantOperation + GrantOperationDecrypt = "Decrypt" + // @enum GrantOperation + GrantOperationEncrypt = "Encrypt" + // @enum GrantOperation + GrantOperationGenerateDataKey = "GenerateDataKey" + // @enum GrantOperation + GrantOperationGenerateDataKeyWithoutPlaintext = "GenerateDataKeyWithoutPlaintext" + // @enum GrantOperation + GrantOperationReEncryptFrom = "ReEncryptFrom" + // @enum GrantOperation + GrantOperationReEncryptTo = "ReEncryptTo" + // @enum GrantOperation + GrantOperationCreateGrant = "CreateGrant" + // @enum GrantOperation + GrantOperationRetireGrant = "RetireGrant" + // @enum GrantOperation + GrantOperationDescribeKey = "DescribeKey" +) + +const ( + // @enum KeyState + KeyStateEnabled = "Enabled" + // @enum KeyState + KeyStateDisabled = "Disabled" + // @enum KeyState + KeyStatePendingDeletion = "PendingDeletion" +) + +const ( + // @enum KeyUsageType + KeyUsageTypeEncryptDecrypt = "ENCRYPT_DECRYPT" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/kms/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/kms/examples_test.go new file mode 100644 index 000000000..7c2470602 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/kms/examples_test.go @@ -0,0 +1,664 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package kms_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/kms" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleKMS_CancelKeyDeletion() { + svc := kms.New(session.New()) + + params := &kms.CancelKeyDeletionInput{ + KeyId: aws.String("KeyIdType"), // Required + } + resp, err := svc.CancelKeyDeletion(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_CreateAlias() { + svc := kms.New(session.New()) + + params := &kms.CreateAliasInput{ + AliasName: aws.String("AliasNameType"), // Required + TargetKeyId: aws.String("KeyIdType"), // Required + } + resp, err := svc.CreateAlias(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
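+	// The error branch above suggests casting err to awserr.Error; a minimal
+	// sketch of that cast (assuming the "github.com/aws/aws-sdk-go/aws/awserr"
+	// import, which this generated file does not include):
+	//
+	//	if aerr, ok := err.(awserr.Error); ok {
+	//		fmt.Println(aerr.Code(), aerr.Message())
+	//	}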
+ fmt.Println(resp) +} + +func ExampleKMS_CreateGrant() { + svc := kms.New(session.New()) + + params := &kms.CreateGrantInput{ + GranteePrincipal: aws.String("PrincipalIdType"), // Required + KeyId: aws.String("KeyIdType"), // Required + Constraints: &kms.GrantConstraints{ + EncryptionContextEquals: map[string]*string{ + "Key": aws.String("EncryptionContextValue"), // Required + // More values... + }, + EncryptionContextSubset: map[string]*string{ + "Key": aws.String("EncryptionContextValue"), // Required + // More values... + }, + }, + GrantTokens: []*string{ + aws.String("GrantTokenType"), // Required + // More values... + }, + Name: aws.String("GrantNameType"), + Operations: []*string{ + aws.String("GrantOperation"), // Required + // More values... + }, + RetiringPrincipal: aws.String("PrincipalIdType"), + } + resp, err := svc.CreateGrant(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_CreateKey() { + svc := kms.New(session.New()) + + params := &kms.CreateKeyInput{ + BypassPolicyLockoutSafetyCheck: aws.Bool(true), + Description: aws.String("DescriptionType"), + KeyUsage: aws.String("KeyUsageType"), + Policy: aws.String("PolicyType"), + } + resp, err := svc.CreateKey(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_Decrypt() { + svc := kms.New(session.New()) + + params := &kms.DecryptInput{ + CiphertextBlob: []byte("PAYLOAD"), // Required + EncryptionContext: map[string]*string{ + "Key": aws.String("EncryptionContextValue"), // Required + // More values... + }, + GrantTokens: []*string{ + aws.String("GrantTokenType"), // Required + // More values... + }, + } + resp, err := svc.Decrypt(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_DeleteAlias() { + svc := kms.New(session.New()) + + params := &kms.DeleteAliasInput{ + AliasName: aws.String("AliasNameType"), // Required + } + resp, err := svc.DeleteAlias(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_DescribeKey() { + svc := kms.New(session.New()) + + params := &kms.DescribeKeyInput{ + KeyId: aws.String("KeyIdType"), // Required + GrantTokens: []*string{ + aws.String("GrantTokenType"), // Required + // More values... + }, + } + resp, err := svc.DescribeKey(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_DisableKey() { + svc := kms.New(session.New()) + + params := &kms.DisableKeyInput{ + KeyId: aws.String("KeyIdType"), // Required + } + resp, err := svc.DisableKey(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
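+	// Note on the Decrypt example above: []byte("PAYLOAD") is only a
+	// placeholder. Decrypt succeeds only on a CiphertextBlob previously
+	// returned by Encrypt, GenerateDataKey, or ReEncrypt; the SDK handles
+	// the base64 transport encoding automatically, as the field docs note.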
+ fmt.Println(resp) +} + +func ExampleKMS_DisableKeyRotation() { + svc := kms.New(session.New()) + + params := &kms.DisableKeyRotationInput{ + KeyId: aws.String("KeyIdType"), // Required + } + resp, err := svc.DisableKeyRotation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_EnableKey() { + svc := kms.New(session.New()) + + params := &kms.EnableKeyInput{ + KeyId: aws.String("KeyIdType"), // Required + } + resp, err := svc.EnableKey(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_EnableKeyRotation() { + svc := kms.New(session.New()) + + params := &kms.EnableKeyRotationInput{ + KeyId: aws.String("KeyIdType"), // Required + } + resp, err := svc.EnableKeyRotation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_Encrypt() { + svc := kms.New(session.New()) + + params := &kms.EncryptInput{ + KeyId: aws.String("KeyIdType"), // Required + Plaintext: []byte("PAYLOAD"), // Required + EncryptionContext: map[string]*string{ + "Key": aws.String("EncryptionContextValue"), // Required + // More values... + }, + GrantTokens: []*string{ + aws.String("GrantTokenType"), // Required + // More values... + }, + } + resp, err := svc.Encrypt(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_GenerateDataKey() { + svc := kms.New(session.New()) + + params := &kms.GenerateDataKeyInput{ + KeyId: aws.String("KeyIdType"), // Required + EncryptionContext: map[string]*string{ + "Key": aws.String("EncryptionContextValue"), // Required + // More values... + }, + GrantTokens: []*string{ + aws.String("GrantTokenType"), // Required + // More values... + }, + KeySpec: aws.String("DataKeySpec"), + NumberOfBytes: aws.Int64(1), + } + resp, err := svc.GenerateDataKey(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_GenerateDataKeyWithoutPlaintext() { + svc := kms.New(session.New()) + + params := &kms.GenerateDataKeyWithoutPlaintextInput{ + KeyId: aws.String("KeyIdType"), // Required + EncryptionContext: map[string]*string{ + "Key": aws.String("EncryptionContextValue"), // Required + // More values... + }, + GrantTokens: []*string{ + aws.String("GrantTokenType"), // Required + // More values... + }, + KeySpec: aws.String("DataKeySpec"), + NumberOfBytes: aws.Int64(1), + } + resp, err := svc.GenerateDataKeyWithoutPlaintext(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
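+	// Note on the two data-key examples above: KeySpec ("AES_256" or
+	// "AES_128") and NumberOfBytes are alternative ways to size the data
+	// key, and the service expects one or the other; both appear above only
+	// to show the request shape.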
+ fmt.Println(resp) +} + +func ExampleKMS_GenerateRandom() { + svc := kms.New(session.New()) + + params := &kms.GenerateRandomInput{ + NumberOfBytes: aws.Int64(1), + } + resp, err := svc.GenerateRandom(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_GetKeyPolicy() { + svc := kms.New(session.New()) + + params := &kms.GetKeyPolicyInput{ + KeyId: aws.String("KeyIdType"), // Required + PolicyName: aws.String("PolicyNameType"), // Required + } + resp, err := svc.GetKeyPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_GetKeyRotationStatus() { + svc := kms.New(session.New()) + + params := &kms.GetKeyRotationStatusInput{ + KeyId: aws.String("KeyIdType"), // Required + } + resp, err := svc.GetKeyRotationStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_ListAliases() { + svc := kms.New(session.New()) + + params := &kms.ListAliasesInput{ + Limit: aws.Int64(1), + Marker: aws.String("MarkerType"), + } + resp, err := svc.ListAliases(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_ListGrants() { + svc := kms.New(session.New()) + + params := &kms.ListGrantsInput{ + KeyId: aws.String("KeyIdType"), // Required + Limit: aws.Int64(1), + Marker: aws.String("MarkerType"), + } + resp, err := svc.ListGrants(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_ListKeyPolicies() { + svc := kms.New(session.New()) + + params := &kms.ListKeyPoliciesInput{ + KeyId: aws.String("KeyIdType"), // Required + Limit: aws.Int64(1), + Marker: aws.String("MarkerType"), + } + resp, err := svc.ListKeyPolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_ListKeys() { + svc := kms.New(session.New()) + + params := &kms.ListKeysInput{ + Limit: aws.Int64(1), + Marker: aws.String("MarkerType"), + } + resp, err := svc.ListKeys(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_ListRetirableGrants() { + svc := kms.New(session.New()) + + params := &kms.ListRetirableGrantsInput{ + RetiringPrincipal: aws.String("PrincipalIdType"), // Required + Limit: aws.Int64(1), + Marker: aws.String("MarkerType"), + } + resp, err := svc.ListRetirableGrants(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
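+	// Aside on the List* examples above: instead of looping on Marker and
+	// NextMarker by hand, the client also exposes page-walking helpers such
+	// as ListKeysPages. A minimal sketch (svc as above; returning true
+	// continues to the next page):
+	//
+	//	_ = svc.ListKeysPages(&kms.ListKeysInput{},
+	//		func(page *kms.ListKeysOutput, lastPage bool) bool {
+	//			fmt.Println(len(page.Keys), "keys on this page")
+	//			return true
+	//		})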
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_PutKeyPolicy() { + svc := kms.New(session.New()) + + params := &kms.PutKeyPolicyInput{ + KeyId: aws.String("KeyIdType"), // Required + Policy: aws.String("PolicyType"), // Required + PolicyName: aws.String("PolicyNameType"), // Required + BypassPolicyLockoutSafetyCheck: aws.Bool(true), + } + resp, err := svc.PutKeyPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_ReEncrypt() { + svc := kms.New(session.New()) + + params := &kms.ReEncryptInput{ + CiphertextBlob: []byte("PAYLOAD"), // Required + DestinationKeyId: aws.String("KeyIdType"), // Required + DestinationEncryptionContext: map[string]*string{ + "Key": aws.String("EncryptionContextValue"), // Required + // More values... + }, + GrantTokens: []*string{ + aws.String("GrantTokenType"), // Required + // More values... + }, + SourceEncryptionContext: map[string]*string{ + "Key": aws.String("EncryptionContextValue"), // Required + // More values... + }, + } + resp, err := svc.ReEncrypt(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_RetireGrant() { + svc := kms.New(session.New()) + + params := &kms.RetireGrantInput{ + GrantId: aws.String("GrantIdType"), + GrantToken: aws.String("GrantTokenType"), + KeyId: aws.String("KeyIdType"), + } + resp, err := svc.RetireGrant(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_RevokeGrant() { + svc := kms.New(session.New()) + + params := &kms.RevokeGrantInput{ + GrantId: aws.String("GrantIdType"), // Required + KeyId: aws.String("KeyIdType"), // Required + } + resp, err := svc.RevokeGrant(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_ScheduleKeyDeletion() { + svc := kms.New(session.New()) + + params := &kms.ScheduleKeyDeletionInput{ + KeyId: aws.String("KeyIdType"), // Required + PendingWindowInDays: aws.Int64(1), + } + resp, err := svc.ScheduleKeyDeletion(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_UpdateAlias() { + svc := kms.New(session.New()) + + params := &kms.UpdateAliasInput{ + AliasName: aws.String("AliasNameType"), // Required + TargetKeyId: aws.String("KeyIdType"), // Required + } + resp, err := svc.UpdateAlias(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
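+	// On the ReEncrypt example above: the decrypt-and-re-encrypt round trip
+	// happens inside AWS KMS, so the intermediate plaintext never reaches
+	// the caller; the request carries the old ciphertext in, and new
+	// ciphertext under DestinationKeyId comes back.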
+ fmt.Println(resp) +} + +func ExampleKMS_UpdateKeyDescription() { + svc := kms.New(session.New()) + + params := &kms.UpdateKeyDescriptionInput{ + Description: aws.String("DescriptionType"), // Required + KeyId: aws.String("KeyIdType"), // Required + } + resp, err := svc.UpdateKeyDescription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/kms/kmsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/kms/kmsiface/interface.go new file mode 100644 index 000000000..34628c02b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/kms/kmsiface/interface.go @@ -0,0 +1,138 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package kmsiface provides an interface for the AWS Key Management Service. +package kmsiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/kms" +) + +// KMSAPI is the interface type for kms.KMS. +type KMSAPI interface { + CancelKeyDeletionRequest(*kms.CancelKeyDeletionInput) (*request.Request, *kms.CancelKeyDeletionOutput) + + CancelKeyDeletion(*kms.CancelKeyDeletionInput) (*kms.CancelKeyDeletionOutput, error) + + CreateAliasRequest(*kms.CreateAliasInput) (*request.Request, *kms.CreateAliasOutput) + + CreateAlias(*kms.CreateAliasInput) (*kms.CreateAliasOutput, error) + + CreateGrantRequest(*kms.CreateGrantInput) (*request.Request, *kms.CreateGrantOutput) + + CreateGrant(*kms.CreateGrantInput) (*kms.CreateGrantOutput, error) + + CreateKeyRequest(*kms.CreateKeyInput) (*request.Request, *kms.CreateKeyOutput) + + CreateKey(*kms.CreateKeyInput) (*kms.CreateKeyOutput, error) + + DecryptRequest(*kms.DecryptInput) (*request.Request, *kms.DecryptOutput) + + Decrypt(*kms.DecryptInput) (*kms.DecryptOutput, error) + + DeleteAliasRequest(*kms.DeleteAliasInput) (*request.Request, *kms.DeleteAliasOutput) + + DeleteAlias(*kms.DeleteAliasInput) (*kms.DeleteAliasOutput, error) + + DescribeKeyRequest(*kms.DescribeKeyInput) (*request.Request, *kms.DescribeKeyOutput) + + DescribeKey(*kms.DescribeKeyInput) (*kms.DescribeKeyOutput, error) + + DisableKeyRequest(*kms.DisableKeyInput) (*request.Request, *kms.DisableKeyOutput) + + DisableKey(*kms.DisableKeyInput) (*kms.DisableKeyOutput, error) + + DisableKeyRotationRequest(*kms.DisableKeyRotationInput) (*request.Request, *kms.DisableKeyRotationOutput) + + DisableKeyRotation(*kms.DisableKeyRotationInput) (*kms.DisableKeyRotationOutput, error) + + EnableKeyRequest(*kms.EnableKeyInput) (*request.Request, *kms.EnableKeyOutput) + + EnableKey(*kms.EnableKeyInput) (*kms.EnableKeyOutput, error) + + EnableKeyRotationRequest(*kms.EnableKeyRotationInput) (*request.Request, *kms.EnableKeyRotationOutput) + + EnableKeyRotation(*kms.EnableKeyRotationInput) (*kms.EnableKeyRotationOutput, error) + + EncryptRequest(*kms.EncryptInput) (*request.Request, *kms.EncryptOutput) + + Encrypt(*kms.EncryptInput) (*kms.EncryptOutput, error) + + GenerateDataKeyRequest(*kms.GenerateDataKeyInput) (*request.Request, *kms.GenerateDataKeyOutput) + + GenerateDataKey(*kms.GenerateDataKeyInput) (*kms.GenerateDataKeyOutput, error) + + GenerateDataKeyWithoutPlaintextRequest(*kms.GenerateDataKeyWithoutPlaintextInput) (*request.Request, *kms.GenerateDataKeyWithoutPlaintextOutput) + + GenerateDataKeyWithoutPlaintext(*kms.GenerateDataKeyWithoutPlaintextInput) 
(*kms.GenerateDataKeyWithoutPlaintextOutput, error) + + GenerateRandomRequest(*kms.GenerateRandomInput) (*request.Request, *kms.GenerateRandomOutput) + + GenerateRandom(*kms.GenerateRandomInput) (*kms.GenerateRandomOutput, error) + + GetKeyPolicyRequest(*kms.GetKeyPolicyInput) (*request.Request, *kms.GetKeyPolicyOutput) + + GetKeyPolicy(*kms.GetKeyPolicyInput) (*kms.GetKeyPolicyOutput, error) + + GetKeyRotationStatusRequest(*kms.GetKeyRotationStatusInput) (*request.Request, *kms.GetKeyRotationStatusOutput) + + GetKeyRotationStatus(*kms.GetKeyRotationStatusInput) (*kms.GetKeyRotationStatusOutput, error) + + ListAliasesRequest(*kms.ListAliasesInput) (*request.Request, *kms.ListAliasesOutput) + + ListAliases(*kms.ListAliasesInput) (*kms.ListAliasesOutput, error) + + ListAliasesPages(*kms.ListAliasesInput, func(*kms.ListAliasesOutput, bool) bool) error + + ListGrantsRequest(*kms.ListGrantsInput) (*request.Request, *kms.ListGrantsResponse) + + ListGrants(*kms.ListGrantsInput) (*kms.ListGrantsResponse, error) + + ListGrantsPages(*kms.ListGrantsInput, func(*kms.ListGrantsResponse, bool) bool) error + + ListKeyPoliciesRequest(*kms.ListKeyPoliciesInput) (*request.Request, *kms.ListKeyPoliciesOutput) + + ListKeyPolicies(*kms.ListKeyPoliciesInput) (*kms.ListKeyPoliciesOutput, error) + + ListKeyPoliciesPages(*kms.ListKeyPoliciesInput, func(*kms.ListKeyPoliciesOutput, bool) bool) error + + ListKeysRequest(*kms.ListKeysInput) (*request.Request, *kms.ListKeysOutput) + + ListKeys(*kms.ListKeysInput) (*kms.ListKeysOutput, error) + + ListKeysPages(*kms.ListKeysInput, func(*kms.ListKeysOutput, bool) bool) error + + ListRetirableGrantsRequest(*kms.ListRetirableGrantsInput) (*request.Request, *kms.ListGrantsResponse) + + ListRetirableGrants(*kms.ListRetirableGrantsInput) (*kms.ListGrantsResponse, error) + + PutKeyPolicyRequest(*kms.PutKeyPolicyInput) (*request.Request, *kms.PutKeyPolicyOutput) + + PutKeyPolicy(*kms.PutKeyPolicyInput) (*kms.PutKeyPolicyOutput, error) + + ReEncryptRequest(*kms.ReEncryptInput) (*request.Request, *kms.ReEncryptOutput) + + ReEncrypt(*kms.ReEncryptInput) (*kms.ReEncryptOutput, error) + + RetireGrantRequest(*kms.RetireGrantInput) (*request.Request, *kms.RetireGrantOutput) + + RetireGrant(*kms.RetireGrantInput) (*kms.RetireGrantOutput, error) + + RevokeGrantRequest(*kms.RevokeGrantInput) (*request.Request, *kms.RevokeGrantOutput) + + RevokeGrant(*kms.RevokeGrantInput) (*kms.RevokeGrantOutput, error) + + ScheduleKeyDeletionRequest(*kms.ScheduleKeyDeletionInput) (*request.Request, *kms.ScheduleKeyDeletionOutput) + + ScheduleKeyDeletion(*kms.ScheduleKeyDeletionInput) (*kms.ScheduleKeyDeletionOutput, error) + + UpdateAliasRequest(*kms.UpdateAliasInput) (*request.Request, *kms.UpdateAliasOutput) + + UpdateAlias(*kms.UpdateAliasInput) (*kms.UpdateAliasOutput, error) + + UpdateKeyDescriptionRequest(*kms.UpdateKeyDescriptionInput) (*request.Request, *kms.UpdateKeyDescriptionOutput) + + UpdateKeyDescription(*kms.UpdateKeyDescriptionInput) (*kms.UpdateKeyDescriptionOutput, error) +} + +var _ KMSAPI = (*kms.KMS)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/kms/service.go b/vendor/github.com/aws/aws-sdk-go/service/kms/service.go new file mode 100644 index 000000000..4afd3a8e2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/kms/service.go @@ -0,0 +1,158 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package kms + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// AWS Key Management Service (AWS KMS) is an encryption and key management +// web service. This guide describes the AWS KMS operations that you can call +// programmatically. For general information about AWS KMS, see the AWS Key +// Management Service Developer Guide (http://docs.aws.amazon.com/kms/latest/developerguide/). +// +// AWS provides SDKs that consist of libraries and sample code for various +// programming languages and platforms (Java, Ruby, .Net, iOS, Android, etc.). +// The SDKs provide a convenient way to create programmatic access to AWS KMS +// and other AWS services. For example, the SDKs take care of tasks such as +// signing requests (see below), managing errors, and retrying requests automatically. +// For more information about the AWS SDKs, including how to download and install +// them, see Tools for Amazon Web Services (http://aws.amazon.com/tools/). +// +// We recommend that you use the AWS SDKs to make programmatic API calls to +// AWS KMS. +// +// Clients must support TLS (Transport Layer Security) 1.0. We recommend TLS +// 1.2. Clients must also support cipher suites with Perfect Forward Secrecy +// (PFS) such as Ephemeral Diffie-Hellman (DHE) or Elliptic Curve Ephemeral +// Diffie-Hellman (ECDHE). Most modern systems such as Java 7 and later support +// these modes. +// +// Signing Requests +// +// Requests must be signed by using an access key ID and a secret access key. +// We strongly recommend that you do not use your AWS account (root) access +// key ID and secret key for everyday work with AWS KMS. Instead, use the access +// key ID and secret access key for an IAM user, or you can use the AWS Security +// Token Service to generate temporary security credentials that you can use +// to sign requests. +// +// All AWS KMS operations require Signature Version 4 (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). +// +// Logging API Requests +// +// AWS KMS supports AWS CloudTrail, a service that logs AWS API calls and related +// events for your AWS account and delivers them to an Amazon S3 bucket that +// you specify. By using the information collected by CloudTrail, you can determine +// what requests were made to AWS KMS, who made the request, when it was made, +// and so on. To learn more about CloudTrail, including how to turn it on and +// find your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/). +// +// Additional Resources +// +// For more information about credentials and request signing, see the following: +// +// AWS Security Credentials (http://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html) +// - This topic provides general information about the types of credentials +// used for accessing AWS. +// +// Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html) +// - This section of the IAM User Guide describes how to create and use temporary +// security credentials. 
+//
+// Signature Version 4 Signing Process (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html)
+// - This set of topics walks you through the process of signing a request using
+// an access key ID and a secret access key.
+//
+// Commonly Used APIs
+//
+// Of the APIs discussed in this guide, the following will prove the most useful
+// for most applications. You will likely perform actions other than these,
+// such as creating keys and assigning policies, by using the console.
+//
+// Encrypt
+//
+// Decrypt
+//
+// GenerateDataKey
+//
+// GenerateDataKeyWithoutPlaintext
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type KMS struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "kms"
+
+// New creates a new instance of the KMS client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create a KMS client from just a session.
+//     svc := kms.New(mySession)
+//
+//     // Create a KMS client with additional configuration
+//     svc := kms.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *KMS {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *KMS {
+	svc := &KMS{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   ServiceName,
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "2014-11-01",
+				JSONVersion:   "1.1",
+				TargetPrefix:  "TrentService",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+	svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler)
+	svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler)
+	svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler)
+	svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for a KMS operation and runs any
+// custom request initialization.
+func (c *KMS) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	// Run custom request initialization if present
+	if initRequest != nil {
+		initRequest(req)
+	}
+
+	return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/lambda/api.go b/vendor/github.com/aws/aws-sdk-go/service/lambda/api.go
new file mode 100644
index 000000000..2467d596b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/lambda/api.go
@@ -0,0 +1,3350 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package lambda provides a client for AWS Lambda.
+package lambda
+
+import (
+	"io"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol"
+	"github.com/aws/aws-sdk-go/private/protocol/restjson"
+)
+
+const opAddPermission = "AddPermission"
+
+// AddPermissionRequest generates a "aws/request.Request" representing the
+// client's request for the AddPermission operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the AddPermission method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the AddPermissionRequest method.
+//    req, resp := client.AddPermissionRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Lambda) AddPermissionRequest(input *AddPermissionInput) (req *request.Request, output *AddPermissionOutput) {
+	op := &request.Operation{
+		Name:       opAddPermission,
+		HTTPMethod: "POST",
+		HTTPPath:   "/2015-03-31/functions/{FunctionName}/policy",
+	}
+
+	if input == nil {
+		input = &AddPermissionInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &AddPermissionOutput{}
+	req.Data = output
+	return
+}
+
+// Adds a permission to the resource policy associated with the specified AWS
+// Lambda function. You use resource policies to grant permissions to event
+// sources that use the push model. In a push model, event sources (such as
+// Amazon S3 and custom applications) invoke your Lambda function. Each
+// permission you add to the resource policy grants an event source permission
+// to invoke the Lambda function.
+//
+// For information about the push model, see AWS Lambda: How it Works (http://docs.aws.amazon.com/lambda/latest/dg/lambda-introduction.html).
+//
+// If you are using versioning, the permissions you add are specific to the
+// Lambda function version or alias you specify in the AddPermission request
+// via the Qualifier parameter. For more information about versioning, see AWS
+// Lambda Function Versioning and Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html).
+//
+// This operation requires permission for the lambda:AddPermission action.
+func (c *Lambda) AddPermission(input *AddPermissionInput) (*AddPermissionOutput, error) {
+	req, out := c.AddPermissionRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateAlias = "CreateAlias"
+
+// CreateAliasRequest generates a "aws/request.Request" representing the
+// client's request for the CreateAlias operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CreateAlias method directly
+// instead.
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateAliasRequest method. +// req, resp := client.CreateAliasRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Lambda) CreateAliasRequest(input *CreateAliasInput) (req *request.Request, output *AliasConfiguration) { + op := &request.Operation{ + Name: opCreateAlias, + HTTPMethod: "POST", + HTTPPath: "/2015-03-31/functions/{FunctionName}/aliases", + } + + if input == nil { + input = &CreateAliasInput{} + } + + req = c.newRequest(op, input, output) + output = &AliasConfiguration{} + req.Data = output + return +} + +// Creates an alias that points to the specified Lambda function version. For +// more information, see Introduction to AWS Lambda Aliases (http://docs.aws.amazon.com/lambda/latest/dg/aliases-intro.html). +// +// Alias names are unique for a given function. This requires permission for +// the lambda:CreateAlias action. +func (c *Lambda) CreateAlias(input *CreateAliasInput) (*AliasConfiguration, error) { + req, out := c.CreateAliasRequest(input) + err := req.Send() + return out, err +} + +const opCreateEventSourceMapping = "CreateEventSourceMapping" + +// CreateEventSourceMappingRequest generates a "aws/request.Request" representing the +// client's request for the CreateEventSourceMapping operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateEventSourceMapping method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateEventSourceMappingRequest method. +// req, resp := client.CreateEventSourceMappingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Lambda) CreateEventSourceMappingRequest(input *CreateEventSourceMappingInput) (req *request.Request, output *EventSourceMappingConfiguration) { + op := &request.Operation{ + Name: opCreateEventSourceMapping, + HTTPMethod: "POST", + HTTPPath: "/2015-03-31/event-source-mappings/", + } + + if input == nil { + input = &CreateEventSourceMappingInput{} + } + + req = c.newRequest(op, input, output) + output = &EventSourceMappingConfiguration{} + req.Data = output + return +} + +// Identifies a stream as an event source for a Lambda function. It can be either +// an Amazon Kinesis stream or an Amazon DynamoDB stream. AWS Lambda invokes +// the specified function when records are posted to the stream. +// +// This association between a stream source and a Lambda function is called +// the event source mapping. +// +// This event source mapping is relevant only in the AWS Lambda pull model, +// where AWS Lambda invokes the function. For more information, go to AWS Lambda: +// How it Works (http://docs.aws.amazon.com/lambda/latest/dg/lambda-introduction.html) +// in the AWS Lambda Developer Guide. 
You provide mapping information (for
+// example, which stream to read from and which Lambda function to invoke) in
+// the request body.
+//
+// Each event source, such as an Amazon Kinesis or a DynamoDB stream, can
+// be associated with multiple AWS Lambda functions. A given Lambda function
+// can be associated with multiple AWS event sources.
+//
+// If you are using versioning, you can specify a specific function version
+// or an alias via the function name parameter. For more information about versioning,
+// see AWS Lambda Function Versioning and Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html).
+//
+// This operation requires permission for the lambda:CreateEventSourceMapping
+// action.
+func (c *Lambda) CreateEventSourceMapping(input *CreateEventSourceMappingInput) (*EventSourceMappingConfiguration, error) {
+	req, out := c.CreateEventSourceMappingRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateFunction = "CreateFunction"
+
+// CreateFunctionRequest generates a "aws/request.Request" representing the
+// client's request for the CreateFunction operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CreateFunction method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the CreateFunctionRequest method.
+//    req, resp := client.CreateFunctionRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Lambda) CreateFunctionRequest(input *CreateFunctionInput) (req *request.Request, output *FunctionConfiguration) {
+	op := &request.Operation{
+		Name:       opCreateFunction,
+		HTTPMethod: "POST",
+		HTTPPath:   "/2015-03-31/functions",
+	}
+
+	if input == nil {
+		input = &CreateFunctionInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &FunctionConfiguration{}
+	req.Data = output
+	return
+}
+
+// Creates a new Lambda function. The function metadata is created from the
+// request parameters, and the code for the function is provided by a .zip file
+// in the request body. If the function name already exists, the operation will
+// fail. Note that the function name is case-sensitive.
+//
+// If you are using versioning, you can also publish a version of the Lambda
+// function you are creating using the Publish parameter. For more information
+// about versioning, see AWS Lambda Function Versioning and Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html).
+//
+// This operation requires permission for the lambda:CreateFunction action.
+func (c *Lambda) CreateFunction(input *CreateFunctionInput) (*FunctionConfiguration, error) {
+	req, out := c.CreateFunctionRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteAlias = "DeleteAlias"
+
+// DeleteAliasRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteAlias operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteAlias method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteAliasRequest method. +// req, resp := client.DeleteAliasRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Lambda) DeleteAliasRequest(input *DeleteAliasInput) (req *request.Request, output *DeleteAliasOutput) { + op := &request.Operation{ + Name: opDeleteAlias, + HTTPMethod: "DELETE", + HTTPPath: "/2015-03-31/functions/{FunctionName}/aliases/{Name}", + } + + if input == nil { + input = &DeleteAliasInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteAliasOutput{} + req.Data = output + return +} + +// Deletes the specified Lambda function alias. For more information, see Introduction +// to AWS Lambda Aliases (http://docs.aws.amazon.com/lambda/latest/dg/aliases-intro.html). +// +// This requires permission for the lambda:DeleteAlias action. +func (c *Lambda) DeleteAlias(input *DeleteAliasInput) (*DeleteAliasOutput, error) { + req, out := c.DeleteAliasRequest(input) + err := req.Send() + return out, err +} + +const opDeleteEventSourceMapping = "DeleteEventSourceMapping" + +// DeleteEventSourceMappingRequest generates a "aws/request.Request" representing the +// client's request for the DeleteEventSourceMapping operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteEventSourceMapping method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteEventSourceMappingRequest method. +// req, resp := client.DeleteEventSourceMappingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Lambda) DeleteEventSourceMappingRequest(input *DeleteEventSourceMappingInput) (req *request.Request, output *EventSourceMappingConfiguration) { + op := &request.Operation{ + Name: opDeleteEventSourceMapping, + HTTPMethod: "DELETE", + HTTPPath: "/2015-03-31/event-source-mappings/{UUID}", + } + + if input == nil { + input = &DeleteEventSourceMappingInput{} + } + + req = c.newRequest(op, input, output) + output = &EventSourceMappingConfiguration{} + req.Data = output + return +} + +// Removes an event source mapping. This means AWS Lambda will no longer invoke +// the function for events in the associated source. +// +// This operation requires permission for the lambda:DeleteEventSourceMapping +// action. 
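+//
+// As a hedged, minimal sketch (not part of the generated API surface), assuming
+// a client built elsewhere and a hypothetical mapping UUID, the call could look
+// like:
+//
+//    svc := lambda.New(session.New())
+//    _, err := svc.DeleteEventSourceMapping(&lambda.DeleteEventSourceMappingInput{
+//        UUID: aws.String("11111111-2222-3333-4444-555555555555"), // hypothetical UUID
+//    })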
+func (c *Lambda) DeleteEventSourceMapping(input *DeleteEventSourceMappingInput) (*EventSourceMappingConfiguration, error) { + req, out := c.DeleteEventSourceMappingRequest(input) + err := req.Send() + return out, err +} + +const opDeleteFunction = "DeleteFunction" + +// DeleteFunctionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteFunction operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteFunction method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteFunctionRequest method. +// req, resp := client.DeleteFunctionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Lambda) DeleteFunctionRequest(input *DeleteFunctionInput) (req *request.Request, output *DeleteFunctionOutput) { + op := &request.Operation{ + Name: opDeleteFunction, + HTTPMethod: "DELETE", + HTTPPath: "/2015-03-31/functions/{FunctionName}", + } + + if input == nil { + input = &DeleteFunctionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteFunctionOutput{} + req.Data = output + return +} + +// Deletes the specified Lambda function code and configuration. +// +// If you are using the versioning feature and you don't specify a function +// version in your DeleteFunction request, AWS Lambda will delete the function, +// including all its versions, and any aliases pointing to the function versions. +// To delete a specific function version, you must provide the function version +// via the Qualifier parameter. For information about function versioning, see +// AWS Lambda Function Versioning and Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html). +// +// When you delete a function the associated resource policy is also deleted. +// You will need to delete the event source mappings explicitly. +// +// This operation requires permission for the lambda:DeleteFunction action. +func (c *Lambda) DeleteFunction(input *DeleteFunctionInput) (*DeleteFunctionOutput, error) { + req, out := c.DeleteFunctionRequest(input) + err := req.Send() + return out, err +} + +const opGetAlias = "GetAlias" + +// GetAliasRequest generates a "aws/request.Request" representing the +// client's request for the GetAlias operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetAlias method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
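+//
+// As a hedged aside, if you only need the response, the one-shot GetAlias helper
+// below can be called directly (the function and alias names here are hypothetical):
+//
+//    cfg, err := svc.GetAlias(&lambda.GetAliasInput{
+//        FunctionName: aws.String("helloworld"), // hypothetical function name
+//        Name:         aws.String("BETA"),       // hypothetical alias name
+//    })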
+// +// // Example sending a request using the GetAliasRequest method. +// req, resp := client.GetAliasRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Lambda) GetAliasRequest(input *GetAliasInput) (req *request.Request, output *AliasConfiguration) { + op := &request.Operation{ + Name: opGetAlias, + HTTPMethod: "GET", + HTTPPath: "/2015-03-31/functions/{FunctionName}/aliases/{Name}", + } + + if input == nil { + input = &GetAliasInput{} + } + + req = c.newRequest(op, input, output) + output = &AliasConfiguration{} + req.Data = output + return +} + +// Returns the specified alias information such as the alias ARN, description, +// and function version it is pointing to. For more information, see Introduction +// to AWS Lambda Aliases (http://docs.aws.amazon.com/lambda/latest/dg/aliases-intro.html). +// +// This requires permission for the lambda:GetAlias action. +func (c *Lambda) GetAlias(input *GetAliasInput) (*AliasConfiguration, error) { + req, out := c.GetAliasRequest(input) + err := req.Send() + return out, err +} + +const opGetEventSourceMapping = "GetEventSourceMapping" + +// GetEventSourceMappingRequest generates a "aws/request.Request" representing the +// client's request for the GetEventSourceMapping operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetEventSourceMapping method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetEventSourceMappingRequest method. +// req, resp := client.GetEventSourceMappingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Lambda) GetEventSourceMappingRequest(input *GetEventSourceMappingInput) (req *request.Request, output *EventSourceMappingConfiguration) { + op := &request.Operation{ + Name: opGetEventSourceMapping, + HTTPMethod: "GET", + HTTPPath: "/2015-03-31/event-source-mappings/{UUID}", + } + + if input == nil { + input = &GetEventSourceMappingInput{} + } + + req = c.newRequest(op, input, output) + output = &EventSourceMappingConfiguration{} + req.Data = output + return +} + +// Returns configuration information for the specified event source mapping +// (see CreateEventSourceMapping). +// +// This operation requires permission for the lambda:GetEventSourceMapping +// action. +func (c *Lambda) GetEventSourceMapping(input *GetEventSourceMappingInput) (*EventSourceMappingConfiguration, error) { + req, out := c.GetEventSourceMappingRequest(input) + err := req.Send() + return out, err +} + +const opGetFunction = "GetFunction" + +// GetFunctionRequest generates a "aws/request.Request" representing the +// client's request for the GetFunction operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
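+//
+// As a hedged sketch (assuming a client "svc"; omitting the optional Qualifier
+// targets the $LATEST version), a one-shot call might look like:
+//
+//    out, err := svc.GetFunction(&lambda.GetFunctionInput{
+//        FunctionName: aws.String("helloworld"), // hypothetical function name
+//    })
+//    if err == nil {
+//        fmt.Println(out)
+//    }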
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the GetFunction method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the GetFunctionRequest method.
+//    req, resp := client.GetFunctionRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Lambda) GetFunctionRequest(input *GetFunctionInput) (req *request.Request, output *GetFunctionOutput) {
+	op := &request.Operation{
+		Name:       opGetFunction,
+		HTTPMethod: "GET",
+		HTTPPath:   "/2015-03-31/functions/{FunctionName}",
+	}
+
+	if input == nil {
+		input = &GetFunctionInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &GetFunctionOutput{}
+	req.Data = output
+	return
+}
+
+// Returns the configuration information of the Lambda function and a presigned
+// URL link to the .zip file you uploaded with CreateFunction so you can download
+// the .zip file. Note that the URL is valid for up to 10 minutes. The configuration
+// information is the same information you provided as parameters when uploading
+// the function.
+//
+// Using the optional Qualifier parameter, you can specify a specific function
+// version for which you want this information. If you don't specify this parameter,
+// the API uses the unqualified function ARN, which returns information about
+// the $LATEST version of the Lambda function. For more information, see AWS
+// Lambda Function Versioning and Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html).
+//
+// This operation requires permission for the lambda:GetFunction action.
+func (c *Lambda) GetFunction(input *GetFunctionInput) (*GetFunctionOutput, error) {
+	req, out := c.GetFunctionRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opGetFunctionConfiguration = "GetFunctionConfiguration"
+
+// GetFunctionConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the GetFunctionConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the GetFunctionConfiguration method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the GetFunctionConfigurationRequest method.
+//    req, resp := client.GetFunctionConfigurationRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Lambda) GetFunctionConfigurationRequest(input *GetFunctionConfigurationInput) (req *request.Request, output *FunctionConfiguration) {
+	op := &request.Operation{
+		Name:       opGetFunctionConfiguration,
+		HTTPMethod: "GET",
+		HTTPPath:   "/2015-03-31/functions/{FunctionName}/configuration",
+	}
+
+	if input == nil {
+		input = &GetFunctionConfigurationInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &FunctionConfiguration{}
+	req.Data = output
+	return
+}
+
+// Returns the configuration information of the Lambda function. This is the
+// same information you provided as parameters when uploading the function by
+// using CreateFunction.
+//
+// If you are using the versioning feature, you can retrieve this information
+// for a specific function version by using the optional Qualifier parameter
+// and specifying the function version or alias that points to it. If you don't
+// provide it, the API returns information about the $LATEST version of the
+// function. For more information about versioning, see AWS Lambda Function
+// Versioning and Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html).
+//
+// This operation requires permission for the lambda:GetFunctionConfiguration
+// operation.
+func (c *Lambda) GetFunctionConfiguration(input *GetFunctionConfigurationInput) (*FunctionConfiguration, error) {
+	req, out := c.GetFunctionConfigurationRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opGetPolicy = "GetPolicy"
+
+// GetPolicyRequest generates a "aws/request.Request" representing the
+// client's request for the GetPolicy operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the GetPolicy method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the GetPolicyRequest method.
+//    req, resp := client.GetPolicyRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Lambda) GetPolicyRequest(input *GetPolicyInput) (req *request.Request, output *GetPolicyOutput) {
+	op := &request.Operation{
+		Name:       opGetPolicy,
+		HTTPMethod: "GET",
+		HTTPPath:   "/2015-03-31/functions/{FunctionName}/policy",
+	}
+
+	if input == nil {
+		input = &GetPolicyInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &GetPolicyOutput{}
+	req.Data = output
+	return
+}
+
+// Returns the resource policy associated with the specified Lambda function.
+//
+// If you are using the versioning feature, you can get the resource policy
+// associated with the specific Lambda function version or alias by specifying
+// the version or alias name using the Qualifier parameter. For more information
+// about versioning, see AWS Lambda Function Versioning and Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html).
+//
+// For information about adding permissions, see AddPermission.
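+//
+// A minimal, hedged sketch (assuming a client "svc" and a hypothetical function
+// name; the Policy field on the output is assumed here):
+//
+//    out, err := svc.GetPolicy(&lambda.GetPolicyInput{
+//        FunctionName: aws.String("helloworld"), // hypothetical function name
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.Policy)) // assumed output field
+//    }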
+//
+// You need permission for the lambda:GetPolicy action.
+func (c *Lambda) GetPolicy(input *GetPolicyInput) (*GetPolicyOutput, error) {
+	req, out := c.GetPolicyRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opInvoke = "Invoke"
+
+// InvokeRequest generates a "aws/request.Request" representing the
+// client's request for the Invoke operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the Invoke method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the InvokeRequest method.
+//    req, resp := client.InvokeRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Lambda) InvokeRequest(input *InvokeInput) (req *request.Request, output *InvokeOutput) {
+	op := &request.Operation{
+		Name:       opInvoke,
+		HTTPMethod: "POST",
+		HTTPPath:   "/2015-03-31/functions/{FunctionName}/invocations",
+	}
+
+	if input == nil {
+		input = &InvokeInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &InvokeOutput{}
+	req.Data = output
+	return
+}
+
+// Invokes a specific Lambda function.
+//
+// If you are using the versioning feature, you can invoke a specific function
+// version by providing the function version or alias name that points to the
+// function version, using the Qualifier parameter in the request. If you don't
+// provide the Qualifier parameter, the $LATEST version of the Lambda function
+// is invoked. For information about the versioning feature, see AWS Lambda
+// Function Versioning and Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html).
+//
+// This operation requires permission for the lambda:InvokeFunction action.
+func (c *Lambda) Invoke(input *InvokeInput) (*InvokeOutput, error) {
+	req, out := c.InvokeRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opInvokeAsync = "InvokeAsync"
+
+// InvokeAsyncRequest generates a "aws/request.Request" representing the
+// client's request for the InvokeAsync operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the InvokeAsync method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the InvokeAsyncRequest method.
+//    req, resp := client.InvokeAsyncRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Lambda) InvokeAsyncRequest(input *InvokeAsyncInput) (req *request.Request, output *InvokeAsyncOutput) {
+	if c.Client.Config.Logger != nil {
+		c.Client.Config.Logger.Log("This operation, InvokeAsync, has been deprecated")
+	}
+	op := &request.Operation{
+		Name:       opInvokeAsync,
+		HTTPMethod: "POST",
+		HTTPPath:   "/2014-11-13/functions/{FunctionName}/invoke-async/",
+	}
+
+	if input == nil {
+		input = &InvokeAsyncInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &InvokeAsyncOutput{}
+	req.Data = output
+	return
+}
+
+// This API is deprecated. We recommend you use the Invoke API (see Invoke).
+// Submits an invocation request to AWS Lambda. Upon receiving the request,
+// Lambda executes the specified function asynchronously. To see the logs generated
+// by the Lambda function execution, see the CloudWatch Logs console.
+//
+// This operation requires permission for the lambda:InvokeFunction action.
+func (c *Lambda) InvokeAsync(input *InvokeAsyncInput) (*InvokeAsyncOutput, error) {
+	req, out := c.InvokeAsyncRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opListAliases = "ListAliases"
+
+// ListAliasesRequest generates a "aws/request.Request" representing the
+// client's request for the ListAliases operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ListAliases method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the ListAliasesRequest method.
+//    req, resp := client.ListAliasesRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Lambda) ListAliasesRequest(input *ListAliasesInput) (req *request.Request, output *ListAliasesOutput) {
+	op := &request.Operation{
+		Name:       opListAliases,
+		HTTPMethod: "GET",
+		HTTPPath:   "/2015-03-31/functions/{FunctionName}/aliases",
+	}
+
+	if input == nil {
+		input = &ListAliasesInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ListAliasesOutput{}
+	req.Data = output
+	return
+}
+
+// Returns a list of aliases created for a Lambda function. For each alias,
+// the response includes information such as the alias ARN, description, alias
+// name, and the function version to which it points. For more information,
+// see Introduction to AWS Lambda Aliases (http://docs.aws.amazon.com/lambda/latest/dg/aliases-intro.html).
+//
+// This requires permission for the lambda:ListAliases action.
+func (c *Lambda) ListAliases(input *ListAliasesInput) (*ListAliasesOutput, error) {
+	req, out := c.ListAliasesRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opListEventSourceMappings = "ListEventSourceMappings"
+
+// ListEventSourceMappingsRequest generates a "aws/request.Request" representing the
+// client's request for the ListEventSourceMappings operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
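+//
+// As a hedged sketch (assuming a client "svc"; the EventSourceArn filter field
+// is assumed from this operation's documentation), listing the mappings for a
+// single stream might look like:
+//
+//    out, err := svc.ListEventSourceMappings(&lambda.ListEventSourceMappingsInput{
+//        EventSourceArn: aws.String("arn:aws:kinesis:us-west-2:111122223333:stream/example"), // hypothetical ARN
+//    })
+//    if err == nil {
+//        fmt.Println(out)
+//    }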
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ListEventSourceMappings method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the ListEventSourceMappingsRequest method.
+//    req, resp := client.ListEventSourceMappingsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Lambda) ListEventSourceMappingsRequest(input *ListEventSourceMappingsInput) (req *request.Request, output *ListEventSourceMappingsOutput) {
+	op := &request.Operation{
+		Name:       opListEventSourceMappings,
+		HTTPMethod: "GET",
+		HTTPPath:   "/2015-03-31/event-source-mappings/",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"Marker"},
+			OutputTokens:    []string{"NextMarker"},
+			LimitToken:      "MaxItems",
+			TruncationToken: "",
+		},
+	}
+
+	if input == nil {
+		input = &ListEventSourceMappingsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ListEventSourceMappingsOutput{}
+	req.Data = output
+	return
+}
+
+// Returns a list of event source mappings you created using the CreateEventSourceMapping
+// (see CreateEventSourceMapping).
+//
+// For each mapping, the API returns configuration information. You can optionally
+// specify filters to retrieve specific event source mappings.
+//
+// If you are using the versioning feature, you can get a list of event source
+// mappings for a specific Lambda function version or an alias as described
+// in the FunctionName parameter. For information about the versioning feature,
+// see AWS Lambda Function Versioning and Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html).
+//
+// This operation requires permission for the lambda:ListEventSourceMappings
+// action.
+func (c *Lambda) ListEventSourceMappings(input *ListEventSourceMappingsInput) (*ListEventSourceMappingsOutput, error) {
+	req, out := c.ListEventSourceMappingsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// ListEventSourceMappingsPages iterates over the pages of a ListEventSourceMappings operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListEventSourceMappings method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+//    // Example iterating over at most 3 pages of a ListEventSourceMappings operation.
+//    pageNum := 0
+//    err := client.ListEventSourceMappingsPages(params,
+//        func(page *ListEventSourceMappingsOutput, lastPage bool) bool {
+//            pageNum++
+//            fmt.Println(page)
+//            return pageNum <= 3
+//        })
+//
+func (c *Lambda) ListEventSourceMappingsPages(input *ListEventSourceMappingsInput, fn func(p *ListEventSourceMappingsOutput, lastPage bool) (shouldContinue bool)) error {
+	page, _ := c.ListEventSourceMappingsRequest(input)
+	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+	return page.EachPage(func(p interface{}, lastPage bool) bool {
+		return fn(p.(*ListEventSourceMappingsOutput), lastPage)
+	})
+}
+
+const opListFunctions = "ListFunctions"
+
+// ListFunctionsRequest generates a "aws/request.Request" representing the
+// client's request for the ListFunctions operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ListFunctions method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the ListFunctionsRequest method.
+//    req, resp := client.ListFunctionsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Lambda) ListFunctionsRequest(input *ListFunctionsInput) (req *request.Request, output *ListFunctionsOutput) {
+	op := &request.Operation{
+		Name:       opListFunctions,
+		HTTPMethod: "GET",
+		HTTPPath:   "/2015-03-31/functions/",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"Marker"},
+			OutputTokens:    []string{"NextMarker"},
+			LimitToken:      "MaxItems",
+			TruncationToken: "",
+		},
+	}
+
+	if input == nil {
+		input = &ListFunctionsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ListFunctionsOutput{}
+	req.Data = output
+	return
+}
+
+// Returns a list of your Lambda functions. For each function, the response
+// includes the function configuration information. You must use GetFunction
+// to retrieve the code for your function.
+//
+// This operation requires permission for the lambda:ListFunctions action.
+//
+// If you are using the versioning feature, the response returns a list of
+// $LATEST versions of your functions. For information about the versioning
+// feature, see AWS Lambda Function Versioning and Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html).
+func (c *Lambda) ListFunctions(input *ListFunctionsInput) (*ListFunctionsOutput, error) {
+	req, out := c.ListFunctionsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// ListFunctionsPages iterates over the pages of a ListFunctions operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListFunctions method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+//    // Example iterating over at most 3 pages of a ListFunctions operation.
+// pageNum := 0 +// err := client.ListFunctionsPages(params, +// func(page *ListFunctionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Lambda) ListFunctionsPages(input *ListFunctionsInput, fn func(p *ListFunctionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListFunctionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListFunctionsOutput), lastPage) + }) +} + +const opListVersionsByFunction = "ListVersionsByFunction" + +// ListVersionsByFunctionRequest generates a "aws/request.Request" representing the +// client's request for the ListVersionsByFunction operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListVersionsByFunction method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListVersionsByFunctionRequest method. +// req, resp := client.ListVersionsByFunctionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Lambda) ListVersionsByFunctionRequest(input *ListVersionsByFunctionInput) (req *request.Request, output *ListVersionsByFunctionOutput) { + op := &request.Operation{ + Name: opListVersionsByFunction, + HTTPMethod: "GET", + HTTPPath: "/2015-03-31/functions/{FunctionName}/versions", + } + + if input == nil { + input = &ListVersionsByFunctionInput{} + } + + req = c.newRequest(op, input, output) + output = &ListVersionsByFunctionOutput{} + req.Data = output + return +} + +// List all versions of a function. For information about the versioning feature, +// see AWS Lambda Function Versioning and Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html). +func (c *Lambda) ListVersionsByFunction(input *ListVersionsByFunctionInput) (*ListVersionsByFunctionOutput, error) { + req, out := c.ListVersionsByFunctionRequest(input) + err := req.Send() + return out, err +} + +const opPublishVersion = "PublishVersion" + +// PublishVersionRequest generates a "aws/request.Request" representing the +// client's request for the PublishVersion operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PublishVersion method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PublishVersionRequest method. 
+//    req, resp := client.PublishVersionRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Lambda) PublishVersionRequest(input *PublishVersionInput) (req *request.Request, output *FunctionConfiguration) {
+	op := &request.Operation{
+		Name:       opPublishVersion,
+		HTTPMethod: "POST",
+		HTTPPath:   "/2015-03-31/functions/{FunctionName}/versions",
+	}
+
+	if input == nil {
+		input = &PublishVersionInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &FunctionConfiguration{}
+	req.Data = output
+	return
+}
+
+// Publishes a version of your function from the current snapshot of $LATEST.
+// That is, AWS Lambda takes a snapshot of the function code and configuration
+// information from $LATEST and publishes a new version. The code and configuration
+// cannot be modified after publication. For information about the versioning
+// feature, see AWS Lambda Function Versioning and Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html).
+func (c *Lambda) PublishVersion(input *PublishVersionInput) (*FunctionConfiguration, error) {
+	req, out := c.PublishVersionRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opRemovePermission = "RemovePermission"
+
+// RemovePermissionRequest generates a "aws/request.Request" representing the
+// client's request for the RemovePermission operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the RemovePermission method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the RemovePermissionRequest method.
+//    req, resp := client.RemovePermissionRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Lambda) RemovePermissionRequest(input *RemovePermissionInput) (req *request.Request, output *RemovePermissionOutput) {
+	op := &request.Operation{
+		Name:       opRemovePermission,
+		HTTPMethod: "DELETE",
+		HTTPPath:   "/2015-03-31/functions/{FunctionName}/policy/{StatementId}",
+	}
+
+	if input == nil {
+		input = &RemovePermissionInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	output = &RemovePermissionOutput{}
+	req.Data = output
+	return
+}
+
+// You can remove individual permissions from a resource policy associated
+// with a Lambda function by providing a statement ID that you provided when
+// you added the permission.
+//
+// If you are using versioning, the permissions you remove are specific to
+// the Lambda function version or alias you specify in the AddPermission request
+// via the Qualifier parameter. For more information about versioning, see AWS
+// Lambda Function Versioning and Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html).
+//
+// Note that removal of a permission will cause an active event source to lose
+// permission to the function.
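+//
+// A hedged sketch (assuming a client "svc"; the names below are hypothetical
+// and should match the statement ID used when the permission was added):
+//
+//    _, err := svc.RemovePermission(&lambda.RemovePermissionInput{
+//        FunctionName: aws.String("helloworld"),    // hypothetical function name
+//        StatementId:  aws.String("AllowS3Invoke"), // hypothetical statement ID
+//    })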
+// +// You need permission for the lambda:RemovePermission action. +func (c *Lambda) RemovePermission(input *RemovePermissionInput) (*RemovePermissionOutput, error) { + req, out := c.RemovePermissionRequest(input) + err := req.Send() + return out, err +} + +const opUpdateAlias = "UpdateAlias" + +// UpdateAliasRequest generates a "aws/request.Request" representing the +// client's request for the UpdateAlias operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateAlias method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateAliasRequest method. +// req, resp := client.UpdateAliasRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Lambda) UpdateAliasRequest(input *UpdateAliasInput) (req *request.Request, output *AliasConfiguration) { + op := &request.Operation{ + Name: opUpdateAlias, + HTTPMethod: "PUT", + HTTPPath: "/2015-03-31/functions/{FunctionName}/aliases/{Name}", + } + + if input == nil { + input = &UpdateAliasInput{} + } + + req = c.newRequest(op, input, output) + output = &AliasConfiguration{} + req.Data = output + return +} + +// Using this API you can update the function version to which the alias points +// and the alias description. For more information, see Introduction to AWS +// Lambda Aliases (http://docs.aws.amazon.com/lambda/latest/dg/aliases-intro.html). +// +// This requires permission for the lambda:UpdateAlias action. +func (c *Lambda) UpdateAlias(input *UpdateAliasInput) (*AliasConfiguration, error) { + req, out := c.UpdateAliasRequest(input) + err := req.Send() + return out, err +} + +const opUpdateEventSourceMapping = "UpdateEventSourceMapping" + +// UpdateEventSourceMappingRequest generates a "aws/request.Request" representing the +// client's request for the UpdateEventSourceMapping operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateEventSourceMapping method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateEventSourceMappingRequest method. 
+//    req, resp := client.UpdateEventSourceMappingRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Lambda) UpdateEventSourceMappingRequest(input *UpdateEventSourceMappingInput) (req *request.Request, output *EventSourceMappingConfiguration) {
+	op := &request.Operation{
+		Name:       opUpdateEventSourceMapping,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/2015-03-31/event-source-mappings/{UUID}",
+	}
+
+	if input == nil {
+		input = &UpdateEventSourceMappingInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &EventSourceMappingConfiguration{}
+	req.Data = output
+	return
+}
+
+// You can update an event source mapping. This is useful if you want to change
+// the parameters of the existing mapping without losing your position in the
+// stream. You can change which function will receive the stream records, but
+// to change the stream itself, you must create a new mapping.
+//
+// If you are using the versioning feature, you can update the event source
+// mapping to map to a specific Lambda function version or alias as described
+// in the FunctionName parameter. For information about the versioning feature,
+// see AWS Lambda Function Versioning and Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html).
+//
+// If you disable the event source mapping, AWS Lambda stops polling. If you
+// enable it again, it resumes polling from the point where it stopped, so you
+// don't lose processing of any records. However, if you delete the event source
+// mapping and create it again, the position is reset.
+//
+// This operation requires permission for the lambda:UpdateEventSourceMapping
+// action.
+func (c *Lambda) UpdateEventSourceMapping(input *UpdateEventSourceMappingInput) (*EventSourceMappingConfiguration, error) {
+	req, out := c.UpdateEventSourceMappingRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opUpdateFunctionCode = "UpdateFunctionCode"
+
+// UpdateFunctionCodeRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateFunctionCode operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the UpdateFunctionCode method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the UpdateFunctionCodeRequest method.
+//    req, resp := client.UpdateFunctionCodeRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Lambda) UpdateFunctionCodeRequest(input *UpdateFunctionCodeInput) (req *request.Request, output *FunctionConfiguration) {
+	op := &request.Operation{
+		Name:       opUpdateFunctionCode,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/2015-03-31/functions/{FunctionName}/code",
+	}
+
+	if input == nil {
+		input = &UpdateFunctionCodeInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &FunctionConfiguration{}
+	req.Data = output
+	return
+}
+
+// Updates the code for the specified Lambda function.
This operation must only +// be used on an existing Lambda function and cannot be used to update the function +// configuration. +// +// If you are using the versioning feature, note this API will always update +// the $LATEST version of your Lambda function. For information about the versioning +// feature, see AWS Lambda Function Versioning and Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html). +// +// This operation requires permission for the lambda:UpdateFunctionCode action. +func (c *Lambda) UpdateFunctionCode(input *UpdateFunctionCodeInput) (*FunctionConfiguration, error) { + req, out := c.UpdateFunctionCodeRequest(input) + err := req.Send() + return out, err +} + +const opUpdateFunctionConfiguration = "UpdateFunctionConfiguration" + +// UpdateFunctionConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the UpdateFunctionConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateFunctionConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateFunctionConfigurationRequest method. +// req, resp := client.UpdateFunctionConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Lambda) UpdateFunctionConfigurationRequest(input *UpdateFunctionConfigurationInput) (req *request.Request, output *FunctionConfiguration) { + op := &request.Operation{ + Name: opUpdateFunctionConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/2015-03-31/functions/{FunctionName}/configuration", + } + + if input == nil { + input = &UpdateFunctionConfigurationInput{} + } + + req = c.newRequest(op, input, output) + output = &FunctionConfiguration{} + req.Data = output + return +} + +// Updates the configuration parameters for the specified Lambda function by +// using the values provided in the request. You provide only the parameters +// you want to change. This operation must only be used on an existing Lambda +// function and cannot be used to update the function's code. +// +// If you are using the versioning feature, note this API will always update +// the $LATEST version of your Lambda function. For information about the versioning +// feature, see AWS Lambda Function Versioning and Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html). +// +// This operation requires permission for the lambda:UpdateFunctionConfiguration +// action. +func (c *Lambda) UpdateFunctionConfiguration(input *UpdateFunctionConfigurationInput) (*FunctionConfiguration, error) { + req, out := c.UpdateFunctionConfigurationRequest(input) + err := req.Send() + return out, err +} + +type AddPermissionInput struct { + _ struct{} `type:"structure"` + + // The AWS Lambda action you want to allow in this statement. Each Lambda action + // is a string starting with lambda: followed by the API name (see Operations). + // For example, lambda:CreateFunction. 
You can use a wildcard (lambda:*) to grant
+	// permission for all AWS Lambda actions.
+	Action *string `type:"string" required:"true"`
+
+	EventSourceToken *string `type:"string"`
+
+	// Name of the Lambda function whose resource policy you are updating by adding
+	// a new permission.
+	//
+	// You can specify a function name (for example, Thumbnail) or you can specify
+	// Amazon Resource Name (ARN) of the function (for example, arn:aws:lambda:us-west-2:account-id:function:ThumbNail).
+	// AWS Lambda also allows you to specify a partial ARN (for example, account-id:Thumbnail).
+	// Note that the length constraint applies only to the ARN. If you specify only
+	// the function name, it is limited to 64 characters in length.
+	FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"`
+
+	// The principal who is getting this permission. It can be the Amazon S3 service
+	// principal (s3.amazonaws.com) if you want Amazon S3 to invoke the function,
+	// an AWS account ID if you are granting cross-account permission, or any valid
+	// AWS service principal such as sns.amazonaws.com. For example, you might want
+	// to allow a custom application in another AWS account to push events to AWS
+	// Lambda by invoking your function.
+	Principal *string `type:"string" required:"true"`
+
+	// You can use this optional query parameter to describe a qualified ARN using
+	// a function version or an alias name. The permission will then apply to the
+	// specific qualified ARN. For example, if you specify function version 2 as
+	// the qualifier, then permission applies only when a request is made using
+	// the qualified function ARN:
+	//
+	// arn:aws:lambda:aws-region:acct-id:function:function-name:2
+	//
+	// If you specify an alias name, for example PROD, then the permission is valid
+	// only for requests made using the alias ARN:
+	//
+	// arn:aws:lambda:aws-region:acct-id:function:function-name:PROD
+	//
+	// If the qualifier is not specified, the permission is valid only when a request
+	// is made using the unqualified function ARN:
+	//
+	// arn:aws:lambda:aws-region:acct-id:function:function-name
+	Qualifier *string `location:"querystring" locationName:"Qualifier" min:"1" type:"string"`
+
+	// The AWS account ID (without a hyphen) of the source owner. For example, if
+	// the SourceArn identifies a bucket, then this is the bucket owner's account
+	// ID. You can use this additional condition to ensure the bucket you specify
+	// is owned by a specific account (it is possible the bucket owner deleted the
+	// bucket and some other AWS account created the bucket). You can also use this
+	// condition to specify all sources (that is, you don't specify the SourceArn)
+	// owned by a specific account.
+	SourceAccount *string `type:"string"`
+
+	// This is optional; however, when granting Amazon S3 permission to invoke your
+	// function, you should specify this field with the bucket Amazon Resource Name
+	// (ARN) as its value. This ensures that only events generated from the specified
+	// bucket can invoke the function.
+	//
+	// If you add a permission for the Amazon S3 principal without providing the
+	// source ARN, any AWS account that creates a mapping to your function ARN can
+	// send events to invoke your Lambda function from Amazon S3.
+	SourceArn *string `type:"string"`
+
+	// A unique statement identifier.
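+	// For example, a caller might use a hypothetical identifier such as "AllowS3Invoke".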
+	StatementId *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AddPermissionInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddPermissionInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AddPermissionInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "AddPermissionInput"}
+	if s.Action == nil {
+		invalidParams.Add(request.NewErrParamRequired("Action"))
+	}
+	if s.FunctionName == nil {
+		invalidParams.Add(request.NewErrParamRequired("FunctionName"))
+	}
+	if s.FunctionName != nil && len(*s.FunctionName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("FunctionName", 1))
+	}
+	if s.Principal == nil {
+		invalidParams.Add(request.NewErrParamRequired("Principal"))
+	}
+	if s.Qualifier != nil && len(*s.Qualifier) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Qualifier", 1))
+	}
+	if s.StatementId == nil {
+		invalidParams.Add(request.NewErrParamRequired("StatementId"))
+	}
+	if s.StatementId != nil && len(*s.StatementId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("StatementId", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type AddPermissionOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The permission statement you specified in the request. The response returns
+	// the same statement as a string, using a backslash ("\") as an escape character
+	// in the JSON.
+	Statement *string `type:"string"`
+}
+
+// String returns the string representation
+func (s AddPermissionOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddPermissionOutput) GoString() string {
+	return s.String()
+}
+
+// Provides configuration information about a Lambda function version alias.
+type AliasConfiguration struct {
+	_ struct{} `type:"structure"`
+
+	// Lambda function ARN that is qualified using the alias name as the suffix.
+	// For example, if you create an alias called BETA that points to a helloworld
+	// function version, the ARN is arn:aws:lambda:aws-region:acct-id:function:helloworld:BETA.
+	AliasArn *string `type:"string"`
+
+	// Alias description.
+	Description *string `type:"string"`
+
+	// Function version to which the alias points.
+	FunctionVersion *string `min:"1" type:"string"`
+
+	// Alias name.
+	Name *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s AliasConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AliasConfiguration) GoString() string {
+	return s.String()
+}
+
+type CreateAliasInput struct {
+	_ struct{} `type:"structure"`
+
+	// Description of the alias.
+	Description *string `type:"string"`
+
+	// Name of the Lambda function for which you want to create an alias.
+	FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"`
+
+	// Lambda function version for which you are creating the alias.
+	FunctionVersion *string `min:"1" type:"string" required:"true"`
+
+	// Name for the alias you are creating.
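+	// For example, a hypothetical alias name such as "BETA" or "PROD".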
+	Name *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateAliasInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateAliasInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateAliasInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateAliasInput"}
+	if s.FunctionName == nil {
+		invalidParams.Add(request.NewErrParamRequired("FunctionName"))
+	}
+	if s.FunctionName != nil && len(*s.FunctionName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("FunctionName", 1))
+	}
+	if s.FunctionVersion == nil {
+		invalidParams.Add(request.NewErrParamRequired("FunctionVersion"))
+	}
+	if s.FunctionVersion != nil && len(*s.FunctionVersion) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("FunctionVersion", 1))
+	}
+	if s.Name == nil {
+		invalidParams.Add(request.NewErrParamRequired("Name"))
+	}
+	if s.Name != nil && len(*s.Name) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type CreateEventSourceMappingInput struct {
+	_ struct{} `type:"structure"`
+
+	// The largest number of records that AWS Lambda will retrieve from your event
+	// source at the time of invoking your function. Your function receives an event
+	// with all the retrieved records. The default is 100 records.
+	BatchSize *int64 `min:"1" type:"integer"`
+
+	// Indicates whether AWS Lambda should begin polling the event source. By default,
+	// Enabled is true.
+	Enabled *bool `type:"boolean"`
+
+	// The Amazon Resource Name (ARN) of the Amazon Kinesis or the Amazon DynamoDB
+	// stream that is the event source. Any record added to this stream could cause
+	// AWS Lambda to invoke your Lambda function, depending on the BatchSize. AWS
+	// Lambda POSTs the Amazon Kinesis event, containing records, to your Lambda
+	// function as JSON.
+	EventSourceArn *string `type:"string" required:"true"`
+
+	// The Lambda function to invoke when AWS Lambda detects an event on the stream.
+	//
+	// You can specify the function name (for example, Thumbnail) or you can specify
+	// Amazon Resource Name (ARN) of the function (for example, arn:aws:lambda:us-west-2:account-id:function:ThumbNail).
+	//
+	// If you are using versioning, you can also provide a qualified function
+	// ARN (ARN that is qualified with function version or alias name as suffix).
+	// For more information about versioning, see AWS Lambda Function Versioning
+	// and Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html)
+	//
+	// AWS Lambda also allows you to specify only the function name with the account
+	// ID qualifier (for example, account-id:Thumbnail).
+	//
+	// Note that the length constraint applies only to the ARN. If you specify
+	// only the function name, it is limited to 64 characters in length.
+	FunctionName *string `min:"1" type:"string" required:"true"`
+
+	// The position in the stream where AWS Lambda should start reading. For more
+	// information, go to ShardIteratorType (http://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetShardIterator.html#Kinesis-GetShardIterator-request-ShardIteratorType)
+	// in the Amazon Kinesis API Reference.
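+	// For example, TRIM_HORIZON starts reading from the oldest available record;
+	// see the linked reference for the full set of valid values.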
+ StartingPosition *string `type:"string" required:"true" enum:"EventSourcePosition"` +} + +// String returns the string representation +func (s CreateEventSourceMappingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateEventSourceMappingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateEventSourceMappingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateEventSourceMappingInput"} + if s.BatchSize != nil && *s.BatchSize < 1 { + invalidParams.Add(request.NewErrParamMinValue("BatchSize", 1)) + } + if s.EventSourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("EventSourceArn")) + } + if s.FunctionName == nil { + invalidParams.Add(request.NewErrParamRequired("FunctionName")) + } + if s.FunctionName != nil && len(*s.FunctionName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FunctionName", 1)) + } + if s.StartingPosition == nil { + invalidParams.Add(request.NewErrParamRequired("StartingPosition")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateFunctionInput struct { + _ struct{} `type:"structure"` + + // The code for the Lambda function. + Code *FunctionCode `type:"structure" required:"true"` + + // A short, user-defined function description. Lambda does not use this value. + // Assign a meaningful description as you see fit. + Description *string `type:"string"` + + // The name you want to assign to the function you are uploading. The function + // names appear in the console and are returned in the ListFunctions API. Function + // names are used to specify functions to other AWS Lambda APIs, such as Invoke. + FunctionName *string `min:"1" type:"string" required:"true"` + + // The function within your code that Lambda calls to begin execution. For Node.js, + // it is the module-name.export value in your function. For Java, it can be + // package.class-name::handler or package.class-name. For more information, + // see Lambda Function Handler (Java) (http://docs.aws.amazon.com/lambda/latest/dg/java-programming-model-handler-types.html). + Handler *string `type:"string" required:"true"` + + // The amount of memory, in MB, your Lambda function is given. Lambda uses this + // memory size to infer the amount of CPU and memory allocated to your function. + // Your function use-case determines your CPU and memory requirements. For example, + // a database operation might need less memory compared to an image processing + // function. The default value is 128 MB. The value must be a multiple of 64 + // MB. + MemorySize *int64 `min:"128" type:"integer"` + + // This boolean parameter can be used to request AWS Lambda to create the Lambda + // function and publish a version as an atomic operation. + Publish *bool `type:"boolean"` + + // The Amazon Resource Name (ARN) of the IAM role that Lambda assumes when it + // executes your function to access any other Amazon Web Services (AWS) resources. + // For more information, see AWS Lambda: How it Works (http://docs.aws.amazon.com/lambda/latest/dg/lambda-introduction.html). + Role *string `type:"string" required:"true"` + + // The runtime environment for the Lambda function you are uploading. + Runtime *string `type:"string" required:"true" enum:"Runtime"` + + // The function execution time at which Lambda should terminate the function. 
+ // Because the execution time has cost implications, we recommend you set this + // value based on your expected execution time. The default is 3 seconds. + Timeout *int64 `min:"1" type:"integer"` + + // If your Lambda function accesses resources in a VPC, you provide this parameter + // identifying the list of security group IDs and subnet IDs. These must belong + // to the same VPC. You must provide at least one security group and one subnet + // ID. + VpcConfig *VpcConfig `type:"structure"` +} + +// String returns the string representation +func (s CreateFunctionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFunctionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateFunctionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateFunctionInput"} + if s.Code == nil { + invalidParams.Add(request.NewErrParamRequired("Code")) + } + if s.FunctionName == nil { + invalidParams.Add(request.NewErrParamRequired("FunctionName")) + } + if s.FunctionName != nil && len(*s.FunctionName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FunctionName", 1)) + } + if s.Handler == nil { + invalidParams.Add(request.NewErrParamRequired("Handler")) + } + if s.MemorySize != nil && *s.MemorySize < 128 { + invalidParams.Add(request.NewErrParamMinValue("MemorySize", 128)) + } + if s.Role == nil { + invalidParams.Add(request.NewErrParamRequired("Role")) + } + if s.Runtime == nil { + invalidParams.Add(request.NewErrParamRequired("Runtime")) + } + if s.Timeout != nil && *s.Timeout < 1 { + invalidParams.Add(request.NewErrParamMinValue("Timeout", 1)) + } + if s.Code != nil { + if err := s.Code.Validate(); err != nil { + invalidParams.AddNested("Code", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteAliasInput struct { + _ struct{} `type:"structure"` + + // The Lambda function name for which the alias is created. Deleting an alias + // does not delete the function version to which it is pointing. + FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` + + // Name of the alias to delete. + Name *string `location:"uri" locationName:"Name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteAliasInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAliasInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteAliasInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteAliasInput"}
+ if s.FunctionName == nil {
+ invalidParams.Add(request.NewErrParamRequired("FunctionName"))
+ }
+ if s.FunctionName != nil && len(*s.FunctionName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("FunctionName", 1))
+ }
+ if s.Name == nil {
+ invalidParams.Add(request.NewErrParamRequired("Name"))
+ }
+ if s.Name != nil && len(*s.Name) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Name", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type DeleteAliasOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteAliasOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteAliasOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteEventSourceMappingInput struct {
+ _ struct{} `type:"structure"`
+
+ // The event source mapping ID.
+ UUID *string `location:"uri" locationName:"UUID" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteEventSourceMappingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteEventSourceMappingInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteEventSourceMappingInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteEventSourceMappingInput"}
+ if s.UUID == nil {
+ invalidParams.Add(request.NewErrParamRequired("UUID"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
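The generated Validate methods above run entirely client-side: they only inspect struct fields, and the SDK runs the same check before a request is sent. They can also be called directly to surface missing required parameters early. A minimal sketch, assuming the vendored lambda package from this patch:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/lambda"
)

func main() {
	// FunctionName and Name are required but deliberately left nil here,
	// so Validate returns a request.ErrInvalidParams naming both fields.
	in := &lambda.DeleteAliasInput{}
	if err := in.Validate(); err != nil {
		fmt.Println(err) // reports every missing or too-short field at once
	}
}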
+type DeleteFunctionInput struct {
+ _ struct{} `type:"structure"`
+
+ // The Lambda function to delete.
+ //
+ // You can specify the function name (for example, Thumbnail) or you can specify
+ // Amazon Resource Name (ARN) of the function (for example, arn:aws:lambda:us-west-2:account-id:function:ThumbNail).
+ // If you are using versioning, you can also provide a qualified function ARN
+ // (ARN that is qualified with function version or alias name as suffix). AWS
+ // Lambda also allows you to specify only the function name with the account
+ // ID qualifier (for example, account-id:Thumbnail). Note that the length constraint
+ // applies only to the ARN. If you specify only the function name, it is limited
+ // to 64 characters in length.
+ FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"`
+
+ // Using this optional parameter you can specify a function version (but not
+ // the $LATEST version) to direct AWS Lambda to delete a specific function version.
+ // If the function version has one or more aliases pointing to it, you will
+ // get an error, because you cannot delete a function version while aliases
+ // still point to it. You can delete any function version but not the $LATEST,
+ // that is, you cannot specify $LATEST as the value of this parameter. The $LATEST
+ // version can be deleted only when you want to delete all the function versions
+ // and aliases.
+ //
+ // You can only specify a function version, not an alias name, using this parameter.
+ // You cannot delete a function version using its alias.
+ //
+ // If you don't specify this parameter, AWS Lambda will delete the function,
+ // including all of its versions and aliases.
+ Qualifier *string `location:"querystring" locationName:"Qualifier" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteFunctionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteFunctionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteFunctionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteFunctionInput"}
+ if s.FunctionName == nil {
+ invalidParams.Add(request.NewErrParamRequired("FunctionName"))
+ }
+ if s.FunctionName != nil && len(*s.FunctionName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("FunctionName", 1))
+ }
+ if s.Qualifier != nil && len(*s.Qualifier) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Qualifier", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type DeleteFunctionOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteFunctionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteFunctionOutput) GoString() string {
+ return s.String()
+}
+
+// Describes a mapping between an Amazon Kinesis stream and a Lambda function.
+type EventSourceMappingConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // The largest number of records that AWS Lambda will retrieve from your event
+ // source at the time of invoking your function. Your function receives an event
+ // with all the retrieved records.
+ BatchSize *int64 `min:"1" type:"integer"`
+
+ // The Amazon Resource Name (ARN) of the Amazon Kinesis stream that is the source
+ // of events.
+ EventSourceArn *string `type:"string"`
+
+ // The Lambda function to invoke when AWS Lambda detects an event on the stream.
+ FunctionArn *string `type:"string"`
+
+ // The UTC time string indicating the last time the event mapping was updated.
+ LastModified *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The result of the last AWS Lambda invocation of your Lambda function.
+ LastProcessingResult *string `type:"string"`
+
+ // The state of the event source mapping. It can be Creating, Enabled, Disabled,
+ // Enabling, Disabling, Updating, or Deleting.
+ State *string `type:"string"`
+
+ // The reason the event source mapping is in its current state. It is either
+ // user-requested or an AWS Lambda-initiated state transition.
+ StateTransitionReason *string `type:"string"`
+
+ // The AWS Lambda assigned opaque identifier for the mapping.
+ UUID *string `type:"string"`
+}
+
+// String returns the string representation
+func (s EventSourceMappingConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EventSourceMappingConfiguration) GoString() string {
+ return s.String()
+}
+
+// The code for the Lambda function.
+type FunctionCode struct {
+ _ struct{} `type:"structure"`
+
+ // Amazon S3 bucket name where the .zip file containing your deployment package
+ // is stored. This bucket must reside in the same AWS region where you are creating
+ // the Lambda function.
+ S3Bucket *string `min:"3" type:"string"`
+
+ // The Amazon S3 object (the deployment package) key name you want to upload.
+ S3Key *string `min:"1" type:"string"`
+
+ // The Amazon S3 object (the deployment package) version you want to upload.
+ S3ObjectVersion *string `min:"1" type:"string"` + + // A zip file containing your deployment package. If you are using the API directly, + // the zip file must be base64-encoded (if you are using the AWS SDKs or the + // AWS CLI, the SDKs or CLI will do the encoding for you). For more information + // about creating a .zip file, go to Execution Permissions (http://docs.aws.amazon.com/lambda/latest/dg/intro-permission-model.html#lambda-intro-execution-role.html) + // in the AWS Lambda Developer Guide. + // + // ZipFile is automatically base64 encoded/decoded by the SDK. + ZipFile []byte `type:"blob"` +} + +// String returns the string representation +func (s FunctionCode) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FunctionCode) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *FunctionCode) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FunctionCode"} + if s.S3Bucket != nil && len(*s.S3Bucket) < 3 { + invalidParams.Add(request.NewErrParamMinLen("S3Bucket", 3)) + } + if s.S3Key != nil && len(*s.S3Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("S3Key", 1)) + } + if s.S3ObjectVersion != nil && len(*s.S3ObjectVersion) < 1 { + invalidParams.Add(request.NewErrParamMinLen("S3ObjectVersion", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The object for the Lambda function location. +type FunctionCodeLocation struct { + _ struct{} `type:"structure"` + + // The presigned URL you can use to download the function's .zip file that you + // previously uploaded. The URL is valid for up to 10 minutes. + Location *string `type:"string"` + + // The repository from which you can download the function. + RepositoryType *string `type:"string"` +} + +// String returns the string representation +func (s FunctionCodeLocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FunctionCodeLocation) GoString() string { + return s.String() +} + +// A complex type that describes function metadata. +type FunctionConfiguration struct { + _ struct{} `type:"structure"` + + // It is the SHA256 hash of your function deployment package. + CodeSha256 *string `type:"string"` + + // The size, in bytes, of the function .zip file you uploaded. + CodeSize *int64 `type:"long"` + + // The user-provided description. + Description *string `type:"string"` + + // The Amazon Resource Name (ARN) assigned to the function. + FunctionArn *string `type:"string"` + + // The name of the function. + FunctionName *string `min:"1" type:"string"` + + // The function Lambda calls to begin executing your function. + Handler *string `type:"string"` + + // The time stamp of the last time you updated the function. + LastModified *string `type:"string"` + + // The memory size, in MB, you configured for the function. Must be a multiple + // of 64 MB. + MemorySize *int64 `min:"128" type:"integer"` + + // The Amazon Resource Name (ARN) of the IAM role that Lambda assumes when it + // executes your function to access any other Amazon Web Services (AWS) resources. + Role *string `type:"string"` + + // The runtime environment for the Lambda function. + Runtime *string `type:"string" enum:"Runtime"` + + // The function execution time at which Lambda should terminate the function. 
+ // Because the execution time has cost implications, we recommend you set this + // value based on your expected execution time. The default is 3 seconds. + Timeout *int64 `min:"1" type:"integer"` + + // The version of the Lambda function. + Version *string `min:"1" type:"string"` + + // VPC configuration associated with your Lambda function. + VpcConfig *VpcConfigResponse `type:"structure"` +} + +// String returns the string representation +func (s FunctionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FunctionConfiguration) GoString() string { + return s.String() +} + +type GetAliasInput struct { + _ struct{} `type:"structure"` + + // Function name for which the alias is created. An alias is a subresource that + // exists only in the context of an existing Lambda function so you must specify + // the function name. + FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` + + // Name of the alias for which you want to retrieve information. + Name *string `location:"uri" locationName:"Name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetAliasInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAliasInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetAliasInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAliasInput"} + if s.FunctionName == nil { + invalidParams.Add(request.NewErrParamRequired("FunctionName")) + } + if s.FunctionName != nil && len(*s.FunctionName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FunctionName", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetEventSourceMappingInput struct { + _ struct{} `type:"structure"` + + // The AWS Lambda assigned ID of the event source mapping. + UUID *string `location:"uri" locationName:"UUID" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetEventSourceMappingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetEventSourceMappingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetEventSourceMappingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetEventSourceMappingInput"} + if s.UUID == nil { + invalidParams.Add(request.NewErrParamRequired("UUID")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetFunctionConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the Lambda function for which you want to retrieve the configuration + // information. + // + // You can specify a function name (for example, Thumbnail) or you can specify + // Amazon Resource Name (ARN) of the function (for example, arn:aws:lambda:us-west-2:account-id:function:ThumbNail). + // AWS Lambda also allows you to specify a partial ARN (for example, account-id:Thumbnail). + // Note that the length constraint applies only to the ARN. 
If you specify only
+ // the function name, it is limited to 64 characters in length.
+ FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"`
+
+ // Using this optional parameter you can specify a function version or an alias
+ // name. If you specify a function version, the API uses qualified function ARN
+ // and returns information about the specific function version. If you specify
+ // an alias name, the API uses the alias ARN and returns information about the
+ // function version to which the alias points.
+ //
+ // If you don't specify this parameter, the API uses unqualified function ARN,
+ // and returns information about the $LATEST function version.
+ Qualifier *string `location:"querystring" locationName:"Qualifier" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s GetFunctionConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetFunctionConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetFunctionConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetFunctionConfigurationInput"}
+ if s.FunctionName == nil {
+ invalidParams.Add(request.NewErrParamRequired("FunctionName"))
+ }
+ if s.FunctionName != nil && len(*s.FunctionName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("FunctionName", 1))
+ }
+ if s.Qualifier != nil && len(*s.Qualifier) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Qualifier", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type GetFunctionInput struct {
+ _ struct{} `type:"structure"`
+
+ // The Lambda function name.
+ //
+ // You can specify a function name (for example, Thumbnail) or you can specify
+ // Amazon Resource Name (ARN) of the function (for example, arn:aws:lambda:us-west-2:account-id:function:ThumbNail).
+ // AWS Lambda also allows you to specify a partial ARN (for example, account-id:Thumbnail).
+ // Note that the length constraint applies only to the ARN. If you specify only
+ // the function name, it is limited to 64 characters in length.
+ FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"`
+
+ // Use this optional parameter to specify a function version or an alias name.
+ // If you specify a function version, the API uses qualified function ARN for
+ // the request and returns information about the specific Lambda function version.
+ // If you specify an alias name, the API uses the alias ARN and returns information
+ // about the function version to which the alias points. If you don't provide
+ // this parameter, the API uses unqualified function ARN and returns information
+ // about the $LATEST version of the Lambda function.
+ Qualifier *string `location:"querystring" locationName:"Qualifier" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s GetFunctionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetFunctionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetFunctionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetFunctionInput"}
+ if s.FunctionName == nil {
+ invalidParams.Add(request.NewErrParamRequired("FunctionName"))
+ }
+ if s.FunctionName != nil && len(*s.FunctionName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("FunctionName", 1))
+ }
+ if s.Qualifier != nil && len(*s.Qualifier) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Qualifier", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// This response contains the object for the Lambda function location (see API_FunctionCodeLocation).
+type GetFunctionOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The object for the Lambda function location.
+ Code *FunctionCodeLocation `type:"structure"`
+
+ // A complex type that describes function metadata.
+ Configuration *FunctionConfiguration `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetFunctionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetFunctionOutput) GoString() string {
+ return s.String()
+}
+
+type GetPolicyInput struct {
+ _ struct{} `type:"structure"`
+
+ // Function name whose resource policy you want to retrieve.
+ //
+ // You can specify the function name (for example, Thumbnail) or you can specify
+ // Amazon Resource Name (ARN) of the function (for example, arn:aws:lambda:us-west-2:account-id:function:ThumbNail).
+ // If you are using versioning, you can also provide a qualified function ARN
+ // (ARN that is qualified with function version or alias name as suffix). AWS
+ // Lambda also allows you to specify only the function name with the account
+ // ID qualifier (for example, account-id:Thumbnail). Note that the length constraint
+ // applies only to the ARN. If you specify only the function name, it is limited
+ // to 64 characters in length.
+ FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"`
+
+ // You can specify this optional query parameter to specify a function version
+ // or an alias name, in which case this API will return all permissions associated
+ // with the specific qualified ARN. If you don't provide this parameter, the
+ // API will return permissions that apply to the unqualified function ARN.
+ Qualifier *string `location:"querystring" locationName:"Qualifier" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s GetPolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetPolicyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetPolicyInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetPolicyInput"}
+ if s.FunctionName == nil {
+ invalidParams.Add(request.NewErrParamRequired("FunctionName"))
+ }
+ if s.FunctionName != nil && len(*s.FunctionName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("FunctionName", 1))
+ }
+ if s.Qualifier != nil && len(*s.Qualifier) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Qualifier", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
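GetPolicyOutput (below) returns the resource policy as a single escaped JSON string, so callers typically unmarshal it before inspecting statements. A hedged sketch, assuming a GetPolicy operation generated earlier in this file; the function name is hypothetical:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lambda"
)

func main() {
	svc := lambda.New(session.New())
	out, err := svc.GetPolicy(&lambda.GetPolicyInput{
		FunctionName: aws.String("thumbnailer"), // hypothetical function
	})
	if err != nil {
		fmt.Println(err.Error())
		return
	}
	if out.Policy != nil {
		// The policy arrives as one escaped JSON string; decode it to
		// walk its Statement entries.
		var doc map[string]interface{}
		if err := json.Unmarshal([]byte(*out.Policy), &doc); err == nil {
			fmt.Println(doc["Statement"])
		}
	}
}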
+type GetPolicyOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The resource policy associated with the specified function. The policy is
+ // returned as a string, using a backslash ("\") as an escape character in the
+ // JSON.
+ Policy *string `type:"string"`
+}
+
+// String returns the string representation
+func (s GetPolicyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetPolicyOutput) GoString() string {
+ return s.String()
+}
+
+type InvokeAsyncInput struct {
+ _ struct{} `deprecated:"true" type:"structure" payload:"InvokeArgs"`
+
+ // The Lambda function name.
+ FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"`
+
+ // JSON that you want to provide to your Lambda function as input.
+ InvokeArgs io.ReadSeeker `type:"blob" required:"true"`
+}
+
+// String returns the string representation
+func (s InvokeAsyncInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InvokeAsyncInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InvokeAsyncInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "InvokeAsyncInput"}
+ if s.FunctionName == nil {
+ invalidParams.Add(request.NewErrParamRequired("FunctionName"))
+ }
+ if s.FunctionName != nil && len(*s.FunctionName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("FunctionName", 1))
+ }
+ if s.InvokeArgs == nil {
+ invalidParams.Add(request.NewErrParamRequired("InvokeArgs"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Upon success, it returns an empty response. Otherwise, it throws an exception.
+type InvokeAsyncOutput struct {
+ _ struct{} `deprecated:"true" type:"structure"`
+
+ // It will be 202 upon success.
+ Status *int64 `location:"statusCode" type:"integer"`
+}
+
+// String returns the string representation
+func (s InvokeAsyncOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InvokeAsyncOutput) GoString() string {
+ return s.String()
+}
+
+type InvokeInput struct {
+ _ struct{} `type:"structure" payload:"Payload"`
+
+ // Using the ClientContext you can pass client-specific information to the Lambda
+ // function you are invoking. You can then process the client information in
+ // your Lambda function as you choose through the context variable. For an example
+ // of a ClientContext JSON, see PutEvents (http://docs.aws.amazon.com/mobileanalytics/latest/ug/PutEvents.html)
+ // in the Amazon Mobile Analytics API Reference and User Guide.
+ //
+ // The ClientContext JSON must be base64-encoded.
+ ClientContext *string `location:"header" locationName:"X-Amz-Client-Context" type:"string"`
+
+ // The Lambda function name.
+ //
+ // You can specify a function name (for example, Thumbnail) or you can specify
+ // Amazon Resource Name (ARN) of the function (for example, arn:aws:lambda:us-west-2:account-id:function:ThumbNail).
+ // AWS Lambda also allows you to specify a partial ARN (for example, account-id:Thumbnail).
+ // Note that the length constraint applies only to the ARN. If you specify only
+ // the function name, it is limited to 64 characters in length.
+ FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"`
+
+ // By default, the Invoke API assumes RequestResponse invocation type. You can
+ // optionally request asynchronous execution by specifying Event as the InvocationType.
+ // You can also use this parameter to request AWS Lambda to not execute the
+ // function but do some verification, such as if the caller is authorized to
+ // invoke the function and if the inputs are valid. You request this by specifying
+ // DryRun as the InvocationType. This is useful in a cross-account scenario
+ // when you want to verify access to a function without running it.
+ InvocationType *string `location:"header" locationName:"X-Amz-Invocation-Type" type:"string" enum:"InvocationType"`
+
+ // You can set this optional parameter to Tail in the request only if you specify
+ // the InvocationType parameter with value RequestResponse. In this case, AWS
+ // Lambda returns the base64-encoded last 4 KB of log data produced by your
+ // Lambda function in the x-amz-log-results header.
+ LogType *string `location:"header" locationName:"X-Amz-Log-Type" type:"string" enum:"LogType"`
+
+ // JSON that you want to provide to your Lambda function as input.
+ Payload []byte `type:"blob"`
+
+ // You can use this optional parameter to specify a Lambda function version
+ // or alias name. If you specify a function version, the API uses the qualified
+ // function ARN to invoke a specific Lambda function. If you specify an alias
+ // name, the API uses the alias ARN to invoke the Lambda function version to
+ // which the alias points.
+ //
+ // If you don't provide this parameter, then the API uses unqualified function
+ // ARN which results in invocation of the $LATEST version.
+ Qualifier *string `location:"querystring" locationName:"Qualifier" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s InvokeInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InvokeInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InvokeInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "InvokeInput"}
+ if s.FunctionName == nil {
+ invalidParams.Add(request.NewErrParamRequired("FunctionName"))
+ }
+ if s.FunctionName != nil && len(*s.FunctionName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("FunctionName", 1))
+ }
+ if s.Qualifier != nil && len(*s.Qualifier) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Qualifier", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
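InvokeInput above combines naturally with the InvocationType and LogType enums defined at the end of this file: request Tail logs, then base64-decode the LogResult field from InvokeOutput (below). A sketch under those assumptions; the function name and payload are hypothetical:

package main

import (
	"encoding/base64"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lambda"
)

func main() {
	svc := lambda.New(session.New())
	out, err := svc.Invoke(&lambda.InvokeInput{
		FunctionName:   aws.String("thumbnailer"), // hypothetical function
		InvocationType: aws.String(lambda.InvocationTypeRequestResponse),
		LogType:        aws.String(lambda.LogTypeTail), // ask for the last 4 KB of logs
		Payload:        []byte(`{"bucket":"images","key":"cat.png"}`),
	})
	if err != nil {
		fmt.Println(err.Error())
		return
	}
	fmt.Printf("status %d, payload %s\n", *out.StatusCode, out.Payload)
	if out.LogResult != nil {
		// LogResult is base64-encoded (see InvokeOutput below).
		if logs, decErr := base64.StdEncoding.DecodeString(*out.LogResult); decErr == nil {
			fmt.Printf("%s\n", logs)
		}
	}
}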
+// Upon success, it returns an empty response. Otherwise, it throws an exception.
+type InvokeOutput struct {
+ _ struct{} `type:"structure" payload:"Payload"`
+
+ // Indicates whether an error occurred while executing the Lambda function.
+ // If an error occurred, this field will have one of two values: Handled or
+ // Unhandled. Handled errors are errors that are reported by the function while
+ // the Unhandled errors are those detected and reported by AWS Lambda. Unhandled
+ // errors include out of memory errors and function timeouts. For information
+ // about how to report a Handled error, see Programming Model (http://docs.aws.amazon.com/lambda/latest/dg/programming-model.html).
+ FunctionError *string `location:"header" locationName:"X-Amz-Function-Error" type:"string"`
+
+ // The base64-encoded logs for the Lambda function invocation. This is present
+ // only if the invocation type is RequestResponse and the logs were requested.
+ LogResult *string `location:"header" locationName:"X-Amz-Log-Result" type:"string"`
+
+ // The JSON representation of the object returned by the Lambda function.
+ // This is present only if the invocation type is RequestResponse.
+ //
+ // In the event of a function error this field contains a message describing
+ // the error. For Handled errors, the Lambda function reports this message.
+ // For Unhandled errors, AWS Lambda reports the message.
+ Payload []byte `type:"blob"`
+
+ // The HTTP status code will be in the 200 range for a successful request. For
+ // the RequestResponse invocation type this status code will be 200. For the
+ // Event invocation type this status code will be 202. For the DryRun invocation
+ // type the status code will be 204.
+ StatusCode *int64 `location:"statusCode" type:"integer"`
+}
+
+// String returns the string representation
+func (s InvokeOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InvokeOutput) GoString() string {
+ return s.String()
+}
+
+type ListAliasesInput struct {
+ _ struct{} `type:"structure"`
+
+ // Lambda function name for which the alias is created.
+ FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"`
+
+ // If you specify this optional parameter, the API returns only the aliases
+ // that are pointing to the specific Lambda function version, otherwise the
+ // API returns all of the aliases created for the Lambda function.
+ FunctionVersion *string `location:"querystring" locationName:"FunctionVersion" min:"1" type:"string"`
+
+ // Optional string. An opaque pagination token returned from a previous ListAliases
+ // operation. If present, indicates where to continue the listing.
+ Marker *string `location:"querystring" locationName:"Marker" type:"string"`
+
+ // Optional integer. Specifies the maximum number of aliases to return in response.
+ // This parameter value must be greater than 0.
+ MaxItems *int64 `location:"querystring" locationName:"MaxItems" min:"1" type:"integer"`
+}
+
+// String returns the string representation
+func (s ListAliasesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListAliasesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListAliasesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListAliasesInput"}
+ if s.FunctionName == nil {
+ invalidParams.Add(request.NewErrParamRequired("FunctionName"))
+ }
+ if s.FunctionName != nil && len(*s.FunctionName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("FunctionName", 1))
+ }
+ if s.FunctionVersion != nil && len(*s.FunctionVersion) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("FunctionVersion", 1))
+ }
+ if s.MaxItems != nil && *s.MaxItems < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type ListAliasesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of aliases.
+ Aliases []*AliasConfiguration `type:"list"`
+
+ // A string, present if there are more aliases.
+ NextMarker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListAliasesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListAliasesOutput) GoString() string {
+ return s.String()
+}
+
+type ListEventSourceMappingsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the Amazon Kinesis stream.
+ EventSourceArn *string `location:"querystring" locationName:"EventSourceArn" type:"string"`
+
+ // The name of the Lambda function.
+ //
+ // You can specify the function name (for example, Thumbnail) or you can specify
+ // Amazon Resource Name (ARN) of the function (for example, arn:aws:lambda:us-west-2:account-id:function:ThumbNail).
+ // If you are using versioning, you can also provide a qualified function ARN
+ // (ARN that is qualified with function version or alias name as suffix). AWS
+ // Lambda also allows you to specify only the function name with the account
+ // ID qualifier (for example, account-id:Thumbnail). Note that the length constraint
+ // applies only to the ARN. If you specify only the function name, it is limited
+ // to 64 characters in length.
+ FunctionName *string `location:"querystring" locationName:"FunctionName" min:"1" type:"string"`
+
+ // Optional string. An opaque pagination token returned from a previous ListEventSourceMappings
+ // operation. If present, indicates where to continue the listing.
+ Marker *string `location:"querystring" locationName:"Marker" type:"string"`
+
+ // Optional integer. Specifies the maximum number of event sources to return
+ // in response. This value must be greater than 0.
+ MaxItems *int64 `location:"querystring" locationName:"MaxItems" min:"1" type:"integer"`
+}
+
+// String returns the string representation
+func (s ListEventSourceMappingsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListEventSourceMappingsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListEventSourceMappingsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListEventSourceMappingsInput"}
+ if s.FunctionName != nil && len(*s.FunctionName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("FunctionName", 1))
+ }
+ if s.MaxItems != nil && *s.MaxItems < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Contains a list of event sources (see API_EventSourceMappingConfiguration)
+type ListEventSourceMappingsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of EventSourceMappingConfiguration objects.
+ EventSourceMappings []*EventSourceMappingConfiguration `type:"list"`
+
+ // A string, present if there are more event source mappings.
+ NextMarker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListEventSourceMappingsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListEventSourceMappingsOutput) GoString() string {
+ return s.String()
+}
+
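The Marker/NextMarker pair used by the list operations above drives pagination: pass the previous response's NextMarker back as Marker until it comes back nil. A sketch of that loop, assuming a ListFunctions operation generated earlier in this file:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lambda"
)

func main() {
	svc := lambda.New(session.New())
	var marker *string
	for {
		out, err := svc.ListFunctions(&lambda.ListFunctionsInput{
			Marker:   marker,
			MaxItems: aws.Int64(50), // page size, must be > 0
		})
		if err != nil {
			fmt.Println(err.Error())
			return
		}
		for _, fn := range out.Functions {
			fmt.Println(*fn.FunctionName)
		}
		if out.NextMarker == nil {
			break // no more pages
		}
		marker = out.NextMarker
	}
}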
+type ListFunctionsInput struct {
+ _ struct{} `type:"structure"`
+
+ // Optional string. An opaque pagination token returned from a previous ListFunctions
+ // operation. If present, indicates where to continue the listing.
+ Marker *string `location:"querystring" locationName:"Marker" type:"string"`
+
+ // Optional integer. Specifies the maximum number of AWS Lambda functions to
+ // return in response. This parameter value must be greater than 0.
+ MaxItems *int64 `location:"querystring" locationName:"MaxItems" min:"1" type:"integer"`
+}
+
+// String returns the string representation
+func (s ListFunctionsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListFunctionsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListFunctionsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListFunctionsInput"}
+ if s.MaxItems != nil && *s.MaxItems < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Contains a list of AWS Lambda function configurations (see FunctionConfiguration).
+type ListFunctionsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of Lambda functions.
+ Functions []*FunctionConfiguration `type:"list"`
+
+ // A string, present if there are more functions.
+ NextMarker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListFunctionsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListFunctionsOutput) GoString() string {
+ return s.String()
+}
+
+type ListVersionsByFunctionInput struct {
+ _ struct{} `type:"structure"`
+
+ // Function name whose versions to list. You can specify a function name (for
+ // example, Thumbnail) or you can specify Amazon Resource Name (ARN) of the
+ // function (for example, arn:aws:lambda:us-west-2:account-id:function:ThumbNail).
+ // AWS Lambda also allows you to specify a partial ARN (for example, account-id:Thumbnail).
+ // Note that the length constraint applies only to the ARN. If you specify only
+ // the function name, it is limited to 64 characters in length.
+ FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"`
+
+ // Optional string. An opaque pagination token returned from a previous ListVersionsByFunction
+ // operation. If present, indicates where to continue the listing.
+ Marker *string `location:"querystring" locationName:"Marker" type:"string"`
+
+ // Optional integer. Specifies the maximum number of AWS Lambda function versions
+ // to return in response. This parameter value must be greater than 0.
+ MaxItems *int64 `location:"querystring" locationName:"MaxItems" min:"1" type:"integer"`
+}
+
+// String returns the string representation
+func (s ListVersionsByFunctionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListVersionsByFunctionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListVersionsByFunctionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListVersionsByFunctionInput"}
+ if s.FunctionName == nil {
+ invalidParams.Add(request.NewErrParamRequired("FunctionName"))
+ }
+ if s.FunctionName != nil && len(*s.FunctionName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("FunctionName", 1))
+ }
+ if s.MaxItems != nil && *s.MaxItems < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type ListVersionsByFunctionOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A string, present if there are more function versions.
+ NextMarker *string `type:"string"`
+
+ // A list of Lambda function versions.
+ Versions []*FunctionConfiguration `type:"list"`
+}
+
+// String returns the string representation
+func (s ListVersionsByFunctionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListVersionsByFunctionOutput) GoString() string {
+ return s.String()
+}
+
+type PublishVersionInput struct {
+ _ struct{} `type:"structure"`
+
+ // The SHA256 hash of the deployment package you want to publish. This provides
+ // validation on the code you are publishing. If you provide this parameter,
+ // the value must match the SHA256 of the $LATEST version for the publication
+ // to succeed.
+ CodeSha256 *string `type:"string"`
+
+ // The description for the version you are publishing. If not provided, AWS
+ // Lambda copies the description from the $LATEST version.
+ Description *string `type:"string"`
+
+ // The Lambda function name. You can specify a function name (for example, Thumbnail)
+ // or you can specify Amazon Resource Name (ARN) of the function (for example,
+ // arn:aws:lambda:us-west-2:account-id:function:ThumbNail). AWS Lambda also
+ // allows you to specify a partial ARN (for example, account-id:Thumbnail).
+ // Note that the length constraint applies only to the ARN. If you specify only
+ // the function name, it is limited to 64 characters in length.
+ FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s PublishVersionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PublishVersionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PublishVersionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PublishVersionInput"}
+ if s.FunctionName == nil {
+ invalidParams.Add(request.NewErrParamRequired("FunctionName"))
+ }
+ if s.FunctionName != nil && len(*s.FunctionName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("FunctionName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type RemovePermissionInput struct {
+ _ struct{} `type:"structure"`
+
+ // Lambda function whose resource policy you want to remove a permission from.
+ //
+ // You can specify a function name (for example, Thumbnail) or you can specify
+ // Amazon Resource Name (ARN) of the function (for example, arn:aws:lambda:us-west-2:account-id:function:ThumbNail).
+ // AWS Lambda also allows you to specify a partial ARN (for example, account-id:Thumbnail).
+ // Note that the length constraint applies only to the ARN.
If you specify only
+ // the function name, it is limited to 64 characters in length.
+ FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"`
+
+ // You can specify this optional parameter to remove permission associated with
+ // a specific function version or function alias. If you don't specify this
+ // parameter, the API removes permission associated with the unqualified function
+ // ARN.
+ Qualifier *string `location:"querystring" locationName:"Qualifier" min:"1" type:"string"`
+
+ // Statement ID of the permission to remove.
+ StatementId *string `location:"uri" locationName:"StatementId" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RemovePermissionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RemovePermissionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RemovePermissionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RemovePermissionInput"}
+ if s.FunctionName == nil {
+ invalidParams.Add(request.NewErrParamRequired("FunctionName"))
+ }
+ if s.FunctionName != nil && len(*s.FunctionName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("FunctionName", 1))
+ }
+ if s.Qualifier != nil && len(*s.Qualifier) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Qualifier", 1))
+ }
+ if s.StatementId == nil {
+ invalidParams.Add(request.NewErrParamRequired("StatementId"))
+ }
+ if s.StatementId != nil && len(*s.StatementId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("StatementId", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type RemovePermissionOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s RemovePermissionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RemovePermissionOutput) GoString() string {
+ return s.String()
+}
+
+type UpdateAliasInput struct {
+ _ struct{} `type:"structure"`
+
+ // You can change the description of the alias using this parameter.
+ Description *string `type:"string"`
+
+ // The function name for which the alias is created.
+ FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"`
+
+ // Using this parameter you can change the Lambda function version to which
+ // the alias points.
+ FunctionVersion *string `min:"1" type:"string"`
+
+ // The alias name.
+ Name *string `location:"uri" locationName:"Name" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateAliasInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateAliasInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateAliasInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UpdateAliasInput"}
+ if s.FunctionName == nil {
+ invalidParams.Add(request.NewErrParamRequired("FunctionName"))
+ }
+ if s.FunctionName != nil && len(*s.FunctionName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("FunctionName", 1))
+ }
+ if s.FunctionVersion != nil && len(*s.FunctionVersion) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("FunctionVersion", 1))
+ }
+ if s.Name == nil {
+ invalidParams.Add(request.NewErrParamRequired("Name"))
+ }
+ if s.Name != nil && len(*s.Name) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Name", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type UpdateEventSourceMappingInput struct {
+ _ struct{} `type:"structure"`
+
+ // The maximum number of stream records that can be sent to your Lambda function
+ // for a single invocation.
+ BatchSize *int64 `min:"1" type:"integer"`
+
+ // Specifies whether AWS Lambda should actively poll the stream or not. If disabled,
+ // AWS Lambda will not poll the stream.
+ Enabled *bool `type:"boolean"`
+
+ // The Lambda function to which you want the stream records sent.
+ //
+ // You can specify a function name (for example, Thumbnail) or you can specify
+ // Amazon Resource Name (ARN) of the function (for example, arn:aws:lambda:us-west-2:account-id:function:ThumbNail).
+ // AWS Lambda also allows you to specify a partial ARN (for example, account-id:Thumbnail).
+ //
+ // If you are using versioning, you can also provide a qualified function ARN
+ // (ARN that is qualified with function version or alias name as suffix). For
+ // more information about versioning, see AWS Lambda Function Versioning and
+ // Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html)
+ //
+ // Note that the length constraint applies only to the ARN. If you specify
+ // only the function name, it is limited to 64 characters in length.
+ FunctionName *string `min:"1" type:"string"`
+
+ // The event source mapping identifier.
+ UUID *string `location:"uri" locationName:"UUID" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateEventSourceMappingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateEventSourceMappingInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateEventSourceMappingInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UpdateEventSourceMappingInput"}
+ if s.BatchSize != nil && *s.BatchSize < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("BatchSize", 1))
+ }
+ if s.FunctionName != nil && len(*s.FunctionName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("FunctionName", 1))
+ }
+ if s.UUID == nil {
+ invalidParams.Add(request.NewErrParamRequired("UUID"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
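UpdateFunctionCodeInput (below) accepts raw zip bytes in ZipFile, which the SDK base64-encodes on the wire, and Publish can pair the code update with an atomic version publish. A hedged sketch; the file path and function name are hypothetical:

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lambda"
)

func main() {
	pkg, err := ioutil.ReadFile("function.zip") // hypothetical deployment package
	if err != nil {
		fmt.Println(err.Error())
		return
	}
	svc := lambda.New(session.New())
	out, err := svc.UpdateFunctionCode(&lambda.UpdateFunctionCodeInput{
		FunctionName: aws.String("thumbnailer"), // hypothetical function
		ZipFile:      pkg,                       // SDK handles the base64 encoding
		Publish:      aws.Bool(true),            // publish a new version atomically
	})
	if err != nil {
		fmt.Println(err.Error())
		return
	}
	if out.Version != nil {
		fmt.Println("published version", *out.Version)
	}
}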
+type UpdateFunctionCodeInput struct {
+ _ struct{} `type:"structure"`
+
+ // The existing Lambda function name whose code you want to replace.
+ //
+ // You can specify a function name (for example, Thumbnail) or you can specify
+ // Amazon Resource Name (ARN) of the function (for example, arn:aws:lambda:us-west-2:account-id:function:ThumbNail).
+ // AWS Lambda also allows you to specify a partial ARN (for example, account-id:Thumbnail).
+ // Note that the length constraint applies only to the ARN. If you specify only
+ // the function name, it is limited to 64 characters in length.
+ FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"`
+
+ // This boolean parameter can be used to request AWS Lambda to update the Lambda
+ // function and publish a version as an atomic operation.
+ Publish *bool `type:"boolean"`
+
+ // Amazon S3 bucket name where the .zip file containing your deployment package
+ // is stored. This bucket must reside in the same AWS region where you are creating
+ // the Lambda function.
+ S3Bucket *string `min:"3" type:"string"`
+
+ // The Amazon S3 object (the deployment package) key name you want to upload.
+ S3Key *string `min:"1" type:"string"`
+
+ // The Amazon S3 object (the deployment package) version you want to upload.
+ S3ObjectVersion *string `min:"1" type:"string"`
+
+ // Base64-encoded .zip file containing your packaged source code.
+ //
+ // ZipFile is automatically base64 encoded/decoded by the SDK.
+ ZipFile []byte `type:"blob"`
+}
+
+// String returns the string representation
+func (s UpdateFunctionCodeInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateFunctionCodeInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateFunctionCodeInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UpdateFunctionCodeInput"}
+ if s.FunctionName == nil {
+ invalidParams.Add(request.NewErrParamRequired("FunctionName"))
+ }
+ if s.FunctionName != nil && len(*s.FunctionName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("FunctionName", 1))
+ }
+ if s.S3Bucket != nil && len(*s.S3Bucket) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("S3Bucket", 3))
+ }
+ if s.S3Key != nil && len(*s.S3Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("S3Key", 1))
+ }
+ if s.S3ObjectVersion != nil && len(*s.S3ObjectVersion) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("S3ObjectVersion", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type UpdateFunctionConfigurationInput struct {
+ _ struct{} `type:"structure"`
+
+ // A short user-defined function description. AWS Lambda does not use this value.
+ // Assign a meaningful description as you see fit.
+ Description *string `type:"string"`
+
+ // The name of the Lambda function.
+ //
+ // You can specify a function name (for example, Thumbnail) or you can specify
+ // Amazon Resource Name (ARN) of the function (for example, arn:aws:lambda:us-west-2:account-id:function:ThumbNail).
+ // AWS Lambda also allows you to specify a partial ARN (for example, account-id:Thumbnail).
+ // Note that the length constraint applies only to the ARN. If you specify only
+ // the function name, it is limited to 64 characters in length.
+ FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"`
+
+ // The function that Lambda calls to begin executing your function. For Node.js,
+ // it is the module-name.export value in your function.
+ Handler *string `type:"string"`
+
+ // The amount of memory, in MB, your Lambda function is given. AWS Lambda uses
+ // this memory size to infer the amount of CPU allocated to your function. Your
+ // function use-case determines your CPU and memory requirements.
For example,
+ // a database operation might need less memory compared to an image processing
+ // function. The default value is 128 MB. The value must be a multiple of 64
+ // MB.
+ MemorySize *int64 `min:"128" type:"integer"`
+
+ // The Amazon Resource Name (ARN) of the IAM role that Lambda will assume when
+ // it executes your function.
+ Role *string `type:"string"`
+
+ Runtime *string `type:"string" enum:"Runtime"`
+
+ // The function execution time at which AWS Lambda should terminate the function.
+ // Because the execution time has cost implications, we recommend you set this
+ // value based on your expected execution time. The default is 3 seconds.
+ Timeout *int64 `min:"1" type:"integer"`
+
+ // If your Lambda function accesses resources in a VPC, you provide this parameter
+ // identifying the list of security group IDs and subnet IDs. These must belong
+ // to the same VPC. You must provide at least one security group and one subnet
+ // ID.
+ VpcConfig *VpcConfig `type:"structure"`
+}
+
+// String returns the string representation
+func (s UpdateFunctionConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateFunctionConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateFunctionConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UpdateFunctionConfigurationInput"}
+ if s.FunctionName == nil {
+ invalidParams.Add(request.NewErrParamRequired("FunctionName"))
+ }
+ if s.FunctionName != nil && len(*s.FunctionName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("FunctionName", 1))
+ }
+ if s.MemorySize != nil && *s.MemorySize < 128 {
+ invalidParams.Add(request.NewErrParamMinValue("MemorySize", 128))
+ }
+ if s.Timeout != nil && *s.Timeout < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("Timeout", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// If your Lambda function accesses resources in a VPC, you provide this parameter
+// identifying the list of security group IDs and subnet IDs. These must belong
+// to the same VPC. You must provide at least one security group and one subnet
+// ID.
+type VpcConfig struct {
+ _ struct{} `type:"structure"`
+
+ // A list of one or more security group IDs in your VPC.
+ SecurityGroupIds []*string `type:"list"`
+
+ // A list of one or more subnet IDs in your VPC.
+ SubnetIds []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s VpcConfig) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s VpcConfig) GoString() string {
+ return s.String()
+}
+
+// VPC configuration associated with your Lambda function.
+type VpcConfigResponse struct {
+ _ struct{} `type:"structure"`
+
+ // A list of security group IDs associated with the Lambda function.
+ SecurityGroupIds []*string `type:"list"`
+
+ // A list of subnet IDs associated with the Lambda function.
+ SubnetIds []*string `type:"list"`
+
+ // The VPC ID associated with your Lambda function.
+ VpcId *string `type:"string"` +} + +// String returns the string representation +func (s VpcConfigResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcConfigResponse) GoString() string { + return s.String() +} + +const ( + // @enum EventSourcePosition + EventSourcePositionTrimHorizon = "TRIM_HORIZON" + // @enum EventSourcePosition + EventSourcePositionLatest = "LATEST" +) + +const ( + // @enum InvocationType + InvocationTypeEvent = "Event" + // @enum InvocationType + InvocationTypeRequestResponse = "RequestResponse" + // @enum InvocationType + InvocationTypeDryRun = "DryRun" +) + +const ( + // @enum LogType + LogTypeNone = "None" + // @enum LogType + LogTypeTail = "Tail" +) + +const ( + // @enum Runtime + RuntimeNodejs = "nodejs" + // @enum Runtime + RuntimeNodejs43 = "nodejs4.3" + // @enum Runtime + RuntimeJava8 = "java8" + // @enum Runtime + RuntimePython27 = "python2.7" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/lambda/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/lambda/examples_test.go new file mode 100644 index 000000000..c5d9e2960 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/lambda/examples_test.go @@ -0,0 +1,561 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package lambda_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/lambda" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleLambda_AddPermission() { + svc := lambda.New(session.New()) + + params := &lambda.AddPermissionInput{ + Action: aws.String("Action"), // Required + FunctionName: aws.String("FunctionName"), // Required + Principal: aws.String("Principal"), // Required + StatementId: aws.String("StatementId"), // Required + EventSourceToken: aws.String("EventSourceToken"), + Qualifier: aws.String("Qualifier"), + SourceAccount: aws.String("SourceOwner"), + SourceArn: aws.String("Arn"), + } + resp, err := svc.AddPermission(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_CreateAlias() { + svc := lambda.New(session.New()) + + params := &lambda.CreateAliasInput{ + FunctionName: aws.String("FunctionName"), // Required + FunctionVersion: aws.String("Version"), // Required + Name: aws.String("Alias"), // Required + Description: aws.String("Description"), + } + resp, err := svc.CreateAlias(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_CreateEventSourceMapping() { + svc := lambda.New(session.New()) + + params := &lambda.CreateEventSourceMappingInput{ + EventSourceArn: aws.String("Arn"), // Required + FunctionName: aws.String("FunctionName"), // Required + StartingPosition: aws.String("EventSourcePosition"), // Required + BatchSize: aws.Int64(1), + Enabled: aws.Bool(true), + } + resp, err := svc.CreateEventSourceMapping(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
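+	// (resp is a *lambda.EventSourceMappingConfiguration; per the lambdaiface
+	// definitions later in this patch, the Get, Update, and Delete event source
+	// mapping operations return this same shape.)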
+ fmt.Println(resp) +} + +func ExampleLambda_CreateFunction() { + svc := lambda.New(session.New()) + + params := &lambda.CreateFunctionInput{ + Code: &lambda.FunctionCode{ // Required + S3Bucket: aws.String("S3Bucket"), + S3Key: aws.String("S3Key"), + S3ObjectVersion: aws.String("S3ObjectVersion"), + ZipFile: []byte("PAYLOAD"), + }, + FunctionName: aws.String("FunctionName"), // Required + Handler: aws.String("Handler"), // Required + Role: aws.String("RoleArn"), // Required + Runtime: aws.String("Runtime"), // Required + Description: aws.String("Description"), + MemorySize: aws.Int64(1), + Publish: aws.Bool(true), + Timeout: aws.Int64(1), + VpcConfig: &lambda.VpcConfig{ + SecurityGroupIds: []*string{ + aws.String("SecurityGroupId"), // Required + // More values... + }, + SubnetIds: []*string{ + aws.String("SubnetId"), // Required + // More values... + }, + }, + } + resp, err := svc.CreateFunction(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_DeleteAlias() { + svc := lambda.New(session.New()) + + params := &lambda.DeleteAliasInput{ + FunctionName: aws.String("FunctionName"), // Required + Name: aws.String("Alias"), // Required + } + resp, err := svc.DeleteAlias(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_DeleteEventSourceMapping() { + svc := lambda.New(session.New()) + + params := &lambda.DeleteEventSourceMappingInput{ + UUID: aws.String("String"), // Required + } + resp, err := svc.DeleteEventSourceMapping(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_DeleteFunction() { + svc := lambda.New(session.New()) + + params := &lambda.DeleteFunctionInput{ + FunctionName: aws.String("FunctionName"), // Required + Qualifier: aws.String("Qualifier"), + } + resp, err := svc.DeleteFunction(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_GetAlias() { + svc := lambda.New(session.New()) + + params := &lambda.GetAliasInput{ + FunctionName: aws.String("FunctionName"), // Required + Name: aws.String("Alias"), // Required + } + resp, err := svc.GetAlias(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_GetEventSourceMapping() { + svc := lambda.New(session.New()) + + params := &lambda.GetEventSourceMappingInput{ + UUID: aws.String("String"), // Required + } + resp, err := svc.GetEventSourceMapping(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleLambda_GetFunction() { + svc := lambda.New(session.New()) + + params := &lambda.GetFunctionInput{ + FunctionName: aws.String("FunctionName"), // Required + Qualifier: aws.String("Qualifier"), + } + resp, err := svc.GetFunction(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_GetFunctionConfiguration() { + svc := lambda.New(session.New()) + + params := &lambda.GetFunctionConfigurationInput{ + FunctionName: aws.String("FunctionName"), // Required + Qualifier: aws.String("Qualifier"), + } + resp, err := svc.GetFunctionConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_GetPolicy() { + svc := lambda.New(session.New()) + + params := &lambda.GetPolicyInput{ + FunctionName: aws.String("FunctionName"), // Required + Qualifier: aws.String("Qualifier"), + } + resp, err := svc.GetPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_Invoke() { + svc := lambda.New(session.New()) + + params := &lambda.InvokeInput{ + FunctionName: aws.String("FunctionName"), // Required + ClientContext: aws.String("String"), + InvocationType: aws.String("InvocationType"), + LogType: aws.String("LogType"), + Payload: []byte("PAYLOAD"), + Qualifier: aws.String("Qualifier"), + } + resp, err := svc.Invoke(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_InvokeAsync() { + svc := lambda.New(session.New()) + + params := &lambda.InvokeAsyncInput{ + FunctionName: aws.String("FunctionName"), // Required + InvokeArgs: bytes.NewReader([]byte("PAYLOAD")), // Required + } + resp, err := svc.InvokeAsync(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_ListAliases() { + svc := lambda.New(session.New()) + + params := &lambda.ListAliasesInput{ + FunctionName: aws.String("FunctionName"), // Required + FunctionVersion: aws.String("Version"), + Marker: aws.String("String"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListAliases(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_ListEventSourceMappings() { + svc := lambda.New(session.New()) + + params := &lambda.ListEventSourceMappingsInput{ + EventSourceArn: aws.String("Arn"), + FunctionName: aws.String("FunctionName"), + Marker: aws.String("String"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListEventSourceMappings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
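+	// For example, that cast looks like the sketch below (awserr is the
+	// github.com/aws/aws-sdk-go/aws/awserr package, which this generated
+	// example file does not import):
+	//
+	//   if aerr, ok := err.(awserr.Error); ok {
+	//       fmt.Println(aerr.Code(), aerr.Message())
+	//   }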
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_ListFunctions() { + svc := lambda.New(session.New()) + + params := &lambda.ListFunctionsInput{ + Marker: aws.String("String"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListFunctions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_ListVersionsByFunction() { + svc := lambda.New(session.New()) + + params := &lambda.ListVersionsByFunctionInput{ + FunctionName: aws.String("FunctionName"), // Required + Marker: aws.String("String"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListVersionsByFunction(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_PublishVersion() { + svc := lambda.New(session.New()) + + params := &lambda.PublishVersionInput{ + FunctionName: aws.String("FunctionName"), // Required + CodeSha256: aws.String("String"), + Description: aws.String("Description"), + } + resp, err := svc.PublishVersion(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_RemovePermission() { + svc := lambda.New(session.New()) + + params := &lambda.RemovePermissionInput{ + FunctionName: aws.String("FunctionName"), // Required + StatementId: aws.String("StatementId"), // Required + Qualifier: aws.String("Qualifier"), + } + resp, err := svc.RemovePermission(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_UpdateAlias() { + svc := lambda.New(session.New()) + + params := &lambda.UpdateAliasInput{ + FunctionName: aws.String("FunctionName"), // Required + Name: aws.String("Alias"), // Required + Description: aws.String("Description"), + FunctionVersion: aws.String("Version"), + } + resp, err := svc.UpdateAlias(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_UpdateEventSourceMapping() { + svc := lambda.New(session.New()) + + params := &lambda.UpdateEventSourceMappingInput{ + UUID: aws.String("String"), // Required + BatchSize: aws.Int64(1), + Enabled: aws.Bool(true), + FunctionName: aws.String("FunctionName"), + } + resp, err := svc.UpdateEventSourceMapping(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleLambda_UpdateFunctionCode() { + svc := lambda.New(session.New()) + + params := &lambda.UpdateFunctionCodeInput{ + FunctionName: aws.String("FunctionName"), // Required + Publish: aws.Bool(true), + S3Bucket: aws.String("S3Bucket"), + S3Key: aws.String("S3Key"), + S3ObjectVersion: aws.String("S3ObjectVersion"), + ZipFile: []byte("PAYLOAD"), + } + resp, err := svc.UpdateFunctionCode(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_UpdateFunctionConfiguration() { + svc := lambda.New(session.New()) + + params := &lambda.UpdateFunctionConfigurationInput{ + FunctionName: aws.String("FunctionName"), // Required + Description: aws.String("Description"), + Handler: aws.String("Handler"), + MemorySize: aws.Int64(1), + Role: aws.String("RoleArn"), + Runtime: aws.String("Runtime"), + Timeout: aws.Int64(1), + VpcConfig: &lambda.VpcConfig{ + SecurityGroupIds: []*string{ + aws.String("SecurityGroupId"), // Required + // More values... + }, + SubnetIds: []*string{ + aws.String("SubnetId"), // Required + // More values... + }, + }, + } + resp, err := svc.UpdateFunctionConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/lambda/lambdaiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/lambda/lambdaiface/interface.go new file mode 100644 index 000000000..03c2a40d9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/lambda/lambdaiface/interface.go @@ -0,0 +1,114 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package lambdaiface provides an interface for the AWS Lambda. +package lambdaiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/lambda" +) + +// LambdaAPI is the interface type for lambda.Lambda. 
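+//
+// One way to use it (a sketch, not part of the generated file) is to embed
+// the interface in a test double and override only the methods a test
+// exercises:
+//
+//    type mockLambda struct {
+//        lambdaiface.LambdaAPI
+//    }
+//
+// The var _ assertion at the end of this file verifies that *lambda.Lambda
+// itself satisfies LambdaAPI.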
+type LambdaAPI interface { + AddPermissionRequest(*lambda.AddPermissionInput) (*request.Request, *lambda.AddPermissionOutput) + + AddPermission(*lambda.AddPermissionInput) (*lambda.AddPermissionOutput, error) + + CreateAliasRequest(*lambda.CreateAliasInput) (*request.Request, *lambda.AliasConfiguration) + + CreateAlias(*lambda.CreateAliasInput) (*lambda.AliasConfiguration, error) + + CreateEventSourceMappingRequest(*lambda.CreateEventSourceMappingInput) (*request.Request, *lambda.EventSourceMappingConfiguration) + + CreateEventSourceMapping(*lambda.CreateEventSourceMappingInput) (*lambda.EventSourceMappingConfiguration, error) + + CreateFunctionRequest(*lambda.CreateFunctionInput) (*request.Request, *lambda.FunctionConfiguration) + + CreateFunction(*lambda.CreateFunctionInput) (*lambda.FunctionConfiguration, error) + + DeleteAliasRequest(*lambda.DeleteAliasInput) (*request.Request, *lambda.DeleteAliasOutput) + + DeleteAlias(*lambda.DeleteAliasInput) (*lambda.DeleteAliasOutput, error) + + DeleteEventSourceMappingRequest(*lambda.DeleteEventSourceMappingInput) (*request.Request, *lambda.EventSourceMappingConfiguration) + + DeleteEventSourceMapping(*lambda.DeleteEventSourceMappingInput) (*lambda.EventSourceMappingConfiguration, error) + + DeleteFunctionRequest(*lambda.DeleteFunctionInput) (*request.Request, *lambda.DeleteFunctionOutput) + + DeleteFunction(*lambda.DeleteFunctionInput) (*lambda.DeleteFunctionOutput, error) + + GetAliasRequest(*lambda.GetAliasInput) (*request.Request, *lambda.AliasConfiguration) + + GetAlias(*lambda.GetAliasInput) (*lambda.AliasConfiguration, error) + + GetEventSourceMappingRequest(*lambda.GetEventSourceMappingInput) (*request.Request, *lambda.EventSourceMappingConfiguration) + + GetEventSourceMapping(*lambda.GetEventSourceMappingInput) (*lambda.EventSourceMappingConfiguration, error) + + GetFunctionRequest(*lambda.GetFunctionInput) (*request.Request, *lambda.GetFunctionOutput) + + GetFunction(*lambda.GetFunctionInput) (*lambda.GetFunctionOutput, error) + + GetFunctionConfigurationRequest(*lambda.GetFunctionConfigurationInput) (*request.Request, *lambda.FunctionConfiguration) + + GetFunctionConfiguration(*lambda.GetFunctionConfigurationInput) (*lambda.FunctionConfiguration, error) + + GetPolicyRequest(*lambda.GetPolicyInput) (*request.Request, *lambda.GetPolicyOutput) + + GetPolicy(*lambda.GetPolicyInput) (*lambda.GetPolicyOutput, error) + + InvokeRequest(*lambda.InvokeInput) (*request.Request, *lambda.InvokeOutput) + + Invoke(*lambda.InvokeInput) (*lambda.InvokeOutput, error) + + InvokeAsyncRequest(*lambda.InvokeAsyncInput) (*request.Request, *lambda.InvokeAsyncOutput) + + InvokeAsync(*lambda.InvokeAsyncInput) (*lambda.InvokeAsyncOutput, error) + + ListAliasesRequest(*lambda.ListAliasesInput) (*request.Request, *lambda.ListAliasesOutput) + + ListAliases(*lambda.ListAliasesInput) (*lambda.ListAliasesOutput, error) + + ListEventSourceMappingsRequest(*lambda.ListEventSourceMappingsInput) (*request.Request, *lambda.ListEventSourceMappingsOutput) + + ListEventSourceMappings(*lambda.ListEventSourceMappingsInput) (*lambda.ListEventSourceMappingsOutput, error) + + ListEventSourceMappingsPages(*lambda.ListEventSourceMappingsInput, func(*lambda.ListEventSourceMappingsOutput, bool) bool) error + + ListFunctionsRequest(*lambda.ListFunctionsInput) (*request.Request, *lambda.ListFunctionsOutput) + + ListFunctions(*lambda.ListFunctionsInput) (*lambda.ListFunctionsOutput, error) + + ListFunctionsPages(*lambda.ListFunctionsInput, func(*lambda.ListFunctionsOutput, bool) bool) 
error
+
+	ListVersionsByFunctionRequest(*lambda.ListVersionsByFunctionInput) (*request.Request, *lambda.ListVersionsByFunctionOutput)
+
+	ListVersionsByFunction(*lambda.ListVersionsByFunctionInput) (*lambda.ListVersionsByFunctionOutput, error)
+
+	PublishVersionRequest(*lambda.PublishVersionInput) (*request.Request, *lambda.FunctionConfiguration)
+
+	PublishVersion(*lambda.PublishVersionInput) (*lambda.FunctionConfiguration, error)
+
+	RemovePermissionRequest(*lambda.RemovePermissionInput) (*request.Request, *lambda.RemovePermissionOutput)
+
+	RemovePermission(*lambda.RemovePermissionInput) (*lambda.RemovePermissionOutput, error)
+
+	UpdateAliasRequest(*lambda.UpdateAliasInput) (*request.Request, *lambda.AliasConfiguration)
+
+	UpdateAlias(*lambda.UpdateAliasInput) (*lambda.AliasConfiguration, error)
+
+	UpdateEventSourceMappingRequest(*lambda.UpdateEventSourceMappingInput) (*request.Request, *lambda.EventSourceMappingConfiguration)
+
+	UpdateEventSourceMapping(*lambda.UpdateEventSourceMappingInput) (*lambda.EventSourceMappingConfiguration, error)
+
+	UpdateFunctionCodeRequest(*lambda.UpdateFunctionCodeInput) (*request.Request, *lambda.FunctionConfiguration)
+
+	UpdateFunctionCode(*lambda.UpdateFunctionCodeInput) (*lambda.FunctionConfiguration, error)
+
+	UpdateFunctionConfigurationRequest(*lambda.UpdateFunctionConfigurationInput) (*request.Request, *lambda.FunctionConfiguration)
+
+	UpdateFunctionConfiguration(*lambda.UpdateFunctionConfigurationInput) (*lambda.FunctionConfiguration, error)
+}
+
+var _ LambdaAPI = (*lambda.Lambda)(nil)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/lambda/service.go b/vendor/github.com/aws/aws-sdk-go/service/lambda/service.go
new file mode 100644
index 000000000..8b155cf84
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/lambda/service.go
@@ -0,0 +1,92 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package lambda
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/restjson"
+)
+
+// Overview
+//
+// This is the AWS Lambda API Reference. The AWS Lambda Developer Guide provides
+// additional information. For the service overview, go to What is AWS Lambda
+// (http://docs.aws.amazon.com/lambda/latest/dg/welcome.html), and for information
+// about how the service works, go to AWS Lambda: How it Works (http://docs.aws.amazon.com/lambda/latest/dg/lambda-introduction.html)
+// in the AWS Lambda Developer Guide.
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
type Lambda struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "lambda"
+
+// New creates a new instance of the Lambda client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create a Lambda client from just a session.
+// svc := lambda.New(mySession) +// +// // Create a Lambda client with additional configuration +// svc := lambda.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *Lambda { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *Lambda { + svc := &Lambda{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-03-31", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a Lambda operation and runs any +// custom request initialization. +func (c *Lambda) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/machinelearning/api.go b/vendor/github.com/aws/aws-sdk-go/service/machinelearning/api.go new file mode 100644 index 000000000..b8b4b6c24 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/machinelearning/api.go @@ -0,0 +1,5602 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package machinelearning provides a client for Amazon Machine Learning. +package machinelearning + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAddTags = "AddTags" + +// AddTagsRequest generates a "aws/request.Request" representing the +// client's request for the AddTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddTagsRequest method. +// req, resp := client.AddTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *MachineLearning) AddTagsRequest(input *AddTagsInput) (req *request.Request, output *AddTagsOutput) { + op := &request.Operation{ + Name: opAddTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &AddTagsOutput{} + req.Data = output + return +} + +// Adds one or more tags to an object, up to a limit of 10. 
Each tag consists +// of a key and an optional value. If you add a tag using a key that is already +// associated with the ML object, AddTags updates the tag's value. +func (c *MachineLearning) AddTags(input *AddTagsInput) (*AddTagsOutput, error) { + req, out := c.AddTagsRequest(input) + err := req.Send() + return out, err +} + +const opCreateBatchPrediction = "CreateBatchPrediction" + +// CreateBatchPredictionRequest generates a "aws/request.Request" representing the +// client's request for the CreateBatchPrediction operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateBatchPrediction method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateBatchPredictionRequest method. +// req, resp := client.CreateBatchPredictionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *MachineLearning) CreateBatchPredictionRequest(input *CreateBatchPredictionInput) (req *request.Request, output *CreateBatchPredictionOutput) { + op := &request.Operation{ + Name: opCreateBatchPrediction, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateBatchPredictionInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateBatchPredictionOutput{} + req.Data = output + return +} + +// Generates predictions for a group of observations. The observations to process +// exist in one or more data files referenced by a DataSource. This operation +// creates a new BatchPrediction, and uses an MLModel and the data files referenced +// by the DataSource as information sources. +// +// CreateBatchPrediction is an asynchronous operation. In response to CreateBatchPrediction, +// Amazon Machine Learning (Amazon ML) immediately returns and sets the BatchPrediction +// status to PENDING. After the BatchPrediction completes, Amazon ML sets the +// status to COMPLETED. +// +// You can poll for status updates by using the GetBatchPrediction operation +// and checking the Status parameter of the result. After the COMPLETED status +// appears, the results are available in the location specified by the OutputUri +// parameter. +func (c *MachineLearning) CreateBatchPrediction(input *CreateBatchPredictionInput) (*CreateBatchPredictionOutput, error) { + req, out := c.CreateBatchPredictionRequest(input) + err := req.Send() + return out, err +} + +const opCreateDataSourceFromRDS = "CreateDataSourceFromRDS" + +// CreateDataSourceFromRDSRequest generates a "aws/request.Request" representing the +// client's request for the CreateDataSourceFromRDS operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If
+// you just want the service response, call the CreateDataSourceFromRDS method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the CreateDataSourceFromRDSRequest method.
+//    req, resp := client.CreateDataSourceFromRDSRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *MachineLearning) CreateDataSourceFromRDSRequest(input *CreateDataSourceFromRDSInput) (req *request.Request, output *CreateDataSourceFromRDSOutput) {
+	op := &request.Operation{
+		Name:       opCreateDataSourceFromRDS,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateDataSourceFromRDSInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CreateDataSourceFromRDSOutput{}
+	req.Data = output
+	return
+}
+
+// Creates a DataSource object from an Amazon Relational Database Service (http://aws.amazon.com/rds/)
+// (Amazon RDS). A DataSource references data that can be used to perform CreateMLModel,
+// CreateEvaluation, or CreateBatchPrediction operations.
+//
+// CreateDataSourceFromRDS is an asynchronous operation. In response to CreateDataSourceFromRDS,
+// Amazon Machine Learning (Amazon ML) immediately returns and sets the DataSource
+// status to PENDING. After the DataSource is created and ready for use, Amazon
+// ML sets the Status parameter to COMPLETED. DataSource in the COMPLETED or
+// PENDING state can be used only to perform CreateMLModel, CreateEvaluation,
+// or CreateBatchPrediction operations.
+//
+// If Amazon ML cannot accept the input source, it sets the Status parameter
+// to FAILED and includes an error message in the Message attribute of the GetDataSource
+// operation response.
+func (c *MachineLearning) CreateDataSourceFromRDS(input *CreateDataSourceFromRDSInput) (*CreateDataSourceFromRDSOutput, error) {
+	req, out := c.CreateDataSourceFromRDSRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateDataSourceFromRedshift = "CreateDataSourceFromRedshift"
+
+// CreateDataSourceFromRedshiftRequest generates a "aws/request.Request" representing the
+// client's request for the CreateDataSourceFromRedshift operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CreateDataSourceFromRedshift method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the CreateDataSourceFromRedshiftRequest method.
+// req, resp := client.CreateDataSourceFromRedshiftRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *MachineLearning) CreateDataSourceFromRedshiftRequest(input *CreateDataSourceFromRedshiftInput) (req *request.Request, output *CreateDataSourceFromRedshiftOutput) { + op := &request.Operation{ + Name: opCreateDataSourceFromRedshift, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDataSourceFromRedshiftInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDataSourceFromRedshiftOutput{} + req.Data = output + return +} + +// Creates a DataSource from a database hosted on an Amazon Redshift cluster. +// A DataSource references data that can be used to perform either CreateMLModel, +// CreateEvaluation, or CreateBatchPrediction operations. +// +// CreateDataSourceFromRedshift is an asynchronous operation. In response to +// CreateDataSourceFromRedshift, Amazon Machine Learning (Amazon ML) immediately +// returns and sets the DataSource status to PENDING. After the DataSource is +// created and ready for use, Amazon ML sets the Status parameter to COMPLETED. +// DataSource in COMPLETED or PENDING states can be used to perform only CreateMLModel, +// CreateEvaluation, or CreateBatchPrediction operations. +// +// If Amazon ML can't accept the input source, it sets the Status parameter +// to FAILED and includes an error message in the Message attribute of the GetDataSource +// operation response. +// +// The observations should be contained in the database hosted on an Amazon +// Redshift cluster and should be specified by a SelectSqlQuery query. Amazon +// ML executes an Unload command in Amazon Redshift to transfer the result set +// of the SelectSqlQuery query to S3StagingLocation. +// +// After the DataSource has been created, it's ready for use in evaluations +// and batch predictions. If you plan to use the DataSource to train an MLModel, +// the DataSource also requires a recipe. A recipe describes how each input +// variable will be used in training an MLModel. Will the variable be included +// or excluded from training? Will the variable be manipulated; for example, +// will it be combined with another variable or will it be split apart into +// word combinations? The recipe provides answers to these questions. +// +// You can't change an existing datasource, but you can copy and modify the +// settings from an existing Amazon Redshift datasource to create a new datasource. +// To do so, call GetDataSource for an existing datasource and copy the values +// to a CreateDataSource call. Change the settings that you want to change and +// make sure that all required fields have the appropriate values. +func (c *MachineLearning) CreateDataSourceFromRedshift(input *CreateDataSourceFromRedshiftInput) (*CreateDataSourceFromRedshiftOutput, error) { + req, out := c.CreateDataSourceFromRedshiftRequest(input) + err := req.Send() + return out, err +} + +const opCreateDataSourceFromS3 = "CreateDataSourceFromS3" + +// CreateDataSourceFromS3Request generates a "aws/request.Request" representing the +// client's request for the CreateDataSourceFromS3 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDataSourceFromS3 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDataSourceFromS3Request method. +// req, resp := client.CreateDataSourceFromS3Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *MachineLearning) CreateDataSourceFromS3Request(input *CreateDataSourceFromS3Input) (req *request.Request, output *CreateDataSourceFromS3Output) { + op := &request.Operation{ + Name: opCreateDataSourceFromS3, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDataSourceFromS3Input{} + } + + req = c.newRequest(op, input, output) + output = &CreateDataSourceFromS3Output{} + req.Data = output + return +} + +// Creates a DataSource object. A DataSource references data that can be used +// to perform CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations. +// +// CreateDataSourceFromS3 is an asynchronous operation. In response to CreateDataSourceFromS3, +// Amazon Machine Learning (Amazon ML) immediately returns and sets the DataSource +// status to PENDING. After the DataSource has been created and is ready for +// use, Amazon ML sets the Status parameter to COMPLETED. DataSource in the +// COMPLETED or PENDING state can be used to perform only CreateMLModel, CreateEvaluation +// or CreateBatchPrediction operations. +// +// If Amazon ML can't accept the input source, it sets the Status parameter +// to FAILED and includes an error message in the Message attribute of the GetDataSource +// operation response. +// +// The observation data used in a DataSource should be ready to use; that is, +// it should have a consistent structure, and missing data values should be +// kept to a minimum. The observation data must reside in one or more .csv files +// in an Amazon Simple Storage Service (Amazon S3) location, along with a schema +// that describes the data items by name and type. The same schema must be used +// for all of the data files referenced by the DataSource. +// +// After the DataSource has been created, it's ready to use in evaluations +// and batch predictions. If you plan to use the DataSource to train an MLModel, +// the DataSource also needs a recipe. A recipe describes how each input variable +// will be used in training an MLModel. Will the variable be included or excluded +// from training? Will the variable be manipulated; for example, will it be +// combined with another variable or will it be split apart into word combinations? +// The recipe provides answers to these questions. +func (c *MachineLearning) CreateDataSourceFromS3(input *CreateDataSourceFromS3Input) (*CreateDataSourceFromS3Output, error) { + req, out := c.CreateDataSourceFromS3Request(input) + err := req.Send() + return out, err +} + +const opCreateEvaluation = "CreateEvaluation" + +// CreateEvaluationRequest generates a "aws/request.Request" representing the +// client's request for the CreateEvaluation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateEvaluation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateEvaluationRequest method. +// req, resp := client.CreateEvaluationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *MachineLearning) CreateEvaluationRequest(input *CreateEvaluationInput) (req *request.Request, output *CreateEvaluationOutput) { + op := &request.Operation{ + Name: opCreateEvaluation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateEvaluationInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateEvaluationOutput{} + req.Data = output + return +} + +// Creates a new Evaluation of an MLModel. An MLModel is evaluated on a set +// of observations associated to a DataSource. Like a DataSource for an MLModel, +// the DataSource for an Evaluation contains values for the Target Variable. +// The Evaluation compares the predicted result for each observation to the +// actual outcome and provides a summary so that you know how effective the +// MLModel functions on the test data. Evaluation generates a relevant performance +// metric, such as BinaryAUC, RegressionRMSE or MulticlassAvgFScore based on +// the corresponding MLModelType: BINARY, REGRESSION or MULTICLASS. +// +// CreateEvaluation is an asynchronous operation. In response to CreateEvaluation, +// Amazon Machine Learning (Amazon ML) immediately returns and sets the evaluation +// status to PENDING. After the Evaluation is created and ready for use, Amazon +// ML sets the status to COMPLETED. +// +// You can use the GetEvaluation operation to check progress of the evaluation +// during the creation operation. +func (c *MachineLearning) CreateEvaluation(input *CreateEvaluationInput) (*CreateEvaluationOutput, error) { + req, out := c.CreateEvaluationRequest(input) + err := req.Send() + return out, err +} + +const opCreateMLModel = "CreateMLModel" + +// CreateMLModelRequest generates a "aws/request.Request" representing the +// client's request for the CreateMLModel operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateMLModel method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateMLModelRequest method. 
+//    req, resp := client.CreateMLModelRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *MachineLearning) CreateMLModelRequest(input *CreateMLModelInput) (req *request.Request, output *CreateMLModelOutput) {
+	op := &request.Operation{
+		Name:       opCreateMLModel,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateMLModelInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CreateMLModelOutput{}
+	req.Data = output
+	return
+}
+
+// Creates a new MLModel using the DataSource and the recipe as information
+// sources.
+//
+// An MLModel is nearly immutable. Users can update only the MLModelName and
+// the ScoreThreshold in an MLModel without creating a new MLModel.
+//
+// CreateMLModel is an asynchronous operation. In response to CreateMLModel,
+// Amazon Machine Learning (Amazon ML) immediately returns and sets the MLModel
+// status to PENDING. After the MLModel has been created and is ready for use,
+// Amazon ML sets the status to COMPLETED.
+//
+// You can use the GetMLModel operation to check the progress of the MLModel
+// during the creation operation.
+//
+// CreateMLModel requires a DataSource with computed statistics, which can
+// be created by setting ComputeStatistics to true in CreateDataSourceFromRDS,
+// CreateDataSourceFromS3, or CreateDataSourceFromRedshift operations.
+func (c *MachineLearning) CreateMLModel(input *CreateMLModelInput) (*CreateMLModelOutput, error) {
+	req, out := c.CreateMLModelRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateRealtimeEndpoint = "CreateRealtimeEndpoint"
+
+// CreateRealtimeEndpointRequest generates a "aws/request.Request" representing the
+// client's request for the CreateRealtimeEndpoint operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CreateRealtimeEndpoint method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the CreateRealtimeEndpointRequest method.
+//    req, resp := client.CreateRealtimeEndpointRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *MachineLearning) CreateRealtimeEndpointRequest(input *CreateRealtimeEndpointInput) (req *request.Request, output *CreateRealtimeEndpointOutput) {
+	op := &request.Operation{
+		Name:       opCreateRealtimeEndpoint,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateRealtimeEndpointInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CreateRealtimeEndpointOutput{}
+	req.Data = output
+	return
+}
+
+// Creates a real-time endpoint for the MLModel. The endpoint contains the URI
+// of the MLModel; that is, the location to send real-time prediction requests
+// for the specified MLModel.
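+//
+// A minimal call sketch (the model ID below is hypothetical, and the
+// RealtimeEndpointInfo field on the output is an assumption about this API's
+// response shape):
+//
+//    out, err := svc.CreateRealtimeEndpoint(&machinelearning.CreateRealtimeEndpointInput{
+//        MLModelId: aws.String("ml-ExampleModelId"), // hypothetical ID
+//    })
+//    if err == nil {
+//        fmt.Println(out.RealtimeEndpointInfo)
+//    }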
+func (c *MachineLearning) CreateRealtimeEndpoint(input *CreateRealtimeEndpointInput) (*CreateRealtimeEndpointOutput, error) { + req, out := c.CreateRealtimeEndpointRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBatchPrediction = "DeleteBatchPrediction" + +// DeleteBatchPredictionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBatchPrediction operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBatchPrediction method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBatchPredictionRequest method. +// req, resp := client.DeleteBatchPredictionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *MachineLearning) DeleteBatchPredictionRequest(input *DeleteBatchPredictionInput) (req *request.Request, output *DeleteBatchPredictionOutput) { + op := &request.Operation{ + Name: opDeleteBatchPrediction, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteBatchPredictionInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteBatchPredictionOutput{} + req.Data = output + return +} + +// Assigns the DELETED status to a BatchPrediction, rendering it unusable. +// +// After using the DeleteBatchPrediction operation, you can use the GetBatchPrediction +// operation to verify that the status of the BatchPrediction changed to DELETED. +// +// Caution: The result of the DeleteBatchPrediction operation is irreversible. +func (c *MachineLearning) DeleteBatchPrediction(input *DeleteBatchPredictionInput) (*DeleteBatchPredictionOutput, error) { + req, out := c.DeleteBatchPredictionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDataSource = "DeleteDataSource" + +// DeleteDataSourceRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDataSource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDataSource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteDataSourceRequest method. 
+// req, resp := client.DeleteDataSourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *MachineLearning) DeleteDataSourceRequest(input *DeleteDataSourceInput) (req *request.Request, output *DeleteDataSourceOutput) { + op := &request.Operation{ + Name: opDeleteDataSource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDataSourceInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteDataSourceOutput{} + req.Data = output + return +} + +// Assigns the DELETED status to a DataSource, rendering it unusable. +// +// After using the DeleteDataSource operation, you can use the GetDataSource +// operation to verify that the status of the DataSource changed to DELETED. +// +// Caution: The results of the DeleteDataSource operation are irreversible. +func (c *MachineLearning) DeleteDataSource(input *DeleteDataSourceInput) (*DeleteDataSourceOutput, error) { + req, out := c.DeleteDataSourceRequest(input) + err := req.Send() + return out, err +} + +const opDeleteEvaluation = "DeleteEvaluation" + +// DeleteEvaluationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteEvaluation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteEvaluation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteEvaluationRequest method. +// req, resp := client.DeleteEvaluationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *MachineLearning) DeleteEvaluationRequest(input *DeleteEvaluationInput) (req *request.Request, output *DeleteEvaluationOutput) { + op := &request.Operation{ + Name: opDeleteEvaluation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteEvaluationInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteEvaluationOutput{} + req.Data = output + return +} + +// Assigns the DELETED status to an Evaluation, rendering it unusable. +// +// After invoking the DeleteEvaluation operation, you can use the GetEvaluation +// operation to verify that the status of the Evaluation changed to DELETED. +// +// Caution The results of the DeleteEvaluation operation are irreversible. +func (c *MachineLearning) DeleteEvaluation(input *DeleteEvaluationInput) (*DeleteEvaluationOutput, error) { + req, out := c.DeleteEvaluationRequest(input) + err := req.Send() + return out, err +} + +const opDeleteMLModel = "DeleteMLModel" + +// DeleteMLModelRequest generates a "aws/request.Request" representing the +// client's request for the DeleteMLModel operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteMLModel method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteMLModelRequest method. +// req, resp := client.DeleteMLModelRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *MachineLearning) DeleteMLModelRequest(input *DeleteMLModelInput) (req *request.Request, output *DeleteMLModelOutput) { + op := &request.Operation{ + Name: opDeleteMLModel, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteMLModelInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteMLModelOutput{} + req.Data = output + return +} + +// Assigns the DELETED status to an MLModel, rendering it unusable. +// +// After using the DeleteMLModel operation, you can use the GetMLModel operation +// to verify that the status of the MLModel changed to DELETED. +// +// Caution: The result of the DeleteMLModel operation is irreversible. +func (c *MachineLearning) DeleteMLModel(input *DeleteMLModelInput) (*DeleteMLModelOutput, error) { + req, out := c.DeleteMLModelRequest(input) + err := req.Send() + return out, err +} + +const opDeleteRealtimeEndpoint = "DeleteRealtimeEndpoint" + +// DeleteRealtimeEndpointRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRealtimeEndpoint operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteRealtimeEndpoint method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteRealtimeEndpointRequest method. +// req, resp := client.DeleteRealtimeEndpointRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *MachineLearning) DeleteRealtimeEndpointRequest(input *DeleteRealtimeEndpointInput) (req *request.Request, output *DeleteRealtimeEndpointOutput) { + op := &request.Operation{ + Name: opDeleteRealtimeEndpoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRealtimeEndpointInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteRealtimeEndpointOutput{} + req.Data = output + return +} + +// Deletes a real time endpoint of an MLModel. +func (c *MachineLearning) DeleteRealtimeEndpoint(input *DeleteRealtimeEndpointInput) (*DeleteRealtimeEndpointOutput, error) { + req, out := c.DeleteRealtimeEndpointRequest(input) + err := req.Send() + return out, err +} + +const opDeleteTags = "DeleteTags" + +// DeleteTagsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTags operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteTagsRequest method. +// req, resp := client.DeleteTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *MachineLearning) DeleteTagsRequest(input *DeleteTagsInput) (req *request.Request, output *DeleteTagsOutput) { + op := &request.Operation{ + Name: opDeleteTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteTagsOutput{} + req.Data = output + return +} + +// Deletes the specified tags associated with an ML object. After this operation +// is complete, you can't recover deleted tags. +// +// If you specify a tag that doesn't exist, Amazon ML ignores it. +func (c *MachineLearning) DeleteTags(input *DeleteTagsInput) (*DeleteTagsOutput, error) { + req, out := c.DeleteTagsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeBatchPredictions = "DescribeBatchPredictions" + +// DescribeBatchPredictionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeBatchPredictions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeBatchPredictions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeBatchPredictionsRequest method. +// req, resp := client.DescribeBatchPredictionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *MachineLearning) DescribeBatchPredictionsRequest(input *DescribeBatchPredictionsInput) (req *request.Request, output *DescribeBatchPredictionsOutput) { + op := &request.Operation{ + Name: opDescribeBatchPredictions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeBatchPredictionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeBatchPredictionsOutput{} + req.Data = output + return +} + +// Returns a list of BatchPrediction operations that match the search criteria +// in the request. 
+func (c *MachineLearning) DescribeBatchPredictions(input *DescribeBatchPredictionsInput) (*DescribeBatchPredictionsOutput, error) { + req, out := c.DescribeBatchPredictionsRequest(input) + err := req.Send() + return out, err +} + +// DescribeBatchPredictionsPages iterates over the pages of a DescribeBatchPredictions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeBatchPredictions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeBatchPredictions operation. +// pageNum := 0 +// err := client.DescribeBatchPredictionsPages(params, +// func(page *DescribeBatchPredictionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *MachineLearning) DescribeBatchPredictionsPages(input *DescribeBatchPredictionsInput, fn func(p *DescribeBatchPredictionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeBatchPredictionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeBatchPredictionsOutput), lastPage) + }) +} + +const opDescribeDataSources = "DescribeDataSources" + +// DescribeDataSourcesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDataSources operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDataSources method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDataSourcesRequest method. +// req, resp := client.DescribeDataSourcesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *MachineLearning) DescribeDataSourcesRequest(input *DescribeDataSourcesInput) (req *request.Request, output *DescribeDataSourcesOutput) { + op := &request.Operation{ + Name: opDescribeDataSources, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeDataSourcesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDataSourcesOutput{} + req.Data = output + return +} + +// Returns a list of DataSource that match the search criteria in the request. +func (c *MachineLearning) DescribeDataSources(input *DescribeDataSourcesInput) (*DescribeDataSourcesOutput, error) { + req, out := c.DescribeDataSourcesRequest(input) + err := req.Send() + return out, err +} + +// DescribeDataSourcesPages iterates over the pages of a DescribeDataSources operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. 
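+//
+// A sketch of one common pattern, collecting every DataSource ID across all
+// pages (the accumulation itself is illustrative, not part of this API):
+//
+// var ids []*string
+// err := client.DescribeDataSourcesPages(params,
+// func(page *DescribeDataSourcesOutput, lastPage bool) bool {
+// for _, ds := range page.Results {
+// ids = append(ids, ds.DataSourceId)
+// }
+// return true // continue until the service reports the last page
+// })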
+//
+// See DescribeDataSources method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a DescribeDataSources operation.
+// pageNum := 0
+// err := client.DescribeDataSourcesPages(params,
+// func(page *DescribeDataSourcesOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *MachineLearning) DescribeDataSourcesPages(input *DescribeDataSourcesInput, fn func(p *DescribeDataSourcesOutput, lastPage bool) (shouldContinue bool)) error {
+    page, _ := c.DescribeDataSourcesRequest(input)
+    page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+    return page.EachPage(func(p interface{}, lastPage bool) bool {
+        return fn(p.(*DescribeDataSourcesOutput), lastPage)
+    })
+}
+
+const opDescribeEvaluations = "DescribeEvaluations"
+
+// DescribeEvaluationsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeEvaluations operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeEvaluations method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DescribeEvaluationsRequest method.
+// req, resp := client.DescribeEvaluationsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *MachineLearning) DescribeEvaluationsRequest(input *DescribeEvaluationsInput) (req *request.Request, output *DescribeEvaluationsOutput) {
+    op := &request.Operation{
+        Name:       opDescribeEvaluations,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+        Paginator: &request.Paginator{
+            InputTokens:     []string{"NextToken"},
+            OutputTokens:    []string{"NextToken"},
+            LimitToken:      "Limit",
+            TruncationToken: "",
+        },
+    }
+
+    if input == nil {
+        input = &DescribeEvaluationsInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &DescribeEvaluationsOutput{}
+    req.Data = output
+    return
+}
+
+// Returns a list of Evaluation that match the search criteria in the
+// request.
+func (c *MachineLearning) DescribeEvaluations(input *DescribeEvaluationsInput) (*DescribeEvaluationsOutput, error) {
+    req, out := c.DescribeEvaluationsRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+// DescribeEvaluationsPages iterates over the pages of a DescribeEvaluations operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See DescribeEvaluations method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a DescribeEvaluations operation.
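+// // params sketch for the example below; the status filter is an
+// // illustrative assumption, not a default.
+// params := &machinelearning.DescribeEvaluationsInput{
+// FilterVariable: aws.String("Status"),
+// EQ: aws.String("COMPLETED"),
+// }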
+// pageNum := 0 +// err := client.DescribeEvaluationsPages(params, +// func(page *DescribeEvaluationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *MachineLearning) DescribeEvaluationsPages(input *DescribeEvaluationsInput, fn func(p *DescribeEvaluationsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeEvaluationsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeEvaluationsOutput), lastPage) + }) +} + +const opDescribeMLModels = "DescribeMLModels" + +// DescribeMLModelsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeMLModels operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeMLModels method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeMLModelsRequest method. +// req, resp := client.DescribeMLModelsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *MachineLearning) DescribeMLModelsRequest(input *DescribeMLModelsInput) (req *request.Request, output *DescribeMLModelsOutput) { + op := &request.Operation{ + Name: opDescribeMLModels, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeMLModelsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeMLModelsOutput{} + req.Data = output + return +} + +// Returns a list of MLModel that match the search criteria in the request. +func (c *MachineLearning) DescribeMLModels(input *DescribeMLModelsInput) (*DescribeMLModelsOutput, error) { + req, out := c.DescribeMLModelsRequest(input) + err := req.Send() + return out, err +} + +// DescribeMLModelsPages iterates over the pages of a DescribeMLModels operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeMLModels method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeMLModels operation. 
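+// // params sketch for the example below; an empty input is assumed to list
+// // MLModels with the service's default paging.
+// params := &machinelearning.DescribeMLModelsInput{}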
+// pageNum := 0 +// err := client.DescribeMLModelsPages(params, +// func(page *DescribeMLModelsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *MachineLearning) DescribeMLModelsPages(input *DescribeMLModelsInput, fn func(p *DescribeMLModelsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeMLModelsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeMLModelsOutput), lastPage) + }) +} + +const opDescribeTags = "DescribeTags" + +// DescribeTagsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeTagsRequest method. +// req, resp := client.DescribeTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *MachineLearning) DescribeTagsRequest(input *DescribeTagsInput) (req *request.Request, output *DescribeTagsOutput) { + op := &request.Operation{ + Name: opDescribeTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTagsOutput{} + req.Data = output + return +} + +// Describes one or more of the tags for your Amazon ML object. +func (c *MachineLearning) DescribeTags(input *DescribeTagsInput) (*DescribeTagsOutput, error) { + req, out := c.DescribeTagsRequest(input) + err := req.Send() + return out, err +} + +const opGetBatchPrediction = "GetBatchPrediction" + +// GetBatchPredictionRequest generates a "aws/request.Request" representing the +// client's request for the GetBatchPrediction operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBatchPrediction method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBatchPredictionRequest method. 
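+// // params sketch; the ID shown is a placeholder assumption.
+// params := &machinelearning.GetBatchPredictionInput{
+// BatchPredictionId: aws.String("bp-exampleBatchPredictionId"),
+// }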
+// req, resp := client.GetBatchPredictionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *MachineLearning) GetBatchPredictionRequest(input *GetBatchPredictionInput) (req *request.Request, output *GetBatchPredictionOutput) { + op := &request.Operation{ + Name: opGetBatchPrediction, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetBatchPredictionInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBatchPredictionOutput{} + req.Data = output + return +} + +// Returns a BatchPrediction that includes detailed metadata, status, and data +// file information for a Batch Prediction request. +func (c *MachineLearning) GetBatchPrediction(input *GetBatchPredictionInput) (*GetBatchPredictionOutput, error) { + req, out := c.GetBatchPredictionRequest(input) + err := req.Send() + return out, err +} + +const opGetDataSource = "GetDataSource" + +// GetDataSourceRequest generates a "aws/request.Request" representing the +// client's request for the GetDataSource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetDataSource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetDataSourceRequest method. +// req, resp := client.GetDataSourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *MachineLearning) GetDataSourceRequest(input *GetDataSourceInput) (req *request.Request, output *GetDataSourceOutput) { + op := &request.Operation{ + Name: opGetDataSource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDataSourceInput{} + } + + req = c.newRequest(op, input, output) + output = &GetDataSourceOutput{} + req.Data = output + return +} + +// Returns a DataSource that includes metadata and data file information, as +// well as the current status of the DataSource. +// +// GetDataSource provides results in normal or verbose format. The verbose +// format adds the schema description and the list of files pointed to by the +// DataSource to the normal format. +func (c *MachineLearning) GetDataSource(input *GetDataSourceInput) (*GetDataSourceOutput, error) { + req, out := c.GetDataSourceRequest(input) + err := req.Send() + return out, err +} + +const opGetEvaluation = "GetEvaluation" + +// GetEvaluationRequest generates a "aws/request.Request" representing the +// client's request for the GetEvaluation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetEvaluation method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetEvaluationRequest method. +// req, resp := client.GetEvaluationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *MachineLearning) GetEvaluationRequest(input *GetEvaluationInput) (req *request.Request, output *GetEvaluationOutput) { + op := &request.Operation{ + Name: opGetEvaluation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetEvaluationInput{} + } + + req = c.newRequest(op, input, output) + output = &GetEvaluationOutput{} + req.Data = output + return +} + +// Returns an Evaluation that includes metadata as well as the current status +// of the Evaluation. +func (c *MachineLearning) GetEvaluation(input *GetEvaluationInput) (*GetEvaluationOutput, error) { + req, out := c.GetEvaluationRequest(input) + err := req.Send() + return out, err +} + +const opGetMLModel = "GetMLModel" + +// GetMLModelRequest generates a "aws/request.Request" representing the +// client's request for the GetMLModel operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetMLModel method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetMLModelRequest method. +// req, resp := client.GetMLModelRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *MachineLearning) GetMLModelRequest(input *GetMLModelInput) (req *request.Request, output *GetMLModelOutput) { + op := &request.Operation{ + Name: opGetMLModel, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetMLModelInput{} + } + + req = c.newRequest(op, input, output) + output = &GetMLModelOutput{} + req.Data = output + return +} + +// Returns an MLModel that includes detailed metadata, data source information, +// and the current status of the MLModel. +// +// GetMLModel provides results in normal or verbose format. +func (c *MachineLearning) GetMLModel(input *GetMLModelInput) (*GetMLModelOutput, error) { + req, out := c.GetMLModelRequest(input) + err := req.Send() + return out, err +} + +const opPredict = "Predict" + +// PredictRequest generates a "aws/request.Request" representing the +// client's request for the Predict operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the Predict method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PredictRequest method. 
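+// // params sketch; the endpoint URL and the record's feature name and value
+// // are placeholder assumptions.
+// params := &machinelearning.PredictInput{
+// MLModelId: aws.String("ml-exampleModelId"),
+// PredictEndpoint: aws.String("https://realtime.machinelearning.us-east-1.amazonaws.com"),
+// Record: map[string]*string{
+// "exampleFeature": aws.String("exampleValue"),
+// },
+// }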
+// req, resp := client.PredictRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *MachineLearning) PredictRequest(input *PredictInput) (req *request.Request, output *PredictOutput) {
+    op := &request.Operation{
+        Name:       opPredict,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &PredictInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &PredictOutput{}
+    req.Data = output
+    return
+}
+
+// Generates a prediction for the observation using the specified MLModel.
+//
+// Note: Not all response parameters will be populated. Whether a response parameter
+// is populated depends on the type of model requested.
+func (c *MachineLearning) Predict(input *PredictInput) (*PredictOutput, error) {
+    req, out := c.PredictRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opUpdateBatchPrediction = "UpdateBatchPrediction"
+
+// UpdateBatchPredictionRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateBatchPrediction operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the UpdateBatchPrediction method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the UpdateBatchPredictionRequest method.
+// req, resp := client.UpdateBatchPredictionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *MachineLearning) UpdateBatchPredictionRequest(input *UpdateBatchPredictionInput) (req *request.Request, output *UpdateBatchPredictionOutput) {
+    op := &request.Operation{
+        Name:       opUpdateBatchPrediction,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &UpdateBatchPredictionInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &UpdateBatchPredictionOutput{}
+    req.Data = output
+    return
+}
+
+// Updates the BatchPredictionName of a BatchPrediction.
+//
+// You can use the GetBatchPrediction operation to view the contents of the
+// updated data element.
+func (c *MachineLearning) UpdateBatchPrediction(input *UpdateBatchPredictionInput) (*UpdateBatchPredictionOutput, error) {
+    req, out := c.UpdateBatchPredictionRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opUpdateDataSource = "UpdateDataSource"
+
+// UpdateDataSourceRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateDataSource operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the UpdateDataSource method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+// +// // Example sending a request using the UpdateDataSourceRequest method. +// req, resp := client.UpdateDataSourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *MachineLearning) UpdateDataSourceRequest(input *UpdateDataSourceInput) (req *request.Request, output *UpdateDataSourceOutput) { + op := &request.Operation{ + Name: opUpdateDataSource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateDataSourceInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateDataSourceOutput{} + req.Data = output + return +} + +// Updates the DataSourceName of a DataSource. +// +// You can use the GetDataSource operation to view the contents of the updated +// data element. +func (c *MachineLearning) UpdateDataSource(input *UpdateDataSourceInput) (*UpdateDataSourceOutput, error) { + req, out := c.UpdateDataSourceRequest(input) + err := req.Send() + return out, err +} + +const opUpdateEvaluation = "UpdateEvaluation" + +// UpdateEvaluationRequest generates a "aws/request.Request" representing the +// client's request for the UpdateEvaluation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateEvaluation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateEvaluationRequest method. +// req, resp := client.UpdateEvaluationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *MachineLearning) UpdateEvaluationRequest(input *UpdateEvaluationInput) (req *request.Request, output *UpdateEvaluationOutput) { + op := &request.Operation{ + Name: opUpdateEvaluation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateEvaluationInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateEvaluationOutput{} + req.Data = output + return +} + +// Updates the EvaluationName of an Evaluation. +// +// You can use the GetEvaluation operation to view the contents of the updated +// data element. +func (c *MachineLearning) UpdateEvaluation(input *UpdateEvaluationInput) (*UpdateEvaluationOutput, error) { + req, out := c.UpdateEvaluationRequest(input) + err := req.Send() + return out, err +} + +const opUpdateMLModel = "UpdateMLModel" + +// UpdateMLModelRequest generates a "aws/request.Request" representing the +// client's request for the UpdateMLModel operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateMLModel method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the UpdateMLModelRequest method. +// req, resp := client.UpdateMLModelRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *MachineLearning) UpdateMLModelRequest(input *UpdateMLModelInput) (req *request.Request, output *UpdateMLModelOutput) { + op := &request.Operation{ + Name: opUpdateMLModel, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateMLModelInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateMLModelOutput{} + req.Data = output + return +} + +// Updates the MLModelName and the ScoreThreshold of an MLModel. +// +// You can use the GetMLModel operation to view the contents of the updated +// data element. +func (c *MachineLearning) UpdateMLModel(input *UpdateMLModelInput) (*UpdateMLModelOutput, error) { + req, out := c.UpdateMLModelRequest(input) + err := req.Send() + return out, err +} + +type AddTagsInput struct { + _ struct{} `type:"structure"` + + // The ID of the ML object to tag. For example, exampleModelId. + ResourceId *string `min:"1" type:"string" required:"true"` + + // The type of the ML object to tag. + ResourceType *string `type:"string" required:"true" enum:"TaggableResourceType"` + + // The key-value pairs to use to create tags. If you specify a key without specifying + // a value, Amazon ML creates a tag with the specified key and a value of null. + Tags []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s AddTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddTagsInput"} + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.ResourceId != nil && len(*s.ResourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) + } + if s.ResourceType == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceType")) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Amazon ML returns the following elements. +type AddTagsOutput struct { + _ struct{} `type:"structure"` + + // The ID of the ML object that was tagged. + ResourceId *string `min:"1" type:"string"` + + // The type of the ML object that was tagged. + ResourceType *string `type:"string" enum:"TaggableResourceType"` +} + +// String returns the string representation +func (s AddTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsOutput) GoString() string { + return s.String() +} + +// Represents the output of a GetBatchPrediction operation. +// +// The content consists of the detailed metadata, the status, and the data +// file information of a Batch Prediction. +type BatchPrediction struct { + _ struct{} `type:"structure"` + + // The ID of the DataSource that points to the group of observations to predict. 
+    BatchPredictionDataSourceId *string `min:"1" type:"string"`
+
+    // The ID assigned to the BatchPrediction at creation. This value should be
+    // identical to the value of the BatchPredictionID in the request.
+    BatchPredictionId *string `min:"1" type:"string"`
+
+    // The time that the BatchPrediction was created. The time is expressed in epoch
+    // time.
+    CreatedAt *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+    // The AWS user account that invoked the BatchPrediction. The account type can
+    // be either an AWS root account or an AWS Identity and Access Management (IAM)
+    // user account.
+    CreatedByIamUser *string `type:"string"`
+
+    // The location of the data file or directory in Amazon Simple Storage Service
+    // (Amazon S3).
+    InputDataLocationS3 *string `type:"string"`
+
+    // The time of the most recent edit to the BatchPrediction. The time is expressed
+    // in epoch time.
+    LastUpdatedAt *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+    // The ID of the MLModel that generated predictions for the BatchPrediction
+    // request.
+    MLModelId *string `min:"1" type:"string"`
+
+    // A description of the most recent details about processing the batch prediction
+    // request.
+    Message *string `type:"string"`
+
+    // A user-supplied name or description of the BatchPrediction.
+    Name *string `type:"string"`
+
+    // The location of an Amazon S3 bucket or directory to receive the operation
+    // results. The following substrings are not allowed in the s3 key portion of
+    // the outputURI field: ':', '//', '/./', '/../'.
+    OutputUri *string `type:"string"`
+
+    // The status of the BatchPrediction. This element can have one of the following
+    // values:
+    //
+    // PENDING - Amazon Machine Learning (Amazon ML) submitted a request to generate
+    // predictions for a batch of observations.
+    //
+    // INPROGRESS - The process is underway.
+    //
+    // FAILED - The request to perform a batch prediction did not run to completion.
+    // It is not usable.
+    //
+    // COMPLETED - The batch prediction process completed successfully.
+    //
+    // DELETED - The BatchPrediction is marked as deleted. It is not usable.
+    Status *string `type:"string" enum:"EntityStatus"`
+}
+
+// String returns the string representation
+func (s BatchPrediction) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchPrediction) GoString() string {
+    return s.String()
+}
+
+type CreateBatchPredictionInput struct {
+    _ struct{} `type:"structure"`
+
+    // The ID of the DataSource that points to the group of observations to predict.
+    BatchPredictionDataSourceId *string `min:"1" type:"string" required:"true"`
+
+    // A user-supplied ID that uniquely identifies the BatchPrediction.
+    BatchPredictionId *string `min:"1" type:"string" required:"true"`
+
+    // A user-supplied name or description of the BatchPrediction. BatchPredictionName
+    // can only use the UTF-8 character set.
+    BatchPredictionName *string `type:"string"`
+
+    // The ID of the MLModel that will generate predictions for the group of observations.
+    MLModelId *string `min:"1" type:"string" required:"true"`
+
+    // The location of an Amazon Simple Storage Service (Amazon S3) bucket or directory
+    // to store the batch prediction results. The following substrings are not allowed
+    // in the s3 key portion of the outputURI field: ':', '//', '/./', '/../'.
+    //
+    // Amazon ML needs permissions to store and retrieve the logs on your behalf.
+    // For information about how to set permissions, see the Amazon Machine Learning
+    // Developer Guide (http://docs.aws.amazon.com/machine-learning/latest/dg).
+    OutputUri *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateBatchPredictionInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateBatchPredictionInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateBatchPredictionInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "CreateBatchPredictionInput"}
+    if s.BatchPredictionDataSourceId == nil {
+        invalidParams.Add(request.NewErrParamRequired("BatchPredictionDataSourceId"))
+    }
+    if s.BatchPredictionDataSourceId != nil && len(*s.BatchPredictionDataSourceId) < 1 {
+        invalidParams.Add(request.NewErrParamMinLen("BatchPredictionDataSourceId", 1))
+    }
+    if s.BatchPredictionId == nil {
+        invalidParams.Add(request.NewErrParamRequired("BatchPredictionId"))
+    }
+    if s.BatchPredictionId != nil && len(*s.BatchPredictionId) < 1 {
+        invalidParams.Add(request.NewErrParamMinLen("BatchPredictionId", 1))
+    }
+    if s.MLModelId == nil {
+        invalidParams.Add(request.NewErrParamRequired("MLModelId"))
+    }
+    if s.MLModelId != nil && len(*s.MLModelId) < 1 {
+        invalidParams.Add(request.NewErrParamMinLen("MLModelId", 1))
+    }
+    if s.OutputUri == nil {
+        invalidParams.Add(request.NewErrParamRequired("OutputUri"))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// Represents the output of a CreateBatchPrediction operation, and is an acknowledgement
+// that Amazon ML received the request.
+//
+// The CreateBatchPrediction operation is asynchronous. You can poll for status
+// updates by using the GetBatchPrediction operation and checking the Status
+// parameter of the result.
+type CreateBatchPredictionOutput struct {
+    _ struct{} `type:"structure"`
+
+    // A user-supplied ID that uniquely identifies the BatchPrediction. This value
+    // is identical to the value of the BatchPredictionId in the request.
+    BatchPredictionId *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateBatchPredictionOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateBatchPredictionOutput) GoString() string {
+    return s.String()
+}
+
+type CreateDataSourceFromRDSInput struct {
+    _ struct{} `type:"structure"`
+
+    // The compute statistics for a DataSource. The statistics are generated from
+    // the observation data referenced by a DataSource. Amazon ML uses the statistics
+    // internally during MLModel training. This parameter must be set to true if
+    // the DataSource needs to be used for MLModel training.
+    ComputeStatistics *bool `type:"boolean"`
+
+    // A user-supplied ID that uniquely identifies the DataSource. Typically, an
+    // Amazon Resource Name (ARN) becomes the ID for a DataSource.
+    DataSourceId *string `min:"1" type:"string" required:"true"`
+
+    // A user-supplied name or description of the DataSource.
+    DataSourceName *string `type:"string"`
+
+    // The data specification of an Amazon RDS DataSource:
+    //
+    // DatabaseInformation - DatabaseName - The name of the Amazon RDS database.
+    // InstanceIdentifier - A unique identifier for the Amazon RDS database instance.
+    //
+    //
+    // DatabaseCredentials - AWS Identity and Access Management (IAM) credentials
+    // that are used to connect to the Amazon RDS database.
+    //
+    // ResourceRole - A role (DataPipelineDefaultResourceRole) assumed by an EC2
+    // instance to carry out the copy task from Amazon RDS to Amazon Simple Storage
+    // Service (Amazon S3). For more information, see Role templates (http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html)
+    // for data pipelines.
+    //
+    // ServiceRole - A role (DataPipelineDefaultRole) assumed by the AWS Data Pipeline
+    // service to monitor the progress of the copy task from Amazon RDS to Amazon
+    // S3. For more information, see Role templates (http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html)
+    // for data pipelines.
+    //
+    // SecurityInfo - The security information to use to access an RDS DB instance.
+    // You need to set up appropriate ingress rules for the security entity IDs
+    // provided to allow access to the Amazon RDS instance. Specify a [SubnetId,
+    // SecurityGroupIds] pair for a VPC-based RDS DB instance.
+    //
+    // SelectSqlQuery - A query that is used to retrieve the observation data for
+    // the Datasource.
+    //
+    // S3StagingLocation - The Amazon S3 location for staging Amazon RDS data.
+    // The data retrieved from Amazon RDS using SelectSqlQuery is stored in this
+    // location.
+    //
+    // DataSchemaUri - The Amazon S3 location of the DataSchema.
+    //
+    // DataSchema - A JSON string representing the schema. This is not required
+    // if DataSchemaUri is specified.
+    //
+    // DataRearrangement - A JSON string that represents the splitting and rearrangement
+    // requirements for the Datasource.
+    //
+    // Sample - "{\"splitting\":{\"percentBegin\":10,\"percentEnd\":60}}"
+    RDSData *RDSDataSpec `type:"structure" required:"true"`
+
+    // The role that Amazon ML assumes on behalf of the user to create and activate
+    // a data pipeline in the user's account and copy data using the SelectSqlQuery
+    // query from Amazon RDS to Amazon S3.
+    RoleARN *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateDataSourceFromRDSInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDataSourceFromRDSInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateDataSourceFromRDSInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "CreateDataSourceFromRDSInput"}
+    if s.DataSourceId == nil {
+        invalidParams.Add(request.NewErrParamRequired("DataSourceId"))
+    }
+    if s.DataSourceId != nil && len(*s.DataSourceId) < 1 {
+        invalidParams.Add(request.NewErrParamMinLen("DataSourceId", 1))
+    }
+    if s.RDSData == nil {
+        invalidParams.Add(request.NewErrParamRequired("RDSData"))
+    }
+    if s.RoleARN == nil {
+        invalidParams.Add(request.NewErrParamRequired("RoleARN"))
+    }
+    if s.RoleARN != nil && len(*s.RoleARN) < 1 {
+        invalidParams.Add(request.NewErrParamMinLen("RoleARN", 1))
+    }
+    if s.RDSData != nil {
+        if err := s.RDSData.Validate(); err != nil {
+            invalidParams.AddNested("RDSData", err.(request.ErrInvalidParams))
+        }
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// Represents the output of a CreateDataSourceFromRDS operation, and is an acknowledgement
+// that Amazon ML received the request.
+//
+// The CreateDataSourceFromRDS operation is asynchronous. You can poll for
+// updates by using the GetDataSource operation and checking the Status
+// parameter. You can inspect the Message when Status shows up as FAILED. You
+// can also check the progress of the copy operation by going to the DataPipeline
+// console and looking up the pipeline using the pipelineId from the describe
+// call.
+type CreateDataSourceFromRDSOutput struct {
+    _ struct{} `type:"structure"`
+
+    // A user-supplied ID that uniquely identifies the DataSource. This value should
+    // be identical to the value of the DataSourceID in the request.
+    DataSourceId *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateDataSourceFromRDSOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDataSourceFromRDSOutput) GoString() string {
+    return s.String()
+}
+
+type CreateDataSourceFromRedshiftInput struct {
+    _ struct{} `type:"structure"`
+
+    // The compute statistics for a DataSource. The statistics are generated from
+    // the observation data referenced by a DataSource. Amazon ML uses the statistics
+    // internally during MLModel training. This parameter must be set to true if
+    // the DataSource needs to be used for MLModel training.
+    ComputeStatistics *bool `type:"boolean"`
+
+    // A user-supplied ID that uniquely identifies the DataSource.
+    DataSourceId *string `min:"1" type:"string" required:"true"`
+
+    // A user-supplied name or description of the DataSource.
+    DataSourceName *string `type:"string"`
+
+    // The data specification of an Amazon Redshift DataSource:
+    //
+    // DatabaseInformation - DatabaseName - The name of the Amazon Redshift
+    // database. ClusterIdentifier - The unique ID for the Amazon Redshift cluster.
+    //
+    // DatabaseCredentials - The AWS Identity and Access Management (IAM) credentials
+    // that are used to connect to the Amazon Redshift database.
+    //
+    // SelectSqlQuery - The query that is used to retrieve the observation data
+    // for the Datasource.
+    //
+    // S3StagingLocation - The Amazon Simple Storage Service (Amazon S3) location
+    // for staging Amazon Redshift data. The data retrieved from Amazon Redshift
+    // using the SelectSqlQuery query is stored in this location.
+    //
+    // DataSchemaUri - The Amazon S3 location of the DataSchema.
+    //
+    // DataSchema - A JSON string representing the schema. This is not required
+    // if DataSchemaUri is specified.
+    //
+    // DataRearrangement - A JSON string that represents the splitting and rearrangement
+    // requirements for the DataSource.
+    //
+    // Sample - "{\"splitting\":{\"percentBegin\":10,\"percentEnd\":60}}"
+    DataSpec *RedshiftDataSpec `type:"structure" required:"true"`
+
+    // A fully specified role Amazon Resource Name (ARN). Amazon ML assumes the
+    // role on behalf of the user to create the following:
+    //
+    // A security group to allow Amazon ML to execute the SelectSqlQuery query
+    // on an Amazon Redshift cluster
+    //
+    // An Amazon S3 bucket policy to grant Amazon ML read/write permissions on
+    // the S3StagingLocation
+    RoleARN *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateDataSourceFromRedshiftInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDataSourceFromRedshiftInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
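+// Validation also runs automatically before the request is sent, so calling
+// Validate directly is optional. A minimal sketch (the empty input literal is
+// illustrative; a real input would set the required fields):
+//
+// input := &machinelearning.CreateDataSourceFromRedshiftInput{}
+// if err := input.Validate(); err != nil {
+// fmt.Println(err) // lists DataSourceId, DataSpec, and RoleARN as missing
+// }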
+func (s *CreateDataSourceFromRedshiftInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "CreateDataSourceFromRedshiftInput"}
+    if s.DataSourceId == nil {
+        invalidParams.Add(request.NewErrParamRequired("DataSourceId"))
+    }
+    if s.DataSourceId != nil && len(*s.DataSourceId) < 1 {
+        invalidParams.Add(request.NewErrParamMinLen("DataSourceId", 1))
+    }
+    if s.DataSpec == nil {
+        invalidParams.Add(request.NewErrParamRequired("DataSpec"))
+    }
+    if s.RoleARN == nil {
+        invalidParams.Add(request.NewErrParamRequired("RoleARN"))
+    }
+    if s.RoleARN != nil && len(*s.RoleARN) < 1 {
+        invalidParams.Add(request.NewErrParamMinLen("RoleARN", 1))
+    }
+    if s.DataSpec != nil {
+        if err := s.DataSpec.Validate(); err != nil {
+            invalidParams.AddNested("DataSpec", err.(request.ErrInvalidParams))
+        }
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// Represents the output of a CreateDataSourceFromRedshift operation, and is
+// an acknowledgement that Amazon ML received the request.
+//
+// The CreateDataSourceFromRedshift operation is asynchronous. You can poll
+// for updates by using the GetDataSource operation and checking the Status
+// parameter.
+type CreateDataSourceFromRedshiftOutput struct {
+    _ struct{} `type:"structure"`
+
+    // A user-supplied ID that uniquely identifies the DataSource. This value should
+    // be identical to the value of the DataSourceID in the request.
+    DataSourceId *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateDataSourceFromRedshiftOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDataSourceFromRedshiftOutput) GoString() string {
+    return s.String()
+}
+
+type CreateDataSourceFromS3Input struct {
+    _ struct{} `type:"structure"`
+
+    // The compute statistics for a DataSource. The statistics are generated from
+    // the observation data referenced by a DataSource. Amazon ML uses the statistics
+    // internally during MLModel training. This parameter must be set to true if
+    // the DataSource needs to be used for MLModel training.
+    ComputeStatistics *bool `type:"boolean"`
+
+    // A user-supplied identifier that uniquely identifies the DataSource.
+    DataSourceId *string `min:"1" type:"string" required:"true"`
+
+    // A user-supplied name or description of the DataSource.
+    DataSourceName *string `type:"string"`
+
+    // The data specification of a DataSource:
+    //
+    // DataLocationS3 - The Amazon S3 location of the observation data.
+    //
+    // DataSchemaLocationS3 - The Amazon S3 location of the DataSchema.
+    //
+    // DataSchema - A JSON string representing the schema. This is not required
+    // if DataSchemaUri is specified.
+    //
+    // DataRearrangement - A JSON string that represents the splitting and rearrangement
+    // requirements for the Datasource.
+    //
+    // Sample - "{\"splitting\":{\"percentBegin\":10,\"percentEnd\":60}}"
+    DataSpec *S3DataSpec `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateDataSourceFromS3Input) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDataSourceFromS3Input) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateDataSourceFromS3Input) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "CreateDataSourceFromS3Input"}
+    if s.DataSourceId == nil {
+        invalidParams.Add(request.NewErrParamRequired("DataSourceId"))
+    }
+    if s.DataSourceId != nil && len(*s.DataSourceId) < 1 {
+        invalidParams.Add(request.NewErrParamMinLen("DataSourceId", 1))
+    }
+    if s.DataSpec == nil {
+        invalidParams.Add(request.NewErrParamRequired("DataSpec"))
+    }
+    if s.DataSpec != nil {
+        if err := s.DataSpec.Validate(); err != nil {
+            invalidParams.AddNested("DataSpec", err.(request.ErrInvalidParams))
+        }
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// Represents the output of a CreateDataSourceFromS3 operation, and is an acknowledgement
+// that Amazon ML received the request.
+//
+// The CreateDataSourceFromS3 operation is asynchronous. You can poll for updates
+// by using the GetDataSource operation and checking the Status parameter.
+type CreateDataSourceFromS3Output struct {
+    _ struct{} `type:"structure"`
+
+    // A user-supplied ID that uniquely identifies the DataSource. This value should
+    // be identical to the value of the DataSourceID in the request.
+    DataSourceId *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateDataSourceFromS3Output) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDataSourceFromS3Output) GoString() string {
+    return s.String()
+}
+
+type CreateEvaluationInput struct {
+    _ struct{} `type:"structure"`
+
+    // The ID of the DataSource for the evaluation. The schema of the DataSource
+    // must match the schema used to create the MLModel.
+    EvaluationDataSourceId *string `min:"1" type:"string" required:"true"`
+
+    // A user-supplied ID that uniquely identifies the Evaluation.
+    EvaluationId *string `min:"1" type:"string" required:"true"`
+
+    // A user-supplied name or description of the Evaluation.
+    EvaluationName *string `type:"string"`
+
+    // The ID of the MLModel to evaluate.
+    //
+    // The schema used in creating the MLModel must match the schema of the DataSource
+    // used in the Evaluation.
+    MLModelId *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateEvaluationInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateEvaluationInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateEvaluationInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "CreateEvaluationInput"}
+    if s.EvaluationDataSourceId == nil {
+        invalidParams.Add(request.NewErrParamRequired("EvaluationDataSourceId"))
+    }
+    if s.EvaluationDataSourceId != nil && len(*s.EvaluationDataSourceId) < 1 {
+        invalidParams.Add(request.NewErrParamMinLen("EvaluationDataSourceId", 1))
+    }
+    if s.EvaluationId == nil {
+        invalidParams.Add(request.NewErrParamRequired("EvaluationId"))
+    }
+    if s.EvaluationId != nil && len(*s.EvaluationId) < 1 {
+        invalidParams.Add(request.NewErrParamMinLen("EvaluationId", 1))
+    }
+    if s.MLModelId == nil {
+        invalidParams.Add(request.NewErrParamRequired("MLModelId"))
+    }
+    if s.MLModelId != nil && len(*s.MLModelId) < 1 {
+        invalidParams.Add(request.NewErrParamMinLen("MLModelId", 1))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// Represents the output of a CreateEvaluation operation, and is an acknowledgement
+// that Amazon ML received the request.
+//
+// The CreateEvaluation operation is asynchronous. You can poll for status updates
+// by using the GetEvaluation operation and checking the Status parameter.
+type CreateEvaluationOutput struct {
+    _ struct{} `type:"structure"`
+
+    // The user-supplied ID that uniquely identifies the Evaluation. This value
+    // should be identical to the value of the EvaluationId in the request.
+    EvaluationId *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateEvaluationOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateEvaluationOutput) GoString() string {
+    return s.String()
+}
+
+type CreateMLModelInput struct {
+    _ struct{} `type:"structure"`
+
+    // A user-supplied ID that uniquely identifies the MLModel.
+    MLModelId *string `min:"1" type:"string" required:"true"`
+
+    // A user-supplied name or description of the MLModel.
+    MLModelName *string `type:"string"`
+
+    // The category of supervised learning that this MLModel will address. Choose
+    // from the following types:
+    //
+    // Choose REGRESSION if the MLModel will be used to predict a numeric value.
+    //
+    // Choose BINARY if the MLModel result has two possible values.
+    //
+    // Choose MULTICLASS if the MLModel result has a limited number of values.
+    //
+    // For more information, see the Amazon Machine Learning Developer Guide (http://docs.aws.amazon.com/machine-learning/latest/dg).
+    MLModelType *string `type:"string" required:"true" enum:"MLModelType"`
+
+    // A list of the training parameters in the MLModel. The list is implemented
+    // as a map of key-value pairs.
+    //
+    // The following is the current set of training parameters:
+    //
+    // sgd.maxMLModelSizeInBytes - The maximum allowed size of the model. Depending
+    // on the input data, the size of the model might affect its performance.
+    //
+    // The value is an integer that ranges from 100000 to 2147483648. The default
+    // value is 33554432.
+    //
+    // sgd.maxPasses - The number of times that the training process traverses
+    // the observations to build the MLModel. The value is an integer that ranges
+    // from 1 to 10000. The default value is 10.
+    //
+    // sgd.shuffleType - Whether Amazon ML shuffles the training data. Shuffling
+    // the data improves a model's ability to find the optimal solution for a variety
+    // of data types. The valid values are auto and none. The default value is none.
+    // We strongly recommend that you shuffle your data.
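+    // For example, to turn shuffling on you would include the entry
+    // "sgd.shuffleType": aws.String("auto") in the Parameters map.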
+ // + // sgd.l1RegularizationAmount - The coefficient regularization L1 norm. It + // controls overfitting the data by penalizing large coefficients. This tends + // to drive coefficients to zero, resulting in a sparse feature set. If you + // use this parameter, start by specifying a small value, such as 1.0E-08. + // + // The value is a double that ranges from 0 to MAX_DOUBLE. The default is to + // not use L1 normalization. This parameter can't be used when L2 is specified. + // Use this parameter sparingly. + // + // sgd.l2RegularizationAmount - The coefficient regularization L2 norm. It + // controls overfitting the data by penalizing large coefficients. This tends + // to drive coefficients to small, nonzero values. If you use this parameter, + // start by specifying a small value, such as 1.0E-08. + // + // The value is a double that ranges from 0 to MAX_DOUBLE. The default is to + // not use L2 normalization. This parameter can't be used when L1 is specified. + // Use this parameter sparingly. + Parameters map[string]*string `type:"map"` + + // The data recipe for creating the MLModel. You must specify either the recipe + // or its URI. If you don't specify a recipe or its URI, Amazon ML creates a + // default. + Recipe *string `type:"string"` + + // The Amazon Simple Storage Service (Amazon S3) location and file name that + // contains the MLModel recipe. You must specify either the recipe or its URI. + // If you don't specify a recipe or its URI, Amazon ML creates a default. + RecipeUri *string `type:"string"` + + // The DataSource that points to the training data. + TrainingDataSourceId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateMLModelInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateMLModelInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateMLModelInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateMLModelInput"} + if s.MLModelId == nil { + invalidParams.Add(request.NewErrParamRequired("MLModelId")) + } + if s.MLModelId != nil && len(*s.MLModelId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MLModelId", 1)) + } + if s.MLModelType == nil { + invalidParams.Add(request.NewErrParamRequired("MLModelType")) + } + if s.TrainingDataSourceId == nil { + invalidParams.Add(request.NewErrParamRequired("TrainingDataSourceId")) + } + if s.TrainingDataSourceId != nil && len(*s.TrainingDataSourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TrainingDataSourceId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a CreateMLModel operation, and is an acknowledgement +// that Amazon ML received the request. +// +// The CreateMLModel operation is asynchronous. You can poll for status updates +// by using the GetMLModel operation and checking the Status parameter. +type CreateMLModelOutput struct { + _ struct{} `type:"structure"` + + // A user-supplied ID that uniquely identifies the MLModel. This value should + // be identical to the value of the MLModelId in the request. 
+	MLModelId *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateMLModelOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateMLModelOutput) GoString() string {
+	return s.String()
+}
+
+type CreateRealtimeEndpointInput struct {
+	_ struct{} `type:"structure"`
+
+	// The ID assigned to the MLModel during creation.
+	MLModelId *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateRealtimeEndpointInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateRealtimeEndpointInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateRealtimeEndpointInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateRealtimeEndpointInput"}
+	if s.MLModelId == nil {
+		invalidParams.Add(request.NewErrParamRequired("MLModelId"))
+	}
+	if s.MLModelId != nil && len(*s.MLModelId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("MLModelId", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Represents the output of a CreateRealtimeEndpoint operation.
+//
+// The result contains the MLModelId and the endpoint information for the MLModel.
+//
+// The endpoint information includes the URI of the MLModel; that is, the
+// location to send online prediction requests for the specified MLModel.
+type CreateRealtimeEndpointOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A user-supplied ID that uniquely identifies the MLModel. This value should
+	// be identical to the value of the MLModelId in the request.
+	MLModelId *string `min:"1" type:"string"`
+
+	// The endpoint information of the MLModel.
+	RealtimeEndpointInfo *RealtimeEndpointInfo `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateRealtimeEndpointOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateRealtimeEndpointOutput) GoString() string {
+	return s.String()
+}
+
+// Represents the output of the GetDataSource operation.
+//
+// The content consists of the detailed metadata and data file information
+// and the current status of the DataSource.
+type DataSource struct {
+	_ struct{} `type:"structure"`
+
+	// The parameter is true if statistics need to be generated from the observation
+	// data.
+	ComputeStatistics *bool `type:"boolean"`
+
+	// The time that the DataSource was created. The time is expressed in epoch
+	// time.
+	CreatedAt *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+	// The AWS user account from which the DataSource was created. The account type
+	// can be either an AWS root account or an AWS Identity and Access Management
+	// (IAM) user account.
+	CreatedByIamUser *string `type:"string"`
+
+	// The location and name of the data in Amazon Simple Storage Service (Amazon
+	// S3) that is used by a DataSource.
+	DataLocationS3 *string `type:"string"`
+
+	// A JSON string that represents the splitting and rearrangement requirement
+	// used when this DataSource was created.
+	DataRearrangement *string `type:"string"`
+
+	// The total size, in bytes, of the observations contained in the data files
+	// that the DataSource references.
+	DataSizeInBytes *int64 `type:"long"`
+
+	// The ID that is assigned to the DataSource during creation.
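+	// Editor's note: a sketch of creating a real-time endpoint and reading the
+	// prediction URI from the returned RealtimeEndpointInfo, assuming the same
+	// hypothetical client "svc" and model ID as above:
+	//
+	//	out, err := svc.CreateRealtimeEndpoint(&machinelearning.CreateRealtimeEndpointInput{
+	//		MLModelId: aws.String("ml-example-id"),
+	//	})
+	//	if err == nil && out.RealtimeEndpointInfo != nil {
+	//		fmt.Println(aws.StringValue(out.RealtimeEndpointInfo.EndpointUrl))
+	//	}
+	//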
+	DataSourceId *string `min:"1" type:"string"`
+
+	// The time of the most recent edit to the DataSource. The time is expressed
+	// in epoch time.
+	LastUpdatedAt *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+	// A description of the most recent details about creating the DataSource.
+	Message *string `type:"string"`
+
+	// A user-supplied name or description of the DataSource.
+	Name *string `type:"string"`
+
+	// The number of data files referenced by the DataSource.
+	NumberOfFiles *int64 `type:"long"`
+
+	// The datasource details that are specific to Amazon RDS.
+	RDSMetadata *RDSMetadata `type:"structure"`
+
+	// Describes the DataSource details specific to Amazon Redshift.
+	RedshiftMetadata *RedshiftMetadata `type:"structure"`
+
+	// The Amazon Resource Name (ARN) of an AWS IAM Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html#roles-about-termsandconcepts),
+	// such as the following: arn:aws:iam::account:role/rolename.
+	RoleARN *string `min:"1" type:"string"`
+
+	// The current status of the DataSource. This element can have one of the following
+	// values:
+	//
+	// PENDING - Amazon Machine Learning (Amazon ML) submitted a request to create
+	// a DataSource. INPROGRESS - The creation process is underway. FAILED - The
+	// request to create a DataSource did not run to completion. It is not usable.
+	// COMPLETED - The creation process completed successfully. DELETED - The DataSource
+	// is marked as deleted. It is not usable.
+	Status *string `type:"string" enum:"EntityStatus"`
+}
+
+// String returns the string representation
+func (s DataSource) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DataSource) GoString() string {
+	return s.String()
+}
+
+type DeleteBatchPredictionInput struct {
+	_ struct{} `type:"structure"`
+
+	// A user-supplied ID that uniquely identifies the BatchPrediction.
+	BatchPredictionId *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBatchPredictionInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBatchPredictionInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBatchPredictionInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteBatchPredictionInput"}
+	if s.BatchPredictionId == nil {
+		invalidParams.Add(request.NewErrParamRequired("BatchPredictionId"))
+	}
+	if s.BatchPredictionId != nil && len(*s.BatchPredictionId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("BatchPredictionId", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Represents the output of a DeleteBatchPrediction operation.
+//
+// You can use the GetBatchPrediction operation and check the value of the
+// Status parameter to see whether a BatchPrediction is marked as DELETED.
+type DeleteBatchPredictionOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A user-supplied ID that uniquely identifies the BatchPrediction. This value
+	// should be identical to the value of the BatchPredictionID in the request.
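+	// Editor's note: the SDK calls Validate automatically before a request is
+	// sent, but it can also be invoked directly to check parameters up front.
+	// A minimal sketch with a hypothetical ID:
+	//
+	//	in := &machinelearning.DeleteBatchPredictionInput{
+	//		BatchPredictionId: aws.String("bp-example-id"),
+	//	}
+	//	if err := in.Validate(); err != nil {
+	//		// handle the aggregated invalid-parameter error before calling the API
+	//	}
+	//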
+ BatchPredictionId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeleteBatchPredictionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBatchPredictionOutput) GoString() string { + return s.String() +} + +type DeleteDataSourceInput struct { + _ struct{} `type:"structure"` + + // A user-supplied ID that uniquely identifies the DataSource. + DataSourceId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDataSourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDataSourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDataSourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDataSourceInput"} + if s.DataSourceId == nil { + invalidParams.Add(request.NewErrParamRequired("DataSourceId")) + } + if s.DataSourceId != nil && len(*s.DataSourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DataSourceId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a DeleteDataSource operation. +type DeleteDataSourceOutput struct { + _ struct{} `type:"structure"` + + // A user-supplied ID that uniquely identifies the DataSource. This value should + // be identical to the value of the DataSourceID in the request. + DataSourceId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeleteDataSourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDataSourceOutput) GoString() string { + return s.String() +} + +type DeleteEvaluationInput struct { + _ struct{} `type:"structure"` + + // A user-supplied ID that uniquely identifies the Evaluation to delete. + EvaluationId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteEvaluationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteEvaluationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteEvaluationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteEvaluationInput"} + if s.EvaluationId == nil { + invalidParams.Add(request.NewErrParamRequired("EvaluationId")) + } + if s.EvaluationId != nil && len(*s.EvaluationId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EvaluationId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a DeleteEvaluation operation. The output indicates +// that Amazon Machine Learning (Amazon ML) received the request. +// +// You can use the GetEvaluation operation and check the value of the Status +// parameter to see whether an Evaluation is marked as DELETED. +type DeleteEvaluationOutput struct { + _ struct{} `type:"structure"` + + // A user-supplied ID that uniquely identifies the Evaluation. This value should + // be identical to the value of the EvaluationId in the request. 
+	EvaluationId *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteEvaluationOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteEvaluationOutput) GoString() string {
+	return s.String()
+}
+
+type DeleteMLModelInput struct {
+	_ struct{} `type:"structure"`
+
+	// A user-supplied ID that uniquely identifies the MLModel.
+	MLModelId *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteMLModelInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteMLModelInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteMLModelInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteMLModelInput"}
+	if s.MLModelId == nil {
+		invalidParams.Add(request.NewErrParamRequired("MLModelId"))
+	}
+	if s.MLModelId != nil && len(*s.MLModelId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("MLModelId", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Represents the output of a DeleteMLModel operation.
+//
+// You can use the GetMLModel operation and check the value of the Status parameter
+// to see whether an MLModel is marked as DELETED.
+type DeleteMLModelOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A user-supplied ID that uniquely identifies the MLModel. This value should
+	// be identical to the value of the MLModelID in the request.
+	MLModelId *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteMLModelOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteMLModelOutput) GoString() string {
+	return s.String()
+}
+
+type DeleteRealtimeEndpointInput struct {
+	_ struct{} `type:"structure"`
+
+	// The ID assigned to the MLModel during creation.
+	MLModelId *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteRealtimeEndpointInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteRealtimeEndpointInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteRealtimeEndpointInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteRealtimeEndpointInput"}
+	if s.MLModelId == nil {
+		invalidParams.Add(request.NewErrParamRequired("MLModelId"))
+	}
+	if s.MLModelId != nil && len(*s.MLModelId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("MLModelId", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Represents the output of a DeleteRealtimeEndpoint operation.
+//
+// The result contains the MLModelId and the endpoint information for the MLModel.
+type DeleteRealtimeEndpointOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A user-supplied ID that uniquely identifies the MLModel. This value should
+	// be identical to the value of the MLModelId in the request.
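+	// Editor's note: a sketch of the delete-then-verify flow described above,
+	// using GetMLModel to confirm that the Status is DELETED. Assumes a
+	// configured client "svc" and a hypothetical model ID:
+	//
+	//	svc.DeleteMLModel(&machinelearning.DeleteMLModelInput{MLModelId: aws.String("ml-example-id")})
+	//	out, err := svc.GetMLModel(&machinelearning.GetMLModelInput{MLModelId: aws.String("ml-example-id")})
+	//	if err == nil && aws.StringValue(out.Status) == "DELETED" {
+	//		// the model is deleted and no longer usable
+	//	}
+	//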
+	MLModelId *string `min:"1" type:"string"`
+
+	// The endpoint information of the MLModel.
+	RealtimeEndpointInfo *RealtimeEndpointInfo `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteRealtimeEndpointOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteRealtimeEndpointOutput) GoString() string {
+	return s.String()
+}
+
+type DeleteTagsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the tagged ML object. For example, exampleModelId.
+	ResourceId *string `min:"1" type:"string" required:"true"`
+
+	// The type of the tagged ML object.
+	ResourceType *string `type:"string" required:"true" enum:"TaggableResourceType"`
+
+	// One or more tags to delete.
+	TagKeys []*string `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteTagsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteTagsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteTagsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteTagsInput"}
+	if s.ResourceId == nil {
+		invalidParams.Add(request.NewErrParamRequired("ResourceId"))
+	}
+	if s.ResourceId != nil && len(*s.ResourceId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1))
+	}
+	if s.ResourceType == nil {
+		invalidParams.Add(request.NewErrParamRequired("ResourceType"))
+	}
+	if s.TagKeys == nil {
+		invalidParams.Add(request.NewErrParamRequired("TagKeys"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Amazon ML returns the following elements.
+type DeleteTagsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the ML object from which tags were deleted.
+	ResourceId *string `min:"1" type:"string"`
+
+	// The type of the ML object from which tags were deleted.
+	ResourceType *string `type:"string" enum:"TaggableResourceType"`
+}
+
+// String returns the string representation
+func (s DeleteTagsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteTagsOutput) GoString() string {
+	return s.String()
+}
+
+type DescribeBatchPredictionsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The equal to operator. The BatchPrediction results will have FilterVariable
+	// values that exactly match the value specified with EQ.
+	EQ *string `type:"string"`
+
+	// Use one of the following variables to filter a list of BatchPrediction:
+	//
+	// CreatedAt - Sets the search criteria to the BatchPrediction creation date.
+	// Status - Sets the search criteria to the BatchPrediction status. Name -
+	// Sets the search criteria to the contents of the BatchPrediction Name. IAMUser
+	// - Sets the search criteria to the user account that invoked the BatchPrediction
+	// creation. MLModelId - Sets the search criteria to the MLModel used in the
+	// BatchPrediction. DataSourceId - Sets the search criteria to the DataSource
+	// used in the BatchPrediction. DataURI - Sets the search criteria to the data
+	// file(s) used in the BatchPrediction. The URL can identify either a file or
+	// an Amazon Simple Storage Service (Amazon S3) bucket or directory.
+	FilterVariable *string `type:"string" enum:"BatchPredictionFilterVariable"`
+
+	// The greater than or equal to operator. The BatchPrediction results will have
+	// FilterVariable values that are greater than or equal to the value specified
+	// with GE.
+	GE *string `type:"string"`
+
+	// The greater than operator. The BatchPrediction results will have FilterVariable
+	// values that are greater than the value specified with GT.
+	GT *string `type:"string"`
+
+	// The less than or equal to operator. The BatchPrediction results will have
+	// FilterVariable values that are less than or equal to the value specified
+	// with LE.
+	LE *string `type:"string"`
+
+	// The less than operator. The BatchPrediction results will have FilterVariable
+	// values that are less than the value specified with LT.
+	LT *string `type:"string"`
+
+	// The number of pages of information to include in the result. The range of
+	// acceptable values is 1 through 100. The default value is 100.
+	Limit *int64 `min:"1" type:"integer"`
+
+	// The not equal to operator. The BatchPrediction results will have FilterVariable
+	// values not equal to the value specified with NE.
+	NE *string `type:"string"`
+
+	// An ID of the page in the paginated results.
+	NextToken *string `type:"string"`
+
+	// A string that is found at the beginning of a variable, such as Name or Id.
+	//
+	// For example, a Batch Prediction operation could have the Name 2014-09-09-HolidayGiftMailer.
+	// To search for this BatchPrediction, select Name for the FilterVariable and
+	// any of the following strings for the Prefix:
+	//
+	// 2014-09
+	//
+	// 2014-09-09
+	//
+	// 2014-09-09-Holiday
+	Prefix *string `type:"string"`
+
+	// A two-value parameter that determines the sequence of the resulting list
+	// of BatchPrediction.
+	//
+	// asc - Arranges the list in ascending order (A-Z, 0-9). dsc - Arranges
+	// the list in descending order (Z-A, 9-0). Results are sorted by FilterVariable.
+	SortOrder *string `type:"string" enum:"SortOrder"`
+}
+
+// String returns the string representation
+func (s DescribeBatchPredictionsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeBatchPredictionsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeBatchPredictionsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DescribeBatchPredictionsInput"}
+	if s.Limit != nil && *s.Limit < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Represents the output of a DescribeBatchPredictions operation. The content
+// is essentially a list of BatchPredictions.
+type DescribeBatchPredictionsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the next page in the paginated results that indicates at least
+	// one more page follows.
+	NextToken *string `type:"string"`
+
+	// A list of BatchPrediction objects that meet the search criteria.
+	Results []*BatchPrediction `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeBatchPredictionsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeBatchPredictionsOutput) GoString() string {
+	return s.String()
+}
+
+type DescribeDataSourcesInput struct {
+	_ struct{} `type:"structure"`
+
+	// The equal to operator. The DataSource results will have FilterVariable values
+	// that exactly match the value specified with EQ.
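+	// Editor's note: a sketch of the filter and pagination idiom shared by the
+	// Describe* operations, here listing BatchPrediction results whose Name
+	// begins with a prefix. Assumes a configured client "svc":
+	//
+	//	in := &machinelearning.DescribeBatchPredictionsInput{
+	//		FilterVariable: aws.String("Name"),
+	//		Prefix:         aws.String("2014-09"),
+	//		SortOrder:      aws.String("asc"),
+	//	}
+	//	for {
+	//		page, err := svc.DescribeBatchPredictions(in)
+	//		if err != nil {
+	//			break
+	//		}
+	//		// ... use page.Results ...
+	//		if page.NextToken == nil {
+	//			break
+	//		}
+	//		in.NextToken = page.NextToken
+	//	}
+	//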
+ EQ *string `type:"string"` + + // Use one of the following variables to filter a list of DataSource: + // + // CreatedAt - Sets the search criteria to DataSource creation dates. Status + // - Sets the search criteria to DataSource statuses. Name - Sets the search + // criteria to the contents of DataSource Name. DataUri - Sets the search + // criteria to the URI of data files used to create the DataSource. The URI + // can identify either a file or an Amazon Simple Storage Service (Amazon S3) + // bucket or directory. IAMUser - Sets the search criteria to the user account + // that invoked the DataSource creation. + FilterVariable *string `type:"string" enum:"DataSourceFilterVariable"` + + // The greater than or equal to operator. The DataSource results will have FilterVariable + // values that are greater than or equal to the value specified with GE. + GE *string `type:"string"` + + // The greater than operator. The DataSource results will have FilterVariable + // values that are greater than the value specified with GT. + GT *string `type:"string"` + + // The less than or equal to operator. The DataSource results will have FilterVariable + // values that are less than or equal to the value specified with LE. + LE *string `type:"string"` + + // The less than operator. The DataSource results will have FilterVariable values + // that are less than the value specified with LT. + LT *string `type:"string"` + + // The maximum number of DataSource to include in the result. + Limit *int64 `min:"1" type:"integer"` + + // The not equal to operator. The DataSource results will have FilterVariable + // values not equal to the value specified with NE. + NE *string `type:"string"` + + // The ID of the page in the paginated results. + NextToken *string `type:"string"` + + // A string that is found at the beginning of a variable, such as Name or Id. + // + // For example, a DataSource could have the Name 2014-09-09-HolidayGiftMailer. + // To search for this DataSource, select Name for the FilterVariable and any + // of the following strings for the Prefix: + // + // 2014-09 + // + // 2014-09-09 + // + // 2014-09-09-Holiday + Prefix *string `type:"string"` + + // A two-value parameter that determines the sequence of the resulting list + // of DataSource. + // + // asc - Arranges the list in ascending order (A-Z, 0-9). dsc - Arranges + // the list in descending order (Z-A, 9-0). Results are sorted by FilterVariable. + SortOrder *string `type:"string" enum:"SortOrder"` +} + +// String returns the string representation +func (s DescribeDataSourcesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDataSourcesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDataSourcesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDataSourcesInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the query results from a DescribeDataSources operation. The content +// is essentially a list of DataSource. +type DescribeDataSourcesOutput struct { + _ struct{} `type:"structure"` + + // An ID of the next page in the paginated results that indicates at least one + // more page follows. 
+	NextToken *string `type:"string"`
+
+	// A list of DataSource that meet the search criteria.
+	Results []*DataSource `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeDataSourcesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeDataSourcesOutput) GoString() string {
+	return s.String()
+}
+
+type DescribeEvaluationsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The equal to operator. The Evaluation results will have FilterVariable values
+	// that exactly match the value specified with EQ.
+	EQ *string `type:"string"`
+
+	// Use one of the following variables to filter a list of Evaluation objects:
+	//
+	// CreatedAt - Sets the search criteria to the Evaluation creation date.
+	// Status - Sets the search criteria to the Evaluation status. Name - Sets
+	// the search criteria to the contents of Evaluation Name. IAMUser - Sets
+	// the search criteria to the user account that invoked an Evaluation. MLModelId
+	// - Sets the search criteria to the MLModel that was evaluated. DataSourceId
+	// - Sets the search criteria to the DataSource used in Evaluation. DataUri
+	// - Sets the search criteria to the data file(s) used in Evaluation. The URL
+	// can identify either a file or an Amazon Simple Storage Service (Amazon S3)
+	// bucket or directory.
+	FilterVariable *string `type:"string" enum:"EvaluationFilterVariable"`
+
+	// The greater than or equal to operator. The Evaluation results will have FilterVariable
+	// values that are greater than or equal to the value specified with GE.
+	GE *string `type:"string"`
+
+	// The greater than operator. The Evaluation results will have FilterVariable
+	// values that are greater than the value specified with GT.
+	GT *string `type:"string"`
+
+	// The less than or equal to operator. The Evaluation results will have FilterVariable
+	// values that are less than or equal to the value specified with LE.
+	LE *string `type:"string"`
+
+	// The less than operator. The Evaluation results will have FilterVariable values
+	// that are less than the value specified with LT.
+	LT *string `type:"string"`
+
+	// The maximum number of Evaluation to include in the result.
+	Limit *int64 `min:"1" type:"integer"`
+
+	// The not equal to operator. The Evaluation results will have FilterVariable
+	// values not equal to the value specified with NE.
+	NE *string `type:"string"`
+
+	// The ID of the page in the paginated results.
+	NextToken *string `type:"string"`
+
+	// A string that is found at the beginning of a variable, such as Name or Id.
+	//
+	// For example, an Evaluation could have the Name 2014-09-09-HolidayGiftMailer.
+	// To search for this Evaluation, select Name for the FilterVariable and any
+	// of the following strings for the Prefix:
+	//
+	// 2014-09
+	//
+	// 2014-09-09
+	//
+	// 2014-09-09-Holiday
+	Prefix *string `type:"string"`
+
+	// A two-value parameter that determines the sequence of the resulting list
+	// of Evaluation.
+	//
+	// asc - Arranges the list in ascending order (A-Z, 0-9). dsc - Arranges
+	// the list in descending order (Z-A, 9-0). Results are sorted by FilterVariable.
+ SortOrder *string `type:"string" enum:"SortOrder"` +} + +// String returns the string representation +func (s DescribeEvaluationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEvaluationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeEvaluationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeEvaluationsInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the query results from a DescribeEvaluations operation. The content +// is essentially a list of Evaluation. +type DescribeEvaluationsOutput struct { + _ struct{} `type:"structure"` + + // The ID of the next page in the paginated results that indicates at least + // one more page follows. + NextToken *string `type:"string"` + + // A list of Evaluation that meet the search criteria. + Results []*Evaluation `type:"list"` +} + +// String returns the string representation +func (s DescribeEvaluationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEvaluationsOutput) GoString() string { + return s.String() +} + +type DescribeMLModelsInput struct { + _ struct{} `type:"structure"` + + // The equal to operator. The MLModel results will have FilterVariable values + // that exactly match the value specified with EQ. + EQ *string `type:"string"` + + // Use one of the following variables to filter a list of MLModel: + // + // CreatedAt - Sets the search criteria to MLModel creation date. Status + // - Sets the search criteria to MLModel status. Name - Sets the search criteria + // to the contents of MLModel Name. IAMUser - Sets the search criteria to + // the user account that invoked the MLModel creation. TrainingDataSourceId + // - Sets the search criteria to the DataSource used to train one or more MLModel. + // RealtimeEndpointStatus - Sets the search criteria to the MLModel real-time + // endpoint status. MLModelType - Sets the search criteria to MLModel type: + // binary, regression, or multi-class. Algorithm - Sets the search criteria + // to the algorithm that the MLModel uses. TrainingDataURI - Sets the search + // criteria to the data file(s) used in training a MLModel. The URL can identify + // either a file or an Amazon Simple Storage Service (Amazon S3) bucket or directory. + FilterVariable *string `type:"string" enum:"MLModelFilterVariable"` + + // The greater than or equal to operator. The MLModel results will have FilterVariable + // values that are greater than or equal to the value specified with GE. + GE *string `type:"string"` + + // The greater than operator. The MLModel results will have FilterVariable values + // that are greater than the value specified with GT. + GT *string `type:"string"` + + // The less than or equal to operator. The MLModel results will have FilterVariable + // values that are less than or equal to the value specified with LE. + LE *string `type:"string"` + + // The less than operator. The MLModel results will have FilterVariable values + // that are less than the value specified with LT. + LT *string `type:"string"` + + // The number of pages of information to include in the result. The range of + // acceptable values is 1 through 100. The default value is 100. 
+ Limit *int64 `min:"1" type:"integer"` + + // The not equal to operator. The MLModel results will have FilterVariable values + // not equal to the value specified with NE. + NE *string `type:"string"` + + // The ID of the page in the paginated results. + NextToken *string `type:"string"` + + // A string that is found at the beginning of a variable, such as Name or Id. + // + // For example, an MLModel could have the Name 2014-09-09-HolidayGiftMailer. + // To search for this MLModel, select Name for the FilterVariable and any of + // the following strings for the Prefix: + // + // 2014-09 + // + // 2014-09-09 + // + // 2014-09-09-Holiday + Prefix *string `type:"string"` + + // A two-value parameter that determines the sequence of the resulting list + // of MLModel. + // + // asc - Arranges the list in ascending order (A-Z, 0-9). dsc - Arranges + // the list in descending order (Z-A, 9-0). Results are sorted by FilterVariable. + SortOrder *string `type:"string" enum:"SortOrder"` +} + +// String returns the string representation +func (s DescribeMLModelsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMLModelsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeMLModelsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeMLModelsInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a DescribeMLModels operation. The content is essentially +// a list of MLModel. +type DescribeMLModelsOutput struct { + _ struct{} `type:"structure"` + + // The ID of the next page in the paginated results that indicates at least + // one more page follows. + NextToken *string `type:"string"` + + // A list of MLModel that meet the search criteria. + Results []*MLModel `type:"list"` +} + +// String returns the string representation +func (s DescribeMLModelsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMLModelsOutput) GoString() string { + return s.String() +} + +type DescribeTagsInput struct { + _ struct{} `type:"structure"` + + // The ID of the ML object. For example, exampleModelId. + ResourceId *string `min:"1" type:"string" required:"true"` + + // The type of the ML object. + ResourceType *string `type:"string" required:"true" enum:"TaggableResourceType"` +} + +// String returns the string representation +func (s DescribeTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTagsInput"} + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.ResourceId != nil && len(*s.ResourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) + } + if s.ResourceType == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Amazon ML returns the following elements. 
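+// Editor's note: a sketch of listing the tags on an ML object with
+// DescribeTags, assuming a configured client "svc" and a hypothetical model
+// ID:
+//
+//	out, err := svc.DescribeTags(&machinelearning.DescribeTagsInput{
+//		ResourceId:   aws.String("ml-example-id"),
+//		ResourceType: aws.String("MLModel"),
+//	})
+//	if err == nil {
+//		for _, t := range out.Tags {
+//			fmt.Println(aws.StringValue(t.Key), aws.StringValue(t.Value))
+//		}
+//	}
+//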
+type DescribeTagsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the tagged ML object.
+	ResourceId *string `min:"1" type:"string"`
+
+	// The type of the tagged ML object.
+	ResourceType *string `type:"string" enum:"TaggableResourceType"`
+
+	// A list of tags associated with the ML object.
+	Tags []*Tag `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeTagsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeTagsOutput) GoString() string {
+	return s.String()
+}
+
+// Represents the output of a GetEvaluation operation.
+//
+// The content consists of the detailed metadata and data file information
+// and the current status of the Evaluation.
+type Evaluation struct {
+	_ struct{} `type:"structure"`
+
+	// The time that the Evaluation was created. The time is expressed in epoch
+	// time.
+	CreatedAt *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+	// The AWS user account that invoked the evaluation. The account type can be
+	// either an AWS root account or an AWS Identity and Access Management (IAM)
+	// user account.
+	CreatedByIamUser *string `type:"string"`
+
+	// The ID of the DataSource that is used to evaluate the MLModel.
+	EvaluationDataSourceId *string `min:"1" type:"string"`
+
+	// The ID that is assigned to the Evaluation at creation.
+	EvaluationId *string `min:"1" type:"string"`
+
+	// The location and name of the data in Amazon Simple Storage Service (Amazon
+	// S3) that is used in the evaluation.
+	InputDataLocationS3 *string `type:"string"`
+
+	// The time of the most recent edit to the Evaluation. The time is expressed
+	// in epoch time.
+	LastUpdatedAt *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+	// The ID of the MLModel that is the focus of the evaluation.
+	MLModelId *string `min:"1" type:"string"`
+
+	// A description of the most recent details about evaluating the MLModel.
+	Message *string `type:"string"`
+
+	// A user-supplied name or description of the Evaluation.
+	Name *string `type:"string"`
+
+	// Measurements of how well the MLModel performed, using observations referenced
+	// by the DataSource. One of the following metrics is returned, based on the
+	// type of the MLModel:
+	//
+	// BinaryAUC: A binary MLModel uses the Area Under the Curve (AUC) technique
+	// to measure performance.
+	//
+	// RegressionRMSE: A regression MLModel uses the Root Mean Square Error (RMSE)
+	// technique to measure performance. RMSE measures the difference between predicted
+	// and actual values for a single variable.
+	//
+	// MulticlassAvgFScore: A multiclass MLModel uses the F1 score technique
+	// to measure performance.
+	//
+	// For more information about performance metrics, please see the Amazon
+	// Machine Learning Developer Guide (http://docs.aws.amazon.com/machine-learning/latest/dg).
+	PerformanceMetrics *PerformanceMetrics `type:"structure"`
+
+	// The status of the evaluation. This element can have one of the following
+	// values:
+	//
+	// PENDING - Amazon Machine Learning (Amazon ML) submitted a request to evaluate
+	// an MLModel. INPROGRESS - The evaluation is underway. FAILED - The request
+	// to evaluate an MLModel did not run to completion. It is not usable. COMPLETED
+	// - The evaluation process completed successfully. DELETED - The Evaluation
+	// is marked as deleted. It is not usable.
+ Status *string `type:"string" enum:"EntityStatus"` +} + +// String returns the string representation +func (s Evaluation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Evaluation) GoString() string { + return s.String() +} + +type GetBatchPredictionInput struct { + _ struct{} `type:"structure"` + + // An ID assigned to the BatchPrediction at creation. + BatchPredictionId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBatchPredictionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBatchPredictionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBatchPredictionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBatchPredictionInput"} + if s.BatchPredictionId == nil { + invalidParams.Add(request.NewErrParamRequired("BatchPredictionId")) + } + if s.BatchPredictionId != nil && len(*s.BatchPredictionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("BatchPredictionId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a GetBatchPrediction operation and describes a BatchPrediction. +type GetBatchPredictionOutput struct { + _ struct{} `type:"structure"` + + // The ID of the DataSource that was used to create the BatchPrediction. + BatchPredictionDataSourceId *string `min:"1" type:"string"` + + // An ID assigned to the BatchPrediction at creation. This value should be identical + // to the value of the BatchPredictionID in the request. + BatchPredictionId *string `min:"1" type:"string"` + + // The time when the BatchPrediction was created. The time is expressed in epoch + // time. + CreatedAt *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The AWS user account that invoked the BatchPrediction. The account type can + // be either an AWS root account or an AWS Identity and Access Management (IAM) + // user account. + CreatedByIamUser *string `type:"string"` + + // The location of the data file or directory in Amazon Simple Storage Service + // (Amazon S3). + InputDataLocationS3 *string `type:"string"` + + // The time of the most recent edit to BatchPrediction. The time is expressed + // in epoch time. + LastUpdatedAt *time.Time `type:"timestamp" timestampFormat:"unix"` + + // A link to the file that contains logs of the CreateBatchPrediction operation. + LogUri *string `type:"string"` + + // The ID of the MLModel that generated predictions for the BatchPrediction + // request. + MLModelId *string `min:"1" type:"string"` + + // A description of the most recent details about processing the batch prediction + // request. + Message *string `type:"string"` + + // A user-supplied name or description of the BatchPrediction. + Name *string `type:"string"` + + // The location of an Amazon S3 bucket or directory to receive the operation + // results. + OutputUri *string `type:"string"` + + // The status of the BatchPrediction, which can be one of the following values: + // + // PENDING - Amazon Machine Learning (Amazon ML) submitted a request to generate + // batch predictions. INPROGRESS - The batch predictions are in progress. + // FAILED - The request to perform a batch prediction did not run to completion. + // It is not usable. COMPLETED - The batch prediction process completed successfully. 
+ // DELETED - The BatchPrediction is marked as deleted. It is not usable. + Status *string `type:"string" enum:"EntityStatus"` +} + +// String returns the string representation +func (s GetBatchPredictionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBatchPredictionOutput) GoString() string { + return s.String() +} + +type GetDataSourceInput struct { + _ struct{} `type:"structure"` + + // The ID assigned to the DataSource at creation. + DataSourceId *string `min:"1" type:"string" required:"true"` + + // Specifies whether the GetDataSource operation should return DataSourceSchema. + // + // If true, DataSourceSchema is returned. + // + // If false, DataSourceSchema is not returned. + Verbose *bool `type:"boolean"` +} + +// String returns the string representation +func (s GetDataSourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDataSourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetDataSourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDataSourceInput"} + if s.DataSourceId == nil { + invalidParams.Add(request.NewErrParamRequired("DataSourceId")) + } + if s.DataSourceId != nil && len(*s.DataSourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DataSourceId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of a GetDataSource operation and describes a DataSource. +type GetDataSourceOutput struct { + _ struct{} `type:"structure"` + + // The parameter is true if statistics need to be generated from the observation + // data. + ComputeStatistics *bool `type:"boolean"` + + // The time that the DataSource was created. The time is expressed in epoch + // time. + CreatedAt *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The AWS user account from which the DataSource was created. The account type + // can be either an AWS root account or an AWS Identity and Access Management + // (IAM) user account. + CreatedByIamUser *string `type:"string"` + + // The location of the data file or directory in Amazon Simple Storage Service + // (Amazon S3). + DataLocationS3 *string `type:"string"` + + // A JSON string that represents the splitting and rearrangement requirement + // used when this DataSource was created. + DataRearrangement *string `type:"string"` + + // The total size of observations in the data files. + DataSizeInBytes *int64 `type:"long"` + + // The ID assigned to the DataSource at creation. This value should be identical + // to the value of the DataSourceId in the request. + DataSourceId *string `min:"1" type:"string"` + + // The schema used by all of the data files of this DataSource. + // + // Note This parameter is provided as part of the verbose format. + DataSourceSchema *string `type:"string"` + + // The time of the most recent edit to the DataSource. The time is expressed + // in epoch time. + LastUpdatedAt *time.Time `type:"timestamp" timestampFormat:"unix"` + + // A link to the file containing logs of CreateDataSourceFrom* operations. + LogUri *string `type:"string"` + + // The user-supplied description of the most recent details about creating the + // DataSource. + Message *string `type:"string"` + + // A user-supplied name or description of the DataSource. 
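+	// Editor's note: a sketch of fetching a DataSource together with its schema
+	// by setting Verbose, as described for GetDataSourceInput above. Assumes a
+	// configured client "svc" and a hypothetical ID:
+	//
+	//	out, err := svc.GetDataSource(&machinelearning.GetDataSourceInput{
+	//		DataSourceId: aws.String("ds-example-id"),
+	//		Verbose:      aws.Bool(true), // include DataSourceSchema in the reply
+	//	})
+	//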
+	Name *string `type:"string"`
+
+	// The number of data files referenced by the DataSource.
+	NumberOfFiles *int64 `type:"long"`
+
+	// The datasource details that are specific to Amazon RDS.
+	RDSMetadata *RDSMetadata `type:"structure"`
+
+	// Describes the DataSource details specific to Amazon Redshift.
+	RedshiftMetadata *RedshiftMetadata `type:"structure"`
+
+	// The Amazon Resource Name (ARN) of an AWS IAM Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html#roles-about-termsandconcepts),
+	// such as the following: arn:aws:iam::account:role/rolename.
+	RoleARN *string `min:"1" type:"string"`
+
+	// The current status of the DataSource. This element can have one of the following
+	// values:
+	//
+	// PENDING - Amazon ML submitted a request to create a DataSource. INPROGRESS
+	// - The creation process is underway. FAILED - The request to create a DataSource
+	// did not run to completion. It is not usable. COMPLETED - The creation process
+	// completed successfully. DELETED - The DataSource is marked as deleted. It
+	// is not usable.
+	Status *string `type:"string" enum:"EntityStatus"`
+}
+
+// String returns the string representation
+func (s GetDataSourceOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetDataSourceOutput) GoString() string {
+	return s.String()
+}
+
+type GetEvaluationInput struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the Evaluation to retrieve. The evaluation of each MLModel is recorded
+	// and cataloged. The ID provides the means to access the information.
+	EvaluationId *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetEvaluationInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetEvaluationInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetEvaluationInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetEvaluationInput"}
+	if s.EvaluationId == nil {
+		invalidParams.Add(request.NewErrParamRequired("EvaluationId"))
+	}
+	if s.EvaluationId != nil && len(*s.EvaluationId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("EvaluationId", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Represents the output of a GetEvaluation operation and describes an Evaluation.
+type GetEvaluationOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The time that the Evaluation was created. The time is expressed in epoch
+	// time.
+	CreatedAt *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+	// The AWS user account that invoked the evaluation. The account type can be
+	// either an AWS root account or an AWS Identity and Access Management (IAM)
+	// user account.
+	CreatedByIamUser *string `type:"string"`
+
+	// The DataSource used for this evaluation.
+	EvaluationDataSourceId *string `min:"1" type:"string"`
+
+	// The evaluation ID, which is the same as the EvaluationId in the request.
+	EvaluationId *string `min:"1" type:"string"`
+
+	// The location of the data file or directory in Amazon Simple Storage Service
+	// (Amazon S3).
+	InputDataLocationS3 *string `type:"string"`
+
+	// The time of the most recent edit to the Evaluation. The time is expressed
+	// in epoch time.
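+	// Editor's note: a sketch of polling GetEvaluation until the asynchronous
+	// evaluation settles, checking Status as described above. Assumes a
+	// configured client "svc" and a hypothetical ID:
+	//
+	//	for {
+	//		out, err := svc.GetEvaluation(&machinelearning.GetEvaluationInput{
+	//			EvaluationId: aws.String("ev-example-id"),
+	//		})
+	//		if err != nil {
+	//			break
+	//		}
+	//		if s := aws.StringValue(out.Status); s != "PENDING" && s != "INPROGRESS" {
+	//			break // COMPLETED, FAILED, or DELETED
+	//		}
+	//		time.Sleep(30 * time.Second)
+	//	}
+	//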
+	LastUpdatedAt *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+	// A link to the file that contains logs of the CreateEvaluation operation.
+	LogUri *string `type:"string"`
+
+	// The ID of the MLModel that was the focus of the evaluation.
+	MLModelId *string `min:"1" type:"string"`
+
+	// A description of the most recent details about evaluating the MLModel.
+	Message *string `type:"string"`
+
+	// A user-supplied name or description of the Evaluation.
+	Name *string `type:"string"`
+
+	// Measurements of how well the MLModel performed, using observations referenced
+	// by the DataSource. One of the following metrics is returned, based on the
+	// type of the MLModel:
+	//
+	// BinaryAUC: A binary MLModel uses the Area Under the Curve (AUC) technique
+	// to measure performance.
+	//
+	// RegressionRMSE: A regression MLModel uses the Root Mean Square Error (RMSE)
+	// technique to measure performance. RMSE measures the difference between predicted
+	// and actual values for a single variable.
+	//
+	// MulticlassAvgFScore: A multiclass MLModel uses the F1 score technique
+	// to measure performance.
+	//
+	// For more information about performance metrics, please see the Amazon
+	// Machine Learning Developer Guide (http://docs.aws.amazon.com/machine-learning/latest/dg).
+	PerformanceMetrics *PerformanceMetrics `type:"structure"`
+
+	// The status of the evaluation. This element can have one of the following
+	// values:
+	//
+	// PENDING - Amazon Machine Learning (Amazon ML) submitted a request to evaluate
+	// an MLModel. INPROGRESS - The evaluation is underway. FAILED - The request
+	// to evaluate an MLModel did not run to completion. It is not usable. COMPLETED
+	// - The evaluation process completed successfully. DELETED - The Evaluation
+	// is marked as deleted. It is not usable.
+	Status *string `type:"string" enum:"EntityStatus"`
+}
+
+// String returns the string representation
+func (s GetEvaluationOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetEvaluationOutput) GoString() string {
+	return s.String()
+}
+
+type GetMLModelInput struct {
+	_ struct{} `type:"structure"`
+
+	// The ID assigned to the MLModel at creation.
+	MLModelId *string `min:"1" type:"string" required:"true"`
+
+	// Specifies whether the GetMLModel operation should return Recipe.
+	//
+	// If true, Recipe is returned.
+	//
+	// If false, Recipe is not returned.
+	Verbose *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s GetMLModelInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetMLModelInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetMLModelInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetMLModelInput"}
+	if s.MLModelId == nil {
+		invalidParams.Add(request.NewErrParamRequired("MLModelId"))
+	}
+	if s.MLModelId != nil && len(*s.MLModelId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("MLModelId", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Represents the output of a GetMLModel operation, and provides detailed information
+// about an MLModel.
+type GetMLModelOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The time that the MLModel was created. The time is expressed in epoch time.
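+	// Editor's note: a sketch of reading the single metric reported for an
+	// evaluation, keyed by model type as described above ("BinaryAUC",
+	// "RegressionRMSE", or "MulticlassAvgFScore"); "out" is a hypothetical
+	// GetEvaluationOutput:
+	//
+	//	if pm := out.PerformanceMetrics; pm != nil {
+	//		if auc, ok := pm.Properties["BinaryAUC"]; ok {
+	//			fmt.Println("AUC:", aws.StringValue(auc))
+	//		}
+	//	}
+	//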
+	CreatedAt *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+	// The AWS user account from which the MLModel was created. The account type
+	// can be either an AWS root account or an AWS Identity and Access Management
+	// (IAM) user account.
+	CreatedByIamUser *string `type:"string"`
+
+	// The current endpoint of the MLModel.
+	EndpointInfo *RealtimeEndpointInfo `type:"structure"`
+
+	// The location of the data file or directory in Amazon Simple Storage Service
+	// (Amazon S3).
+	InputDataLocationS3 *string `type:"string"`
+
+	// The time of the most recent edit to the MLModel. The time is expressed in
+	// epoch time.
+	LastUpdatedAt *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+	// A link to the file that contains logs of the CreateMLModel operation.
+	LogUri *string `type:"string"`
+
+	// The MLModel ID, which is the same as the MLModelId in the request.
+	MLModelId *string `min:"1" type:"string"`
+
+	// Identifies the MLModel category. The following are the available types:
+	//
+	// REGRESSION -- Produces a numeric result. For example, "What price should
+	// a house be listed at?" BINARY -- Produces one of two possible results. For
+	// example, "Is this an e-commerce website?" MULTICLASS -- Produces one of several
+	// possible results. For example, "Is this a HIGH, LOW or MEDIUM risk trade?"
+	MLModelType *string `type:"string" enum:"MLModelType"`
+
+	// A description of the most recent details about accessing the MLModel.
+	Message *string `type:"string"`
+
+	// A user-supplied name or description of the MLModel.
+	Name *string `type:"string"`
+
+	// The recipe to use when training the MLModel. The Recipe provides detailed
+	// information about the observation data to use during training, and manipulations
+	// to perform on the observation data during training.
+	//
+	// Note This parameter is provided as part of the verbose format.
+	Recipe *string `type:"string"`
+
+	// The schema used by all of the data files referenced by the DataSource.
+	//
+	// Note This parameter is provided as part of the verbose format.
+	Schema *string `type:"string"`
+
+	// The scoring threshold is used in binary classification MLModels. It marks
+	// the boundary between a positive prediction and a negative prediction.
+	//
+	// Output values greater than or equal to the threshold receive a positive
+	// result from the MLModel, such as true. Output values less than the threshold
+	// receive a negative response from the MLModel, such as false.
+	ScoreThreshold *float64 `type:"float"`
+
+	// The time of the most recent edit to the ScoreThreshold. The time is expressed
+	// in epoch time.
+	ScoreThresholdLastUpdatedAt *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+	// Long integer type that is a 64-bit signed number.
+	SizeInBytes *int64 `type:"long"`
+
+	// The current status of the MLModel. This element can have one of the following
+	// values:
+	//
+	// PENDING - Amazon Machine Learning (Amazon ML) submitted a request to describe
+	// an MLModel. INPROGRESS - The request is processing. FAILED - The request
+	// did not run to completion. The ML model isn't usable. COMPLETED - The request
+	// completed successfully. DELETED - The MLModel is marked as deleted. It isn't
+	// usable.
+	Status *string `type:"string" enum:"EntityStatus"`
+
+	// The ID of the training DataSource.
+	TrainingDataSourceId *string `min:"1" type:"string"`
+
+	// A list of the training parameters in the MLModel. The list is implemented
+	// as a map of key-value pairs.
+ // + // The following is the current set of training parameters: + // + // sgd.maxMLModelSizeInBytes - The maximum allowed size of the model. Depending + // on the input data, the size of the model might affect its performance. + // + // The value is an integer that ranges from 100000 to 2147483648. The default + // value is 33554432. + // + // sgd.maxPasses - The number of times that the training process traverses + // the observations to build the MLModel. The value is an integer that ranges + // from 1 to 10000. The default value is 10. + // + // sgd.shuffleType - Whether Amazon ML shuffles the training data. Shuffling + // data improves a model's ability to find the optimal solution for a variety + // of data types. The valid values are auto and none. The default value is none. + // We strongly recommend that you shuffle your data. + // + // sgd.l1RegularizationAmount - The coefficient regularization L1 norm. It + // controls overfitting the data by penalizing large coefficients. This tends + // to drive coefficients to zero, resulting in a sparse feature set. If you + // use this parameter, start by specifying a small value, such as 1.0E-08. + // + // The value is a double that ranges from 0 to MAX_DOUBLE. The default is to + // not use L1 normalization. This parameter can't be used when L2 is specified. + // Use this parameter sparingly. + // + // sgd.l2RegularizationAmount - The coefficient regularization L2 norm. It + // controls overfitting the data by penalizing large coefficients. This tends + // to drive coefficients to small, nonzero values. If you use this parameter, + // start by specifying a small value, such as 1.0E-08. + // + // The value is a double that ranges from 0 to MAX_DOUBLE. The default is to + // not use L2 normalization. This parameter can't be used when L1 is specified. + // Use this parameter sparingly. + TrainingParameters map[string]*string `type:"map"` +} + +// String returns the string representation +func (s GetMLModelOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetMLModelOutput) GoString() string { + return s.String() +} + +// Represents the output of a GetMLModel operation. +// +// The content consists of the detailed metadata and the current status of +// the MLModel. +type MLModel struct { + _ struct{} `type:"structure"` + + // The algorithm used to train the MLModel. The following algorithm is supported: + // + // SGD -- Stochastic gradient descent. The goal of SGD is to minimize the + // gradient of the loss function. + Algorithm *string `type:"string" enum:"Algorithm"` + + // The time that the MLModel was created. The time is expressed in epoch time. + CreatedAt *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The AWS user account from which the MLModel was created. The account type + // can be either an AWS root account or an AWS Identity and Access Management + // (IAM) user account. + CreatedByIamUser *string `type:"string"` + + // The current endpoint of the MLModel. + EndpointInfo *RealtimeEndpointInfo `type:"structure"` + + // The location of the data file or directory in Amazon Simple Storage Service + // (Amazon S3). + InputDataLocationS3 *string `type:"string"` + + // The time of the most recent edit to the MLModel. The time is expressed in + // epoch time. + LastUpdatedAt *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The ID assigned to the MLModel at creation. 
+ MLModelId *string `min:"1" type:"string"`
+
+ // Identifies the MLModel category. The following are the available types:
+ //
+ // REGRESSION - Produces a numeric result. For example, "What price should
+ // a house be listed at?" BINARY - Produces one of two possible results. For
+ // example, "Is this a child-friendly web site?". MULTICLASS - Produces one
+ // of several possible results. For example, "Is this a HIGH-, LOW-, or MEDIUM-risk
+ // trade?".
+ MLModelType *string `type:"string" enum:"MLModelType"`
+
+ // A description of the most recent details about accessing the MLModel.
+ Message *string `type:"string"`
+
+ // A user-supplied name or description of the MLModel.
+ Name *string `type:"string"`
+
+ ScoreThreshold *float64 `type:"float"`
+
+ // The time of the most recent edit to the ScoreThreshold. The time is expressed
+ // in epoch time.
+ ScoreThresholdLastUpdatedAt *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // Long integer type that is a 64-bit signed number.
+ SizeInBytes *int64 `type:"long"`
+
+ // The current status of an MLModel. This element can have one of the following
+ // values:
+ //
+ // PENDING - Amazon Machine Learning (Amazon ML) submitted a request to create
+ // an MLModel. INPROGRESS - The creation process is underway. FAILED - The
+ // request to create an MLModel didn't run to completion. The model isn't usable.
+ // COMPLETED - The creation process completed successfully. DELETED - The
+ // MLModel is marked as deleted. It isn't usable.
+ Status *string `type:"string" enum:"EntityStatus"`
+
+ // The ID of the training DataSource. The CreateMLModel operation uses the TrainingDataSourceId.
+ TrainingDataSourceId *string `min:"1" type:"string"`
+
+ // A list of the training parameters in the MLModel. The list is implemented
+ // as a map of key-value pairs.
+ //
+ // The following is the current set of training parameters:
+ //
+ // sgd.maxMLModelSizeInBytes - The maximum allowed size of the model. Depending
+ // on the input data, the size of the model might affect its performance.
+ //
+ // The value is an integer that ranges from 100000 to 2147483648. The default
+ // value is 33554432.
+ //
+ // sgd.maxPasses - The number of times that the training process traverses
+ // the observations to build the MLModel. The value is an integer that ranges
+ // from 1 to 10000. The default value is 10.
+ //
+ // sgd.shuffleType - Whether Amazon ML shuffles the training data. Shuffling
+ // the data improves a model's ability to find the optimal solution for a variety
+ // of data types. The valid values are auto and none. The default value is none.
+ //
+ // sgd.l1RegularizationAmount - The coefficient regularization L1 norm, which
+ // controls overfitting the data by penalizing large coefficients. This parameter
+ // tends to drive coefficients to zero, resulting in a sparse feature set. If
+ // you use this parameter, start by specifying a small value, such as 1.0E-08.
+ //
+ // The value is a double that ranges from 0 to MAX_DOUBLE. The default is to
+ // not use L1 normalization. This parameter can't be used when L2 is specified.
+ // Use this parameter sparingly.
+ //
+ // sgd.l2RegularizationAmount - The coefficient regularization L2 norm, which
+ // controls overfitting the data by penalizing large coefficients. This tends
+ // to drive coefficients to small, nonzero values. If you use this parameter,
+ // start by specifying a small value, such as 1.0E-08.
+ //
+ // The value is a double that ranges from 0 to MAX_DOUBLE.
The default is to + // not use L2 normalization. This parameter can't be used when L1 is specified. + // Use this parameter sparingly. + TrainingParameters map[string]*string `type:"map"` +} + +// String returns the string representation +func (s MLModel) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MLModel) GoString() string { + return s.String() +} + +// Measurements of how well the MLModel performed on known observations. One +// of the following metrics is returned, based on the type of the MLModel: +// +// BinaryAUC: The binary MLModel uses the Area Under the Curve (AUC) technique +// to measure performance. +// +// RegressionRMSE: The regression MLModel uses the Root Mean Square Error +// (RMSE) technique to measure performance. RMSE measures the difference between +// predicted and actual values for a single variable. +// +// MulticlassAvgFScore: The multiclass MLModel uses the F1 score technique +// to measure performance. +// +// For more information about performance metrics, please see the Amazon +// Machine Learning Developer Guide (http://docs.aws.amazon.com/machine-learning/latest/dg). +type PerformanceMetrics struct { + _ struct{} `type:"structure"` + + Properties map[string]*string `type:"map"` +} + +// String returns the string representation +func (s PerformanceMetrics) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PerformanceMetrics) GoString() string { + return s.String() +} + +type PredictInput struct { + _ struct{} `type:"structure"` + + // A unique identifier of the MLModel. + MLModelId *string `min:"1" type:"string" required:"true"` + + PredictEndpoint *string `type:"string" required:"true"` + + // A map of variable name-value pairs that represent an observation. + Record map[string]*string `type:"map" required:"true"` +} + +// String returns the string representation +func (s PredictInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PredictInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PredictInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PredictInput"} + if s.MLModelId == nil { + invalidParams.Add(request.NewErrParamRequired("MLModelId")) + } + if s.MLModelId != nil && len(*s.MLModelId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MLModelId", 1)) + } + if s.PredictEndpoint == nil { + invalidParams.Add(request.NewErrParamRequired("PredictEndpoint")) + } + if s.Record == nil { + invalidParams.Add(request.NewErrParamRequired("Record")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PredictOutput struct { + _ struct{} `type:"structure"` + + // The output from a Predict operation: + // + // Details - Contains the following attributes: DetailsAttributes.PREDICTIVE_MODEL_TYPE + // - REGRESSION | BINARY | MULTICLASS DetailsAttributes.ALGORITHM - SGD + // + // PredictedLabel - Present for either a BINARY or MULTICLASS MLModel request. + // + // PredictedScores - Contains the raw classification score corresponding + // to each label. + // + // PredictedValue - Present for a REGRESSION MLModel request. 
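+ //
+ // As a rough, non-normative illustration (the values are invented; the field
+ // names come from the Prediction struct below), a BINARY model's result might
+ // prettify as:
+ //
+ //  { PredictedLabel: "1", PredictedScores: { "1": 0.87 },
+ //  Details: { "PredictiveModelType": "BINARY", "Algorithm": "SGD" } }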
+ Prediction *Prediction `type:"structure"` +} + +// String returns the string representation +func (s PredictOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PredictOutput) GoString() string { + return s.String() +} + +// The output from a Predict operation: +// +// Details - Contains the following attributes: DetailsAttributes.PREDICTIVE_MODEL_TYPE +// - REGRESSION | BINARY | MULTICLASS DetailsAttributes.ALGORITHM - SGD +// +// PredictedLabel - Present for either a BINARY or MULTICLASS MLModel request. +// +// PredictedScores - Contains the raw classification score corresponding +// to each label. +// +// PredictedValue - Present for a REGRESSION MLModel request. +type Prediction struct { + _ struct{} `type:"structure"` + + // Provides any additional details regarding the prediction. + Details map[string]*string `locationName:"details" type:"map"` + + // The prediction label for either a BINARY or MULTICLASS MLModel. + PredictedLabel *string `locationName:"predictedLabel" min:"1" type:"string"` + + // Provides the raw classification score corresponding to each label. + PredictedScores map[string]*float64 `locationName:"predictedScores" type:"map"` + + // The prediction value for REGRESSION MLModel. + PredictedValue *float64 `locationName:"predictedValue" type:"float"` +} + +// String returns the string representation +func (s Prediction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Prediction) GoString() string { + return s.String() +} + +// The data specification of an Amazon Relational Database Service (Amazon RDS) +// DataSource. +type RDSDataSpec struct { + _ struct{} `type:"structure"` + + // A JSON string that represents the splitting and rearrangement processing + // to be applied to a DataSource. If the DataRearrangement parameter is not + // provided, all of the input data is used to create the Datasource. + // + // There are multiple parameters that control what data is used to create a + // datasource: + // + // percentBegin + // + // Use percentBegin to indicate the beginning of the range of the data used + // to create the Datasource. If you do not include percentBegin and percentEnd, + // Amazon ML includes all of the data when creating the datasource. + // + // percentEnd + // + // Use percentEnd to indicate the end of the range of the data used to create + // the Datasource. If you do not include percentBegin and percentEnd, Amazon + // ML includes all of the data when creating the datasource. + // + // complement + // + // The complement parameter instructs Amazon ML to use the data that is not + // included in the range of percentBegin to percentEnd to create a datasource. + // The complement parameter is useful if you need to create complementary datasources + // for training and evaluation. To create a complementary datasource, use the + // same values for percentBegin and percentEnd, along with the complement parameter. + // + // For example, the following two datasources do not share any data, and can + // be used to train and evaluate a model. The first datasource has 25 percent + // of the data, and the second one has 75 percent of the data. 
+ // + // Datasource for evaluation: {"splitting":{"percentBegin":0, "percentEnd":25}} + // + // Datasource for training: {"splitting":{"percentBegin":0, "percentEnd":25, + // "complement":"true"}} + // + // strategy + // + // To change how Amazon ML splits the data for a datasource, use the strategy + // parameter. + // + // The default value for the strategy parameter is sequential, meaning that + // Amazon ML takes all of the data records between the percentBegin and percentEnd + // parameters for the datasource, in the order that the records appear in the + // input data. + // + // The following two DataRearrangement lines are examples of sequentially ordered + // training and evaluation datasources: + // + // Datasource for evaluation: {"splitting":{"percentBegin":70, "percentEnd":100, + // "strategy":"sequential"}} + // + // Datasource for training: {"splitting":{"percentBegin":70, "percentEnd":100, + // "strategy":"sequential", "complement":"true"}} + // + // To randomly split the input data into the proportions indicated by the percentBegin + // and percentEnd parameters, set the strategy parameter to random and provide + // a string that is used as the seed value for the random data splitting (for + // example, you can use the S3 path to your data as the random seed string). + // If you choose the random split strategy, Amazon ML assigns each row of data + // a pseudo-random number between 0 and 100, and then selects the rows that + // have an assigned number between percentBegin and percentEnd. Pseudo-random + // numbers are assigned using both the input seed string value and the byte + // offset as a seed, so changing the data results in a different split. Any + // existing ordering is preserved. The random splitting strategy ensures that + // variables in the training and evaluation data are distributed similarly. + // It is useful in the cases where the input data may have an implicit sort + // order, which would otherwise result in training and evaluation datasources + // containing non-similar data records. + // + // The following two DataRearrangement lines are examples of non-sequentially + // ordered training and evaluation datasources: + // + // Datasource for evaluation: {"splitting":{"percentBegin":70, "percentEnd":100, + // "strategy":"random", "randomSeed"="s3://my_s3_path/bucket/file.csv"}} + // + // Datasource for training: {"splitting":{"percentBegin":70, "percentEnd":100, + // "strategy":"random", "randomSeed"="s3://my_s3_path/bucket/file.csv", "complement":"true"}} + DataRearrangement *string `type:"string"` + + // A JSON string that represents the schema for an Amazon RDS DataSource. The + // DataSchema defines the structure of the observation data in the data file(s) + // referenced in the DataSource. + // + // A DataSchema is not required if you specify a DataSchemaUri + // + // Define your DataSchema as a series of key-value pairs. attributes and excludedVariableNames + // have an array of key-value pairs for their value. Use the following format + // to define your DataSchema. 
+ //
+ // { "version": "1.0",
+ //
+ // "recordAnnotationFieldName": "F1",
+ //
+ // "recordWeightFieldName": "F2",
+ //
+ // "targetFieldName": "F3",
+ //
+ // "dataFormat": "CSV",
+ //
+ // "dataFileContainsHeader": true,
+ //
+ // "attributes": [
+ //
+ // { "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType":
+ // "NUMERIC" }, { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName":
+ // "F4", "fieldType": "NUMERIC" }, { "fieldName": "F5", "fieldType": "CATEGORICAL"
+ // }, { "fieldName": "F6", "fieldType": "TEXT" }, { "fieldName": "F7", "fieldType":
+ // "WEIGHTED_INT_SEQUENCE" }, { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE"
+ // } ],
+ //
+ // "excludedVariableNames": [ "F6" ] }
+ DataSchema *string `type:"string"`
+
+ // The Amazon S3 location of the DataSchema.
+ DataSchemaUri *string `type:"string"`
+
+ // The AWS Identity and Access Management (IAM) credentials that are used to
+ // connect to the Amazon RDS database.
+ DatabaseCredentials *RDSDatabaseCredentials `type:"structure" required:"true"`
+
+ // Describes the DatabaseName and InstanceIdentifier of an Amazon RDS database.
+ DatabaseInformation *RDSDatabase `type:"structure" required:"true"`
+
+ // The role (DataPipelineDefaultResourceRole) assumed by an Amazon Elastic Compute
+ // Cloud (Amazon EC2) instance to carry out the copy task from Amazon RDS
+ // to Amazon S3. For more information, see Role templates (http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html)
+ // for data pipelines.
+ ResourceRole *string `min:"1" type:"string" required:"true"`
+
+ // The Amazon S3 location for staging Amazon RDS data. The data retrieved from
+ // Amazon RDS using SelectSqlQuery is stored in this location.
+ S3StagingLocation *string `type:"string" required:"true"`
+
+ // The security group IDs to be used to access a VPC-based RDS DB instance.
+ // Ensure that there are appropriate ingress rules set up to allow access to
+ // the RDS DB instance. This attribute is used by Data Pipeline to carry out
+ // the copy task from Amazon RDS to Amazon S3.
+ SecurityGroupIds []*string `type:"list" required:"true"`
+
+ // The query that is used to retrieve the observation data for the DataSource.
+ SelectSqlQuery *string `min:"1" type:"string" required:"true"`
+
+ // The role (DataPipelineDefaultRole) assumed by the AWS Data Pipeline service to
+ // monitor the progress of the copy task from Amazon RDS to Amazon S3. For more
+ // information, see Role templates (http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html)
+ // for data pipelines.
+ ServiceRole *string `min:"1" type:"string" required:"true"`
+
+ // The subnet ID to be used to access a VPC-based RDS DB instance. This attribute
+ // is used by Data Pipeline to carry out the copy task from Amazon RDS to Amazon
+ // S3.
+ SubnetId *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RDSDataSpec) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RDSDataSpec) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
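+// In addition to the required-field and minimum-length checks, this method
+// recurses into DatabaseCredentials and DatabaseInformation through their own
+// Validate methods and collects any nested errors via AddNested.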
+func (s *RDSDataSpec) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RDSDataSpec"}
+ if s.DatabaseCredentials == nil {
+ invalidParams.Add(request.NewErrParamRequired("DatabaseCredentials"))
+ }
+ if s.DatabaseInformation == nil {
+ invalidParams.Add(request.NewErrParamRequired("DatabaseInformation"))
+ }
+ if s.ResourceRole == nil {
+ invalidParams.Add(request.NewErrParamRequired("ResourceRole"))
+ }
+ if s.ResourceRole != nil && len(*s.ResourceRole) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ResourceRole", 1))
+ }
+ if s.S3StagingLocation == nil {
+ invalidParams.Add(request.NewErrParamRequired("S3StagingLocation"))
+ }
+ if s.SecurityGroupIds == nil {
+ invalidParams.Add(request.NewErrParamRequired("SecurityGroupIds"))
+ }
+ if s.SelectSqlQuery == nil {
+ invalidParams.Add(request.NewErrParamRequired("SelectSqlQuery"))
+ }
+ if s.SelectSqlQuery != nil && len(*s.SelectSqlQuery) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("SelectSqlQuery", 1))
+ }
+ if s.ServiceRole == nil {
+ invalidParams.Add(request.NewErrParamRequired("ServiceRole"))
+ }
+ if s.ServiceRole != nil && len(*s.ServiceRole) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ServiceRole", 1))
+ }
+ if s.SubnetId == nil {
+ invalidParams.Add(request.NewErrParamRequired("SubnetId"))
+ }
+ if s.SubnetId != nil && len(*s.SubnetId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("SubnetId", 1))
+ }
+ if s.DatabaseCredentials != nil {
+ if err := s.DatabaseCredentials.Validate(); err != nil {
+ invalidParams.AddNested("DatabaseCredentials", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.DatabaseInformation != nil {
+ if err := s.DatabaseInformation.Validate(); err != nil {
+ invalidParams.AddNested("DatabaseInformation", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// The database details of an Amazon RDS database.
+type RDSDatabase struct {
+ _ struct{} `type:"structure"`
+
+ // The name of a database hosted on an RDS DB instance.
+ DatabaseName *string `min:"1" type:"string" required:"true"`
+
+ // The ID of an RDS DB instance.
+ InstanceIdentifier *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RDSDatabase) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RDSDatabase) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RDSDatabase) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RDSDatabase"}
+ if s.DatabaseName == nil {
+ invalidParams.Add(request.NewErrParamRequired("DatabaseName"))
+ }
+ if s.DatabaseName != nil && len(*s.DatabaseName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1))
+ }
+ if s.InstanceIdentifier == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceIdentifier"))
+ }
+ if s.InstanceIdentifier != nil && len(*s.InstanceIdentifier) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("InstanceIdentifier", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// The database credentials to connect to a database on an RDS DB instance.
+type RDSDatabaseCredentials struct {
+ _ struct{} `type:"structure"`
+
+ // The password to be used by Amazon ML to connect to a database on an RDS DB
+ // instance. The password should have sufficient permissions to execute the
+ // RDSSelectSqlQuery query.
+ Password *string `min:"8" type:"string" required:"true"`
+
+ // The username to be used by Amazon ML to connect to a database on an Amazon
+ // RDS instance. The username should have sufficient permissions to execute
+ // an RDSSelectSqlQuery query.
+ Username *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RDSDatabaseCredentials) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RDSDatabaseCredentials) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RDSDatabaseCredentials) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RDSDatabaseCredentials"}
+ if s.Password == nil {
+ invalidParams.Add(request.NewErrParamRequired("Password"))
+ }
+ if s.Password != nil && len(*s.Password) < 8 {
+ invalidParams.Add(request.NewErrParamMinLen("Password", 8))
+ }
+ if s.Username == nil {
+ invalidParams.Add(request.NewErrParamRequired("Username"))
+ }
+ if s.Username != nil && len(*s.Username) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Username", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// The datasource details that are specific to Amazon RDS.
+type RDSMetadata struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the Data Pipeline instance that is used to copy data from
+ // Amazon RDS to Amazon S3. You can use the ID to find details about the instance
+ // in the Data Pipeline console.
+ DataPipelineId *string `min:"1" type:"string"`
+
+ // The database details required to connect to an Amazon RDS database.
+ Database *RDSDatabase `type:"structure"`
+
+ // The username to be used by Amazon ML to connect to a database on an Amazon
+ // RDS instance. The username should have sufficient permissions to execute
+ // an RDSSelectSqlQuery query.
+ DatabaseUserName *string `min:"1" type:"string"`
+
+ // The role (DataPipelineDefaultResourceRole) assumed by an Amazon EC2 instance
+ // to carry out the copy task from Amazon RDS to Amazon S3. For more information,
+ // see Role templates (http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html)
+ // for data pipelines.
+ ResourceRole *string `min:"1" type:"string"`
+
+ // The SQL query that is supplied during CreateDataSourceFromRDS. Returns only
+ // if Verbose is true in GetDataSourceInput.
+ SelectSqlQuery *string `min:"1" type:"string"`
+
+ // The role (DataPipelineDefaultRole) assumed by the Data Pipeline service to
+ // monitor the progress of the copy task from Amazon RDS to Amazon S3. For more
+ // information, see Role templates (http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html)
+ // for data pipelines.
+ ServiceRole *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s RDSMetadata) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RDSMetadata) GoString() string {
+ return s.String()
+}
+
+// Describes the real-time endpoint information for an MLModel.
+type RealtimeEndpointInfo struct {
+ _ struct{} `type:"structure"`
+
+ // The time that the request to create the real-time endpoint for the MLModel
+ // was received. The time is expressed in epoch time.
+ CreatedAt *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The current status of the real-time endpoint for the MLModel.
This element + // can have one of the following values: + // + // NONE - Endpoint does not exist or was previously deleted. READY - Endpoint + // is ready to be used for real-time predictions. UPDATING - Updating/creating + // the endpoint. + EndpointStatus *string `type:"string" enum:"RealtimeEndpointStatus"` + + // The URI that specifies where to send real-time prediction requests for the + // MLModel. + // + // Note The application must wait until the real-time endpoint is ready before + // using this URI. + EndpointUrl *string `type:"string"` + + // The maximum processing rate for the real-time endpoint for MLModel, measured + // in incoming requests per second. + PeakRequestsPerSecond *int64 `type:"integer"` +} + +// String returns the string representation +func (s RealtimeEndpointInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RealtimeEndpointInfo) GoString() string { + return s.String() +} + +// Describes the data specification of an Amazon Redshift DataSource. +type RedshiftDataSpec struct { + _ struct{} `type:"structure"` + + // A JSON string that represents the splitting and rearrangement processing + // to be applied to a DataSource. If the DataRearrangement parameter is not + // provided, all of the input data is used to create the Datasource. + // + // There are multiple parameters that control what data is used to create a + // datasource: + // + // percentBegin + // + // Use percentBegin to indicate the beginning of the range of the data used + // to create the Datasource. If you do not include percentBegin and percentEnd, + // Amazon ML includes all of the data when creating the datasource. + // + // percentEnd + // + // Use percentEnd to indicate the end of the range of the data used to create + // the Datasource. If you do not include percentBegin and percentEnd, Amazon + // ML includes all of the data when creating the datasource. + // + // complement + // + // The complement parameter instructs Amazon ML to use the data that is not + // included in the range of percentBegin to percentEnd to create a datasource. + // The complement parameter is useful if you need to create complementary datasources + // for training and evaluation. To create a complementary datasource, use the + // same values for percentBegin and percentEnd, along with the complement parameter. + // + // For example, the following two datasources do not share any data, and can + // be used to train and evaluate a model. The first datasource has 25 percent + // of the data, and the second one has 75 percent of the data. + // + // Datasource for evaluation: {"splitting":{"percentBegin":0, "percentEnd":25}} + // + // Datasource for training: {"splitting":{"percentBegin":0, "percentEnd":25, + // "complement":"true"}} + // + // strategy + // + // To change how Amazon ML splits the data for a datasource, use the strategy + // parameter. + // + // The default value for the strategy parameter is sequential, meaning that + // Amazon ML takes all of the data records between the percentBegin and percentEnd + // parameters for the datasource, in the order that the records appear in the + // input data. 
+ //
+ // The following two DataRearrangement lines are examples of sequentially ordered
+ // training and evaluation datasources:
+ //
+ // Datasource for evaluation: {"splitting":{"percentBegin":70, "percentEnd":100,
+ // "strategy":"sequential"}}
+ //
+ // Datasource for training: {"splitting":{"percentBegin":70, "percentEnd":100,
+ // "strategy":"sequential", "complement":"true"}}
+ //
+ // To randomly split the input data into the proportions indicated by the percentBegin
+ // and percentEnd parameters, set the strategy parameter to random and provide
+ // a string that is used as the seed value for the random data splitting (for
+ // example, you can use the S3 path to your data as the random seed string).
+ // If you choose the random split strategy, Amazon ML assigns each row of data
+ // a pseudo-random number between 0 and 100, and then selects the rows that
+ // have an assigned number between percentBegin and percentEnd. Pseudo-random
+ // numbers are assigned using both the input seed string value and the byte
+ // offset as a seed, so changing the data results in a different split. Any
+ // existing ordering is preserved. The random splitting strategy ensures that
+ // variables in the training and evaluation data are distributed similarly.
+ // It is useful in the cases where the input data may have an implicit sort
+ // order, which would otherwise result in training and evaluation datasources
+ // containing non-similar data records.
+ //
+ // The following two DataRearrangement lines are examples of non-sequentially
+ // ordered training and evaluation datasources:
+ //
+ // Datasource for evaluation: {"splitting":{"percentBegin":70, "percentEnd":100,
+ // "strategy":"random", "randomSeed"="s3://my_s3_path/bucket/file.csv"}}
+ //
+ // Datasource for training: {"splitting":{"percentBegin":70, "percentEnd":100,
+ // "strategy":"random", "randomSeed"="s3://my_s3_path/bucket/file.csv", "complement":"true"}}
+ DataRearrangement *string `type:"string"`
+
+ // A JSON string that represents the schema for an Amazon Redshift DataSource.
+ // The DataSchema defines the structure of the observation data in the data
+ // file(s) referenced in the DataSource.
+ //
+ // A DataSchema is not required if you specify a DataSchemaUri.
+ //
+ // Define your DataSchema as a series of key-value pairs. attributes and excludedVariableNames
+ // have an array of key-value pairs for their value. Use the following format
+ // to define your DataSchema.
+ //
+ // { "version": "1.0",
+ //
+ // "recordAnnotationFieldName": "F1",
+ //
+ // "recordWeightFieldName": "F2",
+ //
+ // "targetFieldName": "F3",
+ //
+ // "dataFormat": "CSV",
+ //
+ // "dataFileContainsHeader": true,
+ //
+ // "attributes": [
+ //
+ // { "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType":
+ // "NUMERIC" }, { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName":
+ // "F4", "fieldType": "NUMERIC" }, { "fieldName": "F5", "fieldType": "CATEGORICAL"
+ // }, { "fieldName": "F6", "fieldType": "TEXT" }, { "fieldName": "F7", "fieldType":
+ // "WEIGHTED_INT_SEQUENCE" }, { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE"
+ // } ],
+ //
+ // "excludedVariableNames": [ "F6" ] }
+ DataSchema *string `type:"string"`
+
+ // Describes the schema location for an Amazon Redshift DataSource.
+ DataSchemaUri *string `type:"string"`
+
+ // Describes AWS Identity and Access Management (IAM) credentials that are used
+ // to connect to the Amazon Redshift database.
+ DatabaseCredentials *RedshiftDatabaseCredentials `type:"structure" required:"true"` + + // Describes the DatabaseName and ClusterIdentifier for an Amazon Redshift DataSource. + DatabaseInformation *RedshiftDatabase `type:"structure" required:"true"` + + // Describes an Amazon S3 location to store the result set of the SelectSqlQuery + // query. + S3StagingLocation *string `type:"string" required:"true"` + + // Describes the SQL Query to execute on an Amazon Redshift database for an + // Amazon Redshift DataSource. + SelectSqlQuery *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RedshiftDataSpec) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RedshiftDataSpec) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RedshiftDataSpec) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RedshiftDataSpec"} + if s.DatabaseCredentials == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseCredentials")) + } + if s.DatabaseInformation == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseInformation")) + } + if s.S3StagingLocation == nil { + invalidParams.Add(request.NewErrParamRequired("S3StagingLocation")) + } + if s.SelectSqlQuery == nil { + invalidParams.Add(request.NewErrParamRequired("SelectSqlQuery")) + } + if s.SelectSqlQuery != nil && len(*s.SelectSqlQuery) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SelectSqlQuery", 1)) + } + if s.DatabaseCredentials != nil { + if err := s.DatabaseCredentials.Validate(); err != nil { + invalidParams.AddNested("DatabaseCredentials", err.(request.ErrInvalidParams)) + } + } + if s.DatabaseInformation != nil { + if err := s.DatabaseInformation.Validate(); err != nil { + invalidParams.AddNested("DatabaseInformation", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Describes the database details required to connect to an Amazon Redshift +// database. +type RedshiftDatabase struct { + _ struct{} `type:"structure"` + + // The ID of an Amazon Redshift cluster. + ClusterIdentifier *string `min:"1" type:"string" required:"true"` + + // The name of a database hosted on an Amazon Redshift cluster. + DatabaseName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RedshiftDatabase) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RedshiftDatabase) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RedshiftDatabase) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RedshiftDatabase"} + if s.ClusterIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterIdentifier")) + } + if s.ClusterIdentifier != nil && len(*s.ClusterIdentifier) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClusterIdentifier", 1)) + } + if s.DatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseName")) + } + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Describes the database credentials for connecting to a database on an Amazon +// Redshift cluster. 
+type RedshiftDatabaseCredentials struct {
+ _ struct{} `type:"structure"`
+
+ // A password to be used by Amazon ML to connect to a database on an Amazon
+ // Redshift cluster. The password should have sufficient permissions to execute
+ // a RedshiftSelectSqlQuery query. The password should be valid for an Amazon
+ // Redshift USER (http://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html).
+ Password *string `min:"8" type:"string" required:"true"`
+
+ // A username to be used by Amazon Machine Learning (Amazon ML) to connect to
+ // a database on an Amazon Redshift cluster. The username should have sufficient
+ // permissions to execute the RedshiftSelectSqlQuery query. The username should
+ // be valid for an Amazon Redshift USER (http://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html).
+ Username *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RedshiftDatabaseCredentials) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RedshiftDatabaseCredentials) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RedshiftDatabaseCredentials) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RedshiftDatabaseCredentials"}
+ if s.Password == nil {
+ invalidParams.Add(request.NewErrParamRequired("Password"))
+ }
+ if s.Password != nil && len(*s.Password) < 8 {
+ invalidParams.Add(request.NewErrParamMinLen("Password", 8))
+ }
+ if s.Username == nil {
+ invalidParams.Add(request.NewErrParamRequired("Username"))
+ }
+ if s.Username != nil && len(*s.Username) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Username", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Describes the DataSource details specific to Amazon Redshift.
+type RedshiftMetadata struct {
+ _ struct{} `type:"structure"`
+
+ // A username to be used by Amazon Machine Learning (Amazon ML) to connect to
+ // a database on an Amazon Redshift cluster. The username should have sufficient
+ // permissions to execute the RedshiftSelectSqlQuery query. The username should
+ // be valid for an Amazon Redshift USER (http://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html).
+ DatabaseUserName *string `min:"1" type:"string"`
+
+ // Describes the database details required to connect to an Amazon Redshift
+ // database.
+ RedshiftDatabase *RedshiftDatabase `type:"structure"`
+
+ // The SQL query that is specified during CreateDataSourceFromRedshift. Returns
+ // only if Verbose is true in GetDataSourceInput.
+ SelectSqlQuery *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s RedshiftMetadata) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RedshiftMetadata) GoString() string {
+ return s.String()
+}
+
+// Describes the data specification of a DataSource.
+type S3DataSpec struct {
+ _ struct{} `type:"structure"`
+
+ // The location of the data file(s) used by a DataSource. The URI specifies
+ // a data file or an Amazon Simple Storage Service (Amazon S3) directory or
+ // bucket containing data files.
+ DataLocationS3 *string `type:"string" required:"true"`
+
+ // A JSON string that represents the splitting and rearrangement processing
+ // to be applied to a DataSource.
If the DataRearrangement parameter is not + // provided, all of the input data is used to create the Datasource. + // + // There are multiple parameters that control what data is used to create a + // datasource: + // + // percentBegin + // + // Use percentBegin to indicate the beginning of the range of the data used + // to create the Datasource. If you do not include percentBegin and percentEnd, + // Amazon ML includes all of the data when creating the datasource. + // + // percentEnd + // + // Use percentEnd to indicate the end of the range of the data used to create + // the Datasource. If you do not include percentBegin and percentEnd, Amazon + // ML includes all of the data when creating the datasource. + // + // complement + // + // The complement parameter instructs Amazon ML to use the data that is not + // included in the range of percentBegin to percentEnd to create a datasource. + // The complement parameter is useful if you need to create complementary datasources + // for training and evaluation. To create a complementary datasource, use the + // same values for percentBegin and percentEnd, along with the complement parameter. + // + // For example, the following two datasources do not share any data, and can + // be used to train and evaluate a model. The first datasource has 25 percent + // of the data, and the second one has 75 percent of the data. + // + // Datasource for evaluation: {"splitting":{"percentBegin":0, "percentEnd":25}} + // + // Datasource for training: {"splitting":{"percentBegin":0, "percentEnd":25, + // "complement":"true"}} + // + // strategy + // + // To change how Amazon ML splits the data for a datasource, use the strategy + // parameter. + // + // The default value for the strategy parameter is sequential, meaning that + // Amazon ML takes all of the data records between the percentBegin and percentEnd + // parameters for the datasource, in the order that the records appear in the + // input data. + // + // The following two DataRearrangement lines are examples of sequentially ordered + // training and evaluation datasources: + // + // Datasource for evaluation: {"splitting":{"percentBegin":70, "percentEnd":100, + // "strategy":"sequential"}} + // + // Datasource for training: {"splitting":{"percentBegin":70, "percentEnd":100, + // "strategy":"sequential", "complement":"true"}} + // + // To randomly split the input data into the proportions indicated by the percentBegin + // and percentEnd parameters, set the strategy parameter to random and provide + // a string that is used as the seed value for the random data splitting (for + // example, you can use the S3 path to your data as the random seed string). + // If you choose the random split strategy, Amazon ML assigns each row of data + // a pseudo-random number between 0 and 100, and then selects the rows that + // have an assigned number between percentBegin and percentEnd. Pseudo-random + // numbers are assigned using both the input seed string value and the byte + // offset as a seed, so changing the data results in a different split. Any + // existing ordering is preserved. The random splitting strategy ensures that + // variables in the training and evaluation data are distributed similarly. + // It is useful in the cases where the input data may have an implicit sort + // order, which would otherwise result in training and evaluation datasources + // containing non-similar data records. 
+ // + // The following two DataRearrangement lines are examples of non-sequentially + // ordered training and evaluation datasources: + // + // Datasource for evaluation: {"splitting":{"percentBegin":70, "percentEnd":100, + // "strategy":"random", "randomSeed"="s3://my_s3_path/bucket/file.csv"}} + // + // Datasource for training: {"splitting":{"percentBegin":70, "percentEnd":100, + // "strategy":"random", "randomSeed"="s3://my_s3_path/bucket/file.csv", "complement":"true"}} + DataRearrangement *string `type:"string"` + + // A JSON string that represents the schema for an Amazon S3 DataSource. The + // DataSchema defines the structure of the observation data in the data file(s) + // referenced in the DataSource. + // + // You must provide either the DataSchema or the DataSchemaLocationS3. + // + // Define your DataSchema as a series of key-value pairs. attributes and excludedVariableNames + // have an array of key-value pairs for their value. Use the following format + // to define your DataSchema. + // + // { "version": "1.0", + // + // "recordAnnotationFieldName": "F1", + // + // "recordWeightFieldName": "F2", + // + // "targetFieldName": "F3", + // + // "dataFormat": "CSV", + // + // "dataFileContainsHeader": true, + // + // "attributes": [ + // + // { "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType": + // "NUMERIC" }, { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName": + // "F4", "fieldType": "NUMERIC" }, { "fieldName": "F5", "fieldType": "CATEGORICAL" + // }, { "fieldName": "F6", "fieldType": "TEXT" }, { "fieldName": "F7", "fieldType": + // "WEIGHTED_INT_SEQUENCE" }, { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE" + // } ], + // + // "excludedVariableNames": [ "F6" ] } + DataSchema *string `type:"string"` + + // Describes the schema location in Amazon S3. You must provide either the DataSchema + // or the DataSchemaLocationS3. + DataSchemaLocationS3 *string `type:"string"` +} + +// String returns the string representation +func (s S3DataSpec) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3DataSpec) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *S3DataSpec) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "S3DataSpec"} + if s.DataLocationS3 == nil { + invalidParams.Add(request.NewErrParamRequired("DataLocationS3")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A custom key-value pair associated with an ML object, such as an ML model. +type Tag struct { + _ struct{} `type:"structure"` + + // A unique identifier for the tag. Valid characters include Unicode letters, + // digits, white space, _, ., /, =, +, -, %, and @. + Key *string `min:"1" type:"string"` + + // An optional string, typically used to describe or define the tag. Valid characters + // include Unicode letters, digits, white space, _, ., /, =, +, -, %, and @. + Value *string `type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
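+// Both Key and Value are optional here; validation only enforces Key's minimum
+// length of 1 when Key is set.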
+func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateBatchPredictionInput struct { + _ struct{} `type:"structure"` + + // The ID assigned to the BatchPrediction during creation. + BatchPredictionId *string `min:"1" type:"string" required:"true"` + + // A new user-supplied name or description of the BatchPrediction. + BatchPredictionName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateBatchPredictionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateBatchPredictionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateBatchPredictionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateBatchPredictionInput"} + if s.BatchPredictionId == nil { + invalidParams.Add(request.NewErrParamRequired("BatchPredictionId")) + } + if s.BatchPredictionId != nil && len(*s.BatchPredictionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("BatchPredictionId", 1)) + } + if s.BatchPredictionName == nil { + invalidParams.Add(request.NewErrParamRequired("BatchPredictionName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of an UpdateBatchPrediction operation. +// +// You can see the updated content by using the GetBatchPrediction operation. +type UpdateBatchPredictionOutput struct { + _ struct{} `type:"structure"` + + // The ID assigned to the BatchPrediction during creation. This value should + // be identical to the value of the BatchPredictionId in the request. + BatchPredictionId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UpdateBatchPredictionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateBatchPredictionOutput) GoString() string { + return s.String() +} + +type UpdateDataSourceInput struct { + _ struct{} `type:"structure"` + + // The ID assigned to the DataSource during creation. + DataSourceId *string `min:"1" type:"string" required:"true"` + + // A new user-supplied name or description of the DataSource that will replace + // the current description. + DataSourceName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateDataSourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDataSourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateDataSourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateDataSourceInput"} + if s.DataSourceId == nil { + invalidParams.Add(request.NewErrParamRequired("DataSourceId")) + } + if s.DataSourceId != nil && len(*s.DataSourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DataSourceId", 1)) + } + if s.DataSourceName == nil { + invalidParams.Add(request.NewErrParamRequired("DataSourceName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the output of an UpdateDataSource operation. 
+//
+// You can see the updated content by using the GetDataSource operation.
+type UpdateDataSourceOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID assigned to the DataSource during creation. This value should be identical
+ // to the value of the DataSourceId in the request.
+ DataSourceId *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s UpdateDataSourceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateDataSourceOutput) GoString() string {
+ return s.String()
+}
+
+type UpdateEvaluationInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID assigned to the Evaluation during creation.
+ EvaluationId *string `min:"1" type:"string" required:"true"`
+
+ // A new user-supplied name or description of the Evaluation that will replace
+ // the current content.
+ EvaluationName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateEvaluationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateEvaluationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateEvaluationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UpdateEvaluationInput"}
+ if s.EvaluationId == nil {
+ invalidParams.Add(request.NewErrParamRequired("EvaluationId"))
+ }
+ if s.EvaluationId != nil && len(*s.EvaluationId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("EvaluationId", 1))
+ }
+ if s.EvaluationName == nil {
+ invalidParams.Add(request.NewErrParamRequired("EvaluationName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the output of an UpdateEvaluation operation.
+//
+// You can see the updated content by using the GetEvaluation operation.
+type UpdateEvaluationOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID assigned to the Evaluation during creation. This value should be identical
+ // to the value of the EvaluationId in the request.
+ EvaluationId *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s UpdateEvaluationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateEvaluationOutput) GoString() string {
+ return s.String()
+}
+
+type UpdateMLModelInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID assigned to the MLModel during creation.
+ MLModelId *string `min:"1" type:"string" required:"true"`
+
+ // A user-supplied name or description of the MLModel.
+ MLModelName *string `type:"string"`
+
+ // The ScoreThreshold used in a binary classification MLModel that marks the boundary
+ // between a positive prediction and a negative prediction.
+ //
+ // Output values greater than or equal to the ScoreThreshold receive a positive
+ // result from the MLModel, such as true. Output values less than the ScoreThreshold
+ // receive a negative response from the MLModel, such as false.
+ ScoreThreshold *float64 `type:"float"`
+}
+
+// String returns the string representation
+func (s UpdateMLModelInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateMLModelInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateMLModelInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UpdateMLModelInput"}
+ if s.MLModelId == nil {
+ invalidParams.Add(request.NewErrParamRequired("MLModelId"))
+ }
+ if s.MLModelId != nil && len(*s.MLModelId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("MLModelId", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Represents the output of an UpdateMLModel operation.
+//
+// You can see the updated content by using the GetMLModel operation.
+type UpdateMLModelOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID assigned to the MLModel during creation. This value should be identical
+ // to the value of the MLModelId in the request.
+ MLModelId *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s UpdateMLModelOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateMLModelOutput) GoString() string {
+ return s.String()
+}
+
+// The function used to train an MLModel. Training choices supported by Amazon
+// ML include the following:
+//
+// SGD - Stochastic Gradient Descent. RandomForest - Random forest of decision
+// trees.
+const (
+ // @enum Algorithm
+ AlgorithmSgd = "sgd"
+)
+
+// A list of the variables to use in searching or filtering BatchPrediction.
+//
+// CreatedAt - Sets the search criteria to BatchPrediction creation date.
+// Status - Sets the search criteria to BatchPrediction status. Name - Sets
+// the search criteria to the contents of BatchPrediction Name. IAMUser -
+// Sets the search criteria to the user account that invoked the BatchPrediction
+// creation. MLModelId - Sets the search criteria to the MLModel used in the
+// BatchPrediction. DataSourceId - Sets the search criteria to the DataSource
+// used in the BatchPrediction. DataURI - Sets the search criteria to the data
+// file(s) used in the BatchPrediction. The URL can identify either a file or
+// an Amazon Simple Storage Service (Amazon S3) bucket or directory.
+const (
+ // @enum BatchPredictionFilterVariable
+ BatchPredictionFilterVariableCreatedAt = "CreatedAt"
+ // @enum BatchPredictionFilterVariable
+ BatchPredictionFilterVariableLastUpdatedAt = "LastUpdatedAt"
+ // @enum BatchPredictionFilterVariable
+ BatchPredictionFilterVariableStatus = "Status"
+ // @enum BatchPredictionFilterVariable
+ BatchPredictionFilterVariableName = "Name"
+ // @enum BatchPredictionFilterVariable
+ BatchPredictionFilterVariableIamuser = "IAMUser"
+ // @enum BatchPredictionFilterVariable
+ BatchPredictionFilterVariableMlmodelId = "MLModelId"
+ // @enum BatchPredictionFilterVariable
+ BatchPredictionFilterVariableDataSourceId = "DataSourceId"
+ // @enum BatchPredictionFilterVariable
+ BatchPredictionFilterVariableDataUri = "DataURI"
+)
+
+// A list of the variables to use in searching or filtering DataSource.
+//
+// CreatedAt - Sets the search criteria to DataSource creation date. Status
+// - Sets the search criteria to DataSource status. Name - Sets the search
+// criteria to the contents of DataSource Name. DataUri - Sets the search
+// criteria to the URI of data files used to create the DataSource. The URI
+// can identify either a file or an Amazon Simple Storage Service (Amazon S3)
+// bucket or directory. IAMUser - Sets the search criteria to the user account
+// that invoked the DataSource creation. Note The variable names should match
+// the variable names in the DataSource.
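+//
+// A hedged sketch of using one of these variables (the client setup and the
+// "exampleDS" value are invented for illustration):
+//
+//    svc := machinelearning.New(session.New())
+//    out, err := svc.DescribeDataSources(&machinelearning.DescribeDataSourcesInput{
+//        FilterVariable: aws.String(machinelearning.DataSourceFilterVariableName),
+//        EQ:             aws.String("exampleDS"),
+//    })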
+const ( + // @enum DataSourceFilterVariable + DataSourceFilterVariableCreatedAt = "CreatedAt" + // @enum DataSourceFilterVariable + DataSourceFilterVariableLastUpdatedAt = "LastUpdatedAt" + // @enum DataSourceFilterVariable + DataSourceFilterVariableStatus = "Status" + // @enum DataSourceFilterVariable + DataSourceFilterVariableName = "Name" + // @enum DataSourceFilterVariable + DataSourceFilterVariableDataLocationS3 = "DataLocationS3" + // @enum DataSourceFilterVariable + DataSourceFilterVariableIamuser = "IAMUser" +) + +// Contains the key values of DetailsMap: PredictiveModelType - Indicates the +// type of the MLModel. Algorithm - Indicates the algorithm that was used for +// the MLModel. +const ( + // @enum DetailsAttributes + DetailsAttributesPredictiveModelType = "PredictiveModelType" + // @enum DetailsAttributes + DetailsAttributesAlgorithm = "Algorithm" +) + +// Object status with the following possible values: +// +// PENDING INPROGRESS FAILED COMPLETED DELETED +const ( + // @enum EntityStatus + EntityStatusPending = "PENDING" + // @enum EntityStatus + EntityStatusInprogress = "INPROGRESS" + // @enum EntityStatus + EntityStatusFailed = "FAILED" + // @enum EntityStatus + EntityStatusCompleted = "COMPLETED" + // @enum EntityStatus + EntityStatusDeleted = "DELETED" +) + +// A list of the variables to use in searching or filtering Evaluation. +// +// CreatedAt - Sets the search criteria to Evaluation creation date. Status +// - Sets the search criteria to Evaluation status. Name - Sets the search +// criteria to the contents of Evaluation Name. IAMUser - Sets the search +// criteria to the user account that invoked an evaluation. MLModelId - Sets +// the search criteria to the Predictor that was evaluated. DataSourceId - +// Sets the search criteria to the DataSource used in evaluation. DataUri - +// Sets the search criteria to the data file(s) used in evaluation. The URL +// can identify either a file or an Amazon Simple Storage Service (Amazon S3) +// bucket or directory. 
+const ( + // @enum EvaluationFilterVariable + EvaluationFilterVariableCreatedAt = "CreatedAt" + // @enum EvaluationFilterVariable + EvaluationFilterVariableLastUpdatedAt = "LastUpdatedAt" + // @enum EvaluationFilterVariable + EvaluationFilterVariableStatus = "Status" + // @enum EvaluationFilterVariable + EvaluationFilterVariableName = "Name" + // @enum EvaluationFilterVariable + EvaluationFilterVariableIamuser = "IAMUser" + // @enum EvaluationFilterVariable + EvaluationFilterVariableMlmodelId = "MLModelId" + // @enum EvaluationFilterVariable + EvaluationFilterVariableDataSourceId = "DataSourceId" + // @enum EvaluationFilterVariable + EvaluationFilterVariableDataUri = "DataURI" +) + +const ( + // @enum MLModelFilterVariable + MLModelFilterVariableCreatedAt = "CreatedAt" + // @enum MLModelFilterVariable + MLModelFilterVariableLastUpdatedAt = "LastUpdatedAt" + // @enum MLModelFilterVariable + MLModelFilterVariableStatus = "Status" + // @enum MLModelFilterVariable + MLModelFilterVariableName = "Name" + // @enum MLModelFilterVariable + MLModelFilterVariableIamuser = "IAMUser" + // @enum MLModelFilterVariable + MLModelFilterVariableTrainingDataSourceId = "TrainingDataSourceId" + // @enum MLModelFilterVariable + MLModelFilterVariableRealtimeEndpointStatus = "RealtimeEndpointStatus" + // @enum MLModelFilterVariable + MLModelFilterVariableMlmodelType = "MLModelType" + // @enum MLModelFilterVariable + MLModelFilterVariableAlgorithm = "Algorithm" + // @enum MLModelFilterVariable + MLModelFilterVariableTrainingDataUri = "TrainingDataURI" +) + +const ( + // @enum MLModelType + MLModelTypeRegression = "REGRESSION" + // @enum MLModelType + MLModelTypeBinary = "BINARY" + // @enum MLModelType + MLModelTypeMulticlass = "MULTICLASS" +) + +const ( + // @enum RealtimeEndpointStatus + RealtimeEndpointStatusNone = "NONE" + // @enum RealtimeEndpointStatus + RealtimeEndpointStatusReady = "READY" + // @enum RealtimeEndpointStatus + RealtimeEndpointStatusUpdating = "UPDATING" + // @enum RealtimeEndpointStatus + RealtimeEndpointStatusFailed = "FAILED" +) + +// The sort order specified in a listing condition. Possible values include +// the following: +// +// asc - Present the information in ascending order (from A-Z). dsc - Present +// the information in descending order (from Z-A). +const ( + // @enum SortOrder + SortOrderAsc = "asc" + // @enum SortOrder + SortOrderDsc = "dsc" +) + +const ( + // @enum TaggableResourceType + TaggableResourceTypeBatchPrediction = "BatchPrediction" + // @enum TaggableResourceType + TaggableResourceTypeDataSource = "DataSource" + // @enum TaggableResourceType + TaggableResourceTypeEvaluation = "Evaluation" + // @enum TaggableResourceType + TaggableResourceTypeMlmodel = "MLModel" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/machinelearning/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/machinelearning/customizations.go new file mode 100644 index 000000000..9a6a45666 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/machinelearning/customizations.go @@ -0,0 +1,33 @@ +package machinelearning + +import ( + "net/url" + + "github.com/aws/aws-sdk-go/aws/request" +) + +func init() { + initRequest = func(r *request.Request) { + switch r.Operation.Name { + case opPredict: + r.Handlers.Build.PushBack(updatePredictEndpoint) + } + } +} + +// updatePredictEndpoint rewrites the request endpoint to use the +// "PredictEndpoint" parameter of the Predict operation. 
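+// Predict is unusual in this API: each request must go to the model's own
+// real-time endpoint (see RealtimeEndpointInfo.EndpointUrl) rather than the
+// regional service endpoint, so this Build handler rewrites both the client
+// endpoint and the outgoing request URL.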
+func updatePredictEndpoint(r *request.Request) { + if !r.ParamsFilled() { + return + } + + r.ClientInfo.Endpoint = *r.Params.(*PredictInput).PredictEndpoint + + uri, err := url.Parse(r.ClientInfo.Endpoint) + if err != nil { + r.Error = err + return + } + r.HTTPRequest.URL = uri +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/machinelearning/customizations_test.go b/vendor/github.com/aws/aws-sdk-go/service/machinelearning/customizations_test.go new file mode 100644 index 000000000..734de65ef --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/machinelearning/customizations_test.go @@ -0,0 +1,37 @@ +package machinelearning_test + +import ( + "bytes" + "io/ioutil" + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/machinelearning" +) + +func TestPredictEndpoint(t *testing.T) { + ml := machinelearning.New(unit.Session) + ml.Handlers.Send.Clear() + ml.Handlers.Send.PushBack(func(r *request.Request) { + r.HTTPResponse = &http.Response{ + StatusCode: 200, + Header: http.Header{}, + Body: ioutil.NopCloser(bytes.NewReader([]byte("{}"))), + } + }) + + req, _ := ml.PredictRequest(&machinelearning.PredictInput{ + PredictEndpoint: aws.String("https://localhost/endpoint"), + MLModelId: aws.String("id"), + Record: map[string]*string{}, + }) + err := req.Send() + + assert.Nil(t, err) + assert.Equal(t, "https://localhost/endpoint", req.HTTPRequest.URL.String()) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/machinelearning/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/machinelearning/examples_test.go new file mode 100644 index 000000000..ab15d62b8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/machinelearning/examples_test.go @@ -0,0 +1,681 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package machinelearning_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/machinelearning" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleMachineLearning_AddTags() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.AddTagsInput{ + ResourceId: aws.String("EntityId"), // Required + ResourceType: aws.String("TaggableResourceType"), // Required + Tags: []*machinelearning.Tag{ // Required + { // Required + Key: aws.String("TagKey"), + Value: aws.String("TagValue"), + }, + // More values... + }, + } + resp, err := svc.AddTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleMachineLearning_CreateBatchPrediction() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.CreateBatchPredictionInput{ + BatchPredictionDataSourceId: aws.String("EntityId"), // Required + BatchPredictionId: aws.String("EntityId"), // Required + MLModelId: aws.String("EntityId"), // Required + OutputUri: aws.String("S3Url"), // Required + BatchPredictionName: aws.String("EntityName"), + } + resp, err := svc.CreateBatchPrediction(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleMachineLearning_CreateDataSourceFromRDS() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.CreateDataSourceFromRDSInput{ + DataSourceId: aws.String("EntityId"), // Required + RDSData: &machinelearning.RDSDataSpec{ // Required + DatabaseCredentials: &machinelearning.RDSDatabaseCredentials{ // Required + Password: aws.String("RDSDatabasePassword"), // Required + Username: aws.String("RDSDatabaseUsername"), // Required + }, + DatabaseInformation: &machinelearning.RDSDatabase{ // Required + DatabaseName: aws.String("RDSDatabaseName"), // Required + InstanceIdentifier: aws.String("RDSInstanceIdentifier"), // Required + }, + ResourceRole: aws.String("EDPResourceRole"), // Required + S3StagingLocation: aws.String("S3Url"), // Required + SecurityGroupIds: []*string{ // Required + aws.String("EDPSecurityGroupId"), // Required + // More values... + }, + SelectSqlQuery: aws.String("RDSSelectSqlQuery"), // Required + ServiceRole: aws.String("EDPServiceRole"), // Required + SubnetId: aws.String("EDPSubnetId"), // Required + DataRearrangement: aws.String("DataRearrangement"), + DataSchema: aws.String("DataSchema"), + DataSchemaUri: aws.String("S3Url"), + }, + RoleARN: aws.String("RoleARN"), // Required + ComputeStatistics: aws.Bool(true), + DataSourceName: aws.String("EntityName"), + } + resp, err := svc.CreateDataSourceFromRDS(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleMachineLearning_CreateDataSourceFromRedshift() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.CreateDataSourceFromRedshiftInput{ + DataSourceId: aws.String("EntityId"), // Required + DataSpec: &machinelearning.RedshiftDataSpec{ // Required + DatabaseCredentials: &machinelearning.RedshiftDatabaseCredentials{ // Required + Password: aws.String("RedshiftDatabasePassword"), // Required + Username: aws.String("RedshiftDatabaseUsername"), // Required + }, + DatabaseInformation: &machinelearning.RedshiftDatabase{ // Required + ClusterIdentifier: aws.String("RedshiftClusterIdentifier"), // Required + DatabaseName: aws.String("RedshiftDatabaseName"), // Required + }, + S3StagingLocation: aws.String("S3Url"), // Required + SelectSqlQuery: aws.String("RedshiftSelectSqlQuery"), // Required + DataRearrangement: aws.String("DataRearrangement"), + DataSchema: aws.String("DataSchema"), + DataSchemaUri: aws.String("S3Url"), + }, + RoleARN: aws.String("RoleARN"), // Required + ComputeStatistics: aws.Bool(true), + DataSourceName: aws.String("EntityName"), + } + resp, err := svc.CreateDataSourceFromRedshift(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleMachineLearning_CreateDataSourceFromS3() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.CreateDataSourceFromS3Input{ + DataSourceId: aws.String("EntityId"), // Required + DataSpec: &machinelearning.S3DataSpec{ // Required + DataLocationS3: aws.String("S3Url"), // Required + DataRearrangement: aws.String("DataRearrangement"), + DataSchema: aws.String("DataSchema"), + DataSchemaLocationS3: aws.String("S3Url"), + }, + ComputeStatistics: aws.Bool(true), + DataSourceName: aws.String("EntityName"), + } + resp, err := svc.CreateDataSourceFromS3(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleMachineLearning_CreateEvaluation() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.CreateEvaluationInput{ + EvaluationDataSourceId: aws.String("EntityId"), // Required + EvaluationId: aws.String("EntityId"), // Required + MLModelId: aws.String("EntityId"), // Required + EvaluationName: aws.String("EntityName"), + } + resp, err := svc.CreateEvaluation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleMachineLearning_CreateMLModel() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.CreateMLModelInput{ + MLModelId: aws.String("EntityId"), // Required + MLModelType: aws.String("MLModelType"), // Required + TrainingDataSourceId: aws.String("EntityId"), // Required + MLModelName: aws.String("EntityName"), + Parameters: map[string]*string{ + "Key": aws.String("StringType"), // Required + // More values... + }, + Recipe: aws.String("Recipe"), + RecipeUri: aws.String("S3Url"), + } + resp, err := svc.CreateMLModel(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleMachineLearning_CreateRealtimeEndpoint() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.CreateRealtimeEndpointInput{ + MLModelId: aws.String("EntityId"), // Required + } + resp, err := svc.CreateRealtimeEndpoint(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleMachineLearning_DeleteBatchPrediction() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.DeleteBatchPredictionInput{ + BatchPredictionId: aws.String("EntityId"), // Required + } + resp, err := svc.DeleteBatchPrediction(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleMachineLearning_DeleteDataSource() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.DeleteDataSourceInput{ + DataSourceId: aws.String("EntityId"), // Required + } + resp, err := svc.DeleteDataSource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleMachineLearning_DeleteEvaluation() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.DeleteEvaluationInput{ + EvaluationId: aws.String("EntityId"), // Required + } + resp, err := svc.DeleteEvaluation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleMachineLearning_DeleteMLModel() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.DeleteMLModelInput{ + MLModelId: aws.String("EntityId"), // Required + } + resp, err := svc.DeleteMLModel(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleMachineLearning_DeleteRealtimeEndpoint() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.DeleteRealtimeEndpointInput{ + MLModelId: aws.String("EntityId"), // Required + } + resp, err := svc.DeleteRealtimeEndpoint(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleMachineLearning_DeleteTags() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.DeleteTagsInput{ + ResourceId: aws.String("EntityId"), // Required + ResourceType: aws.String("TaggableResourceType"), // Required + TagKeys: []*string{ // Required + aws.String("TagKey"), // Required + // More values... + }, + } + resp, err := svc.DeleteTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleMachineLearning_DescribeBatchPredictions() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.DescribeBatchPredictionsInput{ + EQ: aws.String("ComparatorValue"), + FilterVariable: aws.String("BatchPredictionFilterVariable"), + GE: aws.String("ComparatorValue"), + GT: aws.String("ComparatorValue"), + LE: aws.String("ComparatorValue"), + LT: aws.String("ComparatorValue"), + Limit: aws.Int64(1), + NE: aws.String("ComparatorValue"), + NextToken: aws.String("StringType"), + Prefix: aws.String("ComparatorValue"), + SortOrder: aws.String("SortOrder"), + } + resp, err := svc.DescribeBatchPredictions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleMachineLearning_DescribeDataSources() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.DescribeDataSourcesInput{ + EQ: aws.String("ComparatorValue"), + FilterVariable: aws.String("DataSourceFilterVariable"), + GE: aws.String("ComparatorValue"), + GT: aws.String("ComparatorValue"), + LE: aws.String("ComparatorValue"), + LT: aws.String("ComparatorValue"), + Limit: aws.Int64(1), + NE: aws.String("ComparatorValue"), + NextToken: aws.String("StringType"), + Prefix: aws.String("ComparatorValue"), + SortOrder: aws.String("SortOrder"), + } + resp, err := svc.DescribeDataSources(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleMachineLearning_DescribeEvaluations() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.DescribeEvaluationsInput{ + EQ: aws.String("ComparatorValue"), + FilterVariable: aws.String("EvaluationFilterVariable"), + GE: aws.String("ComparatorValue"), + GT: aws.String("ComparatorValue"), + LE: aws.String("ComparatorValue"), + LT: aws.String("ComparatorValue"), + Limit: aws.Int64(1), + NE: aws.String("ComparatorValue"), + NextToken: aws.String("StringType"), + Prefix: aws.String("ComparatorValue"), + SortOrder: aws.String("SortOrder"), + } + resp, err := svc.DescribeEvaluations(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleMachineLearning_DescribeMLModels() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.DescribeMLModelsInput{ + EQ: aws.String("ComparatorValue"), + FilterVariable: aws.String("MLModelFilterVariable"), + GE: aws.String("ComparatorValue"), + GT: aws.String("ComparatorValue"), + LE: aws.String("ComparatorValue"), + LT: aws.String("ComparatorValue"), + Limit: aws.Int64(1), + NE: aws.String("ComparatorValue"), + NextToken: aws.String("StringType"), + Prefix: aws.String("ComparatorValue"), + SortOrder: aws.String("SortOrder"), + } + resp, err := svc.DescribeMLModels(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleMachineLearning_DescribeTags() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.DescribeTagsInput{ + ResourceId: aws.String("EntityId"), // Required + ResourceType: aws.String("TaggableResourceType"), // Required + } + resp, err := svc.DescribeTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleMachineLearning_GetBatchPrediction() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.GetBatchPredictionInput{ + BatchPredictionId: aws.String("EntityId"), // Required + } + resp, err := svc.GetBatchPrediction(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleMachineLearning_GetDataSource() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.GetDataSourceInput{ + DataSourceId: aws.String("EntityId"), // Required + Verbose: aws.Bool(true), + } + resp, err := svc.GetDataSource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleMachineLearning_GetEvaluation() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.GetEvaluationInput{ + EvaluationId: aws.String("EntityId"), // Required + } + resp, err := svc.GetEvaluation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleMachineLearning_GetMLModel() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.GetMLModelInput{ + MLModelId: aws.String("EntityId"), // Required + Verbose: aws.Bool(true), + } + resp, err := svc.GetMLModel(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleMachineLearning_Predict() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.PredictInput{ + MLModelId: aws.String("EntityId"), // Required + PredictEndpoint: aws.String("VipURL"), // Required + Record: map[string]*string{ // Required + "Key": aws.String("VariableValue"), // Required + // More values... + }, + } + resp, err := svc.Predict(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleMachineLearning_UpdateBatchPrediction() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.UpdateBatchPredictionInput{ + BatchPredictionId: aws.String("EntityId"), // Required + BatchPredictionName: aws.String("EntityName"), // Required + } + resp, err := svc.UpdateBatchPrediction(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleMachineLearning_UpdateDataSource() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.UpdateDataSourceInput{ + DataSourceId: aws.String("EntityId"), // Required + DataSourceName: aws.String("EntityName"), // Required + } + resp, err := svc.UpdateDataSource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleMachineLearning_UpdateEvaluation() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.UpdateEvaluationInput{ + EvaluationId: aws.String("EntityId"), // Required + EvaluationName: aws.String("EntityName"), // Required + } + resp, err := svc.UpdateEvaluation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleMachineLearning_UpdateMLModel() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.UpdateMLModelInput{ + MLModelId: aws.String("EntityId"), // Required + MLModelName: aws.String("EntityName"), + ScoreThreshold: aws.Float64(1.0), + } + resp, err := svc.UpdateMLModel(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/machinelearning/machinelearningiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/machinelearning/machinelearningiface/interface.go new file mode 100644 index 000000000..c104c6227 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/machinelearning/machinelearningiface/interface.go @@ -0,0 +1,134 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package machinelearningiface provides an interface for the Amazon Machine Learning. +package machinelearningiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/machinelearning" +) + +// MachineLearningAPI is the interface type for machinelearning.MachineLearning. +type MachineLearningAPI interface { + AddTagsRequest(*machinelearning.AddTagsInput) (*request.Request, *machinelearning.AddTagsOutput) + + AddTags(*machinelearning.AddTagsInput) (*machinelearning.AddTagsOutput, error) + + CreateBatchPredictionRequest(*machinelearning.CreateBatchPredictionInput) (*request.Request, *machinelearning.CreateBatchPredictionOutput) + + CreateBatchPrediction(*machinelearning.CreateBatchPredictionInput) (*machinelearning.CreateBatchPredictionOutput, error) + + CreateDataSourceFromRDSRequest(*machinelearning.CreateDataSourceFromRDSInput) (*request.Request, *machinelearning.CreateDataSourceFromRDSOutput) + + CreateDataSourceFromRDS(*machinelearning.CreateDataSourceFromRDSInput) (*machinelearning.CreateDataSourceFromRDSOutput, error) + + CreateDataSourceFromRedshiftRequest(*machinelearning.CreateDataSourceFromRedshiftInput) (*request.Request, *machinelearning.CreateDataSourceFromRedshiftOutput) + + CreateDataSourceFromRedshift(*machinelearning.CreateDataSourceFromRedshiftInput) (*machinelearning.CreateDataSourceFromRedshiftOutput, error) + + CreateDataSourceFromS3Request(*machinelearning.CreateDataSourceFromS3Input) (*request.Request, *machinelearning.CreateDataSourceFromS3Output) + + CreateDataSourceFromS3(*machinelearning.CreateDataSourceFromS3Input) (*machinelearning.CreateDataSourceFromS3Output, error) + + CreateEvaluationRequest(*machinelearning.CreateEvaluationInput) (*request.Request, *machinelearning.CreateEvaluationOutput) + + CreateEvaluation(*machinelearning.CreateEvaluationInput) (*machinelearning.CreateEvaluationOutput, error) + + CreateMLModelRequest(*machinelearning.CreateMLModelInput) (*request.Request, *machinelearning.CreateMLModelOutput) + + CreateMLModel(*machinelearning.CreateMLModelInput) (*machinelearning.CreateMLModelOutput, error) + + CreateRealtimeEndpointRequest(*machinelearning.CreateRealtimeEndpointInput) (*request.Request, *machinelearning.CreateRealtimeEndpointOutput) + + CreateRealtimeEndpoint(*machinelearning.CreateRealtimeEndpointInput) (*machinelearning.CreateRealtimeEndpointOutput, error) + + DeleteBatchPredictionRequest(*machinelearning.DeleteBatchPredictionInput) 
(*request.Request, *machinelearning.DeleteBatchPredictionOutput) + + DeleteBatchPrediction(*machinelearning.DeleteBatchPredictionInput) (*machinelearning.DeleteBatchPredictionOutput, error) + + DeleteDataSourceRequest(*machinelearning.DeleteDataSourceInput) (*request.Request, *machinelearning.DeleteDataSourceOutput) + + DeleteDataSource(*machinelearning.DeleteDataSourceInput) (*machinelearning.DeleteDataSourceOutput, error) + + DeleteEvaluationRequest(*machinelearning.DeleteEvaluationInput) (*request.Request, *machinelearning.DeleteEvaluationOutput) + + DeleteEvaluation(*machinelearning.DeleteEvaluationInput) (*machinelearning.DeleteEvaluationOutput, error) + + DeleteMLModelRequest(*machinelearning.DeleteMLModelInput) (*request.Request, *machinelearning.DeleteMLModelOutput) + + DeleteMLModel(*machinelearning.DeleteMLModelInput) (*machinelearning.DeleteMLModelOutput, error) + + DeleteRealtimeEndpointRequest(*machinelearning.DeleteRealtimeEndpointInput) (*request.Request, *machinelearning.DeleteRealtimeEndpointOutput) + + DeleteRealtimeEndpoint(*machinelearning.DeleteRealtimeEndpointInput) (*machinelearning.DeleteRealtimeEndpointOutput, error) + + DeleteTagsRequest(*machinelearning.DeleteTagsInput) (*request.Request, *machinelearning.DeleteTagsOutput) + + DeleteTags(*machinelearning.DeleteTagsInput) (*machinelearning.DeleteTagsOutput, error) + + DescribeBatchPredictionsRequest(*machinelearning.DescribeBatchPredictionsInput) (*request.Request, *machinelearning.DescribeBatchPredictionsOutput) + + DescribeBatchPredictions(*machinelearning.DescribeBatchPredictionsInput) (*machinelearning.DescribeBatchPredictionsOutput, error) + + DescribeBatchPredictionsPages(*machinelearning.DescribeBatchPredictionsInput, func(*machinelearning.DescribeBatchPredictionsOutput, bool) bool) error + + DescribeDataSourcesRequest(*machinelearning.DescribeDataSourcesInput) (*request.Request, *machinelearning.DescribeDataSourcesOutput) + + DescribeDataSources(*machinelearning.DescribeDataSourcesInput) (*machinelearning.DescribeDataSourcesOutput, error) + + DescribeDataSourcesPages(*machinelearning.DescribeDataSourcesInput, func(*machinelearning.DescribeDataSourcesOutput, bool) bool) error + + DescribeEvaluationsRequest(*machinelearning.DescribeEvaluationsInput) (*request.Request, *machinelearning.DescribeEvaluationsOutput) + + DescribeEvaluations(*machinelearning.DescribeEvaluationsInput) (*machinelearning.DescribeEvaluationsOutput, error) + + DescribeEvaluationsPages(*machinelearning.DescribeEvaluationsInput, func(*machinelearning.DescribeEvaluationsOutput, bool) bool) error + + DescribeMLModelsRequest(*machinelearning.DescribeMLModelsInput) (*request.Request, *machinelearning.DescribeMLModelsOutput) + + DescribeMLModels(*machinelearning.DescribeMLModelsInput) (*machinelearning.DescribeMLModelsOutput, error) + + DescribeMLModelsPages(*machinelearning.DescribeMLModelsInput, func(*machinelearning.DescribeMLModelsOutput, bool) bool) error + + DescribeTagsRequest(*machinelearning.DescribeTagsInput) (*request.Request, *machinelearning.DescribeTagsOutput) + + DescribeTags(*machinelearning.DescribeTagsInput) (*machinelearning.DescribeTagsOutput, error) + + GetBatchPredictionRequest(*machinelearning.GetBatchPredictionInput) (*request.Request, *machinelearning.GetBatchPredictionOutput) + + GetBatchPrediction(*machinelearning.GetBatchPredictionInput) (*machinelearning.GetBatchPredictionOutput, error) + + GetDataSourceRequest(*machinelearning.GetDataSourceInput) (*request.Request, *machinelearning.GetDataSourceOutput) + + 
GetDataSource(*machinelearning.GetDataSourceInput) (*machinelearning.GetDataSourceOutput, error)
+
+	GetEvaluationRequest(*machinelearning.GetEvaluationInput) (*request.Request, *machinelearning.GetEvaluationOutput)
+
+	GetEvaluation(*machinelearning.GetEvaluationInput) (*machinelearning.GetEvaluationOutput, error)
+
+	GetMLModelRequest(*machinelearning.GetMLModelInput) (*request.Request, *machinelearning.GetMLModelOutput)
+
+	GetMLModel(*machinelearning.GetMLModelInput) (*machinelearning.GetMLModelOutput, error)
+
+	PredictRequest(*machinelearning.PredictInput) (*request.Request, *machinelearning.PredictOutput)
+
+	Predict(*machinelearning.PredictInput) (*machinelearning.PredictOutput, error)
+
+	UpdateBatchPredictionRequest(*machinelearning.UpdateBatchPredictionInput) (*request.Request, *machinelearning.UpdateBatchPredictionOutput)
+
+	UpdateBatchPrediction(*machinelearning.UpdateBatchPredictionInput) (*machinelearning.UpdateBatchPredictionOutput, error)
+
+	UpdateDataSourceRequest(*machinelearning.UpdateDataSourceInput) (*request.Request, *machinelearning.UpdateDataSourceOutput)
+
+	UpdateDataSource(*machinelearning.UpdateDataSourceInput) (*machinelearning.UpdateDataSourceOutput, error)
+
+	UpdateEvaluationRequest(*machinelearning.UpdateEvaluationInput) (*request.Request, *machinelearning.UpdateEvaluationOutput)
+
+	UpdateEvaluation(*machinelearning.UpdateEvaluationInput) (*machinelearning.UpdateEvaluationOutput, error)
+
+	UpdateMLModelRequest(*machinelearning.UpdateMLModelInput) (*request.Request, *machinelearning.UpdateMLModelOutput)
+
+	UpdateMLModel(*machinelearning.UpdateMLModelInput) (*machinelearning.UpdateMLModelOutput, error)
+}
+
+var _ MachineLearningAPI = (*machinelearning.MachineLearning)(nil)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/machinelearning/service.go b/vendor/github.com/aws/aws-sdk-go/service/machinelearning/service.go
new file mode 100644
index 000000000..6ea471742
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/machinelearning/service.go
@@ -0,0 +1,88 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package machinelearning
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+)
+
+// Definition of the public APIs exposed by Amazon Machine Learning.
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type MachineLearning struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "machinelearning"
+
+// New creates a new instance of the MachineLearning client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create a MachineLearning client from just a session.
+// svc := machinelearning.New(mySession) +// +// // Create a MachineLearning client with additional configuration +// svc := machinelearning.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *MachineLearning { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *MachineLearning { + svc := &MachineLearning{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-12-12", + JSONVersion: "1.1", + TargetPrefix: "AmazonML_20141212", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a MachineLearning operation and runs any +// custom request initialization. +func (c *MachineLearning) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/machinelearning/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/machinelearning/waiters.go new file mode 100644 index 000000000..924767f9e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/machinelearning/waiters.go @@ -0,0 +1,123 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
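+//
+// Each waiter in this file polls its Describe operation every 30 seconds,
+// for up to 60 attempts, until all matched results report COMPLETED (success)
+// or any reports FAILED (failure). A minimal usage sketch (the data source
+// name is hypothetical):
+//
+//    err := svc.WaitUntilDataSourceAvailable(&DescribeDataSourcesInput{
+//        FilterVariable: aws.String(DataSourceFilterVariableName),
+//        EQ:             aws.String("my-training-datasource"),
+//    })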
+ +package machinelearning + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *MachineLearning) WaitUntilBatchPredictionAvailable(input *DescribeBatchPredictionsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeBatchPredictions", + Delay: 30, + MaxAttempts: 60, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Results[].Status", + Expected: "COMPLETED", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Results[].Status", + Expected: "FAILED", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *MachineLearning) WaitUntilDataSourceAvailable(input *DescribeDataSourcesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeDataSources", + Delay: 30, + MaxAttempts: 60, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Results[].Status", + Expected: "COMPLETED", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Results[].Status", + Expected: "FAILED", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *MachineLearning) WaitUntilEvaluationAvailable(input *DescribeEvaluationsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeEvaluations", + Delay: 30, + MaxAttempts: 60, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Results[].Status", + Expected: "COMPLETED", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Results[].Status", + Expected: "FAILED", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *MachineLearning) WaitUntilMLModelAvailable(input *DescribeMLModelsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeMLModels", + Delay: 30, + MaxAttempts: 60, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Results[].Status", + Expected: "COMPLETED", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Results[].Status", + Expected: "FAILED", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/marketplacecommerceanalytics/api.go b/vendor/github.com/aws/aws-sdk-go/service/marketplacecommerceanalytics/api.go new file mode 100644 index 000000000..3eb8071de --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/marketplacecommerceanalytics/api.go @@ -0,0 +1,241 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package marketplacecommerceanalytics provides a client for AWS Marketplace Commerce Analytics. +package marketplacecommerceanalytics + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opGenerateDataSet = "GenerateDataSet" + +// GenerateDataSetRequest generates a "aws/request.Request" representing the +// client's request for the GenerateDataSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If
+// you just want the service response, call the GenerateDataSet method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the GenerateDataSetRequest method.
+//    req, resp := client.GenerateDataSetRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *MarketplaceCommerceAnalytics) GenerateDataSetRequest(input *GenerateDataSetInput) (req *request.Request, output *GenerateDataSetOutput) {
+	op := &request.Operation{
+		Name:       opGenerateDataSet,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GenerateDataSetInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &GenerateDataSetOutput{}
+	req.Data = output
+	return
+}
+
+// Given a data set type and data set publication date, asynchronously publishes
+// the requested data set to the specified S3 bucket and notifies the specified
+// SNS topic once the data is available. Returns a unique request identifier
+// that can be used to correlate requests with notifications from the SNS topic.
+// Data sets will be published in comma-separated values (CSV) format with the
+// file name {data_set_type}_YYYY-MM-DD.csv. If a file with the same name already
+// exists (e.g. if the same data set is requested twice), the original file
+// will be overwritten by the new file. Requires a Role with an attached permissions
+// policy providing Allow permissions for the following actions: s3:PutObject,
+// s3:GetBucketLocation, sns:GetTopicAttributes, sns:Publish, iam:GetRolePolicy.
+func (c *MarketplaceCommerceAnalytics) GenerateDataSet(input *GenerateDataSetInput) (*GenerateDataSetOutput, error) {
+	req, out := c.GenerateDataSetRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// Container for the parameters to the GenerateDataSet operation.
+type GenerateDataSetInput struct {
+	_ struct{} `type:"structure"`
+
+	// (Optional) Key-value pairs which will be returned, unmodified, in the Amazon
+	// SNS notification message and the data set metadata file. These key-value
+	// pairs can be used to correlate responses with tracking information from
+	// other systems.
+	CustomerDefinedValues map[string]*string `locationName:"customerDefinedValues" min:"1" type:"map"`
+
+	// The date a data set was published. For daily data sets, provide a date with
+	// day-level granularity for the desired day. For weekly data sets, provide
+	// a date with day-level granularity within the desired week (the day value
+	// will be ignored). For monthly data sets, provide a date with month-level
+	// granularity for the desired month (the day value will be ignored).
+	DataSetPublicationDate *time.Time `locationName:"dataSetPublicationDate" type:"timestamp" timestampFormat:"unix" required:"true"`
+
+	// The desired data set type.
+	//
+	//    customer_subscriber_hourly_monthly_subscriptions - Available daily
+	//    by 5:00 PM Pacific Time since 2014-07-21.
+	//    customer_subscriber_annual_subscriptions - Available daily by 5:00
+	//    PM Pacific Time since 2014-07-21.
+	//    daily_business_usage_by_instance_type - Available daily by 5:00 PM
+	//    Pacific Time since 2015-01-26.
+	//    daily_business_fees - Available daily by 5:00 PM Pacific Time since
+	//    2015-01-26.
+	//    daily_business_free_trial_conversions - Available daily by 5:00 PM
+	//    Pacific Time since 2015-01-26.
+	//    daily_business_new_instances - Available daily by 5:00 PM Pacific
+	//    Time since 2015-01-26.
+	//    daily_business_new_product_subscribers - Available daily by 5:00 PM
+	//    Pacific Time since 2015-01-26.
+	//    daily_business_canceled_product_subscribers - Available daily by
+	//    5:00 PM Pacific Time since 2015-01-26.
+	//    monthly_revenue_billing_and_revenue_data - Available monthly on the
+	//    4th day of the month by 5:00 PM Pacific Time since 2015-02.
+	//    monthly_revenue_annual_subscriptions - Available monthly on the 4th
+	//    day of the month by 5:00 PM Pacific Time since 2015-02.
+	//    disbursed_amount_by_product - Available every 30 days by 5:00 PM
+	//    Pacific Time since 2015-01-26.
+	//    disbursed_amount_by_product_with_uncollected_funds - This data set
+	//    is only available from 2012-04-19 until 2015-01-25. After 2015-01-25,
+	//    this data set was split into three data sets: disbursed_amount_by_product,
+	//    disbursed_amount_by_age_of_uncollected_funds, and
+	//    disbursed_amount_by_age_of_disbursed_funds.
+	//    disbursed_amount_by_customer_geo - Available every 30 days by 5:00
+	//    PM Pacific Time since 2012-04-19.
+	//    disbursed_amount_by_age_of_uncollected_funds - Available every 30
+	//    days by 5:00 PM Pacific Time since 2015-01-26.
+	//    disbursed_amount_by_age_of_disbursed_funds - Available every 30 days
+	//    by 5:00 PM Pacific Time since 2015-01-26.
+	//    customer_profile_by_industry - Available daily by 5:00 PM Pacific
+	//    Time since 2015-10-01.
+	//    customer_profile_by_revenue - Available daily by 5:00 PM Pacific
+	//    Time since 2015-10-01.
+	//    customer_profile_by_geography - Available daily by 5:00 PM Pacific
+	//    Time since 2015-10-01.
+	DataSetType *string `locationName:"dataSetType" min:"1" type:"string" required:"true" enum:"DataSetType"`
+
+	// The name (friendly name, not ARN) of the destination S3 bucket.
+	DestinationS3BucketName *string `locationName:"destinationS3BucketName" min:"1" type:"string" required:"true"`
+
+	// (Optional) The desired S3 prefix for the published data set, similar to a
+	// directory path in standard file systems. For example, if given the bucket
+	// name "mybucket" and the prefix "myprefix/mydatasets", the output file "outputfile"
+	// would be published to "s3://mybucket/myprefix/mydatasets/outputfile". If
+	// the prefix directory structure does not exist, it will be created. If no
+	// prefix is provided, the data set will be published to the S3 bucket root.
+	DestinationS3Prefix *string `locationName:"destinationS3Prefix" type:"string"`
+
+	// The Amazon Resource Name (ARN) of the Role with an attached permissions policy
+	// to interact with the provided AWS services.
+	RoleNameArn *string `locationName:"roleNameArn" min:"1" type:"string" required:"true"`
+
+	// Amazon Resource Name (ARN) for the SNS Topic that will be notified when the
+	// data set has been published or if an error has occurred.
+	SnsTopicArn *string `locationName:"snsTopicArn" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GenerateDataSetInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GenerateDataSetInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
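+// The SDK's parameter-validation handler also runs this check automatically
+// before a request is sent, so a sketch like the following (hypothetical empty
+// input) fails fast with an ErrInvalidParams aggregate and no network call:
+//
+//    if err := (&GenerateDataSetInput{}).Validate(); err != nil {
+//        fmt.Println(err) // lists each missing required field
+//    }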
+func (s *GenerateDataSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GenerateDataSetInput"} + if s.CustomerDefinedValues != nil && len(s.CustomerDefinedValues) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CustomerDefinedValues", 1)) + } + if s.DataSetPublicationDate == nil { + invalidParams.Add(request.NewErrParamRequired("DataSetPublicationDate")) + } + if s.DataSetType == nil { + invalidParams.Add(request.NewErrParamRequired("DataSetType")) + } + if s.DataSetType != nil && len(*s.DataSetType) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DataSetType", 1)) + } + if s.DestinationS3BucketName == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationS3BucketName")) + } + if s.DestinationS3BucketName != nil && len(*s.DestinationS3BucketName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DestinationS3BucketName", 1)) + } + if s.RoleNameArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleNameArn")) + } + if s.RoleNameArn != nil && len(*s.RoleNameArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleNameArn", 1)) + } + if s.SnsTopicArn == nil { + invalidParams.Add(request.NewErrParamRequired("SnsTopicArn")) + } + if s.SnsTopicArn != nil && len(*s.SnsTopicArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SnsTopicArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Container for the result of the GenerateDataSet operation. +type GenerateDataSetOutput struct { + _ struct{} `type:"structure"` + + // A unique identifier representing a specific request to the GenerateDataSet + // operation. This identifier can be used to correlate a request with notifications + // from the SNS topic. + DataSetRequestId *string `locationName:"dataSetRequestId" type:"string"` +} + +// String returns the string representation +func (s GenerateDataSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GenerateDataSetOutput) GoString() string { + return s.String() +} + +const ( + // @enum DataSetType + DataSetTypeCustomerSubscriberHourlyMonthlySubscriptions = "customer_subscriber_hourly_monthly_subscriptions" + // @enum DataSetType + DataSetTypeCustomerSubscriberAnnualSubscriptions = "customer_subscriber_annual_subscriptions" + // @enum DataSetType + DataSetTypeDailyBusinessUsageByInstanceType = "daily_business_usage_by_instance_type" + // @enum DataSetType + DataSetTypeDailyBusinessFees = "daily_business_fees" + // @enum DataSetType + DataSetTypeDailyBusinessFreeTrialConversions = "daily_business_free_trial_conversions" + // @enum DataSetType + DataSetTypeDailyBusinessNewInstances = "daily_business_new_instances" + // @enum DataSetType + DataSetTypeDailyBusinessNewProductSubscribers = "daily_business_new_product_subscribers" + // @enum DataSetType + DataSetTypeDailyBusinessCanceledProductSubscribers = "daily_business_canceled_product_subscribers" + // @enum DataSetType + DataSetTypeMonthlyRevenueBillingAndRevenueData = "monthly_revenue_billing_and_revenue_data" + // @enum DataSetType + DataSetTypeMonthlyRevenueAnnualSubscriptions = "monthly_revenue_annual_subscriptions" + // @enum DataSetType + DataSetTypeDisbursedAmountByProduct = "disbursed_amount_by_product" + // @enum DataSetType + DataSetTypeDisbursedAmountByProductWithUncollectedFunds = "disbursed_amount_by_product_with_uncollected_funds" + // @enum DataSetType + DataSetTypeDisbursedAmountByCustomerGeo = "disbursed_amount_by_customer_geo" + // @enum DataSetType + 
DataSetTypeDisbursedAmountByAgeOfUncollectedFunds = "disbursed_amount_by_age_of_uncollected_funds" + // @enum DataSetType + DataSetTypeDisbursedAmountByAgeOfDisbursedFunds = "disbursed_amount_by_age_of_disbursed_funds" + // @enum DataSetType + DataSetTypeCustomerProfileByIndustry = "customer_profile_by_industry" + // @enum DataSetType + DataSetTypeCustomerProfileByRevenue = "customer_profile_by_revenue" + // @enum DataSetType + DataSetTypeCustomerProfileByGeography = "customer_profile_by_geography" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/marketplacecommerceanalytics/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/marketplacecommerceanalytics/examples_test.go new file mode 100644 index 000000000..319af8552 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/marketplacecommerceanalytics/examples_test.go @@ -0,0 +1,44 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package marketplacecommerceanalytics_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/marketplacecommerceanalytics" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleMarketplaceCommerceAnalytics_GenerateDataSet() { + svc := marketplacecommerceanalytics.New(session.New()) + + params := &marketplacecommerceanalytics.GenerateDataSetInput{ + DataSetPublicationDate: aws.Time(time.Now()), // Required + DataSetType: aws.String("DataSetType"), // Required + DestinationS3BucketName: aws.String("DestinationS3BucketName"), // Required + RoleNameArn: aws.String("RoleNameArn"), // Required + SnsTopicArn: aws.String("SnsTopicArn"), // Required + CustomerDefinedValues: map[string]*string{ + "Key": aws.String("OptionalValue"), // Required + // More values... + }, + DestinationS3Prefix: aws.String("DestinationS3Prefix"), + } + resp, err := svc.GenerateDataSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/marketplacecommerceanalytics/marketplacecommerceanalyticsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/marketplacecommerceanalytics/marketplacecommerceanalyticsiface/interface.go new file mode 100644 index 000000000..825d62f59 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/marketplacecommerceanalytics/marketplacecommerceanalyticsiface/interface.go @@ -0,0 +1,18 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package marketplacecommerceanalyticsiface provides an interface for the AWS Marketplace Commerce Analytics. +package marketplacecommerceanalyticsiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/marketplacecommerceanalytics" +) + +// MarketplaceCommerceAnalyticsAPI is the interface type for marketplacecommerceanalytics.MarketplaceCommerceAnalytics. 
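+//
+// A common use of this interface is stubbing the client in unit tests; a
+// minimal sketch (the mock type and canned output are hypothetical):
+//
+//    type mockMCA struct {
+//        marketplacecommerceanalyticsiface.MarketplaceCommerceAnalyticsAPI
+//    }
+//
+//    func (m mockMCA) GenerateDataSet(in *marketplacecommerceanalytics.GenerateDataSetInput) (*marketplacecommerceanalytics.GenerateDataSetOutput, error) {
+//        return &marketplacecommerceanalytics.GenerateDataSetOutput{}, nil
+//    }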
+type MarketplaceCommerceAnalyticsAPI interface {
+	GenerateDataSetRequest(*marketplacecommerceanalytics.GenerateDataSetInput) (*request.Request, *marketplacecommerceanalytics.GenerateDataSetOutput)
+
+	GenerateDataSet(*marketplacecommerceanalytics.GenerateDataSetInput) (*marketplacecommerceanalytics.GenerateDataSetOutput, error)
+}
+
+var _ MarketplaceCommerceAnalyticsAPI = (*marketplacecommerceanalytics.MarketplaceCommerceAnalytics)(nil)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/marketplacecommerceanalytics/service.go b/vendor/github.com/aws/aws-sdk-go/service/marketplacecommerceanalytics/service.go
new file mode 100644
index 000000000..0c257635f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/marketplacecommerceanalytics/service.go
@@ -0,0 +1,89 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package marketplacecommerceanalytics
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+)
+
+// Provides AWS Marketplace business intelligence data on-demand.
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type MarketplaceCommerceAnalytics struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "marketplacecommerceanalytics"
+
+// New creates a new instance of the MarketplaceCommerceAnalytics client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create a MarketplaceCommerceAnalytics client from just a session.
+//     svc := marketplacecommerceanalytics.New(mySession)
+//
+//     // Create a MarketplaceCommerceAnalytics client with additional configuration
+//     svc := marketplacecommerceanalytics.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *MarketplaceCommerceAnalytics {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *MarketplaceCommerceAnalytics { + svc := &MarketplaceCommerceAnalytics{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningName: "marketplacecommerceanalytics", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-07-01", + JSONVersion: "1.1", + TargetPrefix: "MarketplaceCommerceAnalytics20150701", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a MarketplaceCommerceAnalytics operation and runs any +// custom request initialization. +func (c *MarketplaceCommerceAnalytics) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/marketplacemetering/api.go b/vendor/github.com/aws/aws-sdk-go/service/marketplacemetering/api.go new file mode 100644 index 000000000..305aa6714 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/marketplacemetering/api.go @@ -0,0 +1,142 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package marketplacemetering provides a client for AWSMarketplace Metering. +package marketplacemetering + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opMeterUsage = "MeterUsage" + +// MeterUsageRequest generates a "aws/request.Request" representing the +// client's request for the MeterUsage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the MeterUsage method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the MeterUsageRequest method. +// req, resp := client.MeterUsageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *MarketplaceMetering) MeterUsageRequest(input *MeterUsageInput) (req *request.Request, output *MeterUsageOutput) { + op := &request.Operation{ + Name: opMeterUsage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &MeterUsageInput{} + } + + req = c.newRequest(op, input, output) + output = &MeterUsageOutput{} + req.Data = output + return +} + +// API to emit metering records. For identical requests, the API is idempotent. +// It simply returns the metering record ID. 
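+//
+// A minimal usage sketch (all values hypothetical):
+//
+//    out, err := svc.MeterUsage(&MeterUsageInput{
+//        DryRun:         aws.Bool(false),
+//        ProductCode:    aws.String("example-product-code"),
+//        Timestamp:      aws.Time(time.Now()),
+//        UsageDimension: aws.String("example-dimension"),
+//        UsageQuantity:  aws.Int64(1),
+//    })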
+func (c *MarketplaceMetering) MeterUsage(input *MeterUsageInput) (*MeterUsageOutput, error) { + req, out := c.MeterUsageRequest(input) + err := req.Send() + return out, err +} + +type MeterUsageInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the permissions required for the action, but does + // not make the request. If you have the permissions, the request returns DryRunOperation; + // otherwise, it returns UnauthorizedException. + DryRun *bool `type:"boolean" required:"true"` + + // Product code is used to uniquely identify a product in AWS Marketplace. The + // product code should be the same as the one used during the publishing of + // a new product. + ProductCode *string `min:"1" type:"string" required:"true"` + + // Timestamp of the hour, recorded in UTC. The seconds and milliseconds portions + // of the timestamp will be ignored. + Timestamp *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` + + // It will be one of the 'fcp dimension name' provided during the publishing + // of the product. + UsageDimension *string `min:"1" type:"string" required:"true"` + + // Consumption value for the hour. + UsageQuantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s MeterUsageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MeterUsageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MeterUsageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MeterUsageInput"} + if s.DryRun == nil { + invalidParams.Add(request.NewErrParamRequired("DryRun")) + } + if s.ProductCode == nil { + invalidParams.Add(request.NewErrParamRequired("ProductCode")) + } + if s.ProductCode != nil && len(*s.ProductCode) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ProductCode", 1)) + } + if s.Timestamp == nil { + invalidParams.Add(request.NewErrParamRequired("Timestamp")) + } + if s.UsageDimension == nil { + invalidParams.Add(request.NewErrParamRequired("UsageDimension")) + } + if s.UsageDimension != nil && len(*s.UsageDimension) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UsageDimension", 1)) + } + if s.UsageQuantity == nil { + invalidParams.Add(request.NewErrParamRequired("UsageQuantity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type MeterUsageOutput struct { + _ struct{} `type:"structure"` + + MeteringRecordId *string `type:"string"` +} + +// String returns the string representation +func (s MeterUsageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MeterUsageOutput) GoString() string { + return s.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/marketplacemetering/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/marketplacemetering/examples_test.go new file mode 100644 index 000000000..c608eb476 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/marketplacemetering/examples_test.go @@ -0,0 +1,39 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package marketplacemetering_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/marketplacemetering" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleMarketplaceMetering_MeterUsage() { + svc := marketplacemetering.New(session.New()) + + params := &marketplacemetering.MeterUsageInput{ + DryRun: aws.Bool(true), // Required + ProductCode: aws.String("ProductCode"), // Required + Timestamp: aws.Time(time.Now()), // Required + UsageDimension: aws.String("UsageDimension"), // Required + UsageQuantity: aws.Int64(1), // Required + } + resp, err := svc.MeterUsage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/marketplacemetering/marketplacemeteringiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/marketplacemetering/marketplacemeteringiface/interface.go new file mode 100644 index 000000000..f4957bf85 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/marketplacemetering/marketplacemeteringiface/interface.go @@ -0,0 +1,18 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package marketplacemeteringiface provides an interface for the AWSMarketplace Metering. +package marketplacemeteringiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/marketplacemetering" +) + +// MarketplaceMeteringAPI is the interface type for marketplacemetering.MarketplaceMetering. +type MarketplaceMeteringAPI interface { + MeterUsageRequest(*marketplacemetering.MeterUsageInput) (*request.Request, *marketplacemetering.MeterUsageOutput) + + MeterUsage(*marketplacemetering.MeterUsageInput) (*marketplacemetering.MeterUsageOutput, error) +} + +var _ MarketplaceMeteringAPI = (*marketplacemetering.MarketplaceMetering)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/marketplacemetering/service.go b/vendor/github.com/aws/aws-sdk-go/service/marketplacemetering/service.go new file mode 100644 index 000000000..9ab3a08c8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/marketplacemetering/service.go @@ -0,0 +1,97 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package marketplacemetering + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// This reference provides descriptions of the low-level AWS Marketplace Metering +// Service API. +// +// AWS Marketplace sellers can use this API to submit usage data for custom +// usage dimensions. +// +// Submitting Metering Records +// +// MeterUsage- Submits the metering record for a Marketplace product. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type MarketplaceMetering struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. 
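+// Note that the client's endpoint is derived from this value, while requests
+// are signed with the separate signing name "aws-marketplace" configured in
+// newClient below.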
+const ServiceName = "metering.marketplace" + +// New creates a new instance of the MarketplaceMetering client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a MarketplaceMetering client from just a session. +// svc := marketplacemetering.New(mySession) +// +// // Create a MarketplaceMetering client with additional configuration +// svc := marketplacemetering.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *MarketplaceMetering { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *MarketplaceMetering { + svc := &MarketplaceMetering{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningName: "aws-marketplace", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2016-01-14", + JSONVersion: "1.1", + TargetPrefix: "AWSMPMeteringService", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a MarketplaceMetering operation and runs any +// custom request initialization. +func (c *MarketplaceMetering) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/mobileanalytics/api.go b/vendor/github.com/aws/aws-sdk-go/service/mobileanalytics/api.go new file mode 100644 index 000000000..48f02880e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/mobileanalytics/api.go @@ -0,0 +1,240 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package mobileanalytics provides a client for Amazon Mobile Analytics. +package mobileanalytics + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restjson" +) + +const opPutEvents = "PutEvents" + +// PutEventsRequest generates a "aws/request.Request" representing the +// client's request for the PutEvents operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutEvents method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the PutEventsRequest method. +// req, resp := client.PutEventsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *MobileAnalytics) PutEventsRequest(input *PutEventsInput) (req *request.Request, output *PutEventsOutput) { + op := &request.Operation{ + Name: opPutEvents, + HTTPMethod: "POST", + HTTPPath: "/2014-06-05/events", + } + + if input == nil { + input = &PutEventsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutEventsOutput{} + req.Data = output + return +} + +// The PutEvents operation records one or more events. You can have up to 1,500 +// unique custom events per app, any combination of up to 40 attributes and +// metrics per custom event, and any number of attribute or metric values. +func (c *MobileAnalytics) PutEvents(input *PutEventsInput) (*PutEventsOutput, error) { + req, out := c.PutEventsRequest(input) + err := req.Send() + return out, err +} + +// A JSON object representing a batch of unique event occurrences in your app. +type Event struct { + _ struct{} `type:"structure"` + + // A collection of key-value pairs that give additional context to the event. + // The key-value pairs are specified by the developer. + // + // This collection can be empty or the attribute object can be omitted. + Attributes map[string]*string `locationName:"attributes" type:"map"` + + // A name signifying an event that occurred in your app. This is used for grouping + // and aggregating like events together for reporting purposes. + EventType *string `locationName:"eventType" min:"1" type:"string" required:"true"` + + // A collection of key-value pairs that gives additional, measurable context + // to the event. The key-value pairs are specified by the developer. + // + // This collection can be empty or the attribute object can be omitted. + Metrics map[string]*float64 `locationName:"metrics" type:"map"` + + // The session the event occured within. + Session *Session `locationName:"session" type:"structure"` + + // The time the event occurred in ISO 8601 standard date time format. For example, + // 2014-06-30T19:07:47.885Z + Timestamp *string `locationName:"timestamp" type:"string" required:"true"` + + // The version of the event. + Version *string `locationName:"version" min:"1" type:"string"` +} + +// String returns the string representation +func (s Event) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Event) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
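+//
+// A minimal sketch of pre-flight validation, with placeholder values:
+//
+//    ev := &mobileanalytics.Event{
+//        EventType: aws.String("levelComplete"),
+//        Timestamp: aws.String(time.Now().UTC().Format(time.RFC3339)),
+//    }
+//    if err := ev.Validate(); err != nil {
+//        // err is a request.ErrInvalidParams naming each failing field
+//    }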
+func (s *Event) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Event"} + if s.EventType == nil { + invalidParams.Add(request.NewErrParamRequired("EventType")) + } + if s.EventType != nil && len(*s.EventType) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EventType", 1)) + } + if s.Timestamp == nil { + invalidParams.Add(request.NewErrParamRequired("Timestamp")) + } + if s.Version != nil && len(*s.Version) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Version", 1)) + } + if s.Session != nil { + if err := s.Session.Validate(); err != nil { + invalidParams.AddNested("Session", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A container for the data needed for a PutEvent operation +type PutEventsInput struct { + _ struct{} `type:"structure"` + + // The client context including the client ID, app title, app version and package + // name. + ClientContext *string `location:"header" locationName:"x-amz-Client-Context" type:"string" required:"true"` + + // The encoding used for the client context. + ClientContextEncoding *string `location:"header" locationName:"x-amz-Client-Context-Encoding" type:"string"` + + // An array of Event JSON objects + Events []*Event `locationName:"events" type:"list" required:"true"` +} + +// String returns the string representation +func (s PutEventsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutEventsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutEventsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutEventsInput"} + if s.ClientContext == nil { + invalidParams.Add(request.NewErrParamRequired("ClientContext")) + } + if s.Events == nil { + invalidParams.Add(request.NewErrParamRequired("Events")) + } + if s.Events != nil { + for i, v := range s.Events { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Events", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutEventsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutEventsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutEventsOutput) GoString() string { + return s.String() +} + +// Describes the session. Session information is required on ALL events. +type Session struct { + _ struct{} `type:"structure"` + + // The duration of the session. + Duration *int64 `locationName:"duration" type:"long"` + + // A unique identifier for the session + Id *string `locationName:"id" min:"1" type:"string"` + + // The time the event started in ISO 8601 standard date time format. For example, + // 2014-06-30T19:07:47.885Z + StartTimestamp *string `locationName:"startTimestamp" type:"string"` + + // The time the event terminated in ISO 8601 standard date time format. 
For + // example, 2014-06-30T19:07:47.885Z + StopTimestamp *string `locationName:"stopTimestamp" type:"string"` +} + +// String returns the string representation +func (s Session) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Session) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Session) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Session"} + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/mobileanalytics/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/mobileanalytics/examples_test.go new file mode 100644 index 000000000..38cd3ebb5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/mobileanalytics/examples_test.go @@ -0,0 +1,58 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package mobileanalytics_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/mobileanalytics" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleMobileAnalytics_PutEvents() { + svc := mobileanalytics.New(session.New()) + + params := &mobileanalytics.PutEventsInput{ + ClientContext: aws.String("String"), // Required + Events: []*mobileanalytics.Event{ // Required + { // Required + EventType: aws.String("String50Chars"), // Required + Timestamp: aws.String("ISO8601Timestamp"), // Required + Attributes: map[string]*string{ + "Key": aws.String("String0to1000Chars"), // Required + // More values... + }, + Metrics: map[string]*float64{ + "Key": aws.Float64(1.0), // Required + // More values... + }, + Session: &mobileanalytics.Session{ + Duration: aws.Int64(1), + Id: aws.String("String50Chars"), + StartTimestamp: aws.String("ISO8601Timestamp"), + StopTimestamp: aws.String("ISO8601Timestamp"), + }, + Version: aws.String("String10Chars"), + }, + // More values... + }, + ClientContextEncoding: aws.String("String"), + } + resp, err := svc.PutEvents(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/mobileanalytics/mobileanalyticsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/mobileanalytics/mobileanalyticsiface/interface.go new file mode 100644 index 000000000..524635c56 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/mobileanalytics/mobileanalyticsiface/interface.go @@ -0,0 +1,18 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package mobileanalyticsiface provides an interface for the Amazon Mobile Analytics. +package mobileanalyticsiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/mobileanalytics" +) + +// MobileAnalyticsAPI is the interface type for mobileanalytics.MobileAnalytics. 
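+//
+// Declaring the client's operations as an interface allows them to be stubbed
+// out in unit tests. A minimal sketch, where the mock type is hypothetical:
+//
+//    type mockMobileAnalytics struct {
+//        mobileanalyticsiface.MobileAnalyticsAPI
+//    }
+//
+//    func (m *mockMobileAnalytics) PutEvents(in *mobileanalytics.PutEventsInput) (*mobileanalytics.PutEventsOutput, error) {
+//        return &mobileanalytics.PutEventsOutput{}, nil
+//    }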
+type MobileAnalyticsAPI interface { + PutEventsRequest(*mobileanalytics.PutEventsInput) (*request.Request, *mobileanalytics.PutEventsOutput) + + PutEvents(*mobileanalytics.PutEventsInput) (*mobileanalytics.PutEventsOutput, error) +} + +var _ MobileAnalyticsAPI = (*mobileanalytics.MobileAnalytics)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/mobileanalytics/service.go b/vendor/github.com/aws/aws-sdk-go/service/mobileanalytics/service.go new file mode 100644 index 000000000..a6252f70d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/mobileanalytics/service.go @@ -0,0 +1,87 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package mobileanalytics + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/restjson" +) + +// Amazon Mobile Analytics is a service for collecting, visualizing, and understanding +// app usage data at scale. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type MobileAnalytics struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "mobileanalytics" + +// New creates a new instance of the MobileAnalytics client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a MobileAnalytics client from just a session. +// svc := mobileanalytics.New(mySession) +// +// // Create a MobileAnalytics client with additional configuration +// svc := mobileanalytics.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *MobileAnalytics { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *MobileAnalytics { + svc := &MobileAnalytics{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-06-05", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a MobileAnalytics operation and runs any +// custom request initialization. 
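+// Services that need custom behavior assign the package-level initClient and
+// initRequest hooks declared above, typically from a separate customizations
+// file.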
+func (c *MobileAnalytics) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/opsworks/api.go b/vendor/github.com/aws/aws-sdk-go/service/opsworks/api.go new file mode 100644 index 000000000..9af0dfd05 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/opsworks/api.go @@ -0,0 +1,10469 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package opsworks provides a client for AWS OpsWorks. +package opsworks + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opAssignInstance = "AssignInstance" + +// AssignInstanceRequest generates a "aws/request.Request" representing the +// client's request for the AssignInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AssignInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AssignInstanceRequest method. +// req, resp := client.AssignInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) AssignInstanceRequest(input *AssignInstanceInput) (req *request.Request, output *AssignInstanceOutput) { + op := &request.Operation{ + Name: opAssignInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssignInstanceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AssignInstanceOutput{} + req.Data = output + return +} + +// Assign a registered instance to a layer. +// +// You can assign registered on-premises instances to any layer type. +// +// You can assign registered Amazon EC2 instances only to custom layers. +// +// You cannot use this action with instances that were created with AWS OpsWorks. +// +// Required Permissions: To use this action, an AWS Identity and Access +// Management (IAM) user must have a Manage permissions level for the stack +// or an attached policy that explicitly grants permissions. For more information +// on user permissions, see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) AssignInstance(input *AssignInstanceInput) (*AssignInstanceOutput, error) { + req, out := c.AssignInstanceRequest(input) + err := req.Send() + return out, err +} + +const opAssignVolume = "AssignVolume" + +// AssignVolumeRequest generates a "aws/request.Request" representing the +// client's request for the AssignVolume operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AssignVolume method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AssignVolumeRequest method. +// req, resp := client.AssignVolumeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) AssignVolumeRequest(input *AssignVolumeInput) (req *request.Request, output *AssignVolumeOutput) { + op := &request.Operation{ + Name: opAssignVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssignVolumeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AssignVolumeOutput{} + req.Data = output + return +} + +// Assigns one of the stack's registered Amazon EBS volumes to a specified instance. +// The volume must first be registered with the stack by calling RegisterVolume. +// After you register the volume, you must call UpdateVolume to specify a mount +// point before calling AssignVolume. For more information, see Resource Management +// (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) AssignVolume(input *AssignVolumeInput) (*AssignVolumeOutput, error) { + req, out := c.AssignVolumeRequest(input) + err := req.Send() + return out, err +} + +const opAssociateElasticIp = "AssociateElasticIp" + +// AssociateElasticIpRequest generates a "aws/request.Request" representing the +// client's request for the AssociateElasticIp operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AssociateElasticIp method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AssociateElasticIpRequest method. 
+// req, resp := client.AssociateElasticIpRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) AssociateElasticIpRequest(input *AssociateElasticIpInput) (req *request.Request, output *AssociateElasticIpOutput) { + op := &request.Operation{ + Name: opAssociateElasticIp, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateElasticIpInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AssociateElasticIpOutput{} + req.Data = output + return +} + +// Associates one of the stack's registered Elastic IP addresses with a specified +// instance. The address must first be registered with the stack by calling +// RegisterElasticIp. For more information, see Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) AssociateElasticIp(input *AssociateElasticIpInput) (*AssociateElasticIpOutput, error) { + req, out := c.AssociateElasticIpRequest(input) + err := req.Send() + return out, err +} + +const opAttachElasticLoadBalancer = "AttachElasticLoadBalancer" + +// AttachElasticLoadBalancerRequest generates a "aws/request.Request" representing the +// client's request for the AttachElasticLoadBalancer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AttachElasticLoadBalancer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AttachElasticLoadBalancerRequest method. +// req, resp := client.AttachElasticLoadBalancerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) AttachElasticLoadBalancerRequest(input *AttachElasticLoadBalancerInput) (req *request.Request, output *AttachElasticLoadBalancerOutput) { + op := &request.Operation{ + Name: opAttachElasticLoadBalancer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachElasticLoadBalancerInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AttachElasticLoadBalancerOutput{} + req.Data = output + return +} + +// Attaches an Elastic Load Balancing load balancer to a specified layer. For +// more information, see Elastic Load Balancing (http://docs.aws.amazon.com/opsworks/latest/userguide/load-balancer-elb.html). +// +// You must create the Elastic Load Balancing instance separately, by using +// the Elastic Load Balancing console, API, or CLI. 
For more information, see +// Elastic Load Balancing Developer Guide (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/Welcome.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) AttachElasticLoadBalancer(input *AttachElasticLoadBalancerInput) (*AttachElasticLoadBalancerOutput, error) { + req, out := c.AttachElasticLoadBalancerRequest(input) + err := req.Send() + return out, err +} + +const opCloneStack = "CloneStack" + +// CloneStackRequest generates a "aws/request.Request" representing the +// client's request for the CloneStack operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CloneStack method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CloneStackRequest method. +// req, resp := client.CloneStackRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) CloneStackRequest(input *CloneStackInput) (req *request.Request, output *CloneStackOutput) { + op := &request.Operation{ + Name: opCloneStack, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CloneStackInput{} + } + + req = c.newRequest(op, input, output) + output = &CloneStackOutput{} + req.Data = output + return +} + +// Creates a clone of a specified stack. For more information, see Clone a Stack +// (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-cloning.html). +// By default, all parameters are set to the values used by the parent stack. +// +// Required Permissions: To use this action, an IAM user must have an attached +// policy that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) CloneStack(input *CloneStackInput) (*CloneStackOutput, error) { + req, out := c.CloneStackRequest(input) + err := req.Send() + return out, err +} + +const opCreateApp = "CreateApp" + +// CreateAppRequest generates a "aws/request.Request" representing the +// client's request for the CreateApp operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateApp method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the CreateAppRequest method. +// req, resp := client.CreateAppRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) CreateAppRequest(input *CreateAppInput) (req *request.Request, output *CreateAppOutput) { + op := &request.Operation{ + Name: opCreateApp, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateAppInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateAppOutput{} + req.Data = output + return +} + +// Creates an app for a specified stack. For more information, see Creating +// Apps (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) CreateApp(input *CreateAppInput) (*CreateAppOutput, error) { + req, out := c.CreateAppRequest(input) + err := req.Send() + return out, err +} + +const opCreateDeployment = "CreateDeployment" + +// CreateDeploymentRequest generates a "aws/request.Request" representing the +// client's request for the CreateDeployment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDeployment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDeploymentRequest method. +// req, resp := client.CreateDeploymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) CreateDeploymentRequest(input *CreateDeploymentInput) (req *request.Request, output *CreateDeploymentOutput) { + op := &request.Operation{ + Name: opCreateDeployment, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDeploymentInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDeploymentOutput{} + req.Data = output + return +} + +// Runs deployment or stack commands. For more information, see Deploying Apps +// (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-deploying.html) +// and Run Stack Commands (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-commands.html). +// +// Required Permissions: To use this action, an IAM user must have a Deploy +// or Manage permissions level for the stack, or an attached policy that explicitly +// grants permissions. For more information on user permissions, see Managing +// User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). 
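+//
+// A minimal sketch of deploying an app, assuming svc is an *OpsWorks client;
+// the IDs are placeholders:
+//
+//    out, err := svc.CreateDeployment(&opsworks.CreateDeploymentInput{
+//        StackId: aws.String("stack-id"),
+//        AppId:   aws.String("app-id"),
+//        Command: &opsworks.DeploymentCommand{
+//            Name: aws.String("deploy"),
+//        },
+//    })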
+func (c *OpsWorks) CreateDeployment(input *CreateDeploymentInput) (*CreateDeploymentOutput, error) { + req, out := c.CreateDeploymentRequest(input) + err := req.Send() + return out, err +} + +const opCreateInstance = "CreateInstance" + +// CreateInstanceRequest generates a "aws/request.Request" representing the +// client's request for the CreateInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateInstanceRequest method. +// req, resp := client.CreateInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) CreateInstanceRequest(input *CreateInstanceInput) (req *request.Request, output *CreateInstanceOutput) { + op := &request.Operation{ + Name: opCreateInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateInstanceOutput{} + req.Data = output + return +} + +// Creates an instance in a specified stack. For more information, see Adding +// an Instance to a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-add.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) CreateInstance(input *CreateInstanceInput) (*CreateInstanceOutput, error) { + req, out := c.CreateInstanceRequest(input) + err := req.Send() + return out, err +} + +const opCreateLayer = "CreateLayer" + +// CreateLayerRequest generates a "aws/request.Request" representing the +// client's request for the CreateLayer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateLayer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateLayerRequest method. 
+// req, resp := client.CreateLayerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) CreateLayerRequest(input *CreateLayerInput) (req *request.Request, output *CreateLayerOutput) { + op := &request.Operation{ + Name: opCreateLayer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLayerInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateLayerOutput{} + req.Data = output + return +} + +// Creates a layer. For more information, see How to Create a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-create.html). +// +// You should use CreateLayer for noncustom layer types such as PHP App Server +// only if the stack does not have an existing layer of that type. A stack can +// have at most one instance of each noncustom layer; if you attempt to create +// a second instance, CreateLayer fails. A stack can have an arbitrary number +// of custom layers, so you can call CreateLayer as many times as you like for +// that layer type. +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) CreateLayer(input *CreateLayerInput) (*CreateLayerOutput, error) { + req, out := c.CreateLayerRequest(input) + err := req.Send() + return out, err +} + +const opCreateStack = "CreateStack" + +// CreateStackRequest generates a "aws/request.Request" representing the +// client's request for the CreateStack operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateStack method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateStackRequest method. +// req, resp := client.CreateStackRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) CreateStackRequest(input *CreateStackInput) (req *request.Request, output *CreateStackOutput) { + op := &request.Operation{ + Name: opCreateStack, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateStackInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateStackOutput{} + req.Data = output + return +} + +// Creates a new stack. For more information, see Create a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-edit.html). +// +// Required Permissions: To use this action, an IAM user must have an attached +// policy that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). 
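+//
+// A minimal sketch; the name, region, and ARNs are placeholders:
+//
+//    out, err := svc.CreateStack(&opsworks.CreateStackInput{
+//        Name:                      aws.String("MyStack"),
+//        Region:                    aws.String("us-west-2"),
+//        ServiceRoleArn:            aws.String("arn:aws:iam::111122223333:role/service-role"),
+//        DefaultInstanceProfileArn: aws.String("arn:aws:iam::111122223333:instance-profile/ec2-role"),
+//    })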
+func (c *OpsWorks) CreateStack(input *CreateStackInput) (*CreateStackOutput, error) { + req, out := c.CreateStackRequest(input) + err := req.Send() + return out, err +} + +const opCreateUserProfile = "CreateUserProfile" + +// CreateUserProfileRequest generates a "aws/request.Request" representing the +// client's request for the CreateUserProfile operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateUserProfile method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateUserProfileRequest method. +// req, resp := client.CreateUserProfileRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) CreateUserProfileRequest(input *CreateUserProfileInput) (req *request.Request, output *CreateUserProfileOutput) { + op := &request.Operation{ + Name: opCreateUserProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateUserProfileInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateUserProfileOutput{} + req.Data = output + return +} + +// Creates a new user profile. +// +// Required Permissions: To use this action, an IAM user must have an attached +// policy that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) CreateUserProfile(input *CreateUserProfileInput) (*CreateUserProfileOutput, error) { + req, out := c.CreateUserProfileRequest(input) + err := req.Send() + return out, err +} + +const opDeleteApp = "DeleteApp" + +// DeleteAppRequest generates a "aws/request.Request" representing the +// client's request for the DeleteApp operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteApp method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteAppRequest method. 
+// req, resp := client.DeleteAppRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) DeleteAppRequest(input *DeleteAppInput) (req *request.Request, output *DeleteAppOutput) { + op := &request.Operation{ + Name: opDeleteApp, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAppInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteAppOutput{} + req.Data = output + return +} + +// Deletes a specified app. +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DeleteApp(input *DeleteAppInput) (*DeleteAppOutput, error) { + req, out := c.DeleteAppRequest(input) + err := req.Send() + return out, err +} + +const opDeleteInstance = "DeleteInstance" + +// DeleteInstanceRequest generates a "aws/request.Request" representing the +// client's request for the DeleteInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteInstanceRequest method. +// req, resp := client.DeleteInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) DeleteInstanceRequest(input *DeleteInstanceInput) (req *request.Request, output *DeleteInstanceOutput) { + op := &request.Operation{ + Name: opDeleteInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteInstanceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteInstanceOutput{} + req.Data = output + return +} + +// Deletes a specified instance, which terminates the associated Amazon EC2 +// instance. You must stop an instance before you can delete it. +// +// For more information, see Deleting Instances (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-delete.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). 
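+//
+// A minimal sketch of the stop-then-delete flow, assuming the StopInstance
+// operation defined elsewhere in this package; the instance ID is a
+// placeholder:
+//
+//    _, err := svc.StopInstance(&opsworks.StopInstanceInput{
+//        InstanceId: aws.String("instance-id"),
+//    })
+//    // ... wait until the instance reports a "stopped" status ...
+//    _, err = svc.DeleteInstance(&opsworks.DeleteInstanceInput{
+//        InstanceId: aws.String("instance-id"),
+//    })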
+func (c *OpsWorks) DeleteInstance(input *DeleteInstanceInput) (*DeleteInstanceOutput, error) { + req, out := c.DeleteInstanceRequest(input) + err := req.Send() + return out, err +} + +const opDeleteLayer = "DeleteLayer" + +// DeleteLayerRequest generates a "aws/request.Request" representing the +// client's request for the DeleteLayer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteLayer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteLayerRequest method. +// req, resp := client.DeleteLayerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) DeleteLayerRequest(input *DeleteLayerInput) (req *request.Request, output *DeleteLayerOutput) { + op := &request.Operation{ + Name: opDeleteLayer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLayerInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteLayerOutput{} + req.Data = output + return +} + +// Deletes a specified layer. You must first stop and then delete all associated +// instances or unassign registered instances. For more information, see How +// to Delete a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-delete.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DeleteLayer(input *DeleteLayerInput) (*DeleteLayerOutput, error) { + req, out := c.DeleteLayerRequest(input) + err := req.Send() + return out, err +} + +const opDeleteStack = "DeleteStack" + +// DeleteStackRequest generates a "aws/request.Request" representing the +// client's request for the DeleteStack operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteStack method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteStackRequest method. 
+// req, resp := client.DeleteStackRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) DeleteStackRequest(input *DeleteStackInput) (req *request.Request, output *DeleteStackOutput) { + op := &request.Operation{ + Name: opDeleteStack, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteStackInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteStackOutput{} + req.Data = output + return +} + +// Deletes a specified stack. You must first delete all instances, layers, and +// apps or deregister registered instances. For more information, see Shut Down +// a Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-shutting.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DeleteStack(input *DeleteStackInput) (*DeleteStackOutput, error) { + req, out := c.DeleteStackRequest(input) + err := req.Send() + return out, err +} + +const opDeleteUserProfile = "DeleteUserProfile" + +// DeleteUserProfileRequest generates a "aws/request.Request" representing the +// client's request for the DeleteUserProfile operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteUserProfile method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteUserProfileRequest method. +// req, resp := client.DeleteUserProfileRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) DeleteUserProfileRequest(input *DeleteUserProfileInput) (req *request.Request, output *DeleteUserProfileOutput) { + op := &request.Operation{ + Name: opDeleteUserProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteUserProfileInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteUserProfileOutput{} + req.Data = output + return +} + +// Deletes a user profile. +// +// Required Permissions: To use this action, an IAM user must have an attached +// policy that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). 
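+//
+// A minimal sketch; the ARN is a placeholder:
+//
+//    _, err := svc.DeleteUserProfile(&opsworks.DeleteUserProfileInput{
+//        IamUserArn: aws.String("arn:aws:iam::111122223333:user/example"),
+//    })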
+func (c *OpsWorks) DeleteUserProfile(input *DeleteUserProfileInput) (*DeleteUserProfileOutput, error) { + req, out := c.DeleteUserProfileRequest(input) + err := req.Send() + return out, err +} + +const opDeregisterEcsCluster = "DeregisterEcsCluster" + +// DeregisterEcsClusterRequest generates a "aws/request.Request" representing the +// client's request for the DeregisterEcsCluster operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeregisterEcsCluster method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeregisterEcsClusterRequest method. +// req, resp := client.DeregisterEcsClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) DeregisterEcsClusterRequest(input *DeregisterEcsClusterInput) (req *request.Request, output *DeregisterEcsClusterOutput) { + op := &request.Operation{ + Name: opDeregisterEcsCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterEcsClusterInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeregisterEcsClusterOutput{} + req.Data = output + return +} + +// Deregisters a specified Amazon ECS cluster from a stack. For more information, +// see Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-ecscluster.html#workinglayers-ecscluster-delete). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack or an attached policy that explicitly grants +// permissions. For more information on user permissions, see http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html +// (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DeregisterEcsCluster(input *DeregisterEcsClusterInput) (*DeregisterEcsClusterOutput, error) { + req, out := c.DeregisterEcsClusterRequest(input) + err := req.Send() + return out, err +} + +const opDeregisterElasticIp = "DeregisterElasticIp" + +// DeregisterElasticIpRequest generates a "aws/request.Request" representing the +// client's request for the DeregisterElasticIp operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeregisterElasticIp method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeregisterElasticIpRequest method. 
+// req, resp := client.DeregisterElasticIpRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *OpsWorks) DeregisterElasticIpRequest(input *DeregisterElasticIpInput) (req *request.Request, output *DeregisterElasticIpOutput) {
+	op := &request.Operation{
+		Name:       opDeregisterElasticIp,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeregisterElasticIpInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	output = &DeregisterElasticIpOutput{}
+	req.Data = output
+	return
+}
+
+// Deregisters a specified Elastic IP address. The address can then be registered
+// by another stack. For more information, see Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html).
+//
+// Required Permissions: To use this action, an IAM user must have a Manage
+// permissions level for the stack, or an attached policy that explicitly grants
+// permissions. For more information on user permissions, see Managing User
+// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
+func (c *OpsWorks) DeregisterElasticIp(input *DeregisterElasticIpInput) (*DeregisterElasticIpOutput, error) {
+	req, out := c.DeregisterElasticIpRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeregisterInstance = "DeregisterInstance"
+
+// DeregisterInstanceRequest generates a "aws/request.Request" representing the
+// client's request for the DeregisterInstance operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DeregisterInstance method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DeregisterInstanceRequest method.
+// req, resp := client.DeregisterInstanceRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *OpsWorks) DeregisterInstanceRequest(input *DeregisterInstanceInput) (req *request.Request, output *DeregisterInstanceOutput) {
+	op := &request.Operation{
+		Name:       opDeregisterInstance,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeregisterInstanceInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	output = &DeregisterInstanceOutput{}
+	req.Data = output
+	return
+}
+
+// Deregisters a registered Amazon EC2 or on-premises instance. This action removes
+// the instance from the stack and returns it to your control. This action cannot
+// be used with instances that were created with AWS OpsWorks.
+//
+// Required Permissions: To use this action, an IAM user must have a Manage
+// permissions level for the stack or an attached policy that explicitly grants
+// permissions. For more information on user permissions, see Managing User
+// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
+func (c *OpsWorks) DeregisterInstance(input *DeregisterInstanceInput) (*DeregisterInstanceOutput, error) {
+	req, out := c.DeregisterInstanceRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeregisterRdsDbInstance = "DeregisterRdsDbInstance"
+
+// DeregisterRdsDbInstanceRequest generates a "aws/request.Request" representing the
+// client's request for the DeregisterRdsDbInstance operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DeregisterRdsDbInstance method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DeregisterRdsDbInstanceRequest method.
+// req, resp := client.DeregisterRdsDbInstanceRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *OpsWorks) DeregisterRdsDbInstanceRequest(input *DeregisterRdsDbInstanceInput) (req *request.Request, output *DeregisterRdsDbInstanceOutput) {
+	op := &request.Operation{
+		Name:       opDeregisterRdsDbInstance,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeregisterRdsDbInstanceInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	output = &DeregisterRdsDbInstanceOutput{}
+	req.Data = output
+	return
+}
+
+// Deregisters an Amazon RDS instance.
+//
+// Required Permissions: To use this action, an IAM user must have a Manage
+// permissions level for the stack, or an attached policy that explicitly grants
+// permissions. For more information on user permissions, see Managing User
+// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
+func (c *OpsWorks) DeregisterRdsDbInstance(input *DeregisterRdsDbInstanceInput) (*DeregisterRdsDbInstanceOutput, error) {
+	req, out := c.DeregisterRdsDbInstanceRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeregisterVolume = "DeregisterVolume"
+
+// DeregisterVolumeRequest generates a "aws/request.Request" representing the
+// client's request for the DeregisterVolume operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DeregisterVolume method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DeregisterVolumeRequest method.
+// req, resp := client.DeregisterVolumeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) DeregisterVolumeRequest(input *DeregisterVolumeInput) (req *request.Request, output *DeregisterVolumeOutput) { + op := &request.Operation{ + Name: opDeregisterVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterVolumeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeregisterVolumeOutput{} + req.Data = output + return +} + +// Deregisters an Amazon EBS volume. The volume can then be registered by another +// stack. For more information, see Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DeregisterVolume(input *DeregisterVolumeInput) (*DeregisterVolumeOutput, error) { + req, out := c.DeregisterVolumeRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAgentVersions = "DescribeAgentVersions" + +// DescribeAgentVersionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAgentVersions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeAgentVersions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeAgentVersionsRequest method. +// req, resp := client.DescribeAgentVersionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) DescribeAgentVersionsRequest(input *DescribeAgentVersionsInput) (req *request.Request, output *DescribeAgentVersionsOutput) { + op := &request.Operation{ + Name: opDescribeAgentVersions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAgentVersionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAgentVersionsOutput{} + req.Data = output + return +} + +// Describes the available AWS OpsWorks agent versions. You must specify a stack +// ID or a configuration manager. DescribeAgentVersions returns a list of available +// agent versions for the specified stack or configuration manager. +func (c *OpsWorks) DescribeAgentVersions(input *DescribeAgentVersionsInput) (*DescribeAgentVersionsOutput, error) { + req, out := c.DescribeAgentVersionsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeApps = "DescribeApps" + +// DescribeAppsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeApps operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeApps method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeAppsRequest method. +// req, resp := client.DescribeAppsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) DescribeAppsRequest(input *DescribeAppsInput) (req *request.Request, output *DescribeAppsOutput) { + op := &request.Operation{ + Name: opDescribeApps, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAppsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAppsOutput{} + req.Data = output + return +} + +// Requests a description of a specified set of apps. +// +// You must specify at least one of the parameters. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack, or an attached policy +// that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeApps(input *DescribeAppsInput) (*DescribeAppsOutput, error) { + req, out := c.DescribeAppsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeCommands = "DescribeCommands" + +// DescribeCommandsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCommands operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeCommands method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeCommandsRequest method. +// req, resp := client.DescribeCommandsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) DescribeCommandsRequest(input *DescribeCommandsInput) (req *request.Request, output *DescribeCommandsOutput) { + op := &request.Operation{ + Name: opDescribeCommands, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeCommandsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCommandsOutput{} + req.Data = output + return +} + +// Describes the results of specified commands. +// +// You must specify at least one of the parameters. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack, or an attached policy +// that explicitly grants permissions. 
For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeCommands(input *DescribeCommandsInput) (*DescribeCommandsOutput, error) { + req, out := c.DescribeCommandsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDeployments = "DescribeDeployments" + +// DescribeDeploymentsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDeployments operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDeployments method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDeploymentsRequest method. +// req, resp := client.DescribeDeploymentsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) DescribeDeploymentsRequest(input *DescribeDeploymentsInput) (req *request.Request, output *DescribeDeploymentsOutput) { + op := &request.Operation{ + Name: opDescribeDeployments, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDeploymentsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDeploymentsOutput{} + req.Data = output + return +} + +// Requests a description of a specified set of deployments. +// +// You must specify at least one of the parameters. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack, or an attached policy +// that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeDeployments(input *DescribeDeploymentsInput) (*DescribeDeploymentsOutput, error) { + req, out := c.DescribeDeploymentsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeEcsClusters = "DescribeEcsClusters" + +// DescribeEcsClustersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEcsClusters operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeEcsClusters method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeEcsClustersRequest method. 
+// req, resp := client.DescribeEcsClustersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) DescribeEcsClustersRequest(input *DescribeEcsClustersInput) (req *request.Request, output *DescribeEcsClustersOutput) { + op := &request.Operation{ + Name: opDescribeEcsClusters, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeEcsClustersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeEcsClustersOutput{} + req.Data = output + return +} + +// Describes Amazon ECS clusters that are registered with a stack. If you specify +// only a stack ID, you can use the MaxResults and NextToken parameters to paginate +// the response. However, AWS OpsWorks currently supports only one cluster per +// layer, so the result set has a maximum of one element. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack or an attached policy that +// explicitly grants permission. For more information on user permissions, see +// Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeEcsClusters(input *DescribeEcsClustersInput) (*DescribeEcsClustersOutput, error) { + req, out := c.DescribeEcsClustersRequest(input) + err := req.Send() + return out, err +} + +// DescribeEcsClustersPages iterates over the pages of a DescribeEcsClusters operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeEcsClusters method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeEcsClusters operation. +// pageNum := 0 +// err := client.DescribeEcsClustersPages(params, +// func(page *DescribeEcsClustersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *OpsWorks) DescribeEcsClustersPages(input *DescribeEcsClustersInput, fn func(p *DescribeEcsClustersOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeEcsClustersRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeEcsClustersOutput), lastPage) + }) +} + +const opDescribeElasticIps = "DescribeElasticIps" + +// DescribeElasticIpsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeElasticIps operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeElasticIps method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the DescribeElasticIpsRequest method. +// req, resp := client.DescribeElasticIpsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) DescribeElasticIpsRequest(input *DescribeElasticIpsInput) (req *request.Request, output *DescribeElasticIpsOutput) { + op := &request.Operation{ + Name: opDescribeElasticIps, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeElasticIpsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeElasticIpsOutput{} + req.Data = output + return +} + +// Describes Elastic IP addresses (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html). +// +// You must specify at least one of the parameters. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack, or an attached policy +// that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeElasticIps(input *DescribeElasticIpsInput) (*DescribeElasticIpsOutput, error) { + req, out := c.DescribeElasticIpsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeElasticLoadBalancers = "DescribeElasticLoadBalancers" + +// DescribeElasticLoadBalancersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeElasticLoadBalancers operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeElasticLoadBalancers method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeElasticLoadBalancersRequest method. +// req, resp := client.DescribeElasticLoadBalancersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) DescribeElasticLoadBalancersRequest(input *DescribeElasticLoadBalancersInput) (req *request.Request, output *DescribeElasticLoadBalancersOutput) { + op := &request.Operation{ + Name: opDescribeElasticLoadBalancers, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeElasticLoadBalancersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeElasticLoadBalancersOutput{} + req.Data = output + return +} + +// Describes a stack's Elastic Load Balancing instances. +// +// You must specify at least one of the parameters. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack, or an attached policy +// that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). 
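+//
+// // A hedged usage sketch, assuming an initialized client (svc) and a
+// // hypothetical stack ID; at least one of StackId or LayerIds is required.
+// out, err := svc.DescribeElasticLoadBalancers(&opsworks.DescribeElasticLoadBalancersInput{
+// StackId: aws.String("example-stack-id"), // hypothetical ID
+// })
+// if err == nil {
+// fmt.Println(out.ElasticLoadBalancers)
+// }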
+func (c *OpsWorks) DescribeElasticLoadBalancers(input *DescribeElasticLoadBalancersInput) (*DescribeElasticLoadBalancersOutput, error) { + req, out := c.DescribeElasticLoadBalancersRequest(input) + err := req.Send() + return out, err +} + +const opDescribeInstances = "DescribeInstances" + +// DescribeInstancesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeInstancesRequest method. +// req, resp := client.DescribeInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) DescribeInstancesRequest(input *DescribeInstancesInput) (req *request.Request, output *DescribeInstancesOutput) { + op := &request.Operation{ + Name: opDescribeInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeInstancesOutput{} + req.Data = output + return +} + +// Requests a description of a set of instances. +// +// You must specify at least one of the parameters. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack, or an attached policy +// that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeInstances(input *DescribeInstancesInput) (*DescribeInstancesOutput, error) { + req, out := c.DescribeInstancesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeLayers = "DescribeLayers" + +// DescribeLayersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLayers operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeLayers method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeLayersRequest method. 
+// req, resp := client.DescribeLayersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) DescribeLayersRequest(input *DescribeLayersInput) (req *request.Request, output *DescribeLayersOutput) { + op := &request.Operation{ + Name: opDescribeLayers, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeLayersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeLayersOutput{} + req.Data = output + return +} + +// Requests a description of one or more layers in a specified stack. +// +// You must specify at least one of the parameters. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack, or an attached policy +// that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeLayers(input *DescribeLayersInput) (*DescribeLayersOutput, error) { + req, out := c.DescribeLayersRequest(input) + err := req.Send() + return out, err +} + +const opDescribeLoadBasedAutoScaling = "DescribeLoadBasedAutoScaling" + +// DescribeLoadBasedAutoScalingRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLoadBasedAutoScaling operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeLoadBasedAutoScaling method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeLoadBasedAutoScalingRequest method. +// req, resp := client.DescribeLoadBasedAutoScalingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) DescribeLoadBasedAutoScalingRequest(input *DescribeLoadBasedAutoScalingInput) (req *request.Request, output *DescribeLoadBasedAutoScalingOutput) { + op := &request.Operation{ + Name: opDescribeLoadBasedAutoScaling, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeLoadBasedAutoScalingInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeLoadBasedAutoScalingOutput{} + req.Data = output + return +} + +// Describes load-based auto scaling configurations for specified layers. +// +// You must specify at least one of the parameters. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack, or an attached policy +// that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). 
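+//
+// // A hedged usage sketch, assuming an initialized client (svc); the layer
+// // ID is hypothetical.
+// out, err := svc.DescribeLoadBasedAutoScaling(&opsworks.DescribeLoadBasedAutoScalingInput{
+// LayerIds: []*string{aws.String("example-layer-id")},
+// })
+// if err == nil {
+// fmt.Println(out.LoadBasedAutoScalingConfigurations)
+// }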
+func (c *OpsWorks) DescribeLoadBasedAutoScaling(input *DescribeLoadBasedAutoScalingInput) (*DescribeLoadBasedAutoScalingOutput, error) { + req, out := c.DescribeLoadBasedAutoScalingRequest(input) + err := req.Send() + return out, err +} + +const opDescribeMyUserProfile = "DescribeMyUserProfile" + +// DescribeMyUserProfileRequest generates a "aws/request.Request" representing the +// client's request for the DescribeMyUserProfile operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeMyUserProfile method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeMyUserProfileRequest method. +// req, resp := client.DescribeMyUserProfileRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) DescribeMyUserProfileRequest(input *DescribeMyUserProfileInput) (req *request.Request, output *DescribeMyUserProfileOutput) { + op := &request.Operation{ + Name: opDescribeMyUserProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeMyUserProfileInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeMyUserProfileOutput{} + req.Data = output + return +} + +// Describes a user's SSH information. +// +// Required Permissions: To use this action, an IAM user must have self-management +// enabled or an attached policy that explicitly grants permissions. For more +// information on user permissions, see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeMyUserProfile(input *DescribeMyUserProfileInput) (*DescribeMyUserProfileOutput, error) { + req, out := c.DescribeMyUserProfileRequest(input) + err := req.Send() + return out, err +} + +const opDescribePermissions = "DescribePermissions" + +// DescribePermissionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribePermissions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribePermissions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribePermissionsRequest method. 
+// req, resp := client.DescribePermissionsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *OpsWorks) DescribePermissionsRequest(input *DescribePermissionsInput) (req *request.Request, output *DescribePermissionsOutput) {
+	op := &request.Operation{
+		Name:       opDescribePermissions,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribePermissionsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribePermissionsOutput{}
+	req.Data = output
+	return
+}
+
+// Describes the permissions for a specified stack.
+//
+// Required Permissions: To use this action, an IAM user must have a Manage
+// permissions level for the stack, or an attached policy that explicitly grants
+// permissions. For more information on user permissions, see Managing User
+// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
+func (c *OpsWorks) DescribePermissions(input *DescribePermissionsInput) (*DescribePermissionsOutput, error) {
+	req, out := c.DescribePermissionsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeRaidArrays = "DescribeRaidArrays"
+
+// DescribeRaidArraysRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeRaidArrays operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeRaidArrays method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DescribeRaidArraysRequest method.
+// req, resp := client.DescribeRaidArraysRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *OpsWorks) DescribeRaidArraysRequest(input *DescribeRaidArraysInput) (req *request.Request, output *DescribeRaidArraysOutput) {
+	op := &request.Operation{
+		Name:       opDescribeRaidArrays,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeRaidArraysInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeRaidArraysOutput{}
+	req.Data = output
+	return
+}
+
+// Describes an instance's RAID arrays.
+//
+// You must specify at least one of the parameters.
+//
+// Required Permissions: To use this action, an IAM user must have a Show,
+// Deploy, or Manage permissions level for the stack, or an attached policy
+// that explicitly grants permissions. For more information on user permissions,
+// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
+func (c *OpsWorks) DescribeRaidArrays(input *DescribeRaidArraysInput) (*DescribeRaidArraysOutput, error) {
+	req, out := c.DescribeRaidArraysRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeRdsDbInstances = "DescribeRdsDbInstances"
+
+// DescribeRdsDbInstancesRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeRdsDbInstances operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeRdsDbInstances method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DescribeRdsDbInstancesRequest method.
+// req, resp := client.DescribeRdsDbInstancesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *OpsWorks) DescribeRdsDbInstancesRequest(input *DescribeRdsDbInstancesInput) (req *request.Request, output *DescribeRdsDbInstancesOutput) {
+	op := &request.Operation{
+		Name:       opDescribeRdsDbInstances,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeRdsDbInstancesInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeRdsDbInstancesOutput{}
+	req.Data = output
+	return
+}
+
+// Describes Amazon RDS instances.
+//
+// Required Permissions: To use this action, an IAM user must have a Show,
+// Deploy, or Manage permissions level for the stack, or an attached policy
+// that explicitly grants permissions. For more information on user permissions,
+// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
+func (c *OpsWorks) DescribeRdsDbInstances(input *DescribeRdsDbInstancesInput) (*DescribeRdsDbInstancesOutput, error) {
+	req, out := c.DescribeRdsDbInstancesRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeServiceErrors = "DescribeServiceErrors"
+
+// DescribeServiceErrorsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeServiceErrors operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeServiceErrors method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DescribeServiceErrorsRequest method.
+// req, resp := client.DescribeServiceErrorsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *OpsWorks) DescribeServiceErrorsRequest(input *DescribeServiceErrorsInput) (req *request.Request, output *DescribeServiceErrorsOutput) {
+	op := &request.Operation{
+		Name:       opDescribeServiceErrors,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeServiceErrorsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeServiceErrorsOutput{}
+	req.Data = output
+	return
+}
+
+// Describes AWS OpsWorks service errors.
+// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack, or an attached policy +// that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeServiceErrors(input *DescribeServiceErrorsInput) (*DescribeServiceErrorsOutput, error) { + req, out := c.DescribeServiceErrorsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeStackProvisioningParameters = "DescribeStackProvisioningParameters" + +// DescribeStackProvisioningParametersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeStackProvisioningParameters operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeStackProvisioningParameters method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeStackProvisioningParametersRequest method. +// req, resp := client.DescribeStackProvisioningParametersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) DescribeStackProvisioningParametersRequest(input *DescribeStackProvisioningParametersInput) (req *request.Request, output *DescribeStackProvisioningParametersOutput) { + op := &request.Operation{ + Name: opDescribeStackProvisioningParameters, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeStackProvisioningParametersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeStackProvisioningParametersOutput{} + req.Data = output + return +} + +// Requests a description of a stack's provisioning parameters. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack or an attached policy that +// explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeStackProvisioningParameters(input *DescribeStackProvisioningParametersInput) (*DescribeStackProvisioningParametersOutput, error) { + req, out := c.DescribeStackProvisioningParametersRequest(input) + err := req.Send() + return out, err +} + +const opDescribeStackSummary = "DescribeStackSummary" + +// DescribeStackSummaryRequest generates a "aws/request.Request" representing the +// client's request for the DescribeStackSummary operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
+// If you just want the service response, call the DescribeStackSummary method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DescribeStackSummaryRequest method.
+// req, resp := client.DescribeStackSummaryRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *OpsWorks) DescribeStackSummaryRequest(input *DescribeStackSummaryInput) (req *request.Request, output *DescribeStackSummaryOutput) {
+	op := &request.Operation{
+		Name:       opDescribeStackSummary,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeStackSummaryInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeStackSummaryOutput{}
+	req.Data = output
+	return
+}
+
+// Describes the number of layers and apps in a specified stack, and the number
+// of instances in each state, such as running_setup or online.
+//
+// Required Permissions: To use this action, an IAM user must have a Show,
+// Deploy, or Manage permissions level for the stack, or an attached policy
+// that explicitly grants permissions. For more information on user permissions,
+// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
+func (c *OpsWorks) DescribeStackSummary(input *DescribeStackSummaryInput) (*DescribeStackSummaryOutput, error) {
+	req, out := c.DescribeStackSummaryRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeStacks = "DescribeStacks"
+
+// DescribeStacksRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeStacks operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeStacks method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DescribeStacksRequest method.
+// req, resp := client.DescribeStacksRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *OpsWorks) DescribeStacksRequest(input *DescribeStacksInput) (req *request.Request, output *DescribeStacksOutput) {
+	op := &request.Operation{
+		Name:       opDescribeStacks,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeStacksInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeStacksOutput{}
+	req.Data = output
+	return
+}
+
+// Requests a description of one or more stacks.
+//
+// Required Permissions: To use this action, an IAM user must have a Show,
+// Deploy, or Manage permissions level for the stack, or an attached policy
+// that explicitly grants permissions. For more information on user permissions,
+// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
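+//
+// // A hedged usage sketch, assuming an initialized client (svc); with an
+// // empty input the call is expected to describe every reachable stack.
+// out, err := svc.DescribeStacks(&opsworks.DescribeStacksInput{})
+// if err == nil {
+// for _, s := range out.Stacks {
+// fmt.Println(aws.StringValue(s.Name), aws.StringValue(s.StackId))
+// }
+// }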
+func (c *OpsWorks) DescribeStacks(input *DescribeStacksInput) (*DescribeStacksOutput, error) { + req, out := c.DescribeStacksRequest(input) + err := req.Send() + return out, err +} + +const opDescribeTimeBasedAutoScaling = "DescribeTimeBasedAutoScaling" + +// DescribeTimeBasedAutoScalingRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTimeBasedAutoScaling operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeTimeBasedAutoScaling method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeTimeBasedAutoScalingRequest method. +// req, resp := client.DescribeTimeBasedAutoScalingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) DescribeTimeBasedAutoScalingRequest(input *DescribeTimeBasedAutoScalingInput) (req *request.Request, output *DescribeTimeBasedAutoScalingOutput) { + op := &request.Operation{ + Name: opDescribeTimeBasedAutoScaling, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTimeBasedAutoScalingInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTimeBasedAutoScalingOutput{} + req.Data = output + return +} + +// Describes time-based auto scaling configurations for specified instances. +// +// You must specify at least one of the parameters. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack, or an attached policy +// that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeTimeBasedAutoScaling(input *DescribeTimeBasedAutoScalingInput) (*DescribeTimeBasedAutoScalingOutput, error) { + req, out := c.DescribeTimeBasedAutoScalingRequest(input) + err := req.Send() + return out, err +} + +const opDescribeUserProfiles = "DescribeUserProfiles" + +// DescribeUserProfilesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeUserProfiles operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeUserProfiles method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeUserProfilesRequest method. 
+// req, resp := client.DescribeUserProfilesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *OpsWorks) DescribeUserProfilesRequest(input *DescribeUserProfilesInput) (req *request.Request, output *DescribeUserProfilesOutput) {
+	op := &request.Operation{
+		Name:       opDescribeUserProfiles,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeUserProfilesInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeUserProfilesOutput{}
+	req.Data = output
+	return
+}
+
+// Describes the specified users.
+//
+// Required Permissions: To use this action, an IAM user must have an attached
+// policy that explicitly grants permissions. For more information on user permissions,
+// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
+func (c *OpsWorks) DescribeUserProfiles(input *DescribeUserProfilesInput) (*DescribeUserProfilesOutput, error) {
+	req, out := c.DescribeUserProfilesRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeVolumes = "DescribeVolumes"
+
+// DescribeVolumesRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeVolumes operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeVolumes method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DescribeVolumesRequest method.
+// req, resp := client.DescribeVolumesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *OpsWorks) DescribeVolumesRequest(input *DescribeVolumesInput) (req *request.Request, output *DescribeVolumesOutput) {
+	op := &request.Operation{
+		Name:       opDescribeVolumes,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeVolumesInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeVolumesOutput{}
+	req.Data = output
+	return
+}
+
+// Describes an instance's Amazon EBS volumes.
+//
+// You must specify at least one of the parameters.
+//
+// Required Permissions: To use this action, an IAM user must have a Show,
+// Deploy, or Manage permissions level for the stack, or an attached policy
+// that explicitly grants permissions. For more information on user permissions,
+// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
+func (c *OpsWorks) DescribeVolumes(input *DescribeVolumesInput) (*DescribeVolumesOutput, error) {
+	req, out := c.DescribeVolumesRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDetachElasticLoadBalancer = "DetachElasticLoadBalancer"
+
+// DetachElasticLoadBalancerRequest generates a "aws/request.Request" representing the
+// client's request for the DetachElasticLoadBalancer operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DetachElasticLoadBalancer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DetachElasticLoadBalancerRequest method. +// req, resp := client.DetachElasticLoadBalancerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) DetachElasticLoadBalancerRequest(input *DetachElasticLoadBalancerInput) (req *request.Request, output *DetachElasticLoadBalancerOutput) { + op := &request.Operation{ + Name: opDetachElasticLoadBalancer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachElasticLoadBalancerInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DetachElasticLoadBalancerOutput{} + req.Data = output + return +} + +// Detaches a specified Elastic Load Balancing instance from its layer. +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DetachElasticLoadBalancer(input *DetachElasticLoadBalancerInput) (*DetachElasticLoadBalancerOutput, error) { + req, out := c.DetachElasticLoadBalancerRequest(input) + err := req.Send() + return out, err +} + +const opDisassociateElasticIp = "DisassociateElasticIp" + +// DisassociateElasticIpRequest generates a "aws/request.Request" representing the +// client's request for the DisassociateElasticIp operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DisassociateElasticIp method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DisassociateElasticIpRequest method. 
+// req, resp := client.DisassociateElasticIpRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) DisassociateElasticIpRequest(input *DisassociateElasticIpInput) (req *request.Request, output *DisassociateElasticIpOutput) { + op := &request.Operation{ + Name: opDisassociateElasticIp, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisassociateElasticIpInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DisassociateElasticIpOutput{} + req.Data = output + return +} + +// Disassociates an Elastic IP address from its instance. The address remains +// registered with the stack. For more information, see Resource Management +// (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DisassociateElasticIp(input *DisassociateElasticIpInput) (*DisassociateElasticIpOutput, error) { + req, out := c.DisassociateElasticIpRequest(input) + err := req.Send() + return out, err +} + +const opGetHostnameSuggestion = "GetHostnameSuggestion" + +// GetHostnameSuggestionRequest generates a "aws/request.Request" representing the +// client's request for the GetHostnameSuggestion operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetHostnameSuggestion method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetHostnameSuggestionRequest method. +// req, resp := client.GetHostnameSuggestionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) GetHostnameSuggestionRequest(input *GetHostnameSuggestionInput) (req *request.Request, output *GetHostnameSuggestionOutput) { + op := &request.Operation{ + Name: opGetHostnameSuggestion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetHostnameSuggestionInput{} + } + + req = c.newRequest(op, input, output) + output = &GetHostnameSuggestionOutput{} + req.Data = output + return +} + +// Gets a generated host name for the specified layer, based on the current +// host name theme. +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). 
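+//
+// As an illustrative sketch only (a configured OpsWorks client is assumed,
+// and the layer ID below is a placeholder):
+//
+// out, err := client.GetHostnameSuggestion(&opsworks.GetHostnameSuggestionInput{
+// LayerId: aws.String("your-layer-id"),
+// })
+// if err == nil {
+// fmt.Println(aws.StringValue(out.Hostname))
+// }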
+func (c *OpsWorks) GetHostnameSuggestion(input *GetHostnameSuggestionInput) (*GetHostnameSuggestionOutput, error) { + req, out := c.GetHostnameSuggestionRequest(input) + err := req.Send() + return out, err +} + +const opGrantAccess = "GrantAccess" + +// GrantAccessRequest generates a "aws/request.Request" representing the +// client's request for the GrantAccess operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GrantAccess method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GrantAccessRequest method. +// req, resp := client.GrantAccessRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) GrantAccessRequest(input *GrantAccessInput) (req *request.Request, output *GrantAccessOutput) { + op := &request.Operation{ + Name: opGrantAccess, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GrantAccessInput{} + } + + req = c.newRequest(op, input, output) + output = &GrantAccessOutput{} + req.Data = output + return +} + +// This action can be used only with Windows stacks. +// +// Grants RDP access to a Windows instance for a specified time period. +func (c *OpsWorks) GrantAccess(input *GrantAccessInput) (*GrantAccessOutput, error) { + req, out := c.GrantAccessRequest(input) + err := req.Send() + return out, err +} + +const opRebootInstance = "RebootInstance" + +// RebootInstanceRequest generates a "aws/request.Request" representing the +// client's request for the RebootInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RebootInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RebootInstanceRequest method. +// req, resp := client.RebootInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) RebootInstanceRequest(input *RebootInstanceInput) (req *request.Request, output *RebootInstanceOutput) { + op := &request.Operation{ + Name: opRebootInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RebootInstanceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RebootInstanceOutput{} + req.Data = output + return +} + +// Reboots a specified instance. 
For more information, see Starting, Stopping, +// and Rebooting Instances (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-starting.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) RebootInstance(input *RebootInstanceInput) (*RebootInstanceOutput, error) { + req, out := c.RebootInstanceRequest(input) + err := req.Send() + return out, err +} + +const opRegisterEcsCluster = "RegisterEcsCluster" + +// RegisterEcsClusterRequest generates a "aws/request.Request" representing the +// client's request for the RegisterEcsCluster operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RegisterEcsCluster method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RegisterEcsClusterRequest method. +// req, resp := client.RegisterEcsClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) RegisterEcsClusterRequest(input *RegisterEcsClusterInput) (req *request.Request, output *RegisterEcsClusterOutput) { + op := &request.Operation{ + Name: opRegisterEcsCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterEcsClusterInput{} + } + + req = c.newRequest(op, input, output) + output = &RegisterEcsClusterOutput{} + req.Data = output + return +} + +// Registers a specified Amazon ECS cluster with a stack. You can register only +// one cluster with a stack. A cluster can be registered with only one stack. +// For more information, see Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-ecscluster.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) RegisterEcsCluster(input *RegisterEcsClusterInput) (*RegisterEcsClusterOutput, error) { + req, out := c.RegisterEcsClusterRequest(input) + err := req.Send() + return out, err +} + +const opRegisterElasticIp = "RegisterElasticIp" + +// RegisterElasticIpRequest generates a "aws/request.Request" representing the +// client's request for the RegisterElasticIp operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If
+// you just want the service response, call the RegisterElasticIp method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the RegisterElasticIpRequest method.
+// req, resp := client.RegisterElasticIpRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *OpsWorks) RegisterElasticIpRequest(input *RegisterElasticIpInput) (req *request.Request, output *RegisterElasticIpOutput) {
+ op := &request.Operation{
+ Name: opRegisterElasticIp,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &RegisterElasticIpInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &RegisterElasticIpOutput{}
+ req.Data = output
+ return
+}
+
+// Registers an Elastic IP address with a specified stack. An address can be
+// registered with only one stack at a time. If the address is already registered,
+// you must first deregister it by calling DeregisterElasticIp. For more information,
+// see Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html).
+//
+// Required Permissions: To use this action, an IAM user must have a Manage
+// permissions level for the stack, or an attached policy that explicitly grants
+// permissions. For more information on user permissions, see Managing User
+// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
+func (c *OpsWorks) RegisterElasticIp(input *RegisterElasticIpInput) (*RegisterElasticIpOutput, error) {
+ req, out := c.RegisterElasticIpRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opRegisterInstance = "RegisterInstance"
+
+// RegisterInstanceRequest generates a "aws/request.Request" representing the
+// client's request for the RegisterInstance operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the RegisterInstance method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the RegisterInstanceRequest method.
+// req, resp := client.RegisterInstanceRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *OpsWorks) RegisterInstanceRequest(input *RegisterInstanceInput) (req *request.Request, output *RegisterInstanceOutput) {
+ op := &request.Operation{
+ Name: opRegisterInstance,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &RegisterInstanceInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &RegisterInstanceOutput{}
+ req.Data = output
+ return
+}
+
+// Registers instances that were created outside of AWS OpsWorks with a
+// specified stack.
+//
+// We do not recommend using this action to register instances. The complete
+// registration operation has two primary steps: installing the AWS OpsWorks
+// agent on the instance and registering the instance with the stack. RegisterInstance
+// handles only the second step.
You should instead use the AWS CLI register +// command, which performs the entire registration operation. For more information, +// see Registering an Instance with an AWS OpsWorks Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/registered-instances-register.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) RegisterInstance(input *RegisterInstanceInput) (*RegisterInstanceOutput, error) { + req, out := c.RegisterInstanceRequest(input) + err := req.Send() + return out, err +} + +const opRegisterRdsDbInstance = "RegisterRdsDbInstance" + +// RegisterRdsDbInstanceRequest generates a "aws/request.Request" representing the +// client's request for the RegisterRdsDbInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RegisterRdsDbInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RegisterRdsDbInstanceRequest method. +// req, resp := client.RegisterRdsDbInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) RegisterRdsDbInstanceRequest(input *RegisterRdsDbInstanceInput) (req *request.Request, output *RegisterRdsDbInstanceOutput) { + op := &request.Operation{ + Name: opRegisterRdsDbInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterRdsDbInstanceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RegisterRdsDbInstanceOutput{} + req.Data = output + return +} + +// Registers an Amazon RDS instance with a stack. +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) RegisterRdsDbInstance(input *RegisterRdsDbInstanceInput) (*RegisterRdsDbInstanceOutput, error) { + req, out := c.RegisterRdsDbInstanceRequest(input) + err := req.Send() + return out, err +} + +const opRegisterVolume = "RegisterVolume" + +// RegisterVolumeRequest generates a "aws/request.Request" representing the +// client's request for the RegisterVolume operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RegisterVolume method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RegisterVolumeRequest method. +// req, resp := client.RegisterVolumeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) RegisterVolumeRequest(input *RegisterVolumeInput) (req *request.Request, output *RegisterVolumeOutput) { + op := &request.Operation{ + Name: opRegisterVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterVolumeInput{} + } + + req = c.newRequest(op, input, output) + output = &RegisterVolumeOutput{} + req.Data = output + return +} + +// Registers an Amazon EBS volume with a specified stack. A volume can be registered +// with only one stack at a time. If the volume is already registered, you must +// first deregister it by calling DeregisterVolume. For more information, see +// Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) RegisterVolume(input *RegisterVolumeInput) (*RegisterVolumeOutput, error) { + req, out := c.RegisterVolumeRequest(input) + err := req.Send() + return out, err +} + +const opSetLoadBasedAutoScaling = "SetLoadBasedAutoScaling" + +// SetLoadBasedAutoScalingRequest generates a "aws/request.Request" representing the +// client's request for the SetLoadBasedAutoScaling operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetLoadBasedAutoScaling method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetLoadBasedAutoScalingRequest method. 
+// req, resp := client.SetLoadBasedAutoScalingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) SetLoadBasedAutoScalingRequest(input *SetLoadBasedAutoScalingInput) (req *request.Request, output *SetLoadBasedAutoScalingOutput) { + op := &request.Operation{ + Name: opSetLoadBasedAutoScaling, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetLoadBasedAutoScalingInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetLoadBasedAutoScalingOutput{} + req.Data = output + return +} + +// Specify the load-based auto scaling configuration for a specified layer. +// For more information, see Managing Load with Time-based and Load-based Instances +// (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-autoscaling.html). +// +// To use load-based auto scaling, you must create a set of load-based auto +// scaling instances. Load-based auto scaling operates only on the instances +// from that set, so you must ensure that you have created enough instances +// to handle the maximum anticipated load. +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) SetLoadBasedAutoScaling(input *SetLoadBasedAutoScalingInput) (*SetLoadBasedAutoScalingOutput, error) { + req, out := c.SetLoadBasedAutoScalingRequest(input) + err := req.Send() + return out, err +} + +const opSetPermission = "SetPermission" + +// SetPermissionRequest generates a "aws/request.Request" representing the +// client's request for the SetPermission operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetPermission method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetPermissionRequest method. +// req, resp := client.SetPermissionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) SetPermissionRequest(input *SetPermissionInput) (req *request.Request, output *SetPermissionOutput) { + op := &request.Operation{ + Name: opSetPermission, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetPermissionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetPermissionOutput{} + req.Data = output + return +} + +// Specifies a user's permissions. For more information, see Security and Permissions +// (http://docs.aws.amazon.com/opsworks/latest/userguide/workingsecurity.html). 
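+//
+// A minimal sketch of a direct call (the stack ID, ARN, and level below are
+// placeholders, and a configured client is assumed):
+//
+// _, err := client.SetPermission(&opsworks.SetPermissionInput{
+// StackId: aws.String("your-stack-id"),
+// IamUserArn: aws.String("arn:aws:iam::111122223333:user/example"),
+// Level: aws.String("manage"),
+// })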
+// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) SetPermission(input *SetPermissionInput) (*SetPermissionOutput, error) { + req, out := c.SetPermissionRequest(input) + err := req.Send() + return out, err +} + +const opSetTimeBasedAutoScaling = "SetTimeBasedAutoScaling" + +// SetTimeBasedAutoScalingRequest generates a "aws/request.Request" representing the +// client's request for the SetTimeBasedAutoScaling operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetTimeBasedAutoScaling method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetTimeBasedAutoScalingRequest method. +// req, resp := client.SetTimeBasedAutoScalingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) SetTimeBasedAutoScalingRequest(input *SetTimeBasedAutoScalingInput) (req *request.Request, output *SetTimeBasedAutoScalingOutput) { + op := &request.Operation{ + Name: opSetTimeBasedAutoScaling, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetTimeBasedAutoScalingInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetTimeBasedAutoScalingOutput{} + req.Data = output + return +} + +// Specify the time-based auto scaling configuration for a specified instance. +// For more information, see Managing Load with Time-based and Load-based Instances +// (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-autoscaling.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) SetTimeBasedAutoScaling(input *SetTimeBasedAutoScalingInput) (*SetTimeBasedAutoScalingOutput, error) { + req, out := c.SetTimeBasedAutoScalingRequest(input) + err := req.Send() + return out, err +} + +const opStartInstance = "StartInstance" + +// StartInstanceRequest generates a "aws/request.Request" representing the +// client's request for the StartInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the StartInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the StartInstanceRequest method. +// req, resp := client.StartInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) StartInstanceRequest(input *StartInstanceInput) (req *request.Request, output *StartInstanceOutput) { + op := &request.Operation{ + Name: opStartInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartInstanceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &StartInstanceOutput{} + req.Data = output + return +} + +// Starts a specified instance. For more information, see Starting, Stopping, +// and Rebooting Instances (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-starting.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) StartInstance(input *StartInstanceInput) (*StartInstanceOutput, error) { + req, out := c.StartInstanceRequest(input) + err := req.Send() + return out, err +} + +const opStartStack = "StartStack" + +// StartStackRequest generates a "aws/request.Request" representing the +// client's request for the StartStack operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the StartStack method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the StartStackRequest method. +// req, resp := client.StartStackRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) StartStackRequest(input *StartStackInput) (req *request.Request, output *StartStackOutput) { + op := &request.Operation{ + Name: opStartStack, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartStackInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &StartStackOutput{} + req.Data = output + return +} + +// Starts a stack's instances. +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). 
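+//
+// As a sketch of the request-form usage described in StartStackRequest above,
+// a custom handler can be injected before sending (the handler body here is
+// illustrative only):
+//
+// req, resp := client.StartStackRequest(params)
+// req.Handlers.Send.PushFront(func(r *request.Request) {
+// // inspect or mutate r.HTTPRequest before it is sent
+// })
+// if err := req.Send(); err == nil {
+// fmt.Println(resp)
+// }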
+func (c *OpsWorks) StartStack(input *StartStackInput) (*StartStackOutput, error) { + req, out := c.StartStackRequest(input) + err := req.Send() + return out, err +} + +const opStopInstance = "StopInstance" + +// StopInstanceRequest generates a "aws/request.Request" representing the +// client's request for the StopInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the StopInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the StopInstanceRequest method. +// req, resp := client.StopInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) StopInstanceRequest(input *StopInstanceInput) (req *request.Request, output *StopInstanceOutput) { + op := &request.Operation{ + Name: opStopInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopInstanceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &StopInstanceOutput{} + req.Data = output + return +} + +// Stops a specified instance. When you stop a standard instance, the data disappears +// and must be reinstalled when you restart the instance. You can stop an Amazon +// EBS-backed instance without losing data. For more information, see Starting, +// Stopping, and Rebooting Instances (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-starting.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) StopInstance(input *StopInstanceInput) (*StopInstanceOutput, error) { + req, out := c.StopInstanceRequest(input) + err := req.Send() + return out, err +} + +const opStopStack = "StopStack" + +// StopStackRequest generates a "aws/request.Request" representing the +// client's request for the StopStack operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the StopStack method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the StopStackRequest method. 
+// req, resp := client.StopStackRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *OpsWorks) StopStackRequest(input *StopStackInput) (req *request.Request, output *StopStackOutput) {
+ op := &request.Operation{
+ Name: opStopStack,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &StopStackInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &StopStackOutput{}
+ req.Data = output
+ return
+}
+
+// Stops a specified stack.
+//
+// Required Permissions: To use this action, an IAM user must have a Manage
+// permissions level for the stack, or an attached policy that explicitly grants
+// permissions. For more information on user permissions, see Managing User
+// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
+func (c *OpsWorks) StopStack(input *StopStackInput) (*StopStackOutput, error) {
+ req, out := c.StopStackRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opUnassignInstance = "UnassignInstance"
+
+// UnassignInstanceRequest generates a "aws/request.Request" representing the
+// client's request for the UnassignInstance operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the UnassignInstance method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the UnassignInstanceRequest method.
+// req, resp := client.UnassignInstanceRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *OpsWorks) UnassignInstanceRequest(input *UnassignInstanceInput) (req *request.Request, output *UnassignInstanceOutput) {
+ op := &request.Operation{
+ Name: opUnassignInstance,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &UnassignInstanceInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &UnassignInstanceOutput{}
+ req.Data = output
+ return
+}
+
+// Unassigns a registered instance from all of its layers. The instance remains
+// in the stack as an unassigned instance and can be assigned to another layer,
+// as needed. You cannot use this action with instances that were created with
+// AWS OpsWorks.
+//
+// Required Permissions: To use this action, an IAM user must have a Manage
+// permissions level for the stack or an attached policy that explicitly grants
+// permissions. For more information on user permissions, see Managing User
+// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
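+//
+// A sketch of inspecting a failure through the awserr package (the instance
+// ID is a placeholder; assumes "github.com/aws/aws-sdk-go/aws/awserr" is imported):
+//
+// _, err := client.UnassignInstance(&opsworks.UnassignInstanceInput{
+// InstanceId: aws.String("your-instance-id"),
+// })
+// if awsErr, ok := err.(awserr.Error); ok {
+// fmt.Println(awsErr.Code(), awsErr.Message())
+// }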
+func (c *OpsWorks) UnassignInstance(input *UnassignInstanceInput) (*UnassignInstanceOutput, error) { + req, out := c.UnassignInstanceRequest(input) + err := req.Send() + return out, err +} + +const opUnassignVolume = "UnassignVolume" + +// UnassignVolumeRequest generates a "aws/request.Request" representing the +// client's request for the UnassignVolume operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UnassignVolume method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UnassignVolumeRequest method. +// req, resp := client.UnassignVolumeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) UnassignVolumeRequest(input *UnassignVolumeInput) (req *request.Request, output *UnassignVolumeOutput) { + op := &request.Operation{ + Name: opUnassignVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UnassignVolumeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UnassignVolumeOutput{} + req.Data = output + return +} + +// Unassigns an assigned Amazon EBS volume. The volume remains registered with +// the stack. For more information, see Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) UnassignVolume(input *UnassignVolumeInput) (*UnassignVolumeOutput, error) { + req, out := c.UnassignVolumeRequest(input) + err := req.Send() + return out, err +} + +const opUpdateApp = "UpdateApp" + +// UpdateAppRequest generates a "aws/request.Request" representing the +// client's request for the UpdateApp operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateApp method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateAppRequest method. 
+// req, resp := client.UpdateAppRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) UpdateAppRequest(input *UpdateAppInput) (req *request.Request, output *UpdateAppOutput) { + op := &request.Operation{ + Name: opUpdateApp, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateAppInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateAppOutput{} + req.Data = output + return +} + +// Updates a specified app. +// +// Required Permissions: To use this action, an IAM user must have a Deploy +// or Manage permissions level for the stack, or an attached policy that explicitly +// grants permissions. For more information on user permissions, see Managing +// User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) UpdateApp(input *UpdateAppInput) (*UpdateAppOutput, error) { + req, out := c.UpdateAppRequest(input) + err := req.Send() + return out, err +} + +const opUpdateElasticIp = "UpdateElasticIp" + +// UpdateElasticIpRequest generates a "aws/request.Request" representing the +// client's request for the UpdateElasticIp operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateElasticIp method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateElasticIpRequest method. +// req, resp := client.UpdateElasticIpRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) UpdateElasticIpRequest(input *UpdateElasticIpInput) (req *request.Request, output *UpdateElasticIpOutput) { + op := &request.Operation{ + Name: opUpdateElasticIp, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateElasticIpInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateElasticIpOutput{} + req.Data = output + return +} + +// Updates a registered Elastic IP address's name. For more information, see +// Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). 
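+//
+// For example (sketch only; the address and name below are placeholders):
+//
+// _, err := client.UpdateElasticIp(&opsworks.UpdateElasticIpInput{
+// ElasticIp: aws.String("192.0.2.0"),
+// Name: aws.String("renamed-address"),
+// })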
+func (c *OpsWorks) UpdateElasticIp(input *UpdateElasticIpInput) (*UpdateElasticIpOutput, error) { + req, out := c.UpdateElasticIpRequest(input) + err := req.Send() + return out, err +} + +const opUpdateInstance = "UpdateInstance" + +// UpdateInstanceRequest generates a "aws/request.Request" representing the +// client's request for the UpdateInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateInstanceRequest method. +// req, resp := client.UpdateInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) UpdateInstanceRequest(input *UpdateInstanceInput) (req *request.Request, output *UpdateInstanceOutput) { + op := &request.Operation{ + Name: opUpdateInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateInstanceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateInstanceOutput{} + req.Data = output + return +} + +// Updates a specified instance. +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) UpdateInstance(input *UpdateInstanceInput) (*UpdateInstanceOutput, error) { + req, out := c.UpdateInstanceRequest(input) + err := req.Send() + return out, err +} + +const opUpdateLayer = "UpdateLayer" + +// UpdateLayerRequest generates a "aws/request.Request" representing the +// client's request for the UpdateLayer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateLayer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateLayerRequest method. 
+// req, resp := client.UpdateLayerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) UpdateLayerRequest(input *UpdateLayerInput) (req *request.Request, output *UpdateLayerOutput) { + op := &request.Operation{ + Name: opUpdateLayer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateLayerInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateLayerOutput{} + req.Data = output + return +} + +// Updates a specified layer. +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) UpdateLayer(input *UpdateLayerInput) (*UpdateLayerOutput, error) { + req, out := c.UpdateLayerRequest(input) + err := req.Send() + return out, err +} + +const opUpdateMyUserProfile = "UpdateMyUserProfile" + +// UpdateMyUserProfileRequest generates a "aws/request.Request" representing the +// client's request for the UpdateMyUserProfile operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateMyUserProfile method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateMyUserProfileRequest method. +// req, resp := client.UpdateMyUserProfileRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) UpdateMyUserProfileRequest(input *UpdateMyUserProfileInput) (req *request.Request, output *UpdateMyUserProfileOutput) { + op := &request.Operation{ + Name: opUpdateMyUserProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateMyUserProfileInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateMyUserProfileOutput{} + req.Data = output + return +} + +// Updates a user's SSH public key. +// +// Required Permissions: To use this action, an IAM user must have self-management +// enabled or an attached policy that explicitly grants permissions. For more +// information on user permissions, see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) UpdateMyUserProfile(input *UpdateMyUserProfileInput) (*UpdateMyUserProfileOutput, error) { + req, out := c.UpdateMyUserProfileRequest(input) + err := req.Send() + return out, err +} + +const opUpdateRdsDbInstance = "UpdateRdsDbInstance" + +// UpdateRdsDbInstanceRequest generates a "aws/request.Request" representing the +// client's request for the UpdateRdsDbInstance operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateRdsDbInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateRdsDbInstanceRequest method. +// req, resp := client.UpdateRdsDbInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) UpdateRdsDbInstanceRequest(input *UpdateRdsDbInstanceInput) (req *request.Request, output *UpdateRdsDbInstanceOutput) { + op := &request.Operation{ + Name: opUpdateRdsDbInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateRdsDbInstanceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateRdsDbInstanceOutput{} + req.Data = output + return +} + +// Updates an Amazon RDS instance. +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) UpdateRdsDbInstance(input *UpdateRdsDbInstanceInput) (*UpdateRdsDbInstanceOutput, error) { + req, out := c.UpdateRdsDbInstanceRequest(input) + err := req.Send() + return out, err +} + +const opUpdateStack = "UpdateStack" + +// UpdateStackRequest generates a "aws/request.Request" representing the +// client's request for the UpdateStack operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateStack method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateStackRequest method. +// req, resp := client.UpdateStackRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) UpdateStackRequest(input *UpdateStackInput) (req *request.Request, output *UpdateStackOutput) { + op := &request.Operation{ + Name: opUpdateStack, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateStackInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateStackOutput{} + req.Data = output + return +} + +// Updates a specified stack. 
+// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) UpdateStack(input *UpdateStackInput) (*UpdateStackOutput, error) { + req, out := c.UpdateStackRequest(input) + err := req.Send() + return out, err +} + +const opUpdateUserProfile = "UpdateUserProfile" + +// UpdateUserProfileRequest generates a "aws/request.Request" representing the +// client's request for the UpdateUserProfile operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateUserProfile method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateUserProfileRequest method. +// req, resp := client.UpdateUserProfileRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) UpdateUserProfileRequest(input *UpdateUserProfileInput) (req *request.Request, output *UpdateUserProfileOutput) { + op := &request.Operation{ + Name: opUpdateUserProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateUserProfileInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateUserProfileOutput{} + req.Data = output + return +} + +// Updates a specified user profile. +// +// Required Permissions: To use this action, an IAM user must have an attached +// policy that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) UpdateUserProfile(input *UpdateUserProfileInput) (*UpdateUserProfileOutput, error) { + req, out := c.UpdateUserProfileRequest(input) + err := req.Send() + return out, err +} + +const opUpdateVolume = "UpdateVolume" + +// UpdateVolumeRequest generates a "aws/request.Request" representing the +// client's request for the UpdateVolume operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateVolume method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateVolumeRequest method. 
+// req, resp := client.UpdateVolumeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *OpsWorks) UpdateVolumeRequest(input *UpdateVolumeInput) (req *request.Request, output *UpdateVolumeOutput) { + op := &request.Operation{ + Name: opUpdateVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateVolumeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateVolumeOutput{} + req.Data = output + return +} + +// Updates an Amazon EBS volume's name or mount point. For more information, +// see Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) UpdateVolume(input *UpdateVolumeInput) (*UpdateVolumeOutput, error) { + req, out := c.UpdateVolumeRequest(input) + err := req.Send() + return out, err +} + +// Describes an agent version. +type AgentVersion struct { + _ struct{} `type:"structure"` + + // The configuration manager. + ConfigurationManager *StackConfigurationManager `type:"structure"` + + // The agent version. + Version *string `type:"string"` +} + +// String returns the string representation +func (s AgentVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AgentVersion) GoString() string { + return s.String() +} + +// A description of the app. +type App struct { + _ struct{} `type:"structure"` + + // The app ID. + AppId *string `type:"string"` + + // A Source object that describes the app repository. + AppSource *Source `type:"structure"` + + // The stack attributes. + Attributes map[string]*string `type:"map"` + + // When the app was created. + CreatedAt *string `type:"string"` + + // The app's data sources. + DataSources []*DataSource `type:"list"` + + // A description of the app. + Description *string `type:"string"` + + // The app vhost settings with multiple domains separated by commas. For example: + // 'www.example.com, example.com' + Domains []*string `type:"list"` + + // Whether to enable SSL for the app. + EnableSsl *bool `type:"boolean"` + + // An array of EnvironmentVariable objects that specify environment variables + // to be associated with the app. After you deploy the app, these variables + // are defined on the associated app server instances. For more information, + // see Environment Variables (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html#workingapps-creating-environment). + // + // There is no specific limit on the number of environment variables. However, + // the size of the associated data structure - which includes the variable names, + // values, and protected flag values - cannot exceed 10 KB (10240 Bytes). This + // limit should accommodate most if not all use cases, but if you do exceed + // it, you will cause an exception (API) with an "Environment: is too large + // (maximum is 10KB)" message. + Environment []*EnvironmentVariable `type:"list"` + + // The app name. 
+ Name *string `type:"string"` + + // The app's short name. + Shortname *string `type:"string"` + + // An SslConfiguration object with the SSL configuration. + SslConfiguration *SslConfiguration `type:"structure"` + + // The app stack ID. + StackId *string `type:"string"` + + // The app type. + Type *string `type:"string" enum:"AppType"` +} + +// String returns the string representation +func (s App) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s App) GoString() string { + return s.String() +} + +type AssignInstanceInput struct { + _ struct{} `type:"structure"` + + // The instance ID. + InstanceId *string `type:"string" required:"true"` + + // The layer ID, which must correspond to a custom layer. You cannot assign + // a registered instance to a built-in layer. + LayerIds []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s AssignInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssignInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssignInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssignInstanceInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + if s.LayerIds == nil { + invalidParams.Add(request.NewErrParamRequired("LayerIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type AssignInstanceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AssignInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssignInstanceOutput) GoString() string { + return s.String() +} + +type AssignVolumeInput struct { + _ struct{} `type:"structure"` + + // The instance ID. + InstanceId *string `type:"string"` + + // The volume ID. + VolumeId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AssignVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssignVolumeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssignVolumeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssignVolumeInput"} + if s.VolumeId == nil { + invalidParams.Add(request.NewErrParamRequired("VolumeId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type AssignVolumeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AssignVolumeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssignVolumeOutput) GoString() string { + return s.String() +} + +type AssociateElasticIpInput struct { + _ struct{} `type:"structure"` + + // The Elastic IP address. + ElasticIp *string `type:"string" required:"true"` + + // The instance ID. 
+ InstanceId *string `type:"string"` +} + +// String returns the string representation +func (s AssociateElasticIpInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateElasticIpInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssociateElasticIpInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssociateElasticIpInput"} + if s.ElasticIp == nil { + invalidParams.Add(request.NewErrParamRequired("ElasticIp")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type AssociateElasticIpOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AssociateElasticIpOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateElasticIpOutput) GoString() string { + return s.String() +} + +type AttachElasticLoadBalancerInput struct { + _ struct{} `type:"structure"` + + // The Elastic Load Balancing instance's name. + ElasticLoadBalancerName *string `type:"string" required:"true"` + + // The ID of the layer that the Elastic Load Balancing instance is to be attached + // to. + LayerId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachElasticLoadBalancerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachElasticLoadBalancerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AttachElasticLoadBalancerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AttachElasticLoadBalancerInput"} + if s.ElasticLoadBalancerName == nil { + invalidParams.Add(request.NewErrParamRequired("ElasticLoadBalancerName")) + } + if s.LayerId == nil { + invalidParams.Add(request.NewErrParamRequired("LayerId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type AttachElasticLoadBalancerOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AttachElasticLoadBalancerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachElasticLoadBalancerOutput) GoString() string { + return s.String() +} + +// Describes a load-based auto scaling upscaling or downscaling threshold configuration, +// which specifies when AWS OpsWorks starts or stops load-based instances. +type AutoScalingThresholds struct { + _ struct{} `type:"structure"` + + // Custom Cloudwatch auto scaling alarms, to be used as thresholds. This parameter + // takes a list of up to five alarm names, which are case sensitive and must + // be in the same region as the stack. + // + // To use custom alarms, you must update your service role to allow cloudwatch:DescribeAlarms. + // You can either have AWS OpsWorks update the role for you when you first use + // this feature or you can edit the role manually. For more information, see + // Allowing AWS OpsWorks to Act on Your Behalf (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-servicerole.html). + Alarms []*string `type:"list"` + + // The CPU utilization threshold, as a percent of the available CPU. A value + // of -1 disables the threshold. 
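+ //
+ // A minimal sketch of a complete thresholds value (illustrative only; the
+ // numbers are placeholders, not recommendations):
+ //
+ //    up := &opsworks.AutoScalingThresholds{
+ //        InstanceCount:      aws.Int64(2),
+ //        CpuThreshold:       aws.Float64(80),
+ //        ThresholdsWaitTime: aws.Int64(5),
+ //        IgnoreMetricsTime:  aws.Int64(10),
+ //    }
+ //
+ // Such a value would add two instances after five minutes above 80% CPU and
+ // then ignore metrics for ten minutes while the new instances come online.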
+ CpuThreshold *float64 `type:"double"` + + // The amount of time (in minutes) after a scaling event occurs that AWS OpsWorks + // should ignore metrics and suppress additional scaling events. For example, + // AWS OpsWorks adds new instances following an upscaling event but the instances + // won't start reducing the load until they have been booted and configured. + // There is no point in raising additional scaling events during that operation, + // which typically takes several minutes. IgnoreMetricsTime allows you to direct + // AWS OpsWorks to suppress scaling events long enough to get the new instances + // online. + IgnoreMetricsTime *int64 `min:"1" type:"integer"` + + // The number of instances to add or remove when the load exceeds a threshold. + InstanceCount *int64 `type:"integer"` + + // The load threshold. A value of -1 disables the threshold. For more information + // about how load is computed, see Load (computing) (http://en.wikipedia.org/wiki/Load_%28computing%29). + LoadThreshold *float64 `type:"double"` + + // The memory utilization threshold, as a percent of the available memory. A + // value of -1 disables the threshold. + MemoryThreshold *float64 `type:"double"` + + // The amount of time, in minutes, that the load must exceed a threshold before + // more instances are added or removed. + ThresholdsWaitTime *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s AutoScalingThresholds) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AutoScalingThresholds) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AutoScalingThresholds) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AutoScalingThresholds"} + if s.IgnoreMetricsTime != nil && *s.IgnoreMetricsTime < 1 { + invalidParams.Add(request.NewErrParamMinValue("IgnoreMetricsTime", 1)) + } + if s.ThresholdsWaitTime != nil && *s.ThresholdsWaitTime < 1 { + invalidParams.Add(request.NewErrParamMinValue("ThresholdsWaitTime", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Describes a block device mapping. This data type maps directly to the Amazon +// EC2 BlockDeviceMapping (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html) +// data type. +type BlockDeviceMapping struct { + _ struct{} `type:"structure"` + + // The device name that is exposed to the instance, such as /dev/sdh. For the + // root device, you can use the explicit device name or you can set this parameter + // to ROOT_DEVICE and AWS OpsWorks will provide the correct device name. + DeviceName *string `type:"string"` + + // An EBSBlockDevice that defines how to configure an Amazon EBS volume when + // the instance is launched. + Ebs *EbsBlockDevice `type:"structure"` + + // Suppresses the specified device included in the AMI's block device mapping. + NoDevice *string `type:"string"` + + // The virtual device name. For more information, see BlockDeviceMapping (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html). + VirtualName *string `type:"string"` +} + +// String returns the string representation +func (s BlockDeviceMapping) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BlockDeviceMapping) GoString() string { + return s.String() +} + +// Describes the Chef configuration. 
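+//
+// A minimal sketch of a value (illustrative only; the Berkshelf version string
+// is a placeholder, and aws.Bool/aws.String are the usual pointer helpers):
+//
+//    chef := &opsworks.ChefConfiguration{
+//        ManageBerkshelf:  aws.Bool(true),
+//        BerkshelfVersion: aws.String("3.2.0"),
+//    }
+//
+// A value like this is passed through the ChefConfiguration field of
+// CreateStackInput or CloneStackInput.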
+type ChefConfiguration struct { + _ struct{} `type:"structure"` + + // The Berkshelf version. + BerkshelfVersion *string `type:"string"` + + // Whether to enable Berkshelf. + ManageBerkshelf *bool `type:"boolean"` +} + +// String returns the string representation +func (s ChefConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChefConfiguration) GoString() string { + return s.String() +} + +type CloneStackInput struct { + _ struct{} `type:"structure"` + + // The default AWS OpsWorks agent version. You have the following options: + // + // Auto-update - Set this parameter to LATEST. AWS OpsWorks automatically + // installs new agent versions on the stack's instances as soon as they are + // available. + // + // Fixed version - Set this parameter to your preferred agent version. To + // update the agent version, you must edit the stack configuration and specify + // a new version. AWS OpsWorks then automatically installs that version on the + // stack's instances. + // + // The default setting is LATEST. To specify an agent version, you must use + // the complete version number, not the abbreviated number shown on the console. + // For a list of available agent version numbers, call DescribeAgentVersions. + // + // You can also specify an agent version when you create or update an instance, + // which overrides the stack's default setting. + AgentVersion *string `type:"string"` + + // A list of stack attributes and values as key/value pairs to be added to the + // cloned stack. + Attributes map[string]*string `type:"map"` + + // A ChefConfiguration object that specifies whether to enable Berkshelf and + // the Berkshelf version on Chef 11.10 stacks. For more information, see Create + // a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html). + ChefConfiguration *ChefConfiguration `type:"structure"` + + // A list of source stack app IDs to be included in the cloned stack. + CloneAppIds []*string `type:"list"` + + // Whether to clone the source stack's permissions. + ClonePermissions *bool `type:"boolean"` + + // The configuration manager. When you clone a stack we recommend that you use + // the configuration manager to specify the Chef version: 12, 11.10, or 11.4 + // for Linux stacks, or 12.2 for Windows stacks. The default value for Linux + // stacks is currently 12. + ConfigurationManager *StackConfigurationManager `type:"structure"` + + // Contains the information required to retrieve an app or cookbook from a repository. + // For more information, see Creating Apps (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html) + // or Custom Recipes and Cookbooks (http://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook.html). + CustomCookbooksSource *Source `type:"structure"` + + // A string that contains user-defined, custom JSON. It is used to override + // the corresponding default stack configuration JSON values. The string should + // be in the following format and must escape characters such as '"': + // + // "{\"key1\": \"value1\", \"key2\": \"value2\",...}" + // + // For more information on custom JSON, see Use Custom JSON to Modify the Stack + // Configuration Attributes (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-json.html) + CustomJson *string `type:"string"` + + // The cloned stack's default Availability Zone, which must be in the specified + // region. 
For more information, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html). + // If you also specify a value for DefaultSubnetId, the subnet must be in the + // same zone. For more information, see the VpcId parameter description. + DefaultAvailabilityZone *string `type:"string"` + + // The Amazon Resource Name (ARN) of an IAM profile that is the default profile + // for all of the stack's EC2 instances. For more information about IAM ARNs, + // see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). + DefaultInstanceProfileArn *string `type:"string"` + + // The stack's operating system, which must be set to one of the following. + // + // A supported Linux operating system: An Amazon Linux version, such as Amazon + // Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03. + // + // A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu + // 14.04 LTS, or Ubuntu 12.04 LTS. + // + // CentOS 7 + // + // Red Hat Enterprise Linux 7 + // + // Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 + // R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server + // Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web. + // + // A custom AMI: Custom. You specify the custom AMI you want to use when + // you create instances. For more information on how to use custom AMIs with + // OpsWorks, see Using Custom AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). + // + // The default option is the parent stack's operating system. For more information + // on the supported operating systems, see AWS OpsWorks Operating Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). + // + // You can specify a different Linux operating system for the cloned stack, + // but you cannot change from Linux to Windows or Windows to Linux. + DefaultOs *string `type:"string"` + + // The default root device type. This value is used by default for all instances + // in the cloned stack, but you can override it when you create an instance. + // For more information, see Storage for the Root Device (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device). + DefaultRootDeviceType *string `type:"string" enum:"RootDeviceType"` + + // A default Amazon EC2 key pair name. The default value is none. If you specify + // a key pair name, AWS OpsWorks installs the public key on the instance and + // you can use the private key with an SSH client to log in to the instance. + // For more information, see Using SSH to Communicate with an Instance (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-ssh.html) + // and Managing SSH Access (http://docs.aws.amazon.com/opsworks/latest/userguide/security-ssh-access.html). + // You can override this setting by specifying a different key pair, or no key + // pair, when you create an instance (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-add.html). + DefaultSshKeyName *string `type:"string"` + + // The stack's default VPC subnet ID. This parameter is required if you specify + // a value for the VpcId parameter. All instances are launched into this subnet + // unless you specify otherwise when you create the instance. If you also specify + // a value for DefaultAvailabilityZone, the subnet must be in that zone. 
For + // information on default values and when this parameter is required, see the + // VpcId parameter description. + DefaultSubnetId *string `type:"string"` + + // The stack's host name theme, with spaces replaced by underscores. The + // theme is used to generate host names for the stack's instances. By default, + // HostnameTheme is set to Layer_Dependent, which creates host names by appending + // integers to the layer's short name. The other themes are: + // + // Baked_Goods + // + // Clouds + // + // Europe_Cities + // + // Fruits + // + // Greek_Deities + // + // Legendary_creatures_from_Japan + // + // Planets_and_Moons + // + // Roman_Deities + // + // Scottish_Islands + // + // US_Cities + // + // Wild_Cats + // + // To obtain a generated host name, call GetHostNameSuggestion, which returns + // a host name based on the current theme. + HostnameTheme *string `type:"string"` + + // The cloned stack name. + Name *string `type:"string"` + + // The cloned stack AWS region, such as "us-east-1". For more information about + // AWS regions, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html). + Region *string `type:"string"` + + // The stack AWS Identity and Access Management (IAM) role, which allows AWS + // OpsWorks to work with AWS resources on your behalf. You must set this parameter + // to the Amazon Resource Name (ARN) for an existing IAM role. If you create + // a stack by using the AWS OpsWorks console, it creates the role for you. You + // can obtain an existing stack's IAM ARN programmatically by calling DescribePermissions. + // For more information about IAM ARNs, see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). + // + // You must set this parameter to a valid service role ARN or the action will + // fail; there is no default value. You can specify the source stack's service + // role ARN, if you prefer, but you must do so explicitly. + ServiceRoleArn *string `type:"string" required:"true"` + + // The source stack ID. + SourceStackId *string `type:"string" required:"true"` + + // Whether to use custom cookbooks. + UseCustomCookbooks *bool `type:"boolean"` + + // Whether to associate the AWS OpsWorks built-in security groups with the stack's + // layers. + // + // AWS OpsWorks provides a standard set of built-in security groups, one for + // each layer, which are associated with layers by default. With UseOpsworksSecurityGroups + // you can instead provide your own custom security groups. UseOpsworksSecurityGroups + // has the following settings: + // + // True - AWS OpsWorks automatically associates the appropriate built-in + // security group with each layer (default setting). You can associate additional + // security groups with a layer after you create it but you cannot delete the + // built-in security group. + // + // False - AWS OpsWorks does not associate built-in security groups with + // layers. You must create appropriate Amazon Elastic Compute Cloud (Amazon + // EC2) security groups and associate a security group with each layer that + // you create. However, you can still manually associate a built-in security + // group with a layer on creation; custom security groups are required only + // for those layers that need custom settings. + // + // For more information, see Create a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html).
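+ //
+ // A minimal sketch of a clone call that disables the built-in groups
+ // (illustrative only; the ID and ARN are placeholders):
+ //
+ //    out, err := client.CloneStack(&opsworks.CloneStackInput{
+ //        SourceStackId:             aws.String("SOURCE_STACK_ID"),
+ //        ServiceRoleArn:            aws.String("arn:aws:iam::111122223333:role/aws-opsworks-service-role"),
+ //        UseOpsworksSecurityGroups: aws.Bool(false),
+ //    })
+ //
+ // On success, out.StackId identifies the cloned stack.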
+ UseOpsworksSecurityGroups *bool `type:"boolean"` + + // The ID of the VPC that the cloned stack is to be launched into. It must be + // in the specified region. All instances are launched into this VPC, and you + // cannot change the ID later. + // + // If your account supports EC2 Classic, the default value is no VPC. + // + // If your account does not support EC2 Classic, the default value is the + // default VPC for the specified region. + // + // If the VPC ID corresponds to a default VPC and you have specified either + // the DefaultAvailabilityZone or the DefaultSubnetId parameter only, AWS OpsWorks + // infers the value of the other parameter. If you specify neither parameter, + // AWS OpsWorks sets these parameters to the first valid Availability Zone for + // the specified region and the corresponding default VPC subnet ID, respectively. + // + // If you specify a nondefault VPC ID, note the following: + // + // It must belong to a VPC in your account that is in the specified region. + // + // You must specify a value for DefaultSubnetId. + // + // For more information on how to use AWS OpsWorks with a VPC, see Running + // a Stack in a VPC (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-vpc.html). + // For more information on default VPC and EC2 Classic, see Supported Platforms + // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-platforms.html). + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s CloneStackInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloneStackInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CloneStackInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CloneStackInput"} + if s.ServiceRoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceRoleArn")) + } + if s.SourceStackId == nil { + invalidParams.Add(request.NewErrParamRequired("SourceStackId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a CloneStack request. +type CloneStackOutput struct { + _ struct{} `type:"structure"` + + // The cloned stack ID. + StackId *string `type:"string"` +} + +// String returns the string representation +func (s CloneStackOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloneStackOutput) GoString() string { + return s.String() +} + +// Describes a command. +type Command struct { + _ struct{} `type:"structure"` + + // Date and time when the command was acknowledged. + AcknowledgedAt *string `type:"string"` + + // The command ID. + CommandId *string `type:"string"` + + // Date when the command completed. + CompletedAt *string `type:"string"` + + // Date and time when the command was run. + CreatedAt *string `type:"string"` + + // The command deployment ID. + DeploymentId *string `type:"string"` + + // The command exit code. + ExitCode *int64 `type:"integer"` + + // The ID of the instance where the command was executed. + InstanceId *string `type:"string"` + + // The URL of the command log. 
+ LogUrl *string `type:"string"` + + // The command status: + // + // failed + // + // successful + // + // skipped + // + // pending + Status *string `type:"string"` + + // The command type: + // + // deploy + // + // rollback + // + // start + // + // stop + // + // restart + // + // undeploy + // + // update_dependencies + // + // install_dependencies + // + // update_custom_cookbooks + // + // execute_recipes + Type *string `type:"string"` +} + +// String returns the string representation +func (s Command) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Command) GoString() string { + return s.String() +} + +type CreateAppInput struct { + _ struct{} `type:"structure"` + + // A Source object that specifies the app repository. + AppSource *Source `type:"structure"` + + // One or more user-defined key/value pairs to be added to the stack attributes. + Attributes map[string]*string `type:"map"` + + // The app's data source. + DataSources []*DataSource `type:"list"` + + // A description of the app. + Description *string `type:"string"` + + // The app virtual host settings, with multiple domains separated by commas. + // For example: 'www.example.com, example.com' + Domains []*string `type:"list"` + + // Whether to enable SSL for the app. + EnableSsl *bool `type:"boolean"` + + // An array of EnvironmentVariable objects that specify environment variables + // to be associated with the app. After you deploy the app, these variables + // are defined on the associated app server instance. For more information, + // see Environment Variables (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html#workingapps-creating-environment). + // + // There is no specific limit on the number of environment variables. However, + // the size of the associated data structure - which includes the variables' + // names, values, and protected flag values - cannot exceed 10 KB (10240 Bytes). + // This limit should accommodate most if not all use cases. Exceeding it will + // cause an exception with the message, "Environment: is too large (maximum + // is 10KB)." + // + // This parameter is supported only by Chef 11.10 stacks. If you have specified + // one or more environment variables, you cannot modify the stack's Chef version. + Environment []*EnvironmentVariable `type:"list"` + + // The app name. + Name *string `type:"string" required:"true"` + + // The app's short name. + Shortname *string `type:"string"` + + // An SslConfiguration object with the SSL configuration. + SslConfiguration *SslConfiguration `type:"structure"` + + // The stack ID. + StackId *string `type:"string" required:"true"` + + // The app type. Each supported type is associated with a particular layer. + // For example, PHP applications are associated with a PHP layer. AWS OpsWorks + // deploys an application to those instances that are members of the corresponding + // layer. If your app isn't one of the standard types, or you prefer to implement + // your own Deploy recipes, specify other. + Type *string `type:"string" required:"true" enum:"AppType"` +} + +// String returns the string representation +func (s CreateAppInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAppInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
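+//
+// A minimal sketch of an input that satisfies this validation (illustrative
+// only; the ID and names are placeholders, and "php" is one of the AppType
+// enum values):
+//
+//    in := &opsworks.CreateAppInput{
+//        StackId: aws.String("STACK_ID"),
+//        Name:    aws.String("myapp"),
+//        Type:    aws.String("php"),
+//    }
+//    err := in.Validate() // nil, since Name, StackId, and Type are all set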
+func (s *CreateAppInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateAppInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.StackId == nil { + invalidParams.Add(request.NewErrParamRequired("StackId")) + } + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + if s.Environment != nil { + for i, v := range s.Environment { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Environment", i), err.(request.ErrInvalidParams)) + } + } + } + if s.SslConfiguration != nil { + if err := s.SslConfiguration.Validate(); err != nil { + invalidParams.AddNested("SslConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a CreateApp request. +type CreateAppOutput struct { + _ struct{} `type:"structure"` + + // The app ID. + AppId *string `type:"string"` +} + +// String returns the string representation +func (s CreateAppOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAppOutput) GoString() string { + return s.String() +} + +type CreateDeploymentInput struct { + _ struct{} `type:"structure"` + + // The app ID. This parameter is required for app deployments, but not for other + // deployment commands. + AppId *string `type:"string"` + + // A DeploymentCommand object that specifies the deployment command and any + // associated arguments. + Command *DeploymentCommand `type:"structure" required:"true"` + + // A user-defined comment. + Comment *string `type:"string"` + + // A string that contains user-defined, custom JSON. It is used to override + // the corresponding default stack configuration JSON values. The string should + // be in the following format and must escape characters such as '"': + // + // "{\"key1\": \"value1\", \"key2\": \"value2\",...}" + // + // For more information on custom JSON, see Use Custom JSON to Modify the Stack + // Configuration Attributes (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-json.html). + CustomJson *string `type:"string"` + + // The instance IDs for the deployment targets. + InstanceIds []*string `type:"list"` + + // The layer IDs for the deployment targets. + LayerIds []*string `type:"list"` + + // The stack ID. + StackId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateDeploymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDeploymentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDeploymentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDeploymentInput"} + if s.Command == nil { + invalidParams.Add(request.NewErrParamRequired("Command")) + } + if s.StackId == nil { + invalidParams.Add(request.NewErrParamRequired("StackId")) + } + if s.Command != nil { + if err := s.Command.Validate(); err != nil { + invalidParams.AddNested("Command", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a CreateDeployment request. 
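+//
+// A minimal sketch of producing one (illustrative only; the IDs are
+// placeholders, and "deploy" is one of the DeploymentCommand names):
+//
+//    out, err := client.CreateDeployment(&opsworks.CreateDeploymentInput{
+//        StackId: aws.String("STACK_ID"),
+//        AppId:   aws.String("APP_ID"),
+//        Command: &opsworks.DeploymentCommand{Name: aws.String("deploy")},
+//    })
+//    if err == nil {
+//        fmt.Println(*out.DeploymentId)
+//    }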
+type CreateDeploymentOutput struct { + _ struct{} `type:"structure"` + + // The deployment ID, which can be used with other requests to identify the + // deployment. + DeploymentId *string `type:"string"` +} + +// String returns the string representation +func (s CreateDeploymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDeploymentOutput) GoString() string { + return s.String() +} + +type CreateInstanceInput struct { + _ struct{} `type:"structure"` + + // The default AWS OpsWorks agent version. You have the following options: + // + // INHERIT - Use the stack's default agent version setting. + // + // version_number - Use the specified agent version. This value overrides + // the stack's default setting. To update the agent version, edit the instance + // configuration and specify a new version. AWS OpsWorks then automatically + // installs that version on the instance. + // + // The default setting is INHERIT. To specify an agent version, you must + // use the complete version number, not the abbreviated number shown on the + // console. For a list of available agent version numbers, call DescribeAgentVersions. + AgentVersion *string `type:"string"` + + // A custom AMI ID to be used to create the instance. The AMI should be based + // on one of the supported operating systems. For more information, see Using + // Custom AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). + // + // If you specify a custom AMI, you must set Os to Custom. + AmiId *string `type:"string"` + + // The instance architecture. The default option is x86_64. Instance types do + // not necessarily support both architectures. For a list of the architectures + // that are supported by the different instance types, see Instance Families + // and Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). + Architecture *string `type:"string" enum:"Architecture"` + + // For load-based or time-based instances, the type. Windows stacks can use + // only time-based instances. + AutoScalingType *string `type:"string" enum:"AutoScalingType"` + + // The instance Availability Zone. For more information, see Regions and Endpoints + // (http://docs.aws.amazon.com/general/latest/gr/rande.html). + AvailabilityZone *string `type:"string"` + + // An array of BlockDeviceMapping objects that specify the instance's block + // devices. For more information, see Block Device Mapping (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html). + // Note that block device mappings are not supported for custom AMIs. + BlockDeviceMappings []*BlockDeviceMapping `type:"list"` + + // Whether to create an Amazon EBS-optimized instance. + EbsOptimized *bool `type:"boolean"` + + // The instance host name. + Hostname *string `type:"string"` + + // Whether to install operating system and package updates when the instance + // boots. The default value is true. To control when updates are installed, + // set this value to false. You must then update your instances manually by + // using CreateDeployment to run the update_dependencies stack command or by + // manually running yum (Amazon Linux) or apt-get (Ubuntu) on the instances. + // + // We strongly recommend using the default value of true to ensure that your + // instances have the latest security updates. + InstallUpdatesOnBoot *bool `type:"boolean"` + + // The instance type, such as t2.micro. 
For a list of supported instance types, + // open the stack in the console, choose Instances, and choose + Instance. The + // Size list contains the currently supported types. For more information, see + // Instance Families and Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). + // The parameter values that you use to specify the various types are in the + // API Name column of the Available Instance Types table. + InstanceType *string `type:"string" required:"true"` + + // An array that contains the instance's layer IDs. + LayerIds []*string `type:"list" required:"true"` + + // The instance's operating system, which must be set to one of the following. + // + // A supported Linux operating system: An Amazon Linux version, such as Amazon + // Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03. + // + // A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu + // 14.04 LTS, or Ubuntu 12.04 LTS. + // + // CentOS 7 + // + // Red Hat Enterprise Linux 7 + // + // A supported Windows operating system, such as Microsoft Windows Server + // 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft + // Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server + // 2012 R2 with SQL Server Web. + // + // A custom AMI: Custom. + // + // For more information on the supported operating systems, see AWS OpsWorks + // Operating Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). + // + // The default option is the current Amazon Linux version. If you set this + // parameter to Custom, you must use the CreateInstance action's AmiId parameter + // to specify the custom AMI that you want to use. Block device mappings are + // not supported if the value is Custom. For more information on the supported + // operating systems, see Operating Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). For + // more information on how to use custom AMIs with AWS OpsWorks, see Using Custom + // AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). + Os *string `type:"string"` + + // The instance root device type. For more information, see Storage for the + // Root Device (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device). + RootDeviceType *string `type:"string" enum:"RootDeviceType"` + + // The instance's Amazon EC2 key-pair name. + SshKeyName *string `type:"string"` + + // The stack ID. + StackId *string `type:"string" required:"true"` + + // The ID of the instance's subnet. If the stack is running in a VPC, you can + // use this parameter to override the stack's default subnet ID value and direct + // AWS OpsWorks to launch the instance in a different subnet. + SubnetId *string `type:"string"` + + // The instance's tenancy option. The default option is no tenancy, or if the + // instance is running in a VPC, inherit tenancy settings from the VPC. The + // following are valid values for this parameter: dedicated, default, or host. + // Because there are costs associated with changes in tenancy options, we recommend + // that you research tenancy options before choosing them for your instances. + // For more information about dedicated hosts, see Dedicated Hosts Overview + // (http://aws.amazon.com/ec2/dedicated-hosts/) and Amazon EC2 Dedicated Hosts + // (http://aws.amazon.com/ec2/dedicated-hosts/).
For more information about + // dedicated instances, see Dedicated Instances (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/dedicated-instance.html) + // and Amazon EC2 Dedicated Instances (http://aws.amazon.com/ec2/purchasing-options/dedicated-instances/). + Tenancy *string `type:"string"` + + // The instance's virtualization type, paravirtual or hvm. + VirtualizationType *string `type:"string"` +} + +// String returns the string representation +func (s CreateInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateInstanceInput"} + if s.InstanceType == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceType")) + } + if s.LayerIds == nil { + invalidParams.Add(request.NewErrParamRequired("LayerIds")) + } + if s.StackId == nil { + invalidParams.Add(request.NewErrParamRequired("StackId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a CreateInstance request. +type CreateInstanceOutput struct { + _ struct{} `type:"structure"` + + // The instance ID. + InstanceId *string `type:"string"` +} + +// String returns the string representation +func (s CreateInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInstanceOutput) GoString() string { + return s.String() +} + +type CreateLayerInput struct { + _ struct{} `type:"structure"` + + // One or more user-defined key-value pairs to be added to the stack attributes. + // + // To create a cluster layer, set the EcsClusterArn attribute to the cluster's + // ARN. + Attributes map[string]*string `type:"map"` + + // Whether to automatically assign an Elastic IP address (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) + // to the layer's instances. For more information, see How to Edit a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-edit.html). + AutoAssignElasticIps *bool `type:"boolean"` + + // For stacks that are running in a VPC, whether to automatically assign a public + // IP address to the layer's instances. For more information, see How to Edit + // a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-edit.html). + AutoAssignPublicIps *bool `type:"boolean"` + + // The ARN of an IAM profile to be used for the layer's EC2 instances. For more + // information about IAM ARNs, see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). + CustomInstanceProfileArn *string `type:"string"` + + // A JSON-formatted string containing custom stack configuration and deployment + // attributes to be installed on the layer's instances. For more information, + // see Using Custom JSON (http://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook-json-override.html). + // This feature is supported as of version 1.7.42 of the AWS CLI. + CustomJson *string `type:"string"` + + // A LayerCustomRecipes object that specifies the layer custom recipes. + CustomRecipes *Recipes `type:"structure"` + + // An array containing the layer custom security group IDs. + CustomSecurityGroupIds []*string `type:"list"` + + // Whether to disable auto healing for the layer. 
+ EnableAutoHealing *bool `type:"boolean"` + + // Whether to install operating system and package updates when the instance + // boots. The default value is true. To control when updates are installed, + // set this value to false. You must then update your instances manually by + // using CreateDeployment to run the update_dependencies stack command or by + // manually running yum (Amazon Linux) or apt-get (Ubuntu) on the instances. + // + // To ensure that your instances have the latest security updates, we strongly + // recommend using the default value of true. + InstallUpdatesOnBoot *bool `type:"boolean"` + + // A LifeCycleEventConfiguration object that you can use to configure the Shutdown + // event to specify an execution timeout and enable or disable Elastic Load + // Balancer connection draining. + LifecycleEventConfiguration *LifecycleEventConfiguration `type:"structure"` + + // The layer name, which is used by the console. + Name *string `type:"string" required:"true"` + + // An array of Package objects that describes the layer packages. + Packages []*string `type:"list"` + + // For custom layers only, use this parameter to specify the layer's short name, + // which is used internally by AWS OpsWorks and by Chef recipes. The short name + // is also used as the name for the directory where your app files are installed. + // It can have a maximum of 200 characters, which are limited to the alphanumeric + // characters, '-', '_', and '.'. + // + // The built-in layers' short names are defined by AWS OpsWorks. For more information, + // see the Layer Reference (http://docs.aws.amazon.com/opsworks/latest/userguide/layers.html). + Shortname *string `type:"string" required:"true"` + + // The layer stack ID. + StackId *string `type:"string" required:"true"` + + // The layer type. A stack cannot have more than one built-in layer of the same + // type. It can have any number of custom layers. Built-in layers are not available + // in Chef 12 stacks. + Type *string `type:"string" required:"true" enum:"LayerType"` + + // Whether to use Amazon EBS-optimized instances. + UseEbsOptimizedInstances *bool `type:"boolean"` + + // A VolumeConfigurations object that describes the layer's Amazon EBS volumes. + VolumeConfigurations []*VolumeConfiguration `type:"list"` +} + +// String returns the string representation +func (s CreateLayerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLayerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateLayerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateLayerInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Shortname == nil { + invalidParams.Add(request.NewErrParamRequired("Shortname")) + } + if s.StackId == nil { + invalidParams.Add(request.NewErrParamRequired("StackId")) + } + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + if s.VolumeConfigurations != nil { + for i, v := range s.VolumeConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "VolumeConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a CreateLayer request. 
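+//
+// A minimal sketch of producing one (illustrative only; the ID and names are
+// placeholders, and "custom" is one of the LayerType enum values):
+//
+//    out, err := client.CreateLayer(&opsworks.CreateLayerInput{
+//        StackId:   aws.String("STACK_ID"),
+//        Type:      aws.String("custom"),
+//        Name:      aws.String("My Layer"),
+//        Shortname: aws.String("mylayer"),
+//    })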
+type CreateLayerOutput struct { + _ struct{} `type:"structure"` + + // The layer ID. + LayerId *string `type:"string"` +} + +// String returns the string representation +func (s CreateLayerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLayerOutput) GoString() string { + return s.String() +} + +type CreateStackInput struct { + _ struct{} `type:"structure"` + + // The default AWS OpsWorks agent version. You have the following options: + // + // Auto-update - Set this parameter to LATEST. AWS OpsWorks automatically + // installs new agent versions on the stack's instances as soon as they are + // available. + // + // Fixed version - Set this parameter to your preferred agent version. To + // update the agent version, you must edit the stack configuration and specify + // a new version. AWS OpsWorks then automatically installs that version on the + // stack's instances. + // + // The default setting is the most recent release of the agent. To specify + // an agent version, you must use the complete version number, not the abbreviated + // number shown on the console. For a list of available agent version numbers, + // call DescribeAgentVersions. + // + // You can also specify an agent version when you create or update an instance, + // which overrides the stack's default setting. + AgentVersion *string `type:"string"` + + // One or more user-defined key-value pairs to be added to the stack attributes. + Attributes map[string]*string `type:"map"` + + // A ChefConfiguration object that specifies whether to enable Berkshelf and + // the Berkshelf version on Chef 11.10 stacks. For more information, see Create + // a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html). + ChefConfiguration *ChefConfiguration `type:"structure"` + + // The configuration manager. When you create a stack we recommend that you + // use the configuration manager to specify the Chef version: 12, 11.10, or + // 11.4 for Linux stacks, or 12.2 for Windows stacks. The default value for + // Linux stacks is currently 11.4. + ConfigurationManager *StackConfigurationManager `type:"structure"` + + // Contains the information required to retrieve an app or cookbook from a repository. + // For more information, see Creating Apps (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html) + // or Custom Recipes and Cookbooks (http://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook.html). + CustomCookbooksSource *Source `type:"structure"` + + // A string that contains user-defined, custom JSON. It can be used to override + // the corresponding default stack configuration attribute values or to pass + // data to recipes. The string should be in the following format and must escape + // characters such as '"': + // + // "{\"key1\": \"value1\", \"key2\": \"value2\",...}" + // + // For more information on custom JSON, see Use Custom JSON to Modify the Stack + // Configuration Attributes (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-json.html). + CustomJson *string `type:"string"` + + // The stack's default Availability Zone, which must be in the specified region. + // For more information, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html). + // If you also specify a value for DefaultSubnetId, the subnet must be in the + // same zone. For more information, see the VpcId parameter description.
+ DefaultAvailabilityZone *string `type:"string"` + + // The Amazon Resource Name (ARN) of an IAM profile that is the default profile + // for all of the stack's EC2 instances. For more information about IAM ARNs, + // see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). + DefaultInstanceProfileArn *string `type:"string" required:"true"` + + // The stack's default operating system, which is installed on every instance + // unless you specify a different operating system when you create the instance. + // You can specify one of the following. + // + // A supported Linux operating system: An Amazon Linux version, such as Amazon + // Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03. + // + // A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu + // 14.04 LTS, or Ubuntu 12.04 LTS. + // + // CentOS 7 + // + // Red Hat Enterprise Linux 7 + // + // A supported Windows operating system, such as Microsoft Windows Server + // 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft + // Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server + // 2012 R2 with SQL Server Web. + // + // A custom AMI: Custom. You specify the custom AMI you want to use when + // you create instances. For more information, see Using Custom AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). + // + // The default option is the current Amazon Linux version. For more information + // on the supported operating systems, see AWS OpsWorks Operating Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). + DefaultOs *string `type:"string"` + + // The default root device type. This value is the default for all instances + // in the stack, but you can override it when you create an instance. The default + // option is instance-store. For more information, see Storage for the Root + // Device (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device). + DefaultRootDeviceType *string `type:"string" enum:"RootDeviceType"` + + // A default Amazon EC2 key pair name. The default value is none. If you specify + // a key pair name, AWS OpsWorks installs the public key on the instance and + // you can use the private key with an SSH client to log in to the instance. + // For more information, see Using SSH to Communicate with an Instance (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-ssh.html) + // and Managing SSH Access (http://docs.aws.amazon.com/opsworks/latest/userguide/security-ssh-access.html). + // You can override this setting by specifying a different key pair, or no key + // pair, when you create an instance (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-add.html). + DefaultSshKeyName *string `type:"string"` + + // The stack's default VPC subnet ID. This parameter is required if you specify + // a value for the VpcId parameter. All instances are launched into this subnet + // unless you specify otherwise when you create the instance. If you also specify + // a value for DefaultAvailabilityZone, the subnet must be in that zone. For + // information on default values and when this parameter is required, see the + // VpcId parameter description. + DefaultSubnetId *string `type:"string"` + + // The stack's host name theme, with spaces replaced by underscores. The theme + // is used to generate host names for the stack's instances. 
By default, HostnameTheme + // is set to Layer_Dependent, which creates host names by appending integers + // to the layer's short name. The other themes are: + // + // Baked_Goods + // + // Clouds + // + // Europe_Cities + // + // Fruits + // + // Greek_Deities + // + // Legendary_creatures_from_Japan + // + // Planets_and_Moons + // + // Roman_Deities + // + // Scottish_Islands + // + // US_Cities + // + // Wild_Cats + // + // To obtain a generated host name, call GetHostNameSuggestion, which returns + // a host name based on the current theme. + HostnameTheme *string `type:"string"` + + // The stack name. + Name *string `type:"string" required:"true"` + + // The stack's AWS region, such as "us-east-1". For more information about Amazon + // regions, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html). + Region *string `type:"string" required:"true"` + + // The stack's AWS Identity and Access Management (IAM) role, which allows AWS + // OpsWorks to work with AWS resources on your behalf. You must set this parameter + // to the Amazon Resource Name (ARN) for an existing IAM role. For more information + // about IAM ARNs, see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). + ServiceRoleArn *string `type:"string" required:"true"` + + // Whether the stack uses custom cookbooks. + UseCustomCookbooks *bool `type:"boolean"` + + // Whether to associate the AWS OpsWorks built-in security groups with the stack's + // layers. + // + // AWS OpsWorks provides a standard set of built-in security groups, one for + // each layer, which are associated with layers by default. With UseOpsworksSecurityGroups + // you can instead provide your own custom security groups. UseOpsworksSecurityGroups + // has the following settings: + // + // True - AWS OpsWorks automatically associates the appropriate built-in + // security group with each layer (default setting). You can associate additional + // security groups with a layer after you create it, but you cannot delete the + // built-in security group. + // + // False - AWS OpsWorks does not associate built-in security groups with + // layers. You must create appropriate EC2 security groups and associate a security + // group with each layer that you create. However, you can still manually associate + // a built-in security group with a layer on creation; custom security groups + // are required only for those layers that need custom settings. + // + // For more information, see Create a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html). + UseOpsworksSecurityGroups *bool `type:"boolean"` + + // The ID of the VPC that the stack is to be launched into. The VPC must be + // in the stack's region. All instances are launched into this VPC. You cannot + // change the ID later. + // + // If your account supports EC2-Classic, the default value is no VPC. + // + // If your account does not support EC2-Classic, the default value is the + // default VPC for the specified region. + // + // If the VPC ID corresponds to a default VPC and you have specified either + // the DefaultAvailabilityZone or the DefaultSubnetId parameter only, AWS OpsWorks + // infers the value of the other parameter. If you specify neither parameter, + // AWS OpsWorks sets these parameters to the first valid Availability Zone for + // the specified region and the corresponding default VPC subnet ID, respectively. 
+ // + // If you specify a nondefault VPC ID, note the following: + // + // It must belong to a VPC in your account that is in the specified region. + // + // You must specify a value for DefaultSubnetId. + // + // For more information on how to use AWS OpsWorks with a VPC, see Running + // a Stack in a VPC (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-vpc.html). + // For more information on default VPC and EC2-Classic, see Supported Platforms + // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-platforms.html). + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s CreateStackInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateStackInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateStackInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateStackInput"} + if s.DefaultInstanceProfileArn == nil { + invalidParams.Add(request.NewErrParamRequired("DefaultInstanceProfileArn")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Region == nil { + invalidParams.Add(request.NewErrParamRequired("Region")) + } + if s.ServiceRoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceRoleArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a CreateStack request. +type CreateStackOutput struct { + _ struct{} `type:"structure"` + + // The stack ID, which is an opaque string that you use to identify the stack + // when performing actions such as DescribeStacks. + StackId *string `type:"string"` +} + +// String returns the string representation +func (s CreateStackOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateStackOutput) GoString() string { + return s.String() +} + +type CreateUserProfileInput struct { + _ struct{} `type:"structure"` + + // Whether users can specify their own SSH public key through the My Settings + // page. For more information, see Setting an IAM User's Public SSH Key (http://docs.aws.amazon.com/opsworks/latest/userguide/security-settingsshkey.html). + AllowSelfManagement *bool `type:"boolean"` + + // The user's IAM ARN. + IamUserArn *string `type:"string" required:"true"` + + // The user's public SSH key. + SshPublicKey *string `type:"string"` + + // The user's SSH user name. The allowable characters are [a-z], [A-Z], [0-9], + // '-', and '_'. If the specified name includes other punctuation marks, AWS + // OpsWorks removes them. For example, my.name will be changed to myname. If + // you do not specify an SSH user name, AWS OpsWorks generates one from the + // IAM user name. + SshUsername *string `type:"string"` +} + +// String returns the string representation +func (s CreateUserProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateUserProfileInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
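+//
+// Editor's note: a hypothetical usage sketch, not part of the generated
+// documentation. It assumes the aws, session, opsworks, and fmt packages are
+// imported; the IAM ARN is a placeholder:
+//
+//    svc := opsworks.New(session.Must(session.NewSession()))
+//    input := &opsworks.CreateUserProfileInput{
+//        IamUserArn:          aws.String("arn:aws:iam::111122223333:user/example"),
+//        AllowSelfManagement: aws.Bool(true),
+//    }
+//    if err := input.Validate(); err != nil {
+//        // a nil IamUserArn surfaces here as an ErrParamRequired
+//    }
+//    out, err := svc.CreateUserProfile(input)
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.IamUserArn))
+//    }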
+func (s *CreateUserProfileInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateUserProfileInput"} + if s.IamUserArn == nil { + invalidParams.Add(request.NewErrParamRequired("IamUserArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a CreateUserProfile request. +type CreateUserProfileOutput struct { + _ struct{} `type:"structure"` + + // The user's IAM ARN. + IamUserArn *string `type:"string"` +} + +// String returns the string representation +func (s CreateUserProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateUserProfileOutput) GoString() string { + return s.String() +} + +// Describes an app's data source. +type DataSource struct { + _ struct{} `type:"structure"` + + // The data source's ARN. + Arn *string `type:"string"` + + // The database name. + DatabaseName *string `type:"string"` + + // The data source's type, AutoSelectOpsworksMysqlInstance, OpsworksMysqlInstance, + // or RdsDbInstance. + Type *string `type:"string"` +} + +// String returns the string representation +func (s DataSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DataSource) GoString() string { + return s.String() +} + +type DeleteAppInput struct { + _ struct{} `type:"structure"` + + // The app ID. + AppId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteAppInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAppInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteAppInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteAppInput"} + if s.AppId == nil { + invalidParams.Add(request.NewErrParamRequired("AppId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteAppOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAppOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAppOutput) GoString() string { + return s.String() +} + +type DeleteInstanceInput struct { + _ struct{} `type:"structure"` + + // Whether to delete the instance Elastic IP address. + DeleteElasticIp *bool `type:"boolean"` + + // Whether to delete the instance's Amazon EBS volumes. + DeleteVolumes *bool `type:"boolean"` + + // The instance ID. + InstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
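+//
+// Editor's note: hypothetical sketch only; svc is an *opsworks.OpsWorks
+// client and the instance ID is a placeholder. Deleting the instance's
+// Elastic IP address and EBS volumes is opt-in:
+//
+//    _, err := svc.DeleteInstance(&opsworks.DeleteInstanceInput{
+//        InstanceId:      aws.String("instance-id"),
+//        DeleteElasticIp: aws.Bool(true),
+//        DeleteVolumes:   aws.Bool(true),
+//    })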
+func (s *DeleteInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteInstanceInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteInstanceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInstanceOutput) GoString() string { + return s.String() +} + +type DeleteLayerInput struct { + _ struct{} `type:"structure"` + + // The layer ID. + LayerId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLayerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLayerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteLayerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteLayerInput"} + if s.LayerId == nil { + invalidParams.Add(request.NewErrParamRequired("LayerId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteLayerOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteLayerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLayerOutput) GoString() string { + return s.String() +} + +type DeleteStackInput struct { + _ struct{} `type:"structure"` + + // The stack ID. + StackId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteStackInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteStackInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteStackInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteStackInput"} + if s.StackId == nil { + invalidParams.Add(request.NewErrParamRequired("StackId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteStackOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteStackOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteStackOutput) GoString() string { + return s.String() +} + +type DeleteUserProfileInput struct { + _ struct{} `type:"structure"` + + // The user's IAM ARN. + IamUserArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteUserProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUserProfileInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteUserProfileInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteUserProfileInput"} + if s.IamUserArn == nil { + invalidParams.Add(request.NewErrParamRequired("IamUserArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteUserProfileOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteUserProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUserProfileOutput) GoString() string { + return s.String() +} + +// Describes a deployment of a stack or app. +type Deployment struct { + _ struct{} `type:"structure"` + + // The app ID. + AppId *string `type:"string"` + + // Used to specify a stack or deployment command. + Command *DeploymentCommand `type:"structure"` + + // A user-defined comment. + Comment *string `type:"string"` + + // Date when the deployment completed. + CompletedAt *string `type:"string"` + + // Date when the deployment was created. + CreatedAt *string `type:"string"` + + // A string that contains user-defined custom JSON. It can be used to override + // the corresponding default stack configuration attribute values for stack + // or to pass data to recipes. The string should be in the following format + // and must escape characters such as '"': + // + // "{\"key1\": \"value1\", \"key2\": \"value2\",...}" + // + // For more information on custom JSON, see Use Custom JSON to Modify the Stack + // Configuration Attributes (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-json.html). + CustomJson *string `type:"string"` + + // The deployment ID. + DeploymentId *string `type:"string"` + + // The deployment duration. + Duration *int64 `type:"integer"` + + // The user's IAM ARN. + IamUserArn *string `type:"string"` + + // The IDs of the target instances. + InstanceIds []*string `type:"list"` + + // The stack ID. + StackId *string `type:"string"` + + // The deployment status: + // + // running + // + // successful + // + // failed + Status *string `type:"string"` +} + +// String returns the string representation +func (s Deployment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Deployment) GoString() string { + return s.String() +} + +// Used to specify a stack or deployment command. +type DeploymentCommand struct { + _ struct{} `type:"structure"` + + // The arguments of those commands that take arguments. It should be set to + // a JSON object with the following format: + // + // {"arg_name1" : ["value1", "value2", ...], "arg_name2" : ["value1", "value2", + // ...], ...} + // + // The update_dependencies command takes two arguments: + // + // upgrade_os_to - Specifies the desired Amazon Linux version for instances + // whose OS you want to upgrade, such as Amazon Linux 2014.09. You must also + // set the allow_reboot argument to true. + // + // allow_reboot - Specifies whether to allow AWS OpsWorks to reboot the + // instances if necessary, after installing the updates. This argument can be + // set to either true or false. The default value is false. + // + // For example, to upgrade an instance to Amazon Linux 2014.09, set Args + // to the following. + // + // { "upgrade_os_to":["Amazon Linux 2014.09"], "allow_reboot":["true"] } + Args map[string][]*string `type:"map"` + + // Specifies the operation. You can specify only one command. 
+ // + // For stacks, the following commands are available: + // + // execute_recipes: Execute one or more recipes. To specify the recipes, + // set an Args parameter named recipes to the list of recipes to be executed. + // For example, to execute phpapp::appsetup, set Args to {"recipes":["phpapp::appsetup"]}. + // + // install_dependencies: Install the stack's dependencies. + // + // update_custom_cookbooks: Update the stack's custom cookbooks. + // + // update_dependencies: Update the stack's dependencies. + // + // The update_dependencies and install_dependencies commands are supported + // only for Linux instances. You can run the commands successfully on Windows + // instances, but they do nothing. + // + // For apps, the following commands are available: + // + // deploy: Deploy an app. Ruby on Rails apps have an optional Args parameter + // named migrate. Set Args to {"migrate":["true"]} to migrate the database. + // The default setting is {"migrate":["false"]}. + // + // rollback Roll the app back to the previous version. When you update an + // app, AWS OpsWorks stores the previous version, up to a maximum of five versions. + // You can use this command to roll an app back as many as four versions. + // + // start: Start the app's web or application server. + // + // stop: Stop the app's web or application server. + // + // restart: Restart the app's web or application server. + // + // undeploy: Undeploy the app. + Name *string `type:"string" required:"true" enum:"DeploymentCommandName"` +} + +// String returns the string representation +func (s DeploymentCommand) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeploymentCommand) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeploymentCommand) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeploymentCommand"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeregisterEcsClusterInput struct { + _ struct{} `type:"structure"` + + // The cluster's ARN. + EcsClusterArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeregisterEcsClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterEcsClusterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeregisterEcsClusterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeregisterEcsClusterInput"} + if s.EcsClusterArn == nil { + invalidParams.Add(request.NewErrParamRequired("EcsClusterArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeregisterEcsClusterOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeregisterEcsClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterEcsClusterOutput) GoString() string { + return s.String() +} + +type DeregisterElasticIpInput struct { + _ struct{} `type:"structure"` + + // The Elastic IP address. 
+ ElasticIp *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeregisterElasticIpInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterElasticIpInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeregisterElasticIpInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeregisterElasticIpInput"} + if s.ElasticIp == nil { + invalidParams.Add(request.NewErrParamRequired("ElasticIp")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeregisterElasticIpOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeregisterElasticIpOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterElasticIpOutput) GoString() string { + return s.String() +} + +type DeregisterInstanceInput struct { + _ struct{} `type:"structure"` + + // The instance ID. + InstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeregisterInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeregisterInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeregisterInstanceInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeregisterInstanceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeregisterInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterInstanceOutput) GoString() string { + return s.String() +} + +type DeregisterRdsDbInstanceInput struct { + _ struct{} `type:"structure"` + + // The Amazon RDS instance's ARN. + RdsDbInstanceArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeregisterRdsDbInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterRdsDbInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
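+//
+// Editor's note: hypothetical sketch; the ARN is a placeholder. Each
+// Deregister* call in this file follows the same shape: one required
+// identifier in, an empty output struct back:
+//
+//    _, err := svc.DeregisterRdsDbInstance(&opsworks.DeregisterRdsDbInstanceInput{
+//        RdsDbInstanceArn: aws.String("arn:aws:rds:us-east-1:111122223333:db:mydb"),
+//    })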
+func (s *DeregisterRdsDbInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeregisterRdsDbInstanceInput"} + if s.RdsDbInstanceArn == nil { + invalidParams.Add(request.NewErrParamRequired("RdsDbInstanceArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeregisterRdsDbInstanceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeregisterRdsDbInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterRdsDbInstanceOutput) GoString() string { + return s.String() +} + +type DeregisterVolumeInput struct { + _ struct{} `type:"structure"` + + // The AWS OpsWorks volume ID, which is the GUID that AWS OpsWorks assigned + // to the instance when you registered the volume with the stack, not the Amazon + // EC2 volume ID. + VolumeId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeregisterVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterVolumeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeregisterVolumeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeregisterVolumeInput"} + if s.VolumeId == nil { + invalidParams.Add(request.NewErrParamRequired("VolumeId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeregisterVolumeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeregisterVolumeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterVolumeOutput) GoString() string { + return s.String() +} + +type DescribeAgentVersionsInput struct { + _ struct{} `type:"structure"` + + // The configuration manager. + ConfigurationManager *StackConfigurationManager `type:"structure"` + + // The stack ID. + StackId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeAgentVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAgentVersionsInput) GoString() string { + return s.String() +} + +// Contains the response to a DescribeAgentVersions request. +type DescribeAgentVersionsOutput struct { + _ struct{} `type:"structure"` + + // The agent versions for the specified stack or configuration manager. Note + // that this value is the complete version number, not the abbreviated number + // used by the console. + AgentVersions []*AgentVersion `type:"list"` +} + +// String returns the string representation +func (s DescribeAgentVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAgentVersionsOutput) GoString() string { + return s.String() +} + +type DescribeAppsInput struct { + _ struct{} `type:"structure"` + + // An array of app IDs for the apps to be described. If you use this parameter, + // DescribeApps returns a description of the specified apps. Otherwise, it returns + // a description of every app. + AppIds []*string `type:"list"` + + // The app stack ID. If you use this parameter, DescribeApps returns a description + // of the apps in the specified stack. 
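+	//
+	// Editor's note: hypothetical sketch; the stack ID is a placeholder and
+	// svc is an *opsworks.OpsWorks client:
+	//
+	//    out, err := svc.DescribeApps(&opsworks.DescribeAppsInput{
+	//        StackId: aws.String("stack-id"),
+	//    })
+	//    if err == nil {
+	//        for _, app := range out.Apps {
+	//            fmt.Println(aws.StringValue(app.Name))
+	//        }
+	//    }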
+	StackId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeAppsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeAppsInput) GoString() string {
+	return s.String()
+}
+
+// Contains the response to a DescribeApps request.
+type DescribeAppsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// An array of App objects that describe the specified apps.
+	Apps []*App `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeAppsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeAppsOutput) GoString() string {
+	return s.String()
+}
+
+type DescribeCommandsInput struct {
+	_ struct{} `type:"structure"`
+
+	// An array of command IDs. If you include this parameter, DescribeCommands
+	// returns a description of the specified commands. Otherwise, it returns a
+	// description of every command.
+	CommandIds []*string `type:"list"`
+
+	// The deployment ID. If you include this parameter, DescribeCommands returns
+	// a description of the commands associated with the specified deployment.
+	DeploymentId *string `type:"string"`
+
+	// The instance ID. If you include this parameter, DescribeCommands returns
+	// a description of the commands associated with the specified instance.
+	InstanceId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeCommandsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeCommandsInput) GoString() string {
+	return s.String()
+}
+
+// Contains the response to a DescribeCommands request.
+type DescribeCommandsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// An array of Command objects that describe each of the specified commands.
+	Commands []*Command `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeCommandsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeCommandsOutput) GoString() string {
+	return s.String()
+}
+
+type DescribeDeploymentsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The app ID. If you include this parameter, DescribeDeployments returns a
+	// description of the deployments associated with the specified app.
+	AppId *string `type:"string"`
+
+	// An array of deployment IDs to be described. If you include this parameter,
+	// DescribeDeployments returns a description of the specified deployments. Otherwise,
+	// it returns a description of every deployment.
+	DeploymentIds []*string `type:"list"`
+
+	// The stack ID. If you include this parameter, DescribeDeployments returns
+	// a description of the deployments associated with the specified stack.
+	StackId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeDeploymentsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeDeploymentsInput) GoString() string {
+	return s.String()
+}
+
+// Contains the response to a DescribeDeployments request.
+type DescribeDeploymentsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// An array of Deployment objects that describe the deployments.
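+	//
+	// Editor's note: hypothetical sketch; the stack ID is a placeholder. The
+	// Status of each Deployment is one of running, successful, or failed, as
+	// documented on the Deployment type above:
+	//
+	//    out, err := svc.DescribeDeployments(&opsworks.DescribeDeploymentsInput{
+	//        StackId: aws.String("stack-id"),
+	//    })
+	//    if err == nil {
+	//        for _, d := range out.Deployments {
+	//            fmt.Println(aws.StringValue(d.DeploymentId), aws.StringValue(d.Status))
+	//        }
+	//    }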
+	Deployments []*Deployment `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeDeploymentsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeDeploymentsOutput) GoString() string {
+	return s.String()
+}
+
+type DescribeEcsClustersInput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of ARNs, one for each cluster to be described.
+	EcsClusterArns []*string `type:"list"`
+
+	// To receive a paginated response, use this parameter to specify the maximum
+	// number of results to be returned with a single call. If the number of available
+	// results exceeds this maximum, the response includes a NextToken value that
+	// you can assign to the NextToken request parameter to get the next set of
+	// results.
+	MaxResults *int64 `type:"integer"`
+
+	// If the previous paginated request did not return all of the remaining results,
+	// the response object's NextToken parameter value is set to a token. To retrieve
+	// the next set of results, call DescribeEcsClusters again and assign that token
+	// to the request object's NextToken parameter. If there are no remaining results,
+	// the previous response object's NextToken parameter is set to null.
+	NextToken *string `type:"string"`
+
+	// A stack ID. DescribeEcsClusters returns a description of the cluster that
+	// is registered with the stack.
+	StackId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeEcsClustersInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeEcsClustersInput) GoString() string {
+	return s.String()
+}
+
+// Contains the response to a DescribeEcsClusters request.
+type DescribeEcsClustersOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of EcsCluster objects containing the cluster descriptions.
+	EcsClusters []*EcsCluster `type:"list"`
+
+	// If a paginated request does not return all of the remaining results, this
+	// parameter is set to a token that you can assign to the request object's NextToken
+	// parameter to retrieve the next set of results. If the previous paginated
+	// request returned all of the remaining results, this parameter is set to null.
+	NextToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeEcsClustersOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeEcsClustersOutput) GoString() string {
+	return s.String()
+}
+
+type DescribeElasticIpsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The instance ID. If you include this parameter, DescribeElasticIps returns
+	// a description of the Elastic IP addresses associated with the specified instance.
+	InstanceId *string `type:"string"`
+
+	// An array of Elastic IP addresses to be described. If you include this parameter,
+	// DescribeElasticIps returns a description of the specified Elastic IP addresses.
+	// Otherwise, it returns a description of every Elastic IP address.
+	Ips []*string `type:"list"`
+
+	// A stack ID. If you include this parameter, DescribeElasticIps returns a description
+	// of the Elastic IP addresses that are registered with the specified stack.
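+	//
+	// Editor's note: a hypothetical sketch of the NextToken paging documented
+	// on DescribeEcsClustersInput above (the stack ID is a placeholder):
+	//
+	//    input := &opsworks.DescribeEcsClustersInput{StackId: aws.String("stack-id")}
+	//    for {
+	//        out, err := svc.DescribeEcsClusters(input)
+	//        if err != nil {
+	//            break
+	//        }
+	//        // ... consume out.EcsClusters ...
+	//        if out.NextToken == nil {
+	//            break
+	//        }
+	//        input.NextToken = out.NextToken
+	//    }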
+ StackId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeElasticIpsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeElasticIpsInput) GoString() string { + return s.String() +} + +// Contains the response to a DescribeElasticIps request. +type DescribeElasticIpsOutput struct { + _ struct{} `type:"structure"` + + // An ElasticIps object that describes the specified Elastic IP addresses. + ElasticIps []*ElasticIp `type:"list"` +} + +// String returns the string representation +func (s DescribeElasticIpsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeElasticIpsOutput) GoString() string { + return s.String() +} + +type DescribeElasticLoadBalancersInput struct { + _ struct{} `type:"structure"` + + // A list of layer IDs. The action describes the Elastic Load Balancing instances + // for the specified layers. + LayerIds []*string `type:"list"` + + // A stack ID. The action describes the stack's Elastic Load Balancing instances. + StackId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeElasticLoadBalancersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeElasticLoadBalancersInput) GoString() string { + return s.String() +} + +// Contains the response to a DescribeElasticLoadBalancers request. +type DescribeElasticLoadBalancersOutput struct { + _ struct{} `type:"structure"` + + // A list of ElasticLoadBalancer objects that describe the specified Elastic + // Load Balancing instances. + ElasticLoadBalancers []*ElasticLoadBalancer `type:"list"` +} + +// String returns the string representation +func (s DescribeElasticLoadBalancersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeElasticLoadBalancersOutput) GoString() string { + return s.String() +} + +type DescribeInstancesInput struct { + _ struct{} `type:"structure"` + + // An array of instance IDs to be described. If you use this parameter, DescribeInstances + // returns a description of the specified instances. Otherwise, it returns a + // description of every instance. + InstanceIds []*string `type:"list"` + + // A layer ID. If you use this parameter, DescribeInstances returns descriptions + // of the instances associated with the specified layer. + LayerId *string `type:"string"` + + // A stack ID. If you use this parameter, DescribeInstances returns descriptions + // of the instances associated with the specified stack. + StackId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstancesInput) GoString() string { + return s.String() +} + +// Contains the response to a DescribeInstances request. +type DescribeInstancesOutput struct { + _ struct{} `type:"structure"` + + // An array of Instance objects that describe the instances. 
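+	//
+	// Editor's note: hypothetical sketch; the layer ID is a placeholder. The
+	// possible Status strings are listed on the Instance type below:
+	//
+	//    out, err := svc.DescribeInstances(&opsworks.DescribeInstancesInput{
+	//        LayerId: aws.String("layer-id"),
+	//    })
+	//    if err == nil {
+	//        for _, inst := range out.Instances {
+	//            fmt.Println(aws.StringValue(inst.Hostname), aws.StringValue(inst.Status))
+	//        }
+	//    }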
+ Instances []*Instance `type:"list"` +} + +// String returns the string representation +func (s DescribeInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstancesOutput) GoString() string { + return s.String() +} + +type DescribeLayersInput struct { + _ struct{} `type:"structure"` + + // An array of layer IDs that specify the layers to be described. If you omit + // this parameter, DescribeLayers returns a description of every layer in the + // specified stack. + LayerIds []*string `type:"list"` + + // The stack ID. + StackId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeLayersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLayersInput) GoString() string { + return s.String() +} + +// Contains the response to a DescribeLayers request. +type DescribeLayersOutput struct { + _ struct{} `type:"structure"` + + // An array of Layer objects that describe the layers. + Layers []*Layer `type:"list"` +} + +// String returns the string representation +func (s DescribeLayersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLayersOutput) GoString() string { + return s.String() +} + +type DescribeLoadBasedAutoScalingInput struct { + _ struct{} `type:"structure"` + + // An array of layer IDs. + LayerIds []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeLoadBasedAutoScalingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBasedAutoScalingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeLoadBasedAutoScalingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeLoadBasedAutoScalingInput"} + if s.LayerIds == nil { + invalidParams.Add(request.NewErrParamRequired("LayerIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a DescribeLoadBasedAutoScaling request. +type DescribeLoadBasedAutoScalingOutput struct { + _ struct{} `type:"structure"` + + // An array of LoadBasedAutoScalingConfiguration objects that describe each + // layer's configuration. + LoadBasedAutoScalingConfigurations []*LoadBasedAutoScalingConfiguration `type:"list"` +} + +// String returns the string representation +func (s DescribeLoadBasedAutoScalingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBasedAutoScalingOutput) GoString() string { + return s.String() +} + +type DescribeMyUserProfileInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeMyUserProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMyUserProfileInput) GoString() string { + return s.String() +} + +// Contains the response to a DescribeMyUserProfile request. +type DescribeMyUserProfileOutput struct { + _ struct{} `type:"structure"` + + // A UserProfile object that describes the user's SSH information. 
+ UserProfile *SelfUserProfile `type:"structure"` +} + +// String returns the string representation +func (s DescribeMyUserProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMyUserProfileOutput) GoString() string { + return s.String() +} + +type DescribePermissionsInput struct { + _ struct{} `type:"structure"` + + // The user's IAM ARN. For more information about IAM ARNs, see Using Identifiers + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). + IamUserArn *string `type:"string"` + + // The stack ID. + StackId *string `type:"string"` +} + +// String returns the string representation +func (s DescribePermissionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePermissionsInput) GoString() string { + return s.String() +} + +// Contains the response to a DescribePermissions request. +type DescribePermissionsOutput struct { + _ struct{} `type:"structure"` + + // An array of Permission objects that describe the stack permissions. + // + // If the request object contains only a stack ID, the array contains a Permission + // object with permissions for each of the stack IAM ARNs. + // + // If the request object contains only an IAM ARN, the array contains a Permission + // object with permissions for each of the user's stack IDs. + // + // If the request contains a stack ID and an IAM ARN, the array contains + // a single Permission object with permissions for the specified stack and IAM + // ARN. + Permissions []*Permission `type:"list"` +} + +// String returns the string representation +func (s DescribePermissionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePermissionsOutput) GoString() string { + return s.String() +} + +type DescribeRaidArraysInput struct { + _ struct{} `type:"structure"` + + // The instance ID. If you use this parameter, DescribeRaidArrays returns descriptions + // of the RAID arrays associated with the specified instance. + InstanceId *string `type:"string"` + + // An array of RAID array IDs. If you use this parameter, DescribeRaidArrays + // returns descriptions of the specified arrays. Otherwise, it returns a description + // of every array. + RaidArrayIds []*string `type:"list"` + + // The stack ID. + StackId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeRaidArraysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRaidArraysInput) GoString() string { + return s.String() +} + +// Contains the response to a DescribeRaidArrays request. +type DescribeRaidArraysOutput struct { + _ struct{} `type:"structure"` + + // A RaidArrays object that describes the specified RAID arrays. + RaidArrays []*RaidArray `type:"list"` +} + +// String returns the string representation +func (s DescribeRaidArraysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRaidArraysOutput) GoString() string { + return s.String() +} + +type DescribeRdsDbInstancesInput struct { + _ struct{} `type:"structure"` + + // An array containing the ARNs of the instances to be described. + RdsDbInstanceArns []*string `type:"list"` + + // The stack ID that the instances are registered with. The operation returns + // descriptions of all registered Amazon RDS instances. 
+	StackId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeRdsDbInstancesInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeRdsDbInstancesInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeRdsDbInstancesInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DescribeRdsDbInstancesInput"}
+	if s.StackId == nil {
+		invalidParams.Add(request.NewErrParamRequired("StackId"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Contains the response to a DescribeRdsDbInstances request.
+type DescribeRdsDbInstancesOutput struct {
+	_ struct{} `type:"structure"`
+
+	// An array of RdsDbInstance objects that describe the instances.
+	RdsDbInstances []*RdsDbInstance `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeRdsDbInstancesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeRdsDbInstancesOutput) GoString() string {
+	return s.String()
+}
+
+type DescribeServiceErrorsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The instance ID. If you use this parameter, DescribeServiceErrors returns
+	// descriptions of the errors associated with the specified instance.
+	InstanceId *string `type:"string"`
+
+	// An array of service error IDs. If you use this parameter, DescribeServiceErrors
+	// returns descriptions of the specified errors. Otherwise, it returns a description
+	// of every error.
+	ServiceErrorIds []*string `type:"list"`
+
+	// The stack ID. If you use this parameter, DescribeServiceErrors returns descriptions
+	// of the errors associated with the specified stack.
+	StackId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeServiceErrorsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeServiceErrorsInput) GoString() string {
+	return s.String()
+}
+
+// Contains the response to a DescribeServiceErrors request.
+type DescribeServiceErrorsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// An array of ServiceError objects that describe the specified service errors.
+	ServiceErrors []*ServiceError `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeServiceErrorsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeServiceErrorsOutput) GoString() string {
+	return s.String()
+}
+
+type DescribeStackProvisioningParametersInput struct {
+	_ struct{} `type:"structure"`
+
+	// The stack ID.
+	StackId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeStackProvisioningParametersInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeStackProvisioningParametersInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
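+//
+// Editor's note: hypothetical sketch; the stack ID is a placeholder:
+//
+//    out, err := svc.DescribeStackProvisioningParameters(
+//        &opsworks.DescribeStackProvisioningParametersInput{
+//            StackId: aws.String("stack-id"),
+//        })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.AgentInstallerUrl), len(out.Parameters))
+//    }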
+func (s *DescribeStackProvisioningParametersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeStackProvisioningParametersInput"} + if s.StackId == nil { + invalidParams.Add(request.NewErrParamRequired("StackId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a DescribeStackProvisioningParameters request. +type DescribeStackProvisioningParametersOutput struct { + _ struct{} `type:"structure"` + + // The AWS OpsWorks agent installer's URL. + AgentInstallerUrl *string `type:"string"` + + // An embedded object that contains the provisioning parameters. + Parameters map[string]*string `type:"map"` +} + +// String returns the string representation +func (s DescribeStackProvisioningParametersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStackProvisioningParametersOutput) GoString() string { + return s.String() +} + +type DescribeStackSummaryInput struct { + _ struct{} `type:"structure"` + + // The stack ID. + StackId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeStackSummaryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStackSummaryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeStackSummaryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeStackSummaryInput"} + if s.StackId == nil { + invalidParams.Add(request.NewErrParamRequired("StackId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a DescribeStackSummary request. +type DescribeStackSummaryOutput struct { + _ struct{} `type:"structure"` + + // A StackSummary object that contains the results. + StackSummary *StackSummary `type:"structure"` +} + +// String returns the string representation +func (s DescribeStackSummaryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStackSummaryOutput) GoString() string { + return s.String() +} + +type DescribeStacksInput struct { + _ struct{} `type:"structure"` + + // An array of stack IDs that specify the stacks to be described. If you omit + // this parameter, DescribeStacks returns a description of every stack. + StackIds []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeStacksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStacksInput) GoString() string { + return s.String() +} + +// Contains the response to a DescribeStacks request. +type DescribeStacksOutput struct { + _ struct{} `type:"structure"` + + // An array of Stack objects that describe the stacks. + Stacks []*Stack `type:"list"` +} + +// String returns the string representation +func (s DescribeStacksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStacksOutput) GoString() string { + return s.String() +} + +type DescribeTimeBasedAutoScalingInput struct { + _ struct{} `type:"structure"` + + // An array of instance IDs. 
+	InstanceIds []*string `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeTimeBasedAutoScalingInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeTimeBasedAutoScalingInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeTimeBasedAutoScalingInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DescribeTimeBasedAutoScalingInput"}
+	if s.InstanceIds == nil {
+		invalidParams.Add(request.NewErrParamRequired("InstanceIds"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Contains the response to a DescribeTimeBasedAutoScaling request.
+type DescribeTimeBasedAutoScalingOutput struct {
+	_ struct{} `type:"structure"`
+
+	// An array of TimeBasedAutoScalingConfiguration objects that describe the configuration
+	// for the specified instances.
+	TimeBasedAutoScalingConfigurations []*TimeBasedAutoScalingConfiguration `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeTimeBasedAutoScalingOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeTimeBasedAutoScalingOutput) GoString() string {
+	return s.String()
+}
+
+type DescribeUserProfilesInput struct {
+	_ struct{} `type:"structure"`
+
+	// An array of IAM user ARNs that identify the users to be described.
+	IamUserArns []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeUserProfilesInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeUserProfilesInput) GoString() string {
+	return s.String()
+}
+
+// Contains the response to a DescribeUserProfiles request.
+type DescribeUserProfilesOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A Users object that describes the specified users.
+	UserProfiles []*UserProfile `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeUserProfilesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeUserProfilesOutput) GoString() string {
+	return s.String()
+}
+
+type DescribeVolumesInput struct {
+	_ struct{} `type:"structure"`
+
+	// The instance ID. If you use this parameter, DescribeVolumes returns descriptions
+	// of the volumes associated with the specified instance.
+	InstanceId *string `type:"string"`
+
+	// The RAID array ID. If you use this parameter, DescribeVolumes returns descriptions
+	// of the volumes associated with the specified RAID array.
+	RaidArrayId *string `type:"string"`
+
+	// A stack ID. The action describes the stack's registered Amazon EBS volumes.
+	StackId *string `type:"string"`
+
+	// An array of volume IDs. If you use this parameter, DescribeVolumes returns
+	// descriptions of the specified volumes. Otherwise, it returns a description
+	// of every volume.
+	VolumeIds []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeVolumesInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeVolumesInput) GoString() string {
+	return s.String()
+}
+
+// Contains the response to a DescribeVolumes request.
+type DescribeVolumesOutput struct {
+	_ struct{} `type:"structure"`
+
+	// An array of Volume objects that describe the specified volumes.
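+	//
+	// Editor's note: hypothetical sketch; the instance ID is a placeholder:
+	//
+	//    out, err := svc.DescribeVolumes(&opsworks.DescribeVolumesInput{
+	//        InstanceId: aws.String("instance-id"),
+	//    })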
+ Volumes []*Volume `type:"list"` +} + +// String returns the string representation +func (s DescribeVolumesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVolumesOutput) GoString() string { + return s.String() +} + +type DetachElasticLoadBalancerInput struct { + _ struct{} `type:"structure"` + + // The Elastic Load Balancing instance's name. + ElasticLoadBalancerName *string `type:"string" required:"true"` + + // The ID of the layer that the Elastic Load Balancing instance is attached + // to. + LayerId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DetachElasticLoadBalancerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachElasticLoadBalancerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DetachElasticLoadBalancerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DetachElasticLoadBalancerInput"} + if s.ElasticLoadBalancerName == nil { + invalidParams.Add(request.NewErrParamRequired("ElasticLoadBalancerName")) + } + if s.LayerId == nil { + invalidParams.Add(request.NewErrParamRequired("LayerId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DetachElasticLoadBalancerOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DetachElasticLoadBalancerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachElasticLoadBalancerOutput) GoString() string { + return s.String() +} + +type DisassociateElasticIpInput struct { + _ struct{} `type:"structure"` + + // The Elastic IP address. + ElasticIp *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DisassociateElasticIpInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateElasticIpInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DisassociateElasticIpInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisassociateElasticIpInput"} + if s.ElasticIp == nil { + invalidParams.Add(request.NewErrParamRequired("ElasticIp")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DisassociateElasticIpOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisassociateElasticIpOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateElasticIpOutput) GoString() string { + return s.String() +} + +// Describes an Amazon EBS volume. This data type maps directly to the Amazon +// EC2 EbsBlockDevice (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html) +// data type. +type EbsBlockDevice struct { + _ struct{} `type:"structure"` + + // Whether the volume is deleted on instance termination. + DeleteOnTermination *bool `type:"boolean"` + + // The number of I/O operations per second (IOPS) that the volume supports. + // For more information, see EbsBlockDevice (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html). + Iops *int64 `type:"integer"` + + // The snapshot ID. 
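+	//
+	// Editor's note: hypothetical sketch of a Provisioned IOPS device built
+	// from this type; all values are placeholders, and Iops is meaningful only
+	// for io1 volumes:
+	//
+	//    ebs := &opsworks.EbsBlockDevice{
+	//        VolumeType:          aws.String("io1"),
+	//        VolumeSize:          aws.Int64(100),
+	//        Iops:                aws.Int64(1000),
+	//        DeleteOnTermination: aws.Bool(true),
+	//    }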
+ SnapshotId *string `type:"string"` + + // The volume size, in GiB. For more information, see EbsBlockDevice (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html). + VolumeSize *int64 `type:"integer"` + + // The volume type. gp2 for General Purpose (SSD) volumes, io1 for Provisioned + // IOPS (SSD) volumes, and standard for Magnetic volumes. + VolumeType *string `type:"string" enum:"VolumeType"` +} + +// String returns the string representation +func (s EbsBlockDevice) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EbsBlockDevice) GoString() string { + return s.String() +} + +// Describes a registered Amazon ECS cluster. +type EcsCluster struct { + _ struct{} `type:"structure"` + + // The cluster's ARN. + EcsClusterArn *string `type:"string"` + + // The cluster name. + EcsClusterName *string `type:"string"` + + // The time and date that the cluster was registered with the stack. + RegisteredAt *string `type:"string"` + + // The stack ID. + StackId *string `type:"string"` +} + +// String returns the string representation +func (s EcsCluster) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EcsCluster) GoString() string { + return s.String() +} + +// Describes an Elastic IP address. +type ElasticIp struct { + _ struct{} `type:"structure"` + + // The domain. + Domain *string `type:"string"` + + // The ID of the instance that the address is attached to. + InstanceId *string `type:"string"` + + // The IP address. + Ip *string `type:"string"` + + // The name. + Name *string `type:"string"` + + // The AWS region. For more information, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html). + Region *string `type:"string"` +} + +// String returns the string representation +func (s ElasticIp) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ElasticIp) GoString() string { + return s.String() +} + +// Describes an Elastic Load Balancing instance. +type ElasticLoadBalancer struct { + _ struct{} `type:"structure"` + + // A list of Availability Zones. + AvailabilityZones []*string `type:"list"` + + // The instance's public DNS name. + DnsName *string `type:"string"` + + // A list of the EC2 instances that the Elastic Load Balancing instance is managing + // traffic for. + Ec2InstanceIds []*string `type:"list"` + + // The Elastic Load Balancing instance's name. + ElasticLoadBalancerName *string `type:"string"` + + // The ID of the layer that the instance is attached to. + LayerId *string `type:"string"` + + // The instance's AWS region. + Region *string `type:"string"` + + // The ID of the stack that the instance is associated with. + StackId *string `type:"string"` + + // A list of subnet IDs, if the stack is running in a VPC. + SubnetIds []*string `type:"list"` + + // The VPC ID. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s ElasticLoadBalancer) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ElasticLoadBalancer) GoString() string { + return s.String() +} + +// Represents an app's environment variable. +type EnvironmentVariable struct { + _ struct{} `type:"structure"` + + // (Required) The environment variable's name, which can consist of up to 64 + // characters and must be specified. 
The name can contain upper- and lowercase + // letters, numbers, and underscores (_), but it must start with a letter or + // underscore. + Key *string `type:"string" required:"true"` + + // (Optional) Whether the variable's value will be returned by the DescribeApps + // action. To conceal an environment variable's value, set Secure to true. DescribeApps + // then returns *****FILTERED***** instead of the actual value. The default + // value for Secure is false. + Secure *bool `type:"boolean"` + + // (Optional) The environment variable's value, which can be left empty. If + // you specify a value, it can contain up to 256 characters, which must all + // be printable. + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s EnvironmentVariable) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnvironmentVariable) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *EnvironmentVariable) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EnvironmentVariable"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetHostnameSuggestionInput struct { + _ struct{} `type:"structure"` + + // The layer ID. + LayerId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetHostnameSuggestionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHostnameSuggestionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetHostnameSuggestionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetHostnameSuggestionInput"} + if s.LayerId == nil { + invalidParams.Add(request.NewErrParamRequired("LayerId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a GetHostnameSuggestion request. +type GetHostnameSuggestionOutput struct { + _ struct{} `type:"structure"` + + // The generated host name. + Hostname *string `type:"string"` + + // The layer ID. + LayerId *string `type:"string"` +} + +// String returns the string representation +func (s GetHostnameSuggestionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHostnameSuggestionOutput) GoString() string { + return s.String() +} + +type GrantAccessInput struct { + _ struct{} `type:"structure"` + + // The instance's AWS OpsWorks ID. + InstanceId *string `type:"string" required:"true"` + + // The length of time (in minutes) that the grant is valid. When the grant expires + // at the end of this period, the user will no longer be able to use the credentials + // to log in. If the user is logged in at the time, he or she automatically + // will be logged out. 
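+	//
+	// Editor's note: hypothetical sketch; the instance ID is a placeholder.
+	// Values below the minimum of 60 are rejected by Validate before any
+	// request is sent:
+	//
+	//    out, err := svc.GrantAccess(&opsworks.GrantAccessInput{
+	//        InstanceId:        aws.String("instance-id"),
+	//        ValidForInMinutes: aws.Int64(60),
+	//    })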
+type GrantAccessInput struct {
+ _ struct{} `type:"structure"`
+
+ // The instance's AWS OpsWorks ID.
+ InstanceId *string `type:"string" required:"true"`
+
+ // The length of time (in minutes) that the grant is valid. When the grant expires
+ // at the end of this period, the user will no longer be able to use the credentials
+ // to log in. If the user is logged in at the time, he or she will automatically
+ // be logged out.
+ ValidForInMinutes *int64 `min:"60" type:"integer"`
+}
+
+// String returns the string representation
+func (s GrantAccessInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GrantAccessInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GrantAccessInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GrantAccessInput"}
+ if s.InstanceId == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceId"))
+ }
+ if s.ValidForInMinutes != nil && *s.ValidForInMinutes < 60 {
+ invalidParams.Add(request.NewErrParamMinValue("ValidForInMinutes", 60))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Contains the response to a GrantAccess request.
+type GrantAccessOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A TemporaryCredential object that contains the data needed by RDP clients,
+ // such as the Microsoft Remote Desktop Connection, to log in to the instance.
+ TemporaryCredential *TemporaryCredential `type:"structure"`
+}
+
+// String returns the string representation
+func (s GrantAccessOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GrantAccessOutput) GoString() string {
+ return s.String()
+}
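GrantAccess wraps the temporary-RDP-credential flow for Windows instances. A hedged sketch reusing the svc client from the sketch above and a hypothetical instance ID; note the `min:"60"` tag on ValidForInMinutes, which the generated Validate method enforces client-side:

    out, err := svc.GrantAccess(&opsworks.GrantAccessInput{
        InstanceId:        aws.String("instance-id-placeholder"), // hypothetical
        ValidForInMinutes: aws.Int64(60),                         // values below 60 fail Validate
    })
    if err != nil {
        log.Fatal(err)
    }
    tc := out.TemporaryCredential
    fmt.Printf("RDP as %s (valid for %d minutes)\n",
        aws.StringValue(tc.Username), aws.Int64Value(tc.ValidForInMinutes))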
+// Describes an instance.
+type Instance struct {
+ _ struct{} `type:"structure"`
+
+ // The agent version. This parameter is set to INHERIT if the instance inherits
+ // the default stack setting or to a version number for a fixed agent version.
+ AgentVersion *string `type:"string"`
+
+ // A custom AMI ID to be used to create the instance. For more information,
+ // see Instances (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html)
+ AmiId *string `type:"string"`
+
+ // The instance architecture: "i386" or "x86_64".
+ Architecture *string `type:"string" enum:"Architecture"`
+
+ // For load-based or time-based instances, the type.
+ AutoScalingType *string `type:"string" enum:"AutoScalingType"`
+
+ // The instance Availability Zone. For more information, see Regions and Endpoints
+ // (http://docs.aws.amazon.com/general/latest/gr/rande.html).
+ AvailabilityZone *string `type:"string"`
+
+ // An array of BlockDeviceMapping objects that specify the instance's block
+ // device mappings.
+ BlockDeviceMappings []*BlockDeviceMapping `type:"list"`
+
+ // The time that the instance was created.
+ CreatedAt *string `type:"string"`
+
+ // Whether this is an Amazon EBS-optimized instance.
+ EbsOptimized *bool `type:"boolean"`
+
+ // The ID of the associated Amazon EC2 instance.
+ Ec2InstanceId *string `type:"string"`
+
+ // For container instances, the Amazon ECS cluster's ARN.
+ EcsClusterArn *string `type:"string"`
+
+ // For container instances, the instance's ARN.
+ EcsContainerInstanceArn *string `type:"string"`
+
+ // The instance Elastic IP address (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html).
+ ElasticIp *string `type:"string"`
+
+ // The instance host name.
+ Hostname *string `type:"string"`
+
+ // For registered instances, the infrastructure class: ec2 or on-premises.
+ InfrastructureClass *string `type:"string"`
+
+ // Whether to install operating system and package updates when the instance
+ // boots. The default value is true. If this value is set to false, you must
+ // then update your instances manually by using CreateDeployment to run the
+ // update_dependencies stack command or by manually running yum (Amazon Linux)
+ // or apt-get (Ubuntu) on the instances.
+ //
+ // We strongly recommend using the default value of true, to ensure that your
+ // instances have the latest security updates.
+ InstallUpdatesOnBoot *bool `type:"boolean"`
+
+ // The instance ID.
+ InstanceId *string `type:"string"`
+
+ // The ARN of the instance's IAM profile. For more information about IAM ARNs,
+ // see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html).
+ InstanceProfileArn *string `type:"string"`
+
+ // The instance type, such as t2.micro.
+ InstanceType *string `type:"string"`
+
+ // The ID of the last service error. For more information, call DescribeServiceErrors.
+ LastServiceErrorId *string `type:"string"`
+
+ // An array containing the instance layer IDs.
+ LayerIds []*string `type:"list"`
+
+ // The instance's operating system.
+ Os *string `type:"string"`
+
+ // The instance's platform.
+ Platform *string `type:"string"`
+
+ // The instance's private DNS name.
+ PrivateDns *string `type:"string"`
+
+ // The instance's private IP address.
+ PrivateIp *string `type:"string"`
+
+ // The instance public DNS name.
+ PublicDns *string `type:"string"`
+
+ // The instance public IP address.
+ PublicIp *string `type:"string"`
+
+ // For registered instances, who performed the registration.
+ RegisteredBy *string `type:"string"`
+
+ // The instance's reported AWS OpsWorks agent version.
+ ReportedAgentVersion *string `type:"string"`
+
+ // For registered instances, the reported operating system.
+ ReportedOs *ReportedOs `type:"structure"`
+
+ // The instance's root device type. For more information, see Storage for the
+ // Root Device (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device).
+ RootDeviceType *string `type:"string" enum:"RootDeviceType"`
+
+ // The root device volume ID.
+ RootDeviceVolumeId *string `type:"string"`
+
+ // An array containing the instance security group IDs.
+ SecurityGroupIds []*string `type:"list"`
+
+ // The SSH key's DSA (Digital Signature Algorithm) fingerprint.
+ SshHostDsaKeyFingerprint *string `type:"string"`
+
+ // The SSH key's RSA fingerprint.
+ SshHostRsaKeyFingerprint *string `type:"string"`
+
+ // The instance's Amazon EC2 key-pair name.
+ SshKeyName *string `type:"string"`
+
+ // The stack ID.
+ StackId *string `type:"string"`
+
+ // The instance status:
+ //
+ // booting
+ //
+ // connection_lost
+ //
+ // online
+ //
+ // pending
+ //
+ // rebooting
+ //
+ // requested
+ //
+ // running_setup
+ //
+ // setup_failed
+ //
+ // shutting_down
+ //
+ // start_failed
+ //
+ // stop_failed
+ //
+ // stopped
+ //
+ // stopping
+ //
+ // terminated
+ //
+ // terminating
+ Status *string `type:"string"`
+
+ // The instance's subnet ID; applicable only if the stack is running in a VPC.
+ SubnetId *string `type:"string"`
+
+ // The instance's tenancy option, such as dedicated or host.
+ Tenancy *string `type:"string"`
+
+ // The instance's virtualization type: paravirtual or hvm.
+ VirtualizationType *string `type:"string" enum:"VirtualizationType"`
+}
+
+// String returns the string representation
+func (s Instance) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Instance) GoString() string {
+ return s.String()
+}
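Because Instance.Status is a plain string rather than an enum-tagged field, callers compare against the literals documented above. A small sketch under the same assumptions as the earlier examples, flagging instances that need attention:

    resp, err := svc.DescribeInstances(&opsworks.DescribeInstancesInput{
        InstanceIds: []*string{aws.String("instance-id-placeholder")}, // hypothetical
    })
    if err != nil {
        log.Fatal(err)
    }
    for _, inst := range resp.Instances {
        switch aws.StringValue(inst.Status) {
        case "online":
            fmt.Println("ready:", aws.StringValue(inst.Hostname))
        case "setup_failed", "start_failed", "stop_failed":
            log.Printf("instance %s needs attention", aws.StringValue(inst.InstanceId))
        }
    }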
+// Contains a description of an Amazon EC2 instance from the Amazon EC2 metadata
+// service. For more information, see Instance Metadata and User Data (http://docs.aws.amazon.com/sdkfornet/latest/apidocs/Index.html).
+type InstanceIdentity struct {
+ _ struct{} `type:"structure"`
+
+ // A JSON document that contains the metadata.
+ Document *string `type:"string"`
+
+ // A signature that can be used to verify the document's accuracy and authenticity.
+ Signature *string `type:"string"`
+}
+
+// String returns the string representation
+func (s InstanceIdentity) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceIdentity) GoString() string {
+ return s.String()
+}
+
+// Describes how many instances a stack has for each status.
+type InstancesCount struct {
+ _ struct{} `type:"structure"`
+
+ // The number of instances in the Assigning state.
+ Assigning *int64 `type:"integer"`
+
+ // The number of instances with booting status.
+ Booting *int64 `type:"integer"`
+
+ // The number of instances with connection_lost status.
+ ConnectionLost *int64 `type:"integer"`
+
+ // The number of instances in the Deregistering state.
+ Deregistering *int64 `type:"integer"`
+
+ // The number of instances with online status.
+ Online *int64 `type:"integer"`
+
+ // The number of instances with pending status.
+ Pending *int64 `type:"integer"`
+
+ // The number of instances with rebooting status.
+ Rebooting *int64 `type:"integer"`
+
+ // The number of instances in the Registered state.
+ Registered *int64 `type:"integer"`
+
+ // The number of instances in the Registering state.
+ Registering *int64 `type:"integer"`
+
+ // The number of instances with requested status.
+ Requested *int64 `type:"integer"`
+
+ // The number of instances with running_setup status.
+ RunningSetup *int64 `type:"integer"`
+
+ // The number of instances with setup_failed status.
+ SetupFailed *int64 `type:"integer"`
+
+ // The number of instances with shutting_down status.
+ ShuttingDown *int64 `type:"integer"`
+
+ // The number of instances with start_failed status.
+ StartFailed *int64 `type:"integer"`
+
+ // The number of instances with stopped status.
+ Stopped *int64 `type:"integer"`
+
+ // The number of instances with stopping status.
+ Stopping *int64 `type:"integer"`
+
+ // The number of instances with terminated status.
+ Terminated *int64 `type:"integer"`
+
+ // The number of instances with terminating status.
+ Terminating *int64 `type:"integer"`
+
+ // The number of instances in the Unassigning state.
+ Unassigning *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s InstancesCount) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstancesCount) GoString() string {
+ return s.String()
+}
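InstancesCount is returned inside a StackSummary by DescribeStackSummary. A sketch under the same assumptions (hypothetical client and stack ID; nil checks elided) reporting a few common states:

    sum, err := svc.DescribeStackSummary(&opsworks.DescribeStackSummaryInput{
        StackId: aws.String("stack-id-placeholder"), // hypothetical
    })
    if err != nil {
        log.Fatal(err)
    }
    c := sum.StackSummary.InstancesCount
    fmt.Printf("online=%d booting=%d stopped=%d\n",
        aws.Int64Value(c.Online), aws.Int64Value(c.Booting), aws.Int64Value(c.Stopped))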
+// Describes a layer.
+type Layer struct {
+ _ struct{} `type:"structure"`
+
+ // The layer attributes.
+ //
+ // For the HaproxyStatsPassword, MysqlRootPassword, and GangliaPassword attributes,
+ // AWS OpsWorks returns *****FILTERED***** instead of the actual value.
+ //
+ // For an ECS Cluster layer, AWS OpsWorks sets the EcsClusterArn attribute
+ // to the cluster's ARN.
+ Attributes map[string]*string `type:"map"`
+
+ // Whether to automatically assign an Elastic IP address (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html)
+ // to the layer's instances. For more information, see How to Edit a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-edit.html).
+ AutoAssignElasticIps *bool `type:"boolean"`
+
+ // For stacks that are running in a VPC, whether to automatically assign a public
+ // IP address to the layer's instances. For more information, see How to Edit
+ // a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-edit.html).
+ AutoAssignPublicIps *bool `type:"boolean"`
+
+ // The date when the layer was created.
+ CreatedAt *string `type:"string"`
+
+ // The ARN of the default IAM profile to be used for the layer's EC2 instances.
+ // For more information about IAM ARNs, see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html).
+ CustomInstanceProfileArn *string `type:"string"`
+
+ // A JSON formatted string containing the layer's custom stack configuration
+ // and deployment attributes.
+ CustomJson *string `type:"string"`
+
+ // A LayerCustomRecipes object that specifies the layer's custom recipes.
+ CustomRecipes *Recipes `type:"structure"`
+
+ // An array containing the layer's custom security group IDs.
+ CustomSecurityGroupIds []*string `type:"list"`
+
+ // AWS OpsWorks supports five lifecycle events: setup, configuration, deploy,
+ // undeploy, and shutdown. For each layer, AWS OpsWorks runs a set of standard
+ // recipes for each event. In addition, you can provide custom recipes for any
+ // or all layers and events. AWS OpsWorks runs custom event recipes after the
+ // standard recipes. LayerCustomRecipes specifies the custom recipes for a particular
+ // layer to be run in response to each of the five events.
+ //
+ // To specify a recipe, use the cookbook's directory name in the repository
+ // followed by two colons and the recipe name, which is the recipe's file name
+ // without the .rb extension. For example: phpapp2::dbsetup specifies the dbsetup.rb
+ // recipe in the repository's phpapp2 folder.
+ DefaultRecipes *Recipes `type:"structure"`
+
+ // An array containing the layer's security group names.
+ DefaultSecurityGroupNames []*string `type:"list"`
+
+ // Whether auto healing is enabled for the layer.
+ EnableAutoHealing *bool `type:"boolean"`
+
+ // Whether to install operating system and package updates when the instance
+ // boots. The default value is true. If this value is set to false, you must
+ // then update your instances manually by using CreateDeployment to run the
+ // update_dependencies stack command or manually running yum (Amazon Linux)
+ // or apt-get (Ubuntu) on the instances.
+ //
+ // We strongly recommend using the default value of true, to ensure that your
+ // instances have the latest security updates.
+ InstallUpdatesOnBoot *bool `type:"boolean"`
+
+ // The layer ID.
+ LayerId *string `type:"string"`
+
+ // A LifeCycleEventConfiguration object that specifies the Shutdown event configuration.
+ LifecycleEventConfiguration *LifecycleEventConfiguration `type:"structure"`
+
+ // The layer name.
+ Name *string `type:"string"`
+
+ // An array of Package objects that describe the layer's packages.
+ Packages []*string `type:"list"`
+
+ // The layer short name.
+ Shortname *string `type:"string"`
+
+ // The layer stack ID.
+ StackId *string `type:"string"`
+
+ // The layer type.
+ Type *string `type:"string" enum:"LayerType"`
+
+ // Whether the layer uses Amazon EBS-optimized instances.
+ UseEbsOptimizedInstances *bool `type:"boolean"`
+
+ // A VolumeConfigurations object that describes the layer's Amazon EBS volumes.
+ VolumeConfigurations []*VolumeConfiguration `type:"list"`
+}
+
+// String returns the string representation
+func (s Layer) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Layer) GoString() string {
+ return s.String()
+}
+
+// Specifies the lifecycle event configuration.
+type LifecycleEventConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // A ShutdownEventConfiguration object that specifies the Shutdown event configuration.
+ Shutdown *ShutdownEventConfiguration `type:"structure"`
+}
+
+// String returns the string representation
+func (s LifecycleEventConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LifecycleEventConfiguration) GoString() string {
+ return s.String()
+}
+
+// Describes a layer's load-based auto scaling configuration.
+type LoadBasedAutoScalingConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // An AutoScalingThresholds object that describes the downscaling configuration,
+ // which defines how and when AWS OpsWorks reduces the number of instances.
+ DownScaling *AutoScalingThresholds `type:"structure"`
+
+ // Whether load-based auto scaling is enabled for the layer.
+ Enable *bool `type:"boolean"`
+
+ // The layer ID.
+ LayerId *string `type:"string"`
+
+ // An AutoScalingThresholds object that describes the upscaling configuration,
+ // which defines how and when AWS OpsWorks increases the number of instances.
+ UpScaling *AutoScalingThresholds `type:"structure"`
+}
+
+// String returns the string representation
+func (s LoadBasedAutoScalingConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LoadBasedAutoScalingConfiguration) GoString() string {
+ return s.String()
+}
+
+// Describes stack or user permissions.
+type Permission struct {
+ _ struct{} `type:"structure"`
+
+ // Whether the user can use SSH.
+ AllowSsh *bool `type:"boolean"`
+
+ // Whether the user can use sudo.
+ AllowSudo *bool `type:"boolean"`
+
+ // The Amazon Resource Name (ARN) for an AWS Identity and Access Management
+ // (IAM) role. For more information about IAM ARNs, see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html).
+ IamUserArn *string `type:"string"`
+
+ // The user's permission level, which must be one of the following:
+ //
+ // deny
+ //
+ // show
+ //
+ // deploy
+ //
+ // manage
+ //
+ // iam_only
+ //
+ // For more information on the permissions associated with these levels,
+ // see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
+ Level *string `type:"string"`
+
+ // A stack ID.
+ StackId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Permission) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Permission) GoString() string {
+ return s.String()
+}
+
+// Describes an instance's RAID array.
+type RaidArray struct {
+ _ struct{} `type:"structure"`
+
+ // The array's Availability Zone. For more information, see Regions and Endpoints
+ // (http://docs.aws.amazon.com/general/latest/gr/rande.html).
+ AvailabilityZone *string `type:"string"` + + // When the RAID array was created. + CreatedAt *string `type:"string"` + + // The array's Linux device. For example /dev/mdadm0. + Device *string `type:"string"` + + // The instance ID. + InstanceId *string `type:"string"` + + // For PIOPS volumes, the IOPS per disk. + Iops *int64 `type:"integer"` + + // The array's mount point. + MountPoint *string `type:"string"` + + // The array name. + Name *string `type:"string"` + + // The number of disks in the array. + NumberOfDisks *int64 `type:"integer"` + + // The array ID. + RaidArrayId *string `type:"string"` + + // The RAID level (http://en.wikipedia.org/wiki/Standard_RAID_levels). + RaidLevel *int64 `type:"integer"` + + // The array's size. + Size *int64 `type:"integer"` + + // The stack ID. + StackId *string `type:"string"` + + // The volume type, standard or PIOPS. + VolumeType *string `type:"string"` +} + +// String returns the string representation +func (s RaidArray) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RaidArray) GoString() string { + return s.String() +} + +// Describes an Amazon RDS instance. +type RdsDbInstance struct { + _ struct{} `type:"structure"` + + // The instance's address. + Address *string `type:"string"` + + // The DB instance identifier. + DbInstanceIdentifier *string `type:"string"` + + // AWS OpsWorks returns *****FILTERED***** instead of the actual value. + DbPassword *string `type:"string"` + + // The master user name. + DbUser *string `type:"string"` + + // The instance's database engine. + Engine *string `type:"string"` + + // Set to true if AWS OpsWorks was unable to discover the Amazon RDS instance. + // AWS OpsWorks attempts to discover the instance only once. If this value is + // set to true, you must deregister the instance and then register it again. + MissingOnRds *bool `type:"boolean"` + + // The instance's ARN. + RdsDbInstanceArn *string `type:"string"` + + // The instance's AWS region. + Region *string `type:"string"` + + // The ID of the stack that the instance is registered with. + StackId *string `type:"string"` +} + +// String returns the string representation +func (s RdsDbInstance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RdsDbInstance) GoString() string { + return s.String() +} + +type RebootInstanceInput struct { + _ struct{} `type:"structure"` + + // The instance ID. + InstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RebootInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebootInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *RebootInstanceInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RebootInstanceInput"}
+ if s.InstanceId == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type RebootInstanceOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s RebootInstanceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RebootInstanceOutput) GoString() string {
+ return s.String()
+}
+
+// AWS OpsWorks supports five lifecycle events: setup, configuration, deploy,
+// undeploy, and shutdown. For each layer, AWS OpsWorks runs a set of standard
+// recipes for each event. In addition, you can provide custom recipes for any
+// or all layers and events. AWS OpsWorks runs custom event recipes after the
+// standard recipes. LayerCustomRecipes specifies the custom recipes for a particular
+// layer to be run in response to each of the five events.
+//
+// To specify a recipe, use the cookbook's directory name in the repository
+// followed by two colons and the recipe name, which is the recipe's file name
+// without the .rb extension. For example: phpapp2::dbsetup specifies the dbsetup.rb
+// recipe in the repository's phpapp2 folder.
+type Recipes struct {
+ _ struct{} `type:"structure"`
+
+ // An array of custom recipe names to be run following a configure event.
+ Configure []*string `type:"list"`
+
+ // An array of custom recipe names to be run following a deploy event.
+ Deploy []*string `type:"list"`
+
+ // An array of custom recipe names to be run following a setup event.
+ Setup []*string `type:"list"`
+
+ // An array of custom recipe names to be run following a shutdown event.
+ Shutdown []*string `type:"list"`
+
+ // An array of custom recipe names to be run following an undeploy event.
+ Undeploy []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s Recipes) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Recipes) GoString() string {
+ return s.String()
+}
+
+type RegisterEcsClusterInput struct {
+ _ struct{} `type:"structure"`
+
+ // The cluster's ARN.
+ EcsClusterArn *string `type:"string" required:"true"`
+
+ // The stack ID.
+ StackId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RegisterEcsClusterInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterEcsClusterInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RegisterEcsClusterInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RegisterEcsClusterInput"}
+ if s.EcsClusterArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("EcsClusterArn"))
+ }
+ if s.StackId == nil {
+ invalidParams.Add(request.NewErrParamRequired("StackId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Contains the response to a RegisterEcsCluster request.
+type RegisterEcsClusterOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The cluster's ARN.
+ EcsClusterArn *string `type:"string"`
+}
+
+// String returns the string representation
+func (s RegisterEcsClusterOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterEcsClusterOutput) GoString() string {
+ return s.String()
+}
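Both RegisterEcsClusterInput fields carry `required:"true"`, so an omitted StackId or cluster ARN is rejected by Validate before the request is signed. A hedged sketch with placeholder identifiers, continuing the earlier examples:

    reg, err := svc.RegisterEcsCluster(&opsworks.RegisterEcsClusterInput{
        EcsClusterArn: aws.String("arn:aws:ecs:us-east-1:123456789012:cluster/placeholder"), // hypothetical
        StackId:       aws.String("stack-id-placeholder"),                                   // hypothetical
    })
    if err != nil {
        log.Fatal(err) // includes client-side ErrInvalidParams for missing fields
    }
    fmt.Println("registered:", aws.StringValue(reg.EcsClusterArn))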
+type RegisterElasticIpInput struct {
+ _ struct{} `type:"structure"`
+
+ // The Elastic IP address.
+ ElasticIp *string `type:"string" required:"true"`
+
+ // The stack ID.
+ StackId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RegisterElasticIpInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterElasticIpInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RegisterElasticIpInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RegisterElasticIpInput"}
+ if s.ElasticIp == nil {
+ invalidParams.Add(request.NewErrParamRequired("ElasticIp"))
+ }
+ if s.StackId == nil {
+ invalidParams.Add(request.NewErrParamRequired("StackId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Contains the response to a RegisterElasticIp request.
+type RegisterElasticIpOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The Elastic IP address.
+ ElasticIp *string `type:"string"`
+}
+
+// String returns the string representation
+func (s RegisterElasticIpOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterElasticIpOutput) GoString() string {
+ return s.String()
+}
+
+type RegisterInstanceInput struct {
+ _ struct{} `type:"structure"`
+
+ // The instance's hostname.
+ Hostname *string `type:"string"`
+
+ // An InstanceIdentity object that contains the instance's identity.
+ InstanceIdentity *InstanceIdentity `type:"structure"`
+
+ // The instance's private IP address.
+ PrivateIp *string `type:"string"`
+
+ // The instance's public IP address.
+ PublicIp *string `type:"string"`
+
+ // The instance's public RSA key. This key is used to encrypt communication between
+ // the instance and the service.
+ RsaPublicKey *string `type:"string"`
+
+ // The instance's public RSA key fingerprint.
+ RsaPublicKeyFingerprint *string `type:"string"`
+
+ // The ID of the stack that the instance is to be registered with.
+ StackId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RegisterInstanceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterInstanceInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RegisterInstanceInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RegisterInstanceInput"}
+ if s.StackId == nil {
+ invalidParams.Add(request.NewErrParamRequired("StackId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Contains the response to a RegisterInstance request.
+type RegisterInstanceOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The registered instance's AWS OpsWorks ID.
+ InstanceId *string `type:"string"` +} + +// String returns the string representation +func (s RegisterInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterInstanceOutput) GoString() string { + return s.String() +} + +type RegisterRdsDbInstanceInput struct { + _ struct{} `type:"structure"` + + // The database password. + DbPassword *string `type:"string" required:"true"` + + // The database's master user name. + DbUser *string `type:"string" required:"true"` + + // The Amazon RDS instance's ARN. + RdsDbInstanceArn *string `type:"string" required:"true"` + + // The stack ID. + StackId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RegisterRdsDbInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterRdsDbInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RegisterRdsDbInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RegisterRdsDbInstanceInput"} + if s.DbPassword == nil { + invalidParams.Add(request.NewErrParamRequired("DbPassword")) + } + if s.DbUser == nil { + invalidParams.Add(request.NewErrParamRequired("DbUser")) + } + if s.RdsDbInstanceArn == nil { + invalidParams.Add(request.NewErrParamRequired("RdsDbInstanceArn")) + } + if s.StackId == nil { + invalidParams.Add(request.NewErrParamRequired("StackId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RegisterRdsDbInstanceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RegisterRdsDbInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterRdsDbInstanceOutput) GoString() string { + return s.String() +} + +type RegisterVolumeInput struct { + _ struct{} `type:"structure"` + + // The Amazon EBS volume ID. + Ec2VolumeId *string `type:"string"` + + // The stack ID. + StackId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RegisterVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterVolumeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RegisterVolumeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RegisterVolumeInput"} + if s.StackId == nil { + invalidParams.Add(request.NewErrParamRequired("StackId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a RegisterVolume request. +type RegisterVolumeOutput struct { + _ struct{} `type:"structure"` + + // The volume ID. + VolumeId *string `type:"string"` +} + +// String returns the string representation +func (s RegisterVolumeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterVolumeOutput) GoString() string { + return s.String() +} + +// A registered instance's reported operating system. +type ReportedOs struct { + _ struct{} `type:"structure"` + + // The operating system family. + Family *string `type:"string"` + + // The operating system name. + Name *string `type:"string"` + + // The operating system version. 
+ Version *string `type:"string"` +} + +// String returns the string representation +func (s ReportedOs) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReportedOs) GoString() string { + return s.String() +} + +// Describes a user's SSH information. +type SelfUserProfile struct { + _ struct{} `type:"structure"` + + // The user's IAM ARN. + IamUserArn *string `type:"string"` + + // The user's name. + Name *string `type:"string"` + + // The user's SSH public key. + SshPublicKey *string `type:"string"` + + // The user's SSH user name. + SshUsername *string `type:"string"` +} + +// String returns the string representation +func (s SelfUserProfile) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SelfUserProfile) GoString() string { + return s.String() +} + +// Describes an AWS OpsWorks service error. +type ServiceError struct { + _ struct{} `type:"structure"` + + // When the error occurred. + CreatedAt *string `type:"string"` + + // The instance ID. + InstanceId *string `type:"string"` + + // A message that describes the error. + Message *string `type:"string"` + + // The error ID. + ServiceErrorId *string `type:"string"` + + // The stack ID. + StackId *string `type:"string"` + + // The error type. + Type *string `type:"string"` +} + +// String returns the string representation +func (s ServiceError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServiceError) GoString() string { + return s.String() +} + +type SetLoadBasedAutoScalingInput struct { + _ struct{} `type:"structure"` + + // An AutoScalingThresholds object with the downscaling threshold configuration. + // If the load falls below these thresholds for a specified amount of time, + // AWS OpsWorks stops a specified number of instances. + DownScaling *AutoScalingThresholds `type:"structure"` + + // Enables load-based auto scaling for the layer. + Enable *bool `type:"boolean"` + + // The layer ID. + LayerId *string `type:"string" required:"true"` + + // An AutoScalingThresholds object with the upscaling threshold configuration. + // If the load exceeds these thresholds for a specified amount of time, AWS + // OpsWorks starts a specified number of instances. + UpScaling *AutoScalingThresholds `type:"structure"` +} + +// String returns the string representation +func (s SetLoadBasedAutoScalingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetLoadBasedAutoScalingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *SetLoadBasedAutoScalingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetLoadBasedAutoScalingInput"} + if s.LayerId == nil { + invalidParams.Add(request.NewErrParamRequired("LayerId")) + } + if s.DownScaling != nil { + if err := s.DownScaling.Validate(); err != nil { + invalidParams.AddNested("DownScaling", err.(request.ErrInvalidParams)) + } + } + if s.UpScaling != nil { + if err := s.UpScaling.Validate(); err != nil { + invalidParams.AddNested("UpScaling", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type SetLoadBasedAutoScalingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetLoadBasedAutoScalingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetLoadBasedAutoScalingOutput) GoString() string { + return s.String() +} + +type SetPermissionInput struct { + _ struct{} `type:"structure"` + + // The user is allowed to use SSH to communicate with the instance. + AllowSsh *bool `type:"boolean"` + + // The user is allowed to use sudo to elevate privileges. + AllowSudo *bool `type:"boolean"` + + // The user's IAM ARN. + IamUserArn *string `type:"string" required:"true"` + + // The user's permission level, which must be set to one of the following strings. + // You cannot set your own permissions level. + // + // deny + // + // show + // + // deploy + // + // manage + // + // iam_only + // + // For more information on the permissions associated with these levels, + // see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). + Level *string `type:"string"` + + // The stack ID. + StackId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetPermissionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetPermissionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetPermissionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetPermissionInput"} + if s.IamUserArn == nil { + invalidParams.Add(request.NewErrParamRequired("IamUserArn")) + } + if s.StackId == nil { + invalidParams.Add(request.NewErrParamRequired("StackId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type SetPermissionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetPermissionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetPermissionOutput) GoString() string { + return s.String() +} + +type SetTimeBasedAutoScalingInput struct { + _ struct{} `type:"structure"` + + // An AutoScalingSchedule with the instance schedule. + AutoScalingSchedule *WeeklyAutoScalingSchedule `type:"structure"` + + // The instance ID. + InstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetTimeBasedAutoScalingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetTimeBasedAutoScalingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *SetTimeBasedAutoScalingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetTimeBasedAutoScalingInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type SetTimeBasedAutoScalingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetTimeBasedAutoScalingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetTimeBasedAutoScalingOutput) GoString() string { + return s.String() +} + +// The Shutdown event configuration. +type ShutdownEventConfiguration struct { + _ struct{} `type:"structure"` + + // Whether to enable Elastic Load Balancing connection draining. For more information, + // see Connection Draining (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/TerminologyandKeyConcepts.html#conn-drain) + DelayUntilElbConnectionsDrained *bool `type:"boolean"` + + // The time, in seconds, that AWS OpsWorks will wait after triggering a Shutdown + // event before shutting down an instance. + ExecutionTimeout *int64 `type:"integer"` +} + +// String returns the string representation +func (s ShutdownEventConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ShutdownEventConfiguration) GoString() string { + return s.String() +} + +// Contains the information required to retrieve an app or cookbook from a repository. +// For more information, see Creating Apps (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html) +// or Custom Recipes and Cookbooks (http://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook.html). +type Source struct { + _ struct{} `type:"structure"` + + // When included in a request, the parameter depends on the repository type. + // + // For Amazon S3 bundles, set Password to the appropriate IAM secret access + // key. + // + // For HTTP bundles and Subversion repositories, set Password to the password. + // + // For more information on how to safely handle IAM credentials, see http://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html + // (http://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html). + // + // In responses, AWS OpsWorks returns *****FILTERED***** instead of the actual + // value. + Password *string `type:"string"` + + // The application's version. AWS OpsWorks enables you to easily deploy new + // versions of an application. One of the simplest approaches is to have branches + // or revisions in your repository that represent different versions that can + // potentially be deployed. + Revision *string `type:"string"` + + // In requests, the repository's SSH key. + // + // In responses, AWS OpsWorks returns *****FILTERED***** instead of the actual + // value. + SshKey *string `type:"string"` + + // The repository type. + Type *string `type:"string" enum:"SourceType"` + + // The source URL. + Url *string `type:"string"` + + // This parameter depends on the repository type. + // + // For Amazon S3 bundles, set Username to the appropriate IAM access key + // ID. + // + // For HTTP bundles, Git repositories, and Subversion repositories, set Username + // to the user name. 
+ Username *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Source) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Source) GoString() string {
+ return s.String()
+}
+
+// Describes an app's SSL configuration.
+type SslConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // The contents of the certificate's domain.crt file.
+ Certificate *string `type:"string" required:"true"`
+
+ // Optional. Can be used to specify an intermediate certificate authority key
+ // or client authentication.
+ Chain *string `type:"string"`
+
+ // The private key; the contents of the certificate's domain.key file.
+ PrivateKey *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s SslConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SslConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SslConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "SslConfiguration"}
+ if s.Certificate == nil {
+ invalidParams.Add(request.NewErrParamRequired("Certificate"))
+ }
+ if s.PrivateKey == nil {
+ invalidParams.Add(request.NewErrParamRequired("PrivateKey"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Describes a stack.
+type Stack struct {
+ _ struct{} `type:"structure"`
+
+ // The agent version. This parameter is set to LATEST for auto-update or to
+ // a version number for a fixed agent version.
+ AgentVersion *string `type:"string"`
+
+ // The stack's ARN.
+ Arn *string `type:"string"`
+
+ // The stack's attributes.
+ Attributes map[string]*string `type:"map"`
+
+ // A ChefConfiguration object that specifies whether to enable Berkshelf and
+ // the Berkshelf version. For more information, see Create a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html).
+ ChefConfiguration *ChefConfiguration `type:"structure"`
+
+ // The configuration manager.
+ ConfigurationManager *StackConfigurationManager `type:"structure"`
+
+ // The date when the stack was created.
+ CreatedAt *string `type:"string"`
+
+ // Contains the information required to retrieve an app or cookbook from a repository.
+ // For more information, see Creating Apps (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html)
+ // or Custom Recipes and Cookbooks (http://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook.html).
+ CustomCookbooksSource *Source `type:"structure"`
+
+ // A JSON object that contains user-defined attributes to be added to the stack
+ // configuration and deployment attributes. You can use custom JSON to override
+ // the corresponding default stack configuration attribute values or to pass
+ // data to recipes. The string should be in the following format and must escape
+ // characters such as '"':
+ //
+ // "{\"key1\": \"value1\", \"key2\": \"value2\",...}"
+ //
+ // For more information on custom JSON, see Use Custom JSON to Modify the Stack
+ // Configuration Attributes (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-json.html).
+ CustomJson *string `type:"string"`
+
+ // The stack's default Availability Zone. For more information, see Regions
+ // and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html).
+ DefaultAvailabilityZone *string `type:"string"` + + // The ARN of an IAM profile that is the default profile for all of the stack's + // EC2 instances. For more information about IAM ARNs, see Using Identifiers + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). + DefaultInstanceProfileArn *string `type:"string"` + + // The stack's default operating system. + DefaultOs *string `type:"string"` + + // The default root device type. This value is used by default for all instances + // in the stack, but you can override it when you create an instance. For more + // information, see Storage for the Root Device (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device). + DefaultRootDeviceType *string `type:"string" enum:"RootDeviceType"` + + // A default Amazon EC2 key pair for the stack's instances. You can override + // this value when you create or update an instance. + DefaultSshKeyName *string `type:"string"` + + // The default subnet ID; applicable only if the stack is running in a VPC. + DefaultSubnetId *string `type:"string"` + + // The stack host name theme, with spaces replaced by underscores. + HostnameTheme *string `type:"string"` + + // The stack name. + Name *string `type:"string"` + + // The stack AWS region, such as "us-east-1". For more information about AWS + // regions, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html). + Region *string `type:"string"` + + // The stack AWS Identity and Access Management (IAM) role. + ServiceRoleArn *string `type:"string"` + + // The stack ID. + StackId *string `type:"string"` + + // Whether the stack uses custom cookbooks. + UseCustomCookbooks *bool `type:"boolean"` + + // Whether the stack automatically associates the AWS OpsWorks built-in security + // groups with the stack's layers. + UseOpsworksSecurityGroups *bool `type:"boolean"` + + // The VPC ID; applicable only if the stack is running in a VPC. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s Stack) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Stack) GoString() string { + return s.String() +} + +// Describes the configuration manager. +type StackConfigurationManager struct { + _ struct{} `type:"structure"` + + // The name. This parameter must be set to "Chef". + Name *string `type:"string"` + + // The Chef version. This parameter must be set to 12, 11.10, or 11.4 for Linux + // stacks, and to 12.2 for Windows stacks. The default value for Linux stacks + // is 11.4. + Version *string `type:"string"` +} + +// String returns the string representation +func (s StackConfigurationManager) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StackConfigurationManager) GoString() string { + return s.String() +} + +// Summarizes the number of layers, instances, and apps in a stack. +type StackSummary struct { + _ struct{} `type:"structure"` + + // The number of apps. + AppsCount *int64 `type:"integer"` + + // The stack's ARN. + Arn *string `type:"string"` + + // An InstancesCount object with the number of instances in each status. + InstancesCount *InstancesCount `type:"structure"` + + // The number of layers. + LayersCount *int64 `type:"integer"` + + // The stack name. + Name *string `type:"string"` + + // The stack ID. 
+ StackId *string `type:"string"` +} + +// String returns the string representation +func (s StackSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StackSummary) GoString() string { + return s.String() +} + +type StartInstanceInput struct { + _ struct{} `type:"structure"` + + // The instance ID. + InstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s StartInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartInstanceInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type StartInstanceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s StartInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartInstanceOutput) GoString() string { + return s.String() +} + +type StartStackInput struct { + _ struct{} `type:"structure"` + + // The stack ID. + StackId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s StartStackInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartStackInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartStackInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartStackInput"} + if s.StackId == nil { + invalidParams.Add(request.NewErrParamRequired("StackId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type StartStackOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s StartStackOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartStackOutput) GoString() string { + return s.String() +} + +type StopInstanceInput struct { + _ struct{} `type:"structure"` + + // The instance ID. + InstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s StopInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *StopInstanceInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "StopInstanceInput"}
+ if s.InstanceId == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type StopInstanceOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s StopInstanceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StopInstanceOutput) GoString() string {
+ return s.String()
+}
+
+type StopStackInput struct {
+ _ struct{} `type:"structure"`
+
+ // The stack ID.
+ StackId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s StopStackInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StopStackInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *StopStackInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "StopStackInput"}
+ if s.StackId == nil {
+ invalidParams.Add(request.NewErrParamRequired("StackId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type StopStackOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s StopStackOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StopStackOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the data needed by RDP clients such as the Microsoft Remote Desktop
+// Connection to log in to the instance.
+type TemporaryCredential struct {
+ _ struct{} `type:"structure"`
+
+ // The instance's AWS OpsWorks ID.
+ InstanceId *string `type:"string"`
+
+ // The password.
+ Password *string `type:"string"`
+
+ // The user name.
+ Username *string `type:"string"`
+
+ // The length of time (in minutes) that the grant is valid. When the grant expires
+ // at the end of this period, the user will no longer be able to use the credentials
+ // to log in. If they are logged in at the time, they will be automatically
+ // logged out.
+ ValidForInMinutes *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s TemporaryCredential) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TemporaryCredential) GoString() string {
+ return s.String()
+}
+
+// Describes an instance's time-based auto scaling configuration.
+type TimeBasedAutoScalingConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // A WeeklyAutoScalingSchedule object with the instance schedule.
+ AutoScalingSchedule *WeeklyAutoScalingSchedule `type:"structure"`
+
+ // The instance ID.
+ InstanceId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s TimeBasedAutoScalingConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TimeBasedAutoScalingConfiguration) GoString() string {
+ return s.String()
+}
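SetTimeBasedAutoScalingInput takes the WeeklyAutoScalingSchedule type defined elsewhere in this file; assuming its documented shape (per-day maps from a UTC hour key such as "0" through "23" to "on"), a sketch that keeps a hypothetical instance running during part of Monday:

    _, err := svc.SetTimeBasedAutoScaling(&opsworks.SetTimeBasedAutoScalingInput{
        InstanceId: aws.String("instance-id-placeholder"), // hypothetical
        AutoScalingSchedule: &opsworks.WeeklyAutoScalingSchedule{
            // Assumed schedule shape: hour-of-day keys, "on" values.
            Monday: map[string]*string{
                "9":  aws.String("on"),
                "10": aws.String("on"),
                "11": aws.String("on"),
            },
        },
    })
    if err != nil {
        log.Fatal(err)
    }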
+type UnassignInstanceInput struct {
+ _ struct{} `type:"structure"`
+
+ // The instance ID.
+ InstanceId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UnassignInstanceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UnassignInstanceInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UnassignInstanceInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UnassignInstanceInput"}
+ if s.InstanceId == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type UnassignInstanceOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s UnassignInstanceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UnassignInstanceOutput) GoString() string {
+ return s.String()
+}
+
+type UnassignVolumeInput struct {
+ _ struct{} `type:"structure"`
+
+ // The volume ID.
+ VolumeId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UnassignVolumeInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UnassignVolumeInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UnassignVolumeInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UnassignVolumeInput"}
+ if s.VolumeId == nil {
+ invalidParams.Add(request.NewErrParamRequired("VolumeId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type UnassignVolumeOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s UnassignVolumeOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UnassignVolumeOutput) GoString() string {
+ return s.String()
+}
+
+type UpdateAppInput struct {
+ _ struct{} `type:"structure"`
+
+ // The app ID.
+ AppId *string `type:"string" required:"true"`
+
+ // A Source object that specifies the app repository.
+ AppSource *Source `type:"structure"`
+
+ // One or more user-defined key/value pairs to be added to the stack attributes.
+ Attributes map[string]*string `type:"map"`
+
+ // The app's data sources.
+ DataSources []*DataSource `type:"list"`
+
+ // A description of the app.
+ Description *string `type:"string"`
+
+ // The app's virtual host settings, with multiple domains separated by commas.
+ // For example: 'www.example.com, example.com'
+ Domains []*string `type:"list"`
+
+ // Whether SSL is enabled for the app.
+ EnableSsl *bool `type:"boolean"`
+
+ // An array of EnvironmentVariable objects that specify environment variables
+ // to be associated with the app. After you deploy the app, these variables
+ // are defined on the associated app server instances. For more information,
+ // see Environment Variables (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html#workingapps-creating-environment).
+ //
+ // There is no specific limit on the number of environment variables. However,
+ // the size of the associated data structure - which includes the variables'
+ // names, values, and protected flag values - cannot exceed 10 KB (10240 Bytes).
+ // This limit should accommodate most if not all use cases.
Exceeding it will + // cause an exception with the message, "Environment: is too large (maximum + // is 10KB)." + // + // This parameter is supported only by Chef 11.10 stacks. If you have specified + // one or more environment variables, you cannot modify the stack's Chef version. + Environment []*EnvironmentVariable `type:"list"` + + // The app name. + Name *string `type:"string"` + + // An SslConfiguration object with the SSL configuration. + SslConfiguration *SslConfiguration `type:"structure"` + + // The app type. + Type *string `type:"string" enum:"AppType"` +} + +// String returns the string representation +func (s UpdateAppInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAppInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateAppInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateAppInput"} + if s.AppId == nil { + invalidParams.Add(request.NewErrParamRequired("AppId")) + } + if s.Environment != nil { + for i, v := range s.Environment { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Environment", i), err.(request.ErrInvalidParams)) + } + } + } + if s.SslConfiguration != nil { + if err := s.SslConfiguration.Validate(); err != nil { + invalidParams.AddNested("SslConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateAppOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateAppOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAppOutput) GoString() string { + return s.String() +} + +type UpdateElasticIpInput struct { + _ struct{} `type:"structure"` + + // The address. + ElasticIp *string `type:"string" required:"true"` + + // The new name. + Name *string `type:"string"` +} + +// String returns the string representation +func (s UpdateElasticIpInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateElasticIpInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateElasticIpInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateElasticIpInput"} + if s.ElasticIp == nil { + invalidParams.Add(request.NewErrParamRequired("ElasticIp")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateElasticIpOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateElasticIpOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateElasticIpOutput) GoString() string { + return s.String() +} + +type UpdateInstanceInput struct { + _ struct{} `type:"structure"` + + // The default AWS OpsWorks agent version. You have the following options: + // + // INHERIT - Use the stack's default agent version setting. + // + // version_number - Use the specified agent version. This value overrides + // the stack's default setting. To update the agent version, you must edit the + // instance configuration and specify a new version. AWS OpsWorks then automatically + // installs that version on the instance. 
+ // + // The default setting is INHERIT. To specify an agent version, you must + // use the complete version number, not the abbreviated number shown on the + // console. For a list of available agent version numbers, call DescribeAgentVersions. + AgentVersion *string `type:"string"` + + // A custom AMI ID to be used to create the instance. The AMI must be based + // on one of the supported operating systems. For more information, see Instances + // (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html) + // + // If you specify a custom AMI, you must set Os to Custom. + AmiId *string `type:"string"` + + // The instance architecture. Instance types do not necessarily support both + // architectures. For a list of the architectures that are supported by the + // different instance types, see Instance Families and Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). + Architecture *string `type:"string" enum:"Architecture"` + + // For load-based or time-based instances, the type. Windows stacks can use + // only time-based instances. + AutoScalingType *string `type:"string" enum:"AutoScalingType"` + + // This property cannot be updated. + EbsOptimized *bool `type:"boolean"` + + // The instance host name. + Hostname *string `type:"string"` + + // Whether to install operating system and package updates when the instance + // boots. The default value is true. To control when updates are installed, + // set this value to false. You must then update your instances manually by + // using CreateDeployment to run the update_dependencies stack command or by + // manually running yum (Amazon Linux) or apt-get (Ubuntu) on the instances. + // + // We strongly recommend using the default value of true, to ensure that your + // instances have the latest security updates. + InstallUpdatesOnBoot *bool `type:"boolean"` + + // The instance ID. + InstanceId *string `type:"string" required:"true"` + + // The instance type, such as t2.micro. For a list of supported instance types, + // open the stack in the console, choose Instances, and choose + Instance. The + // Size list contains the currently supported types. For more information, see + // Instance Families and Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). + // The parameter values that you use to specify the various types are in the + // API Name column of the Available Instance Types table. + InstanceType *string `type:"string"` + + // The instance's layer IDs. + LayerIds []*string `type:"list"` + + // The instance's operating system, which must be set to one of the following. + // + // A supported Linux operating system: An Amazon Linux version, such as Amazon + // Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03. + // + // A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu + // 14.04 LTS, or Ubuntu 12.04 LTS. + // + // CentOS 7 + // + // Red Hat Enterprise Linux 7 + // + // A supported Windows operating system, such as Microsoft Windows Server + // 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft + // Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server + // 2012 R2 with SQL Server Web. + // + // A custom AMI: Custom. + // + // For more information on the supported operating systems, see AWS OpsWorks + // Operating Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). + // + // The default option is the current Amazon Linux version. 
If you set this + // parameter to Custom, you must use the AmiId parameter to specify the custom + // AMI that you want to use. For more information on the supported operating + // systems, see Operating Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). + // For more information on how to use custom AMIs with OpsWorks, see Using Custom + // AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). + // + // You can specify a different Linux operating system for the updated stack, + // but you cannot change from Linux to Windows or Windows to Linux. + Os *string `type:"string"` + + // The instance's Amazon EC2 key name. + SshKeyName *string `type:"string"` +} + +// String returns the string representation +func (s UpdateInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateInstanceInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateInstanceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateInstanceOutput) GoString() string { + return s.String() +} + +type UpdateLayerInput struct { + _ struct{} `type:"structure"` + + // One or more user-defined key/value pairs to be added to the stack attributes. + Attributes map[string]*string `type:"map"` + + // Whether to automatically assign an Elastic IP address (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) + // to the layer's instances. For more information, see How to Edit a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-edit.html). + AutoAssignElasticIps *bool `type:"boolean"` + + // For stacks that are running in a VPC, whether to automatically assign a public + // IP address to the layer's instances. For more information, see How to Edit + // a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-edit.html). + AutoAssignPublicIps *bool `type:"boolean"` + + // The ARN of an IAM profile to be used for all of the layer's EC2 instances. + // For more information about IAM ARNs, see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). + CustomInstanceProfileArn *string `type:"string"` + + // A JSON-formatted string containing custom stack configuration and deployment + // attributes to be installed on the layer's instances. For more information, + // see Using Custom JSON (http://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook-json-override.html). + CustomJson *string `type:"string"` + + // A LayerCustomRecipes object that specifies the layer's custom recipes. + CustomRecipes *Recipes `type:"structure"` + + // An array containing the layer's custom security group IDs. + CustomSecurityGroupIds []*string `type:"list"` + + // Whether to disable auto healing for the layer. 
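// A minimal sketch (not part of the generated code) of the Os/AmiId pairing
// described above: setting Os to Custom requires a matching AmiId. Both IDs
// are placeholders:
//
//	svc := opsworks.New(session.New())
//	_, err := svc.UpdateInstance(&opsworks.UpdateInstanceInput{
//		InstanceId: aws.String("INSTANCE_ID"), // required; placeholder
//		Os:         aws.String("Custom"),
//		AmiId:      aws.String("ami-12345678"), // placeholder custom AMI
//	})
//	if err != nil {
//		fmt.Println(err.Error())
//	}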
+ EnableAutoHealing *bool `type:"boolean"` + + // Whether to install operating system and package updates when the instance + // boots. The default value is true. To control when updates are installed, + // set this value to false. You must then update your instances manually by + // using CreateDeployment to run the update_dependencies stack command or manually + // running yum (Amazon Linux) or apt-get (Ubuntu) on the instances. + // + // We strongly recommend using the default value of true, to ensure that your + // instances have the latest security updates. + InstallUpdatesOnBoot *bool `type:"boolean"` + + // The layer ID. + LayerId *string `type:"string" required:"true"` + + LifecycleEventConfiguration *LifecycleEventConfiguration `type:"structure"` + + // The layer name, which is used by the console. + Name *string `type:"string"` + + // An array of Package objects that describe the layer's packages. + Packages []*string `type:"list"` + + // For custom layers only, use this parameter to specify the layer's short name, + // which is used internally by AWS OpsWorks and by Chef. The short name is also + // used as the name for the directory where your app files are installed. It + // can have a maximum of 200 characters and must be in the following format: + // /\A[a-z0-9\-\_\.]+\Z/. + // + // The built-in layers' short names are defined by AWS OpsWorks. For more information, + // see the Layer Reference (http://docs.aws.amazon.com/opsworks/latest/userguide/layers.html). + Shortname *string `type:"string"` + + // Whether to use Amazon EBS-optimized instances. + UseEbsOptimizedInstances *bool `type:"boolean"` + + // A VolumeConfigurations object that describes the layer's Amazon EBS volumes. + VolumeConfigurations []*VolumeConfiguration `type:"list"` +} + +// String returns the string representation +func (s UpdateLayerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateLayerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateLayerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateLayerInput"} + if s.LayerId == nil { + invalidParams.Add(request.NewErrParamRequired("LayerId")) + } + if s.VolumeConfigurations != nil { + for i, v := range s.VolumeConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "VolumeConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateLayerOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateLayerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateLayerOutput) GoString() string { + return s.String() +} + +type UpdateMyUserProfileInput struct { + _ struct{} `type:"structure"` + + // The user's SSH public key.
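// A minimal sketch (not part of the generated code) of UpdateLayer with a
// VolumeConfiguration; MountPoint, NumberOfDisks, and Size are the fields
// that Validate requires. The layer ID is a placeholder:
//
//	svc := opsworks.New(session.New())
//	_, err := svc.UpdateLayer(&opsworks.UpdateLayerInput{
//		LayerId: aws.String("LAYER_ID"), // required; placeholder
//		VolumeConfigurations: []*opsworks.VolumeConfiguration{
//			{
//				MountPoint:    aws.String("/mnt/data"),
//				NumberOfDisks: aws.Int64(2),
//				Size:          aws.Int64(100), // size in GiB
//				RaidLevel:     aws.Int64(0),
//			},
//		},
//	})
//	if err != nil {
//		fmt.Println(err.Error())
//	}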
+ SshPublicKey *string `type:"string"` +} + +// String returns the string representation +func (s UpdateMyUserProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateMyUserProfileInput) GoString() string { + return s.String() +} + +type UpdateMyUserProfileOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateMyUserProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateMyUserProfileOutput) GoString() string { + return s.String() +} + +type UpdateRdsDbInstanceInput struct { + _ struct{} `type:"structure"` + + // The database password. + DbPassword *string `type:"string"` + + // The master user name. + DbUser *string `type:"string"` + + // The Amazon RDS instance's ARN. + RdsDbInstanceArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateRdsDbInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateRdsDbInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateRdsDbInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateRdsDbInstanceInput"} + if s.RdsDbInstanceArn == nil { + invalidParams.Add(request.NewErrParamRequired("RdsDbInstanceArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateRdsDbInstanceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateRdsDbInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateRdsDbInstanceOutput) GoString() string { + return s.String() +} + +type UpdateStackInput struct { + _ struct{} `type:"structure"` + + // The default AWS OpsWorks agent version. You have the following options: + // + // Auto-update - Set this parameter to LATEST. AWS OpsWorks automatically + // installs new agent versions on the stack's instances as soon as they are + // available. + // + // Fixed version - Set this parameter to your preferred agent version. To + // update the agent version, you must edit the stack configuration and specify + // a new version. AWS OpsWorks then automatically installs that version on the + // stack's instances. + // + // The default setting is LATEST. To specify an agent version, you must use + // the complete version number, not the abbreviated number shown on the console. + // For a list of available agent version numbers, call DescribeAgentVersions. + // + // You can also specify an agent version when you create or update an instance, + // which overrides the stack's default setting. + AgentVersion *string `type:"string"` + + // One or more user-defined key-value pairs to be added to the stack attributes. + Attributes map[string]*string `type:"map"` + + // A ChefConfiguration object that specifies whether to enable Berkshelf and + // the Berkshelf version on Chef 11.10 stacks. For more information, see Create + // a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html). + ChefConfiguration *ChefConfiguration `type:"structure"` + + // The configuration manager. 
When you update a stack, we recommend that you + // use the configuration manager to specify the Chef version: 12, 11.10, or + // 11.4 for Linux stacks, or 12.2 for Windows stacks. The default value for + // Linux stacks is currently 11.4. + ConfigurationManager *StackConfigurationManager `type:"structure"` + + // Contains the information required to retrieve an app or cookbook from a repository. + // For more information, see Creating Apps (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html) + // or Custom Recipes and Cookbooks (http://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook.html). + CustomCookbooksSource *Source `type:"structure"` + + // A string that contains user-defined, custom JSON. It can be used to override + // the corresponding default stack configuration JSON values or to pass data + // to recipes. The string should be in the following format, and it must escape + // characters such as '"': + // + // "{\"key1\": \"value1\", \"key2\": \"value2\",...}" + // + // For more information on custom JSON, see Use Custom JSON to Modify the Stack + // Configuration Attributes (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-json.html). + CustomJson *string `type:"string"` + + // The stack's default Availability Zone, which must be in the stack's region. + // For more information, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html). + // If you also specify a value for DefaultSubnetId, the subnet must be in the + // same zone. For more information, see CreateStack. + DefaultAvailabilityZone *string `type:"string"` + + // The ARN of an IAM profile that is the default profile for all of the stack's + // EC2 instances. For more information about IAM ARNs, see Using Identifiers + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). + DefaultInstanceProfileArn *string `type:"string"` + + // The stack's operating system, which must be set to one of the following: + // + // A supported Linux operating system: An Amazon Linux version, such as Amazon + // Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03. + // + // A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu + // 14.04 LTS, or Ubuntu 12.04 LTS. + // + // CentOS 7 + // + // Red Hat Enterprise Linux 7 + // + // A supported Windows operating system, such as Microsoft Windows Server + // 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft + // Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server + // 2012 R2 with SQL Server Web. + // + // A custom AMI: Custom. You specify the custom AMI you want to use when + // you create instances. For more information on how to use custom AMIs with + // OpsWorks, see Using Custom AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). + // + // The default option is the stack's current operating system. For more information + // on the supported operating systems, see AWS OpsWorks Operating Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). + DefaultOs *string `type:"string"` + + // The default root device type. This value is used by default for all instances + // in the stack, but you can override it when you create an instance. For more + // information, see Storage for the Root Device (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device).
+ DefaultRootDeviceType *string `type:"string" enum:"RootDeviceType"` + + // A default Amazon EC2 key-pair name. The default value is none. If you specify + // a key-pair name, AWS OpsWorks installs the public key on the instance and + // you can use the private key with an SSH client to log in to the instance. + // For more information, see Using SSH to Communicate with an Instance (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-ssh.html) + // and Managing SSH Access (http://docs.aws.amazon.com/opsworks/latest/userguide/security-ssh-access.html). + // You can override this setting by specifying a different key pair, or no key + // pair, when you create an instance (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-add.html). + DefaultSshKeyName *string `type:"string"` + + // The stack's default VPC subnet ID. This parameter is required if you specify + // a value for the VpcId parameter. All instances are launched into this subnet + // unless you specify otherwise when you create the instance. If you also specify + // a value for DefaultAvailabilityZone, the subnet must be in that zone. For + // information on default values and when this parameter is required, see the + // VpcId parameter description. + DefaultSubnetId *string `type:"string"` + + // The stack's new host name theme, with spaces replaced by underscores. The + // theme is used to generate host names for the stack's instances. By default, + // HostnameTheme is set to Layer_Dependent, which creates host names by appending + // integers to the layer's short name. The other themes are: + // + // Baked_Goods + // + // Clouds + // + // Europe_Cities + // + // Fruits + // + // Greek_Deities + // + // Legendary_creatures_from_Japan + // + // Planets_and_Moons + // + // Roman_Deities + // + // Scottish_Islands + // + // US_Cities + // + // Wild_Cats + // + // To obtain a generated host name, call GetHostNameSuggestion, which returns + // a host name based on the current theme. + HostnameTheme *string `type:"string"` + + // The stack's new name. + Name *string `type:"string"` + + // Do not use this parameter. You cannot update a stack's service role. + ServiceRoleArn *string `type:"string"` + + // The stack ID. + StackId *string `type:"string" required:"true"` + + // Whether the stack uses custom cookbooks. + UseCustomCookbooks *bool `type:"boolean"` + + // Whether to associate the AWS OpsWorks built-in security groups with the stack's + // layers. + // + // AWS OpsWorks provides a standard set of built-in security groups, one for + // each layer, which are associated with layers by default. UseOpsworksSecurityGroups + // allows you to provide your own custom security groups instead of using the + // built-in groups. UseOpsworksSecurityGroups has the following settings: + // + // True - AWS OpsWorks automatically associates the appropriate built-in + // security group with each layer (default setting). You can associate additional + // security groups with a layer after you create it, but you cannot delete the + // built-in security group. + // + // False - AWS OpsWorks does not associate built-in security groups with + // layers. You must create appropriate EC2 security groups and associate a security + // group with each layer that you create. However, you can still manually associate + // a built-in security group with a layer on creation. Custom security groups are required + // only for those layers that need custom settings.
+ // + // For more information, see Create a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html). + UseOpsworksSecurityGroups *bool `type:"boolean"` +} + +// String returns the string representation +func (s UpdateStackInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateStackInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateStackInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateStackInput"} + if s.StackId == nil { + invalidParams.Add(request.NewErrParamRequired("StackId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateStackOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateStackOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateStackOutput) GoString() string { + return s.String() +} + +type UpdateUserProfileInput struct { + _ struct{} `type:"structure"` + + // Whether users can specify their own SSH public key through the My Settings + // page. For more information, see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/security-settingsshkey.html). + AllowSelfManagement *bool `type:"boolean"` + + // The user IAM ARN. + IamUserArn *string `type:"string" required:"true"` + + // The user's new SSH public key. + SshPublicKey *string `type:"string"` + + // The user's SSH user name. The allowable characters are [a-z], [A-Z], [0-9], + // '-', and '_'. If the specified name includes other punctuation marks, AWS + // OpsWorks removes them. For example, my.name will be changed to myname. If + // you do not specify an SSH user name, AWS OpsWorks generates one from the + // IAM user name. + SshUsername *string `type:"string"` +} + +// String returns the string representation +func (s UpdateUserProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateUserProfileInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateUserProfileInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateUserProfileInput"} + if s.IamUserArn == nil { + invalidParams.Add(request.NewErrParamRequired("IamUserArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateUserProfileOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateUserProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateUserProfileOutput) GoString() string { + return s.String() +} + +type UpdateVolumeInput struct { + _ struct{} `type:"structure"` + + // The new mount point. + MountPoint *string `type:"string"` + + // The new name. + Name *string `type:"string"` + + // The volume ID. + VolumeId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateVolumeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
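// A minimal sketch (not part of the generated code) of UpdateStack with the
// CustomJson format described above; a Go raw string literal produces the
// same bytes as the escaped form without backslashes. The stack ID is a
// placeholder:
//
//	svc := opsworks.New(session.New())
//	_, err := svc.UpdateStack(&opsworks.UpdateStackInput{
//		StackId:    aws.String("STACK_ID"), // required; placeholder
//		CustomJson: aws.String(`{"key1": "value1", "key2": "value2"}`),
//	})
//	if err != nil {
//		fmt.Println(err.Error())
//	}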
+func (s *UpdateVolumeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateVolumeInput"} + if s.VolumeId == nil { + invalidParams.Add(request.NewErrParamRequired("VolumeId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateVolumeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateVolumeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateVolumeOutput) GoString() string { + return s.String() +} + +// Describes a user's SSH information. +type UserProfile struct { + _ struct{} `type:"structure"` + + // Whether users can specify their own SSH public key through the My Settings + // page. For more information, see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/security-settingsshkey.html). + AllowSelfManagement *bool `type:"boolean"` + + // The user's IAM ARN. + IamUserArn *string `type:"string"` + + // The user's name. + Name *string `type:"string"` + + // The user's SSH public key. + SshPublicKey *string `type:"string"` + + // The user's SSH user name. + SshUsername *string `type:"string"` +} + +// String returns the string representation +func (s UserProfile) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UserProfile) GoString() string { + return s.String() +} + +// Describes an instance's Amazon EBS volume. +type Volume struct { + _ struct{} `type:"structure"` + + // The volume Availability Zone. For more information, see Regions and Endpoints + // (http://docs.aws.amazon.com/general/latest/gr/rande.html). + AvailabilityZone *string `type:"string"` + + // The device name. + Device *string `type:"string"` + + // The Amazon EC2 volume ID. + Ec2VolumeId *string `type:"string"` + + // The instance ID. + InstanceId *string `type:"string"` + + // For PIOPS volumes, the IOPS per disk. + Iops *int64 `type:"integer"` + + // The volume mount point. For example, "/mnt/disk1". + MountPoint *string `type:"string"` + + // The volume name. + Name *string `type:"string"` + + // The RAID array ID. + RaidArrayId *string `type:"string"` + + // The AWS region. For more information about AWS regions, see Regions and Endpoints + // (http://docs.aws.amazon.com/general/latest/gr/rande.html). + Region *string `type:"string"` + + // The volume size. + Size *int64 `type:"integer"` + + // The value returned by DescribeVolumes (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeVolumes.html). + Status *string `type:"string"` + + // The volume ID. + VolumeId *string `type:"string"` + + // The volume type, standard or PIOPS. + VolumeType *string `type:"string"` +} + +// String returns the string representation +func (s Volume) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Volume) GoString() string { + return s.String() +} + +// Describes an Amazon EBS volume configuration. +type VolumeConfiguration struct { + _ struct{} `type:"structure"` + + // For PIOPS volumes, the IOPS per disk. + Iops *int64 `type:"integer"` + + // The volume mount point. For example "/dev/sdh". + MountPoint *string `type:"string" required:"true"` + + // The number of disks in the volume. + NumberOfDisks *int64 `type:"integer" required:"true"` + + // The volume RAID level (http://en.wikipedia.org/wiki/Standard_RAID_levels). 
+ RaidLevel *int64 `type:"integer"` + + // The volume size. + Size *int64 `type:"integer" required:"true"` + + // The volume type: + // + // standard - Magnetic + // + // io1 - Provisioned IOPS (SSD) + // + // gp2 - General Purpose (SSD) + VolumeType *string `type:"string"` +} + +// String returns the string representation +func (s VolumeConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *VolumeConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VolumeConfiguration"} + if s.MountPoint == nil { + invalidParams.Add(request.NewErrParamRequired("MountPoint")) + } + if s.NumberOfDisks == nil { + invalidParams.Add(request.NewErrParamRequired("NumberOfDisks")) + } + if s.Size == nil { + invalidParams.Add(request.NewErrParamRequired("Size")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Describes a time-based instance's auto scaling schedule. The schedule consists +// of a set of key-value pairs. +// +// The key is the time period (a UTC hour) and must be an integer from 0 +// - 23. +// +// The value indicates whether the instance should be online or offline for +// the specified period, and must be set to "on" or "off" +// +// The default setting for all time periods is off, so you use the following +// parameters primarily to specify the online periods. You don't have to explicitly +// specify offline periods unless you want to change an online period to an +// offline period. +// +// The following example specifies that the instance should be online for four +// hours, from UTC 1200 - 1600. It will be off for the remainder of the day. +// +// { "12":"on", "13":"on", "14":"on", "15":"on" } +type WeeklyAutoScalingSchedule struct { + _ struct{} `type:"structure"` + + // The schedule for Friday. + Friday map[string]*string `type:"map"` + + // The schedule for Monday. + Monday map[string]*string `type:"map"` + + // The schedule for Saturday. + Saturday map[string]*string `type:"map"` + + // The schedule for Sunday. + Sunday map[string]*string `type:"map"` + + // The schedule for Thursday. + Thursday map[string]*string `type:"map"` + + // The schedule for Tuesday. + Tuesday map[string]*string `type:"map"` + + // The schedule for Wednesday. 
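// A minimal sketch (not part of the generated code) of the schedule format
// described above, applied via SetTimeBasedAutoScaling: map keys are UTC
// hours ("0" through "23") and values are "on" or "off". The instance ID is
// a placeholder:
//
//	svc := opsworks.New(session.New())
//	_, err := svc.SetTimeBasedAutoScaling(&opsworks.SetTimeBasedAutoScalingInput{
//		InstanceId: aws.String("INSTANCE_ID"), // placeholder
//		AutoScalingSchedule: &opsworks.WeeklyAutoScalingSchedule{
//			Monday: map[string]*string{
//				"12": aws.String("on"),
//				"13": aws.String("on"),
//				"14": aws.String("on"),
//				"15": aws.String("on"),
//			},
//		},
//	})
//	if err != nil {
//		fmt.Println(err.Error())
//	}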
+ Wednesday map[string]*string `type:"map"` +} + +// String returns the string representation +func (s WeeklyAutoScalingSchedule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WeeklyAutoScalingSchedule) GoString() string { + return s.String() +} + +const ( + // @enum AppAttributesKeys + AppAttributesKeysDocumentRoot = "DocumentRoot" + // @enum AppAttributesKeys + AppAttributesKeysRailsEnv = "RailsEnv" + // @enum AppAttributesKeys + AppAttributesKeysAutoBundleOnDeploy = "AutoBundleOnDeploy" + // @enum AppAttributesKeys + AppAttributesKeysAwsFlowRubySettings = "AwsFlowRubySettings" +) + +const ( + // @enum AppType + AppTypeAwsFlowRuby = "aws-flow-ruby" + // @enum AppType + AppTypeJava = "java" + // @enum AppType + AppTypeRails = "rails" + // @enum AppType + AppTypePhp = "php" + // @enum AppType + AppTypeNodejs = "nodejs" + // @enum AppType + AppTypeStatic = "static" + // @enum AppType + AppTypeOther = "other" +) + +const ( + // @enum Architecture + ArchitectureX8664 = "x86_64" + // @enum Architecture + ArchitectureI386 = "i386" +) + +const ( + // @enum AutoScalingType + AutoScalingTypeLoad = "load" + // @enum AutoScalingType + AutoScalingTypeTimer = "timer" +) + +const ( + // @enum DeploymentCommandName + DeploymentCommandNameInstallDependencies = "install_dependencies" + // @enum DeploymentCommandName + DeploymentCommandNameUpdateDependencies = "update_dependencies" + // @enum DeploymentCommandName + DeploymentCommandNameUpdateCustomCookbooks = "update_custom_cookbooks" + // @enum DeploymentCommandName + DeploymentCommandNameExecuteRecipes = "execute_recipes" + // @enum DeploymentCommandName + DeploymentCommandNameConfigure = "configure" + // @enum DeploymentCommandName + DeploymentCommandNameSetup = "setup" + // @enum DeploymentCommandName + DeploymentCommandNameDeploy = "deploy" + // @enum DeploymentCommandName + DeploymentCommandNameRollback = "rollback" + // @enum DeploymentCommandName + DeploymentCommandNameStart = "start" + // @enum DeploymentCommandName + DeploymentCommandNameStop = "stop" + // @enum DeploymentCommandName + DeploymentCommandNameRestart = "restart" + // @enum DeploymentCommandName + DeploymentCommandNameUndeploy = "undeploy" +) + +const ( + // @enum LayerAttributesKeys + LayerAttributesKeysEcsClusterArn = "EcsClusterArn" + // @enum LayerAttributesKeys + LayerAttributesKeysEnableHaproxyStats = "EnableHaproxyStats" + // @enum LayerAttributesKeys + LayerAttributesKeysHaproxyStatsUrl = "HaproxyStatsUrl" + // @enum LayerAttributesKeys + LayerAttributesKeysHaproxyStatsUser = "HaproxyStatsUser" + // @enum LayerAttributesKeys + LayerAttributesKeysHaproxyStatsPassword = "HaproxyStatsPassword" + // @enum LayerAttributesKeys + LayerAttributesKeysHaproxyHealthCheckUrl = "HaproxyHealthCheckUrl" + // @enum LayerAttributesKeys + LayerAttributesKeysHaproxyHealthCheckMethod = "HaproxyHealthCheckMethod" + // @enum LayerAttributesKeys + LayerAttributesKeysMysqlRootPassword = "MysqlRootPassword" + // @enum LayerAttributesKeys + LayerAttributesKeysMysqlRootPasswordUbiquitous = "MysqlRootPasswordUbiquitous" + // @enum LayerAttributesKeys + LayerAttributesKeysGangliaUrl = "GangliaUrl" + // @enum LayerAttributesKeys + LayerAttributesKeysGangliaUser = "GangliaUser" + // @enum LayerAttributesKeys + LayerAttributesKeysGangliaPassword = "GangliaPassword" + // @enum LayerAttributesKeys + LayerAttributesKeysMemcachedMemory = "MemcachedMemory" + // @enum LayerAttributesKeys + LayerAttributesKeysNodejsVersion = "NodejsVersion" + // @enum 
LayerAttributesKeys + LayerAttributesKeysRubyVersion = "RubyVersion" + // @enum LayerAttributesKeys + LayerAttributesKeysRubygemsVersion = "RubygemsVersion" + // @enum LayerAttributesKeys + LayerAttributesKeysManageBundler = "ManageBundler" + // @enum LayerAttributesKeys + LayerAttributesKeysBundlerVersion = "BundlerVersion" + // @enum LayerAttributesKeys + LayerAttributesKeysRailsStack = "RailsStack" + // @enum LayerAttributesKeys + LayerAttributesKeysPassengerVersion = "PassengerVersion" + // @enum LayerAttributesKeys + LayerAttributesKeysJvm = "Jvm" + // @enum LayerAttributesKeys + LayerAttributesKeysJvmVersion = "JvmVersion" + // @enum LayerAttributesKeys + LayerAttributesKeysJvmOptions = "JvmOptions" + // @enum LayerAttributesKeys + LayerAttributesKeysJavaAppServer = "JavaAppServer" + // @enum LayerAttributesKeys + LayerAttributesKeysJavaAppServerVersion = "JavaAppServerVersion" +) + +const ( + // @enum LayerType + LayerTypeAwsFlowRuby = "aws-flow-ruby" + // @enum LayerType + LayerTypeEcsCluster = "ecs-cluster" + // @enum LayerType + LayerTypeJavaApp = "java-app" + // @enum LayerType + LayerTypeLb = "lb" + // @enum LayerType + LayerTypeWeb = "web" + // @enum LayerType + LayerTypePhpApp = "php-app" + // @enum LayerType + LayerTypeRailsApp = "rails-app" + // @enum LayerType + LayerTypeNodejsApp = "nodejs-app" + // @enum LayerType + LayerTypeMemcached = "memcached" + // @enum LayerType + LayerTypeDbMaster = "db-master" + // @enum LayerType + LayerTypeMonitoringMaster = "monitoring-master" + // @enum LayerType + LayerTypeCustom = "custom" +) + +const ( + // @enum RootDeviceType + RootDeviceTypeEbs = "ebs" + // @enum RootDeviceType + RootDeviceTypeInstanceStore = "instance-store" +) + +const ( + // @enum SourceType + SourceTypeGit = "git" + // @enum SourceType + SourceTypeSvn = "svn" + // @enum SourceType + SourceTypeArchive = "archive" + // @enum SourceType + SourceTypeS3 = "s3" +) + +const ( + // @enum StackAttributesKeys + StackAttributesKeysColor = "Color" +) + +const ( + // @enum VirtualizationType + VirtualizationTypeParavirtual = "paravirtual" + // @enum VirtualizationType + VirtualizationTypeHvm = "hvm" +) + +const ( + // @enum VolumeType + VolumeTypeGp2 = "gp2" + // @enum VolumeType + VolumeTypeIo1 = "io1" + // @enum VolumeType + VolumeTypeStandard = "standard" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/opsworks/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/opsworks/examples_test.go new file mode 100644 index 000000000..913e5d22c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/opsworks/examples_test.go @@ -0,0 +1,1895 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package opsworks_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/opsworks" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleOpsWorks_AssignInstance() { + svc := opsworks.New(session.New()) + + params := &opsworks.AssignInstanceInput{ + InstanceId: aws.String("String"), // Required + LayerIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.AssignInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
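// (A sketch of the cast mentioned in the error-handling comment above, not
// part of the generated example; it assumes the additional import
// "github.com/aws/aws-sdk-go/aws/awserr":
//
//	if aerr, ok := err.(awserr.Error); ok {
//		fmt.Println(aerr.Code(), aerr.Message())
//	}
//
// awserr.Error exposes Code() and Message() for service errors.)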
+ fmt.Println(resp) +} + +func ExampleOpsWorks_AssignVolume() { + svc := opsworks.New(session.New()) + + params := &opsworks.AssignVolumeInput{ + VolumeId: aws.String("String"), // Required + InstanceId: aws.String("String"), + } + resp, err := svc.AssignVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_AssociateElasticIp() { + svc := opsworks.New(session.New()) + + params := &opsworks.AssociateElasticIpInput{ + ElasticIp: aws.String("String"), // Required + InstanceId: aws.String("String"), + } + resp, err := svc.AssociateElasticIp(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_AttachElasticLoadBalancer() { + svc := opsworks.New(session.New()) + + params := &opsworks.AttachElasticLoadBalancerInput{ + ElasticLoadBalancerName: aws.String("String"), // Required + LayerId: aws.String("String"), // Required + } + resp, err := svc.AttachElasticLoadBalancer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_CloneStack() { + svc := opsworks.New(session.New()) + + params := &opsworks.CloneStackInput{ + ServiceRoleArn: aws.String("String"), // Required + SourceStackId: aws.String("String"), // Required + AgentVersion: aws.String("String"), + Attributes: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + ChefConfiguration: &opsworks.ChefConfiguration{ + BerkshelfVersion: aws.String("String"), + ManageBerkshelf: aws.Bool(true), + }, + CloneAppIds: []*string{ + aws.String("String"), // Required + // More values... + }, + ClonePermissions: aws.Bool(true), + ConfigurationManager: &opsworks.StackConfigurationManager{ + Name: aws.String("String"), + Version: aws.String("String"), + }, + CustomCookbooksSource: &opsworks.Source{ + Password: aws.String("String"), + Revision: aws.String("String"), + SshKey: aws.String("String"), + Type: aws.String("SourceType"), + Url: aws.String("String"), + Username: aws.String("String"), + }, + CustomJson: aws.String("String"), + DefaultAvailabilityZone: aws.String("String"), + DefaultInstanceProfileArn: aws.String("String"), + DefaultOs: aws.String("String"), + DefaultRootDeviceType: aws.String("RootDeviceType"), + DefaultSshKeyName: aws.String("String"), + DefaultSubnetId: aws.String("String"), + HostnameTheme: aws.String("String"), + Name: aws.String("String"), + Region: aws.String("String"), + UseCustomCookbooks: aws.Bool(true), + UseOpsworksSecurityGroups: aws.Bool(true), + VpcId: aws.String("String"), + } + resp, err := svc.CloneStack(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleOpsWorks_CreateApp() { + svc := opsworks.New(session.New()) + + params := &opsworks.CreateAppInput{ + Name: aws.String("String"), // Required + StackId: aws.String("String"), // Required + Type: aws.String("AppType"), // Required + AppSource: &opsworks.Source{ + Password: aws.String("String"), + Revision: aws.String("String"), + SshKey: aws.String("String"), + Type: aws.String("SourceType"), + Url: aws.String("String"), + Username: aws.String("String"), + }, + Attributes: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + DataSources: []*opsworks.DataSource{ + { // Required + Arn: aws.String("String"), + DatabaseName: aws.String("String"), + Type: aws.String("String"), + }, + // More values... + }, + Description: aws.String("String"), + Domains: []*string{ + aws.String("String"), // Required + // More values... + }, + EnableSsl: aws.Bool(true), + Environment: []*opsworks.EnvironmentVariable{ + { // Required + Key: aws.String("String"), // Required + Value: aws.String("String"), // Required + Secure: aws.Bool(true), + }, + // More values... + }, + Shortname: aws.String("String"), + SslConfiguration: &opsworks.SslConfiguration{ + Certificate: aws.String("String"), // Required + PrivateKey: aws.String("String"), // Required + Chain: aws.String("String"), + }, + } + resp, err := svc.CreateApp(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_CreateDeployment() { + svc := opsworks.New(session.New()) + + params := &opsworks.CreateDeploymentInput{ + Command: &opsworks.DeploymentCommand{ // Required + Name: aws.String("DeploymentCommandName"), // Required + Args: map[string][]*string{ + "Key": { // Required + aws.String("String"), // Required + // More values... + }, + // More values... + }, + }, + StackId: aws.String("String"), // Required + AppId: aws.String("String"), + Comment: aws.String("String"), + CustomJson: aws.String("String"), + InstanceIds: []*string{ + aws.String("String"), // Required + // More values... + }, + LayerIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.CreateDeployment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_CreateInstance() { + svc := opsworks.New(session.New()) + + params := &opsworks.CreateInstanceInput{ + InstanceType: aws.String("String"), // Required + LayerIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + StackId: aws.String("String"), // Required + AgentVersion: aws.String("String"), + AmiId: aws.String("String"), + Architecture: aws.String("Architecture"), + AutoScalingType: aws.String("AutoScalingType"), + AvailabilityZone: aws.String("String"), + BlockDeviceMappings: []*opsworks.BlockDeviceMapping{ + { // Required + DeviceName: aws.String("String"), + Ebs: &opsworks.EbsBlockDevice{ + DeleteOnTermination: aws.Bool(true), + Iops: aws.Int64(1), + SnapshotId: aws.String("String"), + VolumeSize: aws.Int64(1), + VolumeType: aws.String("VolumeType"), + }, + NoDevice: aws.String("String"), + VirtualName: aws.String("String"), + }, + // More values... 
+ }, + EbsOptimized: aws.Bool(true), + Hostname: aws.String("String"), + InstallUpdatesOnBoot: aws.Bool(true), + Os: aws.String("String"), + RootDeviceType: aws.String("RootDeviceType"), + SshKeyName: aws.String("String"), + SubnetId: aws.String("String"), + Tenancy: aws.String("String"), + VirtualizationType: aws.String("String"), + } + resp, err := svc.CreateInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_CreateLayer() { + svc := opsworks.New(session.New()) + + params := &opsworks.CreateLayerInput{ + Name: aws.String("String"), // Required + Shortname: aws.String("String"), // Required + StackId: aws.String("String"), // Required + Type: aws.String("LayerType"), // Required + Attributes: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + AutoAssignElasticIps: aws.Bool(true), + AutoAssignPublicIps: aws.Bool(true), + CustomInstanceProfileArn: aws.String("String"), + CustomJson: aws.String("String"), + CustomRecipes: &opsworks.Recipes{ + Configure: []*string{ + aws.String("String"), // Required + // More values... + }, + Deploy: []*string{ + aws.String("String"), // Required + // More values... + }, + Setup: []*string{ + aws.String("String"), // Required + // More values... + }, + Shutdown: []*string{ + aws.String("String"), // Required + // More values... + }, + Undeploy: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + CustomSecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + EnableAutoHealing: aws.Bool(true), + InstallUpdatesOnBoot: aws.Bool(true), + LifecycleEventConfiguration: &opsworks.LifecycleEventConfiguration{ + Shutdown: &opsworks.ShutdownEventConfiguration{ + DelayUntilElbConnectionsDrained: aws.Bool(true), + ExecutionTimeout: aws.Int64(1), + }, + }, + Packages: []*string{ + aws.String("String"), // Required + // More values... + }, + UseEbsOptimizedInstances: aws.Bool(true), + VolumeConfigurations: []*opsworks.VolumeConfiguration{ + { // Required + MountPoint: aws.String("String"), // Required + NumberOfDisks: aws.Int64(1), // Required + Size: aws.Int64(1), // Required + Iops: aws.Int64(1), + RaidLevel: aws.Int64(1), + VolumeType: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateLayer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_CreateStack() { + svc := opsworks.New(session.New()) + + params := &opsworks.CreateStackInput{ + DefaultInstanceProfileArn: aws.String("String"), // Required + Name: aws.String("String"), // Required + Region: aws.String("String"), // Required + ServiceRoleArn: aws.String("String"), // Required + AgentVersion: aws.String("String"), + Attributes: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... 
+ }, + ChefConfiguration: &opsworks.ChefConfiguration{ + BerkshelfVersion: aws.String("String"), + ManageBerkshelf: aws.Bool(true), + }, + ConfigurationManager: &opsworks.StackConfigurationManager{ + Name: aws.String("String"), + Version: aws.String("String"), + }, + CustomCookbooksSource: &opsworks.Source{ + Password: aws.String("String"), + Revision: aws.String("String"), + SshKey: aws.String("String"), + Type: aws.String("SourceType"), + Url: aws.String("String"), + Username: aws.String("String"), + }, + CustomJson: aws.String("String"), + DefaultAvailabilityZone: aws.String("String"), + DefaultOs: aws.String("String"), + DefaultRootDeviceType: aws.String("RootDeviceType"), + DefaultSshKeyName: aws.String("String"), + DefaultSubnetId: aws.String("String"), + HostnameTheme: aws.String("String"), + UseCustomCookbooks: aws.Bool(true), + UseOpsworksSecurityGroups: aws.Bool(true), + VpcId: aws.String("String"), + } + resp, err := svc.CreateStack(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_CreateUserProfile() { + svc := opsworks.New(session.New()) + + params := &opsworks.CreateUserProfileInput{ + IamUserArn: aws.String("String"), // Required + AllowSelfManagement: aws.Bool(true), + SshPublicKey: aws.String("String"), + SshUsername: aws.String("String"), + } + resp, err := svc.CreateUserProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DeleteApp() { + svc := opsworks.New(session.New()) + + params := &opsworks.DeleteAppInput{ + AppId: aws.String("String"), // Required + } + resp, err := svc.DeleteApp(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DeleteInstance() { + svc := opsworks.New(session.New()) + + params := &opsworks.DeleteInstanceInput{ + InstanceId: aws.String("String"), // Required + DeleteElasticIp: aws.Bool(true), + DeleteVolumes: aws.Bool(true), + } + resp, err := svc.DeleteInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DeleteLayer() { + svc := opsworks.New(session.New()) + + params := &opsworks.DeleteLayerInput{ + LayerId: aws.String("String"), // Required + } + resp, err := svc.DeleteLayer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DeleteStack() { + svc := opsworks.New(session.New()) + + params := &opsworks.DeleteStackInput{ + StackId: aws.String("String"), // Required + } + resp, err := svc.DeleteStack(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleOpsWorks_DeleteUserProfile() { + svc := opsworks.New(session.New()) + + params := &opsworks.DeleteUserProfileInput{ + IamUserArn: aws.String("String"), // Required + } + resp, err := svc.DeleteUserProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DeregisterEcsCluster() { + svc := opsworks.New(session.New()) + + params := &opsworks.DeregisterEcsClusterInput{ + EcsClusterArn: aws.String("String"), // Required + } + resp, err := svc.DeregisterEcsCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DeregisterElasticIp() { + svc := opsworks.New(session.New()) + + params := &opsworks.DeregisterElasticIpInput{ + ElasticIp: aws.String("String"), // Required + } + resp, err := svc.DeregisterElasticIp(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DeregisterInstance() { + svc := opsworks.New(session.New()) + + params := &opsworks.DeregisterInstanceInput{ + InstanceId: aws.String("String"), // Required + } + resp, err := svc.DeregisterInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DeregisterRdsDbInstance() { + svc := opsworks.New(session.New()) + + params := &opsworks.DeregisterRdsDbInstanceInput{ + RdsDbInstanceArn: aws.String("String"), // Required + } + resp, err := svc.DeregisterRdsDbInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DeregisterVolume() { + svc := opsworks.New(session.New()) + + params := &opsworks.DeregisterVolumeInput{ + VolumeId: aws.String("String"), // Required + } + resp, err := svc.DeregisterVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeAgentVersions() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeAgentVersionsInput{ + ConfigurationManager: &opsworks.StackConfigurationManager{ + Name: aws.String("String"), + Version: aws.String("String"), + }, + StackId: aws.String("String"), + } + resp, err := svc.DescribeAgentVersions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeApps() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeAppsInput{ + AppIds: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + StackId: aws.String("String"), + } + resp, err := svc.DescribeApps(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeCommands() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeCommandsInput{ + CommandIds: []*string{ + aws.String("String"), // Required + // More values... + }, + DeploymentId: aws.String("String"), + InstanceId: aws.String("String"), + } + resp, err := svc.DescribeCommands(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeDeployments() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeDeploymentsInput{ + AppId: aws.String("String"), + DeploymentIds: []*string{ + aws.String("String"), // Required + // More values... + }, + StackId: aws.String("String"), + } + resp, err := svc.DescribeDeployments(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeEcsClusters() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeEcsClustersInput{ + EcsClusterArns: []*string{ + aws.String("String"), // Required + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + StackId: aws.String("String"), + } + resp, err := svc.DescribeEcsClusters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeElasticIps() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeElasticIpsInput{ + InstanceId: aws.String("String"), + Ips: []*string{ + aws.String("String"), // Required + // More values... + }, + StackId: aws.String("String"), + } + resp, err := svc.DescribeElasticIps(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeElasticLoadBalancers() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeElasticLoadBalancersInput{ + LayerIds: []*string{ + aws.String("String"), // Required + // More values... + }, + StackId: aws.String("String"), + } + resp, err := svc.DescribeElasticLoadBalancers(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeInstances() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeInstancesInput{ + InstanceIds: []*string{ + aws.String("String"), // Required + // More values... + }, + LayerId: aws.String("String"), + StackId: aws.String("String"), + } + resp, err := svc.DescribeInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeLayers() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeLayersInput{ + LayerIds: []*string{ + aws.String("String"), // Required + // More values... + }, + StackId: aws.String("String"), + } + resp, err := svc.DescribeLayers(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeLoadBasedAutoScaling() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeLoadBasedAutoScalingInput{ + LayerIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeLoadBasedAutoScaling(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeMyUserProfile() { + svc := opsworks.New(session.New()) + + var params *opsworks.DescribeMyUserProfileInput + resp, err := svc.DescribeMyUserProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribePermissions() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribePermissionsInput{ + IamUserArn: aws.String("String"), + StackId: aws.String("String"), + } + resp, err := svc.DescribePermissions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeRaidArrays() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeRaidArraysInput{ + InstanceId: aws.String("String"), + RaidArrayIds: []*string{ + aws.String("String"), // Required + // More values... + }, + StackId: aws.String("String"), + } + resp, err := svc.DescribeRaidArrays(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeRdsDbInstances() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeRdsDbInstancesInput{ + StackId: aws.String("String"), // Required + RdsDbInstanceArns: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeRdsDbInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeServiceErrors() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeServiceErrorsInput{ + InstanceId: aws.String("String"), + ServiceErrorIds: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + StackId: aws.String("String"), + } + resp, err := svc.DescribeServiceErrors(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeStackProvisioningParameters() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeStackProvisioningParametersInput{ + StackId: aws.String("String"), // Required + } + resp, err := svc.DescribeStackProvisioningParameters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeStackSummary() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeStackSummaryInput{ + StackId: aws.String("String"), // Required + } + resp, err := svc.DescribeStackSummary(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeStacks() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeStacksInput{ + StackIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeStacks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeTimeBasedAutoScaling() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeTimeBasedAutoScalingInput{ + InstanceIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeTimeBasedAutoScaling(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeUserProfiles() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeUserProfilesInput{ + IamUserArns: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeUserProfiles(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeVolumes() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeVolumesInput{ + InstanceId: aws.String("String"), + RaidArrayId: aws.String("String"), + StackId: aws.String("String"), + VolumeIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeVolumes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
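+	// (The generated output structs implement fmt.Stringer through
+	// awsutil.Prettify, so this prints an indented, field-by-field view.)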
+ fmt.Println(resp) +} + +func ExampleOpsWorks_DetachElasticLoadBalancer() { + svc := opsworks.New(session.New()) + + params := &opsworks.DetachElasticLoadBalancerInput{ + ElasticLoadBalancerName: aws.String("String"), // Required + LayerId: aws.String("String"), // Required + } + resp, err := svc.DetachElasticLoadBalancer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DisassociateElasticIp() { + svc := opsworks.New(session.New()) + + params := &opsworks.DisassociateElasticIpInput{ + ElasticIp: aws.String("String"), // Required + } + resp, err := svc.DisassociateElasticIp(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_GetHostnameSuggestion() { + svc := opsworks.New(session.New()) + + params := &opsworks.GetHostnameSuggestionInput{ + LayerId: aws.String("String"), // Required + } + resp, err := svc.GetHostnameSuggestion(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_GrantAccess() { + svc := opsworks.New(session.New()) + + params := &opsworks.GrantAccessInput{ + InstanceId: aws.String("String"), // Required + ValidForInMinutes: aws.Int64(1), + } + resp, err := svc.GrantAccess(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_RebootInstance() { + svc := opsworks.New(session.New()) + + params := &opsworks.RebootInstanceInput{ + InstanceId: aws.String("String"), // Required + } + resp, err := svc.RebootInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_RegisterEcsCluster() { + svc := opsworks.New(session.New()) + + params := &opsworks.RegisterEcsClusterInput{ + EcsClusterArn: aws.String("String"), // Required + StackId: aws.String("String"), // Required + } + resp, err := svc.RegisterEcsCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_RegisterElasticIp() { + svc := opsworks.New(session.New()) + + params := &opsworks.RegisterElasticIpInput{ + ElasticIp: aws.String("String"), // Required + StackId: aws.String("String"), // Required + } + resp, err := svc.RegisterElasticIp(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleOpsWorks_RegisterInstance() { + svc := opsworks.New(session.New()) + + params := &opsworks.RegisterInstanceInput{ + StackId: aws.String("String"), // Required + Hostname: aws.String("String"), + InstanceIdentity: &opsworks.InstanceIdentity{ + Document: aws.String("String"), + Signature: aws.String("String"), + }, + PrivateIp: aws.String("String"), + PublicIp: aws.String("String"), + RsaPublicKey: aws.String("String"), + RsaPublicKeyFingerprint: aws.String("String"), + } + resp, err := svc.RegisterInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_RegisterRdsDbInstance() { + svc := opsworks.New(session.New()) + + params := &opsworks.RegisterRdsDbInstanceInput{ + DbPassword: aws.String("String"), // Required + DbUser: aws.String("String"), // Required + RdsDbInstanceArn: aws.String("String"), // Required + StackId: aws.String("String"), // Required + } + resp, err := svc.RegisterRdsDbInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_RegisterVolume() { + svc := opsworks.New(session.New()) + + params := &opsworks.RegisterVolumeInput{ + StackId: aws.String("String"), // Required + Ec2VolumeId: aws.String("String"), + } + resp, err := svc.RegisterVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_SetLoadBasedAutoScaling() { + svc := opsworks.New(session.New()) + + params := &opsworks.SetLoadBasedAutoScalingInput{ + LayerId: aws.String("String"), // Required + DownScaling: &opsworks.AutoScalingThresholds{ + Alarms: []*string{ + aws.String("String"), // Required + // More values... + }, + CpuThreshold: aws.Float64(1.0), + IgnoreMetricsTime: aws.Int64(1), + InstanceCount: aws.Int64(1), + LoadThreshold: aws.Float64(1.0), + MemoryThreshold: aws.Float64(1.0), + ThresholdsWaitTime: aws.Int64(1), + }, + Enable: aws.Bool(true), + UpScaling: &opsworks.AutoScalingThresholds{ + Alarms: []*string{ + aws.String("String"), // Required + // More values... + }, + CpuThreshold: aws.Float64(1.0), + IgnoreMetricsTime: aws.Int64(1), + InstanceCount: aws.Int64(1), + LoadThreshold: aws.Float64(1.0), + MemoryThreshold: aws.Float64(1.0), + ThresholdsWaitTime: aws.Int64(1), + }, + } + resp, err := svc.SetLoadBasedAutoScaling(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_SetPermission() { + svc := opsworks.New(session.New()) + + params := &opsworks.SetPermissionInput{ + IamUserArn: aws.String("String"), // Required + StackId: aws.String("String"), // Required + AllowSsh: aws.Bool(true), + AllowSudo: aws.Bool(true), + Level: aws.String("String"), + } + resp, err := svc.SetPermission(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_SetTimeBasedAutoScaling() { + svc := opsworks.New(session.New()) + + params := &opsworks.SetTimeBasedAutoScalingInput{ + InstanceId: aws.String("String"), // Required + AutoScalingSchedule: &opsworks.WeeklyAutoScalingSchedule{ + Friday: map[string]*string{ + "Key": aws.String("Switch"), // Required + // More values... + }, + Monday: map[string]*string{ + "Key": aws.String("Switch"), // Required + // More values... + }, + Saturday: map[string]*string{ + "Key": aws.String("Switch"), // Required + // More values... + }, + Sunday: map[string]*string{ + "Key": aws.String("Switch"), // Required + // More values... + }, + Thursday: map[string]*string{ + "Key": aws.String("Switch"), // Required + // More values... + }, + Tuesday: map[string]*string{ + "Key": aws.String("Switch"), // Required + // More values... + }, + Wednesday: map[string]*string{ + "Key": aws.String("Switch"), // Required + // More values... + }, + }, + } + resp, err := svc.SetTimeBasedAutoScaling(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_StartInstance() { + svc := opsworks.New(session.New()) + + params := &opsworks.StartInstanceInput{ + InstanceId: aws.String("String"), // Required + } + resp, err := svc.StartInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_StartStack() { + svc := opsworks.New(session.New()) + + params := &opsworks.StartStackInput{ + StackId: aws.String("String"), // Required + } + resp, err := svc.StartStack(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_StopInstance() { + svc := opsworks.New(session.New()) + + params := &opsworks.StopInstanceInput{ + InstanceId: aws.String("String"), // Required + } + resp, err := svc.StopInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_StopStack() { + svc := opsworks.New(session.New()) + + params := &opsworks.StopStackInput{ + StackId: aws.String("String"), // Required + } + resp, err := svc.StopStack(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_UnassignInstance() { + svc := opsworks.New(session.New()) + + params := &opsworks.UnassignInstanceInput{ + InstanceId: aws.String("String"), // Required + } + resp, err := svc.UnassignInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleOpsWorks_UnassignVolume() { + svc := opsworks.New(session.New()) + + params := &opsworks.UnassignVolumeInput{ + VolumeId: aws.String("String"), // Required + } + resp, err := svc.UnassignVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_UpdateApp() { + svc := opsworks.New(session.New()) + + params := &opsworks.UpdateAppInput{ + AppId: aws.String("String"), // Required + AppSource: &opsworks.Source{ + Password: aws.String("String"), + Revision: aws.String("String"), + SshKey: aws.String("String"), + Type: aws.String("SourceType"), + Url: aws.String("String"), + Username: aws.String("String"), + }, + Attributes: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + DataSources: []*opsworks.DataSource{ + { // Required + Arn: aws.String("String"), + DatabaseName: aws.String("String"), + Type: aws.String("String"), + }, + // More values... + }, + Description: aws.String("String"), + Domains: []*string{ + aws.String("String"), // Required + // More values... + }, + EnableSsl: aws.Bool(true), + Environment: []*opsworks.EnvironmentVariable{ + { // Required + Key: aws.String("String"), // Required + Value: aws.String("String"), // Required + Secure: aws.Bool(true), + }, + // More values... + }, + Name: aws.String("String"), + SslConfiguration: &opsworks.SslConfiguration{ + Certificate: aws.String("String"), // Required + PrivateKey: aws.String("String"), // Required + Chain: aws.String("String"), + }, + Type: aws.String("AppType"), + } + resp, err := svc.UpdateApp(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_UpdateElasticIp() { + svc := opsworks.New(session.New()) + + params := &opsworks.UpdateElasticIpInput{ + ElasticIp: aws.String("String"), // Required + Name: aws.String("String"), + } + resp, err := svc.UpdateElasticIp(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_UpdateInstance() { + svc := opsworks.New(session.New()) + + params := &opsworks.UpdateInstanceInput{ + InstanceId: aws.String("String"), // Required + AgentVersion: aws.String("String"), + AmiId: aws.String("String"), + Architecture: aws.String("Architecture"), + AutoScalingType: aws.String("AutoScalingType"), + EbsOptimized: aws.Bool(true), + Hostname: aws.String("String"), + InstallUpdatesOnBoot: aws.Bool(true), + InstanceType: aws.String("String"), + LayerIds: []*string{ + aws.String("String"), // Required + // More values... + }, + Os: aws.String("String"), + SshKeyName: aws.String("String"), + } + resp, err := svc.UpdateInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleOpsWorks_UpdateLayer() { + svc := opsworks.New(session.New()) + + params := &opsworks.UpdateLayerInput{ + LayerId: aws.String("String"), // Required + Attributes: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + AutoAssignElasticIps: aws.Bool(true), + AutoAssignPublicIps: aws.Bool(true), + CustomInstanceProfileArn: aws.String("String"), + CustomJson: aws.String("String"), + CustomRecipes: &opsworks.Recipes{ + Configure: []*string{ + aws.String("String"), // Required + // More values... + }, + Deploy: []*string{ + aws.String("String"), // Required + // More values... + }, + Setup: []*string{ + aws.String("String"), // Required + // More values... + }, + Shutdown: []*string{ + aws.String("String"), // Required + // More values... + }, + Undeploy: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + CustomSecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + EnableAutoHealing: aws.Bool(true), + InstallUpdatesOnBoot: aws.Bool(true), + LifecycleEventConfiguration: &opsworks.LifecycleEventConfiguration{ + Shutdown: &opsworks.ShutdownEventConfiguration{ + DelayUntilElbConnectionsDrained: aws.Bool(true), + ExecutionTimeout: aws.Int64(1), + }, + }, + Name: aws.String("String"), + Packages: []*string{ + aws.String("String"), // Required + // More values... + }, + Shortname: aws.String("String"), + UseEbsOptimizedInstances: aws.Bool(true), + VolumeConfigurations: []*opsworks.VolumeConfiguration{ + { // Required + MountPoint: aws.String("String"), // Required + NumberOfDisks: aws.Int64(1), // Required + Size: aws.Int64(1), // Required + Iops: aws.Int64(1), + RaidLevel: aws.Int64(1), + VolumeType: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.UpdateLayer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_UpdateMyUserProfile() { + svc := opsworks.New(session.New()) + + params := &opsworks.UpdateMyUserProfileInput{ + SshPublicKey: aws.String("String"), + } + resp, err := svc.UpdateMyUserProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_UpdateRdsDbInstance() { + svc := opsworks.New(session.New()) + + params := &opsworks.UpdateRdsDbInstanceInput{ + RdsDbInstanceArn: aws.String("String"), // Required + DbPassword: aws.String("String"), + DbUser: aws.String("String"), + } + resp, err := svc.UpdateRdsDbInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_UpdateStack() { + svc := opsworks.New(session.New()) + + params := &opsworks.UpdateStackInput{ + StackId: aws.String("String"), // Required + AgentVersion: aws.String("String"), + Attributes: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... 
+ }, + ChefConfiguration: &opsworks.ChefConfiguration{ + BerkshelfVersion: aws.String("String"), + ManageBerkshelf: aws.Bool(true), + }, + ConfigurationManager: &opsworks.StackConfigurationManager{ + Name: aws.String("String"), + Version: aws.String("String"), + }, + CustomCookbooksSource: &opsworks.Source{ + Password: aws.String("String"), + Revision: aws.String("String"), + SshKey: aws.String("String"), + Type: aws.String("SourceType"), + Url: aws.String("String"), + Username: aws.String("String"), + }, + CustomJson: aws.String("String"), + DefaultAvailabilityZone: aws.String("String"), + DefaultInstanceProfileArn: aws.String("String"), + DefaultOs: aws.String("String"), + DefaultRootDeviceType: aws.String("RootDeviceType"), + DefaultSshKeyName: aws.String("String"), + DefaultSubnetId: aws.String("String"), + HostnameTheme: aws.String("String"), + Name: aws.String("String"), + ServiceRoleArn: aws.String("String"), + UseCustomCookbooks: aws.Bool(true), + UseOpsworksSecurityGroups: aws.Bool(true), + } + resp, err := svc.UpdateStack(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_UpdateUserProfile() { + svc := opsworks.New(session.New()) + + params := &opsworks.UpdateUserProfileInput{ + IamUserArn: aws.String("String"), // Required + AllowSelfManagement: aws.Bool(true), + SshPublicKey: aws.String("String"), + SshUsername: aws.String("String"), + } + resp, err := svc.UpdateUserProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_UpdateVolume() { + svc := opsworks.New(session.New()) + + params := &opsworks.UpdateVolumeInput{ + VolumeId: aws.String("String"), // Required + MountPoint: aws.String("String"), + Name: aws.String("String"), + } + resp, err := svc.UpdateVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/opsworks/opsworksiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/opsworks/opsworksiface/interface.go new file mode 100644 index 000000000..9e477674a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/opsworks/opsworksiface/interface.go @@ -0,0 +1,296 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package opsworksiface provides an interface for the AWS OpsWorks. +package opsworksiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/opsworks" +) + +// OpsWorksAPI is the interface type for opsworks.OpsWorks. 
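+//
+// Because the concrete *opsworks.OpsWorks satisfies it (see the assertion
+// at the bottom of this file), code can depend on OpsWorksAPI and be handed
+// a test double instead. A minimal sketch (mockOpsWorks and its canned
+// response are hypothetical):
+//
+//	type mockOpsWorks struct {
+//		opsworksiface.OpsWorksAPI
+//	}
+//
+//	func (m *mockOpsWorks) DescribeStacks(in *opsworks.DescribeStacksInput) (*opsworks.DescribeStacksOutput, error) {
+//		return &opsworks.DescribeStacksOutput{}, nil
+//	}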
+type OpsWorksAPI interface { + AssignInstanceRequest(*opsworks.AssignInstanceInput) (*request.Request, *opsworks.AssignInstanceOutput) + + AssignInstance(*opsworks.AssignInstanceInput) (*opsworks.AssignInstanceOutput, error) + + AssignVolumeRequest(*opsworks.AssignVolumeInput) (*request.Request, *opsworks.AssignVolumeOutput) + + AssignVolume(*opsworks.AssignVolumeInput) (*opsworks.AssignVolumeOutput, error) + + AssociateElasticIpRequest(*opsworks.AssociateElasticIpInput) (*request.Request, *opsworks.AssociateElasticIpOutput) + + AssociateElasticIp(*opsworks.AssociateElasticIpInput) (*opsworks.AssociateElasticIpOutput, error) + + AttachElasticLoadBalancerRequest(*opsworks.AttachElasticLoadBalancerInput) (*request.Request, *opsworks.AttachElasticLoadBalancerOutput) + + AttachElasticLoadBalancer(*opsworks.AttachElasticLoadBalancerInput) (*opsworks.AttachElasticLoadBalancerOutput, error) + + CloneStackRequest(*opsworks.CloneStackInput) (*request.Request, *opsworks.CloneStackOutput) + + CloneStack(*opsworks.CloneStackInput) (*opsworks.CloneStackOutput, error) + + CreateAppRequest(*opsworks.CreateAppInput) (*request.Request, *opsworks.CreateAppOutput) + + CreateApp(*opsworks.CreateAppInput) (*opsworks.CreateAppOutput, error) + + CreateDeploymentRequest(*opsworks.CreateDeploymentInput) (*request.Request, *opsworks.CreateDeploymentOutput) + + CreateDeployment(*opsworks.CreateDeploymentInput) (*opsworks.CreateDeploymentOutput, error) + + CreateInstanceRequest(*opsworks.CreateInstanceInput) (*request.Request, *opsworks.CreateInstanceOutput) + + CreateInstance(*opsworks.CreateInstanceInput) (*opsworks.CreateInstanceOutput, error) + + CreateLayerRequest(*opsworks.CreateLayerInput) (*request.Request, *opsworks.CreateLayerOutput) + + CreateLayer(*opsworks.CreateLayerInput) (*opsworks.CreateLayerOutput, error) + + CreateStackRequest(*opsworks.CreateStackInput) (*request.Request, *opsworks.CreateStackOutput) + + CreateStack(*opsworks.CreateStackInput) (*opsworks.CreateStackOutput, error) + + CreateUserProfileRequest(*opsworks.CreateUserProfileInput) (*request.Request, *opsworks.CreateUserProfileOutput) + + CreateUserProfile(*opsworks.CreateUserProfileInput) (*opsworks.CreateUserProfileOutput, error) + + DeleteAppRequest(*opsworks.DeleteAppInput) (*request.Request, *opsworks.DeleteAppOutput) + + DeleteApp(*opsworks.DeleteAppInput) (*opsworks.DeleteAppOutput, error) + + DeleteInstanceRequest(*opsworks.DeleteInstanceInput) (*request.Request, *opsworks.DeleteInstanceOutput) + + DeleteInstance(*opsworks.DeleteInstanceInput) (*opsworks.DeleteInstanceOutput, error) + + DeleteLayerRequest(*opsworks.DeleteLayerInput) (*request.Request, *opsworks.DeleteLayerOutput) + + DeleteLayer(*opsworks.DeleteLayerInput) (*opsworks.DeleteLayerOutput, error) + + DeleteStackRequest(*opsworks.DeleteStackInput) (*request.Request, *opsworks.DeleteStackOutput) + + DeleteStack(*opsworks.DeleteStackInput) (*opsworks.DeleteStackOutput, error) + + DeleteUserProfileRequest(*opsworks.DeleteUserProfileInput) (*request.Request, *opsworks.DeleteUserProfileOutput) + + DeleteUserProfile(*opsworks.DeleteUserProfileInput) (*opsworks.DeleteUserProfileOutput, error) + + DeregisterEcsClusterRequest(*opsworks.DeregisterEcsClusterInput) (*request.Request, *opsworks.DeregisterEcsClusterOutput) + + DeregisterEcsCluster(*opsworks.DeregisterEcsClusterInput) (*opsworks.DeregisterEcsClusterOutput, error) + + DeregisterElasticIpRequest(*opsworks.DeregisterElasticIpInput) (*request.Request, *opsworks.DeregisterElasticIpOutput) + + 
DeregisterElasticIp(*opsworks.DeregisterElasticIpInput) (*opsworks.DeregisterElasticIpOutput, error) + + DeregisterInstanceRequest(*opsworks.DeregisterInstanceInput) (*request.Request, *opsworks.DeregisterInstanceOutput) + + DeregisterInstance(*opsworks.DeregisterInstanceInput) (*opsworks.DeregisterInstanceOutput, error) + + DeregisterRdsDbInstanceRequest(*opsworks.DeregisterRdsDbInstanceInput) (*request.Request, *opsworks.DeregisterRdsDbInstanceOutput) + + DeregisterRdsDbInstance(*opsworks.DeregisterRdsDbInstanceInput) (*opsworks.DeregisterRdsDbInstanceOutput, error) + + DeregisterVolumeRequest(*opsworks.DeregisterVolumeInput) (*request.Request, *opsworks.DeregisterVolumeOutput) + + DeregisterVolume(*opsworks.DeregisterVolumeInput) (*opsworks.DeregisterVolumeOutput, error) + + DescribeAgentVersionsRequest(*opsworks.DescribeAgentVersionsInput) (*request.Request, *opsworks.DescribeAgentVersionsOutput) + + DescribeAgentVersions(*opsworks.DescribeAgentVersionsInput) (*opsworks.DescribeAgentVersionsOutput, error) + + DescribeAppsRequest(*opsworks.DescribeAppsInput) (*request.Request, *opsworks.DescribeAppsOutput) + + DescribeApps(*opsworks.DescribeAppsInput) (*opsworks.DescribeAppsOutput, error) + + DescribeCommandsRequest(*opsworks.DescribeCommandsInput) (*request.Request, *opsworks.DescribeCommandsOutput) + + DescribeCommands(*opsworks.DescribeCommandsInput) (*opsworks.DescribeCommandsOutput, error) + + DescribeDeploymentsRequest(*opsworks.DescribeDeploymentsInput) (*request.Request, *opsworks.DescribeDeploymentsOutput) + + DescribeDeployments(*opsworks.DescribeDeploymentsInput) (*opsworks.DescribeDeploymentsOutput, error) + + DescribeEcsClustersRequest(*opsworks.DescribeEcsClustersInput) (*request.Request, *opsworks.DescribeEcsClustersOutput) + + DescribeEcsClusters(*opsworks.DescribeEcsClustersInput) (*opsworks.DescribeEcsClustersOutput, error) + + DescribeEcsClustersPages(*opsworks.DescribeEcsClustersInput, func(*opsworks.DescribeEcsClustersOutput, bool) bool) error + + DescribeElasticIpsRequest(*opsworks.DescribeElasticIpsInput) (*request.Request, *opsworks.DescribeElasticIpsOutput) + + DescribeElasticIps(*opsworks.DescribeElasticIpsInput) (*opsworks.DescribeElasticIpsOutput, error) + + DescribeElasticLoadBalancersRequest(*opsworks.DescribeElasticLoadBalancersInput) (*request.Request, *opsworks.DescribeElasticLoadBalancersOutput) + + DescribeElasticLoadBalancers(*opsworks.DescribeElasticLoadBalancersInput) (*opsworks.DescribeElasticLoadBalancersOutput, error) + + DescribeInstancesRequest(*opsworks.DescribeInstancesInput) (*request.Request, *opsworks.DescribeInstancesOutput) + + DescribeInstances(*opsworks.DescribeInstancesInput) (*opsworks.DescribeInstancesOutput, error) + + DescribeLayersRequest(*opsworks.DescribeLayersInput) (*request.Request, *opsworks.DescribeLayersOutput) + + DescribeLayers(*opsworks.DescribeLayersInput) (*opsworks.DescribeLayersOutput, error) + + DescribeLoadBasedAutoScalingRequest(*opsworks.DescribeLoadBasedAutoScalingInput) (*request.Request, *opsworks.DescribeLoadBasedAutoScalingOutput) + + DescribeLoadBasedAutoScaling(*opsworks.DescribeLoadBasedAutoScalingInput) (*opsworks.DescribeLoadBasedAutoScalingOutput, error) + + DescribeMyUserProfileRequest(*opsworks.DescribeMyUserProfileInput) (*request.Request, *opsworks.DescribeMyUserProfileOutput) + + DescribeMyUserProfile(*opsworks.DescribeMyUserProfileInput) (*opsworks.DescribeMyUserProfileOutput, error) + + DescribePermissionsRequest(*opsworks.DescribePermissionsInput) (*request.Request, 
*opsworks.DescribePermissionsOutput) + + DescribePermissions(*opsworks.DescribePermissionsInput) (*opsworks.DescribePermissionsOutput, error) + + DescribeRaidArraysRequest(*opsworks.DescribeRaidArraysInput) (*request.Request, *opsworks.DescribeRaidArraysOutput) + + DescribeRaidArrays(*opsworks.DescribeRaidArraysInput) (*opsworks.DescribeRaidArraysOutput, error) + + DescribeRdsDbInstancesRequest(*opsworks.DescribeRdsDbInstancesInput) (*request.Request, *opsworks.DescribeRdsDbInstancesOutput) + + DescribeRdsDbInstances(*opsworks.DescribeRdsDbInstancesInput) (*opsworks.DescribeRdsDbInstancesOutput, error) + + DescribeServiceErrorsRequest(*opsworks.DescribeServiceErrorsInput) (*request.Request, *opsworks.DescribeServiceErrorsOutput) + + DescribeServiceErrors(*opsworks.DescribeServiceErrorsInput) (*opsworks.DescribeServiceErrorsOutput, error) + + DescribeStackProvisioningParametersRequest(*opsworks.DescribeStackProvisioningParametersInput) (*request.Request, *opsworks.DescribeStackProvisioningParametersOutput) + + DescribeStackProvisioningParameters(*opsworks.DescribeStackProvisioningParametersInput) (*opsworks.DescribeStackProvisioningParametersOutput, error) + + DescribeStackSummaryRequest(*opsworks.DescribeStackSummaryInput) (*request.Request, *opsworks.DescribeStackSummaryOutput) + + DescribeStackSummary(*opsworks.DescribeStackSummaryInput) (*opsworks.DescribeStackSummaryOutput, error) + + DescribeStacksRequest(*opsworks.DescribeStacksInput) (*request.Request, *opsworks.DescribeStacksOutput) + + DescribeStacks(*opsworks.DescribeStacksInput) (*opsworks.DescribeStacksOutput, error) + + DescribeTimeBasedAutoScalingRequest(*opsworks.DescribeTimeBasedAutoScalingInput) (*request.Request, *opsworks.DescribeTimeBasedAutoScalingOutput) + + DescribeTimeBasedAutoScaling(*opsworks.DescribeTimeBasedAutoScalingInput) (*opsworks.DescribeTimeBasedAutoScalingOutput, error) + + DescribeUserProfilesRequest(*opsworks.DescribeUserProfilesInput) (*request.Request, *opsworks.DescribeUserProfilesOutput) + + DescribeUserProfiles(*opsworks.DescribeUserProfilesInput) (*opsworks.DescribeUserProfilesOutput, error) + + DescribeVolumesRequest(*opsworks.DescribeVolumesInput) (*request.Request, *opsworks.DescribeVolumesOutput) + + DescribeVolumes(*opsworks.DescribeVolumesInput) (*opsworks.DescribeVolumesOutput, error) + + DetachElasticLoadBalancerRequest(*opsworks.DetachElasticLoadBalancerInput) (*request.Request, *opsworks.DetachElasticLoadBalancerOutput) + + DetachElasticLoadBalancer(*opsworks.DetachElasticLoadBalancerInput) (*opsworks.DetachElasticLoadBalancerOutput, error) + + DisassociateElasticIpRequest(*opsworks.DisassociateElasticIpInput) (*request.Request, *opsworks.DisassociateElasticIpOutput) + + DisassociateElasticIp(*opsworks.DisassociateElasticIpInput) (*opsworks.DisassociateElasticIpOutput, error) + + GetHostnameSuggestionRequest(*opsworks.GetHostnameSuggestionInput) (*request.Request, *opsworks.GetHostnameSuggestionOutput) + + GetHostnameSuggestion(*opsworks.GetHostnameSuggestionInput) (*opsworks.GetHostnameSuggestionOutput, error) + + GrantAccessRequest(*opsworks.GrantAccessInput) (*request.Request, *opsworks.GrantAccessOutput) + + GrantAccess(*opsworks.GrantAccessInput) (*opsworks.GrantAccessOutput, error) + + RebootInstanceRequest(*opsworks.RebootInstanceInput) (*request.Request, *opsworks.RebootInstanceOutput) + + RebootInstance(*opsworks.RebootInstanceInput) (*opsworks.RebootInstanceOutput, error) + + RegisterEcsClusterRequest(*opsworks.RegisterEcsClusterInput) (*request.Request, 
*opsworks.RegisterEcsClusterOutput) + + RegisterEcsCluster(*opsworks.RegisterEcsClusterInput) (*opsworks.RegisterEcsClusterOutput, error) + + RegisterElasticIpRequest(*opsworks.RegisterElasticIpInput) (*request.Request, *opsworks.RegisterElasticIpOutput) + + RegisterElasticIp(*opsworks.RegisterElasticIpInput) (*opsworks.RegisterElasticIpOutput, error) + + RegisterInstanceRequest(*opsworks.RegisterInstanceInput) (*request.Request, *opsworks.RegisterInstanceOutput) + + RegisterInstance(*opsworks.RegisterInstanceInput) (*opsworks.RegisterInstanceOutput, error) + + RegisterRdsDbInstanceRequest(*opsworks.RegisterRdsDbInstanceInput) (*request.Request, *opsworks.RegisterRdsDbInstanceOutput) + + RegisterRdsDbInstance(*opsworks.RegisterRdsDbInstanceInput) (*opsworks.RegisterRdsDbInstanceOutput, error) + + RegisterVolumeRequest(*opsworks.RegisterVolumeInput) (*request.Request, *opsworks.RegisterVolumeOutput) + + RegisterVolume(*opsworks.RegisterVolumeInput) (*opsworks.RegisterVolumeOutput, error) + + SetLoadBasedAutoScalingRequest(*opsworks.SetLoadBasedAutoScalingInput) (*request.Request, *opsworks.SetLoadBasedAutoScalingOutput) + + SetLoadBasedAutoScaling(*opsworks.SetLoadBasedAutoScalingInput) (*opsworks.SetLoadBasedAutoScalingOutput, error) + + SetPermissionRequest(*opsworks.SetPermissionInput) (*request.Request, *opsworks.SetPermissionOutput) + + SetPermission(*opsworks.SetPermissionInput) (*opsworks.SetPermissionOutput, error) + + SetTimeBasedAutoScalingRequest(*opsworks.SetTimeBasedAutoScalingInput) (*request.Request, *opsworks.SetTimeBasedAutoScalingOutput) + + SetTimeBasedAutoScaling(*opsworks.SetTimeBasedAutoScalingInput) (*opsworks.SetTimeBasedAutoScalingOutput, error) + + StartInstanceRequest(*opsworks.StartInstanceInput) (*request.Request, *opsworks.StartInstanceOutput) + + StartInstance(*opsworks.StartInstanceInput) (*opsworks.StartInstanceOutput, error) + + StartStackRequest(*opsworks.StartStackInput) (*request.Request, *opsworks.StartStackOutput) + + StartStack(*opsworks.StartStackInput) (*opsworks.StartStackOutput, error) + + StopInstanceRequest(*opsworks.StopInstanceInput) (*request.Request, *opsworks.StopInstanceOutput) + + StopInstance(*opsworks.StopInstanceInput) (*opsworks.StopInstanceOutput, error) + + StopStackRequest(*opsworks.StopStackInput) (*request.Request, *opsworks.StopStackOutput) + + StopStack(*opsworks.StopStackInput) (*opsworks.StopStackOutput, error) + + UnassignInstanceRequest(*opsworks.UnassignInstanceInput) (*request.Request, *opsworks.UnassignInstanceOutput) + + UnassignInstance(*opsworks.UnassignInstanceInput) (*opsworks.UnassignInstanceOutput, error) + + UnassignVolumeRequest(*opsworks.UnassignVolumeInput) (*request.Request, *opsworks.UnassignVolumeOutput) + + UnassignVolume(*opsworks.UnassignVolumeInput) (*opsworks.UnassignVolumeOutput, error) + + UpdateAppRequest(*opsworks.UpdateAppInput) (*request.Request, *opsworks.UpdateAppOutput) + + UpdateApp(*opsworks.UpdateAppInput) (*opsworks.UpdateAppOutput, error) + + UpdateElasticIpRequest(*opsworks.UpdateElasticIpInput) (*request.Request, *opsworks.UpdateElasticIpOutput) + + UpdateElasticIp(*opsworks.UpdateElasticIpInput) (*opsworks.UpdateElasticIpOutput, error) + + UpdateInstanceRequest(*opsworks.UpdateInstanceInput) (*request.Request, *opsworks.UpdateInstanceOutput) + + UpdateInstance(*opsworks.UpdateInstanceInput) (*opsworks.UpdateInstanceOutput, error) + + UpdateLayerRequest(*opsworks.UpdateLayerInput) (*request.Request, *opsworks.UpdateLayerOutput) + + UpdateLayer(*opsworks.UpdateLayerInput) 
(*opsworks.UpdateLayerOutput, error) + + UpdateMyUserProfileRequest(*opsworks.UpdateMyUserProfileInput) (*request.Request, *opsworks.UpdateMyUserProfileOutput) + + UpdateMyUserProfile(*opsworks.UpdateMyUserProfileInput) (*opsworks.UpdateMyUserProfileOutput, error) + + UpdateRdsDbInstanceRequest(*opsworks.UpdateRdsDbInstanceInput) (*request.Request, *opsworks.UpdateRdsDbInstanceOutput) + + UpdateRdsDbInstance(*opsworks.UpdateRdsDbInstanceInput) (*opsworks.UpdateRdsDbInstanceOutput, error) + + UpdateStackRequest(*opsworks.UpdateStackInput) (*request.Request, *opsworks.UpdateStackOutput) + + UpdateStack(*opsworks.UpdateStackInput) (*opsworks.UpdateStackOutput, error) + + UpdateUserProfileRequest(*opsworks.UpdateUserProfileInput) (*request.Request, *opsworks.UpdateUserProfileOutput) + + UpdateUserProfile(*opsworks.UpdateUserProfileInput) (*opsworks.UpdateUserProfileOutput, error) + + UpdateVolumeRequest(*opsworks.UpdateVolumeInput) (*request.Request, *opsworks.UpdateVolumeOutput) + + UpdateVolume(*opsworks.UpdateVolumeInput) (*opsworks.UpdateVolumeOutput, error) +} + +var _ OpsWorksAPI = (*opsworks.OpsWorks)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/opsworks/service.go b/vendor/github.com/aws/aws-sdk-go/service/opsworks/service.go new file mode 100644 index 000000000..de8c77962 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/opsworks/service.go @@ -0,0 +1,135 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package opsworks + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// Welcome to the AWS OpsWorks API Reference. This guide provides descriptions, +// syntax, and usage examples for AWS OpsWorks actions and data types, including +// common parameters and error codes. +// +// AWS OpsWorks is an application management service that provides an integrated +// experience for overseeing the complete application lifecycle. For information +// about this product, go to the AWS OpsWorks (http://aws.amazon.com/opsworks/) +// details page. +// +// SDKs and CLI +// +// The most common way to use the AWS OpsWorks API is by using the AWS Command +// Line Interface (CLI) or by using one of the AWS SDKs to implement applications +// in your preferred language. For more information, see: +// +// AWS CLI (http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-welcome.html) +// +// AWS SDK for Java (http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/opsworks/AWSOpsWorksClient.html) +// +// AWS SDK for .NET (http://docs.aws.amazon.com/sdkfornet/latest/apidocs/html/N_Amazon_OpsWorks.htm) +// +// AWS SDK for PHP 2 (http://docs.aws.amazon.com/aws-sdk-php-2/latest/class-Aws.OpsWorks.OpsWorksClient.html) +// +// AWS SDK for Ruby (http://docs.aws.amazon.com/sdkforruby/api/) +// +// AWS SDK for Node.js (http://aws.amazon.com/documentation/sdkforjavascript/) +// +// AWS SDK for Python(Boto) (http://docs.pythonboto.org/en/latest/ref/opsworks.html) +// +// Endpoints +// +// AWS OpsWorks supports two endpoints, opsworks.us-east-1.amazonaws.com and +// opsworks.ap-south-1.amazonaws.com (both HTTPS). You must connect to one of +// those two endpoints. You can then use the API to direct AWS OpsWorks to create +// stacks in any AWS region. 
Stacks created in all regions except ap-south-1 +// are connected to the us-east-1 regional endpoint; stacks created in ap-south-1 +// are associated with the ap-south-1 regional endpoint, and can only be accessed +// or managed within that endpoint. +// +// Chef Versions +// +// When you call CreateStack, CloneStack, or UpdateStack we recommend you use +// the ConfigurationManager parameter to specify the Chef version. The recommended +// and default value for Linux stacks is currently 12. Windows stacks use Chef +// 12.2. For more information, see Chef Versions (http://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook-chef11.html). +// +// You can specify Chef 12, 11.10, or 11.4 for your Linux stack. We recommend +// migrating your existing Linux stacks to Chef 12 as soon as possible. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OpsWorks struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "opsworks" + +// New creates a new instance of the OpsWorks client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OpsWorks client from just a session. +// svc := opsworks.New(mySession) +// +// // Create a OpsWorks client with additional configuration +// svc := opsworks.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *OpsWorks { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OpsWorks { + svc := &OpsWorks{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2013-02-18", + JSONVersion: "1.1", + TargetPrefix: "OpsWorks_20130218", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a OpsWorks operation and runs any +// custom request initialization. +func (c *OpsWorks) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/opsworks/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/opsworks/waiters.go new file mode 100644 index 000000000..9aa531992 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/opsworks/waiters.go @@ -0,0 +1,355 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
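+
+// The WaitUntil functions below poll their Describe operation every Delay
+// seconds, up to MaxAttempts times, until an acceptor matches. A "pathAll"
+// acceptor succeeds only when every element of the result path has the
+// expected value; a "pathAny" failure acceptor trips as soon as any element
+// does. A typical call looks like the following sketch (the deployment ID
+// is a placeholder):
+//
+//	err := svc.WaitUntilDeploymentSuccessful(&opsworks.DescribeDeploymentsInput{
+//		DeploymentIds: []*string{aws.String("deployment-id")},
+//	})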
+ +package opsworks + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *OpsWorks) WaitUntilAppExists(input *DescribeAppsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeApps", + Delay: 1, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 200, + }, + { + State: "failure", + Matcher: "status", + Argument: "", + Expected: 400, + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *OpsWorks) WaitUntilDeploymentSuccessful(input *DescribeDeploymentsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeDeployments", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Deployments[].Status", + Expected: "successful", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Deployments[].Status", + Expected: "failed", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *OpsWorks) WaitUntilInstanceOnline(input *DescribeInstancesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstances", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Instances[].Status", + Expected: "online", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "setup_failed", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "shutting_down", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "start_failed", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "stopped", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "stopping", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "terminating", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "terminated", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "stop_failed", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *OpsWorks) WaitUntilInstanceRegistered(input *DescribeInstancesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstances", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Instances[].Status", + Expected: "registered", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "setup_failed", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "shutting_down", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "stopped", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "stopping", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "terminating", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "terminated", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "stop_failed", + }, + }, + 
} + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *OpsWorks) WaitUntilInstanceStopped(input *DescribeInstancesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstances", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Instances[].Status", + Expected: "stopped", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "booting", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "online", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "pending", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "rebooting", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "requested", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "running_setup", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "setup_failed", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "start_failed", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "stop_failed", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *OpsWorks) WaitUntilInstanceTerminated(input *DescribeInstancesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstances", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Instances[].Status", + Expected: "terminated", + }, + { + State: "success", + Matcher: "error", + Argument: "", + Expected: "ResourceNotFoundException", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "booting", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "online", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "pending", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "rebooting", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "requested", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "running_setup", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "setup_failed", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "start_failed", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/rds/api.go b/vendor/github.com/aws/aws-sdk-go/service/rds/api.go new file mode 100644 index 000000000..89af7fd3d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/rds/api.go @@ -0,0 +1,15249 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package rds provides a client for Amazon Relational Database Service. 
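+//
+// As with the other generated clients vendored here, each operation has two
+// entry points: an XxxRequest method returning a *request.Request plus its
+// output struct, for callers that need to adjust handlers before Send, and
+// a plain Xxx method that builds and sends the request in one call. A short
+// sketch (the ARN and tag values are placeholders):
+//
+//	svc := rds.New(session.New())
+//	resp, err := svc.AddTagsToResource(&rds.AddTagsToResourceInput{
+//		ResourceName: aws.String("arn:aws:rds:..."),
+//		Tags:         []*rds.Tag{{Key: aws.String("env"), Value: aws.String("dev")}},
+//	})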
+package rds + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +const opAddSourceIdentifierToSubscription = "AddSourceIdentifierToSubscription" + +// AddSourceIdentifierToSubscriptionRequest generates a "aws/request.Request" representing the +// client's request for the AddSourceIdentifierToSubscription operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddSourceIdentifierToSubscription method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddSourceIdentifierToSubscriptionRequest method. +// req, resp := client.AddSourceIdentifierToSubscriptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) AddSourceIdentifierToSubscriptionRequest(input *AddSourceIdentifierToSubscriptionInput) (req *request.Request, output *AddSourceIdentifierToSubscriptionOutput) { + op := &request.Operation{ + Name: opAddSourceIdentifierToSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddSourceIdentifierToSubscriptionInput{} + } + + req = c.newRequest(op, input, output) + output = &AddSourceIdentifierToSubscriptionOutput{} + req.Data = output + return +} + +// Adds a source identifier to an existing RDS event notification subscription. +func (c *RDS) AddSourceIdentifierToSubscription(input *AddSourceIdentifierToSubscriptionInput) (*AddSourceIdentifierToSubscriptionOutput, error) { + req, out := c.AddSourceIdentifierToSubscriptionRequest(input) + err := req.Send() + return out, err +} + +const opAddTagsToResource = "AddTagsToResource" + +// AddTagsToResourceRequest generates a "aws/request.Request" representing the +// client's request for the AddTagsToResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddTagsToResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddTagsToResourceRequest method. 
+// req, resp := client.AddTagsToResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) AddTagsToResourceRequest(input *AddTagsToResourceInput) (req *request.Request, output *AddTagsToResourceOutput) { + op := &request.Operation{ + Name: opAddTagsToResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddTagsToResourceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AddTagsToResourceOutput{} + req.Data = output + return +} + +// Adds metadata tags to an Amazon RDS resource. These tags can also be used +// with cost allocation reporting to track cost associated with Amazon RDS resources, +// or used in a Condition statement in an IAM policy for Amazon RDS. +// +// For an overview on tagging Amazon RDS resources, see Tagging Amazon RDS +// Resources (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.Tagging.html). +func (c *RDS) AddTagsToResource(input *AddTagsToResourceInput) (*AddTagsToResourceOutput, error) { + req, out := c.AddTagsToResourceRequest(input) + err := req.Send() + return out, err +} + +const opApplyPendingMaintenanceAction = "ApplyPendingMaintenanceAction" + +// ApplyPendingMaintenanceActionRequest generates a "aws/request.Request" representing the +// client's request for the ApplyPendingMaintenanceAction operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ApplyPendingMaintenanceAction method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ApplyPendingMaintenanceActionRequest method. +// req, resp := client.ApplyPendingMaintenanceActionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) ApplyPendingMaintenanceActionRequest(input *ApplyPendingMaintenanceActionInput) (req *request.Request, output *ApplyPendingMaintenanceActionOutput) { + op := &request.Operation{ + Name: opApplyPendingMaintenanceAction, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ApplyPendingMaintenanceActionInput{} + } + + req = c.newRequest(op, input, output) + output = &ApplyPendingMaintenanceActionOutput{} + req.Data = output + return +} + +// Applies a pending maintenance action to a resource (for example, to a DB +// instance). +func (c *RDS) ApplyPendingMaintenanceAction(input *ApplyPendingMaintenanceActionInput) (*ApplyPendingMaintenanceActionOutput, error) { + req, out := c.ApplyPendingMaintenanceActionRequest(input) + err := req.Send() + return out, err +} + +const opAuthorizeDBSecurityGroupIngress = "AuthorizeDBSecurityGroupIngress" + +// AuthorizeDBSecurityGroupIngressRequest generates a "aws/request.Request" representing the +// client's request for the AuthorizeDBSecurityGroupIngress operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AuthorizeDBSecurityGroupIngress method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AuthorizeDBSecurityGroupIngressRequest method. +// req, resp := client.AuthorizeDBSecurityGroupIngressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) AuthorizeDBSecurityGroupIngressRequest(input *AuthorizeDBSecurityGroupIngressInput) (req *request.Request, output *AuthorizeDBSecurityGroupIngressOutput) { + op := &request.Operation{ + Name: opAuthorizeDBSecurityGroupIngress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AuthorizeDBSecurityGroupIngressInput{} + } + + req = c.newRequest(op, input, output) + output = &AuthorizeDBSecurityGroupIngressOutput{} + req.Data = output + return +} + +// Enables ingress to a DBSecurityGroup using one of two forms of authorization. +// First, EC2 or VPC security groups can be added to the DBSecurityGroup if +// the application using the database is running on EC2 or VPC instances. Second, +// IP ranges are available if the application accessing your database is running +// on the Internet. Required parameters for this API are one of CIDR range, +// EC2SecurityGroupId for VPC, or (EC2SecurityGroupOwnerId and either EC2SecurityGroupName +// or EC2SecurityGroupId for non-VPC). +// +// You cannot authorize ingress from an EC2 security group in one region to +// an Amazon RDS DB instance in another. You cannot authorize ingress from a +// VPC security group in one VPC to an Amazon RDS DB instance in another. +// +// For an overview of CIDR ranges, go to the Wikipedia Tutorial (http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing). +func (c *RDS) AuthorizeDBSecurityGroupIngress(input *AuthorizeDBSecurityGroupIngressInput) (*AuthorizeDBSecurityGroupIngressOutput, error) { + req, out := c.AuthorizeDBSecurityGroupIngressRequest(input) + err := req.Send() + return out, err +} + +const opCopyDBClusterSnapshot = "CopyDBClusterSnapshot" + +// CopyDBClusterSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the CopyDBClusterSnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CopyDBClusterSnapshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CopyDBClusterSnapshotRequest method. 
+// req, resp := client.CopyDBClusterSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) CopyDBClusterSnapshotRequest(input *CopyDBClusterSnapshotInput) (req *request.Request, output *CopyDBClusterSnapshotOutput) { + op := &request.Operation{ + Name: opCopyDBClusterSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CopyDBClusterSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &CopyDBClusterSnapshotOutput{} + req.Data = output + return +} + +// Creates a snapshot of a DB cluster. For more information on Amazon Aurora, +// see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) CopyDBClusterSnapshot(input *CopyDBClusterSnapshotInput) (*CopyDBClusterSnapshotOutput, error) { + req, out := c.CopyDBClusterSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opCopyDBParameterGroup = "CopyDBParameterGroup" + +// CopyDBParameterGroupRequest generates a "aws/request.Request" representing the +// client's request for the CopyDBParameterGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CopyDBParameterGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CopyDBParameterGroupRequest method. +// req, resp := client.CopyDBParameterGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) CopyDBParameterGroupRequest(input *CopyDBParameterGroupInput) (req *request.Request, output *CopyDBParameterGroupOutput) { + op := &request.Operation{ + Name: opCopyDBParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CopyDBParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CopyDBParameterGroupOutput{} + req.Data = output + return +} + +// Copies the specified DB parameter group. +func (c *RDS) CopyDBParameterGroup(input *CopyDBParameterGroupInput) (*CopyDBParameterGroupOutput, error) { + req, out := c.CopyDBParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opCopyDBSnapshot = "CopyDBSnapshot" + +// CopyDBSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the CopyDBSnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CopyDBSnapshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the CopyDBSnapshotRequest method. +// req, resp := client.CopyDBSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) CopyDBSnapshotRequest(input *CopyDBSnapshotInput) (req *request.Request, output *CopyDBSnapshotOutput) { + op := &request.Operation{ + Name: opCopyDBSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CopyDBSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &CopyDBSnapshotOutput{} + req.Data = output + return +} + +// Copies the specified DB snapshot. The source DB snapshot must be in the "available" +// state. +// +// If you are copying from a shared manual DB snapshot, the SourceDBSnapshotIdentifier +// must be the ARN of the shared DB snapshot. +func (c *RDS) CopyDBSnapshot(input *CopyDBSnapshotInput) (*CopyDBSnapshotOutput, error) { + req, out := c.CopyDBSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opCopyOptionGroup = "CopyOptionGroup" + +// CopyOptionGroupRequest generates a "aws/request.Request" representing the +// client's request for the CopyOptionGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CopyOptionGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CopyOptionGroupRequest method. +// req, resp := client.CopyOptionGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) CopyOptionGroupRequest(input *CopyOptionGroupInput) (req *request.Request, output *CopyOptionGroupOutput) { + op := &request.Operation{ + Name: opCopyOptionGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CopyOptionGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CopyOptionGroupOutput{} + req.Data = output + return +} + +// Copies the specified option group. +func (c *RDS) CopyOptionGroup(input *CopyOptionGroupInput) (*CopyOptionGroupOutput, error) { + req, out := c.CopyOptionGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateDBCluster = "CreateDBCluster" + +// CreateDBClusterRequest generates a "aws/request.Request" representing the +// client's request for the CreateDBCluster operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDBCluster method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDBClusterRequest method. 
+// req, resp := client.CreateDBClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) CreateDBClusterRequest(input *CreateDBClusterInput) (req *request.Request, output *CreateDBClusterOutput) { + op := &request.Operation{ + Name: opCreateDBCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDBClusterInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDBClusterOutput{} + req.Data = output + return +} + +// Creates a new Amazon Aurora DB cluster. +// +// You can use the ReplicationSourceIdentifier parameter to create the DB cluster +// as a Read Replica of another DB cluster. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) CreateDBCluster(input *CreateDBClusterInput) (*CreateDBClusterOutput, error) { + req, out := c.CreateDBClusterRequest(input) + err := req.Send() + return out, err +} + +const opCreateDBClusterParameterGroup = "CreateDBClusterParameterGroup" + +// CreateDBClusterParameterGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateDBClusterParameterGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDBClusterParameterGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDBClusterParameterGroupRequest method. +// req, resp := client.CreateDBClusterParameterGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) CreateDBClusterParameterGroupRequest(input *CreateDBClusterParameterGroupInput) (req *request.Request, output *CreateDBClusterParameterGroupOutput) { + op := &request.Operation{ + Name: opCreateDBClusterParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDBClusterParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDBClusterParameterGroupOutput{} + req.Data = output + return +} + +// Creates a new DB cluster parameter group. +// +// Parameters in a DB cluster parameter group apply to all of the instances +// in a DB cluster. +// +// A DB cluster parameter group is initially created with the default parameters +// for the database engine used by instances in the DB cluster. To provide custom +// values for any of the parameters, you must modify the group after creating +// it using ModifyDBClusterParameterGroup. Once you've created a DB cluster +// parameter group, you need to associate it with your DB cluster using ModifyDBCluster. +// When you associate a new DB cluster parameter group with a running DB cluster, +// you need to reboot the DB instances in the DB cluster without failover for +// the new DB cluster parameter group and associated settings to take effect. 
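+//
+// A rough sketch of that create-then-associate sequence (the identifiers and
+// family value are illustrative assumptions; svc is an *RDS client as
+// constructed in the package example above):
+//
+//    _, err := svc.CreateDBClusterParameterGroup(&rds.CreateDBClusterParameterGroupInput{
+//        DBClusterParameterGroupName: aws.String("my-cluster-params"),
+//        DBParameterGroupFamily:      aws.String("aurora5.6"),
+//        Description:                 aws.String("custom cluster parameters"),
+//    })
+//    if err == nil {
+//        // Associate the group with an existing cluster; the cluster's
+//        // instances must then be rebooted for it to take effect.
+//        _, err = svc.ModifyDBCluster(&rds.ModifyDBClusterInput{
+//            DBClusterIdentifier:         aws.String("my-cluster"),
+//            DBClusterParameterGroupName: aws.String("my-cluster-params"),
+//        })
+//    }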
+// +// After you create a DB cluster parameter group, you should wait at least +// 5 minutes before creating your first DB cluster that uses that DB cluster +// parameter group as the default parameter group. This allows Amazon RDS to +// fully complete the create action before the DB cluster parameter group is +// used as the default for a new DB cluster. This is especially important for +// parameters that are critical when creating the default database for a DB +// cluster, such as the character set for the default database defined by the +// character_set_database parameter. You can use the Parameter Groups option +// of the Amazon RDS console (https://console.aws.amazon.com/rds/) or the DescribeDBClusterParameters +// command to verify that your DB cluster parameter group has been created or +// modified. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) CreateDBClusterParameterGroup(input *CreateDBClusterParameterGroupInput) (*CreateDBClusterParameterGroupOutput, error) { + req, out := c.CreateDBClusterParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateDBClusterSnapshot = "CreateDBClusterSnapshot" + +// CreateDBClusterSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the CreateDBClusterSnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDBClusterSnapshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDBClusterSnapshotRequest method. +// req, resp := client.CreateDBClusterSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) CreateDBClusterSnapshotRequest(input *CreateDBClusterSnapshotInput) (req *request.Request, output *CreateDBClusterSnapshotOutput) { + op := &request.Operation{ + Name: opCreateDBClusterSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDBClusterSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDBClusterSnapshotOutput{} + req.Data = output + return +} + +// Creates a snapshot of a DB cluster. For more information on Amazon Aurora, +// see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) CreateDBClusterSnapshot(input *CreateDBClusterSnapshotInput) (*CreateDBClusterSnapshotOutput, error) { + req, out := c.CreateDBClusterSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opCreateDBInstance = "CreateDBInstance" + +// CreateDBInstanceRequest generates a "aws/request.Request" representing the +// client's request for the CreateDBInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDBInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDBInstanceRequest method. +// req, resp := client.CreateDBInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) CreateDBInstanceRequest(input *CreateDBInstanceInput) (req *request.Request, output *CreateDBInstanceOutput) { + op := &request.Operation{ + Name: opCreateDBInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDBInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDBInstanceOutput{} + req.Data = output + return +} + +// Creates a new DB instance. +func (c *RDS) CreateDBInstance(input *CreateDBInstanceInput) (*CreateDBInstanceOutput, error) { + req, out := c.CreateDBInstanceRequest(input) + err := req.Send() + return out, err +} + +const opCreateDBInstanceReadReplica = "CreateDBInstanceReadReplica" + +// CreateDBInstanceReadReplicaRequest generates a "aws/request.Request" representing the +// client's request for the CreateDBInstanceReadReplica operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDBInstanceReadReplica method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDBInstanceReadReplicaRequest method. +// req, resp := client.CreateDBInstanceReadReplicaRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) CreateDBInstanceReadReplicaRequest(input *CreateDBInstanceReadReplicaInput) (req *request.Request, output *CreateDBInstanceReadReplicaOutput) { + op := &request.Operation{ + Name: opCreateDBInstanceReadReplica, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDBInstanceReadReplicaInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDBInstanceReadReplicaOutput{} + req.Data = output + return +} + +// Creates a DB instance for a DB instance running MySQL, MariaDB, or PostgreSQL +// that acts as a Read Replica of a source DB instance. +// +// All Read Replica DB instances are created as Single-AZ deployments with +// backups disabled. All other DB instance attributes (including DB security +// groups and DB parameter groups) are inherited from the source DB instance, +// except as specified below. +// +// The source DB instance must have backup retention enabled. 
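+//
+// A minimal sketch (both identifiers are illustrative assumptions; the source
+// instance must have backup retention enabled, as noted above):
+//
+//    replica, err := svc.CreateDBInstanceReadReplica(&rds.CreateDBInstanceReadReplicaInput{
+//        DBInstanceIdentifier:       aws.String("mydb-replica"),
+//        SourceDBInstanceIdentifier: aws.String("mydb"),
+//    })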
+func (c *RDS) CreateDBInstanceReadReplica(input *CreateDBInstanceReadReplicaInput) (*CreateDBInstanceReadReplicaOutput, error) { + req, out := c.CreateDBInstanceReadReplicaRequest(input) + err := req.Send() + return out, err +} + +const opCreateDBParameterGroup = "CreateDBParameterGroup" + +// CreateDBParameterGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateDBParameterGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDBParameterGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDBParameterGroupRequest method. +// req, resp := client.CreateDBParameterGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) CreateDBParameterGroupRequest(input *CreateDBParameterGroupInput) (req *request.Request, output *CreateDBParameterGroupOutput) { + op := &request.Operation{ + Name: opCreateDBParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDBParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDBParameterGroupOutput{} + req.Data = output + return +} + +// Creates a new DB parameter group. +// +// A DB parameter group is initially created with the default parameters for +// the database engine used by the DB instance. To provide custom values for +// any of the parameters, you must modify the group after creating it using +// ModifyDBParameterGroup. Once you've created a DB parameter group, you need +// to associate it with your DB instance using ModifyDBInstance. When you associate +// a new DB parameter group with a running DB instance, you need to reboot the +// DB instance without failover for the new DB parameter group and associated +// settings to take effect. +// +// After you create a DB parameter group, you should wait at least 5 minutes +// before creating your first DB instance that uses that DB parameter group +// as the default parameter group. This allows Amazon RDS to fully complete +// the create action before the parameter group is used as the default for a +// new DB instance. This is especially important for parameters that are critical +// when creating the default database for a DB instance, such as the character +// set for the default database defined by the character_set_database parameter. +// You can use the Parameter Groups option of the Amazon RDS console (https://console.aws.amazon.com/rds/) +// or the DescribeDBParameters command to verify that your DB parameter group +// has been created or modified. +func (c *RDS) CreateDBParameterGroup(input *CreateDBParameterGroupInput) (*CreateDBParameterGroupOutput, error) { + req, out := c.CreateDBParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateDBSecurityGroup = "CreateDBSecurityGroup" + +// CreateDBSecurityGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateDBSecurityGroup operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDBSecurityGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDBSecurityGroupRequest method. +// req, resp := client.CreateDBSecurityGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) CreateDBSecurityGroupRequest(input *CreateDBSecurityGroupInput) (req *request.Request, output *CreateDBSecurityGroupOutput) { + op := &request.Operation{ + Name: opCreateDBSecurityGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDBSecurityGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDBSecurityGroupOutput{} + req.Data = output + return +} + +// Creates a new DB security group. DB security groups control access to a DB +// instance. +func (c *RDS) CreateDBSecurityGroup(input *CreateDBSecurityGroupInput) (*CreateDBSecurityGroupOutput, error) { + req, out := c.CreateDBSecurityGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateDBSnapshot = "CreateDBSnapshot" + +// CreateDBSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the CreateDBSnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDBSnapshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDBSnapshotRequest method. +// req, resp := client.CreateDBSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) CreateDBSnapshotRequest(input *CreateDBSnapshotInput) (req *request.Request, output *CreateDBSnapshotOutput) { + op := &request.Operation{ + Name: opCreateDBSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDBSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDBSnapshotOutput{} + req.Data = output + return +} + +// Creates a DBSnapshot. The source DBInstance must be in "available" state. +func (c *RDS) CreateDBSnapshot(input *CreateDBSnapshotInput) (*CreateDBSnapshotOutput, error) { + req, out := c.CreateDBSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opCreateDBSubnetGroup = "CreateDBSubnetGroup" + +// CreateDBSubnetGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateDBSubnetGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDBSubnetGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDBSubnetGroupRequest method. +// req, resp := client.CreateDBSubnetGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) CreateDBSubnetGroupRequest(input *CreateDBSubnetGroupInput) (req *request.Request, output *CreateDBSubnetGroupOutput) { + op := &request.Operation{ + Name: opCreateDBSubnetGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDBSubnetGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDBSubnetGroupOutput{} + req.Data = output + return +} + +// Creates a new DB subnet group. DB subnet groups must contain at least one +// subnet in at least two AZs in the region. +func (c *RDS) CreateDBSubnetGroup(input *CreateDBSubnetGroupInput) (*CreateDBSubnetGroupOutput, error) { + req, out := c.CreateDBSubnetGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateEventSubscription = "CreateEventSubscription" + +// CreateEventSubscriptionRequest generates a "aws/request.Request" representing the +// client's request for the CreateEventSubscription operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateEventSubscription method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateEventSubscriptionRequest method. +// req, resp := client.CreateEventSubscriptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) CreateEventSubscriptionRequest(input *CreateEventSubscriptionInput) (req *request.Request, output *CreateEventSubscriptionOutput) { + op := &request.Operation{ + Name: opCreateEventSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateEventSubscriptionInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateEventSubscriptionOutput{} + req.Data = output + return +} + +// Creates an RDS event notification subscription. This action requires a topic +// ARN (Amazon Resource Name) created by either the RDS console, the SNS console, +// or the SNS API. To obtain an ARN with SNS, you must create a topic in Amazon +// SNS and subscribe to the topic. The ARN is displayed in the SNS console. +// +// You can specify the type of source (SourceType) you want to be notified +// of, provide a list of RDS sources (SourceIds) that trigger the events, and +// provide a list of event categories (EventCategories) for events you want +// to be notified of.
For example, you can specify SourceType = db-instance, +// SourceIds = mydbinstance1, mydbinstance2 and EventCategories = Availability, +// Backup. +// +// If you specify both the SourceType and SourceIds, such as SourceType = db-instance +// and SourceIdentifier = myDBInstance1, you will be notified of all the db-instance +// events for the specified source. If you specify a SourceType but do not specify +// a SourceIdentifier, you will receive notice of the events for that source +// type for all your RDS sources. If you specify neither the SourceType +// nor the SourceIdentifier, you will be notified of events generated from all +// RDS sources belonging to your customer account. +func (c *RDS) CreateEventSubscription(input *CreateEventSubscriptionInput) (*CreateEventSubscriptionOutput, error) { + req, out := c.CreateEventSubscriptionRequest(input) + err := req.Send() + return out, err +} + +const opCreateOptionGroup = "CreateOptionGroup" + +// CreateOptionGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateOptionGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateOptionGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateOptionGroupRequest method. +// req, resp := client.CreateOptionGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) CreateOptionGroupRequest(input *CreateOptionGroupInput) (req *request.Request, output *CreateOptionGroupOutput) { + op := &request.Operation{ + Name: opCreateOptionGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateOptionGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateOptionGroupOutput{} + req.Data = output + return +} + +// Creates a new option group. You can create up to 20 option groups. +func (c *RDS) CreateOptionGroup(input *CreateOptionGroupInput) (*CreateOptionGroupOutput, error) { + req, out := c.CreateOptionGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDBCluster = "DeleteDBCluster" + +// DeleteDBClusterRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDBCluster operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDBCluster method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteDBClusterRequest method.
+// req, resp := client.DeleteDBClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) DeleteDBClusterRequest(input *DeleteDBClusterInput) (req *request.Request, output *DeleteDBClusterOutput) { + op := &request.Operation{ + Name: opDeleteDBCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDBClusterInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteDBClusterOutput{} + req.Data = output + return +} + +// The DeleteDBCluster action deletes a previously provisioned DB cluster. When +// you delete a DB cluster, all automated backups for that DB cluster are deleted +// and cannot be recovered. Manual DB cluster snapshots of the specified DB +// cluster are not deleted. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) DeleteDBCluster(input *DeleteDBClusterInput) (*DeleteDBClusterOutput, error) { + req, out := c.DeleteDBClusterRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDBClusterParameterGroup = "DeleteDBClusterParameterGroup" + +// DeleteDBClusterParameterGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDBClusterParameterGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDBClusterParameterGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteDBClusterParameterGroupRequest method. +// req, resp := client.DeleteDBClusterParameterGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) DeleteDBClusterParameterGroupRequest(input *DeleteDBClusterParameterGroupInput) (req *request.Request, output *DeleteDBClusterParameterGroupOutput) { + op := &request.Operation{ + Name: opDeleteDBClusterParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDBClusterParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteDBClusterParameterGroupOutput{} + req.Data = output + return +} + +// Deletes a specified DB cluster parameter group. The DB cluster parameter +// group to be deleted cannot be associated with any DB clusters. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. 
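+//
+// A minimal sketch (the group name is an illustrative assumption; the group
+// must not be associated with any DB cluster when this is called):
+//
+//    _, err := svc.DeleteDBClusterParameterGroup(&rds.DeleteDBClusterParameterGroupInput{
+//        DBClusterParameterGroupName: aws.String("my-cluster-params"),
+//    })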
+func (c *RDS) DeleteDBClusterParameterGroup(input *DeleteDBClusterParameterGroupInput) (*DeleteDBClusterParameterGroupOutput, error) { + req, out := c.DeleteDBClusterParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDBClusterSnapshot = "DeleteDBClusterSnapshot" + +// DeleteDBClusterSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDBClusterSnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDBClusterSnapshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteDBClusterSnapshotRequest method. +// req, resp := client.DeleteDBClusterSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) DeleteDBClusterSnapshotRequest(input *DeleteDBClusterSnapshotInput) (req *request.Request, output *DeleteDBClusterSnapshotOutput) { + op := &request.Operation{ + Name: opDeleteDBClusterSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDBClusterSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteDBClusterSnapshotOutput{} + req.Data = output + return +} + +// Deletes a DB cluster snapshot. If the snapshot is being copied, the copy +// operation is terminated. +// +// The DB cluster snapshot must be in the available state to be deleted. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) DeleteDBClusterSnapshot(input *DeleteDBClusterSnapshotInput) (*DeleteDBClusterSnapshotOutput, error) { + req, out := c.DeleteDBClusterSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDBInstance = "DeleteDBInstance" + +// DeleteDBInstanceRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDBInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDBInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteDBInstanceRequest method. 
+// req, resp := client.DeleteDBInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) DeleteDBInstanceRequest(input *DeleteDBInstanceInput) (req *request.Request, output *DeleteDBInstanceOutput) { + op := &request.Operation{ + Name: opDeleteDBInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDBInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteDBInstanceOutput{} + req.Data = output + return +} + +// The DeleteDBInstance action deletes a previously provisioned DB instance. +// When you delete a DB instance, all automated backups for that instance are +// deleted and cannot be recovered. Manual DB snapshots of the DB instance to +// be deleted by DeleteDBInstance are not deleted. +// +// If you request a final DB snapshot, the status of the Amazon RDS DB instance +// is deleting until the DB snapshot is created. The API action DescribeDBInstances +// is used to monitor the status of this operation. The action cannot be canceled +// or reverted once submitted. +// +// Note that when a DB instance is in a failure state and has a status of failed, +// incompatible-restore, or incompatible-network, you can only delete it when +// the SkipFinalSnapshot parameter is set to true. +// +// If the specified DB instance is part of an Amazon Aurora DB cluster, you +// cannot delete the DB instance if the following are true: +// +// The DB cluster is a Read Replica of another Amazon Aurora DB cluster. +// +// The DB instance is the only instance in the DB cluster. +// +// To delete a DB instance in this case, first call the PromoteReadReplicaDBCluster +// API action to promote the DB cluster so it's no longer a Read Replica. After +// the promotion completes, call the DeleteDBInstance API action to delete +// the final instance in the DB cluster. +func (c *RDS) DeleteDBInstance(input *DeleteDBInstanceInput) (*DeleteDBInstanceOutput, error) { + req, out := c.DeleteDBInstanceRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDBParameterGroup = "DeleteDBParameterGroup" + +// DeleteDBParameterGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDBParameterGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDBParameterGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteDBParameterGroupRequest method.
+// req, resp := client.DeleteDBParameterGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) DeleteDBParameterGroupRequest(input *DeleteDBParameterGroupInput) (req *request.Request, output *DeleteDBParameterGroupOutput) { + op := &request.Operation{ + Name: opDeleteDBParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDBParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteDBParameterGroupOutput{} + req.Data = output + return +} + +// Deletes a specified DBParameterGroup. The DBParameterGroup to be deleted +// cannot be associated with any DB instances. +func (c *RDS) DeleteDBParameterGroup(input *DeleteDBParameterGroupInput) (*DeleteDBParameterGroupOutput, error) { + req, out := c.DeleteDBParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDBSecurityGroup = "DeleteDBSecurityGroup" + +// DeleteDBSecurityGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDBSecurityGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDBSecurityGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteDBSecurityGroupRequest method. +// req, resp := client.DeleteDBSecurityGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) DeleteDBSecurityGroupRequest(input *DeleteDBSecurityGroupInput) (req *request.Request, output *DeleteDBSecurityGroupOutput) { + op := &request.Operation{ + Name: opDeleteDBSecurityGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDBSecurityGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteDBSecurityGroupOutput{} + req.Data = output + return +} + +// Deletes a DB security group. +// +// The specified DB security group must not be associated with any DB instances. +func (c *RDS) DeleteDBSecurityGroup(input *DeleteDBSecurityGroupInput) (*DeleteDBSecurityGroupOutput, error) { + req, out := c.DeleteDBSecurityGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDBSnapshot = "DeleteDBSnapshot" + +// DeleteDBSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDBSnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDBSnapshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteDBSnapshotRequest method. +// req, resp := client.DeleteDBSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) DeleteDBSnapshotRequest(input *DeleteDBSnapshotInput) (req *request.Request, output *DeleteDBSnapshotOutput) { + op := &request.Operation{ + Name: opDeleteDBSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDBSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteDBSnapshotOutput{} + req.Data = output + return +} + +// Deletes a DBSnapshot. If the snapshot is being copied, the copy operation +// is terminated. +// +// The DBSnapshot must be in the available state to be deleted. +func (c *RDS) DeleteDBSnapshot(input *DeleteDBSnapshotInput) (*DeleteDBSnapshotOutput, error) { + req, out := c.DeleteDBSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDBSubnetGroup = "DeleteDBSubnetGroup" + +// DeleteDBSubnetGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDBSubnetGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDBSubnetGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteDBSubnetGroupRequest method. +// req, resp := client.DeleteDBSubnetGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) DeleteDBSubnetGroupRequest(input *DeleteDBSubnetGroupInput) (req *request.Request, output *DeleteDBSubnetGroupOutput) { + op := &request.Operation{ + Name: opDeleteDBSubnetGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDBSubnetGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteDBSubnetGroupOutput{} + req.Data = output + return +} + +// Deletes a DB subnet group. +// +// The specified database subnet group must not be associated with any DB +// instances. 
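+//
+// A minimal sketch, including the coarse error inspection this SDK supports
+// via awserr (the group name is an illustrative assumption):
+//
+//    _, err := svc.DeleteDBSubnetGroup(&rds.DeleteDBSubnetGroupInput{
+//        DBSubnetGroupName: aws.String("my-subnet-group"),
+//    })
+//    if aerr, ok := err.(awserr.Error); ok {
+//        // e.g. the failure returned when the group is still in use.
+//        fmt.Println(aerr.Code(), aerr.Message())
+//    }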
+func (c *RDS) DeleteDBSubnetGroup(input *DeleteDBSubnetGroupInput) (*DeleteDBSubnetGroupOutput, error) { + req, out := c.DeleteDBSubnetGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteEventSubscription = "DeleteEventSubscription" + +// DeleteEventSubscriptionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteEventSubscription operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteEventSubscription method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteEventSubscriptionRequest method. +// req, resp := client.DeleteEventSubscriptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) DeleteEventSubscriptionRequest(input *DeleteEventSubscriptionInput) (req *request.Request, output *DeleteEventSubscriptionOutput) { + op := &request.Operation{ + Name: opDeleteEventSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteEventSubscriptionInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteEventSubscriptionOutput{} + req.Data = output + return +} + +// Deletes an RDS event notification subscription. +func (c *RDS) DeleteEventSubscription(input *DeleteEventSubscriptionInput) (*DeleteEventSubscriptionOutput, error) { + req, out := c.DeleteEventSubscriptionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteOptionGroup = "DeleteOptionGroup" + +// DeleteOptionGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteOptionGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteOptionGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteOptionGroupRequest method. +// req, resp := client.DeleteOptionGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) DeleteOptionGroupRequest(input *DeleteOptionGroupInput) (req *request.Request, output *DeleteOptionGroupOutput) { + op := &request.Operation{ + Name: opDeleteOptionGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteOptionGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteOptionGroupOutput{} + req.Data = output + return +} + +// Deletes an existing option group. 
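+//
+// The two-step form works the same way for deletes; a sketch (the option
+// group name is an illustrative assumption):
+//
+//    req, _ := svc.DeleteOptionGroupRequest(&rds.DeleteOptionGroupInput{
+//        OptionGroupName: aws.String("my-option-group"),
+//    })
+//    err := req.Send() // the response body is discarded by the handlers wired up above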
+func (c *RDS) DeleteOptionGroup(input *DeleteOptionGroupInput) (*DeleteOptionGroupOutput, error) { + req, out := c.DeleteOptionGroupRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAccountAttributes = "DescribeAccountAttributes" + +// DescribeAccountAttributesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAccountAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeAccountAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeAccountAttributesRequest method. +// req, resp := client.DescribeAccountAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) DescribeAccountAttributesRequest(input *DescribeAccountAttributesInput) (req *request.Request, output *DescribeAccountAttributesOutput) { + op := &request.Operation{ + Name: opDescribeAccountAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAccountAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAccountAttributesOutput{} + req.Data = output + return +} + +// Lists all of the attributes for a customer account. The attributes include +// Amazon RDS quotas for the account, such as the number of DB instances allowed. +// The description for a quota includes the quota name, current usage toward +// that quota, and the quota's maximum value. +// +// This command does not take any parameters. +func (c *RDS) DescribeAccountAttributes(input *DescribeAccountAttributesInput) (*DescribeAccountAttributesOutput, error) { + req, out := c.DescribeAccountAttributesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeCertificates = "DescribeCertificates" + +// DescribeCertificatesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCertificates operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeCertificates method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeCertificatesRequest method. 
+// req, resp := client.DescribeCertificatesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) DescribeCertificatesRequest(input *DescribeCertificatesInput) (req *request.Request, output *DescribeCertificatesOutput) { + op := &request.Operation{ + Name: opDescribeCertificates, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeCertificatesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCertificatesOutput{} + req.Data = output + return +} + +// Lists the set of CA certificates provided by Amazon RDS for this AWS account. +func (c *RDS) DescribeCertificates(input *DescribeCertificatesInput) (*DescribeCertificatesOutput, error) { + req, out := c.DescribeCertificatesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDBClusterParameterGroups = "DescribeDBClusterParameterGroups" + +// DescribeDBClusterParameterGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDBClusterParameterGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDBClusterParameterGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDBClusterParameterGroupsRequest method. +// req, resp := client.DescribeDBClusterParameterGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) DescribeDBClusterParameterGroupsRequest(input *DescribeDBClusterParameterGroupsInput) (req *request.Request, output *DescribeDBClusterParameterGroupsOutput) { + op := &request.Operation{ + Name: opDescribeDBClusterParameterGroups, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDBClusterParameterGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDBClusterParameterGroupsOutput{} + req.Data = output + return +} + +// Returns a list of DBClusterParameterGroup descriptions. If a DBClusterParameterGroupName +// parameter is specified, the list will contain only the description of the +// specified DB cluster parameter group. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) DescribeDBClusterParameterGroups(input *DescribeDBClusterParameterGroupsInput) (*DescribeDBClusterParameterGroupsOutput, error) { + req, out := c.DescribeDBClusterParameterGroupsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDBClusterParameters = "DescribeDBClusterParameters" + +// DescribeDBClusterParametersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDBClusterParameters operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
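Conversely, when only the service response matters, the comments recommend calling the operation method directly. A hedged sketch against DescribeCertificates above; the output field names (Certificates, CertificateIdentifier) are assumed from this SDK revision's RDS model:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	svc := rds.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	// One-shot path: the wrapper builds the request and calls Send internally.
	out, err := svc.DescribeCertificates(&rds.DescribeCertificatesInput{})
	if err != nil {
		log.Fatal(err)
	}
	for _, cert := range out.Certificates {
		fmt.Println(aws.StringValue(cert.CertificateIdentifier))
	}
}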
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDBClusterParameters method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDBClusterParametersRequest method. +// req, resp := client.DescribeDBClusterParametersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) DescribeDBClusterParametersRequest(input *DescribeDBClusterParametersInput) (req *request.Request, output *DescribeDBClusterParametersOutput) { + op := &request.Operation{ + Name: opDescribeDBClusterParameters, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDBClusterParametersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDBClusterParametersOutput{} + req.Data = output + return +} + +// Returns the detailed parameter list for a particular DB cluster parameter +// group. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) DescribeDBClusterParameters(input *DescribeDBClusterParametersInput) (*DescribeDBClusterParametersOutput, error) { + req, out := c.DescribeDBClusterParametersRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDBClusterSnapshotAttributes = "DescribeDBClusterSnapshotAttributes" + +// DescribeDBClusterSnapshotAttributesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDBClusterSnapshotAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDBClusterSnapshotAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDBClusterSnapshotAttributesRequest method. +// req, resp := client.DescribeDBClusterSnapshotAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) DescribeDBClusterSnapshotAttributesRequest(input *DescribeDBClusterSnapshotAttributesInput) (req *request.Request, output *DescribeDBClusterSnapshotAttributesOutput) { + op := &request.Operation{ + Name: opDescribeDBClusterSnapshotAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDBClusterSnapshotAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDBClusterSnapshotAttributesOutput{} + req.Data = output + return +} + +// Returns a list of DB cluster snapshot attribute names and values for a manual +// DB cluster snapshot. 
+// +// When sharing snapshots with other AWS accounts, DescribeDBClusterSnapshotAttributes +// returns the restore attribute and a list of IDs for the AWS accounts that +// are authorized to copy or restore the manual DB cluster snapshot. If all +// is included in the list of values for the restore attribute, then the manual +// DB cluster snapshot is public and can be copied or restored by all AWS accounts. +// +// To add or remove access for an AWS account to copy or restore a manual DB +// cluster snapshot, or to make the manual DB cluster snapshot public or private, +// use the ModifyDBClusterSnapshotAttribute API action. +func (c *RDS) DescribeDBClusterSnapshotAttributes(input *DescribeDBClusterSnapshotAttributesInput) (*DescribeDBClusterSnapshotAttributesOutput, error) { + req, out := c.DescribeDBClusterSnapshotAttributesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDBClusterSnapshots = "DescribeDBClusterSnapshots" + +// DescribeDBClusterSnapshotsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDBClusterSnapshots operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDBClusterSnapshots method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDBClusterSnapshotsRequest method. +// req, resp := client.DescribeDBClusterSnapshotsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) DescribeDBClusterSnapshotsRequest(input *DescribeDBClusterSnapshotsInput) (req *request.Request, output *DescribeDBClusterSnapshotsOutput) { + op := &request.Operation{ + Name: opDescribeDBClusterSnapshots, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDBClusterSnapshotsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDBClusterSnapshotsOutput{} + req.Data = output + return +} + +// Returns information about DB cluster snapshots. This API action supports +// pagination. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) DescribeDBClusterSnapshots(input *DescribeDBClusterSnapshotsInput) (*DescribeDBClusterSnapshotsOutput, error) { + req, out := c.DescribeDBClusterSnapshotsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDBClusters = "DescribeDBClusters" + +// DescribeDBClustersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDBClusters operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the DescribeDBClusters method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDBClustersRequest method. +// req, resp := client.DescribeDBClustersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) DescribeDBClustersRequest(input *DescribeDBClustersInput) (req *request.Request, output *DescribeDBClustersOutput) { + op := &request.Operation{ + Name: opDescribeDBClusters, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDBClustersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDBClustersOutput{} + req.Data = output + return +} + +// Returns information about provisioned Aurora DB clusters. This API supports +// pagination. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) DescribeDBClusters(input *DescribeDBClustersInput) (*DescribeDBClustersOutput, error) { + req, out := c.DescribeDBClustersRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDBEngineVersions = "DescribeDBEngineVersions" + +// DescribeDBEngineVersionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDBEngineVersions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDBEngineVersions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDBEngineVersionsRequest method. +// req, resp := client.DescribeDBEngineVersionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) DescribeDBEngineVersionsRequest(input *DescribeDBEngineVersionsInput) (req *request.Request, output *DescribeDBEngineVersionsOutput) { + op := &request.Operation{ + Name: opDescribeDBEngineVersions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeDBEngineVersionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDBEngineVersionsOutput{} + req.Data = output + return +} + +// Returns a list of the available DB engines. +func (c *RDS) DescribeDBEngineVersions(input *DescribeDBEngineVersionsInput) (*DescribeDBEngineVersionsOutput, error) { + req, out := c.DescribeDBEngineVersionsRequest(input) + err := req.Send() + return out, err +} + +// DescribeDBEngineVersionsPages iterates over the pages of a DescribeDBEngineVersions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. 
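The Paginator block in DescribeDBEngineVersionsRequest above declares "Marker" as both the input and output continuation token and "MaxRecords" as the limit token. Before the generated Pages example that follows, here is a sketch of driving that same pagination by hand; the struct field names are assumed from this SDK revision:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	svc := rds.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	// MaxRecords maps to LimitToken; Marker is both the input and output token.
	input := &rds.DescribeDBEngineVersionsInput{MaxRecords: aws.Int64(20)}
	for {
		out, err := svc.DescribeDBEngineVersions(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, v := range out.DBEngineVersions {
			fmt.Println(aws.StringValue(v.Engine), aws.StringValue(v.EngineVersion))
		}
		if out.Marker == nil { // no further pages
			break
		}
		input.Marker = out.Marker
	}
}

The DescribeDBEngineVersionsPages helper below wraps exactly this loop via EachPage.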
+// +// See DescribeDBEngineVersions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeDBEngineVersions operation. +// pageNum := 0 +// err := client.DescribeDBEngineVersionsPages(params, +// func(page *DescribeDBEngineVersionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *RDS) DescribeDBEngineVersionsPages(input *DescribeDBEngineVersionsInput, fn func(p *DescribeDBEngineVersionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeDBEngineVersionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeDBEngineVersionsOutput), lastPage) + }) +} + +const opDescribeDBInstances = "DescribeDBInstances" + +// DescribeDBInstancesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDBInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDBInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDBInstancesRequest method. +// req, resp := client.DescribeDBInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) DescribeDBInstancesRequest(input *DescribeDBInstancesInput) (req *request.Request, output *DescribeDBInstancesOutput) { + op := &request.Operation{ + Name: opDescribeDBInstances, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeDBInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDBInstancesOutput{} + req.Data = output + return +} + +// Returns information about provisioned RDS instances. This API supports pagination. +func (c *RDS) DescribeDBInstances(input *DescribeDBInstancesInput) (*DescribeDBInstancesOutput, error) { + req, out := c.DescribeDBInstancesRequest(input) + err := req.Send() + return out, err +} + +// DescribeDBInstancesPages iterates over the pages of a DescribeDBInstances operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeDBInstances method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeDBInstances operation. 
+// pageNum := 0 +// err := client.DescribeDBInstancesPages(params, +// func(page *DescribeDBInstancesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *RDS) DescribeDBInstancesPages(input *DescribeDBInstancesInput, fn func(p *DescribeDBInstancesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeDBInstancesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeDBInstancesOutput), lastPage) + }) +} + +const opDescribeDBLogFiles = "DescribeDBLogFiles" + +// DescribeDBLogFilesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDBLogFiles operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDBLogFiles method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDBLogFilesRequest method. +// req, resp := client.DescribeDBLogFilesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) DescribeDBLogFilesRequest(input *DescribeDBLogFilesInput) (req *request.Request, output *DescribeDBLogFilesOutput) { + op := &request.Operation{ + Name: opDescribeDBLogFiles, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeDBLogFilesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDBLogFilesOutput{} + req.Data = output + return +} + +// Returns a list of DB log files for the DB instance. +func (c *RDS) DescribeDBLogFiles(input *DescribeDBLogFilesInput) (*DescribeDBLogFilesOutput, error) { + req, out := c.DescribeDBLogFilesRequest(input) + err := req.Send() + return out, err +} + +// DescribeDBLogFilesPages iterates over the pages of a DescribeDBLogFiles operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeDBLogFiles method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeDBLogFiles operation. 
+// pageNum := 0 +// err := client.DescribeDBLogFilesPages(params, +// func(page *DescribeDBLogFilesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *RDS) DescribeDBLogFilesPages(input *DescribeDBLogFilesInput, fn func(p *DescribeDBLogFilesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeDBLogFilesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeDBLogFilesOutput), lastPage) + }) +} + +const opDescribeDBParameterGroups = "DescribeDBParameterGroups" + +// DescribeDBParameterGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDBParameterGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDBParameterGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDBParameterGroupsRequest method. +// req, resp := client.DescribeDBParameterGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) DescribeDBParameterGroupsRequest(input *DescribeDBParameterGroupsInput) (req *request.Request, output *DescribeDBParameterGroupsOutput) { + op := &request.Operation{ + Name: opDescribeDBParameterGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeDBParameterGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDBParameterGroupsOutput{} + req.Data = output + return +} + +// Returns a list of DBParameterGroup descriptions. If a DBParameterGroupName +// is specified, the list will contain only the description of the specified +// DB parameter group. +func (c *RDS) DescribeDBParameterGroups(input *DescribeDBParameterGroupsInput) (*DescribeDBParameterGroupsOutput, error) { + req, out := c.DescribeDBParameterGroupsRequest(input) + err := req.Send() + return out, err +} + +// DescribeDBParameterGroupsPages iterates over the pages of a DescribeDBParameterGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeDBParameterGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeDBParameterGroups operation. 
+// pageNum := 0 +// err := client.DescribeDBParameterGroupsPages(params, +// func(page *DescribeDBParameterGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *RDS) DescribeDBParameterGroupsPages(input *DescribeDBParameterGroupsInput, fn func(p *DescribeDBParameterGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeDBParameterGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeDBParameterGroupsOutput), lastPage) + }) +} + +const opDescribeDBParameters = "DescribeDBParameters" + +// DescribeDBParametersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDBParameters operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDBParameters method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDBParametersRequest method. +// req, resp := client.DescribeDBParametersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) DescribeDBParametersRequest(input *DescribeDBParametersInput) (req *request.Request, output *DescribeDBParametersOutput) { + op := &request.Operation{ + Name: opDescribeDBParameters, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeDBParametersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDBParametersOutput{} + req.Data = output + return +} + +// Returns the detailed parameter list for a particular DB parameter group. +func (c *RDS) DescribeDBParameters(input *DescribeDBParametersInput) (*DescribeDBParametersOutput, error) { + req, out := c.DescribeDBParametersRequest(input) + err := req.Send() + return out, err +} + +// DescribeDBParametersPages iterates over the pages of a DescribeDBParameters operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeDBParameters method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeDBParameters operation. 
+// pageNum := 0
+// err := client.DescribeDBParametersPages(params,
+// func(page *DescribeDBParametersOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *RDS) DescribeDBParametersPages(input *DescribeDBParametersInput, fn func(p *DescribeDBParametersOutput, lastPage bool) (shouldContinue bool)) error {
+ page, _ := c.DescribeDBParametersRequest(input)
+ page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
+ return fn(p.(*DescribeDBParametersOutput), lastPage)
+ })
+}
+
+const opDescribeDBSecurityGroups = "DescribeDBSecurityGroups"
+
+// DescribeDBSecurityGroupsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeDBSecurityGroups operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeDBSecurityGroups method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DescribeDBSecurityGroupsRequest method.
+// req, resp := client.DescribeDBSecurityGroupsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *RDS) DescribeDBSecurityGroupsRequest(input *DescribeDBSecurityGroupsInput) (req *request.Request, output *DescribeDBSecurityGroupsOutput) {
+ op := &request.Operation{
+ Name: opDescribeDBSecurityGroups,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"Marker"},
+ OutputTokens: []string{"Marker"},
+ LimitToken: "MaxRecords",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &DescribeDBSecurityGroupsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DescribeDBSecurityGroupsOutput{}
+ req.Data = output
+ return
+}
+
+// Returns a list of DBSecurityGroup descriptions. If a DBSecurityGroupName
+// is specified, the list will contain only the description of the specified
+// DB security group.
+func (c *RDS) DescribeDBSecurityGroups(input *DescribeDBSecurityGroupsInput) (*DescribeDBSecurityGroupsOutput, error) {
+ req, out := c.DescribeDBSecurityGroupsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// DescribeDBSecurityGroupsPages iterates over the pages of a DescribeDBSecurityGroups operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See DescribeDBSecurityGroups method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a DescribeDBSecurityGroups operation.
+// pageNum := 0 +// err := client.DescribeDBSecurityGroupsPages(params, +// func(page *DescribeDBSecurityGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *RDS) DescribeDBSecurityGroupsPages(input *DescribeDBSecurityGroupsInput, fn func(p *DescribeDBSecurityGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeDBSecurityGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeDBSecurityGroupsOutput), lastPage) + }) +} + +const opDescribeDBSnapshotAttributes = "DescribeDBSnapshotAttributes" + +// DescribeDBSnapshotAttributesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDBSnapshotAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDBSnapshotAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDBSnapshotAttributesRequest method. +// req, resp := client.DescribeDBSnapshotAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) DescribeDBSnapshotAttributesRequest(input *DescribeDBSnapshotAttributesInput) (req *request.Request, output *DescribeDBSnapshotAttributesOutput) { + op := &request.Operation{ + Name: opDescribeDBSnapshotAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDBSnapshotAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDBSnapshotAttributesOutput{} + req.Data = output + return +} + +// Returns a list of DB snapshot attribute names and values for a manual DB +// snapshot. +// +// When sharing snapshots with other AWS accounts, DescribeDBSnapshotAttributes +// returns the restore attribute and a list of IDs for the AWS accounts that +// are authorized to copy or restore the manual DB snapshot. If all is included +// in the list of values for the restore attribute, then the manual DB snapshot +// is public and can be copied or restored by all AWS accounts. +// +// To add or remove access for an AWS account to copy or restore a manual DB +// snapshot, or to make the manual DB snapshot public or private, use the ModifyDBSnapshotAttribute +// API action. +func (c *RDS) DescribeDBSnapshotAttributes(input *DescribeDBSnapshotAttributesInput) (*DescribeDBSnapshotAttributesOutput, error) { + req, out := c.DescribeDBSnapshotAttributesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDBSnapshots = "DescribeDBSnapshots" + +// DescribeDBSnapshotsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDBSnapshots operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDBSnapshots method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDBSnapshotsRequest method. +// req, resp := client.DescribeDBSnapshotsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) DescribeDBSnapshotsRequest(input *DescribeDBSnapshotsInput) (req *request.Request, output *DescribeDBSnapshotsOutput) { + op := &request.Operation{ + Name: opDescribeDBSnapshots, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeDBSnapshotsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDBSnapshotsOutput{} + req.Data = output + return +} + +// Returns information about DB snapshots. This API action supports pagination. +func (c *RDS) DescribeDBSnapshots(input *DescribeDBSnapshotsInput) (*DescribeDBSnapshotsOutput, error) { + req, out := c.DescribeDBSnapshotsRequest(input) + err := req.Send() + return out, err +} + +// DescribeDBSnapshotsPages iterates over the pages of a DescribeDBSnapshots operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeDBSnapshots method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeDBSnapshots operation. +// pageNum := 0 +// err := client.DescribeDBSnapshotsPages(params, +// func(page *DescribeDBSnapshotsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *RDS) DescribeDBSnapshotsPages(input *DescribeDBSnapshotsInput, fn func(p *DescribeDBSnapshotsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeDBSnapshotsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeDBSnapshotsOutput), lastPage) + }) +} + +const opDescribeDBSubnetGroups = "DescribeDBSubnetGroups" + +// DescribeDBSubnetGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDBSubnetGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDBSubnetGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDBSubnetGroupsRequest method. 
+// req, resp := client.DescribeDBSubnetGroupsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *RDS) DescribeDBSubnetGroupsRequest(input *DescribeDBSubnetGroupsInput) (req *request.Request, output *DescribeDBSubnetGroupsOutput) {
+ op := &request.Operation{
+ Name: opDescribeDBSubnetGroups,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"Marker"},
+ OutputTokens: []string{"Marker"},
+ LimitToken: "MaxRecords",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &DescribeDBSubnetGroupsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DescribeDBSubnetGroupsOutput{}
+ req.Data = output
+ return
+}
+
+// Returns a list of DBSubnetGroup descriptions. If a DBSubnetGroupName is specified,
+// the list will contain only the description of the specified DBSubnetGroup.
+//
+// For an overview of CIDR ranges, go to the Wikipedia Tutorial (http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing).
+func (c *RDS) DescribeDBSubnetGroups(input *DescribeDBSubnetGroupsInput) (*DescribeDBSubnetGroupsOutput, error) {
+ req, out := c.DescribeDBSubnetGroupsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// DescribeDBSubnetGroupsPages iterates over the pages of a DescribeDBSubnetGroups operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See DescribeDBSubnetGroups method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a DescribeDBSubnetGroups operation.
+// pageNum := 0
+// err := client.DescribeDBSubnetGroupsPages(params,
+// func(page *DescribeDBSubnetGroupsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *RDS) DescribeDBSubnetGroupsPages(input *DescribeDBSubnetGroupsInput, fn func(p *DescribeDBSubnetGroupsOutput, lastPage bool) (shouldContinue bool)) error {
+ page, _ := c.DescribeDBSubnetGroupsRequest(input)
+ page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
+ return fn(p.(*DescribeDBSubnetGroupsOutput), lastPage)
+ })
+}
+
+const opDescribeEngineDefaultClusterParameters = "DescribeEngineDefaultClusterParameters"
+
+// DescribeEngineDefaultClusterParametersRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeEngineDefaultClusterParameters operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeEngineDefaultClusterParameters method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DescribeEngineDefaultClusterParametersRequest method.
+// req, resp := client.DescribeEngineDefaultClusterParametersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) DescribeEngineDefaultClusterParametersRequest(input *DescribeEngineDefaultClusterParametersInput) (req *request.Request, output *DescribeEngineDefaultClusterParametersOutput) { + op := &request.Operation{ + Name: opDescribeEngineDefaultClusterParameters, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeEngineDefaultClusterParametersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeEngineDefaultClusterParametersOutput{} + req.Data = output + return +} + +// Returns the default engine and system parameter information for the cluster +// database engine. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) DescribeEngineDefaultClusterParameters(input *DescribeEngineDefaultClusterParametersInput) (*DescribeEngineDefaultClusterParametersOutput, error) { + req, out := c.DescribeEngineDefaultClusterParametersRequest(input) + err := req.Send() + return out, err +} + +const opDescribeEngineDefaultParameters = "DescribeEngineDefaultParameters" + +// DescribeEngineDefaultParametersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEngineDefaultParameters operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeEngineDefaultParameters method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeEngineDefaultParametersRequest method. +// req, resp := client.DescribeEngineDefaultParametersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) DescribeEngineDefaultParametersRequest(input *DescribeEngineDefaultParametersInput) (req *request.Request, output *DescribeEngineDefaultParametersOutput) { + op := &request.Operation{ + Name: opDescribeEngineDefaultParameters, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"EngineDefaults.Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeEngineDefaultParametersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeEngineDefaultParametersOutput{} + req.Data = output + return +} + +// Returns the default engine and system parameter information for the specified +// database engine. 
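Note that DescribeEngineDefaultParametersRequest above is the one paginated operation in this span whose OutputTokens entry is "EngineDefaults.Marker", a continuation token nested inside the result rather than at the top level. A sketch of following that nested marker manually; the parameter group family value and struct field names are assumptions for illustration:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	svc := rds.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	// "mysql5.6" is an assumed parameter group family for illustration.
	input := &rds.DescribeEngineDefaultParametersInput{
		DBParameterGroupFamily: aws.String("mysql5.6"),
	}
	for {
		out, err := svc.DescribeEngineDefaultParameters(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, p := range out.EngineDefaults.Parameters {
			fmt.Println(aws.StringValue(p.ParameterName))
		}
		// The continuation token is nested, matching OutputTokens above.
		if out.EngineDefaults.Marker == nil {
			break
		}
		input.Marker = out.EngineDefaults.Marker
	}
}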
+func (c *RDS) DescribeEngineDefaultParameters(input *DescribeEngineDefaultParametersInput) (*DescribeEngineDefaultParametersOutput, error) {
+ req, out := c.DescribeEngineDefaultParametersRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// DescribeEngineDefaultParametersPages iterates over the pages of a DescribeEngineDefaultParameters operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See DescribeEngineDefaultParameters method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a DescribeEngineDefaultParameters operation.
+// pageNum := 0
+// err := client.DescribeEngineDefaultParametersPages(params,
+// func(page *DescribeEngineDefaultParametersOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *RDS) DescribeEngineDefaultParametersPages(input *DescribeEngineDefaultParametersInput, fn func(p *DescribeEngineDefaultParametersOutput, lastPage bool) (shouldContinue bool)) error {
+ page, _ := c.DescribeEngineDefaultParametersRequest(input)
+ page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
+ return fn(p.(*DescribeEngineDefaultParametersOutput), lastPage)
+ })
+}
+
+const opDescribeEventCategories = "DescribeEventCategories"
+
+// DescribeEventCategoriesRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeEventCategories operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeEventCategories method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DescribeEventCategoriesRequest method.
+// req, resp := client.DescribeEventCategoriesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *RDS) DescribeEventCategoriesRequest(input *DescribeEventCategoriesInput) (req *request.Request, output *DescribeEventCategoriesOutput) {
+ op := &request.Operation{
+ Name: opDescribeEventCategories,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DescribeEventCategoriesInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DescribeEventCategoriesOutput{}
+ req.Data = output
+ return
+}
+
+// Displays a list of categories for all event source types, or, if specified,
+// for a particular source type. You can see a list of the event categories and
+// source types in the Events (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html)
+// topic in the Amazon RDS User Guide.
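As a usage sketch for DescribeEventCategories, restricting the listing to a single source type; the "db-instance" SourceType value and the output field names are assumptions for illustration:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	svc := rds.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	// "db-instance" is an assumed source type value.
	out, err := svc.DescribeEventCategories(&rds.DescribeEventCategoriesInput{
		SourceType: aws.String("db-instance"),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range out.EventCategoriesMapList {
		fmt.Println(aws.StringValue(m.SourceType), aws.StringValueSlice(m.EventCategories))
	}
}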
+func (c *RDS) DescribeEventCategories(input *DescribeEventCategoriesInput) (*DescribeEventCategoriesOutput, error) { + req, out := c.DescribeEventCategoriesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeEventSubscriptions = "DescribeEventSubscriptions" + +// DescribeEventSubscriptionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEventSubscriptions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeEventSubscriptions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeEventSubscriptionsRequest method. +// req, resp := client.DescribeEventSubscriptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) DescribeEventSubscriptionsRequest(input *DescribeEventSubscriptionsInput) (req *request.Request, output *DescribeEventSubscriptionsOutput) { + op := &request.Operation{ + Name: opDescribeEventSubscriptions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeEventSubscriptionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeEventSubscriptionsOutput{} + req.Data = output + return +} + +// Lists all the subscription descriptions for a customer account. The description +// for a subscription includes SubscriptionName, SNSTopicARN, CustomerID, SourceType, +// SourceID, CreationTime, and Status. +// +// If you specify a SubscriptionName, lists the description for that subscription. +func (c *RDS) DescribeEventSubscriptions(input *DescribeEventSubscriptionsInput) (*DescribeEventSubscriptionsOutput, error) { + req, out := c.DescribeEventSubscriptionsRequest(input) + err := req.Send() + return out, err +} + +// DescribeEventSubscriptionsPages iterates over the pages of a DescribeEventSubscriptions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeEventSubscriptions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeEventSubscriptions operation. 
+// pageNum := 0
+// err := client.DescribeEventSubscriptionsPages(params,
+// func(page *DescribeEventSubscriptionsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *RDS) DescribeEventSubscriptionsPages(input *DescribeEventSubscriptionsInput, fn func(p *DescribeEventSubscriptionsOutput, lastPage bool) (shouldContinue bool)) error {
+ page, _ := c.DescribeEventSubscriptionsRequest(input)
+ page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
+ return fn(p.(*DescribeEventSubscriptionsOutput), lastPage)
+ })
+}
+
+const opDescribeEvents = "DescribeEvents"
+
+// DescribeEventsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeEvents operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeEvents method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DescribeEventsRequest method.
+// req, resp := client.DescribeEventsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *RDS) DescribeEventsRequest(input *DescribeEventsInput) (req *request.Request, output *DescribeEventsOutput) {
+ op := &request.Operation{
+ Name: opDescribeEvents,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"Marker"},
+ OutputTokens: []string{"Marker"},
+ LimitToken: "MaxRecords",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &DescribeEventsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DescribeEventsOutput{}
+ req.Data = output
+ return
+}
+
+// Returns events related to DB instances, DB security groups, DB snapshots,
+// and DB parameter groups for the past 14 days. Events specific to a particular
+// DB instance, DB security group, database snapshot, or DB parameter group
+// can be obtained by providing the name as a parameter. By default, the past
+// hour of events is returned.
+func (c *RDS) DescribeEvents(input *DescribeEventsInput) (*DescribeEventsOutput, error) {
+ req, out := c.DescribeEventsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// DescribeEventsPages iterates over the pages of a DescribeEvents operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See DescribeEvents method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a DescribeEvents operation.
+// pageNum := 0 +// err := client.DescribeEventsPages(params, +// func(page *DescribeEventsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *RDS) DescribeEventsPages(input *DescribeEventsInput, fn func(p *DescribeEventsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeEventsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeEventsOutput), lastPage) + }) +} + +const opDescribeOptionGroupOptions = "DescribeOptionGroupOptions" + +// DescribeOptionGroupOptionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeOptionGroupOptions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeOptionGroupOptions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeOptionGroupOptionsRequest method. +// req, resp := client.DescribeOptionGroupOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) DescribeOptionGroupOptionsRequest(input *DescribeOptionGroupOptionsInput) (req *request.Request, output *DescribeOptionGroupOptionsOutput) { + op := &request.Operation{ + Name: opDescribeOptionGroupOptions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeOptionGroupOptionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeOptionGroupOptionsOutput{} + req.Data = output + return +} + +// Describes all available options. +func (c *RDS) DescribeOptionGroupOptions(input *DescribeOptionGroupOptionsInput) (*DescribeOptionGroupOptionsOutput, error) { + req, out := c.DescribeOptionGroupOptionsRequest(input) + err := req.Send() + return out, err +} + +// DescribeOptionGroupOptionsPages iterates over the pages of a DescribeOptionGroupOptions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeOptionGroupOptions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeOptionGroupOptions operation. 
+// pageNum := 0 +// err := client.DescribeOptionGroupOptionsPages(params, +// func(page *DescribeOptionGroupOptionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *RDS) DescribeOptionGroupOptionsPages(input *DescribeOptionGroupOptionsInput, fn func(p *DescribeOptionGroupOptionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeOptionGroupOptionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeOptionGroupOptionsOutput), lastPage) + }) +} + +const opDescribeOptionGroups = "DescribeOptionGroups" + +// DescribeOptionGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeOptionGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeOptionGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeOptionGroupsRequest method. +// req, resp := client.DescribeOptionGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) DescribeOptionGroupsRequest(input *DescribeOptionGroupsInput) (req *request.Request, output *DescribeOptionGroupsOutput) { + op := &request.Operation{ + Name: opDescribeOptionGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeOptionGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeOptionGroupsOutput{} + req.Data = output + return +} + +// Describes the available option groups. +func (c *RDS) DescribeOptionGroups(input *DescribeOptionGroupsInput) (*DescribeOptionGroupsOutput, error) { + req, out := c.DescribeOptionGroupsRequest(input) + err := req.Send() + return out, err +} + +// DescribeOptionGroupsPages iterates over the pages of a DescribeOptionGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeOptionGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeOptionGroups operation. 
+// pageNum := 0 +// err := client.DescribeOptionGroupsPages(params, +// func(page *DescribeOptionGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *RDS) DescribeOptionGroupsPages(input *DescribeOptionGroupsInput, fn func(p *DescribeOptionGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeOptionGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeOptionGroupsOutput), lastPage) + }) +} + +const opDescribeOrderableDBInstanceOptions = "DescribeOrderableDBInstanceOptions" + +// DescribeOrderableDBInstanceOptionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeOrderableDBInstanceOptions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeOrderableDBInstanceOptions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeOrderableDBInstanceOptionsRequest method. +// req, resp := client.DescribeOrderableDBInstanceOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) DescribeOrderableDBInstanceOptionsRequest(input *DescribeOrderableDBInstanceOptionsInput) (req *request.Request, output *DescribeOrderableDBInstanceOptionsOutput) { + op := &request.Operation{ + Name: opDescribeOrderableDBInstanceOptions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeOrderableDBInstanceOptionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeOrderableDBInstanceOptionsOutput{} + req.Data = output + return +} + +// Returns a list of orderable DB instance options for the specified engine. +func (c *RDS) DescribeOrderableDBInstanceOptions(input *DescribeOrderableDBInstanceOptionsInput) (*DescribeOrderableDBInstanceOptionsOutput, error) { + req, out := c.DescribeOrderableDBInstanceOptionsRequest(input) + err := req.Send() + return out, err +} + +// DescribeOrderableDBInstanceOptionsPages iterates over the pages of a DescribeOrderableDBInstanceOptions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeOrderableDBInstanceOptions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeOrderableDBInstanceOptions operation. 
+// pageNum := 0 +// err := client.DescribeOrderableDBInstanceOptionsPages(params, +// func(page *DescribeOrderableDBInstanceOptionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *RDS) DescribeOrderableDBInstanceOptionsPages(input *DescribeOrderableDBInstanceOptionsInput, fn func(p *DescribeOrderableDBInstanceOptionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeOrderableDBInstanceOptionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeOrderableDBInstanceOptionsOutput), lastPage) + }) +} + +const opDescribePendingMaintenanceActions = "DescribePendingMaintenanceActions" + +// DescribePendingMaintenanceActionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribePendingMaintenanceActions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribePendingMaintenanceActions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribePendingMaintenanceActionsRequest method. +// req, resp := client.DescribePendingMaintenanceActionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) DescribePendingMaintenanceActionsRequest(input *DescribePendingMaintenanceActionsInput) (req *request.Request, output *DescribePendingMaintenanceActionsOutput) { + op := &request.Operation{ + Name: opDescribePendingMaintenanceActions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribePendingMaintenanceActionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribePendingMaintenanceActionsOutput{} + req.Data = output + return +} + +// Returns a list of resources (for example, DB instances) that have at least +// one pending maintenance action. +func (c *RDS) DescribePendingMaintenanceActions(input *DescribePendingMaintenanceActionsInput) (*DescribePendingMaintenanceActionsOutput, error) { + req, out := c.DescribePendingMaintenanceActionsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeReservedDBInstances = "DescribeReservedDBInstances" + +// DescribeReservedDBInstancesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReservedDBInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeReservedDBInstances method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeReservedDBInstancesRequest method. +// req, resp := client.DescribeReservedDBInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) DescribeReservedDBInstancesRequest(input *DescribeReservedDBInstancesInput) (req *request.Request, output *DescribeReservedDBInstancesOutput) { + op := &request.Operation{ + Name: opDescribeReservedDBInstances, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeReservedDBInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReservedDBInstancesOutput{} + req.Data = output + return +} + +// Returns information about reserved DB instances for this account, or about +// a specified reserved DB instance. +func (c *RDS) DescribeReservedDBInstances(input *DescribeReservedDBInstancesInput) (*DescribeReservedDBInstancesOutput, error) { + req, out := c.DescribeReservedDBInstancesRequest(input) + err := req.Send() + return out, err +} + +// DescribeReservedDBInstancesPages iterates over the pages of a DescribeReservedDBInstances operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeReservedDBInstances method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeReservedDBInstances operation. +// pageNum := 0 +// err := client.DescribeReservedDBInstancesPages(params, +// func(page *DescribeReservedDBInstancesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *RDS) DescribeReservedDBInstancesPages(input *DescribeReservedDBInstancesInput, fn func(p *DescribeReservedDBInstancesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeReservedDBInstancesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeReservedDBInstancesOutput), lastPage) + }) +} + +const opDescribeReservedDBInstancesOfferings = "DescribeReservedDBInstancesOfferings" + +// DescribeReservedDBInstancesOfferingsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReservedDBInstancesOfferings operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeReservedDBInstancesOfferings method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeReservedDBInstancesOfferingsRequest method. 
+// req, resp := client.DescribeReservedDBInstancesOfferingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) DescribeReservedDBInstancesOfferingsRequest(input *DescribeReservedDBInstancesOfferingsInput) (req *request.Request, output *DescribeReservedDBInstancesOfferingsOutput) { + op := &request.Operation{ + Name: opDescribeReservedDBInstancesOfferings, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeReservedDBInstancesOfferingsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReservedDBInstancesOfferingsOutput{} + req.Data = output + return +} + +// Lists available reserved DB instance offerings. +func (c *RDS) DescribeReservedDBInstancesOfferings(input *DescribeReservedDBInstancesOfferingsInput) (*DescribeReservedDBInstancesOfferingsOutput, error) { + req, out := c.DescribeReservedDBInstancesOfferingsRequest(input) + err := req.Send() + return out, err +} + +// DescribeReservedDBInstancesOfferingsPages iterates over the pages of a DescribeReservedDBInstancesOfferings operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeReservedDBInstancesOfferings method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeReservedDBInstancesOfferings operation. +// pageNum := 0 +// err := client.DescribeReservedDBInstancesOfferingsPages(params, +// func(page *DescribeReservedDBInstancesOfferingsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *RDS) DescribeReservedDBInstancesOfferingsPages(input *DescribeReservedDBInstancesOfferingsInput, fn func(p *DescribeReservedDBInstancesOfferingsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeReservedDBInstancesOfferingsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeReservedDBInstancesOfferingsOutput), lastPage) + }) +} + +const opDownloadDBLogFilePortion = "DownloadDBLogFilePortion" + +// DownloadDBLogFilePortionRequest generates a "aws/request.Request" representing the +// client's request for the DownloadDBLogFilePortion operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DownloadDBLogFilePortion method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DownloadDBLogFilePortionRequest method. 
+// req, resp := client.DownloadDBLogFilePortionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) DownloadDBLogFilePortionRequest(input *DownloadDBLogFilePortionInput) (req *request.Request, output *DownloadDBLogFilePortionOutput) { + op := &request.Operation{ + Name: opDownloadDBLogFilePortion, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "NumberOfLines", + TruncationToken: "AdditionalDataPending", + }, + } + + if input == nil { + input = &DownloadDBLogFilePortionInput{} + } + + req = c.newRequest(op, input, output) + output = &DownloadDBLogFilePortionOutput{} + req.Data = output + return +} + +// Downloads all or a portion of the specified log file, up to 1 MB in size. +func (c *RDS) DownloadDBLogFilePortion(input *DownloadDBLogFilePortionInput) (*DownloadDBLogFilePortionOutput, error) { + req, out := c.DownloadDBLogFilePortionRequest(input) + err := req.Send() + return out, err +} + +// DownloadDBLogFilePortionPages iterates over the pages of a DownloadDBLogFilePortion operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DownloadDBLogFilePortion method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DownloadDBLogFilePortion operation. +// pageNum := 0 +// err := client.DownloadDBLogFilePortionPages(params, +// func(page *DownloadDBLogFilePortionOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *RDS) DownloadDBLogFilePortionPages(input *DownloadDBLogFilePortionInput, fn func(p *DownloadDBLogFilePortionOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DownloadDBLogFilePortionRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DownloadDBLogFilePortionOutput), lastPage) + }) +} + +const opFailoverDBCluster = "FailoverDBCluster" + +// FailoverDBClusterRequest generates a "aws/request.Request" representing the +// client's request for the FailoverDBCluster operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the FailoverDBCluster method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the FailoverDBClusterRequest method. 
+// req, resp := client.FailoverDBClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) FailoverDBClusterRequest(input *FailoverDBClusterInput) (req *request.Request, output *FailoverDBClusterOutput) { + op := &request.Operation{ + Name: opFailoverDBCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &FailoverDBClusterInput{} + } + + req = c.newRequest(op, input, output) + output = &FailoverDBClusterOutput{} + req.Data = output + return +} + +// Forces a failover for a DB cluster. +// +// A failover for a DB cluster promotes one of the read-only instances in the +// DB cluster to the master DB instance (the cluster writer) and deletes the +// current primary instance. +// +// Amazon Aurora will automatically fail over to a read-only instance, if one +// exists, when the primary instance fails. You can force a failover when you +// want to simulate a failure of a DB instance for testing. Because each instance +// in a DB cluster has its own endpoint address, you will need to clean up and +// re-establish any existing connections that use those endpoint addresses when +// the failover is complete. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) FailoverDBCluster(input *FailoverDBClusterInput) (*FailoverDBClusterOutput, error) { + req, out := c.FailoverDBClusterRequest(input) + err := req.Send() + return out, err +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTagsForResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTagsForResourceOutput{} + req.Data = output + return +} + +// Lists all tags on an Amazon RDS resource. +// +// For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS +// Resources (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.Tagging.html). 
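+//
+// A minimal usage sketch, assuming client is an *rds.RDS and the aws and rds
+// packages are imported; the resource ARN below is a hypothetical placeholder:
+//
+//    out, err := client.ListTagsForResource(&rds.ListTagsForResourceInput{
+//        ResourceName: aws.String("arn:aws:rds:us-west-2:123456789012:db:mydbinstance"),
+//    })
+//    if err == nil {
+//        for _, tag := range out.TagList {
+//            fmt.Println(*tag.Key, *tag.Value)
+//        }
+//    }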
+func (c *RDS) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + err := req.Send() + return out, err +} + +const opModifyDBCluster = "ModifyDBCluster" + +// ModifyDBClusterRequest generates a "aws/request.Request" representing the +// client's request for the ModifyDBCluster operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyDBCluster method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyDBClusterRequest method. +// req, resp := client.ModifyDBClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) ModifyDBClusterRequest(input *ModifyDBClusterInput) (req *request.Request, output *ModifyDBClusterOutput) { + op := &request.Operation{ + Name: opModifyDBCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyDBClusterInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyDBClusterOutput{} + req.Data = output + return +} + +// Modify a setting for an Amazon Aurora DB cluster. You can change one or more +// database configuration parameters by specifying these parameters and the +// new values in the request. For more information on Amazon Aurora, see Aurora +// on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) ModifyDBCluster(input *ModifyDBClusterInput) (*ModifyDBClusterOutput, error) { + req, out := c.ModifyDBClusterRequest(input) + err := req.Send() + return out, err +} + +const opModifyDBClusterParameterGroup = "ModifyDBClusterParameterGroup" + +// ModifyDBClusterParameterGroupRequest generates a "aws/request.Request" representing the +// client's request for the ModifyDBClusterParameterGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyDBClusterParameterGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyDBClusterParameterGroupRequest method. 
+// req, resp := client.ModifyDBClusterParameterGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) ModifyDBClusterParameterGroupRequest(input *ModifyDBClusterParameterGroupInput) (req *request.Request, output *DBClusterParameterGroupNameMessage) { + op := &request.Operation{ + Name: opModifyDBClusterParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyDBClusterParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &DBClusterParameterGroupNameMessage{} + req.Data = output + return +} + +// Modifies the parameters of a DB cluster parameter group. To modify more than +// one parameter, submit a list of the following: ParameterName, ParameterValue, +// and ApplyMethod. A maximum of 20 parameters can be modified in a single request. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +// +// Changes to dynamic parameters are applied immediately. Changes to static +// parameters require a reboot without failover to the DB cluster associated +// with the parameter group before the change can take effect. +// +// After you create a DB cluster parameter group, you should wait at least +// 5 minutes before creating your first DB cluster that uses that DB cluster +// parameter group as the default parameter group. This allows Amazon RDS to +// fully complete the create action before the parameter group is used as the +// default for a new DB cluster. This is especially important for parameters +// that are critical when creating the default database for a DB cluster, such +// as the character set for the default database defined by the character_set_database +// parameter. You can use the Parameter Groups option of the Amazon RDS console +// (https://console.aws.amazon.com/rds/) or the DescribeDBClusterParameters +// command to verify that your DB cluster parameter group has been created or +// modified. +func (c *RDS) ModifyDBClusterParameterGroup(input *ModifyDBClusterParameterGroupInput) (*DBClusterParameterGroupNameMessage, error) { + req, out := c.ModifyDBClusterParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opModifyDBClusterSnapshotAttribute = "ModifyDBClusterSnapshotAttribute" + +// ModifyDBClusterSnapshotAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ModifyDBClusterSnapshotAttribute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyDBClusterSnapshotAttribute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyDBClusterSnapshotAttributeRequest method. 
+//    req, resp := client.ModifyDBClusterSnapshotAttributeRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *RDS) ModifyDBClusterSnapshotAttributeRequest(input *ModifyDBClusterSnapshotAttributeInput) (req *request.Request, output *ModifyDBClusterSnapshotAttributeOutput) {
+    op := &request.Operation{
+        Name:       opModifyDBClusterSnapshotAttribute,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &ModifyDBClusterSnapshotAttributeInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &ModifyDBClusterSnapshotAttributeOutput{}
+    req.Data = output
+    return
+}
+
+// Adds an attribute and values to, or removes an attribute and values from,
+// a manual DB cluster snapshot.
+//
+// To share a manual DB cluster snapshot with other AWS accounts, specify restore
+// as the AttributeName and use the ValuesToAdd parameter to add a list of IDs
+// of the AWS accounts that are authorized to restore the manual DB cluster
+// snapshot. Use the value all to make the manual DB cluster snapshot public,
+// which means that it can be copied or restored by all AWS accounts. Do not
+// add the all value for any manual DB cluster snapshots that contain private
+// information that you don't want available to all AWS accounts.
+//
+// To view which AWS accounts have access to copy or restore a manual DB cluster
+// snapshot, or whether a manual DB cluster snapshot is public or private, use
+// the DescribeDBClusterSnapshotAttributes API action.
+//
+// If a manual DB cluster snapshot is encrypted, it cannot be shared.
+func (c *RDS) ModifyDBClusterSnapshotAttribute(input *ModifyDBClusterSnapshotAttributeInput) (*ModifyDBClusterSnapshotAttributeOutput, error) {
+    req, out := c.ModifyDBClusterSnapshotAttributeRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opModifyDBInstance = "ModifyDBInstance"
+
+// ModifyDBInstanceRequest generates a "aws/request.Request" representing the
+// client's request for the ModifyDBInstance operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ModifyDBInstance method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the ModifyDBInstanceRequest method.
+//    req, resp := client.ModifyDBInstanceRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *RDS) ModifyDBInstanceRequest(input *ModifyDBInstanceInput) (req *request.Request, output *ModifyDBInstanceOutput) {
+    op := &request.Operation{
+        Name:       opModifyDBInstance,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &ModifyDBInstanceInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &ModifyDBInstanceOutput{}
+    req.Data = output
+    return
+}
+
+// Modify settings for a DB instance. You can change one or more database configuration
+// parameters by specifying these parameters and the new values in the request.
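+//
+// A minimal usage sketch, assuming client is an *rds.RDS as in the examples
+// above; the identifier and instance class below are hypothetical placeholders:
+//
+//    out, err := client.ModifyDBInstance(&rds.ModifyDBInstanceInput{
+//        DBInstanceIdentifier: aws.String("mydbinstance"),
+//        DBInstanceClass:      aws.String("db.m4.large"),
+//        ApplyImmediately:     aws.Bool(true), // apply now, not in the maintenance window
+//    })
+//    if err == nil {
+//        fmt.Println(out)
+//    }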
+func (c *RDS) ModifyDBInstance(input *ModifyDBInstanceInput) (*ModifyDBInstanceOutput, error) { + req, out := c.ModifyDBInstanceRequest(input) + err := req.Send() + return out, err +} + +const opModifyDBParameterGroup = "ModifyDBParameterGroup" + +// ModifyDBParameterGroupRequest generates a "aws/request.Request" representing the +// client's request for the ModifyDBParameterGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyDBParameterGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyDBParameterGroupRequest method. +// req, resp := client.ModifyDBParameterGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) ModifyDBParameterGroupRequest(input *ModifyDBParameterGroupInput) (req *request.Request, output *DBParameterGroupNameMessage) { + op := &request.Operation{ + Name: opModifyDBParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyDBParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &DBParameterGroupNameMessage{} + req.Data = output + return +} + +// Modifies the parameters of a DB parameter group. To modify more than one +// parameter, submit a list of the following: ParameterName, ParameterValue, +// and ApplyMethod. A maximum of 20 parameters can be modified in a single request. +// +// Changes to dynamic parameters are applied immediately. Changes to static +// parameters require a reboot without failover to the DB instance associated +// with the parameter group before the change can take effect. +// +// After you modify a DB parameter group, you should wait at least 5 minutes +// before creating your first DB instance that uses that DB parameter group +// as the default parameter group. This allows Amazon RDS to fully complete +// the modify action before the parameter group is used as the default for a +// new DB instance. This is especially important for parameters that are critical +// when creating the default database for a DB instance, such as the character +// set for the default database defined by the character_set_database parameter. +// You can use the Parameter Groups option of the Amazon RDS console (https://console.aws.amazon.com/rds/) +// or the DescribeDBParameters command to verify that your DB parameter group +// has been created or modified. +func (c *RDS) ModifyDBParameterGroup(input *ModifyDBParameterGroupInput) (*DBParameterGroupNameMessage, error) { + req, out := c.ModifyDBParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opModifyDBSnapshotAttribute = "ModifyDBSnapshotAttribute" + +// ModifyDBSnapshotAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ModifyDBSnapshotAttribute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ModifyDBSnapshotAttribute method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the ModifyDBSnapshotAttributeRequest method.
+//    req, resp := client.ModifyDBSnapshotAttributeRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *RDS) ModifyDBSnapshotAttributeRequest(input *ModifyDBSnapshotAttributeInput) (req *request.Request, output *ModifyDBSnapshotAttributeOutput) {
+    op := &request.Operation{
+        Name:       opModifyDBSnapshotAttribute,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &ModifyDBSnapshotAttributeInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &ModifyDBSnapshotAttributeOutput{}
+    req.Data = output
+    return
+}
+
+// Adds an attribute and values to, or removes an attribute and values from,
+// a manual DB snapshot.
+//
+// To share a manual DB snapshot with other AWS accounts, specify restore as
+// the AttributeName and use the ValuesToAdd parameter to add a list of IDs
+// of the AWS accounts that are authorized to restore the manual DB snapshot.
+// Use the value all to make the manual DB snapshot public, which means it
+// can be copied or restored by all AWS accounts. Do not add the all value for
+// any manual DB snapshots that contain private information that you don't want
+// available to all AWS accounts.
+//
+// To view which AWS accounts have access to copy or restore a manual DB snapshot,
+// or whether a manual DB snapshot is public or private, use the DescribeDBSnapshotAttributes
+// API action.
+//
+// If the manual DB snapshot is encrypted, it cannot be shared.
+func (c *RDS) ModifyDBSnapshotAttribute(input *ModifyDBSnapshotAttributeInput) (*ModifyDBSnapshotAttributeOutput, error) {
+    req, out := c.ModifyDBSnapshotAttributeRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opModifyDBSubnetGroup = "ModifyDBSubnetGroup"
+
+// ModifyDBSubnetGroupRequest generates a "aws/request.Request" representing the
+// client's request for the ModifyDBSubnetGroup operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ModifyDBSubnetGroup method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the ModifyDBSubnetGroupRequest method.
+// req, resp := client.ModifyDBSubnetGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) ModifyDBSubnetGroupRequest(input *ModifyDBSubnetGroupInput) (req *request.Request, output *ModifyDBSubnetGroupOutput) { + op := &request.Operation{ + Name: opModifyDBSubnetGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyDBSubnetGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyDBSubnetGroupOutput{} + req.Data = output + return +} + +// Modifies an existing DB subnet group. DB subnet groups must contain at least +// one subnet in at least two AZs in the region. +func (c *RDS) ModifyDBSubnetGroup(input *ModifyDBSubnetGroupInput) (*ModifyDBSubnetGroupOutput, error) { + req, out := c.ModifyDBSubnetGroupRequest(input) + err := req.Send() + return out, err +} + +const opModifyEventSubscription = "ModifyEventSubscription" + +// ModifyEventSubscriptionRequest generates a "aws/request.Request" representing the +// client's request for the ModifyEventSubscription operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyEventSubscription method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyEventSubscriptionRequest method. +// req, resp := client.ModifyEventSubscriptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) ModifyEventSubscriptionRequest(input *ModifyEventSubscriptionInput) (req *request.Request, output *ModifyEventSubscriptionOutput) { + op := &request.Operation{ + Name: opModifyEventSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyEventSubscriptionInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyEventSubscriptionOutput{} + req.Data = output + return +} + +// Modifies an existing RDS event notification subscription. Note that you cannot +// modify the source identifiers using this call; to change source identifiers +// for a subscription, use the AddSourceIdentifierToSubscription and RemoveSourceIdentifierFromSubscription +// calls. +// +// You can see a list of the event categories for a given SourceType in the +// Events (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html) +// topic in the Amazon RDS User Guide or by using the DescribeEventCategories +// action. +func (c *RDS) ModifyEventSubscription(input *ModifyEventSubscriptionInput) (*ModifyEventSubscriptionOutput, error) { + req, out := c.ModifyEventSubscriptionRequest(input) + err := req.Send() + return out, err +} + +const opModifyOptionGroup = "ModifyOptionGroup" + +// ModifyOptionGroupRequest generates a "aws/request.Request" representing the +// client's request for the ModifyOptionGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyOptionGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyOptionGroupRequest method. +// req, resp := client.ModifyOptionGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) ModifyOptionGroupRequest(input *ModifyOptionGroupInput) (req *request.Request, output *ModifyOptionGroupOutput) { + op := &request.Operation{ + Name: opModifyOptionGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyOptionGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyOptionGroupOutput{} + req.Data = output + return +} + +// Modifies an existing option group. +func (c *RDS) ModifyOptionGroup(input *ModifyOptionGroupInput) (*ModifyOptionGroupOutput, error) { + req, out := c.ModifyOptionGroupRequest(input) + err := req.Send() + return out, err +} + +const opPromoteReadReplica = "PromoteReadReplica" + +// PromoteReadReplicaRequest generates a "aws/request.Request" representing the +// client's request for the PromoteReadReplica operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PromoteReadReplica method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PromoteReadReplicaRequest method. +// req, resp := client.PromoteReadReplicaRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) PromoteReadReplicaRequest(input *PromoteReadReplicaInput) (req *request.Request, output *PromoteReadReplicaOutput) { + op := &request.Operation{ + Name: opPromoteReadReplica, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PromoteReadReplicaInput{} + } + + req = c.newRequest(op, input, output) + output = &PromoteReadReplicaOutput{} + req.Data = output + return +} + +// Promotes a Read Replica DB instance to a standalone DB instance. +// +// We recommend that you enable automated backups on your Read Replica before +// promoting the Read Replica. This ensures that no backup is taken during the +// promotion process. Once the instance is promoted to a primary instance, backups +// are taken based on your backup settings. 
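+//
+// A minimal sketch of promoting a replica with automated backups enabled, as
+// recommended above; client is assumed to be an *rds.RDS, and the identifier
+// below is a hypothetical placeholder:
+//
+//    out, err := client.PromoteReadReplica(&rds.PromoteReadReplicaInput{
+//        DBInstanceIdentifier:  aws.String("my-read-replica"),
+//        BackupRetentionPeriod: aws.Int64(7), // keep automated backups for 7 days
+//    })
+//    if err == nil {
+//        fmt.Println(out)
+//    }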
+func (c *RDS) PromoteReadReplica(input *PromoteReadReplicaInput) (*PromoteReadReplicaOutput, error) { + req, out := c.PromoteReadReplicaRequest(input) + err := req.Send() + return out, err +} + +const opPromoteReadReplicaDBCluster = "PromoteReadReplicaDBCluster" + +// PromoteReadReplicaDBClusterRequest generates a "aws/request.Request" representing the +// client's request for the PromoteReadReplicaDBCluster operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PromoteReadReplicaDBCluster method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PromoteReadReplicaDBClusterRequest method. +// req, resp := client.PromoteReadReplicaDBClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) PromoteReadReplicaDBClusterRequest(input *PromoteReadReplicaDBClusterInput) (req *request.Request, output *PromoteReadReplicaDBClusterOutput) { + op := &request.Operation{ + Name: opPromoteReadReplicaDBCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PromoteReadReplicaDBClusterInput{} + } + + req = c.newRequest(op, input, output) + output = &PromoteReadReplicaDBClusterOutput{} + req.Data = output + return +} + +// Promotes a Read Replica DB cluster to a standalone DB cluster. +func (c *RDS) PromoteReadReplicaDBCluster(input *PromoteReadReplicaDBClusterInput) (*PromoteReadReplicaDBClusterOutput, error) { + req, out := c.PromoteReadReplicaDBClusterRequest(input) + err := req.Send() + return out, err +} + +const opPurchaseReservedDBInstancesOffering = "PurchaseReservedDBInstancesOffering" + +// PurchaseReservedDBInstancesOfferingRequest generates a "aws/request.Request" representing the +// client's request for the PurchaseReservedDBInstancesOffering operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PurchaseReservedDBInstancesOffering method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PurchaseReservedDBInstancesOfferingRequest method. 
+//    req, resp := client.PurchaseReservedDBInstancesOfferingRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *RDS) PurchaseReservedDBInstancesOfferingRequest(input *PurchaseReservedDBInstancesOfferingInput) (req *request.Request, output *PurchaseReservedDBInstancesOfferingOutput) {
+    op := &request.Operation{
+        Name:       opPurchaseReservedDBInstancesOffering,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &PurchaseReservedDBInstancesOfferingInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &PurchaseReservedDBInstancesOfferingOutput{}
+    req.Data = output
+    return
+}
+
+// Purchases a reserved DB instance offering.
+func (c *RDS) PurchaseReservedDBInstancesOffering(input *PurchaseReservedDBInstancesOfferingInput) (*PurchaseReservedDBInstancesOfferingOutput, error) {
+    req, out := c.PurchaseReservedDBInstancesOfferingRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opRebootDBInstance = "RebootDBInstance"
+
+// RebootDBInstanceRequest generates a "aws/request.Request" representing the
+// client's request for the RebootDBInstance operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the RebootDBInstance method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the RebootDBInstanceRequest method.
+//    req, resp := client.RebootDBInstanceRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *RDS) RebootDBInstanceRequest(input *RebootDBInstanceInput) (req *request.Request, output *RebootDBInstanceOutput) {
+    op := &request.Operation{
+        Name:       opRebootDBInstance,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &RebootDBInstanceInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &RebootDBInstanceOutput{}
+    req.Data = output
+    return
+}
+
+// Rebooting a DB instance restarts the database engine service. A reboot also
+// applies any pending modifications to the associated DB parameter group.
+// Rebooting a DB instance results in a momentary outage of the instance,
+// during which the DB instance status is set to rebooting. If the RDS instance
+// is configured for MultiAZ, it is possible that the reboot will be conducted
+// through a failover. An Amazon RDS event is created when the reboot is completed.
+//
+// If your DB instance is deployed in multiple Availability Zones, you can
+// force a failover from one AZ to the other during the reboot. You might force
+// a failover to test the availability of your DB instance deployment or to
+// restore operations to the original AZ after a failover occurs.
+//
+// The time required to reboot is a function of the specific database engine's
+// crash recovery process. To improve the reboot time, we recommend that you
+// reduce database activities as much as possible during the reboot process
+// to reduce rollback activity for in-transit transactions.
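+//
+// A minimal sketch of a reboot that forces a Multi-AZ failover, assuming client
+// is an *rds.RDS; the identifier below is a hypothetical placeholder:
+//
+//    out, err := client.RebootDBInstance(&rds.RebootDBInstanceInput{
+//        DBInstanceIdentifier: aws.String("mydbinstance"),
+//        ForceFailover:        aws.Bool(true), // valid only for Multi-AZ deployments
+//    })
+//    if err == nil {
+//        fmt.Println(out)
+//    }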
+func (c *RDS) RebootDBInstance(input *RebootDBInstanceInput) (*RebootDBInstanceOutput, error) { + req, out := c.RebootDBInstanceRequest(input) + err := req.Send() + return out, err +} + +const opRemoveSourceIdentifierFromSubscription = "RemoveSourceIdentifierFromSubscription" + +// RemoveSourceIdentifierFromSubscriptionRequest generates a "aws/request.Request" representing the +// client's request for the RemoveSourceIdentifierFromSubscription operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RemoveSourceIdentifierFromSubscription method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RemoveSourceIdentifierFromSubscriptionRequest method. +// req, resp := client.RemoveSourceIdentifierFromSubscriptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) RemoveSourceIdentifierFromSubscriptionRequest(input *RemoveSourceIdentifierFromSubscriptionInput) (req *request.Request, output *RemoveSourceIdentifierFromSubscriptionOutput) { + op := &request.Operation{ + Name: opRemoveSourceIdentifierFromSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveSourceIdentifierFromSubscriptionInput{} + } + + req = c.newRequest(op, input, output) + output = &RemoveSourceIdentifierFromSubscriptionOutput{} + req.Data = output + return +} + +// Removes a source identifier from an existing RDS event notification subscription. +func (c *RDS) RemoveSourceIdentifierFromSubscription(input *RemoveSourceIdentifierFromSubscriptionInput) (*RemoveSourceIdentifierFromSubscriptionOutput, error) { + req, out := c.RemoveSourceIdentifierFromSubscriptionRequest(input) + err := req.Send() + return out, err +} + +const opRemoveTagsFromResource = "RemoveTagsFromResource" + +// RemoveTagsFromResourceRequest generates a "aws/request.Request" representing the +// client's request for the RemoveTagsFromResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RemoveTagsFromResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RemoveTagsFromResourceRequest method. 
+// req, resp := client.RemoveTagsFromResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) RemoveTagsFromResourceRequest(input *RemoveTagsFromResourceInput) (req *request.Request, output *RemoveTagsFromResourceOutput) { + op := &request.Operation{ + Name: opRemoveTagsFromResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveTagsFromResourceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RemoveTagsFromResourceOutput{} + req.Data = output + return +} + +// Removes metadata tags from an Amazon RDS resource. +// +// For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS +// Resources (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.Tagging.html). +func (c *RDS) RemoveTagsFromResource(input *RemoveTagsFromResourceInput) (*RemoveTagsFromResourceOutput, error) { + req, out := c.RemoveTagsFromResourceRequest(input) + err := req.Send() + return out, err +} + +const opResetDBClusterParameterGroup = "ResetDBClusterParameterGroup" + +// ResetDBClusterParameterGroupRequest generates a "aws/request.Request" representing the +// client's request for the ResetDBClusterParameterGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ResetDBClusterParameterGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ResetDBClusterParameterGroupRequest method. +// req, resp := client.ResetDBClusterParameterGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) ResetDBClusterParameterGroupRequest(input *ResetDBClusterParameterGroupInput) (req *request.Request, output *DBClusterParameterGroupNameMessage) { + op := &request.Operation{ + Name: opResetDBClusterParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetDBClusterParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &DBClusterParameterGroupNameMessage{} + req.Data = output + return +} + +// Modifies the parameters of a DB cluster parameter group to the default value. +// To reset specific parameters submit a list of the following: ParameterName +// and ApplyMethod. To reset the entire DB cluster parameter group, specify +// the DBClusterParameterGroupName and ResetAllParameters parameters. +// +// When resetting the entire group, dynamic parameters are updated immediately +// and static parameters are set to pending-reboot to take effect on the next +// DB instance restart or RebootDBInstance request. You must call RebootDBInstance +// for every DB instance in your DB cluster that you want the updated static +// parameter to apply to. 
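+//
+// A minimal sketch of resetting an entire group, assuming client is an
+// *rds.RDS; the group name below is a hypothetical placeholder:
+//
+//    out, err := client.ResetDBClusterParameterGroup(&rds.ResetDBClusterParameterGroupInput{
+//        DBClusterParameterGroupName: aws.String("my-cluster-params"),
+//        ResetAllParameters:          aws.Bool(true),
+//    })
+//    if err == nil {
+//        fmt.Println(out)
+//    }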
+// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) ResetDBClusterParameterGroup(input *ResetDBClusterParameterGroupInput) (*DBClusterParameterGroupNameMessage, error) { + req, out := c.ResetDBClusterParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opResetDBParameterGroup = "ResetDBParameterGroup" + +// ResetDBParameterGroupRequest generates a "aws/request.Request" representing the +// client's request for the ResetDBParameterGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ResetDBParameterGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ResetDBParameterGroupRequest method. +// req, resp := client.ResetDBParameterGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) ResetDBParameterGroupRequest(input *ResetDBParameterGroupInput) (req *request.Request, output *DBParameterGroupNameMessage) { + op := &request.Operation{ + Name: opResetDBParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetDBParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &DBParameterGroupNameMessage{} + req.Data = output + return +} + +// Modifies the parameters of a DB parameter group to the engine/system default +// value. To reset specific parameters submit a list of the following: ParameterName +// and ApplyMethod. To reset the entire DB parameter group, specify the DBParameterGroup +// name and ResetAllParameters parameters. When resetting the entire group, +// dynamic parameters are updated immediately and static parameters are set +// to pending-reboot to take effect on the next DB instance restart or RebootDBInstance +// request. +func (c *RDS) ResetDBParameterGroup(input *ResetDBParameterGroupInput) (*DBParameterGroupNameMessage, error) { + req, out := c.ResetDBParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opRestoreDBClusterFromSnapshot = "RestoreDBClusterFromSnapshot" + +// RestoreDBClusterFromSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the RestoreDBClusterFromSnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RestoreDBClusterFromSnapshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RestoreDBClusterFromSnapshotRequest method. 
+// req, resp := client.RestoreDBClusterFromSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) RestoreDBClusterFromSnapshotRequest(input *RestoreDBClusterFromSnapshotInput) (req *request.Request, output *RestoreDBClusterFromSnapshotOutput) { + op := &request.Operation{ + Name: opRestoreDBClusterFromSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RestoreDBClusterFromSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &RestoreDBClusterFromSnapshotOutput{} + req.Data = output + return +} + +// Creates a new DB cluster from a DB cluster snapshot. The target DB cluster +// is created from the source DB cluster restore point with the same configuration +// as the original source DB cluster, except that the new DB cluster is created +// with the default security group. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) RestoreDBClusterFromSnapshot(input *RestoreDBClusterFromSnapshotInput) (*RestoreDBClusterFromSnapshotOutput, error) { + req, out := c.RestoreDBClusterFromSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opRestoreDBClusterToPointInTime = "RestoreDBClusterToPointInTime" + +// RestoreDBClusterToPointInTimeRequest generates a "aws/request.Request" representing the +// client's request for the RestoreDBClusterToPointInTime operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RestoreDBClusterToPointInTime method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RestoreDBClusterToPointInTimeRequest method. +// req, resp := client.RestoreDBClusterToPointInTimeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) RestoreDBClusterToPointInTimeRequest(input *RestoreDBClusterToPointInTimeInput) (req *request.Request, output *RestoreDBClusterToPointInTimeOutput) { + op := &request.Operation{ + Name: opRestoreDBClusterToPointInTime, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RestoreDBClusterToPointInTimeInput{} + } + + req = c.newRequest(op, input, output) + output = &RestoreDBClusterToPointInTimeOutput{} + req.Data = output + return +} + +// Restores a DB cluster to an arbitrary point in time. Users can restore to +// any point in time before LatestRestorableTime for up to BackupRetentionPeriod +// days. The target DB cluster is created from the source DB cluster with the +// same configuration as the original DB cluster, except that the new DB cluster +// is created with the default DB security group. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. 
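+//
+// A minimal sketch (illustrative; it assumes client is an initialized RDS service
+// client, and that UseLatestRestorableTime is accepted as an alternative to an
+// explicit RestoreToTime):
+//
+//    out, err := client.RestoreDBClusterToPointInTime(&RestoreDBClusterToPointInTimeInput{
+//        DBClusterIdentifier:       aws.String("my-restored-cluster"),
+//        SourceDBClusterIdentifier: aws.String("my-source-cluster"),
+//        UseLatestRestorableTime:   aws.Bool(true),
+//    })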
+func (c *RDS) RestoreDBClusterToPointInTime(input *RestoreDBClusterToPointInTimeInput) (*RestoreDBClusterToPointInTimeOutput, error) {
+	req, out := c.RestoreDBClusterToPointInTimeRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opRestoreDBInstanceFromDBSnapshot = "RestoreDBInstanceFromDBSnapshot"
+
+// RestoreDBInstanceFromDBSnapshotRequest generates a "aws/request.Request" representing the
+// client's request for the RestoreDBInstanceFromDBSnapshot operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the RestoreDBInstanceFromDBSnapshot method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the RestoreDBInstanceFromDBSnapshotRequest method.
+//    req, resp := client.RestoreDBInstanceFromDBSnapshotRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *RDS) RestoreDBInstanceFromDBSnapshotRequest(input *RestoreDBInstanceFromDBSnapshotInput) (req *request.Request, output *RestoreDBInstanceFromDBSnapshotOutput) {
+	op := &request.Operation{
+		Name:       opRestoreDBInstanceFromDBSnapshot,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &RestoreDBInstanceFromDBSnapshotInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &RestoreDBInstanceFromDBSnapshotOutput{}
+	req.Data = output
+	return
+}
+
+// Creates a new DB instance from a DB snapshot. The target database is created
+// from the source database restore point with most of the original configuration,
+// but with the default security group and the default DB parameter group. By
+// default, the new DB instance is created as a single-AZ deployment except when
+// the instance is a SQL Server instance that has an option group that is associated
+// with mirroring; in this case, the instance becomes a mirrored AZ deployment
+// and not a single-AZ deployment.
+//
+// If your intent is to replace your original DB instance with the new, restored
+// DB instance, then rename your original DB instance before you call the RestoreDBInstanceFromDBSnapshot
+// action. RDS does not allow two DB instances with the same name. Once you
+// have renamed your original DB instance with a different identifier, you can
+// pass the original name of the DB instance as the DBInstanceIdentifier
+// in the call to the RestoreDBInstanceFromDBSnapshot action. The result is
+// that you replace the original DB instance with the DB instance created
+// from the snapshot.
+//
+// If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier
+// must be the ARN of the shared DB snapshot.
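+//
+// A minimal sketch of the rename-then-restore flow described above (identifiers
+// are illustrative; it assumes client is an initialized RDS service client and
+// that ModifyDBInstance with NewDBInstanceIdentifier performs the rename):
+//
+//    // Step 1: move the original instance out of the way.
+//    _, err := client.ModifyDBInstance(&ModifyDBInstanceInput{
+//        DBInstanceIdentifier:    aws.String("mydbinstance"),
+//        NewDBInstanceIdentifier: aws.String("mydbinstance-old"),
+//        ApplyImmediately:        aws.Bool(true),
+//    })
+//
+//    // Step 2: restore the snapshot under the original name.
+//    if err == nil {
+//        _, err = client.RestoreDBInstanceFromDBSnapshot(&RestoreDBInstanceFromDBSnapshotInput{
+//            DBInstanceIdentifier: aws.String("mydbinstance"),
+//            DBSnapshotIdentifier: aws.String("mydbinstance-snapshot"),
+//        })
+//    }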
+func (c *RDS) RestoreDBInstanceFromDBSnapshot(input *RestoreDBInstanceFromDBSnapshotInput) (*RestoreDBInstanceFromDBSnapshotOutput, error) { + req, out := c.RestoreDBInstanceFromDBSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opRestoreDBInstanceToPointInTime = "RestoreDBInstanceToPointInTime" + +// RestoreDBInstanceToPointInTimeRequest generates a "aws/request.Request" representing the +// client's request for the RestoreDBInstanceToPointInTime operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RestoreDBInstanceToPointInTime method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RestoreDBInstanceToPointInTimeRequest method. +// req, resp := client.RestoreDBInstanceToPointInTimeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) RestoreDBInstanceToPointInTimeRequest(input *RestoreDBInstanceToPointInTimeInput) (req *request.Request, output *RestoreDBInstanceToPointInTimeOutput) { + op := &request.Operation{ + Name: opRestoreDBInstanceToPointInTime, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RestoreDBInstanceToPointInTimeInput{} + } + + req = c.newRequest(op, input, output) + output = &RestoreDBInstanceToPointInTimeOutput{} + req.Data = output + return +} + +// Restores a DB instance to an arbitrary point in time. You can restore to +// any point in time before the time identified by the LatestRestorableTime +// property. You can restore to a point up to the number of days specified by +// the BackupRetentionPeriod property. +// +// The target database is created with most of the original configuration, +// but in a system-selected availability zone, with the default security group, +// the default subnet group, and the default DB parameter group. By default, +// the new DB instance is created as a single-AZ deployment except when the +// instance is a SQL Server instance that has an option group that is associated +// with mirroring; in this case, the instance becomes a mirrored deployment +// and not a single-AZ deployment. +func (c *RDS) RestoreDBInstanceToPointInTime(input *RestoreDBInstanceToPointInTimeInput) (*RestoreDBInstanceToPointInTimeOutput, error) { + req, out := c.RestoreDBInstanceToPointInTimeRequest(input) + err := req.Send() + return out, err +} + +const opRevokeDBSecurityGroupIngress = "RevokeDBSecurityGroupIngress" + +// RevokeDBSecurityGroupIngressRequest generates a "aws/request.Request" representing the +// client's request for the RevokeDBSecurityGroupIngress operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the RevokeDBSecurityGroupIngress method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RevokeDBSecurityGroupIngressRequest method. +// req, resp := client.RevokeDBSecurityGroupIngressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) RevokeDBSecurityGroupIngressRequest(input *RevokeDBSecurityGroupIngressInput) (req *request.Request, output *RevokeDBSecurityGroupIngressOutput) { + op := &request.Operation{ + Name: opRevokeDBSecurityGroupIngress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RevokeDBSecurityGroupIngressInput{} + } + + req = c.newRequest(op, input, output) + output = &RevokeDBSecurityGroupIngressOutput{} + req.Data = output + return +} + +// Revokes ingress from a DBSecurityGroup for previously authorized IP ranges +// or EC2 or VPC Security Groups. Required parameters for this API are one of +// CIDRIP, EC2SecurityGroupId for VPC, or (EC2SecurityGroupOwnerId and either +// EC2SecurityGroupName or EC2SecurityGroupId). +func (c *RDS) RevokeDBSecurityGroupIngress(input *RevokeDBSecurityGroupIngressInput) (*RevokeDBSecurityGroupIngressOutput, error) { + req, out := c.RevokeDBSecurityGroupIngressRequest(input) + err := req.Send() + return out, err +} + +// Describes a quota for an AWS account, for example, the number of DB instances +// allowed. +type AccountQuota struct { + _ struct{} `type:"structure"` + + // The name of the Amazon RDS quota for this AWS account. + AccountQuotaName *string `type:"string"` + + // The maximum allowed value for the quota. + Max *int64 `type:"long"` + + // The amount currently used toward the quota maximum. + Used *int64 `type:"long"` +} + +// String returns the string representation +func (s AccountQuota) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccountQuota) GoString() string { + return s.String() +} + +type AddSourceIdentifierToSubscriptionInput struct { + _ struct{} `type:"structure"` + + // The identifier of the event source to be added. An identifier must begin + // with a letter and must contain only ASCII letters, digits, and hyphens; it + // cannot end with a hyphen or contain two consecutive hyphens. + // + // Constraints: + // + // If the source type is a DB instance, then a DBInstanceIdentifier must + // be supplied. + // + // If the source type is a DB security group, a DBSecurityGroupName must + // be supplied. + // + // If the source type is a DB parameter group, a DBParameterGroupName must + // be supplied. + // + // If the source type is a DB snapshot, a DBSnapshotIdentifier must be supplied. + SourceIdentifier *string `type:"string" required:"true"` + + // The name of the RDS event notification subscription you want to add a source + // identifier to. + SubscriptionName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AddSourceIdentifierToSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddSourceIdentifierToSubscriptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
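+//
+// Validation runs automatically when a request is sent; a minimal sketch of
+// invoking it directly (values are illustrative):
+//
+//    in := &AddSourceIdentifierToSubscriptionInput{
+//        SourceIdentifier: aws.String("mydbinstance"),
+//        SubscriptionName: aws.String("my-subscription"),
+//    }
+//    if err := in.Validate(); err != nil {
+//        // unreachable here: both required fields are set
+//    }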
+func (s *AddSourceIdentifierToSubscriptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddSourceIdentifierToSubscriptionInput"} + if s.SourceIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("SourceIdentifier")) + } + if s.SubscriptionName == nil { + invalidParams.Add(request.NewErrParamRequired("SubscriptionName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type AddSourceIdentifierToSubscriptionOutput struct { + _ struct{} `type:"structure"` + + // Contains the results of a successful invocation of the DescribeEventSubscriptions + // action. + EventSubscription *EventSubscription `type:"structure"` +} + +// String returns the string representation +func (s AddSourceIdentifierToSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddSourceIdentifierToSubscriptionOutput) GoString() string { + return s.String() +} + +type AddTagsToResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon RDS resource the tags will be added to. This value is an Amazon + // Resource Name (ARN). For information about creating an ARN, see Constructing + // an RDS Amazon Resource Name (ARN) (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html#USER_Tagging.ARN). + ResourceName *string `type:"string" required:"true"` + + // The tags to be assigned to the Amazon RDS resource. + Tags []*Tag `locationNameList:"Tag" type:"list" required:"true"` +} + +// String returns the string representation +func (s AddTagsToResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsToResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddTagsToResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddTagsToResourceInput"} + if s.ResourceName == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceName")) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type AddTagsToResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddTagsToResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsToResourceOutput) GoString() string { + return s.String() +} + +type ApplyPendingMaintenanceActionInput struct { + _ struct{} `type:"structure"` + + // The pending maintenance action to apply to this resource. + // + // Valid values: system-update, db-upgrade + ApplyAction *string `type:"string" required:"true"` + + // A value that specifies the type of opt-in request, or undoes an opt-in request. + // An opt-in request of type immediate cannot be undone. + // + // Valid values: + // + // immediate - Apply the maintenance action immediately. + // + // next-maintenance - Apply the maintenance action during the next maintenance + // window for the resource. + // + // undo-opt-in - Cancel any existing next-maintenance opt-in requests. + OptInType *string `type:"string" required:"true"` + + // The RDS Amazon Resource Name (ARN) of the resource that the pending maintenance + // action applies to. 
For information about creating an ARN, see Constructing + // an RDS Amazon Resource Name (ARN) (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html#USER_Tagging.ARN). + ResourceIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ApplyPendingMaintenanceActionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ApplyPendingMaintenanceActionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ApplyPendingMaintenanceActionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ApplyPendingMaintenanceActionInput"} + if s.ApplyAction == nil { + invalidParams.Add(request.NewErrParamRequired("ApplyAction")) + } + if s.OptInType == nil { + invalidParams.Add(request.NewErrParamRequired("OptInType")) + } + if s.ResourceIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ApplyPendingMaintenanceActionOutput struct { + _ struct{} `type:"structure"` + + // Describes the pending maintenance actions for a resource. + ResourcePendingMaintenanceActions *ResourcePendingMaintenanceActions `type:"structure"` +} + +// String returns the string representation +func (s ApplyPendingMaintenanceActionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ApplyPendingMaintenanceActionOutput) GoString() string { + return s.String() +} + +type AuthorizeDBSecurityGroupIngressInput struct { + _ struct{} `type:"structure"` + + // The IP range to authorize. + CIDRIP *string `type:"string"` + + // The name of the DB security group to add authorization to. + DBSecurityGroupName *string `type:"string" required:"true"` + + // Id of the EC2 security group to authorize. For VPC DB security groups, EC2SecurityGroupId + // must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName + // or EC2SecurityGroupId must be provided. + EC2SecurityGroupId *string `type:"string"` + + // Name of the EC2 security group to authorize. For VPC DB security groups, + // EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and + // either EC2SecurityGroupName or EC2SecurityGroupId must be provided. + EC2SecurityGroupName *string `type:"string"` + + // AWS account number of the owner of the EC2 security group specified in the + // EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable + // value. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, + // EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId + // must be provided. + EC2SecurityGroupOwnerId *string `type:"string"` +} + +// String returns the string representation +func (s AuthorizeDBSecurityGroupIngressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuthorizeDBSecurityGroupIngressInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
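+//
+// Note that client-side validation only enforces DBSecurityGroupName; the
+// CIDRIP/EC2 security group combinations described above are checked by the
+// service. A minimal sketch authorizing a CIDR range (values are illustrative;
+// it assumes client is an initialized RDS service client):
+//
+//    out, err := client.AuthorizeDBSecurityGroupIngress(&AuthorizeDBSecurityGroupIngressInput{
+//        DBSecurityGroupName: aws.String("mydbsecuritygroup"),
+//        CIDRIP:              aws.String("203.0.113.0/24"),
+//    })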
+func (s *AuthorizeDBSecurityGroupIngressInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AuthorizeDBSecurityGroupIngressInput"} + if s.DBSecurityGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("DBSecurityGroupName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type AuthorizeDBSecurityGroupIngressOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // DescribeDBSecurityGroups + // + // AuthorizeDBSecurityGroupIngress + // + // CreateDBSecurityGroup + // + // RevokeDBSecurityGroupIngress + // + // This data type is used as a response element in the DescribeDBSecurityGroups + // action. + DBSecurityGroup *DBSecurityGroup `type:"structure"` +} + +// String returns the string representation +func (s AuthorizeDBSecurityGroupIngressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuthorizeDBSecurityGroupIngressOutput) GoString() string { + return s.String() +} + +// Contains Availability Zone information. +// +// This data type is used as an element in the following data type: +// +// OrderableDBInstanceOption +type AvailabilityZone struct { + _ struct{} `type:"structure"` + + // The name of the availability zone. + Name *string `type:"string"` +} + +// String returns the string representation +func (s AvailabilityZone) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AvailabilityZone) GoString() string { + return s.String() +} + +// A CA certificate for an AWS account. +type Certificate struct { + _ struct{} `type:"structure"` + + // The unique key that identifies a certificate. + CertificateIdentifier *string `type:"string"` + + // The type of the certificate. + CertificateType *string `type:"string"` + + // The thumbprint of the certificate. + Thumbprint *string `type:"string"` + + // The starting date from which the certificate is valid. + ValidFrom *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The final date that the certificate continues to be valid. + ValidTill *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s Certificate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Certificate) GoString() string { + return s.String() +} + +// This data type is used as a response element in the action DescribeDBEngineVersions. +type CharacterSet struct { + _ struct{} `type:"structure"` + + // The description of the character set. + CharacterSetDescription *string `type:"string"` + + // The name of the character set. + CharacterSetName *string `type:"string"` +} + +// String returns the string representation +func (s CharacterSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CharacterSet) GoString() string { + return s.String() +} + +type CopyDBClusterSnapshotInput struct { + _ struct{} `type:"structure"` + + // The identifier of the DB cluster snapshot to copy. This parameter is not + // case-sensitive. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens. + // + // First character must be a letter. + // + // Cannot end with a hyphen or contain two consecutive hyphens. 
+ // + // Example: my-cluster-snapshot1 + SourceDBClusterSnapshotIdentifier *string `type:"string" required:"true"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // The identifier of the new DB cluster snapshot to create from the source DB + // cluster snapshot. This parameter is not case-sensitive. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens. + // + // First character must be a letter. + // + // Cannot end with a hyphen or contain two consecutive hyphens. + // + // Example: my-cluster-snapshot2 + TargetDBClusterSnapshotIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CopyDBClusterSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyDBClusterSnapshotInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CopyDBClusterSnapshotInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CopyDBClusterSnapshotInput"} + if s.SourceDBClusterSnapshotIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("SourceDBClusterSnapshotIdentifier")) + } + if s.TargetDBClusterSnapshotIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("TargetDBClusterSnapshotIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CopyDBClusterSnapshotOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBClusterSnapshot + // + // DeleteDBClusterSnapshot + // + // This data type is used as a response element in the DescribeDBClusterSnapshots + // action. + DBClusterSnapshot *DBClusterSnapshot `type:"structure"` +} + +// String returns the string representation +func (s CopyDBClusterSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyDBClusterSnapshotOutput) GoString() string { + return s.String() +} + +type CopyDBParameterGroupInput struct { + _ struct{} `type:"structure"` + + // The identifier or ARN for the source DB parameter group. For information + // about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN) + // (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html#USER_Tagging.ARN). + // + // Constraints: + // + // Must specify a valid DB parameter group. + // + // If the source DB parameter group is in the same region as the copy, specify + // a valid DB parameter group identifier, for example my-db-param-group, or + // a valid ARN. + // + // If the source DB parameter group is in a different region than the copy, + // specify a valid DB parameter group ARN, for example arn:aws:rds:us-west-2:123456789012:pg:special-parameters. + SourceDBParameterGroupIdentifier *string `type:"string" required:"true"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // A description for the copied DB parameter group. + TargetDBParameterGroupDescription *string `type:"string" required:"true"` + + // The identifier for the copied DB parameter group. 
+ // + // Constraints: + // + // Cannot be null, empty, or blank + // + // Must contain from 1 to 255 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + // + // Example: my-db-parameter-group + TargetDBParameterGroupIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CopyDBParameterGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyDBParameterGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CopyDBParameterGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CopyDBParameterGroupInput"} + if s.SourceDBParameterGroupIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("SourceDBParameterGroupIdentifier")) + } + if s.TargetDBParameterGroupDescription == nil { + invalidParams.Add(request.NewErrParamRequired("TargetDBParameterGroupDescription")) + } + if s.TargetDBParameterGroupIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("TargetDBParameterGroupIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CopyDBParameterGroupOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the CreateDBParameterGroup + // action. + // + // This data type is used as a request parameter in the DeleteDBParameterGroup + // action, and as a response element in the DescribeDBParameterGroups action. + DBParameterGroup *DBParameterGroup `type:"structure"` +} + +// String returns the string representation +func (s CopyDBParameterGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyDBParameterGroupOutput) GoString() string { + return s.String() +} + +type CopyDBSnapshotInput struct { + _ struct{} `type:"structure"` + + // True to copy all tags from the source DB snapshot to the target DB snapshot; + // otherwise false. The default is false. + CopyTags *bool `type:"boolean"` + + // The AWS Key Management Service (AWS KMS) key identifier for an encrypted + // DB snapshot. The KMS key identifier is the Amazon Resource Name (ARN) or + // the KMS key alias for the KMS encryption key. + // + // If you copy an unencrypted DB snapshot and specify a value for the KmsKeyId + // parameter, Amazon RDS encrypts the target DB snapshot using the specified + // KMS encryption key. + // + // If you copy an encrypted DB snapshot from your AWS account, you can specify + // a value for KmsKeyId to encrypt the copy with a new KMS encryption key. If + // you don't specify a value for KmsKeyId then the copy of the DB snapshot is + // encrypted with the same KMS key as the source DB snapshot. + // + // If you copy an encrypted DB snapshot that is shared from another AWS account, + // then you must specify a value for KmsKeyId. + KmsKeyId *string `type:"string"` + + // The identifier for the source DB snapshot. + // + // If you are copying from a shared manual DB snapshot, this must be the ARN + // of the shared DB snapshot. + // + // Constraints: + // + // Must specify a valid system snapshot in the "available" state. + // + // If the source snapshot is in the same region as the copy, specify a valid + // DB snapshot identifier. 
+ // + // If the source snapshot is in a different region than the copy, specify + // a valid DB snapshot ARN. For more information, go to Copying a DB Snapshot + // (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_CopySnapshot.html). + // + // Example: rds:mydb-2012-04-02-00-01 + // + // Example: arn:aws:rds:rr-regn-1:123456789012:snapshot:mysql-instance1-snapshot-20130805 + SourceDBSnapshotIdentifier *string `type:"string" required:"true"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // The identifier for the copied snapshot. + // + // Constraints: + // + // Cannot be null, empty, or blank + // + // Must contain from 1 to 255 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + // + // Example: my-db-snapshot + TargetDBSnapshotIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CopyDBSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyDBSnapshotInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CopyDBSnapshotInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CopyDBSnapshotInput"} + if s.SourceDBSnapshotIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("SourceDBSnapshotIdentifier")) + } + if s.TargetDBSnapshotIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("TargetDBSnapshotIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CopyDBSnapshotOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBSnapshot + // + // DeleteDBSnapshot + // + // This data type is used as a response element in the DescribeDBSnapshots + // action. + DBSnapshot *DBSnapshot `type:"structure"` +} + +// String returns the string representation +func (s CopyDBSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyDBSnapshotOutput) GoString() string { + return s.String() +} + +type CopyOptionGroupInput struct { + _ struct{} `type:"structure"` + + // The identifier or ARN for the source option group. For information about + // creating an ARN, see Constructing an RDS Amazon Resource Name (ARN) (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html#USER_Tagging.ARN). + // + // Constraints: + // + // Must specify a valid option group. + // + // If the source option group is in the same region as the copy, specify + // a valid option group identifier, for example my-option-group, or a valid + // ARN. + // + // If the source option group is in a different region than the copy, specify + // a valid option group ARN, for example arn:aws:rds:us-west-2:123456789012:og:special-options. + SourceOptionGroupIdentifier *string `type:"string" required:"true"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // The description for the copied option group. + TargetOptionGroupDescription *string `type:"string" required:"true"` + + // The identifier for the copied option group. 
+ // + // Constraints: + // + // Cannot be null, empty, or blank + // + // Must contain from 1 to 255 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + // + // Example: my-option-group + TargetOptionGroupIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CopyOptionGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyOptionGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CopyOptionGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CopyOptionGroupInput"} + if s.SourceOptionGroupIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("SourceOptionGroupIdentifier")) + } + if s.TargetOptionGroupDescription == nil { + invalidParams.Add(request.NewErrParamRequired("TargetOptionGroupDescription")) + } + if s.TargetOptionGroupIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("TargetOptionGroupIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CopyOptionGroupOutput struct { + _ struct{} `type:"structure"` + + OptionGroup *OptionGroup `type:"structure"` +} + +// String returns the string representation +func (s CopyOptionGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyOptionGroupOutput) GoString() string { + return s.String() +} + +type CreateDBClusterInput struct { + _ struct{} `type:"structure"` + + // A list of EC2 Availability Zones that instances in the DB cluster can be + // created in. For information on regions and Availability Zones, see Regions + // and Availability Zones (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html). + AvailabilityZones []*string `locationNameList:"AvailabilityZone" type:"list"` + + // The number of days for which automated backups are retained. You must specify + // a minimum value of 1. + // + // Default: 1 + // + // Constraints: + // + // Must be a value from 1 to 35 + BackupRetentionPeriod *int64 `type:"integer"` + + // A value that indicates that the DB cluster should be associated with the + // specified CharacterSet. + CharacterSetName *string `type:"string"` + + // The DB cluster identifier. This parameter is stored as a lowercase string. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens. + // + // First character must be a letter. + // + // Cannot end with a hyphen or contain two consecutive hyphens. + // + // Example: my-cluster1 + DBClusterIdentifier *string `type:"string" required:"true"` + + // The name of the DB cluster parameter group to associate with this DB cluster. + // If this argument is omitted, default.aurora5.6 for the specified engine will + // be used. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + DBClusterParameterGroupName *string `type:"string"` + + // A DB subnet group to associate with this DB cluster. + // + // Constraints: Must contain no more than 255 alphanumeric characters, periods, + // underscores, spaces, or hyphens. Must not be default. 
+	//
+	// Example: mySubnetgroup
+	DBSubnetGroupName *string `type:"string"`
+
+	// The name for your database, of up to 8 alphanumeric characters. If you do
+	// not provide a name, Amazon RDS will not create a database in the DB cluster
+	// you are creating.
+	DatabaseName *string `type:"string"`
+
+	// The name of the database engine to be used for this DB cluster.
+	//
+	// Valid Values: aurora
+	Engine *string `type:"string" required:"true"`
+
+	// The version number of the database engine to use.
+	//
+	// Aurora
+	//
+	// Example: 5.6.10a
+	EngineVersion *string `type:"string"`
+
+	// The KMS key identifier for an encrypted DB cluster.
+	//
+	// The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption
+	// key. If you are creating a DB cluster with the same AWS account that owns
+	// the KMS encryption key used to encrypt the new DB cluster, then you can use
+	// the KMS key alias instead of the ARN for the KMS encryption key.
+	//
+	// If the StorageEncrypted parameter is true, and you do not specify a value
+	// for the KmsKeyId parameter, then Amazon RDS will use your default encryption
+	// key. AWS KMS creates the default encryption key for your AWS account. Your
+	// AWS account has a different default encryption key for each AWS region.
+	KmsKeyId *string `type:"string"`
+
+	// The password for the master database user. This password can contain any
+	// printable ASCII character except "/", """, or "@".
+	//
+	// Constraints: Must contain from 8 to 41 characters.
+	MasterUserPassword *string `type:"string" required:"true"`
+
+	// The name of the master user for the DB cluster.
+	//
+	// Constraints:
+	//
+	//    Must be 1 to 16 alphanumeric characters.
+	//
+	//    First character must be a letter.
+	//
+	//    Cannot be a reserved word for the chosen database engine.
+	MasterUsername *string `type:"string" required:"true"`
+
+	// A value that indicates that the DB cluster should be associated with the
+	// specified option group.
+	//
+	// Permanent options cannot be removed from an option group. The option group
+	// cannot be removed from a DB cluster once it is associated with one.
+	OptionGroupName *string `type:"string"`
+
+	// The port number on which the instances in the DB cluster accept connections.
+	//
+	// Default: 3306
+	Port *int64 `type:"integer"`
+
+	// The daily time range during which automated backups are created if automated
+	// backups are enabled using the BackupRetentionPeriod parameter.
+	//
+	// Default: A 30-minute window selected at random from an 8-hour block of time
+	// per region. To see the time blocks available, see Adjusting the Preferred
+	// Maintenance Window (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/AdjustingTheMaintenanceWindow.html)
+	// in the Amazon RDS User Guide.
+	//
+	// Constraints:
+	//
+	//    Must be in the format hh24:mi-hh24:mi.
+	//
+	//    Times should be in Universal Coordinated Time (UTC).
+	//
+	//    Must not conflict with the preferred maintenance window.
+	//
+	//    Must be at least 30 minutes.
+	PreferredBackupWindow *string `type:"string"`
+
+	// The weekly time range during which system maintenance can occur, in Universal
+	// Coordinated Time (UTC).
+	//
+	// Format: ddd:hh24:mi-ddd:hh24:mi
+	//
+	// Default: A 30-minute window selected at random from an 8-hour block of time
+	// per region, occurring on a random day of the week.
To see the time blocks + // available, see Adjusting the Preferred Maintenance Window (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/AdjustingTheMaintenanceWindow.html) + // in the Amazon RDS User Guide. + // + // Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun + // + // Constraints: Minimum 30-minute window. + PreferredMaintenanceWindow *string `type:"string"` + + // The Amazon Resource Name (ARN) of the source DB cluster if this DB cluster + // is created as a Read Replica. + ReplicationSourceIdentifier *string `type:"string"` + + // Specifies whether the DB cluster is encrypted. + StorageEncrypted *bool `type:"boolean"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // A list of EC2 VPC security groups to associate with this DB cluster. + VpcSecurityGroupIds []*string `locationNameList:"VpcSecurityGroupId" type:"list"` +} + +// String returns the string representation +func (s CreateDBClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBClusterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDBClusterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDBClusterInput"} + if s.DBClusterIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("DBClusterIdentifier")) + } + if s.Engine == nil { + invalidParams.Add(request.NewErrParamRequired("Engine")) + } + if s.MasterUserPassword == nil { + invalidParams.Add(request.NewErrParamRequired("MasterUserPassword")) + } + if s.MasterUsername == nil { + invalidParams.Add(request.NewErrParamRequired("MasterUsername")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateDBClusterOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBCluster + // + // DeleteDBCluster + // + // FailoverDBCluster + // + // ModifyDBCluster + // + // RestoreDBClusterFromSnapshot + // + // RestoreDBClusterToPointInTime + // + // This data type is used as a response element in the DescribeDBClusters + // action. + DBCluster *DBCluster `type:"structure"` +} + +// String returns the string representation +func (s CreateDBClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBClusterOutput) GoString() string { + return s.String() +} + +type CreateDBClusterParameterGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the DB cluster parameter group. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + // + // This value is stored as a lowercase string. + DBClusterParameterGroupName *string `type:"string" required:"true"` + + // The DB cluster parameter group family name. A DB cluster parameter group + // can be associated with one and only one DB cluster parameter group family, + // and can be applied only to a DB cluster running a database engine and engine + // version compatible with that DB cluster parameter group family. + DBParameterGroupFamily *string `type:"string" required:"true"` + + // The description for the DB cluster parameter group. + Description *string `type:"string" required:"true"` + + // A list of tags. 
+ Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateDBClusterParameterGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBClusterParameterGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDBClusterParameterGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDBClusterParameterGroupInput"} + if s.DBClusterParameterGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("DBClusterParameterGroupName")) + } + if s.DBParameterGroupFamily == nil { + invalidParams.Add(request.NewErrParamRequired("DBParameterGroupFamily")) + } + if s.Description == nil { + invalidParams.Add(request.NewErrParamRequired("Description")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateDBClusterParameterGroupOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the CreateDBClusterParameterGroup + // action. + // + // This data type is used as a request parameter in the DeleteDBClusterParameterGroup + // action, and as a response element in the DescribeDBClusterParameterGroups + // action. + DBClusterParameterGroup *DBClusterParameterGroup `type:"structure"` +} + +// String returns the string representation +func (s CreateDBClusterParameterGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBClusterParameterGroupOutput) GoString() string { + return s.String() +} + +type CreateDBClusterSnapshotInput struct { + _ struct{} `type:"structure"` + + // The identifier of the DB cluster to create a snapshot for. This parameter + // is not case-sensitive. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens. + // + // First character must be a letter. + // + // Cannot end with a hyphen or contain two consecutive hyphens. + // + // Example: my-cluster1 + DBClusterIdentifier *string `type:"string" required:"true"` + + // The identifier of the DB cluster snapshot. This parameter is stored as a + // lowercase string. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens. + // + // First character must be a letter. + // + // Cannot end with a hyphen or contain two consecutive hyphens. + // + // Example: my-cluster1-snapshot1 + DBClusterSnapshotIdentifier *string `type:"string" required:"true"` + + // The tags to be assigned to the DB cluster snapshot. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateDBClusterSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBClusterSnapshotInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
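+//
+// A minimal sketch that satisfies both required fields (identifiers follow the
+// examples above; it assumes client is an initialized RDS service client):
+//
+//    out, err := client.CreateDBClusterSnapshot(&CreateDBClusterSnapshotInput{
+//        DBClusterIdentifier:         aws.String("my-cluster1"),
+//        DBClusterSnapshotIdentifier: aws.String("my-cluster1-snapshot1"),
+//    })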
+func (s *CreateDBClusterSnapshotInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDBClusterSnapshotInput"} + if s.DBClusterIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("DBClusterIdentifier")) + } + if s.DBClusterSnapshotIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("DBClusterSnapshotIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateDBClusterSnapshotOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBClusterSnapshot + // + // DeleteDBClusterSnapshot + // + // This data type is used as a response element in the DescribeDBClusterSnapshots + // action. + DBClusterSnapshot *DBClusterSnapshot `type:"structure"` +} + +// String returns the string representation +func (s CreateDBClusterSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBClusterSnapshotOutput) GoString() string { + return s.String() +} + +type CreateDBInstanceInput struct { + _ struct{} `type:"structure"` + + // The amount of storage (in gigabytes) to be initially allocated for the database + // instance. + // + // Type: Integer + // + // MySQL + // + // Constraints: Must be an integer from 5 to 6144. + // + // MariaDB + // + // Constraints: Must be an integer from 5 to 6144. + // + // PostgreSQL + // + // Constraints: Must be an integer from 5 to 6144. + // + // Oracle + // + // Constraints: Must be an integer from 10 to 6144. + // + // SQL Server + // + // Constraints: Must be an integer from 200 to 4096 (Standard Edition and Enterprise + // Edition) or from 20 to 4096 (Express Edition and Web Edition) + AllocatedStorage *int64 `type:"integer"` + + // Indicates that minor engine upgrades will be applied automatically to the + // DB instance during the maintenance window. + // + // Default: true + AutoMinorVersionUpgrade *bool `type:"boolean"` + + // The EC2 Availability Zone that the database instance will be created in. + // For information on regions and Availability Zones, see Regions and Availability + // Zones (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html). + // + // Default: A random, system-chosen Availability Zone in the endpoint's region. + // + // Example: us-east-1d + // + // Constraint: The AvailabilityZone parameter cannot be specified if the MultiAZ + // parameter is set to true. The specified Availability Zone must be in the + // same region as the current endpoint. + AvailabilityZone *string `type:"string"` + + // The number of days for which automated backups are retained. Setting this + // parameter to a positive number enables backups. Setting this parameter to + // 0 disables automated backups. + // + // Default: 1 + // + // Constraints: + // + // Must be a value from 0 to 35 + // + // Cannot be set to 0 if the DB instance is a source to Read Replicas + BackupRetentionPeriod *int64 `type:"integer"` + + // For supported engines, indicates that the DB instance should be associated + // with the specified CharacterSet. + CharacterSetName *string `type:"string"` + + // True to copy all tags from the DB instance to snapshots of the DB instance; + // otherwise false. The default is false. + CopyTagsToSnapshot *bool `type:"boolean"` + + // The identifier of the DB cluster that the instance will belong to. 
+ // + // For information on creating a DB cluster, see CreateDBCluster. + // + // Type: String + DBClusterIdentifier *string `type:"string"` + + // The compute and memory capacity of the DB instance. + // + // Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | + // db.m1.xlarge | db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge | db.m3.medium + // | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.m4.large | db.m4.xlarge + // | db.m4.2xlarge | db.m4.4xlarge | db.m4.10xlarge | db.r3.large | db.r3.xlarge + // | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge | db.t2.micro | db.t2.small + // | db.t2.medium | db.t2.large + DBInstanceClass *string `type:"string" required:"true"` + + // The DB instance identifier. This parameter is stored as a lowercase string. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens (1 to 15 + // for SQL Server). + // + // First character must be a letter. + // + // Cannot end with a hyphen or contain two consecutive hyphens. + // + // Example: mydbinstance + DBInstanceIdentifier *string `type:"string" required:"true"` + + // The meaning of this parameter differs according to the database engine you + // use. + // + // Type: String + // + // MySQL + // + // The name of the database to create when the DB instance is created. If this + // parameter is not specified, no database is created in the DB instance. + // + // Constraints: + // + // Must contain 1 to 64 alphanumeric characters + // + // Cannot be a word reserved by the specified database engine + // + // MariaDB + // + // The name of the database to create when the DB instance is created. If this + // parameter is not specified, no database is created in the DB instance. + // + // Constraints: + // + // Must contain 1 to 64 alphanumeric characters + // + // Cannot be a word reserved by the specified database engine + // + // PostgreSQL + // + // The name of the database to create when the DB instance is created. If this + // parameter is not specified, the default "postgres" database is created in + // the DB instance. + // + // Constraints: + // + // Must contain 1 to 63 alphanumeric characters + // + // Must begin with a letter or an underscore. Subsequent characters can be + // letters, underscores, or digits (0-9). + // + // Cannot be a word reserved by the specified database engine + // + // Oracle + // + // The Oracle System ID (SID) of the created DB instance. + // + // Default: ORCL + // + // Constraints: + // + // Cannot be longer than 8 characters + // + // SQL Server + // + // Not applicable. Must be null. + // + // Amazon Aurora + // + // The name of the database to create when the primary instance of the DB cluster + // is created. If this parameter is not specified, no database is created in + // the DB instance. + // + // Constraints: + // + // Must contain 1 to 64 alphanumeric characters + // + // Cannot be a word reserved by the specified database engine + DBName *string `type:"string"` + + // The name of the DB parameter group to associate with this DB instance. If + // this argument is omitted, the default DBParameterGroup for the specified + // engine will be used. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + DBParameterGroupName *string `type:"string"` + + // A list of DB security groups to associate with this DB instance. 
+ // + // Default: The default DB security group for the database engine. + DBSecurityGroups []*string `locationNameList:"DBSecurityGroupName" type:"list"` + + // A DB subnet group to associate with this DB instance. + // + // If there is no DB subnet group, then it is a non-VPC DB instance. + DBSubnetGroupName *string `type:"string"` + + // Specify the Active Directory Domain to create the instance in. + Domain *string `type:"string"` + + // Specify the name of the IAM role to be used when making API calls to the + // Directory Service. + DomainIAMRoleName *string `type:"string"` + + // The name of the database engine to be used for this instance. + // + // Valid Values: MySQL | mariadb | oracle-se1 | oracle-se | oracle-ee | sqlserver-ee + // | sqlserver-se | sqlserver-ex | sqlserver-web | postgres | aurora + // + // Not every database engine is available for every AWS region. + Engine *string `type:"string" required:"true"` + + // The version number of the database engine to use. + // + // The following are the database engines and major and minor versions that + // are available with Amazon RDS. Not every database engine is available for + // every AWS region. + // + // Amazon Aurora + // + // Version 5.6 (only available in AWS regions ap-northeast-1, ap-northeast-2, + // ap-southeast-2, eu-west-1, us-east-1, us-west-2): 5.6.10a + // + // MariaDB + // + // Version 10.0 (available in all AWS regions): 10.0.17 | 10.0.24 + // + // Microsoft SQL Server Enterprise Edition (sqlserver-ee) + // + // Version 11.00 (available in all AWS regions): 11.00.2100.60.v1 | 11.00.5058.0.v1 + // | 11.00.6020.0.v1 + // + // Version 10.50 (available in all AWS regions): 10.50.2789.0.v1 | 10.50.6000.34.v1 + // | 10.50.6529.0.v1 + // + // Microsoft SQL Server Express Edition (sqlserver-ex) + // + // Version 12.00 (available in all AWS regions): 12.00.4422.0.v1 + // + // Version 11.00 (available in all AWS regions): 11.00.2100.60.v1 | 11.00.5058.0.v1 + // | 11.00.6020.0.v1 + // + // Version 10.50 (available in all AWS regions): 10.50.2789.0.v1 | 10.50.6000.34.v1 + // | 10.50.6529.0.v1 + // + // Microsoft SQL Server Standard Edition (sqlserver-se) + // + // Version 12.00 (available in all AWS regions): 12.00.4422.0.v1 + // + // Version 11.00 (available in all AWS regions): 11.00.2100.60.v1 | 11.00.5058.0.v1 + // | 11.00.6020.0.v1 + // + // Version 10.50 (available in all AWS regions): 10.50.2789.0.v1 | 10.50.6000.34.v1 + // | 10.50.6529.0.v1 + // + // Microsoft SQL Server Web Edition (sqlserver-web) + // + // Version 12.00 (available in all AWS regions): 12.00.4422.0.v1 + // + // Version 11.00 (available in all AWS regions): 11.00.2100.60.v1 | 11.00.5058.0.v1 + // | 11.00.6020.0.v1 + // + // Version 10.50 (available in all AWS regions): 10.50.2789.0.v1 | 10.50.6000.34.v1 + // | 10.50.6529.0.v1 + // + // MySQL + // + // Version 5.7 (available in all AWS regions): 5.7.10 | 5.7.11 + // + // Version 5.6 (available in all AWS regions except ap-northeast-2): 5.6.19a + // | 5.6.19b | 5.6.21 | 5.6.21b | 5.6.22 + // + // Version 5.6 (available in all AWS regions): 5.6.23 | 5.6.27 | 5.6.29 + // + // Version 5.5 (available in all AWS regions except eu-central-1, ap-northeast-2): + // 5.5.40 | 5.5.40a + // + // Version 5.5 (available in all AWS regions except ap-northeast-2): 5.5.40b + // | 5.5.41 + // + // Version 5.5 (available in all AWS regions): 5.5.42 | 5.5.46 + // + // Version 5.1 (available in all AWS regions except eu-central-1, ap-northeast-2): + // 5.1.73a | 5.1.73b + // + // Oracle Database Enterprise Edition 
(oracle-ee) + // + // Version 12.1 (available in all AWS regions except ap-northeast-2): 12.1.0.1.v1 + // | 12.1.0.1.v2 + // + // Version 12.1 (available in all AWS regions except ap-northeast-2, us-gov-west-1): + // 12.1.0.1.v3 | 12.1.0.1.v4 + // + // Version 12.1 (available in all AWS regions): 12.1.0.2.v1 + // + // Version 12.1 (available in all AWS regions except us-gov-west-1): 12.1.0.2.v2 + // | 12.1.0.2.v3 + // + // Version 11.2 (available in all AWS regions except eu-central-1, ap-northeast-2): + // 11.2.0.2.v3 | 11.2.0.2.v4 | 11.2.0.2.v5 | 11.2.0.2.v6 | 11.2.0.2.v7 + // + // Version 11.2 (available in all AWS regions except ap-northeast-2): 11.2.0.3.v1 + // | 11.2.0.3.v2 | 11.2.0.3.v3 + // + // Version 11.2 (available in all AWS regions except ap-northeast-2, us-gov-west-1): + // 11.2.0.3.v4 + // + // Version 11.2 (available in all AWS regions): 11.2.0.4.v1 | 11.2.0.4.v3 + // | 11.2.0.4.v4 + // + // Version 11.2 (available in all AWS regions except us-gov-west-1): 11.2.0.4.v5 + // | 11.2.0.4.v6 | 11.2.0.4.v7 + // + // Oracle Database Standard Edition (oracle-se) + // + // Version 12.1 (available in all AWS regions except ap-northeast-2): 12.1.0.1.v1 + // | 12.1.0.1.v2 + // + // Version 12.1 (available in all AWS regions except ap-northeast-2, us-gov-west-1): + // 12.1.0.1.v3 | 12.1.0.1.v4 + // + // Version 11.2 (available in all AWS regions except eu-central-1, ap-northeast-2): + // 11.2.0.2.v3 | 11.2.0.2.v4 | 11.2.0.2.v5 | 11.2.0.2.v6 | 11.2.0.2.v7 + // + // Version 11.2 (available in all AWS regions except ap-northeast-2): 11.2.0.3.v1 + // | 11.2.0.3.v2 | 11.2.0.3.v3 + // + // Version 11.2 (available in all AWS regions except ap-northeast-2, us-gov-west-1): + // 11.2.0.3.v4 + // + // Version 11.2 (available in all AWS regions): 11.2.0.4.v1 | 11.2.0.4.v3 + // | 11.2.0.4.v4 + // + // Version 11.2 (available in all AWS regions except us-gov-west-1): 11.2.0.4.v5 + // | 11.2.0.4.v6 | 11.2.0.4.v7 + // + // Oracle Database Standard Edition One (oracle-se1) + // + // Version 12.1 (available in all AWS regions except ap-northeast-2): 12.1.0.1.v1 + // | 12.1.0.1.v2 + // + // Version 12.1 (available in all AWS regions except ap-northeast-2, us-gov-west-1): + // 12.1.0.1.v3 | 12.1.0.1.v4 + // + // Version 11.2 (available in all AWS regions except eu-central-1, ap-northeast-2): + // 11.2.0.2.v3 | 11.2.0.2.v4 | 11.2.0.2.v5 | 11.2.0.2.v6 | 11.2.0.2.v7 + // + // Version 11.2 (available in all AWS regions except ap-northeast-2): 11.2.0.3.v1 + // | 11.2.0.3.v2 | 11.2.0.3.v3 + // + // Version 11.2 (available in all AWS regions except ap-northeast-2, us-gov-west-1): + // 11.2.0.3.v4 + // + // Version 11.2 (available in all AWS regions): 11.2.0.4.v1 | 11.2.0.4.v3 + // | 11.2.0.4.v4 + // + // Version 11.2 (available in all AWS regions except us-gov-west-1): 11.2.0.4.v5 + // | 11.2.0.4.v6 | 11.2.0.4.v7 + // + // Oracle Database Standard Edition Two (oracle-se2) + // + // Version 12.1 (available in all AWS regions except us-gov-west-1): 12.1.0.2.v2 + // | 12.1.0.2.v3 + // + // PostgreSQL + // + // Version 9.5 (available in all AWS regions except us-gov-west-1): 9.5.2 + // + // Version 9.4 (available in all AWS regions): 9.4.1 | 9.4.4 | 9.4.5 + // + // Version 9.4 (available in all AWS regions except us-gov-west-1): 9.4.7 + // + // Version 9.3 (available in all AWS regions except eu-central-1, ap-northeast-2): + // 9.3.1 | 9.3.2 + // + // Version 9.3 (available in all AWS regions except ap-northeast-2): 9.3.10 + // | 9.3.3 | 9.3.5 | 9.3.6 | 9.3.9 + // + // Version 9.3 (available in all AWS regions 
except ap-northeast-2, us-gov-west-1):
+ // 9.3.12
+ EngineVersion *string `type:"string"`
+
+ // The amount of Provisioned IOPS (input/output operations per second) to be
+ // initially allocated for the DB instance.
+ //
+ // Constraints: Must be a multiple between 3 and 10 of the storage amount for
+ // the DB instance. Must also be an integer multiple of 1000. For example, if
+ // the size of your DB instance is 500 GB, then your Iops value can be 2000,
+ // 3000, 4000, or 5000.
+ Iops *int64 `type:"integer"`
+
+ // The KMS key identifier for an encrypted DB instance.
+ //
+ // The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption
+ // key. If you are creating a DB instance with the same AWS account that owns
+ // the KMS encryption key used to encrypt the new DB instance, then you can
+ // use the KMS key alias instead of the ARN for the KMS encryption key.
+ //
+ // If the StorageEncrypted parameter is true, and you do not specify a value
+ // for the KmsKeyId parameter, then Amazon RDS will use your default encryption
+ // key. AWS KMS creates the default encryption key for your AWS account. Your
+ // AWS account has a different default encryption key for each AWS region.
+ KmsKeyId *string `type:"string"`
+
+ // License model information for this DB instance.
+ //
+ // Valid values: license-included | bring-your-own-license | general-public-license
+ LicenseModel *string `type:"string"`
+
+ // The password for the master database user. Can be any printable ASCII character
+ // except "/", """, or "@".
+ //
+ // Type: String
+ //
+ // MySQL
+ //
+ // Constraints: Must contain from 8 to 41 characters.
+ //
+ // MariaDB
+ //
+ // Constraints: Must contain from 8 to 41 characters.
+ //
+ // Oracle
+ //
+ // Constraints: Must contain from 8 to 30 characters.
+ //
+ // SQL Server
+ //
+ // Constraints: Must contain from 8 to 128 characters.
+ //
+ // PostgreSQL
+ //
+ // Constraints: Must contain from 8 to 128 characters.
+ //
+ // Amazon Aurora
+ //
+ // Constraints: Must contain from 8 to 41 characters.
+ MasterUserPassword *string `type:"string"`
+
+ // The name of the master user for the client DB instance.
+ //
+ // MySQL
+ //
+ // Constraints:
+ //
+ // Must be 1 to 16 alphanumeric characters.
+ //
+ // First character must be a letter.
+ //
+ // Cannot be a reserved word for the chosen database engine.
+ //
+ // MariaDB
+ //
+ // Constraints:
+ //
+ // Must be 1 to 16 alphanumeric characters.
+ //
+ // Cannot be a reserved word for the chosen database engine.
+ //
+ // Type: String
+ //
+ // Oracle
+ //
+ // Constraints:
+ //
+ // Must be 1 to 30 alphanumeric characters.
+ //
+ // First character must be a letter.
+ //
+ // Cannot be a reserved word for the chosen database engine.
+ //
+ // SQL Server
+ //
+ // Constraints:
+ //
+ // Must be 1 to 128 alphanumeric characters.
+ //
+ // First character must be a letter.
+ //
+ // Cannot be a reserved word for the chosen database engine.
+ //
+ // PostgreSQL
+ //
+ // Constraints:
+ //
+ // Must be 1 to 63 alphanumeric characters.
+ //
+ // First character must be a letter.
+ //
+ // Cannot be a reserved word for the chosen database engine.
+ MasterUsername *string `type:"string"`
+
+ // The interval, in seconds, between points when Enhanced Monitoring metrics
+ // are collected for the DB instance. To disable collecting Enhanced Monitoring
+ // metrics, specify 0. The default is 0.
+ //
+ // If MonitoringRoleArn is specified, then you must also set MonitoringInterval
+ // to a value other than 0.
+ //
+ // Valid Values: 0, 1, 5, 10, 15, 30, 60
+ MonitoringInterval *int64 `type:"integer"`
+
+ // The ARN for the IAM role that permits RDS to send enhanced monitoring metrics
+ // to CloudWatch Logs. For example, arn:aws:iam::123456789012:role/emaccess.
+ // For information on creating a monitoring role, go to To create an IAM role
+ // for Amazon RDS Enhanced Monitoring (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.html#USER_Monitoring.OS.IAMRole).
+ //
+ // If MonitoringInterval is set to a value other than 0, then you must supply
+ // a MonitoringRoleArn value.
+ MonitoringRoleArn *string `type:"string"`
+
+ // Specifies if the DB instance is a Multi-AZ deployment. You cannot set the
+ // AvailabilityZone parameter if the MultiAZ parameter is set to true.
+ MultiAZ *bool `type:"boolean"`
+
+ // Indicates that the DB instance should be associated with the specified option
+ // group.
+ //
+ // Permanent options, such as the TDE option for Oracle Advanced Security TDE,
+ // cannot be removed from an option group, and that option group cannot be removed
+ // from a DB instance once it is associated with a DB instance.
+ OptionGroupName *string `type:"string"`
+
+ // The port number on which the database accepts connections.
+ //
+ // MySQL
+ //
+ // Default: 3306
+ //
+ // Valid Values: 1150-65535
+ //
+ // Type: Integer
+ //
+ // MariaDB
+ //
+ // Default: 3306
+ //
+ // Valid Values: 1150-65535
+ //
+ // Type: Integer
+ //
+ // PostgreSQL
+ //
+ // Default: 5432
+ //
+ // Valid Values: 1150-65535
+ //
+ // Type: Integer
+ //
+ // Oracle
+ //
+ // Default: 1521
+ //
+ // Valid Values: 1150-65535
+ //
+ // SQL Server
+ //
+ // Default: 1433
+ //
+ // Valid Values: 1150-65535 except for 1434, 3389, 47001, and 49152
+ // through 49156.
+ //
+ // Amazon Aurora
+ //
+ // Default: 3306
+ //
+ // Valid Values: 1150-65535
+ //
+ // Type: Integer
+ Port *int64 `type:"integer"`
+
+ // The daily time range during which automated backups are created if automated
+ // backups are enabled, using the BackupRetentionPeriod parameter. For more
+ // information, see DB Instance Backups (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.BackingUpAndRestoringAmazonRDSInstances.html).
+ //
+ // Default: A 30-minute window selected at random from an 8-hour block of
+ // time per region. To see the time blocks available, see Adjusting the Preferred
+ // Maintenance Window (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/AdjustingTheMaintenanceWindow.html)
+ // in the Amazon RDS User Guide.
+ //
+ // Constraints:
+ //
+ // Must be in the format hh24:mi-hh24:mi.
+ //
+ // Times should be in Universal Coordinated Time (UTC).
+ //
+ // Must not conflict with the preferred maintenance window.
+ //
+ // Must be at least 30 minutes.
+ PreferredBackupWindow *string `type:"string"`
+
+ // The weekly time range during which system maintenance can occur, in Universal
+ // Coordinated Time (UTC). For more information, see DB Instance Maintenance
+ // (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBMaintenance.html).
+ //
+ // Format: ddd:hh24:mi-ddd:hh24:mi
+ //
+ // Default: A 30-minute window selected at random from an 8-hour block of
+ // time per region, occurring on a random day of the week. To see the time blocks
+ // available, see Adjusting the Preferred Maintenance Window (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/AdjustingTheMaintenanceWindow.html)
+ // in the Amazon RDS User Guide.
+ // + // Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun + // + // Constraints: Minimum 30-minute window. + PreferredMaintenanceWindow *string `type:"string"` + + // A value that specifies the order in which an Aurora Replica is promoted to + // the primary instance after a failure of the existing primary instance. For + // more information, see Fault Tolerance for an Aurora DB Cluster (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Aurora.Managing.html#Aurora.Managing.FaultTolerance). + // + // Default: 1 + // + // Valid Values: 0 - 15 + PromotionTier *int64 `type:"integer"` + + // Specifies the accessibility options for the DB instance. A value of true + // specifies an Internet-facing instance with a publicly resolvable DNS name, + // which resolves to a public IP address. A value of false specifies an internal + // instance with a DNS name that resolves to a private IP address. + // + // Default: The default behavior varies depending on whether a VPC has been + // requested or not. The following list shows the default behavior in each case. + // + // Default VPC: true + // + // VPC: false + // + // If no DB subnet group has been specified as part of the request and the + // PubliclyAccessible value has not been set, the DB instance will be publicly + // accessible. If a specific DB subnet group has been specified as part of the + // request and the PubliclyAccessible value has not been set, the DB instance + // will be private. + PubliclyAccessible *bool `type:"boolean"` + + // Specifies whether the DB instance is encrypted. + // + // Default: false + StorageEncrypted *bool `type:"boolean"` + + // Specifies the storage type to be associated with the DB instance. + // + // Valid values: standard | gp2 | io1 + // + // If you specify io1, you must also include a value for the Iops parameter. + // + // Default: io1 if the Iops parameter is specified; otherwise standard + StorageType *string `type:"string"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // The ARN from the Key Store with which to associate the instance for TDE encryption. + TdeCredentialArn *string `type:"string"` + + // The password for the given ARN from the Key Store in order to access the + // device. + TdeCredentialPassword *string `type:"string"` + + // A list of EC2 VPC security groups to associate with this DB instance. + // + // Default: The default EC2 VPC security group for the DB subnet group's VPC. + VpcSecurityGroupIds []*string `locationNameList:"VpcSecurityGroupId" type:"list"` +} + +// String returns the string representation +func (s CreateDBInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
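// Editor's note: the sketch below is illustrative only and not part of the
// vendored file. It shows how the three required CreateDBInstanceInput fields
// checked by Validate (DBInstanceClass, DBInstanceIdentifier, Engine) might be
// supplied in a call; it assumes the standard aws-sdk-go session and rds
// packages, and every identifier and value in it is hypothetical.
//
//    sess, err := session.NewSession(&aws.Config{Region: aws.String("us-east-1")})
//    if err != nil {
//        log.Fatal(err)
//    }
//    svc := rds.New(sess)
//    out, err := svc.CreateDBInstance(&rds.CreateDBInstanceInput{
//        DBInstanceClass:      aws.String("db.t2.micro"),  // required
//        DBInstanceIdentifier: aws.String("mydbinstance"), // required
//        Engine:               aws.String("MySQL"),        // required
//        AllocatedStorage:     aws.Int64(5),               // 5 to 6144 GB for MySQL
//        MasterUsername:       aws.String("admin"),
//        MasterUserPassword:   aws.String("hypothetical-password"),
//    })
//    if err != nil {
//        log.Fatal(err) // a nil required field surfaces here as ErrInvalidParams
//    }
//    fmt.Println(aws.StringValue(out.DBInstance.DBInstanceStatus))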
+func (s *CreateDBInstanceInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateDBInstanceInput"}
+ if s.DBInstanceClass == nil {
+ invalidParams.Add(request.NewErrParamRequired("DBInstanceClass"))
+ }
+ if s.DBInstanceIdentifier == nil {
+ invalidParams.Add(request.NewErrParamRequired("DBInstanceIdentifier"))
+ }
+ if s.Engine == nil {
+ invalidParams.Add(request.NewErrParamRequired("Engine"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type CreateDBInstanceOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Contains the result of a successful invocation of the following actions:
+ //
+ // CreateDBInstance
+ //
+ // DeleteDBInstance
+ //
+ // ModifyDBInstance
+ //
+ // This data type is used as a response element in the DescribeDBInstances
+ // action.
+ DBInstance *DBInstance `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateDBInstanceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDBInstanceOutput) GoString() string {
+ return s.String()
+}
+
+type CreateDBInstanceReadReplicaInput struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates that minor engine upgrades will be applied automatically to the
+ // Read Replica during the maintenance window.
+ //
+ // Default: Inherits from the source DB instance
+ AutoMinorVersionUpgrade *bool `type:"boolean"`
+
+ // The Amazon EC2 Availability Zone that the Read Replica will be created in.
+ //
+ // Default: A random, system-chosen Availability Zone in the endpoint's region.
+ //
+ // Example: us-east-1d
+ AvailabilityZone *string `type:"string"`
+
+ // True to copy all tags from the Read Replica to snapshots of the Read Replica;
+ // otherwise false. The default is false.
+ CopyTagsToSnapshot *bool `type:"boolean"`
+
+ // The compute and memory capacity of the Read Replica.
+ //
+ // Valid Values: db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge |
+ // db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large
+ // | db.m3.xlarge | db.m3.2xlarge | db.m4.large | db.m4.xlarge | db.m4.2xlarge
+ // | db.m4.4xlarge | db.m4.10xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge
+ // | db.r3.4xlarge | db.r3.8xlarge | db.t2.micro | db.t2.small | db.t2.medium
+ // | db.t2.large
+ //
+ // Default: Inherits from the source DB instance.
+ DBInstanceClass *string `type:"string"`
+
+ // The DB instance identifier of the Read Replica. This identifier is the unique
+ // key that identifies a DB instance. This parameter is stored as a lowercase
+ // string.
+ DBInstanceIdentifier *string `type:"string" required:"true"`
+
+ // Specifies a DB subnet group for the DB instance. The new DB instance will
+ // be created in the VPC associated with the DB subnet group. If no DB subnet
+ // group is specified, then the new DB instance is not created in a VPC.
+ //
+ // Constraints:
+ //
+ // Can only be specified if the source DB instance identifier specifies a
+ // DB instance in another region.
+ //
+ // The specified DB subnet group must be in the same region in which the
+ // operation is running.
+ //
+ // All Read Replicas in one region that are created from the same source
+ // DB instance must either:
+ //
+ // Specify DB subnet groups from the same VPC. All these Read Replicas will
+ // be created in the same VPC.
+ //
+ // Not specify a DB subnet group. All these Read Replicas will be created
+ // outside of any VPC.
+ //
+ // Constraints: Must contain no more than 255 alphanumeric characters,
+ // periods, underscores, spaces, or hyphens. Must not be default.
+ //
+ // Example: mySubnetgroup
+ DBSubnetGroupName *string `type:"string"`
+
+ // The amount of Provisioned IOPS (input/output operations per second) to be
+ // initially allocated for the DB instance.
+ Iops *int64 `type:"integer"`
+
+ // The interval, in seconds, between points when Enhanced Monitoring metrics
+ // are collected for the Read Replica. To disable collecting Enhanced Monitoring
+ // metrics, specify 0. The default is 0.
+ //
+ // If MonitoringRoleArn is specified, then you must also set MonitoringInterval
+ // to a value other than 0.
+ //
+ // Valid Values: 0, 1, 5, 10, 15, 30, 60
+ MonitoringInterval *int64 `type:"integer"`
+
+ // The ARN for the IAM role that permits RDS to send enhanced monitoring metrics
+ // to CloudWatch Logs. For example, arn:aws:iam::123456789012:role/emaccess.
+ // For information on creating a monitoring role, go to To create an IAM role
+ // for Amazon RDS Enhanced Monitoring (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.html#USER_Monitoring.OS.IAMRole).
+ //
+ // If MonitoringInterval is set to a value other than 0, then you must supply
+ // a MonitoringRoleArn value.
+ MonitoringRoleArn *string `type:"string"`
+
+ // The option group the DB instance will be associated with. If omitted, the
+ // default option group for the engine specified will be used.
+ OptionGroupName *string `type:"string"`
+
+ // The port number that the DB instance uses for connections.
+ //
+ // Default: Inherits from the source DB instance
+ //
+ // Valid Values: 1150-65535
+ Port *int64 `type:"integer"`
+
+ // Specifies the accessibility options for the DB instance. A value of true
+ // specifies an Internet-facing instance with a publicly resolvable DNS name,
+ // which resolves to a public IP address. A value of false specifies an internal
+ // instance with a DNS name that resolves to a private IP address.
+ //
+ // Default: The default behavior varies depending on whether a VPC has been
+ // requested or not. The following list shows the default behavior in each case.
+ //
+ // Default VPC: true
+ //
+ // VPC: false
+ //
+ // If no DB subnet group has been specified as part of the request and the
+ // PubliclyAccessible value has not been set, the DB instance will be publicly
+ // accessible. If a specific DB subnet group has been specified as part of the
+ // request and the PubliclyAccessible value has not been set, the DB instance
+ // will be private.
+ PubliclyAccessible *bool `type:"boolean"`
+
+ // The identifier of the DB instance that will act as the source for the Read
+ // Replica. Each DB instance can have up to five Read Replicas.
+ //
+ // Constraints:
+ //
+ // Must be the identifier of an existing MySQL, MariaDB, or PostgreSQL DB
+ // instance.
+ //
+ // Can specify a DB instance that is a MySQL Read Replica only if the source
+ // is running MySQL 5.6.
+ //
+ // Can specify a DB instance that is a PostgreSQL Read Replica only if the
+ // source is running PostgreSQL 9.3.5.
+ //
+ // The specified DB instance must have automatic backups enabled; that is,
+ // its backup retention period must be greater than 0.
+ //
+ // If the source DB instance is in the same region as the Read Replica, specify
+ // a valid DB instance identifier.
+ //
+ // If the source DB instance is in a different region than the Read Replica,
+ // specify a valid DB instance ARN. For more information, go to Constructing
+ // an Amazon RDS Amazon Resource Name (ARN) (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html#USER_Tagging.ARN).
+ SourceDBInstanceIdentifier *string `type:"string" required:"true"`
+
+ // Specifies the storage type to be associated with the Read Replica.
+ //
+ // Valid values: standard | gp2 | io1
+ //
+ // If you specify io1, you must also include a value for the Iops parameter.
+ //
+ // Default: io1 if the Iops parameter is specified; otherwise standard
+ StorageType *string `type:"string"`
+
+ // A list of tags.
+ Tags []*Tag `locationNameList:"Tag" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateDBInstanceReadReplicaInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDBInstanceReadReplicaInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateDBInstanceReadReplicaInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateDBInstanceReadReplicaInput"}
+ if s.DBInstanceIdentifier == nil {
+ invalidParams.Add(request.NewErrParamRequired("DBInstanceIdentifier"))
+ }
+ if s.SourceDBInstanceIdentifier == nil {
+ invalidParams.Add(request.NewErrParamRequired("SourceDBInstanceIdentifier"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type CreateDBInstanceReadReplicaOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Contains the result of a successful invocation of the following actions:
+ //
+ // CreateDBInstance
+ //
+ // DeleteDBInstance
+ //
+ // ModifyDBInstance
+ //
+ // This data type is used as a response element in the DescribeDBInstances
+ // action.
+ DBInstance *DBInstance `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateDBInstanceReadReplicaOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDBInstanceReadReplicaOutput) GoString() string {
+ return s.String()
+}
+
+type CreateDBParameterGroupInput struct {
+ _ struct{} `type:"structure"`
+
+ // The DB parameter group family name. A DB parameter group can be associated
+ // with one and only one DB parameter group family, and can be applied only
+ // to a DB instance running a database engine and engine version compatible
+ // with that DB parameter group family.
+ DBParameterGroupFamily *string `type:"string" required:"true"`
+
+ // The name of the DB parameter group.
+ //
+ // Constraints:
+ //
+ // Must be 1 to 255 alphanumeric characters
+ //
+ // First character must be a letter
+ //
+ // Cannot end with a hyphen or contain two consecutive hyphens
+ //
+ // This value is stored as a lowercase string.
+ DBParameterGroupName *string `type:"string" required:"true"`
+
+ // The description for the DB parameter group.
+ Description *string `type:"string" required:"true"`
+
+ // A list of tags.
+ Tags []*Tag `locationNameList:"Tag" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateDBParameterGroupInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDBParameterGroupInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
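// Editor's note: another illustrative sketch, not part of the vendored file;
// it fills in the three required CreateDBParameterGroupInput fields documented
// above. The family and names are hypothetical, and svc is the *rds.RDS client
// from the earlier sketch.
//
//    _, err = svc.CreateDBParameterGroup(&rds.CreateDBParameterGroupInput{
//        DBParameterGroupFamily: aws.String("mysql5.6"),
//        DBParameterGroupName:   aws.String("my-param-group"),
//        Description:            aws.String("hypothetical parameter group"),
//    })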
+func (s *CreateDBParameterGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDBParameterGroupInput"} + if s.DBParameterGroupFamily == nil { + invalidParams.Add(request.NewErrParamRequired("DBParameterGroupFamily")) + } + if s.DBParameterGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("DBParameterGroupName")) + } + if s.Description == nil { + invalidParams.Add(request.NewErrParamRequired("Description")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateDBParameterGroupOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the CreateDBParameterGroup + // action. + // + // This data type is used as a request parameter in the DeleteDBParameterGroup + // action, and as a response element in the DescribeDBParameterGroups action. + DBParameterGroup *DBParameterGroup `type:"structure"` +} + +// String returns the string representation +func (s CreateDBParameterGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBParameterGroupOutput) GoString() string { + return s.String() +} + +type CreateDBSecurityGroupInput struct { + _ struct{} `type:"structure"` + + // The description for the DB security group. + DBSecurityGroupDescription *string `type:"string" required:"true"` + + // The name for the DB security group. This value is stored as a lowercase string. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + // + // Must not be "Default" + // + // Cannot contain spaces + // + // Example: mysecuritygroup + DBSecurityGroupName *string `type:"string" required:"true"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateDBSecurityGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBSecurityGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDBSecurityGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDBSecurityGroupInput"} + if s.DBSecurityGroupDescription == nil { + invalidParams.Add(request.NewErrParamRequired("DBSecurityGroupDescription")) + } + if s.DBSecurityGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("DBSecurityGroupName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateDBSecurityGroupOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // DescribeDBSecurityGroups + // + // AuthorizeDBSecurityGroupIngress + // + // CreateDBSecurityGroup + // + // RevokeDBSecurityGroupIngress + // + // This data type is used as a response element in the DescribeDBSecurityGroups + // action. + DBSecurityGroup *DBSecurityGroup `type:"structure"` +} + +// String returns the string representation +func (s CreateDBSecurityGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBSecurityGroupOutput) GoString() string { + return s.String() +} + +type CreateDBSnapshotInput struct { + _ struct{} `type:"structure"` + + // The DB instance identifier. 
This is the unique key that identifies a DB instance. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + DBInstanceIdentifier *string `type:"string" required:"true"` + + // The identifier for the DB snapshot. + // + // Constraints: + // + // Cannot be null, empty, or blank + // + // Must contain from 1 to 255 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + // + // Example: my-snapshot-id + DBSnapshotIdentifier *string `type:"string" required:"true"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateDBSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBSnapshotInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDBSnapshotInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDBSnapshotInput"} + if s.DBInstanceIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("DBInstanceIdentifier")) + } + if s.DBSnapshotIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("DBSnapshotIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateDBSnapshotOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBSnapshot + // + // DeleteDBSnapshot + // + // This data type is used as a response element in the DescribeDBSnapshots + // action. + DBSnapshot *DBSnapshot `type:"structure"` +} + +// String returns the string representation +func (s CreateDBSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBSnapshotOutput) GoString() string { + return s.String() +} + +type CreateDBSubnetGroupInput struct { + _ struct{} `type:"structure"` + + // The description for the DB subnet group. + DBSubnetGroupDescription *string `type:"string" required:"true"` + + // The name for the DB subnet group. This value is stored as a lowercase string. + // + // Constraints: Must contain no more than 255 alphanumeric characters. Cannot + // contain periods, underscores, spaces, or hyphens. Must not be default. + // + // Example: mySubnetgroup + DBSubnetGroupName *string `type:"string" required:"true"` + + // The EC2 Subnet IDs for the DB subnet group. + SubnetIds []*string `locationNameList:"SubnetIdentifier" type:"list" required:"true"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateDBSubnetGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBSubnetGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
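// Editor's note: an illustrative sketch, not part of the vendored file, of the
// CreateDBSubnetGroupInput fields required by Validate below. It reuses svc
// from the first sketch; the subnet IDs are hypothetical and would have to
// belong to subnets in the same VPC.
//
//    _, err = svc.CreateDBSubnetGroup(&rds.CreateDBSubnetGroupInput{
//        DBSubnetGroupDescription: aws.String("hypothetical subnet group"),
//        DBSubnetGroupName:        aws.String("mysubnetgroup"),
//        SubnetIds: []*string{
//            aws.String("subnet-11111111"),
//            aws.String("subnet-22222222"),
//        },
//    })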
+func (s *CreateDBSubnetGroupInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateDBSubnetGroupInput"}
+ if s.DBSubnetGroupDescription == nil {
+ invalidParams.Add(request.NewErrParamRequired("DBSubnetGroupDescription"))
+ }
+ if s.DBSubnetGroupName == nil {
+ invalidParams.Add(request.NewErrParamRequired("DBSubnetGroupName"))
+ }
+ if s.SubnetIds == nil {
+ invalidParams.Add(request.NewErrParamRequired("SubnetIds"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type CreateDBSubnetGroupOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Contains the result of a successful invocation of the following actions:
+ //
+ // CreateDBSubnetGroup
+ //
+ // ModifyDBSubnetGroup
+ //
+ // DescribeDBSubnetGroups
+ //
+ // DeleteDBSubnetGroup
+ //
+ // This data type is used as a response element in the DescribeDBSubnetGroups
+ // action.
+ DBSubnetGroup *DBSubnetGroup `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateDBSubnetGroupOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDBSubnetGroupOutput) GoString() string {
+ return s.String()
+}
+
+type CreateEventSubscriptionInput struct {
+ _ struct{} `type:"structure"`
+
+ // A Boolean value; set to true to activate the subscription, set to false to
+ // create the subscription but not activate it.
+ Enabled *bool `type:"boolean"`
+
+ // A list of event categories for a SourceType that you want to subscribe to.
+ // You can see a list of the categories for a given SourceType in the Events
+ // (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html)
+ // topic in the Amazon RDS User Guide or by using the DescribeEventCategories
+ // action.
+ EventCategories []*string `locationNameList:"EventCategory" type:"list"`
+
+ // The Amazon Resource Name (ARN) of the SNS topic created for event notification.
+ // The ARN is created by Amazon SNS when you create a topic and subscribe to
+ // it.
+ SnsTopicArn *string `type:"string" required:"true"`
+
+ // The list of identifiers of the event sources for which events will be returned.
+ // If not specified, then all sources are included in the response. An identifier
+ // must begin with a letter and must contain only ASCII letters, digits, and
+ // hyphens; it cannot end with a hyphen or contain two consecutive hyphens.
+ //
+ // Constraints:
+ //
+ // If SourceIds are supplied, SourceType must also be provided.
+ //
+ // If the source type is a DB instance, then a DBInstanceIdentifier must
+ // be supplied.
+ //
+ // If the source type is a DB security group, a DBSecurityGroupName must
+ // be supplied.
+ //
+ // If the source type is a DB parameter group, a DBParameterGroupName must
+ // be supplied.
+ //
+ // If the source type is a DB snapshot, a DBSnapshotIdentifier must be supplied.
+ SourceIds []*string `locationNameList:"SourceId" type:"list"`
+
+ // The type of source that will be generating the events. For example, if you
+ // want to be notified of events generated by a DB instance, you would set this
+ // parameter to db-instance. If this value is not specified, all events are
+ // returned.
+ //
+ // Valid values: db-instance | db-cluster | db-parameter-group | db-security-group
+ // | db-snapshot | db-cluster-snapshot
+ SourceType *string `type:"string"`
+
+ // The name of the subscription.
+ //
+ // Constraints: The name must be less than 255 characters.
+ SubscriptionName *string `type:"string" required:"true"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateEventSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateEventSubscriptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateEventSubscriptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateEventSubscriptionInput"} + if s.SnsTopicArn == nil { + invalidParams.Add(request.NewErrParamRequired("SnsTopicArn")) + } + if s.SubscriptionName == nil { + invalidParams.Add(request.NewErrParamRequired("SubscriptionName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateEventSubscriptionOutput struct { + _ struct{} `type:"structure"` + + // Contains the results of a successful invocation of the DescribeEventSubscriptions + // action. + EventSubscription *EventSubscription `type:"structure"` +} + +// String returns the string representation +func (s CreateEventSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateEventSubscriptionOutput) GoString() string { + return s.String() +} + +type CreateOptionGroupInput struct { + _ struct{} `type:"structure"` + + // Specifies the name of the engine that this option group should be associated + // with. + EngineName *string `type:"string" required:"true"` + + // Specifies the major version of the engine that this option group should be + // associated with. + MajorEngineVersion *string `type:"string" required:"true"` + + // The description of the option group. + OptionGroupDescription *string `type:"string" required:"true"` + + // Specifies the name of the option group to be created. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + // + // Example: myoptiongroup + OptionGroupName *string `type:"string" required:"true"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateOptionGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateOptionGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
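// Editor's note: an illustrative sketch, not part of the vendored file, showing
// the four required CreateOptionGroupInput fields checked by Validate below.
// It reuses svc from the first sketch; all values are hypothetical.
//
//    _, err = svc.CreateOptionGroup(&rds.CreateOptionGroupInput{
//        EngineName:             aws.String("mysql"),
//        MajorEngineVersion:     aws.String("5.6"),
//        OptionGroupDescription: aws.String("hypothetical option group"),
//        OptionGroupName:        aws.String("myoptiongroup"),
//    })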
+func (s *CreateOptionGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateOptionGroupInput"} + if s.EngineName == nil { + invalidParams.Add(request.NewErrParamRequired("EngineName")) + } + if s.MajorEngineVersion == nil { + invalidParams.Add(request.NewErrParamRequired("MajorEngineVersion")) + } + if s.OptionGroupDescription == nil { + invalidParams.Add(request.NewErrParamRequired("OptionGroupDescription")) + } + if s.OptionGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("OptionGroupName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateOptionGroupOutput struct { + _ struct{} `type:"structure"` + + OptionGroup *OptionGroup `type:"structure"` +} + +// String returns the string representation +func (s CreateOptionGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateOptionGroupOutput) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the following actions: +// +// CreateDBCluster +// +// DeleteDBCluster +// +// FailoverDBCluster +// +// ModifyDBCluster +// +// RestoreDBClusterFromSnapshot +// +// RestoreDBClusterToPointInTime +// +// This data type is used as a response element in the DescribeDBClusters +// action. +type DBCluster struct { + _ struct{} `type:"structure"` + + // Specifies the allocated storage size in gigabytes (GB). + AllocatedStorage *int64 `type:"integer"` + + // Provides the list of EC2 Availability Zones that instances in the DB cluster + // can be created in. + AvailabilityZones []*string `locationNameList:"AvailabilityZone" type:"list"` + + // Specifies the number of days for which automatic DB snapshots are retained. + BackupRetentionPeriod *int64 `type:"integer"` + + // If present, specifies the name of the character set that this cluster is + // associated with. + CharacterSetName *string `type:"string"` + + // Contains a user-supplied DB cluster identifier. This identifier is the unique + // key that identifies a DB cluster. + DBClusterIdentifier *string `type:"string"` + + // Provides the list of instances that make up the DB cluster. + DBClusterMembers []*DBClusterMember `locationNameList:"DBClusterMember" type:"list"` + + // Provides the list of option group memberships for this DB cluster. + DBClusterOptionGroupMemberships []*DBClusterOptionGroupStatus `locationNameList:"DBClusterOptionGroup" type:"list"` + + // Specifies the name of the DB cluster parameter group for the DB cluster. + DBClusterParameterGroup *string `type:"string"` + + // Specifies information on the subnet group associated with the DB cluster, + // including the name, description, and subnets in the subnet group. + DBSubnetGroup *string `type:"string"` + + // Contains the name of the initial database of this DB cluster that was provided + // at create time, if one was specified when the DB cluster was created. This + // same name is returned for the life of the DB cluster. + DatabaseName *string `type:"string"` + + // The region-unique, immutable identifier for the DB cluster. This identifier + // is found in AWS CloudTrail log entries whenever the KMS key for the DB cluster + // is accessed. + DbClusterResourceId *string `type:"string"` + + // Specifies the earliest time to which a database can be restored with point-in-time + // restore. 
+ EarliestRestorableTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Specifies the connection endpoint for the primary instance of the DB cluster. + Endpoint *string `type:"string"` + + // Provides the name of the database engine to be used for this DB cluster. + Engine *string `type:"string"` + + // Indicates the database engine version. + EngineVersion *string `type:"string"` + + // Specifies the ID that Amazon Route 53 assigns when you create a hosted zone. + HostedZoneId *string `type:"string"` + + // If StorageEncrypted is true, the KMS key identifier for the encrypted DB + // cluster. + KmsKeyId *string `type:"string"` + + // Specifies the latest time to which a database can be restored with point-in-time + // restore. + LatestRestorableTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Contains the master username for the DB cluster. + MasterUsername *string `type:"string"` + + // Specifies the progress of the operation as a percentage. + PercentProgress *string `type:"string"` + + // Specifies the port that the database engine is listening on. + Port *int64 `type:"integer"` + + // Specifies the daily time range during which automated backups are created + // if automated backups are enabled, as determined by the BackupRetentionPeriod. + PreferredBackupWindow *string `type:"string"` + + // Specifies the weekly time range during which system maintenance can occur, + // in Universal Coordinated Time (UTC). + PreferredMaintenanceWindow *string `type:"string"` + + // Contains one or more identifiers of the Read Replicas associated with this + // DB cluster. + ReadReplicaIdentifiers []*string `locationNameList:"ReadReplicaIdentifier" type:"list"` + + // Contains the identifier of the source DB cluster if this DB cluster is a + // Read Replica. + ReplicationSourceIdentifier *string `type:"string"` + + // Specifies the current state of this DB cluster. + Status *string `type:"string"` + + // Specifies whether the DB cluster is encrypted. + StorageEncrypted *bool `type:"boolean"` + + // Provides a list of VPC security groups that the DB cluster belongs to. + VpcSecurityGroups []*VpcSecurityGroupMembership `locationNameList:"VpcSecurityGroupMembership" type:"list"` +} + +// String returns the string representation +func (s DBCluster) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBCluster) GoString() string { + return s.String() +} + +// Contains information about an instance that is part of a DB cluster. +type DBClusterMember struct { + _ struct{} `type:"structure"` + + // Specifies the status of the DB cluster parameter group for this member of + // the DB cluster. + DBClusterParameterGroupStatus *string `type:"string"` + + // Specifies the instance identifier for this member of the DB cluster. + DBInstanceIdentifier *string `type:"string"` + + // Value that is true if the cluster member is the primary instance for the + // DB cluster and false otherwise. + IsClusterWriter *bool `type:"boolean"` + + // A value that specifies the order in which an Aurora Replica is promoted to + // the primary instance after a failure of the existing primary instance. For + // more information, see Fault Tolerance for an Aurora DB Cluster (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Aurora.Managing.html#Aurora.Managing.FaultTolerance). 
+ PromotionTier *int64 `type:"integer"` +} + +// String returns the string representation +func (s DBClusterMember) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBClusterMember) GoString() string { + return s.String() +} + +// Contains status information for a DB cluster option group. +type DBClusterOptionGroupStatus struct { + _ struct{} `type:"structure"` + + // Specifies the name of the DB cluster option group. + DBClusterOptionGroupName *string `type:"string"` + + // Specifies the status of the DB cluster option group. + Status *string `type:"string"` +} + +// String returns the string representation +func (s DBClusterOptionGroupStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBClusterOptionGroupStatus) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the CreateDBClusterParameterGroup +// action. +// +// This data type is used as a request parameter in the DeleteDBClusterParameterGroup +// action, and as a response element in the DescribeDBClusterParameterGroups +// action. +type DBClusterParameterGroup struct { + _ struct{} `type:"structure"` + + // Provides the name of the DB cluster parameter group. + DBClusterParameterGroupName *string `type:"string"` + + // Provides the name of the DB parameter group family that this DB cluster parameter + // group is compatible with. + DBParameterGroupFamily *string `type:"string"` + + // Provides the customer-specified description for this DB cluster parameter + // group. + Description *string `type:"string"` +} + +// String returns the string representation +func (s DBClusterParameterGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBClusterParameterGroup) GoString() string { + return s.String() +} + +type DBClusterParameterGroupNameMessage struct { + _ struct{} `type:"structure"` + + // The name of the DB cluster parameter group. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + // + // This value is stored as a lowercase string. + DBClusterParameterGroupName *string `type:"string"` +} + +// String returns the string representation +func (s DBClusterParameterGroupNameMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBClusterParameterGroupNameMessage) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the following actions: +// +// CreateDBClusterSnapshot +// +// DeleteDBClusterSnapshot +// +// This data type is used as a response element in the DescribeDBClusterSnapshots +// action. +type DBClusterSnapshot struct { + _ struct{} `type:"structure"` + + // Specifies the allocated storage size in gigabytes (GB). + AllocatedStorage *int64 `type:"integer"` + + // Provides the list of EC2 Availability Zones that instances in the DB cluster + // snapshot can be restored in. + AvailabilityZones []*string `locationNameList:"AvailabilityZone" type:"list"` + + // Specifies the time when the DB cluster was created, in Universal Coordinated + // Time (UTC). + ClusterCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Specifies the DB cluster identifier of the DB cluster that this DB cluster + // snapshot was created from. 
+ DBClusterIdentifier *string `type:"string"` + + // Specifies the identifier for the DB cluster snapshot. + DBClusterSnapshotIdentifier *string `type:"string"` + + // Specifies the name of the database engine. + Engine *string `type:"string"` + + // Provides the version of the database engine for this DB cluster snapshot. + EngineVersion *string `type:"string"` + + // If StorageEncrypted is true, the KMS key identifier for the encrypted DB + // cluster snapshot. + KmsKeyId *string `type:"string"` + + // Provides the license model information for this DB cluster snapshot. + LicenseModel *string `type:"string"` + + // Provides the master username for the DB cluster snapshot. + MasterUsername *string `type:"string"` + + // Specifies the percentage of the estimated data that has been transferred. + PercentProgress *int64 `type:"integer"` + + // Specifies the port that the DB cluster was listening on at the time of the + // snapshot. + Port *int64 `type:"integer"` + + // Provides the time when the snapshot was taken, in Universal Coordinated Time + // (UTC). + SnapshotCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Provides the type of the DB cluster snapshot. + SnapshotType *string `type:"string"` + + // Specifies the status of this DB cluster snapshot. + Status *string `type:"string"` + + // Specifies whether the DB cluster snapshot is encrypted. + StorageEncrypted *bool `type:"boolean"` + + // Provides the VPC ID associated with the DB cluster snapshot. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s DBClusterSnapshot) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBClusterSnapshot) GoString() string { + return s.String() +} + +// Contains the name and values of a manual DB cluster snapshot attribute. +// +// Manual DB cluster snapshot attributes are used to authorize other AWS accounts +// to restore a manual DB cluster snapshot. For more information, see the ModifyDBClusterSnapshotAttribute +// API action. +type DBClusterSnapshotAttribute struct { + _ struct{} `type:"structure"` + + // The name of the manual DB cluster snapshot attribute. + // + // The attribute named restore refers to the list of AWS accounts that have + // permission to copy or restore the manual DB cluster snapshot. For more information, + // see the ModifyDBClusterSnapshotAttribute API action. + AttributeName *string `type:"string"` + + // The value(s) for the manual DB cluster snapshot attribute. + // + // If the AttributeName field is set to restore, then this element returns + // a list of IDs of the AWS accounts that are authorized to copy or restore + // the manual DB cluster snapshot. If a value of all is in the list, then the + // manual DB cluster snapshot is public and available for any AWS account to + // copy or restore. + AttributeValues []*string `locationNameList:"AttributeValue" type:"list"` +} + +// String returns the string representation +func (s DBClusterSnapshotAttribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBClusterSnapshotAttribute) GoString() string { + return s.String() +} + +// Contains the results of a successful call to the DescribeDBClusterSnapshotAttributes +// API action. +// +// Manual DB cluster snapshot attributes are used to authorize other AWS accounts +// to copy or restore a manual DB cluster snapshot. For more information, see +// the ModifyDBClusterSnapshotAttribute API action. 
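// Editor's note: an illustrative sketch, not part of the vendored file, of how
// the restore attribute described above might be read from this result type.
// It reuses svc from the first sketch; the snapshot identifier is hypothetical.
//
//    res, err := svc.DescribeDBClusterSnapshotAttributes(
//        &rds.DescribeDBClusterSnapshotAttributesInput{
//            DBClusterSnapshotIdentifier: aws.String("my-cluster-snapshot"),
//        })
//    if err == nil {
//        attrs := res.DBClusterSnapshotAttributesResult.DBClusterSnapshotAttributes
//        for _, attr := range attrs {
//            if aws.StringValue(attr.AttributeName) == "restore" {
//                // account IDs authorized to copy or restore the snapshot
//                fmt.Println(aws.StringValueSlice(attr.AttributeValues))
//            }
//        }
//    }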
+type DBClusterSnapshotAttributesResult struct { + _ struct{} `type:"structure"` + + // The list of attributes and values for the manual DB cluster snapshot. + DBClusterSnapshotAttributes []*DBClusterSnapshotAttribute `locationNameList:"DBClusterSnapshotAttribute" type:"list"` + + // The identifier of the manual DB cluster snapshot that the attributes apply + // to. + DBClusterSnapshotIdentifier *string `type:"string"` +} + +// String returns the string representation +func (s DBClusterSnapshotAttributesResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBClusterSnapshotAttributesResult) GoString() string { + return s.String() +} + +// This data type is used as a response element in the action DescribeDBEngineVersions. +type DBEngineVersion struct { + _ struct{} `type:"structure"` + + // The description of the database engine. + DBEngineDescription *string `type:"string"` + + // The description of the database engine version. + DBEngineVersionDescription *string `type:"string"` + + // The name of the DB parameter group family for the database engine. + DBParameterGroupFamily *string `type:"string"` + + // The default character set for new instances of this engine version, if the + // CharacterSetName parameter of the CreateDBInstance API is not specified. + DefaultCharacterSet *CharacterSet `type:"structure"` + + // The name of the database engine. + Engine *string `type:"string"` + + // The version number of the database engine. + EngineVersion *string `type:"string"` + + // A list of the character sets supported by this engine for the CharacterSetName + // parameter of the CreateDBInstance API. + SupportedCharacterSets []*CharacterSet `locationNameList:"CharacterSet" type:"list"` + + // A list of engine versions that this database engine version can be upgraded + // to. + ValidUpgradeTarget []*UpgradeTarget `locationNameList:"UpgradeTarget" type:"list"` +} + +// String returns the string representation +func (s DBEngineVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBEngineVersion) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the following actions: +// +// CreateDBInstance +// +// DeleteDBInstance +// +// ModifyDBInstance +// +// This data type is used as a response element in the DescribeDBInstances +// action. +type DBInstance struct { + _ struct{} `type:"structure"` + + // Specifies the allocated storage size specified in gigabytes. + AllocatedStorage *int64 `type:"integer"` + + // Indicates that minor version patches are applied automatically. + AutoMinorVersionUpgrade *bool `type:"boolean"` + + // Specifies the name of the Availability Zone the DB instance is located in. + AvailabilityZone *string `type:"string"` + + // Specifies the number of days for which automatic DB snapshots are retained. + BackupRetentionPeriod *int64 `type:"integer"` + + // The identifier of the CA certificate for this DB instance. + CACertificateIdentifier *string `type:"string"` + + // If present, specifies the name of the character set that this instance is + // associated with. + CharacterSetName *string `type:"string"` + + // Specifies whether tags are copied from the DB instance to snapshots of the + // DB instance. + CopyTagsToSnapshot *bool `type:"boolean"` + + // If the DB instance is a member of a DB cluster, contains the name of the + // DB cluster that the DB instance is a member of. 
+ DBClusterIdentifier *string `type:"string"` + + // Contains the name of the compute and memory capacity class of the DB instance. + DBInstanceClass *string `type:"string"` + + // Contains a user-supplied database identifier. This identifier is the unique + // key that identifies a DB instance. + DBInstanceIdentifier *string `type:"string"` + + // Specifies the current state of this database. + DBInstanceStatus *string `type:"string"` + + // The meaning of this parameter differs according to the database engine you + // use. For example, this value returns MySQL, MariaDB, or PostgreSQL information + // when returning values from CreateDBInstanceReadReplica since Read Replicas + // are only supported for these engines. + // + // MySQL, MariaDB, SQL Server, PostgreSQL, Amazon Aurora + // + // Contains the name of the initial database of this instance that was provided + // at create time, if one was specified when the DB instance was created. This + // same name is returned for the life of the DB instance. + // + // Type: String + // + // Oracle + // + // Contains the Oracle System ID (SID) of the created DB instance. Not shown + // when the returned parameters do not apply to an Oracle DB instance. + DBName *string `type:"string"` + + // Provides the list of DB parameter groups applied to this DB instance. + DBParameterGroups []*DBParameterGroupStatus `locationNameList:"DBParameterGroup" type:"list"` + + // Provides List of DB security group elements containing only DBSecurityGroup.Name + // and DBSecurityGroup.Status subelements. + DBSecurityGroups []*DBSecurityGroupMembership `locationNameList:"DBSecurityGroup" type:"list"` + + // Specifies information on the subnet group associated with the DB instance, + // including the name, description, and subnets in the subnet group. + DBSubnetGroup *DBSubnetGroup `type:"structure"` + + // Specifies the port that the DB instance listens on. If the DB instance is + // part of a DB cluster, this can be a different port than the DB cluster port. + DbInstancePort *int64 `type:"integer"` + + // The region-unique, immutable identifier for the DB instance. This identifier + // is found in AWS CloudTrail log entries whenever the KMS key for the DB instance + // is accessed. + DbiResourceId *string `type:"string"` + + // The Active Directory Domain membership records associated with the DB instance. + DomainMemberships []*DomainMembership `locationNameList:"DomainMembership" type:"list"` + + // Specifies the connection endpoint. + Endpoint *Endpoint `type:"structure"` + + // Provides the name of the database engine to be used for this DB instance. + Engine *string `type:"string"` + + // Indicates the database engine version. + EngineVersion *string `type:"string"` + + // The Amazon Resource Name (ARN) of the Amazon CloudWatch Logs log stream that + // receives the Enhanced Monitoring metrics data for the DB instance. + EnhancedMonitoringResourceArn *string `type:"string"` + + // Provides the date and time the DB instance was created. + InstanceCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Specifies the Provisioned IOPS (I/O operations per second) value. + Iops *int64 `type:"integer"` + + // If StorageEncrypted is true, the KMS key identifier for the encrypted DB + // instance. + KmsKeyId *string `type:"string"` + + // Specifies the latest time to which a database can be restored with point-in-time + // restore. 
+ LatestRestorableTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // License model information for this DB instance. + LicenseModel *string `type:"string"` + + // Contains the master username for the DB instance. + MasterUsername *string `type:"string"` + + // The interval, in seconds, between points when Enhanced Monitoring metrics + // are collected for the DB instance. + MonitoringInterval *int64 `type:"integer"` + + // The ARN for the IAM role that permits RDS to send Enhanced Monitoring metrics + // to CloudWatch Logs. + MonitoringRoleArn *string `type:"string"` + + // Specifies if the DB instance is a Multi-AZ deployment. + MultiAZ *bool `type:"boolean"` + + // Provides the list of option group memberships for this DB instance. + OptionGroupMemberships []*OptionGroupMembership `locationNameList:"OptionGroupMembership" type:"list"` + + // Specifies that changes to the DB instance are pending. This element is only + // included when changes are pending. Specific changes are identified by subelements. + PendingModifiedValues *PendingModifiedValues `type:"structure"` + + // Specifies the daily time range during which automated backups are created + // if automated backups are enabled, as determined by the BackupRetentionPeriod. + PreferredBackupWindow *string `type:"string"` + + // Specifies the weekly time range during which system maintenance can occur, + // in Universal Coordinated Time (UTC). + PreferredMaintenanceWindow *string `type:"string"` + + // A value that specifies the order in which an Aurora Replica is promoted to + // the primary instance after a failure of the existing primary instance. For + // more information, see Fault Tolerance for an Aurora DB Cluster (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Aurora.Managing.html#Aurora.Managing.FaultTolerance). + PromotionTier *int64 `type:"integer"` + + // Specifies the accessibility options for the DB instance. A value of true + // specifies an Internet-facing instance with a publicly resolvable DNS name, + // which resolves to a public IP address. A value of false specifies an internal + // instance with a DNS name that resolves to a private IP address. + // + // Default: The default behavior varies depending on whether a VPC has been + // requested or not. The following list shows the default behavior in each case. + // + // Default VPC:true + // + // VPC:false + // + // If no DB subnet group has been specified as part of the request and the + // PubliclyAccessible value has not been set, the DB instance will be publicly + // accessible. If a specific DB subnet group has been specified as part of the + // request and the PubliclyAccessible value has not been set, the DB instance + // will be private. + PubliclyAccessible *bool `type:"boolean"` + + // Contains one or more identifiers of the Read Replicas associated with this + // DB instance. + ReadReplicaDBInstanceIdentifiers []*string `locationNameList:"ReadReplicaDBInstanceIdentifier" type:"list"` + + // Contains the identifier of the source DB instance if this DB instance is + // a Read Replica. + ReadReplicaSourceDBInstanceIdentifier *string `type:"string"` + + // If present, specifies the name of the secondary Availability Zone for a DB + // instance with multi-AZ support. + SecondaryAvailabilityZone *string `type:"string"` + + // The status of a Read Replica. If the instance is not a Read Replica, this + // will be blank. 
+ StatusInfos []*DBInstanceStatusInfo `locationNameList:"DBInstanceStatusInfo" type:"list"` + + // Specifies whether the DB instance is encrypted. + StorageEncrypted *bool `type:"boolean"` + + // Specifies the storage type associated with DB instance. + StorageType *string `type:"string"` + + // The ARN from the Key Store with which the instance is associated for TDE + // encryption. + TdeCredentialArn *string `type:"string"` + + // Provides List of VPC security group elements that the DB instance belongs + // to. + VpcSecurityGroups []*VpcSecurityGroupMembership `locationNameList:"VpcSecurityGroupMembership" type:"list"` +} + +// String returns the string representation +func (s DBInstance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBInstance) GoString() string { + return s.String() +} + +// Provides a list of status information for a DB instance. +type DBInstanceStatusInfo struct { + _ struct{} `type:"structure"` + + // Details of the error if there is an error for the instance. If the instance + // is not in an error state, this value is blank. + Message *string `type:"string"` + + // Boolean value that is true if the instance is operating normally, or false + // if the instance is in an error state. + Normal *bool `type:"boolean"` + + // Status of the DB instance. For a StatusType of read replica, the values can + // be replicating, error, stopped, or terminated. + Status *string `type:"string"` + + // This value is currently "read replication." + StatusType *string `type:"string"` +} + +// String returns the string representation +func (s DBInstanceStatusInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBInstanceStatusInfo) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the CreateDBParameterGroup +// action. +// +// This data type is used as a request parameter in the DeleteDBParameterGroup +// action, and as a response element in the DescribeDBParameterGroups action. +type DBParameterGroup struct { + _ struct{} `type:"structure"` + + // Provides the name of the DB parameter group family that this DB parameter + // group is compatible with. + DBParameterGroupFamily *string `type:"string"` + + // Provides the name of the DB parameter group. + DBParameterGroupName *string `type:"string"` + + // Provides the customer-specified description for this DB parameter group. + Description *string `type:"string"` +} + +// String returns the string representation +func (s DBParameterGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBParameterGroup) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the ModifyDBParameterGroup +// or ResetDBParameterGroup action. +type DBParameterGroupNameMessage struct { + _ struct{} `type:"structure"` + + // Provides the name of the DB parameter group. + DBParameterGroupName *string `type:"string"` +} + +// String returns the string representation +func (s DBParameterGroupNameMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBParameterGroupNameMessage) GoString() string { + return s.String() +} + +// The status of the DB parameter group. 
+//
+// This data type is used as a response element in the following actions:
+//
+// CreateDBInstance
+//
+// CreateDBInstanceReadReplica
+//
+// DeleteDBInstance
+//
+// ModifyDBInstance
+//
+// RebootDBInstance
+//
+// RestoreDBInstanceFromDBSnapshot
+type DBParameterGroupStatus struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the DB parameter group.
+ DBParameterGroupName *string `type:"string"`
+
+ // The status of parameter updates.
+ ParameterApplyStatus *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DBParameterGroupStatus) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DBParameterGroupStatus) GoString() string {
+ return s.String()
+}
+
+// Contains the result of a successful invocation of the following actions:
+//
+// DescribeDBSecurityGroups
+//
+// AuthorizeDBSecurityGroupIngress
+//
+// CreateDBSecurityGroup
+//
+// RevokeDBSecurityGroupIngress
+//
+// This data type is used as a response element in the DescribeDBSecurityGroups
+// action.
+type DBSecurityGroup struct {
+ _ struct{} `type:"structure"`
+
+ // Provides the description of the DB security group.
+ DBSecurityGroupDescription *string `type:"string"`
+
+ // Specifies the name of the DB security group.
+ DBSecurityGroupName *string `type:"string"`
+
+ // Contains a list of EC2SecurityGroup elements.
+ EC2SecurityGroups []*EC2SecurityGroup `locationNameList:"EC2SecurityGroup" type:"list"`
+
+ // Contains a list of IPRange elements.
+ IPRanges []*IPRange `locationNameList:"IPRange" type:"list"`
+
+ // Provides the AWS ID of the owner of a specific DB security group.
+ OwnerId *string `type:"string"`
+
+ // Provides the VpcId of the DB security group.
+ VpcId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DBSecurityGroup) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DBSecurityGroup) GoString() string {
+ return s.String()
+}
+
+// This data type is used as a response element in the following actions:
+//
+// ModifyDBInstance
+//
+// RebootDBInstance
+//
+// RestoreDBInstanceFromDBSnapshot
+//
+// RestoreDBInstanceToPointInTime
+type DBSecurityGroupMembership struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the DB security group.
+ DBSecurityGroupName *string `type:"string"`
+
+ // The status of the DB security group.
+ Status *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DBSecurityGroupMembership) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DBSecurityGroupMembership) GoString() string {
+ return s.String()
+}
+
+// Contains the result of a successful invocation of the following actions:
+//
+// CreateDBSnapshot
+//
+// DeleteDBSnapshot
+//
+// This data type is used as a response element in the DescribeDBSnapshots
+// action.
+type DBSnapshot struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the allocated storage size in gigabytes (GB).
+ AllocatedStorage *int64 `type:"integer"`
+
+ // Specifies the name of the Availability Zone the DB instance was located in
+ // at the time of the DB snapshot.
+ AvailabilityZone *string `type:"string"`
+
+ // Specifies the DB instance identifier of the DB instance this DB snapshot
+ // was created from.
+ DBInstanceIdentifier *string `type:"string"`
+
+ // Specifies the identifier for the DB snapshot.
+ DBSnapshotIdentifier *string `type:"string"`
+
+ // Specifies whether the DB snapshot is encrypted.
+ Encrypted *bool `type:"boolean"`
+
+ // Specifies the name of the database engine.
+ Engine *string `type:"string"`
+
+ // Specifies the version of the database engine.
+ EngineVersion *string `type:"string"`
+
+ // Specifies the time when the snapshot was taken, in Universal Coordinated
+ // Time (UTC).
+ InstanceCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // Specifies the Provisioned IOPS (I/O operations per second) value of the DB
+ // instance at the time of the snapshot.
+ Iops *int64 `type:"integer"`
+
+ // If Encrypted is true, the KMS key identifier for the encrypted DB snapshot.
+ KmsKeyId *string `type:"string"`
+
+ // License model information for the restored DB instance.
+ LicenseModel *string `type:"string"`
+
+ // Provides the master username for the DB snapshot.
+ MasterUsername *string `type:"string"`
+
+ // Provides the option group name for the DB snapshot.
+ OptionGroupName *string `type:"string"`
+
+ // The percentage of the estimated data that has been transferred.
+ PercentProgress *int64 `type:"integer"`
+
+ // Specifies the port that the database engine was listening on at the time
+ // of the snapshot.
+ Port *int64 `type:"integer"`
+
+ // Provides the time when the snapshot was taken, in Universal Coordinated Time
+ // (UTC).
+ SnapshotCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // Provides the type of the DB snapshot.
+ SnapshotType *string `type:"string"`
+
+ // The DB snapshot ARN that the DB snapshot was copied from. It has a value
+ // only in the case of a cross-customer or cross-region copy.
+ SourceDBSnapshotIdentifier *string `type:"string"`
+
+ // The region that the DB snapshot was created in or copied from.
+ SourceRegion *string `type:"string"`
+
+ // Specifies the status of this DB snapshot.
+ Status *string `type:"string"`
+
+ // Specifies the storage type associated with the DB snapshot.
+ StorageType *string `type:"string"`
+
+ // The ARN from the Key Store with which to associate the instance for TDE encryption.
+ TdeCredentialArn *string `type:"string"`
+
+ // Provides the VPC ID associated with the DB snapshot.
+ VpcId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DBSnapshot) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DBSnapshot) GoString() string {
+ return s.String()
+}
+
+// Contains the name and values of a manual DB snapshot attribute.
+//
+// Manual DB snapshot attributes are used to authorize other AWS accounts to
+// restore a manual DB snapshot. For more information, see the ModifyDBSnapshotAttribute
+// API.
+type DBSnapshotAttribute struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the manual DB snapshot attribute.
+ //
+ // The attribute named restore refers to the list of AWS accounts that have
+ // permission to copy or restore the manual DB snapshot. For more information,
+ // see the ModifyDBSnapshotAttribute API action.
+ AttributeName *string `type:"string"`
+
+ // The value or values for the manual DB snapshot attribute.
+ //
+ // If the AttributeName field is set to restore, then this element returns
+ // a list of IDs of the AWS accounts that are authorized to copy or restore
+ // the manual DB snapshot. If a value of all is in the list, then the manual
+ // DB snapshot is public and available for any AWS account to copy or restore.
+ AttributeValues []*string `locationNameList:"AttributeValue" type:"list"`
+}
+
+// String returns the string representation
+func (s DBSnapshotAttribute) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DBSnapshotAttribute) GoString() string {
+ return s.String()
+}
+
+// Contains the results of a successful call to the DescribeDBSnapshotAttributes
+// API action.
+//
+// Manual DB snapshot attributes are used to authorize other AWS accounts to
+// copy or restore a manual DB snapshot. For more information, see the ModifyDBSnapshotAttribute
+// API action.
+type DBSnapshotAttributesResult struct {
+ _ struct{} `type:"structure"`
+
+ // The list of attributes and values for the manual DB snapshot.
+ DBSnapshotAttributes []*DBSnapshotAttribute `locationNameList:"DBSnapshotAttribute" type:"list"`
+
+ // The identifier of the manual DB snapshot that the attributes apply to.
+ DBSnapshotIdentifier *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DBSnapshotAttributesResult) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DBSnapshotAttributesResult) GoString() string {
+ return s.String()
+}
+
+// Contains the result of a successful invocation of the following actions:
+//
+// CreateDBSubnetGroup
+//
+// ModifyDBSubnetGroup
+//
+// DescribeDBSubnetGroups
+//
+// DeleteDBSubnetGroup
+//
+// This data type is used as a response element in the DescribeDBSubnetGroups
+// action.
+type DBSubnetGroup struct {
+ _ struct{} `type:"structure"`
+
+ // Provides the description of the DB subnet group.
+ DBSubnetGroupDescription *string `type:"string"`
+
+ // The name of the DB subnet group.
+ DBSubnetGroupName *string `type:"string"`
+
+ // Provides the status of the DB subnet group.
+ SubnetGroupStatus *string `type:"string"`
+
+ // Contains a list of Subnet elements.
+ Subnets []*Subnet `locationNameList:"Subnet" type:"list"`
+
+ // Provides the VpcId of the DB subnet group.
+ VpcId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DBSubnetGroup) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DBSubnetGroup) GoString() string {
+ return s.String()
+}
+
+type DeleteDBClusterInput struct {
+ _ struct{} `type:"structure"`
+
+ // The DB cluster identifier for the DB cluster to be deleted. This parameter
+ // isn't case-sensitive.
+ //
+ // Constraints:
+ //
+ // Must contain from 1 to 63 alphanumeric characters or hyphens
+ //
+ // First character must be a letter
+ //
+ // Cannot end with a hyphen or contain two consecutive hyphens
+ DBClusterIdentifier *string `type:"string" required:"true"`
+
+ // The DB cluster snapshot identifier of the new DB cluster snapshot created
+ // when SkipFinalSnapshot is set to false.
+ //
+ // Specifying this parameter and also setting the SkipFinalSnapshot parameter
+ // to true results in an error.
+ //
+ // Constraints:
+ //
+ // Must be 1 to 255 alphanumeric characters
+ //
+ // First character must be a letter
+ //
+ // Cannot end with a hyphen or contain two consecutive hyphens
+ FinalDBSnapshotIdentifier *string `type:"string"`
+
+ // Determines whether a final DB cluster snapshot is created before the DB cluster
+ // is deleted. If true is specified, no DB cluster snapshot is created. If false
+ // is specified, a DB cluster snapshot is created before the DB cluster is deleted.
+ // + // You must specify a FinalDBSnapshotIdentifier parameter if SkipFinalSnapshot + // is false. + // + // Default: false + SkipFinalSnapshot *bool `type:"boolean"` +} + +// String returns the string representation +func (s DeleteDBClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDBClusterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDBClusterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDBClusterInput"} + if s.DBClusterIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("DBClusterIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteDBClusterOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBCluster + // + // DeleteDBCluster + // + // FailoverDBCluster + // + // ModifyDBCluster + // + // RestoreDBClusterFromSnapshot + // + // RestoreDBClusterToPointInTime + // + // This data type is used as a response element in the DescribeDBClusters + // action. + DBCluster *DBCluster `type:"structure"` +} + +// String returns the string representation +func (s DeleteDBClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDBClusterOutput) GoString() string { + return s.String() +} + +type DeleteDBClusterParameterGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the DB cluster parameter group. + // + // Constraints: + // + // Must be the name of an existing DB cluster parameter group. + // + // You cannot delete a default DB cluster parameter group. + // + // Cannot be associated with any DB clusters. + DBClusterParameterGroupName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDBClusterParameterGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDBClusterParameterGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDBClusterParameterGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDBClusterParameterGroupInput"} + if s.DBClusterParameterGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("DBClusterParameterGroupName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteDBClusterParameterGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDBClusterParameterGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDBClusterParameterGroupOutput) GoString() string { + return s.String() +} + +type DeleteDBClusterSnapshotInput struct { + _ struct{} `type:"structure"` + + // The identifier of the DB cluster snapshot to delete. + // + // Constraints: Must be the name of an existing DB cluster snapshot in the + // available state. 
+ DBClusterSnapshotIdentifier *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteDBClusterSnapshotInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteDBClusterSnapshotInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteDBClusterSnapshotInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteDBClusterSnapshotInput"}
+ if s.DBClusterSnapshotIdentifier == nil {
+ invalidParams.Add(request.NewErrParamRequired("DBClusterSnapshotIdentifier"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type DeleteDBClusterSnapshotOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Contains the result of a successful invocation of the following actions:
+ //
+ // CreateDBClusterSnapshot
+ //
+ // DeleteDBClusterSnapshot
+ //
+ // This data type is used as a response element in the DescribeDBClusterSnapshots
+ // action.
+ DBClusterSnapshot *DBClusterSnapshot `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteDBClusterSnapshotOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteDBClusterSnapshotOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteDBInstanceInput struct {
+ _ struct{} `type:"structure"`
+
+ // The DB instance identifier for the DB instance to be deleted. This parameter
+ // isn't case-sensitive.
+ //
+ // Constraints:
+ //
+ // Must contain from 1 to 63 alphanumeric characters or hyphens
+ //
+ // First character must be a letter
+ //
+ // Cannot end with a hyphen or contain two consecutive hyphens
+ DBInstanceIdentifier *string `type:"string" required:"true"`
+
+ // The DBSnapshotIdentifier of the new DBSnapshot created when SkipFinalSnapshot
+ // is set to false.
+ //
+ // Specifying this parameter and also setting the SkipFinalSnapshot parameter
+ // to true results in an error.
+ //
+ // Constraints:
+ //
+ // Must be 1 to 255 alphanumeric characters
+ //
+ // First character must be a letter
+ //
+ // Cannot end with a hyphen or contain two consecutive hyphens
+ //
+ // Cannot be specified when deleting a Read Replica.
+ FinalDBSnapshotIdentifier *string `type:"string"`
+
+ // Determines whether a final DB snapshot is created before the DB instance
+ // is deleted. If true is specified, no DBSnapshot is created. If false is specified,
+ // a DB snapshot is created before the DB instance is deleted.
+ //
+ // Note that when a DB instance is in a failure state and has a status of 'failed',
+ // 'incompatible-restore', or 'incompatible-network', it can only be deleted
+ // when the SkipFinalSnapshot parameter is set to "true".
+ //
+ // Specify true when deleting a Read Replica.
+ //
+ // The FinalDBSnapshotIdentifier parameter must be specified if SkipFinalSnapshot
+ // is false.
+ //
+ // Default: false
+ SkipFinalSnapshot *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s DeleteDBInstanceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteDBInstanceInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
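+//
+// Illustrative sketch (editor's addition): deleting an instance while keeping
+// a final snapshot. Per the documentation above, FinalDBSnapshotIdentifier and
+// SkipFinalSnapshot set to true are mutually exclusive. The client value "svc"
+// is an assumed *rds.RDS built with rds.New(session.Must(session.NewSession())),
+// with the aws, rds, and session imports assumed.
+//
+//	out, err := svc.DeleteDBInstance(&rds.DeleteDBInstanceInput{
+//		DBInstanceIdentifier:      aws.String("mydbinstance"),
+//		FinalDBSnapshotIdentifier: aws.String("mydbinstance-final"),
+//		SkipFinalSnapshot:         aws.Bool(false),
+//	})
+//	if err != nil {
+//		// handle the error, e.g. log and return
+//	}
+//	_ = out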
+func (s *DeleteDBInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDBInstanceInput"} + if s.DBInstanceIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("DBInstanceIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteDBInstanceOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBInstance + // + // DeleteDBInstance + // + // ModifyDBInstance + // + // This data type is used as a response element in the DescribeDBInstances + // action. + DBInstance *DBInstance `type:"structure"` +} + +// String returns the string representation +func (s DeleteDBInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDBInstanceOutput) GoString() string { + return s.String() +} + +type DeleteDBParameterGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the DB parameter group. + // + // Constraints: + // + // Must be the name of an existing DB parameter group + // + // You cannot delete a default DB parameter group + // + // Cannot be associated with any DB instances + DBParameterGroupName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDBParameterGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDBParameterGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDBParameterGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDBParameterGroupInput"} + if s.DBParameterGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("DBParameterGroupName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteDBParameterGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDBParameterGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDBParameterGroupOutput) GoString() string { + return s.String() +} + +type DeleteDBSecurityGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the DB security group to delete. + // + // You cannot delete the default DB security group. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + // + // Must not be "Default" + // + // Cannot contain spaces + DBSecurityGroupName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDBSecurityGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDBSecurityGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
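+//
+// Illustrative sketch (editor's addition): these generated Validate methods
+// check required fields and nested parameters locally, before any request is
+// sent. A caller could surface the aggregated parameter errors like this (the
+// input value below is assumed):
+//
+//	input := &rds.DeleteDBSecurityGroupInput{} // missing DBSecurityGroupName
+//	if err := input.Validate(); err != nil {
+//		fmt.Println(err) // reports the missing required field
+//	}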
+func (s *DeleteDBSecurityGroupInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteDBSecurityGroupInput"}
+ if s.DBSecurityGroupName == nil {
+ invalidParams.Add(request.NewErrParamRequired("DBSecurityGroupName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type DeleteDBSecurityGroupOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteDBSecurityGroupOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteDBSecurityGroupOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteDBSnapshotInput struct {
+ _ struct{} `type:"structure"`
+
+ // The DBSnapshot identifier.
+ //
+ // Constraints: Must be the name of an existing DB snapshot in the available
+ // state.
+ DBSnapshotIdentifier *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteDBSnapshotInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteDBSnapshotInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteDBSnapshotInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteDBSnapshotInput"}
+ if s.DBSnapshotIdentifier == nil {
+ invalidParams.Add(request.NewErrParamRequired("DBSnapshotIdentifier"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type DeleteDBSnapshotOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Contains the result of a successful invocation of the following actions:
+ //
+ // CreateDBSnapshot
+ //
+ // DeleteDBSnapshot
+ //
+ // This data type is used as a response element in the DescribeDBSnapshots
+ // action.
+ DBSnapshot *DBSnapshot `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteDBSnapshotOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteDBSnapshotOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteDBSubnetGroupInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the database subnet group to delete.
+ //
+ // You cannot delete the default subnet group.
+ //
+ // Constraints: Must contain no more than 255 alphanumeric characters, periods,
+ // underscores, spaces, or hyphens. Must not be default.
+ //
+ // Example: mySubnetgroup
+ DBSubnetGroupName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteDBSubnetGroupInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteDBSubnetGroupInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
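+//
+// Illustrative sketch (editor's addition): deleting a manual DB snapshot by
+// identifier. "svc" is an assumed *rds.RDS client, and the snapshot must be
+// in the available state, per the constraint documented above.
+//
+//	_, err := svc.DeleteDBSnapshot(&rds.DeleteDBSnapshotInput{
+//		DBSnapshotIdentifier: aws.String("mydbsnapshot"),
+//	})
+//	if err != nil {
+//		// handle the error
+//	}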
+func (s *DeleteDBSubnetGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDBSubnetGroupInput"} + if s.DBSubnetGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("DBSubnetGroupName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteDBSubnetGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDBSubnetGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDBSubnetGroupOutput) GoString() string { + return s.String() +} + +type DeleteEventSubscriptionInput struct { + _ struct{} `type:"structure"` + + // The name of the RDS event notification subscription you want to delete. + SubscriptionName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteEventSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteEventSubscriptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteEventSubscriptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteEventSubscriptionInput"} + if s.SubscriptionName == nil { + invalidParams.Add(request.NewErrParamRequired("SubscriptionName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteEventSubscriptionOutput struct { + _ struct{} `type:"structure"` + + // Contains the results of a successful invocation of the DescribeEventSubscriptions + // action. + EventSubscription *EventSubscription `type:"structure"` +} + +// String returns the string representation +func (s DeleteEventSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteEventSubscriptionOutput) GoString() string { + return s.String() +} + +type DeleteOptionGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the option group to be deleted. + // + // You cannot delete default option groups. + OptionGroupName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteOptionGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteOptionGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
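+//
+// Illustrative sketch (editor's addition): deleting an event subscription and
+// reading back its final state from the response. "svc" is an assumed *rds.RDS
+// client, and the Status field on EventSubscription is an assumption for
+// illustration.
+//
+//	out, err := svc.DeleteEventSubscription(&rds.DeleteEventSubscriptionInput{
+//		SubscriptionName: aws.String("mysubscription"),
+//	})
+//	if err == nil && out.EventSubscription != nil {
+//		fmt.Println(aws.StringValue(out.EventSubscription.Status))
+//	}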
+func (s *DeleteOptionGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteOptionGroupInput"} + if s.OptionGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("OptionGroupName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteOptionGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteOptionGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteOptionGroupOutput) GoString() string { + return s.String() +} + +type DescribeAccountAttributesInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeAccountAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAccountAttributesInput) GoString() string { + return s.String() +} + +// Data returned by the DescribeAccountAttributes action. +type DescribeAccountAttributesOutput struct { + _ struct{} `type:"structure"` + + // A list of AccountQuota objects. Within this list, each quota has a name, + // a count of usage toward the quota maximum, and a maximum value for the quota. + AccountQuotas []*AccountQuota `locationNameList:"AccountQuota" type:"list"` +} + +// String returns the string representation +func (s DescribeAccountAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAccountAttributesOutput) GoString() string { + return s.String() +} + +type DescribeCertificatesInput struct { + _ struct{} `type:"structure"` + + // The user-supplied certificate identifier. If this parameter is specified, + // information for only the identified certificate is returned. This parameter + // isn't case-sensitive. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + CertificateIdentifier *string `type:"string"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribeCertificates + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeCertificatesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCertificatesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
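+//
+// Illustrative sketch (editor's addition): paging through DescribeCertificates
+// results by feeding each response's Marker back into the next request. "svc"
+// is an assumed *rds.RDS client.
+//
+//	input := &rds.DescribeCertificatesInput{MaxRecords: aws.Int64(20)}
+//	for {
+//		page, err := svc.DescribeCertificates(input)
+//		if err != nil {
+//			break // handle the error in real code
+//		}
+//		for _, cert := range page.Certificates {
+//			fmt.Println(aws.StringValue(cert.CertificateIdentifier))
+//		}
+//		if page.Marker == nil {
+//			break
+//		}
+//		input.Marker = page.Marker
+//	}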
+func (s *DescribeCertificatesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeCertificatesInput"} + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Data returned by the DescribeCertificates action. +type DescribeCertificatesOutput struct { + _ struct{} `type:"structure"` + + // The list of Certificate objects for the AWS account. + Certificates []*Certificate `locationNameList:"Certificate" type:"list"` + + // An optional pagination token provided by a previous DescribeCertificates + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords . + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeCertificatesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCertificatesOutput) GoString() string { + return s.String() +} + +type DescribeDBClusterParameterGroupsInput struct { + _ struct{} `type:"structure"` + + // The name of a specific DB cluster parameter group to return details for. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + DBClusterParameterGroupName *string `type:"string"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribeDBClusterParameterGroups + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeDBClusterParameterGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBClusterParameterGroupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDBClusterParameterGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDBClusterParameterGroupsInput"} + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeDBClusterParameterGroupsOutput struct { + _ struct{} `type:"structure"` + + // A list of DB cluster parameter groups. 
+ DBClusterParameterGroups []*DBClusterParameterGroup `locationNameList:"DBClusterParameterGroup" type:"list"` + + // An optional pagination token provided by a previous DescribeDBClusterParameterGroups + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDBClusterParameterGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBClusterParameterGroupsOutput) GoString() string { + return s.String() +} + +type DescribeDBClusterParametersInput struct { + _ struct{} `type:"structure"` + + // The name of a specific DB cluster parameter group to return parameter details + // for. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + DBClusterParameterGroupName *string `type:"string" required:"true"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribeDBClusterParameters + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // A value that indicates to return only parameters for a specific source. Parameter + // sources can be engine, service, or customer. + Source *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDBClusterParametersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBClusterParametersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDBClusterParametersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDBClusterParametersInput"} + if s.DBClusterParameterGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("DBClusterParameterGroupName")) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Provides details about a DB cluster parameter group including the parameters +// in the DB cluster parameter group. +type DescribeDBClusterParametersOutput struct { + _ struct{} `type:"structure"` + + // An optional pagination token provided by a previous DescribeDBClusterParameters + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords . + Marker *string `type:"string"` + + // Provides a list of parameters for the DB cluster parameter group. 
+ Parameters []*Parameter `locationNameList:"Parameter" type:"list"` +} + +// String returns the string representation +func (s DescribeDBClusterParametersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBClusterParametersOutput) GoString() string { + return s.String() +} + +type DescribeDBClusterSnapshotAttributesInput struct { + _ struct{} `type:"structure"` + + // The identifier for the DB cluster snapshot to describe the attributes for. + DBClusterSnapshotIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeDBClusterSnapshotAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBClusterSnapshotAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDBClusterSnapshotAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDBClusterSnapshotAttributesInput"} + if s.DBClusterSnapshotIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("DBClusterSnapshotIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeDBClusterSnapshotAttributesOutput struct { + _ struct{} `type:"structure"` + + // Contains the results of a successful call to the DescribeDBClusterSnapshotAttributes + // API action. + // + // Manual DB cluster snapshot attributes are used to authorize other AWS accounts + // to copy or restore a manual DB cluster snapshot. For more information, see + // the ModifyDBClusterSnapshotAttribute API action. + DBClusterSnapshotAttributesResult *DBClusterSnapshotAttributesResult `type:"structure"` +} + +// String returns the string representation +func (s DescribeDBClusterSnapshotAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBClusterSnapshotAttributesOutput) GoString() string { + return s.String() +} + +type DescribeDBClusterSnapshotsInput struct { + _ struct{} `type:"structure"` + + // The ID of the DB cluster to retrieve the list of DB cluster snapshots for. + // This parameter cannot be used in conjunction with the DBClusterSnapshotIdentifier + // parameter. This parameter is not case-sensitive. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + DBClusterIdentifier *string `type:"string"` + + // A specific DB cluster snapshot identifier to describe. This parameter cannot + // be used in conjunction with the DBClusterIdentifier parameter. This value + // is stored as a lowercase string. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + // + // If this identifier is for an automated snapshot, the SnapshotType parameter + // must also be specified. + DBClusterSnapshotIdentifier *string `type:"string"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // Set this value to true to include manual DB cluster snapshots that are public + // and can be copied or restored by any AWS account, otherwise set this value + // to false. 
+ // The default is false.
+ //
+ // You can share a manual DB cluster snapshot as public by using the ModifyDBClusterSnapshotAttribute
+ // API action.
+ IncludePublic *bool `type:"boolean"`
+
+ // Set this value to true to include shared manual DB cluster snapshots from
+ // other AWS accounts that this AWS account has been given permission to copy
+ // or restore, otherwise set this value to false. The default is false.
+ //
+ // You can give an AWS account permission to restore a manual DB cluster snapshot
+ // from another AWS account by the ModifyDBClusterSnapshotAttribute API action.
+ IncludeShared *bool `type:"boolean"`
+
+ // An optional pagination token provided by a previous DescribeDBClusterSnapshots
+ // request. If this parameter is specified, the response includes only records
+ // beyond the marker, up to the value specified by MaxRecords.
+ Marker *string `type:"string"`
+
+ // The maximum number of records to include in the response. If more records
+ // exist than the specified MaxRecords value, a pagination token called a marker
+ // is included in the response so that the remaining results can be retrieved.
+ //
+ // Default: 100
+ //
+ // Constraints: Minimum 20, maximum 100.
+ MaxRecords *int64 `type:"integer"`
+
+ // The type of DB cluster snapshots to be returned. You can specify one of the
+ // following values:
+ //
+ // automated - Return all DB cluster snapshots that have been automatically
+ // taken by Amazon RDS for my AWS account.
+ //
+ // manual - Return all DB cluster snapshots that have been taken by my AWS
+ // account.
+ //
+ // shared - Return all manual DB cluster snapshots that have been shared
+ // to my AWS account.
+ //
+ // public - Return all DB cluster snapshots that have been marked as public.
+ //
+ // If you don't specify a SnapshotType value, then both automated and manual
+ // DB cluster snapshots are returned. You can include shared DB cluster snapshots
+ // with these results by setting the IncludeShared parameter to true. You can
+ // include public DB cluster snapshots with these results by setting the IncludePublic
+ // parameter to true.
+ //
+ // The IncludeShared and IncludePublic parameters don't apply for SnapshotType
+ // values of manual or automated. The IncludePublic parameter doesn't apply
+ // when SnapshotType is set to shared. The IncludeShared parameter doesn't apply
+ // when SnapshotType is set to public.
+ SnapshotType *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeDBClusterSnapshotsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeDBClusterSnapshotsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeDBClusterSnapshotsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeDBClusterSnapshotsInput"}
+ if s.Filters != nil {
+ for i, v := range s.Filters {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Provides a list of DB cluster snapshots for the user as the result of a call
+// to the DescribeDBClusterSnapshots action.
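+//
+// Illustrative sketch (editor's addition): listing this account's automated
+// and manual DB cluster snapshots plus manual snapshots shared from other AWS
+// accounts. "svc" is an assumed *rds.RDS client; per the input documentation
+// above, leaving SnapshotType unset returns both automated and manual
+// snapshots, and IncludeShared adds the shared ones.
+//
+//	out, err := svc.DescribeDBClusterSnapshots(&rds.DescribeDBClusterSnapshotsInput{
+//		IncludeShared: aws.Bool(true),
+//	})
+//	if err == nil {
+//		for _, snap := range out.DBClusterSnapshots {
+//			fmt.Println(aws.StringValue(snap.DBClusterSnapshotIdentifier))
+//		}
+//	}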
+type DescribeDBClusterSnapshotsOutput struct { + _ struct{} `type:"structure"` + + // Provides a list of DB cluster snapshots for the user. + DBClusterSnapshots []*DBClusterSnapshot `locationNameList:"DBClusterSnapshot" type:"list"` + + // An optional pagination token provided by a previous DescribeDBClusterSnapshots + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDBClusterSnapshotsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBClusterSnapshotsOutput) GoString() string { + return s.String() +} + +type DescribeDBClustersInput struct { + _ struct{} `type:"structure"` + + // The user-supplied DB cluster identifier. If this parameter is specified, + // information from only the specific DB cluster is returned. This parameter + // isn't case-sensitive. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + DBClusterIdentifier *string `type:"string"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribeDBClusters request. + // If this parameter is specified, the response includes only records beyond + // the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeDBClustersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBClustersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDBClustersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDBClustersInput"} + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the result of a successful invocation of the DescribeDBClusters +// action. +type DescribeDBClustersOutput struct { + _ struct{} `type:"structure"` + + // Contains a list of DB clusters for the user. + DBClusters []*DBCluster `locationNameList:"DBCluster" type:"list"` + + // A pagination token that can be used in a subsequent DescribeDBClusters request. 
+ Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDBClustersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBClustersOutput) GoString() string { + return s.String() +} + +type DescribeDBEngineVersionsInput struct { + _ struct{} `type:"structure"` + + // The name of a specific DB parameter group family to return details for. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + DBParameterGroupFamily *string `type:"string"` + + // Indicates that only the default version of the specified engine or engine + // and major version combination is returned. + DefaultOnly *bool `type:"boolean"` + + // The database engine to return. + Engine *string `type:"string"` + + // The database engine version to return. + // + // Example: 5.1.49 + EngineVersion *string `type:"string"` + + // Not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // If this parameter is specified, and if the requested engine supports the + // CharacterSetName parameter for CreateDBInstance, the response includes a + // list of supported character sets for each engine version. + ListSupportedCharacterSets *bool `type:"boolean"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more than the + // MaxRecords value is available, a pagination token called a marker is included + // in the response so that the following results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeDBEngineVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBEngineVersionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDBEngineVersionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDBEngineVersionsInput"} + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the result of a successful invocation of the DescribeDBEngineVersions +// action. +type DescribeDBEngineVersionsOutput struct { + _ struct{} `type:"structure"` + + // A list of DBEngineVersion elements. + DBEngineVersions []*DBEngineVersion `locationNameList:"DBEngineVersion" type:"list"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. 
+ Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDBEngineVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBEngineVersionsOutput) GoString() string { + return s.String() +} + +type DescribeDBInstancesInput struct { + _ struct{} `type:"structure"` + + // The user-supplied instance identifier. If this parameter is specified, information + // from only the specific DB instance is returned. This parameter isn't case-sensitive. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + DBInstanceIdentifier *string `type:"string"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribeDBInstances request. + // If this parameter is specified, the response includes only records beyond + // the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeDBInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDBInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDBInstancesInput"} + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the result of a successful invocation of the DescribeDBInstances +// action. +type DescribeDBInstancesOutput struct { + _ struct{} `type:"structure"` + + // A list of DBInstance instances. + DBInstances []*DBInstance `locationNameList:"DBInstance" type:"list"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords . + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDBInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBInstancesOutput) GoString() string { + return s.String() +} + +// This data type is used as a response element to DescribeDBLogFiles. +type DescribeDBLogFilesDetails struct { + _ struct{} `type:"structure"` + + // A POSIX timestamp when the last log entry was written. + LastWritten *int64 `type:"long"` + + // The name of the log file for the specified DB instance. + LogFileName *string `type:"string"` + + // The size, in bytes, of the log file for the specified DB instance. 
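+ //
+ // A conversion sketch for LastWritten above, assuming it carries milliseconds
+ // (as the DescribeDBLogFilesInput documentation suggests) and that the details
+ // value is named d:
+ //
+ //    ms := aws.Int64Value(d.LastWritten)
+ //    t := time.Unix(ms/1000, (ms%1000)*int64(time.Millisecond))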
+ Size *int64 `type:"long"` +} + +// String returns the string representation +func (s DescribeDBLogFilesDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBLogFilesDetails) GoString() string { + return s.String() +} + +type DescribeDBLogFilesInput struct { + _ struct{} `type:"structure"` + + // The customer-assigned name of the DB instance that contains the log files + // you want to list. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + DBInstanceIdentifier *string `type:"string" required:"true"` + + // Filters the available log files for files written since the specified date, + // in POSIX timestamp format with milliseconds. + FileLastWritten *int64 `type:"long"` + + // Filters the available log files for files larger than the specified size. + FileSize *int64 `type:"long"` + + // Filters the available log files for log file names that contain the specified + // string. + FilenameContains *string `type:"string"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // The pagination token provided in the previous request. If this parameter + // is specified the response includes only records beyond the marker, up to + // MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeDBLogFilesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBLogFilesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDBLogFilesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDBLogFilesInput"} + if s.DBInstanceIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("DBInstanceIdentifier")) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The response from a call to DescribeDBLogFiles. +type DescribeDBLogFilesOutput struct { + _ struct{} `type:"structure"` + + // The DB log files returned. + DescribeDBLogFiles []*DescribeDBLogFilesDetails `locationNameList:"DescribeDBLogFilesDetails" type:"list"` + + // A pagination token that can be used in a subsequent DescribeDBLogFiles request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDBLogFilesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBLogFilesOutput) GoString() string { + return s.String() +} + +type DescribeDBParameterGroupsInput struct { + _ struct{} `type:"structure"` + + // The name of a specific DB parameter group to return details for. 
+ // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + DBParameterGroupName *string `type:"string"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribeDBParameterGroups + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeDBParameterGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBParameterGroupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDBParameterGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDBParameterGroupsInput"} + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the result of a successful invocation of the DescribeDBParameterGroups +// action. +type DescribeDBParameterGroupsOutput struct { + _ struct{} `type:"structure"` + + // A list of DBParameterGroup instances. + DBParameterGroups []*DBParameterGroup `locationNameList:"DBParameterGroup" type:"list"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDBParameterGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBParameterGroupsOutput) GoString() string { + return s.String() +} + +type DescribeDBParametersInput struct { + _ struct{} `type:"structure"` + + // The name of a specific DB parameter group to return details for. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + DBParameterGroupName *string `type:"string" required:"true"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribeDBParameters + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. 
If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // The parameter types to return. + // + // Default: All parameter types returned + // + // Valid Values: user | system | engine-default + Source *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDBParametersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBParametersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDBParametersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDBParametersInput"} + if s.DBParameterGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("DBParameterGroupName")) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the result of a successful invocation of the DescribeDBParameters +// action. +type DescribeDBParametersOutput struct { + _ struct{} `type:"structure"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // A list of Parameter values. + Parameters []*Parameter `locationNameList:"Parameter" type:"list"` +} + +// String returns the string representation +func (s DescribeDBParametersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBParametersOutput) GoString() string { + return s.String() +} + +type DescribeDBSecurityGroupsInput struct { + _ struct{} `type:"structure"` + + // The name of the DB security group to return details for. + DBSecurityGroupName *string `type:"string"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribeDBSecurityGroups + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeDBSecurityGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBSecurityGroupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
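+ // Validation runs automatically when a request is sent, but it can also be
+ // invoked directly; an illustrative sketch:
+ //
+ //    params := &rds.DescribeDBSecurityGroupsInput{MaxRecords: aws.Int64(20)}
+ //    if err := params.Validate(); err != nil {
+ //        // the request would be rejected before being sent
+ //    }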
+func (s *DescribeDBSecurityGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDBSecurityGroupsInput"} + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the result of a successful invocation of the DescribeDBSecurityGroups +// action. +type DescribeDBSecurityGroupsOutput struct { + _ struct{} `type:"structure"` + + // A list of DBSecurityGroup instances. + DBSecurityGroups []*DBSecurityGroup `locationNameList:"DBSecurityGroup" type:"list"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDBSecurityGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBSecurityGroupsOutput) GoString() string { + return s.String() +} + +type DescribeDBSnapshotAttributesInput struct { + _ struct{} `type:"structure"` + + // The identifier for the DB snapshot to describe the attributes for. + DBSnapshotIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeDBSnapshotAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBSnapshotAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDBSnapshotAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDBSnapshotAttributesInput"} + if s.DBSnapshotIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("DBSnapshotIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeDBSnapshotAttributesOutput struct { + _ struct{} `type:"structure"` + + // Contains the results of a successful call to the DescribeDBSnapshotAttributes + // API action. + // + // Manual DB snapshot attributes are used to authorize other AWS accounts to + // copy or restore a manual DB snapshot. For more information, see the ModifyDBSnapshotAttribute + // API action. + DBSnapshotAttributesResult *DBSnapshotAttributesResult `type:"structure"` +} + +// String returns the string representation +func (s DescribeDBSnapshotAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBSnapshotAttributesOutput) GoString() string { + return s.String() +} + +type DescribeDBSnapshotsInput struct { + _ struct{} `type:"structure"` + + // The ID of the DB instance to retrieve the list of DB snapshots for. This + // parameter cannot be used in conjunction with DBSnapshotIdentifier. This parameter + // is not case-sensitive. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + DBInstanceIdentifier *string `type:"string"` + + // A specific DB snapshot identifier to describe. 
This parameter cannot be used
+ // in conjunction with DBInstanceIdentifier. This value is stored as a lowercase
+ // string.
+ //
+ // Constraints:
+ //
+ // Must be 1 to 255 alphanumeric characters.
+ //
+ // First character must be a letter.
+ //
+ // Cannot end with a hyphen or contain two consecutive hyphens.
+ //
+ // If this identifier is for an automated snapshot, the SnapshotType parameter
+ // must also be specified.
+ DBSnapshotIdentifier *string `type:"string"`
+
+ // This parameter is not currently supported.
+ Filters []*Filter `locationNameList:"Filter" type:"list"`
+
+ // Set this value to true to include manual DB snapshots that are public and
+ // can be copied or restored by any AWS account; otherwise, set this value to
+ // false. The default is false.
+ //
+ // You can share a manual DB snapshot as public by using the ModifyDBSnapshotAttribute
+ // API.
+ IncludePublic *bool `type:"boolean"`
+
+ // Set this value to true to include shared manual DB snapshots from other AWS
+ // accounts that this AWS account has been given permission to copy or restore;
+ // otherwise, set this value to false. The default is false.
+ //
+ // You can give an AWS account permission to restore a manual DB snapshot from
+ // another AWS account by using the ModifyDBSnapshotAttribute API action.
+ IncludeShared *bool `type:"boolean"`
+
+ // An optional pagination token provided by a previous DescribeDBSnapshots request.
+ // If this parameter is specified, the response includes only records beyond
+ // the marker, up to the value specified by MaxRecords.
+ Marker *string `type:"string"`
+
+ // The maximum number of records to include in the response. If more records
+ // exist than the specified MaxRecords value, a pagination token called a marker
+ // is included in the response so that the remaining results can be retrieved.
+ //
+ // Default: 100
+ //
+ // Constraints: Minimum 20, maximum 100.
+ MaxRecords *int64 `type:"integer"`
+
+ // The type of snapshots to be returned. You can specify one of the following
+ // values:
+ //
+ // automated - Return all DB snapshots that have been automatically taken
+ // by Amazon RDS for my AWS account.
+ //
+ // manual - Return all DB snapshots that have been taken by my AWS account.
+ //
+ // shared - Return all manual DB snapshots that have been shared to my AWS
+ // account.
+ //
+ // public - Return all DB snapshots that have been marked as public.
+ //
+ // If you don't specify a SnapshotType value, then both automated and manual
+ // snapshots are returned. You can include shared snapshots with these results
+ // by setting the IncludeShared parameter to true. You can include public snapshots
+ // with these results by setting the IncludePublic parameter to true.
+ //
+ // The IncludeShared and IncludePublic parameters don't apply for SnapshotType
+ // values of manual or automated. The IncludePublic parameter doesn't apply
+ // when SnapshotType is set to shared. The IncludeShared parameter doesn't apply
+ // when SnapshotType is set to public.
+ SnapshotType *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeDBSnapshotsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeDBSnapshotsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
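+ // As an illustration of the SnapshotType rules above (svc is an assumed
+ // *rds.RDS client), this request returns automated and manual snapshots plus
+ // manual snapshots shared from other AWS accounts:
+ //
+ //    out, err := svc.DescribeDBSnapshots(&rds.DescribeDBSnapshotsInput{
+ //        IncludeShared: aws.Bool(true),
+ //    })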
+func (s *DescribeDBSnapshotsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDBSnapshotsInput"} + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the result of a successful invocation of the DescribeDBSnapshots +// action. +type DescribeDBSnapshotsOutput struct { + _ struct{} `type:"structure"` + + // A list of DBSnapshot instances. + DBSnapshots []*DBSnapshot `locationNameList:"DBSnapshot" type:"list"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDBSnapshotsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBSnapshotsOutput) GoString() string { + return s.String() +} + +type DescribeDBSubnetGroupsInput struct { + _ struct{} `type:"structure"` + + // The name of the DB subnet group to return details for. + DBSubnetGroupName *string `type:"string"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribeDBSubnetGroups + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeDBSubnetGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBSubnetGroupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDBSubnetGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDBSubnetGroupsInput"} + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the result of a successful invocation of the DescribeDBSubnetGroups +// action. +type DescribeDBSubnetGroupsOutput struct { + _ struct{} `type:"structure"` + + // A list of DBSubnetGroup instances. + DBSubnetGroups []*DBSubnetGroup `locationNameList:"DBSubnetGroup" type:"list"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. 
+ Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDBSubnetGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBSubnetGroupsOutput) GoString() string { + return s.String() +} + +type DescribeEngineDefaultClusterParametersInput struct { + _ struct{} `type:"structure"` + + // The name of the DB cluster parameter group family to return engine parameter + // information for. + DBParameterGroupFamily *string `type:"string" required:"true"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribeEngineDefaultClusterParameters + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeEngineDefaultClusterParametersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEngineDefaultClusterParametersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeEngineDefaultClusterParametersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeEngineDefaultClusterParametersInput"} + if s.DBParameterGroupFamily == nil { + invalidParams.Add(request.NewErrParamRequired("DBParameterGroupFamily")) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeEngineDefaultClusterParametersOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the DescribeEngineDefaultParameters + // action. + EngineDefaults *EngineDefaults `type:"structure"` +} + +// String returns the string representation +func (s DescribeEngineDefaultClusterParametersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEngineDefaultClusterParametersOutput) GoString() string { + return s.String() +} + +type DescribeEngineDefaultParametersInput struct { + _ struct{} `type:"structure"` + + // The name of the DB parameter group family. + DBParameterGroupFamily *string `type:"string" required:"true"` + + // Not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribeEngineDefaultParameters + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. 
If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeEngineDefaultParametersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEngineDefaultParametersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeEngineDefaultParametersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeEngineDefaultParametersInput"} + if s.DBParameterGroupFamily == nil { + invalidParams.Add(request.NewErrParamRequired("DBParameterGroupFamily")) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeEngineDefaultParametersOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the DescribeEngineDefaultParameters + // action. + EngineDefaults *EngineDefaults `type:"structure"` +} + +// String returns the string representation +func (s DescribeEngineDefaultParametersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEngineDefaultParametersOutput) GoString() string { + return s.String() +} + +type DescribeEventCategoriesInput struct { + _ struct{} `type:"structure"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // The type of source that will be generating the events. + // + // Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot + SourceType *string `type:"string"` +} + +// String returns the string representation +func (s DescribeEventCategoriesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventCategoriesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeEventCategoriesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeEventCategoriesInput"} + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Data returned from the DescribeEventCategories action. +type DescribeEventCategoriesOutput struct { + _ struct{} `type:"structure"` + + // A list of EventCategoriesMap data types. 
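+ //
+ // An illustrative call that retrieves the categories for DB instance events
+ // (svc is an assumed *rds.RDS client):
+ //
+ //    out, err := svc.DescribeEventCategories(&rds.DescribeEventCategoriesInput{
+ //        SourceType: aws.String("db-instance"),
+ //    })
+ //    // on success, inspect out.EventCategoriesMapList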
+ EventCategoriesMapList []*EventCategoriesMap `locationNameList:"EventCategoriesMap" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeEventCategoriesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeEventCategoriesOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeEventSubscriptionsInput struct {
+ _ struct{} `type:"structure"`
+
+ // This parameter is not currently supported.
+ Filters []*Filter `locationNameList:"Filter" type:"list"`
+
+ // An optional pagination token provided by a previous DescribeEventSubscriptions
+ // request. If this parameter is specified, the response includes only records
+ // beyond the marker, up to the value specified by MaxRecords.
+ Marker *string `type:"string"`
+
+ // The maximum number of records to include in the response. If more records
+ // exist than the specified MaxRecords value, a pagination token called a marker
+ // is included in the response so that the remaining results can be retrieved.
+ //
+ // Default: 100
+ //
+ // Constraints: Minimum 20, maximum 100.
+ MaxRecords *int64 `type:"integer"`
+
+ // The name of the RDS event notification subscription you want to describe.
+ SubscriptionName *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeEventSubscriptionsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeEventSubscriptionsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeEventSubscriptionsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeEventSubscriptionsInput"}
+ if s.Filters != nil {
+ for i, v := range s.Filters {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Data returned by the DescribeEventSubscriptions action.
+type DescribeEventSubscriptionsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of EventSubscriptions data types.
+ EventSubscriptionsList []*EventSubscription `locationNameList:"EventSubscription" type:"list"`
+
+ // An optional pagination token provided by a previous DescribeEventSubscriptions
+ // request. If this parameter is specified, the response includes only records
+ // beyond the marker, up to the value specified by MaxRecords.
+ Marker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeEventSubscriptionsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeEventSubscriptionsOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeEventsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The number of minutes to retrieve events for.
+ //
+ // Default: 60
+ Duration *int64 `type:"integer"`
+
+ // The end of the time interval for which to retrieve events, specified in ISO
+ // 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia
+ // page.
(http://en.wikipedia.org/wiki/ISO_8601)
+ //
+ // Example: 2009-07-08T18:00Z
+ EndTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // A list of event categories that trigger notifications for an event notification
+ // subscription.
+ EventCategories []*string `locationNameList:"EventCategory" type:"list"`
+
+ // This parameter is not currently supported.
+ Filters []*Filter `locationNameList:"Filter" type:"list"`
+
+ // An optional pagination token provided by a previous DescribeEvents request.
+ // If this parameter is specified, the response includes only records beyond
+ // the marker, up to the value specified by MaxRecords.
+ Marker *string `type:"string"`
+
+ // The maximum number of records to include in the response. If more records
+ // exist than the specified MaxRecords value, a pagination token called a marker
+ // is included in the response so that the remaining results can be retrieved.
+ //
+ // Default: 100
+ //
+ // Constraints: Minimum 20, maximum 100.
+ MaxRecords *int64 `type:"integer"`
+
+ // The identifier of the event source for which events will be returned. If
+ // not specified, then all sources are included in the response.
+ //
+ // Constraints:
+ //
+ // If SourceIdentifier is supplied, SourceType must also be provided.
+ //
+ // If the source type is DBInstance, then a DBInstanceIdentifier must be
+ // supplied.
+ //
+ // If the source type is DBSecurityGroup, a DBSecurityGroupName must be supplied.
+ //
+ // If the source type is DBParameterGroup, a DBParameterGroupName must be
+ // supplied.
+ //
+ // If the source type is DBSnapshot, a DBSnapshotIdentifier must be supplied.
+ //
+ // Cannot end with a hyphen or contain two consecutive hyphens.
+ SourceIdentifier *string `type:"string"`
+
+ // The event source to retrieve events for. If no value is specified, all events
+ // are returned.
+ SourceType *string `type:"string" enum:"SourceType"`
+
+ // The beginning of the time interval to retrieve events for, specified in ISO
+ // 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia
+ // page. (http://en.wikipedia.org/wiki/ISO_8601)
+ //
+ // Example: 2009-07-08T18:00Z
+ StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+}
+
+// String returns the string representation
+func (s DescribeEventsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeEventsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeEventsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeEventsInput"}
+ if s.Filters != nil {
+ for i, v := range s.Filters {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Contains the result of a successful invocation of the DescribeEvents action.
+type DescribeEventsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of Event instances.
+ Events []*Event `locationNameList:"Event" type:"list"`
+
+ // An optional pagination token provided by a previous DescribeEvents request.
+ // If this parameter is specified, the response includes only records beyond
+ // the marker, up to the value specified by MaxRecords.
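+ //
+ // A sketch of fetching the last two hours of events for one instance
+ // (illustrative; svc is an assumed *rds.RDS client and the identifier is a
+ // placeholder):
+ //
+ //    out, err := svc.DescribeEvents(&rds.DescribeEventsInput{
+ //        SourceType:       aws.String("db-instance"),
+ //        SourceIdentifier: aws.String("mydbinstance"),
+ //        Duration:         aws.Int64(120),
+ //    })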
+ Marker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeEventsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeEventsOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeOptionGroupOptionsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the engine to describe option group options for. This parameter
+ // is required.
+ EngineName *string `type:"string" required:"true"`
+
+ // This parameter is not currently supported.
+ Filters []*Filter `locationNameList:"Filter" type:"list"`
+
+ // If specified, filters the results to include only options for the specified
+ // major engine version.
+ MajorEngineVersion *string `type:"string"`
+
+ // An optional pagination token provided by a previous request. If this parameter
+ // is specified, the response includes only records beyond the marker, up to
+ // the value specified by MaxRecords.
+ Marker *string `type:"string"`
+
+ // The maximum number of records to include in the response. If more records
+ // exist than the specified MaxRecords value, a pagination token called a marker
+ // is included in the response so that the remaining results can be retrieved.
+ //
+ // Default: 100
+ //
+ // Constraints: Minimum 20, maximum 100.
+ MaxRecords *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s DescribeOptionGroupOptionsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeOptionGroupOptionsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeOptionGroupOptionsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeOptionGroupOptionsInput"}
+ if s.EngineName == nil {
+ invalidParams.Add(request.NewErrParamRequired("EngineName"))
+ }
+ if s.Filters != nil {
+ for i, v := range s.Filters {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type DescribeOptionGroupOptionsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An optional pagination token provided by a previous request. If this parameter
+ // is specified, the response includes only records beyond the marker, up to
+ // the value specified by MaxRecords.
+ Marker *string `type:"string"`
+
+ // List of available option group options.
+ OptionGroupOptions []*OptionGroupOption `locationNameList:"OptionGroupOption" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeOptionGroupOptionsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeOptionGroupOptionsOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeOptionGroupsInput struct {
+ _ struct{} `type:"structure"`
+
+ // Filters the list of option groups to only include groups associated with
+ // a specific database engine.
+ EngineName *string `type:"string"`
+
+ // This parameter is not currently supported.
+ Filters []*Filter `locationNameList:"Filter" type:"list"`
+
+ // Filters the list of option groups to only include groups associated with
+ // a specific database engine version. If specified, then EngineName must also
+ // be specified.
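+ //
+ // For example (illustrative; svc is an assumed *rds.RDS client), listing the
+ // option groups for one engine version:
+ //
+ //    out, err := svc.DescribeOptionGroups(&rds.DescribeOptionGroupsInput{
+ //        EngineName:         aws.String("oracle-ee"),
+ //        MajorEngineVersion: aws.String("12.1"),
+ //    })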
+ MajorEngineVersion *string `type:"string"` + + // An optional pagination token provided by a previous DescribeOptionGroups + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // The name of the option group to describe. Cannot be supplied together with + // EngineName or MajorEngineVersion. + OptionGroupName *string `type:"string"` +} + +// String returns the string representation +func (s DescribeOptionGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeOptionGroupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeOptionGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeOptionGroupsInput"} + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// List of option groups. +type DescribeOptionGroupsOutput struct { + _ struct{} `type:"structure"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // List of option groups. + OptionGroupsList []*OptionGroup `locationNameList:"OptionGroup" type:"list"` +} + +// String returns the string representation +func (s DescribeOptionGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeOptionGroupsOutput) GoString() string { + return s.String() +} + +type DescribeOrderableDBInstanceOptionsInput struct { + _ struct{} `type:"structure"` + + // The DB instance class filter value. Specify this parameter to show only the + // available offerings matching the specified DB instance class. + DBInstanceClass *string `type:"string"` + + // The name of the engine to retrieve DB instance options for. + Engine *string `type:"string" required:"true"` + + // The engine version filter value. Specify this parameter to show only the + // available offerings matching the specified engine version. + EngineVersion *string `type:"string"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // The license model filter value. Specify this parameter to show only the available + // offerings matching the specified license model. + LicenseModel *string `type:"string"` + + // An optional pagination token provided by a previous DescribeOrderableDBInstanceOptions + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords . + Marker *string `type:"string"` + + // The maximum number of records to include in the response. 
If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // The VPC filter value. Specify this parameter to show only the available VPC + // or non-VPC offerings. + Vpc *bool `type:"boolean"` +} + +// String returns the string representation +func (s DescribeOrderableDBInstanceOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeOrderableDBInstanceOptionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeOrderableDBInstanceOptionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeOrderableDBInstanceOptionsInput"} + if s.Engine == nil { + invalidParams.Add(request.NewErrParamRequired("Engine")) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the result of a successful invocation of the DescribeOrderableDBInstanceOptions +// action. +type DescribeOrderableDBInstanceOptionsOutput struct { + _ struct{} `type:"structure"` + + // An optional pagination token provided by a previous OrderableDBInstanceOptions + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords . + Marker *string `type:"string"` + + // An OrderableDBInstanceOption structure containing information about orderable + // options for the DB instance. + OrderableDBInstanceOptions []*OrderableDBInstanceOption `locationNameList:"OrderableDBInstanceOption" type:"list"` +} + +// String returns the string representation +func (s DescribeOrderableDBInstanceOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeOrderableDBInstanceOptionsOutput) GoString() string { + return s.String() +} + +type DescribePendingMaintenanceActionsInput struct { + _ struct{} `type:"structure"` + + // A filter that specifies one or more resources to return pending maintenance + // actions for. + // + // Supported filters: + // + // db-instance-id - Accepts DB instance identifiers and DB instance Amazon + // Resource Names (ARNs). The results list will only include pending maintenance + // actions for the DB instances identified by these ARNs. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribePendingMaintenanceActions + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to a number of records specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. 
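+ //
+ // An illustrative request using the db-instance-id filter described above
+ // (svc is an assumed *rds.RDS client; the ARN is a placeholder):
+ //
+ //    out, err := svc.DescribePendingMaintenanceActions(
+ //        &rds.DescribePendingMaintenanceActionsInput{
+ //            Filters: []*rds.Filter{{
+ //                Name:   aws.String("db-instance-id"),
+ //                Values: []*string{aws.String("arn:aws:rds:us-east-1:123456789012:db:mydb")},
+ //            }},
+ //        })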
+ MaxRecords *int64 `type:"integer"`
+
+ // The ARN of a resource to return pending maintenance actions for.
+ ResourceIdentifier *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribePendingMaintenanceActionsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribePendingMaintenanceActionsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribePendingMaintenanceActionsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribePendingMaintenanceActionsInput"}
+ if s.Filters != nil {
+ for i, v := range s.Filters {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Data returned from the DescribePendingMaintenanceActions action.
+type DescribePendingMaintenanceActionsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An optional pagination token provided by a previous DescribePendingMaintenanceActions
+ // request. If this parameter is specified, the response includes only records
+ // beyond the marker, up to a number of records specified by MaxRecords.
+ Marker *string `type:"string"`
+
+ // A list of the pending maintenance actions for the resource.
+ PendingMaintenanceActions []*ResourcePendingMaintenanceActions `locationNameList:"ResourcePendingMaintenanceActions" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribePendingMaintenanceActionsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribePendingMaintenanceActionsOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeReservedDBInstancesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The DB instance class filter value. Specify this parameter to show only those
+ // reservations matching the specified DB instance class.
+ DBInstanceClass *string `type:"string"`
+
+ // The duration filter value, specified in years or seconds. Specify this parameter
+ // to show only reservations for this duration.
+ //
+ // Valid Values: 1 | 3 | 31536000 | 94608000
+ Duration *string `type:"string"`
+
+ // This parameter is not currently supported.
+ Filters []*Filter `locationNameList:"Filter" type:"list"`
+
+ // An optional pagination token provided by a previous request. If this parameter
+ // is specified, the response includes only records beyond the marker, up to
+ // the value specified by MaxRecords.
+ Marker *string `type:"string"`
+
+ // The maximum number of records to include in the response. If more than the
+ // MaxRecords value is available, a pagination token called a marker is included
+ // in the response so that the following results can be retrieved.
+ //
+ // Default: 100
+ //
+ // Constraints: Minimum 20, maximum 100.
+ MaxRecords *int64 `type:"integer"`
+
+ // The Multi-AZ filter value. Specify this parameter to show only those reservations
+ // matching the specified Multi-AZ parameter.
+ MultiAZ *bool `type:"boolean"`
+
+ // The offering type filter value. Specify this parameter to show only the available
+ // offerings matching the specified offering type.
+ // + // Valid Values: "Partial Upfront" | "All Upfront" | "No Upfront" + OfferingType *string `type:"string"` + + // The product description filter value. Specify this parameter to show only + // those reservations matching the specified product description. + ProductDescription *string `type:"string"` + + // The reserved DB instance identifier filter value. Specify this parameter + // to show only the reservation that matches the specified reservation ID. + ReservedDBInstanceId *string `type:"string"` + + // The offering identifier filter value. Specify this parameter to show only + // purchased reservations matching the specified offering identifier. + ReservedDBInstancesOfferingId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeReservedDBInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedDBInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeReservedDBInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeReservedDBInstancesInput"} + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeReservedDBInstancesOfferingsInput struct { + _ struct{} `type:"structure"` + + // The DB instance class filter value. Specify this parameter to show only the + // available offerings matching the specified DB instance class. + DBInstanceClass *string `type:"string"` + + // Duration filter value, specified in years or seconds. Specify this parameter + // to show only reservations for this duration. + // + // Valid Values: 1 | 3 | 31536000 | 94608000 + Duration *string `type:"string"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more than the + // MaxRecords value is available, a pagination token called a marker is included + // in the response so that the following results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // The Multi-AZ filter value. Specify this parameter to show only the available + // offerings matching the specified Multi-AZ parameter. + MultiAZ *bool `type:"boolean"` + + // The offering type filter value. Specify this parameter to show only the available + // offerings matching the specified offering type. + // + // Valid Values: "Partial Upfront" | "All Upfront" | "No Upfront" + OfferingType *string `type:"string"` + + // Product description filter value. Specify this parameter to show only the + // available offerings matching the specified product description. + ProductDescription *string `type:"string"` + + // The offering identifier filter value. Specify this parameter to show only + // the available offering that matches the specified reservation identifier. 
+ //
+ // Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706
+ ReservedDBInstancesOfferingId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeReservedDBInstancesOfferingsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeReservedDBInstancesOfferingsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeReservedDBInstancesOfferingsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeReservedDBInstancesOfferingsInput"}
+ if s.Filters != nil {
+ for i, v := range s.Filters {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Contains the result of a successful invocation of the DescribeReservedDBInstancesOfferings
+// action.
+type DescribeReservedDBInstancesOfferingsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An optional pagination token provided by a previous request. If this parameter
+ // is specified, the response includes only records beyond the marker, up to
+ // the value specified by MaxRecords.
+ Marker *string `type:"string"`
+
+ // A list of reserved DB instance offerings.
+ ReservedDBInstancesOfferings []*ReservedDBInstancesOffering `locationNameList:"ReservedDBInstancesOffering" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeReservedDBInstancesOfferingsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeReservedDBInstancesOfferingsOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the result of a successful invocation of the DescribeReservedDBInstances
+// action.
+type DescribeReservedDBInstancesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An optional pagination token provided by a previous request. If this parameter
+ // is specified, the response includes only records beyond the marker, up to
+ // the value specified by MaxRecords.
+ Marker *string `type:"string"`
+
+ // A list of reserved DB instances.
+ ReservedDBInstances []*ReservedDBInstance `locationNameList:"ReservedDBInstance" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeReservedDBInstancesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeReservedDBInstancesOutput) GoString() string {
+ return s.String()
+}
+
+// An Active Directory Domain membership record associated with the DB instance.
+type DomainMembership struct {
+ _ struct{} `type:"structure"`
+
+ // The identifier of the Active Directory Domain.
+ Domain *string `type:"string"`
+
+ // The fully qualified domain name of the Active Directory Domain.
+ FQDN *string `type:"string"`
+
+ // The name of the IAM role to be used when making API calls to the Directory
+ // Service.
+ IAMRoleName *string `type:"string"`
+
+ // The status of the DB instance's Active Directory Domain membership, such
+ // as joined, pending-join, or failed.
+ Status *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DomainMembership) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DomainMembership) GoString() string {
+ return s.String()
+}
+
+type DownloadDBLogFilePortionInput struct {
+ _ struct{} `type:"structure"`
+
+ // The customer-assigned name of the DB instance that contains the log files
+ // you want to list.
+ //
+ // Constraints:
+ //
+ // Must contain from 1 to 63 alphanumeric characters or hyphens
+ //
+ // First character must be a letter
+ //
+ // Cannot end with a hyphen or contain two consecutive hyphens
+ DBInstanceIdentifier *string `type:"string" required:"true"`
+
+ // The name of the log file to be downloaded.
+ LogFileName *string `type:"string" required:"true"`
+
+ // The pagination token provided in the previous request or "0". If the Marker
+ // parameter is specified, the response includes only records beyond the marker
+ // until the end of the file or up to NumberOfLines.
+ Marker *string `type:"string"`
+
+ // The number of lines to download. If the number of lines specified results
+ // in a file over 1 MB in size, the file will be truncated at 1 MB in size.
+ //
+ // If the NumberOfLines parameter is specified, then the block of lines returned
+ // can be from the beginning or the end of the log file, depending on the value
+ // of the Marker parameter.
+ //
+ // If neither Marker nor NumberOfLines is specified, the entire log file
+ // is returned up to a maximum of 10000 lines, starting with the most recent
+ // log entries first.
+ //
+ // If NumberOfLines is specified and Marker is not specified, then the most
+ // recent lines from the end of the log file are returned.
+ //
+ // If Marker is specified as "0", then the specified number of lines from
+ // the beginning of the log file are returned.
+ //
+ // You can download the log file in blocks of lines by specifying the size
+ // of the block using the NumberOfLines parameter, and by specifying a value
+ // of "0" for the Marker parameter in your first request. Include the Marker
+ // value returned in the response as the Marker value for the next request,
+ // continuing until the AdditionalDataPending response element returns false.
+ NumberOfLines *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s DownloadDBLogFilePortionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DownloadDBLogFilePortionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DownloadDBLogFilePortionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DownloadDBLogFilePortionInput"}
+ if s.DBInstanceIdentifier == nil {
+ invalidParams.Add(request.NewErrParamRequired("DBInstanceIdentifier"))
+ }
+ if s.LogFileName == nil {
+ invalidParams.Add(request.NewErrParamRequired("LogFileName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
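+
+// Illustrative sketch, not part of the generated API: the comment on
+// NumberOfLines above describes downloading a log file in blocks by starting
+// with Marker "0" and looping until AdditionalDataPending is false. A minimal
+// version of that loop, assuming an *rds.RDS client named svc and imports of
+// "bytes" and "github.com/aws/aws-sdk-go/aws", might look like:
+//
+//    func downloadWholeLog(svc *rds.RDS, instanceID, logFile string) (string, error) {
+//        input := &rds.DownloadDBLogFilePortionInput{
+//            DBInstanceIdentifier: aws.String(instanceID),
+//            LogFileName:          aws.String(logFile),
+//            Marker:               aws.String("0"), // "0" = start from the beginning
+//            NumberOfLines:        aws.Int64(1000), // block size per request
+//        }
+//        var buf bytes.Buffer
+//        for {
+//            out, err := svc.DownloadDBLogFilePortion(input)
+//            if err != nil {
+//                return "", err
+//            }
+//            if out.LogFileData != nil {
+//                buf.WriteString(*out.LogFileData)
+//            }
+//            if out.AdditionalDataPending == nil || !*out.AdditionalDataPending {
+//                return buf.String(), nil // no more blocks to fetch
+//            }
+//            input.Marker = out.Marker // continue where the last block ended
+//        }
+//    }
+
+// This data type is used as a response element to DownloadDBLogFilePortion.
+type DownloadDBLogFilePortionOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A Boolean value that, if true, indicates there is more data to be downloaded.
+ AdditionalDataPending *bool `type:"boolean"`
+
+ // Entries from the specified log file.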
+ LogFileData *string `type:"string"` + + // A pagination token that can be used in a subsequent DownloadDBLogFilePortion + // request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DownloadDBLogFilePortionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DownloadDBLogFilePortionOutput) GoString() string { + return s.String() +} + +// This data type is used as a response element in the following actions: +// +// AuthorizeDBSecurityGroupIngress +// +// DescribeDBSecurityGroups +// +// RevokeDBSecurityGroupIngress +type EC2SecurityGroup struct { + _ struct{} `type:"structure"` + + // Specifies the id of the EC2 security group. + EC2SecurityGroupId *string `type:"string"` + + // Specifies the name of the EC2 security group. + EC2SecurityGroupName *string `type:"string"` + + // Specifies the AWS ID of the owner of the EC2 security group specified in + // the EC2SecurityGroupName field. + EC2SecurityGroupOwnerId *string `type:"string"` + + // Provides the status of the EC2 security group. Status can be "authorizing", + // "authorized", "revoking", and "revoked". + Status *string `type:"string"` +} + +// String returns the string representation +func (s EC2SecurityGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EC2SecurityGroup) GoString() string { + return s.String() +} + +// This data type is used as a response element in the following actions: +// +// CreateDBInstance +// +// DescribeDBInstances +// +// DeleteDBInstance +type Endpoint struct { + _ struct{} `type:"structure"` + + // Specifies the DNS address of the DB instance. + Address *string `type:"string"` + + // Specifies the ID that Amazon Route 53 assigns when you create a hosted zone. + HostedZoneId *string `type:"string"` + + // Specifies the port that the database engine is listening on. + Port *int64 `type:"integer"` +} + +// String returns the string representation +func (s Endpoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Endpoint) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the DescribeEngineDefaultParameters +// action. +type EngineDefaults struct { + _ struct{} `type:"structure"` + + // Specifies the name of the DB parameter group family that the engine default + // parameters apply to. + DBParameterGroupFamily *string `type:"string"` + + // An optional pagination token provided by a previous EngineDefaults request. + // If this parameter is specified, the response includes only records beyond + // the marker, up to the value specified by MaxRecords . + Marker *string `type:"string"` + + // Contains a list of engine default parameters. + Parameters []*Parameter `locationNameList:"Parameter" type:"list"` +} + +// String returns the string representation +func (s EngineDefaults) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EngineDefaults) GoString() string { + return s.String() +} + +// This data type is used as a response element in the DescribeEvents action. +type Event struct { + _ struct{} `type:"structure"` + + // Specifies the date and time of the event. + Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Specifies the category for the event. 
+ EventCategories []*string `locationNameList:"EventCategory" type:"list"` + + // Provides the text of this event. + Message *string `type:"string"` + + // Provides the identifier for the source of the event. + SourceIdentifier *string `type:"string"` + + // Specifies the source type for this event. + SourceType *string `type:"string" enum:"SourceType"` +} + +// String returns the string representation +func (s Event) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Event) GoString() string { + return s.String() +} + +// Contains the results of a successful invocation of the DescribeEventCategories +// action. +type EventCategoriesMap struct { + _ struct{} `type:"structure"` + + // The event categories for the specified source type + EventCategories []*string `locationNameList:"EventCategory" type:"list"` + + // The source type that the returned categories belong to + SourceType *string `type:"string"` +} + +// String returns the string representation +func (s EventCategoriesMap) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EventCategoriesMap) GoString() string { + return s.String() +} + +// Contains the results of a successful invocation of the DescribeEventSubscriptions +// action. +type EventSubscription struct { + _ struct{} `type:"structure"` + + // The RDS event notification subscription Id. + CustSubscriptionId *string `type:"string"` + + // The AWS customer account associated with the RDS event notification subscription. + CustomerAwsId *string `type:"string"` + + // A Boolean value indicating if the subscription is enabled. True indicates + // the subscription is enabled. + Enabled *bool `type:"boolean"` + + // A list of event categories for the RDS event notification subscription. + EventCategoriesList []*string `locationNameList:"EventCategory" type:"list"` + + // The topic ARN of the RDS event notification subscription. + SnsTopicArn *string `type:"string"` + + // A list of source IDs for the RDS event notification subscription. + SourceIdsList []*string `locationNameList:"SourceId" type:"list"` + + // The source type for the RDS event notification subscription. + SourceType *string `type:"string"` + + // The status of the RDS event notification subscription. + // + // Constraints: + // + // Can be one of the following: creating | modifying | deleting | active | + // no-permission | topic-not-exist + // + // The status "no-permission" indicates that RDS no longer has permission to + // post to the SNS topic. The status "topic-not-exist" indicates that the topic + // was deleted after the subscription was created. + Status *string `type:"string"` + + // The time the RDS event notification subscription was created. + SubscriptionCreationTime *string `type:"string"` +} + +// String returns the string representation +func (s EventSubscription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EventSubscription) GoString() string { + return s.String() +} + +type FailoverDBClusterInput struct { + _ struct{} `type:"structure"` + + // A DB cluster identifier to force a failover for. This parameter is not case-sensitive. 
+ // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + DBClusterIdentifier *string `type:"string"` +} + +// String returns the string representation +func (s FailoverDBClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FailoverDBClusterInput) GoString() string { + return s.String() +} + +type FailoverDBClusterOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBCluster + // + // DeleteDBCluster + // + // FailoverDBCluster + // + // ModifyDBCluster + // + // RestoreDBClusterFromSnapshot + // + // RestoreDBClusterToPointInTime + // + // This data type is used as a response element in the DescribeDBClusters + // action. + DBCluster *DBCluster `type:"structure"` +} + +// String returns the string representation +func (s FailoverDBClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FailoverDBClusterOutput) GoString() string { + return s.String() +} + +// This type is not currently supported. +type Filter struct { + _ struct{} `type:"structure"` + + // This parameter is not currently supported. + Name *string `type:"string" required:"true"` + + // This parameter is not currently supported. + Values []*string `locationNameList:"Value" type:"list" required:"true"` +} + +// String returns the string representation +func (s Filter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Filter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Filter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Filter"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Values == nil { + invalidParams.Add(request.NewErrParamRequired("Values")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// This data type is used as a response element in the DescribeDBSecurityGroups +// action. +type IPRange struct { + _ struct{} `type:"structure"` + + // Specifies the IP range. + CIDRIP *string `type:"string"` + + // Specifies the status of the IP range. Status can be "authorizing", "authorized", + // "revoking", and "revoked". + Status *string `type:"string"` +} + +// String returns the string representation +func (s IPRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IPRange) GoString() string { + return s.String() +} + +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // The Amazon RDS resource with tags to be listed. This value is an Amazon Resource + // Name (ARN). For information about creating an ARN, see Constructing an RDS + // Amazon Resource Name (ARN) (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html#USER_Tagging.ARN). 
+ ResourceName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceName == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceName")) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // List of tags returned by the ListTagsForResource operation. + TagList []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +type ModifyDBClusterInput struct { + _ struct{} `type:"structure"` + + // A value that specifies whether the modifications in this request and any + // pending modifications are asynchronously applied as soon as possible, regardless + // of the PreferredMaintenanceWindow setting for the DB cluster. If this parameter + // is set to false, changes to the DB cluster are applied during the next maintenance + // window. + // + // The ApplyImmediately parameter only affects the NewDBClusterIdentifier and + // MasterUserPassword values. If you set the ApplyImmediately parameter value + // to false, then changes to the NewDBClusterIdentifier and MasterUserPassword + // values are applied during the next maintenance window. All other changes + // are applied immediately, regardless of the value of the ApplyImmediately + // parameter. + // + // Default: false + ApplyImmediately *bool `type:"boolean"` + + // The number of days for which automated backups are retained. You must specify + // a minimum value of 1. + // + // Default: 1 + // + // Constraints: + // + // Must be a value from 1 to 35 + BackupRetentionPeriod *int64 `type:"integer"` + + // The DB cluster identifier for the cluster being modified. This parameter + // is not case-sensitive. + // + // Constraints: + // + // Must be the identifier for an existing DB cluster. + // + // Must contain from 1 to 63 alphanumeric characters or hyphens. + // + // First character must be a letter. + // + // Cannot end with a hyphen or contain two consecutive hyphens. + DBClusterIdentifier *string `type:"string" required:"true"` + + // The name of the DB cluster parameter group to use for the DB cluster. + DBClusterParameterGroupName *string `type:"string"` + + // The new password for the master database user. This password can contain + // any printable ASCII character except "/", """, or "@". + // + // Constraints: Must contain from 8 to 41 characters. + MasterUserPassword *string `type:"string"` + + // The new DB cluster identifier for the DB cluster when renaming a DB cluster. + // This value is stored as a lowercase string. 
+ //
+ // Constraints:
+ //
+ // Must contain from 1 to 63 alphanumeric characters or hyphens
+ //
+ // First character must be a letter
+ //
+ // Cannot end with a hyphen or contain two consecutive hyphens
+ //
+ // Example: my-cluster2
+ NewDBClusterIdentifier *string `type:"string"`
+
+ // A value that indicates that the DB cluster should be associated with the
+ // specified option group. Changing this parameter does not result in an outage
+ // except in the following case, and the change is applied during the next maintenance
+ // window unless the ApplyImmediately parameter is set to true for this request.
+ // If the parameter change results in an option group that enables OEM, this
+ // change can cause a brief (sub-second) period during which new connections
+ // are rejected but existing connections are not interrupted.
+ //
+ // Permanent options cannot be removed from an option group. The option group
+ // cannot be removed from a DB cluster once it is associated with a DB cluster.
+ OptionGroupName *string `type:"string"`
+
+ // The port number on which the DB cluster accepts connections.
+ //
+ // Constraints: Value must be 1150-65535
+ //
+ // Default: The same port as the original DB cluster.
+ Port *int64 `type:"integer"`
+
+ // The daily time range during which automated backups are created if automated
+ // backups are enabled, using the BackupRetentionPeriod parameter.
+ //
+ // Default: A 30-minute window selected at random from an 8-hour block of time
+ // per region. To see the time blocks available, see Adjusting the Preferred
+ // Maintenance Window (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/AdjustingTheMaintenanceWindow.html)
+ // in the Amazon RDS User Guide.
+ //
+ // Constraints:
+ //
+ // Must be in the format hh24:mi-hh24:mi.
+ //
+ // Times should be in Universal Coordinated Time (UTC).
+ //
+ // Must not conflict with the preferred maintenance window.
+ //
+ // Must be at least 30 minutes.
+ PreferredBackupWindow *string `type:"string"`
+
+ // The weekly time range during which system maintenance can occur, in Universal
+ // Coordinated Time (UTC).
+ //
+ // Format: ddd:hh24:mi-ddd:hh24:mi
+ //
+ // Default: A 30-minute window selected at random from an 8-hour block of time
+ // per region, occurring on a random day of the week. To see the time blocks
+ // available, see Adjusting the Preferred Maintenance Window (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/AdjustingTheMaintenanceWindow.html)
+ // in the Amazon RDS User Guide.
+ //
+ // Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun
+ //
+ // Constraints: Minimum 30-minute window.
+ PreferredMaintenanceWindow *string `type:"string"`
+
+ // A list of VPC security groups that the DB cluster will belong to.
+ VpcSecurityGroupIds []*string `locationNameList:"VpcSecurityGroupId" type:"list"`
+}
+
+// String returns the string representation
+func (s ModifyDBClusterInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyDBClusterInput) GoString() string {
+ return s.String()
+}
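+
+// Illustrative sketch, not part of the generated API: as documented on
+// ApplyImmediately above, a MasterUserPassword change waits for the next
+// maintenance window unless ApplyImmediately is true. The identifier and
+// password below are placeholders, and svc is an assumed *rds.RDS client:
+//
+//    input := &rds.ModifyDBClusterInput{
+//        DBClusterIdentifier: aws.String("my-cluster"),        // required field
+//        MasterUserPassword:  aws.String("new-password-here"), // 8-41 characters
+//        ApplyImmediately:    aws.Bool(true),                  // don't wait for the window
+//    }
+//    out, err := svc.ModifyDBCluster(input)
+
+// Validate inspects the fields of the type to determine if they are valid.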
+func (s *ModifyDBClusterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyDBClusterInput"} + if s.DBClusterIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("DBClusterIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ModifyDBClusterOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBCluster + // + // DeleteDBCluster + // + // FailoverDBCluster + // + // ModifyDBCluster + // + // RestoreDBClusterFromSnapshot + // + // RestoreDBClusterToPointInTime + // + // This data type is used as a response element in the DescribeDBClusters + // action. + DBCluster *DBCluster `type:"structure"` +} + +// String returns the string representation +func (s ModifyDBClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyDBClusterOutput) GoString() string { + return s.String() +} + +type ModifyDBClusterParameterGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the DB cluster parameter group to modify. + DBClusterParameterGroupName *string `type:"string" required:"true"` + + // A list of parameters in the DB cluster parameter group to modify. + Parameters []*Parameter `locationNameList:"Parameter" type:"list" required:"true"` +} + +// String returns the string representation +func (s ModifyDBClusterParameterGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyDBClusterParameterGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyDBClusterParameterGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyDBClusterParameterGroupInput"} + if s.DBClusterParameterGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("DBClusterParameterGroupName")) + } + if s.Parameters == nil { + invalidParams.Add(request.NewErrParamRequired("Parameters")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ModifyDBClusterSnapshotAttributeInput struct { + _ struct{} `type:"structure"` + + // The name of the DB cluster snapshot attribute to modify. + // + // To manage authorization for other AWS accounts to copy or restore a manual + // DB cluster snapshot, set this value to restore. + AttributeName *string `type:"string" required:"true"` + + // The identifier for the DB cluster snapshot to modify the attributes for. + DBClusterSnapshotIdentifier *string `type:"string" required:"true"` + + // A list of DB cluster snapshot attributes to add to the attribute specified + // by AttributeName. + // + // To authorize other AWS accounts to copy or restore a manual DB cluster snapshot, + // set this list to include one or more AWS account IDs, or all to make the + // manual DB cluster snapshot restorable by any AWS account. Do not add the + // all value for any manual DB cluster snapshots that contain private information + // that you don't want available to all AWS accounts. + ValuesToAdd []*string `locationNameList:"AttributeValue" type:"list"` + + // A list of DB cluster snapshot attributes to remove from the attribute specified + // by AttributeName. 
+ // + // To remove authorization for other AWS accounts to copy or restore a manual + // DB cluster snapshot, set this list to include one or more AWS account identifiers, + // or all to remove authorization for any AWS account to copy or restore the + // DB cluster snapshot. If you specify all, an AWS account whose account ID + // is explicitly added to the restore attribute can still copy or restore a + // manual DB cluster snapshot. + ValuesToRemove []*string `locationNameList:"AttributeValue" type:"list"` +} + +// String returns the string representation +func (s ModifyDBClusterSnapshotAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyDBClusterSnapshotAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyDBClusterSnapshotAttributeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyDBClusterSnapshotAttributeInput"} + if s.AttributeName == nil { + invalidParams.Add(request.NewErrParamRequired("AttributeName")) + } + if s.DBClusterSnapshotIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("DBClusterSnapshotIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ModifyDBClusterSnapshotAttributeOutput struct { + _ struct{} `type:"structure"` + + // Contains the results of a successful call to the DescribeDBClusterSnapshotAttributes + // API action. + // + // Manual DB cluster snapshot attributes are used to authorize other AWS accounts + // to copy or restore a manual DB cluster snapshot. For more information, see + // the ModifyDBClusterSnapshotAttribute API action. + DBClusterSnapshotAttributesResult *DBClusterSnapshotAttributesResult `type:"structure"` +} + +// String returns the string representation +func (s ModifyDBClusterSnapshotAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyDBClusterSnapshotAttributeOutput) GoString() string { + return s.String() +} + +type ModifyDBInstanceInput struct { + _ struct{} `type:"structure"` + + // The new storage capacity of the RDS instance. Changing this setting does + // not result in an outage and the change is applied during the next maintenance + // window unless ApplyImmediately is set to true for this request. + // + // MySQL + // + // Default: Uses existing setting + // + // Valid Values: 5-6144 + // + // Constraints: Value supplied must be at least 10% greater than the current + // value. Values that are not at least 10% greater than the existing value are + // rounded up so that they are 10% greater than the current value. + // + // Type: Integer + // + // MariaDB + // + // Default: Uses existing setting + // + // Valid Values: 5-6144 + // + // Constraints: Value supplied must be at least 10% greater than the current + // value. Values that are not at least 10% greater than the existing value are + // rounded up so that they are 10% greater than the current value. + // + // Type: Integer + // + // PostgreSQL + // + // Default: Uses existing setting + // + // Valid Values: 5-6144 + // + // Constraints: Value supplied must be at least 10% greater than the current + // value. Values that are not at least 10% greater than the existing value are + // rounded up so that they are 10% greater than the current value. 
+ // + // Type: Integer + // + // Oracle + // + // Default: Uses existing setting + // + // Valid Values: 10-6144 + // + // Constraints: Value supplied must be at least 10% greater than the current + // value. Values that are not at least 10% greater than the existing value are + // rounded up so that they are 10% greater than the current value. + // + // SQL Server + // + // Cannot be modified. + // + // If you choose to migrate your DB instance from using standard storage to + // using Provisioned IOPS, or from using Provisioned IOPS to using standard + // storage, the process can take time. The duration of the migration depends + // on several factors such as database load, storage size, storage type (standard + // or Provisioned IOPS), amount of IOPS provisioned (if any), and the number + // of prior scale storage operations. Typical migration times are under 24 hours, + // but the process can take up to several days in some cases. During the migration, + // the DB instance will be available for use, but might experience performance + // degradation. While the migration takes place, nightly backups for the instance + // will be suspended. No other Amazon RDS operations can take place for the + // instance, including modifying the instance, rebooting the instance, deleting + // the instance, creating a Read Replica for the instance, and creating a DB + // snapshot of the instance. + AllocatedStorage *int64 `type:"integer"` + + // Indicates that major version upgrades are allowed. Changing this parameter + // does not result in an outage and the change is asynchronously applied as + // soon as possible. + // + // Constraints: This parameter must be set to true when specifying a value + // for the EngineVersion parameter that is a different major version than the + // DB instance's current version. + AllowMajorVersionUpgrade *bool `type:"boolean"` + + // Specifies whether the modifications in this request and any pending modifications + // are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow + // setting for the DB instance. + // + // If this parameter is set to false, changes to the DB instance are applied + // during the next maintenance window. Some parameter changes can cause an outage + // and will be applied on the next call to RebootDBInstance, or the next failure + // reboot. Review the table of parameters in Modifying a DB Instance and Using + // the Apply Immediately Parameter (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.Modifying.html) + // to see the impact that setting ApplyImmediately to true or false has for + // each modified parameter and to determine when the changes will be applied. + // + // Default: false + ApplyImmediately *bool `type:"boolean"` + + // Indicates that minor version upgrades will be applied automatically to the + // DB instance during the maintenance window. Changing this parameter does not + // result in an outage except in the following case and the change is asynchronously + // applied as soon as possible. An outage will result if this parameter is set + // to true during the maintenance window, and a newer minor version is available, + // and RDS has enabled auto patching for that engine version. + AutoMinorVersionUpgrade *bool `type:"boolean"` + + // The number of days to retain automated backups. Setting this parameter to + // a positive number enables backups. Setting this parameter to 0 disables automated + // backups. 
+ // + // Changing this parameter can result in an outage if you change from 0 to + // a non-zero value or from a non-zero value to 0. These changes are applied + // during the next maintenance window unless the ApplyImmediately parameter + // is set to true for this request. If you change the parameter from one non-zero + // value to another non-zero value, the change is asynchronously applied as + // soon as possible. + // + // Default: Uses existing setting + // + // Constraints: + // + // Must be a value from 0 to 35 + // + // Can be specified for a MySQL Read Replica only if the source is running + // MySQL 5.6 + // + // Can be specified for a PostgreSQL Read Replica only if the source is running + // PostgreSQL 9.3.5 + // + // Cannot be set to 0 if the DB instance is a source to Read Replicas + BackupRetentionPeriod *int64 `type:"integer"` + + // Indicates the certificate that needs to be associated with the instance. + CACertificateIdentifier *string `type:"string"` + + // True to copy all tags from the DB instance to snapshots of the DB instance; + // otherwise false. The default is false. + CopyTagsToSnapshot *bool `type:"boolean"` + + // The new compute and memory capacity of the DB instance. To determine the + // instance classes that are available for a particular DB engine, use the DescribeOrderableDBInstanceOptions + // action. + // + // Passing a value for this setting causes an outage during the change and + // is applied during the next maintenance window, unless ApplyImmediately is + // specified as true for this request. + // + // Default: Uses existing setting + // + // Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge + // | db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large + // | db.m3.xlarge | db.m3.2xlarge | db.m4.large | db.m4.xlarge | db.m4.2xlarge + // | db.m4.4xlarge | db.m4.10xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge + // | db.r3.4xlarge | db.r3.8xlarge | db.t2.micro | db.t2.small | db.t2.medium + // | db.t2.large + DBInstanceClass *string `type:"string"` + + // The DB instance identifier. This value is stored as a lowercase string. + // + // Constraints: + // + // Must be the identifier for an existing DB instance + // + // Must contain from 1 to 63 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + DBInstanceIdentifier *string `type:"string" required:"true"` + + // The name of the DB parameter group to apply to the DB instance. Changing + // this setting does not result in an outage. The parameter group name itself + // is changed immediately, but the actual parameter changes are not applied + // until you reboot the instance without failover. The db instance will NOT + // be rebooted automatically and the parameter changes will NOT be applied during + // the next maintenance window. + // + // Default: Uses existing setting + // + // Constraints: The DB parameter group must be in the same DB parameter group + // family as this DB instance. + DBParameterGroupName *string `type:"string"` + + // The port number on which the database accepts connections. + // + // The value of the DBPortNumber parameter must not match any of the port values + // specified for options in the option group for the DB instance. + // + // Your database will restart when you change the DBPortNumber value regardless + // of the value of the ApplyImmediately parameter. 
+ //
+ // MySQL
+ //
+ // Default: 3306
+ //
+ // Valid Values: 1150-65535
+ //
+ // MariaDB
+ //
+ // Default: 3306
+ //
+ // Valid Values: 1150-65535
+ //
+ // PostgreSQL
+ //
+ // Default: 5432
+ //
+ // Valid Values: 1150-65535
+ //
+ // Type: Integer
+ //
+ // Oracle
+ //
+ // Default: 1521
+ //
+ // Valid Values: 1150-65535
+ //
+ // SQL Server
+ //
+ // Default: 1433
+ //
+ // Valid Values: 1150-65535 except for 1434, 3389, 47001, and 49152
+ // through 49156.
+ //
+ // Amazon Aurora
+ //
+ // Default: 3306
+ //
+ // Valid Values: 1150-65535
+ DBPortNumber *int64 `type:"integer"`
+
+ // A list of DB security groups to authorize on this DB instance. Changing this
+ // setting does not result in an outage and the change is asynchronously applied
+ // as soon as possible.
+ //
+ // Constraints:
+ //
+ // Must be 1 to 255 alphanumeric characters
+ //
+ // First character must be a letter
+ //
+ // Cannot end with a hyphen or contain two consecutive hyphens
+ DBSecurityGroups []*string `locationNameList:"DBSecurityGroupName" type:"list"`
+
+ // Specify the Active Directory Domain to move the instance to.
+ //
+ // The specified Active Directory Domain must be created prior to this operation.
+ // Currently only a SQL Server instance can be created in an Active Directory
+ // Domain.
+ Domain *string `type:"string"`
+
+ // Specify the name of the IAM role to be used when making API calls to the
+ // Directory Service.
+ DomainIAMRoleName *string `type:"string"`
+
+ // The version number of the database engine to upgrade to. Changing this parameter
+ // results in an outage and the change is applied during the next maintenance
+ // window unless the ApplyImmediately parameter is set to true for this request.
+ //
+ // For major version upgrades, if a non-default DB parameter group is currently
+ // in use, a new DB parameter group in the DB parameter group family for the
+ // new engine version must be specified. The new DB parameter group can be the
+ // default for that DB parameter group family.
+ //
+ // For a list of valid engine versions, see CreateDBInstance.
+ EngineVersion *string `type:"string"`
+
+ // The new Provisioned IOPS (I/O operations per second) value for the RDS instance.
+ // Changing this setting does not result in an outage and the change is applied
+ // during the next maintenance window unless the ApplyImmediately parameter
+ // is set to true for this request.
+ //
+ // Default: Uses existing setting
+ //
+ // Constraints: Value supplied must be at least 10% greater than the current
+ // value. Values that are not at least 10% greater than the existing value are
+ // rounded up so that they are 10% greater than the current value. If you are
+ // migrating from Provisioned IOPS to standard storage, set this value to 0.
+ // The DB instance will require a reboot for the change in storage type to take
+ // effect.
+ //
+ // SQL Server
+ //
+ // Setting the IOPS value for the SQL Server database engine is not supported.
+ //
+ // Type: Integer
+ //
+ // If you choose to migrate your DB instance from using standard storage to
+ // using Provisioned IOPS, or from using Provisioned IOPS to using standard
+ // storage, the process can take time. The duration of the migration depends
+ // on several factors such as database load, storage size, storage type (standard
+ // or Provisioned IOPS), amount of IOPS provisioned (if any), and the number
+ // of prior scale storage operations. Typical migration times are under 24 hours,
+ // but the process can take up to several days in some cases. During the migration,
+ // the DB instance will be available for use, but might experience performance
+ // degradation. While the migration takes place, nightly backups for the instance
+ // will be suspended. No other Amazon RDS operations can take place for the
+ // instance, including modifying the instance, rebooting the instance, deleting
+ // the instance, creating a Read Replica for the instance, and creating a DB
+ // snapshot of the instance.
+ Iops *int64 `type:"integer"`
+
+ // The new password for the DB instance master user. This password can contain
+ // any printable ASCII character except "/", """, or "@".
+ //
+ // Changing this parameter does not result in an outage and the change is
+ // asynchronously applied as soon as possible. Between the time of the request
+ // and the completion of the request, the MasterUserPassword element exists
+ // in the PendingModifiedValues element of the operation response.
+ //
+ // Default: Uses existing setting
+ //
+ // Constraints: Must be 8 to 41 alphanumeric characters (MySQL, MariaDB, and
+ // Amazon Aurora), 8 to 30 alphanumeric characters (Oracle), or 8 to 128 alphanumeric
+ // characters (SQL Server).
+ //
+ // Amazon RDS API actions never return the password, so this action provides
+ // a way to regain access to a primary instance user if the password is lost.
+ // This includes restoring privileges that might have been accidentally revoked.
+ MasterUserPassword *string `type:"string"`
+
+ // The interval, in seconds, between points when Enhanced Monitoring metrics
+ // are collected for the DB instance. To disable collecting Enhanced Monitoring
+ // metrics, specify 0. The default is 0.
+ //
+ // If MonitoringRoleArn is specified, then you must also set MonitoringInterval
+ // to a value other than 0.
+ //
+ // Valid Values: 0, 1, 5, 10, 15, 30, 60
+ MonitoringInterval *int64 `type:"integer"`
+
+ // The ARN for the IAM role that permits RDS to send enhanced monitoring metrics
+ // to CloudWatch Logs. For example, arn:aws:iam::123456789012:role/emaccess.
+ // For information on creating a monitoring role, go to To create an IAM role
+ // for Amazon RDS Enhanced Monitoring (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.html#USER_Monitoring.OS.IAMRole).
+ //
+ // If MonitoringInterval is set to a value other than 0, then you must supply
+ // a MonitoringRoleArn value.
+ MonitoringRoleArn *string `type:"string"`
+
+ // Specifies if the DB instance is a Multi-AZ deployment. Changing this parameter
+ // does not result in an outage and the change is applied during the next maintenance
+ // window unless the ApplyImmediately parameter is set to true for this request.
+ //
+ // Constraints: Cannot be specified if the DB instance is a Read Replica.
+ MultiAZ *bool `type:"boolean"`
+
+ // The new DB instance identifier for the DB instance when renaming a DB instance.
+ // When you change the DB instance identifier, an instance reboot will occur
+ // immediately if you set Apply Immediately to true, or will occur during the
+ // next maintenance window if Apply Immediately is set to false. This value is
+ // stored as a lowercase string.
+ // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + NewDBInstanceIdentifier *string `type:"string"` + + // Indicates that the DB instance should be associated with the specified option + // group. Changing this parameter does not result in an outage except in the + // following case and the change is applied during the next maintenance window + // unless the ApplyImmediately parameter is set to true for this request. If + // the parameter change results in an option group that enables OEM, this change + // can cause a brief (sub-second) period during which new connections are rejected + // but existing connections are not interrupted. + // + // Permanent options, such as the TDE option for Oracle Advanced Security TDE, + // cannot be removed from an option group, and that option group cannot be removed + // from a DB instance once it is associated with a DB instance + OptionGroupName *string `type:"string"` + + // The daily time range during which automated backups are created if automated + // backups are enabled, as determined by the BackupRetentionPeriod parameter. + // Changing this parameter does not result in an outage and the change is asynchronously + // applied as soon as possible. + // + // Constraints: + // + // Must be in the format hh24:mi-hh24:mi + // + // Times should be in Universal Time Coordinated (UTC) + // + // Must not conflict with the preferred maintenance window + // + // Must be at least 30 minutes + PreferredBackupWindow *string `type:"string"` + + // The weekly time range (in UTC) during which system maintenance can occur, + // which might result in an outage. Changing this parameter does not result + // in an outage, except in the following situation, and the change is asynchronously + // applied as soon as possible. If there are pending actions that cause a reboot, + // and the maintenance window is changed to include the current time, then changing + // this parameter will cause a reboot of the DB instance. If moving this window + // to the current time, there must be at least 30 minutes between the current + // time and end of the window to ensure pending changes are applied. + // + // Default: Uses existing setting + // + // Format: ddd:hh24:mi-ddd:hh24:mi + // + // Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun + // + // Constraints: Must be at least 30 minutes + PreferredMaintenanceWindow *string `type:"string"` + + // A value that specifies the order in which an Aurora Replica is promoted to + // the primary instance after a failure of the existing primary instance. For + // more information, see Fault Tolerance for an Aurora DB Cluster (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Aurora.Managing.html#Aurora.Managing.FaultTolerance). + // + // Default: 1 + // + // Valid Values: 0 - 15 + PromotionTier *int64 `type:"integer"` + + // Boolean value that indicates if the DB instance has a publicly resolvable + // DNS name. Set to True to make the DB instance Internet-facing with a publicly + // resolvable DNS name, which resolves to a public IP address. Set to False + // to make the DB instance internal with a DNS name that resolves to a private + // IP address. + // + // PubliclyAccessible only applies to DB instances in a VPC. The DB instance + // must be part of a public subnet and PubliclyAccessible must be true in order + // for it to be publicly accessible. 
+ // + // Changes to the PubliclyAccessible parameter are applied immediately regardless + // of the value of the ApplyImmediately parameter. + // + // Default: false + PubliclyAccessible *bool `type:"boolean"` + + // Specifies the storage type to be associated with the DB instance. + // + // Valid values: standard | gp2 | io1 + // + // If you specify io1, you must also include a value for the Iops parameter. + // + // Default: io1 if the Iops parameter is specified; otherwise standard + StorageType *string `type:"string"` + + // The ARN from the Key Store with which to associate the instance for TDE encryption. + TdeCredentialArn *string `type:"string"` + + // The password for the given ARN from the Key Store in order to access the + // device. + TdeCredentialPassword *string `type:"string"` + + // A list of EC2 VPC security groups to authorize on this DB instance. This + // change is asynchronously applied as soon as possible. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + VpcSecurityGroupIds []*string `locationNameList:"VpcSecurityGroupId" type:"list"` +} + +// String returns the string representation +func (s ModifyDBInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyDBInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyDBInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyDBInstanceInput"} + if s.DBInstanceIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("DBInstanceIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ModifyDBInstanceOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBInstance + // + // DeleteDBInstance + // + // ModifyDBInstance + // + // This data type is used as a response element in the DescribeDBInstances + // action. + DBInstance *DBInstance `type:"structure"` +} + +// String returns the string representation +func (s ModifyDBInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyDBInstanceOutput) GoString() string { + return s.String() +} + +type ModifyDBParameterGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the DB parameter group. + // + // Constraints: + // + // Must be the name of an existing DB parameter group + // + // Must be 1 to 255 alphanumeric characters + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + DBParameterGroupName *string `type:"string" required:"true"` + + // An array of parameter names, values, and the apply method for the parameter + // update. At least one parameter name, value, and apply method must be supplied; + // subsequent arguments are optional. A maximum of 20 parameters can be modified + // in a single request. + // + // Valid Values (for the application method): immediate | pending-reboot + // + // You can use the immediate value with dynamic parameters only. You can use + // the pending-reboot value for both dynamic and static parameters, and changes + // are applied when you reboot the DB instance without failover. 
+ Parameters []*Parameter `locationNameList:"Parameter" type:"list" required:"true"` +} + +// String returns the string representation +func (s ModifyDBParameterGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyDBParameterGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyDBParameterGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyDBParameterGroupInput"} + if s.DBParameterGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("DBParameterGroupName")) + } + if s.Parameters == nil { + invalidParams.Add(request.NewErrParamRequired("Parameters")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ModifyDBSnapshotAttributeInput struct { + _ struct{} `type:"structure"` + + // The name of the DB snapshot attribute to modify. + // + // To manage authorization for other AWS accounts to copy or restore a manual + // DB snapshot, set this value to restore. + AttributeName *string `type:"string" required:"true"` + + // The identifier for the DB snapshot to modify the attributes for. + DBSnapshotIdentifier *string `type:"string" required:"true"` + + // A list of DB snapshot attributes to add to the attribute specified by AttributeName. + // + // To authorize other AWS accounts to copy or restore a manual snapshot, set + // this list to include one or more AWS account IDs, or all to make the manual + // DB snapshot restorable by any AWS account. Do not add the all value for any + // manual DB snapshots that contain private information that you don't want + // available to all AWS accounts. + ValuesToAdd []*string `locationNameList:"AttributeValue" type:"list"` + + // A list of DB snapshot attributes to remove from the attribute specified by + // AttributeName. + // + // To remove authorization for other AWS accounts to copy or restore a manual + // snapshot, set this list to include one or more AWS account identifiers, or + // all to remove authorization for any AWS account to copy or restore the DB + // snapshot. If you specify all, an AWS account whose account ID is explicitly + // added to the restore attribute can still copy or restore the manual DB snapshot. + ValuesToRemove []*string `locationNameList:"AttributeValue" type:"list"` +} + +// String returns the string representation +func (s ModifyDBSnapshotAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyDBSnapshotAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyDBSnapshotAttributeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyDBSnapshotAttributeInput"} + if s.AttributeName == nil { + invalidParams.Add(request.NewErrParamRequired("AttributeName")) + } + if s.DBSnapshotIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("DBSnapshotIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ModifyDBSnapshotAttributeOutput struct { + _ struct{} `type:"structure"` + + // Contains the results of a successful call to the DescribeDBSnapshotAttributes + // API action. + // + // Manual DB snapshot attributes are used to authorize other AWS accounts to + // copy or restore a manual DB snapshot. 
For more information, see the ModifyDBSnapshotAttribute + // API action. + DBSnapshotAttributesResult *DBSnapshotAttributesResult `type:"structure"` +} + +// String returns the string representation +func (s ModifyDBSnapshotAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyDBSnapshotAttributeOutput) GoString() string { + return s.String() +} + +type ModifyDBSubnetGroupInput struct { + _ struct{} `type:"structure"` + + // The description for the DB subnet group. + DBSubnetGroupDescription *string `type:"string"` + + // The name for the DB subnet group. This value is stored as a lowercase string. + // + // Constraints: Must contain no more than 255 alphanumeric characters, periods, + // underscores, spaces, or hyphens. Must not be default. + // + // Example: mySubnetgroup + DBSubnetGroupName *string `type:"string" required:"true"` + + // The EC2 subnet IDs for the DB subnet group. + SubnetIds []*string `locationNameList:"SubnetIdentifier" type:"list" required:"true"` +} + +// String returns the string representation +func (s ModifyDBSubnetGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyDBSubnetGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyDBSubnetGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyDBSubnetGroupInput"} + if s.DBSubnetGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("DBSubnetGroupName")) + } + if s.SubnetIds == nil { + invalidParams.Add(request.NewErrParamRequired("SubnetIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ModifyDBSubnetGroupOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBSubnetGroup + // + // ModifyDBSubnetGroup + // + // DescribeDBSubnetGroups + // + // DeleteDBSubnetGroup + // + // This data type is used as a response element in the DescribeDBSubnetGroups + // action. + DBSubnetGroup *DBSubnetGroup `type:"structure"` +} + +// String returns the string representation +func (s ModifyDBSubnetGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyDBSubnetGroupOutput) GoString() string { + return s.String() +} + +type ModifyEventSubscriptionInput struct { + _ struct{} `type:"structure"` + + // A Boolean value; set to true to activate the subscription. + Enabled *bool `type:"boolean"` + + // A list of event categories for a SourceType that you want to subscribe to. + // You can see a list of the categories for a given SourceType in the Events + // (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html) + // topic in the Amazon RDS User Guide or by using the DescribeEventCategories + // action. + EventCategories []*string `locationNameList:"EventCategory" type:"list"` + + // The Amazon Resource Name (ARN) of the SNS topic created for event notification. + // The ARN is created by Amazon SNS when you create a topic and subscribe to + // it. + SnsTopicArn *string `type:"string"` + + // The type of source that will be generating the events. For example, if you + // want to be notified of events generated by a DB instance, you would set this + // parameter to db-instance. 
If this value is not specified, all events are
+ // returned.
+ //
+ // Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot
+ SourceType *string `type:"string"`
+
+ // The name of the RDS event notification subscription.
+ SubscriptionName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ModifyEventSubscriptionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyEventSubscriptionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ModifyEventSubscriptionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ModifyEventSubscriptionInput"}
+ if s.SubscriptionName == nil {
+ invalidParams.Add(request.NewErrParamRequired("SubscriptionName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type ModifyEventSubscriptionOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Contains the results of a successful invocation of the DescribeEventSubscriptions
+ // action.
+ EventSubscription *EventSubscription `type:"structure"`
+}
+
+// String returns the string representation
+func (s ModifyEventSubscriptionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyEventSubscriptionOutput) GoString() string {
+ return s.String()
+}
+
+type ModifyOptionGroupInput struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether the changes should be applied immediately, or during the
+ // next maintenance window for each instance associated with the option group.
+ ApplyImmediately *bool `type:"boolean"`
+
+ // The name of the option group to be modified.
+ //
+ // Permanent options, such as the TDE option for Oracle Advanced Security TDE,
+ // cannot be removed from an option group, and that option group cannot be removed
+ // from a DB instance once it is associated with a DB instance.
+ OptionGroupName *string `type:"string" required:"true"`
+
+ // Options in this list are added to the option group or, if already present,
+ // the specified configuration is used to update the existing configuration.
+ OptionsToInclude []*OptionConfiguration `locationNameList:"OptionConfiguration" type:"list"`
+
+ // Options in this list are removed from the option group.
+ OptionsToRemove []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s ModifyOptionGroupInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyOptionGroupInput) GoString() string {
+ return s.String()
+}
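+
+// Illustrative sketch, not part of the generated API: ApplyImmediately above
+// controls whether option changes hit each instance now or in its next
+// maintenance window. The group and option names below are placeholders, and
+// svc is an assumed *rds.RDS client:
+//
+//    input := &rds.ModifyOptionGroupInput{
+//        OptionGroupName:  aws.String("my-option-group"), // required field
+//        OptionsToRemove:  []*string{aws.String("MEMCACHED")},
+//        ApplyImmediately: aws.Bool(false), // wait for each instance's window
+//    }
+//    out, err := svc.ModifyOptionGroup(input)
+
+// Validate inspects the fields of the type to determine if they are valid.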
+func (s *ModifyOptionGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyOptionGroupInput"} + if s.OptionGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("OptionGroupName")) + } + if s.OptionsToInclude != nil { + for i, v := range s.OptionsToInclude { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "OptionsToInclude", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ModifyOptionGroupOutput struct { + _ struct{} `type:"structure"` + + OptionGroup *OptionGroup `type:"structure"` +} + +// String returns the string representation +func (s ModifyOptionGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyOptionGroupOutput) GoString() string { + return s.String() +} + +// Option details. +type Option struct { + _ struct{} `type:"structure"` + + // If the option requires access to a port, then this DB security group allows + // access to the port. + DBSecurityGroupMemberships []*DBSecurityGroupMembership `locationNameList:"DBSecurityGroup" type:"list"` + + // The description of the option. + OptionDescription *string `type:"string"` + + // The name of the option. + OptionName *string `type:"string"` + + // The option settings for this option. + OptionSettings []*OptionSetting `locationNameList:"OptionSetting" type:"list"` + + // Indicates if this option is permanent. + Permanent *bool `type:"boolean"` + + // Indicates if this option is persistent. + Persistent *bool `type:"boolean"` + + // If required, the port configured for this option to use. + Port *int64 `type:"integer"` + + // If the option requires access to a port, then this VPC security group allows + // access to the port. + VpcSecurityGroupMemberships []*VpcSecurityGroupMembership `locationNameList:"VpcSecurityGroupMembership" type:"list"` +} + +// String returns the string representation +func (s Option) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Option) GoString() string { + return s.String() +} + +// A list of all available options. +type OptionConfiguration struct { + _ struct{} `type:"structure"` + + // A list of DBSecurityGroupMembership name strings used for this option. + DBSecurityGroupMemberships []*string `locationNameList:"DBSecurityGroupName" type:"list"` + + // The configuration of options to include in a group. + OptionName *string `type:"string" required:"true"` + + // The option settings to include in an option group. + OptionSettings []*OptionSetting `locationNameList:"OptionSetting" type:"list"` + + // The optional port for the option. + Port *int64 `type:"integer"` + + // A list of VpcSecurityGroupMembership name strings used for this option. + VpcSecurityGroupMemberships []*string `locationNameList:"VpcSecurityGroupId" type:"list"` +} + +// String returns the string representation +func (s OptionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OptionConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid.
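A hedged sketch of how OptionConfiguration feeds ModifyOptionGroup (svc as in the first sketch; the group name, option name, and port are placeholders). Note that client-side Validate only enforces the required OptionName:

input := &rds.ModifyOptionGroupInput{
	OptionGroupName:  aws.String("my-option-group"), // required
	ApplyImmediately: aws.Bool(true),
	OptionsToInclude: []*rds.OptionConfiguration{{
		OptionName: aws.String("MEMCACHED"), // required; example option name
		Port:       aws.Int64(11211),
	}},
}
if err := input.Validate(); err != nil {
	log.Fatal(err) // a missing OptionName is reported here, before any request is sent
}
out, err := svc.ModifyOptionGroup(input)
if err != nil {
	log.Fatal(err)
}
fmt.Println(out.OptionGroup)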
+func (s *OptionConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OptionConfiguration"} + if s.OptionName == nil { + invalidParams.Add(request.NewErrParamRequired("OptionName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type OptionGroup struct { + _ struct{} `type:"structure"` + + // Indicates whether this option group can be applied to both VPC and non-VPC + // instances. The value true indicates the option group can be applied to both + // VPC and non-VPC instances. + AllowsVpcAndNonVpcInstanceMemberships *bool `type:"boolean"` + + // Indicates the name of the engine that this option group can be applied to. + EngineName *string `type:"string"` + + // Indicates the major engine version associated with this option group. + MajorEngineVersion *string `type:"string"` + + // Provides a description of the option group. + OptionGroupDescription *string `type:"string"` + + // Specifies the name of the option group. + OptionGroupName *string `type:"string"` + + // Indicates what options are available in the option group. + Options []*Option `locationNameList:"Option" type:"list"` + + // If AllowsVpcAndNonVpcInstanceMemberships is false, this field is blank. If + // AllowsVpcAndNonVpcInstanceMemberships is true and this field is blank, then + // this option group can be applied to both VPC and non-VPC instances. If this + // field contains a value, then this option group can only be applied to instances + // that are in the VPC indicated by this field. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s OptionGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OptionGroup) GoString() string { + return s.String() +} + +// Provides information on the option groups the DB instance is a member of. +type OptionGroupMembership struct { + _ struct{} `type:"structure"` + + // The name of the option group that the instance belongs to. + OptionGroupName *string `type:"string"` + + // The status of the DB instance's option group membership. Valid values are: + // in-sync, pending-apply, pending-removal, pending-maintenance-apply, pending-maintenance-removal, + // applying, removing, and failed. + Status *string `type:"string"` +} + +// String returns the string representation +func (s OptionGroupMembership) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OptionGroupMembership) GoString() string { + return s.String() +} + +// Available option. +type OptionGroupOption struct { + _ struct{} `type:"structure"` + + // If the option requires a port, specifies the default port for the option. + DefaultPort *int64 `type:"integer"` + + // The description of the option. + Description *string `type:"string"` + + // The name of the engine that this option can be applied to. + EngineName *string `type:"string"` + + // Indicates the major engine version that the option is available for. + MajorEngineVersion *string `type:"string"` + + // The minimum required engine version for the option to be applied. + MinimumRequiredMinorEngineVersion *string `type:"string"` + + // The name of the option. + Name *string `type:"string"` + + // Specifies the option settings that are available (and the default value) + // for each option in an option group. 
+ OptionGroupOptionSettings []*OptionGroupOptionSetting `locationNameList:"OptionGroupOptionSetting" type:"list"` + + // List of all options that are prerequisites for this option. + OptionsDependedOn []*string `locationNameList:"OptionName" type:"list"` + + // A permanent option cannot be removed from the option group once the option + // group is used, and it cannot be removed from the DB instance after assigning + // an option group with this permanent option. + Permanent *bool `type:"boolean"` + + // A persistent option cannot be removed from the option group once the option + // group is used, but this option can be removed from the DB instance while + // modifying the related data and assigning another option group without this + // option. + Persistent *bool `type:"boolean"` + + // Specifies whether the option requires a port. + PortRequired *bool `type:"boolean"` +} + +// String returns the string representation +func (s OptionGroupOption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OptionGroupOption) GoString() string { + return s.String() +} + +// Option group option settings are used to display settings available for each +// option with their default values and other information. These values are +// used with the DescribeOptionGroupOptions action. +type OptionGroupOptionSetting struct { + _ struct{} `type:"structure"` + + // Indicates the acceptable values for the option group option. + AllowedValues *string `type:"string"` + + // The DB engine specific parameter type for the option group option. + ApplyType *string `type:"string"` + + // The default value for the option group option. + DefaultValue *string `type:"string"` + + // Boolean value where true indicates that this option group option can be changed + // from the default value. + IsModifiable *bool `type:"boolean"` + + // The description of the option group option. + SettingDescription *string `type:"string"` + + // The name of the option group option. + SettingName *string `type:"string"` +} + +// String returns the string representation +func (s OptionGroupOptionSetting) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OptionGroupOptionSetting) GoString() string { + return s.String() +} + +// Option settings are the actual settings being applied or configured for that +// option. It is used when you modify an option group or describe option groups. +// For example, the NATIVE_NETWORK_ENCRYPTION option has a setting called SQLNET.ENCRYPTION_SERVER +// that can have several different values. +type OptionSetting struct { + _ struct{} `type:"structure"` + + // The allowed values of the option setting. + AllowedValues *string `type:"string"` + + // The DB engine specific parameter type. + ApplyType *string `type:"string"` + + // The data type of the option setting. + DataType *string `type:"string"` + + // The default value of the option setting. + DefaultValue *string `type:"string"` + + // The description of the option setting. + Description *string `type:"string"` + + // Indicates if the option setting is part of a collection. + IsCollection *bool `type:"boolean"` + + // A Boolean value that, when true, indicates the option setting can be modified + // from the default. + IsModifiable *bool `type:"boolean"` + + // The name of the option that has settings that you can set. + Name *string `type:"string"` + + // The current value of the option setting.
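Since OptionGroupOption and OptionGroupOptionSetting are response elements, one plausible way to see them populated is DescribeOptionGroupOptions, sketched here (svc as above; the engine name and version are examples):

out, err := svc.DescribeOptionGroupOptions(&rds.DescribeOptionGroupOptionsInput{
	EngineName:         aws.String("mysql"), // required
	MajorEngineVersion: aws.String("5.6"),
})
if err != nil {
	log.Fatal(err)
}
for _, opt := range out.OptionGroupOptions {
	fmt.Println(aws.StringValue(opt.Name), "port required:", aws.BoolValue(opt.PortRequired))
	for _, setting := range opt.OptionGroupOptionSettings {
		fmt.Println("  ", aws.StringValue(setting.SettingName), "=", aws.StringValue(setting.DefaultValue))
	}
}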
+ Value *string `type:"string"` +} + +// String returns the string representation +func (s OptionSetting) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OptionSetting) GoString() string { + return s.String() +} + +// Contains a list of available options for a DB instance +// +// This data type is used as a response element in the DescribeOrderableDBInstanceOptions +// action. +type OrderableDBInstanceOption struct { + _ struct{} `type:"structure"` + + // A list of Availability Zones for the orderable DB instance. + AvailabilityZones []*AvailabilityZone `locationNameList:"AvailabilityZone" type:"list"` + + // The DB instance class for the orderable DB instance. + DBInstanceClass *string `type:"string"` + + // The engine type of the orderable DB instance. + Engine *string `type:"string"` + + // The engine version of the orderable DB instance. + EngineVersion *string `type:"string"` + + // The license model for the orderable DB instance. + LicenseModel *string `type:"string"` + + // Indicates whether this orderable DB instance is multi-AZ capable. + MultiAZCapable *bool `type:"boolean"` + + // Indicates whether this orderable DB instance can have a Read Replica. + ReadReplicaCapable *bool `type:"boolean"` + + // Indicates the storage type for this orderable DB instance. + StorageType *string `type:"string"` + + // Indicates whether the DB instance supports enhanced monitoring at intervals + // from 1 to 60 seconds. + SupportsEnhancedMonitoring *bool `type:"boolean"` + + // Indicates whether this orderable DB instance supports provisioned IOPS. + SupportsIops *bool `type:"boolean"` + + // Indicates whether this orderable DB instance supports encrypted storage. + SupportsStorageEncryption *bool `type:"boolean"` + + // Indicates whether this is a VPC orderable DB instance. + Vpc *bool `type:"boolean"` +} + +// String returns the string representation +func (s OrderableDBInstanceOption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OrderableDBInstanceOption) GoString() string { + return s.String() +} + +// This data type is used as a request parameter in the ModifyDBParameterGroup +// and ResetDBParameterGroup actions. +// +// This data type is used as a response element in the DescribeEngineDefaultParameters +// and DescribeDBParameters actions. +type Parameter struct { + _ struct{} `type:"structure"` + + // Specifies the valid range of values for the parameter. + AllowedValues *string `type:"string"` + + // Indicates when to apply parameter updates. + ApplyMethod *string `type:"string" enum:"ApplyMethod"` + + // Specifies the engine specific parameters type. + ApplyType *string `type:"string"` + + // Specifies the valid data type for the parameter. + DataType *string `type:"string"` + + // Provides a description of the parameter. + Description *string `type:"string"` + + // Indicates whether (true) or not (false) the parameter can be modified. Some + // parameters have security or operational implications that prevent them from + // being changed. + IsModifiable *bool `type:"boolean"` + + // The earliest engine version to which the parameter can apply. + MinimumEngineVersion *string `type:"string"` + + // Specifies the name of the parameter. + ParameterName *string `type:"string"` + + // Specifies the value of the parameter. + ParameterValue *string `type:"string"` + + // Indicates the source of the parameter value. 
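OrderableDBInstanceOption is likewise only ever returned, so a sketch of DescribeOrderableDBInstanceOptions shows where its fields come from (svc as above; the filter values are illustrative):

out, err := svc.DescribeOrderableDBInstanceOptions(&rds.DescribeOrderableDBInstanceOptionsInput{
	Engine:        aws.String("mysql"), // required
	EngineVersion: aws.String("5.6.27"),
	Vpc:           aws.Bool(true),
})
if err != nil {
	log.Fatal(err)
}
for _, o := range out.OrderableDBInstanceOptions {
	fmt.Println(aws.StringValue(o.DBInstanceClass),
		"multi-AZ capable:", aws.BoolValue(o.MultiAZCapable),
		"supports IOPS:", aws.BoolValue(o.SupportsIops))
}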
+ Source *string `type:"string"` +} + +// String returns the string representation +func (s Parameter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Parameter) GoString() string { + return s.String() +} + +// Provides information about a pending maintenance action for a resource. +type PendingMaintenanceAction struct { + _ struct{} `type:"structure"` + + // The type of pending maintenance action that is available for the resource. + Action *string `type:"string"` + + // The date of the maintenance window when the action will be applied. The maintenance + // action will be applied to the resource during its first maintenance window + // after this date. If this date is specified, any next-maintenance opt-in requests + // are ignored. + AutoAppliedAfterDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The effective date when the pending maintenance action will be applied to + // the resource. This date takes into account opt-in requests received from + // the ApplyPendingMaintenanceAction API, the AutoAppliedAfterDate, and the + // ForcedApplyDate. This value is blank if an opt-in request has not been received + // and nothing has been specified as AutoAppliedAfterDate or ForcedApplyDate. + CurrentApplyDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // A description providing more detail about the maintenance action. + Description *string `type:"string"` + + // The date when the maintenance action will be automatically applied. The maintenance + // action will be applied to the resource on this date regardless of the maintenance + // window for the resource. If this date is specified, any immediate opt-in + // requests are ignored. + ForcedApplyDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Indicates the type of opt-in request that has been received for the resource. + OptInStatus *string `type:"string"` +} + +// String returns the string representation +func (s PendingMaintenanceAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PendingMaintenanceAction) GoString() string { + return s.String() +} + +// This data type is used as a response element in the ModifyDBInstance action. +type PendingModifiedValues struct { + _ struct{} `type:"structure"` + + // Contains the new AllocatedStorage size for the DB instance that will be applied + // or is in progress. + AllocatedStorage *int64 `type:"integer"` + + // Specifies the pending number of days for which automated backups are retained. + BackupRetentionPeriod *int64 `type:"integer"` + + // Specifies the identifier of the CA certificate for the DB instance. + CACertificateIdentifier *string `type:"string"` + + // Contains the new DBInstanceClass for the DB instance that will be applied + // or is in progress. + DBInstanceClass *string `type:"string"` + + // Contains the new DBInstanceIdentifier for the DB instance that will be applied + // or is in progress. + DBInstanceIdentifier *string `type:"string"` + + // Indicates the database engine version. + EngineVersion *string `type:"string"` + + // Specifies the new Provisioned IOPS value for the DB instance that will be + // applied or is being applied. + Iops *int64 `type:"integer"` + + // Contains the pending or in-progress change of the master credentials for + // the DB instance. + MasterUserPassword *string `type:"string"` + + // Indicates that the Single-AZ DB instance is to change to a Multi-AZ deployment. 
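PendingMaintenanceAction values surface through DescribePendingMaintenanceActions, and the opt-in flow described above goes through ApplyPendingMaintenanceAction; a sketch under the same assumptions (svc as before, and "system-update" is just an example action name):

out, err := svc.DescribePendingMaintenanceActions(&rds.DescribePendingMaintenanceActionsInput{})
if err != nil {
	log.Fatal(err)
}
for _, r := range out.PendingMaintenanceActions {
	for _, a := range r.PendingMaintenanceActionDetails {
		fmt.Println(aws.StringValue(r.ResourceIdentifier), aws.StringValue(a.Action), a.CurrentApplyDate)
	}
	// Opt in to apply the action during the next maintenance window.
	_, err = svc.ApplyPendingMaintenanceAction(&rds.ApplyPendingMaintenanceActionInput{
		ResourceIdentifier: r.ResourceIdentifier,           // required
		ApplyAction:        aws.String("system-update"),    // required
		OptInType:          aws.String("next-maintenance"), // required
	})
	if err != nil {
		log.Fatal(err)
	}
}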
+ MultiAZ *bool `type:"boolean"` + + // Specifies the pending port for the DB instance. + Port *int64 `type:"integer"` + + // Specifies the storage type to be associated with the DB instance. + StorageType *string `type:"string"` +} + +// String returns the string representation +func (s PendingModifiedValues) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PendingModifiedValues) GoString() string { + return s.String() +} + +type PromoteReadReplicaDBClusterInput struct { + _ struct{} `type:"structure"` + + // The identifier of the DB cluster Read Replica to promote. This parameter + // is not case-sensitive. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens. + // + // First character must be a letter. + // + // Cannot end with a hyphen or contain two consecutive hyphens. + // + // Example: my-cluster-replica1 + DBClusterIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s PromoteReadReplicaDBClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PromoteReadReplicaDBClusterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PromoteReadReplicaDBClusterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PromoteReadReplicaDBClusterInput"} + if s.DBClusterIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("DBClusterIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PromoteReadReplicaDBClusterOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBCluster + // + // DeleteDBCluster + // + // FailoverDBCluster + // + // ModifyDBCluster + // + // RestoreDBClusterFromSnapshot + // + // RestoreDBClusterToPointInTime + // + // This data type is used as a response element in the DescribeDBClusters + // action. + DBCluster *DBCluster `type:"structure"` +} + +// String returns the string representation +func (s PromoteReadReplicaDBClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PromoteReadReplicaDBClusterOutput) GoString() string { + return s.String() +} + +type PromoteReadReplicaInput struct { + _ struct{} `type:"structure"` + + // The number of days to retain automated backups. Setting this parameter to + // a positive number enables backups. Setting this parameter to 0 disables automated + // backups. + // + // Default: 1 + // + // Constraints: + // + // Must be a value from 0 to 8 + BackupRetentionPeriod *int64 `type:"integer"` + + // The DB instance identifier. This value is stored as a lowercase string. + // + // Constraints: + // + // Must be the identifier for an existing Read Replica DB instance + // + // Must contain from 1 to 63 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + // + // Example: mydbinstance + DBInstanceIdentifier *string `type:"string" required:"true"` + + // The daily time range during which automated backups are created if automated + // backups are enabled, using the BackupRetentionPeriod parameter. + // + // Default: A 30-minute window selected at random from an 8-hour block of + // time per region. 
To see the time blocks available, see Adjusting the Preferred + // Maintenance Window (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/AdjustingTheMaintenanceWindow.html) + // in the Amazon RDS User Guide. + // + // Constraints: + // + // Must be in the format hh24:mi-hh24:mi. + // + // Times should be in Universal Coordinated Time (UTC). + // + // Must not conflict with the preferred maintenance window. + // + // Must be at least 30 minutes. + PreferredBackupWindow *string `type:"string"` +} + +// String returns the string representation +func (s PromoteReadReplicaInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PromoteReadReplicaInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PromoteReadReplicaInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PromoteReadReplicaInput"} + if s.DBInstanceIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("DBInstanceIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PromoteReadReplicaOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBInstance + // + // DeleteDBInstance + // + // ModifyDBInstance + // + // This data type is used as a response element in the DescribeDBInstances + // action. + DBInstance *DBInstance `type:"structure"` +} + +// String returns the string representation +func (s PromoteReadReplicaOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PromoteReadReplicaOutput) GoString() string { + return s.String() +} + +type PurchaseReservedDBInstancesOfferingInput struct { + _ struct{} `type:"structure"` + + // The number of instances to reserve. + // + // Default: 1 + DBInstanceCount *int64 `type:"integer"` + + // Customer-specified identifier to track this reservation. + // + // Example: myreservationID + ReservedDBInstanceId *string `type:"string"` + + // The ID of the Reserved DB instance offering to purchase. + // + // Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706 + ReservedDBInstancesOfferingId *string `type:"string" required:"true"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s PurchaseReservedDBInstancesOfferingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseReservedDBInstancesOfferingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PurchaseReservedDBInstancesOfferingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PurchaseReservedDBInstancesOfferingInput"} + if s.ReservedDBInstancesOfferingId == nil { + invalidParams.Add(request.NewErrParamRequired("ReservedDBInstancesOfferingId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PurchaseReservedDBInstancesOfferingOutput struct { + _ struct{} `type:"structure"` + + // This data type is used as a response element in the DescribeReservedDBInstances + // and PurchaseReservedDBInstancesOffering actions. 
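A sketch of promoting a Read Replica with the input described above (svc as before; the identifier and backup window are placeholders):

out, err := svc.PromoteReadReplica(&rds.PromoteReadReplicaInput{
	DBInstanceIdentifier:  aws.String("mydbinstance"), // required; must name an existing Read Replica
	BackupRetentionPeriod: aws.Int64(7),
	PreferredBackupWindow: aws.String("07:00-07:30"), // hh24:mi-hh24:mi, UTC, at least 30 minutes
})
if err != nil {
	log.Fatal(err)
}
fmt.Println(out.DBInstance)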
+ ReservedDBInstance *ReservedDBInstance `type:"structure"` +} + +// String returns the string representation +func (s PurchaseReservedDBInstancesOfferingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseReservedDBInstancesOfferingOutput) GoString() string { + return s.String() +} + +type RebootDBInstanceInput struct { + _ struct{} `type:"structure"` + + // The DB instance identifier. This parameter is stored as a lowercase string. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + DBInstanceIdentifier *string `type:"string" required:"true"` + + // When true, the reboot will be conducted through a MultiAZ failover. + // + // Constraint: You cannot specify true if the instance is not configured for + // MultiAZ. + ForceFailover *bool `type:"boolean"` +} + +// String returns the string representation +func (s RebootDBInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebootDBInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RebootDBInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RebootDBInstanceInput"} + if s.DBInstanceIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("DBInstanceIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RebootDBInstanceOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBInstance + // + // DeleteDBInstance + // + // ModifyDBInstance + // + // This data type is used as a response element in the DescribeDBInstances + // action. + DBInstance *DBInstance `type:"structure"` +} + +// String returns the string representation +func (s RebootDBInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebootDBInstanceOutput) GoString() string { + return s.String() +} + +// This data type is used as a response element in the DescribeReservedDBInstances +// and DescribeReservedDBInstancesOfferings actions. +type RecurringCharge struct { + _ struct{} `type:"structure"` + + // The amount of the recurring charge. + RecurringChargeAmount *float64 `type:"double"` + + // The frequency of the recurring charge. + RecurringChargeFrequency *string `type:"string"` +} + +// String returns the string representation +func (s RecurringCharge) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecurringCharge) GoString() string { + return s.String() +} + +type RemoveSourceIdentifierFromSubscriptionInput struct { + _ struct{} `type:"structure"` + + // The source identifier to be removed from the subscription, such as the DB + // instance identifier for a DB instance or the name of a security group. + SourceIdentifier *string `type:"string" required:"true"` + + // The name of the RDS event notification subscription you want to remove a + // source identifier from. 
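RebootDBInstanceInput in use, as a sketch (svc as before; the identifier is a placeholder):

out, err := svc.RebootDBInstance(&rds.RebootDBInstanceInput{
	DBInstanceIdentifier: aws.String("mydbinstance"), // required
	ForceFailover:        aws.Bool(false),            // true is only valid for Multi-AZ instances
})
if err != nil {
	log.Fatal(err)
}
fmt.Println(out.DBInstance)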
+ SubscriptionName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RemoveSourceIdentifierFromSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveSourceIdentifierFromSubscriptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RemoveSourceIdentifierFromSubscriptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemoveSourceIdentifierFromSubscriptionInput"} + if s.SourceIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("SourceIdentifier")) + } + if s.SubscriptionName == nil { + invalidParams.Add(request.NewErrParamRequired("SubscriptionName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RemoveSourceIdentifierFromSubscriptionOutput struct { + _ struct{} `type:"structure"` + + // Contains the results of a successful invocation of the DescribeEventSubscriptions + // action. + EventSubscription *EventSubscription `type:"structure"` +} + +// String returns the string representation +func (s RemoveSourceIdentifierFromSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveSourceIdentifierFromSubscriptionOutput) GoString() string { + return s.String() +} + +type RemoveTagsFromResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon RDS resource the tags will be removed from. This value is an Amazon + // Resource Name (ARN). For information about creating an ARN, see Constructing + // an RDS Amazon Resource Name (ARN) (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html#USER_Tagging.ARN). + ResourceName *string `type:"string" required:"true"` + + // The tag key (name) of the tag to be removed. + TagKeys []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s RemoveTagsFromResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsFromResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RemoveTagsFromResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemoveTagsFromResourceInput"} + if s.ResourceName == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceName")) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RemoveTagsFromResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveTagsFromResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsFromResourceOutput) GoString() string { + return s.String() +} + +// This data type is used as a response element in the DescribeReservedDBInstances +// and PurchaseReservedDBInstancesOffering actions. +type ReservedDBInstance struct { + _ struct{} `type:"structure"` + + // The currency code for the reserved DB instance. + CurrencyCode *string `type:"string"` + + // The DB instance class for the reserved DB instance. + DBInstanceClass *string `type:"string"` + + // The number of reserved DB instances. 
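A sketch of RemoveTagsFromResource; the ARN is a made-up example in the documented format, and svc is the client from the first sketch:

_, err := svc.RemoveTagsFromResource(&rds.RemoveTagsFromResourceInput{
	ResourceName: aws.String("arn:aws:rds:us-east-1:123456789012:db:mydbinstance"), // required
	TagKeys:      []*string{aws.String("environment")},                             // required
})
if err != nil {
	log.Fatal(err)
}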
+ DBInstanceCount *int64 `type:"integer"` + + // The duration of the reservation in seconds. + Duration *int64 `type:"integer"` + + // The fixed price charged for this reserved DB instance. + FixedPrice *float64 `type:"double"` + + // Indicates if the reservation applies to Multi-AZ deployments. + MultiAZ *bool `type:"boolean"` + + // The offering type of this reserved DB instance. + OfferingType *string `type:"string"` + + // The description of the reserved DB instance. + ProductDescription *string `type:"string"` + + // The recurring price charged to run this reserved DB instance. + RecurringCharges []*RecurringCharge `locationNameList:"RecurringCharge" type:"list"` + + // The unique identifier for the reservation. + ReservedDBInstanceId *string `type:"string"` + + // The offering identifier. + ReservedDBInstancesOfferingId *string `type:"string"` + + // The time the reservation started. + StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The state of the reserved DB instance. + State *string `type:"string"` + + // The hourly price charged for this reserved DB instance. + UsagePrice *float64 `type:"double"` +} + +// String returns the string representation +func (s ReservedDBInstance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedDBInstance) GoString() string { + return s.String() +} + +// This data type is used as a response element in the DescribeReservedDBInstancesOfferings +// action. +type ReservedDBInstancesOffering struct { + _ struct{} `type:"structure"` + + // The currency code for the reserved DB instance offering. + CurrencyCode *string `type:"string"` + + // The DB instance class for the reserved DB instance. + DBInstanceClass *string `type:"string"` + + // The duration of the offering in seconds. + Duration *int64 `type:"integer"` + + // The fixed price charged for this offering. + FixedPrice *float64 `type:"double"` + + // Indicates if the offering applies to Multi-AZ deployments. + MultiAZ *bool `type:"boolean"` + + // The offering type. + OfferingType *string `type:"string"` + + // The database engine used by the offering. + ProductDescription *string `type:"string"` + + // The recurring price charged to run this reserved DB instance. + RecurringCharges []*RecurringCharge `locationNameList:"RecurringCharge" type:"list"` + + // The offering identifier. + ReservedDBInstancesOfferingId *string `type:"string"` + + // The hourly price charged for this offering. + UsagePrice *float64 `type:"double"` +} + +// String returns the string representation +func (s ReservedDBInstancesOffering) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedDBInstancesOffering) GoString() string { + return s.String() +} + +type ResetDBClusterParameterGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the DB cluster parameter group to reset. + DBClusterParameterGroupName *string `type:"string" required:"true"` + + // A list of parameter names in the DB cluster parameter group to reset to the + // default values. You cannot use this parameter if the ResetAllParameters parameter + // is set to true. + Parameters []*Parameter `locationNameList:"Parameter" type:"list"` + + // A value that is set to true to reset all parameters in the DB cluster parameter + // group to their default values, and false otherwise. You cannot use this parameter + // if there is a list of parameter names specified for the Parameters parameter. 
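ReservedDBInstancesOffering is returned by DescribeReservedDBInstancesOfferings, and its ReservedDBInstancesOfferingId is exactly what PurchaseReservedDBInstancesOffering requires; a sketch chaining the two (svc as before, filter values illustrative):

offers, err := svc.DescribeReservedDBInstancesOfferings(&rds.DescribeReservedDBInstancesOfferingsInput{
	DBInstanceClass:    aws.String("db.t2.micro"),
	ProductDescription: aws.String("mysql"),
	MultiAZ:            aws.Bool(false),
})
if err != nil {
	log.Fatal(err)
}
if len(offers.ReservedDBInstancesOfferings) > 0 {
	out, err := svc.PurchaseReservedDBInstancesOffering(&rds.PurchaseReservedDBInstancesOfferingInput{
		ReservedDBInstancesOfferingId: offers.ReservedDBInstancesOfferings[0].ReservedDBInstancesOfferingId, // required
		DBInstanceCount:               aws.Int64(1),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.ReservedDBInstance)
}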
+ ResetAllParameters *bool `type:"boolean"` +} + +// String returns the string representation +func (s ResetDBClusterParameterGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetDBClusterParameterGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ResetDBClusterParameterGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResetDBClusterParameterGroupInput"} + if s.DBClusterParameterGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("DBClusterParameterGroupName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ResetDBParameterGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the DB parameter group. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + DBParameterGroupName *string `type:"string" required:"true"` + + // An array of parameter names, values, and the apply method for the parameter + // update. At least one parameter name, value, and apply method must be supplied; + // subsequent arguments are optional. A maximum of 20 parameters can be modified + // in a single request. + // + // MySQL + // + // Valid Values (for Apply method): immediate | pending-reboot + // + // You can use the immediate value with dynamic parameters only. You can use + // the pending-reboot value for both dynamic and static parameters, and changes + // are applied when the DB instance reboots. + // + // MariaDB + // + // Valid Values (for Apply method): immediate | pending-reboot + // + // You can use the immediate value with dynamic parameters only. You can use + // the pending-reboot value for both dynamic and static parameters, and changes + // are applied when the DB instance reboots. + // + // Oracle + // + // Valid Values (for Apply method): pending-reboot + Parameters []*Parameter `locationNameList:"Parameter" type:"list"` + + // Specifies whether (true) or not (false) to reset all parameters in the DB + // parameter group to default values. + // + // Default: true + ResetAllParameters *bool `type:"boolean"` +} + +// String returns the string representation +func (s ResetDBParameterGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetDBParameterGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ResetDBParameterGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResetDBParameterGroupInput"} + if s.DBParameterGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("DBParameterGroupName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Describes the pending maintenance actions for a resource. +type ResourcePendingMaintenanceActions struct { + _ struct{} `type:"structure"` + + // A list that provides details about the pending maintenance actions for the + // resource. + PendingMaintenanceActionDetails []*PendingMaintenanceAction `locationNameList:"PendingMaintenanceAction" type:"list"` + + // The ARN of the resource that has pending maintenance actions.
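Resetting a single parameter rather than the whole group, per the constraints above (a sketch; the group and parameter names are placeholders, svc as before):

_, err := svc.ResetDBParameterGroup(&rds.ResetDBParameterGroupInput{
	DBParameterGroupName: aws.String("mydbparametergroup"), // required
	ResetAllParameters:   aws.Bool(false),
	Parameters: []*rds.Parameter{{
		ParameterName: aws.String("max_connections"),
		ApplyMethod:   aws.String("pending-reboot"), // the only valid method for static parameters
	}},
})
if err != nil {
	log.Fatal(err)
}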
+ ResourceIdentifier *string `type:"string"` +} + +// String returns the string representation +func (s ResourcePendingMaintenanceActions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourcePendingMaintenanceActions) GoString() string { + return s.String() +} + +type RestoreDBClusterFromSnapshotInput struct { + _ struct{} `type:"structure"` + + // Provides the list of EC2 Availability Zones that instances in the restored + // DB cluster can be created in. + AvailabilityZones []*string `locationNameList:"AvailabilityZone" type:"list"` + + // The name of the DB cluster to create from the DB cluster snapshot. This parameter + // isn't case-sensitive. + // + // Constraints: + // + // Must contain from 1 to 255 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + // + // Example: my-snapshot-id + DBClusterIdentifier *string `type:"string" required:"true"` + + // The name of the DB subnet group to use for the new DB cluster. + // + // Constraints: Must contain no more than 255 alphanumeric characters, periods, + // underscores, spaces, or hyphens. Must not be default. + // + // Example: mySubnetgroup + DBSubnetGroupName *string `type:"string"` + + // The database name for the restored DB cluster. + DatabaseName *string `type:"string"` + + // The database engine to use for the new DB cluster. + // + // Default: The same as source + // + // Constraint: Must be compatible with the engine of the source + Engine *string `type:"string" required:"true"` + + // The version of the database engine to use for the new DB cluster. + EngineVersion *string `type:"string"` + + // The KMS key identifier to use when restoring an encrypted DB cluster from + // an encrypted DB cluster snapshot. + // + // The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption + // key. If you are restoring a DB cluster with the same AWS account that owns + // the KMS encryption key used to encrypt the new DB cluster, then you can use + // the KMS key alias instead of the ARN for the KMS encryption key. + // + // If you do not specify a value for the KmsKeyId parameter, then the following + // will occur: + // + // If the DB cluster snapshot is encrypted, then the restored DB cluster + // is encrypted using the KMS key that was used to encrypt the DB cluster snapshot. + // + // If the DB cluster snapshot is not encrypted, then the restored DB cluster + // is not encrypted. + // + // If SnapshotIdentifier refers to a DB cluster snapshot that is not encrypted, + // and you specify a value for the KmsKeyId parameter, then the restore request + // is rejected. + KmsKeyId *string `type:"string"` + + // The name of the option group to use for the restored DB cluster. + OptionGroupName *string `type:"string"` + + // The port number on which the new DB cluster accepts connections. + // + // Constraints: Value must be 1150-65535 + // + // Default: The same port as the original DB cluster. + Port *int64 `type:"integer"` + + // The identifier for the DB cluster snapshot to restore from. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + SnapshotIdentifier *string `type:"string" required:"true"` + + // The tags to be assigned to the restored DB cluster. 
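With only the three fields marked required above, a minimal restore-from-snapshot sketch (identifiers are placeholders; svc as before):

out, err := svc.RestoreDBClusterFromSnapshot(&rds.RestoreDBClusterFromSnapshotInput{
	DBClusterIdentifier: aws.String("my-restored-cluster"), // required
	Engine:              aws.String("aurora"),              // required; must match the snapshot's engine
	SnapshotIdentifier:  aws.String("my-snapshot-id"),      // required
})
if err != nil {
	log.Fatal(err)
}
fmt.Println(out.DBCluster)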
+ Tags []*Tag `locationNameList:"Tag" type:"list"` + + // A list of VPC security groups that the new DB cluster will belong to. + VpcSecurityGroupIds []*string `locationNameList:"VpcSecurityGroupId" type:"list"` +} + +// String returns the string representation +func (s RestoreDBClusterFromSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreDBClusterFromSnapshotInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RestoreDBClusterFromSnapshotInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreDBClusterFromSnapshotInput"} + if s.DBClusterIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("DBClusterIdentifier")) + } + if s.Engine == nil { + invalidParams.Add(request.NewErrParamRequired("Engine")) + } + if s.SnapshotIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("SnapshotIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RestoreDBClusterFromSnapshotOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBCluster + // + // DeleteDBCluster + // + // FailoverDBCluster + // + // ModifyDBCluster + // + // RestoreDBClusterFromSnapshot + // + // RestoreDBClusterToPointInTime + // + // This data type is used as a response element in the DescribeDBClusters + // action. + DBCluster *DBCluster `type:"structure"` +} + +// String returns the string representation +func (s RestoreDBClusterFromSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreDBClusterFromSnapshotOutput) GoString() string { + return s.String() +} + +type RestoreDBClusterToPointInTimeInput struct { + _ struct{} `type:"structure"` + + // The name of the new DB cluster to be created. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + DBClusterIdentifier *string `type:"string" required:"true"` + + // The DB subnet group name to use for the new DB cluster. + // + // Constraints: Must contain no more than 255 alphanumeric characters, periods, + // underscores, spaces, or hyphens. Must not be default. + // + // Example: mySubnetgroup + DBSubnetGroupName *string `type:"string"` + + // The KMS key identifier to use when restoring an encrypted DB cluster from + // an encrypted DB cluster. + // + // The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption + // key. If you are restoring a DB cluster with the same AWS account that owns + // the KMS encryption key used to encrypt the new DB cluster, then you can use + // the KMS key alias instead of the ARN for the KMS encryption key. + // + // You can restore to a new DB cluster and encrypt the new DB cluster with + // a KMS key that is different than the KMS key used to encrypt the source DB + // cluster. The new DB cluster will be encrypted with the KMS key identified + // by the KmsKeyId parameter. + // + // If you do not specify a value for the KmsKeyId parameter, then the following + // will occur: + // + // If the DB cluster is encrypted, then the restored DB cluster is encrypted + // using the KMS key that was used to encrypt the source DB cluster. 
+ // + // If the DB cluster is not encrypted, then the restored DB cluster is not + // encrypted. + // + // If DBClusterIdentifier refers to a DB cluster that is not encrypted, + // then the restore request is rejected. + KmsKeyId *string `type:"string"` + + // The name of the option group for the new DB cluster. + OptionGroupName *string `type:"string"` + + // The port number on which the new DB cluster accepts connections. + // + // Constraints: Value must be 1150-65535 + // + // Default: The same port as the original DB cluster. + Port *int64 `type:"integer"` + + // The date and time to restore the DB cluster to. + // + // Valid Values: Value must be a time in Universal Coordinated Time (UTC) format + // + // Constraints: + // + // Must be before the latest restorable time for the DB instance + // + // Cannot be specified if UseLatestRestorableTime parameter is true + // + // Example: 2015-03-07T23:45:00Z + RestoreToTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The identifier of the source DB cluster from which to restore. + // + // Constraints: + // + // Must be the identifier of an existing DB cluster + // + // Must contain from 1 to 63 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + SourceDBClusterIdentifier *string `type:"string" required:"true"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // A value that is set to true to restore the DB cluster to the latest restorable + // backup time, and false otherwise. + // + // Default: false + // + // Constraints: Cannot be specified if RestoreToTime parameter is provided. + UseLatestRestorableTime *bool `type:"boolean"` + + // A list of VPC security groups that the new DB cluster belongs to. + VpcSecurityGroupIds []*string `locationNameList:"VpcSecurityGroupId" type:"list"` +} + +// String returns the string representation +func (s RestoreDBClusterToPointInTimeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreDBClusterToPointInTimeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RestoreDBClusterToPointInTimeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreDBClusterToPointInTimeInput"} + if s.DBClusterIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("DBClusterIdentifier")) + } + if s.SourceDBClusterIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("SourceDBClusterIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RestoreDBClusterToPointInTimeOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBCluster + // + // DeleteDBCluster + // + // FailoverDBCluster + // + // ModifyDBCluster + // + // RestoreDBClusterFromSnapshot + // + // RestoreDBClusterToPointInTime + // + // This data type is used as a response element in the DescribeDBClusters + // action.
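Because RestoreToTime and UseLatestRestorableTime are mutually exclusive, a sketch restoring to the latest restorable time (placeholders and svc as before):

out, err := svc.RestoreDBClusterToPointInTime(&rds.RestoreDBClusterToPointInTimeInput{
	DBClusterIdentifier:       aws.String("my-restored-cluster"), // required
	SourceDBClusterIdentifier: aws.String("my-source-cluster"),   // required
	UseLatestRestorableTime:   aws.Bool(true),                    // omit and set RestoreToTime for a specific instant
})
if err != nil {
	log.Fatal(err)
}
fmt.Println(out.DBCluster)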
+ DBCluster *DBCluster `type:"structure"` +} + +// String returns the string representation +func (s RestoreDBClusterToPointInTimeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreDBClusterToPointInTimeOutput) GoString() string { + return s.String() +} + +type RestoreDBInstanceFromDBSnapshotInput struct { + _ struct{} `type:"structure"` + + // Indicates that minor version upgrades will be applied automatically to the + // DB instance during the maintenance window. + AutoMinorVersionUpgrade *bool `type:"boolean"` + + // The EC2 Availability Zone that the database instance will be created in. + // + // Default: A random, system-chosen Availability Zone. + // + // Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ + // parameter is set to true. + // + // Example: us-east-1a + AvailabilityZone *string `type:"string"` + + // True to copy all tags from the restored DB instance to snapshots of the DB + // instance; otherwise false. The default is false. + CopyTagsToSnapshot *bool `type:"boolean"` + + // The compute and memory capacity of the Amazon RDS DB instance. + // + // Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge + // | db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge + // | db.m3.2xlarge | db.m4.large | db.m4.xlarge | db.m4.2xlarge | db.m4.4xlarge + // | db.m4.10xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge + // | db.r3.8xlarge | db.t2.micro | db.t2.small | db.t2.medium | db.t2.large + DBInstanceClass *string `type:"string"` + + // Name of the DB instance to create from the DB snapshot. This parameter isn't + // case-sensitive. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens (1 to 15 + // for SQL Server) + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + // + // Example: my-snapshot-id + DBInstanceIdentifier *string `type:"string" required:"true"` + + // The database name for the restored DB instance. + // + // This parameter doesn't apply to the MySQL or MariaDB engines. + DBName *string `type:"string"` + + // The identifier for the DB snapshot to restore from. + // + // Constraints: + // + // Must contain from 1 to 255 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + // + // If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier + // must be the ARN of the shared DB snapshot. + DBSnapshotIdentifier *string `type:"string" required:"true"` + + // The DB subnet group name to use for the new instance. + // + // Constraints: Must contain no more than 255 alphanumeric characters, periods, + // underscores, spaces, or hyphens. Must not be default. + // + // Example: mySubnetgroup + DBSubnetGroupName *string `type:"string"` + + // Specify the Active Directory Domain to restore the instance in. + Domain *string `type:"string"` + + // Specify the name of the IAM role to be used when making API calls to the + // Directory Service. + DomainIAMRoleName *string `type:"string"` + + // The database engine to use for the new instance. 
+ // + // Default: The same as source + // + // Constraint: Must be compatible with the engine of the source + // + // Valid Values: MySQL | mariadb | oracle-se1 | oracle-se | oracle-ee | sqlserver-ee + // | sqlserver-se | sqlserver-ex | sqlserver-web | postgres | aurora + Engine *string `type:"string"` + + // Specifies the amount of provisioned IOPS for the DB instance, expressed in + // I/O operations per second. If this parameter is not specified, the IOPS value + // will be taken from the backup. If this parameter is set to 0, the new instance + // will be converted to a non-PIOPS instance, which will take additional time, + // though your DB instance will be available for connections before the conversion + // starts. + // + // Constraints: Must be an integer greater than 1000. + // + // SQL Server + // + // Setting the IOPS value for the SQL Server database engine is not supported. + Iops *int64 `type:"integer"` + + // License model information for the restored DB instance. + // + // Default: Same as source. + // + // Valid values: license-included | bring-your-own-license | general-public-license + LicenseModel *string `type:"string"` + + // Specifies if the DB instance is a Multi-AZ deployment. + // + // Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ + // parameter is set to true. + MultiAZ *bool `type:"boolean"` + + // The name of the option group to be used for the restored DB instance. + // + // Permanent options, such as the TDE option for Oracle Advanced Security TDE, + // cannot be removed from an option group, and that option group cannot be removed + // from a DB instance once it is associated with a DB instance + OptionGroupName *string `type:"string"` + + // The port number on which the database accepts connections. + // + // Default: The same port as the original DB instance + // + // Constraints: Value must be 1150-65535 + Port *int64 `type:"integer"` + + // Specifies the accessibility options for the DB instance. A value of true + // specifies an Internet-facing instance with a publicly resolvable DNS name, + // which resolves to a public IP address. A value of false specifies an internal + // instance with a DNS name that resolves to a private IP address. + // + // Default: The default behavior varies depending on whether a VPC has been + // requested or not. The following list shows the default behavior in each case. + // + // Default VPC: true + // + // VPC: false + // + // If no DB subnet group has been specified as part of the request and the + // PubliclyAccessible value has not been set, the DB instance will be publicly + // accessible. If a specific DB subnet group has been specified as part of the + // request and the PubliclyAccessible value has not been set, the DB instance + // will be private. + PubliclyAccessible *bool `type:"boolean"` + + // Specifies the storage type to be associated with the DB instance. + // + // Valid values: standard | gp2 | io1 + // + // If you specify io1, you must also include a value for the Iops parameter. + // + // Default: io1 if the Iops parameter is specified; otherwise standard + StorageType *string `type:"string"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // The ARN from the Key Store with which to associate the instance for TDE encryption. + TdeCredentialArn *string `type:"string"` + + // The password for the given ARN from the Key Store in order to access the + // device. 
+ TdeCredentialPassword *string `type:"string"` +} + +// String returns the string representation +func (s RestoreDBInstanceFromDBSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreDBInstanceFromDBSnapshotInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RestoreDBInstanceFromDBSnapshotInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreDBInstanceFromDBSnapshotInput"} + if s.DBInstanceIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("DBInstanceIdentifier")) + } + if s.DBSnapshotIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("DBSnapshotIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RestoreDBInstanceFromDBSnapshotOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBInstance + // + // DeleteDBInstance + // + // ModifyDBInstance + // + // This data type is used as a response element in the DescribeDBInstances + // action. + DBInstance *DBInstance `type:"structure"` +} + +// String returns the string representation +func (s RestoreDBInstanceFromDBSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreDBInstanceFromDBSnapshotOutput) GoString() string { + return s.String() +} + +type RestoreDBInstanceToPointInTimeInput struct { + _ struct{} `type:"structure"` + + // Indicates that minor version upgrades will be applied automatically to the + // DB instance during the maintenance window. + AutoMinorVersionUpgrade *bool `type:"boolean"` + + // The EC2 Availability Zone that the database instance will be created in. + // + // Default: A random, system-chosen Availability Zone. + // + // Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ + // parameter is set to true. + // + // Example: us-east-1a + AvailabilityZone *string `type:"string"` + + // True to copy all tags from the restored DB instance to snapshots of the DB + // instance; otherwise false. The default is false. + CopyTagsToSnapshot *bool `type:"boolean"` + + // The compute and memory capacity of the Amazon RDS DB instance. + // + // Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge + // | db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge + // | db.m3.2xlarge | db.m4.large | db.m4.xlarge | db.m4.2xlarge | db.m4.4xlarge + // | db.m4.10xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge + // | db.r3.8xlarge | db.t2.micro | db.t2.small | db.t2.medium | db.t2.large + // + // Default: The same DBInstanceClass as the original DB instance. + DBInstanceClass *string `type:"string"` + + // The database name for the restored DB instance. + // + // This parameter is not used for the MySQL or MariaDB engines. + DBName *string `type:"string"` + + // The DB subnet group name to use for the new instance. + // + // Constraints: Must contain no more than 255 alphanumeric characters, periods, + // underscores, spaces, or hyphens. Must not be default. + // + // Example: mySubnetgroup + DBSubnetGroupName *string `type:"string"` + + // Specify the Active Directory Domain to restore the instance in. 
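A sketch of the snapshot-restore call those fields configure, using just the two required identifiers (placeholders; svc as before):

out, err := svc.RestoreDBInstanceFromDBSnapshot(&rds.RestoreDBInstanceFromDBSnapshotInput{
	DBInstanceIdentifier: aws.String("my-restored-instance"), // required
	DBSnapshotIdentifier: aws.String("my-snapshot-id"),       // required
})
if err != nil {
	log.Fatal(err)
}
fmt.Println(out.DBInstance)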
+ Domain *string `type:"string"`
+
+ // Specify the name of the IAM role to be used when making API calls to the
+ // Directory Service.
+ DomainIAMRoleName *string `type:"string"`
+
+ // The database engine to use for the new instance.
+ //
+ // Default: The same as source
+ //
+ // Constraint: Must be compatible with the engine of the source
+ //
+ // Valid Values: MySQL | mariadb | oracle-se1 | oracle-se | oracle-ee | sqlserver-ee
+ // | sqlserver-se | sqlserver-ex | sqlserver-web | postgres | aurora
+ Engine *string `type:"string"`
+
+ // The amount of Provisioned IOPS (input/output operations per second) to be
+ // initially allocated for the DB instance.
+ //
+ // Constraints: Must be an integer greater than 1000.
+ //
+ // SQL Server
+ //
+ // Setting the IOPS value for the SQL Server database engine is not supported.
+ Iops *int64 `type:"integer"`
+
+ // License model information for the restored DB instance.
+ //
+ // Default: Same as source.
+ //
+ // Valid values: license-included | bring-your-own-license | general-public-license
+ LicenseModel *string `type:"string"`
+
+ // Specifies if the DB instance is a Multi-AZ deployment.
+ //
+ // Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ
+ // parameter is set to true.
+ MultiAZ *bool `type:"boolean"`
+
+ // The name of the option group to be used for the restored DB instance.
+ //
+ // Permanent options, such as the TDE option for Oracle Advanced Security TDE,
+ // cannot be removed from an option group, and that option group cannot be removed
+ // from a DB instance once it is associated with a DB instance
+ OptionGroupName *string `type:"string"`
+
+ // The port number on which the database accepts connections.
+ //
+ // Constraints: Value must be 1150-65535
+ //
+ // Default: The same port as the original DB instance.
+ Port *int64 `type:"integer"`
+
+ // Specifies the accessibility options for the DB instance. A value of true
+ // specifies an Internet-facing instance with a publicly resolvable DNS name,
+ // which resolves to a public IP address. A value of false specifies an internal
+ // instance with a DNS name that resolves to a private IP address.
+ //
+ // Default: The default behavior varies depending on whether a VPC has been
+ // requested or not. The following list shows the default behavior in each case.
+ //
+ // Default VPC: true
+ //
+ // VPC: false
+ //
+ // If no DB subnet group has been specified as part of the request and the
+ // PubliclyAccessible value has not been set, the DB instance will be publicly
+ // accessible. If a specific DB subnet group has been specified as part of the
+ // request and the PubliclyAccessible value has not been set, the DB instance
+ // will be private.
+ PubliclyAccessible *bool `type:"boolean"`
+
+ // The date and time to restore from.
+ //
+ // Valid Values: Value must be a time in Universal Coordinated Time (UTC) format
+ //
+ // Constraints:
+ //
+ // Must be before the latest restorable time for the DB instance
+ //
+ // Cannot be specified if UseLatestRestorableTime parameter is true
+ //
+ // Example: 2009-09-07T23:45:00Z
+ RestoreTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // The identifier of the source DB instance from which to restore.
+ // + // Constraints: + // + // Must be the identifier of an existing database instance + // + // Must contain from 1 to 63 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + SourceDBInstanceIdentifier *string `type:"string" required:"true"` + + // Specifies the storage type to be associated with the DB instance. + // + // Valid values: standard | gp2 | io1 + // + // If you specify io1, you must also include a value for the Iops parameter. + // + // Default: io1 if the Iops parameter is specified; otherwise standard + StorageType *string `type:"string"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // The name of the new database instance to be created. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + TargetDBInstanceIdentifier *string `type:"string" required:"true"` + + // The ARN from the Key Store with which to associate the instance for TDE encryption. + TdeCredentialArn *string `type:"string"` + + // The password for the given ARN from the Key Store in order to access the + // device. + TdeCredentialPassword *string `type:"string"` + + // Specifies whether (true) or not (false) the DB instance is restored from + // the latest backup time. + // + // Default: false + // + // Constraints: Cannot be specified if RestoreTime parameter is provided. + UseLatestRestorableTime *bool `type:"boolean"` +} + +// String returns the string representation +func (s RestoreDBInstanceToPointInTimeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreDBInstanceToPointInTimeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RestoreDBInstanceToPointInTimeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreDBInstanceToPointInTimeInput"} + if s.SourceDBInstanceIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("SourceDBInstanceIdentifier")) + } + if s.TargetDBInstanceIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("TargetDBInstanceIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RestoreDBInstanceToPointInTimeOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBInstance + // + // DeleteDBInstance + // + // ModifyDBInstance + // + // This data type is used as a response element in the DescribeDBInstances + // action. + DBInstance *DBInstance `type:"structure"` +} + +// String returns the string representation +func (s RestoreDBInstanceToPointInTimeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreDBInstanceToPointInTimeOutput) GoString() string { + return s.String() +} + +type RevokeDBSecurityGroupIngressInput struct { + _ struct{} `type:"structure"` + + // The IP range to revoke access from. Must be a valid CIDR range. If CIDRIP + // is specified, EC2SecurityGroupName, EC2SecurityGroupId and EC2SecurityGroupOwnerId + // cannot be provided. + CIDRIP *string `type:"string"` + + // The name of the DB security group to revoke ingress from. 
+ DBSecurityGroupName *string `type:"string" required:"true"` + + // The id of the EC2 security group to revoke access from. For VPC DB security + // groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId + // and either EC2SecurityGroupName or EC2SecurityGroupId must be provided. + EC2SecurityGroupId *string `type:"string"` + + // The name of the EC2 security group to revoke access from. For VPC DB security + // groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId + // and either EC2SecurityGroupName or EC2SecurityGroupId must be provided. + EC2SecurityGroupName *string `type:"string"` + + // The AWS Account Number of the owner of the EC2 security group specified in + // the EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable + // value. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, + // EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId + // must be provided. + EC2SecurityGroupOwnerId *string `type:"string"` +} + +// String returns the string representation +func (s RevokeDBSecurityGroupIngressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevokeDBSecurityGroupIngressInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RevokeDBSecurityGroupIngressInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RevokeDBSecurityGroupIngressInput"} + if s.DBSecurityGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("DBSecurityGroupName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RevokeDBSecurityGroupIngressOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // DescribeDBSecurityGroups + // + // AuthorizeDBSecurityGroupIngress + // + // CreateDBSecurityGroup + // + // RevokeDBSecurityGroupIngress + // + // This data type is used as a response element in the DescribeDBSecurityGroups + // action. + DBSecurityGroup *DBSecurityGroup `type:"structure"` +} + +// String returns the string representation +func (s RevokeDBSecurityGroupIngressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevokeDBSecurityGroupIngressOutput) GoString() string { + return s.String() +} + +// This data type is used as a response element in the DescribeDBSubnetGroups +// action. +type Subnet struct { + _ struct{} `type:"structure"` + + // Contains Availability Zone information. + // + // This data type is used as an element in the following data type: + // + // OrderableDBInstanceOption + SubnetAvailabilityZone *AvailabilityZone `type:"structure"` + + // Specifies the identifier of the subnet. + SubnetIdentifier *string `type:"string"` + + // Specifies the status of the subnet. + SubnetStatus *string `type:"string"` +} + +// String returns the string representation +func (s Subnet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Subnet) GoString() string { + return s.String() +} + +// Metadata assigned to an Amazon RDS resource consisting of a key-value pair. +type Tag struct { + _ struct{} `type:"structure"` + + // A key is the required name of the tag. 
The string value can be from 1 to
+ // 128 Unicode characters in length and cannot be prefixed with "aws:" or "rds:".
+ // The string can contain only the set of Unicode letters, digits, white-space,
+ // '_', '.', '/', '=', '+', '-' (Java regex: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$").
+ Key *string `type:"string"`
+
+ // A value is the optional value of the tag. The string value can be from 1
+ // to 256 Unicode characters in length and cannot be prefixed with "aws:" or
+ // "rds:". The string can contain only the set of Unicode letters, digits,
+ // white-space, '_', '.', '/', '=', '+', '-' (Java regex: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$").
+ Value *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Tag) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Tag) GoString() string {
+ return s.String()
+}
+
+// The version of the database engine that a DB instance can be upgraded to.
+type UpgradeTarget struct {
+ _ struct{} `type:"structure"`
+
+ // A value that indicates whether the target version will be applied to any
+ // source DB instances that have AutoMinorVersionUpgrade set to true.
+ AutoUpgrade *bool `type:"boolean"`
+
+ // The version of the database engine that a DB instance can be upgraded to.
+ Description *string `type:"string"`
+
+ // The name of the upgrade target database engine.
+ Engine *string `type:"string"`
+
+ // The version number of the upgrade target database engine.
+ EngineVersion *string `type:"string"`
+
+ // A value that indicates whether a database engine will be upgraded to a major
+ // version.
+ IsMajorVersionUpgrade *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s UpgradeTarget) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpgradeTarget) GoString() string {
+ return s.String()
+}
+
+// This data type is used as a response element for queries on VPC security
+// group membership.
+type VpcSecurityGroupMembership struct {
+ _ struct{} `type:"structure"`
+
+ // The status of the VPC security group.
+ Status *string `type:"string"`
+
+ // The name of the VPC security group.
+ VpcSecurityGroupId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s VpcSecurityGroupMembership) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s VpcSecurityGroupMembership) GoString() string {
+ return s.String()
+}
+
+const (
+ // @enum ApplyMethod
+ ApplyMethodImmediate = "immediate"
+ // @enum ApplyMethod
+ ApplyMethodPendingReboot = "pending-reboot"
+)
+
+const (
+ // @enum SourceType
+ SourceTypeDbInstance = "db-instance"
+ // @enum SourceType
+ SourceTypeDbParameterGroup = "db-parameter-group"
+ // @enum SourceType
+ SourceTypeDbSecurityGroup = "db-security-group"
+ // @enum SourceType
+ SourceTypeDbSnapshot = "db-snapshot"
+ // @enum SourceType
+ SourceTypeDbCluster = "db-cluster"
+ // @enum SourceType
+ SourceTypeDbClusterSnapshot = "db-cluster-snapshot"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/rds/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/rds/examples_test.go
new file mode 100644
index 000000000..f62a2cefa
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/rds/examples_test.go
@@ -0,0 +1,2420 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
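+//
+// The examples below are compile-checked illustrations: every input is
+// populated with placeholder values ("String", 1, true, time.Now()) that
+// must be replaced with real identifiers before the calls will succeed
+// against a live RDS endpoint.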
+ +package rds_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/rds" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleRDS_AddSourceIdentifierToSubscription() { + svc := rds.New(session.New()) + + params := &rds.AddSourceIdentifierToSubscriptionInput{ + SourceIdentifier: aws.String("String"), // Required + SubscriptionName: aws.String("String"), // Required + } + resp, err := svc.AddSourceIdentifierToSubscription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_AddTagsToResource() { + svc := rds.New(session.New()) + + params := &rds.AddTagsToResourceInput{ + ResourceName: aws.String("String"), // Required + Tags: []*rds.Tag{ // Required + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.AddTagsToResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_ApplyPendingMaintenanceAction() { + svc := rds.New(session.New()) + + params := &rds.ApplyPendingMaintenanceActionInput{ + ApplyAction: aws.String("String"), // Required + OptInType: aws.String("String"), // Required + ResourceIdentifier: aws.String("String"), // Required + } + resp, err := svc.ApplyPendingMaintenanceAction(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_AuthorizeDBSecurityGroupIngress() { + svc := rds.New(session.New()) + + params := &rds.AuthorizeDBSecurityGroupIngressInput{ + DBSecurityGroupName: aws.String("String"), // Required + CIDRIP: aws.String("String"), + EC2SecurityGroupId: aws.String("String"), + EC2SecurityGroupName: aws.String("String"), + EC2SecurityGroupOwnerId: aws.String("String"), + } + resp, err := svc.AuthorizeDBSecurityGroupIngress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_CopyDBClusterSnapshot() { + svc := rds.New(session.New()) + + params := &rds.CopyDBClusterSnapshotInput{ + SourceDBClusterSnapshotIdentifier: aws.String("String"), // Required + TargetDBClusterSnapshotIdentifier: aws.String("String"), // Required + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CopyDBClusterSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
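+ // (The output's String method, generated above for these types, renders
+ // it with awsutil.Prettify.)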
+ fmt.Println(resp) +} + +func ExampleRDS_CopyDBParameterGroup() { + svc := rds.New(session.New()) + + params := &rds.CopyDBParameterGroupInput{ + SourceDBParameterGroupIdentifier: aws.String("String"), // Required + TargetDBParameterGroupDescription: aws.String("String"), // Required + TargetDBParameterGroupIdentifier: aws.String("String"), // Required + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CopyDBParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_CopyDBSnapshot() { + svc := rds.New(session.New()) + + params := &rds.CopyDBSnapshotInput{ + SourceDBSnapshotIdentifier: aws.String("String"), // Required + TargetDBSnapshotIdentifier: aws.String("String"), // Required + CopyTags: aws.Bool(true), + KmsKeyId: aws.String("String"), + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CopyDBSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_CopyOptionGroup() { + svc := rds.New(session.New()) + + params := &rds.CopyOptionGroupInput{ + SourceOptionGroupIdentifier: aws.String("String"), // Required + TargetOptionGroupDescription: aws.String("String"), // Required + TargetOptionGroupIdentifier: aws.String("String"), // Required + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CopyOptionGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_CreateDBCluster() { + svc := rds.New(session.New()) + + params := &rds.CreateDBClusterInput{ + DBClusterIdentifier: aws.String("String"), // Required + Engine: aws.String("String"), // Required + MasterUserPassword: aws.String("String"), // Required + MasterUsername: aws.String("String"), // Required + AvailabilityZones: []*string{ + aws.String("String"), // Required + // More values... + }, + BackupRetentionPeriod: aws.Int64(1), + CharacterSetName: aws.String("String"), + DBClusterParameterGroupName: aws.String("String"), + DBSubnetGroupName: aws.String("String"), + DatabaseName: aws.String("String"), + EngineVersion: aws.String("String"), + KmsKeyId: aws.String("String"), + OptionGroupName: aws.String("String"), + Port: aws.Int64(1), + PreferredBackupWindow: aws.String("String"), + PreferredMaintenanceWindow: aws.String("String"), + ReplicationSourceIdentifier: aws.String("String"), + StorageEncrypted: aws.Bool(true), + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + VpcSecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.CreateDBCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRDS_CreateDBClusterParameterGroup() { + svc := rds.New(session.New()) + + params := &rds.CreateDBClusterParameterGroupInput{ + DBClusterParameterGroupName: aws.String("String"), // Required + DBParameterGroupFamily: aws.String("String"), // Required + Description: aws.String("String"), // Required + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateDBClusterParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_CreateDBClusterSnapshot() { + svc := rds.New(session.New()) + + params := &rds.CreateDBClusterSnapshotInput{ + DBClusterIdentifier: aws.String("String"), // Required + DBClusterSnapshotIdentifier: aws.String("String"), // Required + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateDBClusterSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_CreateDBInstance() { + svc := rds.New(session.New()) + + params := &rds.CreateDBInstanceInput{ + DBInstanceClass: aws.String("String"), // Required + DBInstanceIdentifier: aws.String("String"), // Required + Engine: aws.String("String"), // Required + AllocatedStorage: aws.Int64(1), + AutoMinorVersionUpgrade: aws.Bool(true), + AvailabilityZone: aws.String("String"), + BackupRetentionPeriod: aws.Int64(1), + CharacterSetName: aws.String("String"), + CopyTagsToSnapshot: aws.Bool(true), + DBClusterIdentifier: aws.String("String"), + DBName: aws.String("String"), + DBParameterGroupName: aws.String("String"), + DBSecurityGroups: []*string{ + aws.String("String"), // Required + // More values... + }, + DBSubnetGroupName: aws.String("String"), + Domain: aws.String("String"), + DomainIAMRoleName: aws.String("String"), + EngineVersion: aws.String("String"), + Iops: aws.Int64(1), + KmsKeyId: aws.String("String"), + LicenseModel: aws.String("String"), + MasterUserPassword: aws.String("String"), + MasterUsername: aws.String("String"), + MonitoringInterval: aws.Int64(1), + MonitoringRoleArn: aws.String("String"), + MultiAZ: aws.Bool(true), + OptionGroupName: aws.String("String"), + Port: aws.Int64(1), + PreferredBackupWindow: aws.String("String"), + PreferredMaintenanceWindow: aws.String("String"), + PromotionTier: aws.Int64(1), + PubliclyAccessible: aws.Bool(true), + StorageEncrypted: aws.Bool(true), + StorageType: aws.String("String"), + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + TdeCredentialArn: aws.String("String"), + TdeCredentialPassword: aws.String("String"), + VpcSecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.CreateDBInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
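+ // (resp is a *rds.CreateDBInstanceOutput; as with the Restore* outputs
+ // shown earlier, it carries the DBInstance describing the new instance.)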
+ fmt.Println(resp) +} + +func ExampleRDS_CreateDBInstanceReadReplica() { + svc := rds.New(session.New()) + + params := &rds.CreateDBInstanceReadReplicaInput{ + DBInstanceIdentifier: aws.String("String"), // Required + SourceDBInstanceIdentifier: aws.String("String"), // Required + AutoMinorVersionUpgrade: aws.Bool(true), + AvailabilityZone: aws.String("String"), + CopyTagsToSnapshot: aws.Bool(true), + DBInstanceClass: aws.String("String"), + DBSubnetGroupName: aws.String("String"), + Iops: aws.Int64(1), + MonitoringInterval: aws.Int64(1), + MonitoringRoleArn: aws.String("String"), + OptionGroupName: aws.String("String"), + Port: aws.Int64(1), + PubliclyAccessible: aws.Bool(true), + StorageType: aws.String("String"), + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateDBInstanceReadReplica(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_CreateDBParameterGroup() { + svc := rds.New(session.New()) + + params := &rds.CreateDBParameterGroupInput{ + DBParameterGroupFamily: aws.String("String"), // Required + DBParameterGroupName: aws.String("String"), // Required + Description: aws.String("String"), // Required + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateDBParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_CreateDBSecurityGroup() { + svc := rds.New(session.New()) + + params := &rds.CreateDBSecurityGroupInput{ + DBSecurityGroupDescription: aws.String("String"), // Required + DBSecurityGroupName: aws.String("String"), // Required + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateDBSecurityGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_CreateDBSnapshot() { + svc := rds.New(session.New()) + + params := &rds.CreateDBSnapshotInput{ + DBInstanceIdentifier: aws.String("String"), // Required + DBSnapshotIdentifier: aws.String("String"), // Required + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateDBSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_CreateDBSubnetGroup() { + svc := rds.New(session.New()) + + params := &rds.CreateDBSubnetGroupInput{ + DBSubnetGroupDescription: aws.String("String"), // Required + DBSubnetGroupName: aws.String("String"), // Required + SubnetIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... 
+ }, + } + resp, err := svc.CreateDBSubnetGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_CreateEventSubscription() { + svc := rds.New(session.New()) + + params := &rds.CreateEventSubscriptionInput{ + SnsTopicArn: aws.String("String"), // Required + SubscriptionName: aws.String("String"), // Required + Enabled: aws.Bool(true), + EventCategories: []*string{ + aws.String("String"), // Required + // More values... + }, + SourceIds: []*string{ + aws.String("String"), // Required + // More values... + }, + SourceType: aws.String("String"), + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateEventSubscription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_CreateOptionGroup() { + svc := rds.New(session.New()) + + params := &rds.CreateOptionGroupInput{ + EngineName: aws.String("String"), // Required + MajorEngineVersion: aws.String("String"), // Required + OptionGroupDescription: aws.String("String"), // Required + OptionGroupName: aws.String("String"), // Required + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateOptionGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DeleteDBCluster() { + svc := rds.New(session.New()) + + params := &rds.DeleteDBClusterInput{ + DBClusterIdentifier: aws.String("String"), // Required + FinalDBSnapshotIdentifier: aws.String("String"), + SkipFinalSnapshot: aws.Bool(true), + } + resp, err := svc.DeleteDBCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DeleteDBClusterParameterGroup() { + svc := rds.New(session.New()) + + params := &rds.DeleteDBClusterParameterGroupInput{ + DBClusterParameterGroupName: aws.String("String"), // Required + } + resp, err := svc.DeleteDBClusterParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DeleteDBClusterSnapshot() { + svc := rds.New(session.New()) + + params := &rds.DeleteDBClusterSnapshotInput{ + DBClusterSnapshotIdentifier: aws.String("String"), // Required + } + resp, err := svc.DeleteDBClusterSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRDS_DeleteDBInstance() { + svc := rds.New(session.New()) + + params := &rds.DeleteDBInstanceInput{ + DBInstanceIdentifier: aws.String("String"), // Required + FinalDBSnapshotIdentifier: aws.String("String"), + SkipFinalSnapshot: aws.Bool(true), + } + resp, err := svc.DeleteDBInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DeleteDBParameterGroup() { + svc := rds.New(session.New()) + + params := &rds.DeleteDBParameterGroupInput{ + DBParameterGroupName: aws.String("String"), // Required + } + resp, err := svc.DeleteDBParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DeleteDBSecurityGroup() { + svc := rds.New(session.New()) + + params := &rds.DeleteDBSecurityGroupInput{ + DBSecurityGroupName: aws.String("String"), // Required + } + resp, err := svc.DeleteDBSecurityGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DeleteDBSnapshot() { + svc := rds.New(session.New()) + + params := &rds.DeleteDBSnapshotInput{ + DBSnapshotIdentifier: aws.String("String"), // Required + } + resp, err := svc.DeleteDBSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DeleteDBSubnetGroup() { + svc := rds.New(session.New()) + + params := &rds.DeleteDBSubnetGroupInput{ + DBSubnetGroupName: aws.String("String"), // Required + } + resp, err := svc.DeleteDBSubnetGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DeleteEventSubscription() { + svc := rds.New(session.New()) + + params := &rds.DeleteEventSubscriptionInput{ + SubscriptionName: aws.String("String"), // Required + } + resp, err := svc.DeleteEventSubscription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DeleteOptionGroup() { + svc := rds.New(session.New()) + + params := &rds.DeleteOptionGroupInput{ + OptionGroupName: aws.String("String"), // Required + } + resp, err := svc.DeleteOptionGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeAccountAttributes() { + svc := rds.New(session.New()) + + var params *rds.DescribeAccountAttributesInput + resp, err := svc.DescribeAccountAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
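+ // For example (illustrative only):
+ //
+ // if awsErr, ok := err.(awserr.Error); ok {
+ // fmt.Println(awsErr.Code(), awsErr.Message())
+ // }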
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeCertificates() { + svc := rds.New(session.New()) + + params := &rds.DescribeCertificatesInput{ + CertificateIdentifier: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeCertificates(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeDBClusterParameterGroups() { + svc := rds.New(session.New()) + + params := &rds.DescribeDBClusterParameterGroupsInput{ + DBClusterParameterGroupName: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeDBClusterParameterGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeDBClusterParameters() { + svc := rds.New(session.New()) + + params := &rds.DescribeDBClusterParametersInput{ + DBClusterParameterGroupName: aws.String("String"), // Required + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + Source: aws.String("String"), + } + resp, err := svc.DescribeDBClusterParameters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeDBClusterSnapshotAttributes() { + svc := rds.New(session.New()) + + params := &rds.DescribeDBClusterSnapshotAttributesInput{ + DBClusterSnapshotIdentifier: aws.String("String"), // Required + } + resp, err := svc.DescribeDBClusterSnapshotAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeDBClusterSnapshots() { + svc := rds.New(session.New()) + + params := &rds.DescribeDBClusterSnapshotsInput{ + DBClusterIdentifier: aws.String("String"), + DBClusterSnapshotIdentifier: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... 
+ }, + IncludePublic: aws.Bool(true), + IncludeShared: aws.Bool(true), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + SnapshotType: aws.String("String"), + } + resp, err := svc.DescribeDBClusterSnapshots(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeDBClusters() { + svc := rds.New(session.New()) + + params := &rds.DescribeDBClustersInput{ + DBClusterIdentifier: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeDBClusters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeDBEngineVersions() { + svc := rds.New(session.New()) + + params := &rds.DescribeDBEngineVersionsInput{ + DBParameterGroupFamily: aws.String("String"), + DefaultOnly: aws.Bool(true), + Engine: aws.String("String"), + EngineVersion: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + ListSupportedCharacterSets: aws.Bool(true), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeDBEngineVersions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeDBInstances() { + svc := rds.New(session.New()) + + params := &rds.DescribeDBInstancesInput{ + DBInstanceIdentifier: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeDBInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeDBLogFiles() { + svc := rds.New(session.New()) + + params := &rds.DescribeDBLogFilesInput{ + DBInstanceIdentifier: aws.String("String"), // Required + FileLastWritten: aws.Int64(1), + FileSize: aws.Int64(1), + FilenameContains: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeDBLogFiles(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRDS_DescribeDBParameterGroups() { + svc := rds.New(session.New()) + + params := &rds.DescribeDBParameterGroupsInput{ + DBParameterGroupName: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeDBParameterGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeDBParameters() { + svc := rds.New(session.New()) + + params := &rds.DescribeDBParametersInput{ + DBParameterGroupName: aws.String("String"), // Required + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + Source: aws.String("String"), + } + resp, err := svc.DescribeDBParameters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeDBSecurityGroups() { + svc := rds.New(session.New()) + + params := &rds.DescribeDBSecurityGroupsInput{ + DBSecurityGroupName: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeDBSecurityGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeDBSnapshotAttributes() { + svc := rds.New(session.New()) + + params := &rds.DescribeDBSnapshotAttributesInput{ + DBSnapshotIdentifier: aws.String("String"), // Required + } + resp, err := svc.DescribeDBSnapshotAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeDBSnapshots() { + svc := rds.New(session.New()) + + params := &rds.DescribeDBSnapshotsInput{ + DBInstanceIdentifier: aws.String("String"), + DBSnapshotIdentifier: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + IncludePublic: aws.Bool(true), + IncludeShared: aws.Bool(true), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + SnapshotType: aws.String("String"), + } + resp, err := svc.DescribeDBSnapshots(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
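+ // (Results are paginated; by the RDS Marker convention, resp.Marker can
+ // be fed back in as Marker to retrieve the next page. The output field
+ // name is assumed here, not shown in this diff.)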
+ fmt.Println(resp) +} + +func ExampleRDS_DescribeDBSubnetGroups() { + svc := rds.New(session.New()) + + params := &rds.DescribeDBSubnetGroupsInput{ + DBSubnetGroupName: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeDBSubnetGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeEngineDefaultClusterParameters() { + svc := rds.New(session.New()) + + params := &rds.DescribeEngineDefaultClusterParametersInput{ + DBParameterGroupFamily: aws.String("String"), // Required + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeEngineDefaultClusterParameters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeEngineDefaultParameters() { + svc := rds.New(session.New()) + + params := &rds.DescribeEngineDefaultParametersInput{ + DBParameterGroupFamily: aws.String("String"), // Required + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeEngineDefaultParameters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeEventCategories() { + svc := rds.New(session.New()) + + params := &rds.DescribeEventCategoriesInput{ + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + SourceType: aws.String("String"), + } + resp, err := svc.DescribeEventCategories(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeEventSubscriptions() { + svc := rds.New(session.New()) + + params := &rds.DescribeEventSubscriptionsInput{ + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + SubscriptionName: aws.String("String"), + } + resp, err := svc.DescribeEventSubscriptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeEvents() { + svc := rds.New(session.New()) + + params := &rds.DescribeEventsInput{ + Duration: aws.Int64(1), + EndTime: aws.Time(time.Now()), + EventCategories: []*string{ + aws.String("String"), // Required + // More values... + }, + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + SourceIdentifier: aws.String("String"), + SourceType: aws.String("SourceType"), + StartTime: aws.Time(time.Now()), + } + resp, err := svc.DescribeEvents(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeOptionGroupOptions() { + svc := rds.New(session.New()) + + params := &rds.DescribeOptionGroupOptionsInput{ + EngineName: aws.String("String"), // Required + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + MajorEngineVersion: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeOptionGroupOptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeOptionGroups() { + svc := rds.New(session.New()) + + params := &rds.DescribeOptionGroupsInput{ + EngineName: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + MajorEngineVersion: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + OptionGroupName: aws.String("String"), + } + resp, err := svc.DescribeOptionGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeOrderableDBInstanceOptions() { + svc := rds.New(session.New()) + + params := &rds.DescribeOrderableDBInstanceOptionsInput{ + Engine: aws.String("String"), // Required + DBInstanceClass: aws.String("String"), + EngineVersion: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + LicenseModel: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + Vpc: aws.Bool(true), + } + resp, err := svc.DescribeOrderableDBInstanceOptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRDS_DescribePendingMaintenanceActions() { + svc := rds.New(session.New()) + + params := &rds.DescribePendingMaintenanceActionsInput{ + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + ResourceIdentifier: aws.String("String"), + } + resp, err := svc.DescribePendingMaintenanceActions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeReservedDBInstances() { + svc := rds.New(session.New()) + + params := &rds.DescribeReservedDBInstancesInput{ + DBInstanceClass: aws.String("String"), + Duration: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + MultiAZ: aws.Bool(true), + OfferingType: aws.String("String"), + ProductDescription: aws.String("String"), + ReservedDBInstanceId: aws.String("String"), + ReservedDBInstancesOfferingId: aws.String("String"), + } + resp, err := svc.DescribeReservedDBInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeReservedDBInstancesOfferings() { + svc := rds.New(session.New()) + + params := &rds.DescribeReservedDBInstancesOfferingsInput{ + DBInstanceClass: aws.String("String"), + Duration: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + MultiAZ: aws.Bool(true), + OfferingType: aws.String("String"), + ProductDescription: aws.String("String"), + ReservedDBInstancesOfferingId: aws.String("String"), + } + resp, err := svc.DescribeReservedDBInstancesOfferings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DownloadDBLogFilePortion() { + svc := rds.New(session.New()) + + params := &rds.DownloadDBLogFilePortionInput{ + DBInstanceIdentifier: aws.String("String"), // Required + LogFileName: aws.String("String"), // Required + Marker: aws.String("String"), + NumberOfLines: aws.Int64(1), + } + resp, err := svc.DownloadDBLogFilePortion(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
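+ // (NumberOfLines and Marker above bound how much of the log file each
+ // call returns; the output's LogFileData field, a name assumed here,
+ // holds the retrieved portion.)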
+ fmt.Println(resp) +} + +func ExampleRDS_FailoverDBCluster() { + svc := rds.New(session.New()) + + params := &rds.FailoverDBClusterInput{ + DBClusterIdentifier: aws.String("String"), + } + resp, err := svc.FailoverDBCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_ListTagsForResource() { + svc := rds.New(session.New()) + + params := &rds.ListTagsForResourceInput{ + ResourceName: aws.String("String"), // Required + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + } + resp, err := svc.ListTagsForResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_ModifyDBCluster() { + svc := rds.New(session.New()) + + params := &rds.ModifyDBClusterInput{ + DBClusterIdentifier: aws.String("String"), // Required + ApplyImmediately: aws.Bool(true), + BackupRetentionPeriod: aws.Int64(1), + DBClusterParameterGroupName: aws.String("String"), + MasterUserPassword: aws.String("String"), + NewDBClusterIdentifier: aws.String("String"), + OptionGroupName: aws.String("String"), + Port: aws.Int64(1), + PreferredBackupWindow: aws.String("String"), + PreferredMaintenanceWindow: aws.String("String"), + VpcSecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.ModifyDBCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_ModifyDBClusterParameterGroup() { + svc := rds.New(session.New()) + + params := &rds.ModifyDBClusterParameterGroupInput{ + DBClusterParameterGroupName: aws.String("String"), // Required + Parameters: []*rds.Parameter{ // Required + { // Required + AllowedValues: aws.String("String"), + ApplyMethod: aws.String("ApplyMethod"), + ApplyType: aws.String("String"), + DataType: aws.String("String"), + Description: aws.String("String"), + IsModifiable: aws.Bool(true), + MinimumEngineVersion: aws.String("String"), + ParameterName: aws.String("String"), + ParameterValue: aws.String("String"), + Source: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.ModifyDBClusterParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_ModifyDBClusterSnapshotAttribute() { + svc := rds.New(session.New()) + + params := &rds.ModifyDBClusterSnapshotAttributeInput{ + AttributeName: aws.String("String"), // Required + DBClusterSnapshotIdentifier: aws.String("String"), // Required + ValuesToAdd: []*string{ + aws.String("String"), // Required + // More values... + }, + ValuesToRemove: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + } + resp, err := svc.ModifyDBClusterSnapshotAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_ModifyDBInstance() { + svc := rds.New(session.New()) + + params := &rds.ModifyDBInstanceInput{ + DBInstanceIdentifier: aws.String("String"), // Required + AllocatedStorage: aws.Int64(1), + AllowMajorVersionUpgrade: aws.Bool(true), + ApplyImmediately: aws.Bool(true), + AutoMinorVersionUpgrade: aws.Bool(true), + BackupRetentionPeriod: aws.Int64(1), + CACertificateIdentifier: aws.String("String"), + CopyTagsToSnapshot: aws.Bool(true), + DBInstanceClass: aws.String("String"), + DBParameterGroupName: aws.String("String"), + DBPortNumber: aws.Int64(1), + DBSecurityGroups: []*string{ + aws.String("String"), // Required + // More values... + }, + Domain: aws.String("String"), + DomainIAMRoleName: aws.String("String"), + EngineVersion: aws.String("String"), + Iops: aws.Int64(1), + MasterUserPassword: aws.String("String"), + MonitoringInterval: aws.Int64(1), + MonitoringRoleArn: aws.String("String"), + MultiAZ: aws.Bool(true), + NewDBInstanceIdentifier: aws.String("String"), + OptionGroupName: aws.String("String"), + PreferredBackupWindow: aws.String("String"), + PreferredMaintenanceWindow: aws.String("String"), + PromotionTier: aws.Int64(1), + PubliclyAccessible: aws.Bool(true), + StorageType: aws.String("String"), + TdeCredentialArn: aws.String("String"), + TdeCredentialPassword: aws.String("String"), + VpcSecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.ModifyDBInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_ModifyDBParameterGroup() { + svc := rds.New(session.New()) + + params := &rds.ModifyDBParameterGroupInput{ + DBParameterGroupName: aws.String("String"), // Required + Parameters: []*rds.Parameter{ // Required + { // Required + AllowedValues: aws.String("String"), + ApplyMethod: aws.String("ApplyMethod"), + ApplyType: aws.String("String"), + DataType: aws.String("String"), + Description: aws.String("String"), + IsModifiable: aws.Bool(true), + MinimumEngineVersion: aws.String("String"), + ParameterName: aws.String("String"), + ParameterValue: aws.String("String"), + Source: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.ModifyDBParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_ModifyDBSnapshotAttribute() { + svc := rds.New(session.New()) + + params := &rds.ModifyDBSnapshotAttributeInput{ + AttributeName: aws.String("String"), // Required + DBSnapshotIdentifier: aws.String("String"), // Required + ValuesToAdd: []*string{ + aws.String("String"), // Required + // More values... + }, + ValuesToRemove: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.ModifyDBSnapshotAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
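+		// For example, a sketch of that cast (assumes the awserr package,
+		// github.com/aws/aws-sdk-go/aws/awserr, is imported):
+		//
+		//   if aerr, ok := err.(awserr.Error); ok {
+		//       fmt.Println(aerr.Code(), aerr.Message())
+		//   }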
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_ModifyDBSubnetGroup() { + svc := rds.New(session.New()) + + params := &rds.ModifyDBSubnetGroupInput{ + DBSubnetGroupName: aws.String("String"), // Required + SubnetIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + DBSubnetGroupDescription: aws.String("String"), + } + resp, err := svc.ModifyDBSubnetGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_ModifyEventSubscription() { + svc := rds.New(session.New()) + + params := &rds.ModifyEventSubscriptionInput{ + SubscriptionName: aws.String("String"), // Required + Enabled: aws.Bool(true), + EventCategories: []*string{ + aws.String("String"), // Required + // More values... + }, + SnsTopicArn: aws.String("String"), + SourceType: aws.String("String"), + } + resp, err := svc.ModifyEventSubscription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_ModifyOptionGroup() { + svc := rds.New(session.New()) + + params := &rds.ModifyOptionGroupInput{ + OptionGroupName: aws.String("String"), // Required + ApplyImmediately: aws.Bool(true), + OptionsToInclude: []*rds.OptionConfiguration{ + { // Required + OptionName: aws.String("String"), // Required + DBSecurityGroupMemberships: []*string{ + aws.String("String"), // Required + // More values... + }, + OptionSettings: []*rds.OptionSetting{ + { // Required + AllowedValues: aws.String("String"), + ApplyType: aws.String("String"), + DataType: aws.String("String"), + DefaultValue: aws.String("String"), + Description: aws.String("String"), + IsCollection: aws.Bool(true), + IsModifiable: aws.Bool(true), + Name: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + Port: aws.Int64(1), + VpcSecurityGroupMemberships: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + OptionsToRemove: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.ModifyOptionGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_PromoteReadReplica() { + svc := rds.New(session.New()) + + params := &rds.PromoteReadReplicaInput{ + DBInstanceIdentifier: aws.String("String"), // Required + BackupRetentionPeriod: aws.Int64(1), + PreferredBackupWindow: aws.String("String"), + } + resp, err := svc.PromoteReadReplica(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRDS_PromoteReadReplicaDBCluster() { + svc := rds.New(session.New()) + + params := &rds.PromoteReadReplicaDBClusterInput{ + DBClusterIdentifier: aws.String("String"), // Required + } + resp, err := svc.PromoteReadReplicaDBCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_PurchaseReservedDBInstancesOffering() { + svc := rds.New(session.New()) + + params := &rds.PurchaseReservedDBInstancesOfferingInput{ + ReservedDBInstancesOfferingId: aws.String("String"), // Required + DBInstanceCount: aws.Int64(1), + ReservedDBInstanceId: aws.String("String"), + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.PurchaseReservedDBInstancesOffering(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_RebootDBInstance() { + svc := rds.New(session.New()) + + params := &rds.RebootDBInstanceInput{ + DBInstanceIdentifier: aws.String("String"), // Required + ForceFailover: aws.Bool(true), + } + resp, err := svc.RebootDBInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_RemoveSourceIdentifierFromSubscription() { + svc := rds.New(session.New()) + + params := &rds.RemoveSourceIdentifierFromSubscriptionInput{ + SourceIdentifier: aws.String("String"), // Required + SubscriptionName: aws.String("String"), // Required + } + resp, err := svc.RemoveSourceIdentifierFromSubscription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_RemoveTagsFromResource() { + svc := rds.New(session.New()) + + params := &rds.RemoveTagsFromResourceInput{ + ResourceName: aws.String("String"), // Required + TagKeys: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.RemoveTagsFromResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_ResetDBClusterParameterGroup() { + svc := rds.New(session.New()) + + params := &rds.ResetDBClusterParameterGroupInput{ + DBClusterParameterGroupName: aws.String("String"), // Required + Parameters: []*rds.Parameter{ + { // Required + AllowedValues: aws.String("String"), + ApplyMethod: aws.String("ApplyMethod"), + ApplyType: aws.String("String"), + DataType: aws.String("String"), + Description: aws.String("String"), + IsModifiable: aws.Bool(true), + MinimumEngineVersion: aws.String("String"), + ParameterName: aws.String("String"), + ParameterValue: aws.String("String"), + Source: aws.String("String"), + }, + // More values... 
+ }, + ResetAllParameters: aws.Bool(true), + } + resp, err := svc.ResetDBClusterParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_ResetDBParameterGroup() { + svc := rds.New(session.New()) + + params := &rds.ResetDBParameterGroupInput{ + DBParameterGroupName: aws.String("String"), // Required + Parameters: []*rds.Parameter{ + { // Required + AllowedValues: aws.String("String"), + ApplyMethod: aws.String("ApplyMethod"), + ApplyType: aws.String("String"), + DataType: aws.String("String"), + Description: aws.String("String"), + IsModifiable: aws.Bool(true), + MinimumEngineVersion: aws.String("String"), + ParameterName: aws.String("String"), + ParameterValue: aws.String("String"), + Source: aws.String("String"), + }, + // More values... + }, + ResetAllParameters: aws.Bool(true), + } + resp, err := svc.ResetDBParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_RestoreDBClusterFromSnapshot() { + svc := rds.New(session.New()) + + params := &rds.RestoreDBClusterFromSnapshotInput{ + DBClusterIdentifier: aws.String("String"), // Required + Engine: aws.String("String"), // Required + SnapshotIdentifier: aws.String("String"), // Required + AvailabilityZones: []*string{ + aws.String("String"), // Required + // More values... + }, + DBSubnetGroupName: aws.String("String"), + DatabaseName: aws.String("String"), + EngineVersion: aws.String("String"), + KmsKeyId: aws.String("String"), + OptionGroupName: aws.String("String"), + Port: aws.Int64(1), + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + VpcSecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.RestoreDBClusterFromSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_RestoreDBClusterToPointInTime() { + svc := rds.New(session.New()) + + params := &rds.RestoreDBClusterToPointInTimeInput{ + DBClusterIdentifier: aws.String("String"), // Required + SourceDBClusterIdentifier: aws.String("String"), // Required + DBSubnetGroupName: aws.String("String"), + KmsKeyId: aws.String("String"), + OptionGroupName: aws.String("String"), + Port: aws.Int64(1), + RestoreToTime: aws.Time(time.Now()), + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + UseLatestRestorableTime: aws.Bool(true), + VpcSecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.RestoreDBClusterToPointInTime(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRDS_RestoreDBInstanceFromDBSnapshot() { + svc := rds.New(session.New()) + + params := &rds.RestoreDBInstanceFromDBSnapshotInput{ + DBInstanceIdentifier: aws.String("String"), // Required + DBSnapshotIdentifier: aws.String("String"), // Required + AutoMinorVersionUpgrade: aws.Bool(true), + AvailabilityZone: aws.String("String"), + CopyTagsToSnapshot: aws.Bool(true), + DBInstanceClass: aws.String("String"), + DBName: aws.String("String"), + DBSubnetGroupName: aws.String("String"), + Domain: aws.String("String"), + DomainIAMRoleName: aws.String("String"), + Engine: aws.String("String"), + Iops: aws.Int64(1), + LicenseModel: aws.String("String"), + MultiAZ: aws.Bool(true), + OptionGroupName: aws.String("String"), + Port: aws.Int64(1), + PubliclyAccessible: aws.Bool(true), + StorageType: aws.String("String"), + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + TdeCredentialArn: aws.String("String"), + TdeCredentialPassword: aws.String("String"), + } + resp, err := svc.RestoreDBInstanceFromDBSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_RestoreDBInstanceToPointInTime() { + svc := rds.New(session.New()) + + params := &rds.RestoreDBInstanceToPointInTimeInput{ + SourceDBInstanceIdentifier: aws.String("String"), // Required + TargetDBInstanceIdentifier: aws.String("String"), // Required + AutoMinorVersionUpgrade: aws.Bool(true), + AvailabilityZone: aws.String("String"), + CopyTagsToSnapshot: aws.Bool(true), + DBInstanceClass: aws.String("String"), + DBName: aws.String("String"), + DBSubnetGroupName: aws.String("String"), + Domain: aws.String("String"), + DomainIAMRoleName: aws.String("String"), + Engine: aws.String("String"), + Iops: aws.Int64(1), + LicenseModel: aws.String("String"), + MultiAZ: aws.Bool(true), + OptionGroupName: aws.String("String"), + Port: aws.Int64(1), + PubliclyAccessible: aws.Bool(true), + RestoreTime: aws.Time(time.Now()), + StorageType: aws.String("String"), + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + TdeCredentialArn: aws.String("String"), + TdeCredentialPassword: aws.String("String"), + UseLatestRestorableTime: aws.Bool(true), + } + resp, err := svc.RestoreDBInstanceToPointInTime(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_RevokeDBSecurityGroupIngress() { + svc := rds.New(session.New()) + + params := &rds.RevokeDBSecurityGroupIngressInput{ + DBSecurityGroupName: aws.String("String"), // Required + CIDRIP: aws.String("String"), + EC2SecurityGroupId: aws.String("String"), + EC2SecurityGroupName: aws.String("String"), + EC2SecurityGroupOwnerId: aws.String("String"), + } + resp, err := svc.RevokeDBSecurityGroupIngress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/rds/rdsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/rds/rdsiface/interface.go new file mode 100644 index 000000000..c33bc72d2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/rds/rdsiface/interface.go @@ -0,0 +1,372 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package rdsiface provides an interface for the Amazon Relational Database Service. +package rdsiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/rds" +) + +// RDSAPI is the interface type for rds.RDS. +type RDSAPI interface { + AddSourceIdentifierToSubscriptionRequest(*rds.AddSourceIdentifierToSubscriptionInput) (*request.Request, *rds.AddSourceIdentifierToSubscriptionOutput) + + AddSourceIdentifierToSubscription(*rds.AddSourceIdentifierToSubscriptionInput) (*rds.AddSourceIdentifierToSubscriptionOutput, error) + + AddTagsToResourceRequest(*rds.AddTagsToResourceInput) (*request.Request, *rds.AddTagsToResourceOutput) + + AddTagsToResource(*rds.AddTagsToResourceInput) (*rds.AddTagsToResourceOutput, error) + + ApplyPendingMaintenanceActionRequest(*rds.ApplyPendingMaintenanceActionInput) (*request.Request, *rds.ApplyPendingMaintenanceActionOutput) + + ApplyPendingMaintenanceAction(*rds.ApplyPendingMaintenanceActionInput) (*rds.ApplyPendingMaintenanceActionOutput, error) + + AuthorizeDBSecurityGroupIngressRequest(*rds.AuthorizeDBSecurityGroupIngressInput) (*request.Request, *rds.AuthorizeDBSecurityGroupIngressOutput) + + AuthorizeDBSecurityGroupIngress(*rds.AuthorizeDBSecurityGroupIngressInput) (*rds.AuthorizeDBSecurityGroupIngressOutput, error) + + CopyDBClusterSnapshotRequest(*rds.CopyDBClusterSnapshotInput) (*request.Request, *rds.CopyDBClusterSnapshotOutput) + + CopyDBClusterSnapshot(*rds.CopyDBClusterSnapshotInput) (*rds.CopyDBClusterSnapshotOutput, error) + + CopyDBParameterGroupRequest(*rds.CopyDBParameterGroupInput) (*request.Request, *rds.CopyDBParameterGroupOutput) + + CopyDBParameterGroup(*rds.CopyDBParameterGroupInput) (*rds.CopyDBParameterGroupOutput, error) + + CopyDBSnapshotRequest(*rds.CopyDBSnapshotInput) (*request.Request, *rds.CopyDBSnapshotOutput) + + CopyDBSnapshot(*rds.CopyDBSnapshotInput) (*rds.CopyDBSnapshotOutput, error) + + CopyOptionGroupRequest(*rds.CopyOptionGroupInput) (*request.Request, *rds.CopyOptionGroupOutput) + + CopyOptionGroup(*rds.CopyOptionGroupInput) (*rds.CopyOptionGroupOutput, error) + + CreateDBClusterRequest(*rds.CreateDBClusterInput) (*request.Request, *rds.CreateDBClusterOutput) + + CreateDBCluster(*rds.CreateDBClusterInput) (*rds.CreateDBClusterOutput, error) + + CreateDBClusterParameterGroupRequest(*rds.CreateDBClusterParameterGroupInput) (*request.Request, *rds.CreateDBClusterParameterGroupOutput) + + CreateDBClusterParameterGroup(*rds.CreateDBClusterParameterGroupInput) (*rds.CreateDBClusterParameterGroupOutput, error) + + CreateDBClusterSnapshotRequest(*rds.CreateDBClusterSnapshotInput) (*request.Request, *rds.CreateDBClusterSnapshotOutput) + + CreateDBClusterSnapshot(*rds.CreateDBClusterSnapshotInput) (*rds.CreateDBClusterSnapshotOutput, error) + + CreateDBInstanceRequest(*rds.CreateDBInstanceInput) (*request.Request, *rds.CreateDBInstanceOutput) + + CreateDBInstance(*rds.CreateDBInstanceInput) (*rds.CreateDBInstanceOutput, error) + + CreateDBInstanceReadReplicaRequest(*rds.CreateDBInstanceReadReplicaInput) (*request.Request, *rds.CreateDBInstanceReadReplicaOutput) + + 
CreateDBInstanceReadReplica(*rds.CreateDBInstanceReadReplicaInput) (*rds.CreateDBInstanceReadReplicaOutput, error) + + CreateDBParameterGroupRequest(*rds.CreateDBParameterGroupInput) (*request.Request, *rds.CreateDBParameterGroupOutput) + + CreateDBParameterGroup(*rds.CreateDBParameterGroupInput) (*rds.CreateDBParameterGroupOutput, error) + + CreateDBSecurityGroupRequest(*rds.CreateDBSecurityGroupInput) (*request.Request, *rds.CreateDBSecurityGroupOutput) + + CreateDBSecurityGroup(*rds.CreateDBSecurityGroupInput) (*rds.CreateDBSecurityGroupOutput, error) + + CreateDBSnapshotRequest(*rds.CreateDBSnapshotInput) (*request.Request, *rds.CreateDBSnapshotOutput) + + CreateDBSnapshot(*rds.CreateDBSnapshotInput) (*rds.CreateDBSnapshotOutput, error) + + CreateDBSubnetGroupRequest(*rds.CreateDBSubnetGroupInput) (*request.Request, *rds.CreateDBSubnetGroupOutput) + + CreateDBSubnetGroup(*rds.CreateDBSubnetGroupInput) (*rds.CreateDBSubnetGroupOutput, error) + + CreateEventSubscriptionRequest(*rds.CreateEventSubscriptionInput) (*request.Request, *rds.CreateEventSubscriptionOutput) + + CreateEventSubscription(*rds.CreateEventSubscriptionInput) (*rds.CreateEventSubscriptionOutput, error) + + CreateOptionGroupRequest(*rds.CreateOptionGroupInput) (*request.Request, *rds.CreateOptionGroupOutput) + + CreateOptionGroup(*rds.CreateOptionGroupInput) (*rds.CreateOptionGroupOutput, error) + + DeleteDBClusterRequest(*rds.DeleteDBClusterInput) (*request.Request, *rds.DeleteDBClusterOutput) + + DeleteDBCluster(*rds.DeleteDBClusterInput) (*rds.DeleteDBClusterOutput, error) + + DeleteDBClusterParameterGroupRequest(*rds.DeleteDBClusterParameterGroupInput) (*request.Request, *rds.DeleteDBClusterParameterGroupOutput) + + DeleteDBClusterParameterGroup(*rds.DeleteDBClusterParameterGroupInput) (*rds.DeleteDBClusterParameterGroupOutput, error) + + DeleteDBClusterSnapshotRequest(*rds.DeleteDBClusterSnapshotInput) (*request.Request, *rds.DeleteDBClusterSnapshotOutput) + + DeleteDBClusterSnapshot(*rds.DeleteDBClusterSnapshotInput) (*rds.DeleteDBClusterSnapshotOutput, error) + + DeleteDBInstanceRequest(*rds.DeleteDBInstanceInput) (*request.Request, *rds.DeleteDBInstanceOutput) + + DeleteDBInstance(*rds.DeleteDBInstanceInput) (*rds.DeleteDBInstanceOutput, error) + + DeleteDBParameterGroupRequest(*rds.DeleteDBParameterGroupInput) (*request.Request, *rds.DeleteDBParameterGroupOutput) + + DeleteDBParameterGroup(*rds.DeleteDBParameterGroupInput) (*rds.DeleteDBParameterGroupOutput, error) + + DeleteDBSecurityGroupRequest(*rds.DeleteDBSecurityGroupInput) (*request.Request, *rds.DeleteDBSecurityGroupOutput) + + DeleteDBSecurityGroup(*rds.DeleteDBSecurityGroupInput) (*rds.DeleteDBSecurityGroupOutput, error) + + DeleteDBSnapshotRequest(*rds.DeleteDBSnapshotInput) (*request.Request, *rds.DeleteDBSnapshotOutput) + + DeleteDBSnapshot(*rds.DeleteDBSnapshotInput) (*rds.DeleteDBSnapshotOutput, error) + + DeleteDBSubnetGroupRequest(*rds.DeleteDBSubnetGroupInput) (*request.Request, *rds.DeleteDBSubnetGroupOutput) + + DeleteDBSubnetGroup(*rds.DeleteDBSubnetGroupInput) (*rds.DeleteDBSubnetGroupOutput, error) + + DeleteEventSubscriptionRequest(*rds.DeleteEventSubscriptionInput) (*request.Request, *rds.DeleteEventSubscriptionOutput) + + DeleteEventSubscription(*rds.DeleteEventSubscriptionInput) (*rds.DeleteEventSubscriptionOutput, error) + + DeleteOptionGroupRequest(*rds.DeleteOptionGroupInput) (*request.Request, *rds.DeleteOptionGroupOutput) + + DeleteOptionGroup(*rds.DeleteOptionGroupInput) (*rds.DeleteOptionGroupOutput, error) + + 
DescribeAccountAttributesRequest(*rds.DescribeAccountAttributesInput) (*request.Request, *rds.DescribeAccountAttributesOutput) + + DescribeAccountAttributes(*rds.DescribeAccountAttributesInput) (*rds.DescribeAccountAttributesOutput, error) + + DescribeCertificatesRequest(*rds.DescribeCertificatesInput) (*request.Request, *rds.DescribeCertificatesOutput) + + DescribeCertificates(*rds.DescribeCertificatesInput) (*rds.DescribeCertificatesOutput, error) + + DescribeDBClusterParameterGroupsRequest(*rds.DescribeDBClusterParameterGroupsInput) (*request.Request, *rds.DescribeDBClusterParameterGroupsOutput) + + DescribeDBClusterParameterGroups(*rds.DescribeDBClusterParameterGroupsInput) (*rds.DescribeDBClusterParameterGroupsOutput, error) + + DescribeDBClusterParametersRequest(*rds.DescribeDBClusterParametersInput) (*request.Request, *rds.DescribeDBClusterParametersOutput) + + DescribeDBClusterParameters(*rds.DescribeDBClusterParametersInput) (*rds.DescribeDBClusterParametersOutput, error) + + DescribeDBClusterSnapshotAttributesRequest(*rds.DescribeDBClusterSnapshotAttributesInput) (*request.Request, *rds.DescribeDBClusterSnapshotAttributesOutput) + + DescribeDBClusterSnapshotAttributes(*rds.DescribeDBClusterSnapshotAttributesInput) (*rds.DescribeDBClusterSnapshotAttributesOutput, error) + + DescribeDBClusterSnapshotsRequest(*rds.DescribeDBClusterSnapshotsInput) (*request.Request, *rds.DescribeDBClusterSnapshotsOutput) + + DescribeDBClusterSnapshots(*rds.DescribeDBClusterSnapshotsInput) (*rds.DescribeDBClusterSnapshotsOutput, error) + + DescribeDBClustersRequest(*rds.DescribeDBClustersInput) (*request.Request, *rds.DescribeDBClustersOutput) + + DescribeDBClusters(*rds.DescribeDBClustersInput) (*rds.DescribeDBClustersOutput, error) + + DescribeDBEngineVersionsRequest(*rds.DescribeDBEngineVersionsInput) (*request.Request, *rds.DescribeDBEngineVersionsOutput) + + DescribeDBEngineVersions(*rds.DescribeDBEngineVersionsInput) (*rds.DescribeDBEngineVersionsOutput, error) + + DescribeDBEngineVersionsPages(*rds.DescribeDBEngineVersionsInput, func(*rds.DescribeDBEngineVersionsOutput, bool) bool) error + + DescribeDBInstancesRequest(*rds.DescribeDBInstancesInput) (*request.Request, *rds.DescribeDBInstancesOutput) + + DescribeDBInstances(*rds.DescribeDBInstancesInput) (*rds.DescribeDBInstancesOutput, error) + + DescribeDBInstancesPages(*rds.DescribeDBInstancesInput, func(*rds.DescribeDBInstancesOutput, bool) bool) error + + DescribeDBLogFilesRequest(*rds.DescribeDBLogFilesInput) (*request.Request, *rds.DescribeDBLogFilesOutput) + + DescribeDBLogFiles(*rds.DescribeDBLogFilesInput) (*rds.DescribeDBLogFilesOutput, error) + + DescribeDBLogFilesPages(*rds.DescribeDBLogFilesInput, func(*rds.DescribeDBLogFilesOutput, bool) bool) error + + DescribeDBParameterGroupsRequest(*rds.DescribeDBParameterGroupsInput) (*request.Request, *rds.DescribeDBParameterGroupsOutput) + + DescribeDBParameterGroups(*rds.DescribeDBParameterGroupsInput) (*rds.DescribeDBParameterGroupsOutput, error) + + DescribeDBParameterGroupsPages(*rds.DescribeDBParameterGroupsInput, func(*rds.DescribeDBParameterGroupsOutput, bool) bool) error + + DescribeDBParametersRequest(*rds.DescribeDBParametersInput) (*request.Request, *rds.DescribeDBParametersOutput) + + DescribeDBParameters(*rds.DescribeDBParametersInput) (*rds.DescribeDBParametersOutput, error) + + DescribeDBParametersPages(*rds.DescribeDBParametersInput, func(*rds.DescribeDBParametersOutput, bool) bool) error + + DescribeDBSecurityGroupsRequest(*rds.DescribeDBSecurityGroupsInput) 
(*request.Request, *rds.DescribeDBSecurityGroupsOutput) + + DescribeDBSecurityGroups(*rds.DescribeDBSecurityGroupsInput) (*rds.DescribeDBSecurityGroupsOutput, error) + + DescribeDBSecurityGroupsPages(*rds.DescribeDBSecurityGroupsInput, func(*rds.DescribeDBSecurityGroupsOutput, bool) bool) error + + DescribeDBSnapshotAttributesRequest(*rds.DescribeDBSnapshotAttributesInput) (*request.Request, *rds.DescribeDBSnapshotAttributesOutput) + + DescribeDBSnapshotAttributes(*rds.DescribeDBSnapshotAttributesInput) (*rds.DescribeDBSnapshotAttributesOutput, error) + + DescribeDBSnapshotsRequest(*rds.DescribeDBSnapshotsInput) (*request.Request, *rds.DescribeDBSnapshotsOutput) + + DescribeDBSnapshots(*rds.DescribeDBSnapshotsInput) (*rds.DescribeDBSnapshotsOutput, error) + + DescribeDBSnapshotsPages(*rds.DescribeDBSnapshotsInput, func(*rds.DescribeDBSnapshotsOutput, bool) bool) error + + DescribeDBSubnetGroupsRequest(*rds.DescribeDBSubnetGroupsInput) (*request.Request, *rds.DescribeDBSubnetGroupsOutput) + + DescribeDBSubnetGroups(*rds.DescribeDBSubnetGroupsInput) (*rds.DescribeDBSubnetGroupsOutput, error) + + DescribeDBSubnetGroupsPages(*rds.DescribeDBSubnetGroupsInput, func(*rds.DescribeDBSubnetGroupsOutput, bool) bool) error + + DescribeEngineDefaultClusterParametersRequest(*rds.DescribeEngineDefaultClusterParametersInput) (*request.Request, *rds.DescribeEngineDefaultClusterParametersOutput) + + DescribeEngineDefaultClusterParameters(*rds.DescribeEngineDefaultClusterParametersInput) (*rds.DescribeEngineDefaultClusterParametersOutput, error) + + DescribeEngineDefaultParametersRequest(*rds.DescribeEngineDefaultParametersInput) (*request.Request, *rds.DescribeEngineDefaultParametersOutput) + + DescribeEngineDefaultParameters(*rds.DescribeEngineDefaultParametersInput) (*rds.DescribeEngineDefaultParametersOutput, error) + + DescribeEngineDefaultParametersPages(*rds.DescribeEngineDefaultParametersInput, func(*rds.DescribeEngineDefaultParametersOutput, bool) bool) error + + DescribeEventCategoriesRequest(*rds.DescribeEventCategoriesInput) (*request.Request, *rds.DescribeEventCategoriesOutput) + + DescribeEventCategories(*rds.DescribeEventCategoriesInput) (*rds.DescribeEventCategoriesOutput, error) + + DescribeEventSubscriptionsRequest(*rds.DescribeEventSubscriptionsInput) (*request.Request, *rds.DescribeEventSubscriptionsOutput) + + DescribeEventSubscriptions(*rds.DescribeEventSubscriptionsInput) (*rds.DescribeEventSubscriptionsOutput, error) + + DescribeEventSubscriptionsPages(*rds.DescribeEventSubscriptionsInput, func(*rds.DescribeEventSubscriptionsOutput, bool) bool) error + + DescribeEventsRequest(*rds.DescribeEventsInput) (*request.Request, *rds.DescribeEventsOutput) + + DescribeEvents(*rds.DescribeEventsInput) (*rds.DescribeEventsOutput, error) + + DescribeEventsPages(*rds.DescribeEventsInput, func(*rds.DescribeEventsOutput, bool) bool) error + + DescribeOptionGroupOptionsRequest(*rds.DescribeOptionGroupOptionsInput) (*request.Request, *rds.DescribeOptionGroupOptionsOutput) + + DescribeOptionGroupOptions(*rds.DescribeOptionGroupOptionsInput) (*rds.DescribeOptionGroupOptionsOutput, error) + + DescribeOptionGroupOptionsPages(*rds.DescribeOptionGroupOptionsInput, func(*rds.DescribeOptionGroupOptionsOutput, bool) bool) error + + DescribeOptionGroupsRequest(*rds.DescribeOptionGroupsInput) (*request.Request, *rds.DescribeOptionGroupsOutput) + + DescribeOptionGroups(*rds.DescribeOptionGroupsInput) (*rds.DescribeOptionGroupsOutput, error) + + DescribeOptionGroupsPages(*rds.DescribeOptionGroupsInput, 
func(*rds.DescribeOptionGroupsOutput, bool) bool) error + + DescribeOrderableDBInstanceOptionsRequest(*rds.DescribeOrderableDBInstanceOptionsInput) (*request.Request, *rds.DescribeOrderableDBInstanceOptionsOutput) + + DescribeOrderableDBInstanceOptions(*rds.DescribeOrderableDBInstanceOptionsInput) (*rds.DescribeOrderableDBInstanceOptionsOutput, error) + + DescribeOrderableDBInstanceOptionsPages(*rds.DescribeOrderableDBInstanceOptionsInput, func(*rds.DescribeOrderableDBInstanceOptionsOutput, bool) bool) error + + DescribePendingMaintenanceActionsRequest(*rds.DescribePendingMaintenanceActionsInput) (*request.Request, *rds.DescribePendingMaintenanceActionsOutput) + + DescribePendingMaintenanceActions(*rds.DescribePendingMaintenanceActionsInput) (*rds.DescribePendingMaintenanceActionsOutput, error) + + DescribeReservedDBInstancesRequest(*rds.DescribeReservedDBInstancesInput) (*request.Request, *rds.DescribeReservedDBInstancesOutput) + + DescribeReservedDBInstances(*rds.DescribeReservedDBInstancesInput) (*rds.DescribeReservedDBInstancesOutput, error) + + DescribeReservedDBInstancesPages(*rds.DescribeReservedDBInstancesInput, func(*rds.DescribeReservedDBInstancesOutput, bool) bool) error + + DescribeReservedDBInstancesOfferingsRequest(*rds.DescribeReservedDBInstancesOfferingsInput) (*request.Request, *rds.DescribeReservedDBInstancesOfferingsOutput) + + DescribeReservedDBInstancesOfferings(*rds.DescribeReservedDBInstancesOfferingsInput) (*rds.DescribeReservedDBInstancesOfferingsOutput, error) + + DescribeReservedDBInstancesOfferingsPages(*rds.DescribeReservedDBInstancesOfferingsInput, func(*rds.DescribeReservedDBInstancesOfferingsOutput, bool) bool) error + + DownloadDBLogFilePortionRequest(*rds.DownloadDBLogFilePortionInput) (*request.Request, *rds.DownloadDBLogFilePortionOutput) + + DownloadDBLogFilePortion(*rds.DownloadDBLogFilePortionInput) (*rds.DownloadDBLogFilePortionOutput, error) + + DownloadDBLogFilePortionPages(*rds.DownloadDBLogFilePortionInput, func(*rds.DownloadDBLogFilePortionOutput, bool) bool) error + + FailoverDBClusterRequest(*rds.FailoverDBClusterInput) (*request.Request, *rds.FailoverDBClusterOutput) + + FailoverDBCluster(*rds.FailoverDBClusterInput) (*rds.FailoverDBClusterOutput, error) + + ListTagsForResourceRequest(*rds.ListTagsForResourceInput) (*request.Request, *rds.ListTagsForResourceOutput) + + ListTagsForResource(*rds.ListTagsForResourceInput) (*rds.ListTagsForResourceOutput, error) + + ModifyDBClusterRequest(*rds.ModifyDBClusterInput) (*request.Request, *rds.ModifyDBClusterOutput) + + ModifyDBCluster(*rds.ModifyDBClusterInput) (*rds.ModifyDBClusterOutput, error) + + ModifyDBClusterParameterGroupRequest(*rds.ModifyDBClusterParameterGroupInput) (*request.Request, *rds.DBClusterParameterGroupNameMessage) + + ModifyDBClusterParameterGroup(*rds.ModifyDBClusterParameterGroupInput) (*rds.DBClusterParameterGroupNameMessage, error) + + ModifyDBClusterSnapshotAttributeRequest(*rds.ModifyDBClusterSnapshotAttributeInput) (*request.Request, *rds.ModifyDBClusterSnapshotAttributeOutput) + + ModifyDBClusterSnapshotAttribute(*rds.ModifyDBClusterSnapshotAttributeInput) (*rds.ModifyDBClusterSnapshotAttributeOutput, error) + + ModifyDBInstanceRequest(*rds.ModifyDBInstanceInput) (*request.Request, *rds.ModifyDBInstanceOutput) + + ModifyDBInstance(*rds.ModifyDBInstanceInput) (*rds.ModifyDBInstanceOutput, error) + + ModifyDBParameterGroupRequest(*rds.ModifyDBParameterGroupInput) (*request.Request, *rds.DBParameterGroupNameMessage) + + 
ModifyDBParameterGroup(*rds.ModifyDBParameterGroupInput) (*rds.DBParameterGroupNameMessage, error) + + ModifyDBSnapshotAttributeRequest(*rds.ModifyDBSnapshotAttributeInput) (*request.Request, *rds.ModifyDBSnapshotAttributeOutput) + + ModifyDBSnapshotAttribute(*rds.ModifyDBSnapshotAttributeInput) (*rds.ModifyDBSnapshotAttributeOutput, error) + + ModifyDBSubnetGroupRequest(*rds.ModifyDBSubnetGroupInput) (*request.Request, *rds.ModifyDBSubnetGroupOutput) + + ModifyDBSubnetGroup(*rds.ModifyDBSubnetGroupInput) (*rds.ModifyDBSubnetGroupOutput, error) + + ModifyEventSubscriptionRequest(*rds.ModifyEventSubscriptionInput) (*request.Request, *rds.ModifyEventSubscriptionOutput) + + ModifyEventSubscription(*rds.ModifyEventSubscriptionInput) (*rds.ModifyEventSubscriptionOutput, error) + + ModifyOptionGroupRequest(*rds.ModifyOptionGroupInput) (*request.Request, *rds.ModifyOptionGroupOutput) + + ModifyOptionGroup(*rds.ModifyOptionGroupInput) (*rds.ModifyOptionGroupOutput, error) + + PromoteReadReplicaRequest(*rds.PromoteReadReplicaInput) (*request.Request, *rds.PromoteReadReplicaOutput) + + PromoteReadReplica(*rds.PromoteReadReplicaInput) (*rds.PromoteReadReplicaOutput, error) + + PromoteReadReplicaDBClusterRequest(*rds.PromoteReadReplicaDBClusterInput) (*request.Request, *rds.PromoteReadReplicaDBClusterOutput) + + PromoteReadReplicaDBCluster(*rds.PromoteReadReplicaDBClusterInput) (*rds.PromoteReadReplicaDBClusterOutput, error) + + PurchaseReservedDBInstancesOfferingRequest(*rds.PurchaseReservedDBInstancesOfferingInput) (*request.Request, *rds.PurchaseReservedDBInstancesOfferingOutput) + + PurchaseReservedDBInstancesOffering(*rds.PurchaseReservedDBInstancesOfferingInput) (*rds.PurchaseReservedDBInstancesOfferingOutput, error) + + RebootDBInstanceRequest(*rds.RebootDBInstanceInput) (*request.Request, *rds.RebootDBInstanceOutput) + + RebootDBInstance(*rds.RebootDBInstanceInput) (*rds.RebootDBInstanceOutput, error) + + RemoveSourceIdentifierFromSubscriptionRequest(*rds.RemoveSourceIdentifierFromSubscriptionInput) (*request.Request, *rds.RemoveSourceIdentifierFromSubscriptionOutput) + + RemoveSourceIdentifierFromSubscription(*rds.RemoveSourceIdentifierFromSubscriptionInput) (*rds.RemoveSourceIdentifierFromSubscriptionOutput, error) + + RemoveTagsFromResourceRequest(*rds.RemoveTagsFromResourceInput) (*request.Request, *rds.RemoveTagsFromResourceOutput) + + RemoveTagsFromResource(*rds.RemoveTagsFromResourceInput) (*rds.RemoveTagsFromResourceOutput, error) + + ResetDBClusterParameterGroupRequest(*rds.ResetDBClusterParameterGroupInput) (*request.Request, *rds.DBClusterParameterGroupNameMessage) + + ResetDBClusterParameterGroup(*rds.ResetDBClusterParameterGroupInput) (*rds.DBClusterParameterGroupNameMessage, error) + + ResetDBParameterGroupRequest(*rds.ResetDBParameterGroupInput) (*request.Request, *rds.DBParameterGroupNameMessage) + + ResetDBParameterGroup(*rds.ResetDBParameterGroupInput) (*rds.DBParameterGroupNameMessage, error) + + RestoreDBClusterFromSnapshotRequest(*rds.RestoreDBClusterFromSnapshotInput) (*request.Request, *rds.RestoreDBClusterFromSnapshotOutput) + + RestoreDBClusterFromSnapshot(*rds.RestoreDBClusterFromSnapshotInput) (*rds.RestoreDBClusterFromSnapshotOutput, error) + + RestoreDBClusterToPointInTimeRequest(*rds.RestoreDBClusterToPointInTimeInput) (*request.Request, *rds.RestoreDBClusterToPointInTimeOutput) + + RestoreDBClusterToPointInTime(*rds.RestoreDBClusterToPointInTimeInput) (*rds.RestoreDBClusterToPointInTimeOutput, error) + + 
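+	// Any type that embeds RDSAPI satisfies this interface, which makes the
+	// client easy to stub in unit tests. A minimal sketch (mockRDSClient is a
+	// hypothetical test helper, not part of this package):
+	//
+	//	type mockRDSClient struct {
+	//		rdsiface.RDSAPI
+	//	}
+	//
+	// Pass &mockRDSClient{} wherever an rdsiface.RDSAPI is expected, overriding
+	// only the methods the test exercises.
+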
+	RestoreDBInstanceFromDBSnapshotRequest(*rds.RestoreDBInstanceFromDBSnapshotInput) (*request.Request, *rds.RestoreDBInstanceFromDBSnapshotOutput)
+
+	RestoreDBInstanceFromDBSnapshot(*rds.RestoreDBInstanceFromDBSnapshotInput) (*rds.RestoreDBInstanceFromDBSnapshotOutput, error)
+
+	RestoreDBInstanceToPointInTimeRequest(*rds.RestoreDBInstanceToPointInTimeInput) (*request.Request, *rds.RestoreDBInstanceToPointInTimeOutput)
+
+	RestoreDBInstanceToPointInTime(*rds.RestoreDBInstanceToPointInTimeInput) (*rds.RestoreDBInstanceToPointInTimeOutput, error)
+
+	RevokeDBSecurityGroupIngressRequest(*rds.RevokeDBSecurityGroupIngressInput) (*request.Request, *rds.RevokeDBSecurityGroupIngressOutput)
+
+	RevokeDBSecurityGroupIngress(*rds.RevokeDBSecurityGroupIngressInput) (*rds.RevokeDBSecurityGroupIngressOutput, error)
+}
+
+var _ RDSAPI = (*rds.RDS)(nil)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/rds/service.go b/vendor/github.com/aws/aws-sdk-go/service/rds/service.go
new file mode 100644
index 000000000..1082f992a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/rds/service.go
@@ -0,0 +1,127 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package rds
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/query"
+)
+
+// Amazon Relational Database Service (Amazon RDS) is a web service that makes
+// it easier to set up, operate, and scale a relational database in the cloud.
+// It provides cost-efficient, resizeable capacity for an industry-standard
+// relational database and manages common database administration tasks, freeing
+// up developers to focus on what makes their applications and businesses unique.
+//
+// Amazon RDS gives you access to the capabilities of a MySQL, MariaDB, PostgreSQL,
+// Microsoft SQL Server, Oracle, or Amazon Aurora database server. These capabilities
+// mean that the code, applications, and tools you already use today with your
+// existing databases work with Amazon RDS without modification. Amazon RDS
+// automatically backs up your database and maintains the database software
+// that powers your DB instance. Amazon RDS is flexible: you can scale your
+// database instance's compute resources and storage capacity to meet your application's
+// demand. As with all Amazon Web Services, there are no up-front investments,
+// and you pay only for the resources you use.
+//
+// This interface reference for Amazon RDS contains documentation for a programming
+// or command line interface you can use to manage Amazon RDS. Note that Amazon
+// RDS is asynchronous, which means that some interfaces might require techniques
+// such as polling or callback functions to determine when a command has been
+// applied. In this reference, the parameter descriptions indicate whether a
+// command is applied immediately, on the next instance reboot, or during the
+// maintenance window. The reference structure is as follows, and below we list
+// some related topics from the user guide.
+//
+// Amazon RDS API Reference
+//
+// For the alphabetical list of API actions, see API Actions (http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_Operations.html).
+//
+// For the alphabetical list of data types, see Data Types (http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_Types.html).
+//
+// For a list of common query parameters, see Common Parameters (http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/CommonParameters.html).
+//
+// For descriptions of the error codes, see Common Errors (http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/CommonErrors.html).
+//
+// Amazon RDS User Guide
+//
+// For a summary of the Amazon RDS interfaces, see Available RDS Interfaces
+// (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Welcome.html#Welcome.Interfaces).
+//
+// For more information about how to use the Query API, see Using the Query
+// API (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Using_the_Query_API.html).
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type RDS struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "rds"
+
+// New creates a new instance of the RDS client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create an RDS client from just a session.
+//     svc := rds.New(mySession)
+//
+//     // Create an RDS client with additional configuration
+//     svc := rds.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *RDS {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *RDS {
+	svc := &RDS{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   ServiceName,
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "2014-10-31",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+	svc.Handlers.Build.PushBackNamed(query.BuildHandler)
+	svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler)
+	svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler)
+	svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for an RDS operation and runs any
+// custom request initialization.
+func (c *RDS) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	// Run custom request initialization if present
+	if initRequest != nil {
+		initRequest(req)
+	}
+
+	return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/rds/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/rds/waiters.go
new file mode 100644
index 000000000..b73b51bda
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/rds/waiters.go
@@ -0,0 +1,125 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
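+
+// The waiters below poll DescribeDBInstances on a fixed delay (here, every 30
+// seconds for up to 60 attempts) until the instance reaches the target state
+// or a failure state is observed. A minimal usage sketch (the instance
+// identifier is hypothetical):
+//
+//	svc := rds.New(session.New())
+//	err := svc.WaitUntilDBInstanceAvailable(&rds.DescribeDBInstancesInput{
+//		DBInstanceIdentifier: aws.String("mydbinstance"),
+//	})
+//	if err != nil {
+//		fmt.Println(err.Error()) // timed out or hit a failure state
+//	}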
+
+package rds
+
+import (
+	"github.com/aws/aws-sdk-go/private/waiter"
+)
+
+func (c *RDS) WaitUntilDBInstanceAvailable(input *DescribeDBInstancesInput) error {
+	waiterCfg := waiter.Config{
+		Operation:   "DescribeDBInstances",
+		Delay:       30,
+		MaxAttempts: 60,
+		Acceptors: []waiter.WaitAcceptor{
+			{
+				State:    "success",
+				Matcher:  "pathAll",
+				Argument: "DBInstances[].DBInstanceStatus",
+				Expected: "available",
+			},
+			{
+				State:    "failure",
+				Matcher:  "pathAny",
+				Argument: "DBInstances[].DBInstanceStatus",
+				Expected: "deleted",
+			},
+			{
+				State:    "failure",
+				Matcher:  "pathAny",
+				Argument: "DBInstances[].DBInstanceStatus",
+				Expected: "deleting",
+			},
+			{
+				State:    "failure",
+				Matcher:  "pathAny",
+				Argument: "DBInstances[].DBInstanceStatus",
+				Expected: "failed",
+			},
+			{
+				State:    "failure",
+				Matcher:  "pathAny",
+				Argument: "DBInstances[].DBInstanceStatus",
+				Expected: "incompatible-restore",
+			},
+			{
+				State:    "failure",
+				Matcher:  "pathAny",
+				Argument: "DBInstances[].DBInstanceStatus",
+				Expected: "incompatible-parameters",
+			},
+		},
+	}
+
+	w := waiter.Waiter{
+		Client: c,
+		Input:  input,
+		Config: waiterCfg,
+	}
+	return w.Wait()
+}
+
+func (c *RDS) WaitUntilDBInstanceDeleted(input *DescribeDBInstancesInput) error {
+	waiterCfg := waiter.Config{
+		Operation:   "DescribeDBInstances",
+		Delay:       30,
+		MaxAttempts: 60,
+		Acceptors: []waiter.WaitAcceptor{
+			{
+				State:    "success",
+				Matcher:  "pathAll",
+				Argument: "DBInstances[].DBInstanceStatus",
+				Expected: "deleted",
+			},
+			{
+				State:    "success",
+				Matcher:  "error",
+				Argument: "",
+				Expected: "DBInstanceNotFound",
+			},
+			{
+				State:    "failure",
+				Matcher:  "pathAny",
+				Argument: "DBInstances[].DBInstanceStatus",
+				Expected: "creating",
+			},
+			{
+				State:    "failure",
+				Matcher:  "pathAny",
+				Argument: "DBInstances[].DBInstanceStatus",
+				Expected: "modifying",
+			},
+			{
+				State:    "failure",
+				Matcher:  "pathAny",
+				Argument: "DBInstances[].DBInstanceStatus",
+				Expected: "rebooting",
+			},
+			{
+				State:    "failure",
+				Matcher:  "pathAny",
+				Argument: "DBInstances[].DBInstanceStatus",
+				Expected: "resetting-master-credentials",
+			},
+		},
+	}
+
+	w := waiter.Waiter{
+		Client: c,
+		Input:  input,
+		Config: waiterCfg,
+	}
+	return w.Wait()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/redshift/api.go b/vendor/github.com/aws/aws-sdk-go/service/redshift/api.go
new file mode 100644
index 000000000..d9d17aefa
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/redshift/api.go
@@ -0,0 +1,9829 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package redshift provides a client for Amazon Redshift.
+package redshift
+
+import (
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol"
+	"github.com/aws/aws-sdk-go/private/protocol/query"
+)
+
+const opAuthorizeClusterSecurityGroupIngress = "AuthorizeClusterSecurityGroupIngress"
+
+// AuthorizeClusterSecurityGroupIngressRequest generates a "aws/request.Request" representing the
+// client's request for the AuthorizeClusterSecurityGroupIngress operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AuthorizeClusterSecurityGroupIngress method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AuthorizeClusterSecurityGroupIngressRequest method. +// req, resp := client.AuthorizeClusterSecurityGroupIngressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) AuthorizeClusterSecurityGroupIngressRequest(input *AuthorizeClusterSecurityGroupIngressInput) (req *request.Request, output *AuthorizeClusterSecurityGroupIngressOutput) { + op := &request.Operation{ + Name: opAuthorizeClusterSecurityGroupIngress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AuthorizeClusterSecurityGroupIngressInput{} + } + + req = c.newRequest(op, input, output) + output = &AuthorizeClusterSecurityGroupIngressOutput{} + req.Data = output + return +} + +// Adds an inbound (ingress) rule to an Amazon Redshift security group. Depending +// on whether the application accessing your cluster is running on the Internet +// or an Amazon EC2 instance, you can authorize inbound access to either a Classless +// Interdomain Routing (CIDR)/Internet Protocol (IP) range or to an Amazon EC2 +// security group. You can add as many as 20 ingress rules to an Amazon Redshift +// security group. +// +// If you authorize access to an Amazon EC2 security group, specify EC2SecurityGroupName +// and EC2SecurityGroupOwnerId. The Amazon EC2 security group and Amazon Redshift +// cluster must be in the same AWS region. +// +// If you authorize access to a CIDR/IP address range, specify CIDRIP. For +// an overview of CIDR blocks, see the Wikipedia article on Classless Inter-Domain +// Routing (http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing). +// +// You must also associate the security group with a cluster so that clients +// running on these IP addresses or the EC2 instance are authorized to connect +// to the cluster. For information about managing security groups, go to Working +// with Security Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-security-groups.html) +// in the Amazon Redshift Cluster Management Guide. +func (c *Redshift) AuthorizeClusterSecurityGroupIngress(input *AuthorizeClusterSecurityGroupIngressInput) (*AuthorizeClusterSecurityGroupIngressOutput, error) { + req, out := c.AuthorizeClusterSecurityGroupIngressRequest(input) + err := req.Send() + return out, err +} + +const opAuthorizeSnapshotAccess = "AuthorizeSnapshotAccess" + +// AuthorizeSnapshotAccessRequest generates a "aws/request.Request" representing the +// client's request for the AuthorizeSnapshotAccess operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AuthorizeSnapshotAccess method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AuthorizeSnapshotAccessRequest method. +// req, resp := client.AuthorizeSnapshotAccessRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) AuthorizeSnapshotAccessRequest(input *AuthorizeSnapshotAccessInput) (req *request.Request, output *AuthorizeSnapshotAccessOutput) { + op := &request.Operation{ + Name: opAuthorizeSnapshotAccess, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AuthorizeSnapshotAccessInput{} + } + + req = c.newRequest(op, input, output) + output = &AuthorizeSnapshotAccessOutput{} + req.Data = output + return +} + +// Authorizes the specified AWS customer account to restore the specified snapshot. +// +// For more information about working with snapshots, go to Amazon Redshift +// Snapshots (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html) +// in the Amazon Redshift Cluster Management Guide. +func (c *Redshift) AuthorizeSnapshotAccess(input *AuthorizeSnapshotAccessInput) (*AuthorizeSnapshotAccessOutput, error) { + req, out := c.AuthorizeSnapshotAccessRequest(input) + err := req.Send() + return out, err +} + +const opCopyClusterSnapshot = "CopyClusterSnapshot" + +// CopyClusterSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the CopyClusterSnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CopyClusterSnapshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CopyClusterSnapshotRequest method. +// req, resp := client.CopyClusterSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) CopyClusterSnapshotRequest(input *CopyClusterSnapshotInput) (req *request.Request, output *CopyClusterSnapshotOutput) { + op := &request.Operation{ + Name: opCopyClusterSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CopyClusterSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &CopyClusterSnapshotOutput{} + req.Data = output + return +} + +// Copies the specified automated cluster snapshot to a new manual cluster snapshot. +// The source must be an automated snapshot and it must be in the available +// state. +// +// When you delete a cluster, Amazon Redshift deletes any automated snapshots +// of the cluster. Also, when the retention period of the snapshot expires, +// Amazon Redshift automatically deletes it. If you want to keep an automated +// snapshot for a longer period, you can make a manual copy of the snapshot. +// Manual snapshots are retained until you delete them. 
+//
+// For more information about working with snapshots, go to Amazon Redshift
+// Snapshots (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html)
+// in the Amazon Redshift Cluster Management Guide.
+func (c *Redshift) CopyClusterSnapshot(input *CopyClusterSnapshotInput) (*CopyClusterSnapshotOutput, error) {
+	req, out := c.CopyClusterSnapshotRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateCluster = "CreateCluster"
+
+// CreateClusterRequest generates a "aws/request.Request" representing the
+// client's request for the CreateCluster operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CreateCluster method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the CreateClusterRequest method.
+//    req, resp := client.CreateClusterRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Redshift) CreateClusterRequest(input *CreateClusterInput) (req *request.Request, output *CreateClusterOutput) {
+	op := &request.Operation{
+		Name:       opCreateCluster,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateClusterInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CreateClusterOutput{}
+	req.Data = output
+	return
+}
+
+// Creates a new cluster. To create the cluster in a virtual private cloud (VPC),
+// you must provide a cluster subnet group name. If you don't provide a cluster
+// subnet group name or the cluster security group parameter, Amazon Redshift
+// creates a non-VPC cluster and associates the default cluster security group
+// with the cluster. For more information about managing clusters, go to Amazon
+// Redshift Clusters (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
+// in the Amazon Redshift Cluster Management Guide.
+func (c *Redshift) CreateCluster(input *CreateClusterInput) (*CreateClusterOutput, error) {
+	req, out := c.CreateClusterRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateClusterParameterGroup = "CreateClusterParameterGroup"
+
+// CreateClusterParameterGroupRequest generates a "aws/request.Request" representing the
+// client's request for the CreateClusterParameterGroup operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CreateClusterParameterGroup method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the CreateClusterParameterGroupRequest method.
+// req, resp := client.CreateClusterParameterGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) CreateClusterParameterGroupRequest(input *CreateClusterParameterGroupInput) (req *request.Request, output *CreateClusterParameterGroupOutput) { + op := &request.Operation{ + Name: opCreateClusterParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateClusterParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateClusterParameterGroupOutput{} + req.Data = output + return +} + +// Creates an Amazon Redshift parameter group. +// +// Creating parameter groups is independent of creating clusters. You can associate +// a cluster with a parameter group when you create the cluster. You can also +// associate an existing cluster with a parameter group after the cluster is +// created by using ModifyCluster. +// +// Parameters in the parameter group define specific behavior that applies +// to the databases you create on the cluster. For more information about parameters +// and parameter groups, go to Amazon Redshift Parameter Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html) +// in the Amazon Redshift Cluster Management Guide. +func (c *Redshift) CreateClusterParameterGroup(input *CreateClusterParameterGroupInput) (*CreateClusterParameterGroupOutput, error) { + req, out := c.CreateClusterParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateClusterSecurityGroup = "CreateClusterSecurityGroup" + +// CreateClusterSecurityGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateClusterSecurityGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateClusterSecurityGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateClusterSecurityGroupRequest method. +// req, resp := client.CreateClusterSecurityGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) CreateClusterSecurityGroupRequest(input *CreateClusterSecurityGroupInput) (req *request.Request, output *CreateClusterSecurityGroupOutput) { + op := &request.Operation{ + Name: opCreateClusterSecurityGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateClusterSecurityGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateClusterSecurityGroupOutput{} + req.Data = output + return +} + +// Creates a new Amazon Redshift security group. You use security groups to +// control access to non-VPC clusters. +// +// For information about managing security groups, go to Amazon Redshift Cluster +// Security Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-security-groups.html) +// in the Amazon Redshift Cluster Management Guide. 
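+//
+// A minimal usage sketch, assuming svc is a *redshift.Redshift client (the
+// group name and description are illustrative placeholders):
+//
+//    out, err := svc.CreateClusterSecurityGroup(&redshift.CreateClusterSecurityGroupInput{
+//        ClusterSecurityGroupName: aws.String("my-security-group"),
+//        Description:              aws.String("access to my non-VPC clusters"),
+//    })
+//    if err == nil {
+//        fmt.Println(out.ClusterSecurityGroup)
+//    }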
+func (c *Redshift) CreateClusterSecurityGroup(input *CreateClusterSecurityGroupInput) (*CreateClusterSecurityGroupOutput, error) { + req, out := c.CreateClusterSecurityGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateClusterSnapshot = "CreateClusterSnapshot" + +// CreateClusterSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the CreateClusterSnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateClusterSnapshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateClusterSnapshotRequest method. +// req, resp := client.CreateClusterSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) CreateClusterSnapshotRequest(input *CreateClusterSnapshotInput) (req *request.Request, output *CreateClusterSnapshotOutput) { + op := &request.Operation{ + Name: opCreateClusterSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateClusterSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateClusterSnapshotOutput{} + req.Data = output + return +} + +// Creates a manual snapshot of the specified cluster. The cluster must be in +// the available state. +// +// For more information about working with snapshots, go to Amazon Redshift +// Snapshots (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html) +// in the Amazon Redshift Cluster Management Guide. +func (c *Redshift) CreateClusterSnapshot(input *CreateClusterSnapshotInput) (*CreateClusterSnapshotOutput, error) { + req, out := c.CreateClusterSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opCreateClusterSubnetGroup = "CreateClusterSubnetGroup" + +// CreateClusterSubnetGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateClusterSubnetGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateClusterSubnetGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateClusterSubnetGroupRequest method. 
+//    req, resp := client.CreateClusterSubnetGroupRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Redshift) CreateClusterSubnetGroupRequest(input *CreateClusterSubnetGroupInput) (req *request.Request, output *CreateClusterSubnetGroupOutput) {
+	op := &request.Operation{
+		Name:       opCreateClusterSubnetGroup,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateClusterSubnetGroupInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CreateClusterSubnetGroupOutput{}
+	req.Data = output
+	return
+}
+
+// Creates a new Amazon Redshift subnet group. You must provide a list of one
+// or more subnets in your existing Amazon Virtual Private Cloud (Amazon VPC)
+// when creating an Amazon Redshift subnet group.
+//
+// For information about subnet groups, go to Amazon Redshift Cluster Subnet
+// Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-cluster-subnet-groups.html)
+// in the Amazon Redshift Cluster Management Guide.
+func (c *Redshift) CreateClusterSubnetGroup(input *CreateClusterSubnetGroupInput) (*CreateClusterSubnetGroupOutput, error) {
+	req, out := c.CreateClusterSubnetGroupRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateEventSubscription = "CreateEventSubscription"
+
+// CreateEventSubscriptionRequest generates a "aws/request.Request" representing the
+// client's request for the CreateEventSubscription operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CreateEventSubscription method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the CreateEventSubscriptionRequest method.
+//    req, resp := client.CreateEventSubscriptionRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Redshift) CreateEventSubscriptionRequest(input *CreateEventSubscriptionInput) (req *request.Request, output *CreateEventSubscriptionOutput) {
+	op := &request.Operation{
+		Name:       opCreateEventSubscription,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateEventSubscriptionInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CreateEventSubscriptionOutput{}
+	req.Data = output
+	return
+}
+
+// Creates an Amazon Redshift event notification subscription. This action requires
+// an ARN (Amazon Resource Name) of an Amazon SNS topic created by either the
+// Amazon Redshift console, the Amazon SNS console, or the Amazon SNS API. To
+// obtain an ARN with Amazon SNS, you must create a topic in Amazon SNS and
+// subscribe to the topic. The ARN is displayed in the SNS console.
+//
+// You can specify the source type, and lists of Amazon Redshift source IDs,
+// event categories, and event severities. Notifications will be sent for all
+// events you want that match those criteria. For example, you can specify source
+// type = cluster, source ID = my-cluster-1 and mycluster2, event categories
+// = Availability, Backup, and severity = ERROR. The subscription will only
+// send notifications for those ERROR events in the Availability and Backup
+// categories for the specified clusters.
+//
+// If you specify both the source type and source IDs, such as source type
+// = cluster and source identifier = my-cluster-1, notifications will be sent
+// for all the cluster events for my-cluster-1. If you specify a source type
+// but do not specify a source identifier, you will receive notice of the events
+// for the objects of that type in your AWS account. If you do not specify either
+// the SourceType or the SourceIdentifier, you will be notified of events generated
+// from all Amazon Redshift sources belonging to your AWS account. You must
+// specify a source type if you specify a source ID.
+func (c *Redshift) CreateEventSubscription(input *CreateEventSubscriptionInput) (*CreateEventSubscriptionOutput, error) {
+	req, out := c.CreateEventSubscriptionRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateHsmClientCertificate = "CreateHsmClientCertificate"
+
+// CreateHsmClientCertificateRequest generates a "aws/request.Request" representing the
+// client's request for the CreateHsmClientCertificate operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CreateHsmClientCertificate method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the CreateHsmClientCertificateRequest method.
+//    req, resp := client.CreateHsmClientCertificateRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Redshift) CreateHsmClientCertificateRequest(input *CreateHsmClientCertificateInput) (req *request.Request, output *CreateHsmClientCertificateOutput) {
+	op := &request.Operation{
+		Name:       opCreateHsmClientCertificate,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateHsmClientCertificateInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CreateHsmClientCertificateOutput{}
+	req.Data = output
+	return
+}
+
+// Creates an HSM client certificate that an Amazon Redshift cluster will use
+// to connect to the client's HSM in order to store and retrieve the keys used
+// to encrypt the cluster databases.
+//
+// The command returns a public key, which you must store in the HSM. In addition
+// to creating the HSM certificate, you must create an Amazon Redshift HSM configuration
+// that provides a cluster the information needed to store and use encryption
+// keys in the HSM. For more information, go to Hardware Security Modules (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-HSM.html)
+// in the Amazon Redshift Cluster Management Guide.
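+//
+// A minimal usage sketch, assuming svc is a *redshift.Redshift client (the
+// certificate identifier is an illustrative placeholder):
+//
+//    out, err := svc.CreateHsmClientCertificate(&redshift.CreateHsmClientCertificateInput{
+//        HsmClientCertificateIdentifier: aws.String("my-hsm-cert"),
+//    })
+//    if err == nil {
+//        // Public key to store in the HSM.
+//        fmt.Println(aws.StringValue(out.HsmClientCertificate.HsmClientCertificatePublicKey))
+//    }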
+func (c *Redshift) CreateHsmClientCertificate(input *CreateHsmClientCertificateInput) (*CreateHsmClientCertificateOutput, error) { + req, out := c.CreateHsmClientCertificateRequest(input) + err := req.Send() + return out, err +} + +const opCreateHsmConfiguration = "CreateHsmConfiguration" + +// CreateHsmConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the CreateHsmConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateHsmConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateHsmConfigurationRequest method. +// req, resp := client.CreateHsmConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) CreateHsmConfigurationRequest(input *CreateHsmConfigurationInput) (req *request.Request, output *CreateHsmConfigurationOutput) { + op := &request.Operation{ + Name: opCreateHsmConfiguration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateHsmConfigurationInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateHsmConfigurationOutput{} + req.Data = output + return +} + +// Creates an HSM configuration that contains the information required by an +// Amazon Redshift cluster to store and use database encryption keys in a Hardware +// Security Module (HSM). After creating the HSM configuration, you can specify +// it as a parameter when creating a cluster. The cluster will then store its +// encryption keys in the HSM. +// +// In addition to creating an HSM configuration, you must also create an HSM +// client certificate. For more information, go to Hardware Security Modules +// (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-HSM.html) in +// the Amazon Redshift Cluster Management Guide. +func (c *Redshift) CreateHsmConfiguration(input *CreateHsmConfigurationInput) (*CreateHsmConfigurationOutput, error) { + req, out := c.CreateHsmConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opCreateSnapshotCopyGrant = "CreateSnapshotCopyGrant" + +// CreateSnapshotCopyGrantRequest generates a "aws/request.Request" representing the +// client's request for the CreateSnapshotCopyGrant operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateSnapshotCopyGrant method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateSnapshotCopyGrantRequest method. 
+// req, resp := client.CreateSnapshotCopyGrantRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) CreateSnapshotCopyGrantRequest(input *CreateSnapshotCopyGrantInput) (req *request.Request, output *CreateSnapshotCopyGrantOutput) { + op := &request.Operation{ + Name: opCreateSnapshotCopyGrant, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSnapshotCopyGrantInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateSnapshotCopyGrantOutput{} + req.Data = output + return +} + +// Creates a snapshot copy grant that permits Amazon Redshift to use a customer +// master key (CMK) from AWS Key Management Service (AWS KMS) to encrypt copied +// snapshots in a destination region. +// +// For more information about managing snapshot copy grants, go to Amazon +// Redshift Database Encryption (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-db-encryption.html) +// in the Amazon Redshift Cluster Management Guide. +func (c *Redshift) CreateSnapshotCopyGrant(input *CreateSnapshotCopyGrantInput) (*CreateSnapshotCopyGrantOutput, error) { + req, out := c.CreateSnapshotCopyGrantRequest(input) + err := req.Send() + return out, err +} + +const opCreateTags = "CreateTags" + +// CreateTagsRequest generates a "aws/request.Request" representing the +// client's request for the CreateTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateTagsRequest method. +// req, resp := client.CreateTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) CreateTagsRequest(input *CreateTagsInput) (req *request.Request, output *CreateTagsOutput) { + op := &request.Operation{ + Name: opCreateTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTagsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CreateTagsOutput{} + req.Data = output + return +} + +// Adds one or more tags to a specified resource. +// +// A resource can have up to 10 tags. If you try to create more than 10 tags +// for a resource, you will receive an error and the attempt will fail. +// +// If you specify a key that already exists for the resource, the value for +// that key will be updated with the new value. +func (c *Redshift) CreateTags(input *CreateTagsInput) (*CreateTagsOutput, error) { + req, out := c.CreateTagsRequest(input) + err := req.Send() + return out, err +} + +const opDeleteCluster = "DeleteCluster" + +// DeleteClusterRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCluster operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DeleteCluster method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DeleteClusterRequest method.
+//    req, resp := client.DeleteClusterRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Redshift) DeleteClusterRequest(input *DeleteClusterInput) (req *request.Request, output *DeleteClusterOutput) {
+	op := &request.Operation{
+		Name:       opDeleteCluster,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteClusterInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DeleteClusterOutput{}
+	req.Data = output
+	return
+}
+
+// Deletes a previously provisioned cluster. A successful response from the
+// web service indicates that the request was received correctly. Use DescribeClusters
+// to monitor the status of the deletion. The delete operation cannot be canceled
+// or reverted once submitted. For more information about managing clusters,
+// go to Amazon Redshift Clusters (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
+// in the Amazon Redshift Cluster Management Guide.
+//
+// If you want to shut down the cluster and retain it for future use, set
+// SkipFinalClusterSnapshot to false and specify a name for FinalClusterSnapshotIdentifier.
+// You can later restore this snapshot to resume using the cluster. If a final
+// cluster snapshot is requested, the status of the cluster will be "final-snapshot"
+// while the snapshot is being taken, then it's "deleting" once Amazon Redshift
+// begins deleting the cluster.
+//
+// For more information about managing clusters, go to Amazon Redshift Clusters
+// (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
+// in the Amazon Redshift Cluster Management Guide.
+func (c *Redshift) DeleteCluster(input *DeleteClusterInput) (*DeleteClusterOutput, error) {
+	req, out := c.DeleteClusterRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteClusterParameterGroup = "DeleteClusterParameterGroup"
+
+// DeleteClusterParameterGroupRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteClusterParameterGroup operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DeleteClusterParameterGroup method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DeleteClusterParameterGroupRequest method.
+// req, resp := client.DeleteClusterParameterGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) DeleteClusterParameterGroupRequest(input *DeleteClusterParameterGroupInput) (req *request.Request, output *DeleteClusterParameterGroupOutput) { + op := &request.Operation{ + Name: opDeleteClusterParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteClusterParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteClusterParameterGroupOutput{} + req.Data = output + return +} + +// Deletes a specified Amazon Redshift parameter group. You cannot delete a +// parameter group if it is associated with a cluster. +func (c *Redshift) DeleteClusterParameterGroup(input *DeleteClusterParameterGroupInput) (*DeleteClusterParameterGroupOutput, error) { + req, out := c.DeleteClusterParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteClusterSecurityGroup = "DeleteClusterSecurityGroup" + +// DeleteClusterSecurityGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteClusterSecurityGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteClusterSecurityGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteClusterSecurityGroupRequest method. +// req, resp := client.DeleteClusterSecurityGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) DeleteClusterSecurityGroupRequest(input *DeleteClusterSecurityGroupInput) (req *request.Request, output *DeleteClusterSecurityGroupOutput) { + op := &request.Operation{ + Name: opDeleteClusterSecurityGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteClusterSecurityGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteClusterSecurityGroupOutput{} + req.Data = output + return +} + +// Deletes an Amazon Redshift security group. +// +// You cannot delete a security group that is associated with any clusters. +// You cannot delete the default security group. For information about managing +// security groups, go to Amazon Redshift Cluster Security Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-security-groups.html) +// in the Amazon Redshift Cluster Management Guide. 
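+//
+// A sketch of the request-object pattern described above, here logging the
+// operation name before the request is sent (svc is assumed to be a
+// *redshift.Redshift client; the handler body is illustrative):
+//
+//    req, _ := svc.DeleteClusterSecurityGroupRequest(&redshift.DeleteClusterSecurityGroupInput{
+//        ClusterSecurityGroupName: aws.String("my-security-group"),
+//    })
+//    req.Handlers.Send.PushFront(func(r *request.Request) {
+//        log.Println("sending", r.Operation.Name)
+//    })
+//    err := req.Send()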
+func (c *Redshift) DeleteClusterSecurityGroup(input *DeleteClusterSecurityGroupInput) (*DeleteClusterSecurityGroupOutput, error) {
+	req, out := c.DeleteClusterSecurityGroupRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteClusterSnapshot = "DeleteClusterSnapshot"
+
+// DeleteClusterSnapshotRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteClusterSnapshot operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DeleteClusterSnapshot method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DeleteClusterSnapshotRequest method.
+//    req, resp := client.DeleteClusterSnapshotRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Redshift) DeleteClusterSnapshotRequest(input *DeleteClusterSnapshotInput) (req *request.Request, output *DeleteClusterSnapshotOutput) {
+	op := &request.Operation{
+		Name:       opDeleteClusterSnapshot,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteClusterSnapshotInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DeleteClusterSnapshotOutput{}
+	req.Data = output
+	return
+}
+
+// Deletes the specified manual snapshot. The snapshot must be in the available
+// state, with no other users authorized to access the snapshot.
+//
+// Unlike automated snapshots, manual snapshots are retained even after you
+// delete your cluster. Amazon Redshift does not delete your manual snapshots.
+// You must delete a manual snapshot explicitly to avoid getting charged. If other
+// accounts are authorized to access the snapshot, you must revoke all of the
+// authorizations before you can delete the snapshot.
+func (c *Redshift) DeleteClusterSnapshot(input *DeleteClusterSnapshotInput) (*DeleteClusterSnapshotOutput, error) {
+	req, out := c.DeleteClusterSnapshotRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteClusterSubnetGroup = "DeleteClusterSubnetGroup"
+
+// DeleteClusterSubnetGroupRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteClusterSubnetGroup operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DeleteClusterSubnetGroup method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DeleteClusterSubnetGroupRequest method.
+// req, resp := client.DeleteClusterSubnetGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) DeleteClusterSubnetGroupRequest(input *DeleteClusterSubnetGroupInput) (req *request.Request, output *DeleteClusterSubnetGroupOutput) { + op := &request.Operation{ + Name: opDeleteClusterSubnetGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteClusterSubnetGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteClusterSubnetGroupOutput{} + req.Data = output + return +} + +// Deletes the specified cluster subnet group. +func (c *Redshift) DeleteClusterSubnetGroup(input *DeleteClusterSubnetGroupInput) (*DeleteClusterSubnetGroupOutput, error) { + req, out := c.DeleteClusterSubnetGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteEventSubscription = "DeleteEventSubscription" + +// DeleteEventSubscriptionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteEventSubscription operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteEventSubscription method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteEventSubscriptionRequest method. +// req, resp := client.DeleteEventSubscriptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) DeleteEventSubscriptionRequest(input *DeleteEventSubscriptionInput) (req *request.Request, output *DeleteEventSubscriptionOutput) { + op := &request.Operation{ + Name: opDeleteEventSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteEventSubscriptionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteEventSubscriptionOutput{} + req.Data = output + return +} + +// Deletes an Amazon Redshift event notification subscription. +func (c *Redshift) DeleteEventSubscription(input *DeleteEventSubscriptionInput) (*DeleteEventSubscriptionOutput, error) { + req, out := c.DeleteEventSubscriptionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteHsmClientCertificate = "DeleteHsmClientCertificate" + +// DeleteHsmClientCertificateRequest generates a "aws/request.Request" representing the +// client's request for the DeleteHsmClientCertificate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the DeleteHsmClientCertificate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteHsmClientCertificateRequest method. +// req, resp := client.DeleteHsmClientCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) DeleteHsmClientCertificateRequest(input *DeleteHsmClientCertificateInput) (req *request.Request, output *DeleteHsmClientCertificateOutput) { + op := &request.Operation{ + Name: opDeleteHsmClientCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteHsmClientCertificateInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteHsmClientCertificateOutput{} + req.Data = output + return +} + +// Deletes the specified HSM client certificate. +func (c *Redshift) DeleteHsmClientCertificate(input *DeleteHsmClientCertificateInput) (*DeleteHsmClientCertificateOutput, error) { + req, out := c.DeleteHsmClientCertificateRequest(input) + err := req.Send() + return out, err +} + +const opDeleteHsmConfiguration = "DeleteHsmConfiguration" + +// DeleteHsmConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteHsmConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteHsmConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteHsmConfigurationRequest method. +// req, resp := client.DeleteHsmConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) DeleteHsmConfigurationRequest(input *DeleteHsmConfigurationInput) (req *request.Request, output *DeleteHsmConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteHsmConfiguration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteHsmConfigurationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteHsmConfigurationOutput{} + req.Data = output + return +} + +// Deletes the specified Amazon Redshift HSM configuration. +func (c *Redshift) DeleteHsmConfiguration(input *DeleteHsmConfigurationInput) (*DeleteHsmConfigurationOutput, error) { + req, out := c.DeleteHsmConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSnapshotCopyGrant = "DeleteSnapshotCopyGrant" + +// DeleteSnapshotCopyGrantRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSnapshotCopyGrant operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteSnapshotCopyGrant method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteSnapshotCopyGrantRequest method. +// req, resp := client.DeleteSnapshotCopyGrantRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) DeleteSnapshotCopyGrantRequest(input *DeleteSnapshotCopyGrantInput) (req *request.Request, output *DeleteSnapshotCopyGrantOutput) { + op := &request.Operation{ + Name: opDeleteSnapshotCopyGrant, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSnapshotCopyGrantInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteSnapshotCopyGrantOutput{} + req.Data = output + return +} + +// Deletes the specified snapshot copy grant. +func (c *Redshift) DeleteSnapshotCopyGrant(input *DeleteSnapshotCopyGrantInput) (*DeleteSnapshotCopyGrantOutput, error) { + req, out := c.DeleteSnapshotCopyGrantRequest(input) + err := req.Send() + return out, err +} + +const opDeleteTags = "DeleteTags" + +// DeleteTagsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteTagsRequest method. +// req, resp := client.DeleteTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) DeleteTagsRequest(input *DeleteTagsInput) (req *request.Request, output *DeleteTagsOutput) { + op := &request.Operation{ + Name: opDeleteTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTagsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteTagsOutput{} + req.Data = output + return +} + +// Deletes a tag or tags from a resource. You must provide the ARN of the resource +// from which you want to delete the tag or tags. 
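+//
+// A minimal usage sketch, assuming svc is a *redshift.Redshift client (the
+// ARN and tag key are illustrative placeholders):
+//
+//    _, err := svc.DeleteTags(&redshift.DeleteTagsInput{
+//        ResourceName: aws.String("arn:aws:redshift:us-west-2:123456789012:cluster:my-cluster"),
+//        TagKeys:      []*string{aws.String("environment")},
+//    })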
+func (c *Redshift) DeleteTags(input *DeleteTagsInput) (*DeleteTagsOutput, error) { + req, out := c.DeleteTagsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeClusterParameterGroups = "DescribeClusterParameterGroups" + +// DescribeClusterParameterGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeClusterParameterGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeClusterParameterGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeClusterParameterGroupsRequest method. +// req, resp := client.DescribeClusterParameterGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) DescribeClusterParameterGroupsRequest(input *DescribeClusterParameterGroupsInput) (req *request.Request, output *DescribeClusterParameterGroupsOutput) { + op := &request.Operation{ + Name: opDescribeClusterParameterGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeClusterParameterGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeClusterParameterGroupsOutput{} + req.Data = output + return +} + +// Returns a list of Amazon Redshift parameter groups, including parameter groups +// you created and the default parameter group. For each parameter group, the +// response includes the parameter group name, description, and parameter group +// family name. You can optionally specify a name to retrieve the description +// of a specific parameter group. +// +// For more information about parameters and parameter groups, go to Amazon +// Redshift Parameter Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html) +// in the Amazon Redshift Cluster Management Guide. +// +// If you specify both tag keys and tag values in the same request, Amazon +// Redshift returns all parameter groups that match any combination of the specified +// keys and values. For example, if you have owner and environment for tag keys, +// and admin and test for tag values, all parameter groups that have any combination +// of those values are returned. +// +// If both tag keys and values are omitted from the request, parameter groups +// are returned regardless of whether they have tag keys or values associated +// with them. +func (c *Redshift) DescribeClusterParameterGroups(input *DescribeClusterParameterGroupsInput) (*DescribeClusterParameterGroupsOutput, error) { + req, out := c.DescribeClusterParameterGroupsRequest(input) + err := req.Send() + return out, err +} + +// DescribeClusterParameterGroupsPages iterates over the pages of a DescribeClusterParameterGroups operation, +// calling the "fn" function with the response data for each page. 
To stop
+// iterating, return false from the fn function.
+//
+// See DescribeClusterParameterGroups method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+//    // Example iterating over at most 3 pages of a DescribeClusterParameterGroups operation.
+//    pageNum := 0
+//    err := client.DescribeClusterParameterGroupsPages(params,
+//        func(page *DescribeClusterParameterGroupsOutput, lastPage bool) bool {
+//            pageNum++
+//            fmt.Println(page)
+//            return pageNum <= 3
+//        })
+//
+func (c *Redshift) DescribeClusterParameterGroupsPages(input *DescribeClusterParameterGroupsInput, fn func(p *DescribeClusterParameterGroupsOutput, lastPage bool) (shouldContinue bool)) error {
+	page, _ := c.DescribeClusterParameterGroupsRequest(input)
+	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+	return page.EachPage(func(p interface{}, lastPage bool) bool {
+		return fn(p.(*DescribeClusterParameterGroupsOutput), lastPage)
+	})
+}
+
+const opDescribeClusterParameters = "DescribeClusterParameters"
+
+// DescribeClusterParametersRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeClusterParameters operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeClusterParameters method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DescribeClusterParametersRequest method.
+//    req, resp := client.DescribeClusterParametersRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Redshift) DescribeClusterParametersRequest(input *DescribeClusterParametersInput) (req *request.Request, output *DescribeClusterParametersOutput) {
+	op := &request.Operation{
+		Name:       opDescribeClusterParameters,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"Marker"},
+			OutputTokens:    []string{"Marker"},
+			LimitToken:      "MaxRecords",
+			TruncationToken: "",
+		},
+	}
+
+	if input == nil {
+		input = &DescribeClusterParametersInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeClusterParametersOutput{}
+	req.Data = output
+	return
+}
+
+// Returns a detailed list of parameters contained within the specified Amazon
+// Redshift parameter group. For each parameter the response includes information
+// such as parameter name, description, data type, value, whether the parameter
+// value is modifiable, and so on.
+//
+// You can specify a source filter to retrieve parameters of only a specific type.
+// For example, to retrieve parameters that were modified by a user action such
+// as from ModifyClusterParameterGroup, you can specify source equal to user.
+//
+// For more information about parameters and parameter groups, go to Amazon
+// Redshift Parameter Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html)
+// in the Amazon Redshift Cluster Management Guide.
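+//
+// A sketch of walking every page with the Pages variant, assuming svc is a
+// *redshift.Redshift client (the parameter group name is an illustrative
+// placeholder):
+//
+//    err := svc.DescribeClusterParametersPages(&redshift.DescribeClusterParametersInput{
+//        ParameterGroupName: aws.String("my-parameter-group"),
+//    }, func(page *redshift.DescribeClusterParametersOutput, lastPage bool) bool {
+//        for _, p := range page.Parameters {
+//            fmt.Println(aws.StringValue(p.ParameterName), aws.StringValue(p.ParameterValue))
+//        }
+//        return true // keep paging until the last page
+//    })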
+func (c *Redshift) DescribeClusterParameters(input *DescribeClusterParametersInput) (*DescribeClusterParametersOutput, error) {
+	req, out := c.DescribeClusterParametersRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// DescribeClusterParametersPages iterates over the pages of a DescribeClusterParameters operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See DescribeClusterParameters method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+//    // Example iterating over at most 3 pages of a DescribeClusterParameters operation.
+//    pageNum := 0
+//    err := client.DescribeClusterParametersPages(params,
+//        func(page *DescribeClusterParametersOutput, lastPage bool) bool {
+//            pageNum++
+//            fmt.Println(page)
+//            return pageNum <= 3
+//        })
+//
+func (c *Redshift) DescribeClusterParametersPages(input *DescribeClusterParametersInput, fn func(p *DescribeClusterParametersOutput, lastPage bool) (shouldContinue bool)) error {
+	page, _ := c.DescribeClusterParametersRequest(input)
+	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+	return page.EachPage(func(p interface{}, lastPage bool) bool {
+		return fn(p.(*DescribeClusterParametersOutput), lastPage)
+	})
+}
+
+const opDescribeClusterSecurityGroups = "DescribeClusterSecurityGroups"
+
+// DescribeClusterSecurityGroupsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeClusterSecurityGroups operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeClusterSecurityGroups method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DescribeClusterSecurityGroupsRequest method.
+//    req, resp := client.DescribeClusterSecurityGroupsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Redshift) DescribeClusterSecurityGroupsRequest(input *DescribeClusterSecurityGroupsInput) (req *request.Request, output *DescribeClusterSecurityGroupsOutput) {
+	op := &request.Operation{
+		Name:       opDescribeClusterSecurityGroups,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"Marker"},
+			OutputTokens:    []string{"Marker"},
+			LimitToken:      "MaxRecords",
+			TruncationToken: "",
+		},
+	}
+
+	if input == nil {
+		input = &DescribeClusterSecurityGroupsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeClusterSecurityGroupsOutput{}
+	req.Data = output
+	return
+}
+
+// Returns information about Amazon Redshift security groups. If the name of
+// a security group is specified, the response will contain information about
+// only that security group.
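+//
+// A sketch of the tag-based filtering described below, assuming svc is a
+// *redshift.Redshift client (the keys and values are illustrative
+// placeholders):
+//
+//    out, err := svc.DescribeClusterSecurityGroups(&redshift.DescribeClusterSecurityGroupsInput{
+//        TagKeys:   []*string{aws.String("owner"), aws.String("environment")},
+//        TagValues: []*string{aws.String("admin"), aws.String("test")},
+//    })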
+// +// For information about managing security groups, go to Amazon Redshift Cluster +// Security Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-security-groups.html) +// in the Amazon Redshift Cluster Management Guide. +// +// If you specify both tag keys and tag values in the same request, Amazon +// Redshift returns all security groups that match any combination of the specified +// keys and values. For example, if you have owner and environment for tag keys, +// and admin and test for tag values, all security groups that have any combination +// of those values are returned. +// +// If both tag keys and values are omitted from the request, security groups +// are returned regardless of whether they have tag keys or values associated +// with them. +func (c *Redshift) DescribeClusterSecurityGroups(input *DescribeClusterSecurityGroupsInput) (*DescribeClusterSecurityGroupsOutput, error) { + req, out := c.DescribeClusterSecurityGroupsRequest(input) + err := req.Send() + return out, err +} + +// DescribeClusterSecurityGroupsPages iterates over the pages of a DescribeClusterSecurityGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeClusterSecurityGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeClusterSecurityGroups operation. +// pageNum := 0 +// err := client.DescribeClusterSecurityGroupsPages(params, +// func(page *DescribeClusterSecurityGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Redshift) DescribeClusterSecurityGroupsPages(input *DescribeClusterSecurityGroupsInput, fn func(p *DescribeClusterSecurityGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeClusterSecurityGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeClusterSecurityGroupsOutput), lastPage) + }) +} + +const opDescribeClusterSnapshots = "DescribeClusterSnapshots" + +// DescribeClusterSnapshotsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeClusterSnapshots operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeClusterSnapshots method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeClusterSnapshotsRequest method. 
+//    req, resp := client.DescribeClusterSnapshotsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Redshift) DescribeClusterSnapshotsRequest(input *DescribeClusterSnapshotsInput) (req *request.Request, output *DescribeClusterSnapshotsOutput) {
+	op := &request.Operation{
+		Name:       opDescribeClusterSnapshots,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"Marker"},
+			OutputTokens:    []string{"Marker"},
+			LimitToken:      "MaxRecords",
+			TruncationToken: "",
+		},
+	}
+
+	if input == nil {
+		input = &DescribeClusterSnapshotsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeClusterSnapshotsOutput{}
+	req.Data = output
+	return
+}
+
+// Returns one or more snapshot objects, which contain metadata about your cluster
+// snapshots. By default, this operation returns information about all snapshots
+// of all clusters that are owned by your AWS customer account. No information
+// is returned for snapshots owned by inactive AWS customer accounts.
+//
+// If you specify both tag keys and tag values in the same request, Amazon
+// Redshift returns all snapshots that match any combination of the specified
+// keys and values. For example, if you have owner and environment for tag keys,
+// and admin and test for tag values, all snapshots that have any combination
+// of those values are returned. Only snapshots that you own are returned in
+// the response; shared snapshots are not returned with the tag key and tag
+// value request parameters.
+//
+// If both tag keys and values are omitted from the request, snapshots are
+// returned regardless of whether they have tag keys or values associated with
+// them.
+func (c *Redshift) DescribeClusterSnapshots(input *DescribeClusterSnapshotsInput) (*DescribeClusterSnapshotsOutput, error) {
+	req, out := c.DescribeClusterSnapshotsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// DescribeClusterSnapshotsPages iterates over the pages of a DescribeClusterSnapshots operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See DescribeClusterSnapshots method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+//    // Example iterating over at most 3 pages of a DescribeClusterSnapshots operation.
+//    pageNum := 0
+//    err := client.DescribeClusterSnapshotsPages(params,
+//        func(page *DescribeClusterSnapshotsOutput, lastPage bool) bool {
+//            pageNum++
+//            fmt.Println(page)
+//            return pageNum <= 3
+//        })
+//
+func (c *Redshift) DescribeClusterSnapshotsPages(input *DescribeClusterSnapshotsInput, fn func(p *DescribeClusterSnapshotsOutput, lastPage bool) (shouldContinue bool)) error {
+	page, _ := c.DescribeClusterSnapshotsRequest(input)
+	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+	return page.EachPage(func(p interface{}, lastPage bool) bool {
+		return fn(p.(*DescribeClusterSnapshotsOutput), lastPage)
+	})
+}
+
+const opDescribeClusterSubnetGroups = "DescribeClusterSubnetGroups"
+
+// DescribeClusterSubnetGroupsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeClusterSubnetGroups operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeClusterSubnetGroups method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DescribeClusterSubnetGroupsRequest method.
+// req, resp := client.DescribeClusterSubnetGroupsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *Redshift) DescribeClusterSubnetGroupsRequest(input *DescribeClusterSubnetGroupsInput) (req *request.Request, output *DescribeClusterSubnetGroupsOutput) {
+ op := &request.Operation{
+ Name: opDescribeClusterSubnetGroups,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"Marker"},
+ OutputTokens: []string{"Marker"},
+ LimitToken: "MaxRecords",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &DescribeClusterSubnetGroupsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DescribeClusterSubnetGroupsOutput{}
+ req.Data = output
+ return
+}
+
+// Returns one or more cluster subnet group objects, which contain metadata
+// about your cluster subnet groups. By default, this operation returns information
+// about all cluster subnet groups that are defined in your AWS account.
+//
+// If you specify both tag keys and tag values in the same request, Amazon
+// Redshift returns all subnet groups that match any combination of the specified
+// keys and values. For example, if you have owner and environment for tag keys,
+// and admin and test for tag values, all subnet groups that have any combination
+// of those values are returned.
+//
+// If both tag keys and values are omitted from the request, subnet groups
+// are returned regardless of whether they have tag keys or values associated
+// with them.
+func (c *Redshift) DescribeClusterSubnetGroups(input *DescribeClusterSubnetGroupsInput) (*DescribeClusterSubnetGroupsOutput, error) {
+ req, out := c.DescribeClusterSubnetGroupsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// DescribeClusterSubnetGroupsPages iterates over the pages of a DescribeClusterSubnetGroups operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See DescribeClusterSubnetGroups method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a DescribeClusterSubnetGroups operation.
+// pageNum := 0
+// err := client.DescribeClusterSubnetGroupsPages(params,
+// func(page *DescribeClusterSubnetGroupsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *Redshift) DescribeClusterSubnetGroupsPages(input *DescribeClusterSubnetGroupsInput, fn func(p *DescribeClusterSubnetGroupsOutput, lastPage bool) (shouldContinue bool)) error {
+ page, _ := c.DescribeClusterSubnetGroupsRequest(input)
+ page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
+ return fn(p.(*DescribeClusterSubnetGroupsOutput), lastPage)
+ })
+}
+
+const opDescribeClusterVersions = "DescribeClusterVersions"
+
+// DescribeClusterVersionsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeClusterVersions operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeClusterVersions method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DescribeClusterVersionsRequest method.
+// req, resp := client.DescribeClusterVersionsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *Redshift) DescribeClusterVersionsRequest(input *DescribeClusterVersionsInput) (req *request.Request, output *DescribeClusterVersionsOutput) {
+ op := &request.Operation{
+ Name: opDescribeClusterVersions,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"Marker"},
+ OutputTokens: []string{"Marker"},
+ LimitToken: "MaxRecords",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &DescribeClusterVersionsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DescribeClusterVersionsOutput{}
+ req.Data = output
+ return
+}
+
+// Returns descriptions of the available Amazon Redshift cluster versions. You
+// can call this operation even before creating any clusters to learn more about
+// the Amazon Redshift versions. For more information about managing clusters,
+// go to Amazon Redshift Clusters (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
+// in the Amazon Redshift Cluster Management Guide.
+func (c *Redshift) DescribeClusterVersions(input *DescribeClusterVersionsInput) (*DescribeClusterVersionsOutput, error) {
+ req, out := c.DescribeClusterVersionsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// DescribeClusterVersionsPages iterates over the pages of a DescribeClusterVersions operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See DescribeClusterVersions method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a DescribeClusterVersions operation.
+// pageNum := 0
+// err := client.DescribeClusterVersionsPages(params,
+// func(page *DescribeClusterVersionsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *Redshift) DescribeClusterVersionsPages(input *DescribeClusterVersionsInput, fn func(p *DescribeClusterVersionsOutput, lastPage bool) (shouldContinue bool)) error {
+ page, _ := c.DescribeClusterVersionsRequest(input)
+ page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
+ return fn(p.(*DescribeClusterVersionsOutput), lastPage)
+ })
+}
+
+const opDescribeClusters = "DescribeClusters"
+
+// DescribeClustersRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeClusters operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeClusters method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DescribeClustersRequest method.
+// req, resp := client.DescribeClustersRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *Redshift) DescribeClustersRequest(input *DescribeClustersInput) (req *request.Request, output *DescribeClustersOutput) {
+ op := &request.Operation{
+ Name: opDescribeClusters,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"Marker"},
+ OutputTokens: []string{"Marker"},
+ LimitToken: "MaxRecords",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &DescribeClustersInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DescribeClustersOutput{}
+ req.Data = output
+ return
+}
+
+// Returns properties of provisioned clusters including general cluster properties,
+// cluster database properties, maintenance and backup properties, and security
+// and access properties. This operation supports pagination. For more information
+// about managing clusters, go to Amazon Redshift Clusters (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
+// in the Amazon Redshift Cluster Management Guide.
+//
+// If you specify both tag keys and tag values in the same request, Amazon
+// Redshift returns all clusters that match any combination of the specified
+// keys and values. For example, if you have owner and environment for tag keys,
+// and admin and test for tag values, all clusters that have any combination
+// of those values are returned.
+//
+// If both tag keys and values are omitted from the request, clusters are returned
+// regardless of whether they have tag keys or values associated with them.
+func (c *Redshift) DescribeClusters(input *DescribeClustersInput) (*DescribeClustersOutput, error) {
+ req, out := c.DescribeClustersRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// DescribeClustersPages iterates over the pages of a DescribeClusters operation,
+// calling the "fn" function with the response data for each page.
To stop +// iterating, return false from the fn function. +// +// See DescribeClusters method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeClusters operation. +// pageNum := 0 +// err := client.DescribeClustersPages(params, +// func(page *DescribeClustersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Redshift) DescribeClustersPages(input *DescribeClustersInput, fn func(p *DescribeClustersOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeClustersRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeClustersOutput), lastPage) + }) +} + +const opDescribeDefaultClusterParameters = "DescribeDefaultClusterParameters" + +// DescribeDefaultClusterParametersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDefaultClusterParameters operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDefaultClusterParameters method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDefaultClusterParametersRequest method. +// req, resp := client.DescribeDefaultClusterParametersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) DescribeDefaultClusterParametersRequest(input *DescribeDefaultClusterParametersInput) (req *request.Request, output *DescribeDefaultClusterParametersOutput) { + op := &request.Operation{ + Name: opDescribeDefaultClusterParameters, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"DefaultClusterParameters.Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeDefaultClusterParametersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDefaultClusterParametersOutput{} + req.Data = output + return +} + +// Returns a list of parameter settings for the specified parameter group family. +// +// For more information about parameters and parameter groups, go to Amazon +// Redshift Parameter Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html) +// in the Amazon Redshift Cluster Management Guide. +func (c *Redshift) DescribeDefaultClusterParameters(input *DescribeDefaultClusterParametersInput) (*DescribeDefaultClusterParametersOutput, error) { + req, out := c.DescribeDefaultClusterParametersRequest(input) + err := req.Send() + return out, err +} + +// DescribeDefaultClusterParametersPages iterates over the pages of a DescribeDefaultClusterParameters operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. 
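+//
+// As an illustrative aside, a single-page call could look like the following
+// sketch; "redshift-1.0" is only an example parameter group family:
+//
+// params := &DescribeDefaultClusterParametersInput{
+// ParameterGroupFamily: aws.String("redshift-1.0"),
+// }
+// resp, err := client.DescribeDefaultClusterParameters(params)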
+// +// See DescribeDefaultClusterParameters method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeDefaultClusterParameters operation. +// pageNum := 0 +// err := client.DescribeDefaultClusterParametersPages(params, +// func(page *DescribeDefaultClusterParametersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Redshift) DescribeDefaultClusterParametersPages(input *DescribeDefaultClusterParametersInput, fn func(p *DescribeDefaultClusterParametersOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeDefaultClusterParametersRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeDefaultClusterParametersOutput), lastPage) + }) +} + +const opDescribeEventCategories = "DescribeEventCategories" + +// DescribeEventCategoriesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEventCategories operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeEventCategories method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeEventCategoriesRequest method. +// req, resp := client.DescribeEventCategoriesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) DescribeEventCategoriesRequest(input *DescribeEventCategoriesInput) (req *request.Request, output *DescribeEventCategoriesOutput) { + op := &request.Operation{ + Name: opDescribeEventCategories, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeEventCategoriesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeEventCategoriesOutput{} + req.Data = output + return +} + +// Displays a list of event categories for all event source types, or for a +// specified source type. For a list of the event categories and source types, +// go to Amazon Redshift Event Notifications (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-event-notifications.html). +func (c *Redshift) DescribeEventCategories(input *DescribeEventCategoriesInput) (*DescribeEventCategoriesOutput, error) { + req, out := c.DescribeEventCategoriesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeEventSubscriptions = "DescribeEventSubscriptions" + +// DescribeEventSubscriptionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEventSubscriptions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
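+//
+// As an illustrative aside, limiting the call to one subscription could look
+// like the following sketch; the subscription name is hypothetical:
+//
+// params := &DescribeEventSubscriptionsInput{
+// SubscriptionName: aws.String("my-subscription"),
+// }
+// resp, err := client.DescribeEventSubscriptions(params)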
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeEventSubscriptions method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DescribeEventSubscriptionsRequest method.
+// req, resp := client.DescribeEventSubscriptionsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *Redshift) DescribeEventSubscriptionsRequest(input *DescribeEventSubscriptionsInput) (req *request.Request, output *DescribeEventSubscriptionsOutput) {
+ op := &request.Operation{
+ Name: opDescribeEventSubscriptions,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"Marker"},
+ OutputTokens: []string{"Marker"},
+ LimitToken: "MaxRecords",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &DescribeEventSubscriptionsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DescribeEventSubscriptionsOutput{}
+ req.Data = output
+ return
+}
+
+// Lists descriptions of all the Amazon Redshift event notification subscriptions
+// for a customer account. If you specify a subscription name, lists the description
+// for that subscription.
+func (c *Redshift) DescribeEventSubscriptions(input *DescribeEventSubscriptionsInput) (*DescribeEventSubscriptionsOutput, error) {
+ req, out := c.DescribeEventSubscriptionsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// DescribeEventSubscriptionsPages iterates over the pages of a DescribeEventSubscriptions operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See DescribeEventSubscriptions method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a DescribeEventSubscriptions operation.
+// pageNum := 0
+// err := client.DescribeEventSubscriptionsPages(params,
+// func(page *DescribeEventSubscriptionsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *Redshift) DescribeEventSubscriptionsPages(input *DescribeEventSubscriptionsInput, fn func(p *DescribeEventSubscriptionsOutput, lastPage bool) (shouldContinue bool)) error {
+ page, _ := c.DescribeEventSubscriptionsRequest(input)
+ page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
+ return fn(p.(*DescribeEventSubscriptionsOutput), lastPage)
+ })
+}
+
+const opDescribeEvents = "DescribeEvents"
+
+// DescribeEventsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeEvents operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request.
If +// you just want the service response, call the DescribeEvents method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeEventsRequest method. +// req, resp := client.DescribeEventsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) DescribeEventsRequest(input *DescribeEventsInput) (req *request.Request, output *DescribeEventsOutput) { + op := &request.Operation{ + Name: opDescribeEvents, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeEventsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeEventsOutput{} + req.Data = output + return +} + +// Returns events related to clusters, security groups, snapshots, and parameter +// groups for the past 14 days. Events specific to a particular cluster, security +// group, snapshot or parameter group can be obtained by providing the name +// as a parameter. By default, the past hour of events are returned. +func (c *Redshift) DescribeEvents(input *DescribeEventsInput) (*DescribeEventsOutput, error) { + req, out := c.DescribeEventsRequest(input) + err := req.Send() + return out, err +} + +// DescribeEventsPages iterates over the pages of a DescribeEvents operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeEvents method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeEvents operation. +// pageNum := 0 +// err := client.DescribeEventsPages(params, +// func(page *DescribeEventsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Redshift) DescribeEventsPages(input *DescribeEventsInput, fn func(p *DescribeEventsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeEventsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeEventsOutput), lastPage) + }) +} + +const opDescribeHsmClientCertificates = "DescribeHsmClientCertificates" + +// DescribeHsmClientCertificatesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeHsmClientCertificates operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeHsmClientCertificates method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeHsmClientCertificatesRequest method. 
+// req, resp := client.DescribeHsmClientCertificatesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) DescribeHsmClientCertificatesRequest(input *DescribeHsmClientCertificatesInput) (req *request.Request, output *DescribeHsmClientCertificatesOutput) { + op := &request.Operation{ + Name: opDescribeHsmClientCertificates, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeHsmClientCertificatesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeHsmClientCertificatesOutput{} + req.Data = output + return +} + +// Returns information about the specified HSM client certificate. If no certificate +// ID is specified, returns information about all the HSM certificates owned +// by your AWS customer account. +// +// If you specify both tag keys and tag values in the same request, Amazon +// Redshift returns all HSM client certificates that match any combination of +// the specified keys and values. For example, if you have owner and environment +// for tag keys, and admin and test for tag values, all HSM client certificates +// that have any combination of those values are returned. +// +// If both tag keys and values are omitted from the request, HSM client certificates +// are returned regardless of whether they have tag keys or values associated +// with them. +func (c *Redshift) DescribeHsmClientCertificates(input *DescribeHsmClientCertificatesInput) (*DescribeHsmClientCertificatesOutput, error) { + req, out := c.DescribeHsmClientCertificatesRequest(input) + err := req.Send() + return out, err +} + +// DescribeHsmClientCertificatesPages iterates over the pages of a DescribeHsmClientCertificates operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeHsmClientCertificates method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeHsmClientCertificates operation. +// pageNum := 0 +// err := client.DescribeHsmClientCertificatesPages(params, +// func(page *DescribeHsmClientCertificatesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Redshift) DescribeHsmClientCertificatesPages(input *DescribeHsmClientCertificatesInput, fn func(p *DescribeHsmClientCertificatesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeHsmClientCertificatesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeHsmClientCertificatesOutput), lastPage) + }) +} + +const opDescribeHsmConfigurations = "DescribeHsmConfigurations" + +// DescribeHsmConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeHsmConfigurations operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
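+//
+// As an illustrative aside, the tag-based filtering these Describe operations
+// support could be expressed as in the following sketch; the key and value
+// shown are hypothetical:
+//
+// params := &DescribeHsmConfigurationsInput{
+// TagKeys: []*string{aws.String("owner")},
+// TagValues: []*string{aws.String("admin")},
+// }
+// resp, err := client.DescribeHsmConfigurations(params)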
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeHsmConfigurations method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeHsmConfigurationsRequest method. +// req, resp := client.DescribeHsmConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) DescribeHsmConfigurationsRequest(input *DescribeHsmConfigurationsInput) (req *request.Request, output *DescribeHsmConfigurationsOutput) { + op := &request.Operation{ + Name: opDescribeHsmConfigurations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeHsmConfigurationsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeHsmConfigurationsOutput{} + req.Data = output + return +} + +// Returns information about the specified Amazon Redshift HSM configuration. +// If no configuration ID is specified, returns information about all the HSM +// configurations owned by your AWS customer account. +// +// If you specify both tag keys and tag values in the same request, Amazon +// Redshift returns all HSM connections that match any combination of the specified +// keys and values. For example, if you have owner and environment for tag keys, +// and admin and test for tag values, all HSM connections that have any combination +// of those values are returned. +// +// If both tag keys and values are omitted from the request, HSM connections +// are returned regardless of whether they have tag keys or values associated +// with them. +func (c *Redshift) DescribeHsmConfigurations(input *DescribeHsmConfigurationsInput) (*DescribeHsmConfigurationsOutput, error) { + req, out := c.DescribeHsmConfigurationsRequest(input) + err := req.Send() + return out, err +} + +// DescribeHsmConfigurationsPages iterates over the pages of a DescribeHsmConfigurations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeHsmConfigurations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeHsmConfigurations operation. 
+// pageNum := 0 +// err := client.DescribeHsmConfigurationsPages(params, +// func(page *DescribeHsmConfigurationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Redshift) DescribeHsmConfigurationsPages(input *DescribeHsmConfigurationsInput, fn func(p *DescribeHsmConfigurationsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeHsmConfigurationsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeHsmConfigurationsOutput), lastPage) + }) +} + +const opDescribeLoggingStatus = "DescribeLoggingStatus" + +// DescribeLoggingStatusRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLoggingStatus operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeLoggingStatus method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeLoggingStatusRequest method. +// req, resp := client.DescribeLoggingStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) DescribeLoggingStatusRequest(input *DescribeLoggingStatusInput) (req *request.Request, output *LoggingStatus) { + op := &request.Operation{ + Name: opDescribeLoggingStatus, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeLoggingStatusInput{} + } + + req = c.newRequest(op, input, output) + output = &LoggingStatus{} + req.Data = output + return +} + +// Describes whether information, such as queries and connection attempts, is +// being logged for the specified Amazon Redshift cluster. +func (c *Redshift) DescribeLoggingStatus(input *DescribeLoggingStatusInput) (*LoggingStatus, error) { + req, out := c.DescribeLoggingStatusRequest(input) + err := req.Send() + return out, err +} + +const opDescribeOrderableClusterOptions = "DescribeOrderableClusterOptions" + +// DescribeOrderableClusterOptionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeOrderableClusterOptions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeOrderableClusterOptions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeOrderableClusterOptionsRequest method. 
+// req, resp := client.DescribeOrderableClusterOptionsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *Redshift) DescribeOrderableClusterOptionsRequest(input *DescribeOrderableClusterOptionsInput) (req *request.Request, output *DescribeOrderableClusterOptionsOutput) {
+ op := &request.Operation{
+ Name: opDescribeOrderableClusterOptions,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"Marker"},
+ OutputTokens: []string{"Marker"},
+ LimitToken: "MaxRecords",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &DescribeOrderableClusterOptionsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DescribeOrderableClusterOptionsOutput{}
+ req.Data = output
+ return
+}
+
+// Returns a list of orderable cluster options. Before you create a new cluster
+// you can use this operation to find what options are available, such as the
+// EC2 Availability Zones (AZ) in the specific AWS region that you can specify,
+// and the node types you can request. The node types differ by available storage,
+// memory, CPU, and price. Given the costs involved, you might want to obtain
+// a list of cluster options in a specific region and specify values when creating
+// a cluster. For more information about managing clusters, go to Amazon Redshift
+// Clusters (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
+// in the Amazon Redshift Cluster Management Guide.
+func (c *Redshift) DescribeOrderableClusterOptions(input *DescribeOrderableClusterOptionsInput) (*DescribeOrderableClusterOptionsOutput, error) {
+ req, out := c.DescribeOrderableClusterOptionsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// DescribeOrderableClusterOptionsPages iterates over the pages of a DescribeOrderableClusterOptions operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See DescribeOrderableClusterOptions method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a DescribeOrderableClusterOptions operation.
+// pageNum := 0
+// err := client.DescribeOrderableClusterOptionsPages(params,
+// func(page *DescribeOrderableClusterOptionsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *Redshift) DescribeOrderableClusterOptionsPages(input *DescribeOrderableClusterOptionsInput, fn func(p *DescribeOrderableClusterOptionsOutput, lastPage bool) (shouldContinue bool)) error {
+ page, _ := c.DescribeOrderableClusterOptionsRequest(input)
+ page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
+ return fn(p.(*DescribeOrderableClusterOptionsOutput), lastPage)
+ })
+}
+
+const opDescribeReservedNodeOfferings = "DescribeReservedNodeOfferings"
+
+// DescribeReservedNodeOfferingsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeReservedNodeOfferings operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
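+//
+// As an illustrative aside, an offering returned by this operation could be
+// passed to PurchaseReservedNodeOffering, as in the following sketch:
+//
+// resp, err := client.DescribeReservedNodeOfferings(&DescribeReservedNodeOfferingsInput{})
+// if err == nil && len(resp.ReservedNodeOfferings) > 0 {
+// offering := resp.ReservedNodeOfferings[0]
+// _, err = client.PurchaseReservedNodeOffering(&PurchaseReservedNodeOfferingInput{
+// ReservedNodeOfferingId: offering.ReservedNodeOfferingId,
+// })
+// }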
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeReservedNodeOfferings method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DescribeReservedNodeOfferingsRequest method.
+// req, resp := client.DescribeReservedNodeOfferingsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *Redshift) DescribeReservedNodeOfferingsRequest(input *DescribeReservedNodeOfferingsInput) (req *request.Request, output *DescribeReservedNodeOfferingsOutput) {
+ op := &request.Operation{
+ Name: opDescribeReservedNodeOfferings,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"Marker"},
+ OutputTokens: []string{"Marker"},
+ LimitToken: "MaxRecords",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &DescribeReservedNodeOfferingsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DescribeReservedNodeOfferingsOutput{}
+ req.Data = output
+ return
+}
+
+// Returns a list of the available reserved node offerings by Amazon Redshift
+// with their descriptions, including the node type, the fixed and recurring
+// costs of reserving the node, and the duration for which the node will be
+// reserved. These descriptions help you determine which reserved node offering
+// you want to purchase. You then use the unique offering ID in your call to PurchaseReservedNodeOffering
+// to reserve one or more nodes for your Amazon Redshift cluster.
+//
+// For more information about reserved node offerings, go to Purchasing Reserved
+// Nodes (http://docs.aws.amazon.com/redshift/latest/mgmt/purchase-reserved-node-instance.html)
+// in the Amazon Redshift Cluster Management Guide.
+func (c *Redshift) DescribeReservedNodeOfferings(input *DescribeReservedNodeOfferingsInput) (*DescribeReservedNodeOfferingsOutput, error) {
+ req, out := c.DescribeReservedNodeOfferingsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// DescribeReservedNodeOfferingsPages iterates over the pages of a DescribeReservedNodeOfferings operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See DescribeReservedNodeOfferings method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a DescribeReservedNodeOfferings operation.
+// pageNum := 0 +// err := client.DescribeReservedNodeOfferingsPages(params, +// func(page *DescribeReservedNodeOfferingsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Redshift) DescribeReservedNodeOfferingsPages(input *DescribeReservedNodeOfferingsInput, fn func(p *DescribeReservedNodeOfferingsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeReservedNodeOfferingsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeReservedNodeOfferingsOutput), lastPage) + }) +} + +const opDescribeReservedNodes = "DescribeReservedNodes" + +// DescribeReservedNodesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReservedNodes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeReservedNodes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeReservedNodesRequest method. +// req, resp := client.DescribeReservedNodesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) DescribeReservedNodesRequest(input *DescribeReservedNodesInput) (req *request.Request, output *DescribeReservedNodesOutput) { + op := &request.Operation{ + Name: opDescribeReservedNodes, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeReservedNodesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReservedNodesOutput{} + req.Data = output + return +} + +// Returns the descriptions of the reserved nodes. +func (c *Redshift) DescribeReservedNodes(input *DescribeReservedNodesInput) (*DescribeReservedNodesOutput, error) { + req, out := c.DescribeReservedNodesRequest(input) + err := req.Send() + return out, err +} + +// DescribeReservedNodesPages iterates over the pages of a DescribeReservedNodes operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeReservedNodes method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeReservedNodes operation. 
+// pageNum := 0
+// err := client.DescribeReservedNodesPages(params,
+// func(page *DescribeReservedNodesOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *Redshift) DescribeReservedNodesPages(input *DescribeReservedNodesInput, fn func(p *DescribeReservedNodesOutput, lastPage bool) (shouldContinue bool)) error {
+ page, _ := c.DescribeReservedNodesRequest(input)
+ page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
+ return fn(p.(*DescribeReservedNodesOutput), lastPage)
+ })
+}
+
+const opDescribeResize = "DescribeResize"
+
+// DescribeResizeRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeResize operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeResize method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DescribeResizeRequest method.
+// req, resp := client.DescribeResizeRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *Redshift) DescribeResizeRequest(input *DescribeResizeInput) (req *request.Request, output *DescribeResizeOutput) {
+ op := &request.Operation{
+ Name: opDescribeResize,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DescribeResizeInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DescribeResizeOutput{}
+ req.Data = output
+ return
+}
+
+// Returns information about the last resize operation for the specified cluster.
+// If no resize operation has ever been initiated for the specified cluster,
+// an HTTP 404 error is returned. If a resize operation was initiated and completed,
+// the status of the resize remains as SUCCEEDED until the next resize.
+//
+// A resize operation can be requested using ModifyCluster and specifying
+// a different number or type of nodes for the cluster.
+func (c *Redshift) DescribeResize(input *DescribeResizeInput) (*DescribeResizeOutput, error) {
+ req, out := c.DescribeResizeRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDescribeSnapshotCopyGrants = "DescribeSnapshotCopyGrants"
+
+// DescribeSnapshotCopyGrantsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeSnapshotCopyGrants operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeSnapshotCopyGrants method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
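+//
+// As an illustrative aside, a single grant could be looked up as in the
+// following sketch; the grant name is hypothetical:
+//
+// params := &DescribeSnapshotCopyGrantsInput{
+// SnapshotCopyGrantName: aws.String("my-grant"),
+// }
+// resp, err := client.DescribeSnapshotCopyGrants(params)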
+// +// // Example sending a request using the DescribeSnapshotCopyGrantsRequest method. +// req, resp := client.DescribeSnapshotCopyGrantsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) DescribeSnapshotCopyGrantsRequest(input *DescribeSnapshotCopyGrantsInput) (req *request.Request, output *DescribeSnapshotCopyGrantsOutput) { + op := &request.Operation{ + Name: opDescribeSnapshotCopyGrants, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSnapshotCopyGrantsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSnapshotCopyGrantsOutput{} + req.Data = output + return +} + +// Returns a list of snapshot copy grants owned by the AWS account in the destination +// region. +// +// For more information about managing snapshot copy grants, go to Amazon +// Redshift Database Encryption (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-db-encryption.html) +// in the Amazon Redshift Cluster Management Guide. +func (c *Redshift) DescribeSnapshotCopyGrants(input *DescribeSnapshotCopyGrantsInput) (*DescribeSnapshotCopyGrantsOutput, error) { + req, out := c.DescribeSnapshotCopyGrantsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeTableRestoreStatus = "DescribeTableRestoreStatus" + +// DescribeTableRestoreStatusRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTableRestoreStatus operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeTableRestoreStatus method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeTableRestoreStatusRequest method. +// req, resp := client.DescribeTableRestoreStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) DescribeTableRestoreStatusRequest(input *DescribeTableRestoreStatusInput) (req *request.Request, output *DescribeTableRestoreStatusOutput) { + op := &request.Operation{ + Name: opDescribeTableRestoreStatus, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTableRestoreStatusInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTableRestoreStatusOutput{} + req.Data = output + return +} + +// Lists the status of one or more table restore requests made using the RestoreTableFromClusterSnapshot +// API action. If you don't specify a value for the TableRestoreRequestId parameter, +// then DescribeTableRestoreStatus returns the status of all table restore requests +// ordered by the date and time of the request in ascending order. Otherwise +// DescribeTableRestoreStatus returns the status of the table specified by TableRestoreRequestId. 
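+//
+// As an illustrative aside, checking a single table restore request could look
+// like the following sketch; both identifier values are hypothetical:
+//
+// params := &DescribeTableRestoreStatusInput{
+// ClusterIdentifier: aws.String("my-cluster"),
+// TableRestoreRequestId: aws.String("example-request-id"),
+// }
+// resp, err := client.DescribeTableRestoreStatus(params)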
+func (c *Redshift) DescribeTableRestoreStatus(input *DescribeTableRestoreStatusInput) (*DescribeTableRestoreStatusOutput, error) {
+ req, out := c.DescribeTableRestoreStatusRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDescribeTags = "DescribeTags"
+
+// DescribeTagsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeTags operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeTags method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DescribeTagsRequest method.
+// req, resp := client.DescribeTagsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *Redshift) DescribeTagsRequest(input *DescribeTagsInput) (req *request.Request, output *DescribeTagsOutput) {
+ op := &request.Operation{
+ Name: opDescribeTags,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DescribeTagsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DescribeTagsOutput{}
+ req.Data = output
+ return
+}
+
+// Returns a list of tags. You can return tags from a specific resource by specifying
+// an ARN, or you can return all tags for a given type of resource, such as
+// clusters, snapshots, and so on.
+//
+// The following are limitations for DescribeTags:
+//
+// * You cannot specify an ARN and a resource-type value together in the
+// same request.
+//
+// * You cannot use the MaxRecords and Marker parameters together with the
+// ARN parameter.
+//
+// * The MaxRecords parameter can be a range from 10 to 50 results to return
+// in a request.
+//
+// If you specify both tag keys and tag values in the same request, Amazon
+// Redshift returns all resources that match any combination of the specified
+// keys and values. For example, if you have owner and environment for tag keys,
+// and admin and test for tag values, all resources that have any combination
+// of those values are returned.
+//
+// If both tag keys and values are omitted from the request, resources are
+// returned regardless of whether they have tag keys or values associated with
+// them.
+func (c *Redshift) DescribeTags(input *DescribeTagsInput) (*DescribeTagsOutput, error) {
+ req, out := c.DescribeTagsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDisableLogging = "DisableLogging"
+
+// DisableLoggingRequest generates a "aws/request.Request" representing the
+// client's request for the DisableLogging operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DisableLogging method directly
+// instead.
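+//
+// As an illustrative aside, a call could look like the following sketch; the
+// cluster identifier is hypothetical:
+//
+// status, err := client.DisableLogging(&DisableLoggingInput{
+// ClusterIdentifier: aws.String("my-cluster"),
+// })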
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DisableLoggingRequest method. +// req, resp := client.DisableLoggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) DisableLoggingRequest(input *DisableLoggingInput) (req *request.Request, output *LoggingStatus) { + op := &request.Operation{ + Name: opDisableLogging, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableLoggingInput{} + } + + req = c.newRequest(op, input, output) + output = &LoggingStatus{} + req.Data = output + return +} + +// Stops logging information, such as queries and connection attempts, for the +// specified Amazon Redshift cluster. +func (c *Redshift) DisableLogging(input *DisableLoggingInput) (*LoggingStatus, error) { + req, out := c.DisableLoggingRequest(input) + err := req.Send() + return out, err +} + +const opDisableSnapshotCopy = "DisableSnapshotCopy" + +// DisableSnapshotCopyRequest generates a "aws/request.Request" representing the +// client's request for the DisableSnapshotCopy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DisableSnapshotCopy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DisableSnapshotCopyRequest method. +// req, resp := client.DisableSnapshotCopyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) DisableSnapshotCopyRequest(input *DisableSnapshotCopyInput) (req *request.Request, output *DisableSnapshotCopyOutput) { + op := &request.Operation{ + Name: opDisableSnapshotCopy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableSnapshotCopyInput{} + } + + req = c.newRequest(op, input, output) + output = &DisableSnapshotCopyOutput{} + req.Data = output + return +} + +// Disables the automatic copying of snapshots from one region to another region +// for a specified cluster. +// +// If your cluster and its snapshots are encrypted using a customer master +// key (CMK) from AWS KMS, use DeleteSnapshotCopyGrant to delete the grant that +// grants Amazon Redshift permission to the CMK in the destination region. +func (c *Redshift) DisableSnapshotCopy(input *DisableSnapshotCopyInput) (*DisableSnapshotCopyOutput, error) { + req, out := c.DisableSnapshotCopyRequest(input) + err := req.Send() + return out, err +} + +const opEnableLogging = "EnableLogging" + +// EnableLoggingRequest generates a "aws/request.Request" representing the +// client's request for the EnableLogging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
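+//
+// As an illustrative aside, directing audit logs to an S3 bucket could look
+// like the following sketch; the bucket and prefix names are hypothetical:
+//
+// status, err := client.EnableLogging(&EnableLoggingInput{
+// ClusterIdentifier: aws.String("my-cluster"),
+// BucketName: aws.String("my-redshift-logs"),
+// S3KeyPrefix: aws.String("audit/"),
+// })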
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the EnableLogging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EnableLoggingRequest method. +// req, resp := client.EnableLoggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) EnableLoggingRequest(input *EnableLoggingInput) (req *request.Request, output *LoggingStatus) { + op := &request.Operation{ + Name: opEnableLogging, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableLoggingInput{} + } + + req = c.newRequest(op, input, output) + output = &LoggingStatus{} + req.Data = output + return +} + +// Starts logging information, such as queries and connection attempts, for +// the specified Amazon Redshift cluster. +func (c *Redshift) EnableLogging(input *EnableLoggingInput) (*LoggingStatus, error) { + req, out := c.EnableLoggingRequest(input) + err := req.Send() + return out, err +} + +const opEnableSnapshotCopy = "EnableSnapshotCopy" + +// EnableSnapshotCopyRequest generates a "aws/request.Request" representing the +// client's request for the EnableSnapshotCopy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the EnableSnapshotCopy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EnableSnapshotCopyRequest method. +// req, resp := client.EnableSnapshotCopyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) EnableSnapshotCopyRequest(input *EnableSnapshotCopyInput) (req *request.Request, output *EnableSnapshotCopyOutput) { + op := &request.Operation{ + Name: opEnableSnapshotCopy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableSnapshotCopyInput{} + } + + req = c.newRequest(op, input, output) + output = &EnableSnapshotCopyOutput{} + req.Data = output + return +} + +// Enables the automatic copy of snapshots from one region to another region +// for a specified cluster. +func (c *Redshift) EnableSnapshotCopy(input *EnableSnapshotCopyInput) (*EnableSnapshotCopyOutput, error) { + req, out := c.EnableSnapshotCopyRequest(input) + err := req.Send() + return out, err +} + +const opModifyCluster = "ModifyCluster" + +// ModifyClusterRequest generates a "aws/request.Request" representing the +// client's request for the ModifyCluster operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
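+//
+// As an illustrative aside, a resize request must name both the node type and
+// the number of nodes even if only one of them changes, as in the following
+// sketch (all values hypothetical):
+//
+// resp, err := client.ModifyCluster(&ModifyClusterInput{
+// ClusterIdentifier: aws.String("my-cluster"),
+// NodeType: aws.String("dc1.large"),
+// NumberOfNodes: aws.Int64(4),
+// })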
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ModifyCluster method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the ModifyClusterRequest method.
+// req, resp := client.ModifyClusterRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *Redshift) ModifyClusterRequest(input *ModifyClusterInput) (req *request.Request, output *ModifyClusterOutput) {
+ op := &request.Operation{
+ Name: opModifyCluster,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ModifyClusterInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ModifyClusterOutput{}
+ req.Data = output
+ return
+}
+
+// Modifies the settings for a cluster. For example, you can add another security
+// or parameter group, update the preferred maintenance window, or change the
+// master user password. Resetting a cluster password or modifying the security
+// groups associated with a cluster does not require a reboot. However, modifying
+// a parameter group requires a reboot for parameters to take effect. For more
+// information about managing clusters, go to Amazon Redshift Clusters (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
+// in the Amazon Redshift Cluster Management Guide.
+//
+// You can also change node type and the number of nodes to scale up or down
+// the cluster. When resizing a cluster, you must specify both the number of
+// nodes and the node type even if one of the parameters does not change.
+func (c *Redshift) ModifyCluster(input *ModifyClusterInput) (*ModifyClusterOutput, error) {
+ req, out := c.ModifyClusterRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opModifyClusterIamRoles = "ModifyClusterIamRoles"
+
+// ModifyClusterIamRolesRequest generates a "aws/request.Request" representing the
+// client's request for the ModifyClusterIamRoles operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ModifyClusterIamRoles method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the ModifyClusterIamRolesRequest method.
+// req, resp := client.ModifyClusterIamRolesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) ModifyClusterIamRolesRequest(input *ModifyClusterIamRolesInput) (req *request.Request, output *ModifyClusterIamRolesOutput) { + op := &request.Operation{ + Name: opModifyClusterIamRoles, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyClusterIamRolesInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyClusterIamRolesOutput{} + req.Data = output + return +} + +// Modifies the list of AWS Identity and Access Management (IAM) roles that +// can be used by the cluster to access other AWS services. +// +// A cluster can have up to 10 IAM roles associated at any time. +func (c *Redshift) ModifyClusterIamRoles(input *ModifyClusterIamRolesInput) (*ModifyClusterIamRolesOutput, error) { + req, out := c.ModifyClusterIamRolesRequest(input) + err := req.Send() + return out, err +} + +const opModifyClusterParameterGroup = "ModifyClusterParameterGroup" + +// ModifyClusterParameterGroupRequest generates a "aws/request.Request" representing the +// client's request for the ModifyClusterParameterGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyClusterParameterGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyClusterParameterGroupRequest method. +// req, resp := client.ModifyClusterParameterGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) ModifyClusterParameterGroupRequest(input *ModifyClusterParameterGroupInput) (req *request.Request, output *ClusterParameterGroupNameMessage) { + op := &request.Operation{ + Name: opModifyClusterParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyClusterParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &ClusterParameterGroupNameMessage{} + req.Data = output + return +} + +// Modifies the parameters of a parameter group. +// +// For more information about parameters and parameter groups, go to Amazon +// Redshift Parameter Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html) +// in the Amazon Redshift Cluster Management Guide. +func (c *Redshift) ModifyClusterParameterGroup(input *ModifyClusterParameterGroupInput) (*ClusterParameterGroupNameMessage, error) { + req, out := c.ModifyClusterParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opModifyClusterSubnetGroup = "ModifyClusterSubnetGroup" + +// ModifyClusterSubnetGroupRequest generates a "aws/request.Request" representing the +// client's request for the ModifyClusterSubnetGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyClusterSubnetGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyClusterSubnetGroupRequest method. +// req, resp := client.ModifyClusterSubnetGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) ModifyClusterSubnetGroupRequest(input *ModifyClusterSubnetGroupInput) (req *request.Request, output *ModifyClusterSubnetGroupOutput) { + op := &request.Operation{ + Name: opModifyClusterSubnetGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyClusterSubnetGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyClusterSubnetGroupOutput{} + req.Data = output + return +} + +// Modifies a cluster subnet group to include the specified list of VPC subnets. +// The operation replaces the existing list of subnets with the new list of +// subnets. +func (c *Redshift) ModifyClusterSubnetGroup(input *ModifyClusterSubnetGroupInput) (*ModifyClusterSubnetGroupOutput, error) { + req, out := c.ModifyClusterSubnetGroupRequest(input) + err := req.Send() + return out, err +} + +const opModifyEventSubscription = "ModifyEventSubscription" + +// ModifyEventSubscriptionRequest generates a "aws/request.Request" representing the +// client's request for the ModifyEventSubscription operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyEventSubscription method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyEventSubscriptionRequest method. +// req, resp := client.ModifyEventSubscriptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) ModifyEventSubscriptionRequest(input *ModifyEventSubscriptionInput) (req *request.Request, output *ModifyEventSubscriptionOutput) { + op := &request.Operation{ + Name: opModifyEventSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyEventSubscriptionInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyEventSubscriptionOutput{} + req.Data = output + return +} + +// Modifies an existing Amazon Redshift event notification subscription. 
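+//
+// // A minimal sketch of calling ModifyEventSubscription directly; the
+// // subscription name and SNS topic ARN below are hypothetical values.
+// resp, err := client.ModifyEventSubscription(&redshift.ModifyEventSubscriptionInput{
+// SubscriptionName: aws.String("my-event-subscription"),
+// SnsTopicArn: aws.String("arn:aws:sns:us-east-1:123456789012:my-topic"),
+// })
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }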
+func (c *Redshift) ModifyEventSubscription(input *ModifyEventSubscriptionInput) (*ModifyEventSubscriptionOutput, error) { + req, out := c.ModifyEventSubscriptionRequest(input) + err := req.Send() + return out, err +} + +const opModifySnapshotCopyRetentionPeriod = "ModifySnapshotCopyRetentionPeriod" + +// ModifySnapshotCopyRetentionPeriodRequest generates a "aws/request.Request" representing the +// client's request for the ModifySnapshotCopyRetentionPeriod operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifySnapshotCopyRetentionPeriod method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifySnapshotCopyRetentionPeriodRequest method. +// req, resp := client.ModifySnapshotCopyRetentionPeriodRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) ModifySnapshotCopyRetentionPeriodRequest(input *ModifySnapshotCopyRetentionPeriodInput) (req *request.Request, output *ModifySnapshotCopyRetentionPeriodOutput) { + op := &request.Operation{ + Name: opModifySnapshotCopyRetentionPeriod, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifySnapshotCopyRetentionPeriodInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifySnapshotCopyRetentionPeriodOutput{} + req.Data = output + return +} + +// Modifies the number of days to retain automated snapshots in the destination +// region after they are copied from the source region. +func (c *Redshift) ModifySnapshotCopyRetentionPeriod(input *ModifySnapshotCopyRetentionPeriodInput) (*ModifySnapshotCopyRetentionPeriodOutput, error) { + req, out := c.ModifySnapshotCopyRetentionPeriodRequest(input) + err := req.Send() + return out, err +} + +const opPurchaseReservedNodeOffering = "PurchaseReservedNodeOffering" + +// PurchaseReservedNodeOfferingRequest generates a "aws/request.Request" representing the +// client's request for the PurchaseReservedNodeOffering operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PurchaseReservedNodeOffering method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PurchaseReservedNodeOfferingRequest method. 
+// req, resp := client.PurchaseReservedNodeOfferingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) PurchaseReservedNodeOfferingRequest(input *PurchaseReservedNodeOfferingInput) (req *request.Request, output *PurchaseReservedNodeOfferingOutput) { + op := &request.Operation{ + Name: opPurchaseReservedNodeOffering, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PurchaseReservedNodeOfferingInput{} + } + + req = c.newRequest(op, input, output) + output = &PurchaseReservedNodeOfferingOutput{} + req.Data = output + return +} + +// Allows you to purchase reserved nodes. Amazon Redshift offers a predefined +// set of reserved node offerings. You can purchase one or more of the offerings. +// You can call the DescribeReservedNodeOfferings API to obtain the available +// reserved node offerings. You can call this API by providing a specific reserved +// node offering and the number of nodes you want to reserve. +// +// For more information about reserved node offerings, go to Purchasing Reserved +// Nodes (http://docs.aws.amazon.com/redshift/latest/mgmt/purchase-reserved-node-instance.html) +// in the Amazon Redshift Cluster Management Guide. +func (c *Redshift) PurchaseReservedNodeOffering(input *PurchaseReservedNodeOfferingInput) (*PurchaseReservedNodeOfferingOutput, error) { + req, out := c.PurchaseReservedNodeOfferingRequest(input) + err := req.Send() + return out, err +} + +const opRebootCluster = "RebootCluster" + +// RebootClusterRequest generates a "aws/request.Request" representing the +// client's request for the RebootCluster operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RebootCluster method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RebootClusterRequest method. +// req, resp := client.RebootClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) RebootClusterRequest(input *RebootClusterInput) (req *request.Request, output *RebootClusterOutput) { + op := &request.Operation{ + Name: opRebootCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RebootClusterInput{} + } + + req = c.newRequest(op, input, output) + output = &RebootClusterOutput{} + req.Data = output + return +} + +// Reboots a cluster. This action is taken as soon as possible. It results in +// a momentary outage to the cluster, during which the cluster status is set +// to rebooting. A cluster event is created when the reboot is completed. Any +// pending cluster modifications (see ModifyCluster) are applied at this reboot. 
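+//
+// // A minimal sketch of requesting a reboot directly; the cluster
+// // identifier below is a hypothetical value.
+// resp, err := client.RebootCluster(&redshift.RebootClusterInput{
+// ClusterIdentifier: aws.String("myexamplecluster"),
+// })
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//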
+// For more information about managing clusters, go to Amazon Redshift Clusters
+// (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html)
+// in the Amazon Redshift Cluster Management Guide.
+func (c *Redshift) RebootCluster(input *RebootClusterInput) (*RebootClusterOutput, error) {
+ req, out := c.RebootClusterRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opResetClusterParameterGroup = "ResetClusterParameterGroup"
+
+// ResetClusterParameterGroupRequest generates a "aws/request.Request" representing the
+// client's request for the ResetClusterParameterGroup operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ResetClusterParameterGroup method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the ResetClusterParameterGroupRequest method.
+// req, resp := client.ResetClusterParameterGroupRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *Redshift) ResetClusterParameterGroupRequest(input *ResetClusterParameterGroupInput) (req *request.Request, output *ClusterParameterGroupNameMessage) {
+ op := &request.Operation{
+ Name: opResetClusterParameterGroup,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ResetClusterParameterGroupInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ClusterParameterGroupNameMessage{}
+ req.Data = output
+ return
+}
+
+// Sets one or more parameters of the specified parameter group to their default
+// values and sets the source values of the parameters to "engine-default".
+// To reset the entire parameter group, specify the ResetAllParameters parameter.
+// For parameter changes to take effect, you must reboot any associated clusters.
+func (c *Redshift) ResetClusterParameterGroup(input *ResetClusterParameterGroupInput) (*ClusterParameterGroupNameMessage, error) {
+ req, out := c.ResetClusterParameterGroupRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opRestoreFromClusterSnapshot = "RestoreFromClusterSnapshot"
+
+// RestoreFromClusterSnapshotRequest generates a "aws/request.Request" representing the
+// client's request for the RestoreFromClusterSnapshot operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the RestoreFromClusterSnapshot method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the RestoreFromClusterSnapshotRequest method.
+// req, resp := client.RestoreFromClusterSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) RestoreFromClusterSnapshotRequest(input *RestoreFromClusterSnapshotInput) (req *request.Request, output *RestoreFromClusterSnapshotOutput) { + op := &request.Operation{ + Name: opRestoreFromClusterSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RestoreFromClusterSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &RestoreFromClusterSnapshotOutput{} + req.Data = output + return +} + +// Creates a new cluster from a snapshot. By default, Amazon Redshift creates +// the resulting cluster with the same configuration as the original cluster +// from which the snapshot was created, except that the new cluster is created +// with the default cluster security and parameter groups. After Amazon Redshift +// creates the cluster, you can use the ModifyCluster API to associate a different +// security group and different parameter group with the restored cluster. If +// you are using a DS node type, you can also choose to change to another DS +// node type of the same size during restore. +// +// If you restore a cluster into a VPC, you must provide a cluster subnet +// group where you want the cluster restored. +// +// For more information about working with snapshots, go to Amazon Redshift +// Snapshots (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html) +// in the Amazon Redshift Cluster Management Guide. +func (c *Redshift) RestoreFromClusterSnapshot(input *RestoreFromClusterSnapshotInput) (*RestoreFromClusterSnapshotOutput, error) { + req, out := c.RestoreFromClusterSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opRestoreTableFromClusterSnapshot = "RestoreTableFromClusterSnapshot" + +// RestoreTableFromClusterSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the RestoreTableFromClusterSnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RestoreTableFromClusterSnapshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RestoreTableFromClusterSnapshotRequest method. +// req, resp := client.RestoreTableFromClusterSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) RestoreTableFromClusterSnapshotRequest(input *RestoreTableFromClusterSnapshotInput) (req *request.Request, output *RestoreTableFromClusterSnapshotOutput) { + op := &request.Operation{ + Name: opRestoreTableFromClusterSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RestoreTableFromClusterSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &RestoreTableFromClusterSnapshotOutput{} + req.Data = output + return +} + +// Creates a new table from a table in an Amazon Redshift cluster snapshot. 
+// You must create the new table within the Amazon Redshift cluster that the +// snapshot was taken from. +// +// You cannot use RestoreTableFromClusterSnapshot to restore a table with the +// same name as an existing table in an Amazon Redshift cluster. That is, you +// cannot overwrite an existing table in a cluster with a restored table. If +// you want to replace your original table with a new, restored table, then +// rename or drop your original table before you call RestoreTableFromClusterSnapshot. +// When you have renamed your original table, then you can pass the original +// name of the table as the NewTableName parameter value in the call to RestoreTableFromClusterSnapshot. +// This way, you can replace the original table with the table created from +// the snapshot. +func (c *Redshift) RestoreTableFromClusterSnapshot(input *RestoreTableFromClusterSnapshotInput) (*RestoreTableFromClusterSnapshotOutput, error) { + req, out := c.RestoreTableFromClusterSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opRevokeClusterSecurityGroupIngress = "RevokeClusterSecurityGroupIngress" + +// RevokeClusterSecurityGroupIngressRequest generates a "aws/request.Request" representing the +// client's request for the RevokeClusterSecurityGroupIngress operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RevokeClusterSecurityGroupIngress method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RevokeClusterSecurityGroupIngressRequest method. +// req, resp := client.RevokeClusterSecurityGroupIngressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) RevokeClusterSecurityGroupIngressRequest(input *RevokeClusterSecurityGroupIngressInput) (req *request.Request, output *RevokeClusterSecurityGroupIngressOutput) { + op := &request.Operation{ + Name: opRevokeClusterSecurityGroupIngress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RevokeClusterSecurityGroupIngressInput{} + } + + req = c.newRequest(op, input, output) + output = &RevokeClusterSecurityGroupIngressOutput{} + req.Data = output + return +} + +// Revokes an ingress rule in an Amazon Redshift security group for a previously +// authorized IP range or Amazon EC2 security group. To add an ingress rule, +// see AuthorizeClusterSecurityGroupIngress. For information about managing +// security groups, go to Amazon Redshift Cluster Security Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-security-groups.html) +// in the Amazon Redshift Cluster Management Guide. 
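+//
+// // A minimal sketch of revoking a previously authorized CIDR rule directly;
+// // the security group name and IP range below are hypothetical values.
+// resp, err := client.RevokeClusterSecurityGroupIngress(&redshift.RevokeClusterSecurityGroupIngressInput{
+// ClusterSecurityGroupName: aws.String("my-security-group"),
+// CIDRIP: aws.String("192.0.2.0/24"),
+// })
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }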
+func (c *Redshift) RevokeClusterSecurityGroupIngress(input *RevokeClusterSecurityGroupIngressInput) (*RevokeClusterSecurityGroupIngressOutput, error) { + req, out := c.RevokeClusterSecurityGroupIngressRequest(input) + err := req.Send() + return out, err +} + +const opRevokeSnapshotAccess = "RevokeSnapshotAccess" + +// RevokeSnapshotAccessRequest generates a "aws/request.Request" representing the +// client's request for the RevokeSnapshotAccess operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RevokeSnapshotAccess method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RevokeSnapshotAccessRequest method. +// req, resp := client.RevokeSnapshotAccessRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Redshift) RevokeSnapshotAccessRequest(input *RevokeSnapshotAccessInput) (req *request.Request, output *RevokeSnapshotAccessOutput) { + op := &request.Operation{ + Name: opRevokeSnapshotAccess, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RevokeSnapshotAccessInput{} + } + + req = c.newRequest(op, input, output) + output = &RevokeSnapshotAccessOutput{} + req.Data = output + return +} + +// Removes the ability of the specified AWS customer account to restore the +// specified snapshot. If the account is currently restoring the snapshot, the +// restore will run to completion. +// +// For more information about working with snapshots, go to Amazon Redshift +// Snapshots (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html) +// in the Amazon Redshift Cluster Management Guide. +func (c *Redshift) RevokeSnapshotAccess(input *RevokeSnapshotAccessInput) (*RevokeSnapshotAccessOutput, error) { + req, out := c.RevokeSnapshotAccessRequest(input) + err := req.Send() + return out, err +} + +const opRotateEncryptionKey = "RotateEncryptionKey" + +// RotateEncryptionKeyRequest generates a "aws/request.Request" representing the +// client's request for the RotateEncryptionKey operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RotateEncryptionKey method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RotateEncryptionKeyRequest method. 
+// req, resp := client.RotateEncryptionKeyRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *Redshift) RotateEncryptionKeyRequest(input *RotateEncryptionKeyInput) (req *request.Request, output *RotateEncryptionKeyOutput) {
+ op := &request.Operation{
+ Name: opRotateEncryptionKey,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &RotateEncryptionKeyInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &RotateEncryptionKeyOutput{}
+ req.Data = output
+ return
+}
+
+// Rotates the encryption keys for a cluster.
+func (c *Redshift) RotateEncryptionKey(input *RotateEncryptionKeyInput) (*RotateEncryptionKeyOutput, error) {
+ req, out := c.RotateEncryptionKeyRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// Describes an AWS customer account authorized to restore a snapshot.
+type AccountWithRestoreAccess struct {
+ _ struct{} `type:"structure"`
+
+ // The identifier of an AWS customer account authorized to restore a snapshot.
+ AccountId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s AccountWithRestoreAccess) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AccountWithRestoreAccess) GoString() string {
+ return s.String()
+}
+
+type AuthorizeClusterSecurityGroupIngressInput struct {
+ _ struct{} `type:"structure"`
+
+ // The IP range to be added to the Amazon Redshift security group.
+ CIDRIP *string `type:"string"`
+
+ // The name of the security group to which the ingress rule is added.
+ ClusterSecurityGroupName *string `type:"string" required:"true"`
+
+ // The EC2 security group to be added to the Amazon Redshift security group.
+ EC2SecurityGroupName *string `type:"string"`
+
+ // The AWS account number of the owner of the security group specified by the
+ // EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable
+ // value.
+ //
+ // Example: 111122223333
+ EC2SecurityGroupOwnerId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s AuthorizeClusterSecurityGroupIngressInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AuthorizeClusterSecurityGroupIngressInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AuthorizeClusterSecurityGroupIngressInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AuthorizeClusterSecurityGroupIngressInput"}
+ if s.ClusterSecurityGroupName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ClusterSecurityGroupName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type AuthorizeClusterSecurityGroupIngressOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Describes a security group.
+ ClusterSecurityGroup *ClusterSecurityGroup `type:"structure"`
+}
+
+// String returns the string representation
+func (s AuthorizeClusterSecurityGroupIngressOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AuthorizeClusterSecurityGroupIngressOutput) GoString() string {
+ return s.String()
+}
+
+type AuthorizeSnapshotAccessInput struct {
+ _ struct{} `type:"structure"`
+
+ // The identifier of the AWS customer account authorized to restore the specified
+ // snapshot.
+ AccountWithRestoreAccess *string `type:"string" required:"true"`
+
+ // The identifier of the cluster the snapshot was created from. This parameter
+ // is required if your IAM user has a policy containing a snapshot resource
+ // element that specifies anything other than * for the cluster name.
+ SnapshotClusterIdentifier *string `type:"string"`
+
+ // The identifier of the snapshot the account is authorized to restore.
+ SnapshotIdentifier *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AuthorizeSnapshotAccessInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AuthorizeSnapshotAccessInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AuthorizeSnapshotAccessInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AuthorizeSnapshotAccessInput"}
+ if s.AccountWithRestoreAccess == nil {
+ invalidParams.Add(request.NewErrParamRequired("AccountWithRestoreAccess"))
+ }
+ if s.SnapshotIdentifier == nil {
+ invalidParams.Add(request.NewErrParamRequired("SnapshotIdentifier"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type AuthorizeSnapshotAccessOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Describes a snapshot.
+ Snapshot *Snapshot `type:"structure"`
+}
+
+// String returns the string representation
+func (s AuthorizeSnapshotAccessOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AuthorizeSnapshotAccessOutput) GoString() string {
+ return s.String()
+}
+
+// Describes an availability zone.
+type AvailabilityZone struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the availability zone.
+ Name *string `type:"string"`
+}
+
+// String returns the string representation
+func (s AvailabilityZone) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AvailabilityZone) GoString() string {
+ return s.String()
+}
+
+// Describes a cluster.
+type Cluster struct {
+ _ struct{} `type:"structure"`
+
+ // If true, major version upgrades will be applied automatically to the cluster
+ // during the maintenance window.
+ AllowVersionUpgrade *bool `type:"boolean"`
+
+ // The number of days that automatic cluster snapshots are retained.
+ AutomatedSnapshotRetentionPeriod *int64 `type:"integer"`
+
+ // The name of the Availability Zone in which the cluster is located.
+ AvailabilityZone *string `type:"string"`
+
+ // The date and time that the cluster was created.
+ ClusterCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // The unique identifier of the cluster.
+ ClusterIdentifier *string `type:"string"`
+
+ // The nodes in a cluster.
+ ClusterNodes []*ClusterNode `type:"list"`
+
+ // The list of cluster parameter groups that are associated with this cluster.
+ // Each parameter group in the list is returned with its status.
+ ClusterParameterGroups []*ClusterParameterGroupStatus `locationNameList:"ClusterParameterGroup" type:"list"`
+
+ // The public key for the cluster.
+ ClusterPublicKey *string `type:"string"`
+
+ // The specific revision number of the database in the cluster.
+ ClusterRevisionNumber *string `type:"string"`
+
+ // A list of cluster security groups that are associated with the cluster. Each
+ // security group is represented by an element that contains ClusterSecurityGroup.Name
+ // and ClusterSecurityGroup.Status subelements.
+ //
+ // Cluster security groups are used when the cluster is not created in a VPC.
+ // Clusters that are created in a VPC use VPC security groups, which are listed
+ // by the VpcSecurityGroups parameter.
+ ClusterSecurityGroups []*ClusterSecurityGroupMembership `locationNameList:"ClusterSecurityGroup" type:"list"`
+
+ // Returns the destination region and retention period that are configured for
+ // cross-region snapshot copy.
+ ClusterSnapshotCopyStatus *ClusterSnapshotCopyStatus `type:"structure"`
+
+ // The current state of the cluster. Possible values are: available, creating,
+ // deleting, final-snapshot, hardware-failure, incompatible-hsm, incompatible-network,
+ // incompatible-parameters, incompatible-restore, modifying, rebooting, renaming,
+ // resizing, rotating-keys, storage-full, and updating-hsm.
+ ClusterStatus *string `type:"string"`
+
+ // The name of the subnet group that is associated with the cluster. This parameter
+ // is valid only when the cluster is in a VPC.
+ ClusterSubnetGroupName *string `type:"string"`
+
+ // The version ID of the Amazon Redshift engine that is running on the cluster.
+ ClusterVersion *string `type:"string"`
+
+ // The name of the initial database that was created when the cluster was created.
+ // This same name is returned for the life of the cluster. If an initial database
+ // was not specified, a database named "dev" was created by default.
+ DBName *string `type:"string"`
+
+ // The status of the elastic IP (EIP) address.
+ ElasticIpStatus *ElasticIpStatus `type:"structure"`
+
+ // If true, data in the cluster is encrypted at rest.
+ Encrypted *bool `type:"boolean"`
+
+ // The connection endpoint.
+ Endpoint *Endpoint `type:"structure"`
+
+ // Reports whether the Amazon Redshift cluster has finished applying any HSM
+ // settings changes specified in a modify cluster command.
+ //
+ // Values: active, applying
+ HsmStatus *HsmStatus `type:"structure"`
+
+ // A list of AWS Identity and Access Management (IAM) roles that can be used
+ // by the cluster to access other AWS services.
+ IamRoles []*ClusterIamRole `locationNameList:"ClusterIamRole" type:"list"`
+
+ // The AWS Key Management Service (KMS) key ID of the encryption key used to
+ // encrypt data in the cluster.
+ KmsKeyId *string `type:"string"`
+
+ // The master user name for the cluster. This name is used to connect to the
+ // database that is specified in DBName.
+ MasterUsername *string `type:"string"`
+
+ // The status of a modify operation, if any, initiated for the cluster.
+ ModifyStatus *string `type:"string"`
+
+ // The node type for the nodes in the cluster.
+ NodeType *string `type:"string"`
+
+ // The number of compute nodes in the cluster.
+ NumberOfNodes *int64 `type:"integer"`
+
+ // If present, changes to the cluster are pending. Specific pending changes
+ // are identified by subelements.
+ PendingModifiedValues *PendingModifiedValues `type:"structure"`
+
+ // The weekly time range (in UTC) during which system maintenance can occur.
+ PreferredMaintenanceWindow *string `type:"string"`
+
+ // If true, the cluster can be accessed from a public network.
+ PubliclyAccessible *bool `type:"boolean"`
+
+ // Describes the status of a cluster restore action. Returns null if the cluster
+ // was not created by restoring a snapshot.
+ RestoreStatus *RestoreStatus `type:"structure"`
+
+ // The list of tags for the cluster.
+ Tags []*Tag `locationNameList:"Tag" type:"list"`
+
+ // The identifier of the VPC the cluster is in, if the cluster is in a VPC.
+ VpcId *string `type:"string"`
+
+ // A list of Virtual Private Cloud (VPC) security groups that are associated
+ // with the cluster. This parameter is returned only if the cluster is in a
+ // VPC.
+ VpcSecurityGroups []*VpcSecurityGroupMembership `locationNameList:"VpcSecurityGroup" type:"list"`
+}
+
+// String returns the string representation
+func (s Cluster) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Cluster) GoString() string {
+ return s.String()
+}
+
+// An AWS Identity and Access Management (IAM) role that can be used by the
+// associated Amazon Redshift cluster to access other AWS services.
+type ClusterIamRole struct {
+ _ struct{} `type:"structure"`
+
+ // Describes the status of the IAM role's association with an Amazon Redshift
+ // cluster.
+ //
+ // The following are possible statuses and descriptions.
+ //
+ // in-sync: The role is available for use by the cluster.
+ //
+ // adding: The role is in the process of being associated with the cluster.
+ //
+ // removing: The role is in the process of being disassociated from the cluster.
+ ApplyStatus *string `type:"string"`
+
+ // The Amazon Resource Name (ARN) of the IAM role. For example, arn:aws:iam::123456789012:role/RedshiftCopyUnload.
+ IamRoleArn *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ClusterIamRole) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ClusterIamRole) GoString() string {
+ return s.String()
+}
+
+// The identifier of a node in a cluster.
+type ClusterNode struct {
+ _ struct{} `type:"structure"`
+
+ // Whether the node is a leader node or a compute node.
+ NodeRole *string `type:"string"`
+
+ // The private IP address of a node within a cluster.
+ PrivateIPAddress *string `type:"string"`
+
+ // The public IP address of a node within a cluster.
+ PublicIPAddress *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ClusterNode) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ClusterNode) GoString() string {
+ return s.String()
+}
+
+// Describes a parameter group.
+type ClusterParameterGroup struct {
+ _ struct{} `type:"structure"`
+
+ // The description of the parameter group.
+ Description *string `type:"string"`
+
+ // The name of the cluster parameter group family that this cluster parameter
+ // group is compatible with.
+ ParameterGroupFamily *string `type:"string"`
+
+ // The name of the cluster parameter group.
+ ParameterGroupName *string `type:"string"`
+
+ // The list of tags for the cluster parameter group.
+ Tags []*Tag `locationNameList:"Tag" type:"list"`
+}
+
+// String returns the string representation
+func (s ClusterParameterGroup) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ClusterParameterGroup) GoString() string {
+ return s.String()
+}
+
+type ClusterParameterGroupNameMessage struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the cluster parameter group.
+ ParameterGroupName *string `type:"string"`
+
+ // The status of the parameter group. For example, if you made a change to a
+ // parameter group name-value pair, then the change could be pending a reboot
+ // of an associated cluster.
+ ParameterGroupStatus *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ClusterParameterGroupNameMessage) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ClusterParameterGroupNameMessage) GoString() string {
+ return s.String()
+}
+
+// Describes the status of a parameter group.
+type ClusterParameterGroupStatus struct {
+ _ struct{} `type:"structure"`
+
+ // The list of parameter statuses.
+ //
+ // For more information about parameters and parameter groups, go to Amazon
+ // Redshift Parameter Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html)
+ // in the Amazon Redshift Cluster Management Guide.
+ ClusterParameterStatusList []*ClusterParameterStatus `type:"list"`
+
+ // The status of parameter updates.
+ ParameterApplyStatus *string `type:"string"`
+
+ // The name of the cluster parameter group.
+ ParameterGroupName *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ClusterParameterGroupStatus) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ClusterParameterGroupStatus) GoString() string {
+ return s.String()
+}
+
+// Describes the status of a single parameter within a parameter group.
+type ClusterParameterStatus struct {
+ _ struct{} `type:"structure"`
+
+ // The error that prevented the parameter from being applied to the database.
+ ParameterApplyErrorDescription *string `type:"string"`
+
+ // The status of the parameter that indicates whether the parameter is in sync
+ // with the database, waiting for a cluster reboot, or encountered an error
+ // when being applied.
+ //
+ // The following are possible statuses and descriptions.
+ //
+ // in-sync: The parameter value is in sync with the database.
+ //
+ // pending-reboot: The parameter value will be applied after the cluster reboots.
+ //
+ // applying: The parameter value is being applied to the database.
+ //
+ // invalid-parameter: Cannot apply the parameter value because it has an invalid
+ // value or syntax.
+ //
+ // apply-deferred: The parameter contains static property changes. The changes
+ // are deferred until the cluster reboots.
+ //
+ // apply-error: Cannot connect to the cluster. The parameter change will be
+ // applied after the cluster reboots.
+ //
+ // unknown-error: Cannot apply the parameter change right now. The change will
+ // be applied after the cluster reboots.
+ ParameterApplyStatus *string `type:"string"`
+
+ // The name of the parameter.
+ ParameterName *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ClusterParameterStatus) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ClusterParameterStatus) GoString() string {
+ return s.String()
+}
+
+// Describes a security group.
+type ClusterSecurityGroup struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the cluster security group to which the operation was applied.
+ ClusterSecurityGroupName *string `type:"string"`
+
+ // A description of the security group.
+ Description *string `type:"string"`
+
+ // A list of EC2 security groups that are permitted to access clusters associated
+ // with this cluster security group.
+ EC2SecurityGroups []*EC2SecurityGroup `locationNameList:"EC2SecurityGroup" type:"list"`
+
+ // A list of IP ranges (CIDR blocks) that are permitted to access clusters associated
+ // with this cluster security group.
+ IPRanges []*IPRange `locationNameList:"IPRange" type:"list"` + + // The list of tags for the cluster security group. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s ClusterSecurityGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClusterSecurityGroup) GoString() string { + return s.String() +} + +// Describes a cluster security group. +type ClusterSecurityGroupMembership struct { + _ struct{} `type:"structure"` + + // The name of the cluster security group. + ClusterSecurityGroupName *string `type:"string"` + + // The status of the cluster security group. + Status *string `type:"string"` +} + +// String returns the string representation +func (s ClusterSecurityGroupMembership) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClusterSecurityGroupMembership) GoString() string { + return s.String() +} + +// Returns the destination region and retention period that are configured for +// cross-region snapshot copy. +type ClusterSnapshotCopyStatus struct { + _ struct{} `type:"structure"` + + // The destination region that snapshots are automatically copied to when cross-region + // snapshot copy is enabled. + DestinationRegion *string `type:"string"` + + // The number of days that automated snapshots are retained in the destination + // region after they are copied from a source region. + RetentionPeriod *int64 `type:"long"` + + // The name of the snapshot copy grant. + SnapshotCopyGrantName *string `type:"string"` +} + +// String returns the string representation +func (s ClusterSnapshotCopyStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClusterSnapshotCopyStatus) GoString() string { + return s.String() +} + +// Describes a subnet group. +type ClusterSubnetGroup struct { + _ struct{} `type:"structure"` + + // The name of the cluster subnet group. + ClusterSubnetGroupName *string `type:"string"` + + // The description of the cluster subnet group. + Description *string `type:"string"` + + // The status of the cluster subnet group. Possible values are Complete, Incomplete + // and Invalid. + SubnetGroupStatus *string `type:"string"` + + // A list of the VPC Subnet elements. + Subnets []*Subnet `locationNameList:"Subnet" type:"list"` + + // The list of tags for the cluster subnet group. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // The VPC ID of the cluster subnet group. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s ClusterSubnetGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClusterSubnetGroup) GoString() string { + return s.String() +} + +// Describes a cluster version, including the parameter group family and description +// of the version. +type ClusterVersion struct { + _ struct{} `type:"structure"` + + // The name of the cluster parameter group family for the cluster. + ClusterParameterGroupFamily *string `type:"string"` + + // The version number used by the cluster. + ClusterVersion *string `type:"string"` + + // The description of the cluster version. 
+ Description *string `type:"string"` +} + +// String returns the string representation +func (s ClusterVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClusterVersion) GoString() string { + return s.String() +} + +type CopyClusterSnapshotInput struct { + _ struct{} `type:"structure"` + + // The identifier of the cluster the source snapshot was created from. This + // parameter is required if your IAM user has a policy containing a snapshot + // resource element that specifies anything other than * for the cluster name. + // + // Constraints: + // + // Must be the identifier for a valid cluster. + SourceSnapshotClusterIdentifier *string `type:"string"` + + // The identifier for the source snapshot. + // + // Constraints: + // + // Must be the identifier for a valid automated snapshot whose state is available. + SourceSnapshotIdentifier *string `type:"string" required:"true"` + + // The identifier given to the new manual snapshot. + // + // Constraints: + // + // Cannot be null, empty, or blank. Must contain from 1 to 255 alphanumeric + // characters or hyphens. First character must be a letter. Cannot end with + // a hyphen or contain two consecutive hyphens. Must be unique for the AWS account + // that is making the request. + TargetSnapshotIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CopyClusterSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyClusterSnapshotInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CopyClusterSnapshotInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CopyClusterSnapshotInput"} + if s.SourceSnapshotIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("SourceSnapshotIdentifier")) + } + if s.TargetSnapshotIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("TargetSnapshotIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CopyClusterSnapshotOutput struct { + _ struct{} `type:"structure"` + + // Describes a snapshot. + Snapshot *Snapshot `type:"structure"` +} + +// String returns the string representation +func (s CopyClusterSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyClusterSnapshotOutput) GoString() string { + return s.String() +} + +type CreateClusterInput struct { + _ struct{} `type:"structure"` + + // Reserved. + AdditionalInfo *string `type:"string"` + + // If true, major version upgrades can be applied during the maintenance window + // to the Amazon Redshift engine that is running on the cluster. + // + // When a new major version of the Amazon Redshift engine is released, you + // can request that the service automatically apply upgrades during the maintenance + // window to the Amazon Redshift engine that is running on your cluster. + // + // Default: true + AllowVersionUpgrade *bool `type:"boolean"` + + // The number of days that automated snapshots are retained. If the value is + // 0, automated snapshots are disabled. Even if automated snapshots are disabled, + // you can still create manual snapshots when you want with CreateClusterSnapshot. + // + // Default: 1 + // + // Constraints: Must be a value from 0 to 35. 
+ AutomatedSnapshotRetentionPeriod *int64 `type:"integer"` + + // The EC2 Availability Zone (AZ) in which you want Amazon Redshift to provision + // the cluster. For example, if you have several EC2 instances running in a + // specific Availability Zone, then you might want the cluster to be provisioned + // in the same zone in order to decrease network latency. + // + // Default: A random, system-chosen Availability Zone in the region that is + // specified by the endpoint. + // + // Example: us-east-1d + // + // Constraint: The specified Availability Zone must be in the same region + // as the current endpoint. + AvailabilityZone *string `type:"string"` + + // A unique identifier for the cluster. You use this identifier to refer to + // the cluster for any subsequent cluster operations such as deleting or modifying. + // The identifier also appears in the Amazon Redshift console. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens. Alphabetic + // characters must be lowercase. First character must be a letter. Cannot end + // with a hyphen or contain two consecutive hyphens. Must be unique for all + // clusters within an AWS account. Example: myexamplecluster + ClusterIdentifier *string `type:"string" required:"true"` + + // The name of the parameter group to be associated with this cluster. + // + // Default: The default Amazon Redshift cluster parameter group. For information + // about the default parameter group, go to Working with Amazon Redshift Parameter + // Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html) + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters or hyphens. First character must + // be a letter. Cannot end with a hyphen or contain two consecutive hyphens. + ClusterParameterGroupName *string `type:"string"` + + // A list of security groups to be associated with this cluster. + // + // Default: The default cluster security group for Amazon Redshift. + ClusterSecurityGroups []*string `locationNameList:"ClusterSecurityGroupName" type:"list"` + + // The name of a cluster subnet group to be associated with this cluster. + // + // If this parameter is not provided the resulting cluster will be deployed + // outside virtual private cloud (VPC). + ClusterSubnetGroupName *string `type:"string"` + + // The type of the cluster. When cluster type is specified as single-node, + // the NumberOfNodes parameter is not required. multi-node, the NumberOfNodes + // parameter is required. + // + // Valid Values: multi-node | single-node + // + // Default: multi-node + ClusterType *string `type:"string"` + + // The version of the Amazon Redshift engine software that you want to deploy + // on the cluster. + // + // The version selected runs on all the nodes in the cluster. + // + // Constraints: Only version 1.0 is currently available. + // + // Example: 1.0 + ClusterVersion *string `type:"string"` + + // The name of the first database to be created when the cluster is created. + // + // To create additional databases after the cluster is created, connect to + // the cluster with a SQL client and use SQL commands to create a database. + // For more information, go to Create a Database (http://docs.aws.amazon.com/redshift/latest/dg/t_creating_database.html) + // in the Amazon Redshift Database Developer Guide. + // + // Default: dev + // + // Constraints: + // + // Must contain 1 to 64 alphanumeric characters. Must contain only lowercase + // letters. 
+ // Cannot be a word that is reserved by the service. A list of reserved
+ // words can be found in Reserved Words (http://docs.aws.amazon.com/redshift/latest/dg/r_pg_keywords.html)
+ // in the Amazon Redshift Database Developer Guide.
+ DBName *string `type:"string"`
+
+ // The Elastic IP (EIP) address for the cluster.
+ //
+ // Constraints: The cluster must be provisioned in EC2-VPC and publicly-accessible
+ // through an Internet gateway. For more information about provisioning clusters
+ // in EC2-VPC, go to Supported Platforms to Launch Your Cluster (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#cluster-platforms)
+ // in the Amazon Redshift Cluster Management Guide.
+ ElasticIp *string `type:"string"`
+
+ // If true, the data in the cluster is encrypted at rest.
+ //
+ // Default: false
+ Encrypted *bool `type:"boolean"`
+
+ // Specifies the name of the HSM client certificate the Amazon Redshift cluster
+ // uses to retrieve the data encryption keys stored in an HSM.
+ HsmClientCertificateIdentifier *string `type:"string"`
+
+ // Specifies the name of the HSM configuration that contains the information
+ // the Amazon Redshift cluster can use to retrieve and store keys in an HSM.
+ HsmConfigurationIdentifier *string `type:"string"`
+
+ // A list of AWS Identity and Access Management (IAM) roles that can be used
+ // by the cluster to access other AWS services. You must supply the IAM roles
+ // in their Amazon Resource Name (ARN) format. You can supply up to 10 IAM roles
+ // in a single request.
+ //
+ // A cluster can have up to 10 IAM roles associated at any time.
+ IamRoles []*string `locationNameList:"IamRoleArn" type:"list"`
+
+ // The AWS Key Management Service (KMS) key ID of the encryption key that you
+ // want to use to encrypt data in the cluster.
+ KmsKeyId *string `type:"string"`
+
+ // The password associated with the master user account for the cluster that
+ // is being created.
+ //
+ // Constraints:
+ //
+ // Must be between 8 and 64 characters in length. Must contain at least one
+ // uppercase letter. Must contain at least one lowercase letter. Must contain
+ // at least one number. Can be any printable ASCII character (ASCII code 33
+ // to 126) except ' (single quote), " (double quote), \, /, @, or space.
+ MasterUserPassword *string `type:"string" required:"true"`
+
+ // The user name associated with the master user account for the cluster that
+ // is being created.
+ //
+ // Constraints:
+ //
+ // Must be 1 to 128 alphanumeric characters. First character must be a letter.
+ // Cannot be a reserved word. A list of reserved words can be found in Reserved
+ // Words (http://docs.aws.amazon.com/redshift/latest/dg/r_pg_keywords.html)
+ // in the Amazon Redshift Database Developer Guide.
+ MasterUsername *string `type:"string" required:"true"`
+
+ // The node type to be provisioned for the cluster. For information about node
+ // types, go to Working with Clusters (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#how-many-nodes)
+ // in the Amazon Redshift Cluster Management Guide.
+ //
+ // Valid Values: ds1.xlarge | ds1.8xlarge | ds2.xlarge | ds2.8xlarge | dc1.large
+ // | dc1.8xlarge.
+ NodeType *string `type:"string" required:"true"`
+
+ // The number of compute nodes in the cluster. This parameter is required when
+ // the ClusterType parameter is specified as multi-node.
+ // + // For information about determining how many nodes you need, go to Working + // with Clusters (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#how-many-nodes) + // in the Amazon Redshift Cluster Management Guide. + // + // If you don't specify this parameter, you get a single-node cluster. When + // requesting a multi-node cluster, you must specify the number of nodes that + // you want in the cluster. + // + // Default: 1 + // + // Constraints: Value must be at least 1 and no more than 100. + NumberOfNodes *int64 `type:"integer"` + + // The port number on which the cluster accepts incoming connections. + // + // The cluster is accessible only via the JDBC and ODBC connection strings. + // Part of the connection string requires the port on which the cluster will + // listen for incoming connections. + // + // Default: 5439 + // + // Valid Values: 1150-65535 + Port *int64 `type:"integer"` + + // The weekly time range (in UTC) during which automated cluster maintenance + // can occur. + // + // Format: ddd:hh24:mi-ddd:hh24:mi + // + // Default: A 30-minute window selected at random from an 8-hour block of + // time per region, occurring on a random day of the week. For more information + // about the time blocks for each region, see Maintenance Windows (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#rs-maintenance-windows) + // in Amazon Redshift Cluster Management Guide. + // + // Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun + // + // Constraints: Minimum 30-minute window. + PreferredMaintenanceWindow *string `type:"string"` + + // If true, the cluster can be accessed from a public network. + PubliclyAccessible *bool `type:"boolean"` + + // A list of tag instances. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // A list of Virtual Private Cloud (VPC) security groups to be associated with + // the cluster. + // + // Default: The default VPC security group is associated with the cluster. + VpcSecurityGroupIds []*string `locationNameList:"VpcSecurityGroupId" type:"list"` +} + +// String returns the string representation +func (s CreateClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateClusterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateClusterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateClusterInput"} + if s.ClusterIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterIdentifier")) + } + if s.MasterUserPassword == nil { + invalidParams.Add(request.NewErrParamRequired("MasterUserPassword")) + } + if s.MasterUsername == nil { + invalidParams.Add(request.NewErrParamRequired("MasterUsername")) + } + if s.NodeType == nil { + invalidParams.Add(request.NewErrParamRequired("NodeType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateClusterOutput struct { + _ struct{} `type:"structure"` + + // Describes a cluster. + Cluster *Cluster `type:"structure"` +} + +// String returns the string representation +func (s CreateClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateClusterOutput) GoString() string { + return s.String() +} + +type CreateClusterParameterGroupInput struct { + _ struct{} `type:"structure"` + + // A description of the parameter group. 
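+ //
+ // For orientation, a hypothetical request using this input type (svc here is
+ // an assumed, already-initialized *redshift.Redshift client) might look like:
+ //
+ //    out, err := svc.CreateClusterParameterGroup(&redshift.CreateClusterParameterGroupInput{
+ //        Description:          aws.String("My first parameter group"),
+ //        ParameterGroupFamily: aws.String("redshift-1.0"),
+ //        ParameterGroupName:   aws.String("myparametergroup"),
+ //    })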
+ Description *string `type:"string" required:"true"`
+
+ // The Amazon Redshift engine version to which the cluster parameter group applies.
+ // The cluster engine version determines the set of parameters.
+ //
+ // To get a list of valid parameter group family names, you can call DescribeClusterParameterGroups.
+ // By default, Amazon Redshift returns a list of all the parameter groups that
+ // are owned by your AWS account, including the default parameter groups for
+ // each Amazon Redshift engine version. The parameter group family names associated
+ // with the default parameter groups provide you the valid values. For example,
+ // a valid family name is "redshift-1.0".
+ ParameterGroupFamily *string `type:"string" required:"true"`
+
+ // The name of the cluster parameter group.
+ //
+ // Constraints:
+ //
+ // Must be 1 to 255 alphanumeric characters or hyphens. First character must
+ // be a letter. Cannot end with a hyphen or contain two consecutive hyphens.
+ // Must be unique within your AWS account. This value is stored as a lower-case
+ // string.
+ ParameterGroupName *string `type:"string" required:"true"`
+
+ // A list of tag instances.
+ Tags []*Tag `locationNameList:"Tag" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateClusterParameterGroupInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateClusterParameterGroupInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateClusterParameterGroupInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateClusterParameterGroupInput"}
+ if s.Description == nil {
+ invalidParams.Add(request.NewErrParamRequired("Description"))
+ }
+ if s.ParameterGroupFamily == nil {
+ invalidParams.Add(request.NewErrParamRequired("ParameterGroupFamily"))
+ }
+ if s.ParameterGroupName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ParameterGroupName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type CreateClusterParameterGroupOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Describes a parameter group.
+ ClusterParameterGroup *ClusterParameterGroup `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateClusterParameterGroupOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateClusterParameterGroupOutput) GoString() string {
+ return s.String()
+}
+
+type CreateClusterSecurityGroupInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name for the security group. Amazon Redshift stores the value as a lowercase
+ // string.
+ //
+ // Constraints:
+ //
+ // Must contain no more than 255 alphanumeric characters or hyphens. Must
+ // not be "Default". Must be unique for all security groups that are created
+ // by your AWS account. Example: examplesecuritygroup
+ ClusterSecurityGroupName *string `type:"string" required:"true"`
+
+ // A description for the security group.
+ Description *string `type:"string" required:"true"`
+
+ // A list of tag instances.
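+ //
+ // For example, a hypothetical literal (illustration only, not generated code):
+ //
+ //    Tags: []*redshift.Tag{
+ //        {Key: aws.String("environment"), Value: aws.String("test")},
+ //    }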
+ Tags []*Tag `locationNameList:"Tag" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateClusterSecurityGroupInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateClusterSecurityGroupInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateClusterSecurityGroupInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateClusterSecurityGroupInput"}
+ if s.ClusterSecurityGroupName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ClusterSecurityGroupName"))
+ }
+ if s.Description == nil {
+ invalidParams.Add(request.NewErrParamRequired("Description"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type CreateClusterSecurityGroupOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Describes a security group.
+ ClusterSecurityGroup *ClusterSecurityGroup `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateClusterSecurityGroupOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateClusterSecurityGroupOutput) GoString() string {
+ return s.String()
+}
+
+type CreateClusterSnapshotInput struct {
+ _ struct{} `type:"structure"`
+
+ // The cluster identifier for which you want a snapshot.
+ ClusterIdentifier *string `type:"string" required:"true"`
+
+ // A unique identifier for the snapshot that you are requesting. This identifier
+ // must be unique for all snapshots within the AWS account.
+ //
+ // Constraints:
+ //
+ // Cannot be null, empty, or blank. Must contain from 1 to 255 alphanumeric
+ // characters or hyphens. First character must be a letter. Cannot end with a
+ // hyphen or contain two consecutive hyphens. Example: my-snapshot-id
+ SnapshotIdentifier *string `type:"string" required:"true"`
+
+ // A list of tag instances.
+ Tags []*Tag `locationNameList:"Tag" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateClusterSnapshotInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateClusterSnapshotInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateClusterSnapshotInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateClusterSnapshotInput"}
+ if s.ClusterIdentifier == nil {
+ invalidParams.Add(request.NewErrParamRequired("ClusterIdentifier"))
+ }
+ if s.SnapshotIdentifier == nil {
+ invalidParams.Add(request.NewErrParamRequired("SnapshotIdentifier"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type CreateClusterSnapshotOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Describes a snapshot.
+ Snapshot *Snapshot `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateClusterSnapshotOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateClusterSnapshotOutput) GoString() string {
+ return s.String()
+}
+
+type CreateClusterSubnetGroupInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name for the subnet group. Amazon Redshift stores the value as a lowercase
+ // string.
+ //
+ // Constraints:
+ //
+ // Must contain no more than 255 alphanumeric characters or hyphens. Must
+ // not be "Default".
+ // Must be unique for all subnet groups that are created by your AWS account.
+ // Example: examplesubnetgroup
+ ClusterSubnetGroupName *string `type:"string" required:"true"`
+
+ // A description for the subnet group.
+ Description *string `type:"string" required:"true"`
+
+ // An array of VPC subnet IDs. A maximum of 20 subnets can be modified in a
+ // single request.
+ SubnetIds []*string `locationNameList:"SubnetIdentifier" type:"list" required:"true"`
+
+ // A list of tag instances.
+ Tags []*Tag `locationNameList:"Tag" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateClusterSubnetGroupInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateClusterSubnetGroupInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateClusterSubnetGroupInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateClusterSubnetGroupInput"}
+ if s.ClusterSubnetGroupName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ClusterSubnetGroupName"))
+ }
+ if s.Description == nil {
+ invalidParams.Add(request.NewErrParamRequired("Description"))
+ }
+ if s.SubnetIds == nil {
+ invalidParams.Add(request.NewErrParamRequired("SubnetIds"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type CreateClusterSubnetGroupOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Describes a subnet group.
+ ClusterSubnetGroup *ClusterSubnetGroup `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateClusterSubnetGroupOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateClusterSubnetGroupOutput) GoString() string {
+ return s.String()
+}
+
+type CreateEventSubscriptionInput struct {
+ _ struct{} `type:"structure"`
+
+ // A Boolean value; set to true to activate the subscription, set to false to
+ // create the subscription but not activate it.
+ Enabled *bool `type:"boolean"`
+
+ // Specifies the Amazon Redshift event categories to be published by the event
+ // notification subscription.
+ //
+ // Values: Configuration, Management, Monitoring, Security
+ EventCategories []*string `locationNameList:"EventCategory" type:"list"`
+
+ // Specifies the Amazon Redshift event severity to be published by the event
+ // notification subscription.
+ //
+ // Values: ERROR, INFO
+ Severity *string `type:"string"`
+
+ // The Amazon Resource Name (ARN) of the Amazon SNS topic used to transmit the
+ // event notifications. The ARN is created by Amazon SNS when you create a topic
+ // and subscribe to it.
+ SnsTopicArn *string `type:"string" required:"true"`
+
+ // A list of one or more identifiers of Amazon Redshift source objects. All
+ // of the objects must be of the same type as was specified in the source type
+ // parameter. The event subscription will return only events generated by the
+ // specified objects. If not specified, then events are returned for all objects
+ // within the source type specified.
+ //
+ // Example: my-cluster-1, my-cluster-2
+ //
+ // Example: my-snapshot-20131010
+ SourceIds []*string `locationNameList:"SourceId" type:"list"`
+
+ // The type of source that will be generating the events. For example, if you
+ // want to be notified of events generated by a cluster, you would set this
+ // parameter to cluster.
+ // If this value is not specified, events are returned for all Amazon Redshift
+ // objects in your AWS account. You must specify a source type in order to
+ // specify source IDs.
+ //
+ // Valid values: cluster, cluster-parameter-group, cluster-security-group,
+ // and cluster-snapshot.
+ SourceType *string `type:"string"`
+
+ // The name of the event subscription to be created.
+ //
+ // Constraints:
+ //
+ // Cannot be null, empty, or blank. Must contain from 1 to 255 alphanumeric
+ // characters or hyphens. First character must be a letter. Cannot end with
+ // a hyphen or contain two consecutive hyphens.
+ SubscriptionName *string `type:"string" required:"true"`
+
+ // A list of tag instances.
+ Tags []*Tag `locationNameList:"Tag" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateEventSubscriptionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateEventSubscriptionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateEventSubscriptionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateEventSubscriptionInput"}
+ if s.SnsTopicArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("SnsTopicArn"))
+ }
+ if s.SubscriptionName == nil {
+ invalidParams.Add(request.NewErrParamRequired("SubscriptionName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type CreateEventSubscriptionOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Describes event subscriptions.
+ EventSubscription *EventSubscription `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateEventSubscriptionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateEventSubscriptionOutput) GoString() string {
+ return s.String()
+}
+
+type CreateHsmClientCertificateInput struct {
+ _ struct{} `type:"structure"`
+
+ // The identifier to be assigned to the new HSM client certificate that the
+ // cluster will use to connect to the HSM to use the database encryption keys.
+ HsmClientCertificateIdentifier *string `type:"string" required:"true"`
+
+ // A list of tag instances.
+ Tags []*Tag `locationNameList:"Tag" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateHsmClientCertificateInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateHsmClientCertificateInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateHsmClientCertificateInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateHsmClientCertificateInput"}
+ if s.HsmClientCertificateIdentifier == nil {
+ invalidParams.Add(request.NewErrParamRequired("HsmClientCertificateIdentifier"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type CreateHsmClientCertificateOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Returns information about an HSM client certificate. The certificate is stored
+ // in a secure Hardware Security Module (HSM), and used by the Amazon Redshift
+ // cluster to encrypt data files.
+ HsmClientCertificate *HsmClientCertificate `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateHsmClientCertificateOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateHsmClientCertificateOutput) GoString() string {
+ return s.String()
+}
+
+type CreateHsmConfigurationInput struct {
+ _ struct{} `type:"structure"`
+
+ // A text description of the HSM configuration to be created.
+ Description *string `type:"string" required:"true"`
+
+ // The identifier to be assigned to the new Amazon Redshift HSM configuration.
+ HsmConfigurationIdentifier *string `type:"string" required:"true"`
+
+ // The IP address that the Amazon Redshift cluster must use to access the HSM.
+ HsmIpAddress *string `type:"string" required:"true"`
+
+ // The name of the partition in the HSM where the Amazon Redshift clusters will
+ // store their database encryption keys.
+ HsmPartitionName *string `type:"string" required:"true"`
+
+ // The password required to access the HSM partition.
+ HsmPartitionPassword *string `type:"string" required:"true"`
+
+ // The HSM's public certificate file. When using Cloud HSM, the file name is
+ // server.pem.
+ HsmServerPublicCertificate *string `type:"string" required:"true"`
+
+ // A list of tag instances.
+ Tags []*Tag `locationNameList:"Tag" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateHsmConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateHsmConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateHsmConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateHsmConfigurationInput"}
+ if s.Description == nil {
+ invalidParams.Add(request.NewErrParamRequired("Description"))
+ }
+ if s.HsmConfigurationIdentifier == nil {
+ invalidParams.Add(request.NewErrParamRequired("HsmConfigurationIdentifier"))
+ }
+ if s.HsmIpAddress == nil {
+ invalidParams.Add(request.NewErrParamRequired("HsmIpAddress"))
+ }
+ if s.HsmPartitionName == nil {
+ invalidParams.Add(request.NewErrParamRequired("HsmPartitionName"))
+ }
+ if s.HsmPartitionPassword == nil {
+ invalidParams.Add(request.NewErrParamRequired("HsmPartitionPassword"))
+ }
+ if s.HsmServerPublicCertificate == nil {
+ invalidParams.Add(request.NewErrParamRequired("HsmServerPublicCertificate"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type CreateHsmConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Returns information about an HSM configuration, which is an object that describes
+ // to Amazon Redshift clusters the information they require to connect to an
+ // HSM where they can store database encryption keys.
+ HsmConfiguration *HsmConfiguration `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateHsmConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateHsmConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// The input for the CreateSnapshotCopyGrant action.
+type CreateSnapshotCopyGrantInput struct {
+ _ struct{} `type:"structure"`
+
+ // The unique identifier of the customer master key (CMK) to which to grant
+ // Amazon Redshift permission. If no key is specified, the default key is used.
+ KmsKeyId *string `type:"string"`
+
+ // The name of the snapshot copy grant. This name must be unique in the region
+ // for the AWS account.
+ //
+ // Constraints:
+ //
+ // Must contain from 1 to 63 alphanumeric characters or hyphens. Alphabetic
+ // characters must be lowercase. First character must be a letter. Cannot end
+ // with a hyphen or contain two consecutive hyphens. Must be unique for all
+ // clusters within an AWS account.
+ SnapshotCopyGrantName *string `type:"string" required:"true"`
+
+ // A list of tag instances.
+ Tags []*Tag `locationNameList:"Tag" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateSnapshotCopyGrantInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateSnapshotCopyGrantInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateSnapshotCopyGrantInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateSnapshotCopyGrantInput"}
+ if s.SnapshotCopyGrantName == nil {
+ invalidParams.Add(request.NewErrParamRequired("SnapshotCopyGrantName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type CreateSnapshotCopyGrantOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The snapshot copy grant that grants Amazon Redshift permission to encrypt
+ // copied snapshots with the specified customer master key (CMK) from AWS KMS
+ // in the destination region.
+ //
+ // For more information about managing snapshot copy grants, go to Amazon
+ // Redshift Database Encryption (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-db-encryption.html)
+ // in the Amazon Redshift Cluster Management Guide.
+ SnapshotCopyGrant *SnapshotCopyGrant `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateSnapshotCopyGrantOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateSnapshotCopyGrantOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the input for the CreateTags action.
+type CreateTagsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) to which you want to add the tag or tags.
+ // For example, arn:aws:redshift:us-east-1:123456789:cluster:t1.
+ ResourceName *string `type:"string" required:"true"`
+
+ // One or more name/value pairs to add as tags to the specified resource. Each
+ // tag name is passed in with the parameter Key and the corresponding value
+ // is passed in with the parameter Value. The Key and Value parameters are separated
+ // by a comma (,). Separate multiple tags with a space. For example, --tags
+ // "Key"="owner","Value"="admin" "Key"="environment","Value"="test" "Key"="version","Value"="1.0".
+ Tags []*Tag `locationNameList:"Tag" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateTagsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateTagsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
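+//
+// A hedged usage sketch (svc is an assumed, already-initialized *redshift.Redshift
+// client; aws.String is the SDK's pointer helper):
+//
+//    params := &redshift.CreateTagsInput{
+//        ResourceName: aws.String("arn:aws:redshift:us-east-1:123456789:cluster:t1"),
+//        Tags: []*redshift.Tag{
+//            {Key: aws.String("owner"), Value: aws.String("admin")},
+//        },
+//    }
+//    if err := params.Validate(); err == nil {
+//        _, err = svc.CreateTags(params)
+//    }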
+func (s *CreateTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTagsInput"} + if s.ResourceName == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceName")) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTagsOutput) GoString() string { + return s.String() +} + +// Describes the default cluster parameters for a parameter group family. +type DefaultClusterParameters struct { + _ struct{} `type:"structure"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` + + // The name of the cluster parameter group family to which the engine default + // parameters apply. + ParameterGroupFamily *string `type:"string"` + + // The list of cluster default parameters. + Parameters []*Parameter `locationNameList:"Parameter" type:"list"` +} + +// String returns the string representation +func (s DefaultClusterParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DefaultClusterParameters) GoString() string { + return s.String() +} + +type DeleteClusterInput struct { + _ struct{} `type:"structure"` + + // The identifier of the cluster to be deleted. + // + // Constraints: + // + // Must contain lowercase characters. Must contain from 1 to 63 alphanumeric + // characters or hyphens. First character must be a letter. Cannot end with + // a hyphen or contain two consecutive hyphens. + ClusterIdentifier *string `type:"string" required:"true"` + + // The identifier of the final snapshot that is to be created immediately before + // deleting the cluster. If this parameter is provided, SkipFinalClusterSnapshot + // must be false. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters. First character must be a letter. + // Cannot end with a hyphen or contain two consecutive hyphens. + FinalClusterSnapshotIdentifier *string `type:"string"` + + // Determines whether a final snapshot of the cluster is created before Amazon + // Redshift deletes the cluster. If true, a final cluster snapshot is not created. + // If false, a final cluster snapshot is created before the cluster is deleted. + // + // The FinalClusterSnapshotIdentifier parameter must be specified if SkipFinalClusterSnapshot + // is false. Default: false + SkipFinalClusterSnapshot *bool `type:"boolean"` +} + +// String returns the string representation +func (s DeleteClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteClusterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
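+//
+// As an illustration (hypothetical, not generated code; svc is an assumed
+// *redshift.Redshift client), requesting a final snapshot before deletion
+// might look like:
+//
+//    resp, err := svc.DeleteCluster(&redshift.DeleteClusterInput{
+//        ClusterIdentifier:              aws.String("myexamplecluster"),
+//        FinalClusterSnapshotIdentifier: aws.String("my-final-snapshot"),
+//        SkipFinalClusterSnapshot:       aws.Bool(false),
+//    })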
+func (s *DeleteClusterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteClusterInput"} + if s.ClusterIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteClusterOutput struct { + _ struct{} `type:"structure"` + + // Describes a cluster. + Cluster *Cluster `type:"structure"` +} + +// String returns the string representation +func (s DeleteClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteClusterOutput) GoString() string { + return s.String() +} + +type DeleteClusterParameterGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the parameter group to be deleted. + // + // Constraints: + // + // Must be the name of an existing cluster parameter group. Cannot delete + // a default cluster parameter group. + ParameterGroupName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteClusterParameterGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteClusterParameterGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteClusterParameterGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteClusterParameterGroupInput"} + if s.ParameterGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("ParameterGroupName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteClusterParameterGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteClusterParameterGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteClusterParameterGroupOutput) GoString() string { + return s.String() +} + +type DeleteClusterSecurityGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the cluster security group to be deleted. + ClusterSecurityGroupName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteClusterSecurityGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteClusterSecurityGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteClusterSecurityGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteClusterSecurityGroupInput"} + if s.ClusterSecurityGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterSecurityGroupName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteClusterSecurityGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteClusterSecurityGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteClusterSecurityGroupOutput) GoString() string { + return s.String() +} + +type DeleteClusterSnapshotInput struct { + _ struct{} `type:"structure"` + + // The unique identifier of the cluster the snapshot was created from. 
+ // This parameter is required if your IAM user has a policy containing a snapshot
+ // resource element that specifies anything other than * for the cluster name.
+ //
+ // Constraints: Must be the name of a valid cluster.
+ SnapshotClusterIdentifier *string `type:"string"`
+
+ // The unique identifier of the manual snapshot to be deleted.
+ //
+ // Constraints: Must be the name of an existing snapshot that is in the available
+ // state.
+ SnapshotIdentifier *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteClusterSnapshotInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteClusterSnapshotInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteClusterSnapshotInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteClusterSnapshotInput"}
+ if s.SnapshotIdentifier == nil {
+ invalidParams.Add(request.NewErrParamRequired("SnapshotIdentifier"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type DeleteClusterSnapshotOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Describes a snapshot.
+ Snapshot *Snapshot `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteClusterSnapshotOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteClusterSnapshotOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteClusterSubnetGroupInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the cluster subnet group to be deleted.
+ ClusterSubnetGroupName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteClusterSubnetGroupInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteClusterSubnetGroupInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteClusterSubnetGroupInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteClusterSubnetGroupInput"}
+ if s.ClusterSubnetGroupName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ClusterSubnetGroupName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type DeleteClusterSubnetGroupOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteClusterSubnetGroupOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteClusterSubnetGroupOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteEventSubscriptionInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the Amazon Redshift event notification subscription to be deleted.
+ SubscriptionName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteEventSubscriptionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteEventSubscriptionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
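+//
+// A minimal, hypothetical call (svc is an assumed *redshift.Redshift client):
+//
+//    _, err := svc.DeleteEventSubscription(&redshift.DeleteEventSubscriptionInput{
+//        SubscriptionName: aws.String("my-event-subscription"),
+//    })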
+func (s *DeleteEventSubscriptionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteEventSubscriptionInput"}
+ if s.SubscriptionName == nil {
+ invalidParams.Add(request.NewErrParamRequired("SubscriptionName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type DeleteEventSubscriptionOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteEventSubscriptionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteEventSubscriptionOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteHsmClientCertificateInput struct {
+ _ struct{} `type:"structure"`
+
+ // The identifier of the HSM client certificate to be deleted.
+ HsmClientCertificateIdentifier *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteHsmClientCertificateInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteHsmClientCertificateInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteHsmClientCertificateInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteHsmClientCertificateInput"}
+ if s.HsmClientCertificateIdentifier == nil {
+ invalidParams.Add(request.NewErrParamRequired("HsmClientCertificateIdentifier"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type DeleteHsmClientCertificateOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteHsmClientCertificateOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteHsmClientCertificateOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteHsmConfigurationInput struct {
+ _ struct{} `type:"structure"`
+
+ // The identifier of the Amazon Redshift HSM configuration to be deleted.
+ HsmConfigurationIdentifier *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteHsmConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteHsmConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteHsmConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteHsmConfigurationInput"}
+ if s.HsmConfigurationIdentifier == nil {
+ invalidParams.Add(request.NewErrParamRequired("HsmConfigurationIdentifier"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type DeleteHsmConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteHsmConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteHsmConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// The input for the DeleteSnapshotCopyGrant action.
+type DeleteSnapshotCopyGrantInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the snapshot copy grant to delete.
+ SnapshotCopyGrantName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteSnapshotCopyGrantInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteSnapshotCopyGrantInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteSnapshotCopyGrantInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteSnapshotCopyGrantInput"}
+ if s.SnapshotCopyGrantName == nil {
+ invalidParams.Add(request.NewErrParamRequired("SnapshotCopyGrantName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type DeleteSnapshotCopyGrantOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteSnapshotCopyGrantOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteSnapshotCopyGrantOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the input for the DeleteTags action.
+type DeleteTagsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) from which you want to remove the tag or tags.
+ // For example, arn:aws:redshift:us-east-1:123456789:cluster:t1.
+ ResourceName *string `type:"string" required:"true"`
+
+ // The tag key or keys that you want to delete.
+ TagKeys []*string `locationNameList:"TagKey" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteTagsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteTagsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteTagsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteTagsInput"}
+ if s.ResourceName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ResourceName"))
+ }
+ if s.TagKeys == nil {
+ invalidParams.Add(request.NewErrParamRequired("TagKeys"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type DeleteTagsOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteTagsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteTagsOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeClusterParameterGroupsInput struct {
+ _ struct{} `type:"structure"`
+
+ // An optional parameter that specifies the starting point to return a set of
+ // response records. When the results of a DescribeClusterParameterGroups request
+ // exceed the value specified in MaxRecords, AWS returns a value in the Marker
+ // field of the response. You can retrieve the next set of response records
+ // by providing the returned marker value in the Marker parameter and retrying
+ // the request.
+ Marker *string `type:"string"`
+
+ // The maximum number of response records to return in each call. If the number
+ // of remaining response records exceeds the specified MaxRecords value, a value
+ // is returned in a marker field of the response. You can retrieve the next
+ // set of records by retrying the command with the returned marker value.
+ //
+ // Default: 100
+ //
+ // Constraints: minimum 20, maximum 100.
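+ //
+ // As a hedged sketch of how Marker and MaxRecords are typically consumed
+ // together (illustration only; svc is an assumed *redshift.Redshift client):
+ //
+ //    input := &redshift.DescribeClusterParameterGroupsInput{
+ //        MaxRecords: aws.Int64(20),
+ //    }
+ //    for {
+ //        out, err := svc.DescribeClusterParameterGroups(input)
+ //        if err != nil {
+ //            break // real code would handle the error
+ //        }
+ //        // ...process out.ParameterGroups...
+ //        if out.Marker == nil {
+ //            break // all response records have been retrieved
+ //        }
+ //        input.Marker = out.Marker
+ //    }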
+ MaxRecords *int64 `type:"integer"` + + // The name of a specific parameter group for which to return details. By default, + // details about all parameter groups and the default parameter group are returned. + ParameterGroupName *string `type:"string"` + + // A tag key or keys for which you want to return all matching cluster parameter + // groups that are associated with the specified key or keys. For example, suppose + // that you have parameter groups that are tagged with keys called owner and + // environment. If you specify both of these tag keys in the request, Amazon + // Redshift returns a response with the parameter groups that have either or + // both of these tag keys associated with them. + TagKeys []*string `locationNameList:"TagKey" type:"list"` + + // A tag value or values for which you want to return all matching cluster parameter + // groups that are associated with the specified tag value or values. For example, + // suppose that you have parameter groups that are tagged with values called + // admin and test. If you specify both of these tag values in the request, Amazon + // Redshift returns a response with the parameter groups that have either or + // both of these tag values associated with them. + TagValues []*string `locationNameList:"TagValue" type:"list"` +} + +// String returns the string representation +func (s DescribeClusterParameterGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClusterParameterGroupsInput) GoString() string { + return s.String() +} + +// Contains the output from the DescribeClusterParameterGroups action. +type DescribeClusterParameterGroupsOutput struct { + _ struct{} `type:"structure"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` + + // A list of ClusterParameterGroup instances. Each instance describes one cluster + // parameter group. + ParameterGroups []*ClusterParameterGroup `locationNameList:"ClusterParameterGroup" type:"list"` +} + +// String returns the string representation +func (s DescribeClusterParameterGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClusterParameterGroupsOutput) GoString() string { + return s.String() +} + +type DescribeClusterParametersInput struct { + _ struct{} `type:"structure"` + + // An optional parameter that specifies the starting point to return a set of + // response records. When the results of a DescribeClusterParameters request + // exceed the value specified in MaxRecords, AWS returns a value in the Marker + // field of the response. You can retrieve the next set of response records + // by providing the returned marker value in the Marker parameter and retrying + // the request. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call. If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. 
+ //
+ // Default: 100
+ //
+ // Constraints: minimum 20, maximum 100.
+ MaxRecords *int64 `type:"integer"`
+
+ // The name of a cluster parameter group for which to return details.
+ ParameterGroupName *string `type:"string" required:"true"`
+
+ // The parameter types to return. Specify user to show parameters that are different
+ // from the default. Similarly, specify engine-default to show parameters that
+ // are the same as the default parameter group.
+ //
+ // Default: All parameter types returned.
+ //
+ // Valid Values: user | engine-default
+ Source *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeClusterParametersInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeClusterParametersInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeClusterParametersInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeClusterParametersInput"}
+ if s.ParameterGroupName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ParameterGroupName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Contains the output from the DescribeClusterParameters action.
+type DescribeClusterParametersOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A value that indicates the starting point for the next set of response records
+ // in a subsequent request. If a value is returned in a response, you can retrieve
+ // the next set of records by providing this returned marker value in the Marker
+ // parameter and retrying the command. If the Marker field is empty, all response
+ // records have been retrieved for the request.
+ Marker *string `type:"string"`
+
+ // A list of Parameter instances. Each instance lists the parameters of one
+ // cluster parameter group.
+ Parameters []*Parameter `locationNameList:"Parameter" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeClusterParametersOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeClusterParametersOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeClusterSecurityGroupsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of a cluster security group for which you are requesting details.
+ // You can specify either the Marker parameter or a ClusterSecurityGroupName
+ // parameter, but not both.
+ //
+ // Example: securitygroup1
+ ClusterSecurityGroupName *string `type:"string"`
+
+ // An optional parameter that specifies the starting point to return a set of
+ // response records. When the results of a DescribeClusterSecurityGroups request
+ // exceed the value specified in MaxRecords, AWS returns a value in the Marker
+ // field of the response. You can retrieve the next set of response records
+ // by providing the returned marker value in the Marker parameter and retrying
+ // the request.
+ //
+ // Constraints: You can specify either the ClusterSecurityGroupName parameter
+ // or the Marker parameter, but not both.
+ Marker *string `type:"string"`
+
+ // The maximum number of response records to return in each call. If the number
+ // of remaining response records exceeds the specified MaxRecords value, a value
+ // is returned in a marker field of the response.
You can retrieve the next + // set of records by retrying the command with the returned marker value. + // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // A tag key or keys for which you want to return all matching cluster security + // groups that are associated with the specified key or keys. For example, suppose + // that you have security groups that are tagged with keys called owner and + // environment. If you specify both of these tag keys in the request, Amazon + // Redshift returns a response with the security groups that have either or + // both of these tag keys associated with them. + TagKeys []*string `locationNameList:"TagKey" type:"list"` + + // A tag value or values for which you want to return all matching cluster security + // groups that are associated with the specified tag value or values. For example, + // suppose that you have security groups that are tagged with values called + // admin and test. If you specify both of these tag values in the request, Amazon + // Redshift returns a response with the security groups that have either or + // both of these tag values associated with them. + TagValues []*string `locationNameList:"TagValue" type:"list"` +} + +// String returns the string representation +func (s DescribeClusterSecurityGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClusterSecurityGroupsInput) GoString() string { + return s.String() +} + +type DescribeClusterSecurityGroupsOutput struct { + _ struct{} `type:"structure"` + + // A list of ClusterSecurityGroup instances. + ClusterSecurityGroups []*ClusterSecurityGroup `locationNameList:"ClusterSecurityGroup" type:"list"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeClusterSecurityGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClusterSecurityGroupsOutput) GoString() string { + return s.String() +} + +type DescribeClusterSnapshotsInput struct { + _ struct{} `type:"structure"` + + // The identifier of the cluster for which information about snapshots is requested. + ClusterIdentifier *string `type:"string"` + + // A time value that requests only snapshots created at or before the specified + // time. The time value is specified in ISO 8601 format. For more information + // about ISO 8601, go to the ISO8601 Wikipedia page. (http://en.wikipedia.org/wiki/ISO_8601) + // + // Example: 2012-07-16T18:00:00Z + EndTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // An optional parameter that specifies the starting point to return a set of + // response records. When the results of a DescribeClusterSnapshots request + // exceed the value specified in MaxRecords, AWS returns a value in the Marker + // field of the response. You can retrieve the next set of response records + // by providing the returned marker value in the Marker parameter and retrying + // the request. 
+ Marker *string `type:"string"` + + // The maximum number of response records to return in each call. If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. + // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // The AWS customer account used to create or copy the snapshot. Use this field + // to filter the results to snapshots owned by a particular account. To describe + // snapshots you own, either specify your AWS customer account, or do not specify + // the parameter. + OwnerAccount *string `type:"string"` + + // The snapshot identifier of the snapshot about which to return information. + SnapshotIdentifier *string `type:"string"` + + // The type of snapshots for which you are requesting information. By default, + // snapshots of all types are returned. + // + // Valid Values: automated | manual + SnapshotType *string `type:"string"` + + // A value that requests only snapshots created at or after the specified time. + // The time value is specified in ISO 8601 format. For more information about + // ISO 8601, go to the ISO8601 Wikipedia page. (http://en.wikipedia.org/wiki/ISO_8601) + // + // Example: 2012-07-16T18:00:00Z + StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // A tag key or keys for which you want to return all matching cluster snapshots + // that are associated with the specified key or keys. For example, suppose + // that you have snapshots that are tagged with keys called owner and environment. + // If you specify both of these tag keys in the request, Amazon Redshift returns + // a response with the snapshots that have either or both of these tag keys + // associated with them. + TagKeys []*string `locationNameList:"TagKey" type:"list"` + + // A tag value or values for which you want to return all matching cluster snapshots + // that are associated with the specified tag value or values. For example, + // suppose that you have snapshots that are tagged with values called admin + // and test. If you specify both of these tag values in the request, Amazon + // Redshift returns a response with the snapshots that have either or both of + // these tag values associated with them. + TagValues []*string `locationNameList:"TagValue" type:"list"` +} + +// String returns the string representation +func (s DescribeClusterSnapshotsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClusterSnapshotsInput) GoString() string { + return s.String() +} + +// Contains the output from the DescribeClusterSnapshots action. +type DescribeClusterSnapshotsOutput struct { + _ struct{} `type:"structure"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` + + // A list of Snapshot instances. 
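+ //
+ // For example (an illustrative sketch; out is an assumed
+ // *redshift.DescribeClusterSnapshotsOutput):
+ //
+ //    for _, snap := range out.Snapshots {
+ //        fmt.Println(aws.StringValue(snap.SnapshotIdentifier))
+ //    }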
+ Snapshots []*Snapshot `locationNameList:"Snapshot" type:"list"` +} + +// String returns the string representation +func (s DescribeClusterSnapshotsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClusterSnapshotsOutput) GoString() string { + return s.String() +} + +type DescribeClusterSubnetGroupsInput struct { + _ struct{} `type:"structure"` + + // The name of the cluster subnet group for which information is requested. + ClusterSubnetGroupName *string `type:"string"` + + // An optional parameter that specifies the starting point to return a set of + // response records. When the results of a DescribeClusterSubnetGroups request + // exceed the value specified in MaxRecords, AWS returns a value in the Marker + // field of the response. You can retrieve the next set of response records + // by providing the returned marker value in the Marker parameter and retrying + // the request. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call. If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. + // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // A tag key or keys for which you want to return all matching cluster subnet + // groups that are associated with the specified key or keys. For example, suppose + // that you have subnet groups that are tagged with keys called owner and environment. + // If you specify both of these tag keys in the request, Amazon Redshift returns + // a response with the subnet groups that have either or both of these tag keys + // associated with them. + TagKeys []*string `locationNameList:"TagKey" type:"list"` + + // A tag value or values for which you want to return all matching cluster subnet + // groups that are associated with the specified tag value or values. For example, + // suppose that you have subnet groups that are tagged with values called admin + // and test. If you specify both of these tag values in the request, Amazon + // Redshift returns a response with the subnet groups that have either or both + // of these tag values associated with them. + TagValues []*string `locationNameList:"TagValue" type:"list"` +} + +// String returns the string representation +func (s DescribeClusterSubnetGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClusterSubnetGroupsInput) GoString() string { + return s.String() +} + +// Contains the output from the DescribeClusterSubnetGroups action. +type DescribeClusterSubnetGroupsOutput struct { + _ struct{} `type:"structure"` + + // A list of ClusterSubnetGroup instances. + ClusterSubnetGroups []*ClusterSubnetGroup `locationNameList:"ClusterSubnetGroup" type:"list"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. 
+ Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeClusterSubnetGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClusterSubnetGroupsOutput) GoString() string { + return s.String() +} + +type DescribeClusterVersionsInput struct { + _ struct{} `type:"structure"` + + // The name of a specific cluster parameter group family to return details for. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters. First character must be a letter. + // Cannot end with a hyphen or contain two consecutive hyphens. + ClusterParameterGroupFamily *string `type:"string"` + + // The specific cluster version to return. + // + // Example: 1.0 + ClusterVersion *string `type:"string"` + + // An optional parameter that specifies the starting point to return a set of + // response records. When the results of a DescribeClusterVersions request exceed + // the value specified in MaxRecords, AWS returns a value in the Marker field + // of the response. You can retrieve the next set of response records by providing + // the returned marker value in the Marker parameter and retrying the request. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call. If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. + // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeClusterVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClusterVersionsInput) GoString() string { + return s.String() +} + +// Contains the output from the DescribeClusterVersions action. +type DescribeClusterVersionsOutput struct { + _ struct{} `type:"structure"` + + // A list of Version elements. + ClusterVersions []*ClusterVersion `locationNameList:"ClusterVersion" type:"list"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeClusterVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClusterVersionsOutput) GoString() string { + return s.String() +} +
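The Marker/MaxRecords pair documented on every Describe* input above is the SDK's uniform Redshift pagination contract. A minimal sketch of the loop, not part of the vendored patch, assuming default credentials and region:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/redshift"
)

func main() {
	svc := redshift.New(session.Must(session.NewSession()))

	input := &redshift.DescribeClusterVersionsInput{
		MaxRecords: aws.Int64(20), // smallest allowed page, to force paging
	}
	for {
		out, err := svc.DescribeClusterVersions(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, v := range out.ClusterVersions {
			fmt.Println(aws.StringValue(v.ClusterVersion))
		}
		// An empty Marker means all response records have been retrieved.
		if aws.StringValue(out.Marker) == "" {
			break
		}
		input.Marker = out.Marker // retry the request with the returned marker
	}
}

+type DescribeClustersInput struct { + _ struct{} `type:"structure"` + + // The unique identifier of a cluster whose properties you are requesting. This + // parameter is case sensitive. + // + // The default is that all clusters defined for an account are returned. + ClusterIdentifier *string `type:"string"` + + // An optional parameter that specifies the starting point to return a set of + // response records. When the results of a DescribeClusters request exceed the + // value specified in MaxRecords, AWS returns a value in the Marker field of + // the response.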
You can retrieve the next set of response records by providing + // the returned marker value in the Marker parameter and retrying the request. + // + // Constraints: You can specify either the ClusterIdentifier parameter or + // the Marker parameter, but not both. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call. If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. + // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // A tag key or keys for which you want to return all matching clusters that + // are associated with the specified key or keys. For example, suppose that + // you have clusters that are tagged with keys called owner and environment. + // If you specify both of these tag keys in the request, Amazon Redshift returns + // a response with the clusters that have either or both of these tag keys associated + // with them. + TagKeys []*string `locationNameList:"TagKey" type:"list"` + + // A tag value or values for which you want to return all matching clusters + // that are associated with the specified tag value or values. For example, + // suppose that you have clusters that are tagged with values called admin and + // test. If you specify both of these tag values in the request, Amazon Redshift + // returns a response with the clusters that have either or both of these tag + // values associated with them. + TagValues []*string `locationNameList:"TagValue" type:"list"` +} + +// String returns the string representation +func (s DescribeClustersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClustersInput) GoString() string { + return s.String() +} + +// Contains the output from the DescribeClusters action. +type DescribeClustersOutput struct { + _ struct{} `type:"structure"` + + // A list of Cluster objects, where each object describes one cluster. + Clusters []*Cluster `locationNameList:"Cluster" type:"list"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeClustersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClustersOutput) GoString() string { + return s.String() +} + +type DescribeDefaultClusterParametersInput struct { + _ struct{} `type:"structure"` + + // An optional parameter that specifies the starting point to return a set of + // response records. When the results of a DescribeDefaultClusterParameters + // request exceed the value specified in MaxRecords, AWS returns a value in + // the Marker field of the response. You can retrieve the next set of response + // records by providing the returned marker value in the Marker parameter and + // retrying the request. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call. 
If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. + // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // The name of the cluster parameter group family. + ParameterGroupFamily *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeDefaultClusterParametersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDefaultClusterParametersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDefaultClusterParametersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDefaultClusterParametersInput"} + if s.ParameterGroupFamily == nil { + invalidParams.Add(request.NewErrParamRequired("ParameterGroupFamily")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeDefaultClusterParametersOutput struct { + _ struct{} `type:"structure"` + + // Describes the default cluster parameters for a parameter group family. + DefaultClusterParameters *DefaultClusterParameters `type:"structure"` +} + +// String returns the string representation +func (s DescribeDefaultClusterParametersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDefaultClusterParametersOutput) GoString() string { + return s.String() +} + +type DescribeEventCategoriesInput struct { + _ struct{} `type:"structure"` + + // The source type, such as cluster or parameter group, to which the described + // event categories apply. + // + // Valid values: cluster, cluster-snapshot, cluster-parameter-group, and cluster-security-group. + SourceType *string `type:"string"` +} + +// String returns the string representation +func (s DescribeEventCategoriesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventCategoriesInput) GoString() string { + return s.String() +} + +type DescribeEventCategoriesOutput struct { + _ struct{} `type:"structure"` + + // A list of event categories descriptions. + EventCategoriesMapList []*EventCategoriesMap `locationNameList:"EventCategoriesMap" type:"list"` +} + +// String returns the string representation +func (s DescribeEventCategoriesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventCategoriesOutput) GoString() string { + return s.String() +} + +type DescribeEventSubscriptionsInput struct { + _ struct{} `type:"structure"` + + // An optional parameter that specifies the starting point to return a set of + // response records. When the results of a DescribeEventSubscriptions request + // exceed the value specified in MaxRecords, AWS returns a value in the Marker + // field of the response. You can retrieve the next set of response records + // by providing the returned marker value in the Marker parameter and retrying + // the request. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call. 
If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. + // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // The name of the Amazon Redshift event notification subscription to be described. + SubscriptionName *string `type:"string"` +} + +// String returns the string representation +func (s DescribeEventSubscriptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventSubscriptionsInput) GoString() string { + return s.String() +} + +type DescribeEventSubscriptionsOutput struct { + _ struct{} `type:"structure"` + + // A list of event subscriptions. + EventSubscriptionsList []*EventSubscription `locationNameList:"EventSubscription" type:"list"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeEventSubscriptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventSubscriptionsOutput) GoString() string { + return s.String() +} + +type DescribeEventsInput struct { + _ struct{} `type:"structure"` + + // The number of minutes prior to the time of the request for which to retrieve + // events. For example, if the request is sent at 18:00 and you specify a duration + // of 60, then only events which have occurred after 17:00 will be returned. + // + // Default: 60 + Duration *int64 `type:"integer"` + + // The end of the time interval for which to retrieve events, specified in ISO + // 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia + // page. (http://en.wikipedia.org/wiki/ISO_8601) + // + // Example: 2009-07-08T18:00Z + EndTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // An optional parameter that specifies the starting point to return a set of + // response records. When the results of a DescribeEvents request exceed the + // value specified in MaxRecords, AWS returns a value in the Marker field of + // the response. You can retrieve the next set of response records by providing + // the returned marker value in the Marker parameter and retrying the request. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call. If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. + // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // The identifier of the event source for which events will be returned. If + // this parameter is not specified, then all sources are included in the response. + // + // Constraints: + // + // If SourceIdentifier is supplied, SourceType must also be provided. 
+ // + // Specify a cluster identifier when SourceType is cluster. Specify a cluster + // security group name when SourceType is cluster-security-group. Specify a + // cluster parameter group name when SourceType is cluster-parameter-group. + // Specify a cluster snapshot identifier when SourceType is cluster-snapshot. + SourceIdentifier *string `type:"string"` + + // The event source to retrieve events for. If no value is specified, all events + // are returned. + // + // Constraints: + // + // If SourceType is supplied, SourceIdentifier must also be provided. + // + // Specify cluster when SourceIdentifier is a cluster identifier. Specify + // cluster-security-group when SourceIdentifier is a cluster security group + // name. Specify cluster-parameter-group when SourceIdentifier is a cluster + // parameter group name. Specify cluster-snapshot when SourceIdentifier is a + // cluster snapshot identifier. + SourceType *string `type:"string" enum:"SourceType"` + + // The beginning of the time interval to retrieve events for, specified in ISO + // 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia + // page. (http://en.wikipedia.org/wiki/ISO_8601) + // + // Example: 2009-07-08T18:00Z + StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s DescribeEventsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventsInput) GoString() string { + return s.String() +} + +type DescribeEventsOutput struct { + _ struct{} `type:"structure"` + + // A list of Event instances. + Events []*Event `locationNameList:"Event" type:"list"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeEventsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventsOutput) GoString() string { + return s.String() +} + +type DescribeHsmClientCertificatesInput struct { + _ struct{} `type:"structure"` + + // The identifier of a specific HSM client certificate for which you want information. + // If no identifier is specified, information is returned for all HSM client + // certificates owned by your AWS customer account. + HsmClientCertificateIdentifier *string `type:"string"` + + // An optional parameter that specifies the starting point to return a set of + // response records. When the results of a DescribeHsmClientCertificates request + // exceed the value specified in MaxRecords, AWS returns a value in the Marker + // field of the response. You can retrieve the next set of response records + // by providing the returned marker value in the Marker parameter and retrying + // the request. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call. If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. 
+ // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // A tag key or keys for which you want to return all matching HSM client certificates + // that are associated with the specified key or keys. For example, suppose + // that you have HSM client certificates that are tagged with keys called owner + // and environment. If you specify both of these tag keys in the request, Amazon + // Redshift returns a response with the HSM client certificates that have either + // or both of these tag keys associated with them. + TagKeys []*string `locationNameList:"TagKey" type:"list"` + + // A tag value or values for which you want to return all matching HSM client + // certificates that are associated with the specified tag value or values. + // For example, suppose that you have HSM client certificates that are tagged + // with values called admin and test. If you specify both of these tag values + // in the request, Amazon Redshift returns a response with the HSM client certificates + // that have either or both of these tag values associated with them. + TagValues []*string `locationNameList:"TagValue" type:"list"` +} + +// String returns the string representation +func (s DescribeHsmClientCertificatesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeHsmClientCertificatesInput) GoString() string { + return s.String() +} + +type DescribeHsmClientCertificatesOutput struct { + _ struct{} `type:"structure"` + + // A list of the identifiers for one or more HSM client certificates used by + // Amazon Redshift clusters to store and retrieve database encryption keys in + // an HSM. + HsmClientCertificates []*HsmClientCertificate `locationNameList:"HsmClientCertificate" type:"list"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeHsmClientCertificatesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeHsmClientCertificatesOutput) GoString() string { + return s.String() +} + +type DescribeHsmConfigurationsInput struct { + _ struct{} `type:"structure"` + + // The identifier of a specific Amazon Redshift HSM configuration to be described. + // If no identifier is specified, information is returned for all HSM configurations + // owned by your AWS customer account. + HsmConfigurationIdentifier *string `type:"string"` + + // An optional parameter that specifies the starting point to return a set of + // response records. When the results of a DescribeHsmConfigurations request + // exceed the value specified in MaxRecords, AWS returns a value in the Marker + // field of the response. You can retrieve the next set of response records + // by providing the returned marker value in the Marker parameter and retrying + // the request. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call. If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. 
You can retrieve the next + // set of records by retrying the command with the returned marker value. + // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // A tag key or keys for which you want to return all matching HSM configurations + // that are associated with the specified key or keys. For example, suppose + // that you have HSM configurations that are tagged with keys called owner and + // environment. If you specify both of these tag keys in the request, Amazon + // Redshift returns a response with the HSM configurations that have either + // or both of these tag keys associated with them. + TagKeys []*string `locationNameList:"TagKey" type:"list"` + + // A tag value or values for which you want to return all matching HSM configurations + // that are associated with the specified tag value or values. For example, + // suppose that you have HSM configurations that are tagged with values called + // admin and test. If you specify both of these tag values in the request, Amazon + // Redshift returns a response with the HSM configurations that have either + // or both of these tag values associated with them. + TagValues []*string `locationNameList:"TagValue" type:"list"` +} + +// String returns the string representation +func (s DescribeHsmConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeHsmConfigurationsInput) GoString() string { + return s.String() +} + +type DescribeHsmConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // A list of HsmConfiguration objects. + HsmConfigurations []*HsmConfiguration `locationNameList:"HsmConfiguration" type:"list"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeHsmConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeHsmConfigurationsOutput) GoString() string { + return s.String() +} + +type DescribeLoggingStatusInput struct { + _ struct{} `type:"structure"` + + // The identifier of the cluster from which to get the logging status. + // + // Example: examplecluster + ClusterIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeLoggingStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoggingStatusInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeLoggingStatusInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeLoggingStatusInput"} + if s.ClusterIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeOrderableClusterOptionsInput struct { + _ struct{} `type:"structure"` + + // The version filter value. 
Specify this parameter to show only the available + // offerings matching the specified version. + // + // Default: All versions. + // + // Constraints: Must be one of the versions returned from DescribeClusterVersions. + ClusterVersion *string `type:"string"` + + // An optional parameter that specifies the starting point to return a set of + // response records. When the results of a DescribeOrderableClusterOptions request + // exceed the value specified in MaxRecords, AWS returns a value in the Marker + // field of the response. You can retrieve the next set of response records + // by providing the returned marker value in the Marker parameter and retrying + // the request. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call. If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. + // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // The node type filter value. Specify this parameter to show only the available + // offerings matching the specified node type. + NodeType *string `type:"string"` +} + +// String returns the string representation +func (s DescribeOrderableClusterOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeOrderableClusterOptionsInput) GoString() string { + return s.String() +} + +// Contains the output from the DescribeOrderableClusterOptions action. +type DescribeOrderableClusterOptionsOutput struct { + _ struct{} `type:"structure"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` + + // An OrderableClusterOption structure containing information about orderable + // options for the cluster. + OrderableClusterOptions []*OrderableClusterOption `locationNameList:"OrderableClusterOption" type:"list"` +} + +// String returns the string representation +func (s DescribeOrderableClusterOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeOrderableClusterOptionsOutput) GoString() string { + return s.String() +} +
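DescribeOrderableClusterOptions above is the discovery call for what can actually be launched. A sketch, not from the patch, that lists orderable node types for one version; the OrderableClusterOption field names NodeType and ClusterType are assumed from this package's generated shapes, and 1.0 is the example version from the docs:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/redshift"
)

func main() {
	svc := redshift.New(session.Must(session.NewSession()))

	out, err := svc.DescribeOrderableClusterOptions(&redshift.DescribeOrderableClusterOptionsInput{
		ClusterVersion: aws.String("1.0"), // example version from the docs above
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, opt := range out.OrderableClusterOptions {
		// Each option pairs a node type with a cluster type (multi-node
		// or single-node).
		fmt.Printf("%s (%s)\n", aws.StringValue(opt.NodeType),
			aws.StringValue(opt.ClusterType))
	}
}

+type DescribeReservedNodeOfferingsInput struct { + _ struct{} `type:"structure"` + + // An optional parameter that specifies the starting point to return a set of + // response records. When the results of a DescribeReservedNodeOfferings request + // exceed the value specified in MaxRecords, AWS returns a value in the Marker + // field of the response. You can retrieve the next set of response records + // by providing the returned marker value in the Marker parameter and retrying + // the request. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call. If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value.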
+ // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // The unique identifier for the offering. + ReservedNodeOfferingId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeReservedNodeOfferingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedNodeOfferingsInput) GoString() string { + return s.String() +} + +type DescribeReservedNodeOfferingsOutput struct { + _ struct{} `type:"structure"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` + + // A list of ReservedNodeOffering objects. + ReservedNodeOfferings []*ReservedNodeOffering `locationNameList:"ReservedNodeOffering" type:"list"` +} + +// String returns the string representation +func (s DescribeReservedNodeOfferingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedNodeOfferingsOutput) GoString() string { + return s.String() +} + +type DescribeReservedNodesInput struct { + _ struct{} `type:"structure"` + + // An optional parameter that specifies the starting point to return a set of + // response records. When the results of a DescribeReservedNodes request exceed + // the value specified in MaxRecords, AWS returns a value in the Marker field + // of the response. You can retrieve the next set of response records by providing + // the returned marker value in the Marker parameter and retrying the request. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call. If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. + // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // Identifier for the node reservation. + ReservedNodeId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeReservedNodesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedNodesInput) GoString() string { + return s.String() +} + +type DescribeReservedNodesOutput struct { + _ struct{} `type:"structure"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` + + // The list of ReservedNode objects. 
+ ReservedNodes []*ReservedNode `locationNameList:"ReservedNode" type:"list"` +} + +// String returns the string representation +func (s DescribeReservedNodesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedNodesOutput) GoString() string { + return s.String() +} + +type DescribeResizeInput struct { + _ struct{} `type:"structure"` + + // The unique identifier of a cluster whose resize progress you are requesting. + // This parameter is case-sensitive. + // + // By default, resize operations for all clusters defined for an AWS account + // are returned. + ClusterIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeResizeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeResizeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeResizeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeResizeInput"} + if s.ClusterIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Describes the result of a cluster resize operation. +type DescribeResizeOutput struct { + _ struct{} `type:"structure"` + + // The average rate of the resize operation over the last few minutes, measured + // in megabytes per second. After the resize operation completes, this value + // shows the average rate of the entire resize operation. + AvgResizeRateInMegaBytesPerSecond *float64 `type:"double"` + + // The number of seconds that have elapsed since the resize operation began. + // After the resize operation completes, this value shows the total actual time, + // in seconds, for the resize operation. + ElapsedTimeInSeconds *int64 `type:"long"` + + // The estimated time remaining, in seconds, until the resize operation is complete. + // This value is calculated based on the average resize rate and the estimated + // amount of data remaining to be processed. Once the resize operation is complete, + // this value will be 0. + EstimatedTimeToCompletionInSeconds *int64 `type:"long"` + + // The names of tables that have been completely imported. + // + // Valid Values: List of table names. + ImportTablesCompleted []*string `type:"list"` + + // The names of tables that are currently being imported. + // + // Valid Values: List of table names. + ImportTablesInProgress []*string `type:"list"` + + // The names of tables that have not yet been imported. + // + // Valid Values: List of table names. + ImportTablesNotStarted []*string `type:"list"` + + // While the resize operation is in progress, this value shows the current amount + // of data, in megabytes, that has been processed so far. When the resize operation + // is complete, this value shows the total amount of data, in megabytes, on + // the cluster, which may be more or less than TotalResizeDataInMegaBytes (the + // estimated total amount of data before resize). + ProgressInMegaBytes *int64 `type:"long"` + + // The status of the resize operation. + // + // Valid Values: NONE | IN_PROGRESS | FAILED | SUCCEEDED + Status *string `type:"string"` + + // The cluster type after the resize operation is complete.
+ // + // Valid Values: multi-node | single-node + TargetClusterType *string `type:"string"` + + // The node type that the cluster will have after the resize operation is complete. + TargetNodeType *string `type:"string"` + + // The number of nodes that the cluster will have after the resize operation + // is complete. + TargetNumberOfNodes *int64 `type:"integer"` + + // The estimated total amount of data, in megabytes, on the cluster before the + // resize operation began. + TotalResizeDataInMegaBytes *int64 `type:"long"` +} + +// String returns the string representation +func (s DescribeResizeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeResizeOutput) GoString() string { + return s.String() +} + +// The input for the DescribeSnapshotCopyGrants action. +type DescribeSnapshotCopyGrantsInput struct { + _ struct{} `type:"structure"` + + // An optional parameter that specifies the starting point to return a set of + // response records. When the results of a DescribeSnapshotCopyGrants request + // exceed the value specified in MaxRecords, AWS returns a value in the Marker + // field of the response. You can retrieve the next set of response records + // by providing the returned marker value in the Marker parameter and retrying + // the request. + // + // Constraints: You can specify either the SnapshotCopyGrantName parameter + // or the Marker parameter, but not both. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call. If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. + // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // The name of the snapshot copy grant. + SnapshotCopyGrantName *string `type:"string"` + + // A tag key or keys for which you want to return all matching resources that + // are associated with the specified key or keys. For example, suppose that + // you have resources tagged with keys called owner and environment. If you + // specify both of these tag keys in the request, Amazon Redshift returns a + // response with all resources that have either or both of these tag keys associated + // with them. + TagKeys []*string `locationNameList:"TagKey" type:"list"` + + // A tag value or values for which you want to return all matching resources + // that are associated with the specified value or values. For example, suppose + // that you have resources tagged with values called admin and test. If you + // specify both of these tag values in the request, Amazon Redshift returns + // a response with all resources that have either or both of these tag values + // associated with them. + TagValues []*string `locationNameList:"TagValue" type:"list"` +} + +// String returns the string representation +func (s DescribeSnapshotCopyGrantsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSnapshotCopyGrantsInput) GoString() string { + return s.String() +} +
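The resize counters documented just above (ProgressInMegaBytes against the TotalResizeDataInMegaBytes estimate) make progress reporting a small division. A sketch, not from the patch, with a placeholder cluster name:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/redshift"
)

func main() {
	svc := redshift.New(session.Must(session.NewSession()))

	out, err := svc.DescribeResize(&redshift.DescribeResizeInput{
		ClusterIdentifier: aws.String("examplecluster"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	done := aws.Int64Value(out.ProgressInMegaBytes)
	total := aws.Int64Value(out.TotalResizeDataInMegaBytes)
	if total > 0 {
		// TotalResizeDataInMegaBytes is an estimate, so the ratio can
		// pass 100% once the resize finishes.
		fmt.Printf("%s: %.1f%% (%d of %d MB)\n", aws.StringValue(out.Status),
			100*float64(done)/float64(total), done, total)
	}
}

+type DescribeSnapshotCopyGrantsOutput struct { + _ struct{} `type:"structure"` + + // An optional parameter that specifies the starting point to return a set of + // response records.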
When the results of a DescribeSnapshotCopyGrants request + // exceed the value specified in MaxRecords, AWS returns a value in the Marker + // field of the response. You can retrieve the next set of response records + // by providing the returned marker value in the Marker parameter and retrying + // the request. + // + // Constraints: You can specify either the SnapshotCopyGrantName parameter + // or the Marker parameter, but not both. + Marker *string `type:"string"` + + // The list of SnapshotCopyGrant objects. + SnapshotCopyGrants []*SnapshotCopyGrant `locationNameList:"SnapshotCopyGrant" type:"list"` +} + +// String returns the string representation +func (s DescribeSnapshotCopyGrantsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSnapshotCopyGrantsOutput) GoString() string { + return s.String() +} + +type DescribeTableRestoreStatusInput struct { + _ struct{} `type:"structure"` + + // The Amazon Redshift cluster that the table is being restored to. + ClusterIdentifier *string `type:"string"` + + // An optional pagination token provided by a previous DescribeTableRestoreStatus + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by the MaxRecords parameter. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + MaxRecords *int64 `type:"integer"` + + // The identifier of the table restore request to return status for. If you + // don't specify a TableRestoreRequestId value, then DescribeTableRestoreStatus + // returns the status of all in-progress table restore requests. + TableRestoreRequestId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeTableRestoreStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTableRestoreStatusInput) GoString() string { + return s.String() +} + +type DescribeTableRestoreStatusOutput struct { + _ struct{} `type:"structure"` + + // A pagination token that can be used in a subsequent DescribeTableRestoreStatus + // request. + Marker *string `type:"string"` + + // A list of status details for one or more table restore requests. + TableRestoreStatusDetails []*TableRestoreStatus `locationNameList:"TableRestoreStatus" type:"list"` +} + +// String returns the string representation +func (s DescribeTableRestoreStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTableRestoreStatusOutput) GoString() string { + return s.String() +} +
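DescribeTableRestoreStatus above returns every in-progress restore when no TableRestoreRequestId is given. A sketch, not from the patch; the TableRestoreRequestId and Status field names on the TableRestoreStatus type are assumed from this package's generated shapes:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/redshift"
)

func main() {
	svc := redshift.New(session.Must(session.NewSession()))

	// No TableRestoreRequestId: reports all in-progress table restores.
	out, err := svc.DescribeTableRestoreStatus(&redshift.DescribeTableRestoreStatusInput{})
	if err != nil {
		log.Fatal(err)
	}
	for _, st := range out.TableRestoreStatusDetails {
		fmt.Printf("%s: %s\n", aws.StringValue(st.TableRestoreRequestId),
			aws.StringValue(st.Status))
	}
}

+type DescribeTagsInput struct { + _ struct{} `type:"structure"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the marker + // parameter and retrying the command. If the marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call.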
If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. + MaxRecords *int64 `type:"integer"` + + // The Amazon Resource Name (ARN) for which you want to describe the tag or + // tags. For example, arn:aws:redshift:us-east-1:123456789:cluster:t1. + ResourceName *string `type:"string"` + + // The type of resource with which you want to view tags. Valid resource types + // are: Cluster, CIDR/IP, EC2 security group, Snapshot, Cluster security group, + // Subnet group, HSM connection, HSM certificate, Parameter group, and Snapshot + // copy grant. + // + // For more information about Amazon Redshift resource types and constructing + // ARNs, go to Constructing an Amazon Redshift Amazon Resource Name (ARN) (http://docs.aws.amazon.com/redshift/latest/mgmt/constructing-redshift-arn.html) + // in the Amazon Redshift Cluster Management Guide. + ResourceType *string `type:"string"` + + // A tag key or keys for which you want to return all matching resources that + // are associated with the specified key or keys. For example, suppose that + // you have resources tagged with keys called owner and environment. If you + // specify both of these tag keys in the request, Amazon Redshift returns a + // response with all resources that have either or both of these tag keys associated + // with them. + TagKeys []*string `locationNameList:"TagKey" type:"list"` + + // A tag value or values for which you want to return all matching resources + // that are associated with the specified value or values. For example, suppose + // that you have resources tagged with values called admin and test. If you + // specify both of these tag values in the request, Amazon Redshift returns + // a response with all resources that have either or both of these tag values + // associated with them. + TagValues []*string `locationNameList:"TagValue" type:"list"` +} + +// String returns the string representation +func (s DescribeTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTagsInput) GoString() string { + return s.String() +} + +type DescribeTagsOutput struct { + _ struct{} `type:"structure"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` + + // A list of tags with their associated resources. + TaggedResources []*TaggedResource `locationNameList:"TaggedResource" type:"list"` +} + +// String returns the string representation +func (s DescribeTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTagsOutput) GoString() string { + return s.String() +} +
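DescribeTags keys off a full ARN rather than a bare identifier, and its TagKeys/TagValues filters are ORs, as the comments above spell out. A sketch, not from the patch, reusing the placeholder ARN from the field documentation:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/redshift"
)

func main() {
	svc := redshift.New(session.Must(session.NewSession()))

	out, err := svc.DescribeTags(&redshift.DescribeTagsInput{
		// Placeholder ARN taken from the field documentation above.
		ResourceName: aws.String("arn:aws:redshift:us-east-1:123456789:cluster:t1"),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, tr := range out.TaggedResources {
		if tr.Tag != nil {
			fmt.Printf("%s=%s\n", aws.StringValue(tr.Tag.Key),
				aws.StringValue(tr.Tag.Value))
		}
	}
}

+type DisableLoggingInput struct { + _ struct{} `type:"structure"` + + // The identifier of the cluster on which logging is to be stopped.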
+ // + // Example: examplecluster + ClusterIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DisableLoggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableLoggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DisableLoggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisableLoggingInput"} + if s.ClusterIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DisableSnapshotCopyInput struct { + _ struct{} `type:"structure"` + + // The unique identifier of the source cluster that you want to disable copying + // of snapshots to a destination region. + // + // Constraints: Must be the valid name of an existing cluster that has cross-region + // snapshot copy enabled. + ClusterIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DisableSnapshotCopyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableSnapshotCopyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DisableSnapshotCopyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisableSnapshotCopyInput"} + if s.ClusterIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DisableSnapshotCopyOutput struct { + _ struct{} `type:"structure"` + + // Describes a cluster. + Cluster *Cluster `type:"structure"` +} + +// String returns the string representation +func (s DisableSnapshotCopyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableSnapshotCopyOutput) GoString() string { + return s.String() +} + +// Describes an Amazon EC2 security group. +type EC2SecurityGroup struct { + _ struct{} `type:"structure"` + + // The name of the EC2 Security Group. + EC2SecurityGroupName *string `type:"string"` + + // The AWS ID of the owner of the EC2 security group specified in the EC2SecurityGroupName + // field. + EC2SecurityGroupOwnerId *string `type:"string"` + + // The status of the EC2 security group. + Status *string `type:"string"` + + // The list of tags for the EC2 security group. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s EC2SecurityGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EC2SecurityGroup) GoString() string { + return s.String() +} + +// Describes the status of the elastic IP (EIP) address. +type ElasticIpStatus struct { + _ struct{} `type:"structure"` + + // The elastic IP (EIP) address for the cluster. + ElasticIp *string `type:"string"` + + // The status of the elastic IP (EIP) address. 
+ Status *string `type:"string"` +} + +// String returns the string representation +func (s ElasticIpStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ElasticIpStatus) GoString() string { + return s.String() +} + +type EnableLoggingInput struct { + _ struct{} `type:"structure"` + + // The name of an existing S3 bucket where the log files are to be stored. + // + // Constraints: + // + // Must be in the same region as the cluster. The cluster must have read bucket + // and put object permissions. + BucketName *string `type:"string" required:"true"` + + // The identifier of the cluster on which logging is to be started. + // + // Example: examplecluster + ClusterIdentifier *string `type:"string" required:"true"` + + // The prefix applied to the log file names. + // + // Constraints: + // + // Cannot exceed 512 characters. Cannot contain spaces ( ), double quotes ("), + // single quotes ('), a backslash (\), or control characters. The hexadecimal + // codes for invalid characters are: x00 to x20, x22, x27, x5c, and x7f or larger. + S3KeyPrefix *string `type:"string"` +} + +// String returns the string representation +func (s EnableLoggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableLoggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *EnableLoggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EnableLoggingInput"} + if s.BucketName == nil { + invalidParams.Add(request.NewErrParamRequired("BucketName")) + } + if s.ClusterIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type EnableSnapshotCopyInput struct { + _ struct{} `type:"structure"` + + // The unique identifier of the source cluster to copy snapshots from. + // + // Constraints: Must be the valid name of an existing cluster that does not + // already have cross-region snapshot copy enabled. + ClusterIdentifier *string `type:"string" required:"true"` + + // The destination region that you want to copy snapshots to. + // + // Constraints: Must be the name of a valid region. For more information, + // see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#redshift_region) + // in the Amazon Web Services General Reference. + DestinationRegion *string `type:"string" required:"true"` + + // The number of days to retain automated snapshots in the destination region + // after they are copied from the source region. + // + // Default: 7. + // + // Constraints: Must be at least 1 and no more than 35. + RetentionPeriod *int64 `type:"integer"` + + // The name of the snapshot copy grant to use when snapshots of an AWS KMS-encrypted + // cluster are copied to the destination region. + SnapshotCopyGrantName *string `type:"string"` +} + +// String returns the string representation +func (s EnableSnapshotCopyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableSnapshotCopyInput) GoString() string { + return s.String() +} +
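ClusterIdentifier and DestinationRegion above are both required; the generated Validate method that follows checks this client-side, before the request is ever signed or sent. A sketch, not from the patch, with placeholder cluster and region values:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/redshift"
)

func main() {
	svc := redshift.New(session.Must(session.NewSession()))

	input := &redshift.EnableSnapshotCopyInput{
		ClusterIdentifier: aws.String("examplecluster"), // placeholder
		DestinationRegion: aws.String("us-west-2"),      // placeholder
		RetentionPeriod:   aws.Int64(7),                 // the default; must be 1-35
	}
	// Validate runs locally; omitting either required field fails here
	// with an ErrInvalidParams aggregate, with no network call made.
	if err := input.Validate(); err != nil {
		log.Fatal(err)
	}
	out, err := svc.EnableSnapshotCopy(input)
	if err != nil {
		log.Fatal(err)
	}
	if out.Cluster != nil {
		fmt.Println(aws.StringValue(out.Cluster.ClusterIdentifier))
	}
}

+// Validate inspects the fields of the type to determine if they are valid.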
+func (s *EnableSnapshotCopyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EnableSnapshotCopyInput"} + if s.ClusterIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterIdentifier")) + } + if s.DestinationRegion == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationRegion")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type EnableSnapshotCopyOutput struct { + _ struct{} `type:"structure"` + + // Describes a cluster. + Cluster *Cluster `type:"structure"` +} + +// String returns the string representation +func (s EnableSnapshotCopyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableSnapshotCopyOutput) GoString() string { + return s.String() +} + +// Describes a connection endpoint. +type Endpoint struct { + _ struct{} `type:"structure"` + + // The DNS address of the Cluster. + Address *string `type:"string"` + + // The port that the database engine is listening on. + Port *int64 `type:"integer"` +} + +// String returns the string representation +func (s Endpoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Endpoint) GoString() string { + return s.String() +} + +// Describes an event. +type Event struct { + _ struct{} `type:"structure"` + + // The date and time of the event. + Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // A list of the event categories. + // + // Values: Configuration, Management, Monitoring, Security + EventCategories []*string `locationNameList:"EventCategory" type:"list"` + + // The identifier of the event. + EventId *string `type:"string"` + + // The text of this event. + Message *string `type:"string"` + + // The severity of the event. + // + // Values: ERROR, INFO + Severity *string `type:"string"` + + // The identifier for the source of the event. + SourceIdentifier *string `type:"string"` + + // The source type for this event. + SourceType *string `type:"string" enum:"SourceType"` +} + +// String returns the string representation +func (s Event) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Event) GoString() string { + return s.String() +} + +// Describes event categories. +type EventCategoriesMap struct { + _ struct{} `type:"structure"` + + // The events in the event category. + Events []*EventInfoMap `locationNameList:"EventInfoMap" type:"list"` + + // The source type, such as cluster or cluster-snapshot, that the returned categories + // belong to. + SourceType *string `type:"string"` +} + +// String returns the string representation +func (s EventCategoriesMap) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EventCategoriesMap) GoString() string { + return s.String() +} + +// Describes event information. +type EventInfoMap struct { + _ struct{} `type:"structure"` + + // The category of an Amazon Redshift event. + EventCategories []*string `locationNameList:"EventCategory" type:"list"` + + // The description of an Amazon Redshift event. + EventDescription *string `type:"string"` + + // The identifier of an Amazon Redshift event. + EventId *string `type:"string"` + + // The severity of the event. 
+	//
+	// Values: ERROR, INFO
+	Severity *string `type:"string"`
+}
+
+// String returns the string representation
+func (s EventInfoMap) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EventInfoMap) GoString() string {
+	return s.String()
+}
+
+// Describes event subscriptions.
+type EventSubscription struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the Amazon Redshift event notification subscription.
+	CustSubscriptionId *string `type:"string"`
+
+	// The AWS customer account associated with the Amazon Redshift event notification
+	// subscription.
+	CustomerAwsId *string `type:"string"`
+
+	// A Boolean value indicating whether the subscription is enabled. true indicates
+	// the subscription is enabled.
+	Enabled *bool `type:"boolean"`
+
+	// The list of Amazon Redshift event categories specified in the event notification
+	// subscription.
+	//
+	// Values: Configuration, Management, Monitoring, Security
+	EventCategoriesList []*string `locationNameList:"EventCategory" type:"list"`
+
+	// The event severity specified in the Amazon Redshift event notification subscription.
+	//
+	// Values: ERROR, INFO
+	Severity *string `type:"string"`
+
+	// The Amazon Resource Name (ARN) of the Amazon SNS topic used by the event
+	// notification subscription.
+	SnsTopicArn *string `type:"string"`
+
+	// A list of the sources that publish events to the Amazon Redshift event notification
+	// subscription.
+	SourceIdsList []*string `locationNameList:"SourceId" type:"list"`
+
+	// The source type of the events returned by the Amazon Redshift event notification,
+	// such as cluster, or cluster-snapshot.
+	SourceType *string `type:"string"`
+
+	// The status of the Amazon Redshift event notification subscription.
+	//
+	// Constraints:
+	//
+	// Can be one of the following: active | no-permission | topic-not-exist. The
+	// status "no-permission" indicates that Amazon Redshift no longer has permission
+	// to post to the Amazon SNS topic. The status "topic-not-exist" indicates that
+	// the topic was deleted after the subscription was created.
+	Status *string `type:"string"`
+
+	// The date and time the Amazon Redshift event notification subscription was
+	// created.
+	SubscriptionCreationTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+	// The list of tags for the event subscription.
+	Tags []*Tag `locationNameList:"Tag" type:"list"`
+}
+
+// String returns the string representation
+func (s EventSubscription) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EventSubscription) GoString() string {
+	return s.String()
+}
+
+// Returns information about an HSM client certificate. The certificate is stored
+// in a secure Hardware Security Module (HSM), and used by the Amazon Redshift
+// cluster to encrypt data files.
+type HsmClientCertificate struct {
+	_ struct{} `type:"structure"`
+
+	// The identifier of the HSM client certificate.
+	HsmClientCertificateIdentifier *string `type:"string"`
+
+	// The public key that the Amazon Redshift cluster will use to connect to the
+	// HSM. You must register the public key in the HSM.
+	HsmClientCertificatePublicKey *string `type:"string"`
+
+	// The list of tags for the HSM client certificate.
+ Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s HsmClientCertificate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HsmClientCertificate) GoString() string { + return s.String() +} + +// Returns information about an HSM configuration, which is an object that describes +// to Amazon Redshift clusters the information they require to connect to an +// HSM where they can store database encryption keys. +type HsmConfiguration struct { + _ struct{} `type:"structure"` + + // A text description of the HSM configuration. + Description *string `type:"string"` + + // The name of the Amazon Redshift HSM configuration. + HsmConfigurationIdentifier *string `type:"string"` + + // The IP address that the Amazon Redshift cluster must use to access the HSM. + HsmIpAddress *string `type:"string"` + + // The name of the partition in the HSM where the Amazon Redshift clusters will + // store their database encryption keys. + HsmPartitionName *string `type:"string"` + + // The list of tags for the HSM configuration. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s HsmConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HsmConfiguration) GoString() string { + return s.String() +} + +// Describes the status of changes to HSM settings. +type HsmStatus struct { + _ struct{} `type:"structure"` + + // Specifies the name of the HSM client certificate the Amazon Redshift cluster + // uses to retrieve the data encryption keys stored in an HSM. + HsmClientCertificateIdentifier *string `type:"string"` + + // Specifies the name of the HSM configuration that contains the information + // the Amazon Redshift cluster can use to retrieve and store keys in an HSM. + HsmConfigurationIdentifier *string `type:"string"` + + // Reports whether the Amazon Redshift cluster has finished applying any HSM + // settings changes specified in a modify cluster command. + // + // Values: active, applying + Status *string `type:"string"` +} + +// String returns the string representation +func (s HsmStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HsmStatus) GoString() string { + return s.String() +} + +// Describes an IP range used in a security group. +type IPRange struct { + _ struct{} `type:"structure"` + + // The IP range in Classless Inter-Domain Routing (CIDR) notation. + CIDRIP *string `type:"string"` + + // The status of the IP range, for example, "authorized". + Status *string `type:"string"` + + // The list of tags for the IP range. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s IPRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IPRange) GoString() string { + return s.String() +} + +// Describes the status of logging for a cluster. +type LoggingStatus struct { + _ struct{} `type:"structure"` + + // The name of the S3 bucket where the log files are stored. + BucketName *string `type:"string"` + + // The message indicating that logs failed to be delivered. + LastFailureMessage *string `type:"string"` + + // The last time when logs failed to be delivered. + LastFailureTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The last time that logs were delivered. 
+ LastSuccessfulDeliveryTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // true if logging is on, false if logging is off. + LoggingEnabled *bool `type:"boolean"` + + // The prefix applied to the log file names. + S3KeyPrefix *string `type:"string"` +} + +// String returns the string representation +func (s LoggingStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoggingStatus) GoString() string { + return s.String() +} + +type ModifyClusterIamRolesInput struct { + _ struct{} `type:"structure"` + + // Zero or more IAM roles (in their ARN format) to associate with the cluster. + // You can associate up to 10 IAM roles with a single cluster in a single request. + AddIamRoles []*string `locationNameList:"IamRoleArn" type:"list"` + + // The unique identifier of the cluster for which you want to associate or disassociate + // IAM roles. + ClusterIdentifier *string `type:"string" required:"true"` + + // Zero or more IAM roles (in their ARN format) to disassociate from the cluster. + // You can disassociate up to 10 IAM roles from a single cluster in a single + // request. + RemoveIamRoles []*string `locationNameList:"IamRoleArn" type:"list"` +} + +// String returns the string representation +func (s ModifyClusterIamRolesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyClusterIamRolesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyClusterIamRolesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyClusterIamRolesInput"} + if s.ClusterIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ModifyClusterIamRolesOutput struct { + _ struct{} `type:"structure"` + + // Describes a cluster. + Cluster *Cluster `type:"structure"` +} + +// String returns the string representation +func (s ModifyClusterIamRolesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyClusterIamRolesOutput) GoString() string { + return s.String() +} + +type ModifyClusterInput struct { + _ struct{} `type:"structure"` + + // If true, major version upgrades will be applied automatically to the cluster + // during the maintenance window. + // + // Default: false + AllowVersionUpgrade *bool `type:"boolean"` + + // The number of days that automated snapshots are retained. If the value is + // 0, automated snapshots are disabled. Even if automated snapshots are disabled, + // you can still create manual snapshots when you want with CreateClusterSnapshot. + // + // If you decrease the automated snapshot retention period from its current + // value, existing automated snapshots that fall outside of the new retention + // period will be immediately deleted. + // + // Default: Uses existing setting. + // + // Constraints: Must be a value from 0 to 35. + AutomatedSnapshotRetentionPeriod *int64 `type:"integer"` + + // The unique identifier of the cluster to be modified. + // + // Example: examplecluster + ClusterIdentifier *string `type:"string" required:"true"` + + // The name of the cluster parameter group to apply to this cluster. This change + // is applied only after the cluster is rebooted. To reboot a cluster use RebootCluster. + // + // Default: Uses existing setting. 
+	//
+	// Constraints: The cluster parameter group must be in the same parameter group
+	// family that matches the cluster version.
+	ClusterParameterGroupName *string `type:"string"`
+
+	// A list of cluster security groups to be authorized on this cluster. This
+	// change is asynchronously applied as soon as possible.
+	//
+	// Security groups currently associated with the cluster, and not in the list
+	// of groups to apply, will be revoked from the cluster.
+	//
+	// Constraints:
+	//
+	// Must be 1 to 255 alphanumeric characters or hyphens. First character must
+	// be a letter. Cannot end with a hyphen or contain two consecutive hyphens.
+	ClusterSecurityGroups []*string `locationNameList:"ClusterSecurityGroupName" type:"list"`
+
+	// The new cluster type.
+	//
+	// When you submit your cluster resize request, your existing cluster goes
+	// into a read-only mode. After Amazon Redshift provisions a new cluster based
+	// on your resize requirements, there will be an outage for a period while the
+	// old cluster is deleted and your connection is switched to the new cluster.
+	// You can use DescribeResize to track the progress of the resize request.
+	//
+	// Valid Values: multi-node | single-node
+	ClusterType *string `type:"string"`
+
+	// The new version number of the Amazon Redshift engine to upgrade to.
+	//
+	// For major version upgrades, if a non-default cluster parameter group is
+	// currently in use, a new cluster parameter group in the cluster parameter
+	// group family for the new version must be specified. The new cluster parameter
+	// group can be the default for that cluster parameter group family. For more
+	// information about parameters and parameter groups, go to Amazon Redshift
+	// Parameter Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html)
+	// in the Amazon Redshift Cluster Management Guide.
+	//
+	// Example: 1.0
+	ClusterVersion *string `type:"string"`
+
+	// The Elastic IP (EIP) address for the cluster.
+	//
+	// Constraints: The cluster must be provisioned in EC2-VPC and publicly-accessible
+	// through an Internet gateway. For more information about provisioning clusters
+	// in EC2-VPC, go to Supported Platforms to Launch Your Cluster (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#cluster-platforms)
+	// in the Amazon Redshift Cluster Management Guide.
+	ElasticIp *string `type:"string"`
+
+	// Specifies the name of the HSM client certificate the Amazon Redshift cluster
+	// uses to retrieve the data encryption keys stored in an HSM.
+	HsmClientCertificateIdentifier *string `type:"string"`
+
+	// Specifies the name of the HSM configuration that contains the information
+	// the Amazon Redshift cluster can use to retrieve and store keys in an HSM.
+	HsmConfigurationIdentifier *string `type:"string"`
+
+	// The new password for the cluster master user. This change is asynchronously
+	// applied as soon as possible. Between the time of the request and the completion
+	// of the request, the MasterUserPassword element exists in the PendingModifiedValues
+	// element of the operation response. Operations never return the password,
+	// so this operation provides a way to regain access to the master user account
+	// for a cluster if the password is lost.
+	//
+	// Default: Uses existing setting.
+	//
+	// Constraints:
+	//
+	// Must be between 8 and 64 characters in length. Must contain at least one
+	// uppercase letter. Must contain at least one lowercase letter.
Must contain + // one number. Can be any printable ASCII character (ASCII code 33 to 126) except + // ' (single quote), " (double quote), \, /, @, or space. + MasterUserPassword *string `type:"string"` + + // The new identifier for the cluster. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens. Alphabetic + // characters must be lowercase. First character must be a letter. Cannot end + // with a hyphen or contain two consecutive hyphens. Must be unique for all + // clusters within an AWS account. Example: examplecluster + NewClusterIdentifier *string `type:"string"` + + // The new node type of the cluster. If you specify a new node type, you must + // also specify the number of nodes parameter. + // + // When you submit your request to resize a cluster, Amazon Redshift sets + // access permissions for the cluster to read-only. After Amazon Redshift provisions + // a new cluster according to your resize requirements, there will be a temporary + // outage while the old cluster is deleted and your connection is switched to + // the new cluster. When the new connection is complete, the original access + // permissions for the cluster are restored. You can use DescribeResize to track + // the progress of the resize request. + // + // Valid Values: ds1.xlarge | ds1.8xlarge | ds2.xlarge | ds2.8xlarge | dc1.large + // | dc1.8xlarge. + NodeType *string `type:"string"` + + // The new number of nodes of the cluster. If you specify a new number of nodes, + // you must also specify the node type parameter. + // + // When you submit your request to resize a cluster, Amazon Redshift sets + // access permissions for the cluster to read-only. After Amazon Redshift provisions + // a new cluster according to your resize requirements, there will be a temporary + // outage while the old cluster is deleted and your connection is switched to + // the new cluster. When the new connection is complete, the original access + // permissions for the cluster are restored. You can use DescribeResize to track + // the progress of the resize request. + // + // Valid Values: Integer greater than 0. + NumberOfNodes *int64 `type:"integer"` + + // The weekly time range (in UTC) during which system maintenance can occur, + // if necessary. If system maintenance is necessary during the window, it may + // result in an outage. + // + // This maintenance window change is made immediately. If the new maintenance + // window indicates the current time, there must be at least 120 minutes between + // the current time and end of the window in order to ensure that pending changes + // are applied. + // + // Default: Uses existing setting. + // + // Format: ddd:hh24:mi-ddd:hh24:mi, for example wed:07:30-wed:08:00. + // + // Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun + // + // Constraints: Must be at least 30 minutes. + PreferredMaintenanceWindow *string `type:"string"` + + // If true, the cluster can be accessed from a public network. Only clusters + // in VPCs can be set to be publicly available. + PubliclyAccessible *bool `type:"boolean"` + + // A list of virtual private cloud (VPC) security groups to be associated with + // the cluster. 
+ VpcSecurityGroupIds []*string `locationNameList:"VpcSecurityGroupId" type:"list"` +} + +// String returns the string representation +func (s ModifyClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyClusterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyClusterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyClusterInput"} + if s.ClusterIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ModifyClusterOutput struct { + _ struct{} `type:"structure"` + + // Describes a cluster. + Cluster *Cluster `type:"structure"` +} + +// String returns the string representation +func (s ModifyClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyClusterOutput) GoString() string { + return s.String() +} + +type ModifyClusterParameterGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the parameter group to be modified. + ParameterGroupName *string `type:"string" required:"true"` + + // An array of parameters to be modified. A maximum of 20 parameters can be + // modified in a single request. + // + // For each parameter to be modified, you must supply at least the parameter + // name and parameter value; other name-value pairs of the parameter are optional. + // + // For the workload management (WLM) configuration, you must supply all the + // name-value pairs in the wlm_json_configuration parameter. + Parameters []*Parameter `locationNameList:"Parameter" type:"list" required:"true"` +} + +// String returns the string representation +func (s ModifyClusterParameterGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyClusterParameterGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyClusterParameterGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyClusterParameterGroupInput"} + if s.ParameterGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("ParameterGroupName")) + } + if s.Parameters == nil { + invalidParams.Add(request.NewErrParamRequired("Parameters")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ModifyClusterSubnetGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the subnet group to be modified. + ClusterSubnetGroupName *string `type:"string" required:"true"` + + // A text description of the subnet group to be modified. + Description *string `type:"string"` + + // An array of VPC subnet IDs. A maximum of 20 subnets can be modified in a + // single request. + SubnetIds []*string `locationNameList:"SubnetIdentifier" type:"list" required:"true"` +} + +// String returns the string representation +func (s ModifyClusterSubnetGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyClusterSubnetGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
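+// For reference, an editorial usage sketch (not part of the generated file)
+// of a resize via ModifyCluster, assuming an existing client svc :=
+// redshift.New(sess); the identifier and sizing values are hypothetical:
+//
+//	out, err := svc.ModifyCluster(&redshift.ModifyClusterInput{
+//		ClusterIdentifier: aws.String("examplecluster"),
+//		NodeType:          aws.String("dc1.large"), // a new node type requires NumberOfNodes
+//		NumberOfNodes:     aws.Int64(4),
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	_ = out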
+func (s *ModifyClusterSubnetGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyClusterSubnetGroupInput"} + if s.ClusterSubnetGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterSubnetGroupName")) + } + if s.SubnetIds == nil { + invalidParams.Add(request.NewErrParamRequired("SubnetIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ModifyClusterSubnetGroupOutput struct { + _ struct{} `type:"structure"` + + // Describes a subnet group. + ClusterSubnetGroup *ClusterSubnetGroup `type:"structure"` +} + +// String returns the string representation +func (s ModifyClusterSubnetGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyClusterSubnetGroupOutput) GoString() string { + return s.String() +} + +type ModifyEventSubscriptionInput struct { + _ struct{} `type:"structure"` + + // A Boolean value indicating if the subscription is enabled. true indicates + // the subscription is enabled + Enabled *bool `type:"boolean"` + + // Specifies the Amazon Redshift event categories to be published by the event + // notification subscription. + // + // Values: Configuration, Management, Monitoring, Security + EventCategories []*string `locationNameList:"EventCategory" type:"list"` + + // Specifies the Amazon Redshift event severity to be published by the event + // notification subscription. + // + // Values: ERROR, INFO + Severity *string `type:"string"` + + // The Amazon Resource Name (ARN) of the SNS topic to be used by the event notification + // subscription. + SnsTopicArn *string `type:"string"` + + // A list of one or more identifiers of Amazon Redshift source objects. All + // of the objects must be of the same type as was specified in the source type + // parameter. The event subscription will return only events generated by the + // specified objects. If not specified, then events are returned for all objects + // within the source type specified. + // + // Example: my-cluster-1, my-cluster-2 + // + // Example: my-snapshot-20131010 + SourceIds []*string `locationNameList:"SourceId" type:"list"` + + // The type of source that will be generating the events. For example, if you + // want to be notified of events generated by a cluster, you would set this + // parameter to cluster. If this value is not specified, events are returned + // for all Amazon Redshift objects in your AWS account. You must specify a source + // type in order to specify source IDs. + // + // Valid values: cluster, cluster-parameter-group, cluster-security-group, + // and cluster-snapshot. + SourceType *string `type:"string"` + + // The name of the modified Amazon Redshift event notification subscription. + SubscriptionName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyEventSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyEventSubscriptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
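+// For reference, an editorial usage sketch (not part of the generated file),
+// assuming an existing client svc := redshift.New(sess); the subscription and
+// source names are hypothetical:
+//
+//	out, err := svc.ModifyEventSubscription(&redshift.ModifyEventSubscriptionInput{
+//		SubscriptionName: aws.String("my-subscription"),
+//		Severity:         aws.String("ERROR"),
+//		SourceType:       aws.String("cluster"), // required when SourceIds is set
+//		SourceIds:        []*string{aws.String("my-cluster-1")},
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	_ = out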
+func (s *ModifyEventSubscriptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyEventSubscriptionInput"} + if s.SubscriptionName == nil { + invalidParams.Add(request.NewErrParamRequired("SubscriptionName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ModifyEventSubscriptionOutput struct { + _ struct{} `type:"structure"` + + // Describes event subscriptions. + EventSubscription *EventSubscription `type:"structure"` +} + +// String returns the string representation +func (s ModifyEventSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyEventSubscriptionOutput) GoString() string { + return s.String() +} + +type ModifySnapshotCopyRetentionPeriodInput struct { + _ struct{} `type:"structure"` + + // The unique identifier of the cluster for which you want to change the retention + // period for automated snapshots that are copied to a destination region. + // + // Constraints: Must be the valid name of an existing cluster that has cross-region + // snapshot copy enabled. + ClusterIdentifier *string `type:"string" required:"true"` + + // The number of days to retain automated snapshots in the destination region + // after they are copied from the source region. + // + // If you decrease the retention period for automated snapshots that are copied + // to a destination region, Amazon Redshift will delete any existing automated + // snapshots that were copied to the destination region and that fall outside + // of the new retention period. + // + // Constraints: Must be at least 1 and no more than 35. + RetentionPeriod *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s ModifySnapshotCopyRetentionPeriodInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifySnapshotCopyRetentionPeriodInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifySnapshotCopyRetentionPeriodInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifySnapshotCopyRetentionPeriodInput"} + if s.ClusterIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterIdentifier")) + } + if s.RetentionPeriod == nil { + invalidParams.Add(request.NewErrParamRequired("RetentionPeriod")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ModifySnapshotCopyRetentionPeriodOutput struct { + _ struct{} `type:"structure"` + + // Describes a cluster. + Cluster *Cluster `type:"structure"` +} + +// String returns the string representation +func (s ModifySnapshotCopyRetentionPeriodOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifySnapshotCopyRetentionPeriodOutput) GoString() string { + return s.String() +} + +// Describes an orderable cluster option. +type OrderableClusterOption struct { + _ struct{} `type:"structure"` + + // A list of availability zones for the orderable cluster. + AvailabilityZones []*AvailabilityZone `locationNameList:"AvailabilityZone" type:"list"` + + // The cluster type, for example multi-node. + ClusterType *string `type:"string"` + + // The version of the orderable cluster. + ClusterVersion *string `type:"string"` + + // The node type for the orderable cluster. 
+	NodeType *string `type:"string"`
+}
+
+// String returns the string representation
+func (s OrderableClusterOption) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s OrderableClusterOption) GoString() string {
+	return s.String()
+}
+
+// Describes a parameter in a cluster parameter group.
+type Parameter struct {
+	_ struct{} `type:"structure"`
+
+	// The valid range of values for the parameter.
+	AllowedValues *string `type:"string"`
+
+	// Specifies how to apply the WLM configuration parameter. Some properties can
+	// be applied dynamically, while other properties require that any associated
+	// clusters be rebooted for the configuration changes to be applied. For more
+	// information about parameters and parameter groups, go to Amazon Redshift
+	// Parameter Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html)
+	// in the Amazon Redshift Cluster Management Guide.
+	ApplyType *string `type:"string" enum:"ParameterApplyType"`
+
+	// The data type of the parameter.
+	DataType *string `type:"string"`
+
+	// A description of the parameter.
+	Description *string `type:"string"`
+
+	// If true, the parameter can be modified. Some parameters have security or
+	// operational implications that prevent them from being changed.
+	IsModifiable *bool `type:"boolean"`
+
+	// The earliest engine version to which the parameter can apply.
+	MinimumEngineVersion *string `type:"string"`
+
+	// The name of the parameter.
+	ParameterName *string `type:"string"`
+
+	// The value of the parameter.
+	ParameterValue *string `type:"string"`
+
+	// The source of the parameter value, such as "engine-default" or "user".
+	Source *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Parameter) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Parameter) GoString() string {
+	return s.String()
+}
+
+// Describes cluster attributes that are in a pending state. A change to one
+// or more of the attributes was requested and is in progress or will be applied.
+type PendingModifiedValues struct {
+	_ struct{} `type:"structure"`
+
+	// The pending or in-progress change of the automated snapshot retention period.
+	AutomatedSnapshotRetentionPeriod *int64 `type:"integer"`
+
+	// The pending or in-progress change of the new identifier for the cluster.
+	ClusterIdentifier *string `type:"string"`
+
+	// The pending or in-progress change of the cluster type.
+	ClusterType *string `type:"string"`
+
+	// The pending or in-progress change of the service version.
+	ClusterVersion *string `type:"string"`
+
+	// The pending or in-progress change of the master user password for the cluster.
+	MasterUserPassword *string `type:"string"`
+
+	// The pending or in-progress change of the cluster's node type.
+	NodeType *string `type:"string"`
+
+	// The pending or in-progress change of the number of nodes in the cluster.
+	NumberOfNodes *int64 `type:"integer"`
+
+	// The pending or in-progress change of the ability to connect to the cluster
+	// from the public network.
+ PubliclyAccessible *bool `type:"boolean"` +} + +// String returns the string representation +func (s PendingModifiedValues) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PendingModifiedValues) GoString() string { + return s.String() +} + +type PurchaseReservedNodeOfferingInput struct { + _ struct{} `type:"structure"` + + // The number of reserved nodes that you want to purchase. + // + // Default: 1 + NodeCount *int64 `type:"integer"` + + // The unique identifier of the reserved node offering you want to purchase. + ReservedNodeOfferingId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s PurchaseReservedNodeOfferingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseReservedNodeOfferingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PurchaseReservedNodeOfferingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PurchaseReservedNodeOfferingInput"} + if s.ReservedNodeOfferingId == nil { + invalidParams.Add(request.NewErrParamRequired("ReservedNodeOfferingId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PurchaseReservedNodeOfferingOutput struct { + _ struct{} `type:"structure"` + + // Describes a reserved node. You can call the DescribeReservedNodeOfferings + // API to obtain the available reserved node offerings. + ReservedNode *ReservedNode `type:"structure"` +} + +// String returns the string representation +func (s PurchaseReservedNodeOfferingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseReservedNodeOfferingOutput) GoString() string { + return s.String() +} + +type RebootClusterInput struct { + _ struct{} `type:"structure"` + + // The cluster identifier. + ClusterIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RebootClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebootClusterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RebootClusterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RebootClusterInput"} + if s.ClusterIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RebootClusterOutput struct { + _ struct{} `type:"structure"` + + // Describes a cluster. + Cluster *Cluster `type:"structure"` +} + +// String returns the string representation +func (s RebootClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebootClusterOutput) GoString() string { + return s.String() +} + +// Describes a recurring charge. +type RecurringCharge struct { + _ struct{} `type:"structure"` + + // The amount charged per the period of time specified by the recurring charge + // frequency. + RecurringChargeAmount *float64 `type:"double"` + + // The frequency at which the recurring charge amount is applied. 
+ RecurringChargeFrequency *string `type:"string"` +} + +// String returns the string representation +func (s RecurringCharge) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecurringCharge) GoString() string { + return s.String() +} + +// Describes a reserved node. You can call the DescribeReservedNodeOfferings +// API to obtain the available reserved node offerings. +type ReservedNode struct { + _ struct{} `type:"structure"` + + // The currency code for the reserved cluster. + CurrencyCode *string `type:"string"` + + // The duration of the node reservation in seconds. + Duration *int64 `type:"integer"` + + // The fixed cost Amazon Redshift charges you for this reserved node. + FixedPrice *float64 `type:"double"` + + // The number of reserved compute nodes. + NodeCount *int64 `type:"integer"` + + // The node type of the reserved node. + NodeType *string `type:"string"` + + // The anticipated utilization of the reserved node, as defined in the reserved + // node offering. + OfferingType *string `type:"string"` + + // The recurring charges for the reserved node. + RecurringCharges []*RecurringCharge `locationNameList:"RecurringCharge" type:"list"` + + // The unique identifier for the reservation. + ReservedNodeId *string `type:"string"` + + // The identifier for the reserved node offering. + ReservedNodeOfferingId *string `type:"string"` + + // The time the reservation started. You purchase a reserved node offering for + // a duration. This is the start time of that duration. + StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The state of the reserved compute node. + // + // Possible Values: + // + // pending-payment-This reserved node has recently been purchased, and the + // sale has been approved, but payment has not yet been confirmed. active-This + // reserved node is owned by the caller and is available for use. payment-failed-Payment + // failed for the purchase attempt. + State *string `type:"string"` + + // The hourly rate Amazon Redshift charges you for this reserved node. + UsagePrice *float64 `type:"double"` +} + +// String returns the string representation +func (s ReservedNode) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedNode) GoString() string { + return s.String() +} + +// Describes a reserved node offering. +type ReservedNodeOffering struct { + _ struct{} `type:"structure"` + + // The currency code for the compute nodes offering. + CurrencyCode *string `type:"string"` + + // The duration, in seconds, for which the offering will reserve the node. + Duration *int64 `type:"integer"` + + // The upfront fixed charge you will pay to purchase the specific reserved node + // offering. + FixedPrice *float64 `type:"double"` + + // The node type offered by the reserved node offering. + NodeType *string `type:"string"` + + // The anticipated utilization of the reserved node, as defined in the reserved + // node offering. + OfferingType *string `type:"string"` + + // The charge to your account regardless of whether you are creating any clusters + // using the node offering. Recurring charges are only in effect for heavy-utilization + // reserved nodes. + RecurringCharges []*RecurringCharge `locationNameList:"RecurringCharge" type:"list"` + + // The offering identifier. + ReservedNodeOfferingId *string `type:"string"` + + // The rate you are charged for each hour the cluster that is using the offering + // is running. 
+ UsagePrice *float64 `type:"double"` +} + +// String returns the string representation +func (s ReservedNodeOffering) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedNodeOffering) GoString() string { + return s.String() +} + +type ResetClusterParameterGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the cluster parameter group to be reset. + ParameterGroupName *string `type:"string" required:"true"` + + // An array of names of parameters to be reset. If ResetAllParameters option + // is not used, then at least one parameter name must be supplied. + // + // Constraints: A maximum of 20 parameters can be reset in a single request. + Parameters []*Parameter `locationNameList:"Parameter" type:"list"` + + // If true, all parameters in the specified parameter group will be reset to + // their default values. + // + // Default: true + ResetAllParameters *bool `type:"boolean"` +} + +// String returns the string representation +func (s ResetClusterParameterGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetClusterParameterGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ResetClusterParameterGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResetClusterParameterGroupInput"} + if s.ParameterGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("ParameterGroupName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RestoreFromClusterSnapshotInput struct { + _ struct{} `type:"structure"` + + // Reserved. + AdditionalInfo *string `type:"string"` + + // If true, major version upgrades can be applied during the maintenance window + // to the Amazon Redshift engine that is running on the cluster. + // + // Default: true + AllowVersionUpgrade *bool `type:"boolean"` + + // The number of days that automated snapshots are retained. If the value is + // 0, automated snapshots are disabled. Even if automated snapshots are disabled, + // you can still create manual snapshots when you want with CreateClusterSnapshot. + // + // Default: The value selected for the cluster from which the snapshot was + // taken. + // + // Constraints: Must be a value from 0 to 35. + AutomatedSnapshotRetentionPeriod *int64 `type:"integer"` + + // The Amazon EC2 Availability Zone in which to restore the cluster. + // + // Default: A random, system-chosen Availability Zone. + // + // Example: us-east-1a + AvailabilityZone *string `type:"string"` + + // The identifier of the cluster that will be created from restoring the snapshot. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens. Alphabetic + // characters must be lowercase. First character must be a letter. Cannot end + // with a hyphen or contain two consecutive hyphens. Must be unique for all + // clusters within an AWS account. + ClusterIdentifier *string `type:"string" required:"true"` + + // The name of the parameter group to be associated with this cluster. + // + // Default: The default Amazon Redshift cluster parameter group. For information + // about the default parameter group, go to Working with Amazon Redshift Parameter + // Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html). 
+	//
+	// Constraints:
+	//
+	// Must be 1 to 255 alphanumeric characters or hyphens. First character must
+	// be a letter. Cannot end with a hyphen or contain two consecutive hyphens.
+	ClusterParameterGroupName *string `type:"string"`
+
+	// A list of security groups to be associated with this cluster.
+	//
+	// Default: The default cluster security group for Amazon Redshift.
+	//
+	// Cluster security groups only apply to clusters outside of VPCs.
+	ClusterSecurityGroups []*string `locationNameList:"ClusterSecurityGroupName" type:"list"`
+
+	// The name of the subnet group where you want the cluster restored.
+	//
+	// A snapshot of a cluster in a VPC can be restored only in a VPC. Therefore,
+	// you must provide the name of the subnet group where you want the cluster
+	// restored.
+	ClusterSubnetGroupName *string `type:"string"`
+
+	// The elastic IP (EIP) address for the cluster.
+	ElasticIp *string `type:"string"`
+
+	// Specifies the name of the HSM client certificate the Amazon Redshift cluster
+	// uses to retrieve the data encryption keys stored in an HSM.
+	HsmClientCertificateIdentifier *string `type:"string"`
+
+	// Specifies the name of the HSM configuration that contains the information
+	// the Amazon Redshift cluster can use to retrieve and store keys in an HSM.
+	HsmConfigurationIdentifier *string `type:"string"`
+
+	// A list of AWS Identity and Access Management (IAM) roles that can be used
+	// by the cluster to access other AWS services. You must supply the IAM roles
+	// in their Amazon Resource Name (ARN) format. You can supply up to 10 IAM roles
+	// in a single request.
+	//
+	// A cluster can have up to 10 IAM roles associated at any time.
+	IamRoles []*string `locationNameList:"IamRoleArn" type:"list"`
+
+	// The AWS Key Management Service (KMS) key ID of the encryption key that you
+	// want to use to encrypt data in the cluster that you restore from a shared
+	// snapshot.
+	KmsKeyId *string `type:"string"`
+
+	// The node type that the restored cluster will be provisioned with.
+	//
+	// Default: The node type of the cluster from which the snapshot was taken.
+	// You can modify this if you are using any DS node type. In that case, you
+	// can choose to restore into another DS node type of the same size. For example,
+	// you can restore ds1.8xlarge into ds2.8xlarge, or ds2.xlarge into ds1.xlarge.
+	// If you have a DC instance type, you must restore into that same instance
+	// type and size. In other words, you can only restore a dc1.large instance
+	// type into another dc1.large instance type. For more information about node
+	// types, see About Clusters and Nodes (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#rs-about-clusters-and-nodes)
+	// in the Amazon Redshift Cluster Management Guide.
+	NodeType *string `type:"string"`
+
+	// The AWS customer account used to create or copy the snapshot. Required if
+	// you are restoring a snapshot you do not own, optional if you own the snapshot.
+	OwnerAccount *string `type:"string"`
+
+	// The port number on which the cluster accepts connections.
+	//
+	// Default: The same port as the original cluster.
+	//
+	// Constraints: Must be between 1115 and 65535.
+	Port *int64 `type:"integer"`
+
+	// The weekly time range (in UTC) during which automated cluster maintenance
+	// can occur.
+	//
+	// Format: ddd:hh24:mi-ddd:hh24:mi
+	//
+	// Default: The value selected for the cluster from which the snapshot was
+	// taken.
For more information about the time blocks for each region, see Maintenance + // Windows (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#rs-maintenance-windows) + // in Amazon Redshift Cluster Management Guide. + // + // Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun + // + // Constraints: Minimum 30-minute window. + PreferredMaintenanceWindow *string `type:"string"` + + // If true, the cluster can be accessed from a public network. + PubliclyAccessible *bool `type:"boolean"` + + // The name of the cluster the source snapshot was created from. This parameter + // is required if your IAM user has a policy containing a snapshot resource + // element that specifies anything other than * for the cluster name. + SnapshotClusterIdentifier *string `type:"string"` + + // The name of the snapshot from which to create the new cluster. This parameter + // isn't case sensitive. + // + // Example: my-snapshot-id + SnapshotIdentifier *string `type:"string" required:"true"` + + // A list of Virtual Private Cloud (VPC) security groups to be associated with + // the cluster. + // + // Default: The default VPC security group is associated with the cluster. + // + // VPC security groups only apply to clusters in VPCs. + VpcSecurityGroupIds []*string `locationNameList:"VpcSecurityGroupId" type:"list"` +} + +// String returns the string representation +func (s RestoreFromClusterSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreFromClusterSnapshotInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RestoreFromClusterSnapshotInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreFromClusterSnapshotInput"} + if s.ClusterIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterIdentifier")) + } + if s.SnapshotIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("SnapshotIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RestoreFromClusterSnapshotOutput struct { + _ struct{} `type:"structure"` + + // Describes a cluster. + Cluster *Cluster `type:"structure"` +} + +// String returns the string representation +func (s RestoreFromClusterSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreFromClusterSnapshotOutput) GoString() string { + return s.String() +} + +// Describes the status of a cluster restore action. Returns null if the cluster +// was not created by restoring a snapshot. +type RestoreStatus struct { + _ struct{} `type:"structure"` + + // The number of megabytes per second being transferred from the backup storage. + // Returns the average rate for a completed backup. + CurrentRestoreRateInMegaBytesPerSecond *float64 `type:"double"` + + // The amount of time an in-progress restore has been running, or the amount + // of time it took a completed restore to finish. + ElapsedTimeInSeconds *int64 `type:"long"` + + // The estimate of the time remaining before the restore will complete. Returns + // 0 for a completed restore. + EstimatedTimeToCompletionInSeconds *int64 `type:"long"` + + // The number of megabytes that have been transferred from snapshot storage. + ProgressInMegaBytes *int64 `type:"long"` + + // The size of the set of snapshot data used to restore the cluster. 
+ SnapshotSizeInMegaBytes *int64 `type:"long"` + + // The status of the restore action. Returns starting, restoring, completed, + // or failed. + Status *string `type:"string"` +} + +// String returns the string representation +func (s RestoreStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreStatus) GoString() string { + return s.String() +} + +type RestoreTableFromClusterSnapshotInput struct { + _ struct{} `type:"structure"` + + // The identifier of the Amazon Redshift cluster to restore the table to. + ClusterIdentifier *string `type:"string" required:"true"` + + // The name of the table to create as a result of the current request. + NewTableName *string `type:"string" required:"true"` + + // The identifier of the snapshot to restore the table from. This snapshot must + // have been created from the Amazon Redshift cluster specified by the ClusterIdentifier + // parameter. + SnapshotIdentifier *string `type:"string" required:"true"` + + // The name of the source database that contains the table to restore from. + SourceDatabaseName *string `type:"string" required:"true"` + + // The name of the source schema that contains the table to restore from. If + // you do not specify a SourceSchemaName value, the default is public. + SourceSchemaName *string `type:"string"` + + // The name of the source table to restore from. + SourceTableName *string `type:"string" required:"true"` + + // The name of the database to restore the table to. + TargetDatabaseName *string `type:"string"` + + // The name of the schema to restore the table to. + TargetSchemaName *string `type:"string"` +} + +// String returns the string representation +func (s RestoreTableFromClusterSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreTableFromClusterSnapshotInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RestoreTableFromClusterSnapshotInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreTableFromClusterSnapshotInput"} + if s.ClusterIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterIdentifier")) + } + if s.NewTableName == nil { + invalidParams.Add(request.NewErrParamRequired("NewTableName")) + } + if s.SnapshotIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("SnapshotIdentifier")) + } + if s.SourceDatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("SourceDatabaseName")) + } + if s.SourceTableName == nil { + invalidParams.Add(request.NewErrParamRequired("SourceTableName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RestoreTableFromClusterSnapshotOutput struct { + _ struct{} `type:"structure"` + + // Describes the status of a RestoreTableFromClusterSnapshot operation. + TableRestoreStatus *TableRestoreStatus `type:"structure"` +} + +// String returns the string representation +func (s RestoreTableFromClusterSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreTableFromClusterSnapshotOutput) GoString() string { + return s.String() +} + +type RevokeClusterSecurityGroupIngressInput struct { + _ struct{} `type:"structure"` + + // The IP range for which to revoke access. This range must be a valid Classless + // Inter-Domain Routing (CIDR) block of IP addresses. 
If CIDRIP is specified,
+	// EC2SecurityGroupName and EC2SecurityGroupOwnerId cannot be provided.
+	CIDRIP *string `type:"string"`
+
+	// The name of the security group from which to revoke the ingress rule.
+	ClusterSecurityGroupName *string `type:"string" required:"true"`
+
+	// The name of the EC2 Security Group whose access is to be revoked. If EC2SecurityGroupName
+	// is specified, EC2SecurityGroupOwnerId must also be provided and CIDRIP cannot
+	// be provided.
+	EC2SecurityGroupName *string `type:"string"`
+
+	// The AWS account number of the owner of the security group specified in the
+	// EC2SecurityGroupName parameter. The AWS access key ID is not an acceptable
+	// value. If EC2SecurityGroupOwnerId is specified, EC2SecurityGroupName must
+	// also be provided, and CIDRIP cannot be provided.
+	//
+	// Example: 111122223333
+	EC2SecurityGroupOwnerId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s RevokeClusterSecurityGroupIngressInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RevokeClusterSecurityGroupIngressInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RevokeClusterSecurityGroupIngressInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "RevokeClusterSecurityGroupIngressInput"}
+	if s.ClusterSecurityGroupName == nil {
+		invalidParams.Add(request.NewErrParamRequired("ClusterSecurityGroupName"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type RevokeClusterSecurityGroupIngressOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Describes a security group.
+	ClusterSecurityGroup *ClusterSecurityGroup `type:"structure"`
+}
+
+// String returns the string representation
+func (s RevokeClusterSecurityGroupIngressOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RevokeClusterSecurityGroupIngressOutput) GoString() string {
+	return s.String()
+}
+
+type RevokeSnapshotAccessInput struct {
+	_ struct{} `type:"structure"`
+
+	// The identifier of the AWS customer account that can no longer restore the
+	// specified snapshot.
+	AccountWithRestoreAccess *string `type:"string" required:"true"`
+
+	// The identifier of the cluster the snapshot was created from. This parameter
+	// is required if your IAM user has a policy containing a snapshot resource
+	// element that specifies anything other than * for the cluster name.
+	SnapshotClusterIdentifier *string `type:"string"`
+
+	// The identifier of the snapshot that the account can no longer access.
+	SnapshotIdentifier *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RevokeSnapshotAccessInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RevokeSnapshotAccessInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
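+// For reference, an editorial usage sketch (not part of the generated file),
+// assuming an existing client svc := redshift.New(sess); the account ID and
+// snapshot name reuse the examples from the field documentation above:
+//
+//	out, err := svc.RevokeSnapshotAccess(&redshift.RevokeSnapshotAccessInput{
+//		AccountWithRestoreAccess: aws.String("111122223333"),
+//		SnapshotIdentifier:       aws.String("my-snapshot-20131010"),
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	_ = out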
+func (s *RevokeSnapshotAccessInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RevokeSnapshotAccessInput"} + if s.AccountWithRestoreAccess == nil { + invalidParams.Add(request.NewErrParamRequired("AccountWithRestoreAccess")) + } + if s.SnapshotIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("SnapshotIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RevokeSnapshotAccessOutput struct { + _ struct{} `type:"structure"` + + // Describes a snapshot. + Snapshot *Snapshot `type:"structure"` +} + +// String returns the string representation +func (s RevokeSnapshotAccessOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevokeSnapshotAccessOutput) GoString() string { + return s.String() +} + +type RotateEncryptionKeyInput struct { + _ struct{} `type:"structure"` + + // The unique identifier of the cluster that you want to rotate the encryption + // keys for. + // + // Constraints: Must be the name of valid cluster that has encryption enabled. + ClusterIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RotateEncryptionKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RotateEncryptionKeyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RotateEncryptionKeyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RotateEncryptionKeyInput"} + if s.ClusterIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RotateEncryptionKeyOutput struct { + _ struct{} `type:"structure"` + + // Describes a cluster. + Cluster *Cluster `type:"structure"` +} + +// String returns the string representation +func (s RotateEncryptionKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RotateEncryptionKeyOutput) GoString() string { + return s.String() +} + +// Describes a snapshot. +type Snapshot struct { + _ struct{} `type:"structure"` + + // A list of the AWS customer accounts authorized to restore the snapshot. Returns + // null if no accounts are authorized. Visible only to the snapshot owner. + AccountsWithRestoreAccess []*AccountWithRestoreAccess `locationNameList:"AccountWithRestoreAccess" type:"list"` + + // The size of the incremental backup. + ActualIncrementalBackupSizeInMegaBytes *float64 `type:"double"` + + // The Availability Zone in which the cluster was created. + AvailabilityZone *string `type:"string"` + + // The number of megabytes that have been transferred to the snapshot backup. + BackupProgressInMegaBytes *float64 `type:"double"` + + // The time (UTC) when the cluster was originally created. + ClusterCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The identifier of the cluster for which the snapshot was taken. + ClusterIdentifier *string `type:"string"` + + // The version ID of the Amazon Redshift engine that is running on the cluster. + ClusterVersion *string `type:"string"` + + // The number of megabytes per second being transferred to the snapshot backup. + // Returns 0 for a completed backup. 
+ CurrentBackupRateInMegaBytesPerSecond *float64 `type:"double"` + + // The name of the database that was created when the cluster was created. + DBName *string `type:"string"` + + // The amount of time an in-progress snapshot backup has been running, or the + // amount of time it took a completed backup to finish. + ElapsedTimeInSeconds *int64 `type:"long"` + + // If true, the data in the snapshot is encrypted at rest. + Encrypted *bool `type:"boolean"` + + // A boolean that indicates whether the snapshot data is encrypted using the + // HSM keys of the source cluster. true indicates that the data is encrypted + // using HSM keys. + EncryptedWithHSM *bool `type:"boolean"` + + // The estimate of the time remaining before the snapshot backup will complete. + // Returns 0 for a completed backup. + EstimatedSecondsToCompletion *int64 `type:"long"` + + // The AWS Key Management Service (KMS) key ID of the encryption key that was + // used to encrypt data in the cluster from which the snapshot was taken. + KmsKeyId *string `type:"string"` + + // The master user name for the cluster. + MasterUsername *string `type:"string"` + + // The node type of the nodes in the cluster. + NodeType *string `type:"string"` + + // The number of nodes in the cluster. + NumberOfNodes *int64 `type:"integer"` + + // For manual snapshots, the AWS customer account used to create or copy the + // snapshot. For automatic snapshots, the owner of the cluster. The owner can + // perform all snapshot actions, such as sharing a manual snapshot. + OwnerAccount *string `type:"string"` + + // The port that the cluster is listening on. + Port *int64 `type:"integer"` + + // The list of node types that this cluster snapshot is able to restore into. + RestorableNodeTypes []*string `locationNameList:"NodeType" type:"list"` + + // The time (UTC) when Amazon Redshift began the snapshot. A snapshot contains + // a copy of the cluster data as of this exact time. + SnapshotCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The snapshot identifier that is provided in the request. + SnapshotIdentifier *string `type:"string"` + + // The snapshot type. Snapshots created using CreateClusterSnapshot and CopyClusterSnapshot + // will be of type "manual". + SnapshotType *string `type:"string"` + + // The source region from which the snapshot was copied. + SourceRegion *string `type:"string"` + + // The snapshot status. The value of the status depends on the API operation + // used. CreateClusterSnapshot and CopyClusterSnapshot returns status as "creating". + // DescribeClusterSnapshots returns status as "creating", "available", "final + // snapshot", or "failed". DeleteClusterSnapshot returns status as "deleted". + Status *string `type:"string"` + + // The list of tags for the cluster snapshot. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // The size of the complete set of backup data that would be used to restore + // the cluster. + TotalBackupSizeInMegaBytes *float64 `type:"double"` + + // The VPC identifier of the cluster if the snapshot is from a cluster in a + // VPC. Otherwise, this field is not in the output. 
+ VpcId *string `type:"string"` +} + +// String returns the string representation +func (s Snapshot) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Snapshot) GoString() string { + return s.String() +} + +// The snapshot copy grant that grants Amazon Redshift permission to encrypt +// copied snapshots with the specified customer master key (CMK) from AWS KMS +// in the destination region. +// +// For more information about managing snapshot copy grants, go to Amazon +// Redshift Database Encryption (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-db-encryption.html) +// in the Amazon Redshift Cluster Management Guide. +type SnapshotCopyGrant struct { + _ struct{} `type:"structure"` + + // The unique identifier of the customer master key (CMK) in AWS KMS to which + // Amazon Redshift is granted permission. + KmsKeyId *string `type:"string"` + + // The name of the snapshot copy grant. + SnapshotCopyGrantName *string `type:"string"` + + // A list of tag instances. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s SnapshotCopyGrant) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SnapshotCopyGrant) GoString() string { + return s.String() +} + +// Describes a subnet. +type Subnet struct { + _ struct{} `type:"structure"` + + // Describes an availability zone. + SubnetAvailabilityZone *AvailabilityZone `type:"structure"` + + // The identifier of the subnet. + SubnetIdentifier *string `type:"string"` + + // The status of the subnet. + SubnetStatus *string `type:"string"` +} + +// String returns the string representation +func (s Subnet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Subnet) GoString() string { + return s.String() +} + +// Describes the status of a RestoreTableFromClusterSnapshot operation. +type TableRestoreStatus struct { + _ struct{} `type:"structure"` + + // The identifier of the Amazon Redshift cluster that the table is being restored + // to. + ClusterIdentifier *string `type:"string"` + + // A description of the status of the table restore request. Status values include + // SUCCEEDED, FAILED, CANCELED, PENDING, IN_PROGRESS. + Message *string `type:"string"` + + // The name of the table to create as a result of the table restore request. + NewTableName *string `type:"string"` + + // The amount of data restored to the new table so far, in megabytes (MB). + ProgressInMegaBytes *int64 `type:"long"` + + // The time that the table restore request was made, in Universal Coordinated + // Time (UTC). + RequestTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The identifier of the snapshot that the table is being restored from. + SnapshotIdentifier *string `type:"string"` + + // The name of the source database that contains the table being restored. + SourceDatabaseName *string `type:"string"` + + // The name of the source schema that contains the table being restored. + SourceSchemaName *string `type:"string"` + + // The name of the source table being restored. + SourceTableName *string `type:"string"` + + // A value that describes the current state of the table restore request. + // + // Valid Values: SUCCEEDED, FAILED, CANCELED, PENDING, IN_PROGRESS + Status *string `type:"string" enum:"TableRestoreStatusType"` + + // The unique identifier for the table restore request. 
+	TableRestoreRequestId *string `type:"string"`
+
+	// The name of the database to restore the table to.
+	TargetDatabaseName *string `type:"string"`
+
+	// The name of the schema to restore the table to.
+	TargetSchemaName *string `type:"string"`
+
+	// The total amount of data to restore to the new table, in megabytes (MB).
+	TotalDataInMegaBytes *int64 `type:"long"`
+}
+
+// String returns the string representation
+func (s TableRestoreStatus) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TableRestoreStatus) GoString() string {
+	return s.String()
+}
+
+// A tag consisting of a name/value pair for a resource.
+type Tag struct {
+	_ struct{} `type:"structure"`
+
+	// The key, or name, for the resource tag.
+	Key *string `type:"string"`
+
+	// The value for the resource tag.
+	Value *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Tag) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Tag) GoString() string {
+	return s.String()
+}
+
+// A tag and its associated resource.
+type TaggedResource struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) with which the tag is associated. For example,
+	// arn:aws:redshift:us-east-1:123456789:cluster:t1.
+	ResourceName *string `type:"string"`
+
+	// The type of resource with which the tag is associated. Valid resource types
+	// are: Cluster, CIDR/IP, EC2 security group, Snapshot, Cluster security group,
+	// Subnet group, HSM connection, HSM certificate, Parameter group.
+	//
+	// For more information about Amazon Redshift resource types and constructing
+	// ARNs, go to Constructing an Amazon Redshift Amazon Resource Name (ARN) (http://docs.aws.amazon.com/redshift/latest/mgmt/constructing-redshift-arn.html)
+	// in the Amazon Redshift Cluster Management Guide.
+	ResourceType *string `type:"string"`
+
+	// The tag for the resource.
+	Tag *Tag `type:"structure"`
+}
+
+// String returns the string representation
+func (s TaggedResource) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TaggedResource) GoString() string {
+	return s.String()
+}
+
+// Describes the members of a VPC security group.
+type VpcSecurityGroupMembership struct {
+	_ struct{} `type:"structure"`
+
+	// The status of the VPC security group.
+	Status *string `type:"string"`
+
+	// The identifier of the VPC security group.
+ VpcSecurityGroupId *string `type:"string"` +} + +// String returns the string representation +func (s VpcSecurityGroupMembership) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcSecurityGroupMembership) GoString() string { + return s.String() +} + +const ( + // @enum ParameterApplyType + ParameterApplyTypeStatic = "static" + // @enum ParameterApplyType + ParameterApplyTypeDynamic = "dynamic" +) + +const ( + // @enum SourceType + SourceTypeCluster = "cluster" + // @enum SourceType + SourceTypeClusterParameterGroup = "cluster-parameter-group" + // @enum SourceType + SourceTypeClusterSecurityGroup = "cluster-security-group" + // @enum SourceType + SourceTypeClusterSnapshot = "cluster-snapshot" +) + +const ( + // @enum TableRestoreStatusType + TableRestoreStatusTypePending = "PENDING" + // @enum TableRestoreStatusType + TableRestoreStatusTypeInProgress = "IN_PROGRESS" + // @enum TableRestoreStatusType + TableRestoreStatusTypeSucceeded = "SUCCEEDED" + // @enum TableRestoreStatusType + TableRestoreStatusTypeFailed = "FAILED" + // @enum TableRestoreStatusType + TableRestoreStatusTypeCanceled = "CANCELED" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/redshift/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/redshift/examples_test.go new file mode 100644 index 000000000..c18210f23 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/redshift/examples_test.go @@ -0,0 +1,1584 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package redshift_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/redshift" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleRedshift_AuthorizeClusterSecurityGroupIngress() { + svc := redshift.New(session.New()) + + params := &redshift.AuthorizeClusterSecurityGroupIngressInput{ + ClusterSecurityGroupName: aws.String("String"), // Required + CIDRIP: aws.String("String"), + EC2SecurityGroupName: aws.String("String"), + EC2SecurityGroupOwnerId: aws.String("String"), + } + resp, err := svc.AuthorizeClusterSecurityGroupIngress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_AuthorizeSnapshotAccess() { + svc := redshift.New(session.New()) + + params := &redshift.AuthorizeSnapshotAccessInput{ + AccountWithRestoreAccess: aws.String("String"), // Required + SnapshotIdentifier: aws.String("String"), // Required + SnapshotClusterIdentifier: aws.String("String"), + } + resp, err := svc.AuthorizeSnapshotAccess(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_CopyClusterSnapshot() { + svc := redshift.New(session.New()) + + params := &redshift.CopyClusterSnapshotInput{ + SourceSnapshotIdentifier: aws.String("String"), // Required + TargetSnapshotIdentifier: aws.String("String"), // Required + SourceSnapshotClusterIdentifier: aws.String("String"), + } + resp, err := svc.CopyClusterSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
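+		// A minimal sketch of that cast (illustrative; awserr is
+		// github.com/aws/aws-sdk-go/aws/awserr and would need importing):
+		//
+		//	if awsErr, ok := err.(awserr.Error); ok {
+		//		fmt.Println(awsErr.Code(), awsErr.Message())
+		//	}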
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_CreateCluster() { + svc := redshift.New(session.New()) + + params := &redshift.CreateClusterInput{ + ClusterIdentifier: aws.String("String"), // Required + MasterUserPassword: aws.String("String"), // Required + MasterUsername: aws.String("String"), // Required + NodeType: aws.String("String"), // Required + AdditionalInfo: aws.String("String"), + AllowVersionUpgrade: aws.Bool(true), + AutomatedSnapshotRetentionPeriod: aws.Int64(1), + AvailabilityZone: aws.String("String"), + ClusterParameterGroupName: aws.String("String"), + ClusterSecurityGroups: []*string{ + aws.String("String"), // Required + // More values... + }, + ClusterSubnetGroupName: aws.String("String"), + ClusterType: aws.String("String"), + ClusterVersion: aws.String("String"), + DBName: aws.String("String"), + ElasticIp: aws.String("String"), + Encrypted: aws.Bool(true), + HsmClientCertificateIdentifier: aws.String("String"), + HsmConfigurationIdentifier: aws.String("String"), + IamRoles: []*string{ + aws.String("String"), // Required + // More values... + }, + KmsKeyId: aws.String("String"), + NumberOfNodes: aws.Int64(1), + Port: aws.Int64(1), + PreferredMaintenanceWindow: aws.String("String"), + PubliclyAccessible: aws.Bool(true), + Tags: []*redshift.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + VpcSecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.CreateCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_CreateClusterParameterGroup() { + svc := redshift.New(session.New()) + + params := &redshift.CreateClusterParameterGroupInput{ + Description: aws.String("String"), // Required + ParameterGroupFamily: aws.String("String"), // Required + ParameterGroupName: aws.String("String"), // Required + Tags: []*redshift.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateClusterParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_CreateClusterSecurityGroup() { + svc := redshift.New(session.New()) + + params := &redshift.CreateClusterSecurityGroupInput{ + ClusterSecurityGroupName: aws.String("String"), // Required + Description: aws.String("String"), // Required + Tags: []*redshift.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateClusterSecurityGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRedshift_CreateClusterSnapshot() { + svc := redshift.New(session.New()) + + params := &redshift.CreateClusterSnapshotInput{ + ClusterIdentifier: aws.String("String"), // Required + SnapshotIdentifier: aws.String("String"), // Required + Tags: []*redshift.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateClusterSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_CreateClusterSubnetGroup() { + svc := redshift.New(session.New()) + + params := &redshift.CreateClusterSubnetGroupInput{ + ClusterSubnetGroupName: aws.String("String"), // Required + Description: aws.String("String"), // Required + SubnetIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + Tags: []*redshift.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateClusterSubnetGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_CreateEventSubscription() { + svc := redshift.New(session.New()) + + params := &redshift.CreateEventSubscriptionInput{ + SnsTopicArn: aws.String("String"), // Required + SubscriptionName: aws.String("String"), // Required + Enabled: aws.Bool(true), + EventCategories: []*string{ + aws.String("String"), // Required + // More values... + }, + Severity: aws.String("String"), + SourceIds: []*string{ + aws.String("String"), // Required + // More values... + }, + SourceType: aws.String("String"), + Tags: []*redshift.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateEventSubscription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_CreateHsmClientCertificate() { + svc := redshift.New(session.New()) + + params := &redshift.CreateHsmClientCertificateInput{ + HsmClientCertificateIdentifier: aws.String("String"), // Required + Tags: []*redshift.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateHsmClientCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRedshift_CreateHsmConfiguration() { + svc := redshift.New(session.New()) + + params := &redshift.CreateHsmConfigurationInput{ + Description: aws.String("String"), // Required + HsmConfigurationIdentifier: aws.String("String"), // Required + HsmIpAddress: aws.String("String"), // Required + HsmPartitionName: aws.String("String"), // Required + HsmPartitionPassword: aws.String("String"), // Required + HsmServerPublicCertificate: aws.String("String"), // Required + Tags: []*redshift.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateHsmConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_CreateSnapshotCopyGrant() { + svc := redshift.New(session.New()) + + params := &redshift.CreateSnapshotCopyGrantInput{ + SnapshotCopyGrantName: aws.String("String"), // Required + KmsKeyId: aws.String("String"), + Tags: []*redshift.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateSnapshotCopyGrant(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_CreateTags() { + svc := redshift.New(session.New()) + + params := &redshift.CreateTagsInput{ + ResourceName: aws.String("String"), // Required + Tags: []*redshift.Tag{ // Required + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DeleteCluster() { + svc := redshift.New(session.New()) + + params := &redshift.DeleteClusterInput{ + ClusterIdentifier: aws.String("String"), // Required + FinalClusterSnapshotIdentifier: aws.String("String"), + SkipFinalClusterSnapshot: aws.Bool(true), + } + resp, err := svc.DeleteCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DeleteClusterParameterGroup() { + svc := redshift.New(session.New()) + + params := &redshift.DeleteClusterParameterGroupInput{ + ParameterGroupName: aws.String("String"), // Required + } + resp, err := svc.DeleteClusterParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DeleteClusterSecurityGroup() { + svc := redshift.New(session.New()) + + params := &redshift.DeleteClusterSecurityGroupInput{ + ClusterSecurityGroupName: aws.String("String"), // Required + } + resp, err := svc.DeleteClusterSecurityGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DeleteClusterSnapshot() { + svc := redshift.New(session.New()) + + params := &redshift.DeleteClusterSnapshotInput{ + SnapshotIdentifier: aws.String("String"), // Required + SnapshotClusterIdentifier: aws.String("String"), + } + resp, err := svc.DeleteClusterSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DeleteClusterSubnetGroup() { + svc := redshift.New(session.New()) + + params := &redshift.DeleteClusterSubnetGroupInput{ + ClusterSubnetGroupName: aws.String("String"), // Required + } + resp, err := svc.DeleteClusterSubnetGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DeleteEventSubscription() { + svc := redshift.New(session.New()) + + params := &redshift.DeleteEventSubscriptionInput{ + SubscriptionName: aws.String("String"), // Required + } + resp, err := svc.DeleteEventSubscription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DeleteHsmClientCertificate() { + svc := redshift.New(session.New()) + + params := &redshift.DeleteHsmClientCertificateInput{ + HsmClientCertificateIdentifier: aws.String("String"), // Required + } + resp, err := svc.DeleteHsmClientCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DeleteHsmConfiguration() { + svc := redshift.New(session.New()) + + params := &redshift.DeleteHsmConfigurationInput{ + HsmConfigurationIdentifier: aws.String("String"), // Required + } + resp, err := svc.DeleteHsmConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DeleteSnapshotCopyGrant() { + svc := redshift.New(session.New()) + + params := &redshift.DeleteSnapshotCopyGrantInput{ + SnapshotCopyGrantName: aws.String("String"), // Required + } + resp, err := svc.DeleteSnapshotCopyGrant(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DeleteTags() { + svc := redshift.New(session.New()) + + params := &redshift.DeleteTagsInput{ + ResourceName: aws.String("String"), // Required + TagKeys: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DeleteTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRedshift_DescribeClusterParameterGroups() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeClusterParameterGroupsInput{ + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + ParameterGroupName: aws.String("String"), + TagKeys: []*string{ + aws.String("String"), // Required + // More values... + }, + TagValues: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeClusterParameterGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeClusterParameters() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeClusterParametersInput{ + ParameterGroupName: aws.String("String"), // Required + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + Source: aws.String("String"), + } + resp, err := svc.DescribeClusterParameters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeClusterSecurityGroups() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeClusterSecurityGroupsInput{ + ClusterSecurityGroupName: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + TagKeys: []*string{ + aws.String("String"), // Required + // More values... + }, + TagValues: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeClusterSecurityGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeClusterSnapshots() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeClusterSnapshotsInput{ + ClusterIdentifier: aws.String("String"), + EndTime: aws.Time(time.Now()), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + OwnerAccount: aws.String("String"), + SnapshotIdentifier: aws.String("String"), + SnapshotType: aws.String("String"), + StartTime: aws.Time(time.Now()), + TagKeys: []*string{ + aws.String("String"), // Required + // More values... + }, + TagValues: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeClusterSnapshots(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeClusterSubnetGroups() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeClusterSubnetGroupsInput{ + ClusterSubnetGroupName: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + TagKeys: []*string{ + aws.String("String"), // Required + // More values... + }, + TagValues: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeClusterSubnetGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeClusterVersions() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeClusterVersionsInput{ + ClusterParameterGroupFamily: aws.String("String"), + ClusterVersion: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeClusterVersions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeClusters() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeClustersInput{ + ClusterIdentifier: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + TagKeys: []*string{ + aws.String("String"), // Required + // More values... + }, + TagValues: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeClusters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeDefaultClusterParameters() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeDefaultClusterParametersInput{ + ParameterGroupFamily: aws.String("String"), // Required + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeDefaultClusterParameters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeEventCategories() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeEventCategoriesInput{ + SourceType: aws.String("String"), + } + resp, err := svc.DescribeEventCategories(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeEventSubscriptions() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeEventSubscriptionsInput{ + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + SubscriptionName: aws.String("String"), + } + resp, err := svc.DescribeEventSubscriptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeEvents() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeEventsInput{ + Duration: aws.Int64(1), + EndTime: aws.Time(time.Now()), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + SourceIdentifier: aws.String("String"), + SourceType: aws.String("SourceType"), + StartTime: aws.Time(time.Now()), + } + resp, err := svc.DescribeEvents(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRedshift_DescribeHsmClientCertificates() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeHsmClientCertificatesInput{ + HsmClientCertificateIdentifier: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + TagKeys: []*string{ + aws.String("String"), // Required + // More values... + }, + TagValues: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeHsmClientCertificates(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeHsmConfigurations() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeHsmConfigurationsInput{ + HsmConfigurationIdentifier: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + TagKeys: []*string{ + aws.String("String"), // Required + // More values... + }, + TagValues: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeHsmConfigurations(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeLoggingStatus() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeLoggingStatusInput{ + ClusterIdentifier: aws.String("String"), // Required + } + resp, err := svc.DescribeLoggingStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeOrderableClusterOptions() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeOrderableClusterOptionsInput{ + ClusterVersion: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + NodeType: aws.String("String"), + } + resp, err := svc.DescribeOrderableClusterOptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeReservedNodeOfferings() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeReservedNodeOfferingsInput{ + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + ReservedNodeOfferingId: aws.String("String"), + } + resp, err := svc.DescribeReservedNodeOfferings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeReservedNodes() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeReservedNodesInput{ + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + ReservedNodeId: aws.String("String"), + } + resp, err := svc.DescribeReservedNodes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
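+	// Marker/MaxRecords drive manual pagination; the generated client also
+	// exposes a Pages helper that iterates for you. A minimal sketch:
+	//
+	//	_ = svc.DescribeReservedNodesPages(params,
+	//		func(page *redshift.DescribeReservedNodesOutput, lastPage bool) bool {
+	//			fmt.Println(len(page.ReservedNodes))
+	//			return !lastPage // returning false stops iteration
+	//		})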
+ fmt.Println(resp) +} + +func ExampleRedshift_DescribeResize() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeResizeInput{ + ClusterIdentifier: aws.String("String"), // Required + } + resp, err := svc.DescribeResize(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeSnapshotCopyGrants() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeSnapshotCopyGrantsInput{ + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + SnapshotCopyGrantName: aws.String("String"), + TagKeys: []*string{ + aws.String("String"), // Required + // More values... + }, + TagValues: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeSnapshotCopyGrants(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeTableRestoreStatus() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeTableRestoreStatusInput{ + ClusterIdentifier: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + TableRestoreRequestId: aws.String("String"), + } + resp, err := svc.DescribeTableRestoreStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeTags() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeTagsInput{ + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + ResourceName: aws.String("String"), + ResourceType: aws.String("String"), + TagKeys: []*string{ + aws.String("String"), // Required + // More values... + }, + TagValues: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DisableLogging() { + svc := redshift.New(session.New()) + + params := &redshift.DisableLoggingInput{ + ClusterIdentifier: aws.String("String"), // Required + } + resp, err := svc.DisableLogging(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DisableSnapshotCopy() { + svc := redshift.New(session.New()) + + params := &redshift.DisableSnapshotCopyInput{ + ClusterIdentifier: aws.String("String"), // Required + } + resp, err := svc.DisableSnapshotCopy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRedshift_EnableLogging() { + svc := redshift.New(session.New()) + + params := &redshift.EnableLoggingInput{ + BucketName: aws.String("String"), // Required + ClusterIdentifier: aws.String("String"), // Required + S3KeyPrefix: aws.String("String"), + } + resp, err := svc.EnableLogging(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_EnableSnapshotCopy() { + svc := redshift.New(session.New()) + + params := &redshift.EnableSnapshotCopyInput{ + ClusterIdentifier: aws.String("String"), // Required + DestinationRegion: aws.String("String"), // Required + RetentionPeriod: aws.Int64(1), + SnapshotCopyGrantName: aws.String("String"), + } + resp, err := svc.EnableSnapshotCopy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_ModifyCluster() { + svc := redshift.New(session.New()) + + params := &redshift.ModifyClusterInput{ + ClusterIdentifier: aws.String("String"), // Required + AllowVersionUpgrade: aws.Bool(true), + AutomatedSnapshotRetentionPeriod: aws.Int64(1), + ClusterParameterGroupName: aws.String("String"), + ClusterSecurityGroups: []*string{ + aws.String("String"), // Required + // More values... + }, + ClusterType: aws.String("String"), + ClusterVersion: aws.String("String"), + ElasticIp: aws.String("String"), + HsmClientCertificateIdentifier: aws.String("String"), + HsmConfigurationIdentifier: aws.String("String"), + MasterUserPassword: aws.String("String"), + NewClusterIdentifier: aws.String("String"), + NodeType: aws.String("String"), + NumberOfNodes: aws.Int64(1), + PreferredMaintenanceWindow: aws.String("String"), + PubliclyAccessible: aws.Bool(true), + VpcSecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.ModifyCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_ModifyClusterIamRoles() { + svc := redshift.New(session.New()) + + params := &redshift.ModifyClusterIamRolesInput{ + ClusterIdentifier: aws.String("String"), // Required + AddIamRoles: []*string{ + aws.String("String"), // Required + // More values... + }, + RemoveIamRoles: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.ModifyClusterIamRoles(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRedshift_ModifyClusterParameterGroup() { + svc := redshift.New(session.New()) + + params := &redshift.ModifyClusterParameterGroupInput{ + ParameterGroupName: aws.String("String"), // Required + Parameters: []*redshift.Parameter{ // Required + { // Required + AllowedValues: aws.String("String"), + ApplyType: aws.String("ParameterApplyType"), + DataType: aws.String("String"), + Description: aws.String("String"), + IsModifiable: aws.Bool(true), + MinimumEngineVersion: aws.String("String"), + ParameterName: aws.String("String"), + ParameterValue: aws.String("String"), + Source: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.ModifyClusterParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_ModifyClusterSubnetGroup() { + svc := redshift.New(session.New()) + + params := &redshift.ModifyClusterSubnetGroupInput{ + ClusterSubnetGroupName: aws.String("String"), // Required + SubnetIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + Description: aws.String("String"), + } + resp, err := svc.ModifyClusterSubnetGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_ModifyEventSubscription() { + svc := redshift.New(session.New()) + + params := &redshift.ModifyEventSubscriptionInput{ + SubscriptionName: aws.String("String"), // Required + Enabled: aws.Bool(true), + EventCategories: []*string{ + aws.String("String"), // Required + // More values... + }, + Severity: aws.String("String"), + SnsTopicArn: aws.String("String"), + SourceIds: []*string{ + aws.String("String"), // Required + // More values... + }, + SourceType: aws.String("String"), + } + resp, err := svc.ModifyEventSubscription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_ModifySnapshotCopyRetentionPeriod() { + svc := redshift.New(session.New()) + + params := &redshift.ModifySnapshotCopyRetentionPeriodInput{ + ClusterIdentifier: aws.String("String"), // Required + RetentionPeriod: aws.Int64(1), // Required + } + resp, err := svc.ModifySnapshotCopyRetentionPeriod(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_PurchaseReservedNodeOffering() { + svc := redshift.New(session.New()) + + params := &redshift.PurchaseReservedNodeOfferingInput{ + ReservedNodeOfferingId: aws.String("String"), // Required + NodeCount: aws.Int64(1), + } + resp, err := svc.PurchaseReservedNodeOffering(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRedshift_RebootCluster() { + svc := redshift.New(session.New()) + + params := &redshift.RebootClusterInput{ + ClusterIdentifier: aws.String("String"), // Required + } + resp, err := svc.RebootCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_ResetClusterParameterGroup() { + svc := redshift.New(session.New()) + + params := &redshift.ResetClusterParameterGroupInput{ + ParameterGroupName: aws.String("String"), // Required + Parameters: []*redshift.Parameter{ + { // Required + AllowedValues: aws.String("String"), + ApplyType: aws.String("ParameterApplyType"), + DataType: aws.String("String"), + Description: aws.String("String"), + IsModifiable: aws.Bool(true), + MinimumEngineVersion: aws.String("String"), + ParameterName: aws.String("String"), + ParameterValue: aws.String("String"), + Source: aws.String("String"), + }, + // More values... + }, + ResetAllParameters: aws.Bool(true), + } + resp, err := svc.ResetClusterParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_RestoreFromClusterSnapshot() { + svc := redshift.New(session.New()) + + params := &redshift.RestoreFromClusterSnapshotInput{ + ClusterIdentifier: aws.String("String"), // Required + SnapshotIdentifier: aws.String("String"), // Required + AdditionalInfo: aws.String("String"), + AllowVersionUpgrade: aws.Bool(true), + AutomatedSnapshotRetentionPeriod: aws.Int64(1), + AvailabilityZone: aws.String("String"), + ClusterParameterGroupName: aws.String("String"), + ClusterSecurityGroups: []*string{ + aws.String("String"), // Required + // More values... + }, + ClusterSubnetGroupName: aws.String("String"), + ElasticIp: aws.String("String"), + HsmClientCertificateIdentifier: aws.String("String"), + HsmConfigurationIdentifier: aws.String("String"), + IamRoles: []*string{ + aws.String("String"), // Required + // More values... + }, + KmsKeyId: aws.String("String"), + NodeType: aws.String("String"), + OwnerAccount: aws.String("String"), + Port: aws.Int64(1), + PreferredMaintenanceWindow: aws.String("String"), + PubliclyAccessible: aws.Bool(true), + SnapshotClusterIdentifier: aws.String("String"), + VpcSecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.RestoreFromClusterSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+	fmt.Println(resp)
+}
+
+func ExampleRedshift_RestoreTableFromClusterSnapshot() {
+	svc := redshift.New(session.New())
+
+	params := &redshift.RestoreTableFromClusterSnapshotInput{
+		ClusterIdentifier:  aws.String("String"), // Required
+		NewTableName:       aws.String("String"), // Required
+		SnapshotIdentifier: aws.String("String"), // Required
+		SourceDatabaseName: aws.String("String"), // Required
+		SourceTableName:    aws.String("String"), // Required
+		SourceSchemaName:   aws.String("String"),
+		TargetDatabaseName: aws.String("String"),
+		TargetSchemaName:   aws.String("String"),
+	}
+	resp, err := svc.RestoreTableFromClusterSnapshot(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleRedshift_RevokeClusterSecurityGroupIngress() {
+	svc := redshift.New(session.New())
+
+	params := &redshift.RevokeClusterSecurityGroupIngressInput{
+		ClusterSecurityGroupName: aws.String("String"), // Required
+		CIDRIP:                   aws.String("String"),
+		EC2SecurityGroupName:     aws.String("String"),
+		EC2SecurityGroupOwnerId:  aws.String("String"),
+	}
+	resp, err := svc.RevokeClusterSecurityGroupIngress(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleRedshift_RevokeSnapshotAccess() {
+	svc := redshift.New(session.New())
+
+	params := &redshift.RevokeSnapshotAccessInput{
+		AccountWithRestoreAccess:  aws.String("String"), // Required
+		SnapshotIdentifier:        aws.String("String"), // Required
+		SnapshotClusterIdentifier: aws.String("String"),
+	}
+	resp, err := svc.RevokeSnapshotAccess(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleRedshift_RotateEncryptionKey() {
+	svc := redshift.New(session.New())
+
+	params := &redshift.RotateEncryptionKeyInput{
+		ClusterIdentifier: aws.String("String"), // Required
+	}
+	resp, err := svc.RotateEncryptionKey(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/redshift/redshiftiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/redshift/redshiftiface/interface.go
new file mode 100644
index 000000000..158a6ef12
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/redshift/redshiftiface/interface.go
@@ -0,0 +1,292 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package redshiftiface provides an interface for Amazon Redshift.
+package redshiftiface
+
+import (
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/service/redshift"
+)
+
+// RedshiftAPI is the interface type for redshift.Redshift.
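+//
+// Consumers commonly depend on this interface rather than the concrete
+// *redshift.Redshift so the client can be stubbed out in unit tests. A
+// minimal, hypothetical sketch (mockRedshiftClient is not part of this
+// package):
+//
+//	type mockRedshiftClient struct {
+//		redshiftiface.RedshiftAPI // embed to satisfy the full interface
+//	}
+//
+//	func (m *mockRedshiftClient) DescribeClusters(in *redshift.DescribeClustersInput) (*redshift.DescribeClustersOutput, error) {
+//		return &redshift.DescribeClustersOutput{}, nil // canned test data
+//	}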
+type RedshiftAPI interface { + AuthorizeClusterSecurityGroupIngressRequest(*redshift.AuthorizeClusterSecurityGroupIngressInput) (*request.Request, *redshift.AuthorizeClusterSecurityGroupIngressOutput) + + AuthorizeClusterSecurityGroupIngress(*redshift.AuthorizeClusterSecurityGroupIngressInput) (*redshift.AuthorizeClusterSecurityGroupIngressOutput, error) + + AuthorizeSnapshotAccessRequest(*redshift.AuthorizeSnapshotAccessInput) (*request.Request, *redshift.AuthorizeSnapshotAccessOutput) + + AuthorizeSnapshotAccess(*redshift.AuthorizeSnapshotAccessInput) (*redshift.AuthorizeSnapshotAccessOutput, error) + + CopyClusterSnapshotRequest(*redshift.CopyClusterSnapshotInput) (*request.Request, *redshift.CopyClusterSnapshotOutput) + + CopyClusterSnapshot(*redshift.CopyClusterSnapshotInput) (*redshift.CopyClusterSnapshotOutput, error) + + CreateClusterRequest(*redshift.CreateClusterInput) (*request.Request, *redshift.CreateClusterOutput) + + CreateCluster(*redshift.CreateClusterInput) (*redshift.CreateClusterOutput, error) + + CreateClusterParameterGroupRequest(*redshift.CreateClusterParameterGroupInput) (*request.Request, *redshift.CreateClusterParameterGroupOutput) + + CreateClusterParameterGroup(*redshift.CreateClusterParameterGroupInput) (*redshift.CreateClusterParameterGroupOutput, error) + + CreateClusterSecurityGroupRequest(*redshift.CreateClusterSecurityGroupInput) (*request.Request, *redshift.CreateClusterSecurityGroupOutput) + + CreateClusterSecurityGroup(*redshift.CreateClusterSecurityGroupInput) (*redshift.CreateClusterSecurityGroupOutput, error) + + CreateClusterSnapshotRequest(*redshift.CreateClusterSnapshotInput) (*request.Request, *redshift.CreateClusterSnapshotOutput) + + CreateClusterSnapshot(*redshift.CreateClusterSnapshotInput) (*redshift.CreateClusterSnapshotOutput, error) + + CreateClusterSubnetGroupRequest(*redshift.CreateClusterSubnetGroupInput) (*request.Request, *redshift.CreateClusterSubnetGroupOutput) + + CreateClusterSubnetGroup(*redshift.CreateClusterSubnetGroupInput) (*redshift.CreateClusterSubnetGroupOutput, error) + + CreateEventSubscriptionRequest(*redshift.CreateEventSubscriptionInput) (*request.Request, *redshift.CreateEventSubscriptionOutput) + + CreateEventSubscription(*redshift.CreateEventSubscriptionInput) (*redshift.CreateEventSubscriptionOutput, error) + + CreateHsmClientCertificateRequest(*redshift.CreateHsmClientCertificateInput) (*request.Request, *redshift.CreateHsmClientCertificateOutput) + + CreateHsmClientCertificate(*redshift.CreateHsmClientCertificateInput) (*redshift.CreateHsmClientCertificateOutput, error) + + CreateHsmConfigurationRequest(*redshift.CreateHsmConfigurationInput) (*request.Request, *redshift.CreateHsmConfigurationOutput) + + CreateHsmConfiguration(*redshift.CreateHsmConfigurationInput) (*redshift.CreateHsmConfigurationOutput, error) + + CreateSnapshotCopyGrantRequest(*redshift.CreateSnapshotCopyGrantInput) (*request.Request, *redshift.CreateSnapshotCopyGrantOutput) + + CreateSnapshotCopyGrant(*redshift.CreateSnapshotCopyGrantInput) (*redshift.CreateSnapshotCopyGrantOutput, error) + + CreateTagsRequest(*redshift.CreateTagsInput) (*request.Request, *redshift.CreateTagsOutput) + + CreateTags(*redshift.CreateTagsInput) (*redshift.CreateTagsOutput, error) + + DeleteClusterRequest(*redshift.DeleteClusterInput) (*request.Request, *redshift.DeleteClusterOutput) + + DeleteCluster(*redshift.DeleteClusterInput) (*redshift.DeleteClusterOutput, error) + + DeleteClusterParameterGroupRequest(*redshift.DeleteClusterParameterGroupInput) 
(*request.Request, *redshift.DeleteClusterParameterGroupOutput) + + DeleteClusterParameterGroup(*redshift.DeleteClusterParameterGroupInput) (*redshift.DeleteClusterParameterGroupOutput, error) + + DeleteClusterSecurityGroupRequest(*redshift.DeleteClusterSecurityGroupInput) (*request.Request, *redshift.DeleteClusterSecurityGroupOutput) + + DeleteClusterSecurityGroup(*redshift.DeleteClusterSecurityGroupInput) (*redshift.DeleteClusterSecurityGroupOutput, error) + + DeleteClusterSnapshotRequest(*redshift.DeleteClusterSnapshotInput) (*request.Request, *redshift.DeleteClusterSnapshotOutput) + + DeleteClusterSnapshot(*redshift.DeleteClusterSnapshotInput) (*redshift.DeleteClusterSnapshotOutput, error) + + DeleteClusterSubnetGroupRequest(*redshift.DeleteClusterSubnetGroupInput) (*request.Request, *redshift.DeleteClusterSubnetGroupOutput) + + DeleteClusterSubnetGroup(*redshift.DeleteClusterSubnetGroupInput) (*redshift.DeleteClusterSubnetGroupOutput, error) + + DeleteEventSubscriptionRequest(*redshift.DeleteEventSubscriptionInput) (*request.Request, *redshift.DeleteEventSubscriptionOutput) + + DeleteEventSubscription(*redshift.DeleteEventSubscriptionInput) (*redshift.DeleteEventSubscriptionOutput, error) + + DeleteHsmClientCertificateRequest(*redshift.DeleteHsmClientCertificateInput) (*request.Request, *redshift.DeleteHsmClientCertificateOutput) + + DeleteHsmClientCertificate(*redshift.DeleteHsmClientCertificateInput) (*redshift.DeleteHsmClientCertificateOutput, error) + + DeleteHsmConfigurationRequest(*redshift.DeleteHsmConfigurationInput) (*request.Request, *redshift.DeleteHsmConfigurationOutput) + + DeleteHsmConfiguration(*redshift.DeleteHsmConfigurationInput) (*redshift.DeleteHsmConfigurationOutput, error) + + DeleteSnapshotCopyGrantRequest(*redshift.DeleteSnapshotCopyGrantInput) (*request.Request, *redshift.DeleteSnapshotCopyGrantOutput) + + DeleteSnapshotCopyGrant(*redshift.DeleteSnapshotCopyGrantInput) (*redshift.DeleteSnapshotCopyGrantOutput, error) + + DeleteTagsRequest(*redshift.DeleteTagsInput) (*request.Request, *redshift.DeleteTagsOutput) + + DeleteTags(*redshift.DeleteTagsInput) (*redshift.DeleteTagsOutput, error) + + DescribeClusterParameterGroupsRequest(*redshift.DescribeClusterParameterGroupsInput) (*request.Request, *redshift.DescribeClusterParameterGroupsOutput) + + DescribeClusterParameterGroups(*redshift.DescribeClusterParameterGroupsInput) (*redshift.DescribeClusterParameterGroupsOutput, error) + + DescribeClusterParameterGroupsPages(*redshift.DescribeClusterParameterGroupsInput, func(*redshift.DescribeClusterParameterGroupsOutput, bool) bool) error + + DescribeClusterParametersRequest(*redshift.DescribeClusterParametersInput) (*request.Request, *redshift.DescribeClusterParametersOutput) + + DescribeClusterParameters(*redshift.DescribeClusterParametersInput) (*redshift.DescribeClusterParametersOutput, error) + + DescribeClusterParametersPages(*redshift.DescribeClusterParametersInput, func(*redshift.DescribeClusterParametersOutput, bool) bool) error + + DescribeClusterSecurityGroupsRequest(*redshift.DescribeClusterSecurityGroupsInput) (*request.Request, *redshift.DescribeClusterSecurityGroupsOutput) + + DescribeClusterSecurityGroups(*redshift.DescribeClusterSecurityGroupsInput) (*redshift.DescribeClusterSecurityGroupsOutput, error) + + DescribeClusterSecurityGroupsPages(*redshift.DescribeClusterSecurityGroupsInput, func(*redshift.DescribeClusterSecurityGroupsOutput, bool) bool) error + + DescribeClusterSnapshotsRequest(*redshift.DescribeClusterSnapshotsInput) (*request.Request, 
*redshift.DescribeClusterSnapshotsOutput) + + DescribeClusterSnapshots(*redshift.DescribeClusterSnapshotsInput) (*redshift.DescribeClusterSnapshotsOutput, error) + + DescribeClusterSnapshotsPages(*redshift.DescribeClusterSnapshotsInput, func(*redshift.DescribeClusterSnapshotsOutput, bool) bool) error + + DescribeClusterSubnetGroupsRequest(*redshift.DescribeClusterSubnetGroupsInput) (*request.Request, *redshift.DescribeClusterSubnetGroupsOutput) + + DescribeClusterSubnetGroups(*redshift.DescribeClusterSubnetGroupsInput) (*redshift.DescribeClusterSubnetGroupsOutput, error) + + DescribeClusterSubnetGroupsPages(*redshift.DescribeClusterSubnetGroupsInput, func(*redshift.DescribeClusterSubnetGroupsOutput, bool) bool) error + + DescribeClusterVersionsRequest(*redshift.DescribeClusterVersionsInput) (*request.Request, *redshift.DescribeClusterVersionsOutput) + + DescribeClusterVersions(*redshift.DescribeClusterVersionsInput) (*redshift.DescribeClusterVersionsOutput, error) + + DescribeClusterVersionsPages(*redshift.DescribeClusterVersionsInput, func(*redshift.DescribeClusterVersionsOutput, bool) bool) error + + DescribeClustersRequest(*redshift.DescribeClustersInput) (*request.Request, *redshift.DescribeClustersOutput) + + DescribeClusters(*redshift.DescribeClustersInput) (*redshift.DescribeClustersOutput, error) + + DescribeClustersPages(*redshift.DescribeClustersInput, func(*redshift.DescribeClustersOutput, bool) bool) error + + DescribeDefaultClusterParametersRequest(*redshift.DescribeDefaultClusterParametersInput) (*request.Request, *redshift.DescribeDefaultClusterParametersOutput) + + DescribeDefaultClusterParameters(*redshift.DescribeDefaultClusterParametersInput) (*redshift.DescribeDefaultClusterParametersOutput, error) + + DescribeDefaultClusterParametersPages(*redshift.DescribeDefaultClusterParametersInput, func(*redshift.DescribeDefaultClusterParametersOutput, bool) bool) error + + DescribeEventCategoriesRequest(*redshift.DescribeEventCategoriesInput) (*request.Request, *redshift.DescribeEventCategoriesOutput) + + DescribeEventCategories(*redshift.DescribeEventCategoriesInput) (*redshift.DescribeEventCategoriesOutput, error) + + DescribeEventSubscriptionsRequest(*redshift.DescribeEventSubscriptionsInput) (*request.Request, *redshift.DescribeEventSubscriptionsOutput) + + DescribeEventSubscriptions(*redshift.DescribeEventSubscriptionsInput) (*redshift.DescribeEventSubscriptionsOutput, error) + + DescribeEventSubscriptionsPages(*redshift.DescribeEventSubscriptionsInput, func(*redshift.DescribeEventSubscriptionsOutput, bool) bool) error + + DescribeEventsRequest(*redshift.DescribeEventsInput) (*request.Request, *redshift.DescribeEventsOutput) + + DescribeEvents(*redshift.DescribeEventsInput) (*redshift.DescribeEventsOutput, error) + + DescribeEventsPages(*redshift.DescribeEventsInput, func(*redshift.DescribeEventsOutput, bool) bool) error + + DescribeHsmClientCertificatesRequest(*redshift.DescribeHsmClientCertificatesInput) (*request.Request, *redshift.DescribeHsmClientCertificatesOutput) + + DescribeHsmClientCertificates(*redshift.DescribeHsmClientCertificatesInput) (*redshift.DescribeHsmClientCertificatesOutput, error) + + DescribeHsmClientCertificatesPages(*redshift.DescribeHsmClientCertificatesInput, func(*redshift.DescribeHsmClientCertificatesOutput, bool) bool) error + + DescribeHsmConfigurationsRequest(*redshift.DescribeHsmConfigurationsInput) (*request.Request, *redshift.DescribeHsmConfigurationsOutput) + + DescribeHsmConfigurations(*redshift.DescribeHsmConfigurationsInput) 
(*redshift.DescribeHsmConfigurationsOutput, error) + + DescribeHsmConfigurationsPages(*redshift.DescribeHsmConfigurationsInput, func(*redshift.DescribeHsmConfigurationsOutput, bool) bool) error + + DescribeLoggingStatusRequest(*redshift.DescribeLoggingStatusInput) (*request.Request, *redshift.LoggingStatus) + + DescribeLoggingStatus(*redshift.DescribeLoggingStatusInput) (*redshift.LoggingStatus, error) + + DescribeOrderableClusterOptionsRequest(*redshift.DescribeOrderableClusterOptionsInput) (*request.Request, *redshift.DescribeOrderableClusterOptionsOutput) + + DescribeOrderableClusterOptions(*redshift.DescribeOrderableClusterOptionsInput) (*redshift.DescribeOrderableClusterOptionsOutput, error) + + DescribeOrderableClusterOptionsPages(*redshift.DescribeOrderableClusterOptionsInput, func(*redshift.DescribeOrderableClusterOptionsOutput, bool) bool) error + + DescribeReservedNodeOfferingsRequest(*redshift.DescribeReservedNodeOfferingsInput) (*request.Request, *redshift.DescribeReservedNodeOfferingsOutput) + + DescribeReservedNodeOfferings(*redshift.DescribeReservedNodeOfferingsInput) (*redshift.DescribeReservedNodeOfferingsOutput, error) + + DescribeReservedNodeOfferingsPages(*redshift.DescribeReservedNodeOfferingsInput, func(*redshift.DescribeReservedNodeOfferingsOutput, bool) bool) error + + DescribeReservedNodesRequest(*redshift.DescribeReservedNodesInput) (*request.Request, *redshift.DescribeReservedNodesOutput) + + DescribeReservedNodes(*redshift.DescribeReservedNodesInput) (*redshift.DescribeReservedNodesOutput, error) + + DescribeReservedNodesPages(*redshift.DescribeReservedNodesInput, func(*redshift.DescribeReservedNodesOutput, bool) bool) error + + DescribeResizeRequest(*redshift.DescribeResizeInput) (*request.Request, *redshift.DescribeResizeOutput) + + DescribeResize(*redshift.DescribeResizeInput) (*redshift.DescribeResizeOutput, error) + + DescribeSnapshotCopyGrantsRequest(*redshift.DescribeSnapshotCopyGrantsInput) (*request.Request, *redshift.DescribeSnapshotCopyGrantsOutput) + + DescribeSnapshotCopyGrants(*redshift.DescribeSnapshotCopyGrantsInput) (*redshift.DescribeSnapshotCopyGrantsOutput, error) + + DescribeTableRestoreStatusRequest(*redshift.DescribeTableRestoreStatusInput) (*request.Request, *redshift.DescribeTableRestoreStatusOutput) + + DescribeTableRestoreStatus(*redshift.DescribeTableRestoreStatusInput) (*redshift.DescribeTableRestoreStatusOutput, error) + + DescribeTagsRequest(*redshift.DescribeTagsInput) (*request.Request, *redshift.DescribeTagsOutput) + + DescribeTags(*redshift.DescribeTagsInput) (*redshift.DescribeTagsOutput, error) + + DisableLoggingRequest(*redshift.DisableLoggingInput) (*request.Request, *redshift.LoggingStatus) + + DisableLogging(*redshift.DisableLoggingInput) (*redshift.LoggingStatus, error) + + DisableSnapshotCopyRequest(*redshift.DisableSnapshotCopyInput) (*request.Request, *redshift.DisableSnapshotCopyOutput) + + DisableSnapshotCopy(*redshift.DisableSnapshotCopyInput) (*redshift.DisableSnapshotCopyOutput, error) + + EnableLoggingRequest(*redshift.EnableLoggingInput) (*request.Request, *redshift.LoggingStatus) + + EnableLogging(*redshift.EnableLoggingInput) (*redshift.LoggingStatus, error) + + EnableSnapshotCopyRequest(*redshift.EnableSnapshotCopyInput) (*request.Request, *redshift.EnableSnapshotCopyOutput) + + EnableSnapshotCopy(*redshift.EnableSnapshotCopyInput) (*redshift.EnableSnapshotCopyOutput, error) + + ModifyClusterRequest(*redshift.ModifyClusterInput) (*request.Request, *redshift.ModifyClusterOutput) + + 
ModifyCluster(*redshift.ModifyClusterInput) (*redshift.ModifyClusterOutput, error) + + ModifyClusterIamRolesRequest(*redshift.ModifyClusterIamRolesInput) (*request.Request, *redshift.ModifyClusterIamRolesOutput) + + ModifyClusterIamRoles(*redshift.ModifyClusterIamRolesInput) (*redshift.ModifyClusterIamRolesOutput, error) + + ModifyClusterParameterGroupRequest(*redshift.ModifyClusterParameterGroupInput) (*request.Request, *redshift.ClusterParameterGroupNameMessage) + + ModifyClusterParameterGroup(*redshift.ModifyClusterParameterGroupInput) (*redshift.ClusterParameterGroupNameMessage, error) + + ModifyClusterSubnetGroupRequest(*redshift.ModifyClusterSubnetGroupInput) (*request.Request, *redshift.ModifyClusterSubnetGroupOutput) + + ModifyClusterSubnetGroup(*redshift.ModifyClusterSubnetGroupInput) (*redshift.ModifyClusterSubnetGroupOutput, error) + + ModifyEventSubscriptionRequest(*redshift.ModifyEventSubscriptionInput) (*request.Request, *redshift.ModifyEventSubscriptionOutput) + + ModifyEventSubscription(*redshift.ModifyEventSubscriptionInput) (*redshift.ModifyEventSubscriptionOutput, error) + + ModifySnapshotCopyRetentionPeriodRequest(*redshift.ModifySnapshotCopyRetentionPeriodInput) (*request.Request, *redshift.ModifySnapshotCopyRetentionPeriodOutput) + + ModifySnapshotCopyRetentionPeriod(*redshift.ModifySnapshotCopyRetentionPeriodInput) (*redshift.ModifySnapshotCopyRetentionPeriodOutput, error) + + PurchaseReservedNodeOfferingRequest(*redshift.PurchaseReservedNodeOfferingInput) (*request.Request, *redshift.PurchaseReservedNodeOfferingOutput) + + PurchaseReservedNodeOffering(*redshift.PurchaseReservedNodeOfferingInput) (*redshift.PurchaseReservedNodeOfferingOutput, error) + + RebootClusterRequest(*redshift.RebootClusterInput) (*request.Request, *redshift.RebootClusterOutput) + + RebootCluster(*redshift.RebootClusterInput) (*redshift.RebootClusterOutput, error) + + ResetClusterParameterGroupRequest(*redshift.ResetClusterParameterGroupInput) (*request.Request, *redshift.ClusterParameterGroupNameMessage) + + ResetClusterParameterGroup(*redshift.ResetClusterParameterGroupInput) (*redshift.ClusterParameterGroupNameMessage, error) + + RestoreFromClusterSnapshotRequest(*redshift.RestoreFromClusterSnapshotInput) (*request.Request, *redshift.RestoreFromClusterSnapshotOutput) + + RestoreFromClusterSnapshot(*redshift.RestoreFromClusterSnapshotInput) (*redshift.RestoreFromClusterSnapshotOutput, error) + + RestoreTableFromClusterSnapshotRequest(*redshift.RestoreTableFromClusterSnapshotInput) (*request.Request, *redshift.RestoreTableFromClusterSnapshotOutput) + + RestoreTableFromClusterSnapshot(*redshift.RestoreTableFromClusterSnapshotInput) (*redshift.RestoreTableFromClusterSnapshotOutput, error) + + RevokeClusterSecurityGroupIngressRequest(*redshift.RevokeClusterSecurityGroupIngressInput) (*request.Request, *redshift.RevokeClusterSecurityGroupIngressOutput) + + RevokeClusterSecurityGroupIngress(*redshift.RevokeClusterSecurityGroupIngressInput) (*redshift.RevokeClusterSecurityGroupIngressOutput, error) + + RevokeSnapshotAccessRequest(*redshift.RevokeSnapshotAccessInput) (*request.Request, *redshift.RevokeSnapshotAccessOutput) + + RevokeSnapshotAccess(*redshift.RevokeSnapshotAccessInput) (*redshift.RevokeSnapshotAccessOutput, error) + + RotateEncryptionKeyRequest(*redshift.RotateEncryptionKeyInput) (*request.Request, *redshift.RotateEncryptionKeyOutput) + + RotateEncryptionKey(*redshift.RotateEncryptionKeyInput) (*redshift.RotateEncryptionKeyOutput, error) +} + +var _ RedshiftAPI = 
(*redshift.Redshift)(nil)
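Because every operation appears on RedshiftAPI in both its Request and synchronous forms, consumers can depend on the interface rather than the concrete client and swap in a stub for unit tests. A minimal sketch, assuming the conventional redshiftiface package path for the interface above; embedding the interface satisfies the methods a test never calls:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/redshift"
	"github.com/aws/aws-sdk-go/service/redshift/redshiftiface"
)

// mockRedshift embeds the interface so only the methods a test exercises
// need to be overridden; calling any other method panics at runtime.
type mockRedshift struct {
	redshiftiface.RedshiftAPI
}

func (m *mockRedshift) DescribeClusters(in *redshift.DescribeClustersInput) (*redshift.DescribeClustersOutput, error) {
	return &redshift.DescribeClustersOutput{
		Clusters: []*redshift.Cluster{{ClusterStatus: aws.String("available")}},
	}, nil
}

// clusterIsAvailable works against the interface, so production code can
// pass a real *redshift.Redshift and tests can pass the mock above.
func clusterIsAvailable(api redshiftiface.RedshiftAPI, id string) (bool, error) {
	out, err := api.DescribeClusters(&redshift.DescribeClustersInput{
		ClusterIdentifier: aws.String(id),
	})
	if err != nil || len(out.Clusters) == 0 {
		return false, err
	}
	return *out.Clusters[0].ClusterStatus == "available", nil
}
```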
diff --git a/vendor/github.com/aws/aws-sdk-go/service/redshift/service.go b/vendor/github.com/aws/aws-sdk-go/service/redshift/service.go
new file mode 100644
index 000000000..f2870625d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/redshift/service.go
@@ -0,0 +1,107 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package redshift
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/query"
+)
+
+// Overview: This is an interface reference for Amazon Redshift. It contains
+// documentation for one of the programming or command line interfaces you can
+// use to manage Amazon Redshift clusters. Note that Amazon Redshift is asynchronous,
+// which means that some interfaces may require techniques, such as polling
+// or asynchronous callback handlers, to determine when a command has been applied.
+// In this reference, the parameter descriptions indicate whether a change is
+// applied immediately, on the next instance reboot, or during the next maintenance
+// window. For a summary of the Amazon Redshift cluster management interfaces,
+// go to Using the Amazon Redshift Management Interfaces (http://docs.aws.amazon.com/redshift/latest/mgmt/using-aws-sdk.html).
+//
+// Amazon Redshift manages all the work of setting up, operating, and scaling
+// a data warehouse: provisioning capacity, monitoring and backing up the cluster,
+// and applying patches and upgrades to the Amazon Redshift engine. You can
+// focus on using your data to acquire new insights for your business and customers.
+//
+// If you are a first-time user of Amazon Redshift, we recommend that you begin
+// by reading the Amazon Redshift Getting Started Guide (http://docs.aws.amazon.com/redshift/latest/gsg/getting-started.html).
+//
+// If you are a database developer, the Amazon Redshift Database Developer
+// Guide (http://docs.aws.amazon.com/redshift/latest/dg/welcome.html) explains
+// how to design, build, query, and maintain the databases that make up your
+// data warehouse.
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type Redshift struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "redshift"
+
+// New creates a new instance of the Redshift client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create a Redshift client from just a session.
+//     svc := redshift.New(mySession)
+//
+//     // Create a Redshift client with additional configuration
+//     svc := redshift.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *Redshift {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *Redshift {
+	svc := &Redshift{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   ServiceName,
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "2012-12-01",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+	svc.Handlers.Build.PushBackNamed(query.BuildHandler)
+	svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler)
+	svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler)
+	svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for a Redshift operation and runs any
+// custom request initialization.
+func (c *Redshift) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	// Run custom request initialization if present
+	if initRequest != nil {
+		initRequest(req)
+	}
+
+	return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/redshift/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/redshift/waiters.go
new file mode 100644
index 000000000..7fe00bc4e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/redshift/waiters.go
@@ -0,0 +1,141 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package redshift
+
+import (
+	"github.com/aws/aws-sdk-go/private/waiter"
+)
+
+func (c *Redshift) WaitUntilClusterAvailable(input *DescribeClustersInput) error {
+	waiterCfg := waiter.Config{
+		Operation:   "DescribeClusters",
+		Delay:       60,
+		MaxAttempts: 30,
+		Acceptors: []waiter.WaitAcceptor{
+			{
+				State:    "success",
+				Matcher:  "pathAll",
+				Argument: "Clusters[].ClusterStatus",
+				Expected: "available",
+			},
+			{
+				State:    "failure",
+				Matcher:  "pathAny",
+				Argument: "Clusters[].ClusterStatus",
+				Expected: "deleting",
+			},
+			{
+				State:    "retry",
+				Matcher:  "error",
+				Argument: "",
+				Expected: "ClusterNotFound",
+			},
+		},
+	}
+
+	w := waiter.Waiter{
+		Client: c,
+		Input:  input,
+		Config: waiterCfg,
+	}
+	return w.Wait()
+}
+
+func (c *Redshift) WaitUntilClusterDeleted(input *DescribeClustersInput) error {
+	waiterCfg := waiter.Config{
+		Operation:   "DescribeClusters",
+		Delay:       60,
+		MaxAttempts: 30,
+		Acceptors: []waiter.WaitAcceptor{
+			{
+				State:    "success",
+				Matcher:  "error",
+				Argument: "",
+				Expected: "ClusterNotFound",
+			},
+			{
+				State:    "failure",
+				Matcher:  "pathAny",
+				Argument: "Clusters[].ClusterStatus",
+				Expected: "creating",
+			},
+			{
+				State:    "failure",
+				Matcher:  "pathAny",
+				Argument: "Clusters[].ClusterStatus",
+				Expected: "modifying",
+			},
+		},
+	}
+
+	w := waiter.Waiter{
+		Client: c,
+		Input:  input,
+		Config: waiterCfg,
+	}
+	return w.Wait()
+}
+
+func (c *Redshift) WaitUntilClusterRestored(input *DescribeClustersInput) error {
+	waiterCfg := waiter.Config{
+		Operation:   "DescribeClusters",
+		Delay:       60,
+		MaxAttempts: 30,
+		Acceptors: []waiter.WaitAcceptor{
+			{
+				State:    "success",
+				Matcher:  "pathAll",
+				Argument: "Clusters[].RestoreStatus.Status",
+				Expected: "completed",
+			},
+			{
+				State:    "failure",
+				Matcher:  "pathAny",
+				Argument: "Clusters[].ClusterStatus",
+				Expected: "deleting",
+			},
+		},
+	}
+
+	w := waiter.Waiter{
+		Client: c,
+		Input:  input,
+		Config: waiterCfg,
+	}
+	return w.Wait()
+}
+
+func (c *Redshift) WaitUntilSnapshotAvailable(input *DescribeClusterSnapshotsInput) error {
+	waiterCfg := waiter.Config{
+		Operation:   "DescribeClusterSnapshots",
+		Delay:       15,
+		MaxAttempts: 20,
+		Acceptors: []waiter.WaitAcceptor{
+			{
+				State:    "success",
+				Matcher:  "pathAll",
+				Argument: "Snapshots[].Status",
+				Expected: "available",
+			},
+			{
+				State:    "failure",
+				Matcher:  "pathAny",
+				Argument: "Snapshots[].Status",
+				Expected: "failed",
+			},
+			{
+				State:    "failure",
+				Matcher:  "pathAny",
+				Argument: "Snapshots[].Status",
+				Expected: "deleted",
+			},
+		},
+	}
+
+	w := waiter.Waiter{
+		Client: c,
+		Input:  input,
+		Config: waiterCfg,
+	}
+	return w.Wait()
+}
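To see the generated client and waiters in use: a minimal sketch, assuming region and credentials come from the environment or shared config; the cluster identifier is a placeholder. WaitUntilClusterAvailable polls DescribeClusters on the 60-second, 30-attempt schedule configured above:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/redshift"
)

func main() {
	// A session picks up credentials from the environment or shared
	// config; the explicit region here is only illustrative.
	sess, err := session.NewSession(aws.NewConfig().WithRegion("us-west-2"))
	if err != nil {
		log.Fatal(err)
	}
	svc := redshift.New(sess)

	// Block until the cluster reports "available" (or the waiter gives up).
	input := &redshift.DescribeClustersInput{
		ClusterIdentifier: aws.String("my-cluster"), // placeholder identifier
	}
	if err := svc.WaitUntilClusterAvailable(input); err != nil {
		log.Fatal(err)
	}
	fmt.Println("cluster is available")
}
```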
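A usage sketch for the operation documented above, with placeholder zone and VPC identifiers; the returned ChangeInfo carries the change ID to poll via GetChange:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/route53"
)

// associateVPC attaches a VPC to a private hosted zone; both identifiers
// are placeholders supplied by the caller.
func associateVPC(svc *route53.Route53, zoneID, vpcID, region string) (*route53.ChangeInfo, error) {
	out, err := svc.AssociateVPCWithHostedZone(&route53.AssociateVPCWithHostedZoneInput{
		HostedZoneId: aws.String(zoneID),
		VPC: &route53.VPC{
			VPCId:     aws.String(vpcID),
			VPCRegion: aws.String(region),
		},
	})
	if err != nil {
		return nil, err
	}
	return out.ChangeInfo, nil
}
```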
+func (c *Route53) AssociateVPCWithHostedZone(input *AssociateVPCWithHostedZoneInput) (*AssociateVPCWithHostedZoneOutput, error) { + req, out := c.AssociateVPCWithHostedZoneRequest(input) + err := req.Send() + return out, err +} + +const opChangeResourceRecordSets = "ChangeResourceRecordSets" + +// ChangeResourceRecordSetsRequest generates a "aws/request.Request" representing the +// client's request for the ChangeResourceRecordSets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ChangeResourceRecordSets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ChangeResourceRecordSetsRequest method. +// req, resp := client.ChangeResourceRecordSetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) ChangeResourceRecordSetsRequest(input *ChangeResourceRecordSetsInput) (req *request.Request, output *ChangeResourceRecordSetsOutput) { + op := &request.Operation{ + Name: opChangeResourceRecordSets, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/hostedzone/{Id}/rrset/", + } + + if input == nil { + input = &ChangeResourceRecordSetsInput{} + } + + req = c.newRequest(op, input, output) + output = &ChangeResourceRecordSetsOutput{} + req.Data = output + return +} + +// Use this action to create or change your authoritative DNS information. To +// use this action, send a POST request to the /Route 53 API version/hostedzone/hosted +// Zone ID/rrset resource. The request body must include a document with a ChangeResourceRecordSetsRequest +// element. +// +// Changes are a list of change items and are considered transactional. For +// more information on transactional changes, also known as change batches, +// see POST ChangeResourceRecordSets (http://docs.aws.amazon.com/Route53/latest/APIReference/API_ChangeResourceRecordSets.html) +// in the Amazon Route 53 API Reference. +// +// Due to the nature of transactional changes, you cannot delete the same resource +// record set more than once in a single change batch. If you attempt to delete +// the same change batch more than once, Amazon Route 53 returns an InvalidChangeBatch +// error. In response to a ChangeResourceRecordSets request, your DNS data is +// changed on all Amazon Route 53 DNS servers. Initially, the status of a change +// is PENDING. This means the change has not yet propagated to all the authoritative +// Amazon Route 53 DNS servers. When the change is propagated to all hosts, +// the change returns a status of INSYNC. +// +// Note the following limitations on a ChangeResourceRecordSets request: +// +// A request cannot contain more than 100 Change elements. A request cannot +// contain more than 1000 ResourceRecord elements. The sum of the number of +// characters (including spaces) in all Value elements in a request cannot exceed +// 32,000 characters. 
+func (c *Route53) ChangeResourceRecordSets(input *ChangeResourceRecordSetsInput) (*ChangeResourceRecordSetsOutput, error) { + req, out := c.ChangeResourceRecordSetsRequest(input) + err := req.Send() + return out, err +} + +const opChangeTagsForResource = "ChangeTagsForResource" + +// ChangeTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ChangeTagsForResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ChangeTagsForResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ChangeTagsForResourceRequest method. +// req, resp := client.ChangeTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) ChangeTagsForResourceRequest(input *ChangeTagsForResourceInput) (req *request.Request, output *ChangeTagsForResourceOutput) { + op := &request.Operation{ + Name: opChangeTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/tags/{ResourceType}/{ResourceId}", + } + + if input == nil { + input = &ChangeTagsForResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &ChangeTagsForResourceOutput{} + req.Data = output + return +} + +func (c *Route53) ChangeTagsForResource(input *ChangeTagsForResourceInput) (*ChangeTagsForResourceOutput, error) { + req, out := c.ChangeTagsForResourceRequest(input) + err := req.Send() + return out, err +} + +const opCreateHealthCheck = "CreateHealthCheck" + +// CreateHealthCheckRequest generates a "aws/request.Request" representing the +// client's request for the CreateHealthCheck operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateHealthCheck method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateHealthCheckRequest method. +// req, resp := client.CreateHealthCheckRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) CreateHealthCheckRequest(input *CreateHealthCheckInput) (req *request.Request, output *CreateHealthCheckOutput) { + op := &request.Operation{ + Name: opCreateHealthCheck, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/healthcheck", + } + + if input == nil { + input = &CreateHealthCheckInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateHealthCheckOutput{} + req.Data = output + return +} + +// This action creates a new health check. 
+//
+// To create a new health check, send a POST request to the /Route 53 API version/healthcheck
+// resource. The request body must include a document with a CreateHealthCheckRequest
+// element. The response returns the CreateHealthCheckResponse element that
+// contains metadata about the health check.
+func (c *Route53) CreateHealthCheck(input *CreateHealthCheckInput) (*CreateHealthCheckOutput, error) {
+	req, out := c.CreateHealthCheckRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateHostedZone = "CreateHostedZone"
+
+// CreateHostedZoneRequest generates a "aws/request.Request" representing the
+// client's request for the CreateHostedZone operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CreateHostedZone method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the CreateHostedZoneRequest method.
+//    req, resp := client.CreateHostedZoneRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Route53) CreateHostedZoneRequest(input *CreateHostedZoneInput) (req *request.Request, output *CreateHostedZoneOutput) {
+	op := &request.Operation{
+		Name:       opCreateHostedZone,
+		HTTPMethod: "POST",
+		HTTPPath:   "/2013-04-01/hostedzone",
+	}
+
+	if input == nil {
+		input = &CreateHostedZoneInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CreateHostedZoneOutput{}
+	req.Data = output
+	return
+}
+
+// This action creates a new hosted zone.
+//
+// To create a new hosted zone, send a POST request to the /Route 53 API version/hostedzone
+// resource. The request body must include a document with a CreateHostedZoneRequest
+// element. The response returns the CreateHostedZoneResponse element that contains
+// metadata about the hosted zone.
+//
+// Amazon Route 53 automatically creates a default SOA record and four NS records
+// for the zone. The NS records in the hosted zone are the name servers you
+// give your registrar to delegate your domain to. For more information about
+// SOA and NS records, see NS and SOA Records that Amazon Route 53 Creates for
+// a Hosted Zone (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/SOA-NSrecords.html)
+// in the Amazon Route 53 Developer Guide.
+//
+// When you create a zone, its initial status is PENDING. This means that it
+// is not yet available on all DNS servers. The status of the zone changes to
+// INSYNC when the NS and SOA records are available on all Amazon Route 53 DNS
+// servers.
+//
+// When trying to create a hosted zone using a reusable delegation set, you
+// could specify an optional DelegationSetId, and Route 53 would assign those
+// 4 NS records for the zone, instead of allotting a new one.
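A sketch of the call, using a hypothetical helper signature; CallerReference must be unique (a timestamp or UUID is the usual choice), and DelegationSetId is only set when reusing an existing delegation set as described above:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/route53"
)

// createZone creates a public hosted zone, optionally reusing an existing
// delegation set's four NS records.
func createZone(svc *route53.Route53, name, callerRef, delegationSetID string) (*route53.CreateHostedZoneOutput, error) {
	in := &route53.CreateHostedZoneInput{
		Name:            aws.String(name), // e.g. "example.com."
		CallerReference: aws.String(callerRef),
	}
	if delegationSetID != "" {
		in.DelegationSetId = aws.String(delegationSetID)
	}
	return svc.CreateHostedZone(in)
}
```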
+func (c *Route53) CreateHostedZone(input *CreateHostedZoneInput) (*CreateHostedZoneOutput, error) { + req, out := c.CreateHostedZoneRequest(input) + err := req.Send() + return out, err +} + +const opCreateReusableDelegationSet = "CreateReusableDelegationSet" + +// CreateReusableDelegationSetRequest generates a "aws/request.Request" representing the +// client's request for the CreateReusableDelegationSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateReusableDelegationSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateReusableDelegationSetRequest method. +// req, resp := client.CreateReusableDelegationSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) CreateReusableDelegationSetRequest(input *CreateReusableDelegationSetInput) (req *request.Request, output *CreateReusableDelegationSetOutput) { + op := &request.Operation{ + Name: opCreateReusableDelegationSet, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/delegationset", + } + + if input == nil { + input = &CreateReusableDelegationSetInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateReusableDelegationSetOutput{} + req.Data = output + return +} + +// This action creates a reusable delegationSet. +// +// To create a new reusable delegationSet, send a POST request to the /Route +// 53 API version/delegationset resource. The request body must include a document +// with a CreateReusableDelegationSetRequest element. The response returns the +// CreateReusableDelegationSetResponse element that contains metadata about +// the delegationSet. +// +// If the optional parameter HostedZoneId is specified, it marks the delegationSet +// associated with that particular hosted zone as reusable. +func (c *Route53) CreateReusableDelegationSet(input *CreateReusableDelegationSetInput) (*CreateReusableDelegationSetOutput, error) { + req, out := c.CreateReusableDelegationSetRequest(input) + err := req.Send() + return out, err +} + +const opCreateTrafficPolicy = "CreateTrafficPolicy" + +// CreateTrafficPolicyRequest generates a "aws/request.Request" representing the +// client's request for the CreateTrafficPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateTrafficPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateTrafficPolicyRequest method. 
+// req, resp := client.CreateTrafficPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) CreateTrafficPolicyRequest(input *CreateTrafficPolicyInput) (req *request.Request, output *CreateTrafficPolicyOutput) { + op := &request.Operation{ + Name: opCreateTrafficPolicy, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/trafficpolicy", + } + + if input == nil { + input = &CreateTrafficPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateTrafficPolicyOutput{} + req.Data = output + return +} + +// Creates a traffic policy, which you use to create multiple DNS resource record +// sets for one domain name (such as example.com) or one subdomain name (such +// as www.example.com). +// +// To create a traffic policy, send a POST request to the /Route 53 API version/trafficpolicy +// resource. The request body must include a document with a CreateTrafficPolicyRequest +// element. The response includes the CreateTrafficPolicyResponse element, which +// contains information about the new traffic policy. +func (c *Route53) CreateTrafficPolicy(input *CreateTrafficPolicyInput) (*CreateTrafficPolicyOutput, error) { + req, out := c.CreateTrafficPolicyRequest(input) + err := req.Send() + return out, err +} + +const opCreateTrafficPolicyInstance = "CreateTrafficPolicyInstance" + +// CreateTrafficPolicyInstanceRequest generates a "aws/request.Request" representing the +// client's request for the CreateTrafficPolicyInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateTrafficPolicyInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateTrafficPolicyInstanceRequest method. +// req, resp := client.CreateTrafficPolicyInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) CreateTrafficPolicyInstanceRequest(input *CreateTrafficPolicyInstanceInput) (req *request.Request, output *CreateTrafficPolicyInstanceOutput) { + op := &request.Operation{ + Name: opCreateTrafficPolicyInstance, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/trafficpolicyinstance", + } + + if input == nil { + input = &CreateTrafficPolicyInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateTrafficPolicyInstanceOutput{} + req.Data = output + return +} + +// Creates resource record sets in a specified hosted zone based on the settings +// in a specified traffic policy version. In addition, CreateTrafficPolicyInstance +// associates the resource record sets with a specified domain name (such as +// example.com) or subdomain name (such as www.example.com). Amazon Route 53 +// responds to DNS queries for the domain or subdomain name by using the resource +// record sets that CreateTrafficPolicyInstance created. +// +// To create a traffic policy instance, send a POST request to the /Route 53 +// API version/trafficpolicyinstance resource. 
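A sketch tying the two traffic policy operations together; the JSON document and identifiers are placeholders, and the actual document schema is defined by the Route 53 traffic policy document format:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/route53"
)

// createPolicyWithInstance creates a traffic policy from a JSON document and
// then instantiates it as resource record sets in a zone.
func createPolicyWithInstance(svc *route53.Route53, zoneID, name, doc string) error {
	pol, err := svc.CreateTrafficPolicy(&route53.CreateTrafficPolicyInput{
		Name:     aws.String(name),
		Document: aws.String(doc),
	})
	if err != nil {
		return err
	}
	_, err = svc.CreateTrafficPolicyInstance(&route53.CreateTrafficPolicyInstanceInput{
		HostedZoneId:         aws.String(zoneID),
		Name:                 aws.String("www.example.com."), // placeholder
		TTL:                  aws.Int64(60),
		TrafficPolicyId:      pol.TrafficPolicy.Id,
		TrafficPolicyVersion: pol.TrafficPolicy.Version,
	})
	return err
}
```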
The request body must include +// a document with a CreateTrafficPolicyRequest element. The response returns +// the CreateTrafficPolicyInstanceResponse element, which contains information +// about the traffic policy instance. +func (c *Route53) CreateTrafficPolicyInstance(input *CreateTrafficPolicyInstanceInput) (*CreateTrafficPolicyInstanceOutput, error) { + req, out := c.CreateTrafficPolicyInstanceRequest(input) + err := req.Send() + return out, err +} + +const opCreateTrafficPolicyVersion = "CreateTrafficPolicyVersion" + +// CreateTrafficPolicyVersionRequest generates a "aws/request.Request" representing the +// client's request for the CreateTrafficPolicyVersion operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateTrafficPolicyVersion method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateTrafficPolicyVersionRequest method. +// req, resp := client.CreateTrafficPolicyVersionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) CreateTrafficPolicyVersionRequest(input *CreateTrafficPolicyVersionInput) (req *request.Request, output *CreateTrafficPolicyVersionOutput) { + op := &request.Operation{ + Name: opCreateTrafficPolicyVersion, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/trafficpolicy/{Id}", + } + + if input == nil { + input = &CreateTrafficPolicyVersionInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateTrafficPolicyVersionOutput{} + req.Data = output + return +} + +// Creates a new version of an existing traffic policy. When you create a new +// version of a traffic policy, you specify the ID of the traffic policy that +// you want to update and a JSON-formatted document that describes the new version. +// +// You use traffic policies to create multiple DNS resource record sets for +// one domain name (such as example.com) or one subdomain name (such as www.example.com). +// +// To create a new version, send a POST request to the /Route 53 API version/trafficpolicy/ +// resource. The request body includes a document with a CreateTrafficPolicyVersionRequest +// element. The response returns the CreateTrafficPolicyVersionResponse element, +// which contains information about the new version of the traffic policy. +func (c *Route53) CreateTrafficPolicyVersion(input *CreateTrafficPolicyVersionInput) (*CreateTrafficPolicyVersionOutput, error) { + req, out := c.CreateTrafficPolicyVersionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteHealthCheck = "DeleteHealthCheck" + +// DeleteHealthCheckRequest generates a "aws/request.Request" representing the +// client's request for the DeleteHealthCheck operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteHealthCheck method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteHealthCheckRequest method. +// req, resp := client.DeleteHealthCheckRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) DeleteHealthCheckRequest(input *DeleteHealthCheckInput) (req *request.Request, output *DeleteHealthCheckOutput) { + op := &request.Operation{ + Name: opDeleteHealthCheck, + HTTPMethod: "DELETE", + HTTPPath: "/2013-04-01/healthcheck/{HealthCheckId}", + } + + if input == nil { + input = &DeleteHealthCheckInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteHealthCheckOutput{} + req.Data = output + return +} + +// This action deletes a health check. To delete a health check, send a DELETE +// request to the /Route 53 API version/healthcheck/health check ID resource. +// +// You can delete a health check only if there are no resource record sets +// associated with this health check. If resource record sets are associated +// with this health check, you must disassociate them before you can delete +// your health check. If you try to delete a health check that is associated +// with resource record sets, Amazon Route 53 will deny your request with a +// HealthCheckInUse error. For information about disassociating the records +// from your health check, see ChangeResourceRecordSets. +func (c *Route53) DeleteHealthCheck(input *DeleteHealthCheckInput) (*DeleteHealthCheckOutput, error) { + req, out := c.DeleteHealthCheckRequest(input) + err := req.Send() + return out, err +} + +const opDeleteHostedZone = "DeleteHostedZone" + +// DeleteHostedZoneRequest generates a "aws/request.Request" representing the +// client's request for the DeleteHostedZone operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteHostedZone method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteHostedZoneRequest method. +// req, resp := client.DeleteHostedZoneRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) DeleteHostedZoneRequest(input *DeleteHostedZoneInput) (req *request.Request, output *DeleteHostedZoneOutput) { + op := &request.Operation{ + Name: opDeleteHostedZone, + HTTPMethod: "DELETE", + HTTPPath: "/2013-04-01/hostedzone/{Id}", + } + + if input == nil { + input = &DeleteHostedZoneInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteHostedZoneOutput{} + req.Data = output + return +} + +// This action deletes a hosted zone. 
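A sketch of the delete call; as the surrounding comment explains, it succeeds only once every record set other than the default SOA and NS sets has been removed, otherwise the service returns HostedZoneNotEmpty:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/route53"
)

// deleteZone removes a hosted zone by its placeholder ID.
func deleteZone(svc *route53.Route53, zoneID string) error {
	_, err := svc.DeleteHostedZone(&route53.DeleteHostedZoneInput{
		Id: aws.String(zoneID),
	})
	return err
}
```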
To delete a hosted zone, send a DELETE +// request to the /Route 53 API version/hostedzone/hosted zone ID resource. +// +// You can delete a hosted zone only if there are no resource record sets other +// than the default SOA record and NS resource record sets. If your hosted zone +// contains other resource record sets, you must delete them before you can +// delete your hosted zone. If you try to delete a hosted zone that contains +// other resource record sets, Amazon Route 53 will deny your request with a +// HostedZoneNotEmpty error. For information about deleting records from your +// hosted zone, see ChangeResourceRecordSets. +func (c *Route53) DeleteHostedZone(input *DeleteHostedZoneInput) (*DeleteHostedZoneOutput, error) { + req, out := c.DeleteHostedZoneRequest(input) + err := req.Send() + return out, err +} + +const opDeleteReusableDelegationSet = "DeleteReusableDelegationSet" + +// DeleteReusableDelegationSetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteReusableDelegationSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteReusableDelegationSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteReusableDelegationSetRequest method. +// req, resp := client.DeleteReusableDelegationSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) DeleteReusableDelegationSetRequest(input *DeleteReusableDelegationSetInput) (req *request.Request, output *DeleteReusableDelegationSetOutput) { + op := &request.Operation{ + Name: opDeleteReusableDelegationSet, + HTTPMethod: "DELETE", + HTTPPath: "/2013-04-01/delegationset/{Id}", + } + + if input == nil { + input = &DeleteReusableDelegationSetInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteReusableDelegationSetOutput{} + req.Data = output + return +} + +// This action deletes a reusable delegation set. To delete a reusable delegation +// set, send a DELETE request to the /Route 53 API version/delegationset/delegation +// set ID resource. +// +// You can delete a reusable delegation set only if there are no associated +// hosted zones. If your reusable delegation set contains associated hosted +// zones, you must delete them before you can delete your reusable delegation +// set. If you try to delete a reusable delegation set that contains associated +// hosted zones, Amazon Route 53 will deny your request with a DelegationSetInUse +// error. +func (c *Route53) DeleteReusableDelegationSet(input *DeleteReusableDelegationSetInput) (*DeleteReusableDelegationSetOutput, error) { + req, out := c.DeleteReusableDelegationSetRequest(input) + err := req.Send() + return out, err +} + +const opDeleteTrafficPolicy = "DeleteTrafficPolicy" + +// DeleteTrafficPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTrafficPolicy operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteTrafficPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteTrafficPolicyRequest method. +// req, resp := client.DeleteTrafficPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) DeleteTrafficPolicyRequest(input *DeleteTrafficPolicyInput) (req *request.Request, output *DeleteTrafficPolicyOutput) { + op := &request.Operation{ + Name: opDeleteTrafficPolicy, + HTTPMethod: "DELETE", + HTTPPath: "/2013-04-01/trafficpolicy/{Id}/{Version}", + } + + if input == nil { + input = &DeleteTrafficPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteTrafficPolicyOutput{} + req.Data = output + return +} + +// Deletes a traffic policy. To delete a traffic policy, send a DELETE request +// to the /Route 53 API version/trafficpolicy resource. +func (c *Route53) DeleteTrafficPolicy(input *DeleteTrafficPolicyInput) (*DeleteTrafficPolicyOutput, error) { + req, out := c.DeleteTrafficPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteTrafficPolicyInstance = "DeleteTrafficPolicyInstance" + +// DeleteTrafficPolicyInstanceRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTrafficPolicyInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteTrafficPolicyInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteTrafficPolicyInstanceRequest method. +// req, resp := client.DeleteTrafficPolicyInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) DeleteTrafficPolicyInstanceRequest(input *DeleteTrafficPolicyInstanceInput) (req *request.Request, output *DeleteTrafficPolicyInstanceOutput) { + op := &request.Operation{ + Name: opDeleteTrafficPolicyInstance, + HTTPMethod: "DELETE", + HTTPPath: "/2013-04-01/trafficpolicyinstance/{Id}", + } + + if input == nil { + input = &DeleteTrafficPolicyInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteTrafficPolicyInstanceOutput{} + req.Data = output + return +} + +// Deletes a traffic policy instance and all of the resource record sets that +// Amazon Route 53 created when you created the instance. +// +// To delete a traffic policy instance, send a DELETE request to the /Route +// 53 API version/trafficpolicy/traffic policy instance ID resource. 
+//
+// When you delete a traffic policy instance, Amazon Route 53 also deletes
+// all of the resource record sets that were created when you created the traffic
+// policy instance.
+func (c *Route53) DeleteTrafficPolicyInstance(input *DeleteTrafficPolicyInstanceInput) (*DeleteTrafficPolicyInstanceOutput, error) {
+	req, out := c.DeleteTrafficPolicyInstanceRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDisassociateVPCFromHostedZone = "DisassociateVPCFromHostedZone"
+
+// DisassociateVPCFromHostedZoneRequest generates a "aws/request.Request" representing the
+// client's request for the DisassociateVPCFromHostedZone operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DisassociateVPCFromHostedZone method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DisassociateVPCFromHostedZoneRequest method.
+//    req, resp := client.DisassociateVPCFromHostedZoneRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Route53) DisassociateVPCFromHostedZoneRequest(input *DisassociateVPCFromHostedZoneInput) (req *request.Request, output *DisassociateVPCFromHostedZoneOutput) {
+	op := &request.Operation{
+		Name:       opDisassociateVPCFromHostedZone,
+		HTTPMethod: "POST",
+		HTTPPath:   "/2013-04-01/hostedzone/{Id}/disassociatevpc",
+	}
+
+	if input == nil {
+		input = &DisassociateVPCFromHostedZoneInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DisassociateVPCFromHostedZoneOutput{}
+	req.Data = output
+	return
+}
+
+// This action disassociates a VPC from a hosted zone.
+//
+// To disassociate a VPC from a hosted zone, send a POST request to the /Route
+// 53 API version/hostedzone/hosted zone ID/disassociatevpc resource. The request
+// body must include a document with a DisassociateVPCFromHostedZoneRequest
+// element. The response returns the DisassociateVPCFromHostedZoneResponse element
+// that contains ChangeInfo for you to track the progress of the DisassociateVPCFromHostedZoneRequest
+// you made. See the GetChange operation for how to track the progress of your change.
+func (c *Route53) DisassociateVPCFromHostedZone(input *DisassociateVPCFromHostedZoneInput) (*DisassociateVPCFromHostedZoneOutput, error) {
+	req, out := c.DisassociateVPCFromHostedZoneRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opGetChange = "GetChange"
+
+// GetChangeRequest generates a "aws/request.Request" representing the
+// client's request for the GetChange operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the GetChange method directly
+// instead.
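Because GetChange is how callers track the PENDING to INSYNC transition, a small polling helper is the usual pattern. A sketch, with an arbitrary interval and attempt cap rather than service-mandated values:

```go
package example

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/route53"
)

// waitForInsync polls GetChange until the change batch has replicated to
// all Route 53 DNS servers (status INSYNC) or the attempt cap is reached.
func waitForInsync(svc *route53.Route53, changeID string) error {
	for i := 0; i < 30; i++ {
		out, err := svc.GetChange(&route53.GetChangeInput{Id: aws.String(changeID)})
		if err != nil {
			return err
		}
		if *out.ChangeInfo.Status == "INSYNC" {
			return nil
		}
		time.Sleep(10 * time.Second)
	}
	return fmt.Errorf("change %s still PENDING after 30 attempts", changeID)
}
```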
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetChangeRequest method. +// req, resp := client.GetChangeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) GetChangeRequest(input *GetChangeInput) (req *request.Request, output *GetChangeOutput) { + op := &request.Operation{ + Name: opGetChange, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/change/{Id}", + } + + if input == nil { + input = &GetChangeInput{} + } + + req = c.newRequest(op, input, output) + output = &GetChangeOutput{} + req.Data = output + return +} + +// This action returns the current status of a change batch request. The status +// is one of the following values: +// +// - PENDING indicates that the changes in this request have not replicated +// to all Amazon Route 53 DNS servers. This is the initial status of all change +// batch requests. +// +// - INSYNC indicates that the changes have replicated to all Amazon Route +// 53 DNS servers. +func (c *Route53) GetChange(input *GetChangeInput) (*GetChangeOutput, error) { + req, out := c.GetChangeRequest(input) + err := req.Send() + return out, err +} + +const opGetChangeDetails = "GetChangeDetails" + +// GetChangeDetailsRequest generates a "aws/request.Request" representing the +// client's request for the GetChangeDetails operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetChangeDetails method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetChangeDetailsRequest method. +// req, resp := client.GetChangeDetailsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) GetChangeDetailsRequest(input *GetChangeDetailsInput) (req *request.Request, output *GetChangeDetailsOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, GetChangeDetails, has been deprecated") + } + op := &request.Operation{ + Name: opGetChangeDetails, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/changedetails/{Id}", + } + + if input == nil { + input = &GetChangeDetailsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetChangeDetailsOutput{} + req.Data = output + return +} + +// This action returns the status and changes of a change batch request. +func (c *Route53) GetChangeDetails(input *GetChangeDetailsInput) (*GetChangeDetailsOutput, error) { + req, out := c.GetChangeDetailsRequest(input) + err := req.Send() + return out, err +} + +const opGetCheckerIpRanges = "GetCheckerIpRanges" + +// GetCheckerIpRangesRequest generates a "aws/request.Request" representing the +// client's request for the GetCheckerIpRanges operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetCheckerIpRanges method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetCheckerIpRangesRequest method. +// req, resp := client.GetCheckerIpRangesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) GetCheckerIpRangesRequest(input *GetCheckerIpRangesInput) (req *request.Request, output *GetCheckerIpRangesOutput) { + op := &request.Operation{ + Name: opGetCheckerIpRanges, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/checkeripranges", + } + + if input == nil { + input = &GetCheckerIpRangesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetCheckerIpRangesOutput{} + req.Data = output + return +} + +// To retrieve a list of the IP ranges used by Amazon Route 53 health checkers +// to check the health of your resources, send a GET request to the /Route 53 +// API version/checkeripranges resource. You can use these IP addresses to configure +// router and firewall rules to allow health checkers to check the health of +// your resources. +func (c *Route53) GetCheckerIpRanges(input *GetCheckerIpRangesInput) (*GetCheckerIpRangesOutput, error) { + req, out := c.GetCheckerIpRangesRequest(input) + err := req.Send() + return out, err +} + +const opGetGeoLocation = "GetGeoLocation" + +// GetGeoLocationRequest generates a "aws/request.Request" representing the +// client's request for the GetGeoLocation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetGeoLocation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetGeoLocationRequest method. +// req, resp := client.GetGeoLocationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) GetGeoLocationRequest(input *GetGeoLocationInput) (req *request.Request, output *GetGeoLocationOutput) { + op := &request.Operation{ + Name: opGetGeoLocation, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/geolocation", + } + + if input == nil { + input = &GetGeoLocationInput{} + } + + req = c.newRequest(op, input, output) + output = &GetGeoLocationOutput{} + req.Data = output + return +} + +// To retrieve a single geo location, send a GET request to the /Route 53 API +// version/geolocation resource with one of these options: continentcode | countrycode +// | countrycode and subdivisioncode. 
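+//
+//    // Editor's sketch, not generated code: looking up a single geo location
+//    // by country and subdivision, one of the option combinations listed above.
+//    // "svc" is assumed to be a *Route53 client.
+//    out, err := svc.GetGeoLocation(&route53.GetGeoLocationInput{
+//        CountryCode:     aws.String("US"),
+//        SubdivisionCode: aws.String("CA"),
+//    })
+//    if err == nil {
+//        fmt.Println(out.GeoLocationDetails)
+//    }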
+func (c *Route53) GetGeoLocation(input *GetGeoLocationInput) (*GetGeoLocationOutput, error) { + req, out := c.GetGeoLocationRequest(input) + err := req.Send() + return out, err +} + +const opGetHealthCheck = "GetHealthCheck" + +// GetHealthCheckRequest generates a "aws/request.Request" representing the +// client's request for the GetHealthCheck operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetHealthCheck method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetHealthCheckRequest method. +// req, resp := client.GetHealthCheckRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) GetHealthCheckRequest(input *GetHealthCheckInput) (req *request.Request, output *GetHealthCheckOutput) { + op := &request.Operation{ + Name: opGetHealthCheck, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/healthcheck/{HealthCheckId}", + } + + if input == nil { + input = &GetHealthCheckInput{} + } + + req = c.newRequest(op, input, output) + output = &GetHealthCheckOutput{} + req.Data = output + return +} + +// To retrieve the health check, send a GET request to the /Route 53 API version/healthcheck/health +// check ID resource. +func (c *Route53) GetHealthCheck(input *GetHealthCheckInput) (*GetHealthCheckOutput, error) { + req, out := c.GetHealthCheckRequest(input) + err := req.Send() + return out, err +} + +const opGetHealthCheckCount = "GetHealthCheckCount" + +// GetHealthCheckCountRequest generates a "aws/request.Request" representing the +// client's request for the GetHealthCheckCount operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetHealthCheckCount method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetHealthCheckCountRequest method. +// req, resp := client.GetHealthCheckCountRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) GetHealthCheckCountRequest(input *GetHealthCheckCountInput) (req *request.Request, output *GetHealthCheckCountOutput) { + op := &request.Operation{ + Name: opGetHealthCheckCount, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/healthcheckcount", + } + + if input == nil { + input = &GetHealthCheckCountInput{} + } + + req = c.newRequest(op, input, output) + output = &GetHealthCheckCountOutput{} + req.Data = output + return +} + +// To retrieve a count of all your health checks, send a GET request to the +// /Route 53 API version/healthcheckcount resource. 
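+//
+//    // Editor's sketch, not generated code: the input struct has no required
+//    // fields, so the call reduces to one line. "svc" is assumed to be a
+//    // *Route53 client.
+//    out, err := svc.GetHealthCheckCount(&route53.GetHealthCheckCountInput{})
+//    if err == nil {
+//        fmt.Println(*out.HealthCheckCount)
+//    }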
+func (c *Route53) GetHealthCheckCount(input *GetHealthCheckCountInput) (*GetHealthCheckCountOutput, error) { + req, out := c.GetHealthCheckCountRequest(input) + err := req.Send() + return out, err +} + +const opGetHealthCheckLastFailureReason = "GetHealthCheckLastFailureReason" + +// GetHealthCheckLastFailureReasonRequest generates a "aws/request.Request" representing the +// client's request for the GetHealthCheckLastFailureReason operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetHealthCheckLastFailureReason method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetHealthCheckLastFailureReasonRequest method. +// req, resp := client.GetHealthCheckLastFailureReasonRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) GetHealthCheckLastFailureReasonRequest(input *GetHealthCheckLastFailureReasonInput) (req *request.Request, output *GetHealthCheckLastFailureReasonOutput) { + op := &request.Operation{ + Name: opGetHealthCheckLastFailureReason, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/healthcheck/{HealthCheckId}/lastfailurereason", + } + + if input == nil { + input = &GetHealthCheckLastFailureReasonInput{} + } + + req = c.newRequest(op, input, output) + output = &GetHealthCheckLastFailureReasonOutput{} + req.Data = output + return +} + +// If you want to learn why a health check is currently failing or why it failed +// most recently (if at all), you can get the failure reason for the most recent +// failure. Send a GET request to the /Route 53 API version/healthcheck/health +// check ID/lastfailurereason resource. +func (c *Route53) GetHealthCheckLastFailureReason(input *GetHealthCheckLastFailureReasonInput) (*GetHealthCheckLastFailureReasonOutput, error) { + req, out := c.GetHealthCheckLastFailureReasonRequest(input) + err := req.Send() + return out, err +} + +const opGetHealthCheckStatus = "GetHealthCheckStatus" + +// GetHealthCheckStatusRequest generates a "aws/request.Request" representing the +// client's request for the GetHealthCheckStatus operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetHealthCheckStatus method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetHealthCheckStatusRequest method. 
+// req, resp := client.GetHealthCheckStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) GetHealthCheckStatusRequest(input *GetHealthCheckStatusInput) (req *request.Request, output *GetHealthCheckStatusOutput) { + op := &request.Operation{ + Name: opGetHealthCheckStatus, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/healthcheck/{HealthCheckId}/status", + } + + if input == nil { + input = &GetHealthCheckStatusInput{} + } + + req = c.newRequest(op, input, output) + output = &GetHealthCheckStatusOutput{} + req.Data = output + return +} + +// To retrieve the health check status, send a GET request to the /Route 53 +// API version/healthcheck/health check ID/status resource. You can use this +// call to get a health check's current status. +func (c *Route53) GetHealthCheckStatus(input *GetHealthCheckStatusInput) (*GetHealthCheckStatusOutput, error) { + req, out := c.GetHealthCheckStatusRequest(input) + err := req.Send() + return out, err +} + +const opGetHostedZone = "GetHostedZone" + +// GetHostedZoneRequest generates a "aws/request.Request" representing the +// client's request for the GetHostedZone operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetHostedZone method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetHostedZoneRequest method. +// req, resp := client.GetHostedZoneRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) GetHostedZoneRequest(input *GetHostedZoneInput) (req *request.Request, output *GetHostedZoneOutput) { + op := &request.Operation{ + Name: opGetHostedZone, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzone/{Id}", + } + + if input == nil { + input = &GetHostedZoneInput{} + } + + req = c.newRequest(op, input, output) + output = &GetHostedZoneOutput{} + req.Data = output + return +} + +// To retrieve the delegation set for a hosted zone, send a GET request to the +// /Route 53 API version/hostedzone/hosted zone ID resource. The delegation +// set is the four Amazon Route 53 name servers that were assigned to the hosted +// zone when you created it. +func (c *Route53) GetHostedZone(input *GetHostedZoneInput) (*GetHostedZoneOutput, error) { + req, out := c.GetHostedZoneRequest(input) + err := req.Send() + return out, err +} + +const opGetHostedZoneCount = "GetHostedZoneCount" + +// GetHostedZoneCountRequest generates a "aws/request.Request" representing the +// client's request for the GetHostedZoneCount operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the GetHostedZoneCount method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetHostedZoneCountRequest method. +// req, resp := client.GetHostedZoneCountRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) GetHostedZoneCountRequest(input *GetHostedZoneCountInput) (req *request.Request, output *GetHostedZoneCountOutput) { + op := &request.Operation{ + Name: opGetHostedZoneCount, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzonecount", + } + + if input == nil { + input = &GetHostedZoneCountInput{} + } + + req = c.newRequest(op, input, output) + output = &GetHostedZoneCountOutput{} + req.Data = output + return +} + +// To retrieve a count of all your hosted zones, send a GET request to the /Route +// 53 API version/hostedzonecount resource. +func (c *Route53) GetHostedZoneCount(input *GetHostedZoneCountInput) (*GetHostedZoneCountOutput, error) { + req, out := c.GetHostedZoneCountRequest(input) + err := req.Send() + return out, err +} + +const opGetReusableDelegationSet = "GetReusableDelegationSet" + +// GetReusableDelegationSetRequest generates a "aws/request.Request" representing the +// client's request for the GetReusableDelegationSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetReusableDelegationSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetReusableDelegationSetRequest method. +// req, resp := client.GetReusableDelegationSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) GetReusableDelegationSetRequest(input *GetReusableDelegationSetInput) (req *request.Request, output *GetReusableDelegationSetOutput) { + op := &request.Operation{ + Name: opGetReusableDelegationSet, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/delegationset/{Id}", + } + + if input == nil { + input = &GetReusableDelegationSetInput{} + } + + req = c.newRequest(op, input, output) + output = &GetReusableDelegationSetOutput{} + req.Data = output + return +} + +// To retrieve the reusable delegation set, send a GET request to the /Route +// 53 API version/delegationset/delegation set ID resource. +func (c *Route53) GetReusableDelegationSet(input *GetReusableDelegationSetInput) (*GetReusableDelegationSetOutput, error) { + req, out := c.GetReusableDelegationSetRequest(input) + err := req.Send() + return out, err +} + +const opGetTrafficPolicy = "GetTrafficPolicy" + +// GetTrafficPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetTrafficPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
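+//
+//    // Editor's sketch, not generated code: a traffic policy is addressed by
+//    // both Id and Version. "svc" is assumed to be a *Route53 client; the Id
+//    // value is a placeholder.
+//    out, err := svc.GetTrafficPolicy(&route53.GetTrafficPolicyInput{
+//        Id:      aws.String("tp-1234"),
+//        Version: aws.Int64(1),
+//    })
+//    if err == nil {
+//        fmt.Println(out.TrafficPolicy)
+//    }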
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetTrafficPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetTrafficPolicyRequest method. +// req, resp := client.GetTrafficPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) GetTrafficPolicyRequest(input *GetTrafficPolicyInput) (req *request.Request, output *GetTrafficPolicyOutput) { + op := &request.Operation{ + Name: opGetTrafficPolicy, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/trafficpolicy/{Id}/{Version}", + } + + if input == nil { + input = &GetTrafficPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetTrafficPolicyOutput{} + req.Data = output + return +} + +// Gets information about a specific traffic policy version. To get the information, +// send a GET request to the /Route 53 API version/trafficpolicy resource. +func (c *Route53) GetTrafficPolicy(input *GetTrafficPolicyInput) (*GetTrafficPolicyOutput, error) { + req, out := c.GetTrafficPolicyRequest(input) + err := req.Send() + return out, err +} + +const opGetTrafficPolicyInstance = "GetTrafficPolicyInstance" + +// GetTrafficPolicyInstanceRequest generates a "aws/request.Request" representing the +// client's request for the GetTrafficPolicyInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetTrafficPolicyInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetTrafficPolicyInstanceRequest method. +// req, resp := client.GetTrafficPolicyInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) GetTrafficPolicyInstanceRequest(input *GetTrafficPolicyInstanceInput) (req *request.Request, output *GetTrafficPolicyInstanceOutput) { + op := &request.Operation{ + Name: opGetTrafficPolicyInstance, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/trafficpolicyinstance/{Id}", + } + + if input == nil { + input = &GetTrafficPolicyInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &GetTrafficPolicyInstanceOutput{} + req.Data = output + return +} + +// Gets information about a specified traffic policy instance. +// +// To get information about the traffic policy instance, send a GET request +// to the /Route 53 API version/trafficpolicyinstance resource. +// +// After you submit a CreateTrafficPolicyInstance or an UpdateTrafficPolicyInstance +// request, there's a brief delay while Amazon Route 53 creates the resource +// record sets that are specified in the traffic policy definition. 
For more +// information, see the State response element. +func (c *Route53) GetTrafficPolicyInstance(input *GetTrafficPolicyInstanceInput) (*GetTrafficPolicyInstanceOutput, error) { + req, out := c.GetTrafficPolicyInstanceRequest(input) + err := req.Send() + return out, err +} + +const opGetTrafficPolicyInstanceCount = "GetTrafficPolicyInstanceCount" + +// GetTrafficPolicyInstanceCountRequest generates a "aws/request.Request" representing the +// client's request for the GetTrafficPolicyInstanceCount operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetTrafficPolicyInstanceCount method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetTrafficPolicyInstanceCountRequest method. +// req, resp := client.GetTrafficPolicyInstanceCountRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) GetTrafficPolicyInstanceCountRequest(input *GetTrafficPolicyInstanceCountInput) (req *request.Request, output *GetTrafficPolicyInstanceCountOutput) { + op := &request.Operation{ + Name: opGetTrafficPolicyInstanceCount, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/trafficpolicyinstancecount", + } + + if input == nil { + input = &GetTrafficPolicyInstanceCountInput{} + } + + req = c.newRequest(op, input, output) + output = &GetTrafficPolicyInstanceCountOutput{} + req.Data = output + return +} + +// Gets the number of traffic policy instances that are associated with the +// current AWS account. +// +// To get the number of traffic policy instances, send a GET request to the +// /Route 53 API version/trafficpolicyinstancecount resource. +func (c *Route53) GetTrafficPolicyInstanceCount(input *GetTrafficPolicyInstanceCountInput) (*GetTrafficPolicyInstanceCountOutput, error) { + req, out := c.GetTrafficPolicyInstanceCountRequest(input) + err := req.Send() + return out, err +} + +const opListChangeBatchesByHostedZone = "ListChangeBatchesByHostedZone" + +// ListChangeBatchesByHostedZoneRequest generates a "aws/request.Request" representing the +// client's request for the ListChangeBatchesByHostedZone operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListChangeBatchesByHostedZone method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListChangeBatchesByHostedZoneRequest method. 
+// req, resp := client.ListChangeBatchesByHostedZoneRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) ListChangeBatchesByHostedZoneRequest(input *ListChangeBatchesByHostedZoneInput) (req *request.Request, output *ListChangeBatchesByHostedZoneOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, ListChangeBatchesByHostedZone, has been deprecated") + } + op := &request.Operation{ + Name: opListChangeBatchesByHostedZone, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzone/{Id}/changes", + } + + if input == nil { + input = &ListChangeBatchesByHostedZoneInput{} + } + + req = c.newRequest(op, input, output) + output = &ListChangeBatchesByHostedZoneOutput{} + req.Data = output + return +} + +// This action gets the list of ChangeBatches in a given time period for a given +// hosted zone. +func (c *Route53) ListChangeBatchesByHostedZone(input *ListChangeBatchesByHostedZoneInput) (*ListChangeBatchesByHostedZoneOutput, error) { + req, out := c.ListChangeBatchesByHostedZoneRequest(input) + err := req.Send() + return out, err +} + +const opListChangeBatchesByRRSet = "ListChangeBatchesByRRSet" + +// ListChangeBatchesByRRSetRequest generates a "aws/request.Request" representing the +// client's request for the ListChangeBatchesByRRSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListChangeBatchesByRRSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListChangeBatchesByRRSetRequest method. +// req, resp := client.ListChangeBatchesByRRSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) ListChangeBatchesByRRSetRequest(input *ListChangeBatchesByRRSetInput) (req *request.Request, output *ListChangeBatchesByRRSetOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, ListChangeBatchesByRRSet, has been deprecated") + } + op := &request.Operation{ + Name: opListChangeBatchesByRRSet, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzone/{Id}/rrsChanges", + } + + if input == nil { + input = &ListChangeBatchesByRRSetInput{} + } + + req = c.newRequest(op, input, output) + output = &ListChangeBatchesByRRSetOutput{} + req.Data = output + return +} + +// This action gets the list of ChangeBatches in a given time period for a given +// hosted zone and RRSet. +func (c *Route53) ListChangeBatchesByRRSet(input *ListChangeBatchesByRRSetInput) (*ListChangeBatchesByRRSetOutput, error) { + req, out := c.ListChangeBatchesByRRSetRequest(input) + err := req.Send() + return out, err +} + +const opListGeoLocations = "ListGeoLocations" + +// ListGeoLocationsRequest generates a "aws/request.Request" representing the +// client's request for the ListGeoLocations operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListGeoLocations method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListGeoLocationsRequest method. +// req, resp := client.ListGeoLocationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) ListGeoLocationsRequest(input *ListGeoLocationsInput) (req *request.Request, output *ListGeoLocationsOutput) { + op := &request.Operation{ + Name: opListGeoLocations, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/geolocations", + } + + if input == nil { + input = &ListGeoLocationsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListGeoLocationsOutput{} + req.Data = output + return +} + +// To retrieve a list of supported geo locations, send a GET request to the +// /Route 53 API version/geolocations resource. The response to this request +// includes a GeoLocationDetailsList element with zero, one, or multiple GeoLocationDetails +// child elements. The list is sorted by country code, and then subdivision +// code, followed by continents at the end of the list. +// +// By default, the list of geo locations is displayed on a single page. You +// can control the length of the page that is displayed by using the MaxItems +// parameter. If the list is truncated, IsTruncated will be set to true and +// a combination of NextContinentCode, NextCountryCode, NextSubdivisionCode +// will be populated. You can pass these as parameters to StartContinentCode, +// StartCountryCode, StartSubdivisionCode to control the geo location that the +// list begins with. +func (c *Route53) ListGeoLocations(input *ListGeoLocationsInput) (*ListGeoLocationsOutput, error) { + req, out := c.ListGeoLocationsRequest(input) + err := req.Send() + return out, err +} + +const opListHealthChecks = "ListHealthChecks" + +// ListHealthChecksRequest generates a "aws/request.Request" representing the +// client's request for the ListHealthChecks operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListHealthChecks method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListHealthChecksRequest method. 
+// req, resp := client.ListHealthChecksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) ListHealthChecksRequest(input *ListHealthChecksInput) (req *request.Request, output *ListHealthChecksOutput) { + op := &request.Operation{ + Name: opListHealthChecks, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/healthcheck", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListHealthChecksInput{} + } + + req = c.newRequest(op, input, output) + output = &ListHealthChecksOutput{} + req.Data = output + return +} + +// To retrieve a list of your health checks, send a GET request to the /Route +// 53 API version/healthcheck resource. The response to this request includes +// a HealthChecks element with zero, one, or multiple HealthCheck child elements. +// By default, the list of health checks is displayed on a single page. You +// can control the length of the page that is displayed by using the MaxItems +// parameter. You can use the Marker parameter to control the health check that +// the list begins with. +// +// Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to +// a value greater than 100, Amazon Route 53 returns only the first 100. +func (c *Route53) ListHealthChecks(input *ListHealthChecksInput) (*ListHealthChecksOutput, error) { + req, out := c.ListHealthChecksRequest(input) + err := req.Send() + return out, err +} + +// ListHealthChecksPages iterates over the pages of a ListHealthChecks operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListHealthChecks method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListHealthChecks operation. +// pageNum := 0 +// err := client.ListHealthChecksPages(params, +// func(page *ListHealthChecksOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Route53) ListHealthChecksPages(input *ListHealthChecksInput, fn func(p *ListHealthChecksOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListHealthChecksRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListHealthChecksOutput), lastPage) + }) +} + +const opListHostedZones = "ListHostedZones" + +// ListHostedZonesRequest generates a "aws/request.Request" representing the +// client's request for the ListHostedZones operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListHostedZones method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListHostedZonesRequest method. 
+// req, resp := client.ListHostedZonesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) ListHostedZonesRequest(input *ListHostedZonesInput) (req *request.Request, output *ListHostedZonesOutput) { + op := &request.Operation{ + Name: opListHostedZones, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzone", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListHostedZonesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListHostedZonesOutput{} + req.Data = output + return +} + +// To retrieve a list of your hosted zones, send a GET request to the /Route +// 53 API version/hostedzone resource. The response to this request includes +// a HostedZones element with zero, one, or multiple HostedZone child elements. +// By default, the list of hosted zones is displayed on a single page. You can +// control the length of the page that is displayed by using the MaxItems parameter. +// You can use the Marker parameter to control the hosted zone that the list +// begins with. +// +// Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to +// a value greater than 100, Amazon Route 53 returns only the first 100. +func (c *Route53) ListHostedZones(input *ListHostedZonesInput) (*ListHostedZonesOutput, error) { + req, out := c.ListHostedZonesRequest(input) + err := req.Send() + return out, err +} + +// ListHostedZonesPages iterates over the pages of a ListHostedZones operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListHostedZones method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListHostedZones operation. +// pageNum := 0 +// err := client.ListHostedZonesPages(params, +// func(page *ListHostedZonesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Route53) ListHostedZonesPages(input *ListHostedZonesInput, fn func(p *ListHostedZonesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListHostedZonesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListHostedZonesOutput), lastPage) + }) +} + +const opListHostedZonesByName = "ListHostedZonesByName" + +// ListHostedZonesByNameRequest generates a "aws/request.Request" representing the +// client's request for the ListHostedZonesByName operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListHostedZonesByName method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListHostedZonesByNameRequest method. 
+// req, resp := client.ListHostedZonesByNameRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) ListHostedZonesByNameRequest(input *ListHostedZonesByNameInput) (req *request.Request, output *ListHostedZonesByNameOutput) { + op := &request.Operation{ + Name: opListHostedZonesByName, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzonesbyname", + } + + if input == nil { + input = &ListHostedZonesByNameInput{} + } + + req = c.newRequest(op, input, output) + output = &ListHostedZonesByNameOutput{} + req.Data = output + return +} + +// To retrieve a list of your hosted zones in lexicographic order, send a GET +// request to the /Route 53 API version/hostedzonesbyname resource. The response +// to this request includes a HostedZones element with zero or more HostedZone +// child elements lexicographically ordered by DNS name. By default, the list +// of hosted zones is displayed on a single page. You can control the length +// of the page that is displayed by using the MaxItems parameter. You can use +// the DNSName and HostedZoneId parameters to control the hosted zone that the +// list begins with. +// +// Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to +// a value greater than 100, Amazon Route 53 returns only the first 100. +func (c *Route53) ListHostedZonesByName(input *ListHostedZonesByNameInput) (*ListHostedZonesByNameOutput, error) { + req, out := c.ListHostedZonesByNameRequest(input) + err := req.Send() + return out, err +} + +const opListResourceRecordSets = "ListResourceRecordSets" + +// ListResourceRecordSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListResourceRecordSets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListResourceRecordSets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListResourceRecordSetsRequest method. +// req, resp := client.ListResourceRecordSetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) ListResourceRecordSetsRequest(input *ListResourceRecordSetsInput) (req *request.Request, output *ListResourceRecordSetsOutput) { + op := &request.Operation{ + Name: opListResourceRecordSets, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzone/{Id}/rrset", + Paginator: &request.Paginator{ + InputTokens: []string{"StartRecordName", "StartRecordType", "StartRecordIdentifier"}, + OutputTokens: []string{"NextRecordName", "NextRecordType", "NextRecordIdentifier"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListResourceRecordSetsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListResourceRecordSetsOutput{} + req.Data = output + return +} + +// List the resource record sets in a specified hosted zone. Send a GET request +// to the 2013-04-01/hostedzone/hosted zone ID/rrset resource. 
+// +// ListResourceRecordSets returns up to 100 resource record sets at a time +// in ASCII order, beginning at a position specified by the name and type elements. +// The action sorts results first by DNS name with the labels reversed, for +// example: +// +// com.example.www. +// +// Note the trailing dot, which can change the sort order in some circumstances. +// When multiple records have the same DNS name, the action sorts results by +// the record type. +// +// You can use the name and type elements to adjust the beginning position +// of the list of resource record sets returned: +// +// If you do not specify Name or Type: The results begin with the first resource +// record set that the hosted zone contains. If you specify Name but not Type: +// The results begin with the first resource record set in the list whose name +// is greater than or equal to Name. If you specify Type but not Name: Amazon +// Route 53 returns the InvalidInput error. If you specify both Name and Type: +// The results begin with the first resource record set in the list whose name +// is greater than or equal to Name, and whose type is greater than or equal +// to Type. This action returns the most current version of the records. This +// includes records that are PENDING, and that are not yet available on all +// Amazon Route 53 DNS servers. +// +// To ensure that you get an accurate listing of the resource record sets for +// a hosted zone at a point in time, do not submit a ChangeResourceRecordSets +// request while you are paging through the results of a ListResourceRecordSets +// request. If you do, some pages may display results without the latest changes +// while other pages display results with the latest changes. +func (c *Route53) ListResourceRecordSets(input *ListResourceRecordSetsInput) (*ListResourceRecordSetsOutput, error) { + req, out := c.ListResourceRecordSetsRequest(input) + err := req.Send() + return out, err +} + +// ListResourceRecordSetsPages iterates over the pages of a ListResourceRecordSets operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListResourceRecordSets method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListResourceRecordSets operation. +// pageNum := 0 +// err := client.ListResourceRecordSetsPages(params, +// func(page *ListResourceRecordSetsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Route53) ListResourceRecordSetsPages(input *ListResourceRecordSetsInput, fn func(p *ListResourceRecordSetsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListResourceRecordSetsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListResourceRecordSetsOutput), lastPage) + }) +} + +const opListReusableDelegationSets = "ListReusableDelegationSets" + +// ListReusableDelegationSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListReusableDelegationSets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListReusableDelegationSets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListReusableDelegationSetsRequest method. +// req, resp := client.ListReusableDelegationSetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) ListReusableDelegationSetsRequest(input *ListReusableDelegationSetsInput) (req *request.Request, output *ListReusableDelegationSetsOutput) { + op := &request.Operation{ + Name: opListReusableDelegationSets, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/delegationset", + } + + if input == nil { + input = &ListReusableDelegationSetsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListReusableDelegationSetsOutput{} + req.Data = output + return +} + +// To retrieve a list of your reusable delegation sets, send a GET request to +// the /Route 53 API version/delegationset resource. The response to this request +// includes a DelegationSets element with zero, one, or multiple DelegationSet +// child elements. By default, the list of delegation sets is displayed on a +// single page. You can control the length of the page that is displayed by +// using the MaxItems parameter. You can use the Marker parameter to control +// the delegation set that the list begins with. +// +// Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to +// a value greater than 100, Amazon Route 53 returns only the first 100. +func (c *Route53) ListReusableDelegationSets(input *ListReusableDelegationSetsInput) (*ListReusableDelegationSetsOutput, error) { + req, out := c.ListReusableDelegationSetsRequest(input) + err := req.Send() + return out, err +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTagsForResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTagsForResourceRequest method. 
+//    req, resp := client.ListTagsForResourceRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Route53) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) {
+    op := &request.Operation{
+        Name:       opListTagsForResource,
+        HTTPMethod: "GET",
+        HTTPPath:   "/2013-04-01/tags/{ResourceType}/{ResourceId}",
+    }
+
+    if input == nil {
+        input = &ListTagsForResourceInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &ListTagsForResourceOutput{}
+    req.Data = output
+    return
+}
+
+// ListTagsForResource lists the tags for one health check or hosted zone.
+func (c *Route53) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) {
+    req, out := c.ListTagsForResourceRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opListTagsForResources = "ListTagsForResources"
+
+// ListTagsForResourcesRequest generates a "aws/request.Request" representing the
+// client's request for the ListTagsForResources operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ListTagsForResources method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the ListTagsForResourcesRequest method.
+//    req, resp := client.ListTagsForResourcesRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Route53) ListTagsForResourcesRequest(input *ListTagsForResourcesInput) (req *request.Request, output *ListTagsForResourcesOutput) {
+    op := &request.Operation{
+        Name:       opListTagsForResources,
+        HTTPMethod: "POST",
+        HTTPPath:   "/2013-04-01/tags/{ResourceType}",
+    }
+
+    if input == nil {
+        input = &ListTagsForResourcesInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &ListTagsForResourcesOutput{}
+    req.Data = output
+    return
+}
+
+// ListTagsForResources lists the tags for up to 10 health checks or hosted zones.
+func (c *Route53) ListTagsForResources(input *ListTagsForResourcesInput) (*ListTagsForResourcesOutput, error) {
+    req, out := c.ListTagsForResourcesRequest(input)
+    err := req.Send()
+    return out, err
+}
+
+const opListTrafficPolicies = "ListTrafficPolicies"
+
+// ListTrafficPoliciesRequest generates a "aws/request.Request" representing the
+// client's request for the ListTrafficPolicies operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ListTrafficPolicies method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the ListTrafficPoliciesRequest method.
+// req, resp := client.ListTrafficPoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) ListTrafficPoliciesRequest(input *ListTrafficPoliciesInput) (req *request.Request, output *ListTrafficPoliciesOutput) { + op := &request.Operation{ + Name: opListTrafficPolicies, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/trafficpolicies", + } + + if input == nil { + input = &ListTrafficPoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTrafficPoliciesOutput{} + req.Data = output + return +} + +// Gets information about the latest version for every traffic policy that is +// associated with the current AWS account. To get the information, send a GET +// request to the /Route 53 API version/trafficpolicy resource. +// +// Amazon Route 53 returns a maximum of 100 items in each response. If you +// have a lot of traffic policies, you can use the maxitems parameter to list +// them in groups of up to 100. +// +// The response includes three values that help you navigate from one group +// of maxitems traffic policies to the next: +// +// IsTruncated If the value of IsTruncated in the response is true, there +// are more traffic policies associated with the current AWS account. +// +// If IsTruncated is false, this response includes the last traffic policy +// that is associated with the current account. +// +// TrafficPolicyIdMarker If IsTruncated is true, TrafficPolicyIdMarker is the +// ID of the first traffic policy in the next group of MaxItems traffic policies. +// If you want to list more traffic policies, make another call to ListTrafficPolicies, +// and specify the value of the TrafficPolicyIdMarker element from the response +// in the TrafficPolicyIdMarker request parameter. +// +// If IsTruncated is false, the TrafficPolicyIdMarker element is omitted from +// the response. +// +// MaxItems The value that you specified for the MaxItems parameter in the +// request that produced the current response. +func (c *Route53) ListTrafficPolicies(input *ListTrafficPoliciesInput) (*ListTrafficPoliciesOutput, error) { + req, out := c.ListTrafficPoliciesRequest(input) + err := req.Send() + return out, err +} + +const opListTrafficPolicyInstances = "ListTrafficPolicyInstances" + +// ListTrafficPolicyInstancesRequest generates a "aws/request.Request" representing the +// client's request for the ListTrafficPolicyInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTrafficPolicyInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTrafficPolicyInstancesRequest method. 
+// req, resp := client.ListTrafficPolicyInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) ListTrafficPolicyInstancesRequest(input *ListTrafficPolicyInstancesInput) (req *request.Request, output *ListTrafficPolicyInstancesOutput) { + op := &request.Operation{ + Name: opListTrafficPolicyInstances, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/trafficpolicyinstances", + } + + if input == nil { + input = &ListTrafficPolicyInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTrafficPolicyInstancesOutput{} + req.Data = output + return +} + +// Gets information about the traffic policy instances that you created by using +// the current AWS account. +// +// After you submit an UpdateTrafficPolicyInstance request, there's a brief +// delay while Amazon Route 53 creates the resource record sets that are specified +// in the traffic policy definition. For more information, see the State response +// element. To get information about the traffic policy instances that are associated +// with the current AWS account, send a GET request to the /Route 53 API version/trafficpolicyinstance +// resource. +// +// Amazon Route 53 returns a maximum of 100 items in each response. If you +// have a lot of traffic policy instances, you can use the MaxItems parameter +// to list them in groups of up to 100. +// +// The response includes five values that help you navigate from one group +// of MaxItems traffic policy instances to the next: +// +// IsTruncated If the value of IsTruncated in the response is true, there +// are more traffic policy instances associated with the current AWS account. +// +// If IsTruncated is false, this response includes the last traffic policy +// instance that is associated with the current account. +// +// MaxItems The value that you specified for the MaxItems parameter in the +// request that produced the current response. +// +// HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker +// If IsTruncated is true, these three values in the response represent the +// first traffic policy instance in the next group of MaxItems traffic policy +// instances. To list more traffic policy instances, make another call to ListTrafficPolicyInstances, +// and specify these values in the corresponding request parameters. +// +// If IsTruncated is false, all three elements are omitted from the response. +func (c *Route53) ListTrafficPolicyInstances(input *ListTrafficPolicyInstancesInput) (*ListTrafficPolicyInstancesOutput, error) { + req, out := c.ListTrafficPolicyInstancesRequest(input) + err := req.Send() + return out, err +} + +const opListTrafficPolicyInstancesByHostedZone = "ListTrafficPolicyInstancesByHostedZone" + +// ListTrafficPolicyInstancesByHostedZoneRequest generates a "aws/request.Request" representing the +// client's request for the ListTrafficPolicyInstancesByHostedZone operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTrafficPolicyInstancesByHostedZone method directly +// instead. 
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the ListTrafficPolicyInstancesByHostedZoneRequest method.
+// req, resp := client.ListTrafficPolicyInstancesByHostedZoneRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *Route53) ListTrafficPolicyInstancesByHostedZoneRequest(input *ListTrafficPolicyInstancesByHostedZoneInput) (req *request.Request, output *ListTrafficPolicyInstancesByHostedZoneOutput) {
+ op := &request.Operation{
+ Name: opListTrafficPolicyInstancesByHostedZone,
+ HTTPMethod: "GET",
+ HTTPPath: "/2013-04-01/trafficpolicyinstances/hostedzone",
+ }
+
+ if input == nil {
+ input = &ListTrafficPolicyInstancesByHostedZoneInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ListTrafficPolicyInstancesByHostedZoneOutput{}
+ req.Data = output
+ return
+}
+
+// Gets information about the traffic policy instances that you created in a
+// specified hosted zone.
+//
+// After you submit an UpdateTrafficPolicyInstance request, there's a brief
+// delay while Amazon Route 53 creates the resource record sets that are specified
+// in the traffic policy definition. For more information, see the State response
+// element. To get information about the traffic policy instances that you created
+// in a specified hosted zone, send a GET request to the /Route 53 API version/trafficpolicyinstance
+// resource and include the ID of the hosted zone.
+//
+// Amazon Route 53 returns a maximum of 100 items in each response. If you
+// have a lot of traffic policy instances, you can use the MaxItems parameter
+// to list them in groups of up to 100.
+//
+// The response includes four values that help you navigate from one group
+// of MaxItems traffic policy instances to the next:
+//
+// IsTruncated If the value of IsTruncated in the response is true, there
+// are more traffic policy instances associated with the current AWS account.
+//
+// If IsTruncated is false, this response includes the last traffic policy
+// instance that is associated with the current account.
+//
+// MaxItems The value that you specified for the MaxItems parameter in the
+// request that produced the current response.
+//
+// TrafficPolicyInstanceNameMarker and TrafficPolicyInstanceTypeMarker If IsTruncated
+// is true, these two values in the response represent the first traffic policy
+// instance in the next group of MaxItems traffic policy instances. To list
+// more traffic policy instances, make another call to ListTrafficPolicyInstancesByHostedZone,
+// and specify these values in the corresponding request parameters.
+//
+// If IsTruncated is false, both elements are omitted from the response.
+func (c *Route53) ListTrafficPolicyInstancesByHostedZone(input *ListTrafficPolicyInstancesByHostedZoneInput) (*ListTrafficPolicyInstancesByHostedZoneOutput, error) {
+ req, out := c.ListTrafficPolicyInstancesByHostedZoneRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opListTrafficPolicyInstancesByPolicy = "ListTrafficPolicyInstancesByPolicy"
+
+// ListTrafficPolicyInstancesByPolicyRequest generates a "aws/request.Request" representing the
+// client's request for the ListTrafficPolicyInstancesByPolicy operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ListTrafficPolicyInstancesByPolicy method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the ListTrafficPolicyInstancesByPolicyRequest method.
+// req, resp := client.ListTrafficPolicyInstancesByPolicyRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *Route53) ListTrafficPolicyInstancesByPolicyRequest(input *ListTrafficPolicyInstancesByPolicyInput) (req *request.Request, output *ListTrafficPolicyInstancesByPolicyOutput) {
+ op := &request.Operation{
+ Name: opListTrafficPolicyInstancesByPolicy,
+ HTTPMethod: "GET",
+ HTTPPath: "/2013-04-01/trafficpolicyinstances/trafficpolicy",
+ }
+
+ if input == nil {
+ input = &ListTrafficPolicyInstancesByPolicyInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ListTrafficPolicyInstancesByPolicyOutput{}
+ req.Data = output
+ return
+}
+
+// Gets information about the traffic policy instances that you created by using
+// a specified traffic policy version.
+//
+// After you submit a CreateTrafficPolicyInstance or an UpdateTrafficPolicyInstance
+// request, there's a brief delay while Amazon Route 53 creates the resource
+// record sets that are specified in the traffic policy definition. For more
+// information, see the State response element. To get information about the
+// traffic policy instances that you created by using a specified traffic policy
+// version, send a GET request to the /Route 53 API version/trafficpolicyinstance
+// resource and include the ID and version of the traffic policy.
+//
+// Amazon Route 53 returns a maximum of 100 items in each response. If you
+// have a lot of traffic policy instances, you can use the MaxItems parameter
+// to list them in groups of up to 100.
+//
+// The response includes five values that help you navigate from one group
+// of MaxItems traffic policy instances to the next:
+//
+// IsTruncated If the value of IsTruncated in the response is true, there
+// are more traffic policy instances associated with the specified traffic policy.
+//
+// If IsTruncated is false, this response includes the last traffic policy
+// instance that is associated with the specified traffic policy.
+//
+// MaxItems The value that you specified for the MaxItems parameter in the
+// request that produced the current response.
+//
+// HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker
+// If IsTruncated is true, these values in the response represent the first
+// traffic policy instance in the next group of MaxItems traffic policy instances.
+// To list more traffic policy instances, make another call to ListTrafficPolicyInstancesByPolicy,
+// and specify these values in the corresponding request parameters.
+//
+// If IsTruncated is false, all three elements are omitted from the response.
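+//
+// // Example: paging through every traffic policy instance for one policy by
+// // following the marker values described above. A minimal sketch in the
+// // style of the examples in this file; "policyID" is a placeholder value.
+// params := &route53.ListTrafficPolicyInstancesByPolicyInput{
+//     TrafficPolicyId:      aws.String("policyID"),
+//     TrafficPolicyVersion: aws.Int64(1),
+// }
+// for {
+//     resp, err := client.ListTrafficPolicyInstancesByPolicy(params)
+//     if err != nil {
+//         break
+//     }
+//     fmt.Println(resp.TrafficPolicyInstances)
+//     if resp.IsTruncated == nil || !*resp.IsTruncated {
+//         break
+//     }
+//     // Carry all three markers into the next request.
+//     params.HostedZoneIdMarker = resp.HostedZoneIdMarker
+//     params.TrafficPolicyInstanceNameMarker = resp.TrafficPolicyInstanceNameMarker
+//     params.TrafficPolicyInstanceTypeMarker = resp.TrafficPolicyInstanceTypeMarker
+// }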
+func (c *Route53) ListTrafficPolicyInstancesByPolicy(input *ListTrafficPolicyInstancesByPolicyInput) (*ListTrafficPolicyInstancesByPolicyOutput, error) {
+ req, out := c.ListTrafficPolicyInstancesByPolicyRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opListTrafficPolicyVersions = "ListTrafficPolicyVersions"
+
+// ListTrafficPolicyVersionsRequest generates a "aws/request.Request" representing the
+// client's request for the ListTrafficPolicyVersions operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ListTrafficPolicyVersions method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the ListTrafficPolicyVersionsRequest method.
+// req, resp := client.ListTrafficPolicyVersionsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *Route53) ListTrafficPolicyVersionsRequest(input *ListTrafficPolicyVersionsInput) (req *request.Request, output *ListTrafficPolicyVersionsOutput) {
+ op := &request.Operation{
+ Name: opListTrafficPolicyVersions,
+ HTTPMethod: "GET",
+ HTTPPath: "/2013-04-01/trafficpolicies/{Id}/versions",
+ }
+
+ if input == nil {
+ input = &ListTrafficPolicyVersionsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ListTrafficPolicyVersionsOutput{}
+ req.Data = output
+ return
+}
+
+// Gets information about all of the versions for a specified traffic policy.
+// ListTrafficPolicyVersions lists only versions that have not been deleted.
+//
+// Amazon Route 53 returns a maximum of 100 items in each response. If you
+// have a lot of traffic policies, you can use the maxitems parameter to list
+// them in groups of up to 100.
+//
+// The response includes three values that help you navigate from one group
+// of maxitems traffic policies to the next:
+//
+// IsTruncated If the value of IsTruncated in the response is true, there
+// are more traffic policy versions associated with the specified traffic policy.
+//
+// If IsTruncated is false, this response includes the last traffic policy
+// version that is associated with the specified traffic policy.
+//
+// TrafficPolicyVersionMarker The ID of the next traffic policy version that
+// is associated with the current AWS account. If you want to list more traffic
+// policies, make another call to ListTrafficPolicyVersions, and specify the
+// value of the TrafficPolicyVersionMarker element in the TrafficPolicyVersionMarker
+// request parameter.
+//
+// If IsTruncated is false, Amazon Route 53 omits the TrafficPolicyVersionMarker
+// element from the response.
+//
+// MaxItems The value that you specified for the MaxItems parameter in the
+// request that produced the current response.
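+//
+// // Example: listing all remaining versions of one traffic policy by
+// // following TrafficPolicyVersionMarker as described above. A minimal
+// // sketch; "policyID" is a placeholder value.
+// params := &route53.ListTrafficPolicyVersionsInput{
+//     Id: aws.String("policyID"),
+// }
+// for {
+//     resp, err := client.ListTrafficPolicyVersions(params)
+//     if err != nil {
+//         break
+//     }
+//     fmt.Println(resp.TrafficPolicies)
+//     if resp.IsTruncated == nil || !*resp.IsTruncated {
+//         break
+//     }
+//     params.TrafficPolicyVersionMarker = resp.TrafficPolicyVersionMarker
+// }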
+func (c *Route53) ListTrafficPolicyVersions(input *ListTrafficPolicyVersionsInput) (*ListTrafficPolicyVersionsOutput, error) { + req, out := c.ListTrafficPolicyVersionsRequest(input) + err := req.Send() + return out, err +} + +const opUpdateHealthCheck = "UpdateHealthCheck" + +// UpdateHealthCheckRequest generates a "aws/request.Request" representing the +// client's request for the UpdateHealthCheck operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateHealthCheck method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateHealthCheckRequest method. +// req, resp := client.UpdateHealthCheckRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) UpdateHealthCheckRequest(input *UpdateHealthCheckInput) (req *request.Request, output *UpdateHealthCheckOutput) { + op := &request.Operation{ + Name: opUpdateHealthCheck, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/healthcheck/{HealthCheckId}", + } + + if input == nil { + input = &UpdateHealthCheckInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateHealthCheckOutput{} + req.Data = output + return +} + +// This action updates an existing health check. +// +// To update a health check, send a POST request to the /Route 53 API version/healthcheck/health +// check ID resource. The request body must include a document with an UpdateHealthCheckRequest +// element. The response returns an UpdateHealthCheckResponse element, which +// contains metadata about the health check. +func (c *Route53) UpdateHealthCheck(input *UpdateHealthCheckInput) (*UpdateHealthCheckOutput, error) { + req, out := c.UpdateHealthCheckRequest(input) + err := req.Send() + return out, err +} + +const opUpdateHostedZoneComment = "UpdateHostedZoneComment" + +// UpdateHostedZoneCommentRequest generates a "aws/request.Request" representing the +// client's request for the UpdateHostedZoneComment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateHostedZoneComment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateHostedZoneCommentRequest method. 
+// req, resp := client.UpdateHostedZoneCommentRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *Route53) UpdateHostedZoneCommentRequest(input *UpdateHostedZoneCommentInput) (req *request.Request, output *UpdateHostedZoneCommentOutput) {
+ op := &request.Operation{
+ Name: opUpdateHostedZoneComment,
+ HTTPMethod: "POST",
+ HTTPPath: "/2013-04-01/hostedzone/{Id}",
+ }
+
+ if input == nil {
+ input = &UpdateHostedZoneCommentInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &UpdateHostedZoneCommentOutput{}
+ req.Data = output
+ return
+}
+
+// To update the hosted zone comment, send a POST request to the /Route 53 API
+// version/hostedzone/hosted zone ID resource. The request body must include
+// a document with an UpdateHostedZoneCommentRequest element. The response to
+// this request includes the modified HostedZone element.
+//
+// The comment can have a maximum length of 256 characters.
+func (c *Route53) UpdateHostedZoneComment(input *UpdateHostedZoneCommentInput) (*UpdateHostedZoneCommentOutput, error) {
+ req, out := c.UpdateHostedZoneCommentRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opUpdateTrafficPolicyComment = "UpdateTrafficPolicyComment"
+
+// UpdateTrafficPolicyCommentRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateTrafficPolicyComment operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the UpdateTrafficPolicyComment method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the UpdateTrafficPolicyCommentRequest method.
+// req, resp := client.UpdateTrafficPolicyCommentRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *Route53) UpdateTrafficPolicyCommentRequest(input *UpdateTrafficPolicyCommentInput) (req *request.Request, output *UpdateTrafficPolicyCommentOutput) {
+ op := &request.Operation{
+ Name: opUpdateTrafficPolicyComment,
+ HTTPMethod: "POST",
+ HTTPPath: "/2013-04-01/trafficpolicy/{Id}/{Version}",
+ }
+
+ if input == nil {
+ input = &UpdateTrafficPolicyCommentInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &UpdateTrafficPolicyCommentOutput{}
+ req.Data = output
+ return
+}
+
+// Updates the comment for a specified traffic policy version.
+//
+// To update the comment, send a POST request to the /Route 53 API version/trafficpolicy/
+// resource.
+//
+// The request body must include a document with an UpdateTrafficPolicyCommentRequest
+// element.
+func (c *Route53) UpdateTrafficPolicyComment(input *UpdateTrafficPolicyCommentInput) (*UpdateTrafficPolicyCommentOutput, error) {
+ req, out := c.UpdateTrafficPolicyCommentRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opUpdateTrafficPolicyInstance = "UpdateTrafficPolicyInstance"
+
+// UpdateTrafficPolicyInstanceRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateTrafficPolicyInstance operation.
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateTrafficPolicyInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateTrafficPolicyInstanceRequest method. +// req, resp := client.UpdateTrafficPolicyInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53) UpdateTrafficPolicyInstanceRequest(input *UpdateTrafficPolicyInstanceInput) (req *request.Request, output *UpdateTrafficPolicyInstanceOutput) { + op := &request.Operation{ + Name: opUpdateTrafficPolicyInstance, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/trafficpolicyinstance/{Id}", + } + + if input == nil { + input = &UpdateTrafficPolicyInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateTrafficPolicyInstanceOutput{} + req.Data = output + return +} + +// Updates the resource record sets in a specified hosted zone that were created +// based on the settings in a specified traffic policy version. +// +// The DNS type of the resource record sets that you're updating must match +// the DNS type in the JSON document that is associated with the traffic policy +// version that you're using to update the traffic policy instance. When you +// update a traffic policy instance, Amazon Route 53 continues to respond to +// DNS queries for the root resource record set name (such as example.com) while +// it replaces one group of resource record sets with another. Amazon Route +// 53 performs the following operations: +// +// Amazon Route 53 creates a new group of resource record sets based on the +// specified traffic policy. This is true regardless of how substantial the +// differences are between the existing resource record sets and the new resource +// record sets. When all of the new resource record sets have been created, +// Amazon Route 53 starts to respond to DNS queries for the root resource record +// set name (such as example.com) by using the new resource record sets. Amazon +// Route 53 deletes the old group of resource record sets that are associated +// with the root resource record set name. To update a traffic policy instance, +// send a POST request to the /Route 53 API version/trafficpolicyinstance/traffic +// policy ID resource. The request body must include a document with an UpdateTrafficPolicyInstanceRequest +// element. +func (c *Route53) UpdateTrafficPolicyInstance(input *UpdateTrafficPolicyInstanceInput) (*UpdateTrafficPolicyInstanceOutput, error) { + req, out := c.UpdateTrafficPolicyInstanceRequest(input) + err := req.Send() + return out, err +} + +// A complex type that contains information to uniquely identify the CloudWatch +// alarm that you're associating with a Route 53 health check. +type AlarmIdentifier struct { + _ struct{} `type:"structure"` + + // The name of the CloudWatch alarm. + Name *string `min:"1" type:"string" required:"true"` + + // The CloudWatchRegion that the CloudWatch alarm was created in. 
+ Region *string `min:"1" type:"string" required:"true" enum:"CloudWatchRegion"` +} + +// String returns the string representation +func (s AlarmIdentifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AlarmIdentifier) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AlarmIdentifier) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AlarmIdentifier"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Region == nil { + invalidParams.Add(request.NewErrParamRequired("Region")) + } + if s.Region != nil && len(*s.Region) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Region", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Alias resource record sets only: Information about the CloudFront distribution, +// ELB load balancer, Amazon S3 bucket, or Amazon Route 53 resource record set +// to which you are routing traffic. +// +// If you're creating resource record sets for a private hosted zone, note +// the following: +// +// You can create alias resource record sets only for Amazon Route 53 resource +// record sets in the same private hosted zone. Creating alias resource record +// sets for CloudFront distributions, ELB load balancers, and Amazon S3 buckets +// is not supported. You can't create alias resource record sets for failover, +// geolocation, or latency resource record sets in a private hosted zone. +type AliasTarget struct { + _ struct{} `type:"structure"` + + // Alias resource record sets only: The external DNS name associated with the + // AWS Resource. The value that you specify depends on where you want to route + // queries: + // + // A CloudFront distribution: Specify the domain name that CloudFront assigned + // when you created your distribution. Your CloudFront distribution must include + // an alternate domain name that matches the name of the resource record set. + // For example, if the name of the resource record set is acme.example.com, + // your CloudFront distribution must include acme.example.com as one of the + // alternate domain names. For more information, see Using Alternate Domain + // Names (CNAMEs) (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/CNAMEs.html) + // in the Amazon CloudFront Developer Guide. An ELB load balancer: Specify the + // DNS name associated with the load balancer. You can get the DNS name by using + // the AWS Management Console, the ELB API, or the AWS CLI. Use the same method + // to get values for HostedZoneId and DNSName. If you get one value from the + // console and the other value from the API or the CLI, creating the resource + // record set will fail. An Elastic Beanstalk environment: Specify the CNAME + // attribute for the environment. (The environment must have a regionalized + // domain name.) An Amazon S3 bucket that is configured as a static website: + // Specify the domain name of the Amazon S3 website endpoint in which you created + // the bucket; for example, s3-website-us-east-1.amazonaws.com. For more information + // about valid values, see the table Amazon Simple Storage Service (S3) Website + // Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // in the Amazon Web Services General Reference. 
For more information about + // using Amazon S3 buckets for websites, see Hosting a Static Website on Amazon + // S3 (http://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) in + // the Amazon Simple Storage Service Developer Guide. Another Amazon Route 53 + // resource record set: Specify the value of the Name element for a resource + // record set in the current hosted zone. + DNSName *string `type:"string" required:"true"` + + // Alias resource record sets only: If you set the value of EvaluateTargetHealth + // to true for the resource record set or sets in an alias, weighted alias, + // latency alias, or failover alias resource record set, and if you specify + // a value for HealthCheckId for every resource record set that is referenced + // by these alias resource record sets, the alias resource record sets inherit + // the health of the referenced resource record sets. + // + // In this configuration, when Amazon Route 53 receives a DNS query for an + // alias resource record set: + // + // Amazon Route 53 looks at the resource record sets that are referenced by + // the alias resource record sets to determine which health checks they're using. + // Amazon Route 53 checks the current status of each health check. (Amazon Route + // 53 periodically checks the health of the endpoint that is specified in a + // health check; it doesn't perform the health check when the DNS query arrives.) + // Based on the status of the health checks, Amazon Route 53 determines which + // resource record sets are healthy. Unhealthy resource record sets are immediately + // removed from consideration. In addition, if all of the resource record sets + // that are referenced by an alias resource record set are unhealthy, that alias + // resource record set also is immediately removed from consideration. Based + // on the configuration of the alias resource record sets (weighted alias or + // latency alias, for example) and the configuration of the resource record + // sets that they reference, Amazon Route 53 chooses a resource record set from + // the healthy resource record sets, and responds to the query. Note the following: + // + // You cannot set EvaluateTargetHealth to true when the alias target is a CloudFront + // distribution. If the AWS resource that you specify in AliasTarget is a resource + // record set or a group of resource record sets (for example, a group of weighted + // resource record sets), but it is not another alias resource record set, we + // recommend that you associate a health check with all of the resource record + // sets in the alias target. If you specify an ELB load balancer in AliasTarget, + // Elastic Load Balancing routes queries only to the healthy Amazon EC2 instances + // that are registered with the load balancer. If no Amazon EC2 instances are + // healthy or if the load balancer itself is unhealthy, and if EvaluateTargetHealth + // is true for the corresponding alias resource record set, Amazon Route 53 + // routes queries to other resources. When you create a load balancer, you configure + // settings for Elastic Load Balancing health checks; they're not Amazon Route + // 53 health checks, but they perform a similar function. Do not create Amazon + // Route 53 health checks for the Amazon EC2 instances that you register with + // an ELB load balancer. 
+ // For more information, see How Health Checks Work in
+ // More Complex Amazon Route 53 Configurations (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-complex-configs.html)
+ // in the Amazon Route 53 Developer Guide. We recommend that you set EvaluateTargetHealth
+ // to true only when you have enough idle capacity to handle the failure of
+ // one or more endpoints.
+ //
+ // For more information and examples, see Amazon Route 53 Health Checks and
+ // DNS Failover (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover.html)
+ // in the Amazon Route 53 Developer Guide.
+ EvaluateTargetHealth *bool `type:"boolean" required:"true"`
+
+ // Alias resource record sets only: The value you use depends on where you want
+ // to route queries:
+ //
+ // A CloudFront distribution: Specify Z2FDTNDATAQYW2. An ELB load balancer:
+ // Specify the value of the hosted zone ID for the load balancer. You can get
+ // the hosted zone ID by using the AWS Management Console, the ELB API, or the
+ // AWS CLI. Use the same method to get values for HostedZoneId and DNSName.
+ // If you get one value from the console and the other value from the API or
+ // the CLI, creating the resource record set will fail. An Amazon S3 bucket
+ // that is configured as a static website: Specify the hosted zone ID for the
+ // Amazon S3 website endpoint in which you created the bucket. For more information
+ // about valid values, see the table Amazon Simple Storage Service (S3) Website
+ // Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+ // in the Amazon Web Services General Reference. Another Amazon Route 53 resource
+ // record set in your hosted zone: Specify the hosted zone ID of your hosted
+ // zone. (An alias resource record set cannot reference a resource record set
+ // in a different hosted zone.)
+ HostedZoneId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AliasTarget) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AliasTarget) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AliasTarget) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AliasTarget"}
+ if s.DNSName == nil {
+ invalidParams.Add(request.NewErrParamRequired("DNSName"))
+ }
+ if s.EvaluateTargetHealth == nil {
+ invalidParams.Add(request.NewErrParamRequired("EvaluateTargetHealth"))
+ }
+ if s.HostedZoneId == nil {
+ invalidParams.Add(request.NewErrParamRequired("HostedZoneId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// A complex type that contains information about the request to associate a
+// VPC with a hosted zone.
+type AssociateVPCWithHostedZoneInput struct {
+ _ struct{} `locationName:"AssociateVPCWithHostedZoneRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"`
+
+ // Optional: Any comments you want to include about an AssociateVPCWithHostedZoneRequest.
+ Comment *string `type:"string"`
+
+ // The ID of the hosted zone you want to associate your VPC with.
+ //
+ // Note that you cannot associate a VPC with a hosted zone that doesn't have
+ // an existing VPC association.
+ HostedZoneId *string `location:"uri" locationName:"Id" type:"string" required:"true"`
+
+ // The VPC that you want your hosted zone to be associated with.
+ VPC *VPC `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s AssociateVPCWithHostedZoneInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssociateVPCWithHostedZoneInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AssociateVPCWithHostedZoneInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AssociateVPCWithHostedZoneInput"}
+ if s.HostedZoneId == nil {
+ invalidParams.Add(request.NewErrParamRequired("HostedZoneId"))
+ }
+ if s.VPC == nil {
+ invalidParams.Add(request.NewErrParamRequired("VPC"))
+ }
+ if s.VPC != nil {
+ if err := s.VPC.Validate(); err != nil {
+ invalidParams.AddNested("VPC", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// A complex type containing the response information for the request.
+type AssociateVPCWithHostedZoneOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A complex type that contains the ID, the status, and the date and time of
+ // your AssociateVPCWithHostedZoneRequest.
+ ChangeInfo *ChangeInfo `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s AssociateVPCWithHostedZoneOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssociateVPCWithHostedZoneOutput) GoString() string {
+ return s.String()
+}
+
+// A complex type that contains the information for each change in a change
+// batch request.
+type Change struct {
+ _ struct{} `type:"structure"`
+
+ // The action to perform:
+ //
+ // CREATE: Creates a resource record set that has the specified values. DELETE:
+ // Deletes an existing resource record set that has the specified values for
+ // Name, Type, SetIdentifier (for latency, weighted, geolocation, and failover
+ // resource record sets), and TTL (except alias resource record sets, for which
+ // the TTL is determined by the AWS resource that you're routing DNS queries
+ // to). UPSERT: If a resource record set does not already exist, Amazon Route
+ // 53 creates it. If a resource record set does exist, Amazon Route 53 updates
+ // it with the values in the request. Amazon Route 53 can update an existing
+ // resource record set only when all of the following values match: Name, Type,
+ // and SetIdentifier (for weighted, latency, geolocation, and failover resource
+ // record sets).
+ Action *string `type:"string" required:"true" enum:"ChangeAction"`
+
+ // Information about the resource record set to create or delete.
+ ResourceRecordSet *ResourceRecordSet `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s Change) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Change) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
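+//
+// // Example: a minimal UPSERT change validated before use, per the Action
+// // documentation above. A sketch only; the record name and value are
+// // placeholders.
+// change := &route53.Change{
+//     Action: aws.String("UPSERT"),
+//     ResourceRecordSet: &route53.ResourceRecordSet{
+//         Name: aws.String("www.example.com."),
+//         Type: aws.String("A"),
+//         TTL:  aws.Int64(300),
+//         ResourceRecords: []*route53.ResourceRecord{
+//             {Value: aws.String("192.0.2.44")},
+//         },
+//     },
+// }
+// if err := change.Validate(); err != nil {
+//     fmt.Println(err)
+// }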
+func (s *Change) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Change"}
+ if s.Action == nil {
+ invalidParams.Add(request.NewErrParamRequired("Action"))
+ }
+ if s.ResourceRecordSet == nil {
+ invalidParams.Add(request.NewErrParamRequired("ResourceRecordSet"))
+ }
+ if s.ResourceRecordSet != nil {
+ if err := s.ResourceRecordSet.Validate(); err != nil {
+ invalidParams.AddNested("ResourceRecordSet", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// A complex type that contains an optional comment and the changes that you
+// want to make with a change batch request.
+type ChangeBatch struct {
+ _ struct{} `type:"structure"`
+
+ // A complex type that contains one Change element for each resource record
+ // set that you want to create or delete.
+ Changes []*Change `locationNameList:"Change" min:"1" type:"list" required:"true"`
+
+ // Optional: Any comments you want to include about a change batch request.
+ Comment *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ChangeBatch) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ChangeBatch) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ChangeBatch) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ChangeBatch"}
+ if s.Changes == nil {
+ invalidParams.Add(request.NewErrParamRequired("Changes"))
+ }
+ if s.Changes != nil && len(s.Changes) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Changes", 1))
+ }
+ if s.Changes != nil {
+ for i, v := range s.Changes {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Changes", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// A complex type that lists the changes and information for a ChangeBatch.
+type ChangeBatchRecord struct {
+ _ struct{} `deprecated:"true" type:"structure"`
+
+ // A list of changes made in the ChangeBatch.
+ Changes []*Change `locationNameList:"Change" min:"1" type:"list"`
+
+ // Optional: Any comments you want to include about the change batch request.
+ Comment *string `type:"string"`
+
+ // The ID of the request. Use this ID to track when the change has completed
+ // across all Amazon Route 53 DNS servers.
+ Id *string `type:"string" required:"true"`
+
+ // The current state of the request. PENDING indicates that this request has
+ // not yet been applied to all Amazon Route 53 DNS servers.
+ //
+ // Valid Values: PENDING | INSYNC
+ Status *string `type:"string" required:"true" enum:"ChangeStatus"`
+
+ // The date and time the change was submitted, in the format YYYY-MM-DDThh:mm:ssZ,
+ // as specified in the ISO 8601 standard (for example, 2009-11-19T19:37:58Z).
+ // The Z after the time indicates that the time is listed in Coordinated Universal
+ // Time (UTC).
+ SubmittedAt *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // The AWS account ID attached to the changes.
+ Submitter *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ChangeBatchRecord) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ChangeBatchRecord) GoString() string {
+ return s.String()
+}
+
+// A complex type that describes change information about changes made to your
+// hosted zone.
+//
+// This element contains an ID that you use when performing a GetChange action
+// to get detailed information about the change.
+type ChangeInfo struct {
+ _ struct{} `type:"structure"`
+
+ // A comment you can provide.
+ Comment *string `type:"string"`
+
+ // The ID of the request. Use this ID to track when the change has completed
+ // across all Amazon Route 53 DNS servers.
+ Id *string `type:"string" required:"true"`
+
+ // The current state of the request. PENDING indicates that this request has
+ // not yet been applied to all Amazon Route 53 DNS servers.
+ //
+ // Valid Values: PENDING | INSYNC
+ Status *string `type:"string" required:"true" enum:"ChangeStatus"`
+
+ // The date and time the change was submitted, in the format YYYY-MM-DDThh:mm:ssZ,
+ // as specified in the ISO 8601 standard (for example, 2009-11-19T19:37:58Z).
+ // The Z after the time indicates that the time is listed in Coordinated Universal
+ // Time (UTC).
+ SubmittedAt *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
+}
+
+// String returns the string representation
+func (s ChangeInfo) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ChangeInfo) GoString() string {
+ return s.String()
+}
+
+// A complex type that contains a change batch.
+type ChangeResourceRecordSetsInput struct {
+ _ struct{} `locationName:"ChangeResourceRecordSetsRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"`
+
+ // A complex type that contains an optional comment and the Changes element.
+ ChangeBatch *ChangeBatch `type:"structure" required:"true"`
+
+ // The ID of the hosted zone that contains the resource record sets that you
+ // want to change.
+ HostedZoneId *string `location:"uri" locationName:"Id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ChangeResourceRecordSetsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ChangeResourceRecordSetsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ChangeResourceRecordSetsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ChangeResourceRecordSetsInput"}
+ if s.ChangeBatch == nil {
+ invalidParams.Add(request.NewErrParamRequired("ChangeBatch"))
+ }
+ if s.HostedZoneId == nil {
+ invalidParams.Add(request.NewErrParamRequired("HostedZoneId"))
+ }
+ if s.ChangeBatch != nil {
+ if err := s.ChangeBatch.Validate(); err != nil {
+ invalidParams.AddNested("ChangeBatch", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// A complex type containing the response for the request.
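+//
+// // Example: submitting a change batch and tracking its status, combining
+// // the input types above. A sketch; the hosted zone ID is a placeholder,
+// // and "change" is the Change value sketched earlier.
+// input := &route53.ChangeResourceRecordSetsInput{
+//     HostedZoneId: aws.String("Z1PA6795UKMFR9"),
+//     ChangeBatch: &route53.ChangeBatch{
+//         Changes: []*route53.Change{change},
+//         Comment: aws.String("Upsert the www record"),
+//     },
+// }
+// resp, err := client.ChangeResourceRecordSets(input)
+// if err == nil {
+//     // resp.ChangeInfo.Id can be passed to GetChange to poll for INSYNC.
+//     fmt.Println(*resp.ChangeInfo.Status)
+// }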
+type ChangeResourceRecordSetsOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains information about changes made to your hosted + // zone. + // + // This element contains an ID that you use when performing a GetChange action + // to get detailed information about the change. + ChangeInfo *ChangeInfo `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ChangeResourceRecordSetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangeResourceRecordSetsOutput) GoString() string { + return s.String() +} + +// A complex type containing information about a request to add, change, or +// delete the tags that are associated with a resource. +type ChangeTagsForResourceInput struct { + _ struct{} `locationName:"ChangeTagsForResourceRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // A complex type that contains a list of Tag elements. Each Tag element identifies + // a tag that you want to add or update for the specified resource. + AddTags []*Tag `locationNameList:"Tag" min:"1" type:"list"` + + // A list of Tag keys that you want to remove from the specified resource. + RemoveTagKeys []*string `locationNameList:"Key" min:"1" type:"list"` + + // The ID of the resource for which you want to add, change, or delete tags. + ResourceId *string `location:"uri" locationName:"ResourceId" type:"string" required:"true"` + + // The type of the resource. + // + // - The resource type for health checks is healthcheck. + // + // - The resource type for hosted zones is hostedzone. + ResourceType *string `location:"uri" locationName:"ResourceType" type:"string" required:"true" enum:"TagResourceType"` +} + +// String returns the string representation +func (s ChangeTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangeTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ChangeTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ChangeTagsForResourceInput"} + if s.AddTags != nil && len(s.AddTags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AddTags", 1)) + } + if s.RemoveTagKeys != nil && len(s.RemoveTagKeys) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RemoveTagKeys", 1)) + } + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.ResourceType == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Empty response for the request. +type ChangeTagsForResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ChangeTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangeTagsForResourceOutput) GoString() string { + return s.String() +} + +// For CLOUDWATCH_METRIC health checks, a complex type that contains information +// about the CloudWatch alarm that you're associating with the health check. +type CloudWatchAlarmConfiguration struct { + _ struct{} `type:"structure"` + + // The arithmetic operation to use when comparing the specified Statistic and + // Threshold. 
+ //
+ // Valid Values are GreaterThanOrEqualToThreshold, GreaterThanThreshold, LessThanThreshold
+ // and LessThanOrEqualToThreshold
+ ComparisonOperator *string `type:"string" required:"true" enum:"ComparisonOperator"`
+
+ // A list of Dimension elements for the CloudWatch metric that is associated
+ // with the CloudWatch alarm. For information about the metrics and dimensions
+ // that CloudWatch supports, see Amazon CloudWatch Namespaces, Dimensions, and
+ // Metrics Reference (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html).
+ Dimensions []*Dimension `locationNameList:"Dimension" type:"list"`
+
+ // The number of periods over which data is compared to the specified threshold.
+ EvaluationPeriods *int64 `min:"1" type:"integer" required:"true"`
+
+ // The name of the CloudWatch metric that is associated with the CloudWatch
+ // alarm.
+ MetricName *string `min:"1" type:"string" required:"true"`
+
+ // The namespace of the CloudWatch metric that is associated with the CloudWatch
+ // alarm.
+ Namespace *string `min:"1" type:"string" required:"true"`
+
+ // An integer that represents the period in seconds over which the statistic
+ // is applied.
+ Period *int64 `min:"60" type:"integer" required:"true"`
+
+ // The statistic to apply to the CloudWatch metric that is associated with the
+ // CloudWatch alarm.
+ //
+ // Valid Values are SampleCount, Average, Sum, Minimum and Maximum
+ Statistic *string `type:"string" required:"true" enum:"Statistic"`
+
+ // The value that the metric is compared with to determine the state of the
+ // alarm. For example, if you want the health check to fail if the average TCP
+ // connection time is greater than 500 milliseconds for more than 60 seconds,
+ // the threshold is 500.
+ Threshold *float64 `type:"double" required:"true"`
+}
+
+// String returns the string representation
+func (s CloudWatchAlarmConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CloudWatchAlarmConfiguration) GoString() string {
+ return s.String()
+}
+
+// A complex type that contains information about the request to create a health
+// check.
+type CreateHealthCheckInput struct {
+ _ struct{} `locationName:"CreateHealthCheckRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"`
+
+ // A unique string that identifies the request and that allows failed CreateHealthCheck
+ // requests to be retried without the risk of executing the operation twice.
+ // You must use a unique CallerReference string every time you create a health
+ // check. CallerReference can be any unique string; you might choose to use
+ // a string that identifies your project.
+ //
+ // Valid characters are any Unicode code points that are legal in an XML 1.0
+ // document. The UTF-8 encoding of the value must be less than 128 bytes.
+ CallerReference *string `min:"1" type:"string" required:"true"`
+
+ // A complex type that contains health check configuration.
+ HealthCheckConfig *HealthCheckConfig `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateHealthCheckInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateHealthCheckInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
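+//
+// // Example: an HTTP health check request with a unique CallerReference, as
+// // the field documentation above requires. A sketch; the endpoint values
+// // are placeholders, and a timestamp is just one way to get a unique string.
+// input := &route53.CreateHealthCheckInput{
+//     CallerReference: aws.String(time.Now().Format(time.RFC3339Nano)),
+//     HealthCheckConfig: &route53.HealthCheckConfig{
+//         Type:             aws.String("HTTP"),
+//         IPAddress:        aws.String("192.0.2.17"),
+//         Port:             aws.Int64(80),
+//         ResourcePath:     aws.String("/health"),
+//         RequestInterval:  aws.Int64(30),
+//         FailureThreshold: aws.Int64(3),
+//     },
+// }
+// resp, err := client.CreateHealthCheck(input)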
+func (s *CreateHealthCheckInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateHealthCheckInput"}
+ if s.CallerReference == nil {
+ invalidParams.Add(request.NewErrParamRequired("CallerReference"))
+ }
+ if s.CallerReference != nil && len(*s.CallerReference) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("CallerReference", 1))
+ }
+ if s.HealthCheckConfig == nil {
+ invalidParams.Add(request.NewErrParamRequired("HealthCheckConfig"))
+ }
+ if s.HealthCheckConfig != nil {
+ if err := s.HealthCheckConfig.Validate(); err != nil {
+ invalidParams.AddNested("HealthCheckConfig", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// A complex type containing the response information for the new health check.
+type CreateHealthCheckOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A complex type that contains identifying information about the health check.
+ HealthCheck *HealthCheck `type:"structure" required:"true"`
+
+ // The unique URL representing the new health check.
+ Location *string `location:"header" locationName:"Location" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateHealthCheckOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateHealthCheckOutput) GoString() string {
+ return s.String()
+}
+
+// A complex type that contains information about the request to create a hosted
+// zone.
+type CreateHostedZoneInput struct {
+ _ struct{} `locationName:"CreateHostedZoneRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"`
+
+ // A unique string that identifies the request and that allows failed CreateHostedZone
+ // requests to be retried without the risk of executing the operation twice.
+ // You must use a unique CallerReference string every time you create a hosted
+ // zone. CallerReference can be any unique string; you might choose to use a
+ // string that identifies your project, such as DNSMigration_01.
+ //
+ // Valid characters are any Unicode code points that are legal in an XML 1.0
+ // document. The UTF-8 encoding of the value must be less than 128 bytes.
+ CallerReference *string `min:"1" type:"string" required:"true"`
+
+ // The delegation set id of the reusable delegation set whose NS records you
+ // want to assign to the new hosted zone.
+ DelegationSetId *string `type:"string"`
+
+ // A complex type that contains an optional comment about your hosted zone.
+ HostedZoneConfig *HostedZoneConfig `type:"structure"`
+
+ // The name of the domain. This must be a fully-specified domain, for example,
+ // www.example.com. The trailing dot is optional; Amazon Route 53 assumes that
+ // the domain name is fully qualified. This means that Amazon Route 53 treats
+ // www.example.com (without a trailing dot) and www.example.com. (with a trailing
+ // dot) as identical.
+ //
+ // This is the name you have registered with your DNS registrar. You should
+ // ask your registrar to change the authoritative name servers for your domain
+ // to the set of NameServers elements returned in DelegationSet.
+ Name *string `type:"string" required:"true"`
+
+ // The VPC that you want your hosted zone to be associated with. By providing
+ // this parameter, your newly created hosted zone cannot be resolved anywhere
+ // other than the given VPC.
+ VPC *VPC `type:"structure"` +} + +// String returns the string representation +func (s CreateHostedZoneInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateHostedZoneInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateHostedZoneInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateHostedZoneInput"} + if s.CallerReference == nil { + invalidParams.Add(request.NewErrParamRequired("CallerReference")) + } + if s.CallerReference != nil && len(*s.CallerReference) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CallerReference", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.VPC != nil { + if err := s.VPC.Validate(); err != nil { + invalidParams.AddNested("VPC", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type containing the response information for the new hosted zone. +type CreateHostedZoneOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains information about the request to create a hosted + // zone. This includes an ID that you use when you call the GetChange action + // to get the current status of the change request. + ChangeInfo *ChangeInfo `type:"structure" required:"true"` + + // A complex type that contains name server information. + DelegationSet *DelegationSet `type:"structure" required:"true"` + + // A complex type that contains identifying information about the hosted zone. + HostedZone *HostedZone `type:"structure" required:"true"` + + // The unique URL representing the new hosted zone. + Location *string `location:"header" locationName:"Location" type:"string" required:"true"` + + VPC *VPC `type:"structure"` +} + +// String returns the string representation +func (s CreateHostedZoneOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateHostedZoneOutput) GoString() string { + return s.String() +} + +type CreateReusableDelegationSetInput struct { + _ struct{} `locationName:"CreateReusableDelegationSetRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // A unique string that identifies the request and that allows failed CreateReusableDelegationSet + // requests to be retried without the risk of executing the operation twice. + // You must use a unique CallerReference string every time you create a reusable + // delegation set. CallerReference can be any unique string; you might choose + // to use a string that identifies your project, such as DNSMigration_01. + // + // Valid characters are any Unicode code points that are legal in an XML 1.0 + // document. The UTF-8 encoding of the value must be less than 128 bytes. + CallerReference *string `min:"1" type:"string" required:"true"` + + // The ID of the hosted zone whose delegation set you want to mark as reusable. + // It is an optional parameter. + HostedZoneId *string `type:"string"` +} + +// String returns the string representation +func (s CreateReusableDelegationSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReusableDelegationSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
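+//
+// // Example: marking the delegation set of an existing hosted zone as
+// // reusable via the optional HostedZoneId parameter described above. A
+// // sketch; both string values are placeholders.
+// input := &route53.CreateReusableDelegationSetInput{
+//     CallerReference: aws.String("DNSMigration_01"),
+//     HostedZoneId:    aws.String("Z1PA6795UKMFR9"),
+// }
+// resp, err := client.CreateReusableDelegationSet(input)
+// if err == nil {
+//     fmt.Println(resp.DelegationSet.NameServers)
+// }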
+func (s *CreateReusableDelegationSetInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateReusableDelegationSetInput"}
+ if s.CallerReference == nil {
+ invalidParams.Add(request.NewErrParamRequired("CallerReference"))
+ }
+ if s.CallerReference != nil && len(*s.CallerReference) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("CallerReference", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type CreateReusableDelegationSetOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A complex type that contains name server information.
+ DelegationSet *DelegationSet `type:"structure" required:"true"`
+
+ // The unique URL representing the new reusable delegation set.
+ Location *string `location:"header" locationName:"Location" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateReusableDelegationSetOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateReusableDelegationSetOutput) GoString() string {
+ return s.String()
+}
+
+// A complex type that contains information about the traffic policy that you
+// want to create.
+type CreateTrafficPolicyInput struct {
+ _ struct{} `locationName:"CreateTrafficPolicyRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"`
+
+ // Any comments that you want to include about the traffic policy.
+ Comment *string `type:"string"`
+
+ // The definition of this traffic policy in JSON format. For more information,
+ // see Traffic Policy Document Format (http://docs.aws.amazon.com/Route53/latest/APIReference/api-policies-traffic-policy-document-format.html)
+ // in the Amazon Route 53 API Reference.
+ Document *string `type:"string" required:"true"`
+
+ // The name of the traffic policy.
+ Name *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateTrafficPolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateTrafficPolicyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateTrafficPolicyInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateTrafficPolicyInput"}
+ if s.Document == nil {
+ invalidParams.Add(request.NewErrParamRequired("Document"))
+ }
+ if s.Name == nil {
+ invalidParams.Add(request.NewErrParamRequired("Name"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// A complex type that contains information about the resource record sets that
+// you want to create based on a specified traffic policy.
+type CreateTrafficPolicyInstanceInput struct {
+ _ struct{} `locationName:"CreateTrafficPolicyInstanceRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"`
+
+ // The ID of the hosted zone in which you want Amazon Route 53 to create resource
+ // record sets by using the configuration in a traffic policy.
+ HostedZoneId *string `type:"string" required:"true"`
+
+ // The domain name (such as example.com) or subdomain name (such as www.example.com)
+ // for which Amazon Route 53 responds to DNS queries by using the resource record
+ // sets that Amazon Route 53 creates for this traffic policy instance.
+	Name *string `type:"string" required:"true"`
+
+	// The TTL that you want Amazon Route 53 to assign to all of the resource record
+	// sets that it creates in the specified hosted zone.
+	TTL *int64 `type:"long" required:"true"`
+
+	// The ID of the traffic policy that you want to use to create resource record
+	// sets in the specified hosted zone.
+	TrafficPolicyId *string `type:"string" required:"true"`
+
+	// The version of the traffic policy that you want to use to create resource
+	// record sets in the specified hosted zone.
+	TrafficPolicyVersion *int64 `min:"1" type:"integer" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateTrafficPolicyInstanceInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateTrafficPolicyInstanceInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateTrafficPolicyInstanceInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateTrafficPolicyInstanceInput"}
+	if s.HostedZoneId == nil {
+		invalidParams.Add(request.NewErrParamRequired("HostedZoneId"))
+	}
+	if s.Name == nil {
+		invalidParams.Add(request.NewErrParamRequired("Name"))
+	}
+	if s.TTL == nil {
+		invalidParams.Add(request.NewErrParamRequired("TTL"))
+	}
+	if s.TrafficPolicyId == nil {
+		invalidParams.Add(request.NewErrParamRequired("TrafficPolicyId"))
+	}
+	if s.TrafficPolicyVersion == nil {
+		invalidParams.Add(request.NewErrParamRequired("TrafficPolicyVersion"))
+	}
+	if s.TrafficPolicyVersion != nil && *s.TrafficPolicyVersion < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("TrafficPolicyVersion", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// A complex type that contains the response information for the CreateTrafficPolicyInstance
+// request.
+type CreateTrafficPolicyInstanceOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A unique URL that represents a new traffic policy instance.
+	Location *string `location:"header" locationName:"Location" type:"string" required:"true"`
+
+	// A complex type that contains settings for the new traffic policy instance.
+	TrafficPolicyInstance *TrafficPolicyInstance `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateTrafficPolicyInstanceOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateTrafficPolicyInstanceOutput) GoString() string {
+	return s.String()
+}
+
+// A complex type that contains the response information for the CreateTrafficPolicy
+// request.
+type CreateTrafficPolicyOutput struct {
+	_ struct{} `type:"structure"`
+
+	Location *string `location:"header" locationName:"Location" type:"string" required:"true"`
+
+	// A complex type that contains settings for the new traffic policy.
+	TrafficPolicy *TrafficPolicy `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateTrafficPolicyOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateTrafficPolicyOutput) GoString() string {
+	return s.String()
+}
+
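Taken together, the traffic policy types above are used roughly as follows: create a policy from a JSON document, then instantiate it in a hosted zone so that Route 53 materializes the resource record sets. A hedged sketch; the hosted zone ID, the names, and the minimal document skeleton are illustrative placeholders rather than values from this patch:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/route53"
)

func main() {
	svc := route53.New(session.Must(session.NewSession()))

	// A minimal document of the shape described by the Traffic Policy
	// Document Format reference linked above; treat it as illustrative.
	doc := `{"AWSPolicyFormatVersion":"2015-10-01","RecordType":"A",` +
		`"StartEndpoint":"ep","Endpoints":{"ep":{"Type":"value","Value":"192.0.2.1"}}}`

	policy, err := svc.CreateTrafficPolicy(&route53.CreateTrafficPolicyInput{
		Document: aws.String(doc),
		Name:     aws.String("example-policy"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Instantiating the policy makes Route 53 create the underlying record
	// sets for www.example.com with the requested TTL.
	inst, err := svc.CreateTrafficPolicyInstance(&route53.CreateTrafficPolicyInstanceInput{
		HostedZoneId:         aws.String("Z3M3LMPEXAMPLE"), // placeholder
		Name:                 aws.String("www.example.com"),
		TTL:                  aws.Int64(60),
		TrafficPolicyId:      policy.TrafficPolicy.Id,
		TrafficPolicyVersion: policy.TrafficPolicy.Version,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(inst.TrafficPolicyInstance.Id))
}
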
+// A complex type that contains information about the traffic policy for which
+// you want to create a new version.
+type CreateTrafficPolicyVersionInput struct {
+	_ struct{} `locationName:"CreateTrafficPolicyVersionRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"`
+
+	// Any comments that you want to include about the new traffic policy version.
+	Comment *string `type:"string"`
+
+	// The definition of a new traffic policy version, in JSON format. You must
+	// specify the full definition of the new traffic policy. You cannot specify
+	// just the differences between the new version and a previous version. For
+	// more information, see Traffic Policy Document Format (http://docs.aws.amazon.com/Route53/latest/APIReference/api-policies-traffic-policy-document-format.html)
+	// in the Amazon Route 53 API Reference.
+	Document *string `type:"string" required:"true"`
+
+	// The ID of the traffic policy for which you want to create a new version.
+	Id *string `location:"uri" locationName:"Id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateTrafficPolicyVersionInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateTrafficPolicyVersionInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateTrafficPolicyVersionInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateTrafficPolicyVersionInput"}
+	if s.Document == nil {
+		invalidParams.Add(request.NewErrParamRequired("Document"))
+	}
+	if s.Id == nil {
+		invalidParams.Add(request.NewErrParamRequired("Id"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// A complex type that contains the response information for the CreateTrafficPolicyVersion
+// request.
+type CreateTrafficPolicyVersionOutput struct {
+	_ struct{} `type:"structure"`
+
+	Location *string `location:"header" locationName:"Location" type:"string" required:"true"`
+
+	// A complex type that contains settings for the new version of the traffic
+	// policy.
+	TrafficPolicy *TrafficPolicy `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateTrafficPolicyVersionOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateTrafficPolicyVersionOutput) GoString() string {
+	return s.String()
+}
+
+// A complex type that contains name server information.
+type DelegationSet struct {
+	_ struct{} `type:"structure"`
+
+	CallerReference *string `min:"1" type:"string"`
+
+	Id *string `type:"string"`
+
+	// A complex type that contains the authoritative name servers for the hosted
+	// zone. Use the method provided by your domain registrar to add an NS record
+	// to your domain for each NameServer that is assigned to your hosted zone.
+	NameServers []*string `locationNameList:"NameServer" min:"1" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s DelegationSet) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DelegationSet) GoString() string {
+	return s.String()
+}
+
+// A complex type containing the request information for deleting a health check.
+type DeleteHealthCheckInput struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the health check to delete.
+	HealthCheckId *string `location:"uri" locationName:"HealthCheckId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteHealthCheckInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteHealthCheckInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteHealthCheckInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteHealthCheckInput"}
+	if s.HealthCheckId == nil {
+		invalidParams.Add(request.NewErrParamRequired("HealthCheckId"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Empty response for the request.
+type DeleteHealthCheckOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteHealthCheckOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteHealthCheckOutput) GoString() string {
+	return s.String()
+}
+
+// A complex type that contains information about the hosted zone that you want
+// to delete.
+type DeleteHostedZoneInput struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the hosted zone you want to delete.
+	Id *string `location:"uri" locationName:"Id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteHostedZoneInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteHostedZoneInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteHostedZoneInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteHostedZoneInput"}
+	if s.Id == nil {
+		invalidParams.Add(request.NewErrParamRequired("Id"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// A complex type containing the response information for the request.
+type DeleteHostedZoneOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A complex type that contains the ID, the status, and the date and time of
+	// your delete request.
+	ChangeInfo *ChangeInfo `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteHostedZoneOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteHostedZoneOutput) GoString() string {
+	return s.String()
+}
+
+// A complex type containing the information for the delete request.
+type DeleteReusableDelegationSetInput struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the reusable delegation set you want to delete.
+	Id *string `location:"uri" locationName:"Id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteReusableDelegationSetInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteReusableDelegationSetInput) GoString() string {
+	return s.String()
+}
+
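In contrast to the empty DeleteHealthCheckOutput above, DeleteHostedZone answers with a ChangeInfo that can be polled via GetChange. A sketch of that flow, assuming a placeholder zone ID and a zone that already holds only its default SOA and NS records (Route 53 rejects the deletion otherwise):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/route53"
)

func main() {
	svc := route53.New(session.Must(session.NewSession()))

	out, err := svc.DeleteHostedZone(&route53.DeleteHostedZoneInput{
		Id: aws.String("Z1D633PJN98FT9"), // placeholder zone ID
	})
	if err != nil {
		log.Fatal(err)
	}

	// The change starts out PENDING; its Id can be handed to GetChange
	// until the status reaches INSYNC.
	fmt.Println(aws.StringValue(out.ChangeInfo.Id), aws.StringValue(out.ChangeInfo.Status))
}
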
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteReusableDelegationSetInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteReusableDelegationSetInput"}
+	if s.Id == nil {
+		invalidParams.Add(request.NewErrParamRequired("Id"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Empty response for the request.
+type DeleteReusableDelegationSetOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteReusableDelegationSetOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteReusableDelegationSetOutput) GoString() string {
+	return s.String()
+}
+
+// A request to delete a specified traffic policy version.
+type DeleteTrafficPolicyInput struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the traffic policy that you want to delete.
+	Id *string `location:"uri" locationName:"Id" type:"string" required:"true"`
+
+	// The version number of the traffic policy that you want to delete.
+	Version *int64 `location:"uri" locationName:"Version" min:"1" type:"integer" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteTrafficPolicyInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteTrafficPolicyInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteTrafficPolicyInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteTrafficPolicyInput"}
+	if s.Id == nil {
+		invalidParams.Add(request.NewErrParamRequired("Id"))
+	}
+	if s.Version == nil {
+		invalidParams.Add(request.NewErrParamRequired("Version"))
+	}
+	if s.Version != nil && *s.Version < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("Version", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// A complex type that contains information about the traffic policy instance
+// that you want to delete.
+type DeleteTrafficPolicyInstanceInput struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the traffic policy instance that you want to delete.
+	//
+	// When you delete a traffic policy instance, Amazon Route 53 also deletes
+	// all of the resource record sets that were created when you created the traffic
+	// policy instance.
+	Id *string `location:"uri" locationName:"Id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteTrafficPolicyInstanceInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteTrafficPolicyInstanceInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteTrafficPolicyInstanceInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteTrafficPolicyInstanceInput"}
+	if s.Id == nil {
+		invalidParams.Add(request.NewErrParamRequired("Id"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
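Both traffic policy deletions above return empty elements, so a nil error is the only success signal. A brief sketch with placeholder IDs; note the instance is removed first, which also deletes the record sets Route 53 created for it:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/route53"
)

func main() {
	svc := route53.New(session.Must(session.NewSession()))

	// Removing the instance also removes its generated resource record sets.
	if _, err := svc.DeleteTrafficPolicyInstance(&route53.DeleteTrafficPolicyInstanceInput{
		Id: aws.String("example-instance-id"), // placeholder
	}); err != nil {
		log.Fatal(err)
	}

	// The output is an empty element; only the error matters.
	if _, err := svc.DeleteTrafficPolicy(&route53.DeleteTrafficPolicyInput{
		Id:      aws.String("example-policy-id"), // placeholder
		Version: aws.Int64(1),
	}); err != nil {
		log.Fatal(err)
	}
}
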
+// An empty element.
+type DeleteTrafficPolicyInstanceOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteTrafficPolicyInstanceOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteTrafficPolicyInstanceOutput) GoString() string {
+	return s.String()
+}
+
+// An empty element.
+type DeleteTrafficPolicyOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteTrafficPolicyOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteTrafficPolicyOutput) GoString() string {
+	return s.String()
+}
+
+// The name and value of a dimension for a CloudWatch metric.
+type Dimension struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the dimension.
+	Name *string `min:"1" type:"string" required:"true"`
+
+	// The value of the dimension.
+	Value *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s Dimension) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Dimension) GoString() string {
+	return s.String()
+}
+
+// A complex type that contains information about the request to disassociate
+// a VPC from a hosted zone.
+type DisassociateVPCFromHostedZoneInput struct {
+	_ struct{} `locationName:"DisassociateVPCFromHostedZoneRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"`
+
+	// Optional: Any comments you want to include about a DisassociateVPCFromHostedZoneRequest.
+	Comment *string `type:"string"`
+
+	// The ID of the hosted zone you want to disassociate your VPC from.
+	//
+	// Note that you cannot disassociate the last VPC from a hosted zone.
+	HostedZoneId *string `location:"uri" locationName:"Id" type:"string" required:"true"`
+
+	// The VPC that you want your hosted zone to be disassociated from.
+	VPC *VPC `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s DisassociateVPCFromHostedZoneInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DisassociateVPCFromHostedZoneInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DisassociateVPCFromHostedZoneInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DisassociateVPCFromHostedZoneInput"}
+	if s.HostedZoneId == nil {
+		invalidParams.Add(request.NewErrParamRequired("HostedZoneId"))
+	}
+	if s.VPC == nil {
+		invalidParams.Add(request.NewErrParamRequired("VPC"))
+	}
+	if s.VPC != nil {
+		if err := s.VPC.Validate(); err != nil {
+			invalidParams.AddNested("VPC", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// A complex type containing the response information for the request.
+type DisassociateVPCFromHostedZoneOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A complex type that contains the ID, the status, and the date and time of
+	// your DisassociateVPCFromHostedZoneRequest.
+ ChangeInfo *ChangeInfo `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DisassociateVPCFromHostedZoneOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateVPCFromHostedZoneOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about a geo location. +type GeoLocation struct { + _ struct{} `type:"structure"` + + // The code for a continent geo location. Note: only continent locations have + // a continent code. + // + // Valid values: AF | AN | AS | EU | OC | NA | SA + // + // Constraint: Specifying ContinentCode with either CountryCode or SubdivisionCode + // returns an InvalidInput error. + ContinentCode *string `min:"2" type:"string"` + + // The code for a country geo location. The default location uses '*' for the + // country code and will match all locations that are not matched by a geo location. + // + // The default geo location uses a * for the country code. All other country + // codes follow the ISO 3166 two-character code. + CountryCode *string `min:"1" type:"string"` + + // The code for a country's subdivision (e.g., a province of Canada). A subdivision + // code is only valid with the appropriate country code. + // + // Constraint: Specifying SubdivisionCode without CountryCode returns an InvalidInput + // error. + SubdivisionCode *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GeoLocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GeoLocation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GeoLocation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GeoLocation"} + if s.ContinentCode != nil && len(*s.ContinentCode) < 2 { + invalidParams.Add(request.NewErrParamMinLen("ContinentCode", 2)) + } + if s.CountryCode != nil && len(*s.CountryCode) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CountryCode", 1)) + } + if s.SubdivisionCode != nil && len(*s.SubdivisionCode) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SubdivisionCode", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains information about a GeoLocation. +type GeoLocationDetails struct { + _ struct{} `type:"structure"` + + // The code for a continent geo location. Note: only continent locations have + // a continent code. + ContinentCode *string `min:"2" type:"string"` + + // The name of the continent. This element is only present if ContinentCode + // is also present. + ContinentName *string `min:"1" type:"string"` + + // The code for a country geo location. The default location uses '*' for the + // country code and will match all locations that are not matched by a geo location. + // + // The default geo location uses a * for the country code. All other country + // codes follow the ISO 3166 two-character code. + CountryCode *string `min:"1" type:"string"` + + // The name of the country. This element is only present if CountryCode is also + // present. + CountryName *string `min:"1" type:"string"` + + // The code for a country's subdivision (e.g., a province of Canada). A subdivision + // code is only valid with the appropriate country code. + SubdivisionCode *string `min:"1" type:"string"` + + // The name of the subdivision. 
This element is only present if SubdivisionCode + // is also present. + SubdivisionName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GeoLocationDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GeoLocationDetails) GoString() string { + return s.String() +} + +// The input for a GetChangeDetails request. +type GetChangeDetailsInput struct { + _ struct{} `deprecated:"true" type:"structure"` + + // The ID of the change batch request. The value that you specify here is the + // value that ChangeResourceRecordSets returned in the Id element when you submitted + // the request. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetChangeDetailsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetChangeDetailsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetChangeDetailsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetChangeDetailsInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains the ChangeBatchRecord element. +type GetChangeDetailsOutput struct { + _ struct{} `deprecated:"true" type:"structure"` + + // A complex type that contains information about the specified change batch, + // including the change batch ID, the status of the change, and the contained + // changes. + ChangeBatchRecord *ChangeBatchRecord `deprecated:"true" type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetChangeDetailsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetChangeDetailsOutput) GoString() string { + return s.String() +} + +// The input for a GetChange request. +type GetChangeInput struct { + _ struct{} `type:"structure"` + + // The ID of the change batch request. The value that you specify here is the + // value that ChangeResourceRecordSets returned in the Id element when you submitted + // the request. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetChangeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetChangeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetChangeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetChangeInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains the ChangeInfo element. +type GetChangeOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains information about the specified change batch, + // including the change batch ID, the status of the change, and the date and + // time of the request. 
+	ChangeInfo *ChangeInfo `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s GetChangeOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetChangeOutput) GoString() string {
+	return s.String()
+}
+
+// Empty request.
+type GetCheckerIpRangesInput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetCheckerIpRangesInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetCheckerIpRangesInput) GoString() string {
+	return s.String()
+}
+
+// A complex type that contains the CheckerIpRanges element.
+type GetCheckerIpRangesOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A complex type that contains a sorted list of IP ranges in CIDR format for
+	// Amazon Route 53 health checkers.
+	CheckerIpRanges []*string `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s GetCheckerIpRangesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetCheckerIpRangesOutput) GoString() string {
+	return s.String()
+}
+
+// A complex type that contains information about the request to get a geo location.
+type GetGeoLocationInput struct {
+	_ struct{} `type:"structure"`
+
+	// The code for a continent geo location. Note: only continent locations have
+	// a continent code.
+	//
+	// Valid values: AF | AN | AS | EU | OC | NA | SA
+	//
+	// Constraint: Specifying ContinentCode with either CountryCode or SubdivisionCode
+	// returns an InvalidInput error.
+	ContinentCode *string `location:"querystring" locationName:"continentcode" min:"2" type:"string"`
+
+	// The code for a country geo location. The default location uses '*' for the
+	// country code and will match all locations that are not matched by a geo location.
+	//
+	// The default geo location uses a * for the country code. All other country
+	// codes follow the ISO 3166 two-character code.
+	CountryCode *string `location:"querystring" locationName:"countrycode" min:"1" type:"string"`
+
+	// The code for a country's subdivision (e.g., a province of Canada). A subdivision
+	// code is only valid with the appropriate country code.
+	//
+	// Constraint: Specifying SubdivisionCode without CountryCode returns an InvalidInput
+	// error.
+	SubdivisionCode *string `location:"querystring" locationName:"subdivisioncode" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s GetGeoLocationInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetGeoLocationInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetGeoLocationInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetGeoLocationInput"}
+	if s.ContinentCode != nil && len(*s.ContinentCode) < 2 {
+		invalidParams.Add(request.NewErrParamMinLen("ContinentCode", 2))
+	}
+	if s.CountryCode != nil && len(*s.CountryCode) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("CountryCode", 1))
+	}
+	if s.SubdivisionCode != nil && len(*s.SubdivisionCode) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("SubdivisionCode", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
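A sketch of a geolocation lookup that honors the constraint documented above, namely that SubdivisionCode is only meaningful alongside its CountryCode; the specific codes are illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/route53"
)

func main() {
	svc := route53.New(session.Must(session.NewSession()))

	// Sending SubdivisionCode without CountryCode would come back as an
	// InvalidInput error, so both are supplied here.
	out, err := svc.GetGeoLocation(&route53.GetGeoLocationInput{
		CountryCode:     aws.String("CA"),
		SubdivisionCode: aws.String("ON"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.GeoLocationDetails.SubdivisionName))
}
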
+// A complex type containing information about the specified geo location.
+type GetGeoLocationOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A complex type that contains the information about the specified geo location.
+	GeoLocationDetails *GeoLocationDetails `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s GetGeoLocationOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetGeoLocationOutput) GoString() string {
+	return s.String()
+}
+
+// To retrieve a count of all your health checks, send a GET request to the
+// /Route 53 API version/healthcheckcount resource.
+type GetHealthCheckCountInput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetHealthCheckCountInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetHealthCheckCountInput) GoString() string {
+	return s.String()
+}
+
+// A complex type that contains the count of health checks associated with the
+// current AWS account.
+type GetHealthCheckCountOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The number of health checks associated with the current AWS account.
+	HealthCheckCount *int64 `type:"long" required:"true"`
+}
+
+// String returns the string representation
+func (s GetHealthCheckCountOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetHealthCheckCountOutput) GoString() string {
+	return s.String()
+}
+
+// A complex type that contains information about the request to get a health
+// check.
+type GetHealthCheckInput struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the health check to retrieve.
+	HealthCheckId *string `location:"uri" locationName:"HealthCheckId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetHealthCheckInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetHealthCheckInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetHealthCheckInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetHealthCheckInput"}
+	if s.HealthCheckId == nil {
+		invalidParams.Add(request.NewErrParamRequired("HealthCheckId"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// A complex type that contains information about the request to get the most
+// recent failure reason for a health check.
+type GetHealthCheckLastFailureReasonInput struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the health check for which you want to retrieve the reason for
+	// the most recent failure.
+	HealthCheckId *string `location:"uri" locationName:"HealthCheckId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetHealthCheckLastFailureReasonInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetHealthCheckLastFailureReasonInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetHealthCheckLastFailureReasonInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetHealthCheckLastFailureReasonInput"} + if s.HealthCheckId == nil { + invalidParams.Add(request.NewErrParamRequired("HealthCheckId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains information about the most recent failure for +// the specified health check. +type GetHealthCheckLastFailureReasonOutput struct { + _ struct{} `type:"structure"` + + // A list that contains one HealthCheckObservation element for each Amazon Route + // 53 health checker. + HealthCheckObservations []*HealthCheckObservation `locationNameList:"HealthCheckObservation" type:"list" required:"true"` +} + +// String returns the string representation +func (s GetHealthCheckLastFailureReasonOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHealthCheckLastFailureReasonOutput) GoString() string { + return s.String() +} + +// A complex type containing information about the specified health check. +type GetHealthCheckOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains the information about the specified health check. + HealthCheck *HealthCheck `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetHealthCheckOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHealthCheckOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about the request to get health +// check status for a health check. +type GetHealthCheckStatusInput struct { + _ struct{} `type:"structure"` + + // If you want Amazon Route 53 to return this resource record set in response + // to a DNS query only when a health check is passing, include the HealthCheckId + // element and specify the ID of the applicable health check. + // + // Amazon Route 53 determines whether a resource record set is healthy by periodically + // sending a request to the endpoint that is specified in the health check. + // If that endpoint returns an HTTP status code of 2xx or 3xx, the endpoint + // is healthy. If the endpoint returns an HTTP status code of 400 or greater, + // or if the endpoint doesn't respond for a certain amount of time, Amazon Route + // 53 considers the endpoint unhealthy and also considers the resource record + // set unhealthy. + // + // The HealthCheckId element is only useful when Amazon Route 53 is choosing + // between two or more resource record sets to respond to a DNS query, and you + // want Amazon Route 53 to base the choice in part on the status of a health + // check. Configuring health checks only makes sense in the following configurations: + // + // You're checking the health of the resource record sets in a weighted, latency, + // geolocation, or failover resource record set, and you specify health check + // IDs for all of the resource record sets. If the health check for one resource + // record set specifies an endpoint that is not healthy, Amazon Route 53 stops + // responding to queries using the value for that resource record set. 
You set + // EvaluateTargetHealth to true for the resource record sets in an alias, weighted + // alias, latency alias, geolocation alias, or failover alias resource record + // set, and you specify health check IDs for all of the resource record sets + // that are referenced by the alias resource record sets. For more information + // about this configuration, see EvaluateTargetHealth. + // + // Amazon Route 53 doesn't check the health of the endpoint specified in the + // resource record set, for example, the endpoint specified by the IP address + // in the Value element. When you add a HealthCheckId element to a resource + // record set, Amazon Route 53 checks the health of the endpoint that you specified + // in the health check. + // + // For geolocation resource record sets, if an endpoint is unhealthy, Amazon + // Route 53 looks for a resource record set for the larger, associated geographic + // region. For example, suppose you have resource record sets for a state in + // the United States, for the United States, for North America, and for all + // locations. If the endpoint for the state resource record set is unhealthy, + // Amazon Route 53 checks the resource record sets for the United States, for + // North America, and for all locations (a resource record set for which the + // value of CountryCode is *), in that order, until it finds a resource record + // set for which the endpoint is healthy. + // + // If your health checks specify the endpoint only by domain name, we recommend + // that you create a separate health check for each endpoint. For example, create + // a health check for each HTTP server that is serving content for www.example.com. + // For the value of FullyQualifiedDomainName, specify the domain name of the + // server (such as us-east-1-www.example.com), not the name of the resource + // record sets (example.com). + // + // In this configuration, if you create a health check for which the value + // of FullyQualifiedDomainName matches the name of the resource record sets + // and then associate the health check with those resource record sets, health + // check results will be unpredictable. + HealthCheckId *string `location:"uri" locationName:"HealthCheckId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetHealthCheckStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHealthCheckStatusInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetHealthCheckStatusInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetHealthCheckStatusInput"} + if s.HealthCheckId == nil { + invalidParams.Add(request.NewErrParamRequired("HealthCheckId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains information about the status of the specified +// health check. +type GetHealthCheckStatusOutput struct { + _ struct{} `type:"structure"` + + // A list that contains one HealthCheckObservation element for each Amazon Route + // 53 health checker. 
+	HealthCheckObservations []*HealthCheckObservation `locationNameList:"HealthCheckObservation" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s GetHealthCheckStatusOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetHealthCheckStatusOutput) GoString() string {
+	return s.String()
+}
+
+// To retrieve a count of all your hosted zones, send a GET request to the /Route
+// 53 API version/hostedzonecount resource.
+type GetHostedZoneCountInput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetHostedZoneCountInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetHostedZoneCountInput) GoString() string {
+	return s.String()
+}
+
+// A complex type that contains the count of hosted zones associated with the
+// current AWS account.
+type GetHostedZoneCountOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The number of hosted zones associated with the current AWS account.
+	HostedZoneCount *int64 `type:"long" required:"true"`
+}
+
+// String returns the string representation
+func (s GetHostedZoneCountOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetHostedZoneCountOutput) GoString() string {
+	return s.String()
+}
+
+// The input for a GetHostedZone request.
+type GetHostedZoneInput struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the hosted zone for which you want to get a list of the name servers
+	// in the delegation set.
+	Id *string `location:"uri" locationName:"Id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetHostedZoneInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetHostedZoneInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetHostedZoneInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetHostedZoneInput"}
+	if s.Id == nil {
+		invalidParams.Add(request.NewErrParamRequired("Id"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// A complex type containing information about the specified hosted zone.
+type GetHostedZoneOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A complex type that contains information about the name servers for the specified
+	// hosted zone.
+	DelegationSet *DelegationSet `type:"structure"`
+
+	// A complex type that contains the information about the specified hosted zone.
+	HostedZone *HostedZone `type:"structure" required:"true"`
+
+	// A complex type that contains information about VPCs associated with the specified
+	// hosted zone.
+	VPCs []*VPC `locationNameList:"VPC" min:"1" type:"list"`
+}
+
+// String returns the string representation
+func (s GetHostedZoneOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetHostedZoneOutput) GoString() string {
+	return s.String()
+}
+
+// The input for a GetReusableDelegationSet request.
+type GetReusableDelegationSetInput struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the reusable delegation set for which you want to get a list of
+	// the name servers.
+ Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetReusableDelegationSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetReusableDelegationSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetReusableDelegationSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetReusableDelegationSetInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type containing information about the specified reusable delegation +// set. +type GetReusableDelegationSetOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains the information about the nameservers for the + // specified delegation set ID. + DelegationSet *DelegationSet `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetReusableDelegationSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetReusableDelegationSetOutput) GoString() string { + return s.String() +} + +// Gets information about a specific traffic policy version. To get the information, +// send a GET request to the /Route 53 API version/trafficpolicy resource, and +// specify the ID and the version of the traffic policy. +type GetTrafficPolicyInput struct { + _ struct{} `type:"structure"` + + // The ID of the traffic policy that you want to get information about. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The version number of the traffic policy that you want to get information + // about. + Version *int64 `location:"uri" locationName:"Version" min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s GetTrafficPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTrafficPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetTrafficPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetTrafficPolicyInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Version == nil { + invalidParams.Add(request.NewErrParamRequired("Version")) + } + if s.Version != nil && *s.Version < 1 { + invalidParams.Add(request.NewErrParamMinValue("Version", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// To retrieve a count of all your traffic policy instances, send a GET request +// to the /Route 53 API version/trafficpolicyinstancecount resource. +type GetTrafficPolicyInstanceCountInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetTrafficPolicyInstanceCountInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTrafficPolicyInstanceCountInput) GoString() string { + return s.String() +} + +// A complex type that contains information about the number of traffic policy +// instances that are associated with the current AWS account. 
+type GetTrafficPolicyInstanceCountOutput struct { + _ struct{} `type:"structure"` + + // The number of traffic policy instances that are associated with the current + // AWS account. + TrafficPolicyInstanceCount *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s GetTrafficPolicyInstanceCountOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTrafficPolicyInstanceCountOutput) GoString() string { + return s.String() +} + +// Gets information about a specified traffic policy instance. +// +// To get information about a traffic policy instance, send a GET request to +// the /Route 53 API version/trafficpolicyinstance/Id resource. +type GetTrafficPolicyInstanceInput struct { + _ struct{} `type:"structure"` + + // The ID of the traffic policy instance that you want to get information about. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetTrafficPolicyInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTrafficPolicyInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetTrafficPolicyInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetTrafficPolicyInstanceInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains information about the resource record sets that +// Amazon Route 53 created based on a specified traffic policy. +type GetTrafficPolicyInstanceOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains settings for the traffic policy instance. + TrafficPolicyInstance *TrafficPolicyInstance `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetTrafficPolicyInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTrafficPolicyInstanceOutput) GoString() string { + return s.String() +} + +// A complex type that contains the response information for the request. +type GetTrafficPolicyOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains settings for the specified traffic policy. + TrafficPolicy *TrafficPolicy `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetTrafficPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTrafficPolicyOutput) GoString() string { + return s.String() +} + +// A complex type that contains identifying information about the health check. +type HealthCheck struct { + _ struct{} `type:"structure"` + + // A unique string that identifies the request to create the health check. + CallerReference *string `min:"1" type:"string" required:"true"` + + // For CLOUDWATCH_METRIC health checks, a complex type that contains information + // about the CloudWatch alarm that you're associating with the health check. + CloudWatchAlarmConfiguration *CloudWatchAlarmConfiguration `type:"structure"` + + // A complex type that contains the health check configuration. 
+	HealthCheckConfig *HealthCheckConfig `type:"structure" required:"true"`
+
+	// The version of the health check. You can optionally pass this value in a
+	// call to UpdateHealthCheck to prevent overwriting another change to the health
+	// check.
+	HealthCheckVersion *int64 `min:"1" type:"long" required:"true"`
+
+	// The ID of the specified health check.
+	Id *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s HealthCheck) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HealthCheck) GoString() string {
+	return s.String()
+}
+
+// A complex type that contains the health check configuration.
+type HealthCheckConfig struct {
+	_ struct{} `type:"structure"`
+
+	// A complex type that contains information to uniquely identify the CloudWatch
+	// alarm that you're associating with a Route 53 health check.
+	AlarmIdentifier *AlarmIdentifier `type:"structure"`
+
+	// For a specified parent health check, a list of HealthCheckId values for the
+	// associated child health checks.
+	ChildHealthChecks []*string `locationNameList:"ChildHealthCheck" type:"list"`
+
+	// Specify whether you want Amazon Route 53 to send the value of FullyQualifiedDomainName
+	// to the endpoint in the client_hello message during TLS negotiation. If you
+	// don't specify a value for EnableSNI, Amazon Route 53 defaults to true when
+	// Type is HTTPS or HTTPS_STR_MATCH and defaults to false when Type is any other
+	// value.
+	EnableSNI *bool `type:"boolean"`
+
+	// The number of consecutive health checks that an endpoint must pass or fail
+	// for Amazon Route 53 to change the current status of the endpoint from unhealthy
+	// to healthy or vice versa.
+	//
+	// Valid values are integers between 1 and 10. For more information, see "How
+	// Amazon Route 53 Determines Whether an Endpoint Is Healthy" in the Amazon
+	// Route 53 Developer Guide.
+	FailureThreshold *int64 `min:"1" type:"integer"`
+
+	// Fully qualified domain name of the instance to be health checked.
+	FullyQualifiedDomainName *string `type:"string"`
+
+	// The minimum number of child health checks that must be healthy for Amazon
+	// Route 53 to consider the parent health check to be healthy. Valid values
+	// are integers between 0 and 256, inclusive.
+	HealthThreshold *int64 `type:"integer"`
+
+	// IP address of the instance being checked.
+	IPAddress *string `type:"string"`
+
+	// The status of the health check when CloudWatch has insufficient data about
+	// the state of the associated alarm. Valid values are Healthy, Unhealthy, and
+	// LastKnownStatus.
+	InsufficientDataHealthStatus *string `type:"string" enum:"InsufficientDataHealthStatus"`
+
+	// A Boolean value that indicates whether the status of the health check should
+	// be inverted. For example, if a health check is healthy but Inverted is True,
+	// then Amazon Route 53 considers the health check to be unhealthy.
+	Inverted *bool `type:"boolean"`
+
+	// A Boolean value that indicates whether you want Amazon Route 53 to measure
+	// the latency between health checkers in multiple AWS regions and your endpoint
+	// and to display CloudWatch latency graphs in the Amazon Route 53 console.
+	MeasureLatency *bool `type:"boolean"`
+
+	// The port on which a connection will be opened to the instance to health check.
+	// For HTTP and HTTP_STR_MATCH this defaults to 80 if the port is not specified.
+	// For HTTPS and HTTPS_STR_MATCH this defaults to 443 if the port is not specified.
+	Port *int64 `min:"1" type:"integer"`
+
+	// A list of HealthCheckRegion values that you want Amazon Route 53 to use to
+	// perform health checks for the specified endpoint. You must specify at least
+	// three regions.
+	Regions []*string `locationNameList:"Region" min:"1" type:"list"`
+
+	// The number of seconds between the time that Amazon Route 53 gets a response
+	// from your endpoint and the time that it sends the next health-check request.
+	//
+	// Each Amazon Route 53 health checker makes requests at this interval. Valid
+	// values are 10 and 30. The default value is 30.
+	RequestInterval *int64 `min:"10" type:"integer"`
+
+	// Path to ping on the instance to check the health. Required for HTTP, HTTPS,
+	// HTTP_STR_MATCH, and HTTPS_STR_MATCH health checks. The HTTP request is issued
+	// to the instance on the given port and path.
+	ResourcePath *string `type:"string"`
+
+	// A string to search for in the body of a health check response. Required for
+	// HTTP_STR_MATCH and HTTPS_STR_MATCH health checks. Amazon Route 53 considers
+	// case when searching for SearchString in the response body.
+	SearchString *string `type:"string"`
+
+	// The type of health check to be performed. Currently supported types are TCP,
+	// HTTP, HTTPS, HTTP_STR_MATCH, HTTPS_STR_MATCH, CALCULATED and CLOUDWATCH_METRIC.
+	Type *string `type:"string" required:"true" enum:"HealthCheckType"`
+}
+
+// String returns the string representation
+func (s HealthCheckConfig) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HealthCheckConfig) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *HealthCheckConfig) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "HealthCheckConfig"}
+	if s.FailureThreshold != nil && *s.FailureThreshold < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("FailureThreshold", 1))
+	}
+	if s.Port != nil && *s.Port < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("Port", 1))
+	}
+	if s.Regions != nil && len(s.Regions) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Regions", 1))
+	}
+	if s.RequestInterval != nil && *s.RequestInterval < 10 {
+		invalidParams.Add(request.NewErrParamMinValue("RequestInterval", 10))
+	}
+	if s.Type == nil {
+		invalidParams.Add(request.NewErrParamRequired("Type"))
+	}
+	if s.AlarmIdentifier != nil {
+		if err := s.AlarmIdentifier.Validate(); err != nil {
+			invalidParams.AddNested("AlarmIdentifier", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// A complex type that contains the IP address of an Amazon Route 53 health checker
+// and the reason for the health check status.
+type HealthCheckObservation struct {
+	_ struct{} `type:"structure"`
+
+	// The IP address of the Amazon Route 53 health checker that performed this
+	// health check.
+	IPAddress *string `type:"string"`
+
+	// The HealthCheckRegion of the Amazon Route 53 health checker that performed
+	// this health check.
+	Region *string `min:"1" type:"string" enum:"HealthCheckRegion"`
+
+	// A complex type that contains information about the health check status for
+	// the current observation.
+	StatusReport *StatusReport `type:"structure"`
+}
+
+// String returns the string representation
+func (s HealthCheckObservation) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HealthCheckObservation) GoString() string {
+	return s.String()
+}
+
+// A complex type that contains information about the specified hosted zone.
+type HostedZone struct {
+	_ struct{} `type:"structure"`
+
+	// A unique string that identifies the request to create the hosted zone.
+	CallerReference *string `min:"1" type:"string" required:"true"`
+
+	// A complex type that contains the Comment element.
+	Config *HostedZoneConfig `type:"structure"`
+
+	// The ID of the specified hosted zone.
+	Id *string `type:"string" required:"true"`
+
+	// The name of the domain. This must be a fully-specified domain, for example,
+	// www.example.com. The trailing dot is optional; Amazon Route 53 assumes that
+	// the domain name is fully qualified. This means that Amazon Route 53 treats
+	// www.example.com (without a trailing dot) and www.example.com. (with a trailing
+	// dot) as identical.
+	//
+	// This is the name you have registered with your DNS registrar. You should
+	// ask your registrar to change the authoritative name servers for your domain
+	// to the set of NameServers elements returned in DelegationSet.
+	Name *string `type:"string" required:"true"`
+
+	// Total number of resource record sets in the hosted zone.
+	ResourceRecordSetCount *int64 `type:"long"`
+}
+
+// String returns the string representation
+func (s HostedZone) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HostedZone) GoString() string {
+	return s.String()
+}
+
+// A complex type that contains an optional comment about your hosted zone.
+// If you don't want to specify a comment, you can omit the HostedZoneConfig
+// and Comment elements from the XML document.
+type HostedZoneConfig struct {
+	_ struct{} `type:"structure"`
+
+	// An optional comment about your hosted zone. If you don't want to specify
+	// a comment, you can omit the HostedZoneConfig and Comment elements from the
+	// XML document.
+	Comment *string `type:"string"`
+
+	// GetHostedZone and ListHostedZone responses: A Boolean value that indicates
+	// whether a hosted zone is private.
+	//
+	// CreateHostedZone requests: When you're creating a private hosted zone (when
+	// you specify values for VPCId and VPCRegion), you can optionally specify true
+	// for PrivateZone.
+	PrivateZone *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s HostedZoneConfig) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HostedZoneConfig) GoString() string {
+	return s.String()
+}
+
+// The input for a ListChangeBatchesByHostedZone request.
+type ListChangeBatchesByHostedZoneInput struct {
+	_ struct{} `deprecated:"true" type:"structure"`
+
+	// The end of the time period you want to see changes for.
+	EndDate *string `location:"querystring" locationName:"endDate" deprecated:"true" type:"string" required:"true"`
+
+	// The ID of the hosted zone that you want to see changes for.
+	HostedZoneId *string `location:"uri" locationName:"Id" type:"string" required:"true"`
+
+	// The page marker.
+	Marker *string `location:"querystring" locationName:"marker" type:"string"`
+
+	// The maximum number of items on a page.
+	MaxItems *string `location:"querystring" locationName:"maxItems" type:"string"`
+
+	// The start of the time period you want to see changes for.
+	StartDate *string `location:"querystring" locationName:"startDate" deprecated:"true" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListChangeBatchesByHostedZoneInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListChangeBatchesByHostedZoneInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListChangeBatchesByHostedZoneInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListChangeBatchesByHostedZoneInput"}
+	if s.EndDate == nil {
+		invalidParams.Add(request.NewErrParamRequired("EndDate"))
+	}
+	if s.HostedZoneId == nil {
+		invalidParams.Add(request.NewErrParamRequired("HostedZoneId"))
+	}
+	if s.StartDate == nil {
+		invalidParams.Add(request.NewErrParamRequired("StartDate"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The output of a ListChangeBatchesByHostedZone request.
+type ListChangeBatchesByHostedZoneOutput struct {
+	_ struct{} `deprecated:"true" type:"structure"`
+
+	// The change batches within the given hosted zone and time period.
+	ChangeBatchRecords []*ChangeBatchRecord `locationNameList:"ChangeBatchRecord" min:"1" deprecated:"true" type:"list" required:"true"`
+
+	// A flag that indicates if there are more change batches to list.
+	IsTruncated *bool `type:"boolean"`
+
+	// The page marker.
+	Marker *string `type:"string" required:"true"`
+
+	// The maximum number of items on a page.
+	MaxItems *string `type:"string" required:"true"`
+
+	// The next page marker.
+	NextMarker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListChangeBatchesByHostedZoneOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListChangeBatchesByHostedZoneOutput) GoString() string {
+	return s.String()
+}
+
+// The input for a ListChangeBatchesByRRSet request.
+type ListChangeBatchesByRRSetInput struct {
+	_ struct{} `deprecated:"true" type:"structure"`
+
+	// The end of the time period you want to see changes for.
+	EndDate *string `location:"querystring" locationName:"endDate" deprecated:"true" type:"string" required:"true"`
+
+	// The ID of the hosted zone that you want to see changes for.
+	HostedZoneId *string `location:"uri" locationName:"Id" type:"string" required:"true"`
+
+	// The page marker.
+	Marker *string `location:"querystring" locationName:"marker" type:"string"`
+
+	// The maximum number of items on a page.
+	MaxItems *string `location:"querystring" locationName:"maxItems" type:"string"`
+
+	// The name of the RRSet that you want to see changes for.
+	Name *string `location:"querystring" locationName:"rrSet_name" type:"string" required:"true"`
+
+	// The identifier of the RRSet that you want to see changes for.
+	SetIdentifier *string `location:"querystring" locationName:"identifier" min:"1" type:"string"`
+
+	// The start of the time period you want to see changes for.
+	StartDate *string `location:"querystring" locationName:"startDate" deprecated:"true" type:"string" required:"true"`
+
+	// The type of the RRSet that you want to see changes for.
+	Type *string `location:"querystring" locationName:"type" type:"string" required:"true" enum:"RRType"`
+}
+
+// String returns the string representation
+func (s ListChangeBatchesByRRSetInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListChangeBatchesByRRSetInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListChangeBatchesByRRSetInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListChangeBatchesByRRSetInput"}
+	if s.EndDate == nil {
+		invalidParams.Add(request.NewErrParamRequired("EndDate"))
+	}
+	if s.HostedZoneId == nil {
+		invalidParams.Add(request.NewErrParamRequired("HostedZoneId"))
+	}
+	if s.Name == nil {
+		invalidParams.Add(request.NewErrParamRequired("Name"))
+	}
+	if s.SetIdentifier != nil && len(*s.SetIdentifier) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("SetIdentifier", 1))
+	}
+	if s.StartDate == nil {
+		invalidParams.Add(request.NewErrParamRequired("StartDate"))
+	}
+	if s.Type == nil {
+		invalidParams.Add(request.NewErrParamRequired("Type"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The output of a ListChangeBatchesByRRSet request.
+type ListChangeBatchesByRRSetOutput struct {
+	_ struct{} `deprecated:"true" type:"structure"`
+
+	// The change batches within the given hosted zone and time period.
+	ChangeBatchRecords []*ChangeBatchRecord `locationNameList:"ChangeBatchRecord" min:"1" deprecated:"true" type:"list" required:"true"`
+
+	// A flag that indicates if there are more change batches to list.
+	IsTruncated *bool `type:"boolean"`
+
+	// The page marker.
+	Marker *string `type:"string" required:"true"`
+
+	// The maximum number of items on a page.
+	MaxItems *string `type:"string" required:"true"`
+
+	// The next page marker.
+	NextMarker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListChangeBatchesByRRSetOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListChangeBatchesByRRSetOutput) GoString() string {
+	return s.String()
+}
+
+// The input for a ListGeoLocations request.
+type ListGeoLocationsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The maximum number of geo locations you want in the response body.
+	MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"`
+
+	// The first continent code in the lexicographic ordering of geo locations that
+	// you want the ListGeoLocations request to list. For non-continent geo locations,
+	// this should be null.
+	//
+	// Valid values: AF | AN | AS | EU | OC | NA | SA
+	//
+	// Constraint: Specifying ContinentCode with either CountryCode or SubdivisionCode
+	// returns an InvalidInput error.
+	StartContinentCode *string `location:"querystring" locationName:"startcontinentcode" min:"2" type:"string"`
+
+	// The first country code in the lexicographic ordering of geo locations that
+	// you want the ListGeoLocations request to list.
+	//
+	// The default geo location uses a * for the country code. All other country
+	// codes follow the ISO 3166 two-character code.
+	StartCountryCode *string `location:"querystring" locationName:"startcountrycode" min:"1" type:"string"`
+
+	// The first subdivision code in the lexicographic ordering of geo locations
+	// that you want the ListGeoLocations request to list.
+ // + // Constraint: Specifying SubdivisionCode without CountryCode returns an InvalidInput + // error. + StartSubdivisionCode *string `location:"querystring" locationName:"startsubdivisioncode" min:"1" type:"string"` +} + +// String returns the string representation +func (s ListGeoLocationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGeoLocationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListGeoLocationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListGeoLocationsInput"} + if s.StartContinentCode != nil && len(*s.StartContinentCode) < 2 { + invalidParams.Add(request.NewErrParamMinLen("StartContinentCode", 2)) + } + if s.StartCountryCode != nil && len(*s.StartCountryCode) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StartCountryCode", 1)) + } + if s.StartSubdivisionCode != nil && len(*s.StartSubdivisionCode) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StartSubdivisionCode", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains information about the geo locations that are +// returned by the request and information about the response. +type ListGeoLocationsOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains information about the geo locations that are + // returned by the request. + GeoLocationDetailsList []*GeoLocationDetails `locationNameList:"GeoLocationDetails" type:"list" required:"true"` + + // A flag that indicates whether there are more geo locations to be listed. + // If your results were truncated, you can make a follow-up request for the + // next page of results by using the values included in the NextContinentCode, + // NextCountryCode, and NextSubdivisionCode elements. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The maximum number of records you requested. The maximum value of MaxItems + // is 100. + MaxItems *string `type:"string" required:"true"` + + // If the results were truncated, the continent code of the next geo location + // in the list. This element is present only if IsTruncated is true and the + // next geo location to list is a continent location. + NextContinentCode *string `min:"2" type:"string"` + + // If the results were truncated, the country code of the next geo location + // in the list. This element is present only if IsTruncated is true and the + // next geo location to list is not a continent location. + NextCountryCode *string `min:"1" type:"string"` + + // If the results were truncated, the subdivision code of the next geo location + // in the list. This element is present only if IsTruncated is true and the + // next geo location has a subdivision. + NextSubdivisionCode *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListGeoLocationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGeoLocationsOutput) GoString() string { + return s.String() +} + +// To retrieve a list of your health checks, send a GET request to the /Route +// 53 API version/healthcheck resource. The response to this request includes +// a HealthChecks element with zero or more HealthCheck child elements. By default, +// the list of health checks is displayed on a single page. 
You can control +// the length of the page that is displayed by using the MaxItems parameter. +// You can use the Marker parameter to control the health check that the list +// begins with. +// +// Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to +// a value greater than 100, Amazon Route 53 returns only the first 100. +type ListHealthChecksInput struct { + _ struct{} `type:"structure"` + + // If the request returned more than one page of results, submit another request + // and specify the value of NextMarker from the last response in the marker + // parameter to get the next page of results. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // Specify the maximum number of health checks to return per page of results. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` +} + +// String returns the string representation +func (s ListHealthChecksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListHealthChecksInput) GoString() string { + return s.String() +} + +// A complex type that contains the response for the request. +type ListHealthChecksOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains information about the health checks associated + // with the current AWS account. + HealthChecks []*HealthCheck `locationNameList:"HealthCheck" type:"list" required:"true"` + + // A flag indicating whether there are more health checks to be listed. If your + // results were truncated, you can make a follow-up request for the next page + // of results by using the Marker element. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // If the request returned more than one page of results, submit another request + // and specify the value of NextMarker from the last response in the marker + // parameter to get the next page of results. + Marker *string `type:"string" required:"true"` + + // The maximum number of health checks to be included in the response body. + // If the number of health checks associated with this AWS account exceeds MaxItems, + // the value of IsTruncated in the response is true. Call ListHealthChecks again + // and specify the value of NextMarker from the last response in the Marker + // element of the next request to get the next page of results. + MaxItems *string `type:"string" required:"true"` + + // Indicates where to continue listing health checks. If IsTruncated is true, + // make another request to ListHealthChecks and include the value of the NextMarker + // element in the Marker element to get the next page of results. + NextMarker *string `type:"string"` +} + +// String returns the string representation +func (s ListHealthChecksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListHealthChecksOutput) GoString() string { + return s.String() +} + +// To retrieve a list of your hosted zones in lexicographic order, send a GET +// request to the /Route 53 API version/hostedzonesbyname resource. The response +// to this request includes a HostedZones element with zero or more HostedZone +// child elements lexicographically ordered by DNS name. By default, the list +// of hosted zones is displayed on a single page. You can control the length +// of the page that is displayed by using the MaxItems parameter. 
You can use +// the DNSName and HostedZoneId parameters to control the hosted zone that the +// list begins with. +type ListHostedZonesByNameInput struct { + _ struct{} `type:"structure"` + + // The first name in the lexicographic ordering of domain names that you want + // the ListHostedZonesByNameRequest request to list. + // + // If the request returned more than one page of results, submit another request + // and specify the value of NextDNSName and NextHostedZoneId from the last response + // in the DNSName and HostedZoneId parameters to get the next page of results. + DNSName *string `location:"querystring" locationName:"dnsname" type:"string"` + + // If the request returned more than one page of results, submit another request + // and specify the value of NextDNSName and NextHostedZoneId from the last response + // in the DNSName and HostedZoneId parameters to get the next page of results. + HostedZoneId *string `location:"querystring" locationName:"hostedzoneid" type:"string"` + + // Specify the maximum number of hosted zones to return per page of results. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` +} + +// String returns the string representation +func (s ListHostedZonesByNameInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListHostedZonesByNameInput) GoString() string { + return s.String() +} + +// A complex type that contains the response for the request. +type ListHostedZonesByNameOutput struct { + _ struct{} `type:"structure"` + + // The DNSName value sent in the request. + DNSName *string `type:"string"` + + // The HostedZoneId value sent in the request. + HostedZoneId *string `type:"string"` + + // A complex type that contains information about the hosted zones associated + // with the current AWS account. + HostedZones []*HostedZone `locationNameList:"HostedZone" type:"list" required:"true"` + + // A flag indicating whether there are more hosted zones to be listed. If your + // results were truncated, you can make a follow-up request for the next page + // of results by using the NextDNSName and NextHostedZoneId elements. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The maximum number of hosted zones to be included in the response body. If + // the number of hosted zones associated with this AWS account exceeds MaxItems, + // the value of IsTruncated in the ListHostedZonesByNameResponse is true. Call + // ListHostedZonesByName again and specify the value of NextDNSName and NextHostedZoneId + // elements from the previous response to get the next page of results. + MaxItems *string `type:"string" required:"true"` + + // If the value of IsTruncated in the ListHostedZonesByNameResponse is true, + // there are more hosted zones associated with the current AWS account. To get + // the next page of results, make another request to ListHostedZonesByName. + // Specify the value of NextDNSName in the DNSName parameter. Specify NextHostedZoneId + // in the HostedZoneId parameter. + NextDNSName *string `type:"string"` + + // If the value of IsTruncated in the ListHostedZonesByNameResponse is true, + // there are more hosted zones associated with the current AWS account. To get + // the next page of results, make another request to ListHostedZonesByName. + // Specify the value of NextDNSName in the DNSName parameter. Specify NextHostedZoneId + // in the HostedZoneId parameter. 
+ NextHostedZoneId *string `type:"string"` +} + +// String returns the string representation +func (s ListHostedZonesByNameOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListHostedZonesByNameOutput) GoString() string { + return s.String() +} + +// To retrieve a list of your hosted zones, send a GET request to the /Route +// 53 API version/hostedzone resource. The response to this request includes +// a HostedZones element with zero or more HostedZone child elements. By default, +// the list of hosted zones is displayed on a single page. You can control the +// length of the page that is displayed by using the MaxItems parameter. You +// can use the Marker parameter to control the hosted zone that the list begins +// with. +// +// Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to a +// value greater than 100, Amazon Route 53 returns only the first 100. +type ListHostedZonesInput struct { + _ struct{} `type:"structure"` + + DelegationSetId *string `location:"querystring" locationName:"delegationsetid" type:"string"` + + // If the request returned more than one page of results, submit another request + // and specify the value of NextMarker from the last response in the marker + // parameter to get the next page of results. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // Specify the maximum number of hosted zones to return per page of results. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` +} + +// String returns the string representation +func (s ListHostedZonesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListHostedZonesInput) GoString() string { + return s.String() +} + +// A complex type that contains the response for the request. +type ListHostedZonesOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains information about the hosted zones associated + // with the current AWS account. + HostedZones []*HostedZone `locationNameList:"HostedZone" type:"list" required:"true"` + + // A flag indicating whether there are more hosted zones to be listed. If your + // results were truncated, you can make a follow-up request for the next page + // of results by using the Marker element. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // If the request returned more than one page of results, submit another request + // and specify the value of NextMarker from the last response in the marker + // parameter to get the next page of results. + Marker *string `type:"string" required:"true"` + + // The maximum number of hosted zones to be included in the response body. If + // the number of hosted zones associated with this AWS account exceeds MaxItems, + // the value of IsTruncated in the response is true. Call ListHostedZones again + // and specify the value of NextMarker in the Marker parameter to get the next + // page of results. + MaxItems *string `type:"string" required:"true"` + + // Indicates where to continue listing hosted zones. If IsTruncated is true, + // make another request to ListHostedZones and include the value of the NextMarker + // element in the Marker element to get the next page of results. 
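+	//
+	// A minimal pagination sketch for this Marker/NextMarker protocol (illustrative
+	// only; svc is assumed to be a *route53.Route53 client, and error handling
+	// is elided):
+	//
+	//	in := &route53.ListHostedZonesInput{}
+	//	for {
+	//		out, err := svc.ListHostedZones(in)
+	//		if err != nil {
+	//			break
+	//		}
+	//		for _, zone := range out.HostedZones {
+	//			fmt.Println(aws.StringValue(zone.Name))
+	//		}
+	//		if !aws.BoolValue(out.IsTruncated) {
+	//			break
+	//		}
+	//		in.Marker = out.NextMarker
+	//	}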
+ NextMarker *string `type:"string"` +} + +// String returns the string representation +func (s ListHostedZonesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListHostedZonesOutput) GoString() string { + return s.String() +} + +// The input for a ListResourceRecordSets request. +type ListResourceRecordSetsInput struct { + _ struct{} `type:"structure"` + + // The ID of the hosted zone that contains the resource record sets that you + // want to get. + HostedZoneId *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The maximum number of records you want in the response body. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` + + // Weighted resource record sets only: If results were truncated for a given + // DNS name and type, specify the value of NextRecordIdentifier from the previous + // response to get the next resource record set that has the current DNS name + // and type. + StartRecordIdentifier *string `location:"querystring" locationName:"identifier" min:"1" type:"string"` + + // The first name in the lexicographic ordering of domain names that you want + // the ListResourceRecordSets request to list. + StartRecordName *string `location:"querystring" locationName:"name" type:"string"` + + // The DNS type at which to begin the listing of resource record sets. + // + // Valid values: A | AAAA | CNAME | MX | NS | PTR | SOA | SPF | SRV | TXT + // + // Values for Weighted Resource Record Sets: A | AAAA | CNAME | TXT + // + // Values for Regional Resource Record Sets: A | AAAA | CNAME | TXT + // + // Values for Alias Resource Record Sets: A | AAAA + // + // Constraint: Specifying type without specifying name returns an InvalidInput + // error. + StartRecordType *string `location:"querystring" locationName:"type" type:"string" enum:"RRType"` +} + +// String returns the string representation +func (s ListResourceRecordSetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListResourceRecordSetsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListResourceRecordSetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListResourceRecordSetsInput"} + if s.HostedZoneId == nil { + invalidParams.Add(request.NewErrParamRequired("HostedZoneId")) + } + if s.StartRecordIdentifier != nil && len(*s.StartRecordIdentifier) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StartRecordIdentifier", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains information about the resource record sets that +// are returned by the request and information about the response. +type ListResourceRecordSetsOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more resource record sets to be listed. + // If your results were truncated, you can make a follow-up request for the + // next page of results by using the NextRecordName element. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The maximum number of records you requested. The maximum value of MaxItems + // is 100. 
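+	//
+	// A minimal request sketch (illustrative only; svc is assumed to be a
+	// *route53.Route53 client, and the hosted zone ID is a placeholder):
+	//
+	//	out, err := svc.ListResourceRecordSets(&route53.ListResourceRecordSetsInput{
+	//		HostedZoneId:    aws.String("Z3M3LMPEXAMPLE"),
+	//		StartRecordName: aws.String("www.example.com."),
+	//		StartRecordType: aws.String("A"),
+	//		MaxItems:        aws.String("100"),
+	//	})
+	//	if err == nil {
+	//		for _, rrset := range out.ResourceRecordSets {
+	//			fmt.Println(aws.StringValue(rrset.Name), aws.StringValue(rrset.Type))
+	//		}
+	//	}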
+ MaxItems *string `type:"string" required:"true"` + + // Weighted resource record sets only: If results were truncated for a given + // DNS name and type, the value of SetIdentifier for the next resource record + // set that has the current DNS name and type. + NextRecordIdentifier *string `min:"1" type:"string"` + + // If the results were truncated, the name of the next record in the list. This + // element is present only if IsTruncated is true. + NextRecordName *string `type:"string"` + + // If the results were truncated, the type of the next record in the list. This + // element is present only if IsTruncated is true. + NextRecordType *string `type:"string" enum:"RRType"` + + // A complex type that contains information about the resource record sets that + // are returned by the request. + ResourceRecordSets []*ResourceRecordSet `locationNameList:"ResourceRecordSet" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListResourceRecordSetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListResourceRecordSetsOutput) GoString() string { + return s.String() +} + +// To retrieve a list of your reusable delegation sets, send a GET request to +// the /Route 53 API version/delegationset resource. The response to this request +// includes a DelegationSets element with zero or more DelegationSet child elements. +// By default, the list of reusable delegation sets is displayed on a single +// page. You can control the length of the page that is displayed by using the +// MaxItems parameter. You can use the Marker parameter to control the delegation +// set that the list begins with. +// +// Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to +// a value greater than 100, Amazon Route 53 returns only the first 100. +type ListReusableDelegationSetsInput struct { + _ struct{} `type:"structure"` + + // If the request returned more than one page of results, submit another request + // and specify the value of NextMarker from the last response in the marker + // parameter to get the next page of results. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // Specify the maximum number of reusable delegation sets to return per page + // of results. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` +} + +// String returns the string representation +func (s ListReusableDelegationSetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListReusableDelegationSetsInput) GoString() string { + return s.String() +} + +// A complex type that contains the response for the request. +type ListReusableDelegationSetsOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains information about the reusable delegation sets + // associated with the current AWS account. + DelegationSets []*DelegationSet `locationNameList:"DelegationSet" type:"list" required:"true"` + + // A flag indicating whether there are more reusable delegation sets to be listed. + // If your results were truncated, you can make a follow-up request for the + // next page of results by using the Marker element. 
+ // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // If the request returned more than one page of results, submit another request + // and specify the value of NextMarker from the last response in the marker + // parameter to get the next page of results. + Marker *string `type:"string" required:"true"` + + // The maximum number of reusable delegation sets to be included in the response + // body. If the number of reusable delegation sets associated with this AWS + // account exceeds MaxItems, the value of IsTruncated in the response is true. + // To get the next page of results, call ListReusableDelegationSets again and + // specify the value of NextMarker from the previous response in the Marker + // element of the request. + MaxItems *string `type:"string" required:"true"` + + // Indicates where to continue listing reusable delegation sets. If IsTruncated + // is true, make another request to ListReusableDelegationSets and include the + // value of the NextMarker element in the Marker element of the previous response + // to get the next page of results. + NextMarker *string `type:"string"` +} + +// String returns the string representation +func (s ListReusableDelegationSetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListReusableDelegationSetsOutput) GoString() string { + return s.String() +} + +// A complex type containing information about a request for a list of the tags +// that are associated with an individual resource. +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The ID of the resource for which you want to retrieve tags. + ResourceId *string `location:"uri" locationName:"ResourceId" type:"string" required:"true"` + + // The type of the resource. + // + // - The resource type for health checks is healthcheck. + // + // - The resource type for hosted zones is hostedzone. + ResourceType *string `location:"uri" locationName:"ResourceType" type:"string" required:"true" enum:"TagResourceType"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.ResourceType == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type containing tags for the specified resource. +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // A ResourceTagSet containing tags associated with the specified resource. + ResourceTagSet *ResourceTagSet `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// A complex type containing information about a request for a list of the tags +// that are associated with up to 10 specified resources. 
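+//
+// A minimal request sketch (illustrative only; svc is assumed to be a
+// *route53.Route53 client, the IDs are placeholders, and error handling is
+// elided):
+//
+//	out, err := svc.ListTagsForResources(&route53.ListTagsForResourcesInput{
+//		ResourceType: aws.String("healthcheck"),
+//		ResourceIds:  []*string{aws.String("hc-id-1"), aws.String("hc-id-2")},
+//	})
+//	if err == nil {
+//		for _, set := range out.ResourceTagSets {
+//			fmt.Println(set)
+//		}
+//	}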
+type ListTagsForResourcesInput struct { + _ struct{} `locationName:"ListTagsForResourcesRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // A complex type that contains the ResourceId element for each resource for + // which you want to get a list of tags. + ResourceIds []*string `locationNameList:"ResourceId" min:"1" type:"list" required:"true"` + + // The type of the resources. + // + // - The resource type for health checks is healthcheck. + // + // - The resource type for hosted zones is hostedzone. + ResourceType *string `location:"uri" locationName:"ResourceType" type:"string" required:"true" enum:"TagResourceType"` +} + +// String returns the string representation +func (s ListTagsForResourcesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourcesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourcesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourcesInput"} + if s.ResourceIds == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceIds")) + } + if s.ResourceIds != nil && len(s.ResourceIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceIds", 1)) + } + if s.ResourceType == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type containing tags for the specified resources. +type ListTagsForResourcesOutput struct { + _ struct{} `type:"structure"` + + // A list of ResourceTagSets containing tags associated with the specified resources. + ResourceTagSets []*ResourceTagSet `locationNameList:"ResourceTagSet" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourcesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourcesOutput) GoString() string { + return s.String() +} + +// A complex type that contains the information about the request to list the +// traffic policies that are associated with the current AWS account. +type ListTrafficPoliciesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of traffic policies to be included in the response body + // for this request. If you have more than MaxItems traffic policies, the value + // of the IsTruncated element in the response is true, and the value of the + // TrafficPolicyIdMarker element is the ID of the first traffic policy in the + // next group of MaxItems traffic policies. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` + + // For your first request to ListTrafficPolicies, do not include the TrafficPolicyIdMarker + // parameter. + // + // If you have more traffic policies than the value of MaxItems, ListTrafficPolicies + // returns only the first MaxItems traffic policies. To get the next group of + // MaxItems policies, submit another request to ListTrafficPolicies. For the + // value of TrafficPolicyIdMarker, specify the value of the TrafficPolicyIdMarker + // element that was returned in the previous response. + // + // Policies are listed in the order in which they were created. 
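+	//
+	// A minimal pagination sketch for this marker protocol (illustrative only;
+	// svc is assumed to be a *route53.Route53 client, and error handling is
+	// elided):
+	//
+	//	in := &route53.ListTrafficPoliciesInput{}
+	//	for {
+	//		out, err := svc.ListTrafficPolicies(in)
+	//		if err != nil {
+	//			break
+	//		}
+	//		for _, summary := range out.TrafficPolicySummaries {
+	//			fmt.Println(aws.StringValue(summary.Name))
+	//		}
+	//		if !aws.BoolValue(out.IsTruncated) {
+	//			break
+	//		}
+	//		in.TrafficPolicyIdMarker = out.TrafficPolicyIdMarker
+	//	}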
+ TrafficPolicyIdMarker *string `location:"querystring" locationName:"trafficpolicyid" type:"string"` +} + +// String returns the string representation +func (s ListTrafficPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPoliciesInput) GoString() string { + return s.String() +} + +// A complex type that contains the response information for the request. +type ListTrafficPoliciesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more traffic policies to be listed. + // If the response was truncated, you can get the next group of MaxItems traffic + // policies by calling ListTrafficPolicies again and specifying the value of + // the TrafficPolicyIdMarker element in the TrafficPolicyIdMarker request parameter. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The value that you specified for the MaxItems parameter in the call to ListTrafficPolicies + // that produced the current response. + MaxItems *string `type:"string" required:"true"` + + // If the value of IsTruncated is true, TrafficPolicyIdMarker is the ID of the + // first traffic policy in the next group of MaxItems traffic policies. + TrafficPolicyIdMarker *string `type:"string" required:"true"` + + // A list that contains one TrafficPolicySummary element for each traffic policy + // that was created by the current AWS account. + TrafficPolicySummaries []*TrafficPolicySummary `locationNameList:"TrafficPolicySummary" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListTrafficPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPoliciesOutput) GoString() string { + return s.String() +} + +// A request for the traffic policy instances that you created in a specified +// hosted zone. +type ListTrafficPolicyInstancesByHostedZoneInput struct { + _ struct{} `type:"structure"` + + // The ID of the hosted zone for which you want to list traffic policy instances. + HostedZoneId *string `location:"querystring" locationName:"id" type:"string" required:"true"` + + // The maximum number of traffic policy instances to be included in the response + // body for this request. If you have more than MaxItems traffic policy instances, + // the value of the IsTruncated element in the response is true, and the values + // of HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker + // represent the first traffic policy instance in the next group of MaxItems + // traffic policy instances. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` + + // For the first request to ListTrafficPolicyInstancesByHostedZone, omit this + // value. + // + // If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceNameMarker + // is the name of the first traffic policy instance in the next group of MaxItems + // traffic policy instances. + // + // If the value of IsTruncated in the previous response was false, there are + // no more traffic policy instances to get for this hosted zone. + // + // If the value of IsTruncated in the previous response was false, omit this + // value. + TrafficPolicyInstanceNameMarker *string `location:"querystring" locationName:"trafficpolicyinstancename" type:"string"` + + // For the first request to ListTrafficPolicyInstancesByHostedZone, omit this + // value. 
+ // + // If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceTypeMarker + // is the DNS type of the first traffic policy instance in the next group of + // MaxItems traffic policy instances. + // + // If the value of IsTruncated in the previous response was false, there are + // no more traffic policy instances to get for this hosted zone. + TrafficPolicyInstanceTypeMarker *string `location:"querystring" locationName:"trafficpolicyinstancetype" type:"string" enum:"RRType"` +} + +// String returns the string representation +func (s ListTrafficPolicyInstancesByHostedZoneInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPolicyInstancesByHostedZoneInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTrafficPolicyInstancesByHostedZoneInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTrafficPolicyInstancesByHostedZoneInput"} + if s.HostedZoneId == nil { + invalidParams.Add(request.NewErrParamRequired("HostedZoneId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains the response information for the request. +type ListTrafficPolicyInstancesByHostedZoneOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more traffic policy instances to + // be listed. If the response was truncated, you can get the next group of MaxItems + // traffic policy instances by calling ListTrafficPolicyInstancesByHostedZone + // again and specifying the values of the HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, + // and TrafficPolicyInstanceTypeMarker elements in the corresponding request + // parameters. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The value that you specified for the MaxItems parameter in the call to ListTrafficPolicyInstancesByHostedZone + // that produced the current response. + MaxItems *string `type:"string" required:"true"` + + // If IsTruncated is true, TrafficPolicyInstanceNameMarker is the name of the + // first traffic policy instance in the next group of MaxItems traffic policy + // instances. + TrafficPolicyInstanceNameMarker *string `type:"string"` + + // If IsTruncated is true, TrafficPolicyInstanceTypeMarker is the DNS type of + // the resource record sets that are associated with the first traffic policy + // instance in the next group of MaxItems traffic policy instances. + TrafficPolicyInstanceTypeMarker *string `type:"string" enum:"RRType"` + + // A list that contains one TrafficPolicyInstance element for each traffic policy + // instance that matches the elements in the request. + TrafficPolicyInstances []*TrafficPolicyInstance `locationNameList:"TrafficPolicyInstance" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListTrafficPolicyInstancesByHostedZoneOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPolicyInstancesByHostedZoneOutput) GoString() string { + return s.String() +} + +// A complex type that contains the information about the request to list your +// traffic policy instances. +type ListTrafficPolicyInstancesByPolicyInput struct { + _ struct{} `type:"structure"` + + // For the first request to ListTrafficPolicyInstancesByPolicy, omit this value. 
+ // + // If the value of IsTruncated in the previous response was true, HostedZoneIdMarker + // is the ID of the hosted zone for the first traffic policy instance in the + // next group of MaxItems traffic policy instances. + // + // If the value of IsTruncated in the previous response was false, there are + // no more traffic policy instances to get for this hosted zone. + // + // If the value of IsTruncated in the previous response was false, omit this + // value. + HostedZoneIdMarker *string `location:"querystring" locationName:"hostedzoneid" type:"string"` + + // The maximum number of traffic policy instances to be included in the response + // body for this request. If you have more than MaxItems traffic policy instances, + // the value of the IsTruncated element in the response is true, and the values + // of HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker + // represent the first traffic policy instance in the next group of MaxItems + // traffic policy instances. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` + + // The ID of the traffic policy for which you want to list traffic policy instances. + TrafficPolicyId *string `location:"querystring" locationName:"id" type:"string" required:"true"` + + // For the first request to ListTrafficPolicyInstancesByPolicy, omit this value. + // + // If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceNameMarker + // is the name of the first traffic policy instance in the next group of MaxItems + // traffic policy instances. + // + // If the value of IsTruncated in the previous response was false, there are + // no more traffic policy instances to get for this hosted zone. + // + // If the value of IsTruncated in the previous response was false, omit this + // value. + TrafficPolicyInstanceNameMarker *string `location:"querystring" locationName:"trafficpolicyinstancename" type:"string"` + + // For the first request to ListTrafficPolicyInstancesByPolicy, omit this value. + // + // If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceTypeMarker + // is the DNS type of the first traffic policy instance in the next group of + // MaxItems traffic policy instances. + // + // If the value of IsTruncated in the previous response was false, there are + // no more traffic policy instances to get for this hosted zone. + TrafficPolicyInstanceTypeMarker *string `location:"querystring" locationName:"trafficpolicyinstancetype" type:"string" enum:"RRType"` + + // The version of the traffic policy for which you want to list traffic policy + // instances. The version must be associated with the traffic policy that is + // specified by TrafficPolicyId. + TrafficPolicyVersion *int64 `location:"querystring" locationName:"version" min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s ListTrafficPolicyInstancesByPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPolicyInstancesByPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListTrafficPolicyInstancesByPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTrafficPolicyInstancesByPolicyInput"} + if s.TrafficPolicyId == nil { + invalidParams.Add(request.NewErrParamRequired("TrafficPolicyId")) + } + if s.TrafficPolicyVersion == nil { + invalidParams.Add(request.NewErrParamRequired("TrafficPolicyVersion")) + } + if s.TrafficPolicyVersion != nil && *s.TrafficPolicyVersion < 1 { + invalidParams.Add(request.NewErrParamMinValue("TrafficPolicyVersion", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains the response information for the request. +type ListTrafficPolicyInstancesByPolicyOutput struct { + _ struct{} `type:"structure"` + + // If IsTruncated is true, HostedZoneIdMarker is the ID of the hosted zone of + // the first traffic policy instance in the next group of MaxItems traffic policy + // instances. + HostedZoneIdMarker *string `type:"string"` + + // A flag that indicates whether there are more traffic policy instances to + // be listed. If the response was truncated, you can get the next group of MaxItems + // traffic policy instances by calling ListTrafficPolicyInstancesByPolicy again + // and specifying the values of the HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, + // and TrafficPolicyInstanceTypeMarker elements in the corresponding request + // parameters. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The value that you specified for the MaxItems parameter in the call to ListTrafficPolicyInstancesByPolicy + // that produced the current response. + MaxItems *string `type:"string" required:"true"` + + // If IsTruncated is true, TrafficPolicyInstanceNameMarker is the name of the + // first traffic policy instance in the next group of MaxItems traffic policy + // instances. + TrafficPolicyInstanceNameMarker *string `type:"string"` + + // If IsTruncated is true, TrafficPolicyInstanceTypeMarker is the DNS type of + // the resource record sets that are associated with the first traffic policy + // instance in the next group of MaxItems traffic policy instances. + TrafficPolicyInstanceTypeMarker *string `type:"string" enum:"RRType"` + + // A list that contains one TrafficPolicyInstance element for each traffic policy + // instance that matches the elements in the request. + TrafficPolicyInstances []*TrafficPolicyInstance `locationNameList:"TrafficPolicyInstance" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListTrafficPolicyInstancesByPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPolicyInstancesByPolicyOutput) GoString() string { + return s.String() +} + +// A complex type that contains the information about the request to list your +// traffic policy instances. +type ListTrafficPolicyInstancesInput struct { + _ struct{} `type:"structure"` + + // For the first request to ListTrafficPolicyInstances, omit this value. + // + // If the value of IsTruncated in the previous response was true, you have + // more traffic policy instances. To get the next group of MaxItems traffic + // policy instances, submit another ListTrafficPolicyInstances request. 
For + // the value of HostedZoneIdMarker, specify the value of HostedZoneIdMarker + // from the previous response, which is the hosted zone ID of the first traffic + // policy instance in the next group of MaxItems traffic policy instances. + // + // If the value of IsTruncated in the previous response was false, there are + // no more traffic policy instances to get. + HostedZoneIdMarker *string `location:"querystring" locationName:"hostedzoneid" type:"string"` + + // The maximum number of traffic policy instances to be included in the response + // body for this request. If you have more than MaxItems traffic policy instances, + // the value of the IsTruncated element in the response is true, and the values + // of HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker + // represent the first traffic policy instance in the next group of MaxItems + // traffic policy instances. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` + + // For the first request to ListTrafficPolicyInstances, omit this value. + // + // If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceNameMarker + // is the name of the first traffic policy instance in the next group of MaxItems + // traffic policy instances. + // + // If the value of IsTruncated in the previous response was false, there are + // no more traffic policy instances to get. + TrafficPolicyInstanceNameMarker *string `location:"querystring" locationName:"trafficpolicyinstancename" type:"string"` + + // For the first request to ListTrafficPolicyInstances, omit this value. + // + // If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceTypeMarker + // is the DNS type of the first traffic policy instance in the next group of + // MaxItems traffic policy instances. + // + // If the value of IsTruncated in the previous response was false, there are + // no more traffic policy instances to get. + TrafficPolicyInstanceTypeMarker *string `location:"querystring" locationName:"trafficpolicyinstancetype" type:"string" enum:"RRType"` +} + +// String returns the string representation +func (s ListTrafficPolicyInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPolicyInstancesInput) GoString() string { + return s.String() +} + +// A complex type that contains the response information for the request. +type ListTrafficPolicyInstancesOutput struct { + _ struct{} `type:"structure"` + + // If IsTruncated is true, HostedZoneIdMarker is the ID of the hosted zone of + // the first traffic policy instance in the next group of MaxItems traffic policy + // instances. + HostedZoneIdMarker *string `type:"string"` + + // A flag that indicates whether there are more traffic policy instances to + // be listed. If the response was truncated, you can get the next group of MaxItems + // traffic policy instances by calling ListTrafficPolicyInstances again and + // specifying the values of the HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, + // and TrafficPolicyInstanceTypeMarker elements in the corresponding request + // parameters. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The value that you specified for the MaxItems parameter in the call to ListTrafficPolicyInstances + // that produced the current response. 
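+	//
+	// A minimal pagination sketch for this three-marker protocol (illustrative
+	// only; svc is assumed to be a *route53.Route53 client, and error handling
+	// is elided):
+	//
+	//	in := &route53.ListTrafficPolicyInstancesInput{}
+	//	for {
+	//		out, err := svc.ListTrafficPolicyInstances(in)
+	//		if err != nil {
+	//			break
+	//		}
+	//		for _, inst := range out.TrafficPolicyInstances {
+	//			fmt.Println(aws.StringValue(inst.Name))
+	//		}
+	//		if !aws.BoolValue(out.IsTruncated) {
+	//			break
+	//		}
+	//		// Feed all three markers from the response into the next request.
+	//		in.HostedZoneIdMarker = out.HostedZoneIdMarker
+	//		in.TrafficPolicyInstanceNameMarker = out.TrafficPolicyInstanceNameMarker
+	//		in.TrafficPolicyInstanceTypeMarker = out.TrafficPolicyInstanceTypeMarker
+	//	}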
+ MaxItems *string `type:"string" required:"true"` + + // If IsTruncated is true, TrafficPolicyInstanceNameMarker is the name of the + // first traffic policy instance in the next group of MaxItems traffic policy + // instances. + TrafficPolicyInstanceNameMarker *string `type:"string"` + + // If IsTruncated is true, TrafficPolicyInstanceTypeMarker is the DNS type of + // the resource record sets that are associated with the first traffic policy + // instance in the next group of MaxItems traffic policy instances. + TrafficPolicyInstanceTypeMarker *string `type:"string" enum:"RRType"` + + // A list that contains one TrafficPolicyInstance element for each traffic policy + // instance that matches the elements in the request. + TrafficPolicyInstances []*TrafficPolicyInstance `locationNameList:"TrafficPolicyInstance" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListTrafficPolicyInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPolicyInstancesOutput) GoString() string { + return s.String() +} + +// A complex type that contains the information about the request to list your +// traffic policies. +type ListTrafficPolicyVersionsInput struct { + _ struct{} `type:"structure"` + + // Specify the value of Id of the traffic policy for which you want to list + // all versions. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The maximum number of traffic policy versions that you want Amazon Route + // 53 to include in the response body for this request. If the specified traffic + // policy has more than MaxItems versions, the value of the IsTruncated element + // in the response is true, and the value of the TrafficPolicyVersionMarker + // element is the ID of the first version in the next group of MaxItems traffic + // policy versions. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` + + // For your first request to ListTrafficPolicyVersions, do not include the TrafficPolicyVersionMarker + // parameter. + // + // If you have more traffic policy versions than the value of MaxItems, ListTrafficPolicyVersions + // returns only the first group of MaxItems versions. To get the next group + // of MaxItems traffic policy versions, submit another request to ListTrafficPolicyVersions. + // For the value of TrafficPolicyVersionMarker, specify the value of the TrafficPolicyVersionMarker + // element that was returned in the previous response. + // + // Traffic policy versions are listed in sequential order. + TrafficPolicyVersionMarker *string `location:"querystring" locationName:"trafficpolicyversion" type:"string"` +} + +// String returns the string representation +func (s ListTrafficPolicyVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPolicyVersionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTrafficPolicyVersionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTrafficPolicyVersionsInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains the response information for the request. 
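+//
+// A minimal pagination sketch (illustrative only; svc is assumed to be a
+// *route53.Route53 client, the policy ID is a placeholder, and error handling
+// is elided):
+//
+//	in := &route53.ListTrafficPolicyVersionsInput{Id: aws.String("traffic-policy-id")}
+//	for {
+//		out, err := svc.ListTrafficPolicyVersions(in)
+//		if err != nil {
+//			break
+//		}
+//		for _, policy := range out.TrafficPolicies {
+//			fmt.Println(aws.StringValue(policy.Id), aws.Int64Value(policy.Version))
+//		}
+//		if !aws.BoolValue(out.IsTruncated) {
+//			break
+//		}
+//		in.TrafficPolicyVersionMarker = out.TrafficPolicyVersionMarker
+//	}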
+type ListTrafficPolicyVersionsOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more traffic policies to be listed. + // If the response was truncated, you can get the next group of maxitems traffic + // policies by calling ListTrafficPolicyVersions again and specifying the value + // of the NextMarker element in the marker parameter. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The value that you specified for the maxitems parameter in the call to ListTrafficPolicyVersions + // that produced the current response. + MaxItems *string `type:"string" required:"true"` + + // A list that contains one TrafficPolicy element for each traffic policy version + // that is associated with the specified traffic policy. + TrafficPolicies []*TrafficPolicy `locationNameList:"TrafficPolicy" type:"list" required:"true"` + + // If IsTruncated is true, the value of TrafficPolicyVersionMarker identifies + // the first traffic policy in the next group of MaxItems traffic policies. + // Call ListTrafficPolicyVersions again and specify the value of TrafficPolicyVersionMarker + // in the TrafficPolicyVersionMarker request parameter. + // + // This element is present only if IsTruncated is true. + TrafficPolicyVersionMarker *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTrafficPolicyVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPolicyVersionsOutput) GoString() string { + return s.String() +} + +// A complex type that contains the value of the Value element for the current +// resource record set. +type ResourceRecord struct { + _ struct{} `type:"structure"` + + // The current or new DNS record value, not to exceed 4,000 characters. In the + // case of a DELETE action, if the current value does not match the actual value, + // an error is returned. For descriptions about how to format Value for different + // record types, see Supported DNS Resource Record Types (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html) + // in the Amazon Route 53 Developer Guide. + // + // You can specify more than one value for all record types except CNAME and + // SOA. + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ResourceRecord) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceRecord) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ResourceRecord) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResourceRecord"} + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains information about the current resource record +// set. +type ResourceRecordSet struct { + _ struct{} `type:"structure"` + + // Alias resource record sets only: Information about the AWS resource to which + // you are redirecting traffic. + AliasTarget *AliasTarget `type:"structure"` + + // Failover resource record sets only: To configure failover, you add the Failover + // element to two resource record sets. 
For one resource record set, you specify
+ // PRIMARY as the value for Failover; for the other resource record set, you
+ // specify SECONDARY. In addition, you include the HealthCheckId element and
+ // specify the health check that you want Amazon Route 53 to perform for each
+ // resource record set.
+ //
+ // You can create failover and failover alias resource record sets only in
+ // public hosted zones. Except where noted, the following failover behaviors
+ // assume that you have included the HealthCheckId element in both resource
+ // record sets:
+ //
+ //    * When the primary resource record set is healthy, Amazon Route 53 responds
+ //    to DNS queries with the applicable value from the primary resource record
+ //    set regardless of the health of the secondary resource record set.
+ //
+ //    * When the primary resource record set is unhealthy and the secondary
+ //    resource record set is healthy, Amazon Route 53 responds to DNS queries
+ //    with the applicable value from the secondary resource record set.
+ //
+ //    * When the secondary resource record set is unhealthy, Amazon Route 53
+ //    responds to DNS queries with the applicable value from the primary resource
+ //    record set regardless of the health of the primary resource record set.
+ //
+ //    * If you omit the HealthCheckId element for the secondary resource record
+ //    set, and if the primary resource record set is unhealthy, Amazon Route 53
+ //    always responds to DNS queries with the applicable value from the secondary
+ //    resource record set. This is true regardless of the health of the associated
+ //    endpoint.
+ //
+ // You cannot create non-failover resource record sets that have the same values
+ // for the Name and Type elements as failover resource record sets.
+ //
+ // For failover alias resource record sets, you must also include the EvaluateTargetHealth
+ // element and set the value to true.
+ //
+ // For more information about configuring failover for Amazon Route 53, see
+ // Amazon Route 53 Health Checks and DNS Failover (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover.html)
+ // in the Amazon Route 53 Developer Guide.
+ //
+ // Valid values: PRIMARY | SECONDARY
+ Failover *string `type:"string" enum:"ResourceRecordSetFailover"`
+
+ // Geolocation resource record sets only: A complex type that lets you control
+ // how Amazon Route 53 responds to DNS queries based on the geographic origin
+ // of the query. For example, if you want all queries from Africa to be routed
+ // to a web server with an IP address of 192.0.2.111, create a resource record
+ // set with a Type of A and a ContinentCode of AF.
+ //
+ // You can create geolocation and geolocation alias resource record sets only
+ // in public hosted zones. If you create separate resource record sets for overlapping
+ // geographic regions (for example, one resource record set for a continent
+ // and one for a country on the same continent), priority goes to the smallest
+ // geographic region. This allows you to route most queries for a continent
+ // to one resource and to route queries for a country on that continent to a
+ // different resource.
+ //
+ // You cannot create two geolocation resource record sets that specify the
+ // same geographic location.
+ //
+ // The value * in the CountryCode element matches all geographic locations
+ // that aren't specified in other geolocation resource record sets that have
+ // the same values for the Name and Type elements.
+ //
+ // Geolocation works by mapping IP addresses to locations. However, some IP
+ // addresses aren't mapped to geographic locations, so even if you create geolocation
+ // resource record sets that cover all seven continents, Amazon Route 53 will
+ // receive some DNS queries from locations that it can't identify. We recommend
+ // that you create a resource record set for which the value of CountryCode
+ // is *, which handles both queries that come from locations for which you haven't
+ // created geolocation resource record sets and queries from IP addresses that
+ // aren't mapped to a location. If you don't create a * resource record set,
+ // Amazon Route 53 returns a "no answer" response for queries from those locations.
+ //
+ // You cannot create non-geolocation resource record sets that have the same
+ // values for the Name and Type elements as geolocation resource record sets.
+ GeoLocation *GeoLocation `type:"structure"`
+
+ // Health Check resource record sets only, not required for alias resource record
+ // sets: An identifier that is used to identify the health check associated with
+ // the resource record set.
+ HealthCheckId *string `type:"string"`
+
+ // The name of the domain you want to perform the action on.
+ //
+ // Enter a fully qualified domain name, for example, www.example.com. You can
+ // optionally include a trailing dot. If you omit the trailing dot, Amazon Route
+ // 53 still assumes that the domain name that you specify is fully qualified.
+ // This means that Amazon Route 53 treats www.example.com (without a trailing
+ // dot) and www.example.com. (with a trailing dot) as identical.
+ //
+ // For information about how to specify characters other than a-z, 0-9, and
+ // - (hyphen) and how to specify internationalized domain names, see DNS Domain
+ // Name Format (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DomainNameFormat.html)
+ // in the Amazon Route 53 Developer Guide.
+ //
+ // You can use an asterisk (*) character in the name. DNS treats the * character
+ // either as a wildcard or as the * character (ASCII 42), depending on where
+ // it appears in the name. For more information, see Using an Asterisk (*) in
+ // the Names of Hosted Zones and Resource Record Sets (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DomainNameFormat.html#domain-name-format-asterisk)
+ // in the Amazon Route 53 Developer Guide.
+ //
+ // You can't use the * wildcard for resource record sets that have a type
+ // of NS.
+ Name *string `type:"string" required:"true"`
+
+ // Latency-based resource record sets only: The Amazon EC2 region where the
+ // resource that is specified in this resource record set resides. The resource
+ // typically is an AWS resource, such as an Amazon EC2 instance or an ELB load
+ // balancer, and is referred to by an IP address or a DNS domain name, depending
+ // on the record type.
+ //
+ // You can create latency and latency alias resource record sets only in public
+ // hosted zones. When Amazon Route 53 receives a DNS query for a domain name
+ // and type for which you have created latency resource record sets, Amazon
+ // Route 53 selects the latency resource record set that has the lowest latency
+ // between the end user and the associated Amazon EC2 region. Amazon Route 53
+ // then returns the value that is associated with the selected resource record
+ // set.
+ //
+ // Note the following:
+ //
+ //    * You can only specify one ResourceRecord per latency resource record set.
+ //
+ //    * You can only create one latency resource record set for each Amazon
+ //    EC2 region.
+ //
+ //    * You are not required to create latency resource record sets for all
+ //    Amazon EC2 regions. Amazon Route 53 will choose the region with the best
+ //    latency from among the regions for which you create latency resource record
+ //    sets.
+ //
+ //    * You cannot create non-latency resource record sets that have the same
+ //    values for the Name and Type elements as latency resource record sets.
+ Region *string `min:"1" type:"string" enum:"ResourceRecordSetRegion"`
+
+ // A complex type that contains the resource records for the current resource
+ // record set.
+ ResourceRecords []*ResourceRecord `locationNameList:"ResourceRecord" min:"1" type:"list"`
+
+ // Weighted, Latency, Geo, and Failover resource record sets only: An identifier
+ // that differentiates among multiple resource record sets that have the same
+ // combination of DNS name and type. The value of SetIdentifier must be unique
+ // for each resource record set that has the same combination of DNS name and
+ // type.
+ SetIdentifier *string `min:"1" type:"string"`
+
+ // The cache time to live for the current resource record set. Note the following:
+ //
+ //    * If you're creating a non-alias resource record set, TTL is required.
+ //    If you're creating an alias resource record set, omit TTL. Amazon Route 53
+ //    uses the value of TTL for the alias target.
+ //
+ //    * If you're associating this resource record set with a health check (if
+ //    you're adding a HealthCheckId element), we recommend that you specify a TTL
+ //    of 60 seconds or less so clients respond quickly to changes in health status.
+ //
+ //    * All of the resource record sets in a group of weighted, latency, geolocation,
+ //    or failover resource record sets must have the same value for TTL.
+ //
+ //    * If a group of weighted resource record sets includes one or more weighted
+ //    alias resource record sets for which the alias target is an ELB load balancer,
+ //    we recommend that you specify a TTL of 60 seconds for all of the non-alias
+ //    weighted resource record sets that have the same name and type. Values other
+ //    than 60 seconds (the TTL for load balancers) will change the effect of the
+ //    values that you specify for Weight.
+ TTL *int64 `type:"long"`
+
+ TrafficPolicyInstanceId *string `type:"string"`
+
+ // The DNS record type. For information about different record types and how
+ // data is encoded for them, see Supported DNS Resource Record Types (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html)
+ // in the Amazon Route 53 Developer Guide.
+ //
+ // Valid values for basic resource record sets: A | AAAA | CNAME | MX | NS
+ // | PTR | SOA | SPF | SRV | TXT
+ //
+ // Values for weighted, latency, geolocation, and failover resource record
+ // sets: A | AAAA | CNAME | MX | PTR | SPF | SRV | TXT. When creating a group
+ // of weighted, latency, geolocation, or failover resource record sets, specify
+ // the same value for all of the resource record sets in the group.
+ //
+ // SPF records were formerly used to verify the identity of the sender of email
+ // messages. However, we no longer recommend that you create resource record
+ // sets for which the value of Type is SPF. RFC 7208, Sender Policy Framework
+ // (SPF) for Authorizing Use of Domains in Email, Version 1, has been updated
+ // to say, "...[I]ts existence and mechanism defined in [RFC4408] have led to
+ // some interoperability issues. Accordingly, its use is no longer appropriate
+ // for SPF version 1; implementations are not to use it." In RFC 7208, see section
+ // 14.1, The SPF DNS Record Type (http://tools.ietf.org/html/rfc7208#section-14.1).
+ //
+ // Values for alias resource record sets:
+ //
+ //    * CloudFront distributions: A
+ //
+ //    * ELB load balancers: A | AAAA
+ //
+ //    * Amazon S3 buckets: A
+ //
+ //    * Another resource record set in this hosted zone: Specify the type of
+ //    the resource record set for which you're creating the alias. Specify any
+ //    value except NS or SOA.
+ Type *string `type:"string" required:"true" enum:"RRType"`
+
+ // Weighted resource record sets only: Among resource record sets that have
+ // the same combination of DNS name and type, a value that determines the proportion
+ // of DNS queries that Amazon Route 53 responds to using the current resource
+ // record set. Amazon Route 53 calculates the sum of the weights for the resource
+ // record sets that have the same combination of DNS name and type. Amazon Route
+ // 53 then responds to queries based on the ratio of a resource's weight to
+ // the total. Note the following:
+ //
+ //    * You must specify a value for the Weight element for every weighted resource
+ //    record set.
+ //
+ //    * You can only specify one ResourceRecord per weighted resource record set.
+ //
+ //    * You cannot create latency, failover, or geolocation resource record sets
+ //    that have the same values for the Name and Type elements as weighted resource
+ //    record sets.
+ //
+ //    * You can create a maximum of 100 weighted resource record sets that have
+ //    the same values for the Name and Type elements.
+ //
+ //    * For weighted (but not weighted alias) resource record sets, if you set
+ //    Weight to 0 for a resource record set, Amazon Route 53 never responds to
+ //    queries with the applicable value for that resource record set. However,
+ //    if you set Weight to 0 for all resource record sets that have the same
+ //    combination of DNS name and type, traffic is routed to all resources with
+ //    equal probability.
+ //
+ //    The effect of setting Weight to 0 is different when you associate health
+ //    checks with weighted resource record sets. For more information, see Options
+ //    for Configuring Amazon Route 53 Active-Active and Active-Passive Failover
+ //    (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-configuring-options.html)
+ //    in the Amazon Route 53 Developer Guide.
+ Weight *int64 `type:"long"`
+}
+
+// String returns the string representation
+func (s ResourceRecordSet) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ResourceRecordSet) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
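Aside (not part of the vendored file): the failover fields documented above are easiest to see in use. A minimal sketch that builds the PRIMARY/SECONDARY pair described under Failover; the zone ID Z1EXAMPLE, the health check IDs, the record name www.example.com., and the IPs 192.0.2.1/192.0.2.2 are all placeholders:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/route53"
)

// failoverRecord builds one half of a failover pair; role is PRIMARY or SECONDARY.
func failoverRecord(role, ip, healthCheckID string) *route53.Change {
	return &route53.Change{
		Action: aws.String(route53.ChangeActionUpsert),
		ResourceRecordSet: &route53.ResourceRecordSet{
			Name:            aws.String("www.example.com."),
			Type:            aws.String(route53.RRTypeA),
			Failover:        aws.String(role),
			SetIdentifier:   aws.String("www-" + role), // must be unique per name/type
			HealthCheckId:   aws.String(healthCheckID),
			TTL:             aws.Int64(60), // 60s or less so clients track health changes
			ResourceRecords: []*route53.ResourceRecord{{Value: aws.String(ip)}},
		},
	}
}

func main() {
	svc := route53.New(session.New())
	_, err := svc.ChangeResourceRecordSets(&route53.ChangeResourceRecordSetsInput{
		HostedZoneId: aws.String("Z1EXAMPLE"), // placeholder zone ID
		ChangeBatch: &route53.ChangeBatch{
			Changes: []*route53.Change{
				failoverRecord(route53.ResourceRecordSetFailoverPrimary, "192.0.2.1", "hc-primary"),
				failoverRecord(route53.ResourceRecordSetFailoverSecondary, "192.0.2.2", "hc-secondary"),
			},
		},
	})
	if err != nil {
		fmt.Println(err.Error())
	}
}

The Validate method that follows enforces the required Name and Type fields and the minimum lengths on Region, SetIdentifier, and ResourceRecords before any such request is sent.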
+func (s *ResourceRecordSet) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResourceRecordSet"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Region != nil && len(*s.Region) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Region", 1)) + } + if s.ResourceRecords != nil && len(s.ResourceRecords) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceRecords", 1)) + } + if s.SetIdentifier != nil && len(*s.SetIdentifier) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SetIdentifier", 1)) + } + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + if s.AliasTarget != nil { + if err := s.AliasTarget.Validate(); err != nil { + invalidParams.AddNested("AliasTarget", err.(request.ErrInvalidParams)) + } + } + if s.GeoLocation != nil { + if err := s.GeoLocation.Validate(); err != nil { + invalidParams.AddNested("GeoLocation", err.(request.ErrInvalidParams)) + } + } + if s.ResourceRecords != nil { + for i, v := range s.ResourceRecords { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ResourceRecords", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type containing a resource and its associated tags. +type ResourceTagSet struct { + _ struct{} `type:"structure"` + + // The ID for the specified resource. + ResourceId *string `type:"string"` + + // The type of the resource. + // + // - The resource type for health checks is healthcheck. + // + // - The resource type for hosted zones is hostedzone. + ResourceType *string `type:"string" enum:"TagResourceType"` + + // The tags associated with the specified resource. + Tags []*Tag `locationNameList:"Tag" min:"1" type:"list"` +} + +// String returns the string representation +func (s ResourceTagSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceTagSet) GoString() string { + return s.String() +} + +// A complex type that contains information about the health check status for +// the current observation. +type StatusReport struct { + _ struct{} `type:"structure"` + + // The date and time the health check status was observed, in the format YYYY-MM-DDThh:mm:ssZ, + // as specified in the ISO 8601 standard (for example, 2009-11-19T19:37:58Z). + // The Z after the time indicates that the time is listed in Coordinated Universal + // Time (UTC). + CheckedTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The observed health check status. + Status *string `type:"string"` +} + +// String returns the string representation +func (s StatusReport) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StatusReport) GoString() string { + return s.String() +} + +// A single tag containing a key and value. +type Tag struct { + _ struct{} `type:"structure"` + + // The key for a Tag. + Key *string `type:"string"` + + // The value for a Tag. 
+ Value *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Tag) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Tag) GoString() string {
+ return s.String()
+}
+
+type TrafficPolicy struct {
+ _ struct{} `type:"structure"`
+
+ Comment *string `type:"string"`
+
+ Document *string `type:"string" required:"true"`
+
+ Id *string `type:"string" required:"true"`
+
+ Name *string `type:"string" required:"true"`
+
+ Type *string `type:"string" required:"true" enum:"RRType"`
+
+ Version *int64 `min:"1" type:"integer" required:"true"`
+}
+
+// String returns the string representation
+func (s TrafficPolicy) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TrafficPolicy) GoString() string {
+ return s.String()
+}
+
+type TrafficPolicyInstance struct {
+ _ struct{} `type:"structure"`
+
+ HostedZoneId *string `type:"string" required:"true"`
+
+ Id *string `type:"string" required:"true"`
+
+ Message *string `type:"string" required:"true"`
+
+ Name *string `type:"string" required:"true"`
+
+ State *string `type:"string" required:"true"`
+
+ TTL *int64 `type:"long" required:"true"`
+
+ TrafficPolicyId *string `type:"string" required:"true"`
+
+ TrafficPolicyType *string `type:"string" required:"true" enum:"RRType"`
+
+ TrafficPolicyVersion *int64 `min:"1" type:"integer" required:"true"`
+}
+
+// String returns the string representation
+func (s TrafficPolicyInstance) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TrafficPolicyInstance) GoString() string {
+ return s.String()
+}
+
+type TrafficPolicySummary struct {
+ _ struct{} `type:"structure"`
+
+ Id *string `type:"string" required:"true"`
+
+ LatestVersion *int64 `min:"1" type:"integer" required:"true"`
+
+ Name *string `type:"string" required:"true"`
+
+ TrafficPolicyCount *int64 `min:"1" type:"integer" required:"true"`
+
+ Type *string `type:"string" required:"true" enum:"RRType"`
+}
+
+// String returns the string representation
+func (s TrafficPolicySummary) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TrafficPolicySummary) GoString() string {
+ return s.String()
+}
+
+// A complex type that contains information about the request to update a health
+// check.
+type UpdateHealthCheckInput struct {
+ _ struct{} `locationName:"UpdateHealthCheckRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"`
+
+ // A complex type that contains information to uniquely identify the CloudWatch
+ // alarm that you're associating with a Route 53 health check.
+ AlarmIdentifier *AlarmIdentifier `type:"structure"`
+
+ // For a specified parent health check, a list of HealthCheckId values for the
+ // associated child health checks.
+ //
+ // Specify this value only if you want to change it.
+ ChildHealthChecks []*string `locationNameList:"ChildHealthCheck" type:"list"`
+
+ // Specify whether you want Amazon Route 53 to send the value of FullyQualifiedDomainName
+ // to the endpoint in the client_hello message during TLS negotiation. If you
+ // don't specify a value for EnableSNI, Amazon Route 53 defaults to true when
+ // Type is HTTPS or HTTPS_STR_MATCH and defaults to false when Type is any other
+ // value.
+ //
+ // Specify this value only if you want to change it.
+ EnableSNI *bool `type:"boolean"`
+
+ // The number of consecutive health checks that an endpoint must pass or fail
+ // for Amazon Route 53 to change the current status of the endpoint from unhealthy
+ // to healthy or vice versa.
+ //
+ // Valid values are integers between 1 and 10. For more information, see "How
+ // Amazon Route 53 Determines Whether an Endpoint Is Healthy" in the Amazon
+ // Route 53 Developer Guide.
+ //
+ // Specify this value only if you want to change it.
+ FailureThreshold *int64 `min:"1" type:"integer"`
+
+ // Fully qualified domain name of the instance to be health checked.
+ //
+ // Specify this value only if you want to change it.
+ FullyQualifiedDomainName *string `type:"string"`
+
+ // The ID of the health check to update.
+ HealthCheckId *string `location:"uri" locationName:"HealthCheckId" type:"string" required:"true"`
+
+ // Optional. When you specify a health check version, Amazon Route 53 compares
+ // this value with the current value in the health check, which prevents you
+ // from updating the health check when the versions don't match. Using HealthCheckVersion
+ // lets you prevent overwriting another change to the health check.
+ HealthCheckVersion *int64 `min:"1" type:"long"`
+
+ // The minimum number of child health checks that must be healthy for Amazon
+ // Route 53 to consider the parent health check to be healthy. Valid values
+ // are integers between 0 and 256, inclusive.
+ //
+ // Specify this value only if you want to change it.
+ HealthThreshold *int64 `type:"integer"`
+
+ // The IP address of the resource that you want to check.
+ //
+ // Specify this value only if you want to change it.
+ IPAddress *string `type:"string"`
+
+ InsufficientDataHealthStatus *string `type:"string" enum:"InsufficientDataHealthStatus"`
+
+ // A boolean value that indicates whether the status of the health check should
+ // be inverted. For example, if a health check is healthy but Inverted is True,
+ // then Amazon Route 53 considers the health check to be unhealthy.
+ //
+ // Specify this value only if you want to change it.
+ Inverted *bool `type:"boolean"`
+
+ // The port on which you want Amazon Route 53 to open a connection to perform
+ // health checks.
+ //
+ // Specify this value only if you want to change it.
+ Port *int64 `min:"1" type:"integer"`
+
+ // A list of HealthCheckRegion values that specify the Amazon EC2 regions that
+ // you want Amazon Route 53 to use to perform health checks. You must specify
+ // at least three regions.
+ //
+ // When you remove a region from the list, Amazon Route 53 will briefly continue
+ // to check your endpoint from that region. Specify this value only if you want
+ // to change it.
+ Regions []*string `locationNameList:"Region" min:"1" type:"list"`
+
+ // The path that you want Amazon Route 53 to request when performing health
+ // checks. The path can be any value for which your endpoint will return an
+ // HTTP status code of 2xx or 3xx when the endpoint is healthy, for example,
+ // the file /docs/route53-health-check.html.
+ //
+ // Specify this value only if you want to change it.
+ ResourcePath *string `type:"string"`
+
+ // If the value of Type is HTTP_STR_MATCH or HTTPS_STR_MATCH, the string that
+ // you want Amazon Route 53 to search for in the response body from the specified
+ // resource. If the string appears in the response body, Amazon Route 53 considers
+ // the resource healthy. Amazon Route 53 considers case when searching for SearchString
+ // in the response body.
+ //
+ // Specify this value only if you want to change it.
+ SearchString *string `type:"string"`
+}
+
+// String returns the string representation
+func (s UpdateHealthCheckInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateHealthCheckInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateHealthCheckInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UpdateHealthCheckInput"}
+ if s.FailureThreshold != nil && *s.FailureThreshold < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("FailureThreshold", 1))
+ }
+ if s.HealthCheckId == nil {
+ invalidParams.Add(request.NewErrParamRequired("HealthCheckId"))
+ }
+ if s.HealthCheckVersion != nil && *s.HealthCheckVersion < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("HealthCheckVersion", 1))
+ }
+ if s.Port != nil && *s.Port < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("Port", 1))
+ }
+ if s.Regions != nil && len(s.Regions) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Regions", 1))
+ }
+ if s.AlarmIdentifier != nil {
+ if err := s.AlarmIdentifier.Validate(); err != nil {
+ invalidParams.AddNested("AlarmIdentifier", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// A complex type that contains the response to the UpdateHealthCheck request.
+type UpdateHealthCheckOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A complex type that contains identifying information about the health check.
+ HealthCheck *HealthCheck `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateHealthCheckOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateHealthCheckOutput) GoString() string {
+ return s.String()
+}
+
+// A complex type that contains information about the request to update a hosted
+// zone comment.
+type UpdateHostedZoneCommentInput struct {
+ _ struct{} `locationName:"UpdateHostedZoneCommentRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"`
+
+ // A comment about your hosted zone.
+ Comment *string `type:"string"`
+
+ // The ID of the hosted zone you want to update.
+ Id *string `location:"uri" locationName:"Id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateHostedZoneCommentInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateHostedZoneCommentInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateHostedZoneCommentInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UpdateHostedZoneCommentInput"}
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// A complex type containing information about the specified hosted zone after
+// the update.
+type UpdateHostedZoneCommentOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A complex type that contains information about the specified hosted zone.
+ HostedZone *HostedZone `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateHostedZoneCommentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateHostedZoneCommentOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about the traffic policy for which +// you want to update the comment. +type UpdateTrafficPolicyCommentInput struct { + _ struct{} `locationName:"UpdateTrafficPolicyCommentRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // The new comment for the specified traffic policy and version. + Comment *string `type:"string" required:"true"` + + // The value of Id for the traffic policy for which you want to update the comment. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The value of Version for the traffic policy for which you want to update + // the comment. + Version *int64 `location:"uri" locationName:"Version" min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s UpdateTrafficPolicyCommentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTrafficPolicyCommentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateTrafficPolicyCommentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateTrafficPolicyCommentInput"} + if s.Comment == nil { + invalidParams.Add(request.NewErrParamRequired("Comment")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Version == nil { + invalidParams.Add(request.NewErrParamRequired("Version")) + } + if s.Version != nil && *s.Version < 1 { + invalidParams.Add(request.NewErrParamMinValue("Version", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains the response information for the traffic policy. +type UpdateTrafficPolicyCommentOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains settings for the specified traffic policy. + TrafficPolicy *TrafficPolicy `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateTrafficPolicyCommentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTrafficPolicyCommentOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about the resource record sets that +// you want to update based on a specified traffic policy instance. +type UpdateTrafficPolicyInstanceInput struct { + _ struct{} `locationName:"UpdateTrafficPolicyInstanceRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // The ID of the traffic policy instance that you want to update. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The TTL that you want Amazon Route 53 to assign to all of the updated resource + // record sets. + TTL *int64 `type:"long" required:"true"` + + // The ID of the traffic policy that you want Amazon Route 53 to use to update + // resource record sets for the specified traffic policy instance. 
+ TrafficPolicyId *string `type:"string" required:"true"` + + // The version of the traffic policy that you want Amazon Route 53 to use to + // update resource record sets for the specified traffic policy instance. + TrafficPolicyVersion *int64 `min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s UpdateTrafficPolicyInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTrafficPolicyInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateTrafficPolicyInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateTrafficPolicyInstanceInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.TTL == nil { + invalidParams.Add(request.NewErrParamRequired("TTL")) + } + if s.TrafficPolicyId == nil { + invalidParams.Add(request.NewErrParamRequired("TrafficPolicyId")) + } + if s.TrafficPolicyVersion == nil { + invalidParams.Add(request.NewErrParamRequired("TrafficPolicyVersion")) + } + if s.TrafficPolicyVersion != nil && *s.TrafficPolicyVersion < 1 { + invalidParams.Add(request.NewErrParamMinValue("TrafficPolicyVersion", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains information about the resource record sets that +// Amazon Route 53 created based on a specified traffic policy. +type UpdateTrafficPolicyInstanceOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains settings for the updated traffic policy instance. + TrafficPolicyInstance *TrafficPolicyInstance `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateTrafficPolicyInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTrafficPolicyInstanceOutput) GoString() string { + return s.String() +} + +type VPC struct { + _ struct{} `type:"structure"` + + // A VPC ID + VPCId *string `type:"string"` + + VPCRegion *string `min:"1" type:"string" enum:"VPCRegion"` +} + +// String returns the string representation +func (s VPC) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VPC) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
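Aside (not part of the vendored file): generated Validate methods like the VPC one that follows run automatically in the SDK's request pipeline, but they can also be called directly for pre-flight checks. A minimal sketch; the empty input is deliberate so that every required-field check fires:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/route53"
)

func main() {
	// Every required field is missing, so Validate returns an
	// ErrInvalidParams that aggregates one error per failed rule.
	params := &route53.UpdateTrafficPolicyInstanceInput{}
	if err := params.Validate(); err != nil {
		fmt.Println(err) // lists Id, TTL, TrafficPolicyId, TrafficPolicyVersion
	}
}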
+func (s *VPC) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VPC"} + if s.VPCRegion != nil && len(*s.VPCRegion) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VPCRegion", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +const ( + // @enum ChangeAction + ChangeActionCreate = "CREATE" + // @enum ChangeAction + ChangeActionDelete = "DELETE" + // @enum ChangeAction + ChangeActionUpsert = "UPSERT" +) + +const ( + // @enum ChangeStatus + ChangeStatusPending = "PENDING" + // @enum ChangeStatus + ChangeStatusInsync = "INSYNC" +) + +const ( + // @enum CloudWatchRegion + CloudWatchRegionUsEast1 = "us-east-1" + // @enum CloudWatchRegion + CloudWatchRegionUsWest1 = "us-west-1" + // @enum CloudWatchRegion + CloudWatchRegionUsWest2 = "us-west-2" + // @enum CloudWatchRegion + CloudWatchRegionEuCentral1 = "eu-central-1" + // @enum CloudWatchRegion + CloudWatchRegionEuWest1 = "eu-west-1" + // @enum CloudWatchRegion + CloudWatchRegionApSoutheast1 = "ap-southeast-1" + // @enum CloudWatchRegion + CloudWatchRegionApSoutheast2 = "ap-southeast-2" + // @enum CloudWatchRegion + CloudWatchRegionApNortheast1 = "ap-northeast-1" + // @enum CloudWatchRegion + CloudWatchRegionApNortheast2 = "ap-northeast-2" + // @enum CloudWatchRegion + CloudWatchRegionSaEast1 = "sa-east-1" +) + +const ( + // @enum ComparisonOperator + ComparisonOperatorGreaterThanOrEqualToThreshold = "GreaterThanOrEqualToThreshold" + // @enum ComparisonOperator + ComparisonOperatorGreaterThanThreshold = "GreaterThanThreshold" + // @enum ComparisonOperator + ComparisonOperatorLessThanThreshold = "LessThanThreshold" + // @enum ComparisonOperator + ComparisonOperatorLessThanOrEqualToThreshold = "LessThanOrEqualToThreshold" +) + +// An Amazon EC2 region that you want Amazon Route 53 to use to perform health +// checks. 
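Aside (not part of the vendored file): these typed @enum constants, together with the HealthCheckRegion block that follows, are meant to replace hand-typed API strings in request structs. A minimal sketch using a few of them; the alarm name my-alarm is a placeholder:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/route53"
)

func main() {
	// Typed constants instead of raw strings for the enum-valued fields.
	cfg := &route53.HealthCheckConfig{
		Type: aws.String(route53.HealthCheckTypeCloudwatchMetric),
		AlarmIdentifier: &route53.AlarmIdentifier{
			Name:   aws.String("my-alarm"), // placeholder alarm name
			Region: aws.String(route53.CloudWatchRegionUsEast1),
		},
		InsufficientDataHealthStatus: aws.String(route53.InsufficientDataHealthStatusLastKnownStatus),
	}
	fmt.Println(cfg)
}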
+const ( + // @enum HealthCheckRegion + HealthCheckRegionUsEast1 = "us-east-1" + // @enum HealthCheckRegion + HealthCheckRegionUsWest1 = "us-west-1" + // @enum HealthCheckRegion + HealthCheckRegionUsWest2 = "us-west-2" + // @enum HealthCheckRegion + HealthCheckRegionEuWest1 = "eu-west-1" + // @enum HealthCheckRegion + HealthCheckRegionApSoutheast1 = "ap-southeast-1" + // @enum HealthCheckRegion + HealthCheckRegionApSoutheast2 = "ap-southeast-2" + // @enum HealthCheckRegion + HealthCheckRegionApNortheast1 = "ap-northeast-1" + // @enum HealthCheckRegion + HealthCheckRegionSaEast1 = "sa-east-1" +) + +const ( + // @enum HealthCheckType + HealthCheckTypeHttp = "HTTP" + // @enum HealthCheckType + HealthCheckTypeHttps = "HTTPS" + // @enum HealthCheckType + HealthCheckTypeHttpStrMatch = "HTTP_STR_MATCH" + // @enum HealthCheckType + HealthCheckTypeHttpsStrMatch = "HTTPS_STR_MATCH" + // @enum HealthCheckType + HealthCheckTypeTcp = "TCP" + // @enum HealthCheckType + HealthCheckTypeCalculated = "CALCULATED" + // @enum HealthCheckType + HealthCheckTypeCloudwatchMetric = "CLOUDWATCH_METRIC" +) + +const ( + // @enum InsufficientDataHealthStatus + InsufficientDataHealthStatusHealthy = "Healthy" + // @enum InsufficientDataHealthStatus + InsufficientDataHealthStatusUnhealthy = "Unhealthy" + // @enum InsufficientDataHealthStatus + InsufficientDataHealthStatusLastKnownStatus = "LastKnownStatus" +) + +const ( + // @enum RRType + RRTypeSoa = "SOA" + // @enum RRType + RRTypeA = "A" + // @enum RRType + RRTypeTxt = "TXT" + // @enum RRType + RRTypeNs = "NS" + // @enum RRType + RRTypeCname = "CNAME" + // @enum RRType + RRTypeMx = "MX" + // @enum RRType + RRTypePtr = "PTR" + // @enum RRType + RRTypeSrv = "SRV" + // @enum RRType + RRTypeSpf = "SPF" + // @enum RRType + RRTypeAaaa = "AAAA" +) + +const ( + // @enum ResourceRecordSetFailover + ResourceRecordSetFailoverPrimary = "PRIMARY" + // @enum ResourceRecordSetFailover + ResourceRecordSetFailoverSecondary = "SECONDARY" +) + +const ( + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionUsEast1 = "us-east-1" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionUsWest1 = "us-west-1" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionUsWest2 = "us-west-2" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionEuWest1 = "eu-west-1" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionEuCentral1 = "eu-central-1" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionApSoutheast1 = "ap-southeast-1" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionApSoutheast2 = "ap-southeast-2" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionApNortheast1 = "ap-northeast-1" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionApNortheast2 = "ap-northeast-2" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionSaEast1 = "sa-east-1" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionCnNorth1 = "cn-north-1" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionApSouth1 = "ap-south-1" +) + +const ( + // @enum Statistic + StatisticAverage = "Average" + // @enum Statistic + StatisticSum = "Sum" + // @enum Statistic + StatisticSampleCount = "SampleCount" + // @enum Statistic + StatisticMaximum = "Maximum" + // @enum Statistic + StatisticMinimum = "Minimum" +) + +const ( + // @enum TagResourceType + TagResourceTypeHealthcheck = "healthcheck" + // @enum TagResourceType + TagResourceTypeHostedzone = "hostedzone" +) + +const ( + // @enum VPCRegion + VPCRegionUsEast1 = "us-east-1" + // @enum VPCRegion + 
VPCRegionUsWest1 = "us-west-1" + // @enum VPCRegion + VPCRegionUsWest2 = "us-west-2" + // @enum VPCRegion + VPCRegionEuWest1 = "eu-west-1" + // @enum VPCRegion + VPCRegionEuCentral1 = "eu-central-1" + // @enum VPCRegion + VPCRegionApSoutheast1 = "ap-southeast-1" + // @enum VPCRegion + VPCRegionApSoutheast2 = "ap-southeast-2" + // @enum VPCRegion + VPCRegionApSouth1 = "ap-south-1" + // @enum VPCRegion + VPCRegionApNortheast1 = "ap-northeast-1" + // @enum VPCRegion + VPCRegionApNortheast2 = "ap-northeast-2" + // @enum VPCRegion + VPCRegionSaEast1 = "sa-east-1" + // @enum VPCRegion + VPCRegionCnNorth1 = "cn-north-1" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/route53/customizations.go new file mode 100644 index 000000000..91af196e2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/route53/customizations.go @@ -0,0 +1,30 @@ +package route53 + +import ( + "regexp" + + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/restxml" +) + +func init() { + initClient = func(c *client.Client) { + c.Handlers.Build.PushBack(sanitizeURL) + } + + initRequest = func(r *request.Request) { + switch r.Operation.Name { + case opChangeResourceRecordSets: + r.Handlers.UnmarshalError.Remove(restxml.UnmarshalErrorHandler) + r.Handlers.UnmarshalError.PushBack(unmarshalChangeResourceRecordSetsError) + } + } +} + +var reSanitizeURL = regexp.MustCompile(`\/%2F\w+%2F`) + +func sanitizeURL(r *request.Request) { + r.HTTPRequest.URL.Opaque = + reSanitizeURL.ReplaceAllString(r.HTTPRequest.URL.Opaque, "/") +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53/customizations_test.go b/vendor/github.com/aws/aws-sdk-go/service/route53/customizations_test.go new file mode 100644 index 000000000..518790a24 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/route53/customizations_test.go @@ -0,0 +1,22 @@ +package route53_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/route53" +) + +func TestBuildCorrectURI(t *testing.T) { + svc := route53.New(unit.Session) + svc.Handlers.Validate.Clear() + req, _ := svc.GetHostedZoneRequest(&route53.GetHostedZoneInput{ + Id: aws.String("/hostedzone/ABCDEFG"), + }) + + req.Build() + + awstesting.Match(t, `\/hostedzone\/ABCDEFG$`, req.HTTPRequest.URL.String()) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/route53/examples_test.go new file mode 100644 index 000000000..cdbfea1fb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/route53/examples_test.go @@ -0,0 +1,1100 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package route53_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/route53" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleRoute53_AssociateVPCWithHostedZone() { + svc := route53.New(session.New()) + + params := &route53.AssociateVPCWithHostedZoneInput{ + HostedZoneId: aws.String("ResourceId"), // Required + VPC: &route53.VPC{ // Required + VPCId: aws.String("VPCId"), + VPCRegion: aws.String("VPCRegion"), + }, + Comment: aws.String("AssociateVPCComment"), + } + resp, err := svc.AssociateVPCWithHostedZone(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ChangeResourceRecordSets() { + svc := route53.New(session.New()) + + params := &route53.ChangeResourceRecordSetsInput{ + ChangeBatch: &route53.ChangeBatch{ // Required + Changes: []*route53.Change{ // Required + { // Required + Action: aws.String("ChangeAction"), // Required + ResourceRecordSet: &route53.ResourceRecordSet{ // Required + Name: aws.String("DNSName"), // Required + Type: aws.String("RRType"), // Required + AliasTarget: &route53.AliasTarget{ + DNSName: aws.String("DNSName"), // Required + EvaluateTargetHealth: aws.Bool(true), // Required + HostedZoneId: aws.String("ResourceId"), // Required + }, + Failover: aws.String("ResourceRecordSetFailover"), + GeoLocation: &route53.GeoLocation{ + ContinentCode: aws.String("GeoLocationContinentCode"), + CountryCode: aws.String("GeoLocationCountryCode"), + SubdivisionCode: aws.String("GeoLocationSubdivisionCode"), + }, + HealthCheckId: aws.String("HealthCheckId"), + Region: aws.String("ResourceRecordSetRegion"), + ResourceRecords: []*route53.ResourceRecord{ + { // Required + Value: aws.String("RData"), // Required + }, + // More values... + }, + SetIdentifier: aws.String("ResourceRecordSetIdentifier"), + TTL: aws.Int64(1), + TrafficPolicyInstanceId: aws.String("TrafficPolicyInstanceId"), + Weight: aws.Int64(1), + }, + }, + // More values... + }, + Comment: aws.String("ResourceDescription"), + }, + HostedZoneId: aws.String("ResourceId"), // Required + } + resp, err := svc.ChangeResourceRecordSets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ChangeTagsForResource() { + svc := route53.New(session.New()) + + params := &route53.ChangeTagsForResourceInput{ + ResourceId: aws.String("TagResourceId"), // Required + ResourceType: aws.String("TagResourceType"), // Required + AddTags: []*route53.Tag{ + { // Required + Key: aws.String("TagKey"), + Value: aws.String("TagValue"), + }, + // More values... + }, + RemoveTagKeys: []*string{ + aws.String("TagKey"), // Required + // More values... + }, + } + resp, err := svc.ChangeTagsForResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRoute53_CreateHealthCheck() { + svc := route53.New(session.New()) + + params := &route53.CreateHealthCheckInput{ + CallerReference: aws.String("HealthCheckNonce"), // Required + HealthCheckConfig: &route53.HealthCheckConfig{ // Required + Type: aws.String("HealthCheckType"), // Required + AlarmIdentifier: &route53.AlarmIdentifier{ + Name: aws.String("AlarmName"), // Required + Region: aws.String("CloudWatchRegion"), // Required + }, + ChildHealthChecks: []*string{ + aws.String("HealthCheckId"), // Required + // More values... + }, + EnableSNI: aws.Bool(true), + FailureThreshold: aws.Int64(1), + FullyQualifiedDomainName: aws.String("FullyQualifiedDomainName"), + HealthThreshold: aws.Int64(1), + IPAddress: aws.String("IPAddress"), + InsufficientDataHealthStatus: aws.String("InsufficientDataHealthStatus"), + Inverted: aws.Bool(true), + MeasureLatency: aws.Bool(true), + Port: aws.Int64(1), + Regions: []*string{ + aws.String("HealthCheckRegion"), // Required + // More values... + }, + RequestInterval: aws.Int64(1), + ResourcePath: aws.String("ResourcePath"), + SearchString: aws.String("SearchString"), + }, + } + resp, err := svc.CreateHealthCheck(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_CreateHostedZone() { + svc := route53.New(session.New()) + + params := &route53.CreateHostedZoneInput{ + CallerReference: aws.String("Nonce"), // Required + Name: aws.String("DNSName"), // Required + DelegationSetId: aws.String("ResourceId"), + HostedZoneConfig: &route53.HostedZoneConfig{ + Comment: aws.String("ResourceDescription"), + PrivateZone: aws.Bool(true), + }, + VPC: &route53.VPC{ + VPCId: aws.String("VPCId"), + VPCRegion: aws.String("VPCRegion"), + }, + } + resp, err := svc.CreateHostedZone(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_CreateReusableDelegationSet() { + svc := route53.New(session.New()) + + params := &route53.CreateReusableDelegationSetInput{ + CallerReference: aws.String("Nonce"), // Required + HostedZoneId: aws.String("ResourceId"), + } + resp, err := svc.CreateReusableDelegationSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_CreateTrafficPolicy() { + svc := route53.New(session.New()) + + params := &route53.CreateTrafficPolicyInput{ + Document: aws.String("TrafficPolicyDocument"), // Required + Name: aws.String("TrafficPolicyName"), // Required + Comment: aws.String("TrafficPolicyComment"), + } + resp, err := svc.CreateTrafficPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRoute53_CreateTrafficPolicyInstance() { + svc := route53.New(session.New()) + + params := &route53.CreateTrafficPolicyInstanceInput{ + HostedZoneId: aws.String("ResourceId"), // Required + Name: aws.String("DNSName"), // Required + TTL: aws.Int64(1), // Required + TrafficPolicyId: aws.String("TrafficPolicyId"), // Required + TrafficPolicyVersion: aws.Int64(1), // Required + } + resp, err := svc.CreateTrafficPolicyInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_CreateTrafficPolicyVersion() { + svc := route53.New(session.New()) + + params := &route53.CreateTrafficPolicyVersionInput{ + Document: aws.String("TrafficPolicyDocument"), // Required + Id: aws.String("TrafficPolicyId"), // Required + Comment: aws.String("TrafficPolicyComment"), + } + resp, err := svc.CreateTrafficPolicyVersion(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_DeleteHealthCheck() { + svc := route53.New(session.New()) + + params := &route53.DeleteHealthCheckInput{ + HealthCheckId: aws.String("HealthCheckId"), // Required + } + resp, err := svc.DeleteHealthCheck(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_DeleteHostedZone() { + svc := route53.New(session.New()) + + params := &route53.DeleteHostedZoneInput{ + Id: aws.String("ResourceId"), // Required + } + resp, err := svc.DeleteHostedZone(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_DeleteReusableDelegationSet() { + svc := route53.New(session.New()) + + params := &route53.DeleteReusableDelegationSetInput{ + Id: aws.String("ResourceId"), // Required + } + resp, err := svc.DeleteReusableDelegationSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_DeleteTrafficPolicy() { + svc := route53.New(session.New()) + + params := &route53.DeleteTrafficPolicyInput{ + Id: aws.String("TrafficPolicyId"), // Required + Version: aws.Int64(1), // Required + } + resp, err := svc.DeleteTrafficPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_DeleteTrafficPolicyInstance() { + svc := route53.New(session.New()) + + params := &route53.DeleteTrafficPolicyInstanceInput{ + Id: aws.String("TrafficPolicyInstanceId"), // Required + } + resp, err := svc.DeleteTrafficPolicyInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_DisassociateVPCFromHostedZone() { + svc := route53.New(session.New()) + + params := &route53.DisassociateVPCFromHostedZoneInput{ + HostedZoneId: aws.String("ResourceId"), // Required + VPC: &route53.VPC{ // Required + VPCId: aws.String("VPCId"), + VPCRegion: aws.String("VPCRegion"), + }, + Comment: aws.String("DisassociateVPCComment"), + } + resp, err := svc.DisassociateVPCFromHostedZone(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_GetChange() { + svc := route53.New(session.New()) + + params := &route53.GetChangeInput{ + Id: aws.String("ResourceId"), // Required + } + resp, err := svc.GetChange(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_GetChangeDetails() { + svc := route53.New(session.New()) + + params := &route53.GetChangeDetailsInput{ + Id: aws.String("ResourceId"), // Required + } + resp, err := svc.GetChangeDetails(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_GetCheckerIpRanges() { + svc := route53.New(session.New()) + + var params *route53.GetCheckerIpRangesInput + resp, err := svc.GetCheckerIpRanges(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_GetGeoLocation() { + svc := route53.New(session.New()) + + params := &route53.GetGeoLocationInput{ + ContinentCode: aws.String("GeoLocationContinentCode"), + CountryCode: aws.String("GeoLocationCountryCode"), + SubdivisionCode: aws.String("GeoLocationSubdivisionCode"), + } + resp, err := svc.GetGeoLocation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_GetHealthCheck() { + svc := route53.New(session.New()) + + params := &route53.GetHealthCheckInput{ + HealthCheckId: aws.String("HealthCheckId"), // Required + } + resp, err := svc.GetHealthCheck(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_GetHealthCheckCount() { + svc := route53.New(session.New()) + + var params *route53.GetHealthCheckCountInput + resp, err := svc.GetHealthCheckCount(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRoute53_GetHealthCheckLastFailureReason() { + svc := route53.New(session.New()) + + params := &route53.GetHealthCheckLastFailureReasonInput{ + HealthCheckId: aws.String("HealthCheckId"), // Required + } + resp, err := svc.GetHealthCheckLastFailureReason(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_GetHealthCheckStatus() { + svc := route53.New(session.New()) + + params := &route53.GetHealthCheckStatusInput{ + HealthCheckId: aws.String("HealthCheckId"), // Required + } + resp, err := svc.GetHealthCheckStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_GetHostedZone() { + svc := route53.New(session.New()) + + params := &route53.GetHostedZoneInput{ + Id: aws.String("ResourceId"), // Required + } + resp, err := svc.GetHostedZone(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_GetHostedZoneCount() { + svc := route53.New(session.New()) + + var params *route53.GetHostedZoneCountInput + resp, err := svc.GetHostedZoneCount(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_GetReusableDelegationSet() { + svc := route53.New(session.New()) + + params := &route53.GetReusableDelegationSetInput{ + Id: aws.String("ResourceId"), // Required + } + resp, err := svc.GetReusableDelegationSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_GetTrafficPolicy() { + svc := route53.New(session.New()) + + params := &route53.GetTrafficPolicyInput{ + Id: aws.String("TrafficPolicyId"), // Required + Version: aws.Int64(1), // Required + } + resp, err := svc.GetTrafficPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_GetTrafficPolicyInstance() { + svc := route53.New(session.New()) + + params := &route53.GetTrafficPolicyInstanceInput{ + Id: aws.String("TrafficPolicyInstanceId"), // Required + } + resp, err := svc.GetTrafficPolicyInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_GetTrafficPolicyInstanceCount() { + svc := route53.New(session.New()) + + var params *route53.GetTrafficPolicyInstanceCountInput + resp, err := svc.GetTrafficPolicyInstanceCount(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ListChangeBatchesByHostedZone() { + svc := route53.New(session.New()) + + params := &route53.ListChangeBatchesByHostedZoneInput{ + EndDate: aws.String("Date"), // Required + HostedZoneId: aws.String("ResourceId"), // Required + StartDate: aws.String("Date"), // Required + Marker: aws.String("PageMarker"), + MaxItems: aws.String("PageMaxItems"), + } + resp, err := svc.ListChangeBatchesByHostedZone(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ListChangeBatchesByRRSet() { + svc := route53.New(session.New()) + + params := &route53.ListChangeBatchesByRRSetInput{ + EndDate: aws.String("Date"), // Required + HostedZoneId: aws.String("ResourceId"), // Required + Name: aws.String("DNSName"), // Required + StartDate: aws.String("Date"), // Required + Type: aws.String("RRType"), // Required + Marker: aws.String("PageMarker"), + MaxItems: aws.String("PageMaxItems"), + SetIdentifier: aws.String("ResourceRecordSetIdentifier"), + } + resp, err := svc.ListChangeBatchesByRRSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ListGeoLocations() { + svc := route53.New(session.New()) + + params := &route53.ListGeoLocationsInput{ + MaxItems: aws.String("PageMaxItems"), + StartContinentCode: aws.String("GeoLocationContinentCode"), + StartCountryCode: aws.String("GeoLocationCountryCode"), + StartSubdivisionCode: aws.String("GeoLocationSubdivisionCode"), + } + resp, err := svc.ListGeoLocations(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ListHealthChecks() { + svc := route53.New(session.New()) + + params := &route53.ListHealthChecksInput{ + Marker: aws.String("PageMarker"), + MaxItems: aws.String("PageMaxItems"), + } + resp, err := svc.ListHealthChecks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ListHostedZones() { + svc := route53.New(session.New()) + + params := &route53.ListHostedZonesInput{ + DelegationSetId: aws.String("ResourceId"), + Marker: aws.String("PageMarker"), + MaxItems: aws.String("PageMaxItems"), + } + resp, err := svc.ListHostedZones(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ListHostedZonesByName() { + svc := route53.New(session.New()) + + params := &route53.ListHostedZonesByNameInput{ + DNSName: aws.String("DNSName"), + HostedZoneId: aws.String("ResourceId"), + MaxItems: aws.String("PageMaxItems"), + } + resp, err := svc.ListHostedZonesByName(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ListResourceRecordSets() { + svc := route53.New(session.New()) + + params := &route53.ListResourceRecordSetsInput{ + HostedZoneId: aws.String("ResourceId"), // Required + MaxItems: aws.String("PageMaxItems"), + StartRecordIdentifier: aws.String("ResourceRecordSetIdentifier"), + StartRecordName: aws.String("DNSName"), + StartRecordType: aws.String("RRType"), + } + resp, err := svc.ListResourceRecordSets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ListReusableDelegationSets() { + svc := route53.New(session.New()) + + params := &route53.ListReusableDelegationSetsInput{ + Marker: aws.String("PageMarker"), + MaxItems: aws.String("PageMaxItems"), + } + resp, err := svc.ListReusableDelegationSets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ListTagsForResource() { + svc := route53.New(session.New()) + + params := &route53.ListTagsForResourceInput{ + ResourceId: aws.String("TagResourceId"), // Required + ResourceType: aws.String("TagResourceType"), // Required + } + resp, err := svc.ListTagsForResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ListTagsForResources() { + svc := route53.New(session.New()) + + params := &route53.ListTagsForResourcesInput{ + ResourceIds: []*string{ // Required + aws.String("TagResourceId"), // Required + // More values... + }, + ResourceType: aws.String("TagResourceType"), // Required + } + resp, err := svc.ListTagsForResources(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ListTrafficPolicies() { + svc := route53.New(session.New()) + + params := &route53.ListTrafficPoliciesInput{ + MaxItems: aws.String("PageMaxItems"), + TrafficPolicyIdMarker: aws.String("TrafficPolicyId"), + } + resp, err := svc.ListTrafficPolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ListTrafficPolicyInstances() { + svc := route53.New(session.New()) + + params := &route53.ListTrafficPolicyInstancesInput{ + HostedZoneIdMarker: aws.String("ResourceId"), + MaxItems: aws.String("PageMaxItems"), + TrafficPolicyInstanceNameMarker: aws.String("DNSName"), + TrafficPolicyInstanceTypeMarker: aws.String("RRType"), + } + resp, err := svc.ListTrafficPolicyInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
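+	// (An aside on paging: the Marker/MaxItems and Start* fields in these
+	// List examples page by hand. For operations that support it, the SDK
+	// also generates *Pages helpers; a minimal sketch, where the zone ID is
+	// a placeholder and not part of the generated examples:
+	//
+	//   err := svc.ListResourceRecordSetsPages(
+	//       &route53.ListResourceRecordSetsInput{HostedZoneId: aws.String("Z123EXAMPLE")},
+	//       func(page *route53.ListResourceRecordSetsOutput, lastPage bool) bool {
+	//           for _, rrs := range page.ResourceRecordSets {
+	//               fmt.Println(aws.StringValue(rrs.Name), aws.StringValue(rrs.Type))
+	//           }
+	//           return true // keep paging
+	//       }))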
+ fmt.Println(resp) +} + +func ExampleRoute53_ListTrafficPolicyInstancesByHostedZone() { + svc := route53.New(session.New()) + + params := &route53.ListTrafficPolicyInstancesByHostedZoneInput{ + HostedZoneId: aws.String("ResourceId"), // Required + MaxItems: aws.String("PageMaxItems"), + TrafficPolicyInstanceNameMarker: aws.String("DNSName"), + TrafficPolicyInstanceTypeMarker: aws.String("RRType"), + } + resp, err := svc.ListTrafficPolicyInstancesByHostedZone(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ListTrafficPolicyInstancesByPolicy() { + svc := route53.New(session.New()) + + params := &route53.ListTrafficPolicyInstancesByPolicyInput{ + TrafficPolicyId: aws.String("TrafficPolicyId"), // Required + TrafficPolicyVersion: aws.Int64(1), // Required + HostedZoneIdMarker: aws.String("ResourceId"), + MaxItems: aws.String("PageMaxItems"), + TrafficPolicyInstanceNameMarker: aws.String("DNSName"), + TrafficPolicyInstanceTypeMarker: aws.String("RRType"), + } + resp, err := svc.ListTrafficPolicyInstancesByPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ListTrafficPolicyVersions() { + svc := route53.New(session.New()) + + params := &route53.ListTrafficPolicyVersionsInput{ + Id: aws.String("TrafficPolicyId"), // Required + MaxItems: aws.String("PageMaxItems"), + TrafficPolicyVersionMarker: aws.String("TrafficPolicyVersionMarker"), + } + resp, err := svc.ListTrafficPolicyVersions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_UpdateHealthCheck() { + svc := route53.New(session.New()) + + params := &route53.UpdateHealthCheckInput{ + HealthCheckId: aws.String("HealthCheckId"), // Required + AlarmIdentifier: &route53.AlarmIdentifier{ + Name: aws.String("AlarmName"), // Required + Region: aws.String("CloudWatchRegion"), // Required + }, + ChildHealthChecks: []*string{ + aws.String("HealthCheckId"), // Required + // More values... + }, + EnableSNI: aws.Bool(true), + FailureThreshold: aws.Int64(1), + FullyQualifiedDomainName: aws.String("FullyQualifiedDomainName"), + HealthCheckVersion: aws.Int64(1), + HealthThreshold: aws.Int64(1), + IPAddress: aws.String("IPAddress"), + InsufficientDataHealthStatus: aws.String("InsufficientDataHealthStatus"), + Inverted: aws.Bool(true), + Port: aws.Int64(1), + Regions: []*string{ + aws.String("HealthCheckRegion"), // Required + // More values... + }, + ResourcePath: aws.String("ResourcePath"), + SearchString: aws.String("SearchString"), + } + resp, err := svc.UpdateHealthCheck(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+	fmt.Println(resp)
+}
+
+func ExampleRoute53_UpdateHostedZoneComment() {
+	svc := route53.New(session.New())
+
+	params := &route53.UpdateHostedZoneCommentInput{
+		Id:      aws.String("ResourceId"), // Required
+		Comment: aws.String("ResourceDescription"),
+	}
+	resp, err := svc.UpdateHostedZoneComment(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleRoute53_UpdateTrafficPolicyComment() {
+	svc := route53.New(session.New())
+
+	params := &route53.UpdateTrafficPolicyCommentInput{
+		Comment: aws.String("TrafficPolicyComment"), // Required
+		Id:      aws.String("TrafficPolicyId"),      // Required
+		Version: aws.Int64(1),                       // Required
+	}
+	resp, err := svc.UpdateTrafficPolicyComment(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleRoute53_UpdateTrafficPolicyInstance() {
+	svc := route53.New(session.New())
+
+	params := &route53.UpdateTrafficPolicyInstanceInput{
+		Id:                   aws.String("TrafficPolicyInstanceId"), // Required
+		TTL:                  aws.Int64(1),                          // Required
+		TrafficPolicyId:      aws.String("TrafficPolicyId"),         // Required
+		TrafficPolicyVersion: aws.Int64(1),                          // Required
+	}
+	resp, err := svc.UpdateTrafficPolicyInstance(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53/route53iface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/route53/route53iface/interface.go
new file mode 100644
index 000000000..a2baa130e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/route53/route53iface/interface.go
@@ -0,0 +1,212 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package route53iface provides an interface for the Amazon Route 53 service client.
+package route53iface
+
+import (
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/service/route53"
+)
+
+// Route53API is the interface type for route53.Route53.
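+//
+// In application code this interface is what makes the client easy to stub:
+// accept a Route53API instead of a concrete *route53.Route53 and substitute a
+// fake in tests. A minimal sketch (mockRoute53 and the canned output are
+// illustrative, not part of the generated package):
+//
+//    type mockRoute53 struct {
+//        route53iface.Route53API
+//    }
+//
+//    func (m mockRoute53) GetHostedZoneCount(*route53.GetHostedZoneCountInput) (*route53.GetHostedZoneCountOutput, error) {
+//        return &route53.GetHostedZoneCountOutput{HostedZoneCount: aws.Int64(0)}, nil
+//    }
+//
+// Unimplemented methods fall through to the embedded interface value and
+// panic if called, which keeps the stub small.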
+type Route53API interface { + AssociateVPCWithHostedZoneRequest(*route53.AssociateVPCWithHostedZoneInput) (*request.Request, *route53.AssociateVPCWithHostedZoneOutput) + + AssociateVPCWithHostedZone(*route53.AssociateVPCWithHostedZoneInput) (*route53.AssociateVPCWithHostedZoneOutput, error) + + ChangeResourceRecordSetsRequest(*route53.ChangeResourceRecordSetsInput) (*request.Request, *route53.ChangeResourceRecordSetsOutput) + + ChangeResourceRecordSets(*route53.ChangeResourceRecordSetsInput) (*route53.ChangeResourceRecordSetsOutput, error) + + ChangeTagsForResourceRequest(*route53.ChangeTagsForResourceInput) (*request.Request, *route53.ChangeTagsForResourceOutput) + + ChangeTagsForResource(*route53.ChangeTagsForResourceInput) (*route53.ChangeTagsForResourceOutput, error) + + CreateHealthCheckRequest(*route53.CreateHealthCheckInput) (*request.Request, *route53.CreateHealthCheckOutput) + + CreateHealthCheck(*route53.CreateHealthCheckInput) (*route53.CreateHealthCheckOutput, error) + + CreateHostedZoneRequest(*route53.CreateHostedZoneInput) (*request.Request, *route53.CreateHostedZoneOutput) + + CreateHostedZone(*route53.CreateHostedZoneInput) (*route53.CreateHostedZoneOutput, error) + + CreateReusableDelegationSetRequest(*route53.CreateReusableDelegationSetInput) (*request.Request, *route53.CreateReusableDelegationSetOutput) + + CreateReusableDelegationSet(*route53.CreateReusableDelegationSetInput) (*route53.CreateReusableDelegationSetOutput, error) + + CreateTrafficPolicyRequest(*route53.CreateTrafficPolicyInput) (*request.Request, *route53.CreateTrafficPolicyOutput) + + CreateTrafficPolicy(*route53.CreateTrafficPolicyInput) (*route53.CreateTrafficPolicyOutput, error) + + CreateTrafficPolicyInstanceRequest(*route53.CreateTrafficPolicyInstanceInput) (*request.Request, *route53.CreateTrafficPolicyInstanceOutput) + + CreateTrafficPolicyInstance(*route53.CreateTrafficPolicyInstanceInput) (*route53.CreateTrafficPolicyInstanceOutput, error) + + CreateTrafficPolicyVersionRequest(*route53.CreateTrafficPolicyVersionInput) (*request.Request, *route53.CreateTrafficPolicyVersionOutput) + + CreateTrafficPolicyVersion(*route53.CreateTrafficPolicyVersionInput) (*route53.CreateTrafficPolicyVersionOutput, error) + + DeleteHealthCheckRequest(*route53.DeleteHealthCheckInput) (*request.Request, *route53.DeleteHealthCheckOutput) + + DeleteHealthCheck(*route53.DeleteHealthCheckInput) (*route53.DeleteHealthCheckOutput, error) + + DeleteHostedZoneRequest(*route53.DeleteHostedZoneInput) (*request.Request, *route53.DeleteHostedZoneOutput) + + DeleteHostedZone(*route53.DeleteHostedZoneInput) (*route53.DeleteHostedZoneOutput, error) + + DeleteReusableDelegationSetRequest(*route53.DeleteReusableDelegationSetInput) (*request.Request, *route53.DeleteReusableDelegationSetOutput) + + DeleteReusableDelegationSet(*route53.DeleteReusableDelegationSetInput) (*route53.DeleteReusableDelegationSetOutput, error) + + DeleteTrafficPolicyRequest(*route53.DeleteTrafficPolicyInput) (*request.Request, *route53.DeleteTrafficPolicyOutput) + + DeleteTrafficPolicy(*route53.DeleteTrafficPolicyInput) (*route53.DeleteTrafficPolicyOutput, error) + + DeleteTrafficPolicyInstanceRequest(*route53.DeleteTrafficPolicyInstanceInput) (*request.Request, *route53.DeleteTrafficPolicyInstanceOutput) + + DeleteTrafficPolicyInstance(*route53.DeleteTrafficPolicyInstanceInput) (*route53.DeleteTrafficPolicyInstanceOutput, error) + + DisassociateVPCFromHostedZoneRequest(*route53.DisassociateVPCFromHostedZoneInput) (*request.Request, 
*route53.DisassociateVPCFromHostedZoneOutput) + + DisassociateVPCFromHostedZone(*route53.DisassociateVPCFromHostedZoneInput) (*route53.DisassociateVPCFromHostedZoneOutput, error) + + GetChangeRequest(*route53.GetChangeInput) (*request.Request, *route53.GetChangeOutput) + + GetChange(*route53.GetChangeInput) (*route53.GetChangeOutput, error) + + GetChangeDetailsRequest(*route53.GetChangeDetailsInput) (*request.Request, *route53.GetChangeDetailsOutput) + + GetChangeDetails(*route53.GetChangeDetailsInput) (*route53.GetChangeDetailsOutput, error) + + GetCheckerIpRangesRequest(*route53.GetCheckerIpRangesInput) (*request.Request, *route53.GetCheckerIpRangesOutput) + + GetCheckerIpRanges(*route53.GetCheckerIpRangesInput) (*route53.GetCheckerIpRangesOutput, error) + + GetGeoLocationRequest(*route53.GetGeoLocationInput) (*request.Request, *route53.GetGeoLocationOutput) + + GetGeoLocation(*route53.GetGeoLocationInput) (*route53.GetGeoLocationOutput, error) + + GetHealthCheckRequest(*route53.GetHealthCheckInput) (*request.Request, *route53.GetHealthCheckOutput) + + GetHealthCheck(*route53.GetHealthCheckInput) (*route53.GetHealthCheckOutput, error) + + GetHealthCheckCountRequest(*route53.GetHealthCheckCountInput) (*request.Request, *route53.GetHealthCheckCountOutput) + + GetHealthCheckCount(*route53.GetHealthCheckCountInput) (*route53.GetHealthCheckCountOutput, error) + + GetHealthCheckLastFailureReasonRequest(*route53.GetHealthCheckLastFailureReasonInput) (*request.Request, *route53.GetHealthCheckLastFailureReasonOutput) + + GetHealthCheckLastFailureReason(*route53.GetHealthCheckLastFailureReasonInput) (*route53.GetHealthCheckLastFailureReasonOutput, error) + + GetHealthCheckStatusRequest(*route53.GetHealthCheckStatusInput) (*request.Request, *route53.GetHealthCheckStatusOutput) + + GetHealthCheckStatus(*route53.GetHealthCheckStatusInput) (*route53.GetHealthCheckStatusOutput, error) + + GetHostedZoneRequest(*route53.GetHostedZoneInput) (*request.Request, *route53.GetHostedZoneOutput) + + GetHostedZone(*route53.GetHostedZoneInput) (*route53.GetHostedZoneOutput, error) + + GetHostedZoneCountRequest(*route53.GetHostedZoneCountInput) (*request.Request, *route53.GetHostedZoneCountOutput) + + GetHostedZoneCount(*route53.GetHostedZoneCountInput) (*route53.GetHostedZoneCountOutput, error) + + GetReusableDelegationSetRequest(*route53.GetReusableDelegationSetInput) (*request.Request, *route53.GetReusableDelegationSetOutput) + + GetReusableDelegationSet(*route53.GetReusableDelegationSetInput) (*route53.GetReusableDelegationSetOutput, error) + + GetTrafficPolicyRequest(*route53.GetTrafficPolicyInput) (*request.Request, *route53.GetTrafficPolicyOutput) + + GetTrafficPolicy(*route53.GetTrafficPolicyInput) (*route53.GetTrafficPolicyOutput, error) + + GetTrafficPolicyInstanceRequest(*route53.GetTrafficPolicyInstanceInput) (*request.Request, *route53.GetTrafficPolicyInstanceOutput) + + GetTrafficPolicyInstance(*route53.GetTrafficPolicyInstanceInput) (*route53.GetTrafficPolicyInstanceOutput, error) + + GetTrafficPolicyInstanceCountRequest(*route53.GetTrafficPolicyInstanceCountInput) (*request.Request, *route53.GetTrafficPolicyInstanceCountOutput) + + GetTrafficPolicyInstanceCount(*route53.GetTrafficPolicyInstanceCountInput) (*route53.GetTrafficPolicyInstanceCountOutput, error) + + ListChangeBatchesByHostedZoneRequest(*route53.ListChangeBatchesByHostedZoneInput) (*request.Request, *route53.ListChangeBatchesByHostedZoneOutput) + + ListChangeBatchesByHostedZone(*route53.ListChangeBatchesByHostedZoneInput) 
(*route53.ListChangeBatchesByHostedZoneOutput, error) + + ListChangeBatchesByRRSetRequest(*route53.ListChangeBatchesByRRSetInput) (*request.Request, *route53.ListChangeBatchesByRRSetOutput) + + ListChangeBatchesByRRSet(*route53.ListChangeBatchesByRRSetInput) (*route53.ListChangeBatchesByRRSetOutput, error) + + ListGeoLocationsRequest(*route53.ListGeoLocationsInput) (*request.Request, *route53.ListGeoLocationsOutput) + + ListGeoLocations(*route53.ListGeoLocationsInput) (*route53.ListGeoLocationsOutput, error) + + ListHealthChecksRequest(*route53.ListHealthChecksInput) (*request.Request, *route53.ListHealthChecksOutput) + + ListHealthChecks(*route53.ListHealthChecksInput) (*route53.ListHealthChecksOutput, error) + + ListHealthChecksPages(*route53.ListHealthChecksInput, func(*route53.ListHealthChecksOutput, bool) bool) error + + ListHostedZonesRequest(*route53.ListHostedZonesInput) (*request.Request, *route53.ListHostedZonesOutput) + + ListHostedZones(*route53.ListHostedZonesInput) (*route53.ListHostedZonesOutput, error) + + ListHostedZonesPages(*route53.ListHostedZonesInput, func(*route53.ListHostedZonesOutput, bool) bool) error + + ListHostedZonesByNameRequest(*route53.ListHostedZonesByNameInput) (*request.Request, *route53.ListHostedZonesByNameOutput) + + ListHostedZonesByName(*route53.ListHostedZonesByNameInput) (*route53.ListHostedZonesByNameOutput, error) + + ListResourceRecordSetsRequest(*route53.ListResourceRecordSetsInput) (*request.Request, *route53.ListResourceRecordSetsOutput) + + ListResourceRecordSets(*route53.ListResourceRecordSetsInput) (*route53.ListResourceRecordSetsOutput, error) + + ListResourceRecordSetsPages(*route53.ListResourceRecordSetsInput, func(*route53.ListResourceRecordSetsOutput, bool) bool) error + + ListReusableDelegationSetsRequest(*route53.ListReusableDelegationSetsInput) (*request.Request, *route53.ListReusableDelegationSetsOutput) + + ListReusableDelegationSets(*route53.ListReusableDelegationSetsInput) (*route53.ListReusableDelegationSetsOutput, error) + + ListTagsForResourceRequest(*route53.ListTagsForResourceInput) (*request.Request, *route53.ListTagsForResourceOutput) + + ListTagsForResource(*route53.ListTagsForResourceInput) (*route53.ListTagsForResourceOutput, error) + + ListTagsForResourcesRequest(*route53.ListTagsForResourcesInput) (*request.Request, *route53.ListTagsForResourcesOutput) + + ListTagsForResources(*route53.ListTagsForResourcesInput) (*route53.ListTagsForResourcesOutput, error) + + ListTrafficPoliciesRequest(*route53.ListTrafficPoliciesInput) (*request.Request, *route53.ListTrafficPoliciesOutput) + + ListTrafficPolicies(*route53.ListTrafficPoliciesInput) (*route53.ListTrafficPoliciesOutput, error) + + ListTrafficPolicyInstancesRequest(*route53.ListTrafficPolicyInstancesInput) (*request.Request, *route53.ListTrafficPolicyInstancesOutput) + + ListTrafficPolicyInstances(*route53.ListTrafficPolicyInstancesInput) (*route53.ListTrafficPolicyInstancesOutput, error) + + ListTrafficPolicyInstancesByHostedZoneRequest(*route53.ListTrafficPolicyInstancesByHostedZoneInput) (*request.Request, *route53.ListTrafficPolicyInstancesByHostedZoneOutput) + + ListTrafficPolicyInstancesByHostedZone(*route53.ListTrafficPolicyInstancesByHostedZoneInput) (*route53.ListTrafficPolicyInstancesByHostedZoneOutput, error) + + ListTrafficPolicyInstancesByPolicyRequest(*route53.ListTrafficPolicyInstancesByPolicyInput) (*request.Request, *route53.ListTrafficPolicyInstancesByPolicyOutput) + + 
ListTrafficPolicyInstancesByPolicy(*route53.ListTrafficPolicyInstancesByPolicyInput) (*route53.ListTrafficPolicyInstancesByPolicyOutput, error)
+
+	ListTrafficPolicyVersionsRequest(*route53.ListTrafficPolicyVersionsInput) (*request.Request, *route53.ListTrafficPolicyVersionsOutput)
+
+	ListTrafficPolicyVersions(*route53.ListTrafficPolicyVersionsInput) (*route53.ListTrafficPolicyVersionsOutput, error)
+
+	UpdateHealthCheckRequest(*route53.UpdateHealthCheckInput) (*request.Request, *route53.UpdateHealthCheckOutput)
+
+	UpdateHealthCheck(*route53.UpdateHealthCheckInput) (*route53.UpdateHealthCheckOutput, error)
+
+	UpdateHostedZoneCommentRequest(*route53.UpdateHostedZoneCommentInput) (*request.Request, *route53.UpdateHostedZoneCommentOutput)
+
+	UpdateHostedZoneComment(*route53.UpdateHostedZoneCommentInput) (*route53.UpdateHostedZoneCommentOutput, error)
+
+	UpdateTrafficPolicyCommentRequest(*route53.UpdateTrafficPolicyCommentInput) (*request.Request, *route53.UpdateTrafficPolicyCommentOutput)
+
+	UpdateTrafficPolicyComment(*route53.UpdateTrafficPolicyCommentInput) (*route53.UpdateTrafficPolicyCommentOutput, error)
+
+	UpdateTrafficPolicyInstanceRequest(*route53.UpdateTrafficPolicyInstanceInput) (*request.Request, *route53.UpdateTrafficPolicyInstanceOutput)
+
+	UpdateTrafficPolicyInstance(*route53.UpdateTrafficPolicyInstanceInput) (*route53.UpdateTrafficPolicyInstanceOutput, error)
+}
+
+var _ Route53API = (*route53.Route53)(nil)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53/service.go b/vendor/github.com/aws/aws-sdk-go/service/route53/service.go
new file mode 100644
index 000000000..269c4db36
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/route53/service.go
@@ -0,0 +1,86 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package route53
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/restxml"
+)
+
+// Route53 is a client for Route 53.
+// The service client's operations are safe to use concurrently.
+// It is not safe to mutate any of the client's properties though.
+type Route53 struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "route53"
+
+// New creates a new instance of the Route53 client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create a Route53 client from just a session.
+//     svc := route53.New(mySession)
+//
+//     // Create a Route53 client with additional configuration
+//     svc := route53.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *Route53 {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *Route53 {
+	svc := &Route53{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   ServiceName,
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "2013-04-01",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+	svc.Handlers.Build.PushBackNamed(restxml.BuildHandler)
+	svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler)
+	svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler)
+	svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for a Route53 operation and runs any
+// custom request initialization.
+func (c *Route53) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	// Run custom request initialization if present
+	if initRequest != nil {
+		initRequest(req)
+	}
+
+	return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/service/route53/unmarshal_error.go
new file mode 100644
index 000000000..e91375dc4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/route53/unmarshal_error.go
@@ -0,0 +1,77 @@
+package route53
+
+import (
+	"bytes"
+	"encoding/xml"
+	"io/ioutil"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol/restxml"
+)
+
+type baseXMLErrorResponse struct {
+	XMLName xml.Name
+}
+
+type standardXMLErrorResponse struct {
+	XMLName   xml.Name `xml:"ErrorResponse"`
+	Code      string   `xml:"Error>Code"`
+	Message   string   `xml:"Error>Message"`
+	RequestID string   `xml:"RequestId"`
+}
+
+type invalidChangeBatchXMLErrorResponse struct {
+	XMLName  xml.Name `xml:"InvalidChangeBatch"`
+	Messages []string `xml:"Messages>Message"`
+}
+
+// unmarshalChangeResourceRecordSetsError peeks at the root element of the
+// error body: InvalidChangeBatch responses get special handling, everything
+// else falls back to the generic restxml error unmarshaler.
+func unmarshalChangeResourceRecordSetsError(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+
+	responseBody, err := ioutil.ReadAll(r.HTTPResponse.Body)
+
+	if err != nil {
+		r.Error = awserr.New("SerializationError", "failed to read Route53 XML error response", err)
+		return
+	}
+
+	baseError := &baseXMLErrorResponse{}
+
+	if err := xml.Unmarshal(responseBody, baseError); err != nil {
+		r.Error = awserr.New("SerializationError", "failed to decode Route53 XML error response", err)
+		return
+	}
+
+	switch baseError.XMLName.Local {
+	case "InvalidChangeBatch":
+		unmarshalInvalidChangeBatchError(r, responseBody)
+	default:
+		r.HTTPResponse.Body = ioutil.NopCloser(bytes.NewReader(responseBody))
+		restxml.UnmarshalError(r)
+	}
+}
+
+// unmarshalInvalidChangeBatchError maps an InvalidChangeBatch response body,
+// which carries one message per rejected change, onto a single BatchedErrors
+// request failure.
+func unmarshalInvalidChangeBatchError(r *request.Request, responseBody []byte) {
+	resp := &invalidChangeBatchXMLErrorResponse{}
+	err := xml.Unmarshal(responseBody, resp)
+
+	if err != nil {
+		r.Error = awserr.New("SerializationError", "failed to decode Route53 XML error response", err)
+		return
+	}
+
+	const errorCode = "InvalidChangeBatch"
+	errors := []error{}
+
+	for _, msg := range resp.Messages {
+		errors = append(errors, awserr.New(errorCode, msg, nil))
+	}
+
+	r.Error = awserr.NewRequestFailure(
+		awserr.NewBatchError(errorCode, "ChangeBatch errors occurred", errors),
+		r.HTTPResponse.StatusCode,
+		r.RequestID,
+	)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53/unmarshal_error_leak_test.go
b/vendor/github.com/aws/aws-sdk-go/service/route53/unmarshal_error_leak_test.go
new file mode 100644
index 000000000..2d6d86d89
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/route53/unmarshal_error_leak_test.go
@@ -0,0 +1,37 @@
+package route53
+
+import (
+	"net/http"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/awstesting"
+)
+
+// TestUnmarshalErrorLeak verifies the custom error unmarshaler fully drains
+// and closes the response body so the connection is not leaked.
+func TestUnmarshalErrorLeak(t *testing.T) {
+	req := &request.Request{
+		Operation: &request.Operation{
+			Name: opChangeResourceRecordSets,
+		},
+		HTTPRequest: &http.Request{
+			Header: make(http.Header),
+			Body:   &awstesting.ReadCloser{Size: 2048},
+		},
+	}
+	req.HTTPResponse = &http.Response{
+		Body: &awstesting.ReadCloser{Size: 2048},
+		Header: http.Header{
+			"X-Amzn-Requestid": []string{"1"},
+		},
+		StatusCode: http.StatusOK,
+	}
+
+	reader := req.HTTPResponse.Body.(*awstesting.ReadCloser)
+	unmarshalChangeResourceRecordSetsError(req)
+
+	assert.NotNil(t, req.Error)
+	assert.Equal(t, reader.Closed, true)
+	assert.Equal(t, reader.Size, 0)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53/unmarshal_error_test.go b/vendor/github.com/aws/aws-sdk-go/service/route53/unmarshal_error_test.go
new file mode 100644
index 000000000..7d5aa90c2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/route53/unmarshal_error_test.go
@@ -0,0 +1,111 @@
+package route53_test
+
+import (
+	"bytes"
+	"io/ioutil"
+	"net/http"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/awstesting/unit"
+	"github.com/aws/aws-sdk-go/service/route53"
+)
+
+func makeClientWithResponse(response string) *route53.Route53 {
+	r := route53.New(unit.Session)
+	r.Handlers.Send.Clear()
+	r.Handlers.Send.PushBack(func(r *request.Request) {
+		body := ioutil.NopCloser(bytes.NewReader([]byte(response)))
+		r.HTTPResponse = &http.Response{
+			ContentLength: int64(len(response)),
+			StatusCode:    400,
+			Status:        "Bad Request",
+			Body:          body,
+		}
+	})
+
+	return r
+}
+
+func TestUnmarshalStandardError(t *testing.T) {
+	const errorResponse = `<?xml version="1.0" encoding="UTF-8"?>
+<ErrorResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
+	<Error>
+		<Code>InvalidDomainName</Code>
+		<Message>The domain name is invalid</Message>
+	</Error>
+	<RequestId>12345</RequestId>
+</ErrorResponse>
+`
+
+	r := makeClientWithResponse(errorResponse)
+
+	_, err := r.CreateHostedZone(&route53.CreateHostedZoneInput{
+		CallerReference: aws.String("test"),
+		Name:            aws.String("test_zone"),
+	})
+
+	assert.Error(t, err)
+	assert.Equal(t, "InvalidDomainName", err.(awserr.Error).Code())
+	assert.Equal(t, "The domain name is invalid", err.(awserr.Error).Message())
+}
+
+func TestUnmarshalInvalidChangeBatch(t *testing.T) {
+	const errorMessage = `
+Tried to create resource record set duplicate.example.com.
type A,
+but it already exists
+`
+	const errorResponse = `<?xml version="1.0" encoding="UTF-8"?>
+<InvalidChangeBatch xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
+	<Messages>
+		<Message>` + errorMessage + `</Message>
+	</Messages>
+</InvalidChangeBatch>
+`
+
+	r := makeClientWithResponse(errorResponse)
+
+	req := &route53.ChangeResourceRecordSetsInput{
+		HostedZoneId: aws.String("zoneId"),
+		ChangeBatch: &route53.ChangeBatch{
+			Changes: []*route53.Change{
+				{
+					Action: aws.String("CREATE"),
+					ResourceRecordSet: &route53.ResourceRecordSet{
+						Name: aws.String("domain"),
+						Type: aws.String("CNAME"),
+						TTL:  aws.Int64(120),
+						ResourceRecords: []*route53.ResourceRecord{
+							{
+								Value: aws.String("cname"),
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	_, err := r.ChangeResourceRecordSets(req)
+	assert.Error(t, err)
+
+	if reqErr, ok := err.(awserr.RequestFailure); ok {
+		assert.Error(t, reqErr)
+		assert.Equal(t, 400, reqErr.StatusCode())
+	} else {
+		assert.Fail(t, "returned error is not a RequestFailure")
+	}
+
+	if batchErr, ok := err.(awserr.BatchedErrors); ok {
+		errs := batchErr.OrigErrs()
+		assert.Len(t, errs, 1)
+		assert.Equal(t, "InvalidChangeBatch", errs[0].(awserr.Error).Code())
+		assert.Equal(t, errorMessage, errs[0].(awserr.Error).Message())
+	} else {
+		assert.Fail(t, "returned error is not a BatchedErrors")
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/route53/waiters.go
new file mode 100644
index 000000000..04786169e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/route53/waiters.go
@@ -0,0 +1,30 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package route53
+
+import (
+	"github.com/aws/aws-sdk-go/private/waiter"
+)
+
+func (c *Route53) WaitUntilResourceRecordSetsChanged(input *GetChangeInput) error {
+	waiterCfg := waiter.Config{
+		Operation:   "GetChange",
+		Delay:       30,
+		MaxAttempts: 60,
+		Acceptors: []waiter.WaitAcceptor{
+			{
+				State:    "success",
+				Matcher:  "path",
+				Argument: "ChangeInfo.Status",
+				Expected: "INSYNC",
+			},
+		},
+	}
+
+	w := waiter.Waiter{
+		Client: c,
+		Input:  input,
+		Config: waiterCfg,
+	}
+	return w.Wait()
+}
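The waiter above polls GetChange every 30 seconds, for at most 60 attempts, and succeeds once ChangeInfo.Status reports INSYNC, meaning the change has propagated to all Route 53 name servers. A typical pairing with ChangeResourceRecordSets might look like the sketch below; the zone ID and record values are placeholders, not part of the vendored code, and error handling is kept minimal:

	package main

	import (
		"fmt"
		"log"

		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/aws/session"
		"github.com/aws/aws-sdk-go/service/route53"
	)

	func main() {
		svc := route53.New(session.New())

		// Submit a record change; the response carries the change ID to wait on.
		resp, err := svc.ChangeResourceRecordSets(&route53.ChangeResourceRecordSetsInput{
			HostedZoneId: aws.String("Z123EXAMPLE"), // placeholder
			ChangeBatch: &route53.ChangeBatch{
				Changes: []*route53.Change{{
					Action: aws.String("UPSERT"),
					ResourceRecordSet: &route53.ResourceRecordSet{
						Name: aws.String("www.example.com."),
						Type: aws.String("A"),
						TTL:  aws.Int64(300),
						ResourceRecords: []*route53.ResourceRecord{
							{Value: aws.String("203.0.113.10")},
						},
					},
				}},
			},
		})
		if err != nil {
			log.Fatal(err)
		}

		// Blocks until the change is INSYNC, or errors after 60 attempts.
		if err := svc.WaitUntilResourceRecordSetsChanged(&route53.GetChangeInput{
			Id: resp.ChangeInfo.Id,
		}); err != nil {
			log.Fatal(err)
		}
		fmt.Println("record change propagated")
	}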
diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53domains/api.go b/vendor/github.com/aws/aws-sdk-go/service/route53domains/api.go
new file mode 100644
index 000000000..b5da552ab
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/route53domains/api.go
@@ -0,0 +1,4025 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package route53domains provides a client for Amazon Route 53 Domains.
+package route53domains
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+const opCheckDomainAvailability = "CheckDomainAvailability"
+
+// CheckDomainAvailabilityRequest generates a "aws/request.Request" representing the
+// client's request for the CheckDomainAvailability operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CheckDomainAvailability method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the CheckDomainAvailabilityRequest method.
+//    req, resp := client.CheckDomainAvailabilityRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Route53Domains) CheckDomainAvailabilityRequest(input *CheckDomainAvailabilityInput) (req *request.Request, output *CheckDomainAvailabilityOutput) {
+	op := &request.Operation{
+		Name:       opCheckDomainAvailability,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CheckDomainAvailabilityInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CheckDomainAvailabilityOutput{}
+	req.Data = output
+	return
+}
+
+// This operation checks the availability of one domain name. Note that if the
+// availability status of a domain is pending, you must submit another request
+// to determine the availability of the domain name.
+func (c *Route53Domains) CheckDomainAvailability(input *CheckDomainAvailabilityInput) (*CheckDomainAvailabilityOutput, error) {
+	req, out := c.CheckDomainAvailabilityRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteTagsForDomain = "DeleteTagsForDomain"
+
+// DeleteTagsForDomainRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteTagsForDomain operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DeleteTagsForDomain method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DeleteTagsForDomainRequest method.
+//    req, resp := client.DeleteTagsForDomainRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Route53Domains) DeleteTagsForDomainRequest(input *DeleteTagsForDomainInput) (req *request.Request, output *DeleteTagsForDomainOutput) {
+	op := &request.Operation{
+		Name:       opDeleteTagsForDomain,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteTagsForDomainInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DeleteTagsForDomainOutput{}
+	req.Data = output
+	return
+}
+
+// This operation deletes the specified tags for a domain.
+//
+// All tag operations are eventually consistent; subsequent operations may
+// not immediately represent all issued operations.
+func (c *Route53Domains) DeleteTagsForDomain(input *DeleteTagsForDomainInput) (*DeleteTagsForDomainOutput, error) {
+	req, out := c.DeleteTagsForDomainRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDisableDomainAutoRenew = "DisableDomainAutoRenew"
+
+// DisableDomainAutoRenewRequest generates a "aws/request.Request" representing the
+// client's request for the DisableDomainAutoRenew operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request.
If +// you just want the service response, call the DisableDomainAutoRenew method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DisableDomainAutoRenewRequest method. +// req, resp := client.DisableDomainAutoRenewRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53Domains) DisableDomainAutoRenewRequest(input *DisableDomainAutoRenewInput) (req *request.Request, output *DisableDomainAutoRenewOutput) { + op := &request.Operation{ + Name: opDisableDomainAutoRenew, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableDomainAutoRenewInput{} + } + + req = c.newRequest(op, input, output) + output = &DisableDomainAutoRenewOutput{} + req.Data = output + return +} + +// This operation disables automatic renewal of domain registration for the +// specified domain. +// +// Caution! Amazon Route 53 doesn't have a manual renewal process, so if you +// disable automatic renewal, registration for the domain will not be renewed +// when the expiration date passes, and you will lose control of the domain +// name. +func (c *Route53Domains) DisableDomainAutoRenew(input *DisableDomainAutoRenewInput) (*DisableDomainAutoRenewOutput, error) { + req, out := c.DisableDomainAutoRenewRequest(input) + err := req.Send() + return out, err +} + +const opDisableDomainTransferLock = "DisableDomainTransferLock" + +// DisableDomainTransferLockRequest generates a "aws/request.Request" representing the +// client's request for the DisableDomainTransferLock operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DisableDomainTransferLock method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DisableDomainTransferLockRequest method. +// req, resp := client.DisableDomainTransferLockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53Domains) DisableDomainTransferLockRequest(input *DisableDomainTransferLockInput) (req *request.Request, output *DisableDomainTransferLockOutput) { + op := &request.Operation{ + Name: opDisableDomainTransferLock, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableDomainTransferLockInput{} + } + + req = c.newRequest(op, input, output) + output = &DisableDomainTransferLockOutput{} + req.Data = output + return +} + +// This operation removes the transfer lock on the domain (specifically the +// clientTransferProhibited status) to allow domain transfers. We recommend +// you refrain from performing this action unless you intend to transfer the +// domain to a different registrar. Successful submission returns an operation +// ID that you can use to track the progress and completion of the action. If +// the request is not completed successfully, the domain registrant will be +// notified by email. 
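+//
+// The returned operation ID can be polled with GetOperationDetail (defined
+// later in this file) until the operation settles. A rough sketch, not part
+// of the generated code: the status strings follow the service's
+// OperationStatus values, the poll interval is arbitrary, and error handling
+// is elided:
+//
+//    out, _ := svc.DisableDomainTransferLock(&route53domains.DisableDomainTransferLockInput{
+//        DomainName: aws.String("example.com"),
+//    })
+//    for {
+//        d, _ := svc.GetOperationDetail(&route53domains.GetOperationDetailInput{
+//            OperationId: out.OperationId,
+//        })
+//        if s := aws.StringValue(d.Status); s != "SUBMITTED" && s != "IN_PROGRESS" {
+//            fmt.Println("final status:", s)
+//            break
+//        }
+//        time.Sleep(10 * time.Second)
+//    }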
+func (c *Route53Domains) DisableDomainTransferLock(input *DisableDomainTransferLockInput) (*DisableDomainTransferLockOutput, error) { + req, out := c.DisableDomainTransferLockRequest(input) + err := req.Send() + return out, err +} + +const opEnableDomainAutoRenew = "EnableDomainAutoRenew" + +// EnableDomainAutoRenewRequest generates a "aws/request.Request" representing the +// client's request for the EnableDomainAutoRenew operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the EnableDomainAutoRenew method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EnableDomainAutoRenewRequest method. +// req, resp := client.EnableDomainAutoRenewRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53Domains) EnableDomainAutoRenewRequest(input *EnableDomainAutoRenewInput) (req *request.Request, output *EnableDomainAutoRenewOutput) { + op := &request.Operation{ + Name: opEnableDomainAutoRenew, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableDomainAutoRenewInput{} + } + + req = c.newRequest(op, input, output) + output = &EnableDomainAutoRenewOutput{} + req.Data = output + return +} + +// This operation configures Amazon Route 53 to automatically renew the specified +// domain before the domain registration expires. The cost of renewing your +// domain registration is billed to your AWS account. +// +// The period during which you can renew a domain name varies by TLD. For a +// list of TLDs and their renewal policies, see "Renewal, restoration, and deletion +// times" (http://wiki.gandi.net/en/domains/renew#renewal_restoration_and_deletion_times) +// on the website for our registrar partner, Gandi. Route 53 requires that you +// renew before the end of the renewal period that is listed on the Gandi website +// so we can complete processing before the deadline. +func (c *Route53Domains) EnableDomainAutoRenew(input *EnableDomainAutoRenewInput) (*EnableDomainAutoRenewOutput, error) { + req, out := c.EnableDomainAutoRenewRequest(input) + err := req.Send() + return out, err +} + +const opEnableDomainTransferLock = "EnableDomainTransferLock" + +// EnableDomainTransferLockRequest generates a "aws/request.Request" representing the +// client's request for the EnableDomainTransferLock operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the EnableDomainTransferLock method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EnableDomainTransferLockRequest method. 
+// req, resp := client.EnableDomainTransferLockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53Domains) EnableDomainTransferLockRequest(input *EnableDomainTransferLockInput) (req *request.Request, output *EnableDomainTransferLockOutput) { + op := &request.Operation{ + Name: opEnableDomainTransferLock, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableDomainTransferLockInput{} + } + + req = c.newRequest(op, input, output) + output = &EnableDomainTransferLockOutput{} + req.Data = output + return +} + +// This operation sets the transfer lock on the domain (specifically the clientTransferProhibited +// status) to prevent domain transfers. Successful submission returns an operation +// ID that you can use to track the progress and completion of the action. If +// the request is not completed successfully, the domain registrant will be +// notified by email. +func (c *Route53Domains) EnableDomainTransferLock(input *EnableDomainTransferLockInput) (*EnableDomainTransferLockOutput, error) { + req, out := c.EnableDomainTransferLockRequest(input) + err := req.Send() + return out, err +} + +const opGetContactReachabilityStatus = "GetContactReachabilityStatus" + +// GetContactReachabilityStatusRequest generates a "aws/request.Request" representing the +// client's request for the GetContactReachabilityStatus operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetContactReachabilityStatus method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetContactReachabilityStatusRequest method. +// req, resp := client.GetContactReachabilityStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53Domains) GetContactReachabilityStatusRequest(input *GetContactReachabilityStatusInput) (req *request.Request, output *GetContactReachabilityStatusOutput) { + op := &request.Operation{ + Name: opGetContactReachabilityStatus, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetContactReachabilityStatusInput{} + } + + req = c.newRequest(op, input, output) + output = &GetContactReachabilityStatusOutput{} + req.Data = output + return +} + +// For operations that require confirmation that the email address for the registrant +// contact is valid, such as registering a new domain, this operation returns +// information about whether the registrant contact has responded. +// +// If you want us to resend the email, use the ResendContactReachabilityEmail +// operation. +func (c *Route53Domains) GetContactReachabilityStatus(input *GetContactReachabilityStatusInput) (*GetContactReachabilityStatusOutput, error) { + req, out := c.GetContactReachabilityStatusRequest(input) + err := req.Send() + return out, err +} + +const opGetDomainDetail = "GetDomainDetail" + +// GetDomainDetailRequest generates a "aws/request.Request" representing the +// client's request for the GetDomainDetail operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetDomainDetail method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetDomainDetailRequest method. +// req, resp := client.GetDomainDetailRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53Domains) GetDomainDetailRequest(input *GetDomainDetailInput) (req *request.Request, output *GetDomainDetailOutput) { + op := &request.Operation{ + Name: opGetDomainDetail, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDomainDetailInput{} + } + + req = c.newRequest(op, input, output) + output = &GetDomainDetailOutput{} + req.Data = output + return +} + +// This operation returns detailed information about the domain. The domain's +// contact information is also returned as part of the output. +func (c *Route53Domains) GetDomainDetail(input *GetDomainDetailInput) (*GetDomainDetailOutput, error) { + req, out := c.GetDomainDetailRequest(input) + err := req.Send() + return out, err +} + +const opGetOperationDetail = "GetOperationDetail" + +// GetOperationDetailRequest generates a "aws/request.Request" representing the +// client's request for the GetOperationDetail operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetOperationDetail method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetOperationDetailRequest method. +// req, resp := client.GetOperationDetailRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53Domains) GetOperationDetailRequest(input *GetOperationDetailInput) (req *request.Request, output *GetOperationDetailOutput) { + op := &request.Operation{ + Name: opGetOperationDetail, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetOperationDetailInput{} + } + + req = c.newRequest(op, input, output) + output = &GetOperationDetailOutput{} + req.Data = output + return +} + +// This operation returns the current status of an operation that is not completed. +func (c *Route53Domains) GetOperationDetail(input *GetOperationDetailInput) (*GetOperationDetailOutput, error) { + req, out := c.GetOperationDetailRequest(input) + err := req.Send() + return out, err +} + +const opListDomains = "ListDomains" + +// ListDomainsRequest generates a "aws/request.Request" representing the +// client's request for the ListDomains operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListDomains method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListDomainsRequest method. +// req, resp := client.ListDomainsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53Domains) ListDomainsRequest(input *ListDomainsInput) (req *request.Request, output *ListDomainsOutput) { + op := &request.Operation{ + Name: opListDomains, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextPageMarker"}, + LimitToken: "MaxItems", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDomainsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListDomainsOutput{} + req.Data = output + return +} + +// This operation returns all the domain names registered with Amazon Route +// 53 for the current AWS account. +func (c *Route53Domains) ListDomains(input *ListDomainsInput) (*ListDomainsOutput, error) { + req, out := c.ListDomainsRequest(input) + err := req.Send() + return out, err +} + +// ListDomainsPages iterates over the pages of a ListDomains operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDomains method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDomains operation. +// pageNum := 0 +// err := client.ListDomainsPages(params, +// func(page *ListDomainsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Route53Domains) ListDomainsPages(input *ListDomainsInput, fn func(p *ListDomainsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListDomainsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListDomainsOutput), lastPage) + }) +} + +const opListOperations = "ListOperations" + +// ListOperationsRequest generates a "aws/request.Request" representing the +// client's request for the ListOperations operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListOperations method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListOperationsRequest method. 
+// req, resp := client.ListOperationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53Domains) ListOperationsRequest(input *ListOperationsInput) (req *request.Request, output *ListOperationsOutput) { + op := &request.Operation{ + Name: opListOperations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextPageMarker"}, + LimitToken: "MaxItems", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListOperationsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListOperationsOutput{} + req.Data = output + return +} + +// This operation returns the operation IDs of operations that are not yet complete. +func (c *Route53Domains) ListOperations(input *ListOperationsInput) (*ListOperationsOutput, error) { + req, out := c.ListOperationsRequest(input) + err := req.Send() + return out, err +} + +// ListOperationsPages iterates over the pages of a ListOperations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListOperations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListOperations operation. +// pageNum := 0 +// err := client.ListOperationsPages(params, +// func(page *ListOperationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Route53Domains) ListOperationsPages(input *ListOperationsInput, fn func(p *ListOperationsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListOperationsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListOperationsOutput), lastPage) + }) +} + +const opListTagsForDomain = "ListTagsForDomain" + +// ListTagsForDomainRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForDomain operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTagsForDomain method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTagsForDomainRequest method. 
+// req, resp := client.ListTagsForDomainRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *Route53Domains) ListTagsForDomainRequest(input *ListTagsForDomainInput) (req *request.Request, output *ListTagsForDomainOutput) {
+	op := &request.Operation{
+		Name:       opListTagsForDomain,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &ListTagsForDomainInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ListTagsForDomainOutput{}
+	req.Data = output
+	return
+}
+
+// This operation returns all of the tags that are associated with the specified
+// domain.
+//
+// All tag operations are eventually consistent; subsequent operations may
+// not immediately represent all issued operations.
+func (c *Route53Domains) ListTagsForDomain(input *ListTagsForDomainInput) (*ListTagsForDomainOutput, error) {
+	req, out := c.ListTagsForDomainRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opRegisterDomain = "RegisterDomain"
+
+// RegisterDomainRequest generates a "aws/request.Request" representing the
+// client's request for the RegisterDomain operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the RegisterDomain method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the RegisterDomainRequest method.
+// req, resp := client.RegisterDomainRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *Route53Domains) RegisterDomainRequest(input *RegisterDomainInput) (req *request.Request, output *RegisterDomainOutput) {
+	op := &request.Operation{
+		Name:       opRegisterDomain,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &RegisterDomainInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &RegisterDomainOutput{}
+	req.Data = output
+	return
+}
+
+// This operation registers a domain. Domains are registered by the AWS registrar
+// partner, Gandi. For some top-level domains (TLDs), this operation requires
+// extra parameters.
+//
+// When you register a domain, Amazon Route 53 does the following:
+//
+//  - Creates an Amazon Route 53 hosted zone that has the same name as the domain.
+//    Amazon Route 53 assigns four name servers to your hosted zone and automatically
+//    updates your domain registration with the names of these name servers.
+//
+//  - Enables autorenew, so your domain registration will renew automatically each
+//    year. We'll notify you in advance of the renewal date so you can choose whether
+//    to renew the registration.
+//
+//  - Optionally enables privacy protection, so WHOIS queries return contact
+//    information for our registrar partner, Gandi, instead of the information you
+//    entered for registrant, admin, and tech contacts.
+//
+//  - If registration is successful, returns an operation ID that you can use to
+//    track the progress and completion of the action. If the request is not completed
+//    successfully, the domain registrant is notified by email.
+//
+//  - Charges your AWS account an amount based on the top-level domain. For more
+//    information, see Amazon Route 53 Pricing (http://aws.amazon.com/route53/pricing/).
+func (c *Route53Domains) RegisterDomain(input *RegisterDomainInput) (*RegisterDomainOutput, error) {
+	req, out := c.RegisterDomainRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opResendContactReachabilityEmail = "ResendContactReachabilityEmail"
+
+// ResendContactReachabilityEmailRequest generates a "aws/request.Request" representing the
+// client's request for the ResendContactReachabilityEmail operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ResendContactReachabilityEmail method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the ResendContactReachabilityEmailRequest method.
+// req, resp := client.ResendContactReachabilityEmailRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *Route53Domains) ResendContactReachabilityEmailRequest(input *ResendContactReachabilityEmailInput) (req *request.Request, output *ResendContactReachabilityEmailOutput) {
+	op := &request.Operation{
+		Name:       opResendContactReachabilityEmail,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &ResendContactReachabilityEmailInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ResendContactReachabilityEmailOutput{}
+	req.Data = output
+	return
+}
+
+// For operations that require confirmation that the email address for the registrant
+// contact is valid, such as registering a new domain, this operation resends
+// the confirmation email to the current email address for the registrant contact.
+func (c *Route53Domains) ResendContactReachabilityEmail(input *ResendContactReachabilityEmailInput) (*ResendContactReachabilityEmailOutput, error) {
+	req, out := c.ResendContactReachabilityEmailRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opRetrieveDomainAuthCode = "RetrieveDomainAuthCode"
+
+// RetrieveDomainAuthCodeRequest generates a "aws/request.Request" representing the
+// client's request for the RetrieveDomainAuthCode operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the RetrieveDomainAuthCode method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the RetrieveDomainAuthCodeRequest method.
+// req, resp := client.RetrieveDomainAuthCodeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53Domains) RetrieveDomainAuthCodeRequest(input *RetrieveDomainAuthCodeInput) (req *request.Request, output *RetrieveDomainAuthCodeOutput) { + op := &request.Operation{ + Name: opRetrieveDomainAuthCode, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RetrieveDomainAuthCodeInput{} + } + + req = c.newRequest(op, input, output) + output = &RetrieveDomainAuthCodeOutput{} + req.Data = output + return +} + +// This operation returns the AuthCode for the domain. To transfer a domain +// to another registrar, you provide this value to the new registrar. +func (c *Route53Domains) RetrieveDomainAuthCode(input *RetrieveDomainAuthCodeInput) (*RetrieveDomainAuthCodeOutput, error) { + req, out := c.RetrieveDomainAuthCodeRequest(input) + err := req.Send() + return out, err +} + +const opTransferDomain = "TransferDomain" + +// TransferDomainRequest generates a "aws/request.Request" representing the +// client's request for the TransferDomain operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the TransferDomain method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the TransferDomainRequest method. +// req, resp := client.TransferDomainRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53Domains) TransferDomainRequest(input *TransferDomainInput) (req *request.Request, output *TransferDomainOutput) { + op := &request.Operation{ + Name: opTransferDomain, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TransferDomainInput{} + } + + req = c.newRequest(op, input, output) + output = &TransferDomainOutput{} + req.Data = output + return +} + +// This operation transfers a domain from another registrar to Amazon Route +// 53. When the transfer is complete, the domain is registered with the AWS +// registrar partner, Gandi. +// +// For transfer requirements, a detailed procedure, and information about viewing +// the status of a domain transfer, see Transferring Registration for a Domain +// to Amazon Route 53 (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/domain-transfer-to-route-53.html) +// in the Amazon Route 53 Developer Guide. +// +// If the registrar for your domain is also the DNS service provider for the +// domain, we highly recommend that you consider transferring your DNS service +// to Amazon Route 53 or to another DNS service provider before you transfer +// your registration. Some registrars provide free DNS service when you purchase +// a domain registration. When you transfer the registration, the previous registrar +// will not renew your domain registration and could end your DNS service at +// any time. +// +// Caution! 
If the registrar for your domain is also the DNS service provider
+// for the domain and you don't transfer DNS service to another provider, your
+// website, email, and the web applications associated with the domain might
+// become unavailable.
+//
+// If the transfer is successful, this method returns an
+// operation ID that you can use to track the progress and completion of the
+// action. If the transfer doesn't complete successfully, the domain registrant
+// will be notified by email.
+func (c *Route53Domains) TransferDomain(input *TransferDomainInput) (*TransferDomainOutput, error) {
+	req, out := c.TransferDomainRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opUpdateDomainContact = "UpdateDomainContact"
+
+// UpdateDomainContactRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateDomainContact operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the UpdateDomainContact method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the UpdateDomainContactRequest method.
+// req, resp := client.UpdateDomainContactRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *Route53Domains) UpdateDomainContactRequest(input *UpdateDomainContactInput) (req *request.Request, output *UpdateDomainContactOutput) {
+	op := &request.Operation{
+		Name:       opUpdateDomainContact,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UpdateDomainContactInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &UpdateDomainContactOutput{}
+	req.Data = output
+	return
+}
+
+// This operation updates the contact information for a particular domain. Information
+// for at least one contact (registrant, administrator, or technical) must be
+// supplied for update.
+//
+// If the update is successful, this method returns an operation ID that you
+// can use to track the progress and completion of the action. If the request
+// is not completed successfully, the domain registrant will be notified by
+// email.
+func (c *Route53Domains) UpdateDomainContact(input *UpdateDomainContactInput) (*UpdateDomainContactOutput, error) {
+	req, out := c.UpdateDomainContactRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opUpdateDomainContactPrivacy = "UpdateDomainContactPrivacy"
+
+// UpdateDomainContactPrivacyRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateDomainContactPrivacy operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the UpdateDomainContactPrivacy method directly
+// instead.
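+//
+// For instance, custom logic can be attached through the handler lists on the
+// returned request. A minimal sketch (assuming an already-constructed client
+// and params; illustrative only, not generated API surface):
+//
+// req, resp := client.UpdateDomainContactPrivacyRequest(params)
+// req.Handlers.Send.PushFront(func(r *request.Request) {
+// // inspect or mutate r here, before the request is sent
+// })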
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateDomainContactPrivacyRequest method. +// req, resp := client.UpdateDomainContactPrivacyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53Domains) UpdateDomainContactPrivacyRequest(input *UpdateDomainContactPrivacyInput) (req *request.Request, output *UpdateDomainContactPrivacyOutput) { + op := &request.Operation{ + Name: opUpdateDomainContactPrivacy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateDomainContactPrivacyInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateDomainContactPrivacyOutput{} + req.Data = output + return +} + +// This operation updates the specified domain contact's privacy setting. When +// the privacy option is enabled, personal information such as postal or email +// address is hidden from the results of a public WHOIS query. The privacy services +// are provided by the AWS registrar, Gandi. For more information, see the Gandi +// privacy features (http://www.gandi.net/domain/whois/?currency=USD&lang=en). +// +// This operation only affects the privacy of the specified contact type (registrant, +// administrator, or tech). Successful acceptance returns an operation ID that +// you can use with GetOperationDetail to track the progress and completion +// of the action. If the request is not completed successfully, the domain registrant +// will be notified by email. +func (c *Route53Domains) UpdateDomainContactPrivacy(input *UpdateDomainContactPrivacyInput) (*UpdateDomainContactPrivacyOutput, error) { + req, out := c.UpdateDomainContactPrivacyRequest(input) + err := req.Send() + return out, err +} + +const opUpdateDomainNameservers = "UpdateDomainNameservers" + +// UpdateDomainNameserversRequest generates a "aws/request.Request" representing the +// client's request for the UpdateDomainNameservers operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateDomainNameservers method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateDomainNameserversRequest method. +// req, resp := client.UpdateDomainNameserversRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53Domains) UpdateDomainNameserversRequest(input *UpdateDomainNameserversInput) (req *request.Request, output *UpdateDomainNameserversOutput) { + op := &request.Operation{ + Name: opUpdateDomainNameservers, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateDomainNameserversInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateDomainNameserversOutput{} + req.Data = output + return +} + +// This operation replaces the current set of name servers for the domain with +// the specified set of name servers. 
If you use Amazon Route 53 as your DNS +// service, specify the four name servers in the delegation set for the hosted +// zone for the domain. +// +// If successful, this operation returns an operation ID that you can use to +// track the progress and completion of the action. If the request is not completed +// successfully, the domain registrant will be notified by email. +func (c *Route53Domains) UpdateDomainNameservers(input *UpdateDomainNameserversInput) (*UpdateDomainNameserversOutput, error) { + req, out := c.UpdateDomainNameserversRequest(input) + err := req.Send() + return out, err +} + +const opUpdateTagsForDomain = "UpdateTagsForDomain" + +// UpdateTagsForDomainRequest generates a "aws/request.Request" representing the +// client's request for the UpdateTagsForDomain operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateTagsForDomain method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateTagsForDomainRequest method. +// req, resp := client.UpdateTagsForDomainRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Route53Domains) UpdateTagsForDomainRequest(input *UpdateTagsForDomainInput) (req *request.Request, output *UpdateTagsForDomainOutput) { + op := &request.Operation{ + Name: opUpdateTagsForDomain, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateTagsForDomainInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateTagsForDomainOutput{} + req.Data = output + return +} + +// This operation adds or updates tags for a specified domain. +// +// All tag operations are eventually consistent; subsequent operations may +// not immediately represent all issued operations. +func (c *Route53Domains) UpdateTagsForDomain(input *UpdateTagsForDomainInput) (*UpdateTagsForDomainOutput, error) { + req, out := c.UpdateTagsForDomainRequest(input) + err := req.Send() + return out, err +} + +// The CheckDomainAvailability request contains the following elements. +type CheckDomainAvailabilityInput struct { + _ struct{} `type:"structure"` + + // The name of a domain. + // + // Type: String + // + // Default: None + // + // Constraints: The domain name can contain only the letters a through z, the + // numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not + // supported. + // + // Required: Yes + DomainName *string `type:"string" required:"true"` + + // Reserved for future use. + IdnLangCode *string `type:"string"` +} + +// String returns the string representation +func (s CheckDomainAvailabilityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CheckDomainAvailabilityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
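+// Callers typically run Validate before Send so that constraint violations fail
+// fast, without a network round trip. A short sketch (assuming the aws package
+// is imported for aws.String; the domain name is illustrative):
+//
+// input := &CheckDomainAvailabilityInput{DomainName: aws.String("example.com")}
+// if err := input.Validate(); err != nil {
+// // handle invalid parameters before calling the service
+// }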
+func (s *CheckDomainAvailabilityInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CheckDomainAvailabilityInput"}
+	if s.DomainName == nil {
+		invalidParams.Add(request.NewErrParamRequired("DomainName"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The CheckDomainAvailability response includes the following elements.
+type CheckDomainAvailabilityOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Whether the domain name is available for registering.
+	//
+	// You can only register domains designated as AVAILABLE.
+	//
+	// Type: String
+	//
+	// Valid values:
+	//
+	//  - AVAILABLE – The domain name is available.
+	//  - AVAILABLE_RESERVED – The domain name is reserved under specific conditions.
+	//  - AVAILABLE_PREORDER – The domain name is available and can be preordered.
+	//  - UNAVAILABLE – The domain name is not available.
+	//  - UNAVAILABLE_PREMIUM – The domain name is not available.
+	//  - UNAVAILABLE_RESTRICTED – The domain name is forbidden.
+	//  - RESERVED – The domain name has been reserved for another person or organization.
+	//  - DONT_KNOW – The TLD registry didn't reply with a definitive answer about
+	//    whether the domain name is available. Amazon Route 53 can return this
+	//    response for a variety of reasons, for example, the registry is performing
+	//    maintenance. Try again later.
+	Availability *string `type:"string" required:"true" enum:"DomainAvailability"`
+}
+
+// String returns the string representation
+func (s CheckDomainAvailabilityOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CheckDomainAvailabilityOutput) GoString() string {
+	return s.String()
+}
+
+// ContactDetail includes the following elements.
+type ContactDetail struct {
+	_ struct{} `type:"structure"`
+
+	// First line of the contact's address.
+	//
+	// Type: String
+	//
+	// Default: None
+	//
+	// Constraints: Maximum 255 characters.
+	//
+	// Parents: RegistrantContact, AdminContact, TechContact
+	//
+	// Required: Yes
+	AddressLine1 *string `type:"string"`
+
+	// Second line of contact's address, if any.
+	//
+	// Type: String
+	//
+	// Default: None
+	//
+	// Constraints: Maximum 255 characters.
+	//
+	// Parents: RegistrantContact, AdminContact, TechContact
+	//
+	// Required: No
+	AddressLine2 *string `type:"string"`
+
+	// The city of the contact's address.
+	//
+	// Type: String
+	//
+	// Default: None
+	//
+	// Constraints: Maximum 255 characters.
+	//
+	// Parents: RegistrantContact, AdminContact, TechContact
+	//
+	// Required: Yes
+	City *string `type:"string"`
+
+	// Indicates whether the contact is a person, company, association, or public
+	// organization. If you choose an option other than PERSON, you must enter an
+	// organization name, and you can't enable privacy protection for the contact.
+	//
+	// Type: String
+	//
+	// Default: None
+	//
+	// Constraints: Maximum 255 characters.
+	//
+	// Valid values: PERSON | COMPANY | ASSOCIATION | PUBLIC_BODY
+	//
+	// Parents: RegistrantContact, AdminContact, TechContact
+	//
+	// Required: Yes
+	ContactType *string `type:"string" enum:"ContactType"`
+
+	// Code for the country of the contact's address.
+	//
+	// Type: String
+	//
+	// Default: None
+	//
+	// Constraints: Maximum 255 characters.
+	//
+	// Parents: RegistrantContact, AdminContact, TechContact
+	//
+	// Required: Yes
+	CountryCode *string `type:"string" enum:"CountryCode"`
+
+	// Email address of the contact.
+	//
+	// Type: String
+	//
+	// Default: None
+	//
+	// Constraints: Maximum 254 characters.
+	//
+	// Parents: RegistrantContact, AdminContact, TechContact
+	//
+	// Required: Yes
+	Email *string `type:"string"`
+
+	// A list of name-value pairs for parameters required by certain top-level domains.
+	//
+	// Type: Complex
+	//
+	// Default: None
+	//
+	// Parents: RegistrantContact, AdminContact, TechContact
+	//
+	// Children: Name, Value
+	//
+	// Required: No
+	ExtraParams []*ExtraParam `type:"list"`
+
+	// Fax number of the contact.
+	//
+	// Type: String
+	//
+	// Default: None
+	//
+	// Constraints: Phone number must be specified in the format "+[country dialing
+	// code].[number including any area code]". For example, a US phone number might
+	// appear as "+1.1234567890".
+	//
+	// Parents: RegistrantContact, AdminContact, TechContact
+	//
+	// Required: No
+	Fax *string `type:"string"`
+
+	// First name of contact.
+	//
+	// Type: String
+	//
+	// Default: None
+	//
+	// Constraints: Maximum 255 characters.
+	//
+	// Parents: RegistrantContact, AdminContact, TechContact
+	//
+	// Required: Yes
+	FirstName *string `type:"string"`
+
+	// Last name of contact.
+	//
+	// Type: String
+	//
+	// Default: None
+	//
+	// Constraints: Maximum 255 characters.
+	//
+	// Parents: RegistrantContact, AdminContact, TechContact
+	//
+	// Required: Yes
+	LastName *string `type:"string"`
+
+	// Name of the organization for contact types other than PERSON.
+	//
+	// Type: String
+	//
+	// Default: None
+	//
+	// Constraints: Maximum 255 characters. Contact type must not be PERSON.
+	//
+	// Parents: RegistrantContact, AdminContact, TechContact
+	//
+	// Required: No
+	OrganizationName *string `type:"string"`
+
+	// The phone number of the contact.
+	//
+	// Type: String
+	//
+	// Default: None
+	//
+	// Constraints: Phone number must be specified in the format "+[country dialing
+	// code].[number including any area code]". For example, a US phone number
+	// might appear as "+1.1234567890".
+	//
+	// Parents: RegistrantContact, AdminContact, TechContact
+	//
+	// Required: Yes
+	PhoneNumber *string `type:"string"`
+
+	// The state or province of the contact's city.
+	//
+	// Type: String
+	//
+	// Default: None
+	//
+	// Constraints: Maximum 255 characters.
+	//
+	// Parents: RegistrantContact, AdminContact, TechContact
+	//
+	// Required: No
+	State *string `type:"string"`
+
+	// The zip or postal code of the contact's address.
+	//
+	// Type: String
+	//
+	// Default: None
+	//
+	// Constraints: Maximum 255 characters.
+	//
+	// Parents: RegistrantContact, AdminContact, TechContact
+	//
+	// Required: No
+	ZipCode *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ContactDetail) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ContactDetail) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ContactDetail) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ContactDetail"}
+	if s.ExtraParams != nil {
+		for i, v := range s.ExtraParams {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ExtraParams", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The DeleteTagsForDomainRequest includes the following elements.
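+//
+// A construction sketch (the field values are illustrative; aws.String is the
+// usual pointer helper from the aws package):
+//
+// input := &DeleteTagsForDomainInput{
+// DomainName: aws.String("example.com"),
+// TagsToDelete: []*string{aws.String("Environment")},
+// }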
+type DeleteTagsForDomainInput struct {
+	_ struct{} `type:"structure"`
+
+	// The domain for which you want to delete one or more tags.
+	//
+	// The name of a domain.
+	//
+	// Type: String
+	//
+	// Default: None
+	//
+	// Constraints: The domain name can contain only the letters a through z, the
+	// numbers 0 through 9, and hyphen (-). Hyphens are allowed only when they're
+	// surrounded by letters, numbers, or other hyphens. You can't specify a hyphen
+	// at the beginning or end of a label. To specify an Internationalized Domain
+	// Name, you must convert the name to Punycode.
+	//
+	// Required: Yes
+	DomainName *string `type:"string" required:"true"`
+
+	// A list of tag keys to delete.
+	//
+	// Type: A list that contains the keys of the tags that you want to delete.
+	//
+	// Default: None
+	//
+	// Required: Yes
+	TagsToDelete []*string `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteTagsForDomainInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteTagsForDomainInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteTagsForDomainInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteTagsForDomainInput"}
+	if s.DomainName == nil {
+		invalidParams.Add(request.NewErrParamRequired("DomainName"))
+	}
+	if s.TagsToDelete == nil {
+		invalidParams.Add(request.NewErrParamRequired("TagsToDelete"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type DeleteTagsForDomainOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteTagsForDomainOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteTagsForDomainOutput) GoString() string {
+	return s.String()
+}
+
+type DisableDomainAutoRenewInput struct {
+	_ struct{} `type:"structure"`
+
+	DomainName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DisableDomainAutoRenewInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DisableDomainAutoRenewInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DisableDomainAutoRenewInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DisableDomainAutoRenewInput"}
+	if s.DomainName == nil {
+		invalidParams.Add(request.NewErrParamRequired("DomainName"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type DisableDomainAutoRenewOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DisableDomainAutoRenewOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DisableDomainAutoRenewOutput) GoString() string {
+	return s.String()
+}
+
+// The DisableDomainTransferLock request includes the following element.
+type DisableDomainTransferLockInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of a domain.
+	//
+	// Type: String
+	//
+	// Default: None
+	//
+	// Constraints: The domain name can contain only the letters a through z, the
+	// numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not
+	// supported.
+ // + // Required: Yes + DomainName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DisableDomainTransferLockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableDomainTransferLockInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DisableDomainTransferLockInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisableDomainTransferLockInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The DisableDomainTransferLock response includes the following element. +type DisableDomainTransferLockOutput struct { + _ struct{} `type:"structure"` + + // Identifier for tracking the progress of the request. To use this ID to query + // the operation status, use GetOperationDetail. + // + // Type: String + // + // Default: None + // + // Constraints: Maximum 255 characters. + OperationId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DisableDomainTransferLockOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableDomainTransferLockOutput) GoString() string { + return s.String() +} + +type DomainSummary struct { + _ struct{} `type:"structure"` + + // Indicates whether the domain is automatically renewed upon expiration. + // + // Type: Boolean + // + // Valid values: True | False + AutoRenew *bool `type:"boolean"` + + // The name of a domain. + // + // Type: String + DomainName *string `type:"string" required:"true"` + + // Expiration date of the domain in Coordinated Universal Time (UTC). + // + // Type: Long + Expiry *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Indicates whether a domain is locked from unauthorized transfer to another + // party. + // + // Type: Boolean + // + // Valid values: True | False + TransferLock *bool `type:"boolean"` +} + +// String returns the string representation +func (s DomainSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DomainSummary) GoString() string { + return s.String() +} + +type EnableDomainAutoRenewInput struct { + _ struct{} `type:"structure"` + + DomainName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s EnableDomainAutoRenewInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableDomainAutoRenewInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
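+// When validation fails, the returned error is a request.ErrInvalidParams value
+// that aggregates one entry per violated constraint, as the body below shows.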
+func (s *EnableDomainAutoRenewInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "EnableDomainAutoRenewInput"}
+	if s.DomainName == nil {
+		invalidParams.Add(request.NewErrParamRequired("DomainName"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type EnableDomainAutoRenewOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s EnableDomainAutoRenewOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EnableDomainAutoRenewOutput) GoString() string {
+	return s.String()
+}
+
+// The EnableDomainTransferLock request includes the following element.
+type EnableDomainTransferLockInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of a domain.
+	//
+	// Type: String
+	//
+	// Default: None
+	//
+	// Constraints: The domain name can contain only the letters a through z, the
+	// numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not
+	// supported.
+	//
+	// Required: Yes
+	DomainName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s EnableDomainTransferLockInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EnableDomainTransferLockInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *EnableDomainTransferLockInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "EnableDomainTransferLockInput"}
+	if s.DomainName == nil {
+		invalidParams.Add(request.NewErrParamRequired("DomainName"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The EnableDomainTransferLock response includes the following elements.
+type EnableDomainTransferLockOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Identifier for tracking the progress of the request. To use this ID to query
+	// the operation status, use GetOperationDetail.
+	//
+	// Type: String
+	//
+	// Default: None
+	//
+	// Constraints: Maximum 255 characters.
+	OperationId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s EnableDomainTransferLockOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EnableDomainTransferLockOutput) GoString() string {
+	return s.String()
+}
+
+// ExtraParam includes the following elements.
+type ExtraParam struct {
+	_ struct{} `type:"structure"`
+
+	// Name of the additional parameter required by the top-level domain.
+	//
+	// Type: String
+	//
+	// Default: None
+	//
+	// Valid values: DUNS_NUMBER | BRAND_NUMBER | BIRTH_DEPARTMENT | BIRTH_DATE_IN_YYYY_MM_DD
+	// | BIRTH_COUNTRY | BIRTH_CITY | DOCUMENT_NUMBER | AU_ID_NUMBER | AU_ID_TYPE
+	// | CA_LEGAL_TYPE | CA_BUSINESS_ENTITY_TYPE | ES_IDENTIFICATION | ES_IDENTIFICATION_TYPE
+	// | ES_LEGAL_FORM | FI_BUSINESS_NUMBER | FI_ID_NUMBER | IT_PIN | RU_PASSPORT_DATA
+	// | SE_ID_NUMBER | SG_ID_NUMBER | VAT_NUMBER
+	//
+	// Parent: ExtraParams
+	//
+	// Required: Yes
+	Name *string `type:"string" required:"true" enum:"ExtraParamName"`
+
+	// Values corresponding to the additional parameter names required by some top-level
+	// domains.
+	//
+	// Type: String
+	//
+	// Default: None
+	//
+	// Constraints: Maximum 2048 characters.
+ // + // Parent: ExtraParams + // + // Required: Yes + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ExtraParam) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExtraParam) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ExtraParam) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExtraParam"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetContactReachabilityStatusInput struct { + _ struct{} `type:"structure"` + + // The name of the domain for which you want to know whether the registrant + // contact has confirmed that the email address is valid. + // + // Type: String + // + // Default: None + // + // Required: Yes + DomainName *string `locationName:"domainName" type:"string"` +} + +// String returns the string representation +func (s GetContactReachabilityStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetContactReachabilityStatusInput) GoString() string { + return s.String() +} + +type GetContactReachabilityStatusOutput struct { + _ struct{} `type:"structure"` + + // The domain name for which you requested the reachability status. + DomainName *string `locationName:"domainName" type:"string"` + + // Whether the registrant contact has responded. PENDING indicates that we sent + // the confirmation email and haven't received a response yet, DONE indicates + // that we sent the email and got confirmation from the registrant contact, + // and EXPIRED indicates that the time limit expired before the registrant contact + // responded. + // + // Type: String + // + // Valid values: PENDING, DONE, EXPIRED + Status *string `locationName:"status" type:"string" enum:"ReachabilityStatus"` +} + +// String returns the string representation +func (s GetContactReachabilityStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetContactReachabilityStatusOutput) GoString() string { + return s.String() +} + +// The GetDomainDetail request includes the following element. +type GetDomainDetailInput struct { + _ struct{} `type:"structure"` + + // The name of a domain. + // + // Type: String + // + // Default: None + // + // Constraints: The domain name can contain only the letters a through z, the + // numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not + // supported. + // + // Required: Yes + DomainName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetDomainDetailInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDomainDetailInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
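+// GetDomainDetailInput has a single required field, DomainName, which is the
+// only constraint checked here.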
+func (s *GetDomainDetailInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetDomainDetailInput"}
+	if s.DomainName == nil {
+		invalidParams.Add(request.NewErrParamRequired("DomainName"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The GetDomainDetail response includes the following elements.
+type GetDomainDetailOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Email address to contact to report incorrect contact information for a domain,
+	// to report that the domain is being used to send spam, to report that someone
+	// is cybersquatting on a domain name, or to report some other type of abuse.
+	//
+	// Type: String
+	AbuseContactEmail *string `type:"string"`
+
+	// Phone number for reporting abuse.
+	//
+	// Type: String
+	AbuseContactPhone *string `type:"string"`
+
+	// Provides details about the domain administrative contact.
+	//
+	// Type: Complex
+	//
+	// Children: FirstName, MiddleName, LastName, ContactType, OrganizationName,
+	// AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber,
+	// Email, Fax, ExtraParams
+	AdminContact *ContactDetail `type:"structure" required:"true"`
+
+	// Specifies whether contact information for the admin contact is concealed
+	// from WHOIS queries. If the value is true, WHOIS ("who is") queries will return
+	// contact information for our registrar partner, Gandi, instead of the contact
+	// information that you enter.
+	//
+	// Type: Boolean
+	AdminPrivacy *bool `type:"boolean"`
+
+	// Specifies whether the domain registration is set to renew automatically.
+	//
+	// Type: Boolean
+	AutoRenew *bool `type:"boolean"`
+
+	// The date when the domain was created as found in the response to a WHOIS
+	// query. The date format is Unix time.
+	CreationDate *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+	// Reserved for future use.
+	DnsSec *string `type:"string"`
+
+	// The name of a domain.
+	//
+	// Type: String
+	DomainName *string `type:"string" required:"true"`
+
+	// The date when the registration for the domain is set to expire. The date
+	// format is Unix time.
+	ExpirationDate *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+	// The name servers of the domain.
+	//
+	// Type: Complex
+	Nameservers []*Nameserver `type:"list" required:"true"`
+
+	// Provides details about the domain registrant.
+	//
+	// Type: Complex
+	//
+	// Children: FirstName, MiddleName, LastName, ContactType, OrganizationName,
+	// AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber,
+	// Email, Fax, ExtraParams
+	RegistrantContact *ContactDetail `type:"structure" required:"true"`
+
+	// Specifies whether contact information for the registrant contact is concealed
+	// from WHOIS queries. If the value is true, WHOIS ("who is") queries will return
+	// contact information for our registrar partner, Gandi, instead of the contact
+	// information that you enter.
+	//
+	// Type: Boolean
+	RegistrantPrivacy *bool `type:"boolean"`
+
+	// Name of the registrar of the domain as identified in the registry. Amazon
+	// Route 53 domains are registered by registrar Gandi. The value is "GANDI SAS".
+	//
+	// Type: String
+	RegistrarName *string `type:"string"`
+
+	// Web address of the registrar.
+	//
+	// Type: String
+	RegistrarUrl *string `type:"string"`
+
+	// Reserved for future use.
+	RegistryDomainId *string `type:"string"`
+
+	// Reseller of the domain.
Domains registered or transferred using Amazon Route + // 53 domains will have "Amazon" as the reseller. + // + // Type: String + Reseller *string `type:"string"` + + // An array of domain name status codes, also known as Extensible Provisioning + // Protocol (EPP) status codes. + // + // ICANN, the organization that maintains a central database of domain names, + // has developed a set of domain name status codes that tell you the status + // of a variety of operations on a domain name, for example, registering a domain + // name, transferring a domain name to another registrar, renewing the registration + // for a domain name, and so on. All registrars use this same set of status + // codes. + // + // For a current list of domain name status codes and an explanation of what + // each code means, go to the ICANN website (https://www.icann.org/) and search + // for epp status codes. (Search on the ICANN website; web searches sometimes + // return an old version of the document.) + // + // Type: Array of String + StatusList []*string `type:"list"` + + // Provides details about the domain technical contact. + // + // Type: Complex + // + // Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, + // AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, + // Email, Fax, ExtraParams + TechContact *ContactDetail `type:"structure" required:"true"` + + // Specifies whether contact information for the tech contact is concealed from + // WHOIS queries. If the value is true, WHOIS ("who is") queries will return + // contact information for our registrar partner, Gandi, instead of the contact + // information that you enter. + // + // Type: Boolean + TechPrivacy *bool `type:"boolean"` + + // The last updated date of the domain as found in the response to a WHOIS query. + // The date format is Unix time. + UpdatedDate *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The fully qualified name of the WHOIS server that can answer the WHOIS query + // for the domain. + // + // Type: String + WhoIsServer *string `type:"string"` +} + +// String returns the string representation +func (s GetDomainDetailOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDomainDetailOutput) GoString() string { + return s.String() +} + +// The GetOperationDetail request includes the following element. +type GetOperationDetailInput struct { + _ struct{} `type:"structure"` + + // The identifier for the operation for which you want to get the status. Amazon + // Route 53 returned the identifier in the response to the original request. + // + // Type: String + // + // Default: None + // + // Required: Yes + OperationId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetOperationDetailInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOperationDetailInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetOperationDetailInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetOperationDetailInput"} + if s.OperationId == nil { + invalidParams.Add(request.NewErrParamRequired("OperationId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The GetOperationDetail response includes the following elements. 
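+//
+// A common pattern (a sketch; svc and opID are assumed to exist, opID being the
+// OperationId returned by a mutating call such as RegisterDomain) is to poll
+// until Status reaches a terminal value:
+//
+// out, err := svc.GetOperationDetail(&GetOperationDetailInput{
+// OperationId: aws.String(opID),
+// })
+// if err == nil && out.Status != nil {
+// fmt.Println(*out.Status) // e.g. IN_PROGRESS, SUCCESSFUL, FAILED
+// }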
+type GetOperationDetailOutput struct { + _ struct{} `type:"structure"` + + // The name of a domain. + // + // Type: String + DomainName *string `type:"string"` + + // Detailed information on the status including possible errors. + // + // Type: String + Message *string `type:"string"` + + // The identifier for the operation. + // + // Type: String + OperationId *string `type:"string"` + + // The current status of the requested operation in the system. + // + // Type: String + Status *string `type:"string" enum:"OperationStatus"` + + // The date when the request was submitted. + SubmittedDate *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The type of operation that was requested. + // + // Type: String + Type *string `type:"string" enum:"OperationType"` +} + +// String returns the string representation +func (s GetOperationDetailOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOperationDetailOutput) GoString() string { + return s.String() +} + +// The ListDomains request includes the following elements. +type ListDomainsInput struct { + _ struct{} `type:"structure"` + + // For an initial request for a list of domains, omit this element. If the number + // of domains that are associated with the current AWS account is greater than + // the value that you specified for MaxItems, you can use Marker to return additional + // domains. Get the value of NextPageMarker from the previous response, and + // submit another request that includes the value of NextPageMarker in the Marker + // element. + // + // Type: String + // + // Default: None + // + // Constraints: The marker must match the value specified in the previous request. + // + // Required: No + Marker *string `type:"string"` + + // Number of domains to be returned. + // + // Type: Integer + // + // Default: 20 + // + // Constraints: A numeral between 1 and 100. + // + // Required: No + MaxItems *int64 `type:"integer"` +} + +// String returns the string representation +func (s ListDomainsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDomainsInput) GoString() string { + return s.String() +} + +// The ListDomains response includes the following elements. +type ListDomainsOutput struct { + _ struct{} `type:"structure"` + + // A summary of domains. + // + // Type: Complex type containing a list of domain summaries. + // + // Children: AutoRenew, DomainName, Expiry, TransferLock + Domains []*DomainSummary `type:"list" required:"true"` + + // If there are more domains than you specified for MaxItems in the request, + // submit another request and include the value of NextPageMarker in the value + // of Marker. + // + // Type: String + // + // Parent: Operations + NextPageMarker *string `type:"string"` +} + +// String returns the string representation +func (s ListDomainsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDomainsOutput) GoString() string { + return s.String() +} + +// The ListOperations request includes the following elements. +type ListOperationsInput struct { + _ struct{} `type:"structure"` + + // For an initial request for a list of operations, omit this element. If the + // number of operations that are not yet complete is greater than the value + // that you specified for MaxItems, you can use Marker to return additional + // operations. 
Get the value of NextPageMarker from the previous response, and
+	// submit another request that includes the value of NextPageMarker in the Marker
+	// element.
+	//
+	// Type: String
+	//
+	// Default: None
+	//
+	// Required: No
+	Marker *string `type:"string"`
+
+	// Number of operations to be returned.
+	//
+	// Type: Integer
+	//
+	// Default: 20
+	//
+	// Constraints: A value between 1 and 100.
+	//
+	// Required: No
+	MaxItems *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s ListOperationsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListOperationsInput) GoString() string {
+	return s.String()
+}
+
+// The ListOperations response includes the following elements.
+type ListOperationsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// If there are more operations than you specified for MaxItems in the request,
+	// submit another request and include the value of NextPageMarker in the value
+	// of Marker.
+	//
+	// Type: String
+	//
+	// Parent: Operations
+	NextPageMarker *string `type:"string"`
+
+	// Lists summaries of the operations.
+	//
+	// Type: Complex type containing a list of operation summaries
+	//
+	// Children: OperationId, Status, SubmittedDate, Type
+	Operations []*OperationSummary `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s ListOperationsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListOperationsOutput) GoString() string {
+	return s.String()
+}
+
+// The ListTagsForDomainRequest includes the following elements.
+type ListTagsForDomainInput struct {
+	_ struct{} `type:"structure"`
+
+	// The domain for which you want to get a list of tags.
+	DomainName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListTagsForDomainInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTagsForDomainInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListTagsForDomainInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListTagsForDomainInput"}
+	if s.DomainName == nil {
+		invalidParams.Add(request.NewErrParamRequired("DomainName"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The ListTagsForDomain response includes the following elements.
+type ListTagsForDomainOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of the tags that are associated with the specified domain.
+	//
+	// Type: A complex type containing a list of tags
+	//
+	// Each tag includes the following elements.
+	//
+	// Key
+	//
+	// The key (name) of a tag.
+	//
+	// Type: String
+	//
+	// Value
+	//
+	// The value of a tag.
+	//
+	// Type: String
+	TagList []*Tag `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s ListTagsForDomainOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTagsForDomainOutput) GoString() string {
+	return s.String()
+}
+
+// Nameserver includes the following elements.
+type Nameserver struct {
+	_ struct{} `type:"structure"`
+
+	// Glue IP address of a name server entry. Glue IP addresses are required only
+	// when the name of the name server is a subdomain of the domain.
For example, + // if your domain is example.com and the name server for the domain is ns.example.com, + // you need to specify the IP address for ns.example.com. + // + // Type: List of IP addresses. + // + // Constraints: The list can contain only one IPv4 and one IPv6 address. + // + // Parent: Nameservers + GlueIps []*string `type:"list"` + + // The fully qualified host name of the name server. + // + // Type: String + // + // Constraint: Maximum 255 characterss + // + // Parent: Nameservers + Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Nameserver) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Nameserver) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Nameserver) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Nameserver"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// OperationSummary includes the following elements. +type OperationSummary struct { + _ struct{} `type:"structure"` + + // Identifier returned to track the requested action. + // + // Type: String + OperationId *string `type:"string" required:"true"` + + // The current status of the requested operation in the system. + // + // Type: String + Status *string `type:"string" required:"true" enum:"OperationStatus"` + + // The date when the request was submitted. + SubmittedDate *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` + + // Type of the action requested. + // + // Type: String + // + // Valid values: REGISTER_DOMAIN | DELETE_DOMAIN | TRANSFER_IN_DOMAIN | UPDATE_DOMAIN_CONTACT + // | UPDATE_NAMESERVER | CHANGE_PRIVACY_PROTECTION | DOMAIN_LOCK + Type *string `type:"string" required:"true" enum:"OperationType"` +} + +// String returns the string representation +func (s OperationSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OperationSummary) GoString() string { + return s.String() +} + +// The RegisterDomain request includes the following elements. +type RegisterDomainInput struct { + _ struct{} `type:"structure"` + + // Provides detailed contact information. + // + // Type: Complex + // + // Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, + // AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, + // Email, Fax, ExtraParams + // + // Required: Yes + AdminContact *ContactDetail `type:"structure" required:"true"` + + // Indicates whether the domain will be automatically renewed (true) or not + // (false). Autorenewal only takes effect after the account is charged. + // + // Type: Boolean + // + // Valid values: true | false + // + // Default: true + // + // Required: No + AutoRenew *bool `type:"boolean"` + + // The name of a domain. + // + // Type: String + // + // Default: None + // + // Constraints: The domain name can contain only the letters a through z, the + // numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not + // supported. + // + // Required: Yes + DomainName *string `type:"string" required:"true"` + + // The number of years the domain will be registered. Domains are registered + // for a minimum of one year. The maximum period depends on the top-level domain. 
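+	//
+	// A minimal sketch of the client-side range check that the generated Validate
+	// method below performs (aws.Int64(0) is just an out-of-range example):
+	//
+	//	input := &route53domains.RegisterDomainInput{DurationInYears: aws.Int64(0)}
+	//	if err := input.Validate(); err != nil {
+	//		// err reports DurationInYears below the minimum of 1, plus the
+	//		// missing required contact and domain fields.
+	//	}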
+ // + // Type: Integer + // + // Default: 1 + // + // Valid values: Integer from 1 to 10 + // + // Required: Yes + DurationInYears *int64 `min:"1" type:"integer" required:"true"` + + // Reserved for future use. + IdnLangCode *string `type:"string"` + + // Whether you want to conceal contact information from WHOIS queries. If you + // specify true, WHOIS ("who is") queries will return contact information for + // our registrar partner, Gandi, instead of the contact information that you + // enter. + // + // Type: Boolean + // + // Default: true + // + // Valid values: true | false + // + // Required: No + PrivacyProtectAdminContact *bool `type:"boolean"` + + // Whether you want to conceal contact information from WHOIS queries. If you + // specify true, WHOIS ("who is") queries will return contact information for + // our registrar partner, Gandi, instead of the contact information that you + // enter. + // + // Type: Boolean + // + // Default: true + // + // Valid values: true | false + // + // Required: No + PrivacyProtectRegistrantContact *bool `type:"boolean"` + + // Whether you want to conceal contact information from WHOIS queries. If you + // specify true, WHOIS ("who is") queries will return contact information for + // our registrar partner, Gandi, instead of the contact information that you + // enter. + // + // Type: Boolean + // + // Default: true + // + // Valid values: true | false + // + // Required: No + PrivacyProtectTechContact *bool `type:"boolean"` + + // Provides detailed contact information. + // + // Type: Complex + // + // Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, + // AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, + // Email, Fax, ExtraParams + // + // Required: Yes + RegistrantContact *ContactDetail `type:"structure" required:"true"` + + // Provides detailed contact information. + // + // Type: Complex + // + // Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, + // AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, + // Email, Fax, ExtraParams + // + // Required: Yes + TechContact *ContactDetail `type:"structure" required:"true"` +} + +// String returns the string representation +func (s RegisterDomainInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterDomainInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *RegisterDomainInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RegisterDomainInput"} + if s.AdminContact == nil { + invalidParams.Add(request.NewErrParamRequired("AdminContact")) + } + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.DurationInYears == nil { + invalidParams.Add(request.NewErrParamRequired("DurationInYears")) + } + if s.DurationInYears != nil && *s.DurationInYears < 1 { + invalidParams.Add(request.NewErrParamMinValue("DurationInYears", 1)) + } + if s.RegistrantContact == nil { + invalidParams.Add(request.NewErrParamRequired("RegistrantContact")) + } + if s.TechContact == nil { + invalidParams.Add(request.NewErrParamRequired("TechContact")) + } + if s.AdminContact != nil { + if err := s.AdminContact.Validate(); err != nil { + invalidParams.AddNested("AdminContact", err.(request.ErrInvalidParams)) + } + } + if s.RegistrantContact != nil { + if err := s.RegistrantContact.Validate(); err != nil { + invalidParams.AddNested("RegistrantContact", err.(request.ErrInvalidParams)) + } + } + if s.TechContact != nil { + if err := s.TechContact.Validate(); err != nil { + invalidParams.AddNested("TechContact", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The RegisterDomain response includes the following element. +type RegisterDomainOutput struct { + _ struct{} `type:"structure"` + + // Identifier for tracking the progress of the request. To use this ID to query + // the operation status, use GetOperationDetail. + // + // Type: String + // + // Default: None + // + // Constraints: Maximum 255 characters. + OperationId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RegisterDomainOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterDomainOutput) GoString() string { + return s.String() +} + +type ResendContactReachabilityEmailInput struct { + _ struct{} `type:"structure"` + + // The name of the domain for which you want Amazon Route 53 to resend a confirmation + // email to the registrant contact. + // + // Type: String + // + // Default: None + // + // Required: Yes + DomainName *string `locationName:"domainName" type:"string"` +} + +// String returns the string representation +func (s ResendContactReachabilityEmailInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResendContactReachabilityEmailInput) GoString() string { + return s.String() +} + +type ResendContactReachabilityEmailOutput struct { + _ struct{} `type:"structure"` + + // The domain name for which you requested a confirmation email. + DomainName *string `locationName:"domainName" type:"string"` + + // The email address for the registrant contact at the time that we sent the + // verification email. + EmailAddress *string `locationName:"emailAddress" type:"string"` + + // True if the email address for the registrant contact has already been verified, + // and false otherwise. If the email address has already been verified, we don't + // send another confirmation email. 
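+	//
+	// A hypothetical check (resp names a *ResendContactReachabilityEmailOutput;
+	// aws.BoolValue dereferences the pointer, treating nil as false):
+	//
+	//	if aws.BoolValue(resp.IsAlreadyVerified) {
+	//		// already verified; no new confirmation email was sent
+	//	}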
+ IsAlreadyVerified *bool `locationName:"isAlreadyVerified" type:"boolean"` +} + +// String returns the string representation +func (s ResendContactReachabilityEmailOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResendContactReachabilityEmailOutput) GoString() string { + return s.String() +} + +// The RetrieveDomainAuthCode request includes the following element. +type RetrieveDomainAuthCodeInput struct { + _ struct{} `type:"structure"` + + // The name of a domain. + // + // Type: String + // + // Default: None + // + // Constraints: The domain name can contain only the letters a through z, the + // numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not + // supported. + // + // Required: Yes + DomainName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RetrieveDomainAuthCodeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RetrieveDomainAuthCodeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RetrieveDomainAuthCodeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RetrieveDomainAuthCodeInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The RetrieveDomainAuthCode response includes the following element. +type RetrieveDomainAuthCodeOutput struct { + _ struct{} `type:"structure"` + + // The authorization code for the domain. + // + // Type: String + AuthCode *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RetrieveDomainAuthCodeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RetrieveDomainAuthCodeOutput) GoString() string { + return s.String() +} + +// Each tag includes the following elements. +type Tag struct { + _ struct{} `type:"structure"` + + // The key (name) of a tag. + // + // Type: String + // + // Default: None + // + // Valid values: A-Z, a-z, 0-9, space, ".:/=+\-@" + // + // Constraints: Each key can be 1-128 characters long. + // + // Required: Yes + Key *string `type:"string"` + + // The value of a tag. + // + // Type: String + // + // Default: None + // + // Valid values: A-Z, a-z, 0-9, space, ".:/=+\-@" + // + // Constraints: Each value can be 0-256 characters long. + // + // Required: Yes + Value *string `type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// The TransferDomain request includes the following elements. +type TransferDomainInput struct { + _ struct{} `type:"structure"` + + // Provides detailed contact information. + // + // Type: Complex + // + // Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, + // AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, + // Email, Fax, ExtraParams + // + // Required: Yes + AdminContact *ContactDetail `type:"structure" required:"true"` + + // The authorization code for the domain. You get this value from the current + // registrar. 
+ // + // Type: String + // + // Required: Yes + AuthCode *string `type:"string"` + + // Indicates whether the domain will be automatically renewed (true) or not + // (false). Autorenewal only takes effect after the account is charged. + // + // Type: Boolean + // + // Valid values: true | false + // + // Default: true + // + // Required: No + AutoRenew *bool `type:"boolean"` + + // The name of a domain. + // + // Type: String + // + // Default: None + // + // Constraints: The domain name can contain only the letters a through z, the + // numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not + // supported. + // + // Required: Yes + DomainName *string `type:"string" required:"true"` + + // The number of years the domain will be registered. Domains are registered + // for a minimum of one year. The maximum period depends on the top-level domain. + // + // Type: Integer + // + // Default: 1 + // + // Valid values: Integer from 1 to 10 + // + // Required: Yes + DurationInYears *int64 `min:"1" type:"integer" required:"true"` + + // Reserved for future use. + IdnLangCode *string `type:"string"` + + // Contains details for the host and glue IP addresses. + // + // Type: Complex + // + // Children: GlueIps, Name + // + // Required: No + Nameservers []*Nameserver `type:"list"` + + // Whether you want to conceal contact information from WHOIS queries. If you + // specify true, WHOIS ("who is") queries will return contact information for + // our registrar partner, Gandi, instead of the contact information that you + // enter. + // + // Type: Boolean + // + // Default: true + // + // Valid values: true | false + // + // Required: No + PrivacyProtectAdminContact *bool `type:"boolean"` + + // Whether you want to conceal contact information from WHOIS queries. If you + // specify true, WHOIS ("who is") queries will return contact information for + // our registrar partner, Gandi, instead of the contact information that you + // enter. + // + // Type: Boolean + // + // Default: true + // + // Valid values: true | false + // + // Required: No + PrivacyProtectRegistrantContact *bool `type:"boolean"` + + // Whether you want to conceal contact information from WHOIS queries. If you + // specify true, WHOIS ("who is") queries will return contact information for + // our registrar partner, Gandi, instead of the contact information that you + // enter. + // + // Type: Boolean + // + // Default: true + // + // Valid values: true | false + // + // Required: No + PrivacyProtectTechContact *bool `type:"boolean"` + + // Provides detailed contact information. + // + // Type: Complex + // + // Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, + // AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, + // Email, Fax, ExtraParams + // + // Required: Yes + RegistrantContact *ContactDetail `type:"structure" required:"true"` + + // Provides detailed contact information. 
+	//
+	// Type: Complex
+	//
+	// Children: FirstName, MiddleName, LastName, ContactType, OrganizationName,
+	// AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber,
+	// Email, Fax, ExtraParams
+	//
+	// Required: Yes
+	TechContact *ContactDetail `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s TransferDomainInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TransferDomainInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *TransferDomainInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "TransferDomainInput"}
+	if s.AdminContact == nil {
+		invalidParams.Add(request.NewErrParamRequired("AdminContact"))
+	}
+	if s.DomainName == nil {
+		invalidParams.Add(request.NewErrParamRequired("DomainName"))
+	}
+	if s.DurationInYears == nil {
+		invalidParams.Add(request.NewErrParamRequired("DurationInYears"))
+	}
+	if s.DurationInYears != nil && *s.DurationInYears < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("DurationInYears", 1))
+	}
+	if s.RegistrantContact == nil {
+		invalidParams.Add(request.NewErrParamRequired("RegistrantContact"))
+	}
+	if s.TechContact == nil {
+		invalidParams.Add(request.NewErrParamRequired("TechContact"))
+	}
+	if s.AdminContact != nil {
+		if err := s.AdminContact.Validate(); err != nil {
+			invalidParams.AddNested("AdminContact", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.Nameservers != nil {
+		for i, v := range s.Nameservers {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Nameservers", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+	if s.RegistrantContact != nil {
+		if err := s.RegistrantContact.Validate(); err != nil {
+			invalidParams.AddNested("RegistrantContact", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.TechContact != nil {
+		if err := s.TechContact.Validate(); err != nil {
+			invalidParams.AddNested("TechContact", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The TransferDomain response includes the following element.
+type TransferDomainOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Identifier for tracking the progress of the request. To use this ID to query
+	// the operation status, use GetOperationDetail.
+	//
+	// Type: String
+	//
+	// Default: None
+	//
+	// Constraints: Maximum 255 characters.
+	OperationId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s TransferDomainOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TransferDomainOutput) GoString() string {
+	return s.String()
+}
+
+// The UpdateDomainContact request includes the following elements.
+type UpdateDomainContactInput struct {
+	_ struct{} `type:"structure"`
+
+	// Provides detailed contact information.
+	//
+	// Type: Complex
+	//
+	// Children: FirstName, MiddleName, LastName, ContactType, OrganizationName,
+	// AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber,
+	// Email, Fax, ExtraParams
+	//
+	// Required: Yes
+	AdminContact *ContactDetail `type:"structure"`
+
+	// The name of a domain.
+ // + // Type: String + // + // Default: None + // + // Constraints: The domain name can contain only the letters a through z, the + // numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not + // supported. + // + // Required: Yes + DomainName *string `type:"string" required:"true"` + + // Provides detailed contact information. + // + // Type: Complex + // + // Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, + // AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, + // Email, Fax, ExtraParams + // + // Required: Yes + RegistrantContact *ContactDetail `type:"structure"` + + // Provides detailed contact information. + // + // Type: Complex + // + // Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, + // AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, + // Email, Fax, ExtraParams + // + // Required: Yes + TechContact *ContactDetail `type:"structure"` +} + +// String returns the string representation +func (s UpdateDomainContactInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDomainContactInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateDomainContactInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateDomainContactInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.AdminContact != nil { + if err := s.AdminContact.Validate(); err != nil { + invalidParams.AddNested("AdminContact", err.(request.ErrInvalidParams)) + } + } + if s.RegistrantContact != nil { + if err := s.RegistrantContact.Validate(); err != nil { + invalidParams.AddNested("RegistrantContact", err.(request.ErrInvalidParams)) + } + } + if s.TechContact != nil { + if err := s.TechContact.Validate(); err != nil { + invalidParams.AddNested("TechContact", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The UpdateDomainContact response includes the following element. +type UpdateDomainContactOutput struct { + _ struct{} `type:"structure"` + + // Identifier for tracking the progress of the request. To use this ID to query + // the operation status, use GetOperationDetail. + // + // Type: String + // + // Default: None + // + // Constraints: Maximum 255 characters. + OperationId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateDomainContactOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDomainContactOutput) GoString() string { + return s.String() +} + +// The UpdateDomainContactPrivacy request includes the following elements. +type UpdateDomainContactPrivacyInput struct { + _ struct{} `type:"structure"` + + // Whether you want to conceal contact information from WHOIS queries. If you + // specify true, WHOIS ("who is") queries will return contact information for + // our registrar partner, Gandi, instead of the contact information that you + // enter. + // + // Type: Boolean + // + // Default: None + // + // Valid values: true | false + // + // Required: No + AdminPrivacy *bool `type:"boolean"` + + // The name of a domain. 
+	//
+	// Type: String
+	//
+	// Default: None
+	//
+	// Constraints: The domain name can contain only the letters a through z, the
+	// numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not
+	// supported.
+	//
+	// Required: Yes
+	DomainName *string `type:"string" required:"true"`
+
+	// Whether you want to conceal contact information from WHOIS queries. If you
+	// specify true, WHOIS ("who is") queries will return contact information for
+	// our registrar partner, Gandi, instead of the contact information that you
+	// enter.
+	//
+	// Type: Boolean
+	//
+	// Default: None
+	//
+	// Valid values: true | false
+	//
+	// Required: No
+	RegistrantPrivacy *bool `type:"boolean"`
+
+	// Whether you want to conceal contact information from WHOIS queries. If you
+	// specify true, WHOIS ("who is") queries will return contact information for
+	// our registrar partner, Gandi, instead of the contact information that you
+	// enter.
+	//
+	// Type: Boolean
+	//
+	// Default: None
+	//
+	// Valid values: true | false
+	//
+	// Required: No
+	TechPrivacy *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s UpdateDomainContactPrivacyInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateDomainContactPrivacyInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateDomainContactPrivacyInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "UpdateDomainContactPrivacyInput"}
+	if s.DomainName == nil {
+		invalidParams.Add(request.NewErrParamRequired("DomainName"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The UpdateDomainContactPrivacy response includes the following element.
+type UpdateDomainContactPrivacyOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Identifier for tracking the progress of the request. To use this ID to query
+	// the operation status, use GetOperationDetail.
+	//
+	// Type: String
+	//
+	// Default: None
+	//
+	// Constraints: Maximum 255 characters.
+	OperationId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateDomainContactPrivacyOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateDomainContactPrivacyOutput) GoString() string {
+	return s.String()
+}
+
+// The UpdateDomainNameservers request includes the following elements.
+type UpdateDomainNameserversInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of a domain.
+	//
+	// Type: String
+	//
+	// Default: None
+	//
+	// Constraints: The domain name can contain only the letters a through z, the
+	// numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not
+	// supported.
+	//
+	// Required: Yes
+	DomainName *string `type:"string" required:"true"`
+
+	// The authorization key for .fi domains.
+	FIAuthKey *string `type:"string"`
+
+	// A list of new name servers for the domain.
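+	//
+	// A minimal sketch ("ns1.example.com" is a placeholder host; GlueIps is needed
+	// only when the host is a subdomain of the domain being updated):
+	//
+	//	nameservers := []*route53domains.Nameserver{
+	//		{Name: aws.String("ns1.example.com")},
+	//	}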
+	//
+	// Type: Complex
+	//
+	// Children: Name, GlueIps
+	//
+	// Required: Yes
+	Nameservers []*Nameserver `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateDomainNameserversInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateDomainNameserversInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateDomainNameserversInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "UpdateDomainNameserversInput"}
+	if s.DomainName == nil {
+		invalidParams.Add(request.NewErrParamRequired("DomainName"))
+	}
+	if s.Nameservers == nil {
+		invalidParams.Add(request.NewErrParamRequired("Nameservers"))
+	}
+	if s.Nameservers != nil {
+		for i, v := range s.Nameservers {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Nameservers", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The UpdateDomainNameservers response includes the following element.
+type UpdateDomainNameserversOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Identifier for tracking the progress of the request. To use this ID to query
+	// the operation status, use GetOperationDetail.
+	//
+	// Type: String
+	//
+	// Default: None
+	//
+	// Constraints: Maximum 255 characters.
+	OperationId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateDomainNameserversOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateDomainNameserversOutput) GoString() string {
+	return s.String()
+}
+
+// The UpdateTagsForDomainRequest includes the following elements.
+type UpdateTagsForDomainInput struct {
+	_ struct{} `type:"structure"`
+
+	// The domain for which you want to add or update tags.
+	//
+	// The name of a domain.
+	//
+	// Type: String
+	//
+	// Default: None
+	//
+	// Constraints: The domain name can contain only the letters a through z, the
+	// numbers 0 through 9, and hyphen (-). Hyphens are allowed only when they're
+	// surrounded by letters, numbers, or other hyphens. You can't specify a hyphen
+	// at the beginning or end of a label. To specify an Internationalized Domain
+	// Name, you must convert the name to Punycode.
+	//
+	// Required: Yes
+	DomainName *string `type:"string" required:"true"`
+
+	// A list of the tag keys and values that you want to add or update. If you
+	// specify a key that already exists, the corresponding value will be replaced.
+	//
+	// Type: A complex type containing a list of tags
+	//
+	// Default: None
+	//
+	// Required: No
+	//
+	// Each tag includes the following elements:
+	//
+	// Key
+	//
+	// The key (name) of a tag.
+	//
+	// Type: String
+	//
+	// Default: None
+	//
+	// Valid values: Unicode characters including alphanumeric, space, and ".:/=+\-@"
+	//
+	// Constraints: Each key can be 1-128 characters long.
+	//
+	// Required: Yes
+	//
+	// Value
+	//
+	// The value of a tag.
+	//
+	// Type: String
+	//
+	// Default: None
+	//
+	// Valid values: Unicode characters including alphanumeric, space, and ".:/=+\-@"
+	//
+	// Constraints: Each value can be 0-256 characters long.
+ // + // Required: Yes + TagsToUpdate []*Tag `type:"list"` +} + +// String returns the string representation +func (s UpdateTagsForDomainInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTagsForDomainInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateTagsForDomainInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateTagsForDomainInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateTagsForDomainOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateTagsForDomainOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTagsForDomainOutput) GoString() string { + return s.String() +} + +const ( + // @enum ContactType + ContactTypePerson = "PERSON" + // @enum ContactType + ContactTypeCompany = "COMPANY" + // @enum ContactType + ContactTypeAssociation = "ASSOCIATION" + // @enum ContactType + ContactTypePublicBody = "PUBLIC_BODY" + // @enum ContactType + ContactTypeReseller = "RESELLER" +) + +const ( + // @enum CountryCode + CountryCodeAd = "AD" + // @enum CountryCode + CountryCodeAe = "AE" + // @enum CountryCode + CountryCodeAf = "AF" + // @enum CountryCode + CountryCodeAg = "AG" + // @enum CountryCode + CountryCodeAi = "AI" + // @enum CountryCode + CountryCodeAl = "AL" + // @enum CountryCode + CountryCodeAm = "AM" + // @enum CountryCode + CountryCodeAn = "AN" + // @enum CountryCode + CountryCodeAo = "AO" + // @enum CountryCode + CountryCodeAq = "AQ" + // @enum CountryCode + CountryCodeAr = "AR" + // @enum CountryCode + CountryCodeAs = "AS" + // @enum CountryCode + CountryCodeAt = "AT" + // @enum CountryCode + CountryCodeAu = "AU" + // @enum CountryCode + CountryCodeAw = "AW" + // @enum CountryCode + CountryCodeAz = "AZ" + // @enum CountryCode + CountryCodeBa = "BA" + // @enum CountryCode + CountryCodeBb = "BB" + // @enum CountryCode + CountryCodeBd = "BD" + // @enum CountryCode + CountryCodeBe = "BE" + // @enum CountryCode + CountryCodeBf = "BF" + // @enum CountryCode + CountryCodeBg = "BG" + // @enum CountryCode + CountryCodeBh = "BH" + // @enum CountryCode + CountryCodeBi = "BI" + // @enum CountryCode + CountryCodeBj = "BJ" + // @enum CountryCode + CountryCodeBl = "BL" + // @enum CountryCode + CountryCodeBm = "BM" + // @enum CountryCode + CountryCodeBn = "BN" + // @enum CountryCode + CountryCodeBo = "BO" + // @enum CountryCode + CountryCodeBr = "BR" + // @enum CountryCode + CountryCodeBs = "BS" + // @enum CountryCode + CountryCodeBt = "BT" + // @enum CountryCode + CountryCodeBw = "BW" + // @enum CountryCode + CountryCodeBy = "BY" + // @enum CountryCode + CountryCodeBz = "BZ" + // @enum CountryCode + CountryCodeCa = "CA" + // @enum CountryCode + CountryCodeCc = "CC" + // @enum CountryCode + CountryCodeCd = "CD" + // @enum CountryCode + CountryCodeCf = "CF" + // @enum CountryCode + CountryCodeCg = "CG" + // @enum CountryCode + CountryCodeCh = "CH" + // @enum CountryCode + CountryCodeCi = "CI" + // @enum CountryCode + CountryCodeCk = "CK" + // @enum CountryCode + CountryCodeCl = "CL" + // @enum CountryCode + CountryCodeCm = "CM" + // @enum CountryCode + CountryCodeCn = "CN" + // @enum CountryCode + CountryCodeCo = "CO" + // @enum CountryCode + 
CountryCodeCr = "CR" + // @enum CountryCode + CountryCodeCu = "CU" + // @enum CountryCode + CountryCodeCv = "CV" + // @enum CountryCode + CountryCodeCx = "CX" + // @enum CountryCode + CountryCodeCy = "CY" + // @enum CountryCode + CountryCodeCz = "CZ" + // @enum CountryCode + CountryCodeDe = "DE" + // @enum CountryCode + CountryCodeDj = "DJ" + // @enum CountryCode + CountryCodeDk = "DK" + // @enum CountryCode + CountryCodeDm = "DM" + // @enum CountryCode + CountryCodeDo = "DO" + // @enum CountryCode + CountryCodeDz = "DZ" + // @enum CountryCode + CountryCodeEc = "EC" + // @enum CountryCode + CountryCodeEe = "EE" + // @enum CountryCode + CountryCodeEg = "EG" + // @enum CountryCode + CountryCodeEr = "ER" + // @enum CountryCode + CountryCodeEs = "ES" + // @enum CountryCode + CountryCodeEt = "ET" + // @enum CountryCode + CountryCodeFi = "FI" + // @enum CountryCode + CountryCodeFj = "FJ" + // @enum CountryCode + CountryCodeFk = "FK" + // @enum CountryCode + CountryCodeFm = "FM" + // @enum CountryCode + CountryCodeFo = "FO" + // @enum CountryCode + CountryCodeFr = "FR" + // @enum CountryCode + CountryCodeGa = "GA" + // @enum CountryCode + CountryCodeGb = "GB" + // @enum CountryCode + CountryCodeGd = "GD" + // @enum CountryCode + CountryCodeGe = "GE" + // @enum CountryCode + CountryCodeGh = "GH" + // @enum CountryCode + CountryCodeGi = "GI" + // @enum CountryCode + CountryCodeGl = "GL" + // @enum CountryCode + CountryCodeGm = "GM" + // @enum CountryCode + CountryCodeGn = "GN" + // @enum CountryCode + CountryCodeGq = "GQ" + // @enum CountryCode + CountryCodeGr = "GR" + // @enum CountryCode + CountryCodeGt = "GT" + // @enum CountryCode + CountryCodeGu = "GU" + // @enum CountryCode + CountryCodeGw = "GW" + // @enum CountryCode + CountryCodeGy = "GY" + // @enum CountryCode + CountryCodeHk = "HK" + // @enum CountryCode + CountryCodeHn = "HN" + // @enum CountryCode + CountryCodeHr = "HR" + // @enum CountryCode + CountryCodeHt = "HT" + // @enum CountryCode + CountryCodeHu = "HU" + // @enum CountryCode + CountryCodeId = "ID" + // @enum CountryCode + CountryCodeIe = "IE" + // @enum CountryCode + CountryCodeIl = "IL" + // @enum CountryCode + CountryCodeIm = "IM" + // @enum CountryCode + CountryCodeIn = "IN" + // @enum CountryCode + CountryCodeIq = "IQ" + // @enum CountryCode + CountryCodeIr = "IR" + // @enum CountryCode + CountryCodeIs = "IS" + // @enum CountryCode + CountryCodeIt = "IT" + // @enum CountryCode + CountryCodeJm = "JM" + // @enum CountryCode + CountryCodeJo = "JO" + // @enum CountryCode + CountryCodeJp = "JP" + // @enum CountryCode + CountryCodeKe = "KE" + // @enum CountryCode + CountryCodeKg = "KG" + // @enum CountryCode + CountryCodeKh = "KH" + // @enum CountryCode + CountryCodeKi = "KI" + // @enum CountryCode + CountryCodeKm = "KM" + // @enum CountryCode + CountryCodeKn = "KN" + // @enum CountryCode + CountryCodeKp = "KP" + // @enum CountryCode + CountryCodeKr = "KR" + // @enum CountryCode + CountryCodeKw = "KW" + // @enum CountryCode + CountryCodeKy = "KY" + // @enum CountryCode + CountryCodeKz = "KZ" + // @enum CountryCode + CountryCodeLa = "LA" + // @enum CountryCode + CountryCodeLb = "LB" + // @enum CountryCode + CountryCodeLc = "LC" + // @enum CountryCode + CountryCodeLi = "LI" + // @enum CountryCode + CountryCodeLk = "LK" + // @enum CountryCode + CountryCodeLr = "LR" + // @enum CountryCode + CountryCodeLs = "LS" + // @enum CountryCode + CountryCodeLt = "LT" + // @enum CountryCode + CountryCodeLu = "LU" + // @enum CountryCode + CountryCodeLv = "LV" + // @enum CountryCode + 
CountryCodeLy = "LY" + // @enum CountryCode + CountryCodeMa = "MA" + // @enum CountryCode + CountryCodeMc = "MC" + // @enum CountryCode + CountryCodeMd = "MD" + // @enum CountryCode + CountryCodeMe = "ME" + // @enum CountryCode + CountryCodeMf = "MF" + // @enum CountryCode + CountryCodeMg = "MG" + // @enum CountryCode + CountryCodeMh = "MH" + // @enum CountryCode + CountryCodeMk = "MK" + // @enum CountryCode + CountryCodeMl = "ML" + // @enum CountryCode + CountryCodeMm = "MM" + // @enum CountryCode + CountryCodeMn = "MN" + // @enum CountryCode + CountryCodeMo = "MO" + // @enum CountryCode + CountryCodeMp = "MP" + // @enum CountryCode + CountryCodeMr = "MR" + // @enum CountryCode + CountryCodeMs = "MS" + // @enum CountryCode + CountryCodeMt = "MT" + // @enum CountryCode + CountryCodeMu = "MU" + // @enum CountryCode + CountryCodeMv = "MV" + // @enum CountryCode + CountryCodeMw = "MW" + // @enum CountryCode + CountryCodeMx = "MX" + // @enum CountryCode + CountryCodeMy = "MY" + // @enum CountryCode + CountryCodeMz = "MZ" + // @enum CountryCode + CountryCodeNa = "NA" + // @enum CountryCode + CountryCodeNc = "NC" + // @enum CountryCode + CountryCodeNe = "NE" + // @enum CountryCode + CountryCodeNg = "NG" + // @enum CountryCode + CountryCodeNi = "NI" + // @enum CountryCode + CountryCodeNl = "NL" + // @enum CountryCode + CountryCodeNo = "NO" + // @enum CountryCode + CountryCodeNp = "NP" + // @enum CountryCode + CountryCodeNr = "NR" + // @enum CountryCode + CountryCodeNu = "NU" + // @enum CountryCode + CountryCodeNz = "NZ" + // @enum CountryCode + CountryCodeOm = "OM" + // @enum CountryCode + CountryCodePa = "PA" + // @enum CountryCode + CountryCodePe = "PE" + // @enum CountryCode + CountryCodePf = "PF" + // @enum CountryCode + CountryCodePg = "PG" + // @enum CountryCode + CountryCodePh = "PH" + // @enum CountryCode + CountryCodePk = "PK" + // @enum CountryCode + CountryCodePl = "PL" + // @enum CountryCode + CountryCodePm = "PM" + // @enum CountryCode + CountryCodePn = "PN" + // @enum CountryCode + CountryCodePr = "PR" + // @enum CountryCode + CountryCodePt = "PT" + // @enum CountryCode + CountryCodePw = "PW" + // @enum CountryCode + CountryCodePy = "PY" + // @enum CountryCode + CountryCodeQa = "QA" + // @enum CountryCode + CountryCodeRo = "RO" + // @enum CountryCode + CountryCodeRs = "RS" + // @enum CountryCode + CountryCodeRu = "RU" + // @enum CountryCode + CountryCodeRw = "RW" + // @enum CountryCode + CountryCodeSa = "SA" + // @enum CountryCode + CountryCodeSb = "SB" + // @enum CountryCode + CountryCodeSc = "SC" + // @enum CountryCode + CountryCodeSd = "SD" + // @enum CountryCode + CountryCodeSe = "SE" + // @enum CountryCode + CountryCodeSg = "SG" + // @enum CountryCode + CountryCodeSh = "SH" + // @enum CountryCode + CountryCodeSi = "SI" + // @enum CountryCode + CountryCodeSk = "SK" + // @enum CountryCode + CountryCodeSl = "SL" + // @enum CountryCode + CountryCodeSm = "SM" + // @enum CountryCode + CountryCodeSn = "SN" + // @enum CountryCode + CountryCodeSo = "SO" + // @enum CountryCode + CountryCodeSr = "SR" + // @enum CountryCode + CountryCodeSt = "ST" + // @enum CountryCode + CountryCodeSv = "SV" + // @enum CountryCode + CountryCodeSy = "SY" + // @enum CountryCode + CountryCodeSz = "SZ" + // @enum CountryCode + CountryCodeTc = "TC" + // @enum CountryCode + CountryCodeTd = "TD" + // @enum CountryCode + CountryCodeTg = "TG" + // @enum CountryCode + CountryCodeTh = "TH" + // @enum CountryCode + CountryCodeTj = "TJ" + // @enum CountryCode + CountryCodeTk = "TK" + // @enum CountryCode + 
CountryCodeTl = "TL" + // @enum CountryCode + CountryCodeTm = "TM" + // @enum CountryCode + CountryCodeTn = "TN" + // @enum CountryCode + CountryCodeTo = "TO" + // @enum CountryCode + CountryCodeTr = "TR" + // @enum CountryCode + CountryCodeTt = "TT" + // @enum CountryCode + CountryCodeTv = "TV" + // @enum CountryCode + CountryCodeTw = "TW" + // @enum CountryCode + CountryCodeTz = "TZ" + // @enum CountryCode + CountryCodeUa = "UA" + // @enum CountryCode + CountryCodeUg = "UG" + // @enum CountryCode + CountryCodeUs = "US" + // @enum CountryCode + CountryCodeUy = "UY" + // @enum CountryCode + CountryCodeUz = "UZ" + // @enum CountryCode + CountryCodeVa = "VA" + // @enum CountryCode + CountryCodeVc = "VC" + // @enum CountryCode + CountryCodeVe = "VE" + // @enum CountryCode + CountryCodeVg = "VG" + // @enum CountryCode + CountryCodeVi = "VI" + // @enum CountryCode + CountryCodeVn = "VN" + // @enum CountryCode + CountryCodeVu = "VU" + // @enum CountryCode + CountryCodeWf = "WF" + // @enum CountryCode + CountryCodeWs = "WS" + // @enum CountryCode + CountryCodeYe = "YE" + // @enum CountryCode + CountryCodeYt = "YT" + // @enum CountryCode + CountryCodeZa = "ZA" + // @enum CountryCode + CountryCodeZm = "ZM" + // @enum CountryCode + CountryCodeZw = "ZW" +) + +const ( + // @enum DomainAvailability + DomainAvailabilityAvailable = "AVAILABLE" + // @enum DomainAvailability + DomainAvailabilityAvailableReserved = "AVAILABLE_RESERVED" + // @enum DomainAvailability + DomainAvailabilityAvailablePreorder = "AVAILABLE_PREORDER" + // @enum DomainAvailability + DomainAvailabilityUnavailable = "UNAVAILABLE" + // @enum DomainAvailability + DomainAvailabilityUnavailablePremium = "UNAVAILABLE_PREMIUM" + // @enum DomainAvailability + DomainAvailabilityUnavailableRestricted = "UNAVAILABLE_RESTRICTED" + // @enum DomainAvailability + DomainAvailabilityReserved = "RESERVED" + // @enum DomainAvailability + DomainAvailabilityDontKnow = "DONT_KNOW" +) + +const ( + // @enum ExtraParamName + ExtraParamNameDunsNumber = "DUNS_NUMBER" + // @enum ExtraParamName + ExtraParamNameBrandNumber = "BRAND_NUMBER" + // @enum ExtraParamName + ExtraParamNameBirthDepartment = "BIRTH_DEPARTMENT" + // @enum ExtraParamName + ExtraParamNameBirthDateInYyyyMmDd = "BIRTH_DATE_IN_YYYY_MM_DD" + // @enum ExtraParamName + ExtraParamNameBirthCountry = "BIRTH_COUNTRY" + // @enum ExtraParamName + ExtraParamNameBirthCity = "BIRTH_CITY" + // @enum ExtraParamName + ExtraParamNameDocumentNumber = "DOCUMENT_NUMBER" + // @enum ExtraParamName + ExtraParamNameAuIdNumber = "AU_ID_NUMBER" + // @enum ExtraParamName + ExtraParamNameAuIdType = "AU_ID_TYPE" + // @enum ExtraParamName + ExtraParamNameCaLegalType = "CA_LEGAL_TYPE" + // @enum ExtraParamName + ExtraParamNameCaBusinessEntityType = "CA_BUSINESS_ENTITY_TYPE" + // @enum ExtraParamName + ExtraParamNameEsIdentification = "ES_IDENTIFICATION" + // @enum ExtraParamName + ExtraParamNameEsIdentificationType = "ES_IDENTIFICATION_TYPE" + // @enum ExtraParamName + ExtraParamNameEsLegalForm = "ES_LEGAL_FORM" + // @enum ExtraParamName + ExtraParamNameFiBusinessNumber = "FI_BUSINESS_NUMBER" + // @enum ExtraParamName + ExtraParamNameFiIdNumber = "FI_ID_NUMBER" + // @enum ExtraParamName + ExtraParamNameItPin = "IT_PIN" + // @enum ExtraParamName + ExtraParamNameRuPassportData = "RU_PASSPORT_DATA" + // @enum ExtraParamName + ExtraParamNameSeIdNumber = "SE_ID_NUMBER" + // @enum ExtraParamName + ExtraParamNameSgIdNumber = "SG_ID_NUMBER" + // @enum ExtraParamName + ExtraParamNameVatNumber = "VAT_NUMBER" +) + +const ( + // @enum 
OperationStatus + OperationStatusSubmitted = "SUBMITTED" + // @enum OperationStatus + OperationStatusInProgress = "IN_PROGRESS" + // @enum OperationStatus + OperationStatusError = "ERROR" + // @enum OperationStatus + OperationStatusSuccessful = "SUCCESSFUL" + // @enum OperationStatus + OperationStatusFailed = "FAILED" +) + +const ( + // @enum OperationType + OperationTypeRegisterDomain = "REGISTER_DOMAIN" + // @enum OperationType + OperationTypeDeleteDomain = "DELETE_DOMAIN" + // @enum OperationType + OperationTypeTransferInDomain = "TRANSFER_IN_DOMAIN" + // @enum OperationType + OperationTypeUpdateDomainContact = "UPDATE_DOMAIN_CONTACT" + // @enum OperationType + OperationTypeUpdateNameserver = "UPDATE_NAMESERVER" + // @enum OperationType + OperationTypeChangePrivacyProtection = "CHANGE_PRIVACY_PROTECTION" + // @enum OperationType + OperationTypeDomainLock = "DOMAIN_LOCK" +) + +const ( + // @enum ReachabilityStatus + ReachabilityStatusPending = "PENDING" + // @enum ReachabilityStatus + ReachabilityStatusDone = "DONE" + // @enum ReachabilityStatus + ReachabilityStatusExpired = "EXPIRED" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53domains/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/route53domains/examples_test.go new file mode 100644 index 000000000..5e8459103 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/route53domains/examples_test.go @@ -0,0 +1,645 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package route53domains_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/route53domains" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleRoute53Domains_CheckDomainAvailability() { + svc := route53domains.New(session.New()) + + params := &route53domains.CheckDomainAvailabilityInput{ + DomainName: aws.String("DomainName"), // Required + IdnLangCode: aws.String("LangCode"), + } + resp, err := svc.CheckDomainAvailability(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53Domains_DeleteTagsForDomain() { + svc := route53domains.New(session.New()) + + params := &route53domains.DeleteTagsForDomainInput{ + DomainName: aws.String("DomainName"), // Required + TagsToDelete: []*string{ // Required + aws.String("TagKey"), // Required + // More values... + }, + } + resp, err := svc.DeleteTagsForDomain(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53Domains_DisableDomainAutoRenew() { + svc := route53domains.New(session.New()) + + params := &route53domains.DisableDomainAutoRenewInput{ + DomainName: aws.String("DomainName"), // Required + } + resp, err := svc.DisableDomainAutoRenew(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRoute53Domains_DisableDomainTransferLock() { + svc := route53domains.New(session.New()) + + params := &route53domains.DisableDomainTransferLockInput{ + DomainName: aws.String("DomainName"), // Required + } + resp, err := svc.DisableDomainTransferLock(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53Domains_EnableDomainAutoRenew() { + svc := route53domains.New(session.New()) + + params := &route53domains.EnableDomainAutoRenewInput{ + DomainName: aws.String("DomainName"), // Required + } + resp, err := svc.EnableDomainAutoRenew(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53Domains_EnableDomainTransferLock() { + svc := route53domains.New(session.New()) + + params := &route53domains.EnableDomainTransferLockInput{ + DomainName: aws.String("DomainName"), // Required + } + resp, err := svc.EnableDomainTransferLock(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53Domains_GetContactReachabilityStatus() { + svc := route53domains.New(session.New()) + + params := &route53domains.GetContactReachabilityStatusInput{ + DomainName: aws.String("DomainName"), + } + resp, err := svc.GetContactReachabilityStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53Domains_GetDomainDetail() { + svc := route53domains.New(session.New()) + + params := &route53domains.GetDomainDetailInput{ + DomainName: aws.String("DomainName"), // Required + } + resp, err := svc.GetDomainDetail(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53Domains_GetOperationDetail() { + svc := route53domains.New(session.New()) + + params := &route53domains.GetOperationDetailInput{ + OperationId: aws.String("OperationId"), // Required + } + resp, err := svc.GetOperationDetail(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53Domains_ListDomains() { + svc := route53domains.New(session.New()) + + params := &route53domains.ListDomainsInput{ + Marker: aws.String("PageMarker"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListDomains(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRoute53Domains_ListOperations() { + svc := route53domains.New(session.New()) + + params := &route53domains.ListOperationsInput{ + Marker: aws.String("PageMarker"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListOperations(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53Domains_ListTagsForDomain() { + svc := route53domains.New(session.New()) + + params := &route53domains.ListTagsForDomainInput{ + DomainName: aws.String("DomainName"), // Required + } + resp, err := svc.ListTagsForDomain(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53Domains_RegisterDomain() { + svc := route53domains.New(session.New()) + + params := &route53domains.RegisterDomainInput{ + AdminContact: &route53domains.ContactDetail{ // Required + AddressLine1: aws.String("AddressLine"), + AddressLine2: aws.String("AddressLine"), + City: aws.String("City"), + ContactType: aws.String("ContactType"), + CountryCode: aws.String("CountryCode"), + Email: aws.String("Email"), + ExtraParams: []*route53domains.ExtraParam{ + { // Required + Name: aws.String("ExtraParamName"), // Required + Value: aws.String("ExtraParamValue"), // Required + }, + // More values... + }, + Fax: aws.String("ContactNumber"), + FirstName: aws.String("ContactName"), + LastName: aws.String("ContactName"), + OrganizationName: aws.String("ContactName"), + PhoneNumber: aws.String("ContactNumber"), + State: aws.String("State"), + ZipCode: aws.String("ZipCode"), + }, + DomainName: aws.String("DomainName"), // Required + DurationInYears: aws.Int64(1), // Required + RegistrantContact: &route53domains.ContactDetail{ // Required + AddressLine1: aws.String("AddressLine"), + AddressLine2: aws.String("AddressLine"), + City: aws.String("City"), + ContactType: aws.String("ContactType"), + CountryCode: aws.String("CountryCode"), + Email: aws.String("Email"), + ExtraParams: []*route53domains.ExtraParam{ + { // Required + Name: aws.String("ExtraParamName"), // Required + Value: aws.String("ExtraParamValue"), // Required + }, + // More values... + }, + Fax: aws.String("ContactNumber"), + FirstName: aws.String("ContactName"), + LastName: aws.String("ContactName"), + OrganizationName: aws.String("ContactName"), + PhoneNumber: aws.String("ContactNumber"), + State: aws.String("State"), + ZipCode: aws.String("ZipCode"), + }, + TechContact: &route53domains.ContactDetail{ // Required + AddressLine1: aws.String("AddressLine"), + AddressLine2: aws.String("AddressLine"), + City: aws.String("City"), + ContactType: aws.String("ContactType"), + CountryCode: aws.String("CountryCode"), + Email: aws.String("Email"), + ExtraParams: []*route53domains.ExtraParam{ + { // Required + Name: aws.String("ExtraParamName"), // Required + Value: aws.String("ExtraParamValue"), // Required + }, + // More values... 
+ }, + Fax: aws.String("ContactNumber"), + FirstName: aws.String("ContactName"), + LastName: aws.String("ContactName"), + OrganizationName: aws.String("ContactName"), + PhoneNumber: aws.String("ContactNumber"), + State: aws.String("State"), + ZipCode: aws.String("ZipCode"), + }, + AutoRenew: aws.Bool(true), + IdnLangCode: aws.String("LangCode"), + PrivacyProtectAdminContact: aws.Bool(true), + PrivacyProtectRegistrantContact: aws.Bool(true), + PrivacyProtectTechContact: aws.Bool(true), + } + resp, err := svc.RegisterDomain(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53Domains_ResendContactReachabilityEmail() { + svc := route53domains.New(session.New()) + + params := &route53domains.ResendContactReachabilityEmailInput{ + DomainName: aws.String("DomainName"), + } + resp, err := svc.ResendContactReachabilityEmail(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53Domains_RetrieveDomainAuthCode() { + svc := route53domains.New(session.New()) + + params := &route53domains.RetrieveDomainAuthCodeInput{ + DomainName: aws.String("DomainName"), // Required + } + resp, err := svc.RetrieveDomainAuthCode(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53Domains_TransferDomain() { + svc := route53domains.New(session.New()) + + params := &route53domains.TransferDomainInput{ + AdminContact: &route53domains.ContactDetail{ // Required + AddressLine1: aws.String("AddressLine"), + AddressLine2: aws.String("AddressLine"), + City: aws.String("City"), + ContactType: aws.String("ContactType"), + CountryCode: aws.String("CountryCode"), + Email: aws.String("Email"), + ExtraParams: []*route53domains.ExtraParam{ + { // Required + Name: aws.String("ExtraParamName"), // Required + Value: aws.String("ExtraParamValue"), // Required + }, + // More values... + }, + Fax: aws.String("ContactNumber"), + FirstName: aws.String("ContactName"), + LastName: aws.String("ContactName"), + OrganizationName: aws.String("ContactName"), + PhoneNumber: aws.String("ContactNumber"), + State: aws.String("State"), + ZipCode: aws.String("ZipCode"), + }, + DomainName: aws.String("DomainName"), // Required + DurationInYears: aws.Int64(1), // Required + RegistrantContact: &route53domains.ContactDetail{ // Required + AddressLine1: aws.String("AddressLine"), + AddressLine2: aws.String("AddressLine"), + City: aws.String("City"), + ContactType: aws.String("ContactType"), + CountryCode: aws.String("CountryCode"), + Email: aws.String("Email"), + ExtraParams: []*route53domains.ExtraParam{ + { // Required + Name: aws.String("ExtraParamName"), // Required + Value: aws.String("ExtraParamValue"), // Required + }, + // More values... 
+ }, + Fax: aws.String("ContactNumber"), + FirstName: aws.String("ContactName"), + LastName: aws.String("ContactName"), + OrganizationName: aws.String("ContactName"), + PhoneNumber: aws.String("ContactNumber"), + State: aws.String("State"), + ZipCode: aws.String("ZipCode"), + }, + TechContact: &route53domains.ContactDetail{ // Required + AddressLine1: aws.String("AddressLine"), + AddressLine2: aws.String("AddressLine"), + City: aws.String("City"), + ContactType: aws.String("ContactType"), + CountryCode: aws.String("CountryCode"), + Email: aws.String("Email"), + ExtraParams: []*route53domains.ExtraParam{ + { // Required + Name: aws.String("ExtraParamName"), // Required + Value: aws.String("ExtraParamValue"), // Required + }, + // More values... + }, + Fax: aws.String("ContactNumber"), + FirstName: aws.String("ContactName"), + LastName: aws.String("ContactName"), + OrganizationName: aws.String("ContactName"), + PhoneNumber: aws.String("ContactNumber"), + State: aws.String("State"), + ZipCode: aws.String("ZipCode"), + }, + AuthCode: aws.String("DomainAuthCode"), + AutoRenew: aws.Bool(true), + IdnLangCode: aws.String("LangCode"), + Nameservers: []*route53domains.Nameserver{ + { // Required + Name: aws.String("HostName"), // Required + GlueIps: []*string{ + aws.String("GlueIp"), // Required + // More values... + }, + }, + // More values... + }, + PrivacyProtectAdminContact: aws.Bool(true), + PrivacyProtectRegistrantContact: aws.Bool(true), + PrivacyProtectTechContact: aws.Bool(true), + } + resp, err := svc.TransferDomain(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53Domains_UpdateDomainContact() { + svc := route53domains.New(session.New()) + + params := &route53domains.UpdateDomainContactInput{ + DomainName: aws.String("DomainName"), // Required + AdminContact: &route53domains.ContactDetail{ + AddressLine1: aws.String("AddressLine"), + AddressLine2: aws.String("AddressLine"), + City: aws.String("City"), + ContactType: aws.String("ContactType"), + CountryCode: aws.String("CountryCode"), + Email: aws.String("Email"), + ExtraParams: []*route53domains.ExtraParam{ + { // Required + Name: aws.String("ExtraParamName"), // Required + Value: aws.String("ExtraParamValue"), // Required + }, + // More values... + }, + Fax: aws.String("ContactNumber"), + FirstName: aws.String("ContactName"), + LastName: aws.String("ContactName"), + OrganizationName: aws.String("ContactName"), + PhoneNumber: aws.String("ContactNumber"), + State: aws.String("State"), + ZipCode: aws.String("ZipCode"), + }, + RegistrantContact: &route53domains.ContactDetail{ + AddressLine1: aws.String("AddressLine"), + AddressLine2: aws.String("AddressLine"), + City: aws.String("City"), + ContactType: aws.String("ContactType"), + CountryCode: aws.String("CountryCode"), + Email: aws.String("Email"), + ExtraParams: []*route53domains.ExtraParam{ + { // Required + Name: aws.String("ExtraParamName"), // Required + Value: aws.String("ExtraParamValue"), // Required + }, + // More values... 
+ }, + Fax: aws.String("ContactNumber"), + FirstName: aws.String("ContactName"), + LastName: aws.String("ContactName"), + OrganizationName: aws.String("ContactName"), + PhoneNumber: aws.String("ContactNumber"), + State: aws.String("State"), + ZipCode: aws.String("ZipCode"), + }, + TechContact: &route53domains.ContactDetail{ + AddressLine1: aws.String("AddressLine"), + AddressLine2: aws.String("AddressLine"), + City: aws.String("City"), + ContactType: aws.String("ContactType"), + CountryCode: aws.String("CountryCode"), + Email: aws.String("Email"), + ExtraParams: []*route53domains.ExtraParam{ + { // Required + Name: aws.String("ExtraParamName"), // Required + Value: aws.String("ExtraParamValue"), // Required + }, + // More values... + }, + Fax: aws.String("ContactNumber"), + FirstName: aws.String("ContactName"), + LastName: aws.String("ContactName"), + OrganizationName: aws.String("ContactName"), + PhoneNumber: aws.String("ContactNumber"), + State: aws.String("State"), + ZipCode: aws.String("ZipCode"), + }, + } + resp, err := svc.UpdateDomainContact(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53Domains_UpdateDomainContactPrivacy() { + svc := route53domains.New(session.New()) + + params := &route53domains.UpdateDomainContactPrivacyInput{ + DomainName: aws.String("DomainName"), // Required + AdminPrivacy: aws.Bool(true), + RegistrantPrivacy: aws.Bool(true), + TechPrivacy: aws.Bool(true), + } + resp, err := svc.UpdateDomainContactPrivacy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53Domains_UpdateDomainNameservers() { + svc := route53domains.New(session.New()) + + params := &route53domains.UpdateDomainNameserversInput{ + DomainName: aws.String("DomainName"), // Required + Nameservers: []*route53domains.Nameserver{ // Required + { // Required + Name: aws.String("HostName"), // Required + GlueIps: []*string{ + aws.String("GlueIp"), // Required + // More values... + }, + }, + // More values... + }, + FIAuthKey: aws.String("FIAuthKey"), + } + resp, err := svc.UpdateDomainNameservers(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53Domains_UpdateTagsForDomain() { + svc := route53domains.New(session.New()) + + params := &route53domains.UpdateTagsForDomainInput{ + DomainName: aws.String("DomainName"), // Required + TagsToUpdate: []*route53domains.Tag{ + { // Required + Key: aws.String("TagKey"), + Value: aws.String("TagValue"), + }, + // More values... + }, + } + resp, err := svc.UpdateTagsForDomain(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53domains/route53domainsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/route53domains/route53domainsiface/interface.go new file mode 100644 index 000000000..11792aa14 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/route53domains/route53domainsiface/interface.go @@ -0,0 +1,98 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package route53domainsiface provides an interface for the Amazon Route 53 Domains. +package route53domainsiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/route53domains" +) + +// Route53DomainsAPI is the interface type for route53domains.Route53Domains. +type Route53DomainsAPI interface { + CheckDomainAvailabilityRequest(*route53domains.CheckDomainAvailabilityInput) (*request.Request, *route53domains.CheckDomainAvailabilityOutput) + + CheckDomainAvailability(*route53domains.CheckDomainAvailabilityInput) (*route53domains.CheckDomainAvailabilityOutput, error) + + DeleteTagsForDomainRequest(*route53domains.DeleteTagsForDomainInput) (*request.Request, *route53domains.DeleteTagsForDomainOutput) + + DeleteTagsForDomain(*route53domains.DeleteTagsForDomainInput) (*route53domains.DeleteTagsForDomainOutput, error) + + DisableDomainAutoRenewRequest(*route53domains.DisableDomainAutoRenewInput) (*request.Request, *route53domains.DisableDomainAutoRenewOutput) + + DisableDomainAutoRenew(*route53domains.DisableDomainAutoRenewInput) (*route53domains.DisableDomainAutoRenewOutput, error) + + DisableDomainTransferLockRequest(*route53domains.DisableDomainTransferLockInput) (*request.Request, *route53domains.DisableDomainTransferLockOutput) + + DisableDomainTransferLock(*route53domains.DisableDomainTransferLockInput) (*route53domains.DisableDomainTransferLockOutput, error) + + EnableDomainAutoRenewRequest(*route53domains.EnableDomainAutoRenewInput) (*request.Request, *route53domains.EnableDomainAutoRenewOutput) + + EnableDomainAutoRenew(*route53domains.EnableDomainAutoRenewInput) (*route53domains.EnableDomainAutoRenewOutput, error) + + EnableDomainTransferLockRequest(*route53domains.EnableDomainTransferLockInput) (*request.Request, *route53domains.EnableDomainTransferLockOutput) + + EnableDomainTransferLock(*route53domains.EnableDomainTransferLockInput) (*route53domains.EnableDomainTransferLockOutput, error) + + GetContactReachabilityStatusRequest(*route53domains.GetContactReachabilityStatusInput) (*request.Request, *route53domains.GetContactReachabilityStatusOutput) + + GetContactReachabilityStatus(*route53domains.GetContactReachabilityStatusInput) (*route53domains.GetContactReachabilityStatusOutput, error) + + GetDomainDetailRequest(*route53domains.GetDomainDetailInput) (*request.Request, *route53domains.GetDomainDetailOutput) + + GetDomainDetail(*route53domains.GetDomainDetailInput) (*route53domains.GetDomainDetailOutput, error) + + GetOperationDetailRequest(*route53domains.GetOperationDetailInput) (*request.Request, *route53domains.GetOperationDetailOutput) + + GetOperationDetail(*route53domains.GetOperationDetailInput) (*route53domains.GetOperationDetailOutput, error) + + ListDomainsRequest(*route53domains.ListDomainsInput) (*request.Request, *route53domains.ListDomainsOutput) + + ListDomains(*route53domains.ListDomainsInput) (*route53domains.ListDomainsOutput, error) + + ListDomainsPages(*route53domains.ListDomainsInput, func(*route53domains.ListDomainsOutput, bool) bool) error + + 
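+	// Illustrative sketch only (svc is a placeholder value satisfying
+	// Route53DomainsAPI, not part of the generated interface): the Pages
+	// variants walk every page of results, invoking the callback per page
+	// until it returns false or the last page is delivered.
+	//
+	//	err := svc.ListDomainsPages(&route53domains.ListDomainsInput{},
+	//		func(page *route53domains.ListDomainsOutput, lastPage bool) bool {
+	//			for _, d := range page.Domains {
+	//				fmt.Println(aws.StringValue(d.DomainName))
+	//			}
+	//			return true // keep paging
+	//		})
+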
ListOperationsRequest(*route53domains.ListOperationsInput) (*request.Request, *route53domains.ListOperationsOutput) + + ListOperations(*route53domains.ListOperationsInput) (*route53domains.ListOperationsOutput, error) + + ListOperationsPages(*route53domains.ListOperationsInput, func(*route53domains.ListOperationsOutput, bool) bool) error + + ListTagsForDomainRequest(*route53domains.ListTagsForDomainInput) (*request.Request, *route53domains.ListTagsForDomainOutput) + + ListTagsForDomain(*route53domains.ListTagsForDomainInput) (*route53domains.ListTagsForDomainOutput, error) + + RegisterDomainRequest(*route53domains.RegisterDomainInput) (*request.Request, *route53domains.RegisterDomainOutput) + + RegisterDomain(*route53domains.RegisterDomainInput) (*route53domains.RegisterDomainOutput, error) + + ResendContactReachabilityEmailRequest(*route53domains.ResendContactReachabilityEmailInput) (*request.Request, *route53domains.ResendContactReachabilityEmailOutput) + + ResendContactReachabilityEmail(*route53domains.ResendContactReachabilityEmailInput) (*route53domains.ResendContactReachabilityEmailOutput, error) + + RetrieveDomainAuthCodeRequest(*route53domains.RetrieveDomainAuthCodeInput) (*request.Request, *route53domains.RetrieveDomainAuthCodeOutput) + + RetrieveDomainAuthCode(*route53domains.RetrieveDomainAuthCodeInput) (*route53domains.RetrieveDomainAuthCodeOutput, error) + + TransferDomainRequest(*route53domains.TransferDomainInput) (*request.Request, *route53domains.TransferDomainOutput) + + TransferDomain(*route53domains.TransferDomainInput) (*route53domains.TransferDomainOutput, error) + + UpdateDomainContactRequest(*route53domains.UpdateDomainContactInput) (*request.Request, *route53domains.UpdateDomainContactOutput) + + UpdateDomainContact(*route53domains.UpdateDomainContactInput) (*route53domains.UpdateDomainContactOutput, error) + + UpdateDomainContactPrivacyRequest(*route53domains.UpdateDomainContactPrivacyInput) (*request.Request, *route53domains.UpdateDomainContactPrivacyOutput) + + UpdateDomainContactPrivacy(*route53domains.UpdateDomainContactPrivacyInput) (*route53domains.UpdateDomainContactPrivacyOutput, error) + + UpdateDomainNameserversRequest(*route53domains.UpdateDomainNameserversInput) (*request.Request, *route53domains.UpdateDomainNameserversOutput) + + UpdateDomainNameservers(*route53domains.UpdateDomainNameserversInput) (*route53domains.UpdateDomainNameserversOutput, error) + + UpdateTagsForDomainRequest(*route53domains.UpdateTagsForDomainInput) (*request.Request, *route53domains.UpdateTagsForDomainOutput) + + UpdateTagsForDomain(*route53domains.UpdateTagsForDomainInput) (*route53domains.UpdateTagsForDomainOutput, error) +} + +var _ Route53DomainsAPI = (*route53domains.Route53Domains)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53domains/service.go b/vendor/github.com/aws/aws-sdk-go/service/route53domains/service.go new file mode 100644 index 000000000..f119641ea --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/route53domains/service.go @@ -0,0 +1,88 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package route53domains + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// Route53Domains is a client for Amazon Route 53 Domains. +// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though. +type Route53Domains struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "route53domains" + +// New creates a new instance of the Route53Domains client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a Route53Domains client from just a session. +// svc := route53domains.New(mySession) +// +// // Create a Route53Domains client with additional configuration +// svc := route53domains.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *Route53Domains { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *Route53Domains { + svc := &Route53Domains{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-05-15", + JSONVersion: "1.1", + TargetPrefix: "Route53Domains_v20140515", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a Route53Domains operation and runs any +// custom request initialization. +func (c *Route53Domains) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go new file mode 100644 index 000000000..5132954f3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -0,0 +1,9527 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package s3 provides a client for Amazon Simple Storage Service. +package s3 + +import ( + "fmt" + "io" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restxml" +) + +const opAbortMultipartUpload = "AbortMultipartUpload" + +// AbortMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the AbortMultipartUpload operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AbortMultipartUpload method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AbortMultipartUploadRequest method. +// req, resp := client.AbortMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) { + op := &request.Operation{ + Name: opAbortMultipartUpload, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &AbortMultipartUploadInput{} + } + + req = c.newRequest(op, input, output) + output = &AbortMultipartUploadOutput{} + req.Data = output + return +} + +// Aborts a multipart upload. +// +// To verify that all parts have been removed, so you don't get charged for +// the part storage, you should call the List Parts operation and ensure the +// parts list is empty. +func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) { + req, out := c.AbortMultipartUploadRequest(input) + err := req.Send() + return out, err +} + +const opCompleteMultipartUpload = "CompleteMultipartUpload" + +// CompleteMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the CompleteMultipartUpload operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CompleteMultipartUpload method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CompleteMultipartUploadRequest method. +// req, resp := client.CompleteMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *CompleteMultipartUploadOutput) { + op := &request.Operation{ + Name: opCompleteMultipartUpload, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &CompleteMultipartUploadInput{} + } + + req = c.newRequest(op, input, output) + output = &CompleteMultipartUploadOutput{} + req.Data = output + return +} + +// Completes a multipart upload by assembling previously uploaded parts. +func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) { + req, out := c.CompleteMultipartUploadRequest(input) + err := req.Send() + return out, err +} + +const opCopyObject = "CopyObject" + +// CopyObjectRequest generates a "aws/request.Request" representing the +// client's request for the CopyObject operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CopyObject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CopyObjectRequest method. +// req, resp := client.CopyObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, output *CopyObjectOutput) { + op := &request.Operation{ + Name: opCopyObject, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &CopyObjectInput{} + } + + req = c.newRequest(op, input, output) + output = &CopyObjectOutput{} + req.Data = output + return +} + +// Creates a copy of an object that is already stored in Amazon S3. +func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) { + req, out := c.CopyObjectRequest(input) + err := req.Send() + return out, err +} + +const opCreateBucket = "CreateBucket" + +// CreateBucketRequest generates a "aws/request.Request" representing the +// client's request for the CreateBucket operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateBucket method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateBucketRequest method. +// req, resp := client.CreateBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request, output *CreateBucketOutput) { + op := &request.Operation{ + Name: opCreateBucket, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &CreateBucketInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateBucketOutput{} + req.Data = output + return +} + +// Creates a new bucket. +func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) { + req, out := c.CreateBucketRequest(input) + err := req.Send() + return out, err +} + +const opCreateMultipartUpload = "CreateMultipartUpload" + +// CreateMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the CreateMultipartUpload operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the CreateMultipartUpload method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateMultipartUploadRequest method. +// req, resp := client.CreateMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (req *request.Request, output *CreateMultipartUploadOutput) { + op := &request.Operation{ + Name: opCreateMultipartUpload, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?uploads", + } + + if input == nil { + input = &CreateMultipartUploadInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateMultipartUploadOutput{} + req.Data = output + return +} + +// Initiates a multipart upload and returns an upload ID. +// +// Note: After you initiate multipart upload and upload one or more parts, you +// must either complete or abort multipart upload in order to stop getting charged +// for storage of the uploaded parts. Only after you either complete or abort +// multipart upload, Amazon S3 frees up the parts storage and stops charging +// you for the parts storage. +func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) { + req, out := c.CreateMultipartUploadRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBucket = "DeleteBucket" + +// DeleteBucketRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucket operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucket method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketRequest method. +// req, resp := client.DeleteBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request, output *DeleteBucketOutput) { + op := &request.Operation{ + Name: opDeleteBucket, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &DeleteBucketInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteBucketOutput{} + req.Data = output + return +} + +// Deletes the bucket. All objects (including all object versions and Delete +// Markers) in the bucket must be deleted before the bucket itself can be deleted. +func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) { + req, out := c.DeleteBucketRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBucketCors = "DeleteBucketCors" + +// DeleteBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketCors operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketCors method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketCorsRequest method. +// req, resp := client.DeleteBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request.Request, output *DeleteBucketCorsOutput) { + op := &request.Operation{ + Name: opDeleteBucketCors, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &DeleteBucketCorsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteBucketCorsOutput{} + req.Data = output + return +} + +// Deletes the cors configuration information set for the bucket. +func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOutput, error) { + req, out := c.DeleteBucketCorsRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBucketLifecycle = "DeleteBucketLifecycle" + +// DeleteBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketLifecycle operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketLifecycle method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketLifecycleRequest method. +// req, resp := client.DeleteBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *request.Request, output *DeleteBucketLifecycleOutput) { + op := &request.Operation{ + Name: opDeleteBucketLifecycle, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &DeleteBucketLifecycleInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteBucketLifecycleOutput{} + req.Data = output + return +} + +// Deletes the lifecycle configuration from the bucket. 
+func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) { + req, out := c.DeleteBucketLifecycleRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBucketPolicy = "DeleteBucketPolicy" + +// DeleteBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketPolicyRequest method. +// req, resp := client.DeleteBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *request.Request, output *DeleteBucketPolicyOutput) { + op := &request.Operation{ + Name: opDeleteBucketPolicy, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &DeleteBucketPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteBucketPolicyOutput{} + req.Data = output + return +} + +// Deletes the policy from the bucket. +func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) { + req, out := c.DeleteBucketPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBucketReplication = "DeleteBucketReplication" + +// DeleteBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketReplication operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketReplication method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketReplicationRequest method. 
+// req, resp := client.DeleteBucketReplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *request.Request, output *DeleteBucketReplicationOutput) { + op := &request.Operation{ + Name: opDeleteBucketReplication, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &DeleteBucketReplicationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteBucketReplicationOutput{} + req.Data = output + return +} + +// Deletes the replication configuration from the bucket. +func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) { + req, out := c.DeleteBucketReplicationRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBucketTagging = "DeleteBucketTagging" + +// DeleteBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketTagging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketTagging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketTaggingRequest method. +// req, resp := client.DeleteBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *request.Request, output *DeleteBucketTaggingOutput) { + op := &request.Operation{ + Name: opDeleteBucketTagging, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &DeleteBucketTaggingInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteBucketTaggingOutput{} + req.Data = output + return +} + +// Deletes the tags from the bucket. +func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) { + req, out := c.DeleteBucketTaggingRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBucketWebsite = "DeleteBucketWebsite" + +// DeleteBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketWebsite operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketWebsite method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketWebsiteRequest method. +// req, resp := client.DeleteBucketWebsiteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *request.Request, output *DeleteBucketWebsiteOutput) { + op := &request.Operation{ + Name: opDeleteBucketWebsite, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &DeleteBucketWebsiteInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteBucketWebsiteOutput{} + req.Data = output + return +} + +// This operation removes the website configuration from the bucket. +func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) { + req, out := c.DeleteBucketWebsiteRequest(input) + err := req.Send() + return out, err +} + +const opDeleteObject = "DeleteObject" + +// DeleteObjectRequest generates a "aws/request.Request" representing the +// client's request for the DeleteObject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteObject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteObjectRequest method. +// req, resp := client.DeleteObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request, output *DeleteObjectOutput) { + op := &request.Operation{ + Name: opDeleteObject, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &DeleteObjectInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteObjectOutput{} + req.Data = output + return +} + +// Removes the null version (if there is one) of an object and inserts a delete +// marker, which becomes the latest version of the object. If there isn't a +// null version, Amazon S3 does not remove any objects. +func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) { + req, out := c.DeleteObjectRequest(input) + err := req.Send() + return out, err +} + +const opDeleteObjects = "DeleteObjects" + +// DeleteObjectsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteObjects operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the DeleteObjects method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteObjectsRequest method. +// req, resp := client.DeleteObjectsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Request, output *DeleteObjectsOutput) { + op := &request.Operation{ + Name: opDeleteObjects, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}?delete", + } + + if input == nil { + input = &DeleteObjectsInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteObjectsOutput{} + req.Data = output + return +} + +// This operation enables you to delete multiple objects from a bucket using +// a single HTTP request. You may specify up to 1000 keys. +func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) { + req, out := c.DeleteObjectsRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration" + +// GetBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketAccelerateConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketAccelerateConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketAccelerateConfigurationRequest method. +// req, resp := client.GetBucketAccelerateConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateConfigurationInput) (req *request.Request, output *GetBucketAccelerateConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketAccelerateConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?accelerate", + } + + if input == nil { + input = &GetBucketAccelerateConfigurationInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketAccelerateConfigurationOutput{} + req.Data = output + return +} + +// Returns the accelerate configuration of a bucket. +func (c *S3) GetBucketAccelerateConfiguration(input *GetBucketAccelerateConfigurationInput) (*GetBucketAccelerateConfigurationOutput, error) { + req, out := c.GetBucketAccelerateConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketAcl = "GetBucketAcl" + +// GetBucketAclRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketAcl operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketAcl method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketAclRequest method. +// req, resp := client.GetBucketAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request, output *GetBucketAclOutput) { + op := &request.Operation{ + Name: opGetBucketAcl, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?acl", + } + + if input == nil { + input = &GetBucketAclInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketAclOutput{} + req.Data = output + return +} + +// Gets the access control policy for the bucket. +func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) { + req, out := c.GetBucketAclRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketCors = "GetBucketCors" + +// GetBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketCors operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketCors method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketCorsRequest method. +// req, resp := client.GetBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Request, output *GetBucketCorsOutput) { + op := &request.Operation{ + Name: opGetBucketCors, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &GetBucketCorsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketCorsOutput{} + req.Data = output + return +} + +// Returns the cors configuration for the bucket. +func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, error) { + req, out := c.GetBucketCorsRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketLifecycle = "GetBucketLifecycle" + +// GetBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLifecycle operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the GetBucketLifecycle method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketLifecycleRequest method. +// req, resp := client.GetBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *request.Request, output *GetBucketLifecycleOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, GetBucketLifecycle, has been deprecated") + } + op := &request.Operation{ + Name: opGetBucketLifecycle, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &GetBucketLifecycleInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketLifecycleOutput{} + req.Data = output + return +} + +// Deprecated, see the GetBucketLifecycleConfiguration operation. +func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) { + req, out := c.GetBucketLifecycleRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration" + +// GetBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLifecycleConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketLifecycleConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketLifecycleConfigurationRequest method. +// req, resp := client.GetBucketLifecycleConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleConfigurationInput) (req *request.Request, output *GetBucketLifecycleConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketLifecycleConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &GetBucketLifecycleConfigurationInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketLifecycleConfigurationOutput{} + req.Data = output + return +} + +// Returns the lifecycle configuration information set on the bucket. +func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) { + req, out := c.GetBucketLifecycleConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketLocation = "GetBucketLocation" + +// GetBucketLocationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLocation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketLocation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketLocationRequest method. +// req, resp := client.GetBucketLocationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *request.Request, output *GetBucketLocationOutput) { + op := &request.Operation{ + Name: opGetBucketLocation, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?location", + } + + if input == nil { + input = &GetBucketLocationInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketLocationOutput{} + req.Data = output + return +} + +// Returns the region the bucket resides in. +func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) { + req, out := c.GetBucketLocationRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketLogging = "GetBucketLogging" + +// GetBucketLoggingRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLogging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketLogging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketLoggingRequest method. +// req, resp := client.GetBucketLoggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request.Request, output *GetBucketLoggingOutput) { + op := &request.Operation{ + Name: opGetBucketLogging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?logging", + } + + if input == nil { + input = &GetBucketLoggingInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketLoggingOutput{} + req.Data = output + return +} + +// Returns the logging status of a bucket and the permissions users have to +// view and modify that status. To use GET, you must be the bucket owner. +func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) { + req, out := c.GetBucketLoggingRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketNotification = "GetBucketNotification" + +// GetBucketNotificationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketNotification operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketNotification method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketNotificationRequest method. +// req, resp := client.GetBucketNotificationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfigurationDeprecated) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, GetBucketNotification, has been deprecated") + } + op := &request.Operation{ + Name: opGetBucketNotification, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &GetBucketNotificationConfigurationRequest{} + } + + req = c.newRequest(op, input, output) + output = &NotificationConfigurationDeprecated{} + req.Data = output + return +} + +// Deprecated, see the GetBucketNotificationConfiguration operation. +func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) { + req, out := c.GetBucketNotificationRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration" + +// GetBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketNotificationConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketNotificationConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketNotificationConfigurationRequest method. +// req, resp := client.GetBucketNotificationConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfiguration) { + op := &request.Operation{ + Name: opGetBucketNotificationConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &GetBucketNotificationConfigurationRequest{} + } + + req = c.newRequest(op, input, output) + output = &NotificationConfiguration{} + req.Data = output + return +} + +// Returns the notification configuration of a bucket. 
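+//
+// For illustration only (the bucket name is a placeholder):
+//
+//	cfg, err := svc.GetBucketNotificationConfiguration(
+//		&s3.GetBucketNotificationConfigurationRequest{
+//			Bucket: aws.String("examplebucket"),
+//		})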
+func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConfigurationRequest) (*NotificationConfiguration, error) { + req, out := c.GetBucketNotificationConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketPolicy = "GetBucketPolicy" + +// GetBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketPolicyRequest method. +// req, resp := client.GetBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.Request, output *GetBucketPolicyOutput) { + op := &request.Operation{ + Name: opGetBucketPolicy, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &GetBucketPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketPolicyOutput{} + req.Data = output + return +} + +// Returns the policy of a specified bucket. +func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) { + req, out := c.GetBucketPolicyRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketReplication = "GetBucketReplication" + +// GetBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketReplication operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketReplication method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketReplicationRequest method. +// req, resp := client.GetBucketReplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *request.Request, output *GetBucketReplicationOutput) { + op := &request.Operation{ + Name: opGetBucketReplication, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &GetBucketReplicationInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketReplicationOutput{} + req.Data = output + return +} + +// Deprecated, see the GetBucketReplicationConfiguration operation. 
+func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) { + req, out := c.GetBucketReplicationRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketRequestPayment = "GetBucketRequestPayment" + +// GetBucketRequestPaymentRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketRequestPayment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketRequestPayment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketRequestPaymentRequest method. +// req, resp := client.GetBucketRequestPaymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) (req *request.Request, output *GetBucketRequestPaymentOutput) { + op := &request.Operation{ + Name: opGetBucketRequestPayment, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?requestPayment", + } + + if input == nil { + input = &GetBucketRequestPaymentInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketRequestPaymentOutput{} + req.Data = output + return +} + +// Returns the request payment configuration of a bucket. +func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetBucketRequestPaymentOutput, error) { + req, out := c.GetBucketRequestPaymentRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketTagging = "GetBucketTagging" + +// GetBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketTagging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketTagging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketTaggingRequest method. +// req, resp := client.GetBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request.Request, output *GetBucketTaggingOutput) { + op := &request.Operation{ + Name: opGetBucketTagging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &GetBucketTaggingInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketTaggingOutput{} + req.Data = output + return +} + +// Returns the tag set associated with the bucket. 
+func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) { + req, out := c.GetBucketTaggingRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketVersioning = "GetBucketVersioning" + +// GetBucketVersioningRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketVersioning operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketVersioning method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketVersioningRequest method. +// req, resp := client.GetBucketVersioningRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *request.Request, output *GetBucketVersioningOutput) { + op := &request.Operation{ + Name: opGetBucketVersioning, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?versioning", + } + + if input == nil { + input = &GetBucketVersioningInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketVersioningOutput{} + req.Data = output + return +} + +// Returns the versioning state of a bucket. +func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) { + req, out := c.GetBucketVersioningRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketWebsite = "GetBucketWebsite" + +// GetBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketWebsite operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketWebsite method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketWebsiteRequest method. +// req, resp := client.GetBucketWebsiteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request.Request, output *GetBucketWebsiteOutput) { + op := &request.Operation{ + Name: opGetBucketWebsite, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &GetBucketWebsiteInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketWebsiteOutput{} + req.Data = output + return +} + +// Returns the website configuration for a bucket. 
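// By contrast, when only the response matters, the direct form suffices. A
// minimal sketch, assuming an *s3.S3 client and the imports from the earlier
// sketch; the bucket name is hypothetical.
func showVersioningState(svc *s3.S3) {
	out, err := svc.GetBucketVersioning(&s3.GetBucketVersioningInput{
		Bucket: aws.String("my-bucket"),
	})
	if err == nil {
		// Status stays unset until versioning has been configured at least once.
		fmt.Println("versioning status:", aws.StringValue(out.Status))
	}
}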
+func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) { + req, out := c.GetBucketWebsiteRequest(input) + err := req.Send() + return out, err +} + +const opGetObject = "GetObject" + +// GetObjectRequest generates a "aws/request.Request" representing the +// client's request for the GetObject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetObject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetObjectRequest method. +// req, resp := client.GetObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, output *GetObjectOutput) { + op := &request.Operation{ + Name: opGetObject, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &GetObjectInput{} + } + + req = c.newRequest(op, input, output) + output = &GetObjectOutput{} + req.Data = output + return +} + +// Retrieves objects from Amazon S3. +func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) { + req, out := c.GetObjectRequest(input) + err := req.Send() + return out, err +} + +const opGetObjectAcl = "GetObjectAcl" + +// GetObjectAclRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectAcl operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetObjectAcl method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetObjectAclRequest method. +// req, resp := client.GetObjectAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request, output *GetObjectAclOutput) { + op := &request.Operation{ + Name: opGetObjectAcl, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?acl", + } + + if input == nil { + input = &GetObjectAclInput{} + } + + req = c.newRequest(op, input, output) + output = &GetObjectAclOutput{} + req.Data = output + return +} + +// Returns the access control list (ACL) of an object. +func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) { + req, out := c.GetObjectAclRequest(input) + err := req.Send() + return out, err +} + +const opGetObjectTorrent = "GetObjectTorrent" + +// GetObjectTorrentRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectTorrent operation. 
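// A minimal sketch of fetching an object, and of pre-signing the same request
// instead of sending it. Assumes the client and imports from the earlier
// sketches plus "io/ioutil" and "time"; the bucket, key, and the availability
// of Presign on this vendored request type are assumptions.
func fetchObject(svc *s3.S3) {
	out, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String("my-bucket"),
		Key:    aws.String("hello.txt"),
	})
	if err == nil {
		body, _ := ioutil.ReadAll(out.Body) // Body is an io.ReadCloser
		out.Body.Close()
		fmt.Println(len(body), "bytes")
	}

	// The request form can also be pre-signed rather than sent.
	req, _ := svc.GetObjectRequest(&s3.GetObjectInput{
		Bucket: aws.String("my-bucket"),
		Key:    aws.String("hello.txt"),
	})
	url, _ := req.Presign(15 * time.Minute)
	fmt.Println("presigned URL:", url)
}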
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetObjectTorrent method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetObjectTorrentRequest method. +// req, resp := client.GetObjectTorrentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request.Request, output *GetObjectTorrentOutput) { + op := &request.Operation{ + Name: opGetObjectTorrent, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?torrent", + } + + if input == nil { + input = &GetObjectTorrentInput{} + } + + req = c.newRequest(op, input, output) + output = &GetObjectTorrentOutput{} + req.Data = output + return +} + +// Return torrent files from a bucket. +func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) { + req, out := c.GetObjectTorrentRequest(input) + err := req.Send() + return out, err +} + +const opHeadBucket = "HeadBucket" + +// HeadBucketRequest generates a "aws/request.Request" representing the +// client's request for the HeadBucket operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the HeadBucket method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the HeadBucketRequest method. +// req, resp := client.HeadBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) { + op := &request.Operation{ + Name: opHeadBucket, + HTTPMethod: "HEAD", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &HeadBucketInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &HeadBucketOutput{} + req.Data = output + return +} + +// This operation is useful to determine if a bucket exists and you have permission +// to access it. +func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) { + req, out := c.HeadBucketRequest(input) + err := req.Send() + return out, err +} + +const opHeadObject = "HeadObject" + +// HeadObjectRequest generates a "aws/request.Request" representing the +// client's request for the HeadObject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the HeadObject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the HeadObjectRequest method. +// req, resp := client.HeadObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) { + op := &request.Operation{ + Name: opHeadObject, + HTTPMethod: "HEAD", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &HeadObjectInput{} + } + + req = c.newRequest(op, input, output) + output = &HeadObjectOutput{} + req.Data = output + return +} + +// The HEAD operation retrieves metadata from an object without returning the +// object itself. This operation is useful if you're only interested in an object's +// metadata. To use HEAD, you must have READ access to the object. +func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) { + req, out := c.HeadObjectRequest(input) + err := req.Send() + return out, err +} + +const opListBuckets = "ListBuckets" + +// ListBucketsRequest generates a "aws/request.Request" representing the +// client's request for the ListBuckets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListBuckets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListBucketsRequest method. +// req, resp := client.ListBucketsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, output *ListBucketsOutput) { + op := &request.Operation{ + Name: opListBuckets, + HTTPMethod: "GET", + HTTPPath: "/", + } + + if input == nil { + input = &ListBucketsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListBucketsOutput{} + req.Data = output + return +} + +// Returns a list of all buckets owned by the authenticated sender of the request. +func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) { + req, out := c.ListBucketsRequest(input) + err := req.Send() + return out, err +} + +const opListMultipartUploads = "ListMultipartUploads" + +// ListMultipartUploadsRequest generates a "aws/request.Request" representing the +// client's request for the ListMultipartUploads operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
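// A minimal sketch of ListBuckets, which needs no required parameters;
// assumes the client and imports from the earlier sketches.
func printBuckets(svc *s3.S3) {
	out, err := svc.ListBuckets(&s3.ListBucketsInput{})
	if err != nil {
		return
	}
	for _, b := range out.Buckets {
		fmt.Println(aws.StringValue(b.Name), b.CreationDate)
	}
}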
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListMultipartUploads method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListMultipartUploadsRequest method. +// req, resp := client.ListMultipartUploadsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) { + op := &request.Operation{ + Name: opListMultipartUploads, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?uploads", + Paginator: &request.Paginator{ + InputTokens: []string{"KeyMarker", "UploadIdMarker"}, + OutputTokens: []string{"NextKeyMarker", "NextUploadIdMarker"}, + LimitToken: "MaxUploads", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListMultipartUploadsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListMultipartUploadsOutput{} + req.Data = output + return +} + +// This operation lists in-progress multipart uploads. +func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) { + req, out := c.ListMultipartUploadsRequest(input) + err := req.Send() + return out, err +} + +// ListMultipartUploadsPages iterates over the pages of a ListMultipartUploads operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListMultipartUploads method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListMultipartUploads operation. +// pageNum := 0 +// err := client.ListMultipartUploadsPages(params, +// func(page *ListMultipartUploadsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(p *ListMultipartUploadsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListMultipartUploadsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListMultipartUploadsOutput), lastPage) + }) +} + +const opListObjectVersions = "ListObjectVersions" + +// ListObjectVersionsRequest generates a "aws/request.Request" representing the +// client's request for the ListObjectVersions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListObjectVersions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
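// A minimal sketch of the Pages helper just shown, which follows the
// KeyMarker/UploadIdMarker tokens automatically. Assumes the client and
// imports from the earlier sketches; the bucket name is hypothetical.
func printInProgressUploads(svc *s3.S3) error {
	return svc.ListMultipartUploadsPages(&s3.ListMultipartUploadsInput{
		Bucket: aws.String("my-bucket"),
	}, func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool {
		for _, u := range page.Uploads {
			fmt.Println(aws.StringValue(u.Key), aws.StringValue(u.UploadId))
		}
		return true // keep paging until lastPage
	})
}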
+// +// // Example sending a request using the ListObjectVersionsRequest method. +// req, resp := client.ListObjectVersionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *request.Request, output *ListObjectVersionsOutput) { + op := &request.Operation{ + Name: opListObjectVersions, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?versions", + Paginator: &request.Paginator{ + InputTokens: []string{"KeyMarker", "VersionIdMarker"}, + OutputTokens: []string{"NextKeyMarker", "NextVersionIdMarker"}, + LimitToken: "MaxKeys", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListObjectVersionsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListObjectVersionsOutput{} + req.Data = output + return +} + +// Returns metadata about all of the versions of objects in a bucket. +func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) { + req, out := c.ListObjectVersionsRequest(input) + err := req.Send() + return out, err +} + +// ListObjectVersionsPages iterates over the pages of a ListObjectVersions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListObjectVersions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjectVersions operation. +// pageNum := 0 +// err := client.ListObjectVersionsPages(params, +// func(page *ListObjectVersionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(p *ListObjectVersionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListObjectVersionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListObjectVersionsOutput), lastPage) + }) +} + +const opListObjects = "ListObjects" + +// ListObjectsRequest generates a "aws/request.Request" representing the +// client's request for the ListObjects operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListObjects method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListObjectsRequest method. 
+// req, resp := client.ListObjectsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, output *ListObjectsOutput) { + op := &request.Operation{ + Name: opListObjects, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker || Contents[-1].Key"}, + LimitToken: "MaxKeys", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListObjectsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListObjectsOutput{} + req.Data = output + return +} + +// Returns some or all (up to 1000) of the objects in a bucket. You can use +// the request parameters as selection criteria to return a subset of the objects +// in a bucket. +func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) { + req, out := c.ListObjectsRequest(input) + err := req.Send() + return out, err +} + +// ListObjectsPages iterates over the pages of a ListObjects operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListObjects method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjects operation. +// pageNum := 0 +// err := client.ListObjectsPages(params, +// func(page *ListObjectsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(p *ListObjectsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListObjectsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListObjectsOutput), lastPage) + }) +} + +const opListObjectsV2 = "ListObjectsV2" + +// ListObjectsV2Request generates a "aws/request.Request" representing the +// client's request for the ListObjectsV2 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListObjectsV2 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListObjectsV2Request method. 
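// The Paginator fields above can also be driven by hand: while IsTruncated is
// set, feed NextMarker (or, when NextMarker is absent, the last Contents key,
// as the OutputTokens expression notes) back in as Marker. A minimal sketch,
// assuming the client and imports from the earlier sketches.
func listAllKeys(svc *s3.S3) ([]string, error) {
	var keys []string
	input := &s3.ListObjectsInput{Bucket: aws.String("my-bucket")}
	for {
		out, err := svc.ListObjects(input)
		if err != nil {
			return nil, err
		}
		for _, obj := range out.Contents {
			keys = append(keys, aws.StringValue(obj.Key))
		}
		if !aws.BoolValue(out.IsTruncated) || len(out.Contents) == 0 {
			return keys, nil
		}
		marker := aws.StringValue(out.NextMarker) // set only when a Delimiter was used
		if marker == "" {
			marker = aws.StringValue(out.Contents[len(out.Contents)-1].Key)
		}
		input.Marker = aws.String(marker)
	}
}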
+// req, resp := client.ListObjectsV2Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Request, output *ListObjectsV2Output) { + op := &request.Operation{ + Name: opListObjectsV2, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?list-type=2", + Paginator: &request.Paginator{ + InputTokens: []string{"ContinuationToken"}, + OutputTokens: []string{"NextContinuationToken"}, + LimitToken: "MaxKeys", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListObjectsV2Input{} + } + + req = c.newRequest(op, input, output) + output = &ListObjectsV2Output{} + req.Data = output + return +} + +// Returns some or all (up to 1000) of the objects in a bucket. You can use +// the request parameters as selection criteria to return a subset of the objects +// in a bucket. Note: ListObjectsV2 is the revised List Objects API and we recommend +// you use this revised API for new application development. +func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, error) { + req, out := c.ListObjectsV2Request(input) + err := req.Send() + return out, err +} + +// ListObjectsV2Pages iterates over the pages of a ListObjectsV2 operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListObjectsV2 method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjectsV2 operation. +// pageNum := 0 +// err := client.ListObjectsV2Pages(params, +// func(page *ListObjectsV2Output, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListObjectsV2Pages(input *ListObjectsV2Input, fn func(p *ListObjectsV2Output, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListObjectsV2Request(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListObjectsV2Output), lastPage) + }) +} + +const opListParts = "ListParts" + +// ListPartsRequest generates a "aws/request.Request" representing the +// client's request for the ListParts operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListParts method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListPartsRequest method. 
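// A minimal sketch of a single ListObjectsV2 page; the revised API uses
// ContinuationToken/NextContinuationToken rather than markers and reports a
// KeyCount. Assumes the client and imports from the earlier sketches; the
// bucket and prefix are hypothetical.
func listLogsPage(svc *s3.S3) {
	out, err := svc.ListObjectsV2(&s3.ListObjectsV2Input{
		Bucket:  aws.String("my-bucket"),
		Prefix:  aws.String("logs/"),
		MaxKeys: aws.Int64(100),
	})
	if err == nil {
		fmt.Println(aws.Int64Value(out.KeyCount), "keys in this page")
	}
}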
+// req, resp := client.ListPartsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) { + op := &request.Operation{ + Name: opListParts, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}", + Paginator: &request.Paginator{ + InputTokens: []string{"PartNumberMarker"}, + OutputTokens: []string{"NextPartNumberMarker"}, + LimitToken: "MaxParts", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListPartsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListPartsOutput{} + req.Data = output + return +} + +// Lists the parts that have been uploaded for a specific multipart upload. +func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) { + req, out := c.ListPartsRequest(input) + err := req.Send() + return out, err +} + +// ListPartsPages iterates over the pages of a ListParts operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListParts method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListParts operation. +// pageNum := 0 +// err := client.ListPartsPages(params, +// func(page *ListPartsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListPartsPages(input *ListPartsInput, fn func(p *ListPartsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListPartsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListPartsOutput), lastPage) + }) +} + +const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration" + +// PutBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAccelerateConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketAccelerateConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketAccelerateConfigurationRequest method. 
+// req, resp := client.PutBucketAccelerateConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateConfigurationInput) (req *request.Request, output *PutBucketAccelerateConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketAccelerateConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?accelerate", + } + + if input == nil { + input = &PutBucketAccelerateConfigurationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketAccelerateConfigurationOutput{} + req.Data = output + return +} + +// Sets the accelerate configuration of an existing bucket. +func (c *S3) PutBucketAccelerateConfiguration(input *PutBucketAccelerateConfigurationInput) (*PutBucketAccelerateConfigurationOutput, error) { + req, out := c.PutBucketAccelerateConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketAcl = "PutBucketAcl" + +// PutBucketAclRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAcl operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketAcl method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketAclRequest method. +// req, resp := client.PutBucketAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request, output *PutBucketAclOutput) { + op := &request.Operation{ + Name: opPutBucketAcl, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?acl", + } + + if input == nil { + input = &PutBucketAclInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketAclOutput{} + req.Data = output + return +} + +// Sets the permissions on a bucket using access control lists (ACL). +func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) { + req, out := c.PutBucketAclRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketCors = "PutBucketCors" + +// PutBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketCors operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketCors method directly +// instead. 
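// A minimal sketch of the PutBucketAcl operation above with a canned ACL;
// explicit grants can be spelled out via the Grant* input fields instead.
// Assumes the client and imports from the earlier sketches; the bucket name
// is hypothetical.
func makeBucketPrivate(svc *s3.S3) error {
	_, err := svc.PutBucketAcl(&s3.PutBucketAclInput{
		Bucket: aws.String("my-bucket"),
		ACL:    aws.String("private"), // one of the canned ACLs
	})
	return err
}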
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketCorsRequest method. +// req, resp := client.PutBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Request, output *PutBucketCorsOutput) { + op := &request.Operation{ + Name: opPutBucketCors, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &PutBucketCorsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketCorsOutput{} + req.Data = output + return +} + +// Sets the cors configuration for a bucket. +func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) { + req, out := c.PutBucketCorsRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketLifecycle = "PutBucketLifecycle" + +// PutBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLifecycle operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketLifecycle method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketLifecycleRequest method. +// req, resp := client.PutBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *request.Request, output *PutBucketLifecycleOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, PutBucketLifecycle, has been deprecated") + } + op := &request.Operation{ + Name: opPutBucketLifecycle, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &PutBucketLifecycleInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketLifecycleOutput{} + req.Data = output + return +} + +// Deprecated, see the PutBucketLifecycleConfiguration operation. +func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) { + req, out := c.PutBucketLifecycleRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration" + +// PutBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLifecycleConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketLifecycleConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketLifecycleConfigurationRequest method. +// req, resp := client.PutBucketLifecycleConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketLifecycleConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &PutBucketLifecycleConfigurationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketLifecycleConfigurationOutput{} + req.Data = output + return +} + +// Sets lifecycle configuration for your bucket. If a lifecycle configuration +// exists, it replaces it. +func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) { + req, out := c.PutBucketLifecycleConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketLogging = "PutBucketLogging" + +// PutBucketLoggingRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLogging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketLogging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketLoggingRequest method. +// req, resp := client.PutBucketLoggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) { + op := &request.Operation{ + Name: opPutBucketLogging, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?logging", + } + + if input == nil { + input = &PutBucketLoggingInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketLoggingOutput{} + req.Data = output + return +} + +// Set the logging parameters for a bucket and specify permissions for who +// can view and modify the logging parameters. To set the logging status of +// a bucket, you must be the bucket owner.
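// A minimal sketch of the PutBucketLifecycleConfiguration operation described
// above, expiring objects under a hypothetical prefix after 7 days; assumes
// the client and imports from the earlier sketches.
func expireTmpObjects(svc *s3.S3) error {
	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("my-bucket"),
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{{
				ID:         aws.String("expire-tmp"),
				Prefix:     aws.String("tmp/"),
				Status:     aws.String("Enabled"),
				Expiration: &s3.LifecycleExpiration{Days: aws.Int64(7)},
			}},
		},
	})
	return err
}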
+func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) { + req, out := c.PutBucketLoggingRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketNotification = "PutBucketNotification" + +// PutBucketNotificationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketNotification operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketNotification method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketNotificationRequest method. +// req, resp := client.PutBucketNotificationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *request.Request, output *PutBucketNotificationOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, PutBucketNotification, has been deprecated") + } + op := &request.Operation{ + Name: opPutBucketNotification, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &PutBucketNotificationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketNotificationOutput{} + req.Data = output + return +} + +// Deprecated, see the PutBucketNotificationConfiguration operation. +func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) { + req, out := c.PutBucketNotificationRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration" + +// PutBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketNotificationConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketNotificationConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketNotificationConfigurationRequest method.
+// req, resp := client.PutBucketNotificationConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *request.Request, output *PutBucketNotificationConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketNotificationConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &PutBucketNotificationConfigurationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketNotificationConfigurationOutput{} + req.Data = output + return +} + +// Enables notifications of specified events for a bucket. +func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) { + req, out := c.PutBucketNotificationConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketPolicy = "PutBucketPolicy" + +// PutBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketPolicyRequest method. +// req, resp := client.PutBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) { + op := &request.Operation{ + Name: opPutBucketPolicy, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &PutBucketPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketPolicyOutput{} + req.Data = output + return +} + +// Replaces a policy on a bucket. If the bucket already has a policy, the one +// in this request completely replaces it. +func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) { + req, out := c.PutBucketPolicyRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketReplication = "PutBucketReplication" + +// PutBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketReplication operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
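// A minimal sketch of the PutBucketPolicy operation above; the policy document
// is a raw JSON string, and this public-read statement for a hypothetical
// bucket is illustrative only. Assumes the client and imports from the earlier
// sketches.
func allowPublicRead(svc *s3.S3) error {
	policy := `{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Sid": "PublicRead",
	    "Effect": "Allow",
	    "Principal": "*",
	    "Action": "s3:GetObject",
	    "Resource": "arn:aws:s3:::my-bucket/*"
	  }]
	}`
	_, err := svc.PutBucketPolicy(&s3.PutBucketPolicyInput{
		Bucket: aws.String("my-bucket"),
		Policy: aws.String(policy),
	})
	return err
}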
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketReplication method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketReplicationRequest method. +// req, resp := client.PutBucketReplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *request.Request, output *PutBucketReplicationOutput) { + op := &request.Operation{ + Name: opPutBucketReplication, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &PutBucketReplicationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketReplicationOutput{} + req.Data = output + return +} + +// Creates a new replication configuration (or replaces an existing one, if +// present). +func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) { + req, out := c.PutBucketReplicationRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketRequestPayment = "PutBucketRequestPayment" + +// PutBucketRequestPaymentRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketRequestPayment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketRequestPayment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketRequestPaymentRequest method. +// req, resp := client.PutBucketRequestPaymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *request.Request, output *PutBucketRequestPaymentOutput) { + op := &request.Operation{ + Name: opPutBucketRequestPayment, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?requestPayment", + } + + if input == nil { + input = &PutBucketRequestPaymentInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketRequestPaymentOutput{} + req.Data = output + return +} + +// Sets the request payment configuration for a bucket. By default, the bucket +// owner pays for downloads from the bucket. This configuration parameter enables +// the bucket owner (only) to specify that the person requesting the download +// will be charged for the download. 
Documentation on requester pays buckets +// can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html +func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) { + req, out := c.PutBucketRequestPaymentRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketTagging = "PutBucketTagging" + +// PutBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketTagging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketTagging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketTaggingRequest method. +// req, resp := client.PutBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request.Request, output *PutBucketTaggingOutput) { + op := &request.Operation{ + Name: opPutBucketTagging, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &PutBucketTaggingInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketTaggingOutput{} + req.Data = output + return +} + +// Sets the tags for a bucket. +func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) { + req, out := c.PutBucketTaggingRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketVersioning = "PutBucketVersioning" + +// PutBucketVersioningRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketVersioning operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketVersioning method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketVersioningRequest method. 
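// A minimal sketch of the PutBucketTagging operation above; note that the tag
// set replaces any existing tags rather than merging with them. Assumes the
// client and imports from the earlier sketches; tag names and values are
// hypothetical.
func tagBucket(svc *s3.S3) error {
	_, err := svc.PutBucketTagging(&s3.PutBucketTaggingInput{
		Bucket: aws.String("my-bucket"),
		Tagging: &s3.Tagging{
			TagSet: []*s3.Tag{
				{Key: aws.String("env"), Value: aws.String("dev")},
				{Key: aws.String("team"), Value: aws.String("platform")},
			},
		},
	})
	return err
}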
+// req, resp := client.PutBucketVersioningRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *request.Request, output *PutBucketVersioningOutput) { + op := &request.Operation{ + Name: opPutBucketVersioning, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?versioning", + } + + if input == nil { + input = &PutBucketVersioningInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketVersioningOutput{} + req.Data = output + return +} + +// Sets the versioning state of an existing bucket. To set the versioning state, +// you must be the bucket owner. +func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) { + req, out := c.PutBucketVersioningRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketWebsite = "PutBucketWebsite" + +// PutBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketWebsite operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketWebsite method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketWebsiteRequest method. +// req, resp := client.PutBucketWebsiteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request.Request, output *PutBucketWebsiteOutput) { + op := &request.Operation{ + Name: opPutBucketWebsite, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &PutBucketWebsiteInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketWebsiteOutput{} + req.Data = output + return +} + +// Set the website configuration for a bucket. +func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) { + req, out := c.PutBucketWebsiteRequest(input) + err := req.Send() + return out, err +} + +const opPutObject = "PutObject" + +// PutObjectRequest generates a "aws/request.Request" representing the +// client's request for the PutObject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutObject method directly +// instead. 
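// A minimal sketch of the PutObject operation described above; Body must be an
// io.ReadSeeker so the SDK can sign it and rewind on retry, which
// bytes.NewReader satisfies. Assumes the client and imports from the earlier
// sketches plus "bytes"; the bucket and key are hypothetical.
func uploadGreeting(svc *s3.S3) error {
	_, err := svc.PutObject(&s3.PutObjectInput{
		Bucket:      aws.String("my-bucket"),
		Key:         aws.String("hello.txt"),
		Body:        bytes.NewReader([]byte("hello, world\n")),
		ContentType: aws.String("text/plain"),
	})
	return err
}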
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the PutObjectRequest method.
+// req, resp := client.PutObjectRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) {
+ op := &request.Operation{
+ Name: opPutObject,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &PutObjectInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &PutObjectOutput{}
+ req.Data = output
+ return
+}
+
+// Adds an object to a bucket.
+func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) {
+ req, out := c.PutObjectRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opPutObjectAcl = "PutObjectAcl"
+
+// PutObjectAclRequest generates a "aws/request.Request" representing the
+// client's request for the PutObjectAcl operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutObjectAcl method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the PutObjectAclRequest method.
+// req, resp := client.PutObjectAclRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) {
+ op := &request.Operation{
+ Name: opPutObjectAcl,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}/{Key+}?acl",
+ }
+
+ if input == nil {
+ input = &PutObjectAclInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &PutObjectAclOutput{}
+ req.Data = output
+ return
+}
+
+// Uses the acl subresource to set the access control list (ACL) permissions
+// for an object that already exists in a bucket.
+func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) {
+ req, out := c.PutObjectAclRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opRestoreObject = "RestoreObject"
+
+// RestoreObjectRequest generates a "aws/request.Request" representing the
+// client's request for the RestoreObject operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the RestoreObject method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the RestoreObjectRequest method.
+// req, resp := client.RestoreObjectRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) {
+ op := &request.Operation{
+ Name: opRestoreObject,
+ HTTPMethod: "POST",
+ HTTPPath: "/{Bucket}/{Key+}?restore",
+ }
+
+ if input == nil {
+ input = &RestoreObjectInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &RestoreObjectOutput{}
+ req.Data = output
+ return
+}
+
+// Restores an archived copy of an object back into Amazon S3.
+func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) {
+ req, out := c.RestoreObjectRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opUploadPart = "UploadPart"
+
+// UploadPartRequest generates a "aws/request.Request" representing the
+// client's request for the UploadPart operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the UploadPart method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the UploadPartRequest method.
+// req, resp := client.UploadPartRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) {
+ op := &request.Operation{
+ Name: opUploadPart,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &UploadPartInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &UploadPartOutput{}
+ req.Data = output
+ return
+}
+
+// Uploads a part in a multipart upload.
+//
+// Note: After you initiate a multipart upload and upload one or more parts,
+// you must either complete or abort the multipart upload in order to stop
+// being charged for storage of the uploaded parts. Only after you complete
+// or abort the multipart upload does Amazon S3 free up the parts storage and
+// stop charging you for it.
+func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) {
+ req, out := c.UploadPartRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opUploadPartCopy = "UploadPartCopy"
+
+// UploadPartCopyRequest generates a "aws/request.Request" representing the
+// client's request for the UploadPartCopy operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the UploadPartCopy method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the UploadPartCopyRequest method.
+// req, resp := client.UploadPartCopyRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) {
+ op := &request.Operation{
+ Name: opUploadPartCopy,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &UploadPartCopyInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &UploadPartCopyOutput{}
+ req.Data = output
+ return
+}
+
+// Uploads a part by copying data from an existing object as the data source.
+func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) {
+ req, out := c.UploadPartCopyRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// Specifies the number of days since the initiation of an Incomplete Multipart
+// Upload that Lifecycle will wait before permanently removing all parts of
+// the upload.
+type AbortIncompleteMultipartUpload struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates the number of days that must pass since initiation for Lifecycle
+ // to abort an Incomplete Multipart Upload.
+ DaysAfterInitiation *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s AbortIncompleteMultipartUpload) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AbortIncompleteMultipartUpload) GoString() string {
+ return s.String()
+}
+
+type AbortMultipartUploadInput struct {
+ _ struct{} `type:"structure"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AbortMultipartUploadInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AbortMultipartUploadInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AbortMultipartUploadInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AbortMultipartUploadInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.UploadId == nil {
+ invalidParams.Add(request.NewErrParamRequired("UploadId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type AbortMultipartUploadOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s AbortMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortMultipartUploadOutput) GoString() string { + return s.String() +} + +type AccelerateConfiguration struct { + _ struct{} `type:"structure"` + + // The accelerate configuration of the bucket. + Status *string `type:"string" enum:"BucketAccelerateStatus"` +} + +// String returns the string representation +func (s AccelerateConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccelerateConfiguration) GoString() string { + return s.String() +} + +type AccessControlPolicy struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s AccessControlPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessControlPolicy) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AccessControlPolicy) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AccessControlPolicy"} + if s.Grants != nil { + for i, v := range s.Grants { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Grants", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type Bucket struct { + _ struct{} `type:"structure"` + + // Date the bucket was created. + CreationDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The name of the bucket. + Name *string `type:"string"` +} + +// String returns the string representation +func (s Bucket) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Bucket) GoString() string { + return s.String() +} + +type BucketLifecycleConfiguration struct { + _ struct{} `type:"structure"` + + Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s BucketLifecycleConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketLifecycleConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
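+// Validation is recursive: a nil Rules slice is rejected as a missing required
+// parameter, and each non-nil LifecycleRule is validated in turn, with nested
+// failures reported under an indexed context such as "Rules[0]".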
+func (s *BucketLifecycleConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BucketLifecycleConfiguration"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type BucketLoggingStatus struct { + _ struct{} `type:"structure"` + + LoggingEnabled *LoggingEnabled `type:"structure"` +} + +// String returns the string representation +func (s BucketLoggingStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketLoggingStatus) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BucketLoggingStatus) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BucketLoggingStatus"} + if s.LoggingEnabled != nil { + if err := s.LoggingEnabled.Validate(); err != nil { + invalidParams.AddNested("LoggingEnabled", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CORSConfiguration struct { + _ struct{} `type:"structure"` + + CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s CORSConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CORSConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CORSConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CORSConfiguration"} + if s.CORSRules == nil { + invalidParams.Add(request.NewErrParamRequired("CORSRules")) + } + if s.CORSRules != nil { + for i, v := range s.CORSRules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CORSRules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CORSRule struct { + _ struct{} `type:"structure"` + + // Specifies which headers are allowed in a pre-flight OPTIONS request. + AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"` + + // Identifies HTTP methods that the domain/origin specified in the rule is allowed + // to execute. + AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true" required:"true"` + + // One or more origins you want customers to be able to access the bucket from. + AllowedOrigins []*string `locationName:"AllowedOrigin" type:"list" flattened:"true" required:"true"` + + // One or more headers in the response that you want customers to be able to + // access from their applications (for example, from a JavaScript XMLHttpRequest + // object). + ExposeHeaders []*string `locationName:"ExposeHeader" type:"list" flattened:"true"` + + // The time in seconds that your browser is to cache the preflight response + // for the specified resource. 
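+ // For example, a MaxAgeSeconds of 3000 lets the browser reuse the cached
+ // preflight response for 50 minutes.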
+ MaxAgeSeconds *int64 `type:"integer"` +} + +// String returns the string representation +func (s CORSRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CORSRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CORSRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CORSRule"} + if s.AllowedMethods == nil { + invalidParams.Add(request.NewErrParamRequired("AllowedMethods")) + } + if s.AllowedOrigins == nil { + invalidParams.Add(request.NewErrParamRequired("AllowedOrigins")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CloudFunctionConfiguration struct { + _ struct{} `type:"structure"` + + CloudFunction *string `type:"string"` + + // Bucket event for which to send notifications. + Event *string `deprecated:"true" type:"string" enum:"Event"` + + Events []*string `locationName:"Event" type:"list" flattened:"true"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + InvocationRole *string `type:"string"` +} + +// String returns the string representation +func (s CloudFunctionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloudFunctionConfiguration) GoString() string { + return s.String() +} + +type CommonPrefix struct { + _ struct{} `type:"structure"` + + Prefix *string `type:"string"` +} + +// String returns the string representation +func (s CommonPrefix) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CommonPrefix) GoString() string { + return s.String() +} + +type CompleteMultipartUploadInput struct { + _ struct{} `type:"structure" payload:"MultipartUpload"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CompleteMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteMultipartUploadInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
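+// Bucket, a non-empty Key, and UploadId are required. A sketch of a typical
+// call that satisfies them (the names and part data here are illustrative
+// assumptions):
+//
+//    out, err := svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
+//        Bucket:   aws.String("example-bucket"),
+//        Key:      aws.String("example-key"),
+//        UploadId: uploadID, // returned by CreateMultipartUpload
+//        MultipartUpload: &s3.CompletedMultipartUpload{
+//            Parts: []*s3.CompletedPart{
+//                {ETag: partETag, PartNumber: aws.Int64(1)}, // from UploadPart
+//            },
+//        },
+//    })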
+func (s *CompleteMultipartUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CompleteMultipartUploadInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CompleteMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + Bucket *string `type:"string"` + + // Entity tag of the object. + ETag *string `type:"string"` + + // If the object expiration is configured, this will contain the expiration + // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + Key *string `min:"1" type:"string"` + + Location *string `type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s CompleteMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteMultipartUploadOutput) GoString() string { + return s.String() +} + +type CompletedMultipartUpload struct { + _ struct{} `type:"structure"` + + Parts []*CompletedPart `locationName:"Part" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s CompletedMultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompletedMultipartUpload) GoString() string { + return s.String() +} + +type CompletedPart struct { + _ struct{} `type:"structure"` + + // Entity tag returned when the part was uploaded. + ETag *string `type:"string"` + + // Part number that identifies the part. This is a positive integer between + // 1 and 10,000. + PartNumber *int64 `type:"integer"` +} + +// String returns the string representation +func (s CompletedPart) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompletedPart) GoString() string { + return s.String() +} + +type Condition struct { + _ struct{} `type:"structure"` + + // The HTTP error code when the redirect is applied. In the event of an error, + // if the error code equals this value, then the specified redirect is applied. + // Required when parent element Condition is specified and sibling KeyPrefixEquals + // is not specified. If both are specified, then both must be true for the redirect + // to be applied. 
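+ // For example, a value of "404" applies the redirect only when the requested
+ // object is not found.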
+ HttpErrorCodeReturnedEquals *string `type:"string"`
+
+ // The object key name prefix when the redirect is applied. For example, to
+ // redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html.
+ // To redirect requests for all pages with the prefix docs/, the key prefix will
+ // be docs/, which identifies all objects in the docs/ folder. Required when
+ // the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals
+ // is not specified. If both conditions are specified, both must be true for
+ // the redirect to be applied.
+ KeyPrefixEquals *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Condition) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Condition) GoString() string {
+ return s.String()
+}
+
+type CopyObjectInput struct {
+ _ struct{} `type:"structure"`
+
+ // The canned ACL to apply to the object.
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+ // Specifies what content encodings have been applied to the object and thus
+ // what decoding mechanisms must be applied to obtain the media-type referenced
+ // by the Content-Type header field.
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+ // The language the content is in.
+ ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+ // The name of the source bucket and key name of the source object, separated
+ // by a slash (/). Must be URL-encoded.
+ CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"`
+
+ // Copies the object if its entity tag (ETag) matches the specified tag.
+ CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"`
+
+ // Copies the object if it has been modified since the specified time.
+ CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Copies the object if its entity tag (ETag) is different from the specified
+ // ETag.
+ CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"`
+
+ // Copies the object if it hasn't been modified since the specified time.
+ CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Specifies the algorithm to use when decrypting the source object (e.g., AES256).
+ CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
+ // the source object.
The encryption key provided in this header must be one
+ // that was used when the source object was created.
+ CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to read the object data and its metadata.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the object ACL.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable object.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // Specifies whether the metadata is copied from the source object or replaced
+ // with metadata provided in the request.
+ MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string" enum:"MetadataDirective"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+ // requests for an object protected by AWS KMS will fail if not made via SSL
+ // or using SigV4. 
Documentation on configuring any of the officially supported + // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // The type of storage to use for the object. Defaults to 'STANDARD'. + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s CopyObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CopyObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CopyObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.CopySource == nil { + invalidParams.Add(request.NewErrParamRequired("CopySource")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CopyObjectOutput struct { + _ struct{} `type:"structure" payload:"CopyObjectResult"` + + CopyObjectResult *CopyObjectResult `type:"structure"` + + CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` + + // If the object expiration is configured, the response includes this header. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. 
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version ID of the newly created copy. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s CopyObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyObjectOutput) GoString() string { + return s.String() +} + +type CopyObjectResult struct { + _ struct{} `type:"structure"` + + ETag *string `type:"string"` + + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s CopyObjectResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyObjectResult) GoString() string { + return s.String() +} + +type CopyPartResult struct { + _ struct{} `type:"structure"` + + // Entity tag of the object. + ETag *string `type:"string"` + + // Date and time at which the object was uploaded. + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s CopyPartResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyPartResult) GoString() string { + return s.String() +} + +type CreateBucketConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies the region where the bucket will be created. If you don't specify + // a region, the bucket will be created in US Standard. + LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` +} + +// String returns the string representation +func (s CreateBucketConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketConfiguration) GoString() string { + return s.String() +} + +type CreateBucketInput struct { + _ struct{} `type:"structure" payload:"CreateBucketConfiguration"` + + // The canned ACL to apply to the bucket. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. 
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` +} + +// String returns the string representation +func (s CreateBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateBucketOutput struct { + _ struct{} `type:"structure"` + + Location *string `location:"header" locationName:"Location" type:"string"` +} + +// String returns the string representation +func (s CreateBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketOutput) GoString() string { + return s.String() +} + +type CreateMultipartUploadInput struct { + _ struct{} `type:"structure"` + + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to write the ACL for the applicable object. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. 
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+ // requests for an object protected by AWS KMS will fail if not made via SSL
+ // or using SigV4. Documentation on configuring any of the officially supported
+ // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // The type of storage to use for the object. Defaults to 'STANDARD'.
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata.
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateMultipartUploadInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateMultipartUploadInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateMultipartUploadInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateMultipartUploadInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type CreateMultipartUploadOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Date when the multipart upload will become eligible for an abort operation
+ // by lifecycle.
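+ // The date is derived from the bucket's lifecycle configuration; the rule
+ // that applies is identified by AbortRuleId below.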
+ AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"` + + // Id of the lifecycle rule that makes a multipart upload eligible for abort + // operation. + AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` + + // Name of the bucket to which the multipart upload was initiated. + Bucket *string `locationName:"Bucket" type:"string"` + + // Object key for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // ID for the initiated multipart upload. + UploadId *string `type:"string"` +} + +// String returns the string representation +func (s CreateMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateMultipartUploadOutput) GoString() string { + return s.String() +} + +type Delete struct { + _ struct{} `type:"structure"` + + Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"` + + // Element to enable quiet mode for the request. When you add this element, + // you must set its value to true. + Quiet *bool `type:"boolean"` +} + +// String returns the string representation +func (s Delete) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Delete) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
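+// Objects is required, and each ObjectIdentifier in it is validated in turn.
+// A sketch of a quiet batch delete that satisfies this (bucket and key names
+// are illustrative assumptions):
+//
+//    out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
+//        Bucket: aws.String("example-bucket"),
+//        Delete: &s3.Delete{
+//            Objects: []*s3.ObjectIdentifier{{Key: aws.String("old-key")}},
+//            Quiet:   aws.Bool(true),
+//        },
+//    })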
+func (s *Delete) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Delete"} + if s.Objects == nil { + invalidParams.Add(request.NewErrParamRequired("Objects")) + } + if s.Objects != nil { + for i, v := range s.Objects { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Objects", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteBucketCorsInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketCorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteBucketCorsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketCorsOutput) GoString() string { + return s.String() +} + +type DeleteBucketInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteBucketLifecycleInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketLifecycleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteBucketLifecycleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketLifecycleOutput) GoString() string { + return s.String() +} + +type DeleteBucketOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketOutput) GoString() string { + return s.String() +} + +type DeleteBucketPolicyInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketPolicyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteBucketPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketPolicyOutput) GoString() string { + return s.String() +} + +type DeleteBucketReplicationInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketReplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketReplicationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteBucketReplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketReplicationOutput) GoString() string { + return s.String() +} + +type DeleteBucketTaggingInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteBucketTaggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketTaggingOutput) GoString() string { + return s.String() +} + +type DeleteBucketWebsiteInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketWebsiteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketWebsiteInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteBucketWebsiteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketWebsiteOutput) GoString() string { + return s.String() +} + +type DeleteMarkerEntry struct { + _ struct{} `type:"structure"` + + // Specifies whether the object is (true) or is not (false) the latest version + // of an object. + IsLatest *bool `type:"boolean"` + + // The object key. + Key *string `min:"1" type:"string"` + + // Date and time the object was last modified. + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + Owner *Owner `type:"structure"` + + // Version ID of an object. 
+ VersionId *string `type:"string"` +} + +// String returns the string representation +func (s DeleteMarkerEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMarkerEntry) GoString() string { + return s.String() +} + +type DeleteObjectInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. + MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteObjectOutput struct { + _ struct{} `type:"structure"` + + // Specifies whether the versioned object that was permanently deleted was (true) + // or was not (false) a delete marker. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Returns the version ID of the delete marker created as a result of the DELETE + // operation. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectOutput) GoString() string { + return s.String() +} + +type DeleteObjectsInput struct { + _ struct{} `type:"structure" payload:"Delete"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Delete *Delete `locationName:"Delete" type:"structure" required:"true"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. 
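+ // The MFA value is required to permanently delete a versioned object if the
+ // bucket's versioning configuration has MFA Delete enabled.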
+ MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation +func (s DeleteObjectsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteObjectsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteObjectsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Delete == nil { + invalidParams.Add(request.NewErrParamRequired("Delete")) + } + if s.Delete != nil { + if err := s.Delete.Validate(); err != nil { + invalidParams.AddNested("Delete", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteObjectsOutput struct { + _ struct{} `type:"structure"` + + Deleted []*DeletedObject `type:"list" flattened:"true"` + + Errors []*Error `locationName:"Error" type:"list" flattened:"true"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s DeleteObjectsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectsOutput) GoString() string { + return s.String() +} + +type DeletedObject struct { + _ struct{} `type:"structure"` + + DeleteMarker *bool `type:"boolean"` + + DeleteMarkerVersionId *string `type:"string"` + + Key *string `min:"1" type:"string"` + + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s DeletedObject) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletedObject) GoString() string { + return s.String() +} + +type Destination struct { + _ struct{} `type:"structure"` + + // Amazon resource name (ARN) of the bucket where you want Amazon S3 to store + // replicas of the object identified by the rule. + Bucket *string `type:"string" required:"true"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"StorageClass"` +} + +// String returns the string representation +func (s Destination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Destination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
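+
+// Editorial sketch, not generated code: DeleteObjectsInput carries its keys
+// in the nested Delete payload, and Validate recurses into it via AddNested
+// (see above). ObjectIdentifier is defined elsewhere in this file; svc is an
+// assumed, already-configured *S3 client.
+//
+//    bucket, key := "my-bucket", "stale/object"
+//    input := &DeleteObjectsInput{
+//        Bucket: &bucket,
+//        Delete: &Delete{Objects: []*ObjectIdentifier{{Key: &key}}},
+//    }
+//    if err := input.Validate(); err == nil {
+//        out, _ := svc.DeleteObjects(input)
+//        _ = out // out.Deleted and out.Errors report per-key results
+//    }
+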
+func (s *Destination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Destination"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type Error struct { + _ struct{} `type:"structure"` + + Code *string `type:"string"` + + Key *string `min:"1" type:"string"` + + Message *string `type:"string"` + + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s Error) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Error) GoString() string { + return s.String() +} + +type ErrorDocument struct { + _ struct{} `type:"structure"` + + // The object key name to use when a 4XX class error occurs. + Key *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ErrorDocument) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ErrorDocument) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ErrorDocument) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ErrorDocument"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Container for key value pair that defines the criteria for the filter rule. +type FilterRule struct { + _ struct{} `type:"structure"` + + // Object key name prefix or suffix identifying one or more objects to which + // the filtering rule applies. Maximum prefix length can be up to 1,024 characters. + // Overlapping prefixes and suffixes are not supported. For more information, + // go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + Name *string `type:"string" enum:"FilterRuleName"` + + Value *string `type:"string"` +} + +// String returns the string representation +func (s FilterRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FilterRule) GoString() string { + return s.String() +} + +type GetBucketAccelerateConfigurationInput struct { + _ struct{} `type:"structure"` + + // Name of the bucket for which the accelerate configuration is retrieved. + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketAccelerateConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAccelerateConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketAccelerateConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketAccelerateConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetBucketAccelerateConfigurationOutput struct { + _ struct{} `type:"structure"` + + // The accelerate configuration of the bucket. 
+ Status *string `type:"string" enum:"BucketAccelerateStatus"` +} + +// String returns the string representation +func (s GetBucketAccelerateConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAccelerateConfigurationOutput) GoString() string { + return s.String() +} + +type GetBucketAclInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetBucketAclOutput struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s GetBucketAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAclOutput) GoString() string { + return s.String() +} + +type GetBucketCorsInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketCorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetBucketCorsOutput struct { + _ struct{} `type:"structure"` + + CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketCorsOutput) GoString() string { + return s.String() +} + +type GetBucketLifecycleConfigurationInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
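+
+// Editorial sketch, not generated code: the GetBucket* calls share this
+// shape, a one-field input naming the bucket and an output mirroring the
+// response XML. Reading a bucket ACL, with svc an assumed *S3 client:
+//
+//    bucket := "my-bucket"
+//    out, err := svc.GetBucketAcl(&GetBucketAclInput{Bucket: &bucket})
+//    if err == nil {
+//        for _, g := range out.Grants {
+//            _ = g.Permission // e.g. "FULL_CONTROL", "READ"
+//        }
+//    }
+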
+func (s *GetBucketLifecycleConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetBucketLifecycleConfigurationOutput struct { + _ struct{} `type:"structure"` + + Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleConfigurationOutput) GoString() string { + return s.String() +} + +type GetBucketLifecycleInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetBucketLifecycleOutput struct { + _ struct{} `type:"structure"` + + Rules []*Rule `locationName:"Rule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleOutput) GoString() string { + return s.String() +} + +type GetBucketLocationInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLocationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLocationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetBucketLocationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLocationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetBucketLocationOutput struct { + _ struct{} `type:"structure"` + + LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` +} + +// String returns the string representation +func (s GetBucketLocationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLocationOutput) GoString() string { + return s.String() +} + +type GetBucketLoggingInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLoggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLoggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLoggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLoggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetBucketLoggingOutput struct { + _ struct{} `type:"structure"` + + LoggingEnabled *LoggingEnabled `type:"structure"` +} + +// String returns the string representation +func (s GetBucketLoggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLoggingOutput) GoString() string { + return s.String() +} + +type GetBucketNotificationConfigurationRequest struct { + _ struct{} `type:"structure"` + + // Name of the bucket to get the notification configuration for. + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketNotificationConfigurationRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketNotificationConfigurationRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketNotificationConfigurationRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketNotificationConfigurationRequest"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetBucketPolicyInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketPolicyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetBucketPolicyOutput struct { + _ struct{} `type:"structure" payload:"Policy"` + + // The bucket policy as a JSON document. + Policy *string `type:"string"` +} + +// String returns the string representation +func (s GetBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketPolicyOutput) GoString() string { + return s.String() +} + +type GetBucketReplicationInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketReplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketReplicationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetBucketReplicationOutput struct { + _ struct{} `type:"structure" payload:"ReplicationConfiguration"` + + // Container for replication rules. You can add as many as 1,000 rules. Total + // replication configuration size can be up to 2 MB. + ReplicationConfiguration *ReplicationConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketReplicationOutput) GoString() string { + return s.String() +} + +type GetBucketRequestPaymentInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketRequestPaymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketRequestPaymentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketRequestPaymentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketRequestPaymentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetBucketRequestPaymentOutput struct { + _ struct{} `type:"structure"` + + // Specifies who pays for the download and request fees. 
+ Payer *string `type:"string" enum:"Payer"` +} + +// String returns the string representation +func (s GetBucketRequestPaymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketRequestPaymentOutput) GoString() string { + return s.String() +} + +type GetBucketTaggingInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetBucketTaggingOutput struct { + _ struct{} `type:"structure"` + + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` +} + +// String returns the string representation +func (s GetBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketTaggingOutput) GoString() string { + return s.String() +} + +type GetBucketVersioningInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketVersioningInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketVersioningInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketVersioningInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketVersioningInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetBucketVersioningOutput struct { + _ struct{} `type:"structure"` + + // Specifies whether MFA delete is enabled in the bucket versioning configuration. + // This element is only returned if the bucket has been configured with MFA + // delete. If the bucket has never been so configured, this element is not returned. + MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADeleteStatus"` + + // The versioning state of the bucket. 
+ Status *string `type:"string" enum:"BucketVersioningStatus"` +} + +// String returns the string representation +func (s GetBucketVersioningOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketVersioningOutput) GoString() string { + return s.String() +} + +type GetBucketWebsiteInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketWebsiteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketWebsiteInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetBucketWebsiteOutput struct { + _ struct{} `type:"structure"` + + ErrorDocument *ErrorDocument `type:"structure"` + + IndexDocument *IndexDocument `type:"structure"` + + RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + + RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` +} + +// String returns the string representation +func (s GetBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketWebsiteOutput) GoString() string { + return s.String() +} + +type GetObjectAclInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetObjectAclOutput struct { + _ struct{} `type:"structure"` + + // A list of grants. 
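+
+ // Editorial sketch, not generated code: GetBucketVersioningOutput above
+ // distinguishes a bucket that has never been configured (nil fields) from
+ // one that is Enabled or Suspended, and MFADelete only appears once it has
+ // ever been set. svc is an assumed *S3 client.
+ //
+ //    bucket := "my-bucket"
+ //    out, err := svc.GetBucketVersioning(&GetBucketVersioningInput{Bucket: &bucket})
+ //    if err == nil && out.Status != nil && *out.Status == "Enabled" {
+ //        // noncurrent versions and delete markers are being retained
+ //    }
+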
+ Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + Owner *Owner `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s GetObjectAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectAclOutput) GoString() string { + return s.String() +} + +type GetObjectInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Return the object only if its entity tag (ETag) is the same as the one specified, + // otherwise return a 412 (precondition failed). + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` + + // Return the object only if it has been modified since the specified time, + // otherwise return a 304 (not modified). + IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"` + + // Return the object only if its entity tag (ETag) is different from the one + // specified, otherwise return a 304 (not modified). + IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"` + + // Return the object only if it has not been modified since the specified time, + // otherwise return a 412 (precondition failed). + IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Downloads the specified range bytes of an object. For more information about + // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. + Range *string `location:"header" locationName:"Range" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Sets the Cache-Control header of the response. + ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"` + + // Sets the Content-Disposition header of the response + ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"` + + // Sets the Content-Encoding header of the response. + ResponseContentEncoding *string `location:"querystring" locationName:"response-content-encoding" type:"string"` + + // Sets the Content-Language header of the response. + ResponseContentLanguage *string `location:"querystring" locationName:"response-content-language" type:"string"` + + // Sets the Content-Type header of the response. + ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"` + + // Sets the Expires header of the response. 
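+
+ // Editorial sketch, not generated code: the If* fields above map directly
+ // onto HTTP conditional headers, so a cache can revalidate an ETag with
+ // IfNoneMatch. svc is an assumed *S3 client; on 304 Not Modified the SDK is
+ // expected to surface an error (an awserr.RequestFailure with status 304)
+ // rather than a body.
+ //
+ //    bucket, key := "my-bucket", "report.csv"
+ //    etag := "\"9b2cf535f27731c974343645a3985328\""
+ //    _, err := svc.GetObject(&GetObjectInput{Bucket: &bucket, Key: &key, IfNoneMatch: &etag})
+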
+ ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"iso8601"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s GetObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetObjectInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetObjectInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type GetObjectOutput struct {
+ _ struct{} `type:"structure" payload:"Body"`
+
+ AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"`
+
+ // Object data.
+ Body io.ReadCloser `type:"blob"`
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+ // Specifies what content encodings have been applied to the object and thus
+ // what decoding mechanisms must be applied to obtain the media-type referenced
+ // by the Content-Type header field.
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+ // The language the content is in.
+ ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+ // Size of the body in bytes.
+ ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
+
+ // The portion of the object returned in the response.
+ ContentRange *string `location:"header" locationName:"Content-Range" type:"string"`
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+ // Specifies whether the object retrieved was (true) or was not (false) a Delete
+ // Marker. If false, this response header does not appear in the response.
+ DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`
+
+ // An ETag is an opaque identifier assigned by a web server to a specific version
+ // of a resource found at a URL
+ ETag *string `location:"header" locationName:"ETag" type:"string"`
+
+ // If the object expiration is configured (see PUT Bucket lifecycle), the response
+ // includes this header. It includes the expiry-date and rule-id key value pairs
+ // providing object expiration information. The value of the rule-id is URL
+ // encoded.
+ Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *string `location:"header" locationName:"Expires" type:"string"`
+
+ // Last modified date of the object
+ LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // This is set to the number of metadata entries not returned in x-amz-meta
+ // headers. This can happen if you create metadata using an API like SOAP that
+ // supports more flexible metadata than the REST API. For example, using SOAP,
+ // you can create metadata whose values are not legal HTTP headers.
+ MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"`
+
+ ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // Provides information about object restoration operation and expiration time
+ // of the restored object copy.
+ Restore *string `location:"header" locationName:"x-amz-restore" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, specifies the ID of the AWS Key Management Service (KMS) master
+ // encryption key that was used for the object.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+ // Version of the object.
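+
+ // Editorial sketch, not generated code: Body above is an io.ReadCloser
+ // streaming the object (or just the requested Range), so callers must drain
+ // and close it. svc is an assumed *S3 client; ioutil is io/ioutil.
+ //
+ //    bucket, key, rng := "my-bucket", "logs/2017-07-26.gz", "bytes=0-1023"
+ //    out, err := svc.GetObject(&GetObjectInput{Bucket: &bucket, Key: &key, Range: &rng})
+ //    if err == nil {
+ //        defer out.Body.Close()
+ //        data, _ := ioutil.ReadAll(out.Body) // first KiB only, per Range
+ //        _ = data
+ //    }
+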
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s GetObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectOutput) GoString() string { + return s.String() +} + +type GetObjectTorrentInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation +func (s GetObjectTorrentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTorrentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectTorrentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectTorrentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetObjectTorrentOutput struct { + _ struct{} `type:"structure" payload:"Body"` + + Body io.ReadCloser `type:"blob"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s GetObjectTorrentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTorrentOutput) GoString() string { + return s.String() +} + +type Grant struct { + _ struct{} `type:"structure"` + + Grantee *Grantee `type:"structure"` + + // Specifies the permission given to the grantee. + Permission *string `type:"string" enum:"Permission"` +} + +// String returns the string representation +func (s Grant) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Grant) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
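+
+// Editorial sketch, not generated code: Grantee below is the one type here
+// serialized with an xsi:type XML attribute, and Type is its only required
+// field, which its Validate method enforces:
+//
+//    email, typ := "user@example.com", "AmazonCustomerByEmail"
+//    g := &Grantee{EmailAddress: &email, Type: &typ}
+//    if err := g.Validate(); err != nil {
+//        // unreachable here: Type is set
+//    }
+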
+func (s *Grant) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Grant"} + if s.Grantee != nil { + if err := s.Grantee.Validate(); err != nil { + invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type Grantee struct { + _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` + + // Screen name of the grantee. + DisplayName *string `type:"string"` + + // Email address of the grantee. + EmailAddress *string `type:"string"` + + // The canonical user ID of the grantee. + ID *string `type:"string"` + + // Type of grantee + Type *string `locationName:"xsi:type" type:"string" xmlAttribute:"true" required:"true" enum:"Type"` + + // URI of the grantee group. + URI *string `type:"string"` +} + +// String returns the string representation +func (s Grantee) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Grantee) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Grantee) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Grantee"} + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type HeadBucketInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s HeadBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *HeadBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HeadBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type HeadBucketOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s HeadBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadBucketOutput) GoString() string { + return s.String() +} + +type HeadObjectInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Return the object only if its entity tag (ETag) is the same as the one specified, + // otherwise return a 412 (precondition failed). + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` + + // Return the object only if it has been modified since the specified time, + // otherwise return a 304 (not modified). + IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"` + + // Return the object only if its entity tag (ETag) is different from the one + // specified, otherwise return a 304 (not modified). + IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"` + + // Return the object only if it has not been modified since the specified time, + // otherwise return a 412 (precondition failed). 
+ IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"`
+
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Downloads the specified range bytes of an object. For more information about
+ // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
+ Range *string `location:"header" locationName:"Range" type:"string"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s HeadObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HeadObjectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *HeadObjectInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "HeadObjectInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type HeadObjectOutput struct {
+ _ struct{} `type:"structure"`
+
+ AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"`
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+ // Specifies what content encodings have been applied to the object and thus
+ // what decoding mechanisms must be applied to obtain the media-type referenced
+ // by the Content-Type header field.
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Marker. If false, this response header does not appear in the response. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // An ETag is an opaque identifier assigned by a web server to a specific version + // of a resource found at a URL + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the object expiration is configured (see PUT Bucket lifecycle), the response + // includes this header. It includes the expiry-date and rule-id key value pairs + // providing object expiration information. The value of the rule-id is URL + // encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *string `location:"header" locationName:"Expires" type:"string"` + + // Last modified date of the object + LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // This is set to the number of metadata entries not returned in x-amz-meta + // headers. This can happen if you create metadata using an API like SOAP that + // supports more flexible metadata than the REST API. For example, using SOAP, + // you can create metadata whose values are not legal HTTP headers. + MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` + + ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Provides information about object restoration operation and expiration time + // of the restored object copy. + Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. 
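+
+ // Editorial sketch, not generated code: HeadObjectOutput carries the same
+ // response headers as GetObjectOutput but no body, which makes HeadObject
+ // the cheap way to check existence or read metadata. svc is an assumed *S3
+ // client.
+ //
+ //    bucket, key := "my-bucket", "report.csv"
+ //    out, err := svc.HeadObject(&HeadObjectInput{Bucket: &bucket, Key: &key})
+ //    if err == nil {
+ //        _ = *out.ContentLength // object size, no body transferred
+ //        _ = out.Metadata       // user metadata from x-amz-meta-* headers
+ //    }
+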
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s HeadObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadObjectOutput) GoString() string { + return s.String() +} + +type IndexDocument struct { + _ struct{} `type:"structure"` + + // A suffix that is appended to a request that is for a directory on the website + // endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/ + // the data that is returned will be for the object with the key name images/index.html) + // The suffix must not be empty and must not include a slash character. + Suffix *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s IndexDocument) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IndexDocument) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IndexDocument) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IndexDocument"} + if s.Suffix == nil { + invalidParams.Add(request.NewErrParamRequired("Suffix")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type Initiator struct { + _ struct{} `type:"structure"` + + // Name of the Principal. + DisplayName *string `type:"string"` + + // If the principal is an AWS account, it provides the Canonical User ID. If + // the principal is an IAM User, it provides a user ARN value. + ID *string `type:"string"` +} + +// String returns the string representation +func (s Initiator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Initiator) GoString() string { + return s.String() +} + +// Container for object key name prefix and suffix filtering rules. +type KeyFilter struct { + _ struct{} `type:"structure"` + + // A list of containers for key value pair that defines the criteria for the + // filter rule. + FilterRules []*FilterRule `locationName:"FilterRule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s KeyFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KeyFilter) GoString() string { + return s.String() +} + +// Container for specifying the AWS Lambda notification configuration. 
+type LambdaFunctionConfiguration struct { + _ struct{} `type:"structure"` + + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` + + // Container for object key name filtering rules. For information about key + // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + Filter *NotificationConfigurationFilter `type:"structure"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // Lambda cloud function ARN that Amazon S3 can invoke when it detects events + // of the specified type. + LambdaFunctionArn *string `locationName:"CloudFunction" type:"string" required:"true"` +} + +// String returns the string representation +func (s LambdaFunctionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LambdaFunctionConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LambdaFunctionConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LambdaFunctionConfiguration"} + if s.Events == nil { + invalidParams.Add(request.NewErrParamRequired("Events")) + } + if s.LambdaFunctionArn == nil { + invalidParams.Add(request.NewErrParamRequired("LambdaFunctionArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type LifecycleConfiguration struct { + _ struct{} `type:"structure"` + + Rules []*Rule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s LifecycleConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LifecycleConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleConfiguration"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type LifecycleExpiration struct { + _ struct{} `type:"structure"` + + // Indicates at what date the object is to be moved or deleted. Should be in + // GMT ISO 8601 Format. + Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Indicates the lifetime, in days, of the objects that are subject to the rule. + // The value must be a non-zero positive integer. + Days *int64 `type:"integer"` + + // Indicates whether Amazon S3 will remove a delete marker with no noncurrent + // versions. If set to true, the delete marker will be expired; if set to false + // the policy takes no action. This cannot be specified with Days or Date in + // a Lifecycle Expiration Policy. 
+ ExpiredObjectDeleteMarker *bool `type:"boolean"` +} + +// String returns the string representation +func (s LifecycleExpiration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleExpiration) GoString() string { + return s.String() +} + +type LifecycleRule struct { + _ struct{} `type:"structure"` + + // Specifies the days since the initiation of an Incomplete Multipart Upload + // that Lifecycle will wait before permanently removing all parts of the upload. + AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` + + Expiration *LifecycleExpiration `type:"structure"` + + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string `type:"string"` + + // Specifies when noncurrent object versions expire. Upon expiration, Amazon + // S3 permanently deletes the noncurrent object versions. You set this lifecycle + // configuration action on a bucket that has versioning enabled (or suspended) + // to request that Amazon S3 delete noncurrent object versions at a specific + // period in the object's lifetime. + NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` + + NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"` + + // Prefix identifying one or more objects to which the rule applies. + Prefix *string `type:"string" required:"true"` + + // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule + // is not currently being applied. + Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + + Transitions []*Transition `locationName:"Transition" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s LifecycleRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LifecycleRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleRule"} + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListBucketsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ListBucketsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketsInput) GoString() string { + return s.String() +} + +type ListBucketsOutput struct { + _ struct{} `type:"structure"` + + Buckets []*Bucket `locationNameList:"Bucket" type:"list"` + + Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s ListBucketsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketsOutput) GoString() string { + return s.String() +} + +type ListMultipartUploadsInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Character you use to group keys. 
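+
+ // Editorial sketch, not generated code: LifecycleConfiguration.Validate
+ // above walks each rule and reports failures by index ("Rules[0]", ...), so
+ // a rule missing its required Prefix or Status is caught client-side:
+ //
+ //    prefix, status := "tmp/", "Enabled"
+ //    days := int64(7)
+ //    rule := &LifecycleRule{
+ //        Prefix:     &prefix,
+ //        Status:     &status,
+ //        Expiration: &LifecycleExpiration{Days: &days},
+ //    }
+ //    if err := rule.Validate(); err != nil {
+ //        // only reachable if Prefix or Status were unset
+ //    }
+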
+ Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // Together with upload-id-marker, this parameter specifies the multipart upload + // after which listing should begin. + KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` + + // Sets the maximum number of multipart uploads, from 1 to 1,000, to return + // in the response body. 1,000 is the maximum number of uploads that can be + // returned in a response. + MaxUploads *int64 `location:"querystring" locationName:"max-uploads" type:"integer"` + + // Lists in-progress uploads only for those keys that begin with the specified + // prefix. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Together with key-marker, specifies the multipart upload after which listing + // should begin. If key-marker is not specified, the upload-id-marker parameter + // is ignored. + UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"` +} + +// String returns the string representation +func (s ListMultipartUploadsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMultipartUploadsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListMultipartUploadsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListMultipartUploadsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListMultipartUploadsOutput struct { + _ struct{} `type:"structure"` + + // Name of the bucket to which the multipart upload was initiated. + Bucket *string `type:"string"` + + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `type:"string" enum:"EncodingType"` + + // Indicates whether the returned list of multipart uploads is truncated. A + // value of true indicates that the list was truncated. The list can be truncated + // if the number of multipart uploads exceeds the limit allowed or specified + // by max uploads. + IsTruncated *bool `type:"boolean"` + + // The key at or after which the listing began. + KeyMarker *string `type:"string"` + + // Maximum number of multipart uploads that could have been included in the + // response. + MaxUploads *int64 `type:"integer"` + + // When a list is truncated, this element specifies the value that should be + // used for the key-marker request parameter in a subsequent request. + NextKeyMarker *string `type:"string"` + + // When a list is truncated, this element specifies the value that should be + // used for the upload-id-marker request parameter in a subsequent request. 
+ NextUploadIdMarker *string `type:"string"` + + // When a prefix is provided in the request, this field contains the specified + // prefix. The result contains only keys starting with the specified prefix. + Prefix *string `type:"string"` + + // Upload ID after which listing began. + UploadIdMarker *string `type:"string"` + + Uploads []*MultipartUpload `locationName:"Upload" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s ListMultipartUploadsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMultipartUploadsOutput) GoString() string { + return s.String() +} + +type ListObjectVersionsInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // A delimiter is a character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // Specifies the key to start with when listing objects in a bucket. + KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` + + // Sets the maximum number of keys returned in the response. The response might + // contain fewer keys but will never contain more. + MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + + // Limits the response to keys that begin with the specified prefix. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Specifies the object version you want to start listing from. + VersionIdMarker *string `location:"querystring" locationName:"version-id-marker" type:"string"` +} + +// String returns the string representation +func (s ListObjectVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectVersionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListObjectVersionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListObjectVersionsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListObjectVersionsOutput struct { + _ struct{} `type:"structure"` + + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + DeleteMarkers []*DeleteMarkerEntry `locationName:"DeleteMarker" type:"list" flattened:"true"` + + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `type:"string" enum:"EncodingType"` + + // A flag that indicates whether or not Amazon S3 returned all of the results + // that satisfied the search criteria. 
+ // If your results were truncated, you can make a follow-up paginated request
+ // using the NextKeyMarker and NextVersionIdMarker response parameters as a
+ // starting place in another request to return the rest of the results.
+ IsTruncated *bool `type:"boolean"`
+
+ // Marks the last Key returned in a truncated response.
+ KeyMarker *string `type:"string"`
+
+ MaxKeys *int64 `type:"integer"`
+
+ Name *string `type:"string"`
+
+ // Use this value for the key marker request parameter in a subsequent request.
+ NextKeyMarker *string `type:"string"`
+
+ // Use this value for the next version id marker parameter in a subsequent request.
+ NextVersionIdMarker *string `type:"string"`
+
+ Prefix *string `type:"string"`
+
+ VersionIdMarker *string `type:"string"`
+
+ Versions []*ObjectVersion `locationName:"Version" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s ListObjectVersionsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectVersionsOutput) GoString() string {
+ return s.String()
+}
+
+type ListObjectsInput struct {
+ _ struct{} `type:"structure"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // A delimiter is a character you use to group keys.
+ Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`
+
+ // Requests Amazon S3 to encode the object keys in the response and specifies
+ // the encoding method to use. An object key may contain any Unicode character;
+ // however, an XML 1.0 parser cannot parse some characters, such as characters
+ // with an ASCII value from 0 to 10. For characters that are not supported in
+ // XML 1.0, you can add this parameter to request that Amazon S3 encode the
+ // keys in the response.
+ EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
+
+ // Specifies the key to start with when listing objects in a bucket.
+ Marker *string `location:"querystring" locationName:"marker" type:"string"`
+
+ // Sets the maximum number of keys returned in the response. The response might
+ // contain fewer keys but will never contain more.
+ MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`
+
+ // Limits the response to keys that begin with the specified prefix.
+ Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+}
+
+// String returns the string representation
+func (s ListObjectsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListObjectsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListObjectsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type ListObjectsOutput struct {
+ _ struct{} `type:"structure"`
+
+ CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+
+ Contents []*Object `type:"list" flattened:"true"`
+
+ Delimiter *string `type:"string"`
+
+ // Encoding type used by Amazon S3 to encode object keys in the response.
+ EncodingType *string `type:"string" enum:"EncodingType"`
+
+ // A flag that indicates whether or not Amazon S3 returned all of the results
+ // that satisfied the search criteria.
+ IsTruncated *bool `type:"boolean"`
+
+ Marker *string `type:"string"`
+
+ MaxKeys *int64 `type:"integer"`
+
+ Name *string `type:"string"`
+
+ // When the response is truncated (the IsTruncated element value in the response
+ // is true), you can use the key name in this field as the marker in a subsequent
+ // request to get the next set of objects. Amazon S3 lists objects in alphabetical
+ // order. Note: this element is returned only if you have the delimiter request
+ // parameter specified. If the response does not include NextMarker and it is
+ // truncated, you can use the value of the last Key in the response as the marker
+ // in a subsequent request to get the next set of object keys.
+ NextMarker *string `type:"string"`
+
+ Prefix *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListObjectsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectsOutput) GoString() string {
+ return s.String()
+}
+
+type ListObjectsV2Input struct {
+ _ struct{} `type:"structure"`
+
+ // Name of the bucket to list.
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // ContinuationToken indicates to Amazon S3 that the list is being continued
+ // on this bucket with a token. ContinuationToken is obfuscated and is not a
+ // real key.
+ ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"`
+
+ // A delimiter is a character you use to group keys.
+ Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`
+
+ // Encoding type used by Amazon S3 to encode object keys in the response.
+ EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
+
+ // The owner field is not present in ListObjectsV2 results by default. If you
+ // want the owner field returned with each key in the result, set FetchOwner
+ // to true.
+ FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"`
+
+ // Sets the maximum number of keys returned in the response. The response might
+ // contain fewer keys but will never contain more.
+ MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`
+
+ // Limits the response to keys that begin with the specified prefix.
+ Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+
+ // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts
+ // listing after this specified key. StartAfter can be any key in the bucket.
+ StartAfter *string `location:"querystring" locationName:"start-after" type:"string"`
+}
+
+// String returns the string representation
+func (s ListObjectsV2Input) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectsV2Input) GoString() string {
+ return s.String()
+}
+
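+// Example (an illustrative sketch, not generated code): paginating all keys in
+// a bucket with ListObjectsV2Pages, which follows NextContinuationToken for
+// you. Assumes a configured client svc (e.g. svc := New(sess)); the bucket
+// name is hypothetical.
+//
+//    err := svc.ListObjectsV2Pages(&ListObjectsV2Input{
+//        Bucket: aws.String("example-bucket"),
+//    }, func(page *ListObjectsV2Output, lastPage bool) bool {
+//        for _, obj := range page.Contents {
+//            fmt.Println(*obj.Key) // each key in this page of results
+//        }
+//        return true // keep paging until IsTruncated is false
+//    })
+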
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListObjectsV2Input) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListObjectsV2Input"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type ListObjectsV2Output struct {
+ _ struct{} `type:"structure"`
+
+ // CommonPrefixes contains all (if there are any) keys between Prefix and the
+ // next occurrence of the string specified by the delimiter.
+ CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+
+ // Metadata about each object returned.
+ Contents []*Object `type:"list" flattened:"true"`
+
+ // ContinuationToken indicates to Amazon S3 that the list is being continued
+ // on this bucket with a token. ContinuationToken is obfuscated and is not a
+ // real key.
+ ContinuationToken *string `type:"string"`
+
+ // A delimiter is a character you use to group keys.
+ Delimiter *string `type:"string"`
+
+ // Encoding type used by Amazon S3 to encode object keys in the response.
+ EncodingType *string `type:"string" enum:"EncodingType"`
+
+ // A flag that indicates whether or not Amazon S3 returned all of the results
+ // that satisfied the search criteria.
+ IsTruncated *bool `type:"boolean"`
+
+ // KeyCount is the number of keys returned with this request. KeyCount will
+ // always be less than or equal to the MaxKeys field. For example, if you ask
+ // for 50 keys, your result will include no more than 50 keys.
+ KeyCount *int64 `type:"integer"`
+
+ // Sets the maximum number of keys returned in the response. The response might
+ // contain fewer keys but will never contain more.
+ MaxKeys *int64 `type:"integer"`
+
+ // Name of the bucket to list.
+ Name *string `type:"string"`
+
+ // NextContinuationToken is sent when IsTruncated is true, which means there
+ // are more keys in the bucket that can be listed. The next list request to
+ // Amazon S3 can be continued with this NextContinuationToken.
+ // NextContinuationToken is obfuscated and is not a real key.
+ NextContinuationToken *string `type:"string"`
+
+ // Limits the response to keys that begin with the specified prefix.
+ Prefix *string `type:"string"`
+
+ // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts
+ // listing after this specified key. StartAfter can be any key in the bucket.
+ StartAfter *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListObjectsV2Output) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectsV2Output) GoString() string {
+ return s.String()
+}
+
+type ListPartsInput struct {
+ _ struct{} `type:"structure"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Sets the maximum number of parts to return.
+ MaxParts *int64 `location:"querystring" locationName:"max-parts" type:"integer"`
+
+ // Specifies the part after which listing should begin. Only parts with higher
+ // part numbers will be listed.
+ PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Upload ID identifying the multipart upload whose parts are being listed. + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListPartsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPartsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListPartsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListPartsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListPartsOutput struct { + _ struct{} `type:"structure"` + + // Date when multipart upload will become eligible for abort operation by lifecycle. + AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"` + + // Id of the lifecycle rule that makes a multipart upload eligible for abort + // operation. + AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` + + // Name of the bucket to which the multipart upload was initiated. + Bucket *string `type:"string"` + + // Identifies who initiated the multipart upload. + Initiator *Initiator `type:"structure"` + + // Indicates whether the returned list of parts is truncated. + IsTruncated *bool `type:"boolean"` + + // Object key for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + // Maximum number of parts that were allowed in the response. + MaxParts *int64 `type:"integer"` + + // When a list is truncated, this element specifies the last part in the list, + // as well as the value to use for the part-number-marker request parameter + // in a subsequent request. + NextPartNumberMarker *int64 `type:"integer"` + + Owner *Owner `type:"structure"` + + // Part number after which listing begins. + PartNumberMarker *int64 `type:"integer"` + + Parts []*Part `locationName:"Part" type:"list" flattened:"true"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"StorageClass"` + + // Upload ID identifying the multipart upload whose parts are being listed. 
+ UploadId *string `type:"string"` +} + +// String returns the string representation +func (s ListPartsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPartsOutput) GoString() string { + return s.String() +} + +type LoggingEnabled struct { + _ struct{} `type:"structure"` + + // Specifies the bucket where you want Amazon S3 to store server access logs. + // You can have your logs delivered to any bucket that you own, including the + // same bucket that is being logged. You can also configure multiple buckets + // to deliver their logs to the same target bucket. In this case you should + // choose a different TargetPrefix for each source bucket so that the delivered + // log files can be distinguished by key. + TargetBucket *string `type:"string"` + + TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` + + // This element lets you specify a prefix for the keys that the log files will + // be stored under. + TargetPrefix *string `type:"string"` +} + +// String returns the string representation +func (s LoggingEnabled) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoggingEnabled) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LoggingEnabled) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LoggingEnabled"} + if s.TargetGrants != nil { + for i, v := range s.TargetGrants { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TargetGrants", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type MultipartUpload struct { + _ struct{} `type:"structure"` + + // Date and time at which the multipart upload was initiated. + Initiated *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Identifies who initiated the multipart upload. + Initiator *Initiator `type:"structure"` + + // Key of the object for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + Owner *Owner `type:"structure"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"StorageClass"` + + // Upload ID that identifies the multipart upload. + UploadId *string `type:"string"` +} + +// String returns the string representation +func (s MultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MultipartUpload) GoString() string { + return s.String() +} + +// Specifies when noncurrent object versions expire. Upon expiration, Amazon +// S3 permanently deletes the noncurrent object versions. You set this lifecycle +// configuration action on a bucket that has versioning enabled (or suspended) +// to request that Amazon S3 delete noncurrent object versions at a specific +// period in the object's lifetime. +type NoncurrentVersionExpiration struct { + _ struct{} `type:"structure"` + + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. For information about the noncurrent days + // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent + // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in + // the Amazon Simple Storage Service Developer Guide. 
+ NoncurrentDays *int64 `type:"integer"` +} + +// String returns the string representation +func (s NoncurrentVersionExpiration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NoncurrentVersionExpiration) GoString() string { + return s.String() +} + +// Container for the transition rule that describes when noncurrent objects +// transition to the STANDARD_IA or GLACIER storage class. If your bucket is +// versioning-enabled (or versioning is suspended), you can set this action +// to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA +// or GLACIER storage class at a specific period in the object's lifetime. +type NoncurrentVersionTransition struct { + _ struct{} `type:"structure"` + + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. For information about the noncurrent days + // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent + // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in + // the Amazon Simple Storage Service Developer Guide. + NoncurrentDays *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"TransitionStorageClass"` +} + +// String returns the string representation +func (s NoncurrentVersionTransition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NoncurrentVersionTransition) GoString() string { + return s.String() +} + +// Container for specifying the notification configuration of the bucket. If +// this element is empty, notifications are turned off on the bucket. +type NotificationConfiguration struct { + _ struct{} `type:"structure"` + + LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"` + + QueueConfigurations []*QueueConfiguration `locationName:"QueueConfiguration" type:"list" flattened:"true"` + + TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s NotificationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
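+
+// Example (an illustrative sketch, not generated code): a configuration that
+// invokes a Lambda function for objects created under a key prefix. The ARN
+// and prefix are hypothetical; KeyFilter and FilterRule are defined elsewhere
+// in this package.
+//
+//    cfg := &NotificationConfiguration{
+//        LambdaFunctionConfigurations: []*LambdaFunctionConfiguration{{
+//            Events:            []*string{aws.String("s3:ObjectCreated:*")},
+//            LambdaFunctionArn: aws.String("arn:aws:lambda:us-east-1:123456789012:function:example"),
+//            Filter: &NotificationConfigurationFilter{
+//                Key: &KeyFilter{FilterRules: []*FilterRule{{
+//                    Name:  aws.String("prefix"),
+//                    Value: aws.String("uploads/"),
+//                }}},
+//            },
+//        }},
+//    }
+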
+func (s *NotificationConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "NotificationConfiguration"} + if s.LambdaFunctionConfigurations != nil { + for i, v := range s.LambdaFunctionConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LambdaFunctionConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + if s.QueueConfigurations != nil { + for i, v := range s.QueueConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "QueueConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + if s.TopicConfigurations != nil { + for i, v := range s.TopicConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TopicConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type NotificationConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + CloudFunctionConfiguration *CloudFunctionConfiguration `type:"structure"` + + QueueConfiguration *QueueConfigurationDeprecated `type:"structure"` + + TopicConfiguration *TopicConfigurationDeprecated `type:"structure"` +} + +// String returns the string representation +func (s NotificationConfigurationDeprecated) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfigurationDeprecated) GoString() string { + return s.String() +} + +// Container for object key name filtering rules. For information about key +// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) +// in the Amazon Simple Storage Service Developer Guide. +type NotificationConfigurationFilter struct { + _ struct{} `type:"structure"` + + // Container for object key name prefix and suffix filtering rules. + Key *KeyFilter `locationName:"S3Key" type:"structure"` +} + +// String returns the string representation +func (s NotificationConfigurationFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfigurationFilter) GoString() string { + return s.String() +} + +type Object struct { + _ struct{} `type:"structure"` + + ETag *string `type:"string"` + + Key *string `min:"1" type:"string"` + + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + Owner *Owner `type:"structure"` + + Size *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"ObjectStorageClass"` +} + +// String returns the string representation +func (s Object) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Object) GoString() string { + return s.String() +} + +type ObjectIdentifier struct { + _ struct{} `type:"structure"` + + // Key name of the object to delete. + Key *string `min:"1" type:"string" required:"true"` + + // VersionId for the specific version of the object to delete. 
+ VersionId *string `type:"string"` +} + +// String returns the string representation +func (s ObjectIdentifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectIdentifier) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ObjectIdentifier) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ObjectIdentifier"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ObjectVersion struct { + _ struct{} `type:"structure"` + + ETag *string `type:"string"` + + // Specifies whether the object is (true) or is not (false) the latest version + // of an object. + IsLatest *bool `type:"boolean"` + + // The object key. + Key *string `min:"1" type:"string"` + + // Date and time the object was last modified. + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + Owner *Owner `type:"structure"` + + // Size in bytes of the object. + Size *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"ObjectVersionStorageClass"` + + // Version ID of an object. + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s ObjectVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectVersion) GoString() string { + return s.String() +} + +type Owner struct { + _ struct{} `type:"structure"` + + DisplayName *string `type:"string"` + + ID *string `type:"string"` +} + +// String returns the string representation +func (s Owner) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Owner) GoString() string { + return s.String() +} + +type Part struct { + _ struct{} `type:"structure"` + + // Entity tag returned when the part was uploaded. + ETag *string `type:"string"` + + // Date and time at which the part was uploaded. + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Part number identifying the part. This is a positive integer between 1 and + // 10,000. + PartNumber *int64 `type:"integer"` + + // Size of the uploaded part data. + Size *int64 `type:"integer"` +} + +// String returns the string representation +func (s Part) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Part) GoString() string { + return s.String() +} + +type PutBucketAccelerateConfigurationInput struct { + _ struct{} `type:"structure" payload:"AccelerateConfiguration"` + + // Specifies the Accelerate Configuration you want to set for the bucket. + AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true"` + + // Name of the bucket for which the accelerate configuration is set. 
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutBucketAccelerateConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAccelerateConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketAccelerateConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketAccelerateConfigurationInput"} + if s.AccelerateConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("AccelerateConfiguration")) + } + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutBucketAccelerateConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketAccelerateConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAccelerateConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketAclInput struct { + _ struct{} `type:"structure" payload:"AccessControlPolicy"` + + // The canned ACL to apply to the bucket. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` + + AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` +} + +// String returns the string representation +func (s PutBucketAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
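+
+// Example (an illustrative sketch, not generated code): applying a canned ACL
+// to a bucket with a configured client svc; the bucket name is hypothetical.
+//
+//    _, err := svc.PutBucketAcl(&PutBucketAclInput{
+//        ACL:    aws.String("private"),
+//        Bucket: aws.String("example-bucket"),
+//    })
+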
+func (s *PutBucketAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.AccessControlPolicy != nil { + if err := s.AccessControlPolicy.Validate(); err != nil { + invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutBucketAclOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAclOutput) GoString() string { + return s.String() +} + +type PutBucketCorsInput struct { + _ struct{} `type:"structure" payload:"CORSConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketCorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.CORSConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("CORSConfiguration")) + } + if s.CORSConfiguration != nil { + if err := s.CORSConfiguration.Validate(); err != nil { + invalidParams.AddNested("CORSConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutBucketCorsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketCorsOutput) GoString() string { + return s.String() +} + +type PutBucketLifecycleConfigurationInput struct { + _ struct{} `type:"structure" payload:"LifecycleConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
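+
+// Example (an illustrative sketch, not generated code): expiring objects under
+// a prefix after 30 days and aborting stale multipart uploads after 7 days.
+// Names and periods are hypothetical; BucketLifecycleConfiguration and
+// AbortIncompleteMultipartUpload are defined elsewhere in this package.
+//
+//    _, err := svc.PutBucketLifecycleConfiguration(&PutBucketLifecycleConfigurationInput{
+//        Bucket: aws.String("example-bucket"),
+//        LifecycleConfiguration: &BucketLifecycleConfiguration{
+//            Rules: []*LifecycleRule{{
+//                Prefix:     aws.String("tmp/"),
+//                Status:     aws.String("Enabled"),
+//                Expiration: &LifecycleExpiration{Days: aws.Int64(30)},
+//                AbortIncompleteMultipartUpload: &AbortIncompleteMultipartUpload{
+//                    DaysAfterInitiation: aws.Int64(7),
+//                },
+//            }},
+//        },
+//    })
+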
+func (s *PutBucketLifecycleConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.LifecycleConfiguration != nil { + if err := s.LifecycleConfiguration.Validate(); err != nil { + invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutBucketLifecycleConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketLifecycleInput struct { + _ struct{} `type:"structure" payload:"LifecycleConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.LifecycleConfiguration != nil { + if err := s.LifecycleConfiguration.Validate(); err != nil { + invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutBucketLifecycleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleOutput) GoString() string { + return s.String() +} + +type PutBucketLoggingInput struct { + _ struct{} `type:"structure" payload:"BucketLoggingStatus"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketLoggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLoggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
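+
+// Example (an illustrative sketch, not generated code): enabling server access
+// logging with delivery to a target bucket under a prefix; bucket names are
+// hypothetical. As documented on LoggingEnabled above, distinct source buckets
+// should use distinct TargetPrefix values.
+//
+//    _, err := svc.PutBucketLogging(&PutBucketLoggingInput{
+//        Bucket: aws.String("example-bucket"),
+//        BucketLoggingStatus: &BucketLoggingStatus{
+//            LoggingEnabled: &LoggingEnabled{
+//                TargetBucket: aws.String("example-log-bucket"),
+//                TargetPrefix: aws.String("access-logs/example-bucket/"),
+//            },
+//        },
+//    })
+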
+func (s *PutBucketLoggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLoggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.BucketLoggingStatus == nil { + invalidParams.Add(request.NewErrParamRequired("BucketLoggingStatus")) + } + if s.BucketLoggingStatus != nil { + if err := s.BucketLoggingStatus.Validate(); err != nil { + invalidParams.AddNested("BucketLoggingStatus", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutBucketLoggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketLoggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLoggingOutput) GoString() string { + return s.String() +} + +type PutBucketNotificationConfigurationInput struct { + _ struct{} `type:"structure" payload:"NotificationConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Container for specifying the notification configuration of the bucket. If + // this element is empty, notifications are turned off on the bucket. + NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketNotificationConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketNotificationConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.NotificationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) + } + if s.NotificationConfiguration != nil { + if err := s.NotificationConfiguration.Validate(); err != nil { + invalidParams.AddNested("NotificationConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutBucketNotificationConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketNotificationConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketNotificationInput struct { + _ struct{} `type:"structure" payload:"NotificationConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketNotificationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
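+
+// Example (an illustrative sketch, not generated code): applying a
+// NotificationConfiguration such as the cfg value sketched earlier in this
+// file. As noted on the type's documentation, sending an empty configuration
+// turns notifications off.
+//
+//    _, err := svc.PutBucketNotificationConfiguration(&PutBucketNotificationConfigurationInput{
+//        Bucket:                    aws.String("example-bucket"),
+//        NotificationConfiguration: cfg,
+//    })
+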
+func (s *PutBucketNotificationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.NotificationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutBucketNotificationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketNotificationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationOutput) GoString() string { + return s.String() +} + +type PutBucketPolicyInput struct { + _ struct{} `type:"structure" payload:"Policy"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The bucket policy as a JSON document. + Policy *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s PutBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketPolicyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Policy == nil { + invalidParams.Add(request.NewErrParamRequired("Policy")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutBucketPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketPolicyOutput) GoString() string { + return s.String() +} + +type PutBucketReplicationInput struct { + _ struct{} `type:"structure" payload:"ReplicationConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Container for replication rules. You can add as many as 1,000 rules. Total + // replication configuration size can be up to 2 MB. + ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketReplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
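+
+// Example (an illustrative sketch, not generated code): replicating objects
+// under a prefix to a destination bucket. The role ARN and bucket names are
+// hypothetical; ReplicationConfiguration, ReplicationRule, and Destination are
+// defined elsewhere in this package.
+//
+//    _, err := svc.PutBucketReplication(&PutBucketReplicationInput{
+//        Bucket: aws.String("example-source-bucket"),
+//        ReplicationConfiguration: &ReplicationConfiguration{
+//            Role: aws.String("arn:aws:iam::123456789012:role/example-replication-role"),
+//            Rules: []*ReplicationRule{{
+//                Prefix: aws.String("docs/"),
+//                Status: aws.String("Enabled"),
+//                Destination: &Destination{
+//                    Bucket: aws.String("arn:aws:s3:::example-destination-bucket"),
+//                },
+//            }},
+//        },
+//    })
+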
+func (s *PutBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketReplicationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.ReplicationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationConfiguration")) + } + if s.ReplicationConfiguration != nil { + if err := s.ReplicationConfiguration.Validate(); err != nil { + invalidParams.AddNested("ReplicationConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutBucketReplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketReplicationOutput) GoString() string { + return s.String() +} + +type PutBucketRequestPaymentInput struct { + _ struct{} `type:"structure" payload:"RequestPaymentConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketRequestPaymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketRequestPaymentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketRequestPaymentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketRequestPaymentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.RequestPaymentConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("RequestPaymentConfiguration")) + } + if s.RequestPaymentConfiguration != nil { + if err := s.RequestPaymentConfiguration.Validate(); err != nil { + invalidParams.AddNested("RequestPaymentConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutBucketRequestPaymentOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketRequestPaymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketRequestPaymentOutput) GoString() string { + return s.String() +} + +type PutBucketTaggingInput struct { + _ struct{} `type:"structure" payload:"Tagging"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
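+
+// Example (an illustrative sketch, not generated code): replacing a bucket's
+// tag set. Keys and values are hypothetical; Tagging and Tag are defined
+// elsewhere in this package.
+//
+//    _, err := svc.PutBucketTagging(&PutBucketTaggingInput{
+//        Bucket: aws.String("example-bucket"),
+//        Tagging: &Tagging{TagSet: []*Tag{
+//            {Key: aws.String("team"), Value: aws.String("platform")},
+//        }},
+//    })
+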
+func (s *PutBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Tagging == nil { + invalidParams.Add(request.NewErrParamRequired("Tagging")) + } + if s.Tagging != nil { + if err := s.Tagging.Validate(); err != nil { + invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutBucketTaggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketTaggingOutput) GoString() string { + return s.String() +} + +type PutBucketVersioningInput struct { + _ struct{} `type:"structure" payload:"VersioningConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. + MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketVersioningInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketVersioningInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketVersioningInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketVersioningInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.VersioningConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("VersioningConfiguration")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutBucketVersioningOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketVersioningOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketVersioningOutput) GoString() string { + return s.String() +} + +type PutBucketWebsiteInput struct { + _ struct{} `type:"structure" payload:"WebsiteConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketWebsiteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
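+
+// Example (an illustrative sketch, not generated code): configuring a bucket
+// as a static website with index and error documents; names are hypothetical,
+// and WebsiteConfiguration is defined elsewhere in this package.
+//
+//    _, err := svc.PutBucketWebsite(&PutBucketWebsiteInput{
+//        Bucket: aws.String("example-bucket"),
+//        WebsiteConfiguration: &WebsiteConfiguration{
+//            IndexDocument: &IndexDocument{Suffix: aws.String("index.html")},
+//            ErrorDocument: &ErrorDocument{Key: aws.String("error.html")},
+//        },
+//    })
+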
+func (s *PutBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketWebsiteInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.WebsiteConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("WebsiteConfiguration")) + } + if s.WebsiteConfiguration != nil { + if err := s.WebsiteConfiguration.Validate(); err != nil { + invalidParams.AddNested("WebsiteConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutBucketWebsiteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketWebsiteOutput) GoString() string { + return s.String() +} + +type PutObjectAclInput struct { + _ struct{} `type:"structure" payload:"AccessControlPolicy"` + + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s PutObjectAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
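+
+// Example (an illustrative sketch, not generated code): granting public read
+// on a single object with a canned ACL; the bucket and key are hypothetical.
+//
+//    _, err := svc.PutObjectAcl(&PutObjectAclInput{
+//        ACL:    aws.String("public-read"),
+//        Bucket: aws.String("example-bucket"),
+//        Key:    aws.String("reports/summary.csv"),
+//    })
+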
+func (s *PutObjectAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.AccessControlPolicy != nil { + if err := s.AccessControlPolicy.Validate(); err != nil { + invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutObjectAclOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s PutObjectAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectAclOutput) GoString() string { + return s.String() +} + +type PutObjectInput struct { + _ struct{} `type:"structure" payload:"Body"` + + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // Object data. + Body io.ReadSeeker `type:"blob"` + + // Name of the bucket to which the PUT operation was initiated. + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. This parameter is useful when the size of the + // body cannot be determined automatically. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to write the ACL for the applicable object. 
+    GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+    // Object key for which the PUT operation was initiated.
+    Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+    // A map of metadata to store with the object in S3.
+    Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+    // Confirms that the requester knows that she or he will be charged for the
+    // request. Bucket owners need not specify this parameter in their requests.
+    // Documentation on downloading objects from requester pays buckets can be found
+    // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+    RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+    // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+    SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+    // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+    // data. This value is used to store the object and then it is discarded; Amazon
+    // does not store the encryption key. The key must be appropriate for use with
+    // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+    // header.
+    SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+    // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+    // Amazon S3 uses this header for a message integrity check to ensure the encryption
+    // key was transmitted without error.
+    SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+    // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+    // requests for an object protected by AWS KMS will fail if not made via SSL
+    // or using SigV4. Documentation on configuring any of the officially supported
+    // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+    SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+    // The Server-side encryption algorithm used when storing this object in S3
+    // (e.g., AES256, aws:kms).
+    ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+    // The type of storage to use for the object. Defaults to 'STANDARD'.
+    StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+    // If the bucket is configured as a website, redirects requests for this object
+    // to another object in the same bucket or to an external URL. Amazon S3 stores
+    // the value of this header in the object metadata.
+    WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
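+//
+// A sketch with illustrative values (not generated code): the generated rules
+// require Bucket and a Key of minimum length 1, so an empty Key fails even
+// though the field is non-nil:
+//
+//    err := (&PutObjectInput{Bucket: aws.String("b"), Key: aws.String("")}).Validate()
+//    // err is non-nil: minimum field size of 1, PutObjectInput.Key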
+func (s *PutObjectInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "PutObjectInput"}
+    if s.Bucket == nil {
+        invalidParams.Add(request.NewErrParamRequired("Bucket"))
+    }
+    if s.Key == nil {
+        invalidParams.Add(request.NewErrParamRequired("Key"))
+    }
+    if s.Key != nil && len(*s.Key) < 1 {
+        invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+type PutObjectOutput struct {
+    _ struct{} `type:"structure"`
+
+    // Entity tag for the uploaded object.
+    ETag *string `location:"header" locationName:"ETag" type:"string"`
+
+    // If the object expiration is configured, this will contain the expiration
+    // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.
+    Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+    // If present, indicates that the requester was successfully charged for the
+    // request.
+    RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+    // If server-side encryption with a customer-provided encryption key was requested,
+    // the response will include this header confirming the encryption algorithm
+    // used.
+    SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+    // If server-side encryption with a customer-provided encryption key was requested,
+    // the response will include this header to provide round trip message integrity
+    // verification of the customer-provided encryption key.
+    SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+    // If present, specifies the ID of the AWS Key Management Service (KMS) master
+    // encryption key that was used for the object.
+    SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+    // The Server-side encryption algorithm used when storing this object in S3
+    // (e.g., AES256, aws:kms).
+    ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+    // Version of the object.
+    VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectOutput) GoString() string {
+    return s.String()
+}
+
+// Container for specifying the configuration when you want Amazon S3 to publish
+// events to an Amazon Simple Queue Service (Amazon SQS) queue.
+type QueueConfiguration struct {
+    _ struct{} `type:"structure"`
+
+    Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
+
+    // Container for object key name filtering rules. For information about key
+    // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+    // in the Amazon Simple Storage Service Developer Guide.
+    Filter *NotificationConfigurationFilter `type:"structure"`
+
+    // Optional unique identifier for configurations in a notification configuration.
+    // If you don't provide one, Amazon S3 will assign an ID.
+    Id *string `type:"string"`
+
+    // Amazon SQS queue ARN to which Amazon S3 will publish a message when it detects
+    // events of specified type.
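+    // (A hypothetical example of the expected form:
+    // arn:aws:sqs:us-west-2:111122223333:example-queue.)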
+    QueueArn *string `locationName:"Queue" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s QueueConfiguration) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s QueueConfiguration) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *QueueConfiguration) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "QueueConfiguration"}
+    if s.Events == nil {
+        invalidParams.Add(request.NewErrParamRequired("Events"))
+    }
+    if s.QueueArn == nil {
+        invalidParams.Add(request.NewErrParamRequired("QueueArn"))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+type QueueConfigurationDeprecated struct {
+    _ struct{} `type:"structure"`
+
+    // Bucket event for which to send notifications.
+    Event *string `deprecated:"true" type:"string" enum:"Event"`
+
+    Events []*string `locationName:"Event" type:"list" flattened:"true"`
+
+    // Optional unique identifier for configurations in a notification configuration.
+    // If you don't provide one, Amazon S3 will assign an ID.
+    Id *string `type:"string"`
+
+    Queue *string `type:"string"`
+}
+
+// String returns the string representation
+func (s QueueConfigurationDeprecated) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s QueueConfigurationDeprecated) GoString() string {
+    return s.String()
+}
+
+type Redirect struct {
+    _ struct{} `type:"structure"`
+
+    // The host name to use in the redirect request.
+    HostName *string `type:"string"`
+
+    // The HTTP redirect code to use on the response. Not required if one of the
+    // siblings is present.
+    HttpRedirectCode *string `type:"string"`
+
+    // Protocol to use (http, https) when redirecting requests. The default is the
+    // protocol that is used in the original request.
+    Protocol *string `type:"string" enum:"Protocol"`
+
+    // The object key prefix to use in the redirect request. For example, to redirect
+    // requests for all pages with prefix docs/ (objects in the docs/ folder) to
+    // documents/, you can set a condition block with KeyPrefixEquals set to docs/
+    // and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required
+    // if one of the siblings is present. Can be present only if ReplaceKeyWith
+    // is not provided.
+    ReplaceKeyPrefixWith *string `type:"string"`
+
+    // The specific object key to use in the redirect request. For example, redirect
+    // request to error.html. Not required if one of the siblings is present. Can
+    // be present only if ReplaceKeyPrefixWith is not provided.
+    ReplaceKeyWith *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Redirect) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Redirect) GoString() string {
+    return s.String()
+}
+
+type RedirectAllRequestsTo struct {
+    _ struct{} `type:"structure"`
+
+    // Name of the host where requests will be redirected.
+    HostName *string `type:"string" required:"true"`
+
+    // Protocol to use (http, https) when redirecting requests. The default is the
+    // protocol that is used in the original request.
+ Protocol *string `type:"string" enum:"Protocol"` +} + +// String returns the string representation +func (s RedirectAllRequestsTo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RedirectAllRequestsTo) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RedirectAllRequestsTo) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RedirectAllRequestsTo"} + if s.HostName == nil { + invalidParams.Add(request.NewErrParamRequired("HostName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Container for replication rules. You can add as many as 1,000 rules. Total +// replication configuration size can be up to 2 MB. +type ReplicationConfiguration struct { + _ struct{} `type:"structure"` + + // Amazon Resource Name (ARN) of an IAM role for Amazon S3 to assume when replicating + // the objects. + Role *string `type:"string" required:"true"` + + // Container for information about a particular replication rule. Replication + // configuration must have at least one rule and can contain up to 1,000 rules. + Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s ReplicationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationConfiguration"} + if s.Role == nil { + invalidParams.Add(request.NewErrParamRequired("Role")) + } + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ReplicationRule struct { + _ struct{} `type:"structure"` + + Destination *Destination `type:"structure" required:"true"` + + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string `type:"string"` + + // Object keyname prefix identifying one or more objects to which the rule applies. + // Maximum prefix length can be up to 1,024 characters. Overlapping prefixes + // are not supported. + Prefix *string `type:"string" required:"true"` + + // The rule is ignored if status is not Enabled. + Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"` +} + +// String returns the string representation +func (s ReplicationRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
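+//
+// A sketch with hypothetical values (not generated code): Destination, Prefix,
+// and Status are all required, and any Destination present is validated
+// recursively:
+//
+//    r := &ReplicationRule{Prefix: aws.String("logs/"), Status: aws.String(ReplicationRuleStatusEnabled)}
+//    err := r.Validate() // non-nil: Destination is a required field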
+func (s *ReplicationRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationRule"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.Destination != nil { + if err := s.Destination.Validate(); err != nil { + invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RequestPaymentConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies who pays for the download and request fees. + Payer *string `type:"string" required:"true" enum:"Payer"` +} + +// String returns the string representation +func (s RequestPaymentConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestPaymentConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RequestPaymentConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RequestPaymentConfiguration"} + if s.Payer == nil { + invalidParams.Add(request.NewErrParamRequired("Payer")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RestoreObjectInput struct { + _ struct{} `type:"structure" payload:"RestoreRequest"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure"` + + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s RestoreObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RestoreObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.RestoreRequest != nil { + if err := s.RestoreRequest.Validate(); err != nil { + invalidParams.AddNested("RestoreRequest", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RestoreObjectOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. 
+    RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation
+func (s RestoreObjectOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreObjectOutput) GoString() string {
+    return s.String()
+}
+
+type RestoreRequest struct {
+    _ struct{} `type:"structure"`
+
+    // Lifetime of the active copy in days.
+    Days *int64 `type:"integer" required:"true"`
+}
+
+// String returns the string representation
+func (s RestoreRequest) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreRequest) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RestoreRequest) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "RestoreRequest"}
+    if s.Days == nil {
+        invalidParams.Add(request.NewErrParamRequired("Days"))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+type RoutingRule struct {
+    _ struct{} `type:"structure"`
+
+    // A container for describing a condition that must be met for the specified
+    // redirect to apply. For example, 1. If request is for pages in the /docs folder,
+    // redirect to the /documents folder. 2. If request results in HTTP error 4xx,
+    // redirect request to another host where you might process the error.
+    Condition *Condition `type:"structure"`
+
+    // Container for redirect information. You can redirect requests to another
+    // host, to another page, or with another protocol. In the event of an error,
+    // you can specify a different error code to return.
+    Redirect *Redirect `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s RoutingRule) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RoutingRule) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RoutingRule) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "RoutingRule"}
+    if s.Redirect == nil {
+        invalidParams.Add(request.NewErrParamRequired("Redirect"))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+type Rule struct {
+    _ struct{} `type:"structure"`
+
+    // Specifies the days since the initiation of an Incomplete Multipart Upload
+    // that Lifecycle will wait before permanently removing all parts of the upload.
+    AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"`
+
+    Expiration *LifecycleExpiration `type:"structure"`
+
+    // Unique identifier for the rule. The value cannot be longer than 255 characters.
+    ID *string `type:"string"`
+
+    // Specifies when noncurrent object versions expire. Upon expiration, Amazon
+    // S3 permanently deletes the noncurrent object versions. You set this lifecycle
+    // configuration action on a bucket that has versioning enabled (or suspended)
+    // to request that Amazon S3 delete noncurrent object versions at a specific
+    // period in the object's lifetime.
+    NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"`
+
+    // Container for the transition rule that describes when noncurrent objects
+    // transition to the STANDARD_IA or GLACIER storage class. 
If your bucket is + // versioning-enabled (or versioning is suspended), you can set this action + // to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA + // or GLACIER storage class at a specific period in the object's lifetime. + NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"` + + // Prefix identifying one or more objects to which the rule applies. + Prefix *string `type:"string" required:"true"` + + // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule + // is not currently being applied. + Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + + Transition *Transition `type:"structure"` +} + +// String returns the string representation +func (s Rule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Rule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Rule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Rule"} + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type Tag struct { + _ struct{} `type:"structure"` + + // Name of the tag. + Key *string `min:"1" type:"string" required:"true"` + + // Value of the tag. + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type Tagging struct { + _ struct{} `type:"structure"` + + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` +} + +// String returns the string representation +func (s Tagging) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tagging) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tagging) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tagging"} + if s.TagSet == nil { + invalidParams.Add(request.NewErrParamRequired("TagSet")) + } + if s.TagSet != nil { + for i, v := range s.TagSet { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TagSet", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type TargetGrant struct { + _ struct{} `type:"structure"` + + Grantee *Grantee `type:"structure"` + + // Logging permissions assigned to the Grantee for the bucket. 
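+    // Valid values are the BucketLogsPermission constants defined later in
+    // this file: FULL_CONTROL, READ, and WRITE.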
+ Permission *string `type:"string" enum:"BucketLogsPermission"` +} + +// String returns the string representation +func (s TargetGrant) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TargetGrant) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TargetGrant) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TargetGrant"} + if s.Grantee != nil { + if err := s.Grantee.Validate(); err != nil { + invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Container for specifying the configuration when you want Amazon S3 to publish +// events to an Amazon Simple Notification Service (Amazon SNS) topic. +type TopicConfiguration struct { + _ struct{} `type:"structure"` + + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` + + // Container for object key name filtering rules. For information about key + // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + Filter *NotificationConfigurationFilter `type:"structure"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // Amazon SNS topic ARN to which Amazon S3 will publish a message when it detects + // events of specified type. + TopicArn *string `locationName:"Topic" type:"string" required:"true"` +} + +// String returns the string representation +func (s TopicConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TopicConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TopicConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TopicConfiguration"} + if s.Events == nil { + invalidParams.Add(request.NewErrParamRequired("Events")) + } + if s.TopicArn == nil { + invalidParams.Add(request.NewErrParamRequired("TopicArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type TopicConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + // Bucket event for which to send notifications. + Event *string `deprecated:"true" type:"string" enum:"Event"` + + Events []*string `locationName:"Event" type:"list" flattened:"true"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // Amazon SNS topic to which Amazon S3 will publish a message to report the + // specified events for the bucket. + Topic *string `type:"string"` +} + +// String returns the string representation +func (s TopicConfigurationDeprecated) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TopicConfigurationDeprecated) GoString() string { + return s.String() +} + +type Transition struct { + _ struct{} `type:"structure"` + + // Indicates at what date the object is to be moved or deleted. Should be in + // GMT ISO 8601 Format. 
+ Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Indicates the lifetime, in days, of the objects that are subject to the rule. + // The value must be a non-zero positive integer. + Days *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"TransitionStorageClass"` +} + +// String returns the string representation +func (s Transition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Transition) GoString() string { + return s.String() +} + +type UploadPartCopyInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The name of the source bucket and key name of the source object, separated + // by a slash (/). Must be URL-encoded. + CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"` + + // Copies the object if its entity tag (ETag) matches the specified tag. + CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` + + // Copies the object if it has been modified since the specified time. + CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"` + + // Copies the object if its entity tag (ETag) is different than the specified + // ETag. + CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` + + // Copies the object if it hasn't been modified since the specified time. + CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"` + + // The range of bytes to copy from the source object. The range value must use + // the form bytes=first-last, where the first and last are the zero-based byte + // offsets to copy. For example, bytes=0-9 indicates that you want to copy the + // first ten bytes of the source. You can copy a range only if the source object + // is greater than 5 GB. + CopySourceRange *string `location:"header" locationName:"x-amz-copy-source-range" type:"string"` + + // Specifies the algorithm to use when decrypting the source object (e.g., AES256). + CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt + // the source object. The encryption key provided in this header must be one + // that was used when the source object was created. + CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Part number of part being copied. This is a positive integer between 1 and + // 10,000. 
+    PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"`
+
+    // Confirms that the requester knows that she or he will be charged for the
+    // request. Bucket owners need not specify this parameter in their requests.
+    // Documentation on downloading objects from requester pays buckets can be found
+    // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+    RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+    // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+    SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+    // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+    // data. This value is used to store the object and then it is discarded; Amazon
+    // does not store the encryption key. The key must be appropriate for use with
+    // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+    // header. This must be the same encryption key specified in the initiate multipart
+    // upload request.
+    SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+    // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+    // Amazon S3 uses this header for a message integrity check to ensure the encryption
+    // key was transmitted without error.
+    SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+    // Upload ID identifying the multipart upload whose part is being copied.
+    UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UploadPartCopyInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UploadPartCopyInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UploadPartCopyInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "UploadPartCopyInput"}
+    if s.Bucket == nil {
+        invalidParams.Add(request.NewErrParamRequired("Bucket"))
+    }
+    if s.CopySource == nil {
+        invalidParams.Add(request.NewErrParamRequired("CopySource"))
+    }
+    if s.Key == nil {
+        invalidParams.Add(request.NewErrParamRequired("Key"))
+    }
+    if s.Key != nil && len(*s.Key) < 1 {
+        invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+    }
+    if s.PartNumber == nil {
+        invalidParams.Add(request.NewErrParamRequired("PartNumber"))
+    }
+    if s.UploadId == nil {
+        invalidParams.Add(request.NewErrParamRequired("UploadId"))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+type UploadPartCopyOutput struct {
+    _ struct{} `type:"structure" payload:"CopyPartResult"`
+
+    CopyPartResult *CopyPartResult `type:"structure"`
+
+    // The version of the source object that was copied, if you have enabled versioning
+    // on the source bucket.
+    CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"`
+
+    // If present, indicates that the requester was successfully charged for the
+    // request.
+    RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+    // If server-side encryption with a customer-provided encryption key was requested,
+    // the response will include this header confirming the encryption algorithm
+    // used.
+    SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+    // If server-side encryption with a customer-provided encryption key was requested,
+    // the response will include this header to provide round trip message integrity
+    // verification of the customer-provided encryption key.
+    SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+    // If present, specifies the ID of the AWS Key Management Service (KMS) master
+    // encryption key that was used for the object.
+    SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+    // The Server-side encryption algorithm used when storing this object in S3
+    // (e.g., AES256, aws:kms).
+    ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+}
+
+// String returns the string representation
+func (s UploadPartCopyOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UploadPartCopyOutput) GoString() string {
+    return s.String()
+}
+
+type UploadPartInput struct {
+    _ struct{} `type:"structure" payload:"Body"`
+
+    // Object data.
+    Body io.ReadSeeker `type:"blob"`
+
+    // Name of the bucket to which the multipart upload was initiated.
+    Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+    // Size of the body in bytes. This parameter is useful when the size of the
+    // body cannot be determined automatically.
+    ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
+
+    // Object key for which the multipart upload was initiated.
+    Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+    // Part number of part being uploaded. This is a positive integer between 1
+    // and 10,000.
+    PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"`
+
+    // Confirms that the requester knows that she or he will be charged for the
+    // request. Bucket owners need not specify this parameter in their requests.
+    // Documentation on downloading objects from requester pays buckets can be found
+    // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+    RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+    // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+    SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+    // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+    // data. This value is used to store the object and then it is discarded; Amazon
+    // does not store the encryption key. The key must be appropriate for use with
+    // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+    // header. This must be the same encryption key specified in the initiate multipart
+    // upload request.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Upload ID identifying the multipart upload whose part is being uploaded. + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s UploadPartInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UploadPartInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UploadPartInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.PartNumber == nil { + invalidParams.Add(request.NewErrParamRequired("PartNumber")) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UploadPartOutput struct { + _ struct{} `type:"structure"` + + // Entity tag for the uploaded object. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` +} + +// String returns the string representation +func (s UploadPartOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartOutput) GoString() string { + return s.String() +} + +type VersioningConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies whether MFA delete is enabled in the bucket versioning configuration. 
+ // This element is only returned if the bucket has been configured with MFA + // delete. If the bucket has never been so configured, this element is not returned. + MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADelete"` + + // The versioning state of the bucket. + Status *string `type:"string" enum:"BucketVersioningStatus"` +} + +// String returns the string representation +func (s VersioningConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VersioningConfiguration) GoString() string { + return s.String() +} + +type WebsiteConfiguration struct { + _ struct{} `type:"structure"` + + ErrorDocument *ErrorDocument `type:"structure"` + + IndexDocument *IndexDocument `type:"structure"` + + RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + + RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` +} + +// String returns the string representation +func (s WebsiteConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WebsiteConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *WebsiteConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "WebsiteConfiguration"} + if s.ErrorDocument != nil { + if err := s.ErrorDocument.Validate(); err != nil { + invalidParams.AddNested("ErrorDocument", err.(request.ErrInvalidParams)) + } + } + if s.IndexDocument != nil { + if err := s.IndexDocument.Validate(); err != nil { + invalidParams.AddNested("IndexDocument", err.(request.ErrInvalidParams)) + } + } + if s.RedirectAllRequestsTo != nil { + if err := s.RedirectAllRequestsTo.Validate(); err != nil { + invalidParams.AddNested("RedirectAllRequestsTo", err.(request.ErrInvalidParams)) + } + } + if s.RoutingRules != nil { + for i, v := range s.RoutingRules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RoutingRules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +const ( + // @enum BucketAccelerateStatus + BucketAccelerateStatusEnabled = "Enabled" + // @enum BucketAccelerateStatus + BucketAccelerateStatusSuspended = "Suspended" +) + +const ( + // @enum BucketCannedACL + BucketCannedACLPrivate = "private" + // @enum BucketCannedACL + BucketCannedACLPublicRead = "public-read" + // @enum BucketCannedACL + BucketCannedACLPublicReadWrite = "public-read-write" + // @enum BucketCannedACL + BucketCannedACLAuthenticatedRead = "authenticated-read" +) + +const ( + // @enum BucketLocationConstraint + BucketLocationConstraintEu = "EU" + // @enum BucketLocationConstraint + BucketLocationConstraintEuWest1 = "eu-west-1" + // @enum BucketLocationConstraint + BucketLocationConstraintUsWest1 = "us-west-1" + // @enum BucketLocationConstraint + BucketLocationConstraintUsWest2 = "us-west-2" + // @enum BucketLocationConstraint + BucketLocationConstraintApSouth1 = "ap-south-1" + // @enum BucketLocationConstraint + BucketLocationConstraintApSoutheast1 = "ap-southeast-1" + // @enum BucketLocationConstraint + BucketLocationConstraintApSoutheast2 = "ap-southeast-2" + // @enum BucketLocationConstraint + BucketLocationConstraintApNortheast1 = "ap-northeast-1" + // @enum BucketLocationConstraint + BucketLocationConstraintSaEast1 = "sa-east-1" + // @enum BucketLocationConstraint + 
BucketLocationConstraintCnNorth1 = "cn-north-1"
+    // @enum BucketLocationConstraint
+    BucketLocationConstraintEuCentral1 = "eu-central-1"
+)
+
+const (
+    // @enum BucketLogsPermission
+    BucketLogsPermissionFullControl = "FULL_CONTROL"
+    // @enum BucketLogsPermission
+    BucketLogsPermissionRead = "READ"
+    // @enum BucketLogsPermission
+    BucketLogsPermissionWrite = "WRITE"
+)
+
+const (
+    // @enum BucketVersioningStatus
+    BucketVersioningStatusEnabled = "Enabled"
+    // @enum BucketVersioningStatus
+    BucketVersioningStatusSuspended = "Suspended"
+)
+
+// Requests Amazon S3 to encode the object keys in the response and specifies
+// the encoding method to use. An object key may contain any Unicode character;
+// however, the XML 1.0 parser cannot parse some characters, such as characters
+// with an ASCII value from 0 to 10. For characters that are not supported in
+// XML 1.0, you can add this parameter to request that Amazon S3 encode the
+// keys in the response.
+const (
+    // @enum EncodingType
+    EncodingTypeUrl = "url"
+)
+
+// Bucket event for which to send notifications.
+const (
+    // @enum Event
+    EventS3ReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject"
+    // @enum Event
+    EventS3ObjectCreated = "s3:ObjectCreated:*"
+    // @enum Event
+    EventS3ObjectCreatedPut = "s3:ObjectCreated:Put"
+    // @enum Event
+    EventS3ObjectCreatedPost = "s3:ObjectCreated:Post"
+    // @enum Event
+    EventS3ObjectCreatedCopy = "s3:ObjectCreated:Copy"
+    // @enum Event
+    EventS3ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload"
+    // @enum Event
+    EventS3ObjectRemoved = "s3:ObjectRemoved:*"
+    // @enum Event
+    EventS3ObjectRemovedDelete = "s3:ObjectRemoved:Delete"
+    // @enum Event
+    EventS3ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated"
+)
+
+const (
+    // @enum ExpirationStatus
+    ExpirationStatusEnabled = "Enabled"
+    // @enum ExpirationStatus
+    ExpirationStatusDisabled = "Disabled"
+)
+
+const (
+    // @enum FilterRuleName
+    FilterRuleNamePrefix = "prefix"
+    // @enum FilterRuleName
+    FilterRuleNameSuffix = "suffix"
+)
+
+const (
+    // @enum MFADelete
+    MFADeleteEnabled = "Enabled"
+    // @enum MFADelete
+    MFADeleteDisabled = "Disabled"
+)
+
+const (
+    // @enum MFADeleteStatus
+    MFADeleteStatusEnabled = "Enabled"
+    // @enum MFADeleteStatus
+    MFADeleteStatusDisabled = "Disabled"
+)
+
+const (
+    // @enum MetadataDirective
+    MetadataDirectiveCopy = "COPY"
+    // @enum MetadataDirective
+    MetadataDirectiveReplace = "REPLACE"
+)
+
+const (
+    // @enum ObjectCannedACL
+    ObjectCannedACLPrivate = "private"
+    // @enum ObjectCannedACL
+    ObjectCannedACLPublicRead = "public-read"
+    // @enum ObjectCannedACL
+    ObjectCannedACLPublicReadWrite = "public-read-write"
+    // @enum ObjectCannedACL
+    ObjectCannedACLAuthenticatedRead = "authenticated-read"
+    // @enum ObjectCannedACL
+    ObjectCannedACLAwsExecRead = "aws-exec-read"
+    // @enum ObjectCannedACL
+    ObjectCannedACLBucketOwnerRead = "bucket-owner-read"
+    // @enum ObjectCannedACL
+    ObjectCannedACLBucketOwnerFullControl = "bucket-owner-full-control"
+)
+
+const (
+    // @enum ObjectStorageClass
+    ObjectStorageClassStandard = "STANDARD"
+    // @enum ObjectStorageClass
+    ObjectStorageClassReducedRedundancy = "REDUCED_REDUNDANCY"
+    // @enum ObjectStorageClass
+    ObjectStorageClassGlacier = "GLACIER"
+)
+
+const (
+    // @enum ObjectVersionStorageClass
+    ObjectVersionStorageClassStandard = "STANDARD"
+)
+
+const (
+    // @enum Payer
+    PayerRequester = "Requester"
+    // @enum Payer
+    PayerBucketOwner = "BucketOwner"
+)
+
+const (
+    // @enum Permission
+ PermissionFullControl = "FULL_CONTROL" + // @enum Permission + PermissionWrite = "WRITE" + // @enum Permission + PermissionWriteAcp = "WRITE_ACP" + // @enum Permission + PermissionRead = "READ" + // @enum Permission + PermissionReadAcp = "READ_ACP" +) + +const ( + // @enum Protocol + ProtocolHttp = "http" + // @enum Protocol + ProtocolHttps = "https" +) + +const ( + // @enum ReplicationRuleStatus + ReplicationRuleStatusEnabled = "Enabled" + // @enum ReplicationRuleStatus + ReplicationRuleStatusDisabled = "Disabled" +) + +const ( + // @enum ReplicationStatus + ReplicationStatusComplete = "COMPLETE" + // @enum ReplicationStatus + ReplicationStatusPending = "PENDING" + // @enum ReplicationStatus + ReplicationStatusFailed = "FAILED" + // @enum ReplicationStatus + ReplicationStatusReplica = "REPLICA" +) + +// If present, indicates that the requester was successfully charged for the +// request. +const ( + // @enum RequestCharged + RequestChargedRequester = "requester" +) + +// Confirms that the requester knows that she or he will be charged for the +// request. Bucket owners need not specify this parameter in their requests. +// Documentation on downloading objects from requester pays buckets can be found +// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html +const ( + // @enum RequestPayer + RequestPayerRequester = "requester" +) + +const ( + // @enum ServerSideEncryption + ServerSideEncryptionAes256 = "AES256" + // @enum ServerSideEncryption + ServerSideEncryptionAwsKms = "aws:kms" +) + +const ( + // @enum StorageClass + StorageClassStandard = "STANDARD" + // @enum StorageClass + StorageClassReducedRedundancy = "REDUCED_REDUNDANCY" + // @enum StorageClass + StorageClassStandardIa = "STANDARD_IA" +) + +const ( + // @enum TransitionStorageClass + TransitionStorageClassGlacier = "GLACIER" + // @enum TransitionStorageClass + TransitionStorageClassStandardIa = "STANDARD_IA" +) + +const ( + // @enum Type + TypeCanonicalUser = "CanonicalUser" + // @enum Type + TypeAmazonCustomerByEmail = "AmazonCustomerByEmail" + // @enum Type + TypeGroup = "Group" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go new file mode 100644 index 000000000..c3a2702da --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go @@ -0,0 +1,43 @@ +package s3 + +import ( + "io/ioutil" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +var reBucketLocation = regexp.MustCompile(`>([^<>]+)<\/Location`) + +func buildGetBucketLocation(r *request.Request) { + if r.DataFilled() { + out := r.Data.(*GetBucketLocationOutput) + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed reading response body", err) + return + } + + match := reBucketLocation.FindSubmatch(b) + if len(match) > 1 { + loc := string(match[1]) + out.LocationConstraint = &loc + } + } +} + +func populateLocationConstraint(r *request.Request) { + if r.ParamsFilled() && aws.StringValue(r.Config.Region) != "us-east-1" { + in := r.Params.(*CreateBucketInput) + if in.CreateBucketConfiguration == nil { + r.Params = awsutil.CopyOf(r.Params) + in = r.Params.(*CreateBucketInput) + in.CreateBucketConfiguration = &CreateBucketConfiguration{ + LocationConstraint: r.Config.Region, + } + } + } +} diff --git 
a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location_test.go b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location_test.go
new file mode 100644
index 000000000..8ef61b0e5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location_test.go
@@ -0,0 +1,78 @@
+package s3_test
+
+import (
+    "bytes"
+    "io/ioutil"
+    "net/http"
+    "testing"
+
+    "github.com/aws/aws-sdk-go/aws"
+    "github.com/aws/aws-sdk-go/aws/awsutil"
+    "github.com/aws/aws-sdk-go/aws/request"
+    "github.com/aws/aws-sdk-go/awstesting/unit"
+    "github.com/aws/aws-sdk-go/service/s3"
+    "github.com/stretchr/testify/assert"
+)
+
+var s3LocationTests = []struct {
+    body string
+    loc  string
+}{
+    {``, ``},
+    {`<LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/">EU</LocationConstraint>`, `EU`},
+}
+
+func TestGetBucketLocation(t *testing.T) {
+    for _, test := range s3LocationTests {
+        s := s3.New(unit.Session)
+        s.Handlers.Send.Clear()
+        s.Handlers.Send.PushBack(func(r *request.Request) {
+            reader := ioutil.NopCloser(bytes.NewReader([]byte(test.body)))
+            r.HTTPResponse = &http.Response{StatusCode: 200, Body: reader}
+        })
+
+        resp, err := s.GetBucketLocation(&s3.GetBucketLocationInput{Bucket: aws.String("bucket")})
+        assert.NoError(t, err)
+        if test.loc == "" {
+            assert.Nil(t, resp.LocationConstraint)
+        } else {
+            assert.Equal(t, test.loc, *resp.LocationConstraint)
+        }
+    }
+}
+
+func TestPopulateLocationConstraint(t *testing.T) {
+    s := s3.New(unit.Session)
+    in := &s3.CreateBucketInput{
+        Bucket: aws.String("bucket"),
+    }
+    req, _ := s.CreateBucketRequest(in)
+    err := req.Build()
+    assert.NoError(t, err)
+    v, _ := awsutil.ValuesAtPath(req.Params, "CreateBucketConfiguration.LocationConstraint")
+    assert.Equal(t, "mock-region", *(v[0].(*string)))
+    assert.Nil(t, in.CreateBucketConfiguration) // don't modify original params
+}
+
+func TestNoPopulateLocationConstraintIfProvided(t *testing.T) {
+    s := s3.New(unit.Session)
+    req, _ := s.CreateBucketRequest(&s3.CreateBucketInput{
+        Bucket: aws.String("bucket"),
+        CreateBucketConfiguration: &s3.CreateBucketConfiguration{},
+    })
+    err := req.Build()
+    assert.NoError(t, err)
+    v, _ := awsutil.ValuesAtPath(req.Params, "CreateBucketConfiguration.LocationConstraint")
+    assert.Equal(t, 0, len(v))
+}
+
+func TestNoPopulateLocationConstraintIfClassic(t *testing.T) {
+    s := s3.New(unit.Session, &aws.Config{Region: aws.String("us-east-1")})
+    req, _ := s.CreateBucketRequest(&s3.CreateBucketInput{
+        Bucket: aws.String("bucket"),
+    })
+    err := req.Build()
+    assert.NoError(t, err)
+    v, _ := awsutil.ValuesAtPath(req.Params, "CreateBucketConfiguration.LocationConstraint")
+    assert.Equal(t, 0, len(v))
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go b/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go
new file mode 100644
index 000000000..9fc5df94d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go
@@ -0,0 +1,36 @@
+package s3
+
+import (
+    "crypto/md5"
+    "encoding/base64"
+    "io"
+
+    "github.com/aws/aws-sdk-go/aws/awserr"
+    "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// contentMD5 computes and sets the HTTP Content-MD5 header for requests that
+// require it.
+func contentMD5(r *request.Request) {
+    h := md5.New()
+
+    // Hash the body. Seek back to the first position after reading to reset
+    // the body for transmission. Copy errors may be assumed to be from the
+    // body.
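+    // r.Body is an io.ReadSeeker, so the Seek(0, 0) below rewinds it to the
+    // start of the payload; without it the request would be sent with an
+    // already-consumed (empty) body after the hash pass.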
+	_, err := io.Copy(h, r.Body)
+	if err != nil {
+		r.Error = awserr.New("ContentMD5", "failed to read body", err)
+		return
+	}
+	_, err = r.Body.Seek(0, 0)
+	if err != nil {
+		r.Error = awserr.New("ContentMD5", "failed to seek body", err)
+		return
+	}
+
+	// Encode the MD5 checksum in base64 and set the request header.
+	sum := h.Sum(nil)
+	sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum)))
+	base64.StdEncoding.Encode(sum64, sum)
+	r.HTTPRequest.Header.Set("Content-MD5", string(sum64))
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
new file mode 100644
index 000000000..846334723
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
@@ -0,0 +1,46 @@
+package s3
+
+import (
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+func init() {
+	initClient = defaultInitClientFn
+	initRequest = defaultInitRequestFn
+}
+
+func defaultInitClientFn(c *client.Client) {
+	// Support building custom endpoints based on config
+	c.Handlers.Build.PushFront(updateEndpointForS3Config)
+
+	// Require SSL when using SSE keys
+	c.Handlers.Validate.PushBack(validateSSERequiresSSL)
+	c.Handlers.Build.PushBack(computeSSEKeys)
+
+	// S3 uses custom error unmarshaling logic
+	c.Handlers.UnmarshalError.Clear()
+	c.Handlers.UnmarshalError.PushBack(unmarshalError)
+}
+
+func defaultInitRequestFn(r *request.Request) {
+	// Add request handlers for specific platforms.
+	// e.g. 100-continue support for PUT requests using Go 1.6
+	platformRequestHandlers(r)
+
+	switch r.Operation.Name {
+	case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy,
+		opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration,
+		opPutBucketReplication:
+		// These S3 operations require Content-MD5 to be set
+		r.Handlers.Build.PushBack(contentMD5)
+	case opGetBucketLocation:
+		// GetBucketLocation has custom parsing logic
+		r.Handlers.Unmarshal.PushFront(buildGetBucketLocation)
+	case opCreateBucket:
+		// Auto-populate LocationConstraint with current region
+		r.Handlers.Validate.PushFront(populateLocationConstraint)
+	case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload:
+		r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError)
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations_test.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations_test.go
new file mode 100644
index 000000000..20a62d7ac
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations_test.go
@@ -0,0 +1,105 @@
+package s3_test
+
+import (
+	"crypto/md5"
+	"encoding/base64"
+	"io/ioutil"
+	"testing"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/awstesting/unit"
+	"github.com/aws/aws-sdk-go/service/s3"
+	"github.com/stretchr/testify/assert"
+)
+
+func assertMD5(t *testing.T, req *request.Request) {
+	err := req.Build()
+	assert.NoError(t, err)
+
+	b, _ := ioutil.ReadAll(req.HTTPRequest.Body)
+	out := md5.Sum(b)
+	assert.NotEmpty(t, b)
+	assert.Equal(t, base64.StdEncoding.EncodeToString(out[:]), req.HTTPRequest.Header.Get("Content-MD5"))
+}
+
+func TestMD5InPutBucketCors(t *testing.T) {
+	svc := s3.New(unit.Session)
+	req, _ := svc.PutBucketCorsRequest(&s3.PutBucketCorsInput{
+		Bucket: aws.String("bucketname"),
+		CORSConfiguration: &s3.CORSConfiguration{
+			CORSRules: []*s3.CORSRule{
+				{
+					AllowedMethods: []*string{aws.String("GET")},
+					AllowedOrigins:
[]*string{aws.String("*")}, + }, + }, + }, + }) + assertMD5(t, req) +} + +func TestMD5InPutBucketLifecycle(t *testing.T) { + svc := s3.New(unit.Session) + req, _ := svc.PutBucketLifecycleRequest(&s3.PutBucketLifecycleInput{ + Bucket: aws.String("bucketname"), + LifecycleConfiguration: &s3.LifecycleConfiguration{ + Rules: []*s3.Rule{ + { + ID: aws.String("ID"), + Prefix: aws.String("Prefix"), + Status: aws.String("Enabled"), + }, + }, + }, + }) + assertMD5(t, req) +} + +func TestMD5InPutBucketPolicy(t *testing.T) { + svc := s3.New(unit.Session) + req, _ := svc.PutBucketPolicyRequest(&s3.PutBucketPolicyInput{ + Bucket: aws.String("bucketname"), + Policy: aws.String("{}"), + }) + assertMD5(t, req) +} + +func TestMD5InPutBucketTagging(t *testing.T) { + svc := s3.New(unit.Session) + req, _ := svc.PutBucketTaggingRequest(&s3.PutBucketTaggingInput{ + Bucket: aws.String("bucketname"), + Tagging: &s3.Tagging{ + TagSet: []*s3.Tag{ + {Key: aws.String("KEY"), Value: aws.String("VALUE")}, + }, + }, + }) + assertMD5(t, req) +} + +func TestMD5InDeleteObjects(t *testing.T) { + svc := s3.New(unit.Session) + req, _ := svc.DeleteObjectsRequest(&s3.DeleteObjectsInput{ + Bucket: aws.String("bucketname"), + Delete: &s3.Delete{ + Objects: []*s3.ObjectIdentifier{ + {Key: aws.String("key")}, + }, + }, + }) + assertMD5(t, req) +} + +func TestMD5InPutBucketLifecycleConfiguration(t *testing.T) { + svc := s3.New(unit.Session) + req, _ := svc.PutBucketLifecycleConfigurationRequest(&s3.PutBucketLifecycleConfigurationInput{ + Bucket: aws.String("bucketname"), + LifecycleConfiguration: &s3.BucketLifecycleConfiguration{ + Rules: []*s3.LifecycleRule{ + {Prefix: aws.String("prefix"), Status: aws.String(s3.ExpirationStatusEnabled)}, + }, + }, + }) + assertMD5(t, req) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/s3/examples_test.go new file mode 100644 index 000000000..1f613f575 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/examples_test.go @@ -0,0 +1,1675 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package s3_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleS3_AbortMultipartUpload() { + svc := s3.New(session.New()) + + params := &s3.AbortMultipartUploadInput{ + Bucket: aws.String("BucketName"), // Required + Key: aws.String("ObjectKey"), // Required + UploadId: aws.String("MultipartUploadId"), // Required + RequestPayer: aws.String("RequestPayer"), + } + resp, err := svc.AbortMultipartUpload(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_CompleteMultipartUpload() { + svc := s3.New(session.New()) + + params := &s3.CompleteMultipartUploadInput{ + Bucket: aws.String("BucketName"), // Required + Key: aws.String("ObjectKey"), // Required + UploadId: aws.String("MultipartUploadId"), // Required + MultipartUpload: &s3.CompletedMultipartUpload{ + Parts: []*s3.CompletedPart{ + { // Required + ETag: aws.String("ETag"), + PartNumber: aws.Int64(1), + }, + // More values... 
+ }, + }, + RequestPayer: aws.String("RequestPayer"), + } + resp, err := svc.CompleteMultipartUpload(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_CopyObject() { + svc := s3.New(session.New()) + + params := &s3.CopyObjectInput{ + Bucket: aws.String("BucketName"), // Required + CopySource: aws.String("CopySource"), // Required + Key: aws.String("ObjectKey"), // Required + ACL: aws.String("ObjectCannedACL"), + CacheControl: aws.String("CacheControl"), + ContentDisposition: aws.String("ContentDisposition"), + ContentEncoding: aws.String("ContentEncoding"), + ContentLanguage: aws.String("ContentLanguage"), + ContentType: aws.String("ContentType"), + CopySourceIfMatch: aws.String("CopySourceIfMatch"), + CopySourceIfModifiedSince: aws.Time(time.Now()), + CopySourceIfNoneMatch: aws.String("CopySourceIfNoneMatch"), + CopySourceIfUnmodifiedSince: aws.Time(time.Now()), + CopySourceSSECustomerAlgorithm: aws.String("CopySourceSSECustomerAlgorithm"), + CopySourceSSECustomerKey: aws.String("CopySourceSSECustomerKey"), + CopySourceSSECustomerKeyMD5: aws.String("CopySourceSSECustomerKeyMD5"), + Expires: aws.Time(time.Now()), + GrantFullControl: aws.String("GrantFullControl"), + GrantRead: aws.String("GrantRead"), + GrantReadACP: aws.String("GrantReadACP"), + GrantWriteACP: aws.String("GrantWriteACP"), + Metadata: map[string]*string{ + "Key": aws.String("MetadataValue"), // Required + // More values... + }, + MetadataDirective: aws.String("MetadataDirective"), + RequestPayer: aws.String("RequestPayer"), + SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"), + SSECustomerKey: aws.String("SSECustomerKey"), + SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"), + SSEKMSKeyId: aws.String("SSEKMSKeyId"), + ServerSideEncryption: aws.String("ServerSideEncryption"), + StorageClass: aws.String("StorageClass"), + WebsiteRedirectLocation: aws.String("WebsiteRedirectLocation"), + } + resp, err := svc.CopyObject(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_CreateBucket() { + svc := s3.New(session.New()) + + params := &s3.CreateBucketInput{ + Bucket: aws.String("BucketName"), // Required + ACL: aws.String("BucketCannedACL"), + CreateBucketConfiguration: &s3.CreateBucketConfiguration{ + LocationConstraint: aws.String("BucketLocationConstraint"), + }, + GrantFullControl: aws.String("GrantFullControl"), + GrantRead: aws.String("GrantRead"), + GrantReadACP: aws.String("GrantReadACP"), + GrantWrite: aws.String("GrantWrite"), + GrantWriteACP: aws.String("GrantWriteACP"), + } + resp, err := svc.CreateBucket(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleS3_CreateMultipartUpload() { + svc := s3.New(session.New()) + + params := &s3.CreateMultipartUploadInput{ + Bucket: aws.String("BucketName"), // Required + Key: aws.String("ObjectKey"), // Required + ACL: aws.String("ObjectCannedACL"), + CacheControl: aws.String("CacheControl"), + ContentDisposition: aws.String("ContentDisposition"), + ContentEncoding: aws.String("ContentEncoding"), + ContentLanguage: aws.String("ContentLanguage"), + ContentType: aws.String("ContentType"), + Expires: aws.Time(time.Now()), + GrantFullControl: aws.String("GrantFullControl"), + GrantRead: aws.String("GrantRead"), + GrantReadACP: aws.String("GrantReadACP"), + GrantWriteACP: aws.String("GrantWriteACP"), + Metadata: map[string]*string{ + "Key": aws.String("MetadataValue"), // Required + // More values... + }, + RequestPayer: aws.String("RequestPayer"), + SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"), + SSECustomerKey: aws.String("SSECustomerKey"), + SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"), + SSEKMSKeyId: aws.String("SSEKMSKeyId"), + ServerSideEncryption: aws.String("ServerSideEncryption"), + StorageClass: aws.String("StorageClass"), + WebsiteRedirectLocation: aws.String("WebsiteRedirectLocation"), + } + resp, err := svc.CreateMultipartUpload(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_DeleteBucket() { + svc := s3.New(session.New()) + + params := &s3.DeleteBucketInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.DeleteBucket(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_DeleteBucketCors() { + svc := s3.New(session.New()) + + params := &s3.DeleteBucketCorsInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.DeleteBucketCors(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_DeleteBucketLifecycle() { + svc := s3.New(session.New()) + + params := &s3.DeleteBucketLifecycleInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.DeleteBucketLifecycle(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_DeleteBucketPolicy() { + svc := s3.New(session.New()) + + params := &s3.DeleteBucketPolicyInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.DeleteBucketPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleS3_DeleteBucketReplication() { + svc := s3.New(session.New()) + + params := &s3.DeleteBucketReplicationInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.DeleteBucketReplication(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_DeleteBucketTagging() { + svc := s3.New(session.New()) + + params := &s3.DeleteBucketTaggingInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.DeleteBucketTagging(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_DeleteBucketWebsite() { + svc := s3.New(session.New()) + + params := &s3.DeleteBucketWebsiteInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.DeleteBucketWebsite(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_DeleteObject() { + svc := s3.New(session.New()) + + params := &s3.DeleteObjectInput{ + Bucket: aws.String("BucketName"), // Required + Key: aws.String("ObjectKey"), // Required + MFA: aws.String("MFA"), + RequestPayer: aws.String("RequestPayer"), + VersionId: aws.String("ObjectVersionId"), + } + resp, err := svc.DeleteObject(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_DeleteObjects() { + svc := s3.New(session.New()) + + params := &s3.DeleteObjectsInput{ + Bucket: aws.String("BucketName"), // Required + Delete: &s3.Delete{ // Required + Objects: []*s3.ObjectIdentifier{ // Required + { // Required + Key: aws.String("ObjectKey"), // Required + VersionId: aws.String("ObjectVersionId"), + }, + // More values... + }, + Quiet: aws.Bool(true), + }, + MFA: aws.String("MFA"), + RequestPayer: aws.String("RequestPayer"), + } + resp, err := svc.DeleteObjects(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetBucketAccelerateConfiguration() { + svc := s3.New(session.New()) + + params := &s3.GetBucketAccelerateConfigurationInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketAccelerateConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetBucketAcl() { + svc := s3.New(session.New()) + + params := &s3.GetBucketAclInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketAcl(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleS3_GetBucketCors() { + svc := s3.New(session.New()) + + params := &s3.GetBucketCorsInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketCors(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetBucketLifecycle() { + svc := s3.New(session.New()) + + params := &s3.GetBucketLifecycleInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketLifecycle(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetBucketLifecycleConfiguration() { + svc := s3.New(session.New()) + + params := &s3.GetBucketLifecycleConfigurationInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketLifecycleConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetBucketLocation() { + svc := s3.New(session.New()) + + params := &s3.GetBucketLocationInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketLocation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetBucketLogging() { + svc := s3.New(session.New()) + + params := &s3.GetBucketLoggingInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketLogging(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetBucketNotification() { + svc := s3.New(session.New()) + + params := &s3.GetBucketNotificationConfigurationRequest{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketNotification(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetBucketNotificationConfiguration() { + svc := s3.New(session.New()) + + params := &s3.GetBucketNotificationConfigurationRequest{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketNotificationConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetBucketPolicy() { + svc := s3.New(session.New()) + + params := &s3.GetBucketPolicyInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleS3_GetBucketReplication() { + svc := s3.New(session.New()) + + params := &s3.GetBucketReplicationInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketReplication(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetBucketRequestPayment() { + svc := s3.New(session.New()) + + params := &s3.GetBucketRequestPaymentInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketRequestPayment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetBucketTagging() { + svc := s3.New(session.New()) + + params := &s3.GetBucketTaggingInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketTagging(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetBucketVersioning() { + svc := s3.New(session.New()) + + params := &s3.GetBucketVersioningInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketVersioning(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetBucketWebsite() { + svc := s3.New(session.New()) + + params := &s3.GetBucketWebsiteInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketWebsite(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetObject() { + svc := s3.New(session.New()) + + params := &s3.GetObjectInput{ + Bucket: aws.String("BucketName"), // Required + Key: aws.String("ObjectKey"), // Required + IfMatch: aws.String("IfMatch"), + IfModifiedSince: aws.Time(time.Now()), + IfNoneMatch: aws.String("IfNoneMatch"), + IfUnmodifiedSince: aws.Time(time.Now()), + Range: aws.String("Range"), + RequestPayer: aws.String("RequestPayer"), + ResponseCacheControl: aws.String("ResponseCacheControl"), + ResponseContentDisposition: aws.String("ResponseContentDisposition"), + ResponseContentEncoding: aws.String("ResponseContentEncoding"), + ResponseContentLanguage: aws.String("ResponseContentLanguage"), + ResponseContentType: aws.String("ResponseContentType"), + ResponseExpires: aws.Time(time.Now()), + SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"), + SSECustomerKey: aws.String("SSECustomerKey"), + SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"), + VersionId: aws.String("ObjectVersionId"), + } + resp, err := svc.GetObject(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleS3_GetObjectAcl() { + svc := s3.New(session.New()) + + params := &s3.GetObjectAclInput{ + Bucket: aws.String("BucketName"), // Required + Key: aws.String("ObjectKey"), // Required + RequestPayer: aws.String("RequestPayer"), + VersionId: aws.String("ObjectVersionId"), + } + resp, err := svc.GetObjectAcl(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetObjectTorrent() { + svc := s3.New(session.New()) + + params := &s3.GetObjectTorrentInput{ + Bucket: aws.String("BucketName"), // Required + Key: aws.String("ObjectKey"), // Required + RequestPayer: aws.String("RequestPayer"), + } + resp, err := svc.GetObjectTorrent(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_HeadBucket() { + svc := s3.New(session.New()) + + params := &s3.HeadBucketInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.HeadBucket(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_HeadObject() { + svc := s3.New(session.New()) + + params := &s3.HeadObjectInput{ + Bucket: aws.String("BucketName"), // Required + Key: aws.String("ObjectKey"), // Required + IfMatch: aws.String("IfMatch"), + IfModifiedSince: aws.Time(time.Now()), + IfNoneMatch: aws.String("IfNoneMatch"), + IfUnmodifiedSince: aws.Time(time.Now()), + Range: aws.String("Range"), + RequestPayer: aws.String("RequestPayer"), + SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"), + SSECustomerKey: aws.String("SSECustomerKey"), + SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"), + VersionId: aws.String("ObjectVersionId"), + } + resp, err := svc.HeadObject(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_ListBuckets() { + svc := s3.New(session.New()) + + var params *s3.ListBucketsInput + resp, err := svc.ListBuckets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_ListMultipartUploads() { + svc := s3.New(session.New()) + + params := &s3.ListMultipartUploadsInput{ + Bucket: aws.String("BucketName"), // Required + Delimiter: aws.String("Delimiter"), + EncodingType: aws.String("EncodingType"), + KeyMarker: aws.String("KeyMarker"), + MaxUploads: aws.Int64(1), + Prefix: aws.String("Prefix"), + UploadIdMarker: aws.String("UploadIdMarker"), + } + resp, err := svc.ListMultipartUploads(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleS3_ListObjectVersions() { + svc := s3.New(session.New()) + + params := &s3.ListObjectVersionsInput{ + Bucket: aws.String("BucketName"), // Required + Delimiter: aws.String("Delimiter"), + EncodingType: aws.String("EncodingType"), + KeyMarker: aws.String("KeyMarker"), + MaxKeys: aws.Int64(1), + Prefix: aws.String("Prefix"), + VersionIdMarker: aws.String("VersionIdMarker"), + } + resp, err := svc.ListObjectVersions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_ListObjects() { + svc := s3.New(session.New()) + + params := &s3.ListObjectsInput{ + Bucket: aws.String("BucketName"), // Required + Delimiter: aws.String("Delimiter"), + EncodingType: aws.String("EncodingType"), + Marker: aws.String("Marker"), + MaxKeys: aws.Int64(1), + Prefix: aws.String("Prefix"), + } + resp, err := svc.ListObjects(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_ListObjectsV2() { + svc := s3.New(session.New()) + + params := &s3.ListObjectsV2Input{ + Bucket: aws.String("BucketName"), // Required + ContinuationToken: aws.String("Token"), + Delimiter: aws.String("Delimiter"), + EncodingType: aws.String("EncodingType"), + FetchOwner: aws.Bool(true), + MaxKeys: aws.Int64(1), + Prefix: aws.String("Prefix"), + StartAfter: aws.String("StartAfter"), + } + resp, err := svc.ListObjectsV2(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_ListParts() { + svc := s3.New(session.New()) + + params := &s3.ListPartsInput{ + Bucket: aws.String("BucketName"), // Required + Key: aws.String("ObjectKey"), // Required + UploadId: aws.String("MultipartUploadId"), // Required + MaxParts: aws.Int64(1), + PartNumberMarker: aws.Int64(1), + RequestPayer: aws.String("RequestPayer"), + } + resp, err := svc.ListParts(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_PutBucketAccelerateConfiguration() { + svc := s3.New(session.New()) + + params := &s3.PutBucketAccelerateConfigurationInput{ + AccelerateConfiguration: &s3.AccelerateConfiguration{ // Required + Status: aws.String("BucketAccelerateStatus"), + }, + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.PutBucketAccelerateConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleS3_PutBucketAcl() { + svc := s3.New(session.New()) + + params := &s3.PutBucketAclInput{ + Bucket: aws.String("BucketName"), // Required + ACL: aws.String("BucketCannedACL"), + AccessControlPolicy: &s3.AccessControlPolicy{ + Grants: []*s3.Grant{ + { // Required + Grantee: &s3.Grantee{ + Type: aws.String("Type"), // Required + DisplayName: aws.String("DisplayName"), + EmailAddress: aws.String("EmailAddress"), + ID: aws.String("ID"), + URI: aws.String("URI"), + }, + Permission: aws.String("Permission"), + }, + // More values... + }, + Owner: &s3.Owner{ + DisplayName: aws.String("DisplayName"), + ID: aws.String("ID"), + }, + }, + GrantFullControl: aws.String("GrantFullControl"), + GrantRead: aws.String("GrantRead"), + GrantReadACP: aws.String("GrantReadACP"), + GrantWrite: aws.String("GrantWrite"), + GrantWriteACP: aws.String("GrantWriteACP"), + } + resp, err := svc.PutBucketAcl(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_PutBucketCors() { + svc := s3.New(session.New()) + + params := &s3.PutBucketCorsInput{ + Bucket: aws.String("BucketName"), // Required + CORSConfiguration: &s3.CORSConfiguration{ // Required + CORSRules: []*s3.CORSRule{ // Required + { // Required + AllowedMethods: []*string{ // Required + aws.String("AllowedMethod"), // Required + // More values... + }, + AllowedOrigins: []*string{ // Required + aws.String("AllowedOrigin"), // Required + // More values... + }, + AllowedHeaders: []*string{ + aws.String("AllowedHeader"), // Required + // More values... + }, + ExposeHeaders: []*string{ + aws.String("ExposeHeader"), // Required + // More values... + }, + MaxAgeSeconds: aws.Int64(1), + }, + // More values... + }, + }, + } + resp, err := svc.PutBucketCors(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_PutBucketLifecycle() { + svc := s3.New(session.New()) + + params := &s3.PutBucketLifecycleInput{ + Bucket: aws.String("BucketName"), // Required + LifecycleConfiguration: &s3.LifecycleConfiguration{ + Rules: []*s3.Rule{ // Required + { // Required + Prefix: aws.String("Prefix"), // Required + Status: aws.String("ExpirationStatus"), // Required + AbortIncompleteMultipartUpload: &s3.AbortIncompleteMultipartUpload{ + DaysAfterInitiation: aws.Int64(1), + }, + Expiration: &s3.LifecycleExpiration{ + Date: aws.Time(time.Now()), + Days: aws.Int64(1), + ExpiredObjectDeleteMarker: aws.Bool(true), + }, + ID: aws.String("ID"), + NoncurrentVersionExpiration: &s3.NoncurrentVersionExpiration{ + NoncurrentDays: aws.Int64(1), + }, + NoncurrentVersionTransition: &s3.NoncurrentVersionTransition{ + NoncurrentDays: aws.Int64(1), + StorageClass: aws.String("TransitionStorageClass"), + }, + Transition: &s3.Transition{ + Date: aws.Time(time.Now()), + Days: aws.Int64(1), + StorageClass: aws.String("TransitionStorageClass"), + }, + }, + // More values... + }, + }, + } + resp, err := svc.PutBucketLifecycle(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleS3_PutBucketLifecycleConfiguration() { + svc := s3.New(session.New()) + + params := &s3.PutBucketLifecycleConfigurationInput{ + Bucket: aws.String("BucketName"), // Required + LifecycleConfiguration: &s3.BucketLifecycleConfiguration{ + Rules: []*s3.LifecycleRule{ // Required + { // Required + Prefix: aws.String("Prefix"), // Required + Status: aws.String("ExpirationStatus"), // Required + AbortIncompleteMultipartUpload: &s3.AbortIncompleteMultipartUpload{ + DaysAfterInitiation: aws.Int64(1), + }, + Expiration: &s3.LifecycleExpiration{ + Date: aws.Time(time.Now()), + Days: aws.Int64(1), + ExpiredObjectDeleteMarker: aws.Bool(true), + }, + ID: aws.String("ID"), + NoncurrentVersionExpiration: &s3.NoncurrentVersionExpiration{ + NoncurrentDays: aws.Int64(1), + }, + NoncurrentVersionTransitions: []*s3.NoncurrentVersionTransition{ + { // Required + NoncurrentDays: aws.Int64(1), + StorageClass: aws.String("TransitionStorageClass"), + }, + // More values... + }, + Transitions: []*s3.Transition{ + { // Required + Date: aws.Time(time.Now()), + Days: aws.Int64(1), + StorageClass: aws.String("TransitionStorageClass"), + }, + // More values... + }, + }, + // More values... + }, + }, + } + resp, err := svc.PutBucketLifecycleConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_PutBucketLogging() { + svc := s3.New(session.New()) + + params := &s3.PutBucketLoggingInput{ + Bucket: aws.String("BucketName"), // Required + BucketLoggingStatus: &s3.BucketLoggingStatus{ // Required + LoggingEnabled: &s3.LoggingEnabled{ + TargetBucket: aws.String("TargetBucket"), + TargetGrants: []*s3.TargetGrant{ + { // Required + Grantee: &s3.Grantee{ + Type: aws.String("Type"), // Required + DisplayName: aws.String("DisplayName"), + EmailAddress: aws.String("EmailAddress"), + ID: aws.String("ID"), + URI: aws.String("URI"), + }, + Permission: aws.String("BucketLogsPermission"), + }, + // More values... + }, + TargetPrefix: aws.String("TargetPrefix"), + }, + }, + } + resp, err := svc.PutBucketLogging(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_PutBucketNotification() { + svc := s3.New(session.New()) + + params := &s3.PutBucketNotificationInput{ + Bucket: aws.String("BucketName"), // Required + NotificationConfiguration: &s3.NotificationConfigurationDeprecated{ // Required + CloudFunctionConfiguration: &s3.CloudFunctionConfiguration{ + CloudFunction: aws.String("CloudFunction"), + Event: aws.String("Event"), + Events: []*string{ + aws.String("Event"), // Required + // More values... + }, + Id: aws.String("NotificationId"), + InvocationRole: aws.String("CloudFunctionInvocationRole"), + }, + QueueConfiguration: &s3.QueueConfigurationDeprecated{ + Event: aws.String("Event"), + Events: []*string{ + aws.String("Event"), // Required + // More values... + }, + Id: aws.String("NotificationId"), + Queue: aws.String("QueueArn"), + }, + TopicConfiguration: &s3.TopicConfigurationDeprecated{ + Event: aws.String("Event"), + Events: []*string{ + aws.String("Event"), // Required + // More values... 
+ }, + Id: aws.String("NotificationId"), + Topic: aws.String("TopicArn"), + }, + }, + } + resp, err := svc.PutBucketNotification(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_PutBucketNotificationConfiguration() { + svc := s3.New(session.New()) + + params := &s3.PutBucketNotificationConfigurationInput{ + Bucket: aws.String("BucketName"), // Required + NotificationConfiguration: &s3.NotificationConfiguration{ // Required + LambdaFunctionConfigurations: []*s3.LambdaFunctionConfiguration{ + { // Required + Events: []*string{ // Required + aws.String("Event"), // Required + // More values... + }, + LambdaFunctionArn: aws.String("LambdaFunctionArn"), // Required + Filter: &s3.NotificationConfigurationFilter{ + Key: &s3.KeyFilter{ + FilterRules: []*s3.FilterRule{ + { // Required + Name: aws.String("FilterRuleName"), + Value: aws.String("FilterRuleValue"), + }, + // More values... + }, + }, + }, + Id: aws.String("NotificationId"), + }, + // More values... + }, + QueueConfigurations: []*s3.QueueConfiguration{ + { // Required + Events: []*string{ // Required + aws.String("Event"), // Required + // More values... + }, + QueueArn: aws.String("QueueArn"), // Required + Filter: &s3.NotificationConfigurationFilter{ + Key: &s3.KeyFilter{ + FilterRules: []*s3.FilterRule{ + { // Required + Name: aws.String("FilterRuleName"), + Value: aws.String("FilterRuleValue"), + }, + // More values... + }, + }, + }, + Id: aws.String("NotificationId"), + }, + // More values... + }, + TopicConfigurations: []*s3.TopicConfiguration{ + { // Required + Events: []*string{ // Required + aws.String("Event"), // Required + // More values... + }, + TopicArn: aws.String("TopicArn"), // Required + Filter: &s3.NotificationConfigurationFilter{ + Key: &s3.KeyFilter{ + FilterRules: []*s3.FilterRule{ + { // Required + Name: aws.String("FilterRuleName"), + Value: aws.String("FilterRuleValue"), + }, + // More values... + }, + }, + }, + Id: aws.String("NotificationId"), + }, + // More values... + }, + }, + } + resp, err := svc.PutBucketNotificationConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_PutBucketPolicy() { + svc := s3.New(session.New()) + + params := &s3.PutBucketPolicyInput{ + Bucket: aws.String("BucketName"), // Required + Policy: aws.String("Policy"), // Required + } + resp, err := svc.PutBucketPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleS3_PutBucketReplication() { + svc := s3.New(session.New()) + + params := &s3.PutBucketReplicationInput{ + Bucket: aws.String("BucketName"), // Required + ReplicationConfiguration: &s3.ReplicationConfiguration{ // Required + Role: aws.String("Role"), // Required + Rules: []*s3.ReplicationRule{ // Required + { // Required + Destination: &s3.Destination{ // Required + Bucket: aws.String("BucketName"), // Required + StorageClass: aws.String("StorageClass"), + }, + Prefix: aws.String("Prefix"), // Required + Status: aws.String("ReplicationRuleStatus"), // Required + ID: aws.String("ID"), + }, + // More values... + }, + }, + } + resp, err := svc.PutBucketReplication(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_PutBucketRequestPayment() { + svc := s3.New(session.New()) + + params := &s3.PutBucketRequestPaymentInput{ + Bucket: aws.String("BucketName"), // Required + RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{ // Required + Payer: aws.String("Payer"), // Required + }, + } + resp, err := svc.PutBucketRequestPayment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_PutBucketTagging() { + svc := s3.New(session.New()) + + params := &s3.PutBucketTaggingInput{ + Bucket: aws.String("BucketName"), // Required + Tagging: &s3.Tagging{ // Required + TagSet: []*s3.Tag{ // Required + { // Required + Key: aws.String("ObjectKey"), // Required + Value: aws.String("Value"), // Required + }, + // More values... + }, + }, + } + resp, err := svc.PutBucketTagging(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_PutBucketVersioning() { + svc := s3.New(session.New()) + + params := &s3.PutBucketVersioningInput{ + Bucket: aws.String("BucketName"), // Required + VersioningConfiguration: &s3.VersioningConfiguration{ // Required + MFADelete: aws.String("MFADelete"), + Status: aws.String("BucketVersioningStatus"), + }, + MFA: aws.String("MFA"), + } + resp, err := svc.PutBucketVersioning(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleS3_PutBucketWebsite() { + svc := s3.New(session.New()) + + params := &s3.PutBucketWebsiteInput{ + Bucket: aws.String("BucketName"), // Required + WebsiteConfiguration: &s3.WebsiteConfiguration{ // Required + ErrorDocument: &s3.ErrorDocument{ + Key: aws.String("ObjectKey"), // Required + }, + IndexDocument: &s3.IndexDocument{ + Suffix: aws.String("Suffix"), // Required + }, + RedirectAllRequestsTo: &s3.RedirectAllRequestsTo{ + HostName: aws.String("HostName"), // Required + Protocol: aws.String("Protocol"), + }, + RoutingRules: []*s3.RoutingRule{ + { // Required + Redirect: &s3.Redirect{ // Required + HostName: aws.String("HostName"), + HttpRedirectCode: aws.String("HttpRedirectCode"), + Protocol: aws.String("Protocol"), + ReplaceKeyPrefixWith: aws.String("ReplaceKeyPrefixWith"), + ReplaceKeyWith: aws.String("ReplaceKeyWith"), + }, + Condition: &s3.Condition{ + HttpErrorCodeReturnedEquals: aws.String("HttpErrorCodeReturnedEquals"), + KeyPrefixEquals: aws.String("KeyPrefixEquals"), + }, + }, + // More values... + }, + }, + } + resp, err := svc.PutBucketWebsite(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_PutObject() { + svc := s3.New(session.New()) + + params := &s3.PutObjectInput{ + Bucket: aws.String("BucketName"), // Required + Key: aws.String("ObjectKey"), // Required + ACL: aws.String("ObjectCannedACL"), + Body: bytes.NewReader([]byte("PAYLOAD")), + CacheControl: aws.String("CacheControl"), + ContentDisposition: aws.String("ContentDisposition"), + ContentEncoding: aws.String("ContentEncoding"), + ContentLanguage: aws.String("ContentLanguage"), + ContentLength: aws.Int64(1), + ContentType: aws.String("ContentType"), + Expires: aws.Time(time.Now()), + GrantFullControl: aws.String("GrantFullControl"), + GrantRead: aws.String("GrantRead"), + GrantReadACP: aws.String("GrantReadACP"), + GrantWriteACP: aws.String("GrantWriteACP"), + Metadata: map[string]*string{ + "Key": aws.String("MetadataValue"), // Required + // More values... + }, + RequestPayer: aws.String("RequestPayer"), + SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"), + SSECustomerKey: aws.String("SSECustomerKey"), + SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"), + SSEKMSKeyId: aws.String("SSEKMSKeyId"), + ServerSideEncryption: aws.String("ServerSideEncryption"), + StorageClass: aws.String("StorageClass"), + WebsiteRedirectLocation: aws.String("WebsiteRedirectLocation"), + } + resp, err := svc.PutObject(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_PutObjectAcl() { + svc := s3.New(session.New()) + + params := &s3.PutObjectAclInput{ + Bucket: aws.String("BucketName"), // Required + Key: aws.String("ObjectKey"), // Required + ACL: aws.String("ObjectCannedACL"), + AccessControlPolicy: &s3.AccessControlPolicy{ + Grants: []*s3.Grant{ + { // Required + Grantee: &s3.Grantee{ + Type: aws.String("Type"), // Required + DisplayName: aws.String("DisplayName"), + EmailAddress: aws.String("EmailAddress"), + ID: aws.String("ID"), + URI: aws.String("URI"), + }, + Permission: aws.String("Permission"), + }, + // More values... 
+ }, + Owner: &s3.Owner{ + DisplayName: aws.String("DisplayName"), + ID: aws.String("ID"), + }, + }, + GrantFullControl: aws.String("GrantFullControl"), + GrantRead: aws.String("GrantRead"), + GrantReadACP: aws.String("GrantReadACP"), + GrantWrite: aws.String("GrantWrite"), + GrantWriteACP: aws.String("GrantWriteACP"), + RequestPayer: aws.String("RequestPayer"), + VersionId: aws.String("ObjectVersionId"), + } + resp, err := svc.PutObjectAcl(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_RestoreObject() { + svc := s3.New(session.New()) + + params := &s3.RestoreObjectInput{ + Bucket: aws.String("BucketName"), // Required + Key: aws.String("ObjectKey"), // Required + RequestPayer: aws.String("RequestPayer"), + RestoreRequest: &s3.RestoreRequest{ + Days: aws.Int64(1), // Required + }, + VersionId: aws.String("ObjectVersionId"), + } + resp, err := svc.RestoreObject(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_UploadPart() { + svc := s3.New(session.New()) + + params := &s3.UploadPartInput{ + Bucket: aws.String("BucketName"), // Required + Key: aws.String("ObjectKey"), // Required + PartNumber: aws.Int64(1), // Required + UploadId: aws.String("MultipartUploadId"), // Required + Body: bytes.NewReader([]byte("PAYLOAD")), + ContentLength: aws.Int64(1), + RequestPayer: aws.String("RequestPayer"), + SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"), + SSECustomerKey: aws.String("SSECustomerKey"), + SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"), + } + resp, err := svc.UploadPart(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_UploadPartCopy() { + svc := s3.New(session.New()) + + params := &s3.UploadPartCopyInput{ + Bucket: aws.String("BucketName"), // Required + CopySource: aws.String("CopySource"), // Required + Key: aws.String("ObjectKey"), // Required + PartNumber: aws.Int64(1), // Required + UploadId: aws.String("MultipartUploadId"), // Required + CopySourceIfMatch: aws.String("CopySourceIfMatch"), + CopySourceIfModifiedSince: aws.Time(time.Now()), + CopySourceIfNoneMatch: aws.String("CopySourceIfNoneMatch"), + CopySourceIfUnmodifiedSince: aws.Time(time.Now()), + CopySourceRange: aws.String("CopySourceRange"), + CopySourceSSECustomerAlgorithm: aws.String("CopySourceSSECustomerAlgorithm"), + CopySourceSSECustomerKey: aws.String("CopySourceSSECustomerKey"), + CopySourceSSECustomerKeyMD5: aws.String("CopySourceSSECustomerKeyMD5"), + RequestPayer: aws.String("RequestPayer"), + SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"), + SSECustomerKey: aws.String("SSECustomerKey"), + SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"), + } + resp, err := svc.UploadPartCopy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+	fmt.Println(resp)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
new file mode 100644
index 000000000..517292903
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
@@ -0,0 +1,165 @@
+package s3
+
+import (
+	"fmt"
+	"net/url"
+	"regexp"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// operationBlacklist is a list of operation names with which a request
+// handler should not be executed.
+type operationBlacklist []string
+
+// Continue will return true if the Request's operation name is not
+// in the blacklist. False otherwise.
+func (b operationBlacklist) Continue(r *request.Request) bool {
+	for i := 0; i < len(b); i++ {
+		if b[i] == r.Operation.Name {
+			return false
+		}
+	}
+	return true
+}
+
+var accelerateOpBlacklist = operationBlacklist{
+	opListBuckets, opCreateBucket, opDeleteBucket,
+}
+
+// Request handler to automatically add the bucket name to the endpoint domain
+// if possible. This style of addressing is valid for all bucket names that
+// are DNS compatible and do not contain "."
+func updateEndpointForS3Config(r *request.Request) {
+	forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle)
+	accelerate := aws.BoolValue(r.Config.S3UseAccelerate)
+
+	if accelerate && accelerateOpBlacklist.Continue(r) {
+		if forceHostStyle {
+			if r.Config.Logger != nil {
+				r.Config.Logger.Log("ERROR: aws.Config.S3UseAccelerate is not compatible with aws.Config.S3ForcePathStyle, ignoring S3ForcePathStyle.")
+			}
+		}
+		updateEndpointForAccelerate(r)
+	} else if !forceHostStyle && r.Operation.Name != opGetBucketLocation {
+		updateEndpointForHostStyle(r)
+	}
+}
+
+func updateEndpointForHostStyle(r *request.Request) {
+	bucket, ok := bucketNameFromReqParams(r.Params)
+	if !ok {
+		// Ignore operation requests if the bucket name was not provided;
+		// if this is an input validation error the validation handler
+		// will report it.
+		return
+	}
+
+	if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) {
+		// bucket name must be valid to put into the host
+		return
+	}
+
+	moveBucketToHost(r.HTTPRequest.URL, bucket)
+}
+
+func updateEndpointForAccelerate(r *request.Request) {
+	bucket, ok := bucketNameFromReqParams(r.Params)
+	if !ok {
+		// Ignore operation requests if the bucket name was not provided;
+		// if this is an input validation error the validation handler
+		// will report it.
+		return
+	}
+
+	if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) {
+		r.Error = awserr.New("InvalidParameterException",
+			fmt.Sprintf("bucket name %s is not compatible with S3 Accelerate", bucket),
+			nil)
+		return
+	}
+
+	// Change endpoint from s3(-[a-z0-9-]+)?.amazonaws.com to s3-accelerate.amazonaws.com
+	r.HTTPRequest.URL.Host = replaceHostRegion(r.HTTPRequest.URL.Host, "accelerate")
+	moveBucketToHost(r.HTTPRequest.URL, bucket)
+}
+
+// Attempts to retrieve the bucket name from the request input parameters.
+// If no bucket is found, or the field is empty "", false will be returned.
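+// For example, a *ListObjectsInput whose Bucket is set to "b" yields
+// ("b", true), while a nil or empty Bucket yields ("", false).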
+func bucketNameFromReqParams(params interface{}) (string, bool) {
+	b, _ := awsutil.ValuesAtPath(params, "Bucket")
+	if len(b) == 0 {
+		return "", false
+	}
+
+	if bucket, ok := b[0].(*string); ok {
+		if bucketStr := aws.StringValue(bucket); bucketStr != "" {
+			return bucketStr, true
+		}
+	}
+
+	return "", false
+}
+
+// hostCompatibleBucketName returns true if the request should
+// put the bucket in the host. This is false if S3ForcePathStyle is
+// explicitly set or if the bucket is not DNS compatible.
+func hostCompatibleBucketName(u *url.URL, bucket string) bool {
+	// Bucket might be DNS compatible but dots in the hostname will fail
+	// certificate validation, so do not use host-style.
+	if u.Scheme == "https" && strings.Contains(bucket, ".") {
+		return false
+	}
+
+	// if the bucket is DNS compatible
+	return dnsCompatibleBucketName(bucket)
+}
+
+var reDomain = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
+var reIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`)
+
+// dnsCompatibleBucketName returns true if the bucket name is DNS compatible.
+// Buckets created outside of the classic region MUST be DNS compatible.
+func dnsCompatibleBucketName(bucket string) bool {
+	return reDomain.MatchString(bucket) &&
+		!reIPAddress.MatchString(bucket) &&
+		!strings.Contains(bucket, "..")
+}
+
+// moveBucketToHost moves the bucket name from the URI path to URL host.
+func moveBucketToHost(u *url.URL, bucket string) {
+	u.Host = bucket + "." + u.Host
+	u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1)
+	if u.Path == "" {
+		u.Path = "/"
+	}
+}
+
+const s3HostPrefix = "s3"
+
+// replaceHostRegion replaces the S3 region string in the host with the
+// value provided. If v is empty the host prefix returned will be s3.
+func replaceHostRegion(host, v string) string {
+	if !strings.HasPrefix(host, s3HostPrefix) {
+		return host
+	}
+
+	suffix := host[len(s3HostPrefix):]
+	for i := len(s3HostPrefix); i < len(host); i++ {
+		if host[i] == '.' {
+			// Trim until the '.', leaving it in place.
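+			// e.g. for host "s3-us-west-2.amazonaws.com" the suffix is
+			// ".amazonaws.com", so replaceHostRegion(host, "accelerate")
+			// returns "s3-accelerate.amazonaws.com".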
+ suffix = host[i:] + break + } + } + + if len(v) == 0 { + return fmt.Sprintf("s3%s", suffix) + } + + return fmt.Sprintf("s3-%s%s", v, suffix) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket_test.go b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket_test.go new file mode 100644 index 000000000..faf0a9edc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket_test.go @@ -0,0 +1,103 @@ +package s3_test + +import ( + "net/url" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/s3" +) + +type s3BucketTest struct { + bucket string + url string + errCode string +} + +var ( + sslTests = []s3BucketTest{ + {"abc", "https://abc.s3-mock-region.amazonaws.com/", ""}, + {"a$b$c", "https://s3-mock-region.amazonaws.com/a%24b%24c", ""}, + {"a.b.c", "https://s3-mock-region.amazonaws.com/a.b.c", ""}, + {"a..bc", "https://s3-mock-region.amazonaws.com/a..bc", ""}, + } + + nosslTests = []s3BucketTest{ + {"a.b.c", "http://a.b.c.s3-mock-region.amazonaws.com/", ""}, + {"a..bc", "http://s3-mock-region.amazonaws.com/a..bc", ""}, + } + + forcepathTests = []s3BucketTest{ + {"abc", "https://s3-mock-region.amazonaws.com/abc", ""}, + {"a$b$c", "https://s3-mock-region.amazonaws.com/a%24b%24c", ""}, + {"a.b.c", "https://s3-mock-region.amazonaws.com/a.b.c", ""}, + {"a..bc", "https://s3-mock-region.amazonaws.com/a..bc", ""}, + } + + accelerateTests = []s3BucketTest{ + {"abc", "https://abc.s3-accelerate.amazonaws.com/", ""}, + {"a.b.c", "https://s3-mock-region.amazonaws.com/%7BBucket%7D", "InvalidParameterException"}, + {"a$b$c", "https://s3-mock-region.amazonaws.com/%7BBucket%7D", "InvalidParameterException"}, + } + + accelerateNoSSLTests = []s3BucketTest{ + {"abc", "http://abc.s3-accelerate.amazonaws.com/", ""}, + {"a.b.c", "http://a.b.c.s3-accelerate.amazonaws.com/", ""}, + {"a$b$c", "http://s3-mock-region.amazonaws.com/%7BBucket%7D", "InvalidParameterException"}, + } +) + +func runTests(t *testing.T, svc *s3.S3, tests []s3BucketTest) { + for i, test := range tests { + req, _ := svc.ListObjectsRequest(&s3.ListObjectsInput{Bucket: &test.bucket}) + req.Build() + assert.Equal(t, test.url, req.HTTPRequest.URL.String(), "test case %d", i) + if test.errCode != "" { + require.Error(t, req.Error, "test case %d", i) + assert.Contains(t, req.Error.(awserr.Error).Code(), test.errCode, "test case %d", i) + } + } +} + +func TestAccelerateBucketBuild(t *testing.T) { + s := s3.New(unit.Session, &aws.Config{S3UseAccelerate: aws.Bool(true)}) + runTests(t, s, accelerateTests) +} + +func TestAccelerateNoSSLBucketBuild(t *testing.T) { + s := s3.New(unit.Session, &aws.Config{S3UseAccelerate: aws.Bool(true), DisableSSL: aws.Bool(true)}) + runTests(t, s, accelerateNoSSLTests) +} + +func TestHostStyleBucketBuild(t *testing.T) { + s := s3.New(unit.Session) + runTests(t, s, sslTests) +} + +func TestHostStyleBucketBuildNoSSL(t *testing.T) { + s := s3.New(unit.Session, &aws.Config{DisableSSL: aws.Bool(true)}) + runTests(t, s, nosslTests) +} + +func TestPathStyleBucketBuild(t *testing.T) { + s := s3.New(unit.Session, &aws.Config{S3ForcePathStyle: aws.Bool(true)}) + runTests(t, s, forcepathTests) +} + +func TestHostStyleBucketGetBucketLocation(t *testing.T) { + s := s3.New(unit.Session) + req, _ := s.GetBucketLocationRequest(&s3.GetBucketLocationInput{ + Bucket: aws.String("bucket"), + 
}) + + req.Build() + require.NoError(t, req.Error) + u, _ := url.Parse(req.HTTPRequest.URL.String()) + assert.NotContains(t, u.Host, "bucket") + assert.Contains(t, u.Path, "bucket") +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go new file mode 100644 index 000000000..8e6f3307d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go @@ -0,0 +1,8 @@ +// +build !go1.6 + +package s3 + +import "github.com/aws/aws-sdk-go/aws/request" + +func platformRequestHandlers(r *request.Request) { +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go new file mode 100644 index 000000000..14d05f7b7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go @@ -0,0 +1,28 @@ +// +build go1.6 + +package s3 + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +func platformRequestHandlers(r *request.Request) { + if r.Operation.HTTPMethod == "PUT" { + // 100-Continue should only be used on put requests. + r.Handlers.Sign.PushBack(add100Continue) + } +} + +func add100Continue(r *request.Request) { + if aws.BoolValue(r.Config.S3Disable100Continue) { + return + } + if r.HTTPRequest.ContentLength < 1024*1024*2 { + // Ignore requests smaller than 2MB. This helps prevent delaying + // requests unnecessarily. + return + } + + r.HTTPRequest.Header.Set("Expect", "100-Continue") +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6_test.go b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6_test.go new file mode 100644 index 000000000..b119ce8bb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6_test.go @@ -0,0 +1,68 @@ +// +build go1.6 + +package s3_test + +import ( + "bytes" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/stretchr/testify/assert" +) + +func TestAdd100Continue_Added(t *testing.T) { + svc := s3.New(unit.Session) + r, _ := svc.PutObjectRequest(&s3.PutObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("dest"), + Body: bytes.NewReader(make([]byte, 1024*1024*5)), + }) + + err := r.Sign() + + assert.NoError(t, err) + assert.Equal(t, "100-Continue", r.HTTPRequest.Header.Get("Expect")) +} + +func TestAdd100Continue_SkipDisabled(t *testing.T) { + svc := s3.New(unit.Session, aws.NewConfig().WithS3Disable100Continue(true)) + r, _ := svc.PutObjectRequest(&s3.PutObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("dest"), + Body: bytes.NewReader(make([]byte, 1024*1024*5)), + }) + + err := r.Sign() + + assert.NoError(t, err) + assert.Empty(t, r.HTTPRequest.Header.Get("Expect")) +} + +func TestAdd100Continue_SkipNonPUT(t *testing.T) { + svc := s3.New(unit.Session) + r, _ := svc.GetObjectRequest(&s3.GetObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("dest"), + }) + + err := r.Sign() + + assert.NoError(t, err) + assert.Empty(t, r.HTTPRequest.Header.Get("Expect")) +} + +func TestAdd100Continue_SkipTooSmall(t *testing.T) { + svc := s3.New(unit.Session) + r, _ := svc.PutObjectRequest(&s3.PutObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("dest"), + Body: bytes.NewReader(make([]byte, 1024*1024*1)), + }) + + err := r.Sign() + + assert.NoError(t, err) + assert.Empty(t, 
r.HTTPRequest.Header.Get("Expect")) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go new file mode 100644 index 000000000..e6f8796a1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go @@ -0,0 +1,260 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package s3iface provides an interface for the Amazon Simple Storage Service. +package s3iface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/s3" +) + +// S3API is the interface type for s3.S3. +type S3API interface { + AbortMultipartUploadRequest(*s3.AbortMultipartUploadInput) (*request.Request, *s3.AbortMultipartUploadOutput) + + AbortMultipartUpload(*s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error) + + CompleteMultipartUploadRequest(*s3.CompleteMultipartUploadInput) (*request.Request, *s3.CompleteMultipartUploadOutput) + + CompleteMultipartUpload(*s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) + + CopyObjectRequest(*s3.CopyObjectInput) (*request.Request, *s3.CopyObjectOutput) + + CopyObject(*s3.CopyObjectInput) (*s3.CopyObjectOutput, error) + + CreateBucketRequest(*s3.CreateBucketInput) (*request.Request, *s3.CreateBucketOutput) + + CreateBucket(*s3.CreateBucketInput) (*s3.CreateBucketOutput, error) + + CreateMultipartUploadRequest(*s3.CreateMultipartUploadInput) (*request.Request, *s3.CreateMultipartUploadOutput) + + CreateMultipartUpload(*s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error) + + DeleteBucketRequest(*s3.DeleteBucketInput) (*request.Request, *s3.DeleteBucketOutput) + + DeleteBucket(*s3.DeleteBucketInput) (*s3.DeleteBucketOutput, error) + + DeleteBucketCorsRequest(*s3.DeleteBucketCorsInput) (*request.Request, *s3.DeleteBucketCorsOutput) + + DeleteBucketCors(*s3.DeleteBucketCorsInput) (*s3.DeleteBucketCorsOutput, error) + + DeleteBucketLifecycleRequest(*s3.DeleteBucketLifecycleInput) (*request.Request, *s3.DeleteBucketLifecycleOutput) + + DeleteBucketLifecycle(*s3.DeleteBucketLifecycleInput) (*s3.DeleteBucketLifecycleOutput, error) + + DeleteBucketPolicyRequest(*s3.DeleteBucketPolicyInput) (*request.Request, *s3.DeleteBucketPolicyOutput) + + DeleteBucketPolicy(*s3.DeleteBucketPolicyInput) (*s3.DeleteBucketPolicyOutput, error) + + DeleteBucketReplicationRequest(*s3.DeleteBucketReplicationInput) (*request.Request, *s3.DeleteBucketReplicationOutput) + + DeleteBucketReplication(*s3.DeleteBucketReplicationInput) (*s3.DeleteBucketReplicationOutput, error) + + DeleteBucketTaggingRequest(*s3.DeleteBucketTaggingInput) (*request.Request, *s3.DeleteBucketTaggingOutput) + + DeleteBucketTagging(*s3.DeleteBucketTaggingInput) (*s3.DeleteBucketTaggingOutput, error) + + DeleteBucketWebsiteRequest(*s3.DeleteBucketWebsiteInput) (*request.Request, *s3.DeleteBucketWebsiteOutput) + + DeleteBucketWebsite(*s3.DeleteBucketWebsiteInput) (*s3.DeleteBucketWebsiteOutput, error) + + DeleteObjectRequest(*s3.DeleteObjectInput) (*request.Request, *s3.DeleteObjectOutput) + + DeleteObject(*s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) + + DeleteObjectsRequest(*s3.DeleteObjectsInput) (*request.Request, *s3.DeleteObjectsOutput) + + DeleteObjects(*s3.DeleteObjectsInput) (*s3.DeleteObjectsOutput, error) + + GetBucketAccelerateConfigurationRequest(*s3.GetBucketAccelerateConfigurationInput) (*request.Request, *s3.GetBucketAccelerateConfigurationOutput) + + 
GetBucketAccelerateConfiguration(*s3.GetBucketAccelerateConfigurationInput) (*s3.GetBucketAccelerateConfigurationOutput, error) + + GetBucketAclRequest(*s3.GetBucketAclInput) (*request.Request, *s3.GetBucketAclOutput) + + GetBucketAcl(*s3.GetBucketAclInput) (*s3.GetBucketAclOutput, error) + + GetBucketCorsRequest(*s3.GetBucketCorsInput) (*request.Request, *s3.GetBucketCorsOutput) + + GetBucketCors(*s3.GetBucketCorsInput) (*s3.GetBucketCorsOutput, error) + + GetBucketLifecycleRequest(*s3.GetBucketLifecycleInput) (*request.Request, *s3.GetBucketLifecycleOutput) + + GetBucketLifecycle(*s3.GetBucketLifecycleInput) (*s3.GetBucketLifecycleOutput, error) + + GetBucketLifecycleConfigurationRequest(*s3.GetBucketLifecycleConfigurationInput) (*request.Request, *s3.GetBucketLifecycleConfigurationOutput) + + GetBucketLifecycleConfiguration(*s3.GetBucketLifecycleConfigurationInput) (*s3.GetBucketLifecycleConfigurationOutput, error) + + GetBucketLocationRequest(*s3.GetBucketLocationInput) (*request.Request, *s3.GetBucketLocationOutput) + + GetBucketLocation(*s3.GetBucketLocationInput) (*s3.GetBucketLocationOutput, error) + + GetBucketLoggingRequest(*s3.GetBucketLoggingInput) (*request.Request, *s3.GetBucketLoggingOutput) + + GetBucketLogging(*s3.GetBucketLoggingInput) (*s3.GetBucketLoggingOutput, error) + + GetBucketNotificationRequest(*s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfigurationDeprecated) + + GetBucketNotification(*s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfigurationDeprecated, error) + + GetBucketNotificationConfigurationRequest(*s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfiguration) + + GetBucketNotificationConfiguration(*s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfiguration, error) + + GetBucketPolicyRequest(*s3.GetBucketPolicyInput) (*request.Request, *s3.GetBucketPolicyOutput) + + GetBucketPolicy(*s3.GetBucketPolicyInput) (*s3.GetBucketPolicyOutput, error) + + GetBucketReplicationRequest(*s3.GetBucketReplicationInput) (*request.Request, *s3.GetBucketReplicationOutput) + + GetBucketReplication(*s3.GetBucketReplicationInput) (*s3.GetBucketReplicationOutput, error) + + GetBucketRequestPaymentRequest(*s3.GetBucketRequestPaymentInput) (*request.Request, *s3.GetBucketRequestPaymentOutput) + + GetBucketRequestPayment(*s3.GetBucketRequestPaymentInput) (*s3.GetBucketRequestPaymentOutput, error) + + GetBucketTaggingRequest(*s3.GetBucketTaggingInput) (*request.Request, *s3.GetBucketTaggingOutput) + + GetBucketTagging(*s3.GetBucketTaggingInput) (*s3.GetBucketTaggingOutput, error) + + GetBucketVersioningRequest(*s3.GetBucketVersioningInput) (*request.Request, *s3.GetBucketVersioningOutput) + + GetBucketVersioning(*s3.GetBucketVersioningInput) (*s3.GetBucketVersioningOutput, error) + + GetBucketWebsiteRequest(*s3.GetBucketWebsiteInput) (*request.Request, *s3.GetBucketWebsiteOutput) + + GetBucketWebsite(*s3.GetBucketWebsiteInput) (*s3.GetBucketWebsiteOutput, error) + + GetObjectRequest(*s3.GetObjectInput) (*request.Request, *s3.GetObjectOutput) + + GetObject(*s3.GetObjectInput) (*s3.GetObjectOutput, error) + + GetObjectAclRequest(*s3.GetObjectAclInput) (*request.Request, *s3.GetObjectAclOutput) + + GetObjectAcl(*s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error) + + GetObjectTorrentRequest(*s3.GetObjectTorrentInput) (*request.Request, *s3.GetObjectTorrentOutput) + + GetObjectTorrent(*s3.GetObjectTorrentInput) (*s3.GetObjectTorrentOutput, error) + + 
HeadBucketRequest(*s3.HeadBucketInput) (*request.Request, *s3.HeadBucketOutput) + + HeadBucket(*s3.HeadBucketInput) (*s3.HeadBucketOutput, error) + + HeadObjectRequest(*s3.HeadObjectInput) (*request.Request, *s3.HeadObjectOutput) + + HeadObject(*s3.HeadObjectInput) (*s3.HeadObjectOutput, error) + + ListBucketsRequest(*s3.ListBucketsInput) (*request.Request, *s3.ListBucketsOutput) + + ListBuckets(*s3.ListBucketsInput) (*s3.ListBucketsOutput, error) + + ListMultipartUploadsRequest(*s3.ListMultipartUploadsInput) (*request.Request, *s3.ListMultipartUploadsOutput) + + ListMultipartUploads(*s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) + + ListMultipartUploadsPages(*s3.ListMultipartUploadsInput, func(*s3.ListMultipartUploadsOutput, bool) bool) error + + ListObjectVersionsRequest(*s3.ListObjectVersionsInput) (*request.Request, *s3.ListObjectVersionsOutput) + + ListObjectVersions(*s3.ListObjectVersionsInput) (*s3.ListObjectVersionsOutput, error) + + ListObjectVersionsPages(*s3.ListObjectVersionsInput, func(*s3.ListObjectVersionsOutput, bool) bool) error + + ListObjectsRequest(*s3.ListObjectsInput) (*request.Request, *s3.ListObjectsOutput) + + ListObjects(*s3.ListObjectsInput) (*s3.ListObjectsOutput, error) + + ListObjectsPages(*s3.ListObjectsInput, func(*s3.ListObjectsOutput, bool) bool) error + + ListObjectsV2Request(*s3.ListObjectsV2Input) (*request.Request, *s3.ListObjectsV2Output) + + ListObjectsV2(*s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error) + + ListObjectsV2Pages(*s3.ListObjectsV2Input, func(*s3.ListObjectsV2Output, bool) bool) error + + ListPartsRequest(*s3.ListPartsInput) (*request.Request, *s3.ListPartsOutput) + + ListParts(*s3.ListPartsInput) (*s3.ListPartsOutput, error) + + ListPartsPages(*s3.ListPartsInput, func(*s3.ListPartsOutput, bool) bool) error + + PutBucketAccelerateConfigurationRequest(*s3.PutBucketAccelerateConfigurationInput) (*request.Request, *s3.PutBucketAccelerateConfigurationOutput) + + PutBucketAccelerateConfiguration(*s3.PutBucketAccelerateConfigurationInput) (*s3.PutBucketAccelerateConfigurationOutput, error) + + PutBucketAclRequest(*s3.PutBucketAclInput) (*request.Request, *s3.PutBucketAclOutput) + + PutBucketAcl(*s3.PutBucketAclInput) (*s3.PutBucketAclOutput, error) + + PutBucketCorsRequest(*s3.PutBucketCorsInput) (*request.Request, *s3.PutBucketCorsOutput) + + PutBucketCors(*s3.PutBucketCorsInput) (*s3.PutBucketCorsOutput, error) + + PutBucketLifecycleRequest(*s3.PutBucketLifecycleInput) (*request.Request, *s3.PutBucketLifecycleOutput) + + PutBucketLifecycle(*s3.PutBucketLifecycleInput) (*s3.PutBucketLifecycleOutput, error) + + PutBucketLifecycleConfigurationRequest(*s3.PutBucketLifecycleConfigurationInput) (*request.Request, *s3.PutBucketLifecycleConfigurationOutput) + + PutBucketLifecycleConfiguration(*s3.PutBucketLifecycleConfigurationInput) (*s3.PutBucketLifecycleConfigurationOutput, error) + + PutBucketLoggingRequest(*s3.PutBucketLoggingInput) (*request.Request, *s3.PutBucketLoggingOutput) + + PutBucketLogging(*s3.PutBucketLoggingInput) (*s3.PutBucketLoggingOutput, error) + + PutBucketNotificationRequest(*s3.PutBucketNotificationInput) (*request.Request, *s3.PutBucketNotificationOutput) + + PutBucketNotification(*s3.PutBucketNotificationInput) (*s3.PutBucketNotificationOutput, error) + + PutBucketNotificationConfigurationRequest(*s3.PutBucketNotificationConfigurationInput) (*request.Request, *s3.PutBucketNotificationConfigurationOutput) + + PutBucketNotificationConfiguration(*s3.PutBucketNotificationConfigurationInput) 
(*s3.PutBucketNotificationConfigurationOutput, error) + + PutBucketPolicyRequest(*s3.PutBucketPolicyInput) (*request.Request, *s3.PutBucketPolicyOutput) + + PutBucketPolicy(*s3.PutBucketPolicyInput) (*s3.PutBucketPolicyOutput, error) + + PutBucketReplicationRequest(*s3.PutBucketReplicationInput) (*request.Request, *s3.PutBucketReplicationOutput) + + PutBucketReplication(*s3.PutBucketReplicationInput) (*s3.PutBucketReplicationOutput, error) + + PutBucketRequestPaymentRequest(*s3.PutBucketRequestPaymentInput) (*request.Request, *s3.PutBucketRequestPaymentOutput) + + PutBucketRequestPayment(*s3.PutBucketRequestPaymentInput) (*s3.PutBucketRequestPaymentOutput, error) + + PutBucketTaggingRequest(*s3.PutBucketTaggingInput) (*request.Request, *s3.PutBucketTaggingOutput) + + PutBucketTagging(*s3.PutBucketTaggingInput) (*s3.PutBucketTaggingOutput, error) + + PutBucketVersioningRequest(*s3.PutBucketVersioningInput) (*request.Request, *s3.PutBucketVersioningOutput) + + PutBucketVersioning(*s3.PutBucketVersioningInput) (*s3.PutBucketVersioningOutput, error) + + PutBucketWebsiteRequest(*s3.PutBucketWebsiteInput) (*request.Request, *s3.PutBucketWebsiteOutput) + + PutBucketWebsite(*s3.PutBucketWebsiteInput) (*s3.PutBucketWebsiteOutput, error) + + PutObjectRequest(*s3.PutObjectInput) (*request.Request, *s3.PutObjectOutput) + + PutObject(*s3.PutObjectInput) (*s3.PutObjectOutput, error) + + PutObjectAclRequest(*s3.PutObjectAclInput) (*request.Request, *s3.PutObjectAclOutput) + + PutObjectAcl(*s3.PutObjectAclInput) (*s3.PutObjectAclOutput, error) + + RestoreObjectRequest(*s3.RestoreObjectInput) (*request.Request, *s3.RestoreObjectOutput) + + RestoreObject(*s3.RestoreObjectInput) (*s3.RestoreObjectOutput, error) + + UploadPartRequest(*s3.UploadPartInput) (*request.Request, *s3.UploadPartOutput) + + UploadPart(*s3.UploadPartInput) (*s3.UploadPartOutput, error) + + UploadPartCopyRequest(*s3.UploadPartCopyInput) (*request.Request, *s3.UploadPartCopyOutput) + + UploadPartCopy(*s3.UploadPartCopyInput) (*s3.UploadPartCopyOutput, error) +} + +var _ S3API = (*s3.S3)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go new file mode 100644 index 000000000..229c0d63b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go @@ -0,0 +1,3 @@ +// Package s3manager provides utilities to upload and download objects from +// S3 concurrently. Helpful for when working with large objects. +package s3manager diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go new file mode 100644 index 000000000..6c123fc63 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go @@ -0,0 +1,354 @@ +package s3manager + +import ( + "fmt" + "io" + "net/http" + "strconv" + "strings" + "sync" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3iface" +) + +// DefaultDownloadPartSize is the default range of bytes to get at a time when +// using Download(). +const DefaultDownloadPartSize = 1024 * 1024 * 5 + +// DefaultDownloadConcurrency is the default number of goroutines to spin up +// when using Download(). +const DefaultDownloadConcurrency = 5 + +// The Downloader structure that calls Download(). 
It is safe to call Download() +// on this structure for multiple objects and across concurrent goroutines. +// Mutating the Downloader's properties is not safe to be done concurrently. +type Downloader struct { + // The buffer size (in bytes) to use when buffering data into chunks and + // sending them as parts to S3. The minimum allowed part size is 5MB, and + // if this value is set to zero, the DefaultDownloadPartSize value will be used. + PartSize int64 + + // The number of goroutines to spin up in parallel when sending parts. + // If this is set to zero, the DefaultDownloadConcurrency value will be used. + Concurrency int + + // An S3 client to use when performing downloads. + S3 s3iface.S3API +} + +// NewDownloader creates a new Downloader instance to download objects from +// S3 in concurrent chunks. Pass in additional functional options to customize +// the downloader behavior. Requires a client.ConfigProvider in order to create +// an S3 service client. The session.Session satisfies the client.ConfigProvider +// interface. +// +// Example: +// // The session the S3 Downloader will use +// sess := session.New() +// +// // Create a downloader with the session and default options +// downloader := s3manager.NewDownloader(sess) +// +// // Create a downloader with the session and custom options +// downloader := s3manager.NewDownloader(sess, func(d *s3manager.Downloader) { +// d.PartSize = 64 * 1024 * 1024 // 64MB per part +// }) +func NewDownloader(c client.ConfigProvider, options ...func(*Downloader)) *Downloader { + d := &Downloader{ + S3: s3.New(c), + PartSize: DefaultDownloadPartSize, + Concurrency: DefaultDownloadConcurrency, + } + for _, option := range options { + option(d) + } + + return d +} + +// NewDownloaderWithClient creates a new Downloader instance to download +// objects from S3 in concurrent chunks. Pass in additional functional +// options to customize the downloader behavior. Requires an S3 service client +// to make S3 API calls. +// +// Example: +// // The S3 client the S3 Downloader will use +// s3Svc := s3.New(session.New()) +// +// // Create a downloader with the s3 client and default options +// downloader := s3manager.NewDownloaderWithClient(s3Svc) +// +// // Create a downloader with the s3 client and custom options +// downloader := s3manager.NewDownloaderWithClient(s3Svc, func(d *s3manager.Downloader) { +// d.PartSize = 64 * 1024 * 1024 // 64MB per part +// }) +func NewDownloaderWithClient(svc s3iface.S3API, options ...func(*Downloader)) *Downloader { + d := &Downloader{ + S3: svc, + PartSize: DefaultDownloadPartSize, + Concurrency: DefaultDownloadConcurrency, + } + for _, option := range options { + option(d) + } + + return d +} + +// Download downloads an object in S3 and writes the payload into w using +// concurrent GET requests. +// +// Additional functional options can be provided to configure the individual +// download. These options are copies of the Downloader instance Download is called from. +// Modifying the options will not impact the original Downloader instance. +// +// It is safe to call this method concurrently across goroutines. +// +// The w io.WriterAt can be satisfied by an os.File to do multipart concurrent +// downloads, or an in-memory []byte wrapper using aws.WriteAtBuffer.
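+// +// Example (an illustrative sketch; the file, bucket, and key names are +// placeholders, and error handling is elided for brevity): +// f, _ := os.Create("object.dat") +// defer f.Close() +// n, err := downloader.Download(f, &s3.GetObjectInput{ +// Bucket: aws.String("bucket"), +// Key: aws.String("key"), +// })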
+func (d Downloader) Download(w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error) { + impl := downloader{w: w, in: input, ctx: d} + + for _, option := range options { + option(&impl.ctx) + } + + return impl.download() +} + +// downloader is the implementation structure used internally by Downloader. +type downloader struct { + ctx Downloader + + in *s3.GetObjectInput + w io.WriterAt + + wg sync.WaitGroup + m sync.Mutex + + pos int64 + totalBytes int64 + written int64 + err error +} + +// init initializes the downloader with default options. +func (d *downloader) init() { + d.totalBytes = -1 + + if d.ctx.Concurrency == 0 { + d.ctx.Concurrency = DefaultDownloadConcurrency + } + + if d.ctx.PartSize == 0 { + d.ctx.PartSize = DefaultDownloadPartSize + } +} + +// download performs the implementation of the object download across ranged +// GETs. +func (d *downloader) download() (n int64, err error) { + d.init() + + // Spin off first worker to check additional header information + d.getChunk() + + if total := d.getTotalBytes(); total >= 0 { + // Spin up workers + ch := make(chan dlchunk, d.ctx.Concurrency) + + for i := 0; i < d.ctx.Concurrency; i++ { + d.wg.Add(1) + go d.downloadPart(ch) + } + + // Assign work + for d.getErr() == nil { + if d.pos >= total { + break // We're finished queueing chunks + } + + // Queue the next range of bytes to read. + ch <- dlchunk{w: d.w, start: d.pos, size: d.ctx.PartSize} + d.pos += d.ctx.PartSize + } + + // Wait for completion + close(ch) + d.wg.Wait() + } else { + // Checking if we read anything new + for d.err == nil { + d.getChunk() + } + + // We expect a 416 error letting us know we are done downloading the + // total bytes. Since we do not know the content's length, this will + // keep grabbing chunks of data until the range of bytes specified in + // the request is out of range of the content. Once this happens, a + // 416 should occur. + e, ok := d.err.(awserr.RequestFailure) + if ok && e.StatusCode() == http.StatusRequestedRangeNotSatisfiable { + d.err = nil + } + } + + // Return error + return d.written, d.err +} + +// downloadPart is an individual goroutine worker reading from the ch channel +// and performing a GetObject request on the data with a given byte range. +// +// If this is the first worker, this operation also resolves the total number +// of bytes to be read so that the worker manager knows when it is finished. +func (d *downloader) downloadPart(ch chan dlchunk) { + defer d.wg.Done() + for { + chunk, ok := <-ch + if !ok { + break + } + d.downloadChunk(chunk) + } +} + +// getChunk grabs a chunk of data from the body. +// Not thread safe. Should only be used when grabbing data on a single thread. +func (d *downloader) getChunk() { + chunk := dlchunk{w: d.w, start: d.pos, size: d.ctx.PartSize} + d.pos += d.ctx.PartSize + d.downloadChunk(chunk) +} + +// downloadChunk downloads the chunk from S3. +func (d *downloader) downloadChunk(chunk dlchunk) { + if d.getErr() != nil { + return + } + // Get the next byte range of data + in := &s3.GetObjectInput{} + awsutil.Copy(in, d.in) + rng := fmt.Sprintf("bytes=%d-%d", + chunk.start, chunk.start+chunk.size-1) + in.Range = &rng + + req, resp := d.ctx.S3.GetObjectRequest(in) + req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager")) + err := req.Send() + + if err != nil { + d.setErr(err) + } else { + d.setTotalBytes(resp) // Set total if not yet set.
+ + n, err := io.Copy(&chunk, resp.Body) + resp.Body.Close() + + if err != nil { + d.setErr(err) + } + d.incrWritten(n) + } +} + +// getTotalBytes is a thread-safe getter for retrieving the total byte status. +func (d *downloader) getTotalBytes() int64 { + d.m.Lock() + defer d.m.Unlock() + + return d.totalBytes +} + +// setTotalBytes is a thread-safe setter for setting the total byte status. +// Extracts the object's total bytes from the Content-Range header if the +// download is chunked, or from Content-Length when the response does not +// include a Content-Range, meaning the object was not chunked. This occurs +// when the full file fits within the PartSize directive. +func (d *downloader) setTotalBytes(resp *s3.GetObjectOutput) { + d.m.Lock() + defer d.m.Unlock() + + if d.totalBytes >= 0 { + return + } + + if resp.ContentRange == nil { + // ContentRange is nil when the full file's contents are provided and + // the response is not chunked. Use ContentLength instead. + if resp.ContentLength != nil { + d.totalBytes = *resp.ContentLength + return + } + } else { + parts := strings.Split(*resp.ContentRange, "/") + + total := int64(-1) + var err error + // Check whether a numeric total exists. + // If one does not exist, we assume the total to be -1, undefined, + // and sequentially download each chunk until hitting a 416 error. + totalStr := parts[len(parts)-1] + if totalStr != "*" { + total, err = strconv.ParseInt(totalStr, 10, 64) + if err != nil { + d.err = err + return + } + } + + d.totalBytes = total + } +} + +func (d *downloader) incrWritten(n int64) { + d.m.Lock() + defer d.m.Unlock() + + d.written += n +} + +// getErr is a thread-safe getter for the error object. +func (d *downloader) getErr() error { + d.m.Lock() + defer d.m.Unlock() + + return d.err +} + +// setErr is a thread-safe setter for the error object. +func (d *downloader) setErr(e error) { + d.m.Lock() + defer d.m.Unlock() + + d.err = e +} + +// dlchunk represents a single chunk of data to write by the worker routine. +// This structure also implements an io.SectionReader style interface for +// io.WriterAt, effectively making it an io.SectionWriter (which does not +// exist). +type dlchunk struct { + w io.WriterAt + start int64 + size int64 + cur int64 +} + +// Write wraps io.WriterAt for the dlchunk, writing from the dlchunk's start +// position to its end (or EOF).
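+// +// io.Copy in downloadChunk drives this method: each call writes at +// start+cur within w, so the chunk behaves as a fixed-size window into the +// destination and returns io.EOF once that window is exhausted.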
+func (c *dlchunk) Write(p []byte) (n int, err error) { + if c.cur >= c.size { + return 0, io.EOF + } + + n, err = c.w.WriteAt(p, c.start+c.cur) + c.cur += int64(n) + + return +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download_test.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download_test.go new file mode 100644 index 000000000..af57e17fc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download_test.go @@ -0,0 +1,309 @@ +package s3manager_test + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "regexp" + "strconv" + "sync" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3manager" +) + +func dlLoggingSvc(data []byte) (*s3.S3, *[]string, *[]string) { + var m sync.Mutex + names := []string{} + ranges := []string{} + + svc := s3.New(unit.Session) + svc.Handlers.Send.Clear() + svc.Handlers.Send.PushBack(func(r *request.Request) { + m.Lock() + defer m.Unlock() + + names = append(names, r.Operation.Name) + ranges = append(ranges, *r.Params.(*s3.GetObjectInput).Range) + + rerng := regexp.MustCompile(`bytes=(\d+)-(\d+)`) + rng := rerng.FindStringSubmatch(r.HTTPRequest.Header.Get("Range")) + start, _ := strconv.ParseInt(rng[1], 10, 64) + fin, _ := strconv.ParseInt(rng[2], 10, 64) + fin++ + + if fin > int64(len(data)) { + fin = int64(len(data)) + } + + bodyBytes := data[start:fin] + r.HTTPResponse = &http.Response{ + StatusCode: 200, + Body: ioutil.NopCloser(bytes.NewReader(bodyBytes)), + Header: http.Header{}, + } + r.HTTPResponse.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", + start, fin-1, len(data))) + r.HTTPResponse.Header.Set("Content-Length", fmt.Sprintf("%d", len(bodyBytes))) + }) + + return svc, &names, &ranges +} + +func dlLoggingSvcNoChunk(data []byte) (*s3.S3, *[]string) { + var m sync.Mutex + names := []string{} + + svc := s3.New(unit.Session) + svc.Handlers.Send.Clear() + svc.Handlers.Send.PushBack(func(r *request.Request) { + m.Lock() + defer m.Unlock() + + names = append(names, r.Operation.Name) + + r.HTTPResponse = &http.Response{ + StatusCode: 200, + Body: ioutil.NopCloser(bytes.NewReader(data[:])), + Header: http.Header{}, + } + r.HTTPResponse.Header.Set("Content-Length", fmt.Sprintf("%d", len(data))) + }) + + return svc, &names +} + +func dlLoggingSvcNoContentRangeLength(data []byte, states []int) (*s3.S3, *[]string) { + var m sync.Mutex + names := []string{} + var index int = 0 + + svc := s3.New(unit.Session) + svc.Handlers.Send.Clear() + svc.Handlers.Send.PushBack(func(r *request.Request) { + m.Lock() + defer m.Unlock() + + names = append(names, r.Operation.Name) + + r.HTTPResponse = &http.Response{ + StatusCode: states[index], + Body: ioutil.NopCloser(bytes.NewReader(data[:])), + Header: http.Header{}, + } + index++ + }) + + return svc, &names +} + +func dlLoggingSvcContentRangeTotalAny(data []byte, states []int) (*s3.S3, *[]string) { + var m sync.Mutex + names := []string{} + ranges := []string{} + var index int = 0 + + svc := s3.New(unit.Session) + svc.Handlers.Send.Clear() + svc.Handlers.Send.PushBack(func(r *request.Request) { + m.Lock() + defer m.Unlock() + + names = append(names, r.Operation.Name) + ranges = append(ranges, *r.Params.(*s3.GetObjectInput).Range) + + rerng := regexp.MustCompile(`bytes=(\d+)-(\d+)`) + rng := 
rerng.FindStringSubmatch(r.HTTPRequest.Header.Get("Range")) + start, _ := strconv.ParseInt(rng[1], 10, 64) + fin, _ := strconv.ParseInt(rng[2], 10, 64) + fin++ + + if fin >= int64(len(data)) { + fin = int64(len(data)) + } + + // Setting start and finish to 0 because this final state is supposed to + // be an error state of 416. + if index == len(states)-1 { + start = 0 + fin = 0 + } + + bodyBytes := data[start:fin] + + r.HTTPResponse = &http.Response{ + StatusCode: states[index], + Body: ioutil.NopCloser(bytes.NewReader(bodyBytes)), + Header: http.Header{}, + } + r.HTTPResponse.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/*", + start, fin-1)) + index++ + }) + + return svc, &names +} + +func TestDownloadOrder(t *testing.T) { + s, names, ranges := dlLoggingSvc(buf12MB) + + d := s3manager.NewDownloaderWithClient(s, func(d *s3manager.Downloader) { + d.Concurrency = 1 + }) + w := &aws.WriteAtBuffer{} + n, err := d.Download(w, &s3.GetObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("key"), + }) + + assert.Nil(t, err) + assert.Equal(t, int64(len(buf12MB)), n) + assert.Equal(t, []string{"GetObject", "GetObject", "GetObject"}, *names) + assert.Equal(t, []string{"bytes=0-5242879", "bytes=5242880-10485759", "bytes=10485760-15728639"}, *ranges) + + count := 0 + for _, b := range w.Bytes() { + count += int(b) + } + assert.Equal(t, 0, count) +} + +func TestDownloadZero(t *testing.T) { + s, names, ranges := dlLoggingSvc([]byte{}) + + d := s3manager.NewDownloaderWithClient(s) + w := &aws.WriteAtBuffer{} + n, err := d.Download(w, &s3.GetObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("key"), + }) + + assert.Nil(t, err) + assert.Equal(t, int64(0), n) + assert.Equal(t, []string{"GetObject"}, *names) + assert.Equal(t, []string{"bytes=0-5242879"}, *ranges) +} + +func TestDownloadSetPartSize(t *testing.T) { + s, names, ranges := dlLoggingSvc([]byte{1, 2, 3}) + + d := s3manager.NewDownloaderWithClient(s, func(d *s3manager.Downloader) { + d.Concurrency = 1 + d.PartSize = 1 + }) + w := &aws.WriteAtBuffer{} + n, err := d.Download(w, &s3.GetObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("key"), + }) + + assert.Nil(t, err) + assert.Equal(t, int64(3), n) + assert.Equal(t, []string{"GetObject", "GetObject", "GetObject"}, *names) + assert.Equal(t, []string{"bytes=0-0", "bytes=1-1", "bytes=2-2"}, *ranges) + assert.Equal(t, []byte{1, 2, 3}, w.Bytes()) +} + +func TestDownloadError(t *testing.T) { + s, names, _ := dlLoggingSvc([]byte{1, 2, 3}) + + num := 0 + s.Handlers.Send.PushBack(func(r *request.Request) { + num++ + if num > 1 { + r.HTTPResponse.StatusCode = 400 + r.HTTPResponse.Body = ioutil.NopCloser(bytes.NewReader([]byte{})) + } + }) + + d := s3manager.NewDownloaderWithClient(s, func(d *s3manager.Downloader) { + d.Concurrency = 1 + d.PartSize = 1 + }) + w := &aws.WriteAtBuffer{} + n, err := d.Download(w, &s3.GetObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("key"), + }) + + assert.NotNil(t, err) + assert.Equal(t, int64(1), n) + assert.Equal(t, []string{"GetObject", "GetObject"}, *names) + assert.Equal(t, []byte{1}, w.Bytes()) +} + +func TestDownloadNonChunk(t *testing.T) { + s, names := dlLoggingSvcNoChunk(buf2MB) + + d := s3manager.NewDownloaderWithClient(s, func(d *s3manager.Downloader) { + d.Concurrency = 1 + }) + w := &aws.WriteAtBuffer{} + n, err := d.Download(w, &s3.GetObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("key"), + }) + + assert.Nil(t, err) + assert.Equal(t, int64(len(buf2MB)), n) + assert.Equal(t, []string{"GetObject"},
*names) + + count := 0 + for _, b := range w.Bytes() { + count += int(b) + } + assert.Equal(t, 0, count) +} + +func TestDownloadNoContentRangeLength(t *testing.T) { + s, names := dlLoggingSvcNoContentRangeLength(buf2MB, []int{200, 416}) + + d := s3manager.NewDownloaderWithClient(s, func(d *s3manager.Downloader) { + d.Concurrency = 1 + }) + w := &aws.WriteAtBuffer{} + n, err := d.Download(w, &s3.GetObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("key"), + }) + + assert.Nil(t, err) + assert.Equal(t, int64(len(buf2MB)), n) + assert.Equal(t, []string{"GetObject", "GetObject"}, *names) + + count := 0 + for _, b := range w.Bytes() { + count += int(b) + } + assert.Equal(t, 0, count) +} + +func TestDownloadContentRangeTotalAny(t *testing.T) { + s, names := dlLoggingSvcContentRangeTotalAny(buf2MB, []int{200, 416}) + + d := s3manager.NewDownloaderWithClient(s, func(d *s3manager.Downloader) { + d.Concurrency = 1 + }) + w := &aws.WriteAtBuffer{} + n, err := d.Download(w, &s3.GetObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("key"), + }) + + assert.Nil(t, err) + assert.Equal(t, int64(len(buf2MB)), n) + assert.Equal(t, []string{"GetObject", "GetObject"}, *names) + + count := 0 + for _, b := range w.Bytes() { + count += int(b) + } + assert.Equal(t, 0, count) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface/interface.go new file mode 100644 index 000000000..b7d0a1256 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface/interface.go @@ -0,0 +1,23 @@ +// Package s3manageriface provides an interface for the s3manager package +package s3manageriface + +import ( + "io" + + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3manager" +) + +// DownloaderAPI is the interface type for s3manager.Downloader. +type DownloaderAPI interface { + Download(io.WriterAt, *s3.GetObjectInput, ...func(*s3manager.Downloader)) (int64, error) +} + +var _ DownloaderAPI = (*s3manager.Downloader)(nil) + +// UploaderAPI is the interface type for s3manager.Uploader. +type UploaderAPI interface { + Upload(*s3manager.UploadInput, ...func(*s3manager.Uploader)) (*s3manager.UploadOutput, error) +} + +var _ UploaderAPI = (*s3manager.Uploader)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/shared_test.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/shared_test.go new file mode 100644 index 000000000..b5b613143 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/shared_test.go @@ -0,0 +1,4 @@ +package s3manager_test + +var buf12MB = make([]byte, 1024*1024*12) +var buf2MB = make([]byte, 1024*1024*2) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go new file mode 100644 index 000000000..b2d1afd48 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go @@ -0,0 +1,664 @@ +package s3manager + +import ( + "bytes" + "fmt" + "io" + "sort" + "sync" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3iface" +) + +// MaxUploadParts is the maximum allowed number of parts in a multi-part upload +// on Amazon S3. 
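+// With the 5MB minimum part size, that caps an upload made at the default +// part size at roughly 48.8GB (10000 parts * 5MB). Larger objects need a +// larger PartSize; when the body's total size can be determined, the +// uploader also grows PartSize automatically to stay under this limit.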
+const MaxUploadParts = 10000 + +// MinUploadPartSize is the minimum allowed part size when uploading a part to +// Amazon S3. +const MinUploadPartSize int64 = 1024 * 1024 * 5 + +// DefaultUploadPartSize is the default part size to buffer chunks of a +// payload into. +const DefaultUploadPartSize = MinUploadPartSize + +// DefaultUploadConcurrency is the default number of goroutines to spin up when +// using Upload(). +const DefaultUploadConcurrency = 5 + +// A MultiUploadFailure wraps a failed S3 multipart upload. An error returned +// will satisfy this interface when a multipart upload failed to upload all +// chunks to S3. In the case of a failure the UploadID is needed to operate on +// the chunks, if any, which were uploaded. +// +// Example: +// +// u := s3manager.NewUploader(opts) +// output, err := u.Upload(input) +// if err != nil { +// if multierr, ok := err.(s3manager.MultiUploadFailure); ok { +// // Process error and its associated uploadID +// fmt.Println("Error:", multierr.Code(), multierr.Message(), multierr.UploadID()) +// } else { +// // Process error generically +// fmt.Println("Error:", err.Error()) +// } +// } +// +type MultiUploadFailure interface { + awserr.Error + + // Returns the upload id for the S3 multipart upload that failed. + UploadID() string +} + +// So that the Error interface type can be included as an anonymous field +// in the multiUploadError struct and not conflict with the error.Error() method. +type awsError awserr.Error + +// A multiUploadError wraps the upload ID of a failed S3 multipart upload. +// Composed of BaseError for code, message, and original error. +// +// Should be used for an error that occurred while failing an S3 multipart +// upload when an upload ID is available. If an upload ID is not available, a +// more relevant error type should be used instead. +type multiUploadError struct { + awsError + + // ID for multipart upload which failed. + uploadID string +} + +// Error returns the string representation of the error. +// +// See apierr.BaseError ErrorWithExtra for output format. +// +// Satisfies the error interface. +func (m multiUploadError) Error() string { + extra := fmt.Sprintf("upload id: %s", m.uploadID) + return awserr.SprintError(m.Code(), m.Message(), extra, m.OrigErr()) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (m multiUploadError) String() string { + return m.Error() +} + +// UploadID returns the id of the S3 upload which failed. +func (m multiUploadError) UploadID() string { + return m.uploadID +} + +// UploadInput contains all input for upload requests to Amazon S3. +type UploadInput struct { + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in.
+ ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to write the ACL for the applicable object. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + Key *string `location:"uri" locationName:"Key" type:"string" required:"true"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"` + + // Specifies the algorithm to use when encrypting the object (e.g., AES256, + // aws:kms). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key. The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT + // requests for an object protected by AWS KMS will fail if not made via SSL + // or using SigV4. Documentation on configuring any of the officially supported + // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string"` + + // The type of storage to use for the object. Defaults to 'STANDARD'.
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` + + // The readable body payload to send to S3. + Body io.Reader +} + +// UploadOutput represents a response from the Upload() call. +type UploadOutput struct { + // The URL where the object was uploaded to. + Location string + + // The version of the object that was uploaded. Will only be populated if + // the S3 Bucket is versioned. If the bucket is not versioned this field + // will not be set. + VersionID *string + + // The ID for a multipart upload to S3. In the case of an error the error + // can be cast to the MultiUploadFailure interface to extract the upload ID. + UploadID string +} + +// The Uploader structure that calls Upload(). It is safe to call Upload() +// on this structure for multiple objects and across concurrent goroutines. +// Mutating the Uploader's properties is not safe to be done concurrently. +type Uploader struct { + // The buffer size (in bytes) to use when buffering data into chunks and + // sending them as parts to S3. The minimum allowed part size is 5MB, and + // if this value is set to zero, the DefaultUploadPartSize value will be used. + PartSize int64 + + // The number of goroutines to spin up in parallel when sending parts. + // If this is set to zero, the DefaultUploadConcurrency value will be used. + Concurrency int + + // Setting this value to true will cause the SDK to avoid calling + // AbortMultipartUpload on a failure, leaving all successfully uploaded + // parts on S3 for manual recovery. + // + // Note that storing parts of an incomplete multipart upload counts towards + // space usage on S3 and will add additional costs if not cleaned up. + LeavePartsOnError bool + + // MaxUploadParts is the max number of parts which will be uploaded to S3, + // used to calculate the part size of the object to be uploaded. E.g., a 5GB + // file with MaxUploadParts set to 100 will upload the file as 100 parts of + // 50MB each, limited to a maximum of MaxUploadParts (10,000 parts). + MaxUploadParts int + + // The client to use when uploading to S3. + S3 s3iface.S3API +} + +// NewUploader creates a new Uploader instance to upload objects to S3. Pass in +// additional functional options to customize the uploader's behavior. Requires a +// client.ConfigProvider in order to create an S3 service client. The session.Session +// satisfies the client.ConfigProvider interface.
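+// +// Options are applied in order after the defaults are set, so a later +// option can override a value set by an earlier one.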
+// +// Example: +// // The session the S3 Uploader will use +// sess := session.New() +// +// // Create an uploader with the session and default options +// uploader := s3manager.NewUploader(sess) +// +// // Create an uploader with the session and custom options +// uploader := s3manager.NewUploader(sess, func(u *s3manager.Uploader) { +// u.PartSize = 64 * 1024 * 1024 // 64MB per part +// }) +func NewUploader(c client.ConfigProvider, options ...func(*Uploader)) *Uploader { + u := &Uploader{ + S3: s3.New(c), + PartSize: DefaultUploadPartSize, + Concurrency: DefaultUploadConcurrency, + LeavePartsOnError: false, + MaxUploadParts: MaxUploadParts, + } + + for _, option := range options { + option(u) + } + + return u +} + +// NewUploaderWithClient creates a new Uploader instance to upload objects to S3. Pass in +// additional functional options to customize the uploader's behavior. Requires +// an S3 service client to make S3 API calls. +// +// Example: +// // S3 service client the Upload manager will use. +// s3Svc := s3.New(session.New()) +// +// // Create an uploader with S3 client and default options +// uploader := s3manager.NewUploaderWithClient(s3Svc) +// +// // Create an uploader with S3 client and custom options +// uploader := s3manager.NewUploaderWithClient(s3Svc, func(u *s3manager.Uploader) { +// u.PartSize = 64 * 1024 * 1024 // 64MB per part +// }) +func NewUploaderWithClient(svc s3iface.S3API, options ...func(*Uploader)) *Uploader { + u := &Uploader{ + S3: svc, + PartSize: DefaultUploadPartSize, + Concurrency: DefaultUploadConcurrency, + LeavePartsOnError: false, + MaxUploadParts: MaxUploadParts, + } + + for _, option := range options { + option(u) + } + + return u +} + +// Upload uploads an object to S3, intelligently buffering large files into +// smaller chunks and sending them in parallel across multiple goroutines. You +// can configure the buffer size and concurrency through the Uploader's parameters. +// +// Additional functional options can be provided to configure the individual +// upload. These options are copies of the Uploader instance Upload is called from. +// Modifying the options will not impact the original Uploader instance. +// +// It is safe to call this method concurrently across goroutines. +// +// Example: +// // Upload input parameters +// upParams := &s3manager.UploadInput{ +// Bucket: &bucketName, +// Key: &keyName, +// Body: file, +// } +// +// // Perform an upload. +// result, err := uploader.Upload(upParams) +// +// // Perform upload with options different than those in the Uploader. +// result, err := uploader.Upload(upParams, func(u *s3manager.Uploader) { +// u.PartSize = 10 * 1024 * 1024 // 10MB part size +// u.LeavePartsOnError = true // Don't delete the parts if the upload fails. +// }) +func (u Uploader) Upload(input *UploadInput, options ...func(*Uploader)) (*UploadOutput, error) { + i := uploader{in: input, ctx: u} + + for _, option := range options { + option(&i.ctx) + } + + return i.upload() +} + +// internal structure to manage an upload to S3. +type uploader struct { + ctx Uploader + + in *UploadInput + + readerPos int64 // current reader position + totalSize int64 // set to -1 if the size is not known +} + +// internal logic for deciding whether to upload a single part or use a +// multipart upload.
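+// +// A single nextReader call reads up to PartSize bytes; io.EOF or +// io.ErrUnexpectedEOF on that first read means the whole body fits in one +// part, so a plain PutObject is issued instead of a multipart upload.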
+func (u *uploader) upload() (*UploadOutput, error) { + u.init() + + if u.ctx.PartSize < MinUploadPartSize { + msg := fmt.Sprintf("part size must be at least %d bytes", MinUploadPartSize) + return nil, awserr.New("ConfigError", msg, nil) + } + + // Do one read to determine if we have more than one part + buf, err := u.nextReader() + if err == io.EOF || err == io.ErrUnexpectedEOF { // single part + return u.singlePart(buf) + } else if err != nil { + return nil, awserr.New("ReadRequestBody", "read upload data failed", err) + } + + mu := multiuploader{uploader: u} + return mu.upload(buf) +} + +// init will initialize all default options. +func (u *uploader) init() { + if u.ctx.Concurrency == 0 { + u.ctx.Concurrency = DefaultUploadConcurrency + } + if u.ctx.PartSize == 0 { + u.ctx.PartSize = DefaultUploadPartSize + } + + // Try to get the total size for some optimizations + u.initSize() +} + +// initSize tries to detect the total stream size, setting u.totalSize. If +// the size is not known, totalSize is set to -1. +func (u *uploader) initSize() { + u.totalSize = -1 + + switch r := u.in.Body.(type) { + case io.Seeker: + pos, _ := r.Seek(0, 1) + defer r.Seek(pos, 0) + + n, err := r.Seek(0, 2) + if err != nil { + return + } + u.totalSize = n + + // Try to adjust partSize if it is too small and account for + // integer division truncation. + if u.totalSize/u.ctx.PartSize >= int64(u.ctx.MaxUploadParts) { + // Add one to the part size to account for remainders + // during the size calculation. e.g odd number of bytes. + u.ctx.PartSize = (u.totalSize / int64(u.ctx.MaxUploadParts)) + 1 + } + } +} + +// nextReader returns a seekable reader representing the next packet of data. +// This operation increases the shared u.readerPos counter, but note that it +// does not need to be wrapped in a mutex because nextReader is only called +// from the main thread. +func (u *uploader) nextReader() (io.ReadSeeker, error) { + switch r := u.in.Body.(type) { + case io.ReaderAt: + var err error + + n := u.ctx.PartSize + if u.totalSize >= 0 { + bytesLeft := u.totalSize - u.readerPos + + if bytesLeft == 0 { + err = io.EOF + n = bytesLeft + } else if bytesLeft <= u.ctx.PartSize { + err = io.ErrUnexpectedEOF + n = bytesLeft + } + } + + buf := io.NewSectionReader(r, u.readerPos, n) + u.readerPos += n + + return buf, err + + default: + packet := make([]byte, u.ctx.PartSize) + n, err := io.ReadFull(u.in.Body, packet) + u.readerPos += int64(n) + + return bytes.NewReader(packet[0:n]), err + } +} + +// singlePart contains upload logic for uploading a single chunk via +// a regular PutObject request. Multipart requests require at least two +// parts, or at least 5MB of data. +func (u *uploader) singlePart(buf io.ReadSeeker) (*UploadOutput, error) { + params := &s3.PutObjectInput{} + awsutil.Copy(params, u.in) + params.Body = buf + + req, out := u.ctx.S3.PutObjectRequest(params) + req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager")) + if err := req.Send(); err != nil { + return nil, err + } + + url := req.HTTPRequest.URL.String() + return &UploadOutput{ + Location: url, + VersionID: out.VersionId, + }, nil +} + +// internal structure to manage a specific multipart upload to S3. +type multiuploader struct { + *uploader + wg sync.WaitGroup + m sync.Mutex + err error + uploadID string + parts completedParts +} + +// keeps track of a single chunk of data being sent to S3. 
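+// Each chunk pairs a seekable window over the body with its 1-based part +// number, so worker goroutines may upload parts in any order; completedParts +// is re-sorted before CompleteMultipartUpload.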
+type chunk struct { + buf io.ReadSeeker + num int64 +} + +// completedParts is a wrapper to make parts sortable by their part number, +// since S3 requires this list to be sent in sorted order. +type completedParts []*s3.CompletedPart + +func (a completedParts) Len() int { return len(a) } +func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].PartNumber } + +// upload will perform a multipart upload using the firstBuf buffer containing +// the first chunk of data. +func (u *multiuploader) upload(firstBuf io.ReadSeeker) (*UploadOutput, error) { + params := &s3.CreateMultipartUploadInput{} + awsutil.Copy(params, u.in) + + // Create the multipart + req, resp := u.ctx.S3.CreateMultipartUploadRequest(params) + req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager")) + if err := req.Send(); err != nil { + return nil, err + } + u.uploadID = *resp.UploadId + + // Create the workers + ch := make(chan chunk, u.ctx.Concurrency) + for i := 0; i < u.ctx.Concurrency; i++ { + u.wg.Add(1) + go u.readChunk(ch) + } + + // Send part 1 to the workers + var num int64 = 1 + ch <- chunk{buf: firstBuf, num: num} + + // Read and queue the rest of the parts + for u.geterr() == nil { + num++ + // If this upload exceeds the maximum number of supported parts, error now. + if num > int64(u.ctx.MaxUploadParts) || num > int64(MaxUploadParts) { + var msg string + if num > int64(u.ctx.MaxUploadParts) { + msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit", + u.ctx.MaxUploadParts) + } else { + msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit", + MaxUploadParts) + } + u.seterr(awserr.New("TotalPartsExceeded", msg, nil)) + break + } + + buf, err := u.nextReader() + if err == io.EOF { + break + } + + ch <- chunk{buf: buf, num: num} + + if err == io.ErrUnexpectedEOF { + break + } else if err != nil { + u.seterr(awserr.New( + "ReadRequestBody", + "read multipart upload data failed", + err)) + break + } + } + + // Close the channel, wait for workers, and complete upload + close(ch) + u.wg.Wait() + complete := u.complete() + + if err := u.geterr(); err != nil { + return nil, &multiUploadError{ + awsError: awserr.New( + "MultipartUpload", + "upload multipart failed", + err), + uploadID: u.uploadID, + } + } + return &UploadOutput{ + Location: aws.StringValue(complete.Location), + VersionID: complete.VersionId, + UploadID: u.uploadID, + }, nil +} + +// readChunk runs in worker goroutines to pull chunks off of the ch channel +// and send() them as UploadPart requests. +func (u *multiuploader) readChunk(ch chan chunk) { + defer u.wg.Done() + for { + data, ok := <-ch + + if !ok { + break + } + + if u.geterr() == nil { + if err := u.send(data); err != nil { + u.seterr(err) + } + } + } +} + +// send performs an UploadPart request and keeps track of the completed +// part information.
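+// +// The ETag returned for each part must be echoed back in +// CompleteMultipartUpload, which is why it is recorded alongside the part +// number under the mutex before the worker moves on.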
+func (u *multiuploader) send(c chunk) error { + req, resp := u.ctx.S3.UploadPartRequest(&s3.UploadPartInput{ + Bucket: u.in.Bucket, + Key: u.in.Key, + Body: c.buf, + UploadId: &u.uploadID, + PartNumber: &c.num, + }) + req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager")) + if err := req.Send(); err != nil { + return err + } + + n := c.num + completed := &s3.CompletedPart{ETag: resp.ETag, PartNumber: &n} + + u.m.Lock() + u.parts = append(u.parts, completed) + u.m.Unlock() + + return nil +} + +// geterr is a thread-safe getter for the error object +func (u *multiuploader) geterr() error { + u.m.Lock() + defer u.m.Unlock() + + return u.err +} + +// seterr is a thread-safe setter for the error object +func (u *multiuploader) seterr(e error) { + u.m.Lock() + defer u.m.Unlock() + + u.err = e +} + +// fail will abort the multipart unless LeavePartsOnError is set to true. +func (u *multiuploader) fail() { + if u.ctx.LeavePartsOnError { + return + } + + req, _ := u.ctx.S3.AbortMultipartUploadRequest(&s3.AbortMultipartUploadInput{ + Bucket: u.in.Bucket, + Key: u.in.Key, + UploadId: &u.uploadID, + }) + req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager")) + req.Send() +} + +// complete successfully completes a multipart upload and returns the response. +func (u *multiuploader) complete() *s3.CompleteMultipartUploadOutput { + if u.geterr() != nil { + u.fail() + return nil + } + + // Parts must be sorted in PartNumber order. + sort.Sort(u.parts) + + req, resp := u.ctx.S3.CompleteMultipartUploadRequest(&s3.CompleteMultipartUploadInput{ + Bucket: u.in.Bucket, + Key: u.in.Key, + UploadId: &u.uploadID, + MultipartUpload: &s3.CompletedMultipartUpload{Parts: u.parts}, + }) + req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager")) + if err := req.Send(); err != nil { + u.seterr(err) + u.fail() + } + + return resp +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_test.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_test.go new file mode 100644 index 000000000..9b408dabb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_test.go @@ -0,0 +1,595 @@ +package s3manager_test + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "reflect" + "sort" + "strings" + "sync" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3manager" + "github.com/stretchr/testify/assert" +) + +var emptyList = []string{} + +func val(i interface{}, s string) interface{} { + v, err := awsutil.ValuesAtPath(i, s) + if err != nil || len(v) == 0 { + return nil + } + if _, ok := v[0].(io.Reader); ok { + return v[0] + } + + if rv := reflect.ValueOf(v[0]); rv.Kind() == reflect.Ptr { + return rv.Elem().Interface() + } + + return v[0] +} + +func contains(src []string, s string) bool { + for _, v := range src { + if s == v { + return true + } + } + return false +} + +func loggingSvc(ignoreOps []string) (*s3.S3, *[]string, *[]interface{}) { + var m sync.Mutex + partNum := 0 + names := []string{} + params := []interface{}{} + svc := s3.New(unit.Session) + svc.Handlers.Unmarshal.Clear() + svc.Handlers.UnmarshalMeta.Clear() + svc.Handlers.UnmarshalError.Clear() + svc.Handlers.Send.Clear() + 
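+	// With the send and unmarshal handler chains cleared above, no network I/O
+	// occurs; the handler pushed below records each operation name and its
+	// params, then fakes a 200 response and fills r.Data with canned IDs,
+	// ETags, and version IDs for the tests to assert against.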
svc.Handlers.Send.PushBack(func(r *request.Request) { + m.Lock() + defer m.Unlock() + + if !contains(ignoreOps, r.Operation.Name) { + names = append(names, r.Operation.Name) + params = append(params, r.Params) + } + + r.HTTPResponse = &http.Response{ + StatusCode: 200, + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + + switch data := r.Data.(type) { + case *s3.CreateMultipartUploadOutput: + data.UploadId = aws.String("UPLOAD-ID") + case *s3.UploadPartOutput: + partNum++ + data.ETag = aws.String(fmt.Sprintf("ETAG%d", partNum)) + case *s3.CompleteMultipartUploadOutput: + data.Location = aws.String("https://location") + data.VersionId = aws.String("VERSION-ID") + case *s3.PutObjectOutput: + data.VersionId = aws.String("VERSION-ID") + } + }) + + return svc, &names, ¶ms +} + +func buflen(i interface{}) int { + r := i.(io.Reader) + b, _ := ioutil.ReadAll(r) + return len(b) +} + +func TestUploadOrderMulti(t *testing.T) { + s, ops, args := loggingSvc(emptyList) + u := s3manager.NewUploaderWithClient(s) + + resp, err := u.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(buf12MB), + ServerSideEncryption: aws.String("aws:kms"), + SSEKMSKeyId: aws.String("KmsId"), + ContentType: aws.String("content/type"), + }) + + assert.NoError(t, err) + assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops) + assert.Equal(t, "https://location", resp.Location) + assert.Equal(t, "UPLOAD-ID", resp.UploadID) + assert.Equal(t, aws.String("VERSION-ID"), resp.VersionID) + + // Validate input values + + // UploadPart + assert.Equal(t, "UPLOAD-ID", val((*args)[1], "UploadId")) + assert.Equal(t, "UPLOAD-ID", val((*args)[2], "UploadId")) + assert.Equal(t, "UPLOAD-ID", val((*args)[3], "UploadId")) + + // CompleteMultipartUpload + assert.Equal(t, "UPLOAD-ID", val((*args)[4], "UploadId")) + assert.Equal(t, int64(1), val((*args)[4], "MultipartUpload.Parts[0].PartNumber")) + assert.Equal(t, int64(2), val((*args)[4], "MultipartUpload.Parts[1].PartNumber")) + assert.Equal(t, int64(3), val((*args)[4], "MultipartUpload.Parts[2].PartNumber")) + assert.Regexp(t, `^ETAG\d+$`, val((*args)[4], "MultipartUpload.Parts[0].ETag")) + assert.Regexp(t, `^ETAG\d+$`, val((*args)[4], "MultipartUpload.Parts[1].ETag")) + assert.Regexp(t, `^ETAG\d+$`, val((*args)[4], "MultipartUpload.Parts[2].ETag")) + + // Custom headers + assert.Equal(t, "aws:kms", val((*args)[0], "ServerSideEncryption")) + assert.Equal(t, "KmsId", val((*args)[0], "SSEKMSKeyId")) + assert.Equal(t, "content/type", val((*args)[0], "ContentType")) +} + +func TestUploadOrderMultiDifferentPartSize(t *testing.T) { + s, ops, args := loggingSvc(emptyList) + mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) { + u.PartSize = 1024 * 1024 * 7 + u.Concurrency = 1 + }) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(buf12MB), + }) + + assert.NoError(t, err) + assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops) + + // Part lengths + assert.Equal(t, 1024*1024*7, buflen(val((*args)[1], "Body"))) + assert.Equal(t, 1024*1024*5, buflen(val((*args)[2], "Body"))) +} + +func TestUploadIncreasePartSize(t *testing.T) { + s, ops, args := loggingSvc(emptyList) + mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) { + u.Concurrency = 1 + u.MaxUploadParts = 2 + }) + _, err := 
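+		// With MaxUploadParts = 2 and a 12MB body, initSize grows the
+		// effective part size to totalSize/2 + 1 (6MB + 1), giving the
+		// 6MB+1 and 6MB-1 part lengths asserted below. mgr.PartSize itself
+		// stays at the 5MB default because the uploader mutates a per-call
+		// copy (DefaultDownloadPartSize, asserted below, shares that 5MB value).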
mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(buf12MB), + }) + + assert.NoError(t, err) + assert.Equal(t, int64(s3manager.DefaultDownloadPartSize), mgr.PartSize) + assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops) + + // Part lengths + assert.Equal(t, (1024*1024*6)+1, buflen(val((*args)[1], "Body"))) + assert.Equal(t, (1024*1024*6)-1, buflen(val((*args)[2], "Body"))) +} + +func TestUploadFailIfPartSizeTooSmall(t *testing.T) { + mgr := s3manager.NewUploader(unit.Session, func(u *s3manager.Uploader) { + u.PartSize = 5 + }) + resp, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(buf12MB), + }) + + assert.Nil(t, resp) + assert.NotNil(t, err) + + aerr := err.(awserr.Error) + assert.Equal(t, "ConfigError", aerr.Code()) + assert.Contains(t, aerr.Message(), "part size must be at least") +} + +func TestUploadOrderSingle(t *testing.T) { + s, ops, args := loggingSvc(emptyList) + mgr := s3manager.NewUploaderWithClient(s) + resp, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(buf2MB), + ServerSideEncryption: aws.String("aws:kms"), + SSEKMSKeyId: aws.String("KmsId"), + ContentType: aws.String("content/type"), + }) + + assert.NoError(t, err) + assert.Equal(t, []string{"PutObject"}, *ops) + assert.NotEqual(t, "", resp.Location) + assert.Equal(t, aws.String("VERSION-ID"), resp.VersionID) + assert.Equal(t, "", resp.UploadID) + assert.Equal(t, "aws:kms", val((*args)[0], "ServerSideEncryption")) + assert.Equal(t, "KmsId", val((*args)[0], "SSEKMSKeyId")) + assert.Equal(t, "content/type", val((*args)[0], "ContentType")) +} + +func TestUploadOrderSingleFailure(t *testing.T) { + s, ops, _ := loggingSvc(emptyList) + s.Handlers.Send.PushBack(func(r *request.Request) { + r.HTTPResponse.StatusCode = 400 + }) + mgr := s3manager.NewUploaderWithClient(s) + resp, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(buf2MB), + }) + + assert.Error(t, err) + assert.Equal(t, []string{"PutObject"}, *ops) + assert.Nil(t, resp) +} + +func TestUploadOrderZero(t *testing.T) { + s, ops, args := loggingSvc(emptyList) + mgr := s3manager.NewUploaderWithClient(s) + resp, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(make([]byte, 0)), + }) + + assert.NoError(t, err) + assert.Equal(t, []string{"PutObject"}, *ops) + assert.NotEqual(t, "", resp.Location) + assert.Equal(t, "", resp.UploadID) + assert.Equal(t, 0, buflen(val((*args)[0], "Body"))) +} + +func TestUploadOrderMultiFailure(t *testing.T) { + s, ops, _ := loggingSvc(emptyList) + s.Handlers.Send.PushBack(func(r *request.Request) { + switch t := r.Data.(type) { + case *s3.UploadPartOutput: + if *t.ETag == "ETAG2" { + r.HTTPResponse.StatusCode = 400 + } + } + }) + + mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) { + u.Concurrency = 1 + }) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(buf12MB), + }) + + assert.Error(t, err) + assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "AbortMultipartUpload"}, *ops) +} + +func TestUploadOrderMultiFailureOnComplete(t *testing.T) { + s, ops, _ := loggingSvc(emptyList) + 
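+	// The extra Send handler below runs after loggingSvc's stub and flips the
+	// CompleteMultipartUpload response to a 400, so the uploader is expected
+	// to finish with an AbortMultipartUpload call (see the ops assert).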
s.Handlers.Send.PushBack(func(r *request.Request) { + switch r.Data.(type) { + case *s3.CompleteMultipartUploadOutput: + r.HTTPResponse.StatusCode = 400 + } + }) + + mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) { + u.Concurrency = 1 + }) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(buf12MB), + }) + + assert.Error(t, err) + assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", + "UploadPart", "CompleteMultipartUpload", "AbortMultipartUpload"}, *ops) +} + +func TestUploadOrderMultiFailureOnCreate(t *testing.T) { + s, ops, _ := loggingSvc(emptyList) + s.Handlers.Send.PushBack(func(r *request.Request) { + switch r.Data.(type) { + case *s3.CreateMultipartUploadOutput: + r.HTTPResponse.StatusCode = 400 + } + }) + + mgr := s3manager.NewUploaderWithClient(s) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(make([]byte, 1024*1024*12)), + }) + + assert.Error(t, err) + assert.Equal(t, []string{"CreateMultipartUpload"}, *ops) +} + +func TestUploadOrderMultiFailureLeaveParts(t *testing.T) { + s, ops, _ := loggingSvc(emptyList) + s.Handlers.Send.PushBack(func(r *request.Request) { + switch data := r.Data.(type) { + case *s3.UploadPartOutput: + if *data.ETag == "ETAG2" { + r.HTTPResponse.StatusCode = 400 + } + } + }) + + mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) { + u.Concurrency = 1 + u.LeavePartsOnError = true + }) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(make([]byte, 1024*1024*12)), + }) + + assert.Error(t, err) + assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart"}, *ops) +} + +type failreader struct { + times int + failCount int +} + +func (f *failreader) Read(b []byte) (int, error) { + f.failCount++ + if f.failCount >= f.times { + return 0, fmt.Errorf("random failure") + } + return len(b), nil +} + +func TestUploadOrderReadFail1(t *testing.T) { + s, ops, _ := loggingSvc(emptyList) + mgr := s3manager.NewUploaderWithClient(s) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: &failreader{times: 1}, + }) + + assert.Equal(t, "ReadRequestBody", err.(awserr.Error).Code()) + assert.EqualError(t, err.(awserr.Error).OrigErr(), "random failure") + assert.Equal(t, []string{}, *ops) +} + +func TestUploadOrderReadFail2(t *testing.T) { + s, ops, _ := loggingSvc([]string{"UploadPart"}) + mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) { + u.Concurrency = 1 + }) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: &failreader{times: 2}, + }) + + assert.Equal(t, "MultipartUpload", err.(awserr.Error).Code()) + assert.Equal(t, "ReadRequestBody", err.(awserr.Error).OrigErr().(awserr.Error).Code()) + assert.Contains(t, err.(awserr.Error).OrigErr().Error(), "random failure") + assert.Equal(t, []string{"CreateMultipartUpload", "AbortMultipartUpload"}, *ops) +} + +type sizedReader struct { + size int + cur int + err error +} + +func (s *sizedReader) Read(p []byte) (n int, err error) { + if s.cur >= s.size { + if s.err == nil { + s.err = io.EOF + } + return 0, s.err + } + + n = len(p) + s.cur += len(p) + if s.cur > s.size { + n -= s.cur - s.size + } + + return +} + +func TestUploadOrderMultiBufferedReader(t *testing.T) { + s, ops, 
args := loggingSvc(emptyList) + mgr := s3manager.NewUploaderWithClient(s) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: &sizedReader{size: 1024 * 1024 * 12}, + }) + + assert.NoError(t, err) + assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops) + + // Part lengths + parts := []int{ + buflen(val((*args)[1], "Body")), + buflen(val((*args)[2], "Body")), + buflen(val((*args)[3], "Body")), + } + sort.Ints(parts) + assert.Equal(t, []int{1024 * 1024 * 2, 1024 * 1024 * 5, 1024 * 1024 * 5}, parts) +} + +func TestUploadOrderMultiBufferedReaderUnexpectedEOF(t *testing.T) { + s, ops, args := loggingSvc(emptyList) + mgr := s3manager.NewUploaderWithClient(s) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: &sizedReader{size: 1024 * 1024 * 12, err: io.ErrUnexpectedEOF}, + }) + + assert.NoError(t, err) + assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops) + + // Part lengths + parts := []int{ + buflen(val((*args)[1], "Body")), + buflen(val((*args)[2], "Body")), + buflen(val((*args)[3], "Body")), + } + sort.Ints(parts) + assert.Equal(t, []int{1024 * 1024 * 2, 1024 * 1024 * 5, 1024 * 1024 * 5}, parts) +} + +// TestUploadOrderMultiBufferedReaderEOF tests the edge case where the +// file size is the same as part size, which means nextReader will +// return io.EOF rather than io.ErrUnexpectedEOF +func TestUploadOrderMultiBufferedReaderEOF(t *testing.T) { + s, ops, args := loggingSvc(emptyList) + mgr := s3manager.NewUploaderWithClient(s) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: &sizedReader{size: 1024 * 1024 * 10, err: io.EOF}, + }) + + assert.NoError(t, err) + assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops) + + // Part lengths + parts := []int{ + buflen(val((*args)[1], "Body")), + buflen(val((*args)[2], "Body")), + } + sort.Ints(parts) + assert.Equal(t, []int{1024 * 1024 * 5, 1024 * 1024 * 5}, parts) +} + +func TestUploadOrderMultiBufferedReaderExceedTotalParts(t *testing.T) { + s, ops, _ := loggingSvc([]string{"UploadPart"}) + mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) { + u.Concurrency = 1 + u.MaxUploadParts = 2 + }) + resp, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: &sizedReader{size: 1024 * 1024 * 12}, + }) + + assert.Error(t, err) + assert.Nil(t, resp) + assert.Equal(t, []string{"CreateMultipartUpload", "AbortMultipartUpload"}, *ops) + + aerr := err.(awserr.Error) + assert.Equal(t, "MultipartUpload", aerr.Code()) + assert.Equal(t, "TotalPartsExceeded", aerr.OrigErr().(awserr.Error).Code()) + assert.Contains(t, aerr.Error(), "configured MaxUploadParts (2)") +} + +func TestUploadOrderSingleBufferedReader(t *testing.T) { + s, ops, _ := loggingSvc(emptyList) + mgr := s3manager.NewUploaderWithClient(s) + resp, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: &sizedReader{size: 1024 * 1024 * 2}, + }) + + assert.NoError(t, err) + assert.Equal(t, []string{"PutObject"}, *ops) + assert.NotEqual(t, "", resp.Location) + assert.Equal(t, "", resp.UploadID) +} + +func TestUploadZeroLenObject(t *testing.T) { + requestMade := false + server := 
httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requestMade = true + w.WriteHeader(http.StatusOK) + })) + mgr := s3manager.NewUploaderWithClient(s3.New(unit.Session, &aws.Config{ + Endpoint: aws.String(server.URL), + })) + resp, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: strings.NewReader(""), + }) + + assert.NoError(t, err) + assert.True(t, requestMade) + assert.NotEqual(t, "", resp.Location) + assert.Equal(t, "", resp.UploadID) +} + +func TestUploadInputS3PutObjectInputPairity(t *testing.T) { + matchings := compareStructType(reflect.TypeOf(s3.PutObjectInput{}), + reflect.TypeOf(s3manager.UploadInput{})) + aOnly := []string{} + bOnly := []string{} + + for k, c := range matchings { + if c == 1 && k != "ContentLength" { + aOnly = append(aOnly, k) + } else if c == 2 { + bOnly = append(bOnly, k) + } + } + assert.Empty(t, aOnly, "s3.PutObjectInput") + assert.Empty(t, bOnly, "s3Manager.UploadInput") +} +func compareStructType(a, b reflect.Type) map[string]int { + if a.Kind() != reflect.Struct || b.Kind() != reflect.Struct { + panic(fmt.Sprintf("types must both be structs, got %v and %v", a.Kind(), b.Kind())) + } + + aFields := enumFields(a) + bFields := enumFields(b) + + matchings := map[string]int{} + + for i := 0; i < len(aFields) || i < len(bFields); i++ { + if i < len(aFields) { + c := matchings[aFields[i].Name] + matchings[aFields[i].Name] = c + 1 + } + if i < len(bFields) { + c := matchings[bFields[i].Name] + matchings[bFields[i].Name] = c + 2 + } + } + + return matchings +} + +func enumFields(v reflect.Type) []reflect.StructField { + fields := []reflect.StructField{} + + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + // Ignoreing anon fields + if field.PkgPath != "" { + // Ignore unexported fields + continue + } + + fields = append(fields, field) + } + + return fields +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go new file mode 100644 index 000000000..5833952a2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go @@ -0,0 +1,86 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package s3 + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/restxml" +) + +// S3 is a client for Amazon S3. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type S3 struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "s3" + +// New creates a new instance of the S3 client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a S3 client from just a session. 
+// svc := s3.New(mySession) +// +// // Create a S3 client with additional configuration +// svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *S3 { + svc := &S3{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2006-03-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a S3 operation and runs any +// custom request initialization. +func (c *S3) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go new file mode 100644 index 000000000..268ea2fb4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go @@ -0,0 +1,44 @@ +package s3 + +import ( + "crypto/md5" + "encoding/base64" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil) + +func validateSSERequiresSSL(r *request.Request) { + if r.HTTPRequest.URL.Scheme != "https" { + p, _ := awsutil.ValuesAtPath(r.Params, "SSECustomerKey||CopySourceSSECustomerKey") + if len(p) > 0 { + r.Error = errSSERequiresSSL + } + } +} + +func computeSSEKeys(r *request.Request) { + headers := []string{ + "x-amz-server-side-encryption-customer-key", + "x-amz-copy-source-server-side-encryption-customer-key", + } + + for _, h := range headers { + md5h := h + "-md5" + if key := r.HTTPRequest.Header.Get(h); key != "" { + // Base64-encode the value + b64v := base64.StdEncoding.EncodeToString([]byte(key)) + r.HTTPRequest.Header.Set(h, b64v) + + // Add MD5 if it wasn't computed + if r.HTTPRequest.Header.Get(md5h) == "" { + sum := md5.Sum([]byte(key)) + b64sum := base64.StdEncoding.EncodeToString(sum[:]) + r.HTTPRequest.Header.Set(md5h, b64sum) + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/sse_test.go b/vendor/github.com/aws/aws-sdk-go/service/s3/sse_test.go new file mode 100644 index 000000000..5f1ca64bf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/sse_test.go @@ -0,0 +1,79 @@ +package s3_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/stretchr/testify/assert" +) + +func TestSSECustomerKeyOverHTTPError(t *testing.T) { + s := s3.New(unit.Session, 
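+		// DisableSSL below downgrades the endpoint to plain HTTP, which is
+		// exactly the condition validateSSERequiresSSL rejects when SSE-C
+		// key fields are present on the request.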
&aws.Config{DisableSSL: aws.Bool(true)}) + req, _ := s.CopyObjectRequest(&s3.CopyObjectInput{ + Bucket: aws.String("bucket"), + CopySource: aws.String("bucket/source"), + Key: aws.String("dest"), + SSECustomerKey: aws.String("key"), + }) + err := req.Build() + + assert.Error(t, err) + assert.Equal(t, "ConfigError", err.(awserr.Error).Code()) + assert.Contains(t, err.(awserr.Error).Message(), "cannot send SSE keys over HTTP") +} + +func TestCopySourceSSECustomerKeyOverHTTPError(t *testing.T) { + s := s3.New(unit.Session, &aws.Config{DisableSSL: aws.Bool(true)}) + req, _ := s.CopyObjectRequest(&s3.CopyObjectInput{ + Bucket: aws.String("bucket"), + CopySource: aws.String("bucket/source"), + Key: aws.String("dest"), + CopySourceSSECustomerKey: aws.String("key"), + }) + err := req.Build() + + assert.Error(t, err) + assert.Equal(t, "ConfigError", err.(awserr.Error).Code()) + assert.Contains(t, err.(awserr.Error).Message(), "cannot send SSE keys over HTTP") +} + +func TestComputeSSEKeys(t *testing.T) { + s := s3.New(unit.Session) + req, _ := s.CopyObjectRequest(&s3.CopyObjectInput{ + Bucket: aws.String("bucket"), + CopySource: aws.String("bucket/source"), + Key: aws.String("dest"), + SSECustomerKey: aws.String("key"), + CopySourceSSECustomerKey: aws.String("key"), + }) + err := req.Build() + + assert.NoError(t, err) + assert.Equal(t, "a2V5", req.HTTPRequest.Header.Get("x-amz-server-side-encryption-customer-key")) + assert.Equal(t, "a2V5", req.HTTPRequest.Header.Get("x-amz-copy-source-server-side-encryption-customer-key")) + assert.Equal(t, "PG4LipwVIkqCKLmpjKFTHQ==", req.HTTPRequest.Header.Get("x-amz-server-side-encryption-customer-key-md5")) + assert.Equal(t, "PG4LipwVIkqCKLmpjKFTHQ==", req.HTTPRequest.Header.Get("x-amz-copy-source-server-side-encryption-customer-key-md5")) +} + +func TestComputeSSEKeysShortcircuit(t *testing.T) { + s := s3.New(unit.Session) + req, _ := s.CopyObjectRequest(&s3.CopyObjectInput{ + Bucket: aws.String("bucket"), + CopySource: aws.String("bucket/source"), + Key: aws.String("dest"), + SSECustomerKey: aws.String("key"), + CopySourceSSECustomerKey: aws.String("key"), + SSECustomerKeyMD5: aws.String("MD5"), + CopySourceSSECustomerKeyMD5: aws.String("MD5"), + }) + err := req.Build() + + assert.NoError(t, err) + assert.Equal(t, "a2V5", req.HTTPRequest.Header.Get("x-amz-server-side-encryption-customer-key")) + assert.Equal(t, "a2V5", req.HTTPRequest.Header.Get("x-amz-copy-source-server-side-encryption-customer-key")) + assert.Equal(t, "MD5", req.HTTPRequest.Header.Get("x-amz-server-side-encryption-customer-key-md5")) + assert.Equal(t, "MD5", req.HTTPRequest.Header.Get("x-amz-copy-source-server-side-encryption-customer-key-md5")) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go new file mode 100644 index 000000000..ce65fcdaf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go @@ -0,0 +1,36 @@ +package s3 + +import ( + "bytes" + "io/ioutil" + "net/http" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +func copyMultipartStatusOKUnmarhsalError(r *request.Request) { + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "unable to read response body", err) + return + } + body := bytes.NewReader(b) + r.HTTPResponse.Body = aws.ReadSeekCloser(body) + defer r.HTTPResponse.Body.(aws.ReaderSeekerCloser).Seek(0, 0) + + if body.Len() == 
0 {
+		// If there is no body don't attempt to parse the body.
+		return
+	}
+
+	unmarshalError(r)
+	if err, ok := r.Error.(awserr.Error); ok && err != nil {
+		if err.Code() == "SerializationError" {
+			r.Error = nil
+			return
+		}
+		r.HTTPResponse.StatusCode = http.StatusServiceUnavailable
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error_test.go b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error_test.go
new file mode 100644
index 000000000..f508cd153
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error_test.go
@@ -0,0 +1,130 @@
+package s3_test
+
+import (
+	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/awstesting/unit"
+	"github.com/aws/aws-sdk-go/service/s3"
+)
+
+const errMsg = `<Error><Code>ErrorCode</Code><Message>message body</Message><RequestId>requestID</RequestId><HostId>hostID=</HostId></Error>`
+
+var lastModifiedTime = time.Date(2009, 11, 23, 0, 0, 0, 0, time.UTC)
+
+func TestCopyObjectNoError(t *testing.T) {
+	const successMsg = `
+<?xml version="1.0" encoding="UTF-8"?>
+<CopyObjectResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><LastModified>2009-11-23T0:00:00Z</LastModified><ETag>&quot;1da64c7f13d1e8dbeaea40b905fd586c&quot;</ETag></CopyObjectResult>`
+
+	res, err := newCopyTestSvc(successMsg).CopyObject(&s3.CopyObjectInput{
+		Bucket:     aws.String("bucketname"),
+		CopySource: aws.String("bucketname/exists.txt"),
+		Key:        aws.String("destination.txt"),
+	})
+
+	require.NoError(t, err)
+
+	assert.Equal(t, fmt.Sprintf(`%q`, "1da64c7f13d1e8dbeaea40b905fd586c"), *res.CopyObjectResult.ETag)
+	assert.Equal(t, lastModifiedTime, *res.CopyObjectResult.LastModified)
+}
+
+func TestCopyObjectError(t *testing.T) {
+	_, err := newCopyTestSvc(errMsg).CopyObject(&s3.CopyObjectInput{
+		Bucket:     aws.String("bucketname"),
+		CopySource: aws.String("bucketname/doesnotexist.txt"),
+		Key:        aws.String("destination.txt"),
+	})
+
+	require.Error(t, err)
+	e := err.(awserr.Error)
+
+	assert.Equal(t, "ErrorCode", e.Code())
+	assert.Equal(t, "message body", e.Message())
+}
+
+func TestUploadPartCopySuccess(t *testing.T) {
+	const successMsg = `
+<?xml version="1.0" encoding="UTF-8"?>
+<CopyPartResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><LastModified>2009-11-23T0:00:00Z</LastModified><ETag>&quot;1da64c7f13d1e8dbeaea40b905fd586c&quot;</ETag></CopyPartResult>`
+
+	res, err := newCopyTestSvc(successMsg).UploadPartCopy(&s3.UploadPartCopyInput{
+		Bucket:     aws.String("bucketname"),
+		CopySource: aws.String("bucketname/doesnotexist.txt"),
+		Key:        aws.String("destination.txt"),
+		PartNumber: aws.Int64(0),
+		UploadId:   aws.String("uploadID"),
+	})
+
+	require.NoError(t, err)
+
+	assert.Equal(t, fmt.Sprintf(`%q`, "1da64c7f13d1e8dbeaea40b905fd586c"), *res.CopyPartResult.ETag)
+	assert.Equal(t, lastModifiedTime, *res.CopyPartResult.LastModified)
+}
+
+func TestUploadPartCopyError(t *testing.T) {
+	_, err := newCopyTestSvc(errMsg).UploadPartCopy(&s3.UploadPartCopyInput{
+		Bucket:     aws.String("bucketname"),
+		CopySource: aws.String("bucketname/doesnotexist.txt"),
+		Key:        aws.String("destination.txt"),
+		PartNumber: aws.Int64(0),
+		UploadId:   aws.String("uploadID"),
+	})
+
+	require.Error(t, err)
+	e := err.(awserr.Error)
+
+	assert.Equal(t, "ErrorCode", e.Code())
+	assert.Equal(t, "message body", e.Message())
+}
+
+func TestCompleteMultipartUploadSuccess(t *testing.T) {
+	const successMsg = `
+<?xml version="1.0" encoding="UTF-8"?>
+<CompleteMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Location>locationName</Location><Bucket>bucketName</Bucket><Key>keyName</Key><ETag>&quot;etagVal&quot;</ETag></CompleteMultipartUploadResult>`
+	res, err := newCopyTestSvc(successMsg).CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
+		Bucket:   aws.String("bucketname"),
+		Key:      aws.String("key"),
+		UploadId: aws.String("uploadID"),
+	})
+
+	require.NoError(t, err)
+
+	assert.Equal(t, `"etagVal"`, *res.ETag)
+	assert.Equal(t, "bucketName", *res.Bucket)
+	assert.Equal(t, "keyName",
*res.Key) + assert.Equal(t, "locationName", *res.Location) +} + +func TestCompleteMultipartUploadError(t *testing.T) { + _, err := newCopyTestSvc(errMsg).CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{ + Bucket: aws.String("bucketname"), + Key: aws.String("key"), + UploadId: aws.String("uploadID"), + }) + + require.Error(t, err) + e := err.(awserr.Error) + + assert.Equal(t, "ErrorCode", e.Code()) + assert.Equal(t, "message body", e.Message()) +} + +func newCopyTestSvc(errMsg string) *s3.S3 { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.Error(w, errMsg, http.StatusOK) + })) + return s3.New(unit.Session, aws.NewConfig(). + WithEndpoint(server.URL). + WithDisableSSL(true). + WithMaxRetries(0). + WithS3ForcePathStyle(true)) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go new file mode 100644 index 000000000..59e4950b8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go @@ -0,0 +1,59 @@ +package s3 + +import ( + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +type xmlErrorResponse struct { + XMLName xml.Name `xml:"Error"` + Code string `xml:"Code"` + Message string `xml:"Message"` +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + defer io.Copy(ioutil.Discard, r.HTTPResponse.Body) + + if r.HTTPResponse.StatusCode == http.StatusMovedPermanently { + r.Error = awserr.NewRequestFailure( + awserr.New("BucketRegionError", + fmt.Sprintf("incorrect region, the bucket is not in '%s' region", + aws.StringValue(r.Config.Region)), + nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + if r.HTTPResponse.ContentLength == 0 { + // No body, use status code to generate an awserr.Error + r.Error = awserr.NewRequestFailure( + awserr.New(strings.Replace(r.HTTPResponse.Status, " ", "", -1), r.HTTPResponse.Status, nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + resp := &xmlErrorResponse{} + err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp) + if err != nil && err != io.EOF { + r.Error = awserr.New("SerializationError", "failed to decode S3 XML error response", nil) + } else { + r.Error = awserr.NewRequestFailure( + awserr.New(resp.Code, resp.Message, nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error_leak_test.go b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error_leak_test.go new file mode 100644 index 000000000..449637144 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error_leak_test.go @@ -0,0 +1,33 @@ +package s3 + +import ( + "github.com/stretchr/testify/assert" + "net/http" + "testing" + + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting" +) + +func TestUnmarhsalErrorLeak(t *testing.T) { + req := &request.Request{ + HTTPRequest: &http.Request{ + Header: make(http.Header), + Body: &awstesting.ReadCloser{Size: 2048}, + }, + } + req.HTTPResponse = &http.Response{ + Body: &awstesting.ReadCloser{Size: 2048}, + Header: http.Header{ + "X-Amzn-Requestid": []string{"1"}, + }, + StatusCode: http.StatusOK, + } + + reader := req.HTTPResponse.Body.(*awstesting.ReadCloser) + unmarshalError(req) + + assert.NotNil(t, req.Error) + assert.Equal(t, reader.Closed, 
true)
+	assert.Equal(t, reader.Size, 0)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error_test.go b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error_test.go
new file mode 100644
index 000000000..deddc702f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error_test.go
@@ -0,0 +1,166 @@
+package s3_test
+
+import (
+	"bytes"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/awstesting/unit"
+	"github.com/aws/aws-sdk-go/service/s3"
+)
+
+type testErrorCase struct {
+	RespFn    func() *http.Response
+	ReqID     string
+	Code, Msg string
+}
+
+var testUnmarshalCases = []testErrorCase{
+	{
+		RespFn: func() *http.Response {
+			return &http.Response{
+				StatusCode:    301,
+				Header:        http.Header{"X-Amz-Request-Id": []string{"abc123"}},
+				Body:          ioutil.NopCloser(bytes.NewReader(nil)),
+				ContentLength: -1,
+			}
+		},
+		ReqID: "abc123",
+		Code:  "BucketRegionError", Msg: "incorrect region, the bucket is not in 'mock-region' region",
+	},
+	{
+		RespFn: func() *http.Response {
+			return &http.Response{
+				StatusCode:    403,
+				Header:        http.Header{"X-Amz-Request-Id": []string{"abc123"}},
+				Body:          ioutil.NopCloser(bytes.NewReader(nil)),
+				ContentLength: 0,
+			}
+		},
+		ReqID: "abc123",
+		Code:  "Forbidden", Msg: "Forbidden",
+	},
+	{
+		RespFn: func() *http.Response {
+			return &http.Response{
+				StatusCode:    400,
+				Header:        http.Header{"X-Amz-Request-Id": []string{"abc123"}},
+				Body:          ioutil.NopCloser(bytes.NewReader(nil)),
+				ContentLength: 0,
+			}
+		},
+		ReqID: "abc123",
+		Code:  "BadRequest", Msg: "Bad Request",
+	},
+	{
+		RespFn: func() *http.Response {
+			return &http.Response{
+				StatusCode:    404,
+				Header:        http.Header{"X-Amz-Request-Id": []string{"abc123"}},
+				Body:          ioutil.NopCloser(bytes.NewReader(nil)),
+				ContentLength: 0,
+			}
+		},
+		ReqID: "abc123",
+		Code:  "NotFound", Msg: "Not Found",
+	},
+	{
+		RespFn: func() *http.Response {
+			body := `<Error><Code>SomeException</Code><Message>Exception message</Message></Error>`
+			return &http.Response{
+				StatusCode:    500,
+				Header:        http.Header{"X-Amz-Request-Id": []string{"abc123"}},
+				Body:          ioutil.NopCloser(strings.NewReader(body)),
+				ContentLength: int64(len(body)),
+			}
+		},
+		ReqID: "abc123",
+		Code:  "SomeException", Msg: "Exception message",
+	},
+}
+
+func TestUnmarshalError(t *testing.T) {
+	for _, c := range testUnmarshalCases {
+		s := s3.New(unit.Session)
+		s.Handlers.Send.Clear()
+		s.Handlers.Send.PushBack(func(r *request.Request) {
+			r.HTTPResponse = c.RespFn()
+			r.HTTPResponse.Status = http.StatusText(r.HTTPResponse.StatusCode)
+		})
+		_, err := s.PutBucketAcl(&s3.PutBucketAclInput{
+			Bucket: aws.String("bucket"), ACL: aws.String("public-read"),
+		})
+
+		assert.Error(t, err)
+		assert.Equal(t, c.Code, err.(awserr.Error).Code())
+		assert.Equal(t, c.Msg, err.(awserr.Error).Message())
+		assert.Equal(t, c.ReqID, err.(awserr.RequestFailure).RequestID())
+	}
+}
+
+const completeMultiResp = `
+163
+<?xml version="1.0" encoding="UTF-8"?>
+
+<CompleteMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Location>https://bucket.s3-us-west-2.amazonaws.com/key</Location><Bucket>bucket</Bucket><Key>key</Key><ETag>&quot;a7d414b9133d6483d9a1c4e04e856e3b-2&quot;</ETag></CompleteMultipartUploadResult>
+0
+`
+
+func Test200NoErrorUnmarshalError(t *testing.T) {
+	s := s3.New(unit.Session)
+	s.Handlers.Send.Clear()
+	s.Handlers.Send.PushBack(func(r *request.Request) {
+		r.HTTPResponse = &http.Response{
+			StatusCode:    200,
+			Header:        http.Header{"X-Amz-Request-Id": []string{"abc123"}},
+			Body:          ioutil.NopCloser(strings.NewReader(completeMultiResp)),
+			ContentLength: -1,
+		}
+		r.HTTPResponse.Status = 
http.StatusText(r.HTTPResponse.StatusCode)
+	})
+	_, err := s.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
+		Bucket: aws.String("bucket"), Key: aws.String("key"),
+		UploadId: aws.String("id"),
+		MultipartUpload: &s3.CompletedMultipartUpload{Parts: []*s3.CompletedPart{
+			{ETag: aws.String("etag"), PartNumber: aws.Int64(1)},
+		}},
+	})
+
+	assert.NoError(t, err)
+}
+
+const completeMultiErrResp = `<Error><Code>SomeException</Code><Message>Exception message</Message></Error>`
+
+func Test200WithErrorUnmarshalError(t *testing.T) {
+	s := s3.New(unit.Session)
+	s.Handlers.Send.Clear()
+	s.Handlers.Send.PushBack(func(r *request.Request) {
+		r.HTTPResponse = &http.Response{
+			StatusCode:    200,
+			Header:        http.Header{"X-Amz-Request-Id": []string{"abc123"}},
+			Body:          ioutil.NopCloser(strings.NewReader(completeMultiErrResp)),
+			ContentLength: -1,
+		}
+		r.HTTPResponse.Status = http.StatusText(r.HTTPResponse.StatusCode)
+	})
+	_, err := s.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
+		Bucket: aws.String("bucket"), Key: aws.String("key"),
+		UploadId: aws.String("id"),
+		MultipartUpload: &s3.CompletedMultipartUpload{Parts: []*s3.CompletedPart{
+			{ETag: aws.String("etag"), PartNumber: aws.Int64(1)},
+		}},
+	})
+
+	assert.Error(t, err)
+
+	assert.Equal(t, "SomeException", err.(awserr.Error).Code())
+	assert.Equal(t, "Exception message", err.(awserr.Error).Message())
+	assert.Equal(t, "abc123", err.(awserr.RequestFailure).RequestID())
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
new file mode 100644
index 000000000..cbd3d3116
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
@@ -0,0 +1,123 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package s3
+
+import (
+	"github.com/aws/aws-sdk-go/private/waiter"
+)
+
+func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error {
+	waiterCfg := waiter.Config{
+		Operation:   "HeadBucket",
+		Delay:       5,
+		MaxAttempts: 20,
+		Acceptors: []waiter.WaitAcceptor{
+			{
+				State:    "success",
+				Matcher:  "status",
+				Argument: "",
+				Expected: 200,
+			},
+			{
+				State:    "success",
+				Matcher:  "status",
+				Argument: "",
+				Expected: 301,
+			},
+			{
+				State:    "success",
+				Matcher:  "status",
+				Argument: "",
+				Expected: 403,
+			},
+			{
+				State:    "retry",
+				Matcher:  "status",
+				Argument: "",
+				Expected: 404,
+			},
+		},
+	}
+
+	w := waiter.Waiter{
+		Client: c,
+		Input:  input,
+		Config: waiterCfg,
+	}
+	return w.Wait()
+}
+
+func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error {
+	waiterCfg := waiter.Config{
+		Operation:   "HeadBucket",
+		Delay:       5,
+		MaxAttempts: 20,
+		Acceptors: []waiter.WaitAcceptor{
+			{
+				State:    "success",
+				Matcher:  "status",
+				Argument: "",
+				Expected: 404,
+			},
+		},
+	}
+
+	w := waiter.Waiter{
+		Client: c,
+		Input:  input,
+		Config: waiterCfg,
+	}
+	return w.Wait()
+}
+
+func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error {
+	waiterCfg := waiter.Config{
+		Operation:   "HeadObject",
+		Delay:       5,
+		MaxAttempts: 20,
+		Acceptors: []waiter.WaitAcceptor{
+			{
+				State:    "success",
+				Matcher:  "status",
+				Argument: "",
+				Expected: 200,
+			},
+			{
+				State:    "retry",
+				Matcher:  "status",
+				Argument: "",
+				Expected: 404,
+			},
+		},
+	}
+
+	w := waiter.Waiter{
+		Client: c,
+		Input:  input,
+		Config: waiterCfg,
+	}
+	return w.Wait()
+}
+
+func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error {
+	waiterCfg := waiter.Config{
+		Operation:   "HeadObject",
+		Delay:       5,
+		MaxAttempts: 20,
+		Acceptors: []waiter.WaitAcceptor{
+			{
+				State:    "success",
+				Matcher:  "status",
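+				// For this waiter a 404 from HeadObject is the success state:
+				// it polls until the object is confirmed gone, up to 20
+				// attempts spaced 5 seconds apart.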
Argument: "", + Expected: 404, + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/servicecatalog/api.go b/vendor/github.com/aws/aws-sdk-go/service/servicecatalog/api.go new file mode 100644 index 000000000..a6b2bc7ce --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/servicecatalog/api.go @@ -0,0 +1,1930 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package servicecatalog provides a client for AWS Service Catalog. +package servicecatalog + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opDescribeProduct = "DescribeProduct" + +// DescribeProductRequest generates a "aws/request.Request" representing the +// client's request for the DescribeProduct operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeProduct method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeProductRequest method. +// req, resp := client.DescribeProductRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ServiceCatalog) DescribeProductRequest(input *DescribeProductInput) (req *request.Request, output *DescribeProductOutput) { + op := &request.Operation{ + Name: opDescribeProduct, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeProductInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeProductOutput{} + req.Data = output + return +} + +// Retrieves information about a specified product. +// +// This operation is functionally identical to DescribeProductView except that +// it takes as input ProductId instead of ProductViewId. +func (c *ServiceCatalog) DescribeProduct(input *DescribeProductInput) (*DescribeProductOutput, error) { + req, out := c.DescribeProductRequest(input) + err := req.Send() + return out, err +} + +const opDescribeProductView = "DescribeProductView" + +// DescribeProductViewRequest generates a "aws/request.Request" representing the +// client's request for the DescribeProductView operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeProductView method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeProductViewRequest method. 
+// req, resp := client.DescribeProductViewRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ServiceCatalog) DescribeProductViewRequest(input *DescribeProductViewInput) (req *request.Request, output *DescribeProductViewOutput) { + op := &request.Operation{ + Name: opDescribeProductView, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeProductViewInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeProductViewOutput{} + req.Data = output + return +} + +// Retrieves information about a specified product. +// +// This operation is functionally identical to DescribeProduct except that +// it takes as input ProductViewId instead of ProductId. +func (c *ServiceCatalog) DescribeProductView(input *DescribeProductViewInput) (*DescribeProductViewOutput, error) { + req, out := c.DescribeProductViewRequest(input) + err := req.Send() + return out, err +} + +const opDescribeProvisioningParameters = "DescribeProvisioningParameters" + +// DescribeProvisioningParametersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeProvisioningParameters operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeProvisioningParameters method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeProvisioningParametersRequest method. +// req, resp := client.DescribeProvisioningParametersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ServiceCatalog) DescribeProvisioningParametersRequest(input *DescribeProvisioningParametersInput) (req *request.Request, output *DescribeProvisioningParametersOutput) { + op := &request.Operation{ + Name: opDescribeProvisioningParameters, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeProvisioningParametersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeProvisioningParametersOutput{} + req.Data = output + return +} + +// Provides information about parameters required to provision a specified product +// in a specified manner. Use this operation to obtain the list of ProvisioningArtifactParameters +// parameters available to call the ProvisionProduct operation for the specified +// product. +func (c *ServiceCatalog) DescribeProvisioningParameters(input *DescribeProvisioningParametersInput) (*DescribeProvisioningParametersOutput, error) { + req, out := c.DescribeProvisioningParametersRequest(input) + err := req.Send() + return out, err +} + +const opDescribeRecord = "DescribeRecord" + +// DescribeRecordRequest generates a "aws/request.Request" representing the +// client's request for the DescribeRecord operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeRecord method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeRecordRequest method. +// req, resp := client.DescribeRecordRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ServiceCatalog) DescribeRecordRequest(input *DescribeRecordInput) (req *request.Request, output *DescribeRecordOutput) { + op := &request.Operation{ + Name: opDescribeRecord, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeRecordInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeRecordOutput{} + req.Data = output + return +} + +// Retrieves a paginated list of the full details of a specific request. Use +// this operation after calling a request operation (ProvisionProduct, TerminateProvisionedProduct, +// or UpdateProvisionedProduct). +func (c *ServiceCatalog) DescribeRecord(input *DescribeRecordInput) (*DescribeRecordOutput, error) { + req, out := c.DescribeRecordRequest(input) + err := req.Send() + return out, err +} + +const opListLaunchPaths = "ListLaunchPaths" + +// ListLaunchPathsRequest generates a "aws/request.Request" representing the +// client's request for the ListLaunchPaths operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListLaunchPaths method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListLaunchPathsRequest method. +// req, resp := client.ListLaunchPathsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ServiceCatalog) ListLaunchPathsRequest(input *ListLaunchPathsInput) (req *request.Request, output *ListLaunchPathsOutput) { + op := &request.Operation{ + Name: opListLaunchPaths, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListLaunchPathsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListLaunchPathsOutput{} + req.Data = output + return +} + +// Returns a paginated list of all paths to a specified product. A path is how +// the user has access to a specified product, and is necessary when provisioning +// a product. A path also determines the constraints put on the product. +func (c *ServiceCatalog) ListLaunchPaths(input *ListLaunchPathsInput) (*ListLaunchPathsOutput, error) { + req, out := c.ListLaunchPathsRequest(input) + err := req.Send() + return out, err +} + +const opListRecordHistory = "ListRecordHistory" + +// ListRecordHistoryRequest generates a "aws/request.Request" representing the +// client's request for the ListRecordHistory operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListRecordHistory method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListRecordHistoryRequest method. +// req, resp := client.ListRecordHistoryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ServiceCatalog) ListRecordHistoryRequest(input *ListRecordHistoryInput) (req *request.Request, output *ListRecordHistoryOutput) { + op := &request.Operation{ + Name: opListRecordHistory, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListRecordHistoryInput{} + } + + req = c.newRequest(op, input, output) + output = &ListRecordHistoryOutput{} + req.Data = output + return +} + +// Returns a paginated list of all performed requests, in the form of RecordDetails +// objects that are filtered as specified. +func (c *ServiceCatalog) ListRecordHistory(input *ListRecordHistoryInput) (*ListRecordHistoryOutput, error) { + req, out := c.ListRecordHistoryRequest(input) + err := req.Send() + return out, err +} + +const opProvisionProduct = "ProvisionProduct" + +// ProvisionProductRequest generates a "aws/request.Request" representing the +// client's request for the ProvisionProduct operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ProvisionProduct method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ProvisionProductRequest method. +// req, resp := client.ProvisionProductRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ServiceCatalog) ProvisionProductRequest(input *ProvisionProductInput) (req *request.Request, output *ProvisionProductOutput) { + op := &request.Operation{ + Name: opProvisionProduct, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ProvisionProductInput{} + } + + req = c.newRequest(op, input, output) + output = &ProvisionProductOutput{} + req.Data = output + return +} + +// Requests a Provision of a specified product. A ProvisionedProduct is a resourced +// instance for a product. For example, provisioning a CloudFormation-template-backed +// product results in launching a CloudFormation stack and all the underlying +// resources that come with it. +// +// You can check the status of this request using the DescribeRecord operation. 
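+//
+// A sketch of the provision-then-poll flow described above; the IDs, the
+// token, and the exact field set are illustrative assumptions, not values
+// taken from this patch:
+//
+//    out, err := svc.ProvisionProduct(&servicecatalog.ProvisionProductInput{
+//        ProductId:              aws.String("prod-EXAMPLE"), // hypothetical ID
+//        ProvisioningArtifactId: aws.String("pa-EXAMPLE"),   // hypothetical ID
+//        ProvisionedProductName: aws.String("my-product"),
+//        ProvisionToken:         aws.String("idempotency-token"),
+//    })
+//    if err == nil {
+//        rec, _ := svc.DescribeRecord(&servicecatalog.DescribeRecordInput{
+//            Id: out.RecordDetail.RecordId,
+//        })
+//        fmt.Println(aws.StringValue(rec.RecordDetail.Status))
+//    }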
+func (c *ServiceCatalog) ProvisionProduct(input *ProvisionProductInput) (*ProvisionProductOutput, error) { + req, out := c.ProvisionProductRequest(input) + err := req.Send() + return out, err +} + +const opScanProvisionedProducts = "ScanProvisionedProducts" + +// ScanProvisionedProductsRequest generates a "aws/request.Request" representing the +// client's request for the ScanProvisionedProducts operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ScanProvisionedProducts method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ScanProvisionedProductsRequest method. +// req, resp := client.ScanProvisionedProductsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ServiceCatalog) ScanProvisionedProductsRequest(input *ScanProvisionedProductsInput) (req *request.Request, output *ScanProvisionedProductsOutput) { + op := &request.Operation{ + Name: opScanProvisionedProducts, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ScanProvisionedProductsInput{} + } + + req = c.newRequest(op, input, output) + output = &ScanProvisionedProductsOutput{} + req.Data = output + return +} + +// Returns a paginated list of all the ProvisionedProduct objects that are currently +// available (not terminated). +func (c *ServiceCatalog) ScanProvisionedProducts(input *ScanProvisionedProductsInput) (*ScanProvisionedProductsOutput, error) { + req, out := c.ScanProvisionedProductsRequest(input) + err := req.Send() + return out, err +} + +const opSearchProducts = "SearchProducts" + +// SearchProductsRequest generates a "aws/request.Request" representing the +// client's request for the SearchProducts operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SearchProducts method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SearchProductsRequest method. +// req, resp := client.SearchProductsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ServiceCatalog) SearchProductsRequest(input *SearchProductsInput) (req *request.Request, output *SearchProductsOutput) { + op := &request.Operation{ + Name: opSearchProducts, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SearchProductsInput{} + } + + req = c.newRequest(op, input, output) + output = &SearchProductsOutput{} + req.Data = output + return +} + +// Returns a paginated list all of the Products objects to which the caller +// has access. 
+// +// The output of this operation can be used as input for other operations, +// such as DescribeProductView. +func (c *ServiceCatalog) SearchProducts(input *SearchProductsInput) (*SearchProductsOutput, error) { + req, out := c.SearchProductsRequest(input) + err := req.Send() + return out, err +} + +const opTerminateProvisionedProduct = "TerminateProvisionedProduct" + +// TerminateProvisionedProductRequest generates a "aws/request.Request" representing the +// client's request for the TerminateProvisionedProduct operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the TerminateProvisionedProduct method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the TerminateProvisionedProductRequest method. +// req, resp := client.TerminateProvisionedProductRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *ServiceCatalog) TerminateProvisionedProductRequest(input *TerminateProvisionedProductInput) (req *request.Request, output *TerminateProvisionedProductOutput) { + op := &request.Operation{ + Name: opTerminateProvisionedProduct, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TerminateProvisionedProductInput{} + } + + req = c.newRequest(op, input, output) + output = &TerminateProvisionedProductOutput{} + req.Data = output + return +} + +// Requests termination of an existing ProvisionedProduct object. If there are +// Tags associated with the object, they are terminated when the ProvisionedProduct +// object is terminated. +// +// This operation does not delete any records associated with the ProvisionedProduct +// object. +// +// You can check the status of this request using the DescribeRecord operation. +func (c *ServiceCatalog) TerminateProvisionedProduct(input *TerminateProvisionedProductInput) (*TerminateProvisionedProductOutput, error) { + req, out := c.TerminateProvisionedProductRequest(input) + err := req.Send() + return out, err +} + +const opUpdateProvisionedProduct = "UpdateProvisionedProduct" + +// UpdateProvisionedProductRequest generates a "aws/request.Request" representing the +// client's request for the UpdateProvisionedProduct operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateProvisionedProduct method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateProvisionedProductRequest method. 
+//    req, resp := client.UpdateProvisionedProductRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *ServiceCatalog) UpdateProvisionedProductRequest(input *UpdateProvisionedProductInput) (req *request.Request, output *UpdateProvisionedProductOutput) {
+	op := &request.Operation{
+		Name:       opUpdateProvisionedProduct,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UpdateProvisionedProductInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &UpdateProvisionedProductOutput{}
+	req.Data = output
+	return
+}
+
+// Requests updates to the configuration of an existing ProvisionedProduct object.
+// If there are tags associated with the object, they cannot be updated or added
+// with this operation. Depending on the specific updates requested, this operation
+// may update with no interruption, with some interruption, or replace the ProvisionedProduct
+// object entirely.
+//
+// You can check the status of this request using the DescribeRecord operation.
+func (c *ServiceCatalog) UpdateProvisionedProduct(input *UpdateProvisionedProductInput) (*UpdateProvisionedProductOutput, error) {
+	req, out := c.UpdateProvisionedProductRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// An administrator-specified constraint to apply when provisioning a product.
+type ConstraintSummary struct {
+	_ struct{} `type:"structure"`
+
+	// The text description of the constraint.
+	Description *string `type:"string"`
+
+	// The type of the constraint.
+	Type *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ConstraintSummary) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ConstraintSummary) GoString() string {
+	return s.String()
+}
+
+type DescribeProductInput struct {
+	_ struct{} `type:"structure"`
+
+	// Optional language code. Supported language codes are as follows:
+	//
+	// "en" (English)
+	//
+	// "jp" (Japanese)
+	//
+	// "zh" (Chinese)
+	//
+	// If no code is specified, "en" is used as the default.
+	AcceptLanguage *string `type:"string"`
+
+	// The ProductId of the product to describe.
+	Id *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeProductInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeProductInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeProductInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DescribeProductInput"}
+	if s.Id == nil {
+		invalidParams.Add(request.NewErrParamRequired("Id"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type DescribeProductOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The summary metadata about the specified product.
+	ProductViewSummary *ProductViewSummary `type:"structure"`
+
+	// A list of provisioning artifact objects for the specified product. The ProvisioningArtifacts
+	// parameter represents the ways the specified product can be provisioned.
+ ProvisioningArtifacts []*ProvisioningArtifact `type:"list"` +} + +// String returns the string representation +func (s DescribeProductOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeProductOutput) GoString() string { + return s.String() +} + +type DescribeProductViewInput struct { + _ struct{} `type:"structure"` + + // Optional language code. Supported language codes are as follows: + // + // "en" (English) + // + // "jp" (Japanese) + // + // "zh" (Chinese) + // + // If no code is specified, "en" is used as the default. + AcceptLanguage *string `type:"string"` + + // The ProductViewId of the product to describe. + Id *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeProductViewInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeProductViewInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeProductViewInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeProductViewInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeProductViewOutput struct { + _ struct{} `type:"structure"` + + // The summary metadata about the specified product. + ProductViewSummary *ProductViewSummary `type:"structure"` + + // A list of provisioning artifact objects for the specified product. The ProvisioningArtifacts + // represent the ways in which the specified product can be provisioned. + ProvisioningArtifacts []*ProvisioningArtifact `type:"list"` +} + +// String returns the string representation +func (s DescribeProductViewOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeProductViewOutput) GoString() string { + return s.String() +} + +type DescribeProvisioningParametersInput struct { + _ struct{} `type:"structure"` + + // Optional language code. Supported language codes are as follows: + // + // "en" (English) + // + // "jp" (Japanese) + // + // "zh" (Chinese) + // + // If no code is specified, "en" is used as the default. + AcceptLanguage *string `type:"string"` + + // The identifier of the path for this product's provisioning. This value is + // optional if the product has a default path, and is required if there is more + // than one path for the specified product. + PathId *string `type:"string"` + + // The identifier of the product. + ProductId *string `type:"string" required:"true"` + + // The provisioning artifact identifier for this product. + ProvisioningArtifactId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeProvisioningParametersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeProvisioningParametersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeProvisioningParametersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeProvisioningParametersInput"} + if s.ProductId == nil { + invalidParams.Add(request.NewErrParamRequired("ProductId")) + } + if s.ProvisioningArtifactId == nil { + invalidParams.Add(request.NewErrParamRequired("ProvisioningArtifactId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeProvisioningParametersOutput struct { + _ struct{} `type:"structure"` + + // The list of constraint summaries that apply to provisioning this product. + ConstraintSummaries []*ConstraintSummary `type:"list"` + + // The list of parameters used to successfully provision the product. Each parameter + // includes a list of allowable values and additional metadata about each parameter. + ProvisioningArtifactParameters []*ProvisioningArtifactParameter `type:"list"` + + // Any additional metadata specifically related to the provisioning of the product. + // For example, see the Version field of the CloudFormation template. + UsageInstructions []*UsageInstruction `type:"list"` +} + +// String returns the string representation +func (s DescribeProvisioningParametersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeProvisioningParametersOutput) GoString() string { + return s.String() +} + +type DescribeRecordInput struct { + _ struct{} `type:"structure"` + + // Optional language code. Supported language codes are as follows: + // + // "en" (English) + // + // "jp" (Japanese) + // + // "zh" (Chinese) + // + // If no code is specified, "en" is used as the default. + AcceptLanguage *string `type:"string"` + + // The record identifier of the ProvisionedProduct object for which to retrieve + // output information. This is the RecordDetail.RecordId obtained from the request + // operation's response. + Id *string `type:"string" required:"true"` + + // The maximum number of items to return in the results. If more results exist + // than fit in the specified PageSize, the value of NextPageToken in the response + // is non-null. + PageSize *int64 `type:"integer"` + + // The page token of the first page retrieve. If null, this retrieves the first + // page of size PageSize. + PageToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeRecordInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRecordInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeRecordInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeRecordInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeRecordOutput struct { + _ struct{} `type:"structure"` + + // The page token to use to retrieve the next page of results for this operation. + // If there are no more pages, this value is null. + NextPageToken *string `type:"string"` + + // Detailed record information for the specified product. + RecordDetail *RecordDetail `type:"structure"` + + // A list of outputs for the specified Product object created as the result + // of a request. For example, a CloudFormation-backed product that creates an + // S3 bucket would have an output for the S3 bucket URL. 
+ RecordOutputs []*RecordOutput `type:"list"` +} + +// String returns the string representation +func (s DescribeRecordOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRecordOutput) GoString() string { + return s.String() +} + +// Summary information about a path for a user to have access to a specified +// product. +type LaunchPathSummary struct { + _ struct{} `type:"structure"` + + // List of constraints on the portfolio-product relationship. + ConstraintSummaries []*ConstraintSummary `type:"list"` + + // The unique identifier of the product path. + Id *string `type:"string"` + + // Corresponds to the name of the portfolio to which the user was assigned. + Name *string `type:"string"` + + // List of tags used by this launch path. + Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s LaunchPathSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchPathSummary) GoString() string { + return s.String() +} + +type ListLaunchPathsInput struct { + _ struct{} `type:"structure"` + + // Optional language code. Supported language codes are as follows: + // + // "en" (English) + // + // "jp" (Japanese) + // + // "zh" (Chinese) + // + // If no code is specified, "en" is used as the default. + AcceptLanguage *string `type:"string"` + + // The maximum number of items to return in the results. If more results exist + // than fit in the specified PageSize, the value of NextPageToken in the response + // is non-null. + PageSize *int64 `type:"integer"` + + // The page token of the first page retrieve. If null, this retrieves the first + // page of size PageSize. + PageToken *string `type:"string"` + + // Identifies the product for which to retrieve LaunchPathSummaries information. + ProductId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ListLaunchPathsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListLaunchPathsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListLaunchPathsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListLaunchPathsInput"} + if s.ProductId == nil { + invalidParams.Add(request.NewErrParamRequired("ProductId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListLaunchPathsOutput struct { + _ struct{} `type:"structure"` + + // List of launch path information summaries for the specified PageToken. + LaunchPathSummaries []*LaunchPathSummary `type:"list"` + + // The page token to use to retrieve the next page of results for this operation. + // If there are no more pages, this value is null. + NextPageToken *string `type:"string"` +} + +// String returns the string representation +func (s ListLaunchPathsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListLaunchPathsOutput) GoString() string { + return s.String() +} + +type ListRecordHistoryInput struct { + _ struct{} `type:"structure"` + + // Optional language code. Supported language codes are as follows: + // + // "en" (English) + // + // "jp" (Japanese) + // + // "zh" (Chinese) + // + // If no code is specified, "en" is used as the default. 
+ AcceptLanguage *string `type:"string"` + + // The maximum number of items to return in the results. If more results exist + // than fit in the specified PageSize, the value of NextPageToken in the response + // is non-null. + PageSize *int64 `type:"integer"` + + // The page token of the first page retrieve. If null, this retrieves the first + // page of size PageSize. + PageToken *string `type:"string"` + + // (Optional) The filter to limit search results. + SearchFilter *ListRecordHistorySearchFilter `type:"structure"` +} + +// String returns the string representation +func (s ListRecordHistoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRecordHistoryInput) GoString() string { + return s.String() +} + +type ListRecordHistoryOutput struct { + _ struct{} `type:"structure"` + + // The page token to use to retrieve the next page of results for this operation. + // If there are no more pages, this value is null. + NextPageToken *string `type:"string"` + + // A list of record detail objects, listed in reverse chronological order. + RecordDetails []*RecordDetail `type:"list"` +} + +// String returns the string representation +func (s ListRecordHistoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRecordHistoryOutput) GoString() string { + return s.String() +} + +// The search filter to limit results when listing request history records. +type ListRecordHistorySearchFilter struct { + _ struct{} `type:"structure"` + + // The filter key. + Key *string `type:"string"` + + // The filter value for Key. + Value *string `type:"string"` +} + +// String returns the string representation +func (s ListRecordHistorySearchFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRecordHistorySearchFilter) GoString() string { + return s.String() +} + +// The constraints that the administrator has put on the parameter. +type ParameterConstraints struct { + _ struct{} `type:"structure"` + + // The values that the administrator has allowed for the parameter. + AllowedValues []*string `type:"list"` +} + +// String returns the string representation +func (s ParameterConstraints) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ParameterConstraints) GoString() string { + return s.String() +} + +// A single product view aggregation value/count pair, containing metadata about +// each product to which the calling user has access. +type ProductViewAggregationValue struct { + _ struct{} `type:"structure"` + + // An approximate count of the products that match the value. + ApproximateCount *int64 `type:"integer"` + + // The value of the product view aggregation. + Value *string `type:"string"` +} + +// String returns the string representation +func (s ProductViewAggregationValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProductViewAggregationValue) GoString() string { + return s.String() +} + +// The summary metadata about the specified product. +type ProductViewSummary struct { + _ struct{} `type:"structure"` + + // The distributor of the product. Contact the product administrator for the + // significance of this value. + Distributor *string `type:"string"` + + // A value of false indicates that the product does not have a default path, + // while a value of true indicates that it does. 
If it's false, call ListLaunchPaths + // to disambiguate between paths. If true, ListLaunchPaths is not required, + // and the output of the ProductViewSummary operation can be used directly with + // DescribeProvisioningParameters. + HasDefaultPath *bool `type:"boolean"` + + // The product view identifier. + Id *string `type:"string"` + + // The name of the product. + Name *string `type:"string"` + + // The owner of the product. Contact the product administrator for the significance + // of this value. + Owner *string `type:"string"` + + // The product identifier. + ProductId *string `type:"string"` + + // Short description of the product. + ShortDescription *string `type:"string"` + + // The description of the support for this Product. + SupportDescription *string `type:"string"` + + // The email contact information to obtain support for this Product. + SupportEmail *string `type:"string"` + + // The URL information to obtain support for this Product. + SupportUrl *string `type:"string"` + + // The product type. Contact the product administrator for the significance + // of this value. + Type *string `type:"string"` +} + +// String returns the string representation +func (s ProductViewSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProductViewSummary) GoString() string { + return s.String() +} + +type ProvisionProductInput struct { + _ struct{} `type:"structure"` + + // Optional language code. Supported language codes are as follows: + // + // "en" (English) + // + // "jp" (Japanese) + // + // "zh" (Chinese) + // + // If no code is specified, "en" is used as the default. + AcceptLanguage *string `type:"string"` + + // Passed to CloudFormation. The SNS topic ARNs to which to publish stack-related + // events. + NotificationArns []*string `type:"list"` + + // The identifier of the path for this product's provisioning. This value is + // optional if the product has a default path, and is required if there is more + // than one path for the specified product. + PathId *string `type:"string"` + + // The identifier of the product. + ProductId *string `type:"string" required:"true"` + + // An idempotency token that uniquely identifies the provisioning request. + ProvisionToken *string `min:"1" type:"string" required:"true" idempotencyToken:"true"` + + // A user-friendly name to identify the ProvisionedProduct object. This value + // must be unique for the AWS account and cannot be updated after the product + // is provisioned. + ProvisionedProductName *string `type:"string" required:"true"` + + // The provisioning artifact identifier for this product. + ProvisioningArtifactId *string `type:"string" required:"true"` + + // Parameters specified by the administrator that are required for provisioning + // the product. + ProvisioningParameters []*ProvisioningParameter `type:"list"` + + // (Optional) A list of tags to use as provisioning options. + Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s ProvisionProductInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProvisionProductInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ProvisionProductInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ProvisionProductInput"} + if s.ProductId == nil { + invalidParams.Add(request.NewErrParamRequired("ProductId")) + } + if s.ProvisionToken == nil { + invalidParams.Add(request.NewErrParamRequired("ProvisionToken")) + } + if s.ProvisionToken != nil && len(*s.ProvisionToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ProvisionToken", 1)) + } + if s.ProvisionedProductName == nil { + invalidParams.Add(request.NewErrParamRequired("ProvisionedProductName")) + } + if s.ProvisioningArtifactId == nil { + invalidParams.Add(request.NewErrParamRequired("ProvisioningArtifactId")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ProvisionProductOutput struct { + _ struct{} `type:"structure"` + + // The detailed result of the ProvisionProduct request, containing the inputs + // made to that request, the current state of the request, a pointer to the + // ProvisionedProduct object of the request, and a list of any errors that the + // request encountered. + RecordDetail *RecordDetail `type:"structure"` +} + +// String returns the string representation +func (s ProvisionProductOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProvisionProductOutput) GoString() string { + return s.String() +} + +// Detailed information about a ProvisionedProduct object. +type ProvisionedProductDetail struct { + _ struct{} `type:"structure"` + + // The ARN associated with the ProvisionedProduct object. + Arn *string `min:"1" type:"string"` + + // The time the ProvisionedProduct was created. + CreatedTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The identifier of the ProvisionedProduct object. + Id *string `type:"string"` + + // An idempotency token that uniquely identifies this ProvisionedProduct. + IdempotencyToken *string `min:"1" type:"string"` + + // The record identifier of the last request performed on this ProvisionedProduct + // object. + LastRecordId *string `type:"string"` + + // The user-friendly name of the ProvisionedProduct object. + Name *string `min:"1" type:"string"` + + // The current status of the ProvisionedProduct. + Status *string `type:"string" enum:"RecordStatus"` + + // The current status message of the ProvisionedProduct. + StatusMessage *string `type:"string"` + + // The type of the ProvisionedProduct object. + Type *string `type:"string"` +} + +// String returns the string representation +func (s ProvisionedProductDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProvisionedProductDetail) GoString() string { + return s.String() +} + +// Contains information indicating the ways in which a product can be provisioned. +type ProvisioningArtifact struct { + _ struct{} `type:"structure"` + + // The time that the artifact was created by the Administrator. + CreatedTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The text description of the artifact. + Description *string `type:"string"` + + // The identifier for the artifact. + Id *string `type:"string"` + + // The name of the artifact. 
+	Name *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ProvisioningArtifact) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ProvisioningArtifact) GoString() string {
+	return s.String()
+}
+
+// A parameter used to successfully provision the product. This value includes
+// a list of allowable values and additional metadata.
+type ProvisioningArtifactParameter struct {
+	_ struct{} `type:"structure"`
+
+	// The default value for this parameter.
+	DefaultValue *string `type:"string"`
+
+	// The text description of the parameter.
+	Description *string `type:"string"`
+
+	// If this value is true, the value for this parameter is obfuscated from view
+	// when the parameter is retrieved. This parameter is used to hide sensitive
+	// information.
+	IsNoEcho *bool `type:"boolean"`
+
+	// The list of constraints that the administrator has put on the parameter.
+	ParameterConstraints *ParameterConstraints `type:"structure"`
+
+	// The parameter key.
+	ParameterKey *string `type:"string"`
+
+	// The parameter type.
+	ParameterType *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ProvisioningArtifactParameter) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ProvisioningArtifactParameter) GoString() string {
+	return s.String()
+}
+
+// The parameter key/value pairs used to provision a product.
+type ProvisioningParameter struct {
+	_ struct{} `type:"structure"`
+
+	// The ProvisioningArtifactParameter.ParameterKey parameter from DescribeProvisioningParameters.
+	Key *string `type:"string"`
+
+	// The value to use for provisioning. Any constraints on this value can be found
+	// in ProvisioningArtifactParameter for Key.
+	Value *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ProvisioningParameter) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ProvisioningParameter) GoString() string {
+	return s.String()
+}
+
+// The full details of a specific ProvisionedProduct object.
+type RecordDetail struct {
+	_ struct{} `type:"structure"`
+
+	// The time when the record for the ProvisionedProduct object was created.
+	CreatedTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+	// The identifier of the path for this product's provisioning.
+	PathId *string `type:"string"`
+
+	// The identifier of the product.
+	ProductId *string `type:"string"`
+
+	// The identifier of the ProvisionedProduct object.
+	ProvisionedProductId *string `type:"string"`
+
+	// The user-friendly name of the ProvisionedProduct object.
+	ProvisionedProductName *string `type:"string"`
+
+	// The type of the ProvisionedProduct object.
+	ProvisionedProductType *string `type:"string"`
+
+	// The provisioning artifact identifier for this product.
+	ProvisioningArtifactId *string `type:"string"`
+
+	// A list of errors that occurred while processing the request.
+	RecordErrors []*RecordError `type:"list"`
+
+	// The identifier of the ProvisionedProduct object record.
+	RecordId *string `type:"string"`
+
+	// List of tags associated with this record.
+	RecordTags []*RecordTag `type:"list"`
+
+	// The record type for this record.
+	RecordType *string `type:"string"`
+
+	// The status of the ProvisionedProduct object.
+	Status *string `type:"string" enum:"RecordStatus"`
+
+	// The time when the record for the ProvisionedProduct object was last updated.
+ UpdatedTime *time.Time `type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s RecordDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecordDetail) GoString() string { + return s.String() +} + +// The error code and description resulting from an operation. +type RecordError struct { + _ struct{} `type:"structure"` + + // The numeric value of the error. + Code *string `type:"string"` + + // The text description of the error. + Description *string `type:"string"` +} + +// String returns the string representation +func (s RecordError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecordError) GoString() string { + return s.String() +} + +// An output for the specified Product object created as the result of a request. +// For example, a CloudFormation-backed product that creates an S3 bucket would +// have an output for the S3 bucket URL. +type RecordOutput struct { + _ struct{} `type:"structure"` + + // The text description of the output. + Description *string `type:"string"` + + // The output key. + OutputKey *string `type:"string"` + + // The output value. + OutputValue *string `type:"string"` +} + +// String returns the string representation +func (s RecordOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecordOutput) GoString() string { + return s.String() +} + +// A tag associated with the record, stored as a key-value pair. +type RecordTag struct { + _ struct{} `type:"structure"` + + // The key for this tag. + Key *string `min:"1" type:"string"` + + // The value for this tag. + Value *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s RecordTag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecordTag) GoString() string { + return s.String() +} + +type ScanProvisionedProductsInput struct { + _ struct{} `type:"structure"` + + // Optional language code. Supported language codes are as follows: + // + // "en" (English) + // + // "jp" (Japanese) + // + // "zh" (Chinese) + // + // If no code is specified, "en" is used as the default. + AcceptLanguage *string `type:"string"` + + // The maximum number of items to return in the results. If more results exist + // than fit in the specified PageSize, the value of NextPageToken in the response + // is non-null. + PageSize *int64 `type:"integer"` + + // The page token of the first page retrieve. If null, this retrieves the first + // page of size PageSize. + PageToken *string `type:"string"` +} + +// String returns the string representation +func (s ScanProvisionedProductsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScanProvisionedProductsInput) GoString() string { + return s.String() +} + +type ScanProvisionedProductsOutput struct { + _ struct{} `type:"structure"` + + // The page token to use to retrieve the next page of results for this operation. + // If there are no more pages, this value is null. + NextPageToken *string `type:"string"` + + // A list of ProvisionedProduct detail objects. 
+	ProvisionedProducts []*ProvisionedProductDetail `type:"list"`
+}
+
+// String returns the string representation
+func (s ScanProvisionedProductsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ScanProvisionedProductsOutput) GoString() string {
+	return s.String()
+}
+
+type SearchProductsInput struct {
+	_ struct{} `type:"structure"`
+
+	// Optional language code. Supported language codes are as follows:
+	//
+	// "en" (English)
+	//
+	// "jp" (Japanese)
+	//
+	// "zh" (Chinese)
+	//
+	// If no code is specified, "en" is used as the default.
+	AcceptLanguage *string `type:"string"`
+
+	// (Optional) The list of filters with which to limit search results. If no
+	// search filters are specified, the output is all the products to which the
+	// calling user has access.
+	Filters map[string][]*string `type:"map"`
+
+	// The maximum number of items to return in the results. If more results exist
+	// than fit in the specified PageSize, the value of NextPageToken in the response
+	// is non-null.
+	PageSize *int64 `type:"integer"`
+
+	// The page token of the first page retrieve. If null, this retrieves the first
+	// page of size PageSize.
+	PageToken *string `type:"string"`
+
+	// (Optional) The sort field specifier. If no value is specified, results are
+	// not sorted.
+	SortBy *string `type:"string" enum:"ProductViewSortBy"`
+
+	// (Optional) The sort order specifier. If no value is specified, results are
+	// not sorted.
+	SortOrder *string `type:"string" enum:"SortOrder"`
+}
+
+// String returns the string representation
+func (s SearchProductsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SearchProductsInput) GoString() string {
+	return s.String()
+}
+
+type SearchProductsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The page token to use to retrieve the next page of results for this operation.
+	// If there are no more pages, this value is null.
+	NextPageToken *string `type:"string"`
+
+	// A list of the product view aggregation value objects.
+	ProductViewAggregations map[string][]*ProductViewAggregationValue `type:"map"`
+
+	// A list of the product view summary objects.
+	ProductViewSummaries []*ProductViewSummary `type:"list"`
+}
+
+// String returns the string representation
+func (s SearchProductsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SearchProductsOutput) GoString() string {
+	return s.String()
+}
+
+// Optional key/value pairs to associate with this provisioning. These tags
+// are propagated to the resources created in the provisioning.
+type Tag struct {
+	_ struct{} `type:"structure"`
+
+	// The ProvisioningArtifactParameter.TagKey parameter from DescribeProvisioningParameters.
+	Key *string `min:"1" type:"string"`
+
+	// The desired value for this key.
+	Value *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s Tag) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Tag) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value != nil && len(*s.Value) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Value", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type TerminateProvisionedProductInput struct { + _ struct{} `type:"structure"` + + // Optional language code. Supported language codes are as follows: + // + // "en" (English) + // + // "jp" (Japanese) + // + // "zh" (Chinese) + // + // If no code is specified, "en" is used as the default. + AcceptLanguage *string `type:"string"` + + // Optional Boolean parameter. If set to true, AWS Service Catalog stops managing + // the specified ProvisionedProduct object even if it cannot delete the underlying + // resources. + IgnoreErrors *bool `type:"boolean"` + + // The identifier of the ProvisionedProduct object to terminate. You must specify + // either ProvisionedProductName or ProvisionedProductId, but not both. + ProvisionedProductId *string `type:"string"` + + // The name of the ProvisionedProduct object to terminate. You must specify + // either ProvisionedProductName or ProvisionedProductId, but not both. + ProvisionedProductName *string `min:"1" type:"string"` + + // An idempotency token that uniquely identifies the termination request. This + // token is only valid during the termination process. After the ProvisionedProduct + // object is terminated, further requests to terminate the same ProvisionedProduct + // object always return ResourceNotFound regardless of the value of TerminateToken. + TerminateToken *string `min:"1" type:"string" required:"true" idempotencyToken:"true"` +} + +// String returns the string representation +func (s TerminateProvisionedProductInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TerminateProvisionedProductInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TerminateProvisionedProductInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TerminateProvisionedProductInput"} + if s.ProvisionedProductName != nil && len(*s.ProvisionedProductName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ProvisionedProductName", 1)) + } + if s.TerminateToken == nil { + invalidParams.Add(request.NewErrParamRequired("TerminateToken")) + } + if s.TerminateToken != nil && len(*s.TerminateToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TerminateToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type TerminateProvisionedProductOutput struct { + _ struct{} `type:"structure"` + + // The detailed result of the TerminateProvisionedProduct request, containing + // the inputs made to that request, the current state of the request, a pointer + // to the ProvisionedProduct object that the request is modifying, and a list + // of any errors that the request encountered. 
+	RecordDetail *RecordDetail `type:"structure"`
+}
+
+// String returns the string representation
+func (s TerminateProvisionedProductOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TerminateProvisionedProductOutput) GoString() string {
+	return s.String()
+}
+
+type UpdateProvisionedProductInput struct {
+	_ struct{} `type:"structure"`
+
+	// Optional language code. Supported language codes are as follows:
+	//
+	// "en" (English)
+	//
+	// "jp" (Japanese)
+	//
+	// "zh" (Chinese)
+	//
+	// If no code is specified, "en" is used as the default.
+	AcceptLanguage *string `type:"string"`
+
+	// The identifier of the path to use in the updated ProvisionedProduct object.
+	// This value is optional if the product has a default path, and is required
+	// if there is more than one path for the specified product.
+	PathId *string `type:"string"`
+
+	// The identifier of the product.
+	ProductId *string `type:"string"`
+
+	// The identifier of the ProvisionedProduct object to update. You must specify
+	// either ProvisionedProductName or ProvisionedProductId, but not both.
+	ProvisionedProductId *string `type:"string"`
+
+	// The updated name of the ProvisionedProduct object. You must specify either
+	// ProvisionedProductName or ProvisionedProductId, but not both.
+	ProvisionedProductName *string `min:"1" type:"string"`
+
+	// The provisioning artifact identifier for this product.
+	ProvisioningArtifactId *string `type:"string"`
+
+	// A list of ProvisioningParameter objects used to update the ProvisionedProduct
+	// object.
+	ProvisioningParameters []*UpdateProvisioningParameter `type:"list"`
+
+	// The idempotency token that uniquely identifies the provisioning update request.
+	UpdateToken *string `min:"1" type:"string" required:"true" idempotencyToken:"true"`
+}
+
+// String returns the string representation
+func (s UpdateProvisionedProductInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateProvisionedProductInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateProvisionedProductInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "UpdateProvisionedProductInput"}
+	if s.ProvisionedProductName != nil && len(*s.ProvisionedProductName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ProvisionedProductName", 1))
+	}
+	if s.UpdateToken == nil {
+		invalidParams.Add(request.NewErrParamRequired("UpdateToken"))
+	}
+	if s.UpdateToken != nil && len(*s.UpdateToken) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("UpdateToken", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type UpdateProvisionedProductOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The detailed result of the UpdateProvisionedProduct request, containing the
+	// inputs made to that request, the current state of the request, a pointer
+	// to the ProvisionedProduct object that the request is modifying, and a list
+	// of any errors that the request encountered.
+ RecordDetail *RecordDetail `type:"structure"` +} + +// String returns the string representation +func (s UpdateProvisionedProductOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateProvisionedProductOutput) GoString() string { + return s.String() +} + +// The parameter key/value pair used to update a ProvisionedProduct object. +// If UsePreviousValue is set to true, Value is ignored and the value for Key +// is kept as previously set (current value). +type UpdateProvisioningParameter struct { + _ struct{} `type:"structure"` + + // The ProvisioningArtifactParameter.ParameterKey parameter from DescribeProvisioningParameters. + Key *string `type:"string"` + + // If true, uses the currently set value for Key, ignoring UpdateProvisioningParameter.Value. + UsePreviousValue *bool `type:"boolean"` + + // The value to use for updating the product provisioning. Any constraints on + // this value can be found in the ProvisioningArtifactParameter parameter for + // Key. + Value *string `type:"string"` +} + +// String returns the string representation +func (s UpdateProvisioningParameter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateProvisioningParameter) GoString() string { + return s.String() +} + +// Additional information provided by the administrator. +type UsageInstruction struct { + _ struct{} `type:"structure"` + + // The usage instruction type for the value. + Type *string `type:"string"` + + // The usage instruction value for this type. + Value *string `type:"string"` +} + +// String returns the string representation +func (s UsageInstruction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UsageInstruction) GoString() string { + return s.String() +} + +const ( + // @enum ProductViewFilterBy + ProductViewFilterByFullTextSearch = "FullTextSearch" + // @enum ProductViewFilterBy + ProductViewFilterByOwner = "Owner" + // @enum ProductViewFilterBy + ProductViewFilterByProductType = "ProductType" +) + +const ( + // @enum ProductViewSortBy + ProductViewSortByTitle = "Title" + // @enum ProductViewSortBy + ProductViewSortByVersionCount = "VersionCount" + // @enum ProductViewSortBy + ProductViewSortByCreationDate = "CreationDate" +) + +const ( + // @enum RecordStatus + RecordStatusInProgress = "IN_PROGRESS" + // @enum RecordStatus + RecordStatusSucceeded = "SUCCEEDED" + // @enum RecordStatus + RecordStatusError = "ERROR" +) + +const ( + // @enum SortOrder + SortOrderAscending = "ASCENDING" + // @enum SortOrder + SortOrderDescending = "DESCENDING" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/servicecatalog/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/servicecatalog/examples_test.go new file mode 100644 index 000000000..dbd99135d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/servicecatalog/examples_test.go @@ -0,0 +1,296 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
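Before the generated examples, the paging contract documented on the shapes above is worth making concrete: the List, Scan, and Search operations accept a PageToken and return a NextPageToken that is null once the last page has been delivered. A minimal hand-written sketch, assuming a default session (the page size is illustrative):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/servicecatalog"
)

func main() {
	svc := servicecatalog.New(session.New())

	var pageToken *string
	for {
		resp, err := svc.ListRecordHistory(&servicecatalog.ListRecordHistoryInput{
			PageSize:  aws.Int64(20), // illustrative page size
			PageToken: pageToken,     // nil retrieves the first page
		})
		if err != nil {
			fmt.Println(err.Error())
			return
		}
		for _, rd := range resp.RecordDetails {
			fmt.Println(aws.StringValue(rd.RecordId), aws.StringValue(rd.Status))
		}
		// A nil NextPageToken means there are no more pages.
		if resp.NextPageToken == nil {
			return
		}
		pageToken = resp.NextPageToken
	}
}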
+ +package servicecatalog_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/servicecatalog" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleServiceCatalog_DescribeProduct() { + svc := servicecatalog.New(session.New()) + + params := &servicecatalog.DescribeProductInput{ + Id: aws.String("Id"), // Required + AcceptLanguage: aws.String("AcceptLanguage"), + } + resp, err := svc.DescribeProduct(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleServiceCatalog_DescribeProductView() { + svc := servicecatalog.New(session.New()) + + params := &servicecatalog.DescribeProductViewInput{ + Id: aws.String("Id"), // Required + AcceptLanguage: aws.String("AcceptLanguage"), + } + resp, err := svc.DescribeProductView(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleServiceCatalog_DescribeProvisioningParameters() { + svc := servicecatalog.New(session.New()) + + params := &servicecatalog.DescribeProvisioningParametersInput{ + ProductId: aws.String("Id"), // Required + ProvisioningArtifactId: aws.String("Id"), // Required + AcceptLanguage: aws.String("AcceptLanguage"), + PathId: aws.String("Id"), + } + resp, err := svc.DescribeProvisioningParameters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleServiceCatalog_DescribeRecord() { + svc := servicecatalog.New(session.New()) + + params := &servicecatalog.DescribeRecordInput{ + Id: aws.String("Id"), // Required + AcceptLanguage: aws.String("AcceptLanguage"), + PageSize: aws.Int64(1), + PageToken: aws.String("PageToken"), + } + resp, err := svc.DescribeRecord(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleServiceCatalog_ListLaunchPaths() { + svc := servicecatalog.New(session.New()) + + params := &servicecatalog.ListLaunchPathsInput{ + ProductId: aws.String("Id"), // Required + AcceptLanguage: aws.String("AcceptLanguage"), + PageSize: aws.Int64(1), + PageToken: aws.String("PageToken"), + } + resp, err := svc.ListLaunchPaths(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleServiceCatalog_ListRecordHistory() { + svc := servicecatalog.New(session.New()) + + params := &servicecatalog.ListRecordHistoryInput{ + AcceptLanguage: aws.String("AcceptLanguage"), + PageSize: aws.Int64(1), + PageToken: aws.String("PageToken"), + SearchFilter: &servicecatalog.ListRecordHistorySearchFilter{ + Key: aws.String("SearchFilterKey"), + Value: aws.String("SearchFilterValue"), + }, + } + resp, err := svc.ListRecordHistory(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleServiceCatalog_ProvisionProduct() { + svc := servicecatalog.New(session.New()) + + params := &servicecatalog.ProvisionProductInput{ + ProductId: aws.String("Id"), // Required + ProvisionToken: aws.String("IdempotencyToken"), // Required + ProvisionedProductName: aws.String("ProvisionedProductName"), // Required + ProvisioningArtifactId: aws.String("Id"), // Required + AcceptLanguage: aws.String("AcceptLanguage"), + NotificationArns: []*string{ + aws.String("NotificationArn"), // Required + // More values... + }, + PathId: aws.String("Id"), + ProvisioningParameters: []*servicecatalog.ProvisioningParameter{ + { // Required + Key: aws.String("ParameterKey"), + Value: aws.String("ParameterValue"), + }, + // More values... + }, + Tags: []*servicecatalog.Tag{ + { // Required + Key: aws.String("TagKey"), + Value: aws.String("TagValue"), + }, + // More values... + }, + } + resp, err := svc.ProvisionProduct(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleServiceCatalog_ScanProvisionedProducts() { + svc := servicecatalog.New(session.New()) + + params := &servicecatalog.ScanProvisionedProductsInput{ + AcceptLanguage: aws.String("AcceptLanguage"), + PageSize: aws.Int64(1), + PageToken: aws.String("PageToken"), + } + resp, err := svc.ScanProvisionedProducts(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleServiceCatalog_SearchProducts() { + svc := servicecatalog.New(session.New()) + + params := &servicecatalog.SearchProductsInput{ + AcceptLanguage: aws.String("AcceptLanguage"), + Filters: map[string][]*string{ + "Key": { // Required + aws.String("ProductViewFilterValue"), // Required + // More values... + }, + // More values... + }, + PageSize: aws.Int64(1), + PageToken: aws.String("PageToken"), + SortBy: aws.String("ProductViewSortBy"), + SortOrder: aws.String("SortOrder"), + } + resp, err := svc.SearchProducts(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+	fmt.Println(resp)
+}
+
+func ExampleServiceCatalog_TerminateProvisionedProduct() {
+	svc := servicecatalog.New(session.New())
+
+	params := &servicecatalog.TerminateProvisionedProductInput{
+		TerminateToken:         aws.String("IdempotencyToken"), // Required
+		AcceptLanguage:         aws.String("AcceptLanguage"),
+		IgnoreErrors:           aws.Bool(true),
+		ProvisionedProductId:   aws.String("Id"),
+		ProvisionedProductName: aws.String("ProvisionedProductNameOrArn"),
+	}
+	resp, err := svc.TerminateProvisionedProduct(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleServiceCatalog_UpdateProvisionedProduct() {
+	svc := servicecatalog.New(session.New())
+
+	params := &servicecatalog.UpdateProvisionedProductInput{
+		UpdateToken:            aws.String("IdempotencyToken"), // Required
+		AcceptLanguage:         aws.String("AcceptLanguage"),
+		PathId:                 aws.String("Id"),
+		ProductId:              aws.String("Id"),
+		ProvisionedProductId:   aws.String("Id"),
+		ProvisionedProductName: aws.String("ProvisionedProductNameOrArn"),
+		ProvisioningArtifactId: aws.String("Id"),
+		ProvisioningParameters: []*servicecatalog.UpdateProvisioningParameter{
+			{ // Required
+				Key:              aws.String("ParameterKey"),
+				UsePreviousValue: aws.Bool(true),
+				Value:            aws.String("ParameterValue"),
+			},
+			// More values...
+		},
+	}
+	resp, err := svc.UpdateProvisionedProduct(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/servicecatalog/service.go b/vendor/github.com/aws/aws-sdk-go/service/servicecatalog/service.go
new file mode 100644
index 000000000..cfa77b214
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/servicecatalog/service.go
@@ -0,0 +1,100 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package servicecatalog
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+)
+
+// Overview
+//
+// AWS Service Catalog (https://aws.amazon.com/servicecatalog/) allows organizations
+// to create and manage catalogs of IT services that are approved for use on
+// AWS. This documentation provides reference material for the AWS Service Catalog
+// end user API. To get the most out of this documentation, you need to be familiar
+// with the terminology discussed in AWS Service Catalog Concepts (http://docs.aws.amazon.com/servicecatalog/latest/userguide/what-is_concepts.html).
+//
+// Additional Resources
+//
+// AWS Service Catalog Administrator Guide (http://docs.aws.amazon.com/servicecatalog/latest/adminguide/introduction.html)
+//
+// AWS Service Catalog User Guide (http://docs.aws.amazon.com/servicecatalog/latest/userguide/introduction.html)
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
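A brief sketch of what that concurrency guarantee permits: construct the client once, fix its configuration up front, and share it across goroutines. The region and search terms below are illustrative assumptions:

package main

import (
	"fmt"
	"sync"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/servicecatalog"
)

func main() {
	// Configure at construction time; do not mutate svc afterwards.
	svc := servicecatalog.New(session.New(), aws.NewConfig().WithRegion("us-west-2"))

	var wg sync.WaitGroup
	for _, term := range []string{"web", "database", "cache"} { // illustrative terms
		wg.Add(1)
		go func(term string) {
			defer wg.Done()
			// Concurrent calls on the shared client are safe.
			resp, err := svc.SearchProducts(&servicecatalog.SearchProductsInput{
				Filters: map[string][]*string{
					servicecatalog.ProductViewFilterByFullTextSearch: {aws.String(term)},
				},
			})
			if err != nil {
				fmt.Println(term, err.Error())
				return
			}
			fmt.Println(term, len(resp.ProductViewSummaries))
		}(term)
	}
	wg.Wait()
}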
+type ServiceCatalog struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "servicecatalog" + +// New creates a new instance of the ServiceCatalog client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a ServiceCatalog client from just a session. +// svc := servicecatalog.New(mySession) +// +// // Create a ServiceCatalog client with additional configuration +// svc := servicecatalog.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *ServiceCatalog { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *ServiceCatalog { + svc := &ServiceCatalog{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-12-10", + JSONVersion: "1.1", + TargetPrefix: "AWS242ServiceCatalogService", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a ServiceCatalog operation and runs any +// custom request initialization. +func (c *ServiceCatalog) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/servicecatalog/servicecatalogiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/servicecatalog/servicecatalogiface/interface.go new file mode 100644 index 000000000..d47436ecd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/servicecatalog/servicecatalogiface/interface.go @@ -0,0 +1,58 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package servicecatalogiface provides an interface for the AWS Service Catalog. +package servicecatalogiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/servicecatalog" +) + +// ServiceCatalogAPI is the interface type for servicecatalog.ServiceCatalog. 
+type ServiceCatalogAPI interface { + DescribeProductRequest(*servicecatalog.DescribeProductInput) (*request.Request, *servicecatalog.DescribeProductOutput) + + DescribeProduct(*servicecatalog.DescribeProductInput) (*servicecatalog.DescribeProductOutput, error) + + DescribeProductViewRequest(*servicecatalog.DescribeProductViewInput) (*request.Request, *servicecatalog.DescribeProductViewOutput) + + DescribeProductView(*servicecatalog.DescribeProductViewInput) (*servicecatalog.DescribeProductViewOutput, error) + + DescribeProvisioningParametersRequest(*servicecatalog.DescribeProvisioningParametersInput) (*request.Request, *servicecatalog.DescribeProvisioningParametersOutput) + + DescribeProvisioningParameters(*servicecatalog.DescribeProvisioningParametersInput) (*servicecatalog.DescribeProvisioningParametersOutput, error) + + DescribeRecordRequest(*servicecatalog.DescribeRecordInput) (*request.Request, *servicecatalog.DescribeRecordOutput) + + DescribeRecord(*servicecatalog.DescribeRecordInput) (*servicecatalog.DescribeRecordOutput, error) + + ListLaunchPathsRequest(*servicecatalog.ListLaunchPathsInput) (*request.Request, *servicecatalog.ListLaunchPathsOutput) + + ListLaunchPaths(*servicecatalog.ListLaunchPathsInput) (*servicecatalog.ListLaunchPathsOutput, error) + + ListRecordHistoryRequest(*servicecatalog.ListRecordHistoryInput) (*request.Request, *servicecatalog.ListRecordHistoryOutput) + + ListRecordHistory(*servicecatalog.ListRecordHistoryInput) (*servicecatalog.ListRecordHistoryOutput, error) + + ProvisionProductRequest(*servicecatalog.ProvisionProductInput) (*request.Request, *servicecatalog.ProvisionProductOutput) + + ProvisionProduct(*servicecatalog.ProvisionProductInput) (*servicecatalog.ProvisionProductOutput, error) + + ScanProvisionedProductsRequest(*servicecatalog.ScanProvisionedProductsInput) (*request.Request, *servicecatalog.ScanProvisionedProductsOutput) + + ScanProvisionedProducts(*servicecatalog.ScanProvisionedProductsInput) (*servicecatalog.ScanProvisionedProductsOutput, error) + + SearchProductsRequest(*servicecatalog.SearchProductsInput) (*request.Request, *servicecatalog.SearchProductsOutput) + + SearchProducts(*servicecatalog.SearchProductsInput) (*servicecatalog.SearchProductsOutput, error) + + TerminateProvisionedProductRequest(*servicecatalog.TerminateProvisionedProductInput) (*request.Request, *servicecatalog.TerminateProvisionedProductOutput) + + TerminateProvisionedProduct(*servicecatalog.TerminateProvisionedProductInput) (*servicecatalog.TerminateProvisionedProductOutput, error) + + UpdateProvisionedProductRequest(*servicecatalog.UpdateProvisionedProductInput) (*request.Request, *servicecatalog.UpdateProvisionedProductOutput) + + UpdateProvisionedProduct(*servicecatalog.UpdateProvisionedProductInput) (*servicecatalog.UpdateProvisionedProductOutput, error) +} + +var _ ServiceCatalogAPI = (*servicecatalog.ServiceCatalog)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/ses/api.go b/vendor/github.com/aws/aws-sdk-go/service/ses/api.go new file mode 100644 index 000000000..bf5b3fb76 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ses/api.go @@ -0,0 +1,6279 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package ses provides a client for Amazon Simple Email Service. 
+package ses + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +const opCloneReceiptRuleSet = "CloneReceiptRuleSet" + +// CloneReceiptRuleSetRequest generates a "aws/request.Request" representing the +// client's request for the CloneReceiptRuleSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CloneReceiptRuleSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CloneReceiptRuleSetRequest method. +// req, resp := client.CloneReceiptRuleSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) CloneReceiptRuleSetRequest(input *CloneReceiptRuleSetInput) (req *request.Request, output *CloneReceiptRuleSetOutput) { + op := &request.Operation{ + Name: opCloneReceiptRuleSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CloneReceiptRuleSetInput{} + } + + req = c.newRequest(op, input, output) + output = &CloneReceiptRuleSetOutput{} + req.Data = output + return +} + +// Creates a receipt rule set by cloning an existing one. All receipt rules +// and configurations are copied to the new receipt rule set and are completely +// independent of the source rule set. +// +// For information about setting up rule sets, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rule-set.html). +// +// This action is throttled at one request per second. +func (c *SES) CloneReceiptRuleSet(input *CloneReceiptRuleSetInput) (*CloneReceiptRuleSetOutput, error) { + req, out := c.CloneReceiptRuleSetRequest(input) + err := req.Send() + return out, err +} + +const opCreateReceiptFilter = "CreateReceiptFilter" + +// CreateReceiptFilterRequest generates a "aws/request.Request" representing the +// client's request for the CreateReceiptFilter operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateReceiptFilter method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateReceiptFilterRequest method. 
+// req, resp := client.CreateReceiptFilterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) CreateReceiptFilterRequest(input *CreateReceiptFilterInput) (req *request.Request, output *CreateReceiptFilterOutput) { + op := &request.Operation{ + Name: opCreateReceiptFilter, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateReceiptFilterInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateReceiptFilterOutput{} + req.Data = output + return +} + +// Creates a new IP address filter. +// +// For information about setting up IP address filters, see the Amazon SES +// Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-ip-filters.html). +// +// This action is throttled at one request per second. +func (c *SES) CreateReceiptFilter(input *CreateReceiptFilterInput) (*CreateReceiptFilterOutput, error) { + req, out := c.CreateReceiptFilterRequest(input) + err := req.Send() + return out, err +} + +const opCreateReceiptRule = "CreateReceiptRule" + +// CreateReceiptRuleRequest generates a "aws/request.Request" representing the +// client's request for the CreateReceiptRule operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateReceiptRule method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateReceiptRuleRequest method. +// req, resp := client.CreateReceiptRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) CreateReceiptRuleRequest(input *CreateReceiptRuleInput) (req *request.Request, output *CreateReceiptRuleOutput) { + op := &request.Operation{ + Name: opCreateReceiptRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateReceiptRuleInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateReceiptRuleOutput{} + req.Data = output + return +} + +// Creates a receipt rule. +// +// For information about setting up receipt rules, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rules.html). +// +// This action is throttled at one request per second. +func (c *SES) CreateReceiptRule(input *CreateReceiptRuleInput) (*CreateReceiptRuleOutput, error) { + req, out := c.CreateReceiptRuleRequest(input) + err := req.Send() + return out, err +} + +const opCreateReceiptRuleSet = "CreateReceiptRuleSet" + +// CreateReceiptRuleSetRequest generates a "aws/request.Request" representing the +// client's request for the CreateReceiptRuleSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the CreateReceiptRuleSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateReceiptRuleSetRequest method. +// req, resp := client.CreateReceiptRuleSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) CreateReceiptRuleSetRequest(input *CreateReceiptRuleSetInput) (req *request.Request, output *CreateReceiptRuleSetOutput) { + op := &request.Operation{ + Name: opCreateReceiptRuleSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateReceiptRuleSetInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateReceiptRuleSetOutput{} + req.Data = output + return +} + +// Creates an empty receipt rule set. +// +// For information about setting up receipt rule sets, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rule-set.html). +// +// This action is throttled at one request per second. +func (c *SES) CreateReceiptRuleSet(input *CreateReceiptRuleSetInput) (*CreateReceiptRuleSetOutput, error) { + req, out := c.CreateReceiptRuleSetRequest(input) + err := req.Send() + return out, err +} + +const opDeleteIdentity = "DeleteIdentity" + +// DeleteIdentityRequest generates a "aws/request.Request" representing the +// client's request for the DeleteIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteIdentityRequest method. +// req, resp := client.DeleteIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) DeleteIdentityRequest(input *DeleteIdentityInput) (req *request.Request, output *DeleteIdentityOutput) { + op := &request.Operation{ + Name: opDeleteIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteIdentityInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteIdentityOutput{} + req.Data = output + return +} + +// Deletes the specified identity (an email address or a domain) from the list +// of verified identities. +// +// This action is throttled at one request per second. +func (c *SES) DeleteIdentity(input *DeleteIdentityInput) (*DeleteIdentityOutput, error) { + req, out := c.DeleteIdentityRequest(input) + err := req.Send() + return out, err +} + +const opDeleteIdentityPolicy = "DeleteIdentityPolicy" + +// DeleteIdentityPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteIdentityPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteIdentityPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteIdentityPolicyRequest method. +// req, resp := client.DeleteIdentityPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) DeleteIdentityPolicyRequest(input *DeleteIdentityPolicyInput) (req *request.Request, output *DeleteIdentityPolicyOutput) { + op := &request.Operation{ + Name: opDeleteIdentityPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteIdentityPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteIdentityPolicyOutput{} + req.Data = output + return +} + +// Deletes the specified sending authorization policy for the given identity +// (an email address or a domain). This API returns successfully even if a policy +// with the specified name does not exist. +// +// This API is for the identity owner only. If you have not verified the identity, +// this API will return an error. +// +// Sending authorization is a feature that enables an identity owner to authorize +// other senders to use its identities. For information about using sending +// authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). +// +// This action is throttled at one request per second. +func (c *SES) DeleteIdentityPolicy(input *DeleteIdentityPolicyInput) (*DeleteIdentityPolicyOutput, error) { + req, out := c.DeleteIdentityPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteReceiptFilter = "DeleteReceiptFilter" + +// DeleteReceiptFilterRequest generates a "aws/request.Request" representing the +// client's request for the DeleteReceiptFilter operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteReceiptFilter method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteReceiptFilterRequest method. +// req, resp := client.DeleteReceiptFilterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) DeleteReceiptFilterRequest(input *DeleteReceiptFilterInput) (req *request.Request, output *DeleteReceiptFilterOutput) { + op := &request.Operation{ + Name: opDeleteReceiptFilter, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteReceiptFilterInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteReceiptFilterOutput{} + req.Data = output + return +} + +// Deletes the specified IP address filter. 
+// +// For information about managing IP address filters, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-ip-filters.html). +// +// This action is throttled at one request per second. +func (c *SES) DeleteReceiptFilter(input *DeleteReceiptFilterInput) (*DeleteReceiptFilterOutput, error) { + req, out := c.DeleteReceiptFilterRequest(input) + err := req.Send() + return out, err +} + +const opDeleteReceiptRule = "DeleteReceiptRule" + +// DeleteReceiptRuleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteReceiptRule operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteReceiptRule method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteReceiptRuleRequest method. +// req, resp := client.DeleteReceiptRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) DeleteReceiptRuleRequest(input *DeleteReceiptRuleInput) (req *request.Request, output *DeleteReceiptRuleOutput) { + op := &request.Operation{ + Name: opDeleteReceiptRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteReceiptRuleInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteReceiptRuleOutput{} + req.Data = output + return +} + +// Deletes the specified receipt rule. +// +// For information about managing receipt rules, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rules.html). +// +// This action is throttled at one request per second. +func (c *SES) DeleteReceiptRule(input *DeleteReceiptRuleInput) (*DeleteReceiptRuleOutput, error) { + req, out := c.DeleteReceiptRuleRequest(input) + err := req.Send() + return out, err +} + +const opDeleteReceiptRuleSet = "DeleteReceiptRuleSet" + +// DeleteReceiptRuleSetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteReceiptRuleSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteReceiptRuleSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteReceiptRuleSetRequest method. 
+// req, resp := client.DeleteReceiptRuleSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) DeleteReceiptRuleSetRequest(input *DeleteReceiptRuleSetInput) (req *request.Request, output *DeleteReceiptRuleSetOutput) { + op := &request.Operation{ + Name: opDeleteReceiptRuleSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteReceiptRuleSetInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteReceiptRuleSetOutput{} + req.Data = output + return +} + +// Deletes the specified receipt rule set and all of the receipt rules it contains. +// +// The currently active rule set cannot be deleted. +// +// For information about managing receipt rule sets, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rule-sets.html). +// +// This action is throttled at one request per second. +func (c *SES) DeleteReceiptRuleSet(input *DeleteReceiptRuleSetInput) (*DeleteReceiptRuleSetOutput, error) { + req, out := c.DeleteReceiptRuleSetRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVerifiedEmailAddress = "DeleteVerifiedEmailAddress" + +// DeleteVerifiedEmailAddressRequest generates a "aws/request.Request" representing the +// client's request for the DeleteVerifiedEmailAddress operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteVerifiedEmailAddress method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteVerifiedEmailAddressRequest method. +// req, resp := client.DeleteVerifiedEmailAddressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) DeleteVerifiedEmailAddressRequest(input *DeleteVerifiedEmailAddressInput) (req *request.Request, output *DeleteVerifiedEmailAddressOutput) { + op := &request.Operation{ + Name: opDeleteVerifiedEmailAddress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVerifiedEmailAddressInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteVerifiedEmailAddressOutput{} + req.Data = output + return +} + +// Deletes the specified email address from the list of verified addresses. +// +// The DeleteVerifiedEmailAddress action is deprecated as of the May 15, 2012 +// release of Domain Verification. The DeleteIdentity action is now preferred. +// +// This action is throttled at one request per second. 
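// The DeleteVerifiedEmailAddressRequest method above also shows the handler
// lists at work: it swaps the query unmarshaler for
// protocol.UnmarshalDiscardBodyHandler because the operation has no modeled
// output to parse. The same lists are the hook behind the "inject custom
// logic into the request's lifecycle" note repeated in every Request doc
// comment. A minimal sketch, assuming an *ses.SES client named svc and a
// hypothetical identity value:
//
//	req, resp := svc.DeleteIdentityRequest(&ses.DeleteIdentityInput{
//		Identity: aws.String("example.com"), // hypothetical
//	})
//	// Runs before each HTTP send, including retries.
//	req.Handlers.Send.PushFront(func(r *request.Request) {
//		fmt.Println("sending", r.Operation.Name, "attempt", r.RetryCount)
//	})
//	if err := req.Send(); err != nil {
//		fmt.Println(err.Error())
//		return
//	}
//	fmt.Println(resp)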
+func (c *SES) DeleteVerifiedEmailAddress(input *DeleteVerifiedEmailAddressInput) (*DeleteVerifiedEmailAddressOutput, error) { + req, out := c.DeleteVerifiedEmailAddressRequest(input) + err := req.Send() + return out, err +} + +const opDescribeActiveReceiptRuleSet = "DescribeActiveReceiptRuleSet" + +// DescribeActiveReceiptRuleSetRequest generates a "aws/request.Request" representing the +// client's request for the DescribeActiveReceiptRuleSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeActiveReceiptRuleSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeActiveReceiptRuleSetRequest method. +// req, resp := client.DescribeActiveReceiptRuleSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) DescribeActiveReceiptRuleSetRequest(input *DescribeActiveReceiptRuleSetInput) (req *request.Request, output *DescribeActiveReceiptRuleSetOutput) { + op := &request.Operation{ + Name: opDescribeActiveReceiptRuleSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeActiveReceiptRuleSetInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeActiveReceiptRuleSetOutput{} + req.Data = output + return +} + +// Returns the metadata and receipt rules for the receipt rule set that is currently +// active. +// +// For information about setting up receipt rule sets, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rule-set.html). +// +// This action is throttled at one request per second. +func (c *SES) DescribeActiveReceiptRuleSet(input *DescribeActiveReceiptRuleSetInput) (*DescribeActiveReceiptRuleSetOutput, error) { + req, out := c.DescribeActiveReceiptRuleSetRequest(input) + err := req.Send() + return out, err +} + +const opDescribeReceiptRule = "DescribeReceiptRule" + +// DescribeReceiptRuleRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReceiptRule operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeReceiptRule method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeReceiptRuleRequest method. 
+// req, resp := client.DescribeReceiptRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) DescribeReceiptRuleRequest(input *DescribeReceiptRuleInput) (req *request.Request, output *DescribeReceiptRuleOutput) { + op := &request.Operation{ + Name: opDescribeReceiptRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeReceiptRuleInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReceiptRuleOutput{} + req.Data = output + return +} + +// Returns the details of the specified receipt rule. +// +// For information about setting up receipt rules, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rules.html). +// +// This action is throttled at one request per second. +func (c *SES) DescribeReceiptRule(input *DescribeReceiptRuleInput) (*DescribeReceiptRuleOutput, error) { + req, out := c.DescribeReceiptRuleRequest(input) + err := req.Send() + return out, err +} + +const opDescribeReceiptRuleSet = "DescribeReceiptRuleSet" + +// DescribeReceiptRuleSetRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReceiptRuleSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeReceiptRuleSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeReceiptRuleSetRequest method. +// req, resp := client.DescribeReceiptRuleSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) DescribeReceiptRuleSetRequest(input *DescribeReceiptRuleSetInput) (req *request.Request, output *DescribeReceiptRuleSetOutput) { + op := &request.Operation{ + Name: opDescribeReceiptRuleSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeReceiptRuleSetInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReceiptRuleSetOutput{} + req.Data = output + return +} + +// Returns the details of the specified receipt rule set. +// +// For information about managing receipt rule sets, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rule-sets.html). +// +// This action is throttled at one request per second. +func (c *SES) DescribeReceiptRuleSet(input *DescribeReceiptRuleSetInput) (*DescribeReceiptRuleSetOutput, error) { + req, out := c.DescribeReceiptRuleSetRequest(input) + err := req.Send() + return out, err +} + +const opGetIdentityDkimAttributes = "GetIdentityDkimAttributes" + +// GetIdentityDkimAttributesRequest generates a "aws/request.Request" representing the +// client's request for the GetIdentityDkimAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetIdentityDkimAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetIdentityDkimAttributesRequest method. +// req, resp := client.GetIdentityDkimAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) GetIdentityDkimAttributesRequest(input *GetIdentityDkimAttributesInput) (req *request.Request, output *GetIdentityDkimAttributesOutput) { + op := &request.Operation{ + Name: opGetIdentityDkimAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetIdentityDkimAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetIdentityDkimAttributesOutput{} + req.Data = output + return +} + +// Returns the current status of Easy DKIM signing for an entity. For domain +// name identities, this action also returns the DKIM tokens that are required +// for Easy DKIM signing, and whether Amazon SES has successfully verified that +// these tokens have been published. +// +// This action takes a list of identities as input and returns the following +// information for each: +// +// Whether Easy DKIM signing is enabled or disabled. +// +// A set of DKIM tokens that represent the identity. If the identity is an +// email address, the tokens represent the domain of that address. +// +// Whether Amazon SES has successfully verified the DKIM tokens published +// in the domain's DNS. This information is only returned for domain name identities, +// not for email addresses. +// +// This action is throttled at one request per second and can only get DKIM +// attributes for up to 100 identities at a time. +// +// For more information about creating DNS records using DKIM tokens, go to +// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim-dns-records.html). +func (c *SES) GetIdentityDkimAttributes(input *GetIdentityDkimAttributesInput) (*GetIdentityDkimAttributesOutput, error) { + req, out := c.GetIdentityDkimAttributesRequest(input) + err := req.Send() + return out, err +} + +const opGetIdentityMailFromDomainAttributes = "GetIdentityMailFromDomainAttributes" + +// GetIdentityMailFromDomainAttributesRequest generates a "aws/request.Request" representing the +// client's request for the GetIdentityMailFromDomainAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetIdentityMailFromDomainAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetIdentityMailFromDomainAttributesRequest method. 
+// req, resp := client.GetIdentityMailFromDomainAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) GetIdentityMailFromDomainAttributesRequest(input *GetIdentityMailFromDomainAttributesInput) (req *request.Request, output *GetIdentityMailFromDomainAttributesOutput) { + op := &request.Operation{ + Name: opGetIdentityMailFromDomainAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetIdentityMailFromDomainAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetIdentityMailFromDomainAttributesOutput{} + req.Data = output + return +} + +// Returns the custom MAIL FROM attributes for a list of identities (email addresses +// and/or domains). +// +// This action is throttled at one request per second and can only get custom +// MAIL FROM attributes for up to 100 identities at a time. +func (c *SES) GetIdentityMailFromDomainAttributes(input *GetIdentityMailFromDomainAttributesInput) (*GetIdentityMailFromDomainAttributesOutput, error) { + req, out := c.GetIdentityMailFromDomainAttributesRequest(input) + err := req.Send() + return out, err +} + +const opGetIdentityNotificationAttributes = "GetIdentityNotificationAttributes" + +// GetIdentityNotificationAttributesRequest generates a "aws/request.Request" representing the +// client's request for the GetIdentityNotificationAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetIdentityNotificationAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetIdentityNotificationAttributesRequest method. +// req, resp := client.GetIdentityNotificationAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) GetIdentityNotificationAttributesRequest(input *GetIdentityNotificationAttributesInput) (req *request.Request, output *GetIdentityNotificationAttributesOutput) { + op := &request.Operation{ + Name: opGetIdentityNotificationAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetIdentityNotificationAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetIdentityNotificationAttributesOutput{} + req.Data = output + return +} + +// Given a list of verified identities (email addresses and/or domains), returns +// a structure describing identity notification attributes. +// +// This action is throttled at one request per second and can only get notification +// attributes for up to 100 identities at a time. +// +// For more information about using notifications with Amazon SES, see the +// Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications.html). 
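// Like GetIdentityDkimAttributes and GetIdentityMailFromDomainAttributes
// above, this operation caps a single call at 100 identities, so longer
// identity lists must be chunked by the caller. A sketch of that batching
// against GetIdentityDkimAttributes (getDkimStatuses is a hypothetical
// helper, not part of the SDK):
//
//	func getDkimStatuses(svc *ses.SES, identities []*string) error {
//		for start := 0; start < len(identities); start += 100 {
//			end := start + 100
//			if end > len(identities) {
//				end = len(identities)
//			}
//			out, err := svc.GetIdentityDkimAttributes(&ses.GetIdentityDkimAttributesInput{
//				Identities: identities[start:end],
//			})
//			if err != nil {
//				return err
//			}
//			for identity, attrs := range out.DkimAttributes {
//				fmt.Println(identity, aws.StringValue(attrs.DkimVerificationStatus))
//			}
//		}
//		return nil
//	}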
+func (c *SES) GetIdentityNotificationAttributes(input *GetIdentityNotificationAttributesInput) (*GetIdentityNotificationAttributesOutput, error) { + req, out := c.GetIdentityNotificationAttributesRequest(input) + err := req.Send() + return out, err +} + +const opGetIdentityPolicies = "GetIdentityPolicies" + +// GetIdentityPoliciesRequest generates a "aws/request.Request" representing the +// client's request for the GetIdentityPolicies operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetIdentityPolicies method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetIdentityPoliciesRequest method. +// req, resp := client.GetIdentityPoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) GetIdentityPoliciesRequest(input *GetIdentityPoliciesInput) (req *request.Request, output *GetIdentityPoliciesOutput) { + op := &request.Operation{ + Name: opGetIdentityPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetIdentityPoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetIdentityPoliciesOutput{} + req.Data = output + return +} + +// Returns the requested sending authorization policies for the given identity +// (an email address or a domain). The policies are returned as a map of policy +// names to policy contents. You can retrieve a maximum of 20 policies at a +// time. +// +// This API is for the identity owner only. If you have not verified the identity, +// this API will return an error. +// +// Sending authorization is a feature that enables an identity owner to authorize +// other senders to use its identities. For information about using sending +// authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). +// +// This action is throttled at one request per second. +func (c *SES) GetIdentityPolicies(input *GetIdentityPoliciesInput) (*GetIdentityPoliciesOutput, error) { + req, out := c.GetIdentityPoliciesRequest(input) + err := req.Send() + return out, err +} + +const opGetIdentityVerificationAttributes = "GetIdentityVerificationAttributes" + +// GetIdentityVerificationAttributesRequest generates a "aws/request.Request" representing the +// client's request for the GetIdentityVerificationAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetIdentityVerificationAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the GetIdentityVerificationAttributesRequest method. +// req, resp := client.GetIdentityVerificationAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) GetIdentityVerificationAttributesRequest(input *GetIdentityVerificationAttributesInput) (req *request.Request, output *GetIdentityVerificationAttributesOutput) { + op := &request.Operation{ + Name: opGetIdentityVerificationAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetIdentityVerificationAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetIdentityVerificationAttributesOutput{} + req.Data = output + return +} + +// Given a list of identities (email addresses and/or domains), returns the +// verification status and (for domain identities) the verification token for +// each identity. +// +// This action is throttled at one request per second and can only get verification +// attributes for up to 100 identities at a time. +func (c *SES) GetIdentityVerificationAttributes(input *GetIdentityVerificationAttributesInput) (*GetIdentityVerificationAttributesOutput, error) { + req, out := c.GetIdentityVerificationAttributesRequest(input) + err := req.Send() + return out, err +} + +const opGetSendQuota = "GetSendQuota" + +// GetSendQuotaRequest generates a "aws/request.Request" representing the +// client's request for the GetSendQuota operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetSendQuota method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetSendQuotaRequest method. +// req, resp := client.GetSendQuotaRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) GetSendQuotaRequest(input *GetSendQuotaInput) (req *request.Request, output *GetSendQuotaOutput) { + op := &request.Operation{ + Name: opGetSendQuota, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSendQuotaInput{} + } + + req = c.newRequest(op, input, output) + output = &GetSendQuotaOutput{} + req.Data = output + return +} + +// Returns the user's current sending limits. +// +// This action is throttled at one request per second. +func (c *SES) GetSendQuota(input *GetSendQuotaInput) (*GetSendQuotaOutput, error) { + req, out := c.GetSendQuotaRequest(input) + err := req.Send() + return out, err +} + +const opGetSendStatistics = "GetSendStatistics" + +// GetSendStatisticsRequest generates a "aws/request.Request" representing the +// client's request for the GetSendStatistics operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the GetSendStatistics method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetSendStatisticsRequest method. +// req, resp := client.GetSendStatisticsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) GetSendStatisticsRequest(input *GetSendStatisticsInput) (req *request.Request, output *GetSendStatisticsOutput) { + op := &request.Operation{ + Name: opGetSendStatistics, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSendStatisticsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetSendStatisticsOutput{} + req.Data = output + return +} + +// Returns the user's sending statistics. The result is a list of data points, +// representing the last two weeks of sending activity. +// +// Each data point in the list contains statistics for a 15-minute interval. +// +// This action is throttled at one request per second. +func (c *SES) GetSendStatistics(input *GetSendStatisticsInput) (*GetSendStatisticsOutput, error) { + req, out := c.GetSendStatisticsRequest(input) + err := req.Send() + return out, err +} + +const opListIdentities = "ListIdentities" + +// ListIdentitiesRequest generates a "aws/request.Request" representing the +// client's request for the ListIdentities operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListIdentities method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListIdentitiesRequest method. +// req, resp := client.ListIdentitiesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) ListIdentitiesRequest(input *ListIdentitiesInput) (req *request.Request, output *ListIdentitiesOutput) { + op := &request.Operation{ + Name: opListIdentities, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxItems", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListIdentitiesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListIdentitiesOutput{} + req.Data = output + return +} + +// Returns a list containing all of the identities (email addresses and domains) +// for your AWS account, regardless of verification status. +// +// This action is throttled at one request per second. +func (c *SES) ListIdentities(input *ListIdentitiesInput) (*ListIdentitiesOutput, error) { + req, out := c.ListIdentitiesRequest(input) + err := req.Send() + return out, err +} + +// ListIdentitiesPages iterates over the pages of a ListIdentities operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListIdentities method for more information on how to use this operation. 
+// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListIdentities operation. +// pageNum := 0 +// err := client.ListIdentitiesPages(params, +// func(page *ListIdentitiesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SES) ListIdentitiesPages(input *ListIdentitiesInput, fn func(p *ListIdentitiesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListIdentitiesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListIdentitiesOutput), lastPage) + }) +} + +const opListIdentityPolicies = "ListIdentityPolicies" + +// ListIdentityPoliciesRequest generates a "aws/request.Request" representing the +// client's request for the ListIdentityPolicies operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListIdentityPolicies method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListIdentityPoliciesRequest method. +// req, resp := client.ListIdentityPoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) ListIdentityPoliciesRequest(input *ListIdentityPoliciesInput) (req *request.Request, output *ListIdentityPoliciesOutput) { + op := &request.Operation{ + Name: opListIdentityPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListIdentityPoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListIdentityPoliciesOutput{} + req.Data = output + return +} + +// Returns a list of sending authorization policies that are attached to the +// given identity (an email address or a domain). This API returns only a list. +// If you want the actual policy content, you can use GetIdentityPolicies. +// +// This API is for the identity owner only. If you have not verified the identity, +// this API will return an error. +// +// Sending authorization is a feature that enables an identity owner to authorize +// other senders to use its identities. For information about using sending +// authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). +// +// This action is throttled at one request per second. +func (c *SES) ListIdentityPolicies(input *ListIdentityPoliciesInput) (*ListIdentityPoliciesOutput, error) { + req, out := c.ListIdentityPoliciesRequest(input) + err := req.Send() + return out, err +} + +const opListReceiptFilters = "ListReceiptFilters" + +// ListReceiptFiltersRequest generates a "aws/request.Request" representing the +// client's request for the ListReceiptFilters operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListReceiptFilters method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListReceiptFiltersRequest method. +// req, resp := client.ListReceiptFiltersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) ListReceiptFiltersRequest(input *ListReceiptFiltersInput) (req *request.Request, output *ListReceiptFiltersOutput) { + op := &request.Operation{ + Name: opListReceiptFilters, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListReceiptFiltersInput{} + } + + req = c.newRequest(op, input, output) + output = &ListReceiptFiltersOutput{} + req.Data = output + return +} + +// Lists the IP address filters associated with your AWS account. +// +// For information about managing IP address filters, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-ip-filters.html). +// +// This action is throttled at one request per second. +func (c *SES) ListReceiptFilters(input *ListReceiptFiltersInput) (*ListReceiptFiltersOutput, error) { + req, out := c.ListReceiptFiltersRequest(input) + err := req.Send() + return out, err +} + +const opListReceiptRuleSets = "ListReceiptRuleSets" + +// ListReceiptRuleSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListReceiptRuleSets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListReceiptRuleSets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListReceiptRuleSetsRequest method. +// req, resp := client.ListReceiptRuleSetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) ListReceiptRuleSetsRequest(input *ListReceiptRuleSetsInput) (req *request.Request, output *ListReceiptRuleSetsOutput) { + op := &request.Operation{ + Name: opListReceiptRuleSets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListReceiptRuleSetsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListReceiptRuleSetsOutput{} + req.Data = output + return +} + +// Lists the receipt rule sets that exist under your AWS account. If there are +// additional receipt rule sets to be retrieved, you will receive a NextToken +// that you can provide to the next call to ListReceiptRuleSets to retrieve +// the additional entries. +// +// For information about managing receipt rule sets, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rule-sets.html). 
+// +// This action is throttled at one request per second. +func (c *SES) ListReceiptRuleSets(input *ListReceiptRuleSetsInput) (*ListReceiptRuleSetsOutput, error) { + req, out := c.ListReceiptRuleSetsRequest(input) + err := req.Send() + return out, err +} + +const opListVerifiedEmailAddresses = "ListVerifiedEmailAddresses" + +// ListVerifiedEmailAddressesRequest generates a "aws/request.Request" representing the +// client's request for the ListVerifiedEmailAddresses operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListVerifiedEmailAddresses method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListVerifiedEmailAddressesRequest method. +// req, resp := client.ListVerifiedEmailAddressesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) ListVerifiedEmailAddressesRequest(input *ListVerifiedEmailAddressesInput) (req *request.Request, output *ListVerifiedEmailAddressesOutput) { + op := &request.Operation{ + Name: opListVerifiedEmailAddresses, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListVerifiedEmailAddressesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListVerifiedEmailAddressesOutput{} + req.Data = output + return +} + +// Returns a list containing all of the email addresses that have been verified. +// +// The ListVerifiedEmailAddresses action is deprecated as of the May 15, 2012 +// release of Domain Verification. The ListIdentities action is now preferred. +// +// This action is throttled at one request per second. +func (c *SES) ListVerifiedEmailAddresses(input *ListVerifiedEmailAddressesInput) (*ListVerifiedEmailAddressesOutput, error) { + req, out := c.ListVerifiedEmailAddressesRequest(input) + err := req.Send() + return out, err +} + +const opPutIdentityPolicy = "PutIdentityPolicy" + +// PutIdentityPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutIdentityPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutIdentityPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutIdentityPolicyRequest method. 
+// req, resp := client.PutIdentityPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) PutIdentityPolicyRequest(input *PutIdentityPolicyInput) (req *request.Request, output *PutIdentityPolicyOutput) { + op := &request.Operation{ + Name: opPutIdentityPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutIdentityPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &PutIdentityPolicyOutput{} + req.Data = output + return +} + +// Adds or updates a sending authorization policy for the specified identity +// (an email address or a domain). +// +// This API is for the identity owner only. If you have not verified the identity, +// this API will return an error. +// +// Sending authorization is a feature that enables an identity owner to authorize +// other senders to use its identities. For information about using sending +// authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). +// +// This action is throttled at one request per second. +func (c *SES) PutIdentityPolicy(input *PutIdentityPolicyInput) (*PutIdentityPolicyOutput, error) { + req, out := c.PutIdentityPolicyRequest(input) + err := req.Send() + return out, err +} + +const opReorderReceiptRuleSet = "ReorderReceiptRuleSet" + +// ReorderReceiptRuleSetRequest generates a "aws/request.Request" representing the +// client's request for the ReorderReceiptRuleSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ReorderReceiptRuleSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ReorderReceiptRuleSetRequest method. +// req, resp := client.ReorderReceiptRuleSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) ReorderReceiptRuleSetRequest(input *ReorderReceiptRuleSetInput) (req *request.Request, output *ReorderReceiptRuleSetOutput) { + op := &request.Operation{ + Name: opReorderReceiptRuleSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReorderReceiptRuleSetInput{} + } + + req = c.newRequest(op, input, output) + output = &ReorderReceiptRuleSetOutput{} + req.Data = output + return +} + +// Reorders the receipt rules within a receipt rule set. +// +// All of the rules in the rule set must be represented in this request. That +// is, this API will return an error if the reorder request doesn't explicitly +// position all of the rules. +// +// For information about managing receipt rule sets, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rule-sets.html). +// +// This action is throttled at one request per second. 
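+//
+// A minimal sketch of a call ("svc" is assumed to be an *SES client created
+// elsewhere; the rule set and rule names are placeholders, and RuleNames must
+// list every rule in the set, per the note above):
+//
+//    params := &ses.ReorderReceiptRuleSetInput{
+//        RuleSetName: aws.String("my-rule-set"),
+//        RuleNames:   []*string{aws.String("rule-1"), aws.String("rule-2")},
+//    }
+//    if _, err := svc.ReorderReceiptRuleSet(params); err != nil {
+//        fmt.Println(err)
+//    }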
+func (c *SES) ReorderReceiptRuleSet(input *ReorderReceiptRuleSetInput) (*ReorderReceiptRuleSetOutput, error) { + req, out := c.ReorderReceiptRuleSetRequest(input) + err := req.Send() + return out, err +} + +const opSendBounce = "SendBounce" + +// SendBounceRequest generates a "aws/request.Request" representing the +// client's request for the SendBounce operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SendBounce method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SendBounceRequest method. +// req, resp := client.SendBounceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) SendBounceRequest(input *SendBounceInput) (req *request.Request, output *SendBounceOutput) { + op := &request.Operation{ + Name: opSendBounce, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SendBounceInput{} + } + + req = c.newRequest(op, input, output) + output = &SendBounceOutput{} + req.Data = output + return +} + +// Generates and sends a bounce message to the sender of an email you received +// through Amazon SES. You can only use this API on an email up to 24 hours +// after you receive it. +// +// You cannot use this API to send generic bounces for mail that was not received +// by Amazon SES. +// +// For information about receiving email through Amazon SES, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email.html). +// +// This action is throttled at one request per second. +func (c *SES) SendBounce(input *SendBounceInput) (*SendBounceOutput, error) { + req, out := c.SendBounceRequest(input) + err := req.Send() + return out, err +} + +const opSendEmail = "SendEmail" + +// SendEmailRequest generates a "aws/request.Request" representing the +// client's request for the SendEmail operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SendEmail method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SendEmailRequest method. 
+// req, resp := client.SendEmailRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) SendEmailRequest(input *SendEmailInput) (req *request.Request, output *SendEmailOutput) { + op := &request.Operation{ + Name: opSendEmail, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SendEmailInput{} + } + + req = c.newRequest(op, input, output) + output = &SendEmailOutput{} + req.Data = output + return +} + +// Composes an email message based on input data, and then immediately queues +// the message for sending. +// +// There are several important points to know about SendEmail: +// +// You can only send email from verified email addresses and domains; otherwise, +// you will get an "Email address not verified" error. If your account is still +// in the Amazon SES sandbox, you must also verify every recipient email address +// except for the recipients provided by the Amazon SES mailbox simulator. For +// more information, go to the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html). +// +// The total size of the message cannot exceed 10 MB. This includes any attachments +// that are part of the message. +// +// Amazon SES has a limit on the total number of recipients per message. +// The combined number of To:, CC: and BCC: email addresses cannot exceed 50. +// If you need to send an email message to a larger audience, you can divide +// your recipient list into groups of 50 or fewer, and then call Amazon SES +// repeatedly to send the message to each group. +// +// For every message that you send, the total number of recipients (To:, +// CC: and BCC:) is counted against your sending quota - the maximum number +// of emails you can send in a 24-hour period. For information about your sending +// quota, go to the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/manage-sending-limits.html). +func (c *SES) SendEmail(input *SendEmailInput) (*SendEmailOutput, error) { + req, out := c.SendEmailRequest(input) + err := req.Send() + return out, err +} + +const opSendRawEmail = "SendRawEmail" + +// SendRawEmailRequest generates a "aws/request.Request" representing the +// client's request for the SendRawEmail operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SendRawEmail method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SendRawEmailRequest method. 
+// req, resp := client.SendRawEmailRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) SendRawEmailRequest(input *SendRawEmailInput) (req *request.Request, output *SendRawEmailOutput) { + op := &request.Operation{ + Name: opSendRawEmail, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SendRawEmailInput{} + } + + req = c.newRequest(op, input, output) + output = &SendRawEmailOutput{} + req.Data = output + return +} + +// Sends an email message, with header and content specified by the client. +// The SendRawEmail action is useful for sending multipart MIME emails. The +// raw text of the message must comply with Internet email standards; otherwise, +// the message cannot be sent. +// +// There are several important points to know about SendRawEmail: +// +// You can only send email from verified email addresses and domains; otherwise, +// you will get an "Email address not verified" error. If your account is still +// in the Amazon SES sandbox, you must also verify every recipient email address +// except for the recipients provided by the Amazon SES mailbox simulator. For +// more information, go to the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html). +// +// The total size of the message cannot exceed 10 MB. This includes any attachments +// that are part of the message. +// +// Amazon SES has a limit on the total number of recipients per message. +// The combined number of To:, CC: and BCC: email addresses cannot exceed 50. +// If you need to send an email message to a larger audience, you can divide +// your recipient list into groups of 50 or fewer, and then call Amazon SES +// repeatedly to send the message to each group. +// +// The To:, CC:, and BCC: headers in the raw message can contain a group +// list. Note that each recipient in a group list counts towards the 50-recipient +// limit. +// +// For every message that you send, the total number of recipients (To:, +// CC: and BCC:) is counted against your sending quota - the maximum number +// of emails you can send in a 24-hour period. For information about your sending +// quota, go to the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/manage-sending-limits.html). +// +// If you are using sending authorization to send on behalf of another user, +// SendRawEmail enables you to specify the cross-account identity for the email's +// "Source," "From," and "Return-Path" parameters in one of two ways: you can +// pass optional parameters SourceArn, FromArn, and/or ReturnPathArn to the +// API, or you can include the following X-headers in the header of your raw +// email: +// +// X-SES-SOURCE-ARN +// +// X-SES-FROM-ARN +// +// X-SES-RETURN-PATH-ARN +// +// Do not include these X-headers in the DKIM signature, because they are +// removed by Amazon SES before sending the email. +// +// For the most common sending authorization use case, we recommend that you +// specify the SourceIdentityArn and do not specify either the FromIdentityArn +// or ReturnPathIdentityArn. (The same note applies to the corresponding X-headers.) +// If you only specify the SourceIdentityArn, Amazon SES will simply set the +// "From" address and the "Return Path" address to the identity specified in +// SourceIdentityArn. 
For more information about sending authorization, see +// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). +func (c *SES) SendRawEmail(input *SendRawEmailInput) (*SendRawEmailOutput, error) { + req, out := c.SendRawEmailRequest(input) + err := req.Send() + return out, err +} + +const opSetActiveReceiptRuleSet = "SetActiveReceiptRuleSet" + +// SetActiveReceiptRuleSetRequest generates a "aws/request.Request" representing the +// client's request for the SetActiveReceiptRuleSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetActiveReceiptRuleSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetActiveReceiptRuleSetRequest method. +// req, resp := client.SetActiveReceiptRuleSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) SetActiveReceiptRuleSetRequest(input *SetActiveReceiptRuleSetInput) (req *request.Request, output *SetActiveReceiptRuleSetOutput) { + op := &request.Operation{ + Name: opSetActiveReceiptRuleSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetActiveReceiptRuleSetInput{} + } + + req = c.newRequest(op, input, output) + output = &SetActiveReceiptRuleSetOutput{} + req.Data = output + return +} + +// Sets the specified receipt rule set as the active receipt rule set. +// +// To disable your email-receiving through Amazon SES completely, you can +// call this API with RuleSetName set to null. +// +// For information about managing receipt rule sets, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rule-sets.html). +// +// This action is throttled at one request per second. +func (c *SES) SetActiveReceiptRuleSet(input *SetActiveReceiptRuleSetInput) (*SetActiveReceiptRuleSetOutput, error) { + req, out := c.SetActiveReceiptRuleSetRequest(input) + err := req.Send() + return out, err +} + +const opSetIdentityDkimEnabled = "SetIdentityDkimEnabled" + +// SetIdentityDkimEnabledRequest generates a "aws/request.Request" representing the +// client's request for the SetIdentityDkimEnabled operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetIdentityDkimEnabled method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetIdentityDkimEnabledRequest method. 
+// req, resp := client.SetIdentityDkimEnabledRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) SetIdentityDkimEnabledRequest(input *SetIdentityDkimEnabledInput) (req *request.Request, output *SetIdentityDkimEnabledOutput) { + op := &request.Operation{ + Name: opSetIdentityDkimEnabled, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetIdentityDkimEnabledInput{} + } + + req = c.newRequest(op, input, output) + output = &SetIdentityDkimEnabledOutput{} + req.Data = output + return +} + +// Enables or disables Easy DKIM signing of email sent from an identity: +// +// If Easy DKIM signing is enabled for a domain name identity (e.g., example.com), +// then Amazon SES will DKIM-sign all email sent by addresses under that domain +// name (e.g., user@example.com). +// +// If Easy DKIM signing is enabled for an email address, then Amazon SES +// will DKIM-sign all email sent by that email address. +// +// For email addresses (e.g., user@example.com), you can only enable Easy +// DKIM signing if the corresponding domain (e.g., example.com) has been set +// up for Easy DKIM using the AWS Console or the VerifyDomainDkim action. +// +// This action is throttled at one request per second. +// +// For more information about Easy DKIM signing, go to the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim.html). +func (c *SES) SetIdentityDkimEnabled(input *SetIdentityDkimEnabledInput) (*SetIdentityDkimEnabledOutput, error) { + req, out := c.SetIdentityDkimEnabledRequest(input) + err := req.Send() + return out, err +} + +const opSetIdentityFeedbackForwardingEnabled = "SetIdentityFeedbackForwardingEnabled" + +// SetIdentityFeedbackForwardingEnabledRequest generates a "aws/request.Request" representing the +// client's request for the SetIdentityFeedbackForwardingEnabled operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetIdentityFeedbackForwardingEnabled method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetIdentityFeedbackForwardingEnabledRequest method. +// req, resp := client.SetIdentityFeedbackForwardingEnabledRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) SetIdentityFeedbackForwardingEnabledRequest(input *SetIdentityFeedbackForwardingEnabledInput) (req *request.Request, output *SetIdentityFeedbackForwardingEnabledOutput) { + op := &request.Operation{ + Name: opSetIdentityFeedbackForwardingEnabled, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetIdentityFeedbackForwardingEnabledInput{} + } + + req = c.newRequest(op, input, output) + output = &SetIdentityFeedbackForwardingEnabledOutput{} + req.Data = output + return +} + +// Given an identity (an email address or a domain), enables or disables whether +// Amazon SES forwards bounce and complaint notifications as email. 
Feedback +// forwarding can only be disabled when Amazon Simple Notification Service (Amazon +// SNS) topics are specified for both bounces and complaints. +// +// Feedback forwarding does not apply to delivery notifications. Delivery +// notifications are only available through Amazon SNS. +// +// This action is throttled at one request per second. +// +// For more information about using notifications with Amazon SES, see the +// Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications.html). +func (c *SES) SetIdentityFeedbackForwardingEnabled(input *SetIdentityFeedbackForwardingEnabledInput) (*SetIdentityFeedbackForwardingEnabledOutput, error) { + req, out := c.SetIdentityFeedbackForwardingEnabledRequest(input) + err := req.Send() + return out, err +} + +const opSetIdentityHeadersInNotificationsEnabled = "SetIdentityHeadersInNotificationsEnabled" + +// SetIdentityHeadersInNotificationsEnabledRequest generates a "aws/request.Request" representing the +// client's request for the SetIdentityHeadersInNotificationsEnabled operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetIdentityHeadersInNotificationsEnabled method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetIdentityHeadersInNotificationsEnabledRequest method. +// req, resp := client.SetIdentityHeadersInNotificationsEnabledRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) SetIdentityHeadersInNotificationsEnabledRequest(input *SetIdentityHeadersInNotificationsEnabledInput) (req *request.Request, output *SetIdentityHeadersInNotificationsEnabledOutput) { + op := &request.Operation{ + Name: opSetIdentityHeadersInNotificationsEnabled, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetIdentityHeadersInNotificationsEnabledInput{} + } + + req = c.newRequest(op, input, output) + output = &SetIdentityHeadersInNotificationsEnabledOutput{} + req.Data = output + return +} + +// Given an identity (an email address or a domain), sets whether Amazon SES +// includes the original email headers in the Amazon Simple Notification Service +// (Amazon SNS) notifications of a specified type. +// +// This action is throttled at one request per second. +// +// For more information about using notifications with Amazon SES, see the +// Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications.html). +func (c *SES) SetIdentityHeadersInNotificationsEnabled(input *SetIdentityHeadersInNotificationsEnabledInput) (*SetIdentityHeadersInNotificationsEnabledOutput, error) { + req, out := c.SetIdentityHeadersInNotificationsEnabledRequest(input) + err := req.Send() + return out, err +} + +const opSetIdentityMailFromDomain = "SetIdentityMailFromDomain" + +// SetIdentityMailFromDomainRequest generates a "aws/request.Request" representing the +// client's request for the SetIdentityMailFromDomain operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetIdentityMailFromDomain method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetIdentityMailFromDomainRequest method. +// req, resp := client.SetIdentityMailFromDomainRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) SetIdentityMailFromDomainRequest(input *SetIdentityMailFromDomainInput) (req *request.Request, output *SetIdentityMailFromDomainOutput) { + op := &request.Operation{ + Name: opSetIdentityMailFromDomain, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetIdentityMailFromDomainInput{} + } + + req = c.newRequest(op, input, output) + output = &SetIdentityMailFromDomainOutput{} + req.Data = output + return +} + +// Enables or disables the custom MAIL FROM domain setup for a verified identity +// (an email address or a domain). +// +// To send emails using the specified MAIL FROM domain, you must add an MX +// record to your MAIL FROM domain's DNS settings. If you want your emails to +// pass Sender Policy Framework (SPF) checks, you must also add or update an +// SPF record. For more information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/mail-from-set.html). +// +// This action is throttled at one request per second. +func (c *SES) SetIdentityMailFromDomain(input *SetIdentityMailFromDomainInput) (*SetIdentityMailFromDomainOutput, error) { + req, out := c.SetIdentityMailFromDomainRequest(input) + err := req.Send() + return out, err +} + +const opSetIdentityNotificationTopic = "SetIdentityNotificationTopic" + +// SetIdentityNotificationTopicRequest generates a "aws/request.Request" representing the +// client's request for the SetIdentityNotificationTopic operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetIdentityNotificationTopic method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetIdentityNotificationTopicRequest method. 
+// req, resp := client.SetIdentityNotificationTopicRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) SetIdentityNotificationTopicRequest(input *SetIdentityNotificationTopicInput) (req *request.Request, output *SetIdentityNotificationTopicOutput) { + op := &request.Operation{ + Name: opSetIdentityNotificationTopic, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetIdentityNotificationTopicInput{} + } + + req = c.newRequest(op, input, output) + output = &SetIdentityNotificationTopicOutput{} + req.Data = output + return +} + +// Given an identity (an email address or a domain), sets the Amazon Simple +// Notification Service (Amazon SNS) topic to which Amazon SES will publish +// bounce, complaint, and/or delivery notifications for emails sent with that +// identity as the Source. +// +// Unless feedback forwarding is enabled, you must specify Amazon SNS topics +// for bounce and complaint notifications. For more information, see SetIdentityFeedbackForwardingEnabled. +// +// This action is throttled at one request per second. +// +// For more information about feedback notification, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications.html). +func (c *SES) SetIdentityNotificationTopic(input *SetIdentityNotificationTopicInput) (*SetIdentityNotificationTopicOutput, error) { + req, out := c.SetIdentityNotificationTopicRequest(input) + err := req.Send() + return out, err +} + +const opSetReceiptRulePosition = "SetReceiptRulePosition" + +// SetReceiptRulePositionRequest generates a "aws/request.Request" representing the +// client's request for the SetReceiptRulePosition operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetReceiptRulePosition method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetReceiptRulePositionRequest method. +// req, resp := client.SetReceiptRulePositionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) SetReceiptRulePositionRequest(input *SetReceiptRulePositionInput) (req *request.Request, output *SetReceiptRulePositionOutput) { + op := &request.Operation{ + Name: opSetReceiptRulePosition, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetReceiptRulePositionInput{} + } + + req = c.newRequest(op, input, output) + output = &SetReceiptRulePositionOutput{} + req.Data = output + return +} + +// Sets the position of the specified receipt rule in the receipt rule set. +// +// For information about managing receipt rules, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rules.html). +// +// This action is throttled at one request per second. 
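+//
+// A minimal sketch ("svc" is assumed to be an *SES client created elsewhere;
+// the names are placeholders). After is optional and names the rule that the
+// repositioned rule should follow, mirroring the After parameter documented
+// on CreateReceiptRuleInput:
+//
+//    _, err := svc.SetReceiptRulePosition(&ses.SetReceiptRulePositionInput{
+//        RuleSetName: aws.String("my-rule-set"),
+//        RuleName:    aws.String("spam-filter"),
+//        After:       aws.String("virus-scan"),
+//    })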
+func (c *SES) SetReceiptRulePosition(input *SetReceiptRulePositionInput) (*SetReceiptRulePositionOutput, error) { + req, out := c.SetReceiptRulePositionRequest(input) + err := req.Send() + return out, err +} + +const opUpdateReceiptRule = "UpdateReceiptRule" + +// UpdateReceiptRuleRequest generates a "aws/request.Request" representing the +// client's request for the UpdateReceiptRule operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateReceiptRule method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateReceiptRuleRequest method. +// req, resp := client.UpdateReceiptRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) UpdateReceiptRuleRequest(input *UpdateReceiptRuleInput) (req *request.Request, output *UpdateReceiptRuleOutput) { + op := &request.Operation{ + Name: opUpdateReceiptRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateReceiptRuleInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateReceiptRuleOutput{} + req.Data = output + return +} + +// Updates a receipt rule. +// +// For information about managing receipt rules, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rules.html). +// +// This action is throttled at one request per second. +func (c *SES) UpdateReceiptRule(input *UpdateReceiptRuleInput) (*UpdateReceiptRuleOutput, error) { + req, out := c.UpdateReceiptRuleRequest(input) + err := req.Send() + return out, err +} + +const opVerifyDomainDkim = "VerifyDomainDkim" + +// VerifyDomainDkimRequest generates a "aws/request.Request" representing the +// client's request for the VerifyDomainDkim operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the VerifyDomainDkim method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the VerifyDomainDkimRequest method. 
+// req, resp := client.VerifyDomainDkimRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) VerifyDomainDkimRequest(input *VerifyDomainDkimInput) (req *request.Request, output *VerifyDomainDkimOutput) { + op := &request.Operation{ + Name: opVerifyDomainDkim, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &VerifyDomainDkimInput{} + } + + req = c.newRequest(op, input, output) + output = &VerifyDomainDkimOutput{} + req.Data = output + return +} + +// Returns a set of DKIM tokens for a domain. DKIM tokens are character strings +// that represent your domain's identity. Using these tokens, you will need +// to create DNS CNAME records that point to DKIM public keys hosted by Amazon +// SES. Amazon Web Services will eventually detect that you have updated your +// DNS records; this detection process may take up to 72 hours. Upon successful +// detection, Amazon SES will be able to DKIM-sign email originating from that +// domain. +// +// This action is throttled at one request per second. +// +// To enable or disable Easy DKIM signing for a domain, use the SetIdentityDkimEnabled +// action. +// +// For more information about creating DNS records using DKIM tokens, go to +// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim-dns-records.html). +func (c *SES) VerifyDomainDkim(input *VerifyDomainDkimInput) (*VerifyDomainDkimOutput, error) { + req, out := c.VerifyDomainDkimRequest(input) + err := req.Send() + return out, err +} + +const opVerifyDomainIdentity = "VerifyDomainIdentity" + +// VerifyDomainIdentityRequest generates a "aws/request.Request" representing the +// client's request for the VerifyDomainIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the VerifyDomainIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the VerifyDomainIdentityRequest method. +// req, resp := client.VerifyDomainIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) VerifyDomainIdentityRequest(input *VerifyDomainIdentityInput) (req *request.Request, output *VerifyDomainIdentityOutput) { + op := &request.Operation{ + Name: opVerifyDomainIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &VerifyDomainIdentityInput{} + } + + req = c.newRequest(op, input, output) + output = &VerifyDomainIdentityOutput{} + req.Data = output + return +} + +// Verifies a domain. +// +// This action is throttled at one request per second. +func (c *SES) VerifyDomainIdentity(input *VerifyDomainIdentityInput) (*VerifyDomainIdentityOutput, error) { + req, out := c.VerifyDomainIdentityRequest(input) + err := req.Send() + return out, err +} + +const opVerifyEmailAddress = "VerifyEmailAddress" + +// VerifyEmailAddressRequest generates a "aws/request.Request" representing the +// client's request for the VerifyEmailAddress operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the VerifyEmailAddress method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the VerifyEmailAddressRequest method. +// req, resp := client.VerifyEmailAddressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) VerifyEmailAddressRequest(input *VerifyEmailAddressInput) (req *request.Request, output *VerifyEmailAddressOutput) { + op := &request.Operation{ + Name: opVerifyEmailAddress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &VerifyEmailAddressInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &VerifyEmailAddressOutput{} + req.Data = output + return +} + +// Verifies an email address. This action causes a confirmation email message +// to be sent to the specified address. +// +// The VerifyEmailAddress action is deprecated as of the May 15, 2012 release +// of Domain Verification. The VerifyEmailIdentity action is now preferred. +// +// This action is throttled at one request per second. +func (c *SES) VerifyEmailAddress(input *VerifyEmailAddressInput) (*VerifyEmailAddressOutput, error) { + req, out := c.VerifyEmailAddressRequest(input) + err := req.Send() + return out, err +} + +const opVerifyEmailIdentity = "VerifyEmailIdentity" + +// VerifyEmailIdentityRequest generates a "aws/request.Request" representing the +// client's request for the VerifyEmailIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the VerifyEmailIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the VerifyEmailIdentityRequest method. +// req, resp := client.VerifyEmailIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) VerifyEmailIdentityRequest(input *VerifyEmailIdentityInput) (req *request.Request, output *VerifyEmailIdentityOutput) { + op := &request.Operation{ + Name: opVerifyEmailIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &VerifyEmailIdentityInput{} + } + + req = c.newRequest(op, input, output) + output = &VerifyEmailIdentityOutput{} + req.Data = output + return +} + +// Verifies an email address. This action causes a confirmation email message +// to be sent to the specified address. +// +// This action is throttled at one request per second. 
+func (c *SES) VerifyEmailIdentity(input *VerifyEmailIdentityInput) (*VerifyEmailIdentityOutput, error) { + req, out := c.VerifyEmailIdentityRequest(input) + err := req.Send() + return out, err +} + +// When included in a receipt rule, this action adds a header to the received +// email. +// +// For information about adding a header using a receipt rule, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-add-header.html). +type AddHeaderAction struct { + _ struct{} `type:"structure"` + + // The name of the header to add. Must be between 1 and 50 characters, inclusive, + // and consist of alphanumeric (a-z, A-Z, 0-9) characters and dashes only. + HeaderName *string `type:"string" required:"true"` + + // Must be less than 2048 characters, and must not contain newline characters + // ("\r" or "\n"). + HeaderValue *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AddHeaderAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddHeaderAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddHeaderAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddHeaderAction"} + if s.HeaderName == nil { + invalidParams.Add(request.NewErrParamRequired("HeaderName")) + } + if s.HeaderValue == nil { + invalidParams.Add(request.NewErrParamRequired("HeaderValue")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the body of the message. You can specify text, HTML, or both. +// If you use both, then the message should display correctly in the widest +// variety of email clients. +type Body struct { + _ struct{} `type:"structure"` + + // The content of the message, in HTML format. Use this for email clients that + // can process HTML. You can include clickable links, formatted text, and much + // more in an HTML message. + Html *Content `type:"structure"` + + // The content of the message, in text format. Use this for text-based email + // clients, or clients on high-latency networks (such as mobile devices). + Text *Content `type:"structure"` +} + +// String returns the string representation +func (s Body) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Body) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Body) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Body"} + if s.Html != nil { + if err := s.Html.Validate(); err != nil { + invalidParams.AddNested("Html", err.(request.ErrInvalidParams)) + } + } + if s.Text != nil { + if err := s.Text.Validate(); err != nil { + invalidParams.AddNested("Text", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// When included in a receipt rule, this action rejects the received email by +// returning a bounce response to the sender and, optionally, publishes a notification +// to Amazon Simple Notification Service (Amazon SNS). +// +// For information about sending a bounce message in response to a received +// email, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-bounce.html). 
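+//
+// A sketch of a populated literal (every value below is a placeholder).
+// Message, Sender, and SmtpReplyCode are the fields that Validate requires;
+// StatusCode and TopicArn are optional:
+//
+//    action := &ses.BounceAction{
+//        Message:       aws.String("Mailbox does not exist"),
+//        Sender:        aws.String("bounces@example.com"),
+//        SmtpReplyCode: aws.String("550"),
+//        StatusCode:    aws.String("5.1.1"),
+//    }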
+type BounceAction struct { + _ struct{} `type:"structure"` + + // Human-readable text to include in the bounce message. + Message *string `type:"string" required:"true"` + + // The email address of the sender of the bounced email. This is the address + // from which the bounce message will be sent. + Sender *string `type:"string" required:"true"` + + // The SMTP reply code, as defined by RFC 5321 (https://tools.ietf.org/html/rfc5321). + SmtpReplyCode *string `type:"string" required:"true"` + + // The SMTP enhanced status code, as defined by RFC 3463 (https://tools.ietf.org/html/rfc3463). + StatusCode *string `type:"string"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the + // bounce action is taken. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. + // For more information about Amazon SNS topics, see the Amazon SNS Developer + // Guide (http://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html). + TopicArn *string `type:"string"` +} + +// String returns the string representation +func (s BounceAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BounceAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BounceAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BounceAction"} + if s.Message == nil { + invalidParams.Add(request.NewErrParamRequired("Message")) + } + if s.Sender == nil { + invalidParams.Add(request.NewErrParamRequired("Sender")) + } + if s.SmtpReplyCode == nil { + invalidParams.Add(request.NewErrParamRequired("SmtpReplyCode")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Recipient-related information to include in the Delivery Status Notification +// (DSN) when an email that Amazon SES receives on your behalf bounces. +// +// For information about receiving email through Amazon SES, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email.html). +type BouncedRecipientInfo struct { + _ struct{} `type:"structure"` + + // The reason for the bounce. You must provide either this parameter or RecipientDsnFields. + BounceType *string `type:"string" enum:"BounceType"` + + // The email address of the recipient of the bounced email. + Recipient *string `type:"string" required:"true"` + + // This parameter is used only for sending authorization. It is the ARN of the + // identity that is associated with the sending authorization policy that permits + // you to receive email for the recipient of the bounced email. For more information + // about sending authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). + RecipientArn *string `type:"string"` + + // Recipient-related DSN fields, most of which would normally be filled in automatically + // when provided with a BounceType. You must provide either this parameter or + // BounceType. + RecipientDsnFields *RecipientDsnFields `type:"structure"` +} + +// String returns the string representation +func (s BouncedRecipientInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BouncedRecipientInfo) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *BouncedRecipientInfo) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BouncedRecipientInfo"} + if s.Recipient == nil { + invalidParams.Add(request.NewErrParamRequired("Recipient")) + } + if s.RecipientDsnFields != nil { + if err := s.RecipientDsnFields.Validate(); err != nil { + invalidParams.AddNested("RecipientDsnFields", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents a request to create a receipt rule set by cloning an existing +// one. You use receipt rule sets to receive email with Amazon SES. For more +// information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +type CloneReceiptRuleSetInput struct { + _ struct{} `type:"structure"` + + // The name of the rule set to clone. + OriginalRuleSetName *string `type:"string" required:"true"` + + // The name of the rule set to create. The name must: + // + // Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores + // (_), or dashes (-). + // + // Start and end with a letter or number. + // + // Contain less than 64 characters. + RuleSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CloneReceiptRuleSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloneReceiptRuleSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CloneReceiptRuleSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CloneReceiptRuleSetInput"} + if s.OriginalRuleSetName == nil { + invalidParams.Add(request.NewErrParamRequired("OriginalRuleSetName")) + } + if s.RuleSetName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An empty element returned on a successful request. +type CloneReceiptRuleSetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CloneReceiptRuleSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloneReceiptRuleSetOutput) GoString() string { + return s.String() +} + +// Represents textual data, plus an optional character set specification. +// +// By default, the text must be 7-bit ASCII, due to the constraints of the +// SMTP protocol. If the text must contain any other characters, then you must +// also specify a character set. Examples include UTF-8, ISO-8859-1, and Shift_JIS. +type Content struct { + _ struct{} `type:"structure"` + + // The character set of the content. + Charset *string `type:"string"` + + // The textual data of the content. + Data *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Content) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Content) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *Content) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Content"} + if s.Data == nil { + invalidParams.Add(request.NewErrParamRequired("Data")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents a request to create a new IP address filter. You use IP address +// filters when you receive email with Amazon SES. For more information, see +// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +type CreateReceiptFilterInput struct { + _ struct{} `type:"structure"` + + // A data structure that describes the IP address filter to create, which consists + // of a name, an IP address range, and whether to allow or block mail from it. + Filter *ReceiptFilter `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateReceiptFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReceiptFilterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateReceiptFilterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateReceiptFilterInput"} + if s.Filter == nil { + invalidParams.Add(request.NewErrParamRequired("Filter")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An empty element returned on a successful request. +type CreateReceiptFilterOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateReceiptFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReceiptFilterOutput) GoString() string { + return s.String() +} + +// Represents a request to create a receipt rule. You use receipt rules to receive +// email with Amazon SES. For more information, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +type CreateReceiptRuleInput struct { + _ struct{} `type:"structure"` + + // The name of an existing rule after which the new rule will be placed. If + // this parameter is null, the new rule will be inserted at the beginning of + // the rule list. + After *string `type:"string"` + + // A data structure that contains the specified rule's name, actions, recipients, + // domains, enabled status, scan status, and TLS policy. + Rule *ReceiptRule `type:"structure" required:"true"` + + // The name of the rule set to which to add the rule. + RuleSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateReceiptRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReceiptRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateReceiptRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateReceiptRuleInput"} + if s.Rule == nil { + invalidParams.Add(request.NewErrParamRequired("Rule")) + } + if s.RuleSetName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleSetName")) + } + if s.Rule != nil { + if err := s.Rule.Validate(); err != nil { + invalidParams.AddNested("Rule", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An empty element returned on a successful request. +type CreateReceiptRuleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateReceiptRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReceiptRuleOutput) GoString() string { + return s.String() +} + +// Represents a request to create an empty receipt rule set. You use receipt +// rule sets to receive email with Amazon SES. For more information, see the +// Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +type CreateReceiptRuleSetInput struct { + _ struct{} `type:"structure"` + + // The name of the rule set to create. The name must: + // + // Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores + // (_), or dashes (-). + // + // Start and end with a letter or number. + // + // Contain less than 64 characters. + RuleSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateReceiptRuleSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReceiptRuleSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateReceiptRuleSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateReceiptRuleSetInput"} + if s.RuleSetName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An empty element returned on a successful request. +type CreateReceiptRuleSetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateReceiptRuleSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReceiptRuleSetOutput) GoString() string { + return s.String() +} + +// Represents a request to delete one of your Amazon SES identities (an email +// address or domain). +type DeleteIdentityInput struct { + _ struct{} `type:"structure"` + + // The identity to be removed from the list of identities for the AWS Account. + Identity *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteIdentityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteIdentityInput"} + if s.Identity == nil { + invalidParams.Add(request.NewErrParamRequired("Identity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An empty element returned on a successful request. +type DeleteIdentityOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteIdentityOutput) GoString() string { + return s.String() +} + +// Represents a request to delete a sending authorization policy for an identity. +// Sending authorization is an Amazon SES feature that enables you to authorize +// other senders to use your identities. For information, see the Amazon SES +// Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). +type DeleteIdentityPolicyInput struct { + _ struct{} `type:"structure"` + + // The identity that is associated with the policy that you want to delete. + // You can specify the identity by using its name or by using its Amazon Resource + // Name (ARN). Examples: user@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com. + // + // To successfully call this API, you must own the identity. + Identity *string `type:"string" required:"true"` + + // The name of the policy to be deleted. + PolicyName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteIdentityPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteIdentityPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteIdentityPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteIdentityPolicyInput"} + if s.Identity == nil { + invalidParams.Add(request.NewErrParamRequired("Identity")) + } + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An empty element returned on a successful request. +type DeleteIdentityPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteIdentityPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteIdentityPolicyOutput) GoString() string { + return s.String() +} + +// Represents a request to delete an IP address filter. You use IP address filters +// when you receive email with Amazon SES. For more information, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +type DeleteReceiptFilterInput struct { + _ struct{} `type:"structure"` + + // The name of the IP address filter to delete. 
+ FilterName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteReceiptFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReceiptFilterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteReceiptFilterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteReceiptFilterInput"} + if s.FilterName == nil { + invalidParams.Add(request.NewErrParamRequired("FilterName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An empty element returned on a successful request. +type DeleteReceiptFilterOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteReceiptFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReceiptFilterOutput) GoString() string { + return s.String() +} + +// Represents a request to delete a receipt rule. You use receipt rules to receive +// email with Amazon SES. For more information, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +type DeleteReceiptRuleInput struct { + _ struct{} `type:"structure"` + + // The name of the receipt rule to delete. + RuleName *string `type:"string" required:"true"` + + // The name of the receipt rule set that contains the receipt rule to delete. + RuleSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteReceiptRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReceiptRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteReceiptRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteReceiptRuleInput"} + if s.RuleName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleName")) + } + if s.RuleSetName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An empty element returned on a successful request. +type DeleteReceiptRuleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteReceiptRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReceiptRuleOutput) GoString() string { + return s.String() +} + +// Represents a request to delete a receipt rule set and all of the receipt +// rules it contains. You use receipt rule sets to receive email with Amazon +// SES. For more information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +type DeleteReceiptRuleSetInput struct { + _ struct{} `type:"structure"` + + // The name of the receipt rule set to delete. 
+ RuleSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteReceiptRuleSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReceiptRuleSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteReceiptRuleSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteReceiptRuleSetInput"} + if s.RuleSetName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An empty element returned on a successful request. +type DeleteReceiptRuleSetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteReceiptRuleSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReceiptRuleSetOutput) GoString() string { + return s.String() +} + +// Represents a request to delete an email address from the list of email addresses +// you have attempted to verify under your AWS account. +type DeleteVerifiedEmailAddressInput struct { + _ struct{} `type:"structure"` + + // An email address to be removed from the list of verified addresses. + EmailAddress *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVerifiedEmailAddressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVerifiedEmailAddressInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteVerifiedEmailAddressInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteVerifiedEmailAddressInput"} + if s.EmailAddress == nil { + invalidParams.Add(request.NewErrParamRequired("EmailAddress")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteVerifiedEmailAddressOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteVerifiedEmailAddressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVerifiedEmailAddressOutput) GoString() string { + return s.String() +} + +// Represents a request to return the metadata and receipt rules for the receipt +// rule set that is currently active. You use receipt rule sets to receive email +// with Amazon SES. For more information, see the Amazon SES Developer Guide +// (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +type DescribeActiveReceiptRuleSetInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeActiveReceiptRuleSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeActiveReceiptRuleSetInput) GoString() string { + return s.String() +} + +// Represents the metadata and receipt rules for the receipt rule set that is +// currently active. +type DescribeActiveReceiptRuleSetOutput struct { + _ struct{} `type:"structure"` + + // The metadata for the currently active receipt rule set. The metadata consists + // of the rule set name and a timestamp of when the rule set was created. 
+ Metadata *ReceiptRuleSetMetadata `type:"structure"` + + // The receipt rules that belong to the active rule set. + Rules []*ReceiptRule `type:"list"` +} + +// String returns the string representation +func (s DescribeActiveReceiptRuleSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeActiveReceiptRuleSetOutput) GoString() string { + return s.String() +} + +// Represents a request to return the details of a receipt rule. You use receipt +// rules to receive email with Amazon SES. For more information, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +type DescribeReceiptRuleInput struct { + _ struct{} `type:"structure"` + + // The name of the receipt rule. + RuleName *string `type:"string" required:"true"` + + // The name of the receipt rule set to which the receipt rule belongs. + RuleSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeReceiptRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReceiptRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeReceiptRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeReceiptRuleInput"} + if s.RuleName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleName")) + } + if s.RuleSetName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the details of a receipt rule. +type DescribeReceiptRuleOutput struct { + _ struct{} `type:"structure"` + + // A data structure that contains the specified receipt rule's name, actions, + // recipients, domains, enabled status, scan status, and Transport Layer Security + // (TLS) policy. + Rule *ReceiptRule `type:"structure"` +} + +// String returns the string representation +func (s DescribeReceiptRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReceiptRuleOutput) GoString() string { + return s.String() +} + +// Represents a request to return the details of a receipt rule set. You use +// receipt rule sets to receive email with Amazon SES. For more information, +// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +type DescribeReceiptRuleSetInput struct { + _ struct{} `type:"structure"` + + // The name of the receipt rule set to describe. + RuleSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeReceiptRuleSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReceiptRuleSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeReceiptRuleSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeReceiptRuleSetInput"} + if s.RuleSetName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the details of the specified receipt rule set. 
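+//
+// Editor's sketch, not generated code: a typical DescribeReceiptRuleSet call.
+// svc stands for an already-constructed client for this package (e.g. New(sess)),
+// and the rule set name is a placeholder:
+//
+//    out, err := svc.DescribeReceiptRuleSet(&DescribeReceiptRuleSetInput{
+//        RuleSetName: aws.String("my-rule-set"),
+//    })
+//    if err == nil {
+//        fmt.Println(*out.Metadata.Name, len(out.Rules))
+//    }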
+type DescribeReceiptRuleSetOutput struct { + _ struct{} `type:"structure"` + + // The metadata for the receipt rule set, which consists of the rule set name + // and the timestamp of when the rule set was created. + Metadata *ReceiptRuleSetMetadata `type:"structure"` + + // A list of the receipt rules that belong to the specified receipt rule set. + Rules []*ReceiptRule `type:"list"` +} + +// String returns the string representation +func (s DescribeReceiptRuleSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReceiptRuleSetOutput) GoString() string { + return s.String() +} + +// Represents the destination of the message, consisting of To:, CC:, and BCC: +// fields. +// +// By default, the string must be 7-bit ASCII. If the text must contain any +// other characters, then you must use MIME encoded-word syntax (RFC 2047) instead +// of a literal string. MIME encoded-word syntax uses the following form: =?charset?encoding?encoded-text?=. +// For more information, see RFC 2047 (http://tools.ietf.org/html/rfc2047). +type Destination struct { + _ struct{} `type:"structure"` + + // The BCC: field(s) of the message. + BccAddresses []*string `type:"list"` + + // The CC: field(s) of the message. + CcAddresses []*string `type:"list"` + + // The To: field(s) of the message. + ToAddresses []*string `type:"list"` +} + +// String returns the string representation +func (s Destination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Destination) GoString() string { + return s.String() +} + +// Additional X-headers to include in the Delivery Status Notification (DSN) +// when an email that Amazon SES receives on your behalf bounces. +// +// For information about receiving email through Amazon SES, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email.html). +type ExtensionField struct { + _ struct{} `type:"structure"` + + // The name of the header to add. Must be between 1 and 50 characters, inclusive, + // and consist of alphanumeric (a-z, A-Z, 0-9) characters and dashes only. + Name *string `type:"string" required:"true"` + + // The value of the header to add. Must be less than 2048 characters, and must + // not contain newline characters ("\r" or "\n"). + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ExtensionField) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExtensionField) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ExtensionField) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExtensionField"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents a request for the status of Amazon SES Easy DKIM signing for an +// identity. For domain identities, this request also returns the DKIM tokens +// that are required for Easy DKIM signing, and whether Amazon SES successfully +// verified that these tokens were published. For more information about Easy +// DKIM, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim.html). 
+type GetIdentityDkimAttributesInput struct { + _ struct{} `type:"structure"` + + // A list of one or more verified identities - email addresses, domains, or + // both. + Identities []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s GetIdentityDkimAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityDkimAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetIdentityDkimAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetIdentityDkimAttributesInput"} + if s.Identities == nil { + invalidParams.Add(request.NewErrParamRequired("Identities")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the status of Amazon SES Easy DKIM signing for an identity. For +// domain identities, this response also contains the DKIM tokens that are required +// for Easy DKIM signing, and whether Amazon SES successfully verified that +// these tokens were published. +type GetIdentityDkimAttributesOutput struct { + _ struct{} `type:"structure"` + + // The DKIM attributes for an email address or a domain. + DkimAttributes map[string]*IdentityDkimAttributes `type:"map" required:"true"` +} + +// String returns the string representation +func (s GetIdentityDkimAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityDkimAttributesOutput) GoString() string { + return s.String() +} + +// Represents a request to return the Amazon SES custom MAIL FROM attributes +// for a list of identities. For information about using a custom MAIL FROM +// domain, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/mail-from.html). +type GetIdentityMailFromDomainAttributesInput struct { + _ struct{} `type:"structure"` + + // A list of one or more identities. + Identities []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s GetIdentityMailFromDomainAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityMailFromDomainAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetIdentityMailFromDomainAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetIdentityMailFromDomainAttributesInput"} + if s.Identities == nil { + invalidParams.Add(request.NewErrParamRequired("Identities")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the custom MAIL FROM attributes for a list of identities. +type GetIdentityMailFromDomainAttributesOutput struct { + _ struct{} `type:"structure"` + + // A map of identities to custom MAIL FROM attributes. 
+ MailFromDomainAttributes map[string]*IdentityMailFromDomainAttributes `type:"map" required:"true"` +} + +// String returns the string representation +func (s GetIdentityMailFromDomainAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityMailFromDomainAttributesOutput) GoString() string { + return s.String() +} + +// Represents a request to return the notification attributes for a list of +// identities you verified with Amazon SES. For information about Amazon SES +// notifications, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications.html). +type GetIdentityNotificationAttributesInput struct { + _ struct{} `type:"structure"` + + // A list of one or more identities. You can specify an identity by using its + // name or by using its Amazon Resource Name (ARN). Examples: user@example.com, + // example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com. + Identities []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s GetIdentityNotificationAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityNotificationAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetIdentityNotificationAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetIdentityNotificationAttributesInput"} + if s.Identities == nil { + invalidParams.Add(request.NewErrParamRequired("Identities")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the notification attributes for a list of identities. +type GetIdentityNotificationAttributesOutput struct { + _ struct{} `type:"structure"` + + // A map of Identity to IdentityNotificationAttributes. + NotificationAttributes map[string]*IdentityNotificationAttributes `type:"map" required:"true"` +} + +// String returns the string representation +func (s GetIdentityNotificationAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityNotificationAttributesOutput) GoString() string { + return s.String() +} + +// Represents a request to return the requested sending authorization policies +// for an identity. Sending authorization is an Amazon SES feature that enables +// you to authorize other senders to use your identities. For information, see +// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). +type GetIdentityPoliciesInput struct { + _ struct{} `type:"structure"` + + // The identity for which the policies will be retrieved. You can specify an + // identity by using its name or by using its Amazon Resource Name (ARN). Examples: + // user@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com. + // + // To successfully call this API, you must own the identity. + Identity *string `type:"string" required:"true"` + + // A list of the names of policies to be retrieved. You can retrieve a maximum + // of 20 policies at a time. If you do not know the names of the policies that + // are attached to the identity, you can use ListIdentityPolicies. 
+ PolicyNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s GetIdentityPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityPoliciesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetIdentityPoliciesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetIdentityPoliciesInput"} + if s.Identity == nil { + invalidParams.Add(request.NewErrParamRequired("Identity")) + } + if s.PolicyNames == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyNames")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the requested sending authorization policies. +type GetIdentityPoliciesOutput struct { + _ struct{} `type:"structure"` + + // A map of policy names to policies. + Policies map[string]*string `type:"map" required:"true"` +} + +// String returns the string representation +func (s GetIdentityPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityPoliciesOutput) GoString() string { + return s.String() +} + +// Represents a request to return the Amazon SES verification status of a list +// of identities. For domain identities, this request also returns the verification +// token. For information about verifying identities with Amazon SES, see the +// Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html). +type GetIdentityVerificationAttributesInput struct { + _ struct{} `type:"structure"` + + // A list of identities. + Identities []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s GetIdentityVerificationAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityVerificationAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetIdentityVerificationAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetIdentityVerificationAttributesInput"} + if s.Identities == nil { + invalidParams.Add(request.NewErrParamRequired("Identities")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The Amazon SES verification status of a list of identities. For domain identities, +// this response also contains the verification token. +type GetIdentityVerificationAttributesOutput struct { + _ struct{} `type:"structure"` + + // A map of Identities to IdentityVerificationAttributes objects. 
+ VerificationAttributes map[string]*IdentityVerificationAttributes `type:"map" required:"true"` +} + +// String returns the string representation +func (s GetIdentityVerificationAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityVerificationAttributesOutput) GoString() string { + return s.String() +} + +type GetSendQuotaInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetSendQuotaInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSendQuotaInput) GoString() string { + return s.String() +} + +// Represents your Amazon SES daily sending quota, maximum send rate, and the +// number of emails you have sent in the last 24 hours. +type GetSendQuotaOutput struct { + _ struct{} `type:"structure"` + + // The maximum number of emails the user is allowed to send in a 24-hour interval. + // A value of -1 signifies an unlimited quota. + Max24HourSend *float64 `type:"double"` + + // The maximum number of emails that Amazon SES can accept from the user's account + // per second. + // + // The rate at which Amazon SES accepts the user's messages might be less + // than the maximum send rate. + MaxSendRate *float64 `type:"double"` + + // The number of emails sent during the previous 24 hours. + SentLast24Hours *float64 `type:"double"` +} + +// String returns the string representation +func (s GetSendQuotaOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSendQuotaOutput) GoString() string { + return s.String() +} + +type GetSendStatisticsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetSendStatisticsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSendStatisticsInput) GoString() string { + return s.String() +} + +// Represents a list of data points. This list contains aggregated data from +// the previous two weeks of your sending activity with Amazon SES. +type GetSendStatisticsOutput struct { + _ struct{} `type:"structure"` + + // A list of data points, each of which represents 15 minutes of activity. + SendDataPoints []*SendDataPoint `type:"list"` +} + +// String returns the string representation +func (s GetSendStatisticsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSendStatisticsOutput) GoString() string { + return s.String() +} + +// Represents the DKIM attributes of a verified email address or a domain. +type IdentityDkimAttributes struct { + _ struct{} `type:"structure"` + + // True if DKIM signing is enabled for email sent from the identity; false otherwise. + DkimEnabled *bool `type:"boolean" required:"true"` + + // A set of character strings that represent the domain's identity. Using these + // tokens, you will need to create DNS CNAME records that point to DKIM public + // keys hosted by Amazon SES. Amazon Web Services will eventually detect that + // you have updated your DNS records; this detection process may take up to + // 72 hours. Upon successful detection, Amazon SES will be able to DKIM-sign + // email originating from that domain. (This only applies to domain identities, + // not email address identities.) 
+ // + // For more information about creating DNS records using DKIM tokens, go to + // the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim-dns-records.html). + DkimTokens []*string `type:"list"` + + // Describes whether Amazon SES has successfully verified the DKIM DNS records + // (tokens) published in the domain name's DNS. (This only applies to domain + // identities, not email address identities.) + DkimVerificationStatus *string `type:"string" required:"true" enum:"VerificationStatus"` +} + +// String returns the string representation +func (s IdentityDkimAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IdentityDkimAttributes) GoString() string { + return s.String() +} + +// Represents the custom MAIL FROM domain attributes of a verified identity +// (email address or domain). +type IdentityMailFromDomainAttributes struct { + _ struct{} `type:"structure"` + + // The action that Amazon SES takes if it cannot successfully read the required + // MX record when you send an email. A value of UseDefaultValue indicates that + // if Amazon SES cannot read the required MX record, it uses amazonses.com (or + // a subdomain of that) as the MAIL FROM domain. A value of RejectMessage indicates + // that if Amazon SES cannot read the required MX record, Amazon SES returns + // a MailFromDomainNotVerified error and does not send the email. + // + // The custom MAIL FROM setup states that result in this behavior are Pending, + // Failed, and TemporaryFailure. + BehaviorOnMXFailure *string `type:"string" required:"true" enum:"BehaviorOnMXFailure"` + + // The custom MAIL FROM domain that the identity is configured to use. + MailFromDomain *string `type:"string" required:"true"` + + // The state that indicates whether Amazon SES has successfully read the MX + // record required for custom MAIL FROM domain setup. If the state is Success, + // Amazon SES uses the specified custom MAIL FROM domain when the verified identity + // sends an email. All other states indicate that Amazon SES takes the action + // described by BehaviorOnMXFailure. + MailFromDomainStatus *string `type:"string" required:"true" enum:"CustomMailFromStatus"` +} + +// String returns the string representation +func (s IdentityMailFromDomainAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IdentityMailFromDomainAttributes) GoString() string { + return s.String() +} + +// Represents the notification attributes of an identity, including whether +// an identity has Amazon Simple Notification Service (Amazon SNS) topics set +// for bounce, complaint, and/or delivery notifications, and whether feedback +// forwarding is enabled for bounce and complaint notifications. +type IdentityNotificationAttributes struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic where Amazon SES will + // publish bounce notifications. + BounceTopic *string `type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic where Amazon SES will + // publish complaint notifications. + ComplaintTopic *string `type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic where Amazon SES will + // publish delivery notifications. + DeliveryTopic *string `type:"string" required:"true"` + + // Describes whether Amazon SES will forward bounce and complaint notifications + // as email. 
true indicates that Amazon SES will forward bounce and complaint + // notifications as email, while false indicates that bounce and complaint notifications + // will be published only to the specified bounce and complaint Amazon SNS topics. + ForwardingEnabled *bool `type:"boolean" required:"true"` + + // Describes whether Amazon SES includes the original email headers in Amazon + // SNS notifications of type Bounce. A value of true specifies that Amazon SES + // will include headers in bounce notifications, and a value of false specifies + // that Amazon SES will not include headers in bounce notifications. + HeadersInBounceNotificationsEnabled *bool `type:"boolean"` + + // Describes whether Amazon SES includes the original email headers in Amazon + // SNS notifications of type Complaint. A value of true specifies that Amazon + // SES will include headers in complaint notifications, and a value of false + // specifies that Amazon SES will not include headers in complaint notifications. + HeadersInComplaintNotificationsEnabled *bool `type:"boolean"` + + // Describes whether Amazon SES includes the original email headers in Amazon + // SNS notifications of type Delivery. A value of true specifies that Amazon + // SES will include headers in delivery notifications, and a value of false + // specifies that Amazon SES will not include headers in delivery notifications. + HeadersInDeliveryNotificationsEnabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s IdentityNotificationAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IdentityNotificationAttributes) GoString() string { + return s.String() +} + +// Represents the verification attributes of a single identity. +type IdentityVerificationAttributes struct { + _ struct{} `type:"structure"` + + // The verification status of the identity: "Pending", "Success", "Failed", + // or "TemporaryFailure". + VerificationStatus *string `type:"string" required:"true" enum:"VerificationStatus"` + + // The verification token for a domain identity. Null for email address identities. + VerificationToken *string `type:"string"` +} + +// String returns the string representation +func (s IdentityVerificationAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IdentityVerificationAttributes) GoString() string { + return s.String() +} + +// When included in a receipt rule, this action calls an AWS Lambda function +// and, optionally, publishes a notification to Amazon Simple Notification Service +// (Amazon SNS). +// +// To enable Amazon SES to call your AWS Lambda function or to publish to an +// Amazon SNS topic of another account, Amazon SES must have permission to access +// those resources. For information about giving permissions, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). +// +// For information about using AWS Lambda actions in receipt rules, see the +// Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-lambda.html). +type LambdaAction struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the AWS Lambda function. An example of + // an AWS Lambda function ARN is arn:aws:lambda:us-west-2:account-id:function:MyFunction. 
+ // For more information about AWS Lambda, see the AWS Lambda Developer Guide + // (http://docs.aws.amazon.com/lambda/latest/dg/welcome.html). + FunctionArn *string `type:"string" required:"true"` + + // The invocation type of the AWS Lambda function. An invocation type of RequestResponse + // means that the execution of the function will immediately result in a response, + // and a value of Event means that the function will be invoked asynchronously. + // The default value is Event. For information about AWS Lambda invocation types, + // see the AWS Lambda Developer Guide (http://docs.aws.amazon.com/lambda/latest/dg/API_Invoke.html). + // + // There is a 30-second timeout on RequestResponse invocations. You should + // use Event invocation in most cases. Use RequestResponse only when you want + // to make a mail flow decision, such as whether to stop the receipt rule or + // the receipt rule set. + InvocationType *string `type:"string" enum:"InvocationType"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the + // Lambda action is taken. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. + // For more information about Amazon SNS topics, see the Amazon SNS Developer + // Guide (http://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html). + TopicArn *string `type:"string"` +} + +// String returns the string representation +func (s LambdaAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LambdaAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LambdaAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LambdaAction"} + if s.FunctionArn == nil { + invalidParams.Add(request.NewErrParamRequired("FunctionArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents a request to return a list of all identities (email addresses +// and domains) that you have attempted to verify under your AWS account, regardless +// of verification status. +type ListIdentitiesInput struct { + _ struct{} `type:"structure"` + + // The type of the identities to list. Possible values are "EmailAddress" and + // "Domain". If this parameter is omitted, then all identities will be listed. + IdentityType *string `type:"string" enum:"IdentityType"` + + // The maximum number of identities per page. Possible values are 1-1000 inclusive. + MaxItems *int64 `type:"integer"` + + // The token to use for pagination. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListIdentitiesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListIdentitiesInput) GoString() string { + return s.String() +} + +// A list of all identities that you have attempted to verify under your AWS +// account, regardless of verification status. +type ListIdentitiesOutput struct { + _ struct{} `type:"structure"` + + // A list of identities. + Identities []*string `type:"list" required:"true"` + + // The token used for pagination. 
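+ //
+ // Editor's sketch, not generated code: a manual pagination loop that feeds
+ // NextToken back into the next request (the generated ListIdentitiesPages
+ // helper wraps the same pattern); svc is a placeholder client:
+ //
+ //    in := &ListIdentitiesInput{}
+ //    for {
+ //        out, err := svc.ListIdentities(in)
+ //        if err != nil {
+ //            break
+ //        }
+ //        // ... consume out.Identities ...
+ //        if out.NextToken == nil {
+ //            break
+ //        }
+ //        in.NextToken = out.NextToken
+ //    }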
+ NextToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListIdentitiesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListIdentitiesOutput) GoString() string {
+ return s.String()
+}
+
+// Represents a request to return a list of sending authorization policies that
+// are attached to an identity. Sending authorization is an Amazon SES feature
+// that enables you to authorize other senders to use your identities. For information,
+// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html).
+type ListIdentityPoliciesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The identity that is associated with the policy for which the policies will
+ // be listed. You can specify an identity by using its name or by using its
+ // Amazon Resource Name (ARN). Examples: user@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com.
+ //
+ // To successfully call this API, you must own the identity.
+ Identity *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListIdentityPoliciesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListIdentityPoliciesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListIdentityPoliciesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListIdentityPoliciesInput"}
+ if s.Identity == nil {
+ invalidParams.Add(request.NewErrParamRequired("Identity"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// A list of names of sending authorization policies that apply to an identity.
+type ListIdentityPoliciesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of names of policies that apply to the specified identity.
+ PolicyNames []*string `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s ListIdentityPoliciesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListIdentityPoliciesOutput) GoString() string {
+ return s.String()
+}
+
+// Represents a request to list the IP address filters that exist under your
+// AWS account. You use IP address filters when you receive email with Amazon
+// SES. For more information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html).
+type ListReceiptFiltersInput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s ListReceiptFiltersInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListReceiptFiltersInput) GoString() string {
+ return s.String()
+}
+
+// A list of IP address filters that exist under your AWS account.
+type ListReceiptFiltersOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of IP address filter data structures, each of which consists of a
+ // name, an IP address range, and whether to allow or block mail from it.
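+ //
+ // Editor's sketch, not generated code; svc is a placeholder client:
+ //
+ //    out, err := svc.ListReceiptFilters(&ListReceiptFiltersInput{})
+ //    if err == nil {
+ //        for _, f := range out.Filters {
+ //            fmt.Println(*f.Name, *f.IpFilter.Cidr, *f.IpFilter.Policy)
+ //        }
+ //    }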
+ Filters []*ReceiptFilter `type:"list"`
+}
+
+// String returns the string representation
+func (s ListReceiptFiltersOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListReceiptFiltersOutput) GoString() string {
+ return s.String()
+}
+
+// Represents a request to list the receipt rule sets that exist under your
+// AWS account. You use receipt rule sets to receive email with Amazon SES.
+// For more information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html).
+type ListReceiptRuleSetsInput struct {
+ _ struct{} `type:"structure"`
+
+ // A token returned from a previous call to ListReceiptRuleSets to indicate
+ // the position in the receipt rule set list.
+ NextToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListReceiptRuleSetsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListReceiptRuleSetsInput) GoString() string {
+ return s.String()
+}
+
+// A list of receipt rule sets that exist under your AWS account.
+type ListReceiptRuleSetsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A token indicating that there are additional receipt rule sets available
+ // to be listed. Pass this token to successive calls of ListReceiptRuleSets
+ // to retrieve up to 100 receipt rule sets at a time.
+ NextToken *string `type:"string"`
+
+ // The metadata for each receipt rule set, which consists of the rule set name
+ // and the timestamp of when the rule set was created.
+ RuleSets []*ReceiptRuleSetMetadata `type:"list"`
+}
+
+// String returns the string representation
+func (s ListReceiptRuleSetsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListReceiptRuleSetsOutput) GoString() string {
+ return s.String()
+}
+
+type ListVerifiedEmailAddressesInput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s ListVerifiedEmailAddressesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListVerifiedEmailAddressesInput) GoString() string {
+ return s.String()
+}
+
+// A list of email addresses that you have verified with Amazon SES under your
+// AWS account.
+type ListVerifiedEmailAddressesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of email addresses that have been verified.
+ VerifiedEmailAddresses []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s ListVerifiedEmailAddressesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListVerifiedEmailAddressesOutput) GoString() string {
+ return s.String()
+}
+
+// Represents the message to be sent, composed of a subject and a body.
+type Message struct {
+ _ struct{} `type:"structure"`
+
+ // The message body.
+ Body *Body `type:"structure" required:"true"`
+
+ // The subject of the message: A short summary of the content, which will appear
+ // in the recipient's inbox.
+ Subject *Content `type:"structure" required:"true"` +} + +// String returns the string representation +func (s Message) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Message) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Message) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Message"} + if s.Body == nil { + invalidParams.Add(request.NewErrParamRequired("Body")) + } + if s.Subject == nil { + invalidParams.Add(request.NewErrParamRequired("Subject")) + } + if s.Body != nil { + if err := s.Body.Validate(); err != nil { + invalidParams.AddNested("Body", err.(request.ErrInvalidParams)) + } + } + if s.Subject != nil { + if err := s.Subject.Validate(); err != nil { + invalidParams.AddNested("Subject", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Message-related information to include in the Delivery Status Notification +// (DSN) when an email that Amazon SES receives on your behalf bounces. +// +// For information about receiving email through Amazon SES, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email.html). +type MessageDsn struct { + _ struct{} `type:"structure"` + + // When the message was received by the reporting mail transfer agent (MTA), + // in RFC 822 (https://www.ietf.org/rfc/rfc0822.txt) date-time format. + ArrivalDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Additional X-headers to include in the DSN. + ExtensionFields []*ExtensionField `type:"list"` + + // The reporting MTA that attempted to deliver the message, formatted as specified + // in RFC 3464 (https://tools.ietf.org/html/rfc3464) (mta-name-type; mta-name). + // The default value is dns; inbound-smtp.[region].amazonaws.com. + ReportingMta *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s MessageDsn) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MessageDsn) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MessageDsn) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MessageDsn"} + if s.ReportingMta == nil { + invalidParams.Add(request.NewErrParamRequired("ReportingMta")) + } + if s.ExtensionFields != nil { + for i, v := range s.ExtensionFields { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ExtensionFields", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents a request to add or update a sending authorization policy for +// an identity. Sending authorization is an Amazon SES feature that enables +// you to authorize other senders to use your identities. For information, see +// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). +type PutIdentityPolicyInput struct { + _ struct{} `type:"structure"` + + // The identity to which the policy will apply. You can specify an identity + // by using its name or by using its Amazon Resource Name (ARN). Examples: user@example.com, + // example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com. 
+ // + // To successfully call this API, you must own the identity. + Identity *string `type:"string" required:"true"` + + // The text of the policy in JSON format. The policy cannot exceed 4 KB. + // + // For information about the syntax of sending authorization policies, see + // the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization-policies.html). + Policy *string `min:"1" type:"string" required:"true"` + + // The name of the policy. + // + // The policy name cannot exceed 64 characters and can only include alphanumeric + // characters, dashes, and underscores. + PolicyName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutIdentityPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutIdentityPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutIdentityPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutIdentityPolicyInput"} + if s.Identity == nil { + invalidParams.Add(request.NewErrParamRequired("Identity")) + } + if s.Policy == nil { + invalidParams.Add(request.NewErrParamRequired("Policy")) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An empty element returned on a successful request. +type PutIdentityPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutIdentityPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutIdentityPolicyOutput) GoString() string { + return s.String() +} + +// Represents the raw data of the message. +type RawMessage struct { + _ struct{} `type:"structure"` + + // The raw data of the message. The client must ensure that the message format + // complies with Internet email standards regarding email header fields, MIME + // types, MIME encoding, and base64 encoding (if necessary). + // + // The To:, CC:, and BCC: headers in the raw message can contain a group list. + // + // If you are using SendRawEmail with sending authorization, you can include + // X-headers in the raw message to specify the "Source," "From," and "Return-Path" + // addresses. For more information, see the documentation for SendRawEmail. + // + // Do not include these X-headers in the DKIM signature, because they are + // removed by Amazon SES before sending the email. + // + // For more information, go to the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-raw.html). + // + // Data is automatically base64 encoded/decoded by the SDK. + Data []byte `type:"blob" required:"true"` +} + +// String returns the string representation +func (s RawMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RawMessage) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
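+//
+// Editor's note: a minimal sketch, not generated code. Data must carry the
+// complete raw MIME message; the SDK base64-encodes it on the wire. The
+// message text is a placeholder:
+//
+//    raw := &RawMessage{Data: []byte("From: a@example.com\r\nTo: b@example.com\r\nSubject: Hi\r\n\r\nHello.")}
+//    if err := raw.Validate(); err != nil {
+//        // Data was not set
+//    }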
+func (s *RawMessage) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RawMessage"} + if s.Data == nil { + invalidParams.Add(request.NewErrParamRequired("Data")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An action that Amazon SES can take when it receives an email on behalf of +// one or more email addresses or domains that you own. An instance of this +// data type can represent only one action. +// +// For information about setting up receipt rules, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rules.html). +type ReceiptAction struct { + _ struct{} `type:"structure"` + + // Adds a header to the received email. + AddHeaderAction *AddHeaderAction `type:"structure"` + + // Rejects the received email by returning a bounce response to the sender and, + // optionally, publishes a notification to Amazon Simple Notification Service + // (Amazon SNS). + BounceAction *BounceAction `type:"structure"` + + // Calls an AWS Lambda function, and optionally, publishes a notification to + // Amazon SNS. + LambdaAction *LambdaAction `type:"structure"` + + // Saves the received message to an Amazon Simple Storage Service (Amazon S3) + // bucket and, optionally, publishes a notification to Amazon SNS. + S3Action *S3Action `type:"structure"` + + // Publishes the email content within a notification to Amazon SNS. + SNSAction *SNSAction `type:"structure"` + + // Terminates the evaluation of the receipt rule set and optionally publishes + // a notification to Amazon SNS. + StopAction *StopAction `type:"structure"` + + // Calls Amazon WorkMail and, optionally, publishes a notification to Amazon + // SNS. + WorkmailAction *WorkmailAction `type:"structure"` +} + +// String returns the string representation +func (s ReceiptAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReceiptAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ReceiptAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReceiptAction"} + if s.AddHeaderAction != nil { + if err := s.AddHeaderAction.Validate(); err != nil { + invalidParams.AddNested("AddHeaderAction", err.(request.ErrInvalidParams)) + } + } + if s.BounceAction != nil { + if err := s.BounceAction.Validate(); err != nil { + invalidParams.AddNested("BounceAction", err.(request.ErrInvalidParams)) + } + } + if s.LambdaAction != nil { + if err := s.LambdaAction.Validate(); err != nil { + invalidParams.AddNested("LambdaAction", err.(request.ErrInvalidParams)) + } + } + if s.S3Action != nil { + if err := s.S3Action.Validate(); err != nil { + invalidParams.AddNested("S3Action", err.(request.ErrInvalidParams)) + } + } + if s.SNSAction != nil { + if err := s.SNSAction.Validate(); err != nil { + invalidParams.AddNested("SNSAction", err.(request.ErrInvalidParams)) + } + } + if s.StopAction != nil { + if err := s.StopAction.Validate(); err != nil { + invalidParams.AddNested("StopAction", err.(request.ErrInvalidParams)) + } + } + if s.WorkmailAction != nil { + if err := s.WorkmailAction.Validate(); err != nil { + invalidParams.AddNested("WorkmailAction", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A receipt IP address filter enables you to specify whether to accept or reject +// mail originating from an IP address or range of IP addresses. +// +// For information about setting up IP address filters, see the Amazon SES +// Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-ip-filters.html). +type ReceiptFilter struct { + _ struct{} `type:"structure"` + + // A structure that provides the IP addresses to block or allow, and whether + // to block or allow incoming mail from them. + IpFilter *ReceiptIpFilter `type:"structure" required:"true"` + + // The name of the IP address filter. The name must: + // + // Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores + // (_), or dashes (-). + // + // Start and end with a letter or number. + // + // Contain less than 64 characters. + Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ReceiptFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReceiptFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReceiptFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReceiptFilter"} + if s.IpFilter == nil { + invalidParams.Add(request.NewErrParamRequired("IpFilter")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.IpFilter != nil { + if err := s.IpFilter.Validate(); err != nil { + invalidParams.AddNested("IpFilter", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A receipt IP address filter enables you to specify whether to accept or reject +// mail originating from an IP address or range of IP addresses. +// +// For information about setting up IP address filters, see the Amazon SES +// Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-ip-filters.html). 
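+//
+// Editor's sketch, not generated code: blocking a CIDR range via the
+// CreateReceiptFilter operation defined elsewhere in this package; svc and
+// the names are placeholders:
+//
+//    _, err := svc.CreateReceiptFilter(&CreateReceiptFilterInput{
+//        Filter: &ReceiptFilter{
+//            Name: aws.String("block-10-0-0"),
+//            IpFilter: &ReceiptIpFilter{
+//                Cidr:   aws.String("10.0.0.0/24"),
+//                Policy: aws.String("Block"),
+//            },
+//        },
+//    })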
+type ReceiptIpFilter struct {
+ _ struct{} `type:"structure"`
+
+ // A single IP address or a range of IP addresses that you want to block or
+ // allow, specified in Classless Inter-Domain Routing (CIDR) notation. An example
+ // of a single IP address is 10.0.0.1. An example of a range of IP addresses
+ // is 10.0.0.1/24. For more information about CIDR notation, see RFC 2317 (https://tools.ietf.org/html/rfc2317).
+ Cidr *string `type:"string" required:"true"`
+
+ // Indicates whether to block or allow incoming mail from the specified IP addresses.
+ Policy *string `type:"string" required:"true" enum:"ReceiptFilterPolicy"`
+}
+
+// String returns the string representation
+func (s ReceiptIpFilter) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReceiptIpFilter) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReceiptIpFilter) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ReceiptIpFilter"}
+ if s.Cidr == nil {
+ invalidParams.Add(request.NewErrParamRequired("Cidr"))
+ }
+ if s.Policy == nil {
+ invalidParams.Add(request.NewErrParamRequired("Policy"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Receipt rules enable you to specify which actions Amazon SES should take
+// when it receives mail on behalf of one or more email addresses or domains
+// that you own.
+//
+// Each receipt rule defines a set of email addresses or domains to which it
+// applies. If the email addresses or domains match at least one recipient address
+// of the message, Amazon SES executes all of the receipt rule's actions on
+// the message.
+//
+// For information about setting up receipt rules, see the Amazon SES Developer
+// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rules.html).
+type ReceiptRule struct {
+ _ struct{} `type:"structure"`
+
+ // An ordered list of actions to perform on messages that match at least one
+ // of the recipient email addresses or domains specified in the receipt rule.
+ Actions []*ReceiptAction `type:"list"`
+
+ // If true, the receipt rule is active. The default value is false.
+ Enabled *bool `type:"boolean"`
+
+ // The name of the receipt rule. The name must:
+ //
+ // Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores
+ // (_), or dashes (-).
+ //
+ // Start and end with a letter or number.
+ //
+ // Contain less than 64 characters.
+ Name *string `type:"string" required:"true"`
+
+ // The recipient domains and email addresses to which the receipt rule applies.
+ // If this field is not specified, this rule will match all recipients under
+ // all verified domains.
+ Recipients []*string `type:"list"`
+
+ // If true, then messages to which this receipt rule applies are scanned for
+ // spam and viruses. The default value is false.
+ ScanEnabled *bool `type:"boolean"`
+
+ // Specifies whether Amazon SES should require that incoming email is delivered
+ // over a connection encrypted with Transport Layer Security (TLS). If this
+ // parameter is set to Require, Amazon SES will bounce emails that are not received
+ // over TLS. The default is Optional.
+ TlsPolicy *string `type:"string" enum:"TlsPolicy"` +} + +// String returns the string representation +func (s ReceiptRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReceiptRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReceiptRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReceiptRule"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Actions != nil { + for i, v := range s.Actions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Actions", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Information about a receipt rule set. +// +// A receipt rule set is a collection of rules that specify what Amazon SES +// should do with mail it receives on behalf of your account's verified domains. +// +// For information about setting up receipt rule sets, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rule-set.html). +type ReceiptRuleSetMetadata struct { + _ struct{} `type:"structure"` + + // The date and time the receipt rule set was created. + CreatedTimestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The name of the receipt rule set. The name must: + // + // Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores + // (_), or dashes (-). + // + // Start and end with a letter or number. + // + // Contain less than 64 characters. + Name *string `type:"string"` +} + +// String returns the string representation +func (s ReceiptRuleSetMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReceiptRuleSetMetadata) GoString() string { + return s.String() +} + +// Recipient-related information to include in the Delivery Status Notification +// (DSN) when an email that Amazon SES receives on your behalf bounces. +// +// For information about receiving email through Amazon SES, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email.html). +type RecipientDsnFields struct { + _ struct{} `type:"structure"` + + // The action performed by the reporting mail transfer agent (MTA) as a result + // of its attempt to deliver the message to the recipient address. This is required + // by RFC 3464 (https://tools.ietf.org/html/rfc3464). + Action *string `type:"string" required:"true" enum:"DsnAction"` + + // An extended explanation of what went wrong; this is usually an SMTP response. + // See RFC 3463 (https://tools.ietf.org/html/rfc3463) for the correct formatting + // of this parameter. + DiagnosticCode *string `type:"string"` + + // Additional X-headers to include in the DSN. + ExtensionFields []*ExtensionField `type:"list"` + + // The email address to which the message was ultimately delivered. This corresponds + // to the Final-Recipient in the DSN. If not specified, FinalRecipient will + // be set to the Recipient specified in the BouncedRecipientInfo structure. + // Either FinalRecipient or the recipient in BouncedRecipientInfo must be a + // recipient of the original bounced message. 
+ // + // Do not prepend the FinalRecipient email address with rfc 822;, as described + // in RFC 3798 (https://tools.ietf.org/html/rfc3798). + FinalRecipient *string `type:"string"` + + // The time the final delivery attempt was made, in RFC 822 (https://www.ietf.org/rfc/rfc0822.txt) + // date-time format. + LastAttemptDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The MTA to which the remote MTA attempted to deliver the message, formatted + // as specified in RFC 3464 (https://tools.ietf.org/html/rfc3464) (mta-name-type; + // mta-name). This parameter typically applies only to propagating synchronous + // bounces. + RemoteMta *string `type:"string"` + + // The status code that indicates what went wrong. This is required by RFC 3464 + // (https://tools.ietf.org/html/rfc3464). + Status *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RecipientDsnFields) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecipientDsnFields) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RecipientDsnFields) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RecipientDsnFields"} + if s.Action == nil { + invalidParams.Add(request.NewErrParamRequired("Action")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.ExtensionFields != nil { + for i, v := range s.ExtensionFields { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ExtensionFields", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents a request to reorder the receipt rules within a receipt rule set. +// You use receipt rule sets to receive email with Amazon SES. For more information, +// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +type ReorderReceiptRuleSetInput struct { + _ struct{} `type:"structure"` + + // A list of the specified receipt rule set's receipt rules in the order that + // you want to put them. + RuleNames []*string `type:"list" required:"true"` + + // The name of the receipt rule set to reorder. + RuleSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ReorderReceiptRuleSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReorderReceiptRuleSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReorderReceiptRuleSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReorderReceiptRuleSetInput"} + if s.RuleNames == nil { + invalidParams.Add(request.NewErrParamRequired("RuleNames")) + } + if s.RuleSetName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An empty element returned on a successful request. 
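+//
+// A minimal call sketch (editor's illustration, not generated code; the rule
+// set and rule names are hypothetical and error handling is elided):
+//
+//	svc := ses.New(session.New())
+//	_, err := svc.ReorderReceiptRuleSet(&ses.ReorderReceiptRuleSetInput{
+//		RuleSetName: aws.String("my-rule-set"),
+//		// RuleNames must list every rule in the set, in the desired order.
+//		RuleNames: []*string{
+//			aws.String("rule-one"),
+//			aws.String("rule-two"),
+//		},
+//	})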
+type ReorderReceiptRuleSetOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s ReorderReceiptRuleSetOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReorderReceiptRuleSetOutput) GoString() string {
+	return s.String()
+}
+
+// When included in a receipt rule, this action saves the received message to
+// an Amazon Simple Storage Service (Amazon S3) bucket and, optionally, publishes
+// a notification to Amazon Simple Notification Service (Amazon SNS).
+//
+// To write emails to your Amazon S3 bucket, to use an AWS KMS key to encrypt
+// your emails, or to publish to an Amazon SNS topic of another account, Amazon
+// SES must have permission to access those resources. For information about
+// giving permissions, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html).
+//
+// When you save your emails to an Amazon S3 bucket, the maximum email size
+// (including headers) is 30 MB. Emails larger than that will bounce.
+//
+// For information about specifying Amazon S3 actions in receipt rules, see
+// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-s3.html).
+type S3Action struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the Amazon S3 bucket to which to save the received email.
+	BucketName *string `type:"string" required:"true"`
+
+	// The customer master key that Amazon SES should use to encrypt your emails
+	// before saving them to the Amazon S3 bucket. You can use the default master
+	// key or a custom master key you created in AWS KMS as follows:
+	//
+	// To use the default master key, provide an ARN in the form of arn:aws:kms:REGION:ACCOUNT-ID-WITHOUT-HYPHENS:alias/aws/ses.
+	// For example, if your AWS account ID is 123456789012 and you want to use the
+	// default master key in the US West (Oregon) region, the ARN of the default
+	// master key would be arn:aws:kms:us-west-2:123456789012:alias/aws/ses. If
+	// you use the default master key, you don't need to perform any extra steps
+	// to give Amazon SES permission to use the key.
+	//
+	// To use a custom master key you created in AWS KMS, provide the ARN of
+	// the master key and ensure that you add a statement to your key's policy to
+	// give Amazon SES permission to use it. For more information about giving permissions,
+	// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html).
+	//
+	// For more information about key policies, see the AWS KMS Developer Guide
+	// (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html). If
+	// you do not specify a master key, Amazon SES will not encrypt your emails.
+	//
+	// Your mail is encrypted by Amazon SES using the Amazon S3 encryption client
+	// before the mail is submitted to Amazon S3 for storage. It is not encrypted
+	// using Amazon S3 server-side encryption. This means that you must use the
+	// Amazon S3 encryption client to decrypt the email after retrieving it from
+	// Amazon S3, as the service has no access to use your AWS KMS keys for decryption.
+	// This encryption client is currently available with the AWS Java SDK (http://aws.amazon.com/sdk-for-java/)
+	// and AWS Ruby SDK (http://aws.amazon.com/sdk-for-ruby/) only.
+	// For more information
+	// about client-side encryption using AWS KMS master keys, see the Amazon S3
+	// Developer Guide (http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html).
+	KmsKeyArn *string `type:"string"`
+
+	// The key prefix of the Amazon S3 bucket. The key prefix is similar to a directory
+	// name that enables you to store similar data under the same directory in a
+	// bucket.
+	ObjectKeyPrefix *string `type:"string"`
+
+	// The ARN of the Amazon SNS topic to notify when the message is saved to the
+	// Amazon S3 bucket. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic.
+	// For more information about Amazon SNS topics, see the Amazon SNS Developer
+	// Guide (http://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html).
+	TopicArn *string `type:"string"`
+}
+
+// String returns the string representation
+func (s S3Action) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s S3Action) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *S3Action) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "S3Action"}
+	if s.BucketName == nil {
+		invalidParams.Add(request.NewErrParamRequired("BucketName"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// When included in a receipt rule, this action publishes a notification to
+// Amazon Simple Notification Service (Amazon SNS). This action includes a complete
+// copy of the email content in the Amazon SNS notifications. Amazon SNS notifications
+// for all other actions simply provide information about the email. They do
+// not include the email content itself.
+//
+// If you own the Amazon SNS topic, you don't need to do anything to give Amazon
+// SES permission to publish emails to it. However, if you don't own the Amazon
+// SNS topic, you need to attach a policy to the topic to give Amazon SES permissions
+// to access it. For information about giving permissions, see the Amazon SES
+// Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html).
+//
+// You can only publish emails that are 150 KB or less (including the header)
+// to Amazon SNS. Larger emails will bounce. If you anticipate emails larger
+// than 150 KB, use the S3 action instead.
+//
+// For information about using a receipt rule to publish an Amazon SNS notification,
+// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-sns.html).
+type SNSAction struct {
+	_ struct{} `type:"structure"`
+
+	// The encoding to use for the email within the Amazon SNS notification. UTF-8
+	// is easier to use, but may not preserve all special characters when a message
+	// was encoded with a different encoding format. Base64 preserves all special
+	// characters. The default value is UTF-8.
+	Encoding *string `type:"string" enum:"SNSActionEncoding"`
+
+	// The Amazon Resource Name (ARN) of the Amazon SNS topic to notify. An example
+	// of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic.
+	// For more information about Amazon SNS topics, see the Amazon SNS Developer
+	// Guide (http://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html).
+ TopicArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SNSAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SNSAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SNSAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SNSAction"} + if s.TopicArn == nil { + invalidParams.Add(request.NewErrParamRequired("TopicArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents a request to send a bounce message to the sender of an email you +// received through Amazon SES. +type SendBounceInput struct { + _ struct{} `type:"structure"` + + // The address to use in the "From" header of the bounce message. This must + // be an identity that you have verified with Amazon SES. + BounceSender *string `type:"string" required:"true"` + + // This parameter is used only for sending authorization. It is the ARN of the + // identity that is associated with the sending authorization policy that permits + // you to use the address in the "From" header of the bounce. For more information + // about sending authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). + BounceSenderArn *string `type:"string"` + + // A list of recipients of the bounced message, including the information required + // to create the Delivery Status Notifications (DSNs) for the recipients. You + // must specify at least one BouncedRecipientInfo in the list. + BouncedRecipientInfoList []*BouncedRecipientInfo `type:"list" required:"true"` + + // Human-readable text for the bounce message to explain the failure. If not + // specified, the text will be auto-generated based on the bounced recipient + // information. + Explanation *string `type:"string"` + + // Message-related DSN fields. If not specified, Amazon SES will choose the + // values. + MessageDsn *MessageDsn `type:"structure"` + + // The message ID of the message to be bounced. + OriginalMessageId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SendBounceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendBounceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SendBounceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SendBounceInput"} + if s.BounceSender == nil { + invalidParams.Add(request.NewErrParamRequired("BounceSender")) + } + if s.BouncedRecipientInfoList == nil { + invalidParams.Add(request.NewErrParamRequired("BouncedRecipientInfoList")) + } + if s.OriginalMessageId == nil { + invalidParams.Add(request.NewErrParamRequired("OriginalMessageId")) + } + if s.BouncedRecipientInfoList != nil { + for i, v := range s.BouncedRecipientInfoList { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "BouncedRecipientInfoList", i), err.(request.ErrInvalidParams)) + } + } + } + if s.MessageDsn != nil { + if err := s.MessageDsn.Validate(); err != nil { + invalidParams.AddNested("MessageDsn", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents a unique message ID. 
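+//
+// A minimal SendBounce sketch (editor's illustration; the addresses and message
+// ID are hypothetical, error handling is elided, and the BouncedRecipientInfo
+// fields shown are assumed from the SDK's type definition elsewhere in this file):
+//
+//	svc := ses.New(session.New())
+//	resp, err := svc.SendBounce(&ses.SendBounceInput{
+//		BounceSender:      aws.String("mailer-daemon@example.com"),
+//		OriginalMessageId: aws.String("original-message-id"),
+//		BouncedRecipientInfoList: []*ses.BouncedRecipientInfo{
+//			{
+//				Recipient:  aws.String("recipient@example.com"),
+//				BounceType: aws.String(ses.BounceTypeDoesNotExist),
+//			},
+//		},
+//	})
+//	// On success, resp.MessageId holds the message ID of the bounce message.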
+type SendBounceOutput struct { + _ struct{} `type:"structure"` + + // The message ID of the bounce message. + MessageId *string `type:"string"` +} + +// String returns the string representation +func (s SendBounceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendBounceOutput) GoString() string { + return s.String() +} + +// Represents sending statistics data. Each SendDataPoint contains statistics +// for a 15-minute period of sending activity. +type SendDataPoint struct { + _ struct{} `type:"structure"` + + // Number of emails that have bounced. + Bounces *int64 `type:"long"` + + // Number of unwanted emails that were rejected by recipients. + Complaints *int64 `type:"long"` + + // Number of emails that have been enqueued for sending. + DeliveryAttempts *int64 `type:"long"` + + // Number of emails rejected by Amazon SES. + Rejects *int64 `type:"long"` + + // Time of the data point. + Timestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s SendDataPoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendDataPoint) GoString() string { + return s.String() +} + +// Represents a request to send a single formatted email using Amazon SES. For +// more information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-formatted.html). +type SendEmailInput struct { + _ struct{} `type:"structure"` + + // The destination for this email, composed of To:, CC:, and BCC: fields. + Destination *Destination `type:"structure" required:"true"` + + // The message to be sent. + Message *Message `type:"structure" required:"true"` + + // The reply-to email address(es) for the message. If the recipient replies + // to the message, each reply-to address will receive the reply. + ReplyToAddresses []*string `type:"list"` + + // The email address to which bounces and complaints are to be forwarded when + // feedback forwarding is enabled. If the message cannot be delivered to the + // recipient, then an error message will be returned from the recipient's ISP; + // this message will then be forwarded to the email address specified by the + // ReturnPath parameter. The ReturnPath parameter is never overwritten. This + // email address must be either individually verified with Amazon SES, or from + // a domain that has been verified with Amazon SES. + ReturnPath *string `type:"string"` + + // This parameter is used only for sending authorization. It is the ARN of the + // identity that is associated with the sending authorization policy that permits + // you to use the email address specified in the ReturnPath parameter. + // + // For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) + // attaches a policy to it that authorizes you to use feedback@example.com, + // then you would specify the ReturnPathArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, + // and the ReturnPath to be feedback@example.com. + // + // For more information about sending authorization, see the Amazon SES Developer + // Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). + ReturnPathArn *string `type:"string"` + + // The email address that is sending the email. 
+	// This email address must be either
+	// individually verified with Amazon SES, or from a domain that has been verified
+	// with Amazon SES. For information about verifying identities, see the Amazon
+	// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html).
+	//
+	// If you are sending on behalf of another user and have been permitted to
+	// do so by a sending authorization policy, then you must also specify the SourceArn
+	// parameter. For more information about sending authorization, see the Amazon
+	// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html).
+	//
+	// In all cases, the email address must be 7-bit ASCII. If the text must contain
+	// any other characters, then you must use MIME encoded-word syntax (RFC 2047)
+	// instead of a literal string. MIME encoded-word syntax uses the following
+	// form: =?charset?encoding?encoded-text?=. For more information, see RFC 2047
+	// (http://tools.ietf.org/html/rfc2047).
+	Source *string `type:"string" required:"true"`
+
+	// This parameter is used only for sending authorization. It is the ARN of the
+	// identity that is associated with the sending authorization policy that permits
+	// you to send for the email address specified in the Source parameter.
+	//
+	// For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com)
+	// attaches a policy to it that authorizes you to send from user@example.com,
+	// then you would specify the SourceArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com,
+	// and the Source to be user@example.com.
+	//
+	// For more information about sending authorization, see the Amazon SES Developer
+	// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html).
+	SourceArn *string `type:"string"`
+}
+
+// String returns the string representation
+func (s SendEmailInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SendEmailInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SendEmailInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "SendEmailInput"}
+	if s.Destination == nil {
+		invalidParams.Add(request.NewErrParamRequired("Destination"))
+	}
+	if s.Message == nil {
+		invalidParams.Add(request.NewErrParamRequired("Message"))
+	}
+	if s.Source == nil {
+		invalidParams.Add(request.NewErrParamRequired("Source"))
+	}
+	if s.Message != nil {
+		if err := s.Message.Validate(); err != nil {
+			invalidParams.AddNested("Message", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Represents a unique message ID.
+type SendEmailOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The unique message identifier returned from the SendEmail action.
+	MessageId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s SendEmailOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SendEmailOutput) GoString() string {
+	return s.String()
+}
+
+// Represents a request to send a single raw email using Amazon SES. For more
+// information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-raw.html).
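+//
+// A minimal sketch of sending a raw message (editor's illustration; the addresses
+// and content are hypothetical, and error handling is elided):
+//
+//	raw := "From: sender@example.com\r\n" +
+//		"To: recipient@example.com\r\n" +
+//		"Subject: Hello\r\n" +
+//		"MIME-Version: 1.0\r\n" +
+//		"Content-Type: text/plain; charset=us-ascii\r\n\r\n" +
+//		"Hello from Amazon SES."
+//	svc := ses.New(session.New())
+//	_, err := svc.SendRawEmail(&ses.SendRawEmailInput{
+//		RawMessage: &ses.RawMessage{Data: []byte(raw)},
+//	})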
+type SendRawEmailInput struct { + _ struct{} `type:"structure"` + + // A list of destinations for the message, consisting of To:, CC:, and BCC: + // addresses. + Destinations []*string `type:"list"` + + // This parameter is used only for sending authorization. It is the ARN of the + // identity that is associated with the sending authorization policy that permits + // you to specify a particular "From" address in the header of the raw email. + // + // Instead of using this parameter, you can use the X-header X-SES-FROM-ARN + // in the raw message of the email. If you use both the FromArn parameter and + // the corresponding X-header, Amazon SES uses the value of the FromArn parameter. + // + // For information about when to use this parameter, see the description of + // SendRawEmail in this guide, or see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization-delegate-sender-tasks-email.html). + FromArn *string `type:"string"` + + // The raw text of the message. The client is responsible for ensuring the following: + // + // Message must contain a header and a body, separated by a blank line. + // + // All required header fields must be present. + // + // Each part of a multipart MIME message must be formatted properly. + // + // MIME content types must be among those supported by Amazon SES. For more + // information, go to the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/mime-types.html). + // + // Content must be base64-encoded, if MIME requires it. + RawMessage *RawMessage `type:"structure" required:"true"` + + // This parameter is used only for sending authorization. It is the ARN of the + // identity that is associated with the sending authorization policy that permits + // you to use the email address specified in the ReturnPath parameter. + // + // For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) + // attaches a policy to it that authorizes you to use feedback@example.com, + // then you would specify the ReturnPathArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, + // and the ReturnPath to be feedback@example.com. + // + // Instead of using this parameter, you can use the X-header X-SES-RETURN-PATH-ARN + // in the raw message of the email. If you use both the ReturnPathArn parameter + // and the corresponding X-header, Amazon SES uses the value of the ReturnPathArn + // parameter. + // + // For information about when to use this parameter, see the description of + // SendRawEmail in this guide, or see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization-delegate-sender-tasks-email.html). + ReturnPathArn *string `type:"string"` + + // The identity's email address. If you do not provide a value for this parameter, + // you must specify a "From" address in the raw text of the message. (You can + // also specify both.) + // + // By default, the string must be 7-bit ASCII. If the text must contain any + // other characters, then you must use MIME encoded-word syntax (RFC 2047) instead + // of a literal string. MIME encoded-word syntax uses the following form: =?charset?encoding?encoded-text?=. + // For more information, see RFC 2047 (http://tools.ietf.org/html/rfc2047). + // + // If you specify the Source parameter and have feedback forwarding enabled, + // then bounces and complaints will be sent to this email address. 
+	// This takes
+	// precedence over any Return-Path header that you might include in the raw
+	// text of the message.
+	Source *string `type:"string"`
+
+	// This parameter is used only for sending authorization. It is the ARN of the
+	// identity that is associated with the sending authorization policy that permits
+	// you to send for the email address specified in the Source parameter.
+	//
+	// For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com)
+	// attaches a policy to it that authorizes you to send from user@example.com,
+	// then you would specify the SourceArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com,
+	// and the Source to be user@example.com.
+	//
+	// Instead of using this parameter, you can use the X-header X-SES-SOURCE-ARN
+	// in the raw message of the email. If you use both the SourceArn parameter
+	// and the corresponding X-header, Amazon SES uses the value of the SourceArn
+	// parameter.
+	//
+	// For information about when to use this parameter, see the description of
+	// SendRawEmail in this guide, or see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization-delegate-sender-tasks-email.html).
+	SourceArn *string `type:"string"`
+}
+
+// String returns the string representation
+func (s SendRawEmailInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SendRawEmailInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SendRawEmailInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "SendRawEmailInput"}
+	if s.RawMessage == nil {
+		invalidParams.Add(request.NewErrParamRequired("RawMessage"))
+	}
+	if s.RawMessage != nil {
+		if err := s.RawMessage.Validate(); err != nil {
+			invalidParams.AddNested("RawMessage", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Represents a unique message ID.
+type SendRawEmailOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The unique message identifier returned from the SendRawEmail action.
+	MessageId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s SendRawEmailOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SendRawEmailOutput) GoString() string {
+	return s.String()
+}
+
+// Represents a request to set a receipt rule set as the active receipt rule
+// set. You use receipt rule sets to receive email with Amazon SES. For more
+// information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html).
+type SetActiveReceiptRuleSetInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the receipt rule set to make active. Setting this value to null
+	// disables all email receiving.
+	RuleSetName *string `type:"string"`
+}
+
+// String returns the string representation
+func (s SetActiveReceiptRuleSetInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetActiveReceiptRuleSetInput) GoString() string {
+	return s.String()
+}
+
+// An empty element returned on a successful request.
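+//
+// A minimal call sketch (editor's illustration; the rule set name is hypothetical):
+//
+//	svc := ses.New(session.New())
+//	// Omitting RuleSetName (leaving it nil) would disable email receiving entirely.
+//	_, err := svc.SetActiveReceiptRuleSet(&ses.SetActiveReceiptRuleSetInput{
+//		RuleSetName: aws.String("my-rule-set"),
+//	})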
+type SetActiveReceiptRuleSetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetActiveReceiptRuleSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetActiveReceiptRuleSetOutput) GoString() string { + return s.String() +} + +// Represents a request to enable or disable Amazon SES Easy DKIM signing for +// an identity. For more information about setting up Easy DKIM, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim.html). +type SetIdentityDkimEnabledInput struct { + _ struct{} `type:"structure"` + + // Sets whether DKIM signing is enabled for an identity. Set to true to enable + // DKIM signing for this identity; false to disable it. + DkimEnabled *bool `type:"boolean" required:"true"` + + // The identity for which DKIM signing should be enabled or disabled. + Identity *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetIdentityDkimEnabledInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityDkimEnabledInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetIdentityDkimEnabledInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetIdentityDkimEnabledInput"} + if s.DkimEnabled == nil { + invalidParams.Add(request.NewErrParamRequired("DkimEnabled")) + } + if s.Identity == nil { + invalidParams.Add(request.NewErrParamRequired("Identity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An empty element returned on a successful request. +type SetIdentityDkimEnabledOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetIdentityDkimEnabledOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityDkimEnabledOutput) GoString() string { + return s.String() +} + +// Represents a request to enable or disable whether Amazon SES forwards you +// bounce and complaint notifications through email. For information about email +// feedback forwarding, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications-via-email.html). +type SetIdentityFeedbackForwardingEnabledInput struct { + _ struct{} `type:"structure"` + + // Sets whether Amazon SES will forward bounce and complaint notifications as + // email. true specifies that Amazon SES will forward bounce and complaint notifications + // as email, in addition to any Amazon SNS topic publishing otherwise specified. + // false specifies that Amazon SES will publish bounce and complaint notifications + // only through Amazon SNS. This value can only be set to false when Amazon + // SNS topics are set for both Bounce and Complaint notification types. + ForwardingEnabled *bool `type:"boolean" required:"true"` + + // The identity for which to set bounce and complaint notification forwarding. + // Examples: user@example.com, example.com. 
+ Identity *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetIdentityFeedbackForwardingEnabledInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityFeedbackForwardingEnabledInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetIdentityFeedbackForwardingEnabledInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetIdentityFeedbackForwardingEnabledInput"} + if s.ForwardingEnabled == nil { + invalidParams.Add(request.NewErrParamRequired("ForwardingEnabled")) + } + if s.Identity == nil { + invalidParams.Add(request.NewErrParamRequired("Identity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An empty element returned on a successful request. +type SetIdentityFeedbackForwardingEnabledOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetIdentityFeedbackForwardingEnabledOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityFeedbackForwardingEnabledOutput) GoString() string { + return s.String() +} + +// Represents a request to set whether Amazon SES includes the original email +// headers in the Amazon SNS notifications of a specified type. For information +// about notifications, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications-via-sns.html). +type SetIdentityHeadersInNotificationsEnabledInput struct { + _ struct{} `type:"structure"` + + // Sets whether Amazon SES includes the original email headers in Amazon SNS + // notifications of the specified notification type. A value of true specifies + // that Amazon SES will include headers in notifications, and a value of false + // specifies that Amazon SES will not include headers in notifications. + // + // This value can only be set when NotificationType is already set to use a + // particular Amazon SNS topic. + Enabled *bool `type:"boolean" required:"true"` + + // The identity for which to enable or disable headers in notifications. Examples: + // user@example.com, example.com. + Identity *string `type:"string" required:"true"` + + // The notification type for which to enable or disable headers in notifications. + NotificationType *string `type:"string" required:"true" enum:"NotificationType"` +} + +// String returns the string representation +func (s SetIdentityHeadersInNotificationsEnabledInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityHeadersInNotificationsEnabledInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetIdentityHeadersInNotificationsEnabledInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetIdentityHeadersInNotificationsEnabledInput"} + if s.Enabled == nil { + invalidParams.Add(request.NewErrParamRequired("Enabled")) + } + if s.Identity == nil { + invalidParams.Add(request.NewErrParamRequired("Identity")) + } + if s.NotificationType == nil { + invalidParams.Add(request.NewErrParamRequired("NotificationType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An empty element returned on a successful request. 
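+//
+// A minimal call sketch (editor's illustration; the identity is hypothetical
+// and error handling is elided):
+//
+//	svc := ses.New(session.New())
+//	_, err := svc.SetIdentityHeadersInNotificationsEnabled(
+//		&ses.SetIdentityHeadersInNotificationsEnabledInput{
+//			Identity:         aws.String("example.com"),
+//			NotificationType: aws.String(ses.NotificationTypeBounce),
+//			Enabled:          aws.Bool(true),
+//		})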
+type SetIdentityHeadersInNotificationsEnabledOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetIdentityHeadersInNotificationsEnabledOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityHeadersInNotificationsEnabledOutput) GoString() string { + return s.String() +} + +// Represents a request to enable or disable the Amazon SES custom MAIL FROM +// domain setup for a verified identity. For information about using a custom +// MAIL FROM domain, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/mail-from.html). +type SetIdentityMailFromDomainInput struct { + _ struct{} `type:"structure"` + + // The action that you want Amazon SES to take if it cannot successfully read + // the required MX record when you send an email. If you choose UseDefaultValue, + // Amazon SES will use amazonses.com (or a subdomain of that) as the MAIL FROM + // domain. If you choose RejectMessage, Amazon SES will return a MailFromDomainNotVerified + // error and not send the email. + // + // The action specified in BehaviorOnMXFailure is taken when the custom MAIL + // FROM domain setup is in the Pending, Failed, and TemporaryFailure states. + BehaviorOnMXFailure *string `type:"string" enum:"BehaviorOnMXFailure"` + + // The verified identity for which you want to enable or disable the specified + // custom MAIL FROM domain. + Identity *string `type:"string" required:"true"` + + // The custom MAIL FROM domain that you want the verified identity to use. The + // MAIL FROM domain must 1) be a subdomain of the verified identity, 2) not + // be used in a "From" address if the MAIL FROM domain is the destination of + // email feedback forwarding (for more information, see the Amazon SES Developer + // Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/mail-from.html)), + // and 3) not be used to receive emails. A value of null disables the custom + // MAIL FROM setting for the identity. + MailFromDomain *string `type:"string"` +} + +// String returns the string representation +func (s SetIdentityMailFromDomainInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityMailFromDomainInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetIdentityMailFromDomainInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetIdentityMailFromDomainInput"} + if s.Identity == nil { + invalidParams.Add(request.NewErrParamRequired("Identity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An empty element returned on a successful request. +type SetIdentityMailFromDomainOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetIdentityMailFromDomainOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityMailFromDomainOutput) GoString() string { + return s.String() +} + +// Represents a request to specify the Amazon SNS topic to which Amazon SES +// will publish bounce, complaint, or delivery notifications for emails sent +// with that identity as the Source. For information about Amazon SES notifications, +// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications-via-sns.html). 
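+//
+// A minimal call sketch (editor's illustration; the identity and topic ARN are
+// hypothetical; passing a nil SnsTopic clears the topic and disables publishing):
+//
+//	svc := ses.New(session.New())
+//	_, err := svc.SetIdentityNotificationTopic(&ses.SetIdentityNotificationTopicInput{
+//		Identity:         aws.String("example.com"),
+//		NotificationType: aws.String(ses.NotificationTypeComplaint),
+//		SnsTopic:         aws.String("arn:aws:sns:us-west-2:123456789012:MyTopic"),
+//	})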
+type SetIdentityNotificationTopicInput struct { + _ struct{} `type:"structure"` + + // The identity for which the Amazon SNS topic will be set. You can specify + // an identity by using its name or by using its Amazon Resource Name (ARN). + // Examples: user@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com. + Identity *string `type:"string" required:"true"` + + // The type of notifications that will be published to the specified Amazon + // SNS topic. + NotificationType *string `type:"string" required:"true" enum:"NotificationType"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic. If the parameter + // is omitted from the request or a null value is passed, SnsTopic is cleared + // and publishing is disabled. + SnsTopic *string `type:"string"` +} + +// String returns the string representation +func (s SetIdentityNotificationTopicInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityNotificationTopicInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetIdentityNotificationTopicInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetIdentityNotificationTopicInput"} + if s.Identity == nil { + invalidParams.Add(request.NewErrParamRequired("Identity")) + } + if s.NotificationType == nil { + invalidParams.Add(request.NewErrParamRequired("NotificationType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An empty element returned on a successful request. +type SetIdentityNotificationTopicOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetIdentityNotificationTopicOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityNotificationTopicOutput) GoString() string { + return s.String() +} + +// Represents a request to set the position of a receipt rule in a receipt rule +// set. You use receipt rule sets to receive email with Amazon SES. For more +// information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +type SetReceiptRulePositionInput struct { + _ struct{} `type:"structure"` + + // The name of the receipt rule after which to place the specified receipt rule. + After *string `type:"string"` + + // The name of the receipt rule to reposition. + RuleName *string `type:"string" required:"true"` + + // The name of the receipt rule set that contains the receipt rule to reposition. + RuleSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetReceiptRulePositionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetReceiptRulePositionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetReceiptRulePositionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetReceiptRulePositionInput"} + if s.RuleName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleName")) + } + if s.RuleSetName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An empty element returned on a successful request. 
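+//
+// A minimal call sketch (editor's illustration; the names are hypothetical,
+// and the assumption that leaving After unset places the rule first is the
+// editor's, not stated in the generated docs):
+//
+//	svc := ses.New(session.New())
+//	_, err := svc.SetReceiptRulePosition(&ses.SetReceiptRulePositionInput{
+//		RuleSetName: aws.String("my-rule-set"),
+//		RuleName:    aws.String("my-rule"),
+//		After:       aws.String("another-rule"),
+//	})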
+type SetReceiptRulePositionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetReceiptRulePositionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetReceiptRulePositionOutput) GoString() string { + return s.String() +} + +// When included in a receipt rule, this action terminates the evaluation of +// the receipt rule set and, optionally, publishes a notification to Amazon +// Simple Notification Service (Amazon SNS). +// +// For information about setting a stop action in a receipt rule, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-stop.html). +type StopAction struct { + _ struct{} `type:"structure"` + + // The scope to which the Stop action applies. That is, what is being stopped. + Scope *string `type:"string" required:"true" enum:"StopScope"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the + // stop action is taken. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. + // For more information about Amazon SNS topics, see the Amazon SNS Developer + // Guide (http://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html). + TopicArn *string `type:"string"` +} + +// String returns the string representation +func (s StopAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StopAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StopAction"} + if s.Scope == nil { + invalidParams.Add(request.NewErrParamRequired("Scope")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents a request to update a receipt rule. You use receipt rules to receive +// email with Amazon SES. For more information, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +type UpdateReceiptRuleInput struct { + _ struct{} `type:"structure"` + + // A data structure that contains the updated receipt rule information. + Rule *ReceiptRule `type:"structure" required:"true"` + + // The name of the receipt rule set to which the receipt rule belongs. + RuleSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateReceiptRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateReceiptRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateReceiptRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateReceiptRuleInput"} + if s.Rule == nil { + invalidParams.Add(request.NewErrParamRequired("Rule")) + } + if s.RuleSetName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleSetName")) + } + if s.Rule != nil { + if err := s.Rule.Validate(); err != nil { + invalidParams.AddNested("Rule", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An empty element returned on a successful request. 
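+//
+// A minimal call sketch (editor's illustration; the names are hypothetical).
+// Rule carries the complete updated rule definition, per the field docs above:
+//
+//	svc := ses.New(session.New())
+//	_, err := svc.UpdateReceiptRule(&ses.UpdateReceiptRuleInput{
+//		RuleSetName: aws.String("my-rule-set"),
+//		Rule: &ses.ReceiptRule{
+//			Name:        aws.String("my-rule"),
+//			Enabled:     aws.Bool(true),
+//			ScanEnabled: aws.Bool(true),
+//			TlsPolicy:   aws.String(ses.TlsPolicyRequire),
+//		},
+//	})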
+type UpdateReceiptRuleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateReceiptRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateReceiptRuleOutput) GoString() string { + return s.String() +} + +// Represents a request to generate the CNAME records needed to set up Easy +// DKIM with Amazon SES. For more information about setting up Easy DKIM, see +// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim.html). +type VerifyDomainDkimInput struct { + _ struct{} `type:"structure"` + + // The name of the domain to be verified for Easy DKIM signing. + Domain *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s VerifyDomainDkimInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VerifyDomainDkimInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *VerifyDomainDkimInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VerifyDomainDkimInput"} + if s.Domain == nil { + invalidParams.Add(request.NewErrParamRequired("Domain")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Returns CNAME records that you must publish to the DNS server of your domain +// to set up Easy DKIM with Amazon SES. +type VerifyDomainDkimOutput struct { + _ struct{} `type:"structure"` + + // A set of character strings that represent the domain's identity. If the identity + // is an email address, the tokens represent the domain of that address. + // + // Using these tokens, you will need to create DNS CNAME records that point + // to DKIM public keys hosted by Amazon SES. Amazon Web Services will eventually + // detect that you have updated your DNS records; this detection process may + // take up to 72 hours. Upon successful detection, Amazon SES will be able to + // DKIM-sign emails originating from that domain. + // + // For more information about creating DNS records using DKIM tokens, go to + // the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim-dns-records.html). + DkimTokens []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s VerifyDomainDkimOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VerifyDomainDkimOutput) GoString() string { + return s.String() +} + +// Represents a request to begin Amazon SES domain verification and to generate +// the TXT records that you must publish to the DNS server of your domain to +// complete the verification. For information about domain verification, see +// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-domains.html). +type VerifyDomainIdentityInput struct { + _ struct{} `type:"structure"` + + // The domain to be verified. + Domain *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s VerifyDomainIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VerifyDomainIdentityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *VerifyDomainIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VerifyDomainIdentityInput"} + if s.Domain == nil { + invalidParams.Add(request.NewErrParamRequired("Domain")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Returns a TXT record that you must publish to the DNS server of your domain +// to complete domain verification with Amazon SES. +type VerifyDomainIdentityOutput struct { + _ struct{} `type:"structure"` + + // A TXT record that must be placed in the DNS settings for the domain, in order + // to complete domain verification. + VerificationToken *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s VerifyDomainIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VerifyDomainIdentityOutput) GoString() string { + return s.String() +} + +// Represents a request to begin email address verification with Amazon SES. +// For information about email address verification, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-email-addresses.html). +type VerifyEmailAddressInput struct { + _ struct{} `type:"structure"` + + // The email address to be verified. + EmailAddress *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s VerifyEmailAddressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VerifyEmailAddressInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *VerifyEmailAddressInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VerifyEmailAddressInput"} + if s.EmailAddress == nil { + invalidParams.Add(request.NewErrParamRequired("EmailAddress")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type VerifyEmailAddressOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s VerifyEmailAddressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VerifyEmailAddressOutput) GoString() string { + return s.String() +} + +// Represents a request to begin email address verification with Amazon SES. +// For information about email address verification, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-email-addresses.html). +type VerifyEmailIdentityInput struct { + _ struct{} `type:"structure"` + + // The email address to be verified. + EmailAddress *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s VerifyEmailIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VerifyEmailIdentityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *VerifyEmailIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VerifyEmailIdentityInput"} + if s.EmailAddress == nil { + invalidParams.Add(request.NewErrParamRequired("EmailAddress")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An empty element returned on a successful request. 
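+//
+// A minimal call sketch (editor's illustration; the address is hypothetical).
+// Amazon SES responds by sending a verification email to the address:
+//
+//	svc := ses.New(session.New())
+//	_, err := svc.VerifyEmailIdentity(&ses.VerifyEmailIdentityInput{
+//		EmailAddress: aws.String("user@example.com"),
+//	})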
+type VerifyEmailIdentityOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s VerifyEmailIdentityOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s VerifyEmailIdentityOutput) GoString() string {
+	return s.String()
+}
+
+// When included in a receipt rule, this action calls Amazon WorkMail and, optionally,
+// publishes a notification to Amazon Simple Notification Service (Amazon SNS).
+// You will typically not use this action directly because Amazon WorkMail adds
+// the rule automatically during its setup procedure.
+//
+// For information about using a receipt rule to call Amazon WorkMail, see the
+// Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-workmail.html).
+type WorkmailAction struct {
+	_ struct{} `type:"structure"`
+
+	// The ARN of the Amazon WorkMail organization. An example of an Amazon WorkMail
+	// organization ARN is arn:aws:workmail:us-west-2:123456789012:organization/m-68755160c4cb4e29a2b2f8fb58f359d7.
+	// For information about Amazon WorkMail organizations, see the Amazon WorkMail
+	// Administrator Guide (http://docs.aws.amazon.com/workmail/latest/adminguide/organizations_overview.html).
+	OrganizationArn *string `type:"string" required:"true"`
+
+	// The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the
+	// WorkMail action is called. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic.
+	// For more information about Amazon SNS topics, see the Amazon SNS Developer
+	// Guide (http://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html).
+	TopicArn *string `type:"string"`
+}
+
+// String returns the string representation
+func (s WorkmailAction) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s WorkmailAction) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *WorkmailAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "WorkmailAction"} + if s.OrganizationArn == nil { + invalidParams.Add(request.NewErrParamRequired("OrganizationArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +const ( + // @enum BehaviorOnMXFailure + BehaviorOnMXFailureUseDefaultValue = "UseDefaultValue" + // @enum BehaviorOnMXFailure + BehaviorOnMXFailureRejectMessage = "RejectMessage" +) + +const ( + // @enum BounceType + BounceTypeDoesNotExist = "DoesNotExist" + // @enum BounceType + BounceTypeMessageTooLarge = "MessageTooLarge" + // @enum BounceType + BounceTypeExceededQuota = "ExceededQuota" + // @enum BounceType + BounceTypeContentRejected = "ContentRejected" + // @enum BounceType + BounceTypeUndefined = "Undefined" + // @enum BounceType + BounceTypeTemporaryFailure = "TemporaryFailure" +) + +const ( + // @enum CustomMailFromStatus + CustomMailFromStatusPending = "Pending" + // @enum CustomMailFromStatus + CustomMailFromStatusSuccess = "Success" + // @enum CustomMailFromStatus + CustomMailFromStatusFailed = "Failed" + // @enum CustomMailFromStatus + CustomMailFromStatusTemporaryFailure = "TemporaryFailure" +) + +const ( + // @enum DsnAction + DsnActionFailed = "failed" + // @enum DsnAction + DsnActionDelayed = "delayed" + // @enum DsnAction + DsnActionDelivered = "delivered" + // @enum DsnAction + DsnActionRelayed = "relayed" + // @enum DsnAction + DsnActionExpanded = "expanded" +) + +const ( + // @enum IdentityType + IdentityTypeEmailAddress = "EmailAddress" + // @enum IdentityType + IdentityTypeDomain = "Domain" +) + +const ( + // @enum InvocationType + InvocationTypeEvent = "Event" + // @enum InvocationType + InvocationTypeRequestResponse = "RequestResponse" +) + +const ( + // @enum NotificationType + NotificationTypeBounce = "Bounce" + // @enum NotificationType + NotificationTypeComplaint = "Complaint" + // @enum NotificationType + NotificationTypeDelivery = "Delivery" +) + +const ( + // @enum ReceiptFilterPolicy + ReceiptFilterPolicyBlock = "Block" + // @enum ReceiptFilterPolicy + ReceiptFilterPolicyAllow = "Allow" +) + +const ( + // @enum SNSActionEncoding + SNSActionEncodingUtf8 = "UTF-8" + // @enum SNSActionEncoding + SNSActionEncodingBase64 = "Base64" +) + +const ( + // @enum StopScope + StopScopeRuleSet = "RuleSet" +) + +const ( + // @enum TlsPolicy + TlsPolicyRequire = "Require" + // @enum TlsPolicy + TlsPolicyOptional = "Optional" +) + +const ( + // @enum VerificationStatus + VerificationStatusPending = "Pending" + // @enum VerificationStatus + VerificationStatusSuccess = "Success" + // @enum VerificationStatus + VerificationStatusFailed = "Failed" + // @enum VerificationStatus + VerificationStatusTemporaryFailure = "TemporaryFailure" + // @enum VerificationStatus + VerificationStatusNotStarted = "NotStarted" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/ses/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/ses/examples_test.go new file mode 100644 index 000000000..3b3cd16e8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ses/examples_test.go @@ -0,0 +1,1031 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
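+//
+// The examples below pass raw string literals where the API defines enums;
+// the string constants generated at the bottom of api.go (for example
+// ses.TlsPolicyOptional or ses.InvocationTypeEvent) can be used instead,
+// as in: TlsPolicy: aws.String(ses.TlsPolicyOptional).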
+ +package ses_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/ses" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleSES_CloneReceiptRuleSet() { + svc := ses.New(session.New()) + + params := &ses.CloneReceiptRuleSetInput{ + OriginalRuleSetName: aws.String("ReceiptRuleSetName"), // Required + RuleSetName: aws.String("ReceiptRuleSetName"), // Required + } + resp, err := svc.CloneReceiptRuleSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_CreateReceiptFilter() { + svc := ses.New(session.New()) + + params := &ses.CreateReceiptFilterInput{ + Filter: &ses.ReceiptFilter{ // Required + IpFilter: &ses.ReceiptIpFilter{ // Required + Cidr: aws.String("Cidr"), // Required + Policy: aws.String("ReceiptFilterPolicy"), // Required + }, + Name: aws.String("ReceiptFilterName"), // Required + }, + } + resp, err := svc.CreateReceiptFilter(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_CreateReceiptRule() { + svc := ses.New(session.New()) + + params := &ses.CreateReceiptRuleInput{ + Rule: &ses.ReceiptRule{ // Required + Name: aws.String("ReceiptRuleName"), // Required + Actions: []*ses.ReceiptAction{ + { // Required + AddHeaderAction: &ses.AddHeaderAction{ + HeaderName: aws.String("HeaderName"), // Required + HeaderValue: aws.String("HeaderValue"), // Required + }, + BounceAction: &ses.BounceAction{ + Message: aws.String("BounceMessage"), // Required + Sender: aws.String("Address"), // Required + SmtpReplyCode: aws.String("BounceSmtpReplyCode"), // Required + StatusCode: aws.String("BounceStatusCode"), + TopicArn: aws.String("AmazonResourceName"), + }, + LambdaAction: &ses.LambdaAction{ + FunctionArn: aws.String("AmazonResourceName"), // Required + InvocationType: aws.String("InvocationType"), + TopicArn: aws.String("AmazonResourceName"), + }, + S3Action: &ses.S3Action{ + BucketName: aws.String("S3BucketName"), // Required + KmsKeyArn: aws.String("AmazonResourceName"), + ObjectKeyPrefix: aws.String("S3KeyPrefix"), + TopicArn: aws.String("AmazonResourceName"), + }, + SNSAction: &ses.SNSAction{ + TopicArn: aws.String("AmazonResourceName"), // Required + Encoding: aws.String("SNSActionEncoding"), + }, + StopAction: &ses.StopAction{ + Scope: aws.String("StopScope"), // Required + TopicArn: aws.String("AmazonResourceName"), + }, + WorkmailAction: &ses.WorkmailAction{ + OrganizationArn: aws.String("AmazonResourceName"), // Required + TopicArn: aws.String("AmazonResourceName"), + }, + }, + // More values... + }, + Enabled: aws.Bool(true), + Recipients: []*string{ + aws.String("Recipient"), // Required + // More values... + }, + ScanEnabled: aws.Bool(true), + TlsPolicy: aws.String("TlsPolicy"), + }, + RuleSetName: aws.String("ReceiptRuleSetName"), // Required + After: aws.String("ReceiptRuleName"), + } + resp, err := svc.CreateReceiptRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
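+ // Note: this request fills in every action type on one ReceiptAction to
+ // show the full shape of the struct; a real rule would typically set just
+ // one action field (S3Action, LambdaAction, and so on) per ReceiptAction.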
+ fmt.Println(resp) +} + +func ExampleSES_CreateReceiptRuleSet() { + svc := ses.New(session.New()) + + params := &ses.CreateReceiptRuleSetInput{ + RuleSetName: aws.String("ReceiptRuleSetName"), // Required + } + resp, err := svc.CreateReceiptRuleSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_DeleteIdentity() { + svc := ses.New(session.New()) + + params := &ses.DeleteIdentityInput{ + Identity: aws.String("Identity"), // Required + } + resp, err := svc.DeleteIdentity(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_DeleteIdentityPolicy() { + svc := ses.New(session.New()) + + params := &ses.DeleteIdentityPolicyInput{ + Identity: aws.String("Identity"), // Required + PolicyName: aws.String("PolicyName"), // Required + } + resp, err := svc.DeleteIdentityPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_DeleteReceiptFilter() { + svc := ses.New(session.New()) + + params := &ses.DeleteReceiptFilterInput{ + FilterName: aws.String("ReceiptFilterName"), // Required + } + resp, err := svc.DeleteReceiptFilter(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_DeleteReceiptRule() { + svc := ses.New(session.New()) + + params := &ses.DeleteReceiptRuleInput{ + RuleName: aws.String("ReceiptRuleName"), // Required + RuleSetName: aws.String("ReceiptRuleSetName"), // Required + } + resp, err := svc.DeleteReceiptRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_DeleteReceiptRuleSet() { + svc := ses.New(session.New()) + + params := &ses.DeleteReceiptRuleSetInput{ + RuleSetName: aws.String("ReceiptRuleSetName"), // Required + } + resp, err := svc.DeleteReceiptRuleSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_DeleteVerifiedEmailAddress() { + svc := ses.New(session.New()) + + params := &ses.DeleteVerifiedEmailAddressInput{ + EmailAddress: aws.String("Address"), // Required + } + resp, err := svc.DeleteVerifiedEmailAddress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_DescribeActiveReceiptRuleSet() { + svc := ses.New(session.New()) + + var params *ses.DescribeActiveReceiptRuleSetInput + resp, err := svc.DescribeActiveReceiptRuleSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
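+ // A minimal sketch of that cast, using the
+ // github.com/aws/aws-sdk-go/aws/awserr package:
+ //
+ //     if aerr, ok := err.(awserr.Error); ok {
+ //         fmt.Println(aerr.Code(), aerr.Message())
+ //     }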
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_DescribeReceiptRule() { + svc := ses.New(session.New()) + + params := &ses.DescribeReceiptRuleInput{ + RuleName: aws.String("ReceiptRuleName"), // Required + RuleSetName: aws.String("ReceiptRuleSetName"), // Required + } + resp, err := svc.DescribeReceiptRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_DescribeReceiptRuleSet() { + svc := ses.New(session.New()) + + params := &ses.DescribeReceiptRuleSetInput{ + RuleSetName: aws.String("ReceiptRuleSetName"), // Required + } + resp, err := svc.DescribeReceiptRuleSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_GetIdentityDkimAttributes() { + svc := ses.New(session.New()) + + params := &ses.GetIdentityDkimAttributesInput{ + Identities: []*string{ // Required + aws.String("Identity"), // Required + // More values... + }, + } + resp, err := svc.GetIdentityDkimAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_GetIdentityMailFromDomainAttributes() { + svc := ses.New(session.New()) + + params := &ses.GetIdentityMailFromDomainAttributesInput{ + Identities: []*string{ // Required + aws.String("Identity"), // Required + // More values... + }, + } + resp, err := svc.GetIdentityMailFromDomainAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_GetIdentityNotificationAttributes() { + svc := ses.New(session.New()) + + params := &ses.GetIdentityNotificationAttributesInput{ + Identities: []*string{ // Required + aws.String("Identity"), // Required + // More values... + }, + } + resp, err := svc.GetIdentityNotificationAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_GetIdentityPolicies() { + svc := ses.New(session.New()) + + params := &ses.GetIdentityPoliciesInput{ + Identity: aws.String("Identity"), // Required + PolicyNames: []*string{ // Required + aws.String("PolicyName"), // Required + // More values... + }, + } + resp, err := svc.GetIdentityPolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_GetIdentityVerificationAttributes() { + svc := ses.New(session.New()) + + params := &ses.GetIdentityVerificationAttributesInput{ + Identities: []*string{ // Required + aws.String("Identity"), // Required + // More values... 
+ }, + } + resp, err := svc.GetIdentityVerificationAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_GetSendQuota() { + svc := ses.New(session.New()) + + var params *ses.GetSendQuotaInput + resp, err := svc.GetSendQuota(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_GetSendStatistics() { + svc := ses.New(session.New()) + + var params *ses.GetSendStatisticsInput + resp, err := svc.GetSendStatistics(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_ListIdentities() { + svc := ses.New(session.New()) + + params := &ses.ListIdentitiesInput{ + IdentityType: aws.String("IdentityType"), + MaxItems: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListIdentities(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_ListIdentityPolicies() { + svc := ses.New(session.New()) + + params := &ses.ListIdentityPoliciesInput{ + Identity: aws.String("Identity"), // Required + } + resp, err := svc.ListIdentityPolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_ListReceiptFilters() { + svc := ses.New(session.New()) + + var params *ses.ListReceiptFiltersInput + resp, err := svc.ListReceiptFilters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_ListReceiptRuleSets() { + svc := ses.New(session.New()) + + params := &ses.ListReceiptRuleSetsInput{ + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListReceiptRuleSets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_ListVerifiedEmailAddresses() { + svc := ses.New(session.New()) + + var params *ses.ListVerifiedEmailAddressesInput + resp, err := svc.ListVerifiedEmailAddresses(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
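+ // The VerifiedEmailAddresses field of the response is a []*string, so
+ // the individual addresses can be printed with, for example:
+ //
+ //     for _, addr := range resp.VerifiedEmailAddresses {
+ //         fmt.Println(aws.StringValue(addr))
+ //     }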
+ fmt.Println(resp) +} + +func ExampleSES_PutIdentityPolicy() { + svc := ses.New(session.New()) + + params := &ses.PutIdentityPolicyInput{ + Identity: aws.String("Identity"), // Required + Policy: aws.String("Policy"), // Required + PolicyName: aws.String("PolicyName"), // Required + } + resp, err := svc.PutIdentityPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_ReorderReceiptRuleSet() { + svc := ses.New(session.New()) + + params := &ses.ReorderReceiptRuleSetInput{ + RuleNames: []*string{ // Required + aws.String("ReceiptRuleName"), // Required + // More values... + }, + RuleSetName: aws.String("ReceiptRuleSetName"), // Required + } + resp, err := svc.ReorderReceiptRuleSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_SendBounce() { + svc := ses.New(session.New()) + + params := &ses.SendBounceInput{ + BounceSender: aws.String("Address"), // Required + BouncedRecipientInfoList: []*ses.BouncedRecipientInfo{ // Required + { // Required + Recipient: aws.String("Address"), // Required + BounceType: aws.String("BounceType"), + RecipientArn: aws.String("AmazonResourceName"), + RecipientDsnFields: &ses.RecipientDsnFields{ + Action: aws.String("DsnAction"), // Required + Status: aws.String("DsnStatus"), // Required + DiagnosticCode: aws.String("DiagnosticCode"), + ExtensionFields: []*ses.ExtensionField{ + { // Required + Name: aws.String("ExtensionFieldName"), // Required + Value: aws.String("ExtensionFieldValue"), // Required + }, + // More values... + }, + FinalRecipient: aws.String("Address"), + LastAttemptDate: aws.Time(time.Now()), + RemoteMta: aws.String("RemoteMta"), + }, + }, + // More values... + }, + OriginalMessageId: aws.String("MessageId"), // Required + BounceSenderArn: aws.String("AmazonResourceName"), + Explanation: aws.String("Explanation"), + MessageDsn: &ses.MessageDsn{ + ReportingMta: aws.String("ReportingMta"), // Required + ArrivalDate: aws.Time(time.Now()), + ExtensionFields: []*ses.ExtensionField{ + { // Required + Name: aws.String("ExtensionFieldName"), // Required + Value: aws.String("ExtensionFieldValue"), // Required + }, + // More values... + }, + }, + } + resp, err := svc.SendBounce(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_SendEmail() { + svc := ses.New(session.New()) + + params := &ses.SendEmailInput{ + Destination: &ses.Destination{ // Required + BccAddresses: []*string{ + aws.String("Address"), // Required + // More values... + }, + CcAddresses: []*string{ + aws.String("Address"), // Required + // More values... + }, + ToAddresses: []*string{ + aws.String("Address"), // Required + // More values... 
+ }, + }, + Message: &ses.Message{ // Required + Body: &ses.Body{ // Required + Html: &ses.Content{ + Data: aws.String("MessageData"), // Required + Charset: aws.String("Charset"), + }, + Text: &ses.Content{ + Data: aws.String("MessageData"), // Required + Charset: aws.String("Charset"), + }, + }, + Subject: &ses.Content{ // Required + Data: aws.String("MessageData"), // Required + Charset: aws.String("Charset"), + }, + }, + Source: aws.String("Address"), // Required + ReplyToAddresses: []*string{ + aws.String("Address"), // Required + // More values... + }, + ReturnPath: aws.String("Address"), + ReturnPathArn: aws.String("AmazonResourceName"), + SourceArn: aws.String("AmazonResourceName"), + } + resp, err := svc.SendEmail(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_SendRawEmail() { + svc := ses.New(session.New()) + + params := &ses.SendRawEmailInput{ + RawMessage: &ses.RawMessage{ // Required + Data: []byte("PAYLOAD"), // Required + }, + Destinations: []*string{ + aws.String("Address"), // Required + // More values... + }, + FromArn: aws.String("AmazonResourceName"), + ReturnPathArn: aws.String("AmazonResourceName"), + Source: aws.String("Address"), + SourceArn: aws.String("AmazonResourceName"), + } + resp, err := svc.SendRawEmail(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_SetActiveReceiptRuleSet() { + svc := ses.New(session.New()) + + params := &ses.SetActiveReceiptRuleSetInput{ + RuleSetName: aws.String("ReceiptRuleSetName"), + } + resp, err := svc.SetActiveReceiptRuleSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_SetIdentityDkimEnabled() { + svc := ses.New(session.New()) + + params := &ses.SetIdentityDkimEnabledInput{ + DkimEnabled: aws.Bool(true), // Required + Identity: aws.String("Identity"), // Required + } + resp, err := svc.SetIdentityDkimEnabled(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_SetIdentityFeedbackForwardingEnabled() { + svc := ses.New(session.New()) + + params := &ses.SetIdentityFeedbackForwardingEnabledInput{ + ForwardingEnabled: aws.Bool(true), // Required + Identity: aws.String("Identity"), // Required + } + resp, err := svc.SetIdentityFeedbackForwardingEnabled(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
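+ // Note: per the SES API reference, forwarding can only be disabled
+ // (ForwardingEnabled set to false) when Amazon SNS topics are set for
+ // both bounces and complaints for the identity.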
+ fmt.Println(resp) +} + +func ExampleSES_SetIdentityHeadersInNotificationsEnabled() { + svc := ses.New(session.New()) + + params := &ses.SetIdentityHeadersInNotificationsEnabledInput{ + Enabled: aws.Bool(true), // Required + Identity: aws.String("Identity"), // Required + NotificationType: aws.String("NotificationType"), // Required + } + resp, err := svc.SetIdentityHeadersInNotificationsEnabled(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_SetIdentityMailFromDomain() { + svc := ses.New(session.New()) + + params := &ses.SetIdentityMailFromDomainInput{ + Identity: aws.String("Identity"), // Required + BehaviorOnMXFailure: aws.String("BehaviorOnMXFailure"), + MailFromDomain: aws.String("MailFromDomainName"), + } + resp, err := svc.SetIdentityMailFromDomain(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_SetIdentityNotificationTopic() { + svc := ses.New(session.New()) + + params := &ses.SetIdentityNotificationTopicInput{ + Identity: aws.String("Identity"), // Required + NotificationType: aws.String("NotificationType"), // Required + SnsTopic: aws.String("NotificationTopic"), + } + resp, err := svc.SetIdentityNotificationTopic(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_SetReceiptRulePosition() { + svc := ses.New(session.New()) + + params := &ses.SetReceiptRulePositionInput{ + RuleName: aws.String("ReceiptRuleName"), // Required + RuleSetName: aws.String("ReceiptRuleSetName"), // Required + After: aws.String("ReceiptRuleName"), + } + resp, err := svc.SetReceiptRulePosition(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSES_UpdateReceiptRule() { + svc := ses.New(session.New()) + + params := &ses.UpdateReceiptRuleInput{ + Rule: &ses.ReceiptRule{ // Required + Name: aws.String("ReceiptRuleName"), // Required + Actions: []*ses.ReceiptAction{ + { // Required + AddHeaderAction: &ses.AddHeaderAction{ + HeaderName: aws.String("HeaderName"), // Required + HeaderValue: aws.String("HeaderValue"), // Required + }, + BounceAction: &ses.BounceAction{ + Message: aws.String("BounceMessage"), // Required + Sender: aws.String("Address"), // Required + SmtpReplyCode: aws.String("BounceSmtpReplyCode"), // Required + StatusCode: aws.String("BounceStatusCode"), + TopicArn: aws.String("AmazonResourceName"), + }, + LambdaAction: &ses.LambdaAction{ + FunctionArn: aws.String("AmazonResourceName"), // Required + InvocationType: aws.String("InvocationType"), + TopicArn: aws.String("AmazonResourceName"), + }, + S3Action: &ses.S3Action{ + BucketName: aws.String("S3BucketName"), // Required + KmsKeyArn: aws.String("AmazonResourceName"), + ObjectKeyPrefix: aws.String("S3KeyPrefix"), + TopicArn: aws.String("AmazonResourceName"), + }, + SNSAction: &ses.SNSAction{ + TopicArn: aws.String("AmazonResourceName"), // Required + Encoding: aws.String("SNSActionEncoding"), + }, + StopAction: &ses.StopAction{ + Scope: aws.String("StopScope"), // Required + TopicArn: aws.String("AmazonResourceName"), + }, + WorkmailAction: &ses.WorkmailAction{ + OrganizationArn: aws.String("AmazonResourceName"), // Required + TopicArn: aws.String("AmazonResourceName"), + }, + }, + // More values... + }, + Enabled: aws.Bool(true), + Recipients: []*string{ + aws.String("Recipient"), // Required + // More values... + }, + ScanEnabled: aws.Bool(true), + TlsPolicy: aws.String("TlsPolicy"), + }, + RuleSetName: aws.String("ReceiptRuleSetName"), // Required + } + resp, err := svc.UpdateReceiptRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_VerifyDomainDkim() { + svc := ses.New(session.New()) + + params := &ses.VerifyDomainDkimInput{ + Domain: aws.String("Domain"), // Required + } + resp, err := svc.VerifyDomainDkim(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_VerifyDomainIdentity() { + svc := ses.New(session.New()) + + params := &ses.VerifyDomainIdentityInput{ + Domain: aws.String("Domain"), // Required + } + resp, err := svc.VerifyDomainIdentity(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_VerifyEmailAddress() { + svc := ses.New(session.New()) + + params := &ses.VerifyEmailAddressInput{ + EmailAddress: aws.String("Address"), // Required + } + resp, err := svc.VerifyEmailAddress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
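+ // To block until the identity finishes verification, a sketch using the
+ // WaitUntilIdentityExists waiter generated in this package (it polls
+ // GetIdentityVerificationAttributes):
+ //
+ //     err = svc.WaitUntilIdentityExists(&ses.GetIdentityVerificationAttributesInput{
+ //         Identities: []*string{aws.String("user@example.com")},
+ //     })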
+ fmt.Println(resp)
+}
+
+func ExampleSES_VerifyEmailIdentity() {
+ svc := ses.New(session.New())
+
+ params := &ses.VerifyEmailIdentityInput{
+ EmailAddress: aws.String("Address"), // Required
+ }
+ resp, err := svc.VerifyEmailIdentity(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ses/service.go b/vendor/github.com/aws/aws-sdk-go/service/ses/service.go
new file mode 100644
index 000000000..2952ac131
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/ses/service.go
@@ -0,0 +1,93 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package ses
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol/query"
+)
+
+// This is the API Reference for Amazon Simple Email Service (Amazon SES). This
+// documentation is intended to be used in conjunction with the Amazon SES Developer
+// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/Welcome.html).
+//
+// For a list of Amazon SES endpoints to use in service requests, see Regions
+// and Amazon SES (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/regions.html)
+// in the Amazon SES Developer Guide.
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type SES struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "email"
+
+// New creates a new instance of the SES client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a SES client from just a session.
+// svc := ses.New(mySession)
+//
+// // Create a SES client with additional configuration
+// svc := ses.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *SES {
+ c := p.ClientConfig(ServiceName, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *SES { + svc := &SES{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningName: "ses", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2010-12-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a SES operation and runs any +// custom request initialization. +func (c *SES) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ses/sesiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/ses/sesiface/interface.go new file mode 100644 index 000000000..ba1b84c8d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ses/sesiface/interface.go @@ -0,0 +1,184 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package sesiface provides an interface for the Amazon Simple Email Service. +package sesiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/ses" +) + +// SESAPI is the interface type for ses.SES. +type SESAPI interface { + CloneReceiptRuleSetRequest(*ses.CloneReceiptRuleSetInput) (*request.Request, *ses.CloneReceiptRuleSetOutput) + + CloneReceiptRuleSet(*ses.CloneReceiptRuleSetInput) (*ses.CloneReceiptRuleSetOutput, error) + + CreateReceiptFilterRequest(*ses.CreateReceiptFilterInput) (*request.Request, *ses.CreateReceiptFilterOutput) + + CreateReceiptFilter(*ses.CreateReceiptFilterInput) (*ses.CreateReceiptFilterOutput, error) + + CreateReceiptRuleRequest(*ses.CreateReceiptRuleInput) (*request.Request, *ses.CreateReceiptRuleOutput) + + CreateReceiptRule(*ses.CreateReceiptRuleInput) (*ses.CreateReceiptRuleOutput, error) + + CreateReceiptRuleSetRequest(*ses.CreateReceiptRuleSetInput) (*request.Request, *ses.CreateReceiptRuleSetOutput) + + CreateReceiptRuleSet(*ses.CreateReceiptRuleSetInput) (*ses.CreateReceiptRuleSetOutput, error) + + DeleteIdentityRequest(*ses.DeleteIdentityInput) (*request.Request, *ses.DeleteIdentityOutput) + + DeleteIdentity(*ses.DeleteIdentityInput) (*ses.DeleteIdentityOutput, error) + + DeleteIdentityPolicyRequest(*ses.DeleteIdentityPolicyInput) (*request.Request, *ses.DeleteIdentityPolicyOutput) + + DeleteIdentityPolicy(*ses.DeleteIdentityPolicyInput) (*ses.DeleteIdentityPolicyOutput, error) + + DeleteReceiptFilterRequest(*ses.DeleteReceiptFilterInput) (*request.Request, *ses.DeleteReceiptFilterOutput) + + DeleteReceiptFilter(*ses.DeleteReceiptFilterInput) (*ses.DeleteReceiptFilterOutput, error) + + DeleteReceiptRuleRequest(*ses.DeleteReceiptRuleInput) (*request.Request, *ses.DeleteReceiptRuleOutput) + + DeleteReceiptRule(*ses.DeleteReceiptRuleInput) (*ses.DeleteReceiptRuleOutput, error) + + DeleteReceiptRuleSetRequest(*ses.DeleteReceiptRuleSetInput) (*request.Request, *ses.DeleteReceiptRuleSetOutput) + + 
DeleteReceiptRuleSet(*ses.DeleteReceiptRuleSetInput) (*ses.DeleteReceiptRuleSetOutput, error) + + DeleteVerifiedEmailAddressRequest(*ses.DeleteVerifiedEmailAddressInput) (*request.Request, *ses.DeleteVerifiedEmailAddressOutput) + + DeleteVerifiedEmailAddress(*ses.DeleteVerifiedEmailAddressInput) (*ses.DeleteVerifiedEmailAddressOutput, error) + + DescribeActiveReceiptRuleSetRequest(*ses.DescribeActiveReceiptRuleSetInput) (*request.Request, *ses.DescribeActiveReceiptRuleSetOutput) + + DescribeActiveReceiptRuleSet(*ses.DescribeActiveReceiptRuleSetInput) (*ses.DescribeActiveReceiptRuleSetOutput, error) + + DescribeReceiptRuleRequest(*ses.DescribeReceiptRuleInput) (*request.Request, *ses.DescribeReceiptRuleOutput) + + DescribeReceiptRule(*ses.DescribeReceiptRuleInput) (*ses.DescribeReceiptRuleOutput, error) + + DescribeReceiptRuleSetRequest(*ses.DescribeReceiptRuleSetInput) (*request.Request, *ses.DescribeReceiptRuleSetOutput) + + DescribeReceiptRuleSet(*ses.DescribeReceiptRuleSetInput) (*ses.DescribeReceiptRuleSetOutput, error) + + GetIdentityDkimAttributesRequest(*ses.GetIdentityDkimAttributesInput) (*request.Request, *ses.GetIdentityDkimAttributesOutput) + + GetIdentityDkimAttributes(*ses.GetIdentityDkimAttributesInput) (*ses.GetIdentityDkimAttributesOutput, error) + + GetIdentityMailFromDomainAttributesRequest(*ses.GetIdentityMailFromDomainAttributesInput) (*request.Request, *ses.GetIdentityMailFromDomainAttributesOutput) + + GetIdentityMailFromDomainAttributes(*ses.GetIdentityMailFromDomainAttributesInput) (*ses.GetIdentityMailFromDomainAttributesOutput, error) + + GetIdentityNotificationAttributesRequest(*ses.GetIdentityNotificationAttributesInput) (*request.Request, *ses.GetIdentityNotificationAttributesOutput) + + GetIdentityNotificationAttributes(*ses.GetIdentityNotificationAttributesInput) (*ses.GetIdentityNotificationAttributesOutput, error) + + GetIdentityPoliciesRequest(*ses.GetIdentityPoliciesInput) (*request.Request, *ses.GetIdentityPoliciesOutput) + + GetIdentityPolicies(*ses.GetIdentityPoliciesInput) (*ses.GetIdentityPoliciesOutput, error) + + GetIdentityVerificationAttributesRequest(*ses.GetIdentityVerificationAttributesInput) (*request.Request, *ses.GetIdentityVerificationAttributesOutput) + + GetIdentityVerificationAttributes(*ses.GetIdentityVerificationAttributesInput) (*ses.GetIdentityVerificationAttributesOutput, error) + + GetSendQuotaRequest(*ses.GetSendQuotaInput) (*request.Request, *ses.GetSendQuotaOutput) + + GetSendQuota(*ses.GetSendQuotaInput) (*ses.GetSendQuotaOutput, error) + + GetSendStatisticsRequest(*ses.GetSendStatisticsInput) (*request.Request, *ses.GetSendStatisticsOutput) + + GetSendStatistics(*ses.GetSendStatisticsInput) (*ses.GetSendStatisticsOutput, error) + + ListIdentitiesRequest(*ses.ListIdentitiesInput) (*request.Request, *ses.ListIdentitiesOutput) + + ListIdentities(*ses.ListIdentitiesInput) (*ses.ListIdentitiesOutput, error) + + ListIdentitiesPages(*ses.ListIdentitiesInput, func(*ses.ListIdentitiesOutput, bool) bool) error + + ListIdentityPoliciesRequest(*ses.ListIdentityPoliciesInput) (*request.Request, *ses.ListIdentityPoliciesOutput) + + ListIdentityPolicies(*ses.ListIdentityPoliciesInput) (*ses.ListIdentityPoliciesOutput, error) + + ListReceiptFiltersRequest(*ses.ListReceiptFiltersInput) (*request.Request, *ses.ListReceiptFiltersOutput) + + ListReceiptFilters(*ses.ListReceiptFiltersInput) (*ses.ListReceiptFiltersOutput, error) + + ListReceiptRuleSetsRequest(*ses.ListReceiptRuleSetsInput) (*request.Request, *ses.ListReceiptRuleSetsOutput) + + 
ListReceiptRuleSets(*ses.ListReceiptRuleSetsInput) (*ses.ListReceiptRuleSetsOutput, error) + + ListVerifiedEmailAddressesRequest(*ses.ListVerifiedEmailAddressesInput) (*request.Request, *ses.ListVerifiedEmailAddressesOutput) + + ListVerifiedEmailAddresses(*ses.ListVerifiedEmailAddressesInput) (*ses.ListVerifiedEmailAddressesOutput, error) + + PutIdentityPolicyRequest(*ses.PutIdentityPolicyInput) (*request.Request, *ses.PutIdentityPolicyOutput) + + PutIdentityPolicy(*ses.PutIdentityPolicyInput) (*ses.PutIdentityPolicyOutput, error) + + ReorderReceiptRuleSetRequest(*ses.ReorderReceiptRuleSetInput) (*request.Request, *ses.ReorderReceiptRuleSetOutput) + + ReorderReceiptRuleSet(*ses.ReorderReceiptRuleSetInput) (*ses.ReorderReceiptRuleSetOutput, error) + + SendBounceRequest(*ses.SendBounceInput) (*request.Request, *ses.SendBounceOutput) + + SendBounce(*ses.SendBounceInput) (*ses.SendBounceOutput, error) + + SendEmailRequest(*ses.SendEmailInput) (*request.Request, *ses.SendEmailOutput) + + SendEmail(*ses.SendEmailInput) (*ses.SendEmailOutput, error) + + SendRawEmailRequest(*ses.SendRawEmailInput) (*request.Request, *ses.SendRawEmailOutput) + + SendRawEmail(*ses.SendRawEmailInput) (*ses.SendRawEmailOutput, error) + + SetActiveReceiptRuleSetRequest(*ses.SetActiveReceiptRuleSetInput) (*request.Request, *ses.SetActiveReceiptRuleSetOutput) + + SetActiveReceiptRuleSet(*ses.SetActiveReceiptRuleSetInput) (*ses.SetActiveReceiptRuleSetOutput, error) + + SetIdentityDkimEnabledRequest(*ses.SetIdentityDkimEnabledInput) (*request.Request, *ses.SetIdentityDkimEnabledOutput) + + SetIdentityDkimEnabled(*ses.SetIdentityDkimEnabledInput) (*ses.SetIdentityDkimEnabledOutput, error) + + SetIdentityFeedbackForwardingEnabledRequest(*ses.SetIdentityFeedbackForwardingEnabledInput) (*request.Request, *ses.SetIdentityFeedbackForwardingEnabledOutput) + + SetIdentityFeedbackForwardingEnabled(*ses.SetIdentityFeedbackForwardingEnabledInput) (*ses.SetIdentityFeedbackForwardingEnabledOutput, error) + + SetIdentityHeadersInNotificationsEnabledRequest(*ses.SetIdentityHeadersInNotificationsEnabledInput) (*request.Request, *ses.SetIdentityHeadersInNotificationsEnabledOutput) + + SetIdentityHeadersInNotificationsEnabled(*ses.SetIdentityHeadersInNotificationsEnabledInput) (*ses.SetIdentityHeadersInNotificationsEnabledOutput, error) + + SetIdentityMailFromDomainRequest(*ses.SetIdentityMailFromDomainInput) (*request.Request, *ses.SetIdentityMailFromDomainOutput) + + SetIdentityMailFromDomain(*ses.SetIdentityMailFromDomainInput) (*ses.SetIdentityMailFromDomainOutput, error) + + SetIdentityNotificationTopicRequest(*ses.SetIdentityNotificationTopicInput) (*request.Request, *ses.SetIdentityNotificationTopicOutput) + + SetIdentityNotificationTopic(*ses.SetIdentityNotificationTopicInput) (*ses.SetIdentityNotificationTopicOutput, error) + + SetReceiptRulePositionRequest(*ses.SetReceiptRulePositionInput) (*request.Request, *ses.SetReceiptRulePositionOutput) + + SetReceiptRulePosition(*ses.SetReceiptRulePositionInput) (*ses.SetReceiptRulePositionOutput, error) + + UpdateReceiptRuleRequest(*ses.UpdateReceiptRuleInput) (*request.Request, *ses.UpdateReceiptRuleOutput) + + UpdateReceiptRule(*ses.UpdateReceiptRuleInput) (*ses.UpdateReceiptRuleOutput, error) + + VerifyDomainDkimRequest(*ses.VerifyDomainDkimInput) (*request.Request, *ses.VerifyDomainDkimOutput) + + VerifyDomainDkim(*ses.VerifyDomainDkimInput) (*ses.VerifyDomainDkimOutput, error) + + VerifyDomainIdentityRequest(*ses.VerifyDomainIdentityInput) (*request.Request, 
*ses.VerifyDomainIdentityOutput) + + VerifyDomainIdentity(*ses.VerifyDomainIdentityInput) (*ses.VerifyDomainIdentityOutput, error) + + VerifyEmailAddressRequest(*ses.VerifyEmailAddressInput) (*request.Request, *ses.VerifyEmailAddressOutput) + + VerifyEmailAddress(*ses.VerifyEmailAddressInput) (*ses.VerifyEmailAddressOutput, error) + + VerifyEmailIdentityRequest(*ses.VerifyEmailIdentityInput) (*request.Request, *ses.VerifyEmailIdentityOutput) + + VerifyEmailIdentity(*ses.VerifyEmailIdentityInput) (*ses.VerifyEmailIdentityOutput, error) +} + +var _ SESAPI = (*ses.SES)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/ses/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/ses/waiters.go new file mode 100644 index 000000000..8156c0fc0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ses/waiters.go @@ -0,0 +1,30 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package ses + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *SES) WaitUntilIdentityExists(input *GetIdentityVerificationAttributesInput) error { + waiterCfg := waiter.Config{ + Operation: "GetIdentityVerificationAttributes", + Delay: 3, + MaxAttempts: 20, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "VerificationAttributes.*.VerificationStatus", + Expected: "Success", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/simpledb/api.go b/vendor/github.com/aws/aws-sdk-go/service/simpledb/api.go new file mode 100644 index 000000000..c4d0e5cf8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/simpledb/api.go @@ -0,0 +1,1528 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package simpledb provides a client for Amazon SimpleDB. +package simpledb + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +const opBatchDeleteAttributes = "BatchDeleteAttributes" + +// BatchDeleteAttributesRequest generates a "aws/request.Request" representing the +// client's request for the BatchDeleteAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the BatchDeleteAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the BatchDeleteAttributesRequest method. 
+// req, resp := client.BatchDeleteAttributesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *SimpleDB) BatchDeleteAttributesRequest(input *BatchDeleteAttributesInput) (req *request.Request, output *BatchDeleteAttributesOutput) {
+ op := &request.Operation{
+ Name: opBatchDeleteAttributes,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &BatchDeleteAttributesInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &BatchDeleteAttributesOutput{}
+ req.Data = output
+ return
+}
+
+// Performs multiple DeleteAttributes operations in a single call, which reduces
+// round trips and latencies. This enables Amazon SimpleDB to optimize requests,
+// which generally yields better throughput.
+//
+// If you specify BatchDeleteAttributes without attributes or values, all
+// the attributes for the item are deleted.
+//
+// BatchDeleteAttributes is an idempotent operation; running it multiple times
+// on the same item or attribute doesn't result in an error.
+//
+// The BatchDeleteAttributes operation succeeds or fails in its entirety.
+// There are no partial deletes. You can execute multiple BatchDeleteAttributes
+// operations and other operations in parallel. However, large numbers of concurrent
+// BatchDeleteAttributes calls can result in Service Unavailable (503) responses.
+//
+// This operation is vulnerable to exceeding the maximum URL size when making
+// a REST request using the HTTP GET method.
+//
+// This operation does not support conditions using Expected.X.Name, Expected.X.Value,
+// or Expected.X.Exists.
+//
+// The following limitations are enforced for this operation:
+//
+//    - 1 MB request size
+//    - 25 item limit per BatchDeleteAttributes operation
+func (c *SimpleDB) BatchDeleteAttributes(input *BatchDeleteAttributesInput) (*BatchDeleteAttributesOutput, error) {
+ req, out := c.BatchDeleteAttributesRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opBatchPutAttributes = "BatchPutAttributes"
+
+// BatchPutAttributesRequest generates a "aws/request.Request" representing the
+// client's request for the BatchPutAttributes operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the BatchPutAttributes method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the BatchPutAttributesRequest method.
+// req, resp := client.BatchPutAttributesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *SimpleDB) BatchPutAttributesRequest(input *BatchPutAttributesInput) (req *request.Request, output *BatchPutAttributesOutput) {
+ op := &request.Operation{
+ Name: opBatchPutAttributes,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &BatchPutAttributesInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &BatchPutAttributesOutput{}
+ req.Data = output
+ return
+}
+
+// The BatchPutAttributes operation creates or replaces attributes within one
+// or more items. By using this operation, the client can perform multiple PutAttributes
+// operations with a single call. This helps yield savings in round trips and
+// latencies, enabling Amazon SimpleDB to optimize requests and generally produce
+// better throughput.
+//
+// The client may specify the item name with the Item.X.ItemName parameter.
+// The client may specify new attributes using a combination of the Item.X.Attribute.Y.Name
+// and Item.X.Attribute.Y.Value parameters. The client may specify the first
+// attribute for the first item using the parameters Item.0.Attribute.0.Name
+// and Item.0.Attribute.0.Value, and for the second attribute for the first
+// item by the parameters Item.0.Attribute.1.Name and Item.0.Attribute.1.Value,
+// and so on.
+//
+// Attributes are uniquely identified within an item by their name/value combination.
+// For example, a single item can have the attributes { "first_name", "first_value"
+// } and { "first_name", "second_value" }. However, it cannot have two attribute
+// instances where both the Item.X.Attribute.Y.Name and Item.X.Attribute.Y.Value
+// are the same.
+//
+// Optionally, the requester can supply the Replace parameter for each individual
+// value. Setting this value to true will cause the new attribute values to
+// replace the existing attribute values. For example, if an item I has the
+// attributes { 'a', '1' }, { 'b', '2' } and { 'b', '3' } and the requester does
+// a BatchPutAttributes of { 'I', 'b', '4' } with the Replace parameter set to
+// true, the final attributes of the item will be { 'a', '1' } and { 'b', '4' },
+// replacing the previous values of the 'b' attribute with the new value.
+//
+// You cannot specify an empty string as an item or as an attribute name.
+// The BatchPutAttributes operation succeeds or fails in its entirety. There
+// are no partial puts. This operation is vulnerable to exceeding the maximum
+// URL size when making a REST request using the HTTP GET method. This operation
+// does not support conditions using Expected.X.Name, Expected.X.Value, or Expected.X.Exists.
+// You can execute multiple BatchPutAttributes operations and other operations
+// in parallel. However, large numbers of concurrent BatchPutAttributes calls
+// can result in Service Unavailable (503) responses.
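+//
+// A minimal sketch of the Replace example above as a call through this
+// client (svc is a *SimpleDB client; field names are as generated in this
+// package; error handling elided):
+//
+//     _, err := svc.BatchPutAttributes(&simpledb.BatchPutAttributesInput{
+//         DomainName: aws.String("MyDomain"),
+//         Items: []*simpledb.ReplaceableItem{{
+//             Name: aws.String("I"),
+//             Attributes: []*simpledb.ReplaceableAttribute{{
+//                 Name:    aws.String("b"),
+//                 Value:   aws.String("4"),
+//                 Replace: aws.Bool(true),
+//             }},
+//         }},
+//     })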
+//
+// The following limitations are enforced for this operation:
+//
+//    - 256 attribute name-value pairs per item
+//    - 1 MB request size
+//    - 1 billion attributes per domain
+//    - 10 GB of total user data storage per domain
+//    - 25 item limit per BatchPutAttributes operation
+func (c *SimpleDB) BatchPutAttributes(input *BatchPutAttributesInput) (*BatchPutAttributesOutput, error) {
+ req, out := c.BatchPutAttributesRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opCreateDomain = "CreateDomain"
+
+// CreateDomainRequest generates a "aws/request.Request" representing the
+// client's request for the CreateDomain operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CreateDomain method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the CreateDomainRequest method.
+// req, resp := client.CreateDomainRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *SimpleDB) CreateDomainRequest(input *CreateDomainInput) (req *request.Request, output *CreateDomainOutput) {
+ op := &request.Operation{
+ Name: opCreateDomain,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateDomainInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &CreateDomainOutput{}
+ req.Data = output
+ return
+}
+
+// The CreateDomain operation creates a new domain. The domain name should be
+// unique among the domains associated with the Access Key ID provided in the
+// request. The CreateDomain operation may take 10 or more seconds to complete.
+//
+// CreateDomain is an idempotent operation; running it multiple times using
+// the same domain name will not result in an error response. The client can
+// create up to 100 domains per account.
+//
+// If the client requires additional domains, go to http://aws.amazon.com/contact-us/simpledb-limit-request/.
+func (c *SimpleDB) CreateDomain(input *CreateDomainInput) (*CreateDomainOutput, error) {
+ req, out := c.CreateDomainRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteAttributes = "DeleteAttributes"
+
+// DeleteAttributesRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteAttributes operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DeleteAttributes method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DeleteAttributesRequest method.
+// req, resp := client.DeleteAttributesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *SimpleDB) DeleteAttributesRequest(input *DeleteAttributesInput) (req *request.Request, output *DeleteAttributesOutput) {
+ op := &request.Operation{
+ Name: opDeleteAttributes,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteAttributesInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &DeleteAttributesOutput{}
+ req.Data = output
+ return
+}
+
+// Deletes one or more attributes associated with an item. If all attributes
+// of the item are deleted, the item is deleted.
+//
+// If DeleteAttributes is called without any attributes or values being
+// specified, all the attributes for the item are deleted. DeleteAttributes
+// is an idempotent operation; running it multiple times on the same item or
+// attribute does not result in an error response.
+//
+// Because Amazon SimpleDB makes multiple copies of item data and uses an
+// eventual consistency update model, performing a GetAttributes or Select operation
+// (read) immediately after a DeleteAttributes or PutAttributes operation (write)
+// might not return updated item data.
+func (c *SimpleDB) DeleteAttributes(input *DeleteAttributesInput) (*DeleteAttributesOutput, error) {
+ req, out := c.DeleteAttributesRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteDomain = "DeleteDomain"
+
+// DeleteDomainRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteDomain operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DeleteDomain method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DeleteDomainRequest method.
+// req, resp := client.DeleteDomainRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *SimpleDB) DeleteDomainRequest(input *DeleteDomainInput) (req *request.Request, output *DeleteDomainOutput) {
+ op := &request.Operation{
+ Name: opDeleteDomain,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteDomainInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &DeleteDomainOutput{}
+ req.Data = output
+ return
+}
+
+// The DeleteDomain operation deletes a domain. Any items (and their attributes)
+// in the domain are deleted as well. The DeleteDomain operation might take
+// 10 or more seconds to complete.
+//
+// Running DeleteDomain on a domain that does not exist or running the function
+// multiple times using the same domain name will not result in an error response.
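+//
+// A minimal usage sketch (svc is a *SimpleDB client; error handling elided):
+//
+//     _, err := svc.DeleteDomain(&simpledb.DeleteDomainInput{
+//         DomainName: aws.String("MyDomain"),
+//     })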
+func (c *SimpleDB) DeleteDomain(input *DeleteDomainInput) (*DeleteDomainOutput, error) { + req, out := c.DeleteDomainRequest(input) + err := req.Send() + return out, err +} + +const opDomainMetadata = "DomainMetadata" + +// DomainMetadataRequest generates a "aws/request.Request" representing the +// client's request for the DomainMetadata operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DomainMetadata method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DomainMetadataRequest method. +// req, resp := client.DomainMetadataRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SimpleDB) DomainMetadataRequest(input *DomainMetadataInput) (req *request.Request, output *DomainMetadataOutput) { + op := &request.Operation{ + Name: opDomainMetadata, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DomainMetadataInput{} + } + + req = c.newRequest(op, input, output) + output = &DomainMetadataOutput{} + req.Data = output + return +} + +// Returns information about the domain, including when the domain was created, +// the number of items and attributes in the domain, and the size of the attribute +// names and values. +func (c *SimpleDB) DomainMetadata(input *DomainMetadataInput) (*DomainMetadataOutput, error) { + req, out := c.DomainMetadataRequest(input) + err := req.Send() + return out, err +} + +const opGetAttributes = "GetAttributes" + +// GetAttributesRequest generates a "aws/request.Request" representing the +// client's request for the GetAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetAttributesRequest method. +// req, resp := client.GetAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SimpleDB) GetAttributesRequest(input *GetAttributesInput) (req *request.Request, output *GetAttributesOutput) { + op := &request.Operation{ + Name: opGetAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetAttributesOutput{} + req.Data = output + return +} + +// Returns all of the attributes associated with the specified item. Optionally, +// the attributes returned can be limited to one or more attributes by specifying +// an attribute name parameter. 
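+//
+// As an editorial sketch (attribute, item, and domain names are illustrative),
+// a read limited to two attributes might look like:
+//
+//    resp, err := svc.GetAttributes(&simpledb.GetAttributesInput{
+//        DomainName:     aws.String("mydomain"),
+//        ItemName:       aws.String("item1"),
+//        AttributeNames: []*string{aws.String("color"), aws.String("size")},
+//    })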
+// +// If the item does not exist on the replica that was accessed for this operation, +// an empty set is returned. The system does not return an error as it cannot +// guarantee the item does not exist on other replicas. +// +// If GetAttributes is called without being passed any attribute names, all +// the attributes for the item are returned. +func (c *SimpleDB) GetAttributes(input *GetAttributesInput) (*GetAttributesOutput, error) { + req, out := c.GetAttributesRequest(input) + err := req.Send() + return out, err +} + +const opListDomains = "ListDomains" + +// ListDomainsRequest generates a "aws/request.Request" representing the +// client's request for the ListDomains operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListDomains method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListDomainsRequest method. +// req, resp := client.ListDomainsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SimpleDB) ListDomainsRequest(input *ListDomainsInput) (req *request.Request, output *ListDomainsOutput) { + op := &request.Operation{ + Name: opListDomains, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxNumberOfDomains", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDomainsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListDomainsOutput{} + req.Data = output + return +} + +// The ListDomains operation lists all domains associated with the Access Key +// ID. It returns domain names up to the limit set by MaxNumberOfDomains (#MaxNumberOfDomains). +// A NextToken (#NextToken) is returned if there are more than MaxNumberOfDomains +// domains. Calling ListDomains successive times with the NextToken provided +// by the operation returns up to MaxNumberOfDomains more domain names with +// each successive operation call. +func (c *SimpleDB) ListDomains(input *ListDomainsInput) (*ListDomainsOutput, error) { + req, out := c.ListDomainsRequest(input) + err := req.Send() + return out, err +} + +// ListDomainsPages iterates over the pages of a ListDomains operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDomains method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDomains operation. 
+// pageNum := 0
+// err := client.ListDomainsPages(params,
+// func(page *ListDomainsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *SimpleDB) ListDomainsPages(input *ListDomainsInput, fn func(p *ListDomainsOutput, lastPage bool) (shouldContinue bool)) error {
+ page, _ := c.ListDomainsRequest(input)
+ page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
+ return fn(p.(*ListDomainsOutput), lastPage)
+ })
+}
+
+const opPutAttributes = "PutAttributes"
+
+// PutAttributesRequest generates a "aws/request.Request" representing the
+// client's request for the PutAttributes operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutAttributes method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the PutAttributesRequest method.
+// req, resp := client.PutAttributesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *SimpleDB) PutAttributesRequest(input *PutAttributesInput) (req *request.Request, output *PutAttributesOutput) {
+ op := &request.Operation{
+ Name: opPutAttributes,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &PutAttributesInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &PutAttributesOutput{}
+ req.Data = output
+ return
+}
+
+// The PutAttributes operation creates or replaces attributes in an item. The
+// client may specify new attributes using a combination of the Attribute.X.Name
+// and Attribute.X.Value parameters. The client specifies the first attribute
+// by the parameters Attribute.0.Name and Attribute.0.Value, the second attribute
+// by the parameters Attribute.1.Name and Attribute.1.Value, and so on.
+//
+// Attributes are uniquely identified in an item by their name/value combination.
+// For example, a single item can have the attributes { "first_name", "first_value"
+// } and { "first_name", "second_value" }. However, it cannot have two attribute
+// instances where both the Attribute.X.Name and Attribute.X.Value are the same.
+//
+// Optionally, the requestor can supply the Replace parameter for each individual
+// attribute. Setting this value to true causes the new attribute value to replace
+// the existing attribute value(s). For example, if an item has the attributes
+// { 'a', '1' }, { 'b', '2' } and { 'b', '3' } and the requestor calls PutAttributes
+// using the attributes { 'b', '4' } with the Replace parameter set to true,
+// the final attributes of the item are changed to { 'a', '1' } and { 'b', '4'
+// }, which replaces the previous values of the 'b' attribute with the new value.
+//
+// Using PutAttributes to replace attribute values that do not exist will
+// not result in an error response. You cannot specify an empty string as
+// an attribute name.
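+//
+// As an editorial sketch (domain and item names are illustrative), replacing
+// every existing value of attribute "b" in a single call:
+//
+//    _, err := svc.PutAttributes(&simpledb.PutAttributesInput{
+//        DomainName: aws.String("mydomain"),
+//        ItemName:   aws.String("item1"),
+//        Attributes: []*simpledb.ReplaceableAttribute{
+//            {Name: aws.String("b"), Value: aws.String("4"), Replace: aws.Bool(true)},
+//        },
+//    })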
+//
+// Because Amazon SimpleDB makes multiple copies of client data and uses an
+// eventual consistency update model, a GetAttributes or Select operation (read)
+// performed immediately after a PutAttributes or DeleteAttributes operation
+// (write) might not return the updated data.
+//
+// The following limitations are enforced for this operation:
+//    - 256 total attribute name-value pairs per item
+//    - One billion attributes per domain
+//    - 10 GB of total user data storage per domain
+func (c *SimpleDB) PutAttributes(input *PutAttributesInput) (*PutAttributesOutput, error) {
+ req, out := c.PutAttributesRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opSelect = "Select"
+
+// SelectRequest generates a "aws/request.Request" representing the
+// client's request for the Select operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the Select method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the SelectRequest method.
+// req, resp := client.SelectRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *SimpleDB) SelectRequest(input *SelectInput) (req *request.Request, output *SelectOutput) {
+ op := &request.Operation{
+ Name: opSelect,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"NextToken"},
+ OutputTokens: []string{"NextToken"},
+ LimitToken: "",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &SelectInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &SelectOutput{}
+ req.Data = output
+ return
+}
+
+// The Select operation returns a set of attributes for ItemNames that match
+// the select expression. Select is similar to the standard SQL SELECT statement.
+//
+// The total size of the response cannot exceed 1 MB. Amazon SimpleDB
+// automatically adjusts the number of items returned per page to enforce
+// this limit. For example, if the client asks to retrieve 2500 items, but each
+// individual item is 10 kB in size, the system returns 100 items and an appropriate
+// NextToken so the client can access the next page of results.
+//
+// For information on how to construct select expressions, see Using Select
+// to Create Amazon SimpleDB Queries in the Developer Guide.
+func (c *SimpleDB) Select(input *SelectInput) (*SelectOutput, error) {
+ req, out := c.SelectRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// SelectPages iterates over the pages of a Select operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See Select method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a Select operation.
+// pageNum := 0 +// err := client.SelectPages(params, +// func(page *SelectOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SimpleDB) SelectPages(input *SelectInput, fn func(p *SelectOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.SelectRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*SelectOutput), lastPage) + }) +} + +type Attribute struct { + _ struct{} `type:"structure"` + + AlternateNameEncoding *string `type:"string"` + + AlternateValueEncoding *string `type:"string"` + + // The name of the attribute. + Name *string `type:"string" required:"true"` + + // The value of the attribute. + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Attribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Attribute) GoString() string { + return s.String() +} + +type BatchDeleteAttributesInput struct { + _ struct{} `type:"structure"` + + // The name of the domain in which the attributes are being deleted. + DomainName *string `type:"string" required:"true"` + + // A list of items on which to perform the operation. + Items []*DeletableItem `locationNameList:"Item" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s BatchDeleteAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchDeleteAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchDeleteAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchDeleteAttributesInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.Items == nil { + invalidParams.Add(request.NewErrParamRequired("Items")) + } + if s.Items != nil { + for i, v := range s.Items { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Items", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type BatchDeleteAttributesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s BatchDeleteAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchDeleteAttributesOutput) GoString() string { + return s.String() +} + +type BatchPutAttributesInput struct { + _ struct{} `type:"structure"` + + // The name of the domain in which the attributes are being stored. + DomainName *string `type:"string" required:"true"` + + // A list of items on which to perform the operation. + Items []*ReplaceableItem `locationNameList:"Item" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s BatchPutAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchPutAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
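+//
+// As an editorial note: the SDK runs this validation before the request is
+// sent, so missing required fields surface as an error without a network
+// call. A minimal sketch:
+//
+//    err := (&simpledb.BatchPutAttributesInput{}).Validate()
+//    // err is a request.ErrInvalidParams naming DomainName and Items.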
+func (s *BatchPutAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchPutAttributesInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.Items == nil { + invalidParams.Add(request.NewErrParamRequired("Items")) + } + if s.Items != nil { + for i, v := range s.Items { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Items", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type BatchPutAttributesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s BatchPutAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchPutAttributesOutput) GoString() string { + return s.String() +} + +type CreateDomainInput struct { + _ struct{} `type:"structure"` + + // The name of the domain to create. The name can range between 3 and 255 characters + // and can contain the following characters: a-z, A-Z, 0-9, '_', '-', and '.'. + DomainName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateDomainInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDomainInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDomainInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDomainInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateDomainOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateDomainOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDomainOutput) GoString() string { + return s.String() +} + +type DeletableAttribute struct { + _ struct{} `type:"structure"` + + // The name of the attribute. + Name *string `type:"string" required:"true"` + + // The value of the attribute. + Value *string `type:"string"` +} + +// String returns the string representation +func (s DeletableAttribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletableAttribute) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeletableAttribute) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeletableAttribute"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeletableItem struct { + _ struct{} `type:"structure"` + + Attributes []*DeletableAttribute `locationNameList:"Attribute" type:"list" flattened:"true"` + + Name *string `locationName:"ItemName" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletableItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletableItem) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeletableItem) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeletableItem"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Attributes != nil { + for i, v := range s.Attributes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Attributes", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteAttributesInput struct { + _ struct{} `type:"structure"` + + // A list of Attributes. Similar to columns on a spreadsheet, attributes represent + // categories of data that can be assigned to items. + Attributes []*DeletableAttribute `locationNameList:"Attribute" type:"list" flattened:"true"` + + // The name of the domain in which to perform the operation. + DomainName *string `type:"string" required:"true"` + + // The update condition which, if specified, determines whether the specified + // attributes will be deleted or not. The update condition must be satisfied + // in order for this request to be processed and the attributes to be deleted. + Expected *UpdateCondition `type:"structure"` + + // The name of the item. Similar to rows on a spreadsheet, items represent individual + // objects that contain one or more value-attribute pairs. + ItemName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteAttributesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteAttributesInput"}
+ if s.DomainName == nil {
+ invalidParams.Add(request.NewErrParamRequired("DomainName"))
+ }
+ if s.ItemName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ItemName"))
+ }
+ if s.Attributes != nil {
+ for i, v := range s.Attributes {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Attributes", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type DeleteAttributesOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteAttributesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteAttributesOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteDomainInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the domain to delete.
+ DomainName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteDomainInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteDomainInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteDomainInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteDomainInput"}
+ if s.DomainName == nil {
+ invalidParams.Add(request.NewErrParamRequired("DomainName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type DeleteDomainOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteDomainOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteDomainOutput) GoString() string {
+ return s.String()
+}
+
+type DomainMetadataInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the domain for which to display metadata.
+ DomainName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DomainMetadataInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DomainMetadataInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DomainMetadataInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DomainMetadataInput"}
+ if s.DomainName == nil {
+ invalidParams.Add(request.NewErrParamRequired("DomainName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type DomainMetadataOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The number of unique attribute names in the domain.
+ AttributeNameCount *int64 `type:"integer"`
+
+ // The total size of all unique attribute names in the domain, in bytes.
+ AttributeNamesSizeBytes *int64 `type:"long"`
+
+ // The number of all attribute name/value pairs in the domain.
+ AttributeValueCount *int64 `type:"integer"`
+
+ // The total size of all attribute values in the domain, in bytes.
+ AttributeValuesSizeBytes *int64 `type:"long"`
+
+ // The number of all items in the domain.
+ ItemCount *int64 `type:"integer"`
+
+ // The total size of all item names in the domain, in bytes.
+ ItemNamesSizeBytes *int64 `type:"long"`
+
+ // The date and time when metadata was calculated, in Epoch (UNIX) seconds.
+ Timestamp *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s DomainMetadataOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DomainMetadataOutput) GoString() string {
+ return s.String()
+}
+
+type GetAttributesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The names of the attributes.
+ AttributeNames []*string `locationNameList:"AttributeName" type:"list" flattened:"true"`
+
+ // Determines whether or not strong consistency should be enforced when data
+ // is read from SimpleDB. If true, any data previously written to SimpleDB will
+ // be returned. Otherwise, results will be eventually consistent, and the client
+ // may not see data that was written immediately before the read.
+ ConsistentRead *bool `type:"boolean"`
+
+ // The name of the domain in which to perform the operation.
+ DomainName *string `type:"string" required:"true"`
+
+ // The name of the item.
+ ItemName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetAttributesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetAttributesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetAttributesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetAttributesInput"}
+ if s.DomainName == nil {
+ invalidParams.Add(request.NewErrParamRequired("DomainName"))
+ }
+ if s.ItemName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ItemName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type GetAttributesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The list of attributes returned by the operation.
+ Attributes []*Attribute `locationNameList:"Attribute" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s GetAttributesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetAttributesOutput) GoString() string {
+ return s.String()
+}
+
+type Item struct {
+ _ struct{} `type:"structure"`
+
+ AlternateNameEncoding *string `type:"string"`
+
+ // A list of attributes.
+ Attributes []*Attribute `locationNameList:"Attribute" type:"list" flattened:"true" required:"true"`
+
+ // The name of the item.
+ Name *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s Item) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Item) GoString() string {
+ return s.String()
+}
+
+type ListDomainsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The maximum number of domain names you want returned. The range is 1 to 100.
+ // The default setting is 100.
+ MaxNumberOfDomains *int64 `type:"integer"`
+
+ // A string informing Amazon SimpleDB where to start the next list of domain
+ // names.
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListDomainsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDomainsInput) GoString() string { + return s.String() +} + +type ListDomainsOutput struct { + _ struct{} `type:"structure"` + + // A list of domain names that match the expression. + DomainNames []*string `locationNameList:"DomainName" type:"list" flattened:"true"` + + // An opaque token indicating that there are more domains than the specified + // MaxNumberOfDomains still available. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListDomainsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDomainsOutput) GoString() string { + return s.String() +} + +type PutAttributesInput struct { + _ struct{} `type:"structure"` + + // The list of attributes. + Attributes []*ReplaceableAttribute `locationNameList:"Attribute" type:"list" flattened:"true" required:"true"` + + // The name of the domain in which to perform the operation. + DomainName *string `type:"string" required:"true"` + + // The update condition which, if specified, determines whether the specified + // attributes will be updated or not. The update condition must be satisfied + // in order for this request to be processed and the attributes to be updated. + Expected *UpdateCondition `type:"structure"` + + // The name of the item. + ItemName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s PutAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutAttributesInput"} + if s.Attributes == nil { + invalidParams.Add(request.NewErrParamRequired("Attributes")) + } + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.ItemName == nil { + invalidParams.Add(request.NewErrParamRequired("ItemName")) + } + if s.Attributes != nil { + for i, v := range s.Attributes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Attributes", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutAttributesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutAttributesOutput) GoString() string { + return s.String() +} + +type ReplaceableAttribute struct { + _ struct{} `type:"structure"` + + // The name of the replaceable attribute. + Name *string `type:"string" required:"true"` + + // A flag specifying whether or not to replace the attribute/value pair or to + // add a new attribute/value pair. The default setting is false. + Replace *bool `type:"boolean"` + + // The value of the replaceable attribute. 
+ Value *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ReplaceableAttribute) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplaceableAttribute) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReplaceableAttribute) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ReplaceableAttribute"}
+ if s.Name == nil {
+ invalidParams.Add(request.NewErrParamRequired("Name"))
+ }
+ if s.Value == nil {
+ invalidParams.Add(request.NewErrParamRequired("Value"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type ReplaceableItem struct {
+ _ struct{} `type:"structure"`
+
+ // The list of attributes for a replaceable item.
+ Attributes []*ReplaceableAttribute `locationNameList:"Attribute" type:"list" flattened:"true" required:"true"`
+
+ // The name of the replaceable item.
+ Name *string `locationName:"ItemName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ReplaceableItem) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplaceableItem) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReplaceableItem) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ReplaceableItem"}
+ if s.Attributes == nil {
+ invalidParams.Add(request.NewErrParamRequired("Attributes"))
+ }
+ if s.Name == nil {
+ invalidParams.Add(request.NewErrParamRequired("Name"))
+ }
+ if s.Attributes != nil {
+ for i, v := range s.Attributes {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Attributes", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type SelectInput struct {
+ _ struct{} `type:"structure"`
+
+ // Determines whether or not strong consistency should be enforced when data
+ // is read from SimpleDB. If true, any data previously written to SimpleDB will
+ // be returned. Otherwise, results will be eventually consistent, and the client
+ // may not see data that was written immediately before the read.
+ ConsistentRead *bool `type:"boolean"`
+
+ // A string informing Amazon SimpleDB where to start the next list of ItemNames.
+ NextToken *string `type:"string"`
+
+ // The expression used to query the domain.
+ SelectExpression *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s SelectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SelectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SelectInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "SelectInput"}
+ if s.SelectExpression == nil {
+ invalidParams.Add(request.NewErrParamRequired("SelectExpression"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type SelectOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of items that match the select expression.
+ Items []*Item `locationNameList:"Item" type:"list" flattened:"true"` + + // An opaque token indicating that more items than MaxNumberOfItems were matched, + // the response size exceeded 1 megabyte, or the execution time exceeded 5 seconds. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s SelectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SelectOutput) GoString() string { + return s.String() +} + +// Specifies the conditions under which data should be updated. If an update +// condition is specified for a request, the data will only be updated if the +// condition is satisfied. For example, if an attribute with a specific name +// and value exists, or if a specific attribute doesn't exist. +type UpdateCondition struct { + _ struct{} `type:"structure"` + + // A value specifying whether or not the specified attribute must exist with + // the specified value in order for the update condition to be satisfied. Specify + // true if the attribute must exist for the update condition to be satisfied. + // Specify false if the attribute should not exist in order for the update condition + // to be satisfied. + Exists *bool `type:"boolean"` + + // The name of the attribute involved in the condition. + Name *string `type:"string"` + + // The value of an attribute. This value can only be specified when the Exists + // parameter is equal to true. + Value *string `type:"string"` +} + +// String returns the string representation +func (s UpdateCondition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateCondition) GoString() string { + return s.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/simpledb/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/simpledb/customizations.go new file mode 100644 index 000000000..a0dcce54b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/simpledb/customizations.go @@ -0,0 +1,11 @@ +package simpledb + +import "github.com/aws/aws-sdk-go/aws/client" + +func init() { + initClient = func(c *client.Client) { + // SimpleDB uses custom error unmarshaling logic + c.Handlers.UnmarshalError.Clear() + c.Handlers.UnmarshalError.PushBack(unmarshalError) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/simpledb/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/simpledb/examples_test.go new file mode 100644 index 000000000..3cc39cc35 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/simpledb/examples_test.go @@ -0,0 +1,269 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package simpledb_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/simpledb" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleSimpleDB_BatchDeleteAttributes() { + svc := simpledb.New(session.New()) + + params := &simpledb.BatchDeleteAttributesInput{ + DomainName: aws.String("String"), // Required + Items: []*simpledb.DeletableItem{ // Required + { // Required + Name: aws.String("String"), // Required + Attributes: []*simpledb.DeletableAttribute{ + { // Required + Name: aws.String("String"), // Required + Value: aws.String("String"), + }, + // More values... + }, + }, + // More values... 
+ }, + } + resp, err := svc.BatchDeleteAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSimpleDB_BatchPutAttributes() { + svc := simpledb.New(session.New()) + + params := &simpledb.BatchPutAttributesInput{ + DomainName: aws.String("String"), // Required + Items: []*simpledb.ReplaceableItem{ // Required + { // Required + Attributes: []*simpledb.ReplaceableAttribute{ // Required + { // Required + Name: aws.String("String"), // Required + Value: aws.String("String"), // Required + Replace: aws.Bool(true), + }, + // More values... + }, + Name: aws.String("String"), // Required + }, + // More values... + }, + } + resp, err := svc.BatchPutAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSimpleDB_CreateDomain() { + svc := simpledb.New(session.New()) + + params := &simpledb.CreateDomainInput{ + DomainName: aws.String("String"), // Required + } + resp, err := svc.CreateDomain(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSimpleDB_DeleteAttributes() { + svc := simpledb.New(session.New()) + + params := &simpledb.DeleteAttributesInput{ + DomainName: aws.String("String"), // Required + ItemName: aws.String("String"), // Required + Attributes: []*simpledb.DeletableAttribute{ + { // Required + Name: aws.String("String"), // Required + Value: aws.String("String"), + }, + // More values... + }, + Expected: &simpledb.UpdateCondition{ + Exists: aws.Bool(true), + Name: aws.String("String"), + Value: aws.String("String"), + }, + } + resp, err := svc.DeleteAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSimpleDB_DeleteDomain() { + svc := simpledb.New(session.New()) + + params := &simpledb.DeleteDomainInput{ + DomainName: aws.String("String"), // Required + } + resp, err := svc.DeleteDomain(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSimpleDB_DomainMetadata() { + svc := simpledb.New(session.New()) + + params := &simpledb.DomainMetadataInput{ + DomainName: aws.String("String"), // Required + } + resp, err := svc.DomainMetadata(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSimpleDB_GetAttributes() { + svc := simpledb.New(session.New()) + + params := &simpledb.GetAttributesInput{ + DomainName: aws.String("String"), // Required + ItemName: aws.String("String"), // Required + AttributeNames: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + ConsistentRead: aws.Bool(true), + } + resp, err := svc.GetAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSimpleDB_ListDomains() { + svc := simpledb.New(session.New()) + + params := &simpledb.ListDomainsInput{ + MaxNumberOfDomains: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.ListDomains(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSimpleDB_PutAttributes() { + svc := simpledb.New(session.New()) + + params := &simpledb.PutAttributesInput{ + Attributes: []*simpledb.ReplaceableAttribute{ // Required + { // Required + Name: aws.String("String"), // Required + Value: aws.String("String"), // Required + Replace: aws.Bool(true), + }, + // More values... + }, + DomainName: aws.String("String"), // Required + ItemName: aws.String("String"), // Required + Expected: &simpledb.UpdateCondition{ + Exists: aws.Bool(true), + Name: aws.String("String"), + Value: aws.String("String"), + }, + } + resp, err := svc.PutAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSimpleDB_Select() { + svc := simpledb.New(session.New()) + + params := &simpledb.SelectInput{ + SelectExpression: aws.String("String"), // Required + ConsistentRead: aws.Bool(true), + NextToken: aws.String("String"), + } + resp, err := svc.Select(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/simpledb/service.go b/vendor/github.com/aws/aws-sdk-go/service/simpledb/service.go new file mode 100644 index 000000000..196047620 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/simpledb/service.go @@ -0,0 +1,102 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package simpledb + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/signer/v2" +) + +// Amazon SimpleDB is a web service providing the core database functions of +// data indexing and querying in the cloud. By offloading the time and effort +// associated with building and operating a web-scale database, SimpleDB provides +// developers the freedom to focus on application development. A traditional, +// clustered relational database requires a sizable upfront capital outlay, +// is complex to design, and often requires extensive and repetitive database +// administration. Amazon SimpleDB is dramatically simpler, requiring no schema, +// automatically indexing your data and providing a simple API for storage and +// access. This approach eliminates the administrative burden of data modeling, +// index maintenance, and performance tuning. 
Developers gain access to this
+// functionality within Amazon's proven computing environment, are able to scale
+// instantly, and pay only for what they use.
+//
+// Visit http://aws.amazon.com/simpledb/ (http://aws.amazon.com/simpledb/)
+// for more information.
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type SimpleDB struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "sdb"
+
+// New creates a new instance of the SimpleDB client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a SimpleDB client from just a session.
+// svc := simpledb.New(mySession)
+//
+// // Create a SimpleDB client with additional configuration
+// svc := simpledb.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *SimpleDB {
+ c := p.ClientConfig(ServiceName, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *SimpleDB {
+ svc := &SimpleDB{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2009-04-15",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBackNamed(v2.SignRequestHandler)
+ svc.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
+ svc.Handlers.Build.PushBackNamed(query.BuildHandler)
+ svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler)
+ svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler)
+ svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler)
+
+ // Run custom client initialization if present
+ if initClient != nil {
+ initClient(svc.Client)
+ }
+
+ return svc
+}
+
+// newRequest creates a new request for a SimpleDB operation and runs any
+// custom request initialization.
+func (c *SimpleDB) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ // Run custom request initialization if present
+ if initRequest != nil {
+ initRequest(req)
+ }
+
+ return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/simpledb/simpledbiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/simpledb/simpledbiface/interface.go
new file mode 100644
index 000000000..ec20c016c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/simpledb/simpledbiface/interface.go
@@ -0,0 +1,58 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package simpledbiface provides an interface for Amazon SimpleDB.
+package simpledbiface
+
+import (
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/service/simpledb"
+)
+
+// SimpleDBAPI is the interface type for simpledb.SimpleDB.
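+//
+// As an editorial sketch of the intended use (names are illustrative), the
+// interface can be embedded in a test double so only the methods exercised
+// by the code under test need real implementations:
+//
+//    type mockSimpleDB struct {
+//        simpledbiface.SimpleDBAPI
+//        domains []*string
+//    }
+//
+//    func (m *mockSimpleDB) ListDomains(in *simpledb.ListDomainsInput) (*simpledb.ListDomainsOutput, error) {
+//        return &simpledb.ListDomainsOutput{DomainNames: m.domains}, nil
+//    }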
+type SimpleDBAPI interface { + BatchDeleteAttributesRequest(*simpledb.BatchDeleteAttributesInput) (*request.Request, *simpledb.BatchDeleteAttributesOutput) + + BatchDeleteAttributes(*simpledb.BatchDeleteAttributesInput) (*simpledb.BatchDeleteAttributesOutput, error) + + BatchPutAttributesRequest(*simpledb.BatchPutAttributesInput) (*request.Request, *simpledb.BatchPutAttributesOutput) + + BatchPutAttributes(*simpledb.BatchPutAttributesInput) (*simpledb.BatchPutAttributesOutput, error) + + CreateDomainRequest(*simpledb.CreateDomainInput) (*request.Request, *simpledb.CreateDomainOutput) + + CreateDomain(*simpledb.CreateDomainInput) (*simpledb.CreateDomainOutput, error) + + DeleteAttributesRequest(*simpledb.DeleteAttributesInput) (*request.Request, *simpledb.DeleteAttributesOutput) + + DeleteAttributes(*simpledb.DeleteAttributesInput) (*simpledb.DeleteAttributesOutput, error) + + DeleteDomainRequest(*simpledb.DeleteDomainInput) (*request.Request, *simpledb.DeleteDomainOutput) + + DeleteDomain(*simpledb.DeleteDomainInput) (*simpledb.DeleteDomainOutput, error) + + DomainMetadataRequest(*simpledb.DomainMetadataInput) (*request.Request, *simpledb.DomainMetadataOutput) + + DomainMetadata(*simpledb.DomainMetadataInput) (*simpledb.DomainMetadataOutput, error) + + GetAttributesRequest(*simpledb.GetAttributesInput) (*request.Request, *simpledb.GetAttributesOutput) + + GetAttributes(*simpledb.GetAttributesInput) (*simpledb.GetAttributesOutput, error) + + ListDomainsRequest(*simpledb.ListDomainsInput) (*request.Request, *simpledb.ListDomainsOutput) + + ListDomains(*simpledb.ListDomainsInput) (*simpledb.ListDomainsOutput, error) + + ListDomainsPages(*simpledb.ListDomainsInput, func(*simpledb.ListDomainsOutput, bool) bool) error + + PutAttributesRequest(*simpledb.PutAttributesInput) (*request.Request, *simpledb.PutAttributesOutput) + + PutAttributes(*simpledb.PutAttributesInput) (*simpledb.PutAttributesOutput, error) + + SelectRequest(*simpledb.SelectInput) (*request.Request, *simpledb.SelectOutput) + + Select(*simpledb.SelectInput) (*simpledb.SelectOutput, error) + + SelectPages(*simpledb.SelectInput, func(*simpledb.SelectOutput, bool) bool) error +} + +var _ SimpleDBAPI = (*simpledb.SimpleDB)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/simpledb/unmarshal_error_leak_test.go b/vendor/github.com/aws/aws-sdk-go/service/simpledb/unmarshal_error_leak_test.go new file mode 100644 index 000000000..340791db1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/simpledb/unmarshal_error_leak_test.go @@ -0,0 +1,33 @@ +package simpledb + +import ( + "github.com/stretchr/testify/assert" + "net/http" + "testing" + + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting" +) + +func TestUnmarhsalErrorLeak(t *testing.T) { + req := &request.Request{ + HTTPRequest: &http.Request{ + Header: make(http.Header), + Body: &awstesting.ReadCloser{Size: 2048}, + }, + } + req.HTTPResponse = &http.Response{ + Body: &awstesting.ReadCloser{Size: 2048}, + Header: http.Header{ + "X-Amzn-Requestid": []string{"1"}, + }, + StatusCode: http.StatusOK, + } + + reader := req.HTTPResponse.Body.(*awstesting.ReadCloser) + unmarshalError(req) + + assert.NotNil(t, req.Error) + assert.Equal(t, reader.Closed, true) + assert.Equal(t, reader.Size, 0) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/simpledb/unmarshall_error.go b/vendor/github.com/aws/aws-sdk-go/service/simpledb/unmarshall_error.go new file mode 100644 index 000000000..acc8a86eb --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go/service/simpledb/unmarshall_error.go @@ -0,0 +1,53 @@ +package simpledb + +import ( + "encoding/xml" + "io" + "io/ioutil" + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +type xmlErrorDetail struct { + Code string `xml:"Code"` + Message string `xml:"Message"` +} + +type xmlErrorResponse struct { + XMLName xml.Name `xml:"Response"` + Errors []xmlErrorDetail `xml:"Errors>Error"` + RequestID string `xml:"RequestID"` +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + defer io.Copy(ioutil.Discard, r.HTTPResponse.Body) + + if r.HTTPResponse.ContentLength == int64(0) { + // No body, use status code to generate an awserr.Error + r.Error = awserr.NewRequestFailure( + awserr.New(strings.Replace(r.HTTPResponse.Status, " ", "", -1), r.HTTPResponse.Status, nil), + r.HTTPResponse.StatusCode, + "", + ) + return + } + + resp := &xmlErrorResponse{} + err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp) + if err != nil && err != io.EOF { + r.Error = awserr.New("SerializationError", "failed to decode SimpleDB XML error response", nil) + } else if len(resp.Errors) == 0 { + r.Error = awserr.New("MissingError", "missing error code in SimpleDB XML error response", nil) + } else { + // If there are multiple error codes, return only the first as the aws.Error interface only supports + // one error code. + r.Error = awserr.NewRequestFailure( + awserr.New(resp.Errors[0].Code, resp.Errors[0].Message, nil), + r.HTTPResponse.StatusCode, + resp.RequestID, + ) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/simpledb/unmarshall_error_test.go b/vendor/github.com/aws/aws-sdk-go/service/simpledb/unmarshall_error_test.go new file mode 100644 index 000000000..53f35a2d5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/simpledb/unmarshall_error_test.go @@ -0,0 +1,139 @@ +package simpledb_test + +import ( + "bytes" + "io/ioutil" + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/simpledb" +) + +var statusCodeErrorTests = []struct { + scode int + status string + code string + message string +}{ + {301, "Moved Permanently", "MovedPermanently", "Moved Permanently"}, + {403, "Forbidden", "Forbidden", "Forbidden"}, + {400, "Bad Request", "BadRequest", "Bad Request"}, + {404, "Not Found", "NotFound", "Not Found"}, + {500, "Internal Error", "InternalError", "Internal Error"}, +} + +func TestStatusCodeError(t *testing.T) { + for _, test := range statusCodeErrorTests { + s := simpledb.New(unit.Session) + s.Handlers.Send.Clear() + s.Handlers.Send.PushBack(func(r *request.Request) { + body := ioutil.NopCloser(bytes.NewReader([]byte{})) + r.HTTPResponse = &http.Response{ + ContentLength: 0, + StatusCode: test.scode, + Status: test.status, + Body: body, + } + }) + _, err := s.CreateDomain(&simpledb.CreateDomainInput{ + DomainName: aws.String("test-domain"), + }) + + assert.Error(t, err) + assert.Equal(t, test.code, err.(awserr.Error).Code()) + assert.Equal(t, test.message, err.(awserr.Error).Message()) + } +} + +var responseErrorTests = []struct { + scode int + status string + code string + message string + requestID string + errors []struct { + code string + message string + } +}{ + { + scode: 400, + status: "Bad Request", + code: "MissingError", + message: "missing error code in 
SimpleDB XML error response",
+ requestID: "101",
+ errors: []struct{ code, message string }{},
+ },
+ {
+ scode: 403,
+ status: "Forbidden",
+ code: "AuthFailure",
+ message: "AWS was not able to validate the provided access keys.",
+ requestID: "1111",
+ errors: []struct{ code, message string }{
+ {"AuthFailure", "AWS was not able to validate the provided access keys."},
+ },
+ },
+ {
+ scode: 500,
+ status: "Internal Error",
+ code: "MissingParameter",
+ message: "Message #1",
+ requestID: "8756",
+ errors: []struct{ code, message string }{
+ {"MissingParameter", "Message #1"},
+ {"InternalError", "Message #2"},
+ },
+ },
+}
+
+func TestResponseError(t *testing.T) {
+ for _, test := range responseErrorTests {
+ s := simpledb.New(unit.Session)
+ s.Handlers.Send.Clear()
+ s.Handlers.Send.PushBack(func(r *request.Request) {
+ xml := createXMLResponse(test.requestID, test.errors)
+ body := ioutil.NopCloser(bytes.NewReader([]byte(xml)))
+ r.HTTPResponse = &http.Response{
+ ContentLength: int64(len(xml)),
+ StatusCode: test.scode,
+ Status: test.status,
+ Body: body,
+ }
+ })
+ _, err := s.CreateDomain(&simpledb.CreateDomainInput{
+ DomainName: aws.String("test-domain"),
+ })
+
+ assert.Error(t, err)
+ assert.Equal(t, test.code, err.(awserr.Error).Code())
+ assert.Equal(t, test.message, err.(awserr.Error).Message())
+ if len(test.errors) > 0 {
+ assert.Equal(t, test.requestID, err.(awserr.RequestFailure).RequestID())
+ assert.Equal(t, test.scode, err.(awserr.RequestFailure).StatusCode())
+ }
+ }
+}
+
+// createXMLResponse constructs an XML string that has one or more error messages in it.
+func createXMLResponse(requestID string, errors []struct{ code, message string }) []byte {
+ var buf bytes.Buffer
+ // Build a <Response> document matching the xmlErrorResponse shape that
+ // unmarshalError decodes.
+ buf.WriteString(`<Response><Errors>`)
+ for _, e := range errors {
+ buf.WriteString(`<Error><Code>`)
+ buf.WriteString(e.code)
+ buf.WriteString(`</Code><Message>`)
+ buf.WriteString(e.message)
+ buf.WriteString(`</Message></Error>`)
+ }
+ buf.WriteString(`</Errors><RequestID>`)
+ buf.WriteString(requestID)
+ buf.WriteString(`</RequestID></Response>`)
+ return buf.Bytes()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sns/api.go b/vendor/github.com/aws/aws-sdk-go/service/sns/api.go
new file mode 100644
index 000000000..5f0be867c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sns/api.go
@@ -0,0 +1,3739 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package sns provides a client for Amazon Simple Notification Service.
+package sns
+
+import (
+ "fmt"
+
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol"
+ "github.com/aws/aws-sdk-go/private/protocol/query"
+)
+
+const opAddPermission = "AddPermission"
+
+// AddPermissionRequest generates a "aws/request.Request" representing the
+// client's request for the AddPermission operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the AddPermission method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the AddPermissionRequest method.
+// req, resp := client.AddPermissionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SNS) AddPermissionRequest(input *AddPermissionInput) (req *request.Request, output *AddPermissionOutput) { + op := &request.Operation{ + Name: opAddPermission, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddPermissionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AddPermissionOutput{} + req.Data = output + return +} + +// Adds a statement to a topic's access control policy, granting access for +// the specified AWS accounts to the specified actions. +func (c *SNS) AddPermission(input *AddPermissionInput) (*AddPermissionOutput, error) { + req, out := c.AddPermissionRequest(input) + err := req.Send() + return out, err +} + +const opCheckIfPhoneNumberIsOptedOut = "CheckIfPhoneNumberIsOptedOut" + +// CheckIfPhoneNumberIsOptedOutRequest generates a "aws/request.Request" representing the +// client's request for the CheckIfPhoneNumberIsOptedOut operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CheckIfPhoneNumberIsOptedOut method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CheckIfPhoneNumberIsOptedOutRequest method. +// req, resp := client.CheckIfPhoneNumberIsOptedOutRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SNS) CheckIfPhoneNumberIsOptedOutRequest(input *CheckIfPhoneNumberIsOptedOutInput) (req *request.Request, output *CheckIfPhoneNumberIsOptedOutOutput) { + op := &request.Operation{ + Name: opCheckIfPhoneNumberIsOptedOut, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CheckIfPhoneNumberIsOptedOutInput{} + } + + req = c.newRequest(op, input, output) + output = &CheckIfPhoneNumberIsOptedOutOutput{} + req.Data = output + return +} + +// Accepts a phone number and indicates whether the phone holder has opted out +// of receiving SMS messages from your account. You cannot send SMS messages +// to a number that is opted out. +// +// To resume sending messages, you can opt in the number by using the OptInPhoneNumber +// action. +func (c *SNS) CheckIfPhoneNumberIsOptedOut(input *CheckIfPhoneNumberIsOptedOutInput) (*CheckIfPhoneNumberIsOptedOutOutput, error) { + req, out := c.CheckIfPhoneNumberIsOptedOutRequest(input) + err := req.Send() + return out, err +} + +const opConfirmSubscription = "ConfirmSubscription" + +// ConfirmSubscriptionRequest generates a "aws/request.Request" representing the +// client's request for the ConfirmSubscription operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
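+//
+//    // A minimal sketch of capturing response data through the "output" value;
+//    // the topic ARN and token below are hypothetical placeholders.
+//    req, out := client.ConfirmSubscriptionRequest(&sns.ConfirmSubscriptionInput{
+//        TopicArn: aws.String("arn:aws:sns:us-east-1:123456789012:my-topic"),
+//        Token:    aws.String("token-from-confirmation-message"),
+//    })
+//    if err := req.Send(); err == nil {
+//        fmt.Println(aws.StringValue(out.SubscriptionArn)) // out is populated once Send succeeds
+//    }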
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ConfirmSubscription method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ConfirmSubscriptionRequest method. +// req, resp := client.ConfirmSubscriptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SNS) ConfirmSubscriptionRequest(input *ConfirmSubscriptionInput) (req *request.Request, output *ConfirmSubscriptionOutput) { + op := &request.Operation{ + Name: opConfirmSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ConfirmSubscriptionInput{} + } + + req = c.newRequest(op, input, output) + output = &ConfirmSubscriptionOutput{} + req.Data = output + return +} + +// Verifies an endpoint owner's intent to receive messages by validating the +// token sent to the endpoint by an earlier Subscribe action. If the token is +// valid, the action creates a new subscription and returns its Amazon Resource +// Name (ARN). This call requires an AWS signature only when the AuthenticateOnUnsubscribe +// flag is set to "true". +func (c *SNS) ConfirmSubscription(input *ConfirmSubscriptionInput) (*ConfirmSubscriptionOutput, error) { + req, out := c.ConfirmSubscriptionRequest(input) + err := req.Send() + return out, err +} + +const opCreatePlatformApplication = "CreatePlatformApplication" + +// CreatePlatformApplicationRequest generates a "aws/request.Request" representing the +// client's request for the CreatePlatformApplication operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreatePlatformApplication method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreatePlatformApplicationRequest method. +// req, resp := client.CreatePlatformApplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SNS) CreatePlatformApplicationRequest(input *CreatePlatformApplicationInput) (req *request.Request, output *CreatePlatformApplicationOutput) { + op := &request.Operation{ + Name: opCreatePlatformApplication, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreatePlatformApplicationInput{} + } + + req = c.newRequest(op, input, output) + output = &CreatePlatformApplicationOutput{} + req.Data = output + return +} + +// Creates a platform application object for one of the supported push notification +// services, such as APNS and GCM, to which devices and mobile apps may register. +// You must specify PlatformPrincipal and PlatformCredential attributes when +// using the CreatePlatformApplication action. 
The PlatformPrincipal is received +// from the notification service. For APNS/APNS_SANDBOX, PlatformPrincipal is +// "SSL certificate". For GCM, PlatformPrincipal is not applicable. For ADM, +// PlatformPrincipal is "client id". The PlatformCredential is also received +// from the notification service. For WNS, PlatformPrincipal is "Package Security +// Identifier". For MPNS, PlatformPrincipal is "TLS certificate". For Baidu, +// PlatformPrincipal is "API key". +// +// For APNS/APNS_SANDBOX, PlatformCredential is "private key". For GCM, PlatformCredential +// is "API key". For ADM, PlatformCredential is "client secret". For WNS, PlatformCredential +// is "secret key". For MPNS, PlatformCredential is "private key". For Baidu, +// PlatformCredential is "secret key". The PlatformApplicationArn that is returned +// when using CreatePlatformApplication is then used as an attribute for the +// CreatePlatformEndpoint action. For more information, see Using Amazon SNS +// Mobile Push Notifications (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). +// For more information about obtaining the PlatformPrincipal and PlatformCredential +// for each of the supported push notification services, see Getting Started +// with Apple Push Notification Service (http://docs.aws.amazon.com/sns/latest/dg/mobile-push-apns.html), +// Getting Started with Amazon Device Messaging (http://docs.aws.amazon.com/sns/latest/dg/mobile-push-adm.html), +// Getting Started with Baidu Cloud Push (http://docs.aws.amazon.com/sns/latest/dg/mobile-push-baidu.html), +// Getting Started with Google Cloud Messaging for Android (http://docs.aws.amazon.com/sns/latest/dg/mobile-push-gcm.html), +// Getting Started with MPNS (http://docs.aws.amazon.com/sns/latest/dg/mobile-push-mpns.html), +// or Getting Started with WNS (http://docs.aws.amazon.com/sns/latest/dg/mobile-push-wns.html). +func (c *SNS) CreatePlatformApplication(input *CreatePlatformApplicationInput) (*CreatePlatformApplicationOutput, error) { + req, out := c.CreatePlatformApplicationRequest(input) + err := req.Send() + return out, err +} + +const opCreatePlatformEndpoint = "CreatePlatformEndpoint" + +// CreatePlatformEndpointRequest generates a "aws/request.Request" representing the +// client's request for the CreatePlatformEndpoint operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreatePlatformEndpoint method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreatePlatformEndpointRequest method. 
+// req, resp := client.CreatePlatformEndpointRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SNS) CreatePlatformEndpointRequest(input *CreatePlatformEndpointInput) (req *request.Request, output *CreatePlatformEndpointOutput) { + op := &request.Operation{ + Name: opCreatePlatformEndpoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreatePlatformEndpointInput{} + } + + req = c.newRequest(op, input, output) + output = &CreatePlatformEndpointOutput{} + req.Data = output + return +} + +// Creates an endpoint for a device and mobile app on one of the supported push +// notification services, such as GCM and APNS. CreatePlatformEndpoint requires +// the PlatformApplicationArn that is returned from CreatePlatformApplication. +// The EndpointArn that is returned when using CreatePlatformEndpoint can then +// be used by the Publish action to send a message to a mobile app or by the +// Subscribe action for subscription to a topic. The CreatePlatformEndpoint +// action is idempotent, so if the requester already owns an endpoint with the +// same device token and attributes, that endpoint's ARN is returned without +// creating a new endpoint. For more information, see Using Amazon SNS Mobile +// Push Notifications (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). +// +// When using CreatePlatformEndpoint with Baidu, two attributes must be provided: +// ChannelId and UserId. The token field must also contain the ChannelId. For +// more information, see Creating an Amazon SNS Endpoint for Baidu (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePushBaiduEndpoint.html). +func (c *SNS) CreatePlatformEndpoint(input *CreatePlatformEndpointInput) (*CreatePlatformEndpointOutput, error) { + req, out := c.CreatePlatformEndpointRequest(input) + err := req.Send() + return out, err +} + +const opCreateTopic = "CreateTopic" + +// CreateTopicRequest generates a "aws/request.Request" representing the +// client's request for the CreateTopic operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateTopic method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateTopicRequest method. +// req, resp := client.CreateTopicRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SNS) CreateTopicRequest(input *CreateTopicInput) (req *request.Request, output *CreateTopicOutput) { + op := &request.Operation{ + Name: opCreateTopic, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTopicInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateTopicOutput{} + req.Data = output + return +} + +// Creates a topic to which notifications can be published. Users can create +// at most 100,000 topics. For more information, see http://aws.amazon.com/sns +// (http://aws.amazon.com/sns/). 
This action is idempotent, so if the requester +// already owns a topic with the specified name, that topic's ARN is returned +// without creating a new topic. +func (c *SNS) CreateTopic(input *CreateTopicInput) (*CreateTopicOutput, error) { + req, out := c.CreateTopicRequest(input) + err := req.Send() + return out, err +} + +const opDeleteEndpoint = "DeleteEndpoint" + +// DeleteEndpointRequest generates a "aws/request.Request" representing the +// client's request for the DeleteEndpoint operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteEndpoint method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteEndpointRequest method. +// req, resp := client.DeleteEndpointRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SNS) DeleteEndpointRequest(input *DeleteEndpointInput) (req *request.Request, output *DeleteEndpointOutput) { + op := &request.Operation{ + Name: opDeleteEndpoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteEndpointInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteEndpointOutput{} + req.Data = output + return +} + +// Deletes the endpoint for a device and mobile app from Amazon SNS. This action +// is idempotent. For more information, see Using Amazon SNS Mobile Push Notifications +// (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). +// +// When you delete an endpoint that is also subscribed to a topic, then you +// must also unsubscribe the endpoint from the topic. +func (c *SNS) DeleteEndpoint(input *DeleteEndpointInput) (*DeleteEndpointOutput, error) { + req, out := c.DeleteEndpointRequest(input) + err := req.Send() + return out, err +} + +const opDeletePlatformApplication = "DeletePlatformApplication" + +// DeletePlatformApplicationRequest generates a "aws/request.Request" representing the +// client's request for the DeletePlatformApplication operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeletePlatformApplication method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeletePlatformApplicationRequest method. 
+// req, resp := client.DeletePlatformApplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SNS) DeletePlatformApplicationRequest(input *DeletePlatformApplicationInput) (req *request.Request, output *DeletePlatformApplicationOutput) { + op := &request.Operation{ + Name: opDeletePlatformApplication, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeletePlatformApplicationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeletePlatformApplicationOutput{} + req.Data = output + return +} + +// Deletes a platform application object for one of the supported push notification +// services, such as APNS and GCM. For more information, see Using Amazon SNS +// Mobile Push Notifications (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). +func (c *SNS) DeletePlatformApplication(input *DeletePlatformApplicationInput) (*DeletePlatformApplicationOutput, error) { + req, out := c.DeletePlatformApplicationRequest(input) + err := req.Send() + return out, err +} + +const opDeleteTopic = "DeleteTopic" + +// DeleteTopicRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTopic operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteTopic method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteTopicRequest method. +// req, resp := client.DeleteTopicRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SNS) DeleteTopicRequest(input *DeleteTopicInput) (req *request.Request, output *DeleteTopicOutput) { + op := &request.Operation{ + Name: opDeleteTopic, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTopicInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteTopicOutput{} + req.Data = output + return +} + +// Deletes a topic and all its subscriptions. Deleting a topic might prevent +// some messages previously sent to the topic from being delivered to subscribers. +// This action is idempotent, so deleting a topic that does not exist does not +// result in an error. +func (c *SNS) DeleteTopic(input *DeleteTopicInput) (*DeleteTopicOutput, error) { + req, out := c.DeleteTopicRequest(input) + err := req.Send() + return out, err +} + +const opGetEndpointAttributes = "GetEndpointAttributes" + +// GetEndpointAttributesRequest generates a "aws/request.Request" representing the +// client's request for the GetEndpointAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetEndpointAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetEndpointAttributesRequest method. +// req, resp := client.GetEndpointAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SNS) GetEndpointAttributesRequest(input *GetEndpointAttributesInput) (req *request.Request, output *GetEndpointAttributesOutput) { + op := &request.Operation{ + Name: opGetEndpointAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetEndpointAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetEndpointAttributesOutput{} + req.Data = output + return +} + +// Retrieves the endpoint attributes for a device on one of the supported push +// notification services, such as GCM and APNS. For more information, see Using +// Amazon SNS Mobile Push Notifications (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). +func (c *SNS) GetEndpointAttributes(input *GetEndpointAttributesInput) (*GetEndpointAttributesOutput, error) { + req, out := c.GetEndpointAttributesRequest(input) + err := req.Send() + return out, err +} + +const opGetPlatformApplicationAttributes = "GetPlatformApplicationAttributes" + +// GetPlatformApplicationAttributesRequest generates a "aws/request.Request" representing the +// client's request for the GetPlatformApplicationAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetPlatformApplicationAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetPlatformApplicationAttributesRequest method. +// req, resp := client.GetPlatformApplicationAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SNS) GetPlatformApplicationAttributesRequest(input *GetPlatformApplicationAttributesInput) (req *request.Request, output *GetPlatformApplicationAttributesOutput) { + op := &request.Operation{ + Name: opGetPlatformApplicationAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetPlatformApplicationAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetPlatformApplicationAttributesOutput{} + req.Data = output + return +} + +// Retrieves the attributes of the platform application object for the supported +// push notification services, such as APNS and GCM. For more information, see +// Using Amazon SNS Mobile Push Notifications (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). 
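+//
+//    // A minimal sketch, assuming a hypothetical application ARN;
+//    // "EventEndpointCreated" is one of the attribute keys this call can return.
+//    out, err := client.GetPlatformApplicationAttributes(&sns.GetPlatformApplicationAttributesInput{
+//        PlatformApplicationArn: aws.String("arn:aws:sns:us-east-1:123456789012:app/GCM/MyApp"),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.Attributes["EventEndpointCreated"]))
+//    }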
+func (c *SNS) GetPlatformApplicationAttributes(input *GetPlatformApplicationAttributesInput) (*GetPlatformApplicationAttributesOutput, error) { + req, out := c.GetPlatformApplicationAttributesRequest(input) + err := req.Send() + return out, err +} + +const opGetSMSAttributes = "GetSMSAttributes" + +// GetSMSAttributesRequest generates a "aws/request.Request" representing the +// client's request for the GetSMSAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetSMSAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetSMSAttributesRequest method. +// req, resp := client.GetSMSAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SNS) GetSMSAttributesRequest(input *GetSMSAttributesInput) (req *request.Request, output *GetSMSAttributesOutput) { + op := &request.Operation{ + Name: opGetSMSAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSMSAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetSMSAttributesOutput{} + req.Data = output + return +} + +// Returns the settings for sending SMS messages from your account. +// +// These settings are set with the SetSMSAttributes action. +func (c *SNS) GetSMSAttributes(input *GetSMSAttributesInput) (*GetSMSAttributesOutput, error) { + req, out := c.GetSMSAttributesRequest(input) + err := req.Send() + return out, err +} + +const opGetSubscriptionAttributes = "GetSubscriptionAttributes" + +// GetSubscriptionAttributesRequest generates a "aws/request.Request" representing the +// client's request for the GetSubscriptionAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetSubscriptionAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetSubscriptionAttributesRequest method. 
+// req, resp := client.GetSubscriptionAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SNS) GetSubscriptionAttributesRequest(input *GetSubscriptionAttributesInput) (req *request.Request, output *GetSubscriptionAttributesOutput) { + op := &request.Operation{ + Name: opGetSubscriptionAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSubscriptionAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetSubscriptionAttributesOutput{} + req.Data = output + return +} + +// Returns all of the properties of a subscription. +func (c *SNS) GetSubscriptionAttributes(input *GetSubscriptionAttributesInput) (*GetSubscriptionAttributesOutput, error) { + req, out := c.GetSubscriptionAttributesRequest(input) + err := req.Send() + return out, err +} + +const opGetTopicAttributes = "GetTopicAttributes" + +// GetTopicAttributesRequest generates a "aws/request.Request" representing the +// client's request for the GetTopicAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetTopicAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetTopicAttributesRequest method. +// req, resp := client.GetTopicAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SNS) GetTopicAttributesRequest(input *GetTopicAttributesInput) (req *request.Request, output *GetTopicAttributesOutput) { + op := &request.Operation{ + Name: opGetTopicAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetTopicAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetTopicAttributesOutput{} + req.Data = output + return +} + +// Returns all of the properties of a topic. Topic properties returned might +// differ based on the authorization of the user. +func (c *SNS) GetTopicAttributes(input *GetTopicAttributesInput) (*GetTopicAttributesOutput, error) { + req, out := c.GetTopicAttributesRequest(input) + err := req.Send() + return out, err +} + +const opListEndpointsByPlatformApplication = "ListEndpointsByPlatformApplication" + +// ListEndpointsByPlatformApplicationRequest generates a "aws/request.Request" representing the +// client's request for the ListEndpointsByPlatformApplication operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListEndpointsByPlatformApplication method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the ListEndpointsByPlatformApplicationRequest method. +// req, resp := client.ListEndpointsByPlatformApplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SNS) ListEndpointsByPlatformApplicationRequest(input *ListEndpointsByPlatformApplicationInput) (req *request.Request, output *ListEndpointsByPlatformApplicationOutput) { + op := &request.Operation{ + Name: opListEndpointsByPlatformApplication, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListEndpointsByPlatformApplicationInput{} + } + + req = c.newRequest(op, input, output) + output = &ListEndpointsByPlatformApplicationOutput{} + req.Data = output + return +} + +// Lists the endpoints and endpoint attributes for devices in a supported push +// notification service, such as GCM and APNS. The results for ListEndpointsByPlatformApplication +// are paginated and return a limited list of endpoints, up to 100. If additional +// records are available after the first page results, then a NextToken string +// will be returned. To receive the next page, you call ListEndpointsByPlatformApplication +// again using the NextToken string received from the previous call. When there +// are no more records to return, NextToken will be null. For more information, +// see Using Amazon SNS Mobile Push Notifications (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). +func (c *SNS) ListEndpointsByPlatformApplication(input *ListEndpointsByPlatformApplicationInput) (*ListEndpointsByPlatformApplicationOutput, error) { + req, out := c.ListEndpointsByPlatformApplicationRequest(input) + err := req.Send() + return out, err +} + +// ListEndpointsByPlatformApplicationPages iterates over the pages of a ListEndpointsByPlatformApplication operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListEndpointsByPlatformApplication method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListEndpointsByPlatformApplication operation. +// pageNum := 0 +// err := client.ListEndpointsByPlatformApplicationPages(params, +// func(page *ListEndpointsByPlatformApplicationOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SNS) ListEndpointsByPlatformApplicationPages(input *ListEndpointsByPlatformApplicationInput, fn func(p *ListEndpointsByPlatformApplicationOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListEndpointsByPlatformApplicationRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListEndpointsByPlatformApplicationOutput), lastPage) + }) +} + +const opListPhoneNumbersOptedOut = "ListPhoneNumbersOptedOut" + +// ListPhoneNumbersOptedOutRequest generates a "aws/request.Request" representing the +// client's request for the ListPhoneNumbersOptedOut operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListPhoneNumbersOptedOut method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListPhoneNumbersOptedOutRequest method. +// req, resp := client.ListPhoneNumbersOptedOutRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SNS) ListPhoneNumbersOptedOutRequest(input *ListPhoneNumbersOptedOutInput) (req *request.Request, output *ListPhoneNumbersOptedOutOutput) { + op := &request.Operation{ + Name: opListPhoneNumbersOptedOut, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListPhoneNumbersOptedOutInput{} + } + + req = c.newRequest(op, input, output) + output = &ListPhoneNumbersOptedOutOutput{} + req.Data = output + return +} + +// Returns a list of phone numbers that are opted out, meaning you cannot send +// SMS messages to them. +// +// The results for ListPhoneNumbersOptedOut are paginated, and each page returns +// up to 100 phone numbers. If additional phone numbers are available after +// the first page of results, then a NextToken string will be returned. To receive +// the next page, you call ListPhoneNumbersOptedOut again using the NextToken +// string received from the previous call. When there are no more records to +// return, NextToken will be null. +func (c *SNS) ListPhoneNumbersOptedOut(input *ListPhoneNumbersOptedOutInput) (*ListPhoneNumbersOptedOutOutput, error) { + req, out := c.ListPhoneNumbersOptedOutRequest(input) + err := req.Send() + return out, err +} + +const opListPlatformApplications = "ListPlatformApplications" + +// ListPlatformApplicationsRequest generates a "aws/request.Request" representing the +// client's request for the ListPlatformApplications operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListPlatformApplications method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListPlatformApplicationsRequest method. 
+// req, resp := client.ListPlatformApplicationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SNS) ListPlatformApplicationsRequest(input *ListPlatformApplicationsInput) (req *request.Request, output *ListPlatformApplicationsOutput) { + op := &request.Operation{ + Name: opListPlatformApplications, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListPlatformApplicationsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListPlatformApplicationsOutput{} + req.Data = output + return +} + +// Lists the platform application objects for the supported push notification +// services, such as APNS and GCM. The results for ListPlatformApplications +// are paginated and return a limited list of applications, up to 100. If additional +// records are available after the first page results, then a NextToken string +// will be returned. To receive the next page, you call ListPlatformApplications +// using the NextToken string received from the previous call. When there are +// no more records to return, NextToken will be null. For more information, +// see Using Amazon SNS Mobile Push Notifications (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). +func (c *SNS) ListPlatformApplications(input *ListPlatformApplicationsInput) (*ListPlatformApplicationsOutput, error) { + req, out := c.ListPlatformApplicationsRequest(input) + err := req.Send() + return out, err +} + +// ListPlatformApplicationsPages iterates over the pages of a ListPlatformApplications operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListPlatformApplications method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListPlatformApplications operation. +// pageNum := 0 +// err := client.ListPlatformApplicationsPages(params, +// func(page *ListPlatformApplicationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SNS) ListPlatformApplicationsPages(input *ListPlatformApplicationsInput, fn func(p *ListPlatformApplicationsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListPlatformApplicationsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListPlatformApplicationsOutput), lastPage) + }) +} + +const opListSubscriptions = "ListSubscriptions" + +// ListSubscriptionsRequest generates a "aws/request.Request" representing the +// client's request for the ListSubscriptions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListSubscriptions method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListSubscriptionsRequest method. +// req, resp := client.ListSubscriptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SNS) ListSubscriptionsRequest(input *ListSubscriptionsInput) (req *request.Request, output *ListSubscriptionsOutput) { + op := &request.Operation{ + Name: opListSubscriptions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListSubscriptionsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListSubscriptionsOutput{} + req.Data = output + return +} + +// Returns a list of the requester's subscriptions. Each call returns a limited +// list of subscriptions, up to 100. If there are more subscriptions, a NextToken +// is also returned. Use the NextToken parameter in a new ListSubscriptions +// call to get further results. +func (c *SNS) ListSubscriptions(input *ListSubscriptionsInput) (*ListSubscriptionsOutput, error) { + req, out := c.ListSubscriptionsRequest(input) + err := req.Send() + return out, err +} + +// ListSubscriptionsPages iterates over the pages of a ListSubscriptions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListSubscriptions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListSubscriptions operation. +// pageNum := 0 +// err := client.ListSubscriptionsPages(params, +// func(page *ListSubscriptionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SNS) ListSubscriptionsPages(input *ListSubscriptionsInput, fn func(p *ListSubscriptionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListSubscriptionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListSubscriptionsOutput), lastPage) + }) +} + +const opListSubscriptionsByTopic = "ListSubscriptionsByTopic" + +// ListSubscriptionsByTopicRequest generates a "aws/request.Request" representing the +// client's request for the ListSubscriptionsByTopic operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListSubscriptionsByTopic method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListSubscriptionsByTopicRequest method. 
+// req, resp := client.ListSubscriptionsByTopicRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SNS) ListSubscriptionsByTopicRequest(input *ListSubscriptionsByTopicInput) (req *request.Request, output *ListSubscriptionsByTopicOutput) { + op := &request.Operation{ + Name: opListSubscriptionsByTopic, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListSubscriptionsByTopicInput{} + } + + req = c.newRequest(op, input, output) + output = &ListSubscriptionsByTopicOutput{} + req.Data = output + return +} + +// Returns a list of the subscriptions to a specific topic. Each call returns +// a limited list of subscriptions, up to 100. If there are more subscriptions, +// a NextToken is also returned. Use the NextToken parameter in a new ListSubscriptionsByTopic +// call to get further results. +func (c *SNS) ListSubscriptionsByTopic(input *ListSubscriptionsByTopicInput) (*ListSubscriptionsByTopicOutput, error) { + req, out := c.ListSubscriptionsByTopicRequest(input) + err := req.Send() + return out, err +} + +// ListSubscriptionsByTopicPages iterates over the pages of a ListSubscriptionsByTopic operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListSubscriptionsByTopic method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListSubscriptionsByTopic operation. +// pageNum := 0 +// err := client.ListSubscriptionsByTopicPages(params, +// func(page *ListSubscriptionsByTopicOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SNS) ListSubscriptionsByTopicPages(input *ListSubscriptionsByTopicInput, fn func(p *ListSubscriptionsByTopicOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListSubscriptionsByTopicRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListSubscriptionsByTopicOutput), lastPage) + }) +} + +const opListTopics = "ListTopics" + +// ListTopicsRequest generates a "aws/request.Request" representing the +// client's request for the ListTopics operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTopics method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTopicsRequest method. 
+// req, resp := client.ListTopicsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SNS) ListTopicsRequest(input *ListTopicsInput) (req *request.Request, output *ListTopicsOutput) { + op := &request.Operation{ + Name: opListTopics, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTopicsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTopicsOutput{} + req.Data = output + return +} + +// Returns a list of the requester's topics. Each call returns a limited list +// of topics, up to 100. If there are more topics, a NextToken is also returned. +// Use the NextToken parameter in a new ListTopics call to get further results. +func (c *SNS) ListTopics(input *ListTopicsInput) (*ListTopicsOutput, error) { + req, out := c.ListTopicsRequest(input) + err := req.Send() + return out, err +} + +// ListTopicsPages iterates over the pages of a ListTopics operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListTopics method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListTopics operation. +// pageNum := 0 +// err := client.ListTopicsPages(params, +// func(page *ListTopicsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SNS) ListTopicsPages(input *ListTopicsInput, fn func(p *ListTopicsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListTopicsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListTopicsOutput), lastPage) + }) +} + +const opOptInPhoneNumber = "OptInPhoneNumber" + +// OptInPhoneNumberRequest generates a "aws/request.Request" representing the +// client's request for the OptInPhoneNumber operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OptInPhoneNumber method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OptInPhoneNumberRequest method. 
+// req, resp := client.OptInPhoneNumberRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SNS) OptInPhoneNumberRequest(input *OptInPhoneNumberInput) (req *request.Request, output *OptInPhoneNumberOutput) { + op := &request.Operation{ + Name: opOptInPhoneNumber, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &OptInPhoneNumberInput{} + } + + req = c.newRequest(op, input, output) + output = &OptInPhoneNumberOutput{} + req.Data = output + return +} + +// Use this request to opt in a phone number that is opted out, which enables +// you to resume sending SMS messages to the number. +// +// You can opt in a phone number only once every 30 days. +func (c *SNS) OptInPhoneNumber(input *OptInPhoneNumberInput) (*OptInPhoneNumberOutput, error) { + req, out := c.OptInPhoneNumberRequest(input) + err := req.Send() + return out, err +} + +const opPublish = "Publish" + +// PublishRequest generates a "aws/request.Request" representing the +// client's request for the Publish operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the Publish method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PublishRequest method. +// req, resp := client.PublishRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SNS) PublishRequest(input *PublishInput) (req *request.Request, output *PublishOutput) { + op := &request.Operation{ + Name: opPublish, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PublishInput{} + } + + req = c.newRequest(op, input, output) + output = &PublishOutput{} + req.Data = output + return +} + +// Sends a message to all of a topic's subscribed endpoints. When a messageId +// is returned, the message has been saved and Amazon SNS will attempt to deliver +// it to the topic's subscribers shortly. The format of the outgoing message +// to each subscribed endpoint depends on the notification protocol. +// +// To use the Publish action for sending a message to a mobile endpoint, such +// as an app on a Kindle device or mobile phone, you must specify the EndpointArn +// for the TargetArn parameter. The EndpointArn is returned when making a call +// with the CreatePlatformEndpoint action. The second example below shows a +// request and response for publishing to a mobile endpoint. +// +// For more information about formatting messages, see Send Custom Platform-Specific +// Payloads in Messages to Mobile Devices (http://docs.aws.amazon.com/sns/latest/dg/mobile-push-send-custommessage.html). +func (c *SNS) Publish(input *PublishInput) (*PublishOutput, error) { + req, out := c.PublishRequest(input) + err := req.Send() + return out, err +} + +const opRemovePermission = "RemovePermission" + +// RemovePermissionRequest generates a "aws/request.Request" representing the +// client's request for the RemovePermission operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RemovePermission method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RemovePermissionRequest method. +// req, resp := client.RemovePermissionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SNS) RemovePermissionRequest(input *RemovePermissionInput) (req *request.Request, output *RemovePermissionOutput) { + op := &request.Operation{ + Name: opRemovePermission, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemovePermissionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RemovePermissionOutput{} + req.Data = output + return +} + +// Removes a statement from a topic's access control policy. +func (c *SNS) RemovePermission(input *RemovePermissionInput) (*RemovePermissionOutput, error) { + req, out := c.RemovePermissionRequest(input) + err := req.Send() + return out, err +} + +const opSetEndpointAttributes = "SetEndpointAttributes" + +// SetEndpointAttributesRequest generates a "aws/request.Request" representing the +// client's request for the SetEndpointAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetEndpointAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetEndpointAttributesRequest method. +// req, resp := client.SetEndpointAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SNS) SetEndpointAttributesRequest(input *SetEndpointAttributesInput) (req *request.Request, output *SetEndpointAttributesOutput) { + op := &request.Operation{ + Name: opSetEndpointAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetEndpointAttributesInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetEndpointAttributesOutput{} + req.Data = output + return +} + +// Sets the attributes for an endpoint for a device on one of the supported +// push notification services, such as GCM and APNS. For more information, see +// Using Amazon SNS Mobile Push Notifications (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). 
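+//
+//    // A minimal sketch, assuming a hypothetical endpoint ARN; "Enabled" is a
+//    // documented endpoint attribute, and setting it to "false" pauses delivery
+//    // to the endpoint.
+//    _, err := client.SetEndpointAttributes(&sns.SetEndpointAttributesInput{
+//        EndpointArn: aws.String("arn:aws:sns:us-east-1:123456789012:endpoint/GCM/MyApp/abcd1234"),
+//        Attributes: map[string]*string{
+//            "Enabled": aws.String("false"),
+//        },
+//    })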
+func (c *SNS) SetEndpointAttributes(input *SetEndpointAttributesInput) (*SetEndpointAttributesOutput, error) { + req, out := c.SetEndpointAttributesRequest(input) + err := req.Send() + return out, err +} + +const opSetPlatformApplicationAttributes = "SetPlatformApplicationAttributes" + +// SetPlatformApplicationAttributesRequest generates a "aws/request.Request" representing the +// client's request for the SetPlatformApplicationAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetPlatformApplicationAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetPlatformApplicationAttributesRequest method. +// req, resp := client.SetPlatformApplicationAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SNS) SetPlatformApplicationAttributesRequest(input *SetPlatformApplicationAttributesInput) (req *request.Request, output *SetPlatformApplicationAttributesOutput) { + op := &request.Operation{ + Name: opSetPlatformApplicationAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetPlatformApplicationAttributesInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetPlatformApplicationAttributesOutput{} + req.Data = output + return +} + +// Sets the attributes of the platform application object for the supported +// push notification services, such as APNS and GCM. For more information, see +// Using Amazon SNS Mobile Push Notifications (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). +// For information on configuring attributes for message delivery status, see +// Using Amazon SNS Application Attributes for Message Delivery Status (http://docs.aws.amazon.com/sns/latest/dg/sns-msg-status.html). +func (c *SNS) SetPlatformApplicationAttributes(input *SetPlatformApplicationAttributesInput) (*SetPlatformApplicationAttributesOutput, error) { + req, out := c.SetPlatformApplicationAttributesRequest(input) + err := req.Send() + return out, err +} + +const opSetSMSAttributes = "SetSMSAttributes" + +// SetSMSAttributesRequest generates a "aws/request.Request" representing the +// client's request for the SetSMSAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetSMSAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetSMSAttributesRequest method. 
+//    req, resp := client.SetSMSAttributesRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *SNS) SetSMSAttributesRequest(input *SetSMSAttributesInput) (req *request.Request, output *SetSMSAttributesOutput) {
+	op := &request.Operation{
+		Name:       opSetSMSAttributes,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &SetSMSAttributesInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &SetSMSAttributesOutput{}
+	req.Data = output
+	return
+}
+
+// Use this request to set the default settings for sending SMS messages and
+// receiving daily SMS usage reports.
+//
+// You can override some of these settings for a single message when you use
+// the Publish action with the MessageAttributes.entry.N parameter. For more
+// information, see Sending an SMS Message (http://docs.aws.amazon.com/sns/latest/dg/sms_publish-to-phone.html)
+// in the Amazon SNS Developer Guide.
+func (c *SNS) SetSMSAttributes(input *SetSMSAttributesInput) (*SetSMSAttributesOutput, error) {
+	req, out := c.SetSMSAttributesRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opSetSubscriptionAttributes = "SetSubscriptionAttributes"
+
+// SetSubscriptionAttributesRequest generates a "aws/request.Request" representing the
+// client's request for the SetSubscriptionAttributes operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the SetSubscriptionAttributes method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the SetSubscriptionAttributesRequest method.
+//    req, resp := client.SetSubscriptionAttributesRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *SNS) SetSubscriptionAttributesRequest(input *SetSubscriptionAttributesInput) (req *request.Request, output *SetSubscriptionAttributesOutput) {
+	op := &request.Operation{
+		Name:       opSetSubscriptionAttributes,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &SetSubscriptionAttributesInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	output = &SetSubscriptionAttributesOutput{}
+	req.Data = output
+	return
+}
+
+// Allows a subscription owner to set an attribute of the subscription to a
+// new value.
+func (c *SNS) SetSubscriptionAttributes(input *SetSubscriptionAttributesInput) (*SetSubscriptionAttributesOutput, error) {
+	req, out := c.SetSubscriptionAttributesRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opSetTopicAttributes = "SetTopicAttributes"
+
+// SetTopicAttributesRequest generates a "aws/request.Request" representing the
+// client's request for the SetTopicAttributes operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetTopicAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetTopicAttributesRequest method. +// req, resp := client.SetTopicAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SNS) SetTopicAttributesRequest(input *SetTopicAttributesInput) (req *request.Request, output *SetTopicAttributesOutput) { + op := &request.Operation{ + Name: opSetTopicAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetTopicAttributesInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetTopicAttributesOutput{} + req.Data = output + return +} + +// Allows a topic owner to set an attribute of the topic to a new value. +func (c *SNS) SetTopicAttributes(input *SetTopicAttributesInput) (*SetTopicAttributesOutput, error) { + req, out := c.SetTopicAttributesRequest(input) + err := req.Send() + return out, err +} + +const opSubscribe = "Subscribe" + +// SubscribeRequest generates a "aws/request.Request" representing the +// client's request for the Subscribe operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the Subscribe method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SubscribeRequest method. +// req, resp := client.SubscribeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SNS) SubscribeRequest(input *SubscribeInput) (req *request.Request, output *SubscribeOutput) { + op := &request.Operation{ + Name: opSubscribe, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SubscribeInput{} + } + + req = c.newRequest(op, input, output) + output = &SubscribeOutput{} + req.Data = output + return +} + +// Prepares to subscribe an endpoint by sending the endpoint a confirmation +// message. To actually create a subscription, the endpoint owner must call +// the ConfirmSubscription action with the token from the confirmation message. +// Confirmation tokens are valid for three days. +func (c *SNS) Subscribe(input *SubscribeInput) (*SubscribeOutput, error) { + req, out := c.SubscribeRequest(input) + err := req.Send() + return out, err +} + +const opUnsubscribe = "Unsubscribe" + +// UnsubscribeRequest generates a "aws/request.Request" representing the +// client's request for the Unsubscribe operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the Unsubscribe method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UnsubscribeRequest method. +// req, resp := client.UnsubscribeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SNS) UnsubscribeRequest(input *UnsubscribeInput) (req *request.Request, output *UnsubscribeOutput) { + op := &request.Operation{ + Name: opUnsubscribe, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UnsubscribeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UnsubscribeOutput{} + req.Data = output + return +} + +// Deletes a subscription. If the subscription requires authentication for deletion, +// only the owner of the subscription or the topic's owner can unsubscribe, +// and an AWS signature is required. If the Unsubscribe call does not require +// authentication and the requester is not the subscription owner, a final cancellation +// message is delivered to the endpoint, so that the endpoint owner can easily +// resubscribe to the topic if the Unsubscribe request was unintended. +func (c *SNS) Unsubscribe(input *UnsubscribeInput) (*UnsubscribeOutput, error) { + req, out := c.UnsubscribeRequest(input) + err := req.Send() + return out, err +} + +type AddPermissionInput struct { + _ struct{} `type:"structure"` + + // The AWS account IDs of the users (principals) who will be given access to + // the specified actions. The users must have AWS accounts, but do not need + // to be signed up for this service. + AWSAccountId []*string `type:"list" required:"true"` + + // The action you want to allow for the specified principal(s). + // + // Valid values: any Amazon SNS action name. + ActionName []*string `type:"list" required:"true"` + + // A unique identifier for the new policy statement. + Label *string `type:"string" required:"true"` + + // The ARN of the topic whose access control policy you wish to modify. + TopicArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AddPermissionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddPermissionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AddPermissionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddPermissionInput"} + if s.AWSAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AWSAccountId")) + } + if s.ActionName == nil { + invalidParams.Add(request.NewErrParamRequired("ActionName")) + } + if s.Label == nil { + invalidParams.Add(request.NewErrParamRequired("Label")) + } + if s.TopicArn == nil { + invalidParams.Add(request.NewErrParamRequired("TopicArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type AddPermissionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddPermissionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddPermissionOutput) GoString() string { + return s.String() +} + +// The input for the CheckIfPhoneNumberIsOptedOut action. +type CheckIfPhoneNumberIsOptedOutInput struct { + _ struct{} `type:"structure"` + + // The phone number for which you want to check the opt out status. + PhoneNumber *string `locationName:"phoneNumber" type:"string" required:"true"` +} + +// String returns the string representation +func (s CheckIfPhoneNumberIsOptedOutInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CheckIfPhoneNumberIsOptedOutInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CheckIfPhoneNumberIsOptedOutInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CheckIfPhoneNumberIsOptedOutInput"} + if s.PhoneNumber == nil { + invalidParams.Add(request.NewErrParamRequired("PhoneNumber")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The response from the CheckIfPhoneNumberIsOptedOut action. +type CheckIfPhoneNumberIsOptedOutOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether the phone number is opted out: + // + // true – The phone number is opted out, meaning you cannot publish SMS messages + // to it. + // + // false – The phone number is opted in, meaning you can publish SMS messages + // to it. + IsOptedOut *bool `locationName:"isOptedOut" type:"boolean"` +} + +// String returns the string representation +func (s CheckIfPhoneNumberIsOptedOutOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CheckIfPhoneNumberIsOptedOutOutput) GoString() string { + return s.String() +} + +// Input for ConfirmSubscription action. +type ConfirmSubscriptionInput struct { + _ struct{} `type:"structure"` + + // Disallows unauthenticated unsubscribes of the subscription. If the value + // of this parameter is true and the request has an AWS signature, then only + // the topic owner and the subscription owner can unsubscribe the endpoint. + // The unsubscribe action requires AWS authentication. + AuthenticateOnUnsubscribe *string `type:"string"` + + // Short-lived token sent to an endpoint during the Subscribe action. + Token *string `type:"string" required:"true"` + + // The ARN of the topic for which you wish to confirm a subscription. 
+ TopicArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ConfirmSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfirmSubscriptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ConfirmSubscriptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ConfirmSubscriptionInput"} + if s.Token == nil { + invalidParams.Add(request.NewErrParamRequired("Token")) + } + if s.TopicArn == nil { + invalidParams.Add(request.NewErrParamRequired("TopicArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Response for ConfirmSubscriptions action. +type ConfirmSubscriptionOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the created subscription. + SubscriptionArn *string `type:"string"` +} + +// String returns the string representation +func (s ConfirmSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfirmSubscriptionOutput) GoString() string { + return s.String() +} + +// Input for CreatePlatformApplication action. +type CreatePlatformApplicationInput struct { + _ struct{} `type:"structure"` + + // For a list of attributes, see SetPlatformApplicationAttributes (http://docs.aws.amazon.com/sns/latest/api/API_SetPlatformApplicationAttributes.html) + Attributes map[string]*string `type:"map" required:"true"` + + // Application names must be made up of only uppercase and lowercase ASCII letters, + // numbers, underscores, hyphens, and periods, and must be between 1 and 256 + // characters long. + Name *string `type:"string" required:"true"` + + // The following platforms are supported: ADM (Amazon Device Messaging), APNS + // (Apple Push Notification Service), APNS_SANDBOX, and GCM (Google Cloud Messaging). + Platform *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreatePlatformApplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePlatformApplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreatePlatformApplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreatePlatformApplicationInput"} + if s.Attributes == nil { + invalidParams.Add(request.NewErrParamRequired("Attributes")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Platform == nil { + invalidParams.Add(request.NewErrParamRequired("Platform")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Response from CreatePlatformApplication action. +type CreatePlatformApplicationOutput struct { + _ struct{} `type:"structure"` + + // PlatformApplicationArn is returned. + PlatformApplicationArn *string `type:"string"` +} + +// String returns the string representation +func (s CreatePlatformApplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePlatformApplicationOutput) GoString() string { + return s.String() +} + +// Input for CreatePlatformEndpoint action. 
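+//
+// A hypothetical registration sketch (the application ARN and token are
+// placeholders) pairs this input with the CreatePlatformEndpoint method:
+//
+//    params := &sns.CreatePlatformEndpointInput{
+//        PlatformApplicationArn: aws.String("arn:aws:sns:us-east-1:123456789012:app/GCM/MyApp"),
+//        Token:                  aws.String("device-registration-id"),
+//    }
+//    resp, err := client.CreatePlatformEndpoint(params)
+//    if err == nil {
+//        fmt.Println(*resp.EndpointArn)
+//    }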
+type CreatePlatformEndpointInput struct {
+	_ struct{} `type:"structure"`
+
+	// For a list of attributes, see SetEndpointAttributes (http://docs.aws.amazon.com/sns/latest/api/API_SetEndpointAttributes.html).
+	Attributes map[string]*string `type:"map"`
+
+	// Arbitrary user data to associate with the endpoint. Amazon SNS does not use
+	// this data. The data must be in UTF-8 format and less than 2KB.
+	CustomUserData *string `type:"string"`
+
+	// PlatformApplicationArn returned from CreatePlatformApplication is used to
+	// create an endpoint.
+	PlatformApplicationArn *string `type:"string" required:"true"`
+
+	// Unique identifier created by the notification service for an app on a device.
+	// The specific name for Token will vary, depending on which notification service
+	// is being used. For example, when using APNS as the notification service,
+	// you need the device token. Alternatively, when using GCM or ADM, the device
+	// token equivalent is called the registration ID.
+	Token *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreatePlatformEndpointInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreatePlatformEndpointInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreatePlatformEndpointInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreatePlatformEndpointInput"}
+	if s.PlatformApplicationArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("PlatformApplicationArn"))
+	}
+	if s.Token == nil {
+		invalidParams.Add(request.NewErrParamRequired("Token"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Response from CreateEndpoint action.
+type CreatePlatformEndpointOutput struct {
+	_ struct{} `type:"structure"`
+
+	// EndpointArn returned from CreateEndpoint action.
+	EndpointArn *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CreatePlatformEndpointOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreatePlatformEndpointOutput) GoString() string {
+	return s.String()
+}
+
+// Input for CreateTopic action.
+type CreateTopicInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the topic you want to create.
+	//
+	// Constraints: Topic names must be made up of only uppercase and lowercase
+	// ASCII letters, numbers, underscores, and hyphens, and must be between 1 and
+	// 256 characters long.
+	Name *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateTopicInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateTopicInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateTopicInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateTopicInput"}
+	if s.Name == nil {
+		invalidParams.Add(request.NewErrParamRequired("Name"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Response from CreateTopic action.
+type CreateTopicOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) assigned to the created topic.
+ TopicArn *string `type:"string"` +} + +// String returns the string representation +func (s CreateTopicOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTopicOutput) GoString() string { + return s.String() +} + +// Input for DeleteEndpoint action. +type DeleteEndpointInput struct { + _ struct{} `type:"structure"` + + // EndpointArn of endpoint to delete. + EndpointArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteEndpointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteEndpointInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteEndpointInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteEndpointInput"} + if s.EndpointArn == nil { + invalidParams.Add(request.NewErrParamRequired("EndpointArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteEndpointOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteEndpointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteEndpointOutput) GoString() string { + return s.String() +} + +// Input for DeletePlatformApplication action. +type DeletePlatformApplicationInput struct { + _ struct{} `type:"structure"` + + // PlatformApplicationArn of platform application object to delete. + PlatformApplicationArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePlatformApplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePlatformApplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeletePlatformApplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeletePlatformApplicationInput"} + if s.PlatformApplicationArn == nil { + invalidParams.Add(request.NewErrParamRequired("PlatformApplicationArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeletePlatformApplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePlatformApplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePlatformApplicationOutput) GoString() string { + return s.String() +} + +type DeleteTopicInput struct { + _ struct{} `type:"structure"` + + // The ARN of the topic you want to delete. + TopicArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTopicInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTopicInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
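+//
+// As a brief sketch (the topic ARN is a placeholder), the only required field
+// is TopicArn, so validation and the call itself are short:
+//
+//    params := &sns.DeleteTopicInput{
+//        TopicArn: aws.String("arn:aws:sns:us-east-1:123456789012:MyTopic"),
+//    }
+//    _, err := client.DeleteTopic(params)
+//    if err != nil { // a missing TopicArn would surface here as well
+//        fmt.Println(err)
+//    }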
+func (s *DeleteTopicInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTopicInput"} + if s.TopicArn == nil { + invalidParams.Add(request.NewErrParamRequired("TopicArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteTopicOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteTopicOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTopicOutput) GoString() string { + return s.String() +} + +// Endpoint for mobile app and device. +type Endpoint struct { + _ struct{} `type:"structure"` + + // Attributes for endpoint. + Attributes map[string]*string `type:"map"` + + // EndpointArn for mobile app and device. + EndpointArn *string `type:"string"` +} + +// String returns the string representation +func (s Endpoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Endpoint) GoString() string { + return s.String() +} + +// Input for GetEndpointAttributes action. +type GetEndpointAttributesInput struct { + _ struct{} `type:"structure"` + + // EndpointArn for GetEndpointAttributes input. + EndpointArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetEndpointAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetEndpointAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetEndpointAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetEndpointAttributesInput"} + if s.EndpointArn == nil { + invalidParams.Add(request.NewErrParamRequired("EndpointArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Response from GetEndpointAttributes of the EndpointArn. +type GetEndpointAttributesOutput struct { + _ struct{} `type:"structure"` + + // Attributes include the following: + // + // CustomUserData -- arbitrary user data to associate with the endpoint. Amazon + // SNS does not use this data. The data must be in UTF-8 format and less than + // 2KB. + // + // Enabled -- flag that enables/disables delivery to the endpoint. Amazon SNS + // will set this to false when a notification service indicates to Amazon SNS + // that the endpoint is invalid. Users can set it back to true, typically after + // updating Token. + // + // Token -- device token, also referred to as a registration id, for an app + // and mobile device. This is returned from the notification service when an + // app and mobile device are registered with the notification service. + Attributes map[string]*string `type:"map"` +} + +// String returns the string representation +func (s GetEndpointAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetEndpointAttributesOutput) GoString() string { + return s.String() +} + +// Input for GetPlatformApplicationAttributes action. +type GetPlatformApplicationAttributesInput struct { + _ struct{} `type:"structure"` + + // PlatformApplicationArn for GetPlatformApplicationAttributesInput. 
+ PlatformApplicationArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetPlatformApplicationAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPlatformApplicationAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetPlatformApplicationAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetPlatformApplicationAttributesInput"} + if s.PlatformApplicationArn == nil { + invalidParams.Add(request.NewErrParamRequired("PlatformApplicationArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Response for GetPlatformApplicationAttributes action. +type GetPlatformApplicationAttributesOutput struct { + _ struct{} `type:"structure"` + + // Attributes include the following: + // + // EventEndpointCreated -- Topic ARN to which EndpointCreated event notifications + // should be sent. + // + // EventEndpointDeleted -- Topic ARN to which EndpointDeleted event notifications + // should be sent. + // + // EventEndpointUpdated -- Topic ARN to which EndpointUpdate event notifications + // should be sent. + // + // EventDeliveryFailure -- Topic ARN to which DeliveryFailure event notifications + // should be sent upon Direct Publish delivery failure (permanent) to one of + // the application's endpoints. + Attributes map[string]*string `type:"map"` +} + +// String returns the string representation +func (s GetPlatformApplicationAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPlatformApplicationAttributesOutput) GoString() string { + return s.String() +} + +// The input for the GetSMSAttributes request. +type GetSMSAttributesInput struct { + _ struct{} `type:"structure"` + + // A list of the individual attribute names, such as MonthlySpendLimit, for + // which you want values. + // + // For all attribute names, see SetSMSAttributes (http://docs.aws.amazon.com/sns/latest/api/API_SetSMSAttributes.html). + // + // If you don't use this parameter, Amazon SNS returns all SMS attributes. + Attributes []*string `locationName:"attributes" type:"list"` +} + +// String returns the string representation +func (s GetSMSAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSMSAttributesInput) GoString() string { + return s.String() +} + +// The response from the GetSMSAttributes request. +type GetSMSAttributesOutput struct { + _ struct{} `type:"structure"` + + // The SMS attribute names and their values. + Attributes map[string]*string `locationName:"attributes" type:"map"` +} + +// String returns the string representation +func (s GetSMSAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSMSAttributesOutput) GoString() string { + return s.String() +} + +// Input for GetSubscriptionAttributes. +type GetSubscriptionAttributesInput struct { + _ struct{} `type:"structure"` + + // The ARN of the subscription whose properties you want to get. 
+ SubscriptionArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetSubscriptionAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSubscriptionAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetSubscriptionAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSubscriptionAttributesInput"} + if s.SubscriptionArn == nil { + invalidParams.Add(request.NewErrParamRequired("SubscriptionArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Response for GetSubscriptionAttributes action. +type GetSubscriptionAttributesOutput struct { + _ struct{} `type:"structure"` + + // A map of the subscription's attributes. Attributes in this map include the + // following: + // + // SubscriptionArn -- the subscription's ARN + // + // TopicArn -- the topic ARN that the subscription is associated with + // + // Owner -- the AWS account ID of the subscription's owner + // + // ConfirmationWasAuthenticated -- true if the subscription confirmation + // request was authenticated + // + // DeliveryPolicy -- the JSON serialization of the subscription's delivery + // policy + // + // EffectiveDeliveryPolicy -- the JSON serialization of the effective delivery + // policy that takes into account the topic delivery policy and account system + // defaults + Attributes map[string]*string `type:"map"` +} + +// String returns the string representation +func (s GetSubscriptionAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSubscriptionAttributesOutput) GoString() string { + return s.String() +} + +// Input for GetTopicAttributes action. +type GetTopicAttributesInput struct { + _ struct{} `type:"structure"` + + // The ARN of the topic whose properties you want to get. + TopicArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetTopicAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTopicAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetTopicAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetTopicAttributesInput"} + if s.TopicArn == nil { + invalidParams.Add(request.NewErrParamRequired("TopicArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Response for GetTopicAttributes action. +type GetTopicAttributesOutput struct { + _ struct{} `type:"structure"` + + // A map of the topic's attributes. 
Attributes in this map include the following:
+	//
+	// TopicArn -- the topic's ARN
+	//
+	// Owner -- the AWS account ID of the topic's owner
+	//
+	// Policy -- the JSON serialization of the topic's access control policy
+	//
+	// DisplayName -- the human-readable name used in the "From" field for notifications
+	// to email and email-json endpoints
+	//
+	// SubscriptionsPending -- the number of subscriptions pending confirmation
+	// on this topic
+	//
+	// SubscriptionsConfirmed -- the number of confirmed subscriptions on this
+	// topic
+	//
+	// SubscriptionsDeleted -- the number of deleted subscriptions on this topic
+	//
+	// DeliveryPolicy -- the JSON serialization of the topic's delivery policy
+	//
+	// EffectiveDeliveryPolicy -- the JSON serialization of the effective delivery
+	// policy that takes into account system defaults
+	Attributes map[string]*string `type:"map"`
+}
+
+// String returns the string representation
+func (s GetTopicAttributesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetTopicAttributesOutput) GoString() string {
+	return s.String()
+}
+
+// Input for ListEndpointsByPlatformApplication action.
+type ListEndpointsByPlatformApplicationInput struct {
+	_ struct{} `type:"structure"`
+
+	// NextToken string is used when calling ListEndpointsByPlatformApplication
+	// action to retrieve additional records that are available after the first
+	// page of results.
+	NextToken *string `type:"string"`
+
+	// PlatformApplicationArn for ListEndpointsByPlatformApplicationInput action.
+	PlatformApplicationArn *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListEndpointsByPlatformApplicationInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListEndpointsByPlatformApplicationInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListEndpointsByPlatformApplicationInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListEndpointsByPlatformApplicationInput"}
+	if s.PlatformApplicationArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("PlatformApplicationArn"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Response for ListEndpointsByPlatformApplication action.
+type ListEndpointsByPlatformApplicationOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Endpoints returned for ListEndpointsByPlatformApplication action.
+	Endpoints []*Endpoint `type:"list"`
+
+	// NextToken string is returned when calling ListEndpointsByPlatformApplication
+	// action if additional records are available after the first page of results.
+	NextToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListEndpointsByPlatformApplicationOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListEndpointsByPlatformApplicationOutput) GoString() string {
+	return s.String()
+}
+
+// The input for the ListPhoneNumbersOptedOut action.
+type ListPhoneNumbersOptedOutInput struct {
+	_ struct{} `type:"structure"`
+
+	// A NextToken string is used when you call the ListPhoneNumbersOptedOut action
+	// to retrieve additional records that are available after the first page of
+	// results.
+	NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListPhoneNumbersOptedOutInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListPhoneNumbersOptedOutInput) GoString() string {
+	return s.String()
+}
+
+// The response from the ListPhoneNumbersOptedOut action.
+type ListPhoneNumbersOptedOutOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A NextToken string is returned when you call the ListPhoneNumbersOptedOut
+	// action if additional records are available after the first page of results.
+	NextToken *string `locationName:"nextToken" type:"string"`
+
+	// A list of phone numbers that are opted out of receiving SMS messages. The
+	// list is paginated, and each page can contain up to 100 phone numbers.
+	PhoneNumbers []*string `locationName:"phoneNumbers" type:"list"`
+}
+
+// String returns the string representation
+func (s ListPhoneNumbersOptedOutOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListPhoneNumbersOptedOutOutput) GoString() string {
+	return s.String()
+}
+
+// Input for ListPlatformApplications action.
+type ListPlatformApplicationsInput struct {
+	_ struct{} `type:"structure"`
+
+	// NextToken string is used when calling ListPlatformApplications action to
+	// retrieve additional records that are available after the first page of results.
+	NextToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListPlatformApplicationsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListPlatformApplicationsInput) GoString() string {
+	return s.String()
+}
+
+// Response for ListPlatformApplications action.
+type ListPlatformApplicationsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// NextToken string is returned when calling ListPlatformApplications action
+	// if additional records are available after the first page of results.
+	NextToken *string `type:"string"`
+
+	// Platform applications returned when calling ListPlatformApplications action.
+	PlatformApplications []*PlatformApplication `type:"list"`
+}
+
+// String returns the string representation
+func (s ListPlatformApplicationsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListPlatformApplicationsOutput) GoString() string {
+	return s.String()
+}
+
+// Input for ListSubscriptionsByTopic action.
+type ListSubscriptionsByTopicInput struct {
+	_ struct{} `type:"structure"`
+
+	// Token returned by the previous ListSubscriptionsByTopic request.
+	NextToken *string `type:"string"`
+
+	// The ARN of the topic for which you wish to find subscriptions.
+	TopicArn *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListSubscriptionsByTopicInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListSubscriptionsByTopicInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
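+//
+// As a pagination sketch (the topic ARN is a placeholder), NextToken from each
+// response feeds the next request until no token is returned:
+//
+//    params := &sns.ListSubscriptionsByTopicInput{
+//        TopicArn: aws.String("arn:aws:sns:us-east-1:123456789012:MyTopic"),
+//    }
+//    for {
+//        resp, err := client.ListSubscriptionsByTopic(params)
+//        if err != nil {
+//            break
+//        }
+//        for _, sub := range resp.Subscriptions {
+//            fmt.Println(sub)
+//        }
+//        if resp.NextToken == nil {
+//            break
+//        }
+//        params.NextToken = resp.NextToken
+//    }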
+func (s *ListSubscriptionsByTopicInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListSubscriptionsByTopicInput"} + if s.TopicArn == nil { + invalidParams.Add(request.NewErrParamRequired("TopicArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Response for ListSubscriptionsByTopic action. +type ListSubscriptionsByTopicOutput struct { + _ struct{} `type:"structure"` + + // Token to pass along to the next ListSubscriptionsByTopic request. This element + // is returned if there are more subscriptions to retrieve. + NextToken *string `type:"string"` + + // A list of subscriptions. + Subscriptions []*Subscription `type:"list"` +} + +// String returns the string representation +func (s ListSubscriptionsByTopicOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSubscriptionsByTopicOutput) GoString() string { + return s.String() +} + +// Input for ListSubscriptions action. +type ListSubscriptionsInput struct { + _ struct{} `type:"structure"` + + // Token returned by the previous ListSubscriptions request. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListSubscriptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSubscriptionsInput) GoString() string { + return s.String() +} + +// Response for ListSubscriptions action +type ListSubscriptionsOutput struct { + _ struct{} `type:"structure"` + + // Token to pass along to the next ListSubscriptions request. This element is + // returned if there are more subscriptions to retrieve. + NextToken *string `type:"string"` + + // A list of subscriptions. + Subscriptions []*Subscription `type:"list"` +} + +// String returns the string representation +func (s ListSubscriptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSubscriptionsOutput) GoString() string { + return s.String() +} + +type ListTopicsInput struct { + _ struct{} `type:"structure"` + + // Token returned by the previous ListTopics request. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListTopicsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTopicsInput) GoString() string { + return s.String() +} + +// Response for ListTopics action. +type ListTopicsOutput struct { + _ struct{} `type:"structure"` + + // Token to pass along to the next ListTopics request. This element is returned + // if there are additional topics to retrieve. + NextToken *string `type:"string"` + + // A list of topic ARNs. + Topics []*Topic `type:"list"` +} + +// String returns the string representation +func (s ListTopicsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTopicsOutput) GoString() string { + return s.String() +} + +// The user-specified message attribute value. For string data types, the value +// attribute has the same restrictions on the content as the message body. For +// more information, see Publish (http://docs.aws.amazon.com/sns/latest/api/API_Publish.html). +// +// Name, type, and value must not be empty or null. In addition, the message +// body should not be empty or null. 
All parts of the message attribute, including +// name, type, and value, are included in the message size restriction, which +// is currently 256 KB (262,144 bytes). For more information, see Using Amazon +// SNS Message Attributes (http://docs.aws.amazon.com/sns/latest/dg/SNSMessageAttributes.html). +type MessageAttributeValue struct { + _ struct{} `type:"structure"` + + // Binary type attributes can store any binary data, for example, compressed + // data, encrypted data, or images. + // + // BinaryValue is automatically base64 encoded/decoded by the SDK. + BinaryValue []byte `type:"blob"` + + // Amazon SNS supports the following logical data types: String, Number, and + // Binary. For more information, see Message Attribute Data Types (http://docs.aws.amazon.com/sns/latest/dg/SNSMessageAttributes.html#SNSMessageAttributes.DataTypes). + DataType *string `type:"string" required:"true"` + + // Strings are Unicode with UTF8 binary encoding. For a list of code values, + // see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters). + StringValue *string `type:"string"` +} + +// String returns the string representation +func (s MessageAttributeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MessageAttributeValue) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MessageAttributeValue) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MessageAttributeValue"} + if s.DataType == nil { + invalidParams.Add(request.NewErrParamRequired("DataType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Input for the OptInPhoneNumber action. +type OptInPhoneNumberInput struct { + _ struct{} `type:"structure"` + + // The phone number to opt in. + PhoneNumber *string `locationName:"phoneNumber" type:"string" required:"true"` +} + +// String returns the string representation +func (s OptInPhoneNumberInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OptInPhoneNumberInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OptInPhoneNumberInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OptInPhoneNumberInput"} + if s.PhoneNumber == nil { + invalidParams.Add(request.NewErrParamRequired("PhoneNumber")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The response for the OptInPhoneNumber action. +type OptInPhoneNumberOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s OptInPhoneNumberOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OptInPhoneNumberOutput) GoString() string { + return s.String() +} + +// Platform application object. +type PlatformApplication struct { + _ struct{} `type:"structure"` + + // Attributes for platform application object. + Attributes map[string]*string `type:"map"` + + // PlatformApplicationArn for platform application object. 
+ PlatformApplicationArn *string `type:"string"` +} + +// String returns the string representation +func (s PlatformApplication) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PlatformApplication) GoString() string { + return s.String() +} + +// Input for Publish action. +type PublishInput struct { + _ struct{} `type:"structure"` + + // The message you want to send to the topic. + // + // If you want to send the same message to all transport protocols, include + // the text of the message as a String value. + // + // If you want to send different messages for each transport protocol, set + // the value of the MessageStructure parameter to json and use a JSON object + // for the Message parameter. See the Examples section for the format of the + // JSON object. + // + // Constraints: Messages must be UTF-8 encoded strings at most 256 KB in size + // (262144 bytes, not 262144 characters). + // + // JSON-specific constraints: + // + // Keys in the JSON object that correspond to supported transport protocols + // must have simple JSON string values. + // + // The values will be parsed (unescaped) before they are used in outgoing + // messages. + // + // Outbound notifications are JSON encoded (meaning that the characters will + // be reescaped for sending). + // + // Values have a minimum length of 0 (the empty string, "", is allowed). + // + // Values have a maximum length bounded by the overall message size (so, + // including multiple protocols may limit message sizes). + // + // Non-string values will cause the key to be ignored. + // + // Keys that do not correspond to supported transport protocols are ignored. + // + // Duplicate keys are not allowed. + // + // Failure to parse or validate any key or value in the message will cause + // the Publish call to return an error (no partial delivery). + Message *string `type:"string" required:"true"` + + // Message attributes for Publish action. + MessageAttributes map[string]*MessageAttributeValue `locationNameKey:"Name" locationNameValue:"Value" type:"map"` + + // Set MessageStructure to json if you want to send a different message for + // each protocol. For example, using one publish action, you can send a short + // message to your SMS subscribers and a longer message to your email subscribers. + // If you set MessageStructure to json, the value of the Message parameter must: + // + // be a syntactically valid JSON object; and + // + // contain at least a top-level JSON key of "default" with a value that is + // a string. + // + // You can define other top-level keys that define the message you want + // to send to a specific transport protocol (e.g., "http"). + // + // For information about sending different messages for each protocol using + // the AWS Management Console, go to Create Different Messages for Each Protocol + // (http://docs.aws.amazon.com/sns/latest/gsg/Publish.html#sns-message-formatting-by-protocol) + // in the Amazon Simple Notification Service Getting Started Guide. + // + // Valid value: json + MessageStructure *string `type:"string"` + + // The phone number to which you want to deliver an SMS message. Use E.164 format. + // + // If you don't specify a value for the PhoneNumber parameter, you must specify + // a value for the TargetArn or TopicArn parameters. + PhoneNumber *string `type:"string"` + + // Optional parameter to be used as the "Subject" line when the message is delivered + // to email endpoints. 
This field will also be included, if present, in the + // standard JSON messages delivered to other endpoints. + // + // Constraints: Subjects must be ASCII text that begins with a letter, number, + // or punctuation mark; must not include line breaks or control characters; + // and must be less than 100 characters long. + Subject *string `type:"string"` + + // Either TopicArn or EndpointArn, but not both. + // + // If you don't specify a value for the TargetArn parameter, you must specify + // a value for the PhoneNumber or TopicArn parameters. + TargetArn *string `type:"string"` + + // The topic you want to publish to. + // + // If you don't specify a value for the TopicArn parameter, you must specify + // a value for the PhoneNumber or TargetArn parameters. + TopicArn *string `type:"string"` +} + +// String returns the string representation +func (s PublishInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PublishInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PublishInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PublishInput"} + if s.Message == nil { + invalidParams.Add(request.NewErrParamRequired("Message")) + } + if s.MessageAttributes != nil { + for i, v := range s.MessageAttributes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "MessageAttributes", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Response for Publish action. +type PublishOutput struct { + _ struct{} `type:"structure"` + + // Unique identifier assigned to the published message. + // + // Length Constraint: Maximum 100 characters + MessageId *string `type:"string"` +} + +// String returns the string representation +func (s PublishOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PublishOutput) GoString() string { + return s.String() +} + +// Input for RemovePermission action. +type RemovePermissionInput struct { + _ struct{} `type:"structure"` + + // The unique label of the statement you want to remove. + Label *string `type:"string" required:"true"` + + // The ARN of the topic whose access control policy you wish to modify. + TopicArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RemovePermissionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemovePermissionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
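+//
+// These Validate methods also descend into nested types. As a hypothetical
+// sketch using the PublishInput defined above (values are placeholders), a
+// MessageAttributeValue with no DataType is reported with its map key:
+//
+//    params := &sns.PublishInput{
+//        Message:  aws.String("hello"),
+//        TopicArn: aws.String("arn:aws:sns:us-east-1:123456789012:MyTopic"),
+//        MessageAttributes: map[string]*sns.MessageAttributeValue{
+//            "origin": {StringValue: aws.String("cli")}, // DataType omitted
+//        },
+//    }
+//    err := params.Validate() // flags MessageAttributes[origin].DataType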
+func (s *RemovePermissionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemovePermissionInput"} + if s.Label == nil { + invalidParams.Add(request.NewErrParamRequired("Label")) + } + if s.TopicArn == nil { + invalidParams.Add(request.NewErrParamRequired("TopicArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RemovePermissionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemovePermissionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemovePermissionOutput) GoString() string { + return s.String() +} + +// Input for SetEndpointAttributes action. +type SetEndpointAttributesInput struct { + _ struct{} `type:"structure"` + + // A map of the endpoint attributes. Attributes in this map include the following: + // + // CustomUserData -- arbitrary user data to associate with the endpoint. Amazon + // SNS does not use this data. The data must be in UTF-8 format and less than + // 2KB. + // + // Enabled -- flag that enables/disables delivery to the endpoint. Amazon SNS + // will set this to false when a notification service indicates to Amazon SNS + // that the endpoint is invalid. Users can set it back to true, typically after + // updating Token. + // + // Token -- device token, also referred to as a registration id, for an app + // and mobile device. This is returned from the notification service when an + // app and mobile device are registered with the notification service. + Attributes map[string]*string `type:"map" required:"true"` + + // EndpointArn used for SetEndpointAttributes action. + EndpointArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetEndpointAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetEndpointAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetEndpointAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetEndpointAttributesInput"} + if s.Attributes == nil { + invalidParams.Add(request.NewErrParamRequired("Attributes")) + } + if s.EndpointArn == nil { + invalidParams.Add(request.NewErrParamRequired("EndpointArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type SetEndpointAttributesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetEndpointAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetEndpointAttributesOutput) GoString() string { + return s.String() +} + +// Input for SetPlatformApplicationAttributes action. +type SetPlatformApplicationAttributesInput struct { + _ struct{} `type:"structure"` + + // A map of the platform application attributes. Attributes in this map include + // the following: + // + // PlatformCredential -- The credential received from the notification service. + // For APNS/APNS_SANDBOX, PlatformCredential is private key. For GCM, PlatformCredential + // is "API key". For ADM, PlatformCredential is "client secret". + // + // PlatformPrincipal -- The principal received from the notification service. + // For APNS/APNS_SANDBOX, PlatformPrincipal is SSL certificate. 
For GCM, PlatformPrincipal + // is not applicable. For ADM, PlatformPrincipal is "client id". + // + // EventEndpointCreated -- Topic ARN to which EndpointCreated event notifications + // should be sent. + // + // EventEndpointDeleted -- Topic ARN to which EndpointDeleted event notifications + // should be sent. + // + // EventEndpointUpdated -- Topic ARN to which EndpointUpdate event notifications + // should be sent. + // + // EventDeliveryFailure -- Topic ARN to which DeliveryFailure event notifications + // should be sent upon Direct Publish delivery failure (permanent) to one of + // the application's endpoints. + // + // SuccessFeedbackRoleArn -- IAM role ARN used to give Amazon SNS write access + // to use CloudWatch Logs on your behalf. + // + // FailureFeedbackRoleArn -- IAM role ARN used to give Amazon SNS write access + // to use CloudWatch Logs on your behalf. + // + // SuccessFeedbackSampleRate -- Sample rate percentage (0-100) of successfully + // delivered messages. + Attributes map[string]*string `type:"map" required:"true"` + + // PlatformApplicationArn for SetPlatformApplicationAttributes action. + PlatformApplicationArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetPlatformApplicationAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetPlatformApplicationAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetPlatformApplicationAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetPlatformApplicationAttributesInput"} + if s.Attributes == nil { + invalidParams.Add(request.NewErrParamRequired("Attributes")) + } + if s.PlatformApplicationArn == nil { + invalidParams.Add(request.NewErrParamRequired("PlatformApplicationArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type SetPlatformApplicationAttributesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetPlatformApplicationAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetPlatformApplicationAttributesOutput) GoString() string { + return s.String() +} + +// The input for the SetSMSAttributes action. +type SetSMSAttributesInput struct { + _ struct{} `type:"structure"` + + // The default settings for sending SMS messages from your account. You can + // set values for the following attribute names: + // + // MonthlySpendLimit – The maximum amount in USD that you are willing to spend + // each month to send SMS messages. When Amazon SNS determines that sending + // an SMS message would incur a cost that exceeds this limit, it stops sending + // SMS messages within minutes. + // + // Amazon SNS stops sending SMS messages within minutes of the limit being + // crossed. During that interval, if you continue to send SMS messages, you + // will incur costs that exceed your limit. + // + // DeliveryStatusIAMRole – The ARN of the IAM role that allows Amazon SNS + // to write logs about SMS deliveries in CloudWatch Logs. For each SMS message + // that you send, Amazon SNS writes a log that includes the message price, the + // success or failure status, the reason for failure (if the message failed), + // the message dwell time, and other information. 
+ //
+ // DeliveryStatusSuccessSamplingRate – The percentage of successful SMS deliveries
+ // for which Amazon SNS will write logs in CloudWatch Logs. The value can be
+ // an integer from 0 - 100. For example, to write logs only for failed deliveries,
+ // set this value to 0. To write logs for 10% of your successful deliveries,
+ // set it to 10.
+ //
+ // DefaultSenderID – A string, such as your business brand, that is displayed
+ // as the sender on the receiving device. Support for sender IDs varies by country.
+ // The sender ID can be 1 - 11 alphanumeric characters, and it must contain
+ // at least one letter.
+ //
+ // DefaultSMSType – The type of SMS message that you will send by default.
+ // You can assign the following values:
+ //
+ // Promotional – Noncritical messages, such as marketing messages. Amazon
+ // SNS optimizes the message delivery to incur the lowest cost.
+ //
+ // Transactional – (Default) Critical messages that support customer transactions,
+ // such as one-time passcodes for multi-factor authentication. Amazon SNS optimizes
+ // the message delivery to achieve the highest reliability.
+ //
+ // UsageReportS3Bucket – The name of the Amazon S3 bucket to receive daily
+ // SMS usage reports from Amazon SNS. Each day, Amazon SNS will deliver a usage
+ // report as a CSV file to the bucket. The report includes the following information
+ // for each SMS message that was successfully delivered by your account:
+ //
+ // Time that the message was published (in UTC)
+ //
+ // Message ID
+ //
+ // Destination phone number
+ //
+ // Message type
+ //
+ // Delivery status
+ //
+ // Message price (in USD)
+ //
+ // Part number (a message is split into multiple parts if it is too long
+ // for a single message)
+ //
+ // Total number of parts
+ //
+ // To receive the report, the bucket must have a policy that allows the Amazon
+ // SNS service principal to perform the s3:PutObject and s3:GetBucketLocation
+ // actions.
+ //
+ // For an example bucket policy and usage report, see Viewing Statistics About
+ // SMS Message Delivery (http://docs.aws.amazon.com/sns/latest/dg/sms_stats.html)
+ // in the Amazon SNS Developer Guide.
+ Attributes map[string]*string `locationName:"attributes" type:"map" required:"true"`
+}
+
+// String returns the string representation
+func (s SetSMSAttributesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetSMSAttributesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SetSMSAttributesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "SetSMSAttributesInput"}
+ if s.Attributes == nil {
+ invalidParams.Add(request.NewErrParamRequired("Attributes"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// The response for the SetSMSAttributes action.
+type SetSMSAttributesOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s SetSMSAttributesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetSMSAttributesOutput) GoString() string {
+ return s.String()
+}
+
+// Input for SetSubscriptionAttributes action.
+type SetSubscriptionAttributesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the attribute you want to set. Only a subset of the subscription's
+ // attributes are mutable.
+ // + // Valid values: DeliveryPolicy | RawMessageDelivery + AttributeName *string `type:"string" required:"true"` + + // The new value for the attribute in JSON format. + AttributeValue *string `type:"string"` + + // The ARN of the subscription to modify. + SubscriptionArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetSubscriptionAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetSubscriptionAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetSubscriptionAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetSubscriptionAttributesInput"} + if s.AttributeName == nil { + invalidParams.Add(request.NewErrParamRequired("AttributeName")) + } + if s.SubscriptionArn == nil { + invalidParams.Add(request.NewErrParamRequired("SubscriptionArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type SetSubscriptionAttributesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetSubscriptionAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetSubscriptionAttributesOutput) GoString() string { + return s.String() +} + +// Input for SetTopicAttributes action. +type SetTopicAttributesInput struct { + _ struct{} `type:"structure"` + + // The name of the attribute you want to set. Only a subset of the topic's attributes + // are mutable. + // + // Valid values: Policy | DisplayName | DeliveryPolicy + AttributeName *string `type:"string" required:"true"` + + // The new value for the attribute. + AttributeValue *string `type:"string"` + + // The ARN of the topic to modify. + TopicArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetTopicAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetTopicAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetTopicAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetTopicAttributesInput"} + if s.AttributeName == nil { + invalidParams.Add(request.NewErrParamRequired("AttributeName")) + } + if s.TopicArn == nil { + invalidParams.Add(request.NewErrParamRequired("TopicArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type SetTopicAttributesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetTopicAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetTopicAttributesOutput) GoString() string { + return s.String() +} + +// Input for Subscribe action. +type SubscribeInput struct { + _ struct{} `type:"structure"` + + // The endpoint that you want to receive notifications. 
Endpoints vary by protocol:
+ //
+ // For the http protocol, the endpoint is a URL beginning with "http://"
+ //
+ // For the https protocol, the endpoint is a URL beginning with "https://"
+ //
+ // For the email protocol, the endpoint is an email address
+ //
+ // For the email-json protocol, the endpoint is an email address
+ //
+ // For the sms protocol, the endpoint is a phone number of an SMS-enabled
+ // device
+ //
+ // For the sqs protocol, the endpoint is the ARN of an Amazon SQS queue
+ //
+ // For the application protocol, the endpoint is the EndpointArn of a mobile
+ // app and device.
+ //
+ // For the lambda protocol, the endpoint is the ARN of an AWS Lambda function.
+ Endpoint *string `type:"string"`
+
+ // The protocol you want to use. Supported protocols include:
+ //
+ // http -- delivery of JSON-encoded message via HTTP POST
+ //
+ // https -- delivery of JSON-encoded message via HTTPS POST
+ //
+ // email -- delivery of message via SMTP
+ //
+ // email-json -- delivery of JSON-encoded message via SMTP
+ //
+ // sms -- delivery of message via SMS
+ //
+ // sqs -- delivery of JSON-encoded message to an Amazon SQS queue
+ //
+ // application -- delivery of JSON-encoded message to an EndpointArn for
+ // a mobile app and device.
+ //
+ // lambda -- delivery of JSON-encoded message to an AWS Lambda function.
+ Protocol *string `type:"string" required:"true"`
+
+ // The ARN of the topic you want to subscribe to.
+ TopicArn *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s SubscribeInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SubscribeInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SubscribeInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "SubscribeInput"}
+ if s.Protocol == nil {
+ invalidParams.Add(request.NewErrParamRequired("Protocol"))
+ }
+ if s.TopicArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("TopicArn"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Response for Subscribe action.
+type SubscribeOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN of the subscription, if the service was able to create a subscription
+ // immediately (without requiring endpoint owner confirmation).
+ SubscriptionArn *string `type:"string"`
+}
+
+// String returns the string representation
+func (s SubscribeOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SubscribeOutput) GoString() string {
+ return s.String()
+}
+
+// A wrapper type for the attributes of an Amazon SNS subscription.
+type Subscription struct {
+ _ struct{} `type:"structure"`
+
+ // The subscription's endpoint (format depends on the protocol).
+ Endpoint *string `type:"string"`
+
+ // The subscription's owner.
+ Owner *string `type:"string"`
+
+ // The subscription's protocol.
+ Protocol *string `type:"string"`
+
+ // The subscription's ARN.
+ SubscriptionArn *string `type:"string"`
+
+ // The ARN of the subscription's topic.
+ TopicArn *string `type:"string"` +} + +// String returns the string representation +func (s Subscription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Subscription) GoString() string { + return s.String() +} + +// A wrapper type for the topic's Amazon Resource Name (ARN). To retrieve a +// topic's attributes, use GetTopicAttributes. +type Topic struct { + _ struct{} `type:"structure"` + + // The topic's ARN. + TopicArn *string `type:"string"` +} + +// String returns the string representation +func (s Topic) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Topic) GoString() string { + return s.String() +} + +// Input for Unsubscribe action. +type UnsubscribeInput struct { + _ struct{} `type:"structure"` + + // The ARN of the subscription to be deleted. + SubscriptionArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UnsubscribeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnsubscribeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UnsubscribeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UnsubscribeInput"} + if s.SubscriptionArn == nil { + invalidParams.Add(request.NewErrParamRequired("SubscriptionArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UnsubscribeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UnsubscribeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnsubscribeOutput) GoString() string { + return s.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sns/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/sns/examples_test.go new file mode 100644 index 000000000..e2b8cce07 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sns/examples_test.go @@ -0,0 +1,644 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package sns_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/sns" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleSNS_AddPermission() { + svc := sns.New(session.New()) + + params := &sns.AddPermissionInput{ + AWSAccountId: []*string{ // Required + aws.String("delegate"), // Required + // More values... + }, + ActionName: []*string{ // Required + aws.String("action"), // Required + // More values... + }, + Label: aws.String("label"), // Required + TopicArn: aws.String("topicARN"), // Required + } + resp, err := svc.AddPermission(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_CheckIfPhoneNumberIsOptedOut() { + svc := sns.New(session.New()) + + params := &sns.CheckIfPhoneNumberIsOptedOutInput{ + PhoneNumber: aws.String("PhoneNumber"), // Required + } + resp, err := svc.CheckIfPhoneNumberIsOptedOut(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
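+ // A sketch of that cast (assuming an extra import of
+ // "github.com/aws/aws-sdk-go/aws/awserr"):
+ //
+ // if awsErr, ok := err.(awserr.Error); ok {
+ // fmt.Println(awsErr.Code(), awsErr.Message())
+ // }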
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_ConfirmSubscription() { + svc := sns.New(session.New()) + + params := &sns.ConfirmSubscriptionInput{ + Token: aws.String("token"), // Required + TopicArn: aws.String("topicARN"), // Required + AuthenticateOnUnsubscribe: aws.String("authenticateOnUnsubscribe"), + } + resp, err := svc.ConfirmSubscription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_CreatePlatformApplication() { + svc := sns.New(session.New()) + + params := &sns.CreatePlatformApplicationInput{ + Attributes: map[string]*string{ // Required + "Key": aws.String("String"), // Required + // More values... + }, + Name: aws.String("String"), // Required + Platform: aws.String("String"), // Required + } + resp, err := svc.CreatePlatformApplication(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_CreatePlatformEndpoint() { + svc := sns.New(session.New()) + + params := &sns.CreatePlatformEndpointInput{ + PlatformApplicationArn: aws.String("String"), // Required + Token: aws.String("String"), // Required + Attributes: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + CustomUserData: aws.String("String"), + } + resp, err := svc.CreatePlatformEndpoint(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_CreateTopic() { + svc := sns.New(session.New()) + + params := &sns.CreateTopicInput{ + Name: aws.String("topicName"), // Required + } + resp, err := svc.CreateTopic(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_DeleteEndpoint() { + svc := sns.New(session.New()) + + params := &sns.DeleteEndpointInput{ + EndpointArn: aws.String("String"), // Required + } + resp, err := svc.DeleteEndpoint(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_DeletePlatformApplication() { + svc := sns.New(session.New()) + + params := &sns.DeletePlatformApplicationInput{ + PlatformApplicationArn: aws.String("String"), // Required + } + resp, err := svc.DeletePlatformApplication(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_DeleteTopic() { + svc := sns.New(session.New()) + + params := &sns.DeleteTopicInput{ + TopicArn: aws.String("topicARN"), // Required + } + resp, err := svc.DeleteTopic(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
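+ // (DeleteTopic itself is idempotent: deleting a topic that does not
+ // exist does not return an error.)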
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_GetEndpointAttributes() { + svc := sns.New(session.New()) + + params := &sns.GetEndpointAttributesInput{ + EndpointArn: aws.String("String"), // Required + } + resp, err := svc.GetEndpointAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_GetPlatformApplicationAttributes() { + svc := sns.New(session.New()) + + params := &sns.GetPlatformApplicationAttributesInput{ + PlatformApplicationArn: aws.String("String"), // Required + } + resp, err := svc.GetPlatformApplicationAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_GetSMSAttributes() { + svc := sns.New(session.New()) + + params := &sns.GetSMSAttributesInput{ + Attributes: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.GetSMSAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_GetSubscriptionAttributes() { + svc := sns.New(session.New()) + + params := &sns.GetSubscriptionAttributesInput{ + SubscriptionArn: aws.String("subscriptionARN"), // Required + } + resp, err := svc.GetSubscriptionAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_GetTopicAttributes() { + svc := sns.New(session.New()) + + params := &sns.GetTopicAttributesInput{ + TopicArn: aws.String("topicARN"), // Required + } + resp, err := svc.GetTopicAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_ListEndpointsByPlatformApplication() { + svc := sns.New(session.New()) + + params := &sns.ListEndpointsByPlatformApplicationInput{ + PlatformApplicationArn: aws.String("String"), // Required + NextToken: aws.String("String"), + } + resp, err := svc.ListEndpointsByPlatformApplication(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_ListPhoneNumbersOptedOut() { + svc := sns.New(session.New()) + + params := &sns.ListPhoneNumbersOptedOutInput{ + NextToken: aws.String("string"), + } + resp, err := svc.ListPhoneNumbersOptedOut(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
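+ // The result is paginated: a caller that wants every opted-out number
+ // would keep requesting pages while resp.NextToken is set, e.g.:
+ //
+ // for resp.NextToken != nil {
+ // params.NextToken = resp.NextToken
+ // if resp, err = svc.ListPhoneNumbersOptedOut(params); err != nil {
+ // return
+ // }
+ // fmt.Println(resp)
+ // }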
+ fmt.Println(resp) +} + +func ExampleSNS_ListPlatformApplications() { + svc := sns.New(session.New()) + + params := &sns.ListPlatformApplicationsInput{ + NextToken: aws.String("String"), + } + resp, err := svc.ListPlatformApplications(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_ListSubscriptions() { + svc := sns.New(session.New()) + + params := &sns.ListSubscriptionsInput{ + NextToken: aws.String("nextToken"), + } + resp, err := svc.ListSubscriptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_ListSubscriptionsByTopic() { + svc := sns.New(session.New()) + + params := &sns.ListSubscriptionsByTopicInput{ + TopicArn: aws.String("topicARN"), // Required + NextToken: aws.String("nextToken"), + } + resp, err := svc.ListSubscriptionsByTopic(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_ListTopics() { + svc := sns.New(session.New()) + + params := &sns.ListTopicsInput{ + NextToken: aws.String("nextToken"), + } + resp, err := svc.ListTopics(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_OptInPhoneNumber() { + svc := sns.New(session.New()) + + params := &sns.OptInPhoneNumberInput{ + PhoneNumber: aws.String("PhoneNumber"), // Required + } + resp, err := svc.OptInPhoneNumber(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_Publish() { + svc := sns.New(session.New()) + + params := &sns.PublishInput{ + Message: aws.String("message"), // Required + MessageAttributes: map[string]*sns.MessageAttributeValue{ + "Key": { // Required + DataType: aws.String("String"), // Required + BinaryValue: []byte("PAYLOAD"), + StringValue: aws.String("String"), + }, + // More values... + }, + MessageStructure: aws.String("messageStructure"), + PhoneNumber: aws.String("String"), + Subject: aws.String("subject"), + TargetArn: aws.String("String"), + TopicArn: aws.String("topicARN"), + } + resp, err := svc.Publish(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_RemovePermission() { + svc := sns.New(session.New()) + + params := &sns.RemovePermissionInput{ + Label: aws.String("label"), // Required + TopicArn: aws.String("topicARN"), // Required + } + resp, err := svc.RemovePermission(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
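+ // (AddPermission and RemovePermission edit individual statements in the
+ // topic's access-control policy; the whole policy can also be replaced
+ // through SetTopicAttributes with the "Policy" attribute.)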
+ fmt.Println(resp) +} + +func ExampleSNS_SetEndpointAttributes() { + svc := sns.New(session.New()) + + params := &sns.SetEndpointAttributesInput{ + Attributes: map[string]*string{ // Required + "Key": aws.String("String"), // Required + // More values... + }, + EndpointArn: aws.String("String"), // Required + } + resp, err := svc.SetEndpointAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_SetPlatformApplicationAttributes() { + svc := sns.New(session.New()) + + params := &sns.SetPlatformApplicationAttributesInput{ + Attributes: map[string]*string{ // Required + "Key": aws.String("String"), // Required + // More values... + }, + PlatformApplicationArn: aws.String("String"), // Required + } + resp, err := svc.SetPlatformApplicationAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_SetSMSAttributes() { + svc := sns.New(session.New()) + + params := &sns.SetSMSAttributesInput{ + Attributes: map[string]*string{ // Required + "Key": aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.SetSMSAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_SetSubscriptionAttributes() { + svc := sns.New(session.New()) + + params := &sns.SetSubscriptionAttributesInput{ + AttributeName: aws.String("attributeName"), // Required + SubscriptionArn: aws.String("subscriptionARN"), // Required + AttributeValue: aws.String("attributeValue"), + } + resp, err := svc.SetSubscriptionAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_SetTopicAttributes() { + svc := sns.New(session.New()) + + params := &sns.SetTopicAttributesInput{ + AttributeName: aws.String("attributeName"), // Required + TopicArn: aws.String("topicARN"), // Required + AttributeValue: aws.String("attributeValue"), + } + resp, err := svc.SetTopicAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_Subscribe() { + svc := sns.New(session.New()) + + params := &sns.SubscribeInput{ + Protocol: aws.String("protocol"), // Required + TopicArn: aws.String("topicARN"), // Required + Endpoint: aws.String("endpoint"), + } + resp, err := svc.Subscribe(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
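+ // For protocols that require owner confirmation (such as http or email),
+ // resp.SubscriptionArn is not yet a usable ARN: the subscription only
+ // becomes active once ConfirmSubscription is called with the token that
+ // Amazon SNS delivers to the endpoint.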
+ fmt.Println(resp)
+}
+
+func ExampleSNS_Unsubscribe() {
+ svc := sns.New(session.New())
+
+ params := &sns.UnsubscribeInput{
+ SubscriptionArn: aws.String("subscriptionARN"), // Required
+ }
+ resp, err := svc.Unsubscribe(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sns/service.go b/vendor/github.com/aws/aws-sdk-go/service/sns/service.go
new file mode 100644
index 000000000..75ecb0ac5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sns/service.go
@@ -0,0 +1,98 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package sns
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol/query"
+)
+
+// Amazon Simple Notification Service (Amazon SNS) is a web service that enables
+// you to build distributed web-enabled applications. Applications can use Amazon
+// SNS to easily push real-time notification messages to interested subscribers
+// over multiple delivery protocols. For more information about this product
+// see http://aws.amazon.com/sns (http://aws.amazon.com/sns/). For detailed
+// information about Amazon SNS features and their associated API calls, see
+// the Amazon SNS Developer Guide (http://docs.aws.amazon.com/sns/latest/dg/).
+//
+// We also provide SDKs that enable you to access Amazon SNS from your preferred
+// programming language. The SDKs contain functionality that automatically takes
+// care of tasks such as: cryptographically signing your service requests, retrying
+// requests, and handling error responses. For a list of available SDKs, go
+// to Tools for Amazon Web Services (http://aws.amazon.com/tools/).
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type SNS struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "sns"
+
+// New creates a new instance of the SNS client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a SNS client from just a session.
+// svc := sns.New(mySession)
+//
+// // Create a SNS client with additional configuration
+// svc := sns.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *SNS {
+ c := p.ClientConfig(ServiceName, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
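+// The metadata fixes the service name, endpoint, signing region and API
+// version; the handler pushes below wire up SigV4 request signing and the
+// Query protocol's build, unmarshal and error-unmarshal steps used by every
+// SNS operation.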
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *SNS { + svc := &SNS{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2010-03-31", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a SNS operation and runs any +// custom request initialization. +func (c *SNS) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sns/snsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sns/snsiface/interface.go new file mode 100644 index 000000000..3b7f1f5c3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sns/snsiface/interface.go @@ -0,0 +1,144 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package snsiface provides an interface for the Amazon Simple Notification Service. +package snsiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/sns" +) + +// SNSAPI is the interface type for sns.SNS. +type SNSAPI interface { + AddPermissionRequest(*sns.AddPermissionInput) (*request.Request, *sns.AddPermissionOutput) + + AddPermission(*sns.AddPermissionInput) (*sns.AddPermissionOutput, error) + + CheckIfPhoneNumberIsOptedOutRequest(*sns.CheckIfPhoneNumberIsOptedOutInput) (*request.Request, *sns.CheckIfPhoneNumberIsOptedOutOutput) + + CheckIfPhoneNumberIsOptedOut(*sns.CheckIfPhoneNumberIsOptedOutInput) (*sns.CheckIfPhoneNumberIsOptedOutOutput, error) + + ConfirmSubscriptionRequest(*sns.ConfirmSubscriptionInput) (*request.Request, *sns.ConfirmSubscriptionOutput) + + ConfirmSubscription(*sns.ConfirmSubscriptionInput) (*sns.ConfirmSubscriptionOutput, error) + + CreatePlatformApplicationRequest(*sns.CreatePlatformApplicationInput) (*request.Request, *sns.CreatePlatformApplicationOutput) + + CreatePlatformApplication(*sns.CreatePlatformApplicationInput) (*sns.CreatePlatformApplicationOutput, error) + + CreatePlatformEndpointRequest(*sns.CreatePlatformEndpointInput) (*request.Request, *sns.CreatePlatformEndpointOutput) + + CreatePlatformEndpoint(*sns.CreatePlatformEndpointInput) (*sns.CreatePlatformEndpointOutput, error) + + CreateTopicRequest(*sns.CreateTopicInput) (*request.Request, *sns.CreateTopicOutput) + + CreateTopic(*sns.CreateTopicInput) (*sns.CreateTopicOutput, error) + + DeleteEndpointRequest(*sns.DeleteEndpointInput) (*request.Request, *sns.DeleteEndpointOutput) + + DeleteEndpoint(*sns.DeleteEndpointInput) (*sns.DeleteEndpointOutput, error) + + DeletePlatformApplicationRequest(*sns.DeletePlatformApplicationInput) (*request.Request, *sns.DeletePlatformApplicationOutput) + + DeletePlatformApplication(*sns.DeletePlatformApplicationInput) (*sns.DeletePlatformApplicationOutput, error) + + DeleteTopicRequest(*sns.DeleteTopicInput) (*request.Request, *sns.DeleteTopicOutput) + + 
DeleteTopic(*sns.DeleteTopicInput) (*sns.DeleteTopicOutput, error) + + GetEndpointAttributesRequest(*sns.GetEndpointAttributesInput) (*request.Request, *sns.GetEndpointAttributesOutput) + + GetEndpointAttributes(*sns.GetEndpointAttributesInput) (*sns.GetEndpointAttributesOutput, error) + + GetPlatformApplicationAttributesRequest(*sns.GetPlatformApplicationAttributesInput) (*request.Request, *sns.GetPlatformApplicationAttributesOutput) + + GetPlatformApplicationAttributes(*sns.GetPlatformApplicationAttributesInput) (*sns.GetPlatformApplicationAttributesOutput, error) + + GetSMSAttributesRequest(*sns.GetSMSAttributesInput) (*request.Request, *sns.GetSMSAttributesOutput) + + GetSMSAttributes(*sns.GetSMSAttributesInput) (*sns.GetSMSAttributesOutput, error) + + GetSubscriptionAttributesRequest(*sns.GetSubscriptionAttributesInput) (*request.Request, *sns.GetSubscriptionAttributesOutput) + + GetSubscriptionAttributes(*sns.GetSubscriptionAttributesInput) (*sns.GetSubscriptionAttributesOutput, error) + + GetTopicAttributesRequest(*sns.GetTopicAttributesInput) (*request.Request, *sns.GetTopicAttributesOutput) + + GetTopicAttributes(*sns.GetTopicAttributesInput) (*sns.GetTopicAttributesOutput, error) + + ListEndpointsByPlatformApplicationRequest(*sns.ListEndpointsByPlatformApplicationInput) (*request.Request, *sns.ListEndpointsByPlatformApplicationOutput) + + ListEndpointsByPlatformApplication(*sns.ListEndpointsByPlatformApplicationInput) (*sns.ListEndpointsByPlatformApplicationOutput, error) + + ListEndpointsByPlatformApplicationPages(*sns.ListEndpointsByPlatformApplicationInput, func(*sns.ListEndpointsByPlatformApplicationOutput, bool) bool) error + + ListPhoneNumbersOptedOutRequest(*sns.ListPhoneNumbersOptedOutInput) (*request.Request, *sns.ListPhoneNumbersOptedOutOutput) + + ListPhoneNumbersOptedOut(*sns.ListPhoneNumbersOptedOutInput) (*sns.ListPhoneNumbersOptedOutOutput, error) + + ListPlatformApplicationsRequest(*sns.ListPlatformApplicationsInput) (*request.Request, *sns.ListPlatformApplicationsOutput) + + ListPlatformApplications(*sns.ListPlatformApplicationsInput) (*sns.ListPlatformApplicationsOutput, error) + + ListPlatformApplicationsPages(*sns.ListPlatformApplicationsInput, func(*sns.ListPlatformApplicationsOutput, bool) bool) error + + ListSubscriptionsRequest(*sns.ListSubscriptionsInput) (*request.Request, *sns.ListSubscriptionsOutput) + + ListSubscriptions(*sns.ListSubscriptionsInput) (*sns.ListSubscriptionsOutput, error) + + ListSubscriptionsPages(*sns.ListSubscriptionsInput, func(*sns.ListSubscriptionsOutput, bool) bool) error + + ListSubscriptionsByTopicRequest(*sns.ListSubscriptionsByTopicInput) (*request.Request, *sns.ListSubscriptionsByTopicOutput) + + ListSubscriptionsByTopic(*sns.ListSubscriptionsByTopicInput) (*sns.ListSubscriptionsByTopicOutput, error) + + ListSubscriptionsByTopicPages(*sns.ListSubscriptionsByTopicInput, func(*sns.ListSubscriptionsByTopicOutput, bool) bool) error + + ListTopicsRequest(*sns.ListTopicsInput) (*request.Request, *sns.ListTopicsOutput) + + ListTopics(*sns.ListTopicsInput) (*sns.ListTopicsOutput, error) + + ListTopicsPages(*sns.ListTopicsInput, func(*sns.ListTopicsOutput, bool) bool) error + + OptInPhoneNumberRequest(*sns.OptInPhoneNumberInput) (*request.Request, *sns.OptInPhoneNumberOutput) + + OptInPhoneNumber(*sns.OptInPhoneNumberInput) (*sns.OptInPhoneNumberOutput, error) + + PublishRequest(*sns.PublishInput) (*request.Request, *sns.PublishOutput) + + Publish(*sns.PublishInput) (*sns.PublishOutput, error) + + 
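+ // A consumer of this interface can substitute a hand-rolled fake in unit
+ // tests; a minimal sketch (hypothetical mockSNS type, assuming the usual
+ // aws and sns imports):
+ //
+ // type mockSNS struct{ snsiface.SNSAPI }
+ //
+ // func (m *mockSNS) Publish(in *sns.PublishInput) (*sns.PublishOutput, error) {
+ // return &sns.PublishOutput{MessageId: aws.String("test-id")}, nil
+ // }
+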
RemovePermissionRequest(*sns.RemovePermissionInput) (*request.Request, *sns.RemovePermissionOutput) + + RemovePermission(*sns.RemovePermissionInput) (*sns.RemovePermissionOutput, error) + + SetEndpointAttributesRequest(*sns.SetEndpointAttributesInput) (*request.Request, *sns.SetEndpointAttributesOutput) + + SetEndpointAttributes(*sns.SetEndpointAttributesInput) (*sns.SetEndpointAttributesOutput, error) + + SetPlatformApplicationAttributesRequest(*sns.SetPlatformApplicationAttributesInput) (*request.Request, *sns.SetPlatformApplicationAttributesOutput) + + SetPlatformApplicationAttributes(*sns.SetPlatformApplicationAttributesInput) (*sns.SetPlatformApplicationAttributesOutput, error) + + SetSMSAttributesRequest(*sns.SetSMSAttributesInput) (*request.Request, *sns.SetSMSAttributesOutput) + + SetSMSAttributes(*sns.SetSMSAttributesInput) (*sns.SetSMSAttributesOutput, error) + + SetSubscriptionAttributesRequest(*sns.SetSubscriptionAttributesInput) (*request.Request, *sns.SetSubscriptionAttributesOutput) + + SetSubscriptionAttributes(*sns.SetSubscriptionAttributesInput) (*sns.SetSubscriptionAttributesOutput, error) + + SetTopicAttributesRequest(*sns.SetTopicAttributesInput) (*request.Request, *sns.SetTopicAttributesOutput) + + SetTopicAttributes(*sns.SetTopicAttributesInput) (*sns.SetTopicAttributesOutput, error) + + SubscribeRequest(*sns.SubscribeInput) (*request.Request, *sns.SubscribeOutput) + + Subscribe(*sns.SubscribeInput) (*sns.SubscribeOutput, error) + + UnsubscribeRequest(*sns.UnsubscribeInput) (*request.Request, *sns.UnsubscribeOutput) + + Unsubscribe(*sns.UnsubscribeInput) (*sns.UnsubscribeOutput, error) +} + +var _ SNSAPI = (*sns.SNS)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/api.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/api.go new file mode 100644 index 000000000..9b416bdab --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sqs/api.go @@ -0,0 +1,2627 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package sqs provides a client for Amazon Simple Queue Service. +package sqs + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +const opAddPermission = "AddPermission" + +// AddPermissionRequest generates a "aws/request.Request" representing the +// client's request for the AddPermission operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddPermission method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddPermissionRequest method. 
+// req, resp := client.AddPermissionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SQS) AddPermissionRequest(input *AddPermissionInput) (req *request.Request, output *AddPermissionOutput) { + op := &request.Operation{ + Name: opAddPermission, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddPermissionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AddPermissionOutput{} + req.Data = output + return +} + +// Adds a permission to a queue for a specific principal (http://docs.aws.amazon.com/general/latest/gr/glos-chap.html#P). +// This allows for sharing access to the queue. +// +// When you create a queue, you have full control access rights for the queue. +// Only you (as owner of the queue) can grant or deny permissions to the queue. +// For more information about these permissions, see Shared Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/acp-overview.html) +// in the Amazon SQS Developer Guide. +// +// AddPermission writes an Amazon SQS-generated policy. If you want to write +// your own policy, use SetQueueAttributes to upload your policy. For more information +// about writing your own policy, see Using The Access Policy Language (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/AccessPolicyLanguage.html) +// in the Amazon SQS Developer Guide. +// +// Some API actions take lists of parameters. These lists are specified using +// the param.n notation. Values of n are integers starting from 1. For example, +// a parameter list with two elements looks like this: +func (c *SQS) AddPermission(input *AddPermissionInput) (*AddPermissionOutput, error) { + req, out := c.AddPermissionRequest(input) + err := req.Send() + return out, err +} + +const opChangeMessageVisibility = "ChangeMessageVisibility" + +// ChangeMessageVisibilityRequest generates a "aws/request.Request" representing the +// client's request for the ChangeMessageVisibility operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ChangeMessageVisibility method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ChangeMessageVisibilityRequest method. 
+// req, resp := client.ChangeMessageVisibilityRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *SQS) ChangeMessageVisibilityRequest(input *ChangeMessageVisibilityInput) (req *request.Request, output *ChangeMessageVisibilityOutput) {
+ op := &request.Operation{
+ Name: opChangeMessageVisibility,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ChangeMessageVisibilityInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &ChangeMessageVisibilityOutput{}
+ req.Data = output
+ return
+}
+
+// Changes the visibility timeout of a specified message in a queue to a new
+// value. The maximum allowed timeout value is 12 hours.
+// This means you can't extend the timeout of a message in an existing queue
+// to more than a total visibility timeout of 12 hours. (For more information
+// about visibility timeout, see Visibility Timeout (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/AboutVT.html)
+// in the Amazon SQS Developer Guide.)
+//
+// For example, let's say you have a message and its default message visibility
+// timeout is 5 minutes. After 3 minutes, you call ChangeMessageVisibility with
+// a timeout of 10 minutes. At that time, the timeout for the message would
+// be extended by 10 minutes beyond the time of the ChangeMessageVisibility
+// call. This results in a total visibility timeout of 13 minutes. You can continue
+// to call ChangeMessageVisibility to extend the visibility timeout to a maximum
+// of 12 hours. If you try to extend beyond 12 hours, the request will be rejected.
+//
+// There is a 120,000 limit for the number of inflight messages per queue.
+// Messages are inflight after they have been received from the queue by a consuming
+// component, but have not yet been deleted from the queue. If you reach the
+// 120,000 limit, you will receive an OverLimit error message from Amazon SQS.
+// To help avoid reaching the limit, you should delete the messages from the
+// queue after they have been processed. You can also increase the number of
+// queues you use to process the messages.
+//
+// If you attempt to set the VisibilityTimeout to an amount more than the maximum
+// time left, Amazon SQS returns an error. It will not automatically recalculate
+// and increase the timeout to the maximum time remaining.
+//
+// Unlike with a queue, when you change the visibility timeout for a specific
+// message, that timeout value is applied immediately but is not saved in memory
+// for that message. If you don't delete a message after it is received, the
+// visibility timeout for the message the next time it is received reverts to
+// the original timeout value, not the value you set with the ChangeMessageVisibility
+// action.
+func (c *SQS) ChangeMessageVisibility(input *ChangeMessageVisibilityInput) (*ChangeMessageVisibilityOutput, error) {
+ req, out := c.ChangeMessageVisibilityRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opChangeMessageVisibilityBatch = "ChangeMessageVisibilityBatch"
+
+// ChangeMessageVisibilityBatchRequest generates a "aws/request.Request" representing the
+// client's request for the ChangeMessageVisibilityBatch operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
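+//
+// (This two-step pattern is the same for every operation in this package:
+// the *Request method only builds the request.Request, and Send executes
+// it, filling the output value that was returned alongside it.)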
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ChangeMessageVisibilityBatch method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ChangeMessageVisibilityBatchRequest method. +// req, resp := client.ChangeMessageVisibilityBatchRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SQS) ChangeMessageVisibilityBatchRequest(input *ChangeMessageVisibilityBatchInput) (req *request.Request, output *ChangeMessageVisibilityBatchOutput) { + op := &request.Operation{ + Name: opChangeMessageVisibilityBatch, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ChangeMessageVisibilityBatchInput{} + } + + req = c.newRequest(op, input, output) + output = &ChangeMessageVisibilityBatchOutput{} + req.Data = output + return +} + +// Changes the visibility timeout of multiple messages. This is a batch version +// of ChangeMessageVisibility. The result of the action on each message is reported +// individually in the response. You can send up to 10 ChangeMessageVisibility +// requests with each ChangeMessageVisibilityBatch action. +// +// Because the batch request can result in a combination of successful and +// unsuccessful actions, you should check for batch errors even when the call +// returns an HTTP status code of 200. +// +// Some API actions take lists of parameters. These lists are specified using +// the param.n notation. Values of n are integers starting from 1. For example, +// a parameter list with two elements looks like this: +func (c *SQS) ChangeMessageVisibilityBatch(input *ChangeMessageVisibilityBatchInput) (*ChangeMessageVisibilityBatchOutput, error) { + req, out := c.ChangeMessageVisibilityBatchRequest(input) + err := req.Send() + return out, err +} + +const opCreateQueue = "CreateQueue" + +// CreateQueueRequest generates a "aws/request.Request" representing the +// client's request for the CreateQueue operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateQueue method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateQueueRequest method. 
+// req, resp := client.CreateQueueRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SQS) CreateQueueRequest(input *CreateQueueInput) (req *request.Request, output *CreateQueueOutput) { + op := &request.Operation{ + Name: opCreateQueue, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateQueueInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateQueueOutput{} + req.Data = output + return +} + +// Creates a new queue, or returns the URL of an existing one. When you request +// CreateQueue, you provide a name for the queue. To successfully create a new +// queue, you must provide a name that is unique within the scope of your own +// queues. +// +// If you delete a queue, you must wait at least 60 seconds before creating +// a queue with the same name. +// +// You may pass one or more attributes in the request. If you do not provide +// a value for any attribute, the queue will have the default value for that +// attribute. +// +// Use GetQueueUrl to get a queue's URL. GetQueueUrl requires only the QueueName +// parameter. +// +// If you provide the name of an existing queue, along with the exact names +// and values of all the queue's attributes, CreateQueue returns the queue URL +// for the existing queue. If the queue name, attribute names, or attribute +// values do not match an existing queue, CreateQueue returns an error. +// +// Some API actions take lists of parameters. These lists are specified using +// the param.n notation. Values of n are integers starting from 1. For example, +// a parameter list with two elements looks like this: +func (c *SQS) CreateQueue(input *CreateQueueInput) (*CreateQueueOutput, error) { + req, out := c.CreateQueueRequest(input) + err := req.Send() + return out, err +} + +const opDeleteMessage = "DeleteMessage" + +// DeleteMessageRequest generates a "aws/request.Request" representing the +// client's request for the DeleteMessage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteMessage method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteMessageRequest method. +// req, resp := client.DeleteMessageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SQS) DeleteMessageRequest(input *DeleteMessageInput) (req *request.Request, output *DeleteMessageOutput) { + op := &request.Operation{ + Name: opDeleteMessage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteMessageInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteMessageOutput{} + req.Data = output + return +} + +// Deletes the specified message from the specified queue. You specify the message +// by using the message's receipt handle and not the message ID you received +// when you sent the message. 
Even if the message is locked by another reader +// due to the visibility timeout setting, it is still deleted from the queue. +// If you leave a message in the queue for longer than the queue's configured +// retention period, Amazon SQS automatically deletes it. +// +// The receipt handle is associated with a specific instance of receiving +// the message. If you receive a message more than once, the receipt handle +// you get each time you receive the message is different. When you request +// DeleteMessage, if you don't provide the most recently received receipt handle +// for the message, the request will still succeed, but the message might not +// be deleted. +// +// It is possible you will receive a message even after you have deleted +// it. This might happen on rare occasions if one of the servers storing a copy +// of the message is unavailable when you request to delete the message. The +// copy remains on the server and might be returned to you again on a subsequent +// receive request. You should create your system to be idempotent so that receiving +// a particular message more than once is not a problem. +func (c *SQS) DeleteMessage(input *DeleteMessageInput) (*DeleteMessageOutput, error) { + req, out := c.DeleteMessageRequest(input) + err := req.Send() + return out, err +} + +const opDeleteMessageBatch = "DeleteMessageBatch" + +// DeleteMessageBatchRequest generates a "aws/request.Request" representing the +// client's request for the DeleteMessageBatch operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteMessageBatch method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteMessageBatchRequest method. +// req, resp := client.DeleteMessageBatchRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SQS) DeleteMessageBatchRequest(input *DeleteMessageBatchInput) (req *request.Request, output *DeleteMessageBatchOutput) { + op := &request.Operation{ + Name: opDeleteMessageBatch, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteMessageBatchInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteMessageBatchOutput{} + req.Data = output + return +} + +// Deletes up to ten messages from the specified queue. This is a batch version +// of DeleteMessage. The result of the delete action on each message is reported +// individually in the response. +// +// Because the batch request can result in a combination of successful and +// unsuccessful actions, you should check for batch errors even when the call +// returns an HTTP status code of 200. +// +// Some API actions take lists of parameters. These lists are specified using +// the param.n notation. Values of n are integers starting from 1. 
For example, +// a parameter list with two elements looks like this: +func (c *SQS) DeleteMessageBatch(input *DeleteMessageBatchInput) (*DeleteMessageBatchOutput, error) { + req, out := c.DeleteMessageBatchRequest(input) + err := req.Send() + return out, err +} + +const opDeleteQueue = "DeleteQueue" + +// DeleteQueueRequest generates a "aws/request.Request" representing the +// client's request for the DeleteQueue operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteQueue method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteQueueRequest method. +// req, resp := client.DeleteQueueRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SQS) DeleteQueueRequest(input *DeleteQueueInput) (req *request.Request, output *DeleteQueueOutput) { + op := &request.Operation{ + Name: opDeleteQueue, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteQueueInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteQueueOutput{} + req.Data = output + return +} + +// Deletes the queue specified by the queue URL, regardless of whether the queue +// is empty. If the specified queue does not exist, Amazon SQS returns a successful +// response. +// +// Use DeleteQueue with care; once you delete your queue, any messages in +// the queue are no longer available. +// +// When you delete a queue, the deletion process takes up to 60 seconds. +// Requests you send involving that queue during the 60 seconds might succeed. +// For example, a SendMessage request might succeed, but after the 60 seconds, +// the queue and that message you sent no longer exist. Also, when you delete +// a queue, you must wait at least 60 seconds before creating a queue with the +// same name. +// +// We reserve the right to delete queues that have had no activity for more +// than 30 days. For more information, see How Amazon SQS Queues Work (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/SQSConcepts.html) +// in the Amazon SQS Developer Guide. +func (c *SQS) DeleteQueue(input *DeleteQueueInput) (*DeleteQueueOutput, error) { + req, out := c.DeleteQueueRequest(input) + err := req.Send() + return out, err +} + +const opGetQueueAttributes = "GetQueueAttributes" + +// GetQueueAttributesRequest generates a "aws/request.Request" representing the +// client's request for the GetQueueAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If
+// you just want the service response, call the GetQueueAttributes method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the GetQueueAttributesRequest method.
+// req, resp := client.GetQueueAttributesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *SQS) GetQueueAttributesRequest(input *GetQueueAttributesInput) (req *request.Request, output *GetQueueAttributesOutput) {
+ op := &request.Operation{
+ Name: opGetQueueAttributes,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetQueueAttributesInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetQueueAttributesOutput{}
+ req.Data = output
+ return
+}
+
+// Gets attributes for the specified queue.
+//
+// Some API actions take lists of parameters. These lists are specified using
+// the param.n notation. Values of n are integers starting from 1. For example,
+// a parameter list with two elements looks like this:
+//
+//  &Attribute.1=this
+//
+//  &Attribute.2=that
+func (c *SQS) GetQueueAttributes(input *GetQueueAttributesInput) (*GetQueueAttributesOutput, error) {
+ req, out := c.GetQueueAttributesRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetQueueUrl = "GetQueueUrl"
+
+// GetQueueUrlRequest generates a "aws/request.Request" representing the
+// client's request for the GetQueueUrl operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the GetQueueUrl method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the GetQueueUrlRequest method.
+// req, resp := client.GetQueueUrlRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *SQS) GetQueueUrlRequest(input *GetQueueUrlInput) (req *request.Request, output *GetQueueUrlOutput) {
+ op := &request.Operation{
+ Name: opGetQueueUrl,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetQueueUrlInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetQueueUrlOutput{}
+ req.Data = output
+ return
+}
+
+// Returns the URL of an existing queue. This action provides a simple way to
+// retrieve the URL of an Amazon SQS queue.
+//
+// To access a queue that belongs to another AWS account, use the QueueOwnerAWSAccountId
+// parameter to specify the account ID of the queue's owner. The queue's owner
+// must grant you permission to access the queue. For more information about
+// shared queue access, see AddPermission or go to Shared Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/acp-overview.html)
+// in the Amazon SQS Developer Guide.
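+//
+// A minimal usage sketch, not part of the generated API (it assumes an
+// already-initialized client svc, a hypothetical queue name "my-queue", and
+// the SDK's aws.String pointer helper):
+//
+//   out, err := svc.GetQueueUrl(&GetQueueUrlInput{
+//       QueueName: aws.String("my-queue"),
+//   })
+//   if err == nil {
+//       fmt.Println(*out.QueueUrl) // URL to pass to later queue operations
+//   }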
+func (c *SQS) GetQueueUrl(input *GetQueueUrlInput) (*GetQueueUrlOutput, error) { + req, out := c.GetQueueUrlRequest(input) + err := req.Send() + return out, err +} + +const opListDeadLetterSourceQueues = "ListDeadLetterSourceQueues" + +// ListDeadLetterSourceQueuesRequest generates a "aws/request.Request" representing the +// client's request for the ListDeadLetterSourceQueues operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListDeadLetterSourceQueues method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListDeadLetterSourceQueuesRequest method. +// req, resp := client.ListDeadLetterSourceQueuesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SQS) ListDeadLetterSourceQueuesRequest(input *ListDeadLetterSourceQueuesInput) (req *request.Request, output *ListDeadLetterSourceQueuesOutput) { + op := &request.Operation{ + Name: opListDeadLetterSourceQueues, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListDeadLetterSourceQueuesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListDeadLetterSourceQueuesOutput{} + req.Data = output + return +} + +// Returns a list of your queues that have the RedrivePolicy queue attribute +// configured with a dead letter queue. +// +// For more information about using dead letter queues, see Using Amazon SQS +// Dead Letter Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/SQSDeadLetterQueue.html). +func (c *SQS) ListDeadLetterSourceQueues(input *ListDeadLetterSourceQueuesInput) (*ListDeadLetterSourceQueuesOutput, error) { + req, out := c.ListDeadLetterSourceQueuesRequest(input) + err := req.Send() + return out, err +} + +const opListQueues = "ListQueues" + +// ListQueuesRequest generates a "aws/request.Request" representing the +// client's request for the ListQueues operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListQueues method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListQueuesRequest method. 
+// req, resp := client.ListQueuesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SQS) ListQueuesRequest(input *ListQueuesInput) (req *request.Request, output *ListQueuesOutput) { + op := &request.Operation{ + Name: opListQueues, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListQueuesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListQueuesOutput{} + req.Data = output + return +} + +// Returns a list of your queues. The maximum number of queues that can be returned +// is 1000. If you specify a value for the optional QueueNamePrefix parameter, +// only queues with a name beginning with the specified value are returned. +func (c *SQS) ListQueues(input *ListQueuesInput) (*ListQueuesOutput, error) { + req, out := c.ListQueuesRequest(input) + err := req.Send() + return out, err +} + +const opPurgeQueue = "PurgeQueue" + +// PurgeQueueRequest generates a "aws/request.Request" representing the +// client's request for the PurgeQueue operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PurgeQueue method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PurgeQueueRequest method. +// req, resp := client.PurgeQueueRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SQS) PurgeQueueRequest(input *PurgeQueueInput) (req *request.Request, output *PurgeQueueOutput) { + op := &request.Operation{ + Name: opPurgeQueue, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PurgeQueueInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PurgeQueueOutput{} + req.Data = output + return +} + +// Deletes the messages in a queue specified by the queue URL. +// +// When you use the PurgeQueue API, the deleted messages in the queue cannot +// be retrieved. +// +// When you purge a queue, the message deletion process takes up to 60 seconds. +// All messages sent to the queue before calling PurgeQueue will be deleted; +// messages sent to the queue while it is being purged may be deleted. While +// the queue is being purged, messages sent to the queue before PurgeQueue was +// called may be received, but will be deleted within the next minute. +func (c *SQS) PurgeQueue(input *PurgeQueueInput) (*PurgeQueueOutput, error) { + req, out := c.PurgeQueueRequest(input) + err := req.Send() + return out, err +} + +const opReceiveMessage = "ReceiveMessage" + +// ReceiveMessageRequest generates a "aws/request.Request" representing the +// client's request for the ReceiveMessage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ReceiveMessage method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the ReceiveMessageRequest method.
+// req, resp := client.ReceiveMessageRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *SQS) ReceiveMessageRequest(input *ReceiveMessageInput) (req *request.Request, output *ReceiveMessageOutput) {
+ op := &request.Operation{
+ Name: opReceiveMessage,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ReceiveMessageInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ReceiveMessageOutput{}
+ req.Data = output
+ return
+}
+
+// Retrieves one or more messages, with a maximum limit of 10 messages, from
+// the specified queue. Long poll support is enabled by using the WaitTimeSeconds
+// parameter. For more information, see Amazon SQS Long Poll (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-long-polling.html)
+// in the Amazon SQS Developer Guide.
+//
+// Short poll is the default behavior where a weighted random set of machines
+// is sampled on a ReceiveMessage call. This means only the messages on the
+// sampled machines are returned. If the number of messages in the queue is
+// small (less than 1000), it is likely you will get fewer messages than you
+// requested per ReceiveMessage call. If the number of messages in the queue
+// is extremely small, you might not receive any messages in a particular ReceiveMessage
+// response, in which case you should repeat the request.
+//
+// For each message returned, the response includes the following:
+//
+// Message body.
+//
+// MD5 digest of the message body. For information about MD5, go to http://www.faqs.org/rfcs/rfc1321.html
+// (http://www.faqs.org/rfcs/rfc1321.html).
+//
+// Message ID you received when you sent the message to the queue.
+//
+// Receipt handle.
+//
+// Message attributes.
+//
+// MD5 digest of the message attributes.
+//
+// The receipt handle is the identifier you must provide when deleting the
+// message. For more information, see Queue and Message Identifiers (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/ImportantIdentifiers.html)
+// in the Amazon SQS Developer Guide.
+//
+// You can provide the VisibilityTimeout parameter in your request, which
+// will be applied to the messages that Amazon SQS returns in the response.
+// If you do not include the parameter, the overall visibility timeout for the
+// queue is used for the returned messages. For more information, see Visibility
+// Timeout (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/AboutVT.html)
+// in the Amazon SQS Developer Guide.
+//
+// Going forward, new attributes might be added. If you are writing code
+// that calls this action, we recommend that you structure your code so that
+// it can handle new attributes gracefully.
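+//
+// A minimal long-polling sketch (assumes an initialized client svc and a
+// queue URL string queueURL; aws.String and aws.Int64 are the SDK's pointer
+// helpers):
+//
+//   out, err := svc.ReceiveMessage(&ReceiveMessageInput{
+//       QueueUrl:            aws.String(queueURL),
+//       MaxNumberOfMessages: aws.Int64(10),
+//       WaitTimeSeconds:     aws.Int64(20), // long poll for up to 20 seconds
+//   })
+//   if err == nil {
+//       for _, m := range out.Messages {
+//           _ = m // process *m.Body, then delete via *m.ReceiptHandle
+//       }
+//   }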
+func (c *SQS) ReceiveMessage(input *ReceiveMessageInput) (*ReceiveMessageOutput, error) {
+ req, out := c.ReceiveMessageRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opRemovePermission = "RemovePermission"
+
+// RemovePermissionRequest generates a "aws/request.Request" representing the
+// client's request for the RemovePermission operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the RemovePermission method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the RemovePermissionRequest method.
+// req, resp := client.RemovePermissionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *SQS) RemovePermissionRequest(input *RemovePermissionInput) (req *request.Request, output *RemovePermissionOutput) {
+ op := &request.Operation{
+ Name: opRemovePermission,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &RemovePermissionInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &RemovePermissionOutput{}
+ req.Data = output
+ return
+}
+
+// Revokes any permissions in the queue policy that match the specified Label
+// parameter. Only the owner of the queue can remove permissions.
+func (c *SQS) RemovePermission(input *RemovePermissionInput) (*RemovePermissionOutput, error) {
+ req, out := c.RemovePermissionRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opSendMessage = "SendMessage"
+
+// SendMessageRequest generates a "aws/request.Request" representing the
+// client's request for the SendMessage operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the SendMessage method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the SendMessageRequest method.
+// req, resp := client.SendMessageRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *SQS) SendMessageRequest(input *SendMessageInput) (req *request.Request, output *SendMessageOutput) {
+ op := &request.Operation{
+ Name: opSendMessage,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &SendMessageInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &SendMessageOutput{}
+ req.Data = output
+ return
+}
+
+// Delivers a message to the specified queue.
With Amazon SQS, you can
+// send large payload messages that are up to 256 KB (262,144
+// bytes) in size. To send large payloads, you must use an AWS SDK that supports
+// SigV4 signing. To verify whether SigV4 is supported for an AWS SDK, check
+// the SDK release notes.
+//
+// The following list shows the characters (in Unicode) allowed in your message,
+// according to the W3C XML specification. For more information, go to http://www.w3.org/TR/REC-xml/#charsets
+// (http://www.w3.org/TR/REC-xml/#charsets). If you send any characters not included
+// in the list, your request will be rejected.
+//
+// #x9 | #xA | #xD | [#x20 to #xD7FF] | [#xE000 to #xFFFD] | [#x10000 to #x10FFFF]
+func (c *SQS) SendMessage(input *SendMessageInput) (*SendMessageOutput, error) {
+ req, out := c.SendMessageRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opSendMessageBatch = "SendMessageBatch"
+
+// SendMessageBatchRequest generates a "aws/request.Request" representing the
+// client's request for the SendMessageBatch operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the SendMessageBatch method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the SendMessageBatchRequest method.
+// req, resp := client.SendMessageBatchRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *SQS) SendMessageBatchRequest(input *SendMessageBatchInput) (req *request.Request, output *SendMessageBatchOutput) {
+ op := &request.Operation{
+ Name: opSendMessageBatch,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &SendMessageBatchInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &SendMessageBatchOutput{}
+ req.Data = output
+ return
+}
+
+// Delivers up to ten messages to the specified queue. This is a batch version
+// of SendMessage. The result of the send action on each message is reported
+// individually in the response. The maximum allowed individual message size
+// is 256 KB (262,144 bytes).
+//
+// The maximum total payload size (i.e., the sum of all of a batch's individual
+// message lengths) is also 256 KB (262,144 bytes).
+//
+// If the DelaySeconds parameter is not specified for an entry, the default
+// for the queue is used.
+//
+// The following list shows the characters (in Unicode) that are allowed in
+// your message, according to the W3C XML specification. For more information,
+// go to http://www.w3.org/TR/REC-xml/#charsets (http://www.w3.org/TR/REC-xml/#charsets).
+// If you send any characters that are not included in the list, your request
+// will be rejected.
+//
+// #x9 | #xA | #xD | [#x20 to #xD7FF] | [#xE000 to #xFFFD] | [#x10000 to #x10FFFF]
+//
+// Because the batch request can result in a combination of successful and
+// unsuccessful actions, you should check for batch errors even when the call
+// returns an HTTP status code of 200.
+//
+// Some API actions take lists of parameters. These lists are specified using
+// the param.n notation.
Values of n are integers starting from 1. For example,
+// a parameter list with two elements looks like this:
+//
+//  &Attribute.1=this
+//
+//  &Attribute.2=that
+func (c *SQS) SendMessageBatch(input *SendMessageBatchInput) (*SendMessageBatchOutput, error) {
+ req, out := c.SendMessageBatchRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opSetQueueAttributes = "SetQueueAttributes"
+
+// SetQueueAttributesRequest generates a "aws/request.Request" representing the
+// client's request for the SetQueueAttributes operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the SetQueueAttributes method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the SetQueueAttributesRequest method.
+// req, resp := client.SetQueueAttributesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *SQS) SetQueueAttributesRequest(input *SetQueueAttributesInput) (req *request.Request, output *SetQueueAttributesOutput) {
+ op := &request.Operation{
+ Name: opSetQueueAttributes,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &SetQueueAttributesInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &SetQueueAttributesOutput{}
+ req.Data = output
+ return
+}
+
+// Sets the value of one or more queue attributes. When you change a queue's
+// attributes, the change can take up to 60 seconds for most of the attributes
+// to propagate throughout the SQS system. Changes made to the MessageRetentionPeriod
+// attribute can take up to 15 minutes.
+//
+// Going forward, new attributes might be added. If you are writing code that
+// calls this action, we recommend that you structure your code so that it can
+// handle new attributes gracefully.
+func (c *SQS) SetQueueAttributes(input *SetQueueAttributesInput) (*SetQueueAttributesOutput, error) {
+ req, out := c.SetQueueAttributesRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+type AddPermissionInput struct {
+ _ struct{} `type:"structure"`
+
+ // The AWS account number of the principal (http://docs.aws.amazon.com/general/latest/gr/glos-chap.html#P)
+ // who will be given permission. The principal must have an AWS account, but
+ // does not need to be signed up for Amazon SQS. For information about locating
+ // the AWS account identification, see Your AWS Identifiers (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/AWSCredentials.html)
+ // in the Amazon SQS Developer Guide.
+ AWSAccountIds []*string `locationNameList:"AWSAccountId" type:"list" flattened:"true" required:"true"`
+
+ // The action the client wants to allow for the specified principal. The following
+ // are valid values: * | SendMessage | ReceiveMessage | DeleteMessage | ChangeMessageVisibility
+ // | GetQueueAttributes | GetQueueUrl.
For more information about these actions, + // see Understanding Permissions (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/acp-overview.html#PermissionTypes) + // in the Amazon SQS Developer Guide. + // + // Specifying SendMessage, DeleteMessage, or ChangeMessageVisibility for the + // ActionName.n also grants permissions for the corresponding batch versions + // of those actions: SendMessageBatch, DeleteMessageBatch, and ChangeMessageVisibilityBatch. + Actions []*string `locationNameList:"ActionName" type:"list" flattened:"true" required:"true"` + + // The unique identification of the permission you're setting (e.g., AliceSendMessage). + // Constraints: Maximum 80 characters; alphanumeric characters, hyphens (-), + // and underscores (_) are allowed. + Label *string `type:"string" required:"true"` + + // The URL of the Amazon SQS queue to take action on. + // + // Queue URLs are case-sensitive. + QueueUrl *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AddPermissionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddPermissionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddPermissionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddPermissionInput"} + if s.AWSAccountIds == nil { + invalidParams.Add(request.NewErrParamRequired("AWSAccountIds")) + } + if s.Actions == nil { + invalidParams.Add(request.NewErrParamRequired("Actions")) + } + if s.Label == nil { + invalidParams.Add(request.NewErrParamRequired("Label")) + } + if s.QueueUrl == nil { + invalidParams.Add(request.NewErrParamRequired("QueueUrl")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type AddPermissionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddPermissionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddPermissionOutput) GoString() string { + return s.String() +} + +// This is used in the responses of batch API to give a detailed description +// of the result of an action on each entry in the request. +type BatchResultErrorEntry struct { + _ struct{} `type:"structure"` + + // An error code representing why the action failed on this entry. + Code *string `type:"string" required:"true"` + + // The id of an entry in a batch request. + Id *string `type:"string" required:"true"` + + // A message explaining why the action failed on this entry. + Message *string `type:"string"` + + // Whether the error happened due to the sender's fault. + SenderFault *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s BatchResultErrorEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchResultErrorEntry) GoString() string { + return s.String() +} + +type ChangeMessageVisibilityBatchInput struct { + _ struct{} `type:"structure"` + + // A list of receipt handles of the messages for which the visibility timeout + // must be changed. + Entries []*ChangeMessageVisibilityBatchRequestEntry `locationNameList:"ChangeMessageVisibilityBatchRequestEntry" type:"list" flattened:"true" required:"true"` + + // The URL of the Amazon SQS queue to take action on. + // + // Queue URLs are case-sensitive. 
+ QueueUrl *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ChangeMessageVisibilityBatchInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ChangeMessageVisibilityBatchInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ChangeMessageVisibilityBatchInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ChangeMessageVisibilityBatchInput"}
+ if s.Entries == nil {
+ invalidParams.Add(request.NewErrParamRequired("Entries"))
+ }
+ if s.QueueUrl == nil {
+ invalidParams.Add(request.NewErrParamRequired("QueueUrl"))
+ }
+ if s.Entries != nil {
+ for i, v := range s.Entries {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Entries", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// For each message in the batch, the response contains a ChangeMessageVisibilityBatchResultEntry
+// tag if the message succeeds or a BatchResultErrorEntry tag if the message
+// fails.
+type ChangeMessageVisibilityBatchOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of BatchResultErrorEntry items.
+ Failed []*BatchResultErrorEntry `locationNameList:"BatchResultErrorEntry" type:"list" flattened:"true" required:"true"`
+
+ // A list of ChangeMessageVisibilityBatchResultEntry items.
+ Successful []*ChangeMessageVisibilityBatchResultEntry `locationNameList:"ChangeMessageVisibilityBatchResultEntry" type:"list" flattened:"true" required:"true"`
+}
+
+// String returns the string representation
+func (s ChangeMessageVisibilityBatchOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ChangeMessageVisibilityBatchOutput) GoString() string {
+ return s.String()
+}
+
+// Encloses a receipt handle and an entry id for each message in ChangeMessageVisibilityBatch.
+//
+// All of the following parameters are list parameters that must be prefixed
+// with ChangeMessageVisibilityBatchRequestEntry.n, where n is an integer value
+// starting with 1. For example, a parameter list for this action might look
+// like this:
+//
+//  &ChangeMessageVisibilityBatchRequestEntry.1.Id=change_visibility_msg_2
+//
+//  &ChangeMessageVisibilityBatchRequestEntry.1.ReceiptHandle=Your_Receipt_Handle
+//
+//  &ChangeMessageVisibilityBatchRequestEntry.1.VisibilityTimeout=45
+type ChangeMessageVisibilityBatchRequestEntry struct {
+ _ struct{} `type:"structure"`
+
+ // An identifier for this particular receipt handle. This is used to communicate
+ // the result. Note that the Ids of a batch request need to be unique within
+ // the request.
+ Id *string `type:"string" required:"true"`
+
+ // A receipt handle.
+ ReceiptHandle *string `type:"string" required:"true"`
+
+ // The new value (in seconds) for the message's visibility timeout.
+ VisibilityTimeout *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s ChangeMessageVisibilityBatchRequestEntry) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ChangeMessageVisibilityBatchRequestEntry) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
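+//
+// A minimal pre-flight sketch (hypothetical values; aws.String is the SDK's
+// string-pointer helper):
+//
+//   entry := &ChangeMessageVisibilityBatchRequestEntry{
+//       Id:            aws.String("msg-1"),
+//       ReceiptHandle: aws.String(receiptHandle),
+//   }
+//   if err := entry.Validate(); err != nil {
+//       // reject the entry before adding it to a batch request
+//   }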
+func (s *ChangeMessageVisibilityBatchRequestEntry) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ChangeMessageVisibilityBatchRequestEntry"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.ReceiptHandle == nil { + invalidParams.Add(request.NewErrParamRequired("ReceiptHandle")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Encloses the id of an entry in ChangeMessageVisibilityBatch. +type ChangeMessageVisibilityBatchResultEntry struct { + _ struct{} `type:"structure"` + + // Represents a message whose visibility timeout has been changed successfully. + Id *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ChangeMessageVisibilityBatchResultEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangeMessageVisibilityBatchResultEntry) GoString() string { + return s.String() +} + +type ChangeMessageVisibilityInput struct { + _ struct{} `type:"structure"` + + // The URL of the Amazon SQS queue to take action on. + // + // Queue URLs are case-sensitive. + QueueUrl *string `type:"string" required:"true"` + + // The receipt handle associated with the message whose visibility timeout should + // be changed. This parameter is returned by the ReceiveMessage action. + ReceiptHandle *string `type:"string" required:"true"` + + // The new value (in seconds - from 0 to 43200 - maximum 12 hours) for the message's + // visibility timeout. + VisibilityTimeout *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s ChangeMessageVisibilityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangeMessageVisibilityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ChangeMessageVisibilityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ChangeMessageVisibilityInput"} + if s.QueueUrl == nil { + invalidParams.Add(request.NewErrParamRequired("QueueUrl")) + } + if s.ReceiptHandle == nil { + invalidParams.Add(request.NewErrParamRequired("ReceiptHandle")) + } + if s.VisibilityTimeout == nil { + invalidParams.Add(request.NewErrParamRequired("VisibilityTimeout")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ChangeMessageVisibilityOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ChangeMessageVisibilityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangeMessageVisibilityOutput) GoString() string { + return s.String() +} + +type CreateQueueInput struct { + _ struct{} `type:"structure"` + + // A map of attributes with their corresponding values. + // + // The following lists the names, descriptions, and values of the special request + // parameters the CreateQueue action uses: + // + // DelaySeconds - The time in seconds that the delivery of all messages in + // the queue will be delayed. An integer from 0 to 900 (15 minutes). The default + // for this attribute is 0 (zero). + // + // MaximumMessageSize - The limit of how many bytes a message can contain before + // Amazon SQS rejects it. An integer from 1024 bytes (1 KiB) up to 262144 bytes + // (256 KiB). The default for this attribute is 262144 (256 KiB). 
+ // + // MessageRetentionPeriod - The number of seconds Amazon SQS retains a message. + // Integer representing seconds, from 60 (1 minute) to 1209600 (14 days). The + // default for this attribute is 345600 (4 days). + // + // Policy - The queue's policy. A valid AWS policy. For more information about + // policy structure, see Overview of AWS IAM Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/PoliciesOverview.html) + // in the Amazon IAM User Guide. + // + // ReceiveMessageWaitTimeSeconds - The time for which a ReceiveMessage call + // will wait for a message to arrive. An integer from 0 to 20 (seconds). The + // default for this attribute is 0. + // + // RedrivePolicy - The parameters for dead letter queue functionality of the + // source queue. For more information about RedrivePolicy and dead letter queues, + // see Using Amazon SQS Dead Letter Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/SQSDeadLetterQueue.html) + // in the Amazon SQS Developer Guide. + // + // VisibilityTimeout - The visibility timeout for the queue. An integer from + // 0 to 43200 (12 hours). The default for this attribute is 30. For more information + // about visibility timeout, see Visibility Timeout (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/AboutVT.html) + // in the Amazon SQS Developer Guide. + // + // Any other valid special request parameters that are specified (such as + // ApproximateNumberOfMessages, ApproximateNumberOfMessagesDelayed, ApproximateNumberOfMessagesNotVisible, + // CreatedTimestamp, LastModifiedTimestamp, and QueueArn) will be ignored. + Attributes map[string]*string `locationName:"Attribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` + + // The name for the queue to be created. + // + // Queue names are case-sensitive. + QueueName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateQueueInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateQueueInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateQueueInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateQueueInput"} + if s.QueueName == nil { + invalidParams.Add(request.NewErrParamRequired("QueueName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Returns the QueueUrl element of the created queue. +type CreateQueueOutput struct { + _ struct{} `type:"structure"` + + // The URL for the created Amazon SQS queue. + QueueUrl *string `type:"string"` +} + +// String returns the string representation +func (s CreateQueueOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateQueueOutput) GoString() string { + return s.String() +} + +type DeleteMessageBatchInput struct { + _ struct{} `type:"structure"` + + // A list of receipt handles for the messages to be deleted. + Entries []*DeleteMessageBatchRequestEntry `locationNameList:"DeleteMessageBatchRequestEntry" type:"list" flattened:"true" required:"true"` + + // The URL of the Amazon SQS queue to take action on. + // + // Queue URLs are case-sensitive. 
+ QueueUrl *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteMessageBatchInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteMessageBatchInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteMessageBatchInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteMessageBatchInput"}
+ if s.Entries == nil {
+ invalidParams.Add(request.NewErrParamRequired("Entries"))
+ }
+ if s.QueueUrl == nil {
+ invalidParams.Add(request.NewErrParamRequired("QueueUrl"))
+ }
+ if s.Entries != nil {
+ for i, v := range s.Entries {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Entries", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// For each message in the batch, the response contains a DeleteMessageBatchResultEntry
+// tag if the message is deleted or a BatchResultErrorEntry tag if the message
+// cannot be deleted.
+type DeleteMessageBatchOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of BatchResultErrorEntry items.
+ Failed []*BatchResultErrorEntry `locationNameList:"BatchResultErrorEntry" type:"list" flattened:"true" required:"true"`
+
+ // A list of DeleteMessageBatchResultEntry items.
+ Successful []*DeleteMessageBatchResultEntry `locationNameList:"DeleteMessageBatchResultEntry" type:"list" flattened:"true" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteMessageBatchOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteMessageBatchOutput) GoString() string {
+ return s.String()
+}
+
+// Encloses a receipt handle and an identifier for it.
+type DeleteMessageBatchRequestEntry struct {
+ _ struct{} `type:"structure"`
+
+ // An identifier for this particular receipt handle. This is used to communicate
+ // the result. Note that the Ids of a batch request need to be unique within
+ // the request.
+ Id *string `type:"string" required:"true"`
+
+ // A receipt handle.
+ ReceiptHandle *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteMessageBatchRequestEntry) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteMessageBatchRequestEntry) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteMessageBatchRequestEntry) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteMessageBatchRequestEntry"}
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+ if s.ReceiptHandle == nil {
+ invalidParams.Add(request.NewErrParamRequired("ReceiptHandle"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Encloses the id of an entry in DeleteMessageBatch.
+type DeleteMessageBatchResultEntry struct {
+ _ struct{} `type:"structure"`
+
+ // Represents a successfully deleted message.
+ Id *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteMessageBatchResultEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMessageBatchResultEntry) GoString() string { + return s.String() +} + +type DeleteMessageInput struct { + _ struct{} `type:"structure"` + + // The URL of the Amazon SQS queue to take action on. + // + // Queue URLs are case-sensitive. + QueueUrl *string `type:"string" required:"true"` + + // The receipt handle associated with the message to delete. + ReceiptHandle *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteMessageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMessageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteMessageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteMessageInput"} + if s.QueueUrl == nil { + invalidParams.Add(request.NewErrParamRequired("QueueUrl")) + } + if s.ReceiptHandle == nil { + invalidParams.Add(request.NewErrParamRequired("ReceiptHandle")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteMessageOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteMessageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMessageOutput) GoString() string { + return s.String() +} + +type DeleteQueueInput struct { + _ struct{} `type:"structure"` + + // The URL of the Amazon SQS queue to take action on. + // + // Queue URLs are case-sensitive. + QueueUrl *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteQueueInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteQueueInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteQueueInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteQueueInput"} + if s.QueueUrl == nil { + invalidParams.Add(request.NewErrParamRequired("QueueUrl")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteQueueOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteQueueOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteQueueOutput) GoString() string { + return s.String() +} + +type GetQueueAttributesInput struct { + _ struct{} `type:"structure"` + + // A list of attributes to retrieve information for. The following attributes + // are supported: + // + // All - returns all values. + // + // ApproximateNumberOfMessages - returns the approximate number of visible + // messages in a queue. For more information, see Resources Required to Process + // Messages (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/ApproximateNumber.html) + // in the Amazon SQS Developer Guide. + // + // ApproximateNumberOfMessagesNotVisible - returns the approximate number of + // messages that are not timed-out and not deleted. 
For more information, see + // Resources Required to Process Messages (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/ApproximateNumber.html) + // in the Amazon SQS Developer Guide. + // + // VisibilityTimeout - returns the visibility timeout for the queue. For more + // information about visibility timeout, see Visibility Timeout (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/AboutVT.html) + // in the Amazon SQS Developer Guide. + // + // CreatedTimestamp - returns the time when the queue was created (epoch time + // in seconds). + // + // LastModifiedTimestamp - returns the time when the queue was last changed + // (epoch time in seconds). + // + // Policy - returns the queue's policy. + // + // MaximumMessageSize - returns the limit of how many bytes a message can contain + // before Amazon SQS rejects it. + // + // MessageRetentionPeriod - returns the number of seconds Amazon SQS retains + // a message. + // + // QueueArn - returns the queue's Amazon resource name (ARN). + // + // ApproximateNumberOfMessagesDelayed - returns the approximate number of messages + // that are pending to be added to the queue. + // + // DelaySeconds - returns the default delay on the queue in seconds. + // + // ReceiveMessageWaitTimeSeconds - returns the time for which a ReceiveMessage + // call will wait for a message to arrive. + // + // RedrivePolicy - returns the parameters for dead letter queue functionality + // of the source queue. For more information about RedrivePolicy and dead letter + // queues, see Using Amazon SQS Dead Letter Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/SQSDeadLetterQueue.html) + // in the Amazon SQS Developer Guide. + // + // Going forward, new attributes might be added. If you are writing code that + // calls this action, we recommend that you structure your code so that it can + // handle new attributes gracefully. + AttributeNames []*string `locationNameList:"AttributeName" type:"list" flattened:"true"` + + // The URL of the Amazon SQS queue to take action on. + // + // Queue URLs are case-sensitive. + QueueUrl *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetQueueAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetQueueAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetQueueAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetQueueAttributesInput"} + if s.QueueUrl == nil { + invalidParams.Add(request.NewErrParamRequired("QueueUrl")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A list of returned queue attributes. +type GetQueueAttributesOutput struct { + _ struct{} `type:"structure"` + + // A map of attributes to the respective values. + Attributes map[string]*string `locationName:"Attribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` +} + +// String returns the string representation +func (s GetQueueAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetQueueAttributesOutput) GoString() string { + return s.String() +} + +type GetQueueUrlInput struct { + _ struct{} `type:"structure"` + + // The name of the queue whose URL must be fetched. 
Maximum 80 characters; alphanumeric + // characters, hyphens (-), and underscores (_) are allowed. + // + // Queue names are case-sensitive. + QueueName *string `type:"string" required:"true"` + + // The AWS account ID of the account that created the queue. + QueueOwnerAWSAccountId *string `type:"string"` +} + +// String returns the string representation +func (s GetQueueUrlInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetQueueUrlInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetQueueUrlInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetQueueUrlInput"} + if s.QueueName == nil { + invalidParams.Add(request.NewErrParamRequired("QueueName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// For more information, see Responses (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/UnderstandingResponses.html) +// in the Amazon SQS Developer Guide. +type GetQueueUrlOutput struct { + _ struct{} `type:"structure"` + + // The URL for the queue. + QueueUrl *string `type:"string"` +} + +// String returns the string representation +func (s GetQueueUrlOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetQueueUrlOutput) GoString() string { + return s.String() +} + +type ListDeadLetterSourceQueuesInput struct { + _ struct{} `type:"structure"` + + // The queue URL of a dead letter queue. + // + // Queue URLs are case-sensitive. + QueueUrl *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ListDeadLetterSourceQueuesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDeadLetterSourceQueuesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListDeadLetterSourceQueuesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListDeadLetterSourceQueuesInput"} + if s.QueueUrl == nil { + invalidParams.Add(request.NewErrParamRequired("QueueUrl")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A list of your dead letter source queues. +type ListDeadLetterSourceQueuesOutput struct { + _ struct{} `type:"structure"` + + // A list of source queue URLs that have the RedrivePolicy queue attribute configured + // with a dead letter queue. + QueueUrls []*string `locationName:"queueUrls" locationNameList:"QueueUrl" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s ListDeadLetterSourceQueuesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDeadLetterSourceQueuesOutput) GoString() string { + return s.String() +} + +type ListQueuesInput struct { + _ struct{} `type:"structure"` + + // A string to use for filtering the list results. Only those queues whose name + // begins with the specified string are returned. + // + // Queue names are case-sensitive. 
+ QueueNamePrefix *string `type:"string"` +} + +// String returns the string representation +func (s ListQueuesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListQueuesInput) GoString() string { + return s.String() +} + +// A list of your queues. +type ListQueuesOutput struct { + _ struct{} `type:"structure"` + + // A list of queue URLs, up to 1000 entries. + QueueUrls []*string `locationNameList:"QueueUrl" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s ListQueuesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListQueuesOutput) GoString() string { + return s.String() +} + +// An Amazon SQS message. +type Message struct { + _ struct{} `type:"structure"` + + // SenderId, SentTimestamp, ApproximateReceiveCount, and/or ApproximateFirstReceiveTimestamp. + // SentTimestamp and ApproximateFirstReceiveTimestamp are each returned as an + // integer representing the epoch time (http://en.wikipedia.org/wiki/Unix_time) + // in milliseconds. + Attributes map[string]*string `locationName:"Attribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` + + // The message's contents (not URL-encoded). + Body *string `type:"string"` + + // An MD5 digest of the non-URL-encoded message body string. + MD5OfBody *string `type:"string"` + + // An MD5 digest of the non-URL-encoded message attribute string. This can be + // used to verify that Amazon SQS received the message correctly. Amazon SQS + // first URL decodes the message before creating the MD5 digest. For information + // about MD5, go to http://www.faqs.org/rfcs/rfc1321.html (http://www.faqs.org/rfcs/rfc1321.html). + MD5OfMessageAttributes *string `type:"string"` + + // Each message attribute consists of a Name, Type, and Value. For more information, + // see Message Attribute Items (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/SQSMessageAttributes.html#SQSMessageAttributesNTV). + MessageAttributes map[string]*MessageAttributeValue `locationName:"MessageAttribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` + + // A unique identifier for the message. Message IDs are considered unique across + // all AWS accounts for an extended period of time. + MessageId *string `type:"string"` + + // An identifier associated with the act of receiving the message. A new receipt + // handle is returned every time you receive a message. When deleting a message, + // you provide the last received receipt handle to delete the message. + ReceiptHandle *string `type:"string"` +} + +// String returns the string representation +func (s Message) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Message) GoString() string { + return s.String() +} + +// The user-specified message attribute value. For string data types, the value +// attribute has the same restrictions on the content as the message body. For +// more information, see SendMessage (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SendMessage.html). +// +// Name, type, and value must not be empty or null. In addition, the message +// body should not be empty or null. All parts of the message attribute, including +// name, type, and value, are included in the message size restriction, which +// is currently 256 KB (262,144 bytes). 
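+//
+// A minimal construction sketch (illustrative names and values only;
+// aws.String is the SDK's string-pointer helper):
+//
+//   attrs := map[string]*MessageAttributeValue{
+//       "Color": {
+//           DataType:    aws.String("String"),
+//           StringValue: aws.String("blue"),
+//       },
+//   }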
+type MessageAttributeValue struct { + _ struct{} `type:"structure"` + + // Not implemented. Reserved for future use. + BinaryListValues [][]byte `locationName:"BinaryListValue" locationNameList:"BinaryListValue" type:"list" flattened:"true"` + + // Binary type attributes can store any binary data, for example, compressed + // data, encrypted data, or images. + // + // BinaryValue is automatically base64 encoded/decoded by the SDK. + BinaryValue []byte `type:"blob"` + + // Amazon SQS supports the following logical data types: String, Number, and + // Binary. For the Number data type, you must use StringValue. + // + // You can also append custom labels. For more information, see Message Attribute + // Data Types (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/SQSMessageAttributes.html#SQSMessageAttributes.DataTypes). + DataType *string `type:"string" required:"true"` + + // Not implemented. Reserved for future use. + StringListValues []*string `locationName:"StringListValue" locationNameList:"StringListValue" type:"list" flattened:"true"` + + // Strings are Unicode with UTF8 binary encoding. For a list of code values, + // see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters). + StringValue *string `type:"string"` +} + +// String returns the string representation +func (s MessageAttributeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MessageAttributeValue) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MessageAttributeValue) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MessageAttributeValue"} + if s.DataType == nil { + invalidParams.Add(request.NewErrParamRequired("DataType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PurgeQueueInput struct { + _ struct{} `type:"structure"` + + // The queue URL of the queue to delete the messages from when using the PurgeQueue + // API. + // + // Queue URLs are case-sensitive. + QueueUrl *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s PurgeQueueInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurgeQueueInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PurgeQueueInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PurgeQueueInput"} + if s.QueueUrl == nil { + invalidParams.Add(request.NewErrParamRequired("QueueUrl")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PurgeQueueOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PurgeQueueOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurgeQueueOutput) GoString() string { + return s.String() +} + +type ReceiveMessageInput struct { + _ struct{} `type:"structure"` + + // A list of attributes that need to be returned along with each message. These + // attributes include: + // + // All - returns all values. + // + // ApproximateFirstReceiveTimestamp - returns the time when the message was + // first received from the queue (epoch time in milliseconds). 
+	//
+	//   ApproximateReceiveCount - returns the number of times a message has been
+	// received from the queue but not deleted.
+	//
+	//   SenderId - returns the AWS account number (or the IP address, if anonymous
+	// access is allowed) of the sender.
+	//
+	//   SentTimestamp - returns the time when the message was sent to the queue
+	// (epoch time in milliseconds).
+	//
+	//   Any other valid special request parameters that are specified (such as
+	// ApproximateNumberOfMessages, ApproximateNumberOfMessagesDelayed, ApproximateNumberOfMessagesNotVisible,
+	// CreatedTimestamp, DelaySeconds, LastModifiedTimestamp, MaximumMessageSize,
+	// MessageRetentionPeriod, Policy, QueueArn, ReceiveMessageWaitTimeSeconds,
+	// RedrivePolicy, and VisibilityTimeout) will be ignored.
+	AttributeNames []*string `locationNameList:"AttributeName" type:"list" flattened:"true"`
+
+	// The maximum number of messages to return. Amazon SQS never returns more messages
+	// than this value but may return fewer. Values can be from 1 to 10. Default
+	// is 1.
+	//
+	// Not all of the messages are necessarily returned.
+	MaxNumberOfMessages *int64 `type:"integer"`
+
+	// The name of the message attribute to receive (sent in the Query API as
+	// MessageAttributeName.N, where N is the index). The message attribute
+	// name can contain the following characters: A-Z, a-z, 0-9, underscore (_),
+	// hyphen (-), and period (.). The name must not start or end with a period,
+	// and it should not have successive periods. The name is case sensitive and
+	// must be unique among all attribute names for the message. The name can be
+	// up to 256 characters long. The name cannot start with "AWS." or "Amazon."
+	// (or any variations in casing), because these prefixes are reserved for use
+	// by Amazon Web Services.
+	//
+	// When using ReceiveMessage, you can send a list of attribute names to receive,
+	// or you can return all of the attributes by specifying "All" or ".*" in your
+	// request. You can also use "bar.*" to return all message attributes starting
+	// with the "bar" prefix.
+	MessageAttributeNames []*string `locationNameList:"MessageAttributeName" type:"list" flattened:"true"`
+
+	// The URL of the Amazon SQS queue to take action on.
+	//
+	// Queue URLs are case-sensitive.
+	QueueUrl *string `type:"string" required:"true"`
+
+	// The duration (in seconds) that the received messages are hidden from subsequent
+	// retrieve requests after being retrieved by a ReceiveMessage request.
+	VisibilityTimeout *int64 `type:"integer"`
+
+	// The duration (in seconds) for which the call will wait for a message to arrive
+	// in the queue before returning. If a message is available, the call will return
+	// sooner than WaitTimeSeconds.
+	WaitTimeSeconds *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s ReceiveMessageInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReceiveMessageInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReceiveMessageInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ReceiveMessageInput"}
+	if s.QueueUrl == nil {
+		invalidParams.Add(request.NewErrParamRequired("QueueUrl"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// A list of received messages.
+type ReceiveMessageOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of messages.
+ Messages []*Message `locationNameList:"Message" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s ReceiveMessageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReceiveMessageOutput) GoString() string { + return s.String() +} + +type RemovePermissionInput struct { + _ struct{} `type:"structure"` + + // The identification of the permission to remove. This is the label added with + // the AddPermission action. + Label *string `type:"string" required:"true"` + + // The URL of the Amazon SQS queue to take action on. + // + // Queue URLs are case-sensitive. + QueueUrl *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RemovePermissionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemovePermissionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RemovePermissionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemovePermissionInput"} + if s.Label == nil { + invalidParams.Add(request.NewErrParamRequired("Label")) + } + if s.QueueUrl == nil { + invalidParams.Add(request.NewErrParamRequired("QueueUrl")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RemovePermissionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemovePermissionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemovePermissionOutput) GoString() string { + return s.String() +} + +type SendMessageBatchInput struct { + _ struct{} `type:"structure"` + + // A list of SendMessageBatchRequestEntry items. + Entries []*SendMessageBatchRequestEntry `locationNameList:"SendMessageBatchRequestEntry" type:"list" flattened:"true" required:"true"` + + // The URL of the Amazon SQS queue to take action on. + // + // Queue URLs are case-sensitive. + QueueUrl *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SendMessageBatchInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendMessageBatchInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SendMessageBatchInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SendMessageBatchInput"} + if s.Entries == nil { + invalidParams.Add(request.NewErrParamRequired("Entries")) + } + if s.QueueUrl == nil { + invalidParams.Add(request.NewErrParamRequired("QueueUrl")) + } + if s.Entries != nil { + for i, v := range s.Entries { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Entries", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// For each message in the batch, the response contains a SendMessageBatchResultEntry +// tag if the message succeeds or a BatchResultErrorEntry tag if the message +// fails. +type SendMessageBatchOutput struct { + _ struct{} `type:"structure"` + + // A list of BatchResultErrorEntry items with the error detail about each message + // that could not be enqueued. 
+	Failed []*BatchResultErrorEntry `locationNameList:"BatchResultErrorEntry" type:"list" flattened:"true" required:"true"`
+
+	// A list of SendMessageBatchResultEntry items.
+	Successful []*SendMessageBatchResultEntry `locationNameList:"SendMessageBatchResultEntry" type:"list" flattened:"true" required:"true"`
+}
+
+// String returns the string representation
+func (s SendMessageBatchOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SendMessageBatchOutput) GoString() string {
+	return s.String()
+}
+
+// Contains the details of a single Amazon SQS message along with an Id.
+type SendMessageBatchRequestEntry struct {
+	_ struct{} `type:"structure"`
+
+	// The number of seconds for which the message has to be delayed.
+	DelaySeconds *int64 `type:"integer"`
+
+	// An identifier for the message in this batch. This is used to communicate
+	// the result. Note that the Ids of a batch request need to be unique within
+	// the request.
+	Id *string `type:"string" required:"true"`
+
+	// Each message attribute consists of a Name, Type, and Value. For more information,
+	// see Message Attribute Items (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/SQSMessageAttributes.html#SQSMessageAttributesNTV).
+	MessageAttributes map[string]*MessageAttributeValue `locationName:"MessageAttribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"`
+
+	// Body of the message.
+	MessageBody *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s SendMessageBatchRequestEntry) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SendMessageBatchRequestEntry) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SendMessageBatchRequestEntry) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "SendMessageBatchRequestEntry"}
+	if s.Id == nil {
+		invalidParams.Add(request.NewErrParamRequired("Id"))
+	}
+	if s.MessageBody == nil {
+		invalidParams.Add(request.NewErrParamRequired("MessageBody"))
+	}
+	if s.MessageAttributes != nil {
+		for i, v := range s.MessageAttributes {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "MessageAttributes", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Encloses a message ID for a successfully enqueued message of a SendMessageBatch.
+type SendMessageBatchResultEntry struct {
+	_ struct{} `type:"structure"`
+
+	// An identifier for the message in this batch.
+	Id *string `type:"string" required:"true"`
+
+	// An MD5 digest of the non-URL-encoded message attribute string. This can be
+	// used to verify that Amazon SQS received the message batch correctly. Amazon
+	// SQS first URL decodes the message before creating the MD5 digest. For information
+	// about MD5, go to http://www.faqs.org/rfcs/rfc1321.html (http://www.faqs.org/rfcs/rfc1321.html).
+	MD5OfMessageAttributes *string `type:"string"`
+
+	// An MD5 digest of the non-URL-encoded message body string. This can be used
+	// to verify that Amazon SQS received the message correctly. Amazon SQS first
+	// URL decodes the message before creating the MD5 digest. For information about
+	// MD5, go to http://www.faqs.org/rfcs/rfc1321.html (http://www.faqs.org/rfcs/rfc1321.html).
+ MD5OfMessageBody *string `type:"string" required:"true"` + + // An identifier for the message. + MessageId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SendMessageBatchResultEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendMessageBatchResultEntry) GoString() string { + return s.String() +} + +type SendMessageInput struct { + _ struct{} `type:"structure"` + + // The number of seconds (0 to 900 - 15 minutes) to delay a specific message. + // Messages with a positive DelaySeconds value become available for processing + // after the delay time is finished. If you don't specify a value, the default + // value for the queue applies. + DelaySeconds *int64 `type:"integer"` + + // Each message attribute consists of a Name, Type, and Value. For more information, + // see Message Attribute Items (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/SQSMessageAttributes.html#SQSMessageAttributesNTV). + MessageAttributes map[string]*MessageAttributeValue `locationName:"MessageAttribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` + + // The message to send. String maximum 256 KB in size. For a list of allowed + // characters, see the preceding important note. + MessageBody *string `type:"string" required:"true"` + + // The URL of the Amazon SQS queue to take action on. + // + // Queue URLs are case-sensitive. + QueueUrl *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SendMessageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendMessageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SendMessageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SendMessageInput"} + if s.MessageBody == nil { + invalidParams.Add(request.NewErrParamRequired("MessageBody")) + } + if s.QueueUrl == nil { + invalidParams.Add(request.NewErrParamRequired("QueueUrl")) + } + if s.MessageAttributes != nil { + for i, v := range s.MessageAttributes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "MessageAttributes", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The MD5OfMessageBody and MessageId elements. +type SendMessageOutput struct { + _ struct{} `type:"structure"` + + // An MD5 digest of the non-URL-encoded message attribute string. This can be + // used to verify that Amazon SQS received the message correctly. Amazon SQS + // first URL decodes the message before creating the MD5 digest. For information + // about MD5, go to http://www.faqs.org/rfcs/rfc1321.html (http://www.faqs.org/rfcs/rfc1321.html). + MD5OfMessageAttributes *string `type:"string"` + + // An MD5 digest of the non-URL-encoded message body string. This can be used + // to verify that Amazon SQS received the message correctly. Amazon SQS first + // URL decodes the message before creating the MD5 digest. For information about + // MD5, go to http://www.faqs.org/rfcs/rfc1321.html (http://www.faqs.org/rfcs/rfc1321.html). + MD5OfMessageBody *string `type:"string"` + + // An element containing the message ID of the message sent to the queue. 
For + // more information, see Queue and Message Identifiers (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/ImportantIdentifiers.html) + // in the Amazon SQS Developer Guide. + MessageId *string `type:"string"` +} + +// String returns the string representation +func (s SendMessageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendMessageOutput) GoString() string { + return s.String() +} + +type SetQueueAttributesInput struct { + _ struct{} `type:"structure"` + + // A map of attributes to set. + // + // The following lists the names, descriptions, and values of the special request + // parameters the SetQueueAttributes action uses: + // + // DelaySeconds - The time in seconds that the delivery of all messages in + // the queue will be delayed. An integer from 0 to 900 (15 minutes). The default + // for this attribute is 0 (zero). + // + // MaximumMessageSize - The limit of how many bytes a message can contain before + // Amazon SQS rejects it. An integer from 1024 bytes (1 KiB) up to 262144 bytes + // (256 KiB). The default for this attribute is 262144 (256 KiB). + // + // MessageRetentionPeriod - The number of seconds Amazon SQS retains a message. + // Integer representing seconds, from 60 (1 minute) to 1209600 (14 days). The + // default for this attribute is 345600 (4 days). + // + // Policy - The queue's policy. A valid AWS policy. For more information about + // policy structure, see Overview of AWS IAM Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/PoliciesOverview.html) + // in the Amazon IAM User Guide. + // + // ReceiveMessageWaitTimeSeconds - The time for which a ReceiveMessage call + // will wait for a message to arrive. An integer from 0 to 20 (seconds). The + // default for this attribute is 0. + // + // VisibilityTimeout - The visibility timeout for the queue. An integer from + // 0 to 43200 (12 hours). The default for this attribute is 30. For more information + // about visibility timeout, see Visibility Timeout in the Amazon SQS Developer + // Guide. + // + // RedrivePolicy - The parameters for dead letter queue functionality of the + // source queue. For more information about RedrivePolicy and dead letter queues, + // see Using Amazon SQS Dead Letter Queues in the Amazon SQS Developer Guide. + // + // Any other valid special request parameters that are specified (such as + // ApproximateNumberOfMessages, ApproximateNumberOfMessagesDelayed, ApproximateNumberOfMessagesNotVisible, + // CreatedTimestamp, LastModifiedTimestamp, and QueueArn) will be ignored. + Attributes map[string]*string `locationName:"Attribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true" required:"true"` + + // The URL of the Amazon SQS queue to take action on. + // + // Queue URLs are case-sensitive. + QueueUrl *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetQueueAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetQueueAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
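The special request parameters described above correspond one-to-one to the QueueAttributeName constants defined near the end of this file. A short sketch that applies a longer visibility timeout and a redrive policy, assuming the aws, fmt, and sqs imports from the surrounding examples; the dead-letter queue ARN and the redrive JSON shape are illustrative:

func enableRedrive(svc *sqs.SQS, queueURL, dlqARN string) error {
	// maxReceiveCount and the dead-letter target are fields of the RedrivePolicy JSON.
	policy := fmt.Sprintf(`{"maxReceiveCount":"5","deadLetterTargetArn":"%s"}`, dlqARN)
	_, err := svc.SetQueueAttributes(&sqs.SetQueueAttributesInput{
		QueueUrl: aws.String(queueURL),
		Attributes: map[string]*string{
			sqs.QueueAttributeNameVisibilityTimeout: aws.String("60"),
			sqs.QueueAttributeNameRedrivePolicy:     aws.String(policy),
		},
	})
	return err
}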
+func (s *SetQueueAttributesInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "SetQueueAttributesInput"}
+	if s.Attributes == nil {
+		invalidParams.Add(request.NewErrParamRequired("Attributes"))
+	}
+	if s.QueueUrl == nil {
+		invalidParams.Add(request.NewErrParamRequired("QueueUrl"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type SetQueueAttributesOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s SetQueueAttributesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetQueueAttributesOutput) GoString() string {
+	return s.String()
+}
+
+const (
+	// @enum QueueAttributeName
+	QueueAttributeNamePolicy = "Policy"
+	// @enum QueueAttributeName
+	QueueAttributeNameVisibilityTimeout = "VisibilityTimeout"
+	// @enum QueueAttributeName
+	QueueAttributeNameMaximumMessageSize = "MaximumMessageSize"
+	// @enum QueueAttributeName
+	QueueAttributeNameMessageRetentionPeriod = "MessageRetentionPeriod"
+	// @enum QueueAttributeName
+	QueueAttributeNameApproximateNumberOfMessages = "ApproximateNumberOfMessages"
+	// @enum QueueAttributeName
+	QueueAttributeNameApproximateNumberOfMessagesNotVisible = "ApproximateNumberOfMessagesNotVisible"
+	// @enum QueueAttributeName
+	QueueAttributeNameCreatedTimestamp = "CreatedTimestamp"
+	// @enum QueueAttributeName
+	QueueAttributeNameLastModifiedTimestamp = "LastModifiedTimestamp"
+	// @enum QueueAttributeName
+	QueueAttributeNameQueueArn = "QueueArn"
+	// @enum QueueAttributeName
+	QueueAttributeNameApproximateNumberOfMessagesDelayed = "ApproximateNumberOfMessagesDelayed"
+	// @enum QueueAttributeName
+	QueueAttributeNameDelaySeconds = "DelaySeconds"
+	// @enum QueueAttributeName
+	QueueAttributeNameReceiveMessageWaitTimeSeconds = "ReceiveMessageWaitTimeSeconds"
+	// @enum QueueAttributeName
+	QueueAttributeNameRedrivePolicy = "RedrivePolicy"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/api_test.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/api_test.go
new file mode 100644
index 000000000..3f5517e60
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sqs/api_test.go
@@ -0,0 +1,32 @@
+// +build integration
+
+package sqs_test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/sqs"
+)
+
+func TestFlattenedTraits(t *testing.T) {
+	s := sqs.New(session.New())
+	_, err := s.DeleteMessageBatch(&sqs.DeleteMessageBatchInput{
+		QueueUrl: aws.String("QUEUE"),
+		Entries: []*sqs.DeleteMessageBatchRequestEntry{
+			{
+				Id:            aws.String("TEST"),
+				ReceiptHandle: aws.String("RECEIPT"),
+			},
+		},
+	})
+
+	assert.Error(t, err)
+	aerr := err.(awserr.Error)
+	assert.Equal(t, "InvalidAddress", aerr.Code())
+	assert.Equal(t, "The address QUEUE is not valid for this endpoint.", aerr.Message())
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/checksums.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/checksums.go
new file mode 100644
index 000000000..5dd17c4d9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sqs/checksums.go
@@ -0,0 +1,115 @@
+package sqs
+
+import (
+	"crypto/md5"
+	"encoding/hex"
+	"fmt"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+var (
+	errChecksumMissingBody = fmt.Errorf("cannot compute checksum. 
missing body") + errChecksumMissingMD5 = fmt.Errorf("cannot verify checksum. missing response MD5") +) + +func setupChecksumValidation(r *request.Request) { + if aws.BoolValue(r.Config.DisableComputeChecksums) { + return + } + + switch r.Operation.Name { + case opSendMessage: + r.Handlers.Unmarshal.PushBack(verifySendMessage) + case opSendMessageBatch: + r.Handlers.Unmarshal.PushBack(verifySendMessageBatch) + case opReceiveMessage: + r.Handlers.Unmarshal.PushBack(verifyReceiveMessage) + } +} + +func verifySendMessage(r *request.Request) { + if r.DataFilled() && r.ParamsFilled() { + in := r.Params.(*SendMessageInput) + out := r.Data.(*SendMessageOutput) + err := checksumsMatch(in.MessageBody, out.MD5OfMessageBody) + if err != nil { + setChecksumError(r, err.Error()) + } + } +} + +func verifySendMessageBatch(r *request.Request) { + if r.DataFilled() && r.ParamsFilled() { + entries := map[string]*SendMessageBatchResultEntry{} + ids := []string{} + + out := r.Data.(*SendMessageBatchOutput) + for _, entry := range out.Successful { + entries[*entry.Id] = entry + } + + in := r.Params.(*SendMessageBatchInput) + for _, entry := range in.Entries { + if e := entries[*entry.Id]; e != nil { + err := checksumsMatch(entry.MessageBody, e.MD5OfMessageBody) + if err != nil { + ids = append(ids, *e.MessageId) + } + } + } + if len(ids) > 0 { + setChecksumError(r, "invalid messages: %s", strings.Join(ids, ", ")) + } + } +} + +func verifyReceiveMessage(r *request.Request) { + if r.DataFilled() && r.ParamsFilled() { + ids := []string{} + out := r.Data.(*ReceiveMessageOutput) + for i, msg := range out.Messages { + err := checksumsMatch(msg.Body, msg.MD5OfBody) + if err != nil { + if msg.MessageId == nil { + if r.Config.Logger != nil { + r.Config.Logger.Log(fmt.Sprintf( + "WARN: SQS.ReceiveMessage failed checksum request id: %s, message %d has no message ID.", + r.RequestID, i, + )) + } + continue + } + + ids = append(ids, *msg.MessageId) + } + } + if len(ids) > 0 { + setChecksumError(r, "invalid messages: %s", strings.Join(ids, ", ")) + } + } +} + +func checksumsMatch(body, expectedMD5 *string) error { + if body == nil { + return errChecksumMissingBody + } else if expectedMD5 == nil { + return errChecksumMissingMD5 + } + + msum := md5.Sum([]byte(*body)) + sum := hex.EncodeToString(msum[:]) + if sum != *expectedMD5 { + return fmt.Errorf("expected MD5 checksum '%s', got '%s'", *expectedMD5, sum) + } + + return nil +} + +func setChecksumError(r *request.Request, format string, args ...interface{}) { + r.Retryable = aws.Bool(true) + r.Error = awserr.New("InvalidChecksum", fmt.Sprintf(format, args...), nil) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/checksums_test.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/checksums_test.go new file mode 100644 index 000000000..c7451c7b4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sqs/checksums_test.go @@ -0,0 +1,208 @@ +package sqs_test + +import ( + "bytes" + "io/ioutil" + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/sqs" +) + +var svc = func() *sqs.SQS { + s := sqs.New(unit.Session, &aws.Config{ + DisableParamValidation: aws.Bool(true), + }) + s.Handlers.Send.Clear() + return s +}() + +func TestSendMessageChecksum(t *testing.T) { + req, _ := svc.SendMessageRequest(&sqs.SendMessageInput{ + MessageBody: aws.String("test"), + 
}) + req.Handlers.Send.PushBack(func(r *request.Request) { + body := ioutil.NopCloser(bytes.NewReader([]byte(""))) + r.HTTPResponse = &http.Response{StatusCode: 200, Body: body} + r.Data = &sqs.SendMessageOutput{ + MD5OfMessageBody: aws.String("098f6bcd4621d373cade4e832627b4f6"), + MessageId: aws.String("12345"), + } + }) + err := req.Send() + assert.NoError(t, err) +} + +func TestSendMessageChecksumInvalid(t *testing.T) { + req, _ := svc.SendMessageRequest(&sqs.SendMessageInput{ + MessageBody: aws.String("test"), + }) + req.Handlers.Send.PushBack(func(r *request.Request) { + body := ioutil.NopCloser(bytes.NewReader([]byte(""))) + r.HTTPResponse = &http.Response{StatusCode: 200, Body: body} + r.Data = &sqs.SendMessageOutput{ + MD5OfMessageBody: aws.String("000"), + MessageId: aws.String("12345"), + } + }) + err := req.Send() + assert.Error(t, err) + + assert.Equal(t, "InvalidChecksum", err.(awserr.Error).Code()) + assert.Contains(t, err.(awserr.Error).Message(), "expected MD5 checksum '000', got '098f6bcd4621d373cade4e832627b4f6'") +} + +func TestSendMessageChecksumInvalidNoValidation(t *testing.T) { + s := sqs.New(unit.Session, &aws.Config{ + DisableParamValidation: aws.Bool(true), + DisableComputeChecksums: aws.Bool(true), + }) + s.Handlers.Send.Clear() + + req, _ := s.SendMessageRequest(&sqs.SendMessageInput{ + MessageBody: aws.String("test"), + }) + req.Handlers.Send.PushBack(func(r *request.Request) { + body := ioutil.NopCloser(bytes.NewReader([]byte(""))) + r.HTTPResponse = &http.Response{StatusCode: 200, Body: body} + r.Data = &sqs.SendMessageOutput{ + MD5OfMessageBody: aws.String("000"), + MessageId: aws.String("12345"), + } + }) + err := req.Send() + assert.NoError(t, err) +} + +func TestSendMessageChecksumNoInput(t *testing.T) { + req, _ := svc.SendMessageRequest(&sqs.SendMessageInput{}) + req.Handlers.Send.PushBack(func(r *request.Request) { + body := ioutil.NopCloser(bytes.NewReader([]byte(""))) + r.HTTPResponse = &http.Response{StatusCode: 200, Body: body} + r.Data = &sqs.SendMessageOutput{} + }) + err := req.Send() + assert.Error(t, err) + + assert.Equal(t, "InvalidChecksum", err.(awserr.Error).Code()) + assert.Contains(t, err.(awserr.Error).Message(), "cannot compute checksum. missing body") +} + +func TestSendMessageChecksumNoOutput(t *testing.T) { + req, _ := svc.SendMessageRequest(&sqs.SendMessageInput{ + MessageBody: aws.String("test"), + }) + req.Handlers.Send.PushBack(func(r *request.Request) { + body := ioutil.NopCloser(bytes.NewReader([]byte(""))) + r.HTTPResponse = &http.Response{StatusCode: 200, Body: body} + r.Data = &sqs.SendMessageOutput{} + }) + err := req.Send() + assert.Error(t, err) + + assert.Equal(t, "InvalidChecksum", err.(awserr.Error).Code()) + assert.Contains(t, err.(awserr.Error).Message(), "cannot verify checksum. 
missing response MD5")
+}
+
+func TestReceiveMessageChecksum(t *testing.T) {
+	req, _ := svc.ReceiveMessageRequest(&sqs.ReceiveMessageInput{})
+	req.Handlers.Send.PushBack(func(r *request.Request) {
+		md5 := "098f6bcd4621d373cade4e832627b4f6"
+		body := ioutil.NopCloser(bytes.NewReader([]byte("")))
+		r.HTTPResponse = &http.Response{StatusCode: 200, Body: body}
+		r.Data = &sqs.ReceiveMessageOutput{
+			Messages: []*sqs.Message{
+				{Body: aws.String("test"), MD5OfBody: &md5},
+				{Body: aws.String("test"), MD5OfBody: &md5},
+				{Body: aws.String("test"), MD5OfBody: &md5},
+				{Body: aws.String("test"), MD5OfBody: &md5},
+			},
+		}
+	})
+	err := req.Send()
+	assert.NoError(t, err)
+}
+
+func TestReceiveMessageChecksumInvalid(t *testing.T) {
+	req, _ := svc.ReceiveMessageRequest(&sqs.ReceiveMessageInput{})
+	req.Handlers.Send.PushBack(func(r *request.Request) {
+		md5 := "098f6bcd4621d373cade4e832627b4f6"
+		body := ioutil.NopCloser(bytes.NewReader([]byte("")))
+		r.HTTPResponse = &http.Response{StatusCode: 200, Body: body}
+		r.Data = &sqs.ReceiveMessageOutput{
+			Messages: []*sqs.Message{
+				{Body: aws.String("test"), MD5OfBody: &md5},
+				{Body: aws.String("test"), MD5OfBody: aws.String("000"), MessageId: aws.String("123")},
+				{Body: aws.String("test"), MD5OfBody: aws.String("000"), MessageId: aws.String("456")},
+				{Body: aws.String("test"), MD5OfBody: aws.String("000")},
+				{Body: aws.String("test"), MD5OfBody: &md5},
+			},
+		}
+	})
+	err := req.Send()
+	assert.Error(t, err)
+
+	assert.Equal(t, "InvalidChecksum", err.(awserr.Error).Code())
+	assert.Contains(t, err.(awserr.Error).Message(), "invalid messages: 123, 456")
+}
+
+func TestSendMessageBatchChecksum(t *testing.T) {
+	req, _ := svc.SendMessageBatchRequest(&sqs.SendMessageBatchInput{
+		Entries: []*sqs.SendMessageBatchRequestEntry{
+			{Id: aws.String("1"), MessageBody: aws.String("test")},
+			{Id: aws.String("2"), MessageBody: aws.String("test")},
+			{Id: aws.String("3"), MessageBody: aws.String("test")},
+			{Id: aws.String("4"), MessageBody: aws.String("test")},
+		},
+	})
+	req.Handlers.Send.PushBack(func(r *request.Request) {
+		md5 := "098f6bcd4621d373cade4e832627b4f6"
+		body := ioutil.NopCloser(bytes.NewReader([]byte("")))
+		r.HTTPResponse = &http.Response{StatusCode: 200, Body: body}
+		r.Data = &sqs.SendMessageBatchOutput{
+			Successful: []*sqs.SendMessageBatchResultEntry{
+				{MD5OfMessageBody: &md5, MessageId: aws.String("123"), Id: aws.String("1")},
+				{MD5OfMessageBody: &md5, MessageId: aws.String("456"), Id: aws.String("2")},
+				{MD5OfMessageBody: &md5, MessageId: aws.String("789"), Id: aws.String("3")},
+				{MD5OfMessageBody: &md5, MessageId: aws.String("012"), Id: aws.String("4")},
+			},
+		}
+	})
+	err := req.Send()
+	assert.NoError(t, err)
+}
+
+func TestSendMessageBatchChecksumInvalid(t *testing.T) {
+	req, _ := svc.SendMessageBatchRequest(&sqs.SendMessageBatchInput{
+		Entries: []*sqs.SendMessageBatchRequestEntry{
+			{Id: aws.String("1"), MessageBody: aws.String("test")},
+			{Id: aws.String("2"), MessageBody: aws.String("test")},
+			{Id: aws.String("3"), MessageBody: aws.String("test")},
+			{Id: aws.String("4"), MessageBody: aws.String("test")},
+		},
+	})
+	req.Handlers.Send.PushBack(func(r *request.Request) {
+		md5 := "098f6bcd4621d373cade4e832627b4f6"
+		body := ioutil.NopCloser(bytes.NewReader([]byte("")))
+		r.HTTPResponse = &http.Response{StatusCode: 200, Body: body}
+		r.Data = &sqs.SendMessageBatchOutput{
+			Successful: []*sqs.SendMessageBatchResultEntry{
+				{MD5OfMessageBody: &md5, MessageId: aws.String("123"), Id: aws.String("1")},
+				{MD5OfMessageBody: 
aws.String("000"), MessageId: aws.String("456"), Id: aws.String("2")}, + {MD5OfMessageBody: aws.String("000"), MessageId: aws.String("789"), Id: aws.String("3")}, + {MD5OfMessageBody: &md5, MessageId: aws.String("012"), Id: aws.String("4")}, + }, + } + }) + err := req.Send() + assert.Error(t, err) + + assert.Equal(t, "InvalidChecksum", err.(awserr.Error).Code()) + assert.Contains(t, err.(awserr.Error).Message(), "invalid messages: 456, 789") +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/customizations.go new file mode 100644 index 000000000..7498363de --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sqs/customizations.go @@ -0,0 +1,9 @@ +package sqs + +import "github.com/aws/aws-sdk-go/aws/request" + +func init() { + initRequest = func(r *request.Request) { + setupChecksumValidation(r) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/examples_test.go new file mode 100644 index 000000000..2b8626773 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sqs/examples_test.go @@ -0,0 +1,433 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package sqs_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/sqs" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleSQS_AddPermission() { + svc := sqs.New(session.New()) + + params := &sqs.AddPermissionInput{ + AWSAccountIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + Actions: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + Label: aws.String("String"), // Required + QueueUrl: aws.String("String"), // Required + } + resp, err := svc.AddPermission(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_ChangeMessageVisibility() { + svc := sqs.New(session.New()) + + params := &sqs.ChangeMessageVisibilityInput{ + QueueUrl: aws.String("String"), // Required + ReceiptHandle: aws.String("String"), // Required + VisibilityTimeout: aws.Int64(1), // Required + } + resp, err := svc.ChangeMessageVisibility(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_ChangeMessageVisibilityBatch() { + svc := sqs.New(session.New()) + + params := &sqs.ChangeMessageVisibilityBatchInput{ + Entries: []*sqs.ChangeMessageVisibilityBatchRequestEntry{ // Required + { // Required + Id: aws.String("String"), // Required + ReceiptHandle: aws.String("String"), // Required + VisibilityTimeout: aws.Int64(1), + }, + // More values... + }, + QueueUrl: aws.String("String"), // Required + } + resp, err := svc.ChangeMessageVisibilityBatch(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSQS_CreateQueue() { + svc := sqs.New(session.New()) + + params := &sqs.CreateQueueInput{ + QueueName: aws.String("String"), // Required + Attributes: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.CreateQueue(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_DeleteMessage() { + svc := sqs.New(session.New()) + + params := &sqs.DeleteMessageInput{ + QueueUrl: aws.String("String"), // Required + ReceiptHandle: aws.String("String"), // Required + } + resp, err := svc.DeleteMessage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_DeleteMessageBatch() { + svc := sqs.New(session.New()) + + params := &sqs.DeleteMessageBatchInput{ + Entries: []*sqs.DeleteMessageBatchRequestEntry{ // Required + { // Required + Id: aws.String("String"), // Required + ReceiptHandle: aws.String("String"), // Required + }, + // More values... + }, + QueueUrl: aws.String("String"), // Required + } + resp, err := svc.DeleteMessageBatch(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_DeleteQueue() { + svc := sqs.New(session.New()) + + params := &sqs.DeleteQueueInput{ + QueueUrl: aws.String("String"), // Required + } + resp, err := svc.DeleteQueue(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_GetQueueAttributes() { + svc := sqs.New(session.New()) + + params := &sqs.GetQueueAttributesInput{ + QueueUrl: aws.String("String"), // Required + AttributeNames: []*string{ + aws.String("QueueAttributeName"), // Required + // More values... + }, + } + resp, err := svc.GetQueueAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_GetQueueUrl() { + svc := sqs.New(session.New()) + + params := &sqs.GetQueueUrlInput{ + QueueName: aws.String("String"), // Required + QueueOwnerAWSAccountId: aws.String("String"), + } + resp, err := svc.GetQueueUrl(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_ListDeadLetterSourceQueues() { + svc := sqs.New(session.New()) + + params := &sqs.ListDeadLetterSourceQueuesInput{ + QueueUrl: aws.String("String"), // Required + } + resp, err := svc.ListDeadLetterSourceQueues(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSQS_ListQueues() { + svc := sqs.New(session.New()) + + params := &sqs.ListQueuesInput{ + QueueNamePrefix: aws.String("String"), + } + resp, err := svc.ListQueues(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_PurgeQueue() { + svc := sqs.New(session.New()) + + params := &sqs.PurgeQueueInput{ + QueueUrl: aws.String("String"), // Required + } + resp, err := svc.PurgeQueue(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_ReceiveMessage() { + svc := sqs.New(session.New()) + + params := &sqs.ReceiveMessageInput{ + QueueUrl: aws.String("String"), // Required + AttributeNames: []*string{ + aws.String("QueueAttributeName"), // Required + // More values... + }, + MaxNumberOfMessages: aws.Int64(1), + MessageAttributeNames: []*string{ + aws.String("MessageAttributeName"), // Required + // More values... + }, + VisibilityTimeout: aws.Int64(1), + WaitTimeSeconds: aws.Int64(1), + } + resp, err := svc.ReceiveMessage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_RemovePermission() { + svc := sqs.New(session.New()) + + params := &sqs.RemovePermissionInput{ + Label: aws.String("String"), // Required + QueueUrl: aws.String("String"), // Required + } + resp, err := svc.RemovePermission(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_SendMessage() { + svc := sqs.New(session.New()) + + params := &sqs.SendMessageInput{ + MessageBody: aws.String("String"), // Required + QueueUrl: aws.String("String"), // Required + DelaySeconds: aws.Int64(1), + MessageAttributes: map[string]*sqs.MessageAttributeValue{ + "Key": { // Required + DataType: aws.String("String"), // Required + BinaryListValues: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + BinaryValue: []byte("PAYLOAD"), + StringListValues: []*string{ + aws.String("String"), // Required + // More values... + }, + StringValue: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.SendMessage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_SendMessageBatch() { + svc := sqs.New(session.New()) + + params := &sqs.SendMessageBatchInput{ + Entries: []*sqs.SendMessageBatchRequestEntry{ // Required + { // Required + Id: aws.String("String"), // Required + MessageBody: aws.String("String"), // Required + DelaySeconds: aws.Int64(1), + MessageAttributes: map[string]*sqs.MessageAttributeValue{ + "Key": { // Required + DataType: aws.String("String"), // Required + BinaryListValues: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... 
+ }, + BinaryValue: []byte("PAYLOAD"), + StringListValues: []*string{ + aws.String("String"), // Required + // More values... + }, + StringValue: aws.String("String"), + }, + // More values... + }, + }, + // More values... + }, + QueueUrl: aws.String("String"), // Required + } + resp, err := svc.SendMessageBatch(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_SetQueueAttributes() { + svc := sqs.New(session.New()) + + params := &sqs.SetQueueAttributesInput{ + Attributes: map[string]*string{ // Required + "Key": aws.String("String"), // Required + // More values... + }, + QueueUrl: aws.String("String"), // Required + } + resp, err := svc.SetQueueAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/service.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/service.go new file mode 100644 index 000000000..aaf456caa --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sqs/service.go @@ -0,0 +1,119 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package sqs + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +// Welcome to the Amazon Simple Queue Service API Reference. This section describes +// who should read this guide, how the guide is organized, and other resources +// related to the Amazon Simple Queue Service (Amazon SQS). +// +// Amazon SQS offers reliable and scalable hosted queues for storing messages +// as they travel between computers. By using Amazon SQS, you can move data +// between distributed components of your applications that perform different +// tasks without losing messages or requiring each component to be always available. +// +// Helpful Links: +// +// Current WSDL (2012-11-05) (http://queue.amazonaws.com/doc/2012-11-05/QueueService.wsdl) +// +// Making API Requests (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/MakingRequestsArticle.html) +// +// Amazon SQS product page (http://aws.amazon.com/sqs/) +// +// Using Amazon SQS Message Attributes (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/SQSMessageAttributes.html) +// +// Using Amazon SQS Dead Letter Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/SQSDeadLetterQueue.html) +// +// Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sqs_region) +// +// We also provide SDKs that enable you to access Amazon SQS from your preferred +// programming language. The SDKs contain functionality that automatically takes +// care of tasks such as: +// +// Cryptographically signing your service requests +// +// Retrying requests +// +// Handling error responses +// +// For a list of available SDKs, go to Tools for Amazon Web Services (http://aws.amazon.com/tools/). +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
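As the comment above says, a single client is safe for concurrent use so long as nothing mutates its configuration. A sketch of fan-out consumers sharing one client, assuming log and sync imports alongside the aws and sqs packages; the queue URL and worker count are illustrative:

func consume(svc *sqs.SQS, queueURL string, workers int) {
	var wg sync.WaitGroup
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// The shared client is only read here; its properties are never changed.
			out, err := svc.ReceiveMessage(&sqs.ReceiveMessageInput{
				QueueUrl:        aws.String(queueURL),
				WaitTimeSeconds: aws.Int64(20), // long polling
			})
			if err != nil {
				log.Println(err)
				return
			}
			for _, m := range out.Messages {
				log.Printf("received message %s", aws.StringValue(m.MessageId))
			}
		}()
	}
	wg.Wait()
}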
+type SQS struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "sqs" + +// New creates a new instance of the SQS client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a SQS client from just a session. +// svc := sqs.New(mySession) +// +// // Create a SQS client with additional configuration +// svc := sqs.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *SQS { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *SQS { + svc := &SQS{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2012-11-05", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a SQS operation and runs any +// custom request initialization. +func (c *SQS) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/sqsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/sqsiface/interface.go new file mode 100644 index 000000000..55647fa17 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sqs/sqsiface/interface.go @@ -0,0 +1,82 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package sqsiface provides an interface for the Amazon Simple Queue Service. +package sqsiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/sqs" +) + +// SQSAPI is the interface type for sqs.SQS. 
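A common reason to depend on SQSAPI rather than *sqs.SQS is unit testing: embedding the interface in a stub satisfies every method while overriding only the one under test. An illustrative sketch, not part of this patch:

type stubSQS struct {
	sqsiface.SQSAPI // embedding satisfies the interface; unstubbed methods panic if called
	msgs            []*sqs.Message
}

func (s *stubSQS) ReceiveMessage(*sqs.ReceiveMessageInput) (*sqs.ReceiveMessageOutput, error) {
	return &sqs.ReceiveMessageOutput{Messages: s.msgs}, nil
}

// drainOnce accepts the interface, so production code passes *sqs.SQS and
// tests pass &stubSQS{...}.
func drainOnce(api sqsiface.SQSAPI, queueURL string) ([]*sqs.Message, error) {
	out, err := api.ReceiveMessage(&sqs.ReceiveMessageInput{
		QueueUrl: aws.String(queueURL),
	})
	if err != nil {
		return nil, err
	}
	return out.Messages, nil
}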
+type SQSAPI interface { + AddPermissionRequest(*sqs.AddPermissionInput) (*request.Request, *sqs.AddPermissionOutput) + + AddPermission(*sqs.AddPermissionInput) (*sqs.AddPermissionOutput, error) + + ChangeMessageVisibilityRequest(*sqs.ChangeMessageVisibilityInput) (*request.Request, *sqs.ChangeMessageVisibilityOutput) + + ChangeMessageVisibility(*sqs.ChangeMessageVisibilityInput) (*sqs.ChangeMessageVisibilityOutput, error) + + ChangeMessageVisibilityBatchRequest(*sqs.ChangeMessageVisibilityBatchInput) (*request.Request, *sqs.ChangeMessageVisibilityBatchOutput) + + ChangeMessageVisibilityBatch(*sqs.ChangeMessageVisibilityBatchInput) (*sqs.ChangeMessageVisibilityBatchOutput, error) + + CreateQueueRequest(*sqs.CreateQueueInput) (*request.Request, *sqs.CreateQueueOutput) + + CreateQueue(*sqs.CreateQueueInput) (*sqs.CreateQueueOutput, error) + + DeleteMessageRequest(*sqs.DeleteMessageInput) (*request.Request, *sqs.DeleteMessageOutput) + + DeleteMessage(*sqs.DeleteMessageInput) (*sqs.DeleteMessageOutput, error) + + DeleteMessageBatchRequest(*sqs.DeleteMessageBatchInput) (*request.Request, *sqs.DeleteMessageBatchOutput) + + DeleteMessageBatch(*sqs.DeleteMessageBatchInput) (*sqs.DeleteMessageBatchOutput, error) + + DeleteQueueRequest(*sqs.DeleteQueueInput) (*request.Request, *sqs.DeleteQueueOutput) + + DeleteQueue(*sqs.DeleteQueueInput) (*sqs.DeleteQueueOutput, error) + + GetQueueAttributesRequest(*sqs.GetQueueAttributesInput) (*request.Request, *sqs.GetQueueAttributesOutput) + + GetQueueAttributes(*sqs.GetQueueAttributesInput) (*sqs.GetQueueAttributesOutput, error) + + GetQueueUrlRequest(*sqs.GetQueueUrlInput) (*request.Request, *sqs.GetQueueUrlOutput) + + GetQueueUrl(*sqs.GetQueueUrlInput) (*sqs.GetQueueUrlOutput, error) + + ListDeadLetterSourceQueuesRequest(*sqs.ListDeadLetterSourceQueuesInput) (*request.Request, *sqs.ListDeadLetterSourceQueuesOutput) + + ListDeadLetterSourceQueues(*sqs.ListDeadLetterSourceQueuesInput) (*sqs.ListDeadLetterSourceQueuesOutput, error) + + ListQueuesRequest(*sqs.ListQueuesInput) (*request.Request, *sqs.ListQueuesOutput) + + ListQueues(*sqs.ListQueuesInput) (*sqs.ListQueuesOutput, error) + + PurgeQueueRequest(*sqs.PurgeQueueInput) (*request.Request, *sqs.PurgeQueueOutput) + + PurgeQueue(*sqs.PurgeQueueInput) (*sqs.PurgeQueueOutput, error) + + ReceiveMessageRequest(*sqs.ReceiveMessageInput) (*request.Request, *sqs.ReceiveMessageOutput) + + ReceiveMessage(*sqs.ReceiveMessageInput) (*sqs.ReceiveMessageOutput, error) + + RemovePermissionRequest(*sqs.RemovePermissionInput) (*request.Request, *sqs.RemovePermissionOutput) + + RemovePermission(*sqs.RemovePermissionInput) (*sqs.RemovePermissionOutput, error) + + SendMessageRequest(*sqs.SendMessageInput) (*request.Request, *sqs.SendMessageOutput) + + SendMessage(*sqs.SendMessageInput) (*sqs.SendMessageOutput, error) + + SendMessageBatchRequest(*sqs.SendMessageBatchInput) (*request.Request, *sqs.SendMessageBatchOutput) + + SendMessageBatch(*sqs.SendMessageBatchInput) (*sqs.SendMessageBatchOutput, error) + + SetQueueAttributesRequest(*sqs.SetQueueAttributesInput) (*request.Request, *sqs.SetQueueAttributesOutput) + + SetQueueAttributes(*sqs.SetQueueAttributesInput) (*sqs.SetQueueAttributesOutput, error) +} + +var _ SQSAPI = (*sqs.SQS)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssm/api.go b/vendor/github.com/aws/aws-sdk-go/service/ssm/api.go new file mode 100644 index 000000000..ab46ce93d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ssm/api.go @@ -0,0 +1,3953 @@ +// THIS FILE IS AUTOMATICALLY 
GENERATED. DO NOT EDIT. + +// Package ssm provides a client for Amazon Simple Systems Management Service. +package ssm + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAddTagsToResource = "AddTagsToResource" + +// AddTagsToResourceRequest generates a "aws/request.Request" representing the +// client's request for the AddTagsToResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddTagsToResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddTagsToResourceRequest method. +// req, resp := client.AddTagsToResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SSM) AddTagsToResourceRequest(input *AddTagsToResourceInput) (req *request.Request, output *AddTagsToResourceOutput) { + op := &request.Operation{ + Name: opAddTagsToResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddTagsToResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &AddTagsToResourceOutput{} + req.Data = output + return +} + +// Adds or overwrites one or more tags for the specified resource. Tags are +// metadata that you assign to your managed instances. Tags enable you to categorize +// your managed instances in different ways, for example, by purpose, owner, +// or environment. Each tag consists of a key and an optional value, both of +// which you define. For example, you could define a set of tags for your account's +// managed instances that helps you track each instance's owner and stack level. +// For example: Key=Owner and Value=DbAdmin, SysAdmin, or Dev. Or Key=Stack +// and Value=Production, Pre-Production, or Test. Each resource can have a maximum +// of 10 tags. +// +// We recommend that you devise a set of tag keys that meets your needs for +// each resource type. Using a consistent set of tag keys makes it easier for +// you to manage your resources. You can search and filter the resources based +// on the tags you add. Tags don't have any semantic meaning to Amazon EC2 and +// are interpreted strictly as a string of characters. +// +// For more information about tags, see Tagging Your Amazon EC2 Resources (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) +// in the Amazon EC2 User Guide. +func (c *SSM) AddTagsToResource(input *AddTagsToResourceInput) (*AddTagsToResourceOutput, error) { + req, out := c.AddTagsToResourceRequest(input) + err := req.Send() + return out, err +} + +const opCancelCommand = "CancelCommand" + +// CancelCommandRequest generates a "aws/request.Request" representing the +// client's request for the CancelCommand operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CancelCommand method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CancelCommandRequest method. +// req, resp := client.CancelCommandRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SSM) CancelCommandRequest(input *CancelCommandInput) (req *request.Request, output *CancelCommandOutput) { + op := &request.Operation{ + Name: opCancelCommand, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelCommandInput{} + } + + req = c.newRequest(op, input, output) + output = &CancelCommandOutput{} + req.Data = output + return +} + +// Attempts to cancel the command specified by the Command ID. There is no guarantee +// that the command will be terminated and the underlying process stopped. +func (c *SSM) CancelCommand(input *CancelCommandInput) (*CancelCommandOutput, error) { + req, out := c.CancelCommandRequest(input) + err := req.Send() + return out, err +} + +const opCreateActivation = "CreateActivation" + +// CreateActivationRequest generates a "aws/request.Request" representing the +// client's request for the CreateActivation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateActivation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateActivationRequest method. +// req, resp := client.CreateActivationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SSM) CreateActivationRequest(input *CreateActivationInput) (req *request.Request, output *CreateActivationOutput) { + op := &request.Operation{ + Name: opCreateActivation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateActivationInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateActivationOutput{} + req.Data = output + return +} + +// Registers your on-premises server or virtual machine with Amazon EC2 so that +// you can manage these resources using Run Command. An on-premises server or +// virtual machine that has been registered with EC2 is called a managed instance. +// For more information about activations, see Setting Up Managed Instances +// (Linux) (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/managed-instances.html) +// or Setting Up Managed Instances (Windows) (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/managed-instances.html) +// in the Amazon EC2 User Guide. 
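The operation docs above all describe the same two call styles: the plain method when only the response matters, and the *Request form when custom handlers must be injected before Send. A minimal sketch using CreateActivation; the IAM role name is a placeholder, and the printed output fields are assumed from the SSM API of this SDK vintage:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssm"
)

func main() {
	svc := ssm.New(session.New())

	// Build the request object first so a custom handler can be attached.
	req, out := svc.CreateActivationRequest(&ssm.CreateActivationInput{
		IamRole: aws.String("SSMManagedInstanceRole"), // placeholder role name
	})
	req.Handlers.Send.PushFront(func(r *request.Request) {
		fmt.Println("sending", r.Operation.Name)
	})

	if err := req.Send(); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(aws.StringValue(out.ActivationId), aws.StringValue(out.ActivationCode))
}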
+func (c *SSM) CreateActivation(input *CreateActivationInput) (*CreateActivationOutput, error) { + req, out := c.CreateActivationRequest(input) + err := req.Send() + return out, err +} + +const opCreateAssociation = "CreateAssociation" + +// CreateAssociationRequest generates a "aws/request.Request" representing the +// client's request for the CreateAssociation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateAssociation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateAssociationRequest method. +// req, resp := client.CreateAssociationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SSM) CreateAssociationRequest(input *CreateAssociationInput) (req *request.Request, output *CreateAssociationOutput) { + op := &request.Operation{ + Name: opCreateAssociation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateAssociationInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateAssociationOutput{} + req.Data = output + return +} + +// Associates the specified SSM document with the specified instance. +// +// When you associate an SSM document with an instance, the configuration agent +// on the instance processes the document and configures the instance as specified. +// +// If you associate a document with an instance that already has an associated +// document, the system throws the AssociationAlreadyExists exception. +func (c *SSM) CreateAssociation(input *CreateAssociationInput) (*CreateAssociationOutput, error) { + req, out := c.CreateAssociationRequest(input) + err := req.Send() + return out, err +} + +const opCreateAssociationBatch = "CreateAssociationBatch" + +// CreateAssociationBatchRequest generates a "aws/request.Request" representing the +// client's request for the CreateAssociationBatch operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateAssociationBatch method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateAssociationBatchRequest method. 
+// req, resp := client.CreateAssociationBatchRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SSM) CreateAssociationBatchRequest(input *CreateAssociationBatchInput) (req *request.Request, output *CreateAssociationBatchOutput) { + op := &request.Operation{ + Name: opCreateAssociationBatch, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateAssociationBatchInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateAssociationBatchOutput{} + req.Data = output + return +} + +// Associates the specified SSM document with the specified instances. +// +// When you associate an SSM document with an instance, the configuration agent +// on the instance processes the document and configures the instance as specified. +// +// If you associate a document with an instance that already has an associated +// document, the system throws the AssociationAlreadyExists exception. +func (c *SSM) CreateAssociationBatch(input *CreateAssociationBatchInput) (*CreateAssociationBatchOutput, error) { + req, out := c.CreateAssociationBatchRequest(input) + err := req.Send() + return out, err +} + +const opCreateDocument = "CreateDocument" + +// CreateDocumentRequest generates a "aws/request.Request" representing the +// client's request for the CreateDocument operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDocument method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDocumentRequest method. +// req, resp := client.CreateDocumentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SSM) CreateDocumentRequest(input *CreateDocumentInput) (req *request.Request, output *CreateDocumentOutput) { + op := &request.Operation{ + Name: opCreateDocument, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDocumentInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDocumentOutput{} + req.Data = output + return +} + +// Creates an SSM document. +// +// After you create an SSM document, you can use CreateAssociation to associate +// it with one or more running instances. +func (c *SSM) CreateDocument(input *CreateDocumentInput) (*CreateDocumentOutput, error) { + req, out := c.CreateDocumentRequest(input) + err := req.Send() + return out, err +} + +const opDeleteActivation = "DeleteActivation" + +// DeleteActivationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteActivation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the DeleteActivation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteActivationRequest method. +// req, resp := client.DeleteActivationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SSM) DeleteActivationRequest(input *DeleteActivationInput) (req *request.Request, output *DeleteActivationOutput) { + op := &request.Operation{ + Name: opDeleteActivation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteActivationInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteActivationOutput{} + req.Data = output + return +} + +// Deletes an activation. You are not required to delete an activation. If you +// delete an activation, you can no longer use it to register additional managed +// instances. Deleting an activation does not de-register managed instances. +// You must manually de-register managed instances. +func (c *SSM) DeleteActivation(input *DeleteActivationInput) (*DeleteActivationOutput, error) { + req, out := c.DeleteActivationRequest(input) + err := req.Send() + return out, err +} + +const opDeleteAssociation = "DeleteAssociation" + +// DeleteAssociationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteAssociation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteAssociation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteAssociationRequest method. +// req, resp := client.DeleteAssociationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SSM) DeleteAssociationRequest(input *DeleteAssociationInput) (req *request.Request, output *DeleteAssociationOutput) { + op := &request.Operation{ + Name: opDeleteAssociation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAssociationInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteAssociationOutput{} + req.Data = output + return +} + +// Disassociates the specified SSM document from the specified instance. +// +// When you disassociate an SSM document from an instance, it does not change +// the configuration of the instance. To change the configuration state of an +// instance after you disassociate a document, you must create a new document +// with the desired configuration and associate it with the instance. +func (c *SSM) DeleteAssociation(input *DeleteAssociationInput) (*DeleteAssociationOutput, error) { + req, out := c.DeleteAssociationRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDocument = "DeleteDocument" + +// DeleteDocumentRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDocument operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDocument method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteDocumentRequest method. +// req, resp := client.DeleteDocumentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SSM) DeleteDocumentRequest(input *DeleteDocumentInput) (req *request.Request, output *DeleteDocumentOutput) { + op := &request.Operation{ + Name: opDeleteDocument, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDocumentInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteDocumentOutput{} + req.Data = output + return +} + +// Deletes the SSM document and all instance associations to the document. +// +// Before you delete the SSM document, we recommend that you use DeleteAssociation +// to disassociate all instances that are associated with the document. +func (c *SSM) DeleteDocument(input *DeleteDocumentInput) (*DeleteDocumentOutput, error) { + req, out := c.DeleteDocumentRequest(input) + err := req.Send() + return out, err +} + +const opDeregisterManagedInstance = "DeregisterManagedInstance" + +// DeregisterManagedInstanceRequest generates a "aws/request.Request" representing the +// client's request for the DeregisterManagedInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeregisterManagedInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeregisterManagedInstanceRequest method. +// req, resp := client.DeregisterManagedInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SSM) DeregisterManagedInstanceRequest(input *DeregisterManagedInstanceInput) (req *request.Request, output *DeregisterManagedInstanceOutput) { + op := &request.Operation{ + Name: opDeregisterManagedInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterManagedInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &DeregisterManagedInstanceOutput{} + req.Data = output + return +} + +// Removes the server or virtual machine from the list of registered servers. +// You can reregister the instance again at any time. If you don’t plan to use +// Run Command on the server, we suggest uninstalling the SSM agent first. 
+func (c *SSM) DeregisterManagedInstance(input *DeregisterManagedInstanceInput) (*DeregisterManagedInstanceOutput, error) {
+ req, out := c.DeregisterManagedInstanceRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDescribeActivations = "DescribeActivations"
+
+// DescribeActivationsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeActivations operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeActivations method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DescribeActivationsRequest method.
+// req, resp := client.DescribeActivationsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *SSM) DescribeActivationsRequest(input *DescribeActivationsInput) (req *request.Request, output *DescribeActivationsOutput) {
+ op := &request.Operation{
+ Name: opDescribeActivations,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"NextToken"},
+ OutputTokens: []string{"NextToken"},
+ LimitToken: "MaxResults",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &DescribeActivationsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DescribeActivationsOutput{}
+ req.Data = output
+ return
+}
+
+// Returns details about the activation, including the date and time the activation
+// was created, the expiration date, the IAM role assigned to the instances
+// in the activation, and the number of instances activated by this registration.
+func (c *SSM) DescribeActivations(input *DescribeActivationsInput) (*DescribeActivationsOutput, error) {
+ req, out := c.DescribeActivationsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// DescribeActivationsPages iterates over the pages of a DescribeActivations operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See DescribeActivations method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a DescribeActivations operation.
+// pageNum := 0 +// err := client.DescribeActivationsPages(params, +// func(page *DescribeActivationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeActivationsPages(input *DescribeActivationsInput, fn func(p *DescribeActivationsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeActivationsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeActivationsOutput), lastPage) + }) +} + +const opDescribeAssociation = "DescribeAssociation" + +// DescribeAssociationRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAssociation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeAssociation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeAssociationRequest method. +// req, resp := client.DescribeAssociationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SSM) DescribeAssociationRequest(input *DescribeAssociationInput) (req *request.Request, output *DescribeAssociationOutput) { + op := &request.Operation{ + Name: opDescribeAssociation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAssociationInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAssociationOutput{} + req.Data = output + return +} + +// Describes the associations for the specified SSM document or instance. +func (c *SSM) DescribeAssociation(input *DescribeAssociationInput) (*DescribeAssociationOutput, error) { + req, out := c.DescribeAssociationRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDocument = "DescribeDocument" + +// DescribeDocumentRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDocument operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDocument method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDocumentRequest method. 
+// req, resp := client.DescribeDocumentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SSM) DescribeDocumentRequest(input *DescribeDocumentInput) (req *request.Request, output *DescribeDocumentOutput) { + op := &request.Operation{ + Name: opDescribeDocument, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDocumentInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDocumentOutput{} + req.Data = output + return +} + +// Describes the specified SSM document. +func (c *SSM) DescribeDocument(input *DescribeDocumentInput) (*DescribeDocumentOutput, error) { + req, out := c.DescribeDocumentRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDocumentPermission = "DescribeDocumentPermission" + +// DescribeDocumentPermissionRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDocumentPermission operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDocumentPermission method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDocumentPermissionRequest method. +// req, resp := client.DescribeDocumentPermissionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SSM) DescribeDocumentPermissionRequest(input *DescribeDocumentPermissionInput) (req *request.Request, output *DescribeDocumentPermissionOutput) { + op := &request.Operation{ + Name: opDescribeDocumentPermission, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDocumentPermissionInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDocumentPermissionOutput{} + req.Data = output + return +} + +// Describes the permissions for an SSM document. If you created the document, +// you are the owner. If a document is shared, it can either be shared privately +// (by specifying a user’s AWS account ID) or publicly (All). +func (c *SSM) DescribeDocumentPermission(input *DescribeDocumentPermissionInput) (*DescribeDocumentPermissionOutput, error) { + req, out := c.DescribeDocumentPermissionRequest(input) + err := req.Send() + return out, err +} + +const opDescribeInstanceInformation = "DescribeInstanceInformation" + +// DescribeInstanceInformationRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInstanceInformation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeInstanceInformation method directly +// instead. 
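+//
+// A sketch of the handler-injection path described above; the logging handler
+// body is an illustrative assumption, not part of the generated API:
+//
+// req, resp := client.DescribeInstanceInformationRequest(params)
+// req.Handlers.Send.PushFront(func(r *request.Request) {
+// fmt.Println("sending:", r.Operation.Name) // custom pre-send logic
+// })
+// if err := req.Send(); err == nil { // resp is now filled
+// fmt.Println(resp)
+// }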
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DescribeInstanceInformationRequest method.
+// req, resp := client.DescribeInstanceInformationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *SSM) DescribeInstanceInformationRequest(input *DescribeInstanceInformationInput) (req *request.Request, output *DescribeInstanceInformationOutput) {
+ op := &request.Operation{
+ Name: opDescribeInstanceInformation,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DescribeInstanceInformationInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DescribeInstanceInformationOutput{}
+ req.Data = output
+ return
+}
+
+// Describes one or more of your instances. You can use this to get information
+// about instances such as the operating system platform, the SSM agent version,
+// and status. If you specify one or more instance IDs, it returns information
+// for those instances. If you do not specify instance IDs, it returns information
+// for all your instances. If you specify an instance ID that is not valid or
+// an instance that you do not own, you receive an error.
+func (c *SSM) DescribeInstanceInformation(input *DescribeInstanceInformationInput) (*DescribeInstanceInformationOutput, error) {
+ req, out := c.DescribeInstanceInformationRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetDocument = "GetDocument"
+
+// GetDocumentRequest generates a "aws/request.Request" representing the
+// client's request for the GetDocument operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the GetDocument method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the GetDocumentRequest method.
+// req, resp := client.GetDocumentRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *SSM) GetDocumentRequest(input *GetDocumentInput) (req *request.Request, output *GetDocumentOutput) {
+ op := &request.Operation{
+ Name: opGetDocument,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetDocumentInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetDocumentOutput{}
+ req.Data = output
+ return
+}
+
+// Gets the contents of the specified SSM document.
+func (c *SSM) GetDocument(input *GetDocumentInput) (*GetDocumentOutput, error) {
+ req, out := c.GetDocumentRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opListAssociations = "ListAssociations"
+
+// ListAssociationsRequest generates a "aws/request.Request" representing the
+// client's request for the ListAssociations operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListAssociations method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListAssociationsRequest method. +// req, resp := client.ListAssociationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SSM) ListAssociationsRequest(input *ListAssociationsInput) (req *request.Request, output *ListAssociationsOutput) { + op := &request.Operation{ + Name: opListAssociations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListAssociationsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListAssociationsOutput{} + req.Data = output + return +} + +// Lists the associations for the specified SSM document or instance. +func (c *SSM) ListAssociations(input *ListAssociationsInput) (*ListAssociationsOutput, error) { + req, out := c.ListAssociationsRequest(input) + err := req.Send() + return out, err +} + +// ListAssociationsPages iterates over the pages of a ListAssociations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAssociations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAssociations operation. +// pageNum := 0 +// err := client.ListAssociationsPages(params, +// func(page *ListAssociationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) ListAssociationsPages(input *ListAssociationsInput, fn func(p *ListAssociationsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListAssociationsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListAssociationsOutput), lastPage) + }) +} + +const opListCommandInvocations = "ListCommandInvocations" + +// ListCommandInvocationsRequest generates a "aws/request.Request" representing the +// client's request for the ListCommandInvocations operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListCommandInvocations method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListCommandInvocationsRequest method. 
+// req, resp := client.ListCommandInvocationsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *SSM) ListCommandInvocationsRequest(input *ListCommandInvocationsInput) (req *request.Request, output *ListCommandInvocationsOutput) {
+ op := &request.Operation{
+ Name: opListCommandInvocations,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"NextToken"},
+ OutputTokens: []string{"NextToken"},
+ LimitToken: "MaxResults",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &ListCommandInvocationsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ListCommandInvocationsOutput{}
+ req.Data = output
+ return
+}
+
+// An invocation is a copy of a command sent to a specific instance. A command
+// can apply to one or more instances. A command invocation applies to one instance.
+// For example, if a user executes SendCommand against three instances, then
+// a command invocation is created for each requested instance ID. ListCommandInvocations
+// provides status about command execution.
+func (c *SSM) ListCommandInvocations(input *ListCommandInvocationsInput) (*ListCommandInvocationsOutput, error) {
+ req, out := c.ListCommandInvocationsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// ListCommandInvocationsPages iterates over the pages of a ListCommandInvocations operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListCommandInvocations method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a ListCommandInvocations operation.
+// pageNum := 0
+// err := client.ListCommandInvocationsPages(params,
+// func(page *ListCommandInvocationsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *SSM) ListCommandInvocationsPages(input *ListCommandInvocationsInput, fn func(p *ListCommandInvocationsOutput, lastPage bool) (shouldContinue bool)) error {
+ page, _ := c.ListCommandInvocationsRequest(input)
+ page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
+ return fn(p.(*ListCommandInvocationsOutput), lastPage)
+ })
+}
+
+const opListCommands = "ListCommands"
+
+// ListCommandsRequest generates a "aws/request.Request" representing the
+// client's request for the ListCommands operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ListCommands method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the ListCommandsRequest method.
+// req, resp := client.ListCommandsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SSM) ListCommandsRequest(input *ListCommandsInput) (req *request.Request, output *ListCommandsOutput) { + op := &request.Operation{ + Name: opListCommands, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListCommandsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListCommandsOutput{} + req.Data = output + return +} + +// Lists the commands requested by users of the AWS account. +func (c *SSM) ListCommands(input *ListCommandsInput) (*ListCommandsOutput, error) { + req, out := c.ListCommandsRequest(input) + err := req.Send() + return out, err +} + +// ListCommandsPages iterates over the pages of a ListCommands operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListCommands method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListCommands operation. +// pageNum := 0 +// err := client.ListCommandsPages(params, +// func(page *ListCommandsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) ListCommandsPages(input *ListCommandsInput, fn func(p *ListCommandsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListCommandsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListCommandsOutput), lastPage) + }) +} + +const opListDocuments = "ListDocuments" + +// ListDocumentsRequest generates a "aws/request.Request" representing the +// client's request for the ListDocuments operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListDocuments method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListDocumentsRequest method. +// req, resp := client.ListDocumentsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SSM) ListDocumentsRequest(input *ListDocumentsInput) (req *request.Request, output *ListDocumentsOutput) { + op := &request.Operation{ + Name: opListDocuments, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDocumentsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListDocumentsOutput{} + req.Data = output + return +} + +// Describes one or more of your SSM documents. 
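+//
+// A usage sketch of the direct call; the client value "svc" is an assumption,
+// and an empty input returns the first page of documents:
+//
+// out, err := svc.ListDocuments(&ssm.ListDocumentsInput{})
+// if err == nil {
+// fmt.Println(out)
+// }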
+func (c *SSM) ListDocuments(input *ListDocumentsInput) (*ListDocumentsOutput, error) { + req, out := c.ListDocumentsRequest(input) + err := req.Send() + return out, err +} + +// ListDocumentsPages iterates over the pages of a ListDocuments operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDocuments method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDocuments operation. +// pageNum := 0 +// err := client.ListDocumentsPages(params, +// func(page *ListDocumentsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) ListDocumentsPages(input *ListDocumentsInput, fn func(p *ListDocumentsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListDocumentsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListDocumentsOutput), lastPage) + }) +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTagsForResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SSM) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTagsForResourceOutput{} + req.Data = output + return +} + +// Returns a list of the tags assigned to the specified resource. +func (c *SSM) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + err := req.Send() + return out, err +} + +const opModifyDocumentPermission = "ModifyDocumentPermission" + +// ModifyDocumentPermissionRequest generates a "aws/request.Request" representing the +// client's request for the ModifyDocumentPermission operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If
+// you just want the service response, call the ModifyDocumentPermission method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the ModifyDocumentPermissionRequest method.
+// req, resp := client.ModifyDocumentPermissionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *SSM) ModifyDocumentPermissionRequest(input *ModifyDocumentPermissionInput) (req *request.Request, output *ModifyDocumentPermissionOutput) {
+ op := &request.Operation{
+ Name: opModifyDocumentPermission,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ModifyDocumentPermissionInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ModifyDocumentPermissionOutput{}
+ req.Data = output
+ return
+}
+
+// Shares a document publicly or privately. If you share a document privately,
+// you must specify the AWS user account IDs for those people who can use the
+// document. If you share a document publicly, you must specify All as the account
+// ID.
+func (c *SSM) ModifyDocumentPermission(input *ModifyDocumentPermissionInput) (*ModifyDocumentPermissionOutput, error) {
+ req, out := c.ModifyDocumentPermissionRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opRemoveTagsFromResource = "RemoveTagsFromResource"
+
+// RemoveTagsFromResourceRequest generates a "aws/request.Request" representing the
+// client's request for the RemoveTagsFromResource operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the RemoveTagsFromResource method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the RemoveTagsFromResourceRequest method.
+// req, resp := client.RemoveTagsFromResourceRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *SSM) RemoveTagsFromResourceRequest(input *RemoveTagsFromResourceInput) (req *request.Request, output *RemoveTagsFromResourceOutput) {
+ op := &request.Operation{
+ Name: opRemoveTagsFromResource,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &RemoveTagsFromResourceInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &RemoveTagsFromResourceOutput{}
+ req.Data = output
+ return
+}
+
+// Removes tags from the specified resource.
+func (c *SSM) RemoveTagsFromResource(input *RemoveTagsFromResourceInput) (*RemoveTagsFromResourceOutput, error) {
+ req, out := c.RemoveTagsFromResourceRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opSendCommand = "SendCommand"
+
+// SendCommandRequest generates a "aws/request.Request" representing the
+// client's request for the SendCommand operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
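+//
+// A hedged sketch of the direct SendCommand call; the document name, instance
+// ID, parameters, and the client value "svc" are illustrative assumptions:
+//
+// out, err := svc.SendCommand(&ssm.SendCommandInput{
+// DocumentName: aws.String("AWS-RunShellScript"),
+// InstanceIds: []*string{aws.String("i-0123456789abcdef0")},
+// Parameters: map[string][]*string{
+// "commands": {aws.String("uptime")},
+// },
+// })
+// if err == nil {
+// fmt.Println(out) // the response carries the command ID for tracking
+// }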
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SendCommand method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SendCommandRequest method. +// req, resp := client.SendCommandRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SSM) SendCommandRequest(input *SendCommandInput) (req *request.Request, output *SendCommandOutput) { + op := &request.Operation{ + Name: opSendCommand, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SendCommandInput{} + } + + req = c.newRequest(op, input, output) + output = &SendCommandOutput{} + req.Data = output + return +} + +// Executes commands on one or more remote instances. +func (c *SSM) SendCommand(input *SendCommandInput) (*SendCommandOutput, error) { + req, out := c.SendCommandRequest(input) + err := req.Send() + return out, err +} + +const opUpdateAssociationStatus = "UpdateAssociationStatus" + +// UpdateAssociationStatusRequest generates a "aws/request.Request" representing the +// client's request for the UpdateAssociationStatus operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateAssociationStatus method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateAssociationStatusRequest method. +// req, resp := client.UpdateAssociationStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SSM) UpdateAssociationStatusRequest(input *UpdateAssociationStatusInput) (req *request.Request, output *UpdateAssociationStatusOutput) { + op := &request.Operation{ + Name: opUpdateAssociationStatus, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateAssociationStatusInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateAssociationStatusOutput{} + req.Data = output + return +} + +// Updates the status of the SSM document associated with the specified instance. +func (c *SSM) UpdateAssociationStatus(input *UpdateAssociationStatusInput) (*UpdateAssociationStatusOutput, error) { + req, out := c.UpdateAssociationStatusRequest(input) + err := req.Send() + return out, err +} + +const opUpdateManagedInstanceRole = "UpdateManagedInstanceRole" + +// UpdateManagedInstanceRoleRequest generates a "aws/request.Request" representing the +// client's request for the UpdateManagedInstanceRole operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
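+//
+// A sketch of inspecting a failed call through the awserr package; the input
+// field values are hypothetical and the awserr import is assumed:
+//
+// _, err := svc.UpdateManagedInstanceRole(&ssm.UpdateManagedInstanceRoleInput{
+// InstanceId: aws.String("mi-0123456789abcdef0"), // hypothetical
+// IamRole: aws.String("MyManagedInstanceRole"), // hypothetical
+// })
+// if aerr, ok := err.(awserr.Error); ok {
+// fmt.Println(aerr.Code(), aerr.Message()) // service error code and message
+// }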
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the UpdateManagedInstanceRole method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the UpdateManagedInstanceRoleRequest method.
+// req, resp := client.UpdateManagedInstanceRoleRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *SSM) UpdateManagedInstanceRoleRequest(input *UpdateManagedInstanceRoleInput) (req *request.Request, output *UpdateManagedInstanceRoleOutput) {
+ op := &request.Operation{
+ Name: opUpdateManagedInstanceRole,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &UpdateManagedInstanceRoleInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &UpdateManagedInstanceRoleOutput{}
+ req.Data = output
+ return
+}
+
+// Assigns or changes an AWS Identity and Access Management (IAM) role to
+// the managed instance.
+func (c *SSM) UpdateManagedInstanceRole(input *UpdateManagedInstanceRoleInput) (*UpdateManagedInstanceRoleOutput, error) {
+ req, out := c.UpdateManagedInstanceRoleRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// An activation registers one or more on-premises servers or virtual machines
+// (VMs) with AWS so that you can configure those servers or VMs using Run Command.
+// A server or VM that has been registered with AWS is called a managed instance.
+type Activation struct {
+ _ struct{} `type:"structure"`
+
+ // The ID created by SSM when you submitted the activation.
+ ActivationId *string `type:"string"`
+
+ // The date the activation was created.
+ CreatedDate *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // A name for the managed instance when it is created.
+ DefaultInstanceName *string `type:"string"`
+
+ // A user-defined description of the activation.
+ Description *string `type:"string"`
+
+ // The date when this activation can no longer be used to register managed instances.
+ ExpirationDate *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // Whether the activation has expired.
+ Expired *bool `type:"boolean"`
+
+ // The AWS Identity and Access Management (IAM) role to assign to the managed
+ // instance.
+ IamRole *string `type:"string"`
+
+ // The maximum number of managed instances that can be registered using this
+ // activation.
+ RegistrationLimit *int64 `min:"1" type:"integer"`
+
+ // The number of managed instances already registered with this activation.
+ RegistrationsCount *int64 `min:"1" type:"integer"`
+}
+
+// String returns the string representation
+func (s Activation) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Activation) GoString() string {
+ return s.String()
+}
+
+type AddTagsToResourceInput struct {
+ _ struct{} `type:"structure"`
+
+ // The resource ID you want to tag.
+ ResourceId *string `type:"string" required:"true"`
+
+ // Specifies the type of resource you are tagging.
+ ResourceType *string `type:"string" required:"true" enum:"ResourceTypeForTagging"`
+
+ // One or more tags.
The value parameter is required, but if you don't want
+ // the tag to have a value, specify the parameter with no value, and we set
+ // the value to an empty string.
+ Tags []*Tag `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s AddTagsToResourceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddTagsToResourceInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AddTagsToResourceInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AddTagsToResourceInput"}
+ if s.ResourceId == nil {
+ invalidParams.Add(request.NewErrParamRequired("ResourceId"))
+ }
+ if s.ResourceType == nil {
+ invalidParams.Add(request.NewErrParamRequired("ResourceType"))
+ }
+ if s.Tags == nil {
+ invalidParams.Add(request.NewErrParamRequired("Tags"))
+ }
+ if s.Tags != nil {
+ for i, v := range s.Tags {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type AddTagsToResourceOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s AddTagsToResourceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddTagsToResourceOutput) GoString() string {
+ return s.String()
+}
+
+// Describes an association of an SSM document and an instance.
+type Association struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the instance.
+ InstanceId *string `type:"string"`
+
+ // The name of the SSM document.
+ Name *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Association) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Association) GoString() string {
+ return s.String()
+}
+
+// Describes an association between an SSM document and an instance, including
+// the association's parameters and status.
+type AssociationDescription struct {
+ _ struct{} `type:"structure"`
+
+ // The date when the association was made.
+ Date *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The ID of the instance.
+ InstanceId *string `type:"string"`
+
+ // The name of the SSM document.
+ Name *string `type:"string"`
+
+ // A description of the parameters for a document.
+ Parameters map[string][]*string `type:"map"`
+
+ // The association status.
+ Status *AssociationStatus `type:"structure"`
+}
+
+// String returns the string representation
+func (s AssociationDescription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssociationDescription) GoString() string {
+ return s.String()
+}
+
+// Describes a filter.
+type AssociationFilter struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the filter.
+ Key *string `locationName:"key" type:"string" required:"true" enum:"AssociationFilterKey"`
+
+ // The filter value.
+ Value *string `locationName:"value" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AssociationFilter) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssociationFilter) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
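+//
+// A sketch of the validation pattern shared by these input types; the filter
+// values are illustrative:
+//
+// f := &ssm.AssociationFilter{
+// Key: aws.String("InstanceId"),
+// Value: aws.String("i-0123456789abcdef0"),
+// }
+// if err := f.Validate(); err != nil {
+// fmt.Println(err) // a request.ErrInvalidParams listing each violation
+// }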
+func (s *AssociationFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssociationFilter"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + if s.Value != nil && len(*s.Value) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Value", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Describes an association status. +type AssociationStatus struct { + _ struct{} `type:"structure"` + + // A user-defined string. + AdditionalInfo *string `type:"string"` + + // The date when the status changed. + Date *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` + + // The reason for the status. + Message *string `type:"string" required:"true"` + + // The status. + Name *string `type:"string" required:"true" enum:"AssociationStatusName"` +} + +// String returns the string representation +func (s AssociationStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociationStatus) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssociationStatus) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssociationStatus"} + if s.Date == nil { + invalidParams.Add(request.NewErrParamRequired("Date")) + } + if s.Message == nil { + invalidParams.Add(request.NewErrParamRequired("Message")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CancelCommandInput struct { + _ struct{} `type:"structure"` + + // The ID of the command you want to cancel. + CommandId *string `min:"36" type:"string" required:"true"` + + // (Optional) A list of instance IDs on which you want to cancel the command. + // If not provided, the command is canceled on every instance on which it was + // requested. + InstanceIds []*string `min:"1" type:"list"` +} + +// String returns the string representation +func (s CancelCommandInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelCommandInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CancelCommandInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CancelCommandInput"} + if s.CommandId == nil { + invalidParams.Add(request.NewErrParamRequired("CommandId")) + } + if s.CommandId != nil && len(*s.CommandId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("CommandId", 36)) + } + if s.InstanceIds != nil && len(s.InstanceIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InstanceIds", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Whether or not the command was successfully canceled. There is no guarantee +// that a request can be canceled. +type CancelCommandOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CancelCommandOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelCommandOutput) GoString() string { + return s.String() +} + +// Describes a command request. +type Command struct { + _ struct{} `type:"structure"` + + // A unique identifier for this command. 
+ CommandId *string `min:"36" type:"string"`
+
+ // User-specified information about the command, such as a brief description
+ // of what the command should do.
+ Comment *string `type:"string"`
+
+ // The name of the SSM document requested for execution.
+ DocumentName *string `type:"string"`
+
+ // If this time is reached and the command has not already started executing,
+ // it will not execute. Calculated based on the ExpiresAfter user input provided
+ // as part of the SendCommand API.
+ ExpiresAfter *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The instance IDs against which this command was requested.
+ InstanceIds []*string `min:"1" type:"list"`
+
+ // The S3 bucket where the responses to the command executions should be stored.
+ // This was requested when issuing the command.
+ OutputS3BucketName *string `min:"3" type:"string"`
+
+ // The S3 directory path inside the bucket where the responses to the command
+ // executions should be stored. This was requested when issuing the command.
+ OutputS3KeyPrefix *string `type:"string"`
+
+ // The parameter values to be inserted in the SSM document when executing the
+ // command.
+ Parameters map[string][]*string `type:"map"`
+
+ // The date and time the command was requested.
+ RequestedDateTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The status of the command.
+ Status *string `type:"string" enum:"CommandStatus"`
+}
+
+// String returns the string representation
+func (s Command) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Command) GoString() string {
+ return s.String()
+}
+
+// Describes a command filter.
+type CommandFilter struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the filter. For example, requested date and time.
+ Key *string `locationName:"key" type:"string" required:"true" enum:"CommandFilterKey"`
+
+ // The filter value. For example: June 30, 2015.
+ Value *string `locationName:"value" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CommandFilter) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CommandFilter) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CommandFilter) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CommandFilter"}
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Value == nil {
+ invalidParams.Add(request.NewErrParamRequired("Value"))
+ }
+ if s.Value != nil && len(*s.Value) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Value", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// An invocation is a copy of a command sent to a specific instance. A command
+// can apply to one or more instances. A command invocation applies to one instance.
+// For example, if a user executes SendCommand against three instances, then
+// a command invocation is created for each requested instance ID. A command
+// invocation returns status and detail information about a command you executed.
+type CommandInvocation struct {
+ _ struct{} `type:"structure"`
+
+ // The command against which this invocation was requested.
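+ // This matches the CommandId returned by SendCommand; one invocation is
+ // created per requested instance ID.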
+ CommandId *string `min:"36" type:"string"`
+
+ // Details about each plugin processed as part of this command invocation.
+ CommandPlugins []*CommandPlugin `type:"list"`
+
+ // User-specified information about the command, such as a brief description
+ // of what the command should do.
+ Comment *string `type:"string"`
+
+ // The document name that was requested for execution.
+ DocumentName *string `type:"string"`
+
+ // The ID of the instance against which this invocation was requested.
+ InstanceId *string `type:"string"`
+
+ // The time and date the request was sent to this instance.
+ RequestedDateTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // Whether the invocation succeeded, failed, or is still pending.
+ Status *string `type:"string" enum:"CommandInvocationStatus"`
+
+ // The trace output sent by the agent.
+ TraceOutput *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CommandInvocation) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CommandInvocation) GoString() string {
+ return s.String()
+}
+
+// Describes plugin details.
+type CommandPlugin struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the plugin. Must be one of the following: aws:updateAgent, aws:domainjoin,
+ // aws:applications, aws:runPowerShellScript, aws:psmodule, aws:cloudWatch,
+ // aws:runShellScript, or aws:updateSSMAgent.
+ Name *string `min:"4" type:"string"`
+
+ // Output of the plugin execution.
+ Output *string `type:"string"`
+
+ // The S3 bucket where the responses to the command executions should be stored.
+ // This was requested when issuing the command.
+ OutputS3BucketName *string `min:"3" type:"string"`
+
+ // The S3 directory path inside the bucket where the responses to the command
+ // executions should be stored. This was requested when issuing the command.
+ OutputS3KeyPrefix *string `type:"string"`
+
+ // A numeric response code generated after executing the plugin.
+ ResponseCode *int64 `type:"integer"`
+
+ // The time the plugin stopped executing. Could stop prematurely if, for example,
+ // a cancel command was sent.
+ ResponseFinishDateTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The time the plugin started executing.
+ ResponseStartDateTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The status of this plugin. You can execute a document with multiple plugins.
+ Status *string `type:"string" enum:"CommandPluginStatus"`
+}
+
+// String returns the string representation
+func (s CommandPlugin) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CommandPlugin) GoString() string {
+ return s.String()
+}
+
+type CreateActivationInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the registered, managed instance as it will appear in the Amazon
+ // EC2 console or when you use the AWS command line tools to list EC2 resources.
+ DefaultInstanceName *string `type:"string"`
+
+ // A user-defined description of the resource that you want to register with
+ // Amazon EC2.
+ Description *string `type:"string"`
+
+ // The date by which this activation request should expire. The default value
+ // is 24 hours.
+ ExpirationDate *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The AWS Identity and Access Management (IAM) role that you want to assign
+ // to the managed instance.
+ IamRole *string `type:"string" required:"true"`
+
+ // Specify the maximum number of managed instances you want to register. The
+ // default value is 1 instance.
+ RegistrationLimit *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s CreateActivationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateActivationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateActivationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateActivationInput"} + if s.IamRole == nil { + invalidParams.Add(request.NewErrParamRequired("IamRole")) + } + if s.RegistrationLimit != nil && *s.RegistrationLimit < 1 { + invalidParams.Add(request.NewErrParamMinValue("RegistrationLimit", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateActivationOutput struct { + _ struct{} `type:"structure"` + + // The code the system generates when it processes the activation. The activation + // code functions like a password to validate the activation ID. + ActivationCode *string `min:"20" type:"string"` + + // The ID number generated by the system when it processed the activation. The + // activation ID functions like a user name. + ActivationId *string `type:"string"` +} + +// String returns the string representation +func (s CreateActivationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateActivationOutput) GoString() string { + return s.String() +} + +type CreateAssociationBatchInput struct { + _ struct{} `type:"structure"` + + // One or more associations. + Entries []*CreateAssociationBatchRequestEntry `locationNameList:"entries" type:"list" required:"true"` +} + +// String returns the string representation +func (s CreateAssociationBatchInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAssociationBatchInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateAssociationBatchInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateAssociationBatchInput"} + if s.Entries == nil { + invalidParams.Add(request.NewErrParamRequired("Entries")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateAssociationBatchOutput struct { + _ struct{} `type:"structure"` + + // Information about the associations that failed. + Failed []*FailedCreateAssociation `locationNameList:"FailedCreateAssociationEntry" type:"list"` + + // Information about the associations that succeeded. + Successful []*AssociationDescription `locationNameList:"AssociationDescription" type:"list"` +} + +// String returns the string representation +func (s CreateAssociationBatchOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAssociationBatchOutput) GoString() string { + return s.String() +} + +// Describes the association of an SSM document and an instance. +type CreateAssociationBatchRequestEntry struct { + _ struct{} `type:"structure"` + + // The ID of the instance. + InstanceId *string `type:"string"` + + // The name of the configuration document. + Name *string `type:"string"` + + // A description of the parameters for a document. 
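+ // An illustrative (hypothetical) value, mapping a document parameter name
+ // to its values:
+ //
+ //    Parameters: map[string][]*string{
+ //        "commands": {aws.String("echo hello")},
+ //    }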
+ Parameters map[string][]*string `type:"map"`
+}
+
+// String returns the string representation
+func (s CreateAssociationBatchRequestEntry) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateAssociationBatchRequestEntry) GoString() string {
+ return s.String()
+}
+
+type CreateAssociationInput struct {
+ _ struct{} `type:"structure"`
+
+ // The instance ID.
+ InstanceId *string `type:"string" required:"true"`
+
+ // The name of the SSM document.
+ Name *string `type:"string" required:"true"`
+
+ // The parameters for the document's runtime configuration.
+ Parameters map[string][]*string `type:"map"`
+}
+
+// String returns the string representation
+func (s CreateAssociationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateAssociationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateAssociationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateAssociationInput"}
+ if s.InstanceId == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceId"))
+ }
+ if s.Name == nil {
+ invalidParams.Add(request.NewErrParamRequired("Name"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type CreateAssociationOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the association.
+ AssociationDescription *AssociationDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateAssociationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateAssociationOutput) GoString() string {
+ return s.String()
+}
+
+type CreateDocumentInput struct {
+ _ struct{} `type:"structure"`
+
+ // A valid JSON string.
+ Content *string `min:"1" type:"string" required:"true"`
+
+ // A name for the SSM document.
+ Name *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateDocumentInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDocumentInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateDocumentInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateDocumentInput"}
+ if s.Content == nil {
+ invalidParams.Add(request.NewErrParamRequired("Content"))
+ }
+ if s.Content != nil && len(*s.Content) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Content", 1))
+ }
+ if s.Name == nil {
+ invalidParams.Add(request.NewErrParamRequired("Name"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type CreateDocumentOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the SSM document.
+ DocumentDescription *DocumentDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateDocumentOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDocumentOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteActivationInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the activation that you want to delete.
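+ // You can obtain the ID from the CreateActivation response or by calling
+ // DescribeActivations.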
+ ActivationId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteActivationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteActivationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteActivationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteActivationInput"} + if s.ActivationId == nil { + invalidParams.Add(request.NewErrParamRequired("ActivationId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteActivationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteActivationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteActivationOutput) GoString() string { + return s.String() +} + +type DeleteAssociationInput struct { + _ struct{} `type:"structure"` + + // The ID of the instance. + InstanceId *string `type:"string" required:"true"` + + // The name of the SSM document. + Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteAssociationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAssociationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteAssociationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteAssociationInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteAssociationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAssociationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAssociationOutput) GoString() string { + return s.String() +} + +type DeleteDocumentInput struct { + _ struct{} `type:"structure"` + + // The name of the SSM document. + Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDocumentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDocumentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteDocumentInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteDocumentInput"}
+ if s.Name == nil {
+ invalidParams.Add(request.NewErrParamRequired("Name"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type DeleteDocumentOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteDocumentOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteDocumentOutput) GoString() string {
+ return s.String()
+}
+
+type DeregisterManagedInstanceInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID assigned to the managed instance when you registered it using the
+ // activation process.
+ InstanceId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeregisterManagedInstanceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeregisterManagedInstanceInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeregisterManagedInstanceInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeregisterManagedInstanceInput"}
+ if s.InstanceId == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type DeregisterManagedInstanceOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeregisterManagedInstanceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeregisterManagedInstanceOutput) GoString() string {
+ return s.String()
+}
+
+// Filter for the DescribeActivations API.
+type DescribeActivationsFilter struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the filter.
+ FilterKey *string `type:"string" enum:"DescribeActivationsFilterKeys"`
+
+ // The filter values.
+ FilterValues []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeActivationsFilter) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeActivationsFilter) GoString() string {
+ return s.String()
+}
+
+type DescribeActivationsInput struct {
+ _ struct{} `type:"structure"`
+
+ // A filter to view information about your activations.
+ Filters []*DescribeActivationsFilter `type:"list"`
+
+ // The maximum number of items to return for this call. The call also returns
+ // a token that you can specify in a subsequent call to get the next set of
+ // results.
+ MaxResults *int64 `min:"1" type:"integer"`
+
+ // A token to start the list. Use this token to get the next set of results.
+ NextToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeActivationsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeActivationsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
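+// Note: the SDK's request handlers also run Validate automatically before a
+// request is sent (via the shared parameter-validation handler), so calling
+// it by hand is typically unnecessary.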
+func (s *DescribeActivationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeActivationsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeActivationsOutput struct { + _ struct{} `type:"structure"` + + // A list of activations for your AWS account. + ActivationList []*Activation `type:"list"` + + // The token for the next set of items to return. Use this token to get the + // next set of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeActivationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeActivationsOutput) GoString() string { + return s.String() +} + +type DescribeAssociationInput struct { + _ struct{} `type:"structure"` + + // The instance ID. + InstanceId *string `type:"string" required:"true"` + + // The name of the SSM document. + Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeAssociationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAssociationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeAssociationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeAssociationInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeAssociationOutput struct { + _ struct{} `type:"structure"` + + // Information about the association. + AssociationDescription *AssociationDescription `type:"structure"` +} + +// String returns the string representation +func (s DescribeAssociationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAssociationOutput) GoString() string { + return s.String() +} + +type DescribeDocumentInput struct { + _ struct{} `type:"structure"` + + // The name of the SSM document. + Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeDocumentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDocumentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDocumentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDocumentInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeDocumentOutput struct { + _ struct{} `type:"structure"` + + // Information about the SSM document. 
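+ // See the DocumentDescription type in this package for the fields returned,
+ // such as the document hash, owner, parameters, platform types, and status.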
+ Document *DocumentDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeDocumentOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeDocumentOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeDocumentPermissionInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the document that you own.
+ Name *string `type:"string" required:"true"`
+
+ // The permission type for the document. The permission type can be Share.
+ PermissionType *string `type:"string" required:"true" enum:"DocumentPermissionType"`
+}
+
+// String returns the string representation
+func (s DescribeDocumentPermissionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeDocumentPermissionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeDocumentPermissionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeDocumentPermissionInput"}
+ if s.Name == nil {
+ invalidParams.Add(request.NewErrParamRequired("Name"))
+ }
+ if s.PermissionType == nil {
+ invalidParams.Add(request.NewErrParamRequired("PermissionType"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type DescribeDocumentPermissionOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The account IDs that have permission to use this document. The ID can be
+ // either an AWS account or All.
+ AccountIds []*string `locationNameList:"AccountId" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeDocumentPermissionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeDocumentPermissionOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeInstanceInformationInput struct {
+ _ struct{} `type:"structure"`
+
+ // One or more filters. Use a filter to return a more specific list of instances.
+ InstanceInformationFilterList []*InstanceInformationFilter `locationNameList:"InstanceInformationFilter" min:"1" type:"list"`
+
+ // The maximum number of items to return for this call. The call also returns
+ // a token that you can specify in a subsequent call to get the next set of
+ // results.
+ MaxResults *int64 `min:"5" type:"integer"`
+
+ // The token for the next set of items to return. (You received this token from
+ // a previous call.)
+ NextToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeInstanceInformationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeInstanceInformationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeInstanceInformationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeInstanceInformationInput"} + if s.InstanceInformationFilterList != nil && len(s.InstanceInformationFilterList) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InstanceInformationFilterList", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + if s.InstanceInformationFilterList != nil { + for i, v := range s.InstanceInformationFilterList { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InstanceInformationFilterList", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeInstanceInformationOutput struct { + _ struct{} `type:"structure"` + + // The instance information list. + InstanceInformationList []*InstanceInformation `locationNameList:"InstanceInformation" type:"list"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeInstanceInformationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceInformationOutput) GoString() string { + return s.String() +} + +// Describes an SSM document. +type DocumentDescription struct { + _ struct{} `type:"structure"` + + // The date when the SSM document was created. + CreatedDate *time.Time `type:"timestamp" timestampFormat:"unix"` + + // A description of the document. + Description *string `type:"string"` + + // The Sha256 or Sha1 hash created by the system when the document was created. + // + // Sha1 hashes have been deprecated. + Hash *string `type:"string"` + + // Sha256 or Sha1. + // + // Sha1 hashes have been deprecated. + HashType *string `type:"string" enum:"DocumentHashType"` + + // The name of the SSM document. + Name *string `type:"string"` + + // The AWS user account of the person who created the document. + Owner *string `type:"string"` + + // A description of the parameters for a document. + Parameters []*DocumentParameter `locationNameList:"DocumentParameter" type:"list"` + + // The list of OS platforms compatible with this SSM document. + PlatformTypes []*string `locationNameList:"PlatformType" type:"list"` + + // The SHA1 hash of the document, which you can use for verification purposes. + Sha1 *string `type:"string"` + + // The status of the SSM document. + Status *string `type:"string" enum:"DocumentStatus"` +} + +// String returns the string representation +func (s DocumentDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DocumentDescription) GoString() string { + return s.String() +} + +// Describes a filter. +type DocumentFilter struct { + _ struct{} `type:"structure"` + + // The name of the filter. + Key *string `locationName:"key" type:"string" required:"true" enum:"DocumentFilterKey"` + + // The value of the filter. 
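+ // For example, a Key of "Owner" with a Value of "Amazon" (an illustrative
+ // pairing) would limit results to Amazon-owned documents.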
+ Value *string `locationName:"value" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DocumentFilter) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DocumentFilter) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DocumentFilter) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DocumentFilter"}
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Value == nil {
+ invalidParams.Add(request.NewErrParamRequired("Value"))
+ }
+ if s.Value != nil && len(*s.Value) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Value", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Describes the name of an SSM document.
+type DocumentIdentifier struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the SSM document.
+ Name *string `type:"string"`
+
+ // The AWS user account of the person who created the document.
+ Owner *string `type:"string"`
+
+ // The operating system platform.
+ PlatformTypes []*string `locationNameList:"PlatformType" type:"list"`
+}
+
+// String returns the string representation
+func (s DocumentIdentifier) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DocumentIdentifier) GoString() string {
+ return s.String()
+}
+
+// Parameters specified in the SSM document that are used on the server when
+// the command is run.
+type DocumentParameter struct {
+ _ struct{} `type:"structure"`
+
+ // If specified, the default values for the parameters. Parameters without a
+ // default value are required. Parameters with a default value are optional.
+ DefaultValue *string `type:"string"`
+
+ // A description of what the parameter does, how to use it, the default value,
+ // and whether or not the parameter is optional.
+ Description *string `type:"string"`
+
+ // The name of the parameter.
+ Name *string `type:"string"`
+
+ // The type of parameter. The type can be either “String” or “StringList”.
+ Type *string `type:"string" enum:"DocumentParameterType"`
+}
+
+// String returns the string representation
+func (s DocumentParameter) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DocumentParameter) GoString() string {
+ return s.String()
+}
+
+// Describes a failed association.
+type FailedCreateAssociation struct {
+ _ struct{} `type:"structure"`
+
+ // The association.
+ Entry *CreateAssociationBatchRequestEntry `type:"structure"`
+
+ // The source of the failure.
+ Fault *string `type:"string" enum:"Fault"`
+
+ // A description of the failure.
+ Message *string `type:"string"`
+}
+
+// String returns the string representation
+func (s FailedCreateAssociation) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s FailedCreateAssociation) GoString() string {
+ return s.String()
+}
+
+type GetDocumentInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the SSM document.
+ Name *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetDocumentInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetDocumentInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetDocumentInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetDocumentInput"}
+ if s.Name == nil {
+ invalidParams.Add(request.NewErrParamRequired("Name"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type GetDocumentOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The contents of the SSM document.
+ Content *string `min:"1" type:"string"`
+
+ // The name of the SSM document.
+ Name *string `type:"string"`
+}
+
+// String returns the string representation
+func (s GetDocumentOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetDocumentOutput) GoString() string {
+ return s.String()
+}
+
+// Describes a managed instance.
+type InstanceInformation struct {
+ _ struct{} `type:"structure"`
+
+ // The activation ID created by SSM when the server or VM was registered.
+ ActivationId *string `type:"string"`
+
+ // The version of the SSM agent running on your instance.
+ AgentVersion *string `type:"string"`
+
+ // The fully qualified host name of the managed instance.
+ ComputerName *string `min:"1" type:"string"`
+
+ // The IP address of the managed instance.
+ IPAddress *string `min:"1" type:"string"`
+
+ // The AWS Identity and Access Management (IAM) role assigned to EC2 instances
+ // or managed instances.
+ IamRole *string `type:"string"`
+
+ // The instance ID.
+ InstanceId *string `type:"string"`
+
+ // Indicates whether the latest version of the SSM agent is running on your
+ // instance.
+ IsLatestVersion *bool `type:"boolean"`
+
+ // The date and time when the agent last pinged the SSM service.
+ LastPingDateTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The name of the managed instance.
+ Name *string `type:"string"`
+
+ // Connection status of the SSM agent.
+ PingStatus *string `type:"string" enum:"PingStatus"`
+
+ // The name of the operating system platform running on your instance.
+ PlatformName *string `type:"string"`
+
+ // The operating system platform type.
+ PlatformType *string `type:"string" enum:"PlatformType"`
+
+ // The version of the OS platform running on your instance.
+ PlatformVersion *string `type:"string"`
+
+ // The date the server or VM was registered with AWS as a managed instance.
+ RegistrationDate *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The type of instance. Instances are either EC2 instances or managed instances.
+ ResourceType *string `type:"string" enum:"ResourceType"`
+}
+
+// String returns the string representation
+func (s InstanceInformation) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceInformation) GoString() string {
+ return s.String()
+}
+
+// Describes a filter for a specific list of instances.
+type InstanceInformationFilter struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the filter.
+ Key *string `locationName:"key" type:"string" required:"true" enum:"InstanceInformationFilterKey"`
+
+ // The filter values.
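+ // For example, a Key of "PingStatus" with a ValueSet of {"Online"} (an
+ // illustrative pairing; see the PingStatus enum values in this package).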
+ ValueSet []*string `locationName:"valueSet" locationNameList:"InstanceInformationFilterValue" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s InstanceInformationFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceInformationFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InstanceInformationFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InstanceInformationFilter"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.ValueSet == nil { + invalidParams.Add(request.NewErrParamRequired("ValueSet")) + } + if s.ValueSet != nil && len(s.ValueSet) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ValueSet", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListAssociationsInput struct { + _ struct{} `type:"structure"` + + // One or more filters. Use a filter to return a more specific list of results. + AssociationFilterList []*AssociationFilter `locationNameList:"AssociationFilter" min:"1" type:"list" required:"true"` + + // The maximum number of items to return for this call. The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"1" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListAssociationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAssociationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListAssociationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAssociationsInput"} + if s.AssociationFilterList == nil { + invalidParams.Add(request.NewErrParamRequired("AssociationFilterList")) + } + if s.AssociationFilterList != nil && len(s.AssociationFilterList) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AssociationFilterList", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.AssociationFilterList != nil { + for i, v := range s.AssociationFilterList { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AssociationFilterList", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListAssociationsOutput struct { + _ struct{} `type:"structure"` + + // The associations. + Associations []*Association `locationNameList:"Association" type:"list"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListAssociationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAssociationsOutput) GoString() string { + return s.String() +} + +type ListCommandInvocationsInput struct { + _ struct{} `type:"structure"` + + // (Optional) The invocations for a specific command ID. 
+ CommandId *string `min:"36" type:"string"`
+
+ // (Optional) If set, this returns the response of the command executions and
+ // any command output. By default, this is set to False.
+ Details *bool `type:"boolean"`
+
+ // (Optional) One or more filters. Use a filter to return a more specific list
+ // of results.
+ Filters []*CommandFilter `min:"1" type:"list"`
+
+ // (Optional) The command execution details for a specific instance ID.
+ InstanceId *string `type:"string"`
+
+ // (Optional) The maximum number of items to return for this call. The call
+ // also returns a token that you can specify in a subsequent call to get the
+ // next set of results.
+ MaxResults *int64 `min:"1" type:"integer"`
+
+ // (Optional) The token for the next set of items to return. (You received this
+ // token from a previous call.)
+ NextToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListCommandInvocationsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListCommandInvocationsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListCommandInvocationsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListCommandInvocationsInput"}
+ if s.CommandId != nil && len(*s.CommandId) < 36 {
+ invalidParams.Add(request.NewErrParamMinLen("CommandId", 36))
+ }
+ if s.Filters != nil && len(s.Filters) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Filters", 1))
+ }
+ if s.MaxResults != nil && *s.MaxResults < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+ }
+ if s.Filters != nil {
+ for i, v := range s.Filters {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type ListCommandInvocationsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // (Optional) A list of all invocations.
+ CommandInvocations []*CommandInvocation `type:"list"`
+
+ // (Optional) The token for the next set of items to return. (You received this
+ // token from a previous call.)
+ NextToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListCommandInvocationsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListCommandInvocationsOutput) GoString() string {
+ return s.String()
+}
+
+type ListCommandsInput struct {
+ _ struct{} `type:"structure"`
+
+ // (Optional) If provided, lists only the specified command.
+ CommandId *string `min:"36" type:"string"`
+
+ // (Optional) One or more filters. Use a filter to return a more specific list
+ // of results.
+ Filters []*CommandFilter `min:"1" type:"list"`
+
+ // (Optional) Lists commands issued against this instance ID.
+ InstanceId *string `type:"string"`
+
+ // (Optional) The maximum number of items to return for this call. The call
+ // also returns a token that you can specify in a subsequent call to get the
+ // next set of results.
+ MaxResults *int64 `min:"1" type:"integer"`
+
+ // (Optional) The token for the next set of items to return. (You received this
+ // token from a previous call.)
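+ // Leave this unset on the first call, then pass each response's NextToken
+ // to retrieve the following page of results.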
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListCommandsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListCommandsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListCommandsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListCommandsInput"} + if s.CommandId != nil && len(*s.CommandId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("CommandId", 36)) + } + if s.Filters != nil && len(s.Filters) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Filters", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListCommandsOutput struct { + _ struct{} `type:"structure"` + + // (Optional) The list of commands requested by the user. + Commands []*Command `type:"list"` + + // (Optional) The token for the next set of items to return. (You received this + // token from a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListCommandsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListCommandsOutput) GoString() string { + return s.String() +} + +type ListDocumentsInput struct { + _ struct{} `type:"structure"` + + // One or more filters. Use a filter to return a more specific list of results. + DocumentFilterList []*DocumentFilter `locationNameList:"DocumentFilter" min:"1" type:"list"` + + // The maximum number of items to return for this call. The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"1" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListDocumentsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDocumentsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListDocumentsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListDocumentsInput"} + if s.DocumentFilterList != nil && len(s.DocumentFilterList) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DocumentFilterList", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.DocumentFilterList != nil { + for i, v := range s.DocumentFilterList { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DocumentFilterList", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListDocumentsOutput struct { + _ struct{} `type:"structure"` + + // The names of the SSM documents. 
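+ // Each DocumentIdentifier carries the document name, owner, and compatible
+ // platform types.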
+ DocumentIdentifiers []*DocumentIdentifier `locationNameList:"DocumentIdentifier" type:"list"`
+
+ // The token to use when requesting the next set of items. If there are no additional
+ // items to return, the string is empty.
+ NextToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListDocumentsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListDocumentsOutput) GoString() string {
+ return s.String()
+}
+
+type ListTagsForResourceInput struct {
+ _ struct{} `type:"structure"`
+
+ // The resource ID for which you want to see a list of tags.
+ ResourceId *string `type:"string" required:"true"`
+
+ // The type of resource for which you want to list tags.
+ ResourceType *string `type:"string" required:"true" enum:"ResourceTypeForTagging"`
+}
+
+// String returns the string representation
+func (s ListTagsForResourceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTagsForResourceInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListTagsForResourceInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"}
+ if s.ResourceId == nil {
+ invalidParams.Add(request.NewErrParamRequired("ResourceId"))
+ }
+ if s.ResourceType == nil {
+ invalidParams.Add(request.NewErrParamRequired("ResourceType"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type ListTagsForResourceOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of tags.
+ TagList []*Tag `type:"list"`
+}
+
+// String returns the string representation
+func (s ListTagsForResourceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTagsForResourceOutput) GoString() string {
+ return s.String()
+}
+
+type ModifyDocumentPermissionInput struct {
+ _ struct{} `type:"structure"`
+
+ // The AWS user accounts that should have access to the document. The account
+ // IDs can either be a group of account IDs or All.
+ AccountIdsToAdd []*string `locationNameList:"AccountId" type:"list"`
+
+ // The AWS user accounts that should no longer have access to the document.
+ // The account IDs can either be a group of account IDs or All. This action
+ // has a higher priority than AccountIdsToAdd. If you specify an account ID
+ // to add and the same ID to remove, the system removes access to the document.
+ AccountIdsToRemove []*string `locationNameList:"AccountId" type:"list"`
+
+ // The name of the document that you want to share.
+ Name *string `type:"string" required:"true"`
+
+ // The permission type for the document. The permission type can be Share.
+ PermissionType *string `type:"string" required:"true" enum:"DocumentPermissionType"`
+}
+
+// String returns the string representation
+func (s ModifyDocumentPermissionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyDocumentPermissionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ModifyDocumentPermissionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ModifyDocumentPermissionInput"}
+ if s.Name == nil {
+ invalidParams.Add(request.NewErrParamRequired("Name"))
+ }
+ if s.PermissionType == nil {
+ invalidParams.Add(request.NewErrParamRequired("PermissionType"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type ModifyDocumentPermissionOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s ModifyDocumentPermissionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyDocumentPermissionOutput) GoString() string {
+ return s.String()
+}
+
+type RemoveTagsFromResourceInput struct {
+ _ struct{} `type:"structure"`
+
+ // The resource ID for which you want to remove tags.
+ ResourceId *string `type:"string" required:"true"`
+
+ // The type of resource from which you want to remove a tag.
+ ResourceType *string `type:"string" required:"true" enum:"ResourceTypeForTagging"`
+
+ // Tag keys that you want to remove from the specified resource.
+ TagKeys []*string `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s RemoveTagsFromResourceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RemoveTagsFromResourceInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RemoveTagsFromResourceInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RemoveTagsFromResourceInput"}
+ if s.ResourceId == nil {
+ invalidParams.Add(request.NewErrParamRequired("ResourceId"))
+ }
+ if s.ResourceType == nil {
+ invalidParams.Add(request.NewErrParamRequired("ResourceType"))
+ }
+ if s.TagKeys == nil {
+ invalidParams.Add(request.NewErrParamRequired("TagKeys"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type RemoveTagsFromResourceOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s RemoveTagsFromResourceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RemoveTagsFromResourceOutput) GoString() string {
+ return s.String()
+}
+
+type SendCommandInput struct {
+ _ struct{} `type:"structure"`
+
+ // User-specified information about the command, such as a brief description
+ // of what the command should do.
+ Comment *string `type:"string"`
+
+ // The Sha256 or Sha1 hash created by the system when the document was created.
+ //
+ // Sha1 hashes have been deprecated.
+ DocumentHash *string `type:"string"`
+
+ // Sha256 or Sha1.
+ //
+ // Sha1 hashes have been deprecated.
+ DocumentHashType *string `type:"string" enum:"DocumentHashType"`
+
+ // Required. The name of the SSM document to execute. This can be an SSM public
+ // document or a custom document.
+ DocumentName *string `type:"string" required:"true"`
+
+ // Required. The instance IDs where the command should execute. You can specify
+ // a maximum of 50 IDs.
+ InstanceIds []*string `min:"1" type:"list" required:"true"`
+
+ // The name of the S3 bucket where command execution responses should be stored.
+ OutputS3BucketName *string `min:"3" type:"string"`
+
+ // The directory structure within the S3 bucket where the responses should be
+ // stored.
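+ // For example, a hypothetical prefix such as "ssm/command-output" would group
+ // all responses under that key path in the bucket.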
+ OutputS3KeyPrefix *string `type:"string"`
+
+ // The required and optional parameters specified in the SSM document being
+ // executed.
+ Parameters map[string][]*string `type:"map"`
+
+ // If this time is reached and the command has not already started executing,
+ // it will not execute.
+ TimeoutSeconds *int64 `min:"30" type:"integer"`
+}
+
+// String returns the string representation
+func (s SendCommandInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SendCommandInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SendCommandInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "SendCommandInput"}
+ if s.DocumentName == nil {
+ invalidParams.Add(request.NewErrParamRequired("DocumentName"))
+ }
+ if s.InstanceIds == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceIds"))
+ }
+ if s.InstanceIds != nil && len(s.InstanceIds) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("InstanceIds", 1))
+ }
+ if s.OutputS3BucketName != nil && len(*s.OutputS3BucketName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("OutputS3BucketName", 3))
+ }
+ if s.TimeoutSeconds != nil && *s.TimeoutSeconds < 30 {
+ invalidParams.Add(request.NewErrParamMinValue("TimeoutSeconds", 30))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type SendCommandOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The request as it was received by SSM. Also provides the command ID, which
+ // can be used in future references to this request.
+ Command *Command `type:"structure"`
+}
+
+// String returns the string representation
+func (s SendCommandOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SendCommandOutput) GoString() string {
+ return s.String()
+}
+
+// Metadata that you assign to your managed instances. Tags enable you to categorize
+// your managed instances in different ways, for example, by purpose, owner,
+// or environment.
+type Tag struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the tag.
+ Key *string `min:"1" type:"string" required:"true"`
+
+ // The value of the tag.
+ Value *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s Tag) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Tag) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Tag) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Tag"}
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.Value == nil {
+ invalidParams.Add(request.NewErrParamRequired("Value"))
+ }
+ if s.Value != nil && len(*s.Value) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Value", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type UpdateAssociationStatusInput struct {
+ _ struct{} `type:"structure"`
+
+ // The association status.
+ AssociationStatus *AssociationStatus `type:"structure" required:"true"`
+
+ // The ID of the instance.
+ InstanceId *string `type:"string" required:"true"`
+
+ // The name of the SSM document.
+ Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateAssociationStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAssociationStatusInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateAssociationStatusInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateAssociationStatusInput"} + if s.AssociationStatus == nil { + invalidParams.Add(request.NewErrParamRequired("AssociationStatus")) + } + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.AssociationStatus != nil { + if err := s.AssociationStatus.Validate(); err != nil { + invalidParams.AddNested("AssociationStatus", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateAssociationStatusOutput struct { + _ struct{} `type:"structure"` + + // Information about the association. + AssociationDescription *AssociationDescription `type:"structure"` +} + +// String returns the string representation +func (s UpdateAssociationStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAssociationStatusOutput) GoString() string { + return s.String() +} + +type UpdateManagedInstanceRoleInput struct { + _ struct{} `type:"structure"` + + // The IAM role you want to assign or change. + IamRole *string `type:"string" required:"true"` + + // The ID of the managed instance where you want to update the role. + InstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateManagedInstanceRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateManagedInstanceRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateManagedInstanceRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateManagedInstanceRoleInput"} + if s.IamRole == nil { + invalidParams.Add(request.NewErrParamRequired("IamRole")) + } + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateManagedInstanceRoleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateManagedInstanceRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateManagedInstanceRoleOutput) GoString() string { + return s.String() +} + +const ( + // @enum AssociationFilterKey + AssociationFilterKeyInstanceId = "InstanceId" + // @enum AssociationFilterKey + AssociationFilterKeyName = "Name" +) + +const ( + // @enum AssociationStatusName + AssociationStatusNamePending = "Pending" + // @enum AssociationStatusName + AssociationStatusNameSuccess = "Success" + // @enum AssociationStatusName + AssociationStatusNameFailed = "Failed" +) + +const ( + // @enum CommandFilterKey + CommandFilterKeyInvokedAfter = "InvokedAfter" + // @enum CommandFilterKey + CommandFilterKeyInvokedBefore = "InvokedBefore" + // @enum CommandFilterKey + CommandFilterKeyStatus = "Status" +) + +const ( + // @enum CommandInvocationStatus + CommandInvocationStatusPending = "Pending" + // @enum CommandInvocationStatus + CommandInvocationStatusInProgress = "InProgress" + // @enum CommandInvocationStatus + CommandInvocationStatusCancelling = "Cancelling" + // @enum CommandInvocationStatus + CommandInvocationStatusSuccess = "Success" + // @enum CommandInvocationStatus + CommandInvocationStatusTimedOut = "TimedOut" + // @enum CommandInvocationStatus + CommandInvocationStatusCancelled = "Cancelled" + // @enum CommandInvocationStatus + CommandInvocationStatusFailed = "Failed" +) + +const ( + // @enum CommandPluginStatus + CommandPluginStatusPending = "Pending" + // @enum CommandPluginStatus + CommandPluginStatusInProgress = "InProgress" + // @enum CommandPluginStatus + CommandPluginStatusSuccess = "Success" + // @enum CommandPluginStatus + CommandPluginStatusTimedOut = "TimedOut" + // @enum CommandPluginStatus + CommandPluginStatusCancelled = "Cancelled" + // @enum CommandPluginStatus + CommandPluginStatusFailed = "Failed" +) + +const ( + // @enum CommandStatus + CommandStatusPending = "Pending" + // @enum CommandStatus + CommandStatusInProgress = "InProgress" + // @enum CommandStatus + CommandStatusCancelling = "Cancelling" + // @enum CommandStatus + CommandStatusSuccess = "Success" + // @enum CommandStatus + CommandStatusTimedOut = "TimedOut" + // @enum CommandStatus + CommandStatusCancelled = "Cancelled" + // @enum CommandStatus + CommandStatusFailed = "Failed" +) + +const ( + // @enum DescribeActivationsFilterKeys + DescribeActivationsFilterKeysActivationIds = "ActivationIds" + // @enum DescribeActivationsFilterKeys + DescribeActivationsFilterKeysDefaultInstanceName = "DefaultInstanceName" + // @enum DescribeActivationsFilterKeys + DescribeActivationsFilterKeysIamRole = "IamRole" +) + +const ( + // @enum DocumentFilterKey + DocumentFilterKeyName = "Name" + // @enum DocumentFilterKey + DocumentFilterKeyOwner = "Owner" + // @enum DocumentFilterKey + DocumentFilterKeyPlatformTypes = "PlatformTypes" +) + +const ( + // @enum DocumentHashType + DocumentHashTypeSha256 = "Sha256" + // @enum DocumentHashType + 
DocumentHashTypeSha1 = "Sha1" +) + +const ( + // @enum DocumentParameterType + DocumentParameterTypeString = "String" + // @enum DocumentParameterType + DocumentParameterTypeStringList = "StringList" +) + +const ( + // @enum DocumentPermissionType + DocumentPermissionTypeShare = "Share" +) + +const ( + // @enum DocumentStatus + DocumentStatusCreating = "Creating" + // @enum DocumentStatus + DocumentStatusActive = "Active" + // @enum DocumentStatus + DocumentStatusDeleting = "Deleting" +) + +const ( + // @enum Fault + FaultClient = "Client" + // @enum Fault + FaultServer = "Server" + // @enum Fault + FaultUnknown = "Unknown" +) + +const ( + // @enum InstanceInformationFilterKey + InstanceInformationFilterKeyInstanceIds = "InstanceIds" + // @enum InstanceInformationFilterKey + InstanceInformationFilterKeyAgentVersion = "AgentVersion" + // @enum InstanceInformationFilterKey + InstanceInformationFilterKeyPingStatus = "PingStatus" + // @enum InstanceInformationFilterKey + InstanceInformationFilterKeyPlatformTypes = "PlatformTypes" + // @enum InstanceInformationFilterKey + InstanceInformationFilterKeyActivationIds = "ActivationIds" + // @enum InstanceInformationFilterKey + InstanceInformationFilterKeyIamRole = "IamRole" + // @enum InstanceInformationFilterKey + InstanceInformationFilterKeyResourceType = "ResourceType" +) + +const ( + // @enum PingStatus + PingStatusOnline = "Online" + // @enum PingStatus + PingStatusConnectionLost = "ConnectionLost" + // @enum PingStatus + PingStatusInactive = "Inactive" +) + +const ( + // @enum PlatformType + PlatformTypeWindows = "Windows" + // @enum PlatformType + PlatformTypeLinux = "Linux" +) + +const ( + // @enum ResourceType + ResourceTypeManagedInstance = "ManagedInstance" + // @enum ResourceType + ResourceTypeDocument = "Document" + // @enum ResourceType + ResourceTypeEc2instance = "EC2Instance" +) + +const ( + // @enum ResourceTypeForTagging + ResourceTypeForTaggingManagedInstance = "ManagedInstance" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssm/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/ssm/examples_test.go new file mode 100644 index 000000000..d1783926b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ssm/examples_test.go @@ -0,0 +1,650 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package ssm_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/ssm" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleSSM_AddTagsToResource() { + svc := ssm.New(session.New()) + + params := &ssm.AddTagsToResourceInput{ + ResourceId: aws.String("ResourceId"), // Required + ResourceType: aws.String("ResourceTypeForTagging"), // Required + Tags: []*ssm.Tag{ // Required + { // Required + Key: aws.String("TagKey"), // Required + Value: aws.String("TagValue"), // Required + }, + // More values... + }, + } + resp, err := svc.AddTagsToResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSSM_CancelCommand() { + svc := ssm.New(session.New()) + + params := &ssm.CancelCommandInput{ + CommandId: aws.String("CommandId"), // Required + InstanceIds: []*string{ + aws.String("InstanceId"), // Required + // More values... 
+ }, + } + resp, err := svc.CancelCommand(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSSM_CreateActivation() { + svc := ssm.New(session.New()) + + params := &ssm.CreateActivationInput{ + IamRole: aws.String("IamRole"), // Required + DefaultInstanceName: aws.String("DefaultInstanceName"), + Description: aws.String("ActivationDescription"), + ExpirationDate: aws.Time(time.Now()), + RegistrationLimit: aws.Int64(1), + } + resp, err := svc.CreateActivation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSSM_CreateAssociation() { + svc := ssm.New(session.New()) + + params := &ssm.CreateAssociationInput{ + InstanceId: aws.String("InstanceId"), // Required + Name: aws.String("DocumentName"), // Required + Parameters: map[string][]*string{ + "Key": { // Required + aws.String("ParameterValue"), // Required + // More values... + }, + // More values... + }, + } + resp, err := svc.CreateAssociation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSSM_CreateAssociationBatch() { + svc := ssm.New(session.New()) + + params := &ssm.CreateAssociationBatchInput{ + Entries: []*ssm.CreateAssociationBatchRequestEntry{ // Required + { // Required + InstanceId: aws.String("InstanceId"), + Name: aws.String("DocumentName"), + Parameters: map[string][]*string{ + "Key": { // Required + aws.String("ParameterValue"), // Required + // More values... + }, + // More values... + }, + }, + // More values... + }, + } + resp, err := svc.CreateAssociationBatch(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSSM_CreateDocument() { + svc := ssm.New(session.New()) + + params := &ssm.CreateDocumentInput{ + Content: aws.String("DocumentContent"), // Required + Name: aws.String("DocumentName"), // Required + } + resp, err := svc.CreateDocument(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSSM_DeleteActivation() { + svc := ssm.New(session.New()) + + params := &ssm.DeleteActivationInput{ + ActivationId: aws.String("ActivationId"), // Required + } + resp, err := svc.DeleteActivation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSSM_DeleteAssociation() { + svc := ssm.New(session.New()) + + params := &ssm.DeleteAssociationInput{ + InstanceId: aws.String("InstanceId"), // Required + Name: aws.String("DocumentName"), // Required + } + resp, err := svc.DeleteAssociation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSSM_DeleteDocument() { + svc := ssm.New(session.New()) + + params := &ssm.DeleteDocumentInput{ + Name: aws.String("DocumentName"), // Required + } + resp, err := svc.DeleteDocument(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSSM_DeregisterManagedInstance() { + svc := ssm.New(session.New()) + + params := &ssm.DeregisterManagedInstanceInput{ + InstanceId: aws.String("ManagedInstanceId"), // Required + } + resp, err := svc.DeregisterManagedInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSSM_DescribeActivations() { + svc := ssm.New(session.New()) + + params := &ssm.DescribeActivationsInput{ + Filters: []*ssm.DescribeActivationsFilter{ + { // Required + FilterKey: aws.String("DescribeActivationsFilterKeys"), + FilterValues: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.DescribeActivations(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSSM_DescribeAssociation() { + svc := ssm.New(session.New()) + + params := &ssm.DescribeAssociationInput{ + InstanceId: aws.String("InstanceId"), // Required + Name: aws.String("DocumentName"), // Required + } + resp, err := svc.DescribeAssociation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSSM_DescribeDocument() { + svc := ssm.New(session.New()) + + params := &ssm.DescribeDocumentInput{ + Name: aws.String("DocumentARN"), // Required + } + resp, err := svc.DescribeDocument(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSSM_DescribeDocumentPermission() { + svc := ssm.New(session.New()) + + params := &ssm.DescribeDocumentPermissionInput{ + Name: aws.String("DocumentName"), // Required + PermissionType: aws.String("DocumentPermissionType"), // Required + } + resp, err := svc.DescribeDocumentPermission(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSSM_DescribeInstanceInformation() { + svc := ssm.New(session.New()) + + params := &ssm.DescribeInstanceInformationInput{ + InstanceInformationFilterList: []*ssm.InstanceInformationFilter{ + { // Required + Key: aws.String("InstanceInformationFilterKey"), // Required + ValueSet: []*string{ // Required + aws.String("InstanceInformationFilterValue"), // Required + // More values... + }, + }, + // More values... 
+ }, + MaxResults: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.DescribeInstanceInformation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSSM_GetDocument() { + svc := ssm.New(session.New()) + + params := &ssm.GetDocumentInput{ + Name: aws.String("DocumentARN"), // Required + } + resp, err := svc.GetDocument(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSSM_ListAssociations() { + svc := ssm.New(session.New()) + + params := &ssm.ListAssociationsInput{ + AssociationFilterList: []*ssm.AssociationFilter{ // Required + { // Required + Key: aws.String("AssociationFilterKey"), // Required + Value: aws.String("AssociationFilterValue"), // Required + }, + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListAssociations(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSSM_ListCommandInvocations() { + svc := ssm.New(session.New()) + + params := &ssm.ListCommandInvocationsInput{ + CommandId: aws.String("CommandId"), + Details: aws.Bool(true), + Filters: []*ssm.CommandFilter{ + { // Required + Key: aws.String("CommandFilterKey"), // Required + Value: aws.String("CommandFilterValue"), // Required + }, + // More values... + }, + InstanceId: aws.String("InstanceId"), + MaxResults: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListCommandInvocations(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSSM_ListCommands() { + svc := ssm.New(session.New()) + + params := &ssm.ListCommandsInput{ + CommandId: aws.String("CommandId"), + Filters: []*ssm.CommandFilter{ + { // Required + Key: aws.String("CommandFilterKey"), // Required + Value: aws.String("CommandFilterValue"), // Required + }, + // More values... + }, + InstanceId: aws.String("InstanceId"), + MaxResults: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListCommands(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSSM_ListDocuments() { + svc := ssm.New(session.New()) + + params := &ssm.ListDocumentsInput{ + DocumentFilterList: []*ssm.DocumentFilter{ + { // Required + Key: aws.String("DocumentFilterKey"), // Required + Value: aws.String("DocumentFilterValue"), // Required + }, + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListDocuments(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSSM_ListTagsForResource() { + svc := ssm.New(session.New()) + + params := &ssm.ListTagsForResourceInput{ + ResourceId: aws.String("ResourceId"), // Required + ResourceType: aws.String("ResourceTypeForTagging"), // Required + } + resp, err := svc.ListTagsForResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSSM_ModifyDocumentPermission() { + svc := ssm.New(session.New()) + + params := &ssm.ModifyDocumentPermissionInput{ + Name: aws.String("DocumentName"), // Required + PermissionType: aws.String("DocumentPermissionType"), // Required + AccountIdsToAdd: []*string{ + aws.String("AccountId"), // Required + // More values... + }, + AccountIdsToRemove: []*string{ + aws.String("AccountId"), // Required + // More values... + }, + } + resp, err := svc.ModifyDocumentPermission(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSSM_RemoveTagsFromResource() { + svc := ssm.New(session.New()) + + params := &ssm.RemoveTagsFromResourceInput{ + ResourceId: aws.String("ResourceId"), // Required + ResourceType: aws.String("ResourceTypeForTagging"), // Required + TagKeys: []*string{ // Required + aws.String("TagKey"), // Required + // More values... + }, + } + resp, err := svc.RemoveTagsFromResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSSM_SendCommand() { + svc := ssm.New(session.New()) + + params := &ssm.SendCommandInput{ + DocumentName: aws.String("DocumentARN"), // Required + InstanceIds: []*string{ // Required + aws.String("InstanceId"), // Required + // More values... + }, + Comment: aws.String("Comment"), + DocumentHash: aws.String("DocumentHash"), + DocumentHashType: aws.String("DocumentHashType"), + OutputS3BucketName: aws.String("S3BucketName"), + OutputS3KeyPrefix: aws.String("S3KeyPrefix"), + Parameters: map[string][]*string{ + "Key": { // Required + aws.String("ParameterValue"), // Required + // More values... + }, + // More values... + }, + TimeoutSeconds: aws.Int64(1), + } + resp, err := svc.SendCommand(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSSM_UpdateAssociationStatus() { + svc := ssm.New(session.New()) + + params := &ssm.UpdateAssociationStatusInput{ + AssociationStatus: &ssm.AssociationStatus{ // Required + Date: aws.Time(time.Now()), // Required + Message: aws.String("StatusMessage"), // Required + Name: aws.String("AssociationStatusName"), // Required + AdditionalInfo: aws.String("StatusAdditionalInfo"), + }, + InstanceId: aws.String("InstanceId"), // Required + Name: aws.String("DocumentName"), // Required + } + resp, err := svc.UpdateAssociationStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+	fmt.Println(resp)
+}
+
+func ExampleSSM_UpdateManagedInstanceRole() {
+	svc := ssm.New(session.New())
+
+	params := &ssm.UpdateManagedInstanceRoleInput{
+		IamRole:    aws.String("IamRole"),           // Required
+		InstanceId: aws.String("ManagedInstanceId"), // Required
+	}
+	resp, err := svc.UpdateManagedInstanceRole(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssm/service.go b/vendor/github.com/aws/aws-sdk-go/service/ssm/service.go
new file mode 100644
index 000000000..d319af5ab
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/ssm/service.go
@@ -0,0 +1,204 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package ssm
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+)
+
+// This is the Amazon Simple Systems Manager (SSM) API Reference. SSM enables
+// you to remotely manage the configuration of your on-premises servers and
+// virtual machines (VMs) and your Amazon EC2 instances using scripts, commands,
+// or the Amazon EC2 console. SSM includes an on-demand solution called Amazon
+// EC2 Run Command and a lightweight instance configuration solution called
+// SSM Config.
+//
+// This reference is intended to be used with the EC2 Run Command User Guide
+// for Linux (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/execute-remote-commands.html)
+// or Windows (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/execute-remote-commands.html).
+//
+// You must register your on-premises servers and VMs through an activation
+// process before you can configure them using Run Command. Registered servers
+// and VMs are called managed instances. For more information, see Setting Up
+// Run Command On Managed Instances (On-Premises Servers and VMs) on Linux (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/managed-instances.html)
+// or Setting Up Run Command On Managed Instances (On-Premises Servers and VMs)
+// on Windows (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/managed-instances.html).
+//
+// Run Command
+//
+// Run Command provides an on-demand experience for executing commands. You
+// can use pre-defined SSM documents to perform the actions listed later in
+// this section, or you can create your own documents. With these documents,
+// you can remotely configure your instances by sending commands using the Commands
+// page in the Amazon EC2 console (http://console.aws.amazon.com/ec2/), AWS
+// Tools for Windows PowerShell (http://docs.aws.amazon.com/powershell/latest/reference/items/Amazon_Simple_Systems_Management_cmdlets.html),
+// the AWS CLI (http://docs.aws.amazon.com/cli/latest/reference/ssm/index.html),
+// or AWS SDKs.
+//
+// Run Command reports the status of the command execution for each instance
+// targeted by a command. You can also audit the command execution to understand
+// who executed commands, when, and what changes were made. By switching between
+// different SSM documents, you can quickly configure your instances with
+// different types of commands.
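+//
+// As a rough sketch of a minimal Run Command invocation through this client
+// (the instance ID is a placeholder, and the "commands" parameter assumes
+// the pre-defined AWS-RunShellScript document):
+//
+//    svc := ssm.New(session.New())
+//    resp, err := svc.SendCommand(&ssm.SendCommandInput{
+//        DocumentName: aws.String("AWS-RunShellScript"),
+//        InstanceIds:  []*string{aws.String("i-0123456789abcdef0")},
+//        Parameters: map[string][]*string{
+//            "commands": {aws.String("uptime")},
+//        },
+//    })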
+//
+// To get started with Run Command, verify that your environment meets the
+// prerequisites for remotely running commands on EC2 instances (Linux
+// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/remote-commands-prereq.html)
+// or Windows (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/remote-commands-prereq.html)).
+//
+// SSM Config
+//
+// SSM Config is a lightweight instance configuration solution. SSM Config
+// is currently only available for Windows instances. With SSM Config, you can
+// specify a setup configuration for your instances. SSM Config is similar to
+// EC2 User Data, which is another way of running one-time scripts or applying
+// settings during instance launch. SSM Config is an extension of this capability.
+// Using SSM documents, you can specify which actions the system should perform
+// on your instances, including which applications to install, which AWS Directory
+// Service directory to join, which Microsoft PowerShell modules to install,
+// etc. If an instance is missing one or more of these configurations, the system
+// makes those changes. By default, the system checks every five minutes to
+// see if there is a new configuration to apply as defined in a new SSM document.
+// If so, the system updates the instances accordingly. In this way, you can
+// remotely maintain a consistent configuration baseline on your instances.
+// SSM Config is available using the AWS CLI or the AWS Tools for Windows PowerShell.
+// For more information, see Managing Windows Instance Configuration (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2-configuration-manage.html).
+//
+// SSM Config and Run Command include the following pre-defined documents.
+//
+// Linux
+//
+// AWS-RunShellScript to run shell scripts
+//
+// AWS-UpdateSSMAgent to update the Amazon SSM agent
+//
+// Windows
+//
+// AWS-JoinDirectoryServiceDomain to join an AWS Directory
+//
+// AWS-RunPowerShellScript to run PowerShell commands or scripts
+//
+// AWS-UpdateEC2Config to update the EC2Config service
+//
+// AWS-ConfigureWindowsUpdate to configure Windows Update settings
+//
+// AWS-InstallApplication to install, repair, or uninstall software using
+// an MSI package
+//
+// AWS-InstallPowerShellModule to install PowerShell modules
+//
+// AWS-ConfigureCloudWatch to configure Amazon CloudWatch Logs to monitor
+// applications and systems
+//
+// AWS-ListWindowsInventory to collect information about an EC2 instance
+// running Windows
+//
+// AWS-FindWindowsUpdates to scan an instance and determine which updates
+// are missing
+//
+// AWS-InstallMissingWindowsUpdates to install missing updates on your EC2
+// instance
+//
+// AWS-InstallSpecificWindowsUpdates to install one or more specific updates
+//
+// The commands or scripts specified in SSM documents run with administrative
+// privilege on your instances because the Amazon SSM agent runs as root on
+// Linux and the EC2Config service runs in the Local System account on Windows.
+// If a user has permission to execute any of the pre-defined SSM documents
+// (any document that begins with AWS-*), then that user also has administrator
+// access to the instance. Delegate access to Run Command and SSM Config judiciously.
+// This becomes extremely important if you create your own SSM documents. Amazon
+// Web Services does not provide guidance about how to create secure SSM documents.
+// You create SSM documents and delegate access to Run Command at your own risk.
+// As a security best practice, we recommend that you assign access to "AWS-*"
+// documents, especially the AWS-RunShellScript document on Linux and the AWS-RunPowerShellScript
+// document on Windows, to trusted administrators only. You can create SSM documents
+// for specific tasks and delegate access to non-administrators.
+//
+// For information about creating and sharing SSM documents, see the following
+// topics in the SSM User Guide:
+//
+// Creating SSM Documents (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/create-ssm-doc.html)
+// and Sharing SSM Documents (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ssm-sharing.html)
+// (Linux)
+//
+// Creating SSM Documents (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/create-ssm-doc.html)
+// and Sharing SSM Documents (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ssm-sharing.html)
+// (Windows)
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type SSM struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "ssm"
+
+// New creates a new instance of the SSM client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create a SSM client from just a session.
+//     svc := ssm.New(mySession)
+//
+//     // Create a SSM client with additional configuration
+//     svc := ssm.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *SSM {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *SSM {
+	svc := &SSM{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   ServiceName,
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "2014-11-06",
+				JSONVersion:   "1.1",
+				TargetPrefix:  "AmazonSSM",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+	svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler)
+	svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler)
+	svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler)
+	svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for a SSM operation and runs any
+// custom request initialization.
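+//
+// A hypothetical sketch of how a customization file in this package could
+// use the initRequest hook (no such customization is assumed to exist for
+// SSM):
+//
+//    func init() {
+//        initRequest = func(r *request.Request) {
+//            r.Handlers.Build.PushBack(func(r *request.Request) {
+//                // Inspect or adjust every outgoing SSM request here.
+//            })
+//        }
+//    }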
+func (c *SSM) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssm/ssmiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/ssm/ssmiface/interface.go new file mode 100644 index 000000000..fa9766a12 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ssm/ssmiface/interface.go @@ -0,0 +1,128 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package ssmiface provides an interface for the Amazon Simple Systems Management Service. +package ssmiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/ssm" +) + +// SSMAPI is the interface type for ssm.SSM. +type SSMAPI interface { + AddTagsToResourceRequest(*ssm.AddTagsToResourceInput) (*request.Request, *ssm.AddTagsToResourceOutput) + + AddTagsToResource(*ssm.AddTagsToResourceInput) (*ssm.AddTagsToResourceOutput, error) + + CancelCommandRequest(*ssm.CancelCommandInput) (*request.Request, *ssm.CancelCommandOutput) + + CancelCommand(*ssm.CancelCommandInput) (*ssm.CancelCommandOutput, error) + + CreateActivationRequest(*ssm.CreateActivationInput) (*request.Request, *ssm.CreateActivationOutput) + + CreateActivation(*ssm.CreateActivationInput) (*ssm.CreateActivationOutput, error) + + CreateAssociationRequest(*ssm.CreateAssociationInput) (*request.Request, *ssm.CreateAssociationOutput) + + CreateAssociation(*ssm.CreateAssociationInput) (*ssm.CreateAssociationOutput, error) + + CreateAssociationBatchRequest(*ssm.CreateAssociationBatchInput) (*request.Request, *ssm.CreateAssociationBatchOutput) + + CreateAssociationBatch(*ssm.CreateAssociationBatchInput) (*ssm.CreateAssociationBatchOutput, error) + + CreateDocumentRequest(*ssm.CreateDocumentInput) (*request.Request, *ssm.CreateDocumentOutput) + + CreateDocument(*ssm.CreateDocumentInput) (*ssm.CreateDocumentOutput, error) + + DeleteActivationRequest(*ssm.DeleteActivationInput) (*request.Request, *ssm.DeleteActivationOutput) + + DeleteActivation(*ssm.DeleteActivationInput) (*ssm.DeleteActivationOutput, error) + + DeleteAssociationRequest(*ssm.DeleteAssociationInput) (*request.Request, *ssm.DeleteAssociationOutput) + + DeleteAssociation(*ssm.DeleteAssociationInput) (*ssm.DeleteAssociationOutput, error) + + DeleteDocumentRequest(*ssm.DeleteDocumentInput) (*request.Request, *ssm.DeleteDocumentOutput) + + DeleteDocument(*ssm.DeleteDocumentInput) (*ssm.DeleteDocumentOutput, error) + + DeregisterManagedInstanceRequest(*ssm.DeregisterManagedInstanceInput) (*request.Request, *ssm.DeregisterManagedInstanceOutput) + + DeregisterManagedInstance(*ssm.DeregisterManagedInstanceInput) (*ssm.DeregisterManagedInstanceOutput, error) + + DescribeActivationsRequest(*ssm.DescribeActivationsInput) (*request.Request, *ssm.DescribeActivationsOutput) + + DescribeActivations(*ssm.DescribeActivationsInput) (*ssm.DescribeActivationsOutput, error) + + DescribeActivationsPages(*ssm.DescribeActivationsInput, func(*ssm.DescribeActivationsOutput, bool) bool) error + + DescribeAssociationRequest(*ssm.DescribeAssociationInput) (*request.Request, *ssm.DescribeAssociationOutput) + + DescribeAssociation(*ssm.DescribeAssociationInput) (*ssm.DescribeAssociationOutput, error) + + DescribeDocumentRequest(*ssm.DescribeDocumentInput) (*request.Request, *ssm.DescribeDocumentOutput) + + DescribeDocument(*ssm.DescribeDocumentInput) 
(*ssm.DescribeDocumentOutput, error) + + DescribeDocumentPermissionRequest(*ssm.DescribeDocumentPermissionInput) (*request.Request, *ssm.DescribeDocumentPermissionOutput) + + DescribeDocumentPermission(*ssm.DescribeDocumentPermissionInput) (*ssm.DescribeDocumentPermissionOutput, error) + + DescribeInstanceInformationRequest(*ssm.DescribeInstanceInformationInput) (*request.Request, *ssm.DescribeInstanceInformationOutput) + + DescribeInstanceInformation(*ssm.DescribeInstanceInformationInput) (*ssm.DescribeInstanceInformationOutput, error) + + GetDocumentRequest(*ssm.GetDocumentInput) (*request.Request, *ssm.GetDocumentOutput) + + GetDocument(*ssm.GetDocumentInput) (*ssm.GetDocumentOutput, error) + + ListAssociationsRequest(*ssm.ListAssociationsInput) (*request.Request, *ssm.ListAssociationsOutput) + + ListAssociations(*ssm.ListAssociationsInput) (*ssm.ListAssociationsOutput, error) + + ListAssociationsPages(*ssm.ListAssociationsInput, func(*ssm.ListAssociationsOutput, bool) bool) error + + ListCommandInvocationsRequest(*ssm.ListCommandInvocationsInput) (*request.Request, *ssm.ListCommandInvocationsOutput) + + ListCommandInvocations(*ssm.ListCommandInvocationsInput) (*ssm.ListCommandInvocationsOutput, error) + + ListCommandInvocationsPages(*ssm.ListCommandInvocationsInput, func(*ssm.ListCommandInvocationsOutput, bool) bool) error + + ListCommandsRequest(*ssm.ListCommandsInput) (*request.Request, *ssm.ListCommandsOutput) + + ListCommands(*ssm.ListCommandsInput) (*ssm.ListCommandsOutput, error) + + ListCommandsPages(*ssm.ListCommandsInput, func(*ssm.ListCommandsOutput, bool) bool) error + + ListDocumentsRequest(*ssm.ListDocumentsInput) (*request.Request, *ssm.ListDocumentsOutput) + + ListDocuments(*ssm.ListDocumentsInput) (*ssm.ListDocumentsOutput, error) + + ListDocumentsPages(*ssm.ListDocumentsInput, func(*ssm.ListDocumentsOutput, bool) bool) error + + ListTagsForResourceRequest(*ssm.ListTagsForResourceInput) (*request.Request, *ssm.ListTagsForResourceOutput) + + ListTagsForResource(*ssm.ListTagsForResourceInput) (*ssm.ListTagsForResourceOutput, error) + + ModifyDocumentPermissionRequest(*ssm.ModifyDocumentPermissionInput) (*request.Request, *ssm.ModifyDocumentPermissionOutput) + + ModifyDocumentPermission(*ssm.ModifyDocumentPermissionInput) (*ssm.ModifyDocumentPermissionOutput, error) + + RemoveTagsFromResourceRequest(*ssm.RemoveTagsFromResourceInput) (*request.Request, *ssm.RemoveTagsFromResourceOutput) + + RemoveTagsFromResource(*ssm.RemoveTagsFromResourceInput) (*ssm.RemoveTagsFromResourceOutput, error) + + SendCommandRequest(*ssm.SendCommandInput) (*request.Request, *ssm.SendCommandOutput) + + SendCommand(*ssm.SendCommandInput) (*ssm.SendCommandOutput, error) + + UpdateAssociationStatusRequest(*ssm.UpdateAssociationStatusInput) (*request.Request, *ssm.UpdateAssociationStatusOutput) + + UpdateAssociationStatus(*ssm.UpdateAssociationStatusInput) (*ssm.UpdateAssociationStatusOutput, error) + + UpdateManagedInstanceRoleRequest(*ssm.UpdateManagedInstanceRoleInput) (*request.Request, *ssm.UpdateManagedInstanceRoleOutput) + + UpdateManagedInstanceRole(*ssm.UpdateManagedInstanceRoleInput) (*ssm.UpdateManagedInstanceRoleOutput, error) +} + +var _ SSMAPI = (*ssm.SSM)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/storagegateway/api.go b/vendor/github.com/aws/aws-sdk-go/service/storagegateway/api.go new file mode 100644 index 000000000..9029bfff8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/storagegateway/api.go @@ -0,0 +1,7730 @@ +// THIS FILE IS AUTOMATICALLY 
GENERATED. DO NOT EDIT.
+
+// Package storagegateway provides a client for AWS Storage Gateway.
+package storagegateway
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+const opActivateGateway = "ActivateGateway"
+
+// ActivateGatewayRequest generates a "aws/request.Request" representing the
+// client's request for the ActivateGateway operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ActivateGateway method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the ActivateGatewayRequest method.
+//    req, resp := client.ActivateGatewayRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *StorageGateway) ActivateGatewayRequest(input *ActivateGatewayInput) (req *request.Request, output *ActivateGatewayOutput) {
+	op := &request.Operation{
+		Name:       opActivateGateway,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &ActivateGatewayInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ActivateGatewayOutput{}
+	req.Data = output
+	return
+}
+
+// Activates the gateway you previously deployed on your host. For more information,
+// see Activate the AWS Storage Gateway (http://docs.aws.amazon.com/storagegateway/latest/userguide/GettingStartedActivateGateway-common.html).
+// In the activation process, you specify information such as the region you
+// want to use for storing snapshots, the time zone for scheduled snapshots,
+// the gateway snapshot schedule window, an activation key, and a name for your
+// gateway. The activation process also associates your gateway with your account;
+// for more information, see UpdateGatewayInformation.
+//
+// You must turn on the gateway VM before you can activate your gateway.
+func (c *StorageGateway) ActivateGateway(input *ActivateGatewayInput) (*ActivateGatewayOutput, error) {
+	req, out := c.ActivateGatewayRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opAddCache = "AddCache"
+
+// AddCacheRequest generates a "aws/request.Request" representing the
+// client's request for the AddCache operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the AddCache method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the AddCacheRequest method.
+// req, resp := client.AddCacheRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *StorageGateway) AddCacheRequest(input *AddCacheInput) (req *request.Request, output *AddCacheOutput) { + op := &request.Operation{ + Name: opAddCache, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddCacheInput{} + } + + req = c.newRequest(op, input, output) + output = &AddCacheOutput{} + req.Data = output + return +} + +// Configures one or more gateway local disks as cache for a cached-volume gateway. +// This operation is supported only for the gateway-cached volume architecture +// (see Storage Gateway Concepts (http://docs.aws.amazon.com/storagegateway/latest/userguide/StorageGatewayConcepts.html)). +// +// In the request, you specify the gateway Amazon Resource Name (ARN) to which +// you want to add cache, and one or more disk IDs that you want to configure +// as cache. +func (c *StorageGateway) AddCache(input *AddCacheInput) (*AddCacheOutput, error) { + req, out := c.AddCacheRequest(input) + err := req.Send() + return out, err +} + +const opAddTagsToResource = "AddTagsToResource" + +// AddTagsToResourceRequest generates a "aws/request.Request" representing the +// client's request for the AddTagsToResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddTagsToResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddTagsToResourceRequest method. +// req, resp := client.AddTagsToResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *StorageGateway) AddTagsToResourceRequest(input *AddTagsToResourceInput) (req *request.Request, output *AddTagsToResourceOutput) { + op := &request.Operation{ + Name: opAddTagsToResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddTagsToResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &AddTagsToResourceOutput{} + req.Data = output + return +} + +// Adds one or more tags to the specified resource. You use tags to add metadata +// to resources, which you can use to categorize these resources. For example, +// you can categorize resources by purpose, owner, environment, or team. Each +// tag consists of a key and a value, which you define. You can add tags to +// the following AWS Storage Gateway resources: +// +// Storage gateways of all types +// +// Storage Volumes +// +// Virtual Tapes +// +// You can create a maximum of 10 tags for each resource. Virtual tapes and +// storage volumes that are recovered to a new gateway maintain their tags. 
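+//
+// As a rough sketch (the gateway ARN and tag values are placeholders, and
+// the field names assume this package's AddTagsToResourceInput type):
+//
+//    resp, err := svc.AddTagsToResource(&storagegateway.AddTagsToResourceInput{
+//        ResourceARN: aws.String("arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B"),
+//        Tags: []*storagegateway.Tag{
+//            {Key: aws.String("Environment"), Value: aws.String("Production")},
+//        },
+//    })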
+func (c *StorageGateway) AddTagsToResource(input *AddTagsToResourceInput) (*AddTagsToResourceOutput, error) { + req, out := c.AddTagsToResourceRequest(input) + err := req.Send() + return out, err +} + +const opAddUploadBuffer = "AddUploadBuffer" + +// AddUploadBufferRequest generates a "aws/request.Request" representing the +// client's request for the AddUploadBuffer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddUploadBuffer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddUploadBufferRequest method. +// req, resp := client.AddUploadBufferRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *StorageGateway) AddUploadBufferRequest(input *AddUploadBufferInput) (req *request.Request, output *AddUploadBufferOutput) { + op := &request.Operation{ + Name: opAddUploadBuffer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddUploadBufferInput{} + } + + req = c.newRequest(op, input, output) + output = &AddUploadBufferOutput{} + req.Data = output + return +} + +// Configures one or more gateway local disks as upload buffer for a specified +// gateway. This operation is supported for both the gateway-stored and gateway-cached +// volume architectures. +// +// In the request, you specify the gateway Amazon Resource Name (ARN) to which +// you want to add upload buffer, and one or more disk IDs that you want to +// configure as upload buffer. +func (c *StorageGateway) AddUploadBuffer(input *AddUploadBufferInput) (*AddUploadBufferOutput, error) { + req, out := c.AddUploadBufferRequest(input) + err := req.Send() + return out, err +} + +const opAddWorkingStorage = "AddWorkingStorage" + +// AddWorkingStorageRequest generates a "aws/request.Request" representing the +// client's request for the AddWorkingStorage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddWorkingStorage method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddWorkingStorageRequest method. 
+// req, resp := client.AddWorkingStorageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *StorageGateway) AddWorkingStorageRequest(input *AddWorkingStorageInput) (req *request.Request, output *AddWorkingStorageOutput) { + op := &request.Operation{ + Name: opAddWorkingStorage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddWorkingStorageInput{} + } + + req = c.newRequest(op, input, output) + output = &AddWorkingStorageOutput{} + req.Data = output + return +} + +// Configures one or more gateway local disks as working storage for a gateway. +// This operation is supported only for the gateway-stored volume architecture. +// This operation is deprecated in cached-volumes API version 20120630. Use +// AddUploadBuffer instead. +// +// Working storage is also referred to as upload buffer. You can also use +// the AddUploadBuffer operation to add upload buffer to a stored-volume gateway. +// +// In the request, you specify the gateway Amazon Resource Name (ARN) to which +// you want to add working storage, and one or more disk IDs that you want to +// configure as working storage. +func (c *StorageGateway) AddWorkingStorage(input *AddWorkingStorageInput) (*AddWorkingStorageOutput, error) { + req, out := c.AddWorkingStorageRequest(input) + err := req.Send() + return out, err +} + +const opCancelArchival = "CancelArchival" + +// CancelArchivalRequest generates a "aws/request.Request" representing the +// client's request for the CancelArchival operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CancelArchival method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CancelArchivalRequest method. +// req, resp := client.CancelArchivalRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *StorageGateway) CancelArchivalRequest(input *CancelArchivalInput) (req *request.Request, output *CancelArchivalOutput) { + op := &request.Operation{ + Name: opCancelArchival, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelArchivalInput{} + } + + req = c.newRequest(op, input, output) + output = &CancelArchivalOutput{} + req.Data = output + return +} + +// Cancels archiving of a virtual tape to the virtual tape shelf (VTS) after +// the archiving process is initiated. +func (c *StorageGateway) CancelArchival(input *CancelArchivalInput) (*CancelArchivalOutput, error) { + req, out := c.CancelArchivalRequest(input) + err := req.Send() + return out, err +} + +const opCancelRetrieval = "CancelRetrieval" + +// CancelRetrievalRequest generates a "aws/request.Request" representing the +// client's request for the CancelRetrieval operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CancelRetrieval method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CancelRetrievalRequest method. +// req, resp := client.CancelRetrievalRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *StorageGateway) CancelRetrievalRequest(input *CancelRetrievalInput) (req *request.Request, output *CancelRetrievalOutput) { + op := &request.Operation{ + Name: opCancelRetrieval, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelRetrievalInput{} + } + + req = c.newRequest(op, input, output) + output = &CancelRetrievalOutput{} + req.Data = output + return +} + +// Cancels retrieval of a virtual tape from the virtual tape shelf (VTS) to +// a gateway after the retrieval process is initiated. The virtual tape is returned +// to the VTS. +func (c *StorageGateway) CancelRetrieval(input *CancelRetrievalInput) (*CancelRetrievalOutput, error) { + req, out := c.CancelRetrievalRequest(input) + err := req.Send() + return out, err +} + +const opCreateCachediSCSIVolume = "CreateCachediSCSIVolume" + +// CreateCachediSCSIVolumeRequest generates a "aws/request.Request" representing the +// client's request for the CreateCachediSCSIVolume operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateCachediSCSIVolume method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateCachediSCSIVolumeRequest method. +// req, resp := client.CreateCachediSCSIVolumeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *StorageGateway) CreateCachediSCSIVolumeRequest(input *CreateCachediSCSIVolumeInput) (req *request.Request, output *CreateCachediSCSIVolumeOutput) { + op := &request.Operation{ + Name: opCreateCachediSCSIVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateCachediSCSIVolumeInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateCachediSCSIVolumeOutput{} + req.Data = output + return +} + +// Creates a cached volume on a specified cached gateway. This operation is +// supported only for the gateway-cached volume architecture. +// +// Cache storage must be allocated to the gateway before you can create a cached +// volume. Use the AddCache operation to add cache storage to a gateway. +// +// In the request, you must specify the gateway, size of the volume in bytes, +// the iSCSI target name, an IP address on which to expose the target, and a +// unique client token. 
In response, AWS Storage Gateway creates the volume
+// and returns information about it such as the volume Amazon Resource Name
+// (ARN), its size, and the iSCSI target ARN that initiators can use to connect
+// to the volume target.
+func (c *StorageGateway) CreateCachediSCSIVolume(input *CreateCachediSCSIVolumeInput) (*CreateCachediSCSIVolumeOutput, error) {
+	req, out := c.CreateCachediSCSIVolumeRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateSnapshot = "CreateSnapshot"
+
+// CreateSnapshotRequest generates a "aws/request.Request" representing the
+// client's request for the CreateSnapshot operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CreateSnapshot method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the CreateSnapshotRequest method.
+//    req, resp := client.CreateSnapshotRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *StorageGateway) CreateSnapshotRequest(input *CreateSnapshotInput) (req *request.Request, output *CreateSnapshotOutput) {
+	op := &request.Operation{
+		Name:       opCreateSnapshot,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateSnapshotInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CreateSnapshotOutput{}
+	req.Data = output
+	return
+}
+
+// Initiates a snapshot of a volume.
+//
+// AWS Storage Gateway provides the ability to back up point-in-time snapshots
+// of your data to Amazon Simple Storage Service (S3) for durable off-site recovery,
+// as well as import the data to an Amazon Elastic Block Store (EBS) volume
+// in Amazon Elastic Compute Cloud (EC2). You can take snapshots of your gateway
+// volume on a scheduled or ad-hoc basis. This API enables you to take an ad-hoc
+// snapshot. For more information, see Working With Snapshots in the AWS Storage
+// Gateway Console (http://docs.aws.amazon.com/storagegateway/latest/userguide/WorkingWithSnapshots.html).
+//
+// In the CreateSnapshot request, you identify the volume by providing its Amazon
+// Resource Name (ARN). You must also provide a description for the snapshot.
+// When AWS Storage Gateway takes the snapshot of the specified volume, the snapshot
+// and description appear in the AWS Storage Gateway Console. In response,
+// AWS Storage Gateway returns you a snapshot ID. You can use this snapshot
+// ID to check the snapshot progress or later use it when you want to create
+// a volume from a snapshot.
+//
+// To list or delete a snapshot, you must use the Amazon EC2 API. For more
+// information, see DescribeSnapshots or DeleteSnapshot in the EC2 API reference
+// (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_Operations.html).
+//
+// Volume and snapshot IDs are changing to a longer length ID format. For
+// more information, see the important note on the Welcome (http://docs.aws.amazon.com/storagegateway/latest/APIReference/Welcome.html)
+// page.
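+//
+// As a rough sketch (the volume ARN is a placeholder, and the field names
+// assume this package's CreateSnapshotInput type):
+//
+//    resp, err := svc.CreateSnapshot(&storagegateway.CreateSnapshotInput{
+//        VolumeARN:           aws.String("arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABB"),
+//        SnapshotDescription: aws.String("My nightly snapshot"),
+//    })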
+func (c *StorageGateway) CreateSnapshot(input *CreateSnapshotInput) (*CreateSnapshotOutput, error) {
+	req, out := c.CreateSnapshotRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateSnapshotFromVolumeRecoveryPoint = "CreateSnapshotFromVolumeRecoveryPoint"
+
+// CreateSnapshotFromVolumeRecoveryPointRequest generates a "aws/request.Request" representing the
+// client's request for the CreateSnapshotFromVolumeRecoveryPoint operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CreateSnapshotFromVolumeRecoveryPoint method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the CreateSnapshotFromVolumeRecoveryPointRequest method.
+//    req, resp := client.CreateSnapshotFromVolumeRecoveryPointRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *StorageGateway) CreateSnapshotFromVolumeRecoveryPointRequest(input *CreateSnapshotFromVolumeRecoveryPointInput) (req *request.Request, output *CreateSnapshotFromVolumeRecoveryPointOutput) {
+	op := &request.Operation{
+		Name:       opCreateSnapshotFromVolumeRecoveryPoint,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateSnapshotFromVolumeRecoveryPointInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CreateSnapshotFromVolumeRecoveryPointOutput{}
+	req.Data = output
+	return
+}
+
+// Initiates a snapshot of a gateway from a volume recovery point. This operation
+// is supported only for the gateway-cached volume architecture.
+//
+// A volume recovery point is a point in time at which all data of the volume
+// is consistent and from which you can create a snapshot. To get a list of
+// volume recovery points for gateway-cached volumes, use ListVolumeRecoveryPoints.
+//
+// In the CreateSnapshotFromVolumeRecoveryPoint request, you identify the volume
+// by providing its Amazon Resource Name (ARN). You must also provide a description
+// for the snapshot. When AWS Storage Gateway takes a snapshot of the specified
+// volume, the snapshot and its description appear in the AWS Storage Gateway
+// console. In response, AWS Storage Gateway returns you a snapshot ID. You
+// can use this snapshot ID to check the snapshot progress or later use it when
+// you want to create a volume from a snapshot.
+//
+// To list or delete a snapshot, you must use the Amazon EC2 API. For more
+// information, see DescribeSnapshots or DeleteSnapshot in the Amazon Elastic
+// Compute Cloud API Reference.
+func (c *StorageGateway) CreateSnapshotFromVolumeRecoveryPoint(input *CreateSnapshotFromVolumeRecoveryPointInput) (*CreateSnapshotFromVolumeRecoveryPointOutput, error) {
+	req, out := c.CreateSnapshotFromVolumeRecoveryPointRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateStorediSCSIVolume = "CreateStorediSCSIVolume"
+
+// CreateStorediSCSIVolumeRequest generates a "aws/request.Request" representing the
+// client's request for the CreateStorediSCSIVolume operation.
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateStorediSCSIVolume method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateStorediSCSIVolumeRequest method. +// req, resp := client.CreateStorediSCSIVolumeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *StorageGateway) CreateStorediSCSIVolumeRequest(input *CreateStorediSCSIVolumeInput) (req *request.Request, output *CreateStorediSCSIVolumeOutput) { + op := &request.Operation{ + Name: opCreateStorediSCSIVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateStorediSCSIVolumeInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateStorediSCSIVolumeOutput{} + req.Data = output + return +} + +// Creates a volume on a specified gateway. This operation is supported only +// for the gateway-stored volume architecture. +// +// The size of the volume to create is inferred from the disk size. You can +// choose to preserve existing data on the disk, create volume from an existing +// snapshot, or create an empty volume. If you choose to create an empty gateway +// volume, then any existing data on the disk is erased. +// +// In the request you must specify the gateway and the disk information on +// which you are creating the volume. In response, AWS Storage Gateway creates +// the volume and returns volume information such as the volume Amazon Resource +// Name (ARN), its size, and the iSCSI target ARN that initiators can use to +// connect to the volume target. +func (c *StorageGateway) CreateStorediSCSIVolume(input *CreateStorediSCSIVolumeInput) (*CreateStorediSCSIVolumeOutput, error) { + req, out := c.CreateStorediSCSIVolumeRequest(input) + err := req.Send() + return out, err +} + +const opCreateTapeWithBarcode = "CreateTapeWithBarcode" + +// CreateTapeWithBarcodeRequest generates a "aws/request.Request" representing the +// client's request for the CreateTapeWithBarcode operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateTapeWithBarcode method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateTapeWithBarcodeRequest method. 
+// req, resp := client.CreateTapeWithBarcodeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *StorageGateway) CreateTapeWithBarcodeRequest(input *CreateTapeWithBarcodeInput) (req *request.Request, output *CreateTapeWithBarcodeOutput) { + op := &request.Operation{ + Name: opCreateTapeWithBarcode, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTapeWithBarcodeInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateTapeWithBarcodeOutput{} + req.Data = output + return +} + +// Creates a virtual tape by using your own barcode. You write data to the virtual +// tape and then archive the tape. +// +// Cache storage must be allocated to the gateway before you can create a virtual +// tape. Use the AddCache operation to add cache storage to a gateway. +func (c *StorageGateway) CreateTapeWithBarcode(input *CreateTapeWithBarcodeInput) (*CreateTapeWithBarcodeOutput, error) { + req, out := c.CreateTapeWithBarcodeRequest(input) + err := req.Send() + return out, err +} + +const opCreateTapes = "CreateTapes" + +// CreateTapesRequest generates a "aws/request.Request" representing the +// client's request for the CreateTapes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateTapes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateTapesRequest method. +// req, resp := client.CreateTapesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *StorageGateway) CreateTapesRequest(input *CreateTapesInput) (req *request.Request, output *CreateTapesOutput) { + op := &request.Operation{ + Name: opCreateTapes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTapesInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateTapesOutput{} + req.Data = output + return +} + +// Creates one or more virtual tapes. You write data to the virtual tapes and +// then archive the tapes. +// +// Cache storage must be allocated to the gateway before you can create virtual +// tapes. Use the AddCache operation to add cache storage to a gateway. +func (c *StorageGateway) CreateTapes(input *CreateTapesInput) (*CreateTapesOutput, error) { + req, out := c.CreateTapesRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBandwidthRateLimit = "DeleteBandwidthRateLimit" + +// DeleteBandwidthRateLimitRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBandwidthRateLimit operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the DeleteBandwidthRateLimit method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBandwidthRateLimitRequest method. +// req, resp := client.DeleteBandwidthRateLimitRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *StorageGateway) DeleteBandwidthRateLimitRequest(input *DeleteBandwidthRateLimitInput) (req *request.Request, output *DeleteBandwidthRateLimitOutput) { + op := &request.Operation{ + Name: opDeleteBandwidthRateLimit, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteBandwidthRateLimitInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteBandwidthRateLimitOutput{} + req.Data = output + return +} + +// Deletes the bandwidth rate limits of a gateway. You can delete either the +// upload and download bandwidth rate limit, or you can delete both. If you +// delete only one of the limits, the other limit remains unchanged. To specify +// which gateway to work with, use the Amazon Resource Name (ARN) of the gateway +// in your request. +func (c *StorageGateway) DeleteBandwidthRateLimit(input *DeleteBandwidthRateLimitInput) (*DeleteBandwidthRateLimitOutput, error) { + req, out := c.DeleteBandwidthRateLimitRequest(input) + err := req.Send() + return out, err +} + +const opDeleteChapCredentials = "DeleteChapCredentials" + +// DeleteChapCredentialsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteChapCredentials operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteChapCredentials method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteChapCredentialsRequest method. +// req, resp := client.DeleteChapCredentialsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *StorageGateway) DeleteChapCredentialsRequest(input *DeleteChapCredentialsInput) (req *request.Request, output *DeleteChapCredentialsOutput) { + op := &request.Operation{ + Name: opDeleteChapCredentials, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteChapCredentialsInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteChapCredentialsOutput{} + req.Data = output + return +} + +// Deletes Challenge-Handshake Authentication Protocol (CHAP) credentials for +// a specified iSCSI target and initiator pair. +func (c *StorageGateway) DeleteChapCredentials(input *DeleteChapCredentialsInput) (*DeleteChapCredentialsOutput, error) { + req, out := c.DeleteChapCredentialsRequest(input) + err := req.Send() + return out, err +} + +const opDeleteGateway = "DeleteGateway" + +// DeleteGatewayRequest generates a "aws/request.Request" representing the +// client's request for the DeleteGateway operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteGateway method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteGatewayRequest method. +// req, resp := client.DeleteGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *StorageGateway) DeleteGatewayRequest(input *DeleteGatewayInput) (req *request.Request, output *DeleteGatewayOutput) { + op := &request.Operation{ + Name: opDeleteGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteGatewayOutput{} + req.Data = output + return +} + +// Deletes a gateway. To specify which gateway to delete, use the Amazon Resource +// Name (ARN) of the gateway in your request. The operation deletes the gateway; +// however, it does not delete the gateway virtual machine (VM) from your host +// computer. +// +// After you delete a gateway, you cannot reactivate it. Completed snapshots +// of the gateway volumes are not deleted upon deleting the gateway, however, +// pending snapshots will not complete. After you delete a gateway, your next +// step is to remove it from your environment. +// +// You no longer pay software charges after the gateway is deleted; however, +// your existing Amazon EBS snapshots persist and you will continue to be billed +// for these snapshots. You can choose to remove all remaining Amazon EBS snapshots +// by canceling your Amazon EC2 subscription.  If you prefer not to cancel your +// Amazon EC2 subscription, you can delete your snapshots using the Amazon EC2 +// console. For more information, see the AWS Storage Gateway Detail Page (http://aws.amazon.com/storagegateway). +func (c *StorageGateway) DeleteGateway(input *DeleteGatewayInput) (*DeleteGatewayOutput, error) { + req, out := c.DeleteGatewayRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSnapshotSchedule = "DeleteSnapshotSchedule" + +// DeleteSnapshotScheduleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSnapshotSchedule operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteSnapshotSchedule method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteSnapshotScheduleRequest method. 
+//    req, resp := client.DeleteSnapshotScheduleRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *StorageGateway) DeleteSnapshotScheduleRequest(input *DeleteSnapshotScheduleInput) (req *request.Request, output *DeleteSnapshotScheduleOutput) {
+	op := &request.Operation{
+		Name:       opDeleteSnapshotSchedule,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteSnapshotScheduleInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DeleteSnapshotScheduleOutput{}
+	req.Data = output
+	return
+}
+
+// Deletes a snapshot of a volume.
+//
+// You can take snapshots of your gateway volumes on a scheduled or ad hoc
+// basis. This API action enables you to delete a snapshot schedule for a volume.
+// For more information, see Working with Snapshots (http://docs.aws.amazon.com/storagegateway/latest/userguide/WorkingWithSnapshots.html).
+// In the DeleteSnapshotSchedule request, you identify the volume by providing
+// its Amazon Resource Name (ARN).
+//
+// To list or delete a snapshot, you must use the Amazon EC2 API. For more
+// information, see DescribeSnapshots in the Amazon Elastic Compute Cloud API
+// Reference.
+func (c *StorageGateway) DeleteSnapshotSchedule(input *DeleteSnapshotScheduleInput) (*DeleteSnapshotScheduleOutput, error) {
+	req, out := c.DeleteSnapshotScheduleRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteTape = "DeleteTape"
+
+// DeleteTapeRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteTape operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DeleteTape method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DeleteTapeRequest method.
+//    req, resp := client.DeleteTapeRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *StorageGateway) DeleteTapeRequest(input *DeleteTapeInput) (req *request.Request, output *DeleteTapeOutput) {
+	op := &request.Operation{
+		Name:       opDeleteTape,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteTapeInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DeleteTapeOutput{}
+	req.Data = output
+	return
+}
+
+// Deletes the specified virtual tape.
+func (c *StorageGateway) DeleteTape(input *DeleteTapeInput) (*DeleteTapeOutput, error) {
+	req, out := c.DeleteTapeRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteTapeArchive = "DeleteTapeArchive"
+
+// DeleteTapeArchiveRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteTapeArchive operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request.
If +// you just want the service response, call the DeleteTapeArchive method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteTapeArchiveRequest method. +// req, resp := client.DeleteTapeArchiveRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *StorageGateway) DeleteTapeArchiveRequest(input *DeleteTapeArchiveInput) (req *request.Request, output *DeleteTapeArchiveOutput) { + op := &request.Operation{ + Name: opDeleteTapeArchive, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTapeArchiveInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteTapeArchiveOutput{} + req.Data = output + return +} + +// Deletes the specified virtual tape from the virtual tape shelf (VTS). +func (c *StorageGateway) DeleteTapeArchive(input *DeleteTapeArchiveInput) (*DeleteTapeArchiveOutput, error) { + req, out := c.DeleteTapeArchiveRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVolume = "DeleteVolume" + +// DeleteVolumeRequest generates a "aws/request.Request" representing the +// client's request for the DeleteVolume operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteVolume method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteVolumeRequest method. +// req, resp := client.DeleteVolumeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *StorageGateway) DeleteVolumeRequest(input *DeleteVolumeInput) (req *request.Request, output *DeleteVolumeOutput) { + op := &request.Operation{ + Name: opDeleteVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVolumeInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteVolumeOutput{} + req.Data = output + return +} + +// Deletes the specified gateway volume that you previously created using the +// CreateCachediSCSIVolume or CreateStorediSCSIVolume API. For gateway-stored +// volumes, the local disk that was configured as the storage volume is not +// deleted. You can reuse the local disk to create another storage volume. +// +// Before you delete a gateway volume, make sure there are no iSCSI connections +// to the volume you are deleting. You should also make sure there is no snapshot +// in progress. You can use the Amazon Elastic Compute Cloud (Amazon EC2) API +// to query snapshots on the volume you are deleting and check the snapshot +// status. For more information, go to DescribeSnapshots (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeSnapshots.html) +// in the Amazon Elastic Compute Cloud API Reference. +// +// In the request, you must provide the Amazon Resource Name (ARN) of the storage +// volume you want to delete. 
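+//
+// A minimal usage sketch (editor's illustration, not generated SDK text; the
+// volume ARN is a placeholder and "client" is an initialized *StorageGateway):
+//
+//    out, err := client.DeleteVolume(&storagegateway.DeleteVolumeInput{
+//        VolumeARN: aws.String("arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABB"),
+//    })
+//    if err == nil {
+//        // The response echoes the ARN of the volume that was deleted.
+//        fmt.Println(aws.StringValue(out.VolumeARN))
+//    }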
+func (c *StorageGateway) DeleteVolume(input *DeleteVolumeInput) (*DeleteVolumeOutput, error) {
+	req, out := c.DeleteVolumeRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeBandwidthRateLimit = "DescribeBandwidthRateLimit"
+
+// DescribeBandwidthRateLimitRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeBandwidthRateLimit operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeBandwidthRateLimit method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DescribeBandwidthRateLimitRequest method.
+//    req, resp := client.DescribeBandwidthRateLimitRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *StorageGateway) DescribeBandwidthRateLimitRequest(input *DescribeBandwidthRateLimitInput) (req *request.Request, output *DescribeBandwidthRateLimitOutput) {
+	op := &request.Operation{
+		Name:       opDescribeBandwidthRateLimit,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeBandwidthRateLimitInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeBandwidthRateLimitOutput{}
+	req.Data = output
+	return
+}
+
+// Returns the bandwidth rate limits of a gateway. By default, these limits
+// are not set, which means no bandwidth rate limiting is in effect.
+//
+// This operation returns a value for a bandwidth rate limit only if the limit
+// is set. If no limits are set for the gateway, then this operation returns
+// only the gateway ARN in the response body. To specify which gateway to describe,
+// use the Amazon Resource Name (ARN) of the gateway in your request.
+func (c *StorageGateway) DescribeBandwidthRateLimit(input *DescribeBandwidthRateLimitInput) (*DescribeBandwidthRateLimitOutput, error) {
+	req, out := c.DescribeBandwidthRateLimitRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeCache = "DescribeCache"
+
+// DescribeCacheRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeCache operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeCache method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DescribeCacheRequest method.
+// req, resp := client.DescribeCacheRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *StorageGateway) DescribeCacheRequest(input *DescribeCacheInput) (req *request.Request, output *DescribeCacheOutput) { + op := &request.Operation{ + Name: opDescribeCache, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeCacheInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCacheOutput{} + req.Data = output + return +} + +// Returns information about the cache of a gateway. This operation is supported +// only for the gateway-cached volume architecture. +// +// The response includes disk IDs that are configured as cache, and it includes +// the amount of cache allocated and used. +func (c *StorageGateway) DescribeCache(input *DescribeCacheInput) (*DescribeCacheOutput, error) { + req, out := c.DescribeCacheRequest(input) + err := req.Send() + return out, err +} + +const opDescribeCachediSCSIVolumes = "DescribeCachediSCSIVolumes" + +// DescribeCachediSCSIVolumesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCachediSCSIVolumes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeCachediSCSIVolumes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeCachediSCSIVolumesRequest method. +// req, resp := client.DescribeCachediSCSIVolumesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *StorageGateway) DescribeCachediSCSIVolumesRequest(input *DescribeCachediSCSIVolumesInput) (req *request.Request, output *DescribeCachediSCSIVolumesOutput) { + op := &request.Operation{ + Name: opDescribeCachediSCSIVolumes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeCachediSCSIVolumesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCachediSCSIVolumesOutput{} + req.Data = output + return +} + +// Returns a description of the gateway volumes specified in the request. This +// operation is supported only for the gateway-cached volume architecture. +// +// The list of gateway volumes in the request must be from one gateway. In +// the response Amazon Storage Gateway returns volume information sorted by +// volume Amazon Resource Name (ARN). +func (c *StorageGateway) DescribeCachediSCSIVolumes(input *DescribeCachediSCSIVolumesInput) (*DescribeCachediSCSIVolumesOutput, error) { + req, out := c.DescribeCachediSCSIVolumesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeChapCredentials = "DescribeChapCredentials" + +// DescribeChapCredentialsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeChapCredentials operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeChapCredentials method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeChapCredentialsRequest method. +// req, resp := client.DescribeChapCredentialsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *StorageGateway) DescribeChapCredentialsRequest(input *DescribeChapCredentialsInput) (req *request.Request, output *DescribeChapCredentialsOutput) { + op := &request.Operation{ + Name: opDescribeChapCredentials, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeChapCredentialsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeChapCredentialsOutput{} + req.Data = output + return +} + +// Returns an array of Challenge-Handshake Authentication Protocol (CHAP) credentials +// information for a specified iSCSI target, one for each target-initiator pair. +func (c *StorageGateway) DescribeChapCredentials(input *DescribeChapCredentialsInput) (*DescribeChapCredentialsOutput, error) { + req, out := c.DescribeChapCredentialsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeGatewayInformation = "DescribeGatewayInformation" + +// DescribeGatewayInformationRequest generates a "aws/request.Request" representing the +// client's request for the DescribeGatewayInformation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeGatewayInformation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeGatewayInformationRequest method. +// req, resp := client.DescribeGatewayInformationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *StorageGateway) DescribeGatewayInformationRequest(input *DescribeGatewayInformationInput) (req *request.Request, output *DescribeGatewayInformationOutput) { + op := &request.Operation{ + Name: opDescribeGatewayInformation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeGatewayInformationInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeGatewayInformationOutput{} + req.Data = output + return +} + +// Returns metadata about a gateway such as its name, network interfaces, configured +// time zone, and the state (whether the gateway is running or not). To specify +// which gateway to describe, use the Amazon Resource Name (ARN) of the gateway +// in your request. 
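+//
+// A minimal usage sketch (editor's illustration, not generated SDK text; the
+// gateway ARN is a placeholder and "client" is an initialized *StorageGateway):
+//
+//    out, err := client.DescribeGatewayInformation(&storagegateway.DescribeGatewayInformationInput{
+//        GatewayARN: aws.String("arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B"),
+//    })
+//    if err == nil {
+//        // Print the gateway's name and running state.
+//        fmt.Println(aws.StringValue(out.GatewayName), aws.StringValue(out.GatewayState))
+//    }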
+func (c *StorageGateway) DescribeGatewayInformation(input *DescribeGatewayInformationInput) (*DescribeGatewayInformationOutput, error) { + req, out := c.DescribeGatewayInformationRequest(input) + err := req.Send() + return out, err +} + +const opDescribeMaintenanceStartTime = "DescribeMaintenanceStartTime" + +// DescribeMaintenanceStartTimeRequest generates a "aws/request.Request" representing the +// client's request for the DescribeMaintenanceStartTime operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeMaintenanceStartTime method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeMaintenanceStartTimeRequest method. +// req, resp := client.DescribeMaintenanceStartTimeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *StorageGateway) DescribeMaintenanceStartTimeRequest(input *DescribeMaintenanceStartTimeInput) (req *request.Request, output *DescribeMaintenanceStartTimeOutput) { + op := &request.Operation{ + Name: opDescribeMaintenanceStartTime, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeMaintenanceStartTimeInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeMaintenanceStartTimeOutput{} + req.Data = output + return +} + +// Returns your gateway's weekly maintenance start time including the day and +// time of the week. Note that values are in terms of the gateway's time zone. +func (c *StorageGateway) DescribeMaintenanceStartTime(input *DescribeMaintenanceStartTimeInput) (*DescribeMaintenanceStartTimeOutput, error) { + req, out := c.DescribeMaintenanceStartTimeRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSnapshotSchedule = "DescribeSnapshotSchedule" + +// DescribeSnapshotScheduleRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSnapshotSchedule operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeSnapshotSchedule method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeSnapshotScheduleRequest method. 
+// req, resp := client.DescribeSnapshotScheduleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *StorageGateway) DescribeSnapshotScheduleRequest(input *DescribeSnapshotScheduleInput) (req *request.Request, output *DescribeSnapshotScheduleOutput) { + op := &request.Operation{ + Name: opDescribeSnapshotSchedule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSnapshotScheduleInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSnapshotScheduleOutput{} + req.Data = output + return +} + +// Describes the snapshot schedule for the specified gateway volume. The snapshot +// schedule information includes intervals at which snapshots are automatically +// initiated on the volume. +func (c *StorageGateway) DescribeSnapshotSchedule(input *DescribeSnapshotScheduleInput) (*DescribeSnapshotScheduleOutput, error) { + req, out := c.DescribeSnapshotScheduleRequest(input) + err := req.Send() + return out, err +} + +const opDescribeStorediSCSIVolumes = "DescribeStorediSCSIVolumes" + +// DescribeStorediSCSIVolumesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeStorediSCSIVolumes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeStorediSCSIVolumes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeStorediSCSIVolumesRequest method. +// req, resp := client.DescribeStorediSCSIVolumesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *StorageGateway) DescribeStorediSCSIVolumesRequest(input *DescribeStorediSCSIVolumesInput) (req *request.Request, output *DescribeStorediSCSIVolumesOutput) { + op := &request.Operation{ + Name: opDescribeStorediSCSIVolumes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeStorediSCSIVolumesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeStorediSCSIVolumesOutput{} + req.Data = output + return +} + +// Returns the description of the gateway volumes specified in the request. +// The list of gateway volumes in the request must be from one gateway. In the +// response Amazon Storage Gateway returns volume information sorted by volume +// ARNs. +func (c *StorageGateway) DescribeStorediSCSIVolumes(input *DescribeStorediSCSIVolumesInput) (*DescribeStorediSCSIVolumesOutput, error) { + req, out := c.DescribeStorediSCSIVolumesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeTapeArchives = "DescribeTapeArchives" + +// DescribeTapeArchivesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTapeArchives operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeTapeArchives method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeTapeArchivesRequest method. +// req, resp := client.DescribeTapeArchivesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *StorageGateway) DescribeTapeArchivesRequest(input *DescribeTapeArchivesInput) (req *request.Request, output *DescribeTapeArchivesOutput) { + op := &request.Operation{ + Name: opDescribeTapeArchives, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeTapeArchivesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTapeArchivesOutput{} + req.Data = output + return +} + +// Returns a description of specified virtual tapes in the virtual tape shelf +// (VTS). +// +// If a specific TapeARN is not specified, AWS Storage Gateway returns a description +// of all virtual tapes found in the VTS associated with your account. +func (c *StorageGateway) DescribeTapeArchives(input *DescribeTapeArchivesInput) (*DescribeTapeArchivesOutput, error) { + req, out := c.DescribeTapeArchivesRequest(input) + err := req.Send() + return out, err +} + +// DescribeTapeArchivesPages iterates over the pages of a DescribeTapeArchives operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeTapeArchives method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeTapeArchives operation. +// pageNum := 0 +// err := client.DescribeTapeArchivesPages(params, +// func(page *DescribeTapeArchivesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *StorageGateway) DescribeTapeArchivesPages(input *DescribeTapeArchivesInput, fn func(p *DescribeTapeArchivesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeTapeArchivesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeTapeArchivesOutput), lastPage) + }) +} + +const opDescribeTapeRecoveryPoints = "DescribeTapeRecoveryPoints" + +// DescribeTapeRecoveryPointsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTapeRecoveryPoints operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the DescribeTapeRecoveryPoints method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeTapeRecoveryPointsRequest method. +// req, resp := client.DescribeTapeRecoveryPointsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *StorageGateway) DescribeTapeRecoveryPointsRequest(input *DescribeTapeRecoveryPointsInput) (req *request.Request, output *DescribeTapeRecoveryPointsOutput) { + op := &request.Operation{ + Name: opDescribeTapeRecoveryPoints, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeTapeRecoveryPointsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTapeRecoveryPointsOutput{} + req.Data = output + return +} + +// Returns a list of virtual tape recovery points that are available for the +// specified gateway-VTL. +// +// A recovery point is a point-in-time view of a virtual tape at which all +// the data on the virtual tape is consistent. If your gateway crashes, virtual +// tapes that have recovery points can be recovered to a new gateway. +func (c *StorageGateway) DescribeTapeRecoveryPoints(input *DescribeTapeRecoveryPointsInput) (*DescribeTapeRecoveryPointsOutput, error) { + req, out := c.DescribeTapeRecoveryPointsRequest(input) + err := req.Send() + return out, err +} + +// DescribeTapeRecoveryPointsPages iterates over the pages of a DescribeTapeRecoveryPoints operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeTapeRecoveryPoints method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeTapeRecoveryPoints operation. +// pageNum := 0 +// err := client.DescribeTapeRecoveryPointsPages(params, +// func(page *DescribeTapeRecoveryPointsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *StorageGateway) DescribeTapeRecoveryPointsPages(input *DescribeTapeRecoveryPointsInput, fn func(p *DescribeTapeRecoveryPointsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeTapeRecoveryPointsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeTapeRecoveryPointsOutput), lastPage) + }) +} + +const opDescribeTapes = "DescribeTapes" + +// DescribeTapesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTapes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeTapes method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeTapesRequest method. +// req, resp := client.DescribeTapesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *StorageGateway) DescribeTapesRequest(input *DescribeTapesInput) (req *request.Request, output *DescribeTapesOutput) { + op := &request.Operation{ + Name: opDescribeTapes, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeTapesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTapesOutput{} + req.Data = output + return +} + +// Returns a description of the specified Amazon Resource Name (ARN) of virtual +// tapes. If a TapeARN is not specified, returns a description of all virtual +// tapes associated with the specified gateway. +func (c *StorageGateway) DescribeTapes(input *DescribeTapesInput) (*DescribeTapesOutput, error) { + req, out := c.DescribeTapesRequest(input) + err := req.Send() + return out, err +} + +// DescribeTapesPages iterates over the pages of a DescribeTapes operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeTapes method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeTapes operation. +// pageNum := 0 +// err := client.DescribeTapesPages(params, +// func(page *DescribeTapesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *StorageGateway) DescribeTapesPages(input *DescribeTapesInput, fn func(p *DescribeTapesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeTapesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeTapesOutput), lastPage) + }) +} + +const opDescribeUploadBuffer = "DescribeUploadBuffer" + +// DescribeUploadBufferRequest generates a "aws/request.Request" representing the +// client's request for the DescribeUploadBuffer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeUploadBuffer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeUploadBufferRequest method. 
+// req, resp := client.DescribeUploadBufferRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *StorageGateway) DescribeUploadBufferRequest(input *DescribeUploadBufferInput) (req *request.Request, output *DescribeUploadBufferOutput) { + op := &request.Operation{ + Name: opDescribeUploadBuffer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeUploadBufferInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeUploadBufferOutput{} + req.Data = output + return +} + +// Returns information about the upload buffer of a gateway. This operation +// is supported for both the gateway-stored and gateway-cached volume architectures. +// +// The response includes disk IDs that are configured as upload buffer space, +// and it includes the amount of upload buffer space allocated and used. +func (c *StorageGateway) DescribeUploadBuffer(input *DescribeUploadBufferInput) (*DescribeUploadBufferOutput, error) { + req, out := c.DescribeUploadBufferRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVTLDevices = "DescribeVTLDevices" + +// DescribeVTLDevicesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVTLDevices operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeVTLDevices method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeVTLDevicesRequest method. +// req, resp := client.DescribeVTLDevicesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *StorageGateway) DescribeVTLDevicesRequest(input *DescribeVTLDevicesInput) (req *request.Request, output *DescribeVTLDevicesOutput) { + op := &request.Operation{ + Name: opDescribeVTLDevices, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeVTLDevicesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVTLDevicesOutput{} + req.Data = output + return +} + +// Returns a description of virtual tape library (VTL) devices for the specified +// gateway. In the response, AWS Storage Gateway returns VTL device information. +// +// The list of VTL devices must be from one gateway. +func (c *StorageGateway) DescribeVTLDevices(input *DescribeVTLDevicesInput) (*DescribeVTLDevicesOutput, error) { + req, out := c.DescribeVTLDevicesRequest(input) + err := req.Send() + return out, err +} + +// DescribeVTLDevicesPages iterates over the pages of a DescribeVTLDevices operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeVTLDevices method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. 
+// +// // Example iterating over at most 3 pages of a DescribeVTLDevices operation. +// pageNum := 0 +// err := client.DescribeVTLDevicesPages(params, +// func(page *DescribeVTLDevicesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *StorageGateway) DescribeVTLDevicesPages(input *DescribeVTLDevicesInput, fn func(p *DescribeVTLDevicesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeVTLDevicesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeVTLDevicesOutput), lastPage) + }) +} + +const opDescribeWorkingStorage = "DescribeWorkingStorage" + +// DescribeWorkingStorageRequest generates a "aws/request.Request" representing the +// client's request for the DescribeWorkingStorage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeWorkingStorage method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeWorkingStorageRequest method. +// req, resp := client.DescribeWorkingStorageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *StorageGateway) DescribeWorkingStorageRequest(input *DescribeWorkingStorageInput) (req *request.Request, output *DescribeWorkingStorageOutput) { + op := &request.Operation{ + Name: opDescribeWorkingStorage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeWorkingStorageInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeWorkingStorageOutput{} + req.Data = output + return +} + +// Returns information about the working storage of a gateway. This operation +// is supported only for the gateway-stored volume architecture. This operation +// is deprecated in cached-volumes API version (20120630). Use DescribeUploadBuffer +// instead. +// +// Working storage is also referred to as upload buffer. You can also use +// the DescribeUploadBuffer operation to add upload buffer to a stored-volume +// gateway. +// +// The response includes disk IDs that are configured as working storage, +// and it includes the amount of working storage allocated and used. +func (c *StorageGateway) DescribeWorkingStorage(input *DescribeWorkingStorageInput) (*DescribeWorkingStorageOutput, error) { + req, out := c.DescribeWorkingStorageRequest(input) + err := req.Send() + return out, err +} + +const opDisableGateway = "DisableGateway" + +// DisableGatewayRequest generates a "aws/request.Request" representing the +// client's request for the DisableGateway operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DisableGateway method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DisableGatewayRequest method. +// req, resp := client.DisableGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *StorageGateway) DisableGatewayRequest(input *DisableGatewayInput) (req *request.Request, output *DisableGatewayOutput) { + op := &request.Operation{ + Name: opDisableGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &DisableGatewayOutput{} + req.Data = output + return +} + +// Disables a gateway when the gateway is no longer functioning. For example, +// if your gateway VM is damaged, you can disable the gateway so you can recover +// virtual tapes. +// +// Use this operation for a gateway-VTL that is not reachable or not functioning. +// +// Once a gateway is disabled it cannot be enabled. +func (c *StorageGateway) DisableGateway(input *DisableGatewayInput) (*DisableGatewayOutput, error) { + req, out := c.DisableGatewayRequest(input) + err := req.Send() + return out, err +} + +const opListGateways = "ListGateways" + +// ListGatewaysRequest generates a "aws/request.Request" representing the +// client's request for the ListGateways operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListGateways method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListGatewaysRequest method. +// req, resp := client.ListGatewaysRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *StorageGateway) ListGatewaysRequest(input *ListGatewaysInput) (req *request.Request, output *ListGatewaysOutput) { + op := &request.Operation{ + Name: opListGateways, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListGatewaysInput{} + } + + req = c.newRequest(op, input, output) + output = &ListGatewaysOutput{} + req.Data = output + return +} + +// Lists gateways owned by an AWS account in a region specified in the request. +// The returned list is ordered by gateway Amazon Resource Name (ARN). +// +// By default, the operation returns a maximum of 100 gateways. This operation +// supports pagination that allows you to optionally reduce the number of gateways +// returned in a response. 
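+//
+// // Illustrative sketch only (not part of the generated documentation):
+// // request at most 10 gateways per page by setting the Limit field
+// // (Limit is the paginator's limit token for this operation).
+// resp, err := client.ListGateways(&ListGatewaysInput{
+// Limit: aws.Int64(10),
+// })
+// fmt.Println(resp, err)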
+//
+// If you have more gateways than are returned in a response (that is, the
+// response returns only a truncated list of your gateways), the response contains
+// a marker that you can specify in your next request to fetch the next page
+// of gateways.
+func (c *StorageGateway) ListGateways(input *ListGatewaysInput) (*ListGatewaysOutput, error) {
+ req, out := c.ListGatewaysRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// ListGatewaysPages iterates over the pages of a ListGateways operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListGateways method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a ListGateways operation.
+// pageNum := 0
+// err := client.ListGatewaysPages(params,
+// func(page *ListGatewaysOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *StorageGateway) ListGatewaysPages(input *ListGatewaysInput, fn func(p *ListGatewaysOutput, lastPage bool) (shouldContinue bool)) error {
+ page, _ := c.ListGatewaysRequest(input)
+ page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
+ return fn(p.(*ListGatewaysOutput), lastPage)
+ })
+}
+
+const opListLocalDisks = "ListLocalDisks"
+
+// ListLocalDisksRequest generates a "aws/request.Request" representing the
+// client's request for the ListLocalDisks operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ListLocalDisks method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the ListLocalDisksRequest method.
+// req, resp := client.ListLocalDisksRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *StorageGateway) ListLocalDisksRequest(input *ListLocalDisksInput) (req *request.Request, output *ListLocalDisksOutput) {
+ op := &request.Operation{
+ Name: opListLocalDisks,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ListLocalDisksInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ListLocalDisksOutput{}
+ req.Data = output
+ return
+}
+
+// Returns a list of the gateway's local disks. To specify which gateway to
+// describe, you use the Amazon Resource Name (ARN) of the gateway in the body
+// of the request.
+//
+// The request returns a list of all disks, specifying which are configured
+// as working storage, cache storage, or stored volume, or are not configured
+// at all. The response includes a DiskStatus field. This field can have a value
+// of present (the disk is available to use), missing (the disk is no longer
+// connected to the gateway), or mismatch (the disk node is occupied by a disk
+// that has incorrect metadata or the disk content is corrupted).
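+//
+// // Illustrative sketch only (not part of the generated documentation):
+// // report the status of each local disk. It assumes the Disks field on
+// // ListLocalDisksOutput and the DiskId/DiskStatus fields on the Disk type;
+// // the GatewayARN value is a placeholder.
+// resp, err := client.ListLocalDisks(&ListLocalDisksInput{
+// GatewayARN: aws.String("arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B"),
+// })
+// if err == nil {
+// for _, disk := range resp.Disks {
+// fmt.Println(*disk.DiskId, *disk.DiskStatus) // "present", "missing", or "mismatch"
+// }
+// }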
+func (c *StorageGateway) ListLocalDisks(input *ListLocalDisksInput) (*ListLocalDisksOutput, error) { + req, out := c.ListLocalDisksRequest(input) + err := req.Send() + return out, err +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTagsForResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *StorageGateway) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTagsForResourceOutput{} + req.Data = output + return +} + +// Lists the tags that have been added to the specified resource. +func (c *StorageGateway) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + err := req.Send() + return out, err +} + +const opListTapes = "ListTapes" + +// ListTapesRequest generates a "aws/request.Request" representing the +// client's request for the ListTapes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTapes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTapesRequest method. +// req, resp := client.ListTapesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *StorageGateway) ListTapesRequest(input *ListTapesInput) (req *request.Request, output *ListTapesOutput) { + op := &request.Operation{ + Name: opListTapes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTapesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTapesOutput{} + req.Data = output + return +} + +// Lists virtual tapes in your virtual tape library (VTL) and your virtual tape +// shelf (VTS). You specify the tapes to list by specifying one or more tape +// Amazon Resource Names (ARNs). If you don't specify a tape ARN, the operation +// lists all virtual tapes in both your VTL and VTS. 
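+//
+// // Illustrative sketch only (not part of the generated documentation):
+// // list two specific tapes by ARN, assuming the TapeARNs field on
+// // ListTapesInput; the ARN values are placeholders.
+// resp, err := client.ListTapes(&ListTapesInput{
+// TapeARNs: []*string{
+// aws.String("arn:aws:storagegateway:us-east-1:111122223333:tape/TEST04A2A1"),
+// aws.String("arn:aws:storagegateway:us-east-1:111122223333:tape/TEST05A2A0"),
+// },
+// })
+// fmt.Println(resp, err)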
+//
+// This operation supports pagination. By default, the operation returns a
+// maximum of 100 tapes. You can optionally specify the Limit parameter
+// in the body to limit the number of tapes in the response. If the number of
+// tapes returned in the response is truncated, the response includes a Marker
+// element that you can use in your subsequent request to retrieve the next
+// set of tapes.
+func (c *StorageGateway) ListTapes(input *ListTapesInput) (*ListTapesOutput, error) {
+ req, out := c.ListTapesRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opListVolumeInitiators = "ListVolumeInitiators"
+
+// ListVolumeInitiatorsRequest generates a "aws/request.Request" representing the
+// client's request for the ListVolumeInitiators operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ListVolumeInitiators method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the ListVolumeInitiatorsRequest method.
+// req, resp := client.ListVolumeInitiatorsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *StorageGateway) ListVolumeInitiatorsRequest(input *ListVolumeInitiatorsInput) (req *request.Request, output *ListVolumeInitiatorsOutput) {
+ op := &request.Operation{
+ Name: opListVolumeInitiators,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ListVolumeInitiatorsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ListVolumeInitiatorsOutput{}
+ req.Data = output
+ return
+}
+
+// Lists iSCSI initiators that are connected to a volume. You can use this operation
+// to determine whether a volume is being used or not.
+func (c *StorageGateway) ListVolumeInitiators(input *ListVolumeInitiatorsInput) (*ListVolumeInitiatorsOutput, error) {
+ req, out := c.ListVolumeInitiatorsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opListVolumeRecoveryPoints = "ListVolumeRecoveryPoints"
+
+// ListVolumeRecoveryPointsRequest generates a "aws/request.Request" representing the
+// client's request for the ListVolumeRecoveryPoints operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ListVolumeRecoveryPoints method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the ListVolumeRecoveryPointsRequest method.
+// req, resp := client.ListVolumeRecoveryPointsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *StorageGateway) ListVolumeRecoveryPointsRequest(input *ListVolumeRecoveryPointsInput) (req *request.Request, output *ListVolumeRecoveryPointsOutput) {
+ op := &request.Operation{
+ Name: opListVolumeRecoveryPoints,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ListVolumeRecoveryPointsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ListVolumeRecoveryPointsOutput{}
+ req.Data = output
+ return
+}
+
+// Lists the recovery points for a specified gateway. This operation is supported
+// only for the gateway-cached volume architecture.
+//
+// Each gateway-cached volume has one recovery point. A volume recovery point
+// is a point in time at which all data of the volume is consistent and from
+// which you can create a snapshot. To create a snapshot from a volume recovery
+// point, use the CreateSnapshotFromVolumeRecoveryPoint operation.
+func (c *StorageGateway) ListVolumeRecoveryPoints(input *ListVolumeRecoveryPointsInput) (*ListVolumeRecoveryPointsOutput, error) {
+ req, out := c.ListVolumeRecoveryPointsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opListVolumes = "ListVolumes"
+
+// ListVolumesRequest generates a "aws/request.Request" representing the
+// client's request for the ListVolumes operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ListVolumes method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the ListVolumesRequest method.
+// req, resp := client.ListVolumesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *StorageGateway) ListVolumesRequest(input *ListVolumesInput) (req *request.Request, output *ListVolumesOutput) {
+ op := &request.Operation{
+ Name: opListVolumes,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"Marker"},
+ OutputTokens: []string{"Marker"},
+ LimitToken: "Limit",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &ListVolumesInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ListVolumesOutput{}
+ req.Data = output
+ return
+}
+
+// Lists the iSCSI stored volumes of a gateway. Results are sorted by volume
+// ARN. The response includes only the volume ARNs. If you want additional volume
+// information, use the DescribeStorediSCSIVolumes API.
+//
+// The operation supports pagination. By default, the operation returns a maximum
+// of 100 volumes. You can optionally specify the Limit field in the body
+// to limit the number of volumes in the response. If the number of volumes
+// returned in the response is truncated, the response includes a Marker field.
+// You can use this Marker value in your subsequent request to retrieve the
+// next set of volumes.
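+//
+// // Illustrative sketch only (not part of the generated documentation):
+// // page through volumes manually with Marker. The VolumeInfos field name
+// // on the output is an assumption; Marker and Limit follow the paginator
+// // tokens declared for this operation.
+// in := &ListVolumesInput{Limit: aws.Int64(25)}
+// for {
+// resp, err := client.ListVolumes(in)
+// if err != nil {
+// break
+// }
+// fmt.Println(resp.VolumeInfos)
+// if resp.Marker == nil {
+// break // no more pages
+// }
+// in.Marker = resp.Marker
+// }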
+func (c *StorageGateway) ListVolumes(input *ListVolumesInput) (*ListVolumesOutput, error) { + req, out := c.ListVolumesRequest(input) + err := req.Send() + return out, err +} + +// ListVolumesPages iterates over the pages of a ListVolumes operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListVolumes method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListVolumes operation. +// pageNum := 0 +// err := client.ListVolumesPages(params, +// func(page *ListVolumesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *StorageGateway) ListVolumesPages(input *ListVolumesInput, fn func(p *ListVolumesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListVolumesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListVolumesOutput), lastPage) + }) +} + +const opRemoveTagsFromResource = "RemoveTagsFromResource" + +// RemoveTagsFromResourceRequest generates a "aws/request.Request" representing the +// client's request for the RemoveTagsFromResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RemoveTagsFromResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RemoveTagsFromResourceRequest method. +// req, resp := client.RemoveTagsFromResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *StorageGateway) RemoveTagsFromResourceRequest(input *RemoveTagsFromResourceInput) (req *request.Request, output *RemoveTagsFromResourceOutput) { + op := &request.Operation{ + Name: opRemoveTagsFromResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveTagsFromResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &RemoveTagsFromResourceOutput{} + req.Data = output + return +} + +// Removes one or more tags from the specified resource. +func (c *StorageGateway) RemoveTagsFromResource(input *RemoveTagsFromResourceInput) (*RemoveTagsFromResourceOutput, error) { + req, out := c.RemoveTagsFromResourceRequest(input) + err := req.Send() + return out, err +} + +const opResetCache = "ResetCache" + +// ResetCacheRequest generates a "aws/request.Request" representing the +// client's request for the ResetCache operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If
+// you just want the service response, call the ResetCache method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the ResetCacheRequest method.
+// req, resp := client.ResetCacheRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *StorageGateway) ResetCacheRequest(input *ResetCacheInput) (req *request.Request, output *ResetCacheOutput) {
+ op := &request.Operation{
+ Name: opResetCache,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ResetCacheInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ResetCacheOutput{}
+ req.Data = output
+ return
+}
+
+// Resets all cache disks that have encountered an error and makes the disks
+// available for reconfiguration as cache storage. If your cache disk encounters
+// an error, the gateway prevents read and write operations on virtual tapes
+// in the gateway. For example, an error can occur when a disk is corrupted
+// or removed from the gateway. When a cache is reset, the gateway loses its
+// cache storage. At this point you can reconfigure the disks as cache disks.
+//
+// If the cache disk you are resetting contains data that has not been uploaded
+// to Amazon S3 yet, that data can be lost. After you reset cache disks, there
+// will be no configured cache disks left in the gateway, so you must configure
+// at least one new cache disk for your gateway to function properly.
+func (c *StorageGateway) ResetCache(input *ResetCacheInput) (*ResetCacheOutput, error) {
+ req, out := c.ResetCacheRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opRetrieveTapeArchive = "RetrieveTapeArchive"
+
+// RetrieveTapeArchiveRequest generates a "aws/request.Request" representing the
+// client's request for the RetrieveTapeArchive operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the RetrieveTapeArchive method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the RetrieveTapeArchiveRequest method.
+// req, resp := client.RetrieveTapeArchiveRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *StorageGateway) RetrieveTapeArchiveRequest(input *RetrieveTapeArchiveInput) (req *request.Request, output *RetrieveTapeArchiveOutput) {
+ op := &request.Operation{
+ Name: opRetrieveTapeArchive,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &RetrieveTapeArchiveInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &RetrieveTapeArchiveOutput{}
+ req.Data = output
+ return
+}
+
+// Retrieves an archived virtual tape from the virtual tape shelf (VTS) to a
+// gateway-VTL. Virtual tapes archived in the VTS are not associated with any
+// gateway. However, after a tape is retrieved, it is associated with a gateway,
+// even though it is also listed in the VTS.
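+//
+// // Illustrative sketch only (not part of the generated documentation):
+// // retrieve an archived tape to a gateway-VTL; the GatewayARN and TapeARN
+// // values below are placeholders.
+// resp, err := client.RetrieveTapeArchive(&RetrieveTapeArchiveInput{
+// GatewayARN: aws.String("arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B"),
+// TapeARN: aws.String("arn:aws:storagegateway:us-east-1:111122223333:tape/TEST04A2A1"),
+// })
+// fmt.Println(resp, err)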
+// +// Once a tape is successfully retrieved to a gateway, it cannot be retrieved +// again to another gateway. You must archive the tape again before you can +// retrieve it to another gateway. +func (c *StorageGateway) RetrieveTapeArchive(input *RetrieveTapeArchiveInput) (*RetrieveTapeArchiveOutput, error) { + req, out := c.RetrieveTapeArchiveRequest(input) + err := req.Send() + return out, err +} + +const opRetrieveTapeRecoveryPoint = "RetrieveTapeRecoveryPoint" + +// RetrieveTapeRecoveryPointRequest generates a "aws/request.Request" representing the +// client's request for the RetrieveTapeRecoveryPoint operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RetrieveTapeRecoveryPoint method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RetrieveTapeRecoveryPointRequest method. +// req, resp := client.RetrieveTapeRecoveryPointRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *StorageGateway) RetrieveTapeRecoveryPointRequest(input *RetrieveTapeRecoveryPointInput) (req *request.Request, output *RetrieveTapeRecoveryPointOutput) { + op := &request.Operation{ + Name: opRetrieveTapeRecoveryPoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RetrieveTapeRecoveryPointInput{} + } + + req = c.newRequest(op, input, output) + output = &RetrieveTapeRecoveryPointOutput{} + req.Data = output + return +} + +// Retrieves the recovery point for the specified virtual tape. +// +// A recovery point is a point in time view of a virtual tape at which all +// the data on the tape is consistent. If your gateway crashes, virtual tapes +// that have recovery points can be recovered to a new gateway. +// +// The virtual tape can be retrieved to only one gateway. The retrieved tape +// is read-only. The virtual tape can be retrieved to only a gateway-VTL. There +// is no charge for retrieving recovery points. +func (c *StorageGateway) RetrieveTapeRecoveryPoint(input *RetrieveTapeRecoveryPointInput) (*RetrieveTapeRecoveryPointOutput, error) { + req, out := c.RetrieveTapeRecoveryPointRequest(input) + err := req.Send() + return out, err +} + +const opSetLocalConsolePassword = "SetLocalConsolePassword" + +// SetLocalConsolePasswordRequest generates a "aws/request.Request" representing the +// client's request for the SetLocalConsolePassword operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetLocalConsolePassword method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+//
+// // Example sending a request using the SetLocalConsolePasswordRequest method.
+// req, resp := client.SetLocalConsolePasswordRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *StorageGateway) SetLocalConsolePasswordRequest(input *SetLocalConsolePasswordInput) (req *request.Request, output *SetLocalConsolePasswordOutput) {
+ op := &request.Operation{
+ Name: opSetLocalConsolePassword,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &SetLocalConsolePasswordInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &SetLocalConsolePasswordOutput{}
+ req.Data = output
+ return
+}
+
+// Sets the password for your VM local console. When you log in to the local
+// console for the first time, you log in to the VM with the default credentials.
+// We recommend that you set a new password. You don't need to know the default
+// password to set a new password.
+func (c *StorageGateway) SetLocalConsolePassword(input *SetLocalConsolePasswordInput) (*SetLocalConsolePasswordOutput, error) {
+ req, out := c.SetLocalConsolePasswordRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opShutdownGateway = "ShutdownGateway"
+
+// ShutdownGatewayRequest generates a "aws/request.Request" representing the
+// client's request for the ShutdownGateway operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ShutdownGateway method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the ShutdownGatewayRequest method.
+// req, resp := client.ShutdownGatewayRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *StorageGateway) ShutdownGatewayRequest(input *ShutdownGatewayInput) (req *request.Request, output *ShutdownGatewayOutput) {
+ op := &request.Operation{
+ Name: opShutdownGateway,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ShutdownGatewayInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ShutdownGatewayOutput{}
+ req.Data = output
+ return
+}
+
+// Shuts down a gateway. To specify which gateway to shut down, use the Amazon
+// Resource Name (ARN) of the gateway in the body of your request.
+//
+// The operation shuts down the gateway service component running in the storage
+// gateway's virtual machine (VM) and not the VM.
+//
+// If you want to shut down the VM, it is recommended that you first shut down
+// the gateway component in the VM to avoid unpredictable conditions.
+//
+// After the gateway is shut down, you cannot call any other API except StartGateway,
+// DescribeGatewayInformation, and ListGateways. For more information, see ActivateGateway.
+// Your applications cannot read from or write to the gateway's storage volumes,
+// and there are no snapshots taken.
+//
+// When you make a shutdown request, you will get a 200 OK success response
+// immediately. However, it might take some time for the gateway to shut down.
+// You can call the DescribeGatewayInformation API to check the status. For
+// more information, see ActivateGateway.
+//
+// If you do not intend to use the gateway again, you must delete the gateway
+// (using DeleteGateway) so that you no longer pay software charges associated
+// with the gateway.
+func (c *StorageGateway) ShutdownGateway(input *ShutdownGatewayInput) (*ShutdownGatewayOutput, error) {
+ req, out := c.ShutdownGatewayRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opStartGateway = "StartGateway"
+
+// StartGatewayRequest generates a "aws/request.Request" representing the
+// client's request for the StartGateway operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the StartGateway method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the StartGatewayRequest method.
+// req, resp := client.StartGatewayRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *StorageGateway) StartGatewayRequest(input *StartGatewayInput) (req *request.Request, output *StartGatewayOutput) {
+ op := &request.Operation{
+ Name: opStartGateway,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &StartGatewayInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &StartGatewayOutput{}
+ req.Data = output
+ return
+}
+
+// Starts a gateway that you previously shut down (see ShutdownGateway). After
+// the gateway starts, you can then make other API calls, your applications
+// can read from or write to the gateway's storage volumes, and you will be able
+// to take snapshot backups.
+//
+// When you make a request, you will get a 200 OK success response immediately.
+// However, it might take some time for the gateway to be ready. You should
+// call DescribeGatewayInformation and check the status before making any additional
+// API calls. For more information, see ActivateGateway.
+//
+// To specify which gateway to start, use the Amazon Resource Name (ARN) of
+// the gateway in your request.
+func (c *StorageGateway) StartGateway(input *StartGatewayInput) (*StartGatewayOutput, error) {
+ req, out := c.StartGatewayRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opUpdateBandwidthRateLimit = "UpdateBandwidthRateLimit"
+
+// UpdateBandwidthRateLimitRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateBandwidthRateLimit operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the UpdateBandwidthRateLimit method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+// +// // Example sending a request using the UpdateBandwidthRateLimitRequest method. +// req, resp := client.UpdateBandwidthRateLimitRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *StorageGateway) UpdateBandwidthRateLimitRequest(input *UpdateBandwidthRateLimitInput) (req *request.Request, output *UpdateBandwidthRateLimitOutput) { + op := &request.Operation{ + Name: opUpdateBandwidthRateLimit, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateBandwidthRateLimitInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateBandwidthRateLimitOutput{} + req.Data = output + return +} + +// Updates the bandwidth rate limits of a gateway. You can update both the upload +// and download bandwidth rate limit or specify only one of the two. If you +// don't set a bandwidth rate limit, the existing rate limit remains. +// +// By default, a gateway's bandwidth rate limits are not set. If you don't +// set any limit, the gateway does not have any limitations on its bandwidth +// usage and could potentially use the maximum available bandwidth. +// +// To specify which gateway to update, use the Amazon Resource Name (ARN) of +// the gateway in your request. +func (c *StorageGateway) UpdateBandwidthRateLimit(input *UpdateBandwidthRateLimitInput) (*UpdateBandwidthRateLimitOutput, error) { + req, out := c.UpdateBandwidthRateLimitRequest(input) + err := req.Send() + return out, err +} + +const opUpdateChapCredentials = "UpdateChapCredentials" + +// UpdateChapCredentialsRequest generates a "aws/request.Request" representing the +// client's request for the UpdateChapCredentials operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateChapCredentials method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateChapCredentialsRequest method. +// req, resp := client.UpdateChapCredentialsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *StorageGateway) UpdateChapCredentialsRequest(input *UpdateChapCredentialsInput) (req *request.Request, output *UpdateChapCredentialsOutput) { + op := &request.Operation{ + Name: opUpdateChapCredentials, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateChapCredentialsInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateChapCredentialsOutput{} + req.Data = output + return +} + +// Updates the Challenge-Handshake Authentication Protocol (CHAP) credentials +// for a specified iSCSI target. By default, a gateway does not have CHAP enabled; +// however, for added security, you might use it. +// +// When you update CHAP credentials, all existing connections on the target +// are closed and initiators must reconnect with the new credentials. 
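+//
+// // Illustrative sketch only (not part of the generated documentation):
+// // rotate the secret for an iSCSI target. The TargetARN, InitiatorName,
+// // and SecretToAuthenticateInitiator field names are assumptions about
+// // UpdateChapCredentialsInput, and the values are placeholders.
+// resp, err := client.UpdateChapCredentials(&UpdateChapCredentialsInput{
+// TargetARN: aws.String("arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B/target/iqn.1997-05.com.amazon:myvolume"),
+// InitiatorName: aws.String("iqn.1991-05.com.example:initiator"),
+// SecretToAuthenticateInitiator: aws.String("111111111111"),
+// })
+// fmt.Println(resp, err)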
+func (c *StorageGateway) UpdateChapCredentials(input *UpdateChapCredentialsInput) (*UpdateChapCredentialsOutput, error) { + req, out := c.UpdateChapCredentialsRequest(input) + err := req.Send() + return out, err +} + +const opUpdateGatewayInformation = "UpdateGatewayInformation" + +// UpdateGatewayInformationRequest generates a "aws/request.Request" representing the +// client's request for the UpdateGatewayInformation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateGatewayInformation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateGatewayInformationRequest method. +// req, resp := client.UpdateGatewayInformationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *StorageGateway) UpdateGatewayInformationRequest(input *UpdateGatewayInformationInput) (req *request.Request, output *UpdateGatewayInformationOutput) { + op := &request.Operation{ + Name: opUpdateGatewayInformation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateGatewayInformationInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateGatewayInformationOutput{} + req.Data = output + return +} + +// Updates a gateway's metadata, which includes the gateway's name and time +// zone. To specify which gateway to update, use the Amazon Resource Name (ARN) +// of the gateway in your request. +// +// For Gateways activated after September 2, 2015, the gateway's ARN contains +// the gateway ID rather than the gateway name. However, changing the name of +// the gateway has no effect on the gateway's ARN. +func (c *StorageGateway) UpdateGatewayInformation(input *UpdateGatewayInformationInput) (*UpdateGatewayInformationOutput, error) { + req, out := c.UpdateGatewayInformationRequest(input) + err := req.Send() + return out, err +} + +const opUpdateGatewaySoftwareNow = "UpdateGatewaySoftwareNow" + +// UpdateGatewaySoftwareNowRequest generates a "aws/request.Request" representing the +// client's request for the UpdateGatewaySoftwareNow operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateGatewaySoftwareNow method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateGatewaySoftwareNowRequest method. 
+// req, resp := client.UpdateGatewaySoftwareNowRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *StorageGateway) UpdateGatewaySoftwareNowRequest(input *UpdateGatewaySoftwareNowInput) (req *request.Request, output *UpdateGatewaySoftwareNowOutput) { + op := &request.Operation{ + Name: opUpdateGatewaySoftwareNow, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateGatewaySoftwareNowInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateGatewaySoftwareNowOutput{} + req.Data = output + return +} + +// Updates the gateway virtual machine (VM) software. The request immediately +// triggers the software update. +// +// When you make this request, you get a 200 OK success response immediately. +// However, it might take some time for the update to complete. You can call +// DescribeGatewayInformation to verify the gateway is in the STATE_RUNNING +// state. +// +// A software update forces a system restart of your gateway. You can minimize +// the chance of any disruption to your applications by increasing your iSCSI +// Initiators' timeouts. For more information about increasing iSCSI Initiator +// timeouts for Windows and Linux, see Customizing Your Windows iSCSI Settings +// (http://docs.aws.amazon.com/storagegateway/latest/userguide/ConfiguringiSCSIClientInitiatorWindowsClient.html#CustomizeWindowsiSCSISettings) +// and Customizing Your Linux iSCSI Settings (http://docs.aws.amazon.com/storagegateway/latest/userguide/ConfiguringiSCSIClientInitiatorRedHatClient.html#CustomizeLinuxiSCSISettings), +// respectively. +func (c *StorageGateway) UpdateGatewaySoftwareNow(input *UpdateGatewaySoftwareNowInput) (*UpdateGatewaySoftwareNowOutput, error) { + req, out := c.UpdateGatewaySoftwareNowRequest(input) + err := req.Send() + return out, err +} + +const opUpdateMaintenanceStartTime = "UpdateMaintenanceStartTime" + +// UpdateMaintenanceStartTimeRequest generates a "aws/request.Request" representing the +// client's request for the UpdateMaintenanceStartTime operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateMaintenanceStartTime method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateMaintenanceStartTimeRequest method. +// req, resp := client.UpdateMaintenanceStartTimeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *StorageGateway) UpdateMaintenanceStartTimeRequest(input *UpdateMaintenanceStartTimeInput) (req *request.Request, output *UpdateMaintenanceStartTimeOutput) { + op := &request.Operation{ + Name: opUpdateMaintenanceStartTime, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateMaintenanceStartTimeInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateMaintenanceStartTimeOutput{} + req.Data = output + return +} + +// Updates a gateway's weekly maintenance start time information, including +// day and time of the week. 
The maintenance time is the time in your gateway's
+// time zone.
+func (c *StorageGateway) UpdateMaintenanceStartTime(input *UpdateMaintenanceStartTimeInput) (*UpdateMaintenanceStartTimeOutput, error) {
+ req, out := c.UpdateMaintenanceStartTimeRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opUpdateSnapshotSchedule = "UpdateSnapshotSchedule"
+
+// UpdateSnapshotScheduleRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateSnapshotSchedule operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the UpdateSnapshotSchedule method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the UpdateSnapshotScheduleRequest method.
+// req, resp := client.UpdateSnapshotScheduleRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *StorageGateway) UpdateSnapshotScheduleRequest(input *UpdateSnapshotScheduleInput) (req *request.Request, output *UpdateSnapshotScheduleOutput) {
+ op := &request.Operation{
+ Name: opUpdateSnapshotSchedule,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &UpdateSnapshotScheduleInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &UpdateSnapshotScheduleOutput{}
+ req.Data = output
+ return
+}
+
+// Updates a snapshot schedule configured for a gateway volume.
+//
+// The default snapshot schedule for a volume is once every 24 hours, starting
+// at the creation time of the volume. You can use this API to change the snapshot
+// schedule configured for the volume.
+//
+// In the request you must identify the gateway volume whose snapshot schedule
+// you want to update, and the schedule information, including when you want
+// the snapshot to begin on a day and the frequency (in hours) of snapshots.
+func (c *StorageGateway) UpdateSnapshotSchedule(input *UpdateSnapshotScheduleInput) (*UpdateSnapshotScheduleOutput, error) {
+ req, out := c.UpdateSnapshotScheduleRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opUpdateVTLDeviceType = "UpdateVTLDeviceType"
+
+// UpdateVTLDeviceTypeRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateVTLDeviceType operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the UpdateVTLDeviceType method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the UpdateVTLDeviceTypeRequest method.
+// req, resp := client.UpdateVTLDeviceTypeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *StorageGateway) UpdateVTLDeviceTypeRequest(input *UpdateVTLDeviceTypeInput) (req *request.Request, output *UpdateVTLDeviceTypeOutput) { + op := &request.Operation{ + Name: opUpdateVTLDeviceType, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateVTLDeviceTypeInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateVTLDeviceTypeOutput{} + req.Data = output + return +} + +// Updates the type of medium changer in a gateway-VTL. When you activate a +// gateway-VTL, you select a medium changer type for the gateway-VTL. This operation +// enables you to select a different type of medium changer after a gateway-VTL +// is activated. +func (c *StorageGateway) UpdateVTLDeviceType(input *UpdateVTLDeviceTypeInput) (*UpdateVTLDeviceTypeOutput, error) { + req, out := c.UpdateVTLDeviceTypeRequest(input) + err := req.Send() + return out, err +} + +// A JSON object containing one or more of the following fields: +// +// ActivateGatewayInput$ActivationKey +// +// ActivateGatewayInput$GatewayName +// +// ActivateGatewayInput$GatewayRegion +// +// ActivateGatewayInput$GatewayTimezone +// +// ActivateGatewayInput$GatewayType +// +// ActivateGatewayInput$TapeDriveType +// +// ActivateGatewayInput$MediumChangerType +type ActivateGatewayInput struct { + _ struct{} `type:"structure"` + + // Your gateway activation key. You can obtain the activation key by sending + // an HTTP GET request with redirects enabled to the gateway IP address (port + // 80). The redirect URL returned in the response provides you the activation + // key for your gateway in the query string parameter activationKey. It may + // also include other activation-related parameters, however, these are merely + // defaults -- the arguments you pass to the ActivateGateway API call determine + // the actual configuration of your gateway. + ActivationKey *string `min:"1" type:"string" required:"true"` + + // The name you configured for your gateway. + GatewayName *string `min:"2" type:"string" required:"true"` + + // A value that indicates the region where you want to store the snapshot backups. + // The gateway region specified must be the same region as the region in your + // Host header in the request. For more information about available regions + // and endpoints for AWS Storage Gateway, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sg_region) + // in the Amazon Web Services Glossary. + // + // Valid Values: "us-east-1", "us-west-1", "us-west-2", "eu-west-1", "eu-central-1", + // "ap-northeast-1", "ap-northeast-2", "ap-southeast-1", "ap-southeast-2", "sa-east-1" + GatewayRegion *string `min:"1" type:"string" required:"true"` + + // A value that indicates the time zone you want to set for the gateway. The + // time zone is used, for example, for scheduling snapshots and your gateway's + // maintenance schedule. + GatewayTimezone *string `min:"3" type:"string" required:"true"` + + // A value that defines the type of gateway to activate. The type specified + // is critical to all later functions of the gateway and cannot be changed after + // activation. The default value is STORED. + GatewayType *string `min:"2" type:"string"` + + // The value that indicates the type of medium changer to use for gateway-VTL. + // This field is optional. 
+ // + // Valid Values: "STK-L700", "AWS-Gateway-VTL" + MediumChangerType *string `min:"2" type:"string"` + + // The value that indicates the type of tape drive to use for gateway-VTL. This + // field is optional. + // + // Valid Values: "IBM-ULT3580-TD5" + TapeDriveType *string `min:"2" type:"string"` +} + +// String returns the string representation +func (s ActivateGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActivateGatewayInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ActivateGatewayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ActivateGatewayInput"} + if s.ActivationKey == nil { + invalidParams.Add(request.NewErrParamRequired("ActivationKey")) + } + if s.ActivationKey != nil && len(*s.ActivationKey) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ActivationKey", 1)) + } + if s.GatewayName == nil { + invalidParams.Add(request.NewErrParamRequired("GatewayName")) + } + if s.GatewayName != nil && len(*s.GatewayName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("GatewayName", 2)) + } + if s.GatewayRegion == nil { + invalidParams.Add(request.NewErrParamRequired("GatewayRegion")) + } + if s.GatewayRegion != nil && len(*s.GatewayRegion) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GatewayRegion", 1)) + } + if s.GatewayTimezone == nil { + invalidParams.Add(request.NewErrParamRequired("GatewayTimezone")) + } + if s.GatewayTimezone != nil && len(*s.GatewayTimezone) < 3 { + invalidParams.Add(request.NewErrParamMinLen("GatewayTimezone", 3)) + } + if s.GatewayType != nil && len(*s.GatewayType) < 2 { + invalidParams.Add(request.NewErrParamMinLen("GatewayType", 2)) + } + if s.MediumChangerType != nil && len(*s.MediumChangerType) < 2 { + invalidParams.Add(request.NewErrParamMinLen("MediumChangerType", 2)) + } + if s.TapeDriveType != nil && len(*s.TapeDriveType) < 2 { + invalidParams.Add(request.NewErrParamMinLen("TapeDriveType", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// AWS Storage Gateway returns the Amazon Resource Name (ARN) of the activated +// gateway. It is a string made of information such as your account, gateway +// name, and region. This ARN is used to reference the gateway in other API +// operations as well as resource-based authorization. +// +// For gateways activated prior to September 02, 2015 the gateway ARN contains +// the gateway name rather than the gateway id. Changing the name of the gateway +// has no effect on the gateway ARN. +type ActivateGatewayOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s ActivateGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActivateGatewayOutput) GoString() string { + return s.String() +} + +type AddCacheInput struct { + _ struct{} `type:"structure"` + + DiskIds []*string `type:"list" required:"true"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. 
+ GatewayARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s AddCacheInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddCacheInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddCacheInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddCacheInput"} + if s.DiskIds == nil { + invalidParams.Add(request.NewErrParamRequired("DiskIds")) + } + if s.GatewayARN == nil { + invalidParams.Add(request.NewErrParamRequired("GatewayARN")) + } + if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { + invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type AddCacheOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s AddCacheOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddCacheOutput) GoString() string { + return s.String() +} + +// AddTagsToResourceInput +type AddTagsToResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource you want to add tags to. + ResourceARN *string `min:"50" type:"string" required:"true"` + + // The key-value pair that represents the tag you want to add to the resource. + // The value can be an empty string. + // + // Valid characters for key and value are letters, spaces, and numbers representable + // in UTF-8 format, and the following special characters: + - = . _ : / @. + Tags []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s AddTagsToResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsToResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddTagsToResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddTagsToResourceInput"} + if s.ResourceARN == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceARN")) + } + if s.ResourceARN != nil && len(*s.ResourceARN) < 50 { + invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 50)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// AddTagsToResourceOutput +type AddTagsToResourceOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource you want to add tags to. 
+    ResourceARN *string `min:"50" type:"string"`
+}
+
+// String returns the string representation
+func (s AddTagsToResourceOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddTagsToResourceOutput) GoString() string {
+    return s.String()
+}
+
+type AddUploadBufferInput struct {
+    _ struct{} `type:"structure"`
+
+    DiskIds []*string `type:"list" required:"true"`
+
+    // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
+    // to return a list of gateways for your account and region.
+    GatewayARN *string `min:"50" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AddUploadBufferInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddUploadBufferInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AddUploadBufferInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "AddUploadBufferInput"}
+    if s.DiskIds == nil {
+        invalidParams.Add(request.NewErrParamRequired("DiskIds"))
+    }
+    if s.GatewayARN == nil {
+        invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
+    }
+    if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
+        invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+type AddUploadBufferOutput struct {
+    _ struct{} `type:"structure"`
+
+    // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
+    // to return a list of gateways for your account and region.
+    GatewayARN *string `min:"50" type:"string"`
+}
+
+// String returns the string representation
+func (s AddUploadBufferOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddUploadBufferOutput) GoString() string {
+    return s.String()
+}
+
+// A JSON object containing one or more of the following fields:
+//
+// AddWorkingStorageInput$DiskIds
+type AddWorkingStorageInput struct {
+    _ struct{} `type:"structure"`
+
+    // An array of strings that identify disks that are to be configured as working
+    // storage. Each string must have a minimum length of 1 and a maximum length
+    // of 300. You can get the disk IDs from the ListLocalDisks API.
+    DiskIds []*string `type:"list" required:"true"`
+
+    // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
+    // to return a list of gateways for your account and region.
+    GatewayARN *string `min:"50" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AddWorkingStorageInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddWorkingStorageInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
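+//
+// A minimal, hypothetical usage sketch (not part of the generated SDK): it
+// assumes an initialized *storagegateway.StorageGateway client named svc and
+// aws.String from the aws package; the disk ID and ARN are placeholders.
+//
+//    input := &AddWorkingStorageInput{
+//        DiskIds:    []*string{aws.String("pci-0000:03:00.0-scsi-0:0:0:0")},
+//        GatewayARN: aws.String("arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B"),
+//    }
+//    if err := input.Validate(); err != nil {
+//        // err is a request.ErrInvalidParams listing every violated constraint
+//    }
+//    out, err := svc.AddWorkingStorage(input)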
+func (s *AddWorkingStorageInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "AddWorkingStorageInput"}
+    if s.DiskIds == nil {
+        invalidParams.Add(request.NewErrParamRequired("DiskIds"))
+    }
+    if s.GatewayARN == nil {
+        invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
+    }
+    if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
+        invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// A JSON object containing the Amazon Resource Name (ARN) of the gateway for
+// which working storage was configured.
+type AddWorkingStorageOutput struct {
+    _ struct{} `type:"structure"`
+
+    // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
+    // to return a list of gateways for your account and region.
+    GatewayARN *string `min:"50" type:"string"`
+}
+
+// String returns the string representation
+func (s AddWorkingStorageOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddWorkingStorageOutput) GoString() string {
+    return s.String()
+}
+
+type CachediSCSIVolume struct {
+    _ struct{} `type:"structure"`
+
+    SourceSnapshotId *string `type:"string"`
+
+    VolumeARN *string `min:"50" type:"string"`
+
+    VolumeId *string `min:"12" type:"string"`
+
+    VolumeProgress *float64 `type:"double"`
+
+    VolumeSizeInBytes *int64 `type:"long"`
+
+    VolumeStatus *string `min:"3" type:"string"`
+
+    VolumeType *string `min:"3" type:"string"`
+
+    // Lists iSCSI information about a volume.
+    VolumeiSCSIAttributes *VolumeiSCSIAttributes `type:"structure"`
+}
+
+// String returns the string representation
+func (s CachediSCSIVolume) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CachediSCSIVolume) GoString() string {
+    return s.String()
+}
+
+// CancelArchivalInput
+type CancelArchivalInput struct {
+    _ struct{} `type:"structure"`
+
+    // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
+    // to return a list of gateways for your account and region.
+    GatewayARN *string `min:"50" type:"string" required:"true"`
+
+    // The Amazon Resource Name (ARN) of the virtual tape you want to cancel archiving
+    // for.
+    TapeARN *string `min:"50" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CancelArchivalInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CancelArchivalInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CancelArchivalInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "CancelArchivalInput"}
+    if s.GatewayARN == nil {
+        invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
+    }
+    if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
+        invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
+    }
+    if s.TapeARN == nil {
+        invalidParams.Add(request.NewErrParamRequired("TapeARN"))
+    }
+    if s.TapeARN != nil && len(*s.TapeARN) < 50 {
+        invalidParams.Add(request.NewErrParamMinLen("TapeARN", 50))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// CancelArchivalOutput
+type CancelArchivalOutput struct {
+    _ struct{} `type:"structure"`
+
+    // The Amazon Resource Name (ARN) of the virtual tape for which archiving was
+    // canceled.
+ TapeARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s CancelArchivalOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelArchivalOutput) GoString() string { + return s.String() +} + +// CancelRetrievalInput +type CancelRetrievalInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the virtual tape you want to cancel retrieval + // for. + TapeARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelRetrievalInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelRetrievalInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CancelRetrievalInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CancelRetrievalInput"} + if s.GatewayARN == nil { + invalidParams.Add(request.NewErrParamRequired("GatewayARN")) + } + if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { + invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) + } + if s.TapeARN == nil { + invalidParams.Add(request.NewErrParamRequired("TapeARN")) + } + if s.TapeARN != nil && len(*s.TapeARN) < 50 { + invalidParams.Add(request.NewErrParamMinLen("TapeARN", 50)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// CancelRetrievalOutput +type CancelRetrievalOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the virtual tape for which retrieval was + // canceled. + TapeARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s CancelRetrievalOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelRetrievalOutput) GoString() string { + return s.String() +} + +// Describes Challenge-Handshake Authentication Protocol (CHAP) information +// that supports authentication between your gateway and iSCSI initiators. +type ChapInfo struct { + _ struct{} `type:"structure"` + + // The iSCSI initiator that connects to the target. + InitiatorName *string `min:"1" type:"string"` + + // The secret key that the initiator (for example, the Windows client) must + // provide to participate in mutual CHAP with the target. + SecretToAuthenticateInitiator *string `min:"1" type:"string"` + + // The secret key that the target must provide to participate in mutual CHAP + // with the initiator (e.g. Windows client). + SecretToAuthenticateTarget *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the volume. + // + // Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens + // (-). + TargetARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s ChapInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChapInfo) GoString() string { + return s.String() +} + +type CreateCachediSCSIVolumeInput struct { + _ struct{} `type:"structure"` + + ClientToken *string `min:"5" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the gateway. 
Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string" required:"true"` + + NetworkInterfaceId *string `type:"string" required:"true"` + + SnapshotId *string `type:"string"` + + TargetName *string `min:"1" type:"string" required:"true"` + + VolumeSizeInBytes *int64 `type:"long" required:"true"` +} + +// String returns the string representation +func (s CreateCachediSCSIVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCachediSCSIVolumeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateCachediSCSIVolumeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateCachediSCSIVolumeInput"} + if s.ClientToken == nil { + invalidParams.Add(request.NewErrParamRequired("ClientToken")) + } + if s.ClientToken != nil && len(*s.ClientToken) < 5 { + invalidParams.Add(request.NewErrParamMinLen("ClientToken", 5)) + } + if s.GatewayARN == nil { + invalidParams.Add(request.NewErrParamRequired("GatewayARN")) + } + if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { + invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) + } + if s.NetworkInterfaceId == nil { + invalidParams.Add(request.NewErrParamRequired("NetworkInterfaceId")) + } + if s.TargetName == nil { + invalidParams.Add(request.NewErrParamRequired("TargetName")) + } + if s.TargetName != nil && len(*s.TargetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TargetName", 1)) + } + if s.VolumeSizeInBytes == nil { + invalidParams.Add(request.NewErrParamRequired("VolumeSizeInBytes")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateCachediSCSIVolumeOutput struct { + _ struct{} `type:"structure"` + + TargetARN *string `min:"50" type:"string"` + + VolumeARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s CreateCachediSCSIVolumeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCachediSCSIVolumeOutput) GoString() string { + return s.String() +} + +type CreateSnapshotFromVolumeRecoveryPointInput struct { + _ struct{} `type:"structure"` + + SnapshotDescription *string `min:"1" type:"string" required:"true"` + + VolumeARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateSnapshotFromVolumeRecoveryPointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSnapshotFromVolumeRecoveryPointInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
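+//
+// A hypothetical sketch of the error path (not part of the generated SDK;
+// aws.String is from the aws package, and the ARN below is deliberately
+// shorter than the field's minimum length of 50):
+//
+//    input := &CreateSnapshotFromVolumeRecoveryPointInput{
+//        SnapshotDescription: aws.String("nightly recovery-point snapshot"),
+//        VolumeARN:           aws.String("arn:aws:storagegateway:..."),
+//    }
+//    if err := input.Validate(); err != nil {
+//        // the request.ErrInvalidParams value reports that VolumeARN is
+//        // shorter than the minimum length of 50; no request is ever sent
+//    }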
+func (s *CreateSnapshotFromVolumeRecoveryPointInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "CreateSnapshotFromVolumeRecoveryPointInput"}
+    if s.SnapshotDescription == nil {
+        invalidParams.Add(request.NewErrParamRequired("SnapshotDescription"))
+    }
+    if s.SnapshotDescription != nil && len(*s.SnapshotDescription) < 1 {
+        invalidParams.Add(request.NewErrParamMinLen("SnapshotDescription", 1))
+    }
+    if s.VolumeARN == nil {
+        invalidParams.Add(request.NewErrParamRequired("VolumeARN"))
+    }
+    if s.VolumeARN != nil && len(*s.VolumeARN) < 50 {
+        invalidParams.Add(request.NewErrParamMinLen("VolumeARN", 50))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+type CreateSnapshotFromVolumeRecoveryPointOutput struct {
+    _ struct{} `type:"structure"`
+
+    SnapshotId *string `type:"string"`
+
+    VolumeARN *string `min:"50" type:"string"`
+
+    VolumeRecoveryPointTime *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CreateSnapshotFromVolumeRecoveryPointOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateSnapshotFromVolumeRecoveryPointOutput) GoString() string {
+    return s.String()
+}
+
+// A JSON object containing one or more of the following fields:
+//
+// CreateSnapshotInput$SnapshotDescription
+//
+// CreateSnapshotInput$VolumeARN
+type CreateSnapshotInput struct {
+    _ struct{} `type:"structure"`
+
+    // Textual description of the snapshot that appears in the Description field
+    // of the Elastic Block Store snapshots panel in the Amazon EC2 console and
+    // in the Description field of the AWS Storage Gateway snapshot Details pane.
+    SnapshotDescription *string `min:"1" type:"string" required:"true"`
+
+    // The Amazon Resource Name (ARN) of the volume. Use the ListVolumes operation
+    // to return a list of gateway volumes.
+    VolumeARN *string `min:"50" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateSnapshotInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateSnapshotInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateSnapshotInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "CreateSnapshotInput"}
+    if s.SnapshotDescription == nil {
+        invalidParams.Add(request.NewErrParamRequired("SnapshotDescription"))
+    }
+    if s.SnapshotDescription != nil && len(*s.SnapshotDescription) < 1 {
+        invalidParams.Add(request.NewErrParamMinLen("SnapshotDescription", 1))
+    }
+    if s.VolumeARN == nil {
+        invalidParams.Add(request.NewErrParamRequired("VolumeARN"))
+    }
+    if s.VolumeARN != nil && len(*s.VolumeARN) < 50 {
+        invalidParams.Add(request.NewErrParamMinLen("VolumeARN", 50))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// A JSON object containing the following fields:
+type CreateSnapshotOutput struct {
+    _ struct{} `type:"structure"`
+
+    // The snapshot ID that is used to refer to the snapshot in future operations
+    // such as describing snapshots (Amazon Elastic Compute Cloud API DescribeSnapshots)
+    // or creating a volume from a snapshot (CreateStorediSCSIVolume).
+    SnapshotId *string `type:"string"`
+
+    // The Amazon Resource Name (ARN) of the volume of which the snapshot was taken.
+    VolumeARN *string `min:"50" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateSnapshotOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateSnapshotOutput) GoString() string {
+    return s.String()
+}
+
+// A JSON object containing one or more of the following fields:
+//
+// CreateStorediSCSIVolumeInput$DiskId
+//
+// CreateStorediSCSIVolumeInput$NetworkInterfaceId
+//
+// CreateStorediSCSIVolumeInput$PreserveExistingData
+//
+// CreateStorediSCSIVolumeInput$SnapshotId
+//
+// CreateStorediSCSIVolumeInput$TargetName
+type CreateStorediSCSIVolumeInput struct {
+    _ struct{} `type:"structure"`
+
+    // The unique identifier for the gateway local disk that is configured as a
+    // stored volume. Use ListLocalDisks (http://docs.aws.amazon.com/storagegateway/latest/userguide/API_ListLocalDisks.html)
+    // to list disk IDs for a gateway.
+    DiskId *string `min:"1" type:"string" required:"true"`
+
+    // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
+    // to return a list of gateways for your account and region.
+    GatewayARN *string `min:"50" type:"string" required:"true"`
+
+    // The network interface of the gateway on which to expose the iSCSI target.
+    // Only IPv4 addresses are accepted. Use DescribeGatewayInformation to get a
+    // list of the network interfaces available on a gateway.
+    //
+    // Valid Values: A valid IP address.
+    NetworkInterfaceId *string `type:"string" required:"true"`
+
+    // Specify this field as true if you want to preserve the data on the local
+    // disk. Otherwise, specifying this field as false creates an empty volume.
+    //
+    // Valid Values: true, false
+    PreserveExistingData *bool `type:"boolean" required:"true"`
+
+    // The snapshot ID (e.g. "snap-1122aabb") of the snapshot to restore as the
+    // new stored volume. Specify this field if you want to create the iSCSI storage
+    // volume from a snapshot; otherwise, do not include this field. To list snapshots
+    // for your account, use DescribeSnapshots (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeSnapshots.html)
+    // in the Amazon Elastic Compute Cloud API Reference.
+    SnapshotId *string `type:"string"`
+
+    // The name of the iSCSI target used by initiators to connect to the target
+    // and as a suffix for the target ARN. For example, specifying TargetName as
+    // myvolume results in the target ARN of arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B/target/iqn.1997-05.com.amazon:myvolume.
+    // The target name must be unique across all volumes of a gateway.
+    TargetName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateStorediSCSIVolumeInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateStorediSCSIVolumeInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
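+//
+// A hypothetical construction sketch (not part of the generated SDK; assumes
+// aws.String and aws.Bool from the aws package; all values are placeholders):
+//
+//    input := &CreateStorediSCSIVolumeInput{
+//        DiskId:               aws.String("pci-0000:03:00.0-scsi-0:0:0:0"),
+//        GatewayARN:           aws.String("arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B"),
+//        NetworkInterfaceId:   aws.String("10.1.1.1"),
+//        PreserveExistingData: aws.Bool(true),
+//        TargetName:           aws.String("myvolume"),
+//    }
+//    // With TargetName "myvolume" the service derives a target ARN ending in
+//    // .../target/iqn.1997-05.com.amazon:myvolume, per the field docs above.
+//    if err := input.Validate(); err != nil { /* handle invalid parameters */ }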
+func (s *CreateStorediSCSIVolumeInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "CreateStorediSCSIVolumeInput"}
+    if s.DiskId == nil {
+        invalidParams.Add(request.NewErrParamRequired("DiskId"))
+    }
+    if s.DiskId != nil && len(*s.DiskId) < 1 {
+        invalidParams.Add(request.NewErrParamMinLen("DiskId", 1))
+    }
+    if s.GatewayARN == nil {
+        invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
+    }
+    if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
+        invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
+    }
+    if s.NetworkInterfaceId == nil {
+        invalidParams.Add(request.NewErrParamRequired("NetworkInterfaceId"))
+    }
+    if s.PreserveExistingData == nil {
+        invalidParams.Add(request.NewErrParamRequired("PreserveExistingData"))
+    }
+    if s.TargetName == nil {
+        invalidParams.Add(request.NewErrParamRequired("TargetName"))
+    }
+    if s.TargetName != nil && len(*s.TargetName) < 1 {
+        invalidParams.Add(request.NewErrParamMinLen("TargetName", 1))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// A JSON object containing the following fields:
+type CreateStorediSCSIVolumeOutput struct {
+    _ struct{} `type:"structure"`
+
+    // The Amazon Resource Name (ARN) of the volume target that includes the iSCSI
+    // name that initiators can use to connect to the target.
+    TargetARN *string `min:"50" type:"string"`
+
+    // The Amazon Resource Name (ARN) of the configured volume.
+    VolumeARN *string `min:"50" type:"string"`
+
+    // The size of the volume in bytes.
+    VolumeSizeInBytes *int64 `type:"long"`
+}
+
+// String returns the string representation
+func (s CreateStorediSCSIVolumeOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateStorediSCSIVolumeOutput) GoString() string {
+    return s.String()
+}
+
+// CreateTapeWithBarcodeInput
+type CreateTapeWithBarcodeInput struct {
+    _ struct{} `type:"structure"`
+
+    // The unique Amazon Resource Name (ARN) that represents the gateway to associate
+    // the virtual tape with. Use the ListGateways operation to return a list of
+    // gateways for your account and region.
+    GatewayARN *string `min:"50" type:"string" required:"true"`
+
+    // The barcode that you want to assign to the tape.
+    TapeBarcode *string `min:"7" type:"string" required:"true"`
+
+    // The size, in bytes, of the virtual tape that you want to create.
+    //
+    // The size must be a multiple of one gigabyte (1024*1024*1024 bytes).
+    TapeSizeInBytes *int64 `type:"long" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateTapeWithBarcodeInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateTapeWithBarcodeInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
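+//
+// A hypothetical sketch of the gigabyte-alignment rule described above (not
+// part of the generated SDK; aws.String and aws.Int64 are from the aws package):
+//
+//    const gib = int64(1024 * 1024 * 1024)
+//    input := &CreateTapeWithBarcodeInput{
+//        GatewayARN:      aws.String("arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B"),
+//        TapeBarcode:     aws.String("TEST12345"),
+//        TapeSizeInBytes: aws.Int64(100 * gib), // an exact multiple of one gigabyte
+//    }
+//    if err := input.Validate(); err != nil { /* handle invalid parameters */ }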
+func (s *CreateTapeWithBarcodeInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "CreateTapeWithBarcodeInput"}
+    if s.GatewayARN == nil {
+        invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
+    }
+    if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
+        invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
+    }
+    if s.TapeBarcode == nil {
+        invalidParams.Add(request.NewErrParamRequired("TapeBarcode"))
+    }
+    if s.TapeBarcode != nil && len(*s.TapeBarcode) < 7 {
+        invalidParams.Add(request.NewErrParamMinLen("TapeBarcode", 7))
+    }
+    if s.TapeSizeInBytes == nil {
+        invalidParams.Add(request.NewErrParamRequired("TapeSizeInBytes"))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// CreateTapeWithBarcodeOutput
+type CreateTapeWithBarcodeOutput struct {
+    _ struct{} `type:"structure"`
+
+    // A unique Amazon Resource Name (ARN) that represents the virtual tape that
+    // was created.
+    TapeARN *string `min:"50" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateTapeWithBarcodeOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateTapeWithBarcodeOutput) GoString() string {
+    return s.String()
+}
+
+// CreateTapesInput
+type CreateTapesInput struct {
+    _ struct{} `type:"structure"`
+
+    // A unique identifier that you use to retry a request. If you retry a request,
+    // use the same ClientToken you specified in the initial request.
+    //
+    // Using the same ClientToken prevents creating the tape multiple times.
+    ClientToken *string `min:"5" type:"string" required:"true"`
+
+    // The unique Amazon Resource Name (ARN) that represents the gateway to associate
+    // the virtual tapes with. Use the ListGateways operation to return a list of
+    // gateways for your account and region.
+    GatewayARN *string `min:"50" type:"string" required:"true"`
+
+    // The number of virtual tapes that you want to create.
+    NumTapesToCreate *int64 `min:"1" type:"integer" required:"true"`
+
+    // A prefix that you append to the barcode of the virtual tape you are creating.
+    // This prefix makes the barcode unique.
+    //
+    // The prefix must be 1 to 4 characters in length and must consist of uppercase
+    // letters from A to Z.
+    TapeBarcodePrefix *string `min:"1" type:"string" required:"true"`
+
+    // The size, in bytes, of the virtual tapes that you want to create.
+    //
+    // The size must be a multiple of one gigabyte (1024*1024*1024 bytes).
+    TapeSizeInBytes *int64 `type:"long" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateTapesInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateTapesInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
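+//
+// A hypothetical idempotent-retry sketch (not part of the generated SDK;
+// assumes an initialized *storagegateway.StorageGateway client named svc):
+//
+//    input := &CreateTapesInput{
+//        ClientToken:       aws.String("tape-batch-0001"), // reuse on retry
+//        GatewayARN:        aws.String("arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B"),
+//        NumTapesToCreate:  aws.Int64(3),
+//        TapeBarcodePrefix: aws.String("TST"),
+//        TapeSizeInBytes:   aws.Int64(100 * 1024 * 1024 * 1024),
+//    }
+//    out, err := svc.CreateTapes(input)
+//    if err != nil {
+//        // Retrying with the same ClientToken will not create duplicate
+//        // tapes, per the ClientToken documentation above.
+//        out, err = svc.CreateTapes(input)
+//    }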
+func (s *CreateTapesInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "CreateTapesInput"}
+    if s.ClientToken == nil {
+        invalidParams.Add(request.NewErrParamRequired("ClientToken"))
+    }
+    if s.ClientToken != nil && len(*s.ClientToken) < 5 {
+        invalidParams.Add(request.NewErrParamMinLen("ClientToken", 5))
+    }
+    if s.GatewayARN == nil {
+        invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
+    }
+    if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
+        invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
+    }
+    if s.NumTapesToCreate == nil {
+        invalidParams.Add(request.NewErrParamRequired("NumTapesToCreate"))
+    }
+    if s.NumTapesToCreate != nil && *s.NumTapesToCreate < 1 {
+        invalidParams.Add(request.NewErrParamMinValue("NumTapesToCreate", 1))
+    }
+    if s.TapeBarcodePrefix == nil {
+        invalidParams.Add(request.NewErrParamRequired("TapeBarcodePrefix"))
+    }
+    if s.TapeBarcodePrefix != nil && len(*s.TapeBarcodePrefix) < 1 {
+        invalidParams.Add(request.NewErrParamMinLen("TapeBarcodePrefix", 1))
+    }
+    if s.TapeSizeInBytes == nil {
+        invalidParams.Add(request.NewErrParamRequired("TapeSizeInBytes"))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// CreateTapesOutput
+type CreateTapesOutput struct {
+    _ struct{} `type:"structure"`
+
+    // A list of unique Amazon Resource Names (ARNs) that represents the virtual
+    // tapes that were created.
+    TapeARNs []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s CreateTapesOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateTapesOutput) GoString() string {
+    return s.String()
+}
+
+type DeleteBandwidthRateLimitInput struct {
+    _ struct{} `type:"structure"`
+
+    BandwidthType *string `min:"3" type:"string" required:"true"`
+
+    // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
+    // to return a list of gateways for your account and region.
+    GatewayARN *string `min:"50" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBandwidthRateLimitInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBandwidthRateLimitInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBandwidthRateLimitInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "DeleteBandwidthRateLimitInput"}
+    if s.BandwidthType == nil {
+        invalidParams.Add(request.NewErrParamRequired("BandwidthType"))
+    }
+    if s.BandwidthType != nil && len(*s.BandwidthType) < 3 {
+        invalidParams.Add(request.NewErrParamMinLen("BandwidthType", 3))
+    }
+    if s.GatewayARN == nil {
+        invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
+    }
+    if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
+        invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// A JSON object containing the Amazon Resource Name (ARN) of the gateway whose
+// bandwidth rate information was deleted.
+type DeleteBandwidthRateLimitOutput struct {
+    _ struct{} `type:"structure"`
+
+    // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
+    // to return a list of gateways for your account and region.
+    GatewayARN *string `min:"50" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteBandwidthRateLimitOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBandwidthRateLimitOutput) GoString() string {
+    return s.String()
+}
+
+// A JSON object containing one or more of the following fields:
+//
+// DeleteChapCredentialsInput$InitiatorName
+//
+// DeleteChapCredentialsInput$TargetARN
+type DeleteChapCredentialsInput struct {
+    _ struct{} `type:"structure"`
+
+    // The iSCSI initiator that connects to the target.
+    InitiatorName *string `min:"1" type:"string" required:"true"`
+
+    // The Amazon Resource Name (ARN) of the iSCSI volume target. Use the DescribeStorediSCSIVolumes
+    // operation to retrieve the TargetARN for a specified VolumeARN.
+    TargetARN *string `min:"50" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteChapCredentialsInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteChapCredentialsInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteChapCredentialsInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "DeleteChapCredentialsInput"}
+    if s.InitiatorName == nil {
+        invalidParams.Add(request.NewErrParamRequired("InitiatorName"))
+    }
+    if s.InitiatorName != nil && len(*s.InitiatorName) < 1 {
+        invalidParams.Add(request.NewErrParamMinLen("InitiatorName", 1))
+    }
+    if s.TargetARN == nil {
+        invalidParams.Add(request.NewErrParamRequired("TargetARN"))
+    }
+    if s.TargetARN != nil && len(*s.TargetARN) < 50 {
+        invalidParams.Add(request.NewErrParamMinLen("TargetARN", 50))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// A JSON object containing the following fields:
+type DeleteChapCredentialsOutput struct {
+    _ struct{} `type:"structure"`
+
+    // The iSCSI initiator that connects to the target.
+    InitiatorName *string `min:"1" type:"string"`
+
+    // The Amazon Resource Name (ARN) of the target.
+    TargetARN *string `min:"50" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteChapCredentialsOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteChapCredentialsOutput) GoString() string {
+    return s.String()
+}
+
+// A JSON object containing the id of the gateway to delete.
+type DeleteGatewayInput struct {
+    _ struct{} `type:"structure"`
+
+    // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
+    // to return a list of gateways for your account and region.
+    GatewayARN *string `min:"50" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteGatewayInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteGatewayInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
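+//
+// A hypothetical sketch of how validation failures surface (not part of the
+// generated SDK; the type assertion below relies on Validate returning a
+// request.ErrInvalidParams value, as the implementation shows):
+//
+//    input := &DeleteGatewayInput{} // GatewayARN deliberately left unset
+//    if err := input.Validate(); err != nil {
+//        if invalid, ok := err.(request.ErrInvalidParams); ok {
+//            _ = invalid.Len() // 1: GatewayARN is required
+//        }
+//    }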
+func (s *DeleteGatewayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteGatewayInput"} + if s.GatewayARN == nil { + invalidParams.Add(request.NewErrParamRequired("GatewayARN")) + } + if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { + invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A JSON object containing the id of the deleted gateway. +type DeleteGatewayOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s DeleteGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteGatewayOutput) GoString() string { + return s.String() +} + +type DeleteSnapshotScheduleInput struct { + _ struct{} `type:"structure"` + + VolumeARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSnapshotScheduleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSnapshotScheduleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteSnapshotScheduleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteSnapshotScheduleInput"} + if s.VolumeARN == nil { + invalidParams.Add(request.NewErrParamRequired("VolumeARN")) + } + if s.VolumeARN != nil && len(*s.VolumeARN) < 50 { + invalidParams.Add(request.NewErrParamMinLen("VolumeARN", 50)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteSnapshotScheduleOutput struct { + _ struct{} `type:"structure"` + + VolumeARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s DeleteSnapshotScheduleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSnapshotScheduleOutput) GoString() string { + return s.String() +} + +// DeleteTapeArchiveInput +type DeleteTapeArchiveInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the virtual tape to delete from the virtual + // tape shelf (VTS). + TapeARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTapeArchiveInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTapeArchiveInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteTapeArchiveInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTapeArchiveInput"} + if s.TapeARN == nil { + invalidParams.Add(request.NewErrParamRequired("TapeARN")) + } + if s.TapeARN != nil && len(*s.TapeARN) < 50 { + invalidParams.Add(request.NewErrParamMinLen("TapeARN", 50)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// DeleteTapeArchiveOutput +type DeleteTapeArchiveOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the virtual tape that was deleted from + // the virtual tape shelf (VTS). 
+    TapeARN *string `min:"50" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteTapeArchiveOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteTapeArchiveOutput) GoString() string {
+    return s.String()
+}
+
+// DeleteTapeInput
+type DeleteTapeInput struct {
+    _ struct{} `type:"structure"`
+
+    // The unique Amazon Resource Name (ARN) of the gateway that the virtual tape
+    // to delete is associated with. Use the ListGateways operation to return a
+    // list of gateways for your account and region.
+    GatewayARN *string `min:"50" type:"string" required:"true"`
+
+    // The Amazon Resource Name (ARN) of the virtual tape to delete.
+    TapeARN *string `min:"50" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteTapeInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteTapeInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteTapeInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "DeleteTapeInput"}
+    if s.GatewayARN == nil {
+        invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
+    }
+    if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
+        invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
+    }
+    if s.TapeARN == nil {
+        invalidParams.Add(request.NewErrParamRequired("TapeARN"))
+    }
+    if s.TapeARN != nil && len(*s.TapeARN) < 50 {
+        invalidParams.Add(request.NewErrParamMinLen("TapeARN", 50))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// DeleteTapeOutput
+type DeleteTapeOutput struct {
+    _ struct{} `type:"structure"`
+
+    // The Amazon Resource Name (ARN) of the deleted virtual tape.
+    TapeARN *string `min:"50" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteTapeOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteTapeOutput) GoString() string {
+    return s.String()
+}
+
+// A JSON object containing the DeleteVolumeInput$VolumeARN to delete.
+type DeleteVolumeInput struct {
+    _ struct{} `type:"structure"`
+
+    // The Amazon Resource Name (ARN) of the volume. Use the ListVolumes operation
+    // to return a list of gateway volumes.
+    VolumeARN *string `min:"50" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteVolumeInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteVolumeInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteVolumeInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "DeleteVolumeInput"}
+    if s.VolumeARN == nil {
+        invalidParams.Add(request.NewErrParamRequired("VolumeARN"))
+    }
+    if s.VolumeARN != nil && len(*s.VolumeARN) < 50 {
+        invalidParams.Add(request.NewErrParamMinLen("VolumeARN", 50))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// A JSON object containing the Amazon Resource Name (ARN) of the storage volume
+// that was deleted.
+type DeleteVolumeOutput struct {
+    _ struct{} `type:"structure"`
+
+    // The Amazon Resource Name (ARN) of the storage volume that was deleted. It
+    // is the same ARN you provided in the request.
+    VolumeARN *string `min:"50" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteVolumeOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteVolumeOutput) GoString() string {
+    return s.String()
+}
+
+// A JSON object containing the Amazon Resource Name (ARN) of the gateway.
+type DescribeBandwidthRateLimitInput struct {
+    _ struct{} `type:"structure"`
+
+    // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
+    // to return a list of gateways for your account and region.
+    GatewayARN *string `min:"50" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeBandwidthRateLimitInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeBandwidthRateLimitInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeBandwidthRateLimitInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "DescribeBandwidthRateLimitInput"}
+    if s.GatewayARN == nil {
+        invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
+    }
+    if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
+        invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// A JSON object containing the following fields:
+type DescribeBandwidthRateLimitOutput struct {
+    _ struct{} `type:"structure"`
+
+    // The average download bandwidth rate limit in bits per second. This field
+    // does not appear in the response if the download rate limit is not set.
+    AverageDownloadRateLimitInBitsPerSec *int64 `min:"102400" type:"long"`
+
+    // The average upload bandwidth rate limit in bits per second. This field does
+    // not appear in the response if the upload rate limit is not set.
+    AverageUploadRateLimitInBitsPerSec *int64 `min:"51200" type:"long"`
+
+    // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
+    // to return a list of gateways for your account and region.
+    GatewayARN *string `min:"50" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeBandwidthRateLimitOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeBandwidthRateLimitOutput) GoString() string {
+    return s.String()
+}
+
+type DescribeCacheInput struct {
+    _ struct{} `type:"structure"`
+
+    // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
+    // to return a list of gateways for your account and region.
+    GatewayARN *string `min:"50" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeCacheInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeCacheInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
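+//
+// A hypothetical round-trip sketch (not part of the generated SDK; assumes an
+// initialized *storagegateway.StorageGateway client named svc):
+//
+//    out, err := svc.DescribeCache(&DescribeCacheInput{
+//        GatewayARN: aws.String("arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B"),
+//    })
+//    if err == nil && out.CacheDirtyPercentage != nil {
+//        // pointer fields may be absent; check for nil before dereferencing
+//        dirty := *out.CacheDirtyPercentage
+//        _ = dirty
+//    }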
+func (s *DescribeCacheInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "DescribeCacheInput"}
+    if s.GatewayARN == nil {
+        invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
+    }
+    if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
+        invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+type DescribeCacheOutput struct {
+    _ struct{} `type:"structure"`
+
+    CacheAllocatedInBytes *int64 `type:"long"`
+
+    CacheDirtyPercentage *float64 `type:"double"`
+
+    CacheHitPercentage *float64 `type:"double"`
+
+    CacheMissPercentage *float64 `type:"double"`
+
+    CacheUsedPercentage *float64 `type:"double"`
+
+    DiskIds []*string `type:"list"`
+
+    // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
+    // to return a list of gateways for your account and region.
+    GatewayARN *string `min:"50" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeCacheOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeCacheOutput) GoString() string {
+    return s.String()
+}
+
+type DescribeCachediSCSIVolumesInput struct {
+    _ struct{} `type:"structure"`
+
+    VolumeARNs []*string `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeCachediSCSIVolumesInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeCachediSCSIVolumesInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeCachediSCSIVolumesInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "DescribeCachediSCSIVolumesInput"}
+    if s.VolumeARNs == nil {
+        invalidParams.Add(request.NewErrParamRequired("VolumeARNs"))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// A JSON object containing the following fields:
+type DescribeCachediSCSIVolumesOutput struct {
+    _ struct{} `type:"structure"`
+
+    // An array of objects where each object contains metadata about one cached
+    // volume.
+    CachediSCSIVolumes []*CachediSCSIVolume `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeCachediSCSIVolumesOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeCachediSCSIVolumesOutput) GoString() string {
+    return s.String()
+}
+
+// A JSON object containing the Amazon Resource Name (ARN) of the iSCSI volume
+// target.
+type DescribeChapCredentialsInput struct {
+    _ struct{} `type:"structure"`
+
+    // The Amazon Resource Name (ARN) of the iSCSI volume target. Use the DescribeStorediSCSIVolumes
+    // operation to retrieve the TargetARN for a specified VolumeARN.
+    TargetARN *string `min:"50" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeChapCredentialsInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeChapCredentialsInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
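+//
+// A hypothetical sketch of listing CHAP credentials for a target (not part of
+// the generated SDK; assumes an initialized client named svc; the target ARN
+// is a placeholder):
+//
+//    out, err := svc.DescribeChapCredentials(&DescribeChapCredentialsInput{
+//        TargetARN: aws.String("arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B/target/iqn.1997-05.com.amazon:myvolume"),
+//    })
+//    if err == nil {
+//        for _, chap := range out.ChapCredentials { // empty when none are set
+//            _ = chap.InitiatorName
+//        }
+//    }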
+func (s *DescribeChapCredentialsInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "DescribeChapCredentialsInput"}
+    if s.TargetARN == nil {
+        invalidParams.Add(request.NewErrParamRequired("TargetARN"))
+    }
+    if s.TargetARN != nil && len(*s.TargetARN) < 50 {
+        invalidParams.Add(request.NewErrParamMinLen("TargetARN", 50))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// A JSON object containing an array of ChapInfo objects.
+type DescribeChapCredentialsOutput struct {
+    _ struct{} `type:"structure"`
+
+    // An array of ChapInfo objects that represent CHAP credentials. Each object
+    // in the array contains CHAP credential information for one target-initiator
+    // pair. If no CHAP credentials are set, an empty array is returned. CHAP credential
+    // information is provided in a JSON object with the following fields:
+    //
+    // InitiatorName: The iSCSI initiator that connects to the target.
+    //
+    // SecretToAuthenticateInitiator: The secret key that the initiator (for
+    // example, the Windows client) must provide to participate in mutual CHAP with
+    // the target.
+    //
+    // SecretToAuthenticateTarget: The secret key that the target must provide
+    // to participate in mutual CHAP with the initiator (e.g. Windows client).
+    //
+    // TargetARN: The Amazon Resource Name (ARN) of the storage volume.
+    ChapCredentials []*ChapInfo `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeChapCredentialsOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeChapCredentialsOutput) GoString() string {
+    return s.String()
+}
+
+// A JSON object containing the id of the gateway.
+type DescribeGatewayInformationInput struct {
+    _ struct{} `type:"structure"`
+
+    // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
+    // to return a list of gateways for your account and region.
+    GatewayARN *string `min:"50" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeGatewayInformationInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeGatewayInformationInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeGatewayInformationInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "DescribeGatewayInformationInput"}
+    if s.GatewayARN == nil {
+        invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
+    }
+    if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
+        invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// A JSON object containing the following fields:
+type DescribeGatewayInformationOutput struct {
+    _ struct{} `type:"structure"`
+
+    // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
+    // to return a list of gateways for your account and region.
+    GatewayARN *string `min:"50" type:"string"`
+
+    // The unique identifier assigned to your gateway during activation. This ID
+    // becomes part of the gateway Amazon Resource Name (ARN), which you use as
+    // input for other operations.
+    GatewayId *string `min:"12" type:"string"`
+
+    // The name you configured for your gateway.
+    GatewayName *string `type:"string"`
+
+    // A NetworkInterface array that contains descriptions of the gateway network
+    // interfaces.
+    GatewayNetworkInterfaces []*NetworkInterface `type:"list"`
+
+    // A value that indicates the operating state of the gateway.
+    GatewayState *string `min:"2" type:"string"`
+
+    // A value that indicates the time zone configured for the gateway.
+    GatewayTimezone *string `min:"3" type:"string"`
+
+    // The type of the gateway.
+    GatewayType *string `min:"2" type:"string"`
+
+    // The date on which the last software update was applied to the gateway. If
+    // the gateway has never been updated, this field does not return a value in
+    // the response.
+    LastSoftwareUpdate *string `min:"1" type:"string"`
+
+    // The date on which an update to the gateway is available. This date is in
+    // the time zone of the gateway. If the gateway is not available for an update,
+    // this field is not returned in the response.
+    NextUpdateAvailabilityDate *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeGatewayInformationOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeGatewayInformationOutput) GoString() string {
+    return s.String()
+}
+
+// A JSON object containing the Amazon Resource Name (ARN) of the gateway.
+type DescribeMaintenanceStartTimeInput struct {
+    _ struct{} `type:"structure"`
+
+    // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
+    // to return a list of gateways for your account and region.
+    GatewayARN *string `min:"50" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeMaintenanceStartTimeInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeMaintenanceStartTimeInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeMaintenanceStartTimeInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "DescribeMaintenanceStartTimeInput"}
+    if s.GatewayARN == nil {
+        invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
+    }
+    if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
+        invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+type DescribeMaintenanceStartTimeOutput struct {
+    _ struct{} `type:"structure"`
+
+    DayOfWeek *int64 `type:"integer"`
+
+    // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
+    // to return a list of gateways for your account and region.
+    GatewayARN *string `min:"50" type:"string"`
+
+    HourOfDay *int64 `type:"integer"`
+
+    MinuteOfHour *int64 `type:"integer"`
+
+    Timezone *string `min:"3" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeMaintenanceStartTimeOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeMaintenanceStartTimeOutput) GoString() string {
+    return s.String()
+}
+
+// A JSON object containing the DescribeSnapshotScheduleInput$VolumeARN of the
+// volume.
+type DescribeSnapshotScheduleInput struct {
+    _ struct{} `type:"structure"`
+
+    // The Amazon Resource Name (ARN) of the volume. Use the ListVolumes operation
+    // to return a list of gateway volumes.
+    VolumeARN *string `min:"50" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeSnapshotScheduleInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeSnapshotScheduleInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeSnapshotScheduleInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "DescribeSnapshotScheduleInput"}
+    if s.VolumeARN == nil {
+        invalidParams.Add(request.NewErrParamRequired("VolumeARN"))
+    }
+    if s.VolumeARN != nil && len(*s.VolumeARN) < 50 {
+        invalidParams.Add(request.NewErrParamMinLen("VolumeARN", 50))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+type DescribeSnapshotScheduleOutput struct {
+    _ struct{} `type:"structure"`
+
+    Description *string `min:"1" type:"string"`
+
+    RecurrenceInHours *int64 `min:"1" type:"integer"`
+
+    StartAt *int64 `type:"integer"`
+
+    Timezone *string `min:"3" type:"string"`
+
+    VolumeARN *string `min:"50" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeSnapshotScheduleOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeSnapshotScheduleOutput) GoString() string {
+    return s.String()
+}
+
+// A JSON object containing a list of DescribeStorediSCSIVolumesInput$VolumeARNs.
+type DescribeStorediSCSIVolumesInput struct {
+    _ struct{} `type:"structure"`
+
+    // An array of strings where each string represents the Amazon Resource Name
+    // (ARN) of a stored volume. All of the specified stored volumes must be from
+    // the same gateway. Use ListVolumes to get volume ARNs for a gateway.
+    VolumeARNs []*string `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeStorediSCSIVolumesInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeStorediSCSIVolumesInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeStorediSCSIVolumesInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "DescribeStorediSCSIVolumesInput"}
+    if s.VolumeARNs == nil {
+        invalidParams.Add(request.NewErrParamRequired("VolumeARNs"))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+type DescribeStorediSCSIVolumesOutput struct {
+    _ struct{} `type:"structure"`
+
+    StorediSCSIVolumes []*StorediSCSIVolume `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeStorediSCSIVolumesOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeStorediSCSIVolumesOutput) GoString() string {
+    return s.String()
+}
+
+// DescribeTapeArchivesInput
+type DescribeTapeArchivesInput struct {
+    _ struct{} `type:"structure"`
+
+    // Specifies that the number of virtual tapes described be limited to the specified
+    // number.
+    Limit *int64 `min:"1" type:"integer"`
+
+    // An opaque string that indicates the position at which to begin describing
+    // virtual tapes.
+    Marker *string `min:"1" type:"string"`
+
+    // Specifies one or more unique Amazon Resource Names (ARNs) that represent
+    // the virtual tapes you want to describe.
+    TapeARNs []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeTapeArchivesInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeTapeArchivesInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeTapeArchivesInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "DescribeTapeArchivesInput"}
+    if s.Limit != nil && *s.Limit < 1 {
+        invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
+    }
+    if s.Marker != nil && len(*s.Marker) < 1 {
+        invalidParams.Add(request.NewErrParamMinLen("Marker", 1))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// DescribeTapeArchivesOutput
+type DescribeTapeArchivesOutput struct {
+    _ struct{} `type:"structure"`
+
+    // An opaque string that indicates the position at which the virtual tapes that
+    // were fetched for description ended. Use this marker in your next request
+    // to fetch the next set of virtual tapes in the virtual tape shelf (VTS). If
+    // there are no more virtual tapes to describe, this field does not appear in
+    // the response.
+    Marker *string `min:"1" type:"string"`
+
+    // An array of virtual tape objects in the virtual tape shelf (VTS). The information
+    // returned includes the Amazon Resource Name (ARN), size, status, and barcode
+    // of each tape, along with the progress of the description.
+    TapeArchives []*TapeArchive `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeTapeArchivesOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeTapeArchivesOutput) GoString() string {
+    return s.String()
+}
+
+// DescribeTapeRecoveryPointsInput
+type DescribeTapeRecoveryPointsInput struct {
+    _ struct{} `type:"structure"`
+
+    // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
+    // to return a list of gateways for your account and region.
+    GatewayARN *string `min:"50" type:"string" required:"true"`
+
+    // Specifies that the number of virtual tape recovery points that are described
+    // be limited to the specified number.
+    Limit *int64 `min:"1" type:"integer"`
+
+    // An opaque string that indicates the position at which to begin describing
+    // the virtual tape recovery points.
+    Marker *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeTapeRecoveryPointsInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeTapeRecoveryPointsInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
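+//
+// A hypothetical marker-based paging sketch (not part of the generated SDK;
+// assumes an initialized *storagegateway.StorageGateway client named svc):
+//
+//    input := &DescribeTapeRecoveryPointsInput{
+//        GatewayARN: aws.String("arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B"),
+//        Limit:      aws.Int64(10),
+//    }
+//    for {
+//        out, err := svc.DescribeTapeRecoveryPoints(input)
+//        if err != nil {
+//            break
+//        }
+//        // ... use out.TapeRecoveryPointInfos ...
+//        if out.Marker == nil { // an absent marker means no more recovery points
+//            break
+//        }
+//        input.Marker = out.Marker
+//    }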
+func (s *DescribeTapeRecoveryPointsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTapeRecoveryPointsInput"} + if s.GatewayARN == nil { + invalidParams.Add(request.NewErrParamRequired("GatewayARN")) + } + if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { + invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// DescribeTapeRecoveryPointsOutput +type DescribeTapeRecoveryPointsOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string"` + + // An opaque string that indicates the position at which the virtual tape recovery + // points that were listed for description ended. + // + // Use this marker in your next request to list the next set of virtual tape + // recovery points in the list. If there are no more recovery points to describe, + // this field does not appear in the response. + Marker *string `min:"1" type:"string"` + + // An array of TapeRecoveryPointInfos that are available for the specified gateway. + TapeRecoveryPointInfos []*TapeRecoveryPointInfo `type:"list"` +} + +// String returns the string representation +func (s DescribeTapeRecoveryPointsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTapeRecoveryPointsOutput) GoString() string { + return s.String() +} + +// DescribeTapesInput +type DescribeTapesInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string" required:"true"` + + // Specifies that the number of virtual tapes described be limited to the specified + // number. + // + // Amazon Web Services may impose its own limit, if this field is not set. + Limit *int64 `min:"1" type:"integer"` + + // A marker value, obtained in a previous call to DescribeTapes. This marker + // indicates which page of results to retrieve. + // + // If not specified, the first page of results is retrieved. + Marker *string `min:"1" type:"string"` + + // Specifies one or more unique Amazon Resource Names (ARNs) that represent + // the virtual tapes you want to describe. If this parameter is not specified, + // AWS Storage Gateway returns a description of all virtual tapes associated + // with the specified gateway. + TapeARNs []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeTapesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTapesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
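+//
+// For illustration (a hypothetical sketch; svc is assumed to be a Storage
+// Gateway client value and gatewayARN a placeholder ARN), Marker-based
+// pagination follows the usual pattern:
+//
+// input := &storagegateway.DescribeTapesInput{GatewayARN: aws.String(gatewayARN)}
+// for {
+// out, err := svc.DescribeTapes(input)
+// if err != nil {
+// return err
+// }
+// // consume out.Tapes here
+// if out.Marker == nil {
+// break // no marker in the response means no more results
+// }
+// input.Marker = out.Marker
+// }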
+func (s *DescribeTapesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTapesInput"} + if s.GatewayARN == nil { + invalidParams.Add(request.NewErrParamRequired("GatewayARN")) + } + if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { + invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// DescribeTapesOutput +type DescribeTapesOutput struct { + _ struct{} `type:"structure"` + + // An opaque string which can be used as part of a subsequent DescribeTapes + // call to retrieve the next page of results. + // + // If a response does not contain a marker, then there are no more results + // to be retrieved. + Marker *string `min:"1" type:"string"` + + // An array of virtual tape descriptions. + Tapes []*Tape `type:"list"` +} + +// String returns the string representation +func (s DescribeTapesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTapesOutput) GoString() string { + return s.String() +} + +type DescribeUploadBufferInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeUploadBufferInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeUploadBufferInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeUploadBufferInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeUploadBufferInput"} + if s.GatewayARN == nil { + invalidParams.Add(request.NewErrParamRequired("GatewayARN")) + } + if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { + invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeUploadBufferOutput struct { + _ struct{} `type:"structure"` + + DiskIds []*string `type:"list"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string"` + + UploadBufferAllocatedInBytes *int64 `type:"long"` + + UploadBufferUsedInBytes *int64 `type:"long"` +} + +// String returns the string representation +func (s DescribeUploadBufferOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeUploadBufferOutput) GoString() string { + return s.String() +} + +// DescribeVTLDevicesInput +type DescribeVTLDevicesInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string" required:"true"` + + // Specifies that the number of VTL devices described be limited to the specified + // number. 
+ Limit *int64 `min:"1" type:"integer"`
+
+ // An opaque string that indicates the position at which to begin describing
+ // the VTL devices.
+ Marker *string `min:"1" type:"string"`
+
+ // An array of strings, where each string represents the Amazon Resource Name
+ // (ARN) of a VTL device.
+ //
+ // All of the specified VTL devices must be from the same gateway. If no VTL
+ // devices are specified, the result will contain all devices on the specified
+ // gateway.
+ VTLDeviceARNs []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeVTLDevicesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeVTLDevicesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeVTLDevicesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeVTLDevicesInput"}
+ if s.GatewayARN == nil {
+ invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
+ }
+ if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
+ invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
+ }
+ if s.Limit != nil && *s.Limit < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
+ }
+ if s.Marker != nil && len(*s.Marker) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Marker", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// DescribeVTLDevicesOutput
+type DescribeVTLDevicesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
+ // to return a list of gateways for your account and region.
+ GatewayARN *string `min:"50" type:"string"`
+
+ // An opaque string that indicates the position at which the VTL devices that
+ // were fetched for description ended. Use the marker in your next request to
+ // fetch the next set of VTL devices in the list. If there are no more VTL devices
+ // to describe, this field does not appear in the response.
+ Marker *string `min:"1" type:"string"`
+
+ // An array of VTL device objects composed of the Amazon Resource Name (ARN)
+ // of the VTL devices.
+ VTLDevices []*VTLDevice `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeVTLDevicesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeVTLDevicesOutput) GoString() string {
+ return s.String()
+}
+
+// A JSON object containing the Amazon Resource Name (ARN) of the gateway.
+type DescribeWorkingStorageInput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
+ // to return a list of gateways for your account and region.
+ GatewayARN *string `min:"50" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeWorkingStorageInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeWorkingStorageInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeWorkingStorageInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeWorkingStorageInput"}
+ if s.GatewayARN == nil {
+ invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
+ }
+ if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
+ invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// A JSON object containing the following fields:
+type DescribeWorkingStorageOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of the gateway's local disk IDs that are configured as working storage.
+ // Each local disk ID is specified as a string (minimum length of 1 and maximum
+ // length of 300). If no local disks are configured as working storage, then
+ // the DiskIds array is empty.
+ DiskIds []*string `type:"list"`
+
+ // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
+ // to return a list of gateways for your account and region.
+ GatewayARN *string `min:"50" type:"string"`
+
+ // The total working storage in bytes allocated for the gateway. If no working
+ // storage is configured for the gateway, this field returns 0.
+ WorkingStorageAllocatedInBytes *int64 `type:"long"`
+
+ // The total working storage in bytes in use by the gateway. If no working storage
+ // is configured for the gateway, this field returns 0.
+ WorkingStorageUsedInBytes *int64 `type:"long"`
+}
+
+// String returns the string representation
+func (s DescribeWorkingStorageOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeWorkingStorageOutput) GoString() string {
+ return s.String()
+}
+
+// Lists iSCSI information about a VTL device.
+type DeviceiSCSIAttributes struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether mutual CHAP is enabled for the iSCSI target.
+ ChapEnabled *bool `type:"boolean"`
+
+ // The network interface identifier of the VTL device.
+ NetworkInterfaceId *string `type:"string"`
+
+ // The port used to communicate with iSCSI VTL device targets.
+ NetworkInterfacePort *int64 `type:"integer"`
+
+ // Specifies the unique Amazon Resource Name (ARN) that encodes the iSCSI qualified
+ // name (IQN) of a tape drive or media changer target.
+ TargetARN *string `min:"50" type:"string"`
+}
+
+// String returns the string representation
+func (s DeviceiSCSIAttributes) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeviceiSCSIAttributes) GoString() string {
+ return s.String()
+}
+
+// DisableGatewayInput
+type DisableGatewayInput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
+ // to return a list of gateways for your account and region.
+ GatewayARN *string `min:"50" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DisableGatewayInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DisableGatewayInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DisableGatewayInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DisableGatewayInput"}
+ if s.GatewayARN == nil {
+ invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
+ }
+ if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
+ invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// DisableGatewayOutput
+type DisableGatewayOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The unique Amazon Resource Name of the disabled gateway.
+ GatewayARN *string `min:"50" type:"string"`
+}
+
+// String returns the string representation
+func (s DisableGatewayOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DisableGatewayOutput) GoString() string {
+ return s.String()
+}
+
+type Disk struct {
+ _ struct{} `type:"structure"`
+
+ DiskAllocationResource *string `type:"string"`
+
+ DiskAllocationType *string `min:"3" type:"string"`
+
+ DiskId *string `min:"1" type:"string"`
+
+ DiskNode *string `type:"string"`
+
+ DiskPath *string `type:"string"`
+
+ DiskSizeInBytes *int64 `type:"long"`
+
+ DiskStatus *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Disk) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Disk) GoString() string {
+ return s.String()
+}
+
+// Provides additional information about an error that was returned by the service.
+// See the errorCode and errorDetails members for more information about the
+// error.
+type Error struct {
+ _ struct{} `type:"structure"`
+
+ // Additional information about the error.
+ ErrorCode *string `locationName:"errorCode" type:"string" enum:"ErrorCode"`
+
+ // Human-readable text that provides detail about the error that occurred.
+ ErrorDetails map[string]*string `locationName:"errorDetails" type:"map"`
+}
+
+// String returns the string representation
+func (s Error) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Error) GoString() string {
+ return s.String()
+}
+
+// Describes a gateway object.
+type GatewayInfo struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
+ // to return a list of gateways for your account and region.
+ GatewayARN *string `min:"50" type:"string"`
+
+ // The unique identifier assigned to your gateway during activation. This ID
+ // becomes part of the gateway Amazon Resource Name (ARN), which you use as
+ // input for other operations.
+ GatewayId *string `min:"12" type:"string"`
+
+ // The name of the gateway.
+ GatewayName *string `type:"string"`
+
+ // The state of the gateway.
+ //
+ // Valid Values: DISABLED or ACTIVE
+ GatewayOperationalState *string `min:"2" type:"string"`
+
+ // The type of the gateway.
+ GatewayType *string `min:"2" type:"string"`
+}
+
+// String returns the string representation
+func (s GatewayInfo) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GatewayInfo) GoString() string {
+ return s.String()
+}
+
+// A JSON object containing zero or more of the following fields:
+//
+// ListGatewaysInput$Limit
+//
+// ListGatewaysInput$Marker
+type ListGatewaysInput struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies that the list of gateways returned be limited to the specified
+ // number of items.
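+ //
+ // (Illustrative only: a caller might set, for example, Limit: aws.Int64(25);
+ // Validate rejects values below the minimum of 1.)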
+ Limit *int64 `min:"1" type:"integer"`
+
+ // An opaque string that indicates the position at which to begin the returned
+ // list of gateways.
+ Marker *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ListGatewaysInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListGatewaysInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListGatewaysInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListGatewaysInput"}
+ if s.Limit != nil && *s.Limit < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
+ }
+ if s.Marker != nil && len(*s.Marker) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Marker", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type ListGatewaysOutput struct {
+ _ struct{} `type:"structure"`
+
+ Gateways []*GatewayInfo `type:"list"`
+
+ Marker *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ListGatewaysOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListGatewaysOutput) GoString() string {
+ return s.String()
+}
+
+// A JSON object containing the Amazon Resource Name (ARN) of the gateway.
+type ListLocalDisksInput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
+ // to return a list of gateways for your account and region.
+ GatewayARN *string `min:"50" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListLocalDisksInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListLocalDisksInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListLocalDisksInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListLocalDisksInput"}
+ if s.GatewayARN == nil {
+ invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
+ }
+ if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
+ invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type ListLocalDisksOutput struct {
+ _ struct{} `type:"structure"`
+
+ Disks []*Disk `type:"list"`
+
+ // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
+ // to return a list of gateways for your account and region.
+ GatewayARN *string `min:"50" type:"string"`
+}
+
+// String returns the string representation
+func (s ListLocalDisksOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListLocalDisksOutput) GoString() string {
+ return s.String()
+}
+
+// ListTagsForResourceInput
+type ListTagsForResourceInput struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies that the list of tags returned be limited to the specified number
+ // of items.
+ Limit *int64 `min:"1" type:"integer"`
+
+ // An opaque string that indicates the position at which to begin returning
+ // the list of tags.
+ Marker *string `min:"1" type:"string"`
+
+ // The Amazon Resource Name (ARN) of the resource for which you want to list
+ // tags.
+ ResourceARN *string `min:"50" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListTagsForResourceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTagsForResourceInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListTagsForResourceInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"}
+ if s.Limit != nil && *s.Limit < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
+ }
+ if s.Marker != nil && len(*s.Marker) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Marker", 1))
+ }
+ if s.ResourceARN == nil {
+ invalidParams.Add(request.NewErrParamRequired("ResourceARN"))
+ }
+ if s.ResourceARN != nil && len(*s.ResourceARN) < 50 {
+ invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 50))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// ListTagsForResourceOutput
+type ListTagsForResourceOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An opaque string that indicates the position at which to stop returning the
+ // list of tags.
+ Marker *string `min:"1" type:"string"`
+
+ // The Amazon Resource Name (ARN) of the resource for which you want to list
+ // tags.
+ ResourceARN *string `min:"50" type:"string"`
+
+ // An array that contains the tags for the specified resource.
+ Tags []*Tag `type:"list"`
+}
+
+// String returns the string representation
+func (s ListTagsForResourceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTagsForResourceOutput) GoString() string {
+ return s.String()
+}
+
+// A JSON object that contains one or more of the following fields:
+//
+// ListTapesInput$Limit
+//
+// ListTapesInput$Marker
+//
+// ListTapesInput$TapeARNs
+type ListTapesInput struct {
+ _ struct{} `type:"structure"`
+
+ // An optional number limit for the tapes in the list returned by this call.
+ Limit *int64 `min:"1" type:"integer"`
+
+ // A string that indicates the position at which to begin the returned list
+ // of tapes.
+ Marker *string `min:"1" type:"string"`
+
+ // The Amazon Resource Name (ARN) of each of the tapes you want to list. If
+ // you don't specify a tape ARN, the response lists all tapes in both your VTL
+ // and VTS.
+ TapeARNs []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s ListTapesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTapesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListTapesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListTapesInput"}
+ if s.Limit != nil && *s.Limit < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
+ }
+ if s.Marker != nil && len(*s.Marker) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Marker", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// A JSON object containing the following fields:
+//
+// ListTapesOutput$Marker
+//
+// ListTapesOutput$VolumeInfos
+type ListTapesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A string that indicates the position at which to begin returning the next
+ // list of tapes. Use the marker in your next request to continue pagination
+ // of tapes. If there are no more tapes to list, this element does not appear
+ // in the response body.
+ Marker *string `min:"1" type:"string"`
+
+ // An array of TapeInfo objects, where each object describes a single tape.
+ // If there are no tapes in the tape library or VTS, then TapeInfos is an empty
+ // array.
+ TapeInfos []*TapeInfo `type:"list"`
+}
+
+// String returns the string representation
+func (s ListTapesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTapesOutput) GoString() string {
+ return s.String()
+}
+
+// ListVolumeInitiatorsInput
+type ListVolumeInitiatorsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the volume. Use the ListVolumes operation
+ // to return a list of gateway volumes for the gateway.
+ VolumeARN *string `min:"50" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListVolumeInitiatorsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListVolumeInitiatorsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListVolumeInitiatorsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListVolumeInitiatorsInput"}
+ if s.VolumeARN == nil {
+ invalidParams.Add(request.NewErrParamRequired("VolumeARN"))
+ }
+ if s.VolumeARN != nil && len(*s.VolumeARN) < 50 {
+ invalidParams.Add(request.NewErrParamMinLen("VolumeARN", 50))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// ListVolumeInitiatorsOutput
+type ListVolumeInitiatorsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The host names and port numbers of all iSCSI initiators that are connected
+ // to the gateway.
+ Initiators []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s ListVolumeInitiatorsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListVolumeInitiatorsOutput) GoString() string {
+ return s.String()
+}
+
+type ListVolumeRecoveryPointsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
+ // to return a list of gateways for your account and region.
+ GatewayARN *string `min:"50" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListVolumeRecoveryPointsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListVolumeRecoveryPointsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListVolumeRecoveryPointsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListVolumeRecoveryPointsInput"}
+ if s.GatewayARN == nil {
+ invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
+ }
+ if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
+ invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type ListVolumeRecoveryPointsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the gateway.
Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string"` + + VolumeRecoveryPointInfos []*VolumeRecoveryPointInfo `type:"list"` +} + +// String returns the string representation +func (s ListVolumeRecoveryPointsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListVolumeRecoveryPointsOutput) GoString() string { + return s.String() +} + +// A JSON object that contains one or more of the following fields: +// +// ListVolumesInput$Limit +// +// ListVolumesInput$Marker +type ListVolumesInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string"` + + // Specifies that the list of volumes returned be limited to the specified number + // of items. + Limit *int64 `min:"1" type:"integer"` + + // A string that indicates the position at which to begin the returned list + // of volumes. Obtain the marker from the response of a previous List iSCSI + // Volumes request. + Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListVolumesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListVolumesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListVolumesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListVolumesInput"} + if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { + invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListVolumesOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string"` + + Marker *string `min:"1" type:"string"` + + VolumeInfos []*VolumeInfo `type:"list"` +} + +// String returns the string representation +func (s ListVolumesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListVolumesOutput) GoString() string { + return s.String() +} + +// Describes a gateway's network interface. +type NetworkInterface struct { + _ struct{} `type:"structure"` + + // The Internet Protocol version 4 (IPv4) address of the interface. + Ipv4Address *string `type:"string"` + + // The Internet Protocol version 6 (IPv6) address of the interface. Currently + // not supported. + Ipv6Address *string `type:"string"` + + // The Media Access Control (MAC) address of the interface. + // + // This is currently unsupported and will not be returned in output. 
+ MacAddress *string `type:"string"` +} + +// String returns the string representation +func (s NetworkInterface) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkInterface) GoString() string { + return s.String() +} + +// RemoveTagsFromResourceInput +type RemoveTagsFromResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource you want to remove the tags + // from. + ResourceARN *string `min:"50" type:"string" required:"true"` + + // The keys of the tags you want to remove from the specified resource. A tag + // is composed of a key/value pair. + TagKeys []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s RemoveTagsFromResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsFromResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RemoveTagsFromResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemoveTagsFromResourceInput"} + if s.ResourceARN == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceARN")) + } + if s.ResourceARN != nil && len(*s.ResourceARN) < 50 { + invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 50)) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// RemoveTagsFromResourceOutput +type RemoveTagsFromResourceOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource that the tags were removed + // from. + ResourceARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s RemoveTagsFromResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsFromResourceOutput) GoString() string { + return s.String() +} + +type ResetCacheInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s ResetCacheInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetCacheInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ResetCacheInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResetCacheInput"} + if s.GatewayARN == nil { + invalidParams.Add(request.NewErrParamRequired("GatewayARN")) + } + if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { + invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ResetCacheOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. 
+ GatewayARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s ResetCacheOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetCacheOutput) GoString() string { + return s.String() +} + +// RetrieveTapeArchiveInput +type RetrieveTapeArchiveInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway you want to retrieve the virtual + // tape to. Use the ListGateways operation to return a list of gateways for + // your account and region. + // + // You retrieve archived virtual tapes to only one gateway and the gateway + // must be a gateway-VTL. + GatewayARN *string `min:"50" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the virtual tape you want to retrieve from + // the virtual tape shelf (VTS). + TapeARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s RetrieveTapeArchiveInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RetrieveTapeArchiveInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RetrieveTapeArchiveInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RetrieveTapeArchiveInput"} + if s.GatewayARN == nil { + invalidParams.Add(request.NewErrParamRequired("GatewayARN")) + } + if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { + invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) + } + if s.TapeARN == nil { + invalidParams.Add(request.NewErrParamRequired("TapeARN")) + } + if s.TapeARN != nil && len(*s.TapeARN) < 50 { + invalidParams.Add(request.NewErrParamMinLen("TapeARN", 50)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// RetrieveTapeArchiveOutput +type RetrieveTapeArchiveOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the retrieved virtual tape. + TapeARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s RetrieveTapeArchiveOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RetrieveTapeArchiveOutput) GoString() string { + return s.String() +} + +// RetrieveTapeRecoveryPointInput +type RetrieveTapeRecoveryPointInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the virtual tape for which you want to + // retrieve the recovery point. + TapeARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s RetrieveTapeRecoveryPointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RetrieveTapeRecoveryPointInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *RetrieveTapeRecoveryPointInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RetrieveTapeRecoveryPointInput"}
+ if s.GatewayARN == nil {
+ invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
+ }
+ if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
+ invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
+ }
+ if s.TapeARN == nil {
+ invalidParams.Add(request.NewErrParamRequired("TapeARN"))
+ }
+ if s.TapeARN != nil && len(*s.TapeARN) < 50 {
+ invalidParams.Add(request.NewErrParamMinLen("TapeARN", 50))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// RetrieveTapeRecoveryPointOutput
+type RetrieveTapeRecoveryPointOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the virtual tape for which the recovery
+ // point was retrieved.
+ TapeARN *string `min:"50" type:"string"`
+}
+
+// String returns the string representation
+func (s RetrieveTapeRecoveryPointOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RetrieveTapeRecoveryPointOutput) GoString() string {
+ return s.String()
+}
+
+// SetLocalConsolePasswordInput
+type SetLocalConsolePasswordInput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
+ // to return a list of gateways for your account and region.
+ GatewayARN *string `min:"50" type:"string" required:"true"`
+
+ // The password you want to set for your VM local console.
+ LocalConsolePassword *string `min:"6" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s SetLocalConsolePasswordInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetLocalConsolePasswordInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SetLocalConsolePasswordInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "SetLocalConsolePasswordInput"}
+ if s.GatewayARN == nil {
+ invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
+ }
+ if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
+ invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
+ }
+ if s.LocalConsolePassword == nil {
+ invalidParams.Add(request.NewErrParamRequired("LocalConsolePassword"))
+ }
+ if s.LocalConsolePassword != nil && len(*s.LocalConsolePassword) < 6 {
+ invalidParams.Add(request.NewErrParamMinLen("LocalConsolePassword", 6))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type SetLocalConsolePasswordOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
+ // to return a list of gateways for your account and region.
+ GatewayARN *string `min:"50" type:"string"`
+}
+
+// String returns the string representation
+func (s SetLocalConsolePasswordOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetLocalConsolePasswordOutput) GoString() string {
+ return s.String()
+}
+
+// A JSON object containing the Amazon Resource Name (ARN) of the gateway to
+// shut down.
+type ShutdownGatewayInput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
+ // to return a list of gateways for your account and region.
+ GatewayARN *string `min:"50" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ShutdownGatewayInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ShutdownGatewayInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ShutdownGatewayInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ShutdownGatewayInput"}
+ if s.GatewayARN == nil {
+ invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
+ }
+ if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
+ invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// A JSON object containing the Amazon Resource Name (ARN) of the gateway that
+// was shut down.
+type ShutdownGatewayOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
+ // to return a list of gateways for your account and region.
+ GatewayARN *string `min:"50" type:"string"`
+}
+
+// String returns the string representation
+func (s ShutdownGatewayOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ShutdownGatewayOutput) GoString() string {
+ return s.String()
+}
+
+// A JSON object containing the Amazon Resource Name (ARN) of the gateway to
+// start.
+type StartGatewayInput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
+ // to return a list of gateways for your account and region.
+ GatewayARN *string `min:"50" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s StartGatewayInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StartGatewayInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *StartGatewayInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "StartGatewayInput"}
+ if s.GatewayARN == nil {
+ invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
+ }
+ if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
+ invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// A JSON object containing the Amazon Resource Name (ARN) of the gateway that
+// was restarted.
+type StartGatewayOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
+ // to return a list of gateways for your account and region.
+ GatewayARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s StartGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartGatewayOutput) GoString() string { + return s.String() +} + +type StorediSCSIVolume struct { + _ struct{} `type:"structure"` + + PreservedExistingData *bool `type:"boolean"` + + SourceSnapshotId *string `type:"string"` + + VolumeARN *string `min:"50" type:"string"` + + VolumeDiskId *string `min:"1" type:"string"` + + VolumeId *string `min:"12" type:"string"` + + VolumeProgress *float64 `type:"double"` + + VolumeSizeInBytes *int64 `type:"long"` + + VolumeStatus *string `min:"3" type:"string"` + + VolumeType *string `min:"3" type:"string"` + + // Lists iSCSI information about a volume. + VolumeiSCSIAttributes *VolumeiSCSIAttributes `type:"structure"` +} + +// String returns the string representation +func (s StorediSCSIVolume) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StorediSCSIVolume) GoString() string { + return s.String() +} + +type Tag struct { + _ struct{} `type:"structure"` + + Key *string `min:"1" type:"string" required:"true"` + + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Describes a virtual tape object. +type Tape struct { + _ struct{} `type:"structure"` + + // For archiving virtual tapes, indicates how much data remains to be uploaded + // before archiving is complete. + // + // Range: 0 (not started) to 100 (complete). + Progress *float64 `type:"double"` + + // The Amazon Resource Name (ARN) of the virtual tape. + TapeARN *string `min:"50" type:"string"` + + // The barcode that identifies a specific virtual tape. + TapeBarcode *string `min:"7" type:"string"` + + // The size, in bytes, of the virtual tape. + TapeSizeInBytes *int64 `type:"long"` + + // The current state of the virtual tape. + TapeStatus *string `type:"string"` + + // The virtual tape library (VTL) device that the virtual tape is associated + // with. + VTLDevice *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s Tape) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tape) GoString() string { + return s.String() +} + +// Represents a virtual tape that is archived in the virtual tape shelf (VTS). +type TapeArchive struct { + _ struct{} `type:"structure"` + + // The time that the archiving of the virtual tape was completed. + // + // The string format of the completion time is in the ISO8601 extended YYYY-MM-DD'T'HH:MM:SS'Z' + // format. 
+ CompletionTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The Amazon Resource Name (ARN) of the gateway-VTL that the virtual tape is + // being retrieved to. + // + // The virtual tape is retrieved from the virtual tape shelf (VTS). + RetrievedTo *string `min:"50" type:"string"` + + // The Amazon Resource Name (ARN) of an archived virtual tape. + TapeARN *string `min:"50" type:"string"` + + // The barcode that identifies the archived virtual tape. + TapeBarcode *string `min:"7" type:"string"` + + // The size, in bytes, of the archived virtual tape. + TapeSizeInBytes *int64 `type:"long"` + + // The current state of the archived virtual tape. + TapeStatus *string `type:"string"` +} + +// String returns the string representation +func (s TapeArchive) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TapeArchive) GoString() string { + return s.String() +} + +// Describes a virtual tape. +type TapeInfo struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string"` + + // The Amazon Resource Name (ARN) of a virtual tape. + TapeARN *string `min:"50" type:"string"` + + // The barcode that identifies a specific virtual tape. + TapeBarcode *string `min:"7" type:"string"` + + // The size, in bytes, of a virtual tape. + TapeSizeInBytes *int64 `type:"long"` + + // The status of the tape. + TapeStatus *string `type:"string"` +} + +// String returns the string representation +func (s TapeInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TapeInfo) GoString() string { + return s.String() +} + +// Describes a recovery point. +type TapeRecoveryPointInfo struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the virtual tape. + TapeARN *string `min:"50" type:"string"` + + // The time when the point-in-time view of the virtual tape was replicated for + // later recovery. + // + // The string format of the tape recovery point time is in the ISO8601 extended + // YYYY-MM-DD'T'HH:MM:SS'Z' format. + TapeRecoveryPointTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The size, in bytes, of the virtual tapes to recover. + TapeSizeInBytes *int64 `type:"long"` + + TapeStatus *string `type:"string"` +} + +// String returns the string representation +func (s TapeRecoveryPointInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TapeRecoveryPointInfo) GoString() string { + return s.String() +} + +// A JSON object containing one or more of the following fields: +// +// UpdateBandwidthRateLimitInput$AverageDownloadRateLimitInBitsPerSec +// +// UpdateBandwidthRateLimitInput$AverageUploadRateLimitInBitsPerSec +type UpdateBandwidthRateLimitInput struct { + _ struct{} `type:"structure"` + + // The average download bandwidth rate limit in bits per second. + AverageDownloadRateLimitInBitsPerSec *int64 `min:"102400" type:"long"` + + // The average upload bandwidth rate limit in bits per second. + AverageUploadRateLimitInBitsPerSec *int64 `min:"51200" type:"long"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. 
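+ //
+ // (A hypothetical sketch of a complete request, honoring the documented
+ // minimums of 102400 bits/s download and 51200 bits/s upload; gatewayARN is
+ // a placeholder:
+ //
+ // input := &storagegateway.UpdateBandwidthRateLimitInput{
+ // GatewayARN: aws.String(gatewayARN),
+ // AverageDownloadRateLimitInBitsPerSec: aws.Int64(10 * 1024 * 1024),
+ // AverageUploadRateLimitInBitsPerSec: aws.Int64(5 * 1024 * 1024),
+ // }
+ //
+ // Both rate fields are optional, but a request would normally set at least
+ // one of them.)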
+ GatewayARN *string `min:"50" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateBandwidthRateLimitInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateBandwidthRateLimitInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateBandwidthRateLimitInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UpdateBandwidthRateLimitInput"}
+ if s.AverageDownloadRateLimitInBitsPerSec != nil && *s.AverageDownloadRateLimitInBitsPerSec < 102400 {
+ invalidParams.Add(request.NewErrParamMinValue("AverageDownloadRateLimitInBitsPerSec", 102400))
+ }
+ if s.AverageUploadRateLimitInBitsPerSec != nil && *s.AverageUploadRateLimitInBitsPerSec < 51200 {
+ invalidParams.Add(request.NewErrParamMinValue("AverageUploadRateLimitInBitsPerSec", 51200))
+ }
+ if s.GatewayARN == nil {
+ invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
+ }
+ if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
+ invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// A JSON object containing the Amazon Resource Name (ARN) of the gateway whose
+// throttle information was updated.
+type UpdateBandwidthRateLimitOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
+ // to return a list of gateways for your account and region.
+ GatewayARN *string `min:"50" type:"string"`
+}
+
+// String returns the string representation
+func (s UpdateBandwidthRateLimitOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateBandwidthRateLimitOutput) GoString() string {
+ return s.String()
+}
+
+// A JSON object containing one or more of the following fields:
+//
+// UpdateChapCredentialsInput$InitiatorName
+//
+// UpdateChapCredentialsInput$SecretToAuthenticateInitiator
+//
+// UpdateChapCredentialsInput$SecretToAuthenticateTarget
+//
+// UpdateChapCredentialsInput$TargetARN
+type UpdateChapCredentialsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The iSCSI initiator that connects to the target.
+ InitiatorName *string `min:"1" type:"string" required:"true"`
+
+ // The secret key that the initiator (for example, the Windows client) must
+ // provide to participate in mutual CHAP with the target.
+ //
+ // The secret key must be between 12 and 16 bytes when encoded in UTF-8.
+ SecretToAuthenticateInitiator *string `min:"1" type:"string" required:"true"`
+
+ // The secret key that the target must provide to participate in mutual CHAP
+ // with the initiator (e.g., the Windows client).
+ //
+ // Byte constraints: Minimum bytes of 12. Maximum bytes of 16.
+ //
+ // The secret key must be between 12 and 16 bytes when encoded in UTF-8.
+ SecretToAuthenticateTarget *string `min:"1" type:"string"`
+
+ // The Amazon Resource Name (ARN) of the iSCSI volume target. Use the DescribeStorediSCSIVolumes
+ // operation to return the TargetARN for a specified VolumeARN.
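+ //
+ // (Illustrative sketch with placeholder values; note the 12-16 byte constraint
+ // on the UTF-8 encoded secrets described above:
+ //
+ // input := &storagegateway.UpdateChapCredentialsInput{
+ // TargetARN: aws.String(targetARN),
+ // InitiatorName: aws.String("iqn.1991-05.com.example:initiator"),
+ // SecretToAuthenticateInitiator: aws.String("secret12chars"), // 13 bytes
+ // }
+ //
+ // targetARN stands in for a real volume target ARN.)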
+ TargetARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateChapCredentialsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateChapCredentialsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateChapCredentialsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateChapCredentialsInput"} + if s.InitiatorName == nil { + invalidParams.Add(request.NewErrParamRequired("InitiatorName")) + } + if s.InitiatorName != nil && len(*s.InitiatorName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InitiatorName", 1)) + } + if s.SecretToAuthenticateInitiator == nil { + invalidParams.Add(request.NewErrParamRequired("SecretToAuthenticateInitiator")) + } + if s.SecretToAuthenticateInitiator != nil && len(*s.SecretToAuthenticateInitiator) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SecretToAuthenticateInitiator", 1)) + } + if s.SecretToAuthenticateTarget != nil && len(*s.SecretToAuthenticateTarget) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SecretToAuthenticateTarget", 1)) + } + if s.TargetARN == nil { + invalidParams.Add(request.NewErrParamRequired("TargetARN")) + } + if s.TargetARN != nil && len(*s.TargetARN) < 50 { + invalidParams.Add(request.NewErrParamMinLen("TargetARN", 50)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A JSON object containing the following fields: +type UpdateChapCredentialsOutput struct { + _ struct{} `type:"structure"` + + // The iSCSI initiator that connects to the target. This is the same initiator + // name specified in the request. + InitiatorName *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the target. This is the same target specified + // in the request. + TargetARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s UpdateChapCredentialsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateChapCredentialsOutput) GoString() string { + return s.String() +} + +type UpdateGatewayInformationInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string" required:"true"` + + // The name you configured for your gateway. + GatewayName *string `min:"2" type:"string"` + + GatewayTimezone *string `min:"3" type:"string"` +} + +// String returns the string representation +func (s UpdateGatewayInformationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateGatewayInformationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateGatewayInformationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UpdateGatewayInformationInput"}
+ if s.GatewayARN == nil {
+ invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
+ }
+ if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
+ invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
+ }
+ if s.GatewayName != nil && len(*s.GatewayName) < 2 {
+ invalidParams.Add(request.NewErrParamMinLen("GatewayName", 2))
+ }
+ if s.GatewayTimezone != nil && len(*s.GatewayTimezone) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("GatewayTimezone", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// A JSON object containing the ARN of the gateway that was updated.
+type UpdateGatewayInformationOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
+ // to return a list of gateways for your account and region.
+ GatewayARN *string `min:"50" type:"string"`
+
+ GatewayName *string `type:"string"`
+}
+
+// String returns the string representation
+func (s UpdateGatewayInformationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateGatewayInformationOutput) GoString() string {
+ return s.String()
+}
+
+// A JSON object containing the Amazon Resource Name (ARN) of the gateway to
+// update.
+type UpdateGatewaySoftwareNowInput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
+ // to return a list of gateways for your account and region.
+ GatewayARN *string `min:"50" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateGatewaySoftwareNowInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateGatewaySoftwareNowInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateGatewaySoftwareNowInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UpdateGatewaySoftwareNowInput"}
+ if s.GatewayARN == nil {
+ invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
+ }
+ if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
+ invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// A JSON object containing the Amazon Resource Name (ARN) of the gateway that
+// was updated.
+type UpdateGatewaySoftwareNowOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
+ // to return a list of gateways for your account and region.
+ GatewayARN *string `min:"50" type:"string"`
+}
+
+// String returns the string representation
+func (s UpdateGatewaySoftwareNowOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateGatewaySoftwareNowOutput) GoString() string {
+ return s.String()
+}
+
+// A JSON object containing the following fields:
+//
+// UpdateMaintenanceStartTimeInput$DayOfWeek
+//
+// UpdateMaintenanceStartTimeInput$HourOfDay
+//
+// UpdateMaintenanceStartTimeInput$MinuteOfHour
+type UpdateMaintenanceStartTimeInput struct {
+ _ struct{} `type:"structure"`
+
+ // The maintenance start time day of the week.
+ DayOfWeek *int64 `type:"integer" required:"true"`
+
+ // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
Use the ListGateways operation
+	// to return a list of gateways for your account and region.
+	GatewayARN *string `min:"50" type:"string" required:"true"`
+
+	// The hour component of the maintenance start time represented as hh, where
+	// hh is the hour (00 to 23). The hour of the day is in the time zone of the
+	// gateway.
+	HourOfDay *int64 `type:"integer" required:"true"`
+
+	// The minute component of the maintenance start time represented as mm, where
+	// mm is the minute (00 to 59). The minute of the hour is in the time zone of
+	// the gateway.
+	MinuteOfHour *int64 `type:"integer" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateMaintenanceStartTimeInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateMaintenanceStartTimeInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateMaintenanceStartTimeInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "UpdateMaintenanceStartTimeInput"}
+	if s.DayOfWeek == nil {
+		invalidParams.Add(request.NewErrParamRequired("DayOfWeek"))
+	}
+	if s.GatewayARN == nil {
+		invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
+	}
+	if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
+		invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
+	}
+	if s.HourOfDay == nil {
+		invalidParams.Add(request.NewErrParamRequired("HourOfDay"))
+	}
+	if s.MinuteOfHour == nil {
+		invalidParams.Add(request.NewErrParamRequired("MinuteOfHour"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// A JSON object containing the Amazon Resource Name (ARN) of the gateway
+// whose maintenance start time is updated.
+type UpdateMaintenanceStartTimeOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
+	// to return a list of gateways for your account and region.
+	GatewayARN *string `min:"50" type:"string"`
+}
+
+// String returns the string representation
+func (s UpdateMaintenanceStartTimeOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateMaintenanceStartTimeOutput) GoString() string {
+	return s.String()
+}
+
+// A JSON object containing one or more of the following fields:
+//
+//    UpdateSnapshotScheduleInput$Description
+//
+//    UpdateSnapshotScheduleInput$RecurrenceInHours
+//
+//    UpdateSnapshotScheduleInput$StartAt
+//
+//    UpdateSnapshotScheduleInput$VolumeARN
+type UpdateSnapshotScheduleInput struct {
+	_ struct{} `type:"structure"`
+
+	// Optional description of the snapshot that overwrites the existing description.
+	Description *string `min:"1" type:"string"`
+
+	// Frequency of snapshots. Specify the number of hours between snapshots.
+	RecurrenceInHours *int64 `min:"1" type:"integer" required:"true"`
+
+	// The hour of the day at which the snapshot schedule begins represented as
+	// hh, where hh is the hour (0 to 23). The hour of the day is in the time zone
+	// of the gateway.
+	StartAt *int64 `type:"integer" required:"true"`
+
+	// The Amazon Resource Name (ARN) of the volume. Use the ListVolumes operation
+	// to return a list of gateway volumes.
+	VolumeARN *string `min:"50" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateSnapshotScheduleInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateSnapshotScheduleInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateSnapshotScheduleInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "UpdateSnapshotScheduleInput"}
+	if s.Description != nil && len(*s.Description) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Description", 1))
+	}
+	if s.RecurrenceInHours == nil {
+		invalidParams.Add(request.NewErrParamRequired("RecurrenceInHours"))
+	}
+	if s.RecurrenceInHours != nil && *s.RecurrenceInHours < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("RecurrenceInHours", 1))
+	}
+	if s.StartAt == nil {
+		invalidParams.Add(request.NewErrParamRequired("StartAt"))
+	}
+	if s.VolumeARN == nil {
+		invalidParams.Add(request.NewErrParamRequired("VolumeARN"))
+	}
+	if s.VolumeARN != nil && len(*s.VolumeARN) < 50 {
+		invalidParams.Add(request.NewErrParamMinLen("VolumeARN", 50))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// A JSON object containing the Amazon Resource Name (ARN) of the updated
+// storage volume.
+type UpdateSnapshotScheduleOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) of the volume.
+	VolumeARN *string `min:"50" type:"string"`
+}
+
+// String returns the string representation
+func (s UpdateSnapshotScheduleOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateSnapshotScheduleOutput) GoString() string {
+	return s.String()
+}
+
+type UpdateVTLDeviceTypeInput struct {
+	_ struct{} `type:"structure"`
+
+	// The type of medium changer you want to select.
+	//
+	// Valid Values: "STK-L700", "AWS-Gateway-VTL"
+	DeviceType *string `min:"2" type:"string" required:"true"`
+
+	// The Amazon Resource Name (ARN) of the medium changer you want to select.
+	VTLDeviceARN *string `min:"50" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateVTLDeviceTypeInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateVTLDeviceTypeInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateVTLDeviceTypeInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "UpdateVTLDeviceTypeInput"}
+	if s.DeviceType == nil {
+		invalidParams.Add(request.NewErrParamRequired("DeviceType"))
+	}
+	if s.DeviceType != nil && len(*s.DeviceType) < 2 {
+		invalidParams.Add(request.NewErrParamMinLen("DeviceType", 2))
+	}
+	if s.VTLDeviceARN == nil {
+		invalidParams.Add(request.NewErrParamRequired("VTLDeviceARN"))
+	}
+	if s.VTLDeviceARN != nil && len(*s.VTLDeviceARN) < 50 {
+		invalidParams.Add(request.NewErrParamMinLen("VTLDeviceARN", 50))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// UpdateVTLDeviceTypeOutput
+type UpdateVTLDeviceTypeOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) of the medium changer you have selected.
+	VTLDeviceARN *string `min:"50" type:"string"`
+}
+
+// String returns the string representation
+func (s UpdateVTLDeviceTypeOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateVTLDeviceTypeOutput) GoString() string {
+	return s.String()
+}
+
+// Represents a device object associated with a gateway-VTL.
+type VTLDevice struct {
+	_ struct{} `type:"structure"`
+
+	// A list of iSCSI information about a VTL device.
+	DeviceiSCSIAttributes *DeviceiSCSIAttributes `type:"structure"`
+
+	// Specifies the unique Amazon Resource Name (ARN) of the device (tape drive
+	// or media changer).
+	VTLDeviceARN *string `min:"50" type:"string"`
+
+	// The product identifier of the VTL device.
+	VTLDeviceProductIdentifier *string `type:"string"`
+
+	// The type of the VTL device.
+	VTLDeviceType *string `type:"string"`
+
+	// The vendor of the VTL device.
+	VTLDeviceVendor *string `type:"string"`
+}
+
+// String returns the string representation
+func (s VTLDevice) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s VTLDevice) GoString() string {
+	return s.String()
+}
+
+// Describes a storage volume object.
+type VolumeInfo struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
+	// to return a list of gateways for your account and region.
+	GatewayARN *string `min:"50" type:"string"`
+
+	// The unique identifier assigned to your gateway during activation. This ID
+	// becomes part of the gateway Amazon Resource Name (ARN), which you use as
+	// input for other operations.
+	//
+	// Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens
+	// (-).
+	GatewayId *string `min:"12" type:"string"`
+
+	// The Amazon Resource Name (ARN) for the storage volume. For example, the following
+	// is a valid ARN:
+	//
+	// arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABB
+	//
+	// Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens
+	// (-).
+	VolumeARN *string `min:"50" type:"string"`
+
+	// The unique identifier assigned to the volume. This ID becomes part of the
+	// volume Amazon Resource Name (ARN), which you use as input for other operations.
+	//
+	// Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens
+	// (-).
+	VolumeId *string `min:"12" type:"string"`
+
+	// The size, in bytes, of the volume.
+	VolumeSizeInBytes *int64 `type:"long"`
+
+	// The type of the volume.
+	VolumeType *string `min:"3" type:"string"`
+}
+
+// String returns the string representation
+func (s VolumeInfo) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s VolumeInfo) GoString() string {
+	return s.String()
+}
+
+// Describes a storage volume recovery point object.
+type VolumeRecoveryPointInfo struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) of the volume.
+	VolumeARN *string `min:"50" type:"string"`
+
+	// The time the recovery point was taken.
+	VolumeRecoveryPointTime *string `type:"string"`
+
+	// The size of the volume in bytes.
+	VolumeSizeInBytes *int64 `type:"long"`
+
+	// The size of the data stored on the volume in bytes.
+	VolumeUsageInBytes *int64 `type:"long"`
+}
+
+// String returns the string representation
+func (s VolumeRecoveryPointInfo) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s VolumeRecoveryPointInfo) GoString() string {
+	return s.String()
+}
+
+// Lists iSCSI information about a volume.
+type VolumeiSCSIAttributes struct {
+	_ struct{} `type:"structure"`
+
+	// Indicates whether mutual CHAP is enabled for the iSCSI target.
+ ChapEnabled *bool `type:"boolean"` + + // The logical disk number. + LunNumber *int64 `min:"1" type:"integer"` + + // The network interface identifier. + NetworkInterfaceId *string `type:"string"` + + // The port used to communicate with iSCSI targets. + NetworkInterfacePort *int64 `type:"integer"` + + // The Amazon Resource Name (ARN) of the volume target. + TargetARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s VolumeiSCSIAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeiSCSIAttributes) GoString() string { + return s.String() +} + +const ( + // @enum ErrorCode + ErrorCodeActivationKeyExpired = "ActivationKeyExpired" + // @enum ErrorCode + ErrorCodeActivationKeyInvalid = "ActivationKeyInvalid" + // @enum ErrorCode + ErrorCodeActivationKeyNotFound = "ActivationKeyNotFound" + // @enum ErrorCode + ErrorCodeGatewayInternalError = "GatewayInternalError" + // @enum ErrorCode + ErrorCodeGatewayNotConnected = "GatewayNotConnected" + // @enum ErrorCode + ErrorCodeGatewayNotFound = "GatewayNotFound" + // @enum ErrorCode + ErrorCodeGatewayProxyNetworkConnectionBusy = "GatewayProxyNetworkConnectionBusy" + // @enum ErrorCode + ErrorCodeAuthenticationFailure = "AuthenticationFailure" + // @enum ErrorCode + ErrorCodeBandwidthThrottleScheduleNotFound = "BandwidthThrottleScheduleNotFound" + // @enum ErrorCode + ErrorCodeBlocked = "Blocked" + // @enum ErrorCode + ErrorCodeCannotExportSnapshot = "CannotExportSnapshot" + // @enum ErrorCode + ErrorCodeChapCredentialNotFound = "ChapCredentialNotFound" + // @enum ErrorCode + ErrorCodeDiskAlreadyAllocated = "DiskAlreadyAllocated" + // @enum ErrorCode + ErrorCodeDiskDoesNotExist = "DiskDoesNotExist" + // @enum ErrorCode + ErrorCodeDiskSizeGreaterThanVolumeMaxSize = "DiskSizeGreaterThanVolumeMaxSize" + // @enum ErrorCode + ErrorCodeDiskSizeLessThanVolumeSize = "DiskSizeLessThanVolumeSize" + // @enum ErrorCode + ErrorCodeDiskSizeNotGigAligned = "DiskSizeNotGigAligned" + // @enum ErrorCode + ErrorCodeDuplicateCertificateInfo = "DuplicateCertificateInfo" + // @enum ErrorCode + ErrorCodeDuplicateSchedule = "DuplicateSchedule" + // @enum ErrorCode + ErrorCodeEndpointNotFound = "EndpointNotFound" + // @enum ErrorCode + ErrorCodeIamnotSupported = "IAMNotSupported" + // @enum ErrorCode + ErrorCodeInitiatorInvalid = "InitiatorInvalid" + // @enum ErrorCode + ErrorCodeInitiatorNotFound = "InitiatorNotFound" + // @enum ErrorCode + ErrorCodeInternalError = "InternalError" + // @enum ErrorCode + ErrorCodeInvalidGateway = "InvalidGateway" + // @enum ErrorCode + ErrorCodeInvalidEndpoint = "InvalidEndpoint" + // @enum ErrorCode + ErrorCodeInvalidParameters = "InvalidParameters" + // @enum ErrorCode + ErrorCodeInvalidSchedule = "InvalidSchedule" + // @enum ErrorCode + ErrorCodeLocalStorageLimitExceeded = "LocalStorageLimitExceeded" + // @enum ErrorCode + ErrorCodeLunAlreadyAllocated = "LunAlreadyAllocated " + // @enum ErrorCode + ErrorCodeLunInvalid = "LunInvalid" + // @enum ErrorCode + ErrorCodeMaximumContentLengthExceeded = "MaximumContentLengthExceeded" + // @enum ErrorCode + ErrorCodeMaximumTapeCartridgeCountExceeded = "MaximumTapeCartridgeCountExceeded" + // @enum ErrorCode + ErrorCodeMaximumVolumeCountExceeded = "MaximumVolumeCountExceeded" + // @enum ErrorCode + ErrorCodeNetworkConfigurationChanged = "NetworkConfigurationChanged" + // @enum ErrorCode + ErrorCodeNoDisksAvailable = "NoDisksAvailable" + // @enum ErrorCode + ErrorCodeNotImplemented = 
"NotImplemented" + // @enum ErrorCode + ErrorCodeNotSupported = "NotSupported" + // @enum ErrorCode + ErrorCodeOperationAborted = "OperationAborted" + // @enum ErrorCode + ErrorCodeOutdatedGateway = "OutdatedGateway" + // @enum ErrorCode + ErrorCodeParametersNotImplemented = "ParametersNotImplemented" + // @enum ErrorCode + ErrorCodeRegionInvalid = "RegionInvalid" + // @enum ErrorCode + ErrorCodeRequestTimeout = "RequestTimeout" + // @enum ErrorCode + ErrorCodeServiceUnavailable = "ServiceUnavailable" + // @enum ErrorCode + ErrorCodeSnapshotDeleted = "SnapshotDeleted" + // @enum ErrorCode + ErrorCodeSnapshotIdInvalid = "SnapshotIdInvalid" + // @enum ErrorCode + ErrorCodeSnapshotInProgress = "SnapshotInProgress" + // @enum ErrorCode + ErrorCodeSnapshotNotFound = "SnapshotNotFound" + // @enum ErrorCode + ErrorCodeSnapshotScheduleNotFound = "SnapshotScheduleNotFound" + // @enum ErrorCode + ErrorCodeStagingAreaFull = "StagingAreaFull" + // @enum ErrorCode + ErrorCodeStorageFailure = "StorageFailure" + // @enum ErrorCode + ErrorCodeTapeCartridgeNotFound = "TapeCartridgeNotFound" + // @enum ErrorCode + ErrorCodeTargetAlreadyExists = "TargetAlreadyExists" + // @enum ErrorCode + ErrorCodeTargetInvalid = "TargetInvalid" + // @enum ErrorCode + ErrorCodeTargetNotFound = "TargetNotFound" + // @enum ErrorCode + ErrorCodeUnauthorizedOperation = "UnauthorizedOperation" + // @enum ErrorCode + ErrorCodeVolumeAlreadyExists = "VolumeAlreadyExists" + // @enum ErrorCode + ErrorCodeVolumeIdInvalid = "VolumeIdInvalid" + // @enum ErrorCode + ErrorCodeVolumeInUse = "VolumeInUse" + // @enum ErrorCode + ErrorCodeVolumeNotFound = "VolumeNotFound" + // @enum ErrorCode + ErrorCodeVolumeNotReady = "VolumeNotReady" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/storagegateway/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/storagegateway/examples_test.go new file mode 100644 index 000000000..5f03167fa --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/storagegateway/examples_test.go @@ -0,0 +1,1184 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package storagegateway_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/storagegateway" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleStorageGateway_ActivateGateway() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.ActivateGatewayInput{ + ActivationKey: aws.String("ActivationKey"), // Required + GatewayName: aws.String("GatewayName"), // Required + GatewayRegion: aws.String("RegionId"), // Required + GatewayTimezone: aws.String("GatewayTimezone"), // Required + GatewayType: aws.String("GatewayType"), + MediumChangerType: aws.String("MediumChangerType"), + TapeDriveType: aws.String("TapeDriveType"), + } + resp, err := svc.ActivateGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_AddCache() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.AddCacheInput{ + DiskIds: []*string{ // Required + aws.String("DiskId"), // Required + // More values... + }, + GatewayARN: aws.String("GatewayARN"), // Required + } + resp, err := svc.AddCache(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_AddTagsToResource() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.AddTagsToResourceInput{ + ResourceARN: aws.String("ResourceARN"), // Required + Tags: []*storagegateway.Tag{ // Required + { // Required + Key: aws.String("TagKey"), // Required + Value: aws.String("TagValue"), // Required + }, + // More values... + }, + } + resp, err := svc.AddTagsToResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_AddUploadBuffer() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.AddUploadBufferInput{ + DiskIds: []*string{ // Required + aws.String("DiskId"), // Required + // More values... + }, + GatewayARN: aws.String("GatewayARN"), // Required + } + resp, err := svc.AddUploadBuffer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_AddWorkingStorage() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.AddWorkingStorageInput{ + DiskIds: []*string{ // Required + aws.String("DiskId"), // Required + // More values... + }, + GatewayARN: aws.String("GatewayARN"), // Required + } + resp, err := svc.AddWorkingStorage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_CancelArchival() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.CancelArchivalInput{ + GatewayARN: aws.String("GatewayARN"), // Required + TapeARN: aws.String("TapeARN"), // Required + } + resp, err := svc.CancelArchival(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_CancelRetrieval() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.CancelRetrievalInput{ + GatewayARN: aws.String("GatewayARN"), // Required + TapeARN: aws.String("TapeARN"), // Required + } + resp, err := svc.CancelRetrieval(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_CreateCachediSCSIVolume() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.CreateCachediSCSIVolumeInput{ + ClientToken: aws.String("ClientToken"), // Required + GatewayARN: aws.String("GatewayARN"), // Required + NetworkInterfaceId: aws.String("NetworkInterfaceId"), // Required + TargetName: aws.String("TargetName"), // Required + VolumeSizeInBytes: aws.Int64(1), // Required + SnapshotId: aws.String("SnapshotId"), + } + resp, err := svc.CreateCachediSCSIVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_CreateSnapshot() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.CreateSnapshotInput{ + SnapshotDescription: aws.String("SnapshotDescription"), // Required + VolumeARN: aws.String("VolumeARN"), // Required + } + resp, err := svc.CreateSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_CreateSnapshotFromVolumeRecoveryPoint() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.CreateSnapshotFromVolumeRecoveryPointInput{ + SnapshotDescription: aws.String("SnapshotDescription"), // Required + VolumeARN: aws.String("VolumeARN"), // Required + } + resp, err := svc.CreateSnapshotFromVolumeRecoveryPoint(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_CreateStorediSCSIVolume() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.CreateStorediSCSIVolumeInput{ + DiskId: aws.String("DiskId"), // Required + GatewayARN: aws.String("GatewayARN"), // Required + NetworkInterfaceId: aws.String("NetworkInterfaceId"), // Required + PreserveExistingData: aws.Bool(true), // Required + TargetName: aws.String("TargetName"), // Required + SnapshotId: aws.String("SnapshotId"), + } + resp, err := svc.CreateStorediSCSIVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_CreateTapeWithBarcode() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.CreateTapeWithBarcodeInput{ + GatewayARN: aws.String("GatewayARN"), // Required + TapeBarcode: aws.String("TapeBarcode"), // Required + TapeSizeInBytes: aws.Int64(1), // Required + } + resp, err := svc.CreateTapeWithBarcode(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_CreateTapes() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.CreateTapesInput{ + ClientToken: aws.String("ClientToken"), // Required + GatewayARN: aws.String("GatewayARN"), // Required + NumTapesToCreate: aws.Int64(1), // Required + TapeBarcodePrefix: aws.String("TapeBarcodePrefix"), // Required + TapeSizeInBytes: aws.Int64(1), // Required + } + resp, err := svc.CreateTapes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleStorageGateway_DeleteBandwidthRateLimit() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DeleteBandwidthRateLimitInput{ + BandwidthType: aws.String("BandwidthType"), // Required + GatewayARN: aws.String("GatewayARN"), // Required + } + resp, err := svc.DeleteBandwidthRateLimit(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_DeleteChapCredentials() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DeleteChapCredentialsInput{ + InitiatorName: aws.String("IqnName"), // Required + TargetARN: aws.String("TargetARN"), // Required + } + resp, err := svc.DeleteChapCredentials(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_DeleteGateway() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DeleteGatewayInput{ + GatewayARN: aws.String("GatewayARN"), // Required + } + resp, err := svc.DeleteGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_DeleteSnapshotSchedule() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DeleteSnapshotScheduleInput{ + VolumeARN: aws.String("VolumeARN"), // Required + } + resp, err := svc.DeleteSnapshotSchedule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_DeleteTape() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DeleteTapeInput{ + GatewayARN: aws.String("GatewayARN"), // Required + TapeARN: aws.String("TapeARN"), // Required + } + resp, err := svc.DeleteTape(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_DeleteTapeArchive() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DeleteTapeArchiveInput{ + TapeARN: aws.String("TapeARN"), // Required + } + resp, err := svc.DeleteTapeArchive(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_DeleteVolume() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DeleteVolumeInput{ + VolumeARN: aws.String("VolumeARN"), // Required + } + resp, err := svc.DeleteVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleStorageGateway_DescribeBandwidthRateLimit() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DescribeBandwidthRateLimitInput{ + GatewayARN: aws.String("GatewayARN"), // Required + } + resp, err := svc.DescribeBandwidthRateLimit(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_DescribeCache() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DescribeCacheInput{ + GatewayARN: aws.String("GatewayARN"), // Required + } + resp, err := svc.DescribeCache(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_DescribeCachediSCSIVolumes() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DescribeCachediSCSIVolumesInput{ + VolumeARNs: []*string{ // Required + aws.String("VolumeARN"), // Required + // More values... + }, + } + resp, err := svc.DescribeCachediSCSIVolumes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_DescribeChapCredentials() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DescribeChapCredentialsInput{ + TargetARN: aws.String("TargetARN"), // Required + } + resp, err := svc.DescribeChapCredentials(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_DescribeGatewayInformation() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DescribeGatewayInformationInput{ + GatewayARN: aws.String("GatewayARN"), // Required + } + resp, err := svc.DescribeGatewayInformation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_DescribeMaintenanceStartTime() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DescribeMaintenanceStartTimeInput{ + GatewayARN: aws.String("GatewayARN"), // Required + } + resp, err := svc.DescribeMaintenanceStartTime(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_DescribeSnapshotSchedule() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DescribeSnapshotScheduleInput{ + VolumeARN: aws.String("VolumeARN"), // Required + } + resp, err := svc.DescribeSnapshotSchedule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleStorageGateway_DescribeStorediSCSIVolumes() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DescribeStorediSCSIVolumesInput{ + VolumeARNs: []*string{ // Required + aws.String("VolumeARN"), // Required + // More values... + }, + } + resp, err := svc.DescribeStorediSCSIVolumes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_DescribeTapeArchives() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DescribeTapeArchivesInput{ + Limit: aws.Int64(1), + Marker: aws.String("Marker"), + TapeARNs: []*string{ + aws.String("TapeARN"), // Required + // More values... + }, + } + resp, err := svc.DescribeTapeArchives(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_DescribeTapeRecoveryPoints() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DescribeTapeRecoveryPointsInput{ + GatewayARN: aws.String("GatewayARN"), // Required + Limit: aws.Int64(1), + Marker: aws.String("Marker"), + } + resp, err := svc.DescribeTapeRecoveryPoints(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_DescribeTapes() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DescribeTapesInput{ + GatewayARN: aws.String("GatewayARN"), // Required + Limit: aws.Int64(1), + Marker: aws.String("Marker"), + TapeARNs: []*string{ + aws.String("TapeARN"), // Required + // More values... + }, + } + resp, err := svc.DescribeTapes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_DescribeUploadBuffer() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DescribeUploadBufferInput{ + GatewayARN: aws.String("GatewayARN"), // Required + } + resp, err := svc.DescribeUploadBuffer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_DescribeVTLDevices() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DescribeVTLDevicesInput{ + GatewayARN: aws.String("GatewayARN"), // Required + Limit: aws.Int64(1), + Marker: aws.String("Marker"), + VTLDeviceARNs: []*string{ + aws.String("VTLDeviceARN"), // Required + // More values... + }, + } + resp, err := svc.DescribeVTLDevices(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
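+	// (Illustrative aside, not generated code: Limit and Marker page these
+	// results by hand; the generated Pages variant can walk every page
+	// instead, e.g.
+	//
+	//	svc.DescribeVTLDevicesPages(params,
+	//		func(page *storagegateway.DescribeVTLDevicesOutput, lastPage bool) bool {
+	//			fmt.Println(page.VTLDevices)
+	//			return true // keep iterating
+	//		})
+	// )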
+ fmt.Println(resp) +} + +func ExampleStorageGateway_DescribeWorkingStorage() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DescribeWorkingStorageInput{ + GatewayARN: aws.String("GatewayARN"), // Required + } + resp, err := svc.DescribeWorkingStorage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_DisableGateway() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DisableGatewayInput{ + GatewayARN: aws.String("GatewayARN"), // Required + } + resp, err := svc.DisableGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_ListGateways() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.ListGatewaysInput{ + Limit: aws.Int64(1), + Marker: aws.String("Marker"), + } + resp, err := svc.ListGateways(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_ListLocalDisks() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.ListLocalDisksInput{ + GatewayARN: aws.String("GatewayARN"), // Required + } + resp, err := svc.ListLocalDisks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_ListTagsForResource() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.ListTagsForResourceInput{ + ResourceARN: aws.String("ResourceARN"), // Required + Limit: aws.Int64(1), + Marker: aws.String("Marker"), + } + resp, err := svc.ListTagsForResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_ListTapes() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.ListTapesInput{ + Limit: aws.Int64(1), + Marker: aws.String("Marker"), + TapeARNs: []*string{ + aws.String("TapeARN"), // Required + // More values... + }, + } + resp, err := svc.ListTapes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_ListVolumeInitiators() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.ListVolumeInitiatorsInput{ + VolumeARN: aws.String("VolumeARN"), // Required + } + resp, err := svc.ListVolumeInitiators(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleStorageGateway_ListVolumeRecoveryPoints() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.ListVolumeRecoveryPointsInput{ + GatewayARN: aws.String("GatewayARN"), // Required + } + resp, err := svc.ListVolumeRecoveryPoints(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_ListVolumes() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.ListVolumesInput{ + GatewayARN: aws.String("GatewayARN"), + Limit: aws.Int64(1), + Marker: aws.String("Marker"), + } + resp, err := svc.ListVolumes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_RemoveTagsFromResource() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.RemoveTagsFromResourceInput{ + ResourceARN: aws.String("ResourceARN"), // Required + TagKeys: []*string{ // Required + aws.String("TagKey"), // Required + // More values... + }, + } + resp, err := svc.RemoveTagsFromResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_ResetCache() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.ResetCacheInput{ + GatewayARN: aws.String("GatewayARN"), // Required + } + resp, err := svc.ResetCache(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_RetrieveTapeArchive() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.RetrieveTapeArchiveInput{ + GatewayARN: aws.String("GatewayARN"), // Required + TapeARN: aws.String("TapeARN"), // Required + } + resp, err := svc.RetrieveTapeArchive(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_RetrieveTapeRecoveryPoint() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.RetrieveTapeRecoveryPointInput{ + GatewayARN: aws.String("GatewayARN"), // Required + TapeARN: aws.String("TapeARN"), // Required + } + resp, err := svc.RetrieveTapeRecoveryPoint(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_SetLocalConsolePassword() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.SetLocalConsolePasswordInput{ + GatewayARN: aws.String("GatewayARN"), // Required + LocalConsolePassword: aws.String("LocalConsolePassword"), // Required + } + resp, err := svc.SetLocalConsolePassword(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_ShutdownGateway() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.ShutdownGatewayInput{ + GatewayARN: aws.String("GatewayARN"), // Required + } + resp, err := svc.ShutdownGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_StartGateway() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.StartGatewayInput{ + GatewayARN: aws.String("GatewayARN"), // Required + } + resp, err := svc.StartGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_UpdateBandwidthRateLimit() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.UpdateBandwidthRateLimitInput{ + GatewayARN: aws.String("GatewayARN"), // Required + AverageDownloadRateLimitInBitsPerSec: aws.Int64(1), + AverageUploadRateLimitInBitsPerSec: aws.Int64(1), + } + resp, err := svc.UpdateBandwidthRateLimit(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_UpdateChapCredentials() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.UpdateChapCredentialsInput{ + InitiatorName: aws.String("IqnName"), // Required + SecretToAuthenticateInitiator: aws.String("ChapSecret"), // Required + TargetARN: aws.String("TargetARN"), // Required + SecretToAuthenticateTarget: aws.String("ChapSecret"), + } + resp, err := svc.UpdateChapCredentials(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_UpdateGatewayInformation() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.UpdateGatewayInformationInput{ + GatewayARN: aws.String("GatewayARN"), // Required + GatewayName: aws.String("GatewayName"), + GatewayTimezone: aws.String("GatewayTimezone"), + } + resp, err := svc.UpdateGatewayInformation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_UpdateGatewaySoftwareNow() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.UpdateGatewaySoftwareNowInput{ + GatewayARN: aws.String("GatewayARN"), // Required + } + resp, err := svc.UpdateGatewaySoftwareNow(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+	fmt.Println(resp)
+}
+
+func ExampleStorageGateway_UpdateMaintenanceStartTime() {
+	svc := storagegateway.New(session.New())
+
+	params := &storagegateway.UpdateMaintenanceStartTimeInput{
+		DayOfWeek:    aws.Int64(1),             // Required
+		GatewayARN:   aws.String("GatewayARN"), // Required
+		HourOfDay:    aws.Int64(1),             // Required
+		MinuteOfHour: aws.Int64(1),             // Required
+	}
+	resp, err := svc.UpdateMaintenanceStartTime(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleStorageGateway_UpdateSnapshotSchedule() {
+	svc := storagegateway.New(session.New())
+
+	params := &storagegateway.UpdateSnapshotScheduleInput{
+		RecurrenceInHours: aws.Int64(1),            // Required
+		StartAt:           aws.Int64(1),            // Required
+		VolumeARN:         aws.String("VolumeARN"), // Required
+		Description:       aws.String("Description"),
+	}
+	resp, err := svc.UpdateSnapshotSchedule(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleStorageGateway_UpdateVTLDeviceType() {
+	svc := storagegateway.New(session.New())
+
+	params := &storagegateway.UpdateVTLDeviceTypeInput{
+		DeviceType:   aws.String("DeviceType"),   // Required
+		VTLDeviceARN: aws.String("VTLDeviceARN"), // Required
+	}
+	resp, err := svc.UpdateVTLDeviceType(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/storagegateway/service.go b/vendor/github.com/aws/aws-sdk-go/service/storagegateway/service.go
new file mode 100644
index 000000000..aefbd4945
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/storagegateway/service.go
@@ -0,0 +1,139 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package storagegateway
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+)
+
+// AWS Storage Gateway is the service that connects an on-premises software
+// appliance with cloud-based storage to provide seamless and secure integration
+// between an organization's on-premises IT environment and AWS's storage infrastructure.
+// The service enables you to securely upload data to the AWS cloud for cost-effective
+// backup and rapid disaster recovery.
+//
+// Use the following links to get started using the AWS Storage Gateway Service
+// API Reference:
+//
+// AWS Storage Gateway Required Request Headers (http://docs.aws.amazon.com/storagegateway/latest/userguide/AWSStorageGatewayHTTPRequestsHeaders.html):
+// Describes the required headers that you must send with every POST request
+// to AWS Storage Gateway.
+//
+// Signing Requests (http://docs.aws.amazon.com/storagegateway/latest/userguide/AWSStorageGatewaySigningRequests.html):
+// AWS Storage Gateway requires that you authenticate every request you send;
+// this topic describes how to sign such a request.
+//
+// Error Responses (http://docs.aws.amazon.com/storagegateway/latest/userguide/APIErrorResponses.html):
+// Provides reference information about AWS Storage Gateway errors.
+//
+// Operations in AWS Storage Gateway (http://docs.aws.amazon.com/storagegateway/latest/userguide/AWSStorageGatewayAPIOperations.html):
+// Contains detailed descriptions of all AWS Storage Gateway operations, their
+// request parameters, response elements, possible errors, and examples of requests
+// and responses.
+//
+// AWS Storage Gateway Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/index.html?rande.html):
+// Provides a list of each of the regions and endpoints available for use with
+// AWS Storage Gateway.
+//
+// AWS Storage Gateway resource IDs are in uppercase. When you use these
+// resource IDs with the Amazon EC2 API, EC2 expects resource IDs in lowercase.
+// You must change your resource ID to lowercase to use it with the EC2 API.
+// For example, in Storage Gateway the ID for a volume might be vol-1122AABB.
+// When you use this ID with the EC2 API, you must change it to vol-1122aabb.
+// Otherwise, the EC2 API might not behave as expected.
+//
+// IDs for Storage Gateway volumes and Amazon EBS snapshots created from
+// gateway volumes are changing to a longer format. Starting in December 2016,
+// all new volumes and snapshots will be created with a 17-character string.
+// Starting in April 2016, you will be able to use these longer IDs so you can
+// test your systems with the new format. For more information, see Longer EC2
+// and EBS Resource IDs (https://aws.amazon.com/ec2/faqs/#longer-ids).
+//
+// For example, a volume ARN with the longer volume ID format will look like
+// this:
+//
+// arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABBCCDDEEFFG.
+//
+// A snapshot ID with the longer ID format will look like this: snap-78e226633445566ee.
+//
+// For more information, see Announcement: Heads-up – Longer AWS Storage Gateway
+// volume and snapshot IDs coming in 2016 (https://forums.aws.amazon.com/ann.jspa?annID=3557).
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type StorageGateway struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "storagegateway"
+
+// New creates a new instance of the StorageGateway client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//	// Create a StorageGateway client from just a session.
+//	svc := storagegateway.New(mySession)
+//
+//	// Create a StorageGateway client with additional configuration
+//	svc := storagegateway.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *StorageGateway {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *StorageGateway { + svc := &StorageGateway{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2013-06-30", + JSONVersion: "1.1", + TargetPrefix: "StorageGateway_20130630", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a StorageGateway operation and runs any +// custom request initialization. +func (c *StorageGateway) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/storagegateway/storagegatewayiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/storagegateway/storagegatewayiface/interface.go new file mode 100644 index 000000000..c9b2680ac --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/storagegateway/storagegatewayiface/interface.go @@ -0,0 +1,250 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package storagegatewayiface provides an interface for the AWS Storage Gateway. +package storagegatewayiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/storagegateway" +) + +// StorageGatewayAPI is the interface type for storagegateway.StorageGateway. 
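+// Using StorageGatewayAPI instead of the concrete *storagegateway.StorageGateway
+// lets calling code be unit tested against a stub. An illustrative sketch (the
+// helper below is hypothetical, not part of this package):
+//
+//	func countGateways(api StorageGatewayAPI) (int, error) {
+//		out, err := api.ListGateways(&storagegateway.ListGatewaysInput{})
+//		if err != nil {
+//			return 0, err
+//		}
+//		return len(out.Gateways), nil
+//	}
+//
+// In tests, any type that implements StorageGatewayAPI with canned responses
+// can be passed in place of the real client.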
+type StorageGatewayAPI interface { + ActivateGatewayRequest(*storagegateway.ActivateGatewayInput) (*request.Request, *storagegateway.ActivateGatewayOutput) + + ActivateGateway(*storagegateway.ActivateGatewayInput) (*storagegateway.ActivateGatewayOutput, error) + + AddCacheRequest(*storagegateway.AddCacheInput) (*request.Request, *storagegateway.AddCacheOutput) + + AddCache(*storagegateway.AddCacheInput) (*storagegateway.AddCacheOutput, error) + + AddTagsToResourceRequest(*storagegateway.AddTagsToResourceInput) (*request.Request, *storagegateway.AddTagsToResourceOutput) + + AddTagsToResource(*storagegateway.AddTagsToResourceInput) (*storagegateway.AddTagsToResourceOutput, error) + + AddUploadBufferRequest(*storagegateway.AddUploadBufferInput) (*request.Request, *storagegateway.AddUploadBufferOutput) + + AddUploadBuffer(*storagegateway.AddUploadBufferInput) (*storagegateway.AddUploadBufferOutput, error) + + AddWorkingStorageRequest(*storagegateway.AddWorkingStorageInput) (*request.Request, *storagegateway.AddWorkingStorageOutput) + + AddWorkingStorage(*storagegateway.AddWorkingStorageInput) (*storagegateway.AddWorkingStorageOutput, error) + + CancelArchivalRequest(*storagegateway.CancelArchivalInput) (*request.Request, *storagegateway.CancelArchivalOutput) + + CancelArchival(*storagegateway.CancelArchivalInput) (*storagegateway.CancelArchivalOutput, error) + + CancelRetrievalRequest(*storagegateway.CancelRetrievalInput) (*request.Request, *storagegateway.CancelRetrievalOutput) + + CancelRetrieval(*storagegateway.CancelRetrievalInput) (*storagegateway.CancelRetrievalOutput, error) + + CreateCachediSCSIVolumeRequest(*storagegateway.CreateCachediSCSIVolumeInput) (*request.Request, *storagegateway.CreateCachediSCSIVolumeOutput) + + CreateCachediSCSIVolume(*storagegateway.CreateCachediSCSIVolumeInput) (*storagegateway.CreateCachediSCSIVolumeOutput, error) + + CreateSnapshotRequest(*storagegateway.CreateSnapshotInput) (*request.Request, *storagegateway.CreateSnapshotOutput) + + CreateSnapshot(*storagegateway.CreateSnapshotInput) (*storagegateway.CreateSnapshotOutput, error) + + CreateSnapshotFromVolumeRecoveryPointRequest(*storagegateway.CreateSnapshotFromVolumeRecoveryPointInput) (*request.Request, *storagegateway.CreateSnapshotFromVolumeRecoveryPointOutput) + + CreateSnapshotFromVolumeRecoveryPoint(*storagegateway.CreateSnapshotFromVolumeRecoveryPointInput) (*storagegateway.CreateSnapshotFromVolumeRecoveryPointOutput, error) + + CreateStorediSCSIVolumeRequest(*storagegateway.CreateStorediSCSIVolumeInput) (*request.Request, *storagegateway.CreateStorediSCSIVolumeOutput) + + CreateStorediSCSIVolume(*storagegateway.CreateStorediSCSIVolumeInput) (*storagegateway.CreateStorediSCSIVolumeOutput, error) + + CreateTapeWithBarcodeRequest(*storagegateway.CreateTapeWithBarcodeInput) (*request.Request, *storagegateway.CreateTapeWithBarcodeOutput) + + CreateTapeWithBarcode(*storagegateway.CreateTapeWithBarcodeInput) (*storagegateway.CreateTapeWithBarcodeOutput, error) + + CreateTapesRequest(*storagegateway.CreateTapesInput) (*request.Request, *storagegateway.CreateTapesOutput) + + CreateTapes(*storagegateway.CreateTapesInput) (*storagegateway.CreateTapesOutput, error) + + DeleteBandwidthRateLimitRequest(*storagegateway.DeleteBandwidthRateLimitInput) (*request.Request, *storagegateway.DeleteBandwidthRateLimitOutput) + + DeleteBandwidthRateLimit(*storagegateway.DeleteBandwidthRateLimitInput) (*storagegateway.DeleteBandwidthRateLimitOutput, error) + + 
DeleteChapCredentialsRequest(*storagegateway.DeleteChapCredentialsInput) (*request.Request, *storagegateway.DeleteChapCredentialsOutput) + + DeleteChapCredentials(*storagegateway.DeleteChapCredentialsInput) (*storagegateway.DeleteChapCredentialsOutput, error) + + DeleteGatewayRequest(*storagegateway.DeleteGatewayInput) (*request.Request, *storagegateway.DeleteGatewayOutput) + + DeleteGateway(*storagegateway.DeleteGatewayInput) (*storagegateway.DeleteGatewayOutput, error) + + DeleteSnapshotScheduleRequest(*storagegateway.DeleteSnapshotScheduleInput) (*request.Request, *storagegateway.DeleteSnapshotScheduleOutput) + + DeleteSnapshotSchedule(*storagegateway.DeleteSnapshotScheduleInput) (*storagegateway.DeleteSnapshotScheduleOutput, error) + + DeleteTapeRequest(*storagegateway.DeleteTapeInput) (*request.Request, *storagegateway.DeleteTapeOutput) + + DeleteTape(*storagegateway.DeleteTapeInput) (*storagegateway.DeleteTapeOutput, error) + + DeleteTapeArchiveRequest(*storagegateway.DeleteTapeArchiveInput) (*request.Request, *storagegateway.DeleteTapeArchiveOutput) + + DeleteTapeArchive(*storagegateway.DeleteTapeArchiveInput) (*storagegateway.DeleteTapeArchiveOutput, error) + + DeleteVolumeRequest(*storagegateway.DeleteVolumeInput) (*request.Request, *storagegateway.DeleteVolumeOutput) + + DeleteVolume(*storagegateway.DeleteVolumeInput) (*storagegateway.DeleteVolumeOutput, error) + + DescribeBandwidthRateLimitRequest(*storagegateway.DescribeBandwidthRateLimitInput) (*request.Request, *storagegateway.DescribeBandwidthRateLimitOutput) + + DescribeBandwidthRateLimit(*storagegateway.DescribeBandwidthRateLimitInput) (*storagegateway.DescribeBandwidthRateLimitOutput, error) + + DescribeCacheRequest(*storagegateway.DescribeCacheInput) (*request.Request, *storagegateway.DescribeCacheOutput) + + DescribeCache(*storagegateway.DescribeCacheInput) (*storagegateway.DescribeCacheOutput, error) + + DescribeCachediSCSIVolumesRequest(*storagegateway.DescribeCachediSCSIVolumesInput) (*request.Request, *storagegateway.DescribeCachediSCSIVolumesOutput) + + DescribeCachediSCSIVolumes(*storagegateway.DescribeCachediSCSIVolumesInput) (*storagegateway.DescribeCachediSCSIVolumesOutput, error) + + DescribeChapCredentialsRequest(*storagegateway.DescribeChapCredentialsInput) (*request.Request, *storagegateway.DescribeChapCredentialsOutput) + + DescribeChapCredentials(*storagegateway.DescribeChapCredentialsInput) (*storagegateway.DescribeChapCredentialsOutput, error) + + DescribeGatewayInformationRequest(*storagegateway.DescribeGatewayInformationInput) (*request.Request, *storagegateway.DescribeGatewayInformationOutput) + + DescribeGatewayInformation(*storagegateway.DescribeGatewayInformationInput) (*storagegateway.DescribeGatewayInformationOutput, error) + + DescribeMaintenanceStartTimeRequest(*storagegateway.DescribeMaintenanceStartTimeInput) (*request.Request, *storagegateway.DescribeMaintenanceStartTimeOutput) + + DescribeMaintenanceStartTime(*storagegateway.DescribeMaintenanceStartTimeInput) (*storagegateway.DescribeMaintenanceStartTimeOutput, error) + + DescribeSnapshotScheduleRequest(*storagegateway.DescribeSnapshotScheduleInput) (*request.Request, *storagegateway.DescribeSnapshotScheduleOutput) + + DescribeSnapshotSchedule(*storagegateway.DescribeSnapshotScheduleInput) (*storagegateway.DescribeSnapshotScheduleOutput, error) + + DescribeStorediSCSIVolumesRequest(*storagegateway.DescribeStorediSCSIVolumesInput) (*request.Request, *storagegateway.DescribeStorediSCSIVolumesOutput) + + 
DescribeStorediSCSIVolumes(*storagegateway.DescribeStorediSCSIVolumesInput) (*storagegateway.DescribeStorediSCSIVolumesOutput, error) + + DescribeTapeArchivesRequest(*storagegateway.DescribeTapeArchivesInput) (*request.Request, *storagegateway.DescribeTapeArchivesOutput) + + DescribeTapeArchives(*storagegateway.DescribeTapeArchivesInput) (*storagegateway.DescribeTapeArchivesOutput, error) + + DescribeTapeArchivesPages(*storagegateway.DescribeTapeArchivesInput, func(*storagegateway.DescribeTapeArchivesOutput, bool) bool) error + + DescribeTapeRecoveryPointsRequest(*storagegateway.DescribeTapeRecoveryPointsInput) (*request.Request, *storagegateway.DescribeTapeRecoveryPointsOutput) + + DescribeTapeRecoveryPoints(*storagegateway.DescribeTapeRecoveryPointsInput) (*storagegateway.DescribeTapeRecoveryPointsOutput, error) + + DescribeTapeRecoveryPointsPages(*storagegateway.DescribeTapeRecoveryPointsInput, func(*storagegateway.DescribeTapeRecoveryPointsOutput, bool) bool) error + + DescribeTapesRequest(*storagegateway.DescribeTapesInput) (*request.Request, *storagegateway.DescribeTapesOutput) + + DescribeTapes(*storagegateway.DescribeTapesInput) (*storagegateway.DescribeTapesOutput, error) + + DescribeTapesPages(*storagegateway.DescribeTapesInput, func(*storagegateway.DescribeTapesOutput, bool) bool) error + + DescribeUploadBufferRequest(*storagegateway.DescribeUploadBufferInput) (*request.Request, *storagegateway.DescribeUploadBufferOutput) + + DescribeUploadBuffer(*storagegateway.DescribeUploadBufferInput) (*storagegateway.DescribeUploadBufferOutput, error) + + DescribeVTLDevicesRequest(*storagegateway.DescribeVTLDevicesInput) (*request.Request, *storagegateway.DescribeVTLDevicesOutput) + + DescribeVTLDevices(*storagegateway.DescribeVTLDevicesInput) (*storagegateway.DescribeVTLDevicesOutput, error) + + DescribeVTLDevicesPages(*storagegateway.DescribeVTLDevicesInput, func(*storagegateway.DescribeVTLDevicesOutput, bool) bool) error + + DescribeWorkingStorageRequest(*storagegateway.DescribeWorkingStorageInput) (*request.Request, *storagegateway.DescribeWorkingStorageOutput) + + DescribeWorkingStorage(*storagegateway.DescribeWorkingStorageInput) (*storagegateway.DescribeWorkingStorageOutput, error) + + DisableGatewayRequest(*storagegateway.DisableGatewayInput) (*request.Request, *storagegateway.DisableGatewayOutput) + + DisableGateway(*storagegateway.DisableGatewayInput) (*storagegateway.DisableGatewayOutput, error) + + ListGatewaysRequest(*storagegateway.ListGatewaysInput) (*request.Request, *storagegateway.ListGatewaysOutput) + + ListGateways(*storagegateway.ListGatewaysInput) (*storagegateway.ListGatewaysOutput, error) + + ListGatewaysPages(*storagegateway.ListGatewaysInput, func(*storagegateway.ListGatewaysOutput, bool) bool) error + + ListLocalDisksRequest(*storagegateway.ListLocalDisksInput) (*request.Request, *storagegateway.ListLocalDisksOutput) + + ListLocalDisks(*storagegateway.ListLocalDisksInput) (*storagegateway.ListLocalDisksOutput, error) + + ListTagsForResourceRequest(*storagegateway.ListTagsForResourceInput) (*request.Request, *storagegateway.ListTagsForResourceOutput) + + ListTagsForResource(*storagegateway.ListTagsForResourceInput) (*storagegateway.ListTagsForResourceOutput, error) + + ListTapesRequest(*storagegateway.ListTapesInput) (*request.Request, *storagegateway.ListTapesOutput) + + ListTapes(*storagegateway.ListTapesInput) (*storagegateway.ListTapesOutput, error) + + ListVolumeInitiatorsRequest(*storagegateway.ListVolumeInitiatorsInput) (*request.Request, 
*storagegateway.ListVolumeInitiatorsOutput) + + ListVolumeInitiators(*storagegateway.ListVolumeInitiatorsInput) (*storagegateway.ListVolumeInitiatorsOutput, error) + + ListVolumeRecoveryPointsRequest(*storagegateway.ListVolumeRecoveryPointsInput) (*request.Request, *storagegateway.ListVolumeRecoveryPointsOutput) + + ListVolumeRecoveryPoints(*storagegateway.ListVolumeRecoveryPointsInput) (*storagegateway.ListVolumeRecoveryPointsOutput, error) + + ListVolumesRequest(*storagegateway.ListVolumesInput) (*request.Request, *storagegateway.ListVolumesOutput) + + ListVolumes(*storagegateway.ListVolumesInput) (*storagegateway.ListVolumesOutput, error) + + ListVolumesPages(*storagegateway.ListVolumesInput, func(*storagegateway.ListVolumesOutput, bool) bool) error + + RemoveTagsFromResourceRequest(*storagegateway.RemoveTagsFromResourceInput) (*request.Request, *storagegateway.RemoveTagsFromResourceOutput) + + RemoveTagsFromResource(*storagegateway.RemoveTagsFromResourceInput) (*storagegateway.RemoveTagsFromResourceOutput, error) + + ResetCacheRequest(*storagegateway.ResetCacheInput) (*request.Request, *storagegateway.ResetCacheOutput) + + ResetCache(*storagegateway.ResetCacheInput) (*storagegateway.ResetCacheOutput, error) + + RetrieveTapeArchiveRequest(*storagegateway.RetrieveTapeArchiveInput) (*request.Request, *storagegateway.RetrieveTapeArchiveOutput) + + RetrieveTapeArchive(*storagegateway.RetrieveTapeArchiveInput) (*storagegateway.RetrieveTapeArchiveOutput, error) + + RetrieveTapeRecoveryPointRequest(*storagegateway.RetrieveTapeRecoveryPointInput) (*request.Request, *storagegateway.RetrieveTapeRecoveryPointOutput) + + RetrieveTapeRecoveryPoint(*storagegateway.RetrieveTapeRecoveryPointInput) (*storagegateway.RetrieveTapeRecoveryPointOutput, error) + + SetLocalConsolePasswordRequest(*storagegateway.SetLocalConsolePasswordInput) (*request.Request, *storagegateway.SetLocalConsolePasswordOutput) + + SetLocalConsolePassword(*storagegateway.SetLocalConsolePasswordInput) (*storagegateway.SetLocalConsolePasswordOutput, error) + + ShutdownGatewayRequest(*storagegateway.ShutdownGatewayInput) (*request.Request, *storagegateway.ShutdownGatewayOutput) + + ShutdownGateway(*storagegateway.ShutdownGatewayInput) (*storagegateway.ShutdownGatewayOutput, error) + + StartGatewayRequest(*storagegateway.StartGatewayInput) (*request.Request, *storagegateway.StartGatewayOutput) + + StartGateway(*storagegateway.StartGatewayInput) (*storagegateway.StartGatewayOutput, error) + + UpdateBandwidthRateLimitRequest(*storagegateway.UpdateBandwidthRateLimitInput) (*request.Request, *storagegateway.UpdateBandwidthRateLimitOutput) + + UpdateBandwidthRateLimit(*storagegateway.UpdateBandwidthRateLimitInput) (*storagegateway.UpdateBandwidthRateLimitOutput, error) + + UpdateChapCredentialsRequest(*storagegateway.UpdateChapCredentialsInput) (*request.Request, *storagegateway.UpdateChapCredentialsOutput) + + UpdateChapCredentials(*storagegateway.UpdateChapCredentialsInput) (*storagegateway.UpdateChapCredentialsOutput, error) + + UpdateGatewayInformationRequest(*storagegateway.UpdateGatewayInformationInput) (*request.Request, *storagegateway.UpdateGatewayInformationOutput) + + UpdateGatewayInformation(*storagegateway.UpdateGatewayInformationInput) (*storagegateway.UpdateGatewayInformationOutput, error) + + UpdateGatewaySoftwareNowRequest(*storagegateway.UpdateGatewaySoftwareNowInput) (*request.Request, *storagegateway.UpdateGatewaySoftwareNowOutput) + + UpdateGatewaySoftwareNow(*storagegateway.UpdateGatewaySoftwareNowInput) 
(*storagegateway.UpdateGatewaySoftwareNowOutput, error) + + UpdateMaintenanceStartTimeRequest(*storagegateway.UpdateMaintenanceStartTimeInput) (*request.Request, *storagegateway.UpdateMaintenanceStartTimeOutput) + + UpdateMaintenanceStartTime(*storagegateway.UpdateMaintenanceStartTimeInput) (*storagegateway.UpdateMaintenanceStartTimeOutput, error) + + UpdateSnapshotScheduleRequest(*storagegateway.UpdateSnapshotScheduleInput) (*request.Request, *storagegateway.UpdateSnapshotScheduleOutput) + + UpdateSnapshotSchedule(*storagegateway.UpdateSnapshotScheduleInput) (*storagegateway.UpdateSnapshotScheduleOutput, error) + + UpdateVTLDeviceTypeRequest(*storagegateway.UpdateVTLDeviceTypeInput) (*request.Request, *storagegateway.UpdateVTLDeviceTypeOutput) + + UpdateVTLDeviceType(*storagegateway.UpdateVTLDeviceTypeInput) (*storagegateway.UpdateVTLDeviceTypeOutput, error) +} + +var _ StorageGatewayAPI = (*storagegateway.StorageGateway)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go new file mode 100644 index 000000000..5e4078ea8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -0,0 +1,1625 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package sts provides a client for AWS Security Token Service. +package sts + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAssumeRole = "AssumeRole" + +// AssumeRoleRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRole operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AssumeRole method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AssumeRoleRequest method. +// req, resp := client.AssumeRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) { + op := &request.Operation{ + Name: opAssumeRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleInput{} + } + + req = c.newRequest(op, input, output) + output = &AssumeRoleOutput{} + req.Data = output + return +} + +// Returns a set of temporary security credentials (consisting of an access +// key ID, a secret access key, and a security token) that you can use to access +// AWS resources that you might not normally have access to. Typically, you +// use AssumeRole for cross-account access or federation. For a comparison of +// AssumeRole with the other APIs that produce temporary credentials, see Requesting +// Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. 
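Both calling styles generated above can be exercised against this operation. A minimal sketch, assuming credentials come from the default provider chain; the ARNs and the surrounding program are illustrative, not part of the vendored file:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sts"
    )

    func main() {
        svc := sts.New(session.New()) // default credential chain

        params := &sts.AssumeRoleInput{
            RoleArn:         aws.String("arn:aws:iam::123456789012:role/demo"), // placeholder
            RoleSessionName: aws.String("demo-session"),
        }

        // Request/Send style: gives access to the request before it executes.
        req, resp := svc.AssumeRoleRequest(params)
        if err := req.Send(); err != nil {
            log.Fatal(err)
        }
        fmt.Println(resp.Credentials)

        // Direct style: equivalent, without the intermediate request object.
        if out, err := svc.AssumeRole(params); err == nil {
            fmt.Println(out.AssumedRoleUser)
        }
    }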
+// +// Important: You cannot call AssumeRole by using AWS root account credentials; +// access is denied. You must use credentials for an IAM user or an IAM role +// to call AssumeRole. +// +// For cross-account access, imagine that you own multiple accounts and need +// to access resources in each account. You could create long-term credentials +// in each account to access those resources. However, managing all those credentials +// and remembering which one can access which account can be time-consuming. +// Instead, you can create one set of long-term credentials in one account and +// then use temporary security credentials to access all the other accounts +// by assuming roles in those accounts. For more information about roles, see +// IAM Roles (Delegation and Federation) (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html) +// in the IAM User Guide. +// +// For federation, you can, for example, grant single sign-on access to the +// AWS Management Console. If you already have an identity and authentication +// system in your corporate network, you don't have to recreate user identities +// in AWS in order to grant those user identities access to AWS. Instead, after +// a user has been authenticated, you call AssumeRole (and specify the role +// with the appropriate permissions) to get temporary security credentials for +// that user. With those temporary security credentials, you construct a sign-in +// URL that users can use to access the console. For more information, see Common +// Scenarios for Temporary Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html#sts-introduction) +// in the IAM User Guide. +// +// The temporary security credentials are valid for the duration that you specified +// when calling AssumeRole, which can be from 900 seconds (15 minutes) to a +// maximum of 3600 seconds (1 hour). The default is 1 hour. +// +// The temporary security credentials created by AssumeRole can be used to +// make API calls to any AWS service with the following exception: you cannot +// call the STS service's GetFederationToken or GetSessionToken APIs. +// +// Optionally, you can pass an IAM access policy to this operation. If you +// choose not to pass a policy, the temporary security credentials that are +// returned by the operation have the permissions that are defined in the access +// policy of the role that is being assumed. If you pass a policy to this operation, +// the temporary security credentials that are returned by the operation have +// the permissions that are allowed by both the access policy of the role that +// is being assumed, and the policy that you pass. This gives you a way to +// further restrict the permissions for the resulting temporary security credentials. +// You cannot use the passed policy to grant permissions that are in excess +// of those allowed by the access policy of the role that is being assumed. +// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, +// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// in the IAM User Guide. +// +// To assume a role, your AWS account must be trusted by the role. The trust +// relationship is defined in the role's trust policy when the role is created. +// That trust policy states which accounts are allowed to delegate access to +// this account's role.
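The trust policy mentioned here is an ordinary JSON document attached to the role; a minimal sketch, shown as a Go constant for consistency with the surrounding code (the account ID is a placeholder):

    // trustPolicy is an illustrative role trust policy allowing principals in
    // account 111122223333 (placeholder) to call sts:AssumeRole on the role.
    const trustPolicy = `{
      "Version": "2012-10-17",
      "Statement": [{
        "Effect": "Allow",
        "Principal": {"AWS": "arn:aws:iam::111122223333:root"},
        "Action": "sts:AssumeRole"
      }]
    }`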
+// +// The user who wants to access the role must also have permissions delegated +// from the role's administrator. If the user is in a different account than +// the role, then the user's administrator must attach a policy that allows +// the user to call AssumeRole on the ARN of the role in the other account. +// If the user is in the same account as the role, then you can either attach +// a policy to the user (identical to the policy used for the user in a different account), +// or you can add the user as a principal directly in the role's trust policy. +// +// Using MFA with AssumeRole +// +// You can optionally include multi-factor authentication (MFA) information +// when you call AssumeRole. This is useful for cross-account scenarios in which +// you want to make sure that the user who is assuming the role has been authenticated +// using an AWS MFA device. In that scenario, the trust policy of the role being +// assumed includes a condition that tests for MFA authentication; if the caller +// does not include valid MFA information, the request to assume the role is +// denied. The condition in a trust policy that tests for MFA authentication +// might look like the following example. +// +// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}} +// +// For more information, see Configuring MFA-Protected API Access (http://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) +// in the IAM User Guide. +// +// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode +// parameters. The SerialNumber value identifies the user's hardware or virtual +// MFA device. The TokenCode is the time-based one-time password (TOTP) that +// the MFA device produces. +func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) { + req, out := c.AssumeRoleRequest(input) + err := req.Send() + return out, err +} + +const opAssumeRoleWithSAML = "AssumeRoleWithSAML" + +// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRoleWithSAML operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AssumeRoleWithSAML method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AssumeRoleWithSAMLRequest method. +// req, resp := client.AssumeRoleWithSAMLRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) { + op := &request.Operation{ + Name: opAssumeRoleWithSAML, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleWithSAMLInput{} + } + + req = c.newRequest(op, input, output) + output = &AssumeRoleWithSAMLOutput{} + req.Data = output + return +} + +// Returns a set of temporary security credentials for users who have been authenticated +// via a SAML authentication response.
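Before moving on: the SerialNumber/TokenCode mechanics just described for AssumeRole look like this in practice. A sketch; the role and device ARNs are placeholders:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sts"
    )

    // assumeWithMFA satisfies a trust policy that tests aws:MultiFactorAuthPresent
    // by sending the device serial and a fresh TOTP code with the call.
    func assumeWithMFA(svc *sts.STS, code string) (*sts.AssumeRoleOutput, error) {
        return svc.AssumeRole(&sts.AssumeRoleInput{
            RoleArn:         aws.String("arn:aws:iam::123456789012:role/demo"), // placeholder
            RoleSessionName: aws.String("mfa-session"),
            SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/user"), // placeholder virtual MFA
            TokenCode:       aws.String(code),                                 // six-digit TOTP
        })
    }

    func main() {
        svc := sts.New(session.New())
        if _, err := assumeWithMFA(svc, "123456"); err != nil {
            log.Fatal(err)
        }
    }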
This operation provides a mechanism for +// tying an enterprise identity store or directory to role-based AWS access +// without user-specific credentials or configuration. For a comparison of AssumeRoleWithSAML +// with the other APIs that produce temporary credentials, see Requesting Temporary +// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// The temporary security credentials returned by this operation consist of +// an access key ID, a secret access key, and a security token. Applications +// can use these temporary security credentials to sign calls to AWS services. +// +// The temporary security credentials are valid for the duration that you specified +// when calling AssumeRole, or until the time specified in the SAML authentication +// response's SessionNotOnOrAfter value, whichever is shorter. The duration +// can be from 900 seconds (15 minutes) to a maximum of 3600 seconds (1 hour). +// The default is 1 hour. +// +// The temporary security credentials created by AssumeRoleWithSAML can be +// used to make API calls to any AWS service with the following exception: you +// cannot call the STS service's GetFederationToken or GetSessionToken APIs. +// +// Optionally, you can pass an IAM access policy to this operation. If you +// choose not to pass a policy, the temporary security credentials that are +// returned by the operation have the permissions that are defined in the access +// policy of the role that is being assumed. If you pass a policy to this operation, +// the temporary security credentials that are returned by the operation have +// the permissions that are allowed by both the access policy of the role that +// is being assumed, and the policy that you pass. This gives you a way to +// further restrict the permissions for the resulting temporary security credentials. +// You cannot use the passed policy to grant permissions that are in excess +// of those allowed by the access policy of the role that is being assumed. +// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, +// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// in the IAM User Guide. +// +// Before your application can call AssumeRoleWithSAML, you must configure +// your SAML identity provider (IdP) to issue the claims required by AWS. Additionally, +// you must use AWS Identity and Access Management (IAM) to create a SAML provider +// entity in your AWS account that represents your identity provider, and create +// an IAM role that specifies this SAML provider in its trust policy. +// +// Calling AssumeRoleWithSAML does not require the use of AWS security credentials. +// The identity of the caller is validated by using keys in the metadata document +// that is uploaded for the SAML provider entity for your identity provider. +// +// Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail +// logs. The entry includes the value in the NameID element of the SAML assertion. +// We recommend that you use a NameIDType that is not associated with any personally +// identifiable information (PII). For example, you could instead use the Persistent +// Identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). 
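A sketch of calling the operation once the IdP has produced the base64-encoded response; the ARNs and assertion are placeholders:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sts"
    )

    func main() {
        // The call is unsigned; the SAML assertion itself authenticates the caller.
        svc := sts.New(session.New())
        out, err := svc.AssumeRoleWithSAML(&sts.AssumeRoleWithSAMLInput{
            PrincipalArn:  aws.String("arn:aws:iam::123456789012:saml-provider/MyIdP"), // placeholder
            RoleArn:       aws.String("arn:aws:iam::123456789012:role/saml-demo"),      // placeholder
            SAMLAssertion: aws.String("<base64 SAML response from the IdP>"),           // placeholder
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(out.Credentials, aws.StringValue(out.Subject))
    }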
+// +// For more information, see the following resources: +// +// About SAML 2.0-based Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) +// in the IAM User Guide. +// +// Creating SAML Identity Providers (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) +// in the IAM User Guide. +// +// Configuring a Relying Party and Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) +// in the IAM User Guide. +// +// Creating a Role for SAML 2.0 Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) +// in the IAM User Guide. +func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) { + req, out := c.AssumeRoleWithSAMLRequest(input) + err := req.Send() + return out, err +} + +const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" + +// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRoleWithWebIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AssumeRoleWithWebIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AssumeRoleWithWebIdentityRequest method. +// req, resp := client.AssumeRoleWithWebIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) { + op := &request.Operation{ + Name: opAssumeRoleWithWebIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleWithWebIdentityInput{} + } + + req = c.newRequest(op, input, output) + output = &AssumeRoleWithWebIdentityOutput{} + req.Data = output + return +} + +// Returns a set of temporary security credentials for users who have been authenticated +// in a mobile or web application with a web identity provider, such as Amazon +// Cognito, Login with Amazon, Facebook, Google, or any OpenID Connect-compatible +// identity provider. +// +// For mobile applications, we recommend that you use Amazon Cognito. You +// can use Amazon Cognito with the AWS SDK for iOS (http://aws.amazon.com/sdkforios/) +// and the AWS SDK for Android (http://aws.amazon.com/sdkforandroid/) to uniquely +// identify a user and supply the user with a consistent identity throughout +// the lifetime of an application. +// +// To learn more about Amazon Cognito, see Amazon Cognito Overview (http://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840) +// in the AWS SDK for Android Developer Guide and Amazon Cognito Overview +// (http://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) +// in the AWS SDK for iOS Developer Guide. +// +// Calling AssumeRoleWithWebIdentity does not require the use of AWS security +// credentials.
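As the generated boilerplate keeps noting, the Request variants exist so callers can hook the request lifecycle before Send. A sketch of one such hook; the logging itself is illustrative:

    package stsexamples

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/request"
        "github.com/aws/aws-sdk-go/service/sts"
    )

    // sendWithTrace attaches a handler that fires just before the HTTP request
    // goes out, then executes the request as usual with Send.
    func sendWithTrace(svc *sts.STS, params *sts.AssumeRoleWithWebIdentityInput) (*sts.AssumeRoleWithWebIdentityOutput, error) {
        req, out := svc.AssumeRoleWithWebIdentityRequest(params)
        req.Handlers.Send.PushFront(func(r *request.Request) {
            fmt.Println("sending", r.Operation.Name, "to", r.HTTPRequest.URL.Host)
        })
        err := req.Send()
        return out, err
    }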
Therefore, you can distribute an application (for example, on +// mobile devices) that requests temporary security credentials without including +// long-term AWS credentials in the application, and without deploying server-based +// proxy services that use long-term AWS credentials. Instead, the identity +// of the caller is validated by using a token from the web identity provider. +// For a comparison of AssumeRoleWithWebIdentity with the other APIs that produce +// temporary credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// The temporary security credentials returned by this API consist of an access +// key ID, a secret access key, and a security token. Applications can use these +// temporary security credentials to sign calls to AWS service APIs. +// +// The credentials are valid for the duration that you specified when calling +// AssumeRoleWithWebIdentity, which can be from 900 seconds (15 minutes) to +// a maximum of 3600 seconds (1 hour). The default is 1 hour. +// +// The temporary security credentials created by AssumeRoleWithWebIdentity +// can be used to make API calls to any AWS service with the following exception: +// you cannot call the STS service's GetFederationToken or GetSessionToken APIs. +// +// Optionally, you can pass an IAM access policy to this operation. If you +// choose not to pass a policy, the temporary security credentials that are +// returned by the operation have the permissions that are defined in the access +// policy of the role that is being assumed. If you pass a policy to this operation, +// the temporary security credentials that are returned by the operation have +// the permissions that are allowed by both the access policy of the role that +// is being assumed, and the policy that you pass. This gives you a way to +// further restrict the permissions for the resulting temporary security credentials. +// You cannot use the passed policy to grant permissions that are in excess +// of those allowed by the access policy of the role that is being assumed. +// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, +// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// in the IAM User Guide. +// +// Before your application can call AssumeRoleWithWebIdentity, you must have +// an identity token from a supported identity provider and create a role that +// the application can assume. The role that your application assumes must trust +// the identity provider that is associated with the identity token. In other +// words, the identity provider must be specified in the role's trust policy. +// +// Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail +// logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims) +// of the provided Web Identity Token. We recommend that you avoid using any +// personally identifiable information (PII) in this field. For example, you +// could instead use a GUID or a pairwise identifier, as suggested in the OIDC +// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes). 
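A sketch of the unsigned call itself, with a token already obtained from the provider. The role ARN and token are placeholders, and WebIdentityToken is the input field (defined later in this file) that carries the provider's token:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sts"
    )

    func main() {
        svc := sts.New(session.New())
        out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
            RoleArn:          aws.String("arn:aws:iam::123456789012:role/web-identity-demo"), // placeholder
            RoleSessionName:  aws.String("app-user-123"),
            WebIdentityToken: aws.String("<token from the identity provider>"), // e.g. an OIDC ID token
            // ProviderId is set only for OAuth 2.0 access tokens
            // (www.amazon.com or graph.facebook.com), not for OIDC ID tokens.
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(out.Credentials)
    }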
+// +// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity +// API, see the following resources: +// +// Using Web Identity Federation APIs for Mobile Apps (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual) +// and Federation Through a Web-based Identity Provider (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). +// +// Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html). +// This interactive website lets you walk through the process of authenticating +// via Login with Amazon, Facebook, or Google, getting temporary security credentials, +// and then using those credentials to make a request to AWS. +// +// AWS SDK for iOS (http://aws.amazon.com/sdkforios/) and AWS SDK for Android +// (http://aws.amazon.com/sdkforandroid/). These toolkits contain sample apps +// that show how to invoke the identity providers, and then how to use the information +// from these providers to get and use temporary security credentials. +// +// Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/4617974389850313). +// This article discusses web identity federation and shows an example of how +// to use web identity federation to get access to content in Amazon S3. +func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) { + req, out := c.AssumeRoleWithWebIdentityRequest(input) + err := req.Send() + return out, err +} + +const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" + +// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the +// client's request for the DecodeAuthorizationMessage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DecodeAuthorizationMessage method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DecodeAuthorizationMessageRequest method. +// req, resp := client.DecodeAuthorizationMessageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) { + op := &request.Operation{ + Name: opDecodeAuthorizationMessage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DecodeAuthorizationMessageInput{} + } + + req = c.newRequest(op, input, output) + output = &DecodeAuthorizationMessageOutput{} + req.Data = output + return +} + +// Decodes additional information about the authorization status of a request +// from an encoded message returned in response to an AWS request. +// +// For example, if a user is not authorized to perform an action that he or +// she has requested, the request returns a Client.UnauthorizedOperation response +// (an HTTP 403 response). 
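The decode step for such an encoded message is a single call. A sketch; as the text below explains, the caller must be allowed sts:DecodeAuthorizationMessage:

    package stsexamples

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/sts"
    )

    // decodeAuthFailure expands the opaque message that accompanies a
    // Client.UnauthorizedOperation (HTTP 403) response into readable detail.
    func decodeAuthFailure(svc *sts.STS, encoded string) (string, error) {
        out, err := svc.DecodeAuthorizationMessage(&sts.DecodeAuthorizationMessageInput{
            EncodedMessage: aws.String(encoded),
        })
        if err != nil {
            return "", err
        }
        return aws.StringValue(out.DecodedMessage), nil
    }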
Some AWS actions additionally return an encoded message +// that can provide details about this authorization failure. +// +// Only certain AWS actions return an encoded authorization message. The documentation +// for an individual action indicates whether that action returns an encoded +// message in addition to returning an HTTP code. +// +// The message is encoded because the details of the authorization status +// can constitute privileged information that the user who requested the action +// should not see. To decode an authorization status message, a user must be +// granted permissions via an IAM policy to request the DecodeAuthorizationMessage +// (sts:DecodeAuthorizationMessage) action. +// +// The decoded message includes the following types of information: +// +// Whether the request was denied due to an explicit deny or due to the absence +// of an explicit allow. For more information, see Determining Whether a Request +// is Allowed or Denied (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) +// in the IAM User Guide. +// +// The principal who made the request. +// +// The requested action. +// +// The requested resource. +// +// The values of condition keys in the context of the user's request. +func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) { + req, out := c.DecodeAuthorizationMessageRequest(input) + err := req.Send() + return out, err +} + +const opGetCallerIdentity = "GetCallerIdentity" + +// GetCallerIdentityRequest generates a "aws/request.Request" representing the +// client's request for the GetCallerIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetCallerIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetCallerIdentityRequest method. +// req, resp := client.GetCallerIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) { + op := &request.Operation{ + Name: opGetCallerIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetCallerIdentityInput{} + } + + req = c.newRequest(op, input, output) + output = &GetCallerIdentityOutput{} + req.Data = output + return +} + +// Returns details about the IAM identity whose credentials are used to call +// the API. +func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) { + req, out := c.GetCallerIdentityRequest(input) + err := req.Send() + return out, err +} + +const opGetFederationToken = "GetFederationToken" + +// GetFederationTokenRequest generates a "aws/request.Request" representing the +// client's request for the GetFederationToken operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called.
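GetCallerIdentity, defined just above, needs no parameters and makes a handy who-am-I probe. A sketch; the output fields are defined later in this file:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sts"
    )

    func main() {
        svc := sts.New(session.New())
        out, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
        if err != nil {
            log.Fatal(err)
        }
        // Account, Arn, and UserId identify whichever credentials signed the call.
        fmt.Println(aws.StringValue(out.Account), aws.StringValue(out.Arn), aws.StringValue(out.UserId))
    }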
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetFederationToken method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetFederationTokenRequest method. +// req, resp := client.GetFederationTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) { + op := &request.Operation{ + Name: opGetFederationToken, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetFederationTokenInput{} + } + + req = c.newRequest(op, input, output) + output = &GetFederationTokenOutput{} + req.Data = output + return +} + +// Returns a set of temporary security credentials (consisting of an access +// key ID, a secret access key, and a security token) for a federated user. +// A typical use is in a proxy application that gets temporary security credentials +// on behalf of distributed applications inside a corporate network. Because +// you must call the GetFederationToken action using the long-term security +// credentials of an IAM user, this call is appropriate in contexts where those +// credentials can be safely stored, usually in a server-based application. +// For a comparison of GetFederationToken with the other APIs that produce temporary +// credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// If you are creating a mobile-based or browser-based app that can authenticate +// users using a web identity provider like Login with Amazon, Facebook, Google, +// or an OpenID Connect-compatible identity provider, we recommend that you +// use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity. +// For more information, see Federation Through a Web-based Identity Provider +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). +// +// The GetFederationToken action must be called by using the long-term AWS +// security credentials of an IAM user. You can also call GetFederationToken +// using the security credentials of an AWS root account, but we do not recommend +// it. Instead, we recommend that you create an IAM user for the purpose of +// the proxy application and then attach a policy to the IAM user that limits +// federated users to only the actions and resources that they need access to. +// For more information, see IAM Best Practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) +// in the IAM User Guide. +// +// The temporary security credentials that are obtained by using the long-term +// credentials of an IAM user are valid for the specified duration, from 900 +// seconds (15 minutes) up to a maximum of 129600 seconds (36 hours). The default +// is 43200 seconds (12 hours).
Temporary credentials that are obtained by using +// AWS root account credentials have a maximum duration of 3600 seconds (1 hour). +// +// The temporary security credentials created by GetFederationToken can be +// used to make API calls to any AWS service with the following exceptions: +// +// You cannot use these credentials to call any IAM APIs. +// +// You cannot call any STS APIs. +// +// Permissions +// +// The permissions for the temporary security credentials returned by GetFederationToken +// are determined by a combination of the following: +// +// The policy or policies that are attached to the IAM user whose credentials +// are used to call GetFederationToken. +// +// The policy that is passed as a parameter in the call. +// +// The passed policy is attached to the temporary security credentials that +// result from the GetFederationToken API call--that is, to the federated user. +// When the federated user makes an AWS request, AWS evaluates the policy attached +// to the federated user in combination with the policy or policies attached +// to the IAM user whose credentials were used to call GetFederationToken. AWS +// allows the federated user's request only when both the federated user and +// the IAM user are explicitly allowed to perform the requested action. The +// passed policy cannot grant more permissions than those that are defined in +// the IAM user policy. +// +// A typical use case is that the permissions of the IAM user whose credentials +// are used to call GetFederationToken are designed to allow access to all the +// actions and resources that any federated user will need. Then, for individual +// users, you pass a policy to the operation that scopes down the permissions +// to a level that's appropriate to that individual user, using a policy that +// allows only a subset of permissions that are granted to the IAM user. +// +// If you do not pass a policy, the resulting temporary security credentials +// have no effective permissions. The only exception is when the temporary security +// credentials are used to access a resource that has a resource-based policy +// that specifically allows the federated user to access the resource. +// +// For more information about how permissions work, see Permissions for GetFederationToken +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html). +// For information about using GetFederationToken to create temporary security +// credentials, see GetFederationToken—Federation Through a Custom Identity +// Broker (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). +func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) { + req, out := c.GetFederationTokenRequest(input) + err := req.Send() + return out, err +} + +const opGetSessionToken = "GetSessionToken" + +// GetSessionTokenRequest generates a "aws/request.Request" representing the +// client's request for the GetSessionToken operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetSessionToken method directly +// instead. 
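Putting the GetFederationToken description above into code: a proxy holding IAM user credentials mints scoped credentials for a named federated user. A sketch; the policy argument is a placeholder scope-down policy, and without one the federated user would get no effective permissions:

    package stsexamples

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/sts"
    )

    // federate returns temporary credentials for a federated user whose effective
    // permissions are the intersection of the caller's policy and scopeDownPolicy.
    func federate(svc *sts.STS, user, scopeDownPolicy string) (*sts.Credentials, error) {
        out, err := svc.GetFederationToken(&sts.GetFederationTokenInput{
            Name:            aws.String(user),            // appears in the federated user's ARN
            Policy:          aws.String(scopeDownPolicy), // placeholder scope-down policy JSON
            DurationSeconds: aws.Int64(3600),             // 1 hour; IAM-user callers may go up to 36 hours
        })
        if err != nil {
            return nil, err
        }
        return out.Credentials, nil
    }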
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetSessionTokenRequest method. +// req, resp := client.GetSessionTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) { + op := &request.Operation{ + Name: opGetSessionToken, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSessionTokenInput{} + } + + req = c.newRequest(op, input, output) + output = &GetSessionTokenOutput{} + req.Data = output + return +} + +// Returns a set of temporary credentials for an AWS account or IAM user. The +// credentials consist of an access key ID, a secret access key, and a security +// token. Typically, you use GetSessionToken if you want to use MFA to protect +// programmatic calls to specific AWS APIs like Amazon EC2 StopInstances. MFA-enabled +// IAM users would need to call GetSessionToken and submit an MFA code that +// is associated with their MFA device. Using the temporary security credentials +// that are returned from the call, IAM users can then make programmatic calls +// to APIs that require MFA authentication. If you do not supply a correct MFA +// code, then the API returns an access denied error. For a comparison of GetSessionToken +// with the other APIs that produce temporary credentials, see Requesting Temporary +// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// The GetSessionToken action must be called by using the long-term AWS security +// credentials of the AWS account or an IAM user. Credentials that are created +// by IAM users are valid for the duration that you specify, from 900 seconds +// (15 minutes) up to a maximum of 129600 seconds (36 hours), with a default +// of 43200 seconds (12 hours); credentials that are created by using account +// credentials can range from 900 seconds (15 minutes) up to a maximum of 3600 +// seconds (1 hour), with a default of 1 hour. +// +// The temporary security credentials created by GetSessionToken can be used +// to make API calls to any AWS service with the following exceptions: +// +// You cannot call any IAM APIs unless MFA authentication information is +// included in the request. +// +// You cannot call any STS API except AssumeRole. +// +// We recommend that you do not call GetSessionToken with root account credentials. +// Instead, follow our best practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) +// by creating one or more IAM users, giving them the necessary permissions, +// and using IAM users for everyday interaction with AWS. +// +// The permissions associated with the temporary security credentials returned +// by GetSessionToken are based on the permissions associated with the account or +// IAM user whose credentials are used to call the action. If GetSessionToken +// is called using root account credentials, the temporary credentials have +// root account permissions. Similarly, if GetSessionToken is called using the +// credentials of an IAM user, the temporary credentials have the same permissions +// as the IAM user.
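A sketch of the MFA-protected pattern described above; the device ARN is a placeholder, and the returned credentials can then sign calls to MFA-guarded APIs:

    package stsexamples

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/sts"
    )

    // sessionWithMFA trades long-term IAM user credentials plus a TOTP code for
    // temporary session credentials.
    func sessionWithMFA(svc *sts.STS, code string) (*sts.Credentials, error) {
        out, err := svc.GetSessionToken(&sts.GetSessionTokenInput{
            DurationSeconds: aws.Int64(43200),                                 // the 12-hour default, made explicit
            SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/user"), // placeholder MFA device
            TokenCode:       aws.String(code),
        })
        if err != nil {
            return nil, err
        }
        return out.Credentials, nil
    }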
+// +// For more information about using GetSessionToken to create temporary credentials, +// go to Temporary Credentials for Users in Untrusted Environments (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) +// in the IAM User Guide. +func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) { + req, out := c.GetSessionTokenRequest(input) + err := req.Send() + return out, err +} + +type AssumeRoleInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set + // to 3600 seconds. + DurationSeconds *int64 `min:"900" type:"integer"` + + // A unique identifier that is used by third parties when assuming roles in + // their customers' accounts. For each role that the third party can assume, + // they should instruct their customers to ensure the role's trust policy checks + // for the external ID that the third party generated. Each time the third party + // assumes the role, they should pass the customer's external ID. The external + // ID is useful to help third parties bind a role to the customer who + // created it. For more information about the external ID, see How to Use an + // External ID When Granting Access to Your AWS Resources to a Third Party (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) + // in the IAM User Guide. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters consisting of upper- and lower-case alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@:\/- + ExternalId *string `min:"2" type:"string"` + + // An IAM policy in JSON format. + // + // This parameter is optional. If you pass a policy, the temporary security + // credentials that are returned by the operation have the permissions that + // are allowed by both (the intersection of) the access policy of the role that + // is being assumed, and the policy that you pass. This gives you a way to further + // restrict the permissions for the resulting temporary security credentials. + // You cannot use the passed policy to grant permissions that are in excess + // of those allowed by the access policy of the role that is being assumed. + // For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, + // and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) + // in the IAM User Guide. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters up to 2048 characters in length. The characters can be any + // ASCII character from the space character to the end of the valid character + // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the role to assume.
+ RoleArn *string `min:"20" type:"string" required:"true"` + + // An identifier for the assumed role session. + // + // Use the role session name to uniquely identify a session when the same role + // is assumed by different principals or for different reasons. In cross-account + // scenarios, the role session name is visible to, and can be logged by the + // account that owns the role. The role session name is also used in the ARN + // of the assumed role principal. This means that subsequent cross-account API + // requests using the temporary security credentials will expose the role session + // name to the external account in their CloudTrail logs. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters consisting of upper- and lower-case alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + RoleSessionName *string `min:"2" type:"string" required:"true"` + + // The identification number of the MFA device that is associated with the user + // who is making the AssumeRole call. Specify this value if the trust policy + // of the role being assumed includes a condition that requires MFA authentication. + // The value is either the serial number for a hardware device (such as GAHT12345678) + // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters consisting of upper- and lower-case alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + SerialNumber *string `min:"9" type:"string"` + + // The value provided by the MFA device, if the trust policy of the role being + // assumed requires MFA (that is, if the policy includes a condition that tests + // for MFA). If the role being assumed requires MFA and if the TokenCode value + // is missing or expired, the AssumeRole call returns an "access denied" error. + // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. + TokenCode *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s AssumeRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AssumeRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.ExternalId != nil && len(*s.ExternalId) < 2 { + invalidParams.Add(request.NewErrParamMinLen("ExternalId", 2)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.RoleSessionName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) + } + if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) + } + if s.TokenCode != nil && len(*s.TokenCode) < 6 { + invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful AssumeRole request, including temporary +// AWS credentials that can be used to make AWS requests. +type AssumeRoleOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. + // For example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName + // that you specified when you called AssumeRole. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // Note: The size of the security token that STS APIs return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + // As of this writing, the typical size is less than 4096 bytes, but that can + // vary. Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` + + // A percentage value that indicates the size of the policy in packed form. + // The service rejects any policy with a packed size greater than 100 percent, + // which means the policy exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` +} + +// String returns the string representation +func (s AssumeRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleOutput) GoString() string { + return s.String() +} + +type AssumeRoleWithSAMLInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set + // to 3600 seconds. An expiration can also be specified in the SAML authentication + // response's SessionNotOnOrAfter value. The actual expiration time is whichever + // value is shorter. + // + // The maximum duration for a session is 1 hour, and the minimum duration + // is 15 minutes, even if values outside this range are specified. 
+ DurationSeconds *int64 `min:"900" type:"integer"` + + // An IAM policy in JSON format. + // + // The policy parameter is optional. If you pass a policy, the temporary security + // credentials that are returned by the operation have the permissions that + // are allowed by both the access policy of the role that is being assumed, + // and the policy that you pass. This gives you a way to further restrict + // the permissions for the resulting temporary security credentials. You cannot + // use the passed policy to grant permissions that are in excess of those allowed + // by the access policy of the role that is being assumed. For more information, + // see Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) + // in the IAM User Guide. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters up to 2048 characters in length. The characters can be any + // ASCII character from the space character to the end of the valid character + // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes + // the IdP. + PrincipalArn *string `min:"20" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + RoleArn *string `min:"20" type:"string" required:"true"` + + // The base-64 encoded SAML authentication response provided by the IdP. + // + // For more information, see Configuring a Relying Party and Adding Claims + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) + // in the Using IAM guide. + SAMLAssertion *string `min:"4" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumeRoleWithSAMLInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithSAMLInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid.
+func (s *AssumeRoleWithSAMLInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithSAMLInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.PrincipalArn == nil { + invalidParams.Add(request.NewErrParamRequired("PrincipalArn")) + } + if s.PrincipalArn != nil && len(*s.PrincipalArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PrincipalArn", 20)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.SAMLAssertion == nil { + invalidParams.Add(request.NewErrParamRequired("SAMLAssertion")) + } + if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 { + invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful AssumeRoleWithSAML request, including +// temporary AWS credentials that can be used to make AWS requests. +type AssumeRoleWithSAMLOutput struct { + _ struct{} `type:"structure"` + + // The identifiers for the temporary security credentials that the operation + // returns. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The value of the Recipient attribute of the SubjectConfirmationData element + // of the SAML assertion. + Audience *string `type:"string"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // Note: The size of the security token that STS APIs return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + // As of this writing, the typical size is less than 4096 bytes, but that can + // vary. Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` + + // The value of the Issuer element of the SAML assertion. + Issuer *string `type:"string"` + + // A hash value based on the concatenation of the Issuer response value, the + // AWS account ID, and the friendly name (the last part of the ARN) of the SAML + // provider in IAM. The combination of NameQualifier and Subject can be used + // to uniquely identify a federated user. + // + // The following pseudocode shows how the hash value is calculated: + // + // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" + // ) ) + NameQualifier *string `type:"string"` + + // A percentage value that indicates the size of the policy in packed form. + // The service rejects any policy with a packed size greater than 100 percent, + // which means the policy exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The value of the NameID element in the Subject element of the SAML assertion. + Subject *string `type:"string"` + + // The format of the name ID, as defined by the Format attribute in the NameID + // element of the SAML assertion. Typical examples of the format are transient + // or persistent. + // + // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format, + // that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient + // is returned as transient. 
If the format includes any other prefix, the format + // is returned with no modifications. + SubjectType *string `type:"string"` +} + +// String returns the string representation +func (s AssumeRoleWithSAMLOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithSAMLOutput) GoString() string { + return s.String() +} + +type AssumeRoleWithWebIdentityInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set + // to 3600 seconds. + DurationSeconds *int64 `min:"900" type:"integer"` + + // An IAM policy in JSON format. + // + // The policy parameter is optional. If you pass a policy, the temporary security + // credentials that are returned by the operation have the permissions that + // are allowed by both the access policy of the role that is being assumed, + // and the policy that you pass. This gives you a way to further restrict + // the permissions for the resulting temporary security credentials. You cannot + // use the passed policy to grant permissions that are in excess of those allowed + // by the access policy of the role that is being assumed. For more information, + // see Permissions for AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) + // in the IAM User Guide. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters up to 2048 characters in length. The characters can be any + // ASCII character from the space character to the end of the valid character + // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + Policy *string `min:"1" type:"string"` + + // The fully qualified host component of the domain name of the identity provider. + // + // Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com + // and graph.facebook.com are the only supported identity providers for OAuth + // 2.0 access tokens. Do not include URL schemes and port numbers. + // + // Do not specify this value for OpenID Connect ID tokens. + ProviderId *string `min:"4" type:"string"` + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + RoleArn *string `min:"20" type:"string" required:"true"` + + // An identifier for the assumed role session. Typically, you pass the name + // or identifier that is associated with the user who is using your application. + // That way, the temporary security credentials that your application will use + // are associated with that user. This session name is included as part of the + // ARN and assumed role ID in the AssumedRoleUser response element. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters consisting of upper- and lower-case alphanumeric characters + // with no spaces. 
You can also include any of the following characters: =,.@- + RoleSessionName *string `min:"2" type:"string" required:"true"` + + // The OAuth 2.0 access token or OpenID Connect ID token that is provided by + // the identity provider. Your application must get this token by authenticating + // the user who is using your application with a web identity provider before + // the application makes an AssumeRoleWithWebIdentity call. + WebIdentityToken *string `min:"4" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumeRoleWithWebIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithWebIdentityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssumeRoleWithWebIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithWebIdentityInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.ProviderId != nil && len(*s.ProviderId) < 4 { + invalidParams.Add(request.NewErrParamMinLen("ProviderId", 4)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.RoleSessionName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) + } + if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) + } + if s.WebIdentityToken == nil { + invalidParams.Add(request.NewErrParamRequired("WebIdentityToken")) + } + if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 { + invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful AssumeRoleWithWebIdentity request, +// including temporary AWS credentials that can be used to make AWS requests. +type AssumeRoleWithWebIdentityOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. + // For example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName + // that you specified when you called AssumeRole. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The intended audience (also known as client ID) of the web identity token. + // This is traditionally the client identifier issued to the application that + // requested the web identity token. + Audience *string `type:"string"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security token. + // + // Note: The size of the security token that STS APIs return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + // As of this writing, the typical size is less than 4096 bytes, but that can + // vary. Also, future updates to AWS might require larger sizes. 
+ Credentials *Credentials `type:"structure"` + + // A percentage value that indicates the size of the policy in packed form. + // The service rejects any policy with a packed size greater than 100 percent, + // which means the policy exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The issuing authority of the web identity token presented. For OpenID Connect + // ID Tokens this contains the value of the iss field. For OAuth 2.0 access + // tokens, this contains the value of the ProviderId parameter that was passed + // in the AssumeRoleWithWebIdentity request. + Provider *string `type:"string"` + + // The unique user identifier that is returned by the identity provider. This + // identifier is associated with the WebIdentityToken that was submitted with + // the AssumeRoleWithWebIdentity call. The identifier is typically unique to + // the user and the application that acquired the WebIdentityToken (pairwise + // identifier). For OpenID Connect ID tokens, this field contains the value + // returned by the identity provider as the token's sub (Subject) claim. + SubjectFromWebIdentityToken *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s AssumeRoleWithWebIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithWebIdentityOutput) GoString() string { + return s.String() +} + +// The identifiers for the temporary security credentials that the operation +// returns. +type AssumedRoleUser struct { + _ struct{} `type:"structure"` + + // The ARN of the temporary security credentials that are returned from the + // AssumeRole action. For more information about ARNs and how to use them in + // policies, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in Using IAM. + Arn *string `min:"20" type:"string" required:"true"` + + // A unique identifier that contains the role ID and the role session name of + // the role that is being assumed. The role ID is generated by AWS when the + // role is created. + AssumedRoleId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumedRoleUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumedRoleUser) GoString() string { + return s.String() +} + +// AWS credentials for API authentication. +type Credentials struct { + _ struct{} `type:"structure"` + + // The access key ID that identifies the temporary security credentials. + AccessKeyId *string `min:"16" type:"string" required:"true"` + + // The date on which the current credentials expire. + Expiration *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The secret access key that can be used to sign requests. + SecretAccessKey *string `type:"string" required:"true"` + + // The token that users must pass to the service API to use the temporary credentials. + SessionToken *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Credentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Credentials) GoString() string { + return s.String() +} + +type DecodeAuthorizationMessageInput struct { + _ struct{} `type:"structure"` + + // The encoded message that was returned with the response. 
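+ //
+ // As a sketch, the message is typically captured from an access-denied error
+ // returned by another AWS service and passed through unchanged (svc and
+ // encodedMsg are illustrative):
+ //
+ //    out, err := svc.DecodeAuthorizationMessage(&DecodeAuthorizationMessageInput{
+ //        EncodedMessage: aws.String(encodedMsg),
+ //    })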
+ EncodedMessage *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DecodeAuthorizationMessageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecodeAuthorizationMessageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DecodeAuthorizationMessageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DecodeAuthorizationMessageInput"} + if s.EncodedMessage == nil { + invalidParams.Add(request.NewErrParamRequired("EncodedMessage")) + } + if s.EncodedMessage != nil && len(*s.EncodedMessage) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EncodedMessage", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A document that contains additional information about the authorization status +// of a request from an encoded message that is returned in response to an AWS +// request. +type DecodeAuthorizationMessageOutput struct { + _ struct{} `type:"structure"` + + // An XML document that contains the decoded message. + DecodedMessage *string `type:"string"` +} + +// String returns the string representation +func (s DecodeAuthorizationMessageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecodeAuthorizationMessageOutput) GoString() string { + return s.String() +} + +// Identifiers for the federated user that is associated with the credentials. +type FederatedUser struct { + _ struct{} `type:"structure"` + + // The ARN that specifies the federated user that is associated with the credentials. + // For more information about ARNs and how to use them in policies, see IAM + // Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in Using IAM. + Arn *string `min:"20" type:"string" required:"true"` + + // The string that identifies the federated user associated with the credentials, + // similar to the unique ID of an IAM user. + FederatedUserId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s FederatedUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FederatedUser) GoString() string { + return s.String() +} + +type GetCallerIdentityInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetCallerIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCallerIdentityInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetCallerIdentity request, including +// information about the entity making the request. +type GetCallerIdentityOutput struct { + _ struct{} `type:"structure"` + + // The AWS account ID number of the account that owns or contains the calling + // entity. + Account *string `type:"string"` + + // The AWS ARN associated with the calling entity. + Arn *string `min:"20" type:"string"` + + // The unique identifier of the calling entity. The exact value depends on the + // type of entity making the call. 
The values returned are those listed in the + // aws:userid column in the Principal table (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) + // found on the Policy Variables reference page in the IAM User Guide. + UserId *string `type:"string"` +} + +// String returns the string representation +func (s GetCallerIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCallerIdentityOutput) GoString() string { + return s.String() +} + +type GetFederationTokenInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, that the session should last. Acceptable durations + // for federation sessions range from 900 seconds (15 minutes) to 129600 seconds + // (36 hours), with 43200 seconds (12 hours) as the default. Sessions obtained + // using AWS account (root) credentials are restricted to a maximum of 3600 + // seconds (one hour). If the specified duration is longer than one hour, the + // session obtained by using AWS account (root) credentials defaults to one + // hour. + DurationSeconds *int64 `min:"900" type:"integer"` + + // The name of the federated user. The name is used as an identifier for the + // temporary security credentials (such as Bob). For example, you can reference + // the federated user name in a resource-based policy, such as in an Amazon + // S3 bucket policy. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters consisting of upper- and lower-case alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + Name *string `min:"2" type:"string" required:"true"` + + // An IAM policy in JSON format that is passed with the GetFederationToken call + // and evaluated along with the policy or policies that are attached to the + // IAM user whose credentials are used to call GetFederationToken. The passed + // policy is used to scope down the permissions that are available to the IAM + // user, by allowing only a subset of the permissions that are granted to the + // IAM user. The passed policy cannot grant more permissions than those granted + // to the IAM user. The final permissions for the federated user are the most + // restrictive set based on the intersection of the passed policy and the IAM + // user policy. + // + // If you do not pass a policy, the resulting temporary security credentials + // have no effective permissions. The only exception is when the temporary security + // credentials are used to access a resource that has a resource-based policy + // that specifically allows the federated user to access the resource. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters up to 2048 characters in length. The characters can be any + // ASCII character from the space character to the end of the valid character + // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. 
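+ //
+ // A minimal sketch of passing a scoped-down policy (the statement and bucket
+ // name are illustrative, not a recommended policy):
+ //
+ //    params := &GetFederationTokenInput{
+ //        Name: aws.String("Bob"),
+ //        Policy: aws.String(`{"Version":"2012-10-17","Statement":[
+ //            {"Effect":"Allow","Action":"s3:GetObject",
+ //             "Resource":"arn:aws:s3:::example-bucket/*"}]}`),
+ //    }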
+ // + // For more information about how permissions work, see Permissions for GetFederationToken + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html). + Policy *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetFederationTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetFederationTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetFederationTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetFederationTokenInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 2 { + invalidParams.Add(request.NewErrParamMinLen("Name", 2)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetFederationToken request, including +// temporary AWS credentials that can be used to make AWS requests. +type GetFederationTokenOutput struct { + _ struct{} `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // Note: The size of the security token that STS APIs return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + // As of this writing, the typical size is less than 4096 bytes, but that can + // vary. Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` + + // Identifiers for the federated user associated with the credentials (such + // as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You + // can use the federated user's ARN in your resource-based policies, such as + // an Amazon S3 bucket policy. + FederatedUser *FederatedUser `type:"structure"` + + // A percentage value indicating the size of the policy in packed form. The + // service rejects policies for which the packed size is greater than 100 percent + // of the allowed value. + PackedPolicySize *int64 `type:"integer"` +} + +// String returns the string representation +func (s GetFederationTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetFederationTokenOutput) GoString() string { + return s.String() +} + +type GetSessionTokenInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, that the credentials should remain valid. Acceptable + // durations for IAM user sessions range from 900 seconds (15 minutes) to 129600 + // seconds (36 hours), with 43200 seconds (12 hours) as the default. Sessions + // for AWS account owners are restricted to a maximum of 3600 seconds (one hour). + // If the duration is longer than one hour, the session for AWS account owners + // defaults to one hour. + DurationSeconds *int64 `min:"900" type:"integer"` + + // The identification number of the MFA device that is associated with the IAM + // user who is making the GetSessionToken call. Specify this value if the IAM + // user has a policy that requires MFA authentication. 
The value is either the + // serial number for a hardware device (such as GAHT12345678) or an Amazon Resource + // Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + // You can find the device for an IAM user by going to the AWS Management Console + // and viewing the user's security credentials. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters consisting of upper- and lower-case alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + SerialNumber *string `min:"9" type:"string"` + + // The value provided by the MFA device, if MFA is required. If any policy requires + // the IAM user to submit an MFA code, specify this value. If MFA authentication + // is required, and the user does not provide a code when requesting a set of + // temporary security credentials, the user will receive an "access denied" + // response when requesting resources that require MFA authentication. + // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. + TokenCode *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s GetSessionTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSessionTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetSessionTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSessionTokenInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) + } + if s.TokenCode != nil && len(*s.TokenCode) < 6 { + invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetSessionToken request, including +// temporary AWS credentials that can be used to make AWS requests. +type GetSessionTokenOutput struct { + _ struct{} `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // Note: The size of the security token that STS APIs return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + // As of this writing, the typical size is less than 4096 bytes, but that can + // vary. Also, future updates to AWS might require larger sizes. 
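+ //
+ // The three values map directly onto a static credentials provider; a sketch,
+ // assuming the aws/credentials package and a response resp:
+ //
+ //    v := credentials.NewStaticCredentials(
+ //        *resp.Credentials.AccessKeyId,
+ //        *resp.Credentials.SecretAccessKey,
+ //        *resp.Credentials.SessionToken)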
+ Credentials *Credentials `type:"structure"` +} + +// String returns the string representation +func (s GetSessionTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSessionTokenOutput) GoString() string { + return s.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go new file mode 100644 index 000000000..4010cc7fa --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go @@ -0,0 +1,12 @@ +package sts + +import "github.com/aws/aws-sdk-go/aws/request" + +func init() { + initRequest = func(r *request.Request) { + switch r.Operation.Name { + case opAssumeRoleWithSAML, opAssumeRoleWithWebIdentity: + r.Handlers.Sign.Clear() // these operations are unsigned + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations_test.go b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations_test.go new file mode 100644 index 000000000..6f870d35e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations_test.go @@ -0,0 +1,39 @@ +package sts_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/sts" +) + +var svc = sts.New(unit.Session, &aws.Config{ + Region: aws.String("mock-region"), +}) + +func TestUnsignedRequest_AssumeRoleWithSAML(t *testing.T) { + req, _ := svc.AssumeRoleWithSAMLRequest(&sts.AssumeRoleWithSAMLInput{ + PrincipalArn: aws.String("ARN01234567890123456789"), + RoleArn: aws.String("ARN01234567890123456789"), + SAMLAssertion: aws.String("ASSERT"), + }) + + err := req.Sign() + assert.NoError(t, err) + assert.Equal(t, "", req.HTTPRequest.Header.Get("Authorization")) +} + +func TestUnsignedRequest_AssumeRoleWithWebIdentity(t *testing.T) { + req, _ := svc.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{ + RoleArn: aws.String("ARN01234567890123456789"), + RoleSessionName: aws.String("SESSION"), + WebIdentityToken: aws.String("TOKEN"), + }) + + err := req.Sign() + assert.NoError(t, err) + assert.Equal(t, "", req.HTTPRequest.Header.Get("Authorization")) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/sts/examples_test.go new file mode 100644 index 000000000..c77c93d7e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/examples_test.go @@ -0,0 +1,166 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package sts_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/sts" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleSTS_AssumeRole() { + svc := sts.New(session.New()) + + params := &sts.AssumeRoleInput{ + RoleArn: aws.String("arnType"), // Required + RoleSessionName: aws.String("roleSessionNameType"), // Required + DurationSeconds: aws.Int64(1), + ExternalId: aws.String("externalIdType"), + Policy: aws.String("sessionPolicyDocumentType"), + SerialNumber: aws.String("serialNumberType"), + TokenCode: aws.String("tokenCodeType"), + } + resp, err := svc.AssumeRole(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSTS_AssumeRoleWithSAML() { + svc := sts.New(session.New()) + + params := &sts.AssumeRoleWithSAMLInput{ + PrincipalArn: aws.String("arnType"), // Required + RoleArn: aws.String("arnType"), // Required + SAMLAssertion: aws.String("SAMLAssertionType"), // Required + DurationSeconds: aws.Int64(1), + Policy: aws.String("sessionPolicyDocumentType"), + } + resp, err := svc.AssumeRoleWithSAML(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSTS_AssumeRoleWithWebIdentity() { + svc := sts.New(session.New()) + + params := &sts.AssumeRoleWithWebIdentityInput{ + RoleArn: aws.String("arnType"), // Required + RoleSessionName: aws.String("roleSessionNameType"), // Required + WebIdentityToken: aws.String("clientTokenType"), // Required + DurationSeconds: aws.Int64(1), + Policy: aws.String("sessionPolicyDocumentType"), + ProviderId: aws.String("urlType"), + } + resp, err := svc.AssumeRoleWithWebIdentity(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSTS_DecodeAuthorizationMessage() { + svc := sts.New(session.New()) + + params := &sts.DecodeAuthorizationMessageInput{ + EncodedMessage: aws.String("encodedMessageType"), // Required + } + resp, err := svc.DecodeAuthorizationMessage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSTS_GetCallerIdentity() { + svc := sts.New(session.New()) + + var params *sts.GetCallerIdentityInput + resp, err := svc.GetCallerIdentity(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSTS_GetFederationToken() { + svc := sts.New(session.New()) + + params := &sts.GetFederationTokenInput{ + Name: aws.String("userNameType"), // Required + DurationSeconds: aws.Int64(1), + Policy: aws.String("sessionPolicyDocumentType"), + } + resp, err := svc.GetFederationToken(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSTS_GetSessionToken() { + svc := sts.New(session.New()) + + params := &sts.GetSessionTokenInput{ + DurationSeconds: aws.Int64(1), + SerialNumber: aws.String("serialNumberType"), + TokenCode: aws.String("tokenCodeType"), + } + resp, err := svc.GetSessionToken(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go new file mode 100644 index 000000000..c938e6ca1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go @@ -0,0 +1,130 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+
+package sts
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol/query"
+)
+
+// The AWS Security Token Service (STS) is a web service that enables you to
+// request temporary, limited-privilege credentials for AWS Identity and Access
+// Management (IAM) users or for users that you authenticate (federated users).
+// This guide provides descriptions of the STS API. For more detailed information
+// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
+//
+// As an alternative to using the API, you can use one of the AWS SDKs, which
+// consist of libraries and sample code for various programming languages and
+// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient
+// way to create programmatic access to STS. For example, the SDKs take care
+// of cryptographically signing requests, managing errors, and retrying requests
+// automatically. For information about the AWS SDKs, including how to download
+// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/).
+//
+// For information about setting up signatures and authorization through the
+// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html)
+// in the AWS General Reference. For general information about the Query API,
+// go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
+// in Using IAM. For information about using security tokens with other AWS
+// products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html)
+// in the IAM User Guide.
+//
+// If you're new to AWS and need additional technical information about a specific
+// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/
+// (http://aws.amazon.com/documentation/).
+//
+// Endpoints
+//
+// The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com
+// that maps to the US East (N. Virginia) region. Additional regions are available
+// and are activated by default. For more information, see Activating and Deactivating
+// AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region)
+// in the AWS General Reference.
+//
+// Recording API requests
+//
+// STS supports AWS CloudTrail, which is a service that records AWS calls for
+// your AWS account and delivers log files to an Amazon S3 bucket. By using
+// information collected by CloudTrail, you can determine what requests were
+// successfully made to STS, who made the request, when it was made, and so
+// on. To learn more about CloudTrail, including how to turn it on and find
+// your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
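+//
+// As a sketch, a client pinned to a specific regional STS endpoint might be
+// constructed as follows (sess, the region, and the URL are illustrative):
+//
+//    svc := sts.New(sess, aws.NewConfig().
+//        WithRegion("us-west-2").
+//        WithEndpoint("https://sts.us-west-2.amazonaws.com"))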
+type STS struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "sts" + +// New creates a new instance of the STS client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a STS client from just a session. +// svc := sts.New(mySession) +// +// // Create a STS client with additional configuration +// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *STS { + svc := &STS{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2011-06-15", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a STS operation and runs any +// custom request initialization. +func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go new file mode 100644 index 000000000..22301ca98 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go @@ -0,0 +1,42 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package stsiface provides an interface for the AWS Security Token Service. +package stsiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/sts" +) + +// STSAPI is the interface type for sts.STS. 
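+//
+// Its primary use is decoupling calling code from the concrete client so that
+// unit tests can substitute a stub; a minimal sketch (mockSTS and the canned
+// account ID are illustrative):
+//
+//    type mockSTS struct {
+//        stsiface.STSAPI // embedding satisfies the full interface
+//    }
+//
+//    func (m *mockSTS) GetCallerIdentity(in *sts.GetCallerIdentityInput) (*sts.GetCallerIdentityOutput, error) {
+//        return &sts.GetCallerIdentityOutput{Account: aws.String("123456789012")}, nil
+//    }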
+type STSAPI interface { + AssumeRoleRequest(*sts.AssumeRoleInput) (*request.Request, *sts.AssumeRoleOutput) + + AssumeRole(*sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) + + AssumeRoleWithSAMLRequest(*sts.AssumeRoleWithSAMLInput) (*request.Request, *sts.AssumeRoleWithSAMLOutput) + + AssumeRoleWithSAML(*sts.AssumeRoleWithSAMLInput) (*sts.AssumeRoleWithSAMLOutput, error) + + AssumeRoleWithWebIdentityRequest(*sts.AssumeRoleWithWebIdentityInput) (*request.Request, *sts.AssumeRoleWithWebIdentityOutput) + + AssumeRoleWithWebIdentity(*sts.AssumeRoleWithWebIdentityInput) (*sts.AssumeRoleWithWebIdentityOutput, error) + + DecodeAuthorizationMessageRequest(*sts.DecodeAuthorizationMessageInput) (*request.Request, *sts.DecodeAuthorizationMessageOutput) + + DecodeAuthorizationMessage(*sts.DecodeAuthorizationMessageInput) (*sts.DecodeAuthorizationMessageOutput, error) + + GetCallerIdentityRequest(*sts.GetCallerIdentityInput) (*request.Request, *sts.GetCallerIdentityOutput) + + GetCallerIdentity(*sts.GetCallerIdentityInput) (*sts.GetCallerIdentityOutput, error) + + GetFederationTokenRequest(*sts.GetFederationTokenInput) (*request.Request, *sts.GetFederationTokenOutput) + + GetFederationToken(*sts.GetFederationTokenInput) (*sts.GetFederationTokenOutput, error) + + GetSessionTokenRequest(*sts.GetSessionTokenInput) (*request.Request, *sts.GetSessionTokenOutput) + + GetSessionToken(*sts.GetSessionTokenInput) (*sts.GetSessionTokenOutput, error) +} + +var _ STSAPI = (*sts.STS)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/support/api.go b/vendor/github.com/aws/aws-sdk-go/service/support/api.go new file mode 100644 index 000000000..0051204d1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/support/api.go @@ -0,0 +1,2128 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package support provides a client for AWS Support. +package support + +import ( + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAddAttachmentsToSet = "AddAttachmentsToSet" + +// AddAttachmentsToSetRequest generates a "aws/request.Request" representing the +// client's request for the AddAttachmentsToSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddAttachmentsToSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddAttachmentsToSetRequest method. +// req, resp := client.AddAttachmentsToSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Support) AddAttachmentsToSetRequest(input *AddAttachmentsToSetInput) (req *request.Request, output *AddAttachmentsToSetOutput) { + op := &request.Operation{ + Name: opAddAttachmentsToSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddAttachmentsToSetInput{} + } + + req = c.newRequest(op, input, output) + output = &AddAttachmentsToSetOutput{} + req.Data = output + return +} + +// Adds one or more attachments to an attachment set. 
If an AttachmentSetId +// is not specified, a new attachment set is created, and the ID of the set +// is returned in the response. If an AttachmentSetId is specified, the attachments +// are added to the specified set, if it exists. +// +// An attachment set is a temporary container for attachments that are to be +// added to a case or case communication. The set is available for one hour +// after it is created; the ExpiryTime returned in the response indicates when +// the set expires. The maximum number of attachments in a set is 3, and the +// maximum size of any attachment in the set is 5 MB. +func (c *Support) AddAttachmentsToSet(input *AddAttachmentsToSetInput) (*AddAttachmentsToSetOutput, error) { + req, out := c.AddAttachmentsToSetRequest(input) + err := req.Send() + return out, err +} + +const opAddCommunicationToCase = "AddCommunicationToCase" + +// AddCommunicationToCaseRequest generates a "aws/request.Request" representing the +// client's request for the AddCommunicationToCase operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddCommunicationToCase method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddCommunicationToCaseRequest method. +// req, resp := client.AddCommunicationToCaseRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Support) AddCommunicationToCaseRequest(input *AddCommunicationToCaseInput) (req *request.Request, output *AddCommunicationToCaseOutput) { + op := &request.Operation{ + Name: opAddCommunicationToCase, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddCommunicationToCaseInput{} + } + + req = c.newRequest(op, input, output) + output = &AddCommunicationToCaseOutput{} + req.Data = output + return +} + +// Adds additional customer communication to an AWS Support case. You use the +// CaseId value to identify the case to add communication to. You can list a +// set of email addresses to copy on the communication using the CcEmailAddresses +// value. The CommunicationBody value contains the text of the communication. +// +// The response indicates the success or failure of the request. +// +// This operation implements a subset of the features of the AWS Support Center. +func (c *Support) AddCommunicationToCase(input *AddCommunicationToCaseInput) (*AddCommunicationToCaseOutput, error) { + req, out := c.AddCommunicationToCaseRequest(input) + err := req.Send() + return out, err +} + +const opCreateCase = "CreateCase" + +// CreateCaseRequest generates a "aws/request.Request" representing the +// client's request for the CreateCase operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If
+// you just want the service response, call the CreateCase method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the CreateCaseRequest method.
+// req, resp := client.CreateCaseRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *Support) CreateCaseRequest(input *CreateCaseInput) (req *request.Request, output *CreateCaseOutput) {
+ op := &request.Operation{
+ Name: opCreateCase,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateCaseInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &CreateCaseOutput{}
+ req.Data = output
+ return
+}
+
+// Creates a new case in the AWS Support Center. This operation is modeled on
+// the behavior of the AWS Support Center Create Case (https://console.aws.amazon.com/support/home#/case/create)
+// page. Its parameters require you to specify the following information:
+//
+//   * IssueType. The type of issue for the case. You can specify either "customer-service"
+//     or "technical." If you do not indicate a value, the default is "technical."
+//
+//   * ServiceCode. The code for an AWS service. You obtain the ServiceCode
+//     by calling DescribeServices.
+//
+//   * CategoryCode. The category for the service defined for the ServiceCode
+//     value. You also obtain the category code for a service by calling DescribeServices.
+//     Each AWS service defines its own set of category codes.
+//
+//   * SeverityCode. A value that indicates the urgency of the case, which
+//     in turn determines the response time according to your service level
+//     agreement with AWS Support. You obtain the SeverityCode by calling DescribeSeverityLevels.
+//
+//   * Subject. The Subject field on the AWS Support Center Create Case (https://console.aws.amazon.com/support/home#/case/create)
+//     page.
+//
+//   * CommunicationBody. The Description field on the AWS Support Center Create
+//     Case (https://console.aws.amazon.com/support/home#/case/create) page.
+//
+//   * AttachmentSetId. The ID of a set of attachments that has been created
+//     by using AddAttachmentsToSet.
+//
+//   * Language. The human language in which AWS Support handles the case.
+//     English and Japanese are currently supported.
+//
+//   * CcEmailAddresses. The AWS Support Center CC field on the Create Case
+//     (https://console.aws.amazon.com/support/home#/case/create) page. You can
+//     list email addresses to be copied on any correspondence about the case.
+//
+// The account that opens the case is already identified by passing the AWS
+// Credentials in the HTTP POST method or in a method or function call from
+// one of the programming languages supported by an AWS SDK (http://aws.amazon.com/tools/).
+// To add additional communication or attachments to an existing case, use
+// AddCommunicationToCase.
+//
+// A successful CreateCase request returns an AWS Support case number. Case
+// numbers are used by the DescribeCases operation to retrieve existing AWS
+// Support cases.
+func (c *Support) CreateCase(input *CreateCaseInput) (*CreateCaseOutput, error) {
+ req, out := c.CreateCaseRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDescribeAttachment = "DescribeAttachment"
+
+// DescribeAttachmentRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeAttachment operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
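+//
+// A sketch of the custom-handler use this enables (the handler body is
+// illustrative):
+//
+//    req, resp := client.DescribeAttachmentRequest(params)
+//    req.Handlers.Send.PushFront(func(r *request.Request) {
+//        // inspect or mutate r.HTTPRequest before it is sent
+//    })
+//    if err := req.Send(); err == nil {
+//        fmt.Println(resp)
+//    }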
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeAttachment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeAttachmentRequest method. +// req, resp := client.DescribeAttachmentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Support) DescribeAttachmentRequest(input *DescribeAttachmentInput) (req *request.Request, output *DescribeAttachmentOutput) { + op := &request.Operation{ + Name: opDescribeAttachment, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAttachmentInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAttachmentOutput{} + req.Data = output + return +} + +// Returns the attachment that has the specified ID. Attachment IDs are generated +// by the case management system when you add an attachment to a case or case +// communication. Attachment IDs are returned in the AttachmentDetails objects +// that are returned by the DescribeCommunications operation. +func (c *Support) DescribeAttachment(input *DescribeAttachmentInput) (*DescribeAttachmentOutput, error) { + req, out := c.DescribeAttachmentRequest(input) + err := req.Send() + return out, err +} + +const opDescribeCases = "DescribeCases" + +// DescribeCasesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCases operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeCases method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeCasesRequest method. +// req, resp := client.DescribeCasesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Support) DescribeCasesRequest(input *DescribeCasesInput) (req *request.Request, output *DescribeCasesOutput) { + op := &request.Operation{ + Name: opDescribeCases, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeCasesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCasesOutput{} + req.Data = output + return +} + +// Returns a list of cases that you specify by passing one or more case IDs. +// In addition, you can filter the cases by date by setting values for the AfterTime +// and BeforeTime request parameters. You can set values for the IncludeResolvedCases +// and IncludeCommunications request parameters to control how much information +// is returned. 
+// +// Case data is available for 12 months after creation. If a case was created +// more than 12 months ago, a request for data might cause an error. +// +// The response returns the following in JSON format: +// +// One or more CaseDetails data types. One or more NextToken values, which +// specify where to paginate the returned records represented by the CaseDetails +// objects. +func (c *Support) DescribeCases(input *DescribeCasesInput) (*DescribeCasesOutput, error) { + req, out := c.DescribeCasesRequest(input) + err := req.Send() + return out, err +} + +// DescribeCasesPages iterates over the pages of a DescribeCases operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeCases method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeCases operation. +// pageNum := 0 +// err := client.DescribeCasesPages(params, +// func(page *DescribeCasesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Support) DescribeCasesPages(input *DescribeCasesInput, fn func(p *DescribeCasesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeCasesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeCasesOutput), lastPage) + }) +} + +const opDescribeCommunications = "DescribeCommunications" + +// DescribeCommunicationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCommunications operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeCommunications method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeCommunicationsRequest method. +// req, resp := client.DescribeCommunicationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Support) DescribeCommunicationsRequest(input *DescribeCommunicationsInput) (req *request.Request, output *DescribeCommunicationsOutput) { + op := &request.Operation{ + Name: opDescribeCommunications, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeCommunicationsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCommunicationsOutput{} + req.Data = output + return +} + +// Returns communications (and attachments) for one or more support cases. You +// can use the AfterTime and BeforeTime parameters to filter by date. You can +// use the CaseId parameter to restrict the results to a particular case. +// +// Case data is available for 12 months after creation. 
If a case was created +// more than 12 months ago, a request for data might cause an error. +// +// You can use the MaxResults and NextToken parameters to control the pagination +// of the result set. Set MaxResults to the number of cases you want displayed +// on each page, and use NextToken to specify the resumption of pagination. +func (c *Support) DescribeCommunications(input *DescribeCommunicationsInput) (*DescribeCommunicationsOutput, error) { + req, out := c.DescribeCommunicationsRequest(input) + err := req.Send() + return out, err +} + +// DescribeCommunicationsPages iterates over the pages of a DescribeCommunications operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeCommunications method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeCommunications operation. +// pageNum := 0 +// err := client.DescribeCommunicationsPages(params, +// func(page *DescribeCommunicationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Support) DescribeCommunicationsPages(input *DescribeCommunicationsInput, fn func(p *DescribeCommunicationsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeCommunicationsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeCommunicationsOutput), lastPage) + }) +} + +const opDescribeServices = "DescribeServices" + +// DescribeServicesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeServices operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeServices method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeServicesRequest method. +// req, resp := client.DescribeServicesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Support) DescribeServicesRequest(input *DescribeServicesInput) (req *request.Request, output *DescribeServicesOutput) { + op := &request.Operation{ + Name: opDescribeServices, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeServicesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeServicesOutput{} + req.Data = output + return +} + +// Returns the current list of AWS services and a list of service categories +// that applies to each one. You then use service names and categories in your +// CreateCase requests. Each AWS service has its own set of categories. 
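+//
+// A hypothetical sketch (not part of the generated file) of reading category
+// codes for later CreateCase calls; "client" is assumed to be a *Support value:
+//
+//    out, err := client.DescribeServices(&DescribeServicesInput{})
+//    if err == nil && len(out.Services) > 0 {
+//        for _, category := range out.Services[0].Categories {
+//            fmt.Println(*category.Code, *category.Name)
+//        }
+//    }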
+// +// The service codes and category codes correspond to the values that are displayed +// in the Service and Category drop-down lists on the AWS Support Center Create +// Case (https://console.aws.amazon.com/support/home#/case/create) page. The +// values in those fields, however, do not necessarily match the service codes +// and categories returned by the DescribeServices request. Always use the service +// codes and categories obtained programmatically. This practice ensures that +// you always have the most recent set of service and category codes. +func (c *Support) DescribeServices(input *DescribeServicesInput) (*DescribeServicesOutput, error) { + req, out := c.DescribeServicesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSeverityLevels = "DescribeSeverityLevels" + +// DescribeSeverityLevelsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSeverityLevels operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeSeverityLevels method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeSeverityLevelsRequest method. +// req, resp := client.DescribeSeverityLevelsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Support) DescribeSeverityLevelsRequest(input *DescribeSeverityLevelsInput) (req *request.Request, output *DescribeSeverityLevelsOutput) { + op := &request.Operation{ + Name: opDescribeSeverityLevels, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSeverityLevelsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSeverityLevelsOutput{} + req.Data = output + return +} + +// Returns the list of severity levels that you can assign to an AWS Support +// case. The severity level for a case is also a field in the CaseDetails data +// type included in any CreateCase request. +func (c *Support) DescribeSeverityLevels(input *DescribeSeverityLevelsInput) (*DescribeSeverityLevelsOutput, error) { + req, out := c.DescribeSeverityLevelsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeTrustedAdvisorCheckRefreshStatuses = "DescribeTrustedAdvisorCheckRefreshStatuses" + +// DescribeTrustedAdvisorCheckRefreshStatusesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTrustedAdvisorCheckRefreshStatuses operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeTrustedAdvisorCheckRefreshStatuses method directly +// instead. 
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DescribeTrustedAdvisorCheckRefreshStatusesRequest method.
+//    req, resp := client.DescribeTrustedAdvisorCheckRefreshStatusesRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Support) DescribeTrustedAdvisorCheckRefreshStatusesRequest(input *DescribeTrustedAdvisorCheckRefreshStatusesInput) (req *request.Request, output *DescribeTrustedAdvisorCheckRefreshStatusesOutput) {
+	op := &request.Operation{
+		Name:       opDescribeTrustedAdvisorCheckRefreshStatuses,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeTrustedAdvisorCheckRefreshStatusesInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeTrustedAdvisorCheckRefreshStatusesOutput{}
+	req.Data = output
+	return
+}
+
+// Returns the refresh status of the Trusted Advisor checks that have the specified
+// check IDs. Check IDs can be obtained by calling DescribeTrustedAdvisorChecks.
+func (c *Support) DescribeTrustedAdvisorCheckRefreshStatuses(input *DescribeTrustedAdvisorCheckRefreshStatusesInput) (*DescribeTrustedAdvisorCheckRefreshStatusesOutput, error) {
+	req, out := c.DescribeTrustedAdvisorCheckRefreshStatusesRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeTrustedAdvisorCheckResult = "DescribeTrustedAdvisorCheckResult"
+
+// DescribeTrustedAdvisorCheckResultRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeTrustedAdvisorCheckResult operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeTrustedAdvisorCheckResult method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DescribeTrustedAdvisorCheckResultRequest method.
+//    req, resp := client.DescribeTrustedAdvisorCheckResultRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Support) DescribeTrustedAdvisorCheckResultRequest(input *DescribeTrustedAdvisorCheckResultInput) (req *request.Request, output *DescribeTrustedAdvisorCheckResultOutput) {
+	op := &request.Operation{
+		Name:       opDescribeTrustedAdvisorCheckResult,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeTrustedAdvisorCheckResultInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeTrustedAdvisorCheckResultOutput{}
+	req.Data = output
+	return
+}
+
+// Returns the results of the Trusted Advisor check that has the specified check
+// ID. Check IDs can be obtained by calling DescribeTrustedAdvisorChecks.
+//
+// The response contains a TrustedAdvisorCheckResult object, which contains
+// these three objects:
+//
+//    * TrustedAdvisorCategorySpecificSummary
+//
+//    * TrustedAdvisorResourceDetail
+//
+//    * TrustedAdvisorResourcesSummary
+//
+// In addition, the response contains these fields:
+//
+//    * Status. The alert status of the check: "ok" (green), "warning" (yellow),
+//      "error" (red), or "not_available".
+//
+//    * Timestamp. The time of the last refresh of the check.
+//
+//    * CheckId. The unique identifier for the check.
+func (c *Support) DescribeTrustedAdvisorCheckResult(input *DescribeTrustedAdvisorCheckResultInput) (*DescribeTrustedAdvisorCheckResultOutput, error) {
+	req, out := c.DescribeTrustedAdvisorCheckResultRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeTrustedAdvisorCheckSummaries = "DescribeTrustedAdvisorCheckSummaries"
+
+// DescribeTrustedAdvisorCheckSummariesRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeTrustedAdvisorCheckSummaries operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeTrustedAdvisorCheckSummaries method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DescribeTrustedAdvisorCheckSummariesRequest method.
+//    req, resp := client.DescribeTrustedAdvisorCheckSummariesRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Support) DescribeTrustedAdvisorCheckSummariesRequest(input *DescribeTrustedAdvisorCheckSummariesInput) (req *request.Request, output *DescribeTrustedAdvisorCheckSummariesOutput) {
+	op := &request.Operation{
+		Name:       opDescribeTrustedAdvisorCheckSummaries,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeTrustedAdvisorCheckSummariesInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeTrustedAdvisorCheckSummariesOutput{}
+	req.Data = output
+	return
+}
+
+// Returns the summaries of the results of the Trusted Advisor checks that have
+// the specified check IDs. Check IDs can be obtained by calling DescribeTrustedAdvisorChecks.
+//
+// The response contains an array of TrustedAdvisorCheckSummary objects.
+func (c *Support) DescribeTrustedAdvisorCheckSummaries(input *DescribeTrustedAdvisorCheckSummariesInput) (*DescribeTrustedAdvisorCheckSummariesOutput, error) {
+	req, out := c.DescribeTrustedAdvisorCheckSummariesRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeTrustedAdvisorChecks = "DescribeTrustedAdvisorChecks"
+
+// DescribeTrustedAdvisorChecksRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeTrustedAdvisorChecks operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeTrustedAdvisorChecks method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DescribeTrustedAdvisorChecksRequest method.
+//    req, resp := client.DescribeTrustedAdvisorChecksRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Support) DescribeTrustedAdvisorChecksRequest(input *DescribeTrustedAdvisorChecksInput) (req *request.Request, output *DescribeTrustedAdvisorChecksOutput) {
+	op := &request.Operation{
+		Name:       opDescribeTrustedAdvisorChecks,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeTrustedAdvisorChecksInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeTrustedAdvisorChecksOutput{}
+	req.Data = output
+	return
+}
+
+// Returns information about all available Trusted Advisor checks, including
+// name, ID, category, description, and metadata. You must specify a language
+// code; English ("en") and Japanese ("ja") are currently supported. The response
+// contains a TrustedAdvisorCheckDescription for each check.
+func (c *Support) DescribeTrustedAdvisorChecks(input *DescribeTrustedAdvisorChecksInput) (*DescribeTrustedAdvisorChecksOutput, error) {
+	req, out := c.DescribeTrustedAdvisorChecksRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opRefreshTrustedAdvisorCheck = "RefreshTrustedAdvisorCheck"
+
+// RefreshTrustedAdvisorCheckRequest generates a "aws/request.Request" representing the
+// client's request for the RefreshTrustedAdvisorCheck operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the RefreshTrustedAdvisorCheck method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the RefreshTrustedAdvisorCheckRequest method.
+//    req, resp := client.RefreshTrustedAdvisorCheckRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *Support) RefreshTrustedAdvisorCheckRequest(input *RefreshTrustedAdvisorCheckInput) (req *request.Request, output *RefreshTrustedAdvisorCheckOutput) {
+	op := &request.Operation{
+		Name:       opRefreshTrustedAdvisorCheck,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &RefreshTrustedAdvisorCheckInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &RefreshTrustedAdvisorCheckOutput{}
+	req.Data = output
+	return
+}
+
+// Requests a refresh of the Trusted Advisor check that has the specified check
+// ID. Check IDs can be obtained by calling DescribeTrustedAdvisorChecks.
+//
+// The response contains a TrustedAdvisorCheckRefreshStatus object, which contains
+// these fields:
+//
+//    * Status. The refresh status of the check: "none", "enqueued", "processing",
+//      "success", or "abandoned".
+//
+//    * MillisUntilNextRefreshable. The amount of time, in milliseconds, until
+//      the check is eligible for refresh.
+//
+//    * CheckId. The unique identifier for the check.
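+//
+// A hypothetical sketch (not part of the generated file); "client" is assumed
+// to be a *Support value and "checkId" a value obtained from
+// DescribeTrustedAdvisorChecks:
+//
+//    out, err := client.RefreshTrustedAdvisorCheck(&RefreshTrustedAdvisorCheckInput{
+//        CheckId: aws.String(checkId),
+//    })
+//    if err == nil {
+//        fmt.Println(*out.Status.Status, *out.Status.MillisUntilNextRefreshable)
+//    }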
+func (c *Support) RefreshTrustedAdvisorCheck(input *RefreshTrustedAdvisorCheckInput) (*RefreshTrustedAdvisorCheckOutput, error) { + req, out := c.RefreshTrustedAdvisorCheckRequest(input) + err := req.Send() + return out, err +} + +const opResolveCase = "ResolveCase" + +// ResolveCaseRequest generates a "aws/request.Request" representing the +// client's request for the ResolveCase operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ResolveCase method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ResolveCaseRequest method. +// req, resp := client.ResolveCaseRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *Support) ResolveCaseRequest(input *ResolveCaseInput) (req *request.Request, output *ResolveCaseOutput) { + op := &request.Operation{ + Name: opResolveCase, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResolveCaseInput{} + } + + req = c.newRequest(op, input, output) + output = &ResolveCaseOutput{} + req.Data = output + return +} + +// Takes a CaseId and returns the initial state of the case along with the state +// of the case after the call to ResolveCase completed. +func (c *Support) ResolveCase(input *ResolveCaseInput) (*ResolveCaseOutput, error) { + req, out := c.ResolveCaseRequest(input) + err := req.Send() + return out, err +} + +type AddAttachmentsToSetInput struct { + _ struct{} `type:"structure"` + + // The ID of the attachment set. If an AttachmentSetId is not specified, a new + // attachment set is created, and the ID of the set is returned in the response. + // If an AttachmentSetId is specified, the attachments are added to the specified + // set, if it exists. + AttachmentSetId *string `locationName:"attachmentSetId" type:"string"` + + // One or more attachments to add to the set. The limit is 3 attachments per + // set, and the size limit is 5 MB per attachment. + Attachments []*Attachment `locationName:"attachments" type:"list" required:"true"` +} + +// String returns the string representation +func (s AddAttachmentsToSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddAttachmentsToSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddAttachmentsToSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddAttachmentsToSetInput"} + if s.Attachments == nil { + invalidParams.Add(request.NewErrParamRequired("Attachments")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The ID and expiry time of the attachment set returned by the AddAttachmentsToSet +// operation. +type AddAttachmentsToSetOutput struct { + _ struct{} `type:"structure"` + + // The ID of the attachment set. If an AttachmentSetId was not specified, a + // new attachment set is created, and the ID of the set is returned in the response. 
+	// If an AttachmentSetId was specified, the attachments are added to the specified
+	// set, if it exists.
+	AttachmentSetId *string `locationName:"attachmentSetId" type:"string"`
+
+	// The time and date when the attachment set expires.
+	ExpiryTime *string `locationName:"expiryTime" type:"string"`
+}
+
+// String returns the string representation
+func (s AddAttachmentsToSetOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddAttachmentsToSetOutput) GoString() string {
+	return s.String()
+}
+
+// Input for the AddCommunicationToCase operation.
+type AddCommunicationToCaseInput struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of a set of one or more attachments for the communication to add to
+	// the case. Create the set by calling AddAttachmentsToSet.
+	AttachmentSetId *string `locationName:"attachmentSetId" type:"string"`
+
+	// The AWS Support case ID requested or returned in the call. The case ID is
+	// an alphanumeric string formatted as shown in this example: case-12345678910-2013-c4c1d2bf33c5cf47
+	CaseId *string `locationName:"caseId" type:"string"`
+
+	// The email addresses in the CC line of an email to be added to the support
+	// case.
+	CcEmailAddresses []*string `locationName:"ccEmailAddresses" type:"list"`
+
+	// The body of an email communication to add to the support case.
+	CommunicationBody *string `locationName:"communicationBody" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AddCommunicationToCaseInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddCommunicationToCaseInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AddCommunicationToCaseInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "AddCommunicationToCaseInput"}
+	if s.CommunicationBody == nil {
+		invalidParams.Add(request.NewErrParamRequired("CommunicationBody"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The result of the AddCommunicationToCase operation.
+type AddCommunicationToCaseOutput struct {
+	_ struct{} `type:"structure"`
+
+	// True if AddCommunicationToCase succeeds. Otherwise, returns an error.
+	Result *bool `locationName:"result" type:"boolean"`
+}
+
+// String returns the string representation
+func (s AddCommunicationToCaseOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddCommunicationToCaseOutput) GoString() string {
+	return s.String()
+}
+
+// An attachment to a case communication. The attachment consists of the file
+// name and the content of the file.
+type Attachment struct {
+	_ struct{} `type:"structure"`
+
+	// The content of the attachment file.
+	//
+	// Data is automatically base64 encoded/decoded by the SDK.
+	Data []byte `locationName:"data" type:"blob"`
+
+	// The name of the attachment file.
+	FileName *string `locationName:"fileName" type:"string"`
+}
+
+// String returns the string representation
+func (s Attachment) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Attachment) GoString() string {
+	return s.String()
+}
+
+// The file name and ID of an attachment to a case communication. You can use
+// the ID to retrieve the attachment with the DescribeAttachment operation.
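+//
+// A hypothetical sketch (not part of the generated file); "client" is assumed
+// to be a *Support value and "details" an AttachmentDetails value taken from
+// a DescribeCommunications response:
+//
+//    out, err := client.DescribeAttachment(&DescribeAttachmentInput{
+//        AttachmentId: details.AttachmentId,
+//    })
+//    if err == nil {
+//        fmt.Println(*out.Attachment.FileName, len(out.Attachment.Data))
+//    }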
+type AttachmentDetails struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the attachment.
+	AttachmentId *string `locationName:"attachmentId" type:"string"`
+
+	// The file name of the attachment.
+	FileName *string `locationName:"fileName" type:"string"`
+}
+
+// String returns the string representation
+func (s AttachmentDetails) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AttachmentDetails) GoString() string {
+	return s.String()
+}
+
+// A JSON-formatted object that contains the metadata for a support case. It
+// is contained in the response from a DescribeCases request. CaseDetails contains
+// the following fields:
+//
+//    * CaseId. The AWS Support case ID requested or returned in the call. The
+//      case ID is an alphanumeric string formatted as shown in this example:
+//      case-12345678910-2013-c4c1d2bf33c5cf47.
+//
+//    * CategoryCode. The category of problem for the AWS Support case. Corresponds
+//      to the CategoryCode values returned by a call to DescribeServices.
+//
+//    * DisplayId. The identifier for the case on pages in the AWS Support Center.
+//
+//    * Language. The ISO 639-1 code for the language in which AWS provides support.
+//      AWS Support currently supports English ("en") and Japanese ("ja"). Language
+//      parameters must be passed explicitly for operations that take them.
+//
+//    * RecentCommunications. One or more Communication objects. Fields of these
+//      objects are Attachments, Body, CaseId, SubmittedBy, and TimeCreated.
+//
+//    * NextToken. A resumption point for pagination.
+//
+//    * ServiceCode. The identifier for the AWS service that corresponds to the
+//      service code defined in the call to DescribeServices.
+//
+//    * SeverityCode. The severity code assigned to the case. Contains one of
+//      the values returned by the call to DescribeSeverityLevels.
+//
+//    * Status. The status of the case in the AWS Support Center.
+//
+//    * Subject. The subject line of the case.
+//
+//    * SubmittedBy. The email address of the account that submitted the case.
+//
+//    * TimeCreated. The time the case was created, in ISO-8601 format.
+type CaseDetails struct {
+	_ struct{} `type:"structure"`
+
+	// The AWS Support case ID requested or returned in the call. The case ID is
+	// an alphanumeric string formatted as shown in this example: case-12345678910-2013-c4c1d2bf33c5cf47
+	CaseId *string `locationName:"caseId" type:"string"`
+
+	// The category of problem for the AWS Support case.
+	CategoryCode *string `locationName:"categoryCode" type:"string"`
+
+	// The email addresses that receive copies of communication about the case.
+	CcEmailAddresses []*string `locationName:"ccEmailAddresses" type:"list"`
+
+	// The ID displayed for the case in the AWS Support Center. This is a numeric
+	// string.
+	DisplayId *string `locationName:"displayId" type:"string"`
+
+	// The ISO 639-1 code for the language in which AWS provides support. AWS Support
+	// currently supports English ("en") and Japanese ("ja"). Language parameters
+	// must be passed explicitly for operations that take them.
+	Language *string `locationName:"language" type:"string"`
+
+	// The five most recent communications between you and AWS Support Center, including
+	// the IDs of any attachments to the communications. Also includes a nextToken
+	// that you can use to retrieve earlier communications.
+	RecentCommunications *RecentCaseCommunications `locationName:"recentCommunications" type:"structure"`
+
+	// The code for the AWS service returned by the call to DescribeServices.
+	ServiceCode *string `locationName:"serviceCode" type:"string"`
+
+	// The code for the severity level returned by the call to DescribeSeverityLevels.
+	SeverityCode *string `locationName:"severityCode" type:"string"`
+
+	// The status of the case.
+	Status *string `locationName:"status" type:"string"`
+
+	// The subject line for the case in the AWS Support Center.
+	Subject *string `locationName:"subject" type:"string"`
+
+	// The email address of the account that submitted the case.
+	SubmittedBy *string `locationName:"submittedBy" type:"string"`
+
+	// The time that the case was created in the AWS Support Center.
+	TimeCreated *string `locationName:"timeCreated" type:"string"`
+}
+
+// String returns the string representation
+func (s CaseDetails) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CaseDetails) GoString() string {
+	return s.String()
+}
+
+// A JSON-formatted name/value pair that represents the category name and category
+// code of the problem, selected from the DescribeServices response for each
+// AWS service.
+type Category struct {
+	_ struct{} `type:"structure"`
+
+	// The category code for the support case.
+	Code *string `locationName:"code" type:"string"`
+
+	// The category name for the support case.
+	Name *string `locationName:"name" type:"string"`
+}
+
+// String returns the string representation
+func (s Category) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Category) GoString() string {
+	return s.String()
+}
+
+// A communication associated with an AWS Support case. The communication consists
+// of the case ID, the message body, attachment information, the account email
+// address, and the date and time of the communication.
+type Communication struct {
+	_ struct{} `type:"structure"`
+
+	// Information about the attachments to the case communication.
+	AttachmentSet []*AttachmentDetails `locationName:"attachmentSet" type:"list"`
+
+	// The text of the communication between the customer and AWS Support.
+	Body *string `locationName:"body" type:"string"`
+
+	// The AWS Support case ID requested or returned in the call. The case ID is
+	// an alphanumeric string formatted as shown in this example: case-12345678910-2013-c4c1d2bf33c5cf47
+	CaseId *string `locationName:"caseId" type:"string"`
+
+	// The email address of the account that submitted the AWS Support case.
+	SubmittedBy *string `locationName:"submittedBy" type:"string"`
+
+	// The time the communication was created.
+	TimeCreated *string `locationName:"timeCreated" type:"string"`
+}
+
+// String returns the string representation
+func (s Communication) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Communication) GoString() string {
+	return s.String()
+}
+
+type CreateCaseInput struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of a set of one or more attachments for the case. Create the set by
+	// using AddAttachmentsToSet.
+	AttachmentSetId *string `locationName:"attachmentSetId" type:"string"`
+
+	// The category of problem for the AWS Support case.
+	CategoryCode *string `locationName:"categoryCode" type:"string"`
+
+	// A list of email addresses that AWS Support copies on case correspondence.
+	CcEmailAddresses []*string `locationName:"ccEmailAddresses" type:"list"`
+
+	// The communication body text when you create an AWS Support case by calling
+	// CreateCase.
+ CommunicationBody *string `locationName:"communicationBody" type:"string" required:"true"` + + // The type of issue for the case. You can specify either "customer-service" + // or "technical." If you do not indicate a value, the default is "technical." + IssueType *string `locationName:"issueType" type:"string"` + + // The ISO 639-1 code for the language in which AWS provides support. AWS Support + // currently supports English ("en") and Japanese ("ja"). Language parameters + // must be passed explicitly for operations that take them. + Language *string `locationName:"language" type:"string"` + + // The code for the AWS service returned by the call to DescribeServices. + ServiceCode *string `locationName:"serviceCode" type:"string"` + + // The code for the severity level returned by the call to DescribeSeverityLevels. + // + // The availability of severity levels depends on each customer's support subscription. + // In other words, your subscription may not necessarily require the urgent + // level of response time. + SeverityCode *string `locationName:"severityCode" type:"string"` + + // The title of the AWS Support case. + Subject *string `locationName:"subject" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateCaseInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCaseInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateCaseInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateCaseInput"} + if s.CommunicationBody == nil { + invalidParams.Add(request.NewErrParamRequired("CommunicationBody")) + } + if s.Subject == nil { + invalidParams.Add(request.NewErrParamRequired("Subject")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The AWS Support case ID returned by a successful completion of the CreateCase +// operation. +type CreateCaseOutput struct { + _ struct{} `type:"structure"` + + // The AWS Support case ID requested or returned in the call. The case ID is + // an alphanumeric string formatted as shown in this example: case-12345678910-2013-c4c1d2bf33c5cf47 + CaseId *string `locationName:"caseId" type:"string"` +} + +// String returns the string representation +func (s CreateCaseOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCaseOutput) GoString() string { + return s.String() +} + +type DescribeAttachmentInput struct { + _ struct{} `type:"structure"` + + // The ID of the attachment to return. Attachment IDs are returned by the DescribeCommunications + // operation. + AttachmentId *string `locationName:"attachmentId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeAttachmentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAttachmentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
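+//
+// A hypothetical sketch (not part of the generated file): Validate catches a
+// missing required field locally, before any request is sent:
+//
+//    in := &DescribeAttachmentInput{}
+//    if err := in.Validate(); err != nil {
+//        fmt.Println(err) // AttachmentId is a required field
+//    }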
+func (s *DescribeAttachmentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeAttachmentInput"} + if s.AttachmentId == nil { + invalidParams.Add(request.NewErrParamRequired("AttachmentId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The content and file name of the attachment returned by the DescribeAttachment +// operation. +type DescribeAttachmentOutput struct { + _ struct{} `type:"structure"` + + // The attachment content and file name. + Attachment *Attachment `locationName:"attachment" type:"structure"` +} + +// String returns the string representation +func (s DescribeAttachmentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAttachmentOutput) GoString() string { + return s.String() +} + +type DescribeCasesInput struct { + _ struct{} `type:"structure"` + + // The start date for a filtered date search on support case communications. + // Case communications are available for 12 months after creation. + AfterTime *string `locationName:"afterTime" type:"string"` + + // The end date for a filtered date search on support case communications. Case + // communications are available for 12 months after creation. + BeforeTime *string `locationName:"beforeTime" type:"string"` + + // A list of ID numbers of the support cases you want returned. The maximum + // number of cases is 100. + CaseIdList []*string `locationName:"caseIdList" type:"list"` + + // The ID displayed for a case in the AWS Support Center user interface. + DisplayId *string `locationName:"displayId" type:"string"` + + // Specifies whether communications should be included in the DescribeCases + // results. The default is true. + IncludeCommunications *bool `locationName:"includeCommunications" type:"boolean"` + + // Specifies whether resolved support cases should be included in the DescribeCases + // results. The default is false. + IncludeResolvedCases *bool `locationName:"includeResolvedCases" type:"boolean"` + + // The ISO 639-1 code for the language in which AWS provides support. AWS Support + // currently supports English ("en") and Japanese ("ja"). Language parameters + // must be passed explicitly for operations that take them. + Language *string `locationName:"language" type:"string"` + + // The maximum number of results to return before paginating. + MaxResults *int64 `locationName:"maxResults" min:"10" type:"integer"` + + // A resumption point for pagination. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeCasesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCasesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeCasesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeCasesInput"} + if s.MaxResults != nil && *s.MaxResults < 10 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 10)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Returns an array of CaseDetails objects and a NextToken that defines a point +// for pagination in the result set. +type DescribeCasesOutput struct { + _ struct{} `type:"structure"` + + // The details for the cases that match the request. 
+ Cases []*CaseDetails `locationName:"cases" type:"list"` + + // A resumption point for pagination. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeCasesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCasesOutput) GoString() string { + return s.String() +} + +type DescribeCommunicationsInput struct { + _ struct{} `type:"structure"` + + // The start date for a filtered date search on support case communications. + // Case communications are available for 12 months after creation. + AfterTime *string `locationName:"afterTime" type:"string"` + + // The end date for a filtered date search on support case communications. Case + // communications are available for 12 months after creation. + BeforeTime *string `locationName:"beforeTime" type:"string"` + + // The AWS Support case ID requested or returned in the call. The case ID is + // an alphanumeric string formatted as shown in this example: case-12345678910-2013-c4c1d2bf33c5cf47 + CaseId *string `locationName:"caseId" type:"string" required:"true"` + + // The maximum number of results to return before paginating. + MaxResults *int64 `locationName:"maxResults" min:"10" type:"integer"` + + // A resumption point for pagination. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeCommunicationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCommunicationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeCommunicationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeCommunicationsInput"} + if s.CaseId == nil { + invalidParams.Add(request.NewErrParamRequired("CaseId")) + } + if s.MaxResults != nil && *s.MaxResults < 10 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 10)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The communications returned by the DescribeCommunications operation. +type DescribeCommunicationsOutput struct { + _ struct{} `type:"structure"` + + // The communications for the case. + Communications []*Communication `locationName:"communications" type:"list"` + + // A resumption point for pagination. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeCommunicationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCommunicationsOutput) GoString() string { + return s.String() +} + +type DescribeServicesInput struct { + _ struct{} `type:"structure"` + + // The ISO 639-1 code for the language in which AWS provides support. AWS Support + // currently supports English ("en") and Japanese ("ja"). Language parameters + // must be passed explicitly for operations that take them. + Language *string `locationName:"language" type:"string"` + + // A JSON-formatted list of service codes available for AWS services. 
+ ServiceCodeList []*string `locationName:"serviceCodeList" type:"list"` +} + +// String returns the string representation +func (s DescribeServicesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeServicesInput) GoString() string { + return s.String() +} + +// The list of AWS services returned by the DescribeServices operation. +type DescribeServicesOutput struct { + _ struct{} `type:"structure"` + + // A JSON-formatted list of AWS services. + Services []*Service `locationName:"services" type:"list"` +} + +// String returns the string representation +func (s DescribeServicesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeServicesOutput) GoString() string { + return s.String() +} + +type DescribeSeverityLevelsInput struct { + _ struct{} `type:"structure"` + + // The ISO 639-1 code for the language in which AWS provides support. AWS Support + // currently supports English ("en") and Japanese ("ja"). Language parameters + // must be passed explicitly for operations that take them. + Language *string `locationName:"language" type:"string"` +} + +// String returns the string representation +func (s DescribeSeverityLevelsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSeverityLevelsInput) GoString() string { + return s.String() +} + +// The list of severity levels returned by the DescribeSeverityLevels operation. +type DescribeSeverityLevelsOutput struct { + _ struct{} `type:"structure"` + + // The available severity levels for the support case. Available severity levels + // are defined by your service level agreement with AWS. + SeverityLevels []*SeverityLevel `locationName:"severityLevels" type:"list"` +} + +// String returns the string representation +func (s DescribeSeverityLevelsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSeverityLevelsOutput) GoString() string { + return s.String() +} + +type DescribeTrustedAdvisorCheckRefreshStatusesInput struct { + _ struct{} `type:"structure"` + + // The IDs of the Trusted Advisor checks. + CheckIds []*string `locationName:"checkIds" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeTrustedAdvisorCheckRefreshStatusesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTrustedAdvisorCheckRefreshStatusesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTrustedAdvisorCheckRefreshStatusesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTrustedAdvisorCheckRefreshStatusesInput"} + if s.CheckIds == nil { + invalidParams.Add(request.NewErrParamRequired("CheckIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The statuses of the Trusted Advisor checks returned by the DescribeTrustedAdvisorCheckRefreshStatuses +// operation. +type DescribeTrustedAdvisorCheckRefreshStatusesOutput struct { + _ struct{} `type:"structure"` + + // The refresh status of the specified Trusted Advisor checks. 
+ Statuses []*TrustedAdvisorCheckRefreshStatus `locationName:"statuses" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeTrustedAdvisorCheckRefreshStatusesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTrustedAdvisorCheckRefreshStatusesOutput) GoString() string { + return s.String() +} + +type DescribeTrustedAdvisorCheckResultInput struct { + _ struct{} `type:"structure"` + + // The unique identifier for the Trusted Advisor check. + CheckId *string `locationName:"checkId" type:"string" required:"true"` + + // The ISO 639-1 code for the language in which AWS provides support. AWS Support + // currently supports English ("en") and Japanese ("ja"). Language parameters + // must be passed explicitly for operations that take them. + Language *string `locationName:"language" type:"string"` +} + +// String returns the string representation +func (s DescribeTrustedAdvisorCheckResultInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTrustedAdvisorCheckResultInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTrustedAdvisorCheckResultInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTrustedAdvisorCheckResultInput"} + if s.CheckId == nil { + invalidParams.Add(request.NewErrParamRequired("CheckId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of the Trusted Advisor check returned by the DescribeTrustedAdvisorCheckResult +// operation. +type DescribeTrustedAdvisorCheckResultOutput struct { + _ struct{} `type:"structure"` + + // The detailed results of the Trusted Advisor check. + Result *TrustedAdvisorCheckResult `locationName:"result" type:"structure"` +} + +// String returns the string representation +func (s DescribeTrustedAdvisorCheckResultOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTrustedAdvisorCheckResultOutput) GoString() string { + return s.String() +} + +type DescribeTrustedAdvisorCheckSummariesInput struct { + _ struct{} `type:"structure"` + + // The IDs of the Trusted Advisor checks. + CheckIds []*string `locationName:"checkIds" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeTrustedAdvisorCheckSummariesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTrustedAdvisorCheckSummariesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTrustedAdvisorCheckSummariesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTrustedAdvisorCheckSummariesInput"} + if s.CheckIds == nil { + invalidParams.Add(request.NewErrParamRequired("CheckIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The summaries of the Trusted Advisor checks returned by the DescribeTrustedAdvisorCheckSummaries +// operation. +type DescribeTrustedAdvisorCheckSummariesOutput struct { + _ struct{} `type:"structure"` + + // The summary information for the requested Trusted Advisor checks. 
+ Summaries []*TrustedAdvisorCheckSummary `locationName:"summaries" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeTrustedAdvisorCheckSummariesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTrustedAdvisorCheckSummariesOutput) GoString() string { + return s.String() +} + +type DescribeTrustedAdvisorChecksInput struct { + _ struct{} `type:"structure"` + + // The ISO 639-1 code for the language in which AWS provides support. AWS Support + // currently supports English ("en") and Japanese ("ja"). Language parameters + // must be passed explicitly for operations that take them. + Language *string `locationName:"language" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeTrustedAdvisorChecksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTrustedAdvisorChecksInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTrustedAdvisorChecksInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTrustedAdvisorChecksInput"} + if s.Language == nil { + invalidParams.Add(request.NewErrParamRequired("Language")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Information about the Trusted Advisor checks returned by the DescribeTrustedAdvisorChecks +// operation. +type DescribeTrustedAdvisorChecksOutput struct { + _ struct{} `type:"structure"` + + // Information about all available Trusted Advisor checks. + Checks []*TrustedAdvisorCheckDescription `locationName:"checks" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeTrustedAdvisorChecksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTrustedAdvisorChecksOutput) GoString() string { + return s.String() +} + +// The five most recent communications associated with the case. +type RecentCaseCommunications struct { + _ struct{} `type:"structure"` + + // The five most recent communications associated with the case. + Communications []*Communication `locationName:"communications" type:"list"` + + // A resumption point for pagination. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s RecentCaseCommunications) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecentCaseCommunications) GoString() string { + return s.String() +} + +type RefreshTrustedAdvisorCheckInput struct { + _ struct{} `type:"structure"` + + // The unique identifier for the Trusted Advisor check. + CheckId *string `locationName:"checkId" type:"string" required:"true"` +} + +// String returns the string representation +func (s RefreshTrustedAdvisorCheckInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RefreshTrustedAdvisorCheckInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *RefreshTrustedAdvisorCheckInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RefreshTrustedAdvisorCheckInput"} + if s.CheckId == nil { + invalidParams.Add(request.NewErrParamRequired("CheckId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The current refresh status of a Trusted Advisor check. +type RefreshTrustedAdvisorCheckOutput struct { + _ struct{} `type:"structure"` + + // The current refresh status for a check, including the amount of time until + // the check is eligible for refresh. + Status *TrustedAdvisorCheckRefreshStatus `locationName:"status" type:"structure" required:"true"` +} + +// String returns the string representation +func (s RefreshTrustedAdvisorCheckOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RefreshTrustedAdvisorCheckOutput) GoString() string { + return s.String() +} + +type ResolveCaseInput struct { + _ struct{} `type:"structure"` + + // The AWS Support case ID requested or returned in the call. The case ID is + // an alphanumeric string formatted as shown in this example: case-12345678910-2013-c4c1d2bf33c5cf47 + CaseId *string `locationName:"caseId" type:"string"` +} + +// String returns the string representation +func (s ResolveCaseInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResolveCaseInput) GoString() string { + return s.String() +} + +// The status of the case returned by the ResolveCase operation. +type ResolveCaseOutput struct { + _ struct{} `type:"structure"` + + // The status of the case after the ResolveCase request was processed. + FinalCaseStatus *string `locationName:"finalCaseStatus" type:"string"` + + // The status of the case when the ResolveCase request was sent. + InitialCaseStatus *string `locationName:"initialCaseStatus" type:"string"` +} + +// String returns the string representation +func (s ResolveCaseOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResolveCaseOutput) GoString() string { + return s.String() +} + +// Information about an AWS service returned by the DescribeServices operation. +type Service struct { + _ struct{} `type:"structure"` + + // A list of categories that describe the type of support issue a case describes. + // Categories consist of a category name and a category code. Category names + // and codes are passed to AWS Support when you call CreateCase. + Categories []*Category `locationName:"categories" type:"list"` + + // The code for an AWS service returned by the DescribeServices response. The + // Name element contains the corresponding friendly name. + Code *string `locationName:"code" type:"string"` + + // The friendly name for an AWS service. The Code element contains the corresponding + // code. + Name *string `locationName:"name" type:"string"` +} + +// String returns the string representation +func (s Service) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Service) GoString() string { + return s.String() +} + +// A code and name pair that represent a severity level that can be applied +// to a support case. +type SeverityLevel struct { + _ struct{} `type:"structure"` + + // One of four values: "low," "medium," "high," and "urgent". These values correspond + // to response times returned to the caller in SeverityLevel.name. 
+ Code *string `locationName:"code" type:"string"` + + // The name of the severity level that corresponds to the severity level code. + Name *string `locationName:"name" type:"string"` +} + +// String returns the string representation +func (s SeverityLevel) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SeverityLevel) GoString() string { + return s.String() +} + +// The container for summary information that relates to the category of the +// Trusted Advisor check. +type TrustedAdvisorCategorySpecificSummary struct { + _ struct{} `type:"structure"` + + // The summary information about cost savings for a Trusted Advisor check that + // is in the Cost Optimizing category. + CostOptimizing *TrustedAdvisorCostOptimizingSummary `locationName:"costOptimizing" type:"structure"` +} + +// String returns the string representation +func (s TrustedAdvisorCategorySpecificSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrustedAdvisorCategorySpecificSummary) GoString() string { + return s.String() +} + +// The description and metadata for a Trusted Advisor check. +type TrustedAdvisorCheckDescription struct { + _ struct{} `type:"structure"` + + // The category of the Trusted Advisor check. + Category *string `locationName:"category" type:"string" required:"true"` + + // The description of the Trusted Advisor check, which includes the alert criteria + // and recommended actions (contains HTML markup). + Description *string `locationName:"description" type:"string" required:"true"` + + // The unique identifier for the Trusted Advisor check. + Id *string `locationName:"id" type:"string" required:"true"` + + // The column headings for the data returned by the Trusted Advisor check. The + // order of the headings corresponds to the order of the data in the Metadata + // element of the TrustedAdvisorResourceDetail for the check. Metadata contains + // all the data that is shown in the Excel download, even in those cases where + // the UI shows just summary data. + Metadata []*string `locationName:"metadata" type:"list" required:"true"` + + // The display name for the Trusted Advisor check. + Name *string `locationName:"name" type:"string" required:"true"` +} + +// String returns the string representation +func (s TrustedAdvisorCheckDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrustedAdvisorCheckDescription) GoString() string { + return s.String() +} + +// The refresh status of a Trusted Advisor check. +type TrustedAdvisorCheckRefreshStatus struct { + _ struct{} `type:"structure"` + + // The unique identifier for the Trusted Advisor check. + CheckId *string `locationName:"checkId" type:"string" required:"true"` + + // The amount of time, in milliseconds, until the Trusted Advisor check is eligible + // for refresh. + MillisUntilNextRefreshable *int64 `locationName:"millisUntilNextRefreshable" type:"long" required:"true"` + + // The status of the Trusted Advisor check for which a refresh has been requested: + // "none", "enqueued", "processing", "success", or "abandoned". 
+ Status *string `locationName:"status" type:"string" required:"true"` +} + +// String returns the string representation +func (s TrustedAdvisorCheckRefreshStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrustedAdvisorCheckRefreshStatus) GoString() string { + return s.String() +} + +// The results of a Trusted Advisor check returned by DescribeTrustedAdvisorCheckResult. +type TrustedAdvisorCheckResult struct { + _ struct{} `type:"structure"` + + // Summary information that relates to the category of the check. Cost Optimizing + // is the only category that is currently supported. + CategorySpecificSummary *TrustedAdvisorCategorySpecificSummary `locationName:"categorySpecificSummary" type:"structure" required:"true"` + + // The unique identifier for the Trusted Advisor check. + CheckId *string `locationName:"checkId" type:"string" required:"true"` + + // The details about each resource listed in the check result. + FlaggedResources []*TrustedAdvisorResourceDetail `locationName:"flaggedResources" type:"list" required:"true"` + + // Details about AWS resources that were analyzed in a call to Trusted Advisor + // DescribeTrustedAdvisorCheckSummaries. + ResourcesSummary *TrustedAdvisorResourcesSummary `locationName:"resourcesSummary" type:"structure" required:"true"` + + // The alert status of the check: "ok" (green), "warning" (yellow), "error" + // (red), or "not_available". + Status *string `locationName:"status" type:"string" required:"true"` + + // The time of the last refresh of the check. + Timestamp *string `locationName:"timestamp" type:"string" required:"true"` +} + +// String returns the string representation +func (s TrustedAdvisorCheckResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrustedAdvisorCheckResult) GoString() string { + return s.String() +} + +// A summary of a Trusted Advisor check result, including the alert status, +// last refresh, and number of resources examined. +type TrustedAdvisorCheckSummary struct { + _ struct{} `type:"structure"` + + // Summary information that relates to the category of the check. Cost Optimizing + // is the only category that is currently supported. + CategorySpecificSummary *TrustedAdvisorCategorySpecificSummary `locationName:"categorySpecificSummary" type:"structure" required:"true"` + + // The unique identifier for the Trusted Advisor check. + CheckId *string `locationName:"checkId" type:"string" required:"true"` + + // Specifies whether the Trusted Advisor check has flagged resources. + HasFlaggedResources *bool `locationName:"hasFlaggedResources" type:"boolean"` + + // Details about AWS resources that were analyzed in a call to Trusted Advisor + // DescribeTrustedAdvisorCheckSummaries. + ResourcesSummary *TrustedAdvisorResourcesSummary `locationName:"resourcesSummary" type:"structure" required:"true"` + + // The alert status of the check: "ok" (green), "warning" (yellow), "error" + // (red), or "not_available". + Status *string `locationName:"status" type:"string" required:"true"` + + // The time of the last refresh of the check. 
+ Timestamp *string `locationName:"timestamp" type:"string" required:"true"` +} + +// String returns the string representation +func (s TrustedAdvisorCheckSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrustedAdvisorCheckSummary) GoString() string { + return s.String() +} + +// The estimated cost savings that might be realized if the recommended actions +// are taken. +type TrustedAdvisorCostOptimizingSummary struct { + _ struct{} `type:"structure"` + + // The estimated monthly savings that might be realized if the recommended actions + // are taken. + EstimatedMonthlySavings *float64 `locationName:"estimatedMonthlySavings" type:"double" required:"true"` + + // The estimated percentage of savings that might be realized if the recommended + // actions are taken. + EstimatedPercentMonthlySavings *float64 `locationName:"estimatedPercentMonthlySavings" type:"double" required:"true"` +} + +// String returns the string representation +func (s TrustedAdvisorCostOptimizingSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrustedAdvisorCostOptimizingSummary) GoString() string { + return s.String() +} + +// Contains information about a resource identified by a Trusted Advisor check. +type TrustedAdvisorResourceDetail struct { + _ struct{} `type:"structure"` + + // Specifies whether the AWS resource was ignored by Trusted Advisor because + // it was marked as suppressed by the user. + IsSuppressed *bool `locationName:"isSuppressed" type:"boolean"` + + // Additional information about the identified resource. The exact metadata + // and its order can be obtained by inspecting the TrustedAdvisorCheckDescription + // object returned by the call to DescribeTrustedAdvisorChecks. Metadata contains + // all the data that is shown in the Excel download, even in those cases where + // the UI shows just summary data. + Metadata []*string `locationName:"metadata" type:"list" required:"true"` + + // The AWS region in which the identified resource is located. + Region *string `locationName:"region" type:"string" required:"true"` + + // The unique identifier for the identified resource. + ResourceId *string `locationName:"resourceId" type:"string" required:"true"` + + // The status code for the resource identified in the Trusted Advisor check. + Status *string `locationName:"status" type:"string" required:"true"` +} + +// String returns the string representation +func (s TrustedAdvisorResourceDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrustedAdvisorResourceDetail) GoString() string { + return s.String() +} + +// Details about AWS resources that were analyzed in a call to Trusted Advisor +// DescribeTrustedAdvisorCheckSummaries. +type TrustedAdvisorResourcesSummary struct { + _ struct{} `type:"structure"` + + // The number of AWS resources that were flagged (listed) by the Trusted Advisor + // check. + ResourcesFlagged *int64 `locationName:"resourcesFlagged" type:"long" required:"true"` + + // The number of AWS resources ignored by Trusted Advisor because information + // was unavailable. + ResourcesIgnored *int64 `locationName:"resourcesIgnored" type:"long" required:"true"` + + // The number of AWS resources that were analyzed by the Trusted Advisor check. 
+ ResourcesProcessed *int64 `locationName:"resourcesProcessed" type:"long" required:"true"` + + // The number of AWS resources ignored by Trusted Advisor because they were + // marked as suppressed by the user. + ResourcesSuppressed *int64 `locationName:"resourcesSuppressed" type:"long" required:"true"` +} + +// String returns the string representation +func (s TrustedAdvisorResourcesSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrustedAdvisorResourcesSummary) GoString() string { + return s.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/support/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/support/examples_test.go new file mode 100644 index 000000000..b67d41b8b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/support/examples_test.go @@ -0,0 +1,332 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package support_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/support" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleSupport_AddAttachmentsToSet() { + svc := support.New(session.New()) + + params := &support.AddAttachmentsToSetInput{ + Attachments: []*support.Attachment{ // Required + { // Required + Data: []byte("PAYLOAD"), + FileName: aws.String("FileName"), + }, + // More values... + }, + AttachmentSetId: aws.String("AttachmentSetId"), + } + resp, err := svc.AddAttachmentsToSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSupport_AddCommunicationToCase() { + svc := support.New(session.New()) + + params := &support.AddCommunicationToCaseInput{ + CommunicationBody: aws.String("CommunicationBody"), // Required + AttachmentSetId: aws.String("AttachmentSetId"), + CaseId: aws.String("CaseId"), + CcEmailAddresses: []*string{ + aws.String("CcEmailAddress"), // Required + // More values... + }, + } + resp, err := svc.AddCommunicationToCase(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSupport_CreateCase() { + svc := support.New(session.New()) + + params := &support.CreateCaseInput{ + CommunicationBody: aws.String("CommunicationBody"), // Required + Subject: aws.String("Subject"), // Required + AttachmentSetId: aws.String("AttachmentSetId"), + CategoryCode: aws.String("CategoryCode"), + CcEmailAddresses: []*string{ + aws.String("CcEmailAddress"), // Required + // More values... + }, + IssueType: aws.String("IssueType"), + Language: aws.String("Language"), + ServiceCode: aws.String("ServiceCode"), + SeverityCode: aws.String("SeverityCode"), + } + resp, err := svc.CreateCase(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
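+	// On success, resp is a *support.CreateCaseOutput whose CaseId identifies
+	// the newly created case.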
+ fmt.Println(resp) +} + +func ExampleSupport_DescribeAttachment() { + svc := support.New(session.New()) + + params := &support.DescribeAttachmentInput{ + AttachmentId: aws.String("AttachmentId"), // Required + } + resp, err := svc.DescribeAttachment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSupport_DescribeCases() { + svc := support.New(session.New()) + + params := &support.DescribeCasesInput{ + AfterTime: aws.String("AfterTime"), + BeforeTime: aws.String("BeforeTime"), + CaseIdList: []*string{ + aws.String("CaseId"), // Required + // More values... + }, + DisplayId: aws.String("DisplayId"), + IncludeCommunications: aws.Bool(true), + IncludeResolvedCases: aws.Bool(true), + Language: aws.String("Language"), + MaxResults: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.DescribeCases(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSupport_DescribeCommunications() { + svc := support.New(session.New()) + + params := &support.DescribeCommunicationsInput{ + CaseId: aws.String("CaseId"), // Required + AfterTime: aws.String("AfterTime"), + BeforeTime: aws.String("BeforeTime"), + MaxResults: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.DescribeCommunications(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSupport_DescribeServices() { + svc := support.New(session.New()) + + params := &support.DescribeServicesInput{ + Language: aws.String("Language"), + ServiceCodeList: []*string{ + aws.String("ServiceCode"), // Required + // More values... + }, + } + resp, err := svc.DescribeServices(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSupport_DescribeSeverityLevels() { + svc := support.New(session.New()) + + params := &support.DescribeSeverityLevelsInput{ + Language: aws.String("Language"), + } + resp, err := svc.DescribeSeverityLevels(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSupport_DescribeTrustedAdvisorCheckRefreshStatuses() { + svc := support.New(session.New()) + + params := &support.DescribeTrustedAdvisorCheckRefreshStatusesInput{ + CheckIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeTrustedAdvisorCheckRefreshStatuses(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
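+	// The response carries one refresh status per check ID requested above,
+	// including the time remaining until each check can be refreshed again.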
+ fmt.Println(resp) +} + +func ExampleSupport_DescribeTrustedAdvisorCheckResult() { + svc := support.New(session.New()) + + params := &support.DescribeTrustedAdvisorCheckResultInput{ + CheckId: aws.String("String"), // Required + Language: aws.String("String"), + } + resp, err := svc.DescribeTrustedAdvisorCheckResult(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSupport_DescribeTrustedAdvisorCheckSummaries() { + svc := support.New(session.New()) + + params := &support.DescribeTrustedAdvisorCheckSummariesInput{ + CheckIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeTrustedAdvisorCheckSummaries(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSupport_DescribeTrustedAdvisorChecks() { + svc := support.New(session.New()) + + params := &support.DescribeTrustedAdvisorChecksInput{ + Language: aws.String("String"), // Required + } + resp, err := svc.DescribeTrustedAdvisorChecks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSupport_RefreshTrustedAdvisorCheck() { + svc := support.New(session.New()) + + params := &support.RefreshTrustedAdvisorCheckInput{ + CheckId: aws.String("String"), // Required + } + resp, err := svc.RefreshTrustedAdvisorCheck(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSupport_ResolveCase() { + svc := support.New(session.New()) + + params := &support.ResolveCaseInput{ + CaseId: aws.String("CaseId"), + } + resp, err := svc.ResolveCase(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/support/service.go b/vendor/github.com/aws/aws-sdk-go/service/support/service.go new file mode 100644 index 000000000..7b7eb0f06 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/support/service.go @@ -0,0 +1,122 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package support + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// The AWS Support API reference is intended for programmers who need detailed +// information about the AWS Support operations and data types. This service +// enables you to manage your AWS Support cases programmatically. It uses HTTP +// methods that return results in JSON format. +// +// The AWS Support service also exposes a set of Trusted Advisor (https://aws.amazon.com/premiumsupport/trustedadvisor/) +// features. 
You can retrieve a list of checks and their descriptions, get check
+// results, specify checks to refresh, and get the refresh status of checks.
+//
+// The following list describes the AWS Support case management operations:
+//
+//    * Service names, issue categories, and available severity levels. The
+//    DescribeServices and DescribeSeverityLevels operations return AWS service
+//    names, service codes, service categories, and problem severity levels.
+//    You use these values when you call the CreateCase operation.
+//
+//    * Case creation, case details, and case resolution. The CreateCase,
+//    DescribeCases, DescribeAttachment, and ResolveCase operations create
+//    AWS Support cases, retrieve information about cases, and resolve cases.
+//
+//    * Case communication. The DescribeCommunications, AddCommunicationToCase,
+//    and AddAttachmentsToSet operations retrieve and add communications and
+//    attachments to AWS Support cases.
+//
+// The following list describes the operations available from the AWS Support
+// service for Trusted Advisor:
+//
+//    * DescribeTrustedAdvisorChecks returns the list of checks that run against
+//    your AWS resources.
+//
+//    * Using the CheckId for a specific check returned by DescribeTrustedAdvisorChecks,
+//    you can call DescribeTrustedAdvisorCheckResult to obtain the results for
+//    the check you specified.
+//
+//    * DescribeTrustedAdvisorCheckSummaries returns summarized results for
+//    one or more Trusted Advisor checks.
+//
+//    * RefreshTrustedAdvisorCheck requests that Trusted Advisor rerun a specified
+//    check.
+//
+//    * DescribeTrustedAdvisorCheckRefreshStatuses reports the refresh status
+//    of one or more checks.
+//
+// For authentication of requests, AWS Support uses Signature Version 4 Signing
+// Process (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html).
+//
+// See About the AWS Support API (http://docs.aws.amazon.com/awssupport/latest/user/Welcome.html)
+// in the AWS Support User Guide for information about how to use this service
+// to create and manage your support cases, and how to call Trusted Advisor
+// for results of checks on your resources.
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type Support struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "support"
+
+// New creates a new instance of the Support client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a Support client from just a session.
+// svc := support.New(mySession)
+//
+// // Create a Support client with additional configuration
+// svc := support.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *Support {
+ c := p.ClientConfig(ServiceName, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
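+//
+// The handler lists wired up below drive the request lifecycle for every
+// operation: requests are signed with Signature Version 4 and are marshaled
+// and unmarshaled by the JSON-RPC protocol handlers, including dedicated
+// handlers for response metadata and service errors.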
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *Support {
+ svc := &Support{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2013-04-15",
+ JSONVersion: "1.1",
+ TargetPrefix: "AWSSupport_20130415",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+ svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler)
+ svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler)
+ svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler)
+ svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler)
+
+ // Run custom client initialization if present
+ if initClient != nil {
+ initClient(svc.Client)
+ }
+
+ return svc
+}
+
+// newRequest creates a new request for a Support operation and runs any
+// custom request initialization.
+func (c *Support) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ // Run custom request initialization if present
+ if initRequest != nil {
+ initRequest(req)
+ }
+
+ return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/support/supportiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/support/supportiface/interface.go
new file mode 100644
index 000000000..21c3b19c1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/support/supportiface/interface.go
@@ -0,0 +1,74 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package supportiface provides an interface for the AWS Support service.
+package supportiface
+
+import (
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/service/support"
+)
+
+// SupportAPI is the interface type for support.Support.
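+//
+// Consuming this interface instead of the concrete *support.Support makes it
+// straightforward to stub the client out in unit tests. An illustrative
+// sketch only; mockSupportClient is a hypothetical test double, not part of
+// this package:
+//
+//    type mockSupportClient struct {
+//        supportiface.SupportAPI
+//    }
+//
+//    func (m *mockSupportClient) ResolveCase(in *support.ResolveCaseInput) (*support.ResolveCaseOutput, error) {
+//        // Return canned data instead of calling the live service.
+//        return &support.ResolveCaseOutput{}, nil
+//    }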
+type SupportAPI interface { + AddAttachmentsToSetRequest(*support.AddAttachmentsToSetInput) (*request.Request, *support.AddAttachmentsToSetOutput) + + AddAttachmentsToSet(*support.AddAttachmentsToSetInput) (*support.AddAttachmentsToSetOutput, error) + + AddCommunicationToCaseRequest(*support.AddCommunicationToCaseInput) (*request.Request, *support.AddCommunicationToCaseOutput) + + AddCommunicationToCase(*support.AddCommunicationToCaseInput) (*support.AddCommunicationToCaseOutput, error) + + CreateCaseRequest(*support.CreateCaseInput) (*request.Request, *support.CreateCaseOutput) + + CreateCase(*support.CreateCaseInput) (*support.CreateCaseOutput, error) + + DescribeAttachmentRequest(*support.DescribeAttachmentInput) (*request.Request, *support.DescribeAttachmentOutput) + + DescribeAttachment(*support.DescribeAttachmentInput) (*support.DescribeAttachmentOutput, error) + + DescribeCasesRequest(*support.DescribeCasesInput) (*request.Request, *support.DescribeCasesOutput) + + DescribeCases(*support.DescribeCasesInput) (*support.DescribeCasesOutput, error) + + DescribeCasesPages(*support.DescribeCasesInput, func(*support.DescribeCasesOutput, bool) bool) error + + DescribeCommunicationsRequest(*support.DescribeCommunicationsInput) (*request.Request, *support.DescribeCommunicationsOutput) + + DescribeCommunications(*support.DescribeCommunicationsInput) (*support.DescribeCommunicationsOutput, error) + + DescribeCommunicationsPages(*support.DescribeCommunicationsInput, func(*support.DescribeCommunicationsOutput, bool) bool) error + + DescribeServicesRequest(*support.DescribeServicesInput) (*request.Request, *support.DescribeServicesOutput) + + DescribeServices(*support.DescribeServicesInput) (*support.DescribeServicesOutput, error) + + DescribeSeverityLevelsRequest(*support.DescribeSeverityLevelsInput) (*request.Request, *support.DescribeSeverityLevelsOutput) + + DescribeSeverityLevels(*support.DescribeSeverityLevelsInput) (*support.DescribeSeverityLevelsOutput, error) + + DescribeTrustedAdvisorCheckRefreshStatusesRequest(*support.DescribeTrustedAdvisorCheckRefreshStatusesInput) (*request.Request, *support.DescribeTrustedAdvisorCheckRefreshStatusesOutput) + + DescribeTrustedAdvisorCheckRefreshStatuses(*support.DescribeTrustedAdvisorCheckRefreshStatusesInput) (*support.DescribeTrustedAdvisorCheckRefreshStatusesOutput, error) + + DescribeTrustedAdvisorCheckResultRequest(*support.DescribeTrustedAdvisorCheckResultInput) (*request.Request, *support.DescribeTrustedAdvisorCheckResultOutput) + + DescribeTrustedAdvisorCheckResult(*support.DescribeTrustedAdvisorCheckResultInput) (*support.DescribeTrustedAdvisorCheckResultOutput, error) + + DescribeTrustedAdvisorCheckSummariesRequest(*support.DescribeTrustedAdvisorCheckSummariesInput) (*request.Request, *support.DescribeTrustedAdvisorCheckSummariesOutput) + + DescribeTrustedAdvisorCheckSummaries(*support.DescribeTrustedAdvisorCheckSummariesInput) (*support.DescribeTrustedAdvisorCheckSummariesOutput, error) + + DescribeTrustedAdvisorChecksRequest(*support.DescribeTrustedAdvisorChecksInput) (*request.Request, *support.DescribeTrustedAdvisorChecksOutput) + + DescribeTrustedAdvisorChecks(*support.DescribeTrustedAdvisorChecksInput) (*support.DescribeTrustedAdvisorChecksOutput, error) + + RefreshTrustedAdvisorCheckRequest(*support.RefreshTrustedAdvisorCheckInput) (*request.Request, *support.RefreshTrustedAdvisorCheckOutput) + + RefreshTrustedAdvisorCheck(*support.RefreshTrustedAdvisorCheckInput) (*support.RefreshTrustedAdvisorCheckOutput, error) + + 
ResolveCaseRequest(*support.ResolveCaseInput) (*request.Request, *support.ResolveCaseOutput) + + ResolveCase(*support.ResolveCaseInput) (*support.ResolveCaseOutput, error) +} + +var _ SupportAPI = (*support.Support)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/swf/api.go b/vendor/github.com/aws/aws-sdk-go/service/swf/api.go new file mode 100644 index 000000000..91b34bea3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/swf/api.go @@ -0,0 +1,9517 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package swf provides a client for Amazon Simple Workflow Service. +package swf + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opCountClosedWorkflowExecutions = "CountClosedWorkflowExecutions" + +// CountClosedWorkflowExecutionsRequest generates a "aws/request.Request" representing the +// client's request for the CountClosedWorkflowExecutions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CountClosedWorkflowExecutions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CountClosedWorkflowExecutionsRequest method. +// req, resp := client.CountClosedWorkflowExecutionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SWF) CountClosedWorkflowExecutionsRequest(input *CountClosedWorkflowExecutionsInput) (req *request.Request, output *WorkflowExecutionCount) { + op := &request.Operation{ + Name: opCountClosedWorkflowExecutions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CountClosedWorkflowExecutionsInput{} + } + + req = c.newRequest(op, input, output) + output = &WorkflowExecutionCount{} + req.Data = output + return +} + +// Returns the number of closed workflow executions within the given domain +// that meet the specified filtering criteria. +// +// This operation is eventually consistent. The results are best effort and +// may not exactly reflect recent updates and changes. Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. Constrain the following parameters by using a Condition element +// with the appropriate keys. tagFilter.tag: String constraint. The key is +// swf:tagFilter.tag. typeFilter.name: String constraint. The key is swf:typeFilter.name. +// typeFilter.version: String constraint. The key is swf:typeFilter.version. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. 
+// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) CountClosedWorkflowExecutions(input *CountClosedWorkflowExecutionsInput) (*WorkflowExecutionCount, error) { + req, out := c.CountClosedWorkflowExecutionsRequest(input) + err := req.Send() + return out, err +} + +const opCountOpenWorkflowExecutions = "CountOpenWorkflowExecutions" + +// CountOpenWorkflowExecutionsRequest generates a "aws/request.Request" representing the +// client's request for the CountOpenWorkflowExecutions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CountOpenWorkflowExecutions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CountOpenWorkflowExecutionsRequest method. +// req, resp := client.CountOpenWorkflowExecutionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SWF) CountOpenWorkflowExecutionsRequest(input *CountOpenWorkflowExecutionsInput) (req *request.Request, output *WorkflowExecutionCount) { + op := &request.Operation{ + Name: opCountOpenWorkflowExecutions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CountOpenWorkflowExecutionsInput{} + } + + req = c.newRequest(op, input, output) + output = &WorkflowExecutionCount{} + req.Data = output + return +} + +// Returns the number of open workflow executions within the given domain that +// meet the specified filtering criteria. +// +// This operation is eventually consistent. The results are best effort and +// may not exactly reflect recent updates and changes. Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. Constrain the following parameters by using a Condition element +// with the appropriate keys. tagFilter.tag: String constraint. The key is +// swf:tagFilter.tag. typeFilter.name: String constraint. The key is swf:typeFilter.name. +// typeFilter.version: String constraint. The key is swf:typeFilter.version. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). 
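+//
+// An illustrative call; svc is an existing *SWF client, and the domain name
+// and time window are placeholders (the operation requires a start-time
+// filter):
+//
+//    params := &swf.CountOpenWorkflowExecutionsInput{
+//        Domain: aws.String("my-domain"),
+//        StartTimeFilter: &swf.ExecutionTimeFilter{
+//            OldestDate: aws.Time(time.Now().Add(-24 * time.Hour)),
+//        },
+//    }
+//    count, err := svc.CountOpenWorkflowExecutions(params)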
+func (c *SWF) CountOpenWorkflowExecutions(input *CountOpenWorkflowExecutionsInput) (*WorkflowExecutionCount, error) {
+ req, out := c.CountOpenWorkflowExecutionsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opCountPendingActivityTasks = "CountPendingActivityTasks"
+
+// CountPendingActivityTasksRequest generates a "aws/request.Request" representing the
+// client's request for the CountPendingActivityTasks operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CountPendingActivityTasks method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the CountPendingActivityTasksRequest method.
+// req, resp := client.CountPendingActivityTasksRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *SWF) CountPendingActivityTasksRequest(input *CountPendingActivityTasksInput) (req *request.Request, output *PendingTaskCount) {
+ op := &request.Operation{
+ Name: opCountPendingActivityTasks,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CountPendingActivityTasksInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &PendingTaskCount{}
+ req.Data = output
+ return
+}
+
+// Returns the estimated number of activity tasks in the specified task list.
+// The count returned is an approximation and is not guaranteed to be exact.
+// If you specify a task list in which no activity task was ever scheduled,
+// then 0 will be returned.
+//
+// Access Control
+//
+// You can use IAM policies to control this action's access to Amazon SWF resources
+// as follows:
+//
+// Use a Resource element with the domain name to limit the action to only
+// specified domains. Use an Action element to allow or deny permission to call
+// this action. Constrain the taskList.name parameter by using a Condition element
+// with the swf:taskList.name key to allow the action to access only certain
+// task lists. If the caller does not have sufficient permissions to invoke
+// the action, or the parameter values fall outside the specified constraints,
+// the action fails. The associated event attribute's cause parameter will be
+// set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see
+// Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
+func (c *SWF) CountPendingActivityTasks(input *CountPendingActivityTasksInput) (*PendingTaskCount, error) {
+ req, out := c.CountPendingActivityTasksRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opCountPendingDecisionTasks = "CountPendingDecisionTasks"
+
+// CountPendingDecisionTasksRequest generates a "aws/request.Request" representing the
+// client's request for the CountPendingDecisionTasks operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CountPendingDecisionTasks method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the CountPendingDecisionTasksRequest method.
+// req, resp := client.CountPendingDecisionTasksRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *SWF) CountPendingDecisionTasksRequest(input *CountPendingDecisionTasksInput) (req *request.Request, output *PendingTaskCount) {
+ op := &request.Operation{
+ Name: opCountPendingDecisionTasks,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CountPendingDecisionTasksInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &PendingTaskCount{}
+ req.Data = output
+ return
+}
+
+// Returns the estimated number of decision tasks in the specified task list.
+// The count returned is an approximation and is not guaranteed to be exact.
+// If you specify a task list in which no decision task was ever scheduled,
+// then 0 will be returned.
+//
+// Access Control
+//
+// You can use IAM policies to control this action's access to Amazon SWF resources
+// as follows:
+//
+// Use a Resource element with the domain name to limit the action to only
+// specified domains. Use an Action element to allow or deny permission to call
+// this action. Constrain the taskList.name parameter by using a Condition element
+// with the swf:taskList.name key to allow the action to access only certain
+// task lists. If the caller does not have sufficient permissions to invoke
+// the action, or the parameter values fall outside the specified constraints,
+// the action fails. The associated event attribute's cause parameter will be
+// set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see
+// Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
+func (c *SWF) CountPendingDecisionTasks(input *CountPendingDecisionTasksInput) (*PendingTaskCount, error) {
+ req, out := c.CountPendingDecisionTasksRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeprecateActivityType = "DeprecateActivityType"
+
+// DeprecateActivityTypeRequest generates a "aws/request.Request" representing the
+// client's request for the DeprecateActivityType operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DeprecateActivityType method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DeprecateActivityTypeRequest method.
+// req, resp := client.DeprecateActivityTypeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SWF) DeprecateActivityTypeRequest(input *DeprecateActivityTypeInput) (req *request.Request, output *DeprecateActivityTypeOutput) { + op := &request.Operation{ + Name: opDeprecateActivityType, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeprecateActivityTypeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeprecateActivityTypeOutput{} + req.Data = output + return +} + +// Deprecates the specified activity type. After an activity type has been deprecated, +// you cannot create new tasks of that activity type. Tasks of this type that +// were scheduled before the type was deprecated will continue to run. +// +// This operation is eventually consistent. The results are best effort and +// may not exactly reflect recent updates and changes. Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. Constrain the following parameters by using a Condition element +// with the appropriate keys. activityType.name: String constraint. The key +// is swf:activityType.name. activityType.version: String constraint. The key +// is swf:activityType.version. If the caller does not have sufficient permissions +// to invoke the action, or the parameter values fall outside the specified +// constraints, the action fails. The associated event attribute's cause parameter +// will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, +// see Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) DeprecateActivityType(input *DeprecateActivityTypeInput) (*DeprecateActivityTypeOutput, error) { + req, out := c.DeprecateActivityTypeRequest(input) + err := req.Send() + return out, err +} + +const opDeprecateDomain = "DeprecateDomain" + +// DeprecateDomainRequest generates a "aws/request.Request" representing the +// client's request for the DeprecateDomain operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeprecateDomain method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeprecateDomainRequest method. 
+// req, resp := client.DeprecateDomainRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SWF) DeprecateDomainRequest(input *DeprecateDomainInput) (req *request.Request, output *DeprecateDomainOutput) { + op := &request.Operation{ + Name: opDeprecateDomain, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeprecateDomainInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeprecateDomainOutput{} + req.Data = output + return +} + +// Deprecates the specified domain. After a domain has been deprecated it cannot +// be used to create new workflow executions or register new types. However, +// you can still use visibility actions on this domain. Deprecating a domain +// also deprecates all activity and workflow types registered in the domain. +// Executions that were started before the domain was deprecated will continue +// to run. +// +// This operation is eventually consistent. The results are best effort and +// may not exactly reflect recent updates and changes. Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. You cannot use an IAM policy to constrain this action's parameters. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) DeprecateDomain(input *DeprecateDomainInput) (*DeprecateDomainOutput, error) { + req, out := c.DeprecateDomainRequest(input) + err := req.Send() + return out, err +} + +const opDeprecateWorkflowType = "DeprecateWorkflowType" + +// DeprecateWorkflowTypeRequest generates a "aws/request.Request" representing the +// client's request for the DeprecateWorkflowType operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeprecateWorkflowType method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeprecateWorkflowTypeRequest method. 
+// req, resp := client.DeprecateWorkflowTypeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SWF) DeprecateWorkflowTypeRequest(input *DeprecateWorkflowTypeInput) (req *request.Request, output *DeprecateWorkflowTypeOutput) { + op := &request.Operation{ + Name: opDeprecateWorkflowType, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeprecateWorkflowTypeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeprecateWorkflowTypeOutput{} + req.Data = output + return +} + +// Deprecates the specified workflow type. After a workflow type has been deprecated, +// you cannot create new executions of that type. Executions that were started +// before the type was deprecated will continue to run. A deprecated workflow +// type may still be used when calling visibility actions. +// +// This operation is eventually consistent. The results are best effort and +// may not exactly reflect recent updates and changes. Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. Constrain the following parameters by using a Condition element +// with the appropriate keys. workflowType.name: String constraint. The key +// is swf:workflowType.name. workflowType.version: String constraint. The key +// is swf:workflowType.version. If the caller does not have sufficient permissions +// to invoke the action, or the parameter values fall outside the specified +// constraints, the action fails. The associated event attribute's cause parameter +// will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, +// see Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) DeprecateWorkflowType(input *DeprecateWorkflowTypeInput) (*DeprecateWorkflowTypeOutput, error) { + req, out := c.DeprecateWorkflowTypeRequest(input) + err := req.Send() + return out, err +} + +const opDescribeActivityType = "DescribeActivityType" + +// DescribeActivityTypeRequest generates a "aws/request.Request" representing the +// client's request for the DescribeActivityType operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeActivityType method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeActivityTypeRequest method. 
+// req, resp := client.DescribeActivityTypeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SWF) DescribeActivityTypeRequest(input *DescribeActivityTypeInput) (req *request.Request, output *DescribeActivityTypeOutput) { + op := &request.Operation{ + Name: opDescribeActivityType, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeActivityTypeInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeActivityTypeOutput{} + req.Data = output + return +} + +// Returns information about the specified activity type. This includes configuration +// settings provided when the type was registered and other general information +// about the type. +// +// Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. Constrain the following parameters by using a Condition element +// with the appropriate keys. activityType.name: String constraint. The key +// is swf:activityType.name. activityType.version: String constraint. The key +// is swf:activityType.version. If the caller does not have sufficient permissions +// to invoke the action, or the parameter values fall outside the specified +// constraints, the action fails. The associated event attribute's cause parameter +// will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, +// see Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) DescribeActivityType(input *DescribeActivityTypeInput) (*DescribeActivityTypeOutput, error) { + req, out := c.DescribeActivityTypeRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDomain = "DescribeDomain" + +// DescribeDomainRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDomain operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDomain method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDomainRequest method. +// req, resp := client.DescribeDomainRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SWF) DescribeDomainRequest(input *DescribeDomainInput) (req *request.Request, output *DescribeDomainOutput) { + op := &request.Operation{ + Name: opDescribeDomain, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDomainInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDomainOutput{} + req.Data = output + return +} + +// Returns information about the specified domain, including description and +// status. 
+// +// Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. You cannot use an IAM policy to constrain this action's parameters. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) DescribeDomain(input *DescribeDomainInput) (*DescribeDomainOutput, error) { + req, out := c.DescribeDomainRequest(input) + err := req.Send() + return out, err +} + +const opDescribeWorkflowExecution = "DescribeWorkflowExecution" + +// DescribeWorkflowExecutionRequest generates a "aws/request.Request" representing the +// client's request for the DescribeWorkflowExecution operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeWorkflowExecution method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeWorkflowExecutionRequest method. +// req, resp := client.DescribeWorkflowExecutionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SWF) DescribeWorkflowExecutionRequest(input *DescribeWorkflowExecutionInput) (req *request.Request, output *DescribeWorkflowExecutionOutput) { + op := &request.Operation{ + Name: opDescribeWorkflowExecution, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeWorkflowExecutionInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeWorkflowExecutionOutput{} + req.Data = output + return +} + +// Returns information about the specified workflow execution including its +// type and some statistics. +// +// This operation is eventually consistent. The results are best effort and +// may not exactly reflect recent updates and changes. Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. You cannot use an IAM policy to constrain this action's parameters. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. 
+// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) DescribeWorkflowExecution(input *DescribeWorkflowExecutionInput) (*DescribeWorkflowExecutionOutput, error) { + req, out := c.DescribeWorkflowExecutionRequest(input) + err := req.Send() + return out, err +} + +const opDescribeWorkflowType = "DescribeWorkflowType" + +// DescribeWorkflowTypeRequest generates a "aws/request.Request" representing the +// client's request for the DescribeWorkflowType operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeWorkflowType method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeWorkflowTypeRequest method. +// req, resp := client.DescribeWorkflowTypeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SWF) DescribeWorkflowTypeRequest(input *DescribeWorkflowTypeInput) (req *request.Request, output *DescribeWorkflowTypeOutput) { + op := &request.Operation{ + Name: opDescribeWorkflowType, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeWorkflowTypeInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeWorkflowTypeOutput{} + req.Data = output + return +} + +// Returns information about the specified workflow type. This includes configuration +// settings specified when the type was registered and other information such +// as creation date, current status, and so on. +// +// Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. Constrain the following parameters by using a Condition element +// with the appropriate keys. workflowType.name: String constraint. The key +// is swf:workflowType.name. workflowType.version: String constraint. The key +// is swf:workflowType.version. If the caller does not have sufficient permissions +// to invoke the action, or the parameter values fall outside the specified +// constraints, the action fails. The associated event attribute's cause parameter +// will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, +// see Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) DescribeWorkflowType(input *DescribeWorkflowTypeInput) (*DescribeWorkflowTypeOutput, error) { + req, out := c.DescribeWorkflowTypeRequest(input) + err := req.Send() + return out, err +} + +const opGetWorkflowExecutionHistory = "GetWorkflowExecutionHistory" + +// GetWorkflowExecutionHistoryRequest generates a "aws/request.Request" representing the +// client's request for the GetWorkflowExecutionHistory operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetWorkflowExecutionHistory method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetWorkflowExecutionHistoryRequest method. +// req, resp := client.GetWorkflowExecutionHistoryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SWF) GetWorkflowExecutionHistoryRequest(input *GetWorkflowExecutionHistoryInput) (req *request.Request, output *GetWorkflowExecutionHistoryOutput) { + op := &request.Operation{ + Name: opGetWorkflowExecutionHistory, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextPageToken"}, + OutputTokens: []string{"nextPageToken"}, + LimitToken: "maximumPageSize", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetWorkflowExecutionHistoryInput{} + } + + req = c.newRequest(op, input, output) + output = &GetWorkflowExecutionHistoryOutput{} + req.Data = output + return +} + +// Returns the history of the specified workflow execution. The results may +// be split into multiple pages. To retrieve subsequent pages, make the call +// again using the nextPageToken returned by the initial call. +// +// This operation is eventually consistent. The results are best effort and +// may not exactly reflect recent updates and changes. Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. You cannot use an IAM policy to constrain this action's parameters. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) GetWorkflowExecutionHistory(input *GetWorkflowExecutionHistoryInput) (*GetWorkflowExecutionHistoryOutput, error) { + req, out := c.GetWorkflowExecutionHistoryRequest(input) + err := req.Send() + return out, err +} + +// GetWorkflowExecutionHistoryPages iterates over the pages of a GetWorkflowExecutionHistory operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetWorkflowExecutionHistory method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetWorkflowExecutionHistory operation. 
+// pageNum := 0 +// err := client.GetWorkflowExecutionHistoryPages(params, +// func(page *GetWorkflowExecutionHistoryOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SWF) GetWorkflowExecutionHistoryPages(input *GetWorkflowExecutionHistoryInput, fn func(p *GetWorkflowExecutionHistoryOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.GetWorkflowExecutionHistoryRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*GetWorkflowExecutionHistoryOutput), lastPage) + }) +} + +const opListActivityTypes = "ListActivityTypes" + +// ListActivityTypesRequest generates a "aws/request.Request" representing the +// client's request for the ListActivityTypes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListActivityTypes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListActivityTypesRequest method. +// req, resp := client.ListActivityTypesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SWF) ListActivityTypesRequest(input *ListActivityTypesInput) (req *request.Request, output *ListActivityTypesOutput) { + op := &request.Operation{ + Name: opListActivityTypes, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextPageToken"}, + OutputTokens: []string{"nextPageToken"}, + LimitToken: "maximumPageSize", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListActivityTypesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListActivityTypesOutput{} + req.Data = output + return +} + +// Returns information about all activities registered in the specified domain +// that match the specified name and registration status. The result includes +// information like creation date, current status of the activity, etc. The +// results may be split into multiple pages. To retrieve subsequent pages, make +// the call again using the nextPageToken returned by the initial call. +// +// Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. You cannot use an IAM policy to constrain this action's parameters. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). 
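+//
+// As a hedged illustration only (assuming an initialized *SWF client named
+// svc and a hypothetical domain name), one way to walk the result pages by
+// hand with nextPageToken is:
+//
+//    input := &ListActivityTypesInput{
+//        Domain:             aws.String("my-domain"), // hypothetical
+//        RegistrationStatus: aws.String("REGISTERED"),
+//    }
+//    for {
+//        out, err := svc.ListActivityTypes(input)
+//        if err != nil {
+//            return err
+//        }
+//        for _, info := range out.TypeInfos {
+//            fmt.Println(aws.StringValue(info.ActivityType.Name))
+//        }
+//        if out.NextPageToken == nil {
+//            break
+//        }
+//        input.NextPageToken = out.NextPageToken
+//    }
+//
+// The ListActivityTypesPages helper below performs the same loop for you.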
+func (c *SWF) ListActivityTypes(input *ListActivityTypesInput) (*ListActivityTypesOutput, error) { + req, out := c.ListActivityTypesRequest(input) + err := req.Send() + return out, err +} + +// ListActivityTypesPages iterates over the pages of a ListActivityTypes operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListActivityTypes method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListActivityTypes operation. +// pageNum := 0 +// err := client.ListActivityTypesPages(params, +// func(page *ListActivityTypesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SWF) ListActivityTypesPages(input *ListActivityTypesInput, fn func(p *ListActivityTypesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListActivityTypesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListActivityTypesOutput), lastPage) + }) +} + +const opListClosedWorkflowExecutions = "ListClosedWorkflowExecutions" + +// ListClosedWorkflowExecutionsRequest generates a "aws/request.Request" representing the +// client's request for the ListClosedWorkflowExecutions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListClosedWorkflowExecutions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListClosedWorkflowExecutionsRequest method. +// req, resp := client.ListClosedWorkflowExecutionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SWF) ListClosedWorkflowExecutionsRequest(input *ListClosedWorkflowExecutionsInput) (req *request.Request, output *WorkflowExecutionInfos) { + op := &request.Operation{ + Name: opListClosedWorkflowExecutions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextPageToken"}, + OutputTokens: []string{"nextPageToken"}, + LimitToken: "maximumPageSize", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListClosedWorkflowExecutionsInput{} + } + + req = c.newRequest(op, input, output) + output = &WorkflowExecutionInfos{} + req.Data = output + return +} + +// Returns a list of closed workflow executions in the specified domain that +// meet the filtering criteria. The results may be split into multiple pages. +// To retrieve subsequent pages, make the call again using the nextPageToken +// returned by the initial call. +// +// This operation is eventually consistent. The results are best effort and +// may not exactly reflect recent updates and changes. 
Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. Constrain the following parameters by using a Condition element +// with the appropriate keys. tagFilter.tag: String constraint. The key is +// swf:tagFilter.tag. typeFilter.name: String constraint. The key is swf:typeFilter.name. +// typeFilter.version: String constraint. The key is swf:typeFilter.version. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) ListClosedWorkflowExecutions(input *ListClosedWorkflowExecutionsInput) (*WorkflowExecutionInfos, error) { + req, out := c.ListClosedWorkflowExecutionsRequest(input) + err := req.Send() + return out, err +} + +// ListClosedWorkflowExecutionsPages iterates over the pages of a ListClosedWorkflowExecutions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListClosedWorkflowExecutions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListClosedWorkflowExecutions operation. +// pageNum := 0 +// err := client.ListClosedWorkflowExecutionsPages(params, +// func(page *WorkflowExecutionInfos, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SWF) ListClosedWorkflowExecutionsPages(input *ListClosedWorkflowExecutionsInput, fn func(p *WorkflowExecutionInfos, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListClosedWorkflowExecutionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*WorkflowExecutionInfos), lastPage) + }) +} + +const opListDomains = "ListDomains" + +// ListDomainsRequest generates a "aws/request.Request" representing the +// client's request for the ListDomains operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListDomains method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListDomainsRequest method. 
+// req, resp := client.ListDomainsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SWF) ListDomainsRequest(input *ListDomainsInput) (req *request.Request, output *ListDomainsOutput) { + op := &request.Operation{ + Name: opListDomains, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextPageToken"}, + OutputTokens: []string{"nextPageToken"}, + LimitToken: "maximumPageSize", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDomainsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListDomainsOutput{} + req.Data = output + return +} + +// Returns the list of domains registered in the account. The results may be +// split into multiple pages. To retrieve subsequent pages, make the call again +// using the nextPageToken returned by the initial call. +// +// This operation is eventually consistent. The results are best effort and +// may not exactly reflect recent updates and changes. Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. The element must be set to arn:aws:swf::AccountID:domain/*, +// where AccountID is the account ID, with no dashes. Use an Action element +// to allow or deny permission to call this action. You cannot use an IAM policy +// to constrain this action's parameters. If the caller does not have sufficient +// permissions to invoke the action, or the parameter values fall outside the +// specified constraints, the action fails. The associated event attribute's +// cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example +// IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) ListDomains(input *ListDomainsInput) (*ListDomainsOutput, error) { + req, out := c.ListDomainsRequest(input) + err := req.Send() + return out, err +} + +// ListDomainsPages iterates over the pages of a ListDomains operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDomains method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDomains operation. +// pageNum := 0 +// err := client.ListDomainsPages(params, +// func(page *ListDomainsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SWF) ListDomainsPages(input *ListDomainsInput, fn func(p *ListDomainsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListDomainsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListDomainsOutput), lastPage) + }) +} + +const opListOpenWorkflowExecutions = "ListOpenWorkflowExecutions" + +// ListOpenWorkflowExecutionsRequest generates a "aws/request.Request" representing the +// client's request for the ListOpenWorkflowExecutions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListOpenWorkflowExecutions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListOpenWorkflowExecutionsRequest method. +// req, resp := client.ListOpenWorkflowExecutionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SWF) ListOpenWorkflowExecutionsRequest(input *ListOpenWorkflowExecutionsInput) (req *request.Request, output *WorkflowExecutionInfos) { + op := &request.Operation{ + Name: opListOpenWorkflowExecutions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextPageToken"}, + OutputTokens: []string{"nextPageToken"}, + LimitToken: "maximumPageSize", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListOpenWorkflowExecutionsInput{} + } + + req = c.newRequest(op, input, output) + output = &WorkflowExecutionInfos{} + req.Data = output + return +} + +// Returns a list of open workflow executions in the specified domain that meet +// the filtering criteria. The results may be split into multiple pages. To +// retrieve subsequent pages, make the call again using the nextPageToken returned +// by the initial call. +// +// This operation is eventually consistent. The results are best effort and +// may not exactly reflect recent updates and changes. Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. Constrain the following parameters by using a Condition element +// with the appropriate keys. tagFilter.tag: String constraint. The key is +// swf:tagFilter.tag. typeFilter.name: String constraint. The key is swf:typeFilter.name. +// typeFilter.version: String constraint. The key is swf:typeFilter.version. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) ListOpenWorkflowExecutions(input *ListOpenWorkflowExecutionsInput) (*WorkflowExecutionInfos, error) { + req, out := c.ListOpenWorkflowExecutionsRequest(input) + err := req.Send() + return out, err +} + +// ListOpenWorkflowExecutionsPages iterates over the pages of a ListOpenWorkflowExecutions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListOpenWorkflowExecutions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListOpenWorkflowExecutions operation. 
+// pageNum := 0 +// err := client.ListOpenWorkflowExecutionsPages(params, +// func(page *WorkflowExecutionInfos, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SWF) ListOpenWorkflowExecutionsPages(input *ListOpenWorkflowExecutionsInput, fn func(p *WorkflowExecutionInfos, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListOpenWorkflowExecutionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*WorkflowExecutionInfos), lastPage) + }) +} + +const opListWorkflowTypes = "ListWorkflowTypes" + +// ListWorkflowTypesRequest generates a "aws/request.Request" representing the +// client's request for the ListWorkflowTypes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListWorkflowTypes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListWorkflowTypesRequest method. +// req, resp := client.ListWorkflowTypesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SWF) ListWorkflowTypesRequest(input *ListWorkflowTypesInput) (req *request.Request, output *ListWorkflowTypesOutput) { + op := &request.Operation{ + Name: opListWorkflowTypes, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextPageToken"}, + OutputTokens: []string{"nextPageToken"}, + LimitToken: "maximumPageSize", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListWorkflowTypesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListWorkflowTypesOutput{} + req.Data = output + return +} + +// Returns information about workflow types in the specified domain. The results +// may be split into multiple pages that can be retrieved by making the call +// repeatedly. +// +// Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. You cannot use an IAM policy to constrain this action's parameters. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). 
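+//
+// As an illustrative sketch (assuming an initialized *SWF client named svc
+// and a hypothetical domain name), the paginated helper defined below can
+// collect every registered workflow type without touching nextPageToken:
+//
+//    var types []*WorkflowTypeInfo
+//    err := svc.ListWorkflowTypesPages(&ListWorkflowTypesInput{
+//        Domain:             aws.String("my-domain"), // hypothetical
+//        RegistrationStatus: aws.String("REGISTERED"),
+//    }, func(page *ListWorkflowTypesOutput, lastPage bool) bool {
+//        types = append(types, page.TypeInfos...)
+//        return true // keep iterating until the last page
+//    })
+//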
+func (c *SWF) ListWorkflowTypes(input *ListWorkflowTypesInput) (*ListWorkflowTypesOutput, error) { + req, out := c.ListWorkflowTypesRequest(input) + err := req.Send() + return out, err +} + +// ListWorkflowTypesPages iterates over the pages of a ListWorkflowTypes operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListWorkflowTypes method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListWorkflowTypes operation. +// pageNum := 0 +// err := client.ListWorkflowTypesPages(params, +// func(page *ListWorkflowTypesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SWF) ListWorkflowTypesPages(input *ListWorkflowTypesInput, fn func(p *ListWorkflowTypesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListWorkflowTypesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListWorkflowTypesOutput), lastPage) + }) +} + +const opPollForActivityTask = "PollForActivityTask" + +// PollForActivityTaskRequest generates a "aws/request.Request" representing the +// client's request for the PollForActivityTask operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PollForActivityTask method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PollForActivityTaskRequest method. +// req, resp := client.PollForActivityTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SWF) PollForActivityTaskRequest(input *PollForActivityTaskInput) (req *request.Request, output *PollForActivityTaskOutput) { + op := &request.Operation{ + Name: opPollForActivityTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PollForActivityTaskInput{} + } + + req = c.newRequest(op, input, output) + output = &PollForActivityTaskOutput{} + req.Data = output + return +} + +// Used by workers to get an ActivityTask from the specified activity taskList. +// This initiates a long poll, where the service holds the HTTP connection open +// and responds as soon as a task becomes available. The maximum time the service +// holds on to the request before responding is 60 seconds. If no task is available +// within 60 seconds, the poll will return an empty result. An empty result, +// in this context, means that an ActivityTask is returned, but that the value +// of taskToken is an empty string. If a task is returned, the worker should +// use its type to identify and process it correctly. +// +// Workers should set their client side socket timeout to at least 70 seconds +// (10 seconds higher than the maximum time service may hold the poll request). 
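+//
+// A minimal sketch, not normative documentation: assuming this SDK's aws and
+// session packages and a hypothetical domain and task list, a worker might
+// raise the HTTP client timeout and poll like this:
+//
+//    httpClient := &http.Client{Timeout: 70 * time.Second}
+//    svc := swf.New(session.New(aws.NewConfig().WithHTTPClient(httpClient)))
+//    task, err := svc.PollForActivityTask(&PollForActivityTaskInput{
+//        Domain:   aws.String("my-domain"),                     // hypothetical
+//        TaskList: &TaskList{Name: aws.String("my-task-list")}, // hypothetical
+//    })
+//    if err == nil && aws.StringValue(task.TaskToken) != "" {
+//        // a real task was returned; hand it to the worker
+//    }
+//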
+// Access Control
+//
+// You can use IAM policies to control this action's access to Amazon SWF resources
+// as follows:
+//
+// Use a Resource element with the domain name to limit the action to only
+// specified domains. Use an Action element to allow or deny permission to call
+// this action. Constrain the taskList.name parameter by using a Condition element
+// with the swf:taskList.name key to allow the action to access only certain
+// task lists. If the caller does not have sufficient permissions to invoke
+// the action, or the parameter values fall outside the specified constraints,
+// the action fails. The associated event attribute's cause parameter will be
+// set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see
+// Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
+func (c *SWF) PollForActivityTask(input *PollForActivityTaskInput) (*PollForActivityTaskOutput, error) {
+	req, out := c.PollForActivityTaskRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opPollForDecisionTask = "PollForDecisionTask"
+
+// PollForDecisionTaskRequest generates a "aws/request.Request" representing the
+// client's request for the PollForDecisionTask operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PollForDecisionTask method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the PollForDecisionTaskRequest method.
+//    req, resp := client.PollForDecisionTaskRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *SWF) PollForDecisionTaskRequest(input *PollForDecisionTaskInput) (req *request.Request, output *PollForDecisionTaskOutput) {
+	op := &request.Operation{
+		Name:       opPollForDecisionTask,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"nextPageToken"},
+			OutputTokens:    []string{"nextPageToken"},
+			LimitToken:      "maximumPageSize",
+			TruncationToken: "",
+		},
+	}
+
+	if input == nil {
+		input = &PollForDecisionTaskInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &PollForDecisionTaskOutput{}
+	req.Data = output
+	return
+}
+
+// Used by deciders to get a DecisionTask from the specified decision taskList.
+// A decision task may be returned for any open workflow execution that is using
+// the specified task list. The task includes a paginated view of the history
+// of the workflow execution. The decider should use the workflow type and the
+// history to determine how to properly handle the task.
+//
+// This action initiates a long poll, where the service holds the HTTP connection
+// open and responds as soon as a task becomes available. If no decision task is
+// available in the specified task list before the timeout of 60 seconds expires,
+// an empty result is returned. An empty result, in this context, means that
+// a DecisionTask is returned, but that the value of taskToken is an empty string.
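+//
+// As a hedged sketch (assuming an initialized *SWF client named svc and a
+// hypothetical domain and task list), a decider poll might look like:
+//
+//    task, err := svc.PollForDecisionTask(&PollForDecisionTaskInput{
+//        Domain:   aws.String("my-domain"),                    // hypothetical
+//        TaskList: &TaskList{Name: aws.String("my-deciders")}, // hypothetical
+//    })
+//    if err != nil {
+//        return err
+//    }
+//    if aws.StringValue(task.TaskToken) == "" {
+//        // the long poll timed out with no task; simply poll again
+//    }
+//    // task.Events holds one page of history; if task.NextPageToken is set,
+//    // pass it to another PollForDecisionTask call to fetch the next page.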
+//
+// Deciders should set their client-side socket timeout to at least 70 seconds
+// (10 seconds higher than the timeout). Because the number of workflow history
+// events for a single workflow execution might be very large, the result returned
+// might be split up across a number of pages. To retrieve subsequent pages,
+// make additional calls to PollForDecisionTask using the nextPageToken returned
+// by the initial call. Note that you do not call GetWorkflowExecutionHistory
+// with this nextPageToken. Instead, call PollForDecisionTask again.
+//
+// Access Control
+//
+// You can use IAM policies to control this action's access to Amazon SWF resources
+// as follows:
+//
+// Use a Resource element with the domain name to limit the action to only
+// specified domains. Use an Action element to allow or deny permission to call
+// this action. Constrain the taskList.name parameter by using a Condition element
+// with the swf:taskList.name key to allow the action to access only certain
+// task lists. If the caller does not have sufficient permissions to invoke
+// the action, or the parameter values fall outside the specified constraints,
+// the action fails. The associated event attribute's cause parameter will be
+// set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see
+// Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
+func (c *SWF) PollForDecisionTask(input *PollForDecisionTaskInput) (*PollForDecisionTaskOutput, error) {
+	req, out := c.PollForDecisionTaskRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// PollForDecisionTaskPages iterates over the pages of a PollForDecisionTask operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See PollForDecisionTask method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+//    // Example iterating over at most 3 pages of a PollForDecisionTask operation.
+//    pageNum := 0
+//    err := client.PollForDecisionTaskPages(params,
+//        func(page *PollForDecisionTaskOutput, lastPage bool) bool {
+//            pageNum++
+//            fmt.Println(page)
+//            return pageNum <= 3
+//        })
+//
+func (c *SWF) PollForDecisionTaskPages(input *PollForDecisionTaskInput, fn func(p *PollForDecisionTaskOutput, lastPage bool) (shouldContinue bool)) error {
+	page, _ := c.PollForDecisionTaskRequest(input)
+	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+	return page.EachPage(func(p interface{}, lastPage bool) bool {
+		return fn(p.(*PollForDecisionTaskOutput), lastPage)
+	})
+}
+
+const opRecordActivityTaskHeartbeat = "RecordActivityTaskHeartbeat"
+
+// RecordActivityTaskHeartbeatRequest generates a "aws/request.Request" representing the
+// client's request for the RecordActivityTaskHeartbeat operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the RecordActivityTaskHeartbeat method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the RecordActivityTaskHeartbeatRequest method.
+//    req, resp := client.RecordActivityTaskHeartbeatRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *SWF) RecordActivityTaskHeartbeatRequest(input *RecordActivityTaskHeartbeatInput) (req *request.Request, output *RecordActivityTaskHeartbeatOutput) {
+	op := &request.Operation{
+		Name:       opRecordActivityTaskHeartbeat,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &RecordActivityTaskHeartbeatInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &RecordActivityTaskHeartbeatOutput{}
+	req.Data = output
+	return
+}
+
+// Used by activity workers to report to the service that the ActivityTask represented
+// by the specified taskToken is still making progress. The worker can also
+// (optionally) specify details of the progress, for example percent complete,
+// using the details parameter. This action can also be used by the worker as
+// a mechanism to check if cancellation is being requested for the activity
+// task. If a cancellation is being attempted for the specified task, then the
+// boolean cancelRequested flag returned by the service is set to true.
+//
+// This action resets the taskHeartbeatTimeout clock. The taskHeartbeatTimeout
+// is specified in RegisterActivityType.
+//
+// This action does not in itself create an event in the workflow execution
+// history. However, if the task times out, the workflow execution history will
+// contain an ActivityTaskTimedOut event that contains the information from the
+// last heartbeat generated by the activity worker.
+//
+// The taskStartToCloseTimeout of an activity type is the maximum duration
+// of an activity task, regardless of the number of RecordActivityTaskHeartbeat
+// requests received. The taskStartToCloseTimeout is also specified in RegisterActivityType.
+// This operation is only useful for long-lived activities to report liveness
+// of the task and to determine if a cancellation is being attempted. If the
+// cancelRequested flag returns true, a cancellation is being attempted. If
+// the worker can cancel the activity, it should respond with RespondActivityTaskCanceled.
+// Otherwise, it should ignore the cancellation request.
+//
+// Access Control
+//
+// You can use IAM policies to control this action's access to Amazon SWF resources
+// as follows:
+//
+// Use a Resource element with the domain name to limit the action to only
+// specified domains. Use an Action element to allow or deny permission to call
+// this action. You cannot use an IAM policy to constrain this action's parameters.
+// If the caller does not have sufficient permissions to invoke the action,
+// or the parameter values fall outside the specified constraints, the action
+// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED.
+// For details and example IAM policies, see Using IAM to Manage Access to Amazon
+// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
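+//
+// An illustrative sketch (assuming an initialized *SWF client named svc and
+// a taskToken obtained from PollForActivityTask): a long-running worker can
+// heartbeat periodically and watch for a cancellation request like this:
+//
+//    out, err := svc.RecordActivityTaskHeartbeat(&RecordActivityTaskHeartbeatInput{
+//        TaskToken: taskToken,
+//        Details:   aws.String("50% complete"), // optional progress detail
+//    })
+//    if err != nil {
+//        return err
+//    }
+//    if aws.BoolValue(out.CancelRequested) {
+//        // stop work and, if it can be undone safely, call
+//        // RespondActivityTaskCanceled
+//    }
+//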
+func (c *SWF) RecordActivityTaskHeartbeat(input *RecordActivityTaskHeartbeatInput) (*RecordActivityTaskHeartbeatOutput, error) { + req, out := c.RecordActivityTaskHeartbeatRequest(input) + err := req.Send() + return out, err +} + +const opRegisterActivityType = "RegisterActivityType" + +// RegisterActivityTypeRequest generates a "aws/request.Request" representing the +// client's request for the RegisterActivityType operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RegisterActivityType method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RegisterActivityTypeRequest method. +// req, resp := client.RegisterActivityTypeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SWF) RegisterActivityTypeRequest(input *RegisterActivityTypeInput) (req *request.Request, output *RegisterActivityTypeOutput) { + op := &request.Operation{ + Name: opRegisterActivityType, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterActivityTypeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RegisterActivityTypeOutput{} + req.Data = output + return +} + +// Registers a new activity type along with its configuration settings in the +// specified domain. +// +// A TypeAlreadyExists fault is returned if the type already exists in the +// domain. You cannot change any configuration settings of the type after its +// registration, and it must be registered as a new version. Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. Constrain the following parameters by using a Condition element +// with the appropriate keys. defaultTaskList.name: String constraint. The +// key is swf:defaultTaskList.name. name: String constraint. The key is swf:name. +// version: String constraint. The key is swf:version. If the caller does +// not have sufficient permissions to invoke the action, or the parameter values +// fall outside the specified constraints, the action fails. The associated +// event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) RegisterActivityType(input *RegisterActivityTypeInput) (*RegisterActivityTypeOutput, error) { + req, out := c.RegisterActivityTypeRequest(input) + err := req.Send() + return out, err +} + +const opRegisterDomain = "RegisterDomain" + +// RegisterDomainRequest generates a "aws/request.Request" representing the +// client's request for the RegisterDomain operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RegisterDomain method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RegisterDomainRequest method. +// req, resp := client.RegisterDomainRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SWF) RegisterDomainRequest(input *RegisterDomainInput) (req *request.Request, output *RegisterDomainOutput) { + op := &request.Operation{ + Name: opRegisterDomain, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterDomainInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RegisterDomainOutput{} + req.Data = output + return +} + +// Registers a new domain. +// +// Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// You cannot use an IAM policy to control domain access for this action. +// The name of the domain being registered is available as the resource of this +// action. Use an Action element to allow or deny permission to call this action. +// You cannot use an IAM policy to constrain this action's parameters. If the +// caller does not have sufficient permissions to invoke the action, or the +// parameter values fall outside the specified constraints, the action fails. +// The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) RegisterDomain(input *RegisterDomainInput) (*RegisterDomainOutput, error) { + req, out := c.RegisterDomainRequest(input) + err := req.Send() + return out, err +} + +const opRegisterWorkflowType = "RegisterWorkflowType" + +// RegisterWorkflowTypeRequest generates a "aws/request.Request" representing the +// client's request for the RegisterWorkflowType operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RegisterWorkflowType method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RegisterWorkflowTypeRequest method. 
+// req, resp := client.RegisterWorkflowTypeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SWF) RegisterWorkflowTypeRequest(input *RegisterWorkflowTypeInput) (req *request.Request, output *RegisterWorkflowTypeOutput) { + op := &request.Operation{ + Name: opRegisterWorkflowType, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterWorkflowTypeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RegisterWorkflowTypeOutput{} + req.Data = output + return +} + +// Registers a new workflow type and its configuration settings in the specified +// domain. +// +// The retention period for the workflow history is set by the RegisterDomain +// action. +// +// If the type already exists, then a TypeAlreadyExists fault is returned. +// You cannot change the configuration settings of a workflow type once it is +// registered and it must be registered as a new version. Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. Constrain the following parameters by using a Condition element +// with the appropriate keys. defaultTaskList.name: String constraint. The +// key is swf:defaultTaskList.name. name: String constraint. The key is swf:name. +// version: String constraint. The key is swf:version. If the caller does +// not have sufficient permissions to invoke the action, or the parameter values +// fall outside the specified constraints, the action fails. The associated +// event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) RegisterWorkflowType(input *RegisterWorkflowTypeInput) (*RegisterWorkflowTypeOutput, error) { + req, out := c.RegisterWorkflowTypeRequest(input) + err := req.Send() + return out, err +} + +const opRequestCancelWorkflowExecution = "RequestCancelWorkflowExecution" + +// RequestCancelWorkflowExecutionRequest generates a "aws/request.Request" representing the +// client's request for the RequestCancelWorkflowExecution operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RequestCancelWorkflowExecution method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RequestCancelWorkflowExecutionRequest method. 
+// req, resp := client.RequestCancelWorkflowExecutionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SWF) RequestCancelWorkflowExecutionRequest(input *RequestCancelWorkflowExecutionInput) (req *request.Request, output *RequestCancelWorkflowExecutionOutput) { + op := &request.Operation{ + Name: opRequestCancelWorkflowExecution, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RequestCancelWorkflowExecutionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RequestCancelWorkflowExecutionOutput{} + req.Data = output + return +} + +// Records a WorkflowExecutionCancelRequested event in the currently running +// workflow execution identified by the given domain, workflowId, and runId. +// This logically requests the cancellation of the workflow execution as a whole. +// It is up to the decider to take appropriate actions when it receives an execution +// history with this event. +// +// If the runId is not specified, the WorkflowExecutionCancelRequested event +// is recorded in the history of the current open workflow execution with the +// specified workflowId in the domain. Because this action allows the workflow +// to properly clean up and gracefully close, it should be used instead of TerminateWorkflowExecution +// when possible. Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. You cannot use an IAM policy to constrain this action's parameters. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) RequestCancelWorkflowExecution(input *RequestCancelWorkflowExecutionInput) (*RequestCancelWorkflowExecutionOutput, error) { + req, out := c.RequestCancelWorkflowExecutionRequest(input) + err := req.Send() + return out, err +} + +const opRespondActivityTaskCanceled = "RespondActivityTaskCanceled" + +// RespondActivityTaskCanceledRequest generates a "aws/request.Request" representing the +// client's request for the RespondActivityTaskCanceled operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RespondActivityTaskCanceled method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RespondActivityTaskCanceledRequest method. 
+// req, resp := client.RespondActivityTaskCanceledRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SWF) RespondActivityTaskCanceledRequest(input *RespondActivityTaskCanceledInput) (req *request.Request, output *RespondActivityTaskCanceledOutput) { + op := &request.Operation{ + Name: opRespondActivityTaskCanceled, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RespondActivityTaskCanceledInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RespondActivityTaskCanceledOutput{} + req.Data = output + return +} + +// Used by workers to tell the service that the ActivityTask identified by the +// taskToken was successfully canceled. Additional details can be optionally +// provided using the details argument. +// +// These details (if provided) appear in the ActivityTaskCanceled event added +// to the workflow history. +// +// Only use this operation if the canceled flag of a RecordActivityTaskHeartbeat +// request returns true and if the activity can be safely undone or abandoned. +// A task is considered open from the time that it is scheduled until it is +// closed. Therefore a task is reported as open while a worker is processing +// it. A task is closed after it has been specified in a call to RespondActivityTaskCompleted, +// RespondActivityTaskCanceled, RespondActivityTaskFailed, or the task has timed +// out (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dg-basic.html#swf-dev-timeout-types). +// +// Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. You cannot use an IAM policy to constrain this action's parameters. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) RespondActivityTaskCanceled(input *RespondActivityTaskCanceledInput) (*RespondActivityTaskCanceledOutput, error) { + req, out := c.RespondActivityTaskCanceledRequest(input) + err := req.Send() + return out, err +} + +const opRespondActivityTaskCompleted = "RespondActivityTaskCompleted" + +// RespondActivityTaskCompletedRequest generates a "aws/request.Request" representing the +// client's request for the RespondActivityTaskCompleted operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RespondActivityTaskCompleted method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RespondActivityTaskCompletedRequest method. +// req, resp := client.RespondActivityTaskCompletedRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SWF) RespondActivityTaskCompletedRequest(input *RespondActivityTaskCompletedInput) (req *request.Request, output *RespondActivityTaskCompletedOutput) { + op := &request.Operation{ + Name: opRespondActivityTaskCompleted, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RespondActivityTaskCompletedInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RespondActivityTaskCompletedOutput{} + req.Data = output + return +} + +// Used by workers to tell the service that the ActivityTask identified by the +// taskToken completed successfully with a result (if provided). The result +// appears in the ActivityTaskCompleted event in the workflow history. +// +// If the requested task does not complete successfully, use RespondActivityTaskFailed +// instead. If the worker finds that the task is canceled through the canceled +// flag returned by RecordActivityTaskHeartbeat, it should cancel the task, +// clean up and then call RespondActivityTaskCanceled. A task is considered +// open from the time that it is scheduled until it is closed. Therefore a task +// is reported as open while a worker is processing it. A task is closed after +// it has been specified in a call to RespondActivityTaskCompleted, RespondActivityTaskCanceled, +// RespondActivityTaskFailed, or the task has timed out (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dg-basic.html#swf-dev-timeout-types). +// +// Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. You cannot use an IAM policy to constrain this action's parameters. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) RespondActivityTaskCompleted(input *RespondActivityTaskCompletedInput) (*RespondActivityTaskCompletedOutput, error) { + req, out := c.RespondActivityTaskCompletedRequest(input) + err := req.Send() + return out, err +} + +const opRespondActivityTaskFailed = "RespondActivityTaskFailed" + +// RespondActivityTaskFailedRequest generates a "aws/request.Request" representing the +// client's request for the RespondActivityTaskFailed operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RespondActivityTaskFailed method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RespondActivityTaskFailedRequest method. +// req, resp := client.RespondActivityTaskFailedRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SWF) RespondActivityTaskFailedRequest(input *RespondActivityTaskFailedInput) (req *request.Request, output *RespondActivityTaskFailedOutput) { + op := &request.Operation{ + Name: opRespondActivityTaskFailed, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RespondActivityTaskFailedInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RespondActivityTaskFailedOutput{} + req.Data = output + return +} + +// Used by workers to tell the service that the ActivityTask identified by the +// taskToken has failed with reason (if specified). The reason and details appear +// in the ActivityTaskFailed event added to the workflow history. +// +// A task is considered open from the time that it is scheduled until it is +// closed. Therefore a task is reported as open while a worker is processing +// it. A task is closed after it has been specified in a call to RespondActivityTaskCompleted, +// RespondActivityTaskCanceled, RespondActivityTaskFailed, or the task has timed +// out (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dg-basic.html#swf-dev-timeout-types). +// +// Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. You cannot use an IAM policy to constrain this action's parameters. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) RespondActivityTaskFailed(input *RespondActivityTaskFailedInput) (*RespondActivityTaskFailedOutput, error) { + req, out := c.RespondActivityTaskFailedRequest(input) + err := req.Send() + return out, err +} + +const opRespondDecisionTaskCompleted = "RespondDecisionTaskCompleted" + +// RespondDecisionTaskCompletedRequest generates a "aws/request.Request" representing the +// client's request for the RespondDecisionTaskCompleted operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RespondDecisionTaskCompleted method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RespondDecisionTaskCompletedRequest method. +// req, resp := client.RespondDecisionTaskCompletedRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SWF) RespondDecisionTaskCompletedRequest(input *RespondDecisionTaskCompletedInput) (req *request.Request, output *RespondDecisionTaskCompletedOutput) { + op := &request.Operation{ + Name: opRespondDecisionTaskCompleted, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RespondDecisionTaskCompletedInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RespondDecisionTaskCompletedOutput{} + req.Data = output + return +} + +// Used by deciders to tell the service that the DecisionTask identified by +// the taskToken has successfully completed. The decisions argument specifies +// the list of decisions made while processing the task. +// +// A DecisionTaskCompleted event is added to the workflow history. The executionContext +// specified is attached to the event in the workflow execution history. +// +// Access Control +// +// If an IAM policy grants permission to use RespondDecisionTaskCompleted, +// it can express permissions for the list of decisions in the decisions parameter. +// Each of the decisions has one or more parameters, much like a regular API +// call. To allow for policies to be as readable as possible, you can express +// permissions on decisions as if they were actual API calls, including applying +// conditions to some parameters. For more information, see Using IAM to Manage +// Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) RespondDecisionTaskCompleted(input *RespondDecisionTaskCompletedInput) (*RespondDecisionTaskCompletedOutput, error) { + req, out := c.RespondDecisionTaskCompletedRequest(input) + err := req.Send() + return out, err +} + +const opSignalWorkflowExecution = "SignalWorkflowExecution" + +// SignalWorkflowExecutionRequest generates a "aws/request.Request" representing the +// client's request for the SignalWorkflowExecution operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SignalWorkflowExecution method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SignalWorkflowExecutionRequest method. 
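+// // Here client is assumed to be a *swf.SWF created with swf.New, and params
+// // a populated *swf.SignalWorkflowExecutionInput (an illustrative note).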
+// req, resp := client.SignalWorkflowExecutionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *SWF) SignalWorkflowExecutionRequest(input *SignalWorkflowExecutionInput) (req *request.Request, output *SignalWorkflowExecutionOutput) {
+	op := &request.Operation{
+		Name:       opSignalWorkflowExecution,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &SignalWorkflowExecutionInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	output = &SignalWorkflowExecutionOutput{}
+	req.Data = output
+	return
+}
+
+// Records a WorkflowExecutionSignaled event in the workflow execution history
+// and creates a decision task for the workflow execution identified by the
+// given domain, workflowId and runId. The event is recorded with the specified
+// user defined signalName and input (if provided).
+//
+// If a runId is not specified, then the WorkflowExecutionSignaled event is
+// recorded in the history of the current open workflow with the matching workflowId
+// in the domain. If the specified workflow execution is not open, this method
+// fails with UnknownResource.
+//
+// Access Control
+//
+// You can use IAM policies to control this action's access to Amazon SWF resources
+// as follows:
+//
+// Use a Resource element with the domain name to limit the action to only
+// specified domains. Use an Action element to allow or deny permission to call
+// this action. You cannot use an IAM policy to constrain this action's parameters.
+// If the caller does not have sufficient permissions to invoke the action,
+// or the parameter values fall outside the specified constraints, the action
+// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED.
+// For details and example IAM policies, see Using IAM to Manage Access to Amazon
+// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
+func (c *SWF) SignalWorkflowExecution(input *SignalWorkflowExecutionInput) (*SignalWorkflowExecutionOutput, error) {
+	req, out := c.SignalWorkflowExecutionRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opStartWorkflowExecution = "StartWorkflowExecution"
+
+// StartWorkflowExecutionRequest generates a "aws/request.Request" representing the
+// client's request for the StartWorkflowExecution operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the StartWorkflowExecution method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the StartWorkflowExecutionRequest method.
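+// // On success, resp is a *swf.StartWorkflowExecutionOutput whose RunId
+// // identifies the newly started execution (an illustrative note).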
+// req, resp := client.StartWorkflowExecutionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SWF) StartWorkflowExecutionRequest(input *StartWorkflowExecutionInput) (req *request.Request, output *StartWorkflowExecutionOutput) { + op := &request.Operation{ + Name: opStartWorkflowExecution, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartWorkflowExecutionInput{} + } + + req = c.newRequest(op, input, output) + output = &StartWorkflowExecutionOutput{} + req.Data = output + return +} + +// Starts an execution of the workflow type in the specified domain using the +// provided workflowId and input data. +// +// This action returns the newly started workflow execution. +// +// Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. Constrain the following parameters by using a Condition element +// with the appropriate keys. tagList.member.0: The key is swf:tagList.member.0. +// tagList.member.1: The key is swf:tagList.member.1. tagList.member.2: The +// key is swf:tagList.member.2. tagList.member.3: The key is swf:tagList.member.3. +// tagList.member.4: The key is swf:tagList.member.4. taskList: String constraint. +// The key is swf:taskList.name. workflowType.name: String constraint. The key +// is swf:workflowType.name. workflowType.version: String constraint. The key +// is swf:workflowType.version. If the caller does not have sufficient permissions +// to invoke the action, or the parameter values fall outside the specified +// constraints, the action fails. The associated event attribute's cause parameter +// will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, +// see Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) StartWorkflowExecution(input *StartWorkflowExecutionInput) (*StartWorkflowExecutionOutput, error) { + req, out := c.StartWorkflowExecutionRequest(input) + err := req.Send() + return out, err +} + +const opTerminateWorkflowExecution = "TerminateWorkflowExecution" + +// TerminateWorkflowExecutionRequest generates a "aws/request.Request" representing the +// client's request for the TerminateWorkflowExecution operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the TerminateWorkflowExecution method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the TerminateWorkflowExecutionRequest method. 
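+// // resp is a *swf.TerminateWorkflowExecutionOutput, which carries no fields
+// // for this operation (an illustrative note).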
+// req, resp := client.TerminateWorkflowExecutionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *SWF) TerminateWorkflowExecutionRequest(input *TerminateWorkflowExecutionInput) (req *request.Request, output *TerminateWorkflowExecutionOutput) {
+	op := &request.Operation{
+		Name:       opTerminateWorkflowExecution,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &TerminateWorkflowExecutionInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	output = &TerminateWorkflowExecutionOutput{}
+	req.Data = output
+	return
+}
+
+// Records a WorkflowExecutionTerminated event and forces closure of the workflow
+// execution identified by the given domain, runId, and workflowId. The child
+// policy, registered with the workflow type or specified when starting this
+// execution, is applied to any open child workflow executions of this workflow
+// execution.
+//
+// If the identified workflow execution was in progress, it is terminated
+// immediately. If a runId is not specified, then the WorkflowExecutionTerminated
+// event is recorded in the history of the current open workflow with the matching
+// workflowId in the domain. You should consider using the RequestCancelWorkflowExecution
+// action instead, because it allows the workflow to gracefully close while
+// TerminateWorkflowExecution does not.
+//
+// Access Control
+//
+// You can use IAM policies to control this action's access to Amazon SWF resources
+// as follows:
+//
+// Use a Resource element with the domain name to limit the action to only
+// specified domains. Use an Action element to allow or deny permission to call
+// this action. You cannot use an IAM policy to constrain this action's parameters.
+// If the caller does not have sufficient permissions to invoke the action,
+// or the parameter values fall outside the specified constraints, the action
+// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED.
+// For details and example IAM policies, see Using IAM to Manage Access to Amazon
+// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
+func (c *SWF) TerminateWorkflowExecution(input *TerminateWorkflowExecutionInput) (*TerminateWorkflowExecutionOutput, error) {
+	req, out := c.TerminateWorkflowExecutionRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// Provides details of the ActivityTaskCancelRequested event.
+type ActivityTaskCancelRequestedEventAttributes struct {
+	_ struct{} `type:"structure"`
+
+	// The unique ID of the task.
+	ActivityId *string `locationName:"activityId" min:"1" type:"string" required:"true"`
+
+	// The ID of the DecisionTaskCompleted event corresponding to the decision task
+	// that resulted in the RequestCancelActivityTask decision for this cancellation
+	// request. This information can be useful for diagnosing problems by tracing
+	// back the chain of events leading up to this event.
+ DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` +} + +// String returns the string representation +func (s ActivityTaskCancelRequestedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActivityTaskCancelRequestedEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the ActivityTaskCanceled event. +type ActivityTaskCanceledEventAttributes struct { + _ struct{} `type:"structure"` + + // Details of the cancellation (if any). + Details *string `locationName:"details" type:"string"` + + // If set, contains the ID of the last ActivityTaskCancelRequested event recorded + // for this activity task. This information can be useful for diagnosing problems + // by tracing back the chain of events leading up to this event. + LatestCancelRequestedEventId *int64 `locationName:"latestCancelRequestedEventId" type:"long"` + + // The ID of the ActivityTaskScheduled event that was recorded when this activity + // task was scheduled. This information can be useful for diagnosing problems + // by tracing back the chain of events leading up to this event. + ScheduledEventId *int64 `locationName:"scheduledEventId" type:"long" required:"true"` + + // The ID of the ActivityTaskStarted event recorded when this activity task + // was started. This information can be useful for diagnosing problems by tracing + // back the chain of events leading up to this event. + StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` +} + +// String returns the string representation +func (s ActivityTaskCanceledEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActivityTaskCanceledEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the ActivityTaskCompleted event. +type ActivityTaskCompletedEventAttributes struct { + _ struct{} `type:"structure"` + + // The results of the activity task (if any). + Result *string `locationName:"result" type:"string"` + + // The ID of the ActivityTaskScheduled event that was recorded when this activity + // task was scheduled. This information can be useful for diagnosing problems + // by tracing back the chain of events leading up to this event. + ScheduledEventId *int64 `locationName:"scheduledEventId" type:"long" required:"true"` + + // The ID of the ActivityTaskStarted event recorded when this activity task + // was started. This information can be useful for diagnosing problems by tracing + // back the chain of events leading up to this event. + StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` +} + +// String returns the string representation +func (s ActivityTaskCompletedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActivityTaskCompletedEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the ActivityTaskFailed event. +type ActivityTaskFailedEventAttributes struct { + _ struct{} `type:"structure"` + + // The details of the failure (if any). + Details *string `locationName:"details" type:"string"` + + // The reason provided for the failure (if any). + Reason *string `locationName:"reason" type:"string"` + + // The ID of the ActivityTaskScheduled event that was recorded when this activity + // task was scheduled. 
+	// This information can be useful for diagnosing problems
+	// by tracing back the chain of events leading up to this event.
+	ScheduledEventId *int64 `locationName:"scheduledEventId" type:"long" required:"true"`
+
+	// The ID of the ActivityTaskStarted event recorded when this activity task
+	// was started. This information can be useful for diagnosing problems by tracing
+	// back the chain of events leading up to this event.
+	StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"`
+}
+
+// String returns the string representation
+func (s ActivityTaskFailedEventAttributes) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ActivityTaskFailedEventAttributes) GoString() string {
+	return s.String()
+}
+
+// Provides details of the ActivityTaskScheduled event.
+type ActivityTaskScheduledEventAttributes struct {
+	_ struct{} `type:"structure"`
+
+	// The unique ID of the activity task.
+	ActivityId *string `locationName:"activityId" min:"1" type:"string" required:"true"`
+
+	// The type of the activity task.
+	ActivityType *ActivityType `locationName:"activityType" type:"structure" required:"true"`
+
+	// Optional. Data attached to the event that can be used by the decider in subsequent
+	// workflow tasks. This data is not sent to the activity.
+	Control *string `locationName:"control" type:"string"`
+
+	// The ID of the DecisionTaskCompleted event corresponding to the decision that
+	// resulted in the scheduling of this activity task. This information can be
+	// useful for diagnosing problems by tracing back the chain of events leading
+	// up to this event.
+	DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"`
+
+	// The maximum time before which the worker processing this task must report
+	// progress by calling RecordActivityTaskHeartbeat. If the timeout is exceeded,
+	// the activity task is automatically timed out. If the worker subsequently
+	// attempts to record a heartbeat or return a result, it will be ignored.
+	HeartbeatTimeout *string `locationName:"heartbeatTimeout" type:"string"`
+
+	// The input provided to the activity task.
+	Input *string `locationName:"input" type:"string"`
+
+	// The maximum amount of time for this activity task.
+	ScheduleToCloseTimeout *string `locationName:"scheduleToCloseTimeout" type:"string"`
+
+	// The maximum amount of time the activity task can wait to be assigned to a
+	// worker.
+	ScheduleToStartTimeout *string `locationName:"scheduleToStartTimeout" type:"string"`
+
+	// The maximum amount of time a worker may take to process the activity task.
+	StartToCloseTimeout *string `locationName:"startToCloseTimeout" type:"string"`
+
+	// The task list in which the activity task has been scheduled.
+	TaskList *TaskList `locationName:"taskList" type:"structure" required:"true"`
+
+	// Optional. The priority to assign to the scheduled activity task. If set,
+	// this will override any default priority value that was assigned when the
+	// activity type was registered.
+	//
+	// Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648)
+	// to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.
+	//
+	// For more information about setting task priority, see Setting Task Priority
+	// (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html)
+	// in the Amazon Simple Workflow Developer Guide.
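+	//
+	// For example, a task recorded here with taskPriority "10" was dispatched
+	// ahead of tasks using the default priority "0" (illustrative values).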
+	TaskPriority *string `locationName:"taskPriority" type:"string"`
+}
+
+// String returns the string representation
+func (s ActivityTaskScheduledEventAttributes) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ActivityTaskScheduledEventAttributes) GoString() string {
+	return s.String()
+}
+
+// Provides details of the ActivityTaskStarted event.
+type ActivityTaskStartedEventAttributes struct {
+	_ struct{} `type:"structure"`
+
+	// Identity of the worker that was assigned this task. This aids diagnostics
+	// when problems arise. The form of this identity is user defined.
+	Identity *string `locationName:"identity" type:"string"`
+
+	// The ID of the ActivityTaskScheduled event that was recorded when this activity
+	// task was scheduled. This information can be useful for diagnosing problems
+	// by tracing back the chain of events leading up to this event.
+	ScheduledEventId *int64 `locationName:"scheduledEventId" type:"long" required:"true"`
+}
+
+// String returns the string representation
+func (s ActivityTaskStartedEventAttributes) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ActivityTaskStartedEventAttributes) GoString() string {
+	return s.String()
+}
+
+// Provides details of the ActivityTaskTimedOut event.
+type ActivityTaskTimedOutEventAttributes struct {
+	_ struct{} `type:"structure"`
+
+	// Contains the content of the details parameter for the last call made by the
+	// activity to RecordActivityTaskHeartbeat.
+	Details *string `locationName:"details" type:"string"`
+
+	// The ID of the ActivityTaskScheduled event that was recorded when this activity
+	// task was scheduled. This information can be useful for diagnosing problems
+	// by tracing back the chain of events leading up to this event.
+	ScheduledEventId *int64 `locationName:"scheduledEventId" type:"long" required:"true"`
+
+	// The ID of the ActivityTaskStarted event recorded when this activity task
+	// was started. This information can be useful for diagnosing problems by tracing
+	// back the chain of events leading up to this event.
+	StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"`
+
+	// The type of the timeout that caused this event.
+	TimeoutType *string `locationName:"timeoutType" type:"string" required:"true" enum:"ActivityTaskTimeoutType"`
+}
+
+// String returns the string representation
+func (s ActivityTaskTimedOutEventAttributes) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ActivityTaskTimedOutEventAttributes) GoString() string {
+	return s.String()
+}
+
+// Represents an activity type.
+type ActivityType struct {
+	_ struct{} `type:"structure"`
+
+	// The name of this activity.
+	//
+	// The combination of activity type name and version must be unique within
+	// a domain.
+	Name *string `locationName:"name" min:"1" type:"string" required:"true"`
+
+	// The version of this activity.
+	//
+	// The combination of activity type name and version must be unique within
+	// a domain.
+	Version *string `locationName:"version" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ActivityType) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ActivityType) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
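+//
+// A minimal usage sketch of client-side validation (the name and version shown
+// are assumed values, not part of the generated API):
+//
+//    at := &swf.ActivityType{
+//        Name:    aws.String("process-order"),
+//        Version: aws.String("1.0"),
+//    }
+//    if err := at.Validate(); err != nil {
+//        // err aggregates the missing or too-short required fields
+//    }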
+func (s *ActivityType) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ActivityType"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Version == nil { + invalidParams.Add(request.NewErrParamRequired("Version")) + } + if s.Version != nil && len(*s.Version) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Version", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Configuration settings registered with the activity type. +type ActivityTypeConfiguration struct { + _ struct{} `type:"structure"` + + // Optional. The default maximum time, in seconds, before which a worker processing + // a task must report progress by calling RecordActivityTaskHeartbeat. + // + // You can specify this value only when registering an activity type. The registered + // default value can be overridden when you schedule a task through the ScheduleActivityTask + // decision. If the activity worker subsequently attempts to record a heartbeat + // or returns a result, the activity worker receives an UnknownResource fault. + // In this case, Amazon SWF no longer considers the activity task to be valid; + // the activity worker should clean up the activity task. + // + // The duration is specified in seconds; an integer greater than or equal to + // 0. The value "NONE" can be used to specify unlimited duration. + DefaultTaskHeartbeatTimeout *string `locationName:"defaultTaskHeartbeatTimeout" type:"string"` + + // Optional. The default task list specified for this activity type at registration. + // This default is used if a task list is not provided when a task is scheduled + // through the ScheduleActivityTask decision. You can override the default registered + // task list when scheduling a task through the ScheduleActivityTask decision. + DefaultTaskList *TaskList `locationName:"defaultTaskList" type:"structure"` + + // Optional. The default task priority for tasks of this activity type, specified + // at registration. If not set, then "0" will be used as the default priority. + // This default can be overridden when scheduling an activity task. + // + // Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) + // to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority. + // + // For more information about setting task priority, see Setting Task Priority + // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) + // in the Amazon Simple Workflow Developer Guide. + DefaultTaskPriority *string `locationName:"defaultTaskPriority" type:"string"` + + // Optional. The default maximum duration, specified when registering the activity + // type, for tasks of this activity type. You can override this default when + // scheduling a task through the ScheduleActivityTask decision. + // + // The duration is specified in seconds; an integer greater than or equal to + // 0. The value "NONE" can be used to specify unlimited duration. + DefaultTaskScheduleToCloseTimeout *string `locationName:"defaultTaskScheduleToCloseTimeout" type:"string"` + + // Optional. The default maximum duration, specified when registering the activity + // type, that a task of an activity type can wait before being assigned to a + // worker. You can override this default when scheduling a task through the + // ScheduleActivityTask decision. 
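+	// For example, a registered default of "300" caps the queue wait at five minutes
+	// unless a ScheduleActivityTask decision overrides it (an illustrative value).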
+ // + // The duration is specified in seconds; an integer greater than or equal to + // 0. The value "NONE" can be used to specify unlimited duration. + DefaultTaskScheduleToStartTimeout *string `locationName:"defaultTaskScheduleToStartTimeout" type:"string"` + + // Optional. The default maximum duration for tasks of an activity type specified + // when registering the activity type. You can override this default when scheduling + // a task through the ScheduleActivityTask decision. + // + // The duration is specified in seconds; an integer greater than or equal to + // 0. The value "NONE" can be used to specify unlimited duration. + DefaultTaskStartToCloseTimeout *string `locationName:"defaultTaskStartToCloseTimeout" type:"string"` +} + +// String returns the string representation +func (s ActivityTypeConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActivityTypeConfiguration) GoString() string { + return s.String() +} + +// Detailed information about an activity type. +type ActivityTypeInfo struct { + _ struct{} `type:"structure"` + + // The ActivityType type structure representing the activity type. + ActivityType *ActivityType `locationName:"activityType" type:"structure" required:"true"` + + // The date and time this activity type was created through RegisterActivityType. + CreationDate *time.Time `locationName:"creationDate" type:"timestamp" timestampFormat:"unix" required:"true"` + + // If DEPRECATED, the date and time DeprecateActivityType was called. + DeprecationDate *time.Time `locationName:"deprecationDate" type:"timestamp" timestampFormat:"unix"` + + // The description of the activity type provided in RegisterActivityType. + Description *string `locationName:"description" type:"string"` + + // The current status of the activity type. + Status *string `locationName:"status" type:"string" required:"true" enum:"RegistrationStatus"` +} + +// String returns the string representation +func (s ActivityTypeInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActivityTypeInfo) GoString() string { + return s.String() +} + +// Provides details of the CancelTimer decision. +// +// Access Control +// +// You can use IAM policies to control this decision's access to Amazon SWF +// resources as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. You cannot use an IAM policy to constrain this action's parameters. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +type CancelTimerDecisionAttributes struct { + _ struct{} `type:"structure"` + + // Required. The unique ID of the timer to cancel. 
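+	// The timer must previously have been started in the same workflow execution
+	// with a StartTimer decision (a general usage note).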
+	TimerId *string `locationName:"timerId" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CancelTimerDecisionAttributes) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CancelTimerDecisionAttributes) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CancelTimerDecisionAttributes) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CancelTimerDecisionAttributes"}
+	if s.TimerId == nil {
+		invalidParams.Add(request.NewErrParamRequired("TimerId"))
+	}
+	if s.TimerId != nil && len(*s.TimerId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("TimerId", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Provides details of the CancelTimerFailed event.
+type CancelTimerFailedEventAttributes struct {
+	_ struct{} `type:"structure"`
+
+	// The cause of the failure. This information is generated by the system and
+	// can be useful for diagnostic purposes.
+	//
+	// If cause is set to OPERATION_NOT_PERMITTED, the decision failed because
+	// it lacked sufficient permissions. For details and example IAM policies, see
+	// Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
+	Cause *string `locationName:"cause" type:"string" required:"true" enum:"CancelTimerFailedCause"`
+
+	// The ID of the DecisionTaskCompleted event corresponding to the decision task
+	// that resulted in the CancelTimer decision to cancel this timer. This information
+	// can be useful for diagnosing problems by tracing back the chain of events
+	// leading up to this event.
+	DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"`
+
+	// The timerId provided in the CancelTimer decision that failed.
+	TimerId *string `locationName:"timerId" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CancelTimerFailedEventAttributes) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CancelTimerFailedEventAttributes) GoString() string {
+	return s.String()
+}
+
+// Provides details of the CancelWorkflowExecution decision.
+//
+// Access Control
+//
+// You can use IAM policies to control this decision's access to Amazon SWF
+// resources as follows:
+//
+// Use a Resource element with the domain name to limit the action to only
+// specified domains. Use an Action element to allow or deny permission to call
+// this action. You cannot use an IAM policy to constrain this action's parameters.
+// If the caller does not have sufficient permissions to invoke the action,
+// or the parameter values fall outside the specified constraints, the action
+// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED.
+// For details and example IAM policies, see Using IAM to Manage Access to Amazon
+// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
+type CancelWorkflowExecutionDecisionAttributes struct {
+	_ struct{} `type:"structure"`
+
+	// Optional. Details of the cancellation.
+	Details *string `locationName:"details" type:"string"`
+}
+
+// String returns the string representation
+func (s CancelWorkflowExecutionDecisionAttributes) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CancelWorkflowExecutionDecisionAttributes) GoString() string {
+	return s.String()
+}
+
+// Provides details of the CancelWorkflowExecutionFailed event.
+type CancelWorkflowExecutionFailedEventAttributes struct {
+	_ struct{} `type:"structure"`
+
+	// The cause of the failure. This information is generated by the system and
+	// can be useful for diagnostic purposes.
+	//
+	// If cause is set to OPERATION_NOT_PERMITTED, the decision failed because
+	// it lacked sufficient permissions. For details and example IAM policies, see
+	// Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
+	Cause *string `locationName:"cause" type:"string" required:"true" enum:"CancelWorkflowExecutionFailedCause"`
+
+	// The ID of the DecisionTaskCompleted event corresponding to the decision task
+	// that resulted in the CancelWorkflowExecution decision for this cancellation
+	// request. This information can be useful for diagnosing problems by tracing
+	// back the chain of events leading up to this event.
+	DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"`
+}
+
+// String returns the string representation
+func (s CancelWorkflowExecutionFailedEventAttributes) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CancelWorkflowExecutionFailedEventAttributes) GoString() string {
+	return s.String()
+}
+
+// Provides details of the ChildWorkflowExecutionCanceled event.
+type ChildWorkflowExecutionCanceledEventAttributes struct {
+	_ struct{} `type:"structure"`
+
+	// Details of the cancellation (if provided).
+	Details *string `locationName:"details" type:"string"`
+
+	// The ID of the StartChildWorkflowExecutionInitiated event corresponding to
+	// the StartChildWorkflowExecution decision to start this child workflow execution.
+	// This information can be useful for diagnosing problems by tracing back the
+	// chain of events leading up to this event.
+	InitiatedEventId *int64 `locationName:"initiatedEventId" type:"long" required:"true"`
+
+	// The ID of the ChildWorkflowExecutionStarted event recorded when this child
+	// workflow execution was started. This information can be useful for diagnosing
+	// problems by tracing back the chain of events leading up to this event.
+	StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"`
+
+	// The child workflow execution that was canceled.
+	WorkflowExecution *WorkflowExecution `locationName:"workflowExecution" type:"structure" required:"true"`
+
+	// The type of the child workflow execution.
+	WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s ChildWorkflowExecutionCanceledEventAttributes) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ChildWorkflowExecutionCanceledEventAttributes) GoString() string {
+	return s.String()
+}
+
+// Provides details of the ChildWorkflowExecutionCompleted event.
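+// Deciders typically read these attributes from the HistoryEvent entries returned
+// by PollForDecisionTask (a general usage note).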
+type ChildWorkflowExecutionCompletedEventAttributes struct { + _ struct{} `type:"structure"` + + // The ID of the StartChildWorkflowExecutionInitiated event corresponding to + // the StartChildWorkflowExecution decision to start this child workflow execution. + // This information can be useful for diagnosing problems by tracing back the + // chain of events leading up to this event. + InitiatedEventId *int64 `locationName:"initiatedEventId" type:"long" required:"true"` + + // The result of the child workflow execution (if any). + Result *string `locationName:"result" type:"string"` + + // The ID of the ChildWorkflowExecutionStarted event recorded when this child + // workflow execution was started. This information can be useful for diagnosing + // problems by tracing back the chain of events leading up to this event. + StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` + + // The child workflow execution that was completed. + WorkflowExecution *WorkflowExecution `locationName:"workflowExecution" type:"structure" required:"true"` + + // The type of the child workflow execution. + WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` +} + +// String returns the string representation +func (s ChildWorkflowExecutionCompletedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChildWorkflowExecutionCompletedEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the ChildWorkflowExecutionFailed event. +type ChildWorkflowExecutionFailedEventAttributes struct { + _ struct{} `type:"structure"` + + // The details of the failure (if provided). + Details *string `locationName:"details" type:"string"` + + // The ID of the StartChildWorkflowExecutionInitiated event corresponding to + // the StartChildWorkflowExecution decision to start this child workflow execution. + // This information can be useful for diagnosing problems by tracing back the + // chain of events leading up to this event. + InitiatedEventId *int64 `locationName:"initiatedEventId" type:"long" required:"true"` + + // The reason for the failure (if provided). + Reason *string `locationName:"reason" type:"string"` + + // The ID of the ChildWorkflowExecutionStarted event recorded when this child + // workflow execution was started. This information can be useful for diagnosing + // problems by tracing back the chain of events leading up to this event. + StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` + + // The child workflow execution that failed. + WorkflowExecution *WorkflowExecution `locationName:"workflowExecution" type:"structure" required:"true"` + + // The type of the child workflow execution. + WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` +} + +// String returns the string representation +func (s ChildWorkflowExecutionFailedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChildWorkflowExecutionFailedEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the ChildWorkflowExecutionStarted event. +type ChildWorkflowExecutionStartedEventAttributes struct { + _ struct{} `type:"structure"` + + // The ID of the StartChildWorkflowExecutionInitiated event corresponding to + // the StartChildWorkflowExecution decision to start this child workflow execution. 
+ // This information can be useful for diagnosing problems by tracing back the + // chain of events leading up to this event. + InitiatedEventId *int64 `locationName:"initiatedEventId" type:"long" required:"true"` + + // The child workflow execution that was started. + WorkflowExecution *WorkflowExecution `locationName:"workflowExecution" type:"structure" required:"true"` + + // The type of the child workflow execution. + WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` +} + +// String returns the string representation +func (s ChildWorkflowExecutionStartedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChildWorkflowExecutionStartedEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the ChildWorkflowExecutionTerminated event. +type ChildWorkflowExecutionTerminatedEventAttributes struct { + _ struct{} `type:"structure"` + + // The ID of the StartChildWorkflowExecutionInitiated event corresponding to + // the StartChildWorkflowExecution decision to start this child workflow execution. + // This information can be useful for diagnosing problems by tracing back the + // chain of events leading up to this event. + InitiatedEventId *int64 `locationName:"initiatedEventId" type:"long" required:"true"` + + // The ID of the ChildWorkflowExecutionStarted event recorded when this child + // workflow execution was started. This information can be useful for diagnosing + // problems by tracing back the chain of events leading up to this event. + StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` + + // The child workflow execution that was terminated. + WorkflowExecution *WorkflowExecution `locationName:"workflowExecution" type:"structure" required:"true"` + + // The type of the child workflow execution. + WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` +} + +// String returns the string representation +func (s ChildWorkflowExecutionTerminatedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChildWorkflowExecutionTerminatedEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the ChildWorkflowExecutionTimedOut event. +type ChildWorkflowExecutionTimedOutEventAttributes struct { + _ struct{} `type:"structure"` + + // The ID of the StartChildWorkflowExecutionInitiated event corresponding to + // the StartChildWorkflowExecution decision to start this child workflow execution. + // This information can be useful for diagnosing problems by tracing back the + // chain of events leading up to this event. + InitiatedEventId *int64 `locationName:"initiatedEventId" type:"long" required:"true"` + + // The ID of the ChildWorkflowExecutionStarted event recorded when this child + // workflow execution was started. This information can be useful for diagnosing + // problems by tracing back the chain of events leading up to this event. + StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` + + // The type of the timeout that caused the child workflow execution to time + // out. + TimeoutType *string `locationName:"timeoutType" type:"string" required:"true" enum:"WorkflowExecutionTimeoutType"` + + // The child workflow execution that timed out. 
+ WorkflowExecution *WorkflowExecution `locationName:"workflowExecution" type:"structure" required:"true"` + + // The type of the child workflow execution. + WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` +} + +// String returns the string representation +func (s ChildWorkflowExecutionTimedOutEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChildWorkflowExecutionTimedOutEventAttributes) GoString() string { + return s.String() +} + +// Used to filter the closed workflow executions in visibility APIs by their +// close status. +type CloseStatusFilter struct { + _ struct{} `type:"structure"` + + // Required. The close status that must match the close status of an execution + // for it to meet the criteria of this filter. + Status *string `locationName:"status" type:"string" required:"true" enum:"CloseStatus"` +} + +// String returns the string representation +func (s CloseStatusFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloseStatusFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CloseStatusFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CloseStatusFilter"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Provides details of the CompleteWorkflowExecution decision. +// +// Access Control +// +// You can use IAM policies to control this decision's access to Amazon SWF +// resources as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. You cannot use an IAM policy to constrain this action's parameters. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +type CompleteWorkflowExecutionDecisionAttributes struct { + _ struct{} `type:"structure"` + + // The result of the workflow execution. The form of the result is implementation + // defined. + Result *string `locationName:"result" type:"string"` +} + +// String returns the string representation +func (s CompleteWorkflowExecutionDecisionAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteWorkflowExecutionDecisionAttributes) GoString() string { + return s.String() +} + +// Provides details of the CompleteWorkflowExecutionFailed event. +type CompleteWorkflowExecutionFailedEventAttributes struct { + _ struct{} `type:"structure"` + + // The cause of the failure. This information is generated by the system and + // can be useful for diagnostic purposes. + // + // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because + // it lacked sufficient permissions. 
+	// For details and example IAM policies, see
+	// Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
+	Cause *string `locationName:"cause" type:"string" required:"true" enum:"CompleteWorkflowExecutionFailedCause"`
+
+	// The ID of the DecisionTaskCompleted event corresponding to the decision task
+	// that resulted in the CompleteWorkflowExecution decision to complete this
+	// execution. This information can be useful for diagnosing problems by tracing
+	// back the chain of events leading up to this event.
+	DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"`
+}
+
+// String returns the string representation
+func (s CompleteWorkflowExecutionFailedEventAttributes) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CompleteWorkflowExecutionFailedEventAttributes) GoString() string {
+	return s.String()
+}
+
+// Provides details of the ContinueAsNewWorkflowExecution decision.
+//
+// Access Control
+//
+// You can use IAM policies to control this decision's access to Amazon SWF
+// resources as follows:
+//
+// Use a Resource element with the domain name to limit the action to only
+// specified domains. Use an Action element to allow or deny permission to call
+// this action. Constrain the following parameters by using a Condition element
+// with the appropriate keys. tag: Optional. A tag used to identify the workflow
+// execution. taskList: String constraint. The key is swf:taskList.name. workflowType.version:
+// String constraint. The key is swf:workflowType.version. If the caller
+// does not have sufficient permissions to invoke the action, or the parameter
+// values fall outside the specified constraints, the action fails. The associated
+// event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED.
+// For details and example IAM policies, see Using IAM to Manage Access to Amazon
+// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
+type ContinueAsNewWorkflowExecutionDecisionAttributes struct {
+	_ struct{} `type:"structure"`
+
+	// If set, specifies the policy to use for the child workflow executions of
+	// the new execution if it is terminated by calling the TerminateWorkflowExecution
+	// action explicitly or due to an expired timeout. This policy overrides the
+	// default child policy specified when registering the workflow type using RegisterWorkflowType.
+	//
+	// The supported child policies are:
+	//
+	// TERMINATE: the child executions will be terminated. REQUEST_CANCEL: a request
+	// to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested
+	// event in its history. It is up to the decider to take appropriate actions
+	// when it receives an execution history with this event. ABANDON: no action
+	// will be taken. The child executions will continue to run. A child policy
+	// for this workflow execution must be specified either as a default for the
+	// workflow type or through this parameter. If neither this parameter is set
+	// nor a default child policy was specified at registration time then a fault
+	// will be returned.
+	ChildPolicy *string `locationName:"childPolicy" type:"string" enum:"ChildPolicy"`
+
+	// If set, specifies the total duration for this workflow execution. This overrides
+	// the defaultExecutionStartToCloseTimeout specified when registering the workflow
+	// type.
+	//
+	// The duration is specified in seconds; an integer greater than or equal to
+	// 0. The value "NONE" can be used to specify unlimited duration.
+	//
+	// An execution start-to-close timeout for this workflow execution must be
+	// specified either as a default for the workflow type or through this field.
+	// If neither this field is set nor a default execution start-to-close timeout
+	// was specified at registration time then a fault will be returned.
+	ExecutionStartToCloseTimeout *string `locationName:"executionStartToCloseTimeout" type:"string"`
+
+	// The input provided to the new workflow execution.
+	Input *string `locationName:"input" type:"string"`
+
+	// The ARN of an IAM role that authorizes Amazon SWF to invoke AWS Lambda functions.
+	//
+	// In order for this workflow execution to invoke AWS Lambda functions, an
+	// appropriate IAM role must be specified either as a default for the workflow
+	// type or through this field.
+	LambdaRole *string `locationName:"lambdaRole" min:"1" type:"string"`
+
+	// The list of tags to associate with the new workflow execution. A maximum
+	// of 5 tags can be specified. You can list workflow executions with a specific
+	// tag by calling ListOpenWorkflowExecutions or ListClosedWorkflowExecutions
+	// and specifying a TagFilter.
+	TagList []*string `locationName:"tagList" type:"list"`
+
+	// Represents a task list.
+	TaskList *TaskList `locationName:"taskList" type:"structure"`
+
+	// Optional. The task priority that, if set, specifies the priority for the
+	// decision tasks for this workflow execution. This overrides the defaultTaskPriority
+	// specified when registering the workflow type. Valid values are integers that
+	// range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647).
+	// Higher numbers indicate higher priority.
+	//
+	// For more information about setting task priority, see Setting Task Priority
+	// (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html)
+	// in the Amazon Simple Workflow Developer Guide.
+	TaskPriority *string `locationName:"taskPriority" type:"string"`
+
+	// Specifies the maximum duration of decision tasks for the new workflow execution.
+	// This parameter overrides the defaultTaskStartToCloseTimeout specified when
+	// registering the workflow type using RegisterWorkflowType.
+	//
+	// The duration is specified in seconds; an integer greater than or equal to
+	// 0. The value "NONE" can be used to specify unlimited duration.
+	//
+	// A task start-to-close timeout for the new workflow execution must be specified
+	// either as a default for the workflow type or through this parameter. If neither
+	// this parameter is set nor a default task start-to-close timeout was specified
+	// at registration time then a fault will be returned.
+	TaskStartToCloseTimeout *string `locationName:"taskStartToCloseTimeout" type:"string"`
+
+	// The version of the workflow type to use for the new (continued) execution.
+	WorkflowTypeVersion *string `locationName:"workflowTypeVersion" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ContinueAsNewWorkflowExecutionDecisionAttributes) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ContinueAsNewWorkflowExecutionDecisionAttributes) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
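+//
+// A minimal usage sketch (the task list name is an assumed value):
+//
+//    attrs := &swf.ContinueAsNewWorkflowExecutionDecisionAttributes{
+//        TaskList: &swf.TaskList{Name: aws.String("orders-task-list")},
+//    }
+//    if err := attrs.Validate(); err != nil {
+//        // handle the aggregated parameter errors
+//    }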
+func (s *ContinueAsNewWorkflowExecutionDecisionAttributes) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "ContinueAsNewWorkflowExecutionDecisionAttributes"}
+    if s.LambdaRole != nil && len(*s.LambdaRole) < 1 {
+        invalidParams.Add(request.NewErrParamMinLen("LambdaRole", 1))
+    }
+    if s.WorkflowTypeVersion != nil && len(*s.WorkflowTypeVersion) < 1 {
+        invalidParams.Add(request.NewErrParamMinLen("WorkflowTypeVersion", 1))
+    }
+    if s.TaskList != nil {
+        if err := s.TaskList.Validate(); err != nil {
+            invalidParams.AddNested("TaskList", err.(request.ErrInvalidParams))
+        }
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// Provides details of the ContinueAsNewWorkflowExecutionFailed event.
+type ContinueAsNewWorkflowExecutionFailedEventAttributes struct {
+    _ struct{} `type:"structure"`
+
+    // The cause of the failure. This information is generated by the system and
+    // can be useful for diagnostic purposes.
+    //
+    // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because
+    // it lacked sufficient permissions. For details and example IAM policies, see
+    // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
+    Cause *string `locationName:"cause" type:"string" required:"true" enum:"ContinueAsNewWorkflowExecutionFailedCause"`
+
+    // The ID of the DecisionTaskCompleted event corresponding to the decision task
+    // that resulted in the ContinueAsNewWorkflowExecution decision that started
+    // this execution. This information can be useful for diagnosing problems by
+    // tracing back the chain of events leading up to this event.
+    DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"`
+}
+
+// String returns the string representation
+func (s ContinueAsNewWorkflowExecutionFailedEventAttributes) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ContinueAsNewWorkflowExecutionFailedEventAttributes) GoString() string {
+    return s.String()
+}
+
+type CountClosedWorkflowExecutionsInput struct {
+    _ struct{} `type:"structure"`
+
+    // If specified, only workflow executions that match this close status are counted.
+    // This filter has an effect only if executionStatus is specified as CLOSED.
+    //
+    // closeStatusFilter, executionFilter, typeFilter and tagFilter are mutually
+    // exclusive. You can specify at most one of these in a request.
+    CloseStatusFilter *CloseStatusFilter `locationName:"closeStatusFilter" type:"structure"`
+
+    // If specified, only workflow executions that meet the close time criteria
+    // of the filter are counted.
+    //
+    // startTimeFilter and closeTimeFilter are mutually exclusive. You must specify
+    // one of these in a request but not both.
+    CloseTimeFilter *ExecutionTimeFilter `locationName:"closeTimeFilter" type:"structure"`
+
+    // The name of the domain containing the workflow executions to count.
+    Domain *string `locationName:"domain" min:"1" type:"string" required:"true"`
+
+    // If specified, only workflow executions matching the WorkflowId in the filter
+    // are counted.
+    //
+    // closeStatusFilter, executionFilter, typeFilter and tagFilter are mutually
+    // exclusive. You can specify at most one of these in a request.
+ ExecutionFilter *WorkflowExecutionFilter `locationName:"executionFilter" type:"structure"` + + // If specified, only workflow executions that meet the start time criteria + // of the filter are counted. + // + // startTimeFilter and closeTimeFilter are mutually exclusive. You must specify + // one of these in a request but not both. + StartTimeFilter *ExecutionTimeFilter `locationName:"startTimeFilter" type:"structure"` + + // If specified, only executions that have a tag that matches the filter are + // counted. + // + // closeStatusFilter, executionFilter, typeFilter and tagFilter are mutually + // exclusive. You can specify at most one of these in a request. + TagFilter *TagFilter `locationName:"tagFilter" type:"structure"` + + // If specified, indicates the type of the workflow executions to be counted. + // + // closeStatusFilter, executionFilter, typeFilter and tagFilter are mutually + // exclusive. You can specify at most one of these in a request. + TypeFilter *WorkflowTypeFilter `locationName:"typeFilter" type:"structure"` +} + +// String returns the string representation +func (s CountClosedWorkflowExecutionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CountClosedWorkflowExecutionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CountClosedWorkflowExecutionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CountClosedWorkflowExecutionsInput"} + if s.Domain == nil { + invalidParams.Add(request.NewErrParamRequired("Domain")) + } + if s.Domain != nil && len(*s.Domain) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Domain", 1)) + } + if s.CloseStatusFilter != nil { + if err := s.CloseStatusFilter.Validate(); err != nil { + invalidParams.AddNested("CloseStatusFilter", err.(request.ErrInvalidParams)) + } + } + if s.CloseTimeFilter != nil { + if err := s.CloseTimeFilter.Validate(); err != nil { + invalidParams.AddNested("CloseTimeFilter", err.(request.ErrInvalidParams)) + } + } + if s.ExecutionFilter != nil { + if err := s.ExecutionFilter.Validate(); err != nil { + invalidParams.AddNested("ExecutionFilter", err.(request.ErrInvalidParams)) + } + } + if s.StartTimeFilter != nil { + if err := s.StartTimeFilter.Validate(); err != nil { + invalidParams.AddNested("StartTimeFilter", err.(request.ErrInvalidParams)) + } + } + if s.TagFilter != nil { + if err := s.TagFilter.Validate(); err != nil { + invalidParams.AddNested("TagFilter", err.(request.ErrInvalidParams)) + } + } + if s.TypeFilter != nil { + if err := s.TypeFilter.Validate(); err != nil { + invalidParams.AddNested("TypeFilter", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CountOpenWorkflowExecutionsInput struct { + _ struct{} `type:"structure"` + + // The name of the domain containing the workflow executions to count. + Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` + + // If specified, only workflow executions matching the WorkflowId in the filter + // are counted. + // + // executionFilter, typeFilter and tagFilter are mutually exclusive. You can + // specify at most one of these in a request. + ExecutionFilter *WorkflowExecutionFilter `locationName:"executionFilter" type:"structure"` + + // Specifies the start time criteria that workflow executions must meet in order + // to be counted. 
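+    //
+    // For illustration (hypothetical values, not generated documentation):
+    // counting executions started within the last 24 hours could use
+    //
+    //    StartTimeFilter: &swf.ExecutionTimeFilter{
+    //        OldestDate: aws.Time(time.Now().Add(-24 * time.Hour)),
+    //    },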
+ StartTimeFilter *ExecutionTimeFilter `locationName:"startTimeFilter" type:"structure" required:"true"` + + // If specified, only executions that have a tag that matches the filter are + // counted. + // + // executionFilter, typeFilter and tagFilter are mutually exclusive. You can + // specify at most one of these in a request. + TagFilter *TagFilter `locationName:"tagFilter" type:"structure"` + + // Specifies the type of the workflow executions to be counted. + // + // executionFilter, typeFilter and tagFilter are mutually exclusive. You can + // specify at most one of these in a request. + TypeFilter *WorkflowTypeFilter `locationName:"typeFilter" type:"structure"` +} + +// String returns the string representation +func (s CountOpenWorkflowExecutionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CountOpenWorkflowExecutionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CountOpenWorkflowExecutionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CountOpenWorkflowExecutionsInput"} + if s.Domain == nil { + invalidParams.Add(request.NewErrParamRequired("Domain")) + } + if s.Domain != nil && len(*s.Domain) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Domain", 1)) + } + if s.StartTimeFilter == nil { + invalidParams.Add(request.NewErrParamRequired("StartTimeFilter")) + } + if s.ExecutionFilter != nil { + if err := s.ExecutionFilter.Validate(); err != nil { + invalidParams.AddNested("ExecutionFilter", err.(request.ErrInvalidParams)) + } + } + if s.StartTimeFilter != nil { + if err := s.StartTimeFilter.Validate(); err != nil { + invalidParams.AddNested("StartTimeFilter", err.(request.ErrInvalidParams)) + } + } + if s.TagFilter != nil { + if err := s.TagFilter.Validate(); err != nil { + invalidParams.AddNested("TagFilter", err.(request.ErrInvalidParams)) + } + } + if s.TypeFilter != nil { + if err := s.TypeFilter.Validate(); err != nil { + invalidParams.AddNested("TypeFilter", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CountPendingActivityTasksInput struct { + _ struct{} `type:"structure"` + + // The name of the domain that contains the task list. + Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` + + // The name of the task list. + TaskList *TaskList `locationName:"taskList" type:"structure" required:"true"` +} + +// String returns the string representation +func (s CountPendingActivityTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CountPendingActivityTasksInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
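+//
+// For illustration only (the svc client and names below are hypothetical):
+//
+//    input := &swf.CountPendingActivityTasksInput{
+//        Domain:   aws.String("example-domain"),
+//        TaskList: &swf.TaskList{Name: aws.String("example-task-list")},
+//    }
+//    if err := input.Validate(); err != nil {
+//        // invalid parameters are reported without calling the service
+//    }
+//    count, err := svc.CountPendingActivityTasks(input)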
+func (s *CountPendingActivityTasksInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "CountPendingActivityTasksInput"}
+    if s.Domain == nil {
+        invalidParams.Add(request.NewErrParamRequired("Domain"))
+    }
+    if s.Domain != nil && len(*s.Domain) < 1 {
+        invalidParams.Add(request.NewErrParamMinLen("Domain", 1))
+    }
+    if s.TaskList == nil {
+        invalidParams.Add(request.NewErrParamRequired("TaskList"))
+    }
+    if s.TaskList != nil {
+        if err := s.TaskList.Validate(); err != nil {
+            invalidParams.AddNested("TaskList", err.(request.ErrInvalidParams))
+        }
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+type CountPendingDecisionTasksInput struct {
+    _ struct{} `type:"structure"`
+
+    // The name of the domain that contains the task list.
+    Domain *string `locationName:"domain" min:"1" type:"string" required:"true"`
+
+    // The name of the task list.
+    TaskList *TaskList `locationName:"taskList" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s CountPendingDecisionTasksInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CountPendingDecisionTasksInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CountPendingDecisionTasksInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "CountPendingDecisionTasksInput"}
+    if s.Domain == nil {
+        invalidParams.Add(request.NewErrParamRequired("Domain"))
+    }
+    if s.Domain != nil && len(*s.Domain) < 1 {
+        invalidParams.Add(request.NewErrParamMinLen("Domain", 1))
+    }
+    if s.TaskList == nil {
+        invalidParams.Add(request.NewErrParamRequired("TaskList"))
+    }
+    if s.TaskList != nil {
+        if err := s.TaskList.Validate(); err != nil {
+            invalidParams.AddNested("TaskList", err.(request.ErrInvalidParams))
+        }
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// Specifies a decision made by the decider. A decision can be one of these
+// types:
+//
+// CancelTimer: cancels a previously started timer and records a TimerCanceled
+// event in the history. CancelWorkflowExecution: closes the workflow execution
+// and records a WorkflowExecutionCanceled event in the history. CompleteWorkflowExecution:
+// closes the workflow execution and records a WorkflowExecutionCompleted event
+// in the history. ContinueAsNewWorkflowExecution: closes the workflow execution
+// and starts a new workflow execution of the same type using the same workflow
+// ID and a unique run ID. A WorkflowExecutionContinuedAsNew event is recorded
+// in the history. FailWorkflowExecution: closes the workflow execution and
+// records a WorkflowExecutionFailed event in the history. RecordMarker: records
+// a MarkerRecorded event in the history. Markers can be used for adding custom
+// information in the history, for instance to let deciders know that they do
+// not need to look at the history beyond the marker event. RequestCancelActivityTask:
+// attempts to cancel a previously scheduled activity task. If the activity
+// task was scheduled but has not been assigned to a worker, then it will be
+// canceled. If the activity task was already assigned to a worker, then the
+// worker will be informed that cancellation has been requested in the response
+// to RecordActivityTaskHeartbeat. RequestCancelExternalWorkflowExecution:
+// requests that a request be made to cancel the specified external workflow
+// execution and records a RequestCancelExternalWorkflowExecutionInitiated event
+// in the history. ScheduleActivityTask: schedules an activity task. ScheduleLambdaFunction:
+// schedules an AWS Lambda function. SignalExternalWorkflowExecution: requests
+// a signal to be delivered to the specified external workflow execution and
+// records a SignalExternalWorkflowExecutionInitiated event in the history.
+// StartChildWorkflowExecution: requests that a child workflow execution be
+// started and records a StartChildWorkflowExecutionInitiated event in the history.
+// The child workflow execution is a separate workflow execution with its own
+// history. StartTimer: starts a timer for this workflow execution and records
+// a TimerStarted event in the history. This timer will fire after the specified
+// delay and record a TimerFired event.
+//
+// Access Control
+//
+// If you grant permission to use RespondDecisionTaskCompleted, you can use
+// IAM policies to express permissions for the list of decisions returned by
+// this action as if they were members of the API. Treating decisions as a pseudo
+// API maintains a uniform conceptual model and helps keep policies readable.
+// For details and example IAM policies, see Using IAM to Manage Access to Amazon
+// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
+//
+// Decision Failure
+//
+// Decisions can fail for several reasons:
+//
+// The ordering of decisions should follow a logical flow. Some decisions
+// might not make sense in the current context of the workflow execution and
+// will therefore fail. A limit on your account was reached. The decision lacks
+// sufficient permissions. One of the following events might be added to the
+// history to indicate an error. The event attribute's cause parameter indicates
+// the cause. If cause is set to OPERATION_NOT_PERMITTED, the decision failed
+// because it lacked sufficient permissions. For details and example IAM policies,
+// see Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
+//
+// ScheduleActivityTaskFailed: a ScheduleActivityTask decision failed. This
+// could happen if the activity type specified in the decision is not registered,
+// is in a deprecated state, or the decision is not properly configured. ScheduleLambdaFunctionFailed:
+// a ScheduleLambdaFunction decision failed. This could happen if the
+// AWS Lambda function specified in the decision does not exist, or the AWS
+// Lambda service's limits are exceeded. RequestCancelActivityTaskFailed: a
+// RequestCancelActivityTask decision failed. This could happen if there is
+// no open activity task with the specified activityId. StartTimerFailed: a
+// StartTimer decision failed. This could happen if there is another open timer
+// with the same timerId. CancelTimerFailed: a CancelTimer decision failed.
+// This could happen if there is no open timer with the specified timerId.
+// StartChildWorkflowExecutionFailed: a StartChildWorkflowExecution decision
+// failed. This could happen if the workflow type specified is not registered,
+// is deprecated, or the decision is not properly configured. SignalExternalWorkflowExecutionFailed:
+// a SignalExternalWorkflowExecution decision failed. This could happen if the
+// workflowID specified in the decision was incorrect. RequestCancelExternalWorkflowExecutionFailed:
+// a RequestCancelExternalWorkflowExecution decision failed. This could happen
+// if the workflowID specified in the decision was incorrect. CancelWorkflowExecutionFailed:
+// a CancelWorkflowExecution decision failed. This could happen if there is
+// an unhandled decision task pending in the workflow execution. CompleteWorkflowExecutionFailed:
+// a CompleteWorkflowExecution decision failed. This could happen if there is
+// an unhandled decision task pending in the workflow execution. ContinueAsNewWorkflowExecutionFailed:
+// a ContinueAsNewWorkflowExecution decision failed. This could happen if there
+// is an unhandled decision task pending in the workflow execution or the ContinueAsNewWorkflowExecution
+// decision was not configured correctly. FailWorkflowExecutionFailed: a FailWorkflowExecution
+// decision failed. This could happen if there is an unhandled decision task
+// pending in the workflow execution. The preceding error events might occur
+// due to an error in the decider logic, which might put the workflow execution
+// in an unstable state. The cause field in the event structure for the error
+// event indicates the cause of the error.
+//
+// A workflow execution may be closed by the decider by returning one of the
+// following decisions when completing a decision task: CompleteWorkflowExecution,
+// FailWorkflowExecution, CancelWorkflowExecution and ContinueAsNewWorkflowExecution.
+// An UnhandledDecision fault will be returned if a workflow closing decision
+// is specified and a signal or activity event had been added to the history
+// while the decision task was being performed by the decider. Unlike the above
+// situations which are logic issues, this fault is always possible because
+// of race conditions in a distributed system. The right action here is to call
+// RespondDecisionTaskCompleted without any decisions. This would result in
+// another decision task with these new events included in the history. The
+// decider should handle the new events and may decide to close the workflow
+// execution.
+//
+// How to code a decision
+//
+// You code a decision by first setting the decision type field to one of the
+// above decision values, and then setting the corresponding attributes field
+// shown below:
+//
+// ScheduleActivityTaskDecisionAttributes ScheduleLambdaFunctionDecisionAttributes
+// RequestCancelActivityTaskDecisionAttributes CompleteWorkflowExecutionDecisionAttributes
+// FailWorkflowExecutionDecisionAttributes CancelWorkflowExecutionDecisionAttributes
+// ContinueAsNewWorkflowExecutionDecisionAttributes RecordMarkerDecisionAttributes
+// StartTimerDecisionAttributes CancelTimerDecisionAttributes SignalExternalWorkflowExecutionDecisionAttributes
+// RequestCancelExternalWorkflowExecutionDecisionAttributes StartChildWorkflowExecutionDecisionAttributes
+type Decision struct {
+    _ struct{} `type:"structure"`
+
+    // Provides details of the CancelTimer decision. It is not set for other decision
+    // types.
+    CancelTimerDecisionAttributes *CancelTimerDecisionAttributes `locationName:"cancelTimerDecisionAttributes" type:"structure"`
+
+    // Provides details of the CancelWorkflowExecution decision. It is not set for
+    // other decision types.
+    CancelWorkflowExecutionDecisionAttributes *CancelWorkflowExecutionDecisionAttributes `locationName:"cancelWorkflowExecutionDecisionAttributes" type:"structure"`
+
+    // Provides details of the CompleteWorkflowExecution decision. It is not set
+    // for other decision types.
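+    //
+    // For example (an illustrative sketch, not generated documentation), a
+    // decider completing a workflow with a hypothetical result value would
+    // code the decision as:
+    //
+    //    decision := &swf.Decision{
+    //        DecisionType: aws.String(swf.DecisionTypeCompleteWorkflowExecution),
+    //        CompleteWorkflowExecutionDecisionAttributes: &swf.CompleteWorkflowExecutionDecisionAttributes{
+    //            Result: aws.String("done"),
+    //        },
+    //    }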
+ CompleteWorkflowExecutionDecisionAttributes *CompleteWorkflowExecutionDecisionAttributes `locationName:"completeWorkflowExecutionDecisionAttributes" type:"structure"` + + // Provides details of the ContinueAsNewWorkflowExecution decision. It is not + // set for other decision types. + ContinueAsNewWorkflowExecutionDecisionAttributes *ContinueAsNewWorkflowExecutionDecisionAttributes `locationName:"continueAsNewWorkflowExecutionDecisionAttributes" type:"structure"` + + // Specifies the type of the decision. + DecisionType *string `locationName:"decisionType" type:"string" required:"true" enum:"DecisionType"` + + // Provides details of the FailWorkflowExecution decision. It is not set for + // other decision types. + FailWorkflowExecutionDecisionAttributes *FailWorkflowExecutionDecisionAttributes `locationName:"failWorkflowExecutionDecisionAttributes" type:"structure"` + + // Provides details of the RecordMarker decision. It is not set for other decision + // types. + RecordMarkerDecisionAttributes *RecordMarkerDecisionAttributes `locationName:"recordMarkerDecisionAttributes" type:"structure"` + + // Provides details of the RequestCancelActivityTask decision. It is not set + // for other decision types. + RequestCancelActivityTaskDecisionAttributes *RequestCancelActivityTaskDecisionAttributes `locationName:"requestCancelActivityTaskDecisionAttributes" type:"structure"` + + // Provides details of the RequestCancelExternalWorkflowExecution decision. + // It is not set for other decision types. + RequestCancelExternalWorkflowExecutionDecisionAttributes *RequestCancelExternalWorkflowExecutionDecisionAttributes `locationName:"requestCancelExternalWorkflowExecutionDecisionAttributes" type:"structure"` + + // Provides details of the ScheduleActivityTask decision. It is not set for + // other decision types. + ScheduleActivityTaskDecisionAttributes *ScheduleActivityTaskDecisionAttributes `locationName:"scheduleActivityTaskDecisionAttributes" type:"structure"` + + // Provides details of the ScheduleLambdaFunction decision. + // + // Access Control + // + // You can use IAM policies to control this decision's access to Amazon SWF + // resources as follows: + // + // Use a Resource element with the domain name to limit the action to only + // specified domains. Use an Action element to allow or deny permission to call + // this action. Constrain the following parameters by using a Condition element + // with the appropriate keys. activityType.name: String constraint. The key + // is swf:activityType.name. activityType.version: String constraint. The key + // is swf:activityType.version. taskList: String constraint. The key is swf:taskList.name. + // If the caller does not have sufficient permissions to invoke the action, + // or the parameter values fall outside the specified constraints, the action + // fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. + // For details and example IAM policies, see Using IAM to Manage Access to Amazon + // SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). + ScheduleLambdaFunctionDecisionAttributes *ScheduleLambdaFunctionDecisionAttributes `locationName:"scheduleLambdaFunctionDecisionAttributes" type:"structure"` + + // Provides details of the SignalExternalWorkflowExecution decision. It is not + // set for other decision types. 
+ SignalExternalWorkflowExecutionDecisionAttributes *SignalExternalWorkflowExecutionDecisionAttributes `locationName:"signalExternalWorkflowExecutionDecisionAttributes" type:"structure"` + + // Provides details of the StartChildWorkflowExecution decision. It is not set + // for other decision types. + StartChildWorkflowExecutionDecisionAttributes *StartChildWorkflowExecutionDecisionAttributes `locationName:"startChildWorkflowExecutionDecisionAttributes" type:"structure"` + + // Provides details of the StartTimer decision. It is not set for other decision + // types. + StartTimerDecisionAttributes *StartTimerDecisionAttributes `locationName:"startTimerDecisionAttributes" type:"structure"` +} + +// String returns the string representation +func (s Decision) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Decision) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Decision) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Decision"} + if s.DecisionType == nil { + invalidParams.Add(request.NewErrParamRequired("DecisionType")) + } + if s.CancelTimerDecisionAttributes != nil { + if err := s.CancelTimerDecisionAttributes.Validate(); err != nil { + invalidParams.AddNested("CancelTimerDecisionAttributes", err.(request.ErrInvalidParams)) + } + } + if s.ContinueAsNewWorkflowExecutionDecisionAttributes != nil { + if err := s.ContinueAsNewWorkflowExecutionDecisionAttributes.Validate(); err != nil { + invalidParams.AddNested("ContinueAsNewWorkflowExecutionDecisionAttributes", err.(request.ErrInvalidParams)) + } + } + if s.RecordMarkerDecisionAttributes != nil { + if err := s.RecordMarkerDecisionAttributes.Validate(); err != nil { + invalidParams.AddNested("RecordMarkerDecisionAttributes", err.(request.ErrInvalidParams)) + } + } + if s.RequestCancelActivityTaskDecisionAttributes != nil { + if err := s.RequestCancelActivityTaskDecisionAttributes.Validate(); err != nil { + invalidParams.AddNested("RequestCancelActivityTaskDecisionAttributes", err.(request.ErrInvalidParams)) + } + } + if s.RequestCancelExternalWorkflowExecutionDecisionAttributes != nil { + if err := s.RequestCancelExternalWorkflowExecutionDecisionAttributes.Validate(); err != nil { + invalidParams.AddNested("RequestCancelExternalWorkflowExecutionDecisionAttributes", err.(request.ErrInvalidParams)) + } + } + if s.ScheduleActivityTaskDecisionAttributes != nil { + if err := s.ScheduleActivityTaskDecisionAttributes.Validate(); err != nil { + invalidParams.AddNested("ScheduleActivityTaskDecisionAttributes", err.(request.ErrInvalidParams)) + } + } + if s.ScheduleLambdaFunctionDecisionAttributes != nil { + if err := s.ScheduleLambdaFunctionDecisionAttributes.Validate(); err != nil { + invalidParams.AddNested("ScheduleLambdaFunctionDecisionAttributes", err.(request.ErrInvalidParams)) + } + } + if s.SignalExternalWorkflowExecutionDecisionAttributes != nil { + if err := s.SignalExternalWorkflowExecutionDecisionAttributes.Validate(); err != nil { + invalidParams.AddNested("SignalExternalWorkflowExecutionDecisionAttributes", err.(request.ErrInvalidParams)) + } + } + if s.StartChildWorkflowExecutionDecisionAttributes != nil { + if err := s.StartChildWorkflowExecutionDecisionAttributes.Validate(); err != nil { + invalidParams.AddNested("StartChildWorkflowExecutionDecisionAttributes", err.(request.ErrInvalidParams)) + } + } + if s.StartTimerDecisionAttributes != nil { + if err := 
s.StartTimerDecisionAttributes.Validate(); err != nil {
+            invalidParams.AddNested("StartTimerDecisionAttributes", err.(request.ErrInvalidParams))
+        }
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// Provides details of the DecisionTaskCompleted event.
+type DecisionTaskCompletedEventAttributes struct {
+    _ struct{} `type:"structure"`
+
+    // User defined context for the workflow execution.
+    ExecutionContext *string `locationName:"executionContext" type:"string"`
+
+    // The ID of the DecisionTaskScheduled event that was recorded when this decision
+    // task was scheduled. This information can be useful for diagnosing problems
+    // by tracing back the chain of events leading up to this event.
+    ScheduledEventId *int64 `locationName:"scheduledEventId" type:"long" required:"true"`
+
+    // The ID of the DecisionTaskStarted event recorded when this decision task
+    // was started. This information can be useful for diagnosing problems by tracing
+    // back the chain of events leading up to this event.
+    StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"`
+}
+
+// String returns the string representation
+func (s DecisionTaskCompletedEventAttributes) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DecisionTaskCompletedEventAttributes) GoString() string {
+    return s.String()
+}
+
+// Provides details about the DecisionTaskScheduled event.
+type DecisionTaskScheduledEventAttributes struct {
+    _ struct{} `type:"structure"`
+
+    // The maximum duration for this decision task. The task is considered timed
+    // out if it is not completed within this duration.
+    //
+    // The duration is specified in seconds; an integer greater than or equal to
+    // 0. The value "NONE" can be used to specify unlimited duration.
+    StartToCloseTimeout *string `locationName:"startToCloseTimeout" type:"string"`
+
+    // The name of the task list in which the decision task was scheduled.
+    TaskList *TaskList `locationName:"taskList" type:"structure" required:"true"`
+
+    // Optional. A task priority that, if set, specifies the priority for this decision
+    // task. Valid values are integers that range from Java's Integer.MIN_VALUE
+    // (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate
+    // higher priority.
+    //
+    // For more information about setting task priority, see Setting Task Priority
+    // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html)
+    // in the Amazon Simple Workflow Developer Guide.
+    TaskPriority *string `locationName:"taskPriority" type:"string"`
+}
+
+// String returns the string representation
+func (s DecisionTaskScheduledEventAttributes) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DecisionTaskScheduledEventAttributes) GoString() string {
+    return s.String()
+}
+
+// Provides details of the DecisionTaskStarted event.
+type DecisionTaskStartedEventAttributes struct {
+    _ struct{} `type:"structure"`
+
+    // Identity of the decider making the request. This enables diagnostic tracing
+    // when problems arise. The form of this identity is user defined.
+    Identity *string `locationName:"identity" type:"string"`
+
+    // The ID of the DecisionTaskScheduled event that was recorded when this decision
+    // task was scheduled. This information can be useful for diagnosing problems
+    // by tracing back the chain of events leading up to this event.
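+    //
+    // For illustration (the events slice and attrs value are hypothetical):
+    // the corresponding DecisionTaskScheduled event can be found by matching IDs:
+    //
+    //    for _, ev := range events {
+    //        if aws.Int64Value(ev.EventId) == aws.Int64Value(attrs.ScheduledEventId) {
+    //            // ev is the DecisionTaskScheduled event for this decision task
+    //        }
+    //    }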
+ ScheduledEventId *int64 `locationName:"scheduledEventId" type:"long" required:"true"` +} + +// String returns the string representation +func (s DecisionTaskStartedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecisionTaskStartedEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the DecisionTaskTimedOut event. +type DecisionTaskTimedOutEventAttributes struct { + _ struct{} `type:"structure"` + + // The ID of the DecisionTaskScheduled event that was recorded when this decision + // task was scheduled. This information can be useful for diagnosing problems + // by tracing back the chain of events leading up to this event. + ScheduledEventId *int64 `locationName:"scheduledEventId" type:"long" required:"true"` + + // The ID of the DecisionTaskStarted event recorded when this decision task + // was started. This information can be useful for diagnosing problems by tracing + // back the chain of events leading up to this event. + StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` + + // The type of timeout that expired before the decision task could be completed. + TimeoutType *string `locationName:"timeoutType" type:"string" required:"true" enum:"DecisionTaskTimeoutType"` +} + +// String returns the string representation +func (s DecisionTaskTimedOutEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecisionTaskTimedOutEventAttributes) GoString() string { + return s.String() +} + +type DeprecateActivityTypeInput struct { + _ struct{} `type:"structure"` + + // The activity type to deprecate. + ActivityType *ActivityType `locationName:"activityType" type:"structure" required:"true"` + + // The name of the domain in which the activity type is registered. + Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeprecateActivityTypeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeprecateActivityTypeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeprecateActivityTypeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeprecateActivityTypeInput"} + if s.ActivityType == nil { + invalidParams.Add(request.NewErrParamRequired("ActivityType")) + } + if s.Domain == nil { + invalidParams.Add(request.NewErrParamRequired("Domain")) + } + if s.Domain != nil && len(*s.Domain) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Domain", 1)) + } + if s.ActivityType != nil { + if err := s.ActivityType.Validate(); err != nil { + invalidParams.AddNested("ActivityType", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeprecateActivityTypeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeprecateActivityTypeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeprecateActivityTypeOutput) GoString() string { + return s.String() +} + +type DeprecateDomainInput struct { + _ struct{} `type:"structure"` + + // The name of the domain to deprecate. 
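+    //
+    // For illustration only (the svc client and domain name are hypothetical):
+    //
+    //    _, err := svc.DeprecateDomain(&swf.DeprecateDomainInput{
+    //        Name: aws.String("example-domain"),
+    //    })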
+ Name *string `locationName:"name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeprecateDomainInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeprecateDomainInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeprecateDomainInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeprecateDomainInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeprecateDomainOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeprecateDomainOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeprecateDomainOutput) GoString() string { + return s.String() +} + +type DeprecateWorkflowTypeInput struct { + _ struct{} `type:"structure"` + + // The name of the domain in which the workflow type is registered. + Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` + + // The workflow type to deprecate. + WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` +} + +// String returns the string representation +func (s DeprecateWorkflowTypeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeprecateWorkflowTypeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeprecateWorkflowTypeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeprecateWorkflowTypeInput"} + if s.Domain == nil { + invalidParams.Add(request.NewErrParamRequired("Domain")) + } + if s.Domain != nil && len(*s.Domain) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Domain", 1)) + } + if s.WorkflowType == nil { + invalidParams.Add(request.NewErrParamRequired("WorkflowType")) + } + if s.WorkflowType != nil { + if err := s.WorkflowType.Validate(); err != nil { + invalidParams.AddNested("WorkflowType", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeprecateWorkflowTypeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeprecateWorkflowTypeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeprecateWorkflowTypeOutput) GoString() string { + return s.String() +} + +type DescribeActivityTypeInput struct { + _ struct{} `type:"structure"` + + // The activity type to get information about. Activity types are identified + // by the name and version that were supplied when the activity was registered. + ActivityType *ActivityType `locationName:"activityType" type:"structure" required:"true"` + + // The name of the domain in which the activity type is registered. 
+    Domain *string `locationName:"domain" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeActivityTypeInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeActivityTypeInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeActivityTypeInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "DescribeActivityTypeInput"}
+    if s.ActivityType == nil {
+        invalidParams.Add(request.NewErrParamRequired("ActivityType"))
+    }
+    if s.Domain == nil {
+        invalidParams.Add(request.NewErrParamRequired("Domain"))
+    }
+    if s.Domain != nil && len(*s.Domain) < 1 {
+        invalidParams.Add(request.NewErrParamMinLen("Domain", 1))
+    }
+    if s.ActivityType != nil {
+        if err := s.ActivityType.Validate(); err != nil {
+            invalidParams.AddNested("ActivityType", err.(request.ErrInvalidParams))
+        }
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// Detailed information about an activity type.
+type DescribeActivityTypeOutput struct {
+    _ struct{} `type:"structure"`
+
+    // The configuration settings registered with the activity type.
+    Configuration *ActivityTypeConfiguration `locationName:"configuration" type:"structure" required:"true"`
+
+    // General information about the activity type.
+    //
+    // The status of the activity type (returned in the ActivityTypeInfo structure)
+    // can be one of the following.
+    //
+    // REGISTERED: The type is registered and available. Workers supporting this
+    // type should be running. DEPRECATED: The type was deprecated using DeprecateActivityType,
+    // but is still in use. You should keep workers supporting this type running.
+    // You cannot create new tasks of this type.
+    TypeInfo *ActivityTypeInfo `locationName:"typeInfo" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeActivityTypeOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeActivityTypeOutput) GoString() string {
+    return s.String()
+}
+
+type DescribeDomainInput struct {
+    _ struct{} `type:"structure"`
+
+    // The name of the domain to describe.
+    Name *string `locationName:"name" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeDomainInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeDomainInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeDomainInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "DescribeDomainInput"}
+    if s.Name == nil {
+        invalidParams.Add(request.NewErrParamRequired("Name"))
+    }
+    if s.Name != nil && len(*s.Name) < 1 {
+        invalidParams.Add(request.NewErrParamMinLen("Name", 1))
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// Contains details of a domain.
+type DescribeDomainOutput struct {
+    _ struct{} `type:"structure"`
+
+    // Contains the configuration settings of a domain.
+    Configuration *DomainConfiguration `locationName:"configuration" type:"structure" required:"true"`
+
+    // Contains general information about a domain.
+    DomainInfo *DomainInfo `locationName:"domainInfo" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeDomainOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeDomainOutput) GoString() string {
+    return s.String()
+}
+
+type DescribeWorkflowExecutionInput struct {
+    _ struct{} `type:"structure"`
+
+    // The name of the domain containing the workflow execution.
+    Domain *string `locationName:"domain" min:"1" type:"string" required:"true"`
+
+    // The workflow execution to describe.
+    Execution *WorkflowExecution `locationName:"execution" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeWorkflowExecutionInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeWorkflowExecutionInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeWorkflowExecutionInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "DescribeWorkflowExecutionInput"}
+    if s.Domain == nil {
+        invalidParams.Add(request.NewErrParamRequired("Domain"))
+    }
+    if s.Domain != nil && len(*s.Domain) < 1 {
+        invalidParams.Add(request.NewErrParamMinLen("Domain", 1))
+    }
+    if s.Execution == nil {
+        invalidParams.Add(request.NewErrParamRequired("Execution"))
+    }
+    if s.Execution != nil {
+        if err := s.Execution.Validate(); err != nil {
+            invalidParams.AddNested("Execution", err.(request.ErrInvalidParams))
+        }
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// Contains details about a workflow execution.
+type DescribeWorkflowExecutionOutput struct {
+    _ struct{} `type:"structure"`
+
+    // The configuration settings for this workflow execution including timeout
+    // values, task list, and so on.
+    ExecutionConfiguration *WorkflowExecutionConfiguration `locationName:"executionConfiguration" type:"structure" required:"true"`
+
+    // Information about the workflow execution.
+    ExecutionInfo *WorkflowExecutionInfo `locationName:"executionInfo" type:"structure" required:"true"`
+
+    // The time when the last activity task was scheduled for this workflow execution.
+    // You can use this information to determine if the workflow has not made progress
+    // for an unusually long period of time and might require a corrective action.
+    LatestActivityTaskTimestamp *time.Time `locationName:"latestActivityTaskTimestamp" type:"timestamp" timestampFormat:"unix"`
+
+    // The latest executionContext provided by the decider for this workflow execution.
+    // A decider can provide an executionContext (a free-form string) when closing
+    // a decision task using RespondDecisionTaskCompleted.
+    LatestExecutionContext *string `locationName:"latestExecutionContext" type:"string"`
+
+    // The number of tasks for this workflow execution. This includes open and closed
+    // tasks of all types.
+    OpenCounts *WorkflowExecutionOpenCounts `locationName:"openCounts" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeWorkflowExecutionOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeWorkflowExecutionOutput) GoString() string {
+    return s.String()
+}
+
+type DescribeWorkflowTypeInput struct {
+    _ struct{} `type:"structure"`
+
+    // The name of the domain in which this workflow type is registered.
+    Domain *string `locationName:"domain" min:"1" type:"string" required:"true"`
+
+    // The workflow type to describe.
+    WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeWorkflowTypeInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeWorkflowTypeInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeWorkflowTypeInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "DescribeWorkflowTypeInput"}
+    if s.Domain == nil {
+        invalidParams.Add(request.NewErrParamRequired("Domain"))
+    }
+    if s.Domain != nil && len(*s.Domain) < 1 {
+        invalidParams.Add(request.NewErrParamMinLen("Domain", 1))
+    }
+    if s.WorkflowType == nil {
+        invalidParams.Add(request.NewErrParamRequired("WorkflowType"))
+    }
+    if s.WorkflowType != nil {
+        if err := s.WorkflowType.Validate(); err != nil {
+            invalidParams.AddNested("WorkflowType", err.(request.ErrInvalidParams))
+        }
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
+// Contains details about a workflow type.
+type DescribeWorkflowTypeOutput struct {
+    _ struct{} `type:"structure"`
+
+    // Configuration settings of the workflow type registered through RegisterWorkflowType.
+    Configuration *WorkflowTypeConfiguration `locationName:"configuration" type:"structure" required:"true"`
+
+    // General information about the workflow type.
+    //
+    // The status of the workflow type (returned in the WorkflowTypeInfo structure)
+    // can be one of the following.
+    //
+    // REGISTERED: The type is registered and available. Workers supporting this
+    // type should be running. DEPRECATED: The type was deprecated using DeprecateWorkflowType,
+    // but is still in use. You should keep workers supporting this type running.
+    // You cannot create new workflow executions of this type.
+    TypeInfo *WorkflowTypeInfo `locationName:"typeInfo" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeWorkflowTypeOutput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeWorkflowTypeOutput) GoString() string {
+    return s.String()
+}
+
+// Contains the configuration settings of a domain.
+type DomainConfiguration struct {
+    _ struct{} `type:"structure"`
+
+    // The retention period for workflow executions in this domain.
+    WorkflowExecutionRetentionPeriodInDays *string `locationName:"workflowExecutionRetentionPeriodInDays" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DomainConfiguration) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DomainConfiguration) GoString() string {
+    return s.String()
+}
+
+// Contains general information about a domain.
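+//
+// For illustration only (the svc client and domain name are hypothetical),
+// the registration status can be read from a DescribeDomain result:
+//
+//    out, err := svc.DescribeDomain(&swf.DescribeDomainInput{
+//        Name: aws.String("example-domain"),
+//    })
+//    if err == nil && aws.StringValue(out.DomainInfo.Status) == swf.RegistrationStatusRegistered {
+//        // the domain is available for new workflow executions
+//    }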
+type DomainInfo struct { + _ struct{} `type:"structure"` + + // The description of the domain provided through RegisterDomain. + Description *string `locationName:"description" type:"string"` + + // The name of the domain. This name is unique within the account. + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // The status of the domain: + // + // REGISTERED: The domain is properly registered and available. You can use + // this domain for registering types and creating new workflow executions. + // DEPRECATED: The domain was deprecated using DeprecateDomain, but is still + // in use. You should not create new workflow executions in this domain. + Status *string `locationName:"status" type:"string" required:"true" enum:"RegistrationStatus"` +} + +// String returns the string representation +func (s DomainInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DomainInfo) GoString() string { + return s.String() +} + +// Used to filter the workflow executions in visibility APIs by various time-based +// rules. Each parameter, if specified, defines a rule that must be satisfied +// by each returned query result. The parameter values are in the Unix Time +// format (https://en.wikipedia.org/wiki/Unix_time). For example: "oldestDate": +// 1325376070. +type ExecutionTimeFilter struct { + _ struct{} `type:"structure"` + + // Specifies the latest start or close date and time to return. + LatestDate *time.Time `locationName:"latestDate" type:"timestamp" timestampFormat:"unix"` + + // Specifies the oldest start or close date and time to return. + OldestDate *time.Time `locationName:"oldestDate" type:"timestamp" timestampFormat:"unix" required:"true"` +} + +// String returns the string representation +func (s ExecutionTimeFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExecutionTimeFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ExecutionTimeFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExecutionTimeFilter"} + if s.OldestDate == nil { + invalidParams.Add(request.NewErrParamRequired("OldestDate")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Provides details of the ExternalWorkflowExecutionCancelRequested event. +type ExternalWorkflowExecutionCancelRequestedEventAttributes struct { + _ struct{} `type:"structure"` + + // The ID of the RequestCancelExternalWorkflowExecutionInitiated event corresponding + // to the RequestCancelExternalWorkflowExecution decision to cancel this external + // workflow execution. This information can be useful for diagnosing problems + // by tracing back the chain of events leading up to this event. + InitiatedEventId *int64 `locationName:"initiatedEventId" type:"long" required:"true"` + + // The external workflow execution to which the cancellation request was delivered. 
+ WorkflowExecution *WorkflowExecution `locationName:"workflowExecution" type:"structure" required:"true"` +} + +// String returns the string representation +func (s ExternalWorkflowExecutionCancelRequestedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExternalWorkflowExecutionCancelRequestedEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the ExternalWorkflowExecutionSignaled event. +type ExternalWorkflowExecutionSignaledEventAttributes struct { + _ struct{} `type:"structure"` + + // The ID of the SignalExternalWorkflowExecutionInitiated event corresponding + // to the SignalExternalWorkflowExecution decision to request this signal. This + // information can be useful for diagnosing problems by tracing back the chain + // of events leading up to this event. + InitiatedEventId *int64 `locationName:"initiatedEventId" type:"long" required:"true"` + + // The external workflow execution that the signal was delivered to. + WorkflowExecution *WorkflowExecution `locationName:"workflowExecution" type:"structure" required:"true"` +} + +// String returns the string representation +func (s ExternalWorkflowExecutionSignaledEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExternalWorkflowExecutionSignaledEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the FailWorkflowExecution decision. +// +// Access Control +// +// You can use IAM policies to control this decision's access to Amazon SWF +// resources as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. You cannot use an IAM policy to constrain this action's parameters. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +type FailWorkflowExecutionDecisionAttributes struct { + _ struct{} `type:"structure"` + + // Optional. Details of the failure. + Details *string `locationName:"details" type:"string"` + + // A descriptive reason for the failure that may help in diagnostics. + Reason *string `locationName:"reason" type:"string"` +} + +// String returns the string representation +func (s FailWorkflowExecutionDecisionAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FailWorkflowExecutionDecisionAttributes) GoString() string { + return s.String() +} + +// Provides details of the FailWorkflowExecutionFailed event. +type FailWorkflowExecutionFailedEventAttributes struct { + _ struct{} `type:"structure"` + + // The cause of the failure. This information is generated by the system and + // can be useful for diagnostic purposes. + // + // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because + // it lacked sufficient permissions. For details and example IAM policies, see + // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). 
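+    //
+    // For illustration (the attrs value is hypothetical), a decider replaying
+    // history can branch on this value:
+    //
+    //    if aws.StringValue(attrs.Cause) == swf.FailWorkflowExecutionFailedCauseOperationNotPermitted {
+    //        // the decision was rejected for lack of IAM permissions
+    //    }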
+    Cause *string `locationName:"cause" type:"string" required:"true" enum:"FailWorkflowExecutionFailedCause"`
+
+    // The ID of the DecisionTaskCompleted event corresponding to the decision task
+    // that resulted in the FailWorkflowExecution decision to fail this execution.
+    // This information can be useful for diagnosing problems by tracing back the
+    // chain of events leading up to this event.
+    DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"`
+}
+
+// String returns the string representation
+func (s FailWorkflowExecutionFailedEventAttributes) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s FailWorkflowExecutionFailedEventAttributes) GoString() string {
+    return s.String()
+}
+
+type GetWorkflowExecutionHistoryInput struct {
+    _ struct{} `type:"structure"`
+
+    // The name of the domain containing the workflow execution.
+    Domain *string `locationName:"domain" min:"1" type:"string" required:"true"`
+
+    // Specifies the workflow execution for which to return the history.
+    Execution *WorkflowExecution `locationName:"execution" type:"structure" required:"true"`
+
+    // The maximum number of results that will be returned per call. nextPageToken
+    // can be used to obtain further pages of results. The default is 1000, which
+    // is the maximum allowed page size. You can, however, specify a page size smaller
+    // than the maximum.
+    //
+    // This is an upper limit only; the actual number of results returned per call
+    // may be fewer than the specified maximum.
+    MaximumPageSize *int64 `locationName:"maximumPageSize" type:"integer"`
+
+    // If a NextPageToken was returned by a previous call, there are more results
+    // available. To retrieve the next page of results, make the call again using
+    // the returned token in nextPageToken. Keep all other arguments unchanged.
+    //
+    // The configured maximumPageSize determines how many results can be returned
+    // in a single call.
+    NextPageToken *string `locationName:"nextPageToken" type:"string"`
+
+    // When set to true, returns the events in reverse order. By default the results
+    // are returned in ascending order of the eventTimestamp of the events.
+    ReverseOrder *bool `locationName:"reverseOrder" type:"boolean"`
+}
+
+// String returns the string representation
+func (s GetWorkflowExecutionHistoryInput) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetWorkflowExecutionHistoryInput) GoString() string {
+    return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetWorkflowExecutionHistoryInput) Validate() error {
+    invalidParams := request.ErrInvalidParams{Context: "GetWorkflowExecutionHistoryInput"}
+    if s.Domain == nil {
+        invalidParams.Add(request.NewErrParamRequired("Domain"))
+    }
+    if s.Domain != nil && len(*s.Domain) < 1 {
+        invalidParams.Add(request.NewErrParamMinLen("Domain", 1))
+    }
+    if s.Execution == nil {
+        invalidParams.Add(request.NewErrParamRequired("Execution"))
+    }
+    if s.Execution != nil {
+        if err := s.Execution.Validate(); err != nil {
+            invalidParams.AddNested("Execution", err.(request.ErrInvalidParams))
+        }
+    }
+
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    }
+    return nil
+}
+
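+// For illustration only (not generated documentation): a hypothetical caller
+// can walk the complete history by feeding NextPageToken back into the input:
+//
+//    in := &swf.GetWorkflowExecutionHistoryInput{
+//        Domain: aws.String("example-domain"),
+//        Execution: &swf.WorkflowExecution{
+//            WorkflowId: aws.String("example-workflow-id"),
+//            RunId:      aws.String("example-run-id"),
+//        },
+//    }
+//    for {
+//        out, err := svc.GetWorkflowExecutionHistory(in)
+//        if err != nil {
+//            break // handle the error in real code
+//        }
+//        // consume out.Events ...
+//        if out.NextPageToken == nil {
+//            break
+//        }
+//        in.NextPageToken = out.NextPageToken
+//    }
+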
+
+// Paginated representation of a workflow history for a workflow execution.
+// This is the up-to-date, complete, and authoritative record of the events
+// related to all tasks and events in the life of the workflow execution.
+type GetWorkflowExecutionHistoryOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The list of history events.
+	Events []*HistoryEvent `locationName:"events" type:"list" required:"true"`
+
+	// If a NextPageToken was returned by a previous call, there are more results
+	// available. To retrieve the next page of results, make the call again using
+	// the returned token in nextPageToken. Keep all other arguments unchanged.
+	//
+	// The configured maximumPageSize determines how many results can be returned
+	// in a single call.
+	NextPageToken *string `locationName:"nextPageToken" type:"string"`
+}
+
+// String returns the string representation
+func (s GetWorkflowExecutionHistoryOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetWorkflowExecutionHistoryOutput) GoString() string {
+	return s.String()
+}
+
+// Event within a workflow execution. A history event can be one of these types:
+//
+//   * WorkflowExecutionStarted: The workflow execution was started.
+//   * WorkflowExecutionCompleted: The workflow execution was closed due to
+//   successful completion.
+//   * WorkflowExecutionFailed: The workflow execution closed due to a failure.
+//   * WorkflowExecutionTimedOut: The workflow execution was closed because
+//   a time out was exceeded.
+//   * WorkflowExecutionCanceled: The workflow execution was successfully canceled
+//   and closed.
+//   * WorkflowExecutionTerminated: The workflow execution was terminated.
+//   * WorkflowExecutionContinuedAsNew: The workflow execution was closed and
+//   a new execution of the same type was created with the same workflowId.
+//   * WorkflowExecutionCancelRequested: A request to cancel this workflow execution
+//   was made.
+//   * DecisionTaskScheduled: A decision task was scheduled for the workflow
+//   execution.
+//   * DecisionTaskStarted: The decision task was dispatched to a decider.
+//   * DecisionTaskCompleted: The decider successfully completed a decision
+//   task by calling RespondDecisionTaskCompleted.
+//   * DecisionTaskTimedOut: The decision task timed out.
+//   * ActivityTaskScheduled: An activity task was scheduled for execution.
+//   * ScheduleActivityTaskFailed: Failed to process ScheduleActivityTask decision.
+//   This happens when the decision is not configured properly, for example
+//   the activity type specified is not registered.
+//   * ActivityTaskStarted: The scheduled activity task was dispatched to a
+//   worker.
+//   * ActivityTaskCompleted: An activity worker successfully completed an activity
+//   task by calling RespondActivityTaskCompleted.
+//   * ActivityTaskFailed: An activity worker failed an activity task by calling
+//   RespondActivityTaskFailed.
+//   * ActivityTaskTimedOut: The activity task timed out.
+//   * ActivityTaskCanceled: The activity task was successfully canceled.
+//   * ActivityTaskCancelRequested: A RequestCancelActivityTask decision was
+//   received by the system.
+//   * RequestCancelActivityTaskFailed: Failed to process RequestCancelActivityTask
+//   decision. This happens when the decision is not configured properly.
+//   * WorkflowExecutionSignaled: An external signal was received for the workflow
+//   execution.
+//   * MarkerRecorded: A marker was recorded in the workflow history as the
+//   result of a RecordMarker decision.
+//   * TimerStarted: A timer was started for the workflow execution due to a
+//   StartTimer decision.
+//   * StartTimerFailed: Failed to process StartTimer decision. This happens
+//   when the decision is not configured properly, for example a timer already
+//   exists with the specified timer ID.
+//   * TimerFired: A timer, previously started for this workflow execution,
+//   fired.
+//   * TimerCanceled: A timer, previously started for this workflow execution,
+//   was successfully canceled.
+//   * CancelTimerFailed: Failed to process CancelTimer decision. This happens
+//   when the decision is not configured properly, for example no timer exists
+//   with the specified timer ID.
+//   * StartChildWorkflowExecutionInitiated: A request was made to start a
+//   child workflow execution.
+//   * StartChildWorkflowExecutionFailed: Failed to process StartChildWorkflowExecution
+//   decision. This happens when the decision is not configured properly, for
+//   example the workflow type specified is not registered.
+//   * ChildWorkflowExecutionStarted: A child workflow execution was successfully
+//   started.
+//   * ChildWorkflowExecutionCompleted: A child workflow execution, started
+//   by this workflow execution, completed successfully and was closed.
+//   * ChildWorkflowExecutionFailed: A child workflow execution, started by
+//   this workflow execution, failed to complete successfully and was closed.
+//   * ChildWorkflowExecutionTimedOut: A child workflow execution, started
+//   by this workflow execution, timed out and was closed.
+//   * ChildWorkflowExecutionCanceled: A child workflow execution, started
+//   by this workflow execution, was canceled and closed.
+//   * ChildWorkflowExecutionTerminated: A child workflow execution, started
+//   by this workflow execution, was terminated.
+//   * SignalExternalWorkflowExecutionInitiated: A request to signal an external
+//   workflow was made.
+//   * ExternalWorkflowExecutionSignaled: A signal, requested by this workflow
+//   execution, was successfully delivered to the target external workflow
+//   execution.
+//   * SignalExternalWorkflowExecutionFailed: The request to signal an external
+//   workflow execution failed.
+//   * RequestCancelExternalWorkflowExecutionInitiated: A request was made
+//   to request the cancellation of an external workflow execution.
+//   * ExternalWorkflowExecutionCancelRequested: Request to cancel an external
+//   workflow execution was successfully delivered to the target execution.
+//   * RequestCancelExternalWorkflowExecutionFailed: Request to cancel an external
+//   workflow execution failed.
+//   * LambdaFunctionScheduled: An AWS Lambda function was scheduled for execution.
+//   * LambdaFunctionStarted: The scheduled function was invoked in the AWS
+//   Lambda service.
+//   * LambdaFunctionCompleted: The AWS Lambda function successfully completed.
+//   * LambdaFunctionFailed: The AWS Lambda function execution failed.
+//   * LambdaFunctionTimedOut: The AWS Lambda function execution timed out.
+//   * ScheduleLambdaFunctionFailed: Failed to process ScheduleLambdaFunction
+//   decision. This happens when the workflow execution does not have the proper
+//   IAM role attached to invoke AWS Lambda functions.
+//   * StartLambdaFunctionFailed: Failed to invoke the scheduled function in
+//   the AWS Lambda service. This happens when the AWS Lambda service is not
+//   available in the current region, or received too many requests.
+type HistoryEvent struct {
+	_ struct{} `type:"structure"`
+
+	// If the event is of type ActivityTaskCancelRequested then this member is
+	// set and provides detailed information about the event. It is not set for
+	// other event types.
+ ActivityTaskCancelRequestedEventAttributes *ActivityTaskCancelRequestedEventAttributes `locationName:"activityTaskCancelRequestedEventAttributes" type:"structure"` + + // If the event is of type ActivityTaskCanceled then this member is set and + // provides detailed information about the event. It is not set for other event + // types. + ActivityTaskCanceledEventAttributes *ActivityTaskCanceledEventAttributes `locationName:"activityTaskCanceledEventAttributes" type:"structure"` + + // If the event is of type ActivityTaskCompleted then this member is set and + // provides detailed information about the event. It is not set for other event + // types. + ActivityTaskCompletedEventAttributes *ActivityTaskCompletedEventAttributes `locationName:"activityTaskCompletedEventAttributes" type:"structure"` + + // If the event is of type ActivityTaskFailed then this member is set and provides + // detailed information about the event. It is not set for other event types. + ActivityTaskFailedEventAttributes *ActivityTaskFailedEventAttributes `locationName:"activityTaskFailedEventAttributes" type:"structure"` + + // If the event is of type ActivityTaskScheduled then this member is set and + // provides detailed information about the event. It is not set for other event + // types. + ActivityTaskScheduledEventAttributes *ActivityTaskScheduledEventAttributes `locationName:"activityTaskScheduledEventAttributes" type:"structure"` + + // If the event is of type ActivityTaskStarted then this member is set and provides + // detailed information about the event. It is not set for other event types. + ActivityTaskStartedEventAttributes *ActivityTaskStartedEventAttributes `locationName:"activityTaskStartedEventAttributes" type:"structure"` + + // If the event is of type ActivityTaskTimedOut then this member is set and + // provides detailed information about the event. It is not set for other event + // types. + ActivityTaskTimedOutEventAttributes *ActivityTaskTimedOutEventAttributes `locationName:"activityTaskTimedOutEventAttributes" type:"structure"` + + // If the event is of type CancelTimerFailed then this member is set and provides + // detailed information about the event. It is not set for other event types. + CancelTimerFailedEventAttributes *CancelTimerFailedEventAttributes `locationName:"cancelTimerFailedEventAttributes" type:"structure"` + + // If the event is of type CancelWorkflowExecutionFailed then this member is + // set and provides detailed information about the event. It is not set for + // other event types. + CancelWorkflowExecutionFailedEventAttributes *CancelWorkflowExecutionFailedEventAttributes `locationName:"cancelWorkflowExecutionFailedEventAttributes" type:"structure"` + + // If the event is of type ChildWorkflowExecutionCanceled then this member is + // set and provides detailed information about the event. It is not set for + // other event types. + ChildWorkflowExecutionCanceledEventAttributes *ChildWorkflowExecutionCanceledEventAttributes `locationName:"childWorkflowExecutionCanceledEventAttributes" type:"structure"` + + // If the event is of type ChildWorkflowExecutionCompleted then this member + // is set and provides detailed information about the event. It is not set for + // other event types. 
+ ChildWorkflowExecutionCompletedEventAttributes *ChildWorkflowExecutionCompletedEventAttributes `locationName:"childWorkflowExecutionCompletedEventAttributes" type:"structure"` + + // If the event is of type ChildWorkflowExecutionFailed then this member is + // set and provides detailed information about the event. It is not set for + // other event types. + ChildWorkflowExecutionFailedEventAttributes *ChildWorkflowExecutionFailedEventAttributes `locationName:"childWorkflowExecutionFailedEventAttributes" type:"structure"` + + // If the event is of type ChildWorkflowExecutionStarted then this member is + // set and provides detailed information about the event. It is not set for + // other event types. + ChildWorkflowExecutionStartedEventAttributes *ChildWorkflowExecutionStartedEventAttributes `locationName:"childWorkflowExecutionStartedEventAttributes" type:"structure"` + + // If the event is of type ChildWorkflowExecutionTerminated then this member + // is set and provides detailed information about the event. It is not set for + // other event types. + ChildWorkflowExecutionTerminatedEventAttributes *ChildWorkflowExecutionTerminatedEventAttributes `locationName:"childWorkflowExecutionTerminatedEventAttributes" type:"structure"` + + // If the event is of type ChildWorkflowExecutionTimedOut then this member is + // set and provides detailed information about the event. It is not set for + // other event types. + ChildWorkflowExecutionTimedOutEventAttributes *ChildWorkflowExecutionTimedOutEventAttributes `locationName:"childWorkflowExecutionTimedOutEventAttributes" type:"structure"` + + // If the event is of type CompleteWorkflowExecutionFailed then this member + // is set and provides detailed information about the event. It is not set for + // other event types. + CompleteWorkflowExecutionFailedEventAttributes *CompleteWorkflowExecutionFailedEventAttributes `locationName:"completeWorkflowExecutionFailedEventAttributes" type:"structure"` + + // If the event is of type ContinueAsNewWorkflowExecutionFailed then this member + // is set and provides detailed information about the event. It is not set for + // other event types. + ContinueAsNewWorkflowExecutionFailedEventAttributes *ContinueAsNewWorkflowExecutionFailedEventAttributes `locationName:"continueAsNewWorkflowExecutionFailedEventAttributes" type:"structure"` + + // If the event is of type DecisionTaskCompleted then this member is set and + // provides detailed information about the event. It is not set for other event + // types. + DecisionTaskCompletedEventAttributes *DecisionTaskCompletedEventAttributes `locationName:"decisionTaskCompletedEventAttributes" type:"structure"` + + // If the event is of type DecisionTaskScheduled then this member is set and + // provides detailed information about the event. It is not set for other event + // types. + DecisionTaskScheduledEventAttributes *DecisionTaskScheduledEventAttributes `locationName:"decisionTaskScheduledEventAttributes" type:"structure"` + + // If the event is of type DecisionTaskStarted then this member is set and provides + // detailed information about the event. It is not set for other event types. + DecisionTaskStartedEventAttributes *DecisionTaskStartedEventAttributes `locationName:"decisionTaskStartedEventAttributes" type:"structure"` + + // If the event is of type DecisionTaskTimedOut then this member is set and + // provides detailed information about the event. It is not set for other event + // types. 
+	DecisionTaskTimedOutEventAttributes *DecisionTaskTimedOutEventAttributes `locationName:"decisionTaskTimedOutEventAttributes" type:"structure"`
+
+	// The system generated ID of the event. This ID uniquely identifies the event
+	// within the workflow execution history.
+	EventId *int64 `locationName:"eventId" type:"long" required:"true"`
+
+	// The date and time when the event occurred.
+	EventTimestamp *time.Time `locationName:"eventTimestamp" type:"timestamp" timestampFormat:"unix" required:"true"`
+
+	// The type of the history event.
+	EventType *string `locationName:"eventType" type:"string" required:"true" enum:"EventType"`
+
+	// If the event is of type ExternalWorkflowExecutionCancelRequested then this
+	// member is set and provides detailed information about the event. It is not
+	// set for other event types.
+	ExternalWorkflowExecutionCancelRequestedEventAttributes *ExternalWorkflowExecutionCancelRequestedEventAttributes `locationName:"externalWorkflowExecutionCancelRequestedEventAttributes" type:"structure"`
+
+	// If the event is of type ExternalWorkflowExecutionSignaled then this member
+	// is set and provides detailed information about the event. It is not set for
+	// other event types.
+	ExternalWorkflowExecutionSignaledEventAttributes *ExternalWorkflowExecutionSignaledEventAttributes `locationName:"externalWorkflowExecutionSignaledEventAttributes" type:"structure"`
+
+	// If the event is of type FailWorkflowExecutionFailed then this member is set
+	// and provides detailed information about the event. It is not set for other
+	// event types.
+	FailWorkflowExecutionFailedEventAttributes *FailWorkflowExecutionFailedEventAttributes `locationName:"failWorkflowExecutionFailedEventAttributes" type:"structure"`
+
+	// Provides details for the LambdaFunctionCompleted event.
+	LambdaFunctionCompletedEventAttributes *LambdaFunctionCompletedEventAttributes `locationName:"lambdaFunctionCompletedEventAttributes" type:"structure"`
+
+	// Provides details for the LambdaFunctionFailed event.
+	LambdaFunctionFailedEventAttributes *LambdaFunctionFailedEventAttributes `locationName:"lambdaFunctionFailedEventAttributes" type:"structure"`
+
+	// Provides details for the LambdaFunctionScheduled event.
+	LambdaFunctionScheduledEventAttributes *LambdaFunctionScheduledEventAttributes `locationName:"lambdaFunctionScheduledEventAttributes" type:"structure"`
+
+	// Provides details for the LambdaFunctionStarted event.
+	LambdaFunctionStartedEventAttributes *LambdaFunctionStartedEventAttributes `locationName:"lambdaFunctionStartedEventAttributes" type:"structure"`
+
+	// Provides details for the LambdaFunctionTimedOut event.
+	LambdaFunctionTimedOutEventAttributes *LambdaFunctionTimedOutEventAttributes `locationName:"lambdaFunctionTimedOutEventAttributes" type:"structure"`
+
+	// If the event is of type MarkerRecorded then this member is set and provides
+	// detailed information about the event. It is not set for other event types.
+	MarkerRecordedEventAttributes *MarkerRecordedEventAttributes `locationName:"markerRecordedEventAttributes" type:"structure"`
+
+	// If the event is of type RecordMarkerFailed then this member is set and provides
+	// detailed information about the event. It is not set for other event types.
+ RecordMarkerFailedEventAttributes *RecordMarkerFailedEventAttributes `locationName:"recordMarkerFailedEventAttributes" type:"structure"` + + // If the event is of type RequestCancelActivityTaskFailed then this member + // is set and provides detailed information about the event. It is not set for + // other event types. + RequestCancelActivityTaskFailedEventAttributes *RequestCancelActivityTaskFailedEventAttributes `locationName:"requestCancelActivityTaskFailedEventAttributes" type:"structure"` + + // If the event is of type RequestCancelExternalWorkflowExecutionFailed then + // this member is set and provides detailed information about the event. It + // is not set for other event types. + RequestCancelExternalWorkflowExecutionFailedEventAttributes *RequestCancelExternalWorkflowExecutionFailedEventAttributes `locationName:"requestCancelExternalWorkflowExecutionFailedEventAttributes" type:"structure"` + + // If the event is of type RequestCancelExternalWorkflowExecutionInitiated then + // this member is set and provides detailed information about the event. It + // is not set for other event types. + RequestCancelExternalWorkflowExecutionInitiatedEventAttributes *RequestCancelExternalWorkflowExecutionInitiatedEventAttributes `locationName:"requestCancelExternalWorkflowExecutionInitiatedEventAttributes" type:"structure"` + + // If the event is of type ScheduleActivityTaskFailed then this member is set + // and provides detailed information about the event. It is not set for other + // event types. + ScheduleActivityTaskFailedEventAttributes *ScheduleActivityTaskFailedEventAttributes `locationName:"scheduleActivityTaskFailedEventAttributes" type:"structure"` + + // Provides details for the ScheduleLambdaFunctionFailed event. + ScheduleLambdaFunctionFailedEventAttributes *ScheduleLambdaFunctionFailedEventAttributes `locationName:"scheduleLambdaFunctionFailedEventAttributes" type:"structure"` + + // If the event is of type SignalExternalWorkflowExecutionFailed then this member + // is set and provides detailed information about the event. It is not set for + // other event types. + SignalExternalWorkflowExecutionFailedEventAttributes *SignalExternalWorkflowExecutionFailedEventAttributes `locationName:"signalExternalWorkflowExecutionFailedEventAttributes" type:"structure"` + + // If the event is of type SignalExternalWorkflowExecutionInitiated then this + // member is set and provides detailed information about the event. It is not + // set for other event types. + SignalExternalWorkflowExecutionInitiatedEventAttributes *SignalExternalWorkflowExecutionInitiatedEventAttributes `locationName:"signalExternalWorkflowExecutionInitiatedEventAttributes" type:"structure"` + + // If the event is of type StartChildWorkflowExecutionFailed then this member + // is set and provides detailed information about the event. It is not set for + // other event types. + StartChildWorkflowExecutionFailedEventAttributes *StartChildWorkflowExecutionFailedEventAttributes `locationName:"startChildWorkflowExecutionFailedEventAttributes" type:"structure"` + + // If the event is of type StartChildWorkflowExecutionInitiated then this member + // is set and provides detailed information about the event. It is not set for + // other event types. + StartChildWorkflowExecutionInitiatedEventAttributes *StartChildWorkflowExecutionInitiatedEventAttributes `locationName:"startChildWorkflowExecutionInitiatedEventAttributes" type:"structure"` + + // Provides details for the StartLambdaFunctionFailed event. 
+ StartLambdaFunctionFailedEventAttributes *StartLambdaFunctionFailedEventAttributes `locationName:"startLambdaFunctionFailedEventAttributes" type:"structure"` + + // If the event is of type StartTimerFailed then this member is set and provides + // detailed information about the event. It is not set for other event types. + StartTimerFailedEventAttributes *StartTimerFailedEventAttributes `locationName:"startTimerFailedEventAttributes" type:"structure"` + + // If the event is of type TimerCanceled then this member is set and provides + // detailed information about the event. It is not set for other event types. + TimerCanceledEventAttributes *TimerCanceledEventAttributes `locationName:"timerCanceledEventAttributes" type:"structure"` + + // If the event is of type TimerFired then this member is set and provides detailed + // information about the event. It is not set for other event types. + TimerFiredEventAttributes *TimerFiredEventAttributes `locationName:"timerFiredEventAttributes" type:"structure"` + + // If the event is of type TimerStarted then this member is set and provides + // detailed information about the event. It is not set for other event types. + TimerStartedEventAttributes *TimerStartedEventAttributes `locationName:"timerStartedEventAttributes" type:"structure"` + + // If the event is of type WorkflowExecutionCancelRequested then this member + // is set and provides detailed information about the event. It is not set for + // other event types. + WorkflowExecutionCancelRequestedEventAttributes *WorkflowExecutionCancelRequestedEventAttributes `locationName:"workflowExecutionCancelRequestedEventAttributes" type:"structure"` + + // If the event is of type WorkflowExecutionCanceled then this member is set + // and provides detailed information about the event. It is not set for other + // event types. + WorkflowExecutionCanceledEventAttributes *WorkflowExecutionCanceledEventAttributes `locationName:"workflowExecutionCanceledEventAttributes" type:"structure"` + + // If the event is of type WorkflowExecutionCompleted then this member is set + // and provides detailed information about the event. It is not set for other + // event types. + WorkflowExecutionCompletedEventAttributes *WorkflowExecutionCompletedEventAttributes `locationName:"workflowExecutionCompletedEventAttributes" type:"structure"` + + // If the event is of type WorkflowExecutionContinuedAsNew then this member + // is set and provides detailed information about the event. It is not set for + // other event types. + WorkflowExecutionContinuedAsNewEventAttributes *WorkflowExecutionContinuedAsNewEventAttributes `locationName:"workflowExecutionContinuedAsNewEventAttributes" type:"structure"` + + // If the event is of type WorkflowExecutionFailed then this member is set and + // provides detailed information about the event. It is not set for other event + // types. + WorkflowExecutionFailedEventAttributes *WorkflowExecutionFailedEventAttributes `locationName:"workflowExecutionFailedEventAttributes" type:"structure"` + + // If the event is of type WorkflowExecutionSignaled then this member is set + // and provides detailed information about the event. It is not set for other + // event types. + WorkflowExecutionSignaledEventAttributes *WorkflowExecutionSignaledEventAttributes `locationName:"workflowExecutionSignaledEventAttributes" type:"structure"` + + // If the event is of type WorkflowExecutionStarted then this member is set + // and provides detailed information about the event. 
It is not set for other + // event types. + WorkflowExecutionStartedEventAttributes *WorkflowExecutionStartedEventAttributes `locationName:"workflowExecutionStartedEventAttributes" type:"structure"` + + // If the event is of type WorkflowExecutionTerminated then this member is set + // and provides detailed information about the event. It is not set for other + // event types. + WorkflowExecutionTerminatedEventAttributes *WorkflowExecutionTerminatedEventAttributes `locationName:"workflowExecutionTerminatedEventAttributes" type:"structure"` + + // If the event is of type WorkflowExecutionTimedOut then this member is set + // and provides detailed information about the event. It is not set for other + // event types. + WorkflowExecutionTimedOutEventAttributes *WorkflowExecutionTimedOutEventAttributes `locationName:"workflowExecutionTimedOutEventAttributes" type:"structure"` +} + +// String returns the string representation +func (s HistoryEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HistoryEvent) GoString() string { + return s.String() +} + +// Provides details for the LambdaFunctionCompleted event. +type LambdaFunctionCompletedEventAttributes struct { + _ struct{} `type:"structure"` + + // The result of the function execution (if any). + Result *string `locationName:"result" type:"string"` + + // The ID of the LambdaFunctionScheduled event that was recorded when this AWS + // Lambda function was scheduled. This information can be useful for diagnosing + // problems by tracing back the chain of events leading up to this event. + ScheduledEventId *int64 `locationName:"scheduledEventId" type:"long" required:"true"` + + // The ID of the LambdaFunctionStarted event recorded in the history. + StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` +} + +// String returns the string representation +func (s LambdaFunctionCompletedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LambdaFunctionCompletedEventAttributes) GoString() string { + return s.String() +} + +// Provides details for the LambdaFunctionFailed event. +type LambdaFunctionFailedEventAttributes struct { + _ struct{} `type:"structure"` + + // The details of the failure (if any). + Details *string `locationName:"details" type:"string"` + + // The reason provided for the failure (if any). + Reason *string `locationName:"reason" type:"string"` + + // The ID of the LambdaFunctionScheduled event that was recorded when this AWS + // Lambda function was scheduled. This information can be useful for diagnosing + // problems by tracing back the chain of events leading up to this event. + ScheduledEventId *int64 `locationName:"scheduledEventId" type:"long" required:"true"` + + // The ID of the LambdaFunctionStarted event recorded in the history. + StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` +} + +// String returns the string representation +func (s LambdaFunctionFailedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LambdaFunctionFailedEventAttributes) GoString() string { + return s.String() +} + +// Provides details for the LambdaFunctionScheduled event. 
+type LambdaFunctionScheduledEventAttributes struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the DecisionTaskCompleted event for the decision that resulted
+	// in the scheduling of this AWS Lambda function. This information can be useful
+	// for diagnosing problems by tracing back the chain of events leading up to
+	// this event.
+	DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"`
+
+	// The unique Amazon SWF ID for the AWS Lambda task.
+	Id *string `locationName:"id" min:"1" type:"string" required:"true"`
+
+	// Input provided to the AWS Lambda function.
+	Input *string `locationName:"input" min:"1" type:"string"`
+
+	// The name of the scheduled AWS Lambda function.
+	Name *string `locationName:"name" min:"1" type:"string" required:"true"`
+
+	// The maximum time, in seconds, that the AWS Lambda function can take to execute
+	// from start to close before it is marked as failed.
+	StartToCloseTimeout *string `locationName:"startToCloseTimeout" type:"string"`
+}
+
+// String returns the string representation
+func (s LambdaFunctionScheduledEventAttributes) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LambdaFunctionScheduledEventAttributes) GoString() string {
+	return s.String()
+}
+
+// Provides details for the LambdaFunctionStarted event.
+type LambdaFunctionStartedEventAttributes struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the LambdaFunctionScheduled event that was recorded when this AWS
+	// Lambda function was scheduled. This information can be useful for diagnosing
+	// problems by tracing back the chain of events leading up to this event.
+	ScheduledEventId *int64 `locationName:"scheduledEventId" type:"long" required:"true"`
+}
+
+// String returns the string representation
+func (s LambdaFunctionStartedEventAttributes) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LambdaFunctionStartedEventAttributes) GoString() string {
+	return s.String()
+}
+
+// Provides details for the LambdaFunctionTimedOut event.
+type LambdaFunctionTimedOutEventAttributes struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the LambdaFunctionScheduled event that was recorded when this AWS
+	// Lambda function was scheduled. This information can be useful for diagnosing
+	// problems by tracing back the chain of events leading up to this event.
+	ScheduledEventId *int64 `locationName:"scheduledEventId" type:"long" required:"true"`
+
+	// The ID of the LambdaFunctionStarted event recorded in the history.
+	StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"`
+
+	// The type of the timeout that caused this event.
+	TimeoutType *string `locationName:"timeoutType" type:"string" enum:"LambdaFunctionTimeoutType"`
+}
+
+// String returns the string representation
+func (s LambdaFunctionTimedOutEventAttributes) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LambdaFunctionTimedOutEventAttributes) GoString() string {
+	return s.String()
+}
+
+type ListActivityTypesInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the domain in which the activity types have been registered.
+	Domain *string `locationName:"domain" min:"1" type:"string" required:"true"`
+
+	// The maximum number of results that will be returned per call. nextPageToken
+	// can be used to obtain further pages of results. The default is 1000, which
+	// is the maximum allowed page size. You can, however, specify a page size smaller
+	// than the maximum.
+	//
+	// This is an upper limit only; the actual number of results returned per call
+	// may be fewer than the specified maximum.
+	MaximumPageSize *int64 `locationName:"maximumPageSize" type:"integer"`
+
+	// If specified, only lists the activity types that have this name.
+	Name *string `locationName:"name" min:"1" type:"string"`
+
+	// If a NextPageToken was returned by a previous call, there are more results
+	// available. To retrieve the next page of results, make the call again using
+	// the returned token in nextPageToken. Keep all other arguments unchanged.
+	//
+	// The configured maximumPageSize determines how many results can be returned
+	// in a single call.
+	NextPageToken *string `locationName:"nextPageToken" type:"string"`
+
+	// Specifies the registration status of the activity types to list.
+	RegistrationStatus *string `locationName:"registrationStatus" type:"string" required:"true" enum:"RegistrationStatus"`
+
+	// When set to true, returns the results in reverse order. By default, the results
+	// are returned in ascending alphabetical order by name of the activity types.
+	ReverseOrder *bool `locationName:"reverseOrder" type:"boolean"`
+}
+
+// String returns the string representation
+func (s ListActivityTypesInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListActivityTypesInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListActivityTypesInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListActivityTypesInput"}
+	if s.Domain == nil {
+		invalidParams.Add(request.NewErrParamRequired("Domain"))
+	}
+	if s.Domain != nil && len(*s.Domain) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Domain", 1))
+	}
+	if s.Name != nil && len(*s.Name) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
+	}
+	if s.RegistrationStatus == nil {
+		invalidParams.Add(request.NewErrParamRequired("RegistrationStatus"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Contains a paginated list of activity type information structures.
+type ListActivityTypesOutput struct {
+	_ struct{} `type:"structure"`
+
+	// If a NextPageToken was returned by a previous call, there are more results
+	// available. To retrieve the next page of results, make the call again using
+	// the returned token in nextPageToken. Keep all other arguments unchanged.
+	//
+	// The configured maximumPageSize determines how many results can be returned
+	// in a single call.
+	NextPageToken *string `locationName:"nextPageToken" type:"string"`
+
+	// List of activity type information.
+	TypeInfos []*ActivityTypeInfo `locationName:"typeInfos" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s ListActivityTypesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListActivityTypesOutput) GoString() string {
+	return s.String()
+}
+
+type ListClosedWorkflowExecutionsInput struct {
+	_ struct{} `type:"structure"`
+
+	// If specified, only workflow executions that match this close status are listed.
+	// For example, if TERMINATED is specified, then only TERMINATED workflow executions
+	// are listed.
+	//
+	// closeStatusFilter, executionFilter, typeFilter and tagFilter are mutually
+	// exclusive. You can specify at most one of these in a request.
+	CloseStatusFilter *CloseStatusFilter `locationName:"closeStatusFilter" type:"structure"`
+
+	// If specified, the workflow executions are included in the returned results
+	// based on whether their close times are within the range specified by this
+	// filter. Also, if this parameter is specified, the returned results are ordered
+	// by their close times.
+	//
+	// startTimeFilter and closeTimeFilter are mutually exclusive. You must specify
+	// one of these in a request but not both.
+	CloseTimeFilter *ExecutionTimeFilter `locationName:"closeTimeFilter" type:"structure"`
+
+	// The name of the domain that contains the workflow executions to list.
+	Domain *string `locationName:"domain" min:"1" type:"string" required:"true"`
+
+	// If specified, only workflow executions matching the workflow ID specified
+	// in the filter are returned.
+	//
+	// closeStatusFilter, executionFilter, typeFilter and tagFilter are mutually
+	// exclusive. You can specify at most one of these in a request.
+	ExecutionFilter *WorkflowExecutionFilter `locationName:"executionFilter" type:"structure"`
+
+	// The maximum number of results that will be returned per call. nextPageToken
+	// can be used to obtain further pages of results. The default is 1000, which
+	// is the maximum allowed page size. You can, however, specify a page size smaller
+	// than the maximum.
+	//
+	// This is an upper limit only; the actual number of results returned per call
+	// may be fewer than the specified maximum.
+	MaximumPageSize *int64 `locationName:"maximumPageSize" type:"integer"`
+
+	// If a NextPageToken was returned by a previous call, there are more results
+	// available. To retrieve the next page of results, make the call again using
+	// the returned token in nextPageToken. Keep all other arguments unchanged.
+	//
+	// The configured maximumPageSize determines how many results can be returned
+	// in a single call.
+	NextPageToken *string `locationName:"nextPageToken" type:"string"`
+
+	// When set to true, returns the results in reverse order. By default the results
+	// are returned in descending order of the start or the close time of the executions.
+	ReverseOrder *bool `locationName:"reverseOrder" type:"boolean"`
+
+	// If specified, the workflow executions are included in the returned results
+	// based on whether their start times are within the range specified by this
+	// filter. Also, if this parameter is specified, the returned results are ordered
+	// by their start times.
+	//
+	// startTimeFilter and closeTimeFilter are mutually exclusive. You must specify
+	// one of these in a request but not both.
+	StartTimeFilter *ExecutionTimeFilter `locationName:"startTimeFilter" type:"structure"`
+
+	// If specified, only executions that have the matching tag are listed.
+	//
+	// closeStatusFilter, executionFilter, typeFilter and tagFilter are mutually
+	// exclusive. You can specify at most one of these in a request.
+	TagFilter *TagFilter `locationName:"tagFilter" type:"structure"`
+
+	// If specified, only executions of the type specified in the filter are returned.
+	//
+	// closeStatusFilter, executionFilter, typeFilter and tagFilter are mutually
+	// exclusive. You can specify at most one of these in a request.
+	TypeFilter *WorkflowTypeFilter `locationName:"typeFilter" type:"structure"`
+}
+
+// String returns the string representation
+func (s ListClosedWorkflowExecutionsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListClosedWorkflowExecutionsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListClosedWorkflowExecutionsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListClosedWorkflowExecutionsInput"}
+	if s.Domain == nil {
+		invalidParams.Add(request.NewErrParamRequired("Domain"))
+	}
+	if s.Domain != nil && len(*s.Domain) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Domain", 1))
+	}
+	if s.CloseStatusFilter != nil {
+		if err := s.CloseStatusFilter.Validate(); err != nil {
+			invalidParams.AddNested("CloseStatusFilter", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.CloseTimeFilter != nil {
+		if err := s.CloseTimeFilter.Validate(); err != nil {
+			invalidParams.AddNested("CloseTimeFilter", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.ExecutionFilter != nil {
+		if err := s.ExecutionFilter.Validate(); err != nil {
+			invalidParams.AddNested("ExecutionFilter", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.StartTimeFilter != nil {
+		if err := s.StartTimeFilter.Validate(); err != nil {
+			invalidParams.AddNested("StartTimeFilter", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.TagFilter != nil {
+		if err := s.TagFilter.Validate(); err != nil {
+			invalidParams.AddNested("TagFilter", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.TypeFilter != nil {
+		if err := s.TypeFilter.Validate(); err != nil {
+			invalidParams.AddNested("TypeFilter", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
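+
+// A hedged usage sketch (illustrative; svc and the domain name are
+// hypothetical, not part of the generated API). This lists executions that
+// closed in the last day via closeTimeFilter; per the notes above, at most
+// one of closeStatusFilter, executionFilter, typeFilter, or tagFilter could
+// be combined with it:
+//
+//	resp, err := svc.ListClosedWorkflowExecutions(&swf.ListClosedWorkflowExecutionsInput{
+//		Domain: aws.String("my-domain"),
+//		CloseTimeFilter: &swf.ExecutionTimeFilter{
+//			OldestDate: aws.Time(time.Now().Add(-24 * time.Hour)),
+//		},
+//	})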
+
+type ListDomainsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The maximum number of results that will be returned per call. nextPageToken
+	// can be used to obtain further pages of results. The default is 1000, which
+	// is the maximum allowed page size. You can, however, specify a page size smaller
+	// than the maximum.
+	//
+	// This is an upper limit only; the actual number of results returned per call
+	// may be fewer than the specified maximum.
+	MaximumPageSize *int64 `locationName:"maximumPageSize" type:"integer"`
+
+	// If a NextPageToken was returned by a previous call, there are more results
+	// available. To retrieve the next page of results, make the call again using
+	// the returned token in nextPageToken. Keep all other arguments unchanged.
+	//
+	// The configured maximumPageSize determines how many results can be returned
+	// in a single call.
+	NextPageToken *string `locationName:"nextPageToken" type:"string"`
+
+	// Specifies the registration status of the domains to list.
+	RegistrationStatus *string `locationName:"registrationStatus" type:"string" required:"true" enum:"RegistrationStatus"`
+
+	// When set to true, returns the results in reverse order. By default, the results
+	// are returned in ascending alphabetical order by name of the domains.
+	ReverseOrder *bool `locationName:"reverseOrder" type:"boolean"`
+}
+
+// String returns the string representation
+func (s ListDomainsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListDomainsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListDomainsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListDomainsInput"}
+	if s.RegistrationStatus == nil {
+		invalidParams.Add(request.NewErrParamRequired("RegistrationStatus"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Contains a paginated collection of DomainInfo structures.
+type ListDomainsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of DomainInfo structures.
+	DomainInfos []*DomainInfo `locationName:"domainInfos" type:"list" required:"true"`
+
+	// If a NextPageToken was returned by a previous call, there are more results
+	// available. To retrieve the next page of results, make the call again using
+	// the returned token in nextPageToken. Keep all other arguments unchanged.
+	//
+	// The configured maximumPageSize determines how many results can be returned
+	// in a single call.
+	NextPageToken *string `locationName:"nextPageToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListDomainsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListDomainsOutput) GoString() string {
+	return s.String()
+}
+
+type ListOpenWorkflowExecutionsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the domain that contains the workflow executions to list.
+	Domain *string `locationName:"domain" min:"1" type:"string" required:"true"`
+
+	// If specified, only workflow executions matching the workflow ID specified
+	// in the filter are returned.
+	//
+	// executionFilter, typeFilter and tagFilter are mutually exclusive. You can
+	// specify at most one of these in a request.
+	ExecutionFilter *WorkflowExecutionFilter `locationName:"executionFilter" type:"structure"`
+
+	// The maximum number of results that will be returned per call. nextPageToken
+	// can be used to obtain further pages of results. The default is 1000, which
+	// is the maximum allowed page size. You can, however, specify a page size smaller
+	// than the maximum.
+	//
+	// This is an upper limit only; the actual number of results returned per call
+	// may be fewer than the specified maximum.
+	MaximumPageSize *int64 `locationName:"maximumPageSize" type:"integer"`
+
+	// If a NextPageToken was returned by a previous call, there are more results
+	// available. To retrieve the next page of results, make the call again using
+	// the returned token in nextPageToken. Keep all other arguments unchanged.
+	//
+	// The configured maximumPageSize determines how many results can be returned
+	// in a single call.
+	NextPageToken *string `locationName:"nextPageToken" type:"string"`
+
+	// When set to true, returns the results in reverse order. By default the results
+	// are returned in descending order of the start time of the executions.
+	ReverseOrder *bool `locationName:"reverseOrder" type:"boolean"`
+
+	// Workflow executions are included in the returned results based on whether
+	// their start times are within the range specified by this filter.
+	StartTimeFilter *ExecutionTimeFilter `locationName:"startTimeFilter" type:"structure" required:"true"`
+
+	// If specified, only executions that have the matching tag are listed.
+	//
+	// executionFilter, typeFilter and tagFilter are mutually exclusive. You can
+	// specify at most one of these in a request.
+	TagFilter *TagFilter `locationName:"tagFilter" type:"structure"`
+
+	// If specified, only executions of the type specified in the filter are returned.
+	//
+	// executionFilter, typeFilter and tagFilter are mutually exclusive. You can
+	// specify at most one of these in a request.
+	TypeFilter *WorkflowTypeFilter `locationName:"typeFilter" type:"structure"`
+}
+
+// String returns the string representation
+func (s ListOpenWorkflowExecutionsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListOpenWorkflowExecutionsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListOpenWorkflowExecutionsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListOpenWorkflowExecutionsInput"}
+	if s.Domain == nil {
+		invalidParams.Add(request.NewErrParamRequired("Domain"))
+	}
+	if s.Domain != nil && len(*s.Domain) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Domain", 1))
+	}
+	if s.StartTimeFilter == nil {
+		invalidParams.Add(request.NewErrParamRequired("StartTimeFilter"))
+	}
+	if s.ExecutionFilter != nil {
+		if err := s.ExecutionFilter.Validate(); err != nil {
+			invalidParams.AddNested("ExecutionFilter", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.StartTimeFilter != nil {
+		if err := s.StartTimeFilter.Validate(); err != nil {
+			invalidParams.AddNested("StartTimeFilter", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.TagFilter != nil {
+		if err := s.TagFilter.Validate(); err != nil {
+			invalidParams.AddNested("TagFilter", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.TypeFilter != nil {
+		if err := s.TypeFilter.Validate(); err != nil {
+			invalidParams.AddNested("TypeFilter", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
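+
+// A usage sketch (illustrative; svc and the input values are hypothetical).
+// startTimeFilter is the only required filter here, so a minimal request
+// lists every execution opened in, say, the last hour:
+//
+//	resp, err := svc.ListOpenWorkflowExecutions(&swf.ListOpenWorkflowExecutionsInput{
+//		Domain: aws.String("my-domain"),
+//		StartTimeFilter: &swf.ExecutionTimeFilter{
+//			OldestDate: aws.Time(time.Now().Add(-time.Hour)),
+//		},
+//	})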
+
+type ListWorkflowTypesInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the domain in which the workflow types have been registered.
+	Domain *string `locationName:"domain" min:"1" type:"string" required:"true"`
+
+	// The maximum number of results that will be returned per call. nextPageToken
+	// can be used to obtain further pages of results. The default is 1000, which
+	// is the maximum allowed page size. You can, however, specify a page size smaller
+	// than the maximum.
+	//
+	// This is an upper limit only; the actual number of results returned per call
+	// may be fewer than the specified maximum.
+	MaximumPageSize *int64 `locationName:"maximumPageSize" type:"integer"`
+
+	// If specified, lists the workflow type with this name.
+	Name *string `locationName:"name" min:"1" type:"string"`
+
+	// If a NextPageToken was returned by a previous call, there are more results
+	// available. To retrieve the next page of results, make the call again using
+	// the returned token in nextPageToken. Keep all other arguments unchanged.
+	//
+	// The configured maximumPageSize determines how many results can be returned
+	// in a single call.
+	NextPageToken *string `locationName:"nextPageToken" type:"string"`
+
+	// Specifies the registration status of the workflow types to list.
+ RegistrationStatus *string `locationName:"registrationStatus" type:"string" required:"true" enum:"RegistrationStatus"` + + // When set to true, returns the results in reverse order. By default the results + // are returned in ascending alphabetical order of the name of the workflow + // types. + ReverseOrder *bool `locationName:"reverseOrder" type:"boolean"` +} + +// String returns the string representation +func (s ListWorkflowTypesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListWorkflowTypesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListWorkflowTypesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListWorkflowTypesInput"} + if s.Domain == nil { + invalidParams.Add(request.NewErrParamRequired("Domain")) + } + if s.Domain != nil && len(*s.Domain) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Domain", 1)) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.RegistrationStatus == nil { + invalidParams.Add(request.NewErrParamRequired("RegistrationStatus")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains a paginated list of information structures about workflow types. +type ListWorkflowTypesOutput struct { + _ struct{} `type:"structure"` + + // If a NextPageToken was returned by a previous call, there are more results + // available. To retrieve the next page of results, make the call again using + // the returned token in nextPageToken. Keep all other arguments unchanged. + // + // The configured maximumPageSize determines how many results can be returned + // in a single call. + NextPageToken *string `locationName:"nextPageToken" type:"string"` + + // The list of workflow type information. + TypeInfos []*WorkflowTypeInfo `locationName:"typeInfos" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListWorkflowTypesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListWorkflowTypesOutput) GoString() string { + return s.String() +} + +// Provides details of the MarkerRecorded event. +type MarkerRecordedEventAttributes struct { + _ struct{} `type:"structure"` + + // The ID of the DecisionTaskCompleted event corresponding to the decision task + // that resulted in the RecordMarker decision that requested this marker. This + // information can be useful for diagnosing problems by tracing back the chain + // of events leading up to this event. + DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` + + // Details of the marker (if any). + Details *string `locationName:"details" type:"string"` + + // The name of the marker. + MarkerName *string `locationName:"markerName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s MarkerRecordedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MarkerRecordedEventAttributes) GoString() string { + return s.String() +} + +// Contains the count of tasks in a task list. +type PendingTaskCount struct { + _ struct{} `type:"structure"` + + // The number of tasks in the task list. 
+	Count *int64 `locationName:"count" type:"integer" required:"true"`
+
+	// If set to true, indicates that the actual count was more than the maximum
+	// supported by this API and the count returned is the truncated value.
+	Truncated *bool `locationName:"truncated" type:"boolean"`
+}
+
+// String returns the string representation
+func (s PendingTaskCount) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PendingTaskCount) GoString() string {
+	return s.String()
+}
+
+type PollForActivityTaskInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the domain that contains the task lists being polled.
+	Domain *string `locationName:"domain" min:"1" type:"string" required:"true"`
+
+	// Identity of the worker making the request, recorded in the ActivityTaskStarted
+	// event in the workflow history. This enables diagnostic tracing when problems
+	// arise. The form of this identity is user defined.
+	Identity *string `locationName:"identity" type:"string"`
+
+	// Specifies the task list to poll for activity tasks.
+	//
+	// The specified string must not start or end with whitespace. It must not
+	// contain a : (colon), / (slash), | (vertical bar), or any control characters
+	// (\u0000-\u001f | \u007f - \u009f). Also, it must not contain the literal
+	// string "arn".
+	TaskList *TaskList `locationName:"taskList" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s PollForActivityTaskInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PollForActivityTaskInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PollForActivityTaskInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PollForActivityTaskInput"}
+	if s.Domain == nil {
+		invalidParams.Add(request.NewErrParamRequired("Domain"))
+	}
+	if s.Domain != nil && len(*s.Domain) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Domain", 1))
+	}
+	if s.TaskList == nil {
+		invalidParams.Add(request.NewErrParamRequired("TaskList"))
+	}
+	if s.TaskList != nil {
+		if err := s.TaskList.Validate(); err != nil {
+			invalidParams.AddNested("TaskList", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Unit of work sent to an activity worker.
+type PollForActivityTaskOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The unique ID of the task.
+	ActivityId *string `locationName:"activityId" min:"1" type:"string" required:"true"`
+
+	// The type of this activity task.
+	ActivityType *ActivityType `locationName:"activityType" type:"structure" required:"true"`
+
+	// The inputs provided when the activity task was scheduled. The form of the
+	// input is user defined and should be meaningful to the activity implementation.
+	Input *string `locationName:"input" type:"string"`
+
+	// The ID of the ActivityTaskStarted event recorded in the history.
+	StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"`
+
+	// The opaque string used as a handle on the task. This token is used by workers
+	// to communicate progress and response information back to the system about
+	// the task.
+	TaskToken *string `locationName:"taskToken" min:"1" type:"string" required:"true"`
+
+	// The workflow execution that started this activity task.
+	WorkflowExecution *WorkflowExecution `locationName:"workflowExecution" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s PollForActivityTaskOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PollForActivityTaskOutput) GoString() string {
+	return s.String()
+}
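+
+// A minimal worker-loop sketch (illustrative only; svc, the domain, and the
+// task list name are hypothetical). PollForActivityTask long-polls for up to
+// 60 seconds; on timeout it returns an empty taskToken, which is skipped
+// rather than treated as work:
+//
+//	for {
+//		task, err := svc.PollForActivityTask(&swf.PollForActivityTaskInput{
+//			Domain:   aws.String("my-domain"),
+//			TaskList: &swf.TaskList{Name: aws.String("my-task-list")},
+//		})
+//		if err != nil {
+//			log.Println(err)
+//			continue
+//		}
+//		if aws.StringValue(task.TaskToken) == "" {
+//			continue // long-poll timed out with no task
+//		}
+//		// ... perform the activity, then report back:
+//		svc.RespondActivityTaskCompleted(&swf.RespondActivityTaskCompletedInput{
+//			TaskToken: task.TaskToken,
+//			Result:    aws.String("done"),
+//		})
+//	}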
+
+type PollForDecisionTaskInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the domain containing the task lists to poll.
+	Domain *string `locationName:"domain" min:"1" type:"string" required:"true"`
+
+	// Identity of the decider making the request, which is recorded in the DecisionTaskStarted
+	// event in the workflow history. This enables diagnostic tracing when problems
+	// arise. The form of this identity is user defined.
+	Identity *string `locationName:"identity" type:"string"`
+
+	// The maximum number of results that will be returned per call. nextPageToken
+	// can be used to obtain further pages of results. The default is 1000, which
+	// is the maximum allowed page size. You can, however, specify a page size smaller
+	// than the maximum.
+	//
+	// This is an upper limit only; the actual number of results returned per call
+	// may be fewer than the specified maximum.
+	MaximumPageSize *int64 `locationName:"maximumPageSize" type:"integer"`
+
+	// If a NextPageToken was returned by a previous call, there are more results
+	// available. To retrieve the next page of results, make the call again using
+	// the returned token in nextPageToken. Keep all other arguments unchanged.
+	//
+	// The configured maximumPageSize determines how many results can be returned
+	// in a single call.
+	//
+	// The nextPageToken returned by this action cannot be used with GetWorkflowExecutionHistory
+	// to get the next page. You must call PollForDecisionTask again (with the nextPageToken)
+	// to retrieve the next page of history records. Calling PollForDecisionTask
+	// with a nextPageToken will not return a new decision task.
+	NextPageToken *string `locationName:"nextPageToken" type:"string"`
+
+	// When set to true, returns the events in reverse order. By default the results
+	// are returned in ascending order of the eventTimestamp of the events.
+	ReverseOrder *bool `locationName:"reverseOrder" type:"boolean"`
+
+	// Specifies the task list to poll for decision tasks.
+	//
+	// The specified string must not start or end with whitespace. It must not
+	// contain a : (colon), / (slash), | (vertical bar), or any control characters
+	// (\u0000-\u001f | \u007f - \u009f). Also, it must not contain the literal
+	// string "arn".
+	TaskList *TaskList `locationName:"taskList" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s PollForDecisionTaskInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PollForDecisionTaskInput) GoString() string {
+	return s.String()
+}
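+
+// A history-paging sketch for deciders (illustrative; svc, input, and the
+// surrounding function are hypothetical). Because this action's nextPageToken
+// cannot be fed to GetWorkflowExecutionHistory, additional pages are fetched
+// by repeating the poll with the token:
+//
+//	task, err := svc.PollForDecisionTask(input)
+//	if err != nil {
+//		return err
+//	}
+//	events := task.Events
+//	for task.NextPageToken != nil {
+//		input.NextPageToken = task.NextPageToken
+//		if task, err = svc.PollForDecisionTask(input); err != nil {
+//			return err
+//		}
+//		events = append(events, task.Events...)
+//	}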
+func (s *PollForDecisionTaskInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PollForDecisionTaskInput"}
+ if s.Domain == nil {
+ invalidParams.Add(request.NewErrParamRequired("Domain"))
+ }
+ if s.Domain != nil && len(*s.Domain) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Domain", 1))
+ }
+ if s.TaskList == nil {
+ invalidParams.Add(request.NewErrParamRequired("TaskList"))
+ }
+ if s.TaskList != nil {
+ if err := s.TaskList.Validate(); err != nil {
+ invalidParams.AddNested("TaskList", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// A structure that represents a decision task. Decision tasks are sent to deciders
+// in order for them to make decisions.
+type PollForDecisionTaskOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A paginated list of history events of the workflow execution. The decider
+ // uses this during the processing of the decision task.
+ Events []*HistoryEvent `locationName:"events" type:"list" required:"true"`
+
+ // If a NextPageToken was returned by a previous call, there are more results
+ // available. To retrieve the next page of results, make the call again using
+ // the returned token in nextPageToken. Keep all other arguments unchanged.
+ //
+ // The configured maximumPageSize determines how many results can be returned
+ // in a single call.
+ NextPageToken *string `locationName:"nextPageToken" type:"string"`
+
+ // The ID of the DecisionTaskStarted event of the previous decision task of
+ // this workflow execution that was processed by the decider. This can be used
+ // to determine the new events in the history since the last decision task received
+ // by the decider.
+ PreviousStartedEventId *int64 `locationName:"previousStartedEventId" type:"long"`
+
+ // The ID of the DecisionTaskStarted event recorded in the history.
+ StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"`
+
+ // The opaque string used as a handle on the task. This token is used by workers
+ // to communicate progress and response information back to the system about
+ // the task.
+ TaskToken *string `locationName:"taskToken" min:"1" type:"string" required:"true"`
+
+ // The workflow execution for which this decision task was created.
+ WorkflowExecution *WorkflowExecution `locationName:"workflowExecution" type:"structure" required:"true"`
+
+ // The type of the workflow execution for which this decision task was created.
+ WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s PollForDecisionTaskOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PollForDecisionTaskOutput) GoString() string {
+ return s.String()
+}
+
+type RecordActivityTaskHeartbeatInput struct {
+ _ struct{} `type:"structure"`
+
+ // If specified, contains details about the progress of the task.
+ Details *string `locationName:"details" type:"string"`
+
+ // The taskToken of the ActivityTask.
+ //
+ // taskToken is generated by the service and should be treated as an opaque
+ // value. If the task is passed to another process, its taskToken must also
+ // be passed. This enables it to provide its progress and respond with results.
+ TaskToken *string `locationName:"taskToken" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RecordActivityTaskHeartbeatInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RecordActivityTaskHeartbeatInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RecordActivityTaskHeartbeatInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RecordActivityTaskHeartbeatInput"}
+ if s.TaskToken == nil {
+ invalidParams.Add(request.NewErrParamRequired("TaskToken"))
+ }
+ if s.TaskToken != nil && len(*s.TaskToken) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("TaskToken", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Status information about an activity task.
+type RecordActivityTaskHeartbeatOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Set to true if cancellation of the task is requested.
+ CancelRequested *bool `locationName:"cancelRequested" type:"boolean" required:"true"`
+}
+
+// String returns the string representation
+func (s RecordActivityTaskHeartbeatOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RecordActivityTaskHeartbeatOutput) GoString() string {
+ return s.String()
+}
+
+// Provides details of the RecordMarker decision.
+//
+// Access Control
+//
+// You can use IAM policies to control this decision's access to Amazon SWF
+// resources as follows:
+//
+// Use a Resource element with the domain name to limit the action to only
+// specified domains. Use an Action element to allow or deny permission to call
+// this action. You cannot use an IAM policy to constrain this action's parameters.
+// If the caller does not have sufficient permissions to invoke the action,
+// or the parameter values fall outside the specified constraints, the action
+// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED.
+// For details and example IAM policies, see Using IAM to Manage Access to Amazon
+// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
+type RecordMarkerDecisionAttributes struct {
+ _ struct{} `type:"structure"`
+
+ // Optional. Details of the marker.
+ Details *string `locationName:"details" type:"string"`
+
+ // Required. The name of the marker.
+ MarkerName *string `locationName:"markerName" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RecordMarkerDecisionAttributes) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RecordMarkerDecisionAttributes) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RecordMarkerDecisionAttributes) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RecordMarkerDecisionAttributes"}
+ if s.MarkerName == nil {
+ invalidParams.Add(request.NewErrParamRequired("MarkerName"))
+ }
+ if s.MarkerName != nil && len(*s.MarkerName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("MarkerName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
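[Editor's note: RecordActivityTaskHeartbeat is how a long-running worker both reports liveness and learns about cancellation; the CancelRequested flag in the output is the only channel through which a RequestCancelActivityTask decision reaches the worker. A sketch of the heartbeat pattern, not part of the diff (helper structure is hypothetical; uses the same imports as the polling sketch above, plus fmt):]

// processChunks does the work in small units so heartbeats stay frequent.
func processChunks(svc *swf.SWF, taskToken string, chunks []string) error {
	for i := range chunks {
		// ... do one unit of work here ...

		hb, err := svc.RecordActivityTaskHeartbeat(&swf.RecordActivityTaskHeartbeatInput{
			TaskToken: aws.String(taskToken),
			Details:   aws.String(fmt.Sprintf("%d/%d chunks done", i+1, len(chunks))),
		})
		if err != nil {
			return err // e.g. the task may already have timed out
		}
		if aws.BoolValue(hb.CancelRequested) {
			_, err := svc.RespondActivityTaskCanceled(&swf.RespondActivityTaskCanceledInput{
				TaskToken: aws.String(taskToken),
				Details:   aws.String("canceled mid-processing"),
			})
			return err
		}
	}
	_, err := svc.RespondActivityTaskCompleted(&swf.RespondActivityTaskCompletedInput{
		TaskToken: aws.String(taskToken),
		Result:    aws.String("done"),
	})
	return err
}

+// Provides details of the RecordMarkerFailed event.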
+type RecordMarkerFailedEventAttributes struct {
+ _ struct{} `type:"structure"`
+
+ // The cause of the failure. This information is generated by the system and
+ // can be useful for diagnostic purposes.
+ //
+ // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because
+ // it lacked sufficient permissions. For details and example IAM policies, see
+ // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
+ Cause *string `locationName:"cause" type:"string" required:"true" enum:"RecordMarkerFailedCause"`
+
+ // The ID of the DecisionTaskCompleted event corresponding to the decision task
+ // that resulted in the RecordMarkerFailed decision for this marker. This information
+ // can be useful for diagnosing problems by tracing back the chain of events
+ // leading up to this event.
+ DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"`
+
+ // The marker's name.
+ MarkerName *string `locationName:"markerName" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RecordMarkerFailedEventAttributes) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RecordMarkerFailedEventAttributes) GoString() string {
+ return s.String()
+}
+
+type RegisterActivityTypeInput struct {
+ _ struct{} `type:"structure"`
+
+ // If set, specifies the default maximum time before which a worker processing
+ // a task of this type must report progress by calling RecordActivityTaskHeartbeat.
+ // If the timeout is exceeded, the activity task is automatically timed out.
+ // This default can be overridden when scheduling an activity task using the
+ // ScheduleActivityTask decision. If the activity worker subsequently attempts
+ // to record a heartbeat or returns a result, the activity worker receives an
+ // UnknownResource fault. In this case, Amazon SWF no longer considers the activity
+ // task to be valid; the activity worker should clean up the activity task.
+ //
+ // The duration is specified in seconds; an integer greater than or equal to
+ // 0. The value "NONE" can be used to specify unlimited duration.
+ DefaultTaskHeartbeatTimeout *string `locationName:"defaultTaskHeartbeatTimeout" type:"string"`
+
+ // If set, specifies the default task list to use for scheduling tasks of this
+ // activity type. This default task list is used if a task list is not provided
+ // when a task is scheduled through the ScheduleActivityTask decision.
+ DefaultTaskList *TaskList `locationName:"defaultTaskList" type:"structure"`
+
+ // The default task priority to assign to the activity type. If not assigned,
+ // then "0" will be used. Valid values are integers that range from Java's Integer.MIN_VALUE
+ // (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate
+ // higher priority.
+ //
+ // For more information about setting task priority, see Setting Task Priority
+ // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html)
+ // in the Amazon Simple Workflow Developer Guide.
+ DefaultTaskPriority *string `locationName:"defaultTaskPriority" type:"string"`
+
+ // If set, specifies the default maximum duration for a task of this activity
+ // type. This default can be overridden when scheduling an activity task using
+ // the ScheduleActivityTask decision.
+ //
+ // The duration is specified in seconds; an integer greater than or equal to
+ // 0. The value "NONE" can be used to specify unlimited duration.
+ DefaultTaskScheduleToCloseTimeout *string `locationName:"defaultTaskScheduleToCloseTimeout" type:"string"`
+
+ // If set, specifies the default maximum duration that a task of this activity
+ // type can wait before being assigned to a worker. This default can be overridden
+ // when scheduling an activity task using the ScheduleActivityTask decision.
+ //
+ // The duration is specified in seconds; an integer greater than or equal to
+ // 0. The value "NONE" can be used to specify unlimited duration.
+ DefaultTaskScheduleToStartTimeout *string `locationName:"defaultTaskScheduleToStartTimeout" type:"string"`
+
+ // If set, specifies the default maximum duration that a worker can take to
+ // process tasks of this activity type. This default can be overridden when
+ // scheduling an activity task using the ScheduleActivityTask decision.
+ //
+ // The duration is specified in seconds; an integer greater than or equal to
+ // 0. The value "NONE" can be used to specify unlimited duration.
+ DefaultTaskStartToCloseTimeout *string `locationName:"defaultTaskStartToCloseTimeout" type:"string"`
+
+ // A textual description of the activity type.
+ Description *string `locationName:"description" type:"string"`
+
+ // The name of the domain in which this activity is to be registered.
+ Domain *string `locationName:"domain" min:"1" type:"string" required:"true"`
+
+ // The name of the activity type within the domain.
+ //
+ // The specified string must not start or end with whitespace. It must not
+ // contain a : (colon), / (slash), | (vertical bar), or any control characters
+ // (\u0000-\u001f | \u007f - \u009f). Also, it must not contain the literal
+ // string "arn".
+ Name *string `locationName:"name" min:"1" type:"string" required:"true"`
+
+ // The version of the activity type.
+ //
+ // The activity type consists of the name and version, the combination of which
+ // must be unique within the domain. The specified string must not start or
+ // end with whitespace. It must not contain a : (colon), / (slash), | (vertical
+ // bar), or any control characters (\u0000-\u001f | \u007f - \u009f). Also,
+ // it must not contain the literal string "arn".
+ Version *string `locationName:"version" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RegisterActivityTypeInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterActivityTypeInput) GoString() string {
+ return s.String()
+}
+
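[Editor's note: registration is a one-time setup call; re-registering an existing name/version pair fails with a TypeAlreadyExistsFault. A sketch of RegisterActivityType using the defaults described above, not part of the diff; names and timeout values are illustrative:]

_, err := svc.RegisterActivityType(&swf.RegisterActivityTypeInput{
	Domain:                            aws.String("example-domain"),
	Name:                              aws.String("resize-image"),
	Version:                           aws.String("1.0"),
	Description:                       aws.String("resizes one image"),
	DefaultTaskList:                   &swf.TaskList{Name: aws.String("example-task-list")},
	DefaultTaskHeartbeatTimeout:       aws.String("120"),  // seconds
	DefaultTaskScheduleToStartTimeout: aws.String("300"),
	DefaultTaskStartToCloseTimeout:    aws.String("600"),
	DefaultTaskScheduleToCloseTimeout: aws.String("NONE"), // unlimited
})

+// Validate inspects the fields of the type to determine if they are valid.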
+func (s *RegisterActivityTypeInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RegisterActivityTypeInput"}
+ if s.Domain == nil {
+ invalidParams.Add(request.NewErrParamRequired("Domain"))
+ }
+ if s.Domain != nil && len(*s.Domain) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Domain", 1))
+ }
+ if s.Name == nil {
+ invalidParams.Add(request.NewErrParamRequired("Name"))
+ }
+ if s.Name != nil && len(*s.Name) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Name", 1))
+ }
+ if s.Version == nil {
+ invalidParams.Add(request.NewErrParamRequired("Version"))
+ }
+ if s.Version != nil && len(*s.Version) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Version", 1))
+ }
+ if s.DefaultTaskList != nil {
+ if err := s.DefaultTaskList.Validate(); err != nil {
+ invalidParams.AddNested("DefaultTaskList", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type RegisterActivityTypeOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s RegisterActivityTypeOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterActivityTypeOutput) GoString() string {
+ return s.String()
+}
+
+type RegisterDomainInput struct {
+ _ struct{} `type:"structure"`
+
+ // A text description of the domain.
+ Description *string `locationName:"description" type:"string"`
+
+ // Name of the domain to register. The name must be unique in the region that
+ // the domain is registered in.
+ //
+ // The specified string must not start or end with whitespace. It must not
+ // contain a : (colon), / (slash), | (vertical bar), or any control characters
+ // (\u0000-\u001f | \u007f - \u009f). Also, it must not contain the literal
+ // string "arn".
+ Name *string `locationName:"name" min:"1" type:"string" required:"true"`
+
+ // The duration (in days) that records and histories of workflow executions
+ // on the domain should be kept by the service. After the retention period,
+ // the workflow execution is not available in the results of visibility calls.
+ //
+ // If you pass the value NONE or 0 (zero), then the workflow execution history
+ // will not be retained. As soon as the workflow execution completes, the execution
+ // record and its history are deleted.
+ //
+ // The maximum workflow execution retention period is 90 days. For more information
+ // about Amazon SWF service limits, see: Amazon SWF Service Limits (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dg-limits.html)
+ // in the Amazon SWF Developer Guide.
+ WorkflowExecutionRetentionPeriodInDays *string `locationName:"workflowExecutionRetentionPeriodInDays" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RegisterDomainInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterDomainInput) GoString() string {
+ return s.String()
+}
+
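[Editor's note: RegisterDomain is effectively idempotent if the caller tolerates the already-registered fault. A sketch, not part of the diff; the fault code string is SWF's documented DomainAlreadyExistsFault, awserr comes from github.com/aws/aws-sdk-go/aws/awserr, and the other imports match the polling sketch:]

_, err := svc.RegisterDomain(&swf.RegisterDomainInput{
	Name:        aws.String("example-domain"),
	Description: aws.String("demo domain"),
	WorkflowExecutionRetentionPeriodInDays: aws.String("30"), // 90 is the maximum
})
if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "DomainAlreadyExistsFault" {
	err = nil // the domain already exists; treat as success
}
if err != nil {
	log.Fatal(err)
}

+// Validate inspects the fields of the type to determine if they are valid.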
+func (s *RegisterDomainInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RegisterDomainInput"}
+ if s.Name == nil {
+ invalidParams.Add(request.NewErrParamRequired("Name"))
+ }
+ if s.Name != nil && len(*s.Name) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Name", 1))
+ }
+ if s.WorkflowExecutionRetentionPeriodInDays == nil {
+ invalidParams.Add(request.NewErrParamRequired("WorkflowExecutionRetentionPeriodInDays"))
+ }
+ if s.WorkflowExecutionRetentionPeriodInDays != nil && len(*s.WorkflowExecutionRetentionPeriodInDays) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("WorkflowExecutionRetentionPeriodInDays", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type RegisterDomainOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s RegisterDomainOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterDomainOutput) GoString() string {
+ return s.String()
+}
+
+type RegisterWorkflowTypeInput struct {
+ _ struct{} `type:"structure"`
+
+ // If set, specifies the default policy to use for the child workflow executions
+ // when a workflow execution of this type is terminated, by calling the TerminateWorkflowExecution
+ // action explicitly or due to an expired timeout. This default can be overridden
+ // when starting a workflow execution using the StartWorkflowExecution action
+ // or the StartChildWorkflowExecution decision.
+ //
+ // The supported child policies are:
+ //
+ // TERMINATE: the child executions will be terminated. REQUEST_CANCEL: a request
+ // to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested
+ // event in its history. It is up to the decider to take appropriate actions
+ // when it receives an execution history with this event. ABANDON: no action
+ // will be taken. The child executions will continue to run.
+ DefaultChildPolicy *string `locationName:"defaultChildPolicy" type:"string" enum:"ChildPolicy"`
+
+ // If set, specifies the default maximum duration for executions of this workflow
+ // type. You can override this default when starting an execution through the
+ // StartWorkflowExecution action or StartChildWorkflowExecution decision.
+ //
+ // The duration is specified in seconds; an integer greater than or equal to
+ // 0. Unlike some of the other timeout parameters in Amazon SWF, you cannot
+ // specify a value of "NONE" for defaultExecutionStartToCloseTimeout; there
+ // is a one-year max limit on the time that a workflow execution can run. Exceeding
+ // this limit will always cause the workflow execution to time out.
+ DefaultExecutionStartToCloseTimeout *string `locationName:"defaultExecutionStartToCloseTimeout" type:"string"`
+
+ // The ARN of the default IAM role to use when a workflow execution of this
+ // type invokes AWS Lambda functions.
+ //
+ // This default can be overridden when starting a workflow execution using
+ // the StartWorkflowExecution action or the StartChildWorkflowExecution and
+ // ContinueAsNewWorkflowExecution decisions.
+ DefaultLambdaRole *string `locationName:"defaultLambdaRole" min:"1" type:"string"`
+
+ // If set, specifies the default task list to use for scheduling decision tasks
+ // for executions of this workflow type. This default is used only if a task
+ // list is not provided when starting the execution through the StartWorkflowExecution
+ // action or StartChildWorkflowExecution decision.
+ DefaultTaskList *TaskList `locationName:"defaultTaskList" type:"structure"`
+
+ // The default task priority to assign to the workflow type. If not assigned,
+ // then "0" will be used. Valid values are integers that range from Java's Integer.MIN_VALUE
+ // (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate
+ // higher priority.
+ //
+ // For more information about setting task priority, see Setting Task Priority
+ // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html)
+ // in the Amazon Simple Workflow Developer Guide.
+ DefaultTaskPriority *string `locationName:"defaultTaskPriority" type:"string"`
+
+ // If set, specifies the default maximum duration of decision tasks for this
+ // workflow type. This default can be overridden when starting a workflow execution
+ // using the StartWorkflowExecution action or the StartChildWorkflowExecution
+ // decision.
+ //
+ // The duration is specified in seconds; an integer greater than or equal to
+ // 0. The value "NONE" can be used to specify unlimited duration.
+ DefaultTaskStartToCloseTimeout *string `locationName:"defaultTaskStartToCloseTimeout" type:"string"`
+
+ // Textual description of the workflow type.
+ Description *string `locationName:"description" type:"string"`
+
+ // The name of the domain in which to register the workflow type.
+ Domain *string `locationName:"domain" min:"1" type:"string" required:"true"`
+
+ // The name of the workflow type.
+ //
+ // The specified string must not start or end with whitespace. It must not
+ // contain a : (colon), / (slash), | (vertical bar), or any control characters
+ // (\u0000-\u001f | \u007f - \u009f). Also, it must not contain the literal
+ // string "arn".
+ Name *string `locationName:"name" min:"1" type:"string" required:"true"`
+
+ // The version of the workflow type.
+ //
+ // The workflow type consists of the name and version, the combination of which
+ // must be unique within the domain. To get a list of all currently registered
+ // workflow types, use the ListWorkflowTypes action. The specified string must
+ // not start or end with whitespace. It must not contain a : (colon), / (slash),
+ // | (vertical bar), or any control characters (\u0000-\u001f | \u007f - \u009f).
+ // Also, it must not contain the literal string "arn".
+ Version *string `locationName:"version" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RegisterWorkflowTypeInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterWorkflowTypeInput) GoString() string {
+ return s.String()
+}
+
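[Editor's note: a matching RegisterWorkflowType sketch, not part of the diff. Unlike the task timeouts, defaultExecutionStartToCloseTimeout cannot be "NONE"; names and values are illustrative, and swf.ChildPolicyTerminate is the generated enum constant for the TERMINATE child policy:]

_, err := svc.RegisterWorkflowType(&swf.RegisterWorkflowTypeInput{
	Domain:                              aws.String("example-domain"),
	Name:                                aws.String("image-pipeline"),
	Version:                             aws.String("1.0"),
	DefaultChildPolicy:                  aws.String(swf.ChildPolicyTerminate),
	DefaultTaskList:                     &swf.TaskList{Name: aws.String("example-decider-list")},
	DefaultExecutionStartToCloseTimeout: aws.String("3600"), // "NONE" is rejected here
	DefaultTaskStartToCloseTimeout:      aws.String("30"),
})

+// Validate inspects the fields of the type to determine if they are valid.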
+func (s *RegisterWorkflowTypeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RegisterWorkflowTypeInput"} + if s.DefaultLambdaRole != nil && len(*s.DefaultLambdaRole) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DefaultLambdaRole", 1)) + } + if s.Domain == nil { + invalidParams.Add(request.NewErrParamRequired("Domain")) + } + if s.Domain != nil && len(*s.Domain) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Domain", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Version == nil { + invalidParams.Add(request.NewErrParamRequired("Version")) + } + if s.Version != nil && len(*s.Version) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Version", 1)) + } + if s.DefaultTaskList != nil { + if err := s.DefaultTaskList.Validate(); err != nil { + invalidParams.AddNested("DefaultTaskList", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RegisterWorkflowTypeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RegisterWorkflowTypeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterWorkflowTypeOutput) GoString() string { + return s.String() +} + +// Provides details of the RequestCancelActivityTask decision. +// +// Access Control +// +// You can use IAM policies to control this decision's access to Amazon SWF +// resources as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. You cannot use an IAM policy to constrain this action's parameters. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +type RequestCancelActivityTaskDecisionAttributes struct { + _ struct{} `type:"structure"` + + // The activityId of the activity task to be canceled. + ActivityId *string `locationName:"activityId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RequestCancelActivityTaskDecisionAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestCancelActivityTaskDecisionAttributes) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RequestCancelActivityTaskDecisionAttributes) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RequestCancelActivityTaskDecisionAttributes"} + if s.ActivityId == nil { + invalidParams.Add(request.NewErrParamRequired("ActivityId")) + } + if s.ActivityId != nil && len(*s.ActivityId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ActivityId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Provides details of the RequestCancelActivityTaskFailed event. 
+type RequestCancelActivityTaskFailedEventAttributes struct { + _ struct{} `type:"structure"` + + // The activityId provided in the RequestCancelActivityTask decision that failed. + ActivityId *string `locationName:"activityId" min:"1" type:"string" required:"true"` + + // The cause of the failure. This information is generated by the system and + // can be useful for diagnostic purposes. + // + // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because + // it lacked sufficient permissions. For details and example IAM policies, see + // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). + Cause *string `locationName:"cause" type:"string" required:"true" enum:"RequestCancelActivityTaskFailedCause"` + + // The ID of the DecisionTaskCompleted event corresponding to the decision task + // that resulted in the RequestCancelActivityTask decision for this cancellation + // request. This information can be useful for diagnosing problems by tracing + // back the chain of events leading up to this event. + DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` +} + +// String returns the string representation +func (s RequestCancelActivityTaskFailedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestCancelActivityTaskFailedEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the RequestCancelExternalWorkflowExecution decision. +// +// Access Control +// +// You can use IAM policies to control this decision's access to Amazon SWF +// resources as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. You cannot use an IAM policy to constrain this action's parameters. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +type RequestCancelExternalWorkflowExecutionDecisionAttributes struct { + _ struct{} `type:"structure"` + + // Optional. Data attached to the event that can be used by the decider in subsequent + // workflow tasks. + Control *string `locationName:"control" type:"string"` + + // The runId of the external workflow execution to cancel. + RunId *string `locationName:"runId" type:"string"` + + // Required. The workflowId of the external workflow execution to cancel. + WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RequestCancelExternalWorkflowExecutionDecisionAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestCancelExternalWorkflowExecutionDecisionAttributes) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *RequestCancelExternalWorkflowExecutionDecisionAttributes) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RequestCancelExternalWorkflowExecutionDecisionAttributes"} + if s.WorkflowId == nil { + invalidParams.Add(request.NewErrParamRequired("WorkflowId")) + } + if s.WorkflowId != nil && len(*s.WorkflowId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WorkflowId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Provides details of the RequestCancelExternalWorkflowExecutionFailed event. +type RequestCancelExternalWorkflowExecutionFailedEventAttributes struct { + _ struct{} `type:"structure"` + + // The cause of the failure. This information is generated by the system and + // can be useful for diagnostic purposes. + // + // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because + // it lacked sufficient permissions. For details and example IAM policies, see + // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). + Cause *string `locationName:"cause" type:"string" required:"true" enum:"RequestCancelExternalWorkflowExecutionFailedCause"` + + Control *string `locationName:"control" type:"string"` + + // The ID of the DecisionTaskCompleted event corresponding to the decision task + // that resulted in the RequestCancelExternalWorkflowExecution decision for + // this cancellation request. This information can be useful for diagnosing + // problems by tracing back the chain of events leading up to this event. + DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` + + // The ID of the RequestCancelExternalWorkflowExecutionInitiated event corresponding + // to the RequestCancelExternalWorkflowExecution decision to cancel this external + // workflow execution. This information can be useful for diagnosing problems + // by tracing back the chain of events leading up to this event. + InitiatedEventId *int64 `locationName:"initiatedEventId" type:"long" required:"true"` + + // The runId of the external workflow execution. + RunId *string `locationName:"runId" type:"string"` + + // The workflowId of the external workflow to which the cancel request was to + // be delivered. + WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RequestCancelExternalWorkflowExecutionFailedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestCancelExternalWorkflowExecutionFailedEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the RequestCancelExternalWorkflowExecutionInitiated event. +type RequestCancelExternalWorkflowExecutionInitiatedEventAttributes struct { + _ struct{} `type:"structure"` + + // Optional. Data attached to the event that can be used by the decider in subsequent + // workflow tasks. + Control *string `locationName:"control" type:"string"` + + // The ID of the DecisionTaskCompleted event corresponding to the decision task + // that resulted in the RequestCancelExternalWorkflowExecution decision for + // this cancellation request. This information can be useful for diagnosing + // problems by tracing back the chain of events leading up to this event. 
+ DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` + + // The runId of the external workflow execution to be canceled. + RunId *string `locationName:"runId" type:"string"` + + // The workflowId of the external workflow execution to be canceled. + WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RequestCancelExternalWorkflowExecutionInitiatedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestCancelExternalWorkflowExecutionInitiatedEventAttributes) GoString() string { + return s.String() +} + +type RequestCancelWorkflowExecutionInput struct { + _ struct{} `type:"structure"` + + // The name of the domain containing the workflow execution to cancel. + Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` + + // The runId of the workflow execution to cancel. + RunId *string `locationName:"runId" type:"string"` + + // The workflowId of the workflow execution to cancel. + WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RequestCancelWorkflowExecutionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestCancelWorkflowExecutionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RequestCancelWorkflowExecutionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RequestCancelWorkflowExecutionInput"} + if s.Domain == nil { + invalidParams.Add(request.NewErrParamRequired("Domain")) + } + if s.Domain != nil && len(*s.Domain) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Domain", 1)) + } + if s.WorkflowId == nil { + invalidParams.Add(request.NewErrParamRequired("WorkflowId")) + } + if s.WorkflowId != nil && len(*s.WorkflowId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WorkflowId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RequestCancelWorkflowExecutionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RequestCancelWorkflowExecutionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestCancelWorkflowExecutionOutput) GoString() string { + return s.String() +} + +type RespondActivityTaskCanceledInput struct { + _ struct{} `type:"structure"` + + // Optional. Information about the cancellation. + Details *string `locationName:"details" type:"string"` + + // The taskToken of the ActivityTask. + // + // taskToken is generated by the service and should be treated as an opaque + // value. If the task is passed to another process, its taskToken must also + // be passed. This enables it to provide its progress and respond with results. + TaskToken *string `locationName:"taskToken" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RespondActivityTaskCanceledInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RespondActivityTaskCanceledInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *RespondActivityTaskCanceledInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RespondActivityTaskCanceledInput"}
+ if s.TaskToken == nil {
+ invalidParams.Add(request.NewErrParamRequired("TaskToken"))
+ }
+ if s.TaskToken != nil && len(*s.TaskToken) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("TaskToken", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type RespondActivityTaskCanceledOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s RespondActivityTaskCanceledOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RespondActivityTaskCanceledOutput) GoString() string {
+ return s.String()
+}
+
+type RespondActivityTaskCompletedInput struct {
+ _ struct{} `type:"structure"`
+
+ // The result of the activity task. It is a free-form string that is implementation-specific.
+ Result *string `locationName:"result" type:"string"`
+
+ // The taskToken of the ActivityTask.
+ //
+ // taskToken is generated by the service and should be treated as an opaque
+ // value. If the task is passed to another process, its taskToken must also
+ // be passed. This enables it to provide its progress and respond with results.
+ TaskToken *string `locationName:"taskToken" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RespondActivityTaskCompletedInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RespondActivityTaskCompletedInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RespondActivityTaskCompletedInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RespondActivityTaskCompletedInput"}
+ if s.TaskToken == nil {
+ invalidParams.Add(request.NewErrParamRequired("TaskToken"))
+ }
+ if s.TaskToken != nil && len(*s.TaskToken) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("TaskToken", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type RespondActivityTaskCompletedOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s RespondActivityTaskCompletedOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RespondActivityTaskCompletedOutput) GoString() string {
+ return s.String()
+}
+
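[Editor's note: the canceled/completed/failed trio all close out an activity task by its token. Failing a task is the interesting case: reason works best as a short, stable error code, while details can carry free-form diagnostics. A sketch, not part of the diff:]

func failTask(svc *swf.SWF, taskToken string, cause error) error {
	_, err := svc.RespondActivityTaskFailed(&swf.RespondActivityTaskFailedInput{
		TaskToken: aws.String(taskToken),
		Reason:    aws.String("ProcessingError"), // short, stable error code
		Details:   aws.String(cause.Error()),     // free-form diagnostics
	})
	return err
}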
+type RespondActivityTaskFailedInput struct {
+ _ struct{} `type:"structure"`
+
+ // Optional. Detailed information about the failure.
+ Details *string `locationName:"details" type:"string"`
+
+ // Description of the error that may assist in diagnostics.
+ Reason *string `locationName:"reason" type:"string"`
+
+ // The taskToken of the ActivityTask.
+ //
+ // taskToken is generated by the service and should be treated as an opaque
+ // value. If the task is passed to another process, its taskToken must also
+ // be passed. This enables it to provide its progress and respond with results.
+ TaskToken *string `locationName:"taskToken" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RespondActivityTaskFailedInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RespondActivityTaskFailedInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RespondActivityTaskFailedInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RespondActivityTaskFailedInput"}
+ if s.TaskToken == nil {
+ invalidParams.Add(request.NewErrParamRequired("TaskToken"))
+ }
+ if s.TaskToken != nil && len(*s.TaskToken) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("TaskToken", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type RespondActivityTaskFailedOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s RespondActivityTaskFailedOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RespondActivityTaskFailedOutput) GoString() string {
+ return s.String()
+}
+
+type RespondDecisionTaskCompletedInput struct {
+ _ struct{} `type:"structure"`
+
+ // The list of decisions (possibly empty) made by the decider while processing
+ // this decision task. See the Decision structure for details.
+ Decisions []*Decision `locationName:"decisions" type:"list"`
+
+ // User-defined context to add to workflow execution.
+ ExecutionContext *string `locationName:"executionContext" type:"string"`
+
+ // The taskToken from the DecisionTask.
+ //
+ // taskToken is generated by the service and should be treated as an opaque
+ // value. If the task is passed to another process, its taskToken must also
+ // be passed. This enables it to provide its progress and respond with results.
+ TaskToken *string `locationName:"taskToken" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RespondDecisionTaskCompletedInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RespondDecisionTaskCompletedInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RespondDecisionTaskCompletedInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RespondDecisionTaskCompletedInput"}
+ if s.TaskToken == nil {
+ invalidParams.Add(request.NewErrParamRequired("TaskToken"))
+ }
+ if s.TaskToken != nil && len(*s.TaskToken) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("TaskToken", 1))
+ }
+ if s.Decisions != nil {
+ for i, v := range s.Decisions {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Decisions", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type RespondDecisionTaskCompletedOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s RespondDecisionTaskCompletedOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RespondDecisionTaskCompletedOutput) GoString() string {
+ return s.String()
+}
+
+// Provides details of the ScheduleActivityTask decision.
+//
+// Access Control
+//
+// You can use IAM policies to control this decision's access to Amazon SWF
+// resources as follows:
+//
+// Use a Resource element with the domain name to limit the action to only
+// specified domains. Use an Action element to allow or deny permission to call
+// this action. Constrain the following parameters by using a Condition element
+// with the appropriate keys. activityType.name: String constraint. The key
+// is swf:activityType.name. activityType.version: String constraint. The key
+// is swf:activityType.version. taskList: String constraint. The key is swf:taskList.name.
+// If the caller does not have sufficient permissions to invoke the action,
+// or the parameter values fall outside the specified constraints, the action
+// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED.
+// For details and example IAM policies, see Using IAM to Manage Access to Amazon
+// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
+type ScheduleActivityTaskDecisionAttributes struct {
+ _ struct{} `type:"structure"`
+
+ // Required. The activityId of the activity task.
+ //
+ // The specified string must not start or end with whitespace. It must not
+ // contain a : (colon), / (slash), | (vertical bar), or any control characters
+ // (\u0000-\u001f | \u007f - \u009f). Also, it must not contain the literal
+ // string "arn".
+ ActivityId *string `locationName:"activityId" min:"1" type:"string" required:"true"`
+
+ // Required. The type of the activity task to schedule.
+ ActivityType *ActivityType `locationName:"activityType" type:"structure" required:"true"`
+
+ // Optional. Data attached to the event that can be used by the decider in subsequent
+ // workflow tasks. This data is not sent to the activity.
+ Control *string `locationName:"control" type:"string"`
+
+ // If set, specifies the maximum time before which a worker processing a task
+ // of this type must report progress by calling RecordActivityTaskHeartbeat.
+ // If the timeout is exceeded, the activity task is automatically timed out.
+ // If the worker subsequently attempts to record a heartbeat or returns a result,
+ // it will be ignored. This overrides the default heartbeat timeout specified
+ // when registering the activity type using RegisterActivityType.
+ //
+ // The duration is specified in seconds; an integer greater than or equal to
+ // 0. The value "NONE" can be used to specify unlimited duration.
+ HeartbeatTimeout *string `locationName:"heartbeatTimeout" type:"string"`
+
+ // The input provided to the activity task.
+ Input *string `locationName:"input" type:"string"`
+
+ // The maximum duration for this activity task.
+ //
+ // The duration is specified in seconds; an integer greater than or equal to
+ // 0. The value "NONE" can be used to specify unlimited duration.
+ //
+ // A schedule-to-close timeout for this activity task must be specified either
+ // as a default for the activity type or through this field. If neither this
+ // field is set nor a default schedule-to-close timeout was specified at registration
+ // time then a fault will be returned.
+ ScheduleToCloseTimeout *string `locationName:"scheduleToCloseTimeout" type:"string"`
+
+ // Optional. If set, specifies the maximum duration the activity task can wait
+ // to be assigned to a worker. This overrides the default schedule-to-start
+ // timeout specified when registering the activity type using RegisterActivityType.
+ //
+ // The duration is specified in seconds; an integer greater than or equal to
+ // 0. The value "NONE" can be used to specify unlimited duration.
+ //
+ // A schedule-to-start timeout for this activity task must be specified either
+ // as a default for the activity type or through this field. If neither this
+ // field is set nor a default schedule-to-start timeout was specified at registration
+ // time then a fault will be returned.
+ ScheduleToStartTimeout *string `locationName:"scheduleToStartTimeout" type:"string"`
+
+ // If set, specifies the maximum duration a worker may take to process this
+ // activity task. This overrides the default start-to-close timeout specified
+ // when registering the activity type using RegisterActivityType.
+ //
+ // The duration is specified in seconds; an integer greater than or equal to
+ // 0. The value "NONE" can be used to specify unlimited duration.
+ //
+ // A start-to-close timeout for this activity task must be specified either
+ // as a default for the activity type or through this field. If neither this
+ // field is set nor a default start-to-close timeout was specified at registration
+ // time then a fault will be returned.
+ StartToCloseTimeout *string `locationName:"startToCloseTimeout" type:"string"`
+
+ // If set, specifies the name of the task list in which to schedule the activity
+ // task. If not specified, the defaultTaskList registered with the activity
+ // type will be used.
+ //
+ // A task list for this activity task must be specified either as a default
+ // for the activity type or through this field. If neither this field is set
+ // nor a default task list was specified at registration time then a fault will
+ // be returned. The specified string must not start or end with whitespace.
+ // It must not contain a : (colon), / (slash), | (vertical bar), or any control
+ // characters (\u0000-\u001f | \u007f - \u009f). Also, it must not contain the
+ // literal string "arn".
+ TaskList *TaskList `locationName:"taskList" type:"structure"`
+
+ // Optional. If set, specifies the priority with which the activity task is
+ // to be assigned to a worker. This overrides the defaultTaskPriority specified
+ // when registering the activity type using RegisterActivityType. Valid values
+ // are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE
+ // (2147483647). Higher numbers indicate higher priority.
+ //
+ // For more information about setting task priority, see Setting Task Priority
+ // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html)
+ // in the Amazon Simple Workflow Developer Guide.
+ TaskPriority *string `locationName:"taskPriority" type:"string"`
+}
+
+// String returns the string representation
+func (s ScheduleActivityTaskDecisionAttributes) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ScheduleActivityTaskDecisionAttributes) GoString() string {
+ return s.String()
+}
+
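[Editor's note: on the decider side, a ScheduleActivityTask decision is returned through RespondDecisionTaskCompleted. A sketch of one decider turn that schedules the activity type registered earlier, not part of the diff; names remain hypothetical, and swf.DecisionTypeScheduleActivityTask is the generated enum constant for this decision type:]

func decideOnce(svc *swf.SWF) error {
	task, err := svc.PollForDecisionTask(&swf.PollForDecisionTaskInput{
		Domain:   aws.String("example-domain"),
		TaskList: &swf.TaskList{Name: aws.String("example-decider-list")},
	})
	if err != nil || aws.StringValue(task.TaskToken) == "" {
		return err // error, or long poll expired with no decision task
	}
	_, err = svc.RespondDecisionTaskCompleted(&swf.RespondDecisionTaskCompletedInput{
		TaskToken: task.TaskToken,
		Decisions: []*swf.Decision{{
			DecisionType: aws.String(swf.DecisionTypeScheduleActivityTask),
			ScheduleActivityTaskDecisionAttributes: &swf.ScheduleActivityTaskDecisionAttributes{
				ActivityId:   aws.String("resize-1"),
				ActivityType: &swf.ActivityType{Name: aws.String("resize-image"), Version: aws.String("1.0")},
				Input:        aws.String(`{"key":"photo.jpg"}`),
			},
		}},
	})
	return err
}

+// Validate inspects the fields of the type to determine if they are valid.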
+func (s *ScheduleActivityTaskDecisionAttributes) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ScheduleActivityTaskDecisionAttributes"} + if s.ActivityId == nil { + invalidParams.Add(request.NewErrParamRequired("ActivityId")) + } + if s.ActivityId != nil && len(*s.ActivityId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ActivityId", 1)) + } + if s.ActivityType == nil { + invalidParams.Add(request.NewErrParamRequired("ActivityType")) + } + if s.ActivityType != nil { + if err := s.ActivityType.Validate(); err != nil { + invalidParams.AddNested("ActivityType", err.(request.ErrInvalidParams)) + } + } + if s.TaskList != nil { + if err := s.TaskList.Validate(); err != nil { + invalidParams.AddNested("TaskList", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Provides details of the ScheduleActivityTaskFailed event. +type ScheduleActivityTaskFailedEventAttributes struct { + _ struct{} `type:"structure"` + + // The activityId provided in the ScheduleActivityTask decision that failed. + ActivityId *string `locationName:"activityId" min:"1" type:"string" required:"true"` + + // The activity type provided in the ScheduleActivityTask decision that failed. + ActivityType *ActivityType `locationName:"activityType" type:"structure" required:"true"` + + // The cause of the failure. This information is generated by the system and + // can be useful for diagnostic purposes. + // + // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because + // it lacked sufficient permissions. For details and example IAM policies, see + // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). + Cause *string `locationName:"cause" type:"string" required:"true" enum:"ScheduleActivityTaskFailedCause"` + + // The ID of the DecisionTaskCompleted event corresponding to the decision that + // resulted in the scheduling of this activity task. This information can be + // useful for diagnosing problems by tracing back the chain of events leading + // up to this event. + DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` +} + +// String returns the string representation +func (s ScheduleActivityTaskFailedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduleActivityTaskFailedEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the ScheduleLambdaFunction decision. +// +// Access Control +// +// You can use IAM policies to control this decision's access to Amazon SWF +// resources as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. Constrain the following parameters by using a Condition element +// with the appropriate keys. activityType.name: String constraint. The key +// is swf:activityType.name. activityType.version: String constraint. The key +// is swf:activityType.version. taskList: String constraint. The key is swf:taskList.name. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. 
+// For details and example IAM policies, see Using IAM to Manage Access to Amazon
+// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
+type ScheduleLambdaFunctionDecisionAttributes struct {
+ _ struct{} `type:"structure"`
+
+ // Required. The SWF id of the AWS Lambda task.
+ //
+ // The specified string must not start or end with whitespace. It must not
+ // contain a : (colon), / (slash), | (vertical bar), or any control characters
+ // (\u0000-\u001f | \u007f - \u009f). Also, it must not contain the literal
+ // string "arn".
+ Id *string `locationName:"id" min:"1" type:"string" required:"true"`
+
+ // The input provided to the AWS Lambda function.
+ Input *string `locationName:"input" min:"1" type:"string"`
+
+ // Required. The name of the AWS Lambda function to invoke.
+ Name *string `locationName:"name" min:"1" type:"string" required:"true"`
+
+ // If set, specifies the maximum duration the function may take to execute.
+ StartToCloseTimeout *string `locationName:"startToCloseTimeout" type:"string"`
+}
+
+// String returns the string representation
+func (s ScheduleLambdaFunctionDecisionAttributes) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ScheduleLambdaFunctionDecisionAttributes) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ScheduleLambdaFunctionDecisionAttributes) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ScheduleLambdaFunctionDecisionAttributes"}
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+ if s.Id != nil && len(*s.Id) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Id", 1))
+ }
+ if s.Input != nil && len(*s.Input) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Input", 1))
+ }
+ if s.Name == nil {
+ invalidParams.Add(request.NewErrParamRequired("Name"))
+ }
+ if s.Name != nil && len(*s.Name) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Name", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Provides details for the ScheduleLambdaFunctionFailed event.
+type ScheduleLambdaFunctionFailedEventAttributes struct {
+ _ struct{} `type:"structure"`
+
+ // The cause of the failure. This information is generated by the system and
+ // can be useful for diagnostic purposes.
+ //
+ // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because
+ // it lacked sufficient permissions. For details and example IAM policies, see
+ // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
+ Cause *string `locationName:"cause" type:"string" required:"true" enum:"ScheduleLambdaFunctionFailedCause"`
+
+ // The ID of the DecisionTaskCompleted event corresponding to the decision that
+ // resulted in the scheduling of this AWS Lambda function. This information
+ // can be useful for diagnosing problems by tracing back the chain of events
+ // leading up to this event.
+ DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"`
+
+ // The unique Amazon SWF ID of the AWS Lambda task.
+ Id *string `locationName:"id" min:"1" type:"string" required:"true"`
+
+ // The name of the scheduled AWS Lambda function.
+ Name *string `locationName:"name" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ScheduleLambdaFunctionFailedEventAttributes) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ScheduleLambdaFunctionFailedEventAttributes) GoString() string {
+ return s.String()
+}
+
+// Provides details of the SignalExternalWorkflowExecution decision.
+//
+// Access Control
+//
+// You can use IAM policies to control this decision's access to Amazon SWF
+// resources as follows:
+//
+// Use a Resource element with the domain name to limit the action to only
+// specified domains. Use an Action element to allow or deny permission to call
+// this action. You cannot use an IAM policy to constrain this action's parameters.
+// If the caller does not have sufficient permissions to invoke the action,
+// or the parameter values fall outside the specified constraints, the action
+// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED.
+// For details and example IAM policies, see Using IAM to Manage Access to Amazon
+// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
+type SignalExternalWorkflowExecutionDecisionAttributes struct {
+ _ struct{} `type:"structure"`
+
+ // Optional. Data attached to the event that can be used by the decider in subsequent
+ // decision tasks.
+ Control *string `locationName:"control" type:"string"`
+
+ // Optional. Input data to be provided with the signal. The target workflow
+ // execution will use the signal name and input data to process the signal.
+ Input *string `locationName:"input" type:"string"`
+
+ // The runId of the workflow execution to be signaled.
+ RunId *string `locationName:"runId" type:"string"`
+
+ // Required. The name of the signal. The target workflow execution will use the
+ // signal name and input to process the signal.
+ SignalName *string `locationName:"signalName" min:"1" type:"string" required:"true"`
+
+ // Required. The workflowId of the workflow execution to be signaled.
+ WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s SignalExternalWorkflowExecutionDecisionAttributes) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SignalExternalWorkflowExecutionDecisionAttributes) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SignalExternalWorkflowExecutionDecisionAttributes) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "SignalExternalWorkflowExecutionDecisionAttributes"}
+ if s.SignalName == nil {
+ invalidParams.Add(request.NewErrParamRequired("SignalName"))
+ }
+ if s.SignalName != nil && len(*s.SignalName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("SignalName", 1))
+ }
+ if s.WorkflowId == nil {
+ invalidParams.Add(request.NewErrParamRequired("WorkflowId"))
+ }
+ if s.WorkflowId != nil && len(*s.WorkflowId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("WorkflowId", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
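[Editor's note: a decider emits the structure above as a SignalExternalWorkflowExecution decision; code outside a workflow achieves the same effect directly through the SignalWorkflowExecution API defined further below. A sketch of the direct call, not part of the diff; identifiers are hypothetical, and omitting RunId targets the currently open run of the given workflowId:]

_, err := svc.SignalWorkflowExecution(&swf.SignalWorkflowExecutionInput{
	Domain:     aws.String("example-domain"),
	WorkflowId: aws.String("order-1234"),
	SignalName: aws.String("approve"),
	Input:      aws.String(`{"approved":true}`),
})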
+	// This information is generated by the system and
+	// can be useful for diagnostic purposes.
+	//
+	// If cause is set to OPERATION_NOT_PERMITTED, the decision failed because
+	// it lacked sufficient permissions. For details and example IAM policies, see
+	// Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
+	Cause *string `locationName:"cause" type:"string" required:"true" enum:"SignalExternalWorkflowExecutionFailedCause"`
+
+	Control *string `locationName:"control" type:"string"`
+
+	// The ID of the DecisionTaskCompleted event corresponding to the decision task
+	// that resulted in the SignalExternalWorkflowExecution decision for this signal.
+	// This information can be useful for diagnosing problems by tracing back the
+	// chain of events leading up to this event.
+	DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"`
+
+	// The ID of the SignalExternalWorkflowExecutionInitiated event corresponding
+	// to the SignalExternalWorkflowExecution decision to request this signal. This
+	// information can be useful for diagnosing problems by tracing back the chain
+	// of events leading up to this event.
+	InitiatedEventId *int64 `locationName:"initiatedEventId" type:"long" required:"true"`
+
+	// The runId of the external workflow execution that the signal was being delivered
+	// to.
+	RunId *string `locationName:"runId" type:"string"`
+
+	// The workflowId of the external workflow execution that the signal was being
+	// delivered to.
+	WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s SignalExternalWorkflowExecutionFailedEventAttributes) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SignalExternalWorkflowExecutionFailedEventAttributes) GoString() string {
+	return s.String()
+}
+
+// Provides details of the SignalExternalWorkflowExecutionInitiated event.
+type SignalExternalWorkflowExecutionInitiatedEventAttributes struct {
+	_ struct{} `type:"structure"`
+
+	// Optional. Data attached to the event that can be used by the decider in subsequent
+	// decision tasks.
+	Control *string `locationName:"control" type:"string"`
+
+	// The ID of the DecisionTaskCompleted event corresponding to the decision task
+	// that resulted in the SignalExternalWorkflowExecution decision for this signal.
+	// This information can be useful for diagnosing problems by tracing back the
+	// chain of events leading up to this event.
+	DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"`
+
+	// Input provided to the signal (if any).
+	Input *string `locationName:"input" type:"string"`
+
+	// The runId of the external workflow execution to send the signal to.
+	RunId *string `locationName:"runId" type:"string"`
+
+	// The name of the signal.
+	SignalName *string `locationName:"signalName" min:"1" type:"string" required:"true"`
+
+	// The workflowId of the external workflow execution.
+ WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s SignalExternalWorkflowExecutionInitiatedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SignalExternalWorkflowExecutionInitiatedEventAttributes) GoString() string { + return s.String() +} + +type SignalWorkflowExecutionInput struct { + _ struct{} `type:"structure"` + + // The name of the domain containing the workflow execution to signal. + Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` + + // Data to attach to the WorkflowExecutionSignaled event in the target workflow + // execution's history. + Input *string `locationName:"input" type:"string"` + + // The runId of the workflow execution to signal. + RunId *string `locationName:"runId" type:"string"` + + // The name of the signal. This name must be meaningful to the target workflow. + SignalName *string `locationName:"signalName" min:"1" type:"string" required:"true"` + + // The workflowId of the workflow execution to signal. + WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s SignalWorkflowExecutionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SignalWorkflowExecutionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SignalWorkflowExecutionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SignalWorkflowExecutionInput"} + if s.Domain == nil { + invalidParams.Add(request.NewErrParamRequired("Domain")) + } + if s.Domain != nil && len(*s.Domain) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Domain", 1)) + } + if s.SignalName == nil { + invalidParams.Add(request.NewErrParamRequired("SignalName")) + } + if s.SignalName != nil && len(*s.SignalName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SignalName", 1)) + } + if s.WorkflowId == nil { + invalidParams.Add(request.NewErrParamRequired("WorkflowId")) + } + if s.WorkflowId != nil && len(*s.WorkflowId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WorkflowId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type SignalWorkflowExecutionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SignalWorkflowExecutionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SignalWorkflowExecutionOutput) GoString() string { + return s.String() +} + +// Provides details of the StartChildWorkflowExecution decision. +// +// Access Control +// +// You can use IAM policies to control this decision's access to Amazon SWF +// resources as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. Constrain the following parameters by using a Condition element +// with the appropriate keys. tagList.member.N: The key is "swf:tagList.N" +// where N is the tag number from 0 to 4, inclusive. taskList: String constraint. +// The key is swf:taskList.name. workflowType.name: String constraint. The key +// is swf:workflowType.name. workflowType.version: String constraint. 
The key +// is swf:workflowType.version. If the caller does not have sufficient permissions +// to invoke the action, or the parameter values fall outside the specified +// constraints, the action fails. The associated event attribute's cause parameter +// will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, +// see Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +type StartChildWorkflowExecutionDecisionAttributes struct { + _ struct{} `type:"structure"` + + // Optional. If set, specifies the policy to use for the child workflow executions + // if the workflow execution being started is terminated by calling the TerminateWorkflowExecution + // action explicitly or due to an expired timeout. This policy overrides the + // default child policy specified when registering the workflow type using RegisterWorkflowType. + // + // The supported child policies are: + // + // TERMINATE: the child executions will be terminated. REQUEST_CANCEL: a request + // to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested + // event in its history. It is up to the decider to take appropriate actions + // when it receives an execution history with this event. ABANDON: no action + // will be taken. The child executions will continue to run. A child policy + // for this workflow execution must be specified either as a default for the + // workflow type or through this parameter. If neither this parameter is set + // nor a default child policy was specified at registration time then a fault + // will be returned. + ChildPolicy *string `locationName:"childPolicy" type:"string" enum:"ChildPolicy"` + + // Optional. Data attached to the event that can be used by the decider in subsequent + // workflow tasks. This data is not sent to the child workflow execution. + Control *string `locationName:"control" type:"string"` + + // The total duration for this workflow execution. This overrides the defaultExecutionStartToCloseTimeout + // specified when registering the workflow type. + // + // The duration is specified in seconds; an integer greater than or equal to + // 0. The value "NONE" can be used to specify unlimited duration. + // + // An execution start-to-close timeout for this workflow execution must be + // specified either as a default for the workflow type or through this parameter. + // If neither this parameter is set nor a default execution start-to-close timeout + // was specified at registration time then a fault will be returned. + ExecutionStartToCloseTimeout *string `locationName:"executionStartToCloseTimeout" type:"string"` + + // The input to be provided to the workflow execution. + Input *string `locationName:"input" type:"string"` + + // The ARN of an IAM role that authorizes Amazon SWF to invoke AWS Lambda functions. + // + // In order for this workflow execution to invoke AWS Lambda functions, an + // appropriate IAM role must be specified either as a default for the workflow + // type or through this field. + LambdaRole *string `locationName:"lambdaRole" min:"1" type:"string"` + + // The list of tags to associate with the child workflow execution. A maximum + // of 5 tags can be specified. You can list workflow executions with a specific + // tag by calling ListOpenWorkflowExecutions or ListClosedWorkflowExecutions + // and specifying a TagFilter. 
+	TagList []*string `locationName:"tagList" type:"list"`
+
+	// The name of the task list to be used for decision tasks of the child workflow
+	// execution.
+	//
+	// A task list for this workflow execution must be specified either as a default
+	// for the workflow type or through this parameter. If neither this parameter
+	// is set nor a default task list was specified at registration time then a
+	// fault will be returned. The specified string must not start or end with whitespace.
+	// It must not contain a : (colon), / (slash), | (vertical bar), or any control
+	// characters (\u0000-\u001f | \u007f - \u009f). Also, it must not contain the
+	// literal string "arn".
+	TaskList *TaskList `locationName:"taskList" type:"structure"`
+
+	// Optional. A task priority that, if set, specifies the priority for a decision
+	// task of this workflow execution. This overrides the defaultTaskPriority specified
+	// when registering the workflow type. Valid values are integers that range
+	// from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647).
+	// Higher numbers indicate higher priority.
+	//
+	// For more information about setting task priority, see Setting Task Priority
+	// (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html)
+	// in the Amazon Simple Workflow Developer Guide.
+	TaskPriority *string `locationName:"taskPriority" type:"string"`
+
+	// Specifies the maximum duration of decision tasks for this workflow execution.
+	// This parameter overrides the defaultTaskStartToCloseTimeout specified when
+	// registering the workflow type using RegisterWorkflowType.
+	//
+	// The duration is specified in seconds; an integer greater than or equal to
+	// 0. The value "NONE" can be used to specify unlimited duration.
+	//
+	// A task start-to-close timeout for this workflow execution must be specified
+	// either as a default for the workflow type or through this parameter. If neither
+	// this parameter is set nor a default task start-to-close timeout was specified
+	// at registration time then a fault will be returned.
+	TaskStartToCloseTimeout *string `locationName:"taskStartToCloseTimeout" type:"string"`
+
+	// Required. The workflowId of the workflow execution.
+	//
+	// The specified string must not start or end with whitespace. It must not
+	// contain a : (colon), / (slash), | (vertical bar), or any control characters
+	// (\u0000-\u001f | \u007f - \u009f). Also, it must not contain the literal
+	// string "arn".
+	WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"`
+
+	// Required. The type of the workflow execution to be started.
+	WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s StartChildWorkflowExecutionDecisionAttributes) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StartChildWorkflowExecutionDecisionAttributes) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
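+//
+// As an illustrative sketch only (not generated documentation; the field
+// values here are hypothetical), a decider might validate the attributes
+// client-side before emitting the decision:
+//
+//	attrs := &swf.StartChildWorkflowExecutionDecisionAttributes{
+//		WorkflowId: aws.String("child-order-1"),
+//		WorkflowType: &swf.WorkflowType{
+//			Name:    aws.String("ProcessOrder"),
+//			Version: aws.String("1.0"),
+//		},
+//	}
+//	if err := attrs.Validate(); err != nil {
+//		// err is a request.ErrInvalidParams describing the invalid fields.
+//	}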
+func (s *StartChildWorkflowExecutionDecisionAttributes) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "StartChildWorkflowExecutionDecisionAttributes"}
+	if s.LambdaRole != nil && len(*s.LambdaRole) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("LambdaRole", 1))
+	}
+	if s.WorkflowId == nil {
+		invalidParams.Add(request.NewErrParamRequired("WorkflowId"))
+	}
+	if s.WorkflowId != nil && len(*s.WorkflowId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("WorkflowId", 1))
+	}
+	if s.WorkflowType == nil {
+		invalidParams.Add(request.NewErrParamRequired("WorkflowType"))
+	}
+	if s.TaskList != nil {
+		if err := s.TaskList.Validate(); err != nil {
+			invalidParams.AddNested("TaskList", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.WorkflowType != nil {
+		if err := s.WorkflowType.Validate(); err != nil {
+			invalidParams.AddNested("WorkflowType", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Provides details of the StartChildWorkflowExecutionFailed event.
+type StartChildWorkflowExecutionFailedEventAttributes struct {
+	_ struct{} `type:"structure"`
+
+	// The cause of the failure. This information is generated by the system and
+	// can be useful for diagnostic purposes.
+	//
+	// If cause is set to OPERATION_NOT_PERMITTED, the decision failed because
+	// it lacked sufficient permissions. For details and example IAM policies, see
+	// Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
+	Cause *string `locationName:"cause" type:"string" required:"true" enum:"StartChildWorkflowExecutionFailedCause"`
+
+	Control *string `locationName:"control" type:"string"`
+
+	// The ID of the DecisionTaskCompleted event corresponding to the decision task
+	// that resulted in the StartChildWorkflowExecution decision to request this
+	// child workflow execution. This information can be useful for diagnosing problems
+	// by tracing back the chain of events leading up to this event.
+	DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"`
+
+	// The ID of the StartChildWorkflowExecutionInitiated event corresponding to
+	// the StartChildWorkflowExecution decision to start this child workflow execution.
+	// This information can be useful for diagnosing problems by tracing back the
+	// chain of events leading up to this event.
+	InitiatedEventId *int64 `locationName:"initiatedEventId" type:"long" required:"true"`
+
+	// The workflowId of the child workflow execution.
+	WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"`
+
+	// The workflow type provided in the StartChildWorkflowExecution decision that
+	// failed.
+	WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s StartChildWorkflowExecutionFailedEventAttributes) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StartChildWorkflowExecutionFailedEventAttributes) GoString() string {
+	return s.String()
+}
+
+// Provides details of the StartChildWorkflowExecutionInitiated event.
+type StartChildWorkflowExecutionInitiatedEventAttributes struct {
+	_ struct{} `type:"structure"`
+
+	// The policy to use for the child workflow executions if this execution gets
+	// terminated by explicitly calling the TerminateWorkflowExecution action or
+	// due to an expired timeout.
+	//
+	// The supported child policies are:
+	//
+	// TERMINATE: the child executions will be terminated. REQUEST_CANCEL: a request
+	// to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested
+	// event in its history. It is up to the decider to take appropriate actions
+	// when it receives an execution history with this event. ABANDON: no action
+	// will be taken. The child executions will continue to run.
+	ChildPolicy *string `locationName:"childPolicy" type:"string" required:"true" enum:"ChildPolicy"`
+
+	// Optional. Data attached to the event that can be used by the decider in subsequent
+	// decision tasks. This data is not sent to the child workflow execution.
+	Control *string `locationName:"control" type:"string"`
+
+	// The ID of the DecisionTaskCompleted event corresponding to the decision task
+	// that resulted in the StartChildWorkflowExecution decision to request this
+	// child workflow execution. This information can be useful for diagnosing problems
+	// by tracing back the chain of events leading up to this event.
+	DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"`
+
+	// The maximum duration for the child workflow execution. If the workflow execution
+	// is not closed within this duration, it will be timed out and force terminated.
+	//
+	// The duration is specified in seconds; an integer greater than or equal to
+	// 0. The value "NONE" can be used to specify unlimited duration.
+	ExecutionStartToCloseTimeout *string `locationName:"executionStartToCloseTimeout" type:"string"`
+
+	// The inputs provided to the child workflow execution (if any).
+	Input *string `locationName:"input" type:"string"`
+
+	// The IAM role attached to this workflow execution to use when invoking AWS
+	// Lambda functions.
+	LambdaRole *string `locationName:"lambdaRole" min:"1" type:"string"`
+
+	// The list of tags to associate with the child workflow execution.
+	TagList []*string `locationName:"tagList" type:"list"`
+
+	// The name of the task list used for the decision tasks of the child workflow
+	// execution.
+	TaskList *TaskList `locationName:"taskList" type:"structure" required:"true"`
+
+	// Optional. The priority assigned for the decision tasks for this workflow
+	// execution. Valid values are integers that range from Java's Integer.MIN_VALUE
+	// (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate
+	// higher priority.
+	//
+	// For more information about setting task priority, see Setting Task Priority
+	// (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html)
+	// in the Amazon Simple Workflow Developer Guide.
+	TaskPriority *string `locationName:"taskPriority" type:"string"`
+
+	// The maximum duration allowed for the decision tasks for this workflow execution.
+	//
+	// The duration is specified in seconds; an integer greater than or equal to
+	// 0. The value "NONE" can be used to specify unlimited duration.
+	TaskStartToCloseTimeout *string `locationName:"taskStartToCloseTimeout" type:"string"`
+
+	// The workflowId of the child workflow execution.
+ WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` + + // The type of the child workflow execution. + WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` +} + +// String returns the string representation +func (s StartChildWorkflowExecutionInitiatedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartChildWorkflowExecutionInitiatedEventAttributes) GoString() string { + return s.String() +} + +// Provides details for the StartLambdaFunctionFailed event. +type StartLambdaFunctionFailedEventAttributes struct { + _ struct{} `type:"structure"` + + // The cause of the failure. This information is generated by the system and + // can be useful for diagnostic purposes. + // + // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because + // it lacked sufficient permissions. For details and example IAM policies, see + // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). + Cause *string `locationName:"cause" type:"string" enum:"StartLambdaFunctionFailedCause"` + + // The error message (if any). + Message *string `locationName:"message" type:"string"` + + // The ID of the LambdaFunctionScheduled event that was recorded when this AWS + // Lambda function was scheduled. This information can be useful for diagnosing + // problems by tracing back the chain of events leading up to this event. + ScheduledEventId *int64 `locationName:"scheduledEventId" type:"long"` +} + +// String returns the string representation +func (s StartLambdaFunctionFailedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartLambdaFunctionFailedEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the StartTimer decision. +// +// Access Control +// +// You can use IAM policies to control this decision's access to Amazon SWF +// resources as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. You cannot use an IAM policy to constrain this action's parameters. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +type StartTimerDecisionAttributes struct { + _ struct{} `type:"structure"` + + // Optional. Data attached to the event that can be used by the decider in subsequent + // workflow tasks. + Control *string `locationName:"control" type:"string"` + + // Required. The duration to wait before firing the timer. + // + // The duration is specified in seconds; an integer greater than or equal to + // 0. + StartToFireTimeout *string `locationName:"startToFireTimeout" min:"1" type:"string" required:"true"` + + // Required. The unique ID of the timer. + // + // The specified string must not start or end with whitespace. It must not + // contain a : (colon), / (slash), | (vertical bar), or any control characters + // (\u0000-\u001f | \u007f - \u009f). 
Also, it must not contain the literal
+	// string "arn".
+	TimerId *string `locationName:"timerId" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s StartTimerDecisionAttributes) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StartTimerDecisionAttributes) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *StartTimerDecisionAttributes) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "StartTimerDecisionAttributes"}
+	if s.StartToFireTimeout == nil {
+		invalidParams.Add(request.NewErrParamRequired("StartToFireTimeout"))
+	}
+	if s.StartToFireTimeout != nil && len(*s.StartToFireTimeout) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("StartToFireTimeout", 1))
+	}
+	if s.TimerId == nil {
+		invalidParams.Add(request.NewErrParamRequired("TimerId"))
+	}
+	if s.TimerId != nil && len(*s.TimerId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("TimerId", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Provides details of the StartTimerFailed event.
+type StartTimerFailedEventAttributes struct {
+	_ struct{} `type:"structure"`
+
+	// The cause of the failure. This information is generated by the system and
+	// can be useful for diagnostic purposes.
+	//
+	// If cause is set to OPERATION_NOT_PERMITTED, the decision failed because
+	// it lacked sufficient permissions. For details and example IAM policies, see
+	// Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html).
+	Cause *string `locationName:"cause" type:"string" required:"true" enum:"StartTimerFailedCause"`
+
+	// The ID of the DecisionTaskCompleted event corresponding to the decision task
+	// that resulted in the StartTimer decision for this timer. This information
+	// can be useful for diagnosing problems by tracing back the chain of events
+	// leading up to this event.
+	DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"`
+
+	// The timerId provided in the StartTimer decision that failed.
+	TimerId *string `locationName:"timerId" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s StartTimerFailedEventAttributes) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StartTimerFailedEventAttributes) GoString() string {
+	return s.String()
+}
+
+type StartWorkflowExecutionInput struct {
+	_ struct{} `type:"structure"`
+
+	// If set, specifies the policy to use for the child workflow executions of
+	// this workflow execution if it is terminated by calling the TerminateWorkflowExecution
+	// action explicitly or due to an expired timeout. This policy overrides the
+	// default child policy specified when registering the workflow type using RegisterWorkflowType.
+	//
+	// The supported child policies are:
+	//
+	// TERMINATE: the child executions will be terminated. REQUEST_CANCEL: a request
+	// to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested
+	// event in its history. It is up to the decider to take appropriate actions
+	// when it receives an execution history with this event. ABANDON: no action
+	// will be taken. The child executions will continue to run.
+	// A child policy
+	// for this workflow execution must be specified either as a default for the
+	// workflow type or through this parameter. If neither this parameter is set
+	// nor a default child policy was specified at registration time then a fault
+	// will be returned.
+	ChildPolicy *string `locationName:"childPolicy" type:"string" enum:"ChildPolicy"`
+
+	// The name of the domain in which the workflow execution is created.
+	Domain *string `locationName:"domain" min:"1" type:"string" required:"true"`
+
+	// The total duration for this workflow execution. This overrides the defaultExecutionStartToCloseTimeout
+	// specified when registering the workflow type.
+	//
+	// The duration is specified in seconds; an integer greater than or equal to
+	// 0. Exceeding this limit will cause the workflow execution to time out. Unlike
+	// some of the other timeout parameters in Amazon SWF, you cannot specify a
+	// value of "NONE" for this timeout; there is a one-year max limit on the time
+	// that a workflow execution can run.
+	//
+	// An execution start-to-close timeout must be specified either through this
+	// parameter or as a default when the workflow type is registered. If neither
+	// this parameter nor a default execution start-to-close timeout is specified,
+	// a fault is returned.
+	ExecutionStartToCloseTimeout *string `locationName:"executionStartToCloseTimeout" type:"string"`
+
+	// The input for the workflow execution. This is a free-form string which should
+	// be meaningful to the workflow you are starting. This input is made available
+	// to the new workflow execution in the WorkflowExecutionStarted history event.
+	Input *string `locationName:"input" type:"string"`
+
+	// The ARN of an IAM role that authorizes Amazon SWF to invoke AWS Lambda functions.
+	//
+	// In order for this workflow execution to invoke AWS Lambda functions, an
+	// appropriate IAM role must be specified either as a default for the workflow
+	// type or through this field.
+	LambdaRole *string `locationName:"lambdaRole" min:"1" type:"string"`
+
+	// The list of tags to associate with the workflow execution. You can specify
+	// a maximum of 5 tags. You can list workflow executions with a specific tag
+	// by calling ListOpenWorkflowExecutions or ListClosedWorkflowExecutions and
+	// specifying a TagFilter.
+	TagList []*string `locationName:"tagList" type:"list"`
+
+	// The task list to use for the decision tasks generated for this workflow execution.
+	// This overrides the defaultTaskList specified when registering the workflow
+	// type.
+	//
+	// A task list for this workflow execution must be specified either as a default
+	// for the workflow type or through this parameter. If neither this parameter
+	// is set nor a default task list was specified at registration time then a
+	// fault will be returned. The specified string must not start or end with whitespace.
+	// It must not contain a : (colon), / (slash), | (vertical bar), or any control
+	// characters (\u0000-\u001f | \u007f - \u009f). Also, it must not contain the
+	// literal string "arn".
+	TaskList *TaskList `locationName:"taskList" type:"structure"`
+
+	// The task priority to use for this workflow execution. This will override
+	// any default priority that was assigned when the workflow type was registered.
+	// If not set, then the default task priority for the workflow type will be
+	// used. Valid values are integers that range from Java's Integer.MIN_VALUE
+	// (-2147483648) to Integer.MAX_VALUE (2147483647).
+	// Higher numbers indicate higher priority.
+	//
+	// For more information about setting task priority, see Setting Task Priority
+	// (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html)
+	// in the Amazon Simple Workflow Developer Guide.
+	TaskPriority *string `locationName:"taskPriority" type:"string"`
+
+	// Specifies the maximum duration of decision tasks for this workflow execution.
+	// This parameter overrides the defaultTaskStartToCloseTimeout specified when
+	// registering the workflow type using RegisterWorkflowType.
+	//
+	// The duration is specified in seconds; an integer greater than or equal to
+	// 0. The value "NONE" can be used to specify unlimited duration.
+	//
+	// A task start-to-close timeout for this workflow execution must be specified
+	// either as a default for the workflow type or through this parameter. If neither
+	// this parameter is set nor a default task start-to-close timeout was specified
+	// at registration time then a fault will be returned.
+	TaskStartToCloseTimeout *string `locationName:"taskStartToCloseTimeout" type:"string"`
+
+	// The user-defined identifier associated with the workflow execution. You can
+	// use this to associate a custom identifier with the workflow execution. You
+	// may specify the same identifier if a workflow execution is logically a restart
+	// of a previous execution. You cannot have two open workflow executions with
+	// the same workflowId at the same time.
+	//
+	// The specified string must not start or end with whitespace. It must not
+	// contain a : (colon), / (slash), | (vertical bar), or any control characters
+	// (\u0000-\u001f | \u007f - \u009f). Also, it must not contain the literal
+	// string "arn".
+	WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"`
+
+	// The type of the workflow to start.
+	WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s StartWorkflowExecutionInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StartWorkflowExecutionInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *StartWorkflowExecutionInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "StartWorkflowExecutionInput"}
+	if s.Domain == nil {
+		invalidParams.Add(request.NewErrParamRequired("Domain"))
+	}
+	if s.Domain != nil && len(*s.Domain) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Domain", 1))
+	}
+	if s.LambdaRole != nil && len(*s.LambdaRole) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("LambdaRole", 1))
+	}
+	if s.WorkflowId == nil {
+		invalidParams.Add(request.NewErrParamRequired("WorkflowId"))
+	}
+	if s.WorkflowId != nil && len(*s.WorkflowId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("WorkflowId", 1))
+	}
+	if s.WorkflowType == nil {
+		invalidParams.Add(request.NewErrParamRequired("WorkflowType"))
+	}
+	if s.TaskList != nil {
+		if err := s.TaskList.Validate(); err != nil {
+			invalidParams.AddNested("TaskList", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.WorkflowType != nil {
+		if err := s.WorkflowType.Validate(); err != nil {
+			invalidParams.AddNested("WorkflowType", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Specifies the runId of a workflow execution.
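+//
+// As a hedged usage sketch (the service handle and field values below are
+// hypothetical), this is the value returned by StartWorkflowExecution:
+//
+//	out, err := svc.StartWorkflowExecution(&swf.StartWorkflowExecutionInput{
+//		Domain:     aws.String("my-domain"),
+//		WorkflowId: aws.String("order-12345"),
+//		WorkflowType: &swf.WorkflowType{
+//			Name:    aws.String("ProcessOrder"),
+//			Version: aws.String("1.0"),
+//		},
+//	})
+//	if err == nil {
+//		fmt.Println("started run:", aws.StringValue(out.RunId))
+//	}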
+type StartWorkflowExecutionOutput struct { + _ struct{} `type:"structure"` + + // The runId of a workflow execution. This ID is generated by the service and + // can be used to uniquely identify the workflow execution within a domain. + RunId *string `locationName:"runId" min:"1" type:"string"` +} + +// String returns the string representation +func (s StartWorkflowExecutionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartWorkflowExecutionOutput) GoString() string { + return s.String() +} + +// Used to filter the workflow executions in visibility APIs based on a tag. +type TagFilter struct { + _ struct{} `type:"structure"` + + // Required. Specifies the tag that must be associated with the execution for + // it to meet the filter criteria. + Tag *string `locationName:"tag" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s TagFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagFilter"} + if s.Tag == nil { + invalidParams.Add(request.NewErrParamRequired("Tag")) + } + if s.Tag != nil && len(*s.Tag) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tag", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents a task list. +type TaskList struct { + _ struct{} `type:"structure"` + + // The name of the task list. + Name *string `locationName:"name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s TaskList) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TaskList) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TaskList) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TaskList"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type TerminateWorkflowExecutionInput struct { + _ struct{} `type:"structure"` + + // If set, specifies the policy to use for the child workflow executions of + // the workflow execution being terminated. This policy overrides the child + // policy specified for the workflow execution at registration time or when + // starting the execution. + // + // The supported child policies are: + // + // TERMINATE: the child executions will be terminated. REQUEST_CANCEL: a request + // to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested + // event in its history. It is up to the decider to take appropriate actions + // when it receives an execution history with this event. ABANDON: no action + // will be taken. The child executions will continue to run. A child policy + // for this workflow execution must be specified either as a default for the + // workflow type or through this parameter. If neither this parameter is set + // nor a default child policy was specified at registration time then a fault + // will be returned. 
+ ChildPolicy *string `locationName:"childPolicy" type:"string" enum:"ChildPolicy"` + + // Optional. Details for terminating the workflow execution. + Details *string `locationName:"details" type:"string"` + + // The domain of the workflow execution to terminate. + Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` + + // Optional. A descriptive reason for terminating the workflow execution. + Reason *string `locationName:"reason" type:"string"` + + // The runId of the workflow execution to terminate. + RunId *string `locationName:"runId" type:"string"` + + // The workflowId of the workflow execution to terminate. + WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s TerminateWorkflowExecutionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TerminateWorkflowExecutionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TerminateWorkflowExecutionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TerminateWorkflowExecutionInput"} + if s.Domain == nil { + invalidParams.Add(request.NewErrParamRequired("Domain")) + } + if s.Domain != nil && len(*s.Domain) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Domain", 1)) + } + if s.WorkflowId == nil { + invalidParams.Add(request.NewErrParamRequired("WorkflowId")) + } + if s.WorkflowId != nil && len(*s.WorkflowId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WorkflowId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type TerminateWorkflowExecutionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TerminateWorkflowExecutionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TerminateWorkflowExecutionOutput) GoString() string { + return s.String() +} + +// Provides details of the TimerCanceled event. +type TimerCanceledEventAttributes struct { + _ struct{} `type:"structure"` + + // The ID of the DecisionTaskCompleted event corresponding to the decision task + // that resulted in the CancelTimer decision to cancel this timer. This information + // can be useful for diagnosing problems by tracing back the chain of events + // leading up to this event. + DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` + + // The ID of the TimerStarted event that was recorded when this timer was started. + // This information can be useful for diagnosing problems by tracing back the + // chain of events leading up to this event. + StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` + + // The unique ID of the timer that was canceled. + TimerId *string `locationName:"timerId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s TimerCanceledEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TimerCanceledEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the TimerFired event. +type TimerFiredEventAttributes struct { + _ struct{} `type:"structure"` + + // The ID of the TimerStarted event that was recorded when this timer was started. 
+	// This information can be useful for diagnosing problems by tracing back the
+	// chain of events leading up to this event.
+	StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"`
+
+	// The unique ID of the timer that fired.
+	TimerId *string `locationName:"timerId" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s TimerFiredEventAttributes) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TimerFiredEventAttributes) GoString() string {
+	return s.String()
+}
+
+// Provides details of the TimerStarted event.
+type TimerStartedEventAttributes struct {
+	_ struct{} `type:"structure"`
+
+	// Optional. Data attached to the event that can be used by the decider in subsequent
+	// workflow tasks.
+	Control *string `locationName:"control" type:"string"`
+
+	// The ID of the DecisionTaskCompleted event corresponding to the decision task
+	// that resulted in the StartTimer decision for this timer. This information
+	// can be useful for diagnosing problems by tracing back the chain of events
+	// leading up to this event.
+	DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"`
+
+	// The duration of time after which the timer will fire.
+	//
+	// The duration is specified in seconds; an integer greater than or equal to
+	// 0.
+	StartToFireTimeout *string `locationName:"startToFireTimeout" min:"1" type:"string" required:"true"`
+
+	// The unique ID of the timer that was started.
+	TimerId *string `locationName:"timerId" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s TimerStartedEventAttributes) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TimerStartedEventAttributes) GoString() string {
+	return s.String()
+}
+
+// Represents a workflow execution.
+type WorkflowExecution struct {
+	_ struct{} `type:"structure"`
+
+	// A system-generated unique identifier for the workflow execution.
+	RunId *string `locationName:"runId" min:"1" type:"string" required:"true"`
+
+	// The user-defined identifier associated with the workflow execution.
+	WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s WorkflowExecution) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s WorkflowExecution) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *WorkflowExecution) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "WorkflowExecution"}
+	if s.RunId == nil {
+		invalidParams.Add(request.NewErrParamRequired("RunId"))
+	}
+	if s.RunId != nil && len(*s.RunId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("RunId", 1))
+	}
+	if s.WorkflowId == nil {
+		invalidParams.Add(request.NewErrParamRequired("WorkflowId"))
+	}
+	if s.WorkflowId != nil && len(*s.WorkflowId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("WorkflowId", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Provides details of the WorkflowExecutionCancelRequested event.
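+//
+// A minimal sketch (not generated documentation; task is assumed to be a
+// *swf.PollForDecisionTaskOutput) of how a decider might encounter these
+// attributes while walking the history:
+//
+//	for _, ev := range task.Events {
+//		if aws.StringValue(ev.EventType) == swf.EventTypeWorkflowExecutionCancelRequested {
+//			attrs := ev.WorkflowExecutionCancelRequestedEventAttributes
+//			_ = attrs // inspect Cause and ExternalWorkflowExecution here
+//		}
+//	}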
+type WorkflowExecutionCancelRequestedEventAttributes struct {
+	_ struct{} `type:"structure"`
+
+	// If set, indicates that the request to cancel the workflow execution was automatically
+	// generated, and specifies the cause. This happens if the parent workflow execution
+	// times out or is terminated, and the child policy is set to cancel child executions.
+	Cause *string `locationName:"cause" type:"string" enum:"WorkflowExecutionCancelRequestedCause"`
+
+	// The ID of the RequestCancelExternalWorkflowExecutionInitiated event corresponding
+	// to the RequestCancelExternalWorkflowExecution decision to cancel this workflow
+	// execution. The source event with this ID can be found in the history of the
+	// source workflow execution. This information can be useful for diagnosing
+	// problems by tracing back the chain of events leading up to this event.
+	ExternalInitiatedEventId *int64 `locationName:"externalInitiatedEventId" type:"long"`
+
+	// The external workflow execution for which the cancellation was requested.
+	ExternalWorkflowExecution *WorkflowExecution `locationName:"externalWorkflowExecution" type:"structure"`
+}
+
+// String returns the string representation
+func (s WorkflowExecutionCancelRequestedEventAttributes) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s WorkflowExecutionCancelRequestedEventAttributes) GoString() string {
+	return s.String()
+}
+
+// Provides details of the WorkflowExecutionCanceled event.
+type WorkflowExecutionCanceledEventAttributes struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the DecisionTaskCompleted event corresponding to the decision task
+	// that resulted in the CancelWorkflowExecution decision for this cancellation
+	// request. This information can be useful for diagnosing problems by tracing
+	// back the chain of events leading up to this event.
+	DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"`
+
+	// Details for the cancellation (if any).
+	Details *string `locationName:"details" type:"string"`
+}
+
+// String returns the string representation
+func (s WorkflowExecutionCanceledEventAttributes) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s WorkflowExecutionCanceledEventAttributes) GoString() string {
+	return s.String()
+}
+
+// Provides details of the WorkflowExecutionCompleted event.
+type WorkflowExecutionCompletedEventAttributes struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the DecisionTaskCompleted event corresponding to the decision task
+	// that resulted in the CompleteWorkflowExecution decision to complete this
+	// execution. This information can be useful for diagnosing problems by tracing
+	// back the chain of events leading up to this event.
+	DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"`
+
+	// The result produced by the workflow execution upon successful completion.
+	Result *string `locationName:"result" type:"string"`
+}
+
+// String returns the string representation
+func (s WorkflowExecutionCompletedEventAttributes) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s WorkflowExecutionCompletedEventAttributes) GoString() string {
+	return s.String()
+}
+
+// The configuration settings for a workflow execution, including timeout values,
+// task list, etc. These configuration settings are determined from the defaults
+// specified when registering the workflow type and those specified when starting
+// the workflow execution.
+type WorkflowExecutionConfiguration struct {
+	_ struct{} `type:"structure"`
+
+	// The policy to use for the child workflow executions if this workflow execution
+	// is terminated by calling the TerminateWorkflowExecution action explicitly
+	// or due to an expired timeout.
+	//
+	// The supported child policies are:
+	//
+	// TERMINATE: the child executions will be terminated. REQUEST_CANCEL: a request
+	// to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested
+	// event in its history. It is up to the decider to take appropriate actions
+	// when it receives an execution history with this event. ABANDON: no action
+	// will be taken. The child executions will continue to run.
+	ChildPolicy *string `locationName:"childPolicy" type:"string" required:"true" enum:"ChildPolicy"`
+
+	// The total duration for this workflow execution.
+	//
+	// The duration is specified in seconds; an integer greater than or equal to
+	// 0. The value "NONE" can be used to specify unlimited duration.
+	ExecutionStartToCloseTimeout *string `locationName:"executionStartToCloseTimeout" min:"1" type:"string" required:"true"`
+
+	// The IAM role used by this workflow execution when invoking AWS Lambda functions.
+	LambdaRole *string `locationName:"lambdaRole" min:"1" type:"string"`
+
+	// The task list used for the decision tasks generated for this workflow execution.
+	TaskList *TaskList `locationName:"taskList" type:"structure" required:"true"`
+
+	// The priority assigned to decision tasks for this workflow execution. Valid
+	// values are integers that range from Java's Integer.MIN_VALUE (-2147483648)
+	// to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.
+	//
+	// For more information about setting task priority, see Setting Task Priority
+	// (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html)
+	// in the Amazon Simple Workflow Developer Guide.
+	TaskPriority *string `locationName:"taskPriority" type:"string"`
+
+	// The maximum duration allowed for decision tasks for this workflow execution.
+	//
+	// The duration is specified in seconds; an integer greater than or equal to
+	// 0. The value "NONE" can be used to specify unlimited duration.
+	TaskStartToCloseTimeout *string `locationName:"taskStartToCloseTimeout" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s WorkflowExecutionConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s WorkflowExecutionConfiguration) GoString() string {
+	return s.String()
+}
+
+// Provides details of the WorkflowExecutionContinuedAsNew event.
+type WorkflowExecutionContinuedAsNewEventAttributes struct {
+	_ struct{} `type:"structure"`
+
+	// The policy to use for the child workflow executions of the new execution
+	// if it is terminated by calling the TerminateWorkflowExecution action explicitly
+	// or due to an expired timeout.
+	//
+	// The supported child policies are:
+	//
+	// TERMINATE: the child executions will be terminated. REQUEST_CANCEL: a request
+	// to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested
+	// event in its history.
+	// It is up to the decider to take appropriate actions
+	// when it receives an execution history with this event. ABANDON: no action
+	// will be taken. The child executions will continue to run.
+	ChildPolicy *string `locationName:"childPolicy" type:"string" required:"true" enum:"ChildPolicy"`
+
+	// The ID of the DecisionTaskCompleted event corresponding to the decision task
+	// that resulted in the ContinueAsNewWorkflowExecution decision that started
+	// this execution. This information can be useful for diagnosing problems by
+	// tracing back the chain of events leading up to this event.
+	DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"`
+
+	// The total duration allowed for the new workflow execution.
+	//
+	// The duration is specified in seconds; an integer greater than or equal to
+	// 0. The value "NONE" can be used to specify unlimited duration.
+	ExecutionStartToCloseTimeout *string `locationName:"executionStartToCloseTimeout" type:"string"`
+
+	// The input provided to the new workflow execution.
+	Input *string `locationName:"input" type:"string"`
+
+	// The IAM role attached to this workflow execution to use when invoking AWS
+	// Lambda functions.
+	LambdaRole *string `locationName:"lambdaRole" min:"1" type:"string"`
+
+	// The runId of the new workflow execution.
+	NewExecutionRunId *string `locationName:"newExecutionRunId" min:"1" type:"string" required:"true"`
+
+	// The list of tags associated with the new workflow execution.
+	TagList []*string `locationName:"tagList" type:"list"`
+
+	// Represents a task list.
+	TaskList *TaskList `locationName:"taskList" type:"structure" required:"true"`
+
+	TaskPriority *string `locationName:"taskPriority" type:"string"`
+
+	// The maximum duration of decision tasks for the new workflow execution.
+	//
+	// The duration is specified in seconds; an integer greater than or equal to
+	// 0. The value "NONE" can be used to specify unlimited duration.
+	TaskStartToCloseTimeout *string `locationName:"taskStartToCloseTimeout" type:"string"`
+
+	// Represents a workflow type.
+	WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s WorkflowExecutionContinuedAsNewEventAttributes) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s WorkflowExecutionContinuedAsNewEventAttributes) GoString() string {
+	return s.String()
+}
+
+// Contains the count of workflow executions returned from CountOpenWorkflowExecutions
+// or CountClosedWorkflowExecutions.
+type WorkflowExecutionCount struct {
+	_ struct{} `type:"structure"`
+
+	// The number of workflow executions.
+	Count *int64 `locationName:"count" type:"integer" required:"true"`
+
+	// If set to true, indicates that the actual count was more than the maximum
+	// supported by this API and the count returned is the truncated value.
+	Truncated *bool `locationName:"truncated" type:"boolean"`
+}
+
+// String returns the string representation
+func (s WorkflowExecutionCount) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s WorkflowExecutionCount) GoString() string {
+	return s.String()
+}
+
+// Provides details of the WorkflowExecutionFailed event.
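+//
+// This event is recorded after a decider returns a FailWorkflowExecution
+// decision; a hedged sketch of constructing such a decision (the field
+// values are hypothetical):
+//
+//	decision := &swf.Decision{
+//		DecisionType: aws.String(swf.DecisionTypeFailWorkflowExecution),
+//		FailWorkflowExecutionDecisionAttributes: &swf.FailWorkflowExecutionDecisionAttributes{
+//			Reason:  aws.String("downstream-error"),
+//			Details: aws.String("payment service unavailable"),
+//		},
+//	}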
+type WorkflowExecutionFailedEventAttributes struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the DecisionTaskCompleted event corresponding to the decision task
+	// that resulted in the FailWorkflowExecution decision to fail this execution.
+	// This information can be useful for diagnosing problems by tracing back the
+	// chain of events leading up to this event.
+	DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"`
+
+	// The details of the failure (if any).
+	Details *string `locationName:"details" type:"string"`
+
+	// The descriptive reason provided for the failure (if any).
+	Reason *string `locationName:"reason" type:"string"`
+}
+
+// String returns the string representation
+func (s WorkflowExecutionFailedEventAttributes) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s WorkflowExecutionFailedEventAttributes) GoString() string {
+	return s.String()
+}
+
+// Used to filter the workflow executions in visibility APIs by their workflowId.
+type WorkflowExecutionFilter struct {
+	_ struct{} `type:"structure"`
+
+	// The workflowId that must match for an execution to meet the criteria of
+	// this filter.
+	WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s WorkflowExecutionFilter) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s WorkflowExecutionFilter) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *WorkflowExecutionFilter) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "WorkflowExecutionFilter"}
+	if s.WorkflowId == nil {
+		invalidParams.Add(request.NewErrParamRequired("WorkflowId"))
+	}
+	if s.WorkflowId != nil && len(*s.WorkflowId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("WorkflowId", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Contains information about a workflow execution.
+type WorkflowExecutionInfo struct {
+	_ struct{} `type:"structure"`
+
+	// Set to true if a cancellation is requested for this workflow execution.
+	CancelRequested *bool `locationName:"cancelRequested" type:"boolean"`
+
+	// If the execution status is closed then this specifies how the execution was
+	// closed:
+	//
+	// COMPLETED: the execution was successfully completed. CANCELED: the execution
+	// was canceled. Cancellation allows the implementation to gracefully clean up
+	// before the execution is closed. TERMINATED: the execution was force terminated.
+	// FAILED: the execution failed to complete. TIMED_OUT: the execution did
+	// not complete in the allotted time and was automatically timed out. CONTINUED_AS_NEW:
+	// the execution is logically continued. This means the current execution was
+	// completed and a new execution was started to carry on the workflow.
+	CloseStatus *string `locationName:"closeStatus" type:"string" enum:"CloseStatus"`
+
+	// The time when the workflow execution was closed. Set only if the execution
+	// status is CLOSED.
+	CloseTimestamp *time.Time `locationName:"closeTimestamp" type:"timestamp" timestampFormat:"unix"`
+
+	// The workflow execution this information is about.
+	Execution *WorkflowExecution `locationName:"execution" type:"structure" required:"true"`
+
+	// The current status of the execution.
+ ExecutionStatus *string `locationName:"executionStatus" type:"string" required:"true" enum:"ExecutionStatus"` + + // If this workflow execution is a child of another execution then contains + // the workflow execution that started this execution. + Parent *WorkflowExecution `locationName:"parent" type:"structure"` + + // The time when the execution was started. + StartTimestamp *time.Time `locationName:"startTimestamp" type:"timestamp" timestampFormat:"unix" required:"true"` + + // The list of tags associated with the workflow execution. Tags can be used + // to identify and list workflow executions of interest through the visibility + // APIs. A workflow execution can have a maximum of 5 tags. + TagList []*string `locationName:"tagList" type:"list"` + + // The type of the workflow execution. + WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` +} + +// String returns the string representation +func (s WorkflowExecutionInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkflowExecutionInfo) GoString() string { + return s.String() +} + +// Contains a paginated list of information about workflow executions. +type WorkflowExecutionInfos struct { + _ struct{} `type:"structure"` + + // The list of workflow information structures. + ExecutionInfos []*WorkflowExecutionInfo `locationName:"executionInfos" type:"list" required:"true"` + + // If a NextPageToken was returned by a previous call, there are more results + // available. To retrieve the next page of results, make the call again using + // the returned token in nextPageToken. Keep all other arguments unchanged. + // + // The configured maximumPageSize determines how many results can be returned + // in a single call. + NextPageToken *string `locationName:"nextPageToken" type:"string"` +} + +// String returns the string representation +func (s WorkflowExecutionInfos) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkflowExecutionInfos) GoString() string { + return s.String() +} + +// Contains the counts of open tasks, child workflow executions and timers for +// a workflow execution. +type WorkflowExecutionOpenCounts struct { + _ struct{} `type:"structure"` + + // The count of activity tasks whose status is OPEN. + OpenActivityTasks *int64 `locationName:"openActivityTasks" type:"integer" required:"true"` + + // The count of child workflow executions whose status is OPEN. + OpenChildWorkflowExecutions *int64 `locationName:"openChildWorkflowExecutions" type:"integer" required:"true"` + + // The count of decision tasks whose status is OPEN. A workflow execution can + // have at most one open decision task. + OpenDecisionTasks *int64 `locationName:"openDecisionTasks" type:"integer" required:"true"` + + // The count of AWS Lambda functions that are currently executing. + OpenLambdaFunctions *int64 `locationName:"openLambdaFunctions" type:"integer"` + + // The count of timers started by this workflow execution that have not fired + // yet. + OpenTimers *int64 `locationName:"openTimers" type:"integer" required:"true"` +} + +// String returns the string representation +func (s WorkflowExecutionOpenCounts) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkflowExecutionOpenCounts) GoString() string { + return s.String() +} + +// Provides details of the WorkflowExecutionSignaled event. 
+type WorkflowExecutionSignaledEventAttributes struct { + _ struct{} `type:"structure"` + + // The ID of the SignalExternalWorkflowExecutionInitiated event corresponding + // to the SignalExternalWorkflowExecution decision to signal this workflow execution. The + // source event with this ID can be found in the history of the source workflow + // execution. This information can be useful for diagnosing problems by tracing + // back the chain of events leading up to this event. This field is set only + // if the signal was initiated by another workflow execution. + ExternalInitiatedEventId *int64 `locationName:"externalInitiatedEventId" type:"long"` + + // The workflow execution that sent the signal. This is set only if the signal + // was sent by another workflow execution. + ExternalWorkflowExecution *WorkflowExecution `locationName:"externalWorkflowExecution" type:"structure"` + + // Inputs provided with the signal (if any). The decider can use the signal + // name and inputs to determine how to process the signal. + Input *string `locationName:"input" type:"string"` + + // The name of the signal received. The decider can use the signal name and + // inputs to determine how to process the signal. + SignalName *string `locationName:"signalName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s WorkflowExecutionSignaledEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkflowExecutionSignaledEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the WorkflowExecutionStarted event. +type WorkflowExecutionStartedEventAttributes struct { + _ struct{} `type:"structure"` + + // The policy to use for the child workflow executions if this workflow execution + // is terminated, by calling the TerminateWorkflowExecution action explicitly + // or due to an expired timeout. + // + // The supported child policies are: + // + // TERMINATE: the child executions will be terminated. REQUEST_CANCEL: a request + // to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested + // event in its history. It is up to the decider to take appropriate actions + // when it receives an execution history with this event. ABANDON: no action + // will be taken. The child executions will continue to run. + ChildPolicy *string `locationName:"childPolicy" type:"string" required:"true" enum:"ChildPolicy"` + + // If this workflow execution was started due to a ContinueAsNewWorkflowExecution + // decision, then it contains the runId of the previous workflow execution that + // was closed and continued as this execution. + ContinuedExecutionRunId *string `locationName:"continuedExecutionRunId" type:"string"` + + // The maximum duration for this workflow execution. + // + // The duration is specified in seconds; an integer greater than or equal to + // 0. The value "NONE" can be used to specify unlimited duration. + ExecutionStartToCloseTimeout *string `locationName:"executionStartToCloseTimeout" type:"string"` + + // The input provided to the workflow execution (if any). + Input *string `locationName:"input" type:"string"` + + // The IAM role attached to this workflow execution to use when invoking AWS + // Lambda functions.
+ LambdaRole *string `locationName:"lambdaRole" min:"1" type:"string"` + + // The ID of the StartChildWorkflowExecutionInitiated event corresponding to + // the StartChildWorkflowExecution decision to start this workflow execution. + // The source event with this ID can be found in the history of the source workflow + // execution. This information can be useful for diagnosing problems by tracing + // back the chain of events leading up to this event. + ParentInitiatedEventId *int64 `locationName:"parentInitiatedEventId" type:"long"` + + // The source workflow execution that started this workflow execution. The member + // is not set if the workflow execution was not started by a workflow. + ParentWorkflowExecution *WorkflowExecution `locationName:"parentWorkflowExecution" type:"structure"` + + // The list of tags associated with this workflow execution. An execution can + // have up to 5 tags. + TagList []*string `locationName:"tagList" type:"list"` + + // The name of the task list for scheduling the decision tasks for this workflow + // execution. + TaskList *TaskList `locationName:"taskList" type:"structure" required:"true"` + + TaskPriority *string `locationName:"taskPriority" type:"string"` + + // The maximum duration of decision tasks for this workflow type. + // + // The duration is specified in seconds; an integer greater than or equal to + // 0. The value "NONE" can be used to specify unlimited duration. + TaskStartToCloseTimeout *string `locationName:"taskStartToCloseTimeout" type:"string"` + + // The workflow type of this execution. + WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` +} + +// String returns the string representation +func (s WorkflowExecutionStartedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkflowExecutionStartedEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the WorkflowExecutionTerminated event. +type WorkflowExecutionTerminatedEventAttributes struct { + _ struct{} `type:"structure"` + + // If set, indicates that the workflow execution was automatically terminated, + // and specifies the cause. This happens if the parent workflow execution times + // out or is terminated and the child policy is set to terminate child executions. + Cause *string `locationName:"cause" type:"string" enum:"WorkflowExecutionTerminatedCause"` + + // The policy used for the child workflow executions of this workflow execution. + // + // The supported child policies are: + // + // TERMINATE: the child executions will be terminated. REQUEST_CANCEL: a request + // to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested + // event in its history. It is up to the decider to take appropriate actions + // when it receives an execution history with this event. ABANDON: no action + // will be taken. The child executions will continue to run. + ChildPolicy *string `locationName:"childPolicy" type:"string" required:"true" enum:"ChildPolicy"` + + // The details provided for the termination (if any). + Details *string `locationName:"details" type:"string"` + + // The reason provided for the termination (if any). 
+ Reason *string `locationName:"reason" type:"string"` +} + +// String returns the string representation +func (s WorkflowExecutionTerminatedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkflowExecutionTerminatedEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the WorkflowExecutionTimedOut event. +type WorkflowExecutionTimedOutEventAttributes struct { + _ struct{} `type:"structure"` + + // The policy used for the child workflow executions of this workflow execution. + // + // The supported child policies are: + // + // TERMINATE: the child executions will be terminated. REQUEST_CANCEL: a request + // to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested + // event in its history. It is up to the decider to take appropriate actions + // when it receives an execution history with this event. ABANDON: no action + // will be taken. The child executions will continue to run. + ChildPolicy *string `locationName:"childPolicy" type:"string" required:"true" enum:"ChildPolicy"` + + // The type of timeout that caused this event. + TimeoutType *string `locationName:"timeoutType" type:"string" required:"true" enum:"WorkflowExecutionTimeoutType"` +} + +// String returns the string representation +func (s WorkflowExecutionTimedOutEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkflowExecutionTimedOutEventAttributes) GoString() string { + return s.String() +} + +// Represents a workflow type. +type WorkflowType struct { + _ struct{} `type:"structure"` + + // Required. The name of the workflow type. + // + // The combination of workflow type name and version must be unique within + // a domain. + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // Required. The version of the workflow type. + // + // The combination of workflow type name and version must be unique within + // a domain. + Version *string `locationName:"version" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s WorkflowType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkflowType) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *WorkflowType) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "WorkflowType"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Version == nil { + invalidParams.Add(request.NewErrParamRequired("Version")) + } + if s.Version != nil && len(*s.Version) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Version", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The configuration settings of a workflow type. +type WorkflowTypeConfiguration struct { + _ struct{} `type:"structure"` + + // Optional. The default policy to use for the child workflow executions when + // a workflow execution of this type is terminated, by calling the TerminateWorkflowExecution + // action explicitly or due to an expired timeout.
This default can be overridden + // when starting a workflow execution using the StartWorkflowExecution action + // or the StartChildWorkflowExecution decision. + // + // The supported child policies are: + // + // TERMINATE: the child executions will be terminated. REQUEST_CANCEL: a request + // to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested + // event in its history. It is up to the decider to take appropriate actions + // when it receives an execution history with this event. ABANDON: no action + // will be taken. The child executions will continue to run. + DefaultChildPolicy *string `locationName:"defaultChildPolicy" type:"string" enum:"ChildPolicy"` + + // Optional. The default maximum duration, specified when registering the workflow + // type, for executions of this workflow type. This default can be overridden + // when starting a workflow execution using the StartWorkflowExecution action + // or the StartChildWorkflowExecution decision. + // + // The duration is specified in seconds; an integer greater than or equal to + // 0. The value "NONE" can be used to specify unlimited duration. + DefaultExecutionStartToCloseTimeout *string `locationName:"defaultExecutionStartToCloseTimeout" type:"string"` + + // The default IAM role to use when a workflow execution invokes an AWS Lambda + // function. + DefaultLambdaRole *string `locationName:"defaultLambdaRole" min:"1" type:"string"` + + // Optional. The default task list, specified when registering the workflow + // type, for decision tasks scheduled for workflow executions of this type. + // This default can be overridden when starting a workflow execution using the + // StartWorkflowExecution action or the StartChildWorkflowExecution decision. + DefaultTaskList *TaskList `locationName:"defaultTaskList" type:"structure"` + + // Optional. The default task priority, specified when registering the workflow + // type, for all decision tasks of this workflow type. This default can be overridden + // when starting a workflow execution using the StartWorkflowExecution action + // or the StartChildWorkflowExecution decision. + // + // Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) + // to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority. + // + // For more information about setting task priority, see Setting Task Priority + // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) + // in the Amazon Simple Workflow Developer Guide. + DefaultTaskPriority *string `locationName:"defaultTaskPriority" type:"string"` + + // Optional. The default maximum duration, specified when registering the workflow + // type, that a decision task for executions of this workflow type might take + // before returning completion or failure. If the task does not close in the + // specified time then the task is automatically timed out and rescheduled. + // If the decider eventually reports a completion or failure, it is ignored. + // This default can be overridden when starting a workflow execution using the + // StartWorkflowExecution action or the StartChildWorkflowExecution decision. + // + // The duration is specified in seconds; an integer greater than or equal to + // 0. The value "NONE" can be used to specify unlimited duration.
+ DefaultTaskStartToCloseTimeout *string `locationName:"defaultTaskStartToCloseTimeout" type:"string"` +} + +// String returns the string representation +func (s WorkflowTypeConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkflowTypeConfiguration) GoString() string { + return s.String() +} + +// Used to filter workflow execution query results by type. Each parameter, +// if specified, defines a rule that must be satisfied by each returned result. +type WorkflowTypeFilter struct { + _ struct{} `type:"structure"` + + // Required. Name of the workflow type. + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // Version of the workflow type. + Version *string `locationName:"version" type:"string"` +} + +// String returns the string representation +func (s WorkflowTypeFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkflowTypeFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *WorkflowTypeFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "WorkflowTypeFilter"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains information about a workflow type. +type WorkflowTypeInfo struct { + _ struct{} `type:"structure"` + + // The date when this type was registered. + CreationDate *time.Time `locationName:"creationDate" type:"timestamp" timestampFormat:"unix" required:"true"` + + // If the type is in deprecated state, then it is set to the date when the type + // was deprecated. + DeprecationDate *time.Time `locationName:"deprecationDate" type:"timestamp" timestampFormat:"unix"` + + // The description of the type registered through RegisterWorkflowType. + Description *string `locationName:"description" type:"string"` + + // The current status of the workflow type. + Status *string `locationName:"status" type:"string" required:"true" enum:"RegistrationStatus"` + + // The workflow type this information is about. 
+ WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` +} + +// String returns the string representation +func (s WorkflowTypeInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkflowTypeInfo) GoString() string { + return s.String() +} + +const ( + // @enum ActivityTaskTimeoutType + ActivityTaskTimeoutTypeStartToClose = "START_TO_CLOSE" + // @enum ActivityTaskTimeoutType + ActivityTaskTimeoutTypeScheduleToStart = "SCHEDULE_TO_START" + // @enum ActivityTaskTimeoutType + ActivityTaskTimeoutTypeScheduleToClose = "SCHEDULE_TO_CLOSE" + // @enum ActivityTaskTimeoutType + ActivityTaskTimeoutTypeHeartbeat = "HEARTBEAT" +) + +const ( + // @enum CancelTimerFailedCause + CancelTimerFailedCauseTimerIdUnknown = "TIMER_ID_UNKNOWN" + // @enum CancelTimerFailedCause + CancelTimerFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" +) + +const ( + // @enum CancelWorkflowExecutionFailedCause + CancelWorkflowExecutionFailedCauseUnhandledDecision = "UNHANDLED_DECISION" + // @enum CancelWorkflowExecutionFailedCause + CancelWorkflowExecutionFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" +) + +const ( + // @enum ChildPolicy + ChildPolicyTerminate = "TERMINATE" + // @enum ChildPolicy + ChildPolicyRequestCancel = "REQUEST_CANCEL" + // @enum ChildPolicy + ChildPolicyAbandon = "ABANDON" +) + +const ( + // @enum CloseStatus + CloseStatusCompleted = "COMPLETED" + // @enum CloseStatus + CloseStatusFailed = "FAILED" + // @enum CloseStatus + CloseStatusCanceled = "CANCELED" + // @enum CloseStatus + CloseStatusTerminated = "TERMINATED" + // @enum CloseStatus + CloseStatusContinuedAsNew = "CONTINUED_AS_NEW" + // @enum CloseStatus + CloseStatusTimedOut = "TIMED_OUT" +) + +const ( + // @enum CompleteWorkflowExecutionFailedCause + CompleteWorkflowExecutionFailedCauseUnhandledDecision = "UNHANDLED_DECISION" + // @enum CompleteWorkflowExecutionFailedCause + CompleteWorkflowExecutionFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" +) + +const ( + // @enum ContinueAsNewWorkflowExecutionFailedCause + ContinueAsNewWorkflowExecutionFailedCauseUnhandledDecision = "UNHANDLED_DECISION" + // @enum ContinueAsNewWorkflowExecutionFailedCause + ContinueAsNewWorkflowExecutionFailedCauseWorkflowTypeDeprecated = "WORKFLOW_TYPE_DEPRECATED" + // @enum ContinueAsNewWorkflowExecutionFailedCause + ContinueAsNewWorkflowExecutionFailedCauseWorkflowTypeDoesNotExist = "WORKFLOW_TYPE_DOES_NOT_EXIST" + // @enum ContinueAsNewWorkflowExecutionFailedCause + ContinueAsNewWorkflowExecutionFailedCauseDefaultExecutionStartToCloseTimeoutUndefined = "DEFAULT_EXECUTION_START_TO_CLOSE_TIMEOUT_UNDEFINED" + // @enum ContinueAsNewWorkflowExecutionFailedCause + ContinueAsNewWorkflowExecutionFailedCauseDefaultTaskStartToCloseTimeoutUndefined = "DEFAULT_TASK_START_TO_CLOSE_TIMEOUT_UNDEFINED" + // @enum ContinueAsNewWorkflowExecutionFailedCause + ContinueAsNewWorkflowExecutionFailedCauseDefaultTaskListUndefined = "DEFAULT_TASK_LIST_UNDEFINED" + // @enum ContinueAsNewWorkflowExecutionFailedCause + ContinueAsNewWorkflowExecutionFailedCauseDefaultChildPolicyUndefined = "DEFAULT_CHILD_POLICY_UNDEFINED" + // @enum ContinueAsNewWorkflowExecutionFailedCause + ContinueAsNewWorkflowExecutionFailedCauseContinueAsNewWorkflowExecutionRateExceeded = "CONTINUE_AS_NEW_WORKFLOW_EXECUTION_RATE_EXCEEDED" + // @enum ContinueAsNewWorkflowExecutionFailedCause + ContinueAsNewWorkflowExecutionFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" +) + 
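The enum blocks above expose each API string value as a typed Go constant, so callers can pass them wherever the API expects the corresponding string and avoid typos that the service would reject only at request time. A minimal sketch, assuming a configured default session and a pre-registered domain (the "my-domain" name is hypothetical), of combining a generated constant with the filter types defined earlier:

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/swf"
)

func main() {
	svc := swf.New(session.New())

	// Count executions that closed as COMPLETED in the last 24 hours, using
	// the generated CloseStatus constant rather than the raw "COMPLETED" string.
	resp, err := svc.CountClosedWorkflowExecutions(&swf.CountClosedWorkflowExecutionsInput{
		Domain: aws.String("my-domain"), // hypothetical domain name
		CloseStatusFilter: &swf.CloseStatusFilter{
			Status: aws.String(swf.CloseStatusCompleted),
		},
		CloseTimeFilter: &swf.ExecutionTimeFilter{
			OldestDate: aws.Time(time.Now().Add(-24 * time.Hour)),
		},
	})
	if err != nil {
		fmt.Println(err.Error())
		return
	}
	fmt.Println(resp) // a WorkflowExecutionCount: Count, plus Truncated if capped
}

The same constants feed decision attributes as well, for example swf.ChildPolicyTerminate for any ChildPolicy field.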
+const ( + // @enum DecisionTaskTimeoutType + DecisionTaskTimeoutTypeStartToClose = "START_TO_CLOSE" +) + +const ( + // @enum DecisionType + DecisionTypeScheduleActivityTask = "ScheduleActivityTask" + // @enum DecisionType + DecisionTypeRequestCancelActivityTask = "RequestCancelActivityTask" + // @enum DecisionType + DecisionTypeCompleteWorkflowExecution = "CompleteWorkflowExecution" + // @enum DecisionType + DecisionTypeFailWorkflowExecution = "FailWorkflowExecution" + // @enum DecisionType + DecisionTypeCancelWorkflowExecution = "CancelWorkflowExecution" + // @enum DecisionType + DecisionTypeContinueAsNewWorkflowExecution = "ContinueAsNewWorkflowExecution" + // @enum DecisionType + DecisionTypeRecordMarker = "RecordMarker" + // @enum DecisionType + DecisionTypeStartTimer = "StartTimer" + // @enum DecisionType + DecisionTypeCancelTimer = "CancelTimer" + // @enum DecisionType + DecisionTypeSignalExternalWorkflowExecution = "SignalExternalWorkflowExecution" + // @enum DecisionType + DecisionTypeRequestCancelExternalWorkflowExecution = "RequestCancelExternalWorkflowExecution" + // @enum DecisionType + DecisionTypeStartChildWorkflowExecution = "StartChildWorkflowExecution" + // @enum DecisionType + DecisionTypeScheduleLambdaFunction = "ScheduleLambdaFunction" +) + +const ( + // @enum EventType + EventTypeWorkflowExecutionStarted = "WorkflowExecutionStarted" + // @enum EventType + EventTypeWorkflowExecutionCancelRequested = "WorkflowExecutionCancelRequested" + // @enum EventType + EventTypeWorkflowExecutionCompleted = "WorkflowExecutionCompleted" + // @enum EventType + EventTypeCompleteWorkflowExecutionFailed = "CompleteWorkflowExecutionFailed" + // @enum EventType + EventTypeWorkflowExecutionFailed = "WorkflowExecutionFailed" + // @enum EventType + EventTypeFailWorkflowExecutionFailed = "FailWorkflowExecutionFailed" + // @enum EventType + EventTypeWorkflowExecutionTimedOut = "WorkflowExecutionTimedOut" + // @enum EventType + EventTypeWorkflowExecutionCanceled = "WorkflowExecutionCanceled" + // @enum EventType + EventTypeCancelWorkflowExecutionFailed = "CancelWorkflowExecutionFailed" + // @enum EventType + EventTypeWorkflowExecutionContinuedAsNew = "WorkflowExecutionContinuedAsNew" + // @enum EventType + EventTypeContinueAsNewWorkflowExecutionFailed = "ContinueAsNewWorkflowExecutionFailed" + // @enum EventType + EventTypeWorkflowExecutionTerminated = "WorkflowExecutionTerminated" + // @enum EventType + EventTypeDecisionTaskScheduled = "DecisionTaskScheduled" + // @enum EventType + EventTypeDecisionTaskStarted = "DecisionTaskStarted" + // @enum EventType + EventTypeDecisionTaskCompleted = "DecisionTaskCompleted" + // @enum EventType + EventTypeDecisionTaskTimedOut = "DecisionTaskTimedOut" + // @enum EventType + EventTypeActivityTaskScheduled = "ActivityTaskScheduled" + // @enum EventType + EventTypeScheduleActivityTaskFailed = "ScheduleActivityTaskFailed" + // @enum EventType + EventTypeActivityTaskStarted = "ActivityTaskStarted" + // @enum EventType + EventTypeActivityTaskCompleted = "ActivityTaskCompleted" + // @enum EventType + EventTypeActivityTaskFailed = "ActivityTaskFailed" + // @enum EventType + EventTypeActivityTaskTimedOut = "ActivityTaskTimedOut" + // @enum EventType + EventTypeActivityTaskCanceled = "ActivityTaskCanceled" + // @enum EventType + EventTypeActivityTaskCancelRequested = "ActivityTaskCancelRequested" + // @enum EventType + EventTypeRequestCancelActivityTaskFailed = "RequestCancelActivityTaskFailed" + // @enum EventType + EventTypeWorkflowExecutionSignaled = 
"WorkflowExecutionSignaled" + // @enum EventType + EventTypeMarkerRecorded = "MarkerRecorded" + // @enum EventType + EventTypeRecordMarkerFailed = "RecordMarkerFailed" + // @enum EventType + EventTypeTimerStarted = "TimerStarted" + // @enum EventType + EventTypeStartTimerFailed = "StartTimerFailed" + // @enum EventType + EventTypeTimerFired = "TimerFired" + // @enum EventType + EventTypeTimerCanceled = "TimerCanceled" + // @enum EventType + EventTypeCancelTimerFailed = "CancelTimerFailed" + // @enum EventType + EventTypeStartChildWorkflowExecutionInitiated = "StartChildWorkflowExecutionInitiated" + // @enum EventType + EventTypeStartChildWorkflowExecutionFailed = "StartChildWorkflowExecutionFailed" + // @enum EventType + EventTypeChildWorkflowExecutionStarted = "ChildWorkflowExecutionStarted" + // @enum EventType + EventTypeChildWorkflowExecutionCompleted = "ChildWorkflowExecutionCompleted" + // @enum EventType + EventTypeChildWorkflowExecutionFailed = "ChildWorkflowExecutionFailed" + // @enum EventType + EventTypeChildWorkflowExecutionTimedOut = "ChildWorkflowExecutionTimedOut" + // @enum EventType + EventTypeChildWorkflowExecutionCanceled = "ChildWorkflowExecutionCanceled" + // @enum EventType + EventTypeChildWorkflowExecutionTerminated = "ChildWorkflowExecutionTerminated" + // @enum EventType + EventTypeSignalExternalWorkflowExecutionInitiated = "SignalExternalWorkflowExecutionInitiated" + // @enum EventType + EventTypeSignalExternalWorkflowExecutionFailed = "SignalExternalWorkflowExecutionFailed" + // @enum EventType + EventTypeExternalWorkflowExecutionSignaled = "ExternalWorkflowExecutionSignaled" + // @enum EventType + EventTypeRequestCancelExternalWorkflowExecutionInitiated = "RequestCancelExternalWorkflowExecutionInitiated" + // @enum EventType + EventTypeRequestCancelExternalWorkflowExecutionFailed = "RequestCancelExternalWorkflowExecutionFailed" + // @enum EventType + EventTypeExternalWorkflowExecutionCancelRequested = "ExternalWorkflowExecutionCancelRequested" + // @enum EventType + EventTypeLambdaFunctionScheduled = "LambdaFunctionScheduled" + // @enum EventType + EventTypeLambdaFunctionStarted = "LambdaFunctionStarted" + // @enum EventType + EventTypeLambdaFunctionCompleted = "LambdaFunctionCompleted" + // @enum EventType + EventTypeLambdaFunctionFailed = "LambdaFunctionFailed" + // @enum EventType + EventTypeLambdaFunctionTimedOut = "LambdaFunctionTimedOut" + // @enum EventType + EventTypeScheduleLambdaFunctionFailed = "ScheduleLambdaFunctionFailed" + // @enum EventType + EventTypeStartLambdaFunctionFailed = "StartLambdaFunctionFailed" +) + +const ( + // @enum ExecutionStatus + ExecutionStatusOpen = "OPEN" + // @enum ExecutionStatus + ExecutionStatusClosed = "CLOSED" +) + +const ( + // @enum FailWorkflowExecutionFailedCause + FailWorkflowExecutionFailedCauseUnhandledDecision = "UNHANDLED_DECISION" + // @enum FailWorkflowExecutionFailedCause + FailWorkflowExecutionFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" +) + +const ( + // @enum LambdaFunctionTimeoutType + LambdaFunctionTimeoutTypeStartToClose = "START_TO_CLOSE" +) + +const ( + // @enum RecordMarkerFailedCause + RecordMarkerFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" +) + +const ( + // @enum RegistrationStatus + RegistrationStatusRegistered = "REGISTERED" + // @enum RegistrationStatus + RegistrationStatusDeprecated = "DEPRECATED" +) + +const ( + // @enum RequestCancelActivityTaskFailedCause + RequestCancelActivityTaskFailedCauseActivityIdUnknown = "ACTIVITY_ID_UNKNOWN" + // @enum 
RequestCancelActivityTaskFailedCause + RequestCancelActivityTaskFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" +) + +const ( + // @enum RequestCancelExternalWorkflowExecutionFailedCause + RequestCancelExternalWorkflowExecutionFailedCauseUnknownExternalWorkflowExecution = "UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION" + // @enum RequestCancelExternalWorkflowExecutionFailedCause + RequestCancelExternalWorkflowExecutionFailedCauseRequestCancelExternalWorkflowExecutionRateExceeded = "REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION_RATE_EXCEEDED" + // @enum RequestCancelExternalWorkflowExecutionFailedCause + RequestCancelExternalWorkflowExecutionFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" +) + +const ( + // @enum ScheduleActivityTaskFailedCause + ScheduleActivityTaskFailedCauseActivityTypeDeprecated = "ACTIVITY_TYPE_DEPRECATED" + // @enum ScheduleActivityTaskFailedCause + ScheduleActivityTaskFailedCauseActivityTypeDoesNotExist = "ACTIVITY_TYPE_DOES_NOT_EXIST" + // @enum ScheduleActivityTaskFailedCause + ScheduleActivityTaskFailedCauseActivityIdAlreadyInUse = "ACTIVITY_ID_ALREADY_IN_USE" + // @enum ScheduleActivityTaskFailedCause + ScheduleActivityTaskFailedCauseOpenActivitiesLimitExceeded = "OPEN_ACTIVITIES_LIMIT_EXCEEDED" + // @enum ScheduleActivityTaskFailedCause + ScheduleActivityTaskFailedCauseActivityCreationRateExceeded = "ACTIVITY_CREATION_RATE_EXCEEDED" + // @enum ScheduleActivityTaskFailedCause + ScheduleActivityTaskFailedCauseDefaultScheduleToCloseTimeoutUndefined = "DEFAULT_SCHEDULE_TO_CLOSE_TIMEOUT_UNDEFINED" + // @enum ScheduleActivityTaskFailedCause + ScheduleActivityTaskFailedCauseDefaultTaskListUndefined = "DEFAULT_TASK_LIST_UNDEFINED" + // @enum ScheduleActivityTaskFailedCause + ScheduleActivityTaskFailedCauseDefaultScheduleToStartTimeoutUndefined = "DEFAULT_SCHEDULE_TO_START_TIMEOUT_UNDEFINED" + // @enum ScheduleActivityTaskFailedCause + ScheduleActivityTaskFailedCauseDefaultStartToCloseTimeoutUndefined = "DEFAULT_START_TO_CLOSE_TIMEOUT_UNDEFINED" + // @enum ScheduleActivityTaskFailedCause + ScheduleActivityTaskFailedCauseDefaultHeartbeatTimeoutUndefined = "DEFAULT_HEARTBEAT_TIMEOUT_UNDEFINED" + // @enum ScheduleActivityTaskFailedCause + ScheduleActivityTaskFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" +) + +const ( + // @enum ScheduleLambdaFunctionFailedCause + ScheduleLambdaFunctionFailedCauseIdAlreadyInUse = "ID_ALREADY_IN_USE" + // @enum ScheduleLambdaFunctionFailedCause + ScheduleLambdaFunctionFailedCauseOpenLambdaFunctionsLimitExceeded = "OPEN_LAMBDA_FUNCTIONS_LIMIT_EXCEEDED" + // @enum ScheduleLambdaFunctionFailedCause + ScheduleLambdaFunctionFailedCauseLambdaFunctionCreationRateExceeded = "LAMBDA_FUNCTION_CREATION_RATE_EXCEEDED" + // @enum ScheduleLambdaFunctionFailedCause + ScheduleLambdaFunctionFailedCauseLambdaServiceNotAvailableInRegion = "LAMBDA_SERVICE_NOT_AVAILABLE_IN_REGION" +) + +const ( + // @enum SignalExternalWorkflowExecutionFailedCause + SignalExternalWorkflowExecutionFailedCauseUnknownExternalWorkflowExecution = "UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION" + // @enum SignalExternalWorkflowExecutionFailedCause + SignalExternalWorkflowExecutionFailedCauseSignalExternalWorkflowExecutionRateExceeded = "SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_RATE_EXCEEDED" + // @enum SignalExternalWorkflowExecutionFailedCause + SignalExternalWorkflowExecutionFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" +) + +const ( + // @enum StartChildWorkflowExecutionFailedCause + StartChildWorkflowExecutionFailedCauseWorkflowTypeDoesNotExist = 
"WORKFLOW_TYPE_DOES_NOT_EXIST" + // @enum StartChildWorkflowExecutionFailedCause + StartChildWorkflowExecutionFailedCauseWorkflowTypeDeprecated = "WORKFLOW_TYPE_DEPRECATED" + // @enum StartChildWorkflowExecutionFailedCause + StartChildWorkflowExecutionFailedCauseOpenChildrenLimitExceeded = "OPEN_CHILDREN_LIMIT_EXCEEDED" + // @enum StartChildWorkflowExecutionFailedCause + StartChildWorkflowExecutionFailedCauseOpenWorkflowsLimitExceeded = "OPEN_WORKFLOWS_LIMIT_EXCEEDED" + // @enum StartChildWorkflowExecutionFailedCause + StartChildWorkflowExecutionFailedCauseChildCreationRateExceeded = "CHILD_CREATION_RATE_EXCEEDED" + // @enum StartChildWorkflowExecutionFailedCause + StartChildWorkflowExecutionFailedCauseWorkflowAlreadyRunning = "WORKFLOW_ALREADY_RUNNING" + // @enum StartChildWorkflowExecutionFailedCause + StartChildWorkflowExecutionFailedCauseDefaultExecutionStartToCloseTimeoutUndefined = "DEFAULT_EXECUTION_START_TO_CLOSE_TIMEOUT_UNDEFINED" + // @enum StartChildWorkflowExecutionFailedCause + StartChildWorkflowExecutionFailedCauseDefaultTaskListUndefined = "DEFAULT_TASK_LIST_UNDEFINED" + // @enum StartChildWorkflowExecutionFailedCause + StartChildWorkflowExecutionFailedCauseDefaultTaskStartToCloseTimeoutUndefined = "DEFAULT_TASK_START_TO_CLOSE_TIMEOUT_UNDEFINED" + // @enum StartChildWorkflowExecutionFailedCause + StartChildWorkflowExecutionFailedCauseDefaultChildPolicyUndefined = "DEFAULT_CHILD_POLICY_UNDEFINED" + // @enum StartChildWorkflowExecutionFailedCause + StartChildWorkflowExecutionFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" +) + +const ( + // @enum StartLambdaFunctionFailedCause + StartLambdaFunctionFailedCauseAssumeRoleFailed = "ASSUME_ROLE_FAILED" +) + +const ( + // @enum StartTimerFailedCause + StartTimerFailedCauseTimerIdAlreadyInUse = "TIMER_ID_ALREADY_IN_USE" + // @enum StartTimerFailedCause + StartTimerFailedCauseOpenTimersLimitExceeded = "OPEN_TIMERS_LIMIT_EXCEEDED" + // @enum StartTimerFailedCause + StartTimerFailedCauseTimerCreationRateExceeded = "TIMER_CREATION_RATE_EXCEEDED" + // @enum StartTimerFailedCause + StartTimerFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" +) + +const ( + // @enum WorkflowExecutionCancelRequestedCause + WorkflowExecutionCancelRequestedCauseChildPolicyApplied = "CHILD_POLICY_APPLIED" +) + +const ( + // @enum WorkflowExecutionTerminatedCause + WorkflowExecutionTerminatedCauseChildPolicyApplied = "CHILD_POLICY_APPLIED" + // @enum WorkflowExecutionTerminatedCause + WorkflowExecutionTerminatedCauseEventLimitExceeded = "EVENT_LIMIT_EXCEEDED" + // @enum WorkflowExecutionTerminatedCause + WorkflowExecutionTerminatedCauseOperatorInitiated = "OPERATOR_INITIATED" +) + +const ( + // @enum WorkflowExecutionTimeoutType + WorkflowExecutionTimeoutTypeStartToClose = "START_TO_CLOSE" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/swf/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/swf/examples_test.go new file mode 100644 index 000000000..7c993fbaf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/swf/examples_test.go @@ -0,0 +1,900 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package swf_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/swf" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleSWF_CountClosedWorkflowExecutions() { + svc := swf.New(session.New()) + + params := &swf.CountClosedWorkflowExecutionsInput{ + Domain: aws.String("DomainName"), // Required + CloseStatusFilter: &swf.CloseStatusFilter{ + Status: aws.String("CloseStatus"), // Required + }, + CloseTimeFilter: &swf.ExecutionTimeFilter{ + OldestDate: aws.Time(time.Now()), // Required + LatestDate: aws.Time(time.Now()), + }, + ExecutionFilter: &swf.WorkflowExecutionFilter{ + WorkflowId: aws.String("WorkflowId"), // Required + }, + StartTimeFilter: &swf.ExecutionTimeFilter{ + OldestDate: aws.Time(time.Now()), // Required + LatestDate: aws.Time(time.Now()), + }, + TagFilter: &swf.TagFilter{ + Tag: aws.String("Tag"), // Required + }, + TypeFilter: &swf.WorkflowTypeFilter{ + Name: aws.String("Name"), // Required + Version: aws.String("VersionOptional"), + }, + } + resp, err := svc.CountClosedWorkflowExecutions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_CountOpenWorkflowExecutions() { + svc := swf.New(session.New()) + + params := &swf.CountOpenWorkflowExecutionsInput{ + Domain: aws.String("DomainName"), // Required + StartTimeFilter: &swf.ExecutionTimeFilter{ // Required + OldestDate: aws.Time(time.Now()), // Required + LatestDate: aws.Time(time.Now()), + }, + ExecutionFilter: &swf.WorkflowExecutionFilter{ + WorkflowId: aws.String("WorkflowId"), // Required + }, + TagFilter: &swf.TagFilter{ + Tag: aws.String("Tag"), // Required + }, + TypeFilter: &swf.WorkflowTypeFilter{ + Name: aws.String("Name"), // Required + Version: aws.String("VersionOptional"), + }, + } + resp, err := svc.CountOpenWorkflowExecutions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_CountPendingActivityTasks() { + svc := swf.New(session.New()) + + params := &swf.CountPendingActivityTasksInput{ + Domain: aws.String("DomainName"), // Required + TaskList: &swf.TaskList{ // Required + Name: aws.String("Name"), // Required + }, + } + resp, err := svc.CountPendingActivityTasks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_CountPendingDecisionTasks() { + svc := swf.New(session.New()) + + params := &swf.CountPendingDecisionTasksInput{ + Domain: aws.String("DomainName"), // Required + TaskList: &swf.TaskList{ // Required + Name: aws.String("Name"), // Required + }, + } + resp, err := svc.CountPendingDecisionTasks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSWF_DeprecateActivityType() { + svc := swf.New(session.New()) + + params := &swf.DeprecateActivityTypeInput{ + ActivityType: &swf.ActivityType{ // Required + Name: aws.String("Name"), // Required + Version: aws.String("Version"), // Required + }, + Domain: aws.String("DomainName"), // Required + } + resp, err := svc.DeprecateActivityType(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_DeprecateDomain() { + svc := swf.New(session.New()) + + params := &swf.DeprecateDomainInput{ + Name: aws.String("DomainName"), // Required + } + resp, err := svc.DeprecateDomain(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_DeprecateWorkflowType() { + svc := swf.New(session.New()) + + params := &swf.DeprecateWorkflowTypeInput{ + Domain: aws.String("DomainName"), // Required + WorkflowType: &swf.WorkflowType{ // Required + Name: aws.String("Name"), // Required + Version: aws.String("Version"), // Required + }, + } + resp, err := svc.DeprecateWorkflowType(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_DescribeActivityType() { + svc := swf.New(session.New()) + + params := &swf.DescribeActivityTypeInput{ + ActivityType: &swf.ActivityType{ // Required + Name: aws.String("Name"), // Required + Version: aws.String("Version"), // Required + }, + Domain: aws.String("DomainName"), // Required + } + resp, err := svc.DescribeActivityType(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_DescribeDomain() { + svc := swf.New(session.New()) + + params := &swf.DescribeDomainInput{ + Name: aws.String("DomainName"), // Required + } + resp, err := svc.DescribeDomain(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_DescribeWorkflowExecution() { + svc := swf.New(session.New()) + + params := &swf.DescribeWorkflowExecutionInput{ + Domain: aws.String("DomainName"), // Required + Execution: &swf.WorkflowExecution{ // Required + RunId: aws.String("RunId"), // Required + WorkflowId: aws.String("WorkflowId"), // Required + }, + } + resp, err := svc.DescribeWorkflowExecution(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSWF_DescribeWorkflowType() { + svc := swf.New(session.New()) + + params := &swf.DescribeWorkflowTypeInput{ + Domain: aws.String("DomainName"), // Required + WorkflowType: &swf.WorkflowType{ // Required + Name: aws.String("Name"), // Required + Version: aws.String("Version"), // Required + }, + } + resp, err := svc.DescribeWorkflowType(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_GetWorkflowExecutionHistory() { + svc := swf.New(session.New()) + + params := &swf.GetWorkflowExecutionHistoryInput{ + Domain: aws.String("DomainName"), // Required + Execution: &swf.WorkflowExecution{ // Required + RunId: aws.String("RunId"), // Required + WorkflowId: aws.String("WorkflowId"), // Required + }, + MaximumPageSize: aws.Int64(1), + NextPageToken: aws.String("PageToken"), + ReverseOrder: aws.Bool(true), + } + resp, err := svc.GetWorkflowExecutionHistory(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_ListActivityTypes() { + svc := swf.New(session.New()) + + params := &swf.ListActivityTypesInput{ + Domain: aws.String("DomainName"), // Required + RegistrationStatus: aws.String("RegistrationStatus"), // Required + MaximumPageSize: aws.Int64(1), + Name: aws.String("Name"), + NextPageToken: aws.String("PageToken"), + ReverseOrder: aws.Bool(true), + } + resp, err := svc.ListActivityTypes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_ListClosedWorkflowExecutions() { + svc := swf.New(session.New()) + + params := &swf.ListClosedWorkflowExecutionsInput{ + Domain: aws.String("DomainName"), // Required + CloseStatusFilter: &swf.CloseStatusFilter{ + Status: aws.String("CloseStatus"), // Required + }, + CloseTimeFilter: &swf.ExecutionTimeFilter{ + OldestDate: aws.Time(time.Now()), // Required + LatestDate: aws.Time(time.Now()), + }, + ExecutionFilter: &swf.WorkflowExecutionFilter{ + WorkflowId: aws.String("WorkflowId"), // Required + }, + MaximumPageSize: aws.Int64(1), + NextPageToken: aws.String("PageToken"), + ReverseOrder: aws.Bool(true), + StartTimeFilter: &swf.ExecutionTimeFilter{ + OldestDate: aws.Time(time.Now()), // Required + LatestDate: aws.Time(time.Now()), + }, + TagFilter: &swf.TagFilter{ + Tag: aws.String("Tag"), // Required + }, + TypeFilter: &swf.WorkflowTypeFilter{ + Name: aws.String("Name"), // Required + Version: aws.String("VersionOptional"), + }, + } + resp, err := svc.ListClosedWorkflowExecutions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSWF_ListDomains() { + svc := swf.New(session.New()) + + params := &swf.ListDomainsInput{ + RegistrationStatus: aws.String("RegistrationStatus"), // Required + MaximumPageSize: aws.Int64(1), + NextPageToken: aws.String("PageToken"), + ReverseOrder: aws.Bool(true), + } + resp, err := svc.ListDomains(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_ListOpenWorkflowExecutions() { + svc := swf.New(session.New()) + + params := &swf.ListOpenWorkflowExecutionsInput{ + Domain: aws.String("DomainName"), // Required + StartTimeFilter: &swf.ExecutionTimeFilter{ // Required + OldestDate: aws.Time(time.Now()), // Required + LatestDate: aws.Time(time.Now()), + }, + ExecutionFilter: &swf.WorkflowExecutionFilter{ + WorkflowId: aws.String("WorkflowId"), // Required + }, + MaximumPageSize: aws.Int64(1), + NextPageToken: aws.String("PageToken"), + ReverseOrder: aws.Bool(true), + TagFilter: &swf.TagFilter{ + Tag: aws.String("Tag"), // Required + }, + TypeFilter: &swf.WorkflowTypeFilter{ + Name: aws.String("Name"), // Required + Version: aws.String("VersionOptional"), + }, + } + resp, err := svc.ListOpenWorkflowExecutions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_ListWorkflowTypes() { + svc := swf.New(session.New()) + + params := &swf.ListWorkflowTypesInput{ + Domain: aws.String("DomainName"), // Required + RegistrationStatus: aws.String("RegistrationStatus"), // Required + MaximumPageSize: aws.Int64(1), + Name: aws.String("Name"), + NextPageToken: aws.String("PageToken"), + ReverseOrder: aws.Bool(true), + } + resp, err := svc.ListWorkflowTypes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_PollForActivityTask() { + svc := swf.New(session.New()) + + params := &swf.PollForActivityTaskInput{ + Domain: aws.String("DomainName"), // Required + TaskList: &swf.TaskList{ // Required + Name: aws.String("Name"), // Required + }, + Identity: aws.String("Identity"), + } + resp, err := svc.PollForActivityTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_PollForDecisionTask() { + svc := swf.New(session.New()) + + params := &swf.PollForDecisionTaskInput{ + Domain: aws.String("DomainName"), // Required + TaskList: &swf.TaskList{ // Required + Name: aws.String("Name"), // Required + }, + Identity: aws.String("Identity"), + MaximumPageSize: aws.Int64(1), + NextPageToken: aws.String("PageToken"), + ReverseOrder: aws.Bool(true), + } + resp, err := svc.PollForDecisionTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSWF_RecordActivityTaskHeartbeat() { + svc := swf.New(session.New()) + + params := &swf.RecordActivityTaskHeartbeatInput{ + TaskToken: aws.String("TaskToken"), // Required + Details: aws.String("LimitedData"), + } + resp, err := svc.RecordActivityTaskHeartbeat(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_RegisterActivityType() { + svc := swf.New(session.New()) + + params := &swf.RegisterActivityTypeInput{ + Domain: aws.String("DomainName"), // Required + Name: aws.String("Name"), // Required + Version: aws.String("Version"), // Required + DefaultTaskHeartbeatTimeout: aws.String("DurationInSecondsOptional"), + DefaultTaskList: &swf.TaskList{ + Name: aws.String("Name"), // Required + }, + DefaultTaskPriority: aws.String("TaskPriority"), + DefaultTaskScheduleToCloseTimeout: aws.String("DurationInSecondsOptional"), + DefaultTaskScheduleToStartTimeout: aws.String("DurationInSecondsOptional"), + DefaultTaskStartToCloseTimeout: aws.String("DurationInSecondsOptional"), + Description: aws.String("Description"), + } + resp, err := svc.RegisterActivityType(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_RegisterDomain() { + svc := swf.New(session.New()) + + params := &swf.RegisterDomainInput{ + Name: aws.String("DomainName"), // Required + WorkflowExecutionRetentionPeriodInDays: aws.String("DurationInDays"), // Required + Description: aws.String("Description"), + } + resp, err := svc.RegisterDomain(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_RegisterWorkflowType() { + svc := swf.New(session.New()) + + params := &swf.RegisterWorkflowTypeInput{ + Domain: aws.String("DomainName"), // Required + Name: aws.String("Name"), // Required + Version: aws.String("Version"), // Required + DefaultChildPolicy: aws.String("ChildPolicy"), + DefaultExecutionStartToCloseTimeout: aws.String("DurationInSecondsOptional"), + DefaultLambdaRole: aws.String("Arn"), + DefaultTaskList: &swf.TaskList{ + Name: aws.String("Name"), // Required + }, + DefaultTaskPriority: aws.String("TaskPriority"), + DefaultTaskStartToCloseTimeout: aws.String("DurationInSecondsOptional"), + Description: aws.String("Description"), + } + resp, err := svc.RegisterWorkflowType(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_RequestCancelWorkflowExecution() { + svc := swf.New(session.New()) + + params := &swf.RequestCancelWorkflowExecutionInput{ + Domain: aws.String("DomainName"), // Required + WorkflowId: aws.String("WorkflowId"), // Required + RunId: aws.String("RunIdOptional"), + } + resp, err := svc.RequestCancelWorkflowExecution(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSWF_RespondActivityTaskCanceled() { + svc := swf.New(session.New()) + + params := &swf.RespondActivityTaskCanceledInput{ + TaskToken: aws.String("TaskToken"), // Required + Details: aws.String("Data"), + } + resp, err := svc.RespondActivityTaskCanceled(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_RespondActivityTaskCompleted() { + svc := swf.New(session.New()) + + params := &swf.RespondActivityTaskCompletedInput{ + TaskToken: aws.String("TaskToken"), // Required + Result: aws.String("Data"), + } + resp, err := svc.RespondActivityTaskCompleted(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_RespondActivityTaskFailed() { + svc := swf.New(session.New()) + + params := &swf.RespondActivityTaskFailedInput{ + TaskToken: aws.String("TaskToken"), // Required + Details: aws.String("Data"), + Reason: aws.String("FailureReason"), + } + resp, err := svc.RespondActivityTaskFailed(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_RespondDecisionTaskCompleted() { + svc := swf.New(session.New()) + + params := &swf.RespondDecisionTaskCompletedInput{ + TaskToken: aws.String("TaskToken"), // Required + Decisions: []*swf.Decision{ + { // Required + DecisionType: aws.String("DecisionType"), // Required + CancelTimerDecisionAttributes: &swf.CancelTimerDecisionAttributes{ + TimerId: aws.String("TimerId"), // Required + }, + CancelWorkflowExecutionDecisionAttributes: &swf.CancelWorkflowExecutionDecisionAttributes{ + Details: aws.String("Data"), + }, + CompleteWorkflowExecutionDecisionAttributes: &swf.CompleteWorkflowExecutionDecisionAttributes{ + Result: aws.String("Data"), + }, + ContinueAsNewWorkflowExecutionDecisionAttributes: &swf.ContinueAsNewWorkflowExecutionDecisionAttributes{ + ChildPolicy: aws.String("ChildPolicy"), + ExecutionStartToCloseTimeout: aws.String("DurationInSecondsOptional"), + Input: aws.String("Data"), + LambdaRole: aws.String("Arn"), + TagList: []*string{ + aws.String("Tag"), // Required + // More values... 
+ }, + TaskList: &swf.TaskList{ + Name: aws.String("Name"), // Required + }, + TaskPriority: aws.String("TaskPriority"), + TaskStartToCloseTimeout: aws.String("DurationInSecondsOptional"), + WorkflowTypeVersion: aws.String("Version"), + }, + FailWorkflowExecutionDecisionAttributes: &swf.FailWorkflowExecutionDecisionAttributes{ + Details: aws.String("Data"), + Reason: aws.String("FailureReason"), + }, + RecordMarkerDecisionAttributes: &swf.RecordMarkerDecisionAttributes{ + MarkerName: aws.String("MarkerName"), // Required + Details: aws.String("Data"), + }, + RequestCancelActivityTaskDecisionAttributes: &swf.RequestCancelActivityTaskDecisionAttributes{ + ActivityId: aws.String("ActivityId"), // Required + }, + RequestCancelExternalWorkflowExecutionDecisionAttributes: &swf.RequestCancelExternalWorkflowExecutionDecisionAttributes{ + WorkflowId: aws.String("WorkflowId"), // Required + Control: aws.String("Data"), + RunId: aws.String("RunIdOptional"), + }, + ScheduleActivityTaskDecisionAttributes: &swf.ScheduleActivityTaskDecisionAttributes{ + ActivityId: aws.String("ActivityId"), // Required + ActivityType: &swf.ActivityType{ // Required + Name: aws.String("Name"), // Required + Version: aws.String("Version"), // Required + }, + Control: aws.String("Data"), + HeartbeatTimeout: aws.String("DurationInSecondsOptional"), + Input: aws.String("Data"), + ScheduleToCloseTimeout: aws.String("DurationInSecondsOptional"), + ScheduleToStartTimeout: aws.String("DurationInSecondsOptional"), + StartToCloseTimeout: aws.String("DurationInSecondsOptional"), + TaskList: &swf.TaskList{ + Name: aws.String("Name"), // Required + }, + TaskPriority: aws.String("TaskPriority"), + }, + ScheduleLambdaFunctionDecisionAttributes: &swf.ScheduleLambdaFunctionDecisionAttributes{ + Id: aws.String("FunctionId"), // Required + Name: aws.String("FunctionName"), // Required + Input: aws.String("FunctionInput"), + StartToCloseTimeout: aws.String("DurationInSecondsOptional"), + }, + SignalExternalWorkflowExecutionDecisionAttributes: &swf.SignalExternalWorkflowExecutionDecisionAttributes{ + SignalName: aws.String("SignalName"), // Required + WorkflowId: aws.String("WorkflowId"), // Required + Control: aws.String("Data"), + Input: aws.String("Data"), + RunId: aws.String("RunIdOptional"), + }, + StartChildWorkflowExecutionDecisionAttributes: &swf.StartChildWorkflowExecutionDecisionAttributes{ + WorkflowId: aws.String("WorkflowId"), // Required + WorkflowType: &swf.WorkflowType{ // Required + Name: aws.String("Name"), // Required + Version: aws.String("Version"), // Required + }, + ChildPolicy: aws.String("ChildPolicy"), + Control: aws.String("Data"), + ExecutionStartToCloseTimeout: aws.String("DurationInSecondsOptional"), + Input: aws.String("Data"), + LambdaRole: aws.String("Arn"), + TagList: []*string{ + aws.String("Tag"), // Required + // More values... + }, + TaskList: &swf.TaskList{ + Name: aws.String("Name"), // Required + }, + TaskPriority: aws.String("TaskPriority"), + TaskStartToCloseTimeout: aws.String("DurationInSecondsOptional"), + }, + StartTimerDecisionAttributes: &swf.StartTimerDecisionAttributes{ + StartToFireTimeout: aws.String("DurationInSeconds"), // Required + TimerId: aws.String("TimerId"), // Required + Control: aws.String("Data"), + }, + }, + // More values... + }, + ExecutionContext: aws.String("Data"), + } + resp, err := svc.RespondDecisionTaskCompleted(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_SignalWorkflowExecution() { + svc := swf.New(session.New()) + + params := &swf.SignalWorkflowExecutionInput{ + Domain: aws.String("DomainName"), // Required + SignalName: aws.String("SignalName"), // Required + WorkflowId: aws.String("WorkflowId"), // Required + Input: aws.String("Data"), + RunId: aws.String("RunIdOptional"), + } + resp, err := svc.SignalWorkflowExecution(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_StartWorkflowExecution() { + svc := swf.New(session.New()) + + params := &swf.StartWorkflowExecutionInput{ + Domain: aws.String("DomainName"), // Required + WorkflowId: aws.String("WorkflowId"), // Required + WorkflowType: &swf.WorkflowType{ // Required + Name: aws.String("Name"), // Required + Version: aws.String("Version"), // Required + }, + ChildPolicy: aws.String("ChildPolicy"), + ExecutionStartToCloseTimeout: aws.String("DurationInSecondsOptional"), + Input: aws.String("Data"), + LambdaRole: aws.String("Arn"), + TagList: []*string{ + aws.String("Tag"), // Required + // More values... + }, + TaskList: &swf.TaskList{ + Name: aws.String("Name"), // Required + }, + TaskPriority: aws.String("TaskPriority"), + TaskStartToCloseTimeout: aws.String("DurationInSecondsOptional"), + } + resp, err := svc.StartWorkflowExecution(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_TerminateWorkflowExecution() { + svc := swf.New(session.New()) + + params := &swf.TerminateWorkflowExecutionInput{ + Domain: aws.String("DomainName"), // Required + WorkflowId: aws.String("WorkflowId"), // Required + ChildPolicy: aws.String("ChildPolicy"), + Details: aws.String("Data"), + Reason: aws.String("TerminateReason"), + RunId: aws.String("RunIdOptional"), + } + resp, err := svc.TerminateWorkflowExecution(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/swf/service.go b/vendor/github.com/aws/aws-sdk-go/service/swf/service.go new file mode 100644 index 000000000..9c9dd66da --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/swf/service.go @@ -0,0 +1,100 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package swf + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// The Amazon Simple Workflow Service (Amazon SWF) makes it easy to build applications +// that use Amazon's cloud to coordinate work across distributed components. +// In Amazon SWF, a task represents a logical unit of work that is performed +// by a component of your workflow. Coordinating tasks in a workflow involves +// managing intertask dependencies, scheduling, and concurrency in accordance +// with the logical flow of the application. 
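+//
+// As a rough sketch of that coordination model (an editorial illustration, not
+// generated documentation; the domain and task list names are assumptions), a
+// decider polls for a decision task and responds with its decisions:
+//
+//    svc := swf.New(session.New())
+//
+//    task, err := svc.PollForDecisionTask(&swf.PollForDecisionTaskInput{
+//        Domain:   aws.String("ExampleDomain"),
+//        TaskList: &swf.TaskList{Name: aws.String("main")},
+//    })
+//    if err == nil && task.TaskToken != nil {
+//        _, err = svc.RespondDecisionTaskCompleted(&swf.RespondDecisionTaskCompletedInput{
+//            TaskToken: task.TaskToken,
+//        })
+//    }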
+//
+// Amazon SWF gives you full control over implementing tasks and coordinating
+// them without worrying about underlying complexities such as tracking their
+// progress and maintaining their state.
+//
+// This documentation serves as reference only. For a broader overview of the
+// Amazon SWF programming model, see the Amazon SWF Developer Guide (http://docs.aws.amazon.com/amazonswf/latest/developerguide/).
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type SWF struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "swf"
+
+// New creates a new instance of the SWF client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//    // Create a SWF client from just a session.
+//    svc := swf.New(mySession)
+//
+//    // Create a SWF client with additional configuration
+//    svc := swf.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *SWF {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *SWF {
+	svc := &SWF{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   ServiceName,
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "2012-01-25",
+				JSONVersion:   "1.0",
+				TargetPrefix:  "SimpleWorkflowService",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+	svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler)
+	svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler)
+	svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler)
+	svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for a SWF operation and runs any
+// custom request initialization.
+func (c *SWF) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	// Run custom request initialization if present
+	if initRequest != nil {
+		initRequest(req)
+	}
+
+	return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/swf/swfiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/swf/swfiface/interface.go
new file mode 100644
index 000000000..5c3d51f2d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/swf/swfiface/interface.go
@@ -0,0 +1,152 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package swfiface provides an interface for the Amazon Simple Workflow Service.
+package swfiface
+
+import (
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/service/swf"
+)
+
+// SWFAPI is the interface type for swf.SWF.
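+//
+// Application code can depend on this interface instead of the concrete *swf.SWF
+// so that unit tests can substitute a stub. A minimal sketch (an editorial
+// addition, not generated code; the mock type and function are assumptions):
+//
+//    type mockSWF struct {
+//        swfiface.SWFAPI
+//    }
+//
+//    func startOrder(api swfiface.SWFAPI, domain, id string) error {
+//        _, err := api.StartWorkflowExecution(&swf.StartWorkflowExecutionInput{
+//            Domain:     aws.String(domain),
+//            WorkflowId: aws.String(id),
+//            WorkflowType: &swf.WorkflowType{
+//                Name:    aws.String("Order"),
+//                Version: aws.String("1"),
+//            },
+//        })
+//        return err
+//    }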
+type SWFAPI interface { + CountClosedWorkflowExecutionsRequest(*swf.CountClosedWorkflowExecutionsInput) (*request.Request, *swf.WorkflowExecutionCount) + + CountClosedWorkflowExecutions(*swf.CountClosedWorkflowExecutionsInput) (*swf.WorkflowExecutionCount, error) + + CountOpenWorkflowExecutionsRequest(*swf.CountOpenWorkflowExecutionsInput) (*request.Request, *swf.WorkflowExecutionCount) + + CountOpenWorkflowExecutions(*swf.CountOpenWorkflowExecutionsInput) (*swf.WorkflowExecutionCount, error) + + CountPendingActivityTasksRequest(*swf.CountPendingActivityTasksInput) (*request.Request, *swf.PendingTaskCount) + + CountPendingActivityTasks(*swf.CountPendingActivityTasksInput) (*swf.PendingTaskCount, error) + + CountPendingDecisionTasksRequest(*swf.CountPendingDecisionTasksInput) (*request.Request, *swf.PendingTaskCount) + + CountPendingDecisionTasks(*swf.CountPendingDecisionTasksInput) (*swf.PendingTaskCount, error) + + DeprecateActivityTypeRequest(*swf.DeprecateActivityTypeInput) (*request.Request, *swf.DeprecateActivityTypeOutput) + + DeprecateActivityType(*swf.DeprecateActivityTypeInput) (*swf.DeprecateActivityTypeOutput, error) + + DeprecateDomainRequest(*swf.DeprecateDomainInput) (*request.Request, *swf.DeprecateDomainOutput) + + DeprecateDomain(*swf.DeprecateDomainInput) (*swf.DeprecateDomainOutput, error) + + DeprecateWorkflowTypeRequest(*swf.DeprecateWorkflowTypeInput) (*request.Request, *swf.DeprecateWorkflowTypeOutput) + + DeprecateWorkflowType(*swf.DeprecateWorkflowTypeInput) (*swf.DeprecateWorkflowTypeOutput, error) + + DescribeActivityTypeRequest(*swf.DescribeActivityTypeInput) (*request.Request, *swf.DescribeActivityTypeOutput) + + DescribeActivityType(*swf.DescribeActivityTypeInput) (*swf.DescribeActivityTypeOutput, error) + + DescribeDomainRequest(*swf.DescribeDomainInput) (*request.Request, *swf.DescribeDomainOutput) + + DescribeDomain(*swf.DescribeDomainInput) (*swf.DescribeDomainOutput, error) + + DescribeWorkflowExecutionRequest(*swf.DescribeWorkflowExecutionInput) (*request.Request, *swf.DescribeWorkflowExecutionOutput) + + DescribeWorkflowExecution(*swf.DescribeWorkflowExecutionInput) (*swf.DescribeWorkflowExecutionOutput, error) + + DescribeWorkflowTypeRequest(*swf.DescribeWorkflowTypeInput) (*request.Request, *swf.DescribeWorkflowTypeOutput) + + DescribeWorkflowType(*swf.DescribeWorkflowTypeInput) (*swf.DescribeWorkflowTypeOutput, error) + + GetWorkflowExecutionHistoryRequest(*swf.GetWorkflowExecutionHistoryInput) (*request.Request, *swf.GetWorkflowExecutionHistoryOutput) + + GetWorkflowExecutionHistory(*swf.GetWorkflowExecutionHistoryInput) (*swf.GetWorkflowExecutionHistoryOutput, error) + + GetWorkflowExecutionHistoryPages(*swf.GetWorkflowExecutionHistoryInput, func(*swf.GetWorkflowExecutionHistoryOutput, bool) bool) error + + ListActivityTypesRequest(*swf.ListActivityTypesInput) (*request.Request, *swf.ListActivityTypesOutput) + + ListActivityTypes(*swf.ListActivityTypesInput) (*swf.ListActivityTypesOutput, error) + + ListActivityTypesPages(*swf.ListActivityTypesInput, func(*swf.ListActivityTypesOutput, bool) bool) error + + ListClosedWorkflowExecutionsRequest(*swf.ListClosedWorkflowExecutionsInput) (*request.Request, *swf.WorkflowExecutionInfos) + + ListClosedWorkflowExecutions(*swf.ListClosedWorkflowExecutionsInput) (*swf.WorkflowExecutionInfos, error) + + ListClosedWorkflowExecutionsPages(*swf.ListClosedWorkflowExecutionsInput, func(*swf.WorkflowExecutionInfos, bool) bool) error + + ListDomainsRequest(*swf.ListDomainsInput) (*request.Request, 
*swf.ListDomainsOutput) + + ListDomains(*swf.ListDomainsInput) (*swf.ListDomainsOutput, error) + + ListDomainsPages(*swf.ListDomainsInput, func(*swf.ListDomainsOutput, bool) bool) error + + ListOpenWorkflowExecutionsRequest(*swf.ListOpenWorkflowExecutionsInput) (*request.Request, *swf.WorkflowExecutionInfos) + + ListOpenWorkflowExecutions(*swf.ListOpenWorkflowExecutionsInput) (*swf.WorkflowExecutionInfos, error) + + ListOpenWorkflowExecutionsPages(*swf.ListOpenWorkflowExecutionsInput, func(*swf.WorkflowExecutionInfos, bool) bool) error + + ListWorkflowTypesRequest(*swf.ListWorkflowTypesInput) (*request.Request, *swf.ListWorkflowTypesOutput) + + ListWorkflowTypes(*swf.ListWorkflowTypesInput) (*swf.ListWorkflowTypesOutput, error) + + ListWorkflowTypesPages(*swf.ListWorkflowTypesInput, func(*swf.ListWorkflowTypesOutput, bool) bool) error + + PollForActivityTaskRequest(*swf.PollForActivityTaskInput) (*request.Request, *swf.PollForActivityTaskOutput) + + PollForActivityTask(*swf.PollForActivityTaskInput) (*swf.PollForActivityTaskOutput, error) + + PollForDecisionTaskRequest(*swf.PollForDecisionTaskInput) (*request.Request, *swf.PollForDecisionTaskOutput) + + PollForDecisionTask(*swf.PollForDecisionTaskInput) (*swf.PollForDecisionTaskOutput, error) + + PollForDecisionTaskPages(*swf.PollForDecisionTaskInput, func(*swf.PollForDecisionTaskOutput, bool) bool) error + + RecordActivityTaskHeartbeatRequest(*swf.RecordActivityTaskHeartbeatInput) (*request.Request, *swf.RecordActivityTaskHeartbeatOutput) + + RecordActivityTaskHeartbeat(*swf.RecordActivityTaskHeartbeatInput) (*swf.RecordActivityTaskHeartbeatOutput, error) + + RegisterActivityTypeRequest(*swf.RegisterActivityTypeInput) (*request.Request, *swf.RegisterActivityTypeOutput) + + RegisterActivityType(*swf.RegisterActivityTypeInput) (*swf.RegisterActivityTypeOutput, error) + + RegisterDomainRequest(*swf.RegisterDomainInput) (*request.Request, *swf.RegisterDomainOutput) + + RegisterDomain(*swf.RegisterDomainInput) (*swf.RegisterDomainOutput, error) + + RegisterWorkflowTypeRequest(*swf.RegisterWorkflowTypeInput) (*request.Request, *swf.RegisterWorkflowTypeOutput) + + RegisterWorkflowType(*swf.RegisterWorkflowTypeInput) (*swf.RegisterWorkflowTypeOutput, error) + + RequestCancelWorkflowExecutionRequest(*swf.RequestCancelWorkflowExecutionInput) (*request.Request, *swf.RequestCancelWorkflowExecutionOutput) + + RequestCancelWorkflowExecution(*swf.RequestCancelWorkflowExecutionInput) (*swf.RequestCancelWorkflowExecutionOutput, error) + + RespondActivityTaskCanceledRequest(*swf.RespondActivityTaskCanceledInput) (*request.Request, *swf.RespondActivityTaskCanceledOutput) + + RespondActivityTaskCanceled(*swf.RespondActivityTaskCanceledInput) (*swf.RespondActivityTaskCanceledOutput, error) + + RespondActivityTaskCompletedRequest(*swf.RespondActivityTaskCompletedInput) (*request.Request, *swf.RespondActivityTaskCompletedOutput) + + RespondActivityTaskCompleted(*swf.RespondActivityTaskCompletedInput) (*swf.RespondActivityTaskCompletedOutput, error) + + RespondActivityTaskFailedRequest(*swf.RespondActivityTaskFailedInput) (*request.Request, *swf.RespondActivityTaskFailedOutput) + + RespondActivityTaskFailed(*swf.RespondActivityTaskFailedInput) (*swf.RespondActivityTaskFailedOutput, error) + + RespondDecisionTaskCompletedRequest(*swf.RespondDecisionTaskCompletedInput) (*request.Request, *swf.RespondDecisionTaskCompletedOutput) + + RespondDecisionTaskCompleted(*swf.RespondDecisionTaskCompletedInput) (*swf.RespondDecisionTaskCompletedOutput, error) + + 
SignalWorkflowExecutionRequest(*swf.SignalWorkflowExecutionInput) (*request.Request, *swf.SignalWorkflowExecutionOutput) + + SignalWorkflowExecution(*swf.SignalWorkflowExecutionInput) (*swf.SignalWorkflowExecutionOutput, error) + + StartWorkflowExecutionRequest(*swf.StartWorkflowExecutionInput) (*request.Request, *swf.StartWorkflowExecutionOutput) + + StartWorkflowExecution(*swf.StartWorkflowExecutionInput) (*swf.StartWorkflowExecutionOutput, error) + + TerminateWorkflowExecutionRequest(*swf.TerminateWorkflowExecutionInput) (*request.Request, *swf.TerminateWorkflowExecutionOutput) + + TerminateWorkflowExecution(*swf.TerminateWorkflowExecutionInput) (*swf.TerminateWorkflowExecutionOutput, error) +} + +var _ SWFAPI = (*swf.SWF)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/waf/api.go b/vendor/github.com/aws/aws-sdk-go/service/waf/api.go new file mode 100644 index 000000000..3033366cb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/waf/api.go @@ -0,0 +1,6579 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package waf provides a client for AWS WAF. +package waf + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opCreateByteMatchSet = "CreateByteMatchSet" + +// CreateByteMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the CreateByteMatchSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateByteMatchSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateByteMatchSetRequest method. +// req, resp := client.CreateByteMatchSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WAF) CreateByteMatchSetRequest(input *CreateByteMatchSetInput) (req *request.Request, output *CreateByteMatchSetOutput) { + op := &request.Operation{ + Name: opCreateByteMatchSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateByteMatchSetInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateByteMatchSetOutput{} + req.Data = output + return +} + +// Creates a ByteMatchSet. You then use UpdateByteMatchSet to identify the part +// of a web request that you want AWS WAF to inspect, such as the values of +// the User-Agent header or the query string. For example, you can create a +// ByteMatchSet that matches any requests with User-Agent headers that contain +// the string BadBot. You can then configure AWS WAF to reject those requests. +// +// To create and configure a ByteMatchSet, perform the following steps: +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a CreateByteMatchSet request. Submit a CreateByteMatchSet request. +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of an UpdateByteMatchSet request. 
Submit an UpdateByteMatchSet +// request to specify the part of the request that you want AWS WAF to inspect +// (for example, the header or the URI) and the value that you want AWS WAF +// to watch for. For more information about how to use the AWS WAF API to allow +// or block HTTP requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +func (c *WAF) CreateByteMatchSet(input *CreateByteMatchSetInput) (*CreateByteMatchSetOutput, error) { + req, out := c.CreateByteMatchSetRequest(input) + err := req.Send() + return out, err +} + +const opCreateIPSet = "CreateIPSet" + +// CreateIPSetRequest generates a "aws/request.Request" representing the +// client's request for the CreateIPSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateIPSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateIPSetRequest method. +// req, resp := client.CreateIPSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WAF) CreateIPSetRequest(input *CreateIPSetInput) (req *request.Request, output *CreateIPSetOutput) { + op := &request.Operation{ + Name: opCreateIPSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateIPSetInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateIPSetOutput{} + req.Data = output + return +} + +// Creates an IPSet, which you use to specify which web requests you want to +// allow or block based on the IP addresses that the requests originate from. +// For example, if you're receiving a lot of requests from one or more individual +// IP addresses or one or more ranges of IP addresses and you want to block +// the requests, you can create an IPSet that contains those IP addresses and +// then configure AWS WAF to block the requests. +// +// To create and configure an IPSet, perform the following steps: +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a CreateIPSet request. Submit a CreateIPSet request. Use GetChangeToken +// to get the change token that you provide in the ChangeToken parameter of +// an UpdateIPSet request. Submit an UpdateIPSet request to specify the IP addresses +// that you want AWS WAF to watch for. For more information about how to use +// the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer +// Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +func (c *WAF) CreateIPSet(input *CreateIPSetInput) (*CreateIPSetOutput, error) { + req, out := c.CreateIPSetRequest(input) + err := req.Send() + return out, err +} + +const opCreateRule = "CreateRule" + +// CreateRuleRequest generates a "aws/request.Request" representing the +// client's request for the CreateRule operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CreateRule method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the CreateRuleRequest method.
+//    req, resp := client.CreateRuleRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *WAF) CreateRuleRequest(input *CreateRuleInput) (req *request.Request, output *CreateRuleOutput) {
+	op := &request.Operation{
+		Name:       opCreateRule,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateRuleInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CreateRuleOutput{}
+	req.Data = output
+	return
+}
+
+// Creates a Rule, which contains the IPSet objects, ByteMatchSet objects, and
+// other predicates that identify the requests that you want to block. If you
+// add more than one predicate to a Rule, a request must match all of the specifications
+// to be allowed or blocked. For example, suppose you add the following to a
+// Rule:
+//
+// An IPSet that matches the IP address 192.0.2.44/32
+//
+// A ByteMatchSet that matches BadBot in the User-Agent header
+//
+// You then add the Rule to a WebACL and specify that you want to block requests
+// that satisfy the Rule. For a request to be blocked, it must come from the
+// IP address 192.0.2.44 and the User-Agent header in the request must contain
+// the value BadBot.
+//
+// To create and configure a Rule, perform the following steps:
+//
+// Create and update the predicates that you want to include in the Rule.
+// For more information, see CreateByteMatchSet, CreateIPSet, and CreateSqlInjectionMatchSet.
+// Use GetChangeToken to get the change token that you provide in the ChangeToken
+// parameter of a CreateRule request. Submit a CreateRule request. Use GetChangeToken
+// to get the change token that you provide in the ChangeToken parameter of
+// an UpdateRule request. Submit an UpdateRule request to specify the predicates
+// that you want to include in the Rule. Create and update a WebACL that contains
+// the Rule. For more information, see CreateWebACL. For more information about
+// how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF
+// Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/).
+func (c *WAF) CreateRule(input *CreateRuleInput) (*CreateRuleOutput, error) {
+	req, out := c.CreateRuleRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateSizeConstraintSet = "CreateSizeConstraintSet"
+
+// CreateSizeConstraintSetRequest generates a "aws/request.Request" representing the
+// client's request for the CreateSizeConstraintSet operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CreateSizeConstraintSet method directly
+// instead.
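+//
+// For instance (an editorial sketch, not generated documentation), custom logic
+// can be attached to the returned request before it is sent:
+//
+//    req, resp := client.CreateSizeConstraintSetRequest(params)
+//    req.Handlers.Send.PushFront(func(r *request.Request) {
+//        // custom logic, e.g. inspecting r.HTTPRequest, goes here
+//    })
+//    err := req.Send()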
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateSizeConstraintSetRequest method. +// req, resp := client.CreateSizeConstraintSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WAF) CreateSizeConstraintSetRequest(input *CreateSizeConstraintSetInput) (req *request.Request, output *CreateSizeConstraintSetOutput) { + op := &request.Operation{ + Name: opCreateSizeConstraintSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSizeConstraintSetInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateSizeConstraintSetOutput{} + req.Data = output + return +} + +// Creates a SizeConstraintSet. You then use UpdateSizeConstraintSet to identify +// the part of a web request that you want AWS WAF to check for length, such +// as the length of the User-Agent header or the length of the query string. +// For example, you can create a SizeConstraintSet that matches any requests +// that have a query string that is longer than 100 bytes. You can then configure +// AWS WAF to reject those requests. +// +// To create and configure a SizeConstraintSet, perform the following steps: +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a CreateSizeConstraintSet request. Submit a CreateSizeConstraintSet +// request. Use GetChangeToken to get the change token that you provide in the +// ChangeToken parameter of an UpdateSizeConstraintSet request. Submit an UpdateSizeConstraintSet +// request to specify the part of the request that you want AWS WAF to inspect +// (for example, the header or the URI) and the value that you want AWS WAF +// to watch for. For more information about how to use the AWS WAF API to allow +// or block HTTP requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +func (c *WAF) CreateSizeConstraintSet(input *CreateSizeConstraintSetInput) (*CreateSizeConstraintSetOutput, error) { + req, out := c.CreateSizeConstraintSetRequest(input) + err := req.Send() + return out, err +} + +const opCreateSqlInjectionMatchSet = "CreateSqlInjectionMatchSet" + +// CreateSqlInjectionMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the CreateSqlInjectionMatchSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateSqlInjectionMatchSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateSqlInjectionMatchSetRequest method. 
+// req, resp := client.CreateSqlInjectionMatchSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WAF) CreateSqlInjectionMatchSetRequest(input *CreateSqlInjectionMatchSetInput) (req *request.Request, output *CreateSqlInjectionMatchSetOutput) { + op := &request.Operation{ + Name: opCreateSqlInjectionMatchSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSqlInjectionMatchSetInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateSqlInjectionMatchSetOutput{} + req.Data = output + return +} + +// Creates a SqlInjectionMatchSet, which you use to allow, block, or count requests +// that contain snippets of SQL code in a specified part of web requests. AWS +// WAF searches for character sequences that are likely to be malicious strings. +// +// To create and configure a SqlInjectionMatchSet, perform the following steps: +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a CreateSqlInjectionMatchSet request. Submit a CreateSqlInjectionMatchSet +// request. Use GetChangeToken to get the change token that you provide in the +// ChangeToken parameter of an UpdateSqlInjectionMatchSet request. Submit an +// UpdateSqlInjectionMatchSet request to specify the parts of web requests in +// which you want to allow, block, or count malicious SQL code. For more information +// about how to use the AWS WAF API to allow or block HTTP requests, see the +// AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +func (c *WAF) CreateSqlInjectionMatchSet(input *CreateSqlInjectionMatchSetInput) (*CreateSqlInjectionMatchSetOutput, error) { + req, out := c.CreateSqlInjectionMatchSetRequest(input) + err := req.Send() + return out, err +} + +const opCreateWebACL = "CreateWebACL" + +// CreateWebACLRequest generates a "aws/request.Request" representing the +// client's request for the CreateWebACL operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateWebACL method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateWebACLRequest method. +// req, resp := client.CreateWebACLRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WAF) CreateWebACLRequest(input *CreateWebACLInput) (req *request.Request, output *CreateWebACLOutput) { + op := &request.Operation{ + Name: opCreateWebACL, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateWebACLInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateWebACLOutput{} + req.Data = output + return +} + +// Creates a WebACL, which contains the Rules that identify the CloudFront web +// requests that you want to allow, block, or count. AWS WAF evaluates Rules +// in order based on the value of Priority for each Rule. +// +// You also specify a default action, either ALLOW or BLOCK. 
If a web request +// doesn't match any of the Rules in a WebACL, AWS WAF responds to the request +// with the default action. +// +// To create and configure a WebACL, perform the following steps: +// +// Create and update the ByteMatchSet objects and other predicates that you +// want to include in Rules. For more information, see CreateByteMatchSet, UpdateByteMatchSet, +// CreateIPSet, UpdateIPSet, CreateSqlInjectionMatchSet, and UpdateSqlInjectionMatchSet. +// Create and update the Rules that you want to include in the WebACL. For more +// information, see CreateRule and UpdateRule. Use GetChangeToken to get the +// change token that you provide in the ChangeToken parameter of a CreateWebACL +// request. Submit a CreateWebACL request. Use GetChangeToken to get the change +// token that you provide in the ChangeToken parameter of an UpdateWebACL request. +// Submit an UpdateWebACL request to specify the Rules that you want to include +// in the WebACL, to specify the default action, and to associate the WebACL +// with a CloudFront distribution. For more information about how to use the +// AWS WAF API, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +func (c *WAF) CreateWebACL(input *CreateWebACLInput) (*CreateWebACLOutput, error) { + req, out := c.CreateWebACLRequest(input) + err := req.Send() + return out, err +} + +const opCreateXssMatchSet = "CreateXssMatchSet" + +// CreateXssMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the CreateXssMatchSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateXssMatchSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateXssMatchSetRequest method. +// req, resp := client.CreateXssMatchSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WAF) CreateXssMatchSetRequest(input *CreateXssMatchSetInput) (req *request.Request, output *CreateXssMatchSetOutput) { + op := &request.Operation{ + Name: opCreateXssMatchSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateXssMatchSetInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateXssMatchSetOutput{} + req.Data = output + return +} + +// Creates an XssMatchSet, which you use to allow, block, or count requests +// that contain cross-site scripting attacks in the specified part of web requests. +// AWS WAF searches for character sequences that are likely to be malicious +// strings. +// +// To create and configure an XssMatchSet, perform the following steps: +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a CreateXssMatchSet request. Submit a CreateXssMatchSet request. +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of an UpdateXssMatchSet request. 
Submit an UpdateXssMatchSet request +// to specify the parts of web requests in which you want to allow, block, or +// count cross-site scripting attacks. For more information about how to use +// the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer +// Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +func (c *WAF) CreateXssMatchSet(input *CreateXssMatchSetInput) (*CreateXssMatchSetOutput, error) { + req, out := c.CreateXssMatchSetRequest(input) + err := req.Send() + return out, err +} + +const opDeleteByteMatchSet = "DeleteByteMatchSet" + +// DeleteByteMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteByteMatchSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteByteMatchSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteByteMatchSetRequest method. +// req, resp := client.DeleteByteMatchSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WAF) DeleteByteMatchSetRequest(input *DeleteByteMatchSetInput) (req *request.Request, output *DeleteByteMatchSetOutput) { + op := &request.Operation{ + Name: opDeleteByteMatchSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteByteMatchSetInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteByteMatchSetOutput{} + req.Data = output + return +} + +// Permanently deletes a ByteMatchSet. You can't delete a ByteMatchSet if it's +// still used in any Rules or if it still includes any ByteMatchTuple objects +// (any filters). +// +// If you just want to remove a ByteMatchSet from a Rule, use UpdateRule. +// +// To permanently delete a ByteMatchSet, perform the following steps: +// +// Update the ByteMatchSet to remove filters, if any. For more information, +// see UpdateByteMatchSet. Use GetChangeToken to get the change token that you +// provide in the ChangeToken parameter of a DeleteByteMatchSet request. Submit +// a DeleteByteMatchSet request. +func (c *WAF) DeleteByteMatchSet(input *DeleteByteMatchSetInput) (*DeleteByteMatchSetOutput, error) { + req, out := c.DeleteByteMatchSetRequest(input) + err := req.Send() + return out, err +} + +const opDeleteIPSet = "DeleteIPSet" + +// DeleteIPSetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteIPSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteIPSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the DeleteIPSetRequest method. +// req, resp := client.DeleteIPSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WAF) DeleteIPSetRequest(input *DeleteIPSetInput) (req *request.Request, output *DeleteIPSetOutput) { + op := &request.Operation{ + Name: opDeleteIPSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteIPSetInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteIPSetOutput{} + req.Data = output + return +} + +// Permanently deletes an IPSet. You can't delete an IPSet if it's still used +// in any Rules or if it still includes any IP addresses. +// +// If you just want to remove an IPSet from a Rule, use UpdateRule. +// +// To permanently delete an IPSet from AWS WAF, perform the following steps: +// +// Update the IPSet to remove IP address ranges, if any. For more information, +// see UpdateIPSet. Use GetChangeToken to get the change token that you provide +// in the ChangeToken parameter of a DeleteIPSet request. Submit a DeleteIPSet +// request. +func (c *WAF) DeleteIPSet(input *DeleteIPSetInput) (*DeleteIPSetOutput, error) { + req, out := c.DeleteIPSetRequest(input) + err := req.Send() + return out, err +} + +const opDeleteRule = "DeleteRule" + +// DeleteRuleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRule operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteRule method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteRuleRequest method. +// req, resp := client.DeleteRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WAF) DeleteRuleRequest(input *DeleteRuleInput) (req *request.Request, output *DeleteRuleOutput) { + op := &request.Operation{ + Name: opDeleteRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRuleInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteRuleOutput{} + req.Data = output + return +} + +// Permanently deletes a Rule. You can't delete a Rule if it's still used in +// any WebACL objects or if it still includes any predicates, such as ByteMatchSet +// objects. +// +// If you just want to remove a Rule from a WebACL, use UpdateWebACL. +// +// To permanently delete a Rule from AWS WAF, perform the following steps: +// +// Update the Rule to remove predicates, if any. For more information, see +// UpdateRule. Use GetChangeToken to get the change token that you provide in +// the ChangeToken parameter of a DeleteRule request. Submit a DeleteRule request. 
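+//
+// A compressed sketch of those steps (an editorial illustration, not generated
+// documentation; the rule ID is an assumption):
+//
+//    tok, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
+//    if err == nil {
+//        _, err = svc.DeleteRule(&waf.DeleteRuleInput{
+//            ChangeToken: tok.ChangeToken,
+//            RuleId:      aws.String("example-rule-id"),
+//        })
+//    }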
+func (c *WAF) DeleteRule(input *DeleteRuleInput) (*DeleteRuleOutput, error) { + req, out := c.DeleteRuleRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSizeConstraintSet = "DeleteSizeConstraintSet" + +// DeleteSizeConstraintSetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSizeConstraintSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteSizeConstraintSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteSizeConstraintSetRequest method. +// req, resp := client.DeleteSizeConstraintSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WAF) DeleteSizeConstraintSetRequest(input *DeleteSizeConstraintSetInput) (req *request.Request, output *DeleteSizeConstraintSetOutput) { + op := &request.Operation{ + Name: opDeleteSizeConstraintSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSizeConstraintSetInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteSizeConstraintSetOutput{} + req.Data = output + return +} + +// Permanently deletes a SizeConstraintSet. You can't delete a SizeConstraintSet +// if it's still used in any Rules or if it still includes any SizeConstraint +// objects (any filters). +// +// If you just want to remove a SizeConstraintSet from a Rule, use UpdateRule. +// +// To permanently delete a SizeConstraintSet, perform the following steps: +// +// Update the SizeConstraintSet to remove filters, if any. For more information, +// see UpdateSizeConstraintSet. Use GetChangeToken to get the change token that +// you provide in the ChangeToken parameter of a DeleteSizeConstraintSet request. +// Submit a DeleteSizeConstraintSet request. +func (c *WAF) DeleteSizeConstraintSet(input *DeleteSizeConstraintSetInput) (*DeleteSizeConstraintSetOutput, error) { + req, out := c.DeleteSizeConstraintSetRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSqlInjectionMatchSet = "DeleteSqlInjectionMatchSet" + +// DeleteSqlInjectionMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSqlInjectionMatchSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteSqlInjectionMatchSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteSqlInjectionMatchSetRequest method. 
+// req, resp := client.DeleteSqlInjectionMatchSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WAF) DeleteSqlInjectionMatchSetRequest(input *DeleteSqlInjectionMatchSetInput) (req *request.Request, output *DeleteSqlInjectionMatchSetOutput) { + op := &request.Operation{ + Name: opDeleteSqlInjectionMatchSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSqlInjectionMatchSetInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteSqlInjectionMatchSetOutput{} + req.Data = output + return +} + +// Permanently deletes a SqlInjectionMatchSet. You can't delete a SqlInjectionMatchSet +// if it's still used in any Rules or if it still contains any SqlInjectionMatchTuple +// objects. +// +// If you just want to remove a SqlInjectionMatchSet from a Rule, use UpdateRule. +// +// To permanently delete a SqlInjectionMatchSet from AWS WAF, perform the following +// steps: +// +// Update the SqlInjectionMatchSet to remove filters, if any. For more information, +// see UpdateSqlInjectionMatchSet. Use GetChangeToken to get the change token +// that you provide in the ChangeToken parameter of a DeleteSqlInjectionMatchSet +// request. Submit a DeleteSqlInjectionMatchSet request. +func (c *WAF) DeleteSqlInjectionMatchSet(input *DeleteSqlInjectionMatchSetInput) (*DeleteSqlInjectionMatchSetOutput, error) { + req, out := c.DeleteSqlInjectionMatchSetRequest(input) + err := req.Send() + return out, err +} + +const opDeleteWebACL = "DeleteWebACL" + +// DeleteWebACLRequest generates a "aws/request.Request" representing the +// client's request for the DeleteWebACL operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteWebACL method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteWebACLRequest method. +// req, resp := client.DeleteWebACLRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WAF) DeleteWebACLRequest(input *DeleteWebACLInput) (req *request.Request, output *DeleteWebACLOutput) { + op := &request.Operation{ + Name: opDeleteWebACL, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteWebACLInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteWebACLOutput{} + req.Data = output + return +} + +// Permanently deletes a WebACL. You can't delete a WebACL if it still contains +// any Rules. +// +// To delete a WebACL, perform the following steps: +// +// Update the WebACL to remove Rules, if any. For more information, see UpdateWebACL. +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a DeleteWebACL request. Submit a DeleteWebACL request. 
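+//
+// A compressed sketch of those steps, including checking propagation afterwards
+// (an editorial illustration, not generated documentation; the ACL ID is an
+// assumption):
+//
+//    tok, _ := svc.GetChangeToken(&waf.GetChangeTokenInput{})
+//    _, err := svc.DeleteWebACL(&waf.DeleteWebACLInput{
+//        ChangeToken: tok.ChangeToken,
+//        WebACLId:    aws.String("example-web-acl-id"),
+//    })
+//    status, _ := svc.GetChangeTokenStatus(&waf.GetChangeTokenStatusInput{
+//        ChangeToken: tok.ChangeToken,
+//    })
+//    fmt.Println(status.ChangeTokenStatus)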
+func (c *WAF) DeleteWebACL(input *DeleteWebACLInput) (*DeleteWebACLOutput, error) { + req, out := c.DeleteWebACLRequest(input) + err := req.Send() + return out, err +} + +const opDeleteXssMatchSet = "DeleteXssMatchSet" + +// DeleteXssMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteXssMatchSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteXssMatchSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteXssMatchSetRequest method. +// req, resp := client.DeleteXssMatchSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WAF) DeleteXssMatchSetRequest(input *DeleteXssMatchSetInput) (req *request.Request, output *DeleteXssMatchSetOutput) { + op := &request.Operation{ + Name: opDeleteXssMatchSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteXssMatchSetInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteXssMatchSetOutput{} + req.Data = output + return +} + +// Permanently deletes an XssMatchSet. You can't delete an XssMatchSet if it's +// still used in any Rules or if it still contains any XssMatchTuple objects. +// +// If you just want to remove an XssMatchSet from a Rule, use UpdateRule. +// +// To permanently delete an XssMatchSet from AWS WAF, perform the following +// steps: +// +// Update the XssMatchSet to remove filters, if any. For more information, +// see UpdateXssMatchSet. Use GetChangeToken to get the change token that you +// provide in the ChangeToken parameter of a DeleteXssMatchSet request. Submit +// a DeleteXssMatchSet request. +func (c *WAF) DeleteXssMatchSet(input *DeleteXssMatchSetInput) (*DeleteXssMatchSetOutput, error) { + req, out := c.DeleteXssMatchSetRequest(input) + err := req.Send() + return out, err +} + +const opGetByteMatchSet = "GetByteMatchSet" + +// GetByteMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the GetByteMatchSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetByteMatchSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetByteMatchSetRequest method. 
+// req, resp := client.GetByteMatchSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WAF) GetByteMatchSetRequest(input *GetByteMatchSetInput) (req *request.Request, output *GetByteMatchSetOutput) { + op := &request.Operation{ + Name: opGetByteMatchSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetByteMatchSetInput{} + } + + req = c.newRequest(op, input, output) + output = &GetByteMatchSetOutput{} + req.Data = output + return +} + +// Returns the ByteMatchSet specified by ByteMatchSetId. +func (c *WAF) GetByteMatchSet(input *GetByteMatchSetInput) (*GetByteMatchSetOutput, error) { + req, out := c.GetByteMatchSetRequest(input) + err := req.Send() + return out, err +} + +const opGetChangeToken = "GetChangeToken" + +// GetChangeTokenRequest generates a "aws/request.Request" representing the +// client's request for the GetChangeToken operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetChangeToken method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetChangeTokenRequest method. +// req, resp := client.GetChangeTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WAF) GetChangeTokenRequest(input *GetChangeTokenInput) (req *request.Request, output *GetChangeTokenOutput) { + op := &request.Operation{ + Name: opGetChangeToken, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetChangeTokenInput{} + } + + req = c.newRequest(op, input, output) + output = &GetChangeTokenOutput{} + req.Data = output + return +} + +// When you want to create, update, or delete AWS WAF objects, get a change +// token and include the change token in the create, update, or delete request. +// Change tokens ensure that your application doesn't submit conflicting requests +// to AWS WAF. +// +// Each create, update, or delete request must use a unique change token. If +// your application submits a GetChangeToken request and then submits a second +// GetChangeToken request before submitting a create, update, or delete request, +// the second GetChangeToken request returns the same value as the first GetChangeToken +// request. +// +// When you use a change token in a create, update, or delete request, the +// status of the change token changes to PENDING, which indicates that AWS WAF +// is propagating the change to all AWS WAF servers. Use GetChangeTokenStatus +// to determine the status of your change token. +func (c *WAF) GetChangeToken(input *GetChangeTokenInput) (*GetChangeTokenOutput, error) { + req, out := c.GetChangeTokenRequest(input) + err := req.Send() + return out, err +} + +const opGetChangeTokenStatus = "GetChangeTokenStatus" + +// GetChangeTokenStatusRequest generates a "aws/request.Request" representing the +// client's request for the GetChangeTokenStatus operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetChangeTokenStatus method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetChangeTokenStatusRequest method. +// req, resp := client.GetChangeTokenStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WAF) GetChangeTokenStatusRequest(input *GetChangeTokenStatusInput) (req *request.Request, output *GetChangeTokenStatusOutput) { + op := &request.Operation{ + Name: opGetChangeTokenStatus, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetChangeTokenStatusInput{} + } + + req = c.newRequest(op, input, output) + output = &GetChangeTokenStatusOutput{} + req.Data = output + return +} + +// Returns the status of a ChangeToken that you got by calling GetChangeToken. +// ChangeTokenStatus is one of the following values: +// +// PROVISIONED: You requested the change token by calling GetChangeToken, +// but you haven't used it yet in a call to create, update, or delete an AWS +// WAF object. PENDING: AWS WAF is propagating the create, update, or delete +// request to all AWS WAF servers. IN_SYNC: Propagation is complete. +func (c *WAF) GetChangeTokenStatus(input *GetChangeTokenStatusInput) (*GetChangeTokenStatusOutput, error) { + req, out := c.GetChangeTokenStatusRequest(input) + err := req.Send() + return out, err +} + +const opGetIPSet = "GetIPSet" + +// GetIPSetRequest generates a "aws/request.Request" representing the +// client's request for the GetIPSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetIPSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetIPSetRequest method. +// req, resp := client.GetIPSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WAF) GetIPSetRequest(input *GetIPSetInput) (req *request.Request, output *GetIPSetOutput) { + op := &request.Operation{ + Name: opGetIPSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetIPSetInput{} + } + + req = c.newRequest(op, input, output) + output = &GetIPSetOutput{} + req.Data = output + return +} + +// Returns the IPSet that is specified by IPSetId. 
+func (c *WAF) GetIPSet(input *GetIPSetInput) (*GetIPSetOutput, error) { + req, out := c.GetIPSetRequest(input) + err := req.Send() + return out, err +} + +const opGetRule = "GetRule" + +// GetRuleRequest generates a "aws/request.Request" representing the +// client's request for the GetRule operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetRule method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetRuleRequest method. +// req, resp := client.GetRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WAF) GetRuleRequest(input *GetRuleInput) (req *request.Request, output *GetRuleOutput) { + op := &request.Operation{ + Name: opGetRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetRuleInput{} + } + + req = c.newRequest(op, input, output) + output = &GetRuleOutput{} + req.Data = output + return +} + +// Returns the Rule that is specified by the RuleId that you included in the +// GetRule request. +func (c *WAF) GetRule(input *GetRuleInput) (*GetRuleOutput, error) { + req, out := c.GetRuleRequest(input) + err := req.Send() + return out, err +} + +const opGetSampledRequests = "GetSampledRequests" + +// GetSampledRequestsRequest generates a "aws/request.Request" representing the +// client's request for the GetSampledRequests operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetSampledRequests method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetSampledRequestsRequest method. +// req, resp := client.GetSampledRequestsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WAF) GetSampledRequestsRequest(input *GetSampledRequestsInput) (req *request.Request, output *GetSampledRequestsOutput) { + op := &request.Operation{ + Name: opGetSampledRequests, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSampledRequestsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetSampledRequestsOutput{} + req.Data = output + return +} + +// Gets detailed information about a specified number of requests--a sample--that +// AWS WAF randomly selects from among the first 5,000 requests that your AWS +// resource received during a time range that you choose. You can specify a +// sample size of up to 100 requests, and you can specify any time range in +// the previous three hours. 
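+//
+// As an illustrative sketch (assuming a *waf.WAF client named svc; the IDs
+// and the one-hour window are hypothetical placeholders, not from the
+// generated docs), sampling the most recent hour might look like:
+//
+// resp, err := svc.GetSampledRequests(&waf.GetSampledRequestsInput{
+// WebAclId: aws.String("example-web-acl-id"), // hypothetical
+// RuleId: aws.String("example-rule-id"), // hypothetical
+// MaxItems: aws.Int64(100),
+// TimeWindow: &waf.TimeWindow{
+// StartTime: aws.Time(time.Now().Add(-1 * time.Hour)),
+// EndTime: aws.Time(time.Now()),
+// },
+// })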
+// +// GetSampledRequests returns a time range, which is usually the time range +// that you specified. However, if your resource (such as a CloudFront distribution) +// received 5,000 requests before the specified time range elapsed, GetSampledRequests +// returns an updated time range. This new time range indicates the actual period +// during which AWS WAF selected the requests in the sample. +func (c *WAF) GetSampledRequests(input *GetSampledRequestsInput) (*GetSampledRequestsOutput, error) { + req, out := c.GetSampledRequestsRequest(input) + err := req.Send() + return out, err +} + +const opGetSizeConstraintSet = "GetSizeConstraintSet" + +// GetSizeConstraintSetRequest generates a "aws/request.Request" representing the +// client's request for the GetSizeConstraintSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetSizeConstraintSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetSizeConstraintSetRequest method. +// req, resp := client.GetSizeConstraintSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WAF) GetSizeConstraintSetRequest(input *GetSizeConstraintSetInput) (req *request.Request, output *GetSizeConstraintSetOutput) { + op := &request.Operation{ + Name: opGetSizeConstraintSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSizeConstraintSetInput{} + } + + req = c.newRequest(op, input, output) + output = &GetSizeConstraintSetOutput{} + req.Data = output + return +} + +// Returns the SizeConstraintSet specified by SizeConstraintSetId. +func (c *WAF) GetSizeConstraintSet(input *GetSizeConstraintSetInput) (*GetSizeConstraintSetOutput, error) { + req, out := c.GetSizeConstraintSetRequest(input) + err := req.Send() + return out, err +} + +const opGetSqlInjectionMatchSet = "GetSqlInjectionMatchSet" + +// GetSqlInjectionMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the GetSqlInjectionMatchSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetSqlInjectionMatchSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetSqlInjectionMatchSetRequest method. 
+// req, resp := client.GetSqlInjectionMatchSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WAF) GetSqlInjectionMatchSetRequest(input *GetSqlInjectionMatchSetInput) (req *request.Request, output *GetSqlInjectionMatchSetOutput) { + op := &request.Operation{ + Name: opGetSqlInjectionMatchSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSqlInjectionMatchSetInput{} + } + + req = c.newRequest(op, input, output) + output = &GetSqlInjectionMatchSetOutput{} + req.Data = output + return +} + +// Returns the SqlInjectionMatchSet that is specified by SqlInjectionMatchSetId. +func (c *WAF) GetSqlInjectionMatchSet(input *GetSqlInjectionMatchSetInput) (*GetSqlInjectionMatchSetOutput, error) { + req, out := c.GetSqlInjectionMatchSetRequest(input) + err := req.Send() + return out, err +} + +const opGetWebACL = "GetWebACL" + +// GetWebACLRequest generates a "aws/request.Request" representing the +// client's request for the GetWebACL operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetWebACL method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetWebACLRequest method. +// req, resp := client.GetWebACLRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WAF) GetWebACLRequest(input *GetWebACLInput) (req *request.Request, output *GetWebACLOutput) { + op := &request.Operation{ + Name: opGetWebACL, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetWebACLInput{} + } + + req = c.newRequest(op, input, output) + output = &GetWebACLOutput{} + req.Data = output + return +} + +// Returns the WebACL that is specified by WebACLId. +func (c *WAF) GetWebACL(input *GetWebACLInput) (*GetWebACLOutput, error) { + req, out := c.GetWebACLRequest(input) + err := req.Send() + return out, err +} + +const opGetXssMatchSet = "GetXssMatchSet" + +// GetXssMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the GetXssMatchSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetXssMatchSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetXssMatchSetRequest method. 
+// req, resp := client.GetXssMatchSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WAF) GetXssMatchSetRequest(input *GetXssMatchSetInput) (req *request.Request, output *GetXssMatchSetOutput) { + op := &request.Operation{ + Name: opGetXssMatchSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetXssMatchSetInput{} + } + + req = c.newRequest(op, input, output) + output = &GetXssMatchSetOutput{} + req.Data = output + return +} + +// Returns the XssMatchSet that is specified by XssMatchSetId. +func (c *WAF) GetXssMatchSet(input *GetXssMatchSetInput) (*GetXssMatchSetOutput, error) { + req, out := c.GetXssMatchSetRequest(input) + err := req.Send() + return out, err +} + +const opListByteMatchSets = "ListByteMatchSets" + +// ListByteMatchSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListByteMatchSets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListByteMatchSets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListByteMatchSetsRequest method. +// req, resp := client.ListByteMatchSetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WAF) ListByteMatchSetsRequest(input *ListByteMatchSetsInput) (req *request.Request, output *ListByteMatchSetsOutput) { + op := &request.Operation{ + Name: opListByteMatchSets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListByteMatchSetsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListByteMatchSetsOutput{} + req.Data = output + return +} + +// Returns an array of ByteMatchSetSummary objects. +func (c *WAF) ListByteMatchSets(input *ListByteMatchSetsInput) (*ListByteMatchSetsOutput, error) { + req, out := c.ListByteMatchSetsRequest(input) + err := req.Send() + return out, err +} + +const opListIPSets = "ListIPSets" + +// ListIPSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListIPSets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListIPSets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListIPSetsRequest method. 
+// req, resp := client.ListIPSetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WAF) ListIPSetsRequest(input *ListIPSetsInput) (req *request.Request, output *ListIPSetsOutput) { + op := &request.Operation{ + Name: opListIPSets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListIPSetsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListIPSetsOutput{} + req.Data = output + return +} + +// Returns an array of IPSetSummary objects in the response. +func (c *WAF) ListIPSets(input *ListIPSetsInput) (*ListIPSetsOutput, error) { + req, out := c.ListIPSetsRequest(input) + err := req.Send() + return out, err +} + +const opListRules = "ListRules" + +// ListRulesRequest generates a "aws/request.Request" representing the +// client's request for the ListRules operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListRules method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListRulesRequest method. +// req, resp := client.ListRulesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WAF) ListRulesRequest(input *ListRulesInput) (req *request.Request, output *ListRulesOutput) { + op := &request.Operation{ + Name: opListRules, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListRulesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListRulesOutput{} + req.Data = output + return +} + +// Returns an array of RuleSummary objects. +func (c *WAF) ListRules(input *ListRulesInput) (*ListRulesOutput, error) { + req, out := c.ListRulesRequest(input) + err := req.Send() + return out, err +} + +const opListSizeConstraintSets = "ListSizeConstraintSets" + +// ListSizeConstraintSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListSizeConstraintSets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListSizeConstraintSets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListSizeConstraintSetsRequest method. 
+// req, resp := client.ListSizeConstraintSetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WAF) ListSizeConstraintSetsRequest(input *ListSizeConstraintSetsInput) (req *request.Request, output *ListSizeConstraintSetsOutput) { + op := &request.Operation{ + Name: opListSizeConstraintSets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListSizeConstraintSetsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListSizeConstraintSetsOutput{} + req.Data = output + return +} + +// Returns an array of SizeConstraintSetSummary objects. +func (c *WAF) ListSizeConstraintSets(input *ListSizeConstraintSetsInput) (*ListSizeConstraintSetsOutput, error) { + req, out := c.ListSizeConstraintSetsRequest(input) + err := req.Send() + return out, err +} + +const opListSqlInjectionMatchSets = "ListSqlInjectionMatchSets" + +// ListSqlInjectionMatchSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListSqlInjectionMatchSets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListSqlInjectionMatchSets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListSqlInjectionMatchSetsRequest method. +// req, resp := client.ListSqlInjectionMatchSetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WAF) ListSqlInjectionMatchSetsRequest(input *ListSqlInjectionMatchSetsInput) (req *request.Request, output *ListSqlInjectionMatchSetsOutput) { + op := &request.Operation{ + Name: opListSqlInjectionMatchSets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListSqlInjectionMatchSetsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListSqlInjectionMatchSetsOutput{} + req.Data = output + return +} + +// Returns an array of SqlInjectionMatchSet objects. +func (c *WAF) ListSqlInjectionMatchSets(input *ListSqlInjectionMatchSetsInput) (*ListSqlInjectionMatchSetsOutput, error) { + req, out := c.ListSqlInjectionMatchSetsRequest(input) + err := req.Send() + return out, err +} + +const opListWebACLs = "ListWebACLs" + +// ListWebACLsRequest generates a "aws/request.Request" representing the +// client's request for the ListWebACLs operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListWebACLs method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListWebACLsRequest method. 
+// req, resp := client.ListWebACLsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WAF) ListWebACLsRequest(input *ListWebACLsInput) (req *request.Request, output *ListWebACLsOutput) { + op := &request.Operation{ + Name: opListWebACLs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListWebACLsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListWebACLsOutput{} + req.Data = output + return +} + +// Returns an array of WebACLSummary objects in the response. +func (c *WAF) ListWebACLs(input *ListWebACLsInput) (*ListWebACLsOutput, error) { + req, out := c.ListWebACLsRequest(input) + err := req.Send() + return out, err +} + +const opListXssMatchSets = "ListXssMatchSets" + +// ListXssMatchSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListXssMatchSets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListXssMatchSets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListXssMatchSetsRequest method. +// req, resp := client.ListXssMatchSetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WAF) ListXssMatchSetsRequest(input *ListXssMatchSetsInput) (req *request.Request, output *ListXssMatchSetsOutput) { + op := &request.Operation{ + Name: opListXssMatchSets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListXssMatchSetsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListXssMatchSetsOutput{} + req.Data = output + return +} + +// Returns an array of XssMatchSet objects. +func (c *WAF) ListXssMatchSets(input *ListXssMatchSetsInput) (*ListXssMatchSetsOutput, error) { + req, out := c.ListXssMatchSetsRequest(input) + err := req.Send() + return out, err +} + +const opUpdateByteMatchSet = "UpdateByteMatchSet" + +// UpdateByteMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the UpdateByteMatchSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateByteMatchSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateByteMatchSetRequest method. 
+// req, resp := client.UpdateByteMatchSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WAF) UpdateByteMatchSetRequest(input *UpdateByteMatchSetInput) (req *request.Request, output *UpdateByteMatchSetOutput) { + op := &request.Operation{ + Name: opUpdateByteMatchSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateByteMatchSetInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateByteMatchSetOutput{} + req.Data = output + return +} + +// Inserts or deletes ByteMatchTuple objects (filters) in a ByteMatchSet. For +// each ByteMatchTuple object, you specify the following values: +// +// Whether to insert or delete the object from the array. If you want to change +// a ByteMatchSetUpdate object, you delete the existing object and add a new +// one. The part of a web request that you want AWS WAF to inspect, such as +// a query string or the value of the User-Agent header. The bytes (typically +// a string that corresponds with ASCII characters) that you want AWS WAF to +// look for. For more information, including how you specify the values for +// the AWS WAF API and the AWS CLI or SDKs, see TargetString in the ByteMatchTuple +// data type. Where to look, such as at the beginning or the end of a query +// string. Whether to perform any conversions on the request, such as converting +// it to lowercase, before inspecting it for the specified string. For example, +// you can add a ByteMatchSetUpdate object that matches web requests in which +// User-Agent headers contain the string BadBot. You can then configure AWS +// WAF to block those requests. +// +// To create and configure a ByteMatchSet, perform the following steps: +// +// Create a ByteMatchSet. For more information, see CreateByteMatchSet. Use +// GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of an UpdateByteMatchSet request. Submit an UpdateByteMatchSet +// request to specify the part of the request that you want AWS WAF to inspect +// (for example, the header or the URI) and the value that you want AWS WAF +// to watch for. For more information about how to use the AWS WAF API to allow +// or block HTTP requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +func (c *WAF) UpdateByteMatchSet(input *UpdateByteMatchSetInput) (*UpdateByteMatchSetOutput, error) { + req, out := c.UpdateByteMatchSetRequest(input) + err := req.Send() + return out, err +} + +const opUpdateIPSet = "UpdateIPSet" + +// UpdateIPSetRequest generates a "aws/request.Request" representing the +// client's request for the UpdateIPSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateIPSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateIPSetRequest method. 
+// req, resp := client.UpdateIPSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WAF) UpdateIPSetRequest(input *UpdateIPSetInput) (req *request.Request, output *UpdateIPSetOutput) { + op := &request.Operation{ + Name: opUpdateIPSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateIPSetInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateIPSetOutput{} + req.Data = output + return +} + +// Inserts or deletes IPSetDescriptor objects in an IPSet. For each IPSetDescriptor +// object, you specify the following values: +// +// Whether to insert or delete the object from the array. If you want to change +// an IPSetDescriptor object, you delete the existing object and add a new one. +// The IP address version, IPv4. The IP address in CIDR notation, for example, +// 192.0.2.0/24 (for the range of IP addresses from 192.0.2.0 to 192.0.2.255) +// or 192.0.2.44/32 (for the individual IP address 192.0.2.44). AWS WAF supports +// /8, /16, /24, and /32 IP address ranges. For more information about CIDR +// notation, see the Wikipedia entry Classless Inter-Domain Routing (https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing). +// +// You use an IPSet to specify which web requests you want to allow or block +// based on the IP addresses that the requests originated from. For example, +// if you're receiving a lot of requests from one or a small number of IP addresses +// and you want to block the requests, you can create an IPSet that specifies +// those IP addresses, and then configure AWS WAF to block the requests. +// +// To create and configure an IPSet, perform the following steps: +// +// Submit a CreateIPSet request. Use GetChangeToken to get the change token +// that you provide in the ChangeToken parameter of an UpdateIPSet request. +// Submit an UpdateIPSet request to specify the IP addresses that you want AWS +// WAF to watch for. When you update an IPSet, you specify the IP addresses +// that you want to add and/or the IP addresses that you want to delete. If +// you want to change an IP address, you delete the existing IP address and +// add the new one. +// +// For more information about how to use the AWS WAF API to allow or block +// HTTP requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +func (c *WAF) UpdateIPSet(input *UpdateIPSetInput) (*UpdateIPSetOutput, error) { + req, out := c.UpdateIPSetRequest(input) + err := req.Send() + return out, err +} + +const opUpdateRule = "UpdateRule" + +// UpdateRuleRequest generates a "aws/request.Request" representing the +// client's request for the UpdateRule operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateRule method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateRuleRequest method. 
+// req, resp := client.UpdateRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WAF) UpdateRuleRequest(input *UpdateRuleInput) (req *request.Request, output *UpdateRuleOutput) { + op := &request.Operation{ + Name: opUpdateRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateRuleInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateRuleOutput{} + req.Data = output + return +} + +// Inserts or deletes Predicate objects in a Rule. Each Predicate object identifies +// a predicate, such as a ByteMatchSet or an IPSet, that specifies the web requests +// that you want to allow, block, or count. If you add more than one predicate +// to a Rule, a request must match all of the specifications to be allowed, +// blocked, or counted. For example, suppose you add the following to a Rule: +// +// A ByteMatchSet that matches the value BadBot in the User-Agent header An +// IPSet that matches the IP address 192.0.2.44 You then add the Rule to a +// WebACL and specify that you want to block requests that satisfy the Rule. +// For a request to be blocked, the User-Agent header in the request must contain +// the value BadBot and the request must originate from the IP address 192.0.2.44. +// +// To create and configure a Rule, perform the following steps: +// +// Create and update the predicates that you want to include in the Rule. +// Create the Rule. See CreateRule. Use GetChangeToken to get the change token +// that you provide in the ChangeToken parameter of an UpdateRule request. Submit +// an UpdateRule request to add predicates to the Rule. Create and update a +// WebACL that contains the Rule. See CreateWebACL. If you want to replace +// one ByteMatchSet or IPSet with another, you delete the existing one and add +// the new one. +// +// For more information about how to use the AWS WAF API to allow or block +// HTTP requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +func (c *WAF) UpdateRule(input *UpdateRuleInput) (*UpdateRuleOutput, error) { + req, out := c.UpdateRuleRequest(input) + err := req.Send() + return out, err +} + +const opUpdateSizeConstraintSet = "UpdateSizeConstraintSet" + +// UpdateSizeConstraintSetRequest generates a "aws/request.Request" representing the +// client's request for the UpdateSizeConstraintSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateSizeConstraintSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateSizeConstraintSetRequest method. 
+// req, resp := client.UpdateSizeConstraintSetRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *WAF) UpdateSizeConstraintSetRequest(input *UpdateSizeConstraintSetInput) (req *request.Request, output *UpdateSizeConstraintSetOutput) {
+ op := &request.Operation{
+ Name: opUpdateSizeConstraintSet,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &UpdateSizeConstraintSetInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &UpdateSizeConstraintSetOutput{}
+ req.Data = output
+ return
+}
+
+// Inserts or deletes SizeConstraint objects (filters) in a SizeConstraintSet.
+// For each SizeConstraint object, you specify the following values:
+//
+//    * Whether to insert or delete the object from the array. If you want
+//      to change a SizeConstraintSetUpdate object, you delete the existing
+//      object and add a new one.
+//
+//    * The part of a web request that you want AWS WAF to evaluate, such as
+//      the length of a query string or the length of the User-Agent header.
+//
+//    * Whether to perform any transformations on the request, such as converting
+//      it to lowercase, before checking its length. Note that transformations
+//      of the request body are not supported because the AWS resource forwards
+//      only the first 8192 bytes of your request to AWS WAF.
+//
+//    * A ComparisonOperator used for evaluating the selected part of the request
+//      against the specified Size, such as equals, greater than, less than,
+//      and so on.
+//
+//    * The length, in bytes, that you want AWS WAF to watch for in the selected
+//      part of the request. The length is computed after applying the transformation.
+//
+// For example, you can add a SizeConstraintSetUpdate object that matches web
+// requests in which the length of the User-Agent header is greater than 100
+// bytes. You can then configure AWS WAF to block those requests.
+//
+// To create and configure a SizeConstraintSet, perform the following steps:
+//
+//    * Create a SizeConstraintSet. For more information, see CreateSizeConstraintSet.
+//
+//    * Use GetChangeToken to get the change token that you provide in the
+//      ChangeToken parameter of an UpdateSizeConstraintSet request.
+//
+//    * Submit an UpdateSizeConstraintSet request to specify the part of the
+//      request that you want AWS WAF to inspect (for example, the header or
+//      the URI) and the value that you want AWS WAF to watch for.
+//
+// For more information about how to use the AWS WAF API to allow or block
+// HTTP requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/).
+func (c *WAF) UpdateSizeConstraintSet(input *UpdateSizeConstraintSetInput) (*UpdateSizeConstraintSetOutput, error) {
+ req, out := c.UpdateSizeConstraintSetRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opUpdateSqlInjectionMatchSet = "UpdateSqlInjectionMatchSet"
+
+// UpdateSqlInjectionMatchSetRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateSqlInjectionMatchSet operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the UpdateSqlInjectionMatchSet method directly
+// instead.
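+//
+// As a hedged sketch of that request-object form (svc, input, and the header
+// name are placeholders, not from the generated docs):
+//
+// req, out := svc.UpdateSqlInjectionMatchSetRequest(input)
+// req.HTTPRequest.Header.Set("X-Example", "demo") // e.g. adjust the request before sending
+// if err := req.Send(); err == nil {
+// fmt.Println(out.ChangeToken)
+// }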
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the UpdateSqlInjectionMatchSetRequest method.
+// req, resp := client.UpdateSqlInjectionMatchSetRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *WAF) UpdateSqlInjectionMatchSetRequest(input *UpdateSqlInjectionMatchSetInput) (req *request.Request, output *UpdateSqlInjectionMatchSetOutput) {
+ op := &request.Operation{
+ Name: opUpdateSqlInjectionMatchSet,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &UpdateSqlInjectionMatchSetInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &UpdateSqlInjectionMatchSetOutput{}
+ req.Data = output
+ return
+}
+
+// Inserts or deletes SqlInjectionMatchTuple objects (filters) in a SqlInjectionMatchSet.
+// For each SqlInjectionMatchTuple object, you specify the following values:
+//
+//    * Action: Whether to insert the object into or delete the object from
+//      the array. To change a SqlInjectionMatchTuple, you delete the existing
+//      object and add a new one.
+//
+//    * FieldToMatch: The part of web requests that you want AWS WAF to inspect
+//      and, if you want AWS WAF to inspect a header, the name of the header.
+//
+//    * TextTransformation: Which text transformation, if any, to perform on
+//      the web request before inspecting the request for snippets of malicious
+//      SQL code.
+//
+// You use SqlInjectionMatchSet objects to specify which CloudFront requests
+// you want to allow, block, or count. For example, if you're receiving requests
+// that contain snippets of SQL code in the query string and you want to block
+// the requests, you can create a SqlInjectionMatchSet with the applicable
+// settings, and then configure AWS WAF to block the requests.
+//
+// To create and configure a SqlInjectionMatchSet, perform the following steps:
+//
+//    * Submit a CreateSqlInjectionMatchSet request.
+//
+//    * Use GetChangeToken to get the change token that you provide in the
+//      ChangeToken parameter of an UpdateSqlInjectionMatchSet request.
+//
+//    * Submit an UpdateSqlInjectionMatchSet request to specify the parts of
+//      web requests that you want AWS WAF to inspect for snippets of SQL code.
+//
+// For more information about how to use the AWS WAF API to allow or block
+// HTTP requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/).
+func (c *WAF) UpdateSqlInjectionMatchSet(input *UpdateSqlInjectionMatchSetInput) (*UpdateSqlInjectionMatchSetOutput, error) {
+ req, out := c.UpdateSqlInjectionMatchSetRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opUpdateWebACL = "UpdateWebACL"
+
+// UpdateWebACLRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateWebACL operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the UpdateWebACL method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the UpdateWebACLRequest method.
+// req, resp := client.UpdateWebACLRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WAF) UpdateWebACLRequest(input *UpdateWebACLInput) (req *request.Request, output *UpdateWebACLOutput) { + op := &request.Operation{ + Name: opUpdateWebACL, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateWebACLInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateWebACLOutput{} + req.Data = output + return +} + +// Inserts or deletes ActivatedRule objects in a WebACL. Each Rule identifies +// web requests that you want to allow, block, or count. When you update a WebACL, +// you specify the following values: +// +// A default action for the WebACL, either ALLOW or BLOCK. AWS WAF performs +// the default action if a request doesn't match the criteria in any of the +// Rules in a WebACL. The Rules that you want to add and/or delete. If you want +// to replace one Rule with another, you delete the existing Rule and add the +// new one. For each Rule, whether you want AWS WAF to allow requests, block +// requests, or count requests that match the conditions in the Rule. The order +// in which you want AWS WAF to evaluate the Rules in a WebACL. If you add more +// than one Rule to a WebACL, AWS WAF evaluates each request against the Rules +// in order based on the value of Priority. (The Rule that has the lowest value +// for Priority is evaluated first.) When a web request matches all of the predicates +// (such as ByteMatchSets and IPSets) in a Rule, AWS WAF immediately takes the +// corresponding action, allow or block, and doesn't evaluate the request against +// the remaining Rules in the WebACL, if any. The CloudFront distribution that +// you want to associate with the WebACL. To create and configure a WebACL, +// perform the following steps: +// +// Create and update the predicates that you want to include in Rules. For +// more information, see CreateByteMatchSet, UpdateByteMatchSet, CreateIPSet, +// UpdateIPSet, CreateSqlInjectionMatchSet, and UpdateSqlInjectionMatchSet. +// Create and update the Rules that you want to include in the WebACL. For more +// information, see CreateRule and UpdateRule. Create a WebACL. See CreateWebACL. +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of an UpdateWebACL request. Submit an UpdateWebACL request to specify +// the Rules that you want to include in the WebACL, to specify the default +// action, and to associate the WebACL with a CloudFront distribution. For +// more information about how to use the AWS WAF API to allow or block HTTP +// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +func (c *WAF) UpdateWebACL(input *UpdateWebACLInput) (*UpdateWebACLOutput, error) { + req, out := c.UpdateWebACLRequest(input) + err := req.Send() + return out, err +} + +const opUpdateXssMatchSet = "UpdateXssMatchSet" + +// UpdateXssMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the UpdateXssMatchSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If
+// you just want the service response, call the UpdateXssMatchSet method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the UpdateXssMatchSetRequest method.
+// req, resp := client.UpdateXssMatchSetRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *WAF) UpdateXssMatchSetRequest(input *UpdateXssMatchSetInput) (req *request.Request, output *UpdateXssMatchSetOutput) {
+ op := &request.Operation{
+ Name: opUpdateXssMatchSet,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &UpdateXssMatchSetInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &UpdateXssMatchSetOutput{}
+ req.Data = output
+ return
+}
+
+// Inserts or deletes XssMatchTuple objects (filters) in an XssMatchSet. For
+// each XssMatchTuple object, you specify the following values:
+//
+//    * Action: Whether to insert the object into or delete the object from
+//      the array. To change an XssMatchTuple, you delete the existing object
+//      and add a new one.
+//
+//    * FieldToMatch: The part of web requests that you want AWS WAF to inspect
+//      and, if you want AWS WAF to inspect a header, the name of the header.
+//
+//    * TextTransformation: Which text transformation, if any, to perform on
+//      the web request before inspecting the request for cross-site scripting
+//      attacks.
+//
+// You use XssMatchSet objects to specify which CloudFront requests you want
+// to allow, block, or count. For example, if you're receiving requests that
+// contain cross-site scripting attacks in the request body and you want to
+// block the requests, you can create an XssMatchSet with the applicable settings,
+// and then configure AWS WAF to block the requests.
+//
+// To create and configure an XssMatchSet, perform the following steps:
+//
+//    * Submit a CreateXssMatchSet request.
+//
+//    * Use GetChangeToken to get the change token that you provide in the
+//      ChangeToken parameter of an UpdateXssMatchSet request.
+//
+//    * Submit an UpdateXssMatchSet request to specify the parts of web requests
+//      that you want AWS WAF to inspect for cross-site scripting attacks.
+//
+// For more information about how to use the AWS WAF API to allow or block
+// HTTP requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/).
+func (c *WAF) UpdateXssMatchSet(input *UpdateXssMatchSetInput) (*UpdateXssMatchSetOutput, error) {
+ req, out := c.UpdateXssMatchSetRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// The ActivatedRule object in an UpdateWebACL request specifies a Rule that
+// you want to insert or delete, the priority of the Rule in the WebACL, and
+// the action that you want AWS WAF to take when a web request matches the Rule
+// (ALLOW, BLOCK, or COUNT).
+//
+// To specify whether to insert or delete a Rule, use the Action parameter
+// in the WebACLUpdate data type.
+type ActivatedRule struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the action that CloudFront or AWS WAF takes when a web request
+ // matches the conditions in the Rule. Valid values for Action include the following:
+ //
+ //    * ALLOW: CloudFront responds with the requested object.
+ //
+ //    * BLOCK: CloudFront responds with an HTTP 403 (Forbidden) status code.
+ //
+ //    * COUNT: AWS WAF increments a counter of requests that match the conditions
+ //      in the rule and then continues to inspect the web request based on
+ //      the remaining rules in the web ACL.
+ Action *WafAction `type:"structure" required:"true"`
+
+ // Specifies the order in which the Rules in a WebACL are evaluated. Rules with
+ // a lower value for Priority are evaluated before Rules with a higher value.
+ // The value must be a unique integer. If you add multiple Rules to a WebACL,
+ // the values don't need to be consecutive.
+ Priority *int64 `type:"integer" required:"true"`
+
+ // The RuleId for a Rule. You use RuleId to get more information about a Rule
+ // (see GetRule), update a Rule (see UpdateRule), insert a Rule into a WebACL
+ // or delete one from a WebACL (see UpdateWebACL), or delete a Rule from AWS
+ // WAF (see DeleteRule).
+ //
+ // RuleId is returned by CreateRule and by ListRules.
+ RuleId *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ActivatedRule) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ActivatedRule) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ActivatedRule) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ActivatedRule"}
+ if s.Action == nil {
+ invalidParams.Add(request.NewErrParamRequired("Action"))
+ }
+ if s.Priority == nil {
+ invalidParams.Add(request.NewErrParamRequired("Priority"))
+ }
+ if s.RuleId == nil {
+ invalidParams.Add(request.NewErrParamRequired("RuleId"))
+ }
+ if s.RuleId != nil && len(*s.RuleId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("RuleId", 1))
+ }
+ if s.Action != nil {
+ if err := s.Action.Validate(); err != nil {
+ invalidParams.AddNested("Action", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// In a GetByteMatchSet request, ByteMatchSet is a complex type that contains
+// the ByteMatchSetId and Name of a ByteMatchSet, and the values that you specified
+// when you updated the ByteMatchSet.
+//
+// A complex type that contains ByteMatchTuple objects, which specify the parts
+// of web requests that you want AWS WAF to inspect and the values that you
+// want AWS WAF to search for. If a ByteMatchSet contains more than one ByteMatchTuple
+// object, a request needs to match the settings in only one ByteMatchTuple
+// to be considered a match.
+type ByteMatchSet struct {
+ _ struct{} `type:"structure"`
+
+ // The ByteMatchSetId for a ByteMatchSet. You use ByteMatchSetId to get information
+ // about a ByteMatchSet (see GetByteMatchSet), update a ByteMatchSet (see UpdateByteMatchSet),
+ // insert a ByteMatchSet into a Rule or delete one from a Rule (see UpdateRule),
+ // and delete a ByteMatchSet from AWS WAF (see DeleteByteMatchSet).
+ //
+ // ByteMatchSetId is returned by CreateByteMatchSet and by ListByteMatchSets.
+ ByteMatchSetId *string `min:"1" type:"string" required:"true"`
+
+ // Specifies the bytes (typically a string that corresponds with ASCII characters)
+ // that you want AWS WAF to search for in web requests, the location in requests
+ // that you want AWS WAF to search, and other settings.
+ ByteMatchTuples []*ByteMatchTuple `type:"list" required:"true"`
+
+ // A friendly name or description of the ByteMatchSet. You can't change Name
+ // after you create a ByteMatchSet.
+ Name *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ByteMatchSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ByteMatchSet) GoString() string { + return s.String() +} + +// Returned by ListByteMatchSets. Each ByteMatchSetSummary object includes the +// Name and ByteMatchSetId for one ByteMatchSet. +type ByteMatchSetSummary struct { + _ struct{} `type:"structure"` + + // The ByteMatchSetId for a ByteMatchSet. You use ByteMatchSetId to get information + // about a ByteMatchSet, update a ByteMatchSet, remove a ByteMatchSet from a + // Rule, and delete a ByteMatchSet from AWS WAF. + // + // ByteMatchSetId is returned by CreateByteMatchSet and by ListByteMatchSets. + ByteMatchSetId *string `min:"1" type:"string" required:"true"` + + // A friendly name or description of the ByteMatchSet. You can't change Name + // after you create a ByteMatchSet. + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ByteMatchSetSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ByteMatchSetSummary) GoString() string { + return s.String() +} + +// In an UpdateByteMatchSet request, ByteMatchSetUpdate specifies whether to +// insert or delete a ByteMatchTuple and includes the settings for the ByteMatchTuple. +type ByteMatchSetUpdate struct { + _ struct{} `type:"structure"` + + // Specifies whether to insert or delete a ByteMatchTuple. + Action *string `type:"string" required:"true" enum:"ChangeAction"` + + // Information about the part of a web request that you want AWS WAF to inspect + // and the value that you want AWS WAF to search for. If you specify DELETE + // for the value of Action, the ByteMatchTuple values must exactly match the + // values in the ByteMatchTuple that you want to delete from the ByteMatchSet. + ByteMatchTuple *ByteMatchTuple `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ByteMatchSetUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ByteMatchSetUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ByteMatchSetUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ByteMatchSetUpdate"} + if s.Action == nil { + invalidParams.Add(request.NewErrParamRequired("Action")) + } + if s.ByteMatchTuple == nil { + invalidParams.Add(request.NewErrParamRequired("ByteMatchTuple")) + } + if s.ByteMatchTuple != nil { + if err := s.ByteMatchTuple.Validate(); err != nil { + invalidParams.AddNested("ByteMatchTuple", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The bytes (typically a string that corresponds with ASCII characters) that +// you want AWS WAF to search for in web requests, the location in requests +// that you want AWS WAF to search, and other settings. +type ByteMatchTuple struct { + _ struct{} `type:"structure"` + + // The part of a web request that you want AWS WAF to search, such as a specified + // header or a query string. For more information, see FieldToMatch. 
+ FieldToMatch *FieldToMatch `type:"structure" required:"true"` + + // Within the portion of a web request that you want to search (for example, + // in the query string, if any), specify where you want AWS WAF to search. Valid + // values include the following: + // + // CONTAINS + // + // The specified part of the web request must include the value of TargetString, + // but the location doesn't matter. + // + // CONTAINS_WORD + // + // The specified part of the web request must include the value of TargetString, + // and TargetString must contain only alphanumeric characters or underscore + // (A-Z, a-z, 0-9, or _). In addition, TargetString must be a word, which means + // one of the following: + // + // TargetString exactly matches the value of the specified part of the web + // request, such as the value of a header. TargetString is at the beginning + // of the specified part of the web request and is followed by a character other + // than an alphanumeric character or underscore (_), for example, BadBot;. TargetString + // is at the end of the specified part of the web request and is preceded by + // a character other than an alphanumeric character or underscore (_), for example, + // ;BadBot. TargetString is in the middle of the specified part of the web request + // and is preceded and followed by characters other than alphanumeric characters + // or underscore (_), for example, -BadBot;. EXACTLY + // + // The value of the specified part of the web request must exactly match the + // value of TargetString. + // + // STARTS_WITH + // + // The value of TargetString must appear at the beginning of the specified + // part of the web request. + // + // ENDS_WITH + // + // The value of TargetString must appear at the end of the specified part of + // the web request. + PositionalConstraint *string `type:"string" required:"true" enum:"PositionalConstraint"` + + // The value that you want AWS WAF to search for. AWS WAF searches for the specified + // string in the part of web requests that you specified in FieldToMatch. The + // maximum length of the value is 50 bytes. + // + // Valid values depend on the values that you specified for FieldToMatch: + // + // HEADER: The value that you want AWS WAF to search for in the request header + // that you specified in FieldToMatch, for example, the value of the User-Agent + // or Referer header. METHOD: The HTTP method, which indicates the type of operation + // specified in the request. CloudFront supports the following methods: DELETE, + // GET, HEAD, OPTIONS, PATCH, POST, and PUT. QUERY_STRING: The value that you + // want AWS WAF to search for in the query string, which is the part of a URL + // that appears after a ? character. URI: The value that you want AWS WAF to + // search for in the part of a URL that identifies a resource, for example, + // /images/daily-ad.jpg. BODY: The part of a request that contains any additional + // data that you want to send to your web server as the HTTP request body, such + // as data from a form. The request body immediately follows the request headers. + // Note that only the first 8192 bytes of the request body are forwarded to + // AWS WAF for inspection. To allow or block requests based on the length of + // the body, you can create a size constraint set. For more information, see + // CreateSizeConstraintSet. If TargetString includes alphabetic characters + // A-Z and a-z, note that the value is case sensitive. 
+ //
+ // If you're using the AWS WAF API
+ //
+ // Specify a base64-encoded version of the value. The maximum length of the
+ // value before you base64-encode it is 50 bytes.
+ //
+ // For example, suppose the value of Type is HEADER and the value of Data is
+ // User-Agent. If you want to search the User-Agent header for the value BadBot,
+ // you base64-encode BadBot using MIME base64 encoding and include the resulting
+ // value, QmFkQm90, in the value of TargetString.
+ //
+ // If you're using the AWS CLI or one of the AWS SDKs
+ //
+ // The value that you want AWS WAF to search for. The SDK automatically base64
+ // encodes the value.
+ //
+ // TargetString is automatically base64 encoded/decoded by the SDK.
+ TargetString []byte `type:"blob" required:"true"`
+
+ // Text transformations eliminate some of the unusual formatting that attackers
+ // use in web requests in an effort to bypass AWS WAF. If you specify a transformation,
+ // AWS WAF performs the transformation on TargetString before inspecting a request
+ // for a match.
+ //
+ // CMD_LINE
+ //
+ // When you're concerned that attackers are injecting an operating system command line
+ // command and using unusual formatting to disguise some or all of the command,
+ // use this option to perform the following transformations:
+ //
+ // Delete the following characters: \ " ' ^ Delete spaces before the following
+ // characters: / ( Replace the following characters with a space: , ; Replace
+ // multiple spaces with one space Convert uppercase letters (A-Z) to lowercase
+ // (a-z) COMPRESS_WHITE_SPACE
+ //
+ // Use this option to replace the following characters with a space character
+ // (decimal 32):
+ //
+ // \f, formfeed, decimal 12 \t, tab, decimal 9 \n, newline, decimal 10 \r,
+ // carriage return, decimal 13 \v, vertical tab, decimal 11 non-breaking space,
+ // decimal 160 COMPRESS_WHITE_SPACE also replaces multiple spaces with one
+ // space.
+ //
+ // HTML_ENTITY_DECODE
+ //
+ // Use this option to replace HTML-encoded characters with unencoded characters.
+ // HTML_ENTITY_DECODE performs the following operations:
+ //
+ // Replaces (ampersand)quot; with " Replaces (ampersand)nbsp; with a non-breaking
+ // space, decimal 160 Replaces (ampersand)lt; with a "less than" symbol Replaces
+ // (ampersand)gt; with > Replaces characters that are represented in hexadecimal
+ // format, (ampersand)#xhhhh;, with the corresponding characters Replaces characters
+ // that are represented in decimal format, (ampersand)#nnnn;, with the corresponding
+ // characters LOWERCASE
+ //
+ // Use this option to convert uppercase letters (A-Z) to lowercase (a-z).
+ //
+ // URL_DECODE
+ //
+ // Use this option to decode a URL-encoded value.
+ //
+ // NONE
+ //
+ // Specify NONE if you don't want to perform any text transformations.
+ TextTransformation *string `type:"string" required:"true" enum:"TextTransformation"`
+}
+
+// String returns the string representation
+func (s ByteMatchTuple) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ByteMatchTuple) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
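+
+// Example (illustrative sketch, not part of the generated API above): building
+// a ByteMatchTuple for use in an UpdateByteMatchSet call, assuming this file is
+// the service/waf package and "github.com/aws/aws-sdk-go/aws" is imported as
+// aws. The header name and search value are placeholders; TargetString is
+// passed as raw bytes because, per the comment above, the SDK base64-encodes
+// it automatically.
+//
+//	tuple := &waf.ByteMatchTuple{
+//		FieldToMatch: &waf.FieldToMatch{
+//			Type: aws.String("HEADER"), // MatchFieldType enum value
+//			Data: aws.String("User-Agent"),
+//		},
+//		PositionalConstraint: aws.String("CONTAINS"),
+//		TargetString:         []byte("BadBot"), // base64-encoded by the SDK
+//		TextTransformation:   aws.String("LOWERCASE"),
+//	}
+//	update := &waf.ByteMatchSetUpdate{
+//		Action:         aws.String("INSERT"), // ChangeAction enum value
+//		ByteMatchTuple: tuple,
+//	}
+//	if err := update.Validate(); err != nil {
+//		// reject bad input client-side before calling the service
+//	}
+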
+func (s *ByteMatchTuple) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ByteMatchTuple"} + if s.FieldToMatch == nil { + invalidParams.Add(request.NewErrParamRequired("FieldToMatch")) + } + if s.PositionalConstraint == nil { + invalidParams.Add(request.NewErrParamRequired("PositionalConstraint")) + } + if s.TargetString == nil { + invalidParams.Add(request.NewErrParamRequired("TargetString")) + } + if s.TextTransformation == nil { + invalidParams.Add(request.NewErrParamRequired("TextTransformation")) + } + if s.FieldToMatch != nil { + if err := s.FieldToMatch.Validate(); err != nil { + invalidParams.AddNested("FieldToMatch", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateByteMatchSetInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + ChangeToken *string `type:"string" required:"true"` + + // A friendly name or description of the ByteMatchSet. You can't change Name + // after you create a ByteMatchSet. + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateByteMatchSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateByteMatchSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateByteMatchSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateByteMatchSetInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateByteMatchSetOutput struct { + _ struct{} `type:"structure"` + + // A ByteMatchSet that contains no ByteMatchTuple objects. + ByteMatchSet *ByteMatchSet `type:"structure"` + + // The ChangeToken that you used to submit the CreateByteMatchSet request. You + // can also use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. + ChangeToken *string `type:"string"` +} + +// String returns the string representation +func (s CreateByteMatchSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateByteMatchSetOutput) GoString() string { + return s.String() +} + +type CreateIPSetInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + ChangeToken *string `type:"string" required:"true"` + + // A friendly name or description of the IPSet. You can't change Name after + // you create the IPSet. + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateIPSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateIPSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
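+
+// Example (illustrative sketch, not generated documentation): every mutating
+// AWS WAF operation takes a change token, so creating a ByteMatchSet is a
+// two-step workflow. The session setup and the set name below are assumptions
+// for illustration only.
+//
+//	svc := waf.New(session.Must(session.NewSession()))
+//	tok, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
+//	if err != nil {
+//		return err
+//	}
+//	out, err := svc.CreateByteMatchSet(&waf.CreateByteMatchSetInput{
+//		ChangeToken: tok.ChangeToken,
+//		Name:        aws.String("my-byte-match-set"), // hypothetical name
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	// out.ByteMatchSet is created empty; add tuples with UpdateByteMatchSet.
+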
+func (s *CreateIPSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateIPSetInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateIPSetOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the CreateIPSet request. You can + // also use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. + ChangeToken *string `type:"string"` + + // The IPSet returned in the CreateIPSet response. + IPSet *IPSet `type:"structure"` +} + +// String returns the string representation +func (s CreateIPSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateIPSetOutput) GoString() string { + return s.String() +} + +type CreateRuleInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + ChangeToken *string `type:"string" required:"true"` + + // A friendly name or description for the metrics for this Rule. The name can + // contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't contain + // whitespace. You can't change the name of the metric after you create the + // Rule. + MetricName *string `type:"string" required:"true"` + + // A friendly name or description of the Rule. You can't change the name of + // a Rule after you create it. + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateRuleInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.MetricName == nil { + invalidParams.Add(request.NewErrParamRequired("MetricName")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateRuleOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the CreateRule request. You can also + // use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. + ChangeToken *string `type:"string"` + + // The Rule returned in the CreateRule response. + Rule *Rule `type:"structure"` +} + +// String returns the string representation +func (s CreateRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRuleOutput) GoString() string { + return s.String() +} + +type CreateSizeConstraintSetInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. 
+ ChangeToken *string `type:"string" required:"true"` + + // A friendly name or description of the SizeConstraintSet. You can't change + // Name after you create a SizeConstraintSet. + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateSizeConstraintSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSizeConstraintSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateSizeConstraintSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateSizeConstraintSetInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateSizeConstraintSetOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the CreateSizeConstraintSet request. + // You can also use this value to query the status of the request. For more + // information, see GetChangeTokenStatus. + ChangeToken *string `type:"string"` + + // A SizeConstraintSet that contains no SizeConstraint objects. + SizeConstraintSet *SizeConstraintSet `type:"structure"` +} + +// String returns the string representation +func (s CreateSizeConstraintSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSizeConstraintSetOutput) GoString() string { + return s.String() +} + +// A request to create a SqlInjectionMatchSet. +type CreateSqlInjectionMatchSetInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + ChangeToken *string `type:"string" required:"true"` + + // A friendly name or description for the SqlInjectionMatchSet that you're creating. + // You can't change Name after you create the SqlInjectionMatchSet. + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateSqlInjectionMatchSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSqlInjectionMatchSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateSqlInjectionMatchSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateSqlInjectionMatchSetInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The response to a CreateSqlInjectionMatchSet request. +type CreateSqlInjectionMatchSetOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the CreateSqlInjectionMatchSet request. + // You can also use this value to query the status of the request. For more + // information, see GetChangeTokenStatus. + ChangeToken *string `type:"string"` + + // A SqlInjectionMatchSet. 
+ SqlInjectionMatchSet *SqlInjectionMatchSet `type:"structure"` +} + +// String returns the string representation +func (s CreateSqlInjectionMatchSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSqlInjectionMatchSetOutput) GoString() string { + return s.String() +} + +type CreateWebACLInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + ChangeToken *string `type:"string" required:"true"` + + // The action that you want AWS WAF to take when a request doesn't match the + // criteria specified in any of the Rule objects that are associated with the + // WebACL. + DefaultAction *WafAction `type:"structure" required:"true"` + + // A friendly name or description for the metrics for this WebACL. The name + // can contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't + // contain whitespace. You can't change MetricName after you create the WebACL. + MetricName *string `type:"string" required:"true"` + + // A friendly name or description of the WebACL. You can't change Name after + // you create the WebACL. + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateWebACLInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateWebACLInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateWebACLInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateWebACLInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.DefaultAction == nil { + invalidParams.Add(request.NewErrParamRequired("DefaultAction")) + } + if s.MetricName == nil { + invalidParams.Add(request.NewErrParamRequired("MetricName")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.DefaultAction != nil { + if err := s.DefaultAction.Validate(); err != nil { + invalidParams.AddNested("DefaultAction", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateWebACLOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the CreateWebACL request. You can + // also use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. + ChangeToken *string `type:"string"` + + // The WebACL returned in the CreateWebACL response. + WebACL *WebACL `type:"structure"` +} + +// String returns the string representation +func (s CreateWebACLOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateWebACLOutput) GoString() string { + return s.String() +} + +// A request to create an XssMatchSet. +type CreateXssMatchSetInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + ChangeToken *string `type:"string" required:"true"` + + // A friendly name or description for the XssMatchSet that you're creating. + // You can't change Name after you create the XssMatchSet. 
+ Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateXssMatchSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateXssMatchSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateXssMatchSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateXssMatchSetInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The response to a CreateXssMatchSet request. +type CreateXssMatchSetOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the CreateXssMatchSet request. You + // can also use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. + ChangeToken *string `type:"string"` + + // An XssMatchSet. + XssMatchSet *XssMatchSet `type:"structure"` +} + +// String returns the string representation +func (s CreateXssMatchSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateXssMatchSetOutput) GoString() string { + return s.String() +} + +type DeleteByteMatchSetInput struct { + _ struct{} `type:"structure"` + + // The ByteMatchSetId of the ByteMatchSet that you want to delete. ByteMatchSetId + // is returned by CreateByteMatchSet and by ListByteMatchSets. + ByteMatchSetId *string `min:"1" type:"string" required:"true"` + + // The value returned by the most recent call to GetChangeToken. + ChangeToken *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteByteMatchSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteByteMatchSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteByteMatchSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteByteMatchSetInput"} + if s.ByteMatchSetId == nil { + invalidParams.Add(request.NewErrParamRequired("ByteMatchSetId")) + } + if s.ByteMatchSetId != nil && len(*s.ByteMatchSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ByteMatchSetId", 1)) + } + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteByteMatchSetOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the DeleteByteMatchSet request. You + // can also use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. 
+ ChangeToken *string `type:"string"` +} + +// String returns the string representation +func (s DeleteByteMatchSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteByteMatchSetOutput) GoString() string { + return s.String() +} + +type DeleteIPSetInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + ChangeToken *string `type:"string" required:"true"` + + // The IPSetId of the IPSet that you want to delete. IPSetId is returned by + // CreateIPSet and by ListIPSets. + IPSetId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteIPSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteIPSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteIPSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteIPSetInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.IPSetId == nil { + invalidParams.Add(request.NewErrParamRequired("IPSetId")) + } + if s.IPSetId != nil && len(*s.IPSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IPSetId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteIPSetOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the DeleteIPSet request. You can + // also use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. + ChangeToken *string `type:"string"` +} + +// String returns the string representation +func (s DeleteIPSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteIPSetOutput) GoString() string { + return s.String() +} + +type DeleteRuleInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + ChangeToken *string `type:"string" required:"true"` + + // The RuleId of the Rule that you want to delete. RuleId is returned by CreateRule + // and by ListRules. + RuleId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteRuleInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.RuleId == nil { + invalidParams.Add(request.NewErrParamRequired("RuleId")) + } + if s.RuleId != nil && len(*s.RuleId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RuleId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteRuleOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the DeleteRule request. You can also + // use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. 
+ ChangeToken *string `type:"string"` +} + +// String returns the string representation +func (s DeleteRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRuleOutput) GoString() string { + return s.String() +} + +type DeleteSizeConstraintSetInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + ChangeToken *string `type:"string" required:"true"` + + // The SizeConstraintSetId of the SizeConstraintSet that you want to delete. + // SizeConstraintSetId is returned by CreateSizeConstraintSet and by ListSizeConstraintSets. + SizeConstraintSetId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSizeConstraintSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSizeConstraintSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteSizeConstraintSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteSizeConstraintSetInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.SizeConstraintSetId == nil { + invalidParams.Add(request.NewErrParamRequired("SizeConstraintSetId")) + } + if s.SizeConstraintSetId != nil && len(*s.SizeConstraintSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SizeConstraintSetId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteSizeConstraintSetOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the DeleteSizeConstraintSet request. + // You can also use this value to query the status of the request. For more + // information, see GetChangeTokenStatus. + ChangeToken *string `type:"string"` +} + +// String returns the string representation +func (s DeleteSizeConstraintSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSizeConstraintSetOutput) GoString() string { + return s.String() +} + +// A request to delete a SqlInjectionMatchSet from AWS WAF. +type DeleteSqlInjectionMatchSetInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + ChangeToken *string `type:"string" required:"true"` + + // The SqlInjectionMatchSetId of the SqlInjectionMatchSet that you want to delete. + // SqlInjectionMatchSetId is returned by CreateSqlInjectionMatchSet and by ListSqlInjectionMatchSets. + SqlInjectionMatchSetId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSqlInjectionMatchSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSqlInjectionMatchSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
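+
+// Example (illustrative sketch): the Delete* operations follow the same
+// change-token pattern as the Create* operations. The set ID below is a
+// made-up placeholder, and svc is the *waf.WAF client from the earlier sketch.
+// Note that AWS WAF generally rejects deleting objects that are still
+// non-empty or referenced by a Rule or WebACL (an assumption; not stated in
+// this hunk).
+//
+//	tok, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
+//	if err != nil {
+//		return err
+//	}
+//	_, err = svc.DeleteByteMatchSet(&waf.DeleteByteMatchSetInput{
+//		ByteMatchSetId: aws.String("example1ds3t-46da-4fdb-b8d5-abc321j569j5"),
+//		ChangeToken:    tok.ChangeToken,
+//	})
+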
+func (s *DeleteSqlInjectionMatchSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteSqlInjectionMatchSetInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.SqlInjectionMatchSetId == nil { + invalidParams.Add(request.NewErrParamRequired("SqlInjectionMatchSetId")) + } + if s.SqlInjectionMatchSetId != nil && len(*s.SqlInjectionMatchSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SqlInjectionMatchSetId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The response to a request to delete a SqlInjectionMatchSet from AWS WAF. +type DeleteSqlInjectionMatchSetOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the DeleteSqlInjectionMatchSet request. + // You can also use this value to query the status of the request. For more + // information, see GetChangeTokenStatus. + ChangeToken *string `type:"string"` +} + +// String returns the string representation +func (s DeleteSqlInjectionMatchSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSqlInjectionMatchSetOutput) GoString() string { + return s.String() +} + +type DeleteWebACLInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + ChangeToken *string `type:"string" required:"true"` + + // The WebACLId of the WebACL that you want to delete. WebACLId is returned + // by CreateWebACL and by ListWebACLs. + WebACLId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteWebACLInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteWebACLInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteWebACLInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteWebACLInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.WebACLId == nil { + invalidParams.Add(request.NewErrParamRequired("WebACLId")) + } + if s.WebACLId != nil && len(*s.WebACLId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WebACLId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteWebACLOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the DeleteWebACL request. You can + // also use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. + ChangeToken *string `type:"string"` +} + +// String returns the string representation +func (s DeleteWebACLOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteWebACLOutput) GoString() string { + return s.String() +} + +// A request to delete an XssMatchSet from AWS WAF. +type DeleteXssMatchSetInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + ChangeToken *string `type:"string" required:"true"` + + // The XssMatchSetId of the XssMatchSet that you want to delete. XssMatchSetId + // is returned by CreateXssMatchSet and by ListXssMatchSets. 
+ XssMatchSetId *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteXssMatchSetInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteXssMatchSetInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteXssMatchSetInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteXssMatchSetInput"}
+ if s.ChangeToken == nil {
+ invalidParams.Add(request.NewErrParamRequired("ChangeToken"))
+ }
+ if s.XssMatchSetId == nil {
+ invalidParams.Add(request.NewErrParamRequired("XssMatchSetId"))
+ }
+ if s.XssMatchSetId != nil && len(*s.XssMatchSetId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("XssMatchSetId", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// The response to a request to delete an XssMatchSet from AWS WAF.
+type DeleteXssMatchSetOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The ChangeToken that you used to submit the DeleteXssMatchSet request. You
+ // can also use this value to query the status of the request. For more information,
+ // see GetChangeTokenStatus.
+ ChangeToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteXssMatchSetOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteXssMatchSetOutput) GoString() string {
+ return s.String()
+}
+
+// Specifies where in a web request to look for TargetString.
+type FieldToMatch struct {
+ _ struct{} `type:"structure"`
+
+ // When the value of Type is HEADER, enter the name of the header that you want
+ // AWS WAF to search, for example, User-Agent or Referer. If the value of Type
+ // is any other value, omit Data.
+ //
+ // The name of the header is not case sensitive.
+ Data *string `type:"string"`
+
+ // The part of the web request that you want AWS WAF to search for a specified
+ // string. Parts of a request that you can search include the following:
+ //
+ // HEADER: A specified request header, for example, the value of the User-Agent
+ // or Referer header. If you choose HEADER for the type, specify the name of
+ // the header in Data. METHOD: The HTTP method, which indicates the type of
+ // operation that the request is asking the origin to perform. Amazon CloudFront
+ // supports the following methods: DELETE, GET, HEAD, OPTIONS, PATCH, POST,
+ // and PUT. QUERY_STRING: A query string, which is the part of a URL that appears
+ // after a ? character, if any. URI: The part of a web request that identifies
+ // a resource, for example, /images/daily-ad.jpg. BODY: The part of a request
+ // that contains any additional data that you want to send to your web server
+ // as the HTTP request body, such as data from a form. The request body immediately
+ // follows the request headers. Note that only the first 8192 bytes of the request
+ // body are forwarded to AWS WAF for inspection. To allow or block requests
+ // based on the length of the body, you can create a size constraint set. For
+ // more information, see CreateSizeConstraintSet.
+ Type *string `type:"string" required:"true" enum:"MatchFieldType"` +} + +// String returns the string representation +func (s FieldToMatch) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FieldToMatch) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *FieldToMatch) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FieldToMatch"} + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetByteMatchSetInput struct { + _ struct{} `type:"structure"` + + // The ByteMatchSetId of the ByteMatchSet that you want to get. ByteMatchSetId + // is returned by CreateByteMatchSet and by ListByteMatchSets. + ByteMatchSetId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetByteMatchSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetByteMatchSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetByteMatchSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetByteMatchSetInput"} + if s.ByteMatchSetId == nil { + invalidParams.Add(request.NewErrParamRequired("ByteMatchSetId")) + } + if s.ByteMatchSetId != nil && len(*s.ByteMatchSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ByteMatchSetId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetByteMatchSetOutput struct { + _ struct{} `type:"structure"` + + // Information about the ByteMatchSet that you specified in the GetByteMatchSet + // request. For more information, see the following topics: + // + // ByteMatchSet: Contains ByteMatchSetId, ByteMatchTuples, and Name ByteMatchTuples: + // Contains an array of ByteMatchTuple objects. Each ByteMatchTuple object contains + // FieldToMatch, PositionalConstraint, TargetString, and TextTransformation + // FieldToMatch: Contains Data and Type + ByteMatchSet *ByteMatchSet `type:"structure"` +} + +// String returns the string representation +func (s GetByteMatchSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetByteMatchSetOutput) GoString() string { + return s.String() +} + +type GetChangeTokenInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetChangeTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetChangeTokenInput) GoString() string { + return s.String() +} + +type GetChangeTokenOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used in the request. Use this value in a GetChangeTokenStatus + // request to get the current status of the request. + ChangeToken *string `type:"string"` +} + +// String returns the string representation +func (s GetChangeTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetChangeTokenOutput) GoString() string { + return s.String() +} + +type GetChangeTokenStatusInput struct { + _ struct{} `type:"structure"` + + // The change token for which you want to get the status. 
This change token + // was previously returned in the GetChangeToken response. + ChangeToken *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetChangeTokenStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetChangeTokenStatusInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetChangeTokenStatusInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetChangeTokenStatusInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetChangeTokenStatusOutput struct { + _ struct{} `type:"structure"` + + // The status of the change token. + ChangeTokenStatus *string `type:"string" enum:"ChangeTokenStatus"` +} + +// String returns the string representation +func (s GetChangeTokenStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetChangeTokenStatusOutput) GoString() string { + return s.String() +} + +type GetIPSetInput struct { + _ struct{} `type:"structure"` + + // The IPSetId of the IPSet that you want to get. IPSetId is returned by CreateIPSet + // and by ListIPSets. + IPSetId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetIPSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIPSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetIPSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetIPSetInput"} + if s.IPSetId == nil { + invalidParams.Add(request.NewErrParamRequired("IPSetId")) + } + if s.IPSetId != nil && len(*s.IPSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IPSetId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetIPSetOutput struct { + _ struct{} `type:"structure"` + + // Information about the IPSet that you specified in the GetIPSet request. For + // more information, see the following topics: + // + // IPSet: Contains IPSetDescriptors, IPSetId, and Name IPSetDescriptors: Contains + // an array of IPSetDescriptor objects. Each IPSetDescriptor object contains + // Type and Value + IPSet *IPSet `type:"structure"` +} + +// String returns the string representation +func (s GetIPSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIPSetOutput) GoString() string { + return s.String() +} + +type GetRuleInput struct { + _ struct{} `type:"structure"` + + // The RuleId of the Rule that you want to get. RuleId is returned by CreateRule + // and by ListRules. + RuleId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
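+
+// Example (illustrative sketch): polling a change token until it has
+// propagated. The INSYNC status string is an assumption about the
+// ChangeTokenStatus enum values, and the sleep interval is arbitrary; svc and
+// tok come from the earlier sketches.
+//
+//	for {
+//		st, err := svc.GetChangeTokenStatus(&waf.GetChangeTokenStatusInput{
+//			ChangeToken: tok.ChangeToken,
+//		})
+//		if err != nil {
+//			return err
+//		}
+//		if aws.StringValue(st.ChangeTokenStatus) == "INSYNC" {
+//			break // the change has propagated
+//		}
+//		time.Sleep(5 * time.Second)
+//	}
+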
+func (s *GetRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetRuleInput"} + if s.RuleId == nil { + invalidParams.Add(request.NewErrParamRequired("RuleId")) + } + if s.RuleId != nil && len(*s.RuleId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RuleId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetRuleOutput struct { + _ struct{} `type:"structure"` + + // Information about the Rule that you specified in the GetRule request. For + // more information, see the following topics: + // + // Rule: Contains MetricName, Name, an array of Predicate objects, and RuleId + // Predicate: Each Predicate object contains DataId, Negated, and Type + Rule *Rule `type:"structure"` +} + +// String returns the string representation +func (s GetRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRuleOutput) GoString() string { + return s.String() +} + +type GetSampledRequestsInput struct { + _ struct{} `type:"structure"` + + // The number of requests that you want AWS WAF to return from among the first + // 5,000 requests that your AWS resource received during the time range. If + // your resource received fewer requests than the value of MaxItems, GetSampledRequests + // returns information about all of them. + MaxItems *int64 `min:"1" type:"long" required:"true"` + + // RuleId is one of two values: + // + // The RuleId of the Rule for which you want GetSampledRequests to return + // a sample of requests. Default_Action, which causes GetSampledRequests to + // return a sample of the requests that didn't match any of the rules in the + // specified WebACL. + RuleId *string `min:"1" type:"string" required:"true"` + + // The start date and time and the end date and time of the range for which + // you want GetSampledRequests to return a sample of requests. Specify the date + // and time in Unix time format (in seconds). You can specify any time range + // in the previous three hours. + TimeWindow *TimeWindow `type:"structure" required:"true"` + + // The WebACLId of the WebACL for which you want GetSampledRequests to return + // a sample of requests. + WebAclId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetSampledRequestsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSampledRequestsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
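+
+// Example (illustrative sketch): requesting a sample of recent web requests.
+// TimeWindow is assumed to carry StartTime/EndTime *time.Time fields; the rule
+// and web ACL IDs are made-up placeholders. "Default_Action" samples requests
+// that matched no rule, per the RuleId documentation above.
+//
+//	end := time.Now()
+//	req := &waf.GetSampledRequestsInput{
+//		MaxItems: aws.Int64(100),
+//		RuleId:   aws.String("Default_Action"),
+//		WebAclId: aws.String("webacl-1472061481310"),
+//		TimeWindow: &waf.TimeWindow{
+//			StartTime: aws.Time(end.Add(-time.Hour)), // within the last 3 hours
+//			EndTime:   aws.Time(end),
+//		},
+//	}
+//	if err := req.Validate(); err != nil {
+//		return err
+//	}
+//	sample, err := svc.GetSampledRequests(req)
+//	// sample.SampledRequests holds up to MaxItems requests
+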
+func (s *GetSampledRequestsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSampledRequestsInput"} + if s.MaxItems == nil { + invalidParams.Add(request.NewErrParamRequired("MaxItems")) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.RuleId == nil { + invalidParams.Add(request.NewErrParamRequired("RuleId")) + } + if s.RuleId != nil && len(*s.RuleId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RuleId", 1)) + } + if s.TimeWindow == nil { + invalidParams.Add(request.NewErrParamRequired("TimeWindow")) + } + if s.WebAclId == nil { + invalidParams.Add(request.NewErrParamRequired("WebAclId")) + } + if s.WebAclId != nil && len(*s.WebAclId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WebAclId", 1)) + } + if s.TimeWindow != nil { + if err := s.TimeWindow.Validate(); err != nil { + invalidParams.AddNested("TimeWindow", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetSampledRequestsOutput struct { + _ struct{} `type:"structure"` + + // The total number of requests from which GetSampledRequests got a sample of + // MaxItems requests. If PopulationSize is less than MaxItems, the sample includes + // every request that your AWS resource received during the specified time range. + PopulationSize *int64 `type:"long"` + + // A complex type that contains detailed information about each of the requests + // in the sample. + SampledRequests []*SampledHTTPRequest `type:"list"` + + // Usually, TimeWindow is the time range that you specified in the GetSampledRequests + // request. However, if your AWS resource received more than 5,000 requests + // during the time range that you specified in the request, GetSampledRequests + // returns the time range for the first 5,000 requests. + TimeWindow *TimeWindow `type:"structure"` +} + +// String returns the string representation +func (s GetSampledRequestsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSampledRequestsOutput) GoString() string { + return s.String() +} + +type GetSizeConstraintSetInput struct { + _ struct{} `type:"structure"` + + // The SizeConstraintSetId of the SizeConstraintSet that you want to get. SizeConstraintSetId + // is returned by CreateSizeConstraintSet and by ListSizeConstraintSets. + SizeConstraintSetId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetSizeConstraintSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSizeConstraintSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetSizeConstraintSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSizeConstraintSetInput"} + if s.SizeConstraintSetId == nil { + invalidParams.Add(request.NewErrParamRequired("SizeConstraintSetId")) + } + if s.SizeConstraintSetId != nil && len(*s.SizeConstraintSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SizeConstraintSetId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetSizeConstraintSetOutput struct { + _ struct{} `type:"structure"` + + // Information about the SizeConstraintSet that you specified in the GetSizeConstraintSet + // request. 
For more information, see the following topics: + // + // SizeConstraintSet: Contains SizeConstraintSetId, SizeConstraints, and Name + // SizeConstraints: Contains an array of SizeConstraint objects. Each SizeConstraint + // object contains FieldToMatch, TextTransformation, ComparisonOperator, and + // Size FieldToMatch: Contains Data and Type + SizeConstraintSet *SizeConstraintSet `type:"structure"` +} + +// String returns the string representation +func (s GetSizeConstraintSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSizeConstraintSetOutput) GoString() string { + return s.String() +} + +// A request to get a SqlInjectionMatchSet. +type GetSqlInjectionMatchSetInput struct { + _ struct{} `type:"structure"` + + // The SqlInjectionMatchSetId of the SqlInjectionMatchSet that you want to get. + // SqlInjectionMatchSetId is returned by CreateSqlInjectionMatchSet and by ListSqlInjectionMatchSets. + SqlInjectionMatchSetId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetSqlInjectionMatchSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSqlInjectionMatchSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetSqlInjectionMatchSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSqlInjectionMatchSetInput"} + if s.SqlInjectionMatchSetId == nil { + invalidParams.Add(request.NewErrParamRequired("SqlInjectionMatchSetId")) + } + if s.SqlInjectionMatchSetId != nil && len(*s.SqlInjectionMatchSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SqlInjectionMatchSetId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The response to a GetSqlInjectionMatchSet request. +type GetSqlInjectionMatchSetOutput struct { + _ struct{} `type:"structure"` + + // Information about the SqlInjectionMatchSet that you specified in the GetSqlInjectionMatchSet + // request. For more information, see the following topics: + // + // SqlInjectionMatchSet: Contains Name, SqlInjectionMatchSetId, and an array + // of SqlInjectionMatchTuple objects SqlInjectionMatchTuple: Each SqlInjectionMatchTuple + // object contains FieldToMatch and TextTransformation FieldToMatch: Contains + // Data and Type + SqlInjectionMatchSet *SqlInjectionMatchSet `type:"structure"` +} + +// String returns the string representation +func (s GetSqlInjectionMatchSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSqlInjectionMatchSetOutput) GoString() string { + return s.String() +} + +type GetWebACLInput struct { + _ struct{} `type:"structure"` + + // The WebACLId of the WebACL that you want to get. WebACLId is returned by + // CreateWebACL and by ListWebACLs. + WebACLId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetWebACLInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetWebACLInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
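+
+// Example (illustrative sketch): fetching a WebACL and walking its activated
+// rules, using the WebACL/ActivatedRule layout described in the GetWebACLOutput
+// comment above. The WebACLId is a made-up placeholder.
+//
+//	acl, err := svc.GetWebACL(&waf.GetWebACLInput{
+//		WebACLId: aws.String("webacl-1472061481310"),
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	for _, r := range acl.WebACL.Rules {
+//		fmt.Println(aws.Int64Value(r.Priority), aws.StringValue(r.RuleId))
+//	}
+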
+func (s *GetWebACLInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetWebACLInput"} + if s.WebACLId == nil { + invalidParams.Add(request.NewErrParamRequired("WebACLId")) + } + if s.WebACLId != nil && len(*s.WebACLId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WebACLId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetWebACLOutput struct { + _ struct{} `type:"structure"` + + // Information about the WebACL that you specified in the GetWebACL request. + // For more information, see the following topics: + // + // WebACL: Contains DefaultAction, MetricName, Name, an array of Rule objects, + // and WebACLId DefaultAction (Data type is WafAction): Contains Type Rules: + // Contains an array of ActivatedRule objects, which contain Action, Priority, + // and RuleId Action: Contains Type + WebACL *WebACL `type:"structure"` +} + +// String returns the string representation +func (s GetWebACLOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetWebACLOutput) GoString() string { + return s.String() +} + +// A request to get an XssMatchSet. +type GetXssMatchSetInput struct { + _ struct{} `type:"structure"` + + // The XssMatchSetId of the XssMatchSet that you want to get. XssMatchSetId + // is returned by CreateXssMatchSet and by ListXssMatchSets. + XssMatchSetId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetXssMatchSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetXssMatchSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetXssMatchSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetXssMatchSetInput"} + if s.XssMatchSetId == nil { + invalidParams.Add(request.NewErrParamRequired("XssMatchSetId")) + } + if s.XssMatchSetId != nil && len(*s.XssMatchSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("XssMatchSetId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The response to a GetXssMatchSet request. +type GetXssMatchSetOutput struct { + _ struct{} `type:"structure"` + + // Information about the XssMatchSet that you specified in the GetXssMatchSet + // request. For more information, see the following topics: + // + // XssMatchSet: Contains Name, XssMatchSetId, and an array of XssMatchTuple + // objects XssMatchTuple: Each XssMatchTuple object contains FieldToMatch and + // TextTransformation FieldToMatch: Contains Data and Type + XssMatchSet *XssMatchSet `type:"structure"` +} + +// String returns the string representation +func (s GetXssMatchSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetXssMatchSetOutput) GoString() string { + return s.String() +} + +// The response from a GetSampledRequests request includes an HTTPHeader complex +// type that appears as Headers in the response syntax. HTTPHeader contains +// the names and values of all of the headers that appear in one of the web +// requests that were returned by GetSampledRequests. +type HTTPHeader struct { + _ struct{} `type:"structure"` + + // The name of one of the headers in the sampled web request. + Name *string `type:"string"` + + // The value of one of the headers in the sampled web request. 
+ Value *string `type:"string"`
+}
+
+// String returns the string representation
+func (s HTTPHeader) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HTTPHeader) GoString() string {
+ return s.String()
+}
+
+// The response from a GetSampledRequests request includes an HTTPRequest complex
+// type that appears as Request in the response syntax. HTTPRequest contains
+// information about one of the web requests that were returned by GetSampledRequests.
+type HTTPRequest struct {
+ _ struct{} `type:"structure"`
+
+ // The IP address that the request originated from. If the WebACL is associated
+ // with a CloudFront distribution, this is the value of one of the following
+ // fields in CloudFront access logs:
+ //
+ // c-ip, if the viewer did not use an HTTP proxy or a load balancer to send
+ // the request x-forwarded-for, if the viewer did use an HTTP proxy or a load
+ // balancer to send the request
+ ClientIP *string `type:"string"`
+
+ // The two-letter country code for the country that the request originated from.
+ // For a current list of country codes, see the Wikipedia entry ISO 3166-1 alpha-2
+ // (https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2).
+ Country *string `type:"string"`
+
+ // The HTTP version specified in the sampled web request, for example, HTTP/1.1.
+ HTTPVersion *string `type:"string"`
+
+ // A complex type that contains two values for each header in the sampled web
+ // request: the name of the header and the value of the header.
+ Headers []*HTTPHeader `type:"list"`
+
+ // The HTTP method specified in the sampled web request. CloudFront supports
+ // the following methods: DELETE, GET, HEAD, OPTIONS, PATCH, POST, and PUT.
+ Method *string `type:"string"`
+
+ // The part of a web request that identifies the resource, for example, /images/daily-ad.jpg.
+ URI *string `type:"string"`
+}
+
+// String returns the string representation
+func (s HTTPRequest) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HTTPRequest) GoString() string {
+ return s.String()
+}
+
+// Contains one or more IP addresses or blocks of IP addresses specified in
+// Classless Inter-Domain Routing (CIDR) notation. To specify an individual
+// IP address, you specify the four-part IP address followed by a /32, for example,
+// 192.0.2.0/32. To block a range of IP addresses, you can specify a /24, a
+// /16, or a /8 CIDR. For more information about CIDR notation, perform an Internet
+// search on cidr notation.
+type IPSet struct {
+ _ struct{} `type:"structure"`
+
+ // The IP address type (IPV4) and the IP address range (in CIDR notation) that
+ // web requests originate from. If the WebACL is associated with a CloudFront
+ // distribution, this is the value of one of the following fields in CloudFront
+ // access logs:
+ //
+ // c-ip, if the viewer did not use an HTTP proxy or a load balancer to send
+ // the request x-forwarded-for, if the viewer did use an HTTP proxy or a load
+ // balancer to send the request
+ IPSetDescriptors []*IPSetDescriptor `type:"list" required:"true"`
+
+ // The IPSetId for an IPSet. You use IPSetId to get information about an IPSet
+ // (see GetIPSet), update an IPSet (see UpdateIPSet), insert an IPSet into a
+ // Rule or delete one from a Rule (see UpdateRule), and delete an IPSet from
+ // AWS WAF (see DeleteIPSet).
+ //
+ // IPSetId is returned by CreateIPSet and by ListIPSets.
+ IPSetId *string `min:"1" type:"string" required:"true"` + + // A friendly name or description of the IPSet. You can't change the name of + // an IPSet after you create it. + Name *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s IPSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IPSet) GoString() string { + return s.String() +} + +// Specifies the IP address type (IPV4) and the IP address range (in CIDR format) +// that web requests originate from. +type IPSetDescriptor struct { + _ struct{} `type:"structure"` + + // Specify IPV4. + Type *string `type:"string" required:"true" enum:"IPSetDescriptorType"` + + // Specify an IPv4 address by using CIDR notation. For example: + // + // To configure AWS WAF to allow, block, or count requests that originated + // from the IP address 192.0.2.44, specify 192.0.2.44/32. To configure AWS WAF + // to allow, block, or count requests that originated from IP addresses from + // 192.0.2.0 to 192.0.2.255, specify 192.0.2.0/24. AWS WAF supports only /8, + // /16, /24, and /32 IP addresses. + // + // For more information about CIDR notation, see the Wikipedia entry Classless + // Inter-Domain Routing (https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing). + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s IPSetDescriptor) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IPSetDescriptor) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IPSetDescriptor) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IPSetDescriptor"} + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the identifier and the name of the IPSet. +type IPSetSummary struct { + _ struct{} `type:"structure"` + + // The IPSetId for an IPSet. You can use IPSetId in a GetIPSet request to get + // detailed information about an IPSet. + IPSetId *string `min:"1" type:"string" required:"true"` + + // A friendly name or description of the IPSet. You can't change the name of + // an IPSet after you create it. + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s IPSetSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IPSetSummary) GoString() string { + return s.String() +} + +// Specifies the type of update to perform to an IPSet with UpdateIPSet. +type IPSetUpdate struct { + _ struct{} `type:"structure"` + + // Specifies whether to insert or delete an IP address with UpdateIPSet. + Action *string `type:"string" required:"true" enum:"ChangeAction"` + + // The IP address type (IPV4) and the IP address range (in CIDR notation) that + // web requests originate from. + IPSetDescriptor *IPSetDescriptor `type:"structure" required:"true"` +} + +// String returns the string representation +func (s IPSetUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IPSetUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
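+
+// Example (illustrative sketch): inserting an IPv4 CIDR into an existing
+// IPSet. UpdateIPSet and its input shape are assumed from the cross-references
+// above rather than shown in this hunk; the IPSetId is a made-up placeholder.
+// Only /8, /16, /24, and /32 ranges are accepted, per the IPSetDescriptor
+// documentation above.
+//
+//	tok, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
+//	if err != nil {
+//		return err
+//	}
+//	_, err = svc.UpdateIPSet(&waf.UpdateIPSetInput{
+//		ChangeToken: tok.ChangeToken,
+//		IPSetId:     aws.String("example1ds3t-46da-4fdb-b8d5-abc321j569j5"),
+//		Updates: []*waf.IPSetUpdate{{
+//			Action: aws.String("INSERT"),
+//			IPSetDescriptor: &waf.IPSetDescriptor{
+//				Type:  aws.String("IPV4"),
+//				Value: aws.String("192.0.2.44/32"),
+//			},
+//		}},
+//	})
+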
+func (s *IPSetUpdate) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "IPSetUpdate"}
+ if s.Action == nil {
+ invalidParams.Add(request.NewErrParamRequired("Action"))
+ }
+ if s.IPSetDescriptor == nil {
+ invalidParams.Add(request.NewErrParamRequired("IPSetDescriptor"))
+ }
+ if s.IPSetDescriptor != nil {
+ if err := s.IPSetDescriptor.Validate(); err != nil {
+ invalidParams.AddNested("IPSetDescriptor", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type ListByteMatchSetsInput struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the number of ByteMatchSet objects that you want AWS WAF to return
+ // for this request. If you have more ByteMatchSet objects than the number
+ // you specify for Limit, the response includes a NextMarker value that you
+ // can use to get another batch of ByteMatchSet objects.
+ Limit *int64 `min:"1" type:"integer" required:"true"`
+
+ // If you specify a value for Limit and you have more ByteMatchSets than the
+ // value of Limit, AWS WAF returns a NextMarker value in the response that allows
+ // you to list another group of ByteMatchSets. For the second and subsequent
+ // ListByteMatchSets requests, specify the value of NextMarker from the previous
+ // response to get information about another batch of ByteMatchSets.
+ NextMarker *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ListByteMatchSetsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListByteMatchSetsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListByteMatchSetsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListByteMatchSetsInput"}
+ if s.Limit == nil {
+ invalidParams.Add(request.NewErrParamRequired("Limit"))
+ }
+ if s.Limit != nil && *s.Limit < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
+ }
+ if s.NextMarker != nil && len(*s.NextMarker) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("NextMarker", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type ListByteMatchSetsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of ByteMatchSetSummary objects.
+ ByteMatchSets []*ByteMatchSetSummary `type:"list"`
+
+ // If you have more ByteMatchSet objects than the number that you specified
+ // for Limit in the request, the response includes a NextMarker value. To list
+ // more ByteMatchSet objects, submit another ListByteMatchSets request, and
+ // specify the NextMarker value from the response in the NextMarker value in
+ // the next request.
+ NextMarker *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ListByteMatchSetsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListByteMatchSetsOutput) GoString() string {
+ return s.String()
+}
+
+type ListIPSetsInput struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the number of IPSet objects that you want AWS WAF to return for
+ // this request. If you have more IPSet objects than the number you specify
+ // for Limit, the response includes a NextMarker value that you can use to get
+ // another batch of IPSet objects.
+ Limit *int64 `min:"1" type:"integer" required:"true"`
+
+ // If you specify a value for Limit and you have more IPSets than the value
+ // of Limit, AWS WAF returns a NextMarker value in the response that allows
+ // you to list another group of IPSets. For the second and subsequent ListIPSets
+ // requests, specify the value of NextMarker from the previous response to get
+ // information about another batch of IPSets.
+ NextMarker *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ListIPSetsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListIPSetsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListIPSetsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListIPSetsInput"}
+ if s.Limit == nil {
+ invalidParams.Add(request.NewErrParamRequired("Limit"))
+ }
+ if s.Limit != nil && *s.Limit < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
+ }
+ if s.NextMarker != nil && len(*s.NextMarker) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("NextMarker", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type ListIPSetsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of IPSetSummary objects.
+ IPSets []*IPSetSummary `type:"list"`
+
+ // If you have more IPSet objects than the number that you specified for Limit
+ // in the request, the response includes a NextMarker value. To list more IPSet
+ // objects, submit another ListIPSets request, and specify the NextMarker value
+ // from the response in the NextMarker value in the next request.
+ NextMarker *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ListIPSetsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListIPSetsOutput) GoString() string {
+ return s.String()
+}
+
+type ListRulesInput struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the number of Rules that you want AWS WAF to return for this request.
+ // If you have more Rules than the number that you specify for Limit, the response
+ // includes a NextMarker value that you can use to get another batch of Rules.
+ Limit *int64 `min:"1" type:"integer" required:"true"`
+
+ // If you specify a value for Limit and you have more Rules than the value of
+ // Limit, AWS WAF returns a NextMarker value in the response that allows you
+ // to list another group of Rules. For the second and subsequent ListRules requests,
+ // specify the value of NextMarker from the previous response to get information
+ // about another batch of Rules.
+ NextMarker *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ListRulesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListRulesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
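+//
+// Editor's note: a hedged pagination sketch, not generated code; it assumes
+// a configured *waf.WAF client named svc. Per the Limit and NextMarker docs
+// above, a response that carries a NextMarker has another batch to fetch.
+//
+//	input := &waf.ListRulesInput{Limit: aws.Int64(100)}
+//	for {
+//		page, err := svc.ListRules(input)
+//		if err != nil {
+//			return err
+//		}
+//		for _, r := range page.Rules {
+//			fmt.Println(aws.StringValue(r.RuleId), aws.StringValue(r.Name))
+//		}
+//		if page.NextMarker == nil {
+//			break
+//		}
+//		input.NextMarker = page.NextMarker
+//	}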
+func (s *ListRulesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListRulesInput"}
+ if s.Limit == nil {
+ invalidParams.Add(request.NewErrParamRequired("Limit"))
+ }
+ if s.Limit != nil && *s.Limit < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
+ }
+ if s.NextMarker != nil && len(*s.NextMarker) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("NextMarker", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type ListRulesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If you have more Rules than the number that you specified for Limit in the
+ // request, the response includes a NextMarker value. To list more Rules, submit
+ // another ListRules request, and specify the NextMarker value from the response
+ // in the NextMarker value in the next request.
+ NextMarker *string `min:"1" type:"string"`
+
+ // An array of RuleSummary objects.
+ Rules []*RuleSummary `type:"list"`
+}
+
+// String returns the string representation
+func (s ListRulesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListRulesOutput) GoString() string {
+ return s.String()
+}
+
+type ListSizeConstraintSetsInput struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the number of SizeConstraintSet objects that you want AWS WAF to
+ // return for this request. If you have more SizeConstraintSet objects than
+ // the number you specify for Limit, the response includes a NextMarker value
+ // that you can use to get another batch of SizeConstraintSet objects.
+ Limit *int64 `min:"1" type:"integer" required:"true"`
+
+ // If you specify a value for Limit and you have more SizeConstraintSets than
+ // the value of Limit, AWS WAF returns a NextMarker value in the response that
+ // allows you to list another group of SizeConstraintSets. For the second and
+ // subsequent ListSizeConstraintSets requests, specify the value of NextMarker
+ // from the previous response to get information about another batch of SizeConstraintSets.
+ NextMarker *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ListSizeConstraintSetsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListSizeConstraintSetsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListSizeConstraintSetsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListSizeConstraintSetsInput"}
+ if s.Limit == nil {
+ invalidParams.Add(request.NewErrParamRequired("Limit"))
+ }
+ if s.Limit != nil && *s.Limit < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
+ }
+ if s.NextMarker != nil && len(*s.NextMarker) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("NextMarker", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type ListSizeConstraintSetsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If you have more SizeConstraintSet objects than the number that you specified
+ // for Limit in the request, the response includes a NextMarker value. To list
+ // more SizeConstraintSet objects, submit another ListSizeConstraintSets request,
+ // and specify the NextMarker value from the response in the NextMarker value
+ // in the next request.
+ NextMarker *string `min:"1" type:"string"`
+
+ // An array of SizeConstraintSetSummary objects.
+ SizeConstraintSets []*SizeConstraintSetSummary `type:"list"`
+}
+
+// String returns the string representation
+func (s ListSizeConstraintSetsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListSizeConstraintSetsOutput) GoString() string {
+ return s.String()
+}
+
+// A request to list the SqlInjectionMatchSet objects created by the current
+// AWS account.
+type ListSqlInjectionMatchSetsInput struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the number of SqlInjectionMatchSet objects that you want AWS WAF
+ // to return for this request. If you have more SqlInjectionMatchSet objects
+ // than the number you specify for Limit, the response includes a NextMarker
+ // value that you can use to get another batch of SqlInjectionMatchSet objects.
+ Limit *int64 `min:"1" type:"integer" required:"true"`
+
+ // If you specify a value for Limit and you have more SqlInjectionMatchSet objects
+ // than the value of Limit, AWS WAF returns a NextMarker value in the response
+ // that allows you to list another group of SqlInjectionMatchSets. For the second
+ // and subsequent ListSqlInjectionMatchSets requests, specify the value of NextMarker
+ // from the previous response to get information about another batch of SqlInjectionMatchSets.
+ NextMarker *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ListSqlInjectionMatchSetsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListSqlInjectionMatchSetsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListSqlInjectionMatchSetsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListSqlInjectionMatchSetsInput"}
+ if s.Limit == nil {
+ invalidParams.Add(request.NewErrParamRequired("Limit"))
+ }
+ if s.Limit != nil && *s.Limit < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
+ }
+ if s.NextMarker != nil && len(*s.NextMarker) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("NextMarker", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// The response to a ListSqlInjectionMatchSets request.
+type ListSqlInjectionMatchSetsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If you have more SqlInjectionMatchSet objects than the number that you specified
+ // for Limit in the request, the response includes a NextMarker value. To list
+ // more SqlInjectionMatchSet objects, submit another ListSqlInjectionMatchSets
+ // request, and specify the NextMarker value from the response in the NextMarker
+ // value in the next request.
+ NextMarker *string `min:"1" type:"string"`
+
+ // An array of SqlInjectionMatchSetSummary objects.
+ SqlInjectionMatchSets []*SqlInjectionMatchSetSummary `type:"list"`
+}
+
+// String returns the string representation
+func (s ListSqlInjectionMatchSetsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListSqlInjectionMatchSetsOutput) GoString() string {
+ return s.String()
+}
+
+type ListWebACLsInput struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the number of WebACL objects that you want AWS WAF to return for
+ // this request. If you have more WebACL objects than the number that you specify
+ // for Limit, the response includes a NextMarker value that you can use to get
+ // another batch of WebACL objects.
+ Limit *int64 `min:"1" type:"integer" required:"true"`
+
+ // If you specify a value for Limit and you have more WebACL objects than the
+ // number that you specify for Limit, AWS WAF returns a NextMarker value in
+ // the response that allows you to list another group of WebACL objects. For
+ // the second and subsequent ListWebACLs requests, specify the value of NextMarker
+ // from the previous response to get information about another batch of WebACL
+ // objects.
+ NextMarker *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ListWebACLsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListWebACLsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListWebACLsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListWebACLsInput"}
+ if s.Limit == nil {
+ invalidParams.Add(request.NewErrParamRequired("Limit"))
+ }
+ if s.Limit != nil && *s.Limit < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
+ }
+ if s.NextMarker != nil && len(*s.NextMarker) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("NextMarker", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type ListWebACLsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If you have more WebACL objects than the number that you specified for Limit
+ // in the request, the response includes a NextMarker value. To list more WebACL
+ // objects, submit another ListWebACLs request, and specify the NextMarker value
+ // from the response in the NextMarker value in the next request.
+ NextMarker *string `min:"1" type:"string"`
+
+ // An array of WebACLSummary objects.
+ WebACLs []*WebACLSummary `type:"list"`
+}
+
+// String returns the string representation
+func (s ListWebACLsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListWebACLsOutput) GoString() string {
+ return s.String()
+}
+
+// A request to list the XssMatchSet objects created by the current AWS account.
+type ListXssMatchSetsInput struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the number of XssMatchSet objects that you want AWS WAF to return
+ // for this request. If you have more XssMatchSet objects than the number you
+ // specify for Limit, the response includes a NextMarker value that you can
+ // use to get another batch of XssMatchSet objects.
+ Limit *int64 `min:"1" type:"integer" required:"true"`
+
+ // If you specify a value for Limit and you have more XssMatchSet objects than
+ // the value of Limit, AWS WAF returns a NextMarker value in the response that
+ // allows you to list another group of XssMatchSets. For the second and subsequent
+ // ListXssMatchSets requests, specify the value of NextMarker from the previous
+ // response to get information about another batch of XssMatchSets.
+ NextMarker *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ListXssMatchSetsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListXssMatchSetsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
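+//
+// Editor's note: a small hedged sketch, not generated code. Validate catches
+// parameter problems client-side before a request is sent; for example, the
+// minimum Limit of 1:
+//
+//	input := &waf.ListXssMatchSetsInput{Limit: aws.Int64(0)}
+//	err := input.Validate() // reports a min-value violation for "Limit"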
+func (s *ListXssMatchSetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListXssMatchSetsInput"} + if s.Limit == nil { + invalidParams.Add(request.NewErrParamRequired("Limit")) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextMarker != nil && len(*s.NextMarker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextMarker", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The response to a ListXssMatchSets request. +type ListXssMatchSetsOutput struct { + _ struct{} `type:"structure"` + + // If you have more XssMatchSet objects than the number that you specified for + // Limit in the request, the response includes a NextMarker value. To list more + // XssMatchSet objects, submit another ListXssMatchSets request, and specify + // the NextMarker value from the response in the NextMarker value in the next + // request. + NextMarker *string `min:"1" type:"string"` + + // An array of XssMatchSetSummary objects. + XssMatchSets []*XssMatchSetSummary `type:"list"` +} + +// String returns the string representation +func (s ListXssMatchSetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListXssMatchSetsOutput) GoString() string { + return s.String() +} + +// Specifies the ByteMatchSet, IPSet, SqlInjectionMatchSet, XssMatchSet, and +// SizeConstraintSet objects that you want to add to a Rule and, for each object, +// indicates whether you want to negate the settings, for example, requests +// that do NOT originate from the IP address 192.0.2.44. +type Predicate struct { + _ struct{} `type:"structure"` + + // A unique identifier for a predicate in a Rule, such as ByteMatchSetId or + // IPSetId. The ID is returned by the corresponding Create or List command. + DataId *string `min:"1" type:"string" required:"true"` + + // Set Negated to False if you want AWS WAF to allow, block, or count requests + // based on the settings in the specified ByteMatchSet, IPSet, SqlInjectionMatchSet, + // XssMatchSet, or SizeConstraintSet. For example, if an IPSet includes the + // IP address 192.0.2.44, AWS WAF will allow or block requests based on that + // IP address. + // + // Set Negated to True if you want AWS WAF to allow or block a request based + // on the negation of the settings in the ByteMatchSet, IPSet, SqlInjectionMatchSet, + // XssMatchSet, or SizeConstraintSet. For example, if an IPSet includes the + // IP address 192.0.2.44, AWS WAF will allow, block, or count requests based + // on all IP addresses except 192.0.2.44. + Negated *bool `type:"boolean" required:"true"` + + // The type of predicate in a Rule, such as ByteMatchSet or IPSet. + Type *string `type:"string" required:"true" enum:"PredicateType"` +} + +// String returns the string representation +func (s Predicate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Predicate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
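+//
+// Editor's note: a hedged sketch, not generated code. A negated IP predicate
+// matches every request except those from the referenced IPSet; ipSetID is a
+// hypothetical identifier returned by CreateIPSet, and "IPMatch" is assumed
+// to be the PredicateType value for IPSet predicates.
+//
+//	p := &waf.Predicate{
+//		DataId:  aws.String(ipSetID),
+//		Negated: aws.Bool(true),
+//		Type:    aws.String("IPMatch"),
+//	}
+//	if err := p.Validate(); err != nil {
+//		// a missing DataId, Negated, or Type is reported here
+//	}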
+func (s *Predicate) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Predicate"}
+ if s.DataId == nil {
+ invalidParams.Add(request.NewErrParamRequired("DataId"))
+ }
+ if s.DataId != nil && len(*s.DataId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("DataId", 1))
+ }
+ if s.Negated == nil {
+ invalidParams.Add(request.NewErrParamRequired("Negated"))
+ }
+ if s.Type == nil {
+ invalidParams.Add(request.NewErrParamRequired("Type"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// A combination of ByteMatchSet, IPSet, and/or SqlInjectionMatchSet objects
+// that identify the web requests that you want to allow, block, or count. For
+// example, you might create a Rule that includes the following predicates:
+//
+// An IPSet that causes AWS WAF to search for web requests that originate
+// from the IP address 192.0.2.44 A ByteMatchSet that causes AWS WAF to search
+// for web requests for which the value of the User-Agent header is BadBot.
+// To match the settings in this Rule, a request must originate from 192.0.2.44
+// AND include a User-Agent header for which the value is BadBot.
+type Rule struct {
+ _ struct{} `type:"structure"`
+
+ MetricName *string `type:"string"`
+
+ // The friendly name or description for the Rule. You can't change the name
+ // of a Rule after you create it.
+ Name *string `min:"1" type:"string"`
+
+ // The Predicates object contains one Predicate element for each ByteMatchSet,
+ // IPSet, or SqlInjectionMatchSet object that you want to include in a Rule.
+ Predicates []*Predicate `type:"list" required:"true"`
+
+ // A unique identifier for a Rule. You use RuleId to get more information about
+ // a Rule (see GetRule), update a Rule (see UpdateRule), insert a Rule into
+ // a WebACL or delete one from a WebACL (see UpdateWebACL), or delete a Rule
+ // from AWS WAF (see DeleteRule).
+ //
+ // RuleId is returned by CreateRule and by ListRules.
+ RuleId *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s Rule) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Rule) GoString() string {
+ return s.String()
+}
+
+// Contains the identifier and the friendly name or description of the Rule.
+type RuleSummary struct {
+ _ struct{} `type:"structure"`
+
+ // A friendly name or description of the Rule. You can't change the name of
+ // a Rule after you create it.
+ Name *string `min:"1" type:"string" required:"true"`
+
+ // A unique identifier for a Rule. You use RuleId to get more information about
+ // a Rule (see GetRule), update a Rule (see UpdateRule), insert a Rule into
+ // a WebACL or delete one from a WebACL (see UpdateWebACL), or delete a Rule
+ // from AWS WAF (see DeleteRule).
+ //
+ // RuleId is returned by CreateRule and by ListRules.
+ RuleId *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RuleSummary) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RuleSummary) GoString() string {
+ return s.String()
+}
+
+// Specifies a Predicate (such as an IPSet) and indicates whether you want to
+// add it to a Rule or delete it from a Rule.
+type RuleUpdate struct {
+ _ struct{} `type:"structure"`
+
+ // Specify INSERT to add a Predicate to a Rule. Use DELETE to remove a Predicate
+ // from a Rule.
+ Action *string `type:"string" required:"true" enum:"ChangeAction"` + + // The ID of the Predicate (such as an IPSet) that you want to add to a Rule. + Predicate *Predicate `type:"structure" required:"true"` +} + +// String returns the string representation +func (s RuleUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RuleUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RuleUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RuleUpdate"} + if s.Action == nil { + invalidParams.Add(request.NewErrParamRequired("Action")) + } + if s.Predicate == nil { + invalidParams.Add(request.NewErrParamRequired("Predicate")) + } + if s.Predicate != nil { + if err := s.Predicate.Validate(); err != nil { + invalidParams.AddNested("Predicate", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The response from a GetSampledRequests request includes a SampledHTTPRequests +// complex type that appears as SampledRequests in the response syntax. SampledHTTPRequests +// contains one SampledHTTPRequest object for each web request that is returned +// by GetSampledRequests. +type SampledHTTPRequest struct { + _ struct{} `type:"structure"` + + // The action for the Rule that the request matched: ALLOW, BLOCK, or COUNT. + Action *string `type:"string"` + + // A complex type that contains detailed information about the request. + Request *HTTPRequest `type:"structure" required:"true"` + + // The time at which AWS WAF received the request from your AWS resource, in + // Unix time format (in seconds). + Timestamp *time.Time `type:"timestamp" timestampFormat:"unix"` + + // A value that indicates how one result in the response relates proportionally + // to other results in the response. A result that has a weight of 2 represents + // roughly twice as many CloudFront web requests as a result that has a weight + // of 1. + Weight *int64 `type:"long" required:"true"` +} + +// String returns the string representation +func (s SampledHTTPRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SampledHTTPRequest) GoString() string { + return s.String() +} + +// Specifies a constraint on the size of a part of the web request. AWS WAF +// uses the Size, ComparisonOperator, and FieldToMatch to build an expression +// in the form of "Size ComparisonOperator size in bytes of FieldToMatch". If +// that expression is true, the SizeConstraint is considered to match. +type SizeConstraint struct { + _ struct{} `type:"structure"` + + // The type of comparison you want AWS WAF to perform. AWS WAF uses this in + // combination with the provided Size and FieldToMatch to build an expression + // in the form of "Size ComparisonOperator size in bytes of FieldToMatch". If + // that expression is true, the SizeConstraint is considered to match. 
+ // + // EQ: Used to test if the Size is equal to the size of the FieldToMatch + // + // NE: Used to test if the Size is not equal to the size of the FieldToMatch + // + // LE: Used to test if the Size is less than or equal to the size of the FieldToMatch + // + // LT: Used to test if the Size is strictly less than the size of the FieldToMatch + // + // GE: Used to test if the Size is greater than or equal to the size of the + // FieldToMatch + // + // GT: Used to test if the Size is strictly greater than the size of the FieldToMatch + ComparisonOperator *string `type:"string" required:"true" enum:"ComparisonOperator"` + + // Specifies where in a web request to look for TargetString. + FieldToMatch *FieldToMatch `type:"structure" required:"true"` + + // The size in bytes that you want AWS WAF to compare against the size of the + // specified FieldToMatch. AWS WAF uses this in combination with ComparisonOperator + // and FieldToMatch to build an expression in the form of "Size ComparisonOperator + // size in bytes of FieldToMatch". If that expression is true, the SizeConstraint + // is considered to match. + // + // Valid values for size are 0 - 21474836480 bytes (0 - 20 GB). + // + // If you specify URI for the value of Type, the / in the URI counts as one + // character. For example, the URI /logo.jpg is nine characters long. + Size *int64 `type:"long" required:"true"` + + // Text transformations eliminate some of the unusual formatting that attackers + // use in web requests in an effort to bypass AWS WAF. If you specify a transformation, + // AWS WAF performs the transformation on FieldToMatch before inspecting a request + // for a match. + // + // Note that if you choose BODY for the value of Type, you must choose NONE + // for TextTransformation because CloudFront forwards only the first 8192 bytes + // for inspection. + // + // NONE + // + // Specify NONE if you don't want to perform any text transformations. + // + // CMD_LINE + // + // When you're concerned that attackers are injecting an operating system command + // line command and using unusual formatting to disguise some or all of the + // command, use this option to perform the following transformations: + // + // Delete the following characters: \ " ' ^ Delete spaces before the following + // characters: / ( Replace the following characters with a space: , ; Replace + // multiple spaces with one space Convert uppercase letters (A-Z) to lowercase + // (a-z) COMPRESS_WHITE_SPACE + // + // Use this option to replace the following characters with a space character + // (decimal 32): + // + // \f, formfeed, decimal 12 \t, tab, decimal 9 \n, newline, decimal 10 \r, + // carriage return, decimal 13 \v, vertical tab, decimal 11 non-breaking space, + // decimal 160 COMPRESS_WHITE_SPACE also replaces multiple spaces with one + // space. + // + // HTML_ENTITY_DECODE + // + // Use this option to replace HTML-encoded characters with unencoded characters. 
+ // HTML_ENTITY_DECODE performs the following operations: + // + // Replaces (ampersand)quot; with " Replaces (ampersand)nbsp; with a non-breaking + // space, decimal 160 Replaces (ampersand)lt; with a "less than" symbol Replaces + // (ampersand)gt; with > Replaces characters that are represented in hexadecimal + // format, (ampersand)#xhhhh;, with the corresponding characters Replaces characters + // that are represented in decimal format, (ampersand)#nnnn;, with the corresponding + // characters LOWERCASE + // + // Use this option to convert uppercase letters (A-Z) to lowercase (a-z). + // + // URL_DECODE + // + // Use this option to decode a URL-encoded value. + TextTransformation *string `type:"string" required:"true" enum:"TextTransformation"` +} + +// String returns the string representation +func (s SizeConstraint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SizeConstraint) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SizeConstraint) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SizeConstraint"} + if s.ComparisonOperator == nil { + invalidParams.Add(request.NewErrParamRequired("ComparisonOperator")) + } + if s.FieldToMatch == nil { + invalidParams.Add(request.NewErrParamRequired("FieldToMatch")) + } + if s.Size == nil { + invalidParams.Add(request.NewErrParamRequired("Size")) + } + if s.TextTransformation == nil { + invalidParams.Add(request.NewErrParamRequired("TextTransformation")) + } + if s.FieldToMatch != nil { + if err := s.FieldToMatch.Validate(); err != nil { + invalidParams.AddNested("FieldToMatch", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains SizeConstraint objects, which specify the parts +// of web requests that you want AWS WAF to inspect the size of. If a SizeConstraintSet +// contains more than one SizeConstraint object, a request only needs to match +// one constraint to be considered a match. +type SizeConstraintSet struct { + _ struct{} `type:"structure"` + + // The name, if any, of the SizeConstraintSet. + Name *string `min:"1" type:"string"` + + // A unique identifier for a SizeConstraintSet. You use SizeConstraintSetId + // to get information about a SizeConstraintSet (see GetSizeConstraintSet), + // update a SizeConstraintSet (see UpdateSizeConstraintSet), insert a SizeConstraintSet + // into a Rule or delete one from a Rule (see UpdateRule), and delete a SizeConstraintSet + // from AWS WAF (see DeleteSizeConstraintSet). + // + // SizeConstraintSetId is returned by CreateSizeConstraintSet and by ListSizeConstraintSets. + SizeConstraintSetId *string `min:"1" type:"string" required:"true"` + + // Specifies the parts of web requests that you want to inspect the size of. + SizeConstraints []*SizeConstraint `type:"list" required:"true"` +} + +// String returns the string representation +func (s SizeConstraintSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SizeConstraintSet) GoString() string { + return s.String() +} + +// The Id and Name of a SizeConstraintSet. +type SizeConstraintSetSummary struct { + _ struct{} `type:"structure"` + + // The name of the SizeConstraintSet, if any. + Name *string `min:"1" type:"string" required:"true"` + + // A unique identifier for a SizeConstraintSet. 
You use SizeConstraintSetId + // to get information about a SizeConstraintSet (see GetSizeConstraintSet), + // update a SizeConstraintSet (see UpdateSizeConstraintSet), insert a SizeConstraintSet + // into a Rule or delete one from a Rule (see UpdateRule), and delete a SizeConstraintSet + // from AWS WAF (see DeleteSizeConstraintSet). + // + // SizeConstraintSetId is returned by CreateSizeConstraintSet and by ListSizeConstraintSets. + SizeConstraintSetId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s SizeConstraintSetSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SizeConstraintSetSummary) GoString() string { + return s.String() +} + +// Specifies the part of a web request that you want to inspect the size of +// and indicates whether you want to add the specification to a SizeConstraintSet +// or delete it from a SizeConstraintSet. +type SizeConstraintSetUpdate struct { + _ struct{} `type:"structure"` + + // Specify INSERT to add a SizeConstraintSetUpdate to a SizeConstraintSet. Use + // DELETE to remove a SizeConstraintSetUpdate from a SizeConstraintSet. + Action *string `type:"string" required:"true" enum:"ChangeAction"` + + // Specifies a constraint on the size of a part of the web request. AWS WAF + // uses the Size, ComparisonOperator, and FieldToMatch to build an expression + // in the form of "Size ComparisonOperator size in bytes of FieldToMatch". If + // that expression is true, the SizeConstraint is considered to match. + SizeConstraint *SizeConstraint `type:"structure" required:"true"` +} + +// String returns the string representation +func (s SizeConstraintSetUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SizeConstraintSetUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SizeConstraintSetUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SizeConstraintSetUpdate"} + if s.Action == nil { + invalidParams.Add(request.NewErrParamRequired("Action")) + } + if s.SizeConstraint == nil { + invalidParams.Add(request.NewErrParamRequired("SizeConstraint")) + } + if s.SizeConstraint != nil { + if err := s.SizeConstraint.Validate(); err != nil { + invalidParams.AddNested("SizeConstraint", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains SqlInjectionMatchTuple objects, which specify +// the parts of web requests that you want AWS WAF to inspect for snippets of +// malicious SQL code and, if you want AWS WAF to inspect a header, the name +// of the header. If a SqlInjectionMatchSet contains more than one SqlInjectionMatchTuple +// object, a request needs to include snippets of SQL code in only one of the +// specified parts of the request to be considered a match. +type SqlInjectionMatchSet struct { + _ struct{} `type:"structure"` + + // The name, if any, of the SqlInjectionMatchSet. + Name *string `min:"1" type:"string"` + + // A unique identifier for a SqlInjectionMatchSet. 
You use SqlInjectionMatchSetId + // to get information about a SqlInjectionMatchSet (see GetSqlInjectionMatchSet), + // update a SqlInjectionMatchSet (see UpdateSqlInjectionMatchSet), insert a + // SqlInjectionMatchSet into a Rule or delete one from a Rule (see UpdateRule), + // and delete a SqlInjectionMatchSet from AWS WAF (see DeleteSqlInjectionMatchSet). + // + // SqlInjectionMatchSetId is returned by CreateSqlInjectionMatchSet and by + // ListSqlInjectionMatchSets. + SqlInjectionMatchSetId *string `min:"1" type:"string" required:"true"` + + // Specifies the parts of web requests that you want to inspect for snippets + // of malicious SQL code. + SqlInjectionMatchTuples []*SqlInjectionMatchTuple `type:"list" required:"true"` +} + +// String returns the string representation +func (s SqlInjectionMatchSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SqlInjectionMatchSet) GoString() string { + return s.String() +} + +// The Id and Name of a SqlInjectionMatchSet. +type SqlInjectionMatchSetSummary struct { + _ struct{} `type:"structure"` + + // The name of the SqlInjectionMatchSet, if any, specified by Id. + Name *string `min:"1" type:"string" required:"true"` + + // A unique identifier for a SqlInjectionMatchSet. You use SqlInjectionMatchSetId + // to get information about a SqlInjectionMatchSet (see GetSqlInjectionMatchSet), + // update a SqlInjectionMatchSet (see UpdateSqlInjectionMatchSet), insert a + // SqlInjectionMatchSet into a Rule or delete one from a Rule (see UpdateRule), + // and delete a SqlInjectionMatchSet from AWS WAF (see DeleteSqlInjectionMatchSet). + // + // SqlInjectionMatchSetId is returned by CreateSqlInjectionMatchSet and by + // ListSqlInjectionMatchSets. + SqlInjectionMatchSetId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s SqlInjectionMatchSetSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SqlInjectionMatchSetSummary) GoString() string { + return s.String() +} + +// Specifies the part of a web request that you want to inspect for snippets +// of malicious SQL code and indicates whether you want to add the specification +// to a SqlInjectionMatchSet or delete it from a SqlInjectionMatchSet. +type SqlInjectionMatchSetUpdate struct { + _ struct{} `type:"structure"` + + // Specify INSERT to add a SqlInjectionMatchSetUpdate to a SqlInjectionMatchSet. + // Use DELETE to remove a SqlInjectionMatchSetUpdate from a SqlInjectionMatchSet. + Action *string `type:"string" required:"true" enum:"ChangeAction"` + + // Specifies the part of a web request that you want AWS WAF to inspect for + // snippets of malicious SQL code and, if you want AWS WAF to inspect a header, + // the name of the header. + SqlInjectionMatchTuple *SqlInjectionMatchTuple `type:"structure" required:"true"` +} + +// String returns the string representation +func (s SqlInjectionMatchSetUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SqlInjectionMatchSetUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
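+//
+// Editor's note: a minimal hedged sketch, not generated code. This update
+// inserts a tuple that inspects the URL-decoded query string for SQL snippets;
+// URL_DECODE is one of the TextTransformation values documented in this file,
+// and "QUERY_STRING" is assumed to be the FieldToMatch Type value for the
+// query string.
+//
+//	update := &waf.SqlInjectionMatchSetUpdate{
+//		Action: aws.String("INSERT"),
+//		SqlInjectionMatchTuple: &waf.SqlInjectionMatchTuple{
+//			FieldToMatch:       &waf.FieldToMatch{Type: aws.String("QUERY_STRING")},
+//			TextTransformation: aws.String("URL_DECODE"),
+//		},
+//	}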
+func (s *SqlInjectionMatchSetUpdate) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "SqlInjectionMatchSetUpdate"}
+ if s.Action == nil {
+ invalidParams.Add(request.NewErrParamRequired("Action"))
+ }
+ if s.SqlInjectionMatchTuple == nil {
+ invalidParams.Add(request.NewErrParamRequired("SqlInjectionMatchTuple"))
+ }
+ if s.SqlInjectionMatchTuple != nil {
+ if err := s.SqlInjectionMatchTuple.Validate(); err != nil {
+ invalidParams.AddNested("SqlInjectionMatchTuple", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Specifies the part of a web request that you want AWS WAF to inspect for
+// snippets of malicious SQL code and, if you want AWS WAF to inspect a header,
+// the name of the header.
+type SqlInjectionMatchTuple struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies where in a web request to look for TargetString.
+ FieldToMatch *FieldToMatch `type:"structure" required:"true"`
+
+ // Text transformations eliminate some of the unusual formatting that attackers
+ // use in web requests in an effort to bypass AWS WAF. If you specify a transformation,
+ // AWS WAF performs the transformation on FieldToMatch before inspecting a request
+ // for a match.
+ //
+ // CMD_LINE
+ //
+ // When you're concerned that attackers are injecting an operating system command
+ // line command and using unusual formatting to disguise some or all of the command,
+ // use this option to perform the following transformations:
+ //
+ // Delete the following characters: \ " ' ^ Delete spaces before the following
+ // characters: / ( Replace the following characters with a space: , ; Replace
+ // multiple spaces with one space Convert uppercase letters (A-Z) to lowercase
+ // (a-z) COMPRESS_WHITE_SPACE
+ //
+ // Use this option to replace the following characters with a space character
+ // (decimal 32):
+ //
+ // \f, formfeed, decimal 12 \t, tab, decimal 9 \n, newline, decimal 10 \r,
+ // carriage return, decimal 13 \v, vertical tab, decimal 11 non-breaking space,
+ // decimal 160 COMPRESS_WHITE_SPACE also replaces multiple spaces with one
+ // space.
+ //
+ // HTML_ENTITY_DECODE
+ //
+ // Use this option to replace HTML-encoded characters with unencoded characters.
+ // HTML_ENTITY_DECODE performs the following operations:
+ //
+ // Replaces (ampersand)quot; with " Replaces (ampersand)nbsp; with a non-breaking
+ // space, decimal 160 Replaces (ampersand)lt; with a "less than" symbol Replaces
+ // (ampersand)gt; with > Replaces characters that are represented in hexadecimal
+ // format, (ampersand)#xhhhh;, with the corresponding characters Replaces characters
+ // that are represented in decimal format, (ampersand)#nnnn;, with the corresponding
+ // characters LOWERCASE
+ //
+ // Use this option to convert uppercase letters (A-Z) to lowercase (a-z).
+ //
+ // URL_DECODE
+ //
+ // Use this option to decode a URL-encoded value.
+ //
+ // NONE
+ //
+ // Specify NONE if you don't want to perform any text transformations.
+ TextTransformation *string `type:"string" required:"true" enum:"TextTransformation"`
+}
+
+// String returns the string representation
+func (s SqlInjectionMatchTuple) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SqlInjectionMatchTuple) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
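+//
+// Editor's note: a hedged sketch, not generated code. For header inspection,
+// FieldToMatch.Data is assumed to carry the header name and "HEADER" to be
+// the corresponding Type value; Validate also runs the nested FieldToMatch
+// checks.
+//
+//	tuple := &waf.SqlInjectionMatchTuple{
+//		FieldToMatch: &waf.FieldToMatch{
+//			Type: aws.String("HEADER"),
+//			Data: aws.String("User-Agent"),
+//		},
+//		TextTransformation: aws.String("HTML_ENTITY_DECODE"),
+//	}
+//	err := tuple.Validate()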
+func (s *SqlInjectionMatchTuple) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SqlInjectionMatchTuple"} + if s.FieldToMatch == nil { + invalidParams.Add(request.NewErrParamRequired("FieldToMatch")) + } + if s.TextTransformation == nil { + invalidParams.Add(request.NewErrParamRequired("TextTransformation")) + } + if s.FieldToMatch != nil { + if err := s.FieldToMatch.Validate(); err != nil { + invalidParams.AddNested("FieldToMatch", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// In a GetSampledRequests request, the StartTime and EndTime objects specify +// the time range for which you want AWS WAF to return a sample of web requests. +// +// In a GetSampledRequests response, the StartTime and EndTime objects specify +// the time range for which AWS WAF actually returned a sample of web requests. +// AWS WAF gets the specified number of requests from among the first 5,000 +// requests that your AWS resource receives during the specified time period. +// If your resource receives more than 5,000 requests during that period, AWS +// WAF stops sampling after the 5,000th request. In that case, EndTime is the +// time that AWS WAF received the 5,000th request. +type TimeWindow struct { + _ struct{} `type:"structure"` + + // The end of the time range from which you want GetSampledRequests to return + // a sample of the requests that your AWS resource received. You can specify + // any time range in the previous three hours. + EndTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` + + // The beginning of the time range from which you want GetSampledRequests to + // return a sample of the requests that your AWS resource received. You can + // specify any time range in the previous three hours. + StartTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` +} + +// String returns the string representation +func (s TimeWindow) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TimeWindow) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TimeWindow) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TimeWindow"} + if s.EndTime == nil { + invalidParams.Add(request.NewErrParamRequired("EndTime")) + } + if s.StartTime == nil { + invalidParams.Add(request.NewErrParamRequired("StartTime")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateByteMatchSetInput struct { + _ struct{} `type:"structure"` + + // The ByteMatchSetId of the ByteMatchSet that you want to update. ByteMatchSetId + // is returned by CreateByteMatchSet and by ListByteMatchSets. + ByteMatchSetId *string `min:"1" type:"string" required:"true"` + + // The value returned by the most recent call to GetChangeToken. + ChangeToken *string `type:"string" required:"true"` + + // An array of ByteMatchSetUpdate objects that you want to insert into or delete + // from a ByteMatchSet. 
For more information, see the applicable data types: + // + // ByteMatchSetUpdate: Contains Action and ByteMatchTuple ByteMatchTuple: + // Contains FieldToMatch, PositionalConstraint, TargetString, and TextTransformation + // FieldToMatch: Contains Data and Type + Updates []*ByteMatchSetUpdate `type:"list" required:"true"` +} + +// String returns the string representation +func (s UpdateByteMatchSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateByteMatchSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateByteMatchSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateByteMatchSetInput"} + if s.ByteMatchSetId == nil { + invalidParams.Add(request.NewErrParamRequired("ByteMatchSetId")) + } + if s.ByteMatchSetId != nil && len(*s.ByteMatchSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ByteMatchSetId", 1)) + } + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.Updates == nil { + invalidParams.Add(request.NewErrParamRequired("Updates")) + } + if s.Updates != nil { + for i, v := range s.Updates { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Updates", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateByteMatchSetOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the UpdateByteMatchSet request. You + // can also use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. + ChangeToken *string `type:"string"` +} + +// String returns the string representation +func (s UpdateByteMatchSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateByteMatchSetOutput) GoString() string { + return s.String() +} + +type UpdateIPSetInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + ChangeToken *string `type:"string" required:"true"` + + // The IPSetId of the IPSet that you want to update. IPSetId is returned by + // CreateIPSet and by ListIPSets. + IPSetId *string `min:"1" type:"string" required:"true"` + + // An array of IPSetUpdate objects that you want to insert into or delete from + // an IPSet. For more information, see the applicable data types: + // + // IPSetUpdate: Contains Action and IPSetDescriptor IPSetDescriptor: Contains + // Type and Value + Updates []*IPSetUpdate `type:"list" required:"true"` +} + +// String returns the string representation +func (s UpdateIPSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateIPSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
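+//
+// Editor's note: a hedged end-to-end sketch, not generated code. As the
+// ChangeToken docs above describe, every update call takes the value from the
+// most recent GetChangeToken call; svc is an assumed *waf.WAF client, ipSetID
+// a hypothetical IPSetId, and update the IPSetUpdate sketched earlier.
+//
+//	token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
+//	if err != nil {
+//		return err
+//	}
+//	out, err := svc.UpdateIPSet(&waf.UpdateIPSetInput{
+//		ChangeToken: token.ChangeToken,
+//		IPSetId:     aws.String(ipSetID),
+//		Updates:     []*waf.IPSetUpdate{update},
+//	})
+//	// out.ChangeToken can then be polled with GetChangeTokenStatus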
+func (s *UpdateIPSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateIPSetInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.IPSetId == nil { + invalidParams.Add(request.NewErrParamRequired("IPSetId")) + } + if s.IPSetId != nil && len(*s.IPSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IPSetId", 1)) + } + if s.Updates == nil { + invalidParams.Add(request.NewErrParamRequired("Updates")) + } + if s.Updates != nil { + for i, v := range s.Updates { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Updates", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateIPSetOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the UpdateIPSet request. You can + // also use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. + ChangeToken *string `type:"string"` +} + +// String returns the string representation +func (s UpdateIPSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateIPSetOutput) GoString() string { + return s.String() +} + +type UpdateRuleInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + ChangeToken *string `type:"string" required:"true"` + + // The RuleId of the Rule that you want to update. RuleId is returned by CreateRule + // and by ListRules. + RuleId *string `min:"1" type:"string" required:"true"` + + // An array of RuleUpdate objects that you want to insert into or delete from + // a Rule. For more information, see the applicable data types: + // + // RuleUpdate: Contains Action and Predicate Predicate: Contains DataId, Negated, + // and Type FieldToMatch: Contains Data and Type + Updates []*RuleUpdate `type:"list" required:"true"` +} + +// String returns the string representation +func (s UpdateRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateRuleInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.RuleId == nil { + invalidParams.Add(request.NewErrParamRequired("RuleId")) + } + if s.RuleId != nil && len(*s.RuleId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RuleId", 1)) + } + if s.Updates == nil { + invalidParams.Add(request.NewErrParamRequired("Updates")) + } + if s.Updates != nil { + for i, v := range s.Updates { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Updates", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateRuleOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the UpdateRule request. You can also + // use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. 
+ ChangeToken *string `type:"string"` +} + +// String returns the string representation +func (s UpdateRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateRuleOutput) GoString() string { + return s.String() +} + +type UpdateSizeConstraintSetInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + ChangeToken *string `type:"string" required:"true"` + + // The SizeConstraintSetId of the SizeConstraintSet that you want to update. + // SizeConstraintSetId is returned by CreateSizeConstraintSet and by ListSizeConstraintSets. + SizeConstraintSetId *string `min:"1" type:"string" required:"true"` + + // An array of SizeConstraintSetUpdate objects that you want to insert into + // or delete from a SizeConstraintSet. For more information, see the applicable + // data types: + // + // SizeConstraintSetUpdate: Contains Action and SizeConstraint SizeConstraint: + // Contains FieldToMatch, TextTransformation, ComparisonOperator, and Size FieldToMatch: + // Contains Data and Type + Updates []*SizeConstraintSetUpdate `type:"list" required:"true"` +} + +// String returns the string representation +func (s UpdateSizeConstraintSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSizeConstraintSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateSizeConstraintSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateSizeConstraintSetInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.SizeConstraintSetId == nil { + invalidParams.Add(request.NewErrParamRequired("SizeConstraintSetId")) + } + if s.SizeConstraintSetId != nil && len(*s.SizeConstraintSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SizeConstraintSetId", 1)) + } + if s.Updates == nil { + invalidParams.Add(request.NewErrParamRequired("Updates")) + } + if s.Updates != nil { + for i, v := range s.Updates { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Updates", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateSizeConstraintSetOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the UpdateSizeConstraintSet request. + // You can also use this value to query the status of the request. For more + // information, see GetChangeTokenStatus. + ChangeToken *string `type:"string"` +} + +// String returns the string representation +func (s UpdateSizeConstraintSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSizeConstraintSetOutput) GoString() string { + return s.String() +} + +// A request to update a SqlInjectionMatchSet. +type UpdateSqlInjectionMatchSetInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + ChangeToken *string `type:"string" required:"true"` + + // The SqlInjectionMatchSetId of the SqlInjectionMatchSet that you want to update. + // SqlInjectionMatchSetId is returned by CreateSqlInjectionMatchSet and by ListSqlInjectionMatchSets. 
+ SqlInjectionMatchSetId *string `min:"1" type:"string" required:"true"` + + // An array of SqlInjectionMatchSetUpdate objects that you want to insert into + // or delete from a SqlInjectionMatchSet. For more information, see the applicable + // data types: + // + // SqlInjectionMatchSetUpdate: Contains Action and SqlInjectionMatchTuple + // SqlInjectionMatchTuple: Contains FieldToMatch and TextTransformation FieldToMatch: + // Contains Data and Type + Updates []*SqlInjectionMatchSetUpdate `type:"list" required:"true"` +} + +// String returns the string representation +func (s UpdateSqlInjectionMatchSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSqlInjectionMatchSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateSqlInjectionMatchSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateSqlInjectionMatchSetInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.SqlInjectionMatchSetId == nil { + invalidParams.Add(request.NewErrParamRequired("SqlInjectionMatchSetId")) + } + if s.SqlInjectionMatchSetId != nil && len(*s.SqlInjectionMatchSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SqlInjectionMatchSetId", 1)) + } + if s.Updates == nil { + invalidParams.Add(request.NewErrParamRequired("Updates")) + } + if s.Updates != nil { + for i, v := range s.Updates { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Updates", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The response to an UpdateSqlInjectionMatchSets request. +type UpdateSqlInjectionMatchSetOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the UpdateSqlInjectionMatchSet request. + // You can also use this value to query the status of the request. For more + // information, see GetChangeTokenStatus. + ChangeToken *string `type:"string"` +} + +// String returns the string representation +func (s UpdateSqlInjectionMatchSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSqlInjectionMatchSetOutput) GoString() string { + return s.String() +} + +type UpdateWebACLInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + ChangeToken *string `type:"string" required:"true"` + + // For the action that is associated with a rule in a WebACL, specifies the + // action that you want AWS WAF to perform when a web request matches all of + // the conditions in a rule. For the default action in a WebACL, specifies the + // action that you want AWS WAF to take when a web request doesn't match all + // of the conditions in any of the rules in a WebACL. + DefaultAction *WafAction `type:"structure"` + + // An array of updates to make to the WebACL. + // + // An array of WebACLUpdate objects that you want to insert into or delete + // from a WebACL. For more information, see the applicable data types: + // + // WebACLUpdate: Contains Action and ActivatedRule ActivatedRule: Contains + // Action, Priority, and RuleId WafAction: Contains Type + Updates []*WebACLUpdate `type:"list"` + + // The WebACLId of the WebACL that you want to update. 
WebACLId is returned + // by CreateWebACL and by ListWebACLs. + WebACLId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateWebACLInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateWebACLInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateWebACLInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateWebACLInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.WebACLId == nil { + invalidParams.Add(request.NewErrParamRequired("WebACLId")) + } + if s.WebACLId != nil && len(*s.WebACLId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WebACLId", 1)) + } + if s.DefaultAction != nil { + if err := s.DefaultAction.Validate(); err != nil { + invalidParams.AddNested("DefaultAction", err.(request.ErrInvalidParams)) + } + } + if s.Updates != nil { + for i, v := range s.Updates { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Updates", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateWebACLOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the UpdateWebACL request. You can + // also use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. + ChangeToken *string `type:"string"` +} + +// String returns the string representation +func (s UpdateWebACLOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateWebACLOutput) GoString() string { + return s.String() +} + +// A request to update an XssMatchSet. +type UpdateXssMatchSetInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + ChangeToken *string `type:"string" required:"true"` + + // An array of XssMatchSetUpdate objects that you want to insert into or delete + // from a XssMatchSet. For more information, see the applicable data types: + // + // XssMatchSetUpdate: Contains Action and XssMatchTuple XssMatchTuple: Contains + // FieldToMatch and TextTransformation FieldToMatch: Contains Data and Type + Updates []*XssMatchSetUpdate `type:"list" required:"true"` + + // The XssMatchSetId of the XssMatchSet that you want to update. XssMatchSetId + // is returned by CreateXssMatchSet and by ListXssMatchSets. + XssMatchSetId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateXssMatchSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateXssMatchSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
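+// The SDK's parameter validation handler runs Validate automatically before a
+// request is sent; calling it directly is a cheap way to surface missing
+// required fields without a network round trip. A minimal sketch, assuming
+// input is an *UpdateXssMatchSetInput built elsewhere:
+//
+//    if err := input.Validate(); err != nil {
+//        // err is a request.ErrInvalidParams describing each invalid field
+//    }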
+func (s *UpdateXssMatchSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateXssMatchSetInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.Updates == nil { + invalidParams.Add(request.NewErrParamRequired("Updates")) + } + if s.XssMatchSetId == nil { + invalidParams.Add(request.NewErrParamRequired("XssMatchSetId")) + } + if s.XssMatchSetId != nil && len(*s.XssMatchSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("XssMatchSetId", 1)) + } + if s.Updates != nil { + for i, v := range s.Updates { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Updates", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The response to an UpdateXssMatchSets request. +type UpdateXssMatchSetOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the UpdateXssMatchSet request. You + // can also use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. + ChangeToken *string `type:"string"` +} + +// String returns the string representation +func (s UpdateXssMatchSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateXssMatchSetOutput) GoString() string { + return s.String() +} + +// For the action that is associated with a rule in a WebACL, specifies the +// action that you want AWS WAF to perform when a web request matches all of +// the conditions in a rule. For the default action in a WebACL, specifies the +// action that you want AWS WAF to take when a web request doesn't match all +// of the conditions in any of the rules in a WebACL. +type WafAction struct { + _ struct{} `type:"structure"` + + // Specifies how you want AWS WAF to respond to requests that match the settings + // in a Rule. Valid settings include the following: + // + // ALLOW: AWS WAF allows requests BLOCK: AWS WAF blocks requests COUNT: AWS + // WAF increments a counter of the requests that match all of the conditions + // in the rule. AWS WAF then continues to inspect the web request based on the + // remaining rules in the web ACL. You can't specify COUNT for the default action + // for a WebACL. + Type *string `type:"string" required:"true" enum:"WafActionType"` +} + +// String returns the string representation +func (s WafAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WafAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *WafAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "WafAction"} + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the Rules that identify the requests that you want to allow, block, +// or count. In a WebACL, you also specify a default action (ALLOW or BLOCK), +// and the action for each Rule that you add to a WebACL, for example, block +// requests from specified IP addresses or block requests from specified referrers. +// You also associate the WebACL with a CloudFront distribution to identify +// the requests that you want AWS WAF to filter. 
If you add more than one Rule +// to a WebACL, a request needs to match only one of the specifications to be +// allowed, blocked, or counted. For more information, see UpdateWebACL. +type WebACL struct { + _ struct{} `type:"structure"` + + // The action to perform if none of the Rules contained in the WebACL match. + // The action is specified by the WafAction object. + DefaultAction *WafAction `type:"structure" required:"true"` + + MetricName *string `type:"string"` + + // A friendly name or description of the WebACL. You can't change the name of + // a WebACL after you create it. + Name *string `min:"1" type:"string"` + + // An array that contains the action for each Rule in a WebACL, the priority + // of the Rule, and the ID of the Rule. + Rules []*ActivatedRule `type:"list" required:"true"` + + // A unique identifier for a WebACL. You use WebACLId to get information about + // a WebACL (see GetWebACL), update a WebACL (see UpdateWebACL), and delete + // a WebACL from AWS WAF (see DeleteWebACL). + // + // WebACLId is returned by CreateWebACL and by ListWebACLs. + WebACLId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s WebACL) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WebACL) GoString() string { + return s.String() +} + +// Contains the identifier and the name or description of the WebACL. +type WebACLSummary struct { + _ struct{} `type:"structure"` + + // A friendly name or description of the WebACL. You can't change the name of + // a WebACL after you create it. + Name *string `min:"1" type:"string" required:"true"` + + // A unique identifier for a WebACL. You use WebACLId to get information about + // a WebACL (see GetWebACL), update a WebACL (see UpdateWebACL), and delete + // a WebACL from AWS WAF (see DeleteWebACL). + // + // WebACLId is returned by CreateWebACL and by ListWebACLs. + WebACLId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s WebACLSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WebACLSummary) GoString() string { + return s.String() +} + +// Specifies whether to insert a Rule into or delete a Rule from a WebACL. +type WebACLUpdate struct { + _ struct{} `type:"structure"` + + // Specifies whether to insert a Rule into or delete a Rule from a WebACL. + Action *string `type:"string" required:"true" enum:"ChangeAction"` + + // The ActivatedRule object in an UpdateWebACL request specifies a Rule that + // you want to insert or delete, the priority of the Rule in the WebACL, and + // the action that you want AWS WAF to take when a web request matches the Rule + // (ALLOW, BLOCK, or COUNT). + // + // To specify whether to insert or delete a Rule, use the Action parameter + // in the WebACLUpdate data type. + ActivatedRule *ActivatedRule `type:"structure" required:"true"` +} + +// String returns the string representation +func (s WebACLUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WebACLUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *WebACLUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "WebACLUpdate"} + if s.Action == nil { + invalidParams.Add(request.NewErrParamRequired("Action")) + } + if s.ActivatedRule == nil { + invalidParams.Add(request.NewErrParamRequired("ActivatedRule")) + } + if s.ActivatedRule != nil { + if err := s.ActivatedRule.Validate(); err != nil { + invalidParams.AddNested("ActivatedRule", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains XssMatchTuple objects, which specify the parts +// of web requests that you want AWS WAF to inspect for cross-site scripting +// attacks and, if you want AWS WAF to inspect a header, the name of the header. +// If a XssMatchSet contains more than one XssMatchTuple object, a request needs +// to include cross-site scripting attacks in only one of the specified parts +// of the request to be considered a match. +type XssMatchSet struct { + _ struct{} `type:"structure"` + + // The name, if any, of the XssMatchSet. + Name *string `min:"1" type:"string"` + + // A unique identifier for an XssMatchSet. You use XssMatchSetId to get information + // about an XssMatchSet (see GetXssMatchSet), update an XssMatchSet (see UpdateXssMatchSet), + // insert an XssMatchSet into a Rule or delete one from a Rule (see UpdateRule), + // and delete an XssMatchSet from AWS WAF (see DeleteXssMatchSet). + // + // XssMatchSetId is returned by CreateXssMatchSet and by ListXssMatchSets. + XssMatchSetId *string `min:"1" type:"string" required:"true"` + + // Specifies the parts of web requests that you want to inspect for cross-site + // scripting attacks. + XssMatchTuples []*XssMatchTuple `type:"list" required:"true"` +} + +// String returns the string representation +func (s XssMatchSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s XssMatchSet) GoString() string { + return s.String() +} + +// The Id and Name of an XssMatchSet. +type XssMatchSetSummary struct { + _ struct{} `type:"structure"` + + // The name of the XssMatchSet, if any, specified by Id. + Name *string `min:"1" type:"string" required:"true"` + + // A unique identifier for an XssMatchSet. You use XssMatchSetId to get information + // about a XssMatchSet (see GetXssMatchSet), update an XssMatchSet (see UpdateXssMatchSet), + // insert an XssMatchSet into a Rule or delete one from a Rule (see UpdateRule), + // and delete an XssMatchSet from AWS WAF (see DeleteXssMatchSet). + // + // XssMatchSetId is returned by CreateXssMatchSet and by ListXssMatchSets. + XssMatchSetId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s XssMatchSetSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s XssMatchSetSummary) GoString() string { + return s.String() +} + +// Specifies the part of a web request that you want to inspect for cross-site +// scripting attacks and indicates whether you want to add the specification +// to an XssMatchSet or delete it from an XssMatchSet. +type XssMatchSetUpdate struct { + _ struct{} `type:"structure"` + + // Specify INSERT to add a XssMatchSetUpdate to an XssMatchSet. Use DELETE to + // remove a XssMatchSetUpdate from an XssMatchSet. 
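+	//
+	// In code these values are available as the ChangeActionInsert and
+	// ChangeActionDelete constants defined later in this package, e.g.
+	// Action: aws.String(waf.ChangeActionInsert).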
+ Action *string `type:"string" required:"true" enum:"ChangeAction"` + + // Specifies the part of a web request that you want AWS WAF to inspect for + // cross-site scripting attacks and, if you want AWS WAF to inspect a header, + // the name of the header. + XssMatchTuple *XssMatchTuple `type:"structure" required:"true"` +} + +// String returns the string representation +func (s XssMatchSetUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s XssMatchSetUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *XssMatchSetUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "XssMatchSetUpdate"} + if s.Action == nil { + invalidParams.Add(request.NewErrParamRequired("Action")) + } + if s.XssMatchTuple == nil { + invalidParams.Add(request.NewErrParamRequired("XssMatchTuple")) + } + if s.XssMatchTuple != nil { + if err := s.XssMatchTuple.Validate(); err != nil { + invalidParams.AddNested("XssMatchTuple", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Specifies the part of a web request that you want AWS WAF to inspect for +// cross-site scripting attacks and, if you want AWS WAF to inspect a header, +// the name of the header. +type XssMatchTuple struct { + _ struct{} `type:"structure"` + + // Specifies where in a web request to look for TargetString. + FieldToMatch *FieldToMatch `type:"structure" required:"true"` + + // Text transformations eliminate some of the unusual formatting that attackers + // use in web requests in an effort to bypass AWS WAF. If you specify a transformation, + // AWS WAF performs the transformation on FieldToMatch before inspecting a request + // for a match. + // + // CMD_LINE + // + // When you're concerned that attackers are injecting an operating system commandline + // command and using unusual formatting to disguise some or all of the command, + // use this option to perform the following transformations: + // + // Delete the following characters: \ " ' ^ Delete spaces before the following + // characters: / ( Replace the following characters with a space: , ; Replace + // multiple spaces with one space Convert uppercase letters (A-Z) to lowercase + // (a-z) COMPRESS_WHITE_SPACE + // + // Use this option to replace the following characters with a space character + // (decimal 32): + // + // \f, formfeed, decimal 12 \t, tab, decimal 9 \n, newline, decimal 10 \r, + // carriage return, decimal 13 \v, vertical tab, decimal 11 non-breaking space, + // decimal 160 COMPRESS_WHITE_SPACE also replaces multiple spaces with one + // space. + // + // HTML_ENTITY_DECODE + // + // Use this option to replace HTML-encoded characters with unencoded characters. + // HTML_ENTITY_DECODE performs the following operations: + // + // Replaces (ampersand)quot; with " Replaces (ampersand)nbsp; with a non-breaking + // space, decimal 160 Replaces (ampersand)lt; with a "less than" symbol Replaces + // (ampersand)gt; with > Replaces characters that are represented in hexadecimal + // format, (ampersand)#xhhhh;, with the corresponding characters Replaces characters + // that are represented in decimal format, (ampersand)#nnnn;, with the corresponding + // characters LOWERCASE + // + // Use this option to convert uppercase letters (A-Z) to lowercase (a-z). + // + // URL_DECODE + // + // Use this option to decode a URL-encoded value. 
+ // + // NONE + // + // Specify NONE if you don't want to perform any text transformations. + TextTransformation *string `type:"string" required:"true" enum:"TextTransformation"` +} + +// String returns the string representation +func (s XssMatchTuple) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s XssMatchTuple) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *XssMatchTuple) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "XssMatchTuple"} + if s.FieldToMatch == nil { + invalidParams.Add(request.NewErrParamRequired("FieldToMatch")) + } + if s.TextTransformation == nil { + invalidParams.Add(request.NewErrParamRequired("TextTransformation")) + } + if s.FieldToMatch != nil { + if err := s.FieldToMatch.Validate(); err != nil { + invalidParams.AddNested("FieldToMatch", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +const ( + // @enum ChangeAction + ChangeActionInsert = "INSERT" + // @enum ChangeAction + ChangeActionDelete = "DELETE" +) + +const ( + // @enum ChangeTokenStatus + ChangeTokenStatusProvisioned = "PROVISIONED" + // @enum ChangeTokenStatus + ChangeTokenStatusPending = "PENDING" + // @enum ChangeTokenStatus + ChangeTokenStatusInsync = "INSYNC" +) + +const ( + // @enum ComparisonOperator + ComparisonOperatorEq = "EQ" + // @enum ComparisonOperator + ComparisonOperatorNe = "NE" + // @enum ComparisonOperator + ComparisonOperatorLe = "LE" + // @enum ComparisonOperator + ComparisonOperatorLt = "LT" + // @enum ComparisonOperator + ComparisonOperatorGe = "GE" + // @enum ComparisonOperator + ComparisonOperatorGt = "GT" +) + +const ( + // @enum IPSetDescriptorType + IPSetDescriptorTypeIpv4 = "IPV4" +) + +const ( + // @enum MatchFieldType + MatchFieldTypeUri = "URI" + // @enum MatchFieldType + MatchFieldTypeQueryString = "QUERY_STRING" + // @enum MatchFieldType + MatchFieldTypeHeader = "HEADER" + // @enum MatchFieldType + MatchFieldTypeMethod = "METHOD" + // @enum MatchFieldType + MatchFieldTypeBody = "BODY" +) + +const ( + // @enum ParameterExceptionField + ParameterExceptionFieldChangeAction = "CHANGE_ACTION" + // @enum ParameterExceptionField + ParameterExceptionFieldWafAction = "WAF_ACTION" + // @enum ParameterExceptionField + ParameterExceptionFieldPredicateType = "PREDICATE_TYPE" + // @enum ParameterExceptionField + ParameterExceptionFieldIpsetType = "IPSET_TYPE" + // @enum ParameterExceptionField + ParameterExceptionFieldByteMatchFieldType = "BYTE_MATCH_FIELD_TYPE" + // @enum ParameterExceptionField + ParameterExceptionFieldSqlInjectionMatchFieldType = "SQL_INJECTION_MATCH_FIELD_TYPE" + // @enum ParameterExceptionField + ParameterExceptionFieldByteMatchTextTransformation = "BYTE_MATCH_TEXT_TRANSFORMATION" + // @enum ParameterExceptionField + ParameterExceptionFieldByteMatchPositionalConstraint = "BYTE_MATCH_POSITIONAL_CONSTRAINT" + // @enum ParameterExceptionField + ParameterExceptionFieldSizeConstraintComparisonOperator = "SIZE_CONSTRAINT_COMPARISON_OPERATOR" +) + +const ( + // @enum ParameterExceptionReason + ParameterExceptionReasonInvalidOption = "INVALID_OPTION" + // @enum ParameterExceptionReason + ParameterExceptionReasonIllegalCombination = "ILLEGAL_COMBINATION" +) + +const ( + // @enum PositionalConstraint + PositionalConstraintExactly = "EXACTLY" + // @enum PositionalConstraint + PositionalConstraintStartsWith = "STARTS_WITH" + // @enum 
PositionalConstraint + PositionalConstraintEndsWith = "ENDS_WITH" + // @enum PositionalConstraint + PositionalConstraintContains = "CONTAINS" + // @enum PositionalConstraint + PositionalConstraintContainsWord = "CONTAINS_WORD" +) + +const ( + // @enum PredicateType + PredicateTypeIpmatch = "IPMatch" + // @enum PredicateType + PredicateTypeByteMatch = "ByteMatch" + // @enum PredicateType + PredicateTypeSqlInjectionMatch = "SqlInjectionMatch" + // @enum PredicateType + PredicateTypeSizeConstraint = "SizeConstraint" + // @enum PredicateType + PredicateTypeXssMatch = "XssMatch" +) + +const ( + // @enum TextTransformation + TextTransformationNone = "NONE" + // @enum TextTransformation + TextTransformationCompressWhiteSpace = "COMPRESS_WHITE_SPACE" + // @enum TextTransformation + TextTransformationHtmlEntityDecode = "HTML_ENTITY_DECODE" + // @enum TextTransformation + TextTransformationLowercase = "LOWERCASE" + // @enum TextTransformation + TextTransformationCmdLine = "CMD_LINE" + // @enum TextTransformation + TextTransformationUrlDecode = "URL_DECODE" +) + +const ( + // @enum WafActionType + WafActionTypeBlock = "BLOCK" + // @enum WafActionType + WafActionTypeAllow = "ALLOW" + // @enum WafActionType + WafActionTypeCount = "COUNT" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/waf/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/waf/examples_test.go new file mode 100644 index 000000000..d5aed3d0c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/waf/examples_test.go @@ -0,0 +1,868 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package waf_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/waf" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleWAF_CreateByteMatchSet() { + svc := waf.New(session.New()) + + params := &waf.CreateByteMatchSetInput{ + ChangeToken: aws.String("ChangeToken"), // Required + Name: aws.String("ResourceName"), // Required + } + resp, err := svc.CreateByteMatchSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_CreateIPSet() { + svc := waf.New(session.New()) + + params := &waf.CreateIPSetInput{ + ChangeToken: aws.String("ChangeToken"), // Required + Name: aws.String("ResourceName"), // Required + } + resp, err := svc.CreateIPSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_CreateRule() { + svc := waf.New(session.New()) + + params := &waf.CreateRuleInput{ + ChangeToken: aws.String("ChangeToken"), // Required + MetricName: aws.String("MetricName"), // Required + Name: aws.String("ResourceName"), // Required + } + resp, err := svc.CreateRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleWAF_CreateSizeConstraintSet() { + svc := waf.New(session.New()) + + params := &waf.CreateSizeConstraintSetInput{ + ChangeToken: aws.String("ChangeToken"), // Required + Name: aws.String("ResourceName"), // Required + } + resp, err := svc.CreateSizeConstraintSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_CreateSqlInjectionMatchSet() { + svc := waf.New(session.New()) + + params := &waf.CreateSqlInjectionMatchSetInput{ + ChangeToken: aws.String("ChangeToken"), // Required + Name: aws.String("ResourceName"), // Required + } + resp, err := svc.CreateSqlInjectionMatchSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_CreateWebACL() { + svc := waf.New(session.New()) + + params := &waf.CreateWebACLInput{ + ChangeToken: aws.String("ChangeToken"), // Required + DefaultAction: &waf.WafAction{ // Required + Type: aws.String("WafActionType"), // Required + }, + MetricName: aws.String("MetricName"), // Required + Name: aws.String("ResourceName"), // Required + } + resp, err := svc.CreateWebACL(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_CreateXssMatchSet() { + svc := waf.New(session.New()) + + params := &waf.CreateXssMatchSetInput{ + ChangeToken: aws.String("ChangeToken"), // Required + Name: aws.String("ResourceName"), // Required + } + resp, err := svc.CreateXssMatchSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_DeleteByteMatchSet() { + svc := waf.New(session.New()) + + params := &waf.DeleteByteMatchSetInput{ + ByteMatchSetId: aws.String("ResourceId"), // Required + ChangeToken: aws.String("ChangeToken"), // Required + } + resp, err := svc.DeleteByteMatchSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_DeleteIPSet() { + svc := waf.New(session.New()) + + params := &waf.DeleteIPSetInput{ + ChangeToken: aws.String("ChangeToken"), // Required + IPSetId: aws.String("ResourceId"), // Required + } + resp, err := svc.DeleteIPSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_DeleteRule() { + svc := waf.New(session.New()) + + params := &waf.DeleteRuleInput{ + ChangeToken: aws.String("ChangeToken"), // Required + RuleId: aws.String("ResourceId"), // Required + } + resp, err := svc.DeleteRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleWAF_DeleteSizeConstraintSet() { + svc := waf.New(session.New()) + + params := &waf.DeleteSizeConstraintSetInput{ + ChangeToken: aws.String("ChangeToken"), // Required + SizeConstraintSetId: aws.String("ResourceId"), // Required + } + resp, err := svc.DeleteSizeConstraintSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_DeleteSqlInjectionMatchSet() { + svc := waf.New(session.New()) + + params := &waf.DeleteSqlInjectionMatchSetInput{ + ChangeToken: aws.String("ChangeToken"), // Required + SqlInjectionMatchSetId: aws.String("ResourceId"), // Required + } + resp, err := svc.DeleteSqlInjectionMatchSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_DeleteWebACL() { + svc := waf.New(session.New()) + + params := &waf.DeleteWebACLInput{ + ChangeToken: aws.String("ChangeToken"), // Required + WebACLId: aws.String("ResourceId"), // Required + } + resp, err := svc.DeleteWebACL(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_DeleteXssMatchSet() { + svc := waf.New(session.New()) + + params := &waf.DeleteXssMatchSetInput{ + ChangeToken: aws.String("ChangeToken"), // Required + XssMatchSetId: aws.String("ResourceId"), // Required + } + resp, err := svc.DeleteXssMatchSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_GetByteMatchSet() { + svc := waf.New(session.New()) + + params := &waf.GetByteMatchSetInput{ + ByteMatchSetId: aws.String("ResourceId"), // Required + } + resp, err := svc.GetByteMatchSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_GetChangeToken() { + svc := waf.New(session.New()) + + var params *waf.GetChangeTokenInput + resp, err := svc.GetChangeToken(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_GetChangeTokenStatus() { + svc := waf.New(session.New()) + + params := &waf.GetChangeTokenStatusInput{ + ChangeToken: aws.String("ChangeToken"), // Required + } + resp, err := svc.GetChangeTokenStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleWAF_GetIPSet() { + svc := waf.New(session.New()) + + params := &waf.GetIPSetInput{ + IPSetId: aws.String("ResourceId"), // Required + } + resp, err := svc.GetIPSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_GetRule() { + svc := waf.New(session.New()) + + params := &waf.GetRuleInput{ + RuleId: aws.String("ResourceId"), // Required + } + resp, err := svc.GetRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_GetSampledRequests() { + svc := waf.New(session.New()) + + params := &waf.GetSampledRequestsInput{ + MaxItems: aws.Int64(1), // Required + RuleId: aws.String("ResourceId"), // Required + TimeWindow: &waf.TimeWindow{ // Required + EndTime: aws.Time(time.Now()), // Required + StartTime: aws.Time(time.Now()), // Required + }, + WebAclId: aws.String("ResourceId"), // Required + } + resp, err := svc.GetSampledRequests(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_GetSizeConstraintSet() { + svc := waf.New(session.New()) + + params := &waf.GetSizeConstraintSetInput{ + SizeConstraintSetId: aws.String("ResourceId"), // Required + } + resp, err := svc.GetSizeConstraintSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_GetSqlInjectionMatchSet() { + svc := waf.New(session.New()) + + params := &waf.GetSqlInjectionMatchSetInput{ + SqlInjectionMatchSetId: aws.String("ResourceId"), // Required + } + resp, err := svc.GetSqlInjectionMatchSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_GetWebACL() { + svc := waf.New(session.New()) + + params := &waf.GetWebACLInput{ + WebACLId: aws.String("ResourceId"), // Required + } + resp, err := svc.GetWebACL(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_GetXssMatchSet() { + svc := waf.New(session.New()) + + params := &waf.GetXssMatchSetInput{ + XssMatchSetId: aws.String("ResourceId"), // Required + } + resp, err := svc.GetXssMatchSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleWAF_ListByteMatchSets() { + svc := waf.New(session.New()) + + params := &waf.ListByteMatchSetsInput{ + Limit: aws.Int64(1), // Required + NextMarker: aws.String("NextMarker"), + } + resp, err := svc.ListByteMatchSets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_ListIPSets() { + svc := waf.New(session.New()) + + params := &waf.ListIPSetsInput{ + Limit: aws.Int64(1), // Required + NextMarker: aws.String("NextMarker"), + } + resp, err := svc.ListIPSets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_ListRules() { + svc := waf.New(session.New()) + + params := &waf.ListRulesInput{ + Limit: aws.Int64(1), // Required + NextMarker: aws.String("NextMarker"), + } + resp, err := svc.ListRules(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_ListSizeConstraintSets() { + svc := waf.New(session.New()) + + params := &waf.ListSizeConstraintSetsInput{ + Limit: aws.Int64(1), // Required + NextMarker: aws.String("NextMarker"), + } + resp, err := svc.ListSizeConstraintSets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_ListSqlInjectionMatchSets() { + svc := waf.New(session.New()) + + params := &waf.ListSqlInjectionMatchSetsInput{ + Limit: aws.Int64(1), // Required + NextMarker: aws.String("NextMarker"), + } + resp, err := svc.ListSqlInjectionMatchSets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_ListWebACLs() { + svc := waf.New(session.New()) + + params := &waf.ListWebACLsInput{ + Limit: aws.Int64(1), // Required + NextMarker: aws.String("NextMarker"), + } + resp, err := svc.ListWebACLs(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_ListXssMatchSets() { + svc := waf.New(session.New()) + + params := &waf.ListXssMatchSetsInput{ + Limit: aws.Int64(1), // Required + NextMarker: aws.String("NextMarker"), + } + resp, err := svc.ListXssMatchSets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleWAF_UpdateByteMatchSet() { + svc := waf.New(session.New()) + + params := &waf.UpdateByteMatchSetInput{ + ByteMatchSetId: aws.String("ResourceId"), // Required + ChangeToken: aws.String("ChangeToken"), // Required + Updates: []*waf.ByteMatchSetUpdate{ // Required + { // Required + Action: aws.String("ChangeAction"), // Required + ByteMatchTuple: &waf.ByteMatchTuple{ // Required + FieldToMatch: &waf.FieldToMatch{ // Required + Type: aws.String("MatchFieldType"), // Required + Data: aws.String("MatchFieldData"), + }, + PositionalConstraint: aws.String("PositionalConstraint"), // Required + TargetString: []byte("PAYLOAD"), // Required + TextTransformation: aws.String("TextTransformation"), // Required + }, + }, + // More values... + }, + } + resp, err := svc.UpdateByteMatchSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_UpdateIPSet() { + svc := waf.New(session.New()) + + params := &waf.UpdateIPSetInput{ + ChangeToken: aws.String("ChangeToken"), // Required + IPSetId: aws.String("ResourceId"), // Required + Updates: []*waf.IPSetUpdate{ // Required + { // Required + Action: aws.String("ChangeAction"), // Required + IPSetDescriptor: &waf.IPSetDescriptor{ // Required + Type: aws.String("IPSetDescriptorType"), // Required + Value: aws.String("IPSetDescriptorValue"), // Required + }, + }, + // More values... + }, + } + resp, err := svc.UpdateIPSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_UpdateRule() { + svc := waf.New(session.New()) + + params := &waf.UpdateRuleInput{ + ChangeToken: aws.String("ChangeToken"), // Required + RuleId: aws.String("ResourceId"), // Required + Updates: []*waf.RuleUpdate{ // Required + { // Required + Action: aws.String("ChangeAction"), // Required + Predicate: &waf.Predicate{ // Required + DataId: aws.String("ResourceId"), // Required + Negated: aws.Bool(true), // Required + Type: aws.String("PredicateType"), // Required + }, + }, + // More values... + }, + } + resp, err := svc.UpdateRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_UpdateSizeConstraintSet() { + svc := waf.New(session.New()) + + params := &waf.UpdateSizeConstraintSetInput{ + ChangeToken: aws.String("ChangeToken"), // Required + SizeConstraintSetId: aws.String("ResourceId"), // Required + Updates: []*waf.SizeConstraintSetUpdate{ // Required + { // Required + Action: aws.String("ChangeAction"), // Required + SizeConstraint: &waf.SizeConstraint{ // Required + ComparisonOperator: aws.String("ComparisonOperator"), // Required + FieldToMatch: &waf.FieldToMatch{ // Required + Type: aws.String("MatchFieldType"), // Required + Data: aws.String("MatchFieldData"), + }, + Size: aws.Int64(1), // Required + TextTransformation: aws.String("TextTransformation"), // Required + }, + }, + // More values... + }, + } + resp, err := svc.UpdateSizeConstraintSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_UpdateSqlInjectionMatchSet() { + svc := waf.New(session.New()) + + params := &waf.UpdateSqlInjectionMatchSetInput{ + ChangeToken: aws.String("ChangeToken"), // Required + SqlInjectionMatchSetId: aws.String("ResourceId"), // Required + Updates: []*waf.SqlInjectionMatchSetUpdate{ // Required + { // Required + Action: aws.String("ChangeAction"), // Required + SqlInjectionMatchTuple: &waf.SqlInjectionMatchTuple{ // Required + FieldToMatch: &waf.FieldToMatch{ // Required + Type: aws.String("MatchFieldType"), // Required + Data: aws.String("MatchFieldData"), + }, + TextTransformation: aws.String("TextTransformation"), // Required + }, + }, + // More values... + }, + } + resp, err := svc.UpdateSqlInjectionMatchSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_UpdateWebACL() { + svc := waf.New(session.New()) + + params := &waf.UpdateWebACLInput{ + ChangeToken: aws.String("ChangeToken"), // Required + WebACLId: aws.String("ResourceId"), // Required + DefaultAction: &waf.WafAction{ + Type: aws.String("WafActionType"), // Required + }, + Updates: []*waf.WebACLUpdate{ + { // Required + Action: aws.String("ChangeAction"), // Required + ActivatedRule: &waf.ActivatedRule{ // Required + Action: &waf.WafAction{ // Required + Type: aws.String("WafActionType"), // Required + }, + Priority: aws.Int64(1), // Required + RuleId: aws.String("ResourceId"), // Required + }, + }, + // More values... + }, + } + resp, err := svc.UpdateWebACL(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_UpdateXssMatchSet() { + svc := waf.New(session.New()) + + params := &waf.UpdateXssMatchSetInput{ + ChangeToken: aws.String("ChangeToken"), // Required + Updates: []*waf.XssMatchSetUpdate{ // Required + { // Required + Action: aws.String("ChangeAction"), // Required + XssMatchTuple: &waf.XssMatchTuple{ // Required + FieldToMatch: &waf.FieldToMatch{ // Required + Type: aws.String("MatchFieldType"), // Required + Data: aws.String("MatchFieldData"), + }, + TextTransformation: aws.String("TextTransformation"), // Required + }, + }, + // More values... + }, + XssMatchSetId: aws.String("ResourceId"), // Required + } + resp, err := svc.UpdateXssMatchSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/waf/service.go b/vendor/github.com/aws/aws-sdk-go/service/waf/service.go new file mode 100644 index 000000000..a4a780f2c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/waf/service.go @@ -0,0 +1,91 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package waf + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// This is the AWS WAF API Reference. 
This guide is for developers who need
+// detailed information about the AWS WAF API actions, data types, and errors.
+// For detailed information about AWS WAF features and an overview of how to
+// use the AWS WAF API, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/).
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type WAF struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "waf"
+
+// New creates a new instance of the WAF client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create a WAF client from just a session.
+//     svc := waf.New(mySession)
+//
+//     // Create a WAF client with additional configuration
+//     svc := waf.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *WAF {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *WAF {
+	svc := &WAF{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   ServiceName,
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "2015-08-24",
+				JSONVersion:   "1.1",
+				TargetPrefix:  "AWSWAF_20150824",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+	svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler)
+	svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler)
+	svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler)
+	svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for a WAF operation and runs any
+// custom request initialization.
+func (c *WAF) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	// Run custom request initialization if present
+	if initRequest != nil {
+		initRequest(req)
+	}
+
+	return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/waf/wafiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/waf/wafiface/interface.go
new file mode 100644
index 000000000..071b191ef
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/waf/wafiface/interface.go
@@ -0,0 +1,166 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package wafiface provides an interface for the AWS WAF.
+package wafiface
+
+import (
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/service/waf"
+)
+
+// WAFAPI is the interface type for waf.WAF.
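+//
+// Code written against WAFAPI instead of the concrete *waf.WAF can be unit
+// tested with a stub. A minimal sketch, assuming mockWAFClient is your own
+// test type (not part of this package): embed WAFAPI and override only the
+// methods the test exercises.
+//
+//    type mockWAFClient struct {
+//        wafiface.WAFAPI
+//    }
+//
+//    func (m *mockWAFClient) GetWebACL(in *waf.GetWebACLInput) (*waf.GetWebACLOutput, error) {
+//        // Return canned data instead of calling AWS.
+//        return &waf.GetWebACLOutput{}, nil
+//    }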
+type WAFAPI interface { + CreateByteMatchSetRequest(*waf.CreateByteMatchSetInput) (*request.Request, *waf.CreateByteMatchSetOutput) + + CreateByteMatchSet(*waf.CreateByteMatchSetInput) (*waf.CreateByteMatchSetOutput, error) + + CreateIPSetRequest(*waf.CreateIPSetInput) (*request.Request, *waf.CreateIPSetOutput) + + CreateIPSet(*waf.CreateIPSetInput) (*waf.CreateIPSetOutput, error) + + CreateRuleRequest(*waf.CreateRuleInput) (*request.Request, *waf.CreateRuleOutput) + + CreateRule(*waf.CreateRuleInput) (*waf.CreateRuleOutput, error) + + CreateSizeConstraintSetRequest(*waf.CreateSizeConstraintSetInput) (*request.Request, *waf.CreateSizeConstraintSetOutput) + + CreateSizeConstraintSet(*waf.CreateSizeConstraintSetInput) (*waf.CreateSizeConstraintSetOutput, error) + + CreateSqlInjectionMatchSetRequest(*waf.CreateSqlInjectionMatchSetInput) (*request.Request, *waf.CreateSqlInjectionMatchSetOutput) + + CreateSqlInjectionMatchSet(*waf.CreateSqlInjectionMatchSetInput) (*waf.CreateSqlInjectionMatchSetOutput, error) + + CreateWebACLRequest(*waf.CreateWebACLInput) (*request.Request, *waf.CreateWebACLOutput) + + CreateWebACL(*waf.CreateWebACLInput) (*waf.CreateWebACLOutput, error) + + CreateXssMatchSetRequest(*waf.CreateXssMatchSetInput) (*request.Request, *waf.CreateXssMatchSetOutput) + + CreateXssMatchSet(*waf.CreateXssMatchSetInput) (*waf.CreateXssMatchSetOutput, error) + + DeleteByteMatchSetRequest(*waf.DeleteByteMatchSetInput) (*request.Request, *waf.DeleteByteMatchSetOutput) + + DeleteByteMatchSet(*waf.DeleteByteMatchSetInput) (*waf.DeleteByteMatchSetOutput, error) + + DeleteIPSetRequest(*waf.DeleteIPSetInput) (*request.Request, *waf.DeleteIPSetOutput) + + DeleteIPSet(*waf.DeleteIPSetInput) (*waf.DeleteIPSetOutput, error) + + DeleteRuleRequest(*waf.DeleteRuleInput) (*request.Request, *waf.DeleteRuleOutput) + + DeleteRule(*waf.DeleteRuleInput) (*waf.DeleteRuleOutput, error) + + DeleteSizeConstraintSetRequest(*waf.DeleteSizeConstraintSetInput) (*request.Request, *waf.DeleteSizeConstraintSetOutput) + + DeleteSizeConstraintSet(*waf.DeleteSizeConstraintSetInput) (*waf.DeleteSizeConstraintSetOutput, error) + + DeleteSqlInjectionMatchSetRequest(*waf.DeleteSqlInjectionMatchSetInput) (*request.Request, *waf.DeleteSqlInjectionMatchSetOutput) + + DeleteSqlInjectionMatchSet(*waf.DeleteSqlInjectionMatchSetInput) (*waf.DeleteSqlInjectionMatchSetOutput, error) + + DeleteWebACLRequest(*waf.DeleteWebACLInput) (*request.Request, *waf.DeleteWebACLOutput) + + DeleteWebACL(*waf.DeleteWebACLInput) (*waf.DeleteWebACLOutput, error) + + DeleteXssMatchSetRequest(*waf.DeleteXssMatchSetInput) (*request.Request, *waf.DeleteXssMatchSetOutput) + + DeleteXssMatchSet(*waf.DeleteXssMatchSetInput) (*waf.DeleteXssMatchSetOutput, error) + + GetByteMatchSetRequest(*waf.GetByteMatchSetInput) (*request.Request, *waf.GetByteMatchSetOutput) + + GetByteMatchSet(*waf.GetByteMatchSetInput) (*waf.GetByteMatchSetOutput, error) + + GetChangeTokenRequest(*waf.GetChangeTokenInput) (*request.Request, *waf.GetChangeTokenOutput) + + GetChangeToken(*waf.GetChangeTokenInput) (*waf.GetChangeTokenOutput, error) + + GetChangeTokenStatusRequest(*waf.GetChangeTokenStatusInput) (*request.Request, *waf.GetChangeTokenStatusOutput) + + GetChangeTokenStatus(*waf.GetChangeTokenStatusInput) (*waf.GetChangeTokenStatusOutput, error) + + GetIPSetRequest(*waf.GetIPSetInput) (*request.Request, *waf.GetIPSetOutput) + + GetIPSet(*waf.GetIPSetInput) (*waf.GetIPSetOutput, error) + + GetRuleRequest(*waf.GetRuleInput) (*request.Request, *waf.GetRuleOutput) + + 
GetRule(*waf.GetRuleInput) (*waf.GetRuleOutput, error) + + GetSampledRequestsRequest(*waf.GetSampledRequestsInput) (*request.Request, *waf.GetSampledRequestsOutput) + + GetSampledRequests(*waf.GetSampledRequestsInput) (*waf.GetSampledRequestsOutput, error) + + GetSizeConstraintSetRequest(*waf.GetSizeConstraintSetInput) (*request.Request, *waf.GetSizeConstraintSetOutput) + + GetSizeConstraintSet(*waf.GetSizeConstraintSetInput) (*waf.GetSizeConstraintSetOutput, error) + + GetSqlInjectionMatchSetRequest(*waf.GetSqlInjectionMatchSetInput) (*request.Request, *waf.GetSqlInjectionMatchSetOutput) + + GetSqlInjectionMatchSet(*waf.GetSqlInjectionMatchSetInput) (*waf.GetSqlInjectionMatchSetOutput, error) + + GetWebACLRequest(*waf.GetWebACLInput) (*request.Request, *waf.GetWebACLOutput) + + GetWebACL(*waf.GetWebACLInput) (*waf.GetWebACLOutput, error) + + GetXssMatchSetRequest(*waf.GetXssMatchSetInput) (*request.Request, *waf.GetXssMatchSetOutput) + + GetXssMatchSet(*waf.GetXssMatchSetInput) (*waf.GetXssMatchSetOutput, error) + + ListByteMatchSetsRequest(*waf.ListByteMatchSetsInput) (*request.Request, *waf.ListByteMatchSetsOutput) + + ListByteMatchSets(*waf.ListByteMatchSetsInput) (*waf.ListByteMatchSetsOutput, error) + + ListIPSetsRequest(*waf.ListIPSetsInput) (*request.Request, *waf.ListIPSetsOutput) + + ListIPSets(*waf.ListIPSetsInput) (*waf.ListIPSetsOutput, error) + + ListRulesRequest(*waf.ListRulesInput) (*request.Request, *waf.ListRulesOutput) + + ListRules(*waf.ListRulesInput) (*waf.ListRulesOutput, error) + + ListSizeConstraintSetsRequest(*waf.ListSizeConstraintSetsInput) (*request.Request, *waf.ListSizeConstraintSetsOutput) + + ListSizeConstraintSets(*waf.ListSizeConstraintSetsInput) (*waf.ListSizeConstraintSetsOutput, error) + + ListSqlInjectionMatchSetsRequest(*waf.ListSqlInjectionMatchSetsInput) (*request.Request, *waf.ListSqlInjectionMatchSetsOutput) + + ListSqlInjectionMatchSets(*waf.ListSqlInjectionMatchSetsInput) (*waf.ListSqlInjectionMatchSetsOutput, error) + + ListWebACLsRequest(*waf.ListWebACLsInput) (*request.Request, *waf.ListWebACLsOutput) + + ListWebACLs(*waf.ListWebACLsInput) (*waf.ListWebACLsOutput, error) + + ListXssMatchSetsRequest(*waf.ListXssMatchSetsInput) (*request.Request, *waf.ListXssMatchSetsOutput) + + ListXssMatchSets(*waf.ListXssMatchSetsInput) (*waf.ListXssMatchSetsOutput, error) + + UpdateByteMatchSetRequest(*waf.UpdateByteMatchSetInput) (*request.Request, *waf.UpdateByteMatchSetOutput) + + UpdateByteMatchSet(*waf.UpdateByteMatchSetInput) (*waf.UpdateByteMatchSetOutput, error) + + UpdateIPSetRequest(*waf.UpdateIPSetInput) (*request.Request, *waf.UpdateIPSetOutput) + + UpdateIPSet(*waf.UpdateIPSetInput) (*waf.UpdateIPSetOutput, error) + + UpdateRuleRequest(*waf.UpdateRuleInput) (*request.Request, *waf.UpdateRuleOutput) + + UpdateRule(*waf.UpdateRuleInput) (*waf.UpdateRuleOutput, error) + + UpdateSizeConstraintSetRequest(*waf.UpdateSizeConstraintSetInput) (*request.Request, *waf.UpdateSizeConstraintSetOutput) + + UpdateSizeConstraintSet(*waf.UpdateSizeConstraintSetInput) (*waf.UpdateSizeConstraintSetOutput, error) + + UpdateSqlInjectionMatchSetRequest(*waf.UpdateSqlInjectionMatchSetInput) (*request.Request, *waf.UpdateSqlInjectionMatchSetOutput) + + UpdateSqlInjectionMatchSet(*waf.UpdateSqlInjectionMatchSetInput) (*waf.UpdateSqlInjectionMatchSetOutput, error) + + UpdateWebACLRequest(*waf.UpdateWebACLInput) (*request.Request, *waf.UpdateWebACLOutput) + + UpdateWebACL(*waf.UpdateWebACLInput) (*waf.UpdateWebACLOutput, error) + + 
UpdateXssMatchSetRequest(*waf.UpdateXssMatchSetInput) (*request.Request, *waf.UpdateXssMatchSetOutput) + + UpdateXssMatchSet(*waf.UpdateXssMatchSetInput) (*waf.UpdateXssMatchSetOutput, error) +} + +var _ WAFAPI = (*waf.WAF)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/workspaces/api.go b/vendor/github.com/aws/aws-sdk-go/service/workspaces/api.go new file mode 100644 index 000000000..c6a6f336f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/workspaces/api.go @@ -0,0 +1,1818 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package workspaces provides a client for Amazon WorkSpaces. +package workspaces + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opCreateTags = "CreateTags" + +// CreateTagsRequest generates a "aws/request.Request" representing the +// client's request for the CreateTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateTagsRequest method. +// req, resp := client.CreateTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WorkSpaces) CreateTagsRequest(input *CreateTagsInput) (req *request.Request, output *CreateTagsOutput) { + op := &request.Operation{ + Name: opCreateTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateTagsOutput{} + req.Data = output + return +} + +// Creates tags for a WorkSpace. +func (c *WorkSpaces) CreateTags(input *CreateTagsInput) (*CreateTagsOutput, error) { + req, out := c.CreateTagsRequest(input) + err := req.Send() + return out, err +} + +const opCreateWorkspaces = "CreateWorkspaces" + +// CreateWorkspacesRequest generates a "aws/request.Request" representing the +// client's request for the CreateWorkspaces operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateWorkspaces method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateWorkspacesRequest method. 
+// req, resp := client.CreateWorkspacesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WorkSpaces) CreateWorkspacesRequest(input *CreateWorkspacesInput) (req *request.Request, output *CreateWorkspacesOutput) { + op := &request.Operation{ + Name: opCreateWorkspaces, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateWorkspacesInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateWorkspacesOutput{} + req.Data = output + return +} + +// Creates one or more WorkSpaces. +// +// This operation is asynchronous and returns before the WorkSpaces are created. +func (c *WorkSpaces) CreateWorkspaces(input *CreateWorkspacesInput) (*CreateWorkspacesOutput, error) { + req, out := c.CreateWorkspacesRequest(input) + err := req.Send() + return out, err +} + +const opDeleteTags = "DeleteTags" + +// DeleteTagsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteTagsRequest method. +// req, resp := client.DeleteTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WorkSpaces) DeleteTagsRequest(input *DeleteTagsInput) (req *request.Request, output *DeleteTagsOutput) { + op := &request.Operation{ + Name: opDeleteTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteTagsOutput{} + req.Data = output + return +} + +// Deletes tags from a WorkSpace. +func (c *WorkSpaces) DeleteTags(input *DeleteTagsInput) (*DeleteTagsOutput, error) { + req, out := c.DeleteTagsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeTags = "DescribeTags" + +// DescribeTagsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeTagsRequest method. 
+// req, resp := client.DescribeTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WorkSpaces) DescribeTagsRequest(input *DescribeTagsInput) (req *request.Request, output *DescribeTagsOutput) { + op := &request.Operation{ + Name: opDescribeTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTagsOutput{} + req.Data = output + return +} + +// Describes tags for a WorkSpace. +func (c *WorkSpaces) DescribeTags(input *DescribeTagsInput) (*DescribeTagsOutput, error) { + req, out := c.DescribeTagsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeWorkspaceBundles = "DescribeWorkspaceBundles" + +// DescribeWorkspaceBundlesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeWorkspaceBundles operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeWorkspaceBundles method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeWorkspaceBundlesRequest method. +// req, resp := client.DescribeWorkspaceBundlesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WorkSpaces) DescribeWorkspaceBundlesRequest(input *DescribeWorkspaceBundlesInput) (req *request.Request, output *DescribeWorkspaceBundlesOutput) { + op := &request.Operation{ + Name: opDescribeWorkspaceBundles, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeWorkspaceBundlesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeWorkspaceBundlesOutput{} + req.Data = output + return +} + +// Obtains information about the WorkSpace bundles that are available to your +// account in the specified region. +// +// You can filter the results with either the BundleIds parameter, or the Owner +// parameter, but not both. +// +// This operation supports pagination with the use of the NextToken request +// and response parameters. If more results are available, the NextToken response +// member contains a token that you pass in the next call to this operation +// to retrieve the next set of items. +func (c *WorkSpaces) DescribeWorkspaceBundles(input *DescribeWorkspaceBundlesInput) (*DescribeWorkspaceBundlesOutput, error) { + req, out := c.DescribeWorkspaceBundlesRequest(input) + err := req.Send() + return out, err +} + +// DescribeWorkspaceBundlesPages iterates over the pages of a DescribeWorkspaceBundles operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeWorkspaceBundles method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. 
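+//
+// If you need finer control than the Pages helper gives you, you can also
+// page manually by feeding each response's NextToken back into the next
+// request. A minimal sketch, using the same illustrative client and params
+// names as the example below (error handling elided):
+//
+//    out, err := client.DescribeWorkspaceBundles(params)
+//    for err == nil && out.NextToken != nil {
+//        params.NextToken = out.NextToken
+//        out, err = client.DescribeWorkspaceBundles(params)
+//    }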
+// +// // Example iterating over at most 3 pages of a DescribeWorkspaceBundles operation. +// pageNum := 0 +// err := client.DescribeWorkspaceBundlesPages(params, +// func(page *DescribeWorkspaceBundlesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *WorkSpaces) DescribeWorkspaceBundlesPages(input *DescribeWorkspaceBundlesInput, fn func(p *DescribeWorkspaceBundlesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeWorkspaceBundlesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeWorkspaceBundlesOutput), lastPage) + }) +} + +const opDescribeWorkspaceDirectories = "DescribeWorkspaceDirectories" + +// DescribeWorkspaceDirectoriesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeWorkspaceDirectories operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeWorkspaceDirectories method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeWorkspaceDirectoriesRequest method. +// req, resp := client.DescribeWorkspaceDirectoriesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WorkSpaces) DescribeWorkspaceDirectoriesRequest(input *DescribeWorkspaceDirectoriesInput) (req *request.Request, output *DescribeWorkspaceDirectoriesOutput) { + op := &request.Operation{ + Name: opDescribeWorkspaceDirectories, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeWorkspaceDirectoriesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeWorkspaceDirectoriesOutput{} + req.Data = output + return +} + +// Retrieves information about the AWS Directory Service directories in the +// region that are registered with Amazon WorkSpaces and are available to your +// account. +// +// This operation supports pagination with the use of the NextToken request +// and response parameters. If more results are available, the NextToken response +// member contains a token that you pass in the next call to this operation +// to retrieve the next set of items. +func (c *WorkSpaces) DescribeWorkspaceDirectories(input *DescribeWorkspaceDirectoriesInput) (*DescribeWorkspaceDirectoriesOutput, error) { + req, out := c.DescribeWorkspaceDirectoriesRequest(input) + err := req.Send() + return out, err +} + +// DescribeWorkspaceDirectoriesPages iterates over the pages of a DescribeWorkspaceDirectories operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeWorkspaceDirectories method for more information on how to use this operation. 
+// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeWorkspaceDirectories operation. +// pageNum := 0 +// err := client.DescribeWorkspaceDirectoriesPages(params, +// func(page *DescribeWorkspaceDirectoriesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *WorkSpaces) DescribeWorkspaceDirectoriesPages(input *DescribeWorkspaceDirectoriesInput, fn func(p *DescribeWorkspaceDirectoriesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeWorkspaceDirectoriesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeWorkspaceDirectoriesOutput), lastPage) + }) +} + +const opDescribeWorkspaces = "DescribeWorkspaces" + +// DescribeWorkspacesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeWorkspaces operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeWorkspaces method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeWorkspacesRequest method. +// req, resp := client.DescribeWorkspacesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WorkSpaces) DescribeWorkspacesRequest(input *DescribeWorkspacesInput) (req *request.Request, output *DescribeWorkspacesOutput) { + op := &request.Operation{ + Name: opDescribeWorkspaces, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeWorkspacesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeWorkspacesOutput{} + req.Data = output + return +} + +// Obtains information about the specified WorkSpaces. +// +// Only one of the filter parameters, such as BundleId, DirectoryId, or WorkspaceIds, +// can be specified at a time. +// +// This operation supports pagination with the use of the NextToken request +// and response parameters. If more results are available, the NextToken response +// member contains a token that you pass in the next call to this operation +// to retrieve the next set of items. +func (c *WorkSpaces) DescribeWorkspaces(input *DescribeWorkspacesInput) (*DescribeWorkspacesOutput, error) { + req, out := c.DescribeWorkspacesRequest(input) + err := req.Send() + return out, err +} + +// DescribeWorkspacesPages iterates over the pages of a DescribeWorkspaces operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeWorkspaces method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. 
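+//
+// Unlike the other paginated operations in this package, DescribeWorkspaces
+// also honors a Limit parameter (wired up as the paginator's LimitToken
+// above), so each page carries at most Limit results.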
+// +// // Example iterating over at most 3 pages of a DescribeWorkspaces operation. +// pageNum := 0 +// err := client.DescribeWorkspacesPages(params, +// func(page *DescribeWorkspacesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *WorkSpaces) DescribeWorkspacesPages(input *DescribeWorkspacesInput, fn func(p *DescribeWorkspacesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeWorkspacesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeWorkspacesOutput), lastPage) + }) +} + +const opRebootWorkspaces = "RebootWorkspaces" + +// RebootWorkspacesRequest generates a "aws/request.Request" representing the +// client's request for the RebootWorkspaces operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RebootWorkspaces method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RebootWorkspacesRequest method. +// req, resp := client.RebootWorkspacesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WorkSpaces) RebootWorkspacesRequest(input *RebootWorkspacesInput) (req *request.Request, output *RebootWorkspacesOutput) { + op := &request.Operation{ + Name: opRebootWorkspaces, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RebootWorkspacesInput{} + } + + req = c.newRequest(op, input, output) + output = &RebootWorkspacesOutput{} + req.Data = output + return +} + +// Reboots the specified WorkSpaces. +// +// To be able to reboot a WorkSpace, the WorkSpace must have a State of AVAILABLE, +// IMPAIRED, or INOPERABLE. +// +// This operation is asynchronous and will return before the WorkSpaces have +// rebooted. +func (c *WorkSpaces) RebootWorkspaces(input *RebootWorkspacesInput) (*RebootWorkspacesOutput, error) { + req, out := c.RebootWorkspacesRequest(input) + err := req.Send() + return out, err +} + +const opRebuildWorkspaces = "RebuildWorkspaces" + +// RebuildWorkspacesRequest generates a "aws/request.Request" representing the +// client's request for the RebuildWorkspaces operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RebuildWorkspaces method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RebuildWorkspacesRequest method. 
+// req, resp := client.RebuildWorkspacesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WorkSpaces) RebuildWorkspacesRequest(input *RebuildWorkspacesInput) (req *request.Request, output *RebuildWorkspacesOutput) { + op := &request.Operation{ + Name: opRebuildWorkspaces, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RebuildWorkspacesInput{} + } + + req = c.newRequest(op, input, output) + output = &RebuildWorkspacesOutput{} + req.Data = output + return +} + +// Rebuilds the specified WorkSpaces. +// +// Rebuilding a WorkSpace is a potentially destructive action that can result +// in the loss of data. Rebuilding a WorkSpace causes the following to occur: +// +// The system is restored to the image of the bundle that the WorkSpace is +// created from. Any applications that have been installed, or system settings +// that have been made since the WorkSpace was created will be lost. The data +// drive (D drive) is re-created from the last automatic snapshot taken of the +// data drive. The current contents of the data drive are overwritten. Automatic +// snapshots of the data drive are taken every 12 hours, so the snapshot can +// be as much as 12 hours old. To be able to rebuild a WorkSpace, the WorkSpace +// must have a State of AVAILABLE or ERROR. +// +// This operation is asynchronous and will return before the WorkSpaces have +// been completely rebuilt. +func (c *WorkSpaces) RebuildWorkspaces(input *RebuildWorkspacesInput) (*RebuildWorkspacesOutput, error) { + req, out := c.RebuildWorkspacesRequest(input) + err := req.Send() + return out, err +} + +const opTerminateWorkspaces = "TerminateWorkspaces" + +// TerminateWorkspacesRequest generates a "aws/request.Request" representing the +// client's request for the TerminateWorkspaces operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the TerminateWorkspaces method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the TerminateWorkspacesRequest method. +// req, resp := client.TerminateWorkspacesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *WorkSpaces) TerminateWorkspacesRequest(input *TerminateWorkspacesInput) (req *request.Request, output *TerminateWorkspacesOutput) { + op := &request.Operation{ + Name: opTerminateWorkspaces, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TerminateWorkspacesInput{} + } + + req = c.newRequest(op, input, output) + output = &TerminateWorkspacesOutput{} + req.Data = output + return +} + +// Terminates the specified WorkSpaces. +// +// Terminating a WorkSpace is a permanent action and cannot be undone. The +// user's data is not maintained and will be destroyed. If you need to archive +// any user data, contact Amazon Web Services before terminating the WorkSpace. +// +// You can terminate a WorkSpace that is in any state except SUSPENDED. 
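+//
+// Any WorkSpaces that could not be terminated are reported per item in the
+// FailedRequests member of the output.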
+// +// This operation is asynchronous and will return before the WorkSpaces have +// been completely terminated. +func (c *WorkSpaces) TerminateWorkspaces(input *TerminateWorkspacesInput) (*TerminateWorkspacesOutput, error) { + req, out := c.TerminateWorkspacesRequest(input) + err := req.Send() + return out, err +} + +// Contains information about the compute type of a WorkSpace bundle. +type ComputeType struct { + _ struct{} `type:"structure"` + + // The name of the compute type for the bundle. + Name *string `type:"string" enum:"Compute"` +} + +// String returns the string representation +func (s ComputeType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ComputeType) GoString() string { + return s.String() +} + +// The request of the create tags action. +type CreateTagsInput struct { + _ struct{} `type:"structure"` + + // The resource ID of the request. + ResourceId *string `min:"1" type:"string" required:"true"` + + // The tags of the request. + Tags []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s CreateTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTagsInput"} + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.ResourceId != nil && len(*s.ResourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of the create tags action. +type CreateTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTagsOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the CreateWorkspaces operation. +type CreateWorkspacesInput struct { + _ struct{} `type:"structure"` + + // An array of structures that specify the WorkSpaces to create. + Workspaces []*WorkspaceRequest `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s CreateWorkspacesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateWorkspacesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
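+// The SDK normally runs this validation for you as part of sending a request,
+// so calling Validate directly is only needed to check inputs ahead of time.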
+func (s *CreateWorkspacesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateWorkspacesInput"} + if s.Workspaces == nil { + invalidParams.Add(request.NewErrParamRequired("Workspaces")) + } + if s.Workspaces != nil && len(s.Workspaces) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Workspaces", 1)) + } + if s.Workspaces != nil { + for i, v := range s.Workspaces { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Workspaces", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the result of the CreateWorkspaces operation. +type CreateWorkspacesOutput struct { + _ struct{} `type:"structure"` + + // An array of structures that represent the WorkSpaces that could not be created. + FailedRequests []*FailedCreateWorkspaceRequest `type:"list"` + + // An array of structures that represent the WorkSpaces that were created. + // + // Because this operation is asynchronous, the identifier in WorkspaceId is + // not immediately available. If you immediately call DescribeWorkspaces with + // this identifier, no information will be returned. + PendingRequests []*Workspace `type:"list"` +} + +// String returns the string representation +func (s CreateWorkspacesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateWorkspacesOutput) GoString() string { + return s.String() +} + +// Contains default WorkSpace creation information. +type DefaultWorkspaceCreationProperties struct { + _ struct{} `type:"structure"` + + // The identifier of any custom security groups that are applied to the WorkSpaces + // when they are created. + CustomSecurityGroupId *string `type:"string"` + + // The organizational unit (OU) in the directory that the WorkSpace machine + // accounts are placed in. + DefaultOu *string `type:"string"` + + // A public IP address will be attached to all WorkSpaces that are created or + // rebuilt. + EnableInternetAccess *bool `type:"boolean"` + + // Specifies if the directory is enabled for Amazon WorkDocs. + EnableWorkDocs *bool `type:"boolean"` + + // The WorkSpace user is an administrator on the WorkSpace. + UserEnabledAsLocalAdministrator *bool `type:"boolean"` +} + +// String returns the string representation +func (s DefaultWorkspaceCreationProperties) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DefaultWorkspaceCreationProperties) GoString() string { + return s.String() +} + +// The request of the delete tags action. +type DeleteTagsInput struct { + _ struct{} `type:"structure"` + + // The resource ID of the request. + ResourceId *string `min:"1" type:"string" required:"true"` + + // The tag keys of the request. + TagKeys []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s DeleteTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTagsInput"} + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.ResourceId != nil && len(*s.ResourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of the delete tags action. +type DeleteTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTagsOutput) GoString() string { + return s.String() +} + +// The request of the describe tags action. +type DescribeTagsInput struct { + _ struct{} `type:"structure"` + + // The resource ID of the request. + ResourceId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTagsInput"} + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.ResourceId != nil && len(*s.ResourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of the describe tags action. +type DescribeTagsOutput struct { + _ struct{} `type:"structure"` + + // The list of tags. + TagList []*Tag `type:"list"` +} + +// String returns the string representation +func (s DescribeTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTagsOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the DescribeWorkspaceBundles operation. +type DescribeWorkspaceBundlesInput struct { + _ struct{} `type:"structure"` + + // An array of strings that contains the identifiers of the bundles to retrieve. + // This parameter cannot be combined with any other filter parameter. + BundleIds []*string `min:"1" type:"list"` + + // The NextToken value from a previous call to this operation. Pass null if + // this is the first call. + NextToken *string `min:"1" type:"string"` + + // The owner of the bundles to retrieve. This parameter cannot be combined with + // any other filter parameter. + // + // This contains one of the following values: + // + // null - Retrieves the bundles that belong to the account making the call. + // AMAZON - Retrieves the bundles that are provided by AWS. + Owner *string `type:"string"` +} + +// String returns the string representation +func (s DescribeWorkspaceBundlesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeWorkspaceBundlesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeWorkspaceBundlesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeWorkspaceBundlesInput"} + if s.BundleIds != nil && len(s.BundleIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("BundleIds", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the results of the DescribeWorkspaceBundles operation. +type DescribeWorkspaceBundlesOutput struct { + _ struct{} `type:"structure"` + + // An array of structures that contain information about the bundles. + Bundles []*WorkspaceBundle `type:"list"` + + // If not null, more results are available. Pass this value for the NextToken + // parameter in a subsequent call to this operation to retrieve the next set + // of items. This token is valid for one day and must be used within that timeframe. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeWorkspaceBundlesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeWorkspaceBundlesOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the DescribeWorkspaceDirectories operation. +type DescribeWorkspaceDirectoriesInput struct { + _ struct{} `type:"structure"` + + // An array of strings that contains the directory identifiers to retrieve information + // for. If this member is null, all directories are retrieved. + DirectoryIds []*string `min:"1" type:"list"` + + // The NextToken value from a previous call to this operation. Pass null if + // this is the first call. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeWorkspaceDirectoriesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeWorkspaceDirectoriesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeWorkspaceDirectoriesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeWorkspaceDirectoriesInput"} + if s.DirectoryIds != nil && len(s.DirectoryIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DirectoryIds", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the results of the DescribeWorkspaceDirectories operation. +type DescribeWorkspaceDirectoriesOutput struct { + _ struct{} `type:"structure"` + + // An array of structures that contain information about the directories. + Directories []*WorkspaceDirectory `type:"list"` + + // If not null, more results are available. Pass this value for the NextToken + // parameter in a subsequent call to this operation to retrieve the next set + // of items. This token is valid for one day and must be used within that timeframe. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeWorkspaceDirectoriesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeWorkspaceDirectoriesOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the DescribeWorkspaces operation. 
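+// At most one of the filter members (BundleId, DirectoryId, or WorkspaceIds)
+// may be set on a single request; the field comments below note which
+// combinations, such as DirectoryId with UserName, are allowed.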
+type DescribeWorkspacesInput struct { + _ struct{} `type:"structure"` + + // The identifier of a bundle to obtain the WorkSpaces for. All WorkSpaces that + // are created from this bundle will be retrieved. This parameter cannot be + // combined with any other filter parameter. + BundleId *string `type:"string"` + + // Specifies the directory identifier to which to limit the WorkSpaces. Optionally, + // you can specify a specific directory user with the UserName parameter. This + // parameter cannot be combined with any other filter parameter. + DirectoryId *string `type:"string"` + + // The maximum number of items to return. + Limit *int64 `min:"1" type:"integer"` + + // The NextToken value from a previous call to this operation. Pass null if + // this is the first call. + NextToken *string `min:"1" type:"string"` + + // Used with the DirectoryId parameter to specify the directory user for which + // to obtain the WorkSpace. + UserName *string `min:"1" type:"string"` + + // An array of strings that contain the identifiers of the WorkSpaces for which + // to retrieve information. This parameter cannot be combined with any other + // filter parameter. + // + // Because the CreateWorkspaces operation is asynchronous, the identifier returned + // by CreateWorkspaces is not immediately available. If you immediately call + // DescribeWorkspaces with this identifier, no information will be returned. + WorkspaceIds []*string `min:"1" type:"list"` +} + +// String returns the string representation +func (s DescribeWorkspacesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeWorkspacesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeWorkspacesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeWorkspacesInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + if s.WorkspaceIds != nil && len(s.WorkspaceIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WorkspaceIds", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the results for the DescribeWorkspaces operation. +type DescribeWorkspacesOutput struct { + _ struct{} `type:"structure"` + + // If not null, more results are available. Pass this value for the NextToken + // parameter in a subsequent call to this operation to retrieve the next set + // of items. This token is valid for one day and must be used within that timeframe. + NextToken *string `min:"1" type:"string"` + + // An array of structures that contain the information about the WorkSpaces. + // + // Because the CreateWorkspaces operation is asynchronous, some of this information + // may be incomplete for a newly-created WorkSpace. + Workspaces []*Workspace `type:"list"` +} + +// String returns the string representation +func (s DescribeWorkspacesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeWorkspacesOutput) GoString() string { + return s.String() +} + +// Contains information about a WorkSpace that could not be created. 
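+// Instances of this type are returned in the FailedRequests member of
+// CreateWorkspacesOutput.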
+type FailedCreateWorkspaceRequest struct { + _ struct{} `type:"structure"` + + // The error code. + ErrorCode *string `type:"string"` + + // The textual error message. + ErrorMessage *string `type:"string"` + + // A WorkspaceRequest object that contains the information about the WorkSpace + // that could not be created. + WorkspaceRequest *WorkspaceRequest `type:"structure"` +} + +// String returns the string representation +func (s FailedCreateWorkspaceRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FailedCreateWorkspaceRequest) GoString() string { + return s.String() +} + +// Contains information about a WorkSpace that could not be rebooted (RebootWorkspaces), +// rebuilt (RebuildWorkspaces), or terminated (TerminateWorkspaces). +type FailedWorkspaceChangeRequest struct { + _ struct{} `type:"structure"` + + // The error code. + ErrorCode *string `type:"string"` + + // The textual error message. + ErrorMessage *string `type:"string"` + + // The identifier of the WorkSpace. + WorkspaceId *string `type:"string"` +} + +// String returns the string representation +func (s FailedWorkspaceChangeRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FailedWorkspaceChangeRequest) GoString() string { + return s.String() +} + +// Contains information used with the RebootWorkspaces operation to reboot a +// WorkSpace. +type RebootRequest struct { + _ struct{} `type:"structure"` + + // The identifier of the WorkSpace to reboot. + WorkspaceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RebootRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebootRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RebootRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RebootRequest"} + if s.WorkspaceId == nil { + invalidParams.Add(request.NewErrParamRequired("WorkspaceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the inputs for the RebootWorkspaces operation. +type RebootWorkspacesInput struct { + _ struct{} `type:"structure"` + + // An array of structures that specify the WorkSpaces to reboot. + RebootWorkspaceRequests []*RebootRequest `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s RebootWorkspacesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebootWorkspacesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *RebootWorkspacesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RebootWorkspacesInput"} + if s.RebootWorkspaceRequests == nil { + invalidParams.Add(request.NewErrParamRequired("RebootWorkspaceRequests")) + } + if s.RebootWorkspaceRequests != nil && len(s.RebootWorkspaceRequests) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RebootWorkspaceRequests", 1)) + } + if s.RebootWorkspaceRequests != nil { + for i, v := range s.RebootWorkspaceRequests { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RebootWorkspaceRequests", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the results of the RebootWorkspaces operation. +type RebootWorkspacesOutput struct { + _ struct{} `type:"structure"` + + // An array of structures that represent any WorkSpaces that could not be rebooted. + FailedRequests []*FailedWorkspaceChangeRequest `type:"list"` +} + +// String returns the string representation +func (s RebootWorkspacesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebootWorkspacesOutput) GoString() string { + return s.String() +} + +// Contains information used with the RebuildWorkspaces operation to rebuild +// a WorkSpace. +type RebuildRequest struct { + _ struct{} `type:"structure"` + + // The identifier of the WorkSpace to rebuild. + WorkspaceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RebuildRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebuildRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RebuildRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RebuildRequest"} + if s.WorkspaceId == nil { + invalidParams.Add(request.NewErrParamRequired("WorkspaceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the inputs for the RebuildWorkspaces operation. +type RebuildWorkspacesInput struct { + _ struct{} `type:"structure"` + + // An array of structures that specify the WorkSpaces to rebuild. + RebuildWorkspaceRequests []*RebuildRequest `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s RebuildWorkspacesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebuildWorkspacesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *RebuildWorkspacesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RebuildWorkspacesInput"} + if s.RebuildWorkspaceRequests == nil { + invalidParams.Add(request.NewErrParamRequired("RebuildWorkspaceRequests")) + } + if s.RebuildWorkspaceRequests != nil && len(s.RebuildWorkspaceRequests) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RebuildWorkspaceRequests", 1)) + } + if s.RebuildWorkspaceRequests != nil { + for i, v := range s.RebuildWorkspaceRequests { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RebuildWorkspaceRequests", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the results of the RebuildWorkspaces operation. +type RebuildWorkspacesOutput struct { + _ struct{} `type:"structure"` + + // An array of structures that represent any WorkSpaces that could not be rebuilt. + FailedRequests []*FailedWorkspaceChangeRequest `type:"list"` +} + +// String returns the string representation +func (s RebuildWorkspacesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebuildWorkspacesOutput) GoString() string { + return s.String() +} + +// Describes the tag of the WorkSpace. +type Tag struct { + _ struct{} `type:"structure"` + + // The key of the tag. + Key *string `min:"1" type:"string" required:"true"` + + // The value of the tag. + Value *string `type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains information used with the TerminateWorkspaces operation to terminate +// a WorkSpace. +type TerminateRequest struct { + _ struct{} `type:"structure"` + + // The identifier of the WorkSpace to terminate. + WorkspaceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s TerminateRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TerminateRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TerminateRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TerminateRequest"} + if s.WorkspaceId == nil { + invalidParams.Add(request.NewErrParamRequired("WorkspaceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the inputs for the TerminateWorkspaces operation. +type TerminateWorkspacesInput struct { + _ struct{} `type:"structure"` + + // An array of structures that specify the WorkSpaces to terminate. 
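+	// The list is required and must contain at least one entry, as enforced
+	// by the min:"1" tag and the Validate method below.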
+ TerminateWorkspaceRequests []*TerminateRequest `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s TerminateWorkspacesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TerminateWorkspacesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TerminateWorkspacesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TerminateWorkspacesInput"} + if s.TerminateWorkspaceRequests == nil { + invalidParams.Add(request.NewErrParamRequired("TerminateWorkspaceRequests")) + } + if s.TerminateWorkspaceRequests != nil && len(s.TerminateWorkspaceRequests) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TerminateWorkspaceRequests", 1)) + } + if s.TerminateWorkspaceRequests != nil { + for i, v := range s.TerminateWorkspaceRequests { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TerminateWorkspaceRequests", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the results of the TerminateWorkspaces operation. +type TerminateWorkspacesOutput struct { + _ struct{} `type:"structure"` + + // An array of structures that represent any WorkSpaces that could not be terminated. + FailedRequests []*FailedWorkspaceChangeRequest `type:"list"` +} + +// String returns the string representation +func (s TerminateWorkspacesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TerminateWorkspacesOutput) GoString() string { + return s.String() +} + +// Contains information about the user storage for a WorkSpace bundle. +type UserStorage struct { + _ struct{} `type:"structure"` + + // The amount of user storage for the bundle. + Capacity *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UserStorage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UserStorage) GoString() string { + return s.String() +} + +// Contains information about a WorkSpace. +type Workspace struct { + _ struct{} `type:"structure"` + + // The identifier of the bundle that the WorkSpace was created from. + BundleId *string `type:"string"` + + // The name of the WorkSpace as seen by the operating system. + ComputerName *string `type:"string"` + + // The identifier of the AWS Directory Service directory that the WorkSpace + // belongs to. + DirectoryId *string `type:"string"` + + // If the WorkSpace could not be created, this contains the error code. + ErrorCode *string `type:"string"` + + // If the WorkSpace could not be created, this contains a textual error message + // that describes the failure. + ErrorMessage *string `type:"string"` + + // The IP address of the WorkSpace. + IpAddress *string `type:"string"` + + // Specifies whether the data stored on the root volume, or C: drive, is encrypted. + RootVolumeEncryptionEnabled *bool `type:"boolean"` + + // The operational state of the WorkSpace. + State *string `type:"string" enum:"WorkspaceState"` + + // The identifier of the subnet that the WorkSpace is in. + SubnetId *string `type:"string"` + + // The user that the WorkSpace is assigned to. 
+ UserName *string `min:"1" type:"string"` + + // Specifies whether the data stored on the user volume, or D: drive, is encrypted. + UserVolumeEncryptionEnabled *bool `type:"boolean"` + + // The KMS key used to encrypt data stored on your WorkSpace. + VolumeEncryptionKey *string `type:"string"` + + // The identifier of the WorkSpace. + WorkspaceId *string `type:"string"` +} + +// String returns the string representation +func (s Workspace) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Workspace) GoString() string { + return s.String() +} + +// Contains information about a WorkSpace bundle. +type WorkspaceBundle struct { + _ struct{} `type:"structure"` + + // The bundle identifier. + BundleId *string `type:"string"` + + // A ComputeType object that specifies the compute type for the bundle. + ComputeType *ComputeType `type:"structure"` + + // The bundle description. + Description *string `type:"string"` + + // The name of the bundle. + Name *string `min:"1" type:"string"` + + // The owner of the bundle. This contains the owner's account identifier, or + // AMAZON if the bundle is provided by AWS. + Owner *string `type:"string"` + + // A UserStorage object that specifies the amount of user storage that the bundle + // contains. + UserStorage *UserStorage `type:"structure"` +} + +// String returns the string representation +func (s WorkspaceBundle) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkspaceBundle) GoString() string { + return s.String() +} + +// Contains information about an AWS Directory Service directory for use with +// Amazon WorkSpaces. +type WorkspaceDirectory struct { + _ struct{} `type:"structure"` + + // The directory alias. + Alias *string `type:"string"` + + // The user name for the service account. + CustomerUserName *string `min:"1" type:"string"` + + // The directory identifier. + DirectoryId *string `type:"string"` + + // The name of the directory. + DirectoryName *string `type:"string"` + + // The directory type. + DirectoryType *string `type:"string" enum:"WorkspaceDirectoryType"` + + // An array of strings that contains the IP addresses of the DNS servers for + // the directory. + DnsIpAddresses []*string `type:"list"` + + // The identifier of the IAM role. This is the role that allows Amazon WorkSpaces + // to make calls to other services, such as Amazon EC2, on your behalf. + IamRoleId *string `type:"string"` + + // The registration code for the directory. This is the code that users enter + // in their Amazon WorkSpaces client application to connect to the directory. + RegistrationCode *string `min:"1" type:"string"` + + // The state of the directory's registration with Amazon WorkSpaces + State *string `type:"string" enum:"WorkspaceDirectoryState"` + + // An array of strings that contains the identifiers of the subnets used with + // the directory. + SubnetIds []*string `type:"list"` + + // A structure that specifies the default creation properties for all WorkSpaces + // in the directory. + WorkspaceCreationProperties *DefaultWorkspaceCreationProperties `type:"structure"` + + // The identifier of the security group that is assigned to new WorkSpaces. 
+ WorkspaceSecurityGroupId *string `type:"string"` +} + +// String returns the string representation +func (s WorkspaceDirectory) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkspaceDirectory) GoString() string { + return s.String() +} + +// Contains information about a WorkSpace creation request. +type WorkspaceRequest struct { + _ struct{} `type:"structure"` + + // The identifier of the bundle to create the WorkSpace from. You can use the + // DescribeWorkspaceBundles operation to obtain a list of the bundles that are + // available. + BundleId *string `type:"string" required:"true"` + + // The identifier of the AWS Directory Service directory to create the WorkSpace + // in. You can use the DescribeWorkspaceDirectories operation to obtain a list + // of the directories that are available. + DirectoryId *string `type:"string" required:"true"` + + // Specifies whether the data stored on the root volume, or C: drive, is encrypted. + RootVolumeEncryptionEnabled *bool `type:"boolean"` + + // The tags of the WorkSpace request. + Tags []*Tag `type:"list"` + + // The username that the WorkSpace is assigned to. This username must exist + // in the AWS Directory Service directory specified by the DirectoryId member. + UserName *string `min:"1" type:"string" required:"true"` + + // Specifies whether the data stored on the user volume, or D: drive, is encrypted. + UserVolumeEncryptionEnabled *bool `type:"boolean"` + + // The KMS key used to encrypt data stored on your WorkSpace. + VolumeEncryptionKey *string `type:"string"` +} + +// String returns the string representation +func (s WorkspaceRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkspaceRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *WorkspaceRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "WorkspaceRequest"} + if s.BundleId == nil { + invalidParams.Add(request.NewErrParamRequired("BundleId")) + } + if s.DirectoryId == nil { + invalidParams.Add(request.NewErrParamRequired("DirectoryId")) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +const ( + // @enum Compute + ComputeValue = "VALUE" + // @enum Compute + ComputeStandard = "STANDARD" + // @enum Compute + ComputePerformance = "PERFORMANCE" +) + +const ( + // @enum WorkspaceDirectoryState + WorkspaceDirectoryStateRegistering = "REGISTERING" + // @enum WorkspaceDirectoryState + WorkspaceDirectoryStateRegistered = "REGISTERED" + // @enum WorkspaceDirectoryState + WorkspaceDirectoryStateDeregistering = "DEREGISTERING" + // @enum WorkspaceDirectoryState + WorkspaceDirectoryStateDeregistered = "DEREGISTERED" + // @enum WorkspaceDirectoryState + WorkspaceDirectoryStateError = "ERROR" +) + +const ( + // @enum WorkspaceDirectoryType + WorkspaceDirectoryTypeSimpleAd = "SIMPLE_AD" + // @enum WorkspaceDirectoryType + WorkspaceDirectoryTypeAdConnector = "AD_CONNECTOR" +) + +const ( + // @enum WorkspaceState + WorkspaceStatePending = "PENDING" + // @enum WorkspaceState + WorkspaceStateAvailable = "AVAILABLE" + // @enum WorkspaceState + WorkspaceStateImpaired = "IMPAIRED" + // @enum WorkspaceState + WorkspaceStateUnhealthy = "UNHEALTHY" + // @enum WorkspaceState + WorkspaceStateRebooting = "REBOOTING" + // @enum WorkspaceState + WorkspaceStateRebuilding = "REBUILDING" + // @enum WorkspaceState + WorkspaceStateTerminating = "TERMINATING" + // @enum WorkspaceState + WorkspaceStateTerminated = "TERMINATED" + // @enum WorkspaceState + WorkspaceStateSuspended = "SUSPENDED" + // @enum WorkspaceState + WorkspaceStateError = "ERROR" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/workspaces/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/workspaces/examples_test.go new file mode 100644 index 000000000..17bbc877e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/workspaces/examples_test.go @@ -0,0 +1,266 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package workspaces_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/workspaces" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleWorkSpaces_CreateTags() { + svc := workspaces.New(session.New()) + + params := &workspaces.CreateTagsInput{ + ResourceId: aws.String("NonEmptyString"), // Required + Tags: []*workspaces.Tag{ // Required + { // Required + Key: aws.String("TagKey"), // Required + Value: aws.String("TagValue"), + }, + // More values... + }, + } + resp, err := svc.CreateTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleWorkSpaces_CreateWorkspaces() { + svc := workspaces.New(session.New()) + + params := &workspaces.CreateWorkspacesInput{ + Workspaces: []*workspaces.WorkspaceRequest{ // Required + { // Required + BundleId: aws.String("BundleId"), // Required + DirectoryId: aws.String("DirectoryId"), // Required + UserName: aws.String("UserName"), // Required + RootVolumeEncryptionEnabled: aws.Bool(true), + Tags: []*workspaces.Tag{ + { // Required + Key: aws.String("TagKey"), // Required + Value: aws.String("TagValue"), + }, + // More values... + }, + UserVolumeEncryptionEnabled: aws.Bool(true), + VolumeEncryptionKey: aws.String("VolumeEncryptionKey"), + }, + // More values... + }, + } + resp, err := svc.CreateWorkspaces(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWorkSpaces_DeleteTags() { + svc := workspaces.New(session.New()) + + params := &workspaces.DeleteTagsInput{ + ResourceId: aws.String("NonEmptyString"), // Required + TagKeys: []*string{ // Required + aws.String("NonEmptyString"), // Required + // More values... + }, + } + resp, err := svc.DeleteTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWorkSpaces_DescribeTags() { + svc := workspaces.New(session.New()) + + params := &workspaces.DescribeTagsInput{ + ResourceId: aws.String("NonEmptyString"), // Required + } + resp, err := svc.DescribeTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWorkSpaces_DescribeWorkspaceBundles() { + svc := workspaces.New(session.New()) + + params := &workspaces.DescribeWorkspaceBundlesInput{ + BundleIds: []*string{ + aws.String("BundleId"), // Required + // More values... + }, + NextToken: aws.String("PaginationToken"), + Owner: aws.String("BundleOwner"), + } + resp, err := svc.DescribeWorkspaceBundles(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWorkSpaces_DescribeWorkspaceDirectories() { + svc := workspaces.New(session.New()) + + params := &workspaces.DescribeWorkspaceDirectoriesInput{ + DirectoryIds: []*string{ + aws.String("DirectoryId"), // Required + // More values... + }, + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.DescribeWorkspaceDirectories(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWorkSpaces_DescribeWorkspaces() { + svc := workspaces.New(session.New()) + + params := &workspaces.DescribeWorkspacesInput{ + BundleId: aws.String("BundleId"), + DirectoryId: aws.String("DirectoryId"), + Limit: aws.Int64(1), + NextToken: aws.String("PaginationToken"), + UserName: aws.String("UserName"), + WorkspaceIds: []*string{ + aws.String("WorkspaceId"), // Required + // More values... 
+ }, + } + resp, err := svc.DescribeWorkspaces(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWorkSpaces_RebootWorkspaces() { + svc := workspaces.New(session.New()) + + params := &workspaces.RebootWorkspacesInput{ + RebootWorkspaceRequests: []*workspaces.RebootRequest{ // Required + { // Required + WorkspaceId: aws.String("WorkspaceId"), // Required + }, + // More values... + }, + } + resp, err := svc.RebootWorkspaces(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWorkSpaces_RebuildWorkspaces() { + svc := workspaces.New(session.New()) + + params := &workspaces.RebuildWorkspacesInput{ + RebuildWorkspaceRequests: []*workspaces.RebuildRequest{ // Required + { // Required + WorkspaceId: aws.String("WorkspaceId"), // Required + }, + // More values... + }, + } + resp, err := svc.RebuildWorkspaces(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWorkSpaces_TerminateWorkspaces() { + svc := workspaces.New(session.New()) + + params := &workspaces.TerminateWorkspacesInput{ + TerminateWorkspaceRequests: []*workspaces.TerminateRequest{ // Required + { // Required + WorkspaceId: aws.String("WorkspaceId"), // Required + }, + // More values... + }, + } + resp, err := svc.TerminateWorkspaces(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/workspaces/service.go b/vendor/github.com/aws/aws-sdk-go/service/workspaces/service.go new file mode 100644 index 000000000..1d1b5ec91 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/workspaces/service.go @@ -0,0 +1,90 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package workspaces + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// This is the Amazon WorkSpaces API Reference. This guide provides detailed +// information about Amazon WorkSpaces operations, data types, parameters, and +// errors. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type WorkSpaces struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "workspaces" + +// New creates a new instance of the WorkSpaces client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. 
+// +// Example: +// // Create a WorkSpaces client from just a session. +// svc := workspaces.New(mySession) +// +// // Create a WorkSpaces client with additional configuration +// svc := workspaces.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *WorkSpaces { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *WorkSpaces { + svc := &WorkSpaces{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-04-08", + JSONVersion: "1.1", + TargetPrefix: "WorkspacesService", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a WorkSpaces operation and runs any +// custom request initialization. +func (c *WorkSpaces) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/workspaces/workspacesiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/workspaces/workspacesiface/interface.go new file mode 100644 index 000000000..16d8395e2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/workspaces/workspacesiface/interface.go @@ -0,0 +1,60 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package workspacesiface provides an interface for the Amazon WorkSpaces. +package workspacesiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/workspaces" +) + +// WorkSpacesAPI is the interface type for workspaces.WorkSpaces. 
+type WorkSpacesAPI interface { + CreateTagsRequest(*workspaces.CreateTagsInput) (*request.Request, *workspaces.CreateTagsOutput) + + CreateTags(*workspaces.CreateTagsInput) (*workspaces.CreateTagsOutput, error) + + CreateWorkspacesRequest(*workspaces.CreateWorkspacesInput) (*request.Request, *workspaces.CreateWorkspacesOutput) + + CreateWorkspaces(*workspaces.CreateWorkspacesInput) (*workspaces.CreateWorkspacesOutput, error) + + DeleteTagsRequest(*workspaces.DeleteTagsInput) (*request.Request, *workspaces.DeleteTagsOutput) + + DeleteTags(*workspaces.DeleteTagsInput) (*workspaces.DeleteTagsOutput, error) + + DescribeTagsRequest(*workspaces.DescribeTagsInput) (*request.Request, *workspaces.DescribeTagsOutput) + + DescribeTags(*workspaces.DescribeTagsInput) (*workspaces.DescribeTagsOutput, error) + + DescribeWorkspaceBundlesRequest(*workspaces.DescribeWorkspaceBundlesInput) (*request.Request, *workspaces.DescribeWorkspaceBundlesOutput) + + DescribeWorkspaceBundles(*workspaces.DescribeWorkspaceBundlesInput) (*workspaces.DescribeWorkspaceBundlesOutput, error) + + DescribeWorkspaceBundlesPages(*workspaces.DescribeWorkspaceBundlesInput, func(*workspaces.DescribeWorkspaceBundlesOutput, bool) bool) error + + DescribeWorkspaceDirectoriesRequest(*workspaces.DescribeWorkspaceDirectoriesInput) (*request.Request, *workspaces.DescribeWorkspaceDirectoriesOutput) + + DescribeWorkspaceDirectories(*workspaces.DescribeWorkspaceDirectoriesInput) (*workspaces.DescribeWorkspaceDirectoriesOutput, error) + + DescribeWorkspaceDirectoriesPages(*workspaces.DescribeWorkspaceDirectoriesInput, func(*workspaces.DescribeWorkspaceDirectoriesOutput, bool) bool) error + + DescribeWorkspacesRequest(*workspaces.DescribeWorkspacesInput) (*request.Request, *workspaces.DescribeWorkspacesOutput) + + DescribeWorkspaces(*workspaces.DescribeWorkspacesInput) (*workspaces.DescribeWorkspacesOutput, error) + + DescribeWorkspacesPages(*workspaces.DescribeWorkspacesInput, func(*workspaces.DescribeWorkspacesOutput, bool) bool) error + + RebootWorkspacesRequest(*workspaces.RebootWorkspacesInput) (*request.Request, *workspaces.RebootWorkspacesOutput) + + RebootWorkspaces(*workspaces.RebootWorkspacesInput) (*workspaces.RebootWorkspacesOutput, error) + + RebuildWorkspacesRequest(*workspaces.RebuildWorkspacesInput) (*request.Request, *workspaces.RebuildWorkspacesOutput) + + RebuildWorkspaces(*workspaces.RebuildWorkspacesInput) (*workspaces.RebuildWorkspacesOutput, error) + + TerminateWorkspacesRequest(*workspaces.TerminateWorkspacesInput) (*request.Request, *workspaces.TerminateWorkspacesOutput) + + TerminateWorkspaces(*workspaces.TerminateWorkspacesInput) (*workspaces.TerminateWorkspacesOutput, error) +} + +var _ WorkSpacesAPI = (*workspaces.WorkSpaces)(nil) diff --git a/vendor/github.com/coreos/go-semver/.travis.yml b/vendor/github.com/coreos/go-semver/.travis.yml new file mode 100644 index 000000000..05f548c9a --- /dev/null +++ b/vendor/github.com/coreos/go-semver/.travis.yml @@ -0,0 +1,8 @@ +language: go +sudo: false +go: + - 1.4 + - 1.5 + - 1.6 + - tip +script: cd semver && go test diff --git a/vendor/github.com/coreos/go-semver/LICENSE b/vendor/github.com/coreos/go-semver/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/coreos/go-semver/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
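For orientation before the go-semver files that follow: a minimal usage sketch of the package vendored below, based only on the exported API added in this patch (New, NewVersion, LessThan, Sort); the version strings are illustrative.

package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

func main() {
	// NewVersion reports malformed input as an error; New (via Must) panics.
	current, err := semver.NewVersion("1.2.3-rc.1")
	if err != nil {
		panic(err)
	}
	release := semver.New("1.2.3")

	// A pre-release sorts below the corresponding release.
	fmt.Println(current.LessThan(*release)) // true

	// Sort orders a slice of *Version by ascending precedence.
	versions := []*semver.Version{release, current, semver.New("0.9.0")}
	semver.Sort(versions)
	for _, v := range versions {
		fmt.Println(v) // 0.9.0, 1.2.3-rc.1, 1.2.3
	}
}

Note that New wraps NewVersion in Must and panics on malformed input, so NewVersion is the safer entry point for untrusted strings.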
diff --git a/vendor/github.com/coreos/go-semver/README.md b/vendor/github.com/coreos/go-semver/README.md
new file mode 100644
index 000000000..5bc9263cf
--- /dev/null
+++ b/vendor/github.com/coreos/go-semver/README.md
@@ -0,0 +1,28 @@
+# go-semver - Semantic Versioning Library
+
+[![Build Status](https://travis-ci.org/coreos/go-semver.svg?branch=master)](https://travis-ci.org/coreos/go-semver)
+[![GoDoc](https://godoc.org/github.com/coreos/go-semver/semver?status.svg)](https://godoc.org/github.com/coreos/go-semver/semver)
+
+go-semver is a [semantic versioning][semver] library for Go. It lets you parse
+and compare two semantic version strings.
+
+[semver]: http://semver.org/
+
+## Usage
+
+```go
+vA := semver.New("1.2.3")
+vB := semver.New("3.2.1")
+
+fmt.Printf("%s < %s == %t\n", vA, vB, vA.LessThan(*vB))
+```
+
+## Example Application
+
+```
+$ go run example.go 1.2.3 3.2.1
+1.2.3 < 3.2.1 == true
+
+$ go run example.go 5.2.3 3.2.1
+5.2.3 < 3.2.1 == false
+```
diff --git a/vendor/github.com/coreos/go-semver/example.go b/vendor/github.com/coreos/go-semver/example.go
new file mode 100644
index 000000000..fd2ee5af2
--- /dev/null
+++ b/vendor/github.com/coreos/go-semver/example.go
@@ -0,0 +1,22 @@
+package main
+
+import (
+	"fmt"
+	"github.com/coreos/go-semver/semver"
+	"os"
+)
+
+func main() {
+	vA, err := semver.NewVersion(os.Args[1])
+	if err != nil {
+		fmt.Println(err.Error())
+		os.Exit(1) // a bad first argument would otherwise leave vA nil and panic below
+	}
+	vB, err := semver.NewVersion(os.Args[2])
+	if err != nil {
+		fmt.Println(err.Error())
+		os.Exit(1)
+	}
+
+	fmt.Printf("%s < %s == %t\n", vA, vB, vA.LessThan(*vB))
+}
diff --git a/vendor/github.com/coreos/go-semver/semver/semver.go b/vendor/github.com/coreos/go-semver/semver/semver.go
new file mode 100644
index 000000000..76cf4852c
--- /dev/null
+++ b/vendor/github.com/coreos/go-semver/semver/semver.go
@@ -0,0 +1,296 @@
+// Copyright 2013-2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Semantic Versions http://semver.org
+package semver
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+type Version struct {
+	Major      int64
+	Minor      int64
+	Patch      int64
+	PreRelease PreRelease
+	Metadata   string
+}
+
+type PreRelease string
+
+func splitOff(input *string, delim string) (val string) {
+	parts := strings.SplitN(*input, delim, 2)
+
+	if len(parts) == 2 {
+		*input = parts[0]
+		val = parts[1]
+	}
+
+	return val
+}
+
+func New(version string) *Version {
+	return Must(NewVersion(version))
+}
+
+func NewVersion(version string) (*Version, error) {
+	v := Version{}
+
+	if err := v.Set(version); err != nil {
+		return nil, err
+	}
+
+	return &v, nil
+}
+
+// Must is a helper for wrapping NewVersion and will panic if err is not nil.
+func Must(v *Version, err error) *Version {
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Set parses and updates v from the given version string.
Implements flag.Value +func (v *Version) Set(version string) error { + metadata := splitOff(&version, "+") + preRelease := PreRelease(splitOff(&version, "-")) + dotParts := strings.SplitN(version, ".", 3) + + if len(dotParts) != 3 { + return fmt.Errorf("%s is not in dotted-tri format", version) + } + + if err := validateIdentifier(string(preRelease)); err != nil { + return fmt.Errorf("failed to validate pre-release: %v", err) + } + + if err := validateIdentifier(metadata); err != nil { + return fmt.Errorf("failed to validate metadata: %v", err) + } + + parsed := make([]int64, 3, 3) + + for i, v := range dotParts[:3] { + val, err := strconv.ParseInt(v, 10, 64) + parsed[i] = val + if err != nil { + return err + } + } + + v.Metadata = metadata + v.PreRelease = preRelease + v.Major = parsed[0] + v.Minor = parsed[1] + v.Patch = parsed[2] + return nil +} + +func (v Version) String() string { + var buffer bytes.Buffer + + fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch) + + if v.PreRelease != "" { + fmt.Fprintf(&buffer, "-%s", v.PreRelease) + } + + if v.Metadata != "" { + fmt.Fprintf(&buffer, "+%s", v.Metadata) + } + + return buffer.String() +} + +func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error { + var data string + if err := unmarshal(&data); err != nil { + return err + } + return v.Set(data) +} + +func (v Version) MarshalJSON() ([]byte, error) { + return []byte(`"` + v.String() + `"`), nil +} + +func (v *Version) UnmarshalJSON(data []byte) error { + l := len(data) + if l == 0 || string(data) == `""` { + return nil + } + if l < 2 || data[0] != '"' || data[l-1] != '"' { + return errors.New("invalid semver string") + } + return v.Set(string(data[1 : l-1])) +} + +// Compare tests if v is less than, equal to, or greater than versionB, +// returning -1, 0, or +1 respectively. +func (v Version) Compare(versionB Version) int { + if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 { + return cmp + } + return preReleaseCompare(v, versionB) +} + +// Equal tests if v is equal to versionB. +func (v Version) Equal(versionB Version) bool { + return v.Compare(versionB) == 0 +} + +// LessThan tests if v is less than versionB. +func (v Version) LessThan(versionB Version) bool { + return v.Compare(versionB) < 0 +} + +// Slice converts the comparable parts of the semver into a slice of integers. +func (v Version) Slice() []int64 { + return []int64{v.Major, v.Minor, v.Patch} +} + +func (p PreRelease) Slice() []string { + preRelease := string(p) + return strings.Split(preRelease, ".") +} + +func preReleaseCompare(versionA Version, versionB Version) int { + a := versionA.PreRelease + b := versionB.PreRelease + + /* Handle the case where if two versions are otherwise equal it is the + * one without a PreRelease that is greater */ + if len(a) == 0 && (len(b) > 0) { + return 1 + } else if len(b) == 0 && (len(a) > 0) { + return -1 + } + + // If there is a prerelease, check and compare each part. + return recursivePreReleaseCompare(a.Slice(), b.Slice()) +} + +func recursiveCompare(versionA []int64, versionB []int64) int { + if len(versionA) == 0 { + return 0 + } + + a := versionA[0] + b := versionB[0] + + if a > b { + return 1 + } else if a < b { + return -1 + } + + return recursiveCompare(versionA[1:], versionB[1:]) +} + +func recursivePreReleaseCompare(versionA []string, versionB []string) int { + // A larger set of pre-release fields has a higher precedence than a smaller set, + // if all of the preceding identifiers are equal. 
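+	//
+	// For example, "1.0.0-alpha" < "1.0.0-alpha.1": the identifiers compare
+	// equal through "alpha", and the version with the extra field wins.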
+ if len(versionA) == 0 { + if len(versionB) > 0 { + return -1 + } + return 0 + } else if len(versionB) == 0 { + // We're longer than versionB so return 1. + return 1 + } + + a := versionA[0] + b := versionB[0] + + aInt := false + bInt := false + + aI, err := strconv.Atoi(versionA[0]) + if err == nil { + aInt = true + } + + bI, err := strconv.Atoi(versionB[0]) + if err == nil { + bInt = true + } + + // Numeric identifiers always have lower precedence than non-numeric identifiers. + if aInt && !bInt { + return -1 + } else if !aInt && bInt { + return 1 + } + + // Handle Integer Comparison + if aInt && bInt { + if aI > bI { + return 1 + } else if aI < bI { + return -1 + } + } + + // Handle String Comparison + if a > b { + return 1 + } else if a < b { + return -1 + } + + return recursivePreReleaseCompare(versionA[1:], versionB[1:]) +} + +// BumpMajor increments the Major field by 1 and resets all other fields to their default values +func (v *Version) BumpMajor() { + v.Major += 1 + v.Minor = 0 + v.Patch = 0 + v.PreRelease = PreRelease("") + v.Metadata = "" +} + +// BumpMinor increments the Minor field by 1 and resets all other fields to their default values +func (v *Version) BumpMinor() { + v.Minor += 1 + v.Patch = 0 + v.PreRelease = PreRelease("") + v.Metadata = "" +} + +// BumpPatch increments the Patch field by 1 and resets all other fields to their default values +func (v *Version) BumpPatch() { + v.Patch += 1 + v.PreRelease = PreRelease("") + v.Metadata = "" +} + +// validateIdentifier makes sure the provided identifier satisfies semver spec +func validateIdentifier(id string) error { + if id != "" && !reIdentifier.MatchString(id) { + return fmt.Errorf("%s is not a valid semver identifier", id) + } + return nil +} + +// reIdentifier is a regular expression used to check that pre-release and metadata +// identifiers satisfy the spec requirements +var reIdentifier = regexp.MustCompile(`^[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*$`) diff --git a/vendor/github.com/coreos/go-semver/semver/semver_test.go b/vendor/github.com/coreos/go-semver/semver/semver_test.go new file mode 100644 index 000000000..3abcab2a5 --- /dev/null +++ b/vendor/github.com/coreos/go-semver/semver/semver_test.go @@ -0,0 +1,373 @@ +// Copyright 2013-2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package semver + +import ( + "bytes" + "encoding/json" + "errors" + "flag" + "fmt" + "math/rand" + "reflect" + "testing" + "time" + + "gopkg.in/yaml.v2" +) + +type fixture struct { + GreaterVersion string + LesserVersion string +} + +var fixtures = []fixture{ + fixture{"0.0.0", "0.0.0-foo"}, + fixture{"0.0.1", "0.0.0"}, + fixture{"1.0.0", "0.9.9"}, + fixture{"0.10.0", "0.9.0"}, + fixture{"0.99.0", "0.10.0"}, + fixture{"2.0.0", "1.2.3"}, + fixture{"0.0.0", "0.0.0-foo"}, + fixture{"0.0.1", "0.0.0"}, + fixture{"1.0.0", "0.9.9"}, + fixture{"0.10.0", "0.9.0"}, + fixture{"0.99.0", "0.10.0"}, + fixture{"2.0.0", "1.2.3"}, + fixture{"0.0.0", "0.0.0-foo"}, + fixture{"0.0.1", "0.0.0"}, + fixture{"1.0.0", "0.9.9"}, + fixture{"0.10.0", "0.9.0"}, + fixture{"0.99.0", "0.10.0"}, + fixture{"2.0.0", "1.2.3"}, + fixture{"1.2.3", "1.2.3-asdf"}, + fixture{"1.2.3", "1.2.3-4"}, + fixture{"1.2.3", "1.2.3-4-foo"}, + fixture{"1.2.3-5-foo", "1.2.3-5"}, + fixture{"1.2.3-5", "1.2.3-4"}, + fixture{"1.2.3-5-foo", "1.2.3-5-Foo"}, + fixture{"3.0.0", "2.7.2+asdf"}, + fixture{"3.0.0+foobar", "2.7.2"}, + fixture{"1.2.3-a.10", "1.2.3-a.5"}, + fixture{"1.2.3-a.b", "1.2.3-a.5"}, + fixture{"1.2.3-a.b", "1.2.3-a"}, + fixture{"1.2.3-a.b.c.10.d.5", "1.2.3-a.b.c.5.d.100"}, + fixture{"1.0.0", "1.0.0-rc.1"}, + fixture{"1.0.0-rc.2", "1.0.0-rc.1"}, + fixture{"1.0.0-rc.1", "1.0.0-beta.11"}, + fixture{"1.0.0-beta.11", "1.0.0-beta.2"}, + fixture{"1.0.0-beta.2", "1.0.0-beta"}, + fixture{"1.0.0-beta", "1.0.0-alpha.beta"}, + fixture{"1.0.0-alpha.beta", "1.0.0-alpha.1"}, + fixture{"1.0.0-alpha.1", "1.0.0-alpha"}, + fixture{"1.2.3-rc.1-1-1hash", "1.2.3-rc.2"}, +} + +func TestCompare(t *testing.T) { + for _, v := range fixtures { + gt, err := NewVersion(v.GreaterVersion) + if err != nil { + t.Error(err) + } + + lt, err := NewVersion(v.LesserVersion) + if err != nil { + t.Error(err) + } + + if gt.LessThan(*lt) { + t.Errorf("%s should not be less than %s", gt, lt) + } + if gt.Equal(*lt) { + t.Errorf("%s should not be equal to %s", gt, lt) + } + if gt.Compare(*lt) <= 0 { + t.Errorf("%s should be greater than %s", gt, lt) + } + if !lt.LessThan(*gt) { + t.Errorf("%s should be less than %s", lt, gt) + } + if !lt.Equal(*lt) { + t.Errorf("%s should be equal to %s", lt, lt) + } + if lt.Compare(*gt) > 0 { + t.Errorf("%s should not be greater than %s", lt, gt) + } + } +} + +func testString(t *testing.T, orig string, version *Version) { + if orig != version.String() { + t.Errorf("%s != %s", orig, version) + } +} + +func TestString(t *testing.T) { + for _, v := range fixtures { + gt, err := NewVersion(v.GreaterVersion) + if err != nil { + t.Error(err) + } + testString(t, v.GreaterVersion, gt) + + lt, err := NewVersion(v.LesserVersion) + if err != nil { + t.Error(err) + } + testString(t, v.LesserVersion, lt) + } +} + +func shuffleStringSlice(src []string) []string { + dest := make([]string, len(src)) + rand.Seed(time.Now().Unix()) + perm := rand.Perm(len(src)) + for i, v := range perm { + dest[v] = src[i] + } + return dest +} + +func TestSort(t *testing.T) { + sortedVersions := []string{"1.0.0", "1.0.2", "1.2.0", "3.1.1"} + unsortedVersions := shuffleStringSlice(sortedVersions) + + semvers := []*Version{} + for _, v := range unsortedVersions { + sv, err := NewVersion(v) + if err != nil { + t.Fatal(err) + } + semvers = append(semvers, sv) + } + + Sort(semvers) + + for idx, sv := range semvers { + if sv.String() != sortedVersions[idx] { + t.Fatalf("incorrect sort at index %v", idx) + } + } +} + +func TestBumpMajor(t *testing.T) { + version, _ := 
NewVersion("1.0.0") + version.BumpMajor() + if version.Major != 2 { + t.Fatalf("bumping major on 1.0.0 resulted in %v", version) + } + + version, _ = NewVersion("1.5.2") + version.BumpMajor() + if version.Minor != 0 && version.Patch != 0 { + t.Fatalf("bumping major on 1.5.2 resulted in %v", version) + } + + version, _ = NewVersion("1.0.0+build.1-alpha.1") + version.BumpMajor() + if version.PreRelease != "" && version.Metadata != "" { + t.Fatalf("bumping major on 1.0.0+build.1-alpha.1 resulted in %v", version) + } +} + +func TestBumpMinor(t *testing.T) { + version, _ := NewVersion("1.0.0") + version.BumpMinor() + + if version.Major != 1 { + t.Fatalf("bumping minor on 1.0.0 resulted in %v", version) + } + + if version.Minor != 1 { + t.Fatalf("bumping major on 1.0.0 resulted in %v", version) + } + + version, _ = NewVersion("1.0.0+build.1-alpha.1") + version.BumpMinor() + if version.PreRelease != "" && version.Metadata != "" { + t.Fatalf("bumping major on 1.0.0+build.1-alpha.1 resulted in %v", version) + } +} + +func TestBumpPatch(t *testing.T) { + version, _ := NewVersion("1.0.0") + version.BumpPatch() + + if version.Major != 1 { + t.Fatalf("bumping minor on 1.0.0 resulted in %v", version) + } + + if version.Minor != 0 { + t.Fatalf("bumping major on 1.0.0 resulted in %v", version) + } + + if version.Patch != 1 { + t.Fatalf("bumping major on 1.0.0 resulted in %v", version) + } + + version, _ = NewVersion("1.0.0+build.1-alpha.1") + version.BumpPatch() + if version.PreRelease != "" && version.Metadata != "" { + t.Fatalf("bumping major on 1.0.0+build.1-alpha.1 resulted in %v", version) + } +} + +func TestMust(t *testing.T) { + tests := []struct { + versionStr string + + version *Version + recov interface{} + }{ + { + versionStr: "1.0.0", + version: &Version{Major: 1}, + }, + { + versionStr: "version number", + recov: errors.New("version number is not in dotted-tri format"), + }, + } + + for _, tt := range tests { + func() { + defer func() { + recov := recover() + if !reflect.DeepEqual(tt.recov, recov) { + t.Fatalf("incorrect panic for %q: want %v, got %v", tt.versionStr, tt.recov, recov) + } + }() + + version := Must(NewVersion(tt.versionStr)) + if !reflect.DeepEqual(tt.version, version) { + t.Fatalf("incorrect version for %q: want %+v, got %+v", tt.versionStr, tt.version, version) + } + }() + } +} + +type fixtureJSON struct { + GreaterVersion *Version + LesserVersion *Version +} + +func TestJSON(t *testing.T) { + fj := make([]fixtureJSON, len(fixtures)) + for i, v := range fixtures { + var err error + fj[i].GreaterVersion, err = NewVersion(v.GreaterVersion) + if err != nil { + t.Fatal(err) + } + fj[i].LesserVersion, err = NewVersion(v.LesserVersion) + if err != nil { + t.Fatal(err) + } + } + + fromStrings, err := json.Marshal(fixtures) + if err != nil { + t.Fatal(err) + } + fromVersions, err := json.Marshal(fj) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(fromStrings, fromVersions) { + t.Errorf("Expected: %s", fromStrings) + t.Errorf("Unexpected: %s", fromVersions) + } + + fromJson := make([]fixtureJSON, 0, len(fj)) + err = json.Unmarshal(fromStrings, &fromJson) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(fromJson, fj) { + t.Error("Expected: ", fj) + t.Error("Unexpected: ", fromJson) + } +} + +func TestYAML(t *testing.T) { + document, err := yaml.Marshal(fixtures) + if err != nil { + t.Fatal(err) + } + + expected := make([]fixtureJSON, len(fixtures)) + for i, v := range fixtures { + var err error + expected[i].GreaterVersion, err = NewVersion(v.GreaterVersion) + if err 
!= nil { + t.Fatal(err) + } + expected[i].LesserVersion, err = NewVersion(v.LesserVersion) + if err != nil { + t.Fatal(err) + } + } + + fromYAML := make([]fixtureJSON, 0, len(fixtures)) + err = yaml.Unmarshal(document, &fromYAML) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(fromYAML, expected) { + t.Error("Expected: ", expected) + t.Error("Unexpected: ", fromYAML) + } +} + +func TestBadInput(t *testing.T) { + bad := []string{ + "1.2", + "1.2.3x", + "0x1.3.4", + "-1.2.3", + "1.2.3.4", + "0.88.0-11_e4e5dcabb", + "0.88.0+11_e4e5dcabb", + } + for _, b := range bad { + if _, err := NewVersion(b); err == nil { + t.Error("Improperly accepted value: ", b) + } + } +} + +func TestFlag(t *testing.T) { + v := Version{} + f := flag.NewFlagSet("version", flag.ContinueOnError) + f.Var(&v, "version", "set version") + + if err := f.Set("version", "1.2.3"); err != nil { + t.Fatal(err) + } + + if v.String() != "1.2.3" { + t.Errorf("Set wrong value %q", v) + } +} + +func ExampleVersion_LessThan() { + vA := New("1.2.3") + vB := New("3.2.1") + + fmt.Printf("%s < %s == %t\n", vA, vB, vA.LessThan(*vB)) + // Output: + // 1.2.3 < 3.2.1 == true +} diff --git a/vendor/github.com/coreos/go-semver/semver/sort.go b/vendor/github.com/coreos/go-semver/semver/sort.go new file mode 100644 index 000000000..e256b41a5 --- /dev/null +++ b/vendor/github.com/coreos/go-semver/semver/sort.go @@ -0,0 +1,38 @@ +// Copyright 2013-2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semver + +import ( + "sort" +) + +type Versions []*Version + +func (s Versions) Len() int { + return len(s) +} + +func (s Versions) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s Versions) Less(i, j int) bool { + return s[i].LessThan(*s[j]) +} + +// Sort sorts the given slice of Version +func Sort(versions []*Version) { + sort.Sort(Versions(versions)) +} diff --git a/vendor/github.com/funcy/functions_go/VERSION b/vendor/github.com/funcy/functions_go/VERSION index 5a1431ca7..9d77e730c 100644 --- a/vendor/github.com/funcy/functions_go/VERSION +++ b/vendor/github.com/funcy/functions_go/VERSION @@ -1 +1 @@ -0.1.33 \ No newline at end of file +0.1.35 \ No newline at end of file diff --git a/vendor/github.com/funcy/functions_go/client/call/call_client.go b/vendor/github.com/funcy/functions_go/client/call/call_client.go index 1055bfd81..541bcb004 100644 --- a/vendor/github.com/funcy/functions_go/client/call/call_client.go +++ b/vendor/github.com/funcy/functions_go/client/call/call_client.go @@ -23,62 +23,62 @@ type Client struct { } /* -GetAppsAppCallsRoute gets route bound calls +GetAppsAppCalls gets app bound calls -Get route-bound calls. +Get app-bound calls can filter to route-bound calls. 
*/ -func (a *Client) GetAppsAppCallsRoute(params *GetAppsAppCallsRouteParams) (*GetAppsAppCallsRouteOK, error) { +func (a *Client) GetAppsAppCalls(params *GetAppsAppCallsParams) (*GetAppsAppCallsOK, error) { // TODO: Validate the params before sending if params == nil { - params = NewGetAppsAppCallsRouteParams() + params = NewGetAppsAppCallsParams() } result, err := a.transport.Submit(&runtime.ClientOperation{ - ID: "GetAppsAppCallsRoute", + ID: "GetAppsAppCalls", Method: "GET", - PathPattern: "/apps/{app}/calls/{route}", + PathPattern: "/apps/{app}/calls/", ProducesMediaTypes: []string{"application/json"}, ConsumesMediaTypes: []string{"application/json"}, Schemes: []string{"http", "https"}, Params: params, - Reader: &GetAppsAppCallsRouteReader{formats: a.formats}, + Reader: &GetAppsAppCallsReader{formats: a.formats}, Context: params.Context, Client: params.HTTPClient, }) if err != nil { return nil, err } - return result.(*GetAppsAppCallsRouteOK), nil + return result.(*GetAppsAppCallsOK), nil } /* -GetCallsCall gets call information +GetAppsAppCallsCall gets call information Get call information */ -func (a *Client) GetCallsCall(params *GetCallsCallParams) (*GetCallsCallOK, error) { +func (a *Client) GetAppsAppCallsCall(params *GetAppsAppCallsCallParams) (*GetAppsAppCallsCallOK, error) { // TODO: Validate the params before sending if params == nil { - params = NewGetCallsCallParams() + params = NewGetAppsAppCallsCallParams() } result, err := a.transport.Submit(&runtime.ClientOperation{ - ID: "GetCallsCall", + ID: "GetAppsAppCallsCall", Method: "GET", - PathPattern: "/calls/{call}", + PathPattern: "/apps/{app}/calls/{call}", ProducesMediaTypes: []string{"application/json"}, ConsumesMediaTypes: []string{"application/json"}, Schemes: []string{"http", "https"}, Params: params, - Reader: &GetCallsCallReader{formats: a.formats}, + Reader: &GetAppsAppCallsCallReader{formats: a.formats}, Context: params.Context, Client: params.HTTPClient, }) if err != nil { return nil, err } - return result.(*GetCallsCallOK), nil + return result.(*GetAppsAppCallsCallOK), nil } diff --git a/vendor/github.com/funcy/functions_go/client/call/get_apps_app_calls_call_parameters.go b/vendor/github.com/funcy/functions_go/client/call/get_apps_app_calls_call_parameters.go new file mode 100644 index 000000000..01e9989ec --- /dev/null +++ b/vendor/github.com/funcy/functions_go/client/call/get_apps_app_calls_call_parameters.go @@ -0,0 +1,156 @@ +package call + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + "time" + + "golang.org/x/net/context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewGetAppsAppCallsCallParams creates a new GetAppsAppCallsCallParams object +// with the default values initialized. 
+func NewGetAppsAppCallsCallParams() *GetAppsAppCallsCallParams { + var () + return &GetAppsAppCallsCallParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewGetAppsAppCallsCallParamsWithTimeout creates a new GetAppsAppCallsCallParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewGetAppsAppCallsCallParamsWithTimeout(timeout time.Duration) *GetAppsAppCallsCallParams { + var () + return &GetAppsAppCallsCallParams{ + + timeout: timeout, + } +} + +// NewGetAppsAppCallsCallParamsWithContext creates a new GetAppsAppCallsCallParams object +// with the default values initialized, and the ability to set a context for a request +func NewGetAppsAppCallsCallParamsWithContext(ctx context.Context) *GetAppsAppCallsCallParams { + var () + return &GetAppsAppCallsCallParams{ + + Context: ctx, + } +} + +// NewGetAppsAppCallsCallParamsWithHTTPClient creates a new GetAppsAppCallsCallParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewGetAppsAppCallsCallParamsWithHTTPClient(client *http.Client) *GetAppsAppCallsCallParams { + var () + return &GetAppsAppCallsCallParams{ + HTTPClient: client, + } +} + +/*GetAppsAppCallsCallParams contains all the parameters to send to the API endpoint +for the get apps app calls call operation typically these are written to a http.Request +*/ +type GetAppsAppCallsCallParams struct { + + /*App + app name + + */ + App string + /*Call + Call ID. + + */ + Call string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the get apps app calls call params +func (o *GetAppsAppCallsCallParams) WithTimeout(timeout time.Duration) *GetAppsAppCallsCallParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get apps app calls call params +func (o *GetAppsAppCallsCallParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get apps app calls call params +func (o *GetAppsAppCallsCallParams) WithContext(ctx context.Context) *GetAppsAppCallsCallParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get apps app calls call params +func (o *GetAppsAppCallsCallParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get apps app calls call params +func (o *GetAppsAppCallsCallParams) WithHTTPClient(client *http.Client) *GetAppsAppCallsCallParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get apps app calls call params +func (o *GetAppsAppCallsCallParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithApp adds the app to the get apps app calls call params +func (o *GetAppsAppCallsCallParams) WithApp(app string) *GetAppsAppCallsCallParams { + o.SetApp(app) + return o +} + +// SetApp adds the app to the get apps app calls call params +func (o *GetAppsAppCallsCallParams) SetApp(app string) { + o.App = app +} + +// WithCall adds the call to the get apps app calls call params +func (o *GetAppsAppCallsCallParams) WithCall(call string) *GetAppsAppCallsCallParams { + o.SetCall(call) + return o +} + +// SetCall adds the call to the get apps app calls call params +func (o *GetAppsAppCallsCallParams) SetCall(call string) { + o.Call = call +} + +// WriteToRequest writes these params to a swagger request +func (o *GetAppsAppCallsCallParams) WriteToRequest(r runtime.ClientRequest, reg 
strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param app + if err := r.SetPathParam("app", o.App); err != nil { + return err + } + + // path param call + if err := r.SetPathParam("call", o.Call); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/funcy/functions_go/client/call/get_apps_app_calls_call_responses.go b/vendor/github.com/funcy/functions_go/client/call/get_apps_app_calls_call_responses.go new file mode 100644 index 000000000..ba614d0e8 --- /dev/null +++ b/vendor/github.com/funcy/functions_go/client/call/get_apps_app_calls_call_responses.go @@ -0,0 +1,101 @@ +package call + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/funcy/functions_go/models" +) + +// GetAppsAppCallsCallReader is a Reader for the GetAppsAppCallsCall structure. +type GetAppsAppCallsCallReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *GetAppsAppCallsCallReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewGetAppsAppCallsCallOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 404: + result := NewGetAppsAppCallsCallNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewGetAppsAppCallsCallOK creates a GetAppsAppCallsCallOK with default headers values +func NewGetAppsAppCallsCallOK() *GetAppsAppCallsCallOK { + return &GetAppsAppCallsCallOK{} +} + +/*GetAppsAppCallsCallOK handles this case with default header values. + +Call found +*/ +type GetAppsAppCallsCallOK struct { + Payload *models.CallWrapper +} + +func (o *GetAppsAppCallsCallOK) Error() string { + return fmt.Sprintf("[GET /apps/{app}/calls/{call}][%d] getAppsAppCallsCallOK %+v", 200, o.Payload) +} + +func (o *GetAppsAppCallsCallOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.CallWrapper) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetAppsAppCallsCallNotFound creates a GetAppsAppCallsCallNotFound with default headers values +func NewGetAppsAppCallsCallNotFound() *GetAppsAppCallsCallNotFound { + return &GetAppsAppCallsCallNotFound{} +} + +/*GetAppsAppCallsCallNotFound handles this case with default header values. + +Call not found. 
+*/ +type GetAppsAppCallsCallNotFound struct { + Payload *models.Error +} + +func (o *GetAppsAppCallsCallNotFound) Error() string { + return fmt.Sprintf("[GET /apps/{app}/calls/{call}][%d] getAppsAppCallsCallNotFound %+v", 404, o.Payload) +} + +func (o *GetAppsAppCallsCallNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/funcy/functions_go/client/call/get_apps_app_calls_parameters.go b/vendor/github.com/funcy/functions_go/client/call/get_apps_app_calls_parameters.go new file mode 100644 index 000000000..54fe1d111 --- /dev/null +++ b/vendor/github.com/funcy/functions_go/client/call/get_apps_app_calls_parameters.go @@ -0,0 +1,167 @@ +package call + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + "time" + + "golang.org/x/net/context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewGetAppsAppCallsParams creates a new GetAppsAppCallsParams object +// with the default values initialized. +func NewGetAppsAppCallsParams() *GetAppsAppCallsParams { + var () + return &GetAppsAppCallsParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewGetAppsAppCallsParamsWithTimeout creates a new GetAppsAppCallsParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewGetAppsAppCallsParamsWithTimeout(timeout time.Duration) *GetAppsAppCallsParams { + var () + return &GetAppsAppCallsParams{ + + timeout: timeout, + } +} + +// NewGetAppsAppCallsParamsWithContext creates a new GetAppsAppCallsParams object +// with the default values initialized, and the ability to set a context for a request +func NewGetAppsAppCallsParamsWithContext(ctx context.Context) *GetAppsAppCallsParams { + var () + return &GetAppsAppCallsParams{ + + Context: ctx, + } +} + +// NewGetAppsAppCallsParamsWithHTTPClient creates a new GetAppsAppCallsParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewGetAppsAppCallsParamsWithHTTPClient(client *http.Client) *GetAppsAppCallsParams { + var () + return &GetAppsAppCallsParams{ + HTTPClient: client, + } +} + +/*GetAppsAppCallsParams contains all the parameters to send to the API endpoint +for the get apps app calls operation typically these are written to a http.Request +*/ +type GetAppsAppCallsParams struct { + + /*App + App name. + + */ + App string + /*Route + App route. 
+ + */ + Route *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the get apps app calls params +func (o *GetAppsAppCallsParams) WithTimeout(timeout time.Duration) *GetAppsAppCallsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get apps app calls params +func (o *GetAppsAppCallsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get apps app calls params +func (o *GetAppsAppCallsParams) WithContext(ctx context.Context) *GetAppsAppCallsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get apps app calls params +func (o *GetAppsAppCallsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get apps app calls params +func (o *GetAppsAppCallsParams) WithHTTPClient(client *http.Client) *GetAppsAppCallsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get apps app calls params +func (o *GetAppsAppCallsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithApp adds the app to the get apps app calls params +func (o *GetAppsAppCallsParams) WithApp(app string) *GetAppsAppCallsParams { + o.SetApp(app) + return o +} + +// SetApp adds the app to the get apps app calls params +func (o *GetAppsAppCallsParams) SetApp(app string) { + o.App = app +} + +// WithRoute adds the route to the get apps app calls params +func (o *GetAppsAppCallsParams) WithRoute(route *string) *GetAppsAppCallsParams { + o.SetRoute(route) + return o +} + +// SetRoute adds the route to the get apps app calls params +func (o *GetAppsAppCallsParams) SetRoute(route *string) { + o.Route = route +} + +// WriteToRequest writes these params to a swagger request +func (o *GetAppsAppCallsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param app + if err := r.SetPathParam("app", o.App); err != nil { + return err + } + + if o.Route != nil { + + // query param route + var qrRoute string + if o.Route != nil { + qrRoute = *o.Route + } + qRoute := qrRoute + if qRoute != "" { + if err := r.SetQueryParam("route", qRoute); err != nil { + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/funcy/functions_go/client/call/get_apps_app_calls_responses.go b/vendor/github.com/funcy/functions_go/client/call/get_apps_app_calls_responses.go new file mode 100644 index 000000000..8e9a9ba52 --- /dev/null +++ b/vendor/github.com/funcy/functions_go/client/call/get_apps_app_calls_responses.go @@ -0,0 +1,101 @@ +package call + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/funcy/functions_go/models" +) + +// GetAppsAppCallsReader is a Reader for the GetAppsAppCalls structure. +type GetAppsAppCallsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *GetAppsAppCallsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewGetAppsAppCallsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 404: + result := NewGetAppsAppCallsNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewGetAppsAppCallsOK creates a GetAppsAppCallsOK with default headers values +func NewGetAppsAppCallsOK() *GetAppsAppCallsOK { + return &GetAppsAppCallsOK{} +} + +/*GetAppsAppCallsOK handles this case with default header values. + +Calls found +*/ +type GetAppsAppCallsOK struct { + Payload *models.CallsWrapper +} + +func (o *GetAppsAppCallsOK) Error() string { + return fmt.Sprintf("[GET /apps/{app}/calls/][%d] getAppsAppCallsOK %+v", 200, o.Payload) +} + +func (o *GetAppsAppCallsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.CallsWrapper) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetAppsAppCallsNotFound creates a GetAppsAppCallsNotFound with default headers values +func NewGetAppsAppCallsNotFound() *GetAppsAppCallsNotFound { + return &GetAppsAppCallsNotFound{} +} + +/*GetAppsAppCallsNotFound handles this case with default header values. + +Calls not found. +*/ +type GetAppsAppCallsNotFound struct { + Payload *models.Error +} + +func (o *GetAppsAppCallsNotFound) Error() string { + return fmt.Sprintf("[GET /apps/{app}/calls/][%d] getAppsAppCallsNotFound %+v", 404, o.Payload) +} + +func (o *GetAppsAppCallsNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/funcy/functions_go/client/operations/delete_apps_app_calls_call_log_parameters.go b/vendor/github.com/funcy/functions_go/client/operations/delete_apps_app_calls_call_log_parameters.go new file mode 100644 index 000000000..09f1033f3 --- /dev/null +++ b/vendor/github.com/funcy/functions_go/client/operations/delete_apps_app_calls_call_log_parameters.go @@ -0,0 +1,156 @@ +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + "time" + + "golang.org/x/net/context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewDeleteAppsAppCallsCallLogParams creates a new DeleteAppsAppCallsCallLogParams object +// with the default values initialized. 
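+//
+// The three With* constructor variants below only pre-set the timeout,
+// context, or HTTP client; all of them yield the same params struct. A
+// hedged sketch of the fluent setters, not part of the generated file
+// (values are illustrative):
+//
+//	params := NewDeleteAppsAppCallsCallLogParams().
+//		WithTimeout(30 * time.Second).
+//		WithApp("myapp").
+//		WithCall("call-id")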
+func NewDeleteAppsAppCallsCallLogParams() *DeleteAppsAppCallsCallLogParams { + var () + return &DeleteAppsAppCallsCallLogParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewDeleteAppsAppCallsCallLogParamsWithTimeout creates a new DeleteAppsAppCallsCallLogParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewDeleteAppsAppCallsCallLogParamsWithTimeout(timeout time.Duration) *DeleteAppsAppCallsCallLogParams { + var () + return &DeleteAppsAppCallsCallLogParams{ + + timeout: timeout, + } +} + +// NewDeleteAppsAppCallsCallLogParamsWithContext creates a new DeleteAppsAppCallsCallLogParams object +// with the default values initialized, and the ability to set a context for a request +func NewDeleteAppsAppCallsCallLogParamsWithContext(ctx context.Context) *DeleteAppsAppCallsCallLogParams { + var () + return &DeleteAppsAppCallsCallLogParams{ + + Context: ctx, + } +} + +// NewDeleteAppsAppCallsCallLogParamsWithHTTPClient creates a new DeleteAppsAppCallsCallLogParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewDeleteAppsAppCallsCallLogParamsWithHTTPClient(client *http.Client) *DeleteAppsAppCallsCallLogParams { + var () + return &DeleteAppsAppCallsCallLogParams{ + HTTPClient: client, + } +} + +/*DeleteAppsAppCallsCallLogParams contains all the parameters to send to the API endpoint +for the delete apps app calls call log operation typically these are written to a http.Request +*/ +type DeleteAppsAppCallsCallLogParams struct { + + /*App + App name. + + */ + App string + /*Call + Call ID. + + */ + Call string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the delete apps app calls call log params +func (o *DeleteAppsAppCallsCallLogParams) WithTimeout(timeout time.Duration) *DeleteAppsAppCallsCallLogParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the delete apps app calls call log params +func (o *DeleteAppsAppCallsCallLogParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the delete apps app calls call log params +func (o *DeleteAppsAppCallsCallLogParams) WithContext(ctx context.Context) *DeleteAppsAppCallsCallLogParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the delete apps app calls call log params +func (o *DeleteAppsAppCallsCallLogParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the delete apps app calls call log params +func (o *DeleteAppsAppCallsCallLogParams) WithHTTPClient(client *http.Client) *DeleteAppsAppCallsCallLogParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the delete apps app calls call log params +func (o *DeleteAppsAppCallsCallLogParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithApp adds the app to the delete apps app calls call log params +func (o *DeleteAppsAppCallsCallLogParams) WithApp(app string) *DeleteAppsAppCallsCallLogParams { + o.SetApp(app) + return o +} + +// SetApp adds the app to the delete apps app calls call log params +func (o *DeleteAppsAppCallsCallLogParams) SetApp(app string) { + o.App = app +} + +// WithCall adds the call to the delete apps app calls call log params +func (o *DeleteAppsAppCallsCallLogParams) WithCall(call string) *DeleteAppsAppCallsCallLogParams { + o.SetCall(call) + return o +} + +// SetCall 
adds the call to the delete apps app calls call log params +func (o *DeleteAppsAppCallsCallLogParams) SetCall(call string) { + o.Call = call +} + +// WriteToRequest writes these params to a swagger request +func (o *DeleteAppsAppCallsCallLogParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param app + if err := r.SetPathParam("app", o.App); err != nil { + return err + } + + // path param call + if err := r.SetPathParam("call", o.Call); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/funcy/functions_go/client/operations/delete_apps_app_calls_call_log_responses.go b/vendor/github.com/funcy/functions_go/client/operations/delete_apps_app_calls_call_log_responses.go new file mode 100644 index 000000000..62a5beaf6 --- /dev/null +++ b/vendor/github.com/funcy/functions_go/client/operations/delete_apps_app_calls_call_log_responses.go @@ -0,0 +1,138 @@ +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/funcy/functions_go/models" +) + +// DeleteAppsAppCallsCallLogReader is a Reader for the DeleteAppsAppCallsCallLog structure. +type DeleteAppsAppCallsCallLogReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *DeleteAppsAppCallsCallLogReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 202: + result := NewDeleteAppsAppCallsCallLogAccepted() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 404: + result := NewDeleteAppsAppCallsCallLogNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + result := NewDeleteAppsAppCallsCallLogDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewDeleteAppsAppCallsCallLogAccepted creates a DeleteAppsAppCallsCallLogAccepted with default headers values +func NewDeleteAppsAppCallsCallLogAccepted() *DeleteAppsAppCallsCallLogAccepted { + return &DeleteAppsAppCallsCallLogAccepted{} +} + +/*DeleteAppsAppCallsCallLogAccepted handles this case with default header values. + +Log delete request accepted +*/ +type DeleteAppsAppCallsCallLogAccepted struct { +} + +func (o *DeleteAppsAppCallsCallLogAccepted) Error() string { + return fmt.Sprintf("[DELETE /apps/{app}/calls/{call}/log][%d] deleteAppsAppCallsCallLogAccepted ", 202) +} + +func (o *DeleteAppsAppCallsCallLogAccepted) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewDeleteAppsAppCallsCallLogNotFound creates a DeleteAppsAppCallsCallLogNotFound with default headers values +func NewDeleteAppsAppCallsCallLogNotFound() *DeleteAppsAppCallsCallLogNotFound { + return &DeleteAppsAppCallsCallLogNotFound{} +} + +/*DeleteAppsAppCallsCallLogNotFound handles this case with default header values. + +Does not exist. 
+*/ +type DeleteAppsAppCallsCallLogNotFound struct { + Payload *models.Error +} + +func (o *DeleteAppsAppCallsCallLogNotFound) Error() string { + return fmt.Sprintf("[DELETE /apps/{app}/calls/{call}/log][%d] deleteAppsAppCallsCallLogNotFound %+v", 404, o.Payload) +} + +func (o *DeleteAppsAppCallsCallLogNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewDeleteAppsAppCallsCallLogDefault creates a DeleteAppsAppCallsCallLogDefault with default headers values +func NewDeleteAppsAppCallsCallLogDefault(code int) *DeleteAppsAppCallsCallLogDefault { + return &DeleteAppsAppCallsCallLogDefault{ + _statusCode: code, + } +} + +/*DeleteAppsAppCallsCallLogDefault handles this case with default header values. + +Unexpected error +*/ +type DeleteAppsAppCallsCallLogDefault struct { + _statusCode int + + Payload *models.Error +} + +// Code gets the status code for the delete apps app calls call log default response +func (o *DeleteAppsAppCallsCallLogDefault) Code() int { + return o._statusCode +} + +func (o *DeleteAppsAppCallsCallLogDefault) Error() string { + return fmt.Sprintf("[DELETE /apps/{app}/calls/{call}/log][%d] DeleteAppsAppCallsCallLog default %+v", o._statusCode, o.Payload) +} + +func (o *DeleteAppsAppCallsCallLogDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/funcy/functions_go/client/operations/get_apps_app_calls_call_log_parameters.go b/vendor/github.com/funcy/functions_go/client/operations/get_apps_app_calls_call_log_parameters.go new file mode 100644 index 000000000..abe49b6aa --- /dev/null +++ b/vendor/github.com/funcy/functions_go/client/operations/get_apps_app_calls_call_log_parameters.go @@ -0,0 +1,156 @@ +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + "time" + + "golang.org/x/net/context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + + strfmt "github.com/go-openapi/strfmt" +) + +// NewGetAppsAppCallsCallLogParams creates a new GetAppsAppCallsCallLogParams object +// with the default values initialized. 
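+//
+// As a hedged alternative to the flat client-side timeout, the request can
+// be bound to a context via the WithContext constructor variant; the sketch
+// below is illustrative, not part of the generated file:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//	defer cancel()
+//	params := NewGetAppsAppCallsCallLogParamsWithContext(ctx).
+//		WithApp("myapp").
+//		WithCall("call-id")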
+func NewGetAppsAppCallsCallLogParams() *GetAppsAppCallsCallLogParams { + var () + return &GetAppsAppCallsCallLogParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewGetAppsAppCallsCallLogParamsWithTimeout creates a new GetAppsAppCallsCallLogParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewGetAppsAppCallsCallLogParamsWithTimeout(timeout time.Duration) *GetAppsAppCallsCallLogParams { + var () + return &GetAppsAppCallsCallLogParams{ + + timeout: timeout, + } +} + +// NewGetAppsAppCallsCallLogParamsWithContext creates a new GetAppsAppCallsCallLogParams object +// with the default values initialized, and the ability to set a context for a request +func NewGetAppsAppCallsCallLogParamsWithContext(ctx context.Context) *GetAppsAppCallsCallLogParams { + var () + return &GetAppsAppCallsCallLogParams{ + + Context: ctx, + } +} + +// NewGetAppsAppCallsCallLogParamsWithHTTPClient creates a new GetAppsAppCallsCallLogParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewGetAppsAppCallsCallLogParamsWithHTTPClient(client *http.Client) *GetAppsAppCallsCallLogParams { + var () + return &GetAppsAppCallsCallLogParams{ + HTTPClient: client, + } +} + +/*GetAppsAppCallsCallLogParams contains all the parameters to send to the API endpoint +for the get apps app calls call log operation typically these are written to a http.Request +*/ +type GetAppsAppCallsCallLogParams struct { + + /*App + App Name + + */ + App string + /*Call + Call ID. + + */ + Call string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the get apps app calls call log params +func (o *GetAppsAppCallsCallLogParams) WithTimeout(timeout time.Duration) *GetAppsAppCallsCallLogParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get apps app calls call log params +func (o *GetAppsAppCallsCallLogParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get apps app calls call log params +func (o *GetAppsAppCallsCallLogParams) WithContext(ctx context.Context) *GetAppsAppCallsCallLogParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get apps app calls call log params +func (o *GetAppsAppCallsCallLogParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get apps app calls call log params +func (o *GetAppsAppCallsCallLogParams) WithHTTPClient(client *http.Client) *GetAppsAppCallsCallLogParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get apps app calls call log params +func (o *GetAppsAppCallsCallLogParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithApp adds the app to the get apps app calls call log params +func (o *GetAppsAppCallsCallLogParams) WithApp(app string) *GetAppsAppCallsCallLogParams { + o.SetApp(app) + return o +} + +// SetApp adds the app to the get apps app calls call log params +func (o *GetAppsAppCallsCallLogParams) SetApp(app string) { + o.App = app +} + +// WithCall adds the call to the get apps app calls call log params +func (o *GetAppsAppCallsCallLogParams) WithCall(call string) *GetAppsAppCallsCallLogParams { + o.SetCall(call) + return o +} + +// SetCall adds the call to the get apps app calls call log params +func (o *GetAppsAppCallsCallLogParams) SetCall(call string) { + o.Call = call 
+} + +// WriteToRequest writes these params to a swagger request +func (o *GetAppsAppCallsCallLogParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param app + if err := r.SetPathParam("app", o.App); err != nil { + return err + } + + // path param call + if err := r.SetPathParam("call", o.Call); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/funcy/functions_go/client/operations/get_apps_app_calls_call_log_responses.go b/vendor/github.com/funcy/functions_go/client/operations/get_apps_app_calls_call_log_responses.go new file mode 100644 index 000000000..4b2e6c4df --- /dev/null +++ b/vendor/github.com/funcy/functions_go/client/operations/get_apps_app_calls_call_log_responses.go @@ -0,0 +1,101 @@ +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/funcy/functions_go/models" +) + +// GetAppsAppCallsCallLogReader is a Reader for the GetAppsAppCallsCallLog structure. +type GetAppsAppCallsCallLogReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *GetAppsAppCallsCallLogReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + + case 200: + result := NewGetAppsAppCallsCallLogOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + case 404: + result := NewGetAppsAppCallsCallLogNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewGetAppsAppCallsCallLogOK creates a GetAppsAppCallsCallLogOK with default headers values +func NewGetAppsAppCallsCallLogOK() *GetAppsAppCallsCallLogOK { + return &GetAppsAppCallsCallLogOK{} +} + +/*GetAppsAppCallsCallLogOK handles this case with default header values. + +Log found +*/ +type GetAppsAppCallsCallLogOK struct { + Payload *models.LogWrapper +} + +func (o *GetAppsAppCallsCallLogOK) Error() string { + return fmt.Sprintf("[GET /apps/{app}/calls/{call}/log][%d] getAppsAppCallsCallLogOK %+v", 200, o.Payload) +} + +func (o *GetAppsAppCallsCallLogOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.LogWrapper) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetAppsAppCallsCallLogNotFound creates a GetAppsAppCallsCallLogNotFound with default headers values +func NewGetAppsAppCallsCallLogNotFound() *GetAppsAppCallsCallLogNotFound { + return &GetAppsAppCallsCallLogNotFound{} +} + +/*GetAppsAppCallsCallLogNotFound handles this case with default header values. + +Log not found. 
+*/ +type GetAppsAppCallsCallLogNotFound struct { + Payload *models.Error +} + +func (o *GetAppsAppCallsCallLogNotFound) Error() string { + return fmt.Sprintf("[GET /apps/{app}/calls/{call}/log][%d] getAppsAppCallsCallLogNotFound %+v", 404, o.Payload) +} + +func (o *GetAppsAppCallsCallLogNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/funcy/functions_go/client/operations/operations_client.go b/vendor/github.com/funcy/functions_go/client/operations/operations_client.go index e2d306ba5..867f0ecd5 100644 --- a/vendor/github.com/funcy/functions_go/client/operations/operations_client.go +++ b/vendor/github.com/funcy/functions_go/client/operations/operations_client.go @@ -23,62 +23,62 @@ type Client struct { } /* -DeleteCallsCallLog deletes call log entry +DeleteAppsAppCallsCallLog deletes call log entry Delete call log entry */ -func (a *Client) DeleteCallsCallLog(params *DeleteCallsCallLogParams) (*DeleteCallsCallLogAccepted, error) { +func (a *Client) DeleteAppsAppCallsCallLog(params *DeleteAppsAppCallsCallLogParams) (*DeleteAppsAppCallsCallLogAccepted, error) { // TODO: Validate the params before sending if params == nil { - params = NewDeleteCallsCallLogParams() + params = NewDeleteAppsAppCallsCallLogParams() } result, err := a.transport.Submit(&runtime.ClientOperation{ - ID: "DeleteCallsCallLog", + ID: "DeleteAppsAppCallsCallLog", Method: "DELETE", - PathPattern: "/calls/{call}/log", + PathPattern: "/apps/{app}/calls/{call}/log", ProducesMediaTypes: []string{"application/json"}, ConsumesMediaTypes: []string{"application/json"}, Schemes: []string{"http", "https"}, Params: params, - Reader: &DeleteCallsCallLogReader{formats: a.formats}, + Reader: &DeleteAppsAppCallsCallLogReader{formats: a.formats}, Context: params.Context, Client: params.HTTPClient, }) if err != nil { return nil, err } - return result.(*DeleteCallsCallLogAccepted), nil + return result.(*DeleteAppsAppCallsCallLogAccepted), nil } /* -GetCallsCallLog gets call logs +GetAppsAppCallsCallLog gets call logs Get call logs */ -func (a *Client) GetCallsCallLog(params *GetCallsCallLogParams) (*GetCallsCallLogOK, error) { +func (a *Client) GetAppsAppCallsCallLog(params *GetAppsAppCallsCallLogParams) (*GetAppsAppCallsCallLogOK, error) { // TODO: Validate the params before sending if params == nil { - params = NewGetCallsCallLogParams() + params = NewGetAppsAppCallsCallLogParams() } result, err := a.transport.Submit(&runtime.ClientOperation{ - ID: "GetCallsCallLog", + ID: "GetAppsAppCallsCallLog", Method: "GET", - PathPattern: "/calls/{call}/log", + PathPattern: "/apps/{app}/calls/{call}/log", ProducesMediaTypes: []string{"application/json"}, ConsumesMediaTypes: []string{"application/json"}, Schemes: []string{"http", "https"}, Params: params, - Reader: &GetCallsCallLogReader{formats: a.formats}, + Reader: &GetAppsAppCallsCallLogReader{formats: a.formats}, Context: params.Context, Client: params.HTTPClient, }) if err != nil { return nil, err } - return result.(*GetCallsCallLogOK), nil + return result.(*GetAppsAppCallsCallLogOK), nil } diff --git a/vendor/github.com/funcy/functions_go/client/routes/patch_apps_app_routes_route_responses.go b/vendor/github.com/funcy/functions_go/client/routes/patch_apps_app_routes_route_responses.go index c8a057dff..a2941a619 100644 
--- a/vendor/github.com/funcy/functions_go/client/routes/patch_apps_app_routes_route_responses.go +++ b/vendor/github.com/funcy/functions_go/client/routes/patch_apps_app_routes_route_responses.go @@ -121,7 +121,7 @@ func NewPatchAppsAppRoutesRouteNotFound() *PatchAppsAppRoutesRouteNotFound { /*PatchAppsAppRoutesRouteNotFound handles this case with default header values. -App does not exist. +App / Route does not exist. */ type PatchAppsAppRoutesRouteNotFound struct { Payload *models.Error diff --git a/vendor/github.com/funcy/functions_go/client/routes/put_apps_app_routes_route_parameters.go b/vendor/github.com/funcy/functions_go/client/routes/put_apps_app_routes_route_parameters.go index a1026bb73..fb9d5479b 100644 --- a/vendor/github.com/funcy/functions_go/client/routes/put_apps_app_routes_route_parameters.go +++ b/vendor/github.com/funcy/functions_go/client/routes/put_apps_app_routes_route_parameters.go @@ -4,6 +4,7 @@ package routes // Editing this file might prove futile when you re-run the swagger generate command import ( + "net/http" "time" "golang.org/x/net/context" @@ -47,6 +48,15 @@ func NewPutAppsAppRoutesRouteParamsWithContext(ctx context.Context) *PutAppsAppR } } +// NewPutAppsAppRoutesRouteParamsWithHTTPClient creates a new PutAppsAppRoutesRouteParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewPutAppsAppRoutesRouteParamsWithHTTPClient(client *http.Client) *PutAppsAppRoutesRouteParams { + var () + return &PutAppsAppRoutesRouteParams{ + HTTPClient: client, + } +} + /*PutAppsAppRoutesRouteParams contains all the parameters to send to the API endpoint for the put apps app routes route operation typically these are written to a http.Request */ @@ -68,8 +78,9 @@ type PutAppsAppRoutesRouteParams struct { */ Route string - timeout time.Duration - Context context.Context + timeout time.Duration + Context context.Context + HTTPClient *http.Client } // WithTimeout adds the timeout to the put apps app routes route params @@ -94,6 +105,17 @@ func (o *PutAppsAppRoutesRouteParams) SetContext(ctx context.Context) { o.Context = ctx } +// WithHTTPClient adds the HTTPClient to the put apps app routes route params +func (o *PutAppsAppRoutesRouteParams) WithHTTPClient(client *http.Client) *PutAppsAppRoutesRouteParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the put apps app routes route params +func (o *PutAppsAppRoutesRouteParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + // WithApp adds the app to the put apps app routes route params func (o *PutAppsAppRoutesRouteParams) WithApp(app string) *PutAppsAppRoutesRouteParams { o.SetApp(app) @@ -130,7 +152,9 @@ func (o *PutAppsAppRoutesRouteParams) SetRoute(route string) { // WriteToRequest writes these params to a swagger request func (o *PutAppsAppRoutesRouteParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - r.SetTimeout(o.timeout) + if err := r.SetTimeout(o.timeout); err != nil { + return err + } var res []error // path param app diff --git a/vendor/github.com/funcy/functions_go/client/routes/put_apps_app_routes_route_responses.go b/vendor/github.com/funcy/functions_go/client/routes/put_apps_app_routes_route_responses.go index 1ce8721b2..0e973c669 100644 --- a/vendor/github.com/funcy/functions_go/client/routes/put_apps_app_routes_route_responses.go +++ b/vendor/github.com/funcy/functions_go/client/routes/put_apps_app_routes_route_responses.go @@ -23,8 +23,8 @@ type 
PutAppsAppRoutesRouteReader struct { func (o *PutAppsAppRoutesRouteReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { switch response.Code() { - case 201: - result := NewPutAppsAppRoutesRouteCreated() + case 200: + result := NewPutAppsAppRoutesRouteOK() if err := result.readResponse(response, consumer, o.formats); err != nil { return nil, err } @@ -37,40 +37,36 @@ func (o *PutAppsAppRoutesRouteReader) ReadResponse(response runtime.ClientRespon } return nil, result - case 500: - result := NewPutAppsAppRoutesRouteInternalServerError() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: result := NewPutAppsAppRoutesRouteDefault(response.Code()) if err := result.readResponse(response, consumer, o.formats); err != nil { return nil, err } + if response.Code()/100 == 2 { + return result, nil + } return nil, result } } -// NewPutAppsAppRoutesRouteCreated creates a PutAppsAppRoutesRouteCreated with default headers values -func NewPutAppsAppRoutesRouteCreated() *PutAppsAppRoutesRouteCreated { - return &PutAppsAppRoutesRouteCreated{} +// NewPutAppsAppRoutesRouteOK creates a PutAppsAppRoutesRouteOK with default headers values +func NewPutAppsAppRoutesRouteOK() *PutAppsAppRoutesRouteOK { + return &PutAppsAppRoutesRouteOK{} } -/*PutAppsAppRoutesRouteCreated handles this case with default header values. +/*PutAppsAppRoutesRouteOK handles this case with default header values. -Route updated +Route created or updated */ -type PutAppsAppRoutesRouteCreated struct { +type PutAppsAppRoutesRouteOK struct { Payload *models.RouteWrapper } -func (o *PutAppsAppRoutesRouteCreated) Error() string { - return fmt.Sprintf("[PUT /apps/{app}/routes/{route}][%d] putAppsAppRoutesRouteCreated %+v", 201, o.Payload) +func (o *PutAppsAppRoutesRouteOK) Error() string { + return fmt.Sprintf("[PUT /apps/{app}/routes/{route}][%d] putAppsAppRoutesRouteOK %+v", 200, o.Payload) } -func (o *PutAppsAppRoutesRouteCreated) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { +func (o *PutAppsAppRoutesRouteOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { o.Payload = new(models.RouteWrapper) @@ -89,7 +85,7 @@ func NewPutAppsAppRoutesRouteBadRequest() *PutAppsAppRoutesRouteBadRequest { /*PutAppsAppRoutesRouteBadRequest handles this case with default header values. -One or more of the routes were invalid due to parameters being missing or invalid. +Invalid route due to parameters being missing or invalid. */ type PutAppsAppRoutesRouteBadRequest struct { Payload *models.Error @@ -111,35 +107,6 @@ func (o *PutAppsAppRoutesRouteBadRequest) readResponse(response runtime.ClientRe return nil } -// NewPutAppsAppRoutesRouteInternalServerError creates a PutAppsAppRoutesRouteInternalServerError with default headers values -func NewPutAppsAppRoutesRouteInternalServerError() *PutAppsAppRoutesRouteInternalServerError { - return &PutAppsAppRoutesRouteInternalServerError{} -} - -/*PutAppsAppRoutesRouteInternalServerError handles this case with default header values. - -Could not accept routes due to internal error. 
-*/ -type PutAppsAppRoutesRouteInternalServerError struct { - Payload *models.Error -} - -func (o *PutAppsAppRoutesRouteInternalServerError) Error() string { - return fmt.Sprintf("[PUT /apps/{app}/routes/{route}][%d] putAppsAppRoutesRouteInternalServerError %+v", 500, o.Payload) -} - -func (o *PutAppsAppRoutesRouteInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - o.Payload = new(models.Error) - - // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - // NewPutAppsAppRoutesRouteDefault creates a PutAppsAppRoutesRouteDefault with default headers values func NewPutAppsAppRoutesRouteDefault(code int) *PutAppsAppRoutesRouteDefault { return &PutAppsAppRoutesRouteDefault{ diff --git a/vendor/github.com/funcy/functions_go/client/routes/routes_client.go b/vendor/github.com/funcy/functions_go/client/routes/routes_client.go index df645680f..aa672873b 100644 --- a/vendor/github.com/funcy/functions_go/client/routes/routes_client.go +++ b/vendor/github.com/funcy/functions_go/client/routes/routes_client.go @@ -113,7 +113,7 @@ func (a *Client) GetAppsAppRoutesRoute(params *GetAppsAppRoutesRouteParams) (*Ge } /* -PatchAppsAppRoutesRoute updates a route +PatchAppsAppRoutesRoute updates a route. Fails if the route or app does not exist; accepts partial updates and skips validation of zero values. Update a route */ @@ -145,7 +145,7 @@ func (a *Client) PatchAppsAppRoutesRoute(params *PatchAppsAppRoutesRouteParams) /* PostAppsAppRoutes creates new route -Create a new route in an app, if app doesn't exists, it creates the app +Create a new route in an app; if the app doesn't exist, it creates the app. Post does not skip validation of zero values.
*/ func (a *Client) PostAppsAppRoutes(params *PostAppsAppRoutesParams) (*PostAppsAppRoutesOK, error) { // TODO: Validate the params before sending @@ -172,6 +172,36 @@ func (a *Client) PostAppsAppRoutes(params *PostAppsAppRoutesParams) (*PostAppsAp } +/* +PutAppsAppRoutesRoute creates a route if it does not exist and updates it if it does. Will also create the app if it does not exist. Put does not skip validation of zero values. + +Update or Create a route +*/ +func (a *Client) PutAppsAppRoutesRoute(params *PutAppsAppRoutesRouteParams) (*PutAppsAppRoutesRouteOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewPutAppsAppRoutesRouteParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "PutAppsAppRoutesRoute", + Method: "PUT", + PathPattern: "/apps/{app}/routes/{route}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http", "https"}, + Params: params, + Reader: &PutAppsAppRoutesRouteReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*PutAppsAppRoutesRouteOK), nil + +} + // SetTransport changes the transport on the client func (a *Client) SetTransport(transport runtime.ClientTransport) { a.transport = transport diff --git a/vendor/github.com/giantswarm/semver-bump/.gitignore b/vendor/github.com/giantswarm/semver-bump/.gitignore new file mode 100644 index 000000000..da5874421 --- /dev/null +++ b/vendor/github.com/giantswarm/semver-bump/.gitignore @@ -0,0 +1,2 @@ +/.gobuild +/semver-bump diff --git a/vendor/github.com/giantswarm/semver-bump/.travis.yml b/vendor/github.com/giantswarm/semver-bump/.travis.yml new file mode 100644 index 000000000..4805aed68 --- /dev/null +++ b/vendor/github.com/giantswarm/semver-bump/.travis.yml @@ -0,0 +1,8 @@ +language: go + +go: + - 1.2 + +install: make .gobuild get-deps + +script: make run-tests diff --git a/vendor/github.com/giantswarm/semver-bump/LICENSE b/vendor/github.com/giantswarm/semver-bump/LICENSE new file mode 100644 index 000000000..10669d25c --- /dev/null +++ b/vendor/github.com/giantswarm/semver-bump/LICENSE @@ -0,0 +1,13 @@ +Copyright 2015 Giant Swarm GmbH + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/giantswarm/semver-bump/Makefile b/vendor/github.com/giantswarm/semver-bump/Makefile new file mode 100644 index 000000000..9db593a14 --- /dev/null +++ b/vendor/github.com/giantswarm/semver-bump/Makefile @@ -0,0 +1,36 @@ +PROJECT=semver-bump + +BUILD_PATH := $(shell pwd)/.gobuild + +.PHONY=all get-deps build + +PROJECT_PATH := "$(BUILD_PATH)/src/github.com/giantswarm" + +GOPATH := $(BUILD_PATH) + +SOURCE=$(shell find . -name '*.go') + +BIN := $(PROJECT) + +VERSION := $(shell cat VERSION) + +all: .gobuild get-deps $(BIN) + +get-deps: .gobuild + GOPATH=$(GOPATH) go get -d -v github.com/giantswarm/$(PROJECT) + +.gobuild: + mkdir -p $(PROJECT_PATH) + cd "$(PROJECT_PATH)" && ln -s ../../../..
$(PROJECT) + +$(BIN): $(SOURCE) + GOPATH=$(GOPATH) go build -a -ldflags "-X main.projectVersion $(VERSION)" -o $(BIN) + +install: $(BIN) + cp $(BIN) /usr/local/bin/ + +clean: + rm -rf $(BUILD_PATH) $(BIN) + +run-tests: .gobuild + GOPATH=$(GOPATH) go test ./... diff --git a/vendor/github.com/giantswarm/semver-bump/README.md b/vendor/github.com/giantswarm/semver-bump/README.md new file mode 100644 index 000000000..a92b15319 --- /dev/null +++ b/vendor/github.com/giantswarm/semver-bump/README.md @@ -0,0 +1,40 @@ +# Semantic Versioning Bumper + +[![Build Status](https://travis-ci.org/giantswarm/semver-bump.svg?branch=master)](https://travis-ci.org/giantswarm/semver-bump) + +A simple semantic versioning file bumper to keep your version files in line, +built by [Giant Swarm](http://giantswarm.io). + +## Installing + +You can install semver-bump from source using `go get`: + + go get github.com/giantswarm/semver-bump + +Because Go expects all of your libraries to be found in either `$GOROOT` or + `$GOPATH`, it's helpful to symlink the project to one of the following paths: + + * `ln -s /path/to/your/semver-bump $GOPATH/src/github.com/giantswarm/semver-bump` + * `ln -s /path/to/your/semver-bump $GOROOT/src/pkg/github.com/giantswarm/semver-bump` + +## Running + +With semver-bump you can bump your project's version, which is stored in a `VERSION` file +in your project. It supports bumping of major, minor and patch versions via the +following subcommands: + + * `semver-bump major-release` + * `semver-bump minor-release` + * `semver-bump patch-release` + +### Initializing an existing project + +If you are already working on a project using semantic versioning, you can +initialize the version file with the default version 0.1.0: + + semver-bump init + +Using the `-i` flag you can also initialize the version file with your current +version: + + semver-bump init -i 1.2.1 diff --git a/vendor/github.com/giantswarm/semver-bump/VERSION b/vendor/github.com/giantswarm/semver-bump/VERSION new file mode 100644 index 000000000..d4ce0dcda --- /dev/null +++ b/vendor/github.com/giantswarm/semver-bump/VERSION @@ -0,0 +1 @@ +1.1.1+git \ No newline at end of file diff --git a/vendor/github.com/giantswarm/semver-bump/bump/bump.go b/vendor/github.com/giantswarm/semver-bump/bump/bump.go new file mode 100644 index 000000000..02e3d18a0 --- /dev/null +++ b/vendor/github.com/giantswarm/semver-bump/bump/bump.go @@ -0,0 +1,101 @@ +package bump + +import ( + "github.com/coreos/go-semver/semver" + "github.com/giantswarm/semver-bump/storage" + "github.com/juju/errgo/errors" +) + +type versionBumpCallback func(version *semver.Version) + +type SemverBumper struct { + storage storage.VersionStorage + versionFile string +} + +func NewSemverBumper(vs storage.VersionStorage, versionFile string) *SemverBumper { + return &SemverBumper{vs, versionFile} +} + +func (sb SemverBumper) BumpMajorVersion(preRelease string, metadata string) (*semver.Version, error) { + v, err := sb.updateVersionFile(func(version *semver.Version) { + version.BumpMajor() + }, preRelease, metadata) + + if err != nil { + return nil, errors.Mask(err) + } + + return v, nil +} + +func (sb SemverBumper) BumpMinorVersion(preRelease string, metadata string) (*semver.Version, error) { + v, err := sb.updateVersionFile(func(version *semver.Version) { + version.BumpMinor() + }, preRelease, metadata) + + if err != nil { + return nil, errors.Mask(err) + } + + return v, nil +} + +func (sb SemverBumper) BumpPatchVersion(preRelease string, metadata string) (*semver.Version, error) { + v, err
:= sb.updateVersionFile(func(version *semver.Version) { + version.BumpPatch() + }, preRelease, metadata) + + if err != nil { + return nil, errors.Mask(err) + } + + return v, nil +} + +func (sb SemverBumper) GetCurrentVersion() (*semver.Version, error) { + currentVersion, err := sb.storage.ReadVersionFile(sb.versionFile) + + if err != nil { + return nil, errors.Mask(err) + } + + return currentVersion, nil +} + +func (sb SemverBumper) InitVersion(initialVersion semver.Version) error { + if sb.storage.VersionFileExists(sb.versionFile) { + return errors.Newf("Version file exists. Looks like this project is already initialized.") + } + + err := sb.storage.WriteVersionFile(sb.versionFile, initialVersion) + + if err != nil { + return errors.Mask(err) + } + + return nil +} + +func (sb SemverBumper) updateVersionFile(bumpCallback versionBumpCallback, preRelease string, metadata string) (*semver.Version, error) { + currentVersion, err := sb.GetCurrentVersion() + + if err != nil { + return nil, errors.Mask(err) + } + + bumpedVersion := *currentVersion + + bumpCallback(&bumpedVersion) + + bumpedVersion.PreRelease = semver.PreRelease(preRelease) + bumpedVersion.Metadata = metadata + + err = sb.storage.WriteVersionFile(sb.versionFile, bumpedVersion) + + if err != nil { + return nil, errors.Mask(err) + } + + return &bumpedVersion, nil +} diff --git a/vendor/github.com/giantswarm/semver-bump/bump/bump_test.go b/vendor/github.com/giantswarm/semver-bump/bump/bump_test.go new file mode 100644 index 000000000..ae0e49484 --- /dev/null +++ b/vendor/github.com/giantswarm/semver-bump/bump/bump_test.go @@ -0,0 +1,115 @@ +package bump + +import ( + "testing" + + "github.com/coreos/go-semver/semver" + "github.com/giantswarm/semver-bump/storage" +) + +func NewTestSemverBumper(t *testing.T, initialVersion string) *SemverBumper { + s, err := storage.NewVersionStorageLocal(initialVersion) + + if err != nil { + t.Fatalf("NewVersionStorageLocal: %s", err) + } + + return NewSemverBumper(s, "testfile") +} + +func TestBumpMajorVersion(t *testing.T) { + sb := NewTestSemverBumper(t, "1.0.0") + + v, err := sb.BumpMajorVersion("", "") + + if err != nil { + t.Fatalf("BumpMajorVersion: %s", err) + } + + expectedVersion := "2.0.0" + + if expectedVersion != v.String() { + t.Fatalf("BumpMajorVersion: Expected bumping of major version would result in %s but got %s", expectedVersion, v.String()) + } +} + +func TestBumpMinorVersion(t *testing.T) { + sb := NewTestSemverBumper(t, "1.0.0") + + v, err := sb.BumpMinorVersion("", "") + + if err != nil { + t.Fatalf("BumpMinorVersion: %s", err) + } + + expectedVersion := "1.1.0" + + if expectedVersion != v.String() { + t.Fatalf("BumpMinorVersion: Expected bumping of minor version would result in %s but got %s", expectedVersion, v.String()) + } +} + +func TestBumpPatchVersion(t *testing.T) { + sb := NewTestSemverBumper(t, "1.0.0") + + v, err := sb.BumpPatchVersion("", "") + + if err != nil { + t.Fatalf("BumpPatchVersion: %s", err) + } + + expectedVersion := "1.0.1" + + if expectedVersion != v.String() { + t.Fatalf("BumpPatchVersion: Expected bumping of patch version would result in %s but got %s", expectedVersion, v.String()) + } +} + +func TestGetCurrentVersion(t *testing.T) { + expectedVersion := "2.13.4" + sb := NewTestSemverBumper(t, expectedVersion) + + v, err := sb.GetCurrentVersion() + + if err != nil { + t.Fatalf("GetCurrentVersion: %s", err) + } + + if expectedVersion != v.String() { + t.Fatalf("GetCurrentVersion: Expected to receive version %s but got %s", expectedVersion, v.String()) + } +}
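+
+// A hedged companion sketch, not part of the original test file: it exercises
+// the pre-release and metadata suffixes accepted by the Bump* methods, using
+// the helpers above (the expected string assumes coreos/go-semver formatting,
+// MAJOR.MINOR.PATCH-PRERELEASE+METADATA).
+func TestBumpMinorVersionWithSuffixes(t *testing.T) {
+	sb := NewTestSemverBumper(t, "1.0.0")
+
+	v, err := sb.BumpMinorVersion("rc1", "build5")
+
+	if err != nil {
+		t.Fatalf("BumpMinorVersion: %s", err)
+	}
+
+	expectedVersion := "1.1.0-rc1+build5"
+
+	if expectedVersion != v.String() {
+		t.Fatalf("BumpMinorVersionWithSuffixes: Expected %s but got %s", expectedVersion, v.String())
+	}
+}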
+ +func TestInitVersion(t *testing.T) { + expectedVersion, err := semver.NewVersion("1.2.45") + + if err != nil { + t.Fatalf("InitVersion: %s", err) + } + + sb := NewTestSemverBumper(t, "1.1.0") + + err = sb.InitVersion(*expectedVersion) + + if err == nil { + t.Fatalf("InitVersion: Expected SemverBumper to return an error when trying to initialize over an existing version") + } + + sb = NewTestSemverBumper(t, "0.0.0") + + err = sb.InitVersion(*expectedVersion) + + if err != nil { + t.Fatalf("InitVersion: Expected SemverBumper to initialize new version %s but got error: %s", expectedVersion, err) + } + + v, err := sb.GetCurrentVersion() + + if err != nil { + t.Fatalf("InitVersion: %s", err) + } + + if expectedVersion.String() != v.String() { + t.Fatalf("InitVersion: Expected SemverBumper to initialize version %s but got %s", expectedVersion.String(), v.String()) + } +} diff --git a/vendor/github.com/giantswarm/semver-bump/commands/bump_major.go b/vendor/github.com/giantswarm/semver-bump/commands/bump_major.go new file mode 100644 index 000000000..7e270c71d --- /dev/null +++ b/vendor/github.com/giantswarm/semver-bump/commands/bump_major.go @@ -0,0 +1,29 @@ +package commands + +import ( + "fmt" + "log" + + "github.com/spf13/cobra" +) + +var bumpMajorCommand = &cobra.Command{ + Use: "major-release", + Short: "Bump a major release", + Long: `Increments the major version and writes it to the version file.`, + Run: func(cmd *cobra.Command, args []string) { + sb, err := getSemverBumper() + + if err != nil { + log.Fatal(err) + } + + v, err := sb.BumpMajorVersion(versionPreReleaseSuffix, versionMetadataSuffix) + + if err != nil { + log.Fatal(err) + } + + fmt.Printf("Bumped to major version %s\n", v.String()) + }, +} diff --git a/vendor/github.com/giantswarm/semver-bump/commands/bump_minor.go b/vendor/github.com/giantswarm/semver-bump/commands/bump_minor.go new file mode 100644 index 000000000..dd5a89337 --- /dev/null +++ b/vendor/github.com/giantswarm/semver-bump/commands/bump_minor.go @@ -0,0 +1,29 @@ +package commands + +import ( + "fmt" + "log" + + "github.com/spf13/cobra" +) + +var bumpMinorCommand = &cobra.Command{ + Use: "minor-release", + Short: "Bump a minor release", + Long: `Increments the minor version and writes it to the version file.`, + Run: func(cmd *cobra.Command, args []string) { + sb, err := getSemverBumper() + + if err != nil { + log.Fatal(err) + } + + v, err := sb.BumpMinorVersion(versionPreReleaseSuffix, versionMetadataSuffix) + + if err != nil { + log.Fatal(err) + } + + fmt.Printf("Bumped to minor version %s\n", v.String()) + }, +} diff --git a/vendor/github.com/giantswarm/semver-bump/commands/bump_patch.go b/vendor/github.com/giantswarm/semver-bump/commands/bump_patch.go new file mode 100644 index 000000000..89757c676 --- /dev/null +++ b/vendor/github.com/giantswarm/semver-bump/commands/bump_patch.go @@ -0,0 +1,30 @@ +package commands + +import ( + "fmt" + "log" + + "github.com/spf13/cobra" +) + +var bumpPatchCommand = &cobra.Command{ + Use: "patch-release", + Short: "Bump a patch release", + Long: `Increments the patch version and writes it to the version file.`, + Run: func(cmd *cobra.Command, args []string) { + sb, err := getSemverBumper() + + if err != nil { + log.Fatal(err) + } + + v, err := sb.BumpPatchVersion(versionPreReleaseSuffix, versionMetadataSuffix) + + if err != nil { + log.Fatal(err) + } + + fmt.Printf("Bumped to patch version %s\n", v.String()) + + }, +} diff --git a/vendor/github.com/giantswarm/semver-bump/commands/init.go b/vendor/github.com/giantswarm/semver-bump/commands/init.go new file mode 100644 index
000000000..850667de4 --- /dev/null +++ b/vendor/github.com/giantswarm/semver-bump/commands/init.go @@ -0,0 +1,38 @@ +package commands + +import ( + "fmt" + "log" + + "github.com/coreos/go-semver/semver" + "github.com/spf13/cobra" +) + +var initialVersionString string + +var initCommand = &cobra.Command{ + Use: "init", + Short: "Initialize version number", + Long: `Initialize the version number for the project either from 0.1.0 or a custom one.`, + Run: func(cmd *cobra.Command, args []string) { + sb, err := getSemverBumper() + + if err != nil { + log.Fatal(err) + } + + initialVersion, err := semver.NewVersion(initialVersionString) + + if err != nil { + log.Fatal(err) + } + + err = sb.InitVersion(*initialVersion) + + if err != nil { + log.Fatal(err) + } + + fmt.Printf("Bumped initial version to %s\n", initialVersion.String()) + }, +} diff --git a/vendor/github.com/giantswarm/semver-bump/commands/semver_bump.go b/vendor/github.com/giantswarm/semver-bump/commands/semver_bump.go new file mode 100644 index 000000000..941bab6d0 --- /dev/null +++ b/vendor/github.com/giantswarm/semver-bump/commands/semver_bump.go @@ -0,0 +1,70 @@ +package commands + +import ( + "fmt" + "log" + + "github.com/spf13/cobra" +) + +var projectVersion string +var versionFile string +var versionStorageType string = "file" +var versionStorageLocalDefaultVersion string +var versionPreReleaseSuffix string +var versionMetadataSuffix string + +var SemverBumpCommand = &cobra.Command{ + Use: "semver-bump", + Short: "Semantic Versioning Bumper", + Long: `A semantic versioning file bumper built by giantswarm`, + Run: func(cmd *cobra.Command, args []string) { + sb, err := getSemverBumper() + + if err != nil { + log.Fatal(err) + } + + currentVersion, err := sb.GetCurrentVersion() + + if err != nil { + log.Fatal(err) + } + + fmt.Printf("Current version is: %s\n", currentVersion) + }, +} + +func Execute(v string) { + projectVersion = v + + AddGlobalFlags() + AddCommands() + + SemverBumpCommand.Execute() +} + +func AddCommands() { + SemverBumpCommand.AddCommand(bumpMajorCommand) + SemverBumpCommand.AddCommand(bumpMinorCommand) + SemverBumpCommand.AddCommand(bumpPatchCommand) + SemverBumpCommand.AddCommand(initCommand) + SemverBumpCommand.AddCommand(versionCommand) +} + +func AddGlobalFlags() { + SemverBumpCommand.PersistentFlags().StringVarP(&versionFile, "version-file", "f", "VERSION", "Version file to use") + SemverBumpCommand.PersistentFlags().StringVarP(&versionStorageType, "storage-type", "s", "file", "Storage backend to use for version information") + SemverBumpCommand.PersistentFlags().StringVarP(&versionStorageLocalDefaultVersion, "storage-local-default-version", "V", "0.0.1", "Default version to use when using the local storage backend") + + initCommand.Flags().StringVarP(&initialVersionString, "initial-version", "i", "0.1.0", "The initial version of the project") + + bumpMajorCommand.Flags().StringVarP(&versionPreReleaseSuffix, "pre-release-suffix", "p", "", "The pre release suffix for the bumped version") + bumpMajorCommand.Flags().StringVarP(&versionMetadataSuffix, "metadata-suffix", "m", "", "The metadata suffix for the bumped version") + + bumpMinorCommand.Flags().StringVarP(&versionPreReleaseSuffix, "pre-release-suffix", "p", "", "The pre release suffix for the bumped version") + bumpMinorCommand.Flags().StringVarP(&versionMetadataSuffix, "metadata-suffix", "m", "", "The metadata suffix for the bumped version") + + bumpPatchCommand.Flags().StringVarP(&versionPreReleaseSuffix, "pre-release-suffix", "p", "", "The pre release suffix for the bumped version") +
bumpPatchCommand.Flags().StringVarP(&versionMetadataSuffix, "metadata-suffix", "m", "", "The metadata suffix for the bumped version") +} diff --git a/vendor/github.com/giantswarm/semver-bump/commands/util.go b/vendor/github.com/giantswarm/semver-bump/commands/util.go new file mode 100644 index 000000000..203a63e3c --- /dev/null +++ b/vendor/github.com/giantswarm/semver-bump/commands/util.go @@ -0,0 +1,17 @@ +package commands + +import ( + "github.com/giantswarm/semver-bump/bump" + "github.com/giantswarm/semver-bump/storage" + "github.com/juju/errgo/errors" +) + +func getSemverBumper() (*bump.SemverBumper, error) { + s, err := storage.NewVersionStorage(versionStorageType, versionStorageLocalDefaultVersion) + + if err != nil { + return nil, errors.Mask(err) + } + + return bump.NewSemverBumper(s, versionFile), nil +} diff --git a/vendor/github.com/giantswarm/semver-bump/commands/version.go b/vendor/github.com/giantswarm/semver-bump/commands/version.go new file mode 100644 index 000000000..c8c729ac5 --- /dev/null +++ b/vendor/github.com/giantswarm/semver-bump/commands/version.go @@ -0,0 +1,16 @@ +package commands + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +var versionCommand = &cobra.Command{ + Use: "version", + Short: "Print the version number of semver-bump", + Long: `Print the version number of semver-bump.`, + Run: func(cmd *cobra.Command, args []string) { + fmt.Printf("Semantic Versioning Bumper %s\n", projectVersion) + }, +} diff --git a/vendor/github.com/giantswarm/semver-bump/main.go b/vendor/github.com/giantswarm/semver-bump/main.go new file mode 100644 index 000000000..281f5ccee --- /dev/null +++ b/vendor/github.com/giantswarm/semver-bump/main.go @@ -0,0 +1,9 @@ +package main + +import "github.com/giantswarm/semver-bump/commands" + +var projectVersion string = "dev" + +func main() { + commands.Execute(projectVersion) +} diff --git a/vendor/github.com/giantswarm/semver-bump/storage/version_storage.go b/vendor/github.com/giantswarm/semver-bump/storage/version_storage.go new file mode 100644 index 000000000..39ddad7e8 --- /dev/null +++ b/vendor/github.com/giantswarm/semver-bump/storage/version_storage.go @@ -0,0 +1,24 @@ +package storage + +import ( + "github.com/coreos/go-semver/semver" +) + +type VersionStorage interface { + ReadVersionFile(file string) (*semver.Version, error) + + WriteVersionFile(file string, version semver.Version) error + + VersionFileExists(file string) bool +} + +func NewVersionStorage(versionStorageType string, localDefaultVersion string) (VersionStorage, error) { + switch versionStorageType { + case "local": + return NewVersionStorageLocal(localDefaultVersion) + case "file": + return VersionStorageFile{}, nil + default: + panic("Unknown storage backend: " + versionStorageType) + } +} diff --git a/vendor/github.com/giantswarm/semver-bump/storage/version_storage_file.go b/vendor/github.com/giantswarm/semver-bump/storage/version_storage_file.go new file mode 100644 index 000000000..2275e0b45 --- /dev/null +++ b/vendor/github.com/giantswarm/semver-bump/storage/version_storage_file.go @@ -0,0 +1,44 @@ +package storage + +import ( + "io/ioutil" + "os" + "strings" + + "github.com/coreos/go-semver/semver" + "github.com/juju/errgo/errors" +) + +type VersionStorageFile struct{} + +func (s VersionStorageFile) ReadVersionFile(file string) (*semver.Version, error) { + versionBuffer, err := ioutil.ReadFile(file) + + if err != nil { + return nil, errors.Mask(err) + } + + versionString := string(versionBuffer) + versionString = strings.TrimSpace(versionString) + 
versionString = filterVersionNumber(versionString) + + version, err := semver.NewVersion(versionString) + + if err != nil { + return nil, errors.Mask(err) + } + + return version, nil +} + +func (s VersionStorageFile) WriteVersionFile(file string, version semver.Version) error { + return errors.Mask(ioutil.WriteFile(file, []byte(version.String()), 0664)) +} + +func (s VersionStorageFile) VersionFileExists(file string) bool { + if _, err := os.Stat(file); err == nil { + return true + } + + return false +} diff --git a/vendor/github.com/giantswarm/semver-bump/storage/version_storage_file_test.go b/vendor/github.com/giantswarm/semver-bump/storage/version_storage_file_test.go new file mode 100644 index 000000000..050d862e5 --- /dev/null +++ b/vendor/github.com/giantswarm/semver-bump/storage/version_storage_file_test.go @@ -0,0 +1,123 @@ +package storage + +import ( + "fmt" + "io/ioutil" + "os" + "testing" + + "github.com/coreos/go-semver/semver" +) + +func TestFileReadVersionFile(t *testing.T) { + filename := "not-existing" + s := &VersionStorageFile{} + + _, err := s.ReadVersionFile(filename) + + if err == nil { + t.Fatalf("ReadVersionFile %s: Expected error because of not existing version file, none received", filename) + } + + filename, err = createTempVersionFile("no-version") + + if err != nil { + t.Fatalf("ReadVersionFile: %s", err) + } + + defer os.Remove(filename) + + _, err = s.ReadVersionFile(filename) + + if err == nil { + t.Fatalf("ReadVersionFile %s: Expected error because of invalid version format, none received", filename) + } + + filename, err = createTempVersionFile("1.0.0\n ") + + if err != nil { + t.Fatalf("ReadVersionFile: %s", err) + } + + defer os.Remove(filename) + + v, err := s.ReadVersionFile(filename) + + if err != nil { + t.Fatalf("ReadVersionFile %s: Could not read version file. Got error: %s", filename, err) + } + + if "1.0.0" != v.String() { + t.Fatalf("ReadVersionFile %s: Version does not match. 
Expected %s, got %s", "1.0.0", v.String()) + } +} + +func TestFileWriteVersionFile(t *testing.T) { + filename, err := createTempVersionFile("1.0.0") + + if err != nil { + t.Fatalf("WriteVersionFile: %s", err) + } + + defer os.Remove(filename) + + s := &VersionStorageFile{} + v, err := semver.NewVersion("1.1.1") + + if err != nil { + t.Fatalf("WriteVersionFile: %s", err) + } + + err = s.WriteVersionFile(filename, *v) + + if err != nil { + t.Fatalf("WriteVersionFile: %s", err) + } + + readVersion, err := s.ReadVersionFile(filename) + + if err != nil { + t.Fatalf("WriteVersionFile: %s", err) + } + + if v.String() != readVersion.String() { + t.Fatalf("WriteVersionFile: Expected that version %s would be written but got %s", v.String(), readVersion.String()) + } +} + +func TestFileVersionFileExists(t *testing.T) { + filename := "not-existing" + s := &VersionStorageFile{} + + if s.VersionFileExists(filename) { + t.Fatalf("VersionFileExists: Expected version file %s to not exist", filename) + } + + filename, err := createTempVersionFile("1.0.0") + + if err != nil { + t.Fatalf("VersionFileExists: %s", err) + } + + defer os.Remove(filename) + + if !s.VersionFileExists(filename) { + t.Fatalf("VersionFileExists: Expected file %s to exist", filename) + } +} + +func createTempVersionFile(version string) (string, error) { + file, err := ioutil.TempFile("", "") + + if err != nil { + return "", fmt.Errorf("Cannot create temporary version file ") + } + + err = ioutil.WriteFile(file.Name(), []byte(version), 0664) + + if err != nil { + return "", fmt.Errorf("Cannot write to temporary veersion file") + } + + return file.Name(), nil +} diff --git a/vendor/github.com/giantswarm/semver-bump/storage/version_storage_local.go b/vendor/github.com/giantswarm/semver-bump/storage/version_storage_local.go new file mode 100644 index 000000000..f452353a6 --- /dev/null +++ b/vendor/github.com/giantswarm/semver-bump/storage/version_storage_local.go @@ -0,0 +1,38 @@ +package storage + +import ( + "github.com/coreos/go-semver/semver" + "github.com/juju/errgo/errors" +) + +type VersionStorageLocal struct { + version *semver.Version +} + +func (s VersionStorageLocal) ReadVersionFile(file string) (*semver.Version, error) { + return s.version, nil +} + +func (s *VersionStorageLocal) WriteVersionFile(file string, version semver.Version) error { + s.version = &version + + return nil +} + +func (s VersionStorageLocal) VersionFileExists(file string) bool { + if s.version.String() == "0.0.0" { + return false + } + + return true +} + +func NewVersionStorageLocal(versionString string) (*VersionStorageLocal, error) { + version, err := semver.NewVersion(filterVersionNumber(versionString)) + + if err != nil { + return nil, errors.Mask(err) + } + + return &VersionStorageLocal{version: version}, nil +} diff --git a/vendor/github.com/giantswarm/semver-bump/storage/version_storage_local_test.go b/vendor/github.com/giantswarm/semver-bump/storage/version_storage_local_test.go new file mode 100644 index 000000000..400f1ea9b --- /dev/null +++ b/vendor/github.com/giantswarm/semver-bump/storage/version_storage_local_test.go @@ -0,0 +1,90 @@ +package storage + +import ( + "testing" + + "github.com/coreos/go-semver/semver" +) + +func TestNewVersionStorageLocal(t *testing.T) { + versionString := "no-version" + _, err := NewVersionStorageLocal(versionString) + + if err == nil { + t.Fatalf("NewVersionStorageLocal: Expected to get an error on wrong version number %s", versionString) + } + + versionString = "1.0.1" + + _, err = 
NewVersionStorageLocal(versionString) + + if err != nil { + t.Fatalf("NewVersionStorageLocal: %s", err) + } +} + +func TestLocalReadVersionFile(t *testing.T) { + expectedVersion, err := semver.NewVersion("1.1.0") + + if err != nil { + t.Fatalf("ReadVersionFile: %s", err) + } + + s := VersionStorageLocal{expectedVersion} + v, err := s.ReadVersionFile("test") + + if err != nil { + t.Fatalf("ReadVersionFile: %s", err) + } + + if expectedVersion.String() != v.String() { + t.Fatalf("ReadVersionFile: Expected read version to be %s but got %s", expectedVersion.String(), v.String()) + } +} + +func TestLocalWriteVersionFile(t *testing.T) { + expectedVersion, err := semver.NewVersion("1.1.1") + + if err != nil { + t.Fatalf("WriteVersionFile: %s", err) + } + + s := VersionStorageLocal{expectedVersion} + + s.WriteVersionFile("testfile", *expectedVersion) + v, err := s.ReadVersionFile("testfile") + + if err != nil { + t.Fatalf("WriteVersionFile: %s", v) + } + + if expectedVersion.String() != v.String() { + t.Fatalf("WriteVersionFile: Expected written version to be %s but got %s", expectedVersion.String(), v.String()) + } +} + +func TestLocalVersionFileExists(t *testing.T) { + v, err := semver.NewVersion("0.0.0") + + if err != nil { + t.Fatalf("TestLocalVersionFileExists: %s", err) + } + + s := VersionStorageLocal{v} + + if s.VersionFileExists("testfile") { + t.Fatalf("TestLocalVersionFileExists: Expected VersionStorageLocal to pretend no version file exists on version 0.0.0") + } + + v, err = semver.NewVersion("1.1.1") + + if err != nil { + t.Fatalf("TestLocalVersionFileExists: %s", err) + } + + s = VersionStorageLocal{v} + + if !s.VersionFileExists("testfile") { + t.Fatalf("TestLocalVersionFileExists: Expected VersionStorageLocal to pretend a version file exists on real versions") + } +} diff --git a/vendor/github.com/giantswarm/semver-bump/storage/version_storage_util.go b/vendor/github.com/giantswarm/semver-bump/storage/version_storage_util.go new file mode 100644 index 000000000..ca3d05c14 --- /dev/null +++ b/vendor/github.com/giantswarm/semver-bump/storage/version_storage_util.go @@ -0,0 +1,7 @@ +package storage + +import "strings" + +func filterVersionNumber(v string) string { + return strings.TrimPrefix(v, "v") +} diff --git a/vendor/github.com/go-ini/ini/.gitignore b/vendor/github.com/go-ini/ini/.gitignore new file mode 100644 index 000000000..c5203bf6e --- /dev/null +++ b/vendor/github.com/go-ini/ini/.gitignore @@ -0,0 +1,5 @@ +testdata/conf_out.ini +ini.sublime-project +ini.sublime-workspace +testdata/conf_reflect.ini +.idea diff --git a/vendor/github.com/go-ini/ini/.travis.yml b/vendor/github.com/go-ini/ini/.travis.yml new file mode 100644 index 000000000..9a41f64df --- /dev/null +++ b/vendor/github.com/go-ini/ini/.travis.yml @@ -0,0 +1,14 @@ +sudo: false +language: go +go: + - 1.4 + - 1.5 + - 1.6 + - 1.7 + - 1.8 + - master + +script: + - go get golang.org/x/tools/cmd/cover + - go get github.com/smartystreets/goconvey + - go test -v -cover -race diff --git a/vendor/github.com/go-ini/ini/LICENSE b/vendor/github.com/go-ini/ini/LICENSE new file mode 100644 index 000000000..37ec93a14 --- /dev/null +++ b/vendor/github.com/go-ini/ini/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. 
+ +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. 
+ +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-ini/ini/Makefile b/vendor/github.com/go-ini/ini/Makefile new file mode 100644 index 000000000..ac034e525 --- /dev/null +++ b/vendor/github.com/go-ini/ini/Makefile @@ -0,0 +1,12 @@ +.PHONY: build test bench vet + +build: vet bench + +test: + go test -v -cover -race + +bench: + go test -v -cover -race -test.bench=. -test.benchmem + +vet: + go vet diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md new file mode 100644 index 000000000..e67d51f32 --- /dev/null +++ b/vendor/github.com/go-ini/ini/README.md @@ -0,0 +1,746 @@ +INI [![Build Status](https://travis-ci.org/go-ini/ini.svg?branch=master)](https://travis-ci.org/go-ini/ini) [![Sourcegraph](https://sourcegraph.com/github.com/go-ini/ini/-/badge.svg)](https://sourcegraph.com/github.com/go-ini/ini?badge) +=== + +![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200) + +Package ini provides INI file read and write functionality in Go. + +[简体中文](README_ZH.md) + +## Feature + +- Load multiple data sources(`[]byte`, file and `io.ReadCloser`) with overwrites. +- Read with recursion values. +- Read with parent-child sections. +- Read with auto-increment key names. +- Read with multiple-line values. +- Read with tons of helper methods. +- Read and convert values to Go types. +- Read and **WRITE** comments of sections and keys. +- Manipulate sections, keys and comments with ease. +- Keep sections and keys in order as you parse and save. + +## Installation + +To use a tagged revision: + + go get gopkg.in/ini.v1 + +To use with latest changes: + + go get github.com/go-ini/ini + +Please add `-u` flag to update in the future. + +### Testing + +If you want to test on your machine, please apply `-t` flag: + + go get -t gopkg.in/ini.v1 + +Please add `-u` flag to update in the future. + +## Getting Started + +### Loading from data sources + +A **Data Source** is either raw data in type `[]byte`, a file name with type `string` or `io.ReadCloser`. You can load **as many data sources as you want**. Passing other types will simply return an error. + +```go +cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data")))) +``` + +Or start with an empty object: + +```go +cfg := ini.Empty() +``` + +When you cannot decide how many data sources to load at the beginning, you will still be able to **Append()** them later. + +```go +err := cfg.Append("other file", []byte("other raw data")) +``` + +If you have a list of files with possibilities that some of them may not available at the time, and you don't know exactly which ones, you can use `LooseLoad` to ignore nonexistent files without returning error. + +```go +cfg, err := ini.LooseLoad("filename", "filename_404") +``` + +The cool thing is, whenever the file is available to load while you're calling `Reload` method, it will be counted as usual. + +#### Ignore cases of key name + +When you do not care about cases of section and key names, you can use `InsensitiveLoad` to force all names to be lowercased while parsing. + +```go +cfg, err := ini.InsensitiveLoad("filename") +//... + +// sec1 and sec2 are the exactly same section object +sec1, err := cfg.GetSection("Section") +sec2, err := cfg.GetSection("SecTIOn") + +// key1 and key2 are the exactly same key object +key1, err := sec1.GetKey("Key") +key2, err := sec2.GetKey("KeY") +``` + +#### MySQL-like boolean key + +MySQL's configuration allows a key without value as follows: + +```ini +[mysqld] +... 
+skip-host-cache
+skip-name-resolve
+```
+
+By default, this is considered a missing value. But if you know you're going to deal with such cases, you can assign advanced load options:
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, "my.cnf")
+```
+
+The value of those keys is always `true`, and when you save to a file, it will be kept in the same format as you read it.
+
+To generate such keys in your program, you could use `NewBooleanKey`:
+
+```go
+key, err := sec.NewBooleanKey("skip-host-cache")
+```
+
+#### Comment
+
+Take care that the following formats will be treated as comments:
+
+1. Line begins with `#` or `;`
+2. Words after `#` or `;`
+3. Words after section name (i.e. words after `[some section name]`)
+
+If you want to save a value with `#` or `;`, please quote it with ``` ` ``` or ``` """ ```.
+
+Alternatively, you can use the following `LoadOptions` to completely ignore inline comments:
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{IgnoreInlineComment: true}, "app.ini")
+```
+
+### Working with sections
+
+To get a section, you would need to:
+
+```go
+section, err := cfg.GetSection("section name")
+```
+
+As a shortcut for the default section, just give an empty string as the name:
+
+```go
+section, err := cfg.GetSection("")
+```
+
+When you're pretty sure the section exists, the following code could make your life easier:
+
+```go
+section := cfg.Section("section name")
+```
+
+What happens when the section somehow does not exist? Don't panic, it automatically creates and returns a new section to you.
+
+To create a new section:
+
+```go
+err := cfg.NewSection("new section")
+```
+
+To get a list of sections or section names:
+
+```go
+sections := cfg.Sections()
+names := cfg.SectionStrings()
+```
+
+### Working with keys
+
+To get a key under a section:
+
+```go
+key, err := cfg.Section("").GetKey("key name")
+```
+
+The same rule applies to key operations:
+
+```go
+key := cfg.Section("").Key("key name")
+```
+
+To check if a key exists:
+
+```go
+yes := cfg.Section("").HasKey("key name")
+```
+
+To create a new key:
+
+```go
+err := cfg.Section("").NewKey("name", "value")
+```
+
+To get a list of keys or key names:
+
+```go
+keys := cfg.Section("").Keys()
+names := cfg.Section("").KeyStrings()
+```
+
+To get a cloned hash of keys and corresponding values:
+
+```go
+hash := cfg.Section("").KeysHash()
+```
+
+### Working with values
+
+To get a string value:
+
+```go
+val := cfg.Section("").Key("key name").String()
+```
+
+To validate a key value on the fly:
+
+```go
+val := cfg.Section("").Key("key name").Validate(func(in string) string {
+	if len(in) == 0 {
+		return "default"
+	}
+	return in
+})
+```
+
+If you do not want any auto-transformation (such as recursive reads) of the values, you can get the raw value directly (this way you get much better performance):
+
+```go
+val := cfg.Section("").Key("key name").Value()
+```
+
+To check if a raw value exists:
+
+```go
+yes := cfg.Section("").HasValue("test value")
+```
+
+To get a value with a specific type:
+
+```go
+// For boolean values:
+// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On
+// false when value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off
+v, err = cfg.Section("").Key("BOOL").Bool()
+v, err = cfg.Section("").Key("FLOAT64").Float64()
+v, err = cfg.Section("").Key("INT").Int()
+v, err = cfg.Section("").Key("INT64").Int64()
+v, err = cfg.Section("").Key("UINT").Uint()
+v, err = cfg.Section("").Key("UINT64").Uint64()
+v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339)
+v, err = cfg.Section("").Key("TIME").Time() // RFC3339
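+
+// For example (hypothetical key, for illustration only): with "DEBUG = on" in
+// the default section, Key("DEBUG").Bool() returns (true, nil); with
+// "DEBUG = maybe" it returns false plus a non-nil error, since "maybe" is not
+// one of the accepted literals listed above.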
err = cfg.Section("").Key("TIME").Time() // RFC3339 + +v = cfg.Section("").Key("BOOL").MustBool() +v = cfg.Section("").Key("FLOAT64").MustFloat64() +v = cfg.Section("").Key("INT").MustInt() +v = cfg.Section("").Key("INT64").MustInt64() +v = cfg.Section("").Key("UINT").MustUint() +v = cfg.Section("").Key("UINT64").MustUint64() +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339) +v = cfg.Section("").Key("TIME").MustTime() // RFC3339 + +// Methods start with Must also accept one argument for default value +// when key not found or fail to parse value to given type. +// Except method MustString, which you have to pass a default value. + +v = cfg.Section("").Key("String").MustString("default") +v = cfg.Section("").Key("BOOL").MustBool(true) +v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25) +v = cfg.Section("").Key("INT").MustInt(10) +v = cfg.Section("").Key("INT64").MustInt64(99) +v = cfg.Section("").Key("UINT").MustUint(3) +v = cfg.Section("").Key("UINT64").MustUint64(6) +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now()) +v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339 +``` + +What if my value is three-line long? + +```ini +[advance] +ADDRESS = """404 road, +NotFound, State, 5000 +Earth""" +``` + +Not a problem! + +```go +cfg.Section("advance").Key("ADDRESS").String() + +/* --- start --- +404 road, +NotFound, State, 5000 +Earth +------ end --- */ +``` + +That's cool, how about continuation lines? + +```ini +[advance] +two_lines = how about \ + continuation lines? +lots_of_lines = 1 \ + 2 \ + 3 \ + 4 +``` + +Piece of cake! + +```go +cfg.Section("advance").Key("two_lines").String() // how about continuation lines? +cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 +``` + +Well, I hate continuation lines, how do I disable that? + +```go +cfg, err := ini.LoadSources(ini.LoadOptions{ + IgnoreContinuation: true, +}, "filename") +``` + +Holy crap! + +Note that single quotes around values will be stripped: + +```ini +foo = "some value" // foo: some value +bar = 'some value' // bar: some value +``` + +That's all? Hmm, no. + +#### Helper methods of working with values + +To get value with given candidates: + +```go +v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"}) +v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75}) +v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30}) +v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30}) +v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9}) +v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9}) +v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3}) +v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339 +``` + +Default value will be presented if value of key is not in candidates you given, and default value does not need be one of candidates. 
+
+To validate a value in a given range:
+
+```go
+vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2)
+vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20)
+vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20)
+vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9)
+vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9)
+vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime)
+vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
+```
+
+##### Auto-split values into a slice
+
+To use the type's zero value for invalid inputs:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0]
+vals = cfg.Section("").Key("STRINGS").Strings(",")
+vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
+vals = cfg.Section("").Key("INTS").Ints(",")
+vals = cfg.Section("").Key("INT64S").Int64s(",")
+vals = cfg.Section("").Key("UINTS").Uints(",")
+vals = cfg.Section("").Key("UINT64S").Uint64s(",")
+vals = cfg.Section("").Key("TIMES").Times(",")
+```
+
+To exclude invalid values from the result slice:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> [2.2]
+vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",")
+vals = cfg.Section("").Key("INTS").ValidInts(",")
+vals = cfg.Section("").Key("INT64S").ValidInt64s(",")
+vals = cfg.Section("").Key("UINTS").ValidUints(",")
+vals = cfg.Section("").Key("UINT64S").ValidUint64s(",")
+vals = cfg.Section("").Key("TIMES").ValidTimes(",")
+```
+
+Or to return nothing but an error when there are invalid inputs:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> error
+vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",")
+vals = cfg.Section("").Key("INTS").StrictInts(",")
+vals = cfg.Section("").Key("INT64S").StrictInt64s(",")
+vals = cfg.Section("").Key("UINTS").StrictUints(",")
+vals = cfg.Section("").Key("UINT64S").StrictUint64s(",")
+vals = cfg.Section("").Key("TIMES").StrictTimes(",")
+```
+
+### Save your configuration
+
+Finally, it's time to save your configuration somewhere.
+
+A typical way to save a configuration is writing it to a file:
+
+```go
+// ...
+err = cfg.SaveTo("my.ini")
+err = cfg.SaveToIndent("my.ini", "\t")
+```
+
+Another way to save is writing to an `io.Writer`:
+
+```go
+// ...
+cfg.WriteTo(writer)
+cfg.WriteToIndent(writer, "\t")
+```
+
+By default, spaces are used to align the "=" sign between keys and values; to disable that:
+
+```go
+ini.PrettyFormat = false
+```
+
+## Advanced Usage
+
+### Recursive Values
+
+For all key values there is a special syntax `%(<name>)s`, where `<name>` is a key name in the same section or the default section. `%(<name>)s` will be replaced by the corresponding value (an empty string if the key is not found). You can use this syntax up to 99 levels of recursion.
+
+```ini
+NAME = ini
+
+[author]
+NAME = Unknwon
+GITHUB = https://github.com/%(NAME)s
+
+[package]
+FULL_NAME = github.com/go-ini/%(NAME)s
+```
+
+```go
+cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon
+cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini
+```
+
+### Parent-child Sections
+
+You can use `.` in a section name to indicate a parent-child relationship between two or more sections. If a key is not found in the child section, the library will look for it in the parent section, continuing upward until there is no parent section left.
+ +```ini +NAME = ini +VERSION = v1 +IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s + +[package] +CLONE_URL = https://%(IMPORT_PATH)s + +[package.sub] +``` + +```go +cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 +``` + +#### Retrieve parent keys available to a child section + +```go +cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"] +``` + +### Unparseable Sections + +Sometimes, you have sections that do not contain key-value pairs but raw content, to handle such case, you can use `LoadOptions.UnparsableSections`: + +```go +cfg, err := LoadSources(LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS] +<1> This slide has the fuel listed in the wrong units `)) + +body := cfg.Section("COMMENTS").Body() + +/* --- start --- +<1> This slide has the fuel listed in the wrong units +------ end --- */ +``` + +### Auto-increment Key Names + +If key name is `-` in data source, then it would be seen as special syntax for auto-increment key name start from 1, and every section is independent on counter. + +```ini +[features] +-: Support read/write comments of keys and sections +-: Support auto-increment of key names +-: Support load multiple files to overwrite key values +``` + +```go +cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"} +``` + +### Map To Struct + +Want more objective way to play with INI? Cool. + +```ini +Name = Unknwon +age = 21 +Male = true +Born = 1993-01-01T20:17:05Z + +[Note] +Content = Hi is a good man! +Cities = HangZhou, Boston +``` + +```go +type Note struct { + Content string + Cities []string +} + +type Person struct { + Name string + Age int `ini:"age"` + Male bool + Born time.Time + Note + Created time.Time `ini:"-"` +} + +func main() { + cfg, err := ini.Load("path/to/ini") + // ... + p := new(Person) + err = cfg.MapTo(p) + // ... + + // Things can be simpler. + err = ini.MapTo(p, "path/to/ini") + // ... + + // Just map a section? Fine. + n := new(Note) + err = cfg.Section("Note").MapTo(n) + // ... +} +``` + +Can I have default value for field? Absolutely. + +Assign it before you map to struct. It will keep the value as it is if the key is not presented or got wrong type. + +```go +// ... +p := &Person{ + Name: "Joe", +} +// ... +``` + +It's really cool, but what's the point if you can't give me my file back from struct? + +### Reflect From Struct + +Why not? + +```go +type Embeded struct { + Dates []time.Time `delim:"|"` + Places []string `ini:"places,omitempty"` + None []int `ini:",omitempty"` +} + +type Author struct { + Name string `ini:"NAME"` + Male bool + Age int + GPA float64 + NeverMind string `ini:"-"` + *Embeded +} + +func main() { + a := &Author{"Unknwon", true, 21, 2.8, "", + &Embeded{ + []time.Time{time.Now(), time.Now()}, + []string{"HangZhou", "Boston"}, + []int{}, + }} + cfg := ini.Empty() + err = ini.ReflectFrom(cfg, a) + // ... +} +``` + +So, what do I get? + +```ini +NAME = Unknwon +Male = true +Age = 21 +GPA = 2.8 + +[Embeded] +Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 +places = HangZhou,Boston +``` + +#### Name Mapper + +To save your time and make your code cleaner, this library supports [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) between struct field and actual section and key name. + +There are 2 built-in name mappers: + +- `AllCapsUnderscore`: it converts to format `ALL_CAPS_UNDERSCORE` then match section or key. +- `TitleUnderscore`: it converts to format `title_underscore` then match section or key. 
+
+To use them:
+
+```go
+type Info struct {
+	PackageName string
+}
+
+func main() {
+	err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
+	// ...
+
+	cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
+	// ...
+	info := new(Info)
+	cfg.NameMapper = ini.AllCapsUnderscore
+	err = cfg.MapTo(info)
+	// ...
+}
+```
+
+The same name mapper rules apply to the `ini.ReflectFromWithMapper` function.
+
+#### Value Mapper
+
+To expand values (e.g. from environment variables), you can use the `ValueMapper` to transform values:
+
+```go
+type Env struct {
+	Foo string `ini:"foo"`
+}
+
+func main() {
+	cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n"))
+	cfg.ValueMapper = os.ExpandEnv
+	// ...
+	env := &Env{}
+	err = cfg.Section("env").MapTo(env)
+}
+```
+
+This would set the value of `env.Foo` to the value of the environment variable `MY_VAR`.
+
+#### Other Notes On Map/Reflect
+
+Any embedded struct is treated as a section by default, and there are no automatic parent-child relations in the map/reflect feature:
+
+```go
+type Child struct {
+	Age string
+}
+
+type Parent struct {
+	Name string
+	Child
+}
+
+type Config struct {
+	City string
+	Parent
+}
+```
+
+Example configuration:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+
+[Child]
+Age = 21
+```
+
+What if, yes, I'm paranoid, and I want the embedded struct to be in the same section? Well, all roads lead to Rome.
+
+```go
+type Child struct {
+	Age string
+}
+
+type Parent struct {
+	Name string
+	Child `ini:"Parent"`
+}
+
+type Config struct {
+	City string
+	Parent
+}
+```
+
+Example configuration:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+Age = 21
+```
+
+## Getting Help
+
+- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
+- [File An Issue](https://github.com/go-ini/ini/issues/new)
+
+## FAQs
+
+### What does `BlockMode` field do?
+
+By default, the library lets you read and write values, so it uses a lock to make sure your data is safe. But in cases where you are very sure you will only read data through the library, you can set `cfg.BlockMode = false` to speed up read operations by about **50-70%**.
+
+### Why another INI library?
+
+Many people are using my other INI library [goconfig](https://github.com/Unknwon/goconfig); the reason for this one is that I would like to write more Go-style code. Also, when you set `cfg.BlockMode = false`, this one is about **10-30%** faster.
+
+To make those changes I had to break the API, so it was safer to keep the code in another place and start using `gopkg.in` to version the package. (PS: it also makes for a shorter import path.)
+
+## License
+
+This project is under the Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.
diff --git a/vendor/github.com/go-ini/ini/README_ZH.md b/vendor/github.com/go-ini/ini/README_ZH.md new file mode 100644 index 000000000..0cf419449 --- /dev/null +++ b/vendor/github.com/go-ini/ini/README_ZH.md @@ -0,0 +1,733 @@ +本包提供了 Go 语言中读写 INI 文件的功能。 + +## 功能特性 + +- 支持覆盖加载多个数据源(`[]byte`、文件和 `io.ReadCloser`) +- 支持递归读取键值 +- 支持读取父子分区 +- 支持读取自增键名 +- 支持读取多行的键值 +- 支持大量辅助方法 +- 支持在读取时直接转换为 Go 语言类型 +- 支持读取和 **写入** 分区和键的注释 +- 轻松操作分区、键值和注释 +- 在保存文件时分区和键值会保持原有的顺序 + +## 下载安装 + +使用一个特定版本: + + go get gopkg.in/ini.v1 + +使用最新版: + + go get github.com/go-ini/ini + +如需更新请添加 `-u` 选项。 + +### 测试安装 + +如果您想要在自己的机器上运行测试,请使用 `-t` 标记: + + go get -t gopkg.in/ini.v1 + +如需更新请添加 `-u` 选项。 + +## 开始使用 + +### 从数据源加载 + +一个 **数据源** 可以是 `[]byte` 类型的原始数据,`string` 类型的文件路径或 `io.ReadCloser`。您可以加载 **任意多个** 数据源。如果您传递其它类型的数据源,则会直接返回错误。 + +```go +cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data")))) +``` + +或者从一个空白的文件开始: + +```go +cfg := ini.Empty() +``` + +当您在一开始无法决定需要加载哪些数据源时,仍可以使用 **Append()** 在需要的时候加载它们。 + +```go +err := cfg.Append("other file", []byte("other raw data")) +``` + +当您想要加载一系列文件,但是不能够确定其中哪些文件是不存在的,可以通过调用函数 `LooseLoad` 来忽略它们(`Load` 会因为文件不存在而返回错误): + +```go +cfg, err := ini.LooseLoad("filename", "filename_404") +``` + +更牛逼的是,当那些之前不存在的文件在重新调用 `Reload` 方法的时候突然出现了,那么它们会被正常加载。 + +#### 忽略键名的大小写 + +有时候分区和键的名称大小写混合非常烦人,这个时候就可以通过 `InsensitiveLoad` 将所有分区和键名在读取里强制转换为小写: + +```go +cfg, err := ini.InsensitiveLoad("filename") +//... + +// sec1 和 sec2 指向同一个分区对象 +sec1, err := cfg.GetSection("Section") +sec2, err := cfg.GetSection("SecTIOn") + +// key1 和 key2 指向同一个键对象 +key1, err := sec1.GetKey("Key") +key2, err := sec2.GetKey("KeY") +``` + +#### 类似 MySQL 配置中的布尔值键 + +MySQL 的配置文件中会出现没有具体值的布尔类型的键: + +```ini +[mysqld] +... +skip-host-cache +skip-name-resolve +``` + +默认情况下这被认为是缺失值而无法完成解析,但可以通过高级的加载选项对它们进行处理: + +```go +cfg, err := LoadSources(LoadOptions{AllowBooleanKeys: true}, "my.cnf")) +``` + +这些键的值永远为 `true`,且在保存到文件时也只会输出键名。 + +如果您想要通过程序来生成此类键,则可以使用 `NewBooleanKey`: + +```go +key, err := sec.NewBooleanKey("skip-host-cache") +``` + +#### 关于注释 + +下述几种情况的内容将被视为注释: + +1. 所有以 `#` 或 `;` 开头的行 +2. 所有在 `#` 或 `;` 之后的内容 +3. 
分区标签后的文字 (即 `[分区名]` 之后的内容) + +如果你希望使用包含 `#` 或 `;` 的值,请使用 ``` ` ``` 或 ``` """ ``` 进行包覆。 + +除此之外,您还可以通过 `LoadOptions` 完全忽略行内注释: + +```go +cfg, err := LoadSources(LoadOptions{IgnoreInlineComment: true}, "app.ini")) +``` + +### 操作分区(Section) + +获取指定分区: + +```go +section, err := cfg.GetSection("section name") +``` + +如果您想要获取默认分区,则可以用空字符串代替分区名: + +```go +section, err := cfg.GetSection("") +``` + +当您非常确定某个分区是存在的,可以使用以下简便方法: + +```go +section := cfg.Section("section name") +``` + +如果不小心判断错了,要获取的分区其实是不存在的,那会发生什么呢?没事的,它会自动创建并返回一个对应的分区对象给您。 + +创建一个分区: + +```go +err := cfg.NewSection("new section") +``` + +获取所有分区对象或名称: + +```go +sections := cfg.Sections() +names := cfg.SectionStrings() +``` + +### 操作键(Key) + +获取某个分区下的键: + +```go +key, err := cfg.Section("").GetKey("key name") +``` + +和分区一样,您也可以直接获取键而忽略错误处理: + +```go +key := cfg.Section("").Key("key name") +``` + +判断某个键是否存在: + +```go +yes := cfg.Section("").HasKey("key name") +``` + +创建一个新的键: + +```go +err := cfg.Section("").NewKey("name", "value") +``` + +获取分区下的所有键或键名: + +```go +keys := cfg.Section("").Keys() +names := cfg.Section("").KeyStrings() +``` + +获取分区下的所有键值对的克隆: + +```go +hash := cfg.Section("").KeysHash() +``` + +### 操作键值(Value) + +获取一个类型为字符串(string)的值: + +```go +val := cfg.Section("").Key("key name").String() +``` + +获取值的同时通过自定义函数进行处理验证: + +```go +val := cfg.Section("").Key("key name").Validate(func(in string) string { + if len(in) == 0 { + return "default" + } + return in +}) +``` + +如果您不需要任何对值的自动转变功能(例如递归读取),可以直接获取原值(这种方式性能最佳): + +```go +val := cfg.Section("").Key("key name").Value() +``` + +判断某个原值是否存在: + +```go +yes := cfg.Section("").HasValue("test value") +``` + +获取其它类型的值: + +```go +// 布尔值的规则: +// true 当值为:1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On +// false 当值为:0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off +v, err = cfg.Section("").Key("BOOL").Bool() +v, err = cfg.Section("").Key("FLOAT64").Float64() +v, err = cfg.Section("").Key("INT").Int() +v, err = cfg.Section("").Key("INT64").Int64() +v, err = cfg.Section("").Key("UINT").Uint() +v, err = cfg.Section("").Key("UINT64").Uint64() +v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339) +v, err = cfg.Section("").Key("TIME").Time() // RFC3339 + +v = cfg.Section("").Key("BOOL").MustBool() +v = cfg.Section("").Key("FLOAT64").MustFloat64() +v = cfg.Section("").Key("INT").MustInt() +v = cfg.Section("").Key("INT64").MustInt64() +v = cfg.Section("").Key("UINT").MustUint() +v = cfg.Section("").Key("UINT64").MustUint64() +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339) +v = cfg.Section("").Key("TIME").MustTime() // RFC3339 + +// 由 Must 开头的方法名允许接收一个相同类型的参数来作为默认值, +// 当键不存在或者转换失败时,则会直接返回该默认值。 +// 但是,MustString 方法必须传递一个默认值。 + +v = cfg.Seciont("").Key("String").MustString("default") +v = cfg.Section("").Key("BOOL").MustBool(true) +v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25) +v = cfg.Section("").Key("INT").MustInt(10) +v = cfg.Section("").Key("INT64").MustInt64(99) +v = cfg.Section("").Key("UINT").MustUint(3) +v = cfg.Section("").Key("UINT64").MustUint64(6) +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now()) +v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339 +``` + +如果我的值有好多行怎么办? + +```ini +[advance] +ADDRESS = """404 road, +NotFound, State, 5000 +Earth""" +``` + +嗯哼?小 case! + +```go +cfg.Section("advance").Key("ADDRESS").String() + +/* --- start --- +404 road, +NotFound, State, 5000 +Earth +------ end --- */ +``` + +赞爆了!那要是我属于一行的内容写不下想要写到第二行怎么办? 
+ +```ini +[advance] +two_lines = how about \ + continuation lines? +lots_of_lines = 1 \ + 2 \ + 3 \ + 4 +``` + +简直是小菜一碟! + +```go +cfg.Section("advance").Key("two_lines").String() // how about continuation lines? +cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 +``` + +可是我有时候觉得两行连在一起特别没劲,怎么才能不自动连接两行呢? + +```go +cfg, err := ini.LoadSources(ini.LoadOptions{ + IgnoreContinuation: true, +}, "filename") +``` + +哇靠给力啊! + +需要注意的是,值两侧的单引号会被自动剔除: + +```ini +foo = "some value" // foo: some value +bar = 'some value' // bar: some value +``` + +这就是全部了?哈哈,当然不是。 + +#### 操作键值的辅助方法 + +获取键值时设定候选值: + +```go +v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"}) +v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75}) +v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30}) +v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30}) +v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9}) +v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9}) +v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3}) +v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339 +``` + +如果获取到的值不是候选值的任意一个,则会返回默认值,而默认值不需要是候选值中的一员。 + +验证获取的值是否在指定范围内: + +```go +vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2) +vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20) +vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20) +vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9) +vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9) +vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime) +vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 +``` + +##### 自动分割键值到切片(slice) + +当存在无效输入时,使用零值代替: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0] +vals = cfg.Section("").Key("STRINGS").Strings(",") +vals = cfg.Section("").Key("FLOAT64S").Float64s(",") +vals = cfg.Section("").Key("INTS").Ints(",") +vals = cfg.Section("").Key("INT64S").Int64s(",") +vals = cfg.Section("").Key("UINTS").Uints(",") +vals = cfg.Section("").Key("UINT64S").Uint64s(",") +vals = cfg.Section("").Key("TIMES").Times(",") +``` + +从结果切片中剔除无效输入: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [2.2] +vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",") +vals = cfg.Section("").Key("INTS").ValidInts(",") +vals = cfg.Section("").Key("INT64S").ValidInt64s(",") +vals = cfg.Section("").Key("UINTS").ValidUints(",") +vals = cfg.Section("").Key("UINT64S").ValidUint64s(",") +vals = cfg.Section("").Key("TIMES").ValidTimes(",") +``` + +当存在无效输入时,直接返回错误: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> error +vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",") +vals = cfg.Section("").Key("INTS").StrictInts(",") +vals = cfg.Section("").Key("INT64S").StrictInt64s(",") +vals = cfg.Section("").Key("UINTS").StrictUints(",") +vals = cfg.Section("").Key("UINT64S").StrictUint64s(",") +vals = cfg.Section("").Key("TIMES").StrictTimes(",") +``` + +### 保存配置 + +终于到了这个时刻,是时候保存一下配置了。 + +比较原始的做法是输出配置到某个文件: + +```go +// ... +err = cfg.SaveTo("my.ini") +err = cfg.SaveToIndent("my.ini", "\t") +``` + +另一个比较高级的做法是写入到任何实现 `io.Writer` 接口的对象中: + +```go +// ... 
+cfg.WriteTo(writer) +cfg.WriteToIndent(writer, "\t") +``` + +默认情况下,空格将被用于对齐键值之间的等号以美化输出结果,以下代码可以禁用该功能: + +```go +ini.PrettyFormat = false +``` + +## 高级用法 + +### 递归读取键值 + +在获取所有键值的过程中,特殊语法 `%()s` 会被应用,其中 `` 可以是相同分区或者默认分区下的键名。字符串 `%()s` 会被相应的键值所替代,如果指定的键不存在,则会用空字符串替代。您可以最多使用 99 层的递归嵌套。 + +```ini +NAME = ini + +[author] +NAME = Unknwon +GITHUB = https://github.com/%(NAME)s + +[package] +FULL_NAME = github.com/go-ini/%(NAME)s +``` + +```go +cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon +cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini +``` + +### 读取父子分区 + +您可以在分区名称中使用 `.` 来表示两个或多个分区之间的父子关系。如果某个键在子分区中不存在,则会去它的父分区中再次寻找,直到没有父分区为止。 + +```ini +NAME = ini +VERSION = v1 +IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s + +[package] +CLONE_URL = https://%(IMPORT_PATH)s + +[package.sub] +``` + +```go +cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 +``` + +#### 获取上级父分区下的所有键名 + +```go +cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"] +``` + +### 无法解析的分区 + +如果遇到一些比较特殊的分区,它们不包含常见的键值对,而是没有固定格式的纯文本,则可以使用 `LoadOptions.UnparsableSections` 进行处理: + +```go +cfg, err := LoadSources(LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS] +<1> This slide has the fuel listed in the wrong units `)) + +body := cfg.Section("COMMENTS").Body() + +/* --- start --- +<1> This slide has the fuel listed in the wrong units +------ end --- */ +``` + +### 读取自增键名 + +如果数据源中的键名为 `-`,则认为该键使用了自增键名的特殊语法。计数器从 1 开始,并且分区之间是相互独立的。 + +```ini +[features] +-: Support read/write comments of keys and sections +-: Support auto-increment of key names +-: Support load multiple files to overwrite key values +``` + +```go +cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"} +``` + +### 映射到结构 + +想要使用更加面向对象的方式玩转 INI 吗?好主意。 + +```ini +Name = Unknwon +age = 21 +Male = true +Born = 1993-01-01T20:17:05Z + +[Note] +Content = Hi is a good man! +Cities = HangZhou, Boston +``` + +```go +type Note struct { + Content string + Cities []string +} + +type Person struct { + Name string + Age int `ini:"age"` + Male bool + Born time.Time + Note + Created time.Time `ini:"-"` +} + +func main() { + cfg, err := ini.Load("path/to/ini") + // ... + p := new(Person) + err = cfg.MapTo(p) + // ... + + // 一切竟可以如此的简单。 + err = ini.MapTo(p, "path/to/ini") + // ... + + // 嗯哼?只需要映射一个分区吗? + n := new(Note) + err = cfg.Section("Note").MapTo(n) + // ... +} +``` + +结构的字段怎么设置默认值呢?很简单,只要在映射之前对指定字段进行赋值就可以了。如果键未找到或者类型错误,该值不会发生改变。 + +```go +// ... +p := &Person{ + Name: "Joe", +} +// ... +``` + +这样玩 INI 真的好酷啊!然而,如果不能还给我原来的配置文件,有什么卵用? + +### 从结构反射 + +可是,我有说不能吗? + +```go +type Embeded struct { + Dates []time.Time `delim:"|"` + Places []string `ini:"places,omitempty"` + None []int `ini:",omitempty"` +} + +type Author struct { + Name string `ini:"NAME"` + Male bool + Age int + GPA float64 + NeverMind string `ini:"-"` + *Embeded +} + +func main() { + a := &Author{"Unknwon", true, 21, 2.8, "", + &Embeded{ + []time.Time{time.Now(), time.Now()}, + []string{"HangZhou", "Boston"}, + []int{}, + }} + cfg := ini.Empty() + err = ini.ReflectFrom(cfg, a) + // ... 
+} +``` + +瞧瞧,奇迹发生了。 + +```ini +NAME = Unknwon +Male = true +Age = 21 +GPA = 2.8 + +[Embeded] +Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 +places = HangZhou,Boston +``` + +#### 名称映射器(Name Mapper) + +为了节省您的时间并简化代码,本库支持类型为 [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) 的名称映射器,该映射器负责结构字段名与分区名和键名之间的映射。 + +目前有 2 款内置的映射器: + +- `AllCapsUnderscore`:该映射器将字段名转换至格式 `ALL_CAPS_UNDERSCORE` 后再去匹配分区名和键名。 +- `TitleUnderscore`:该映射器将字段名转换至格式 `title_underscore` 后再去匹配分区名和键名。 + +使用方法: + +```go +type Info struct{ + PackageName string +} + +func main() { + err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini")) + // ... + + cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) + // ... + info := new(Info) + cfg.NameMapper = ini.AllCapsUnderscore + err = cfg.MapTo(info) + // ... +} +``` + +使用函数 `ini.ReflectFromWithMapper` 时也可应用相同的规则。 + +#### 值映射器(Value Mapper) + +值映射器允许使用一个自定义函数自动展开值的具体内容,例如:运行时获取环境变量: + +```go +type Env struct { + Foo string `ini:"foo"` +} + +func main() { + cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n") + cfg.ValueMapper = os.ExpandEnv + // ... + env := &Env{} + err = cfg.Section("env").MapTo(env) +} +``` + +本例中,`env.Foo` 将会是运行时所获取到环境变量 `MY_VAR` 的值。 + +#### 映射/反射的其它说明 + +任何嵌入的结构都会被默认认作一个不同的分区,并且不会自动产生所谓的父子分区关联: + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child +} + +type Config struct { + City string + Parent +} +``` + +示例配置文件: + +```ini +City = Boston + +[Parent] +Name = Unknwon + +[Child] +Age = 21 +``` + +很好,但是,我就是要嵌入结构也在同一个分区。好吧,你爹是李刚! + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child `ini:"Parent"` +} + +type Config struct { + City string + Parent +} +``` + +示例配置文件: + +```ini +City = Boston + +[Parent] +Name = Unknwon +Age = 21 +``` + +## 获取帮助 + +- [API 文档](https://gowalker.org/gopkg.in/ini.v1) +- [创建工单](https://github.com/go-ini/ini/issues/new) + +## 常见问题 + +### 字段 `BlockMode` 是什么? + +默认情况下,本库会在您进行读写操作时采用锁机制来确保数据时间。但在某些情况下,您非常确定只进行读操作。此时,您可以通过设置 `cfg.BlockMode = false` 来将读操作提升大约 **50-70%** 的性能。 + +### 为什么要写另一个 INI 解析库? + +许多人都在使用我的 [goconfig](https://github.com/Unknwon/goconfig) 来完成对 INI 文件的操作,但我希望使用更加 Go 风格的代码。并且当您设置 `cfg.BlockMode = false` 时,会有大约 **10-30%** 的性能提升。 + +为了做出这些改变,我必须对 API 进行破坏,所以新开一个仓库是最安全的做法。除此之外,本库直接使用 `gopkg.in` 来进行版本化发布。(其实真相是导入路径更短了) diff --git a/vendor/github.com/go-ini/ini/error.go b/vendor/github.com/go-ini/ini/error.go new file mode 100644 index 000000000..80afe7431 --- /dev/null +++ b/vendor/github.com/go-ini/ini/error.go @@ -0,0 +1,32 @@ +// Copyright 2016 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
+ +package ini + +import ( + "fmt" +) + +type ErrDelimiterNotFound struct { + Line string +} + +func IsErrDelimiterNotFound(err error) bool { + _, ok := err.(ErrDelimiterNotFound) + return ok +} + +func (err ErrDelimiterNotFound) Error() string { + return fmt.Sprintf("key-value delimiter not found: %s", err.Line) +} diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go new file mode 100644 index 000000000..f8827ddd2 --- /dev/null +++ b/vendor/github.com/go-ini/ini/ini.go @@ -0,0 +1,561 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +// Package ini provides INI file read and write functionality in Go. +package ini + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + "time" +) + +const ( + // Name for default section. You can use this constant or the string literal. + // In most of cases, an empty string is all you need to access the section. + DEFAULT_SECTION = "DEFAULT" + + // Maximum allowed depth when recursively substituing variable names. + _DEPTH_VALUES = 99 + _VERSION = "1.28.1" +) + +// Version returns current package version literal. +func Version() string { + return _VERSION +} + +var ( + // Delimiter to determine or compose a new line. + // This variable will be changed to "\r\n" automatically on Windows + // at package init time. + LineBreak = "\n" + + // Variable regexp pattern: %(variable)s + varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`) + + // Indicate whether to align "=" sign with spaces to produce pretty output + // or reduce all possible spaces for compact format. + PrettyFormat = true + + // Explicitly write DEFAULT section header + DefaultHeader = false + + // Indicate whether to put a line between sections + PrettySection = true +) + +func init() { + if runtime.GOOS == "windows" { + LineBreak = "\r\n" + } +} + +func inSlice(str string, s []string) bool { + for _, v := range s { + if str == v { + return true + } + } + return false +} + +// dataSource is an interface that returns object which can be read and closed. +type dataSource interface { + ReadCloser() (io.ReadCloser, error) +} + +// sourceFile represents an object that contains content on the local file system. +type sourceFile struct { + name string +} + +func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) { + return os.Open(s.name) +} + +type bytesReadCloser struct { + reader io.Reader +} + +func (rc *bytesReadCloser) Read(p []byte) (n int, err error) { + return rc.reader.Read(p) +} + +func (rc *bytesReadCloser) Close() error { + return nil +} + +// sourceData represents an object that contains content in memory. +type sourceData struct { + data []byte +} + +func (s *sourceData) ReadCloser() (io.ReadCloser, error) { + return ioutil.NopCloser(bytes.NewReader(s.data)), nil +} + +// sourceReadCloser represents an input stream with Close method. 
+type sourceReadCloser struct { + reader io.ReadCloser +} + +func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) { + return s.reader, nil +} + +// File represents a combination of a or more INI file(s) in memory. +type File struct { + // Should make things safe, but sometimes doesn't matter. + BlockMode bool + // Make sure data is safe in multiple goroutines. + lock sync.RWMutex + + // Allow combination of multiple data sources. + dataSources []dataSource + // Actual data is stored here. + sections map[string]*Section + + // To keep data in order. + sectionList []string + + options LoadOptions + + NameMapper + ValueMapper +} + +// newFile initializes File object with given data sources. +func newFile(dataSources []dataSource, opts LoadOptions) *File { + return &File{ + BlockMode: true, + dataSources: dataSources, + sections: make(map[string]*Section), + sectionList: make([]string, 0, 10), + options: opts, + } +} + +func parseDataSource(source interface{}) (dataSource, error) { + switch s := source.(type) { + case string: + return sourceFile{s}, nil + case []byte: + return &sourceData{s}, nil + case io.ReadCloser: + return &sourceReadCloser{s}, nil + default: + return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s) + } +} + +type LoadOptions struct { + // Loose indicates whether the parser should ignore nonexistent files or return error. + Loose bool + // Insensitive indicates whether the parser forces all section and key names to lowercase. + Insensitive bool + // IgnoreContinuation indicates whether to ignore continuation lines while parsing. + IgnoreContinuation bool + // IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value. + IgnoreInlineComment bool + // AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing. + // This type of keys are mostly used in my.cnf. + AllowBooleanKeys bool + // AllowShadows indicates whether to keep track of keys with same name under same section. + AllowShadows bool + // Some INI formats allow group blocks that store a block of raw content that doesn't otherwise + // conform to key/value pairs. Specify the names of those blocks here. + UnparseableSections []string +} + +func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) { + sources := make([]dataSource, len(others)+1) + sources[0], err = parseDataSource(source) + if err != nil { + return nil, err + } + for i := range others { + sources[i+1], err = parseDataSource(others[i]) + if err != nil { + return nil, err + } + } + f := newFile(sources, opts) + if err = f.Reload(); err != nil { + return nil, err + } + return f, nil +} + +// Load loads and parses from INI data sources. +// Arguments can be mixed of file name with string type, or raw data in []byte. +// It will return error if list contains nonexistent files. +func Load(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{}, source, others...) +} + +// LooseLoad has exactly same functionality as Load function +// except it ignores nonexistent files instead of returning error. +func LooseLoad(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{Loose: true}, source, others...) +} + +// InsensitiveLoad has exactly same functionality as Load function +// except it forces all section and key names to be lowercased. 
+func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) {
+	return LoadSources(LoadOptions{Insensitive: true}, source, others...)
+}
+
+// ShadowLoad has exactly the same functionality as the Load function,
+// except that it allows shadow keys (same key name under one section).
+func ShadowLoad(source interface{}, others ...interface{}) (*File, error) {
+	return LoadSources(LoadOptions{AllowShadows: true}, source, others...)
+}
+
+// Empty returns an empty file object.
+func Empty() *File {
+	// Ignore error here, we're sure our data is good.
+	f, _ := Load([]byte(""))
+	return f
+}
+
+// NewSection creates a new section.
+func (f *File) NewSection(name string) (*Section, error) {
+	if len(name) == 0 {
+		return nil, errors.New("error creating new section: empty section name")
+	} else if f.options.Insensitive && name != DEFAULT_SECTION {
+		name = strings.ToLower(name)
+	}
+
+	if f.BlockMode {
+		f.lock.Lock()
+		defer f.lock.Unlock()
+	}
+
+	if inSlice(name, f.sectionList) {
+		return f.sections[name], nil
+	}
+
+	f.sectionList = append(f.sectionList, name)
+	f.sections[name] = newSection(f, name)
+	return f.sections[name], nil
+}
+
+// NewRawSection creates a new section with an unparseable body.
+func (f *File) NewRawSection(name, body string) (*Section, error) {
+	section, err := f.NewSection(name)
+	if err != nil {
+		return nil, err
+	}
+
+	section.isRawSection = true
+	section.rawBody = body
+	return section, nil
+}
+
+// NewSections creates a list of sections.
+func (f *File) NewSections(names ...string) (err error) {
+	for _, name := range names {
+		if _, err = f.NewSection(name); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// GetSection returns the section with the given name.
+func (f *File) GetSection(name string) (*Section, error) {
+	if len(name) == 0 {
+		name = DEFAULT_SECTION
+	} else if f.options.Insensitive {
+		name = strings.ToLower(name)
+	}
+
+	if f.BlockMode {
+		f.lock.RLock()
+		defer f.lock.RUnlock()
+	}
+
+	sec := f.sections[name]
+	if sec == nil {
+		return nil, fmt.Errorf("section '%s' does not exist", name)
+	}
+	return sec, nil
+}
+
+// Section assumes the named section exists and automatically creates a new one when it does not.
+func (f *File) Section(name string) *Section {
+	sec, err := f.GetSection(name)
+	if err != nil {
+		// Note: It's OK to ignore the error here because the only possible error
+		// from NewSection is an empty section name, which cannot happen in this code path.
+		sec, _ = f.NewSection(name)
+		return sec
+	}
+	return sec
+}
+
+// Sections returns the list of sections.
+func (f *File) Sections() []*Section {
+	sections := make([]*Section, len(f.sectionList))
+	for i := range f.sectionList {
+		sections[i] = f.Section(f.sectionList[i])
+	}
+	return sections
+}
+
+// ChildSections returns a list of child sections of given section name.
+func (f *File) ChildSections(name string) []*Section {
+	return f.Section(name).ChildSections()
+}
+
+// SectionStrings returns list of section names.
+func (f *File) SectionStrings() []string {
+	list := make([]string, len(f.sectionList))
+	copy(list, f.sectionList)
+	return list
+}
+
+// DeleteSection deletes a section.
+func (f *File) DeleteSection(name string) {
+	if f.BlockMode {
+		f.lock.Lock()
+		defer f.lock.Unlock()
+	}
+
+	if len(name) == 0 {
+		name = DEFAULT_SECTION
+	}
+
+	for i, s := range f.sectionList {
+		if s == name {
+			f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
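+			// Keep the ordered section-name list (above) and the sections map
+			// (below) in sync: both must drop the section being deleted.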
+// DeleteSection deletes a section.
+func (f *File) DeleteSection(name string) {
+	if f.BlockMode {
+		f.lock.Lock()
+		defer f.lock.Unlock()
+	}
+
+	if len(name) == 0 {
+		name = DEFAULT_SECTION
+	}
+
+	for i, s := range f.sectionList {
+		if s == name {
+			f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
+			delete(f.sections, name)
+			return
+		}
+	}
+}
+
+func (f *File) reload(s dataSource) error {
+	r, err := s.ReadCloser()
+	if err != nil {
+		return err
+	}
+	defer r.Close()
+
+	return f.parse(r)
+}
+
+// Reload reloads and parses all data sources.
+func (f *File) Reload() (err error) {
+	for _, s := range f.dataSources {
+		if err = f.reload(s); err != nil {
+			// In loose mode, we create an empty default section for nonexistent files.
+			if os.IsNotExist(err) && f.options.Loose {
+				f.parse(bytes.NewBuffer(nil))
+				continue
+			}
+			return err
+		}
+	}
+	return nil
+}
+
+// Append appends one or more data sources and reloads automatically.
+func (f *File) Append(source interface{}, others ...interface{}) error {
+	ds, err := parseDataSource(source)
+	if err != nil {
+		return err
+	}
+	f.dataSources = append(f.dataSources, ds)
+	for _, s := range others {
+		ds, err = parseDataSource(s)
+		if err != nil {
+			return err
+		}
+		f.dataSources = append(f.dataSources, ds)
+	}
+	return f.Reload()
+}
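+
+// A sketch of appending sources and writing the file back out (illustrative
+// only, not part of the vendored source; SaveTo and SaveToIndent are defined
+// just below, and the file names are hypothetical):
+//
+//	cfg, _ := ini.Load("base.ini")
+//	// Merge overrides on top of the base configuration:
+//	if err := cfg.Append("override.ini", []byte("debug = true")); err != nil {
+//		// handle error
+//	}
+//	cfg.SaveTo("merged.ini")             // plain "key = value" layout
+//	cfg.SaveToIndent("merged.ini", "\t") // indent keys under section headers
+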
+// WriteToIndent writes the content into io.Writer with the given indentation.
+// If PrettyFormat has been set to true,
+// it will align the "=" signs with spaces within each section.
+func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
+	equalSign := "="
+	if PrettyFormat {
+		equalSign = " = "
+	}
+
+	// Use a buffer so the target writer is only touched once encoding has finished.
+	buf := bytes.NewBuffer(nil)
+	for i, sname := range f.sectionList {
+		sec := f.Section(sname)
+		if len(sec.Comment) > 0 {
+			if sec.Comment[0] != '#' && sec.Comment[0] != ';' {
+				sec.Comment = "; " + sec.Comment
+			}
+			if _, err = buf.WriteString(sec.Comment + LineBreak); err != nil {
+				return 0, err
+			}
+		}
+
+		if i > 0 || DefaultHeader {
+			if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
+				return 0, err
+			}
+		} else {
+			// Write nothing if default section is empty
+			if len(sec.keyList) == 0 {
+				continue
+			}
+		}
+
+		if sec.isRawSection {
+			if _, err = buf.WriteString(sec.rawBody); err != nil {
+				return 0, err
+			}
+			continue
+		}
+
+		// Count and generate alignment length and buffer spaces using the
+		// longest key. Keys may be modified if they contain certain characters so
+		// we need to take that into account in our calculation.
+		alignLength := 0
+		if PrettyFormat {
+			for _, kname := range sec.keyList {
+				keyLength := len(kname)
+				// First case will surround key by ` and second by """
+				if strings.ContainsAny(kname, "\"=:") {
+					keyLength += 2
+				} else if strings.Contains(kname, "`") {
+					keyLength += 6
+				}
+
+				if keyLength > alignLength {
+					alignLength = keyLength
+				}
+			}
+		}
+		alignSpaces := bytes.Repeat([]byte(" "), alignLength)
+
+	KEY_LIST:
+		for _, kname := range sec.keyList {
+			key := sec.Key(kname)
+			if len(key.Comment) > 0 {
+				if len(indent) > 0 && sname != DEFAULT_SECTION {
+					buf.WriteString(indent)
+				}
+				if key.Comment[0] != '#' && key.Comment[0] != ';' {
+					key.Comment = "; " + key.Comment
+				}
+				if _, err = buf.WriteString(key.Comment + LineBreak); err != nil {
+					return 0, err
+				}
+			}
+
+			if len(indent) > 0 && sname != DEFAULT_SECTION {
+				buf.WriteString(indent)
+			}
+
+			switch {
+			case key.isAutoIncrement:
+				kname = "-"
+			case strings.ContainsAny(kname, "\"=:"):
+				kname = "`" + kname + "`"
+			case strings.Contains(kname, "`"):
+				kname = `"""` + kname + `"""`
+			}
+
+			for _, val := range key.ValueWithShadows() {
+				if _, err = buf.WriteString(kname); err != nil {
+					return 0, err
+				}
+
+				if key.isBooleanType {
+					if kname != sec.keyList[len(sec.keyList)-1] {
+						buf.WriteString(LineBreak)
+					}
+					continue KEY_LIST
+				}
+
+				// Write out alignment spaces before "=" sign
+				if PrettyFormat {
+					buf.Write(alignSpaces[:alignLength-len(kname)])
+				}
+
+				// In case key value contains "\n", "`", "\"", "#" or ";"
+				if strings.ContainsAny(val, "\n`") {
+					val = `"""` + val + `"""`
+				} else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") {
+					val = "`" + val + "`"
+				}
+				if _, err = buf.WriteString(equalSign + val + LineBreak); err != nil {
+					return 0, err
+				}
+			}
+		}
+
+		if PrettySection {
+			// Put a line between sections
+			if _, err = buf.WriteString(LineBreak); err != nil {
+				return 0, err
+			}
+		}
+	}
+
+	return buf.WriteTo(w)
+}
+
+// WriteTo writes file content into io.Writer.
+func (f *File) WriteTo(w io.Writer) (int64, error) {
+	return f.WriteToIndent(w, "")
+}
+
+// SaveToIndent writes the content to the file system with the given value indentation.
+func (f *File) SaveToIndent(filename, indent string) error {
+	// Note: Because os.Create truncates the target file, it's safer to write
+	// to a temporary location first and rename the file after we're done.
+	tmpPath := filename + "." + strconv.Itoa(time.Now().Nanosecond()) + ".tmp"
+	defer os.Remove(tmpPath)
+
+	fw, err := os.Create(tmpPath)
+	if err != nil {
+		return err
+	}
+
+	if _, err = f.WriteToIndent(fw, indent); err != nil {
+		fw.Close()
+		return err
+	}
+	fw.Close()
+
+	// Remove old file and rename the new one.
+	os.Remove(filename)
+	return os.Rename(tmpPath, filename)
+}
+
+// SaveTo writes content to file system.
+func (f *File) SaveTo(filename string) error {
+	return f.SaveToIndent(filename, "")
+}
diff --git a/vendor/github.com/go-ini/ini/ini_test.go b/vendor/github.com/go-ini/ini/ini_test.go
new file mode 100644
index 000000000..b3dd217c6
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/ini_test.go
@@ -0,0 +1,491 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License.
You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "io/ioutil" + "strings" + "testing" + "time" + + . "github.com/smartystreets/goconvey/convey" +) + +func Test_Version(t *testing.T) { + Convey("Get version", t, func() { + So(Version(), ShouldEqual, _VERSION) + }) +} + +const _CONF_DATA = ` +; Package name +NAME = ini +; Package version +VERSION = v1 +; Package import path +IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s + +# Information about package author +# Bio can be written in multiple lines. +[author] +NAME = Unknwon ; Succeeding comment +E-MAIL = fake@localhost +GITHUB = https://github.com/%(NAME)s +BIO = """Gopher. +Coding addict. +Good man. +""" # Succeeding comment + +[package] +CLONE_URL = https://%(IMPORT_PATH)s + +[package.sub] +UNUSED_KEY = should be deleted + +[features] +-: Support read/write comments of keys and sections +-: Support auto-increment of key names +-: Support load multiple files to overwrite key values + +[types] +STRING = str +BOOL = true +BOOL_FALSE = false +FLOAT64 = 1.25 +INT = 10 +TIME = 2015-01-01T20:17:05Z +DURATION = 2h45m +UINT = 3 + +[array] +STRINGS = en, zh, de +FLOAT64S = 1.1, 2.2, 3.3 +INTS = 1, 2, 3 +UINTS = 1, 2, 3 +TIMES = 2015-01-01T20:17:05Z,2015-01-01T20:17:05Z,2015-01-01T20:17:05Z + +[note] +empty_lines = next line is empty\ + +; Comment before the section +[comments] ; This is a comment for the section too +; Comment before key +key = "value" +key2 = "value2" ; This is a comment for key2 +key3 = "one", "two", "three" + +[advance] +value with quotes = "some value" +value quote2 again = 'some value' +includes comment sign = ` + "`" + "my#password" + "`" + ` +includes comment sign2 = ` + "`" + "my;password" + "`" + ` +true = 2+3=5 +"1+1=2" = true +"""6+1=7""" = true +"""` + "`" + `5+5` + "`" + `""" = 10 +` + "`" + `"6+6"` + "`" + ` = 12 +` + "`" + `7-2=4` + "`" + ` = false +ADDRESS = ` + "`" + `404 road, +NotFound, State, 50000` + "`" + ` + +two_lines = how about \ + continuation lines? 
+lots_of_lines = 1 \ + 2 \ + 3 \ + 4 \ +` + +func Test_Load(t *testing.T) { + Convey("Load from data sources", t, func() { + + Convey("Load with empty data", func() { + So(Empty(), ShouldNotBeNil) + }) + + Convey("Load with multiple data sources", func() { + cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini", ioutil.NopCloser(bytes.NewReader([]byte(_CONF_DATA)))) + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + f, err := Load([]byte(_CONF_DATA), "testdata/404.ini") + So(err, ShouldNotBeNil) + So(f, ShouldBeNil) + }) + + Convey("Load with io.ReadCloser", func() { + cfg, err := Load(ioutil.NopCloser(bytes.NewReader([]byte(_CONF_DATA)))) + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + So(cfg.Section("").Key("NAME").String(), ShouldEqual, "ini") + }) + }) + + Convey("Bad load process", t, func() { + + Convey("Load from invalid data sources", func() { + _, err := Load(_CONF_DATA) + So(err, ShouldNotBeNil) + + f, err := Load("testdata/404.ini") + So(err, ShouldNotBeNil) + So(f, ShouldBeNil) + + _, err = Load(1) + So(err, ShouldNotBeNil) + + _, err = Load([]byte(""), 1) + So(err, ShouldNotBeNil) + }) + + Convey("Load with bad section name", func() { + _, err := Load([]byte("[]")) + So(err, ShouldNotBeNil) + + _, err = Load([]byte("[")) + So(err, ShouldNotBeNil) + }) + + Convey("Load with bad keys", func() { + _, err := Load([]byte(`"""name`)) + So(err, ShouldNotBeNil) + + _, err = Load([]byte(`"""name"""`)) + So(err, ShouldNotBeNil) + + _, err = Load([]byte(`""=1`)) + So(err, ShouldNotBeNil) + + _, err = Load([]byte(`=`)) + So(err, ShouldNotBeNil) + + _, err = Load([]byte(`name`)) + So(err, ShouldNotBeNil) + }) + + Convey("Load with bad values", func() { + _, err := Load([]byte(`name="""Unknwon`)) + So(err, ShouldNotBeNil) + }) + }) + + Convey("Get section and key insensitively", t, func() { + cfg, err := InsensitiveLoad([]byte(_CONF_DATA), "testdata/conf.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + sec, err := cfg.GetSection("Author") + So(err, ShouldBeNil) + So(sec, ShouldNotBeNil) + + key, err := sec.GetKey("E-mail") + So(err, ShouldBeNil) + So(key, ShouldNotBeNil) + }) + + Convey("Load with ignoring continuation lines", t, func() { + cfg, err := LoadSources(LoadOptions{IgnoreContinuation: true}, []byte(`key1=a\b\ +key2=c\d\`)) + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + So(cfg.Section("").Key("key1").String(), ShouldEqual, `a\b\`) + So(cfg.Section("").Key("key2").String(), ShouldEqual, `c\d\`) + }) + + Convey("Load with ignoring inline comments", t, func() { + cfg, err := LoadSources(LoadOptions{IgnoreInlineComment: true}, []byte(`key1=value ;comment +key2=value #comment2`)) + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + So(cfg.Section("").Key("key1").String(), ShouldEqual, `value ;comment`) + So(cfg.Section("").Key("key2").String(), ShouldEqual, `value #comment2`) + + var buf bytes.Buffer + cfg.WriteTo(&buf) + So(buf.String(), ShouldEqual, `key1 = value ;comment +key2 = value #comment2 + +`) + }) + + Convey("Load with boolean type keys", t, func() { + cfg, err := LoadSources(LoadOptions{AllowBooleanKeys: true}, []byte(`key1=hello +key2 +#key3 +key4 +key5`)) + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + So(strings.Join(cfg.Section("").KeyStrings(), ","), ShouldEqual, "key1,key2,key4,key5") + So(cfg.Section("").Key("key2").MustBool(false), ShouldBeTrue) + + var buf bytes.Buffer + cfg.WriteTo(&buf) + // there is always a trailing \n at the end of the section + So(buf.String(), ShouldEqual, `key1 = hello +key2 +#key3 +key4 +key5 +`) + }) +} + 
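+// A sketch showing that the options exercised one at a time above can be
+// combined in a single LoadSources call (illustrative only, not part of the
+// vendored source; "app.ini" is hypothetical):
+//
+//	cfg, err := ini.LoadSources(ini.LoadOptions{
+//		Loose:               true,
+//		Insensitive:         true,
+//		IgnoreInlineComment: true,
+//	}, "app.ini")
+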
+func Test_File_ChildSections(t *testing.T) { + Convey("Find child sections by parent name", t, func() { + cfg, err := Load([]byte(` +[node] + +[node.biz1] + +[node.biz2] + +[node.biz3] + +[node.bizN] +`)) + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + children := cfg.ChildSections("node") + names := make([]string, len(children)) + for i := range children { + names[i] = children[i].name + } + So(strings.Join(names, ","), ShouldEqual, "node.biz1,node.biz2,node.biz3,node.bizN") + }) +} + +func Test_LooseLoad(t *testing.T) { + Convey("Loose load from data sources", t, func() { + Convey("Loose load mixed with nonexistent file", func() { + cfg, err := LooseLoad("testdata/404.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + var fake struct { + Name string `ini:"name"` + } + So(cfg.MapTo(&fake), ShouldBeNil) + + cfg, err = LooseLoad([]byte("name=Unknwon"), "testdata/404.ini") + So(err, ShouldBeNil) + So(cfg.Section("").Key("name").String(), ShouldEqual, "Unknwon") + So(cfg.MapTo(&fake), ShouldBeNil) + So(fake.Name, ShouldEqual, "Unknwon") + }) + }) + +} + +func Test_File_Append(t *testing.T) { + Convey("Append data sources", t, func() { + cfg, err := Load([]byte("")) + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + So(cfg.Append([]byte(""), []byte("")), ShouldBeNil) + + Convey("Append bad data sources", func() { + So(cfg.Append(1), ShouldNotBeNil) + So(cfg.Append([]byte(""), 1), ShouldNotBeNil) + }) + }) +} + +func Test_File_WriteTo(t *testing.T) { + Convey("Write to somewhere", t, func() { + var buf bytes.Buffer + cfg := Empty() + cfg.WriteTo(&buf) + }) +} + +func Test_File_SaveTo_WriteTo(t *testing.T) { + Convey("Save file", t, func() { + cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + cfg.Section("").Key("NAME").Comment = "Package name" + cfg.Section("author").Comment = `Information about package author +# Bio can be written in multiple lines.` + cfg.Section("advanced").Key("val w/ pound").SetValue("my#password") + cfg.Section("advanced").Key("longest key has a colon : yes/no").SetValue("yes") + So(cfg.SaveTo("testdata/conf_out.ini"), ShouldBeNil) + + cfg.Section("author").Key("NAME").Comment = "This is author name" + + So(cfg.SaveToIndent("testdata/conf_out.ini", "\t"), ShouldBeNil) + + var buf bytes.Buffer + _, err = cfg.WriteToIndent(&buf, "\t") + So(err, ShouldBeNil) + So(buf.String(), ShouldEqual, `; Package name +NAME = ini +; Package version +VERSION = v1 +; Package import path +IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s + +; Information about package author +# Bio can be written in multiple lines. +[author] + ; This is author name + NAME = Unknwon + E-MAIL = u@gogs.io + GITHUB = https://github.com/%(NAME)s + # Succeeding comment + BIO = """Gopher. +Coding addict. +Good man. 
+""" + +[package] + CLONE_URL = https://%(IMPORT_PATH)s + +[package.sub] + UNUSED_KEY = should be deleted + +[features] + - = Support read/write comments of keys and sections + - = Support auto-increment of key names + - = Support load multiple files to overwrite key values + +[types] + STRING = str + BOOL = true + BOOL_FALSE = false + FLOAT64 = 1.25 + INT = 10 + TIME = 2015-01-01T20:17:05Z + DURATION = 2h45m + UINT = 3 + +[array] + STRINGS = en, zh, de + FLOAT64S = 1.1, 2.2, 3.3 + INTS = 1, 2, 3 + UINTS = 1, 2, 3 + TIMES = 2015-01-01T20:17:05Z,2015-01-01T20:17:05Z,2015-01-01T20:17:05Z + +[note] + empty_lines = next line is empty + +; Comment before the section +; This is a comment for the section too +[comments] + ; Comment before key + key = value + ; This is a comment for key2 + key2 = value2 + key3 = "one", "two", "three" + +[advance] + value with quotes = some value + value quote2 again = some value + includes comment sign = `+"`"+"my#password"+"`"+` + includes comment sign2 = `+"`"+"my;password"+"`"+` + true = 2+3=5 + `+"`"+`1+1=2`+"`"+` = true + `+"`"+`6+1=7`+"`"+` = true + """`+"`"+`5+5`+"`"+`""" = 10 + `+"`"+`"6+6"`+"`"+` = 12 + `+"`"+`7-2=4`+"`"+` = false + ADDRESS = """404 road, +NotFound, State, 50000""" + two_lines = how about continuation lines? + lots_of_lines = 1 2 3 4 + +[advanced] + val w/ pound = `+"`"+`my#password`+"`"+` + `+"`"+`longest key has a colon : yes/no`+"`"+` = yes + +`) + }) +} + +func Test_File_WriteTo_SectionRaw(t *testing.T) { + Convey("Write a INI with a raw section", t, func() { + var buf bytes.Buffer + cfg, err := LoadSources( + LoadOptions{ + UnparseableSections: []string{"CORE_LESSON", "COMMENTS"}, + }, + "testdata/aicc.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + cfg.WriteToIndent(&buf, "\t") + So(buf.String(), ShouldEqual, `[Core] + Lesson_Location = 87 + Lesson_Status = C + Score = 3 + Time = 00:02:30 + +[CORE_LESSON] +my lesson state data – 1111111111111111111000000000000000001110000 +111111111111111111100000000000111000000000 – end my lesson state data +[COMMENTS] +<1> This slide has the fuel listed in the wrong units +`) + }) +} + +// Helpers for slice tests. 
+func float64sEqual(values []float64, expected ...float64) {
+	So(values, ShouldHaveLength, len(expected))
+	for i, v := range expected {
+		So(values[i], ShouldEqual, v)
+	}
+}
+
+func intsEqual(values []int, expected ...int) {
+	So(values, ShouldHaveLength, len(expected))
+	for i, v := range expected {
+		So(values[i], ShouldEqual, v)
+	}
+}
+
+func int64sEqual(values []int64, expected ...int64) {
+	So(values, ShouldHaveLength, len(expected))
+	for i, v := range expected {
+		So(values[i], ShouldEqual, v)
+	}
+}
+
+func uintsEqual(values []uint, expected ...uint) {
+	So(values, ShouldHaveLength, len(expected))
+	for i, v := range expected {
+		So(values[i], ShouldEqual, v)
+	}
+}
+
+func uint64sEqual(values []uint64, expected ...uint64) {
+	So(values, ShouldHaveLength, len(expected))
+	for i, v := range expected {
+		So(values[i], ShouldEqual, v)
+	}
+}
+
+func timesEqual(values []time.Time, expected ...time.Time) {
+	So(values, ShouldHaveLength, len(expected))
+	for i, v := range expected {
+		So(values[i].String(), ShouldEqual, v.String())
+	}
+}
diff --git a/vendor/github.com/go-ini/ini/key.go b/vendor/github.com/go-ini/ini/key.go
new file mode 100644
index 000000000..838356af0
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/key.go
@@ -0,0 +1,699 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Key represents a key under a section.
+type Key struct {
+	s               *Section
+	name            string
+	value           string
+	isAutoIncrement bool
+	isBooleanType   bool
+
+	isShadow bool
+	shadows  []*Key
+
+	Comment string
+}
+
+// newKey simply returns a key object with given values.
+func newKey(s *Section, name, val string) *Key {
+	return &Key{
+		s:     s,
+		name:  name,
+		value: val,
+	}
+}
+
+func (k *Key) addShadow(val string) error {
+	if k.isShadow {
+		return errors.New("cannot add shadow to another shadow key")
+	} else if k.isAutoIncrement || k.isBooleanType {
+		return errors.New("cannot add shadow to auto-increment or boolean key")
+	}
+
+	shadow := newKey(k.s, k.name, val)
+	shadow.isShadow = true
+	k.shadows = append(k.shadows, shadow)
+	return nil
+}
+
+// AddShadow adds a new shadow key to itself.
+func (k *Key) AddShadow(val string) error {
+	if !k.s.f.options.AllowShadows {
+		return errors.New("shadow key is not allowed")
+	}
+	return k.addShadow(val)
+}
+
+// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv
+type ValueMapper func(string) string
+
+// Name returns name of key.
+func (k *Key) Name() string {
+	return k.name
+}
+
+// Value returns raw value of key for performance purposes.
+func (k *Key) Value() string {
+	return k.value
+}
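+
+// A sketch of working with shadow keys, which require
+// LoadOptions{AllowShadows: true} (e.g. via ShadowLoad). Illustrative only,
+// not part of the vendored source:
+//
+//	cfg, _ := ini.ShadowLoad([]byte("[remote]\nurl = first\nurl = second"))
+//	key := cfg.Section("remote").Key("url")
+//	key.AddShadow("third")              // errors unless AllowShadows is set
+//	fmt.Println(key.ValueWithShadows()) // [first second third]
+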
+// ValueWithShadows returns raw values of key and its shadows if any.
+func (k *Key) ValueWithShadows() []string {
+	if len(k.shadows) == 0 {
+		return []string{k.value}
+	}
+	vals := make([]string, len(k.shadows)+1)
+	vals[0] = k.value
+	for i := range k.shadows {
+		vals[i+1] = k.shadows[i].value
+	}
+	return vals
+}
+
+// transformValue takes a raw value and transforms it to its final string.
+func (k *Key) transformValue(val string) string {
+	if k.s.f.ValueMapper != nil {
+		val = k.s.f.ValueMapper(val)
+	}
+
+	// Fail fast if no indicator char for a recursive value is found
+	if !strings.Contains(val, "%") {
+		return val
+	}
+	for i := 0; i < _DEPTH_VALUES; i++ {
+		vr := varPattern.FindString(val)
+		if len(vr) == 0 {
+			break
+		}
+
+		// Take off leading '%(' and trailing ')s'.
+		noption := strings.TrimLeft(vr, "%(")
+		noption = strings.TrimRight(noption, ")s")
+
+		// Search in the same section.
+		nk, err := k.s.GetKey(noption)
+		if err != nil {
+			// Search again in default section.
+			nk, _ = k.s.f.Section("").GetKey(noption)
+			if nk == nil {
+				// Referenced key does not exist anywhere; leave the
+				// reference as-is instead of dereferencing a nil key.
+				break
+			}
+		}
+
+		// Substitute by new value and take off leading '%(' and trailing ')s'.
+		val = strings.Replace(val, vr, nk.value, -1)
+	}
+	return val
+}
+
+// String returns string representation of value.
+func (k *Key) String() string {
+	return k.transformValue(k.value)
+}
+
+// Validate accepts a validate function which can
+// return a modified result as the key value.
+func (k *Key) Validate(fn func(string) string) string {
+	return fn(k.String())
+}
+
+// parseBool returns the boolean value represented by the string.
+//
+// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On,
+// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off.
+// Any other value returns an error.
+func parseBool(str string) (value bool, err error) {
+	switch str {
+	case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On":
+		return true, nil
+	case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off":
+		return false, nil
+	}
+	return false, fmt.Errorf("parsing \"%s\": invalid syntax", str)
+}
+
+// Bool returns bool type value.
+func (k *Key) Bool() (bool, error) {
+	return parseBool(k.String())
+}
+
+// Float64 returns float64 type value.
+func (k *Key) Float64() (float64, error) {
+	return strconv.ParseFloat(k.String(), 64)
+}
+
+// Int returns int type value.
+func (k *Key) Int() (int, error) {
+	return strconv.Atoi(k.String())
+}
+
+// Int64 returns int64 type value.
+func (k *Key) Int64() (int64, error) {
+	return strconv.ParseInt(k.String(), 10, 64)
+}
+
+// Uint returns uint type value.
+func (k *Key) Uint() (uint, error) {
+	u, e := strconv.ParseUint(k.String(), 10, 64)
+	return uint(u), e
+}
+
+// Uint64 returns uint64 type value.
+func (k *Key) Uint64() (uint64, error) {
+	return strconv.ParseUint(k.String(), 10, 64)
+}
+
+// Duration returns time.Duration type value.
+func (k *Key) Duration() (time.Duration, error) {
+	return time.ParseDuration(k.String())
+}
+
+// TimeFormat parses with given format and returns time.Time type value.
+func (k *Key) TimeFormat(format string) (time.Time, error) {
+	return time.Parse(format, k.String())
+}
+
+// Time parses with RFC3339 format and returns time.Time type value.
+func (k *Key) Time() (time.Time, error) {
+	return k.TimeFormat(time.RFC3339)
+}
+
+// MustString returns default value if key value is empty.
+func (k *Key) MustString(defaultVal string) string { + val := k.String() + if len(val) == 0 { + k.value = defaultVal + return defaultVal + } + return val +} + +// MustBool always returns value without error, +// it returns false if error occurs. +func (k *Key) MustBool(defaultVal ...bool) bool { + val, err := k.Bool() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatBool(defaultVal[0]) + return defaultVal[0] + } + return val +} + +// MustFloat64 always returns value without error, +// it returns 0.0 if error occurs. +func (k *Key) MustFloat64(defaultVal ...float64) float64 { + val, err := k.Float64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64) + return defaultVal[0] + } + return val +} + +// MustInt always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt(defaultVal ...int) int { + val, err := k.Int() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatInt(int64(defaultVal[0]), 10) + return defaultVal[0] + } + return val +} + +// MustInt64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt64(defaultVal ...int64) int64 { + val, err := k.Int64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatInt(defaultVal[0], 10) + return defaultVal[0] + } + return val +} + +// MustUint always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint(defaultVal ...uint) uint { + val, err := k.Uint() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatUint(uint64(defaultVal[0]), 10) + return defaultVal[0] + } + return val +} + +// MustUint64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint64(defaultVal ...uint64) uint64 { + val, err := k.Uint64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatUint(defaultVal[0], 10) + return defaultVal[0] + } + return val +} + +// MustDuration always returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { + val, err := k.Duration() + if len(defaultVal) > 0 && err != nil { + k.value = defaultVal[0].String() + return defaultVal[0] + } + return val +} + +// MustTimeFormat always parses with given format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { + val, err := k.TimeFormat(format) + if len(defaultVal) > 0 && err != nil { + k.value = defaultVal[0].Format(format) + return defaultVal[0] + } + return val +} + +// MustTime always parses with RFC3339 format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTime(defaultVal ...time.Time) time.Time { + return k.MustTimeFormat(time.RFC3339, defaultVal...) +} + +// In always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) In(defaultVal string, candidates []string) string { + val := k.String() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InFloat64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. 
+func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { + val := k.MustFloat64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt(defaultVal int, candidates []int) int { + val := k.MustInt() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { + val := k.MustInt64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint(defaultVal uint, candidates []uint) uint { + val := k.MustUint() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { + val := k.MustUint64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTimeFormat always parses with given format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { + val := k.MustTimeFormat(format) + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTime always parses with RFC3339 format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { + return k.InTimeFormat(time.RFC3339, defaultVal, candidates) +} + +// RangeFloat64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { + val := k.MustFloat64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt(defaultVal, min, max int) int { + val := k.MustInt() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { + val := k.MustInt64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeTimeFormat checks if value with given format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { + val := k.MustTimeFormat(format) + if val.Unix() < min.Unix() || val.Unix() > max.Unix() { + return defaultVal + } + return val +} + +// RangeTime checks if value with RFC3339 format is in given range inclusively, +// and returns default value if it's not. 
+func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time {
+	return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max)
+}
+
+// Strings returns list of string divided by given delimiter.
+func (k *Key) Strings(delim string) []string {
+	str := k.String()
+	if len(str) == 0 {
+		return []string{}
+	}
+
+	vals := strings.Split(str, delim)
+	for i := range vals {
+		// vals[i] = k.transformValue(strings.TrimSpace(vals[i]))
+		vals[i] = strings.TrimSpace(vals[i])
+	}
+	return vals
+}
+
+// StringsWithShadows returns list of string divided by given delimiter.
+// Shadows will also be appended if any.
+func (k *Key) StringsWithShadows(delim string) []string {
+	vals := k.ValueWithShadows()
+	results := make([]string, 0, len(vals)*2)
+	for i := range vals {
+		// Skip empty values.
+		if len(vals[i]) == 0 {
+			continue
+		}
+
+		results = append(results, strings.Split(vals[i], delim)...)
+	}
+
+	for i := range results {
+		results[i] = k.transformValue(strings.TrimSpace(results[i]))
+	}
+	return results
+}
+
+// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Float64s(delim string) []float64 {
+	vals, _ := k.parseFloat64s(k.Strings(delim), true, false)
+	return vals
+}
+
+// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Ints(delim string) []int {
+	vals, _ := k.parseInts(k.Strings(delim), true, false)
+	return vals
+}
+
+// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Int64s(delim string) []int64 {
+	vals, _ := k.parseInt64s(k.Strings(delim), true, false)
+	return vals
+}
+
+// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Uints(delim string) []uint {
+	vals, _ := k.parseUints(k.Strings(delim), true, false)
+	return vals
+}
+
+// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Uint64s(delim string) []uint64 {
+	vals, _ := k.parseUint64s(k.Strings(delim), true, false)
+	return vals
+}
+
+// TimesFormat parses with given format and returns list of time.Time divided by given delimiter.
+// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
+func (k *Key) TimesFormat(format, delim string) []time.Time {
+	vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false)
+	return vals
+}
+
+// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter.
+// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
+func (k *Key) Times(delim string) []time.Time {
+	return k.TimesFormat(time.RFC3339, delim)
+}
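+
+// A sketch of the three list-getter families (the zero-value variants above,
+// the Valid and Strict variants below). Illustrative only, not part of the
+// vendored source:
+//
+//	key := cfg.Section("array").Key("INTS") // e.g. "INTS = 1, x, 3"
+//	key.Ints(",")                           // [1 0 3]: invalid items become zero
+//	key.ValidInts(",")                      // [1 3]: invalid items are dropped
+//	ints, err := key.StrictInts(",")        // err != nil: fails on first invalid item
+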
+// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not a float, then
+// it will not be included in the result list.
+func (k *Key) ValidFloat64s(delim string) []float64 {
+	vals, _ := k.parseFloat64s(k.Strings(delim), false, false)
+	return vals
+}
+
+// ValidInts returns list of int divided by given delimiter. If some value is not an integer, then it will
+// not be included in the result list.
+func (k *Key) ValidInts(delim string) []int {
+	vals, _ := k.parseInts(k.Strings(delim), false, false)
+	return vals
+}
+
+// ValidInt64s returns list of int64 divided by given delimiter. If some value is not a 64-bit integer,
+// then it will not be included in the result list.
+func (k *Key) ValidInt64s(delim string) []int64 {
+	vals, _ := k.parseInt64s(k.Strings(delim), false, false)
+	return vals
+}
+
+// ValidUints returns list of uint divided by given delimiter. If some value is not an unsigned integer,
+// then it will not be included in the result list.
+func (k *Key) ValidUints(delim string) []uint {
+	vals, _ := k.parseUints(k.Strings(delim), false, false)
+	return vals
+}
+
+// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not a 64-bit unsigned
+// integer, then it will not be included in the result list.
+func (k *Key) ValidUint64s(delim string) []uint64 {
+	vals, _ := k.parseUint64s(k.Strings(delim), false, false)
+	return vals
+}
+
+// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter.
+func (k *Key) ValidTimesFormat(format, delim string) []time.Time {
+	vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false)
+	return vals
+}
+
+// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter.
+func (k *Key) ValidTimes(delim string) []time.Time {
+	return k.ValidTimesFormat(time.RFC3339, delim)
+}
+
+// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input.
+func (k *Key) StrictFloat64s(delim string) ([]float64, error) {
+	return k.parseFloat64s(k.Strings(delim), false, true)
+}
+
+// StrictInts returns list of int divided by given delimiter or error on first invalid input.
+func (k *Key) StrictInts(delim string) ([]int, error) {
+	return k.parseInts(k.Strings(delim), false, true)
+}
+
+// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input.
+func (k *Key) StrictInt64s(delim string) ([]int64, error) {
+	return k.parseInt64s(k.Strings(delim), false, true)
+}
+
+// StrictUints returns list of uint divided by given delimiter or error on first invalid input.
+func (k *Key) StrictUints(delim string) ([]uint, error) {
+	return k.parseUints(k.Strings(delim), false, true)
+}
+
+// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input.
+func (k *Key) StrictUint64s(delim string) ([]uint64, error) {
+	return k.parseUint64s(k.Strings(delim), false, true)
+}
+
+// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter
+// or error on first invalid input.
+func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) {
+	return k.parseTimesFormat(format, k.Strings(delim), false, true)
+}
+
+// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter
+// or error on first invalid input.
+func (k *Key) StrictTimes(delim string) ([]time.Time, error) {
+	return k.StrictTimesFormat(time.RFC3339, delim)
+}
+
+// parseFloat64s transforms strings to float64s.
+func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) {
+	vals := make([]float64, 0, len(strs))
+	for _, str := range strs {
+		val, err := strconv.ParseFloat(str, 64)
+		if err != nil && returnOnInvalid {
+			return nil, err
+		}
+		if err == nil || addInvalid {
+			vals = append(vals, val)
+		}
+	}
+	return vals, nil
+}
+
+// parseInts transforms strings to ints.
+func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) { + vals := make([]int, 0, len(strs)) + for _, str := range strs { + val, err := strconv.Atoi(str) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// parseInt64s transforms strings to int64s. +func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) { + vals := make([]int64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseInt(str, 10, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// parseUints transforms strings to uints. +func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) { + vals := make([]uint, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseUint(str, 10, 0) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, uint(val)) + } + } + return vals, nil +} + +// parseUint64s transforms strings to uint64s. +func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) { + vals := make([]uint64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseUint(str, 10, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// parseTimesFormat transforms strings to times in given format. +func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) { + vals := make([]time.Time, 0, len(strs)) + for _, str := range strs { + val, err := time.Parse(format, str) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// SetValue changes key value. +func (k *Key) SetValue(v string) { + if k.s.f.BlockMode { + k.s.f.lock.Lock() + defer k.s.f.lock.Unlock() + } + + k.value = v + k.s.keysHash[k.name] = v +} diff --git a/vendor/github.com/go-ini/ini/key_test.go b/vendor/github.com/go-ini/ini/key_test.go new file mode 100644 index 000000000..1281d5bf0 --- /dev/null +++ b/vendor/github.com/go-ini/ini/key_test.go @@ -0,0 +1,573 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "fmt" + "strings" + "testing" + "time" + + . 
"github.com/smartystreets/goconvey/convey" +) + +func Test_Key(t *testing.T) { + Convey("Test getting and setting values", t, func() { + cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + Convey("Get values in default section", func() { + sec := cfg.Section("") + So(sec, ShouldNotBeNil) + So(sec.Key("NAME").Value(), ShouldEqual, "ini") + So(sec.Key("NAME").String(), ShouldEqual, "ini") + So(sec.Key("NAME").Validate(func(in string) string { + return in + }), ShouldEqual, "ini") + So(sec.Key("NAME").Comment, ShouldEqual, "; Package name") + So(sec.Key("IMPORT_PATH").String(), ShouldEqual, "gopkg.in/ini.v1") + }) + + Convey("Get values in non-default section", func() { + sec := cfg.Section("author") + So(sec, ShouldNotBeNil) + So(sec.Key("NAME").String(), ShouldEqual, "Unknwon") + So(sec.Key("GITHUB").String(), ShouldEqual, "https://github.com/Unknwon") + + sec = cfg.Section("package") + So(sec, ShouldNotBeNil) + So(sec.Key("CLONE_URL").String(), ShouldEqual, "https://gopkg.in/ini.v1") + }) + + Convey("Get auto-increment key names", func() { + keys := cfg.Section("features").Keys() + for i, k := range keys { + So(k.Name(), ShouldEqual, fmt.Sprintf("#%d", i+1)) + } + }) + + Convey("Get parent-keys that are available to the child section", func() { + parentKeys := cfg.Section("package.sub").ParentKeys() + for _, k := range parentKeys { + So(k.Name(), ShouldEqual, "CLONE_URL") + } + }) + + Convey("Get overwrite value", func() { + So(cfg.Section("author").Key("E-MAIL").String(), ShouldEqual, "u@gogs.io") + }) + + Convey("Get sections", func() { + sections := cfg.Sections() + for i, name := range []string{DEFAULT_SECTION, "author", "package", "package.sub", "features", "types", "array", "note", "comments", "advance"} { + So(sections[i].Name(), ShouldEqual, name) + } + }) + + Convey("Get parent section value", func() { + So(cfg.Section("package.sub").Key("CLONE_URL").String(), ShouldEqual, "https://gopkg.in/ini.v1") + So(cfg.Section("package.fake.sub").Key("CLONE_URL").String(), ShouldEqual, "https://gopkg.in/ini.v1") + }) + + Convey("Get multiple line value", func() { + So(cfg.Section("author").Key("BIO").String(), ShouldEqual, "Gopher.\nCoding addict.\nGood man.\n") + }) + + Convey("Get values with type", func() { + sec := cfg.Section("types") + v1, err := sec.Key("BOOL").Bool() + So(err, ShouldBeNil) + So(v1, ShouldBeTrue) + + v1, err = sec.Key("BOOL_FALSE").Bool() + So(err, ShouldBeNil) + So(v1, ShouldBeFalse) + + v2, err := sec.Key("FLOAT64").Float64() + So(err, ShouldBeNil) + So(v2, ShouldEqual, 1.25) + + v3, err := sec.Key("INT").Int() + So(err, ShouldBeNil) + So(v3, ShouldEqual, 10) + + v4, err := sec.Key("INT").Int64() + So(err, ShouldBeNil) + So(v4, ShouldEqual, 10) + + v5, err := sec.Key("UINT").Uint() + So(err, ShouldBeNil) + So(v5, ShouldEqual, 3) + + v6, err := sec.Key("UINT").Uint64() + So(err, ShouldBeNil) + So(v6, ShouldEqual, 3) + + t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + v7, err := sec.Key("TIME").Time() + So(err, ShouldBeNil) + So(v7.String(), ShouldEqual, t.String()) + + Convey("Must get values with type", func() { + So(sec.Key("STRING").MustString("404"), ShouldEqual, "str") + So(sec.Key("BOOL").MustBool(), ShouldBeTrue) + So(sec.Key("FLOAT64").MustFloat64(), ShouldEqual, 1.25) + So(sec.Key("INT").MustInt(), ShouldEqual, 10) + So(sec.Key("INT").MustInt64(), ShouldEqual, 10) + So(sec.Key("UINT").MustUint(), ShouldEqual, 3) + So(sec.Key("UINT").MustUint64(), ShouldEqual, 3) + 
So(sec.Key("TIME").MustTime().String(), ShouldEqual, t.String()) + + dur, err := time.ParseDuration("2h45m") + So(err, ShouldBeNil) + So(sec.Key("DURATION").MustDuration().Seconds(), ShouldEqual, dur.Seconds()) + + Convey("Must get values with default value", func() { + So(sec.Key("STRING_404").MustString("404"), ShouldEqual, "404") + So(sec.Key("BOOL_404").MustBool(true), ShouldBeTrue) + So(sec.Key("FLOAT64_404").MustFloat64(2.5), ShouldEqual, 2.5) + So(sec.Key("INT_404").MustInt(15), ShouldEqual, 15) + So(sec.Key("INT64_404").MustInt64(15), ShouldEqual, 15) + So(sec.Key("UINT_404").MustUint(6), ShouldEqual, 6) + So(sec.Key("UINT64_404").MustUint64(6), ShouldEqual, 6) + + t, err := time.Parse(time.RFC3339, "2014-01-01T20:17:05Z") + So(err, ShouldBeNil) + So(sec.Key("TIME_404").MustTime(t).String(), ShouldEqual, t.String()) + + So(sec.Key("DURATION_404").MustDuration(dur).Seconds(), ShouldEqual, dur.Seconds()) + + Convey("Must should set default as key value", func() { + So(sec.Key("STRING_404").String(), ShouldEqual, "404") + So(sec.Key("BOOL_404").String(), ShouldEqual, "true") + So(sec.Key("FLOAT64_404").String(), ShouldEqual, "2.5") + So(sec.Key("INT_404").String(), ShouldEqual, "15") + So(sec.Key("INT64_404").String(), ShouldEqual, "15") + So(sec.Key("UINT_404").String(), ShouldEqual, "6") + So(sec.Key("UINT64_404").String(), ShouldEqual, "6") + So(sec.Key("TIME_404").String(), ShouldEqual, "2014-01-01T20:17:05Z") + So(sec.Key("DURATION_404").String(), ShouldEqual, "2h45m0s") + }) + }) + }) + }) + + Convey("Get value with candidates", func() { + sec := cfg.Section("types") + So(sec.Key("STRING").In("", []string{"str", "arr", "types"}), ShouldEqual, "str") + So(sec.Key("FLOAT64").InFloat64(0, []float64{1.25, 2.5, 3.75}), ShouldEqual, 1.25) + So(sec.Key("INT").InInt(0, []int{10, 20, 30}), ShouldEqual, 10) + So(sec.Key("INT").InInt64(0, []int64{10, 20, 30}), ShouldEqual, 10) + So(sec.Key("UINT").InUint(0, []uint{3, 6, 9}), ShouldEqual, 3) + So(sec.Key("UINT").InUint64(0, []uint64{3, 6, 9}), ShouldEqual, 3) + + zt, err := time.Parse(time.RFC3339, "0001-01-01T01:00:00Z") + So(err, ShouldBeNil) + t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + So(sec.Key("TIME").InTime(zt, []time.Time{t, time.Now(), time.Now().Add(1 * time.Second)}).String(), ShouldEqual, t.String()) + + Convey("Get value with candidates and default value", func() { + So(sec.Key("STRING_404").In("str", []string{"str", "arr", "types"}), ShouldEqual, "str") + So(sec.Key("FLOAT64_404").InFloat64(1.25, []float64{1.25, 2.5, 3.75}), ShouldEqual, 1.25) + So(sec.Key("INT_404").InInt(10, []int{10, 20, 30}), ShouldEqual, 10) + So(sec.Key("INT64_404").InInt64(10, []int64{10, 20, 30}), ShouldEqual, 10) + So(sec.Key("UINT_404").InUint(3, []uint{3, 6, 9}), ShouldEqual, 3) + So(sec.Key("UINT_404").InUint64(3, []uint64{3, 6, 9}), ShouldEqual, 3) + So(sec.Key("TIME_404").InTime(t, []time.Time{time.Now(), time.Now(), time.Now().Add(1 * time.Second)}).String(), ShouldEqual, t.String()) + }) + }) + + Convey("Get values in range", func() { + sec := cfg.Section("types") + So(sec.Key("FLOAT64").RangeFloat64(0, 1, 2), ShouldEqual, 1.25) + So(sec.Key("INT").RangeInt(0, 10, 20), ShouldEqual, 10) + So(sec.Key("INT").RangeInt64(0, 10, 20), ShouldEqual, 10) + + minT, err := time.Parse(time.RFC3339, "0001-01-01T01:00:00Z") + So(err, ShouldBeNil) + midT, err := time.Parse(time.RFC3339, "2013-01-01T01:00:00Z") + So(err, ShouldBeNil) + maxT, err := time.Parse(time.RFC3339, "9999-01-01T01:00:00Z") + So(err, ShouldBeNil) + 
t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + So(sec.Key("TIME").RangeTime(t, minT, maxT).String(), ShouldEqual, t.String()) + + Convey("Get value in range with default value", func() { + So(sec.Key("FLOAT64").RangeFloat64(5, 0, 1), ShouldEqual, 5) + So(sec.Key("INT").RangeInt(7, 0, 5), ShouldEqual, 7) + So(sec.Key("INT").RangeInt64(7, 0, 5), ShouldEqual, 7) + So(sec.Key("TIME").RangeTime(t, minT, midT).String(), ShouldEqual, t.String()) + }) + }) + + Convey("Get values into slice", func() { + sec := cfg.Section("array") + So(strings.Join(sec.Key("STRINGS").Strings(","), ","), ShouldEqual, "en,zh,de") + So(len(sec.Key("STRINGS_404").Strings(",")), ShouldEqual, 0) + + vals1 := sec.Key("FLOAT64S").Float64s(",") + float64sEqual(vals1, 1.1, 2.2, 3.3) + + vals2 := sec.Key("INTS").Ints(",") + intsEqual(vals2, 1, 2, 3) + + vals3 := sec.Key("INTS").Int64s(",") + int64sEqual(vals3, 1, 2, 3) + + vals4 := sec.Key("UINTS").Uints(",") + uintsEqual(vals4, 1, 2, 3) + + vals5 := sec.Key("UINTS").Uint64s(",") + uint64sEqual(vals5, 1, 2, 3) + + t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + vals6 := sec.Key("TIMES").Times(",") + timesEqual(vals6, t, t, t) + }) + + Convey("Get valid values into slice", func() { + sec := cfg.Section("array") + vals1 := sec.Key("FLOAT64S").ValidFloat64s(",") + float64sEqual(vals1, 1.1, 2.2, 3.3) + + vals2 := sec.Key("INTS").ValidInts(",") + intsEqual(vals2, 1, 2, 3) + + vals3 := sec.Key("INTS").ValidInt64s(",") + int64sEqual(vals3, 1, 2, 3) + + vals4 := sec.Key("UINTS").ValidUints(",") + uintsEqual(vals4, 1, 2, 3) + + vals5 := sec.Key("UINTS").ValidUint64s(",") + uint64sEqual(vals5, 1, 2, 3) + + t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + vals6 := sec.Key("TIMES").ValidTimes(",") + timesEqual(vals6, t, t, t) + }) + + Convey("Get values one type into slice of another type", func() { + sec := cfg.Section("array") + vals1 := sec.Key("STRINGS").ValidFloat64s(",") + So(vals1, ShouldBeEmpty) + + vals2 := sec.Key("STRINGS").ValidInts(",") + So(vals2, ShouldBeEmpty) + + vals3 := sec.Key("STRINGS").ValidInt64s(",") + So(vals3, ShouldBeEmpty) + + vals4 := sec.Key("STRINGS").ValidUints(",") + So(vals4, ShouldBeEmpty) + + vals5 := sec.Key("STRINGS").ValidUint64s(",") + So(vals5, ShouldBeEmpty) + + vals6 := sec.Key("STRINGS").ValidTimes(",") + So(vals6, ShouldBeEmpty) + }) + + Convey("Get valid values into slice without errors", func() { + sec := cfg.Section("array") + vals1, err := sec.Key("FLOAT64S").StrictFloat64s(",") + So(err, ShouldBeNil) + float64sEqual(vals1, 1.1, 2.2, 3.3) + + vals2, err := sec.Key("INTS").StrictInts(",") + So(err, ShouldBeNil) + intsEqual(vals2, 1, 2, 3) + + vals3, err := sec.Key("INTS").StrictInt64s(",") + So(err, ShouldBeNil) + int64sEqual(vals3, 1, 2, 3) + + vals4, err := sec.Key("UINTS").StrictUints(",") + So(err, ShouldBeNil) + uintsEqual(vals4, 1, 2, 3) + + vals5, err := sec.Key("UINTS").StrictUint64s(",") + So(err, ShouldBeNil) + uint64sEqual(vals5, 1, 2, 3) + + t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + vals6, err := sec.Key("TIMES").StrictTimes(",") + So(err, ShouldBeNil) + timesEqual(vals6, t, t, t) + }) + + Convey("Get invalid values into slice", func() { + sec := cfg.Section("array") + vals1, err := sec.Key("STRINGS").StrictFloat64s(",") + So(vals1, ShouldBeEmpty) + So(err, ShouldNotBeNil) + + vals2, err := sec.Key("STRINGS").StrictInts(",") + So(vals2, ShouldBeEmpty) + So(err, ShouldNotBeNil) + + 
vals3, err := sec.Key("STRINGS").StrictInt64s(",") + So(vals3, ShouldBeEmpty) + So(err, ShouldNotBeNil) + + vals4, err := sec.Key("STRINGS").StrictUints(",") + So(vals4, ShouldBeEmpty) + So(err, ShouldNotBeNil) + + vals5, err := sec.Key("STRINGS").StrictUint64s(",") + So(vals5, ShouldBeEmpty) + So(err, ShouldNotBeNil) + + vals6, err := sec.Key("STRINGS").StrictTimes(",") + So(vals6, ShouldBeEmpty) + So(err, ShouldNotBeNil) + }) + + Convey("Get key hash", func() { + cfg.Section("").KeysHash() + }) + + Convey("Set key value", func() { + k := cfg.Section("author").Key("NAME") + k.SetValue("无闻") + So(k.String(), ShouldEqual, "无闻") + }) + + Convey("Get key strings", func() { + So(strings.Join(cfg.Section("types").KeyStrings(), ","), ShouldEqual, "STRING,BOOL,BOOL_FALSE,FLOAT64,INT,TIME,DURATION,UINT") + }) + + Convey("Delete a key", func() { + cfg.Section("package.sub").DeleteKey("UNUSED_KEY") + _, err := cfg.Section("package.sub").GetKey("UNUSED_KEY") + So(err, ShouldNotBeNil) + }) + + Convey("Has Key (backwards compatible)", func() { + sec := cfg.Section("package.sub") + haskey1 := sec.Haskey("UNUSED_KEY") + haskey2 := sec.Haskey("CLONE_URL") + haskey3 := sec.Haskey("CLONE_URL_NO") + So(haskey1, ShouldBeTrue) + So(haskey2, ShouldBeTrue) + So(haskey3, ShouldBeFalse) + }) + + Convey("Has Key", func() { + sec := cfg.Section("package.sub") + haskey1 := sec.HasKey("UNUSED_KEY") + haskey2 := sec.HasKey("CLONE_URL") + haskey3 := sec.HasKey("CLONE_URL_NO") + So(haskey1, ShouldBeTrue) + So(haskey2, ShouldBeTrue) + So(haskey3, ShouldBeFalse) + }) + + Convey("Has Value", func() { + sec := cfg.Section("author") + hasvalue1 := sec.HasValue("Unknwon") + hasvalue2 := sec.HasValue("doc") + So(hasvalue1, ShouldBeTrue) + So(hasvalue2, ShouldBeFalse) + }) + }) + + Convey("Test getting and setting bad values", t, func() { + cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + Convey("Create new key with empty name", func() { + k, err := cfg.Section("").NewKey("", "") + So(err, ShouldNotBeNil) + So(k, ShouldBeNil) + }) + + Convey("Create new section with empty name", func() { + s, err := cfg.NewSection("") + So(err, ShouldNotBeNil) + So(s, ShouldBeNil) + }) + + Convey("Create new sections with empty name", func() { + So(cfg.NewSections(""), ShouldNotBeNil) + }) + + Convey("Get section that not exists", func() { + s, err := cfg.GetSection("404") + So(err, ShouldNotBeNil) + So(s, ShouldBeNil) + + s = cfg.Section("404") + So(s, ShouldNotBeNil) + }) + }) + + Convey("Test key hash clone", t, func() { + cfg, err := Load([]byte(strings.Replace("network=tcp,addr=127.0.0.1:6379,db=4,pool_size=100,idle_timeout=180", ",", "\n", -1))) + So(err, ShouldBeNil) + for _, v := range cfg.Section("").KeysHash() { + So(len(v), ShouldBeGreaterThan, 0) + } + }) + + Convey("Key has empty value", t, func() { + _conf := `key1= +key2= ; comment` + cfg, err := Load([]byte(_conf)) + So(err, ShouldBeNil) + So(cfg.Section("").Key("key1").Value(), ShouldBeEmpty) + }) +} + +const _CONF_GIT_CONFIG = ` +[remote "origin"] + url = https://github.com/Antergone/test1.git + url = https://github.com/Antergone/test2.git +` + +func Test_Key_Shadows(t *testing.T) { + Convey("Shadows keys", t, func() { + Convey("Disable shadows", func() { + cfg, err := Load([]byte(_CONF_GIT_CONFIG)) + So(err, ShouldBeNil) + So(cfg.Section(`remote "origin"`).Key("url").String(), ShouldEqual, "https://github.com/Antergone/test2.git") + }) + + Convey("Enable shadows", func() { + cfg, err := 
ShadowLoad([]byte(_CONF_GIT_CONFIG)) + So(err, ShouldBeNil) + So(cfg.Section(`remote "origin"`).Key("url").String(), ShouldEqual, "https://github.com/Antergone/test1.git") + So(strings.Join(cfg.Section(`remote "origin"`).Key("url").ValueWithShadows(), " "), ShouldEqual, + "https://github.com/Antergone/test1.git https://github.com/Antergone/test2.git") + + Convey("Save with shadows", func() { + var buf bytes.Buffer + _, err := cfg.WriteTo(&buf) + So(err, ShouldBeNil) + So(buf.String(), ShouldEqual, `[remote "origin"] +url = https://github.com/Antergone/test1.git +url = https://github.com/Antergone/test2.git + +`) + }) + }) + }) +} + +func newTestFile(block bool) *File { + c, _ := Load([]byte(_CONF_DATA)) + c.BlockMode = block + return c +} + +func Benchmark_Key_Value(b *testing.B) { + c := newTestFile(true) + for i := 0; i < b.N; i++ { + c.Section("").Key("NAME").Value() + } +} + +func Benchmark_Key_Value_NonBlock(b *testing.B) { + c := newTestFile(false) + for i := 0; i < b.N; i++ { + c.Section("").Key("NAME").Value() + } +} + +func Benchmark_Key_Value_ViaSection(b *testing.B) { + c := newTestFile(true) + sec := c.Section("") + for i := 0; i < b.N; i++ { + sec.Key("NAME").Value() + } +} + +func Benchmark_Key_Value_ViaSection_NonBlock(b *testing.B) { + c := newTestFile(false) + sec := c.Section("") + for i := 0; i < b.N; i++ { + sec.Key("NAME").Value() + } +} + +func Benchmark_Key_Value_Direct(b *testing.B) { + c := newTestFile(true) + key := c.Section("").Key("NAME") + for i := 0; i < b.N; i++ { + key.Value() + } +} + +func Benchmark_Key_Value_Direct_NonBlock(b *testing.B) { + c := newTestFile(false) + key := c.Section("").Key("NAME") + for i := 0; i < b.N; i++ { + key.Value() + } +} + +func Benchmark_Key_String(b *testing.B) { + c := newTestFile(true) + for i := 0; i < b.N; i++ { + _ = c.Section("").Key("NAME").String() + } +} + +func Benchmark_Key_String_NonBlock(b *testing.B) { + c := newTestFile(false) + for i := 0; i < b.N; i++ { + _ = c.Section("").Key("NAME").String() + } +} + +func Benchmark_Key_String_ViaSection(b *testing.B) { + c := newTestFile(true) + sec := c.Section("") + for i := 0; i < b.N; i++ { + _ = sec.Key("NAME").String() + } +} + +func Benchmark_Key_String_ViaSection_NonBlock(b *testing.B) { + c := newTestFile(false) + sec := c.Section("") + for i := 0; i < b.N; i++ { + _ = sec.Key("NAME").String() + } +} + +func Benchmark_Key_SetValue(b *testing.B) { + c := newTestFile(true) + for i := 0; i < b.N; i++ { + c.Section("").Key("NAME").SetValue("10") + } +} + +func Benchmark_Key_SetValue_VisSection(b *testing.B) { + c := newTestFile(true) + sec := c.Section("") + for i := 0; i < b.N; i++ { + sec.Key("NAME").SetValue("10") + } +} diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go new file mode 100644 index 000000000..69d547627 --- /dev/null +++ b/vendor/github.com/go-ini/ini/parser.go @@ -0,0 +1,361 @@ +// Copyright 2015 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
+ +package ini + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + "unicode" +) + +type tokenType int + +const ( + _TOKEN_INVALID tokenType = iota + _TOKEN_COMMENT + _TOKEN_SECTION + _TOKEN_KEY +) + +type parser struct { + buf *bufio.Reader + isEOF bool + count int + comment *bytes.Buffer +} + +func newParser(r io.Reader) *parser { + return &parser{ + buf: bufio.NewReader(r), + count: 1, + comment: &bytes.Buffer{}, + } +} + +// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format. +// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding +func (p *parser) BOM() error { + mask, err := p.buf.Peek(2) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 2 { + return nil + } + + switch { + case mask[0] == 254 && mask[1] == 255: + fallthrough + case mask[0] == 255 && mask[1] == 254: + p.buf.Read(mask) + case mask[0] == 239 && mask[1] == 187: + mask, err := p.buf.Peek(3) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 3 { + return nil + } + if mask[2] == 191 { + p.buf.Read(mask) + } + } + return nil +} + +func (p *parser) readUntil(delim byte) ([]byte, error) { + data, err := p.buf.ReadBytes(delim) + if err != nil { + if err == io.EOF { + p.isEOF = true + } else { + return nil, err + } + } + return data, nil +} + +func cleanComment(in []byte) ([]byte, bool) { + i := bytes.IndexAny(in, "#;") + if i == -1 { + return nil, false + } + return in[i:], true +} + +func readKeyName(in []byte) (string, int, error) { + line := string(in) + + // Check if key name surrounded by quotes. + var keyQuote string + if line[0] == '"' { + if len(line) > 6 && string(line[0:3]) == `"""` { + keyQuote = `"""` + } else { + keyQuote = `"` + } + } else if line[0] == '`' { + keyQuote = "`" + } + + // Get out key name + endIdx := -1 + if len(keyQuote) > 0 { + startIdx := len(keyQuote) + // FIXME: fail case -> """"""name"""=value + pos := strings.Index(line[startIdx:], keyQuote) + if pos == -1 { + return "", -1, fmt.Errorf("missing closing key quote: %s", line) + } + pos += startIdx + + // Find key-value delimiter + i := strings.IndexAny(line[pos+startIdx:], "=:") + if i < 0 { + return "", -1, ErrDelimiterNotFound{line} + } + endIdx = pos + i + return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil + } + + endIdx = strings.IndexAny(line, "=:") + if endIdx < 0 { + return "", -1, ErrDelimiterNotFound{line} + } + return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil +} + +func (p *parser) readMultilines(line, val, valQuote string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := string(data) + + pos := strings.LastIndex(next, valQuote) + if pos > -1 { + val += next[:pos] + + comment, has := cleanComment([]byte(next[pos:])) + if has { + p.comment.Write(bytes.TrimSpace(comment)) + } + break + } + val += next + if p.isEOF { + return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next) + } + } + return val, nil +} + +func (p *parser) readContinuationLines(val string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := strings.TrimSpace(string(data)) + + if len(next) == 0 { + break + } + val += next + if val[len(val)-1] != '\\' { + break + } + val = val[:len(val)-1] + } + return val, nil +} + +// hasSurroundedQuote check if and only if the first and last characters +// are quotes \" or \'. +// It returns false if any other parts also contain same kind of quotes. 
+func hasSurroundedQuote(in string, quote byte) bool { + return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote && + strings.IndexByte(in[1:], quote) == len(in)-2 +} + +func (p *parser) readValue(in []byte, ignoreContinuation, ignoreInlineComment bool) (string, error) { + line := strings.TrimLeftFunc(string(in), unicode.IsSpace) + if len(line) == 0 { + return "", nil + } + + var valQuote string + if len(line) > 3 && string(line[0:3]) == `"""` { + valQuote = `"""` + } else if line[0] == '`' { + valQuote = "`" + } + + if len(valQuote) > 0 { + startIdx := len(valQuote) + pos := strings.LastIndex(line[startIdx:], valQuote) + // Check for multi-line value + if pos == -1 { + return p.readMultilines(line, line[startIdx:], valQuote) + } + + return line[startIdx : pos+startIdx], nil + } + + // Won't be able to reach here if value only contains whitespace + line = strings.TrimSpace(line) + + // Check continuation lines when desired + if !ignoreContinuation && line[len(line)-1] == '\\' { + return p.readContinuationLines(line[:len(line)-1]) + } + + // Check if ignore inline comment + if !ignoreInlineComment { + i := strings.IndexAny(line, "#;") + if i > -1 { + p.comment.WriteString(line[i:]) + line = strings.TrimSpace(line[:i]) + } + } + + // Trim single quotes + if hasSurroundedQuote(line, '\'') || + hasSurroundedQuote(line, '"') { + line = line[1 : len(line)-1] + } + return line, nil +} + +// parse parses data through an io.Reader. +func (f *File) parse(reader io.Reader) (err error) { + p := newParser(reader) + if err = p.BOM(); err != nil { + return fmt.Errorf("BOM: %v", err) + } + + // Ignore error because default section name is never empty string. + section, _ := f.NewSection(DEFAULT_SECTION) + + var line []byte + var inUnparseableSection bool + for !p.isEOF { + line, err = p.readUntil('\n') + if err != nil { + return err + } + + line = bytes.TrimLeftFunc(line, unicode.IsSpace) + if len(line) == 0 { + continue + } + + // Comments + if line[0] == '#' || line[0] == ';' { + // Note: we do not care about the ending line break; + // it is needed for adding the second line, + // so just clean it once at the end when setting the value. + p.comment.Write(line) + continue + } + + // Section + if line[0] == '[' { + // Read to the next ']' (TODO: support quoted strings) + // TODO(unknwon): use LastIndexByte when stop supporting Go1.4 + closeIdx := bytes.LastIndex(line, []byte("]")) + if closeIdx == -1 { + return fmt.Errorf("unclosed section: %s", line) + } + + name := string(line[1:closeIdx]) + section, err = f.NewSection(name) + if err != nil { + return err + } + + comment, has := cleanComment(line[closeIdx+1:]) + if has { + p.comment.Write(comment) + } + + section.Comment = strings.TrimSpace(p.comment.String()) + + // Reset auto-counter and comments + p.comment.Reset() + p.count = 1 + + inUnparseableSection = false + for i := range f.options.UnparseableSections { + if f.options.UnparseableSections[i] == name || + (f.options.Insensitive && strings.ToLower(f.options.UnparseableSections[i]) == strings.ToLower(name)) { + inUnparseableSection = true + continue + } + } + continue + } + + if inUnparseableSection { + section.isRawSection = true + section.rawBody += string(line) + continue + } + + kname, offset, err := readKeyName(line) + if err != nil { + // Treat as a boolean key when desired, and the whole line is the key name.
+ if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys { + kname, err := p.readValue(line, f.options.IgnoreContinuation, f.options.IgnoreInlineComment) + if err != nil { + return err + } + key, err := section.NewBooleanKey(kname) + if err != nil { + return err + } + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + continue + } + return err + } + + // Auto increment. + isAutoIncr := false + if kname == "-" { + isAutoIncr = true + kname = "#" + strconv.Itoa(p.count) + p.count++ + } + + value, err := p.readValue(line[offset:], f.options.IgnoreContinuation, f.options.IgnoreInlineComment) + if err != nil { + return err + } + + key, err := section.NewKey(kname, value) + if err != nil { + return err + } + key.isAutoIncrement = isAutoIncr + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + } + return nil +} diff --git a/vendor/github.com/go-ini/ini/parser_test.go b/vendor/github.com/go-ini/ini/parser_test.go new file mode 100644 index 000000000..05258195b --- /dev/null +++ b/vendor/github.com/go-ini/ini/parser_test.go @@ -0,0 +1,42 @@ +// Copyright 2016 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "testing" + + . "github.com/smartystreets/goconvey/convey" +) + +func Test_BOM(t *testing.T) { + Convey("Test handling BOM", t, func() { + Convey("UTF-8-BOM", func() { + cfg, err := Load("testdata/UTF-8-BOM.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + So(cfg.Section("author").Key("E-MAIL").String(), ShouldEqual, "u@gogs.io") + }) + + Convey("UTF-16-LE-BOM", func() { + cfg, err := Load("testdata/UTF-16-LE-BOM.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + }) + + Convey("UTF-16-BE-BOM", func() { + }) + }) +} diff --git a/vendor/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go new file mode 100644 index 000000000..94f7375ed --- /dev/null +++ b/vendor/github.com/go-ini/ini/section.go @@ -0,0 +1,248 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "errors" + "fmt" + "strings" +) + +// Section represents a config section. 
+type Section struct { + f *File + Comment string + name string + keys map[string]*Key + keyList []string + keysHash map[string]string + + isRawSection bool + rawBody string +} + +func newSection(f *File, name string) *Section { + return &Section{ + f: f, + name: name, + keys: make(map[string]*Key), + keyList: make([]string, 0, 10), + keysHash: make(map[string]string), + } +} + +// Name returns name of Section. +func (s *Section) Name() string { + return s.name +} + +// Body returns rawBody of Section if the section was marked as unparseable. +// It still follows the other rules of the INI format surrounding leading/trailing whitespace. +func (s *Section) Body() string { + return strings.TrimSpace(s.rawBody) +} + +// NewKey creates a new key to given section. +func (s *Section) NewKey(name, val string) (*Key, error) { + if len(name) == 0 { + return nil, errors.New("error creating new key: empty key name") + } else if s.f.options.Insensitive { + name = strings.ToLower(name) + } + + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + if inSlice(name, s.keyList) { + if s.f.options.AllowShadows { + if err := s.keys[name].addShadow(val); err != nil { + return nil, err + } + } else { + s.keys[name].value = val + } + return s.keys[name], nil + } + + s.keyList = append(s.keyList, name) + s.keys[name] = newKey(s, name, val) + s.keysHash[name] = val + return s.keys[name], nil +} + +// NewBooleanKey creates a new boolean type key to given section. +func (s *Section) NewBooleanKey(name string) (*Key, error) { + key, err := s.NewKey(name, "true") + if err != nil { + return nil, err + } + + key.isBooleanType = true + return key, nil +} + +// GetKey returns key in section by given name. +func (s *Section) GetKey(name string) (*Key, error) { + // FIXME: change to section level lock? + if s.f.BlockMode { + s.f.lock.RLock() + } + if s.f.options.Insensitive { + name = strings.ToLower(name) + } + key := s.keys[name] + if s.f.BlockMode { + s.f.lock.RUnlock() + } + + if key == nil { + // Check if it is a child-section. + sname := s.name + for { + if i := strings.LastIndex(sname, "."); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + return sec.GetKey(name) + } else { + break + } + } + return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name) + } + return key, nil +} + +// HasKey returns true if section contains a key with given name. +func (s *Section) HasKey(name string) bool { + key, _ := s.GetKey(name) + return key != nil +} + +// Haskey is a backwards-compatible name for HasKey. +func (s *Section) Haskey(name string) bool { + return s.HasKey(name) +} + +// HasValue returns true if section contains given raw value. +func (s *Section) HasValue(value string) bool { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + for _, k := range s.keys { + if value == k.value { + return true + } + } + return false +} + +// Key assumes named Key exists in section and returns a zero-value when not. +func (s *Section) Key(name string) *Key { + key, err := s.GetKey(name) + if err != nil { + // It's OK here because the only possible error is empty key name, + // but if it's empty, this piece of code won't be executed. + key, _ = s.NewKey(name, "") + return key + } + return key +} + +// Keys returns list of keys of section. 
+func (s *Section) Keys() []*Key { + keys := make([]*Key, len(s.keyList)) + for i := range s.keyList { + keys[i] = s.Key(s.keyList[i]) + } + return keys +} + +// ParentKeys returns list of keys of parent section. +func (s *Section) ParentKeys() []*Key { + var parentKeys []*Key + sname := s.name + for { + if i := strings.LastIndex(sname, "."); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + parentKeys = append(parentKeys, sec.Keys()...) + } else { + break + } + + } + return parentKeys +} + +// KeyStrings returns list of key names of section. +func (s *Section) KeyStrings() []string { + list := make([]string, len(s.keyList)) + copy(list, s.keyList) + return list +} + +// KeysHash returns keys hash consisting of names and values. +func (s *Section) KeysHash() map[string]string { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + hash := map[string]string{} + for key, value := range s.keysHash { + hash[key] = value + } + return hash +} + +// DeleteKey deletes a key from section. +func (s *Section) DeleteKey(name string) { + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + for i, k := range s.keyList { + if k == name { + s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) + delete(s.keys, name) + return + } + } +} + +// ChildSections returns a list of child sections of current section. +// For example, "[parent.child1]" and "[parent.child12]" are child sections +// of section "[parent]". +func (s *Section) ChildSections() []*Section { + prefix := s.name + "." + children := make([]*Section, 0, 3) + for _, name := range s.f.sectionList { + if strings.HasPrefix(name, prefix) { + children = append(children, s.f.sections[name]) + } + } + return children +} diff --git a/vendor/github.com/go-ini/ini/section_test.go b/vendor/github.com/go-ini/ini/section_test.go new file mode 100644 index 000000000..80282c197 --- /dev/null +++ b/vendor/github.com/go-ini/ini/section_test.go @@ -0,0 +1,75 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "strings" + "testing" + + . 
"github.com/smartystreets/goconvey/convey" +) + +func Test_Section(t *testing.T) { + Convey("Test CRD sections", t, func() { + cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + Convey("Get section strings", func() { + So(strings.Join(cfg.SectionStrings(), ","), ShouldEqual, "DEFAULT,author,package,package.sub,features,types,array,note,comments,advance") + }) + + Convey("Delete a section", func() { + cfg.DeleteSection("") + So(cfg.SectionStrings()[0], ShouldNotEqual, DEFAULT_SECTION) + }) + + Convey("Create new sections", func() { + cfg.NewSections("test", "test2") + _, err := cfg.GetSection("test") + So(err, ShouldBeNil) + _, err = cfg.GetSection("test2") + So(err, ShouldBeNil) + }) + }) +} + +func Test_SectionRaw(t *testing.T) { + Convey("Test section raw string", t, func() { + cfg, err := LoadSources( + LoadOptions{ + Insensitive: true, + UnparseableSections: []string{"core_lesson", "comments"}, + }, + "testdata/aicc.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + Convey("Get section strings", func() { + So(strings.Join(cfg.SectionStrings(), ","), ShouldEqual, "DEFAULT,core,core_lesson,comments") + }) + + Convey("Validate non-raw section", func() { + val, err := cfg.Section("core").GetKey("lesson_status") + So(err, ShouldBeNil) + So(val.String(), ShouldEqual, "C") + }) + + Convey("Validate raw section", func() { + So(cfg.Section("core_lesson").Body(), ShouldEqual, `my lesson state data – 1111111111111111111000000000000000001110000 +111111111111111111100000000000111000000000 – end my lesson state data`) + }) + }) +} \ No newline at end of file diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go new file mode 100644 index 000000000..eeb8dabaa --- /dev/null +++ b/vendor/github.com/go-ini/ini/struct.go @@ -0,0 +1,500 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "strings" + "time" + "unicode" +) + +// NameMapper represents a ini tag name mapper. +type NameMapper func(string) string + +// Built-in name getters. +var ( + // AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE. + AllCapsUnderscore NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + } + newstr = append(newstr, unicode.ToUpper(chr)) + } + return string(newstr) + } + // TitleUnderscore converts to format title_underscore. 
+ TitleUnderscore NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + chr -= ('A' - 'a') + } + newstr = append(newstr, chr) + } + return string(newstr) + } +) + +func (s *Section) parseFieldName(raw, actual string) string { + if len(actual) > 0 { + return actual + } + if s.f.NameMapper != nil { + return s.f.NameMapper(raw) + } + return raw +} + +func parseDelim(actual string) string { + if len(actual) > 0 { + return actual + } + return "," +} + +var reflectTime = reflect.TypeOf(time.Now()).Kind() + +// setSliceWithProperType sets proper values to slice based on its type. +func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { + var strs []string + if allowShadow { + strs = key.StringsWithShadows(delim) + } else { + strs = key.Strings(delim) + } + + numVals := len(strs) + if numVals == 0 { + return nil + } + + var vals interface{} + var err error + + sliceOf := field.Type().Elem().Kind() + switch sliceOf { + case reflect.String: + vals = strs + case reflect.Int: + vals, err = key.parseInts(strs, true, false) + case reflect.Int64: + vals, err = key.parseInt64s(strs, true, false) + case reflect.Uint: + vals, err = key.parseUints(strs, true, false) + case reflect.Uint64: + vals, err = key.parseUint64s(strs, true, false) + case reflect.Float64: + vals, err = key.parseFloat64s(strs, true, false) + case reflectTime: + vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false) + default: + return fmt.Errorf("unsupported type '[]%s'", sliceOf) + } + if isStrict { + return err + } + + slice := reflect.MakeSlice(field.Type(), numVals, numVals) + for i := 0; i < numVals; i++ { + switch sliceOf { + case reflect.String: + slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i])) + case reflect.Int: + slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i])) + case reflect.Int64: + slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i])) + case reflect.Uint: + slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i])) + case reflect.Uint64: + slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i])) + case reflect.Float64: + slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i])) + case reflectTime: + slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i])) + } + } + field.Set(slice) + return nil +} + +func wrapStrictError(err error, isStrict bool) error { + if isStrict { + return err + } + return nil +} + +// setWithProperType sets proper value to field based on its type, +// but it does not return an error for failed parsing, +// because we want to use the default value that is already assigned to the struct.
+func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { + switch t.Kind() { + case reflect.String: + if len(key.String()) == 0 { + return nil + } + field.SetString(key.String()) + case reflect.Bool: + boolVal, err := key.Bool() + if err != nil { + return wrapStrictError(err, isStrict) + } + field.SetBool(boolVal) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + durationVal, err := key.Duration() + // Skip zero value + if err == nil && int(durationVal) > 0 { + field.Set(reflect.ValueOf(durationVal)) + return nil + } + + intVal, err := key.Int64() + if err != nil { + return wrapStrictError(err, isStrict) + } + field.SetInt(intVal) + // byte is an alias for uint8, so supporting uint8 breaks support for byte + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + durationVal, err := key.Duration() + // Skip zero value + if err == nil && int(durationVal) > 0 { + field.Set(reflect.ValueOf(durationVal)) + return nil + } + + uintVal, err := key.Uint64() + if err != nil { + return wrapStrictError(err, isStrict) + } + field.SetUint(uintVal) + + case reflect.Float32, reflect.Float64: + floatVal, err := key.Float64() + if err != nil { + return wrapStrictError(err, isStrict) + } + field.SetFloat(floatVal) + case reflectTime: + timeVal, err := key.Time() + if err != nil { + return wrapStrictError(err, isStrict) + } + field.Set(reflect.ValueOf(timeVal)) + case reflect.Slice: + return setSliceWithProperType(key, field, delim, allowShadow, isStrict) + default: + return fmt.Errorf("unsupported type '%s'", t) + } + return nil +} + +func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool) { + opts := strings.SplitN(tag, ",", 3) + rawName = opts[0] + if len(opts) > 1 { + omitEmpty = opts[1] == "omitempty" + } + if len(opts) > 2 { + allowShadow = opts[2] == "allowshadow" + } + return rawName, omitEmpty, allowShadow +} + +func (s *Section) mapTo(val reflect.Value, isStrict bool) error { + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + typ := val.Type() + + for i := 0; i < typ.NumField(); i++ { + field := val.Field(i) + tpField := typ.Field(i) + + tag := tpField.Tag.Get("ini") + if tag == "-" { + continue + } + + rawName, _, allowShadow := parseTagOptions(tag) + fieldName := s.parseFieldName(tpField.Name, rawName) + if len(fieldName) == 0 || !field.CanSet() { + continue + } + + isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous + isStruct := tpField.Type.Kind() == reflect.Struct + if isAnonymous { + field.Set(reflect.New(tpField.Type.Elem())) + } + + if isAnonymous || isStruct { + if sec, err := s.f.GetSection(fieldName); err == nil { + if err = sec.mapTo(field, isStrict); err != nil { + return fmt.Errorf("error mapping field(%s): %v", fieldName, err) + } + continue + } + } + + if key, err := s.GetKey(fieldName); err == nil { + delim := parseDelim(tpField.Tag.Get("delim")) + if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil { + return fmt.Errorf("error mapping field(%s): %v", fieldName, err) + } + } + } + return nil +} + +// MapTo maps section to given struct. 
+func (s *Section) MapTo(v interface{}) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + val = val.Elem() + } else { + return errors.New("cannot map to non-pointer struct") + } + + return s.mapTo(val, false) +} + +// StrictMapTo maps section to given struct in strict mode, +// which returns all possible errors including value parsing errors. +func (s *Section) StrictMapTo(v interface{}) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + val = val.Elem() + } else { + return errors.New("cannot map to non-pointer struct") + } + + return s.mapTo(val, true) +} + +// MapTo maps file to given struct. +func (f *File) MapTo(v interface{}) error { + return f.Section("").MapTo(v) +} + +// StrictMapTo maps file to given struct in strict mode, +// which returns all possible errors including value parsing errors. +func (f *File) StrictMapTo(v interface{}) error { + return f.Section("").StrictMapTo(v) +} + +// MapToWithMapper maps data sources to given struct with name mapper. +func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { + cfg, err := Load(source, others...) + if err != nil { + return err + } + cfg.NameMapper = mapper + return cfg.MapTo(v) +} + +// StrictMapToWithMapper maps data sources to given struct with name mapper in strict mode, +// which returns all possible errors including value parsing errors. +func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { + cfg, err := Load(source, others...) + if err != nil { + return err + } + cfg.NameMapper = mapper + return cfg.StrictMapTo(v) +} + +// MapTo maps data sources to given struct. +func MapTo(v, source interface{}, others ...interface{}) error { + return MapToWithMapper(v, nil, source, others...) +} + +// StrictMapTo maps data sources to given struct in strict mode, +// which returns all possible errors including value parsing errors. +func StrictMapTo(v, source interface{}, others ...interface{}) error { + return StrictMapToWithMapper(v, nil, source, others...) +} + +// reflectSliceWithProperType does the opposite of setSliceWithProperType. +func reflectSliceWithProperType(key *Key, field reflect.Value, delim string) error { + slice := field.Slice(0, field.Len()) + if field.Len() == 0 { + return nil + } + + var buf bytes.Buffer + sliceOf := field.Type().Elem().Kind() + for i := 0; i < field.Len(); i++ { + switch sliceOf { + case reflect.String: + buf.WriteString(slice.Index(i).String()) + case reflect.Int, reflect.Int64: + buf.WriteString(fmt.Sprint(slice.Index(i).Int())) + case reflect.Uint, reflect.Uint64: + buf.WriteString(fmt.Sprint(slice.Index(i).Uint())) + case reflect.Float64: + buf.WriteString(fmt.Sprint(slice.Index(i).Float())) + case reflectTime: + buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339)) + default: + return fmt.Errorf("unsupported type '[]%s'", sliceOf) + } + buf.WriteString(delim) + } + key.SetValue(buf.String()[:buf.Len()-1]) + return nil +} + +// reflectWithProperType does the opposite of setWithProperType.
+func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error { + switch t.Kind() { + case reflect.String: + key.SetValue(field.String()) + case reflect.Bool: + key.SetValue(fmt.Sprint(field.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + key.SetValue(fmt.Sprint(field.Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + key.SetValue(fmt.Sprint(field.Uint())) + case reflect.Float32, reflect.Float64: + key.SetValue(fmt.Sprint(field.Float())) + case reflectTime: + key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339))) + case reflect.Slice: + return reflectSliceWithProperType(key, field, delim) + default: + return fmt.Errorf("unsupported type '%s'", t) + } + return nil +} + +// CR: copied from encoding/json/encode.go with modifications of time.Time support. +// TODO: add more test coverage. +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflectTime: + t, ok := v.Interface().(time.Time) + return ok && t.IsZero() + } + return false +} + +func (s *Section) reflectFrom(val reflect.Value) error { + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + typ := val.Type() + + for i := 0; i < typ.NumField(); i++ { + field := val.Field(i) + tpField := typ.Field(i) + + tag := tpField.Tag.Get("ini") + if tag == "-" { + continue + } + + opts := strings.SplitN(tag, ",", 2) + if len(opts) == 2 && opts[1] == "omitempty" && isEmptyValue(field) { + continue + } + + fieldName := s.parseFieldName(tpField.Name, opts[0]) + if len(fieldName) == 0 || !field.CanSet() { + continue + } + + if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) || + (tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") { + // Note: The only error here is section doesn't exist. + sec, err := s.f.GetSection(fieldName) + if err != nil { + // Note: fieldName can never be empty here, ignore error. + sec, _ = s.f.NewSection(fieldName) + } + if err = sec.reflectFrom(field); err != nil { + return fmt.Errorf("error reflecting field (%s): %v", fieldName, err) + } + continue + } + + // Note: Same reason as section. + key, err := s.GetKey(fieldName) + if err != nil { + key, _ = s.NewKey(fieldName, "") + } + if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil { + return fmt.Errorf("error reflecting field (%s): %v", fieldName, err) + } + + } + return nil +} + +// ReflectFrom reflects section from given struct. +func (s *Section) ReflectFrom(v interface{}) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + val = val.Elem() + } else { + return errors.New("cannot reflect from non-pointer struct") + } + + return s.reflectFrom(val) +} + +// ReflectFrom reflects file from given struct. +func (f *File) ReflectFrom(v interface{}) error { + return f.Section("").ReflectFrom(v) +} + +// ReflectFromWithMapper reflects data sources from given struct with name mapper.
+func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error { + cfg.NameMapper = mapper + return cfg.ReflectFrom(v) +} + +// ReflectFrom reflects data sources from given struct. +func ReflectFrom(cfg *File, v interface{}) error { + return ReflectFromWithMapper(cfg, v, nil) +} diff --git a/vendor/github.com/go-ini/ini/struct_test.go b/vendor/github.com/go-ini/ini/struct_test.go new file mode 100644 index 000000000..b8ba25293 --- /dev/null +++ b/vendor/github.com/go-ini/ini/struct_test.go @@ -0,0 +1,352 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "fmt" + "strings" + "testing" + "time" + + . "github.com/smartystreets/goconvey/convey" +) + +type testNested struct { + Cities []string `delim:"|"` + Visits []time.Time + Years []int + Numbers []int64 + Ages []uint + Populations []uint64 + Coordinates []float64 + Note string + Unused int `ini:"-"` +} + +type testEmbeded struct { + GPA float64 +} + +type testStruct struct { + Name string `ini:"NAME"` + Age int + Male bool + Money float64 + Born time.Time + Time time.Duration `ini:"Duration"` + Others testNested + *testEmbeded `ini:"grade"` + Unused int `ini:"-"` + Unsigned uint + Omitted bool `ini:"omitthis,omitempty"` + Shadows []string `ini:",,allowshadow"` + ShadowInts []int `ini:"Shadows,,allowshadow"` +} + +const _CONF_DATA_STRUCT = ` +NAME = Unknwon +Age = 21 +Male = true +Money = 1.25 +Born = 1993-10-07T20:17:05Z +Duration = 2h45m +Unsigned = 3 +omitthis = true +Shadows = 1, 2 +Shadows = 3, 4 + +[Others] +Cities = HangZhou|Boston +Visits = 1993-10-07T20:17:05Z, 1993-10-07T20:17:05Z +Years = 1993,1994 +Numbers = 10010,10086 +Ages = 18,19 +Populations = 12345678,98765432 +Coordinates = 192.168,10.11 +Note = Hello world! 
+ +[grade] +GPA = 2.8 + +[foo.bar] +Here = there +When = then +` + +type unsupport struct { + Byte byte +} + +type unsupport2 struct { + Others struct { + Cities byte + } +} + +type unsupport3 struct { + Cities byte +} + +type unsupport4 struct { + *unsupport3 `ini:"Others"` +} + +type defaultValue struct { + Name string + Age int + Male bool + Money float64 + Born time.Time + Cities []string +} + +type fooBar struct { + Here, When string +} + +const _INVALID_DATA_CONF_STRUCT = ` +Name = +Age = age +Male = 123 +Money = money +Born = nil +Cities = +` + +func Test_Struct(t *testing.T) { + Convey("Map to struct", t, func() { + Convey("Map file to struct", func() { + ts := new(testStruct) + So(MapTo(ts, []byte(_CONF_DATA_STRUCT)), ShouldBeNil) + + So(ts.Name, ShouldEqual, "Unknwon") + So(ts.Age, ShouldEqual, 21) + So(ts.Male, ShouldBeTrue) + So(ts.Money, ShouldEqual, 1.25) + So(ts.Unsigned, ShouldEqual, 3) + + t, err := time.Parse(time.RFC3339, "1993-10-07T20:17:05Z") + So(err, ShouldBeNil) + So(ts.Born.String(), ShouldEqual, t.String()) + + dur, err := time.ParseDuration("2h45m") + So(err, ShouldBeNil) + So(ts.Time.Seconds(), ShouldEqual, dur.Seconds()) + + So(strings.Join(ts.Others.Cities, ","), ShouldEqual, "HangZhou,Boston") + So(ts.Others.Visits[0].String(), ShouldEqual, t.String()) + So(fmt.Sprint(ts.Others.Years), ShouldEqual, "[1993 1994]") + So(fmt.Sprint(ts.Others.Numbers), ShouldEqual, "[10010 10086]") + So(fmt.Sprint(ts.Others.Ages), ShouldEqual, "[18 19]") + So(fmt.Sprint(ts.Others.Populations), ShouldEqual, "[12345678 98765432]") + So(fmt.Sprint(ts.Others.Coordinates), ShouldEqual, "[192.168 10.11]") + So(ts.Others.Note, ShouldEqual, "Hello world!") + So(ts.testEmbeded.GPA, ShouldEqual, 2.8) + }) + + Convey("Map section to struct", func() { + foobar := new(fooBar) + f, err := Load([]byte(_CONF_DATA_STRUCT)) + So(err, ShouldBeNil) + + So(f.Section("foo.bar").MapTo(foobar), ShouldBeNil) + So(foobar.Here, ShouldEqual, "there") + So(foobar.When, ShouldEqual, "then") + }) + + Convey("Map to non-pointer struct", func() { + cfg, err := Load([]byte(_CONF_DATA_STRUCT)) + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + So(cfg.MapTo(testStruct{}), ShouldNotBeNil) + }) + + Convey("Map to unsupported type", func() { + cfg, err := Load([]byte(_CONF_DATA_STRUCT)) + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + cfg.NameMapper = func(raw string) string { + if raw == "Byte" { + return "NAME" + } + return raw + } + So(cfg.MapTo(&unsupport{}), ShouldNotBeNil) + So(cfg.MapTo(&unsupport2{}), ShouldNotBeNil) + So(cfg.MapTo(&unsupport4{}), ShouldNotBeNil) + }) + + Convey("Map to omitempty field", func() { + ts := new(testStruct) + So(MapTo(ts, []byte(_CONF_DATA_STRUCT)), ShouldBeNil) + + So(ts.Omitted, ShouldEqual, true) + }) + + Convey("Map with shadows", func() { + cfg, err := LoadSources(LoadOptions{AllowShadows: true}, []byte(_CONF_DATA_STRUCT)) + So(err, ShouldBeNil) + ts := new(testStruct) + So(cfg.MapTo(ts), ShouldBeNil) + + So(strings.Join(ts.Shadows, " "), ShouldEqual, "1 2 3 4") + So(fmt.Sprintf("%v", ts.ShadowInts), ShouldEqual, "[1 2 3 4]") + }) + + Convey("Map from invalid data source", func() { + So(MapTo(&testStruct{}, "hi"), ShouldNotBeNil) + }) + + Convey("Map to wrong types and gain default values", func() { + cfg, err := Load([]byte(_INVALID_DATA_CONF_STRUCT)) + So(err, ShouldBeNil) + + t, err := time.Parse(time.RFC3339, "1993-10-07T20:17:05Z") + So(err, ShouldBeNil) + dv := &defaultValue{"Joe", 10, true, 1.25, t, []string{"HangZhou", "Boston"}} + So(cfg.MapTo(dv), 
ShouldBeNil) + So(dv.Name, ShouldEqual, "Joe") + So(dv.Age, ShouldEqual, 10) + So(dv.Male, ShouldBeTrue) + So(dv.Money, ShouldEqual, 1.25) + So(dv.Born.String(), ShouldEqual, t.String()) + So(strings.Join(dv.Cities, ","), ShouldEqual, "HangZhou,Boston") + }) + }) + + Convey("Map to struct in strict mode", t, func() { + cfg, err := Load([]byte(` +name=bruce +age=a30`)) + So(err, ShouldBeNil) + + type Strict struct { + Name string `ini:"name"` + Age int `ini:"age"` + } + s := new(Strict) + + So(cfg.Section("").StrictMapTo(s), ShouldNotBeNil) + }) + + Convey("Reflect from struct", t, func() { + type Embeded struct { + Dates []time.Time `delim:"|"` + Places []string + Years []int + Numbers []int64 + Ages []uint + Populations []uint64 + Coordinates []float64 + None []int + } + type Author struct { + Name string `ini:"NAME"` + Male bool + Age int + Height uint + GPA float64 + Date time.Time + NeverMind string `ini:"-"` + *Embeded `ini:"infos"` + } + + t, err := time.Parse(time.RFC3339, "1993-10-07T20:17:05Z") + So(err, ShouldBeNil) + a := &Author{"Unknwon", true, 21, 100, 2.8, t, "", + &Embeded{ + []time.Time{t, t}, + []string{"HangZhou", "Boston"}, + []int{1993, 1994}, + []int64{10010, 10086}, + []uint{18, 19}, + []uint64{12345678, 98765432}, + []float64{192.168, 10.11}, + []int{}, + }} + cfg := Empty() + So(ReflectFrom(cfg, a), ShouldBeNil) + + var buf bytes.Buffer + _, err = cfg.WriteTo(&buf) + So(err, ShouldBeNil) + So(buf.String(), ShouldEqual, `NAME = Unknwon +Male = true +Age = 21 +Height = 100 +GPA = 2.8 +Date = 1993-10-07T20:17:05Z + +[infos] +Dates = 1993-10-07T20:17:05Z|1993-10-07T20:17:05Z +Places = HangZhou,Boston +Years = 1993,1994 +Numbers = 10010,10086 +Ages = 18,19 +Populations = 12345678,98765432 +Coordinates = 192.168,10.11 +None = + +`) + + Convey("Reflect from non-point struct", func() { + So(ReflectFrom(cfg, Author{}), ShouldNotBeNil) + }) + + Convey("Reflect from struct with omitempty", func() { + cfg := Empty() + type SpecialStruct struct { + FirstName string `ini:"first_name"` + LastName string `ini:"last_name"` + JustOmitMe string `ini:"omitempty"` + LastLogin time.Time `ini:"last_login,omitempty"` + LastLogin2 time.Time `ini:",omitempty"` + NotEmpty int `ini:"omitempty"` + } + + So(ReflectFrom(cfg, &SpecialStruct{FirstName: "John", LastName: "Doe", NotEmpty: 9}), ShouldBeNil) + + var buf bytes.Buffer + _, err = cfg.WriteTo(&buf) + So(buf.String(), ShouldEqual, `first_name = John +last_name = Doe +omitempty = 9 + +`) + }) + }) +} + +type testMapper struct { + PackageName string +} + +func Test_NameGetter(t *testing.T) { + Convey("Test name mappers", t, func() { + So(MapToWithMapper(&testMapper{}, TitleUnderscore, []byte("packag_name=ini")), ShouldBeNil) + + cfg, err := Load([]byte("PACKAGE_NAME=ini")) + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + cfg.NameMapper = AllCapsUnderscore + tg := new(testMapper) + So(cfg.MapTo(tg), ShouldBeNil) + So(tg.PackageName, ShouldEqual, "ini") + }) +} diff --git a/vendor/github.com/go-ini/ini/testdata/UTF-16-BE-BOM.ini b/vendor/github.com/go-ini/ini/testdata/UTF-16-BE-BOM.ini new file mode 100644 index 0000000000000000000000000000000000000000..c8bf82c8ffa2416cb3b462457a6d4c4126f9c410 GIT binary patch literal 56 zcmezOpCOtdk)f2Kgdu|=pP`5$mVt}Gl|h%mm%)+2lfj2Ufx#AtOBoy((t+~n48;t3 I44Dl10AFkh&;S4c literal 0 HcmV?d00001 diff --git a/vendor/github.com/go-ini/ini/testdata/UTF-16-LE-BOM.ini b/vendor/github.com/go-ini/ini/testdata/UTF-16-LE-BOM.ini new file mode 100644 index 
0000000000000000000000000000000000000000..27f62186e20bc90b1d302415baf68ffb00cc69e7 GIT binary patch literal 56 zcmezWFPb5dp_HM7A%h{Gp@<=tfs4VFL6^ao!I8m}!G}SC!4`;185|hWf%54L#SD53 HnLri*Xg> diff --git a/vendor/github.com/go-ini/ini/testdata/conf.ini b/vendor/github.com/go-ini/ini/testdata/conf.ini new file mode 100644 index 000000000..f8e7ec89f --- /dev/null +++ b/vendor/github.com/go-ini/ini/testdata/conf.ini @@ -0,0 +1,2 @@ +[author] +E-MAIL = u@gogs.io \ No newline at end of file diff --git a/vendor/github.com/go-resty/resty/.gitignore b/vendor/github.com/go-resty/resty/.gitignore new file mode 100644 index 000000000..13e345788 --- /dev/null +++ b/vendor/github.com/go-resty/resty/.gitignore @@ -0,0 +1,26 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +coverage.out diff --git a/vendor/github.com/go-resty/resty/.travis.yml b/vendor/github.com/go-resty/resty/.travis.yml new file mode 100644 index 000000000..8972c4cb5 --- /dev/null +++ b/vendor/github.com/go-resty/resty/.travis.yml @@ -0,0 +1,31 @@ +language: go + +sudo: false + +branches: + only: + - master + - integration + +go: + - 1.3 + - 1.4 + - 1.5 + - 1.6 + - 1.7 + - 1.8 + - tip + +install: + - go get -v -t ./... + - go get -v golang.org/x/tools/cmd/cover + +script: + - go test ./... -coverprofile=coverage.txt -covermode=atomic + +after_success: + - bash <(curl -s https://codecov.io/bash) + +matrix: + allow_failures: + - go: tip diff --git a/vendor/github.com/go-resty/resty/LICENSE b/vendor/github.com/go-resty/resty/LICENSE new file mode 100644 index 000000000..25f81d88a --- /dev/null +++ b/vendor/github.com/go-resty/resty/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015-2017 Jeevanandam M., https://myjeeva.com + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE.
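The two binary testdata files above feed the BOM tests in `parser_test.go`; the parser's `BOM()` helper shown earlier strips a UTF-8 (`0xEF 0xBB 0xBF`) or UTF-16 byte-order mark before tokenizing. A minimal sketch of the resulting behavior, assuming the vendored package is importable as `github.com/go-ini/ini` (the inline INI content here is illustrative, not part of the patch):

```go
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	// UTF-8 BOM (0xEF 0xBB 0xBF) followed by ordinary INI content;
	// the parser consumes the marker so the first section parses cleanly.
	src := append([]byte{0xEF, 0xBB, 0xBF}, []byte("[author]\nE-MAIL = u@gogs.io\n")...)

	cfg, err := ini.Load(src)
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.Section("author").Key("E-MAIL").String()) // u@gogs.io
}
```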
diff --git a/vendor/github.com/go-resty/resty/README.md b/vendor/github.com/go-resty/resty/README.md new file mode 100644 index 000000000..d9fa45d58 --- /dev/null +++ b/vendor/github.com/go-resty/resty/README.md @@ -0,0 +1,648 @@ +# resty [![Build Status](https://travis-ci.org/go-resty/resty.svg?branch=master)](https://travis-ci.org/go-resty/resty) [![codecov](https://codecov.io/gh/go-resty/resty/branch/master/graph/badge.svg)](https://codecov.io/gh/go-resty/resty/branch/master) [![GoReport](https://goreportcard.com/badge/go-resty/resty)](https://goreportcard.com/report/go-resty/resty) [![Version](https://img.shields.io/badge/version-0.13-blue.svg)](https://github.com/go-resty/resty/releases/latest) [![GoDoc](https://godoc.org/github.com/go-resty/resty?status.svg)](https://godoc.org/github.com/go-resty/resty) [![License](https://img.shields.io/github/license/go-resty/resty.svg)](LICENSE) + +Simple HTTP and REST client for Go inspired by Ruby rest-client. The [Features](#features) section describes resty's capabilities in detail. + +***v0.13 [released](https://github.com/go-resty/resty/releases/latest) and tagged on Jun 22, 2017.*** + +*Since Go v1.6, the HTTP/2 & HTTP/1.1 protocols are used transparently. `Resty` works fine with HTTP/2 and HTTP/1.1.* + +#### Roadmap +***v1.0*** + +Go Resty was first released on Sep 15, 2015, and go-resty has since grown into a very handy and helpful HTTP & REST client library in the Go community. I'm planning to freeze the API and make the v1.0 release. + +#### Features +* GET, POST, PUT, DELETE, HEAD, PATCH and OPTIONS +* Simple and chainable methods for settings and requests +* Request Body can be `string`, `[]byte`, `struct`, `map`, `slice` and `io.Reader` too + * Auto detects `Content-Type` +* [Response](https://godoc.org/github.com/go-resty/resty#Response) object gives you more possibilities + * Access as `[]byte` array - `response.Body()` OR Access as `string` - `response.String()` + * Know your `response.Time()` and when the response was received via `response.ReceivedAt()` +* Automatic marshal and unmarshal for `JSON` and `XML` content type + * Default is `JSON`, if you supply `struct/map` without header `Content-Type` + * For auto-unmarshal, refer to - + - Success scenario [Request.SetResult()](https://godoc.org/gopkg.in/resty.v0#Request.SetResult) and [Response.Result()](https://godoc.org/gopkg.in/resty.v0#Response.Result). + - Error scenario [Request.SetError()](https://godoc.org/gopkg.in/resty.v0#Request.SetError) and [Response.Error()](https://godoc.org/gopkg.in/resty.v0#Response.Error). +* Easy to upload one or more file(s) via `multipart/form-data` +* Backoff Retry Mechanism with retry condition function [reference](retry_test.go) +* resty client HTTP & REST [Request](https://godoc.org/github.com/go-resty/resty#Client.OnBeforeRequest) and [Response](https://godoc.org/github.com/go-resty/resty#Client.OnAfterResponse) middlewares +* `Request.SetContext` supported on `go1.7` and above +* Authorization option of `BasicAuth` and `Bearer` token +* Set request `ContentLength` value for all requests or a particular request +* Choose between HTTP and REST mode.
Default is `REST` + * `HTTP` - default up to 10 redirects and no automatic response unmarshal + * `REST` - defaults to no redirects and automatic response marshal/unmarshal for `JSON` & `XML` +* Custom [Root Certificates](https://godoc.org/github.com/go-resty/resty#Client.SetRootCertificate) and Client [Certificates](https://godoc.org/github.com/go-resty/resty#Client.SetCertificates) +* Download/Save HTTP response directly into File, like `curl -o` flag. See [SetOutputDirectory](https://godoc.org/github.com/go-resty/resty#Client.SetOutputDirectory) & [SetOutput](https://godoc.org/github.com/go-resty/resty#Request.SetOutput). +* Cookies for your request and CookieJar support +* SRV Record based request instead of Host URL +* Client settings like `Timeout`, `RedirectPolicy`, `Proxy`, `TLSClientConfig`, `Transport`, etc. +* Optionally allows GET request with payload, see [SetAllowGetMethodPayload](https://godoc.org/github.com/go-resty/resty#Client.SetAllowGetMethodPayload) +* resty design + * Have client level settings & options and also override at Request level if you want to + * Request and Response middlewares + * Create multiple clients if you want to, via `resty.New()` + * goroutine concurrent safe + * REST and HTTP modes + * Debug mode - clean and informative logging presentation + * Gzip - I'm not doing anything here. Go does it automatically +* Well tested client library + +resty is tested with Go `v1.3` and above. + +#### Included Batteries + * Redirect Policies - see [how to use](#redirect-policy) + * NoRedirectPolicy + * FlexibleRedirectPolicy + * DomainCheckRedirectPolicy + * etc. [more info](redirect.go) + * Retry Mechanism [how to use](#retries) + * Backoff Retry + * Conditional Retry + * SRV Record based request instead of Host URL [how to use](resty_test.go#L1412) + * etc. (upcoming - throw your ideas [here](https://github.com/go-resty/resty/issues)). + +## Installation +#### Stable Version - Production Ready +Please refer to the section [Versioning](#versioning) for detailed info. +```sh +# install the library +go get -u gopkg.in/resty.v0 +``` +#### Latest Version - Development Edge +```sh +# install the latest & greatest library +go get -u github.com/go-resty/resty +``` + +## It might interest you :) + +The resty author has also published the following projects to the Go community. + + * [aah framework](https://aahframework.org) - Web and API framework for Go. + * [go-model](https://github.com/jeevatkm/go-model) - Robust & Easy to use model mapper and utility methods for Go `struct`. + +## Usage +The following samples will help you become as comfortable as possible with the resty library. Resty comes with a ready-to-use DefaultClient. + +Import resty into your code and refer to it as `resty`. +```go +import ( + "gopkg.in/resty.v0" +) +``` + +#### Simple GET +```go +// GET request +resp, err := resty.R().Get("http://httpbin.org/get") + +// explore response object +fmt.Printf("\nError: %v", err) +fmt.Printf("\nResponse Status Code: %v", resp.StatusCode()) +fmt.Printf("\nResponse Status: %v", resp.Status()) +fmt.Printf("\nResponse Time: %v", resp.Time()) +fmt.Printf("\nResponse Received At: %v", resp.ReceivedAt()) +fmt.Printf("\nResponse Body: %v", resp) // or resp.String() or string(resp.Body()) +// more...
+ +/* Output +Error: +Response Status Code: 200 +Response Status: 200 OK +Response Time: 644.290186ms +Response Received At: 2015-09-15 12:05:28.922780103 -0700 PDT +Response Body: { + "args": {}, + "headers": { + "Accept-Encoding": "gzip", + "Host": "httpbin.org", + "User-Agent": "go-resty v0.1 - https://github.com/go-resty/resty" + }, + "origin": "0.0.0.0", + "url": "http://httpbin.org/get" +} +*/ +``` +#### Enhanced GET +```go +resp, err := resty.R(). + SetQueryParams(map[string]string{ + "page_no": "1", + "limit": "20", + "sort":"name", + "order": "asc", + "random":strconv.FormatInt(time.Now().Unix(), 10), + }). + SetHeader("Accept", "application/json"). + SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F"). + Get("/search_result") + + +// Sample of using Request.SetQueryString method +resp, err := resty.R(). + SetQueryString("productId=232&template=fresh-sample&cat=resty&source=google&kw=buy a lot more"). + SetHeader("Accept", "application/json"). + SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F"). + Get("/show_product") +``` + +#### Various POST method combinations +```go +// POST JSON string +// No need to set content type, if you have client level setting +resp, err := resty.R(). + SetHeader("Content-Type", "application/json"). + SetBody(`{"username":"testuser", "password":"testpass"}`). + SetResult(&AuthSuccess{}). // or SetResult(AuthSuccess{}). + Post("https://myapp.com/login") + +// POST []byte array +// No need to set content type, if you have client level setting +resp, err := resty.R(). + SetHeader("Content-Type", "application/json"). + SetBody([]byte(`{"username":"testuser", "password":"testpass"}`)). + SetResult(&AuthSuccess{}). // or SetResult(AuthSuccess{}). + Post("https://myapp.com/login") + +// POST Struct, default is JSON content type. No need to set one +resp, err := resty.R(). + SetBody(User{Username: "testuser", Password: "testpass"}). + SetResult(&AuthSuccess{}). // or SetResult(AuthSuccess{}). + SetError(&AuthError{}). // or SetError(AuthError{}). + Post("https://myapp.com/login") + +// POST Map, default is JSON content type. No need to set one +resp, err := resty.R(). + SetBody(map[string]interface{}{"username": "testuser", "password": "testpass"}). + SetResult(&AuthSuccess{}). // or SetResult(AuthSuccess{}). + SetError(&AuthError{}). // or SetError(AuthError{}). + Post("https://myapp.com/login") + +// POST of raw bytes for file upload. For example: upload file to Dropbox +fileBytes, _ := ioutil.ReadFile("/Users/jeeva/mydocument.pdf") + +// See we are not setting content-type header, since go-resty automatically detects Content-Type for you +resp, err := resty.R(). + SetBody(fileBytes). + SetContentLength(true). // Dropbox expects this value + SetAuthToken(""). + SetError(&DropboxError{}). // or SetError(DropboxError{}). + Post("https://content.dropboxapi.com/1/files_put/auto/resty/mydocument.pdf") // for upload Dropbox supports PUT too + +// Note: resty detects Content-Type for request body/payload if content type header is not set. +// * For struct and map data type defaults to 'application/json' +// * Fallback is plain text content type +``` + +#### Sample PUT +You can use various combinations of the `PUT` method call as demonstrated for `POST`. +```go +// Note: This is one sample of PUT method usage, refer to POST for more combinations + +// Request goes as JSON content type +// No need to set auth token, error, if you have client level settings +resp, err := resty.R().
+ SetBody(Article{ + Title: "go-resty", + Content: "This is my article content, oh ya!", + Author: "Jeevanandam M", + Tags: []string{"article", "sample", "resty"}, + }). + SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD"). + SetError(&Error{}). // or SetError(Error{}). + Put("https://myapp.com/article/1234") +``` + +#### Sample PATCH +You can use various combinations of the `PATCH` method call as demonstrated for `POST`. +```go +// Note: This is one sample of PATCH method usage, refer to POST for more combinations + +// Request goes as JSON content type +// No need to set auth token, error, if you have client level settings +resp, err := resty.R(). + SetBody(Article{ + Tags: []string{"new tag1", "new tag2"}, + }). + SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD"). + SetError(&Error{}). // or SetError(Error{}). + Patch("https://myapp.com/articles/1234") +``` + +#### Sample DELETE, HEAD, OPTIONS +```go +// DELETE an article +// No need to set auth token, error, if you have client level settings +resp, err := resty.R(). + SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD"). + SetError(&Error{}). // or SetError(Error{}). + Delete("https://myapp.com/articles/1234") + +// DELETE articles with payload/body as a JSON string +// No need to set auth token, error, if you have client level settings +resp, err := resty.R(). + SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD"). + SetError(&Error{}). // or SetError(Error{}). + SetHeader("Content-Type", "application/json"). + SetBody(`{article_ids: [1002, 1006, 1007, 87683, 45432] }`). + Delete("https://myapp.com/articles") + +// HEAD of resource +// No need to set auth token, if you have client level settings +resp, err := resty.R(). + SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD"). + Head("https://myapp.com/videos/hi-res-video") + +// OPTIONS of resource +// No need to set auth token, if you have client level settings +resp, err := resty.R(). + SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD"). + Options("https://myapp.com/servers/nyc-dc-01") +``` + +### Multipart File(s) upload +#### Using io.Reader +```go +profileImgBytes, _ := ioutil.ReadFile("/Users/jeeva/test-img.png") +notesBytes, _ := ioutil.ReadFile("/Users/jeeva/text-file.txt") + +resp, err := dclr(). + SetFileReader("profile_img", "test-img.png", bytes.NewReader(profileImgBytes)). + SetFileReader("notes", "text-file.txt", bytes.NewReader(notesBytes)). + SetFormData(map[string]string{ + "first_name": "Jeevanandam", + "last_name": "M", + }). + Post("http://myapp.com/upload") +``` + +#### Using File directly from Path +```go +// Single file scenario +resp, err := resty.R(). + SetFile("profile_img", "/Users/jeeva/test-img.png"). + Post("http://myapp.com/upload") + +// Multiple files scenario +resp, err := resty.R(). + SetFiles(map[string]string{ + "profile_img": "/Users/jeeva/test-img.png", + "notes": "/Users/jeeva/text-file.txt", + }). + Post("http://myapp.com/upload") + +// Multipart of form fields and files +resp, err := resty.R(). + SetFiles(map[string]string{ + "profile_img": "/Users/jeeva/test-img.png", + "notes": "/Users/jeeva/text-file.txt", + }). + SetFormData(map[string]string{ + "first_name": "Jeevanandam", + "last_name": "M", + "zip_code": "00001", + "city": "my city", + "access_token": "C6A79608-782F-4ED0-A11D-BD82FAD829CD", + }). + Post("http://myapp.com/profile") +``` + +#### Sample Form submission +```go +// just mentioning POST as an example with a simple flow +// User Login +resp, err := resty.R().
+#### Using File directly from Path
+```go
+// Single file scenario
+resp, err := resty.R().
+      SetFile("profile_img", "/Users/jeeva/test-img.png").
+      Post("http://myapp.com/upload")
+
+// Multiple files scenario
+resp, err := resty.R().
+      SetFiles(map[string]string{
+        "profile_img": "/Users/jeeva/test-img.png",
+        "notes": "/Users/jeeva/text-file.txt",
+      }).
+      Post("http://myapp.com/upload")
+
+// Multipart of form fields and files
+resp, err := resty.R().
+      SetFiles(map[string]string{
+        "profile_img": "/Users/jeeva/test-img.png",
+        "notes": "/Users/jeeva/text-file.txt",
+      }).
+      SetFormData(map[string]string{
+        "first_name": "Jeevanandam",
+        "last_name": "M",
+        "zip_code": "00001",
+        "city": "my city",
+        "access_token": "C6A79608-782F-4ED0-A11D-BD82FAD829CD",
+      }).
+      Post("http://myapp.com/profile")
+```
+
+#### Sample Form submission
+```go
+// Just mentioning POST as an example with a simple flow
+// User Login
+resp, err := resty.R().
+      SetFormData(map[string]string{
+        "username": "jeeva",
+        "password": "mypass",
+      }).
+      Post("http://myapp.com/login")
+
+// Followed by profile update
+resp, err := resty.R().
+      SetFormData(map[string]string{
+        "first_name": "Jeevanandam",
+        "last_name": "M",
+        "zip_code": "00001",
+        "city": "new city update",
+      }).
+      Post("http://myapp.com/profile")
+
+// Multi value form data
+criteria := url.Values{
+  "search_criteria": []string{"book", "glass", "pencil"},
+}
+resp, err := resty.R().
+      SetMultiValueFormData(criteria).
+      Post("http://myapp.com/search")
+```
+
+#### Save HTTP Response into File
+```go
+// Set the output directory path; if the directory does not exist then resty creates it.
+// This setting is optional. If you're planning to use an absolute path in
+// `Request.SetOutput`, both can be used together.
+resty.SetOutputDirectory("/Users/jeeva/Downloads")
+
+// HTTP response gets saved into a file, similar to the curl -o flag
+_, err := resty.R().
+      SetOutput("plugin/ReplyWithHeader-v5.1-beta.zip").
+      Get("http://bit.ly/1LouEKr")
+
+// OR using an absolute path
+// Note: the output directory path is not used for an absolute path
+_, err := resty.R().
+      SetOutput("/MyDownloads/plugin/ReplyWithHeader-v5.1-beta.zip").
+      Get("http://bit.ly/1LouEKr")
+```
+
+#### Request and Response Middleware
+Resty provides middleware to manipulate the Request and the Response. It is more flexible than a callback approach.
+```go
+// Registering Request Middleware
+resty.OnBeforeRequest(func(c *resty.Client, req *resty.Request) error {
+    // Now you have access to the Client and the current Request object;
+    // manipulate it as per your need
+
+    return nil  // return nil on success, otherwise return an error
+  })
+
+// Registering Response Middleware
+resty.OnAfterResponse(func(c *resty.Client, resp *resty.Response) error {
+    // Now you have access to the Client and the current Response object;
+    // manipulate it as per your need
+
+    return nil  // return nil on success, otherwise return an error
+  })
+```
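+
+For instance, a request middleware can stamp every outgoing request with a correlation header. A minimal sketch (the header name and id scheme are illustrative placeholders, not part of resty):
+```go
+resty.OnBeforeRequest(func(c *resty.Client, req *resty.Request) error {
+    // Tag each request so it can be traced through server logs
+    req.SetHeader("X-Request-Id", strconv.FormatInt(time.Now().UnixNano(), 10))
+    return nil
+  })
+```
+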
+#### Redirect Policy
+Resty provides a few ready-to-use redirect policies, and it also supports multiple policies together.
+```go
+// Assign Client Redirect Policy. Create one as per your need
+resty.SetRedirectPolicy(resty.FlexibleRedirectPolicy(15))
+
+// Want multiple policies, such as redirect count, domain name check, etc.
+resty.SetRedirectPolicy(resty.FlexibleRedirectPolicy(20),
+                        resty.DomainCheckRedirectPolicy("host1.com", "host2.org", "host3.net"))
+```
+
+##### Custom Redirect Policy
+Implement the [RedirectPolicy](redirect.go#L20) interface and register it with the resty client. Have a look at [redirect.go](redirect.go) for more information.
+```go
+// Using a raw func with resty.SetRedirectPolicy
+resty.SetRedirectPolicy(resty.RedirectPolicyFunc(func(req *http.Request, via []*http.Request) error {
+  // Implement your logic here
+
+  // return nil to continue the redirect, otherwise return an error to stop/prevent it
+  return nil
+}))
+
+//---------------------------------------------------
+
+// Using a struct to create a more flexible redirect policy
+type CustomRedirectPolicy struct {
+  // variables go here
+}
+
+func (c *CustomRedirectPolicy) Apply(req *http.Request, via []*http.Request) error {
+  // Implement your logic here
+
+  // return nil to continue the redirect, otherwise return an error to stop/prevent it
+  return nil
+}
+
+// Registering it with resty (pointer, since Apply has a pointer receiver)
+resty.SetRedirectPolicy(&CustomRedirectPolicy{/* initialize variables */})
+```
+
+#### Custom Root Certificates and Client Certificates
+```go
+// Custom root certificates: just supply the .pem file.
+// You can add one or more root certificates; they get appended
+resty.SetRootCertificate("/path/to/root/pemFile1.pem")
+resty.SetRootCertificate("/path/to/root/pemFile2.pem")
+// ... and so on!
+
+// Adding client certificates: you can add one or more certificates.
+// Sample for creating a certificate object:
+// Parsing a public/private key pair from a pair of files. The files must contain PEM encoded data.
+cert1, err := tls.LoadX509KeyPair("certs/client.pem", "certs/client.key")
+if err != nil {
+  log.Fatalf("ERROR client certificate: %s", err)
+}
+// ...
+
+// You can add one or more certificates
+resty.SetCertificates(cert1, cert2, cert3)
+```
+
+#### Proxy Settings - Client as well as at Request Level
+By default, `Go` supports proxies via the `HTTP_PROXY` environment variable. Resty provides support via `SetProxy` & `RemoveProxy`.
+Choose as per your need.
+
+**Client Level Proxy** settings apply to all requests
+```go
+// Setting a Proxy URL and Port
+resty.SetProxy("http://proxyserver:8888")
+
+// Want to remove the proxy setting
+resty.RemoveProxy()
+```
+**Request Level Proxy** settings give you control to override it at the individual request level
+```go
+// Set proxy for the current request
+resp, err := c.R().
+      SetProxy("http://sampleproxy:8888").
+      Get("http://httpbin.org/get")
+```
+
+#### Retries
+
+Resty uses [backoff](http://www.awsarchitectureblog.com/2015/03/backoff.html)
+to increase retry intervals after each attempt.
+
+Usage example:
+```go
+// Retries are configured per client
+resty.DefaultClient.
+    // Set retry count to non zero to enable retries
+    SetRetryCount(3).
+    // You can override the initial retry wait time.
+    // Default is 100 milliseconds.
+    SetRetryWaitTime(5 * time.Second).
+    // MaxWaitTime can be overridden as well.
+    // Default is 2 seconds.
+    SetRetryMaxWaitTime(20 * time.Second)
+```
+
+The setup above makes resty retry requests that return a non-nil error up to
+3 times, with the delay increasing after each attempt.
+
+You can optionally provide the client with custom retry conditions:
+
+```go
+resty.DefaultClient.
+    AddRetryCondition(
+        // The condition function will be provided with a *resty.Response as a
+        // parameter. It is expected to return a (bool, error) pair. Resty will retry
+        // in case the condition returns true or a non-nil error.
+        func(r *resty.Response) (bool, error) {
+            return r.StatusCode() == http.StatusTooManyRequests, nil
+        },
+    )
+```
+
+The example above makes resty retry requests that end with a `429 Too Many Requests`
+status code.
+
+Multiple retry conditions can be added, as in the sketch below.
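+
+A minimal sketch combining two conditions (the 5xx check is an illustrative addition, not from the original docs). The request is retried as soon as any registered condition returns true with a nil error:
+```go
+resty.DefaultClient.
+    SetRetryCount(3).
+    AddRetryCondition(func(r *resty.Response) (bool, error) {
+        return r.StatusCode() == http.StatusTooManyRequests, nil
+    }).
+    AddRetryCondition(func(r *resty.Response) (bool, error) {
+        return r.StatusCode() >= http.StatusInternalServerError, nil
+    })
+```
+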
+It is also possible to use `resty.Backoff(...)` to implement arbitrary retry scenarios.
+See [retry_test.go](retry_test.go) for reference.
+
+#### Choose REST or HTTP mode
+```go
+// REST mode. This is the default.
+resty.SetRESTMode()
+
+// HTTP mode
+resty.SetHTTPMode()
+```
+
+#### Allow GET request with Payload
+```go
+// Allow GET request with Payload. This is disabled by default.
+resty.SetAllowGetMethodPayload(true)
+```
+
+#### Wanna Multiple Clients
+```go
+// Here you go!
+// Client 1
+client1 := resty.New()
+client1.R().Get("http://httpbin.org")
+// ...
+
+// Client 2
+client2 := resty.New()
+client2.R().Head("http://httpbin.org")
+// ...
+
+// Bend it as per your need!!!
+```
+
+#### Remaining Client Settings & their Options
+```go
+// Unique settings at Client level
+//--------------------------------
+// Enable debug mode
+resty.SetDebug(true)
+
+// Using your custom log writer
+logFile, _ := os.OpenFile("/Users/jeeva/go-resty.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
+resty.SetLogger(logFile)
+
+// Assign Client TLSClientConfig
+// One can set a custom root certificate. Refer: http://golang.org/pkg/crypto/tls/#example_Dial
+resty.SetTLSClientConfig(&tls.Config{ RootCAs: roots })
+
+// or one can disable the security check (https)
+resty.SetTLSClientConfig(&tls.Config{ InsecureSkipVerify: true })
+
+// Set client timeout as per your need
+resty.SetTimeout(time.Duration(1 * time.Minute))
+
+
+// You can override all below settings and options at request level if you want to
+//--------------------------------------------------------------------------------
+// Host URL for all requests, so you can use relative URLs in the request
+resty.SetHostURL("http://httpbin.org")
+
+// Headers for all requests
+resty.SetHeader("Accept", "application/json")
+resty.SetHeaders(map[string]string{
+        "Content-Type": "application/json",
+        "User-Agent": "My custom User Agent String",
+      })
+
+// Cookies for all requests
+resty.SetCookie(&http.Cookie{
+      Name:"go-resty",
+      Value:"This is cookie value",
+      Path: "/",
+      Domain: "sample.com",
+      MaxAge: 36000,
+      HttpOnly: true,
+      Secure: false,
+    })
+resty.SetCookies(cookies)
+
+// URL query parameters for all requests
+resty.SetQueryParam("user_id", "00001")
+resty.SetQueryParams(map[string]string{ // sample for those who prefer this manner
+      "api_key": "api-key-here",
+      "api_secret": "api-secret",
+    })
+resty.R().SetQueryString("productId=232&template=fresh-sample&cat=resty&source=google&kw=buy a lot more")
+
+// Form data for all requests. Typically used with POST and PUT
+resty.SetFormData(map[string]string{
+    "access_token": "BC594900-518B-4F7E-AC75-BD37F019E08F",
+  })
+
+// Basic Auth for all requests
+resty.SetBasicAuth("myuser", "mypass")
+
+// Bearer Auth Token for all requests
+resty.SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F")
+
+// Enabling the Content-Length value for all requests
+resty.SetContentLength(true)
+
+// Registering a global Error object structure for JSON/XML requests
+resty.SetError(&Error{})  // or resty.SetError(Error{})
+```
+
+#### Unix Socket
+
+```go
+unixSocket := "/var/run/my_socket.sock"
+
+// Create an http.Transport so we can set it in resty.
+transport := http.Transport{
+  Dial: func(_, _ string) (net.Conn, error) {
+    return net.Dial("unix", unixSocket)
+  },
+}
+
+// Set the transport we created, set the scheme of the communication to the
+// socket, and set the unixSocket as the HostURL.
+r := resty.New().SetTransport(&transport).SetScheme("http").SetHostURL(unixSocket)
+
+// No need to write the host's URL on the request, just the path.
+r.R().Get("/index.html")
+
+```
+
+## Versioning
+resty releases versions according to [Semantic Versioning](http://semver.org).
+
+`gopkg.in/resty.vX` points to the appropriate tagged version; `X` denotes the major version number of a stable release. It's recommended to use a versioned import path, e.g. `gopkg.in/resty.v0`. Development takes place on the master branch. Although the code in master should always compile and test successfully, it might break APIs. We aim to maintain backwards compatibility, but APIs and behaviour might be changed to fix a bug.
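+
+For example, pinning to the v0 series looks like this (a sketch; the package name is `resty` regardless of the import path):
+```go
+// go get gopkg.in/resty.v0
+import "gopkg.in/resty.v0"
+
+// then use it as shown throughout this document
+resp, err := resty.R().Get("http://httpbin.org/get")
+```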
+
+## Contributing
+Welcome! If you find an improvement or an issue you want to fix, feel free to send a pull request. I like pull requests that include test cases for the fix or enhancement. I have done my best to bring pretty good code coverage; feel free to write tests.
+
+BTW, I'd like to know what you think about go-resty. Kindly open an issue or send me an email; it'd mean a lot to me.
+
+## Author
+Jeevanandam M. - jeeva@myjeeva.com
+
+## Contributors
+Have a look at the [Contributors](https://github.com/go-resty/resty/graphs/contributors) page.
+
+## License
+resty is released under the MIT license; refer to the [LICENSE](LICENSE) file.
diff --git a/vendor/github.com/go-resty/resty/client.go b/vendor/github.com/go-resty/resty/client.go
new file mode 100644
index 000000000..96b79a3d6
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/client.go
@@ -0,0 +1,926 @@
+// Copyright (c) 2015-2017 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+	"bytes"
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/json"
+	"encoding/xml"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"mime/multipart"
+	"net/http"
+	"net/url"
+	"os"
+	"path/filepath"
+	"reflect"
+	"regexp"
+	"runtime"
+	"strings"
+	"sync"
+	"time"
+)
+
+const (
+	// MethodGet HTTP method
+	MethodGet = "GET"
+
+	// MethodPost HTTP method
+	MethodPost = "POST"
+
+	// MethodPut HTTP method
+	MethodPut = "PUT"
+
+	// MethodDelete HTTP method
+	MethodDelete = "DELETE"
+
+	// MethodPatch HTTP method
+	MethodPatch = "PATCH"
+
+	// MethodHead HTTP method
+	MethodHead = "HEAD"
+
+	// MethodOptions HTTP method
+	MethodOptions = "OPTIONS"
+)
+
+var (
+	hdrUserAgentKey     = http.CanonicalHeaderKey("User-Agent")
+	hdrAcceptKey        = http.CanonicalHeaderKey("Accept")
+	hdrContentTypeKey   = http.CanonicalHeaderKey("Content-Type")
+	hdrContentLengthKey = http.CanonicalHeaderKey("Content-Length")
+	hdrAuthorizationKey = http.CanonicalHeaderKey("Authorization")
+
+	plainTextType   = "text/plain; charset=utf-8"
+	jsonContentType = "application/json; charset=utf-8"
+	formContentType = "application/x-www-form-urlencoded"
+
+	jsonCheck = regexp.MustCompile("(?i:[application|text]/json)")
+	xmlCheck  = regexp.MustCompile("(?i:[application|text]/xml)")
+
+	hdrUserAgentValue = "go-resty v%s - https://github.com/go-resty/resty"
+	bufPool           = &sync.Pool{New: func() interface{} { return &bytes.Buffer{} }}
+)
+
+// Client type is used for HTTP/RESTful global values
+// for all requests raised from the client
+type Client struct {
+	HostURL               string
+	QueryParam            url.Values
+	FormData              url.Values
+	Header                http.Header
+	UserInfo              *User
+	Token                 string
+	Cookies               []*http.Cookie
+	Error                 reflect.Type
+	Debug                 bool
+	DisableWarn           bool
+	AllowGetMethodPayload bool
+	Log                   *log.Logger
+	RetryCount            int
+	RetryWaitTime         time.Duration
+	RetryMaxWaitTime      time.Duration
+	RetryConditions       []RetryConditionFunc
+	JSONMarshal           func(v interface{}) ([]byte, error)
+	JSONUnmarshal         func(data []byte, v interface{}) error
+
+	httpClient       *http.Client
+	transport        *http.Transport
+	setContentLength bool
+	isHTTPMode       bool
+	outputDirectory  string
+	scheme           string
+	proxyURL         *url.URL
+	closeConnection  bool
+	beforeRequest    []func(*Client, *Request) error
+	udBeforeRequest  []func(*Client, *Request) error
+	preReqHook       func(*Client, *Request) error
+	afterResponse    []func(*Client, *Response) error
+}
+
+// User type holds username and password information
+type User struct {
+	Username, Password string
+}
+
+// SetHostURL method sets the Host URL in the client instance. It will be used with requests
+// raised from this client with a relative URL
+//		// Setting HTTP address
+//		resty.SetHostURL("http://myjeeva.com")
+//
+//		// Setting HTTPS address
+//		resty.SetHostURL("https://myjeeva.com")
+//
+func (c *Client) SetHostURL(url string) *Client {
+	c.HostURL = strings.TrimRight(url, "/")
+	return c
+}
+
+// SetHeader method sets a single header field and its value in the client instance.
+// These headers will be applied to all requests raised from this client instance.
+// They can also be overridden by request-level header options; see `resty.R().SetHeader`
+// or `resty.R().SetHeaders`.
+//
+// Example: To set `Content-Type` and `Accept` as `application/json`
+//
+//		resty.
+//			SetHeader("Content-Type", "application/json").
+//			SetHeader("Accept", "application/json")
+//
+func (c *Client) SetHeader(header, value string) *Client {
+	c.Header.Set(header, value)
+	return c
+}
+
+// SetHeaders method sets multiple header fields and their values at one go in the client instance.
+// These headers will be applied to all requests raised from this client instance. They can also be
+// overridden by request-level header options; see `resty.R().SetHeaders` or `resty.R().SetHeader`.
+//
+// Example: To set `Content-Type` and `Accept` as `application/json`
+//
+//		resty.SetHeaders(map[string]string{
+//			"Content-Type": "application/json",
+//			"Accept": "application/json",
+//		})
+//
+func (c *Client) SetHeaders(headers map[string]string) *Client {
+	for h, v := range headers {
+		c.Header.Set(h, v)
+	}
+
+	return c
+}
+
+// SetCookieJar method sets a custom http.CookieJar in the resty client. It's a way to override the default.
+// Example: sometimes we don't want to save cookies while contacting an API, so we can remove the default
+// CookieJar in the resty client.
+//
+//		resty.SetCookieJar(nil)
+//
+func (c *Client) SetCookieJar(jar http.CookieJar) *Client {
+	c.httpClient.Jar = jar
+	return c
+}
+
+// SetCookie method appends a single cookie in the client instance.
+// These cookies will be added to all requests raised from this client instance.
+//		resty.SetCookie(&http.Cookie{
+//			Name:"go-resty",
+//			Value:"This is cookie value",
+//			Path: "/",
+//			Domain: "sample.com",
+//			MaxAge: 36000,
+//			HttpOnly: true,
+//			Secure: false,
+//		})
+//
+func (c *Client) SetCookie(hc *http.Cookie) *Client {
+	c.Cookies = append(c.Cookies, hc)
+	return c
+}
+
+// SetCookies method sets an array of cookies in the client instance.
+// These cookies will be added to all requests raised from this client instance.
+//		cookies := make([]*http.Cookie, 0)
+//
+//		cookies = append(cookies, &http.Cookie{
+//			Name:"go-resty-1",
+//			Value:"This is cookie 1 value",
+//			Path: "/",
+//			Domain: "sample.com",
+//			MaxAge: 36000,
+//			HttpOnly: true,
+//			Secure: false,
+//		})
+//
+//		cookies = append(cookies, &http.Cookie{
+//			Name:"go-resty-2",
+//			Value:"This is cookie 2 value",
+//			Path: "/",
+//			Domain: "sample.com",
+//			MaxAge: 36000,
+//			HttpOnly: true,
+//			Secure: false,
+//		})
+//
+//		// Setting the cookies into resty
+//		resty.SetCookies(cookies)
+//
+func (c *Client) SetCookies(cs []*http.Cookie) *Client {
+	c.Cookies = append(c.Cookies, cs...)
+	return c
+}
+
+// SetQueryParam method sets a single parameter and its value in the client instance.
+// It will be formed as a query string for the request. For example: `search=kitchen%20papers&size=large`
+// in the URL after the `?` mark. These query params will be added to all requests raised from
+// this client instance. They can also be overridden by request-level query param options;
+// see `resty.R().SetQueryParam` or `resty.R().SetQueryParams`.
+//		resty.
+//			SetQueryParam("search", "kitchen papers").
+//			SetQueryParam("size", "large")
+//
+func (c *Client) SetQueryParam(param, value string) *Client {
+	c.QueryParam.Set(param, value)
+	return c
+}
+
+// SetQueryParams method sets multiple parameters and their values at one go in the client instance.
+// It will be formed as a query string for the request. For example: `search=kitchen%20papers&size=large`
+// in the URL after the `?` mark. These query params will be added to all requests raised from this
+// client instance. They can also be overridden by request-level query param options;
+// see `resty.R().SetQueryParams` or `resty.R().SetQueryParam`.
+//		resty.SetQueryParams(map[string]string{
+//			"search": "kitchen papers",
+//			"size": "large",
+//		})
+//
+func (c *Client) SetQueryParams(params map[string]string) *Client {
+	for p, v := range params {
+		c.SetQueryParam(p, v)
+	}
+
+	return c
+}
+
+// SetFormData method sets form parameters and their values in the client instance.
+// It's applicable only to the HTTP methods `POST` and `PUT`, and the request content type would be set as
+// `application/x-www-form-urlencoded`. These form data will be added to all requests raised from
+// this client instance. They can also be overridden by request-level form data; see `resty.R().SetFormData`.
+//		resty.SetFormData(map[string]string{
+//			"access_token": "BC594900-518B-4F7E-AC75-BD37F019E08F",
+//			"user_id": "3455454545",
+//		})
+//
+func (c *Client) SetFormData(data map[string]string) *Client {
+	for k, v := range data {
+		c.FormData.Set(k, v)
+	}
+
+	return c
+}
+
+// SetBasicAuth method sets the basic authentication header in the HTTP request. Example:
+//		Authorization: Basic
+//
+// Example: To set the header for username "go-resty" and password "welcome"
+//		resty.SetBasicAuth("go-resty", "welcome")
+//
+// This basic auth information gets added to all requests raised from this client instance.
+// It can also be overridden or set at the request level; see `resty.R().SetBasicAuth`.
+//
+func (c *Client) SetBasicAuth(username, password string) *Client {
+	c.UserInfo = &User{Username: username, Password: password}
+	return c
+}
+
+// SetAuthToken method sets the bearer auth token header in the HTTP request. Example:
+//		Authorization: Bearer
+//
+// Example: To set auth token BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F
+//
+//		resty.SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F")
+//
+// This bearer auth token gets added to all requests raised from this client instance.
+// It can also be overridden or set at the request level; see `resty.R().SetAuthToken`.
+//
+func (c *Client) SetAuthToken(token string) *Client {
+	c.Token = token
+	return c
+}
+
+// R method creates a request instance; it's used for Get, Post, Put, Delete, Patch, Head and Options.
+func (c *Client) R() *Request {
+	r := &Request{
+		URL:        "",
+		Method:     "",
+		QueryParam: url.Values{},
+		FormData:   url.Values{},
+		Header:     http.Header{},
+		Body:       nil,
+		Result:     nil,
+		Error:      nil,
+		RawRequest: nil,
+		client:     c,
+		bodyBuf:    nil,
+
+		multipartFiles: []*File{},
+	}
+
+	return r
+}
+
+// OnBeforeRequest method appends a request middleware to the before-request chain.
+// It gets applied after the default `go-resty` request middlewares and before the request
+// is sent from `go-resty` to the host server.
+//		resty.OnBeforeRequest(func(c *resty.Client, r *resty.Request) error {
+//			// Now you have access to the Client and Request instance
+//			// manipulate it as per your need
+//
+//			return nil	// return nil on success, otherwise return an error
+//		})
+//
+func (c *Client) OnBeforeRequest(m func(*Client, *Request) error) *Client {
+	c.udBeforeRequest = append(c.udBeforeRequest, m)
+	return c
+}
+
+// OnAfterResponse method appends a response middleware to the after-response chain.
+// Once we receive a response from the host server, the default `go-resty` response middlewares
+// get applied and then the user-assigned response middlewares are applied.
+//		resty.OnAfterResponse(func(c *resty.Client, r *resty.Response) error {
+//			// Now you have access to the Client and Response instance
+//			// manipulate it as per your need
+//
+//			return nil	// return nil on success, otherwise return an error
+//		})
+//
+func (c *Client) OnAfterResponse(m func(*Client, *Response) error) *Client {
+	c.afterResponse = append(c.afterResponse, m)
+	return c
+}
+
+// SetPreRequestHook method sets the given pre-request function into the resty client.
+// It is called right before the request is fired.
+//
+// Note: Only one pre-request hook can be registered. Use `resty.OnBeforeRequest` for multiple.
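+//
+// A sketch of usage (the handler body is illustrative, not from the original docs):
+//
+//		resty.SetPreRequestHook(func(c *resty.Client, r *resty.Request) error {
+//			// e.g. compute and attach a request signature just before it is sent
+//			return nil
+//		})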
+func (c *Client) SetPreRequestHook(h func(*Client, *Request) error) *Client {
+	if c.preReqHook != nil {
+		c.Log.Printf("Overwriting an existing pre-request hook: %s", functionName(h))
+	}
+	c.preReqHook = h
+	return c
+}
+
+// SetDebug method enables the debug mode on the `go-resty` client. The client logs details of every request and response.
+// For `Request`, it logs information such as HTTP verb, relative URL path, host, headers, and body if it has one.
+// For `Response`, it logs information such as status, response time, headers, and body if it has one.
+//		resty.SetDebug(true)
+//
+func (c *Client) SetDebug(d bool) *Client {
+	c.Debug = d
+	return c
+}
+
+// SetDisableWarn method disables the warning message on the `go-resty` client.
+// For example: go-resty warns the user when basic auth is used on HTTP mode.
+//		resty.SetDisableWarn(true)
+//
+func (c *Client) SetDisableWarn(d bool) *Client {
+	c.DisableWarn = d
+	return c
+}
+
+// SetAllowGetMethodPayload method allows the GET method with payload on the `go-resty` client.
+// For example: go-resty allows the user to send a request with a payload using the HTTP GET method.
+//		resty.SetAllowGetMethodPayload(true)
+//
+func (c *Client) SetAllowGetMethodPayload(a bool) *Client {
+	c.AllowGetMethodPayload = a
+	return c
+}
+
+// SetLogger method sets the given writer for logging go-resty request and response details.
+// Default is os.Stderr
+//		file, _ := os.OpenFile("/Users/jeeva/go-resty.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
+//
+//		resty.SetLogger(file)
+//
+func (c *Client) SetLogger(w io.Writer) *Client {
+	c.Log = getLogger(w)
+	return c
+}
+
+// SetContentLength method enables the HTTP header `Content-Length` value for every request.
+// By default go-resty won't set `Content-Length`.
+//		resty.SetContentLength(true)
+//
+// You also have the option to enable it for a particular request. See `resty.R().SetContentLength`
+//
+func (c *Client) SetContentLength(l bool) *Client {
+	c.setContentLength = l
+	return c
+}
+
+// SetTimeout method sets the timeout for requests raised from the client.
+//		resty.SetTimeout(time.Duration(1 * time.Minute))
+//
+func (c *Client) SetTimeout(timeout time.Duration) *Client {
+	c.httpClient.Timeout = timeout
+	return c
+}
+
+// SetError method registers the global or client-common `Error` object into go-resty.
+// It is used for automatic unmarshalling if the response status code is greater than 399 and
+// the content type is either JSON or XML. Can be a pointer or non-pointer.
+//		resty.SetError(&Error{})
+//		// OR
+//		resty.SetError(Error{})
+//
+func (c *Client) SetError(err interface{}) *Client {
+	c.Error = typeOf(err)
+	return c
+}
+
+// SetRedirectPolicy method sets the client redirect policy. go-resty provides ready-to-use
+// redirect policies. Want to create one for yourself? Refer to `redirect.go`.
+//
+//		resty.SetRedirectPolicy(FlexibleRedirectPolicy(20))
+//
+//		// Need multiple redirect policies together
+//		resty.SetRedirectPolicy(FlexibleRedirectPolicy(20), DomainCheckRedirectPolicy("host1.com", "host2.net"))
+//
+func (c *Client) SetRedirectPolicy(policies ...interface{}) *Client {
+	for _, p := range policies {
+		if _, ok := p.(RedirectPolicy); !ok {
+			c.Log.Printf("ERROR: %v does not implement resty.RedirectPolicy (missing Apply method)",
+				functionName(p))
+		}
+	}
+
+	c.httpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error {
+		for _, p := range policies {
+			if err := p.(RedirectPolicy).Apply(req, via); err != nil {
+				return err
+			}
+		}
+		return nil // looks good, go ahead
+	}
+
+	return c
+}
+
+// SetRetryCount method enables retry on the `go-resty` client and allows you
+// to set the number of retries. Resty uses a backoff mechanism.
+func (c *Client) SetRetryCount(count int) *Client {
+	c.RetryCount = count
+	return c
+}
+
+// SetRetryWaitTime method sets the default wait time to sleep before retrying the
+// request.
+// Default is 100 milliseconds.
+func (c *Client) SetRetryWaitTime(waitTime time.Duration) *Client {
+	c.RetryWaitTime = waitTime
+	return c
+}
+
+// SetRetryMaxWaitTime method sets the max wait time to sleep before retrying the
+// request.
+// Default is 2 seconds.
+func (c *Client) SetRetryMaxWaitTime(maxWaitTime time.Duration) *Client {
+	c.RetryMaxWaitTime = maxWaitTime
+	return c
+}
+
+// AddRetryCondition method adds a retry condition function to the array of functions
+// that are checked to determine if the request should be retried. The request will
+// be retried if any of the functions returns true and the error is nil.
+func (c *Client) AddRetryCondition(condition RetryConditionFunc) *Client {
+	c.RetryConditions = append(c.RetryConditions, condition)
+	return c
+}
+
+// SetHTTPMode method sets the go-resty mode to HTTP
+func (c *Client) SetHTTPMode() *Client {
+	return c.SetMode("http")
+}
+
+// SetRESTMode method sets the go-resty mode to RESTful
+func (c *Client) SetRESTMode() *Client {
+	return c.SetMode("rest")
+}
+
+// SetMode method sets the go-resty client mode to the given value, such as 'http' & 'rest'.
+// RESTful:
+//		- No redirects
+//		- Automatic response unmarshalling if it is JSON or XML
+// HTTP:
+//		- Up to 10 redirects
+//		- No automatic unmarshalling. The response will be treated as `response.String()`
+//
+// If you want more redirects, use FlexibleRedirectPolicy
+//		resty.SetRedirectPolicy(FlexibleRedirectPolicy(20))
+//
+func (c *Client) SetMode(mode string) *Client {
+	// HTTP
+	if mode == "http" {
+		c.isHTTPMode = true
+		c.SetRedirectPolicy(FlexibleRedirectPolicy(10))
+		c.afterResponse = []func(*Client, *Response) error{
+			responseLogger,
+			saveResponseIntoFile,
+		}
+		return c
+	}
+
+	// RESTful
+	c.isHTTPMode = false
+	c.SetRedirectPolicy(NoRedirectPolicy())
+	c.afterResponse = []func(*Client, *Response) error{
+		responseLogger,
+		parseResponseBody,
+		saveResponseIntoFile,
+	}
+	return c
+}
+
+// Mode method returns the current client mode. Typically it's "http" or "rest".
+// Default is "rest"
+func (c *Client) Mode() string {
+	if c.isHTTPMode {
+		return "http"
+	}
+	return "rest"
+}
+
+// SetTLSClientConfig method sets the TLSClientConfig for the underlying client transport.
+//
+// Example:
+//		// One can set a custom root certificate. Refer: http://golang.org/pkg/crypto/tls/#example_Dial
+//		resty.SetTLSClientConfig(&tls.Config{ RootCAs: roots })
+//
+//		// or one can disable the security check (https)
+//		resty.SetTLSClientConfig(&tls.Config{ InsecureSkipVerify: true })
+// Note: This method overwrites the existing `TLSClientConfig`.
+//
+func (c *Client) SetTLSClientConfig(config *tls.Config) *Client {
+	c.transport.TLSClientConfig = config
+	c.httpClient.Transport = c.transport
+	return c
+}
+
+// SetProxy method sets the proxy URL and port for the resty client.
+//		resty.SetProxy("http://proxyserver:8888")
+//
+// Alternatives: for a request-level proxy, see `Request.SetProxy`. Or, without this `SetProxy` method,
+// you can also set a proxy via an environment variable. By default `Go` uses the setting from `HTTP_PROXY`.
+//
+func (c *Client) SetProxy(proxyURL string) *Client {
+	if pURL, err := url.Parse(proxyURL); err == nil {
+		c.proxyURL = pURL
+		c.transport.Proxy = http.ProxyURL(c.proxyURL)
+		c.httpClient.Transport = c.transport
+	} else {
+		c.Log.Printf("ERROR [%v]", err)
+		c.RemoveProxy()
+	}
+
+	return c
+}
+
+// RemoveProxy method removes the proxy configuration from the resty client
+//		resty.RemoveProxy()
+//
+func (c *Client) RemoveProxy() *Client {
+	c.proxyURL = nil
+	c.transport.Proxy = nil
+	c.httpClient.Transport = c.transport
+
+	return c
+}
+
+// SetCertificates method helps to conveniently set client certificates into resty.
+//
+func (c *Client) SetCertificates(certs ...tls.Certificate) *Client {
+	config := c.getTLSConfig()
+	config.Certificates = append(config.Certificates, certs...)
+	return c
+}
+
+// SetRootCertificate method helps to add one or more root certificates into the resty client
+//		resty.SetRootCertificate("/path/to/root/pemFile.pem")
+//
+func (c *Client) SetRootCertificate(pemFilePath string) *Client {
+	rootPemData, err := ioutil.ReadFile(pemFilePath)
+	if err != nil {
+		c.Log.Printf("ERROR [%v]", err)
+		return c
+	}
+
+	config := c.getTLSConfig()
+	if config.RootCAs == nil {
+		config.RootCAs = x509.NewCertPool()
+	}
+
+	config.RootCAs.AppendCertsFromPEM(rootPemData)
+
+	return c
+}
+
+// SetOutputDirectory method sets the output directory for saving HTTP responses into files.
+// If the output directory does not exist, then resty creates it. This setting is optional;
+// if you're planning to use an absolute path in `Request.SetOutput`, both can be used together.
+//		resty.SetOutputDirectory("/save/http/response/here")
+//
+func (c *Client) SetOutputDirectory(dirPath string) *Client {
+	err := createDirectory(dirPath)
+	if err != nil {
+		c.Log.Printf("ERROR [%v]", err)
+	}
+
+	c.outputDirectory = dirPath
+
+	return c
+}
+
+// SetTransport method sets a custom *http.Transport in the resty client. It's a way to override the default.
+//
+// **Note:** It overwrites the default resty transport instance and its configurations.
+//		transport := &http.Transport{
+//			// something like proxying to httptest.Server, etc...
+//			Proxy: func(req *http.Request) (*url.URL, error) {
+//				return url.Parse(server.URL)
+//			},
+//		}
+//
+//		resty.SetTransport(transport)
+//
+func (c *Client) SetTransport(transport *http.Transport) *Client {
+	if transport != nil {
+		c.transport = transport
+		c.httpClient.Transport = c.transport
+	}
+
+	return c
+}
+
+// SetScheme method sets a custom scheme in the resty client. It's a way to override the default.
+//		resty.SetScheme("http")
+//
+func (c *Client) SetScheme(scheme string) *Client {
+	if !IsStringEmpty(scheme) {
+		c.scheme = scheme
+	}
+
+	return c
+}
+
+// SetCloseConnection method sets the variable Close in the http request struct with the given
+// value. More info: https://golang.org/src/net/http/request.go
+func (c *Client) SetCloseConnection(close bool) *Client {
+	c.closeConnection = close
+	return c
+}
+
+// IsProxySet method returns true if a proxy is set on the client, otherwise false.
+func (c *Client) IsProxySet() bool {
+	return c.proxyURL != nil
+}
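+
+// Note on the request pipeline (descriptive summary of the method below): it
+// runs, in order, the user-defined OnBeforeRequest middlewares, resty's
+// built-in request middlewares (URL/header/body parsing, request creation,
+// credentials, logging), the optional pre-request hook, the HTTP round trip,
+// and finally the response middleware chain.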
+// executes the given `Request` object and returns the response
+func (c *Client) execute(req *Request) (*Response, error) {
+	defer putBuffer(req.bodyBuf)
+	// Apply Request middleware
+	var err error
+
+	// user-defined on-before-request methods
+	// to modify the *resty.Request object
+	for _, f := range c.udBeforeRequest {
+		if err = f(c, req); err != nil {
+			return nil, err
+		}
+	}
+
+	// resty middlewares
+	for _, f := range c.beforeRequest {
+		if err = f(c, req); err != nil {
+			return nil, err
+		}
+	}
+
+	// call the pre-request hook if defined
+	if c.preReqHook != nil {
+		if err = c.preReqHook(c, req); err != nil {
+			return nil, err
+		}
+	}
+
+	req.Time = time.Now()
+	resp, err := c.httpClient.Do(req.RawRequest)
+
+	response := &Response{
+		Request:     req,
+		RawResponse: resp,
+		receivedAt:  time.Now(),
+	}
+
+	if err != nil {
+		return response, err
+	}
+
+	if !req.isSaveResponse {
+		defer func() {
+			_ = resp.Body.Close()
+		}()
+
+		if response.body, err = ioutil.ReadAll(resp.Body); err != nil {
+			return response, err
+		}
+
+		response.size = int64(len(response.body))
+	}
+
+	// Apply Response middleware
+	for _, f := range c.afterResponse {
+		if err = f(c, response); err != nil {
+			break
+		}
+	}
+
+	return response, err
+}
+
+// enables a log prefix
+func (c *Client) enableLogPrefix() {
+	c.Log.SetFlags(log.LstdFlags)
+	c.Log.SetPrefix("RESTY ")
+}
+
+// disables a log prefix
+func (c *Client) disableLogPrefix() {
+	c.Log.SetFlags(0)
+	c.Log.SetPrefix("")
+}
+
+// gets the TLS client config; if it does not exist, then creates one
+func (c *Client) getTLSConfig() *tls.Config {
+	if c.transport.TLSClientConfig == nil {
+		c.transport.TLSClientConfig = &tls.Config{}
+		c.httpClient.Transport = c.transport
+	}
+	return c.transport.TLSClientConfig
+}
+
+//
+// File
+//
+
+// File represents file information for a multipart request
+type File struct {
+	Name      string
+	ParamName string
+	io.Reader
+}
+
+// String returns the string value of the current file details
+func (f *File) String() string {
+	return fmt.Sprintf("ParamName: 
%v; FileName: %v", f.ParamName, f.Name) +} + +// +// Helper methods +// + +// IsStringEmpty method tells whether given string is empty or not +func IsStringEmpty(str string) bool { + return (len(strings.TrimSpace(str)) == 0) +} + +// DetectContentType method is used to figure out `Request.Body` content type for request header +func DetectContentType(body interface{}) string { + contentType := plainTextType + kind := kindOf(body) + switch kind { + case reflect.Struct, reflect.Map: + contentType = jsonContentType + case reflect.String: + contentType = plainTextType + default: + if b, ok := body.([]byte); ok { + contentType = http.DetectContentType(b) + } else if kind == reflect.Slice { + contentType = jsonContentType + } + } + + return contentType +} + +// IsJSONType method is to check JSON content type or not +func IsJSONType(ct string) bool { + return jsonCheck.MatchString(ct) +} + +// IsXMLType method is to check XML content type or not +func IsXMLType(ct string) bool { + return xmlCheck.MatchString(ct) +} + +// Unmarshal content into object from JSON or XML +// Deprecated: kept for backward compatibility +func Unmarshal(ct string, b []byte, d interface{}) (err error) { + if IsJSONType(ct) { + err = json.Unmarshal(b, d) + } else if IsXMLType(ct) { + err = xml.Unmarshal(b, d) + } + + return +} + +// Unmarshalc content into object from JSON or XML +func Unmarshalc(c *Client, ct string, b []byte, d interface{}) (err error) { + if IsJSONType(ct) { + err = c.JSONUnmarshal(b, d) + } else if IsXMLType(ct) { + err = xml.Unmarshal(b, d) + } + + return +} + +func getLogger(w io.Writer) *log.Logger { + return log.New(w, "RESTY ", log.LstdFlags) +} + +func addFile(w *multipart.Writer, fieldName, path string) error { + file, err := os.Open(path) + if err != nil { + return err + } + defer func() { + _ = file.Close() + }() + + part, err := w.CreateFormFile(fieldName, filepath.Base(path)) + if err != nil { + return err + } + _, err = io.Copy(part, file) + + return err +} + +func addFileReader(w *multipart.Writer, f *File) error { + part, err := w.CreateFormFile(f.ParamName, f.Name) + if err != nil { + return err + } + _, err = io.Copy(part, f.Reader) + + return err +} + +func getPointer(v interface{}) interface{} { + vv := valueOf(v) + if vv.Kind() == reflect.Ptr { + return v + } + return reflect.New(vv.Type()).Interface() +} + +func isPayloadSupported(m string, allowMethodGet bool) bool { + return (m == MethodPost || m == MethodPut || m == MethodDelete || m == MethodPatch || (allowMethodGet && m == MethodGet)) +} + +func typeOf(i interface{}) reflect.Type { + return indirect(valueOf(i)).Type() +} + +func valueOf(i interface{}) reflect.Value { + return reflect.ValueOf(i) +} + +func indirect(v reflect.Value) reflect.Value { + return reflect.Indirect(v) +} + +func kindOf(v interface{}) reflect.Kind { + return typeOf(v).Kind() +} + +func createDirectory(dir string) (err error) { + if _, err = os.Stat(dir); err != nil { + if os.IsNotExist(err) { + if err = os.MkdirAll(dir, 0755); err != nil { + return + } + } + } + return +} + +func canJSONMarshal(contentType string, kind reflect.Kind) bool { + return IsJSONType(contentType) && (kind == reflect.Struct || kind == reflect.Map) +} + +func functionName(i interface{}) string { + return runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name() +} + +func getBuffer() *bytes.Buffer { + return bufPool.Get().(*bytes.Buffer) +} + +func putBuffer(buf *bytes.Buffer) { + if buf != nil { + buf.Reset() + bufPool.Put(buf) + } +} diff --git 
a/vendor/github.com/go-resty/resty/client_test.go b/vendor/github.com/go-resty/resty/client_test.go new file mode 100644 index 000000000..b408e613a --- /dev/null +++ b/vendor/github.com/go-resty/resty/client_test.go @@ -0,0 +1,365 @@ +// Copyright (c) 2015-2017 Jeevanandam M (jeeva@myjeeva.com), All rights reserved. +// resty source code and usage is governed by a MIT style +// license that can be found in the LICENSE file. + +package resty + +import ( + "crypto/tls" + "errors" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "strconv" + "strings" + "testing" + "time" +) + +func TestClientBasicAuth(t *testing.T) { + ts := createAuthServer(t) + defer ts.Close() + + c := dc() + c.SetBasicAuth("myuser", "basicauth"). + SetHostURL(ts.URL). + SetTLSClientConfig(&tls.Config{InsecureSkipVerify: true}) + + resp, err := c.R(). + SetResult(&AuthSuccess{}). + Post("/login") + + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) + + t.Logf("Result Success: %q", resp.Result().(*AuthSuccess)) + logResponse(t, resp) +} + +func TestClientAuthToken(t *testing.T) { + ts := createAuthServer(t) + defer ts.Close() + + c := dc() + c.SetTLSClientConfig(&tls.Config{InsecureSkipVerify: true}). + SetAuthToken("004DDB79-6801-4587-B976-F093E6AC44FF"). + SetHostURL(ts.URL + "/") + + resp, err := c.R().Get("/profile") + + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) +} + +func TestOnAfterMiddleware(t *testing.T) { + ts := createGenServer(t) + defer ts.Close() + + c := dc() + c.OnAfterResponse(func(c *Client, res *Response) error { + t.Logf("Request sent at: %v", res.Request.Time) + t.Logf("Response Recevied at: %v", res.ReceivedAt()) + + return nil + }) + + resp, err := c.R(). + SetBody("OnAfterResponse: This is plain text body to server"). + Put(ts.URL + "/plaintext") + + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) + assertEqual(t, "TestPut: plain text response", resp.String()) +} + +func TestClientRedirectPolicy(t *testing.T) { + ts := createRedirectServer(t) + defer ts.Close() + + c := dc() + c.SetHTTPMode(). + SetRedirectPolicy(FlexibleRedirectPolicy(20)) + + _, err := c.R().Get(ts.URL + "/redirect-1") + + assertEqual(t, "Get /redirect-21: Stopped after 20 redirects", err.Error()) +} + +func TestClientTimeout(t *testing.T) { + ts := createGetServer(t) + defer ts.Close() + + c := dc() + c.SetHTTPMode(). + SetTimeout(time.Duration(time.Second * 3)) + + _, err := c.R().Get(ts.URL + "/set-timeout-test") + assertEqual(t, true, strings.Contains(strings.ToLower(err.Error()), "timeout")) +} + +func TestClientTimeoutWithinThreshold(t *testing.T) { + ts := createGetServer(t) + defer ts.Close() + + c := dc() + c.SetHTTPMode(). 
+ SetTimeout(time.Duration(time.Second * 3)) + + resp, err := c.R().Get(ts.URL + "/set-timeout-test-with-sequence") + assertError(t, err) + + seq1, _ := strconv.ParseInt(resp.String(), 10, 32) + + resp, err = c.R().Get(ts.URL + "/set-timeout-test-with-sequence") + assertError(t, err) + + seq2, _ := strconv.ParseInt(resp.String(), 10, 32) + + assertEqual(t, seq1+1, seq2) +} + +func TestClientTimeoutInternalError(t *testing.T) { + c := dc() + c.SetHTTPMode() + c.SetTimeout(time.Duration(time.Second * 1)) + + _, _ = c.R().Get("http://localhost:9000/set-timeout-test") +} + +func TestClientProxy(t *testing.T) { + ts := createGetServer(t) + defer ts.Close() + + c := dc() + c.SetTimeout(1 * time.Second) + c.SetProxy("http://sampleproxy:8888") + + resp, err := c.R().Get(ts.URL) + assertEqual(t, true, resp != nil) + assertEqual(t, true, err != nil) + + // Error + c.SetProxy("//not.a.user@%66%6f%6f.com:8888") + + resp, err = c.R(). + Get(ts.URL) + assertEqual(t, true, err == nil) + assertEqual(t, false, resp == nil) +} + +func TestSetCertificates(t *testing.T) { + DefaultClient = dc() + SetCertificates(tls.Certificate{}) + + assertEqual(t, 1, len(DefaultClient.transport.TLSClientConfig.Certificates)) +} + +func TestSetRootCertificate(t *testing.T) { + DefaultClient = dc() + SetRootCertificate(getTestDataPath() + "/sample-root.pem") + + assertEqual(t, true, DefaultClient.transport.TLSClientConfig.RootCAs != nil) +} + +func TestSetRootCertificateNotExists(t *testing.T) { + DefaultClient = dc() + SetRootCertificate(getTestDataPath() + "/not-exists-sample-root.pem") + + assertEqual(t, true, DefaultClient.transport.TLSClientConfig == nil) +} + +func TestOnBeforeRequestModification(t *testing.T) { + tc := New() + tc.OnBeforeRequest(func(c *Client, r *Request) error { + r.SetAuthToken("This is test auth token") + return nil + }) + + ts := createGetServer(t) + defer ts.Close() + + resp, err := tc.R().Get(ts.URL + "/") + + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) + assertEqual(t, "200 OK", resp.Status()) + assertEqual(t, true, resp.Body() != nil) + assertEqual(t, "TestGet: text response", resp.String()) + + logResponse(t, resp) +} + +func TestSetTransport(t *testing.T) { + ts := createGetServer(t) + defer ts.Close() + DefaultClient = dc() + + transport := &http.Transport{ + // somthing like Proxying to httptest.Server, etc... 
+ Proxy: func(req *http.Request) (*url.URL, error) { + return url.Parse(ts.URL) + }, + } + SetTransport(transport) + + assertEqual(t, true, DefaultClient.transport != nil) +} + +func TestSetScheme(t *testing.T) { + DefaultClient = dc() + + SetScheme("http") + + assertEqual(t, true, DefaultClient.scheme == "http") +} + +func TestSetCookieJar(t *testing.T) { + DefaultClient = dc() + backupJar := DefaultClient.httpClient.Jar + + SetCookieJar(nil) + assertEqual(t, true, DefaultClient.httpClient.Jar == nil) + + SetCookieJar(backupJar) + assertEqual(t, true, DefaultClient.httpClient.Jar == backupJar) +} + +func TestClientOptions(t *testing.T) { + SetHTTPMode().SetContentLength(true) + assertEqual(t, Mode(), "http") + assertEqual(t, DefaultClient.setContentLength, true) + + SetRESTMode() + assertEqual(t, Mode(), "rest") + + SetHostURL("http://httpbin.org") + assertEqual(t, "http://httpbin.org", DefaultClient.HostURL) + + SetHeader(hdrContentTypeKey, jsonContentType) + SetHeaders(map[string]string{ + hdrUserAgentKey: "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) go-resty v0.1", + "X-Request-Id": strconv.FormatInt(time.Now().UnixNano(), 10), + }) + assertEqual(t, jsonContentType, DefaultClient.Header.Get(hdrContentTypeKey)) + + SetCookie(&http.Cookie{ + Name: "default-cookie", + Value: "This is cookie default-cookie value", + Path: "/", + Domain: "localhost", + MaxAge: 36000, + HttpOnly: true, + Secure: false, + }) + assertEqual(t, "default-cookie", DefaultClient.Cookies[0].Name) + + var cookies []*http.Cookie + cookies = append(cookies, &http.Cookie{ + Name: "default-cookie-1", + Value: "This is default-cookie 1 value", + Path: "/", + }) + cookies = append(cookies, &http.Cookie{ + Name: "default-cookie-2", + Value: "This is default-cookie 2 value", + Path: "/", + }) + SetCookies(cookies) + assertEqual(t, "default-cookie-1", DefaultClient.Cookies[1].Name) + assertEqual(t, "default-cookie-2", DefaultClient.Cookies[2].Name) + + SetQueryParam("test_param_1", "Param_1") + SetQueryParams(map[string]string{"test_param_2": "Param_2", "test_param_3": "Param_3"}) + assertEqual(t, "Param_3", DefaultClient.QueryParam.Get("test_param_3")) + + rTime := strconv.FormatInt(time.Now().UnixNano(), 10) + SetFormData(map[string]string{"r_time": rTime}) + assertEqual(t, rTime, DefaultClient.FormData.Get("r_time")) + + SetBasicAuth("myuser", "mypass") + assertEqual(t, "myuser", DefaultClient.UserInfo.Username) + + SetAuthToken("AC75BD37F019E08FBC594900518B4F7E") + assertEqual(t, "AC75BD37F019E08FBC594900518B4F7E", DefaultClient.Token) + + SetDisableWarn(true) + assertEqual(t, DefaultClient.DisableWarn, true) + + SetRetryCount(3) + assertEqual(t, 3, DefaultClient.RetryCount) + + rwt := time.Duration(1000) * time.Millisecond + SetRetryWaitTime(rwt) + assertEqual(t, rwt, DefaultClient.RetryWaitTime) + + mrwt := time.Duration(2) * time.Second + SetRetryMaxWaitTime(mrwt) + assertEqual(t, mrwt, DefaultClient.RetryMaxWaitTime) + + err := &AuthError{} + SetError(err) + if reflect.TypeOf(err) == DefaultClient.Error { + t.Error("SetError failed") + } + + SetTLSClientConfig(&tls.Config{InsecureSkipVerify: true}) + assertEqual(t, true, DefaultClient.transport.TLSClientConfig.InsecureSkipVerify) + + OnBeforeRequest(func(c *Client, r *Request) error { + c.Log.Println("I'm in Request middleware") + return nil // if it success + }) + OnAfterResponse(func(c *Client, r *Response) error { + c.Log.Println("I'm in Response middleware") + return nil // if it success + }) + + SetTimeout(time.Duration(5 * time.Second)) + 
SetRedirectPolicy(FlexibleRedirectPolicy(10), func(req *http.Request, via []*http.Request) error { + return errors.New("sample test redirect") + }) + SetContentLength(true) + + SetDebug(true) + assertEqual(t, DefaultClient.Debug, true) + + SetAllowGetMethodPayload(true) + assertEqual(t, DefaultClient.AllowGetMethodPayload, true) + + SetScheme("http") + assertEqual(t, DefaultClient.scheme, "http") + + SetCloseConnection(true) + assertEqual(t, DefaultClient.closeConnection, true) + + SetLogger(ioutil.Discard) +} + +func TestClientPreRequestHook(t *testing.T) { + SetPreRequestHook(func(c *Client, r *Request) error { + c.Log.Println("I'm in Pre-Request Hook") + return nil + }) + + SetPreRequestHook(func(c *Client, r *Request) error { + c.Log.Println("I'm Overwriting existing Pre-Request Hook") + return nil + }) +} + +func TestClientAllowsGetMethodPayload(t *testing.T) { + ts := createGetServer(t) + defer ts.Close() + + c := dc() + c.SetAllowGetMethodPayload(true) + c.SetPreRequestHook(func(*Client, *Request) error { return nil }) // for coverage + + payload := "test-payload" + resp, err := c.R().SetBody(payload).Get(ts.URL + "/get-method-payload-test") + + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) + assertEqual(t, payload, resp.String()) +} diff --git a/vendor/github.com/go-resty/resty/context17_test.go b/vendor/github.com/go-resty/resty/context17_test.go new file mode 100644 index 000000000..8bd7bf69b --- /dev/null +++ b/vendor/github.com/go-resty/resty/context17_test.go @@ -0,0 +1,15 @@ +// +build !go1.8 + +// Copyright (c) 2015-2016 Jeevanandam M (jeeva@myjeeva.com) +// 2016 Andrew Grigorev (https://github.com/ei-grad) +// All rights reserved. +// resty source code and usage is governed by a MIT style +// license that can be found in the LICENSE file. + +package resty + +import "strings" + +func errIsContextCanceled(err error) bool { + return strings.Contains(err.Error(), "request canceled") +} diff --git a/vendor/github.com/go-resty/resty/context18_test.go b/vendor/github.com/go-resty/resty/context18_test.go new file mode 100644 index 000000000..610546c6d --- /dev/null +++ b/vendor/github.com/go-resty/resty/context18_test.go @@ -0,0 +1,22 @@ +// +build go1.8 + +// Copyright (c) 2015-2016 Jeevanandam M (jeeva@myjeeva.com) +// 2016 Andrew Grigorev (https://github.com/ei-grad) +// All rights reserved. +// resty source code and usage is governed by a MIT style +// license that can be found in the LICENSE file. + +package resty + +import ( + "context" + "net/url" +) + +func errIsContextCanceled(err error) bool { + ue, ok := err.(*url.Error) + if !ok { + return false + } + return ue.Err == context.Canceled +} diff --git a/vendor/github.com/go-resty/resty/context_test.go b/vendor/github.com/go-resty/resty/context_test.go new file mode 100644 index 000000000..7a5226a68 --- /dev/null +++ b/vendor/github.com/go-resty/resty/context_test.go @@ -0,0 +1,199 @@ +// +build go1.7 + +// Copyright (c) 2015-2016 Jeevanandam M (jeeva@myjeeva.com) +// 2016 Andrew Grigorev (https://github.com/ei-grad) +// All rights reserved. +// resty source code and usage is governed by a MIT style +// license that can be found in the LICENSE file. + +package resty + +import ( + "context" + "net/http" + "strings" + "sync/atomic" + "testing" + "time" +) + +func TestSetContext(t *testing.T) { + ts := createGetServer(t) + defer ts.Close() + + resp, err := R(). + SetContext(context.Background()). 
+ Get(ts.URL + "/") + + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) + assertEqual(t, "200 OK", resp.Status()) + assertEqual(t, true, resp.Body() != nil) + assertEqual(t, "TestGet: text response", resp.String()) + + logResponse(t, resp) +} + +func TestSetContextWithError(t *testing.T) { + ts := createGetServer(t) + defer ts.Close() + + resp, err := dcr(). + SetContext(context.Background()). + Get(ts.URL + "/mypage") + + assertError(t, err) + assertEqual(t, http.StatusBadRequest, resp.StatusCode()) + assertEqual(t, "", resp.String()) + + logResponse(t, resp) +} + +func TestSetContextCancel(t *testing.T) { + ch := make(chan struct{}) + ts := createTestServer(func(w http.ResponseWriter, r *http.Request) { + defer func() { + ch <- struct{}{} // tell test request is finished + }() + t.Logf("Server: %v %v", r.Method, r.URL.Path) + ch <- struct{}{} + <-ch // wait for client to finish request + n, err := w.Write([]byte("TestSetContextCancel: response")) + // FIXME? test server doesn't handle request cancellation + t.Logf("Server: wrote %d bytes", n) + t.Logf("Server: err is %v ", err) + }) + defer ts.Close() + + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + <-ch // wait for server to start request handling + cancel() + }() + + _, err := R(). + SetContext(ctx). + Get(ts.URL + "/") + + ch <- struct{}{} // tell server to continue request handling + + <-ch // wait for server to finish request handling + + t.Logf("Error: %v", err) + if !errIsContextCanceled(err) { + t.Errorf("Got unexpected error: %v", err) + } +} + +func TestSetContextCancelRetry(t *testing.T) { + reqCount := 0 + ch := make(chan struct{}) + ts := createTestServer(func(w http.ResponseWriter, r *http.Request) { + reqCount++ + defer func() { + ch <- struct{}{} // tell test request is finished + }() + t.Logf("Server: %v %v", r.Method, r.URL.Path) + ch <- struct{}{} + <-ch // wait for client to finish request + n, err := w.Write([]byte("TestSetContextCancel: response")) + // FIXME? test server doesn't handle request cancellation + t.Logf("Server: wrote %d bytes", n) + t.Logf("Server: err is %v ", err) + }) + defer ts.Close() + + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + <-ch // wait for server to start request handling + cancel() + }() + + c := dc() + c.SetHTTPMode(). + SetTimeout(time.Duration(time.Second * 3)). + SetRetryCount(3) + + _, err := c.R(). + SetContext(ctx). + Get(ts.URL + "/") + + ch <- struct{}{} // tell server to continue request handling + + <-ch // wait for server to finish request handling + + t.Logf("Error: %v", err) + if !errIsContextCanceled(err) { + t.Errorf("Got unexpected error: %v", err) + } + + if reqCount != 1 { + t.Errorf("Request was retried %d times instead of 1", reqCount) + } +} + +func TestSetContextCancelWithError(t *testing.T) { + ch := make(chan struct{}) + ts := createTestServer(func(w http.ResponseWriter, r *http.Request) { + defer func() { + ch <- struct{}{} // tell test request is finished + }() + t.Logf("Server: %v %v", r.Method, r.URL.Path) + t.Log("Server: sending StatusBadRequest response") + w.WriteHeader(http.StatusBadRequest) + ch <- struct{}{} + <-ch // wait for client to finish request + n, err := w.Write([]byte("TestSetContextCancelWithError: response")) + // FIXME? 
test server doesn't handle request cancellation + t.Logf("Server: wrote %d bytes", n) + t.Logf("Server: err is %v ", err) + }) + defer ts.Close() + + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + <-ch // wait for server to start request handling + cancel() + }() + + _, err := R(). + SetContext(ctx). + Get(ts.URL + "/") + + ch <- struct{}{} // tell server to continue request handling + + <-ch // wait for server to finish request handling + + t.Logf("Error: %v", err) + if !errIsContextCanceled(err) { + t.Errorf("Got unexpected error: %v", err) + } +} + +func TestClientRetryWithSetContext(t *testing.T) { + var attemptctx int32 + ts := createTestServer(func(w http.ResponseWriter, r *http.Request) { + t.Logf("Method: %v", r.Method) + t.Logf("Path: %v", r.URL.Path) + attp := atomic.AddInt32(&attemptctx, 1) + if attp <= 3 { + time.Sleep(time.Second * 2) + } + _, _ = w.Write([]byte("TestClientRetry page")) + }) + defer ts.Close() + + c := dc() + c.SetHTTPMode(). + SetTimeout(time.Duration(time.Second * 1)). + SetRetryCount(3) + + _, err := c.R(). + SetContext(context.Background()). + Get(ts.URL + "/") + + assertEqual(t, true, strings.HasPrefix(err.Error(), "Get "+ts.URL+"/")) +} diff --git a/vendor/github.com/go-resty/resty/default.go b/vendor/github.com/go-resty/resty/default.go new file mode 100644 index 000000000..d76f33d74 --- /dev/null +++ b/vendor/github.com/go-resty/resty/default.go @@ -0,0 +1,283 @@ +// Copyright (c) 2015-2017 Jeevanandam M (jeeva@myjeeva.com), All rights reserved. +// resty source code and usage is governed by a MIT style +// license that can be found in the LICENSE file. + +package resty + +import ( + "crypto/tls" + "encoding/json" + "io" + "net/http" + "net/http/cookiejar" + "net/url" + "os" + "time" + + "golang.org/x/net/publicsuffix" +) + +// DefaultClient of resty +var DefaultClient *Client + +// New method creates a new go-resty client +func New() *Client { + cookieJar, _ := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List}) + + c := &Client{ + HostURL: "", + QueryParam: url.Values{}, + FormData: url.Values{}, + Header: http.Header{}, + UserInfo: nil, + Token: "", + Cookies: make([]*http.Cookie, 0), + Debug: false, + Log: getLogger(os.Stderr), + RetryCount: 0, + RetryWaitTime: defaultWaitTime, + RetryMaxWaitTime: defaultMaxWaitTime, + JSONMarshal: json.Marshal, + JSONUnmarshal: json.Unmarshal, + httpClient: &http.Client{Jar: cookieJar}, + transport: &http.Transport{}, + } + + c.httpClient.Transport = c.transport + + // Default redirect policy + c.SetRedirectPolicy(NoRedirectPolicy()) + + // default before request middlewares + c.beforeRequest = []func(*Client, *Request) error{ + parseRequestURL, + parseRequestHeader, + parseRequestBody, + createHTTPRequest, + addCredentials, + requestLogger, + } + + // user defined request middlewares + c.udBeforeRequest = []func(*Client, *Request) error{} + + // default after response middlewares + c.afterResponse = []func(*Client, *Response) error{ + responseLogger, + parseResponseBody, + saveResponseIntoFile, + } + + return c +} + +// R creates a new resty request object, it is used form a HTTP/RESTful request +// such as GET, POST, PUT, DELETE, HEAD, PATCH and OPTIONS. +func R() *Request { + return DefaultClient.R() +} + +// SetHostURL sets Host URL. See `Client.SetHostURL for more information. +func SetHostURL(url string) *Client { + return DefaultClient.SetHostURL(url) +} + +// SetHeader sets single header. See `Client.SetHeader` for more information. 
+func SetHeader(header, value string) *Client { + return DefaultClient.SetHeader(header, value) +} + +// SetHeaders sets multiple headers. See `Client.SetHeaders` for more information. +func SetHeaders(headers map[string]string) *Client { + return DefaultClient.SetHeaders(headers) +} + +// SetCookieJar sets custom http.CookieJar. See `Client.SetCookieJar` for more information. +func SetCookieJar(jar http.CookieJar) *Client { + return DefaultClient.SetCookieJar(jar) +} + +// SetCookie sets single cookie object. See `Client.SetCookie` for more information. +func SetCookie(hc *http.Cookie) *Client { + return DefaultClient.SetCookie(hc) +} + +// SetCookies sets multiple cookie object. See `Client.SetCookies` for more information. +func SetCookies(cs []*http.Cookie) *Client { + return DefaultClient.SetCookies(cs) +} + +// SetQueryParam method sets single parameter and its value. See `Client.SetQueryParam` for more information. +func SetQueryParam(param, value string) *Client { + return DefaultClient.SetQueryParam(param, value) +} + +// SetQueryParams method sets multiple parameters and its value. See `Client.SetQueryParams` for more information. +func SetQueryParams(params map[string]string) *Client { + return DefaultClient.SetQueryParams(params) +} + +// SetFormData method sets Form parameters and its values. See `Client.SetFormData` for more information. +func SetFormData(data map[string]string) *Client { + return DefaultClient.SetFormData(data) +} + +// SetBasicAuth method sets the basic authentication header. See `Client.SetBasicAuth` for more information. +func SetBasicAuth(username, password string) *Client { + return DefaultClient.SetBasicAuth(username, password) +} + +// SetAuthToken method sets bearer auth token header. See `Client.SetAuthToken` for more information. +func SetAuthToken(token string) *Client { + return DefaultClient.SetAuthToken(token) +} + +// OnBeforeRequest method sets request middleware. See `Client.OnBeforeRequest` for more information. +func OnBeforeRequest(m func(*Client, *Request) error) *Client { + return DefaultClient.OnBeforeRequest(m) +} + +// OnAfterResponse method sets response middleware. See `Client.OnAfterResponse` for more information. +func OnAfterResponse(m func(*Client, *Response) error) *Client { + return DefaultClient.OnAfterResponse(m) +} + +// SetPreRequestHook method sets the pre-request hook. See `Client.SetPreRequestHook` for more information. +func SetPreRequestHook(h func(*Client, *Request) error) *Client { + return DefaultClient.SetPreRequestHook(h) +} + +// SetDebug method enables the debug mode. See `Client.SetDebug` for more information. +func SetDebug(d bool) *Client { + return DefaultClient.SetDebug(d) +} + +// SetAllowGetMethodPayload method allows the GET method with payload. See `Client.SetAllowGetMethodPayload` for more information. +func SetAllowGetMethodPayload(a bool) *Client { + return DefaultClient.SetAllowGetMethodPayload(a) +} + +// SetRetryCount method sets the retry count. See `Client.SetRetryCount` for more information. +func SetRetryCount(count int) *Client { + return DefaultClient.SetRetryCount(count) +} + +// SetRetryWaitTime method sets the retry wait time. See `Client.SetRetryWaitTime` for more information. +func SetRetryWaitTime(waitTime time.Duration) *Client { + return DefaultClient.SetRetryWaitTime(waitTime) +} + +// SetRetryMaxWaitTime method sets the retry max wait time. See `Client.SetRetryMaxWaitTime` for more information. 
+func SetRetryMaxWaitTime(maxWaitTime time.Duration) *Client {
+	return DefaultClient.SetRetryMaxWaitTime(maxWaitTime)
+}
+
+// AddRetryCondition method appends a check function for retry. See `Client.AddRetryCondition` for more information.
+func AddRetryCondition(condition RetryConditionFunc) *Client {
+	return DefaultClient.AddRetryCondition(condition)
+}
+
+// SetDisableWarn method disables warnings coming from the `go-resty` client. See `Client.SetDisableWarn` for more information.
+func SetDisableWarn(d bool) *Client {
+	return DefaultClient.SetDisableWarn(d)
+}
+
+// SetLogger method sets the given writer for logging. See `Client.SetLogger` for more information.
+func SetLogger(w io.Writer) *Client {
+	return DefaultClient.SetLogger(w)
+}
+
+// SetContentLength method enables the `Content-Length` value. See `Client.SetContentLength` for more information.
+func SetContentLength(l bool) *Client {
+	return DefaultClient.SetContentLength(l)
+}
+
+// SetError method is to register the global or client common `Error` object. See `Client.SetError` for more information.
+func SetError(err interface{}) *Client {
+	return DefaultClient.SetError(err)
+}
+
+// SetRedirectPolicy method sets the client redirect policy. See `Client.SetRedirectPolicy` for more information.
+func SetRedirectPolicy(policies ...interface{}) *Client {
+	return DefaultClient.SetRedirectPolicy(policies...)
+}
+
+// SetHTTPMode method sets the go-resty mode to HTTP. See `Client.SetMode` for more information.
+func SetHTTPMode() *Client {
+	return DefaultClient.SetHTTPMode()
+}
+
+// SetRESTMode method sets the go-resty mode to RESTful. See `Client.SetMode` for more information.
+func SetRESTMode() *Client {
+	return DefaultClient.SetRESTMode()
+}
+
+// Mode method returns the current client mode. See `Client.Mode` for more information.
+func Mode() string {
+	return DefaultClient.Mode()
+}
+
+// SetTLSClientConfig method sets TLSClientConfig for the underlying client Transport. See `Client.SetTLSClientConfig` for more information.
+func SetTLSClientConfig(config *tls.Config) *Client {
+	return DefaultClient.SetTLSClientConfig(config)
+}
+
+// SetTimeout method sets the timeout for requests. See `Client.SetTimeout` for more information.
+func SetTimeout(timeout time.Duration) *Client {
+	return DefaultClient.SetTimeout(timeout)
+}
+
+// SetProxy method sets the proxy for requests. See `Client.SetProxy` for more information.
+func SetProxy(proxyURL string) *Client {
+	return DefaultClient.SetProxy(proxyURL)
+}
+
+// RemoveProxy method removes the proxy configuration. See `Client.RemoveProxy` for more information.
+func RemoveProxy() *Client {
+	return DefaultClient.RemoveProxy()
+}
+
+// SetCertificates method helps to set client certificates into resty conveniently.
+// See `Client.SetCertificates` for more information and example.
+func SetCertificates(certs ...tls.Certificate) *Client {
+	return DefaultClient.SetCertificates(certs...)
+}
+
+// SetRootCertificate method helps to add one or more root certificates into the resty client.
+// See `Client.SetRootCertificate` for more information.
+func SetRootCertificate(pemFilePath string) *Client {
+	return DefaultClient.SetRootCertificate(pemFilePath)
+}
+
+// SetOutputDirectory method sets the output directory. See `Client.SetOutputDirectory` for more information.
+func SetOutputDirectory(dirPath string) *Client {
+	return DefaultClient.SetOutputDirectory(dirPath)
+}
+
+// SetTransport method sets a custom *http.Transport in the resty client.
+// See `Client.SetTransport` for more information.
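+// A sketch of swapping in a custom transport; the transport settings shown
+// are illustrative only:
+//	resty.SetTransport(&http.Transport{MaxIdleConnsPerHost: 10})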
+func SetTransport(transport *http.Transport) *Client {
+	return DefaultClient.SetTransport(transport)
+}
+
+// SetScheme method sets a custom scheme in the resty client.
+// See `Client.SetScheme` for more information.
+func SetScheme(scheme string) *Client {
+	return DefaultClient.SetScheme(scheme)
+}
+
+// SetCloseConnection method sets the close connection value in the resty client.
+// See `Client.SetCloseConnection` for more information.
+func SetCloseConnection(close bool) *Client {
+	return DefaultClient.SetCloseConnection(close)
+}
+
+// IsProxySet method returns true if a proxy is set on the client, otherwise false.
+// See `Client.IsProxySet` for more information.
+func IsProxySet() bool {
+	return DefaultClient.IsProxySet()
+}
+
+func init() {
+	DefaultClient = New()
+}
diff --git a/vendor/github.com/go-resty/resty/example_test.go b/vendor/github.com/go-resty/resty/example_test.go
new file mode 100644
index 000000000..75ee5b5db
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/example_test.go
@@ -0,0 +1,217 @@
+// Copyright (c) 2015-2017 Jeevanandam M. (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty_test
+
+import (
+	"crypto/tls"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"net/http"
+	"os"
+	"strconv"
+	"time"
+
+	"golang.org/x/net/proxy"
+
+	"github.com/go-resty/resty"
+)
+
+type DropboxError struct {
+	Error string
+}
+type AuthSuccess struct {
+	/* variables */
+}
+type AuthError struct {
+	/* variables */
+}
+type Article struct {
+	Title   string
+	Content string
+	Author  string
+	Tags    []string
+}
+type Error struct {
+	/* variables */
+}
+
+//
+// Package Level examples
+//
+
+func Example_get() {
+	resp, err := resty.R().Get("http://httpbin.org/get")
+
+	fmt.Printf("\nError: %v", err)
+	fmt.Printf("\nResponse Status Code: %v", resp.StatusCode())
+	fmt.Printf("\nResponse Status: %v", resp.Status())
+	fmt.Printf("\nResponse Body: %v", resp)
+	fmt.Printf("\nResponse Time: %v", resp.Time())
+	fmt.Printf("\nResponse Received At: %v", resp.ReceivedAt())
+}
+
+func Example_enhancedGet() {
+	resp, err := resty.R().
+		SetQueryParams(map[string]string{
+			"page_no": "1",
+			"limit": "20",
+			"sort": "name",
+			"order": "asc",
+			"random": strconv.FormatInt(time.Now().Unix(), 10),
+		}).
+		SetHeader("Accept", "application/json").
+		SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F").
+		Get("/search_result")
+
+	printOutput(resp, err)
+}
+
+func Example_post() {
+	// POST JSON string
+	// No need to set content type, if you have a client level setting
+	resp, err := resty.R().
+		SetHeader("Content-Type", "application/json").
+		SetBody(`{"username":"testuser", "password":"testpass"}`).
+		SetResult(AuthSuccess{}). // or SetResult(&AuthSuccess{}).
+		Post("https://myapp.com/login")
+
+	printOutput(resp, err)
+
+	// POST []byte array
+	// No need to set content type, if you have a client level setting
+	resp1, err1 := resty.R().
+		SetHeader("Content-Type", "application/json").
+		SetBody([]byte(`{"username":"testuser", "password":"testpass"}`)).
+		SetResult(AuthSuccess{}). // or SetResult(&AuthSuccess{}).
+		Post("https://myapp.com/login")
+
+	printOutput(resp1, err1)
+
+	// POST Struct, default is JSON content type. No need to set one
+	resp2, err2 := resty.R().
+		SetBody(resty.User{Username: "testuser", Password: "testpass"}).
+		SetResult(&AuthSuccess{}). // or SetResult(AuthSuccess{}).
+		SetError(&AuthError{}). // or SetError(AuthError{}).
+ Post("https://myapp.com/login") + + printOutput(resp2, err2) + + // POST Map, default is JSON content type. No need to set one + resp3, err3 := resty.R(). + SetBody(map[string]interface{}{"username": "testuser", "password": "testpass"}). + SetResult(&AuthSuccess{}). // or SetResult(AuthSuccess{}). + SetError(&AuthError{}). // or SetError(AuthError{}). + Post("https://myapp.com/login") + + printOutput(resp3, err3) +} + +func Example_dropboxUpload() { + // For example: upload file to Dropbox + // POST of raw bytes for file upload. + file, _ := os.Open("/Users/jeeva/mydocument.pdf") + fileBytes, _ := ioutil.ReadAll(file) + + // See we are not setting content-type header, since go-resty automatically detects Content-Type for you + resp, err := resty.R(). + SetBody(fileBytes). // resty autodetects content type + SetContentLength(true). // Dropbox expects this value + SetAuthToken(""). + SetError(DropboxError{}). + Post("https://content.dropboxapi.com/1/files_put/auto/resty/mydocument.pdf") // you can use PUT method too dropbox supports it + + // Output print + fmt.Printf("\nError: %v\n", err) + fmt.Printf("Time: %v\n", resp.Time()) + fmt.Printf("Body: %v\n", resp) +} + +func Example_put() { + // Just one sample of PUT, refer POST for more combination + // request goes as JSON content type + // No need to set auth token, error, if you have client level settings + resp, err := resty.R(). + SetBody(Article{ + Title: "go-resty", + Content: "This is my article content, oh ya!", + Author: "Jeevanandam M", + Tags: []string{"article", "sample", "resty"}, + }). + SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD"). + SetError(&Error{}). // or SetError(Error{}). + Put("https://myapp.com/article/1234") + + printOutput(resp, err) +} + +func Example_clientCertificates() { + // Parsing public/private key pair from a pair of files. The files must contain PEM encoded data. + cert, err := tls.LoadX509KeyPair("certs/client.pem", "certs/client.key") + if err != nil { + log.Fatalf("ERROR client certificate: %s", err) + } + + resty.SetCertificates(cert) +} + +func Example_customRootCertificate() { + resty.SetRootCertificate("/path/to/root/pemFile.pem") +} + +// +// top level method examples +// + +func ExampleNew() { + // Creating client1 + client1 := resty.New() + resp1, err1 := client1.R().Get("http://httpbin.org/get") + fmt.Println(resp1, err1) + + // Creating client2 + client2 := resty.New() + resp2, err2 := client2.R().Get("http://httpbin.org/get") + fmt.Println(resp2, err2) +} + +// +// Client object methods +// + +func ExampleClient_SetCertificates() { + // Parsing public/private key pair from a pair of files. The files must contain PEM encoded data. 
+	cert, err := tls.LoadX509KeyPair("certs/client.pem", "certs/client.key")
+	if err != nil {
+		log.Fatalf("ERROR client certificate: %s", err)
+	}
+
+	resty.SetCertificates(cert)
+}
+
+//
+// Resty Socks5 Proxy request
+//
+
+func Example_socks5Proxy() {
+	// create a dialer
+	dialer, err := proxy.SOCKS5("tcp", "127.0.0.1:9150", nil, proxy.Direct)
+	if err != nil {
+		log.Fatalf("Unable to obtain proxy dialer: %v\n", err)
+	}
+
+	// create a transport
+	ptransport := &http.Transport{Dial: dialer.Dial}
+
+	// set transport into resty
+	resty.SetTransport(ptransport)
+
+	resp, err := resty.R().Get("http://check.torproject.org")
+	fmt.Println(err, resp)
+}
+
+func printOutput(resp *resty.Response, err error) {
+	fmt.Println(resp, err)
+}
diff --git a/vendor/github.com/go-resty/resty/middleware.go b/vendor/github.com/go-resty/resty/middleware.go
new file mode 100644
index 000000000..84a34d865
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/middleware.go
@@ -0,0 +1,414 @@
+// Copyright (c) 2015-2017 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+	"bytes"
+	"encoding/xml"
+	"errors"
+	"fmt"
+	"io"
+	"mime/multipart"
+	"net/http"
+	"net/url"
+	"os"
+	"path/filepath"
+	"reflect"
+	"strings"
+)
+
+//
+// Request Middleware(s)
+//
+
+func parseRequestURL(c *Client, r *Request) error {
+	// Parsing request URL
+	reqURL, err := url.Parse(r.URL)
+	if err != nil {
+		return err
+	}
+
+	// If Request.URL is a relative path then c.HostURL is prepended to it;
+	// otherwise Request.URL is used as-is
+	if !reqURL.IsAbs() {
+		if !strings.HasPrefix(r.URL, "/") {
+			r.URL = "/" + r.URL
+		}
+
+		reqURL, err = url.Parse(c.HostURL + r.URL)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Adding Query Param
+	query := reqURL.Query()
+	for k, v := range c.QueryParam {
+		for _, iv := range v {
+			query.Add(k, iv)
+		}
+	}
+
+	for k, v := range r.QueryParam {
+		// remove the client-level query param for this key,
+		// since an override happens for that key in the request
+		query.Del(k)
+
+		for _, iv := range v {
+			query.Add(k, iv)
+		}
+	}
+
+	reqURL.RawQuery = query.Encode()
+	r.URL = reqURL.String()
+
+	return nil
+}
+
+func parseRequestHeader(c *Client, r *Request) error {
+	hdr := http.Header{}
+	for k := range c.Header {
+		hdr.Set(k, c.Header.Get(k))
+	}
+	for k := range r.Header {
+		hdr.Set(k, r.Header.Get(k))
+	}
+
+	if IsStringEmpty(hdr.Get(hdrUserAgentKey)) {
+		hdr.Set(hdrUserAgentKey, fmt.Sprintf(hdrUserAgentValue, Version))
+	}
+
+	if IsStringEmpty(hdr.Get(hdrAcceptKey)) && !IsStringEmpty(hdr.Get(hdrContentTypeKey)) {
+		hdr.Set(hdrAcceptKey, hdr.Get(hdrContentTypeKey))
+	}
+
+	r.Header = hdr
+
+	return nil
+}
+
+func parseRequestBody(c *Client, r *Request) (err error) {
+	if isPayloadSupported(r.Method, c.AllowGetMethodPayload) {
+		// Handling Multipart
+		if r.isMultiPart && !(r.Method == MethodPatch) {
+			if err = handleMultipart(c, r); err != nil {
+				return
+			}
+
+			goto CL
+		}
+
+		// Handling Form Data
+		if len(c.FormData) > 0 || len(r.FormData) > 0 {
+			handleFormData(c, r)
+
+			goto CL
+		}
+
+		// Handling Request body
+		if r.Body != nil {
+			handleContentType(c, r)
+
+			if err = handleRequestBody(c, r); err != nil {
+				return
+			}
+		}
+	} else {
+		r.Header.Del(hdrContentTypeKey)
+	}
+
+CL:
+	// by default resty won't set content length; you can enable it if you want to :)
+	if (c.setContentLength || r.setContentLength) && r.bodyBuf != nil {
+		r.Header.Set(hdrContentLengthKey,
fmt.Sprintf("%d", r.bodyBuf.Len())) + } + + return +} + +func createHTTPRequest(c *Client, r *Request) (err error) { + if r.bodyBuf == nil { + r.RawRequest, err = http.NewRequest(r.Method, r.URL, nil) + } else { + r.RawRequest, err = http.NewRequest(r.Method, r.URL, r.bodyBuf) + } + + if err != nil { + return + } + + // Assign close connection option + r.RawRequest.Close = c.closeConnection + + // Add headers into http request + r.RawRequest.Header = r.Header + + // Add cookies into http request + for _, cookie := range c.Cookies { + r.RawRequest.AddCookie(cookie) + } + + // it's for non-http scheme option + if r.RawRequest.URL != nil && r.RawRequest.URL.Scheme == "" { + r.RawRequest.URL.Scheme = c.scheme + r.RawRequest.URL.Host = r.URL + } + + // Use context if it was specified + r.addContextIfAvailable() + + return +} + +func addCredentials(c *Client, r *Request) error { + var isBasicAuth bool + // Basic Auth + if r.UserInfo != nil { // takes precedence + r.RawRequest.SetBasicAuth(r.UserInfo.Username, r.UserInfo.Password) + isBasicAuth = true + } else if c.UserInfo != nil { + r.RawRequest.SetBasicAuth(c.UserInfo.Username, c.UserInfo.Password) + isBasicAuth = true + } + + if !c.DisableWarn { + if isBasicAuth && !strings.HasPrefix(r.URL, "https") { + c.Log.Println("WARNING - Using Basic Auth in HTTP mode is not secure.") + } + } + + // Token Auth + if !IsStringEmpty(r.Token) { // takes precedence + r.RawRequest.Header.Set(hdrAuthorizationKey, "Bearer "+r.Token) + } else if !IsStringEmpty(c.Token) { + r.RawRequest.Header.Set(hdrAuthorizationKey, "Bearer "+c.Token) + } + + return nil +} + +func requestLogger(c *Client, r *Request) error { + if c.Debug { + rr := r.RawRequest + c.Log.Println() + c.disableLogPrefix() + c.Log.Println("---------------------- REQUEST LOG -----------------------") + c.Log.Printf("%s %s %s\n", r.Method, rr.URL.RequestURI(), rr.Proto) + c.Log.Printf("HOST : %s", rr.URL.Host) + c.Log.Println("HEADERS:") + for h, v := range rr.Header { + c.Log.Printf("%25s: %v", h, strings.Join(v, ", ")) + } + c.Log.Printf("BODY :\n%v", r.fmtBodyString()) + c.Log.Println("----------------------------------------------------------") + c.enableLogPrefix() + } + + return nil +} + +// +// Response Middleware(s) +// + +func responseLogger(c *Client, res *Response) error { + if c.Debug { + c.Log.Println() + c.disableLogPrefix() + c.Log.Println("---------------------- RESPONSE LOG -----------------------") + c.Log.Printf("STATUS : %s", res.Status()) + c.Log.Printf("RECEIVED AT : %v", res.ReceivedAt()) + c.Log.Printf("RESPONSE TIME : %v", res.Time()) + c.Log.Println("HEADERS:") + for h, v := range res.Header() { + c.Log.Printf("%30s: %v", h, strings.Join(v, ", ")) + } + if res.Request.isSaveResponse { + c.Log.Printf("BODY :\n***** RESPONSE WRITTEN INTO FILE *****") + } else { + c.Log.Printf("BODY :\n%v", res.fmtBodyString()) + } + c.Log.Println("----------------------------------------------------------") + c.enableLogPrefix() + } + + return nil +} + +func parseResponseBody(c *Client, res *Response) (err error) { + // Handles only JSON or XML content type + ct := res.Header().Get(hdrContentTypeKey) + if IsJSONType(ct) || IsXMLType(ct) { + // Considered as Result + if res.StatusCode() > 199 && res.StatusCode() < 300 { + if res.Request.Result != nil { + err = Unmarshalc(c, ct, res.body, res.Request.Result) + return + } + } + + // Considered as Error + if res.StatusCode() > 399 { + // global error interface + if res.Request.Error == nil && c.Error != nil { + res.Request.Error = 
reflect.New(c.Error).Interface()
+			}
+
+			if res.Request.Error != nil {
+				err = Unmarshalc(c, ct, res.body, res.Request.Error)
+			}
+		}
+	}
+
+	return
+}
+
+func handleMultipart(c *Client, r *Request) (err error) {
+	r.bodyBuf = getBuffer()
+	w := multipart.NewWriter(r.bodyBuf)
+
+	for k, v := range c.FormData {
+		for _, iv := range v {
+			if err = w.WriteField(k, iv); err != nil {
+				return err
+			}
+		}
+	}
+
+	for k, v := range r.FormData {
+		for _, iv := range v {
+			if strings.HasPrefix(k, "@") { // file
+				err = addFile(w, k[1:], iv)
+				if err != nil {
+					return
+				}
+			} else { // form value
+				if err = w.WriteField(k, iv); err != nil {
+					return err
+				}
+			}
+		}
+	}
+
+	// #21 - adding io.Reader support
+	if len(r.multipartFiles) > 0 {
+		for _, f := range r.multipartFiles {
+			err = addFileReader(w, f)
+			if err != nil {
+				return
+			}
+		}
+	}
+
+	r.Header.Set(hdrContentTypeKey, w.FormDataContentType())
+	err = w.Close()
+
+	return
+}
+
+func handleFormData(c *Client, r *Request) {
+	formData := url.Values{}
+
+	for k, v := range c.FormData {
+		for _, iv := range v {
+			formData.Add(k, iv)
+		}
+	}
+
+	for k, v := range r.FormData {
+		// remove the client-level form data field for this key,
+		// since an override happens for that key in the request
+		formData.Del(k)
+
+		for _, iv := range v {
+			formData.Add(k, iv)
+		}
+	}
+
+	r.bodyBuf = bytes.NewBuffer([]byte(formData.Encode()))
+	r.Header.Set(hdrContentTypeKey, formContentType)
+	r.isFormData = true
+}
+
+func handleContentType(c *Client, r *Request) {
+	contentType := r.Header.Get(hdrContentTypeKey)
+	if IsStringEmpty(contentType) {
+		contentType = DetectContentType(r.Body)
+		r.Header.Set(hdrContentTypeKey, contentType)
+	}
+}
+
+func handleRequestBody(c *Client, r *Request) (err error) {
+	var bodyBytes []byte
+	contentType := r.Header.Get(hdrContentTypeKey)
+	kind := kindOf(r.Body)
+	r.bodyBuf = nil
+
+	if reader, ok := r.Body.(io.Reader); ok {
+		r.bodyBuf = getBuffer()
+		_, err = r.bodyBuf.ReadFrom(reader)
+	} else if b, ok := r.Body.([]byte); ok {
+		bodyBytes = b
+	} else if s, ok := r.Body.(string); ok {
+		bodyBytes = []byte(s)
+	} else if IsJSONType(contentType) &&
+		(kind == reflect.Struct || kind == reflect.Map || kind == reflect.Slice) {
+		bodyBytes, err = c.JSONMarshal(r.Body)
+	} else if IsXMLType(contentType) && (kind == reflect.Struct) {
+		bodyBytes, err = xml.Marshal(r.Body)
+	}
+
+	if bodyBytes == nil && r.bodyBuf == nil {
+		err = errors.New("Unsupported 'Body' type/value")
+	}
+
+	// if there were any errors during body bytes handling, return them
+	if err != nil {
+		return
+	}
+
+	// []byte into Buffer
+	if bodyBytes != nil && r.bodyBuf == nil {
+		r.bodyBuf = bytes.NewBuffer(bodyBytes)
+	}
+
+	return
+}
+
+func saveResponseIntoFile(c *Client, res *Response) error {
+	if res.Request.isSaveResponse {
+		file := ""
+
+		if len(c.outputDirectory) > 0 && !filepath.IsAbs(res.Request.outputFile) {
+			file += c.outputDirectory + string(filepath.Separator)
+		}
+
+		file = filepath.Clean(file + res.Request.outputFile)
+		if err := createDirectory(filepath.Dir(file)); err != nil {
+			return err
+		}
+
+		outFile, err := os.Create(file)
+		if err != nil {
+			return err
+		}
+		defer func() {
+			_ = outFile.Close()
+		}()
+
+		// io.Copy reads in chunks of at most 32 KB, so it also suits large file downloads
+		defer func() {
+			_ = res.RawResponse.Body.Close()
+		}()
+		written, err := io.Copy(outFile, res.RawResponse.Body)
+		if err != nil {
+			return err
+		}
+
+		res.size = written
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/go-resty/resty/redirect.go b/vendor/github.com/go-resty/resty/redirect.go
new file mode 100644
index 000000000..69e095ecc
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/redirect.go
@@ -0,0 +1,99 @@
+// Copyright (c) 2015-2017 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"net/http"
+	"strings"
+)
+
+type (
+	// RedirectPolicy to regulate the redirects in the resty client.
+	// Objects implementing the RedirectPolicy interface can be registered with the client.
+	//
+	// The Apply function should return nil to continue the redirect journey; otherwise
+	// it returns an error to stop the redirect.
+	RedirectPolicy interface {
+		Apply(req *http.Request, via []*http.Request) error
+	}
+
+	// The RedirectPolicyFunc type is an adapter to allow the use of ordinary functions as RedirectPolicy.
+	// If f is a function with the appropriate signature, RedirectPolicyFunc(f) is a RedirectPolicy object that calls f.
+	RedirectPolicyFunc func(*http.Request, []*http.Request) error
)
+
+// Apply calls f(req, via).
+func (f RedirectPolicyFunc) Apply(req *http.Request, via []*http.Request) error {
+	return f(req, via)
+}
+
+// NoRedirectPolicy is used to disable redirects in the HTTP client
+//	resty.SetRedirectPolicy(NoRedirectPolicy())
+func NoRedirectPolicy() RedirectPolicy {
+	return RedirectPolicyFunc(func(req *http.Request, via []*http.Request) error {
+		return errors.New("Auto redirect is disabled")
+	})
+}
+
+// FlexibleRedirectPolicy is a convenient method to create a redirect policy
+// that allows the given number of redirects in the HTTP client.
+//	resty.SetRedirectPolicy(FlexibleRedirectPolicy(20))
+func FlexibleRedirectPolicy(noOfRedirect int) RedirectPolicy {
+	return RedirectPolicyFunc(func(req *http.Request, via []*http.Request) error {
+		if len(via) >= noOfRedirect {
+			return fmt.Errorf("Stopped after %d redirects", noOfRedirect)
+		}
+
+		checkHostAndAddHeaders(req, via[0])
+
+		return nil
+	})
+}
+
+// DomainCheckRedirectPolicy is a convenient method to define a domain-name redirect rule in the resty client.
+// Redirects are allowed only for the hosts mentioned in the policy.
+//	resty.SetRedirectPolicy(DomainCheckRedirectPolicy("host1.com", "host2.org", "host3.net"))
+func DomainCheckRedirectPolicy(hostnames ...string) RedirectPolicy {
+	hosts := make(map[string]bool)
+	for _, h := range hostnames {
+		hosts[strings.ToLower(h)] = true
+	}
+
+	fn := RedirectPolicyFunc(func(req *http.Request, via []*http.Request) error {
+		if ok := hosts[getHostname(req.URL.Host)]; !ok {
+			return errors.New("Redirect is not allowed as per DomainCheckRedirectPolicy")
+		}
+
+		return nil
+	})
+
+	return fn
+}
+
+func getHostname(host string) (hostname string) {
+	if strings.Index(host, ":") > 0 {
+		host, _, _ = net.SplitHostPort(host)
+	}
+	hostname = strings.ToLower(host)
+	return
+}
+
+// By default Go does not copy request headers on a redirect. Following the
+// discussion in the thread https://github.com/golang/go/issues/4800,
+// go-resty adds all the headers during a redirect for the same host.
+func checkHostAndAddHeaders(cur *http.Request, pre *http.Request) {
+	curHostname := getHostname(cur.URL.Host)
+	preHostname := getHostname(pre.URL.Host)
+	if strings.EqualFold(curHostname, preHostname) {
+		for key, val := range pre.Header {
+			cur.Header[key] = val
+		}
+	} else { // only the library User-Agent header is added
+		cur.Header.Set(hdrUserAgentKey, fmt.Sprintf(hdrUserAgentValue, Version))
+	}
+}
diff --git a/vendor/github.com/go-resty/resty/request.go b/vendor/github.com/go-resty/resty/request.go
new file mode 100644
index 000000000..7335cbccb
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/request.go
@@ -0,0 +1,496 @@
+// Copyright (c) 2015-2017 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"encoding/xml"
+	"fmt"
+	"io"
+	"net"
+	"net/url"
+	"reflect"
+	"strings"
+)
+
+// SRVRecord holds the data to query the SRV record for the given service
+type SRVRecord struct {
+	Service string
+	Domain string
+}
+
+// SetHeader method sets a single header field and its value in the current request.
+// Example: To set `Content-Type` and `Accept` as `application/json`.
+//	resty.R().
+//		SetHeader("Content-Type", "application/json").
+//		SetHeader("Accept", "application/json")
+//
+// Also you can override a header value that was set at the client instance level.
+//
+func (r *Request) SetHeader(header, value string) *Request {
+	r.Header.Set(header, value)
+	return r
+}
+
+// SetHeaders method sets multiple header fields and their values at one go in the current request.
+// Example: To set `Content-Type` and `Accept` as `application/json`
+//
+//	resty.R().
+//		SetHeaders(map[string]string{
+//			"Content-Type": "application/json",
+//			"Accept": "application/json",
+//		})
+// Also you can override header values that were set at the client instance level.
+//
+func (r *Request) SetHeaders(headers map[string]string) *Request {
+	for h, v := range headers {
+		r.SetHeader(h, v)
+	}
+
+	return r
+}
+
+// SetQueryParam method sets a single parameter and its value in the current request.
+// It will be formed as a query string for the request.
+// Example: `search=kitchen%20papers&size=large` in the URL after the `?` mark.
+//	resty.R().
+//		SetQueryParam("search", "kitchen papers").
+// SetQueryParam("size", "large") +// Also you can override query params value, which was set at client instance level +// +func (r *Request) SetQueryParam(param, value string) *Request { + r.QueryParam.Set(param, value) + return r +} + +// SetQueryParams method sets multiple parameters and its values at one go in the current request. +// It will be formed as query string for the request. +// Example: `search=kitchen%20papers&size=large` in the URL after `?` mark. +// resty.R(). +// SetQueryParams(map[string]string{ +// "search": "kitchen papers", +// "size": "large", +// }) +// Also you can override query params value, which was set at client instance level +// +func (r *Request) SetQueryParams(params map[string]string) *Request { + for p, v := range params { + r.SetQueryParam(p, v) + } + + return r +} + +// SetMultiValueQueryParams method appends multiple parameters with multi-value +// at one go in the current request. It will be formed as query string for the request. +// Example: `status=pending&status=approved&status=open` in the URL after `?` mark. +// resty.R(). +// SetMultiValueQueryParams(url.Values{ +// "status": []string{"pending", "approved", "open"}, +// }) +// Also you can override query params value, which was set at client instance level +// +func (r *Request) SetMultiValueQueryParams(params url.Values) *Request { + for p, v := range params { + for _, pv := range v { + r.QueryParam.Add(p, pv) + } + } + + return r +} + +// SetQueryString method provides ability to use string as an input to set URL query string for the request. +// +// Using String as an input +// resty.R(). +// SetQueryString("productId=232&template=fresh-sample&cat=resty&source=google&kw=buy a lot more") +// +func (r *Request) SetQueryString(query string) *Request { + values, err := url.ParseQuery(strings.TrimSpace(query)) + if err == nil { + for k := range values { + r.QueryParam.Add(k, values.Get(k)) + } + } else { + r.client.Log.Printf("ERROR [%v]", err) + } + return r +} + +// SetFormData method sets Form parameters and their values in the current request. +// It's applicable only HTTP method `POST` and `PUT` and requests content type would be set as +// `application/x-www-form-urlencoded`. +// resty.R(). +// SetFormData(map[string]string{ +// "access_token": "BC594900-518B-4F7E-AC75-BD37F019E08F", +// "user_id": "3455454545", +// }) +// Also you can override form data value, which was set at client instance level +// +func (r *Request) SetFormData(data map[string]string) *Request { + for k, v := range data { + r.FormData.Set(k, v) + } + + return r +} + +// SetMultiValueFormData method appends multiple form parameters with multi-value +// at one go in the current request. +// resty.R(). +// SetMultiValueFormData(url.Values{ +// "search_criteria": []string{"book", "glass", "pencil"}, +// }) +// Also you can override form data value, which was set at client instance level +// +func (r *Request) SetMultiValueFormData(params url.Values) *Request { + for k, v := range params { + for _, kv := range v { + r.FormData.Add(k, kv) + } + } + + return r +} + +// SetBody method sets the request body for the request. It supports various realtime need easy. +// We can say its quite handy or powerful. Supported request body data types is `string`, `[]byte`, +// `struct` and `map`. Body value can be pointer or non-pointer. Automatic marshalling +// for JSON and XML content type, if it is `struct` or `map`. +// +// Example: +// +// Struct as a body input, based on content type, it will be marshalled. +// resty.R(). 
+//		SetBody(User{
+//			Username: "jeeva@myjeeva.com",
+//			Password: "welcome2resty",
+//		})
+//
+// Map as a body input, based on content type, it will be marshalled.
+//	resty.R().
+//		SetBody(map[string]interface{}{
+//			"username": "jeeva@myjeeva.com",
+//			"password": "welcome2resty",
+//			"address": &Address{
+//				Address1: "1111 This is my street",
+//				Address2: "Apt 201",
+//				City: "My City",
+//				State: "My State",
+//				ZipCode: 00000,
+//			},
+//		})
+//
+// String as a body input. Suitable for any need for a string input.
+//	resty.R().
+//		SetBody(`{
+//			"username": "jeeva@getrightcare.com",
+//			"password": "admin"
+//		}`)
+//
+// []byte as a body input. Suitable for raw requests such as file upload, serialize & deserialize, etc.
+//	resty.R().
+//		SetBody([]byte("This is my raw request, sent as-is"))
+//
+func (r *Request) SetBody(body interface{}) *Request {
+	r.Body = body
+	return r
+}
+
+// SetResult method is to register the response `Result` object for automatic unmarshalling in the RESTful mode
+// if the response status code is between 200 and 299 and the content type is either JSON or XML.
+//
+// Note: the Result object can be a pointer or non-pointer.
+//	resty.R().SetResult(&AuthToken{})
+//	// OR
+//	resty.R().SetResult(AuthToken{})
+//
+// Accessing a result value
+//	response.Result().(*AuthToken)
+//
+func (r *Request) SetResult(res interface{}) *Request {
+	r.Result = getPointer(res)
+	return r
+}
+
+// SetError method is to register the request `Error` object for automatic unmarshalling in the RESTful mode
+// if the response status code is greater than 399 and the content type is either JSON or XML.
+//
+// Note: the Error object can be a pointer or non-pointer.
+//	resty.R().SetError(&AuthError{})
+//	// OR
+//	resty.R().SetError(AuthError{})
+//
+// Accessing an error value
+//	response.Error().(*AuthError)
+//
+func (r *Request) SetError(err interface{}) *Request {
+	r.Error = getPointer(err)
+	return r
+}
+
+// SetFile method is to set a single file field name and its path for multipart upload.
+//	resty.R().
+//		SetFile("my_file", "/Users/jeeva/Gas Bill - Sep.pdf")
+//
+func (r *Request) SetFile(param, filePath string) *Request {
+	r.isMultiPart = true
+	r.FormData.Set("@"+param, filePath)
+
+	return r
+}
+
+// SetFiles method is to set multiple file field names and their paths for multipart upload.
+//	resty.R().
+//		SetFiles(map[string]string{
+//			"my_file1": "/Users/jeeva/Gas Bill - Sep.pdf",
+//			"my_file2": "/Users/jeeva/Electricity Bill - Sep.pdf",
+//			"my_file3": "/Users/jeeva/Water Bill - Sep.pdf",
+//		})
+//
+func (r *Request) SetFiles(files map[string]string) *Request {
+	r.isMultiPart = true
+
+	for f, fp := range files {
+		r.FormData.Set("@"+f, fp)
+	}
+
+	return r
+}
+
+// SetFileReader method is to set a single file using io.Reader for multipart upload.
+//	resty.R().
+//		SetFileReader("profile_img", "my-profile-img.png", bytes.NewReader(profileImgBytes)).
+//		SetFileReader("notes", "user-notes.txt", bytes.NewReader(notesBytes))
+//
+func (r *Request) SetFileReader(param, fileName string, reader io.Reader) *Request {
+	r.isMultiPart = true
+
+	r.multipartFiles = append(r.multipartFiles, &File{
+		Name: fileName,
+		ParamName: param,
+		Reader: reader,
+	})
+
+	return r
+}
+
+// SetContentLength method sets the HTTP header `Content-Length` value for the current request.
+// By default go-resty won't set `Content-Length`. You also have an option to enable it for every
+// request. See `resty.SetContentLength`.
+//	resty.R().SetContentLength(true)
+//
+func (r *Request) SetContentLength(l bool) *Request {
+	r.setContentLength = l
+
+	return r
+}
+
+// SetBasicAuth method sets the basic authentication header in the current HTTP request.
+// For Header example:
+//	Authorization: Basic <base64-encoded-value>
+//
+// To set the header for username "go-resty" and password "welcome"
+//	resty.R().SetBasicAuth("go-resty", "welcome")
+//
+// This method overrides the credentials set by method `resty.SetBasicAuth`.
+//
+func (r *Request) SetBasicAuth(username, password string) *Request {
+	r.UserInfo = &User{Username: username, Password: password}
+	return r
+}
+
+// SetAuthToken method sets the bearer auth token header in the current HTTP request. Header example:
+//	Authorization: Bearer <auth-token-value>
+//
+// Example: To set auth token BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F
+//
+//	resty.R().SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F")
+//
+// This method overrides the Auth token set by method `resty.SetAuthToken`.
+//
+func (r *Request) SetAuthToken(token string) *Request {
+	r.Token = token
+	return r
+}
+
+// SetOutput method sets the output file for the current HTTP request. The current HTTP response will be
+// saved into the given file. It is similar to the `curl -o` flag. An absolute path or a relative path can be used.
+// If it is a relative path, then the output file goes under the output directory, as mentioned
+// in the `Client.SetOutputDirectory`.
+//	resty.R().
+//		SetOutput("/Users/jeeva/Downloads/ReplyWithHeader-v5.1-beta.zip").
+//		Get("http://bit.ly/1LouEKr")
+//
+// Note: In this scenario `Response.Body` might be nil.
+func (r *Request) SetOutput(file string) *Request {
+	r.outputFile = file
+	r.isSaveResponse = true
+	return r
+}
+
+// SetSRV method sets the details to query the service SRV record and execute the
+// request.
+//	resty.R().
+//		SetSRV(SRVRecord{"web", "testservice.com"}).
+//		Get("/get")
+func (r *Request) SetSRV(srv *SRVRecord) *Request {
+	r.SRV = srv
+	return r
+}
+
+//
+// HTTP verb method starts here
+//
+
+// Get method does a GET HTTP request. It's defined in section 4.3.1 of RFC 7231.
+func (r *Request) Get(url string) (*Response, error) {
+	return r.Execute(MethodGet, url)
+}
+
+// Head method does a HEAD HTTP request. It's defined in section 4.3.2 of RFC 7231.
+func (r *Request) Head(url string) (*Response, error) {
+	return r.Execute(MethodHead, url)
+}
+
+// Post method does a POST HTTP request. It's defined in section 4.3.3 of RFC 7231.
+func (r *Request) Post(url string) (*Response, error) {
+	return r.Execute(MethodPost, url)
+}
+
+// Put method does a PUT HTTP request. It's defined in section 4.3.4 of RFC 7231.
+func (r *Request) Put(url string) (*Response, error) {
+	return r.Execute(MethodPut, url)
+}
+
+// Delete method does a DELETE HTTP request. It's defined in section 4.3.5 of RFC 7231.
+func (r *Request) Delete(url string) (*Response, error) {
+	return r.Execute(MethodDelete, url)
+}
+
+// Options method does an OPTIONS HTTP request. It's defined in section 4.3.7 of RFC 7231.
+func (r *Request) Options(url string) (*Response, error) {
+	return r.Execute(MethodOptions, url)
+}
+
+// Patch method does a PATCH HTTP request. It's defined in section 2 of RFC 5789.
+func (r *Request) Patch(url string) (*Response, error) {
+	return r.Execute(MethodPatch, url)
+}
+
+// Execute method performs the HTTP request with the given HTTP method and URL
+// for the current `Request`.
+// resp, err := resty.R().Execute(resty.GET, "http://httpbin.org/get") +// +func (r *Request) Execute(method, url string) (*Response, error) { + var addrs []*net.SRV + var err error + + if r.isMultiPart && !(method == MethodPost || method == MethodPut) { + return nil, fmt.Errorf("Multipart content is not allowed in HTTP verb [%v]", method) + } + + if r.SRV != nil { + _, addrs, err = net.LookupSRV(r.SRV.Service, "tcp", r.SRV.Domain) + if err != nil { + return nil, err + } + } + + r.Method = method + r.URL = r.selectAddr(addrs, url, 0) + + if r.client.RetryCount == 0 { + return r.client.execute(r) + } + + var resp *Response + attempt := 0 + _ = Backoff( + func() (*Response, error) { + attempt++ + + r.URL = r.selectAddr(addrs, url, attempt) + + resp, err = r.client.execute(r) + if err != nil { + r.client.Log.Printf("ERROR [%v] Attempt [%v]", err, attempt) + if r.isContextCancelledIfAvailable() { + // stop Backoff from retrying request if request has been + // canceled by context + return resp, nil + } + } + + return resp, err + }, + Retries(r.client.RetryCount), + WaitTime(r.client.RetryWaitTime), + MaxWaitTime(r.client.RetryMaxWaitTime), + RetryConditions(r.client.RetryConditions), + ) + + return resp, err +} + +func (r *Request) fmtBodyString() (body string) { + body = "***** NO CONTENT *****" + if isPayloadSupported(r.Method, r.client.AllowGetMethodPayload) { + // multipart or form-data + if r.isMultiPart || r.isFormData { + body = string(r.bodyBuf.Bytes()) + return + } + + // request body data + if r.Body == nil { + return + } + var prtBodyBytes []byte + var err error + + contentType := r.Header.Get(hdrContentTypeKey) + kind := kindOf(r.Body) + if canJSONMarshal(contentType, kind) { + prtBodyBytes, err = json.MarshalIndent(&r.Body, "", " ") + } else if IsXMLType(contentType) && (kind == reflect.Struct) { + prtBodyBytes, err = xml.MarshalIndent(&r.Body, "", " ") + } else if b, ok := r.Body.(string); ok { + if IsJSONType(contentType) { + bodyBytes := []byte(b) + out := getBuffer() + defer putBuffer(out) + if err = json.Indent(out, bodyBytes, "", " "); err == nil { + prtBodyBytes = out.Bytes() + } + } else { + body = b + return + } + } else if b, ok := r.Body.([]byte); ok { + body = base64.StdEncoding.EncodeToString(b) + } + + if prtBodyBytes != nil && err == nil { + body = string(prtBodyBytes) + } + } + + return +} + +func (r *Request) selectAddr(addrs []*net.SRV, path string, attempt int) string { + if addrs == nil { + return path + } + + idx := attempt % len(addrs) + domain := strings.TrimRight(addrs[idx].Target, ".") + path = strings.TrimLeft(path, "/") + + return fmt.Sprintf("%s://%s:%d/%s", r.client.scheme, domain, addrs[idx].Port, path) +} diff --git a/vendor/github.com/go-resty/resty/request16.go b/vendor/github.com/go-resty/resty/request16.go new file mode 100644 index 000000000..cc7700ee2 --- /dev/null +++ b/vendor/github.com/go-resty/resty/request16.go @@ -0,0 +1,55 @@ +// +build !go1.7 + +// Copyright (c) 2015-2016 Jeevanandam M (jeeva@myjeeva.com) +// 2016 Andrew Grigorev (https://github.com/ei-grad) +// All rights reserved. +// resty source code and usage is governed by a MIT style +// license that can be found in the LICENSE file. 
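+
+// This file is compiled only for Go releases before 1.7 (note the !go1.7
+// build tag above); it mirrors request17.go but omits context.Context
+// support, so the context hooks below are no-ops.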
+
+package resty
+
+import (
+	"bytes"
+	"net/http"
+	"net/url"
+	"time"
+)
+
+// Request type is used to compose and send an individual request from a client.
+// go-resty provides options to override client-level settings, such as the
+// Auth Token, Basic Auth credentials, Header, Query Param, Form Data, and Error object,
+// and also you can add more options for that particular request.
+//
+type Request struct {
+	URL string
+	Method string
+	QueryParam url.Values
+	FormData url.Values
+	Header http.Header
+	UserInfo *User
+	Token string
+	Body interface{}
+	Result interface{}
+	Error interface{}
+	Time time.Time
+	RawRequest *http.Request
+	SRV *SRVRecord
+
+	client *Client
+	bodyBuf *bytes.Buffer
+	isMultiPart bool
+	isFormData bool
+	setContentLength bool
+	isSaveResponse bool
+	outputFile string
+	multipartFiles []*File
+}
+
+func (r *Request) addContextIfAvailable() {
+	// nothing to do for Go < 1.7
+}
+
+func (r *Request) isContextCancelledIfAvailable() bool {
+	// always return false for Go < 1.7
+	return false
+}
diff --git a/vendor/github.com/go-resty/resty/request17.go b/vendor/github.com/go-resty/resty/request17.go
new file mode 100644
index 000000000..21105a35a
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/request17.go
@@ -0,0 +1,72 @@
+// +build go1.7 go1.8
+
+// Copyright (c) 2015-2016 Jeevanandam M (jeeva@myjeeva.com)
+// 2016 Andrew Grigorev (https://github.com/ei-grad)
+// All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+	"bytes"
+	"context"
+	"net/http"
+	"net/url"
+	"time"
+)
+
+// Request type is used to compose and send an individual request from a client.
+// go-resty provides options to override client-level settings, such as the
+// Auth Token, Basic Auth credentials, Header, Query Param, Form Data, and Error object,
+// and also you can add more options for that particular request.
+//
+type Request struct {
+	URL string
+	Method string
+	QueryParam url.Values
+	FormData url.Values
+	Header http.Header
+	UserInfo *User
+	Token string
+	Body interface{}
+	Result interface{}
+	Error interface{}
+	Time time.Time
+	RawRequest *http.Request
+	SRV *SRVRecord
+
+	client *Client
+	bodyBuf *bytes.Buffer
+	isMultiPart bool
+	isFormData bool
+	setContentLength bool
+	isSaveResponse bool
+	outputFile string
+	multipartFiles []*File
+	ctx context.Context
+}
+
+// SetContext method sets the context.Context for the current Request. It allows
+// the request execution to be interrupted if the ctx.Done() channel is closed.
+// See the https://blog.golang.org/context article and the "context" package
+// documentation.
+func (r *Request) SetContext(ctx context.Context) *Request {
+	r.ctx = ctx
+	return r
+}
+
+func (r *Request) addContextIfAvailable() {
+	if r.ctx != nil {
+		r.RawRequest = r.RawRequest.WithContext(r.ctx)
+	}
+}
+
+func (r *Request) isContextCancelledIfAvailable() bool {
+	if r.ctx != nil {
+		if r.ctx.Err() != nil {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/go-resty/resty/response.go b/vendor/github.com/go-resty/resty/response.go
new file mode 100644
index 000000000..f522486fa
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/response.go
@@ -0,0 +1,125 @@
+// Copyright (c) 2015-2017 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
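+
+// This file defines the Response type returned for every executed Request;
+// its accessors are written to be nil-safe over the underlying *http.Response.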
+
+package resty
+
+import (
+	"encoding/json"
+	"net/http"
+	"strings"
+	"time"
+)
+
+// Response is an object that represents the executed request and its values.
type Response struct {
+	Request *Request
+	RawResponse *http.Response
+
+	body []byte
+	size int64
+	receivedAt time.Time
+}
+
+// Body method returns the HTTP response as a []byte array for the executed request.
+// Note: `Response.Body` might be nil, if `Request.SetOutput` is used.
+func (r *Response) Body() []byte {
+	if r.RawResponse == nil {
+		return []byte{}
+	}
+	return r.body
+}
+
+// Status method returns the HTTP status string for the executed request.
+//	Example: 200 OK
+func (r *Response) Status() string {
+	if r.RawResponse == nil {
+		return ""
+	}
+
+	return r.RawResponse.Status
+}
+
+// StatusCode method returns the HTTP status code for the executed request.
+//	Example: 200
+func (r *Response) StatusCode() int {
+	if r.RawResponse == nil {
+		return 0
+	}
+
+	return r.RawResponse.StatusCode
+}
+
+// Result method returns the response value as an object if it has one
+func (r *Response) Result() interface{} {
+	return r.Request.Result
+}
+
+// Error method returns the error object if it has one
+func (r *Response) Error() interface{} {
+	return r.Request.Error
+}
+
+// Header method returns the response headers
+func (r *Response) Header() http.Header {
+	if r.RawResponse == nil {
+		return http.Header{}
+	}
+
+	return r.RawResponse.Header
+}
+
+// Cookies method to access all the response cookies
+func (r *Response) Cookies() []*http.Cookie {
+	if r.RawResponse == nil {
+		return make([]*http.Cookie, 0)
+	}
+
+	return r.RawResponse.Cookies()
+}
+
+// String method returns the body of the server response as a String.
+func (r *Response) String() string {
+	if r.body == nil {
+		return ""
+	}
+
+	return strings.TrimSpace(string(r.body))
+}
+
+// Time method returns the duration between the request being sent and the
+// response being received. See `response.ReceivedAt` to know when the client
+// received the response and `response.Request.Time` to know when the client sent the request.
+func (r *Response) Time() time.Duration {
+	return r.receivedAt.Sub(r.Request.Time)
+}
+
+// ReceivedAt method returns the time the response was received from the server for the request.
+func (r *Response) ReceivedAt() time.Time {
+	return r.receivedAt
+}
+
+// Size method returns the HTTP response size in bytes. You could rely on the HTTP
+// `Content-Length` header instead, however it isn't reliable for chunked transfer or
+// compressed responses. Resty calculates the response size at the client end, so you
+// get the actual size of the HTTP response.
+func (r *Response) Size() int64 {
+	return r.size
+}
+
+func (r *Response) fmtBodyString() string {
+	bodyStr := "***** NO CONTENT *****"
+	if r.body != nil {
+		ct := r.Header().Get(hdrContentTypeKey)
+		if IsJSONType(ct) {
+			out := getBuffer()
+			defer putBuffer(out)
+			if err := json.Indent(out, r.body, "", " "); err == nil {
+				bodyStr = out.String()
+			}
+		} else {
+			bodyStr = r.String()
+		}
+	}
+
+	return bodyStr
+}
diff --git a/vendor/github.com/go-resty/resty/resty.go b/vendor/github.com/go-resty/resty/resty.go
new file mode 100644
index 000000000..2605d905c
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/resty.go
@@ -0,0 +1,9 @@
+// Copyright (c) 2015-2017 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+// Package resty provides a simple HTTP and REST client for Go, inspired by the Ruby rest-client.
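+//
+// A minimal sketch of typical usage (the URL is illustrative):
+//	resp, err := resty.R().Get("http://httpbin.org/get")
+//	if err == nil {
+//		fmt.Println(resp.StatusCode(), resp.String())
+//	}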
+package resty + +// Version # of resty +const Version = "0.13" diff --git a/vendor/github.com/go-resty/resty/resty_test.go b/vendor/github.com/go-resty/resty/resty_test.go new file mode 100644 index 000000000..2b472b535 --- /dev/null +++ b/vendor/github.com/go-resty/resty/resty_test.go @@ -0,0 +1,1639 @@ +// Copyright (c) 2015-2017 Jeevanandam M (jeeva@myjeeva.com), All rights reserved. +// resty source code and usage is governed by a MIT style +// license that can be found in the LICENSE file. + +package resty + +import ( + "bytes" + "crypto/tls" + "encoding/base64" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "path/filepath" + "reflect" + "strconv" + "strings" + "sync/atomic" + "testing" + "time" +) + +type AuthSuccess struct { + ID, Message string +} + +type AuthError struct { + ID, Message string +} + +func TestGet(t *testing.T) { + ts := createGetServer(t) + defer ts.Close() + + resp, err := R(). + SetQueryParam("request_no", strconv.FormatInt(time.Now().Unix(), 10)). + Get(ts.URL + "/") + + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) + assertEqual(t, "200 OK", resp.Status()) + assertEqual(t, true, resp.Body() != nil) + assertEqual(t, "TestGet: text response", resp.String()) + + logResponse(t, resp) +} + +func TestGetCustomUserAgent(t *testing.T) { + ts := createGetServer(t) + defer ts.Close() + + resp, err := dcr(). + SetHeader(hdrUserAgentKey, "Test Custom User agent"). + SetQueryParam("request_no", strconv.FormatInt(time.Now().Unix(), 10)). + Get(ts.URL + "/") + + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) + assertEqual(t, "200 OK", resp.Status()) + assertEqual(t, "TestGet: text response", resp.String()) + + logResponse(t, resp) +} + +func TestGetClientParamRequestParam(t *testing.T) { + ts := createGetServer(t) + defer ts.Close() + + c := dc() + c.SetQueryParam("client_param", "true"). + SetQueryParams(map[string]string{"req_1": "jeeva", "req_3": "jeeva3"}). + SetDebug(true). + SetLogger(ioutil.Discard) + + resp, err := c.R(). + SetQueryParams(map[string]string{"req_1": "req 1 value", "req_2": "req 2 value"}). + SetQueryParam("request_no", strconv.FormatInt(time.Now().Unix(), 10)). + SetHeader(hdrUserAgentKey, "Test Custom User agent"). + Get(ts.URL + "/") + + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) + assertEqual(t, "200 OK", resp.Status()) + assertEqual(t, "TestGet: text response", resp.String()) + + logResponse(t, resp) +} + +func TestGetRelativePath(t *testing.T) { + ts := createGetServer(t) + defer ts.Close() + + c := dc() + c.SetHostURL(ts.URL) + + resp, err := c.R().Get("mypage2") + + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) + assertEqual(t, "TestGet: text response from mypage2", resp.String()) + + logResponse(t, resp) +} + +func TestGet400Error(t *testing.T) { + ts := createGetServer(t) + defer ts.Close() + + resp, err := dcr().Get(ts.URL + "/mypage") + + assertError(t, err) + assertEqual(t, http.StatusBadRequest, resp.StatusCode()) + assertEqual(t, "", resp.String()) + + logResponse(t, resp) +} + +func TestPostJSONStringSuccess(t *testing.T) { + ts := createPostServer(t) + defer ts.Close() + + c := dc() + c.SetHeader(hdrContentTypeKey, jsonContentType). + SetHeaders(map[string]string{hdrUserAgentKey: "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) go-resty v0.1", hdrAcceptKey: jsonContentType}) + + resp, err := c.R(). + SetBody(`{"username":"testuser", "password":"testpass"}`). 
+ Post(ts.URL + "/login") + + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) + + logResponse(t, resp) + + // PostJSONStringError + resp, err = c.R(). + SetBody(`{"username":"testuser" "password":"testpass"}`). + Post(ts.URL + "/login") + + assertError(t, err) + assertEqual(t, http.StatusBadRequest, resp.StatusCode()) + + logResponse(t, resp) +} + +func TestPostJSONBytesSuccess(t *testing.T) { + ts := createPostServer(t) + defer ts.Close() + + c := dc() + c.SetHeader(hdrContentTypeKey, jsonContentType). + SetHeaders(map[string]string{hdrUserAgentKey: "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) go-resty v0.7", hdrAcceptKey: jsonContentType}) + + resp, err := c.R(). + SetBody([]byte(`{"username":"testuser", "password":"testpass"}`)). + Post(ts.URL + "/login") + + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) + + logResponse(t, resp) +} + +func TestPostJSONBytesIoReader(t *testing.T) { + ts := createPostServer(t) + defer ts.Close() + + c := dc() + c.SetHeader(hdrContentTypeKey, jsonContentType) + + bodyBytes := []byte(`{"username":"testuser", "password":"testpass"}`) + + resp, err := c.R(). + SetBody(bytes.NewReader(bodyBytes)). + Post(ts.URL + "/login") + + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) + + logResponse(t, resp) +} + +func TestPostJSONStructSuccess(t *testing.T) { + ts := createPostServer(t) + defer ts.Close() + + user := &User{Username: "testuser", Password: "testpass"} + + c := dc() + resp, err := c.R(). + SetHeader(hdrContentTypeKey, jsonContentType). + SetBody(user). + SetResult(&AuthSuccess{}). + Post(ts.URL + "/login") + + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) + + t.Logf("Result Success: %q", resp.Result().(*AuthSuccess)) + + logResponse(t, resp) +} + +func TestPostJSONStructInvalidLogin(t *testing.T) { + ts := createPostServer(t) + defer ts.Close() + + c := dc() + c.SetDebug(false) + + resp, err := c.R(). + SetHeader(hdrContentTypeKey, jsonContentType). + SetBody(User{Username: "testuser", Password: "testpass1"}). + SetError(AuthError{}). + Post(ts.URL + "/login") + + assertError(t, err) + assertEqual(t, http.StatusUnauthorized, resp.StatusCode()) + assertEqual(t, resp.Header().Get("Www-Authenticate"), "Protected Realm") + + t.Logf("Result Error: %q", resp.Error().(*AuthError)) + + logResponse(t, resp) +} + +func TestPostJSONMapSuccess(t *testing.T) { + ts := createPostServer(t) + defer ts.Close() + + c := dc() + c.SetDebug(false) + + resp, err := c.R(). + SetBody(map[string]interface{}{"username": "testuser", "password": "testpass"}). + SetResult(AuthSuccess{}). + Post(ts.URL + "/login") + + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) + + t.Logf("Result Success: %q", resp.Result().(*AuthSuccess)) + + logResponse(t, resp) +} + +func TestPostJSONMapInvalidResponseJson(t *testing.T) { + ts := createPostServer(t) + defer ts.Close() + + resp, err := dclr(). + SetBody(map[string]interface{}{"username": "testuser", "password": "invalidjson"}). + SetResult(&AuthSuccess{}). + Post(ts.URL + "/login") + + assertEqual(t, "invalid character '}' looking for beginning of object key string", err.Error()) + assertEqual(t, http.StatusOK, resp.StatusCode()) + + t.Logf("Result Success: %q", resp.Result().(*AuthSuccess)) + + logResponse(t, resp) +} + +func TestPostXMLStringSuccess(t *testing.T) { + ts := createPostServer(t) + defer ts.Close() + + c := dc() + c.SetDebug(false) + + resp, err := c.R(). + SetHeader(hdrContentTypeKey, "application/xml"). 
+		SetBody(`<?xml version="1.0" encoding="UTF-8"?><User><Username>testuser</Username><Password>testpass</Password></User>`).
+		SetQueryParam("request_no", strconv.FormatInt(time.Now().Unix(), 10)).
+		Post(ts.URL + "/login")
+
+	assertError(t, err)
+	assertEqual(t, http.StatusOK, resp.StatusCode())
+
+	logResponse(t, resp)
+}
+
+func TestPostXMLStringError(t *testing.T) {
+	ts := createPostServer(t)
+	defer ts.Close()
+
+	resp, err := dclr().
+		SetHeader(hdrContentTypeKey, "application/xml").
+		SetBody(`testusertestpass`).
+		Post(ts.URL + "/login")
+
+	assertError(t, err)
+	assertEqual(t, http.StatusBadRequest, resp.StatusCode())
+	assertEqual(t, `bad_requestUnable to read user info`, resp.String())
+
+	logResponse(t, resp)
+}
+
+func TestPostXMLBytesSuccess(t *testing.T) {
+	ts := createPostServer(t)
+	defer ts.Close()
+
+	c := dc()
+	c.SetDebug(false)
+
+	resp, err := c.R().
+		SetHeader(hdrContentTypeKey, "application/xml").
+		SetBody([]byte(`<?xml version="1.0" encoding="UTF-8"?><User><Username>testuser</Username><Password>testpass</Password></User>`)).
+		SetQueryParam("request_no", strconv.FormatInt(time.Now().Unix(), 10)).
+		SetContentLength(true).
+		Post(ts.URL + "/login")
+
+	assertError(t, err)
+	assertEqual(t, http.StatusOK, resp.StatusCode())
+
+	logResponse(t, resp)
+}
+
+func TestPostXMLStructSuccess(t *testing.T) {
+	ts := createPostServer(t)
+	defer ts.Close()
+
+	resp, err := dclr().
+		SetHeader(hdrContentTypeKey, "application/xml").
+		SetBody(User{Username: "testuser", Password: "testpass"}).
+		SetContentLength(true).
+		SetResult(&AuthSuccess{}).
+		Post(ts.URL + "/login")
+
+	assertError(t, err)
+	assertEqual(t, http.StatusOK, resp.StatusCode())
+
+	t.Logf("Result Success: %q", resp.Result().(*AuthSuccess))
+
+	logResponse(t, resp)
+}
+
+func TestPostXMLStructInvalidLogin(t *testing.T) {
+	ts := createPostServer(t)
+	defer ts.Close()
+
+	c := dc()
+	c.SetError(&AuthError{})
+
+	resp, err := c.R().
+		SetHeader(hdrContentTypeKey, "application/xml").
+		SetBody(User{Username: "testuser", Password: "testpass1"}).
+		Post(ts.URL + "/login")
+
+	assertError(t, err)
+	assertEqual(t, http.StatusUnauthorized, resp.StatusCode())
+	assertEqual(t, resp.Header().Get("Www-Authenticate"), "Protected Realm")
+
+	t.Logf("Result Error: %q", resp.Error().(*AuthError))
+
+	logResponse(t, resp)
+}
+
+func TestPostXMLStructInvalidResponseXml(t *testing.T) {
+	ts := createPostServer(t)
+	defer ts.Close()
+
+	resp, err := dclr().
+		SetHeader(hdrContentTypeKey, "application/xml").
+		SetBody(User{Username: "testuser", Password: "invalidxml"}).
+		SetResult(&AuthSuccess{}).
+		Post(ts.URL + "/login")
+
+	assertEqual(t, "XML syntax error on line 1: element closed by ", err.Error())
+	assertEqual(t, http.StatusOK, resp.StatusCode())
+
+	t.Logf("Result Success: %q", resp.Result().(*AuthSuccess))
+
+	logResponse(t, resp)
+}
+
+func TestPostXMLMapNotSupported(t *testing.T) {
+	ts := createPostServer(t)
+	defer ts.Close()
+
+	_, err := dclr().
+		SetHeader(hdrContentTypeKey, "application/xml").
+		SetBody(map[string]interface{}{"Username": "testuser", "Password": "testpass"}).
+		Post(ts.URL + "/login")
+
+	assertEqual(t, "Unsupported 'Body' type/value", err.Error())
+}
+
+func TestRequestBasicAuth(t *testing.T) {
+	ts := createAuthServer(t)
+	defer ts.Close()
+
+	c := dc()
+	c.SetHostURL(ts.URL).
+		SetTLSClientConfig(&tls.Config{InsecureSkipVerify: true})
+
+	resp, err := c.R().
+		SetBasicAuth("myuser", "basicauth").
+		SetResult(&AuthSuccess{}).
+ Post("/login") + + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) + + t.Logf("Result Success: %q", resp.Result().(*AuthSuccess)) + logResponse(t, resp) +} + +func TestRequestBasicAuthFail(t *testing.T) { + ts := createAuthServer(t) + defer ts.Close() + + c := dc() + c.SetTLSClientConfig(&tls.Config{InsecureSkipVerify: true}). + SetError(AuthError{}) + + resp, err := c.R(). + SetBasicAuth("myuser", "basicauth1"). + Post(ts.URL + "/login") + + assertError(t, err) + assertEqual(t, http.StatusUnauthorized, resp.StatusCode()) + + t.Logf("Result Error: %q", resp.Error().(*AuthError)) + logResponse(t, resp) +} + +func TestRequestAuthToken(t *testing.T) { + ts := createAuthServer(t) + defer ts.Close() + + c := dc() + c.SetTLSClientConfig(&tls.Config{InsecureSkipVerify: true}). + SetAuthToken("004DDB79-6801-4587-B976-F093E6AC44FF") + + resp, err := c.R(). + SetAuthToken("004DDB79-6801-4587-B976-F093E6AC44FF-Request"). + Get(ts.URL + "/profile") + + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) +} + +func TestFormData(t *testing.T) { + ts := createFormPostServer(t) + defer ts.Close() + + c := dc() + c.SetFormData(map[string]string{"zip_code": "00000", "city": "Los Angeles"}). + SetContentLength(true). + SetDebug(true). + SetLogger(ioutil.Discard) + + resp, err := c.R(). + SetFormData(map[string]string{"first_name": "Jeevanandam", "last_name": "M", "zip_code": "00001"}). + SetBasicAuth("myuser", "mypass"). + Post(ts.URL + "/profile") + + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) + assertEqual(t, "Success", resp.String()) +} + +func TestMultiValueFormData(t *testing.T) { + ts := createFormPostServer(t) + defer ts.Close() + + v := url.Values{ + "search_criteria": []string{"book", "glass", "pencil"}, + } + + c := dc() + c.SetContentLength(true). + SetDebug(true). + SetLogger(ioutil.Discard) + + resp, err := c.R(). + SetMultiValueFormData(v). + Post(ts.URL + "/search") + + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) + assertEqual(t, "Success", resp.String()) +} + +func TestFormDataDisableWarn(t *testing.T) { + ts := createFormPostServer(t) + defer ts.Close() + + c := dc() + c.SetFormData(map[string]string{"zip_code": "00000", "city": "Los Angeles"}). + SetContentLength(true). + SetDebug(true). + SetLogger(ioutil.Discard). + SetDisableWarn(true) + + resp, err := c.R(). + SetFormData(map[string]string{"first_name": "Jeevanandam", "last_name": "M", "zip_code": "00001"}). + SetBasicAuth("myuser", "mypass"). + Post(ts.URL + "/profile") + + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) + assertEqual(t, "Success", resp.String()) +} + +func TestMultiPartUploadFile(t *testing.T) { + ts := createFormPostServer(t) + defer ts.Close() + defer cleaupFiles("test-data/upload") + + basePath := getTestDataPath() + + c := dc() + c.SetFormData(map[string]string{"zip_code": "00001", "city": "Los Angeles"}) + + resp, err := c.R(). + SetFile("profile_img", basePath+"/test-img.png"). + SetContentLength(true). + Post(ts.URL + "/upload") + + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) +} + +func TestMultiPartUploadFileError(t *testing.T) { + ts := createFormPostServer(t) + defer ts.Close() + defer cleaupFiles("test-data/upload") + + basePath := getTestDataPath() + + c := dc() + c.SetFormData(map[string]string{"zip_code": "00001", "city": "Los Angeles"}) + + resp, err := c.R(). + SetFile("profile_img", basePath+"/test-img-not-exists.png"). 
+ Post(ts.URL + "/upload") + + if err == nil { + t.Errorf("Expected [%v], got [%v]", nil, err) + } + if resp != nil { + t.Errorf("Expected [%v], got [%v]", nil, resp) + } +} + +func TestMultiPartUploadFiles(t *testing.T) { + ts := createFormPostServer(t) + defer ts.Close() + defer cleaupFiles("test-data/upload") + + basePath := getTestDataPath() + + resp, err := dclr(). + SetFormData(map[string]string{"first_name": "Jeevanandam", "last_name": "M"}). + SetFiles(map[string]string{"profile_img": basePath + "/test-img.png", "notes": basePath + "/text-file.txt"}). + Post(ts.URL + "/upload") + + responseStr := resp.String() + + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) + assertEqual(t, true, strings.Contains(responseStr, "test-img.png")) + assertEqual(t, true, strings.Contains(responseStr, "text-file.txt")) +} + +func TestMultiPartIoReaderFiles(t *testing.T) { + ts := createFormPostServer(t) + defer ts.Close() + defer cleaupFiles("test-data/upload") + + basePath := getTestDataPath() + profileImgBytes, _ := ioutil.ReadFile(basePath + "/test-img.png") + notesBytes, _ := ioutil.ReadFile(basePath + "/text-file.txt") + + // Just info values + file := File{ + Name: "test_file_name.jpg", + ParamName: "test_param", + Reader: bytes.NewBuffer([]byte("test bytes")), + } + t.Logf("File Info: %v", file.String()) + + resp, err := dclr(). + SetFormData(map[string]string{"first_name": "Jeevanandam", "last_name": "M"}). + SetFileReader("profile_img", "test-img.png", bytes.NewReader(profileImgBytes)). + SetFileReader("notes", "text-file.txt", bytes.NewReader(notesBytes)). + Post(ts.URL + "/upload") + + responseStr := resp.String() + + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) + assertEqual(t, true, strings.Contains(responseStr, "test-img.png")) + assertEqual(t, true, strings.Contains(responseStr, "text-file.txt")) +} + +func TestMultiPartUploadFileNotOnGetOrDelete(t *testing.T) { + ts := createFormPostServer(t) + defer ts.Close() + defer cleaupFiles("test-data/upload") + + basePath := getTestDataPath() + + _, err := dclr(). + SetFile("profile_img", basePath+"/test-img.png"). + Get(ts.URL + "/upload") + + assertEqual(t, "Multipart content is not allowed in HTTP verb [GET]", err.Error()) + + _, err = dclr(). + SetFile("profile_img", basePath+"/test-img.png"). + Delete(ts.URL + "/upload") + + assertEqual(t, "Multipart content is not allowed in HTTP verb [DELETE]", err.Error()) +} + +func TestGetWithCookie(t *testing.T) { + ts := createGetServer(t) + defer ts.Close() + + c := dc() + c.SetHostURL(ts.URL) + c.SetCookie(&http.Cookie{ + Name: "go-resty-1", + Value: "This is cookie 1 value", + Path: "/", + Domain: "localhost", + MaxAge: 36000, + HttpOnly: true, + Secure: false, + }) + + resp, err := c.R().Get("mypage2") + + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) + assertEqual(t, "TestGet: text response from mypage2", resp.String()) + + logResponse(t, resp) +} + +func TestGetWithCookies(t *testing.T) { + ts := createGetServer(t) + defer ts.Close() + + var cookies []*http.Cookie + + cookies = append(cookies, &http.Cookie{ + Name: "go-resty-1", + Value: "This is cookie 1 value", + Path: "/", + Domain: "sample.com", + MaxAge: 36000, + HttpOnly: true, + Secure: false, + }) + + cookies = append(cookies, &http.Cookie{ + Name: "go-resty-2", + Value: "This is cookie 2 value", + Path: "/", + Domain: "sample.com", + MaxAge: 36000, + HttpOnly: true, + Secure: false, + }) + + c := dc() + c.SetHostURL(ts.URL). 
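+ // Cookies set at the client level are sent with every request made by this client.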
+ SetCookies(cookies) + + resp, err := c.R().Get("mypage2") + + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) + assertEqual(t, "TestGet: text response from mypage2", resp.String()) + + logResponse(t, resp) +} + +func TestPutPlainString(t *testing.T) { + ts := createGenServer(t) + defer ts.Close() + + resp, err := R(). + SetBody("This is plain text body to server"). + Put(ts.URL + "/plaintext") + + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) + assertEqual(t, "TestPut: plain text response", resp.String()) +} + +func TestPutJSONString(t *testing.T) { + ts := createGenServer(t) + defer ts.Close() + + DefaultClient.OnBeforeRequest(func(c *Client, r *Request) error { + r.SetHeader("X-Custom-Request-Middleware", "OnBeforeRequest middleware") + return nil + }) + DefaultClient.OnBeforeRequest(func(c *Client, r *Request) error { + c.SetContentLength(true) + r.SetHeader("X-ContentLength", "OnBeforeRequest ContentLength set") + return nil + }) + + DefaultClient.SetDebug(true).SetLogger(ioutil.Discard) + + resp, err := R(). + SetHeaders(map[string]string{hdrContentTypeKey: jsonContentType, hdrAcceptKey: jsonContentType}). + SetBody(`{"content":"json content sending to server"}`). + Put(ts.URL + "/json") + + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) + assertEqual(t, `{"response":"json response"}`, resp.String()) +} + +func TestPutXMLString(t *testing.T) { + ts := createGenServer(t) + defer ts.Close() + + resp, err := R(). + SetHeaders(map[string]string{hdrContentTypeKey: "application/xml", hdrAcceptKey: "application/xml"}). + SetBody(`<?xml version="1.0" encoding="UTF-8"?><Request>XML Content sending to server</Request>`). + Put(ts.URL + "/xml") + + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) + assertEqual(t, `<?xml version="1.0" encoding="UTF-8"?><Response>XML response</Response>`, resp.String()) +} + +func TestOnBeforeMiddleware(t *testing.T) { + ts := createGenServer(t) + defer ts.Close() + + c := dc() + c.OnBeforeRequest(func(c *Client, r *Request) error { + r.SetHeader("X-Custom-Request-Middleware", "OnBeforeRequest middleware") + return nil + }) + c.OnBeforeRequest(func(c *Client, r *Request) error { + c.SetContentLength(true) + r.SetHeader("X-ContentLength", "OnBeforeRequest ContentLength set") + return nil + }) + + resp, err := c.R(). + SetBody("OnBeforeRequest: This is plain text body to server"). + Put(ts.URL + "/plaintext") + + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) + assertEqual(t, "TestPut: plain text response", resp.String()) +} + +func TestNoAutoRedirect(t *testing.T) { + ts := createRedirectServer(t) + defer ts.Close() + + _, err := R().Get(ts.URL + "/redirect-1") + + assertEqual(t, "Get /redirect-2: Auto redirect is disabled", err.Error()) +} + +func TestHTTPAutoRedirectUpTo10(t *testing.T) { + ts := createRedirectServer(t) + defer ts.Close() + + c := dc() + c.SetHTTPMode() + _, err := c.R().Get(ts.URL + "/redirect-1") + + assertEqual(t, "Get /redirect-11: Stopped after 10 redirects", err.Error()) +} + +func TestHostCheckRedirectPolicy(t *testing.T) { + ts := createRedirectServer(t) + defer ts.Close() + + c := dc().
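+ // DomainCheckRedirectPolicy only follows redirects whose target host is in the allowed list.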
+ SetRedirectPolicy(DomainCheckRedirectPolicy("127.0.0.1")) + + _, err := c.R().Get(ts.URL + "/redirect-host-check-1") + + assertEqual(t, true, err != nil) + assertEqual(t, true, strings.Contains(err.Error(), "Redirect is not allowed as per DomainCheckRedirectPolicy")) +} + +func TestHeadMethod(t *testing.T) { + ts := createGetServer(t) + defer ts.Close() + + resp, err := dclr().Head(ts.URL + "/") + + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) +} + +func TestOptionsMethod(t *testing.T) { + ts := createGenServer(t) + defer ts.Close() + + resp, err := dclr().Options(ts.URL + "/options") + + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) + assertEqual(t, resp.Header().Get("Access-Control-Expose-Headers"), "x-go-resty-id") +} + +func TestPatchMethod(t *testing.T) { + ts := createGenServer(t) + defer ts.Close() + + resp, err := dclr().Patch(ts.URL + "/patch") + + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) + + resp.body = nil + assertEqual(t, "", resp.String()) +} + +func TestRawFileUploadByBody(t *testing.T) { + ts := createFormPostServer(t) + defer ts.Close() + + file, _ := os.Open(getTestDataPath() + "/test-img.png") + fileBytes, _ := ioutil.ReadAll(file) + + resp, err := dclr(). + SetBody(fileBytes). + SetContentLength(true). + SetAuthToken("004DDB79-6801-4587-B976-F093E6AC44FF"). + Put(ts.URL + "/raw-upload") + + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) + assertEqual(t, "image/png", resp.Request.Header.Get(hdrContentTypeKey)) +} + +func TestProxySetting(t *testing.T) { + c := dc() + + assertEqual(t, false, c.IsProxySet()) + assertEqual(t, true, (c.transport.Proxy == nil)) + + c.SetProxy("http://sampleproxy:8888") + assertEqual(t, true, c.IsProxySet()) + assertEqual(t, false, (c.transport.Proxy == nil)) + + c.SetProxy("//not.a.user@%66%6f%6f.com:8888") + assertEqual(t, false, c.IsProxySet()) + assertEqual(t, true, (c.transport.Proxy == nil)) + + SetProxy("http://sampleproxy:8888") + assertEqual(t, true, IsProxySet()) + RemoveProxy() + assertEqual(t, true, (DefaultClient.proxyURL == nil)) + assertEqual(t, true, (DefaultClient.transport.Proxy == nil)) +} + +func TestIncorrectURL(t *testing.T) { + _, err := R().Get("//not.a.user@%66%6f%6f.com/just/a/path/also") + assertEqual(t, true, strings.Contains(err.Error(), "parse //not.a.user@%66%6f%6f.com/just/a/path/also")) + + c := dc() + c.SetHostURL("//not.a.user@%66%6f%6f.com") + _, err1 := c.R().Get("/just/a/path/also") + assertEqual(t, true, strings.Contains(err1.Error(), "parse //not.a.user@%66%6f%6f.com/just/a/path/also")) +} + +func TestDetectContentTypeForPointer(t *testing.T) { + ts := createPostServer(t) + defer ts.Close() + + user := &User{Username: "testuser", Password: "testpass"} + + resp, err := dclr(). + SetBody(user). + SetResult(AuthSuccess{}). 
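+ // No Content-Type header is set here; resty should detect JSON from the struct pointer body.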
+ Post(ts.URL + "/login") + + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) + + t.Logf("Result Success: %q", resp.Result().(*AuthSuccess)) + + logResponse(t, resp) +} + +type ExampleUser struct { + FirstName string `json:"frist_name"` + LastName string `json:"last_name"` + ZipCode string `json:"zip_code"` +} + +func TestDetectContentTypeForPointerWithSlice(t *testing.T) { + ts := createPostServer(t) + defer ts.Close() + + users := &[]ExampleUser{ + {FirstName: "firstname1", LastName: "lastname1", ZipCode: "10001"}, + {FirstName: "firstname2", LastName: "lastname3", ZipCode: "10002"}, + {FirstName: "firstname3", LastName: "lastname3", ZipCode: "10003"}, + } + + resp, err := dclr(). + SetBody(users). + Post(ts.URL + "/users") + + assertError(t, err) + assertEqual(t, http.StatusAccepted, resp.StatusCode()) + + t.Logf("Result Success: %q", resp) + + logResponse(t, resp) +} + +func TestDetectContentTypeForPointerWithSliceMap(t *testing.T) { + ts := createPostServer(t) + defer ts.Close() + + usersmap := map[string]interface{}{ + "user1": ExampleUser{FirstName: "firstname1", LastName: "lastname1", ZipCode: "10001"}, + "user2": &ExampleUser{FirstName: "firstname2", LastName: "lastname3", ZipCode: "10002"}, + "user3": ExampleUser{FirstName: "firstname3", LastName: "lastname3", ZipCode: "10003"}, + } + + var users []map[string]interface{} + users = append(users, usersmap) + + resp, err := dclr(). + SetBody(&users). + Post(ts.URL + "/usersmap") + + assertError(t, err) + assertEqual(t, http.StatusAccepted, resp.StatusCode()) + + t.Logf("Result Success: %q", resp) + + logResponse(t, resp) +} + +func TestDetectContentTypeForSlice(t *testing.T) { + ts := createPostServer(t) + defer ts.Close() + + users := []ExampleUser{ + {FirstName: "firstname1", LastName: "lastname1", ZipCode: "10001"}, + {FirstName: "firstname2", LastName: "lastname3", ZipCode: "10002"}, + {FirstName: "firstname3", LastName: "lastname3", ZipCode: "10003"}, + } + + resp, err := dclr(). + SetBody(users). + Post(ts.URL + "/users") + + assertError(t, err) + assertEqual(t, http.StatusAccepted, resp.StatusCode()) + + t.Logf("Result Success: %q", resp) + + logResponse(t, resp) +} + +func TestMultiParamsQueryString(t *testing.T) { + ts1 := createGetServer(t) + defer ts1.Close() + + client := dc() + req1 := client.R() + + client.SetQueryParam("status", "open") + + _, _ = req1.SetQueryParam("status", "pending"). + Get(ts1.URL) + + assertEqual(t, true, strings.Contains(req1.URL, "status=pending")) + // pending overrides open + assertEqual(t, false, strings.Contains(req1.URL, "status=open")) + + _, _ = req1.SetQueryParam("status", "approved"). + Get(ts1.URL) + + assertEqual(t, true, strings.Contains(req1.URL, "status=approved")) + // approved overrides pending + assertEqual(t, false, strings.Contains(req1.URL, "status=pending")) + + ts2 := createGetServer(t) + defer ts2.Close() + + req2 := client.R() + + v := url.Values{ + "status": []string{"pending", "approved", "reject"}, + } + + _, _ = req2.SetMultiValueQueryParams(v).Get(ts2.URL) + + assertEqual(t, true, strings.Contains(req2.URL, "status=pending")) + assertEqual(t, true, strings.Contains(req2.URL, "status=approved")) + assertEqual(t, true, strings.Contains(req2.URL, "status=reject")) + + // because it's removed by key + assertEqual(t, false, strings.Contains(req2.URL, "status=open")) +} + +func TestSetQueryStringTypical(t *testing.T) { + ts := createGetServer(t) + defer ts.Close() + + resp, err := dclr(). 
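+ // SetQueryString takes a complete, pre-encoded query string instead of individual key/value params.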
+ SetQueryString("productId=232&template=fresh-sample&cat=resty&source=google&kw=buy a lot more"). + Get(ts.URL) + + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) + assertEqual(t, "200 OK", resp.Status()) + assertEqual(t, "TestGet: text response", resp.String()) + + resp, err = dclr(). + SetQueryString("&%%amp;"). + Get(ts.URL) + + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) + assertEqual(t, "200 OK", resp.Status()) + assertEqual(t, "TestGet: text response", resp.String()) +} + +func TestOutputFileWithBaseDirAndRelativePath(t *testing.T) { + ts := createGetServer(t) + defer ts.Close() + defer cleaupFiles("test-data/dir-sample") + + DefaultClient = dc() + SetRedirectPolicy(FlexibleRedirectPolicy(10)) + SetOutputDirectory(getTestDataPath() + "/dir-sample") + SetDebug(true) + + resp, err := R(). + SetOutput("go-resty/test-img-success.png"). + Get(ts.URL + "/my-image.png") + + assertError(t, err) + assertEqual(t, true, resp.Size() != 0) +} + +func TestOutputFileWithBaseDirError(t *testing.T) { + c := dc().SetRedirectPolicy(FlexibleRedirectPolicy(10)). + SetOutputDirectory(getTestDataPath() + `/go-resty\0`) + + _ = c +} + +func TestOutputPathDirNotExists(t *testing.T) { + ts := createGetServer(t) + defer ts.Close() + defer cleaupFiles("test-data/not-exists-dir") + + DefaultClient = dc() + SetRedirectPolicy(FlexibleRedirectPolicy(10)) + SetOutputDirectory(getTestDataPath() + "/not-exists-dir") + + resp, err := R(). + SetOutput("test-img-success.png"). + Get(ts.URL + "/my-image.png") + + assertError(t, err) + assertEqual(t, true, resp.Size() != 0) +} + +func TestOutputFileAbsPath(t *testing.T) { + ts := createGetServer(t) + defer ts.Close() + defer cleaupFiles("test-data/go-resty") + + _, err := dcr(). + SetOutput(getTestDataPath() + "/go-resty/test-img-success-2.png"). + Get(ts.URL + "/my-image.png") + + assertError(t, err) +} + +func TestContextInternal(t *testing.T) { + ts := createGetServer(t) + defer ts.Close() + + r := R(). + SetQueryParam("request_no", strconv.FormatInt(time.Now().Unix(), 10)) + + if r.isContextCancelledIfAvailable() { + t.Error("isContextCancelledIfAvailable != false for vanilla R()") + } + r.addContextIfAvailable() + + resp, err := r.Get(ts.URL + "/") + + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) + +} + +func TestSRV(t *testing.T) { + c := dc(). + SetRedirectPolicy(FlexibleRedirectPolicy(20)). + SetScheme("http") + + r := c.R(). + SetSRV(&SRVRecord{"xmpp-server", "google.com"}) + + assertEqual(t, "xmpp-server", r.SRV.Service) + assertEqual(t, "google.com", r.SRV.Domain) + + resp, err := r.Get("/") + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) +} + +func TestSRVInvalidService(t *testing.T) { + _, err := R(). + SetSRV(&SRVRecord{"nonexistantservice", "sampledomain"}). 
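+ // The SRV lookup for this service/domain should fail, surfacing a "no such host" error.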
+ Get("/") + + assertEqual(t, true, (err != nil)) + assertEqual(t, true, strings.Contains(err.Error(), "no such host")) +} + +func TestDeprecatedCodeCovergae(t *testing.T) { + var user1 User + err := Unmarshal("application/json", + []byte(`{"username":"testuser", "password":"testpass"}`), &user1) + assertError(t, err) + assertEqual(t, "testuser", user1.Username) + assertEqual(t, "testpass", user1.Password) + + var user2 User + err = Unmarshal("application/xml", + []byte(`testusertestpass`), + &user2) + assertError(t, err) + assertEqual(t, "testuser", user1.Username) + assertEqual(t, "testpass", user1.Password) +} + +func getTestDataPath() string { + pwd, _ := os.Getwd() + return pwd + "/test-data" +} + +func createGetServer(t *testing.T) *httptest.Server { + var attempt int32 + var sequence int32 + var lastRequest time.Time + ts := createTestServer(func(w http.ResponseWriter, r *http.Request) { + t.Logf("Method: %v", r.Method) + t.Logf("Path: %v", r.URL.Path) + + if r.Method == MethodGet { + if r.URL.Path == "/" { + _, _ = w.Write([]byte("TestGet: text response")) + } else if r.URL.Path == "/mypage" { + w.WriteHeader(http.StatusBadRequest) + } else if r.URL.Path == "/mypage2" { + _, _ = w.Write([]byte("TestGet: text response from mypage2")) + } else if r.URL.Path == "/set-retrycount-test" { + attp := atomic.AddInt32(&attempt, 1) + if attp <= 3 { + time.Sleep(time.Second * 6) + } + _, _ = w.Write([]byte("TestClientRetry page")) + } else if r.URL.Path == "/set-retrywaittime-test" { + // Returns time.Duration since last request here + // or 0 for the very first request + if atomic.LoadInt32(&attempt) == 0 { + lastRequest = time.Now() + _, _ = fmt.Fprint(w, "0") + } else { + now := time.Now() + sinceLastRequest := now.Sub(lastRequest) + lastRequest = now + _, _ = fmt.Fprintf(w, "%d", uint64(sinceLastRequest)) + } + atomic.AddInt32(&attempt, 1) + } else if r.URL.Path == "/set-timeout-test-with-sequence" { + seq := atomic.AddInt32(&sequence, 1) + time.Sleep(time.Second * 2) + _, _ = fmt.Fprintf(w, "%d", seq) + } else if r.URL.Path == "/set-timeout-test" { + time.Sleep(time.Second * 6) + _, _ = w.Write([]byte("TestClientTimeout page")) + + } else if r.URL.Path == "/my-image.png" { + fileBytes, _ := ioutil.ReadFile(getTestDataPath() + "/test-img.png") + w.Header().Set("Content-Type", "image/png") + w.Header().Set("Content-Length", strconv.Itoa(len(fileBytes))) + _, _ = w.Write(fileBytes) + } else if r.URL.Path == "/get-method-payload-test" { + body, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Errorf("Error: could not read get body: %s", err.Error()) + } + _, _ = w.Write(body) + } + } + }) + + return ts +} + +func handleLoginEndpoint(t *testing.T, w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/login" { + user := &User{} + + // JSON + if IsJSONType(r.Header.Get(hdrContentTypeKey)) { + jd := json.NewDecoder(r.Body) + err := jd.Decode(user) + w.Header().Set(hdrContentTypeKey, jsonContentType) + if err != nil { + t.Logf("Error: %#v", err) + w.WriteHeader(http.StatusBadRequest) + _, _ = w.Write([]byte(`{ "id": "bad_request", "message": "Unable to read user info" }`)) + return + } + + if user.Username == "testuser" && user.Password == "testpass" { + _, _ = w.Write([]byte(`{ "id": "success", "message": "login successful" }`)) + } else if user.Username == "testuser" && user.Password == "invalidjson" { + _, _ = w.Write([]byte(`{ "id": "success", "message": "login successful", }`)) + } else { + w.Header().Set("Www-Authenticate", "Protected Realm") + 
w.WriteHeader(http.StatusUnauthorized) + _, _ = w.Write([]byte(`{ "id": "unauthorized", "message": "Invalid credentials" }`)) + } + + return + } + + // XML + if IsXMLType(r.Header.Get(hdrContentTypeKey)) { + xd := xml.NewDecoder(r.Body) + err := xd.Decode(user) + + w.Header().Set(hdrContentTypeKey, "application/xml") + if err != nil { + t.Logf("Error: %v", err) + w.WriteHeader(http.StatusBadRequest) + _, _ = w.Write([]byte(`<?xml version="1.0" encoding="UTF-8"?>`)) + _, _ = w.Write([]byte(`<Error><Id>bad_request</Id><Message>Unable to read user info</Message></Error>`)) + return + } + + if user.Username == "testuser" && user.Password == "testpass" { + _, _ = w.Write([]byte(`<?xml version="1.0" encoding="UTF-8"?>`)) + _, _ = w.Write([]byte(`<AuthSuccess><Id>success</Id><Message>login successful</Message></AuthSuccess>`)) + } else if user.Username == "testuser" && user.Password == "invalidxml" { + _, _ = w.Write([]byte(`<?xml version="1.0" encoding="UTF-8"?>`)) + _, _ = w.Write([]byte(`<AuthSuccess><Id>success</Id><Message>login successful</AuthSuccess>`)) + } else { + w.Header().Set("Www-Authenticate", "Protected Realm") + w.WriteHeader(http.StatusUnauthorized) + _, _ = w.Write([]byte(`<?xml version="1.0" encoding="UTF-8"?>`)) + _, _ = w.Write([]byte(`<AuthError><Id>unauthorized</Id><Message>Invalid credentials</Message></AuthError>`)) + } + + return + } + } +} + +func handleUsersEndpoint(t *testing.T, w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/users" { + // JSON + if IsJSONType(r.Header.Get(hdrContentTypeKey)) { + var users []ExampleUser + jd := json.NewDecoder(r.Body) + err := jd.Decode(&users) + w.Header().Set(hdrContentTypeKey, jsonContentType) + if err != nil { + t.Logf("Error: %v", err) + w.WriteHeader(http.StatusBadRequest) + _, _ = w.Write([]byte(`{ "id": "bad_request", "message": "Unable to read user info" }`)) + return + } + + // logic check, since we are expecting to receive 3 records + if len(users) != 3 { + t.Log("Error: Expected count of 3 records") + w.WriteHeader(http.StatusBadRequest) + _, _ = w.Write([]byte(`{ "id": "bad_request", "message": "Expected record count doesn't match" }`)) + return + } + + eu := users[2] + if eu.FirstName == "firstname3" && eu.ZipCode == "10003" { + w.WriteHeader(http.StatusAccepted) + _, _ = w.Write([]byte(`{ "message": "Accepted" }`)) + } + + return + } + } +} + +func createPostServer(t *testing.T) *httptest.Server { + ts := createTestServer(func(w http.ResponseWriter, r *http.Request) { + t.Logf("Method: %v", r.Method) + t.Logf("Path: %v", r.URL.Path) + t.Logf("RawQuery: %v", r.URL.RawQuery) + t.Logf("Content-Type: %v", r.Header.Get(hdrContentTypeKey)) + + if r.Method == MethodPost { + handleLoginEndpoint(t, w, r) + + handleUsersEndpoint(t, w, r) + + if r.URL.Path == "/usersmap" { + // JSON + if IsJSONType(r.Header.Get(hdrContentTypeKey)) { + if r.URL.Query().Get("status") == "500" { + body, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Errorf("Error: could not read post body: %s", err.Error()) + } + t.Logf("Got query param: status=500 so we're returning the post body as response and a 500 status code. body: %s", string(body))
body: %s", string(body)) + w.Header().Set(hdrContentTypeKey, jsonContentType) + w.WriteHeader(http.StatusInternalServerError) + _, _ = w.Write(body) + return + } + + var users []map[string]interface{} + jd := json.NewDecoder(r.Body) + err := jd.Decode(&users) + w.Header().Set(hdrContentTypeKey, jsonContentType) + if err != nil { + t.Logf("Error: %v", err) + w.WriteHeader(http.StatusBadRequest) + _, _ = w.Write([]byte(`{ "id": "bad_request", "message": "Unable to read user info" }`)) + return + } + + // logic check, since we are excepting to reach 1 map records + if len(users) != 1 { + t.Log("Error: Excepted count of 1 map records") + w.WriteHeader(http.StatusBadRequest) + _, _ = w.Write([]byte(`{ "id": "bad_request", "message": "Expected record count doesn't match" }`)) + return + } + + w.WriteHeader(http.StatusAccepted) + _, _ = w.Write([]byte(`{ "message": "Accepted" }`)) + + return + } + } + } + }) + + return ts +} + +func createFormPostServer(t *testing.T) *httptest.Server { + ts := createTestServer(func(w http.ResponseWriter, r *http.Request) { + t.Logf("Method: %v", r.Method) + t.Logf("Path: %v", r.URL.Path) + t.Logf("Content-Type: %v", r.Header.Get(hdrContentTypeKey)) + + if r.Method == MethodPost { + _ = r.ParseMultipartForm(10e6) + + if r.URL.Path == "/profile" { + t.Logf("FirstName: %v", r.FormValue("first_name")) + t.Logf("LastName: %v", r.FormValue("last_name")) + t.Logf("City: %v", r.FormValue("city")) + t.Logf("Zip Code: %v", r.FormValue("zip_code")) + + _, _ = w.Write([]byte("Success")) + return + } else if r.URL.Path == "/search" { + formEncodedData := r.Form.Encode() + t.Logf("Recevied Form Encoded values: %v", formEncodedData) + + assertEqual(t, true, strings.Contains(formEncodedData, "search_criteria=pencil")) + assertEqual(t, true, strings.Contains(formEncodedData, "search_criteria=glass")) + + _, _ = w.Write([]byte("Success")) + return + } else if r.URL.Path == "/upload" { + t.Logf("FirstName: %v", r.FormValue("first_name")) + t.Logf("LastName: %v", r.FormValue("last_name")) + + targetPath := getTestDataPath() + "/upload" + _ = os.MkdirAll(targetPath, 0700) + + for _, fhdrs := range r.MultipartForm.File { + for _, hdr := range fhdrs { + t.Logf("Name: %v", hdr.Filename) + t.Logf("Header: %v", hdr.Header) + dotPos := strings.LastIndex(hdr.Filename, ".") + + fname := fmt.Sprintf("%s-%v%s", hdr.Filename[:dotPos], time.Now().Unix(), hdr.Filename[dotPos:]) + t.Logf("Write name: %v", fname) + + infile, _ := hdr.Open() + f, err := os.OpenFile(targetPath+"/"+fname, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Logf("Error: %v", err) + return + } + defer func() { + _ = f.Close() + }() + _, _ = io.Copy(f, infile) + + _, _ = w.Write([]byte(fmt.Sprintf("File: %v, uploaded as: %v\n", hdr.Filename, fname))) + } + } + + return + } + } + }) + + return ts +} + +func createAuthServer(t *testing.T) *httptest.Server { + ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + t.Logf("Method: %v", r.Method) + t.Logf("Path: %v", r.URL.Path) + t.Logf("Content-Type: %v", r.Header.Get(hdrContentTypeKey)) + + if r.Method == MethodGet { + if r.URL.Path == "/profile" { + // 004DDB79-6801-4587-B976-F093E6AC44FF + auth := r.Header.Get("Authorization") + t.Logf("Bearer Auth: %v", auth) + + w.Header().Set(hdrContentTypeKey, jsonContentType) + + if !strings.HasPrefix(auth, "Bearer ") { + w.Header().Set("Www-Authenticate", "Protected Realm") + w.WriteHeader(http.StatusUnauthorized) + _, _ = w.Write([]byte(`{ "id": "unauthorized", "message": "Invalid 
credentials" }`)) + + return + } + + if auth[7:] == "004DDB79-6801-4587-B976-F093E6AC44FF" || auth[7:] == "004DDB79-6801-4587-B976-F093E6AC44FF-Request" { + _, _ = w.Write([]byte(`{ "id": "success", "message": "login successful" }`)) + } + } + + return + } + + if r.Method == MethodPost { + if r.URL.Path == "/login" { + auth := r.Header.Get("Authorization") + t.Logf("Basic Auth: %v", auth) + + w.Header().Set(hdrContentTypeKey, jsonContentType) + + password, err := base64.StdEncoding.DecodeString(auth[6:]) + if err != nil || string(password) != "myuser:basicauth" { + w.Header().Set("Www-Authenticate", "Protected Realm") + w.WriteHeader(http.StatusUnauthorized) + _, _ = w.Write([]byte(`{ "id": "unauthorized", "message": "Invalid credentials" }`)) + + return + } + + _, _ = w.Write([]byte(`{ "id": "success", "message": "login successful" }`)) + } + + return + } + })) + + return ts +} + +func createGenServer(t *testing.T) *httptest.Server { + ts := createTestServer(func(w http.ResponseWriter, r *http.Request) { + t.Logf("Method: %v", r.Method) + t.Logf("Path: %v", r.URL.Path) + + if r.Method == MethodPut { + if r.URL.Path == "/plaintext" { + _, _ = w.Write([]byte("TestPut: plain text response")) + } else if r.URL.Path == "/json" { + w.Header().Set(hdrContentTypeKey, jsonContentType) + _, _ = w.Write([]byte(`{"response":"json response"}`)) + } else if r.URL.Path == "/xml" { + w.Header().Set(hdrContentTypeKey, "application/xml") + _, _ = w.Write([]byte(`XML response`)) + } + } + + if r.Method == MethodOptions && r.URL.Path == "/options" { + w.Header().Set("Access-Control-Allow-Origin", "localhost") + w.Header().Set("Access-Control-Allow-Methods", "PUT, PATCH") + w.Header().Set("Access-Control-Expose-Headers", "x-go-resty-id") + w.WriteHeader(http.StatusOK) + } + + if r.Method == MethodPatch && r.URL.Path == "/patch" { + w.WriteHeader(http.StatusOK) + } + }) + + return ts +} + +func createRedirectServer(t *testing.T) *httptest.Server { + ts := createTestServer(func(w http.ResponseWriter, r *http.Request) { + t.Logf("Method: %v", r.Method) + t.Logf("Path: %v", r.URL.Path) + + if r.Method == MethodGet { + if strings.HasPrefix(r.URL.Path, "/redirect-host-check-") { + cntStr := strings.SplitAfter(r.URL.Path, "-")[3] + cnt, _ := strconv.Atoi(cntStr) + + if cnt != 7 { // Testing hard stop via logical + if cnt >= 5 { + http.Redirect(w, r, "http://httpbin.org/get", http.StatusTemporaryRedirect) + } else { + http.Redirect(w, r, fmt.Sprintf("/redirect-host-check-%d", (cnt+1)), http.StatusTemporaryRedirect) + } + } + } else if strings.HasPrefix(r.URL.Path, "/redirect-") { + cntStr := strings.SplitAfter(r.URL.Path, "-")[1] + cnt, _ := strconv.Atoi(cntStr) + + http.Redirect(w, r, fmt.Sprintf("/redirect-%d", (cnt+1)), http.StatusTemporaryRedirect) + } + } + }) + + return ts +} + +func createTestServer(fn func(w http.ResponseWriter, r *http.Request)) *httptest.Server { + return httptest.NewServer(http.HandlerFunc(fn)) +} + +func dc() *Client { + DefaultClient = New() + return DefaultClient +} + +func dcr() *Request { + return dc().R() +} + +func dclr() *Request { + c := dc() + c.SetDebug(true) + c.SetLogger(ioutil.Discard) + + return c.R() +} + +func assertError(t *testing.T, err error) { + if err != nil { + t.Errorf("Error occurred [%v]", err) + } +} + +func assertEqual(t *testing.T, e, g interface{}) (r bool) { + r = compare(e, g) + if !r { + t.Errorf("Expected [%v], got [%v]", e, g) + } + + return +} + +func assertNotEqual(t *testing.T, e, g interface{}) (r bool) { + if compare(e, g) { + t.Errorf("Expected 
[%v], got [%v]", e, g) + } else { + r = true + } + + return +} + +func compare(e, g interface{}) (r bool) { + ev := reflect.ValueOf(e) + gv := reflect.ValueOf(g) + + if ev.Kind() != gv.Kind() { + return + } + + switch ev.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + r = (ev.Int() == gv.Int()) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + r = (ev.Uint() == gv.Uint()) + case reflect.Float32, reflect.Float64: + r = (ev.Float() == gv.Float()) + case reflect.String: + r = (ev.String() == gv.String()) + case reflect.Bool: + r = (ev.Bool() == gv.Bool()) + } + + return +} + +func logResponse(t *testing.T, resp *Response) { + t.Logf("Response Status: %v", resp.Status()) + t.Logf("Response Time: %v", resp.Time()) + t.Logf("Response Headers: %v", resp.Header()) + t.Logf("Response Cookies: %v", resp.Cookies()) + t.Logf("Response Body: %v", resp) +} + +func cleaupFiles(files ...string) { + pwd, _ := os.Getwd() + + for _, f := range files { + _ = os.RemoveAll(filepath.Join(pwd, f)) + } +} diff --git a/vendor/github.com/go-resty/resty/retry.go b/vendor/github.com/go-resty/resty/retry.go new file mode 100644 index 000000000..6f65e3012 --- /dev/null +++ b/vendor/github.com/go-resty/resty/retry.go @@ -0,0 +1,114 @@ +// Copyright (c) 2015-2017 Jeevanandam M (jeeva@myjeeva.com), All rights reserved. +// resty source code and usage is governed by a MIT style +// license that can be found in the LICENSE file. + +package resty + +import ( + "math" + "math/rand" + "time" +) + +const ( + defaultMaxRetries = 3 + defaultWaitTime = time.Duration(100) * time.Millisecond + defaultMaxWaitTime = time.Duration(2000) * time.Millisecond +) + +type ( + // Option is to create convenient retry options like wait time, max retries, etc. + Option func(*Options) + + // RetryConditionFunc type is for retry condition function + RetryConditionFunc func(*Response) (bool, error) + + // Options to hold go-resty retry values + Options struct { + maxRetries int + waitTime time.Duration + maxWaitTime time.Duration + retryConditions []RetryConditionFunc + } +) + +// Retries sets the max number of retries +func Retries(value int) Option { + return func(o *Options) { + o.maxRetries = value + } +} + +// WaitTime sets the default wait time to sleep between requests +func WaitTime(value time.Duration) Option { + return func(o *Options) { + o.waitTime = value + } +} + +// MaxWaitTime sets the max wait time to sleep between requests +func MaxWaitTime(value time.Duration) Option { + return func(o *Options) { + o.maxWaitTime = value + } +} + +// RetryConditions sets the conditions that will be checked for retry. 
+func RetryConditions(conditions []RetryConditionFunc) Option { + return func(o *Options) { + o.retryConditions = conditions + } +} + +// Backoff retries the given operation with an increasing wait duration between attempts, +// up to the configured number of retries (default is 3 attempts; override with the Retries(n) option) +func Backoff(operation func() (*Response, error), options ...Option) error { + // Defaults + opts := Options{ + maxRetries: defaultMaxRetries, + waitTime: defaultWaitTime, + maxWaitTime: defaultMaxWaitTime, + retryConditions: []RetryConditionFunc{}, + } + + for _, o := range options { + o(&opts) + } + + var ( + resp *Response + err error + ) + base := float64(opts.waitTime) // Time to wait between each attempt + capLevel := float64(opts.maxWaitTime) // Maximum amount of wait time for the retry + for attempt := 0; attempt < opts.maxRetries; attempt++ { + resp, err = operation() + + var needsRetry bool + var conditionErr error + for _, condition := range opts.retryConditions { + needsRetry, conditionErr = condition(resp) + if needsRetry || conditionErr != nil { + break + } + } + + // If the operation returned no error, no retry condition was satisfied, + // and none of the condition functions errored, the call succeeded; stop retrying. + if err == nil && !needsRetry && conditionErr == nil { + return nil + } + // Apply capped exponential backoff with jitter: sleep a random duration in + // [temp/2, temp), where temp = min(maxWaitTime, waitTime * 2^attempt). + // See the following article... + // http://www.awsarchitectureblog.com/2015/03/backoff.html + temp := math.Min(capLevel, base*math.Exp2(float64(attempt))) + sleepDuration := time.Duration(int(temp/2) + rand.Intn(int(temp/2))) + + if sleepDuration < opts.waitTime { + sleepDuration = opts.waitTime + } + time.Sleep(sleepDuration) + } + + return err +} diff --git a/vendor/github.com/go-resty/resty/retry_test.go b/vendor/github.com/go-resty/resty/retry_test.go new file mode 100644 index 000000000..854853b19 --- /dev/null +++ b/vendor/github.com/go-resty/resty/retry_test.go @@ -0,0 +1,270 @@ +// Copyright (c) 2015-2017 Jeevanandam M (jeeva@myjeeva.com), All rights reserved. +// resty source code and usage is governed by a MIT style +// license that can be found in the LICENSE file.
+ +package resty + +import ( + "encoding/json" + "errors" + "net/http" + "reflect" + "strconv" + "strings" + "testing" + "time" +) + +func TestBackoffSuccess(t *testing.T) { + attempts := 3 + externalCounter := 0 + retryErr := Backoff(func() (*Response, error) { + externalCounter++ + if externalCounter < attempts { + return nil, errors.New("not yet got the number we're after") + } + + return nil, nil + }) + + assertError(t, retryErr) + assertEqual(t, externalCounter, attempts) +} + +func TestBackoffTenAttemptsSuccess(t *testing.T) { + attempts := 10 + externalCounter := 0 + retryErr := Backoff(func() (*Response, error) { + externalCounter++ + if externalCounter < attempts { + return nil, errors.New("not yet got the number we're after") + } + return nil, nil + }, Retries(attempts), WaitTime(5), MaxWaitTime(500)) + + assertError(t, retryErr) + assertEqual(t, externalCounter, attempts) +} + +// Check to make sure the conditional of the retry condition is being used +func TestConditionalBackoffCondition(t *testing.T) { + attempts := 3 + counter := 0 + check := RetryConditionFunc(func(*Response) (bool, error) { + return attempts != counter, nil + }) + retryErr := Backoff(func() (*Response, error) { + counter++ + return nil, nil + }, RetryConditions([]RetryConditionFunc{check})) + + assertError(t, retryErr) + assertEqual(t, counter, attempts) +} + +// Check to make sure that errors in the conditional cause a retry +func TestConditionalBackoffConditionError(t *testing.T) { + attempts := 3 + counter := 0 + check := RetryConditionFunc(func(*Response) (bool, error) { + if attempts != counter { + return false, errors.New("Attempts not equal Counter") + } + return false, nil + }) + + retryErr := Backoff(func() (*Response, error) { + counter++ + return nil, nil + }, RetryConditions([]RetryConditionFunc{check})) + + assertError(t, retryErr) + assertEqual(t, counter, attempts) +} + +// Check to make sure that if the conditional is false we don't retry +func TestConditionalBackoffConditionNonExecution(t *testing.T) { + attempts := 3 + counter := 0 + + retryErr := Backoff(func() (*Response, error) { + counter++ + return nil, nil + }, RetryConditions([]RetryConditionFunc{filler})) + + assertError(t, retryErr) + assertNotEqual(t, counter, attempts) +} + +// Check to make sure the functions added to add conditionals work +func TestConditionalGet(t *testing.T) { + ts := createGetServer(t) + defer ts.Close() + attemptCount := 1 + externalCounter := 0 + + // This check should pass on first run, and let the response through + check := RetryConditionFunc(func(*Response) (bool, error) { + externalCounter++ + if attemptCount != externalCounter { + return false, errors.New("Attempts not equal Counter") + } + return false, nil + }) + + client := dc().AddRetryCondition(check).SetRetryCount(1) + resp, err := client.R(). + SetQueryParam("request_no", strconv.FormatInt(time.Now().Unix(), 10)). + Get(ts.URL + "/") + + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) + assertEqual(t, "200 OK", resp.Status()) + assertEqual(t, true, resp.Body() != nil) + assertEqual(t, "TestGet: text response", resp.String()) + assertEqual(t, externalCounter, attemptCount) + + logResponse(t, resp) +} + +// Check to make sure the package Function works. 
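+// AddRetryCondition at the package level operates on the shared DefaultClient.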
+func TestConditionalGetDefaultClient(t *testing.T) { + ts := createGetServer(t) + defer ts.Close() + attemptCount := 1 + externalCounter := 0 + + // This check should pass on first run, and let the response through + check := RetryConditionFunc(func(*Response) (bool, error) { + externalCounter++ + if attemptCount != externalCounter { + return false, errors.New("Attempts not equal Counter") + } + return false, nil + }) + + // Clear the default client. + _ = dc() + // Proceed to check. + client := AddRetryCondition(check).SetRetryCount(1) + resp, err := client.R(). + SetQueryParam("request_no", strconv.FormatInt(time.Now().Unix(), 10)). + Get(ts.URL + "/") + + assertError(t, err) + assertEqual(t, http.StatusOK, resp.StatusCode()) + assertEqual(t, "200 OK", resp.Status()) + assertEqual(t, true, resp.Body() != nil) + assertEqual(t, "TestGet: text response", resp.String()) + assertEqual(t, externalCounter, attemptCount) + + logResponse(t, resp) +} + +func TestClientRetryGet(t *testing.T) { + ts := createGetServer(t) + defer ts.Close() + + c := dc() + c.SetHTTPMode(). + SetTimeout(time.Duration(time.Second * 3)). + SetRetryCount(3) + + resp, err := c.R().Get(ts.URL + "/set-retrycount-test") + assertEqual(t, "", resp.Status()) + assertEqual(t, 0, resp.StatusCode()) + assertEqual(t, 0, len(resp.Cookies())) + assertEqual(t, true, resp.Body() != nil) + assertEqual(t, 0, len(resp.Header())) + + assertEqual(t, true, strings.HasPrefix(err.Error(), "Get "+ts.URL+"/set-retrycount-test")) +} + +func TestClientRetryWait(t *testing.T) { + ts := createGetServer(t) + defer ts.Close() + + attempt := 0 + + retryCount := 5 + retryIntervals := make([]uint64, retryCount) + + // Set retry wait times that do not intersect with default ones + retryWaitTime := time.Duration(3) * time.Second + retryMaxWaitTime := time.Duration(9) * time.Second + + c := dc() + c.SetHTTPMode(). + SetRetryCount(retryCount). + SetRetryWaitTime(retryWaitTime). + SetRetryMaxWaitTime(retryMaxWaitTime). + AddRetryCondition( + func(r *Response) (bool, error) { + timeSlept, _ := strconv.ParseUint(string(r.Body()), 10, 64) + retryIntervals[attempt] = timeSlept + attempt++ + return true, nil + }, + ) + _, _ = c.R().Get(ts.URL + "/set-retrywaittime-test") + + // 5 attempts were made + assertEqual(t, attempt, 5) + + // Initial attempt has 0 time slept since last request + assertEqual(t, retryIntervals[0], uint64(0)) + + for i := 1; i < len(retryIntervals); i++ { + slept := time.Duration(retryIntervals[i]) + // Ensure that client has slept some duration between + // waitTime and maxWaitTime for consequent requests + if slept < retryWaitTime || slept > retryMaxWaitTime { + t.Errorf("Client has slept %f seconds before retry %d", slept.Seconds(), i) + } + } +} + +func TestClientRetryPost(t *testing.T) { + ts := createPostServer(t) + defer ts.Close() + + usersmap := map[string]interface{}{ + "user1": map[string]interface{}{"FirstName": "firstname1", "LastName": "lastname1", "ZipCode": "10001"}, + } + + var users []map[string]interface{} + users = append(users, usersmap) + + c := dc() + c.SetRetryCount(3) + c.AddRetryCondition(RetryConditionFunc(func(r *Response) (bool, error) { + if r.StatusCode() >= http.StatusInternalServerError { + return false, errors.New("error") + } + return true, nil + })) + + resp, _ := c.R(). + SetBody(&users). 
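+ // createPostServer echoes the posted body back with a 500 for status=500, so every attempt is retried until the retry count is exhausted.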
+ Post(ts.URL + "/usersmap?status=500") + + if resp != nil { + if resp.StatusCode() == http.StatusInternalServerError { + t.Logf("Got response body: %s", string(resp.body)) + var usersResponse []map[string]interface{} + err := json.Unmarshal(resp.body, &usersResponse) + assertError(t, err) + + if !reflect.DeepEqual(users, usersResponse) { + t.Errorf("Expected request body to be echoed back as response body. Instead got: %s", string(resp.body)) + } + + return + } + t.Errorf("Got unexpected response code: %d with body: %s", resp.StatusCode(), string(resp.body)) + } +} + +func filler(*Response) (bool, error) { + return false, nil +} diff --git a/vendor/github.com/go-resty/resty/test-data/test-img.png b/vendor/github.com/go-resty/resty/test-data/test-img.png new file mode 100644 index 0000000000000000000000000000000000000000..0a650146320266496de023d56a62c45b295d6b74 GIT binary patch literal 2579468 [binary image data omitted]
zr_Ptpmk0CeO@aIugE+f!p=&%m;^}5{t}pYuZ!~g#?OWphGrVdAw%0}WJx|kC($1Oo zDG`0J(~TKw&2Q^@baO&^u5I3|DC&ORZ}+sXjpMf-tt+F}hHIJE6?==7`fh_?3dH7tGVe4!Wk zf&!wWk<;xn4-C3K1#a><@N33ct?s49WmtlQ2u(cfN=e_j#cqGXz#LtE%Ffq+;DIGg z1eC;EhXKBNj;@wro|z2fdp6ZI7O}O;L81BQEcMS9xb?0iabreq&<0XITH;?M$ zceSaQW}F8_%l_rY+R;aA;G))wd28jOsLtDuCb#9QON;I6U;h(x>do!haGrF8+>zt$ zzu-B41hexZegJH``W1Wp&;xb4fk^iw3syH|yp>iUmL%SpGkhpd^qrG(jIJxfKV4*A z^zvpv1}Q$6Zu{;}4agA8L?Q|K9=`5Fc|FlNI5^iWq8yT9I=ckFFOU*6;| z;oz%7RIM@#?EB(o&bnOie8PEmnpLk-z_t)-Una~gPkS)*H2<1MY z+TJY?e|81Yec=NM-Zyh6a`heqAN_58Ofen!tFN@Y&-&_b<8avTfbKtw?c3936X+Z; z9%@xP$hcRSvwqva!hr6Jj|P0>-S=Oe$DqC3c*(H2E^@#WN5?0)#T!<|6F*G-;#sbJ z;qW&h%|~+vO`rQ#vVJ>%KLU?tZf1)0fxkc5i`@VG4T7$Kv`JFn6v0*{Bx2V_vxyDe z)T%q4xs;Bj{4uWk<0OQU#;2Z&+4rNpZ~B3+%#m|@SjJ!)t%dH@efY*{;EP2!_e57` zu-%PoaD12pC8HCW?`^H8yz#wD*~&o6 zXFcfJac&WE?!@s`{au$kZ#T;j{h+6}Wbmy8=frtsxu~Fe82g}T{9wWV2ORTlcKvYS zhq^kKms91N?JCWUL)htW^PBPMD>~Fxap_kggVi}sOl5PrOgrg z@X}3yfx%thW`%SzH1U*CPaI8)Z$j*Vu3i)C$UbcoPdma98$(hpT?ri7FM4!`DMH2s zgC8b3;|FlqIK)ivgqLu>2>WntO&3$b3r~I5!*WQUe2VLHek)8bzr^9^`toOs-e$OQ zh}uaQc~T|KSMAhqPB#|_c2X~maI1%M;zC7UZ0+`W`S7*latt#-Wly*^RZ(_`#m^R%9gVFm z#wH6|`G}HUvm@R+oTK;PjL;{bC%Fy7$T?K;*+xXC*ncxN>rY5O5dZkyjHchWpcKnj zh3tDc2gt3+&}GbqlIP)AUp$qEl_l5Z{rYQD=o-se@$doR!PDBYhT5InI~-r^3Gwna z*|{$W@`quINTPpW=ckM}w@SDPBGVd;mp9W5fISBKvhs|nHygWYWZxKk@}qAZ0qpE; zS9}}U{~uR(gC)CibLrXAm3oj~9)Udtd#xUne@5c&seHC{CBM%`u+I6+s!RqEz{Umw z5o9v6vKG5M-Oo6Um-#TQ?Tzt3t>Sdqku5)($rx;6rK2w;sJ0!>j_t{AOgfIpxa=y8 zcE`j146AbUnroctRZH`ESBMGQwoYH)j$v#&Jo%WfajH+6Tloj#vbjOTD3cbr|-PbE7IgBq8` z1AB-3w!y?{!j69&Ckq-(8eL6w6;}-`^mT;e1^nEHCDu*kgW~h%b+t7R-ZXC8>O(XV z$*oyy>*ttmG3iP^?$h7FGviY~G;!sVz-Bzey|nZciEr(F3LW1%d}|wjz^pSTU&$}$ zEX)!6#uBdJ2;k1*F3Jl-Du30xm8R~_%uOLJtLaD>yd-+Og;GW^NJJ{@{BR!`Nk{WxLxa0c@9 zzP5T@Vh?Ww;lf1HV{>Veq#vjF!QXNFoZk&P0bk;Fy)ZscF9@w2flkf3yX)gP{K{=| z$KwCy0vdd+i0e0;MvgNJ~AyT){cc79D@j0UdzxX}5i-u}gfM=^R)E={ftcD=wp951pjr*`)ucCI@y zje~izSoEn1kkje67?#}CDW)AKpU-!LkyOnz?=QB)#6il9#ht_X;OjeAbj&8H zTL8LNuQNJdGlreco|xmEB5o7ci9WOGJypW3Nvpx@6N_`rT_4VT+U&mdlO~U%I8dJW zr1WAljK3*e;|V@{ea2%qX5TpIvrP|? ze2}>g`M&PS!nzoiGby-np#Ez-I~vsS9n}hKqMr8p#X+{W6I{ow1BpbiJ|f(A{X`nj z>U3)yeG0cThK}*M_^Y1L)dnVTZU8~@d>|eb}Ih%mZrQP74apPR2FEL({00)vqB>OgBg znJkvaGhP=DjN`Yjzt|cnegv;ZG8^aE%rU>_$-Hw@{@l1Hcha&qw#&;Hs+IJ;V<5^D z<}uk`-pz2vNIssL?<+xQH{ZVb1xtJ52JCI-sP*NO@PXULQjLW3E5^$M=*v;D4XY#W z;NzLE`$1ud+gq2t@v*odOc!@9FW#$BY;byg98USZO2l8d-rBNJtoLe}&U}oYKsshx zp7GsK^R=!w#%gwXITy=6&dJSHaon%5&Cr8QKPmXV<77G=e`*P?FMPtq#%{ki*HgF~ z*csj$7SFJhl)uCn?+jojID7|Tn7@}hROB-~Ypi(o!>IKyy!&2swZzEU@Gz6W=&Y_7 z4Gxo2(o)dgwVV3x!XSEbC-ohasGW4rAQ?g(q$q%;oR@Vt4DXNL4c||Z&fRCkqV_Ia zH&nGRAb!~lx81i?54X-c3Bk{9{!+3nQ|;L&-^q9VIzvRw7HJl`W~t%vgEy_~ToNd< zY0m=B>DF8V6!F9Sib+TpNYL!=a1bj^gL4;}oBN-7x?7-(M<=v-9l!i~x2CbZK{Xu= zbl4w9+$Nb)Vma5}Bhfcji(z)j|KR7g@kHi9UM;)=G8yETOPjs#6?ZlaVUvr$ax;Ge z4M+szA5Qigbz_)p8EbzhD}dF@aBJ>n((B7^le_q3=Y9`+&d|lEPM*E^_DKvEBY3BA_AuU7o=*Nav=f(W zm(vwbx;yYBSEn-`4%`56lV?cT=6>V*Br|rRT1@RPQSD!>)&PCtGPbqYY>c%|P)i?T zIMi<*=)8oD*Nm6jav`>_9={y@#K8yP9?6EcM)=|R*+sBIkLQ_Pu6{q3^Q{2~@*vYj zv)$)^Jm)V@5@egg8gJSlQvY|>;5059IsasHhXFsFWue+!O}p3@PWJm#+6aijMt<|! 
zMZBEBNM5e@$u4R=7Ukz#4%OGFhKYxmX4o@%zIRi2v5(!2W4On$eeXi!`KC~K&J^E%YgXg)YJKs>>rVDj#8+Kap4%iL+*<=;0#tI|TwJN( z^yPrB7ypq}B?&Tlga>;$g%!5%X@1-{554m}`~?^uf2oLTIj+Hn?g8s~?fhG(9AEv^ z9=6;W=Q@TlOtEO5a*L{E@5w+mF!A(fgD_0eCYYxXUGq z_XF&!@1J#;E6Lp&@%dd)7b^m8fXt5=?p(CFUUFV^K^w5s%HZ0Y*U2;cdM=<2ae=)d zOF{T=ym#H9FzWY--ks3sj+WXe$FO_MMY*sqh;wzSV*&E`u+i>mMmxBsbuF8HuXi_f z?uBIh)Pql~@Ew}-@ce2&K6g%nCAT9{Uol&E)7d$r{bh$;t>L}h5sXt9)k`c#aqF@6 z_`*LPtJ{uN3&iX1&?l5~x%1BE9KOb#YB(+)asR0OBB+m%C)EA9-Hv&P$J1aEm*IB)K^5 z8!x<(`%dV)q3DxiX?l^;TCq8@$(-F>;-_&Nh`UB|+zZlvj0GLLd$Ul>mYsR7ju%eH z>}le@gZy+jzq8TSP|J1vDj?dN_jtlVlviSXp09BrYoFjSQ%U*b`yB6hc}{71)^3WK z-VK17A6CBA^0&^n=3^@n%yp5A9SoJ2PlZ(OVyUZ{0+ z+)Axd`0>i8e#UHdt<1@v8o9ACzHj`d`L54y9?X)LjMBSAEPmjA?zLC_naEO z*F1`Ffj}~MXzKHR+nVS{4-3J8N zJw&pxD)7GEC z5P#)T&d$%-eQOrIjo~=m9fIRZe!P!uOyzQ%&#C^Q8jpOB>pQ2e?%!NdXJ|*I?aoKy zU}h}#J;!pB7R}XQeXgmtt~5vFerpvDG{c-%$F+S5eB%m_eiZlDALc)2j@1wnl@`3^ z(te5IPkiJFmM^S%NnR{$w13`6OO5H!>H>Y-(iGct(*d{FE8?l{e%7N!&@GW-*+7Nu z!x`yp9f9AAfV=4mLyZ1Xhu6ogIrguA^p7jE`J-zp|7h6elfm8uV2h=k6kgoLSdKCm zzpv1_?_%y4&mK-Q0&@*09vk_$tqwL(@XlujOL0EVUvaQpg0ts1e(tnd_k39C)@oeL z*YwK?PqYLuOPmq?lAZ5Qkic9?EiHv_gcO$eM{qrBor+AFl zU-o~1pk%Z2uZU}_m-y|vnS9a`V$Rp@EvM$^Mtim!A3RbVRx#gtPP?sC{P17B#fgqV zX%h(+&K2f{uRZx?uP-hyw{~h`=g$s~qfb8@i)z9r$HO#|b6o4rRixekvrh+5zqCZP zuPkXiZDZZjqc^>E)Uz_E@A&ZlOMfxK%!lFMKfbNb&7_UvuscdN_r`&fbRAa&0&MvHn_4Ug)voH1!f1(a>F>c?u5CP(gtY6#5Zxd?rS1W2t z(ZSI!?m_MEA)bAG^&Y(k2zCYg+%uv@zCH%Fjq5cJI4yPGi_htPcHGP1JUx9oI1|Tr zhxhDYp*Ojd$1T)DEL|4;j4TiopUL)Kf!NfXylzNp3z~vvKNh^T+nFzXZ5tLPx^kzA zk6XK3%EGTW+;CXDOujLBSgX0O?hE4j^I~C`LpTw{kry>3UpUyqL~?k9ZVchZdK>?nQq;sM3sxOZHe-^she62 zIXhlS^0kYk^rXRnZqD}ri&Xbb=&R+6y+YjtzC$xe)69fIy8ji z%>uzG9bW#Hr+h7Z=Hjq#yLD++vm@UNsI8|R6c@PRoA)2*jawbg70~u8=XA6AoMrgc z7B63&!2FIkA3EM94$k@x)B99%hZ~P%%Y}aH^<4}t)LSf?6DBznx)NOzu0iW}am3qp z=NrNu28nYARLOKvoqhRQmzvY{6qsh?XXl|ZZ0_u9tk{F;Y4O2Q8)AvW)}yzME)SI3 zj>C8G2*?M=Y!`4`-#F}e=b&spJy#Cj4aQ?PZ1p+hO`7nG(FFXwE&N@WB?7mn_8ZSr zp56MaK|0_VbGvxXk30{1PwU?z*9+Cvh+K94_)DM2;e)Hs*Yf|yX;*#>&IzT}`^h^h zrd_!0AA9P1=m|U4ABXu7s-6HG_hpL1SlCyz>E^)L%R{Pn zar$F8dp%3D{B;xia?*aW@|TP779HF$&+nZ7U~s^4ca`_4I3%>6|Id&e8awg1x-_luLCaMK^w z))s#HdHq|*c}5(rxZ*?yrwd36LOG7t+Qq)%oje)aLb>^!D!gwD>U(Hu!|6DIhmC~o z)%@EpZ)Y|BKm0C>#`k9}9LV7wXnY*1pSRsL@8O*WVFly~%;Q9K9G`%^c>Mru)%W~+8uXp79o%Twot~fLc>1Thsu*Xq2LZB& zQwts9Ju^$`8@ow|OM`)a^TulgBK{1h=0mbyd+K$;^JDm4yMFqgY-sZHAT+i<%t!F8 z`MR9{A;W0z`ek9^I)sWY|j$7W3}*SMEEzJB@s1@B+Z`~5iY^Lm}v z6C2cO31Xwk5ZhJU?uXQwteV=NUC_n@?g80Ai`!taZGkk!0C9zK>lNcDoW^xiIs(+| z68yq#ccUf@10;v^sSOiJlIIFvJXZ6lS@k_~*hMw1NW<$|bKsPcs)FIsn|v!YX-QO< z-A-D=dOcKetD<4lI~8&{iaMniJWae6(?4Axg>tFQ9MTGAj+cI(tn5Wjwz%4ZI&ma- z-`9IIhtKn*!zy8m;scT|zL!AQN49c0k)8#B+d0hAj5+tkXXn?8oL?t~lfpYksM=fB zCA>M$l)mcuGEotC4lyz~EvDaCb4ZZyt$0=Msu;L$&E6%O+Bo+IKG^vQf8`RS7*0LW zPKUcyY`L1^EF8gFX~~?okZiwekz=2Mz}nxgsk3j+rjG`G$$rkeX?a?kkz|EsJT@ww z96CrE==W7K4Vs*$O-*mD*LO$%Y}g_Wde1zmoNFkrm|GZl^~4$}So|T+37-N5S`EsI zdI!V0=V}&Nj#uMR0$(yxR3=};S^uopZvA(kRclh*9Dl-lqw4baazS?i$uj5(>s*5}1AClWjR1@ z%-UGH$Hg`vdqAsgF%vFdW6& zczp32>)h!S&7*QU7;zcgfBXi>Jat4EIE=VE?I}M~u>bTkLdShOD!0fSy%r0_r+7J) z6`&R-KE+rTxt|+!#+zojn}g$)o*7^TG2$u52)#69|JB`6MD_x(r0=S7`2y+ zj?)$rw*#8hbCs7%)421@gu5FGpO+KhjuVDgGbSa^c`?5u=*Qa08f6~O4GT@ZGO&A9 z(N&Gx9pyF|-J@3@-s*yRzPw(4+go{?afFaMlh|hQy2(K&W9sv3R^x;GiDcTr8+FJ? z1EdgVc~DTAlH@=vZd!PL>c7YOy^nJvqcF!MIoN9BXCClp1v^x|9-HJ?J>%6+GW!p% zu!w1fy zmoyf7Ecj|^Ns|T7R4cQIMTvD;cEaN-*FEqh=cPI(BdHL-sFWxZkYG{2|1um$RLW7uxGcg8}+_g$J`t~8$B z7rRmMZ>OnG;8ouxmSA*g`vfqf%8F$|KAWVzN=YhJc~COj6?{;*c@>brR@mADbF>v! 
zRX#ZS@~1sM%|Mpp6`xXTSdt1~6DNRe;tF>Cd{(&c&jf6-dop&Hv+@)a$|nDaCZLo4R0u;v&%u{b>_g}V+tkkdi?jy zqk{%u9}sIBl}mr-G9@*@;wj?|!wdaHymh|mCY>@6y62~7{RiXE5%_6c8}(5+Fjm0W zH-v9Q0hKh^<5U^c`*gVUYKn^H*MKjBGvYQz+#JV*4zJpYMUsqxt*v54h-2=bNE;7A z@wTC|>2VDs2i*x(Fwo{&2W3X;?!J%pew3Tsj<6~yqpRz2>%HaYvHDE>(-Ct-j+ecr zxc}Ixq#N-Mg~^XHrxa&#O{Z7$qq^L(xC`dAyaLd{6m<)*3=QDYpbW}-T-sgndol|@F@xtjx*UQr!^$d5^zKAj_7e9qW7 zDSwh0P7ZQf2q?6~_O5V5IXOv#Hat`!#)B5`nRqkI{+L!)z4%yNQ#JjU=J;MZ z;%lQ>`tM>aWuP{LZFz4@ws(-X%pkfS}bea&|loVImCz}m1q!@ z(}d1O-B%lSx4#bN9WWFm=EBwv@BTU4*WbK)I{e7^JY2R3%d$MA+Ubq0uEpiv^&I*% z0*TgEDWiFomn#MB`UCp<31@S>O}rZBk>M#7@VKS5M&E(9P$vhZonzB+2ffsZDy9P`Y#Y%)pnm-JJD9u8zSZqF_n*Jx!FFJtQBiwc7pm6Z}&E>{0+TW)vaA#c4M zXYX-)#(I6I@|X3&2TnJQFj#(nSI)FQ_Z~jW&N5xnLf|V}!?79N;LI3`qjeuw;>HhX zYEKgMd9_cCztbk|QhE46wdb`=vBJxhHl_X(VP9jn)qZy!?hT+~z9YYm2!hTw5&2Uh zAGK0ykgfy8@&`h=7@t-&{eEZ5Qv|xlEbVzBfkL<5VT9UNPJ8nX)UpfI83o%R_4@ObGh1x8IT;fBw7FCzR# zj8@ZCzPmVGqRVNtB;(?N-SuKhAk1{Cam0T>drZ;dbjkkIIeuSkk@B~eR29jlsH*a! zC;tTO5rvYB+16c0U_hr&*&6=~b|hkKgErVuzCbH7TU-a5po*Fs(tJl3RT6=%PA4=G zQLE!v*?ljsiF@m_Shex1^k;5k&_TQ@CAn+6&>o(Ytdyh}JL6&c(x(l9x%y$I zSzwhxjBXYZ5!BE8bu|P6U8GfIPei4;0Nvx5Q|k21AmZ=s@VM5}cgz2Yxgf_Lx$mNN z0=Gq-4#b1fjT|siV$XyDak7Z8^7}RPQU(;Z)0jiRQZD6 zHXsm3`azx4%Cj)~N5sh2LA%L?Q8_ z8qy7|*r3=GDaI`;W<#}Y2V6b$#W%Yghrx4P-kl2t?~}+HrLBlVKlK)7s|K(6PdmRj z8-qrrXPV2yfliCoM0|*F*(IUpNwGkkI=TA~xooyqe_c4L1ErqX_+CETPPkb+W1NmtnB7xX|4JT18 z7QS}JR5s^>pL}|@p7Xg@1g17PD}BgP=lQ#noR-e#5g%B9=5R=OZDwI0{@@GElYRe+ zT20zyQ(*$l;ifJB6^R;U!I5$yWz6u>r7s{{oOHIksv3I+zD$<^+~0tV$7^omzOQ`} z8tw6dS?1jpi3WEHXkGJ}MGq38CSzu2HQ_P9d<@9=|5*US+pmr2Gh-~8U8W)~JT9qQ za~03+xz?rQ+=jXby2E}JVcK`C`Aq-T5U(Ru9J0C+)=* z{Ln_=1bG_6J9@)Gd`}diY$MsMP1{ISV@h6rJve#GK_e=t<~BM%OMe3uo3ke^YT58L zQfAd>z@oJ+#1=$|g*f}XAj(7CP55?P*HB-!QO2P2l`VThWG~wXMk_s(F7i%NwvHI6 zW$Bom465k{0nsHpLiBTca_T=UWayG)7I6n_w;r{kyP1jy%yoTEg2r;a*ZQ^O7o`f& zH(21F;$9UW_uEp;H_z~zvu9~$np#fccs*d`hic(=AM#&`O`JP-qPB_7r^Cf+5^)y4nr67dtVw$A=+@iYPN68};8 zenN*kG+Hc-*-E*NOT@=+i?1XC#h(!pj@-7B!fjDI5{=bxC?U z-AOA%P4-Tf5vlklOk=9}Y1Lk_{I-&p97H8~pkHh7Ao5M#ze_^({7zopexOxiM5LXi zcV#76C`#QnSf`1wWL(U2g8Eagi+P!}+;~y1`Kzdb>^&*t;t_IB!IJpXEW9&t7^D6`zT<`-O4I99(pF z6Aky{WF@iVSN*g|6#*a!!XFJ{_|!cY0h7AbGToUwap+IsXZYo>7TjI{w0i$CBp$qq z0`^<5HDzvajUejc`3Plzd~F&jr8v+&r!9j6T~Omct-06fXzo1e?y;I&EXYkuNnKiB6w=p7tfjrHRV4>$Ntff1q}8bl2PP^r zatn^r;AucIv?G!tF?H_u24$Q)>MY^j;-A^Z?@Kc2-6|M|!yo1DiSz&E@Ncep)Bv%2 z=dqnuR0rSWk7XNX&0ojaHMhCUUmf^O97nL$CC{w?n3HW{naM3FQ4IjVYcwY0^OEze z1oYl4$1+!F7C^Rp$mF+b{`LbTznKXvzd|8D0112X(D@5D~b#5KJ0HVPJ` z-?~i~P4NqE^i`Bl{x>n>_Vd)ac^6hM82ZZj%F}+F&*b#Oe=Z7ADI@UikAk7z(RKE_ zJ4~ZcV7O~X=HhyarduCSq|4H%l#T}tq2w~GDtPIH0zz|^&J&OFWz-v>WEYFID(4~g zlE+7Fj@zv`@0LAmc4zWyoq>j4(gF%emN+<~0|N`s`FJBuaOo;7%Mp{~rA{vTcLvMo z%|&wye&<3@(i7TY}cTqg%A>_N>dg>5Y@vTSws_D$A>BNdFiIk zBOAl=)ss)I>0WRBzP|`A>cky5yibf1k`A0^jCHKwb!FX0OW9eT>H<1T&?EKT$sB8J zpdO@%Mr$g|HfI#4RV9VKasUVX&A~j5!yJ&PX!XD@C~Y}+TrEGB6(BYT>uhCtwG^Fj zoa>dzh(DiCMtxW7W<&z-NWaYiRIEHzT``Jn&7F~u%}`4r_xvJg2*!ZiOwm&z_m;0= zBj3>${X$%qLQ?)H4#_yZevGPMc`}@1JNYuP0JILlo z^FaK}^iLZ%l~)^BwZepbj>%AuC4qfr%QxT%jbPt|Y~h&C*<&KYT2xYA7xo_ZG^Wy* z6E`@mVsNxe=^mO-W}<>eK^aY$-M^mhy+!W@1#HXTaY$Pxcg_qLJT9UQX*;zYq!!zh z>YIQ=`~mhP6z+7YX9`C~a9ZTo$qNR+>2b}~Fvg5F9dl$s0<$;lh@Hlz(@M0WZ#BCbl zs1S6n>DsyVQzsu{Q=|M?7DktzHHHci$4{j!GeGwQt6a(==khKmF2r~*2roS8S}iZ# zYg9wCg6ml=Rm|-V2|eq|5?ri8Q0qF7`$Ac9W+L~w);N~?hUWC^Z-?e;Y@wN=NTw#m zAx-XYZ^{b#(bBK%HyKZ=!>7k&Z;hRo*X)9}lXPN@FYJEHjslYWdz!+t?)Hkq6DIwR z8SMbW3PZ(`y>FXR4|kzE!dtKUi^$_r>G06jS(zVFT8cJq1_+#A^-S=;>;dm|_aN^O zit&By@{-ZldDjg*_E*quy4X}C>B-Ty*m~K;#BZ`OmrA}C+yQ%nic&Z(J+>Vx^@ODc 
zz3-@QFQ=o<&z37`wgGNpI)r|LI|s8~*hSL3l1x7I+qWj&X;ypnua^1&IlB=|8gakS zbqyBXT?>fV(B!d?=JR6}_^~3d0Q;ZMf^uR-J6>y-vbnRj{v>J_RM|;dwWv$FdH@#k z*^UwyjPPFC#Q35{w#sMmI>+{Z-j#^##V(T;S!Yq*_}a-==7*@4`cL>-+wgS!cqWB! zpJTwve^g}ksfl;E>QGqgtaX+G=hoGY2qoudsm8v(zH0yBe-JPG#``Sbd59GE*1zN zXsS+!t;@S#8=ym)oFY}%<3(GAiLz;`4W;=R^=;)8ivmFWGT6v)>eO<+_A#Aesa@3h z$mshc5&X>{^R0&E&-3~aMnUME)>W<2ph>+t=2Xdj0^)GI$#lWnpA`Ur>b+jdA)(Xu ze;pHiuzLBnP$9FTI_d>(>c(?dvOw^Xn%`1`3R2tTaN>!pKX)iLcPV=N@MJ&9ptt5M zZn9&$$E787zyiKUto6CYGoR_T5Wr{Eg6~MUn5Rq-FIQcR2*zlc7s5#U{l#{~f(bY+ zn&S#z0)HDkC3mBoU;#es!Jj)ivLn8wExcQf;{Q-p$Tr^*k}%ta)7Y>;aIMG0~ z+wacmE027t@q5|Q;F~gVF=Xs$shic;nE67Qyx>e$$U*r1!_pVB!aJSQVzXD<){0Lw ztl(2k$PaI8o#EP|tv@Q#k{0YzT&Au#mrn(V!<<9MIdS?BZ=HwTy^L&bP5O?jccUR5og=XP=k7MF4%8j$4 z6YpdZr0em!X}75_A>Y z=7zwM(c%c5Z!HFl9-)skjSc>Z7e}i>HusFm&IB@_x#Qy0K1UAXqB(AS?x7zZtjUZq zN=T0LyiZQAoSYJyfk6QV;r6h+x|#MK&I3Q`@q)W`+6{XU>j2&$w(s}KTj&v3{0baL zBJnng;X-Ps7-YTByw>uz{zh+=v27>z%3u)b(b~4Nn$Q^II*B+E9{1D#q5NPSX`R$U z>k=1rvJ_j}wkJi1PwiX(8&Q74Nf{7Nlg{O#rBM z3vv&O@0|4rl;0^xvTXzMt_ZRwbt~GnxO5PXPqxQeK-3BVyx?H$%^Y7eMH^qrNih)?{IJRA8hWukUJw=1om89H(wM}V6`UAvF|}CpD^kU zO4<@?V!oibjCA9~DsOIIUmv{Qq4&tTdzR3(2+x3~&E1qQHd1_U#2mlY^5!pC04}+z zr|BWdX$49abK-nO98IiCk%W4UZRL&3L&i|?by z*M;1=!^VQ~h~O6&WjYQDC38npRbvjVv>UizeR)h?(B^|y1m3YaRCcZapJDi0hJA8d zy|VQ(WwI`-D{f{_98h*(7K<0#Z-$bZt`=2Y!{X27kJY15_deD1u00~+19_G*KZKnK zwF%WaA4D-}{#FCR-vRBc&L+*c>A6QrI&Zt~yBL~g;tD>>Ap9F2A!=8ewXekR{-yBF zPW$XmK9AIC^(>osXM_&fD1N#-V5V>C^IUmJqL^cNS=k_A``ua7Oiv@1Hb!sNBd;9O zZ+5)LIo^`j2kd%LO`wbGabN1;Yvi~|!mU^oN5ZJxnWUNxz!Qc6i%*wFKN&uCRt?mn zW4D_zaNt%wyO41vWadM`Dd)LF?;9YSL;6w7f}euRRpc&JMH{5DuCi0i(Iz;DaY;e` zGziqdfPtfEbBd51NB3e%}mBQh7a>SmWVw1F7&D0N(z?$rUHGPh6m_Hr>ODfcZhmx^b!+ zAxr@BF}e=8Bt<=P>Q(35D2SHe|>r|AC5PiPNE0%iCwVXx0N zOPS$Lij1Cgb#yxZVnIjxFuou-ziouR+mnSN`1(_K<#mf!G{9|)qRQ2z)%Hr{rEf3CP*uOx=YO4?|iL`uCt<~;*5+E{46R;V&y^=UGVT=F8-kZtSP-hnx z1Qcg<-;~p_KVFp;>pT%b%?RV#9}59fRE)yX+$ zHq}6tg|BSJbdy>)uVQPJxt+L#q>x@hkA_#^&9uCm3XP1wzqk2eIh8@W)^f@Ezv#Gv z{%*XP_E2jr>4L|Vj1wvzYO=_Wo2I>)5(ys&kDRJz9F6<_7N6&e@W=^Yexq`uL@vY~ z^z7UNB@g!)7a(7^N(S_k@8X^m)xGNja~F^Qqf|BjY2^1Vd-2(VSU5FiuACGR8_^NS z?|~V+7elvjcMuh&$kiUCa7Ffu&8FCJe-HJgRBWPjX^#4SMA=?w^tr;2oNLLK0kt!J zR7S+wPJZfYg)<`9if?YQqlbRVRjnx>PCR*W(^mS4n->e?$9k80MaYUg-l!Sy&;(ct z&-&fJm#!YR09$-ubM^Drn_wJ(@^E@k?)`IDD;AAXFKf$75y&}%?2(Aw60(?OoUn$2O9tmS9XoiD zv&#`b0kN*Nu}Z0;$_G@Sxsy^?mP$^&?2N#6t-Or*-qfCMACJ zvk3ynhhD03boe+8c_?^qXf3868$E!S!FyWg1|v4TTU)H_E7dlwrk*x$Xt_hfc6Q>x zisH^`>yBmo?7xK36zkslQ3vJXu&lY%%aXwlVIgZ*)Z4ZZ$ZXRDzSe^S2ib(b|7Z02|ZQuHzJfCxoX~Ib1gzbkY=yghov>?6*Dq6ou0aRDlA=^ ziLc0=rTTmk=X)sKEjBA#BMm@6^nQ)6)V3ITEQ}!C=0H+en^VgN@u-#WT`S?t3#q$P zm3yk-sE4g?i9U7Y<@j-=T`mO1>+ZC3+CEke|1ld#8ebZoOXN6ut(l_ct~_R`;(t_4 z_Ep@r5y7morhQYHu<^Kze+boAzL}BGL&MLLYaGa1G3w5L35<0 z8P{rVr$hpR5Lg4sigDR@Uo&J9um}c@i61#B1bZgCc^0zVB49Otj%@dQ<=-Z>Ozqh* z=a?|~hYgEo3&%e&v7CXAx)v;+YGpiqjJ`NC^>9qZhvuH|ORG)vM?Ti(asjt}2)Vy} z5-@OXFRQn1RzdxFycVK-M_rkYA+7?!x9~61{}xoyI97jzn5Oj>J$=zcu!o<8oc z<}}4z!_wQN^um+C+ozZks<1P@s?ke72b5mx*w;rPf(_qP$9}nazCF5%sc`ZtptjPO zxK0-Ex!J#zIrWnx5(J@s?ma~phO2*Ha8_y-K!orU=YE<7{_fJaRq6Y7?(RSLvtch% zWD?U7-Cr6<&)+o+1o)U(BgE$WesV?lU|lt?p`gFk^OKOn_ZmJ1H%zFG*jREH$zEdK zTX8s4{23ANvFF$T-H3QuQxm+a)_AWe9saj{$&PK3xl5KAxaXa3R;N+hqLk{zF;Mey zr9RFHYD^2Jp*Ur2Vm46v_Tr+cVslL?f^3)GdBt%ow?^rtxwz46{=}l6$H!qeQ^ghy!r*L34HS8?QAuQ~PF19! 
zeEMV^-K*xmj?PGRN${Wgw{48dRRJ6Pi7r(ObXgakcF<{~2X9OoJdX`LUn#(xi22U^ zt;EcJd5-((XZ`Kof+_wz7pIZ5+(lY_X1h8zL(|tMwd=c4uP3aO7rG9Rrui^>>Tg&0 zjod#}IZjjU^|X=fS|66KSub%l$Lr(=!$@7jfqvbiaCm5ESLS*WI&f)1sS|qR2)3O6 zNjRH_FMeSXlQp{9?G!$&mAUHsyeqSed>~G>!Jy8l&WMh$L4;2E52AhA)W~z<-&RG# z>B363zfG&FPj~1tx)jJKV^>RRIYzrQuS%9mX(oi!vC|-D@r`(+{JOBvUDfGrR{;A# zt#F@RWbL;$)(Y}Z@K825I49IYwM?p2G@8@RZCu0e&LDhkz1xn3-#qRY?0>)h9tLI% zo!!jLU7_JAh7kK4J@Tx0yDau{dRqEb87SN}@5;NjhoVwWuS(_O;~quWjISPP@L`8% z#|5az!)5>qU}HPsD6l-yi-@*!(qabSb_cxOkCj^JXw+CByuBq#S~BDWmOI#HV68KY zVuSt54`LmZ=3**aj|{Mg4bC~A6K>7eOE`3lxLO%&COPA$Vgi)!&dxcFz0WS8Yc$il z>^78=m067E!IAopE6_wN{S-oNzZ))W5t42y`*BGUnQkjz7HY}!cBHcTU$4(ZCx`!i zQ-oU6lu0q@3?VJ9Uu8U+IP9-tuitN(Wt{!JCB8vKuT=cQ?i$QJ8&MWv0g9l?QRpbZ zx7x1x>`^{?@yO^N#49}C1{Ej`R=i@An0k`wCj8r=IXLp{I zBHAU66X4H5G(>9un7M9&rE*J&djNl1P(5v{vUxzRan})SN_db zxdUn^Kex(hB*4|N2VUh~r16Ae=a3nBahl}1A_k30zN|d=u-UXLOAS~W=l`dhaCu4z zDDqZ0q(S86E6%9Xz5f20u==LPS*L$K;DT??xKi)9)XvD-^ zPg(!V??U_@m=&yX?Y+eY7i%K&1iIYYE9nvbnnp&}_-@q@5%OEPVkMOLj5io|v^SSc zaVG;xYgI3g!$K~;QeAr}?&fKjy{XnROWYqhv9<)C{kFFQso>+%5q|AHu;R9vI=ryR zwyN50vHlaL`_x9D+fh#0KC}qMu?NU5&hpz?ZwRRCAPn;6!y1M4RN31YPSLk#$$DQ} ztXD$r<{jbPul@{u(H$S@u`09K&q57!>>lK)x7{2y^uT{(u7!&%`Lr0R|3i({WJz@I zFL{Ebe~aYW2n`%w2N2>U=fnu!z@r)84l+(W5bzRRoe-sOY1TgBdp1S=P=%+ ztyVv-UP*fFY)sLB@VEy&-8_!xvy^d#Gfzl#XL^r&kbJkQt>2cfWGJHvj zXzgI6i^tgTvF~fR+0?mu-b& z-RgNNE~{Q_*=#=4AhwKz1iujYbXlljkJg$ZR=3R{>dZ>L*QE&-#J_oHKet#@^mE?E z+9Z}d*%2sV8mxXqLTJoaEsmsdJ&~4N^$#K@7q_frCS3eMsR|ar7VNb&JizG;z2M)^ zPiIRlN_Iw9#d0 z;aR;wm*L;tpX_}1-=d1)niWeE42O@SEkTmbpy@I*o@`9Sr~7e!DUUSxzJPe)f)Dy{ zOkz?RIc8DDu(sN2#BzvI$&pFBb_SukL;sb5US;ep2lT|=SN}7gfLrls?5F!uYCN(| z*NW_1p7ler9i->fP#!VeL*0Su-wnH0d+^if_c%}-AD3}pMhRTwBys-+RM;*tQy}0~mMb|RVF`M4Pve39{*cx3FoEgF>`H1SdeD9Nux2J-eq!;YS z`mE$|V957TqT5Y!7{&TM=I3;f#tJ#BQ0VP)7eQtwo7xOghR7m_cuqzDdffqF&d* z)3ziOLJC~C4h@_GdqeN1GN!l>rpi@`Fp@Y#Jml{94#T~Q?wEIj?AL7yL0Htg^v|8T zjex{v1}Kn^_gPw7yGvT&2aYpP6AQJ%4-^GO7uDOe{vH;v5bJ9e0`F_#@AA3asA!Gt z_LbiryLu-ROcG!kiAEXEu)Fi;7u;Z$KTb~MFL)sH&(rQzTj&Z*F8B-CY=$O^rmApl z6oKh8qMjc=BZbvu~BQ0E=*Da5~6cc*QaJG1+BfvZhpd_^ntG(k>;!@4n1RrEg<2tlBPsRm*o;EX-xY-SWVYeq_I`$YDn6Ue04ZippNoN+ z+?YGiH=*KN=9@soC!U)Q$lwtkFYh1U~mf%AWWf{uis&K;QpjOi?zSAX0koVAm z=&2HSdv1BFs$BmxJBj;UZOqM2weL*WKV@fz6BIvoP8Ld#j=gWTUCFJqWhX^ZM8v*3 zDM4<8T$aSVE@$@qi8M?+DZWRo)bi;Kt2xT|5CYEJQ+8ArZ7gRp)-tpk9(9jG-!>L~ zvy}{1eU)s}R40!Pd}hpUX;4yral5F83emu;v3A=z>U~ecie6D{A<*8Bkj{W@`Ja4` zzcq?c@q~#yf4Lot^6LBWIScl6<|b!BJSmd^vGv!=h_fgcWe#1vq^-nsUkArcD#=}D z-x5lers~2Lj7PmXZLJoC;)~A!Q=9KQVd-vhMBOu**`hvN*fJ-Q!-IDeho3o|ms~gA zR1*&!YWtpzIgQk>R+&vtO!M|R`uTpbt;Y?M2ZgemKdpEr=f}7B{h`9_PlbhZrEZVS)PNQvCroj3DY>vW;f}m95lR+9niu|9??$S? 
z=8~2<4B_HVubaszpF6_-jS@7Fv@t@|UR3xn29Y^Htsg1R_?g+jJ%E{%`XvV0-P$8 zpOI}L&l>VIS9YusPVk8?U52)teED4{^p}UVL#3*u1`K|1q7xXFT{yn2ZIu-zAxH~Z34HNfZ#~3Dz)qX30i1#4?kLm{@c9P z<W2r1cWo`4J0GcJFrhoUf9aRi24|4iU)s(s4O;Fg`e88uvT{*G7D32@Mb9KifBKKcy* zR#ST)1%Bxod{r_HnJ0NH(G=W8=a9-QTO$QsAzlKR2iy z!tf8O#`}8p%7To8a(AL1QasGRNT$3X7|Z_){I7EB@*!UFobaI3pFwg(t45*Z!Hf=D z9BGt0#S42)`1Fd_=v91q)xzC$;Cbj~?|ZCWKji0wC<>ja*jlMeqLxbt8M!b&%r2N$=N zU#;(_=P(6t?owU4_Z{)%?EJCX z&5FRh>OIdQ@8Fsi3hJFgEt}9Q?j;l=yNdsoWeIXGEj{XXd%jFz6Xe&!!8SCV%;p;t zA}oihdx_=Q;=h@M?hiiQ7WBvdPA0uhTx~u|$Go=SuK&b6UG%C?1^>A$9vbf9@3l>G zUx91YDcD{q07!Dg0mMDkgbkIBKF9VpaVx+MQ*B`$^$FhIR5`)_B<|^%6`TldbMHKS zP0-cB3Co$TmV!s7Qdqz}QQ*1$|1Kk_H*n$IU%sj6Z7lzOUFmIR+w~wnQl2v0FSqjD z6fA#{dHB(Ibww_dKlyk@soiRPop%JuQj`-L6|%6OD-+8F+2>pve2)l{k%egWtxmZ# zHxG@FQU5+Kf)_OT*lH;7!uTQTR7Ta-+@34@hnnP|)Q{&AiJ8w z#y{<_f8Ig`V6@V-I-RKt1pL@cip!ziE^xFQ>bK^Cu@4UhI+hxe^AN2%5E`Yzc6UAK zNlif6jbY%9qDV{;(+Jee2Q14;&lA}U;kzhd7H1(A$=_Q^Nj;gGuVxaU!XX>5&XArh z6Hhprepq#CBt|;4uof+Jz(Sn~Iyy1x&9L;n-1`#oF_h8d_q}wKrI>ai(=1p6mzty0 z+SN$GHEUP(QtA~@JBz#3;Fvjd%(JmZO1_eUSK7tmkXBz9Y-P!Ynr~+`B?@vjJQb0d z1`P8my~S{B^k)ZdxYq@ADajrzx!g);ZnXvRhtOiU>UsC5$|a7YRUsl1RHa9x<-VaE z*S^K+-@o^u{w<{Me&zaC_Wf@y4e2)8nEvfaU78IJP3?y?UjU%?hDE5vskduVz&i07 zA)<9}n-fjQxfZ_HCY-O;w6xR%!x9QDMhrjI{|;;DJ*V;cgn!z3-KK)qbut3@tCHp5 z;Ss&eev|VqC)K_O`82gGioN3U7&rX3YHS$PZ^yNDxJ>Jnx@zjvP%io^No&~MlLf^E z55rXkAWr993yWc>=DQLr(?c|qcH&aDL61d4MW~tg83+EhnZE# zi>J!i*kx`IK45Mo7fo`<_-(B=zNv;&d0Z>Pv#IzaP|md-9fj%68K<$?a=s*S%$`yH z37$HOwyDYxRDA^Xd5)bJr|X^jm7bw2`doo!N*6HD^A*|lYyW#LK@6~*wXIJCSe|O0 z*uTC<-pv3;LAArIZP@m5b7~@&Z%X5&u28bdd@EG6UecdwDo!5wxyT*EE{PWwcEL)C&d+{V=D&|6SVt_;nhRjeuI7u!eP40-LK%%8^eyS#M}A-0)CrsU%FDwzk~gy!)6 zWiqnG(Tmu*Lkfg4yqb5Do_p0fi*uUn&+?lqi5%jk7Y1+h^=6UhA3Vl1?&>*vZl_9` z?ZMc900cBQg6$4#Fx_AuB53-ciK6Qr${?YnLPsT*SnT7xL^l^-cG6%aLc#uF5u4J+ zM6>aTZ$~b8vkOmb&ivQ#QUIPqfEnbwQZr=i(4S0MXT~N`gS^NEl7VI8? ze&hfcye9JcTyDr0V~}L;?)c36p9;9Tj@0kE&Gup^piW+~L=(BexvH5FbN(CD+OD7* z5+8Nsl9)R$uVex^jsRzjnj%8v*%*pulunws|T{%AW`|mEkEXQ%* zyjK<#`;#YzQZy4=?`?ed6ptL^y=U{%Wz&{y$$yGs971@)U#08BSw@iZ9${&({*t$e~V zy%t!{r+maKeT*3qVnjJK3zoX&8^~hQgvdU-7h69b^6@{yI@XyAaUS;uTt95io>B?oIUktFb|Rq$VC{LRwO;AuQCTY05UY_=i?p1gQu;@^|v} z-Si9iF@*6+J${mAe$*;d^VQ#-=q``~?agfdmmlAyA!b|BgJhNF97KQ7sc(~IE*b0S z_=x-tGpsCSsG*!{yWK;e!1LrEhdW$odyoUIlCfMihSPTd8t^|4G_i2yUUSU+N;CpYUyt@bcdv0OX9r-__C*O{7p zdu+$$x2=}uai6)Gm^Tb>+H7#cZv*}8)pFIpS z;MQIzwNpoQv2R2}{9uO^b@$SDXnhpoHCl(4Qt~=szXVTkH22 zM>d-GAyZO6vQA{*)_)&x+sUaR#Q+}=HT1{&))cQ4^D_Ut=D&}4#rvs3gvI2pSv3Xv#Z|U-H89FNUvz^PMdt6nUihoJ4j1=xezHEh~`AEggB>x6a$9mHFYrd zTIe=AY|C^CPdR0*1`Sb~;qBN-Oc5IT{*nJ$F*kl|WfWds{A16p0oJ-J((f8j7n$Di+dc;Q zJaYad*#O{SpI&U1AkK93?2VjvE(0^6-ayS`|9taX zx7@3`e1u>B<{u*u4z>eY|8&22b*K$}OT*fin~P^P$mf0r^izWy(B$0g4^TAdZQl+5 zV#4=0Ut1?i7*M0T`XMxx^w)N%e{psY%(lW##rP%&AIiQvnniLMTudGM1Uogh^t5e! 
zFD@Q9n%`^EyBorF=wlgxaW~(st-HDYYEU#vz_KtL_Syx(-}jAnNMNfA4KW(4FZQg~ z-kwJNTL&61bJ9n()#5_W5gwGbehRS7^X^+7`0}$iepg$^N3H)5;3M6unc}SGu#{Ik zzPx;|?XhhiS9SJR=kb0}RF{6nF@aph&;@Oez_*Q%R>YP7 zU>5{wq4B?`+dpAygGlIS+p<%~4!Ng`rj`LaLK1Nn$z`VB6$*Wz%a(|cc9J+OHut9@ z*$;qWkKx`@E$ltQ(Q_p8z1g#LtSu=UsAmoEyD%9S4#<`ieo}mc2KJJbFs{TnPAUB_ zI%<4eY!|!Z!#dj(sO5^k!&pA!%ca=%JpAwI$#EOp#Rd4_!m-$kdHf-r|H1K<-SR58 zj|1WP^n5M58{rzCp1jolv@pDhYFu9azr3xcOdMSCI)MvfRlCWz*g3kAHsNkE=Enl_ zGj>Shk*d9IC$?fU4y&8w_TUW+`>7JVhd9uf-=vQ5dvVFs@J%N3h1oDR{W>_&hySn7 zHyqsIu1&KT*|%-yslLgm)1}kTx;*P=us*wvCeU?SzX>SLW?dcK&F-S;JFi++>K2h* z&p%j~S912^{kKnfz}h@E=Z@3tIwKEd`Oj9mNH(Tw=*`KEa~KxF<^Mc|JHP!@e3!|AjWbwCdaUUK$rLJ#H+UH_I>C zgzeKWHyb~;^&ep>>ZxOK%zL}`OwRX`;6TUpdp>o6e=+DDmw46uPjGWbdVb{0(PFTl zvq>C0c^r08kzCGt$gSMS((*B%naF=Tr(D0&rNGZ(OZJ^FDf({m<)(x7@^Wj?p$G~8 z+179WjpxVm;w#rozIA7L?Qp&gnC`gp@#!VG-0Dn7zWo%Kut?;&%~HfR#HnsV5$!^BU0UeGyPb|OqsEgmYE$HOqs;^G0 zc0*|XMl@N>hBpfDDaCCtfl{D<F$0dy>X09@a-pV#eDtX}|P z7WR8Fob`oJZHWhM9~fTRzMKaq!h+*F#}C$8o3cTPFzA&v>W1+U2AuXCFSB zUPv_awG7fyT z@hiv9L(BDe!D372Vz3{yy;mQrM;86`2|IDgIoCIT#ca)Ef3^wk_)s05Pd3^)fCuEY z#$tNw1GQE1(nmXaUM?>b+wRR>9Md%>HSO%p8?pK%+saoR-m9q;p03txddf)Lwm~ew z@?w70p`Px>f4$Y-lm?tZdvPotzLD_zaz9?LG{D}mYl$}&Uimxpqb^USXZhSD8HX8e ztd@zZBmD4y?@>RK(RW@FAi4V80-4D17pyTWFUt*EKjQI7f7eIi&%`IQyvbwy-1q`9 zwmN%!^*$<3rhj9RADVbVg<|jMC;=y)HZV9q;Ce8z4QK%Zc}C<;>277s>ct z9}Z)?L?;8^ranESN=hE##d#kJNmq*WP48y&<|JR`$ZL@AI>d{5SdPi}9qFyBTW4dq zf36nbd_68-ac+G~c4LdwR(QzZ|C9NBV}7Z4PF0yeP3fzpE1(J_0yD?QN~C+=sIDV;Ic~ zzV=CCbHE^RO1?QG;X|H}v003V5^GH z>6or&chFo&BImmamL09#(6^My<^4wAI3ISI84umI)mXN0jUD&$TCvtA^(GyZ+nxC-1GXZY;uC{%UWSw!b$g7L$NOu=eg!s`fuRFYYfs^r)d6EGE|t z|w`GGV$r{_(ns{(@Qwt%fHuC z>7m;8-QadOOgK$s5pURgy0KGxS*Fdk-<+ZF2Qe8)Z2dm)`Wk zuQ-j#)-R5F{N?}o@BimQoZ_h;(IqJ*0dG5k3g{V({Ep^GQt9SdR_FU{>8wS@slpXZ z*x>J7iFA6`X?E%`n{gOQJv>3|rrlGW4Eq zqR^-sV>ukg%WJAGTjdk&J>~cMUTjbN9CjpzH2GOLI~exG(s94r@Z9R)#dQ+xYm5&a zFx$pE&o0tTgP%n&oqpU*m}#wzz5F=or)%KGM_mYn;^J7uhd=-19ftkDrpIJ*ni8&i z{waHOY$mnI&5y3*p8&}5u+Q%u7IN_<4`Dywc|}o<002M$Nkl9uJY z7e@KpK(t?-CuhHLX#b!{jtILf=T{7EKfd@huCIpm$-ABBes9Xfv;2FxyRmCN+`Mp@ zW_bAV&B$&SYvNpt>g1EBMpiz5ynbNDO${}dp6Toc3~CtA`8s}Lymjp^9xkIHBc6w) z%D$u9LgBA|(xp6HalPebYE!8-J4!V~P^)Ls@bMM@#!1Iv&t{s*CKhkH>xjD(Te=Hj zvfA=NKF8@A+nrdnlk=_y`AE-+2%j^5xyZrRk~bb-3>PHkjV-K<;Qy>?#&y5G%)bzIo)F}tk z)*lCU%D|c?H{T{J>y_N_x5@ReKCSwe{B|QS$~`%jpM-4nO(8!1Np1c2AI5JTU9Q#Y zI@rk}oilv^C$q)_#F@Tl{=~UM$*9w@u`OX-d)%jDp24%dIs5v_9(?M{=fL5Y%faht zyIShNtB)fvPFD*Q{PNGZ<96)-8veg~x=&V!xwb*wTup!XE6c$bkNm1B{>gR>s~FXb zJ^a;E;6M9o;xJC}*u2m`Ci&WVCUZBFlKBos*AU}WJ!~s+3n4o{%VQV)cb51Ex zioIcS*pwGr`mxDTafp8)&|gyQT0aGH*Fgf>;RT_MKRofU8%8bH3v#j$&IcSiyU647 z8#|C|!MG}7e-MaL8v*&NSA3l$7cGm0h#D@umUt10fh!r?=I|D~Xi_4%Cd@(@l~hBOUp0=;?{n z#O7|IiTV0CxwcfVO!fNH+xGt-RsUk_u(E9HnY|yUos^;-Xt^N8^-3X2I@%Q7{nG!9 zEO4y@l0a2HM`mReK#-FYY#&Mt`y0p4$FEM!$qCTcrr&|R^p_v=HD`ZOU(sNmZDMV4 z+Qp#0UVN=DfKLW5n$eqnyRBQ3&MDi&J&Qlyt}W*xfsVFs{ose5w4Hv(Po|pxY7>*} z{h75n8`$RQ@)hUi%^AA&I-JID*MMwG%HiVd)&`_#le?JJr`R?Y;TSAQQ%fCYV_>!G z$!+x2-Ox~;;E#Ui(&WtL3FkBX_oa>OMjyWF_U3VhHx}q@Z4{%W6RGEeBA)X#1u;J0 zZjk;Vd-}7q8p%N0YF*=9Cm$XRc3Bo%TI@=nx{fzU`py~h;tqkiz`V&iP z0q+PJnN_2CDWBGqJJ$%1peUiR&3UpNjnAKL)RtdeW zG_L7gv?O}@WE;yijcJ8otTtyQY1oQcdqP7Q znLWaNklnRcmU!OJ4Q!tC5Ydg4m2Lcfy?kRZOX=$aWEv&D-D-TXoWJpo^^5W9G5^&W zzO(}k&#sAUeb?VYMEB3{t|pVI4tid^iwpXz3E}?E(E-0XSq#bXu%ZjvY}FMb?CiDA zCO}EzO((LpgMJI#ZpPLM(yJC7HQ*Co_3RuTGJ7=lzA81ws^JG zQ`Q#O-Gh35l3Bhpv!6#{!+C|x)f-e|5rSRaNSC8@5UPuRxS3t!zD;b6>IS^^fP60> zEU0@w5o3+uvsRaA=9Tg2A9F@o9-fcmk>wMP9K~~SQ%IK8mwWiLyU z-wktq)lJtyern-uJ2XbU)G^jI%o(+y#v;T^LhU^fZY 
z8U1U$f#7Fgg)k>ifd8dz{d0M?q`+X8xo1%lVL8v|5-)jh+KFWs0r|L-3^Lq~bgv-T zB>#iLCLBm>Uz}9P4cD0ZFEncRxFRiIW zW5T23Z>;3B=OMs&NH(ZXHyd9~ktHsNYtOg7ykhj6pY4ti5o@t-SMM#7aF4n1e&S@Q z&*1q%nqYbsJC*UnWY8AW!=B{wtt$_U@%iCwlROoJ@NI08m-|+h?<8kI+4+Ae<1nZ>)aY^Eu0tqg9W+DV+Xez1WOk{Na$# zCz{aKxSbr3WAHckgxYjENo-v>M)hr+NI&_-VjR!LAN6qdC%3Z({*W_VD+MP(K3`gp zmK$~9H33c*TEV!f`2KKj5UKKXe` zmhabY41cjQ1!%J0wa-M`N#P4(u=jx(8jQ`^%cc2w`1Xbu)?NO5e!EiZWcaPo-WY6c zzT`af`C2X=IZz843t-2soBD0PF;t8C=K5RH_szQ5IOyi%q~Kzli7%h|y!9AOHu*bK z){cD}a`TE(jBr_|4v)N?7yuda;45x3b>KGe<@$$V9YJ7h4wo|~y zfgZWD-uu3JIlM4C*BIA7B$ABYlj>?{xZu_&SVP;|G2EYQwB;XDUJ~YGWCp{>@b5a8 z9{#JZNfLYZnX#?E%+-1FY?ou^iR}ZNu&j>|??oDo4!IWto$_|y;)h94WS(Egw;89^ z{xf{nBGK}H>lpqVMms;NgXJg;@1494)b@s%UA#8Z`!=~8X8-E#Fs`H(S}w& z_WS0U*|#Rq=*!bY>c8V5g0R+m6Lh`gTSGu!Ecn^czPiTJpT(17Y=r2W+gYX0NctyB z9+t4h+zqnY!!Z|hPlKA)>VyvYw0HS4g#^@jhBtR9(1r0mc1_PtFV^-ncguhqm^YuZ zjghY0E#Aq{u_asnwqkHj2_N2ghl-C!@4(@PyWOC&(PQiFotRdO-0rn?%3KRmr0J+B z&-@XazQ)@2WZ_L{IiU4tfhBpkHXOx@&q(aM%t5ZL|-}GLMsZUJL33XIs5QbR< zcxRx-#-Ui?O>VFfk)!ErC%$~1b_CI~(d$pLJ{iY&h52txo0DR4^8uLu}`|bIC z>z_|~4G;*wvCA)vvrTuKJR7SdJ;VOB;c-*^%PSoFFeL-K*2|chwXN=|6UOQLkf;6H z`vOXSAL>5WyuWT?_iUSkGn0JZ=jweT?XzPYa08ciuQ%pbJ5jj*@M|(~q+;bqZ5|eD zESqrcx>tT42KeuvW3uUjoUi+!>RZ<~r_*sCaM+yxS(7sZ5r_AKP)i-K6G-wl`#a~5 zi^pCE7MNJw)Tj34`m~<>jL7T5?!-Q$!Cf5~V+NR3bLFx)mLvG;!~2b!Z|{Rf#oEg8 zm-i}YrU#}r8NY#Uu5V7jPs3p^ao)c~69*Y&KNV!|oNqSdH4eo~{Kjqmki`?O-+ZWV z<&KKecB8)LEl_d1)1Vx6f4=+@K5MB_k$9iVkeTEQVBL0w7AArx0UQxt_q%%<@2pPt zoE2Ej4G2R|e-ki)>9$ASAV=Fn(O?b(xjL`yP0>|@6YYS{(h6pUcK#+qPoaf*N#9L0 z?eqNM5z`eREZLL`c(>`-P$=bR9Qs^08K*lR+Fu+Cdc3VD*}Hi7ZTsx+%9C-bqD*gH zy^Jl_bQI>j0y4>sN1W+od=YKJq-(_x09+ogO=@;nWS$nC_fUwxZT{R0zg*138JY;o z!x*;v7G1g*Wc1^#YmC}f@bvX9jPHcb?p8Pr3NEbV`T*XMel(L4(ky8$i6IgU=w;+*fp z0n?usJ~>WaVDqK*#qbu~xFs$1Y_xp~FflehC%-gJ`q<-(mR$ChiQ3On{nv3^UgjR1IUjM%3PFs%c%fq0GlU^vrzhdO)z0|q+@c6rc&*read@c4r zt>;V_hvnDJW~NT#e7*k7C#2=gFG9nT0)9H;xU3hM7t5F?poG)Rs^To6}+l`B}%l6r1myx@$-33K* z?A~^Dm;i+%?SyU?AQk+^FdbaB;@~MiY<~78K%OK!d^@+i_6f63wWGD#5yK(9S$zEO zT)Jffo%>SA?)??ri%W9(cxx2fF zF`lpF(6MUWkj*`Ch>L7~uMJO|nkAoN;-6^x-jpV8r;fHIBWJ5M1a{+DBl@V*lGW(B)=FmTJ$&)%RUTod+rhhDv+}W~U>i&9_LqcEJvjF>7s{Zt|<; zuzc6eo1gRfjcr)V=dA(Rf+aiO80*5jylG30wP1{cV>~{@^FwAq(LI?T9+*G-vvC}z zsVv3gKP)e7YxWfgq1xr+&g(SWn6WOvIy56BTYTl@T)};L9^tnTqzhLn-qe`|9wRL8 zXE8!@Y6YvESB7rB+&Xd0_QkMzQLD*aK3PSlMZTl+X(m^K9oa_TIk6+cH|L!nc5NUY z=a!E@L+zHM(EGdd?ElV@_jl$Qe)X_1;H)jjs~Og2C%cN3jD(MNJl<`0)9;Rlf_#kF zmpAHkXzVNz{nccm;NXAP6z1^di*XjiG+b%J0gu8czV^c>xAL(lYCF0)_bZbMl$!Ma zwOoB~+TGN-Kif_rYu>OgujDr8(=*1yM}+N|Z>;xMV5_mlPi?G*0E7GdL`+dHvic3n zYN5_D5}O2OZWdad;TR#-{lvz-!Cx-;QScNgWn9j_xqsf`^_N<52FHSk|YDu_JC68j_D z4ejNpuA4pP#J<6}d3U~6C-X%qpPj3hr|jKzfcHY`>Be8^E^k!9d@dd-3AY#uN@g~G z@XV@y_YPKPFthc^Uas%_OgPIaZEYcw(XAs9cNU6gtgfEe$Zx(?K&lS|p`Knjnr$*0 zn6MCp^NqpUYDFDo?{ab@U|sa>W|$UCdiwwv!gyl2ts{HORk5lg&nF`W7Clyf)^zP& zK>zY#!neua&S~)1AGMvHN25^tJqBSCEticUV`BZ*EA|HGWKt_v>F49CZU0E{=8qY6 z>re?Jy4N>7Fy+Gf`A^^6UM-!2i(wTzz^)k8lKk&xdSW(t+ckpR;G>C(vlhFusOpD} z&DEYUyH9=Mv^7@ulNs5rX8-wccu!<}S^d71F86B2KfhgYJZ5)mc^8Bd7rGSzqFo>N%dAM++nkZ8hZr9NAfZtKVqj6g5urztN4;C=F1wMF_XchsHJD&uiszz$cuWV zyY--)?KkATw+QF1k4msq4q7`Oc-X`Zk1$Kew*8NfXufI9mzQ(wYI_nJ@!iyh$L0q2 z){+ew4Oq=#1j8{$_0ijs=X1P#oxb|6c26bNJ^mRE(Xwf*w^mUNyy_H-mJ;^O{sP|h z(}LyaWo{q7tk9-hzOomb@&^=v4u@*Zy4%F4NRrug8lJ#riWd=44?MlHEHKI-E(<1 zCr74O@Yk1~b0ri2diKfH-aV7Mm|rF+CG_OT@{#vF*o?N2s=hlx@h7mv_*zcuJYPEw z{g}}^S`@0noj~dnrN0x&44tiGU@6v{$oq6`WzH|B6xsgTOY$#_nZ6(-|yi=nzq z|Jd}(vc-X8vs|rBq}@kv7VM%1=={>1{*Hv}KfRrxY6#|o5RoqRv#~`d{(X=YgDI95K)&Q+g~En8`$W_pnfW-K 
zVRseS)9OU_3Z9+wr@uV%5p6ezhB+B7s86)U&REempZGZEtK-P4uatIy&1bYv=KJ@? zd$Y#R7gPD2f7ran;qwUJ$#W>Vn+z%P?KeNP5k}qdE>`M)<9O7IpBPq3mshdB*HM=* z&fxT%tJEyNbA@0d1JwE~wv{v_%Nu!nmbM10Uga>Endqp-E<(1- z7o^P0!ZUw0rt|dSs5!gL?MxaCJwJyd3kG9Bbtu2XRj$NAX8nz2eIe{zaqHqPh`LBS z9}qJ9rXr{QRV`HFcWmHcY84P_>@ytwPC_gU!b&*G`a#+{}A)wwL0x z_+*EUwp_3_@^MaIeM<#JF7ZyeHrqDqQwPP77=t+Wwq2~h^UhRj&qhivz-N{Hhie$E zR}+Ts*-B)WKg<1)+`LM57xK+_a5K|ZJZ!Wr4{XE}wY8ug*vWd%Ub8%yGzIj~D5<)M4(Y8-!vgziZwY?!# z+jE4Hd@{sVs~vyWC>Iuq>9TNZGGR9L>1!uEAEYGk;$eGZ%LF?MWC0@dwWE-*vqyBD z9oUIFJE5F@zN~*}A1`{{dg=}EQM&yYHWKxwAX|*{rHz}b_{|_s@;+5 zc;@AR!*6}PEbZIoTh}vqpwlH&|ILrtXq=E@^}>6vuitTV-!YMIe{Ysq4Aw2YTPyI< zezG;jj`pn{-}p{9Nx6Xf&hM9&uk8yPUjO-fX#aLMsPq?F>*sECTSwW*P#M_o)4Cqw z=dXRB_BZl|x;#jQc}&lF@mI&guwb&49DR@SAthS}j~!BT3eF;066++3*nV?K$2IKa z&<8N;k$?Bir%cB7=IjRIRX#liCGF@)kshS+=n$(M(98RuQ2&t79Q z5i;T4IyWD0YzzwB8Ksd0`ACng*?lp8klk!P|HRUnjhRV)XbZF2$fs`|{?iZMvUDLu zPkRQUYA#0Y%jM!@G~$Un_HPW2DXdUSJz;OEJLySbTo&9VrXH$6jB2#}@#KcF(kppC% zFg|x*l^vV0`0lMm>1kc%^Pl-9m+pUGI@>?9YP{b@JA zHnsh|;qeL3GR>}8; zVj`k{msAHPlVMc z#@)813H^$f@{&U}@zY#xMticm z5zR(ciysc-yu5St#t1sy8#m_Vkn|10#{FGXIB5@d3e!^)k8rVOhcVw85Qu!%+?;wR z^9gLtt(E@#upub&-VD!r{HJri_;#YTHia!eyPh((Urrr|CvyYzc)I2BWue~GcO<_H zPuvOulAEXLnqzJAA7d9?9WOr!q%Q}{oBT^#s(U#kLuwcu^)`-LR}-@%CW6LAcyhhi zFWAQLatQC){62*E z!Quit{%VIUvgiU3!%`M;*CZgpGlj{`K)5zXqbzRMTXeU5wU;B%PWv#`n}2gvdk?BE za^D4QfbP#t6Ufh<^A5+xOHRUwejhsg0-D9mxcGl9*~siymHiz}^}x z*QA+5T%x8i`ua@7`nH?niNb~PT^mf`n`?FOy}kjcxp{J0%MuG`oP)!1d9L4cKFrnY z%+_ZEZR0c(d{KB5>(FAuY!lb~uTwi&;|^OGzqxpC`pJg{kKb*uQLq)GxV)i*;MM9m zh|L#0s>^2$xFVm)wCb!D)NuLRnB?NO=FBZUe*f53!_4P*wzM>oyK6G-=DoEixy^^c zxm?``Ajw!uhCMr-2Zxo-Vqp(^VFCSOx%K99urW&L^$uoc@7nUH0$#0*4wu@P+sWP9 zdG?X-JV5STUIXLC^!qlHz^?}QF-~H=x?9W)u<=L2Yg#L_9p=>qUM~lrmP`E}J0B!F zAK;KFrMvD=c7HA%X+CIF3tO9$oe#!9v)*b~hfHwK=K0jE{fi}L{mXySQ@F!%V!ydG zmNEYCrE%-K1g|z7d-YqRn?6iBt?(D$x4+dbaW64b^RD_o|2Vw&^AGhG2X~AlJaEy= z>Bb_;sPv2bHo*1jn(S7pFAH{IO%@2L;JSPsq9*EH?| z!!i~cJr~20JqOc+WpxGo@){txazx&B1e?<()Ap)rdEBsMT#SGG)JF1sV>8XcalD+Q zYi*Ph7?;xr@$JGU-%t2ZEE;}h{4&i;AfZS+3W+`Ar# z!)AF_X8Wg$$sbScZq@-F?QF``#YB0Wase!xcp7Z_Ua7ji+E!cL&Mq$NqbT?n&)YQe zH=n@Z7{4|*=*6?~lxW^;2 z1ac9t?%fb8l5{7ERt#>!p48D#woHh<5MiXSFP9b1dAm3k)*$&ts=BVjKtc|?Mi=wy z1i>?A>&7ns#KEBX!JnKM+w2?*`YG9OoA{~B$>>C=_Sls5eyaX*k|A|qOmEG-{_urz zBPdtt>cML47+lU;@8oM?&d%lBb&ngOd(U=O*1lmJ7-4Q0_CZLp?9Mj)&({PJUp~s! 
z>Sh?qL3waaa=q)u_V-gT`*MT4veI$z3rG~JMFbEUulq%T7-Z#Tt#*dW96<=%-`q5p zd%1+0{f>g4xpo^1o#g8LWYOtw{jcpDyYZMY^4V#8<$O3eP2iv1d)$rh#d4Joq1MhX zY=wEhJ3n}h$**`5w4QA{?Emh6B>d&?|FtnRjuVw1U{;&mHA`mmB)y9fIC-iswj~bI z+Tu{VZKTvU|G)EXyoJUGnL6LK63WI09Wy;2ev{ueB0N#@>pUr~m#5nJCfM@L@a1J6 zM6s9sbQ|j&Ns#V<4kr|&uVG_*c+^2;_mP5d|0>KP0BiOA8PI@tuzlu zatBLoq$AdHV6(o6U8Hwy5b(_&vW;&1cbh-zf_DLYk4E~8Y%fHclDI_N0HMWUY}-lQ zM3g8F55xw2z~~DXuhdvS1|+UI-!48edux$jRV=5~mm9&R~I7{8?97;FUvL;am3)|8Wo1By_y$84>WJ=^r+Otg_|;O8rw!_HT)|Ljj~ z%N={8$zw8I{8+HqQAoVpvx3l#;jz5a)uwr1~xqwXO0^0Ki}ve^PS9wkA3yBgx2|i{_KmlI?LDaBGd7bPYSPpex8ZZhGYs_ z%izW;-8OLt&y`#|xj^i><)J>;JG(~z@?L*(n{)Z5+Q#@9N`B z-bEzce8>Nd|Hg~V`5J^|xa&Fke2;NC8-2!?pyg{x{_sOKAKCfxZ4kD8Tt2K_wliA< z=JA{w2>Lk5zF3y9NSC)7Zk#=w`#xl9*E31e%$HbD2J0X^jiOE$G+J1h)1;LxE z^X0=#66|Sx10IfVeHtsjs}aYE{-Fbtkyu6|?_7qmH-G%+b93j=&JX$7N$$nM!sgZx z-x%T%6XiA?EIPwfE;k0~^4ddPhkrLpzr4jB%Hq2VhqGuo!GW1+W^J%-PQ`$ zZE8;p-#R#=yiJ(_onCID)wp$MCN2kl2B2KsZx~Zd#(Wqint-&eE}lK> zz~YVG*s3tteq&94&K%;=9j*TCc1#Cm>03Sd;?5olBi@M^#xOq&laEt>*J0jti;Ga> z_gy|C-Sx-AVa(%3y*z5K7S89AiMyD{wASFOp#kKgKEp7~K{WQbVA`v9xWmPKd1v9r ztT}b^=8tig6U5opy!GkkS=wwcVX$p%d^Y7|`mZ*k^SF&XKDDQMjP@BE{+eK1pV&rm zK2I-u;rh9ga|~D!O=vl1?`QmC-d`B;|I}HZOtORXVT-!@d~@=gwTGWB;rV-hYH!{G zj+xxesk##N+2_oh*dMPLqjW@U# z_VaU51M>$qO*%00Lw$PkhzN*yPp$oOaDvpDt#2(F95~zV1_rJzqpZywdu?KVuXXv7 zgZY9N^_+e+JzUX`cUoI-+4<9YFufP|a_tX&Z@Es{&9D05{P%y|@y^{Bi@am|nSiCd zcE2;QINShnIu?j;oV$5>m$sLwg-J3zY~0*Sfvv6~>H;rMceiNGWeWm z=U5i-8WPJ`1is)b2h(RykZmh|=e64L-HYH_Z0hvpB5k>BKl|DIC;R^_&mS%^fNyM^L)Y$HNPjZj z05#^Ix?yXKbUbXa^7hUz*_RtP>h>3m#kBXc!xRn)x@2Aw7|0B~Gv^@TZOdOfwK5%g zV`aN>i8`T>X|+XB)A`tY3K)800+gE3O|%NgtvG7i+n!5%lTphIjp4 z>qHX6UGD*v$b6(n|Mk^^We~R29}G6RF9w2q8OP<5k-;<%wY}o;)tC&|S#SJqKF_W^ zrkM2ArA9h?XO+I%8?NZH8{1gQyk3G z*@j4W%qMp`KiwHiEV9qPCp5b+_W6uDe#h*ueq!%t#AIEVem)%K43IWm?F`%-)R&u| z4{W@1aCt*ZZZ{4u78g{YP&2`1v;7eqc{P^Wd~2_BJo?dw2f6K+gXCD;9KvVT3t7tZ{_VH{%aB5lsD{}k#lo=5-uiw6xh zn5ztLP_=urYulSra?3BP?3TA{3)U5qEeHbw4?#5FhC;0Xle92ku$=TW7 z8_W@hg^>?8fs;R+Y(xUbcrdYKBecz=9M{g@_-JdSXr94=Wd0<#3tF*EiEe!I>uZ(; zdQ)qMzgNnWBYJpOH&1^T8R<@q{n5j~E_^r*`s~_`kD4(4uYai?ocU{{-ti4WO8Gxe z(z04wqOKq_mQB9M(0zT!ySP03*!vUAA3lV{x3%tWQ5^HU&2LOtx4koqZ?3senMd)^ z`56iVyJV7ZTAQ7Beq-m=VOmH3=u85!$Hp|fpUxG+_)8BKh2*?x*8JuR(eFH5 zb01=jZ)XlIF>(H@{+koSz|PYhJ{ZPNr}jd4{LRmwJ>#7tWA}>%AakJ{P0^}R!%(-~ zb*4Jg;m5(QF;w~y&f{1ST9aVuNolrT-O$;KtqB+xZ>FL@72|Wf_Uzc^FJt1IVD$Xl zxo{Ho|KLq6lLI*RI*E`eM;(Sa!SeveSh+G<@I%#(NM zLB(sE@%cJCzH6V_!?zfd)OYx7PZ`d~NN#W7>;8Vj7Ykp$FR;=rUbJ+Vh6^G}_Mbhr zt>cb%(`~0t==a)sncXKfduNmFKm6tF?4&dPhqs)5_AiFRvGvu0QFm|FOSVvl87cX#)x~^s z?R>((oZaHoetKv3-E_enepMFdK+G=s%hGArKP&Cwr0Zso@#vhtF^T&9mBeoB@-Ke* z^L%RFFgq`}=_bYZ69%=%za!ShyCXfUv6EkvyNP$#9D_p~Z@bIUHz&WWTzm{0!#T)S zN-%Ex=wWjzQ+zwVu|Q59osHc9ef~(?AZG(DP=EDT;>O7M7}K9@d27Sl9ii(mx$SOF zp2f`t$w*cm4L3FTc1#7Y!@fm0TxieS;-DX`!+h58@Z8Ix_}vEEH-sbpdA$?A4S)64 zQJx+`%Ihv3U#xCKEyp2um4}b%zZM6 zZU_OG{^3pUmX<7J}$(59VT%gAmTE@yYIlg?r9+oQ90 zF)hdSmvgz)?(w;Bo^^B6eEG1sUcmQ~`91P!oQ%)ucTAViWTP(va<$tn5n_^@`KKwq z;@bF>D7w1_AR~`wEaI&%Xsf3bb^~-b>`&*E;h2|n-?4Sq7_qS`VUrzlAN~^^cu+TA ziS=3@;EL|1;QrzvDwVq-dCK37=*@RF;@iO~Z>x{`j;on$$kEkHwGHCKJXLZ%+>n;T z)hKYx4*Ob3=-;|}`9V>SS3`2GPA08}YN~_pW;C5;J~;t1y(|k}9rVHGhjVVpHEy|B z94zO3>%+z|!p1zgN{LYZ6-(_bLnm0c_Cl$zj@N7lpIo@XOmS-|y?FmpG(5sFqq8{P zTm+G(=HCRpsqb(yLjyJ%44tP=s2?j zyHZ%q4LAAA-)KvcC^BjTl#*nw$PK*FnLtN8{kucCB(I+YlJ*tfC8-F`-eA(h?+s(3 zvTQs6S%lEPgk4zioC(NWWfO1}vjVC!JNxHHI&Hs%k@I!eGNqU=t}i_@lCT$E{xxm) ziQdS4BIU>Zvol6AE0A=V7CGDgTZGZnWsSW>c$k-a zlL|&yGP67l13ht^uUT0v>>TbpVU`K%vk79Ip2l}JEd=7J|C?q+ID2&NaDQ)7&_jCT 
zJW%8RzIpwPUby%qz00_(S32vJ{y?EIr`;XpE@{I$^-_Sv5H=)>}jFPT_hZB8HplUJHEez?nzi=O#{ zW&)SvzxXKFo5uzD>f>OX&%{^PVElPgd)e8=e|3Cg`|xCQI{8}k(|zj-*##^(jE3HBGj$i145N{}RXe z?jP1Z??`-a-FdqBEj1DfEH!=fRJ6dgU1__yc!ae6p zE@CbuH2GaXPtH1_zE^|1-RpPpKT|W7GJiuf^H~~A9_U#wlc*hD@n2XS<854HM?0x} zeo(6ikP*!9ja_(HU97$-BxbK&I}tT5`pGGA{ufvAu+?@qT;x6O^77;e1a7t5wW)0? zf%vOGcg^4O`X@T`t2L5b!72d$QF`G&A1dJ)8Pu4NY>_%?_P2#ldWkS^gbG%DqqFA z4^NFpJhtfHdXs)K{4CG8A~L_>xZET1-YdBq(#tqebA}1F7ejq|8+!!)=Ho!GJ|>8s zf%yA|mQGkVFP+=>;P-MP6@y{yhH5G6-1-0+zSRArM~&q| zzs}}bZ)i7mlREi1&F|R@)%?`Yrd;36fL)E9rK$0g(!h*7eJ$fcN0oY-L;I~};`rrk zZFGJcH?)DE*ZT(&p>AW?z2f!EFUT_7f~zao^+C zH?c;5;cYiAug_;qIoj5k{87&s#`0|34Zp0515V?*JiYPFknV1FyfMF#yqn0g&z~D8 zz_V@KXMZ4y75cQK~@oP!@%hDz_ZB)uJ4)Wz=H^DoyB z9lxLRV6sj!Ar%8ZG~3W?=%xoiLPyExcC!>5=yJ>R-lGf6Uk|HzYn z*jOdUHa-Z~+;Y8tFc@dw%I!Fe9nA~!-%Y`N_!o(N9BbHaR+~@ZGKF_@(z(*uV22T8y^U)jMDLo%_G}%iiTG-Puj3 z<7xxe-e7p=?fPM%w!3*^%tYuf8EP39cDUwj7($?+6oF%sMC#o!%yP0NPK?a)m50;3uo+oNf?zy}glr^0HxzsU~tISIZk~PxK$5fBpxk zpEHZk2C6;cclSSA+g1R0*nykl?XVF$e6JdYXdH{&5&(2^+ZcE)#}%(ZiLdx<#;uBh z!%5~YK2M1irEplH(Srp;a%=Adygu2a(WoRgUH##zzrs$>CmzFif}s32$aFkrlch6?6`|*gcsfsU*rxXe=zJ!*{De=D=8VbRl>LI+Hz}iv-M9){ z_5c7t07*naR5+R{5Afh{@cR1)L?AHb&39N`a~**$rkuHgE|PPP0K zT@8qdfnKjx@4A{mWPTYUGJHorTaRwe$ji0_r`KVQFMQ~2%-H6;<4!Wq{z(|C1FTBU z>S+)2vppN>k@*Ffz^R6Y+-i^HM(=O|phgp1ec_ozY^4Lr*sqTDzj*E_A82+q3c75< znZ0b^C$4(faFKt#&6#@1-AqVE9gz#fH!r6RRPZq!eLk%b z_aV^XihXMX{CfwI($kyvVmSTv+3*-WT`UJq9aCO=wG-2TktO%#M7tCARLN5G)eF(Mu+hH=WqY@2UdD-u3eC4W_nDMPmevK!_;w7;_%v- z{SoijBQlV(n63TWIxg?=x`4cM>&u@cGshnpe0lA-{4N$UUN>hxTa*9nK2OAKKhM@) zFL@Rhp0WSt=H3zIYv(CIZbq;}WEZC$rw{1Ic=jd_WAvR5iG*n-lKhn*W9rR|Y|jEV zzPlkzcvo7`g*CD1$wQC3#z;QZZHb>i2Y&U*Ipx$Gpr>xpPDUt})3GmtzLv{@hT&Vx~gF-!Qh=Z6pd0`Iajdcn$rn8}At zP0U~TWCfuwOpIY-|!6rs`Px@KShlbBgWo=4%QwKi0#^7LFU7 zC(F}0K8^E-v|5<$ctm#t2APipgEQf){cBUY8lzBdetx2dZ!S|_i-Bx$?G4`M2%Xty z{F^xQKHZHQn{jQchWg736~GwW5nn6$$os%Bf@`d^@WzVIKkEiW{aj5Xzi|KXIjiOS zu&|YFv1q;HPZ}2Xj(hVx+NANs9}mjiM4tQz7Y1Nxb9T)sDEsoc*fxY;-DT&kFU$Ad z!$eDIW0BgOkL$D9T6_Q85n?gV*j&EmVp}d2TL$vYo13rVz4doUZtTE+=geJ?@QJpK zkFOPd@#ndt_3{k_kjpzyke$lqjqkNSL(5NjTB^vpKJU15Zy5Q|&xk2v6N*U1xHx#g z|I@WSxyxbRtSK2tjad3_U(`o%9>(JX{}bXYfvbDQasN0c)EQ5%*fVVEXiF$do^|g0 z(2dal^!wMwb13W&)9eT?zdIL?Utj@oX}WDSm~C}4O(1RMonGS3yM|OOB4yI%hL^ga z`pW=!>Ssp`@@I1a0D{^tH$;*}OY;BoYSs|0IFPpGin~q#>9J4Gl%rH9yI^UL@ zzbM%`IK`c}*nD$jF(=FBLatGkYTpBKOA+jyPqt1bKUrSaAbUpO8?HZ{dzSO%=`efm zN@F=HPH@dGdgz~znSZgM`Ia}FJ^*lYk@VKkTHhC5<&P^qS4i=`ZMcj_gpGGc@|V+R zFT2*IVPmV>Sle}D20R!LuNOdE8D}?Y`M{UKIF}2B97BIz0EmCNVyt$58NKU!EH+us zrV_ug3&qnjAm0ZBlO3+ax#6o#oyikRo_51-+}BG6#XQRJ>h~N}BS)Lu%w2yUbdy1o zj>QG$v;E1=esX&*lJh-XKD*@`?_X|I_b~SIV)u>bg8kkf&gJ>o+~DV7UAVPZ&%?oO z3fWu@kewfK^Kx8!7$&1_A}Ace$@?Vhm0R=Sw!uJ1oF=&+i{&|Qm;+sVH@q%gySrIvpswX^CM4t|MA)m^SCcI(a$&I zGh)&*%Y$5)Af(%?due*S9vI@PzTVd&QH;Z@Eim#F_`P}lLYQ=)IAdo#bg(QMKu-eVLInapaR*q z+nDTuzXe=d*X-kiPqvZK_NiourazH@72{?$)t8;c#Lm&!ycl8GXC#M9k;(-=k$h-w z9BL?6G4l`Ow%XpfU!L#?emwT$zfwfK{LMhTyQEN*-)3y4O=PW$VoqQDoh=MNq|e_? 
zqjQV?bZ*qLlY?q&=9Yx}bnWn*A9Hl*s;@=;`6490%LJf9w>rE9@%-IIhuC}ZWlNM9 zShoPzR&O))M6n6o4c>Lu9}1IJ#$X!048E}(p0x+?;Ulk3liwA^!@>U^;NcNVZV78t zPc0I_FyM87wbw}Oy*a%6BU+)7^|GJM4!;kZ;O{wqK8RZP{7;J$4YAD@;RxCVy!=Mm zU$ic58%t7UOWBG3-cRZ}nL-%vP%5mXiNAFG(Wa ziE$?2oBZj*b8fTSplvR1aN^JPjP54S>U}T{LlX0QIx$>{d6CJk^s2X?eYg=_*mx)s z{VC)}Q_+w1>>%~ne9{h+To_YUY(@?BB-uS%{NCD5sx7;_7{MSjbwcqzERD0~ZWy84 zT&#J~-v@;dVOqD?dg@G)Q!8X=N7H%hd{f&x{>GJg`iGN39YW$X#x=K_&HN2}GMlHb z=A(_w6qs|rEJeu1TXSR2K9g`S2IrIAX~n#pIDrrQz-ACY=kKN zc^+0n3qM-)#q-H52ca+)tDV#Ht;9{gLpVKnzWQQgH;lt~Z9H%U&Um|F=v!`bUwir= zJ|eIg>j(Kr48wi$I~RUeRs>%vrn2>?+&&ryTUYpqwp_?NeKjF=xHeGd!+tpz#Kx$; zTr4PDeaOByrha}%bbO(=;~!>nb$4xZngrTrAi45DXBWX)_^q2$e}0LJo_*faHNNbs zjpdK-2uEKX)|_o|T8oS)E99aLT-J0KYj)?y*dM;>Z>OJTde^;ttR5oVxTgn;{*(L5 zzRAkh<>pt5)R#2!J;|wu9@g~xD}akT-F#f#VDL}x>P6EU>7_k>Vi9VRN9@@A%}Nix)GVG?tsCx>jfCc*wudeJ zTN~8Y%hmAFpP6>P)#tBA5yY!b%JXljCC4NY-(qFK8FLJT}I|% z$JyhtWZrv|~`)|KfPWl9GK+pW`!6}TvXPIZ=D*Ju^yehm$&79?bF|NN_>!P--q<8U3QE8W$lM) z*L#H3yMvu?Auhl=0LzDQ97r{gjMv{9SL=i|Ao-0WCSHbqYaSkYWBb=#M>Gn)W=Mut z$JXclS$ux>z5!i(*OA)U^kTkq%P^>~@Wnm)0@v#$5FgN~cdy|uuGynUXDc@C#H-o# z?6fWL3F#8FB>}|eQgJR#cJ02`^)VUV0oAlXV*9TJNQQF%{kw+bc5)0uV!W>MR_IOxOOD+*T4S1r)NbF z$L$G;_g_1eqXsr^KHC1idD4uV-*1hkSsYt{i`(yXhhP5p?Z?)J8~UW?_vV_wFU%E- zVa<04GycNtm>rG@Uh6`o+XHXqRi18c2&ow2@x5`ODF*mDSKtPAV^9CCSsexCrZ4P( zX0z8{<>QJk>Ar32h9eucWdFTuUcRV@N`61A9nto|$y@W_q&NK8{p;o)y^F{FAK!qT z6ZVqvjrz?IA<&mwcE^c!+27Ub;eRay|@)>)v28%t3 zhLDH7e+Ucjc!xYKfD?t%&AlAfAqV5>`gXH5;Iv~?iOD+kp3`yQ3$DKtDyRHi*&Aav z(FB+8Y|6P@?6=9|3=As^J`;_V()`Ar*6SaCd%0^} zie`+&zYnKgZsc?CNjv(GgMzwY!oZmA;}?o$4<_gr)AF?tNo`G-dv@}_b)S5%i#NSJ z9_}#sO>DmzzZ}aooV{Ex8RPrQi?lOYH=cYJ8E$3;6I{EOX@2YT}GPv;fHZI>VQZJexkmgkI5>EeGmWPuud zSzNqxa&}kyXEaG;LFVl2zU-CP<(fpg##fE`o6G$rN_zK?WQz|LV=)|V^e6Xua-CmVFdY>@fGEEnt9ZHqQ!4%%(+ z#3%h5`=cIa?cqP!!(IF7ES42(ez=^oxxF2l7>q|2zkh^bytkUhqatQ`+*r=g-145* z$xJ@E$yWD^OpUrwI3x437>1)=a)2dqGTH}v;j=vkcn$G*62a@2+|0~8bmF#UXFo4s z3}%C!!+9Ae^wFM9^+{rNHT+2|Zsw95)Tb>6=JzcGHSeMp=v~A#{%Ux@xSFrJ?v+gY}I`JCrid~RHfm$OfYG|TWnLbBp5&ynv~+*lQn7iB5@TqzpL_A6 zu;G~EU@ZVLH3YG-jBi~S&-xEv53y_q2~CWcTLn=~8ERH8%_Op}1s4hsC(& zR}A^QZ=Ga!9I`dc_Xxv2P=EMmZ?U?#!s}0`-T1nHycDl7M_xTJHkppQA=we_t-Eu| zvDbHSV^CY%#V-eAhkdD}p2%v0Xuh`|sI7e6`F$2H7nc@t1SLBVC(mr##*K_zU}sMn=BK>D>=B}u<7D~RF1|n2os0NvFxpPs zGn8-k=6j9k&TrAZv1J!I6Ybh}q2=(GuB~p)vBUo?PhNB5x%|h;02@(z^d>=`)!iGP zEx(G83`biB)qI3Gd-l{dy<~1|w6jGg*_$80(L+MB7WCR&c7d+n9LJ-^3%_4aEi;He zMv3BMAWz=@+ZN|AW`~?yq%u08)xjLpdGRFo*RK}eQp5e10%AAT9g~~gY`SzFjLoS( zaDS1U{q_q??6tX8|BlNyaOcYr<1f3x(9zOIFa!6dvib?s^7`_(H7c%*Fr+m{Ia{vP zITJ3(3t|@*R^#?v&;~KYY6ifTrc9iG4mO3B18NtC8HMZo8 z1?uTezU%q9x#VvR3fJ(@$*VnTYL1RMoc?_fm95!?jiEL{1R<5Vn9$EL74>;#KJnz$_*ALaPJX%2vpqfeoW$9tGcLLq)m4wv@2EzBef2)x@RAc>bA5?CJBKfC zqg^!~=jAiqx!iTW)&Vd^+k(k5J3D4aPDF87>y3ucD}2k<3*=jWRy%SqSs0%IhHP@! zwi^mFROdIo`33=fj@!*XBW2*}stMVIPOgvpVJ&awZPYujY{t#pY{>P)^p~zbtUJy} z|8m2w_WJqpEzNF^9}Y5nHy%^9uLgEgu_gqY6Z4YXwrnX)3U<@iJY1HA;fTHu#fY_u zk*!sArtTbgvElu4kSx$SY0JWS7p%{1mNY>L`6Z{U_V5zbA z9P3L3bZ0+e1k>xQ1@z6&d@H5V<{Ou8u9vpD zjYHV~(@z)d{51q`Y|`=1XxTD1@242p3U@dpR`~MapL1@$iRk&9p6i0;@*)@0ZtSxG zqnO0m-fLsFTw~-nsvAbj{ct3e(>+qY>)CqoW(qs4VWa)#y@*_|ZjFaEp4b79Pj{j&}JyQ0A`upg9G1v&xvIv74!gZ+zEZ!HJwg^8Ewad8jP@^*^H~F5I5I z>LjK{+$!boD1kw#A2?lJom+vcgzw?2?+lTIRS~*)fCv z^q#8iFqDg#yqpNfPojuO4Ru*Mo9JTu)*iWB8qJv&vcn#K0VJJqxwFQ9KV2fpx0aoA zw%GC3bRhEeuYdL)|J5{Eb+YrI96$ZlE0v3K;~&WS>N?*GqC@ik4D)bAcY}F#mE=As z`20@y#$-O|^zxTUboWyr+1&f0`Q)RYglQ6asJ$h?SS&}^1?2K24zbQ*!sS?Q@aGF) zFUh8JeOPFS%zhszMI-zDOcVJV`>8HQV`1Z4lb@XrZA;A7%~|@! 
zo)2T_?PkY#_QvbKx<2yZsZs-wtvNWo&$*1c;1sKk$S23z1{Na$ zX6qkD1s07w#zlQ?X8^p>*cKm};+TUn-^BK&WpT367SE(GnC+6`EBUfWYk9-!k>pG* z`CQBnDwX!j<1$F0ZaaCa82-Fnj?#{Lqc| z;UI!fj?`hHy1WlBT=8C;6uEuVX}PQ|A96!%cNFB681P=}WFk+?+nKwV=*3TN2Sy?- zx757Y-d5+ub5re+r%*csmKbAn8c%PwMcHNt6H`hd@gIYJ_+;vd1Tn$RG-PKSM@z<7ncp0xY_H7Yu z#`AIi$WS-kSNmXFRIY!rY+WUs&*ecJZTD@}%j?WSvX4&XnI9l+5YBDKbUin3-pPOV z(n$Vp$h;irQ#2lwme)IBvpOc-jYZws_hAR$uRhsWq9_anV>PqL1t{MC{HCR zzJFWviL^8CtcO{W1KV%zZmjb2+@7s6lcyS`*K$^OhUITy*FWx*>-EZ_+YTQU%fGN5;%7=g&jS-fzZ&aYaTP&YnY#%Yz~|E9Aq0Cem0@h!Pced z&#t#@^F_uuIEQ?5%eZpKzPt@y?Oo6Da(IWqodO?#^}+0Pz#2dK?2uINGXgUTJWqxj z+vaEfJa#i7zV>*qXl-B*rWn4aec>If9i9O-(xq>#5m_Csyn?78phB!%U-ws%MX0< zSCejV%%57A^Ej=g0ZHQQxGq*Zt+C5ZHt&5DzXLeGJ5O@wRZzRh+PMGId#v-gkOo<- z`BlsFUvBp01af$hcf*Bdcs+LR)|Tw>3~qf`{QkCzxi|aP5oW{e=CL)C8F4>%8hqTj zBH!`vJdobdUd?O_=P3K+OOcymFe**&YuMVgV{P(VGj|Seu9jcOh%Byj-H6#eQ+YB6 z0=sVt6Z__jc@+KrO2+wHohF|lH=RE@c>%3UERnn`S^rj)NZNua`Ki#8T&V8X_Td|et-&ic3?O1vBqQLX9 zT8ui=VV3jzWu)d4O2QoYR>7Fh1f7i^DC)_I=nZs7mxAH5y}Bz7I) zH@c^rp5t!Hi&XB~#J=I7pG_fr_Z5b{4}$Ll2msVXx$8rRd(RmuJABSs#IbN()8oS)>?zBW}Ir+SAmhz=mo{#xuK)mM_0^4_^ z+Wit`>Smhx>l>^->y#(8x4($^^vNW9TjL5KU%mJh8#WL7HhaR_^QC^Xr%%F~cR46~ zqn`~tCLJ&SImux3245U%V4ue(#rAI0YDdq_1^Y`1ru4_FYdICv~@Z zFiyPe+=qYPSkMpCcUnJ?vp1w*wz;v(4i_RcTihi6hy28&AaC+?_Z<#DT)Q5O{;$>< zH=S>NPDd$cYq-7F6j!!$T4+m78$?aZ$$bP>iIhp z@rK%b)qXi_ZL=<`H5(K@$cUXFXslU1% z$;s6|j3A%f&I{@$2>0RI)jQn{U|`fk;U-pT$KuxvBDGj{0w%yE1iiw0 z6lc-5kz;&CKlHV~;2+{m=%j#>xIOl@=5*)zXkYXe?UVe{oa4hE%5(r(q(>*k6`=>Q z<1GkB$=Y0zsX8ZjNFfQVb=+}VtW@{!nr{pV0#M!*t!|m z4eI3zCA)K7`(YZ*q~fc`)10prRir=i6Znlmw7JZl7{j#R6hE)|czC!>eucl~hJ9N{ zc6!|>=_Vimmk$X(c+pGPifyW9G^6=*_?*Vd-?CU8)Xy3F{5oWM5s2LMj@K6 z*DK8n0iJSNu!~_t`q4jqSgEz`h`vRFRJ)Mg%IQ`o6 zPU#NjOY98paH}4Fr?v%m7~;FoLZ4>nvU&KP!NrGNUvBwMcaZ5dMyvgSEC-`SXFu9% zVGB4v>bbLs5ctuT%O|d}(T2$lwS3FaLvmsB!$;dVZuTrU=MR=)GqiYc^xTGL9nmQk z`QD-qr`YOPd{5z&F4dv<SVqOJ|9b@%ZpBFZ~(h=!o<$u3w~+ zi;lS%V1#Cf-q5f)tMNLD-jntol~fMP8^-Nc^%GYZyA~K`5GYOmP4mmK7R9Owq%g6=g$?z`W@7BGQ>8P!xJ~c zF8;)~SGh3uzi}q>-}`5?eEBBN45Ad@*1Y`8Z;YN7Bd4~9=bclx7Rgn+(r_=hQdfoZ~im({&zZn}(RpVQ*Z9zkt5Ad~PC>TwMkB?2NJy$bPx{8i)Nk znEu;wn1w%bqHn&!M1BLp`1xyq+zeh>Vf9+=d55z6J*>=}(&D-QCz9Q4s;Qlq$iEjD ztGNNmhBZ7|*~N(`&oz6u_}q;1(!RM8=iW`qs~VSwNL2sR2S;*ZP2Bnj_w4f9i+pdQ z?uLHt){9-ECO047Ih-;1GUp(Wqp#q^*4&*Qnd~ozBt2g)?*qb_NuL8uF&j%wV6q(w4}~{iRxfba{0xG9o_R0n<1$*<4x(!-GwG2E0w~Atv+b)x;#;e39$9t-qfh z5a#?z0^p0lbrpd|36gy4U#)gj;&{QSJk!Iq%<2CnRcLS zcU#)YY-OygtLA+UJox)7n54VIfrA71M+768%(JAH{xi9~>>zIM5}QsraM-cqeWF_8 zJvp7t+i^eFPR2JseIO&&Z$2*P#xAVEqxAC2k^Fn$?mKfS-J|(y7+M?q#O_?D^YXx! zKW(X-?}QgaGXBXqr7J(?_SPVgn=kTJoW*f@PAC1n=SlOhLMM*fLi))stMG?|%7lE;cW;uM>tnT)_QP zJVdJhT^%=A59ro9g%i|!5Q*G;aTAYU{@tkc-foXUghsvCJoYJ4Ip|@Gjr%^qPPVab zEW}{!W*E;sQZ)yktjg$Y&CV`ozW%3vw(cF?FPl(JmLHdm$2?Ap4+q^Z(9@j#_=Yui zbmOqWa{hw13Ae#s+$nIF?P2jZ{=-g|pZ%gWZWc9R7~^Y#-6Y#Q z=flmk+fV+ImzSGg{$IY1lI(L4e)DvBBLJO(8_U1-D&LHxyT@a(n)-{dv{{=y!*H{7 z-1*2Bz8vc_KQ|U2<4w4!_W~FuU;JOV-(K;t!1404xqYs-dWACn_5a;EI+EL? z+}HYhzsS(P+~hNqjlq5?h2x%3%n55qJQ$@Hr`sV;&)CpgX3(i6EFgD%#0vNBBc*yi zT}AjMnwz_SncIk_zUfqDYdO(!@^S+j?ryr4IKKYo&D8#(Vh#H4Z;;V#k8o4k7)}!S z9wz(zJ-L2~AOnJgPdERM{?a74|E}p5P9a_3DR10*ajoY1i=X-4{gw2Kc{l2_-!CL! 
zhHua3s}V;r+%mdxGFIuNk0Tzw(rJGs#D8|!${V%u3=hh#A(R`Cm=z&4-N< zM;&Z&S~_ZY!0?{aS!$t)fzKZDET5K)q1~ET9A#_)@N~~xgYd-Gvp{h=Ug^Niu^h_x zc-6SLj?4Bm<7-~0Z}tEEfBt{|xBrFt{!IUv4eT7}-x~vT1H1f5wv=9N@cgPYJ>DeWnNCKZ9$XiJ z91L=t-XQ1q)B6;h@J;$ZF)+mYBR;^zyRli^H-dj+yV3Z?@o`1v8ym{5Jmkbv+fRNL z(rgmLH@4!VSEsJch4D;O>-#E{UDU|+7Pr~(fGTco>~;2w>#XZL-Ep2zK6pNFi$D2p z!VIz(j^FV+0pg@%I`OR>ZhcsF6~mwH0gnHhyOYeQ+9%Wga+BSU%=!V| zu@eUd(@CbpyFA(mrrt(RI2WUQb9EbHFM4J_<+Pe^zYEW^0=(y4FfQV``VFQ*UB2W- zy{EZAR!(d%>a*mh*tU_?eMF8_Id50}m(GFaPSq@J^XE!Fzdo zfMy4X@fZhcDs0JNy^a;;LVa6b2A7w6z}gLWS!_cuU?hAKB67b#nZ@%9J-SeZv zxHiu6e`8SJi}isWkNPkFPNs7*P9x9Ff=kr;xsx5C=cfk$_SCTj(eqRWFkOPBLH~?5 zBH`co45RwZb+N;}vdWLTI7w$GJN0Rv=7*KO)Xq<@&f0Q;7X2AiF5d7a3kzI%+ftEh z_W5)A(7Ew=d0i@#(OQbq*{2{_Wy55#bOE-!n6laKP5CGceY(bKxq-b(rCh+^x?}#< zSEF)ynB#m+Q9hG@vD5-LgvF`4%uRNt*?5y8ioe8rPT{`!Is3F8nbQY{cah4kh}FMR zytv@Hg!1zMR?>Am+iTqGb0plw^x5qJ zCn;-JuDkxXwvvS7-2R(b&pnbjq>=qvy$L#**PffR#Sgou(z;#df1L15*&%026AusQ zb z{+=24RG;fwu&r5w;$%zhE2$HC*F!~~Dwh|zf$KrWPG`r~M%CM+Ne$-3b$Pk5+q_^f z;28ox|K#e=4`_V%fr%WhtG)TS(_`Qew+1er)n9SEi9%Y8a&H!#+NXGKlfHFlM*RA+ z+b;%Y^!a%9TrrmW9WbVNMe5h059hjXo{0%w#}bNL+UoH1DMrPdeB+MKL;cQETQBxE z%HuQB!=pbEKPI#1Yu+pW^n|hb_epB=kKz^Se*Df3wm8yE$#qbQ?|HIk$=~{|HnN_d zvhihRZqxHfFfQ`3@z8(3o1dz`MlVxH|r84=F~WpWhLa_m;X>!QGht!r@l z`YK`UAkN(PQOk$-tN88vr1|we=EIob;g}b;r$P2IA29vAoKZg?YTQn*kF4l>qmY);Ou9@?U&Ww;UwD|t z$@U8oaTd?cJ2UHsry$4q>0k4$GqD)!x$Kp<$E2Ky2iDe6dECvvWQlfw-a5cv3jS&X zRqn`)QSIY3tJ%*RTbN0<&s6)}+|Sl0u&v##wYVvboQ(OoTW5#qQ_CK~>B=8cO4^j! zbtKzdCF)_^Z*8STCgxo`Q0~xXyUwsHmy7EK(qB(x%NHF7o2x8-Q(yb+G{hX&QyQ;K z&Vp??nw~>U(KsMDKi?Ytw6d{I^s!k?>@_-eE0lTl8wWnp>v_|+_@1&{`C1Uli@VC5 zhckv>E|#OmX>;ToqsQyp$Ck4!lP@ia_sb!E-{bV~4kGs$!iH9F%{cJBUe#yeWyq=!31C06DZvyd_k@CXtho7Eb09hMF z{O}6HU&jE1VO*=uMy10Zw4sTJOm7SzP*v@h3RK zN^KX89!^>mcF_8My;=YF8(qo4s&X40jGD}DBC1;ia4_m9fGt)!Ippmxxj8#}uw;F; zkBI@O$KY>e7Z7z#}tOP7JdALft><1P$(@g~^wUeL6JY+t;8FhP2f+YmC^b0^Zp$G(_6=^`4$ZGP(U%O|XF z(o9d+^eMkh;lsh;#rONP!{$RBa>b-_7qo{=ludrt$6I{d{2(X?vm>k8>z-yWeEHs3 z@n4U_xVdODKNr))*Kbts4TSlpf1t9{5nfXBjn~zyP??~LYOc0=Af8P{b=b=>DWg3b z7ZZPD`taj+Oix8Oqyc|+Ynv?{PE<@;>EEWYQHe*tTO%tME2vf3+{>Ch)4j3PfVHW@ zrXRb2T~kr_UqG&%Tqlmxcsy>iCS-VH_7X$;bl-1ur(^CWXMv~6*fo#7H#bQW_FF;! zltMgjeTn5hUB{5!cH(N0X)ufj)%!7&2Iow#e zSZVBb+3?vm_p!rdxpvZPzHORKdVJ~dxp^QipIUmy<|;d5TVVGFC*Az*+>rim-gUvZ zKak?9Ik$b%oO%p?YdZK(y0R%^-p}{T@A&YKX!`RlFY1Z4k82h48|U@Nr6&$V$@S@; zO?cXIy?`~#qkO?Ech8L@#4z^h z2KzUDiR8wbT3udx?i17i2il8$+Z+6p@ER2Xl3&R=tk&D*!P0ozCT9%e@3tSzq1X+FsUre@LXTy75iawRK0 z_y@nejjVG5oXmICy7~8-(7IZ`T-Ut|5PIQQOX+T1G3OqKF{rQH92cV66nm24vFoOI zpANhM49J>^+)~7BOox94jdHe-JyR3#^d;$&znnpr^A;V+E0ulY(dzNH$UNM z_uMC-+UwHe2B#hCuMH>Oq0~4#P*d|Mzv;CXJkj=`T6a!KExO-1ZR;id*(+xw#vbvw z`Neh&#!25|j{0(Go-T%T%Hi)?QvNgit|@PwKOAwfbhdHwl zjM>&Jb#e>>(Ebu-zn8;u2y8fHSPNX z;AiFdylOco?2RgnbQYZs>WjasHo!NABG6|9@0#QBObr+6`^uEgMdQ|3-g`|7FMhs? 
zjr{hI+Ku(Uwuzk?N^4fZ!JGSKXMDzD`|d7X+h^v(^$6wgH+RG#=fGyIJp5kQr)~W2 zJTcSd$dq0%*)KB_oc-DMYA6%gC-wMrq`u|Vxi~I*zE4?O5A@B0<&_OXxu^hAbvBz< zFHif*5IFkidkd6-RNywlu>B%@a|2gxe&{!!FqzW{*i5p17#_xWwRPWIUJP%uSS;+# z8`pXHu@5cxmCLjF$7SQ`e}ok6owMGbhc@%0wm^a^qk`tRI*-h|ylM z>6QNGi*<*ab4%vi>$0|7Qiva{yD_@!cdfU#At--aM=L-4>~JZMc(I3fH=O3g3-gZp zBed^wH|BDr!+iF@fu1}WD|omOlzVmSMR_-&jd`5Bwd}YLic~b6`y-R@|HmtpB)m&S z=1>t88HqWo6mqO6r9OqEU`J5c)Fz2(4lJgkG<~%mWHnxxN58r>_ zzV9FI({;U`kLQD{=g!j*xgEU!ogX4gttxBfVB;P7>{P#q5L82-@}xp1pj__7Osv=o z8*$gz@`h_Og0WcpUB<@V5wcb?li%o+6{jK(n(J<5^nO-9%o{Rq>7`SeGxJ6=K2N{t zi1c4ed;`4+-_ccP#&9y@iHCxGwM3Clm z42R~PL4MHO9Q--X#MaWeYSguyjy;kg7#G=lZBlD{^E-xEjCV4}E)t8rHSIOH*L{71 zP^lVP7t>srICTO?zdk zbnq=Z>f8Q(P3Jeo`w-hjv7E#9a$b4QL86w3V@6+34E?~Oj-G^PJ{H5%YocOVw8-JV z)9MLFXXpA(r};9JM<&_7grW<+dTf3zq1?#y9hm~T;`k;rGc#)s6|iw`Q);_QdHy5j z(aHZ+r2#DZP3&H;PlO7WMW{@T zBuOtjRM5#jZSfnhJafO(&~CXT#3A8Q_LJIUB?P^tm>~ZZ)*|wzayqW>g<8s@kfe}Q zqcYd}-bWvYeZCLyKklQ?NHg2R4)FRC!@JWyUvjTFn;s$yT_5coQ2qRW1$M?EVvdao zNY_JmDY^#2j4c#>NTi*ZK9^uiT1WS@47fm&+jO5=Vgt%CwLun{l4t`hY*% zXvd%_jXt_X?BwvN|FB0;STV>oBx^+s@1E_e)F&_)UT%}q^Y*1j={>&fv9&HJNOQmY zJE?!{M?FZB@I=RK^dr1iU9W=iF@75mtJtnRia z-_N1K@i@CjmmQ_bwJk}0CKXDofuUUB8Q5TRGqYaicw+nHM|_p~&uEC?G+Q%kTuZVq zz)5HPabLU8@;rGc6|lS%mWl0gXp9t%)L68DIlVb9+ZTmmbya6d32%Lt=kAn2cl!k9DNC5g(qYMVG~4zwEp{Of$~n%LvSVKF}un)m|W#W#X#73PK0L zd}IXZ|KN_0w?`qC{TQ;imijULXFI`52B%T<2cKKA>6f_$rlP|rBSvVuK{bD^Avk5+ zGb|D%v*-@FJ}B+JF(Nip>J!Z3HKV>IIGJ?ah_(tm{SLbJZl2oM%!5Aa^qK{E;T5O4 zKgKnu$LvH+2OqAV=W;}a^J>`yAx?js-_2`KRZ!oQTHhJZZ+(ee{^d^%Co?7&njd#@2W>FV6+MT=S5P%wmv!3r_Ij_7;dM7ZN8WL zQUlVI(Lv+yXR&1@_tUY9{vj6`=ZA`=aw0g`D_-YEYeaPiMjpiBkO6o`X+4&8R5?qAOHih#<0~iOGhW!OzIy zxthHlIKdd`?dG{j;h7MLm<#9R?+`zOkNN8;(dVP*Id~SPgx)$=EbaxJ3-!DhE^S}^ zR^BTov2guI%obMiJ_aRt>zpVxOHyA&XgMVD>a^5UBd2oKFgkRzWPJh~uN ze@(vU*pSR~?!XPV&E;Q8cSW7mz!mrl#=N%`Z5IU-JCrM&key`2+rm(A4eXf9Dl@pP zA^QSVbUzGT(gJG=bRs#@>y=k#n-;a6s-MobZlbNOL>75gD$Sa9NuapD%oMAZj<*P| zq6}G<7_8?&A9$>jHgwO;hWGrRfV5O|D92BQJw`wT5TKsQiP|fCrW0a);AJgz1N6sZ zvU?i31{7fgS{3@eP#JKRYr*ny^t?8j`_t;aBMDf+xQh*yP%MdepkTe+iq+?{rx z`+d-U_EJ3uJrT%9nxA_*ZrQAIFHEJMx6}a?uXSCa&{6lyt{7T<{2Q=>PpwepTdsr5 z=DEB3jejy{cJh5DcH*vyHlkkUsLOOm`ybekncG6+d{u2}#kDovo7YIv&ho1f79L*S zLvoFTVosMOaMc(B`fptkIB(%v64Cdx!R+ArN@~Qlzjrtf3PNj>$KKp+C|vjY>CZpx z^*YKeo)1Q;773u9Ml{?X+IuYRwr6*?;!DO^f(_MefcnzNx9TTJ$Ct7st)E|R(wBhG zl<0Lq@GOL_$7^jj_^4W^S8SG-_{&z0_4i7YD+woBNp*QM^(QooGSM|5;=f$vUsoq3 zgKCfCEUxfFvrON%8i8T3N^!qV$KH|eOdScUUNviSK~2lxx99k>XROo5Z_W(>%hlSy zXx(wtU;`l_&H)aAEp-=&?eB{2HPJozvaB2Zu{^GS9(yozECC2`kE)0xDgVo%u%5W8 zr!dyT2=R`1OEJE+UY?L>!2(^uC>*e^#%-?n;%be+=@F4>?ncQA1>2J<(p}7@5T(gO z5}V9sB0xHKk5#7jP9CJ=~=lZljxqB(Jxpc#+33SD8VO(o- z&NV+YOtbcQCbs+Dc9=d$VI|}ZVf9q%Ty0y^y+MM@?e{@x9zlEWM(bY>v`o@dT4$c> zi|c&@c@5BpwkO5!E>hXXW~HZ@KmM|K?_`fc|Gx_$?QS()@owhNuoCgWYe4kz0*{V2 zGPPWtZT(sW(VAaK8;hf2i?Ro!1*ToMzrPv{`Z7R}YFr#PAl@#XNDXcAzRNkHwR;5| zCO=%l=IO^>O6Xblb+?|Yn2^1sp4Iah1?r5wRc6@Jwi4J{2rn|N&bB2*J_@9$brs>X zQ2XA8jv}A{C3`FKKr`mXdTUD3U~N&a_QCsKip!}=iZs4y&7Ll}z~>Ovb*{IDh>IA- zFpl7$w?g`YTi-QVYTHVXgdPFhE(90wo|{{s{`f&XhYhqUF~Jmgk3^>XT<~sH7G;DQ zgEp3=-cKR!{~aAOpS9!N>qlFTq7 z9luS;zhN-Stz@glp!XbTq<4UoyvjXg>|9Pt@w}1_rB9|NHE$1Y>Rc~NaIY0@46?I2n0|h& zkHm61=g>3$-R;~yE~8WgOS?L|I}o>uJkJZM$=_mL!(y_CKy_e0I8}Sm`Y}f@1T#Qrb^*afQB0=B!4cPFCq3x~H7HU#i zFpC7}mxJDS6#5@HA>CPIEo+EbkTGU)S2M(t_X;F?ZXWgpdkmPLcZoNi7Ju?7Vq$Wt zDy8BoEtO-d!%oTbhM}8UtF+GzLVr_a6JT22QH%9a4Ia9)6RS(Ag}?{S^H$ow)ckFp z;U4EcgOq*&GByZlUvGhS203{c0`rApnti$>ntJ}0YOJVUj&}2XP(}^G@3S6<66CD5 zn_v~a3z}RKI-&&7&D@%PU%$1heUS@?5ek#%?LWt3B)hg`57+J12F7d)BbRbkJVlpt 
z+1ZZB+T!&aA_ywS?qs)43M&A7Z1{+6E1LK;(5tJyZ1t&yBkTEueXZbfa~#xU3D4 zpgOl{%Z~!T8&~MZH_;!6zMgIh_-0+ZX)nLf?_6*s^$Se}&kxY^K_zvxrzMVxTstg& zpS>`mDbJ=%AlUABL~Zyg7*{R*9Iobil`Co$xiHG<{9oP=%Y#l^L!HAo@@77a!)zQg zrXSOLW4xPicRhEREmq~k^kEzwYe1eKisy?tM`&|;I{IYGF;FY_145Rd9)!pWwB$fH z-1I!J3b8WcpSmTgVupqv5*orTI2{Q6^bY$n*>M@zw%Ziv8 zg!Xvo^*h#ZDhF#1pAF-^pYy2vZcvMdZDhW0mcz@9Vxet9vIQt(pwHD5pxvZCn~tCB zlDrazxVUW|UT(}TIzHUW@WU6Afhiv3#gV`30o4%1f3Z||4EGlYj`gq_Bl!q)6VJiZ zp1N!sOKcp*xk3L3%F#40-LyMB(VaGa3CPA*drX)5Z#RDApQ3NnC2WK zKM3)IiJ#5ur#L!N{w3H(KKIydZLvSQu+9d@#$vv>j^uZ1k>BbqrTr_;MNf&XH3 z+s|<5l{1OD!mb;WcIu;Z=dM1KBP~$iF+%}uK0`}0d06_YE#|rRDGkW?OTFhvrr8Y{ znF7+R@kuvk>VeqyfgwE2p||(6dThP;6EA*Xb9se^=e+zdOe5A9m5NMbq0Z$?-{;?K z>U(am`~DIFUF%v@Qj%v@I!-jv1+o|c>|-Z1!mG8Z?Ub|8{75z3ncZRn(?jMPOU z0tYv3L{eS?Sk5$W)_YSy46_q}eC3DlDyOQ+dM9l4#Q*cYiMca2MTdPaT7%M%3$}1< zxhQ{5Vgfu1xlel(cmdN!*Fl()U3gw#A^Yb)|BkMH7c9BpsJt}W(93i2Z_0(G6Y|@X zyYcWUB{IG=X}~gnH-4T3^Na-bp}>dXIGTlFy`$gg@DnzNAsW8rD`JM!W<2QaLRzn7 zO7eInh8zR1qmgDE_){cCdeXCqNC5^wx7%fE0v2+CW%txjL zR={nm1xMAD*j5dyp^pZs2{tX*>0*|U^8vs1XFby{_Cu-^Qa=FQ5l5N@>e1X)tU@Ee zYaYLxgO7}h*dPARzMHl7uo%y*4YApUl6v@HXxFw}{aVe^>Pi<|0{BYC z3r3slwfW9$=FK`2Ae-jQHoIa&#d)JVdL>)ifB&Dw^_PEg!uqRo_U{Y>bn}WOKdDUi zeQOwMC&qB`L3nCYr<_L(M#>xQ#im0%t$)`fYk^nM5{DLGLx*QeaeS-$gL_grMw^_0 z<$`W)Z0N9$4eMcm`m-AT%O8Au{-^nEfW?96AMnhlDfQ&PTL})&3Svt2^kMec zTx0f|0N(^#KO%U<@2!jC=ofhtj!o_6vIF5*nzNV0&&HEhA35W?V{^0h1Dw`}8s2cy$k zIP^&0Zhp!K!x)CtIC>*wN*o(IWSl3?U(_2<*Ko(fKUa1$&J5zO$2R;Bo3638(eL8Y zfz>DWyE#t#gv#G3?pvAS!M_WyY~7qM?;p6PG?u088eDGS`eLDuouiI@ZaU7JAC;uK zCL~M+&zVZjV&C#{R?%VNj z%8_!)$VR)-xp5OuKh$Y^G1KuhxiTfTTX&6D9nGfa(REp5ct7$Ynt8{X0k;Mt%(DdO z3`*6qM11_NcRP0|=lR6(8rBWwkKDjZ$E8RA^sSs=&-D$`V&+KOv_~*MG6XIVBXKTM zV+(99CZ~+GU@UCF8Y9#LR~HVlmjwji>5H4P?; zBIgG;7jJn%Q`?S@aeAInLBH}3{GI-Z2XC}3UokguyC!n*%JJ+f1@nAE*!ZcN)*S6A z&ZA@gKR{^6`jGDvkbQ7#+qm>ITlO(*8?x(9V-H?v8E**mZBN>Zs786{FWmx9aOT)|L(}tZzQD_&oxj&6 z3QY7ZT(wFfdah-4wi4?k+%{XTOP;eFulhhXKW_|`*gArWu-fhV+MAKK$?xUMpWrbc zPPqPJjrHUE(EYyj;#lp5h*-Me&iJrH0QUOn66>6J92+-%O46O{lIIJ@I0@_W&NS9h z#c4d#ed|MOwN!9_tiAZCoa@)GUSB(JvAFwc2 z1RIzBd>DQo>RPLgF+POOSa%LZr<-Y*@5UOnwVP_e!9$vSSz=pzjWGk~iD>5oSi5Ya z-yB%Zi@LSjYb#`2xSCUr=HBz~hMpy;G@E&$?e8_`!-)7iTiuMu4SjY!>Yp4c|Gfu= znPDv&tDzeU@rNBC{$L8eHAUpi9rFW?w?O*jnBYmY;zSzZv~0STRnyUw^IFB4fk&?l zXdLXmsegU|-r8nO|AY6P-lSB=5w_7fi!YQ76vuVrB+r|Y&Q6Qmo9^8><=B|xkM-MJ z5=t+;ezJCXsNZ!yd6*2|l0$EN_N{J?P7a4JQI7un%|HImdu9lJBUzst6XBen;QB)d z{pAKUo=E8)@{FH3|CMjTgl{C)k9=@E4Ojyp@zUvixX?UmL-+lrwphk6|6$UXI>`{{D`Ks&YLp+wRV-}S zvZnKc`;FC~e_4-46@?FK`_~Az98=#18bjxE`T9v6I#%8#gwY(|dSzdK*)2ij4rJ7t zYxpq#n+4fSpO$jg3+AhVDG4uT(MQ64Y(vY}#L-@!p^lEcml4L`=DmU6AauTR{@}!Z z!pU~CGhyq}nj8+93d1~_YX?+20IFjhd){>%u@pIrX4uVoQ`EY_9{c8Sv9RtjPo|^# zJazA!-3%Irkka%aIenVA0wev;|L6b4LpUOy6a6`yIw*h1kEe{=%C1Dz~0a~He%<;0+z zEv6@^Kdi>SK{T&bJGHSP|t?C%F}oLRm))5@_NTQs~&fSgjm7@3FGDa z0&NatZ1y0xpyWTDhm)?g3&Wewy|C#VH3W@!!Qmc3cfHZN)~57qts-*kO`u~>Pr z_$xPqxiUzng^v@i9}M(KYu~8!Vkd_ToDIyoASkEeIXA6O#I(PEe%?c!9T z8-3_{XTbrxeFFd1>-Ygk!0^$NLbjTBEBiYX#d6!ZGtNViIR3%)}TEd%jVU znca6h`|*`mmV@J7>-mBGVzh2J2^vl~chpb#T~MIkJS2aEK)Ae#l~?87w5n6A%9?|Z zZQS-3M;F=DQfp=>M&sU%G5z{OT5}8^%!63QqMzBwDLT^6jQRU37_Qf>k6nnHUwkRA z>M6WMxOkWAbJ8nO*PLwOucFHPL9LjMp<`a2(DA3eu-7-q{a+WJ+Z^*m&7VHH5nDCo zxLY$;z~(_W9Tnu&_*lQ@PRqJpeQ1|HFLAzk;d+)4^=X@IY$sd8@f%56TZf<9P{oN4 zKC!=Z>~c1|_^vAe<_8dAn-hGrzAVKqZl6#3!_!?iaqLzT61B;L{A3KpM1Ub$ZM8r^vReb#QK%H;*{RQZx8wNJzM5Q49F?6+n{iyUF{w(I|#aje9Gr)e&gL7 z)}S#O8+bEme&%y^ZhZRX+5p9-5x#jag3LxR0p`Kj(; zYtOvo^UJ$Bj+^}(GY_46nuEbIw8(kGz-xJfK(B8}-+1WJ7niXq=9sxQU%Xv5mWzoR zHyHrx{M37G>JNJH#AVB>avLO$Sv>jIg_h4qS6|Ri&N^Pbh;*@n*ShIg;&W?;Du{^Q 
z*yQ`#%5**RY~-tW$8Y>@OuL4ZfKK^9E_vaxXWK0!9K+{|y0y$Kzb(PJpHzyoF;C_JGM}~85*JtOKcK*e0bkWX@v`gtB zamYRE?L)qnA}>!L*EEO)13Nwx(}GXW@8+pkt5vbMN;Vf9t0Ts;8w&UuA~0LU->IR?EE+W+UfuOZ~qm$-Z1j!$FG0+ zAN=g#fB3h*{^fu9kN@#6|7J}wS*&qgx#@$;_}{paN8;-DU7_XwqWa5U!QxJtTKQ$| zY18=i`Q_G@xhWs!#`(k6buy~o_!4&}y#_kJ#NJIH1$~N=(qw7cZ?3~q&#iMa&E|vG zd|>{{-2bC5$f1NE{MSI%OnA6cjf$(f?wOOu9zr~3n zC=g6H;|2Tws5;LlIkN1&7Cg}K7_&1w+~rbSGRO!<=#&0=g(74^UlimgN|Zz{*HG)m z!!&^Ih8gIbbKfgqv%4yvx#oAxdztwvD=Xs$d*76l#&Hah`1dp4wPA8Xd$d+o$C;#F z*NScANICth&bK0)8ngv+-#Ts(@6;zNu?mX-#7ClZz!Y4XOAo+hld>cTAFQ;i-6lD@ zkq2e!Gj{nq9qLYEea>HLs7*No#a&yIDf1kHTclQ>0U`1(_Q~7AKKfmz8v)99vp(?3Dzt=FDo)ik8;=W=$c<5ic^tq;Fe*MQ&nvlSf}R zU(&7uX|&BBHEq$@HYoS8qB+(%O$GEBgX8f9|Lw4VA9Qe$$!lZxD+xXCj$da#f3hA^+`LXKt$r;7Pr7_~tO$Otg|x!{;E$i9(RoAt^2Y>dqc#ev)0I ze<0cBPFZ@9;n%vOw_gx4wuw(ZbQm}LSiA@Z{ZXzUSnJL#snq3S2>$Ud#GbKRR?$vV z2Wv3TggJF5dGE?}(oS%Rja{5%bkeOKm8XIFNnDw|K=SqkfPx+9fu>(IvLF@w*2%b2&Y7 zZzF_@U*FBc9O2OJg)OrlvzgS|Vp<12N_rbK)pkGR4Dy>nfL56lbHgDERW6u(F4)5w7oDe|5rjB(23@aTVpa=KrlQKM=U|<7X z6xwaszZnF`LtVI%j11+GkNLWJ!F-@?2q}n9eYEQrCASMaCIf=6CD$id7s#2HRx%F` zu<75i$G*{9JoKY{Y&D}}zBOPY$n)L!p*y_sUo}DKOa5XPlbfRDQ7+YiDt_tZq^u&& z3o_BK^~)x)eb!>+ja;1*Xu24T@j)0zU?Uv*_^9JNZJmW=u-cSBWT+@Q3LWpP1IyHb zaAH8LnL15h8&ah@EP(K!y`mFErFO7+e7 zvz4>ZvCX-aI*qx#V%95`%{R(oDcA6;{ zAF0wci=~4oTQpxR=H@PgNeN+rtK)jyl0pP&$Cfy$h&i*y%CSu!&|U}9RGv`wZq{wk zPJK$boQ{qx+u@(;jxE`3EJviUYV4F@LTQmQxJ^Ek1K_8FDW$z-DZ(_i^$4l$RFCW0M zc=3h2dAP<(WoQAlS7bbE>p~IceG@R_)R;F;7!vMI41M z+0>PqFX(iU;xZpzb$*V5=0y>r|6RvtJL;K0BP_qt~xpEdSwfY;kP^A?%*Ds?s&ghr3;qkHCRDWwyx{(0;CLW@&F--y+oB`7u4m5#A* ziIIz89si(de#Yn04_@?Uo0uH~%FQb5V)2cf80gA$NILc`4XU?GmmJBI#%?jgGVvb# zIx|)U((htjkIKkDcr+1%qq(a@pq@F%z+U~DZ=Hk1 z9tgC(;|qTBo0ToB;unq4z<&Eq z5ED!B(vA%AAt`;;X;`k{1I)pq6JC3RUF0xPFrUpr?JyrYhN9~Y{v7`TnLqPQYv+SW z5MZ=6U_Cm;55{cJL}ZF7PH1SS*w(eWJ+`UYuKJXrxz0VA6-{=6QOxEdQO*~~^{Zql zWBCo7jER!#Q;QQ@^N&hOBHnCx8bw40*Y=~2z*&_!{ ztcR|-R3D?y-JLizph zt%L@f`kYr%5+sc<&{acY#aNW zZz14y>D6!lXBHx%4d=pq1O+WZ5~qC1b!{w1 zqs5~l-oTJQkNQ?0^=XG>9k90J#3{r!`OCK%#J1`45l>tBU?VT-nkFb#2Oou~#SP}+ z{6B+IX}LbYLMgw*E{blmri)zN#3_ED60l|*&Zfn z6_-!xV}D!-hdDpM;EzQ*k?LE)<-;u>{(M7Ulb!9F=(gR1&J6n4gg?y&`FE z?w2_lG1;;9?3`nlaJ_*pX_2W8bGXF^dYS`Z==hS5k)xkh-YGdB>*jn&a(JfehTQoo zCVS*Ix7#Lk{C~+~n|i^6M6jSUUC0o0`QnMUJ{g}Me}Gwe zN`)@_l?fJY%opla2RlQGJC|#rkriLgI8>2D3bTD|O$=3;=7a|!@<_Wj-4<3kIE&I_e*k8hzt!qjOR-eA!d z)6z-VpyeKYD;#}CS-INizxDVOw*8=980w(i56_JGh-W{?zUh~ip5nk>d6b(g7X3a3 z9ra9M^cC}sVep8}mh=EZK)t{59)Dv*pW8aUL#2}06l+^Ksf{%pmDL+%?IS%RF@zi5 z9V6w?KKkq)nlw<1+a?OmGrXsJZbYEJb_m=S{mkpsJ0>a{J0NtgpgGBvH)hzTean(; z&pRWRhLAv+&&A=(O-~p@>ItBH+-#GVai9wR@}+oqsiSdbj@x6Kez6~}(J<|{*|6#A zukQRP@j6zCK|=FNy-8x{Rv-HWcdm3C0m;ng5bdq`ReI&Ca}3damC-qK z89q^daHq}Vc9@)xq;ub~O+Dk^<}!Jd4^VoNJ98m=o$JuqxYh~C=vXcT)ar9_(3tFe z>R1WSImh}R(0h%SiyyD3|Ka;5hc8c058pgHJv_L5`|#rI)#10_eSf$>?|09hqX(5N zii_mr){Vn!WS^eCWP$ufKWKM@g=!ydaQ^BQ&@LVBAVbDJRLu? 
zayB^yseH~Evmvu&BAOKafxPAfXi&9$A*FheC0MrO__iFa{m4X?=j2MfxbaPx;dwg$BU3QPm!6a7r7cT9aJ5BYy*rZ#Flh~)_>f5%$7Jl>BvYX;y zS}qWik~BkJ^pmr;ImZ1KfC5{ukVhTb4g*zW!x#qoT>CK|w!?co?{+M=jZAt+=QyaS zXpP5VPp4h{b1o6Fb7TU=wdK0^54|l&s$ZWYPjY()*ywdE86SP!wG;D^anqV~C@Z|g zQdUJ|LQ4olKg@CEpLUbisAgOG zkmnrG7)OzKWiz+MY|gkKt8R0l<6#ui%s3T`^!mqmP90wUw4h`}rt_T(#vbqN@q+~C zay3flTs~<)8->Q;^yG76v>%<^2M5Y7-r}Te9HXb4TI3j0?C`E|b)*X%{FD9p2AOAx4Y+w?Py1db|SY`9Zr{wY1Tvt+uIh9Hw zmU9lgSV2y_ENG#J< zmJk_75SAib4YuhT%d^Yi49o-344wV76@gid);k(N1jpEPjwsz?DTBR`gHA`uK3;`a zi_@iY`pm$MMepH&oB+lGY7-Rk*sm;YQz(v?+h^;|1Vl{|9Q=Oe8Ns&tRzT=!E-T1KZEhOx!B-H$UOp7 z(C$?^WhNq&l;9H19ltW*5BxY6sggpheAqHgJz{8(DF{sKVr)4+bFdHv(QbJ?MPW~RHKzS6yTMVTeit@;Qw+%^o z*!6w@S%x6S!-Y$Q>=^M7|-t~l-oQ*p2K#zSq{qUytP#6&wPNK zMncMbgN~OtimQ6$bu6m`0i%s>V-<&d5=p^a$$t2)vr+HFRlnJQO^ul@K=7M+mCx6S zqD00fJ7o|963t_CTsh-F_WYY;w!zmJwb-Vo-ORsla%?sQK$2eN2Cd+S*9eoGA*c>;W zg-YMCarA4QdE2}RU9I^+b<(ppKZ`ZIsA()v^w`0hsAv?M{*?_m;_(Cql9^YRoE;~1 z=vs2OC;$LJ07*naR4WP4;W+SQzBut7Bki6@=OYE@2io+RxTHc(@H*F)VKoq5Pvh{boD+##o@_CFbT_ zde)diV0;-OHfoR#t%k6%vx&w+KF4s%{f}J7e{t8(oHNZyKYMk{32lB@V?`xuqnS=+ zP*$CoSKb(x<_1ZeM*Z`=fOF3Hq|;XMl?2d18&=yX=w{!7VH33GZKQPs&yuw4@+;QeOuZ#APbsPuM@(QSFF`=ri6dCQ#H&j*C1?d)O z%r_5h>EnX6P$U<6w(XDb+Ms{6vpiOvid5ik@k7zPMwisZ1o#1Rkw>~5%HDQu^IVc` zzAB9`(9^NNHt18A5m|>UJLI64UU5ugi%lMzI97ap`t_H z;Rbhn-)2$zvriszy2+ir+%f)#ufOGvx3}Dx&0;%ug+Jjga}P}?*H}P@<_s*)ST_4I9q6 z+x!i8lRvt98ymrUadx=L;%2^m)uY;ZFEFw{9JN`tiqyd)&4D^G`oI{Ose84xiq=L*I?k zN~anU&tcwrUID&|1=(uZlr2@5-z@O*RHTIz0@x2Qu0T#68Nb<-GmCm2du-=!LUN+~ zhv$lS5lY(WjIt+ZyyUaJ)bRM-(4wVt6|W0sOi5=aT;+x}IroPq4OGM`%nwicFb{Dv zwieDk22=)K=j)lXDX%$Vy3CyMmC7>5oTbeHpfp&+oFAM!YgZ1P_A3E>o}YTBsJ>hh zXy>Q;JS063>%a0a{ zTGHcnl%XC~`O*iwBAw*+(_TJGqoKu3o}tb+lwBnBO2dDQi?2Bjy?x^VT=M~t3-vBO zN;l;CDLYo1zG-}*t82A|EG*19=4f|K%MT+6*~4t?=<`}8QNplKTRZ?T2J#uLlFDhD zd80n_EgZt9kAthpg07=9b$4(epNUY)YW5quz#sdXGZ4*eK-rddS({0p~aSL zrms9OXOgGA`iXZzXov9)QyHj7(=ET_P1nEenoX{Y{@-MY?lqQ?+(NrriZ5xS9~ zt}Cw9=`8DTS$q?n3+R43z`1edPz%Qe*GS}p7S-WZb_?!EVTj{hq$zfN%U_cQo8ujM z>)W|um0g0x>#=^%DYo`o5f<>B`KLOCn|pkuo4nevQ|q-vbCuIZ;i?Pq#L&DvVnMlj zI)By~LrmNFS>O7ju!viETCq8+Y!tMKqAnEr!LAzmQ@3*P-HaKq8S^j=uz6Re?T#-M z!G^5nBt`I&uVapLuQW8yc@^@_109ZF|9mdL60=5S<%0~gt#TOTQ)hX&@=0ZO%J%wq z|EW?JLrT+7fVK(34IxsiuAs3;kKXDv3$U{gBDlCzXwtF|GzxQoZODZ)07Gnrswbl` zw;)r46)D}rt>Y^qdnAlu;&5_cPd6L&#pXh`iw=X^J}AJ@1TUF%VpE28W~*(M(IQ^^ z;2TnxSQkb((7BKJCWdZ0(Nk1dnLsmvh)_SGNdyX zjQQGoI%j~;D@K}db)tH!K?K+gPq7kmu{W+V4Z_{GA1Nf(9`jOe8w>K3c)5`I z0}FXV*ki*uly~DzjyxHi- z9{t_ha=d1hZsLL6EuS*RvU~YIJjw=-V=P?e)50yQNa3E6y7Z?5N*j_-`DlZr((13_ zlO}nz8Ml$h0j33?6@OdBU~Cz)`p@~X30z~`lB1!@WwRX}s>vMv$=2N&?iB%#Y%?Z@ zrF?-|yZjr!!H^%=gF~jlOnw8`y8I$`bej^{2Gx;{7QMiHEHRGUq&EvSVV>u&gj|FIpq9nWYdPLYWLIgLp!%Iz+9 zR!n|iP8q~@P*P^t;1Qd=()yCwoz6?p?Bb(XzHz*;zvEnjp^blRoeyMbBDTutLcyFX z7iekzp|5Ua$=h_|xDwvv==3K7b=tSE?T&ELZ(X=;ORi%Qxs6ZgDsR2=XkpZ*!1} z)Nd1du%mO7qYnAi7sc4D-SPxm+(jM&b&fuzTAeoc3YdA?F*G(Sd6(5618v9fQlcGy6_W1TY(a-dUi$U*+Tu3YoSzU{JH9Ztnv?gu1>4409u|+SlrD1y!Of*Bw(~)bY+}a8 z;&0O;Y3b^irQgOcn}0yY#@p!8j2;7^I4b5(=RDY4&Z$QmoV%n^oqPOjc$*tK52HVy z&|dzdouZ`ax19ask2$ZPF#y^Dzw>@F*gGHZ{IsnY(C#K4aTsew*rcsKwu=Vw*D=cF zNWL_cq1ZR@G#(Oyua$=`ebDg&J96_VP)hg|yY&w)Mw@)5xVUr-^X|9M9x; zF~`vI$uK+zk|U%c$GCNEi7md}g#Pgzuc_~^b~J3Qm@ zDkmp54^Ox&{Ci$~y`3@bo!?%k-eZy6?G$|WU1wq21$nXFxHfl|yMUrTAE9#HyT)12 zzs=&cceFlz_L66&pSAGw5qHDuFP-%7KKrD*)NkR}zx%UK4|i_zm=_)k zbA!3b@nb~vr5}!&qlVGzaNAty?R_9e&-8CtT89AdZlo#_a4MNDn{E}lmdW0E3Weh z3Wcc3zC8{{#+ptzhG}0)-8ImXBw3g%1Zt6Oo3w*h9t*R|wk!&?^@v2q+}K1n`CJ?! z>k*^uwZFwFZ0H&Yr#EjkNqA1F&@Ecut(tbKX1a$4Y^WWcE|APRc=4?g{4>>@P|^!? 
z_RFDu-R)Whe_|Le5*E({E$97c+Obo=!@!2^Ya`u9!dRw4}$w$is z3pVdS9*k5J$QPAWIp}O~(8x^G6He`TkGk#taH)MglBJIE{T^!o1s~syS(vrU zwG(YqkE6?(u^a>+P1)m*i;AufVLbN(Umv<*qdo``c54?@<<*CIXAH|J^f^Z7wpwAJ`2r zyOQ9-_xN>a5}_^%ol_7q|KH0=U z9A&kwcvnUQThsz1<%&6W__uRM#i~$#YtxEe{h@gM`?W}J5TIcJ|ChpN8)=5q$b|D(#(IsgKPGO1WC9sSY07 z?G!5$-JT?*O1*em{8~C;fRa8Zi#3?A+mou}LNhuYsG;{_9=b^&m9cpOz(KBq9#HBR zx2ALQ6Cd)nl}@^W@oKgWCKpY36(rjP>td9?H91`10Yi&r*$0m?F%?%pOqOt}P#r4~ z#|H`JO&Vm4r{=xM$$okPou70fKGCmpaJMdj{1l9Z*l1F(O!_f4`83|R=sRf_O~Pf0 z@%_F@2_BoZGk#(?^lKlmD5OI6`}Vo3a^X4Rl*q_~3|`$N#=6-jo7!Nbr`!_vP`IBv zl~-N;AE+1emZo2lXWr@iHF@PfdGrr7ed1eU*k%lQsc+ks98aXTyqS!dYQZV(qL-%_ zOJ|(y!q@DyRZr?3XT`>ycLi&Pd9-BDhhWIl??B#n6vF{UlDf!6(%4N?Cr{8vlWZ9K zF(LZsF9Mz9AVCq1EA_fLi4O%T#cWRNLD4FaZP}lC^Tr$=L5PxH5DE@(ea46@BDtxP zue|a>eug%+$nkDy-3GlWJa(yf$xT91qEl6NCdY9A9@Q&EpGm+NEhZFWO0E?2;vYYB zD6c4ITHF#*S1mz0+E^To4_k7?Dk+4F+4kmWg5WEsBo>KvOT4u?UoGEB@0`V-+r&dU z>+FY-8a&-GCY9jW%sKV9L7xcU%mMps!%K=w4t43x&9x{vvS>F>5y}giMc;>r$}hhw z3^L=Wf296Uc5{lDl<7+xP~qczqi?soWYXVBnq!H)yTk{Q+NWQ(eaV#H`~e&Oa;)|# zy0)o zY+*_tW-oo%QoQ*lNeU#gO(?3-t>t#QL% zS@>pRqKMI(JY|~?HfaxX(nM!iQ=v@|7`s^Yj|E=(Qv1}Y{;+ONNZcHA9mA&mPkN(= z4G}wqr$F2|Ul@J#cj7!Y%t=2eHNaQ?3oz#Z?6G6#f!)e0N2*Lq&C*er`CmU6BN}>< zPmN(JngYwGaO|e!GB@IiIo@iEP;Ayy8`Y-EK+aZ9V{&(hRkMS9yrN z7$>aqgMD3bTSnH>)A1_cN)`3;70l*@=g2CKC*ArKe*5in-cfgR6}Ff+&PVBxPb2wX z`6Z4v+GMjY$7Q2Bl{cNjYE%5N3_f)mTWIGFy+uI1Iar9kobMbH2+Ge}vd894yKQlg zn28}iH$doV?x<0F(Pyr$g|T32gy59N%dz3Pez{iw=@Yws?X0+2*ItXupUx#qo<3-< zZA`<{@yKrF)eEJ38cGxK)N}H`V*#P__IyfHa^*`^Z$XZgIn3uTn-AguPnak)|7)wq z5yzJu=CnKNZ4nQTi-*EfQMCGE`B5|LO1=W9p|*4B1VR9FKJ+xjjrMIHX4&A|#8lgM zuG+SQmv7bSU)Zd=AA2(qLU10Z)`meuxa(KeyX0CEg>Rs>Je8S?mcavYo_PZ>a z>zj{RP```bALHYPH*fJc7w(S7FWlTn&Q2b>NaCINZq9J#-sA%Lh5s6|E`ThWMi+R+ zlmn3Gqc2(nmR(zrX$sGIEpim@8?-F|S!dh*2FvH=z8_$#51vNS!rJ+W<5k(x2VllU z!)c^~I}$a_91SJ`p`OiggB89g&o!{+NXi>&=9_05yS?OkeB@rBkO`MV&G?QyB~H5% z&4ZqmPForfW1MYw4s>Y@k;&uDMdVXcgN! 
zUstC-?m7kvQHWRFvp%FwHt=g4YzrvF<_%1OpX`5sNVX>jhX9h$GQ?>#+7pB8crc> znW}6H;O-nkz4Ac~@0_!Hpwup7m11aULbu1kY^9ohu|i@wxiw-H`(%3Fv3-L*<(LlA zk#IQYwWvt-EwuJK-qDAf zR&rBb%FB_RV@;11hgE-y*$*6M9dQ=1yW70Jyp!L}4d zV`DtD%TYQxFAT?kb4?b(7p8QYA9N&3yO0CFzb!CQX)}kBt7{ZAA3{=&#{9&m9{2G@ z$Nu6cF|_D4{>G-f*FXGgPv>>&Rkn5Mp`&H_fOh)Do(Ju)kC!%1{=tPlbG+XoP!O9o zB@bl9i=md@R#O>D|lxxJn5 zvu}MG;~jq)qE3q9?L!M26Nha0JKpMZ+N|qOE0d!3po`{H-zV7f;4iiuZ36t5o zWW}$Z{DmAv=@b6EXenzT6|=H>c9-+KN~~j;P?e)ClG&%P8@Sk`?e?pg&hdvBQ3ki2 zyz~>a*t54T0PO?MY+y2uB_M6hF31ya_@xU$@79VrvPUi$J28hQo9%~6=x#R8Zu)7^ z54GD-A1z}LHSt7I&k!zQ;)#d4FdH?GOYxOZRanVI zjbuD*GJRc$D?!+EbfN%G{lNT^FSU8cIA+DI$mu^+5z*CxAQ$JbQIXo@34nRlC)ADw znuAHHm_^UHLw5GAm>QGZ={!_yu}vRqfjAW6n3TwqFpD(1&165c9nf5ec2Ualw8n`t zb^r0CR*cF``06w6$C%n>ei&0@Gqwy9W!sRcXPRHML;H)F_Rq<|cn)mYsV$)1GMBpf zQ4(aX`0CwR^nouU%@q%<+C&?&CsuZw6FNge4dfpk6GN-!pSY0O{HH8P#YxONUPWW0 zIZHd6cQ_c6=8Q!5Mu&S|PF4DkPwPh>@~MU`KhkNAYpWf8*a?F%*!jF=b>J2wbW@fd z1-z;odz4IWp@-)frdxdQ_*L|d6U{aB8yoYCP1PMjd28;(hmC3X$i3__N}R=520>`I z<6p4qRSHeZhx*evlxy~jZ{>sDYyODEH(vA?cEt5V-}*s;=pA=MlMd_TuKsfqhax1l zpShx{PPDF0yH>SvQOoid{o1^3iyF3$dC)0~+Qb7H+EEn6Yrd){D)wS6AMDd#dPk-; zUd1YInv|Q*WYIUYsI5m8jlt8VK55(ngNz-+?48a&JhYDgXB@G&%iLn$If(+oNP~Vk z#*luS!Vn6|>J<~4Z?GPZ^;XlPjNPwSrZRIhd8u6p))xsV9_A%t8BYLooZM2|G*EbH|x8FU`y(kF2~wG8t%PZ68aEK#RfRKFGYx@c6kF;NkO zNI>2kbKbG|*EusZo-_GjZ-PRgP&W1Y2ETVZHqgUeml~iivX93;LsDi?)v+ z(ya&uU}BJB7ZddaCyty(*0Lu)^Tn=B>N!yB*w0;lhjXtaizIq6*9*_cPpMAU-fZv)IocF9@oIY;;WRW zEbe~G-QNGo!ubFF!#9V2`|dH15xL2NG|%O}c-5WMC)aOgwhO`KJ{WGYz@b~}g7u;cfljne6IXrl9_weA(NekruNY}$VcY17# zg1>+K?C}4-`nn(7^hY;;_Tw*DOn<>oLtSOz{`~O!ufFZ^C@zwF_xH2Y7d`6aM<0EJ zJMkT3?-uPn^O)oIDU0Q=IJVwFUnb+8S1igiLpEQ(VcdGh{B=G!`Q*`q!~HvV4==b& z+=cQ#e)%=7zKm^m`9V*ffljP$u_)~V^lPpy_2(<@7&kMnfzP`=@7=piI}7nV{-rxz z(c@j_^4z(7!qB?h2Lfk&2;qGB8oW37mhvs`fcN?8>hpQ)%6?4_kcQwT{>BgcTyXd9 z#~VD;`~;o%e8xHWfBNwL;nS0o!#(g_zkc~}j|F~x?H!uMVCwLH*!O{5YBc}+ZznYr`|GFx|{vGW?3`+?AP+%$&jtyjN?N%`*=!{%Jh@h?Ws zr^Je!<|=)wKb#w|<>))GDBBo{N?P;6T1O$u!PORZR*^7b0rUZqi~KG=YLc?O5!Q2m zW3v~k>>15!N*gvO7_lqf;uI5qvkz}2 zYgMs9oE?-6z${1(eT#|?4mKZ)(L9+fG1k<8MnozogNWIKo1=r?Yps!GWWP@Hj6Uwn?c+EgWlzQZT9%_27yFq>B%qxBPA z;GVX=-HXu9xAc!OSADil-f617lk>83zgQ5W?cK~~Ki4XKct@MpcwTqA7#`NFkwL}G z3l#j|aeR&)pRbZV56P+mj31&A1mvm z<~5-kbQ3e%m+urTAGj2g5xk3juxbMDBLWttKE`i!qhiG$$ofC4i8h%_Cuda=V00d zM$^ntb6^|!5~@S}3xmuFP%U`y$2%8iC~=lGxMIQH8pUZsz3(e`NW5&5kF9)A$IMnG zReI%fuxp@n=nhUb_1n&CVN9xSfPQ>{OnBZuYaMd^xVC60o4m_HYEx1|@j!gJBybDr z%Kvm2TguTY({_AH!P?rCC$BgaW}isWt|*%~y>9F9e;?VGVrScghe+uJHs+^$2MUXW+_L z$DUA?re5$i(FnR$w8O0=Mg&Ulx2LD+9fTS=*dU=B8Myr?SiL#$i~$RRu9CUafaJFg=nJ^t!-`k z4|^sS%A{Xh9#oMxi(ZWTY!ibr^npHcrw#BZPhZeJc3aeLWvq7S?58E#X-Af}$rs&> z&kl587#jRu-@=vup%u zgg(XCmd%+@i~nMoGg3P{u$D`9nrp;@Y?FI74!oUFh!OfWa~k~RNLcjklFdbWCmQAW znrDkY^hzdK6SL-?E_B4P)a8qkj23;zD^mJFz}#t_7ss^A9h+=rTf53CFyG3xxag*h z{~5y+qc5WImM1V5rvTb4mrUw3pAc)SHshn_bp=^PKz7R;f6DqG!i%?#mAZBLf)Z`E zvxl_3ZMy)Su}Ysb5~(Zt4;Y}ShOEfgN2c%CYLmOF{Jw|;{T_p}=qN5P!Gl5AJUm)T zyK_<)AXY}3Iw^aMNB;I*`BPn0OJ*#-smvFf7{$V0-YwQSGyU)Bs6sB~DzqOM$cIHy zGb)F_u~;17V;}9xtF$YM#>TJ>#>LI!f=a5>v3QE9zOD&nZ8Gw5Cwn4z+y1FtuhkLA zy2PV@HlvM_)UF}eP}`z7%BL?n`W>g1eStL}D1yeuzirm@in%(ZG%cH#Pjs9{MP09N z5vOYFo~CTr^t<1EI06JTcx-bXo)28B@J57`hc3_8^g+Oke!{5RXxZ`0Ui!Mg*i>n) z@h2@d(msne@9DF}1#0E99WM}IZBDgO*`6D%aXobSq+>Zs#VNfpS$0)6FY*_AREJb)zf9BNlY4zQrbqa`d(j zO3r1Ko3A>rg&zFMIF6OB3upn~$elU6OhR}z(XHq?hn4&NR=jhH`nkk42mh1=;`a@? 
zCW92nKlIJ5`q+kjUa)XxVC*x}JM?)>j2V@d#qOpvP!PFQ{pqnoeYUII&K*zN$`@$l z#mJp2?n@K=rt7}vQ6pb*2lFrf_%(~;-yELuJn;+0kqbh1xcl0u>T4Ijy;J;xg{+%A zTl-_~_I9E66?c5Q2z!+USr?{H`9aPd%+p`i{l^yWqZm`%a5sK5|9BoAVbuF6N8{ckj~kdFVc$eLn~G^u_b;{Kn80 zI5gz$o!i|t?_*xPbN;o9()i9BaGXQD)7?kOxKQa`ci{W{(WAqUA8~g-9~9gpW)JblRdTwo z-M0W=lv^BeJh*^r+RS_avd&XXFmqv&(rN3RkH{g{qNRHlrzEq&zj+6?&S6~zq#o^_ zFCld+?e{qOU=Hs?k90a-QAkdUt>BpRh!m9_F0Q3lf7Vh*zJyyakb9os@!e%VvDiKf z*DY8H&XR7aGV>{ZV;s03YRr-WF6FU21hxC|XC~3|C-Yc}Ru z;?(eOoHN%qkflO2;~s3sML>NeFL+u&a$qYz9Z3$KJk9qT8C_xWTn<|2giKGr$F*s< zah8?sX}Xa z!s~IET7*{?J`hpo4769?IG3(GsV~4`zPRvftmm<{OP?~#AZXltSB(DJy0XQ-1mb_m zsT;qc_pv;DXl9ONwwXe%*o)5Yyk9p*?lxTx3H@NlR|DN%vtj%zvXnww4g#R~2n0U= z9IL`_LDp*u>~Jx&MQXpL!W_MGK{#&vk#CK28vWqLK!**P?eSL&)&ny4bUtiMQF(>A z+{M*!E7TauNx$tLv*t*LK8VC^F6kLtoyfk**Z~VlH5dEgbLo7JwEm{Sd9Z7=zJ$}1lVb>T;f{kt? zOMGpMR{Y6!{!s2X1zlxY*rG|e-F&cxf4kn2w>&leq80aSdwv>U!zUJGo3m}gky`aQ z!0}Yv=|oy^T{t(M=7w@S*P{sSVC_6+q-Cj7j(C)J=?#nI-7!ep+`a6?x)v8CRB_^f zVcjxkTpE!a-ez?krIbt1Aygo=t1&+LXQ+fkd{2yrEJC1=U`IL@v#nT;Y-#Zc%(!opx4CHS%8@n#niyd zLh~%BD?^{5XE{Fo@Q&Yj#|H5$uFdRa^Pm-tcG#(MlO1dp^IwZI(07(g8}$Z+^#t;! zy!=ML+rm5(Jo2QoO{03I1NC&V-_C8ZLZ!$RS3FdcYmA0e3j5s8CLvZ3QxgqE(=4WJ znd(u1oaPS#{TpG6Ha{qwcKMNew85rusm}`k=&UBaCG~f4Ks08MDbjsd@S`)7Vw8`qbEn_JDT0zF0u{2EgX0%%RYN!pv32zCTs=BfKSs8E zQp_Z$HHhq&imD@&ZVHv@Z1Q050j@?mw82>7lUd+pkWA+~a~AvmVsMhm9?= zt5QB*vkxHhv~A{!7Nx1v7;kF*;3NpP!*5+2elWFk_#sfSYJ5Sb9+|x=Qy%rkUo)~8 z6vdl1>XYz-)m+>9rFp~MGee9GV@r2g6#L+WLDKS2p!l%ojC(So{fIB8loKC&8SLZF z_sLTTvy;5WpT$%gT^#6$2v)YlcoJ7W(MLlcK-kUEw}XWjRE=%>2ZQqZq}RGRD>-dY zj7;`NAK8vCdMu6c78iDn3H0HkF1FVRcqcpJ(LU(am36e~QZ&%Vj`A!L=Kxx<6H^96 zNCy8j$aQ4RkG2cKy}7!#+H9VM@Or?IFEW(A7Otn#fjnO70u-z=K`1jefk{dsD)Hm}DM9AdLGbm-&Ew7vS*m2Ug z=jG(kJ8qaviU%^Wi~#uf(|3K?7L$JQ16em>9H|8S8&^Ka@his4?e2QwIevj$Cv*&$ z3+Uzo{e~SgPmatz$BAkw$X`b%LY6&tTSl%vZT?w)mDan*vJY(Cu_I{ug<&zfz|(O< zL%^Njgi8RS`D-xP;<3#wX7)CQg7*GvpqUD~BuJkzo7P2++&T<2K4Oge;aAVEW?pHL zE6GbA=-Q4A0e6^iG)hq$}uo@A;O**9=R-ZOx6eXar({} znK@XI{7Hj6V*tg_!Lu(A%+sV^zz5<}!$vukniFER6OIX}RGy4Vn*e6}* zqyW=jijGw~t%%#)OO9iI`2nIMJ9ykku`6a*Gyr;WA2lYR6_TY!f~3rpW^gZ3nF z*<+51(FMbWw>6kB#{{DGBS`wDB+Bda=u|+K=tL zPantP;u}8OkPoSz(q=FHB+P$0HhH{Jl&*2E&Ai0hJS^SzopR}O&PcND*&?&~Coe_q zP)@w;rD5}3v@<_*oK>rRol^|W&K2<#(=;JZo$00nKK1+O_{z7id-F@j12y4CeY3}= z(!H=MZuX04&5Rv{n z?$q{CEx%=P-q`uP@-r5(UAX<6JJSEi<4!($$lc-eJ>^F-{qan_^$48LssGb2e}$em zhkyUqf7wFv1&h_*A%6ev-NP4Od~x{4|NATMR=;!jl!f-o9LrDPdBI~}917kY=X1te zXacK?-7Ws}q4=kte1yK|xReEFeDs?0>{IGKOZ_&B)ZJx=e_ydc>>c8!hL@Ax>FAG- zI*cz~UG(@C7sGu-i;L(l@S~5FDL0w(yPKB$@ostVkbljOnqKB^_AAWU{($KlZQ<^G zudjWi%q#A0zs;TK4<9_J{*(_kE^!CCahe}pJ9Ia?kB#wp>la|-pS$S2JC`3-W#QbX z4BaCJ9}}maBIgm0Yw-?v^W*l7%ZEqUcLQ4QKEFo(IVbn185hIJHwahW6^ahoEo7T- z91B|7k&cD;lG_At-i1#mDFC^hGeIJ$O@3{D<}>QfiSV_q;5@^&$Eeq9ge$$`X@RgR=9d9%V>&Mco)7UT+@q1A8Ce;OS^L_jz5Rd^yGHJU~4+DwKAp zA+X;e4yATiW`WRty5||fYbJwVV*NTE(y!tb=g4tU%JU=_`BbPJwMjEN=UoY%-(tHl zE8&r)Z2ge`5icg?sAtQ(;&Bt~>Xfh#b~o>`Lv1dqjo!R~7>}b4U2!Q<4A8Cf52Z>_ zo`^$P($VZ3ZyvZPJ{D<{>sH!t43NG2Cl7+cmOaaktU%a&crf^zYgLe3+2E5V08=?@eo=PrE=1%Z&k$KQqgM|rO9R-vOTP1Ls&jX9qL4+q_TWG1wWozl z+Ao8<#rlO++F}k4?J-_+{HCV=u~iry7NZdQ_*opYrnobbKF1!qedi;}d_DGw-|@x* ztHy~Ro$B+?KKFXnkIDQOb7K*V&IFD_kDK$MxXkd7Pe<+48!OQJ=q~fo^9$|wF;`oc z`MbXbpe_2qdUwRjgdUpNy9m#Gfc?#(v`_vb37L9K43->kR}2+R(y%Lr7U&v&DE(_J zCJR0GQASlbq%W2z2rck>#-~b26h#VYsoXkuE)JnbZkkFyN_xr79Rqvl0m{k8 zEQZpNQ`{{~3e;HHdZWCtLhfTt$`<=PkT{SjOT+tmfpx>aUyz9v4`ilQN~UOk+$Ru$Oiy*`^<`DNW<{cM(t+n5in)V z@C?5^@a9}-D!bPK(0TA=5|7rAW80Re7>&nyO;FUcPdirtC!w4Wp{eaXew8W`jo7NF68A;Uc zY{h5G2ZLF4(h&l=}W+h$sL 
z%lkS*2LIrIV01c0T?l4#JeptP=peE9BA}MotPPHC8?Hz=IvK`__eL^y$$5IVqc(JDkC}V~5JO|GX~MnlUsIgMW|YoqqBPVF=9ArY8cHX%iOtiuv-{mMaaxb2iq{$&fa zZN^Pq;z0gLR+rVOnFSy8Wb?q*zVUTfA@8)L{G;9fWTS~T^SU|Ki-DYo4nd&TMDrjz zHW@R@Y@Hl2eyzRZ7j45YojfTR8Cp9Z9*ivUOl~5}@$CF6*4Re2^2bp3VAEdtl+AIg zJET1^)%W&0H_ECE9pIKDXc&it7bHV>#7IA~kK>m9?9c~_;AOpBT^Add{Mt7%9oxxE zpSi0nu*6GQyXdpoF2Fd zLXycGq}%h>SWeSm+NbjBY5dTq4drTzq*I|VXrE#&s@IO)m|vvBpgzDGrPLQmfQY{` zhZny?wHcQH1<82RgTcEE^S<0bpgoy`x+XjO@t2+DWDQc`XnM<4tRX5yX zmy-T`y!EcQG`1UyAjhW6l3WbAi8mp+(KJ3)S5lAdDG7u5JH%5RI^SK`b&)3DMuHqq z&OK&W_iX_fE*sgbL<+A$v9Ubb9kwlB5AlAFn~DZNgPbs}W_PO7?)(N#$8!j?zfSlP zuTJ&D$h7Jmuq-K3M4Zh zC})f^ezeso6Ih+=%q{+!mla@cm?PyZbz+|{12uLlLoscQKmC`c_wAL`J?dxTP!|md zaB)fd@~80?!t@CX zz;6yG+$rtmBR?L0|2@z8e$AcFKEwP~3kW`6`x6#tZ?Wipl{>cY-@jjaU9f2XmPKLj zEO%l0`Lm~o-+ld<1zt^Oftfp^f8ayepRiE>xr^TDI6r@ZTo(ITJpR@1{&0BCoz#E# zw|{l`>!1Jp@Ou{CfBlEYhsP`~-)B+X=X;C!gazvtFVAvmHejm@>(@Cyana}%IzHhU z;Vw>jSNJ&#%da>u^^r5*fB(2xJh;n;0G!vjSni#p%JJFd=85@aPS15EKlsV2w2J`V zW&fNH0iGc9x<4KY{_9{eKKkGsOfI;q$25NZngwlqaTh<{Cd`yI5pgY zxXu?eN$2=C#%;9E^>0oczfQ5(b(bMH)$65`AEeBDScvj($4L5H>_Q1B(p8Kox*02F z2w`SiIvkU>EPb+hp5%G1dS(#P$hn2r{(flWdV+Gzg#bDaEFEH%=Z)(R+J!9Vr{)0r zkg~Dsk6B!#4f!puCs(i|u;YL-1gJWEr(S!rk^-rfU9!xasy=$1^ZQ%0ib$?=*~IK< z2lAAkV2=y(^BSGD8)e7m%!B&5c^raSG^36UUSE0$v8{qWB!4_tSEq}a;?7niYKd~o z4fnMvGp8;0LIt05rE{_4)fh-}#UDv5PsOWBjX)f_WbPca>k{igS^cm~I%=YAP5j1n z*^RwOOIB%|z2kQghd+$?7*ZEZ3okldCH6?8&mC*7O=#n? zm%ZfM2UdBSZ)&CKj(layGZP165;=?!hs^es{63G}(-YfOpe=6O_Wan*q5J+Gg6-x6VX|cYDibcr!#Avjbs@^HQ)x zqN3B<#It!lXiDiq8k+-QE(ojAqZD4jULkj8lg-bCOh`JD$BBTS7qwtTZx`yED0BdH zf->0p1FF7yy~=A=10Y$Hya{##Ut5@bOvGkW@u^y^UesuK6ThuUHsSQ)a!52fUwUI# zmRV<0jnYi6oR7Q$o+RJu%O95Mn*(c%uv%X-l#M748U{r}NZ91jT_m)Rh(5cE==m%6X30#-l7OYeMgst9SYGZy>Nk?V{d{#g@Xvcv@mJ@hTb0Y#OYqf$WTUMGAA zGPlfd`JALYT6v%{p73UZxgo0<$gLbiF|6X}q}Y6&JhnrBLsuS3$4l4n*Ij0f59`R+ z7A|tYYS_e{XL;{Tvl^68+&5XW&>cX{M3b3Fq`gzSl5@l_X z=;$N-qAo$nCu`DaAE5L(#?^~_{fS;lXUjtHfnn*4&lqxIuV2EOGZl^=wjhe1ud!)2 z6A5(UV6U(EQ=@u4z^U6q35P4%?Az~_l*vA9>R28^ehW#CZS^|F7Vs{R(St(R$G!nI zya=o>edI!MhSxcHHuG@F6hFFKXn*gR+Ps+83X`c5U~1*Jw$PCu%tij@WNS6r5%1K~ zYW`{mc~P#3T};x2i~Z8C*YxSwYfZgkfk#?>@4VU92x&!@9|TSgLXACrFgO##=#@2F z^glR^#foRm`boVSCZT(GVH)`;q(M8}xyNq!EQU*c_0zyS?Yx-`=L7rHDXrKQ!hG0= zCi=$yXxM9_@n7~y+sPz~^ab6B;zG#W7ZL}53mtpIpigL0lzx+e{$x59xj@V;kAq^w zj_Ab(Q~9Nijx{5D7--WkDP_<9yj7q}9n+NGmJg-Tw;>mgB4VWlo{mq=i|EEj@8_nG zD{<`_WK;7%U`LL!mK_rKmxgd!Q~+)46*2PISN6@W5UoahlJmwl9pC8BicM#njprJ_ zvyh}L@OuGXKf68uf=_p*pLoq3>C0cl%pXFYMSesG>_b1Vd28lU@`COb4vuq%JZzc} zKD|BuX#v&q6R|EElpmRJEgVfZADwxV#()3-KmbWZK~&QeHeReVh4N_c2ixLHOg!`@ zc+tow#_XBh)E8s!gGM?x$1UY*`mF+WWF}T%(MXJpMC&^W=AJf3s0?K(V$oTWo^8%e ztgMDgiw+*YJX#VMK`J=OWImi*msETC6UmaJlX&Npm>w7$i zL;TtsO&pc#FrT1xo{Y{DBnoqGVo?D*4@UyTY?Qr|pYf*Nq)WT}KI6c?Jcm5=m^;m3 zD-L$wHtnIn#F=WzJ^AfBTPBfmff`L@0i_B z-XDpjW_)!FY;j)s#tm%vKznBa06QxiKl{-E#>s`@PvMFOVu_7dhBkTBjK!9dO*?+z zq8GW^i9F5O(AhUUk0zs=@=SAFM0V~sX3DQ0@=?bNkH7DaU5e5jB^!wX998h_3r@?#c1fBEHChhOs;lK=DD zKXPaIOCCFNcKDmW{Mq5hA3r+W=ULXuQc$*2x!?}+Z@&Hh@Z2A`>{@2pPruL0v;Jc-xKaIKj*G$pVj^2AN}a?dlt)mmiLcX%)QJ04v#*0!UFtt zo`vt-<(u3E?jrI93&+Kob360X=b7supYMIjqi9~Tpsn7k-j>Z~TDk!0;`0sK ze17>GWVk5pg3o>K+BWyDbI17OCr>C}%1#&HUqUC%Z5G2_Bv-%FoOc_F>o$w}C*1k% zV{9Dr#=-@*x166};Y0oIMn`tfRVVhze0e?#|1Nl!*YCQk+(q^lFEGcu_?aK$Zh-tHUUImJw^TI@l zW#+DiIySd>3U%m7{-o+gb+BvERK3W?#J1wL=Te>P9N;;wY}z9Vc0Fc6Ru_T&!@w4a z7mdQa<5Jx}7$11T>RhFNBw%lw=T5UQEsgDVj$G#$Z>R{K?Yz`C3t5WLly}-d9D+!hQ5#knU3e`1V zmET&3Re5d0zRJc3-_Cgi%GDnxy{nn#gQ{byUl!xwRkoQr-?G4N1+sxmAsye;tsjG; zAQMhm;G7uKCXaoz`DiTr=Pqc*ROpArZ^5V`+>Mi|r(h+r`Ji9QbJg+Z9IjJ3SJY?F 
z~Nufy$)0|Mp>e3ZQ?;d$8r7QoInfw9&ef_ltY{6 z82QBUrmw`IZjKLq(9$^6Slar81hrIJUf!`1buoF`>im}<=u^y&{oEV?BJ`=!*lf9w9jtnum8JTn$HfvGTVT&$B|fBSpm>Uz#vwCcZyMW!FJ)1IXS*-h?3nO zL0L~ejRD>|shi051zvuT;|L+?$O?)?n1X^aThE`CTLn z6-~Ovx)>X9WZbl^WQ;TmmrN=717DM^{&L_VV-D;~XmOaBqynA<<1>)~555(v@C{YX z08pbbo)dpqVb>@7AT?5kKfKA9iKK}XaahcmPztc9*TT9ikBT%q!H)3-d%9#33D< zyb~W})90+np*-GlOBrpB>dC(rS8?LV(LWFs6ZJr>Y+vTRy0RO(e#jR49WOZ2sL2}= z=-o@od+m;&aut6y_bbnSUY@2!R&F6%l`zN72}ySm_5loJ>4VP`F=ffFs_uB zh@|Tma}_z+=A>%Ba+ybW?jQX_bLlBrK9v}K`W2i#%2Eu{scXUzlaw~JNW;13GZtU} z$G`mZ;n!b($1Tg8D{*ac#YI;hui#A#a|=Og=pt+Gy}%eSPAXE@q#|czU^Oo0&lw+hlvgCrRTorf^IXS8dKb;Fkso68 zF+ASIev4c=mASI~`f!LAxaw9bDba>jE?Y5`8RMDwQo$aN0!aU(7| zyIt;lTrPZ38|TL4y|Ew+8i5Q24JI{+%D8r%qC9gKt&kZv$Bn)nAL+|M)-|ep{EeYX zn@tK|TCvL+VjCE2Ge13ZcY(BpMB05^g`F*)L$d{n)(>4X8y@u-?ODv$+L0@RSo^>M zd9A%j4d!C808SiGWSnc$XOazG<#tysYe}AScqg$Qz6$+YVkOUOX!qk*=mI;BtT2uQ z%tsr|v1Rny-?cSzQ^YQ{*y6maN7P+To2StAffGIU%}sOq2Cj08BJ*1x=D;V7c`{O6 zkRE@S7nW6P{=zZ@78ruLE(z8%uN)dDf_27TjAwBwC4~|esjvy(^ zBY8}E#zWFW4ltMz)<&cEy#asUcQS9c>i4_1Dl^Z1PG(h|%A18w#;0Y;TQxE&5oH(~ zkF^5em?Dtx{zxdfr<46gJ1Y%M;t40N?IQKh7ZAn6)TXOg*7YzEc#`CQC1ymZ4oOsv zEdiMt#2!1djS`0wbQxb5WQ1%mIZXrbCuloScK{7lzEzM0HU%HL28Sjf%)N;)sRNBg zhVr6`0j6#)SfXVUKc^2TBTIY3c|7GY<`5l!AVkF(Q90@}zu$!U42r#D2zri2dfJ@@UwZ=w}k;9m8G!miY zqqMDD^lY}Q4c!QmZe%MAoP%7tCI^A7&9v+4)cFMRx<0vBJJ=IZpMDlnF>k=pkmhKJ4)JxC4VQg?QTHn#1Ix0Dr-VP$x>wnvz8LV`rv9{vfvzYSO z_wtD#gFk+v3I5wxv&WV`R1()j{yJ&ZhW}a`#C{_0BYkYx@sd$N(7ApIzDhgZ%&A3b zf$N5lSb~l<4)Slf{xYc1qQ?WS+3&u26jM`Ht%y86o2`lQF%Q#@b?8;e21i)=5BBBe~gB$v1xgS!*0UD%)x zmkmQVTHqkZet?vHfKdj3_($370$e__PCl>3StUwGZq$Tnt#k3}14?NuNe*0RgF{z- zo#drs($u#4EaN-w$3w zmNMxN_tBvZ{9Hwy`yNP1S)R)W(K`A^+<{F_X(fCIBL{e?ZWS~qz9?9koXCxiuf}NF z#MyewUpd=`NH-z} z#m`!*Wj*cEu|VaqfwH1kj|9cP@Y=)&N7LT8={T*t_67E$?D5ZwFr~LH8HM%S>E3uX z-gFoMV@unmC4D~famNdUdZS-p9P&R_aVi#|9qsM{Ro1EI!X!OJtX#Ie4A=qQkgL&Qe@<;`AS?hQGr zQf~abFXoGR#IwZ5zhie#*G5Pl834#ubF{ykPYRB%N2AW9jih@RD=J3Bkd& z|HO857&_qRQ;6Ee)=gAO=Aq<}rM788kH^67L`?q4PWp)k&%bW+^e)gn%wCBtqt|?b z9sQfJTN)v!`b3Ge21mCH52$4yfFsOTc!OJI{>h_&N?#b^?S13Zg&cjrDIy8Fbe@u) zb>i6Qc|y6XL!NvA-O#FxPO#NcWdDMp=qnQ6|KZoaIegBH@ypD-{(kdx7NelV{Ev|CVJ{OjJLECrglEOV`}?rq$#~$uKyaA$j`2J2wtjNd~(% zw#kND@YCl?Wn$NLyM1A%2`rat`IMzlt}%oA1}~3nX$AClt@G!|lS zU*GzUHPpRtfF1^LDHNBZnFIk#osgXNp~Oq%c4oM=d|2=p-?(8~mSa+PITU@@yq`40%oayf!hq9>Wv!}Br zyY~qu+pRV(OPn)Synwz-vxw)z3gh4u_3z`So4iET+2@zB^J(sTd@OOwYb*3JiRO|E z_5owdtsVINt-6G@&2h@>r_Q(b1OE^(ROOYz^p|l7oy3qr8`!z6qaUlGXS-Q^GMPej!93NVN(zeeBRF{kP|}w%7{FjF&&y}?92BQcVD-PA7_MUNOhlb zW2eUURYv*M1_9v47FiK4!*^s<4)5_)WTe5HC|Jz z#(4r+;;VW>3)`QzP`){>>B~S6bSDfosSgO0PCm8|jzlQ2JMxIS^~=7}25$n6dN=K= z9U8NzN-gpMzF1J2DjAx2tDgE)e&Bh{&50mA^P5FUK_q!pCyg+AX;IBpvYvB7=Vt2WnvB>P ziwMopiUIXa!bMMV(FQ_!EkiV4LA!P8mB|K%8@-?yUTSt}l&`#Z))tQ&Rrr! 
z*qAig+Xk~<6dQC6@Ip?Sm3D6<(?n(GRPU)JEC?U37m}^==pd0~ku3Fy+GS$*F8EeI zY`-rezyk=pW7YI6^{|ya4Ar?~4)Uiq0VQ>i$F{~vaeys01xTUsgx}Jis+dg~9GNTU z@YD|KsczOtGXVy`3hH|xrY`)o91Q#Mvi5m3-0P^q{XS$_2J?e$`=&N*)SSNpdB*GB&m_`M_t^WX8-6 z4AS6ZFvdq>!b`_!unl5e@C#2VI}7K-hj;{t$QkhEZfzeVsHo`N-SL22hQ) zXb2m?;K18I#`tw|k+Pun13^dc{6|G{nrzb#z_b;6@(_>B`BUt8a_A)_<-4ZCdQ1vj|k3%EhQiw^3# zA=ifbHZF>!50KMAM*WEMlHbx-2IEwD#TSRz)Vac8~j{; zWzHPXz(~Ubx*CUzZxh}pT(+Z;ROZl@Rt6MqKd@VLfgc@|U%5qr_AA4rOLbJ|7-9U> z7qGYe8s3919ccToMJK|$u~7u|mag_tj{Kp&+)Q>;jUIDihZPN~Ro^h7>cb6fV?0sc zrcFHX%W;DkP@iIhD%t7xO}jSa-m#KQ;!6O!V@GTvb1BJZF_Y?;W!q2BqtU5YYys{y3=%-`?2$&M*+RqJr=HZy~P}Ucad&9Sj zWOv}Kk9h_aX#8gpx{imzu=5(qSuMWMQd)_teA+Fa2`ewkz-m*=*+(U7GR`5+1%`iJ z>}(iiiq80QlJLq1oUtK}z68H>gg&JyUrED0mKuC<^J%ozan-?%Vz7-h+846=K^o#q z+YfRjqlx&n9wo47H>tifjIXfT zPl2+RF(r&uemNA4aYoK1PkvcC0JSz{^Kj%7!EvX`#Y277KL1g-E~Aqimy5%v*Pv^k!Do_1hH2JnUROZyo2M>LMWk+!ZC z#y0tPou*G32Mn;qSKOdRmuO>C;fsM`28=iwaPWpMypHWdCg0mwT(Q??>S5VFslSYE zkMU)^)q!gD6F#2{qd)Zn-zKzH~?#Fj#Wq zXN?Kq`e+WS!+C=pW zoh+%>t`uH=1kT}U;wz9;efl=H;j{PMpXe(_JgJv<=c z`4Q`lUuCB68P1))B-4cN8IqRwNI?JTk3ZzHkwiEN@^A0lZe8nBBp#eW?UE;-e*Sfd zbrT$Cn92OJAAgX|TXWdBA|TF_2=_+WLKMKQ5_hD5vxYiFZBBJnLR`TFv=$IQNd z$|B2sGQZD-SB}>7a!vC%%?rio;7tbAj#QNB0xJEzE`rKzp zWM8=c&fy<_`_G5l%+&siKmR!iZ<5d7knC>e_~{bj+UzV#l{kaknR?plImvdfU(ldg z+80UapF?hw&VOJ@8fUcYi_@OlF}!WpnH6qA{3=Ob?P0>*hYe?##qG>{U!Z9cdtO-j zJqiBDhxguniIQ7BAX%g8!~8I8gE6GY_in&t+p4>C@GuU)@m#=T3~ zOj7K?3qe_Xo|*k`5ifuC{yQ9>Ih=*xMQ~0bS7WOZ@9{}TDXr$nSICVGjrsbc|gXd793IU_o%XMI#U9OulWlQNs|*hdO4x{=hT_F3WOi=8l;xVx(=kZs3bbSQ%e ze7wz>VGo+;5Eq!d`SR0r)b}v8qAD5r74xnt)m%1}{EPM28m}rds_(lW=?rhVW~&?u)iRl@o;G}Tf0)y^xy^=lo?KW`@d3WF+6l zF^}Hb_W)n|b+NH&Gs=58y*ToQkoSBFMtS~6&tidgU(D$wNneht?D>FtACkHq;~VDb zL)|3C=b=Ck81#{YP<5;OdVmovww8RVa0WU_4!}=ZQ^3AnOHkAsJ`|VM^t_2X~JZ5;|zO$!Ih_!Qy-#>cJpDdCTNV%&m6N$ zPsdHi1@$ZThDE;8HlW5V6X>dY7F{2$rD9y3+h-leFTV#NVz#_8*RJF*~dX!)R4c!J; z$?<6r;;5S`D+=&Y+5w|vmr7&pF{1$hIjuk5$O~QE>IJu~j1=miWBE8B@>R1*Wx_5) z2cyKn(rn;*;^ouHbl2+{XiP^}$D|m%p_=S-R4&8AIy7&S(f~(nzINm>X=%+8s zmh$MZB;xSc?jMUm6M*B%Rr%1n#H5J)E9!2qvr;*OgZ0xUY%Oq&~k)Km|PvjWd0(G6uY~#&@*?nM{rh z+EJY(=bpAPfOTt7h0ffl)Jg-i;-ADJg>O8yNoZj6qXVYel;GlUxhQOy7GL$JH9RS* z2~_17gQ_McZh%)?ib!k@JzEIy<%zpWYc za%yy_Z*fd}{+G7|ltbM)@rda-mHf0AE85UEeIsaegQ=c)+x>={u4K*NUf{27kN+n0rcZzcgY0Ka?Jate z)@#%oPtw#5l8xqywHkng-`IhA;VBPYbXze~-%~EP&2P!7m$gA-WPI>5pc6YB0H3## z%`zp&JMi~;B)TH$X9`^aTg+Bd-&wxaphpgIbO&2_*n0N(o^wP_uy6WeSjxqSu=q!x z_i!Wx@vW1d+D{CqS(~xx;j+h64q_ZRO#gHMP&+D#AmZdJ^tS9KT>B;blm?Et`tQK9 z2{J9-`9gSs7Mai~?+c&_FC>c)TFAyi2jbDD4E8CTJg%L>@>63X)x|>kb0i*ii}j`@ zIcW^DXd8XmF8R+ZpEjO9mg9I@SprKMvE;_;Z{Az9xA{Kg`i_!AXCB)_%!Zy)dHLSJ*?mcrQr{ zz40Y&*}?B^FWTa*{>UBfs?#rgIY+V>^D)J zI(X$SX=z_&ZeVgxn>>|CAEZC@i(~4P*-rzaFw)>lnb;>A_EH8nsI~W~62P7tXS_u= z%90u%O`HfB4`fPbD`e7M9!uuri0rh-O-(7r7x~>Mpog^BID*cx6&b@lZEjUTM_B9J zyGe@U2EQ|7HNmj$$#O1ATZ*z(x8OjXvd%Q^U z@uT}BY#$xIWf>G_Ejtt3HK`wvv^HUSo`mKFW>%j&W0HV%wOiukN=vDjKrq}*tUKWd&-zu?hLgG0|`19!Ixc!vG z^flJcegK>=w{&}nPn|LCn&Qu(edg5JUQk*X(0+@g_T78DT+q5fotwwmh{N9o2F za2;cebLl#kX=}gK*SeCtR#JBOa8oPEWrS+L6Nh8h#NV?UiS|IeGICgV)++h$Y%;k4 z`5^IoXk{037B{!r@W8?TF%B+tP<`pc`iLSgfVhDrGf_PUMBQ;gL<-1Tl5@#nl{RS%S3CJi z8#<#xT<<~ zP&PMd^l)g|)pr$i2!Ph@(hE2-jW5Rm$LpCGH~PuLUyhM>hxbe}st-Uz(-gxTYYXXl zUzAmlbL=p-)Gi9=(wFlBTJ?8%Ctr*VJ`Pg;{NQ*Xxc1FQY61+$^)(7$Bj+i`4=3o+ zj?b)fQv!pf97{HcEU;b&;W1=z>$(9R5K8_|t-4$K2AgtmKkhlBkf2<=0-$UHq9*&P zFH*(h-}-rAf-ZtYG_Euo%UUTf&^GbxIOW5Y-6s%A1E-fhPd{bFZk_`%lY;t>d{9HC zs^rH6X2*MA9tEVqVr1-0c)}{ zdEGabXdemb0tA>b*P78oH~?QO12_R+(^57P+QBieu~%B)wWS+0>|+R)zehk`+WJk~mF+oW3tYv*_z6A(~csEOx=-n!r(3PRx20GDAkw@SiebI~SV 
zA&v^x(q`15V8N||!PAzam@G$0S$N7`VY2CvkwIAH5heyhD=6x0=A$8V0K7c$9g~yt zPat_2V6-pTItPisW``(;<|S9oxdj;GaVh9=+N|H@78W~wBBN_q?aUjFnv*s2>e;5ktLo+HkniG%=-5 z!iTRlO65_qu|FX_0Q5!t;8iOF#u9|RHbNBIB$kd2D9U4*KaSdGbLq)PdC<*cQS)&} zo)$ZGgx~0pttBE~;h|w7MIKg5hvONFfRWCU2g#Pd<)pssPwG#ty=055Z6IN*_(;Ix z;?|38fV8)A=XPzps3?c^$umBqm|Qe~H-n9*7b>ZOvou{9o9CxHEEaxBKwJ9=AKX@hM(RNXxei4*A2vCp)Hniw*?b;hT(8bRQ z{4~-6Q=DmApn;vA@ELPsTj{*Pg_L|f&bQge265PfUf&RhvAGQy?0-GD@j|B?JMANH z5vZg8bXrkRHc7SqKV$hU?}ClJ67ix6AGlOj@$1rD+&~U3Xch+&;A3FvNbRYw^ect> zN^k&a3;yQZ0FDeEFBM)^zd6kUMf4bN=)I=(&! z);OB3BQp9qu!A?(1Y=hkf}MUz+mUVxpvKs{<%Fzq+~jZM2iHCclVf{ws28yI58J4V ztZfxWHfdB;2JL1+m}HuwrAXBvUrNC(+eKsW{ndiN(iNwI2a>f;-LdQV0N(b!`p3YG z9TQ+GG9Kl|upCJ}n>_d$zYKi#ut|Q#to$^cak}KH3yKG}e|D(O4^g8iviilqalE65 zM!(=*dI)A28KoCb`+Ydl8En7xy-%>u(on0ofoO?Y{$m~yBL$4jrd=Az0?7{kH%P#$62!dZJ^^u4-EM>ttJ zmRb%KzR}r_`|khP<=e@7)aSqAYk+lu;VAjY}<-S z;AM)eJ!tj(Vj0`|r;LhcRWO;23c=LHF=(~PMo2^RCHDa@NzOjt8s?uryLI^Nt2-ov zFCQ*lK6iNcF4ytn58gez`}TEo$KII1WR6Rgy#3DGty}J_ z?H@>z|F2tN;xp%KbyNPxCwriC;6I{K|ktBcj=3ChA2`?JGkN9sKKKkJO!*?Xq zeVDM5WhSPJ0TUGjoG-3)2EDfJ!34AYO^`n)ar}sN$L%l9##iRY_{GGs%greN6B76@ zIA5Le<($axvM^q=oFREH|A*MtC03kCUP7GMc*e3YCjVM*9{V}_+n2QZf>GBxcecCt zD<<~OGYj6OYm9-jr_ZJXWJ}gtXY9MojCCe(mAM)8_}_SaiXY{57h9Sndcv;+Op5Cx zlYJL>P+`K{hY>HR`;G?>GeedK8N|IwKVMqvDNOtMz`!MZdI$i&$LQ}eG-pZp8)FxV zxhweC*t_Th3}|1*o)_s0)zc@BVs)O;>-ee17JZ;9;+%M84iF@U!AJc|& zkT)n==&O99NWB70U-uARz){Po!@i@mJDJwX@yym`grYBq*EP4ag{SN$`Su`N z(|JbOC^;HPD;OF}mZOWU*3DVh^TLc-(s8JA{dma_2jrz*ozgd&>uVF12Y-%95^iFT z#f}Gi#%=J}wmy!rwzkO-oE{!wS81t>d->X!WvFU6m%eqv$d;Yvr@FclWMF6~Wv^bq z3_ZFjUo%&sFU;m)Yb>QlY^#=aLxno^GJZ}H55#iTX$OsD7`kFj1;4zYUY~$EGDA&$ z_L2}Mpfh=e!Gd~M#s$S{6W^G1wyFRZn^c>n}|1l2eJB++zY;V z;@s(s`mUML+a#(&7yli-f&ERLfHgZhU$N4_~(51d}tu&DD1cL+CX*}^4=|+w4fU!A?-gAi)Gtm*RJin2R7!T4*UXSciaR(-ea66rYP_O^TrMTw8i+BN&O1CIL~IP*Fh$m7+O*(=_TyPi3WT$JG)_IDhgT zT#?0Jmf}!{b7~3KjvwgPuPPoJf6-@bW64<*gDLT>WLym-PJfPrZD_rP20a zDLWb~qs`KB;1$kK-dr>U^P*irgrVamn;mN5NS3RTE_ix@m4h;TwZ9$O7XeP<3|Yk% z0p4&QS{iV-*G3y4i8CnU0FC3~XmZmQhkWQ5I&D4#b;Y8?vde zp&B>hTR(e&<1YPV!{_NZG=tZ+=&K%aS0A<+1Q-6);6-2*WyuoFMHdF+0$`0zt*KZe zFfuBaCE$}zUosHHht8c1gLcrA7VU|m{oR0}s{XNchNm(ZU!7I5EpW&uuefqhQrFHY z72`r3!OL9HMK)}dUevb{quIH#ytNO5JOj}2X`5}xqGoBOT$bcDW<-ZYn-fJ#nlLjx z#q?=3!8h5*MezyZGgqUJG!wAEgU>qIaxk*0piGGEkW@xw_$8CH`Rs9G`Ze%N#^}gH zdg{Tyv(A0d#q>`rw38qX*bah}p~)8?%jzZ!ew0^qK=7E)iB3$k>*w(XzV)PvRL%_V zW?pjRi%c!OjG$em>m}vbH4y=FIT-zy82Slu@-BN}yts>>w8TxMxhS5b6-ia?8I2jY zh8BRba-67QF4FUpDzl~Yx`^pUoOnVbM8?0#CLnYaL_LP!9I87oQqQyqejMurJ@W97 zzoldPL94iOPet*CeyUhJ3a{@wSxuja{UWP43SnY-;OSS5k>com?01&V6!`H(UIai7j58UmSC(+s0E~V1!D3`?K_D_o8kd zUO_Krz#^w`wyA$Oep66`!3BJnZy{u{IH2Wf+hgzm0YC#3*p2)hFTSE!mTo`QgJ;lN zF6n#TrFK3A&Kz4WO<<4X1hihkE-*#lu;3M4LBVnCOfP+)jPZmP!brt9smk*|v0-ox zAKIijWH{FL!c(D7L6S#()`uzEXf`3PW6sD8j`kMS@7mRW6PD!21x0Izeg^_~5I-q2 zmPu@H9s9+{D^mW^XefD%7|AL;{m0{tPAS;ht0W3mzbSrF9-zkZfX9}bf;f`oBbvwQ ztiMe1@Hh3!{fh6PFDbTu`o}&+_2BVMS(~Lp;o$iaU3&W;YQjb2bxo}$XS>$8ry1Kc+(ajcu5VTu49TBh^$RJt9RF04K(U2 zmsi`;a_l00GAf71Ntd0ISYu^J>-`ve6Vpq=QPEZvQBTYsNl*ZIK!?9gT30V7<+;|0 z^5`ncqFZ0K2|97&>!_<0HqCDKz}7bgif>1qtt(Wc!#OCs>Ko|hAc>3&!wW7uCS_n# zdRP#L9a}zxhWipNwk42KrYvplsT5xvd>Ki{TepRuk8G=qP4l^DS8$g7BysEq+L3Z( zJFqN(vP&5KI2d|Xzv35?g35zwqMc8*75G*I-G79@n<|BnpJ(k6NITAvv`E--Pw||D z?u!e&D3YZ0EnfKe`%gYUoMR2{kN)&e4%cp6hX6nECaL^?{`-G-_}xGKin=R@hpfYW z>)UThx;`#RUE%_+E{}1RgyV}BEOkP%+vKq`zE3loSE)?Ostb8f5}fylCli^>@+DFE zkeS0Kah?6_OkrR8=}qiea9vCL8B2AXW&()O@eb>S`-?1>;83Ic%#gmy`s6?TlaHDu zr_4Txe)qkbhYR4FEOjE1FSoNVnQ%36?c|Kp*x+}ce0KQz|L>ofC4ZASOk5w(eu;#v zFARO~=xH;-y_t8(7jN{xBZu`maETdXy12sSz1$A?XVf<) 
z(&j9Bli%7y8pf0#73t1v>WaliZiR^qv+NEht>qk@Afo zUl?o4RFXBr%LI8cyU9Z@^M{E%Wa*~gC8Um{uMDi$yTl;%`g;3Rcx^3Ce~XZ^ST^}; zA4w17R{&Xk&j}?PqlK`^g;3>e6oF6Ykaz=6U($BnUqK=I;w6`yyaylrksV&xavppL z2SK*lCiHqJlmA3oVzg_^8Gq80_I+JE`5AQ{n~e|R zPCo=XosbVb{I)#5qEU~k;p_MYul2B7y&aFUmHKN$_MVrO)n7HKW6Ay69Cs%E1_^xX zqYsYPuK8cNsrTWbXpWi2hW6M#7N_w;z&5PSp;;__IQLUPV?QB+g1)i}IM^q1uK{c;w?dJ$S_N}0puGa`N3iyj|!9ta4$Zhoi!!KKP(caV=;uK z0PVSP^;m~XOB7^T0dMl?XcR4|F|(w!C#-DW245uDIT)v$tKEqFMB!%NuTGWN`C> zAs7H=W+KQi9~)>dXal>D7g=q$BuOv@p=Qa@o+@WX9_^}d`hEp!`Z_2{XL^y+jVyH* z19a0|!HiBN?+xfSYzM4KDiiJ*EkDy}{oY)Ex=0+m%BPrXkFJ}XIur}Wrj<&NRPpi2 zkult4;RTT%${!JptV|;D<;PLZm|8rI*KCq~&`~*+Ir-(KjMOb#QXwW^B8;s2#%I?F zA>Tm4m-oQa!0aH}>MLHXC!E!w2q#(XVZGbJga7mu+m$CNC=3je*pU3{n|`ZKkYfx` z*VD{wq@4P7^z&pTUE`#9xu^;C=UCf6j@Fus%C-wzTrlUvem#0T;GikQrrlkH9uPu-&$O{_b z?6}!wX1~K$`*cDwE|T4tP)B(yF?!In{pR?Z?${<(pQ`%G`DU>dOewLwJeX(Rso`9b6LU&rDzvDo}X4!h;WXmktn;6uRQyKN+ zVe%luz)~&$K>aeV(2KqD7E`M{1mNL8`h$nwV>9b$!QqX6iO&~&f(2_H0#WDq=op2b zaj@?~FYeOy&ceWH4E4fRdulIa);0-lJ9r`qN3o0n|7dGWl7318C_T+3e-E_X zrYV1plSbWSVBbhdiHk@(u~y&LfwsE=pVgAvbBk28i9-5;w(%h>d=f_#LsxD906+jq zL_t(P2r1tcJHaZWb%IL5gQM=sv)X_WUx+wN^}tiHiek^EEJr$4Eg;WZv^91tKZ?C6 zZ#pH)RF{g>5`PYce&5JMUCE;%0F*uicD1`Z`USnmHPQkK`|X5iCfI28Ph`?>!pTGX zM|byO>$OiDpu6K-U1$@&HU!gl#=g4H7DhC&gF5NY$Pb@vBOJmu_KRrz06Ve_>8vKc zn|`#t7=f4ZlL*jI2WbH&8@I+AY8EzSvF|hj)GI|YFo2;c9F$s=r<6G!-I)OJnd1Vv8R46v!cs|UL3QU=;F*aMs z0AOLJA-ORA+FOqe9JA#iI>fbPn~WrpFOaSBYE$akUX1~wz4DdB*o0|U<5X0$bcMGz zj;bhv6*LQv&~>K`#6Yu#9oLcxJv^+a%p4ns-aK%gZBLbQ`BnVbCoJ+E zeF)TS-;q9LV@1OK8q*j6AwEMzOfHFl8!~<`7Vr>If;M`L&O#phi(`gca2^AUi>BZ1 zdaKc2T^ySMQ`UyV2-B_~rJ>JvhT8ZQ-q=!ZwrU*mSZ7NYGT18u*-C` z#x{vpm-8^W@!g#Vi2Ei>j9e}Weuku~Yi_%axa)bJCh==B#~F*?lgKm)e~INtE|H{` zr^yNv$lBXvr!%N$ITY>2k{u-RjWpLie@PPeEQ#!Uyg>Ae&%Xel#HmSB!vjkx=bM(9 zq5T_P()q#7w_Bp*Cx80m!wKXtv2^FVdnM_`aQOq1&n`b;g8vQ=7M$Vy&RaJ~fHUhH zI`>&lMcGZbxBfSB7%Pul7o1tpCY8@I>)NG2&Ye3?GWcn;vro{MKV~-h?eD&8IT7t; zB1{{4ZSYupaL&49$`zJ;VFvj(x4)ZYxcZVj*8QiU{gi}!i9Bql-<8SbPE1NW``egN z4reKw;CEg1)&K{7Y)m|08*Oy)(s^*sw8r~={AB$_yUdK?+Dt}lbdp~i zxE{T;zAvz>jLYoUe@upp?@LloFS|Iq`wkBeT!-Fdkz-G@?}76iFv)$FXmP2VD=gK+ zOmY)A_)r_Dr``QcV%XXL!n=0vqbJUcCy|a0XHU%wRpq5Y<^`oL8{{$)&Xzy$5>pcY zoU>*bm+x7w$C>XhdO@f*bp7|sEHiVlhYBwbXEEY6X3R_DP3~Emjn0D{N7Xlhcf9s$ z&#SXpdsxoslmvWZC;RbEkeamdPro>{*zR)2jndi{OxsNidXF>XP~CtQ5poBB&;SP0 zp_emN@uK*Yg@P|Gxo_~58>H*NBW z{u+;R60#b^!${_o+@s|B`E(#;!71qHzF@V={`*Sc?J{`7*K0VIQTE zF9Gdg4^4c4VtK>3S7v21e)AFXX&e0lWmt{>(>vzCTfgWSL-hD`la4&opZe}EluSl( zVuvlYjZ5lCGhVb=*RLbA18pvnxwK5N0+W^`(w#sc=*!E*Xga^%iN$SO@lryOE%$%Q@r#ftKX z>u&VfTD|HUjtz;djFAC&{>N^jZW5BSHv4j1=i20<0vwMjqm%j3OZtu{ZRv>&$|ofa z!=^8^zsjh;abjOpcE_sl;ccvfnx9>)pi5-%=T}Emh>dKP>n{Kl%duIJsV;(!lw+Rf zCdoBu*2KWJ_x0LANfoORGKGMrG_dT!%{fci8N8NNg%VIZC;+@PR8GYxN#WopTgqLO z!!IN9HFY@6PQ$=&gRpH&mO&mSwnmF@JK=QBsnddvPoo1AEFjI)Ns+P^(iRnIs}^ku z_-_34f@p5-p-olGHdJ=->uxVv-I#8AoRHkc-V1*-=j5-z(sEy7(PNWoc5sz}au@7K z)!-;!s%@bLJfFNHyEU+KIMJt9?b9~$bIgG@@o0Ln2W-*?z8fo_oVpc3gCoyQ zt`$d$K;!|ved6un-5c}b)Ie9>$g{Vs&FMTeDE4$qZ3vAZH?)57;?KQ+3h1GGZ4$~f z)wOMwLO>m1l!dbf6Y0o!@tGL6xUgi?@#4pT+YDcQyr>UvX{ZM~c_be=ZrV^9$*mD= zrL9;3fwolG1_+mWeV#2vuy5)o@Fc&M43T#y2l+1i;&kINm`K~7Q%g>X#%COf18?!> zB~s|ry4u=)?34Ya#X}C`Yesa(0Uw?a>-5QEm(ao9tO7rzIZij_hDLUFM=K|@Il*(R zO%YDHrcPU#|n(dGb&OZ z(xASix^{8g^Q5WV+fIw0GaqQcP5!K#7*U_4TQr7qHkt<`GLV+?=mYlZ3coo-KSMz} z@)N?ZWMz!=%-lbb1Kx~pv~!e>TmRqzR6MpGv;yzNPej=zhlRN+qwFzM5>J^ZYu{jL z#w^M$*J8-}LN#HUUO_@@?Mg}(LB3md(sFD-Zh_lCc+p)v^Vkde&r6r7Kyn-Kut zP>!ei9_hqvQRMhF@=_grjcw!%2Tl-@u6^5Wlg$H*^bywUhXp12j0fUKlLko`7seiR zWiOo`pj+7bb2#$wZH!gckj9~VXsaN7RJ+M3d1Sii#xNmZ>P0?H=|ElAHeUM(E`KNt 
zzD9+<_0nc6s87*q<&0xMf;cxK3xDzPS6B9tu>nM0iwl`GM!JvTaGi4#7}8jh46h|q zb<%=odPEdhp|6vH*{*`t8JrsHBJ^>5GA_68X(1G}k6UqL zv;9EiVaRuk0Z<014Sw7OTRg{)60macx83Sn2?Q?JtHlXWZqvu4HdqVQmibqAH+4#y zu}`)Cot$dM2K>+(Xse=3W#E)1Q6W9AEO5yV z*BmA`ovG{PhDj)AExXi(>rb0tc1HR2%U3G9@8xHC zy!Y-+h9+i#v#g7@65k}LFAjA!x69d_VD`9)FL@fcCe~dG{pEA-AFwS;t~g^-S^cGe z@zC-vNOl^(I_vufX##VG1iMKLXZsl!#Q=Ua&Y$yO}RAz8OpJ`|*O!E@v_ps>O<9<|3F0$Y#noj)9()RA^K z#|v@eMb)%QN9vPI(yn`pCo~*h3|xnk8F8Qq;K)lo?Ix_|C6bDuWzh{FMx6kSCGVG_ zMLW-oz=t-z_w*lKK;>ynxcN|m6B-m?CO$`8AyH=^!VdT>U29RVi4pyreGquAW06S0 zQD5>w1G7fk?37!w3Ip)iXs!oCU8nJE9`q#OoFHVoepGG~!17Wz$9JzZn_lO=9Sv{9Fz3WY}lgy%!5pc+QJy^y_{s{m1@Y@btw8M zh%y5zv{a;}1bBcyZA?osKxI^^0Z=m-)9SSQ!pvYM9=X3Sk&0c7gJJ<0@&ULm%Tl5{ zd+8}>g(ESro`>YA{2n>}cwSJpG}2cZjeq4-1%PKm*W@;^wN3Z(>ZMNxI9YhQE;el=;Y9?I>XH1&}oVmzAqVCeC3F}dem9SdGqrAk(D=|S}=bd0k&+DhTE z`LdNuy_6`&HYTd`tquar^H=b|N_QSIAsM`4l_B)s;w^in0M~fQ#uV$r?DL|jf!h5jXeNl9LfK*rGvN=>fumIum`jcHSE2q>-lEFle(sle3rl9hIo{X#Z-wnIwLRRC>mN&81rM%o-6 zL4k)3#i^r=b+y^<7WO(6mU;%Ru@Ux_2XbhJk!k#s#K^fjn1iTBk(xK_Zr)}Cae^8$MH3Rg3QC3?RYfyS}VWglgbe`xVy zp)GOjUu__3o&=H)_G%0;?!eLz`c~gq-zFRfU05x-5H6Vk58j6x^*d|=nqzVG$ft+p zCw=3iGX1Py(x|6#ADlS+114F&8c)-4qT@Jj%Ak62mK{7HWgIU{q3^h|`rXk-@OS8S z>#gx#O#|D*Q5jlfQ#VO@lW*eL*;YeC0T+zB#w-1b4Dk0PcigPYPL5|ouX2`HbY!Bf zk$BD>;@D)3r|vnEg0Q zdSf9Bb;X4BZm;tX77(k|F&|13Ue$)tSA7IP&xzj5Oj+7WF@MIgTohyEM5b&`-R7TH z)m1<~A|TVAy=1j@lN=IGVpY^syyPG1GRUj8>=OFAO2}|fU@V!W@4(KiNty4BdgBS3 z=_^Xb-HgxnsU-vZ`j*;3TexKFXxncdw5fN=$bg_*oa;kulr6@F^?&Ksp=aC+Me#b1 zK?mH4Z3Uy%GIT~R>aqv)&|zc}ZuIM>3j0;B9&VOtpLHscd-2iCLgriJ4bY`!JUge0 zuMX~u>$R6QH02yy>)`8pfYm)v_}Sl#kh ze4saD40tf;y2QSd+|=RXo;P0;zJ73u>r{Z>Wshsi>Smc0fEOIXmVn$oZ zYtC;TS5vc;iYsBIMR6KDU{l|aEsw623s3E%pV&6q;Hn3e(*zB;SXKN@eSK3~7)#R8 z$2!3c;*LKP2T>mU4YNh1sngy<6Ls6R7)Ex_jU%1?RTcsd&GyR4)_9!I6qK`V!75)o z`jW_uzk>1U8gi?kZTM>$JNmyo-G;9Re1KgBWWVGa;Q}yr7AMLO@VxKleS`XFGwSET zJoPkZLlZ?NEP)AM+Q#YX_nouE*DmUyJ=q)yUy{^*e)-zrnTc`N%jR>EBeU^U;p_}Nd9>J$~xH0B3@)*0r`>~$9V4VH^> z-Rv8zt^Tim{!?Dk_>6??*~9OC`;Uj4H*c~82}`Yf_W9vwKmYmR%WrNU{z4Gmd;duf1l|Pq90^bzVB*IlvJVOFkZkqEohEJXqMwN@XHL7E zhw`)(1USyV_F;tjo3QX!qIf-5Tr#*Z?aQY0ttOV)9 z58ladzyJLg*zxh4>xeCV;7t1;;P(t$z5DhJ^mhrHC%gm`d+}mWXB2u3x>1Z+Lku{xa4ctp_jfqQeCe=|1$hedkW=(_i&h z64*-_?U(29kED(Jr|9&M*th?BUFMQN%664yik!Lr0{{2EFu3UB^Su1_#lz=^k1yRg zymgibEnfe&Oo+pnqh+)UpX3=I{$woYA0EO- z|JV$DbkU#ty0_#Wv|J9!?&S4nI`-Y z1&)?Xv>OX^UC5rl&Uxu3{^(~P)86_q+R}_SI2=1Z`EI=IAOJVmFS5Nj@UlIgGnN}g z67n3r^DeN6w&>bh`-|t8<28l%wc#OF{Q(+0cEu*sw*g8oX?o63V`t=-Sf}oU@tR8b zjk30{A7GQl5K7*LYH#5TGiCCgbNV6=!r-Ax*gy8#xlk(517BY@NQAxr98N5FAP)G;km{kHLvJ*mSdz<-@E{uU zH$Gtu3Et&Kk{tRFL(@7E1dyl>v+81$GAa^u?H*D?ohGziIwN3_V~}7xmFNWKUZ4#V z{S9x!2%Fx}ONq!Xo?|Ihg(P);L}%Y%Q!MJKe@d)pbU>R7Qi;>Wv{^eTJ;=S-@kIn*Cz0~ z2%Zw`p^FUN7}@5GQDMVpM|*y>*W$~zjEOGFseRHLWYM&nQJ}4=mmf5^2pCzwqb(fu zQ&W3fOV3b*w~~&G@N~1s0M=e_)4{Txcs7{G%MTnc8S@>%GzqmuhhcCM2NJW*Js;$8 zxU%GSw2^_WFxJV+z50!SP~cd3L=@KcI+2iAU;99e?oGXL5QSM4%lC>dH2D z6rMWcM6d=}Wtw_Botr~V_5wdMV0U#>kFlKxlHc$Ypt*JPSWT3#!5e^U9LsmXXFqhQ|3gRHB)_yh5HDvai-}N9Y^tJc+DW?@ zV^;1q{Zaf%&%}YUana3wUy$gLWG3-Ao9*%w)(>>GpXGSTcZPo9XQ>z47Mi#_Ch6zl zFP}J2BM`9C9~$ecif^++13~aPBCgfQ@+*Ts@-s))XyaVOzAZrzsEuziHSdP9!~`gLd>YZ55%hdf{y2;`ahJi*Hp(Kz`>MG_H+m8PEt!n z8;t>wdC^n8>0blU_>2zzwH4dowP}E;Tu9y zQSo-1tD3ZxG=8IdbXS{AxiFtA2QXCRB}wxUxA8%}c1880OSX}xx`a7=rQI(y63n`& z4h`jup7bdcvtA6-Y4wbg^#S3SFasThhV! 
zWRs^3;$U>uR>F9#z>)lO;F8Z{;{&_^*bN~2P-!@lp3z9qT%>N^#K|)}Il_y^u`RW` zQD1Cc3%2u{AgZxsjyd?-_i3TvdrJul!ia-DowK|9hZuFrkPfv2oBX!HNZE=D&K#ms z-T0+10o<$ri-u*QiF&Ado$a~L<7vot*>oZ1Z^s1BQ?io*@SRII^04zDvUx9H|9Qy^ zGJhaJeHTA`M>6u-^|!dZI&=8LAOCpx_22#7;nPol51sE1@7=tC&MsT>@bJyIcSw4F zefWSETmFwf{rK?VyVqDt`f)FlyhGB`0_(pKM+kl;R$ z9y5krf7>;?oxQCcOrpHOTklM&UxSVb;>#pcKmN(jnbpiqEj;d24fX{=Ub$=^wxUUM~qf$1L}YEUVIW45oG| z5oaNS%%`j15+_co@#UgE_;>k^r##s3WtYxMQ?BzYl_DKu=^8Hy{fFOv(v0Vy{N&@- z0{`->TdmzaGwxZIgjveY%zlP#u9NKl@JH_*KL7mdX3u|1a$ftm1c9@<&yuh=!Rrj_ z8`rPUhe+&VFXQ2~al`ffGhkh!#3g84e&r0arcd#4Tz&NH3ASb3aO5^&ZG!1BWs}~n z``t`L!ps zprkX&dtU}?l2F;SrE9WZB7tp^+@zH8VG_;R@2Ak&c+|7g#z6x+gCkuAIGk`02HqhtrH}-|wX$Gyrr>1}d{Yc#d9c^Kf>0@ce zK-mFC7I&|3)Z33b3Fvd(Qp9Srqotjp-?Ar<~O)Q8%u+!?sqQ3l#& z;*Rm#81PT!=)1FIi@q{}El)_&lnRn#QywLBGY8NL4gl5H<)0YxTeLYh`rvXNW>~v) zV2sk2ir)Aof5`$c@B$($xd?MK`VC(N~#(nB`= zJdoGeTWRNE*wD}(^3N8;p04&62FVC)qGCU-)GU7EQwNH}UcMqp-A9C#H4Ydi>$`q5 zmVkHX124xBh0?n|K$v{zjwV_Hr(Rh=@|Oj%g!c&s)9zK4O!I(j_!g$}@H&P4j(_RF z%zrC6NUJ%vN*>VedMO}b$FGiSSyOyJ{H3G1gDcN03tj!>0}UULWER>-K6#Gai~%2R ztJ_X~E7Lrv;Xq;CDFFGiK*ZgPgEI)A=n06t8h;Yalw+a3Cd%PUC$3#| zHj_uM+qO#Vlnrq-qY1PPX@<%IRT>Pf`boJJUq>Pb9#^?YG?RCZsE0T36M%3_2jCFI zFpCa#$5~p<@YVn`<2!q)GykofT$0>&$#QXJC%52MAgZ*$;<$Hn503^4;Vr4Bax*xY zfUI_6q)vz~yVH~~K*mJ3Gs(i!H2kcSRx&8Yk*5)|L*NNfuy8YS-hhII)pRJ+zVWlP#4^ zB-)Zsnr!aY_@oW8VG1rAd2D}(m9fE1*OmvoMJ1Zaoqq7mPs5MXC~!auT!5;ZH-!?l zo{zGwa`FnwN8Zg$6^shU+;o$-He=ivA%q?c9dml8LAp|1Qr2eqxYt_)znnQFcDrK& z$5JbCTE7>5HtPqQ7|5K{c9dUg3R|3r!*OwB{fZ~YE_ujvZyZZmca5DKlZa`by%{U= z64tLycG^GS#SS0qOFpr7y-sy!)9iiG+__9!Tg5)#Z2KSUH~?p$I95(+Ya48?deO75 z1|$Px&OU43pqzflw`oU1T(wj;K~+i}+^C1vEBgXqMgPbtNkQl8bNm`A8h8%1A10VI z{LDo|HdY=*bd#(*zuc0?$_21>wMiY0aMQncQ65?^n@~A75!Fyoo<-6jMbea|6@Emi zC^l_Ck(9TUvfJx*;j1hM)F0_Ar6yhnZ_dXa^YL7%Ug>B}g3_j~=Qym_*v417$6pBv zr#{%CC9I^!KfW8=;#2LP+``7Lk_B5P)?wT6)LIJII^%)w!r3^Rg9wdoR7tuiO~9*G zzLBf=e0zPsUz_q~D{8=#qFb%v@uz>ZOFO>8g+~6gUHBR|o<1@L9AEer)Y36-=30g- zY^R?%XpE=N1V?@JRfINSR=XTLzvvR%+7;WBBgoE1^iLmo$hQ~~1P;~-t9vSQ3`%x! zoa}hnIZ*jfnJnTjzSfGLPsEpM45Qz1en2nnC%zv@jXl@xu;(CYZTpB#9}RHvQbp0i zmj%KfUo_#%?b^|Q+6KgN9?)Huz?2KGWLZxjUy5&HNKYZz}CVI@DJW8>u>39 zVT~K&b6H}^19kLn?4U(nueL>BXFwxca9oIw85Eq40S=aYj&f2bT~UppnzI?eEz|LrXOtSN< zZ@wYHe3t|(%TKUO$E8b`TGrzsNqApQX*a$3_DzzX&N4oI_>MKWO^&f5((=LeGv&l%@UV4pd~jPCQiR35#(7rM=w=$@DU^}qZ%$@YsRD_H`BB=A4{!*5B3 zUOQZ4d5w$A_GpZ5PwZP|`#4)m%K);M$&V+Vp6cb-xuzb#4))a3|YJA|! zNuA|A$x~)TvqpR`QS}^4fO+NP40(NF_!$VNh^-6g^d|b7WEaNeTJAIR-z2z-*^Nym z+ZRnFJ6xLVb{%mG4z4jLIax#>OwkpH#? 
zMcH1@g9NT=vnd!Ok!*+8jGx2H#GzZfz@4#ApU!n-l0nO)lXB=6N5Jm=vG-u>x250K zMOis60Vba2p)NC}v+zi*&uKr_b70*u!7H8#DWU1tBLH@!%D7aw&O6ef&hxO)BmLCb zFNP0${JN8A>Z-4TR|trDSKN6*7syXq&~C=Du?sEX$83z+*;G2f0v9#q@S{LSdM0Tt zvq{@_4m&NFOM;+$hT~jW+NfQO%h6$EQEPe4Z0W(3j~}X&Ws=oM zb5Pn5Yv_0So&CrVbEzWxqD#n@c3YL){DA^PguwTkjERyxM|a4sKe z8+A`M>n5VT8dP4FFHszMc+kvf4+*{j#pl%T{voX$Z=yiC7-iW=1hRhKIs)?Di-{ z<)MsJWHFgv?nbv9z5*; z4;$S?@>qP;6;>G_=rzM!7c1LQD#OLIai9L+@C2fD(tGc5|>%repG1`V90LCmYHe z*aBb-R#J}Y@h>un!&9;`s4z4xIVVvLjMe-!Qs$FTf=8Jy<>e6FyMR|jf*bAq#m7j+qd!UN; z%V)Hst9%ny834B5iYRN}h^^m=3+yK4p57aC0KDp_8reQ89cAJd8~%EtO&`ghHsaQY zShmkeO1e5ro6T`a6hu(oPEye9=T^#7OdwCPQz6YZ^eGnfCw{dZ`R2*`(5i1|2edir zrazR)3g|jMs)D%INyfgWJk~`US=(q2mtFb=3=X;)zY|Cxjy;RT^pXKEjt9~S*P|`F zeBr_PRICY|>pF8;O^7x1lS z>8?HXiM7ho5wQGErFa~+Q#NEI?z5GIgIm0^0Xm`L0C1o$l|?+Nryr@i@*A_SZ>ywD z*_3NvJPEjyN<)Zt?Lr~8!h4LJrve0pRBraeTcCkCCh?Sy_L0wTx2#_upbLFPVj*MaTl!tWJ2Hd_-a{P>Hbd+5f0Tg`cW|3O;o$*}|Dp5Rezm=!L z)M!GFJoiS6g1yt2I6<8+(p55fjK2W3%zxEEL$kWE2f)Si03ngKW|#cQ0)3sKY{pc5 zp5t^|_0p4U8}!g=Kaegqva56lKHEtTIioE%%Wm#EMj!hk@*K}cz)9k0fBO;OT1*?3 z@QwsP)D?;LQTzIeD;fqrLh{9y=rT7s zw}pK4y9os8&iPz0{W3o499DcxdHM!0%AtJF%iqf1c!n-}(^rb5&tc)ww$sQ$$vYB_ zTeI!dVGs8@w229hLlElR29KHLHL#-fFxtK(6XV2x;iDmKY0f_W)>{TAUGoh(Tb+qG z0Nc;lpRzX4hS;j{&-Qc{mMvs;pK`X)UlLq#+4WogID&PcoXRykWi{GIYLS@p-B3?2 z`H_)HUB}{7r$zYqjs95)(E5XYK=Fzr0E?Bei}f~jjgZWhMkIN;N!u^`j7o8M+_0zT z3#*D<_M_EnK(R_tV8;_Fpl!wEf@E=$NTySluJ@BQ>Y?~$-%|eo+w)a_wvO2G941Qg zbAbMhb=e14eK`hH*s)6{g040YuW^H8ZEZaIvP@=3KRR>%@a4TnhhMRb$}j)#Umw0^ zDU|D1u9W0^Ow#WH$-&DP&oEQ`ItlYHxNUmQi!|@F{Kn7zhaVk&|A#LQfBb^v^xZp$ z_ujg8_*ehpXW7f8F)qNvmuq^Sa@O(%5~zRt;>*J)pM8#yFAx9XqxYKu{2SIZcS(*9 zKm1XN(Qlbe>l)G5uU>8`7I}Vq`wln#V{?7$j9Xs-`VC2am*8;bw2AZo>CgUTmfzr9 z?ZbT6i!}lI;Nks}#?FYoe&fdBr+@O%;gZQ`mp3Vq>5DN>moz_j<|IqRuogROPP>GN zGlE@b{1?CY1$sU>eDL8%EUUt7VwQ60{6v>^ZE)C^Tz2ho6Wh+r{_M+JhyVJwzdHQo zpa0q6FaPCFNVYPonxwvKYuo?-;nU9#w@H$pB-x6ynKjM}M=xFO<(~4q$4gIJx`P?g zk68xA#I=5L`Hzbvu$`fQiP`Wasj-jASs!pbWm%Nlx9^sOR!3)p`{GRD9zg2~42PdI5IQ|?d)^;`SQ|cU8BQECN;Rsi`O~I zZXC7lI%9=)C;^XueAuGiE(`MvJZE~k9{TE?C6-J8`LM)3;R`!WH2Y#u<#CxEXQ)3Q z+5H|%>pbQqp5NWQUoza6Y+hp~`R(s+mvleHwTmwwb;+Nb#Qb?=S9h0vQ66WX(+L}^ zj8(@3ih_~O>}vF@P(>pPS33S2Rz`>2A_U;o7wR!i^IbwU--es{8*8u=gT?Omdkkwtcj#!m0tw76g(;VyW1Mkpb~fn= z1}eLnwEgmQvCkW7vmYE9ua0ysH!?f{gN6w}ShWc+jrP_KHp)n;*)G4?cA(|)_tyJ zDbKwgFyeIM=7R>4>-u}{1GZejYXr9B!l%ksjik}pUwyRxJ1z3p#+`{4p7!t|&#eD$ zi#n=3l#C%|<>POa4zw`#hv9fD4cxtV#jWvnX zu!TBeH1L1}zkBRB3BiU;-7aFAYJdR)*(8G0%B*hKV#`0*BI+W6!D|8?V~15uG{wd# zLfQ8%^zc}FN=7uk`&EaXiS2uh>WLo97|$kzwavuk*mwu1CF#^(*qg_gYxqvn7Qh?d zDYv(ud*-_j0c}x7C)4^6ZohLunS`^S?uSvvPYG-STKlCmHZ9K8Q5?SQ#~ju?v2T3o zUwFj~IOzH?(rB1C05M!_$=(1K8uwSm;x>&M6dSj>PizL5g*(w@DfU7OHuQl6bu zr^KG-mi1JJCHH_y*W3NUS2xvGp6tr;{30hW6k}A3u5?&y8=vs256|K#0r)5{LayE* z)Bm5UKWnz;xY9ho0}wOtVg^WpV=}XHsH!ZL$#kL5qEDfF-RR@A=~6OllgYYOYFX)~ zFIuJUu4E-MNhW8^d@;{}{=dg#zhuQZdxwYnn%8oVh!ws){AVXJh;z8%J@olo|3m)M z1y1D@MWo~NAK;E1hK%rFgRlN1E)G1uoOU1%Y;#$daUjQVj#Dp+;dFe+hMqABoMBH1 zBsr$~a1T|43|Z%falta?3>n0DgY>x(?#kyH=FngDgVsZJm3WG(-6&ONqz(n)p-YoP zx!vSGHy|h2*TeCbTpl1=@m3aAH{B*@IF4tN%L9VnD6!-;B+l(=GapU=Y(58Kt{MFU zH=E$y@!7?1J(3TCVYA^VS7=~6iIEM#>Fq+N8wjf}IRql#+R)EZ;;S)>{&!@(JHV=fu{e3_f1jTix8s9t6i%JgrBmiJ1Q>yRI<2 zlZ+U*j>8LP8|D$D$*>o6@X)UfXgtREg4u|oo;+v`8y)cB&GzXgKD7a<>A%{tX=JH% zeStrKL*Ex%bzu6e?!keI^e>J|HaFCr@MITo?dLBfYCFF^poM)*6?_p0%gjgNs=_eaM@>OGqDXDvykdL2&6@nHERC zTYJ1$=Vq%hW5tuKd?`2BiSBUUtR>rc1tj=@9MYUi?786 zec6Jg4zGv0u(;5Voo7q&het4CadB}wMk5~B_O)2NVr{>q;s3=3S>n@83Hn=FL_cT0 z3*H_ZaXE<^)4)CeZa|U0wAtkG24|eEpRJFufsZZyj8zi^7MZ|ptKLIF_{j8y&khvm 
[GIT binary patch payload: base85-encoded binary data omitted]
z=Qx*p>srZK46z>0K$_l{{vji+Ts}{!mwgU|l_fmItzVvV-hi(++(tg(2@Wh4TJ$~j z_?WN-xU;O2MCu;3=e9pm1IvO#;CeQgz@dyp99fyr(!C79T!hcaugN!U7+esJylqbv z4r<=>^AsITK1s(slV`o;Iek{1ge5J9W%-3S5=56jU_cOFrw$EYjXh#0*s24Sr7G!d zmX83@gH(?M&c$bdc*luwNKZzg3OoEEP5C++AD8P%g>3#LulkCaq?G{VoNND3-i!2g z?*&QCNGpC#v+oDozvxaaBm_83BHA9+E4-xjW0yFC4|>)i_zGjqeaP?Wk_`a()Sc9j z`yx;>F4&TrvJt*;MSd_Mt-PFoM=?j=WDvSuds(GQ1FS@q6@&uxnT+$61-!=DRwlgH zxE?;6jwplUXFnxU4qScT31#cZ&*ws3lw{j-{9J}tz0^59*Pdxe!ccK*`9b%KuWolA z-?`uY#pgF;$o}=syWM~O-@oeq-4B1({g0o2j?w#m_Y=Z%{EFDzfBxz1?q^>vb$|NH z&$~}=FL#f4NuS|!Z|k+e=5%^SYmj=1@njpXJvVpsGw>C{UuX;-1kM+Ee}1O^45N*n zsTv;OJ}*rgtqd38OS~R?ejcuYD=$n|2^r~Ot(UZ$1nL=R3|Z^$%aNXp&ch6SpfOiN zfkrlsCh-_Y$7aTdy1)6I>)nkj7rQ$v8zRNnaT;R+o|PCa#xa7O#b|P7hHwKI7c`D; zWBmEfJ6F4Z_y>PIhRFZ;XTR)z`kSwUcM{{l-~XF`wfmj-u6A$Xbv!dZ+#O-a{`B?( zXd6G*am+9Ur+ArX45tnwc=BoN(pyfx^$?~-lc#~xJdL~>BM-?_7EvwQQzN?hvlv>Y z#xR&ucVQpnA*rEx598auR%gl{(ARG+t#!|k@gPPUjV^lNU%+sEaUL&nB8!aU38~lS zGTx?!MHu9(&;#1#8A`K-f&Fx13mXTT%{n#CLVp6|tqP_Wo8Gb-m5pn@4Q`ER<=ubC zV#@|bMK-^ro&4Z(2jkc?;Ma^Dy|vk0KQ~3Vg{^K6kGjdp=@{Crr=drtF_;;fSYwl$ zMLxk>S!3MRHMH6&kh_l8#zj}}t>#xBwvDh(My6B+MT#wV+82&%{&1c<7 z`r0(@KL%~%#NWSrzxx%NWv;C)3fBuju!ND$$rc*e&YS4}Lc!?R{$P7$%%TDOQo`^cbJW4{c^tVoc8lA68Rljtx=W13V;IX{(67`lJ^uFy zk7f8I1RFh|#lh*uvV1AS*@aYP?DNAu8y)6eKJ-SnU;7n{Ug;xzC3N=8T*Cjj&^71& z3h>~#Vm$RBzIL!qF4`t;x(jK*%(9X51pP`Qx}NfG?(B2Ig;~E+@%dw4RYsO|k;8>q zpNXc+i~Q};x9x{66!-;+8%P@-#swC2({W^naU-7dEWB#a7MEV|@g7$n#f|KJHrhWl z?yFbMJA5AatZ`3BRP#T^*N46bEQKMSU@2NF_IaHH6O~#_wnBC3wX6 z|D5x)0}SizHTdjcr`vzI!`MTJpdrF2oo3IB6E4QNQ79y-qt8&Vg0{vS$HUk{NROAX zd0eN12<4@nMMg5A@#z;doT;lUHk>>zxi?J+uPINSX^xS~MP*XvfuVqNVGm3A{_ekG zkTdo4MtyRMWuFV6XO4OmkcOWBdP%pk<^@7>ne7Fyd{jDTF62Wym`CmzFw74c`3!$O zrot6q>Lpi55ZL~m+Jf@i;B(D8Hho071qyo9gpwu|oLKy4n)1tqDaj_Cb9j>;JbBNl z+JtjVqb-fnISM0}6cmkkrR%tBgYq|Wu_c4wy3!}q1(Da8Yw!8*{LE{c?^T!rs;YYO19fZ98=l|x_Z^vFwt0Y)dZ%n1ibjSgQC)_y5=d83+27!o!e>uxfxFg$DCE;v? 
z3>=^$hIeQxLtNrL8<{h{jL&wZ)PLO|M(Yyuc}2U{9jG!Iu|quf#2=Dhh7T;t)7*0TmUq$?sxdyaP6QczXCwhPL3f=>sSnl(<~A%dr@FcJtH zrDDj0ztqLN-uP7mr1FW8Q@Oy3%6p_9_VrOV-1mlI852IngtV_AK4ez@thCp*cjJTW z@=xexU`;%rLp$ZFmD8|Mp-AM#a-792Jpuir&g43|k&CbqKJAdaBV@^g)Q)DlMkFUV zcD1xw+6W}+jn)tEaO;`25U+gL4>^llJYddW@r57mO^_n_SzzHDC?+t0RWFof1`~2P zhrhr}gW|RmZGSRyqC5p1TFmlq9x7f*444!`$)Q-dDNqJE3gn>f)R3{Iir2PmELzq!H$VEvz4B28mZklKr4;gUTqChBoMc0g_z9)Flfg@S zjGpSNbV&D;G;!4mHigtV$yh=GNWt1#W0Dhk#sf%?o)3)M>v5~%(u||F^Z+$W@Nq8vRGPg>Ek+;MJw-BcfL3$_xFrzFvn}l7i2E zP|sjFl}3{;{39p%lK13!Z6AdO9Qj`zC>a$#)VuUTEGb7xbxP6}0KdHX%K{BLkR1AG z=<~O}`cj~RQ~zA>M|$>ivqUAVcx2ckjpGSieOpF(fbpOD>Jj-AHHwP`p5@oF5|Kak zr@^KSsn)d=z!bqKQg}0;fL7^T)h{6Z7%h*`NZVLS{NY2ILh?sBDX5;8Fh{!g;YGMW zQXa0!3Xf*`zx6lqrP-rT;8Wi-wt5Y}=2O@^xR5fWl@NzI9CZ8%0cjU4v%`}0uoO3` zxBURwL_VJLEaM!6RY)3Dq{)c7Dy!11q8WU0C0&2g5w}IvJ6Q_1l+6_hl^O73dZMtc zlh?|qN7loC;7);ny)( z-@3mNPrEfdMmII?MbhN;AvEx8^##xv&wAR*_Zps##)S5za9Se^$c8Y^nJ3OYUf%T# zzI661p$uxny+IZk_lbEr3V$IiO8blNv%@L3Bg-HxW z7>zW*sSo1Qdv)h2UjpETs6ob;P(5Qc?C9}5g>`&n5DmtWiQC|I5kX_2JPhFZyUCXU zK0o6T3FHK1JXooh^>sN13<0n7G=p#?c z!G#$&%KQ4>z2>2bS0=*Q1Bb@ztCudK|DU5TNLQI*0Q3t1m?YLPn;?>=hwLtV+b7*@ z(04YE9TrG52x@3NJC89R!=IkA8uc7=)OSPfxcTEL=CnKTtc)BNW55BQ9@dT<_Az6C zGb74Io2U6AKyO-&i3i9|W5R=Fyv*^K{_vf*;Eu2m=<7JVtrIH57~xaMM{J8t*qmR4y`GVlayYF`2|69M;9S;1Ex}EAST)3EV;pQ#k*sra1AAInwc*QrS_H=U&>zKJ@}VZT|oKSrj>iQ!Lv}pF*s0M_!?_z*K7DSunp54|Z(Z+x{qbjn z@41k1)7a&2zxjIh1h})boc=R>js*nzz8>j@;uykEugAHIQ-`}4`?Px?br&tBFoaJM zH~%T)nqh)m0MZzzv2mGk?9JD&FlLqKwK`)skQu%x7$2KR7?yq7+l4?w+$>U1agqD5o5x_^k_ll39Imj}8xUOShCz8wt78mjE~8J6hF zrx~*xe@^gOif!nEo1tf%18GQqiRZDS)*#0JQNBL$O901gW98ewy1;u>iIp-;@k%Mtl`=PHOICuS(xQ*seA3}CED|Jw|ak#{To) zp9cS97YX3|-rWb?E?+sFXQNyf_cZ>ysjvM%a-?2{QPIF1I}Gz@9x*=oHPK1li}Do<@}}vdSN6Smo`=fhH0?3O7~nXw4_?Q54d4pL1*1_G zLG^-vK|lA=JIJ%!aAGf!h2eQzgn4kVkul7L7~41DSI`?55**vyGs9<}dtta)sLx5; z)y0k5w7r|FO3;M{z34~LfjQ#AkI**uA-^zkp~A0DJe(W(1(TZ^XFjNzK>4|SxrW1^RN7#-YL3eGF{T|pjG+#hr%X)EyT@Cq-4-rCV*v)+h6PCoe zjm{YI>oZB*E*v@!&wCWQ5@BTy<`vH^{^q?Xx=uM^{BapWCJN@9z=er2;#Q_q3~}NU z)&J7UGnI7ZS~Inv2|C!|hIH`dJ)3K08flYa3vS@cF&x#4uCyK{E0pEEr?o}@IPW=; zO6H;9%r!Wi>v-h3N?ZGUFa;6iIpS1RK4a`R4ca|_c@yGRUj4@fC4ueV8M)KgYpPkw z*s_{%lK>lri;2v+&UQN>^;55 zPcQ(SB#x1W@G`_Y5s-s+gbyCdlvRMR8l?)C#))+j!oZ?IDKAEH=8GqgmVr`TdLHz% zJ6{?UH}^~uh4FuDF;h7Zg$)O;+{=4-;x7OBlhKa+NB{)MhZi0o^+F?36@U{kaw5;x zO=XP%P~#riCx{cB*yaEW9v+)fD%BCFyAYTw4?1RdCbdC8g2WpEUH-qW+X6X=xp^Tb*tD)UOSQy-C(7oKnF7kCCrX zz*#TxSf|uG1!6=9-=vkHT$%V2pRgY4r2LB5#VGR~BpirL^H7fHZQ4?~Rhs${`IF=1 zJh8mx*?$dZKGBYREMwhy>C#XnlE6R@Ia^O3TCWaqT14KIzs4mGXsR@(2f%C7%o5hV zDV-E1s~+|-#-yxt1a7jDSKAu7TkjMjjqKphmjd3WPS#Bt$djZPefoZr{-?y`+dd#q z+)0WFR1WWzh3JyTh1WT%&AcyOt1ib?3`2rT0i^0h3R_ORB58Tt)U9pX@WNjnUXH1O zB+v0g*;PVP3(;``T*vySeBo3Y@*6&^lYOh8tJzrndg56LYbWH$I%yPTMQjHELX}!Uo4>PJex0H z(DbkXFAUKc^8^rAqwm;I$^7*qWw}VR0GDL($v=m6;>95K0Y`~dc&?@8^QvzXGnuRh zC;4O)4Nu-WzIj9k!5g_+3-MK!^7aV4a7_B_K1EnWeLYFDUY3Ub@#Hf7QW)^3-*KI` zmtT@0l*KdP@~ok(wY3k3+X;s_d*gcgd1MU~SwR^zjp5)XJ$l1ga!n>R7E(^a(vD1^ zrGBbA-iIgt#2C#v)e{N7zFdDTu>a+iw5ksYRjBYq?_}^B2Y{l@SPAXACQTg|pM5ht zfz#X`-afCK%(qY*7Ye4zBqQ}`G89gJJ!CL^ny0)Rvq+1Yk%kP8$F{yLNvWwDXJw1d z6%55eo9CfShlB~G18HOVRzE^ldbX?w7}8Ebz^!y`7a^1-ILONQ*yP%eAIS(3{-_5k zfc=a56buD91QB1Z+D&dv5+{_>2m5OXQoEpUrG<>VGpFRT<`0euJ|n~iE!#sl@$i#q zabRK{(YK_wGQfnEuqj-;9>XOcl*%sk15f%Wvgv68BdsdaJ#E8JM}Toqu0G;H2pEpwovbQi`wU<=q{e$+q?|#@uItg0p@@F>?1sN zH`y@q4q;Dz)BWtTFYpNevimcPQn&ExT_IG#3I?WxNx}I2$=y}LZmbbU{ApgCHPY$b zxsS180)yBnFWa*iL^J}P(Yue(5AR>Qz)SitvBe1&0bO6-4cB0Z0KMw`JpBwFcOPIR zo*~Ayag+BL9yP$1N9!J5t#~Q&vL0PWNAz6Ot1>U+)K4AoS_7I!E7y|;sk57yP2e%7 
z=bRpN=GVfr^2O^7gQTG)6sVuAFJLS(7PCekacU^gDCp*m6+(bd`@yFp&?)vQ(nD46 z={;UzXE3h((ck{v#6dP>gC5Q!EC6Vr``NEQ#+bF&UBfUnkHL3ghK=0jCb~cT{$CCJ z@Ba1&7%fM5EKqTbrGNaB&)InNK86I|m*Cm>zxCo(&puydW0-dP0;r_Fuozj>F9 zFEJXsS>>~=1jsw#R4`P{5VzP+3BK$b7hYqU?dKwn9;6y67xA1P;wym}yh^9|x?l^V z`(t==F~?`I3%CdLfpH9#8WNT5GX2i(yoGUhADLRGS;8#%wVlSbAq=J(pETgB*WxjR z%m#}9E(qJ#n%x66<{iqnsfV;sFkyh9p|Yxck8e?{znC5_*D#Ej?VPF$8MZ z5pE1`=(h-=^4eKEim9(gd_Biiuthw@XX(%Qs^gJMSQ4HK8lBIvuw%#_jm>(ge((E# zt$U0Y>Q8_E%WfC`7T9o9gWK1)Z)fzo`o`yNEWpeV=MaXP;C5cb!Zz{L`*o7*|fi5Q#1tY1#GNo<>8kX#`ks%(kSTy zi5s@MNvE5V4n^tK2qv+lUZGmwYJTA(tGmdia6!ga}FA_w9_1I;+Q$Y0)(3oyZEFb-secXWzkh-ZF{Tp)=Pt^ zdX>#g84rA}Ijd0i>bLUtxu(bZQ^v~PiBFr1p`;wQc9ap1zgkQNmhr{C4O|R0REx%R zziM#`fih;S(V#9R(fbT?5ip@jpy>wIK2L;qkvz{VuGML6U-DphpqG3xvCl@x_IVBY z`!5Kev%d}Reo+IyQ&bovaW0_kAeZ{uANWsJZT>2q;FdqZ$n<&Q{L3r)#)AzRWF^je-yz**!hM8is<)T$MEG^NW1IC%j=rQcgULKLDcB zxmOk(vX+?TTofK_QDq~0>B|J*nOmhU#k0JKOH`>NxYS!m3Cjdt)+E(<3_%G)Bgxj) zR}UL1xIL=}Vqjsb!oy_1@va*#yo< zN}r8iY6cnMKTafpojfq(H&{>~3HGI(R@GDu}@fCj}VM#svF zHXf`z*YFOQ zMocC|p{>HXND$o#63PG*#VXu+fedZa#U*Zl0Q6sIs5`c=Es;9fBJu^N>W9@<_QH$X z1oPpgsDd(j)an`ffHfRmRk=n#r5Yddrkwaydw`pI=N>BRq*dy9_EJ8B!gBH=J0?N5 zNJrxW8816^3AmC?0If;YHHf5VeSuL7^6kBKj;yS6!)b+;w{*Q$Ub(3J@ME4w2qL4C z#&)!x>XRA%;TL9Y=XNPf(%_Ev(2%Ydyu5~=(6p@lG#xejAqbxe}&Y#NEJ4_4EW9(@Bp<*N>spq$A3ZyOZS;7-4m7IXuj{vf|Se)*TUvMu^0 zL-F=L$2tPVp=dG z58-4W*kE6Io7d`PhJCN;xVoM;5R>fNRykLhB1-!tGXwzwAnl_}EOY z(S6Q5EsuHb%Tq=#gFIINar4PMupMJ9?g`lROW?9-WEv0i=$g;9@LaH=)r|BhXP)vA zmk0ICt-Ly!QMBgqM+wM(#_hCe$t0}y%ov(X7_&lK-D>i)og1mbLAw1Sx>fM@*PwPB zMOUicAhS;amvO;*@{1g#l``s6(C2T-krZbj$>GGgXC*ory;Yvd%3`hU+A}GOwdyNv zWt~)w8jGa~ZVH8ts5z=HaABT@INOnWq)5p$GG;QY9%ljJ7?^zfSa^nxcDj&X-b$EcXV9GMQ+D0aDgh6mq_lUqG^Qc#4DHp*g zo3rpoSs^LzCc+?MZTM*uC!#wZ8UxWcJbNwgWt3y7k ziC)nb0u12n@fJ(UA;md|A7BD$J#8}%bu-t(h+AZWS&;vx^|#!>E_c051ra8&0T!eZ zs}wgHml6Yf zX^XhXdVg=|x#`!MCwb|`Bb%4xaejK)=>58i!E|Y515e&fy6I6j$*1LWcoM(H=9E_# zXS*{P0M5@%b!RXz%<&uIWq%qYz&IO2`t_fmOB-K%7aDrY8N=3>CBvl{V&j)z+{Web-6zOmi?_>()`k3PDCtz73Gj8Pb5G)9zOQSo+|rxC@zeu&{A9#kYI zemaH}4Yr1bNE}@5t+O7@N~+f-ao7(vNb0$%u5S_^Wt)u@F?nU&b|J&ZoRkg0k_|EO z3VieO0*0x_Y$!_Xbrwxli50)LskgFmj4_r#^XY*vk`xB-V+`*nF|0Z7agk#eL$-!A zLlI~s(-^PkqG!im!*i%B@~v@GuT{O8w^&4RJk{fI@7aqi;B2!Iq8k9JJN7M&pBf(> zM#eBWE+$T}8;07C*c=j%Nj;zqC*Xp^C^DYGNR~LhG3ZXAcZcz4*0b5g6+QWF+p{c; z$RDF#bj&&^$5Sj0#MlIVJv7gpDG%T*UbqkeoEtpuK-X{)F@9sD)3B(HX$;%Mz#F3- z?Pk6&_j>V8As^>=des|`-+CIO*m1|rD%C!XYD4H`FGs!Z_u1TYAzwGRIVN$6F^JlC zjE}y>_4U`^=sx@Ub{0W2#vZUJu}OVhz&@m`p078qU+ZpMK8I2KWw#1W=L#3*#=5`p z?Hf!P@w~;rny&!iVT}chOP4QqckV59zxw2h?pq&z(Eavb`c`*^80&j%boq?-)u5>- z@MoWV9=$Sb#y0hqf4%U}&{nUpnWh^#Cgcd7&DV)f@4M5Pg*oJLvip=!F6z6n*Ttvt z+y$R0#vsG~JR%IsEbTOoC-eeF?R$)Q#t_#ys+={XJ02{MHjM1$_sVORV(k6&%dcbL zyKvz=n{M7^@xTxt!|28iRd)_}#Dc~YeddsjXxHL3KF=nh2lOFB_Mqy~MUC%zeqO-n zfBC`!hRMOi`&aKP{yG~>vS8wuEG`;pXjJE8&|wI-k7gkZxjLr3V4=!K@E}H17k$h( z-SJPuG<2qE3!m>Va*feDmH-{SvZ=_Bn-|d{^?B=zF`* zpDZ}Ckc~FbS9m5d9{W7?YX?L8%+fCmPhf0gpt)o9q3VUSWGDohcVO5SIJucdKH-K;C{$xl&gLYk=j zWnRjKDCH@dbxen{zO%v)Lrr)AB>c&D{k$(hgjaY`R?Z(pnt42D^$HS^MQC~@P5C3e z@-4~rH{|MlzZhf#%b)*_!|{-B9}%-U-ZT@dDti4CJrpi=5z&g$MA+h%}7kzyAY<3Iyrop_;oCwNxR3QtA*(@0p~sVrn&54I!XL~FlFneqz}uoe%Pl4VfOA^TbBsv<3-7?mL&6D$$`-LR zsFpzpSZVOkOhus)et1|%pq7(I@&l((q+PAw(iBqhMrN+lc=@c=dnTG1>fkMEPx;(y zFch~uBU0-kZr}1MPP-hK#iWuvh07n(TA9Gwh$O>LUgU%)zhHr*zA+-ncau%9iYuB& zS@Kl;)GunqHJ?ukUxSf+Di`UKN4sPIlD?@Vm+?q7B%kyc1R`_gYB}@54tTxtMsyg* zV{qplykb&cN)FJ3w^CH;lMF6ind&{A6(qcNYtTqx?wUra8(6Cys*opX(g**A8B0Qn%bHPia6mb+CpKwJnjWdMYw&ZaYCI zu=10(u$=YrKVDptNf{FwP1`@Y_FwYRDF>=ohjkWSDC8Er-dj%E;J3e-*JP7L6w0LT 
zKt*@0NUp1F^(~UZlYD|B@&<3;hK?(}j>CUINXZ2-F%j;5$_9MUnHKk^`{6wf%prA(U}81B<=OqZrO zz-1dX8C$s0Km?5K9=ePxS)fE7LFE$zUcy`0Ip+L;|lyMff^r#vo!S5 z-pE}U<*=kki+1V&ERRl-;Z>fBa%7mw$&ZV5t-NDJ(u2oK^Qmw2hRg7oHQTDAyaY$l zE_snA-6rEackJ(gWMgPSQ%iMJ!?1MtBTatC#&zYvGotsNk(b2QPU&7Y-iX+;@)8u> zTi@bCausr`V5DJ@5-L12eqv~+pV^Yah^+Lnu2Uao&`A@-!+HggFe0))fFb%r8fW!H zy^+Vr&$P5zk3S4hwveXZI$jGCJhrcS>Ljqxp(Oo{I!eUKq+4%kLdRtJ@H&@G<|$g~ z!9i|h-1DoPss!2a=^P_6X*A7~ue7zYkzsz(N9lX-Asop~mRZDZcmra##Zn%Mxi-;j zdEiJN6-_cZ=c>pHCy+8VJ!F+HNseva!+sqkrmYxrB2RI8@YnVdI0a3mPR9%esPK^= z?{3>AFvv4{CmQi{RtCW=PyTpdFJOo0sNaIxBVSw zFNJSqqI&{1c?7>qSVfbmTaPtDPjT}B5ayU4(wb@om+4%HHZaHKa>1WsA@$0g3CD?H z!}}Ur;XqlybN=;kDTlO#<6r4+y)0zz0CA=6p|{?Hwe`4Ls|El7KmbWZK~(PF{>hKJ zAKzNVOPJXchL%~pNj2QX2!ZF9hPh2%_BARRMq(91?bi0Q?u+~D-LGygb+?E|tykUz z#+hq)mVR*MZ1-DlT}FYS8o9w7I4k%32p)gcC>nc<`EU5vu- zUA>G~?D^_FZadxl^h|eedAa*O8yP;wz<%Q*p*S!Mt*@+h^LRl% zM5ar4Pyd%6{Su?aVfTA~42-*HnfMK{?Rr_J?b)+2JuP5X#-Y~3_msN=T#3|_R8 zgrjggaM3`|-7~XuG48vus766U^B4=*P3Dxz6d^J6)YhAO(hw2XuXg8&zdeEBP{XO7 zyh|9y4$v*YV+_?(_s+cs82^lu-{C>N9V3uqf_gVUGXsqw7DgU1^y-m08e^{QV|a{l z43j<^HP{(n-0&$Hq2y-)1MY1$tkqa|7Wrsk);Ml>8D(GLB~B#Xx9up-VLeMRN)9j= z_NxUATY6Eu8LQzKl%dA67>h9g+xC88VcR+<*)&%Qi0%*LCWY+}36%`%P{1NzMy*D$tCW59kud*bO%+pBA< zYY#EDxd>Lz9XCgGA;I|DPvBD>@yS2I_@@qP@OS*w*y@I!x9{AG@ibv}FrF?@&tVMk zH(5~B2<_sJ20#~5Y%7g^D$Xv8H`~B#yw(V-%w3#vUxYR4l|DvW+9rlSaoiP$A!{_g zYY10oTtKm(#(NmOr~A?e_0o5-sNz*;zQ+8@MIQV3A^WyD{yoHiY<%i067l@jnB%k8 z@J%Pt8OO61gtu`sPscRx^PGV%_aexcLOT!WkVwAIup|2G0OQR``dgk^^1?W?N9dwm z>g%G5`#l&79cH@MF!Ekwalq|kM2*l5#%pA$LEibYI&_kEl57G>e^l3f);VDI zhV(oqs7n?f;Y)eC=&X0WEv}xGE_MlTcjkaXUQ! z+~CxCsXQn@H+^-i)LY&!$=rDP42uGjtRUZATFTxEK4T1X<4orArE`qQELuTF{-wEp zL`bA(=*8|MHn`qGPP_13I`15a$4}+~c^5!wGY1Mb)ENH!DNooUH7y@qPWTe?eBV*m zJ(N$a=L*@-(m$ds(gIRrYydH)lKz|uC28$d$CL~+}9e3GT_hNcBLj0tff$vtF3icqm~1(x*fUgE1s42kwti`U;m! 
zK!d_RVpgV>SsB?HL#3JG!c_~wQUWXRB;z>JF zmq0?_Ig9-uhrF0WK1~>YgCG8KkS~9!56H?sVNPQ(1nOS={uj5hmPP&^<$fU)CgO*CVG0C||xiqZwoGp-VnRqHo+xsvhfJpdHonz1whd&=k!iXQ<1F3Pb zFYAVp%A@h(1TqFB>lq$Aiv#8*XIZiI<*P@K#-R*6TDD57jM5duo?RVUq!&Knkxyu+ zWbv8K)yu8n{Dy%sqyw%%Bi-Ok-CEi3u8hQO0p%T;QN(sih#K=qccz!Amgh##bdGvy zhKBbTI^|oP(l8pi1|EHJ@uL~t)8UnshjLeD;RPJ>qMV{HD04_E6Whldc_W)YWzbOW z*28=2Dl9+$wR#mF{Ehy#vgCWHKS3LPhPRv z`EA$rkTjnZE0&VvOBT4u(^PdV`XFy*91U;4SKT>Nmy|2&w74`Q-oo(AHTl!47 z0t=bGF98g`#wRrTBQ)Ag&tHsDAhAy#BlB9`L>}Jc-Fy06J>8QRnU?TjOWs5-UF5;F zw`J_|Mn0gUn9cAux(Y329eD6dGymHd9rhbO(^G~-W#pJ=euO|K3MS9FmEZJxvI8R= zKhwYw7!$0I`r<^zYhjd`04XR>wsGMu!qwhDg3GqyNX4Mp%S~k$O*LkC$wVUqQVP`U z$9shZTx7;&jT@whE4+{5*@8%uS7o3^bzurXiD% zw74X_SMFTfcY`_l0x!xhdL&@Mrx+vyVj?jAU%k*Z`($v3U(1Taxr}wBOxTuh5sM?u z%bB#E!N{ZKAGZzJw!6v21Q#5Z5#IE68@$pnF*r-U1!USWAjIVbP||7=O};ey^5Cd3 zTO5T?nVOvgo>DGo$WP8U={2}M-+C%1=?QCjK+@+0%}wD{ztVnT`Bj@J%N*hi3gLlG zI(Z?{o|Cl4zyFgTbziI#@&cGAZa&7#r=DyYJM^|4#sIg$OTLC3y*c&V)c9eDk<-ZH z5+PIGIX~ZB$J^}!9-e2}giOy|U)Jpph7WMkKSrnnjSa?uHD;?XPQ6#{3^PU4vFFap(eaUmIk&kWUz1B?>5+Ia1uk!Gn@|!8_O9;mPhkUjk_Ci$Mr4(TSmx30-k_g}A}bF=;?c zZ(+w3>8o3|3 zP&>OYk1>YL7%6MLjd!boM{jmN>DSx%CPr1mu-xLjv4saY^Y8_{IrS<{oNM&bP%7C> zjy&W3u3&i8quh`iF6!83hBML7s#mOrDm@`JP(H?+StGT2uZ&$7ID2M3TKS0nHHUZk zXSbFTd-**K_NRC*KW+EAK&MH%`0QhC>*UpTN%|{3QGZT-jd@=cBi(VKMHnturRzeTy;fIqfq{o9Nwb ze^b9);J|Jc@7GJ0FA_@}10;MXpFupK^A-VZ7oj#V60&BJ_~NHV#=5uOdAA!GBa{(d z)mPqntGjgV3X5Q;yKC34bqmBve+j>957)aD7MYHAx4WxnNW*6E!yo>HzO~yu<%t^@fh5;nk=7Wjxk5F*@rp?R?Amg^*>wL&kz(>S35B$BtQypN`?mLz+Hc9EV(} zP^VqgGR%exIF45_)RG?>;LUhN-Sj}da_KC_bv)D=zt7`Y?MANCEOt(_kY+!y4Qz`s z4C60o`xxn{qXuWa%0W{`H0anLUFguDZghvDbygJZIro?|HvUaIA6+aN&VVczBU<%`XKE5u`CTp(>E0p;BhhnE^XD|84Il z=%6yNZ|`7uccWf)!i7Z_66~MqRJ^@`v8W#TE^=tpc086>>twi>S}epkO1Oz_3a{M=;ZtUPBJlikSHcp%^;1Cr4_)m`9%)|{CwQUA*?P)lJm|?XOXPcfy9phF7@ASy^+#;IBSr;iM3x zZY2Y?g?|xb6NN<}-oI-~^c8RN+N@Yq#InRB|9d}(G!P&tcyTJjNNo=u%c!4%})4^0y2AVWD`8hN2`T3ECkE&ac(U?W6Ji?(V972lm+=iAa z4Pb-jGA9@^0UGjc10@sz^aEz2rKtB@<$pUXC;1aVEmY0C4^TBq>YnVP>nCd-;R_j; zK*V9XIK*wbSMtWsGF~RJ@?s!^9?g!N>@ehp7t0s#DPi4?`2gj`xqJt(6x zaz>UE!VK}^(zvvrh{>O+G#MDXLvK9Wk5ysA0^?n6wLDE!Y2P})p zQ)DHRjgRWl-1m77(Is!8Jkktc$V+rmYO(`SbIED4wknb}O}*luhvKP@s(iRnrq&O6 zME8_U07W3Kz8rKcY93b(@Sba60()O)AgX+Wwod*uc0(r{?MahEZljOTNWV;514oYO zDI-M{kJuXBwu0|M^rBI*FZFIAZVX2tyW3tFC!x8?$ zopPjww(VMg=!3GQ#PK*=JXlVU)Imf&$`Z6G!BP2D5m_ttl#Jz(L9!Y#>HC}zj`Z|j zAE*{;eYZV@;hS_pZL2~8!KxEaMXuFdd5{C>smsybq!zxuO`D9OZ{P|#&%t2D^1uFA zT%4S(YTv#YtHiII;Hk!D82~7CATM|1%VzL!w(kfd!tk$t6|V*4GYPr$^;cfPwuQNp zi;#|~@Fgl414mjYlOOe89Lg62^f$X*&qF@lv$jab5aPJ{U7$PGKt{ zPEI09ePoU_3Wi-0Gai^KUlIT={Wj&iRhHbyTTmen>j9oR#O=>JVf@Ff_(UqNnVBWE z;KXHqAxQDYfF&CqPPA3-Nva7exW)O+BXnw9w`h&wz^fnmiJ>RvS!A3u7N};we(5o5j6) z7g>`aO#W7TS~dsej%6f2jjmc;Wh<>gr&#GO!1DM197zdHxuI@WkH+ikYCHM@6QgO* zLk|y7P3vU@O#V+@#gYHHi0+% zJ*!00ss&e3e@srSDjJ%lk-gnDCK>pf)Pmk;Nt#~A#<3L9Hqz{8!m zzTtf^MnBsso6BO%F=o6k?dqgoUFeD_F*ixZY3b9HH$c63onL#whg z?8y0x7ouZ#m+m7sLM7p0t`Tp8&=c0hexpqTAag1_$nD0_=jeS4=)yLhv+qco#GEQsIbB;B{;+M7o7e1~KlH%PrU++GA z?;VVyQ{8Ld`c}7i`AYZt8*g;)+_=(R#zWcX;{g^vjdv@n>&SMP@GXRkT3$<-8I5}* z7;W7MR4>=Hhg*?>?cx3h>O?$s(fKW8K7;`--mVymH4siBL%qLUU~$1v4_+5gb}-7$ z<8}Yx`)^^`*JwFNdp^Nmi~0fY#=Q#|iG374h1O+4W$dtV@Fe4i&oqV?WT>%}zJTm( z9OG#Zvqy#Ql(sUw5dR9?HgX-_paMxbn|f zxY%Q%!Ua~}u^bmpV-$DXH$M1j`hw$$_#qB1_-b=4jYV0!p(ELsHBe{sU=}a@%EwR~ z#@wIB^Lw7QpGTHw_;SIoI{eCHT(4vdtdsPaS-r#gqQW@yGkhW9qJuPz-)?Mn6Ve2L;S{5d+K*kp7K5ByKRyqN`+UD|n#XW>)EpkbbmbB5w!0Ybe} zN9S1xiE*4~;t+krMUso`F=32)`-5%k;)Lz%SgpRf5VFq3*R?RIk)F^=hFXDlzb+bQ z!O`$QQ+T{P4jFdF@z}AzcDG*Yx$UR1U7T5?{JXV1!&BA*YAvxO)18 
zFhnkt`IW?J4E50g__O^j@4`wM@rQta20WV{lYIt*xC>=2X1l;6P8u(A@GGxfo@>qn z66y;6#(#qFSEeB^Ep-Xu4Jb!3R?zK(|xX6 zz#b}1=dz~$AFA&4d9Nf%?D`|Auex?i>Q<}ox0#-qo!Rx?Y%hjjgTOd|2m%K<{DLRjEQG^rd+puX^<4ViEva>F-KCbg)qFo$=TVQ3^!&~_ zRase?Sy@%*WaY_Phw7zK&v_mWW;jWn`+7lwhjZ#5S%VMnB*2Y|MJIBlIFyEz=g6e< zV2z%n6u{)pxa~F3m@j`XA#v0ZHTPXYy+*sLehfDlSGn@lx#}7cCVk|)5RW|61Lr)u zR{-i|Aor}PO*RPdX=FB3z!t0!rZCZdjY=$*&Wb*=8D3!kP>2|#^tvJ;T+PL!T&=?H_qZr6y+?xZl z9jN7@9d3;`O%Xov5r-xQa=m_lVO7C%BUsIpD(S+XQHFV;sY$tU9<&8e_ z)xBhruZ*rcYack~4Jx=X&;&Du2WNO3+5p1g*&+cwLnB=MCcjG^9o+;-$0v^r7=1bz zm(@pBwUx41LfPN}-)~-O=>24%4^8mN2GjBDdNDj~$%+ln8z11OpfWQ1ukO*C(&FiH z-x?I!CH>o1qrkwTqhIi}8cMPsenc{+kEujO`?lzszM!8xzSSR$UqHe)oYDQ^ z2hz|^9aEw^KMHtmcR=-<5oL9l4N9NBIoE-VoHq~O3QQ4y^{=@PQ*B~e zN9V{VI#o3FDG#e6&LfPu7?=?by#p{4tg;9SZ#dAYD#fN+cw|@S8e0}@Hx*ICfR*31 z##yy?Qm|Vx!n4~MgQlgzzi3`K$Q>LR_19_gVV|4HTk+XckzC;3s@PYh;G`SZ)LT&; z)lftKRs_gt)ZuftNzcrt4{V{w)C!Wwf=+Xu9RcsQrn3Ajhxlyk68z#DxHvQ5#7<^UwTBFXp3d;9_H@KBu^+Dl!jk_b2g%il&uAnlo zb{_ucZsV)##RnbPg9h*al!p`l{ZFPm(dW=Cqe$-H@7Rw?#mt(fzI?l1ie z^&|{m;P!wU&MDA!|M&DghNz5Z8=0PKq)(rL7jVBdUclSWpZ>43N$4h?WOZy}>S~_{ zkK6vhHL~9FCVds!xcoIdr9?v|2G_{FtHA@MKGZD_xli-D{D*)1XMN`U-0rQs4;hBe zwV9#~7C$_HX&(N6^2STMzx;!@=M!lKZ)d3a;Jh#l587NZ@6Wt5ZGtCE#Lg821>W8A;3w zJS}+(|Mri*SM21Qc@jRodp@HezQj7dm2r^~l(!(SKAYDuSbdpM$A)zb6pRx*8{vB{ zgSpTK3_1)Zdda*jC7b6nz}V>U%U86 zlX7{Z-_PjwT!!jt%N7%~nGh=Cely6_ToV>Xhy#5YUB@<0=|i=nMG&(VV<4PgWsK)J zk8kgiyfb-&znEvKn7ioYG0D*KTAuBUdUyL$!Qu^LGh^JN<`JQE7{EU-G>&H+hG+SV zO=E;f=Dga)@csPamErr%Jd`cCFxD{8znllOHoBV*GiusIR%izyH=any&a<&+GGS~M zzJcz&e)fe4xnk2(UsTX1M)*(8T^QpUFK6LmCRTRb^LT!cfoeQQwV639{)MH%@5u~m zUla-jK6NmT8wp;_P-ipJX^-F;Fc_}zII)=1wX1nye^sM>nbD5HbiQ^A2K4ROgdWI2%&sPfL zwVOUPhS9ON-gbpPiqv<;OpKrww(ZKg0EF#ZwnE*~W{Ed0!?YG4f9~ zZk%g;*gRn6? zgT?e=Orui-+-P(PE#bJ$a7%lI59cJIIcY4?|Rx4z66AB>GXAJ*TV zZo%i^hQs*AMxZb7hUQ@`M*D;jDUGMR4M2rhIZ;0{hcJj+wA+UUnou64S%9Vglb?JU zos93Z@S0qBK?|?24-q4V`dkP1s1Pc`t{5|Im}x=eZZf~tg4<^;0AEPg?zLda@O?95 z|AeSY#uKl*!A{%zqTri!`>Ax`xpeKF*IwDZ(tZ)f5k_AI_OA-5vkby_M*sB1`5LOW zvfym7!&fTiHMSP}4_G|n4Q>3g_#wp3lfgfzPm#MYE!tI|)pvzdv5BTd5+SFAKAJ_p z^h3LO{Mo+;m~(|QGoH_)Q+UQxzA?{&ibJ>-A=JoZE&3jsJ~<1gJ=fsd!kPsZdL_&f zL3*~(evH!&`ssmjzQB0$z$|DAF=TwcldOen6L#$LbC(J~b7eNUz1*UtuLo}T9AK=v z)`Icvc(&)njd-)6^X2-N#bukqp6dC;qSB?SZCdP+tNPJyJU6Zy|9sJ;?|becQ{ijK z#Daxs^7dEPYuD?^-&YHLDO5k0@HT1uJd^2w$%*mSbpH7WBbvy9tshrc}_kaqJ#gst1GL=Q$dTv{E&79b!yD49R zClWL|K@X2|OaHbVT30E+(onvx(FX{A&q6C>@i(|jP_JeKJMeU;1Y;_BVtwi4!VMM= z3pmUb@(kkeceO?J{oViOZ~heK5#t_v=bEsx(!->oI*o?S#q!iy8W>zg<8N&qDHg3t zO(H$8J@y=K5Up@j(pW!)A(@c&hRW(x##+NPKJ~Bm(Uz`<_R0*difYm^ER|8F>uLBz zaR3b3t8eJXUoZ>BsPCc&4e~mVCph>%8(&5W(EYZ-2fm@XGddmcuXJ>W3k*W%a>1|k z5~hH(x#$coV_F?$CE};Fp)-}<~}^8vBON|xS)H+4rFIWNYrk~*krV1|eigN0Yx z@cL`@kq#cnGUN-lx)2G`hNJUJ3U+V}6Xkas{W`)dRy>wf)ydmB_-gYK28nZc!=r9t zI1p_@LI2XZ=+Uhq@iXY5W!k&+tt?#uu=uNN^{G{GK)GF9g~o}I?v)0{q5M=c4(5awbSEDhnB54 zxlVfrmvIaYlTF98)#w`7@)?a=keGIaQ)R>RM!T|PUh-s?kB1p|YLk(yGLsgRF#+@x zDA%IZ(5^h_{?#5+ZrT;CbZg;oqI>&=Hv{i?I3ZPjfEM0KVQkPY^0eDjUVgY2aCyK> zM-MnW0t;qMx*;7Jwz4A&3BbbRn6b2ccm|lff~gPaBUE@KHQ?P3P5K}3U_F6!Ge)eb ze8XGX7%S-ze)~I;s@%T+C_mIh@6y=?H#!T~R66c}%KBj_I&y-Mhb@o*r@0D5D_PD1=)if^PDVf3rCz|O0 zhR%IuS%m6(kw=Ii`97$nXv?137G0DE)Q=b2YA%0t@im{f6HqKEPD4V8tLg8GfJZy z>Y6`Y8$-Ka6U%q_mJXyzs`H1g*U#;5GAx#?4c9Zk5K=(RY@JxF5Q+5e-!7K**3$7nMbveAd4w-x6>C9N= zcG_>lH*LH!S6fBnEY_4p+4w$31L3fKg}XN!`joJvs2p8GcJ?US9p!_4>_yv?)LRbdooGAu`Ub@nisohG|9x zX51bMrk<%+`Aw-;-=H(W&ViOLf|(%;Y?q6s&gxo%A82xuH~0b^JpJ)+Aplm!$X1ee zEpNjscaD+Ux?D!>;Hpd9Q@W?##nZw~&ePVad3=;Nb(3LL^jmNn55eu{lYRZKXLvf9 zA>v*}fZM%+|IwemUx<)4?`v-V(W|F-?-lEp$JV)w3FCR0q4h{F_b+8^Ih&`PP4pPk 
z7>}cPjEfT+yXW!4ZQ#Xt?qyxr0;Jx)_x0|Vd9MHbQ!&unAg}N#Uu3Yj+KaT$su_jF zy=DAmMBtg{h5S~=t_c&89Ijq%bI``h7c#E!_P&?V;6g9iyjU6O#5I05x=;6M{~Kpc z?audte#;AahKPF^28>q>r1R1gZUzb)cRtT!FYi^cgLzc*WX_&5p5TwjJE6`dWnH7i z_;vZpr98uL?1~_=d;ahfyEikIzK|Cn@9}4gwd;lGp|P}ntoJdZnRgk04}EiQ_m_U~ z*6wE?UfTWrfBvi8Cl_yJsEv;r?wLGqmGx4KUhV1nZU$*>=edwP8S{h@xRLiAV;RGZ zG%xxSKRBQXw=wT0P1T;A_v2Z4x|sObla_25PK~SL{O<9P)13?vyg!#$;;MIsa@>UE zT3&uUqp#lix^Odh(=+3A73hDBX>S*j=lid>Ib*W^suxomX<958kK5*=F(%jM46O|P z#sl&?k^%dhylfeUXP&99p}8`Lnv1jp!E(wF&VXaa>8{dp01_C zz6xL*WH=FW#p1w``q;>`aefw3lJiR$^Iynol%6rl@me>wy#De_ZN7PH_oVx3Wfa5v z^$fwhQ5me%C!E656AmTs_l$MECSdT^4mavo+VvEpXmpDmPk$ID8Lqn=gY41hw{YX@ zj_dJ%@$#4DG2ZsIL*CdtlTJ^H55HoSr}dfm_1&hW-iMb?+X%R~8o#>6kqf04KL zt=)wTnV!*()~C_vc(S&*ml5v9wX3^xAGM)x;e0;&+Z!v7v4kt1yzyIN<6Mp3n z{@Pz24{Hlh>x(4D+A+FDqcBCh{f!;8 z#(3W4Yz)ho4YzTI?j1YYmruSLig@kGEBj`B%@;WOq)<-peeaFkJ8cg8%GopXMT9mu z)L8y(`%FB_;e)5WF^8V>nip2g7+@Z~D~y&tULX9rIgfGox%zvv#&eM5tAAL6O z`t;W`!Hdc4Zhh2P&A@AM)_DHi7ZDE{qxwoJSy&9X(L%+qJ~_AhB>u#2H!jN;y6R$I ze0)|P;q}f@&p2qVr|UKmhgZn1<)v?&5j)-fC53Q#vN^YKE00=~vyfpUX~yxx&(gCN z35sL>phYR0{63L$tA*oKaQKV#^f9`V+y2-;gGrhvv4B0gJHXEYs2egeW|8~g8p(Dg zueotAL+@RRto!Y`cJkG&FRx4?bM+m7V6}CpbtGu_@_SnF=s1V|#LIb&ZwsEhpmFGj zK==2qJTs0VSvh&%WmX_s`^WGL)*QcIup8JKdvvU_<9(eh=N)%mWI(`;_ONtbemHmz zeiQUu8;`3(&b)gKu=3sO(CA2LH8U!vf6p^=aGnjT0Ptfv1DaJtZOF;3!IhO$)qY2LbX`!jxUcf)*c)2qmGX>_Z5 z{JCyGCL0}0&4APg`@jr7yg;!}4=nuJc#V>aH#`_9$ZqmRhU!|fhGZX_gencK8$I)) zPj-kTpE)Dg5B_+xm^5@R9;aPrvqaRE*CanW0gWo7*lv`yDDkj{N+77FI*Jn-tlBHF@? z3HY7q85r%Xo;lFr8b7O^x$j|#Hv2#PZ!lxTne=dmV~ttbN?pN1RUOJqxk+zA(`L~+ zv4m@TFW(A)n*Z*nj>>}D@C?!+ESi(m9Qp{@0or&Op2#A)0PSTrHXV7!4wrNKb>7NL2iLd|e}hBO)wXCEqxtmBl|J1I zApKKK4|P;xlGHe|DB*FnVEZ}YjXdTa&92uu;m-YxHIUWL)9)42=Sm}oecAn30_PYJ zRRLzI3aW>MA)++sh5+Il!om|BGj5t|AIof?@4S_p!XbFfPdVY4rk3Z1{0DyHB|5vU zOgw47$qPbyuu~ts+Y4@BH~5)Xl1qJh7E6Z`e@dI?CwKa-d^mfkUwk0Y4^90CJoHK4 zSAfcny`B!BGC(G0WT@QgpXCk8>K?iq6NYYyTcdAuz%gyPo#)9J9fKa8e#(;dq$$x6 zfLm}w=9EE8u%!(h=m8rJXTPPJXuxOb(Xn{=*N`icyWM)ppwJ z{j`^R z!@*nL=;5TphsKTgO_ngs8av*g8nj)j7f+688Cgz0-Re$upyudevbHvymjVQJJXPqD z_rCwm?$7@G{oVimNB^`J#sj;v&mGzQN5A|1-EY5srWZ$E)kk)(z3}|*-Lt26zxShe zGX$S1_V|g6WY5jZ<)w_)UTDW-ExLTleIoBahDjcb#%o^T=X&A)7axDI`}t=Vd)dyj zd^6}Vrtua(#;_0^Vi{wf%Wz@yKe%~N^Van8EOdd-z>nmiY9l^g>Ad+EVbmk$@W1hs zcXw|T{^O0p!ko(!@>;RH8w$qAmzGvVvBdNAeknsAqbZMZ-hJ0I++E4T8BU`UgY|@k zD32#B!vubAWkfpu)T7+roLL~HJo&o5q3>jYP zNt{vldY_?dxmQnT9Ldo2fBo^V!dpO_=KgPb0~2GL((yoK2&Oy}r+17%#Vt-wj9t_B zYiCV)CC@wy5yBL#v90H=d;)B~XLK1= z(@~S5YrMD}oXtO9_l)$x=hxbNw6HYw9bS}P7N2ed#;`}m6+ z6XJw{MCcHPs;4s&%|?fic8E*QuqB)hqZS^8dl(~qwQA?t@UR6H^-a9{>I8Tt?^duj zp7iB_AHywN+99qo8mc?b_+%=4fp?=Z7S(<>OcYYX%RVD3Lz`zH9^v<+*XRGQojqB2 zmMgnoeDvw=y?5W7kO}8A){>uY#;DI|bv;8bg92ll1s}#Np6a~Ng;aUHrZJEl==7HfejKPe0M*?x^Oi%CuN{LTR#+X=5mH|efxKR z@}u3`jZ3~5`PDB!++8|q4MFj zVJk4Yrp+H_JlEIs@1x1}PKH(CX&lmoG_iT4au%0(X$#LncIH*>&(V%)SyI{PO+Xm(70`42;+G8MO@lAEgi13zu~@4|QLr2+3lc@immiqpurR z^i%!Y*A36rC&e5;-~Jd6THsiQ^%f_i1y4r{S3+LK2xAMq&~F*7Ej}7=SFCf3nT?f| z`>I8W8K|Ra87-SH26wWpjM(wMcr}j0J$qVYM}ZG*`u>_n%po;webqh{<_&FRezWQ3 zyRW{ud$RyrK%~Dh@x@ceW`4FfC-e{;+KhqyOkZNW))y)JxyXVhnk*1lF!Uu2Ilj<> zi|{!-)lamzs7)L<8Xqi(&|A8JKVRcmyv2b zWxi&xV38#1FX?yl_@RUCb@A;A4Mykny){pIw(?#^LN!7Iu74~7VyqhvtFCkZwzIKW z?_KjSOk`JCc%YcPU`kVlzWutPeV*%Pe5>8dgRANT?Chfd`h+@DC&`iJIRKFQ)-msj zl_vjrrVoDR_2&MCZrww;e|I#W=tolqP`cvUO2^O}1D*^v&Ij1yFA#d>O#YAz4y42Y zY6mgggn-tbao8mmZHu1;SsK}pIrB&;P^$islcN3R!Mrnmr+vb;m;GK&>Rg6t1y;FD zOx>-1g=bjEVCd{G890<3DOG4>939{AI-;%NpXJT4kXwp*TdQ^(ZRX$<>=3$0yH&d0SyF+JHj*St+?|B)>`j@m^z zKMN%Bp^k5PYIi+@j68ZWFdn?{ykrmC@R@;j5UOwVPWj4|J4VM*_)Qlk5pFo;D@xDZ 
z$PDB3gLr{YzPiD8BX3j{tzhLC_ zpu}2~IPhd13dwXqOAk+Fz7=Z0mn7PlPSZ7>1ZiNj zC92$mS@p(Lw`(j|;Cp#$*MU}`AN;GIpm`bVz@|&X7a8`MpYaOz{&;#}RGdc)rll#a@*^ReJ zC)@I;2>LdBd(^}4=TzO-S9XqdeIB!ht~VU-IeIWHRV(r<(D zpf`P10p*9^;Je=An=xn-=!5byx3=2oSmRun{U1J)&EO(`X;Z4JP0sqo5RxQ`NkOw{ zi4_lO)!{S;KIJJi4ZK~CE>zcK!?NI}%-~BLZU#WnschqoOb{HzKE3NnqQ~0G&7Llx zVEvywh+&V&$8wYu;Fdn{3t@##=y67{aDjto&fzhvsCVXBv>FG2miAab>4tmN!c~4h z_mW&L8R2aZmbU8UQQx=RiwEO?xjwp;n}X9+?(pOWwr$+P_l$Kbd0$s|ldi5|v$FgC zJ!GLf{?O@g8l!yY$2i~~CgO~7s_Q8$eROC_bzO-Nt^CrqG7ybnmpqi5u{t``F%?Ty zuKUtl$Q%7u2VRGm(gy&17b9Q%bf@kLDfitWT@!+py5*`9>6Ozp0l_!ajaWBCs>S*q z;n0NvB^mnw06+jqL_t)Z3JvetchSAtwB(JJ;kS}&JOQ8lr(Bon)L#$>d${H~TiX7h zbCcI;<|}YZE|P~Z<)P?UxXI@`Sq_Fho%jGB4sA4IAK8{h$EA1gyr*+`Qw5yz_k1J3 zoD8zQfK<8bFMRk|Df_W$Pv3wkIrWr^q&*YA&hz9$qgBWbJ{5I^CWxwMYC_xoSB8zk zzW4TPyTADP2QxJM;N_FMcV2uhP`XBsu@p2DmoV}Bm{Po?H3{*!m)c^Z`?YCw#(-XOwpFEr)r}u>O$$`;< z(Uj-?36 zJ%aZky5D{K_3_O7G6Sg1J#S=C<%!6MqYnvNK~|o{c{8H##SAVy7cUet$fmESPoC-# zsqhRL`t%L-+5|U}N6&onZ^4J5%t8!f?%~3~9Lo z#`j^dqcft<=Ddvq`tJvMa_es|@Gg%w240IM!YLU$o@|`a_)qn9#mN`W>`uM-Qm>80 zD6SuhAFW;Ki*Z-jjprK=c;_>!EYDC2PfyJTgT9nF)7LYvymEGTIOFNX%a_ON^;9x6 zHopDdyHn<921{PwZxp8I%~#JBQswNl#pS$uc}4TMe);8>cmJv}Qw-)rH2v#fx^7X& zf`E;XZTk6qd|kaHJXJ>W!tR`JF9k-}Icm#8@oIjsiLlT{=iBVpR|(oy_#Gjm=#dV_ z2+s5P$SeS~8E<2lFeLQ#P78ZN;AlTab#gNg2qkr37Qd)wqfGBHjK;S!mOp#ExY~{D z$J5^<@yz2l;K%ZN))y4XS3h9*rFRF?P3^K`dDlO;_IZ$3FShQcPYlYJS+STws?h!0-VzLMa{JPUNye!q}3jBySdCpSVZLvRLC zea_fmoYAkIPFICnvH13E3nRwB!!4Qs@_g}h^3p#n0`ZE!Q=R($?SA60GcX%ljj@cI zxBD_ff4-J3+pzeuSoRjqlI6=S7@s^*|7%hBcndx1f7C({!>wxuUh}W^b5^gzb1Hq9 zc|GM>hQ{^94$a_|ez0-O7X%i!j77elAS2iGolL`iUOGd?E2t`#l`* ztF0sTZDYZO^jlnfkFA~s7~GBHV%ghA!X)DBQj6&}R(=$WIpn!Q&ph+g!QE@GwU0sf z<|})goNa75m3R8_t}QCQ{_>f|%;Q79_ZDB*+`f}ug~y`G-NxCfn1!b6=~C-7g@|f_ zqw5D2e$v+!W50Kb=ElR#)hCX`Q}N3mKK!zF5mKu~0yd58D8@pw+q6;VX~P7fj+sNr zr$qN;Y`mQ@DcD(zrgy=^Qbws}dPi5uYb0F4K7IeZwv5MWuB2`mEIiuXHJxA2%~kBE zV9AeGq*$1A$_jq0;OGy&a8R|cWtj6B8HCgT?|Kax$)+DgcNk= zZWWreHI|e<#D_24^TY#wNZD}j%c%!~Wz$lfgkaNoZLfc>=WVqtYibw4Nyozcnd^Dh zulV#i{bw+hu*m=~3s=%o4o#iGEgdk%B;&FY#qhOnGlp_7gLBJ6$KLy2`o7F2-O7h( zB)#MYBWco^Lx1nT{o6lX4a#uTLj{5Wggm+yXoK7cM7|WA^@+U2DV5~~lm@%B$BVZ6 zV1ipO(~;#NC)~qGMsJ^n6O?9|Sz8%+r!KXqO1W0w>fxiLC63o68hP_gx%MnmYzg`KA4fzbIG&tR8fYK=h8TF*_dN=!jdl!-uTk zOVdQ~jcCRI=piqDr~GPwp)up*@TxZSM4y<(JkVFCQw3ho&;uufqs#scy?C9w&LfwN zK1vnfC_FMZHB`w;gJFu6NruWXB7oIyE~IJC1?Q!47(y`I=#)!Q98(`9bF4)bS7Ve^ zhoebVo^~#0{bNYoS2F0lQ~-7)2VWR8Ku)z}DKB0bUgsD(yP9?iYG48ar{0lKr;v^; zNACt(`TL4C_zL-AVZI<};)diggwBq~Nv;0Dx2lp>9ivv?X zm?f!=lA)84Yt^^@u9tmn465Ir7_;77GZya?JG;aa(D9qMb1=KYoqE)IA(uX1B} zN@hdzV_aK(&`oI3d8(L9q*@Q><>Q0?{Te@mue^Cd2_ApLPc)9a^o8MhuMm$DZ$x`Ho!e0{`^&dXI6&fBG}LZRI5fGc->A9&Vjg zx7s+G7D0=)NnbQcW>lPG=u#GZ`7*X~YubL$E*ikEuO|XurPsM~6&fDMXz@M5D0%7T zycq0DX~)V81Um3&9MNX#s|{zK8Mt5@n@HGLKK(`el9&7*Xb<#KH}hiW!Rb$WGi3Qg zru0a;X}ivw42P3o;4ZO$%E9k;)j7JfrJ?I!FMewt__Z-$@TDV_AKHyE>Z3<+$|qZN zY<-3VmaZch4V|`QSdcbP!28tB!&gL%px_RN>|s{EJ9YTK!A-upqT8`OaP1d5kha=- z53ly^(09s3n!i2kcd_zEw39xGQ>XDYsuvmLI^|bdpyjOJFdLcZlt%J%wVl?u*^T@~ zVEL%n=2~sv-5lx}(CVQVzm`6im7fRmP5hJJpBcZj(a0_YGwyVrBRqIX7l&OiL$CY| z29ALR*9AT7K<}haTabpt&{qt7Q(9vWZn9d0jlN8oB@Jbi9@SL;RK0K;w^qV;_}A5< zVNc`my@a!fK+7No7eX}YSJ0uuQIoHS_8}mU@;C6&lK~oHP&eqNiS{JPj}NdjhqxXD z3!eg$+jH`fHV9+byScXG5cx&2>@Jya* zKX~Kp?niy<&M4_+`&gc^ujG|>BX4e@AAE`}p7a+PFRo-5{iaW=-+bx0HcfkBJTE`H zdTaMV-s>;Fa4I9`y^P_78%T-HU%a?`vhWXYpEV z4)Z9n@(8?^Cpg2^D;dUr^zIuOPj2o0(VzTs_rb;DsAlZ>=Kh`C-}>u+uzUT5LR~z( z*M_G@cYpOa-^$aj&^5{E=&_S|zZTA*x^CZVv$SUl%aU}p-PR@AfsUbMd3%b>{p=Wx;O9ksukz?)7!`XQJU!>l 
z$%uq!hB+Zmo=IoqG0roL`NE+mA1&bQ$lP>}ydpeDq14*UCnp`Btb5Uioxa=obc82BwMiURVosp1XW?_h!axZ6V}>kSFN- z^z-wxX&=7t+`hT{;`7gT=Rf;21NWuf+3G!!p;!An9pBIO^$M?U&&u?SS2|^4sH0a8 zB`!eSikbJ!a@U^Wv!q8(|Z{Q-xBx6dz%bg}l+#itryczlbeZ9&IojOGAd*l<~Zp;q?`31ZQX zuPx)djV5Ptrm+S6#+c`;=Z!-5c;EG0qL0%BV}eC6i?Jt!?Px*9^P;|FvEzvrt1Nm> zUrb(WA+5a#nrqNPmwdUwV_v*#^97@+kS{_U2^I8pG3zaqdPgM}^D9qyUgd+kdCIYm zcRpV_1ZZrqxMY#*)r_?7zWvJX?Rc=jbtJt%6b^bZUkIn~bogHV&KETUlfIm4VU0c- zuj+ui=iDGR1CmXt9oVI}RurW`+Vkk97Ix{3&^fIUL#MTe##-rNA znD;Qk=GmfJJ;P7mW1Q^TxZxRqj{3qy{is`C7SXLd>yw*$7~?J6Jl}%AkKR1f-1o}v z-Pg{vK;EJkp3)0@M+m)ksD+8yDA;(|IQL41ZoD38;ftJKI@2Os3+peoIAya{UgtKz zRu@nE!|AFoB=o6gJKL1gc%)DJQiV+Lc)W4bJR-~vgDctSw-yU;wa|36@%hHR(su0$ z@l6X!^?x4!`qap@XEKX6cYE&nwr7DO>O3H$H-WqYEJ-Vqb*JR@jx zwD>ue?O|OfvKg!KG|nyQoAz)_IdXUJrvJ0HmOvrtD&#Ok{?q|C5IT`h!5Y*Aac{h1&sm zc&?sdVo+Cp)d3_89JrmpZZD(m>bL7TsnjW_?xCam&756b8$Ak)AmKvF z)ZC5qq^XYZuXgghu=1x5!~{x~+@fcH4M*kGFbhx8yGAfNrA-1lgIi9eTa{%f>*Y;d z>XkDb53Ku1%t-#<{bxU&7wZPdd1zNq<|tojZewW>s&ig+Gm1P>+$c|Q(+cZ621Lf) zZW(!(fr*f{PQN-fLf{{E8VyF-8y(iBnT-;|SOa!vkOWwdTR5ah^l*rNjXit>vo$~n zyIwEqj|CmjYH-Cj0~Y)YT?D9&c`z47W%0QllT8}a(ImnfuME?(5e1c^J=?LA2p$B2 zFHhq-^=FLyMnAc5jfc_RprD~VCT@A*n6{AM7oQYy8c-(>pIYeV9Hp)LOHc=c$cj+7p=ig>d?`pHraTYHZ{6;KWz$jXsm3L_~4q+UmL@R z=+1-FwYG~EIw0Nuf!W{31Ag$FCQmcV$HN|9Wi2e|3>}zxkVQiO24-P-OoR4N z%x~nOHM<#^gde}@1-b`hI#XBarDF}Di@~V_)FiG4CN(9?RgepkDBz?n^rhxY}unox-*#wn@IyY~YE zy~|#Jz%05}`qVp3h^EQ~ybR(Sjml4%4aey8h7eNiT|H~OTIt;dyeQd^uTy_=oHJVR zt6mFca}U1@W054V{r0*nYYgTJZFssIKD!?s85le)TvL{uw1G6_#mmgmr9FO-EMpd1 z;p^W3j!;Kh8(ra$N2c-`b3#=LeJ!sCv;)Ggjm$$d^L$z00BX09UF9*Zf6U9F`-NkO zocgyq@V}T@@UHQqdcQ5tX%1=1_gnvH-F8eN`m=JT8uV0n-@XfJNs%`=lKqlbIFabs z@O*bYkTT|HOqlM{{g4HIA>3$=CQoAveE4zwukk>J{*yCWHeFf>yC0_D*pTcCt7lMG z$Iu>Zg&w!Fa(i7Jx@2$in7UNFVLkRDgPfc#b-qeSn;0EWzeqXj3yadcsWM5PsVKm`jg`HUd-4B7j}h?xZKPC z7sW~@pJy|aJe%j`gFODO6?>LfEu#l-wjaHIc6X{+qyNtD{l@NhfAf32Pz%5Cboknc z^s_HAtmo;MyiW9q^`$FUcEA6lH+O&gKlrzHzx~6vcJJ6sFbVyOUwoQ@pO+g?<6fv4 zCwk#$zS2J8BX7O&p*!ics=7|9`}V%`CEVE4|hiotyt#_h@XCO zc|xg(w|uzJ1s4nFav_6Y{z7?{ZeESG4Wkp%hd(14b@+zyDqhfGXf(nI2}1ASeemG^w1bTec@Z<{dT~~m z{Dpm3#%o~|G8Vy#{pi)lC(K55^JYh(IO9CCd3Q4w z*=P`M#!*sba52BkqIxOXh0%+r@7KxLFvm#5peU{Le?!C=HY z_F8@H%~xN}7q7d#^U(d44*hl{nnDTM*E;e!{I-oOmwdC7Ixthcr2f9qx+} zhE^Wp)7SIdwV^FTf8H9oIeX>3a zN#Q@6w=Vtt?A-Zj$3Ogc|8V!cAAE1T{_kcSyU{p6jf7b`UTB#kEfyWiSbjKTo-jP8 z3bArJ-8h;IsP?nT3=bDR|9p3&#e>h@|JClo7Z>W=S9c$N@X>5ac`;)+c|Vn&J?JZq zgT13MMl*`r9QE~=U)-HLSKRcBuQxODGYqp+Xorg}MA*3SUV3#WeG&Qt-xCu*y|A&Q z`WbQQj8G6L)FPvWEN!bj^(7w9`jape;>qjt7LRNId8l@D z2<7983E@OM(;A;`s0hxN3+i|(Z|h0x(elZRm`|2={DnN{UwS1&YYR~tupi^NMN>vb zAy}5zc3&5z3r{|k17rPw5%+jw(KG3=Ho4oEDV^deUOJ3jj9Fh?ytKQRr}^z-=QDB{ z@8&C_+MRLU*w@IB9<)eSy<^l&W)YJPEYIp?z=FM+&3hH_%)rxp45i84qNw?S0sU%= znZ}(v^$D@nwGFwDn}uKUG0vJ_Z`7A=)uvz7hM~;h-xo&3z-Rm(gI~;j@WIEsk3Ks; z-tgD@Lg<@tc`m-1ZXa&!(;qB29ILM#Nf))L7~sb`pKZa^*#e^lJBt{z2okUQs>LcJ z-*{d(zFG7*+IV7&kY~Y3U4%P2QF~i}z8$|;+Wh)b^QCz7!T~W>TYODIJvWn2i*DXc z8f%0@;vG(w#s_ox>up3UoX_)lYuB_hUl?bV)9-DF{p0uE+MUi@T^~CfoG*x!v1npp zdA>lYLb9hj>M-{>+nAN-y|GYic3$FsBb#*Z<;G&KhtJT%#&hG(6X}nB;R}c_8b3bj z3xxB{;l4aVH~lqX8GFdX7_Tk$lYPjZ*}OKK^w?KDPd5ixob`2yg?n@7lhJ1moJH66 z7WQsRi@#||eR3X7r-Cj!SI+zF!LA2hO`|93Q)|bD%USVB zhiY=8C>;Io-akiw=Q%M(E3-db$zQN#g>0@@#e+wJv~|!v$C@L@n7m0~6+wxkzk|pf z9TzWuz1vHfcLe7RMp7C&4ax&a^-HQ>(FUj@l?2tkc4>mFte4I1c^K|NVdY(|Xka)oY@} z#JV+5ozP>+81@Vkd5tRyxHYigR`V?5cnA-rw1o_f08sLJth#tX^J}K6NC}Wfal*(B*#PZ%PDT{qs__$cRRUCG>)0 z^ecn>8S*t;Yi`S1n%vOfSTbCr!G^zL+6Au*PC|Gnp}JDkk-7Y2`-bRHo(!;wt zBi)3rmy7uqPlMZCa<4ZoeT29A^-e!c?avUWzKpdaBZFSB9rz&g(LJPBFVe6P=<75f+|SG2x;Al58rXiT{Ypar;xbeV-(w?26aK99auUs50TL9 
z^%+i+>y+%Y)iG5rQilH}l<)<**Hfid{U~$Sum97wBQtWxEQy3H`DnKh#}viq#-DZ> zatE>^`hY`vzd>9cq088Q1Rmlc#2HE`-DgM-|qZ@kUCh2jMt-i`sp)4cyY8D+jH^+e)7p;(LIpq4a4Bh48j50;93=v!BA&&SS7A|7nB3gP$?c zQ~cI8Lu379zQ|FJImF_&Gqj zc15>(=xn$^<8?6hcgA0-{hRcG2DS8OAXmPm1ltYz5Za;suGbvc?a(g+A?n=MwVoFw zFXTX~Wy-HQm!6eIZyy7pw_Eh>pLXfD`t@7qa14aH27l@On_}~~tC8cvGk+h;S6v<3 z*tptjm7nwt_rg{B^uf+mxw5{id#Xa)BvpdE2QW-q<4phC_e&K(JUr+}E5T{5H@H=2 zEXa1KLLbl$U9aNZ%j@R51%^_hXJ}u@7a6WI(D3{^kYVJLkKW(?!+-M6cOT@H?S;_` zyBF-Eecu1Pm-7rXkPG**Uaa#x>}B`4JZi@yE5ijt=KZ`SpD$#^Grcr3#4)hW{8W96 zLOjnIhTx||UfvzN9FINoM27Sy#;|k0nBI&r3$=GjwIFXO6Ds?j{3&}T4su9x25c;}_vZ~w;I8Preje*E^?Xy&QR zWAnl8&)+}4`(<&p@AL^egSMA!Ua<_U&*!b`<^TK_Uu3K}9uE(9f2FwRLRNfSjM~5P zZ~xxz-}^UzbN5?6c)O>(L%R?2YX3qk;0NFCe)j2=-LKBy*u8)L>h9x<*YgOz75xlL zJiZyN87lHv?**PAh7p^AKs%Cw@l3zq747BzWShm2xsBf#S%io|n{Wd>?--aE2V9Q_ zbFlD?Mu$g#xp%*fP?L`rI|f-g$!JS;ANTFKMn-hhR$%>>fqZ%6G2Sqs)ceP{L*7E( zh$ZbjF{$f!P))do^lf6=rjTe3d-yYSSO0@_2M^C>bm3LVkjQhB@$6KF)U`S2%`tY- z=<#w4jscM;Jwx&n8A=cJW02yV#b#GJ@CS zxeWchBA?HoYm>Q~8G;!RErLXP#@xQt$havK!Lf{OjMGAC@HXc?O8)v4Ll>j0y3u9h zQGAcFhmkaylB45^JR})e#b&pNLZ%b&j`@fTpw{OCG@9uv1-aC0cU*BE&?DP3LKz*Nl z^2z8f{XHc_RAbd=#e#l1BfaoJo)O4~uJPjL#r#t7!N33B+q*a3cy)I>BRRc0b?Q`M zG_JMb|NiySIuzeJ!Keo!X+YE!|+aJ65}#$;;e$ z_(ltL_IP-u1sHV+2Vk03djgJR*@hc)#4D$hExrJ$lRn zXS^H#&3ooZIzhJP3$r^Od{s2Gg$T!24p$;*i+n77gi$5KqtW1h0L4ox|&04Rqm|7GL+F(ky`cV(sPAM_Z6w<6tej zJ8u7si05&BBc5#1d_CO_ZSp8Q)%`XAHx6_^U+iQLVxsYt0~xcAbUQdYJwD9c()F1U z%-km-?E|EF+UB~Xa58GfF_$g7kOyd>!`M+SwkyO?s?&*OGjwKi8{z(ja zalQc1O_u&?|ASpm8zs3xS)WbX(H4P&CC16YUFS+{=?nL~jV{!=vdspZA$+1j43B;0 z(8wDN4LVK8-+tRe&DTLs*pnWT6*Sog$M(kVHJ>ckK(m=(ArN?}rk?#bxo0pxm_b~U zmlOydF>qEVZTiMTQ*1N|G@_T^wR*j%b{urA3?oV%S~)NH9*o%)8xf{Ry}f!EPx{p6 zk(MGFbtI&4;Dz>%7#oz*F1j$scna|#o^*zOn-i=iQ1#&OaEZ?80nw%rN^=Yv0*62N z;d98}XjnZNs$iFrHLS13HfyL?ozT~i{*NNW$MXD(2kkN8CE|_Xza>L}$!`%k_{R`y zfT%qll{YY5582(V#eyYAJf^OtsweshGek0=sZn}bwP%YZf>PU)U(ud*Oy}J5m1Cdeo zRVX-mToL+O$C~zjTo;9eCt4B?9={F#+cCbQOSZb!L_Q2`os*0P8Qk>fv5uxA(K7lx zjolxhMs{_Lj#lr;ggL2ZE;6zv4bxp`CV{4hBg{+NrWZ zc2`{rj&@U1qF9tNXDWIb%(ckznlhgD)w30;`c$on$-IKS;9!VAj9gJ&`NLxsORkt>6e409|f6)$) z3d0qK>H8xeBu1ZB7(51YR7xA{a~D+k$}fH&Rsle9`AoZZh@RC(hNx&oWUni-r2aY>a(mea;~ zl(!j6hgLN8Z$f6#xv3|d^N67y)OS?(jKyvf+=1v?da2%cA34FJ#`y4{4ma^vuLpA2 zW=!g89pJm3eqS3+9q@sZ9$sZCzl_LW%rnbaGBgb$?HjHUW+kTY)IX;lv~@K!O2VW6 zXi%4Nx`XcS`yqMbgMt7dx9J0qrLoH6DLU~!bfS5?cZPV{W5$pm(tWZY`tc@Ki2mI8 z(IfvxzbZGm4G!F!gGx7+Rc_52p~o+k>EZoRg!K`}7Gd>1`!h3yLDWijv>)x?yawTtc zy9^v}s){KRV=aFAD zjJPL1Iuiqo;P(9oN*oOCxds5L!zYf_v)9|gixrwYtmJ8KdXl$IP!yOC}i*cu5r=G5taMl>m z_24%KNA!@jSB)BK#&E5a^FMqkae5+5klON*~V===joYymGF>H%9>>1{%3{d8B#*K%)4}9Htdy;XF z&h$r_hvuCAF&;6xGLkZ&7@w!5J*v-i@Q$fXBei`3C&Om?7cR;1vrFk=;dSWtp{Eb;UVG#9+Uw5h!__mUhxF>~*)zMBUp<>g z^7Y-l#zcI689qA0_%~loWazV4aJ%-lAav-UuR-d&5&Crdtq60$AiliB!=2vKSzifw z&VJO`dpaZggmp0<)SkTOSNMs-TAg{Zg}qCof9Ah$>Sy{LW9jwAkejy{1dC(d7;vVL z7`~dJAD+D!V(;F}6Z-U-@izZPU#;$M(tQg@HqCs{S2uTC#IeC311Ycf<1Ka=8x9;W zMq9w^%ZK`bvDu>3>#w|8{QScHcrI-I(w_}9Jg=Q?s;l2Uo8k4|BO7-f$fNsS3mGR1 zV{<({dQfcq(;3cB?5L6qlc#7nIp*c%jm(sxZGbqnL&KU z>~H$2>(=yZiLSW?MsO| z?~GZD!t;oLm%EJ>#`doo!)hZsW{fG~$h4;ociGM^e135@ruD4OLpgu&_*pbGl2J%2VH!k_L!#=VvbIH7-aAF{4wF$@(%wdqqv1=U&hpo(c!D1 z=X=gLnhXnPlbnpT+TiKxKAes{k+J)WzEZe0i*@D4gE3oM8_UeM=Y8GtjjyO$9RJ|c z-Ge(f3Lo>@ENDNh16v#=r~B!PMS|l!yS&hI#y4NLDfCx8-|X7N`+zWBFUEsVJJDcH z>v_c7NM0sGb9j$2({2p#7CY86o4MKiz1l{BVZV!cF0Wh&9;boyEGMqGcTK{bQ*cwx zU|X7E?x$`kXd*Yl3uJ5%rBw=a|HwQ*e}b9k*{NH~x|im;SNjZTs8-p<*q|=o_)4~` zT-WI>oSxCgSppr1=zp9bvI~5b4*$9h#<*6R3AASzIx$$W=s=JB{dH}Nl0)RvMWg5B z;d!oxK!t|@rA7`^-<~rFAk)X_!{gr|A>^KY+4+`i)YUzlsFDo^?WK?r8vLFt{I79n 
za0Tz0Z16Ke=r3NRsap6&YY2-I`07{;-8QpWpYw$djFuVZ-8aCH z04ApOVtC7&7ZNm~qcV%WgaKAP>OfB&tP1*tx9c$y5QX?g_#vx~*?{ao9W7diZ;g)K zxp4s~0S=EY@zZ}mn3s;WDNuc*fP0>a9-Id2G(1!WzZ(IhISA(ju?xhl#&aielKoL% z|HiOAwAAs#KSsUD1CO3$*aODA*ef;!z!bdee$z?i4gF*&ZB!+aJ(zkqENv^><#tHJ z!-Aj0$&1fX2o#`UJ(SZQbue3J zH(tJFkaayhTn717b_sYDP_?!n0<=|`lLm;+#53KSqx(PyC!Ion=`)#h9=+V;G6phu zd&mJp@0NT9P^G0!X}NRgz9CulPs(;5hQZx%v*b`&^}tUKO`lll(@sc|X2eqc=x%A@nY{p}->74wiJkzAwpoO7J&Pino=onvd{?Tk z0!HyEv*@1;c+k4xQu7@6>S7Fg@vEk#FQpERm6`r6$Mq(gRk(X}b`#9{`mt{{Af!p%6@4%0A6fO%s|J|%UJ*%m6zxA^zqo{EJuv*u7jNPtFSqK+@qo!nF z{Ln_wgBh^7iB>p<$jX-0{iH<)`OH{Z!QmZmLu>aVs}Pa>Hl42dq`WD&%HVqw0_0Qm z>O-skY5Qm#{gT^wio8Xv_R~huH#F?&43B#87PJMuH4o5#bSII`ON0__YGn}k#fXCV9eb;$yrjSrVY96W9u$!sHqJSSa(-{2~(>wOEZ zM1}XVa0SHQ4e8L;t)uioh3~LC$d4^v` zoSPY$8RW?}n@{x>KBSk}-}}iABea+0yq#aPDOiS%m+~-U9N-1Uwrs;-5QsLVlg$;*WoEZuj$Z z*W1_&s$S}PH+UlB=QI7hWHV;)@_wp!6duSEOS$;Fmv7$N{p#H1-9Pz@k9R-I6aABm z8PeMD>&W3_!4{X5_h9m6u(*+7pNuRl?Bim4u^*#r&y_;d(4%7+-o4C{2ScbuCZ7Bg z-#5CQ*Nb>h-N{3^2L@NMe1$n+5aLa4!4~ZdV02`aY2@4bDbICn!U*FF0DX^|I(Ug1 z>cE0oaJ-St$*Z4QMBw@Dd6*%1Lb_DuUIvJ38BLDoZ3{+t2y+UfIfKR+e=5hQ%s|LM z%fQ75&#Ri3C1WHbmNxB`Ielp(z{2pLiBXp!*>jHpM2Ho!n88dqiHvBMu3XO8d$YFU z1(|1UIMMEFhZpin7eg0K#utWlhO60tF&bub!gv-6z&i~C4G&NrgLGLP$Mal8-xq}r z!XG1mV|mMFU<$vOyX3}b^khaCo|q^5%HreC&X1?Bumzr%KRtgje%e?vBOBheKjY00 zfArq&ZiX?&2kpR!`p(;L&LR*mPlhyM0d8cZWPoAt)<=vD7O;e{V3>vbS%&=dNJs;G z^FU{87O!2qDMy!XWUQt06LP1)LVvSaDdRaqEgiX#@tBAEwO~KHbfqz+QK)Ct{9 zc$JSw_Gh1-%Nz7s2F}9Xq@U*tk#M{XaF@|4b6zs}BHoV{1O2V{zF&G9?!J6>_fiW0 zH!oivW9)~Y+W7Iy-S7SW@9e(+y|<(M(eAyU{CGAgyxAhkR~h$iW&FF5oEY<#p_bS6 z!`=CF=hkyZ9H;!Fw-&7M;Qn0nUA)pFK(yUyO!j2!&QE06eKP$R5@L}4)I8H? zd@ZtKKYJe8WsF@m1?3K9pI~riK-T`^Vb7NewMS|*p*L)vdAUUa`YevMN_YmRdX3G6 z_6VQxl=rzldSqMtA)7NVHr8jny_rY1us1x0zf7KYT1>J5r>_)v%bK3QD*V&&Q#OT6 zU(=-t0n&ISZaMqOos5_~mL~*A@V zzFL?x7WkTioXOh=V9a66ONGO~#y2!tAoKNsIoP6tSn2nZ4gEXZCY3XlC!94lj%w{r z4K0-c!A2eVv}#}>lt60 zzx3K`8nst z76-4icyX@r=z3ppO*pFB*n-%-7U|iigu8mZFFxiAh{o`HIpY4}^UJ&UKfk)W^Qe6k z`pS#LqDANzPoCa=QdpNi`{aBJv+Yq*-4E`!(QUk+ZBy_E#Vc=zgu<=eOh5Yiq(1(2 z&b@3zqfs+QB$DU8!QA7Hrg)2EY4Y(J>{4vW=Forhj|77Dj*l)Z-mWE{^8Z z8Le|CW#~0t`fZ@tQei2@Pj?b0u$^}sh}q~|F<1{6vCYWdDM zSOY^{BhX%sy%cMZ4emhURt`MXrXJNh%VJw+9=aJ7TfJR|MYsr`U^G&URR@EB#vFr7 zGu6~xuDtm*SR7y|jc)iEI2cYJ=fl_VzZxz?8ko%X^k?AYU04m_ju%;71Db&+8GSoD z?k;s1o@*W=K<`|*nd>gb0|mk5#m6XJ1K%>H;Lj*uojihvO}HRnl*FIGd$2D$hfmq*BbngGfxmrS z($V97DcECq!s6wbL3Y{}V-74N1FL>Rw(E5@GTYycA#Vm;!-6)VryJi{;)W0SdT1Vs z$#NkZ$^&fGdeUF{MqiO%qE|WkHIr^!=ZP_)9|ITt=$MCI@LGEer?d* zwDFY1Z@*2agX&(Aw9al<;po?2>cb;LS ze1n9+2L*kA)ObGOjVeEKQQ@F+?{X^cVr!epD{XXW!9r?B`NfPfi{DxN!MA$0_8%F# zSC2Y}M%T)%I!DH?!8qy{bG6YhUt|P7hw^^mU*m;T_`jpW_1@SickoVH)e?^bKialq z^v(Uy*Zn4fMZ5eeA9%pkMV5h2rQy&+A6(_+002M$Nkl1WYW99|9%YWYc|Ovv4KhEj!~;gt*o>G>I~Na&S`YVJwC!Ya|t=Z z;S9WgYph-6md-$>lJrJ$B#mrGpYgZG2bByi;I4v0!tgNlY;{TUgLB3&ci{ljZ)7kH zu;HU;^kHPVAywDL_r6kC2D9fbVyNzY+K>VF{NPZ3AY?JGx05t~mTm!X8%^3|;JSrK zMlVkz)X|9&!Ok25*4*OWA6e!YybMqY1lXzD$NVnPt<3bm$)6%yu>qO-Bm^2xH4VKI z$f*3)j_U2-o@a>mL^+o}c`oG*o=sQL7B60Emmnv*Kid)b;08BI{hem%&)_X_KYpn{ zs9EruOLc(n7yWZqM*VO>(LbNPU;6Uu?(hG<|9JNoAAZy)!(Z?I>CgUr_xJzg=i@zo zv=`v#Gng>G_=H(Z%eV3#|10mkkrA#rp!6@>-0h3JdM^~;`?HHzCMNIU$}tp{J;onK zPqD%of*5O`%_#kJpTzsTpZA}Q=om!F)P`C%_&nUprSK9wte>eap0dyM7Ji{H4m=f4 zGDcMD9K+^(zMc#j>lkZp6~p=I!$*24&M=#o z?7fU>-d9c>J&|$B=8?5a&-UYK$sT?|ybJQ>ayTNucN z(O{rKi?9cb(hMZVuVn-pZJPV>JP$V=p=WDhL>D#>R!6__n9X2ugps`Z$X&hnGw|~k zeJWT6S8c?oXOTslPi)zA2)_*d47Cj43{H&TXY(F?F@x%J8KHc&zyM@ZSq5K5Ax1Xg zK^Wc`NtOYLQBT}q@$3sxNrtsM4}VS=V&^`au~PTBlzzM5(Md_^n9XKAoiNDgYuD=^ 
zcs_Qt&HgeN@>mp`pTURB86_Bu8I8^qMnoGi27eXrJnCP~IB5e-p#x5&+b=wKq_7^B z$sO4UXMryKErV)tjx$(E z)Bbdip^t$}oL`=(vq@qL1mcFzB06JlK~^<>xG43ctV3PQ!ln5@pI|uiLU2;Dm`6`?(I2{TrCRS zitl<*eSO7=x0o?Trf4{x47C*_pfDwj&)SFX>5CSbJSQ`Z>Yo;9jv9-?YYcq8cCkQX zuY}_*YBFwe4zN&T(@bM99e<{9ICPKU{C*z04AZ{A7=GjBVv8G>lY<40M=cI~TRdjt ztiE)$FB$Ik#RnQrRNu7>lg4J_x-rB!wLHl~S6#*-i$6k`&_{ZCrv=c1J+Jee)=oU} z!J_j-MrvO%m{-i3SJHW&&Bi7;w2N^;oct>pG8y3UZp`2r%3D}Ge|sVr1Mb$o#$XFU z%0I|xIs;q4$CFnX15ZwK420u+w&860{ZxByoM>U>NX_)GOzP% z>9`GPeQ_j&lm*#mYdedr`p*x)|JIBNHf#MP1N)uY?S&RLglT!VP;KAqOF4U0TuC4Q z<*z;|#M{x`fA~A^wGsI9yVqVg***yEchDlqVeK5%Uz|VJ;zRpL9R8+ft6%Ou>pAOw zUxYnl92}L=c%ePv*tT?w_56! zFwBI(9y53$ERf8Ag@A?b8)3zY1!jO)VFo5dYv^mDTesES_x8OtS7zlrXXQ{M-;bYo zvYNX?2-~E@rcyYk-B?u-Sloy$*HxW#j5Bmnpc$9TR*}*U!Piz|H&8_Jgfl~tU zJbvN3zCiwM1L@m7y1)~e7I>EXNG9b~bcv}W&}`3005=_vXE-IrLc{fLf1UIzrHN+1 z!2z<>TRc`6hw?{O6=)!O8<4dKl*U@3H4lvH+EUPTgm#UL73jwD2l;inU-=9Q5M4Q) zfY<^t@NM)%NmNxQNabA{+frIl5cvQKR~bELy{-Gkiw=bgJ64Q(FDrC3VCe7K1=7j%iNQi3^6Dcr*4|>ygmEEXsUz4N^k>>!y8e_W zlP;X0HGSr^d|uzmjeJ{KH07n|k$!Zn%p#-RGsA5*Ii#n*^jg4pf$ctEQ%{Dq1*(RB-)0cj=A6u>n9Xaps(5^AlmwQ=fFITH*3;)LZ zo?bOqUoH|eSJ3l}B{#sOH)gjH6n}JprpAR-HwhI&k2=)bWSjYcSlT>hr47y{j?$cr zRI85k{-ZVa8|$gq`D87_m{-rF*N*k3Nc5-Q*-H#;n6J~T_dieyGZ6?+A@y)vVRQdd6ROl5^84~bHy z&RWdw7?0-C(Yd}P&*)NJ9MI(%GZiO)M3IoJ>t=DQH+EC`;ZuEfF%9V0c#fCr>fG#f z`ommMoh8|nf1!u@;jO%MH2N8r>`lF~?_^kGsdD6_>nT@t_1H~E^KY6`T6^~GbOfC` z(#G;i{6UuKXso5tGbY1m$o7YPHgq+*{%^m3>!j38p4GZFPm`wf{TMUUCPn=@89hy# zl^LGp;jwWWT}sDxjB?A3w~|wisB_#ORY!-)ORQA&jR}R8TqUcZ9rEzj$LdM8-}06* zs8~i%OiGM|5nl^4D`j0f6>1~z5@hY1XA666OjduUG`{QwO9|`G;)m9hnqHau`y1^= zOYWjOwA4-Z*{L^)?TFa6nun3I{@^_)tDwI#j?q*{!l-Cj%v#i3$MR&aaaf_D^+Vd& zCIyX+l-+~c^&8iB|KtDq&vw7;q3=$h>h%Ec53k%9@6eu@i^ciMfOjYJF7t2+aD1y* zs=d;GJP-}&=i_-^Wj3xZb!-pJ@2C}rc>C)V=AN8+KrkWPzb zU7d%4rvP#Q_0d<4jeG#r0f&;C2Pd#(Y$l@tmIAF=H4ni7B%(K=5C$|}<4l;CG2Q;S zc90j?V*c)M_CQeJC;Rg%vIjg5rt|h+{YMT!0e}p!!)}1urpxhq-OBhV*P(Q9p!%3b zF;J$My4A4(mx1{_3xU5t+6kG!(-6>7H9+!B0C&Q@mZz!mUj=^93$Ixr6b{6A*(4{tY)zQD6_C{)TDdCp#LLFh=Kj?9PN&Umel*a5N0apC-# zS$JY2uXNvZv4sKv+m!%JeS0+EPuLPXz3Jhe0;t84SUG)UAxpRx;}KW}$mb<19=1i5 zC)t9KGU76`GhVCXm0x{e{%p~`^!oV;i}aKC-r4>3!;c1#eA3$>t4z`-<7?;6jGoyi z+qY=pZHj2-`C%M zd%lJ6S$WS=uzB}(^Mo)ppM7$9_pg5WXL)d64FLaq_s4(y2ZiLxvo(6&&;ZEJvqYT$ z1m}q@e!0b!)AgH=|K1K@z1QLpz|TiL>%GFG1OT?MA*9Rs>4o~y{Tc$0Yc|Zjp_jCc{KxgT^|9%``FlX zEwBHLfBnW50K88(yFS{-6i=rUW8Go{%?q{TZ5ZR3*Z8v*Syv3|JpbQ(s|AAl>83@X zWHAo_SlPdM1K9ma_kQ0uZtu2m#$(+)08}T}q54{TZQio*(NPOE7F*2)fX<@<;y$t| z-G@^Z7l(i1ahvwk+j zJkH&@xsLIiKi%Ss3%EtFBgtX$#lnrcbYtw_E7X_J9*%{S@hoHE@38UM_&ZYg8TbF> zxZZ*SSpm`NolUeQkI0K%s7K-AW&aD$`;{dN%j^2EjRR z3njqUy*>5dX~6mizuEo8@BVW4;q}jUkG|?!T%E&R_l4*3riHlIFQ;c?!+ndkxgHxc z<1L+xbmF=?_i%QVV=L=lG|F>5UC)n1?_4eE?WSDyEY?U~p3~(yZ=2_@O6js;lT4m6 z=2!*FZo_jrYEg8ycn?iIC}HjJ9A+h~d*wwV;iIG7_>o!p)f#ydMMGO^|12=aF!IA^ z4%9(g8{6$Qu^dITd$ng_E0wK`InFrICfi7m&gpD;R*0S!=HS=~xjp$UniX8NG4PwS z=tp<%&qjd7Z?cC53}w-#IFj#mfojbiS@wERWj(jYD?LhI4XE#a@!$UAU$|Qu@M!~7 zyTw|9Oe3pNJ}>j}z@ccsN+7J)Ax7$i1?28QQ*C%*Vwn4yZV$j(22d!D`3v9lDn|hM zJhV?^y3l;zYA#S`v>Lzdf>J_dlcmOL*eFm7Dz|?i+fM}Zv^GHbrmV`zi`c%c$u|vJ zb;%G<%G-bV(cq*#17WHTXhj!oQ3aC26D;=| zk@O?E=JCwL%gCi|vf-(|#hu!{FZ(>!aP2S{{h$1;pT-`_TXs>lzOEllaCJuX(hu7v z%k-JmM`!WJYtq%D^JuS>*r18l>SOwt{=q8>3eSR3XXBw8oqgS<@9lir>{uPllycCy zi=hThDt)viBf0~WOE<>o#h4?bLi*Z+I-Q}FjH4T5H9~6onX!fH)G5t=&hb)i>!%J) zd|S;(bfhdIPRIB`IwfgeNFa$f% zZ)|$g+q~UE9_(sw<7I4E0$J4U=1e{ORvVT)qiZ&x>=4+>r2m;el4A1Wwf4zNZgxm^ zW7JsgaBQm60T#(h7W2{2P*fe|(}&GM(9oUAl_gtcMlXAL<3GH%@jY^?ytN~GdmwG; znQWCc-{3QIm$5`1ZBe(wdD}z3;Uh0Pe|!F#W68<0)iM8(RPp_oSea6zLucuG#y|VB 
zz?JN(?{&8y9}=}|>78EGuTA)B|5UPX6b!?Pl(%#td(+EaKJ8Gq9AL=I2y;rG9sw~-NadP_0`QkeTE3a+J&VK6GKAFn9i_)Iq zoh<#Aw-${ZtJr^iZ~WL*ZQgd7+)k!0g7_pW{`!31hCN`nii|ANrmgDeUq4lEwQ=cx z_@G7j(fKAzyTywv>BZR6j_K=0E52@El54K-jyBy7t&w}B?XL%G5tpqHD!SX;tqs!_ zZN!HHY=j$e7T-CSP9FMpJR4cY3Knf`K@*!P$T#o ztZ_;o@|3mde`2(Iu2Y0JUTa=2U!9Smec4lePeY!coK2J;3Ms7uKC9r|4@})n&Q$<| zk#F8&RINH=3yxz@Z!A7|s9)Op*KzlVS8v_e{SW`s|FZk6c)FMJa^zX{YG4b1^>D`r z`%<6}c>$Ek_b|ZWqdcr01z_;9`XZ0Q_ujrR-lssf!##AL%zKQdxQBTUxWhO(N4DAn z;9K6fF9baBAopOc)C;@Efi0H{*K#MIfXMd(iB87<_ka9O9)4d9Ki%upSJ&_N3bv21 zoe%VQ|J^rtm)?1McQjDu*4;06fAP_^JjVjol5xT@q_^iGA$k~Qk^oAcut1xWCr*y{ zF+gL18UY=(dwpE2P%L>{KFE80!lpEDJPD`=AOmiImkZ2T9_j;7q*wEwum?af!0~$S z2?z+7pNH(~ei`6qzBCiu&Rd-BJmkC9eAPobz~y9ZTHe}u@hAIBfgET7<&`tE25ia3 zl_Qte*u|MQ00vl15qMC@JZGdA54rR_UfqF$Kp-I@=9+Z}e-o;uB;b}12(Pk-cs$F-o@G}SP~I-A&i*5O zyr6kV14WF@y>~g!QyU&YbPE#t8(0Vc3nmW&qGWXPwmo6YyOj}-{z&3A5*iHwG-?`VD4beOny>~903-G=(^$s)^ zPq!!qd|}T7q!%C=n*jL#_($(0@3YKK^v~+2>dCMm@RvJAd%=Vs<~?{mt(_ z*?souwR9WeQm_uk&6ozaPYuo&XQuiq!j zna2Bx07`MveRxy|s1xPARUHdtU*|z>ERJ_=IvEf86vjTi`J$aJITr)q-+ueT?hpU& z@9*A!^Yz{P?_4bI`j3ixUkH+b%FjRiFu6b7-MW5#=Ei4vfImq;Kvm=G>+H(pYLSK) zHLqXKAVM|)w@+rXuf*3!A1&4Z74c@fY}6m=-k8{b58z}vu;|89*&Jjs;&5+O@Noa) z>a_v#EgBj(Hwshatq1LVQoGm_pxA=t<8|3AAQk-uM6j zf9b7*0Cn@k0Q6xlcUrJH8qj>=cpr_8<}1w)0CA$9ZPCO$eXjlaVu;f@nilX}CoFba zH2XF>XqpFszT$!djl2~Cyal+@+3C|Kv&HmQS$YvF$@N%2XukzbZ4p0Q_#fk8jlZy$ zBoa1=4VvG9v=)wh2%50Mtgs}M}1LTFQ z`6dwWX4lFqg%5g=E#J@E-u;9yE0_pKLv-kF06}v>vRiPq=;Mv7J?JbSKiA~(SWi~> z&gS9^Ery+MvFLQyc^@zKn-6$*O*I!NPcJ<3pSEb>nqodTXW5&d>D40aJO}mc@GL<4 zR*Tj5+CS(X<5`oyH{C$_uzv3bLl+8dKEGY;CH=c?`AxD<9*H_UIY!8LAS0P8VFMpsxv zRRc~ml{m{q} zfV^{0TG_F?&itG7p^xF>Jp48q%lT=thM@AR9nvbKu3bVb+rmH*=~E&e$vA~5ge*xt zbrwImkdL%tQXMNh=apYFDWkpR_TSl*T!k8c_=GDo7s?+RmEWjE5dYHg)(IO=tdvG3 zRXJMinfz6oSm;f=R@FSo|H(i4#ek6vWWc2j3~VyoZz^Ki%VIH>U+s)*00A8PkHrfE zeiOV27AH?UHQa69um%cG4Dh@|YSe^b>8}Um@AD#F;LZX*C;n?7HUOLP?g3l0F;suJ zQdkp^Z6Bg#02=^4;5~y?e$rkl^a}aQ8}X|59FWB)TADN96)RC5ooOOEl~)GC&6*)n5+qt6VazzF6&BWzfW7(N@Rc8?Ztj z8Vs_A_M&4j&oeq#i^1psM-#!uz` zj&$pEMstUF&#|(yoOcs9r~dS7@*f~cJG(gHQKa)PpMORwUth$FBL;$G2Hm8r$CwMa_LyP}UQk0}+sN!O?SW_}vOtn7^Kc4%vJ z705E*OBg!rx%}DI!LT0Lw?>cdu+45FFuTz(-(_ZwGagIBz z-qebQ9zdClGpFdk5tfbPA$y2oIrXOY=pS7LO6le)3FcfrU6>QqS$4K`JM)zfBC{9r zPaB8us*B{%agZ#@Gary08A_UYruL1_o%h@NW2#C@=;_B9Z^^MQyH`-<*TQV`*J@O| zWOA{z*>5)C0zG}+`MibFAVP7*tFkd&{X<7v!W?KnI?~#dTSsfJ?ev?blp7$pGY`FE z*Xd;~YEB2##zo_A&pFyiEzdU$hRLFh{+thqMQ7$B={DoF-G1zkJo4r`@q-RKN~C@p zIkiS7)m`I`tYy;GHde-Ns<3A@?M#Qy=S#u`ZF)KN4}IPt3CbmUj}WBj-79`7H#L-7Qc1hp^Ccw zrdm|BU)t!e(?z(0#c^>)-1#c!%(gz%s%>*NJHYf>d?(e;oJm`DsV}xRsHRLbr;Pba z{z|XB>go(nCD*1M!*h6{x%y=4mO~9gH~Q;ZQ$3Zp@xHjOw0-~V+u~+^@l&e$+gvlc zi>a~WWAp#>|MtJ{KD*P$(u&0ijP{TWWEQjaSU}*p#{m=H)_$))pE(}LlsI!Y78->!*aPHCpzJC+&5>mE z%)#uvO)%H~+7A#HpZ(UY>*?mvfaN^+wC~!Dn*pGOok&KW-#~3Zmh0`4LM`wpbcZW0 zvKX+up-pQ%)|GSaxx*{|Cj$G#zUNu3PXH#j1K|vOz!6#KMbX>0ukA? z`2)ZUR9WE*-18W=flQ@Yz~Ds=bW={~8#;a#SZIXlrdP5_i$2ECo3Ec~0q51-7kPn? 
zk;Y4`a(ZfbOkcj54Nf0NVd5I6vlE3K0PtQ53_2K?{-eSd_-G#RQXA+`Xd!WQh1}2{ z`t!lVS-1+MIaQ2oix&Vk3s!6n0Br2P)tenYHmT2-ZLOH}!ZncPP@xCJD0a+79~AaT zs5c+Sd*{uIBew;f^_>CN5#ZtaNb1w(2jC-sTo@lQ+s6h1{fxh_J^R-8*8{`6MIq)r z4|rbvPm-0KUj>rR*l#Ra@cQ*%{bt7UrGP2k(TB3(OU(%uM-pszEKu~VKIALx()!?F zW3GN#9`FIR-MhXSSPfVP;GSwBP^|1%#J|p)`gmjgwZQgEZ(iEHaq-fC$xmwEy*$Ye zH=jKTuzTQRv5g1bp0jY1jhSx&>Hst!UVKoD{rlOR51$&l=UY^K|7Snhz4z0f?%sd@ z?E#lx51e}xsClP1X08TQfAZ<&#@B;D-N$PIrh4X*{agSfWotquxn=@wNA_&MJY&2H ztueZ&zkuA*csw6246P5cvU8xc>lKjSLdmgosH`6VR^0raTxM}WJnFB^b=lR+8LCBs zd0VFT`Y5tk_U1tz|A6lOn>5CWt0Ca{MqahzuG7i6`tf${{q^7cZud^M%-h~K6U;NN zC*S0m+d<>D+KojEp=|xV78<@4QY5Np&gH#djOz@&>&BgtdA#P!x8NZhl*JtYuJl{+ z12|bYI^7&|Jn)tt#3lFEgf~9Uc5OM?Lddyu=lh*XCr75QErbCy&0#(|%xfA*e=plP z(zvjI{8cjJGofSBg#|Tq*_;Rnu0<9ZgbFzm_~^-aCQp zFZGP$?q&v6W!#&ZFT8zZJd(?zl8@q=2hGvuD&xep)k2`qG#008z}T|uk1$mhj{w#d zQq9ry@8=q2?9T^Y1HR7$6d%kE&o-{kpX;rZt|{k>H-E7)a=Z{yK3eP=-xNFx;%vfV z3tMALhqG59fh>-CYik-)S&L%cOaLSwuKY{oTRgaS?RMXTc-k8~_j~K(S|N^Z&6{B2 zjoaos49#6%_eP3^dyAaj8U;5U{sqCE{yQg`z zx&C+yk-+>^G&C$qcmu<@`{lp;_jCRC@lW%;?Y-uPi@ini_FK(C^*eq3_M=bI(V6J_ zXm1N_UkA4L_SmVjeK@%5u|3gui?BuBzB|+9@4eTK?S58xmiJn0dZz`p zhfgYV^~2_w>${uxZtR|Bw?}##jQwQ>V>jkm<7;7ijJxag-+De`A9IgvUs>U`r1fX) zXr*RTK;E{d;n~b%?q$X(ha1z0Q+88U+rA(Qj+^s&ZO^lbd@NFAWR)MD$|cM^@0K{& zOblIz*IUc0o<__Sr20g)p|!le)r)L&@g?=N5sfLk@~6^NtVGQi`V`SPSTq%mbGtd( z7|nf)1m}}Rx{Y5+Fhc2D)>NMRuA#E(kxJ8teu9$k+XakCOwLX2P5vp0r>(uVFC96> zOAotUSm~JSZ&fCad2JfGDpk0Z|LPZA70OhgB5IN*0aREXX-#Aeg~h6LKyGU&z-DRl za6TOa1WB>HTRTE$fS=BWR%IQnj&5M~vCnCmM)w;};Jp_)PxL&`qgEMB9>Ip!^w4}L zlzdy`^;*Z;Vn6Bjzzf^~2*p0-F}cDpD7)orueuEw6}+NW3lysGUe1qUy^#x`?JggKmX}RB@Z=AS6wP4tmck|0uGc{aNS&OM5CoFLh_YU zQPns(sXg)+sM!&ajx1fKrBD7=2Mw~lXkns3`9ks5m-Lma&h#Lf`=9!w>l)Tg^z=!2 zb2DFQ8;|0(n3A20VdEXGO*ZtWZ_%;Q5&h2jqOlq^Evjs?x%r#76DmFn7<7=lz;t@t z?5N9DvS=rsLwJ*2|CU77wHuzSY|^ujr6W46jOt2mWR`3z$?O2 zGh3_QM)oO8Mzp1A-`ZwB`fD63M^D-~blZD^pRu{6t&XcrZ>xkR(|A_o(;#7&2}(9 z^9o?vziIEZXHmqEZR@9*uf~SZPDp}OXY?}p1h=iaLuSg9(hbiNNS>*=#L$#l1%Ay z=23badhAD?rPD@7PiK&+TZh-=Q!vBQdsy=J1QYrm9>2On!exFXYoub?8Rd z;jf-bwkhjCxeBtQQO-VZDi4XFrKqH>S#zrI(Z^4Hq|rjV>+ik zDPR5U5|gpJ=q{Tw?xdBc4d|1zZ1(eu zHr|tnj&`lg}u2BaP!BuicbAe13MU6 z?a*RR$d=}xys;@d-l`)t`sqOG*t0~mq^ahgOm;$FW1v%b6&NC0;rOn(wf$ySGe(nN zF}*#EqBp!ow(U539KK7E%1)u`N#FEJu1VMT8!cpQoliS4V%y3|_Gcc1N-57xwVUHrV-_gW@d8`LPA*1hv0{M;$9lLv5_j^~(6^KG65tnPWXff7J)KfDG%^>%##Tm(CZ?BKf}QVN@7~+W|A^zw`b( zGwyg@KgkQsb#XqX-h-Wg?s0?}pcfv3&-y*ci~PH;cVZuVfIkzkE2y zLfViAP*CLoQh=Cq&p<}D6fbmdwb;SSkA8gIlD99Q{O*Ii9HaMbe8)SlzR)Ie0%F({ zZ#*FZ&;s!BUj6LzPbS=q>yen>fTq9x;KKlglLOibm%?-1<^h+C>d|Oq#yi&ijVVE! zf#=GI_iV^t3WO741&DtuU=yHh>J+L$taIM$+RQu8tNP@)7kCHAo)8rQwLB*E9dKA2 zU~K_f>t7%U=0dVqJh>3ydT(C82S$k#p2u(C7;p`c3V?IH;VFKqMVy;~qC$Y62@nQK zc?f>B5C!yTK?2y$YhPU5GlAOXl8-*W5m0h)_tOCjznNG3y=A~-)6C{g3_Qj8|MZJ% z11_?EZ!7TL7V6+gZF?5Tg{KWr#c+k9@KHVYLXQHdmTm)|8cRO(2~^@ee>%CKac+w3hJLj0kh)`8lcXrSqO|rwSBx&la1WG$=TiG-k!K|^Y+C4 zzL&S?^+3dH0if(=g>DMWP2aD-Ryc#^0WG+F|L*Sp{L}xpHoUNV@7=fZK0iK2B4iX@ z0)gMWaDMlDfAG`5yu*RCdD#{l`RL1qN~qtDwovrsNnXl%n;vM)Wa87mKxR4uj$3R3 zMr!fJw=NE#=z9e`@3kM;rLJo`yE#^W(YyK{EsbmaYr*8pK;#=&ig#W+zs>7f{A8ZP z$CKlD3x`5g_yFs(z`v_k`bg;ID-$xt0*vR6L(RSRPx6$%dGq>!-^T(VeGh>hvuQRC zxU;ap!<|h7KY5IQom~o91K57*&7i_#SnL5F^I$&|_zUPYjxB7k<7JcCX}a;Y1n**? 
zt1H@gnm6}92-w%*&fmzZ_|+D=jyB(To5!0e7T1mz9%vRjdW+*)p=X4A_{oponh(ud zaIwf@>zHd=_8Px3J)ii5Ua+%bmRDZ?zCBoW|{2cgCZhhqpP-HzLgMu5lI< z0LL>&WLtD}fY*9@XZQ5Z?&tYbUV5beuks$A`Nq64_gmi;uI1#=H@;kx0kRf6U#)%Q zbG}aLYX-A<{Q^1{rY79 zx#J0`lh6ui{h2Nz&-uPmIKT0P?f{-aJ;L_U{r<^ugSTrqMzjbduX6pl) zHv;;v-?+W|;@Zs?LGR@C{b0a(i{j>yPd>Z0``ssBly^I?`v)zwM88IeWACHX7cX9% z@y|wY_>RE&7P-@vR(jJxsHtO(`BS~gWAVc^Oz0f?*2fkFEqIHGpF$cVuZ(=Iq3)>s zHNOaVOl>k|xJecvu*!#EDWxM7-eWZ_FugzWbQZw5%v(7!^y4Q29G1oFB zHZs?X4(1-zdFhK%qq_8UE=`&C`};&G5~4qN-S8NKN=}wP>S2V=P?5G(W$*6E-Gep- zJm)o4ocGW5XX?(rUK)N=VtD+Jx2KPuF7D6PLPQ22$8RKB*DSJcwc2%TdT3X%#k2kL zZkM~XvZo0r7+p^rSDWTuY3L85Ir4|j>9--aHhoma^iOG$=}=p3BYOAgM(w`6vN&AM zNRAeX43Ozh^tZ}n2qgcne_?{zNK`0r5gU{{2Bv)vkoM8blKcVWF4Ua?r3v6`7Xx6L zYQS4S`g(PG5{AxLMDnZ$!0NfuAv*j!?t-KCu8=IvOihaM2f$7;{&ReK3)2?OQW$~}B31Rk_H>lQz zZb#F}%B_nXdLBygAK9@b%Lrb%9s)K=I;Ky&BZJTITL#hSE>T_SjAH9hmNJaDeaBO5 z@<~Q#{hLJX*TzlO5sT21H0Am;^f4P*hR)(Oyf+yp202=tYhI=l5kCa_KNqbsle<$y zD`>FW1J$MZ?@vE7|n|LL-)u{7@HxsNR~ac>J#+m zpJQcYH|_IUqk=M3OlK`vt_T~#Zh2xl83!=xgpv1gN8{mRZNu$-x9PK?&7Rk?VLyrm2c=jMP_Le>C%4kj!4M~5mZ zLuNe+QOH3!M1Z1I_jZUlA zP1ekH?2|Cl{%FkHK%Uy%962O09vPz7A=37%?ddFeN1PdBYE;7fivRRa^lg=!zIRNX zeV+c&hqHL79hi`1JG7`~>%5*Rv18W~8y>UY%F4+Tm5%6m^2tjtRq(N=xelPE&$J5-dRmmqsP5>m zl&vrD9=#4%RaU*#&sCWMYi!ZU+(0SP54~kuokZ{X9658Myng5^PX^a_c?2H*>P?BI z=Z=jXRra3Z3QS#T3Qq+>moVq+c;p$nW$owq>07)iI(ds3dz~Avt$dxd(TmgbzNrpf z4nMpcYELv4QXZcqQk4jDHv5_R7th){H_9DiHB?4~Ia;*PdoCyamWPh5<7JahvSH_{ zVye!bpQ6s?4=v{Y$ecwn`hAbXP z|M|cClie@#(tPx^zMSiR!X@+Tc~HF?ICDHe#r?EZ~t>QKx@8eis_0VbPi6_hx)rZdjSza^u zunMSC=Sbko#Oh7xLp{_!3rKhr;5J}uAS%zV7hehFFYol(bGy&4-roK7hhOwS8*miJ zcfW^t;5N_e@9Lu$cv$ZHBArcWk-+^Mw{I1SAqe2fwch2F9Raa z1jqq|^*uli@C(QW7SxiVBjkWK2!R4%Q~zOMTrOX|nwMr?!^!k&adrW3+D%^G#k}Ve zqW1Oy)WER=UCRbAk1hc3`pp{wd`|+jPSmD=$aFEU`bY0lfXD*3y%~_T)?Q}JGm`y& zoBm$P?tysEl3Uxgk==dWqKJ09*CLID1`8{v>tlDX;%Ebuc+K-DXG{7)3~c~=hj}aK zQJJ^8IRf~?amL3^2|y0EGBNXkNAdU3LUT6n)9ZyN;+3!MJV1fm-XPF-2a-ux0AU$~ zVi}J{23_96Kq|n?$;OqCA7WSodn0a{o3 z0I0z)bcw|V<7Pf=n&J3x;kmp(0l7TLF9%pXOD^%20e2e4%N(HeQ6Lon?LhbSJZtYZ z7L6Bk0uNqxW?{?+VgZKw=6>Ug4d1J6JcWH^^J!y(N2gvJ5831bMqLf))=qkOnunzY z54LRn(67K_K%LmY$6Fi|@B3UHz1qmoeU#BhR{=@DG(gmu;#OaL=gmHPd3yI!NXwu6 z>93kcz9>XPV0`Tmn&4oz`81mqozI&$z23ZebHLog@#e|C zzyD_sj|!)BwGVY#WJ&R3Gw9#ThJX9f$Mwm28w1d60fDX3t8h(1rwH$H>eTVkx%tk# zCG5y}!sa#nF#5NI)41Oo6}8EA_qB5;c0Ygj;_eTB@4emM`@6rtJK9kCtPhWVakV!u z?)0I-+t&jfukAi*A@g=X`?bd7=bx3|@AJ!-XR+o%^O!k=&A*u50IfjWZ#t(tah@Nv zP-EO!95B&541g3X-A8HJu(vU!TWqsfr)|QVh#zmU)HTB#aW;?^pvnf!YsZ=sgjBJ; z+G5{V%|%^d8oNHWnf(Q1p6Lyx`#jatkNeLHy#;VLaQa?)usHM}(9t#8xIF*ng&7lo zdZ9-wbXo}XQCzmiquQH3!eTrLv^?Gdh5oSE$&>tzcwM}BemtRt?@`w^#C$4r&Ar+! 
[GIT binary patch: base85-encoded literal data omitted — not human-readable]
zd{5?HlurSS?+vkt!aTwA8NPGF+>Or@Kn20~Z`@bDPNLFF*LS++xSc z^wdOlKDfByBKy+H8ZwT%75eKb3)1yZXyc4OUd_%${#`uZC9PMCr`$QPM>z#( zr^Y6-sMCcUI$m~xSefewS9DAgAOF$G&b`o+EXW`um@i@4Wq; z@}K^X|115dAg_P^QNVcPJ;6=S)A^U> z=I9i=uvmSR7pX zig7Uq*#eMjsG=uK|jWgvsOuy$C4^XBVG9z`I-1QCGDI|s$a@*Q6zP&iFCo!F!m+Q59_ zz#s+nn*U0ymSuFzrt3tZg1fW|^FfJa$x&dOh7pVGV;YfoYu^;By_bnY7=c>Zi&A$& zYqoykT16m4&azrnt6S#=={ORHj@shbIL+S{k#aOHTk1R2 zvK@|tO=`qJ1wb)FdlUej_9@%r1guTTc!KisPU|-5wp?;OAM+6qQuzWl8}ALLD(q$*FK zOPU~vFqTz+Ttnzcmi!vJ9zwu8}FXU(}DhBQ^OK`SsVY^qQYEBBO?pJ=`Z4 zPb-MQ!#pFb;Rsm65g6X;P@4&mph8Pn`7=zZ2&rF$*zvNxF5pqq#51rOe@-ZFcjlW3|{-(@4s|oPb zJmh8M#=NOZ>&{QosC3md4BOBa!U=CXZo*r}+D1)}Je4cxGmGliTfQiTwgDe1RQ$gF zC|6C{LZfxTdn+U#(~_UiM|@>Q39I!bFG$kx4ivmG>HHX~`6N9bTMyJZGMT#bdD>T% ztJHyyY67HXIek}CQsH`OX)I&P%iF3K>$^N`>O0X079XXSopMJUY zwjJtQ`3zW<0K}D8^@?>6s*|!TAguC6c}zd}*p_DBBxtK#DAH4lw1J^(k#R zc$l!+u1w=ICI462$&t2ZlBwgQXFkTN1A=o77I^h5L(QaL`_zH z+CFO;D(!NiT2m1(@>SDTAFFRougZ~0@lGBk9m_;);zV~yf_#$)(%bMhgInUb3yKvk zo;T^ETXP7uK?$plstaWX;bGd3^6oA7q@6aNMY5{DESptlpy(O~3MuI8^te}UEQ@uJ zJovHhy{G@rp^-+C4xiZvL2vrj49pG3JN(_|EE3a}mc%z5BvPL8&oZO}t#iUvIpTSU zBm>G}J4#zNJFh8cEan+gB+Z5lm{s|hF^cITH?++Zm-NWi99Rp zKFgB2F>Xz-7R(guedv+*t-QHXwuxaDd@LpDMMpqSc*-;47%lZqa)yr{kf>G|`@V)} z%1~WEY4JvWNhePWlbUTtS*f3+yD&pEOf3D!RY2o{1(57W%-k$iTNdLf}aM%$zG0e*%B{9M@;*9=xKax$ln)l&3Eju-ejh zsw{#|vKR(fe8!|(eXaWkE#P_qIkoZ`xkO&@NvrnojZj)Nw_~-f)t6Su8vti18#+$E z)LJYG4hmkGM*vRBAuuRyx*k!5#`2DE*0-`3=^>o?IHA(rf#9DKDwwEcwp-hY8nDPi zN9B`g8pp+vObCg4&O1RU>$VTD?0|A=v1HvOZR$q9y05`{(hy7xC>QXhN^&fKDYL&$ z4&{Zws9>$>2|k*)?g%>CG2)@#rN@tLLTg1q3>V5{vCU*1Az{G>_aexOAn=Hm-{dW* z+S`;*dEKKt^~5wrE4t4>>?Ms59(pZhxzXTG{6+<5l{~v*oP<&0w5FQ zGq-@nxbg#y%F7#X+)drJVRhHujkVbsKG+4R0CkiHEuFQ{?daC}yIKe8X6b~sCU7`( zW2S7poR6Hk2&JE4>(ayQv?ABdk^ac7({aG#&JGT2ql`}4Pc*)QO&$`-QV3-PxMiVj zYHBRBZmeOkdA!Mv3P2{T-~}0mvEa{H52UB;UJmeF^y#R0ui0OfI1ArvfTl6G)P|!Sm1J5Q#uwF*|X3&%8Ec6S&xGPF>J^38L$ag1zoh+Wi0n6CdSLH$B*OY zME}e7XWalc0!i{>mv&;t9;)h20o$tWz&3b%LOlWS3|T*l9XuHXd{lpLp#SxUJ~quF z4}1{(*3FA$)TpHc9K!_!t#^-+moGp40$=R|aU-H`I%7Q7g9UBppnAl`AS|#~RseR{ z(E{%-vAFgBQ}w1#b|2Z9Ue>8z>Eh*FcUz(7R98aOD@lb_#U6(x>|8jMxDGyz3*W1>p31;gkNHz|M=M(7W!EHVv)n0 zS{DG0&tJVMw`Z;eEbc+pwK^5No*c(_JmnOCzBV;o=IGZuSpfXEfBktG$34i{kc-CD z0lIwY^=o#Q=-$TmNx7-SbHa z>nHmHywCdbUXWNl+fMn^eL2ul8Dt^CMG_A`73{aH)|tDhY`p@aF^aI=kcQUQF0Q-d z>wx%z?fQs6(zQw_3pvoL{Jo~V=^p374SedeOGld3<-;t%dywt74Oj) zQ~96%+rL9LM%j(GmT|-p%1xPcPTvY*JBgd5RTdYNG0%{2F<|fD5&ZX@7EWN)y)#s9 zPRx`O7I9CBHw3H-%pi^ zf$@xYEK~<$iQhf>rfjgI=fC^qZvv$M%cF10<3;qVeGTth!EhaiIyZ7x+mn~iQHY2K z&n5usHG25*$&ch)UDHjm;CW8pwT4ge@iBblBV&iho8_xN{%d*s{KxY7{SR1#Lhh02 zUG(N4i>ofoxUgqn`~G2kwoeX>lpFYC*R7r9+T6i%9U7c3xPa-bJrPnnJkCT3;gSJ< zGH5YEP(mI%G0voi!4Qw_tdl@0P*9=Xx((XNZ%jPMI2LLMGHnJzP_#1H7D-wGnY|}Q zh+}}!a>`at5BOxTBCt+(&NV_p{`okO1+=>Ca21Jk1Y3PZBZS*XmXm$H1+w*Bo{!~_ z06}vGt|$&#C1HKppw&Y)J6Xk|@k40Ru0U30RXRI(Q-M432Z+C9AQr#mqqBkZSbihB< zEA{9?guEe5>p_#ABu#oYdbrSnzxg_8mnYOWO}<)?lRkH-M!u9Q>)-tOwk{E{l%tt< z!pc_4>AU13?Z}mU2tUmO-jD{F^3F8)_+2CL?eiK(z`+3%4@lR-k|XbZ@^czoB7@TG zAl-7>8Tl-}kt4f=AzssGF99q?4QrifiJ9hReCbP8{6`haw+Nwh!5izrkGIkXX{j!h zlh5h^wAn_blnITz%S&ZbzLO?&g#`}o)6Q!93tc8}!ZJr*+h(MzA(N2>dnnTkKP`*F zC5A`xpb=euBtPpe?+BuN$Y<%`%gca*Mrw3&l|E$6wwk&zJRh!WoyZrjT^+|FBCm;0 z_<*F1glnBBQpXywDML*0iLjmcPoG`p5Dq8$M4QzfH?}krrr<)w5+tx;2H= zCvs~C9lqFD8s&J`zL2TNraIL$qgx3rYo(u$`cK)HRi>0Iq{h7ARbKcsGSzq<+IXd` zkXSt&47wAJM@hw~v@P2>oiqd_yguQq=PGAa*5kH;NnX9bCvD2O=k2l?Ul)#ItRMHZw4uzwi$JOGg$ed~ds!UY}Wi@Hb~kn1MRkZ?nmN zctN_^VPg8qYtr=}f2lY4ZyO2#gg|@0S^P^L>Kd=8d&2z;DDp70*$ zZvd0CSRo)0uOrosfj`h3l%j?r9W<5j4Qvx zTPU&}S-$Ygw3SHzOWjwC@$wG`}W)N*CS;ylVLEty6G^Ee? 
z58=`to#;at=)l@DJoLGqI!Yu1nG4^cu_kO1>Q4FX`_ONE(@*|}^OE;mnzLoA3lWyT z@x%zG8-mTF##0wk9ABD|A^~r$7x`PS6BSpRuAz_lHqrB znr$M9@lbkA$!x7p6ReNOI)YEy#81VWGU14p3%-na!2o^w>P-1HAjtpr+ka*tqPWO}>FPKWMkZ!I&0|>vfMQ+fJM!kWtFbt8i|hsycLDP=#(-LI2{=WL zV8RXnVjmNDeX$B~#2S;sKcQ6s06+jqL_t)^-ks~$GC9-|M$k*^DCMIb3JHioI?A~& zU2cjpd{Z1xJ8G$4o+*2uA;rmmqL;ufaBPCqU4CL7&u* zi;jNWX9(CS1A_BeETNq{LDHgZn8{15rdhOK5rp8mJg=Z{Lk=jTK)9gs6~IJ6=%rl_ zW^__Y-zh(}0M;_kNvCop5N!MKz(Z*lQ1<{Rul+cI_5BXK)H2<}ue78+)_*O0QAFim zwpBCfc0s_c^jf8#psQnjt{+$0pW&)cW-LgR8!WN{wozw-=()aLWo`#-R}XclF8~1q>3i{d zROtZ_0?fF?KfEB72d8!bJYB$tegKuWK|rZxcALh;^`AlqRUVkobNXm&b>fdlSfxrQqf+s6+t z-Vg@sI?}$R>~8r_-2#r@oSH#ikJ(AH8u?ZKPywM;o#LUb9zJaO&u};DLY7wQ_QA^N z2HWmC0CVTxu13G;jz;>GKP|!KxAfWmUFf@{oPB`hzxd@}1Uy~Al5hYQ568%!u2NQC zy+D$2cYwZ{zH=Ku8LMIF&@c88a$kMfd-g(4Ih`1**45D5^tYdLFpO@`3J{>ff>U=2T`Vwk=z5mV z34o{PK3MK{>UMSREsJQlI^mtW9k$UM%&?$|Ls_wQzQ9OheQi6takQ#-+#|@XOPCH^ z9OyzumnN6+p^L@f^USdT;vLUXF!gQ^publ0k6yfrPU{6+^(=r1thH~?uycaG(Vb7~ z#v$rvbbJ&#aFHXZPdmAJ^F}OWb>Smm3v1Y=bBcR0@fAQa+M_#WwDKIqB7J8M7fdV; zsk_<-p?6~kF$Ew;Z`tl9uX0WUW!IvAapSCf$IgKpS8y|d{0r3fVyUjXk6{3Lt!`b! z)xupl_H2(AERv0~(0ljhWO%mDE`=4^)+G962-g?g{T}=(AkIzz`ovQ#-33NN13JmY z0Ckjo&mKD1{$zl@;0g;YBkUfVn4V0Zdc>kpJF?3@b|KpSAS+sob)(ya6aXIuU za{Y#d0q0X6-n)}Sk#|_Um>eIE&edqZMJ&gDo3xc%oM&*v!i$GhBEGZ*Fg_S{0?s%_ z*{@7Y1o+-WZUoz};$CJAfBZ+Z2d$}(krA3=bha))9M7Dh|2^>2VW}4C(rtg}c&6^I zqJGKDaq9?11TK1RFeZ91IZ_6hM?YXu=l_2GJwC*@$~1>?K3tu{waa_ummFldzO!2X z!=HbjeoyP~BNj4UgmoN|F%NkuBmMZAUj`hXU79cVhOV#>G*o6+7R#SrKQFUui{<|G zt(5BuKEgk}eYdRaqu&^N?6M%R%lOsB4g1HA`@Q9T4rT4k9FX>?3}gBH$7g?Hhr${b z`7_XUr~Kl*2W4(|xy++ymE95aJk5eO#~1^*Ey}CA9d%vhZpCF5H{C^cNT2(~{m;tn zvB~m(&c9ASr%vjo9Xs~6-&|N*#`t2Jvso}oKv(EuOTDCy^P`SAXFR7|3+T5Z8=u|! zpu9%E{WQyB#y-1+=;Lj(%m}M;6r{Abb!(0tYbJ+Dj`s+>!zPCfnJ`lluk&GGKrF{# z{8!Zyj&z(j!qKFelJhVVM+83>1AJ#7ZG0udWaupGgp;_mg3Du8{$X^TU^7@J69*po zww1H$ttFu8JFyNBpz833Pr&t%M_BTxz#1GlSWCGUVJ;iX|nTYAj5hNwIuet>vepk-h* zpjB_4S=Tfc{!>(F)O`%DU}D)RNT3!#XhSP`XHsW<8H0D>hZA>Jl&WkHMjo0Mqy}Kj ztXS{?zE}_B5o>mor63puAsxf>?wvgHnD_x?C?65(b)#IF*zzoS5;Py;`e}k6E}~E& zM6L9aK+-Yc$dTzvTjG(ob&+S5H@_y9iz;_qS3&1nUJ@ut@>coPuiV%HGXP5!}_v5HAdQtQCys|tSIKBglnQ2TbbpO zG6uw}Z^@5`q08;AmJJ@HRMNsn>gXGkM z`jU52qVBezXcUrfsqwjOf)KVFW#1%H7siq%X;Ws%Gz}KM`0WHny9g#7Oh4p8Wr~q@McTUi zFj-5$3%|`TddlYpP8{eDAE|}(Ere~zm-SF(LWY}OBhPnrWgS^A-ouOJ!fnPm$&9iy z;I4d>NAl6kp~iM-`VIb6KAM*ANyGM&_v#p;rSCO=|4fq)WzFJPWq=Gye9M{;rez!V zTYmCXy-|%38nO)ireE7Ay2J2tiYRrgX8wlpS*?dKl3-4h{FrU}Q={_CprJF#)O#OY z$mhi2&R_G$r7|X+zLrOg=P3`@A<4Rc4$Ep{jXIDnMaFb1HMt~+;S5iD668zorB4>R zKxI131q-teZ9KNlpw(zemk80hsgFiJ5S4Lj+DrlxUOA}mThWX5$pSTgOj?~$-O_l? 
zAN}r?;n7TX1Xjm6&})jx*woZGhYPWVlXwo$pD%1N!90l7i(9Upa9ck8h+16^y4^Ot z%GT|^ODvTfS1GH!bn-aLL`48z%A8DlK%yYh(Ge3}0QF_SU#+LC0&9h*Jk(mP>8>KK z40tNIqSc=Om!O5RCupr@l#@~0sC;zNW__5R?rt1g7)G8eyZV6EFRj*vg1`EOP5Yq@ znWsDoC}kZW+geImwt9FbR`~#4TJ)5n}R2CVQurAl?x(A?imxBcja|QVm+|xI4 zXyU!#xVs@-SU97eeXpyY$;r`_%Y_|XJ9xNSKk=znKs#I_;Mf0*d07rF<(=|l-3{Zv z{~E4gUb5IF*bh`4|G)j4%+V+x$;Z`?W8Muc_Ub{OW`!K1H}qp|xCejne%V}J0R-JE zBR5o3{1HZ|h9ZnbD373^{eB<(7{fip%m)w3@{4D2;nN8amqjt;*t*?b zUIjGa;68Xh_wd_t_5O#Y@79B|`{KuP=kA^I@Z0Z8$Ab?j52Gn$r5j*-A2%%l-vP;5 zI|1L(5i2jAm#GgvLcT`Pi&*F)pBL!M@IvD|$`-%2w8MTDBeEz14Y~!e&1lhjPJ7Tw zR}f6y(1|tu8TxY@9oLWUm&QJ#0cN5HPEUs; zuezJn(p{Hat*!PyU>o#xJ=CuY4C+1QU*LOyyw-7HrG@SrEUUHhUS{D)3+!j(9*XUaJawjXsjk=E|tJ$)IV`$J^nqxbHV=PzFq{-oRl z@V$atoF^|{2DBDX-NaWlYcI4f>KK|j=H}rU`be2|=M6xp*q+A$RnLhxk4qZMtB-San&T3|zgGNyA8Z3YyCCR1j=obK zxswD*qMp!)(5uc>esr(l!s{OT(Scjq6-qFA#_=iRVB1@}G6R69MY4UOyqn`-#3=yj zUS#o-#k_U+_0{*^m%A)7Y%>ly=ez;?{@$LEva_*-j-H6V*gk@<%r!pw?9=j=g^eZr zl}n>C-H%1_o7ZnB|7N*Au9eh(eE~LG(_^Va&E^^|+&mr~h z&Od>D$EIV{`&AY{)Jhxli9@)Qxr8UyoORU7pPvBy=otG$b*^I{dmncID(emp)zOYk zEURr!y}3!ATgd-Fx2|mfdFij+ZQ1eD9EXH{1D@DXKKbw$ln=muaIAd!#CLf#xP=VCr1xD=IO%gEV$~?+XxG)jw=Mn$Ef4k`B~cPYWYtr?EITg{|cURI4O$) z>hOAyG-E(yvONdOQArLk8}ptz(NcgE3jVl?0yIEe9gqmG8{r5;hv3AjTCgG-FrFxi z0gYY|h7WO^tk`+bGEzw@1Sdb{p;e}hKl59`G<|flt&0%|X^{RKfBsoqp;h7-g=D6Bro@7LW+IWFuqpOx%=_co*l30mfUXq!umOO_TBm zXc5q1pb}rYTr>S6K51H3!DB~ADL8A-hex#6|1!Hkuu|!<&sHLwKgUyKvKXH z39pRgp}eE4d^y<{(5T>yz!UUa2mG}F#;3EAmyjiG@-_g73m{sE^DJPDNugxIJ872} zDx6lg3|i;#kjFV_)h7W)5~AYAyX2kLyE%+9L8(X6jqHbO zNjH(hBYDGJ+6kUcS-3XrGvk0zcNZww$%N60xt4CIx1xq?jSGuWj$G(grtTE z7oBc>`mx@vGkBN2NMT}7r7m@_l?@Ymp&@B zn6y&2UJ1}e#&~B?+H%~5sTW8?y5ysauPL|B8DunNx5hP%XeqG~E*b1V`z4d$)VtSa zn1wu<(O?MtvBQ@^;j=kP1a#SFdm9-cPXhRuZ+T#lng$_xgf1 z(C9y{A9J7d8?+c!*@mXNXiXUFFXV8Y_D1>)Qfh-YO;Sqlf@*qZV!G+C38>&DUv-Uf zlyT^CD|_Syx|(z}_%4rJh-k5{B9~^b6|4O*@ysIh^HpznFF#3(C}}6=#e}3ux{^^{ z`}m$e>%p|4Cvr@kR^8}JlUMdgBR8hv$Flf@cN^9b{!LHWe;48BvRVYo-i+U*uTsu);#y#?2xB}SS@s&G5@dYj zCSgdBPud`lYYZNkhZ81vVBeBY60MhMn_O1WC#BF!0QqEnLPNeAc}FHBLS0HdCn@CO z7ax97{^i?;Wr1z0qX0Y894ytwcJ0NLO~6+C0Anfoa(RcrJQg{&1LQ;?SBs-PKnE>Z zv^*Bj67-DLN?FyN)~Le|U3dVv(q=lKL0`{Kssa{Jw-!~aGWn*(j^L8@cSQQ*OpFC( zokaOOyrlh&;3srIw+%4hHe2Sm@ck?h<^r-ntqj*H*h#_c9PS?gTeXy^OPR>=A>SrTd+l56P&uUe^Ynp!h^%K=v&qK zjt#Y7v`f=c68I@{;aF13XE)QI;pmu3q~sgPBiW2b@ed{knMtq z($>p_UO%&t5}DGfP@mTx{5L+TKVG+-V`15iz9pT=sJ!E6pE}e^QlIx)>Dx+10qAtU zq8pW|QMM>I=RWE7 zjj%g|viKRp>hOStC%2#LzqAW0)lI-PsS~V(tSn9lkhx1w9WXKhP;tH)xznwT?ZNH- zSy;kN#EqHpvVz5+GO6XXR=GPI{wlD4db}L@444eV`T?@Xu1&C(T`e*X@Z;Zs>|Fo+ zK`Gz0mF0O3TKxC{*2XSebl`UvANi|0WdtDp5D?142?r*o%K-QNII8{KAD`kf23HDz z0nXv}kddx|uAEQs`pqI%ahu2jvg{b2Qrg#aajlh|GOkRjC->Iy`40fU&4QHe;sF2c zeYEvW4y5zIV9+gO6a7H@vF+=gVGjV%gCILum^i0RYT4|9kRS6Ca1*>i`w&;_d*uO( zXT~r3rbdyL9vH6P=sxYvu9_RyCRu3F-3W4n9C)~({p2+cxqJ3{p16QF09jgYyMQ#n z;*CJDi#2|12Y>wObq;cU@YyF!FR@U?LfXX|WqJqz%05o1w{nu`nBQ3{qqp`iP zSZ?s_?UNtiqrAaCF!FVN2G9s#*MYosp{tf&JWW5CLnEiL^o3t1Sc9Hmd9KBE8{y88 zxoYUu5RN#!LBYdaEb!6W`eNVb`~d;bZgy1Y-bl-PKZ3_CJntDmrm6e5@1cpQ zTb8jtb?3o8Z4_-#Ch)i14KS}P?Bcd!0{_g80S-@hkw2_ET^vOZxKlg-P0u^E~Bl`^{S{ zkZ}`3`?3ETL;hUEaq(*ax$DbfF{2dtpnI^Lo&D5}dUX{rc4KP`I%!9=fdR%09{Mam z+)6vMK9?zzb+BYRKo)cZp;fpGtBw=vsF1p2Y#}Irdd_(%$d(Iu%7h0yJ9l=WyG?Z1 z1t9tab*c-M!|-dEKK1KI&$B2JH!fJHfAiA|pg3g~T#bb}+q?(det7R@dGsd^C?{{* zTQB9_0&INt{1toz(7ZlVZrr+!zCF%@gnsic9z82doJ(^5-Ysayl@5P7L>C?$V;rhW z73X_eejj56?4hsSEEGECi|ZM}MFCRU%zemP104@}wER8jwr={v4XmjhC!YW$U(j9z zh|keO|McZok->4~Z3&Qmj0F~5i|Nz**{iuMCa4$t;Y&YdSab1xq%J_Sd{_D?_mh>a z@)f%fbXTX9_if7AO5L0vatJAPYo9Er?fBp-{oNA2>3>AFj(HBL&<*QpgPAv=(_M^r 
zCzsBJ9GQW-c5VXeAm(TgGW1j)yx!6Acmc=OedJUi46QdL5hv!c-A8`kj z`uPmq`Fd%NIv+0mq~o|`kF$5K;zjg<#jVE10# zV4so2F1}ry{o(abWqy5uGj6nqM`^)W^^ijZ&r)#FF#{j?sLs`8k>h6jj|4(unmgC* zbDuBHmcM0TVS$C8T?{h}>z`?}IKmAK49Eq5+N>a_T)y!;pUg6vlN#Tks1tw?PCRLV zf@waAB*5GnfG5OFI!cJi!2shsKW@ZNM`pC}YPC_s2+!^Tj z(*aF%f^a!xNPkALjUvfVSD6GaGnqGkT*kC?8qZFNhGA0{0AwI7Ph#obr1g?P9sn6l zo}|sga#9XmpGXVLt^lLivh~Uz2iex2X&{JHJK^}#1&n!F1S?6Nr9MozaZfVF3D7UP zAW%g87+qSuwoK9#KKeLx_?q|DA@vf_M8Sgum36EN8L*)WYJ8(X>!pWv3hr~A^clPc z5VIVtPPX6!Jt9W_0Xd}E{EVA#E}M7KW?B8>U*!wmDW`#mAx(tIA`TEkdT~rj=Mm zHZ%WBQ+}&QB9Mq0@df6Po64pNvM7>KXW@yeK~mM571j1ut-u zWG7bjmibb4d1kpHHxkq+FJx31^Mc38+;78X=$r0 zO3gpZoWyy?!?YXXs}BWm4QhR-P0Lrp!nepGVN;G|WQIzFdHcXCgG-O$n|V~(mtLPm zFOW$NRYf->j`0j{l*l$|+Na1j3LqA&E=_*$IC5rPCM@Y&Chn7#{f7zp$TIkmrqp$V zE}4RU46mEKG&X&db!YgrleG7mPn7`);Wc5sY9{X!`6yk)2jY{8qzV%i}5l+=tf$it*lMl(?E%+-7hBMs)y zwfv_1(jfkoyzS$muPz!9HheW-6RlK*CWG-bapZ?(=RsY#C5Ol#&n%ChFqi;U0BV_u zCJnV;@tR+sdC`n#VMOqJC%xgF_s9;n`J{~An4glzJL6fFTF{iku&IIY+q(vBT)9nG z-BS5OTEx#ibT|9e+Na7WK9T|fayE;GK2Ew`_>#0Ovt-#mIl8{4?K5PQNvVghhk`~t z+qu@mCjw}UecITXt!slF04XP_6M!+Zn+Jf~mu%q%++fm_gYRgs8jrXo*%AvzyN&uP zZUDA-unZ$zb+3T2z_t@pEs6HA#=U}{XBnwgj9bzj+zSdi3El!I5Qx=!DQ+tWrv;`K zrh?n{dAg`lZgo501pI^dZc{G(b2ql60-$-QRe&}oK3Wa8x9RR9QWT%C-SB$3b&Co8 z9y9>tYPrVtX%C<58*pc(tObaLjan25Vp+>_-(P`JCyWPxDnm?yPKfW?gER=*_CuS8 z?8O%mV_GvzTXCXwI(eTE$X{d_W{r(fFTV6CLUrcjRKxp13T%w zILB257T;PgDg(L*Qcub!d1Co=MWBq&&Av$+k-@qf+UtfMCZ@VoXv5M=EA4#%Sr-%f z`*2qSDC@fm%8}g&&@HIFi%WqX=!!pk>QbM)j^TAvFv7Kdn~zD$-bzbbyDJxf94 zz_L>}DEhkB-Gwsf0**RMmo4fv!2|u(+gCYX5_}Xe_qhP;1%R0c)GCt#JJySQ7W}im z1##K|OZDBZj1j23THp{s#@GFTW$L+oX(3Iqd*2I!xH~}*7?!2I3F2?Qoy($xa=*o5 z!2n>MKEVZOj{v3w!1TR5%mJ&)`w@$1-~RSLmZ2+?)O&mR43K>dU~&1)Qv3r?-kdJ4 zp1mydFJ6@KDGp@?z+8R#v~+#)Md`$iz$l9&&mMkX`v2~~F1J4aC1CSzxpMD*>3loS z0^@ou54&v!Jlq(tXq$x;8}1N5&nfOv?3+5!bpqQPD{Ex~@T!}7+ys<60qoLZwjJHK z4@l~vy77-pC1%t>W!Sd;crc}HK-U^$^cOB{I=;{+`3@Gj10HzDPWL6srR8mppeZ12 zFW*_Lq|JIj=MIZj`g9+oU!r-?f6-Q~Z`$oPmdzeKX`ZHi3SKe6VW|h`fF68spTpzH zt5-O{aTSZ%V`O(UE+5PVdxU`?*Bc7bJ_>KAJ!b=CkT2>U-SrI z@!q}n%E#^!p?!V#=xG*NpR$NCi;UMZ6o9l*hLFc$g3I_B#!{8~)!LiT7qWG?4`>cOF?s zXUvD5zFNqxnZA$j0orj`DQT>%(VmgHv-94#Q#%32xPs-nW0L^@N&R%YcyRmPZ8(X} zh9@~U0e-h~eN3IHzXot$;jSAZ0tok{kF%}lawF$`FvdMP@K8?|3+R(4aEU`%+^L|1 z=>DaLcBPKcm58n-+S*z8YxHjd(T>~gLj=(M$g@t)MR0C!yR+R33ePUS#eLB&Kz-UH z>0jdj?-lf8D{)6zc<_9rA79KW+U3`uykADJRCnmqho1lH;q&qq_e;~)W|Eg6{wRwY z_RU(Y_W@d~C-lc}|LlRnx;y#xZ~nUc=l|!Q@&7$rzWC+mv=PpNK;QJ}s>bmZVCL!~ zt~ZVbaKFO=kjSt2Qx9P29>8}$ApQ>h%L(|FuG_Q#cfoWM*E+g+nw}m*hj6CCt~~3A zOgV0`KkEau{`}KV%FZ|6(Qewxi??s%%1VFu=g5=0x8A>VBWEadQ1;ij*Kw@s!nT;! zBdq*qX0Bl^-BWfs==nW%vi$zbugfmGf9_3RWnpBb41k{=LdORE+ua8o2K>|0^8M`F zvPxUGzpy_|`-V?F$c#9bc_|ypjEn7BlskWkMRa7n4L;u>yn3@*O1HBJw}G+7+U`;L zj}PYow+GP|wAaDO(tWa4=H}+gd-pyre{=Vv@@(k^b;1}6`L!-d5}Cko10UqWwCA{` zBmb4Pm5gDu?C)oR&&7DuJBQFOmjCqpVfhUQb1HL=YaTAVDt~(OqI^1YyF8qKm40h? 
z&xMKh@@(-<>7L+#;T!kq-|4?7&n~)qkMgPe`jA-{$(Q#w%5y;XxFVyD1k<&+7XX(_ z9x}STv{-K6xeH&|IW#tb5yvd!XBLLgt@feIj)9109`-Zmy4I$QIhT((t+Z2le(F4t z^jyRVc+UM2fP5!or6c<&`i-Zrp5bz5K78)Q^1aTN?1wb@ut)O4jSO`1Pp#!TInwAI zfbb9|&{RpVe(ca47+NGEreiY!!XlJhaY>qlX-qjtN0NEgsH_T%^UTSV0I|V6fD>g~ zC%*(_HK~OPa|>Lo5e?#~i2?@H!kWC5PRjvg070Up%hA0-0A2?(y`(7sX2yJ=qm_YE z$n$tqwhcM}@E}b=unRzQPhqbXVl^iUthiX|^;k-z2Tfh`hxnsp3O&|vxz7|SkT0-g1Rbg0nDpKm)8w{a;?(ig!s zuK*mR4Rs2!sRew@wH+xm%1`o-SR0i10eVxm2rJVG|2m!Ze)vB0p^T@9@JW{PMWzVf9c4=j=8FL!*kk{<*;6SFAtNh zw1r~AAp_>0tf_l>;a|;$WT4A(ryk@XAAwqE2;bDD@Jq6ycOtjMx4rSPj5Vj4F2wP) zQQ7j#OX*XVPz=(Qv>l)-cZ8|C51MjMYU%`jLSz;;YW_yGABilQENSGRKH9i4=O;3e zOa3wFt1<>1)+HqP$}rg#lEne*&b)kQ-ljs?++~x{7QlDPAAxx4f6JWy&$bu+W&PTJ z@fBI(p1TX=LoaJ-r+k~HIyZjv4IQGaH`_2V=p*#~__HiYn}m~J@~_;EKD9ll7t#ky zXVtwSkg&0gHB6(<%+4}c7gc76i+%{ik+c~o(4OiK^AfJ5@-5#(Q#e3Egpz*Wg^uK2>nVE3KFwhKLv}5v@{%&~?N4+>WSKS^x#Kk-Q>wJr zdXEgki{xOMjFTK-H+fpqgb$(BxWu-PhH?3U26-L*=M(BKwB)_1^5?bTq&Ia@%T;xe zR2k22KVCHAL4;!v`BD3C;=_}a+2WdGDS=H9yj6sE8aih7Z)b@e_vsCW3tE_C~?gO@ZKA3pm@Y z8#9?`?CSTDIyj=*Huf2(K)Wtd^ouOGC`jWLVke3M#9CoGLFvY@t-jK$(whkAz;|sV|xQUCH)F2I%WK*+lqcB zty}oW^gt{vna#_CJoUdi01&XxVQpgq5=_93@GEXvoj^?i&cb=0F?sJFqX+NhmbO9I(00I4((-v zri>mlk+xDKe|xL?aSty^P}%i@SKrbuWNZu{47deVZAK8uzi&5Tia zFF4_(*KOa{xr-dip$iB703B2TlQeu!Q(yXG&LN%TBmZ=VA*j&K-#L@_`k+J%C(z;KIr=<-BuksC+by<#OF)p7Mk~#w6}s zkhVSm)E$6}S^TGJ&Ap7940)iBAGD(+z)#{al#aK+*0tj}|;+*z-;UWgsj#)rL-nXz?9l7#;b|Khjd$`+#J2aR; z>$bxB-&x%#=j5X$nlkJT27%$Fm37kDDc2{*$~czKtMJ3aB?tPM=EF_vp+m8xBl{T)l96*Pxm<+z||sXX=jn{%EVw6k^R*9Hf?JF zOU+RhvgUA6;bMWnzOHy|iMsLWg!VN+E9J>WK`nZFvB-Qjw*n6jvP(tl=~Jv_m3wuZ zK(Pxnoj7P0c(+}95a$}!+qxp%!xB0!3DB7?kO{sCm^*iMUU9^B`!4eC29&kSdW*}1 zh2^y}17D5>bI}hn@_+}5e z)2HlW7%3lp@hi$h;*_rs@K&qWUgT(O`Wk-30e+C>BXp-e%hdaaLi$n zluxi*J)%{vc*YU*bP-;?r;m7vN0g}B1zbF^;{=|1xZE-p<>X8|$EuZdJOJ5p3NS9w zRtIQLE@+CItg~aoHn&e*-C<$I_V0*8dNsP)UPsOVlP(+dux}BLKF6atk*u$H#RC7uo>c0h%N4o-5J?xM@FgxQk!&{-N^yo2|0C zhx;7>`(*(0Pd>O!yJ)5V+h@V+N^uEtiG}Z8X!z`74tNDD70_1a@3H800&ld$Uf_Fw z5BjLX*;ljFPhVuf1qgQopeE>>IVhVl9-}XHWwXt=K^GJD6)tG-#o@x9V?v!EV=h{G zM#%~b9DZzjg7wy6kM3>k@6bb9$a)xL=6;F~ZO$>Mw9?(cLv}&XejNV^Sn6_2{bpZI z(DcW)(-CxNC;IjA*AL5ez;YKwJ%7W6>c9Tvez`G@RX)Dz-67(_->dnz0i{2>d5!S_ zdfIZ(E}zlwoH9P1fhOlXi=0C+!-2c*R@h^=%_RN6zxn&WFW>&_*X7ZVPdNPee&X5R zi{C*l!(aM&_z+V%$h8TxG=%lJL=*2T_Ec0&!a`0-}pZTXjWq6mx{oyS0t_L@phZ^`S!?x7mJ=jyczYzDZx)0?L=T+Bqcr5Z2tCEdDr0 zJA=>r7z8YLbC~(~Bzj5LO)cf4k3K3Fj~}taakgyTxra482aJ-2iz_)hMV;9w*9PvI z1g;NRWV?!0xNcG2;%nbSUEQ^}xXB_ER^lFh`TfERWQqO^nmWKCe{=6IuqMX^;m8%v zxEU?~{_B4%yLip`t9zf785RuOK~N^>#3ysB5#;(Y}`p_|yjlwv9R( zLXDsN$Uo^afp4!T=dyQ+S%m2bq(Y})77Wx!xR%Ivl5_!#UIDS4^%NCWr70HI(2@L^ zdRcSO)tBm)uDPT6mNo)rf9A+TcP9x%iAkycgioYC?$u_^#6#{Pp zbR-u7$MTLp?-hgqD%1fUd?252@}R+M2L_U?fK;%-+(}JdII#}!P6x<$l{AtUe50ZJ zBY$ccOxJt(6BLAWG9lnK{FNp@0hA&`@Hc6|zc3wMC_|*(;(~x-B!z#H_YTw{uumda z3V&Q7m;leHmzqDf-cxT?wxCo#SWwB8{*<448=8}Y)Um(;nT8=;tK|7{ASDf6hg7dp z&p!3z)JS3pW4c^cig=$g`og-fj69>WszVmjg%*V?e>IJOB}o8YHd2CUuFQ`Y{D!XZ zx$+_wuL6pV^bIdR13VF{#;f$2boCvsoFWsZYaLPFcIGf7IW@B4+88{}JCm)-Nt&Z* zLRJkE5G^Une+h+ZbqWevdLmc8j|}jwjMdy~2Fi&*mU5ioA&G~l@~Y+`Ap#@;>1<2{ zkp!O`G<6N%b7P+7Z(2TU5g-*jthvh7hD0Syl`|9L124@{m77XcHt3;UB&s`R3`DvC z14%LYP-il+5!bq|X+neVO+z}Zr<9MhLYI%FMY=4IvQY0VM`Wk*9le%3q>XTd6$p_o z(eJUdZRTFXDCDNTS%6G~%k$;7{mo@~`=dFtPac-n@BlxX1`}TYxNBLMtQY zlqZ~fR|k?xcuCt^j)Jn&3{2p1hrx<05h)^dba;{W`u@KN79GW8ml zHgEZeSoNM4dDTeElJJ0n@ShL&(q9X~h2^pA{N}rc^1dk#Ss*n|Qh-KvR6bloYFxFX zFxHD5d>uQP(MoF_AbAX{ z-Y&Ls?*WYG5JD#O2TpnzZwO>-k*+(7_y;D90Vcn#07DMs)h+r|)o-nn`ySGl2m82S zH~?6ZM*`de-JOh2E@*?}O!~9{6S$Ta0_$42I@#XFvgs}sQgZ+=Zi6-${GRsjV8Uj0$J03ax{1Hd4#)(QUv 
zF^@>Qn@OFMogIAK4g>B83~S}9`-cSp9zk%yTKg_tLu8?V39u8(O)P)391=J_BrPX< zE9*-E2_^yVGGQaWl0&gL0C645=95fr1XtC~)|=X_x*ee|;g?pyZZVfv0<&3|Ko(X3 zfzpu$ECiPn9y2#N5h~KjKET2Xi+T>l{Gb2s*X4^lSo%@bfB4~LfJXI978xjy zepTh)U);Z0#@6@nnLApB=JgkPNWKGQK+u%@wK|OZnCKpQ4`de-V%?_&m~MS+-|?BP zB`dtH;3f-90LxprKN!TXFJAF;uwUmEhvIGQmNx(vpL{SBnHH42e*J2>kIY)H0`DID z*3yFP0Llrxc47VMjwgXKEv&7BUBCzddfQbSa@leKfPo*_8+WcU#D$;EXXx`!SZL9j za~yEfMI7l@_N-6A@lo=1qI>`V*-bvW9~mCQH3(_<62^mij|I5^*Lsj=7n-heXlf^x z-vabnvnq93Ry*(04a>xP@0VS`yo0S()Elye?^7g^DJ&*;)UUj|It*>EP}e{55|N>; z8IIy0{!br2|*fehbVAnEL4;R<~-h(+QmZEB8% z1a|_c{HLa;(1~4T9c#oMtbYfvK;7S3Du>;>are{10hDi^JSo%f-OJ&B=QIfA`#zx4 zJeIFN{QgT8Ti5{vFLmee(O>?m9O2?bz2HHehgf$D465T#0q>+;YwaN{Y_G7$amGTG zawo4XomRmv?AWKx!b9suJtXkyf{d=V_OWty_s1c+UyDm|iES2sTu{=&+Wz?F`*+Lw z%5qr;?AAhi6#c8EH*76me)%oJ?O798)nYNQcoIYh@E=#B<{Yytwh5T{kRK&3pj z>{^YEc$hNZdn`B|;?@V#asWXNS_XjA{gQ4*&bD+Vgr1{4yC|wfwTHW&Lh}&iJS192 z&maIk*7x20^yM7#Jg}7=Bxj82`T_g3kaxG3dBaTjhyDj}R2ONHZhP5>#;NNwWe%OW z4}d-dcxYSlOpi|7WKd1$cKUB<6!>+kW2CR%5{q1N*QNzlocpUtv-H&6nSl?*Pi*zk3_{SgfR8 zd!bd`r<){=d@Q5m-EDS)a6w%{zdJ`-0b41HyQW;&R8M>6gsxmBpjrR#-MwcVVvB1$ z#tA&t)zD@J30{iC?T+Cu&S|EbGUbmixteM;Q6@}Ix_ zI*UHnrmm&EYoOry6#wBL|FN{ac@52s9pLvr|L`>n8`CVJ-iAgGwbh>2vp5)6(tg#G zx$76i})VAvp4h+>_cn^K; z2<_4Zno~gQ7zrSsOUR9F#j(1J4FdbH6N;HD(06!#Vo)NUS#HMzj>FV1E<&jTl~Y~T z^l@0T<6g}g)D^WRC+FW_AVJ;ekA44$#f=VjVp@*0dJ+qGr3Rp*HPJeGO5H*Y z&r`MjcLJuOuahe$W6=UmxGjkSLHzpM@O%Qa(xLG~ARkaL)25Jg5?~s{s>E=$_{Em;bl*k5lsjKbOn(}tX9M_SuWCuE& zL4#`5$c<$Xde0nap?$87Ot(*W1d^-r^FJ+UDd;*ABH-xK)Ir>m= zhkNN2n3o6A#&@2DHYLajg#?%qVIvRl&H6ShKZ#2kX%rSMyrcd_1(Y>H#Lv2AMfh{z znQ5X#!#m{Z-6WcPLpvRKr3cxw^AJc1Z=ol$PyCRe>L9GQybst1pK2=B9ekGN1h;t^ z$s6UH_ugA>)2xphA>@T`Es!6=86a_|%fUJe)gm3^Sq5kre?|@lc*hX^}f?4^3_n|+$rQEWSxS@qt_0zaWp1h%~ z?r1QslMjDli4J|1x#~aVQC^ro^bk7X!VhnhG1B1P_G#%=#5|M(q#vS5OZhS@+qtE& z&Ke~}W-Wq1o=*~;f5x$G*F1e=RKBI3Pp$V_M$?P_^P4)BrYfhFkFsEuCr!!QHer5d z%JsWLn3<5PTfNu9=x6<+6&p08cao}+)6Oin`H@zmd{tJ|KXL*cVmN6bit_76i`{^` zW|08`$rz4&tvDAVQo=CBIMN8s+{=Z0%d4ZIthJmJax5tCUs}#{K-=TTfbxw z;97t8#Pf|ZDBV2ZoA}mg@`bk4jr8%`pi4GHep9Z>honvaV7HUNjWSn;!;^-5*L(A% z9HdP`CS3o_(_7M4CMqyO$t8>X04~X#d*x8Mgzo4fuYF{a%AR@*8p1i_haM9*eeyRQ z6Xp}%CS9IIZhdYT-_lo;X5`lX#k3mr;akJS^%%O-G^ITeN!z>$oH&M4@7S)bXK8nE zn;l8b#V{iyZzd56%$UCboQwmki||VsH7scbybYn2jX!fWPva%L{PUiA^{)|zfLtqk z(BpI4Lw)$J+?An~n(R*Fy=Af+ZobRl%pf7BSvF;YaG^_Dl_w+lAx!Auo?q)*s%xgi zPu=`*>UV#Z}7V6L=h6`>qj2{_s3?V4Ots9|=Nw3I-kTKFDR*@h*P@`GKPF zk9H89(`IafVx`rO~alUJ4|w z1B5t=>p1T$*YJO-Wu73F+lgao#st?LNLu^t0EjA+Y3kC1uGMl$P-L44mXIs%i&tViU09IumojJKYs@0K%A-6VF{N__~;UYMrGklnzexG7q=t*4>n)tg#ha zJ_uS0^xHIb&!8|1ZV8kN+-k|Lk6b6`v4SGbzLQ|m)AD=>aHpGzvyX@i4*#ERWYH(|fmW#05y)tsqWXUL63$Xo1^-p@i1uZdKpFKeX>WXlrPw0;kq> ztfk?Tix0Lh#}tA@TGR>RSts(qdOJFWNIv!>){k{1$f6ZleU8svECmJH-J-7LM9bVcZE(0)eejfDc~KUI3%9(7dD!qvXk>@adSvoFQEHxaHqTd@ExoC(v5J zwG?(C0O|>Rb>dn^u)c%#eg-d&IWSHhFn>o*cFF|wxHweZ$*`+|wA5XlEH({Hj$)aH zRWjvvH^dH>;cZNut!xiSoS6hXrS1ftqwm#+)a&TXR5=1n(2u+P(z4DvR5l{lv_F-H zx(Kn?D1f^d$DdDngT4{#JZKk zNNI=X7-a~Y-nn%hOZsgNU7JJ>E;+btEkLTWqn`JG)=k>V&a#K~O~ude9-vm|fD3NA zYT*D`Sl|MR`b1z+OH=)4E5m|oZmkbb;GI0)!=-@~9-(tA&($jcNzlE{pVpe^xFGGL zOv?EuAKfqWZ|0FR>HvPgvMerXg{n)Ke(Ka#CCIOT`M4FJ>$UB%l7agRz*j&!!43N- zt&pX02T*^2MY9}$%&wW98C>$nQ&Kx{VF=nTwy?s-T31(7nC_R0Vcv4BU;4ft`vAav zXg|c7UrW?hWK!SN>)S6&H?B0g0el9ivlG%2++Jcaz&3mgaN7w_dZZ2db#>w*rDfCA zltA{$*;nO57NXA3{oS;^*1nOl3;^%W2CbGmk%wWd`gd`cvIU5(yDkAub-(_K)eGip zo$6P&9dOt_PyJ9`5|A6T+3xDN0yo|0>?8Q5u5~l7Q;U?45ge zus9#gV#zBOBVWE*WD)FE+{APOkS;%ZiluFP=^LLYH?XFkzI(5{VG(9?6OfyJq7(U3 zMs!86u!8Fe_}o8{5L*|&w0{1r%<}*8YJI-b~31+tuNztVTsz#yrh>!0l~&@7I6%l 
zz5x(c-J!0sf42{EOydaF^5<|{XmHnub1`*+dX$RDB0_5i`Xc~3dY>~A4$)Q^ zp#QV4wOK7h#ur zei_~P_kZ;{KB{#`b+ax^vZ(u~Zy)FEgX>qP0378FE_9IHl~vq5(RO6X>lbrn5_z+4 zz5n2Td5sGb51#Eu$DFXx;6b12%@ub4oL~^(fzK|WX>j0ROE(+qEH>^nc9{t-!)V^) z+J{BFb-?s>>U$XXRl04nAMM4x#}JE|j*DE3+~kZ2cZ&_OV{H~o>VNvfx8<{sK6GYC zzk)ogW2k?}D7qc7U!7ZBL2jr&((qglt&v~6dWD{%Op9Mq)gH2p{Lvn^abx294R+2c z!@an!$<7VZ*J4~*+T z9BW>Ihok5{$9r_v=?`7}86UmE_y-p@fYGLr*)?UEU?)r;ebn2Pg~*zNL(eie$RZCe z#b~>k$1$cd?8=clItJMFHIneUE!v@fKA>L!-7d?A8|C+NkLVBKJAm>v^vO0@RKI&F3jOMJ?>z@f*E6Hd@0lUHVN>vJt*u%)FX4!VC^=2ustMWc|*vI-F2gD#q2 zXWaVgQdvVsi#6z4%5kghqzz0scqRn?#>G z_GA|v`eJ}`j*U(cQxJZR{+xadcTwt{X_$SEg2Z8pV{wr0~ z^_J|FbnYZZ;sg9dA^0p5LZVHOP`oDq$QxB#sPFc3h!2~Z1EQ7#&pfO)_$1^^-ZtXiq_(gHQg?_`O4 z0j~IGtxB&BT1kqyw(@X=MB-9~Z5;x{b{r0N18xXdxD|j-ff{ozQI%)j!*!HaCS+`% zyliEVVmMhS;vy}Sp(SOqPUN+dZb3ZrkheT*K}iIVm&ZJlr}8^#8P2%mld^<+#IdY` z&h?%IB#dy>3k|AhsV=Wc$H#QtAxA}BEk3o#kX8#M7yM4)YF_$VPo7P=0cclX4V!R; zwV==_Dd8t+1jOOFk1Q)wlh9>-1~{UDq-?1;%IZA{gcljn3Z%*d_^I5QP$NG>*&{`M z6W@0=p8$B=(%O=*#i?a;(8j>B!B6S1^9#6=I^>!-Nn2j>8}I=I0ha8KN0BGDH4NHyX)bV$&8$n)VrR&ipK!+v(LCsZWc=z404m%kz*V z-vIB^wk)F$48gmn!59ficA{BgFYXt z{VLZ|$t2zQHb40VN%pS}iY=01^GQ0UZ$0vq^4T{U#(1GI6=^;-j^S!_(=tvfny2zS z8S!m?21}~kA!BJ?C2TbY6ni36i-ds)m$TjyF{TAvBed+H<$LgYg$h93A} zStKv%SPsG^6X>ijcxyTRl4s^^*=k%e<=U{7W878VLR-~E4Z7s0VNKJI^qZCsEk`aJ z0a8%HG$ZgGmKZl>v#u?RJU1=t+i%k`A9XGF{P{XGntraiFgyVw7YV?l$P`RC4HjSuNI>br(*hVYxbXlq+oPVF)Y=>lNf za$%f;5q|-IyvZK>?X)1c&XKulWchqOHLTIjAbsyT3A2}ww;tKqZ9Z8 zUIE<0`ZnV*S|easEuF4CN_q5bw%i0{(V|L_N_`{{(@xtJOxD6(-_Rb|IF7}S6SY}< z3?H#gcM1?{2g^f!u=WAipV79o{pQduv}sis1u@XBq-#`gym_#{wM+t@}*!$jFJF6L#CK z{&8(5%D$k7huJCrT4_#CjFmSlYra92bip4HP6rQiYwH*Uj04z*F#SOb5XnpHW&t2@?4}?d;6E;3+?k-;AJvP%sd9e; zfINh3bi!M=W1nN~seTh^?Pqenj5T6M`#6@NH_F@Dx1l*!j#&2!+zZf7aAM&6U5v=t`V=!Y{v(re)7&fWT z4Sd(SFrr)y_28<5TD6Y-a}i9L>}Cw8m9O=p^{Bg6qK}d1bu4{9eQ*9>*e*UH^@3xx8MAT)$dm7`13D5<-7_P5znz?U5RWC zu^0rK%Q0?!+Q0==Bb&J6P!G5$If+g^pkFxU09h@Ux3GBK&=)y$yGuxL$iw?|2 z-^%nZ!0aA?UwBJ{J^*AAjP4Qu!v)O*Kw1YbVt4R!y@30Pk3V!#i-SC&wF@2D0f6WN zT>L>ZHWLKiz=C)joh(n(CFbL9DR<^5*De-ZlK;>sAS?3YBBSlB1DUZOJL6CK-9cq9 zt4?)Cg21yNr*+cWCWwlBqQ|sG_rOSXwL{7c7Iafyh$(annp$@*#OgA_`xO>II$4|?nY5(jyp7}Ht1g|iZXQJLM_2@W^y6dlqYjX7 z4=EkLqSY`i+X%lxo0vj>930@P2;J|3`|%Dt1dx?MtiaEav5Ry0MBj-kDBTB*pkH>V zkIeokn2XedfY+C2_$-J2x^ioU|H_~~&i6E8pyG}gA2^0^jPMG#BHN5-+<~;X`E~i_ z2k)0J9=w-Bgw;LI@%8Vb%n^KcLGqk_T3J7_Zs4nKm0B?-7y$3G|I?j~*85sNyCCfN z$$Fico=%yRITx>2k!599zt^W2QMnV$1v%Sb8*%U6x<$R?rUjQS)7Pdl2BhlBZJs}R z^cWuFU%%CbFcvr%e`PTQy0*|4)}7Y(qqJQO6qI|%N`15o^@qBnn=#rbyD&ODCz0^m zCL#|xZKQI65Km8vmuZv1c)Q$7BAINhM zd9&W##bLXCO}lVh=t^w+706Ats5HMe?v!C!|j7{Z{<3(j!eKF+N4eS1W=<<+R zWqNEJ_eZqHeGb&5=#gdTr=Gi#goT^^h$ya(F$rTLO@trweQui%@4{b;5g4rg&HKfVfm(^XCjBz3wR;B7`Vmik9Q#h zWfAb28!aXEyWG-Rx4Tp@*z^d~fyF`_anBw2!BIMBN8tK3uk;C$*kNlGj*NmW2Mcv=_8-~wR+ET_&0&V6WUt0$yfwUnbl&5|K zW5@&Er`?!EnBQna%7`E0XAy;duxdL3+W$r7%{c|5=<5v+u1>XhM&fpLJ|l6AvH5*JLDhJmXJ2>Z1r5X&6=ljFEVT;$B%G{rJ~U zI^;{zbwL-Jq>DO240tB(UYNJ^);3XP%CsB2C+`aeHQ{$9hIa&)cH`$`UCI!7=!f+9 zB#b<9M}@rC;({>7Qb*MLBuYH^ufdNDz|2)%NpRzkiarb69AmX`bJ<% z_jh|t$eaXN+ChK@CsVrMa4_G7Y}N@c_8@eGPC)cEtyNvKekJCB@Eg0E3}*n@(WzQi zc}SJpYolVJZqOg2y%2}7%g>Mm_$rqaf;O;<)`L-pnQ&DLSpByfe-Dc=C-X{XNqLVP4ySKY~yL)*Y!?*IIykuoy-9f0=r(Ce4#8KQGIBZ@a2}P0wH)2EgnR zgf_U#3vibb8KF=FzW{IfiSz*!p$J_lLZOS5h=oK6!UEU@FoT)tnVz;)UEX`|^gO?O z-v-|@@86_3dGh4R%wL{7i4WG@G7TuU3ZSX%xm|e}KxwIsc@AOKdtLY}?I%C$*$KZ^ zSo)aW#>I+0Ikg;|2y!8(@JAlW_lv8O)OjzH+_pumzzl;7xO-v{VESN> zc?P`b&;pd_?ko^sxX}6^`kmbC4oJ{Vmm&fYS}EH`+!Y`|CCcRlSdS*S-%h6{FI_c)8mF?ZSQf%uM97krb zq}1I|A3(qvu6tSB_u|Gt%SfOP=yvW2$Orh+0qCsXTyg7t6T?z@}T3=c8LQY*! 
zgB|fn|HUtUQ7#B8SkEQ}Xv9DHtJme<|J$EazsKeI(--BvPd_XRFJ1s%A;-kmFY*}) z{QWO~jYaKHnWjFb@vZF`VORYI04V51B?Jh+00h!E`#3&}kM`=}u47naUjjU;Uq(sK z^B$DbR^|@^N!H6WI#$_G4|^`aG3_7=sMZU*N09N7_HSF-U?Egr=PnwJ!%Ov-i)@F~ zxo!q@q)SgHvQN-7@of!QN|(?pgu1^nt+{Jdx^V-8yCxHe<0t~!|uQ;+bq`5XXu{m5WUuY zifj?ap4mkNmCq=L3LYMEKp_hO2Y{I)SgZEXwoE5GB?KucpSq+6zBx71LRgDecc}Df z1xx`=b9#EZtOCxu<3!v=c~u5IP;hzz*C+u0%I5^v>Yj%WW@$>7xbN61PaZ$u5f;gm z$3DWoUK&if8@ZbM!4qg_Hx~;l_Nlt#p$ZvdYGuxtlQ@w*2yA2VJ_iRvgL>aKD1h!9 zd3q8c5nU%QkEpXVc%Zuy`!IEo7V3@%)Vpi!IN4#>j0-d#bSpq>8rH=CV5A^^&IurW zWhAsv$pO|$C-igge5@A(Pj|B&0Sw=t9wRJrfH6}JJ!MzVOWMNwckYzuxUSO2JWk01 z_THHo1_;-8wY$PL$Pdp9&Z`1&d3|><(h5 z7qpGzzUVmzx(;OVp(TF8U0l-&UH|mDNOFNvQ2*kDK9%(E+@DRq)rZ_TPSAyqi;ChV z>WU4(de4f{g+~`>%k?5}_RWqX57|Ycesa;^`a)mjE{Y7557@2K&N%0ae%3x>Wep2w z7Qx)PXLwx;Iks`!wgp`-kaYDRqb#;=(cVu9<1Qxkoi0%9<7V&Pr7pBwMlaGnoiEw{ z4UZ!Sq-FnoaKxDz_!}Q&u}b$ox=_*z-CY&}#g4mPy?PVC{Nclg(TB^+E0HJZnM8*S z8wXi)*GxU^8di{&XVN&55!654#V@)26Y|oBUZr=(6{Obrt*uCJMB}PR2TpmtAN^evwfZ0Nl~iiX!b`oTL6~Vdu#OV>-{+ za6qHmopBaE5OLZ*pf_!`jWLOCe6rJzag4hjT?DtyWYGtmy1u@cv6uelFQ`ZBJ!53b zr+heGK`>G18iueq#n{8~ehfW$=Ps19BMugYrYrjaWMclsR|#wT=w-)8#^>l#by(6S zJ;#a80hozGnWfH*fQd6N0+##8cM#$>m;~V)&iI1I>E2TL4K2%a-qnDKr-G{Ccuc%^ z6HVSGEKd^NG?N|_QJDcVa7|)PhD$su-iDCg&1(uDKm=h+c%D_43~-T;qy|X9tw?|e zDl#9!7Y_Aw;^08YsilBHJ=lwQre}Pu9yvYQReCb0bjh^POf2$m(Q2B8B`vys(L&K} z|1EShkpw|c_+buly&`}^LyOh9fMF(wPFUop*)WObFQBaxOjgcr+8r=DX?L<^T|vKP zmC9?k{y;-M0O|N;ujL}2FhE{k-RE6#9iXma-qp7KWWPg{wknQsYn`ruzrv~?lXw-oB9s@@|mB=zfrt~uEZx+0L|o0T52mk;k~k#JMX0iTGB?O!Zzlu z{|se_2B61v0Kz7ph#5eJ1%}kQ0|oMn^*n7myCE7mR9(_&zsZg=*~ZB+^%II}egUwp zkHj^I*OXbgkpJ98M!1kwhWoP|DIA3J&-SGbNK*deTK?1&h-3SwPLw(M!*BIvOHkTw zGhM%{-Z3VbhA&d#Un4%($}!rT;N+tYG=H)fcVtI8bEh7D=9ha^wD+85tYBpM;fPm1n+^aO>LvzIoPiG`Q;f1Tuzo5~`a;GR2y_ z)Onfm`^`P|Zd&GDb#F~e(AxU-sma&Un=Vgkawd>>Ng$-#&Ke)n=DoW@nhz^gc`PM7 zZnR4Syo*@ln@4DZzT*oHNy4Xa8=#B6E*}AMZ8OR8P%2$dcmR}#b~ z7GX{TlqlVb2;4Z45(E-ha`LU!xRZV-00P2l4KZ3BEt$ae)k!Gt<-0(x)?k?o1E?G@nHTuiQabJ(0HOrFU0~3qi@@aU%tYLd z2!g1+{BC0rN2~3C7L?w$attU^!D#D9{YWoOS=n(+dFod56WlB6|JwWntS^{^1^9x$ zwjW(*$cGj3ZjKYB+45_JsYR34lj#HHJ@?w@$dj}W>Ma&j4I|S^N+09dVc{bv6vhGyq`tCnAF<#63Qi!xZRjn7WwvH-u`v0$bS zuqqTdeGRaub@HPh0!C5xSYyL?E!5PjTD}RQ3g+(vG`94ARL->kV>az!j{5KQAg@*c ztycW2t54#?79iL@Hg!VX2t*2IYy-Y(NvMmIVZb(bDXg;dVfG%|wE@XHne%nvrlZ>l zJ79blhmz?s!1i1Z5d&xjP>xZG+(2&h6MP9BT7cV#JFtSjVW+gbXzkPzo)I?S%#*+R zoO*p)ma+QX*k2K1hu;cRp6-VnH;3lHzqvjW~ELAwHorA_H(z@HpeT!zY5FBY+g zWPydc*JTo_h;RU{$onZ4noC=oi)RJQA6p#eyft=r+E02Ek)3UknqHJL$s#}&ptnD>S zxH#2S2nQ!?l}-BhpFECkQNv$hb(tM8(4f!e9omqGX6jPvf_GOeN__US&&r3R6J_^* z{fF}On{UeqfX>>|3LxSU?H_$h^(kli;C2+;55P9}_+hyRn0E%_=pwBW@cR_15Nv}3@_eZmP4$xMC5>jufiJ;5kJQTvm>|1bZ9#r+uWB|gOBmP2R< zJIfS)m32ShJo$=x)Jj&oX9ciPuz#SR=g7h-3k2=$o(rLMD!}$V+7EHGhVP&(E~K?W zw}yA>G8Y&2Gug zG>}CT^dxz>D7-IdPrlwOk1mwD;HIzTJ>D(jvZI$i*>btq78gmU=utr8bLI;AhL&bs zvYemqmTmatV#+8yefxHq1El-0h)1tbmo2p4b7YwUz*qYQE&2gyvD|-%jGf_PW8-9= z15Jm~5&dQL?Na(q`?@SRsaxzbNUIx~JOkpKMF#oY!2+0LU){j;)1C$NlwW=Cw^;YM z4RZXj;(>~Qnzp$M^eV+dH^6&j!v$LbXm=&`545uTU^u*RcaknF1U7Z|Vjt)NuXKq=T;7jl+`7a791ydS-xY;*zsx^uvT zn(t4~l*iMnWpVdq>LEeVCypl^J+7hi59s68_Majj&fCY1n4>&rS>d7UjsihZP+`d3ECl?|+bKpL68pt1mBYO1o`T?sY;JtWi{2w?n2s-- zzOl99mOi*?Kr^r3x?(XADyOultbI@8_ZhBBwA2=))biTXu~p6tQOO@#uK|(+FcMZ^ zMIa^vQ_==dQQ&hxpLwZJNLs)g5#X9Q3bfYYtiZuZ8Y&=n2V1?oC%%AYxt2c4gffaC zIr<=PVuubBtspX`bK*=^0Rd7>S4E{&z=b#rhHtbqp^UBVq@A)d%#?0v0<=yYX*tik z@JkwR5Mr$5Z?QHHAIx03LksB>wt_XpAs^iku$)Fbu3ZX+-$@UirtHupooUcPo2k!~ zh{u$()&W^mh7sNHMBs_CRLf6ZxwdSfl-Gh62CHGh3**qy5)pc#hjK=aT#)c%1kz6O 
zgp&?CXK7|eU~GOUgECFn1h+2hRil0>A2eFjU>C}m3Cb7JanP0U&;Ze-OLQg>>>6g!;KRhyhI7d1)jFU^{s-|6K z#W&vD4roKBqn^nHH?oLFrch~+mPYc)lQ;%QK8?75&a)T>*OVs{N8vYj z&G53OV|-;)c~AQ>14&v-NHl?H(FDYYC5)Q+oyxxI=PiWlqc;iIpuw_gp}|;eTI1&Hq5GNglcGav8D<^ z(jpSUq`B&a@G?4#GU;EOWYqKNi=@x6sWaTA0Svq!kLITPKT_q6-D+x(N{p^kH zg-`Q2Wy_)h_bz4z+_xPVM)@@_V}zIHsxAV^GR2pC%tfy zFvjz*e!T%ul46WX-76Km+gk9$uD)3A3}LO-{%satx7{oCK0aBRezj0rjkI^g81q z=`@`G43ctl?XQn=$v-~fO9F9|GFGBP2bU?Y)#p`%j?kAnH39fyQqb*Jf_(cz}U;4}OkmOHy3BYqGlmaOEc>=tJh5Fk*i0G%OV=zsu(h2!OWl%(a^Ql1@y*}71^${= zSvC@*D+3e@IcLnzbrz+NM-Lp;LQ4Qi_aMsL`fdfTMgbN&IH}Y2pnlh9_Y?~wF4*j1 z5vYuH0#@8u?;(@CkQcSW0IpMX+px5>QGWHsv+~IYbFt1^!Ds6jvf(7yLt+J*l*vnY z(+z;9zuH~^EOqB4pw9Goe>vHALd`r19tsvNzIt0m*fy@86OX)-aZA(%2-_$4#_j+J2zc#c*D3B0di6top%py3 z3-=N`%Zp`_g@vKv;Z(^bi%$RQuYOYg_-vtk@x@mxx=jHz;@6ro=oVpSr1}H?_W7&w z!K3?FlH=9_9eWE{b^^HN;hv5G)DP-Mfua+5<&K~|0Pay1vbwRB)tC4#v`=EC>+X{o z_^5Tfps8%@V?jb)p!K_d!^9I1J_o400n9zua`$F9?ZffB`bMC(B|0U9ANGYHwAlG+PZ+HeAtGRUk~S80Wckf#{<;4I~hFG(>hfy zbjjkPsQ|5(#2L@gW^A`EWOT_J08RTHL2xZh^~o#{w#5z_!zj4D9Uj`h433Ng8df(|%^f-;qhqlO78KT^RatLChV1vwNYgsIWc=6-e*@ehrvVqa z??FaKIR9i97bxp%>~2w>pj{apr$4@g-bv)le!zoLk62(iI^C%E$d!E8HI@t3TbEdz z!`J7jceebc{Wzl|KL^Z{f?{9bEF$_H{`8yrE4fQ>XCqjCDdyLTPS=<6#xK007*naRNlKlal+Wha=1fG_a`}b0$FwUlOS>hw%LV;+-6~vwx;#_ z6y+YL93IsC;?--?LEjKx+0lr=cIMG79q6}!+$=Jx*E?C{m8UKu@S66-q9Kb$&XpZU zy4Y!3S;rO7F?wVeT@at|l*_hn9?nmWk7y78+e(493P&4JfYA zKLadQI%ai@=gvpx4eqwob&vE4u-jI88MAv(ZZA8yMse}v0=xa5n6G0Nj+74nbt{%>A<@JGROZ(PDU1`T*^IuK@3 z1`9arw}1?3)X9zSOph0tz;O+u36%FO9DH&sfI>rXuqX{GWCtGXkxCd2N+&<9S{$Uy zOcYle10a9!DV~WI*P)BoaD)6BlYPLg3WAWP?;6&WXc@wzd=2NMTdP

    GYFHy&V{Q z(~27*ruXti{xZ;(PN`$i!%Hr?2JEbW3E78EN82=V0SWk#vRSSgl`qmm(FvabqkfV)9KzjhL8`4jG z+CYrs?4U_!3ryGD4RNZg1}DL$=^qve2%NR$rvigK7dWO<$V6YcU}fG$BP*y}{_!XO zxz9wKJac23$y;9fj`Aua1gxNi4Y;nXDyk;)ZlaxrrySJ0_|H26!P9_Q%Aw+b%+N|h zvGQNt#h9O<2MLm2+6Ym@JAIy-4$3><$Rw%cCuB<=0+o7_AU-ZM#H!r|nuO#%@@b}N zZ;^Y$@m4-tZm!9o(ocM8X(|Y`2<$@c{a6pmnIM#P?+2brV(9f*+87T)Ta^Q^Ykrj3 zn*?*BW$;u47fFP4DZaW5E${eB8nl6 zYeqaIT>>@%S_QV1NpMoPJ{odk8BHq-Y~~pnjBZ&QbR!d%qvl@QQQDx-BE!-k5BQ_a z)ixrHHBJC&%PYU-t27$ccA|Wm4m8QLER<30Pwl?ebpT)Z)A!zU0@{L#8nq1 zD)&jj{M5^&#)E*W=#No8Xu=sjZ|aw_V42kSgsH%wqG4t} zuX0iKC$veYx<9f^DBD0nlRmtZ|DiEyCKZVY?dFPtZN!%@(u!=*1_WBjGb~R*rI&p2 z)Ntm59NE974ONiGw3Rb;uakA#O6`ZGoog14A%I}gXITjo+K6LZWmx(wFYiLXd{n0> zPdw!%*ZxW$&!A5iQ1T~>I^hxFODH?Gp`WUecfP@J(8kHu-OS%Sndj z?|1T5&T2hmCP^4ZKh4*M;+lA&gKyfTeMeJw$j7uzbwE`{gs*gjAC`kbS0-tOF-7RI z?D8f3MDs4Zf`{@l*YZPJtWRmQoW8S+q(i&%CuOsZ6Hk5{*Y$w#IJ|1`M}C!iu2&=lU;zKkcI zQ)b60=tE_|kbX%9-!-0zK!?0!OjpMuzOmXGZCJVmNG)T+Qp%*mGlI$|M_%8*dBfz0 zmWN;DyLYFttlA(SfgdKSOq6oK6zMp=(8~Fcfxnh2r%ZfwlU1!#0AHEhTwS|8ni^t~ zr~d9^vYGA5%r6yLsTV}o+Ac0Dn4~#L>R-3#TP0`CA{`8E=*m3D#8~Fv3q{<}nr#vUnkOME7T(4k>>(+87%tVJRZ2e~vr&|i$ zT{!8seg*GhISvo?pBXUP9h;PCLQAvRyJZK^=p6mLiF*cp*XnN=v?~)@Cup%cN8EJb z(TZiQ?b6AglTI_2rUSu!fGyqDbTYNwC&)@=(h; zx1aMU<&nK9Lv*AIbXw)ty4K%m+%^zMU*-c$=Cr`m54w|Y7f zhu?fr<#K!`3%A8gq91zBkQ-9ZR?JoRJX_QfAoe4F~CF@d3d&x zV-6?y{XOH6dBHOqU?-E(-JO@br`=HJf(_Evfz_S9W_6R%OSCeA6mfUD64d z04%`506u_eTU4LTvh*OQo%pnN{A9QueZT^W#2pND=HP1U8Q!N4pibqNx}%3f_Z~k0 zIK9;x{xTMWPVm)XlaoWV7v=uEJb$%P{>{$;2I1=xfYvPrVK-Rq{o?=r9n;&3^2uNR zsI;)CvCpE%)EL&SgzM&?L~K+^Y-xihku=UEA3ShzZ!ORlU0rbX)p(7QeyN z@g1zIJ7@@_=!hHyNTK%*T)a^ioYQ7c%Pe}w#UFQ^Xcc?Rm_)E4yJpByU_v>UU$!@8 z+?@@uAS!%lpf?tYQ`D;fvTiyA2i4(!`0D!{M*88$PjbNK36_!*Gm|XD%#_2;O@Jr`QBz?&pT$(ID`!4>VyWy8Kef#uP^w!+{ zJAgfP-rt4p(j|dbnSEG;e)Qxa7NZm4-wu|KS~uD^9b=WPtAjl(>1~@M04xEB>&zV_9`dK&6-4ad5JeA)Ug9v&BP^{2%G6;aw4L*lz497A`j@y{5L`p! 
zra#k_$%jv#phL0nr*E_!ct(Tqw7?w%EW8LvzDK=tL_KN^tJ{-$ z@F^}kkU8Bl=~G*LNZ`>?lWDJD1^w|yv>J4en2-Hh7NlD6e~h%h1w?fLNq@{aw*nrn zV_olI(POk<^RmyA9x)#4%h8Acrwh^UK!E)8!7c>!506pb_*_T+$0s$!bgY7GN9R@R zR~Lu_ywb*$4Tc+OZywa$+CiJ({*r|h!Qc-1Yh~F5k0GpYZ@_qTKcXI81%S1k+J^h7 z3(Kai&`uU47fak7(b+eI#V!0iW49PI ztO1Z8vuJ&Y{Hx<9I1E$Wp}uwgEnyTj%E_a3pJyI;<~c?M1NeUx3G^NjNS z;rkcmXFvWd?ey*Za^%{DSj$N@WarA`#}5I>S$IPpT=2L=&RtA+v$Rxx_2t*dr3_ab zjN#aoTA~sxm%y?IhsTu`?*x0rwzRPJJPY04D8GWV?%=cX<{rL>OCc907)z4h80FHE zdTsTDI>dzye_C}rJ~2n@a}`T->uv%t`Voi6cs@cO?mj%McZcgi7K!Y8*49?&i+0m5 zt0Q;N#Vag2Yy-O6kCF&GFb>Kc7P(x!8z7#GEiQWKe&!q+JuH~4km2_7gAYGqu?l`8 zGlKQ*F4T?NI=;itSnzuM-g}X84*-46;<#=|lzruD0I=ILM;tF6&=0m>c`!QsftOV(y%mftxpICeZ6i~jTD;Y9)H``FIy*Wv(>?T8f6dQp@>nIP!I;BJO;7H47lhB ziSkA8pd!A3M|jXa>kv}zgnq77PZQ?G5tSIC7E6jN?QOP#}W`4OFw7o=oc z$s(8mE5A)Y^~Dq8RUYx3^hm>JUWYHX?HWF9(TK@L{?yFv2xM;*wtTU&441S`ifhtK zHTgc#_~&yTCNA}4$-S07%V@07CXcDJ@GX_tpih7?x=Ma1^Uy@tDs$3c0)7ZjxT@dn zw4~AJgd{_O%*v3OZw1DzQ=aoaZNz+(8wo^~EuM9tWmQeAh6^aCJ|wPvMcSl!1lh8^ z@k$1y_{tp*BH&&(WQ9jgK&wTm5fYeh%9oFvvMw!SEkk&ocSa_LGGjRj(#W(%O_^-J z;gT;yr(vWe{aezu7L+;4owUqL`Q@qXYL+d0H5KzxXHh5CJGw+&X!aq~^mAuANs_i_ z_RTW+%xn4Pwe(r``Z1lPAU#Qd@*_K*yO0GS%gQHxNYXJb{I@LhOUi~JQfkX?pGd!3 z>slG7ZlzE1j1M5&1Rc|!Y;)4>w+V%RJW%fVvKn(iKGEZ$fy9(s`vM|Y-J*{0)<|AZ z7q(m8ro8Y@-5}kS(aMs+$<*g*FO^5eQb$%9CM~TItwUrd?S=p@T*`&i1vDiNX;tvH za>Qi97wRu0`5+`&7G=KPC$86#Z@%-wd;a_+IZI*s(of*4C_X6z zX-T)$YM!PoknA^~Jo6&?kcVO48lc5^iPKIQde|0jHCtC6 zLMG3g=sB5JN(9o*&=rFJ)mL|4tkTURTXzYh979Wjo@4Q zKmEu5zI0NSU;g%=s9X359jDf>#tjIUJWG1=Q~BghxiEdZh%?5zw`W*k!7pXhFschh z+q%_q(s0?X{{9q}hlkm1kp7x+(-jXQ+p^D)5t8g2j_ zYtN+V7Xz)WmjJ6LfP3r|0Nk4Z*wnA{1=hw+wg-EU%2Pn@WzGTk@V%LG z21tFugxm?cK$C8Q+5v~o0e+^j7R^Dh0C}fOZe1+VBJ2PQ=%L|Rrab^dfGSiEf4E&? zLFpXOPG9OB_@W*GK);=5hX>`{US6VoF~*>5jx)O0G2kJfF0QGD&JJ)_K^yHN?t$}d z!%-d=5846mwB+?#{nUoM_G$4qzrZaMe(RI=DtK{?u~Bp)e2<M<`3;guZ*0k8Y zC>y&wfLg;@d{OGm!!}+Q4l7@Ezak)HyXe-{1-fPip!xu7zA4hW0n`{IKV@*&16b3> zugi0G0w}99>w#r?6 zl)F&ku9g8T#Q*ec9-xpqWjDbmSl;)KPLzEBq(i_q=l5U#>1o;8+yWSSkDUhiYDZ6b zh;ei^AlxPXh;MGm1G&7SP6x5>+{c|o3w5R+YImPM0+5`0?{R74fW80xfBb?rwo@K| z{BgNEF@pc^l`@VGasfO9k-DdCl2#}5_))KGl`Y^WFgl3UBCV@jAcvcPkM0!eqiqQU zo?q6fQUl(*5Amk8#Js zZVmc?x33nWEBcv#c~G*hcm$NJPsauJDK2Q(<^;0h!V3NQ;@jutDOUV0%4v}~3Ro;q zesiqleFr=^Dw`Zet!2Mf;kyA-cd#DbDF5zX|D@_x+P#MZ>-Ir+1-eGj?T(;*4{h(f zlf^>Xg?)k6u*c{?+neBEH~bf56wKB2gaBm^;Guexe_2GcKRw#y@LVjluaD@9;qw@@ z=zAFfBAm*Ru;iidaoewLQypU+*~WA!qaSEp*0_+fPq`dVx)@V;f7zDRvlZ0VFSY$F zDWU6FG_XA)tGbw4T3jv<=N@D+i`)sL?naKqLLl3=WmrL2-3j&3UiIy*<+?jlZqQ?n zN9=2DgVpr}dXv0~MxE1c9O@8O(0+6jmrvA*#SbE7(p*-?cU8o z>bKv%EKi?eUG72}G%hc1AQLR|PvPQ$&vE2pZu_V@X<$+iNhn> z4$NuA;{2m zE`nWA$H$EGyuUw-K>_?;W;c_db2Ws(?G?LK9HV)7t}^T5usc9B9C-ca1$tiq*)~?| zuoECyzFUQR2YY31ZjQco1`v6ptSl`f>lhHwUmnor9p7!ie_gHFw+#)9mfq<;`mT}q zsXyN0%!--&EY=M|zvD^98pzMZ4eoT1Nv)#?0kqdQ)>wGh&LWd-O~2jlG}>Snfd0cB zV=uNZpijrqJMfsD3(A%Y;x2AhqXWkZ9+FGFdL97v*N3)&gW(Vve3_-~_c zU2ryTKl=0cQEKES`A2}<>wEO^HwYC7FRlm4^Ypjh;-aKMmf|DYFGp}ld)tVZFZ<; zEGXxy3qYw!%2#Oym`=HKZ$Q#x0GaZWUR=PCOtrRT=xL?HdwE7hQC{+6#ozFm-i zARj@~!YUT70xjVg@MQMUW4%+=huDq}up-md-uM_vVK<(D6x zCH=~)`UHC6lRRYz;3HUD;~F~oqUf{P0vYZVrMZRXi ziTlV|3arcs?D1Zj(+(T;VmXxsc|?8Ja*_|li=}+BA*}r3(*)3@!vS|y!agT}w3CWi zRDqL6{D!CISKFZWh^g!75`q%Eg65UJR0HwSzD%m|0It9Xxm)Wrb>TCJOS7RZChN!~ zX*LtGZoF>Z}wmw1( zZATj6N%$B(5IXspjzI-CrHMZu1YG7aWvkx~sLdjvh~Y`w&YOOl}UD+`v3Czb{NMy5#5iCy%T z^bkL~MxLVo1mFZEZx4P&e%~tCP zc?pj=Oj@dM8T3eFtqan$9@Mu!O87{%H$JKLnX;26{h~D1MNsvye?H+a^_Q1M;zF6> zeZ~_NNJ@S9Oj*r`Fwr6Kh>5bhzmjVv=p*vQc@V17V4kFvFP>D( zapFY=trzl48zG$EX5^n`Gd%h)MKHX{$`RtoGkKl()|2V*O!;gkAu?0-uu{QO(=dt& 
zO1u0q8|9hr$hPv5dNnAy$Onk8a+Dk-qVWk8Ny|5e^CKVYB`KIsgEsS)-dZ#=oCoBkwGb zn6^$%&_cXF{lsk9`vDGnyp%aN7Yb2z@NaAd=n(g#DaIRC&zvZ_op_K*4`gORp_QGGZn1Uz>-O#bE+$G$%C4vz zfm;t*wEmP%C;I{~PM%?PCcgr`qku}LdjzjDNh2M94gqj_nD`1b2{yC?$a-CUuU88} zEu@vOZssHx#6QDgyPflmRJ8)E;UPLp-c;SB+zG%)o0C|}r4^U@5ZQr_stee4&{u(u zPIs~LZU|s`iG_vTQw}tvkD9wbokKnkuo~-O4j_=DC8%JS7Oo>$l?wijJ3*xmb;UEk zyi+~q()fFA2}0x)!Yaj}fzU;Yjqe#f%~ zEIhU5$71q$D?Y(ppb$_SpB+c0aYunEqg%(@;6XRCbh5jED;R-ZEUy7Es$K#t1W-Rg zrX5p^vxwlNx*c#+-?eRR7g&}e7x2}?I)|~0>>p;HM_UX$8I4>XEpDK^$?w93%{DYzs8Pjhvux{GCK%K5Nvu;iA+3A}f;&=n zypY8%+K4V7x>&%`Ws{(j2ijTxg8nY%xcj7+b~pi8w#%-8mmI{Y-(>;ZZoumi4i7v= zzb?IgorOn%-$Qio5(|F4=uWL}-Br^8P|^tybO9LZL91_{zlsZ#eij_{tvi4f@T1Q@ zFAHCO0l;)nmUoWI-~GG4V}YhSyBZ!pddTzb@=w3|Okc2k$)uFvb!T zaL_TdTAg!k0M? zBs^q6Nl@?jbe98Z@8FsMS2XAhq?m<<%`*RH8LRniEQU|Y!+UoLCwP8PmR48zTL(bL zik5aOsIAPXvjn-O0mhU)clA8Fdlwy9%HqmSK<&Pvo>-oa1C+J2b9m=fUGTZ3PaD7r zKRSc5pHNp?ak~g-AE_^E-7=``v#?8>>S9NRI=maXC@n0C(yj#2ol>-eLEKV40Zc(& zf%v!!g1#Zz{x0R{p}*C#I?pL}`p3;pb}xC@CX2GPl|BJNtZ4`FqwMaSBjnBxRrJEwH z-rdpS*#JlIUWmG((nJ!fQNKix9CB31HFE` z6dhnY6;O2SNImBY%qx<)xd%JsUKj7HMvgzaBtbg*!N0HkdbW96v zQvh*{x^rPnS$2%G#Ue`=Jkqt-4vXX0$cQe$O6mwK5r{E%no&%|*+n9a2rgO7 z-9~;KD<~B1;=H9U!W;O~dPQV-WB;vdzgxzhF06=!*?+Xd6aC5W!($Hsww-$}j%`Ah zLN3VbW8Du?^3kY6nXt{eVC;DcomjtPWZ^`c7c>nUhhg&JCKFXhZTJ-`Bw+L86YjX>3fNb&z zTVe@l_by0c`7D>r75IZj!sUQ5!U=R&ppUpI3-K&3aV(jF9`KfHA845yJ{m?|`7Hn7 zQ@{)A#`2PuK!6>IGzPp)x?IaA=|`9ZtOH0^!lkr+q*q_<48j9~CqG^>Vs9nUTLJy# zgNURvGEF+7#-x&fq}51`xV6m0K{?dJYT$u1@{>3W_Ozm=@J$N1=YySIyi6x_xXTm`s@%fhZlFgBK`9>m_ldhjc7!tDw1j^B%(DP9o`9TEnXtTto`#d6sg(^YFp+ zdFg<(f^^Bnta%QB63)c3T2tp?P0Mn5OZ_A+@s&683h3(u%0&~Sr<`0#9|~|*29y{J5FV(6YI`?nCwxvQ%hSs%Qs`N1ciTNc}3>YS)qw2)ruGAZ@C)tEZ* zn-r8KX)-UKXEC$E1Ley0;3xD(ro1AYX+$rW5zmOP4)LwAj1iqo`bkG#L>3Kde)8X7 zr0*Xcy8VM~A@8V$@Z54Tcqcak%ja5Vd06|yMtaFbS+g9Z5h{>d%OpX@)7N_Ho^(=1 z;)ajV82X?iby@RBU*dDqf<6IW^?rDs2{yS7Ru@hiwn2*>HJh3M;Ko> z@`$@R($9N%=OR*-PZ>)f{;VtOyV+(9>x0Ob*Vd&19)h`*H>8<8`lLWEN)$1y!??NSR=&;U>nDk6D zfq5Y1iJho9d^ zdJUPZ`lRL=*;gJKbcD)eYnqimzN_I#``w3hBd_v{aHOHW3L%z*kGxa|?r}h$bvy(} zWh>Gx$r#o>Zdcaz$>li%b2>wppq94w;{HI8x{Zm06Hg^oN<5gTrIj%!ZA!~LCqU-p zp#JhqR~qIik7NOP^WF)73)<=-E!+G!5zxZ#5UWnDo?4mUIf?COV%mnGgVxey1^9=L zXeL5}+X7C4P|D0DlMMm6vx7@4KbP71dVuxS6$g_Z#=2^#X9A0+Ev#8rn9MMte*6f_ z_+v`PBncV>Z~K`nYke$VorHLpm2^v@{$_gw9??yLV}@}{td>#%%g_y4lWIw}jlbVh ztPhm~fg`Qj+L;hp_H6Y9B(wSKV3jG*E1;sayM93B?VFW##vWL^VY%q!-1N0>c6)R? 
zbA&4*3r0EV75sJ*syyi?N+8M_FrNH&av49++{-V0+s29?`n7g)5!eZw<2=J@sYt|3 z9IbnO=jumUphaL+z2P=u>ovO60TYczOFXy8XK~H8#vsZzVm--2Ewr-34Q6&hix&I3 z_0g)^gF^LDt?tqyzYiOzWoynRlzU)>Zug zCT%EA-6Bk5Sv-i3a(yQ2O2am2TNO~4niyrx6&?Vbna;I<5VGUB9cN7bweTGSxc5N8 za{woSTDN>(;u>P|;dB|rig9eFft3@Xm7{cSlbPkkAC_YZBW@j{^XNJ zBAf%IJjyS5ubg!D;R0u7ls1Rk8QMVyZAn)mLs%ORaP7{B$N+rya78Wruj!-Rt>WRU zeUv{7SEP9kFqbyL;@ZaUA@$V94wwmKkOc>5P_OID)^=ih(t6TwebOFb1+4XNM~8^LH}=T41D6V2WxwUPyqI5(zED5zQO_=b=*G!K zixcX^9WEY5EQl?LLP2S3EHXfcvSwYWd(;Vk#9fhxGM)gm#4?xiAD^z_76R88gc$+& zwavxi7dgI%b**ye{IDB9EIT9^v-M$d-ZwHHz;N;9Yrq)*CU|}h7R7~2Mz?ul+!k2~(!R0<6Kk|5%19Lra^MEov{r)-ET>OchXguuWrMt^Hy3j3cWp27 z(RS=uU<1&*6CJqZLOQx`hJ}+s7Qk)uj#-Xb484E&!CygNov z<~*?4#cTa_tE06#Kd0Y#`TBKvH9wEl`TeBj_`>m+ZGNAfS7XTMH0{wHe#^_t(L-N- z_Z^E9)kU3dOr|+oK_K`LVBfYHt9HgxE&%Bo#T{U=N+*mm<-({gPxe`)RgN78v|~km z%))~U&Mq!0OYYJe86QU%!Aq>jl`Vb1+a4VMXF-qlp#g(+KLs6H@oN>oiA;MyyE@sO z4CNFZiQEd}chZjA+8MvlM*G;c5Q8tC>o)0-1#D&R7~o!GR1X?OYp`@Z3ce%Xk7`+a1W&Ph%QH z!f91`*tmv?VVK`E1UmxKAs7u9;@*X)F-EfarW#~1_onNl(rf>S=z9TzOrp)-;kQ4$ zbMll)j}rsZ_7PW5%;1hIE7*~WEynl=Z-BaXd_Q)Q*CoEhjTD+mnvOz23YclD6Fg5& ztlYH;2GBTMVXzp0#Wj5dqv>;7lKBMGi7`kJKE)~z z%V22;AYpot=U+g2`RyO{I#KjRos`298?a@~bIL84T^L{rdQ*Y4VH&s~h;fKYXP}D? zCsj$aIs1?0x;!RAGAA(LL$ylx5jCV0pmoGysnvh2U?jJsnF+KRkZ&gD01W}bnCLjT zD_H0YR;Ek{MzgAGIVhXH;ra29)9}T-{e)*IG=+(DEJEHld198i=AC6u-77cd>7P93 zJ0Gr<35lTG4mK-r?-QPJuLVe6TNXN_gbA_DcutlG0%I#JOjHdjZRTyAQqBtWB&qsO zup_Lcj08!&C>Q1|9m*g9lBP1@Me`f}`iUI!!dH!!;6qa2jxbezjSC%Qk}&Wlz=C_rYJ=ce4Ie&`pY5u)LuJkK5I;mgyc4*TTbiW^df`!SAv zIMdz0k`Uzd>o7*n-`oSFY6C)VJT3Cp;05Hy!m1KhhxILt}2`Wvz>( z&t3FfgtC?=<@Y(0#9R~EG|-$r4bST$k2mIEPUKJDXFe6E)iSyowWZ@NJM17GLdTlO=U{CFkj0jFQrI1Abw&+PnjNp(vL_J!E8gO zm!CxAA2h;0#~;2o%_^t#qvVx5NZXoMXDF#of@?b_Ir7xSTAoV}S=M~LkiPm%o@G2E ze?ph9O^;j36EIBrC0Tu#w!&NAah<*;-^z}${Ft_K!YAzGK1xq&2$y%(Be{pyJ_~{H z)Ue#jzof@=^EPSds5Capn^*j^ZTo?yjGcI78I+UgtXi7-K6y66=XruweyV@WjNgDq zenU^KQ?AV;X;bddNazM{eC4P1%LbAb-a~lWc-oQAO|-!W3Tk`uX-!u?cw>-8S(Ol+;pq1V5n_F5U34nEWw9E2JfpJ zLb%NYZUoCheMAaYOI@tx0LQK{eh_GOqLxVl*S3jPc4i0)TXphC^`UMS7}df!RSUg> zD(y^Iw5HS|op}!*z#t~kT8N!7@qddYWw(>oE)R^uT9?VmHGU)o0Nf6&thaN}{Q6t` zUNY&`+OCJZ1l+X99tS*h0(p8ig8xts$Yi3s&BB0STAd8H01j;d9&k_`lO|nlTrvsO zLOs?%&^XK_eg%u7=y#Yci0=f`ZS+po2YQCe&go$$r&>)rSylA98B^-M;gSMhqw6lO zuwo(|`^-)NMki2f&|*^nh9zBmM*@oX0ANsi3J;SlCv-C2H29)z1t?=8#B4cruPcj7 z+aY&a;|tUY!V^67>B2-yA}3aYLOvH1QKkexbgdz1=cL{CZ>?!TZrib(le*_%BGv}a zC$Y4DwTsVf>bD0Ps?S!eXqkYgeHowfDTDU!wnb_2FiR)BgS|t5ZdiD-U0bVN-)ZqJ z-K`kri~$sX{-aOI!^!@#eYOB_0572t?L~d{mgU{k@<0C1zbya5Uw_7g56eC*62|~a z_W`R0=K)KQORYyovHI49g}{~;o~jBDQ|zZc2bsvaSSr}i-Ug6>{Qb+%KV!-P*nvw3 zUG!*Kucg-&GI*g?D6U@O5(ulu+mYS?btiyvw&fcv?sWe%3^=a!vTa3P9CSAe*@s(J;WhtfWz*x z@E}Eb>|uT9l&uXpYa^d4KsiAtt4@Atr8-GjJCrwoCFLc05Lrq%YC2#nfSdJ@p@@3Y zzFS$L6zcok)$MZk(+?1Q`d{Albe)$6Kl->_>~ELjm8Eit)nEV23=28Vo1l&KFE0TE z1W1tOHguMUi0W&)jq!l4C$wIjo*FHWCUBR6d|d({=pWeyDUUZ2B(VLA!*|F4P`D}c zfYrKp(Be~|(1jll4&1?FbOf+Z@LT`ATBvu?HU$*rj~0{ybLQ`&lm`$_ao#`=b#j1J z^Ex1m?ap}T7rHP&H?at!kL1A<{EJg24=eQh-rZUB6<{E7wcK{mO8@QxLh-!~|FrhJ zY4jah1Uep7#=isnJwbnYFzPh*a!B5r$g1~(f*w}sVwG*&MH2nFdnlp1C5{iz%Obvv zEw^bZC#T5#35$B|(7wF10w6e3CLZ5s@#eS;Oiq>Fg{3kLsPgo?@5>87H4g%Q1n<;= z&tJ_`P55KSGE_hKT7nMot#xi6K(Jsiy=7?!)b2y3w34iS7>hXo`PEX9c1^n#kQxP$ z69l_=?+!ZWysR#*l~u~CAM+O6!K|Xk?*cGQ-kmAy%d2GyK0bajSLUBTFZcN#yEB8u zB_KS!8^^_g=QgNgv~>OO$$J6OTxjV<&sc}LhmrS!*e(_-FP_=pt~uM+lSdEAcW>qc zZZ80c53%qygPs_}t=LCD{21%|Yl}`H4E&CBb zFv=s&;n+>L5mVFCu}0nnY;=)D3)>tH3MgjZ^}*x0vWITje7#o|04(27K4tgw4?kj& zV<7Hp1nbXTh@t;+5x0e1x@}-ex@^#G%r5$84}CE}yOA~zllCCbm$<08K#rzaG}i5j zd{rK`dA9_530-h?W2HN_ef6`A0wa5SL}N 
z5q>;SwY9anlFDJ+>Er8}6YAs-SRnfHyO-!?7A;4HSWu|O0WN5*u;}&-d9`0uwsmRj z!nl33a@Iy!9w4Jz@QqR>IV zoegy_TTkdW`1xS^EmH!IzRr(GYnXPJ zIXbfCqK5S>pX1M(K0saL_{_d_5*?sE_JH&=`!(L{hQwWWODoINJEL#HJBD<}-6ZE9 z@F3&-Q5Hd5C=knWA;t3Q%4>JGzubr4g2pGvvSX_qcydmgcOjCbGG4KqjxD9_47r6c zq}wqdhm#^Ny4KQ~{f2gW z#nNy63ktX3vBHJn(-RLsXIw%bG&B1kJHWKaR*t$lakqoMbKE7(F8(_+q6efs;3kb- zBjYS~R~J6?Nwgmql^omZ>WE;-0PaqxBl)H)mP=eVpPp<-7mna}e++kfs~hVn+YKQ3 z86dfON`AQbZoSx6+|{V{d=?-bF7f_?KV`&o1{_z5RqXH44j5awW5ET?93VjLc zGA>TJL*RskopTm|G?H*2;P{RUNY)CU@lGaK4pDBS zq&7lP026Bthwbch%K$|oDEP~!7(~@lT);|@2kQ<8&nTo@t>{3XMYm?g6x+zfQEdH9zalv}=ZBMV)ya@H-iDYa9zlUR(~#P`NlU)c!E*=1@>!lrp9wfZHXXwm3_8N!)R~hf?g=VC1T1`9C+=KS zg6g-=1zkhCWu)#BQQj(3$_nrKA3pJI#*sxEH1S-44xREez?$()n4r*0%;XihMVukx^&4$q*^e3C3l zX8{03nDSWHzDHN2(xQuam2m10;>l-8N&;qMIVm4ysdY~two_guZzgY$SJTTkdHAt? z@>4-rz9Vy1rZGvwazt<}pVxv#ek&u*avQ!$P$OOMjW0L{P0}pE6-ei~u8R1LZs5Yu zk#)cGktzcakHN;fa;DJcOFY@Ah)3<%Zjg?HJzLlMXPuUuD85Z4< zMH1<@(^S`)s60)|HEmwheE4rV{v=c5g@2O5U7r8|KmbWZK~(a{vXc)wQJ!&=P|{W} z_%^=vTq}kkEbej5(9$1B&(q}5h-V<^sPc)-2+GSp^>5^cvV=~)QBJebS|Pbn-lT8&$kU?n zfNLg-_TM~8UqBl*UM)*va&JwMe|T2QOX$RjTwgcpHaeJcCUFyiC+Q1$V*4|bhHi`> zZ&^z^DZ6pPOTONI%GEF#D+Bb%0iqr;4sfHfV$$h&5iO?o0a{<9A-f9W(0>Cd7~Lw;=s zJgYPt%)pYXtRPRx+xmpQW*XLS4aT!DGVCz<=tfNC@)|ycok^2x<2TZ&{g-+3oQS4R zWOL3hH_1b7ki>j!5ZhDYg`QE|RfZ(@icK{Y9HdBBoCe;COg;7=jp#9^^@^o<>D|5DIXLb6oiEqt-rO__EUXr)5?_%Xsyqj;A2e( zm=dcgz%1kS0B$Ic)o-j8=U71T&UM8%9N?;jS|>UuI|9g4aM4Nk0o&0@4Em`nG7(JB zM{LH4ku&i)k;0;!3=Kxk5!&h3_^>DVnRi+l-mS6qqckvN>dAVRt z0ZxVivhxGh-1ws+{O*fxo7gnk>P{o*koi&k#Z?l{83 z7(4s1&fZ-wonv=P#|R+75pJ8<{;#iT6-v&=QOEM5OdX?lHmJXGEQqsfV#%R>g>BcJW}Atwx-YKVSKjh!oYNO1J%=I9a;14OcA`sCo}af zz$uV-hjhjPF0}H!Vu9-8$9Dm~@YTw0ndevxyQ4shTHP3QQs%fzLN7eJI~9;j&>WJ> z%UAPd0{t+B6|j;n7}ADyx(DLvdc#8&1wCjU@PNf8%C?4#K7Rb5=(1%EOI(52Se#>! zW!bf^bQglInmUo4G15L|;Xyk3a4T_)1>-4hPj+YnBY^uuSQKh;ySK?fdxCD1SzRCy z?O~n|fAD_VnG&rXX9^lSk`oU}&>;&{fDel8pyX{NrWdloa+pPYgdvGgq2di$uy(8L% zZgLi1@0SG@IquJKARz#oR@`0g4#Hydn=ig8AAIr=ZGc4}_&-3q9RidS2wIq5Ligb= z2Y|a3OWZ@`b#``^={W$o@`Y@7aLB8R3mv$GvTwA{83F|DrA%Lb{R}YJ#ol9f2y6qK zzoopR<%7A$<&%#;ET8`9vw&&aSlhnjVB6n+@l_TCv`Rjrf28nborTR_biLN}h$MBZ zPSP)U-TmSD1UJ;>8MHf}_TcLSbd5WJJgidvdI2D8oe2yI46EZ^ob3?!0?c+-O9$;i zt6AR<0)~2UpqAK2xKw)e_ALi&&*yOJx0~w#Zdl@?9~`s@;N8FX0DsQjqIb}0o@sH3 zo>>Gi^bkr97Tn%t@r3+sCC*FN*kN_^;$^wVVXFe77EbW;43N#mhgSM=p9>lv0bc7; z#WL&iO8q$u_&bhVs8`i7%Do@kv5SSa5Bngk;05{XA%ymMx~;jSA0A12zM^gE8(oWK z0Z?r3Q;+VVaPe-PHsfxkndv*^%K^ji*)B)jGK>2%U5v2)ST+HY?f-44dmO@f$s&F~ z{j&>Ho9sMzpZ4&aI`(sS8Xwv4+wr}0eE#{5%jB2e;L}~JZrm*43*T|QvUDH4zP8B$ zjR5J6SMJW-rS3M%mtTEb-eXb9#rv;czAgLc-6izY`wt$J2cJxqPe1zztLwG$9cLT7 z#cFyJKz8=-eRi$^bVKmM!Xol~OkE9Rr^7eTUX+tR{t2t}T^95w%OVR+E-ZUSiE_TP zvmCi_K;}V}jtO)-ma#KqgcylIpJQPU)7-+6`Q^)(=ng<>WVZwP*uq-h{&$=)iHi?e z9EEp}9z3F+aB0ILnZU1yN$R%c(Zh$lo6REUDGQk`0PXtDw@>yv@`3!Se>_`7e%T?N z&^LGUZ9nfKi-rTXO&3On`iF3-Glqrt26|v8vY-{Xhv~XVW_cezm_zn2S$rGdY#6NX zkr(Ah7jw3q?DC?X5J;+<_QaS#W#kytg=zI_=U{dF<2cj)t4pEbql>S8T%_y+x_1#) zz2mL{{h$wF5kJhge!}ZvyPV76fQ>Tj;Oj+z9&<8lpt`o3a6NH=Y-jWE~9ar6iLcju$K zTwLs;QDkiAA*Qevy`;e?{}Mz2by($Kod@!W zGz2YDQxPccdF4di1Y%7p9p16BVL1ZS67>dQY-vSFS~k-pVt_Fvzz=lTxu=fh1F4#} ze7D>QOa1~)aods*ybjGZzIRz%$UFF8-3a!%SRwtSFKnk3dbRlG+B!>iaz^llZvhQH z;i;2e{n|o?v`LO~VCZ@c53JuN6X8phPoofy2A!o^2A}~1D7%vn>pI|4DpNX~2$5-c zPTB!veIWmpiTrYHV}nn!JPRVVv(6Vl^BfOQk%c_U}SN-OoM9Lf*==O6WKsMI45s;^>R8NkE|9~t6Z^rLN%GR2jV zX(T=9gPth9W<1}ah}18(Q9%yFrVcF!>4n#1WR%psX*Bo{z)?P#KAlbt1@A~NbwCnn zt5pa>zgdPa*1y>$mSwX%)+@YZf}{-BM$El*@!oHLq6-q)=T>TiK6sfn*=QHxcaxT6 zTuV`9$GUIE%uD{sXFleWdawWy9%z$5-@zwn^U`aJVO>^>Oz9zACvDcHdc!hBHspww 
[GIT binary patch: base85-encoded delta data omitted]
z$i!I?kZ}4dHv}7(O583%a{aq$O=0sfF8|dc{?%nM%}nHsnyb#kFkXhsEqO_&w8Izv z@n_{xK6|pHXW06kag+!@3~JcKkroGPl#c1W&wyB_4Q-h~OMyJFOg`sFn&R4kJIf%@D1C+txM@DA zD<0Aq2;f`7`000*HMcw|lWCt3S$-JTfpk8J1CNuZ{0P4cLu8o}!uUk7SRYd6^OV7S zWuiTuWli#=$4p+C2@8quAvxw{^4BzE+lO*d{Cg9LFv$u<9jjiy5V<3ZX$L+_nI%BF zq?tSNw%v7DEyEjn4bR0@><^=9!aOvnvf~V-}Ea*g5+B`XqkzNmP?EPw#l#b zeJP&7R44E=?Xos$pFZ@d3cwe^@X-5u>Aa&-YKJ>EP z8_$DbwNxsW)AH2kzT#gp^AWrxjRImOa^ytvEPp5@tc#|0Q}I1#UlCdIy-C9HB=0AHJLx5~L5RWou!C}>R*);{3eQL{3slhMVwALEA$msZKstwZv+tm$8& z%Q)rj4NV0MmUv5JeB>p4sNeX`Uv#%Iq2C`%1woy+Nk0n{=H+|hXFLK^Y{T)TZb~VX zb!B^`JtG{YUZ5*{k&Kc@J}nDL@zNLMLt^UZR4ua3AUNd-EpU^14zCR>9rDsV?XQ## z)9Li(mdo_~^CExam~UiP5Wh~>58uf>5`qSQ`Aj+Z%w^g0s<4JIlAd)U_$ag`KT=KonveXT%!*H`vZ8a1m@@O8LAWeZMVO#%nS-t7QREz*B- z8$g_Q9xfyR?1ZzQbUd)j$?qmx=-U7l%#SjekT2&>;)#33Al@yb`ar&7(%S>=Cs-~y z5$X8VCybOC859f>ToQzGLN$H6%6j=S0l?^CQ`^-4hfg0= zfBfvFT_xZ&6LDoWRV$FjVSiZFs|OZlCIfzJUgOOTEFRA)O`R+b$`3Wx|Qs_R{taZA3#77tVW&9g77ZGgW?>f#=U!MaWU z01#)0oeH*F!Rmk*@T$*=t@ACwDnX1(4*L}$m0MPjD>6BCX*X@Lp@xdAV zrox*5;_ys&G8ecF(A9zfv4>ZBIHtR;E)Lt|0?PrP_L;o);z#uYc|U2tqfE#E0CYE7 z(0lMJda;QmD`o2L2LL$V2%l{e`jO5U5m~sFXS8w;lg@$C@Z-`M!Fb@_kA4UcE+4Q;1N0D(&lnGm zd8WWHpz_+YSJ7ctgz5MiZ}xfkp$jB{QUjyaA+BH=1JH*2c)1g$hY34o^pqJ;89(#x zoET?O?cJ-g0Mbi4cNdp#XNIuIZf+k{6D&wfPrl`_*wqYu-4UbQ7kx*%@TX^Bx*D3j z3rId*{qa}7uC`WI*m^!zt+iLHr%xYa8M#%h;aWm#R)N=@J%9)JqlNztI;P*cRVLu0 zfP(B46f1zCzFmi*3($wd?(~h_>|p@_p1Eb-{^;l!9i{vb(Oo(8atS}m0*yeOt_1XH zdjgnoO1nM>PZ0RMzc^ogf#tWu2YtlaR^0ug>m?TqJrva8{wV>P(0c+{zRoU_K0wfZ z0CpF$dTARuWSP9TpwAsE_wU_FTd>`}p$*JmE!4UT-GS`7aAzNM1i-1a<_-tUDm%JZ z&@G4gp8_`R>sE#|1RwR;y*Q^I_%e@nSCWfQ0`dnOSS%1L_$!aKn6(`>(fwKYBW=U) z0bUDq>zc~Ev?A9p?;t#tk9KJ42|;^7IsJLdV*xeK%P_wk^oxsKckV7kruqP)cH5f) zpad|rV!dYfj;?JsSQN1@(5=AE_C>YEqM-UrJ+CVd`@?m>;aCQvL#-1RC$#<*FxI#H zumA7~x)&N*I20fo!7}#N91A(r%L#3M4;~7n4pJT$9e4J3u}VF}=lcv+_^Z{+H}9~f z9jzWcdWahZeEE~FfU<{z>P!8IzEXg*yNg8;Xi$cnm*|UM*C}2ufa=l=@jzBN;CE0r zF$MoH&r9OC=qXrz43MX_tUG*6NB#*o3j7Ot?(=tq4pK(-9q-uH1D}-#cUNRgf&2%g zhCk|97dkJQ=Qya-ZG_;r3!?7CG99g<#l(*2=LL=3MIqRKjDC%a7{PZK=6FUrJS$)Z zJ)XsK(pJ_5&n=&P^`F?AhpT#qffui!g|d=HA7?bUt6^$p7DE7Bz@QIyIM~v*raLYw z5gGAZ2z3wrDBtC|P*s^P(%|?Na2Lq#jI$2uJGAufxnNO&atd-vjteXU1Gt-_{O)>l zkx)=qKw3*<7bzwtrdXJLpT!2p$}ap`j{P>)-s%SG>FR_&l>Xt2y0pA5PMomF;R4+4 zyZ6%`rdjNfmpkn8ad*ht##*e&b&unC>V^lzJ%r!Ni-)`lo;sFrQ5KC&IV*PWz02a3 z7T3ynY4b$o`i_P4)v{CoFPnT%y&#GM^afO|7NGV!+{xGvoHF)6deK4jE!2sBIY*(fLA_j7vvUj8H=aDM2H83IPrv&WJlTT( z`sCO0TvssEH9NfYWAFJ2>d>)Ge7BPv3}GZzUjYVeZ8Ux-VRFmH>*ZrLL_SqO_|m>& zWx+SPKgr~gm)Qay@ERclgsrXwuZ!Y!!j?%OCi{ZnGSx#7a}Xx0&8Qsmb6Y&XJ8=X8 z13mx(ns468Y(Wnd7^fF^391L6rd9Jjtvj?^DaH!LgsE@&te^0V{PS2#C10rk06+jq zL_t)MFXik6ODHOpg7J+ex{H_1Xp>Us6Mbn{!hucaggA9X*M11Nrz!E-<=fl zL%AEcLAY*|LAh}f%zBZpT?}5ayt_I-EPNt^fHT%z(nqI% z(8U1F?>Dnk^KzDoK!z@PPDH_}E4T6+pSq6KyYFtwm4r#l`r|1LPrrTA;}x!2UW571 zPKGcVcBL~=LviURy#O19rvVBi+;oW+*-JV^Oxa32)68U;#;9V;QyOvsBMeL8^#IM= z!Y5;yDet*6+0boZ^Rx#dkMPcLq)Q^@G4DfL%3_@wpEA=CMcZ-!o6v;349$@h=&_tU zHYKyL2($oGHw7N#mVdtVVR>7Zs6@;=Q!wt=b)4W46z&qw6sPgNP1r0wSeNkRIPlS!p9B7ih>Y-_$3i%;sF^AzcUWwlVXFayRXg2bvv` z=+I&Lm6H%$(zkr}a`Im}Mie_6V^EkN%T^%30?bUUdM#-zv}UWUe`V8P6b^pL|W zp49bW3Ppce|AtY#MCG4fmeXI; zm#*YT5WgAM`nK#AE%fk=VJhJ)4|x$XV-egiBpKsjQg~b&&-ftI&Ls zSDAoBx2j)4g#9CJsHUm3X(Q1l4B!joUmYEJw*Kl2OT8J!ge1cDREWv{`ky>ayRu9^ zGmP|*zWIH9anJXVMLrpp%NrwESLRLJussRoO_5#XtFs_uHSrK~pt)pI@+3~t8RjYP z2qyXF=r7+hZ1c5sukPfb?@JptnlHJ0U*Ahbc+p8GdRbZtmcE61`DghZ+oT*N4bzi- z4cf5-{RRB5YJnv9s^wV@-DKh=IHuJts=Jy691h@#MV~$^oumlb0(CKvK4XH&q`(O# zzlT__%mH4_jky)HAFIy6s=cyNtzh+iZ*h*v7u(nY2v)GZ>tZso11LT;fPo3)Aa%24 zm-1sQXavpNE<6c_){4>z)gZjkt|to<^k25L(8DSa`f>`3v$8nB2V;)PO#(+gnEdf 
z0ACOCIz{~mY+f+In&9xR>#Hqx0svs3ceQx3-DuS%U+-X9{bNA#&p!RCdcLw*ZSJ2| zYg-4D8Q_e5)58h9VmQQsiOTmG<0L22PS|odYrqzDj3JfdV(}{~ z{An(&u2o;XT>}Um#&7#96X97FLEQe%A_si4A3EPUtTtXP!*hTcChRaGV;jdF4QLa@ zIygdRncRE8p+*&s$&ZkSq1ijt*3#>$2Qan^9o|IF5EbMXAP~!4{06`yK>HpJ7ehWS z*s87nZ}SnHpCFztMf9J&zp+7ji-a2lJS^7T&J6|E1)3Z~3j9i!pv);iRkvU`eWZYh z6V{`R&FUB*&lPP*OWG?yjf*4B!J;nY(Zvxv2M8z7-UA45aY7v+vt78lDL%bnE%Mc1 ztGMI4a2lCXziOSXI>7RXNxc4%;SH9@UYCGp-2!v~5rY5?m&mINF@lum9(qTc)DrKD zzx})FyFdFeZx?}(bYM;!|HZ$?pA;* z`;KlwQ`{OvuOR~g=b_ayTLuA37a1D3BX!KtGl+li+1Y9U-4f#)%G*K@t#L5tHf}!R z0*Q8@%a2&M!Z!=v5AgBfkA7M``-fjwzvrOX`}c37W7=706VS38Z*dDX0T6W?w>tYQ z*eUx>&pUuFb(AhF1{4s&S|-nSn8yWFFAF6CbfZ{GO8*&hCvf5-obj}L9%ZM&33^qK zS+LlJsA(*^M`#~{yIL^A4a(R6lzN{t56o|wa(Y;wzQyeq2bEFkfBD_}xDK+7+4bnB z`xGF20Koal(|gEH$={Y~IVPyP}M*}u3zV!1NDCJn!P?*b=9PBT^yiOp!-NOhvC zt#<%A+Mz-u;HHJR`3=}Vz{?FlTy>CoTjB&%{TyN^nEj(^%I^_eGmSA$m}S?>Hg&Xz z3nq7`3E(^4Eo~55Nnei(=Pu5}FR_m)z&+iNl)QTx8@PzAYZ_dS(Jlwb%d+it?M5ft zM@g%T>w=E%?h06oOu0~_+`DLFcO~dADDQYlwsau=MH^MtW>^3jVWHyj z{I7>Q5I=$AHZ0f&ms+JVz$(R8Z2#^bzhh@VoB)ToiKGik8nF{(1?P zDNx==U3arP%X;&0Z_9Vg*g%&}hqRL){rJb#90#duB+{4;j+Va18`GOrP zQ`MJWzN+58UyVFz?L1NY8CM?;(QoWRx4ypBJ)Io`o5)9YEFnq9GCb?U1q-Ss^3#I1E{r*Fb{yHy?kD?j^}-a3-ZRMQ9xhe{t96N! zvn*(1tGJ}OU`JHa8RDD|7E~<1Q#N(D2g#q}e#Wt#yTF_)iZvR+v4dk_8&{dLJ7;nX z%fcWackdwM7smMN2j`0#+%>}SimQ_y?n%z54#y#ycdA?hUQ@x<^1Nm%PQd<^ggy(yRRd``JYKfJWa;Z2E~ zy0UycHLbiOO!Ai|>zzDf?P*ylS2>-Wz>%SUPLtqKDND%WRgoP#0@`s{EluPmkF9s< z%!_1Rp2?>>=s>xC=U+YA~LH&mCf;j3weNy?w{V}3?w5>oT2?6NQ=bcb?E z7Aoc}!Y}}d9_-YY?S-})9xCTv>YPr!6u~6}GJ)2}qQ&x(Tr-yr-lxseal3F8aNIc1 zP}+cHH1Cp>X{9~*j1I^0TS;X{f?s?j&AB131Snq$y9}PIv^j4LgDhC5@|n)djwRq} zUh|%39l#-N)HG5aXg~)_U`e)P#60r%l7~ z29}p$o}YYIH$<;kpv@C`LF&crn)M*reyyzB-W_e7BVEJE) zYgHu4giumSh{z2Grlc+iE)(RtXry$CC=<&|~olm|N zJyD032U&1|CI_X)&+e1RB>B<4qpwZHyb`bQUbzVh9s13(P|XKqLLK6~pS~hiWW*ORRt8%F6--K;bdNBpxAal1=-ZA-~!i|fTlcy(IxM96U`|}X6d$>#J zC;ef88FXq}OF_clpT#FK69fHu_h&NCgr0hu#EpV3EB3KE(uIotGOsy_?hKkN=_LLy z1&FnDatFlT>vzP%3YWBGiE370S!+OFGO$|QS(IBQJy1vg`@ zgzQ}~adAT5n(T!)1N!(jrjTH_AFJEAk+JpwF^6BCxH-=Oz;@lBNuX#j@2y zp-cym)yb<9ZIgD=YunR5`fVn&53sg8!rjKi_hB$_od z6?o)A-FLt7uzL1(i-nHQu?oj0GoafTmbO~Mc46EjkRc#080K|~oCw~#Q$Y*t3m6mW z1thW66c-8r)<^ntKCSLA+^W9w6A|6YQ_P zS_LrWa4kTAll}Kqdk4#QEbK-{8M~9eu2ZaM+tDG`ysInks(bUTSh9{FS5EeI#W6~K z_8{ZoGZR$-aGwj{ZM|8p`l)wqAx(h1rxx<; zd>_yfex0&qz1f^Bpm@Xd0FL%IX!FH~vKHk%f@A|M0#J&Jqb=mWfaZ?rTezgiI8HsQ zj6hc(0AxRaXqD$oxT?*ck{hiIGBz3x{A zN00+2;4Da(rhKOGRtZQDykg!wz~WI0T937wbaL-Jf-q^=1k(as9-aUeb}`vM1u(m0 z;mAWxbuTl-yIEXbY`Z0&g&(a?=ka&1^`C9f9TwLBIW`P;9eA+l=fC;;>Tmwx)A)X! z#-jSp;;rh%(sK2h1pybxhB*`z)4*zpMYet9c@RKn2=J`*i-08P4*`dO-kD?34k@#J zYWhaX&_s^-3TV}gWw*3g_h+<20e`J}-=GVXzftH@o)%aH)Q@=61v-+G4+4;FKz`*YD!+X{8@@iaNEzaM~qNMI)Y=gtY zSYg6Xb(&V|sh0g9G(ipNYIVgRNJB;>pC1iW$&mA-NC;R$c4{gJFE`Nb_ zviRv@>^KWvg2Jy}zs(_&_ixV-j(&)I1jAmvT0w8I&`LRD@eA+Pu;iVanhvnv&2#

    YhY*I)a?`IWF8dcbVc~*!$oAZuJ-cOuA_0qJnKvJ_s)At6Kf7CADRhr`CmT zjkMr*(aJU#BMQpy0mNrL@Q-l1oUm@~Z?x8T$5o5^Qm4sZ!Eym`>qY?8aKppw@DMD# z#El5=6h?Vi=YpX zPo1bGx$Rt^)B}K_`l7b!8^`wh?)?!~_A5~Zj(Z)xDBoE;W1eXHb0?Cz-5m#ln#z~H z^7}aOcMPC?g2jD(-lxsM$MBW#`r=j|bPME|MZnv!!vy_I&J)oU8Q(|HErN`W(=Hgt z*k<|xK?O`*m~&ic+Uh9}jeWKB5}wgdP0f*JgELrGSwvnZUt|*5wf>6VcluTMaS9;r zLYEfhS^*!k!$5v@TNXQW7}Ws09>wB#ZhkS~>;O9|1bgQI;pLSJAbaS!9d>sO0mOFU zZ`;1tG2HbrJ5lc4t5(^uqgx=yJcqnL0x&;92TY95;#y~dHopZgaAkDCc{J#WNpyjW zW(7aO0)KatvCUrf!AIX<%!6AN+?Ggl7dmwW_d;69?~?u}&pu_R$`-r#Zbd&hjMjzP zIJ+b~z*D-00hj~&Bi}AsAi5bt2_OrSw*lg9U;1&Ezn_2k1q%x-z9F~nz&T*ts(*EN zhq%Zxw{Vxfeu6l#3%S-xT|>0u`UnFM7SrCXa~2M1_91JIJPznbNSZN&JMW5be+kj& zcR#Wtgeab<+b(yoY~u<}?Dmv}fDv?#I|Q=Bg*N75*PGRMERyV_FWBieF~|bOezkVG zf&QkC$9T$}kIg2FH5lj~pW`wQ!|E&Aj(*2&zb=-J4$}AI+pW9LRz|y_VYvuq`_l!D zV>}lv=;&C~U~ygRc5vqt(!O>7Y4vCS@@LhDkDgT9%de`x_|N|l7f@@+EjnA-rO$GA z0Y$Es-Y#X=1mc)0yP zu(W(9k0z{!pa7W=t38~o6UYV_jX^`zueRYDHYA@r0HT8_6C{H3J%c8JNtl?pDE~|t zGnj%Ft5fh>TFl4kPUu3JIOCD7OthUC@XIr&!NrnR>tE9#E(0Zq*~))STy0re)6 zT<&tIxysEAoqknN3w^}t1o1@;C|g?di5An;4jR1z4&)Br=4E+a(&AnhF(^5sc3rWx z&%qrXe71G-DbOK+iHSkUS3c%&Qp==*asudJ({l1hTCTvGy-Mx+N0pQJd(qxzt ziULT`c_c9Dk|ICymuD_`Fo4W-mF#cofS6h2FeI6{Eg|Kym&o|fvYR|;pSq=9l!{sIEQx4bLyN?vBhZ%8Ly+nu2(Bae8YteFu{k>|)y z2one*tocQo)@?=QV4-Z=)<+#TABkPkGVu&rNvk1p99`ic*0^TO2NQl34I|O=K4tSY z&%z|1m;02DawbR$Q|j7!i;f^oV>o=1zGa|At`mx?pC^m47-SlT0Y{ z)O+Ep<@hD(@Y=mw<#V3uTmmcLnjJX2?GWAzOBHuXR8GujFnX=1Uyoai999 z!zp!Mo=KxQ6Ob^_!$-bFm#QPp%b3cS^b*>714TeehQO&BN4ZhZe%gUUwF_2wA{zWnEgy+jbV-m_ARX%{y+C10R^4JS1|8 znQ@fw;C0@UPA=a2IJA0Cdk72RN798);W|l$LlinPLZpsc9qyos2PBdST*;aEgf)Af zU$ZYhI#Sxg1pd(nB`pczrZYB6;f<5$H9g56`sA-!Stq$6p6T(Y{G`mC{KI#iGsupm ze}Z*YYb$SiQV;r~i>)>T(pz=6weKPOt@_#`rGqGys6+K~IKvk|-A~ zmms#*c3VtLJj7BL1IJi4y0!Mi$vE!+4grGt0VW<`!MU`$9&2j-CF*mvi+JjWQNc;% zXbNjn-HMz7+VlfRcB#84my-^~c#6rG19`2l+|KRx=?0U_y$uevB&{X@ogly!mS#?X zwG6vva-vm|j1^4rQ}A~UYh@?M`U3BZRW1?^FLSUFfVn!N2`w-(6PD@8QG8=#4TZap zBPRb^*6UV4%S?fc^GxI~i@Ox$N$^*Z&KrGh>rj9+d;O zv`P~+JHVoAoP$CQdn_o#M1GKkfBb*Bl1ArV(j`e>lh`-Dsn4b)H zRp0wyp?dfbOD4i;z4wPNUR57{aI2aC=$oEna?V8YSHFFZMe$tqV1~m^v2YvqK*sc8 z$b|jMg>Dg8{D3Ft@NSdC8DmM$bYlSEM<7sDJ~Y8@3FO>hO{5URI4sfmGMAI_$ zU>$$H$D$50*Ne582TNXHk=LvY+x*yqSVDO{f zYyhabb(-W!-!OoafH+;f0Px(a;vC~x*31b^lgYX|QQa|u1#=hivZK>R_!XYGz;xQ)!o>;;4S+J2&U>_(T@`(B0KGyv144GNL6%im{i!|h26L7(Hw276YJ0H~eUFZSz zRxhAif3F9{0rIZA#}yP{1lEif5cvZ68W=dNzVqlda@qrkvWp80{qs(v>nJz%dyO2n z(Mwxc^j{3)n;+gO#{wyh?l!ui^ce!=y6@;F9j&2D{Qm0h;w-yoIM9&tyRg{LJWA_d zdnAE5UA7$2cC{>B$G^O7qq7r3{chXyQtqaqdu(K@+GwxS_OQGJm)OLT(gnYJcWzg6 zq_>QJeMbJ(rk169V@dzTmoEX*0HNTA{;D`1$PMz=B76_mJQz%Mqp4Ya=TjHx~Q*o zUnTFei0P+tjtpwSAE1}8#uK1a2mj&ubNrQ~S9n%jAFzvuAKhmN6uVQ&e#4zfo_*oa z&4VQ`0Z;9pb?c!1wT!1Mq%6H%sebk^eq4R)n@_9f_|Es8^yMF6au6`(_mDk@M+-Ak z?1~w${_o%ZJ&T&)PdpTq9he-%O~e+umE9P~cG{70jxLoS>f$S0g^d6@YJsbtX%{-| zG+cyoyyN0_3;is(ueGsbI@^eiO4-#ST{mIw_R!Uj{qY6$5jSL6%z&tfsqR#QZLGqGz&ktXkRz?f5BFFQ z;5-Sf_N^=1ofQa=;k*lMAo?GgYW+^|BO2DLNo}@({ zUdl?jETi=CEqT_db8k*zhxLGrWO8eV3SALDfjL=c-aPo4cis#9Gmx%xk&u!WofL}f zraa`69!;K+hiN$Y3uq_Z#^b*oy7dr?yQoIK6Rs?D6mAsRF0zn&>9f9PCKBo?~<`}kcTCLb}!>eTIi!5Llz1yPg3c$Y3tXHP`R%Q z8Xg%rP1J{^;Na1L4&RcHpYS*78y0PtF9g2Q<$D| zDZhn29-Cj@E7R7k{TSu5;I>ikrOnUe#cS{RpE}@~;3^S#VtmrE@@fC6CCX>m5`jBj znYRN8@8o&XHeTwSv=~SyuC#@3oxIJb&{+~S3He-jo2~NX-QkHY=Jd51$T`Mk)ugDR-BcW6+5CUE&kTcUi@VZE44`Xj&ROG&l?frVsfq(0<_WsdOyx5>k} zq(7iFUk(VO_9?TITqvb!*rzaQKp&_Q=GaaupnL#GAVA=xByKE7qZ|3L1h!uRbcuO< zdk)~U7Yof5CgN;erEm05$`9_}qKg601-KFjc5>}t{q&5s3XgP|;zU6UIeFiUWv6ik 
zodwbR2!DnJl)y|2kh%cUOjelO90U05u5VWh4<80lvp+Pg78BozF@b4JB$y;;h|4t-c%|2pAV|i#(W^jI_pL?XA_DvbN0xaffo1t?zx*36`AjYY1Ljhw@3# zEaNu{Vl4^U2@35HS1`x=Qz!(m({7PZdF#NgtBG9R5eY4w0a7EIt_3!vk7FW0Ic zy?$Ta`pJjXM}M`=!HUc^tJQ75NWkXr|MZ(2+J$u*ZW+(F0a3`?vnRUXle+|Z5#}v? zqic!lzFp~xkGKX5%kz+u|lxWw!G_Mp4{)gTcvK#V?j6# zaHB=z5%kAp2s&qyvI%x*(R~2;sauj=>eG7CiZlna3U<=21$k{}XUK7bciVuQ@==RY z`()ehK)(kO!b9p-w@oc}+-PmsaC}D?>C~mw&;S4s@+VlSwW(H*){P50wjX&a*gH&l zJ<#+0`VQ)wwoS{}0?e9Yhs8ASHvowa*_oqjlX2WKc&%XND0r*>w@=yF&}x*k2x!yl zbirlal?axqcLjz7QXIFaJHPksZw7P{n6V8@$1!cwMFn?=sQa|4?qgA64w?BEVBVYM<$#wX_@y^(f!ii+QeWd*{Hwpb zoYR`$34I6b4nQk7?qc3Hz-z#~{fp|q{qLVwKmExMt2Zm_?9u@%BFzgI_^Er#BG@%O zIYrw*Cg?*3ZuM3lJ$X?5;nSDd)v&X;Z&EKTu|$1(b&7HVYqZfI4@|N|y`J!&q?(mRl#M4K3DjpM#7G z;K@sY>wXrk?K8Bb^$m@*3EG7hjEsJ~oHw5;~a0*kR0M>lnRe`_r+&@2#Yy;>Lg zuin|&1o*?%4=lF)@>Ja{!%w<&XY8e}a+d)M6@2UTo%W0R!bX&!h4x~(JZKp{2YjR~ zNAP>czK?!z9B^2WdGm6M@qi1d^f#pEA(y)Fa3RBk3@KUYk#=2v9nmNEp|i9=4`{x- zL7QKTOsXeM%f+ug7A*Di+&4G|kUPaehzpWV*v;r6!CS*du2@LjYXi1(DKEAm7fPol zaVdr?sjpsrfh_G+&!7LU>iv>_9iH8}`+zzm9CJJu^W0gt<4z)U#jCfk!*lD;HaWr$ zhrRvn_@3U`#xFH;VtI8f^TQwh1ncg>EQU=Ym&fQkef003@7C8g(?_~+z01Po-0VD6 zL!S(g-%A^n*`q9=s%t#sAo4^R59tRc87sQ`!S*x5!kqGhx(}c9kIpTN?e_Q3Qjb|) zzg^AvVs3VU{@X?87n+Qw| zI7!qpf}!{*op6;#$4dqdJcG!*7s!EB+tauw0URc=DKFU!J7gUJIJ!)02~YlaM}PoC!+*)W0L80k-uq4 zl!B6^lAxUQ>LgyiB~osAZ@Pv}M1{cM(#Kt*CxeoNVSETLZ^~|X9v2_KWzt!IA@j5x z27oq6Ha#8$5R=c8wd%$%8Gv)Cborw?BliqY_=FY(Qzach7ak{ncr1-JY!gmB$#e5m zu{(J*Zrt3IPFp?}KSS`-dh$YGQ#c85<|!zPu*u7S0M-+ES&R%qtaxd$OhySWpuxP1 zW17knR|d%lZ7%ZBpSb}tK__yyq#NHPO@e`Hu^e=8gyONR$Y>|LA?&CMoqRyA{LMmy zb(leULh;W!G+|{#+4nvFJ9P-($}{;Xh2#VO0@{#A>d-nu=0hjZbzfs=oXMvhD4i4p z$qrwVXPuVsOy8=J1mD}1__4#YK9vcRbmDFP2FZ8s(k`XJH0y?8{kk|}sC;zF!58Bu zwm@vYKodWCBYnJ0xlAiW8H})!5doD@V-#sJrqAI zLNoDPv?>CfG)n?dQOk-4dhxIIzgFN>Ywn^Zl0nb=F3M&wH(|; z{!D{+X)|;H0i=>wDNleR({zx=!be#MG+)a`JmpK`yz92$qe`JxPxwrR)_>Y>CV5*F-5AJ7iT&#Y4mW@DK`3jatqw>QsBs-#DO3jk(b zTrLEach)NdSQ05RXNr_BR`aA5+2$RLlU(=#I=-7IaXZpLEM}DY5QL-B3<`)HUThX__v7%60DimrJ^pS;{Uikmc}( z@>w$;?pE|15ll}V$Y-(smR^}eNLfvrOn+>8_Wy*fIhk;2W9BZC_{@SJcb3ILaA(3` zn~QEX9TT>$0}%VuV|_e*a1PpFJ1;9;Kn$UICxZwaWb z?7US9%z%qW4f{FeM?RaEL|ATKMvllY-y$c*f#N(PznZR6Z-g>m>y6}7*3e-b)0HOg zp@lk6?W#+{YWp$XNB{A_iKYBEA?rsO%)No3&oPtV2~~JQTtey2WCb3xA09c%IrmaiVDY5O_CC#7}Xpx+&xpQ>JbwN$-a#wwhNZa{S8#fiwJ zE)t+a*>sEdKGskz?scz{#Q?(Xa5&=#K$h03g8$u2cpUFF(Q5)WO7aTU%<5dNAs#~% zRBC81k|`vC<`Devo*t>k5R<;d{*+=Zyns|X2fA5sTR{63H1%4Qo^5X#ySmUQO; z%T6#oRMZK&KvU#`_m*9m0m8{7tQWu~ZIwZL}gso60m!hjX$WTUtbSaMI5ryrd~@f*7=WH-Ia*o^$kW6fhHZ*K$eIAKpeFK|aUbQQ zP6b_)Hf6F6Sr$QVt<|ob6w~g)TR`6$WTyGY{_oN^j)7K;`x>TkCnJwW4(` z-vmr^_^~>ZHnRL~D?3?Sz>!W5r;MLk;_R|`W?R-3i#@Y#eiSzex&_rj_LORNz9;ZI zJu#g_K;PgaeUDu^E+V<8F#y2bW^q6t=C;#G77Q0}PX!!5_~k#b2zXfBE%8o0E9i58 z)usU6GX=4 zZ{H|rZ&<-4`wT5Ub&GOhn}T-1Q7vIDpNpN!%o(7UiwXOj&+v<1{Av8-V^`(b2ku@vrLH}P!?xjKVLANSsv=K#>2rP9 z>$b!(_dYuiyw|dMhz0d20MHSvYVAuLd!5tP_TbAQI>$0vAJRgLrhkAwWMgt_8o4Z% z_PPVna+t6AvyA0Cb&JljkCuPKw7EmdCWvUA3c$Juae%eE_3Z8uU8A{ZwhusAWQhKc zgOrQVdx8Fx zGZxU1{r&1oKz^*=jjv2KS&&>@xECMwx-oIV;`wKv0X|OvkRJe;iWinXeFqC^fqu^% zfl;YX7wiRbpMUxZ^wE~w(Lj3$aLi7%Va69b_+EFc?Bc!c`jmbphseS!-Msl+_bF48 zlL3>pkneR@U2B-#bok)6A3}Bn?16?h#WLi~@#-c(eVYYV zeTCbfx!|$Efu!z28E40ay6h6Uy-YjC(3ZBj+osN;-8xeyYOPR3G@q=xTxZ zT*%WAhs>VC*D?A+%VqdoK=VjH`oSFl^K*BR@zNi7U}+D)y9Z#lu|Bt*7TI+d3_Bnh zLrl%E8xsAszVeoM8d{V?Xk7rCo|z}jF?Kz*IFsTHpX}gNHW?eab8iMq?_u1uyjgll zCqv($0qV8q3%UEFUaW#%`wTj})IoNc>5Tv#kVARVNk@$TJm9krT6I~Zo4w-p>yjND zjB5iX6VQSp2{4odRc;Q5NLYqOlOqrTxv6}5ZhPEJvZDQnordCbD#Ex9Zs@t`eOWOl ztW3RvFt^zSurfXVB*?*&fK*gzKyOyElD{NCgQFe6VqJ#toHYWTxkV1))d{p)-QyHm 
z+RPU^lAh@b^!iB}%0*aTl{lT}P61R31TigXw^z?+nb;Lz zkmp_`(6Q7D4JgLAA2F_ZB@N=cn4krAcwt8h-x!1iyy#}7GvVL?1LrIfI6*WC6egoD z;=vbq#e>ji9jTCvmM7M!d=adnE&^z1-O80ZhM!&XQQ&|&Oli$a!9xc8UVsB=%X0?T zOr9J(7~WjME6Nd>qR!$@#jv~!A=I~%CaKI|+d%?~-*QLnXzL!Jw#EPX8og_@+)=fIMl!EZev6(_YpQOrvenOv#z;mO7 zspM~2xQ}dEZo*26SHkeUv=!2#-D$Zd)!|9_L}Ip2XfU^cleYT=v2B_=U(B=oOC98% zq*HIv3+61{ULnv?4CGo4bp&)XP>BVEJcOl%-zu;~B7Nd>pS+ddJD* z=;)**NEu9;gz4yrv;!xl)Sp?zrHMD;B{wL(p)JhgkykQGWXfb(=5IM^2cfIvTP&$9krc`g!lJj8?Z|Z6itnHc`U46= zgXPlie4&T1(i5v}^CYbC%g|_98Ga@00By2XTI6wPl85{-uyauAGi4xcUHv3a zNmFXW3qfV9jf*bisR#BFMhckqCS3SLo&@x1Nh>l@>jMHOK4qx`gl2h~@~X4s3*S5S ztK3Pa>@IAkjiH-$L*+AdebKChLXHA{Q*sFlU(^Q{B6TfM@XKfBf}Se%pj`4q;F++I zGs@;9z)5|n2WgN9WLAJMF-^D7>*U0;S^MR`H1Li-B3r^qPcWgh6NxJ_EP!lZQ1bMd zBrwn+ykV6O^EKVl2Bp}m)Vtx0l(7Z3lISIPn&<9_@_pJKx1?$L!xZ^wSXWSxR~KXr z936qImqC`1ONoqAWEh&w*jG-t`21Rb@Wpn^!>Bl9EAnT)wzY_Aormc{OWJ6CFL#7o7yaOM~%<+6}GmlLIO%Lf7?pNi~Sr^*ibD%-Xh$6j@t^s5s9UJyr_GCg_f zGk-~o`}9kuYT12H8_7Ycgb?(O%-87{Kkdf^ypoCaCfTInjnzRGlI?fjN{V-ddTI6r zX{UA!XGB7&qkZQ^ddhxDl;`SYnVYMG@xN&&zWPf&%g<)}r272LM)lF7djVjT!9xb& z!+^3{iaCMQ5B4Vp>Vr4@!no)So+-J7cK|^002M$NklcGJ6V^5syiI(P z4+7R`WvB>rYu&~~++nhQPxV*4i@SneeWo(;`t$$t^Bj(MYrI&;3A}7=dKi@ld;u;v zQPk}XjCVrY%UHoG*UE5;2~{7HHMwPd_W)E4;`_5fX8L+vU3yi0_Qx-)Z+!oI)ioBO z`nW!!&3bU(0h6kEEU^Th^v`Rb-Q-?(FdJBrH3cB31nYc|bZ=AcaSrDc3|G5qp=CM( z#%%xr>p>czX zec$x^{q0;lLP_?Evc_J> ziZR^v2e_YeJbV&)73Ubhr3FR`IB0EUphEYM0beOSrdcbHH+s)L%;#`z4cHh|FoKxk zg_-KC^F2a};B9$@yr^&br{|iBT!u(4`z6MbTnMesMaH8$Flv36cVHm8!2@06bLJ1o zea0yk(T)Xrw`m|#Z*>n6Hx^^i4SiWJe0g<==bF6Z60hcz&o%G(;pgis-6>WPD zou-E|np3Zyyl&#SIYV>tq$4~D9>*sbJMBYWe5XPC{sKq-;g51`{M|i_$zOi^U5vr% zjV(6C<~UKI_nw}srR}u>OdDadVsZoN8&CUUwRO0q`5$@9q~2hs5=cu zaf;`q`RNET#@Kd^ri%+~Hn542aS1{?ozyUW!rb0433~d+APC$e48129KQ)wVpc=#| zdvQ%@12zpcYJyCRtI}o?569ijP5RMg^XQYO={u`zX5N{@OBG&e_|y3B94_+>`2~$M z;yYg(WYg2l%2W8OF4b7*=@oUH@}U9IO9}6KF*JH?41=;da(9ou&RA)<5f%3-Jh#o0 z#7dSguoQh;;a5B%qjB(v(+k}T-OUd_f7*Qg&9iLKiIb;%G~V0a#7kqjeRQ1@IBuNk zUF~?}+)-VioS2V)p|RrzJ%!(y$AX1&Z0Jel)%>2aVgRRaapD7AE8P;N0Xn43q}AqN z$$fM*a%RYnU2r};_1=!@si zW86F-uky3F^oTjkY_qz)LQMA+V6nkPpE_dPS|B9Z9`lNc@tNlF!%s*%VWaSV^Q$kv zWD`h-iAfBwoL~dCClS05bm{)XggG(>yL!Y;RQvKF^_ zua{rLgKpZ(W-Viv;a5E2Fp2kh!XI^!k7xVAA)Bz-SVOPx?XToinVVM{q8AqLXG3cj zukyLOcd7RX9MP-r%yp8XiY_oJ@1NrE01q|z8#~=O(afFsc>nJ)emif;rY3E;hA(@J z9ebPtaI?mZB_~1ZI8O>J+`E^#z!jTd!?ec@jZ5aGN6bf^$LiT_2q0sE8v;mOp$v?2 z-^cs{CndpQk4=JX5P;*w8DXf@9Xt=P$$G&iv3CKvF7KgE#|Vw>>EG~O*&Afuvb6LO zeSLU8~uIy%4&AN8j5D(|$p9%RE6yxoBEq=5b1_Cng|2I=D5O1ddvkxj}nDz(26 zKkCVaqu`|pz%ff5F~sKe>G=`!_&}4FbXkPkj;^pmHzpH+$^#`!g2}`5LnVa(<`@|b ztJtH8x04JD+6m~bq_~)lH?-D!cgK5KbZ8L9_>o0cCP#RTh_>~*Shg|zu0^>6l7Dn2 z(5g z_N^b9kzi_I*E`$J2e`r8MXevLo_K2{#&Oz2KHt%y0zBWX!OY1dADJffG8U0WLX3Ne zPih5!@w6u4OF0!&_-g&}wns?eQ+Vvs%`*H|n}EVQd8RkF!bk@GP<9%9ID+J)%(|+8 zIO};xTP!0iy-fX>W+pDD$cX_I4zDFo{X>3tZwkBye|ZGX43aL~LksKEi|TCqAjnwgON2w0sOPX%C8>-*xe0(XQly{K1KO zC=>L7C7Id^5hZ0wzL993@wbZse+OB>=#jU!OJey;9HqY% zr=&FZ{|+3!rHL}1e7?851^BZ~r6oeUf@>cBz|VS>mE@Cul)S40}rJ6 zU(pdr1&peBkZ#g~=NPGqaMeRB=WpB2pS1N8J;311NL4Zr_~Mt>l~){jpS}{>iXLrB zqk+Z42Yq4_+oHlqdStk0Q&ctsILaUUf%0tT@R2gg0?OVr!xI=fpbXO*B<&xST@=G^ z=@FSRU7moCG<1>erVS~kmz<_1Q|dQ7zfF9~dWwzwmI>pmE!H2q}VEQ<^MY)KK1aYe~ zuk~7wa0>s3i|qow9$c2mf59Udksm~m&rUxI^5#l^Q1+#eh?1|R15fS81zeU!^$MJ& zpLmFF{ZqNknM%~V&TYU^T>Rg0D{YI8vXndoh6D&q-V2Z4qEO?Od@@~`ciiEHFY{#@ z?=2EpGoSEGsK)}tZ)wrOv9P?3{*m_TRMG;&^5FMDo?BSaVE_%U0-hJZ%>2PJg#<8w zV{-zrh3PZ+MVHC|@pqhO;pumgTtxg7{xuHJ3s<eYjD361}~|2u*rfE zFRek0uCKpeZ4RK1o>Irq%dimADqfo8*W1@cri*jo^m(d`|22LXCP8s?&OeH=$ymre 
z{j~km7@ByI)6QcI3@(0+<9((F9Y%l{_t9k(&EF7)5Br`MxvgP5@z=@Pu!5JpUgye}1`hj%#x)m- zKX^3XJi)8`%ilb2rn%H<_Ylp)q7l1v7H$f!i&SOUMX&}PeNUvj9`P>prL*BK^wQeE z;9ABBHv^n+s-Nsfw1YZ5Rb~hBi13W33@~QL7y|4eV&g0So=lj*L-ufEjmt-8i51Qf z@Ze4;2^PD9ATv7F-#p|}$LGuMc}Lqre;2eEfb4hCU2F=?Pc?_$kpzwV;nO*?d_fzA z*w7%6nBBy(<>Ioe-TK(DLB0$NG{iCSE7FICZ39`s&zRp`c;V?K?WSVJPxP805pS}g zjP)0oUi#_j1nISl;bRm}wkr%%@?OJq#wy0FftkBaFR}%p^SAf<4Pi^TVD8>sU}F#+ z;Q})}9BYmZ9q};lCQM7_6Rt@v*z{o0tEage1NPU>r=#m`v!KVANZq{{c3h};5nAVj zOB%hPH?i~yEpto=f(tH{GwhF=rWb?GCE+K|F**)F3o33->7NrAYrULQ9dv<5W{=*< z7%B|Y(meoQwX{eCc-+-TNF%)QZXB1ae-ICAE+>^&{j|?eAi~&tdj{O4=3wP*^ZwUtGC*^I%E(cM?_@0g%r z-tk!deZ!{J5&hS&K8C#5M@H4P8d{Y*7>J%F_U+zrb8u{oYz@tfpKMefp%?7uhGWp! zte5cmM5T0lpbnbLS-=FNOjU+d*j2-OB z6gL3o?{L~cWx~8ejQ-gw&v?VWxK7q9|D26TFJ<-QleBR&-i?IJ z&7hMP&#B9yZqoF|cVpIgz#5AUr8SCdoSboj0@7;yA09S$Z7ynrU*E#0MZeM`Ts@p| z2Yd%Fc&1)ue#p~A=ua;ybiC4w+A6aS0Ffcg@wlp%cpBuEO_Z zmGgPqYaf~FpUr$%!&{HCN1OWk&>wtPoMifjGHhMoBY*54#&B1jv#v!qYY06vp8ky& zOR`x2@7$o(=&uKFbVl062@S@W7^cC&`Lkn=dfqm>nJ|fLO-{{Wc;!;&pZv6W{@n}0 zIqb&kUBjvOY`B4`LqcuJ7{a95-w9!&zS7|6CYA3H?HFCRF=h{8v=%30k&BmOpLb8F zgY^#AE8C5lapH7aQ2Y!DW8d|{OZn4(JxKh0PK-g1W6GSmDJc)o-G)Wcdt4fuRvy9C zx6~H7TLfVY?hbb z5$b}mDmofqd>l(155~A)@!=u{S#-`Xe)<20H(qpkfL#9SH@^(MKY9Fd^MlX62wyh{ zv9w00DZ`z7{NyRPqMHdx_Lu+e-)1iK>C+z+f5AmrGd{c<@1980b9n?Eb$oKbrMD~j z9>nl!KXFd%#*&V0>c1Vr)z~M-F}S;#q#PR~dac z*dBRpJa)av-K;xD_ujpGKXBAt_jn#)Jh9(9pF2dphJj<8cxAcdu1G9)_nt4L#?PIvN@2KBZfy(hYUg2(**lsVPU0+)c zJm)HI_)JWC!j032jDPAW`Q$v%u`T+Hi*TI-b#sTw03prJ*km6?HtheW7~plPRJKO& z9G{t4fEKK6_$U|IqyW!DHlORHc~3m>-Dq(BC$8^3SR@zaOl4d<^R?@Pd?j-u%q7Yot&ivyWJ}Ftaw#h=T^m<3P4THwL+vxlTM?%79ziQj3;&l+yt}v{5|6JSLiurH<{h zAfVm3Sg-L$Ti|M}4m?SrcMbFt7d-=+6gn}?MWDh+n|Q}}3==$f=PjfH8*gBYe@A5P+tO6nXMRTOvDrNjDX;hOxjAmYGPS zy@emR1~p$sDzbRZq zLHWq*lylIvcwk#1ze%8+|3$`>%R+?rwh=tjU|^F!_48ZWn=UxC6z@zECcoi%`fYxY z9@>+Wfy6vjO2b#-6H-|hiy85c4qzdAqb#8O^{O|IaijnO4)$$p2DcVwDi($^)7C5f z(>EB1eBZ@poAp=OP=<7V0{@RsXq1j3S1L`#)pjvgewZLB#!h@fLuFlhn3XbKSW{&c zLi6$hJR>L4mcEkHGQq?`Jcu*EmGLrYG01de!Fq%dlzlH)+v2x06e-(i`*}}$7)_~< zpAG~aex@L0$a{-dsS`Y3`McU|0ca+zfNT4#Bp$xDukCAPd@bWJg;Q#}G;P0dGP2kJ^s0{fW`q>74@|UuaFMg=Am4EBtjqMD7t%gZJibszk z)BLh#>6m_z36wA??>qWr-jT<;J4VIoYtq=hs5}8XZL+NVC#^t)4ouHc+sLf-nh%-? zKY!Ah{K3nAeGm_MCXGmtC)F>>3!SnU2aav8x-^0m1esTl{%+fsywEH1VIAga;U{w| zH>r#F@Hp_&i1wLDM|8G&KVv)agcI4cD(PsQ^3}d$ed0)Zrw-oNsSG!?>9@+T26kmM za#cJ52m~$O2~ScML46`G@M5e+4@m=){3oZBlyg4Ha@m965qu92b*#Lx@AHW~i<>-C zM=RgqP4Ovo7CyQ>eSy|mcRtd?yz(e~E&2GsGutFwD*o@wqRUwE6-ksncuN~YN2?N1 z+s;$wO!+Oo>I~odXBqR_@ai}AKMf7h&7v9J@gRP*KW&a&*`R_S9cP-fM_=m|Xx$$^ zJO>Ap?34B_5tnbYT`0oWQ^^LU5App6FL2<74!l!7#7p3v_9-vOWR0z+`xe;ZKz^&$ zc$RWjC9d$UbRaMJ1UwV`wBH{Jgm_z+g__S*Ys)JOrvx{c(_RP=(Ju%Vmi#iS}(Lc>Kx(O%g(OAVSswxt*kUp9xW1nM8gt` z9SlXrY#nm(!Xk6w!F{}WxoD4z+r;9W#j6)5y13lpVmU9Un!peyJ>ReI#w*J90GcVE z29+|J=&k7j%(^v_>!s|)aswO2i!F0Paf^PcnE!yLiIoHR=K z9QvILLl>~4%s=|6Us@V(b!>c~x%cFLGrM`zY`u6Z`=ABJDU6IR+%Q)~E?iI=j&KrM ztPywDlNf*ZXFqO!_m}@q4Br}eJihFLPS|c3c=?n)-o6)aF=(5<$Gco`h|H*KG?-r^ zZ(fXONEyQgcnN4ur!beY%n(PZ_4YnxbEm5pIb;5Dcm!?tPnrb`U1u2NhA=kScl7S^ zVzA@g!{)^=f7AT%AO5dR7q~qBXpzM>;d$(42#5yvE>?zmLH1e}91_-t7}#Ej-NVMp z1$0|m+iD(tG?(^cn`sW-y=xYK@>#R`-8*FUvRV4#3G#=bhtm=x#ww*v_W5*gXxW1? 
zIN?+VFruDt*(JvHo&jQeV<^17-2)jkDsp(SwcU&_F0y%IUuIJRIuNiNdBmV-xD=S9 zTz1np)Zq{R!9QwF*55-P3`Q7$x(JmLgQyK8?dF1Y5eDweMM(3g^{w857%c4Hj%8jj zXZ>}*4*G<&Ysy_C8y@WK!^rsR1sAhn6o2%^!)AJMhIV22GL|czrV|f|eN7qb8^FMQ z!bZgai}fLniXU<09a7L1bUpMI?<?@NuSUWn_))wFTZ4NK$vJmtwtUkrY&x9~RJ zp&C9kl)gb0r)S4u0ey$}ppy_Sp0LvpYgi?XU7m_b>|Ev(Ll~ufrx8jcRYIS@AN>p6 z)HI~V4uP4a5M?~ZuRf9w8aNI6AYSxx^dVG-Hu1iMsCw9%ub(kq-bu&)VJ;O!x5eWU`okKDG#HpW6HTBK~A(_mh;Z|>0WvpWLGTf5S4Lb4iq|E{bGWooX4tL$I z-gUlwbqfu^+3{!yL!Y`(nY5hBM0hlVc3lxdY_Iw9t1p|M{^WCX;BE8r&AW_;m$d5| zI*6xZ&kl7M*26f^vCEi_9I5kimjrEg?ouJPkbxM>^qeN9GKMf2?Pj6IEXUz&l1UGh zZ)x3(&9~s}i7V%JX}4+qToq4g0gk zU3ueXr#QzL2Yub3($Jj?aOEe)#hE*IvjHn^KmVJbH_K~!4LfF_pP=LV(Rwy-Mlc*Y zz8P9#bOan}ka7ZYFf(%6xvlzc~%8S0Uys`|P-3USET?g_F zse#>%A$P91N2raz`G0?ohcla=&?cc`*rarpdv9Th3$DM&DZtTd93VK=vV%ANz55TE z`}ZG4?p`gwCeHnM^I(P(C#*}Z(5r^^&`aDoilIKle;v>FVK&}Y-hG$58cILnK!Ggu zGJXxyFP-h5(O&x-McMFhj(18Jmv5qDop(6rl)f6j-@awsL|;i`!|HkRM`ODu1q{P7 z&NyQDq2)JkXd81)beQe@^wX!I#V*Epjoa!@c`*hYH#+5`I$NE$W5^fi>Bhns^H?{w zls)IRL&%nLam6VSJ;LSlAmrWH*o^Z*&41wya|q{;2gGI1CMTWu3@`L8=3cf%*;u%H z53lb>;qMlggnF95*yfk4JA2V_&Ry(R#`^aZ)G(*3#CZgxyrFVVTiLv3!_|5iEne=w>Ax^yGI%A!>!Ml`zD-A<;$0=~{B}S{m7TVTWAck`L9q>~+6Md&AJenQp(ABA zEHrdfU>E|#OVr2>k5K3w8@s*2GX|v?{&IdV1`~N5zE*l7?=Ga{me#kcpyHZi7AxxeG2hg96>U)?BHfO zQu429E$;r-Xy<}aNa=IF6z6=ma_-CEzytsL&c0$lwQSnwpd=h=@8z-50@x&hJLxfA zr|Q5^wxm~h#go;=@I#-{+MtXCeH9Wg)1P!P5CQ5|hFfR_k7Du(B+?e&g(Q(%-nFSQ zmdQlvoHD}GJAx8PkcadyDpaP0qeqrrg&IvL#xwH6yJRPa8$>beqh#e-dDXXNLja#s zgW$k~XYxg^TKS}gKfDXDw*QEySkMmTsq-mRc>@z-JheXpw{#(o>Q^(f`B<_eKS+_c z(lLpa6(9alzo;)suur8b{waeLw1!M(OI}~v<}k%d)G59J2YJwb;*|x=&@Xw!A0U3L zQk=yDT6SRhtr0;&%a7`wJTo{}+r*zb#Md?koIF%kl^a3x8@j|GhruB4Y*$O8YHN#x zWNT^6L*=nuq^B3;xz$@FJFzR`Ad`8nGV(OEQZ|`<3qX9B4UtY6`KQcSEuRVliEZeEnEqH zBhLbZWF{;%F1GM(OXR{um+#a?l+QdsIJ7gDM3-J6#YB?POL^zmrJR9r^mOU{$d2?R zulmuprJvA_suCEyPm_YL@~0l~JvvQa^Q?CY?iT zOPU5okzMf*aK1=qgU$`!f(M;^7wY?tdZZ2iX_N^*z$iHv4tZ=(P6}e--K^L@dMN74|rkT)P?>^ z-pV5#QjcXsAK46!;-p>{_l)BOFPSWw`uU>YroDk<8D8=g{Ul6&Q!+SZ%wjxCaogjE zS9S6#5>DOfSMjbgnahQKfFMa(NQg9wMqdD|4D&+uuWhR`C}z>MU;r-Fe=ARDM~)J# zJWv%e8!^auS)KESJH(Q80^ZLGP6Pi0MzIP>(8DB|&?_3zS#;`6=>>1{`6@DQ9kdp< zXcX2cwYz_uMTW=ShggVy`StVW(c`7yr7_|--iPPS>e?Y5QxBRAVzRnu)G(}(XPzVb zdi6#pK%)^B0xo(F@M5xWoU;hjQ(K+oBHlQkdiUx1_U6q-vyM?zdW~Y_8boIu0kaI^ zz-J8B2&)(6K!{A(1z>9M*QoCTO+${xW!5Xr-ka5CpT*D!9+Ji~)xhLsfEsZ0M7_aN z^EJox$B8pK2p@a+>ovRqFkRFdoH=LE*I?qMqOXOZdgbSy|PaZX6Q?;0j0So+Gm;w=+(3dq(^21F8MSU3B8ouBy zu)Iuj2!sCBrk+q`=(0|yE0*;*vwcJ!>cuXeTP$9k2N+XWL!kLKF&ODZID)}WnRMQy z7wPFG7h+zcYgvqT^|7di7u#!_&G6#|LXTWF8+e}Xu@N$e2fQH%Mm6H713=hNAsUCe z*%UZqVd%z-n>I^xcrt5%rG+lu^`LU@(ajvcbL!E4;Dwug#Nr==54+T5h?VVczao}B zo|SkE4`7%d1m6pc&U>6>n7)q}mzy=ty^8?Ag<#UnHo-^XYwU1=%>zbkjzEL%4FRYZaWG%c>u5}zR26#3HG0J-hri=1RJkh)))6Uq{Ply#mykrgy zY~-e=AAzP#8qsgB_A!2Ae2>V`PEPhlSJF33%v_qjRblT4V}N|0VO(wqiPQ%lyvoD(KLp&b$xF{2tuU;>+X*SW^ z0sDDkrym}ovoN4JE+4T8bpl=VB)&^Hly0p4#+3D=LE1X?5>};-Ak*pxb*NsA8h4dJ zPj+ZfGsMZm`*)j{Z{Ol2Ihl(&HSTKgu#ai*)~j1@N&AWh#Y2oD_Fs**YJNAtH1Zk} z#yHFx%Kh^_KCpyX!Z?C=`yd|K_D@lCb19c*B16WGKL;m`#RuSZLceg1b7=S+e$UFq?J!DP!PhawKB2LBdhs4IgK^XGST@|;pPwPV_`9a>!F@z^qIrM8W*GS$s&g4854n^SJtdz_cQa5tq`yYBaW-}r z?#;s^jNOb)73;jbel<2Z4mf{vBg%2s4N?vC<<0H}-dQ#@+;rH$2>kS;ht1;$_nKe) z=3DCMPiULRj~+D7UOaDp_0_jAhBY`E+n+RU>8)L%S$diueDOJ_sdm^zUd_1c$)r>I zW#SFf2FuWd;NslGz6(m}Ux;wM^P4pDrjJW^vyw)^^dF648qB<`RsLTv=Ia4$C>u9* z)m7POC67Smsbf~WnuURmvu`VN=FKJ$MtJ4cy#34_+ys(m&H*u?*m&F1~kf z5#uSRXOLGnS2fJpH{BR;eDDqfjljo@|NO)->o{RuwV}{AjKPvJbt$8US5Ap=(W4t0 z2v^$d{8{{*H;B6ieecFdXd%8&;GJP^4C~@t;hg@bL0q|X6E2&RR6aJwX__P3rCfV5 
z;EWA2PPfrnqodO*)-uri6vM0r*?sh<8#3~4cXK6mgeR`af#Lc?oZb_9O3&#jJcMW2 zRM^^n558P>DQ%Gt=OzPeT5T{d`P*OpiqJ~$Qk(QIS?+CqfLHkayALQMPZ~D$wsN`V zkAL(hF`{nsd!PB#%=8^jCM?osLX@COF8OmFbb|iIgxYMczX4A|)$MG?D0>Zk)jQTD zf5HbhG{N2Jl$R%e_UTjpKE??CmM||z%#$Y2$Ap}LhEud>1dsg#HUKp8e){pJ$n$yg z{mYja>p6u8vWBWtD%==TC#i>#f*82(-(N&0k21eJW7C1Sp%#eeg zFfcB4@}sA=@}M2X->zx;kw3?SE5;$i$xJYx zdG&_zlK(VHPqATffQ&0Q=Xk!md6JWb=mpD9(U;u7@idNe2~Xs>;jL^>;JNSSfZ=AG zM>#%wY6W#g-{1mJ+Th7X{kYYx>Vr#QWuutBIAD|$h_9jDd8&qb+n?}XzR03HeU^Xx z-!y2R(^m1MA67!sN#Vl1hl9XgO}rk*x&p0g+pF-V;mQAZr-h zT5Uy|pn%SU@KXzjW@VsPUp^`w;3u;M$fABJKBgqNg17H2<{%Ygm6JLq zMw>Oi#f<;?ks*Sf}NssadC!Yod7W#+_-{fNgcB>GR zQbtYkNI%li6d)yitenLH%Gk0MRsfi`KX~zgpk-k$Byg*+EtD}`2;!K$R>VL>o`m%f zwm2yh(7`rqUa<37Amv4u=g>AhM&_72DtkfA_jFF(4=rnZ{glxS*zPWgh4qqN8#R`v zpfs01e(C9NJZ;K%45)dwjO~(+LP$A&%cG=F*w^77nW05BjhMmnVJDdT-!X$h#n zJO7EDbPRp0hj*!$BArbiH*EOT!Vnf^`6-MdCp_Yj7fj&L9ndC5G~~#%aEQfCHhPpR zX$KFZPmr0w77uVtKcI19<|m$`kz;;@f2O1EB17gt)`FipsA7DERw8JBvpIUT%SWJE zZwyQ!bStkY@W_|%(-3u^cu_utb|EEH-t&L@1>e>0`0auSd|IZEN9@fleBiY3%Xlds zyk#&<3+-4gFoP)1{5@{u2DSy26=@*9Otn3<*Eq8E8yN{6{;I!BOsFUI zrVsN%c;cf>`ICPNZyuBr8T^4q;4CibujWmO`m%mM@}BgPdwEP+D|2Zvc;Cni(QPY? zJo;>!IKzXGC1uI49%2$FK2o&Gwr!?Z^oQT#W&^0-V|_7%i;HQgQhDT~yrgb_w>)m! zDc&{q0n0i&awN_gi>dy$6Q3CVIBBuZB(}fuDNXo`el;^sffs!w_P$Rl>EsLlN)|$^ z;8k!s?U5hhkp-Fb6an%t&x))7r5WwYSin!@M?M5T3L$+Q{Hi{1ax+Pp3y*ANf%Z+BPct#b?`M8_5&+>V66rb9r{6BwxZz(uJx1Asvui?P6VqcRQ(K zd~#tH@1o$Mya-O(6%^$OAiz+*Y?HcG1jPi2we3Q0akNe#@RvrBgLWi^Z(;jP{kByw zk%6R2izM)m<019&BwuPCL%q=4&E;%TArI2ic>@nI?o{YHa1(USOrg{MZpn2{_g;{Zmpa70AE6z z+jjDpd{&yy$QMAOXUJ-Dc}^3j2Ng>Nxu;M zzdkMg{P1l@hn0Q-UYjwTvW&82{Q%CZq(K|#?&V0^coH77Ko};ju%SC9@G5k|Y$y=D zeiTO+sZO|WSjcK<)u1!Tf>J}1EGjvH=d|bDyZ6B5h%$V+dx!YY7+;n#t_%ZDBbi2Y zaXe%ZIEL4*9^1Xpe235;H-v>)z=$=%Vkrh87TAV?Ti}vL4XcMNxDwAebfs6apuc7@ zFo>Ziw1R$T7^IHy$ae8F#3IO#p<*=J+Rrrn#+bq4M7*sS;7^cDnr$^>bFb?eCg(r;-?_kIe7ovtYyEwy$ zm$?P}!I-SEO8L?Fsce@=oH9&Hn5RF+`15;=lmGc|e%tJyqh}Bn!!$T2b3PaY1Z6eM z8}~$*FTZb=rU#p!JYB$0yNB01 z#u4d84>&o!Y5w}FSIrh-KnC&D9^vB6Bf@YXSma@BK-By2CN=!Z{;OAb|Kkz3;bot| zw;j|MPk-dG`iGqb*pzp{te3aO(VUoIVca+3sR-b)>61$|kwxR2M|B_v8hd&PwGv}^ z^s2g!_F)(@jiBD-IUqlCD)5U@+qOV|FSNX2^{Ur)WSc%Fh;I03zAlWAUWN(Lc<t72=FvBCAa|7tCR-9>xRCk6~1l8(|nr{0zgv z=)L={($fcOLGK)qKk(}t!GqkG&(cJ`ML)_IMh<9yh1Z_E(z9=b4WacfpEV~KFa2%2 z;?bF@7)f5PZV^KquV;+VdKapfmX}wX(MiIMs4e+Dj4`E~kSZF-N7&55w=^^~uC1p` z^p4ipoXZ)ZyW^l9!uA!4$2-#g{^0HcVInYOVqb_h%SHe{aZp+5%PpTy;i+T!jl(nMrn|{Pwa8S9oaYcsY_j9zq3Co2+EToPBfC} zdHC1=-``^Fevk2sm$Y9k=J?=vppNpS&%)d!9_FT%=dKr;ZtbkF*>^%6#~9++m_Rqk z*LYo1m3>Y&>$QK%=9MR5uyN8ax#W(vdKzE?9%{^VlVXIr5EwQeXbZG6G|?1Bapz#_ z5l?}*L3fu8)>FcV?GYD$Ztf135E5npc#heI*-%$Hml6**M%>KMOEhBxfJX6%eT8w) z&C(|yKW=)^-`6;$xvAlXhMS{<8p+|Bn<87BDCog>>d79BUyjb>Y@Y4nN&X1K_`>vP z^NiT=;_`QX@+ovVX#V0K|0C)dY`*y7aWgkP#^r}EBj?5|H>B7)#!ojQ3}-gaCD?A- zdcVc$`}K^`8g<=RhzB+8a`W0bqH&N#%(&vt2c)szE0jWwrXxP$m9wlhaTs%B2QoXOoc?^p7#VovU@?7rWtTiXOsxmdAjNQLZ0- zMkDH^4*h9hFr6({cUAsIpHHT%E==yD0MF9280_NyXcq`=o+;OuMaUK8Y6ld zqoT%EjpAEepeUbQGbs;_S$*FJ-lbwYuL+6aT=4)I zk`5Zky$hwElRwTwXfkt=ofs(%W8o&ab00TAoGaPatxMWVsD3sy4J%}*5@~wQ=Ae0$ z|I8_oMW?^^cgG6*mvkS)NbYH>t{qO11vf+Au^A0M-Q9S`qkFe$uNx}TNm}>gq5Q>X zKPDdj=Q+Wl(SHMjxFMA^%#Ne8-$Mh#BMA(YJ^beR{3D+k6Q-`QCu%u^dH zAGqS_c?}qnR0!-Lw~^gdzwpzfqQ@<`3T%EE(se4jn3Ib7{MTH2&w}- z!1rmt7fvoNF48tG5@!Ci!3nBUjPz~->aCyC5^T;2e`Wb?^Wecpk-sd= z?x0^U*w{i}!sk8iQ;@$4v-6CzI14bQI(EC>fXWG{qP%I`7QX#lS#tx#dQ6)~-uI9* zF~4Cv z*MY>1+_lx^W`B1*Cl!?e`))$~(2qU&<7M-1EIn9SZ0-_Me&ziJx`s`0WSa>E{SA2` zJo6RvXUmuiq0D5REl@Ih`;o+oZ}Hv zoOy*jmr~XF)FL!0trLQu#J2T;bVWqPK%N)|%@PTBptGC`nQuB7)Z!%>VyIDdIKARM 
zUJX!O*q=ozywtsoA@9EOlOqyD=q;^MPE_qUv_W~GZC(m!8(fT7 zBr@B1w#{jxuz-&|$DrW*7I%Q~e_IyC;zv1921U6HN#G-{z6(CS^Ec&WtuhRZ;6PHS zVikrkh$o_Xm18N!vv~3;zWM|`q@nzq>+3eJ^`-yXRu?n;R=$k0$Y{fio5ekf zQ2P0<`o5Tht&3ICGl)`02XV<%>DCRc%qx7_S%wmRn5sNSE^HT%{wV`(UD+_m3+3A~ z(j4ALuB;!qL`QY?@J^h07wXxz$hd-4sNM*C*rPb`Qcea+BzcNVnkBGG7peFfsk(j)w6WhTevo< zlppm5^+{)Cfc%&zs0W$J5=Fd#Ex-9)@?}M|-^I8kphqrhw0Kg;2NWAn8@MhCY*X9D z$XfLhCrQCgkhV@*M0kKroyBJ>5uVuDcHp)BUZ@%4C?C(EyoXQWVfEL<>!y978F&aO zvKu;+UjGMrNB)a$3YEqMU#SblH9x|>^euISo1^KUHUePLp_cU>MJbcYqfeRI?>RZ6lyXNli%u@#46`Guqi3$ zvXN%LWkTePx!M^DQ-*~IGEj8jU&k$I=%PbCXkIZEjy&}F!?+E!iUMXLB(MH2oNk5P#Ur`{%C`kfkpljMkdHEoYGL3rt zkEbeIxs#qs{B2vMW8m^^ZpudI^DCe_b@~cjV6O1abc-gNGGJ}i&k_B%OwKV-m`?j>1|_7Sr?WXY+N|&HRh#|;xvP?-J|=zefc&M z^EHg*gTPoKCh<0xpvf!c>xhNMc>ib__Mrc##JKk2yfX|-J1mwaFot2hV1n zMjt&tjaxe8@nIH7Q-nR3n452I2@B!4W0(cKi=@&CwAkc7SynKqXhg=YN zhH*@zsD`kU3*0r3c^BAOWK&i{xtPm~+SHR@i~`X00(u&Y*ia+eT!<-;>=PX?wHW%D zLl`Sk&2i7(+me z!}dw~gT_3KLwZpe+gLgYFUCraUXS7FdJm)2zxnGI&2gjAR`JrikTTMc1--|aovZ5g zr13*Tq8ZhDdRZy|ZpIi&qO;bKw^ta!Y}+LUs`vZEv46ALJX%_4E|8A_3>>}mGt2Dk zov{&d(#%bc;!(u~bZhHe%*KWVp(Nlmx&}H!PrY(`@UFh$JoT^t{eK09r+1mvpfbkr z2H%7}>PyfEFLa1~8{%9jbKvUbhZ^Q|tQjQ^v~jb&KvU!P0Gkvt2Yt0$N-LkyGgZ!3DMY{&Myi)q}QCD{xl7r1TJ?_URFfxFPp420Q(2tEh@tekL z@Y&l9R0ek}V1)VP2S04ahz&iyy56j8aO57IYA_ojw)oBVX$-J~@bnD%>S0VzY-h%} z<@Ie0EEuO~A2zR~8wcDt!s?o?W)p)KWsvD7Y!bbE{T3q(m)3Ewz&4(ucQJ-OdGc}d zYrNe(MWQh_-q9`ny!*iz-r0b%Poy8zc&*Vxuk-`xseZgi*bhSxBnAWYQZH$s@#M%P z79q;ws9ryA$!a#mg zlN*%IPh_`x%<)8CYrvnvPs4#t2O`2zPT*!Le^LN_9iee z7&C4wQ`R>>b6cSN=F|KY#fW-KUWgqaS}i{qavCt8Z7A(~mM2p?_)^ z-NWPjYHO3WK|5#?Cl1y! za*e)r168Lv$5=Oi>>n|DGd@vOXsbQTHAZLvZS{nA^J9P$6plk0LN#Q%>7D*a-E;RA zp`{yqBh5#TKFOT~Tg;){RCj~pJ$=)ShDr2>n;6Pj4`Ji=$w5{ zFs-aUXS{aKs&1-_gwJV%eb)^kXD`De<6LUVJWW*%GU{RZvQ59(bTb0uxW?FP#xiBc zP$DuurjO=Z!jLNuCCx+eJ8TCV<;1F z(Ai*(O(Vw`!@Ic&=*H49bt=>6jFrc10C@+(8hqX75>nTFLyYyxjkGg7$pC+`<09C4 zRLiRg=8B`&BY4x}F^)l5FKXwEr)(?;`|xOwe9!{??#vS#9eH;%;tU<+=A(K>&wTqv zCj?$GOCvpF8Ts_4_a2H|I7%27im)lq7{|DK0R8n8SARJlRX;60eAN8&fBa9H*WA7F z8vWtg&rPBmc#;5UK$pMy0Cm~tZ`5`0W@~eqv2vDq>|pcw@#E&FKl%ba^uy-a*S~50 z&wu%UWMfDCjeXw7{NUp!kC+P|H^2SuSJch?jQ@HVBZLX)^od>v^DNRFy~Q}$kX(r3 zC*HwSFF}19sW7VxK$QpO;~7MrWI&KmGeB_=g5fTZ?Fk5HcF?WeJbataXaCYf1Ye^^ zc_^Z=tkA`I9%QdlX*m^-FAK*!ng?9+Ok13YWMOX_iJ8a*c7O(NUSy!k!qB3W6TSM> z0xraA!BQx=pep~UE^tV_zO$qN#FI~c^N;12Bqc%0^Fm`y)!;kIgbq9+ER~;JomX_) z+grU9_#Xp=G;%VTLDsSemPg24l(T0;ad(f*YXK%_C@!Qb6PEM!o7eD3|j*D#) zgFSC}EO?fucbfuj;8h-+wsoPSX49ixCREQboCelsK! 
z-@+0H+bTVICdv9V>fbmxkkBQ)BV+Upi?ro~S7<<8q^E5Nkv5lfIv=RY^y*s$FOxC* zob}N5OqTPU%p|4~-j|}Y?ldm_RGNze-Vf)ei5g{SE>G z`{S}wN&J!{jUq?*XU?ho_iFVCP^s0- ztL|En@xr#Xb%|Gp225%E1v=tHp7dqPTPgLV4HgYO#Y1BU`Nbcw8XkEfFMl+%jA0a@ ze(~@VTG)@xklHLLeBT4jdU+I143oj9-dQH?mVdU1dTBW2{D=o|GASt|MUw4Xp&Run z>(UK5i4F>XNSz2sMxDDEkf@F}>^ z=Q}c({%3zm3;0J`fvU8mPFr4WEIeY|C0(5Atuo+m`XCjWR+|jMqo_P>@uX@#rYc1I z5C&ykh~^zh_Tyw>a!-c}?NUDiA9$&Yp6sGs?=|oY8KG;*Nh=eMFO08gv$a{bmB=Hi zkR%!ZfGXVT6Ky^>C&44}%Tph3Bj<&8yeHEp5vC1U)D)DcC(6Y0IhFFB{wiH!*Z>R_{i_lt+s^(;3m{l%TBW>7m*O< zgap&f7rB9kB~vEaIy2ZtaW>6QrA0509{uV|WI3>DL>7a^wn?iT`aNSo=7q|4<*9$kOg-vg7Xz7pDx2i7T0f3Mfy{5~ zjtb#Hy$3X3g#tk$861n`jqmuTy?PnCD7L@o!R7+Bp9!^m(QvSXfy70E1|vOumRC2p zBx)gxo_Jcj=w&ggH@3!jy;!ArJhlfgE&|`<>|Sy=hC$Wi_0ud0CaGf!BaohfhDGp_ zCu6fddbkj}tg-OaSZ{kZE=_Qmq{dzigj0AVj^ZKdV#0~Mv9Y}n%_HFhEY>t!>E->t z^&XU5H{hVB`8Wo%6Y6ylD*g0sx_9q>JfA&2+XFs>7)>-d7&|pF*nz+F@e&Ik3>SEp zJ^S73=68g1S)8AQ4#quZu?IeSaJuj@W~9f;k6Em3?(Aa#qaNya;iz||24al`2S*xZ zppkk@hVo5w!XdrC4Sz5&Foj1x9zQI+U6`G*kQ@MxANe<~fuDu(C<`>b`cE}R(_j~) zLm2T!@xW4k^d2*WfJS+F<9m%T@pf;G5uDa#vBqo#W3{?Tqsb6_m4C{N^}FcTQ@)=z zA6tNM2=!-KMBtWFn_d8aXo z_Znf&kQW{z%X;KL-`H;(pp zD?F`? z8w-$&PxaGYjB8%@cFO|!g2i__Iec&(qwy%Y+WCwZb{b0NaQRg;u<#HbZlUhsTQ53{ z@y*!d%7qqR1Y&S+P=UN-DR+_`#{Jweo_x>LBzJN}8*iW+Q zunjWO2G3>;y5ga6#Y=K;PInU?r<)7S$e6x+MJSe@Aq+9diiXeGdw034l6dIg<|HN3l^_Yf#i0Nxca81EqVxmOG!VTZ-v1az(da?e6ZT1sN3-Z-4Kl{w)eBq$c{UVz z@0|~xaGAlQ-@fj=K*Q1K7=4%x0WVv07(bwe(8{I>S`l^@y zNEYZoe+ItepE}63v2AcHm0tFz%vt!hAhP`2oLGwgN z9XFIob-H|1F6}#B$S57;(;V&UVUBp@X&p*BF6&9WN5~Eh-g+?G=M8(a%_cMFNH7|X zCB%fd%@XRuJ4SXeimRW`p@o-ZD?=JXHFD}ruYuO}N)(;QeXHEcYJf}~X4`{rO;nWQt+M_4GYs(YDsGOV-5{h_`rQ@n`VdBgk7 z4aTYC=JDev%xSzF)-{70B80{pV?&j3O4@s>kg6h6ZUpHquMS45Fo%H$;5xhTAoMuE ztA3BZqWtKAyvclV5IMPE&a-=hH#+=6EK@$48#VxWb+g0%;pLsqG4IS0W{*m}yj31v zLeKU07dT$bVep+yofGu0VfvSwn3rsPc}mc*cV4J&_`q>adU^lb4j$#o;ufd%ynNOE zoD+BGl1u)qSNhp5$4fuzp&RU3bwYu0o;iY-eoAoji~A%S(dvA4)HtE03?-9`J<|ti z7L2zK2`6hl@Y~B&l1^PR8S2y;sDW7>f`V7I~e@^Rh>SB-nGr=XWKjhw&&p!Q(dv`dM z`07PuSL3|xQnwk><;!3EB6vuH=n>lCW{fze0pQXb&jA~h4=(`VLa%NSB^|+yfs#TN zR9v#yvWyF#q}QMb=ExdN<#u$v$8LLwwdR5p!LV#CO07cmOc^HAksG~_V}Rk89F)2$ zJ?lyibE(XIhXw?mgK=Jwk_WT+2|&KVxl*E4FNsMb*-r`< zRAJAP9vlQf5xQd~J@i!T#3c_N=0i9&Fu1_i$g7YwJud95#|6ARvs|?`3s*;3X(SC% zC}9mb(pLGVd|==%5{SLxEx&{VJpN6+;gf^8=+Rb=Jj(l`g_NkgDlvKvzsM*~!XZw5 zV1^##6mR&K0oy#ZSAK|J3?{I}4kO7KiK(v?v4;D!LFmesbWo;4E7~C=HEyJHk{;wq zQeL3}`iN)cjc?mX9X_QGSbylGEb~8YsB#j(HiHx$&UOZXbhFPe$S6Rj`cCRa4uMq$ zC~@Ukx>!f$;2rpx*OZh8BV?u8EI)Xk{NPBYszIE5rXNDX^0uQrv_(1U$hVVjYbw}9 zTiYA@ME1l?K-M7*{McwqRDID6AX9jf6&y1e<2y!mCTrY+0A2a*Bu-i=Ym~``4zL5V zl}Bb7Eq>I4K_T!(@&l$V4TCuHB6?{-ClBAkj2xq5a+>^efV9tMRv?9l2$E_%299=AAb9l?EVX$=8rC*ka_JJEq|kbN&b)@z6hGe z0ZQ3vVNlM=nEel8R^25Mz~@1^B&+yI^T>yC#FJDk`0z&j@~f3QE0adTwV#Dgyb}f6 zM1BOxU;O#yw{Wdf?(o}e?Joz`N|&BI`iu7(tCG=WxIxRrnR-9yWL{gVbgc`kQ<8in zNI!r?Hp$h3<;mx&Ir3)}{tm3FztjJaz$zccSw~SOe1l%KLD)hjk2sf{TDPdTG(=DN z&bqB98!=*G{#c_di-8yp-?B| zjN@125ID9=YyvX^36Jd;ZHcrmu!RGR(411~uK;lDqHkz8*Q2N80O?78p+A$>k@=F1 z2tBkB2YHf)f)n4i&!+lly919Lky&xFoj{X-40z_DXOb!(eOrCgw%7P&KHpo;{@>Ca z7!fBO0McJRU;-Tah+~`h2s3rL7$#3>XI}XwQ~+9q|3KQ%#C-4}c_^RLA>a^rfhiAM z7{SjJA`|J6k^DB@wDupe=R=0j*7Of)R;msHU&&g9>e;%)U6~T!{FKk)XE|iTMEjAn z>}i35S5A>xjPl`s;MyjCTa3yaH-#Ua4}AF`{{$&2$>+E4giRkwe)!IJXs6y5mYG}G z)nIR!5jV2>S!8r$^Aw(ja)hs0+axwLhUlkUKxxPcLmH^BjK^(rG`zatPF>XFVwqqO zStRvi5Z}a5qLEooyE_xai6zv<^xO>bEw>UvWDf()5Q{}=`+A)doMH$k_qnBJ_Loyz&;ekBZcefeByJ~~utX_n)`0zf2>28+sil4>% z`uWS(iM2aBJ%NG5n3L#6c|u>_!!u5g;YmE2z1VYg=ZJWwc-CV?*O*0M6~v)DAiKNt zrt&Df9#Jf?!M_&2Wyl$3alXm%XN_5RFi>bT_X1GWf!>OGjM`TDtI{V6X)=hnnO=y7oRLQwg{7TFPCW^oBPZ^( 
z#zFc(ADbx3Jd70xmr8O8D)salLm8R6#4BF|kQZO}4r7eH#R!ej?iMe)t0O!fF#u#u zg;$^pbY=D$5AgI;h?!`93<7?>c!rgIOk9XT~ zb4uKC@Um}ca6H>u!GZhw2P^OKh{sEfO(SLN9GMx#!0Xtek*jxfl8qlWlIUlH zgcZ1gMjK!KvKgM`xc)dX%<-J;W_{KRj}6~)6M5&i@o&NUgo|27=k**$u6Q@ahMOC2 z&a)Tq&nLvd<=Gwj@ZDy3oDGTV_k`p*Zoc{UW%J2hLMIUtL%#QLQJ))qZt8vX(ft@{ z;cPR!rHcl{_c`pD6H6K5E)O`ME3FG4_8>Vb*vWcQ$`pa)QIfB7gkNtS0V{{Xq^f=-) zhq_FIl6u(~*JF5mYpk`s{zZ29RtNU8nS%&N#~4EDkWCBcVbMc8c}j=>V=PwQ)J^JH z^@vRIKaE26Q%^9N=Y+OTv!Qwhj$XuT2qSfBF9X%L>J2bsIW2Ok_{Js)cY8cSJ&P*ub1Cv9p(mY zPIRH4G{C!&HEPH)XtM*1%NX>CVb6Hl-93^qTrXkUBku>~JD=zj0?TglZuuHfO@_K=&bs4ry!?m94ZWtY1ate&`UBjs|tMPeh zafz&kSy~Iv5unK12s-zK`VXjI-fhBD+bU0ExMy6MyuZ}UV)%4SQYNL<+c&R*vl}q# zXG6YB5xT|r=w0XnWy!U1pH4T>N+YLZYMcf5t^TzSSl&Ab)E(ld9@l`MF;(?KJ}-go zMK@ev$MOWl5u2!vzitA!se9?BCuOCD26<&|VPPq-W;uNXo1i-<#?%GG>;C@DYwFXm*^yQ-VvK#&I6>6vAewo#ZY3?(76YD(>gdw z12+kxH)yX$TKlkVotd2rUEJ8FyRivQh&aY)HwqnV?e98hti0uP0D9`tqlbx2e#mJH z)1SY5MxP^03OElEcEmms8w<;l3R%J!NSZWoKIEq8yUkab|4O?tjNlm);a4A*n;UM> z`#7AZjB`1+V=h`L{3z!<`xT*!XovGd)C+ZUf-bh*+K3uPKdWzQ&cvo17rK%FKN-)$ z2j>N7E82EvVS)Ma7kJ!nHQ#*mZN_!ycJ`r>K~8wm7mOM2e8RDsQG`C{e2Ok^H=}z|8Bnd{WIi<(TDuf+ps~Y6q)yAC_un~K7$;xt($j_k8Y$fGl2v4 z9tU~_G0*%%p)7bZ0gxQNqX+TrrCyLgs-#Gy6voHf3cz-RTP~Gs*ri9pf|%uK4lq34 z2B$P$+5zn)`S?v8`7Oz;%Z~{0oOIJV={%avm6adAgTMJ5iv3rjOaNPOtDJfHOIjyy2c+*@j6q|h z&C)TP8(sy2rMZRagA-6BHn4B;)(=Y7t>$2&osPlpGPv6@d-K6nbJ_%nq)Ez^EGDVvD%(OB}f<*nFib8e zbc+2^k>F)pRMU}fzS<1c-_T1M&-heZeUKi~PCCja{!vGesvL=m`p;@2Xf54*85#7q zAAgfsx>#0V>hR(xb?_{#7)X^Hizr8tePsbYlSsC}Nbbm=WgR#JJUGS>%R8V0)7nZ1 zeO4f=%{8FeyhOZ=*l$ zx3nu<5ou(p7J^`FJ)O3+{41T}3tBHMK3$BZj=vT+Ej$pHJS9v1$@>y^GS(u12j0ma zK-y0uH~a`3;8Vyp`cF2Sa-z;3{4Y-YiBl@?WK4e3#eh%57Fdq6@BXMruESN%mbu@bQM5w zgogegPa(+<>tQk~9+IlFm~W{b83{hZ@V$Hp&cJSQj{JuI!c*reuh!f7wlKz77L*q!%Q$73C*(1b|b+Iza0@!%_2gJNwBF6C=ArbD~jm}6(m0DCs1N>z5HKeCFrIySA-M=2 z$5`%#pJyzZ_3Alg@zn)ht1D|PGHHuON!vo(yrk*q^%`vJYku&_!-OWOOQrBc)rbc@ zq>1w7LMz9qNjF@D9$)r(|J+NV8)VULtm1)w`w3Jh-DAmI9R&?sGq7Nlei(%f+aeyq zC0j_W7vD91Io?mYzo~_B-^=av!lI2D#8`|T;+^Tjy%%GP-geGSyfjp=FZm+CEPSEa z?Uid1WDlJ6{&WLn1Y_j}7oqAYcYx9U0>kbcMv&F5b1wc`VIu%z4D|KLwDJl>;4<3) zwxKv;hyrtEBJE^R=K>D@x-5(^NY z3=n#NnyxS);MryShgg))V*ECC_q|8=;0IDJ4Au(v=>bE}Ft30l7!1Ae3nL?oP33$T z!=}i|qZCxog>6Y<2bFeny@|nEFGAWw zhtT_z(M#J8Fv+LBva$Sz$QbPL`eZ?VD?ioS(AE%cJ+#$}5v{9dY#w)F4CuBOerNoE z*Xml=I3M(iIu&D97nRuzL)2*O?tVC58v?Ycn{tk~Lo>7aPJHHxxn}N@kLaSv68%)~ z@=K1W-%dhjC4_#WKF~ORMi>QgJAC^FLjoHkRCK()+zd|6!ymnOAs_u{cxI_N-&+G+ z%7Mo*`r&HISkTpf?Aw!UlsGjUM}HU#UAoL*WEdGgZ(c2L=9I||_kispPctWa2{LYT zGFgMJ#t*qaLrnX`0fzUEy&5|P={H_7XpCKrm&V@qD80rCy@P-LtKUXG3_s$9mQPFR4qERtsqLxx0AHij^a9CE@%!+Ad&g@#B_C%93jH>GjM?F(vfJ+w7+ zPGJDkSonHnJv!R4)i~sOHfuETVn@ff84S05)TaT+INAqpRG=H2S0146yq7=^?=IZIW1p};=+J5U`xxGuh7a+C!@G67A2BYfpI*Ivhf${14?BdL zIbkC##!mXKde(AY#Hai!Z_+r1EuPZdd`BJTmA9VGQACD0ain*vb5Y+l#PM}NFjSxY zS>vpm1BQz*%%C3)_zRqL*dRnq=}R^b*&qcaOaN{`sGpQc^{)LJ*>MwuaW@yY(yyF{ z`CfzeD1E;-HXg!0Y!ZrtvF&_8v*0>)GL zG)I|jHc|D=9*5@2n=#|xy<5#jjXD)*8Hd!f0|OYb=~H^BO7jV5{_NRXWP=SQ#xFf$ zjic_p8-B881cKhOw$Tj+4R!Gl2hLeehDdqx(#A^&(u+|YjSMf4OZvHE{Qxv|(^eUe zE`-jJT8e74)p*{;7~m#{nf9_8UBa;?|)8OHLHQ|>Im2)(xYhIYs=6(9%J=p%Z*JFa@M_YLKj%oefHVD|A!CF>3%= zZs-{6|K7rb^!WoeR1Mc*XV4+RZ#}T3%?Ou$8tO*xW%d1wSKsD+H+s^!Qh%Qzv(%5w z1;EeEF=atq4#B~3-FeP1ambC`uC7{IdXO=Fc4nFocMmz8wL&ay4A10Mx12MdaIJAz%Tx>WS;nyo!q9noZj8;~9t`1zmpLJ%(9Fm}#&kFLK#5BFy>zu~<&Ir2BG-ch> zG}7F;=pOe~NRPeUZD?3yyBiXo%2UUP(;%lEm1*T(J@00Tn=5?PM~^>&=40rk4dU=`Kyy!r0T3Oj zjO*m$Ng>0@{Of=9uhJimIq5`?N*mow?#5o}<`k?A?uO>E-+NNUwxTIX2c-OmPpndr zHj+~Vg?Pwh)!!~!QRP8^61=mCV4}4A(L_Z+riLR zw!?`VlM3*@VP^{|hnWm!DrWK+m!}LPsAe0X!5kV$Tif_y@+*&_W%0-bB)p(JLj`$@ 
zzUKEoTV9O?o_Qi4_3UqjLvNh&;>%AG6+xcqB#ge`0DR;l{FCnpyO2qYOzBbX2MkG7 z5eeV-eEE-jjH2b8uXwISG1xh{i))APGA4AV9T_~OjgxL9*G?-$I`Qq6+%;)^b=jsR205HgvM{azP7|-@mc}%6^#Z!xx3m1P; zh$mZ7`9dw)Mk@5pTb^x7a091elen2A2ZM!Rl4gIDHV!@lGG9UBy-9)+U|xtT1(laT zih4>W7p$SVg(5%ZYXP9RGO-O9IiRipClqh^B~1B+>}B!G0Fp&pjs<{m>TBCn#PtVd zLugPFFth`XlnJUL&ilX-HZoS5LzP~-lKf#X^?{2UCvLW+53wi=FM&gIs*Uw!4V5oB zoWSuqOyGga{ZCx-5f2fve*&AGwPey~^H~b0k#T>TZUR{GZ90X`l8?qnW24(DGw?5etxX7qw!z;*YTqYM zCD*125FKOD?=~g6QJtGkD_wb~Oj;ct8(m>yj%|LEZ9bKqnvZ`Dg_8_(dBjcUu&_ToFQ zZNGg)<_L?V$eaBnHLK$U=o{)&#DwYURhboM6RoO<5qxr@M|dopBQtD*N7jK^I0}!C z)$nN1)N4IVS~C#2=nh8W+u{>E!NcO@wOkhy@s#nMAKNF+{Oo2mBpcggUy@w;N)Oux zi`b@mrwp`-mD5UuC&w&mKGGsXra@c7k{H6n#h>kURxs_tnab~TT-uABqO&oAI^M_Q z7210oUY&cFSg9iztBeg>eOyl@JW8q8Ij)Q5X}lnfJE~XDHlZ;FG(1t)G)C_y#2|nC z^g*-F5%RzN#k1x&zy5vm#nUAWMZ8Cnm#B#O9(8qf zJ2ZN(zQY)&@sl#fCU*hn zVtN=uhW+yZkE$`e#@yuCEKrp~-%yHqPvHbRw1vztp$OSyrPZ$Ctw(aZcnOM8v$3cHQof^H<) z55^`5Z$S)UPhc1l$2xOykdX_7P!tq61s%`An!8wyWnzMyym*O zXzmK*lQa-FPi90q!>#q|XVZ(2ESi82b1LCo;^7hct8=p%3eApO36m9*@Ce>K>tNPIKw! z=)DJQK;T)9VZdeK<@>kI@SO+XU>g}x3Bl4ghCvjMdoO`K-^aTdL-*(?h7sD;PoEu6 zxFp6YWJ&|Dr?_9gc$v#O@6Js%&)%&!o4a@d^PdKzA#l&|O_!RbAa(U0pqkiqVEQ zC$Cw?p(DxNv1E{;{bh!~lZ6O4b1IpcGX;|D}&RnptBwjfgc~p2F zp#m&;y-xn|K-*C-?c~3H@zv&MEka!F5KadkU+h3U-pyw+X5T5y09m7tPUNL7jEc5s z_sPo+!F@tsW=v%)r{gCCOEP!rR3W6oO9P<0KKbbCEP~!I`P6Ks&D(rgv(m@P8XzMk;N@57`=HPV&eoq#Q;y##=^e??_7VB{5Wjw2d(f4n{ic(YZn%$?Gj*AeVixs{rm0kNQlgps%X#e z2e&qr;njhQ+Tu>MJbKWAq3|mw$CzqN^23imv|w>_7BuwnC-QRt^yfcs$J)iA(e2{+ zTf`;549g6m6Go))PrFZA5RFef&*#AL(&^LbfWzrq_&@1P1x9lVo!XSV2RpQrU2584 z{VHl*y>h+XYd^&MJe8AoMqD&}TA9Wur}Hj8nh{hRL2seiv=fVe49dU!_@B>WzIlzs zI^{fW0Rrziui3%!;RhdO9IY+%r`<@$v-@nG<(JZjzx(3z;r)2Irw@(QUNwIbD&(uL zzL;@`cKs^8>K8Rm<3oqY+QBxvY+dzb2$$W?7&vn=S+w(|Map2|5jtJHcDY#Ow^}qQ1XnaBM>{R5ipPKF-ko%1 z=hr-IF(ZeDox&4wcsN3)>$iTmaU*@g&`-u0&Ks-6BfApl1cGneBFxFL+8(?Zs__(W z&YsOHzqWhz`nBPyamn!v`D6>M&V8}M>+g8xJCA^OyWMuq6FA?FfwT3e7KOg~?z?bm*Jr%5 z0B50?u0cbg6pbrB|NQfuMC?YZX3+*6q3T*cp+02CBB~hiv&a!VWm$l<`(nu^YiXI5 z@->O5@vV`kH~4<@Jwi5Hc&Z0N#y#TWgp{K znh>6eVfQEQAT2BPy( z*!sBIGt|6$+QrO&;o*}aLfiGd!~qTqn(waES2>fAP_NErj5=t$00hJ3f;*GEu1$dF z{oVvt-PCm;dWV)Z-h^o--QrW4Erty^32S!=r@U|9?hVTDi*U$Wnk^oKvjc4AqbAaU zSK8>;zn+l;gVU`%phu5~)^LgD;L4+3CN|*DfqOtIYv6P(fpqH9W2I&(pf+FM3T4SmuFPpWAP z)j9nAtO&@LX#!9aKb5BQ>o@|0U+oC(4h2jHNve$jI1I5+cf zu-6{YVG{Y@dmp%ax4j?U&h-KazsVEKsf&6rkW6M-%ZYB}+5`oSQW)bk?>>B2KF@)w z?$Lb=-caljXv6zxH3}!0nKl!wDP5WVl8IGtFQn}U+i*%ahgrZ-hH@Etl8?pTRmx0y z(60d%USmyQgVnz#0b_)k`UNwk|3~lF;KQ`C#=ulQgLE_Sh8+Q+SG)r(ym~a39!?qZ zqZ9gU=?En-{n2f~TkUDlaNdI1C~yo*0PXP>P3OM)4N?XeCi^m^etiSeK!Ci>y>G+> zqt2#L?(mU|+DtmVFWYK=b<{%`lQ)A2b~!wP zv(M0=_R{mw^&Tb9OFYpjn1g3&0BC>OqJf&A-_)pF3;5sww~zpH`)}$m$-MG7qKrQ3 z+H+963%VzR=P=CE>aaj9y4WVEv<0nlw3(?#@1;bq%3p_lY)d3-K=pjuGCIO>Xgdu0 zM`cU9Xl&6}J*RC!BKRO0Yz*HAo`JYG^69_g>ASSiNVa)fImxKHdp1S)Z)(?rIRJGS z+q?KPmy!|QNVuFvR{@ z6!cU+JZdxGjxN}v@UU~$e==0QZ{cOM2NOz<78&>6ZQD7SEC1r4Hi|lxQyv&sV9#p* zLmXHoXDL7Al4jZn;Ak6tlrak{y`C&v-1cZQy0$^yR5!RILsRC0H}BQAzqbwGR|{Ck zv*9Vkr%eZK-l+)O=^Zdu8(8#`wRBTi-(Xaxv0ZeY>-jg*u}#;PqMs$hB{&!i@}#EDfaoZF%ke$Y5!vJ@~GSsT!F0us)TvbWksQ zy^sv{=*g8Wd~mX|N^>_)9Y$G3-;)`MPh>PWnxX8cCV)Tt@Y;9?(vjm}P$4F&Vl6T< zoE0a$iIOlT-ZSJe06cBGeY46EGs3@^?f52fyHDGKo00kC<#U^luAJ$xqYIm>@14uo zRj7~piVq9xBF^ut3>LyF9L?+KMTRBuAjST4y&JwK@{BY&j!iR(RSxeXb>y{6F5c9} z|NP@?n-9(%+gu9IZ}NH+{^LZ0ehbbFRQl2f#a!n(CVuM6*G;gSKt69mAauv+jQh_s zARW&Oi}BpV#Uw;r%H3VMgaIc0C1)m>b~PT!@Xu@YezC}J6^dcE`aH?FYa%b6Gtbd6 zmd6@&;{i2?*mcii+u>Oza6+%xTKu$$vdJ*|4%eprcPMGGSudPfho3PbX#c$3@r6N% zXTO-2GY%*xucIZCeMdM4UXdRYR|o&$9S4U0{Kf5z(dW`Vg;Xd8^zXmFJ7X7KWkgUJ 
z$1=iv{Na1ySWN0@rF31!#9;;iu|UA;e^ zSa5)+Np?~^GWskp?OBM3PK+&2%k$NZdz;VB6~B{_JTKRW4|a<^+ChIA(Qe*(JjM#L zF*>bE&q7jgP3XlVH`%sWcKXuk&DY--9w(!)sUI)D3mG=2&r6r`Fo&x)ZJe%t$Mb@| z_}=BZtqg|8&kwJR&2}>0h#Stc*SKmo&n_{?57a(3Ek@8cytntaHTrGgSI~OKVR_xH zIa~-cqGVvy$1)mCT-x}Gj|ZCQI~&tqcG7+Br@UjQH&4I+JdgKcFK0~R zVDlscy?Dv9H9SE&knv#Ofp(tYb74_BNi(?T$D{s5@!`chCqRi^YPCh4mp7BE+F;|y zH!Tv-ZwHDKeC+yXn>Szmc8o##rXy#sRwuhj;(^c%8K_=mSkLmGY-JeBEACXGoSyAQ zvm>>YlWkk?P)Eis=MX%sZGZo}FE+>Xz7r1MLLr;37q01E`7C@mMDH|@aPnIl&>qM+ zL*wnc9oE(u^KtFnLxwSP$R$@T&ZI-MuUmy1Id#8K@q~g1pBwP&3#%9Ft)L?cCER(vdrD^hBQV4BK{u)!I_Nzaw zz1hw2dUO5SmCgAMp9J?o;TxP!!RTj^Q13F~1d>g0Z4cLHz4T;W0Wt zd671Lc$|16}#u@+tE@nhO@ z`GtTuoo=K59WXi`uq|vj98do7z^)#QRP;v$y;~rJD`--3t5gNq%G{p5c9xqCD(()4uoVa%#Vv&cjis?m6@?qtM2T>E;Q zp=?j5F^Yv+<-FXyclB~{k=p@~VT_>+A0tV6#KD-x%XCh(h~LjLUTd%J4>rG{Pkr!? z?o$bi(WjFe)tQC%(qB$jYj1!1+rOW3pS0jD%!*Jcc3P?Lk&MyC>HGJeXmLP1Y`WS4 z#R{n;M1;kiop7AFTu8o6ejV^^F@dAxqm1W&-;NbV=Mx#2&t_0WTW5bgdU9{`?RVdf z@lac52sgg?;De6~*K%re^M@a2*M=QUPdXdH!f~U)@;xfQks4#TxYz9#=;v66Wb#6v zgC38bOt!O)L?6%4$eh5gfTJf1E0Wy9<9Xh|4)wfn@oN2NX@h#Ue)Dl>MOf%~S$oot zTt0a{S{PR(KTml?-RW-}iUt_Q7 zhq5Et5peq!M}rPcuFqh2eXdPLZ{F>~Z!!2jYth75EWuQKqj^NzDrg{7Bt*NIrDegV*YKw$+u*f5H2rqxTFziYo>*F*K zPGYFfB5{4&vBF8|W1nMd&2 zAM?_7t-XxrcHj75LCAS1b0$#uooYPyy8h;VyC}%cp%$8r<$m?EUyhu7{ml=P-a;k3 z?UZ>~2%@94GrHkcIz0rYes&xj>k#Q9M;yG|nJwuBg_ZBS<9;`oz+4{#5@rmY)-J>qcn35^;yEzb1NMJ`KD!^bNT*{oXkw~MM z9<+xPRB3v$;2BUy7|-HH2FW^J0e*Oq0|IPiQDBp=auUtylrd*1HhD21c5KsP;L2AL zWxYcdoYKub;Jq4R%;okUxXN7r*B!O0%c*|7n#tW76|8hC`J`Coc`RpTj3v=!_YKNw zR-v|SRFlSJIRM-G$hYEuE5AI^Wxa=Q*YN@-YtqX@f;y45=K~k)@)^u|*3aMpPLrf@ zMX0RVZU{H|2j+n3QhGS{y@zL&9z#$tX2Lr38RiTe^w1bqSrk}#W}5^L|KO8sQv%xA@UQ&x%^{QFFOO=V={w{22&3xtUU`FqZ#R9-zb#+p zQ8Zu_(P{TtgC5CoPwFKLlYfs-!5la;bfJSo<+=4WZ6B{NO(SfP7lD`RkbVRh3594K*Vqo@vvX2rE=03)8Z|T*kLwVs+hrE9be&IUxSo9i? z(aC{Iy@<$57Y2h12k$ZEH+Y_clrgvmbNHkURu(xUdxM$(;73Pkdl#U|uax1t38w{5 zb)4S2`=I~;KmbWZK~(p$cJIr*yy3qV;5=RV{8!4rCIh8=XIzsEJ*3!^$>c-ZnD*%T z$Yyn!I#l-H3F7*!AO6jg(#r$~ zH0xD4=7-*J>w~uQ&NZNDqb|YN%S%6m)5r-NlvjP|@dYUSYkyO*dFwJ5^1;Dp=+txY zqo=2PVOLBRM~-_pI4eJT&VaOQJe6bVOE@kWu^=ejIxnQho=;Wr%6W}@$W%{U^MfLgR1@9qU-`cpb|>bKP?mA^MBS^BN^)z2(=qE(;i zCr1aWleUQ0_{NxsHc~OvE2p+@_l@UMXYGTmhCMXZamtjO48gTJqgP3$ohd(9iw$6w zM|p!!dB9dE-uIbMbyF6+0IALCBl=rogP_m*DPZ7s6&%!+2W(ljTXglVe+?q19IFjd zkIsM(yj4cRg~KWr-e@Fwb%G0h9IR=#V5(Q&^RB#8eYkp#u0vZ~MTu3%B|{6hfep{( zPY0(C>$AQWITubN`^rqFN7r{bm6s^|=6Mr*mJU@}gI)NBVc_0DF3IwmmwmklRsxfc z`)%tqeznE=(~Q&%lFiqf)EsTn{9v}tAIW2oLE=Co?`7D?Lo>sInAh|-a_SFFLQfTL zfiW9=?d)=1nI=CbbBs*mi5jd685@rr&N#x`H+-OUu1Wt+g@cDjCW?0u)d-+aG$UpU7I8VCd|n3LMfa+@_h5F53dfquYb5R6MmCabv#rWF?w&@ zx|O$1+jT1o664hvnM`OjQ#9il=TJ``^CqU>eD!U0dX>SZJn?737(~B|h3~lj>Bln> ze#%qp%<0YXN5%K-uu1VDpI7bxXh4_0cw#17uQIwEZ=yH{9tP`K#_?xk6lj}s6Dyp0 z5`T8LA!43&ZSp8?_-)(4lW(4ly!ILYgl*wP&f{_!$X6eV5z>k!%S&>%Ng_|Q!`(B! 
zMC$R%wg^E#J?!AlyHDP1e*ev#&EJ0g<7{vKusG}t3C~|Woyp>d8C1j<7VB4AFfm@+ zzKc;`S@2+FRNiBgV0>$`>O;q#t{#j-6aO4>oCDx-qiG4A*0T>AgDmkQ4Ee z;VaC}z3BVD{vZEW9{A_;c7MM4VfXgtaBbWnuiCQ=!KF~OED=uSo% zY1_YESQYv{W6b%^4=D7@CJ$~0l^#5lXKhYEZ-R3mBlFwW#ePlRji+8`%sFtN!)lLa zNDrSVQ<>GFunfb8=*Ey#qRo*j?~UuepUMf_?)$2{n8Nb8( zSRUDhPdiBI&;gY=_5Tv&{WXB{~#ZV{{* zL#2E#w3w$2h!_2{Vt0S}?GKyZ{QWmkw1f9r7`s)xU41LLT4y~)t8jRjVTOVFarHTQ zM8B~@0N7rS?qF)4!kqk64D+8d%u#WSjuC$J4^PS$@%dtge|rCQ@PwHn$)I_m9XCQWWLzzDkFX3D z8OuI$`0xJxUvJ)f@BPiEg>7(f@ZEGN9cDhuASB)FF3R{eZLSBW()+hFTpit~-&jUY zMkDQ%@jXNfar1Nx%EEH!U(TI9y}5O>b0X4pcHQ8eLz;j1_S?+|>Dsf&;r)#F+PL;H z9<)`AfsMhOk&4$c;}v~l0pVrs?d1zzx6%7Gx<_YSYVraNA9Wyeo-wq&$WU~=-2wXA z$MNlGeV5qd4n%bz^0{;2l85UY$eN+nm>>V>;s=F&__zPz*WtfYc#SJ#=(Jm4&rogw zl^41Gm@JbON9F8h?9(3|ZpX`mbpDHrr#b+eNB_ai*$nq5qvxq;G>bJODea`m@NA6Y z9FBkX^Pg9yJKN-rdJ5o2=fxd%Hfh@eBiYefa(7 zcW0sgpwP?-+vOJC?#6rJmad+8FZn;Xx!*kCS>a94%YqzF=u?F-K^r<#`xx0{JSMy8 z7kqx%yZ5iXR~vt?g~fLGq(@%VW_g?QqDDWw)6T#ArWowWpKjN#8f}ZbRE23b&NJZv zfiK#3S?3+2@1w>sc+NYYQ-xh6+BJ`O{no>9zIyHb;nPp;2$=pk`WPn~XYlq`A3M+P zw!lu7cHISC^GOk8c5sC=t5)G zInX_z4C)_#`swCTdiX)I__Fqm-@7dqk$F1SLODj+t+aQR7`|GtIMU9BlX>%_-_80O z-};k-JB_{LKQH)oK>b-Ee@vuw_)C4N6i)?2$jT1}f>Ie%Na<#RUaUUgrJHXYaj)=o;bm8Q zcu(Rfd+=#wHwJ}y9FFh}zg5STR{ojvOL-1JEfcXl%%(%bo2hU4hv?gZcB$Sp$V>WyCk;bDmkpX% z{@|kR@CHB7x!d3m9M^;6w&M);9&Ls~lis`FRHxS58n{UVe}C}V!+nc!_sdWb1Z2Rk z>b>Y4EHD|>`dS8P=@#BY!vx)cssc;b_jY2uPe&XM23C86wV~O_ML3OoDGE=*NwGe7 zjUOi0U@}N%4da=o90TPszWE*+m!)jO5@~(h^tb2-w+2P{y%Lqb9E^pM_A{@l=g@7) z1DVnc{JzUKG9QRVe<{&ZnlhAbwXeQ=w8{b9jgR-+JA?Yc!(-0@U1_wl1!p~5xzSJN zEzO6fiY|@M7VsG00yLiU!KE|@ZeJ=0mZZ}z1_y8@!E3ZupYBVzSRv#3kTHB6V@$F; z2FPBqhnQfY$)rw+r>TMw5OR?$&0<3Zc^`IFQHLh8^2%F%ypwK>#OUeQ`+k=JxSV=J zaKe4^2W!N`_k7fGG6tt-dv)&J(w7ESTlrV+g=>iy>d8~@7c{i0EI`y#8Sm-^cr@}X9OfOkDm9e^SFP@c)1cS8C%yY%fibOx{OE)Wzp1c(SuL}Itlz!Zs}Bb`*_vSyFdOp z@U?EGM_c)mFIp@-~`AjcZK26zSNzL>64DjVo*AeL2P_DKZ4hENWE}cWS*M53Z8^#--o`F?;7f;bs|JbXLemgtJ zYF|G3J$3Ro$W^ZA+p)#y(;$H(f99F~A>R61^;S=HR=}Pt82lLv$s~Uu)w3V{$Lb?I zRj7QDNo)IZZMQV@4j#%1Ru`o~!!;%v9`sZ?c%b{9&QqP$cDtZUqW6@7hroH}GXlPF zpSIkir6)_x;3QXg4Xi=ezx7%80s%L2SX!`U>5G0F<9cuALI2cNx4+uMP_^su507!0 z=>2?#CuQsT$QkI-VaN`C?}wlLn_APA$)q01Oo(_uHiOEbb-dW& zH#avjpxw#i^1{W8SRu+Ye8n`aSI*dMLMJSY!#8;(?G_e-8W0QfY1hZ|EF)h}^0b-| z0ccbo{QmVTo7*>UZO*=TsgN2%EaWvE&93HQ%wzUh9_de;Sp1ZsoRQmseq{Yk0?TU( zpT>aPMC{e{nbjoz2`}(EAz%n+%y^Xd7`hcvB+}oP;jFO`}^TehET>11$2AO@YD4eZPN<}Gx`fl z^19u?Co@2gfw@UC`FP#d`X^s~zBzjRqiM4T#d=IuDq^-U# z49~uJGVPU4HI|rDsD-QcuK!4WgdPE7UvtCb7cOj` z6suJtp-EmpE9Q7~US$-IJ)_-$jK*)O%YhCPL>q0xIEMoYuWn_%4&L7R9OepkEAM0Q z>}1k6(dJlfWb{$_BJR-Zb-W*^h2p5ceV&ov$G_h^$$%tY{E>4Vpt`T!7&pGl_*6XN zCiO=zeb51>E%?-~=b+J?o!%7AfJgcM+Oma==Y>$Pqk-3YiB`Vgz0B+KR0dMJ8g??M zynWmvQ(oK8&kDm)+bQ3zjMdNE#qrV4KFYJO?dX-WKb^;@c)#{7bi=-kO%4mbl5VUCIb6l!j1C$yTtOc_8Grg(l-!^5?lj{)7zR8U8SR!U%Y{ z!(olx#z@lQBQIt~7)CAzP-z*OE#}ed0Sy1%^=v{wWIX!u_U$pgi&gyJ{pEj?ZVtc7 z^!|KX`5B>j%900%%;C3sI-iCkvXa6jNOnN#m=AE64tlNH)L5y5kjC+Bn)d}tB zJO`m#ES#Ms^$q&oF}|f6lyfr=aXRnd@s%Ip)q!Dn7H+D^7|JtjjR&W`Y(5pzFS!@y zim{5fEn~UeO|95OlN;5~;l>U|%$u`zbD;2ZjBq>AP&=5g2)!HFnBM1*HsdeYso2et z6d)aSD7s!~A(ZY$D+aM6mG!Ef6g*cg*3h|s{h$6z;cp&pc55G>fB8cPZ~nOX#b+N@ zwS9S2|Gou|b9sfI2v%Ot7GpB*zO0|2qsS5?=eOVeu(@_2BU;An6O~~c$;c+Wj=q?O z`HUChv&D$>?ew{Rp)(g!ED!U{e-=#{9gW&%d{kY)H{v2@Pl7e$fQ*9l&%jJ}jc1HJ z7!4WUEViIO{kp~x>4Sk2@%3MoLyN6`j(jgeD!D;lyfp)0s07Ctj@Pk1i@}v5BoC2h z^xc~mYmd<#6s3g>JIQ`ZyhOtnuM8_FH_&u_0r$ zL9%D$U7UG!e3oq9fAACX&ti-*?4>K$%adU~rwTG*!H`i~D5pEOZ_L=_Wc>yn&q1Tn zz&OO>#^b#FA2t3w8J^l9eZe?>w=$HcO`WhP(KnryIF%+{n{Hv?R$uSz;;|6N2v3*8 zV?4Q|BUxqCm)|pZBBGo$n%y}MYj3-St(h@yaGW(Dbk9S(7~-QaDCGBEhV+w#vvM7< 
GIT binary patch (base85-encoded literal data omitted)
zZoBMQnZZij5z-(!@d6zy0Ixnd!NOhe+4MXI!SfCv>yE}eSzbc zP)9I$LRKuwEVBT$72YXJKFb^r9y{ilnPrCo5BGM-gETyF)I_py11-wBx?B7(b1i7J ze>%H#Tn4SkN8I3v_ZTx187Fs24V8$NP7)Fqz+0hr!e@tXFiPVksq#ld@`P{-i+1DJ z_{OVW!PDBF>pLvZ@q3no(MYEz;S|a={U>9*mJZG81>*aIFJH(sI|}oMrUDQQZ=8nv^@SZjL!goB4=((51nQMkl;0n!TS0z z$+q5|pxY8tUVr`1ug@%^ylTrWkXv+`W}g?NNf6`IAMfCM>I&Mdi@tT7bsQSgZlv0Q z9}_xAGj66eq1lY3mGFE!I4QoHz4Q|yny(OE32hK>B?4ZL)x9c_t8Q zxunCqO2$l+sEOo7Vv#24`$3*6gk;fK!>fp#v`U}6VUQzF9Z&`gCq^w25#D+vNZKuF z=hqaKRm&QgqLjo<`Oqmz)A#02NoxQuJKrKB&$2iJZ}exajwts#mJK3~$v-(@{m3tWc^(~U`utVjW%=~6*e@~llA$c| ztv?Hqg3ynrzD(bAtNhWa(rKQA=V|f^FZ0}T`rbdjrGvN*s3ObJ6VPQe%kEeKJ{U%t zeOh|k#|-}Am$jDU{7E?oM>=K`x*YfrH*(yLOAxp!2QntVtNP7Xoz|w8L@Nz52>H;F zE&ozM`2-IPAMzs0Ennb~@-D^lEjpd|@~-~bu6*9om;|`9tb}m<#J6wxXDaj$5Kz|J z#aq+LU(zKA=|#sPk1lqac=#UPngkE}yvSHClpB#hKXS7El$~6gHLvAQcqyHH{^_@} zld+-6Fgc2>rM*@>+e!fWe(KU;x<2Qb(M`LksLTRL60GzPVe^JIUU|=dK`++sBXDemcWM`u=LCnPQUVR#XSQ%##67rkg1&r$;G=exq&w zmem1tPP!)mZa|U0kP{d6nc!59N!1ph%5$u4oxIyuS(Z!y$!lyJD>ndH(^h`%YXujy zGSn}n{vQQE_W`Gl0jIQTQ_S_dJq}nj1ZeCSO#mR}C0~6m+OLk{!}A&oD!1>Jj)yMl z_Xbc%-Ke~*tYJy31*j8V#y{5pfVyZ91X6EIE^;6jpw=~$aeZ4FM_@ugS@3ZNKxsy+ zGiVh+90yp74}JJ?$wXF*GA+KRnCwO0q&}JCGC4d0v=X#2%}Ie5Ccs*J9|4+psGIo+ z;5gZH3%$PJpFUb|o<3Z!NfV5B!l?_ER03rW7{d*0#G(%u8rB8>1WerOE!_f2PTX94 z@Z;n>7HM$s2#*`13Kv9Ooj!xS!VJ6Qhn`4BG(H$>R54RlVsa#tpjv+~mg(wz(=GQqy41c_UCs)Ti zO>b)>vOYY;L7eFqNs_QbWB3PN#9a|`X1Ru#&|h_RNrKB9_AkbMEL&^yujvakfZzysPv?i+Q(k@f+YiU5IJ#a-aqB@@0?Ee$9>A%4K>6VX z`op$5n7UE7*ncAew3!^}iwuQ-84K9YxyyycCfXWohaMK>s59Gnf4*xUtb7qT7O?_j zWD;QH1X&dr(F#v1NP#OY?bWCL%UG1_9y-c}pi9OxW9U=IKZ1Sw(be_BHade0NcN_A z%uW?$-Z7!N-L$lV6`WU}>h2(*H!`ACHYy_kv*6UX&z?6g0d>dWg+6=DM<7$6%SEV< z9^a3iSzKJef_nv57lKPS%`blSZO%ln&b4yAWT%O~$``1YUwrwi=1>3Z|1>}S>%VP& z{nfW=qmCp8+EW-pk zashp@)LDwo)3pj*pmvIP%Aak({v%fL^jlh*-oVFzS>!E%=|bT0(jxUhoAe(AQTOY! z*Tn{9!hXv-w9m1h5ZnylhYpNh24ok2k}iFv@6lKNzn}iB`5Ir?E_N>C%e{m3_au58 zxGWai>2sO8j??$(L)^Y=0XHR{Re_+TZxRgEy@}<@SW%!2dZpjFnBAQVy;`wepliDH zcgnWhJnqaqywLi! 
zLAc;>fW_w#E+gWm#svms;tsn7tQ!}BEbBgcP93KuwKR=VPH6}L$nP-=!@GNXxBz;e zu!8=|!4W|6>ME{M&^!PA&;Fd94;RfJ{^6hC=X)(>6xj7+-*0<#H^rNc4Qo!e#bQ^v zwXfFKd1tAE1vOUV)UkfKwcvF;Y29nzGKZD2*Y+*8xp(j1<=EcW7v_9(@M= zuUT|EBcuAzb{uu)f+%(8xL}Sk>MZ(5nWg85vBCk?{Q|k}7I4wrg$FI^9Usl; zRuB2Izz6IwbAif*i&5IYyE0fu}Ip=M4s`5a7m*_edEi;!#ztJm1b~w0`391CyV+nTnp(C}EN_PnWX<)%_ zJd5SEj8Ry$63GM?fot4W$1^BR{2VMLfM^VXLehgPgt0-A5BbKbHpS+7%M=w_;_xlQ zL?wBZilO(&vjPctNM$)7P#L9j0sB~b_GsijU!*AF`>*C-Wo1+C4x8kBprE>yam8mdDKA;4wFNCjB<2&ytZ!3J}uX0Us0+w=Zj(y^}t8R~HDhBQn9>t4B^dUjX z3wmxG{7Q%5wDF-eJhh(v5GHAR$zOaFlV|uT7%7#eWv_y4g;e-10DVit3njb@;7kZp z32l}o@yMG7?O>b-B!OIzl)MTDc!Y40E_7KB(!+fbj7@oE~(8A5S%)9Vc z8YK(h?ZrSun!s0rz&J|e@CYdUQjRSr^1ag5JJn_sRC zr>+W}q+y*JMjmh-Ius!4Au_K_^3QO|SDof$hcu(lKIF{>blR3lk$0wBeC_jF!L0I_ zIxI6?>&oAh!(W+DbcOepgE|&SO^}4d=D7TJC+s zGa|Rwa7FsDz>_Cg=*urelXqy~z5<&H*U(a|baF$d4L`Iba|A2rOU4W)?Y04*TEe0y zX>q4as25c;=HQ{$Z4NvzZ{D*&%mw$9Idmz{4tyD0**4Uv=`&4%V4>A3;v`jm)q7^* zM>((#sSEqJOpZUu5EPPL)dl$xFM3JYAPiBf9Ap*wvwVE4i^z{*`n2-f*ZtP1I>c8z zZo*mqvMIyMTWAl(xWo>M(r+i>CU=lnOt&3IRk5KT^xYGk)=0xlDZ0SN0f^ zB|dkgU`g^5{t}U}BRxKI3}Ud9m6++vB)(`(e}#TY7{27KQlIBC65dow0w+~M5_Fe6WelVt1{VN zr&%Ktq$WYhDq(pN7i#8hIQSl){XEw#jD1g1Y4bIr{}@FlkF-5*t+oX5!ZSm7rYn&= z`O`IDh zVZ8t?a!R1v!TuN%y$dEg642jCVVag#*C|ez8bI&uMy!SB0d~3^rs+X|V@yD_u6y{qKW-)$R+^&?EHaTD z0fgT7_^o})1Z8EV8Dp|P#3Wny27+*dSRYwU9$2aMR3?SY4`$(w_r5%Z7Ue<9@%>}` zax&?f0f2n~80~?h0y4W;ipo3dq%#XRh6S4wD93Tix_r_ybb?8pwR{a96i2~w0S##o zL=a>{Vt9reFv(2Y=3e#URN(JXT`0!p`m}@qfT;2+_rp>C$ zl|#~@y$Pt%uQmw{u`tWNOkHnF^B~v@K-D3_YpvPc<{-7jId(b#)MEv< zh+pI>@^HL3#)60X$niuLCXh4x=o&A;6Lpz+P$zEDwr*fn=^EmAorPNJ=lYzj-4g%~ zlref3x$l4E?ZD(>a|7tzi~zVSKO{-o)+tt7MuXmU7Wh1J}&?@uCUA)(xIWBX zyz*evYm9q5JhX?Km5UMDzHE*>Bjx;#V9hsR`_}Lbxf%vc9~c)PQI`X>uw7pFGXb3` z_XTBEj=Q-0krf(eQhDhTMv-uVMZdWM>*{U6y)$Hb zc?maBSO~|xfb9vr@y++on>DPwSMfP4_@^FAzn@O!rg^HnC|s)0qN9tYO;-%#xLfJa zei1eJ(=;D_^pLg>;E5j7GIj0lazMW2&SDP1o1>m|MR5T^E>PXWYS;z7XMcN89aOA8=P9g)&F@DKK55>vK!K#B zSql2~I0Vy0pWz7rQfO0O2<}}YKlUdzPBpj-kA$KAJqUGS!NmgFD>7%91O;6H5ICa* zDTDo4_@EU3`$&azuT7cRP@I6xO_h3taH zcr$_D_6@+Yrg{402h9rk$FDi>1i)uzSv;UkC|fSNZU8=O(W|^F@7Ck~CjBS8_Fzuy zSyw=g7Y@-~bLW8YxG2tI2lA%R?_tWI_MZpfzq#4N1qar_wEN$D&EgL4Ov90;V7~l6 zqOV-Rnw+j2J~OvK-UZPugS>UjaD;{Mi|?PsYFz#+Pd$KGEsw1){VrE{8fMx#*2Rn? 
zoC7)N%AABMv+uRvbzEYdbQYIBxDc9}oW-Tc5`1CqB;Lhm*qsCRImFRFv~ua8rdlRX z(9T@E-P+m2Uv!7VELY=#N!qopcLBJ|IVL-}0YdFFPFT!t1TC!>Smbk|?+rUpJPcP^ z5zsthQAGgw4Li7$H4oc$5o3(9+(9qzvgl?RE~q0H_8nO}&TzhEeY+r;1tfUrqTr%j;u!qPrk>^VDt}_)l7%aWIN3>!W;@ z9gXTvb%21o=ey|k#PN&%$TP`#kDy0%Hz=UdJxF(6S%(+Jj@a zb~l?BufJ8~=V2$I`_W?q%Su{eH&DREqN94g_V}g_172v}~LD{O_{J?1GHvSxnFmXuO~-DWihu z!|+ipGtW-2{XKn{Gd81dmv0;z10NXMs{go`H(7{@>^d@{kB6VsE*A28=os4sMl7^n z&jD~;JjLRuW1b^+Xq;j!p}Qa3xc88e_~xgqISmy@$XJqgW8bF>tGI@OR`u5qULkO| zLJv*mnOG9Aw*2~t6jL0VF7N5E;WxrhtMs+2GsdNH-D>qs7&w`DHEHJ(LCi!QqOG)q zg(%2M8cAPp+jo32X){~G7`_jH7{ftC4w%dg$pM;x0+UJx8#+9xD;FSJV7xFBn}Ef8`^x7wU+P7 zQpzVUBd4b5A8Cd^1yDdh`<%DoJA6_ejaH`Uwtlz|QQjxMWwm{n47B+k*$Lgb=N%uonyTo7Tt1Pv zMWe~WANXb-sSA=V9bjw|_dnso$5ytXSuPcbZ6*R|-V>K78RQ!;dc*hX9%X@e%6G(# z80|AI5=W}~JWJN(S@{=_co|v}%pChw5Q0Mi>7b90?YrXA!RA<}OX);fu-Nsc@rG`cj| z+CPN9^2RbKFNWjGtAvLip_7NUE#jJI8VWx-H^JcfzzNgtQ)Kr1i+r@nRj}e%+&3jJ zpV}5JpY;!KtOLs@=+RFP`S4S^Mtbs}JZk^%68xz!%0@EOf@*sFHV86hm)`tUtIoW|09ZBK@RsJM4e4tE4V_xMs z5s7Mo)n*hB!$?~#;J5J!CV%@pj&Zy6_RDO26Et6>7haNg7Fr0FHY2|h%z*qwaZ3iz zy|sLV73`P2JPn_-0BISR3~j%A3GjK<9N<^Fix1^FT~1K^ZfoT$f0T=*dA66sqaN-q zv<5uCU=U0{szux_lc2pFw&4Q?Pcx9#ALS9&C+7O`ll!<+062h6SsirO=n@89=>0m?xMtl$!$?!IoeE zDFHQ&QwEs~Szsq8ZW~o6-2jXT&c^))w8uB?AwV{kjZOxYzxn%jbAaXzZAPGb7;tdB zv(mhNy&GU`6!3rfqX$fu1_06F-hzKwb4$XwV9@a+z z_NXI!3%BoUm9@`g);4tkk{jR$S@7UVL7Ev3o0H}}EM5=Zyl;;2$D&A$!uR#1DGoQo z-zu|yL3c*migRSzNvgicH#t02_Yc-3ltvEJ57wOmatiIUOv1FByMPw0So0qFZ~@eG z$DQ!O-$Q8V0(30TatJBBnPU>GMPv_NDOXyCJI+>jYt3pIw8rvK(jM-28vKIqpPb|G z7>x!Ef+gp)v#wb8#ZmLef1vvt{J7$ib_xJ3dJk7+S|U51c5Agu-7ZK8=vcQGfNKIs zcJ)r;s~xz-8+nGtF5r;AJIKqc*KeB+6X#_>ZNXG6;LiaI?yN00uik7?ZRjWU3ADKr zL2%7F#E%FvjNAyk0*1K*p}}|h2z4+4Ff)P6mK%T=%cDh?^`mcftpaVkF76D#i|DD0 z50F=M(BLRr%5~+UkifTL4(iIz1%T@z+@IV6P{A1%CXh{hn`hoRs9_a!?{7h$eGm;6 zn)P?Co*f)#!fQP0PdU^r&&3h-&X#`<^}IRJ2QYFufjbED=<8<4PI%yEqQT`1RFx)W z2LN1m0k$EobI>LS-j2YJv%Po7H!@0kSMb_JD}P64SnQzx9+{lS5+6VeYw9)KqHld%rmbGNLWcD&JjKG*KD$)Z11`^N9jsMp0z#?oR!-4* zW243`t3O?rOaM0N)?^0#(_zQSzLt>y!KGUTh;(ysoo)E~15ew7mVivqZ8$&r|2@H# z)w``70M7LQ98_t-O(Ca`(U)hm;q9I8X^Vo>@C*GbsOe$9_KE^3`b1v@U=hrI?(?v51G2^T!{3lmiEkDXVU!^e=6RUG$U( zInWf^$5>=KJ1Z+vWk{g(jQVzHIVG4)ewJqv84@V#(MDWsWvv?j%3HYE5Xcu8DtA#X zU@D(3+793XLNM+U{UD&H?7F~9f~oKKfQEwD9_n`ijjJp)bZPTb0EV9=$x zfjlCR8Be1m)D?bQ0Pt|^n%fWu8ucX|c_nS@+5S|J(**!^;T25KLOVKCuvs2JdzZfC z6oB=R`-46DVR&%w-WqL|g(*Qs+KUH`YKe-lM`p(Dw~-^y3V8$YuOEM{x9vleYx{}` z%C@$?25{Tq*;ridY`l2|4L#~tw@0`1MLW$GKmK8?QT2r{Nbjxyd9Ppi8O{t4$g}s8 z&RrJf=WxGa8|_hELF^F@`dkE1)&-H{20_vj0^{xXtbp2+@;2;~B>%7(g{x>XodgZfcwQSiJiXy3n00y2#@RWK2BO6V~rCe6>C& z$EU(;b@;bWpQ7v00np?jk}k?*9s`ezH}2t!hA^4(P)=|(q>e`8S#-W$&3d6f;9yew zUF+U9p&aVzeTe?OfA5_bwO3fEFy^`j(Fw+K8k?oveq1?(N#VVVNW(0UYM|aD&b)aB z>A&>S)<$7D>YgL&dHrZ33-Pvb`|!*a;kRDx8v2!U$F0_@p=1u-MMeHq+V~7c_jAS< zwrLI3Iu00y$Av@nm*ZyExZG+T>yBp8w>y&A|z<1eAHH?fW z#0~V>PF!??8PQuF0J=-vWvqlQQ5PJd?+hWM5n7q(18?WyR4Y8OFK`S&zone2PohJZ z&!Mw?Z(R>Cesk>R7+<@E=}uT|Hjg2$t_X4VhFuUxEExE)@60#(^*TRgK|bRYo;#j0 zhPrcm&FL)lrLQ=njo41r;})!}dM8h^=FsfdIk8E75PtGHaE|H)&MH4p#gzUwGlafUwh|7Z)c%kvS7ZlY+0wVl3vOj4DF& zo&SIyJWb@&mWSPEeg{7X+HJXJw2aV1fehSg0M$(^kHD$TC)~I765ga#o>AlSQ@jzX zlmmS+V8e3_zM&_Sa~-V3k@P`VL6s}uBivM=$qJ?onwCNuzDZ?$h96|-p-}k91dVT9 ze1#{U_yK0}gx_`|mTSRe!PpUR6w*{IUHF_A-1A!FR}V4%l${KCeVTm2t-&V41%%nh zyg<&_>xltC)WAXbzzLr6IlK&jJQGXlO`7l&8G&9W0HGfxVi%r82Mw^M1TGo|lBHGP z+*>zZS!TRP3(FN=sA@f9A!wbU~hSbk^&FTPD` z=N79y*9597TtrU32EKU;=39B9&JwHFIun1CLC9;{W)@Q3(pF{K{vzkrrDcm-%Rj+e z5qghUZupdMcm?ih*YGpgNlWAlxPp#R(K=Td@~%Wph`ej%P8{_#3}473K|b@FoIWaVD_c25#r7yj(&9-?0x6)Ba;3%u{ycZ9I$hXqrVp?M8=2m>9xUuN+91;OPXc 
zS7{)hAEfhbnY9=QoiRceoM5lag#dwwOW{>@AL;Pn#biuiFk9Es)=yv5+ZE2e`=UdsOEbB3RR8H~{A4(OTxTNwd!ulk) z_3Sg`U;ak^c#<{+sCuvVf%ha0&1AE~thy&nbfouQLx(zv^no7(xq3s`!J2Dvv<&_t zdrYL6}{gBp2V(ZaFMkg_qJvd6w2;K;uhede=vOP~Xx9?eT&W_wbK$ zGip4P*bnM7ltGTn79KSIH4gK%wyZ~9Rn}kr>>OQmRHJ&?J0xm;A262VicEB%d{;2 zw=$Hc^{=}iBPx+$>N|KrWJ`?zMACmILIiCee!$gPuQ$>e+|5H6X$TQKDU$(~8rRRxPy!>hj@4|lU zes~#aFkxbQ%-=rNOTz_>VlX(zdZWQ=5`zntRSYh8MP_nVF&2jy7!NWT)Bv+b=mg`P z_H-_o2r&V~2yn=shl}YY?-pA7<8wT;oa8&;w+c^~tm)x$0g^{-+1|k@YddhzeZqv& z5Crn=*u%dxyud%b(+=@QzUn_{j`2L3n4fP(nD9U;gz8PE2Ob;(1`jZv*)Dv}1O+3! zUXT||uud_!4P%tk00c1H_rcfcN0>0}gCl0X7FlVUTk7ke*k&#KJs= zH^$A>YuN+R_AvkCa;cBo}H){CUVIpt%7rkDU zbAR+|)tgb{6Cy_$PF^w8JFX**6F(=WdR}=*=OyKNNTCKwJqtZ}ZvrEkhM+NE+q~5A zPGU2|;=eMfAxL9}MxtK4xIJi7KAF~uw}%kk8}DuY8i8a;2`nrZ&&NAETN z@UvgVyR8E+r{>@d;&DyAD>r&&8joJ@eP)0d_=z`59PB*|V4m@G!Tr<&w!7mgYu}&& zxhRuw3~Z`Q9@;1IB<+w#8#G z$K{YV0s6?q1#!e3>tRwM%~f+qh!JVl3%3_NF^O^9g@_XjV5X}&EB2_*T zpu{>q#lX;E!KzD5Pj>1coeb`NRKk&RPeUE_g{XhEuDY+qxjb)jQ>J^T|l<5_R9 zcr;5`hf}=e4lyc@;%zyKQS<=glH(MO<<7)xd!CDsFhR)uItKB(cW%UZt^wPy6>ruz zF-8%;8pHoOMt65I3?QG+Uc74Nh@tFaQg#uvZI7?C>nHC%#6$A1`Qq7YyTyFecc2|3 z;yQ~(Zmplg`0Qe#2j$+Nyq|ye>%=y<{YtDxOpS9R7;nbd;UUfHeR+2d&tJTFiH=3~ z=kQ1;{DQh$BcI_(CMWKP)}MVsOzQiK&5wTg0~V8Br~bz7=W%uf88dl(bDePjp*~o!o+0k_{d8SWi*9L541beP{~!klDrO_}t$AuObO zEQ<;|^s$6?p?{erq|6j$*>;wx4~@F-J$f&TQ-+{&O!6-JVky_LM|A-hxTR&Hy2(U*O_<=ovB{audIM4tYhF8(JZ98;_+|sLO)LDP?{s-T$c1B*qk0^t$mYy|x7Up<^o-_S8|?$*26lkJKe~B>jeH8e)M$`LPUTOPIqfR#jVM!2y1rvDk13Z#P#N zJL#~^1VGuyFE(rYhYmVMX9!16!|ZTSFB-mU5MAqmrF$%@pAwqQ_GViFoz!V;fLtoy z_5+T~9d`=c_QMsdGc}BD=W@KH!*U>@#M$8q(`zUDriWQ@E`JVQZLmI1kaYorzKY#} zYSNS6dk0Klm1kg@>rAjnR>MLkU3QAxE4@j}mzVj}J9@ED3A3-qXnbfk(Q#b@Zm&p@Mfe{0{xQQ7`G`Lg>pNO%v z=i0i7j05xSM_DSFkf!ox6}PGv%j_=P$hY8-b^&DguAJHtX=L<$v0OQn|Aj{t#dYW^ zdK`EZKf8!nhu{k=@hMr-upmU~119ob^i(=&iN0;~kO_P=@{o7CYj{T)8e}wzSkFE+Jt+A$_vT4^ z5HIURUb;XPXkaC6vea+ddpPSg6R_%20FZhBHf5W$GVn#ZHZ$KEg5A0W|Ln^`k2r!K zddTm1b6Z~+qGA+B&%{U`+-UuJl3N`2;T-&kjN4BN6Fg!p7lzk7lfG29dBc10lXh^+ zOK%;>S#H5i8-Z_GVCA_oQZy0y73K9-pWD?u#EWf+4tlnzjcw1W%8`MH+LoLFhTj?3BD=N^lL$8pCE#`)1GM#Ic5wwL19&ftooiX$Feth9pKyFmTkBzh zZ8@P+?76JIj1=}i(#1oQ2rRtkRpbu5%aF};Ui+`~vu#mC+DuM50$VsH@|}6nA0ejT zrQ$8G7+5UwVC=xDFFl2fZTbdpE+Qg6NdWpsR6C986-ZYN12GV$kQ-;QNuiUF@W0{@x-J+p%VCiFe$eVWhc& z)+6va$0&NjBvtQJ+rSZ%c)fGJtuhUYdF7xJ|~h+3N?;vEE=Mj>@sbl`STk zOtOTfh{)43LJwGfm&^m);vX6?_&H%Szmqo&IJPAXAx_-y&fR3fL>y4))EJ_n!HLqi z9)^UM`SRr&iySA-H251+-o7S0M^9(~(im-c4m~gJPdzwKy3}Pq`N0)b)dV z^G)AW(|q;$Zz2zFhaaR5(yLhhL{Nce(@{6|dl==(a}3pNiDt3k`1E-*OZXdDADRxf z+&-?hVgUhT+LalhU73~U07YM3pBUrY1q43Arm)Cr#sfT~U(MN58cj%lfp^!&FFwJL zjHf2vy@Lyn;t77X^$fUpiQy^Tp}y5+F7jPsTpygiC1Ai)uH#_vhslUWInnTtU;n6k zCeR+p*-=LmOB?}?5!sLej*~KmfR6t0-^Q2~abrK*PhDbwIAihQ9An1i75pRoi=K6T zS9oyS7Wh~%7bhITi@|CXL(`>3PkG8@-Z`iSB8tGn93u+2Xi(L&UawRgSrU!_W1hzJ zJ`VSbGYu+FgGUyX9KV3e32onn3cdV?=kLTw|-yEMpa8{P$O}UV#9JC==)t*9pT1xh&?7=MU_82?;sHj%X}nZ5x*Ce%{{6en%=}Df zjbR!6qi!1*!PE8W66yA7UjO0uB1?T(4-1dNFpQ+iWvs zM8w!bJF=ClL->c7o;53k2=g$&)s1!Xo@2;gjh_1GyYI0ONL*x$*JJS517V#{Xl&IR zbb&=(4cP~H+`2&E0^}UwwcJHD2!F@WWd}!;?K}*=-MM$Cd6zIUE({s|;p=anH4h#> zz&O^Mc-vzv_U+ThExlfj(d*%(2Y8e2vG74#2WNG*hOa(!oqW*y*U=g+FowEb=!Qke zIei=TqJc}CRJ~KgtTv9g#yjbDd~b*h`^h_Z7Q>@)!lu}FDEr2O)=;V;T^Q->(D5#0 zd#LN)9yD@oo&5SYUpG(QeYY9CJ&sHm>V}1}lODqPOs7BGW}#@09Tdi!-*XXHB*A)@ z1-ogCnD-aaN3;VCd(WOfjX~4+;eYanKWf%F%y=6k-rDwl+JrIp(;lGQup{;h%F7(G z^ZB>Wn!ow`e{6pE(RZ5f-MPc!=~(mWC!e5a_ah6+#4dVw0%N5+3{LfqMO~erm624# zcjBERFB%O8`E!9_X@xUTsCNykdoOoc%oxw1vnV_0U{Q~BBv1^~4!eA}%{i_xR11vG z*u?N?whxV$o+)715Zk_KG%OFI7aZLqByvuEi_CFeZ)e{L@L!Yd%)OvMS 
z#Smy@A)5t$W#!ZzSk$ZGSx(WH#@>GR%{R^CcOT~1$966JBN`P`&#~|-+EN6FUg4f6l-K?%XG-T^Rp}-(WY%U>0i>0aPk1Q zvg;w6Q|y4y8+??$$51VUw8c~8OT)cJ{rv+DNretW#MpM+NpYJq6%2u8dA@T2<^mh! z3Fp+D@vXkQfVRwYkr(6VJDyR086w4yPwFSf#g511{mJz)<0<+$av59x6h{%`uiM_U zkV1QO0Ylk1qyCjq#|nnF8KeDP;JmTt;j}z6jpH{I1dClP^z-NV&I77_zsaKf37+o8 zVt3r?c*J&L4srBP9qhPN+o5w$$5}ed88=@%Vn8>oZHGEjzCk|nYrcxHUf?npg_ni| zub$n4GT|bl?G3M)mY3+2IrO0^!$lV4BJkE z+N~M^CZhZ(WTs=#z?B)Qc#w(lQG-Gi(84nfe`GSorJwpwHksAEGZ{pdA-!d&k7(R( z*p#3$ev&(7@yPrV4bv1<8(--1nnvS*z=o3IQ*N7={4r8;n?V`EBqkwDrjoBv^_gXu zxh8R`EYfCS0CdAUzAU542_&E7QTPkIstb5e!^<4AOE*Q-f`vGl*ZOQT^UjV+4p?t( z0i;psLYws>eZJvZ`txz(4E-@oLx?;`y>o5ZnOu|lIv%&K#sFVA5M%jiB0&jX+?6-r zktLWop%b;RC59;k)MRAKfuX`6MGQ2GZOcZoS3o5nDXkw0s5%!&Tejc8 zRb5srUCR)Ws+;iM+tdq5$r`$YxBP@>$`A%J(es*;k}iuREMAncEkFX-&Qs(gQK?_? zD0eDVZYq@2)|CyY>pN*Z|)d6u6(g?3A{ zGm|wAa=R;>3FAPg;zEcrS02m4m%wnJck)lUgAvG(on5XiFWB2IOy8F8<2FmMks?nXZe3xY9;Q7g>>q z6eVQ=8JVE%rvsIz8aGM6a|gXK$U3kMO6U|b2^#oS0yx-*B#-5aAMe9^%9i1_S1LZR zf}8N{V~TI08h(lcye(ZsLQ<5zqb%VF3IqgS5)?#oU%E@Gc-CE1!(&`tIw3DlW{gu# z+6@~7o|o`K{zty$h`OYepU@Uw;f?=mi9>%A#rEO>`o-1q=^Uzm~;hIpW6)%VKyM zI8KNxA@wAl)*orI*hl_av>`twg!iORKik$r^+zN`7X2hFlLipW5sy|cJMgh!CtEcL zLUQeUgyV;@H7*%~#)R==1g|Xo2#f5(3|Ew`;KM`Ur7YwgtV<_*jm(O5jDVKWg`20v zi}dHpEOnzU$l`G5helQ0p8k*}#=aT1TvNj*rL zS$t!Bd&f^#)G1i-Mp#0#4unU4#CyX7*k*cNGzLW8D-KpDXR)3Ndv61bAmB)597cV6MBs{!%Vb|iS5MRyyE2qZxX|c^{`1u zmK`R(`%IpvrzbGzGAV+f7<-t6Ii`H|a=Cf4vVqswA_H@`^&5M;gI8M_$H%{Uwg3P? z07*naR5W-G;jyP&Y48dkxi*B#4#vjU7=E0zj4+r#LvALRTLr!ywUwxly-DO0X=ilc_`<)_b^yYJplT=FBl z>HHWESsin_UlSj_Gppi&p`|iC!G45Hv^Y7egHh%NEc|^=*MuCy@v?Fz?Z5?Cv z!1z2dyz%JfqpljjDA9*}^donDO!W049lLe6S;Yf*6PXUgp<+3)A@&NGZ8hKT89VXLF}nqvUlzAYU-c9^Ie?f@3yp^gz?W`XDQ1<>e1m+3V~ z_v&Q^E{27Xv4&%@{^fa}Mryo|jWd491l@YM2BwFps=9SXIghb?pR^ip^xAjvg>elA z?`sUQdb3{Ed#S^w!XmD{s!G!>ImBtzCS?_K`vDvI4 zAoy879@05*^G?&D9cVZ(^i38d;Dvpx#t}VKd-0U(!;mn}V%XLZH~>POU`%igxWm-* zIy&yn$~Nr+12*37E(o22*Z~VQ0~pbd&@J+oe`!afdicY$Vd$B{i*^9VobUhO31SLLq4svi;jmJ@Y_atRYGx%@IaFMMN)B zjg@P7!E0b?dWefo-TeI1UpG_WbV=EIW~$3ISiQy&yo)ENhg{yrcr0D&0LL3!7{Xuz zo}b`~;c1CD$7>uMIZB&3AdfM&Z(#T{# z<6T5EOvLu4XJfFi$Gu+ggYXUUWx-${1JhXZdz=q3j}dMb&szQZEu*>S( zXRp|eK-i<3bJ2x*UK{?x5I-u7A^M&j4DsKv81vxneL|$X!YKb&Eau=94o`e%+18U` zwOo{tmOXGEo9s{Mt5z;T*+~ADO#S7a5BIgM4F?5G;&!+>LSf{(~H%x%BcCV~V}z z9_?=dqxlBA^pt527uK6Pi(AO0xMvX>-8{zgP4wj%ILwg8Ih#gl$2lJG>jJ-_9(rHbNorOIL0J zzVr}if3!tpGQ`Bofslhf0XWbo@5$=W%!{6^=M-3l0nbM14+;*+qroz0OF0zVGL1xJ z)EH67XtZ%am5Gt%@I%Wczp#LbdJG6)k+9ywsKDkGkNC$xUo{F+PL4Zas`uunD*#15 zs9(~7i=^kCe=w_tsQ@B<84P7p08_jZVc~Z_V#S>W^DE}k@|Fj_v)p8t52S`hCm0r7 zMjT%{K_VRuBe+W@xL9s@i?Y%H9r}ccmq&1gui`H){+D+#YQX~_@UAe|KrSy*rf056kRk+U4Xfg}0|Q}E}qV7B$sPEG{J3n0M!yyDw@hDu>_ z9$41Bbz|DRGy(Wqjza9qyt7PksY#{fCuQDCS4O7f5fQ+v1w5ASBO`G1Dvi>2Rzg5R1(6tsFGaNmCpi4c>$K%kC&8b zefW$Cp0X|8$}#Q8xO?UiA9rgs*K8q%7nWj8q?5{(=m3MkGKnoHAd4qlrc9p$L*A;# z{cx=T*~8VcsF(bJ5I)zX`^_po2X|9DyzEg(J^^FDuQY znNaVv zbPzbpZ+T@2@)hSJzuF@a1&uW2kR}mP9nrN5UX6^)UGzl<+(+RclgF# z51v3?Cr8VK=#SKiaG5X*L-g~Eukct%z(f~pZ!k$=EaBwI__y&e#z+A9%>Z>(+hvKX z$%7Y5D|jmp5jXc1O#ov$lZqK9^T^M+F{Lr|ZZav_U?MQd+}QkK$= zph<(Tb!C_i+lzF^;0BFOb`0lK@=E<{$iUM|eZTYU8OA$3@tA-yxlUgPEgE_I;Kwjw z0GvR`eRY%*lu11+nRL39H{R?_-la|Bf`>TjDLsqvXztFP=5S+;rmrWK-nLAnnK1Tg z+yUnP5wXQFtQgza<`C~~WK6@>T0E_cM@pSA@p9s=N7oGArhU@S#82Aw8sEi}T@N*l zE6V9G@TpX2s=9F@-^GgyWMz;^9mG)kq>W6#Dd;*PX0k>Q4N`iVIgZd!bOujH;hzRg zCwh8@ii;E{mM>mT9_(tH9%Cng-j{ljEA|>`EZ2N`=|MEl;S_uy$EYa3rri-hIkxi- zp3KMF8_=MqJG5djz)D9L9JIOu3D5fQP8-LYzn5~%<@Vq7IpD-?c%nSUP);7xNYlt* zr}^;R+sy{Xz+)zTBk-})d4t?&utqk)!SFwGQy7DhCHrJ!k9U|%I(hEz7(06i136y) 
z7=1P{XiQGdr5)4K5}rn5%moWH>Ym7FyndmR_GDW;$9UKo!b=mK)YGeXvigX22#(fe zu?F7T?;@l7UwzSxKmH+}o;P8Gp=LOck;5)K7}+!ucPFWON_YA(UV~>m8e_Dg0@Nkx zcYtRp>NRA@T^~K9_h8~!+U!%12ffpzyJPql>zj#r={EQb4XI0+Onr8GkYT=c#)v}( zv}<@bmUW0Hn+#2^w=X%AV3@ zMkk1&P1ui~t=dgt*a8AL(Nz%|3S-s|2AnMxe{74}gj1;b$T>!K4559)&8MG#(=0xC z7z4NA6~-`tOyglbK>P2Zy2nR4&F_8ue)Ac|<|iy_jbm)r@Tt*EBdiN)?>x8{Z)^?B zE^>@wtUQ929TswwA;Xb4=It7IPS5uV>4XO%i)YrDMyB3gcE4aWr894S^amd|KmFv> z7-H@ar`zxmhNsdPq!;m*Uq7XeIp2YW!IjO5gFZ}rp)u*jJNS%uiyXvx0|S;uIm47Z zdUzK@P#Fz17{B-4ljh5BpTf^3hjymj(9Sfp89GW`rDx_Ze*S6mYL)N_gkf1BM8@-_ z7YPlqj~6tO&Q6ZI&7Iq~kO9MQ;Z+I@cdcY+5S6d-%eq}BHuW)j*02!>VDyLvso@a} z&lo#zvbgf-;e+UC!{itbT-g}|cX_w6vJ5}i(Sm-u0d4AX4MtsK^cXx>Kks4CH=pGi z;z6C2@K>ZY-nQXO0GM3|wb-COf-e~K4U1skVm+Q9Uyy+uv}X^H$FKz+0%*Ds!kDo1 znzm^jTMkR6;DwC3+Y;SiT;=G4AOyY`#9Ux>@zVZT*c!(?08yRo?gnu?W%1)2-MYs@ z7@)JLVY+epQ$1g&&}Z45g3i;6_9Mc@9Aa#AA!1>kL#78=07njvF#NhJ$Xx?|V$?-` z<)?96=! zUe3?aKh(oK3tNlCmw&mwoqkMtaHu7%s>MkR)5gL!9E?WwBRsVYGdHAXF*0d;x#oNk z%ggRC#%o*1k+f(KcL#&s#z*w)jw2^9SUP7_f2ijiKiEen%nx(=F^sdv@X5CC{MH2< z&p&ZzghtC<@EW5XD}S{RME{Mk)x$y^SJ{>)Fl?`2;MGVkUh@743l`h#pgBI>&qBi+ zLc#6Qp2X1w1^WnP#`%}Bu*2?;N%WKpM;?;vIRhH*bEvN}j2vnxKA@edI}DNIVvc>2 z24N2p^=yw}bgX=|9SwQ_Giehd#KkCf8mj}32$MG5GeOYepqWF)lvOkzI!r^f?b{gk z>WowSO!{fd&}i-W(f&I9C1p9DiT65XsWW(m4!~O+{<$l~omNAPiyTY%UYWCQcDL3T zYwsb;jAzg#{us`5kdSW!)SrfbLm@i$QkD+0_<{~&@pWf=C3%iHSTu7t58lnGru;F>|9*TMl0 zFC?f8MwFg(E(z#70sux61erOf))Z`FpB%Qo44P|lfB1-59{qn+Rq_HkU z7e343;vL^!`z7vq3l{dsrVU=A@*NY@?75JPVHn1x<`VJHkr&J`*47 zk!Q*?N;i!Kx->Hgv+5i;T^^AwAKzId#Q>lWrWsZn6@F!c^P3pR?$#SF^Kq>FNs zEkLYo@J@EF@=aOF6YV|iE2Rn0I&13^!SG6hNDOb}tGs-RbpG)rPU2zq{8Q$nEes4y zQ>9MiZBhX_c#wf|qP%z@-z#4(xaT_z7e2BTj4ET=8NBT31LZNHZURS~$*LS!blv%c zKg$T#xeY!^C%z@K%Cfm+TIru;%G1bHD}$wUd1srD_wa=2st@>6{)&ze#uL9&c4W@F ztE93o?a6$$Q;{aWc*y3kKtnE`Hn!>1jJhfL#aSK(JdZ3BfRbZg55o-Rv;*sxXAT&p zTlo`2!0;{?{0rWE+lDmmhJSH=3R(aF58m17M&3gJI0%DJWHz)GF40lQWb`X~uf zU||y57X~5m*7)$&>oqzLLa2wxuk9%W$grxLFef@181O!2ISD6S#{~d0rha%9Jpeu7 zixYZ{A0i-yEhlIbX3&Jwgw5+GGkmwo^D>UrY`&zpgOzwBQ~jNNz7~L(HNy3&prQ0 zUvg?$#p`Uhi$L+(yl-jc2K?kt@b!yN$(3?zdxLN4c=eIVrCuPR3%vMPUU8f}sZ*bd z7j4)DAOxNq{E<#Pd?!uv6QGeVu7wtTmo(l`8p+L@JmPBE>SATnbjVtVL7m)8|1OeZ zlSaniTv|hWJ#T>lM;zoPX~o0W(gDb{Y2-${!*4Sw6S?Eddt|UE0dMjNM;zrt_(8o0 zx9Ih$>A~BDN7DzB)Js0Q_5@k|~iLGbe`eL49YK&M{N<9Qx@ zOi=Ql`sU8IWclhn5pZ(8ho|8;-+qgCCEyjkDj@WrF%9)?UO#Jf(|8K-F|aWFl{*60ByTLbWZVse-^OAKH<#|D^a z_2EId!$f!(I2{el&}=9MJ+t;OppPVjDC#W^nC2+u)f*Z ziRR2uGH!#$=s(J&Z~B(;afv^Sq34nXfc*^)OytK&uUq{W9^5BR`DU|?k*1%rd#+C~ z`VuEs7)&S)?{UK9@07g1`SKawUrge`_ZTCYlThVjjxFPQD>{*iks4m6mZ>`@?jsoX z+=hHWn3otUs0SzChTqWBT)yhb=r&^c;K76*Hs_CqW8p@RVVJOfTnH5ZbL2z=k>PRl zZZ&<$A?=H@2PkuFR9X!ObJE;ixW%FfMnL#COTF)%fhBF#kPR-(YHWA%mQV=@sRv!U z_1nW@r)MXdnK8mgK+jiSziuAPvmG6S$^hX_&Ik=LK%HB0mMf07;c-07^lCkaZoS-G z1c21=M$dQ``j0O)>S}=SY)xd4a5Ue4a=-cO|NVdPAjA`!3F5d$HN5TIi6bBS!O8gQ zwhuoZ*lcXkc-(Q_$D}uMMOwGq_b~B2-CD;ujh=CS1|G_vX*8DP&}f>Sak2;Dqv3ga z1~Kp$x+FD1x#)njZTefR-C!4ioucm=$Z|j za1gU0btNvw^IlqZXGssCusC0V@GXXNa;JnJjUXBkJy^MRY+PV8aiPy}DegunJ)pPf z*5+EXwzf)lfEP05SoQ(Nr5z5?)eBt)xGSjiCJUv+rT^mV=NNEU48*V)-k>Wr%xRp~ z<9GyvF>K|&gT6Y!h)10x0~!{evV+E!_;MNDhq3hxq7p$O7(P#!!mOvMOBKn-s$0%&xOS22m~p7pkjE(ACG8x4lq zM3fQ3lf?jKn}bGo1RPVg`Xceikr8Fz*vGaj^JE7GaCw|IR&A8t4c>ZisP&)@*TY$T zS@mKcXx-cP@iGtf=oR=m0?rE<{|{JLIb^X^O>a9;zq_+x0p1zI*}5D?#+6m$k84<4 zT)f4uuO*CqCm09~B{V^QemnYFgQ#Aq9-bY(z&DMWhDtG=FSKEfphR&T6&c*O)h3E6EokCEaGWA`YCjnMsh<&IYx3bAWD`XkTH zas28y)5RRiw~e|}#zk~PpY4M07&5oO*#+ZMg#6)|?e8WFCd#fdq36Fa-Fc!8zQm!! 
zHZ_9&Q~oueYp8adt^+x)@`@{8|563=w{v} zoFQ{ZLs^xVc#R=yArAQ_&D{KLaAV;WLwM#S;^PmzRc8IOky+Q~b3Ush7EOrjl5O?A z3sRZmkrs9O_Fcv@!mF+r2>@9?YjA?;c}C{o!JyB}f)h zlS>)sMAQzcgEwCL*wg>`lwn64M8-c}N{6^e zw+s)?!gB!5d(s9nugV~aOz=9MLKxihPEbI}m;e1WpMz8r0K3xJ;2hk_N8U>#GE>*W zaF9zu4iuoL;tK;TxD^cn9$=;yR;6_UrSV@J{Rv%kYU0#gU??QK=PA5bn{}#c%u1e9!|e z)xLgLCWT$}rGbdM_jy+7y{@_hrz*k%fk|3RQBQ%2_{Ru~!p*`?a2AD<-O36s>Voda zwi}@4_Z_(TEFbcj*7D(TjzEpn(Pun`U+G!!5k*}0T8u(FoqQ^LV zFd;Bfr{brqNsn|{SKwgZBV2xxv)eZK*5K`PMg_pqJ4XJ8(*=w67n+4cS#?lwJ82d_ z=&gE|tUv-c%ICiW)hxovTke(fB3<}m!nbWa<&nkwUQ7SmkN5xw9b|rKf2$Kx2J&Ei z`qPz1WkEhz81iuCz%{TfOJV2FwBoMLC3$q1WwvD(p7O8Y#xRKypgg)wD6f4_3gP4m zn#3QPT2X)dUR-+gayOv`Qod!#2mg0Hq~k#@pRBVMY5_}$Nn3fyPx;a%ALAV^f2j8q z;ZxxRH=c0qbIY<^Tbm)zEMyHUzD^rRPg+?9~De~IZuA@7pyoJlt=w9?t zQPa{R9E%CR?5AwwenU;#A^Fl@nXk%{CsZZ$A#>tvxmn0`<*biHe>IgGn1hvTo z5QFbG&p4n+0~hUSoJqi;Mijs~`OvGXpL*^Ehs8N2z*ED-0^ehJvkMKV!m% zM;mf%XcY~NhF@7;-o|K5$O0yf#~9%?d@$sXcj2JnP(a6nJx}53AqT&WVF>o)g!q8T z)F$4WTO0@~=d4{tzzLHc&>E@FF(T`guBWa%@4k2JWHXURF8CKr>xp|NFjvTc6MGN> zt~_EZs-9wa^cv>n^caIG6K9Qq7Z|i-5P?VJZf@Nmcr_EPOqkJo)ALi!G~T^VeAE#d z4u>&h4dVTMPACcMa2M~>^`)21qaS>?*~2iDn6WGfq%Lf4l+&S2J%*pg_&z}g7=yb* zeK&o3-tAw8!hVGFOjKZYX+kBe?Ej6Mz`F7j(^EW&3R6K$&hR<-qtwFrQL|iYm zYeLV+2e&&r(bk9`d4&%iMwpkpcM_Y}pYYYmn>^5Ln=md2NPtNs&o#yx7f4BMRZ@wk=CU`i`flp2fHLQ%`6*!DX{pZbx?=8X~42x_rm&RVbKB3X@BV*KUeFu!nN0D@O@opdr^7q=WEUqwE#iu=|mH5pbu%-YblPEUFBSb6_W)XIFSS zHa*Bw+PDo@8RlEbrM>mz<#j!b&gdonI)(r}qSFnL{>nvuzKt#3i&wk)pzBPHS zN4!+S7ad{-i9c(aE974z6ionq$+mb5BzgcQ>jsBA&f;}xSP*5a z2P5=V$N2WL=sMw33?;Iom!mu)%^^m)NfwHx@Pb@iB-|8wNyC3Xc_;N;2S<(48mZlB z19>qR&EgTQx9vIgsUdcNzKhDk(08MG|M6YCz+VxDYAxR8y_D@40K?QnAE9@+#i(|O zm*yb{U`{2j_tNXv&38Y%kAV|!S9Fpd+u3T*nG^@g6#a`~K{mHG@W#Fge$)@VTf+!G zMO)Upc93?okHOA3>Kb@FfYDGb8m0}IVqF|y6rG!&hxV&Hn_;KVE9zCx`d-@1!7(}k z1K1Bfe7|}6`~`-){TOSF={!a`BP@vSV$9Rf6VKlDjTkV`2_xlUi3{*$9Ao(Yxrh3} z6Y5P*P7Q^I2KnlnZ&TOGyF06HVGv*7Pea%seC&q?7hUo}W;7mISHsYM2<~>I>ZegX zgh{U~*p>}zp#e^>ZM|IwhgpQ8vTSo2<HLrq0pu#skjOB%geN_(zEWGb7VKhw=gbGSbSlOV_RHdk=R)4Qy9oK zk|u;4X_P&r0m~BiCdL16r5y!~o5IRf4b3!iB?hFri!a)Ztwa9CVKzp6u(XK_;4T{to1(hMHxtLSOt#gC%f z2e1pcpTQjk>I2)d#`G)tJLgI+3|Rg4dm5|_|L0Cad3An{VVpEmgtl?#p!@*=`Yb}$ zprh2Y`;37u2(xku9vZt{0Cj=MkYDOu9VHw?xce(%l7R1+J@MZeXW6baWV=XUpXEGQ zJA^t}UD3JZ8aix;%6fJYGA?mkCQp=sJNF-_%znm_>X4j2!@FJN-@f$%9O7kfho*XQ zY@y8X&$ir$uFcLKo*A3m`f~@d^HFs|=D~L4>H@N5oF^6H>imT?ePsmVH8=KD`I4&| zpp&iIm~edNt1?J}3_^RAS(h(uUKz?10;FQ56sC0x3sQmLR1ipQ4f%x-jl{oD9AHN4?gmRk4T}UlSmvz+3m#+_Do=A zK2^5YbRbbZ!Y8o^I0&P{rAb;9{N(jn=q3$~$^v;Mv93uRr6YqUaL1{!1qsOa7 z@kc`5S+YRdFOjmrDYJp$D|sed6Y(WJ;);TBQO41A=xfU~hdh*TmX`X~C!Q3kx1oUT2h~4WjW^ID+$_k z%1a(wF9~L$#Q{unhmMT%+G@YcU?A9ii{ZqZ~#LS^%XLf8ayfQa$THdkk_gsTK^> zv(|&iiXoIyUc3z*xH1Uv6>eQjM&u_@NhUAlv$TdT>zSX_Dc=Q(1Uz!mP<2{$OM#@z ztE5-w6z?1q_(~qZ&y>r(b!!Ie64`Q4XMdXd;8}__t-RnnoaS-hfL|6`{=YKiFFX^b z?VL}En6SVx^~W{zN4Ezik3}vzz^s)|82(gUiWj8|3^-YIRu0>;d0<9<#giM0GaYcl zerWc(4AVj?{*lUz$rm06M@qGw6~o@5lf_bHfvn|qU7WR8X^mcYcurd{<83es4bgWF z;201&;EN6rT{BxI-!9f9L{o5pgtTuB7m@=(>TqPaSOt&6OX(}wG~9&jv9-7bSbA)S zR)aE?d*8t`<-gjM93YeJ*7nPF7QQI2LelUA2BrtLynN&tyr;g>Z`gh-1+e71yiK8j znYslZYtlOQK6wF`MH|y9mg&pL3p(A!ozy|#g$ZH>6fq{934O;G7xgELWKd9*riKm?!{p1KwHDmZ-@b^&cAo>Rzf)G z38it!t@2Ky2Qhjc5U!wuNB-~>F)iKBjUGuqK|VdG#7A%HVGP%{my&((A-|KOLp;*mudtY@-YM5iMD7C*GZNy8 zZoGS+$&%j1jIT93VXWLEPNeamhe@}~lem<#DEQ)q5NSC!pq$n z){Ah6EzNrv5N8Pg@^ooAh7fl4C)~<8amXF&8%9E7jcvxmN{v%)OwfE_v&I)iP?q)K z1^36_d8hg5moJ+0v@v)?VR~K}KesP~LexHVrj6LX^}3k+n89Y2n2=}BiDdJw72X&YnS8U2d}pR@ffybSR$ zrk~5W#eODz4TM63dcryoHx1<7F|fw78H^ohuV!JiuldtI`ndUDfAhC2VtfySO@H(6 
z{_Ibh?|tx)&?g*f25h~uVjy5!yE1Ov+bcXPZ9A$EV~k#1YFyG-t04$kqXL<);aT$sAAgVbuiWuU10CL^8YvBPB5XZP$JpgD%UKN`l&8^G zPhMk|+rHFE3YiBoO4|fR-TMz8M&HR_cj(y0T$DRU_gcgDfmhZ!eX(<-EEu52FAj}^ zuH4HH$NrpwLHPo%-83%5Fb)P!^VxpYHyY2?%kd;8t!+$NEdLliCT+BW82r_{di-a+ z>I>4_XG)88pux}~wdpMfbV&leG42bSaR5m*qIw|lHHJ6qb{|7@7M$r{`^J=I^U{78 z$7e?Z2Fyu}gNGXD;GKq3`&12YmRW~L_cx1oZzU9pa_<3=8rsUx&Kv=LxLZhLpbMhI z?C2OLtWiQys3+wIb*8uZ%NH*a+GdzGGc-W^wf(^t!)u(saEw5>7s^&P7v>h4mDRQE zI{WB@yV+HsTAZ1iB|iRYLIZ7cSmz{iAj>=(U}64A=(>OJ9^((j5wt<|w|sX2*u!c2 zxZh-<5%P4X%z^p|Rxd1n1A;SjXoDSZU>obf+xD)VgW*Kyh2!B)~}jJLhA#F4sj(PWBnRmT5zM~FQ0 z;Ml#x9q_~<1m0*j@r;H8&g{_Z|BCZCAP<@howP^0_zkkZK!3%~L;5NgO`|vHd#n@r zsEph89IG1k$}l_11}`H+Nt1D_4kPG2corDugGLpE{CDiEV}!cCkF#u?C%OB;_gi?@ zt3NWHQ`Z3d92{xH!Nd81ctB*;E%PjC(Cc4)<;$e-JwG)!N{;9&<<0h;baph1eFzPR zvIc;OmBkpD%q}pV$*D}PEm*-5uuLW2l|~`+^TfP?Y#!dr7$-;>6lzQrAVk3huFb=B z;9FKPk%zz`9mEtopNJpNV4eZjQ-*2$1WB78I9VRJ=1ofrBNOf|%MWi%55D4`d%r0w zh6fl~#s+X^z?0;G!yuNjrAK@-84q^FWHR%Nazh{RxlUY3CG!n+^wN)p~#=i!OhmMO?!Cu!UWGd$(m zB6*wi;8HRyAE7M`lmUR{(Yc2U;%xoU$Wl*%Z$l=B72d@^WwyYb4CkX!!Th|G|8LWf zuWtBKj#@A%kl>@th@{DU#^TG{wjHdfV`Mcrn-087(O3qVy7^PCIQd-K#fR@M-9nH) z>5Yu|u1ymLRUKNkMy~J`INh|86VddA{1uk?F!VFAWkx=W5Z-Ywzgih8d|JMU6Q8iN z3L;rO6KFbE@db}=-CLF|HzMqfJT{H_+j@xsMr>?r;250z1}8m?YLP{GRaqD8;FUHd z4eC2(T-gF2uDeE{kQ%IihbPb$Uc2Du$FhLs;5|mk!j8wn3Id-E0U>Ig0iZwj#v z<24pgmLU$Nx4xOYsNI0Ks$9P+qpWnnfAoE=EszFKX)Z$|^TTbI;wYWMMt0_fbxf#7VLJS+LPPL!Xj zpDdc#U-CY1g<(N}l^2B;@Va>UAc#0I!(gRcCmp%rYjy=82Ne^Wl+;HSc!Hyh;}`Ie zE&F(R%LC;={U1C{#3#@EPcpB=SM@Dhh=xAlinlBv5BOe6d0}UJGm1kn8Y@44e{=|cvwtFAbo;!77B6(1_jyp^U%qKyB~|7mzjc_r)~yp^@TdREf>Y|3N2Iep#Lv9l z-58=Q88L!8=#x1j292ruw{m7eUy@Kas_0z&{b65fot*=UuajE(+#K;qy0wF?}#AtX{4;VAo_=#TYi z=Qu`m91jEjh{Gz~uB~z8%`&kZ>JUuQNG?wsVY38Uinw2wJ@wOeWc$yu;*ljWim$?%tkl?qfKYzas1S-w+Sj`%`7y&diApT@BjE&^T+?}_nU{erkam_ z_y~g>vW5|J0O5Xc?*_&I>Jx*G@}kFh!dgI|akV8*L#a3fSV^#@PY4aVb^52g3OKmmn!#v9jg-OFU(-4d5)glNIYqe1-w zqjHU7)ob)q%0A^e{?YrK&DR*y%P=2Lah_|q9-g}uBe|hrJP`B}ne1bGx`ataZ;VYC zMNU}Qg5J_wgw??4X?svsC@lPTE4zG7JE0CV%sbD}NbiB47qnFuoH{NdxS)k$$qhn! zt0SRVu+Wp=!z=C2Txd}5Yt#@A$4ota#FHgoKQcl?z=&n&0v4=jPnPAqhL#-+N*=+=8xO+objS^qX%Y!!F8M_A0T$Js5NseX71#TRqI- zj2?KWQR~i4<6l>NVReWGczN%62Zo*jk~m!2O8Ba?nWP^4I-I&EIcpu^dGveMXtyV{jD@et2~wG9+0XmIcC(b z&+)qygE`GRmcn9LE?!H`dmh<@I$u#yN9yGN!}pqxfAmkH4?g*a|J(ff^UpDc zmSK39@q+!*ICcHeci(T`yjh8dxpHm%ZNrl+s0m2p808QgG>RV{y+ppwVkGYEJAf8q z_3MBHE%udHS4SA%=>yPJPX20dhx zwz#vo+3aB;7s2}v--AoY8v6ARBd+qL_j`1j@ICC>e)Q`vpl_qOefu5@M|f$&1KXwg z<_sgR2G__ry3B``fvX^PQcNJ-Y{4WOEn78S*iX!JcC2SK$IOeMTF0(O++E`#cTO%D^f0)zt?HqlEsD zCyryS7xj&@O)Ep^qbtDcgm7u+8|Q?Pc@$bThAT&oT+D22a`ReGy_DJtu zXxzG2e~qvpBaaRBqFkvv)Uoai%Q%8>_1!VN?4{ky8)NpnQ0CbV>PGDeTdp-8=m83D!Y?*TA0H%y?W0lVs(D zRmvf}pqTJo%`Q$|5GIBPzV#A@@ZItqoRG%g%|Bk7hG*eD=~`MXEJ7vhKsF7jfmI6| zbxo@9-!ztQ9Taa`m#?m8TL~`DqU6kZ2S-DdHW< zGf(oCM9Z-edDYe!)x$d;MFy-tVY+Z2K4!4w)L{V#9%M+v5*GQLhnKO=#v};KNY>z5 zI04VJs1CuF43pTqb`alV=oiaR8!?Tg$qx}UlmEorHkZq!Pub9E+X*qG5O-zGyg?^? zsn=DjBnUq8*K&$)!OOxRN?f^s7uGfHLD{od$x{x*HIsB@N0}-7Ej#sWUgbgfRsddD z!3D)u6B6E$){^Rl`IO7{gE2`SD0`_d()um7$wIO+GJ_YmiB>#{l|yBhHXK~Vv!zco zqO}7>JPK#&5xPc|uo-v`{DPMT+VG?9BP-xkI zz*T#0(}R!OxuqxdCtC8)FJMWD6Rm`SYIS3HLIP=3cSgp&w;bt>@e_RWjC&`;(jA>D zc&*%6D1Br;TMo&Aa9rIt}MU=bpA_nefaIPs(i? 
zE^z%3*D7CH#1Q#`FVWY+a*|;(e}JPi=G#GmIEESK1$P*owDKoRw=C;UFC6gkQ_KsD z;9*(t$=y7FhE(N*EBR=4br3kY;3F+b1kM&zvMv9)Z*^4`7R{SHB;#5yP;pRhMJ-5* zN87iCe(EFxNCrc}CkAiZB;`;q@!*4|(s!b0I>}K#wDkrZ)_HUtWhItto(ZGI3+AbM zW;h5vb;g3E!aH6@mc%kNNQYdhkG!?dlvYx-ydjUtlSKJ+&@LS72EM<=P2)_}W$16y zn-lI84=u`a#Z|mzv*lZ_J{J}`OhdcvCSk2eRPDmd%C>yAor(+hKK2`&0!!Q&B9@L9 zasGr`smM#(T(@#b{;r-#{RvM)Wzv~ex$)avQc@r(AlGF8hd_A002jx?Az#B#b99HWOaRH^opd~lyrrCaoiwCVHbalq<=7-`l^1&C<~?8V43?1Ca(T`t{Uw*x zMt#W#nQ(BLk8sTZZsO_3YA77*n!Vm{?rk<^!P0pP^z0ON!c24j3{ z;23A(;-K6~y&)|2H}IHskj>=AkP(x_%{IpMOC~lR7N&QU-cN}|J+pvEAD*f%g79aI z$vw8W>PhCr_3W6QyVprM-kNxmU+*4cq?8}LXWSzWQtBkF4}EDf*I;Ap%S#SK-QT}% zW;w7ip&g)BTnuY*gg4z0M(SgX>TYWt#<=fxdE?$fbkQw$~4^m96n52Yo`D6VYQ0{xloC=1-mcI*DVl8*g&zMuU?E596`gFP<_{)KE46fAnPi z`4>;K_z?A^yugTfVZtWoHcs;NY&PVA1|y9~iq{;I*S$j~Ug)4qBw0L}WCDAPC;t$* zoZ%^}e9aJo#&#n=0~_AysV9Bocr+Ux0yL;EJkNMGNXMebXg`JsfOg}GA4{}i$(x2+8!`GrWBjCf&*dlKf84#Qu(5; ziLF8c;>J(I1La)086P<2Q3?}6lIJdZ>o5?b9c2o$!-7Ij8z#=QSR{alEITgOI}F{S zXP==qE-+5_Pu~GQ=R|sY_@l?LExEL=`<^2fbxACx7IG4e`ScA44*C6V73dYRW&|!Pj)6&pW9%d>2%8lWL zjBRTuDxYce>3u;M0QAf>-sFZ&8oL^4me*e3!M=od_LJuMtJi6R;~3O#V7xTm_B#62 zkU|=J?0+h5vtE`gCQ-**n|sYY4)D~t*r5(QSXVu}f}wjCW2Oe6X?Hn5vmS;T@{RF- z!r~j*;|2bJ_`o~cyUmbZ>(H=H=!-4xmodJ6_d6fpU5>Ea2#$Ztc*E% zXk2PNIImbRb2pA@G|qdttT@<@WY+|5VoXNgj-mT(TNSj5y?dsX_g*$ zUF}jg-FVq9*z~bLxVFBLc4SDdOZ3qOhFf$+{C#4;~)P~bBo;&;vElH z+K*n}(r2Htx3k4U<$OY+eE!*IIqdMC|BF9u{?$)@BK64Gidn%EW9)%+8*92hyW5O! zZTs?|Q@yr5`@t|I9=b>+VYuZGR@#DN3gza2P%Ts{_|Cy2F3~9-PAZS*$TyF7yNe?l zmdm(!oO3_)S{L^tjI1u|xIoppGVDghk`{*wM+QH6-<>-f8qPjj8Swo!-p#`HQ1UV4 zeGeR6u#l!0gK6uVt8YO4yji^e1fw%LVEGmDg%OoG;_U2P#zQ&_*#>s^)>$mrh`ips z#SS-UaFI}Y9h=!-tggJtE}0QL#?P1w#<&XI>T3AVr0~qRuah5osp6yTE z$+g1*>h8`Oi!$_?$V=_4=Acr-bUnC#KjC$9;J5}@`kQ_i-6*#o`flD@Xtr2zJU~uH z84Iklc;;BqF$R(xSyvWZ*fTErIJ(n$gKc-3@skrxjkU(kUq@zbgJrm8tcoLs9@Osq z3QTm9hUO{4+33ap`Dedu?%cV{qUCht_FHzgc(CmR3u~s64t0Q`n;dKDbfNe40U`6$ z4vr;V81leX$MOT{aL1{h<74}r9Ak&i(H@IKuVbW^2K#`7d!yboog1cQY;uOWVC;nq zIUhSj-#NC5hrW%T&mjvuE@Bw2N!@wExX!li?jyhHE^P|w6HVt>MA>su28PkUjHON0 zSx{xD?sGvLS$AyZp}psXN;IuH2ysk%G^C6RBZUS3d0#d{b&+M#X6X-{6T2(x5`K$J z&S!Ewg4~LO`J=a}3-#kV?|)MINIadGao!H;e&#?K*I!ycmU~GmN6_&?cfSepE<(v)J4*ip{Y8pKnK@Myn)&ExT3Xy zy3b`#l@Z$HhqTC3Ukcw(S9*QLz_koD+?dxiA{Cs0`Tv-@&!5YWC9&^^lXK!ElEZWv zZsOHitFTuq$rcO)_8Y(Q8~cwl7BC8InR*( zeyY#IyXToR=NG!GtE#K3yQ{0K$;cMhSr{0)@i3ec!U*oVG_nl!U5^aO2UPTc*Z1q; zzYf20)JxAs#w=F)@D~DbK>tp8 zrQ7mg@H6mHHN(U$4}-HqwKT&&?Qx9$-t;%{dP%joA9?W$SHxgM*U8J`pH$W2SS=CY z-e!kCf9X+btI_cpro$qx(cTL?Vrzjo+djHhhY{jX|w&9 zfP2^AMf}n+=<47ksSH0uH=|v63{|efal#eQPZGy-;PCe)hneJo(ZiJv-WF3J?}Lx& zltEpeFmjHjCinfRzv*1dzSyRoOE>ij25FGJGPNZzQ!d)-+6h5QEvT(#8PhefbQ?r;MRp3EK~u9Dxt3(oSZx9qHg({SA%C7<4@!tQVeO z*A91&d?`3M+T$P-U4&s^z^I(b&bMW|kM_ln_&sewG4Xi%VK|nnWHWdSmVw>qtl&@o zFY^`-d2+d=2f{T zgBY(&Ubzlrltai0-hKFbpb7c`p5+-F4;3QiZlOIGc=zUscE8D~13C|9^pTb~(u)iW z<89it+7jmAP!l9FH9OGu6e{4xy(aEE@spQ9Ro)2$&;;*5cr(cGSeh-=J;R@e8P^Xt zkz>5r&3N1d;K7}*#=vFbpsr73pxVDL17VYyJ+@WLS^e1MBfJC;uzeZF9_6)oqOIM} zOJ_pIP=Zk;7;Qqr>?szqx;uCxFV`{T=4o8HhmOBi?CLWE=S7Df9?r|#HrZzxFBl}A zHX*v1!GXuTiH=F|fnu>f&KSuw9-zmku;#Nak60A)~|N7Au7O(1a{(m^4Yxpdt>g#SDg+b;wd7Y?=`9?BAa;fpO!n>PHtlSg>R#lA{BRJmEAlkGODy&hz-G z&y((G3la|Fe44ka!(f*u=F&k7S*qL??~BF0wlm=MGZ_fNkyrX|a*(luQF>1Xfdd)I zCp<}$!FkpK!tMikQZ`xrCVKZLL-(89zr-teZ^pI*ChyuXy*CYOXqj@Od^A+$>yy6D zu&^(Kh#dqIi#*h3w;8=Qv`Dz{HgO(Dqb9aK+A=ThY+4Nmjhp7-=l~&D#``#9!}McY z&+=Vu!=}e^Ph9S$&uRhmlQ{-m5)XLx?=q?Ho)N;t9!)QJF00pgH4|bu2J0nf4-}q8 zMF?#o{nHj5v@OPN;a+&(y$B|~yf>qcKIZFW?!`V{ZW*aEcF%YVpEKAV&V%p3{=5jw zXLqeSKAe&2=4YR69<(dvn-lRmdX~W{uo)MXH#)L5;ZNImZlbRq`rONqvO9zEZZH?- 
zbywc??ZK|bpBB6rFB<}7tm3&{TTAzVyf-gQ;XYojt$rutpVF3FqVayl7~{dh)G)B$+m&pk3&t4K4uwZIIv*l0+X)+iE*uFSz07d^#g!YI zUwrg&ZS?4bR1tp|-7zTdPlvPanlKQ?O5j>lVfZp;I#bw~ub-cug#q4-fBNTty*Xb< ztl$0J->weACqtBjpBeFv7K+IMgvY829{3g*z*cw1Gvv~Zw==TSgJ(0`j{`-D$f14r z+ofb|Mc;EBsD6R#?P$3C#pTUgZ+8wsi;;1v1-kmnbi z=FsL|i`U|cAE-Vj97z1V+m1GMOXeTfU*NO0s^0V+vnby%04{^(b;_$H?Mfn7&yJ4ajexibb|G2*q$qw)H1INZ8(z3?TU2qK;ufRvcq4b<_TfXj#zXvG z3kntoX4jFvLucPVZHso$7vC)$4t=nvYjLiHDdM$EPakQFW$ZMccz#Em^YPs7Q1%uD zwFzx&LI8!6^9PL07`>%qNZ0)zjSpn@aEoBhurW_MR664w6jKK>}R zWQ^gU;XCbQxw5%<`O?Vl^V;o!>Yed;7E42r_p`S6tZ|ryB4KS9&JUztr*Fcq+CK&y zu1&xD=Ib4h+L)kn@9b@d;mOWT$oT%rCqErIARE%!Rs6JFN(UyKRO6%cf>3+(2YJ>n zFw8STs{^uZ3__M>F*m)7@9I(eLl19rLn;lPUOW5p*<>S!h;}vM_SjI;Yn=XO_d?}Z zh@RN-Ef|s4wZl`0q}oZlFW1d}XMS zhz9QJ$Td-E7K))TX?KJe!XUw`ALVRu(>Ue>KJYuZP8tldx(?n-HeTYM)c_0>45)TI z;Z_;aNnd$eyvG`T3II_h8jRuzyjCk)b(xT?g@fJ!UOq~12EW}C@WCU+ zx@Zed`57vQ#e`+6946K_Jn^ZIyn}-YW6#(pEiBAadGK*4cD-cEJyy;DnoKC&pts+t zk1gL!*vZ})Muu1AcdxRy@-@cbo(64bo&0bLzVCRtC^tNY&tNCZtB7bA*)3g-oqWp% z76hldVIc9A&f(MK_iU9hS+?cjJq(v2v;^fFd7Z@Ysm`R8UD_#-F{X>YV-OmEXzfGR z@p1A*Lv_6PFm--#(jWtn6fBM^?i_U5zmVNc2qM(uGVPY3F^)$g7b#?Oq)Y z9ix510PZQ`5f5s`_KZRM!V3NqYV0TLof;RF&k1XD4!(-t4+q{^zUtXYkK75?KJ%h^_>(v9f zsr%`f#Awx7xvHzFX!KMrJV)1*cj}Zr0;>Y#RYy8ki(NbX0)b!15A)Jqy#x=X!lgK^y?l_O`P0?K`v&tkPQ$i?@oKg(rz7-LMmW=*xP%WPBcoc`%!zL^yoA7pUQjljbek3EQIJzc- zkMsC6fxw4@W6b9v8(+)wO%p9RF<>*0PhDV5RkSQ-+!;M; z>}2aN&(+U6oa#=oHJP_f7Z{|(;j~C$;>3&Ir^}4y9dOnpjuAkenVd{GfJs44T`Z&c zql_jGo2aQv?Z9DhD=uw|A<6y&6Txu3oo8N2MqU^a82{8Mqq7e~I9QD5M+!-DIG!=S z+{-`(KVzL!ZMkMFxZfn5_uA3$Berz@(Twc0#27+rXW;15dc8S)w)mvg`K=C9edGScL5uL(81kqk0wRueP%p2Kyw8kR9*CZN5Ye3duj#cC4nzA4A1HpdHFM-KDAKLAHi83 z-ob2~p$~YJhqi;v@MHS^>TplS90xJ_tess=pfgS_+`>^khp*5@;?_SeZtK#A>TT)K zubZr6-XWfpc@LIC6fKaAu_}3c z<^1XN-op%$7VR>^6ce1Du>f&7?`g)52esQTzWi$H%dQEAJ_aTpj)x91+-5k8*Eg=; z-u%Vie3)U?f?CG*Xt>kieQ={k#7$=~aiAPe$T6Y@BfzsYUkHv@&cD2Q>+RQzXM1-J zf&3@`_#bWF|NeIimGRyjHaOmh@q|n<$X##u%oykkpCR;=y0?Rc5tXs;__3266#B~M zM8-UId4Fr{)n<)r@fWXS-ZGFUqoZrO$NvN_y=YN)g}fjJ^p-jm7K=eypTcm>$*=vzRAb}WX;6KOvv2M&3+i;ec2+%(F|aa6j*`c_(Pl@)*aEckU^#DN#*p+< zA3RnUQ*jY6eVX>GpDPdV%BRV>a0our_f-ac%h;{o#xM7bPF4J*oiH=y1)u(vZ)dUR zKt_JD`&5{%{_F@4^PY#Sg#q5<sO!3u+7lzKtIMp`EOjiHhC*`b2IPlPe1-B z1M{6imWb#4P4)D!1&7)x`K}HRWgK;ID&xI#DJ(qc&%aFH(#vmjur+zRnU^}AlK-#1 zx>DK}+gqs8x8To%WW(W(>g9B@|IhyEKg@7`ee=Kl^MAhiSO5ACW@m4#^TifE_?&AD(EWC$z6S8Po4o4n*leW5EYGP&~}@+oIa^+e3SC(A!Bz9*z|HKeBF6nYg(2$qNsA#bswk%AVm&{^9^(Kb9okPYR4qXLf{>nFF$ogSnYua3q!ML$)Vv@{Xpx!SOYYSun!W7P>|R<){#Il_?;i zLvS7dE=>fNQ=ONlXReWAz(p~t#SoI;SK$b1G}moH7Xl?3r2DUTB;G*5bsyOWHYD87 z@MoTZ1a$weGRM+Te#lIAxJK-(GWB3PDOj+}7%T~O?26rajtN7Dg292qN-z_=$*0_C z^?n%YLcSUB_O47bupGD&6ByMN9m=iD!F^?$_fuTT%WEB!q6cQo1fg!W`&F7(0}5`@ zy>OQZd=KS-Ur-WG14{51qkfZii+OnxrCV41f%Xf&_)%R_Lb8-=aR=o+x-8@`uO5!-Us z%K^5nr}B)DmLBd30>^LkNk5Ab$?Jmc#eyyIq?`Y?&q7xRnMPl-0%WgL%J7$5#}}gI z^DWPXUYWaL=;X(d9#zIFa~4>nnYxZwGw=Xt=Znx)pHqNzyj{z$9tRKb)Jx9{pykD? 
zp*{4LOvy^7R1UzTcugbu-BK-U1X_M%EYaP%!PJ4s0eey_@^sjrPVJ3$7gck;T;O~&WcBzGS;zugl~k zo}2KG9vyt}DO~f8UipAG$kvA8i}=#O4^7H|?}UviZM?>gwXh}aqHTB-fYqka;&}P~gj8Ovp(SgSUYY+;KyEU1!y>txr;WOL4 z@kbp>GrwUtlj!JhkI~?T-x?TJ9^KVbdc0bj>w!SN%ab)KaqM}08ttb~8NT4Z_uJff zGBO-6Pe=4IfJm&S2oaK|Ps)`+NrH zl?9F?n|Oe>+6D>4uaW)eM2l1-XK)JkYHtRw;Rm(_sKDzp;D9fqTZWz0PxK5e>dhx4 z2U2=eDF-Xo2S;H3^XRPM~~QjlA*cu zhw~u2bout?Cm&zRo3;r@1iiZTI7xi$aNyv%g-)uhmA`j!9*cQE~{JWdf+d4mC6b4@5BAOWNjgJgY zhYuAexsU?JNE3Ux+%NLF+|@+vo8Et`Cz6YqWK}LNcrlfCH=z|*`AHM5=T9zePQ7|I zgJ=CPzFKTdRt_9LojB#0lM#!-XEzjfQc9RVAL~V+MA4N3l8=p(ho9Z>Z%gHneg$B2x0NFPjc$s@=?6>tl7qXVbH&?7?Z?B3UcMbGxMrTp20;`_#j(Q){Ww&)rof9ZM2tF!5A)JpGpuwMLM-Gy|=`C^k>JiM9_ z+qjb_CW9TL+9#iVvN?CI#lNkw>~_ZGJuUXXdbWdw3s*p<7>a}(a2~-o>0k%v{p^iuAF9^AiLIED^b%m^x8G~KPtIYh2H;t8znJkEo3!a?CjJR75Q zJmK|@H;m!4U=)9OV!reuJ(*YZ@>onx^(Ez%hHkD(s>2rcx@H9B&HgOj;opIt?X$R~ zPHH~lYEL~Vmu!waXV|IiP%F`Dh$iWyuPGz>3eB~m!LQG5wjQ({DD!P@wA-w zT(v}td7pzd_1R>Xfpu3Qh-MxpeY8+N-i`i=AbnM+4X#((NpPmR5+>~4-Rnck-e|g1 z$P8`ySO!-56pg3Podc;mHoBYqbO8A#v*+TKs&gkI3$0M$N|k z+W*sVxLv+?-+iw>``9el7^WRa--s8@=*@_I?W;?hx8D73dZEKYL*hmuWc0HqsOXwDTqPBXx@ywm{m4nyGV%JHP=6Y2KKE<8<87P{f1F%c1Cd#f63+&F8;Tg|Zez39A?Z!ioGDbT`MFXZ6 zmuI;#MqzfUM={JDAb+I8w|U76z2tCTI*xJtwh&V78hIE`UTy(ao40s#yT!fR8Sll! zw_xP(SmBYJ*K+3NQ**9^g{2dXIZwA(e&gzm!S8T#xcB~n(aZ8_TTkjs?9jS+xlmWv zzf6vbY2I$3yB#uqQdxbGWEE}Ewcdd}6X_k;cqmVQb(5>Cxd*ThJ^++3fIoSw3+2r< z;ZcS(3Fk^%9-U};aR&)dhbM}MXI;;;A>OOxhH%dN0WQbTC%qxif}sxKR3`}Efmy=J z8ZYpj@{%vFmhqF2gKpGn`CJjTf3}Ic?AzH^Ss7ZIq*Zv!bG0R8Ce7qJYWAWKF!l^ z$Dlt&uAGZp{DWVq{A%Ro)cH?Ryu(j-B{gH3b`kxI4~&04D=h`lb#RnFGLalmWJ}gc zH3_;m`wBm408V`_7;p|YoF&2k9dEx9PeB7)nPjkE=|?7KMyFg)eb5vT^4hh0)BA^y z&!nn7qJRGI72U&*CD*;^Gyf?tc=CA%<~$l44dA>P-GnC09R`!DUbz;UY3h{h8Hn!W z>G6(^P?j|5Qj@P)g{1aNJ8d}{N7f@@@gdB&vJuce!?b=q*M@@^-gu(p*l90K_lN5Y zwDZ~Wa_H)Mc#7Ckjj<12ZheLi>t^a;`+P~2x|F6b9Xi)|?`i)J&0R*@!V%owdp`Uk z6HUVClF8hA#&Peg}Hb;p&6m48=oz_sUmy z=vX{Z0rarX(k}}(cpmF_X7|sOE7i_@_^Z6B1F{rsJn7L|#4B?&E_qz#4o{TR-_R_5 zeF+1G2Me#V4cvag>ThV3M~aX_>RON(PV~HVWV`3n28O2J)DK?n$ORn09HZ(0T3D^J z!B&^xC|~G|L87AZbiLF5r`<`{cb+$i894}tg~$&6kX;fnx-^)7o9A0)+RuWtc(blY zW-8wxQc36=oq|HYlegwqgB6wpYa5G(1(iTd=?>x zqRQLi(?zsyc^1I+7jJ>@>$Cc5H(+^zPhFcKMf=2O?in~^Jg!`?S3BsJPFH`WUlZwm zD}M~n^!ngJZYR^qNAB>lk96<^17i5L!%ww#H$(6jpMKOpUrbjA)je!`^^q}Fm|UB@ zfFYd4#5wMo=Qcy|FFw9l%-#$o6)lv812Dyvrl<9v6*v>U>HpvlS3Jdz_-+Dq?u`qj z??DZTN6&b0R%bkJ?=^9OK6(W1$)%GsUg6XP3W=y>Ho0mk7%W}fCXw7adewvHdp({2ppTX|5=j8nXO z=pV*4hblfCLsoCA&*QJYwmH=VOzY(>clyHl%6chdm&s|9^Cs{-Ht$`(wmJ3MtD9q| z^Q0~9{tUDXUh0YgnK7EjHBUQ+JCjYKdcUpW^aG80`TCtXwDU28X7~u9!Z^J&OF+Yu z5eeNvo_MFW+mfAfOh2$=Frh=#`J=wP?nvuSweb+65ylXej6E+To@*ghPKn`}zP?sm zW_&r3=k4zJU;+v!p(-pKKmb%c=y9}#S8e!STdwbCXuFXC<46-=x`hEk>{zAxo3Q{H zro*NViwoK+;4goEar0F%?ZtH^=V(%jw#O@1or|+Cud)3jp=u z>cN3+&oX92!eBZ@^`9}28uGk?>fV$84CUj9K+nnF%cbXGF10_Ou6JipZ{gp9po2Lb z;%R&doCz=kE^DBLa)V1o?mh90L)30wki8e9dfLd;Q!;4BMfGW6X!Z9&(+3-$gr7FT z^LD&%tGAajelj|F|16`--omkn4g6BPwIkzxp$gEk`=D5`QD6*F{W#3?NQN{9H~Qns zC!dW+`N3iv+r{!IuRfB17-O_KWRw8S;%G7&OuJPqOzcV?)yH_c(~E63O-o`bj&Hg+N$k^?6x7PsX(m2I1qSaqhs8qr!-^6Ql)zi~TO}hqqPV z3~r1Zds`&o-6|IP`NBsL#PMuwob>LSuWc^8a(Wihc!Z+kMT=UD_QEw>O^@-uWUylt zXSm@d&&%}8+0&adr`pv}eVwZB;6=``eXn@r&st#6FQA>=3(uhMVA#}l#Zvd>wW}^3 zWJG5~n+3hvtV33L60e1~m{XbreEJKE2|SAN+VcsIlD?Ql=R}EDs(cC_gMDp%`rY2m z{7;)q+N802h0zsFvp5(&WQMWdj;rYhD%b9(m(t(UHv2^znVGO0)w2)7w9mB3#=vX0 zUTrKoCoDqz+gS{N2mN4-Mh?-zpt~=_Cwa7}!&^4yjt92q!fWxo-C3k%G(6Uhii3H& z(qYq|r}M<>K2<0c3#Iy1#xb~x_x$l^7dOB2_y2AO9=iw;jGUiHs^`jcW^f4TYL2S41r`il7Yd&YD8n{S?0MhDx5iQj84@;=VkeEHJlS?JO>@Xn&sjIra1v4zC~3lN7o?_&;G 
z?7ag-KltGLa~Q6Wm(IPoliVJu|BPkTedljv7!`IyoonmHJE|K08EqNJ#r3vpCY=)ULH^BmrtK=fuc4<#M2MLxZQ7& zkRf0Dm9{Cv=IrSUo4@m;zqfhkJ8#cHo7MKz@0nAltLt;q$If=Fr{>v+0&5jh~j}yfd*EWtc7Pwzn zp&eOv0H|2p4uiKKVr)Pb90;p_C5s0dpBY=+E^Ns4%hw7k)o%;p>ff#i<3x)J!nSC~ z2h(%fpge>DoZId8(C#c?FjzbDg5I&fp(XO{w?o8kzc*gHFpD_!SHf4Dy45bpi&NJR zTG-`fPTqtn+DlKwlP86@aroz-|Mi~*_fBDizTABL$d%HO@@D$ z<3KucQ7FXvio=c1^%-;iLFJ(nO|v)^E=DLzpQigNcLcbSOH^g`8-}2cmr%KT-a9=V ziJmk8tWgg&C<+Tq9M7)FG> zt@Lw^f4vXynq+yEs{m!Avs-mCbw)P2mS*q;x6ZeeQ)kw2YKc&)nKmTN#i?={cCddib&{_wmvL+`Y`!DiJJK83dg zGm#(q7jLw|;UgM`$_D7e3()#ZJ-bFzd8TedWY`nT${PF!4?J9akQKsH|E?Nj5uHI| zy~ztMAk)@8Si}$h1H1pqKSm^Q$~%Tx>AXinbWNKik7TEG+5!C0q^xodk3+xL+FX2@ zx*!X9Ub@MTUxVO+GcpPW{^*Q$CaoYUyWH{Sr59GE*Q9SIZj(D$<(c-bBMP!>e@o_J z8t&mTM#!#*9=L7O4&UgT;cN6IPdj)|H#M?eYNSW@EK;CZWyEWg58k{52dBl~bu;R} zG~|}NOC3q+6a0`HhQsK2xAO$!dpw11xtrIw_NjiSOfKOLYVcQtvQ1)`upams;E|EBKdS@cvcV+k~T)_?`C3`C{J z8@jRyX>~W`RG$vx>tDdi*w@dJ-|*hTntB{r!^2>&L^74WMB28*^Uox&%i%}w%c1^9 z@+xn5&|OJN)GwKm!IK2uBzoi@zW6lwd!{YpCs=A}7%=&$)K#z22L{}EA8ZAJ)%_GR zWhe#DLF#pHT~0gP>cMrt-cQ~1VD8j8In*A)$qokhqbF9WwUN@G)qU^AL#+G;Sz#Em z(-Nlq6|cT@qhl+9+{mY$DG>f-Jl*-V@kqa@U;QL_jM&}7VJmCWuQ~+|o+ej#xZn?+ z@ECc3<>KkmiKXuyG`#W86=amN+HN$%D|(izj&x$*rB_D^61{dQyyin z^j)oga2TS&gCD*@vUJKB-QWEVgW+BJ(Ph)FAXmNy{L4rVn=q3XT%-Wk53Y|s>}nn= zXA($9hn5cR;7xg@gLEEPfA!~oy7`m;_-74fGn_VwJ$`zHSvl5#_x95ckDGB!`NI2f z@tTjc{a8#wZU4r#yLmF+pGo79CLc$dh?t-7fMnPi55@-9yP6b_fxiX@|An4y;wD)p zHqjMZ#&8gxyxbTS?_~IOD5XWByLk@X>dPa?;vAxGw)LCoU4MK406+jqL_t)M!o>FB zqvyp`zm~xyxvl_h0!BKrpEo1B>R99N41E$mbc^-GAccwuX0jULILIZ(n`q_022c zkM0L~$sI2)>;A&aIK*^+%o<}wd?jX=J}Rat<5%tTc;1;plbDQ{z|MC2>W&8yBLJhc z_VzG(FU6;?bC`KiY+W8o!j7O}CN}Yj4iGBD1o%J`R|c%R89Es1c#bmanZPi19m}ir zep}KXWMpHQ*T3-Iq@PGv7Y!4tukc_zV|X@Uewa7d@eI&-VDc+SwMhN|atn3%e=h5uxpfcvf#k9<}=p@sJBvcwPGFi$ccamDeQJz0f?*o^+^W?ZWOX_02=t zLX-KS@dj0ZeMNlNeg%6;0fa_J!9-t`HzY@eehKDRK8w};ksH6$@FH7 z6rUUoc5GP;YroNvF@A z4C{9HbY8)g3{6kck>@+K^UFgW9(?uY=4Lxo9Ed9Bx&s>-fOxdhr*CDXB&+$wR=2x0 zFK0LsYU5r9F*A5Ftm_YEY(;06R(J|VOglpM)t1moH}KRy*MidVjI{sh&;BMoUi|Rz zNADO;d(Mc@NH5m3F(;!Iy~mTC!O(7vTNx&W1UMAk44e!&%C(RyuP_LYGG^Vrla6B0 ztG<00o3sH&c7`&>sXZNHFFb+XnZcG`)jzAVJ@jaJTj;}UU8W{g;^awu^%j!DU3_td zb>R*erN(Q0(o0-hr2fFKel3VSizmuP-|i0EoiRYN`n-jcakePec$rUG^-JaR6^84D z#g&=|nkU|Pz!~=#w}lL`2tbzLcCUWswRU-t*ZDL)ds6zh-}zoUBpxJhVHHt_S{%c( z#RC$!bUXv3MTa|u&G_Yqzp748Pd|OW5F`KNfB(+J>2_Y&XBWRp?qAxx@>&Nf+cl7f zF3;dIEv_7Hp^1_DS|MA`pF6Yps>QL7KKZ1W`>(YCe`4CT`v3LCFUH6Qr+2^e&crGY zx!TBq*)?*gwrLTR5!DWud+C9Lhj(Qxy`5a{-Mp4;^G>#ip`8jTb}acI$LiC91Mg^K zX5PR0j7NDE!)*?~EapF*FF#tw`WhWy(WcO2SK-(KqDQ=RhI4I*cQ*sK^F$b@ z$I0MXIxYSg`&nc-RroVi@7({o2iIQ#Wv|dZx{5D|_<*T0o`0!gkp3gqTT-TC}rU;X%6Me*KuZ_lK$< z<3{o2@#JQUd1TqS68Ce2;5ARDX=`_DJ3>O;Ye7X_(1~P-E}|2RC&pa_X2}lGT6eX8QI?7m7u6-IGt}_T4 zDRaMrP* z!6D2!zSVf5wf;OFm?NzPb&f^mRAx!(AZc{1@jyN8eDuqnYeQ2%!5!y~`d%_I3r6@8 z56c?7m@^+``Gjzzi|IfqA+IT=nG8wpZ*m4i;eE%T1!!55pMo%RuUB3`ICterG}{ z^%f^|$m@RCY7jSo;1NFH!C_mhi5Ld0!xRk$m8W_@>3Q(rw+u;TDlJ1sP`w^O!{0@N z$)#&$%G8bPp*ffXr$=fR3_R=E!0%_>^glxwp3kFgCio=a10$T~U_tflbvzqdCuvvn zxb#zg*@oOfcGB5-Fb3{$n=+Jx4+u&q3AKUmg1fED9xuQv4?GN3UMr>h)Nr|XmB$)deS2Uq4A23*V>Uf9X_PwhF=4q z%hGv{FU(xCD4|LL*~)ycOR!Rnr;F!!AkAE@|KXqNfRVSWoZ%aOsBCWtL0uz!7#%zs z+WRfbv>_LL=YMT>C0e+ae%eh+bm6yCnl{okJVywCfX{+uf;wR4cHtwZhbzs3GrYmC zZpRoV|Iq#$dewuXeuL`(nvw?Q{5Q2VX{K}ihDWm(25TwXF;0e)veYA-<3kTh)qS!w z`MRG!E3FI=_%|Y|zJjOccNKjoL5otJfz*yW07v;d~{cN>d>Jz$W5N5yG_o;wZmWsQ;sLw z4!zqlmL9-4xcn8l(B0ZRcbdNH;c|K_ymyChj1JC*oEmP;}U<=bL+2=FQ zw>Pr&C82Wx$S5flPFc6=?_~`Zw0sESKW<=l}K1srbM-r4CH5q|7-kX#yWYaA5 zalCr=WQcoEY+5)ls@`@$Q*~lu$IFvvAw={9?s@G#<#rwTU8VPocl!rFd_O~8#=9m@ 
zkMeSS{)`Nirb&)?;o96xP%tor=wEp4Zn9`Q>*Hc~K5DY6j=}Un3!I-cIbyss8Dfm{ z=1FwjxpXO2pIjuz$6h;|VK`%8lRE~!15K1)fBn^&kR0joItE9Ek9`aly=MSAl_B}% zCctl%cf~}tfE9Ek$3LD@Fxj1fT6=yHO~xS4l0^naiw+ZuvZrJQ+H(L@@9C_^)rrY1 zV+e^H1JTIwH^rRK^Q%cT*&xZoi+HU~;P6=gz&LKgJRx){Q@t}v z`ItCruj*UqkA!P^zLt(*wgyq6+CX^C!a;m{nhe|&1|tKwkXO$c5|U|#=s7G{WtPVT zc|5<%jnQfq}tXpL#2508{nF5aZiGG3T$hfKU+ zfWgO%f5Cs8SL36$y2rNZ&l%sT$^DELr_a^)!mB)c#Q9GkOvWFUemu_W>$IuYzW?0} zF6|U)hs)iJC5JO?96I+#rHetFksx{6cRXWvMuRlM#4LZpP(?un!y?4f3?SQvQyt_j z-Sc0xfbr*l{j<^qr*_I?*Wqn5!S6i-?eK;0Cqa0i?s1C&TMTIve}^TGbmdIJD5hHR ziZMkU(4E>1*=NXNWO~AIiSL2li8)T+O`Gnawz@x=V^Ft4=F%5=BL8K?>= zRodejKNthGLo!cC+I8~IJKq^2$ITW4ge7P5FYCPZXU9)z52@3i$d_m zITWsEL7}?cA0CWW2Wz)t*MIcM=Y^ZOo6)la5j%YDXr7`BLJaGSQ=@Of*Mb~4Ege{W0pb0NDb_!T6?T@qp2B3`5DVdSU?bRY72P zGuLpNv3Ja2JU>vKsUwS|VpWgGRo8f2j`G$@eB%QHHrZJQa>i{79SaVbyWN4&vrrPv zYe6O$z1J@Frxt6}F+=*%bSmSaoeLHz@#@s+6V*ctz!iKV4_!vLD|z!ebAqQZ9kD-M zbfkq2bf+yw20h15hD-*{CwNq74YK~`>#uMA?vFkQ-pTQ#l|?@+bOH}shUmlb@=iOa z?oN#SC~SNqRE~Sb`&&0}Ot_0f7S~IEqcBDC+A(yexZCKlNPadMIh_utL&f8_lViM^ z8+Yi380>%bt6$}z+(KwPVH{^*|L~(vH$VUIlZ=V&){17d&Z12e+WkXzhyTexxq%2klun6miF6X83gA?~NXd#ZwDgUj4?QCpa(Ct z89LMAl=i^6!B`7FrI|i0KF{J+`Wy_N=q=Q z(ISM7g^Ak?}O)s=Xvg6Pq2LTb4yp%+B}Q@#vMClPFt#979I$H_3b{R69a0Y z%Ez3T&d_G*Ai5ea^XQS^LyaHOEfE2T;JcaQ30_Ag{9?W2Wm6vu%8o zZ$*XaDA>_-($zRdotd1%t0Y4mFoqq<34s|Tc^yKtZA3m?sEnDQ2g|_Dz2VB5RzzCQ zcDxc+M=$>XS3v(N6dehpj()tM2PQb{ow=`cGoa`x_&e7W_5y@a-59V9-0lZNxCDP0 z%44E3N*5b@Zt4?O(PGj<*zvBLDWRV}rH;-q=6g4W{wYyui)YfHd+=A`3=PVRR%PIm zw3DI#+z-rcdEHL_t)NJ@fV+Zi=?DUNDIn$S)b-TS@T)TAA6UH)hw&QhSHbfRABGn} zl@8zg{aVF`&rGTlz*Xn{s84BNG3k}2Va;B4W(bW#4pdPY4D~w zd82OJ&vSIZZj~|5T&95LS$r6e(NazO!h~r9(tz#PBxB*~!SJ~JNPx+z18IApuB3se zZ*{TSR@eT4A3w&3Gypt|!|`Vhe@Sr+i-GoWLl(EZ{q`N+_liMzupHykDspE_nDV`2 z%v))tUmr2{)ky`HLR(*dJLyI@b%Vc(A)_;)fIy!ieO@g7&a*8}WL%DsZ*@?*QGP3H zXt@YN>6cEboH_;-C>@?D7oSEk%U>SX4CbQ?q*LK_jPAe2qk-!E!ioHPMt}OCX3C`O zyHtw54EMm(BDr{QA;IWVT3B@fzKo!u^}iO+mR=&CWMDiomD%-6c_0#L*iV_^J$kL*9eL@&T+FoBY8V zo_U^R1serptfaft^XQC98F*-xAbys1+L?Ojy8lYM)l}Z28*KTv{F)f3;E?qxU$&KZ zWUTbR`9sS}JBQr5L8UsHwt{kac$)m6y_@Ni2Iis%holLS$v^yq&z2{{xsbuPN!$OE zzWdeIlUaDdMY~d7>6vm$Iy_p$kcstaeAPS9zQJvj<@uyH(rNRf6+J} zb?9PSE}Ug0cLub0PgY7ncN%EHb>&!>2AP8cDEQZVyeFBwJ{CYQRz1P8lncLUOM`wS zsCUn!M7>0#wj=%Y%Y#^18`xPK8Yw9*^rVF|K9pXYlWUbhc7ii_(?vaxgsCt1^c>%e zUFejN-zJ~7XHRE6{Z#h@bSoFVTKE0VxKK%}O;r5AnZB=d>jO62(j&o9f0J+C<9kJA zM7Y|d^LmrkS5Aw+nFnw2ov+@ypT|yawl#qwYi zD*450HCX1&!^pimzV41TUR%=;;)Tf>11m!X0~o{NY_pFij1LCcFY!)p5`sSmD*I3q zU>=*Oo<*a|dZq3A2b$EWRECBl8S#0onxxFQ(ZqLX$P4}Y-OY`QS2tIRoqj8$6Qh~~ zI<@bqkKj!`1?@o6=v!3Vv6++^oe^>Dr|<-EBW6?jOJ zsylh1E}0io`(TFj<)t_EQr$A@jaOuK#CXS`#t z7DKsywHl|mk`=%7#)FK5x7+rAI|B{xcLzmTM43r+^pBTkZCKoChD!zpTi{O|Z}Ear zIT*ta+MqJ0|EwIeGmg(VC44-;fBW&~lb?LL08kuWb^cdjhe39PeEw|^$nd-?q!=nXbMi900 z8S^xkeEukp%H;I%^~>?9$$IgipFLQz$9PUoX+-%Lr5)5sK3~)xGJyr3yvAEz;k6^U zzvy66vS9I23~+ThbjHi4$?l8Vyu(5n{H58OVTA{~#ia)sH=ouf=peCr^#cP7+gBiRZTn+bdLaPP2Lxu3UpI6}WevSV4G)9wTjL}D^mpz4$sG6#; z%E^c_W97=)+YSS}M#jhxf5^lag>?8||KC5}{M$eJi{S&0>?g^)IvCH-+K9!SJ@NHP z3nkhu`Mg@YRW=Xhdl~buT)ep2SM_ZB{(-#R4-{77Xl+yM;?WV|vp*dgBQt(ge(l+g z20KA4VxdzyeG47P5PG+m=MLYq=r@Z!;VdshE4{5UM#puxKLeET0X)hlCUap&#HZH9 z@jj&!EEv+wac1*Iht)>jc+x&^tavmXtc`ikSURCdD&Sb&yXu)w>0kcU=BK~-Md3U?3}$CvWGM9&`o&ofck<2`>Onuq`1hsJ0~bH&>ZNb=dutqG znj=h9XD^hOcWGIp!-z56QVx9{nIccTV960xLN}8E#!lftjHl_c3uZO(Fg6x(;+d_526cP0k(@$R)_GA>-q*$k`uMrM{*KF@RuIzRjA&o*CW z@ZVqCg8zj=x!lSCdZRXWxsXAB@P~i2`8WUmk4HBhZ4sY5*?ngG0(OcvfQ{SQgOkS`1HjGO%8~QmBjIS}f2mwbexjzNf#Z zFEEa*68dVhuEnS)Q=b*9{Z@-i@Vj)mHdI@9(80MkuEpz&kB~`y+<4;0 
zAKuj`8j}efMIYQ~!R}fIG7EP@HqSK9!55AfV&*G{JYKIY>x1s42QB1_n?2sl;iIwC z!s-{=wUypGW(Q9=TA*NLzM0XQUbr1kEa=f^@SX#=%lQ>~F1*y9#$ON97y6__jRRB_ zeWY#hn7`8j_Qs-k<@u2o1MNCE65r?!ytBY(JaX*l$q7YdcM!f!e_vj-!FJh(V!fu2 zOc^H$lR`4kTatd4!@%e%Z^}!b-mmYr2w|KF$LTN1KyN&%{nE`Ac#ahA=16Db(3P`z zoepqTgR&kzXvfgaOYx`0$Ie_~bhjgNLdN9Xub$B=M3Z>@_Zl-;_*)A{Nlfxgm!pF` z(5Y}qn@rzKV0p14dlF|WqpF#z+z2G7g4o#kEcjP8azz7r9)^K z_pBstz&5!9X|9#kb&7s{R)sSBjWMteBW?^c3@i<5eJ~?w3C$%CB^ZNd&*iCXWsV>>ea+sW}O{C#ZO?EHrr5`;z(^+ye)eTthp4 zZ_7dZ&;b5Owt=(sU@06O@~OXWy9R%Hnxti52G?(idR=)A2bA9<^*;RXopRL)s4%J)TpNHd4adS z)v1mKk5QzIY1j*|0QX#3>V83#WZ?8%{S1sA;oN#_F7=td+o~Sq@dwJ?FP@VvJVW)Wr}b7n^$LD;WN$4VnyAY=^{o8LL+#?J``&B6+w$>e zAP=7>WjtGaB1@il?-?4$_&&%nkM|rs_^TXvgHs*{X0Ci?!yBn1XxcnJ(ih#0ybXR} zly~$yoewX0!P(}Ubbx0_AK9q{iF&Us4UTdRp7Wx2{ zPM5r_x^cbQCtl!5as-?&UC{67w^b?ge7)~QB~Mvs+tE2_oJF*tmB#{vl;~Kn{Ejy5 zDZVLHVesgkI_Ya6*3P0SaC9?pbW8Y++%@=Rh`tk8@rL(p=0K8cQQ*U82 z#rJNF4REIv{fsW~zOt7r^-P)gj3%_2;0>?J&@&4^W022~1l|@}xT8(U{YZl*dLCZA zGwvEaq3+bV57P02?(XYvWMhC1q)9W$hHs^hW%%SHkA89Fd>%+%Su4YWtE^QKz4PZ* zod>^3%GEshe)aSJT)f&Nd4%oX{N$5Mo6m0+cQKxb5o=;@aEuP&Gxp>a&MRs3Y|pM- zyEQyB$rc_&2#={7G!6Z`nv`4}_)U`zTf7f7d8gla7YX@7V1$BLlR!p9V=i{YjMy3H@MCt`g{#Tn z$v5BL96g=kFON8MzId7;rwOpSV2tEx_r+&lblcYMJm^m5v1gKe?W@8eFv?Zl{*3h3 zZ`ua@q{%#QuVl0IjJ^&XGoj@zXu=^L>+KAZw*UK>@a#>$FImCMB&)o>CVAaf1@u)0 z9!Aq1tZy_`=l6 zlFp=C{{C(5Wh~m;#gXK1-|nwBKYIVQX&cKssXYBLIvp%_^0B_^;AF;=z44mScX?Y? zK49=`ob`45xb*So6QA8;iOHREEx_y;tcDlS5$IV6x{_=;>}|z&Wq1o7#?*exoT0gX zGg>_#FS+O;CuHYIb@HS!F$0~lN;JGA5Au**@_v*NzVIl7t2yh09NCjMrv)VMsYH$% zCX}OJ{CZLOyyKT1UE{uK=QEenX3{X`T@n*S@|g_MF^}`$GAz z^t_4tvkbbt#Ak;^^>g5Oab~wTqwS^Ix%%XpZic-pFStF)CI;;9@Hhs)hZ)A7WF*jz zkG_1O5GLuS45H|qzx1001^nK9;CSWLR)eJqeR#Apkbe^|EDqQ;`0HPOxcR;Rt}$jY^plZ?wR`0}Oz#Y9`(ONL#%X%O;!dQES1FwC=oM@!LV0AN=5*&ENm6A8y{r;C8Zw zKnq{msPIn0xxh^fbaB3UTaRa9cpXa@@$wbY#|$@#L4ssl^9pMtt|Z z?`&Q#Ov0gzb7Ep2DDRo`=f?;(2u1Uy7H01zAIf0})ExG#{l)#>la!SJ&8i`1PJ65+%$VdshJ9yc zC_y=N+mqVR=#Auq*RAVW42>3eY0Ewqdf_|Q<%KV!?4uT-s2m=x>WvYd+>i(3dSN;= z0x{n$V9d)PKj7({;aZ(r%obvSj41C`2Ir%BnIG=#2yOa5|NNJmfBgr4IPC;4z`xh| z4S)I5UzO%ay0Znpg3x9FA za|$T174oKq*u2eo$D>0$bg}Buaq85mLKk%aaCkh)Xv%~AC_P$xv9rQh@t1j%|L_0j zzbyP(J4;#^x|=)*&Bl`+P5L|is9^~MH=6PGXmX_e(J}fkFd6s{#y3XB+bvu>^iW*w zbFaKoob=aP{4P{TUgCC(tlbhBg0j0^5L=<;z3NB8ES z$+PXSIo$%gG0+D;_-?#@J!A3Hjl*+|6V&s~`i9eIPPd5Hc>&eQ!Ep2(-Hgc=397Og zf8fpYS~XVG7cqXS=qK$YqNhw17+;l#SBDD65=U!uEpS#B z4r&!Y{AOO+7Hyn+AzYlZM=pN;<%C8VM~R-3t355oJ!s)k$R;74Ea>g*+;AZ4v1Ikk znN!taIx9X*Xq#k#jwMgpnsZR-h;!*5yIq7m;mv+HFZGid=wE%Uu}A5&GX{5Uite;y z&0(xO)79hYlV@AtDMU{4bE`$)J9(MkZM?;ses&6Fi+N}fr9Ob1(@8#~b@VS=xj1m} zXp0kzfAEVzlS+F0Ht(D{{>WzBD-E4wOjhl;7}3tI7Od$DX^rRhHcva!LEwio;tNN| zL4|{Yx`P3^n-E`*YWu}bH;z1}{?pCs{Al$fY?b~CT>8)gim)!~Lm!Y3BP-dfJVy2S zKnC#Mc-^n}>pwrnKaDB$XZ@KI&F;pMXbWT%tB3icFeNmZsUpT;hk^5NIF!O#3bB%8 zn+{{H)tCaj5Akmq&$yAnbqgd=GqsxcDtr(uZ#gTkWVC{LS@3rTmg7A)aJ&eT>#AX3 zmU9_yAi2Kqu6b_X_Y0ry<})L`uEvnjZ#1k&E*P`qTY1N5QpUASUmj)lC0LUQr%m+2 zwX30J;KhS&Iq`e&no3!zq%7^KkV%Jr{FKyx;15i=_inzusdpCoVhq*Mx+c;k@45Wb zfeI_oo3boe+fL8~b@C5h>PTT=_*I5qn5B5)-+FK2LJ_$yO)@cc-4nmlNL+_+|F4fq z1Z*DQd)JWZeq;hX%&3edBN|Zen|uy$C!_oL45yI;asnW}tp+g4eFtxOQ|^oxlSYw@ zCi2AR@oo>NF=#gEo_Yn`JBBAlgOZNHvgc!P^m_1C7ZB+)zi@CLJnzeI@&T_wy(e%T z;{#fy`BrXv3D@2)FFp5t;0gBp>%U+P-@uW7M>gD7h|){cw|8TF?9#{$&*ZFKxrX;l z{!`ooU;e?p*If@JX;K91gC0oux(p_sZm%Z?T;xITl&{^cd^>f-fJu4EHg&({H|YjoX|}9zF`#zjZ(hJZXyE|fydTPCTC!AS zlvUgJXw}ci3fbt-%U{X#&MF%X>9mlaw%Gf@HJZN8OEiFeeYDB0`;JW48KPTFtTskRxCaj} z{#LyYkAl$qZ^60lyA~m6)Kx13f65ConkJnxlx-63U*(L>t_%yIvVbi`a8ReoLE;5o7azUTPvk3Lf-qzvHg 
[GIT binary patch payload (base85-encoded, non-text) omitted — no human-readable content recoverable]
zc=3*5C^;Ow$!ex~hPJ0I@K2sQ-*S1qgl@c+s{^ScOYy+Buttya)W$c)0kN+c!(J6{ z{Y7p0VW%Y=(|>rsQwI7>bezn1$1{>QFMJsT84Iv#jHbzmut0}0MDad9UAuXiA=-tH z5fbBW8)8&DBcf0PaCu_GExCSC-#C-(?S+p`$kT=A!JAX*jpON7#;9|JDmhZ$d%(z) zUf>z5!FYBGnV^jrvcNrF8t?Z5&-Z0ydYl)hd4qmSMLmA+%*ST;RVx;LR>^JxWava z$s2UNYoplE$+OJ~p4jA9`#b5Rf8M%vbMy9l?`I4xq*!f!`b1+^^q#E!#u%S*R+x-0 zKl{8k^J3n-rK9Oi#!_>AFPHRk-MgJiVT3=PEI&^dii@tj|J`5za`PvD{9)Y!s}6swsraPvaj)P8O72yMbs-*|J%CQR!CedcHzg!IreeKT5L*Y-AlPi0KM z9}mcYvG7bD<$v|F-^Gu{n%T@trdroL%X8hl$7ux{w`6!PkJ;hf@K;|vKJ+kp?&Ya` zvN^|zJe8e5xqb8Iyx@@W-loUNqi2fIeP(pQ?TnA1G(4lH^m!p|ZpSC#GkR2;zL_v3 z#%Sv!2IC38l3WRO^t97z&*~?S3Y)^f0d{iL;PVk>?(Ib*<_u@dC&&z-#( z9nF)HUmKJ{YQ);nN9)8-2z1XVJN?NVAQ^wrSS&vG(TQ2U<^_8h>zz29Q(57vJ>H84 z`nmO%O+91O%Xt5bzx(;-CqMqt7^!dGxRp`WW|h8@HPbvZoMvpqobaEXSbyMwuMgV9 zo6`m9IrY#nP9;>!=(6Yi){krm=|`_#RCm8GZNvOXJDLvq#%4)XsMolvqW-SAhzDKj zex2>q%S0F{tV(ybTK|ppNub0 z@&=#vjj=8ncREYWYt06OuF`ih=hnP$*b`ji<>kxo^_rdL#kCJTc{X|a)vteHDLJ@l zZ+ZY=oFg zkym52kUre(Yz!Nt#?%lTV}v;$d@BYUm>CE>jR7$e(VE7~G}lL`pg8+q-oECu{&ue+ zJzM`L=YUjghO$N(Vz3S;~Wc`R8Y~&;81(V-DldJ@B^W z{2m^tLmI(t#?eP{Wqc3)3Em(JhC2I98xH2tu`A=j6ka*zsJ*gD(%>-w)dxq0rUiE$ zW!&aLuil}i@-G~Guje59InVG+o6aMHd^it+!7qJ`Wed!(r&8;x{jqDY64TtEZ3<7@ zY%7`csVhDYuczRWBkjEHv4IE|{~D0ij0M;bt1gqh0L_1YR$p0l4lM}K7OEekmRl3- zbuB}LlJJhs0k@@fLGEF71Cs_B1D2cV!4g5Ltc68%EWO$fcmuz9^_Jf$Rs1f zlR<3dpX5?a7(4}IWHUh3G(4KV(A}^K42EM@upQZ*YvfCwVATJX@x|}xqhqw|)B_c1 z^^+znbGiisQfAA@SDAk6(_rypBnOM;K#)J>5SqMA9+(E-qOn9sFO*6tSA6Qeb{y{; z<+LMuTE?$UJZ-b6*IuJzqRk{_y<|9@2JEh$X=Hfn^IN{C2k#~y6!39-)rM1kTW;V$ zY_4bvJ=pxAfHtpg229Tav~*4dlna>qX(7CtU)`y@Kd4{r5N!Qs3wVn5%st$ue7Q&O zRZbbrX8>7(Bu_AWJR7se_C9&>bp8+TLGOy!1A7Fkw7zG69obbQ*tLBjR@NPv)z{%= zK>$Px(rKe^$nPXplI*!7OG9k=wr$y}W@AG| zF?@`$oL$)+GFw0VO&ne|0rM*})iDlSkR=Y-Kt5=QIbr(4?*%PUZD`4CzfI zr_Rd4yIIS?7%!z@y$!dwLcTD%^N3Q%)Pp|sbdM(bATAG|W3?t8LZ%}{$M9DhcqU)h zHcf9JTHkJ7(KF;lTZ5uZpAkO=$=W>G{*|?q1yuW$xhxBpo^(I5Fft);&-=Ubm1~eG zrRVhAeCGMoo*E9n0pH*LPyX6D0P~q~P<}Z0qhI-WJW27$FYP(Ilgh?Fu>AY`Wws!u z&BzZ_U3dXReD*@pX=nH4d2sLM=J$W|A2alxY0>d=bLq_TzOu0Y&c%-MCeK$cxA4m7 z@!Mf0A7dCrmF0|Oo;L@>a z#^?h)vl}CFGD~%5Fvv^!Ws6C2&m;CglWPlx!x_GLYVjUs+&)npU$pb6u}C{p=nRW2 z{n(<=LeirAbY4-nGe$GeF^XtM3%XN|UFXg9+Jd*8jZ+sdWhA^ci|oDXvM^>Ow>Yyn zXB5>pX8u`UV)Sr)+qmgSIeK{ZFoZDfNxS^PyVFe1JBoVatFJO%KW>qq5v+Uyk&H-p`adfD;pbPv*O6inJFgyIUZODzw8+U%y?_dRC}5m zFg8`7JdIn?Oh3&A0J#~Sm$~Ocr{URb0+esU8zhT1rJm=NcI(=w;nTvh1u5qk^OA!Z zu*`LABE5~~BY9`(TiTJnBTol2KJ8_M!#ic~=fSIv@tRAPz0mFEwaa&w4$EA%uj1k5#N7&C~G(FPz!9eSD#NngH$Z)T}Ff8$O z6jR#NoZ*p?W9B}Ah6e-Z9F>nxGY*u_)7IjgTr=K!fVX(a@CikIL>@82;V#B488TJE zE1tPWGftmOA6&e0rFLtRE4kU7dH~~FLwNN(X>;L8e9}IQWmOh_`Y9eXZqZeZql_@I ze2g>NWgUOdLl}(O(OiI+ynJBjHw=&%`BeWbRMOHxYYhNrbm=b)hUA6u8(q(e3+-jJ z=H;J$aU&hleLB($J$WZH_%d33dF`vwC3H93-zkp0&=@CrhsD+2+i)slDP7BObu9fw zr!aa7mBF}3l6Y>?w>C8ypx}ibp;_*yyWAVy@r1mq`)<1U?t>?VN$E*s(2*s+(kAWL z55i~BkxceayDeQ-yVYKLp|RDk*gNv$rMS=940+l~3-OYtzsfg)SKsWPafZHMGD>f( zbnQ3L$U=0?nxnjE9@*?E8W@%yw}~;~TB4P_39DdJV6V+sFD)hSxRe4gxdhj3|5o(!KM5o`uon_tJ; zpuEvZDQ!j`9`k+i6*HW8wQ45EADVF%%SA!6{47xT%y==J&M>4&}EnR6lU z|Kn{Q;FS%O+j*NCdu+s>>%`;va~Jv)*SoeAGv6r^{n(me!vDpW)1|A|9K|N|gHAT^ z7WZ;(?SuzT?Yu}|*&yWf;d)tLG}p6fcOtL-(>Afg{ZTr=*fNIVZC{rkPRrgjA{NH@ z)TTsj__)oMn}r{7^2P=!UEoFU!@&47x~H}wqkh)}ebYt3B}d5u*%I2vw^Ml9iap?s zxs{G*PBFSDu)(qk%h){nCNgz*?V&B*ohT-}%cihc<>iz!1Zs-^YqJE6!5i%1$K-AM`5wPynFt|-M{)B5C3HO$vpx>1- z@j@V!EYK+-M(G7oqO$V#Vdz;8<{KSTXN(&|0R~3*!fS>zLmGB%`O~)xj7cO#!R)G3 ze`|D~Xd43!0jul?5h?B2n(WPxsn`}yPwIp_10zs79A>~l4;)<8t)?-~pb0<1d&q=K zaApx+p4o*Xi2Y`;sTV+JUBi!6Jv3K2`N{!ayk5NVEKb2e-FP(Zuy{SJAVc^T^f4|D zK5Gz}!CT$^577FQclfY_rJUzOWHiioX+sae)&}re5WsXzkuA*Oy9~5|-aQOi_`qxM 
zfEQ7TN8`OUG@^5IR1B{wudWcn7B!LtgD(V>pXcrfzjXOMBWF{+`oSn!dWv;<27~0; z4n!FShicVthQ@8Z{RVOHgm=K^c{~}O&(lDmcT4k-Jo#1yyZbSCgwMzYw1z(Dfg0@S zfU3{>BfYed$*}18YI~>;fPPd*Feg++OwpbpTk@K`PQG9a4X7snbO&6udvM?bmsJ+r zlBb_+e@{EXZJQ@P4o=Drye+=Ubq$V;@&IYP(F^IHEq!EqE`7v5ctB-;_jE^(XCuj` zkLOKD@Cdl!@qBA@`AUX6zV2||J{Ww-Zkfo0zkZLQ6zt$KgjBd>x`zza4)6h5k%Tm*9_8-s{GCm$UV--q8@I!2ZoxZqSCy7O&kULC_? zC~BCm1~Q=^?eL5ivRdlvURWaBmo}LfT*gymjPbk}f-CG;FNlfiJ#8WJ`2c%@Auml(>5qaulIcFsGe_mLJ;tTY>wdg9WVMl;-cS-D0E-i zjCIpzf*r5)QF2J^myCBeeQzg!SiSmnjWKy9-xO9Tnn%{iT>mS(1>EnR8#Cv-Iu>G6 z-_rFYR=KIXKCqLiGHQ4DPMQ8xhx?Vbg~^zX$i9Kt8NCJ(MMegP_~6K}ttx)H2KpA} zz@?{M;fsvLH)XcCBW+R9!*0@{>c!Wu3bWF?shad2%WLcC=Hi*dn;(7m^5$FKGOj;hcFER`<>XGM{dAmFHV6se(qq5L2z0|7(_OUhsE`R^~ z!YVJ&BaFLrB2=$P9IbU!#J6}pW#A?8S*UT#gXbhcm7->Ukb#^LS$=bg@dz#*&jTAhwW(+QTQNmD zKF!0m@$q4XU(fbxfAx=1HvBBMcpyTqrkdIqQs!Z0BcQVXnckQaykZ1%MgTbXXB+eF-{7MGDcP^aF$j~N5-n4iz@WLOjCv-(L zK1?niCX)|~^?&1fp=_>QPxhB#Yvu&uU>;@O@u-(T&QbJq@DpV+;_aO}kCW3^jQNa* zj2#w<$iP3wxH06HH1jSG(r^0G^nn;4?kagb)IuIz2lv*C3o!>z4(DYq2B@_cV~`lz zxI|yiIH0Yg%ZZ5B!M;-n9-dv#GXz0pFE2RZPrm-*%grzT`me|H*-JHT5 z&_SG#G^Nusfe((2g2u)+Hf)*c!nZ6Tc7)G8=&es+gNd`jm?93 zWb3UASug6NpMMd|8dl#wmjRJMUq5Gb(r*~jGy<Sw9)_7IsDdkmB)9Iy#6I$;Uz@CvEbqLgqQL(J{p_p zIb|4$wC!5It=^zb@I^hNztVfcB&d&EjzKXzZRaL)O8wKiTz?#j( zemfn&h^t;>?+V4$?=}c+4jVJ+6~_^I%lerqls3p~@$mhMg8l;(Lz z*G|b{<2(K_#`*B>W)R-JFi{)c?bO)g#;@u8mA=+^_rd$$+5Auc!@t;k_`&l0e7kUY*cl>x!pb#eJs`5-uyFwt0b|RuU>w)ut)DFuf62EdC=p=P`o#Hx!J~&&7-5yXgo8IA}2N}=Bjy4x4Fe( zVZEf(=2{^yqVt_z9O-4)c_9PV?vFZe=wpHaxLe5kI`2MEEd#SfK;y1qOXDid}6u>&;55mu?lt1b=Kg(MX5Od+wag zA1?u|Z^>I#c;LHCO9xe-x!A&KviZ5NN*2Y)f1NYhy*7901IGWm5AIfHyz04)p?Ntf zU8dcDP>=6$+fhkoTo>A`_y>Rbqv~wl*Ze^H#rTnjUCec|wU{UbZsq+gX%WZYFg-H89KL@(a?8nae;@9d%d`3ScL58T!axV+Lfvu7a4f7GTq3JumMV zVcs^0xK*Iy6HGZs-`D_dfV z!5y zR8{VId(DkbFL7kN?{J<^kEgBh6|M?PK+8gE)fEp0fdSm_VSW|(U%9BNOfY;;qxn*2 z$%A;0rd@VC(I=E!MOL3Bztt~%64`*OGAdF3TydBzs;2v?S30AaJ3HD#8wn%(a`+}+ zm7PLcSc7NJ;W_;7`Od&Nd~!UB4C0A;mD_%XY4E}~K>Hrx=;~hEC^vA@6=biX)0&h1 z_i#XoTXm$sclCifAzu0(KFC9^hpGLJ&WXi^-jU^RxiPfPG2YT~&=X7JH}x^p4*GOw zNDU@%o+o|vy?%#txK6VwztgpYn0(cX&Vc}?_FFYA-L`7}Haoam8R6-(^c6U+a8bYh z@Ar;9^S$G7kJM34Iu@Timg4{1;KNQ=%nLuYfn1fMmw3X;>r!6(j?rQnpyr%Z95cRH zaEvY={;HKeCcl*(J{ldmnZ8jSo8X>MSn2aPoFO)fo89;RPtYdCB-Oy;+G`hf=n zXDc%>Sw?Y`hFhV;rl#=UAFUI*iqU)eMY>2GbRQC%NY2JbZM8OFsw0%qynZY_wECt3 z_#cdI{v{UCLKotjiK#?<6Xu|tixTO$xMmH@rBfK5;nj*$?vwGzowA(&^2oA6-i=E=g&^4-mQ zfBd}(EyCLvUoFV+@Sb?r+eP8J{5OO?#A8EY))8A~km zc#ZBIF5FAs_X^kYEJJx#-+5`Ng>@^o4C&7|7dt`VXuKW-|2dMJ@q<{8*Vg2F(qi;v zhSQF=XA~a&Qf7ubMg?+Wp>B~p-oEjZ{EL5lJP)%YEwW1(`1;l|BGp~u(OyQ3m+`@3 znh~9TU_6(ZLC?4PEZoM>G~=B%#Ow0Mh|nH<=}q-aU47|aGV1$0ai6sie>h=*dRAVB z3;N_>8-O;Fyx7yRdBFrGLrkk#IYj zQQ&aKb{kbZlgY%Pj3xV{nWvqNuLpVaksrnp9$?4d77bIE`l4Pr269Hf8gb~Dc<~vO z=3hk~W~4c>Z^h>YVr@iaaO077dWE*&`H4Sh;0eos>P6cOV0hy*Yn9y~rC9y|?y_RHU<`@F2M zO|06+$r>9BrbWvb!dQ4H!`_|ErN$qpju@Lxq>ujQr$5`=&g1*?mCMsM6&T*&?U`P% z`^`7YbJu1c!})}#Nrq=5BRJB~_WO@NnHTq6Y?IFzL_gxFkW$BwifP|>Mj7oa4mG}# zqeIPwI+rkWGyOpS+OPjK58LTBh)ui3GrE-w9PBg3YwX?dbW%#czzG+T# z0KE7={kOc}@sRZTD#ov~ZSvm9Gx~7p`nin(21(=hQ(=-yW28J19go{=HUEPjDqUcE zNu`i0uQD)!KQdA~m?EH8J3irITm9w}^!4LBb3A#9E@a?J^o>VDi<{kVRmzu9~`(`L#$jXB>W+cpDC zk)4M6rumBT_i~%tZ)>9q%{7d5HWi#wc#whYXhMu5#oOlr0>1ZE@V+L&}D2$6XQXaO+ZR5oWHL;t;oW6AF zQU=7+n;#$j;pVIB*Qbv^Y>c>e?d#2lfAoGE_JsmVubY3&rm+TtcQ`PFdpYstHU^wL zvvJ{cs`rCj=zRc}F1<7Q!6}(rdGj+2)7fOk2|C8;@q&+Trxa#A9ghyTQMntJzq)p9 z-g$7meu`cOd*j99!pNcFjqqZ@Je&?+V{vo3aW_MIJ0@T(>=x!GzeEk-`8NHo4sSjI|!_&^po*k zPad^r4LkD2)7eI_O`Q|zQy%6vkT@$ms9o?;=o|Ct`^6R4=g`YQu0IJABK(mKBb3v_r`;MzW(B9pms5D$DVw@swV#Esms*Pt~6K+0C0@ 
zO`kKa*n}Qklg>x`IGiD+e`FtY_!$#zR=gL1u-yhlh_|wluw#dQ73zw&Ny+~gh zXPxY@VRG-zjhqE~0c^S{aJ9ve+P@c(_DwF#t+lHc%satyqW7%G_`ErX{&Nt~xJw=4 z6~T+_k|_2J6e+dw`X+4h5|Ron&&|q}f71Kz;V@J2F}%{oumU6C2Jl*MU^6aAmujLj zRS)R$gI}sk1AtORrNb?}JfDWaTs8EEFm$uLU#q7iu=`TX-~LrkMu>vJPHq>mD*=q}b9o^~bj$HR+N)A+^=p5^Tc`I#31Y7|i8=>U(9zGqS16E~CBH!_W~zETQ)Ya;`{ zn8iu&7<_qMuR>8I`xedFmDR}x;7}VW@N)%g+EoW@r>*)1e^Ii&!5BD0-NKQvY}yt=ctBQ^cU5|Lf)0>N zuZ}4_d=H*>-d#PLK2gqvuLrfANgX_^8-JH9z!Trp^@o1G=uqC{g(oQ8E!_NUVloEE z6f^c$U3gY^sMvK@Pf07iVqgtT^TW-8HG^LDufC+diJP$JRyl;3`Ti_5n< zQJx+q%Zukh2tHmexFLb3$&$suz>6o#FmDlV+%Qo#W@zO8hYuL8{qg9{9WE8KZVG`cUWrZS1|HmWk*TPNyb?T+xJAOdd;IGmOc|P9%gM0VY zJx|KNn>S3KRba~C2RxTRZ2+7#K4^@7T*v%h5Bk@`m_aS%jV5jw;tqKb0$xg`+53Z&1l;({AfJfeD6p` z`K#|=&PZ}%^Ld`IfBmbEHrH?7*?fKL;pS>F%fD%H_Mh|c{-6KZKivH8H=k}kz4v5u z>UW=Reth|C6Zb=TJ@qo47BQ`i@&rBAapAie03R+-r7=hp9w4Lj%T6g;_?gTf>8L*= z%ApL~=gyzreDcxPg$O;eIn$!~X%psWc>^6iR_yc^+6)UquP{tn5H90&2Ba27$Ij+` znr9==ZpVn9W_V=$;Gy_B{$IFqvBgM6g6KY&!QgOS>i2FI@AUdNoA3YNyJK`Y)MWks zC!bGWJJCi#>#{M7Py19a0c8vor+Z%Z*rGz4yy4y3V*9{eUPdqOO^A)f{1$&bf7YT- z7=tqzEa82;zF|^->HMk9g?BF(zNMFUwQv>h@_1fRhxZ@b+{xqa@Lopu$}*-ue0qNt zZC(h*lU~@GF@_`uVzeJ`F=v5DKQWd~_=5=6kI&@w$w=_%>l+yik1da2@)SLe8Iz%x zEy8x5xx!g^u^A&5Lk2^azF99XYa1lNx3C(|{^VxX7R|XCEbl$KTYED0z1`fd?R7f1 z$aCayVQYe?4-ny5keAOr9zUy%&$OWBDgL_CMAWtT+7_}FaJ{W|y9$DyH*V1(s*uqK_uHwnVIo^Ke^n*e{ zG_KLr(J_X!;a&5J>cp3iKl^5L_WKNg#n)s+4YJT2gHvsPuW&kId!IOevGy+hev7|T zZB(2-Uw9#*OM;^eUi0kog4@Fx+8H<2;y3K#8GJ|Q&v+Dl(fFd>tXCNhFh(0b=sf{K z1syn&p$56pGTzg{9*@Zu-!D74#pq0?tc`4((DN@dZZNhQtM=;G;^R-)mVwE)S5#dZ24re?K zzJAWAv3E3{)K1FV9AU`hX{gQ4zjJBA+Vo%+?cN3OygYBCg>g)XCgrr-cqpfb^ieNp z1EZv*OFanKJ% z(}N5?Cz2a5zU5uKm<_dfpw4ybWX&V+K%dbkZB~dKeLQ2)vuL8fF7{3eTiGfw;7C&Z{B*3Q=%_I_ zzy19u)%P@y&{GpDT__QuF>LI7@If1wc_Au0p2~DVvNRhLG2+prm@$D)XH-16zs-wee=mdj(b5^NA6FOhzGzeWLSZfF z5$)(*4fG97DdZ2L1@7gsDuj(O^iX3QKEs9Xv++X4u;y%I_ZY>)L!W%mcxSVjN4paW zm%jgi#qD`*-%{|g~(Uqq>IjdcSBJrYI?@ajK_kJ)Zs~%>U z+p`I{rHPEsyuzRap62SpcWrE(*=xh}MMg<+&R>S#55D(D(?|ZFfAw!N+@%NGc)WA( zZU)rbv(d$<`C`K(cIhDozKezL0xznkf70`hl5?TFW)m}fpEMo_uX3%hGGmCfshUBX z&5M4#SC~8Ay%iaQsqyAj^l2lfJw) zPC?FQP%`awkiOs)k?>XKe{Y+wp^xYLUh}JGZ30^_I4L-5a*87!tvQKzUmP!<`PGr> z2fZ8>A6!ly9Imgf^=Ree=j%>Q&xU3&$gqCtzl{&`3H7{A579+4H%@OaeN!g6r(UJP z!F}wr1~=x+I&N^YkxcIy%iHt{_Ccr1YNxa5?JK>1<3auR`pw(T6F4ZAHE<(q zeCm9gI@V$OA{iH#p1wwda6GlFg)C8t+7#B1-ehJ5Kxsk_gq(>$YHlj0l*&N!R~ZWX zGEu0L?U1GmX5ujL8{jFy5C>;j7iV}V&r+O8QPrnqEn53mrYZl2$%m>Ij?H$|xP{+O zSNP0=RDE!XmdQUTDp#3sNJI63!`7fMG3B6|AnQk$5kbDGulw?j_^4y@mofe?ntP^`Y7;bo z1}B26e1mH}V16%yd-pv9-DGkg%U@e8V@|NAT~rHBAO~jjP94gZ1U}e)Mz?&E6OvC!#|T1<4u&l_kFiWX6M=Gp^Cp3we|7e^4FuY63@6~HY^E-MgE3I>CKxL{ z;0y7Ei^PnEL+jK7x0Mrb7rx8#_$}`+ReKV= zpw*{_Vld{Rx;)zE+0{4r^hCM%HF!);X-ZUjzQ`b4;9uVOy9~eNW#yTwr;f7F1A`ZO zYO@{=7;@=%ID$351G($F^32r_FlOU&5GaquAe!fX$sdeoU{rY*`pPtUww#)JzwN&| z;*KWy`z!*{WnsO=WlEt9zgh%ry~laxrb&hkRlNO<|Eu5@7N9+=?@w*v%hTTlUvd$zg|7Aw0)0IOL)pQLzW!`! zO3uaJ{kD_J(`9m>yhU(%_1kWhSJq@``gdcaI_5W8$e+H-qeNb`MwhbiMgR=93)tzG z`b-l{>9c7?MivI81Ja`gZhik-BmT54{wNdto~;d$?#n(+E6?ENne^_Pw2?EGg?^1( zP0h+r!Rnj#D|P6iyZY@}A7#v7rvIvE2wUTvb|Bv~=Jzf4eCBt>*+j8hOA`1ld7bWPXV*rS`|Q%6^QaAY5lxjgH~@TH&8G@P02{V81voCn6rP+GkY zEHISW{TLo>d5cCl754$N&$QDxS@Fh1At&TTx`koH7JqiHaZn zTJ$SeJ?Vnc8Kc+MQ}xkzqpNoap{+W_cb}60K|ui=s&VkvN<$<1=%{KQKFT9g*VGk! 
za}MqMG|!s9{pn8|r}s7wpWofwxp8L0^e*Wc+ z&0qcc(+pd+S@LLTyOWo!c+v-|{8WZ$iyN_=KMKZ=-@CMV?>p~rj^*`p>+@@y%Rl(; z=5$9bzbZcG95)a9N4#e9GfCqwk%CfIeSa~Do+UhM0Imv?XERa;ES zJb8JvJZm!joWZ(sPxHQ=xU$7XewpFsNH08cboxjO6ff*NnAfjG(A&aST>I7UH%HGM z52w?c6B&9e)UoU!V^apK+h5(@eE!kLE#}4LHc{`mc8f9|mS-~X3lkuoG+nR@pY=E+ z6mQx`87JN=1joxqbXIeu`X)o9{%V2kNcL+B)5MWi1p3*_4(H=vNO&0@Ptc(E9ISlNe#vLVrU^rXcd5A@@`ERh!$L3Nzm;+Qdw5Ks5*2jhDnD_Z|5eHK^! zo*1phmUuA>&7P5d<=5O!5sO*|-xf*c%eqbSICHIeW|769Ug(jk84u!c4COwxSa@j@ zF9=HmRzaV>$M_s?ExNpHZM;o;#`tqTgLB35Qfo7z=RB$2WbiZ(=B;nh%qVZ8Ad<(s z5q(o%^%|1&1@Uf;tovIaw&EUs9IYRqmmJWuZ-l-KBMV^RGRT$9p@L+DW^`H1;%$0iY+_RoB{ev&LOI51LZ zgYo`MG|+mWW8*w<^}+v( zAIA@&0K6zp%w6@l^moQ>hVo~xp9Er+V}uQn8RO_6@~X~<9iN9N;}QJOG_drS{ymEx zFU`E$X2+2X8e;7~$%yv#jn{bqo*$U+Ts+(FGlk;1zPXsE>!VI&*if+nz$=x(jQlYE z+Kk$(P48qVxpL{yoRE1ZgVvLbcyxpH$eZR<$4(xe&=p?#$3va*?!v_j@hyX68=m?g zoyUNquiB8ni=%nF@(A~$!gGZ<5GUC2eq;KY5x4nUKX-EFXogihK;t1V)vX=C;<1wm zbxT&YS)9lSIgd(P^U(T6{F*h+^uh8@qnEwD3r`z8Ggqq(=pwM_E(~Ub)1I%J6O;Yv z!xgn@N*=|ahTD-&p=dYx=?T~9h3IC$;@K;mjGc_urx~Mp9Mf_7pA$ShE0^bQeK@1y z;8ERPqDLo=fwh-h^WYxc7{5nelUZ>6vjNH|$n%q-n+zRnyk#6W20ArD*7U1)l8eKQ z*TQ#cmz!;ZtWBUcN-BG`6JFzWAFR;WeE8nw6wcm+75nEK4x>$_jMFxNj@S1uUB0sU z)akKw(ZdXIw{CU%qB;JErSr5i+8NDF z&vstC^Uav3Kb7ZRC(DGI`pG~3Z*nkrfAgPy^?CjM?1D#@{MXOpy%RX(@L=*dq0V|p z4z!hxSZzs07}e=Rr+AF<587<7Q57n+i+JsY4~gf(liY8!iq2LMUG(^EC+Z3{ce*(p z-F4&oI$?65lNH+g+O=y#i&H?a!$n_ZXn*8Xi7~WZV)?Wexdb*fn-D zw%Rn*N#M%RE)<7wQin$G8F$+tjCOg9EqLqh;UmWV1CT=*Qj}H0Zz4s^d%F>^04u;3m27QKQvAOpjc$6NZCyw&)S^9FTbtF#MSol)UST?`RIsI z7(p@z)Q&z3`D6$OT(^@&{9@DnUHwe|dzGG0w>d+|CPV47WN6x; zauYMWvhsjeCDoT6D=%FnY+{vGH2oUPCP<4XZHf!CCQQ}YUPg#EoB)C=)h#Vdi0U833ji4332m0(P#fNOcCUf!-4P0gpUc9bafbPMoBBu@3=b2U`mhd z$%kQsCy=yIFf`JHBpJp^Mi2Ns_pJM=ro2;|jx2q6QoepKocmS|I{jOV)#@v4>In4Y zLpwa?s7^G%bs+b9=m6iueVeRQS!N+5ee#w-;M5(wwFu~$jFTpp-*7hRk#E7pj4}Ks z{Hx7-sN7s|D<7UQT7~%X#sFZ+2;rMb!1a50olxpf;B4h!jH=zw;14#M2Zhx^;j}YW zeG79YGUeOisw3$j__xyf0Y-(_G$Q&I!+RdU!DaD#%1tryfVVq7HNj%^tK966;Um7| z*f709lfUGiiB-IC$D7jdb#R|wrQtN5Rj#j{p)Jj_eb%GNqkKH7x+;zwWh&G}W8f!O z`%I?ruzHu#ZPhivX%v)iWni~ym0*?JP17$_gKF z)FE%bro4WEN459#CA5%@$uCbO;=AfxT4vo0P!FRQ?f74NNe07rI6w~vr>>q)+2MI< z(+<^JT_fB2EBHLS)d%*J-r@~bjWj%jjG2`>x<;h7T6w%Y2l4W*rwfO{10VFia0cFm zk9dIB=$`het)hDj^YHd;TNj)ZEoXRw=Qo<+>0YgHtBxJt(OO%Wlr37ng-}WSlOJsr z?X!%RI0u%`_G{W_-R}NQu3;D*vBjx->4Ois0b}GPJZfvYa&pKrdAHEeXbh;0W*|j_ z>bLcdfpgnu1}iY-$tQvbmb_?KdK`_&VSMcYZ!h;FUrSEaLyyhoM7$fA@op!hqi0H8 z&!cEySEdQc^k;p2`bZ@`oR(3`f;0$CdiUjxvBMS5-@s>{RcbqNSr5U2b6@@p-*9u$ z_bqHGgHZeu=fv+OB4lV8X5bK)r(Jti`tXJLxc@Bxj=bV0c~lw7Fk)1_BtKnHVl9D3C~SGmC~!|)g`Zo_M} zZRt&*=h7zqL(Ud##Rik=m|82deJDfahEF5s-S66a%9L*a;rF)8Rz^mKb`@yzX^Z$h z#)1~IO`<=#b}NH+M&9Z;lR=q$J&pD+Z$8=FxStWZ1>#@)(|@%2U;p?2Z}YEy@#*HP z45a_`hacu$esFWUn9XQ2kN7YTsB;;Sh3sH7f0%*fZ}KkxU;f8`o`)4rrl*BpxSyx& zxeTh|5Dkps`_qXQ7>?V&uI!_XOc#!x%`+?mqjtz>{O0xH&ABU=GH|^fgOEkkEOxS@ zKFiba5U*GZ`ko(Xp<}Y{BLoX?(*1{pIZ0nsFN50iTf#+jB)-L|Fd)bCPTzl!0pjiE zn-^pq@B1H&&Ji}`O$#hWeAmN_I)^fp>@|UZ-2~sU=+8g;G-G^t)|$sM+&?#u zPHt_wjnQ)!&&z|>BIQtZujBk3%g(U1zhkv~$J4vD)huGnL8D!qYsTTHZ8}&;-|9tY zH}k?|toCxY!x_LB*^K6gj)>{#xN^p<7sWPj0ZoG)&lu|Hx&_)C^(W)~XFwAh*5Zdp z6;E=N?yLOs7CqZ_a4>@& zIkw@$h$4Ra$a(qnUF|{h9m%l5D1&~64f72KBA%XbKbqmn^O-MN@Rd>fl3(8gZu&sd znC{!(iAzR%m%O#$&}`9bu{efx`MdsJmr157Kl%o~)(QF@q*mQhndLcdL3=2p7JY|bdl@{=TrB)h zi}Stug}Khe9pAzn12{cUI)k!47>oNhu4Sy?lv2I_KUH_uba#>*_W5An_pO0mXX9`< z6e-cBO+_fI3*Gq6AHd&HH-o#LD3{7a>SV-r>A?m8|cQqsqR+*w>=P)dZuWF$dR(UiA##)$H)zH}K0#E|8Y_xersjwZYc z8P*xo@kibRc@rClK5T)|;s)KKZ{WF2pDYf;T}#!YyRH)b>uUPl|W1LsBh4-U_YS8f*t59JRs zRNL(k6^m_LJYV=`z#nQc9~4a8wRx#^)cc- 
zBf^=CRlHvXU%JTON7{6Hvdiej_(QLF$Z*A&$Xk@LNBbS4Ph-tDd5eTIdkw#_kG>UhsOA;qldxwJ4}h7x&+ylYWIN{(kcF zKi>TOPyTFk=jQdzfBwtAZ1>IOO5RM^4k4G+Q*2`f?5i2(-`~1l-?yIIsbRt3l!nts zcoN1^Ay42>w^$UPa~k5oUOS@pp4^`WG`nv0=k$ZO6H7T8iL#t2L02)<-}vp{Z-kV% zpIo2PBS&(5p+47+l9MfNi=(bBk~96o^K{4@>>RHg^ed0>BIrg>2}{O^JG;38Yt{wP#x^dUZ;&EHlxS1}P{w0|gZsgD#2J&`a+`2tF z9wDcXE!>?jVB~;qwFAgJ`$YIp{TpY{5!z$u4*kb5p*oO-V=cA{4Wy1_8Ezwc$rd`( znd+@9hrG_>Ve~m#zrX-Lp|qkKgQ)l84PSli-Z`7`|6a1?(0QS{EL_rKzxmBSj7|sh zb|HH%U1-XIooI12`O(FfCkXBwwEWONpNtF9{hX#4M@^J=6CXFPyLK zIP}+IMGT(txy54l)21GQTiZ@wk>d%m7LJVh4wgNf;}yMV$67?LJDwV$sl8C31>nlnLJy=z+e)=5Y%U9 zC|bAjNe^S&INeC=w{H*XQs&vD$BZd|bts6m9k!)NQVa$rlO5jW9e7>Bt$*vC&Zb-_ zu5vNQXHvxIF_@y>JPa>sCa*GeUMcTNopKpT9KMz!nX;r-mxw(aoxJkQ3xC3a;b=hh z(*Q}D!8Hs@I2}><3sIDYg4B53xoL`qs~I4!5TPZX9`|;CzooyT6n+aguT|G*CN-o6xC-^ z4jqGm*KG~OE7|?*r{H*)_8AYT8tqbeXkUuT-_dk<-Lo;gPA!8!c#>Rvf~RZ1$iPsk zN9Zgq`g?}XDdI`fwe%y0TR93rxDBn6i$NDE5>EMKwzP8A=0+yKor(=*w)xSxdoo+T z(G%{cZ6R3sq3jRcqP~5;uhaJ3$gi~8Sw9P7FS}6^#Na?E2ZvgZR{2r^L+-B>; z|L(SB4ji%@yh$%l{})Euj=bv9wK5bZP4_dHRDWqk2dzOgJeS94JlFQ3zvA%adzl`1 zm08}A>Jm`s@+y1Ul3!Kawoxx|X!ub*=m`g0dNwjs-f|4S0q7U4%QJWnp!HgkK2wsq z1OsG%M+SSgqzS(0;T;-x-LG`KA|^gQ+`I;3D#^nsT4~!n!+Yn@%pR5o#?l9U$^sj$ z@|J2);Jq*e`&wAlHdG;#$K)N@+de{hLxgY{vUfX71G9G6Psx|At-B0PeCXM`5`#Ik zEwA5ICb~nVx@{NzhN|FTKis2%xYxS@1U;)wO@87ojmbuQ0S3Oh*!654jC?Dnlyf~~ z$Du!0@b4M9o)n(-Cb|vY67-GTCBl#L0!JRoL$(I) zu3hwYd6kr8iVIHq1kGutS;Y3Lal!!AhNXkMf!)X~`gGra_d{DaDLEOP`vJbyb*{Gp zHTA-C0x*CVs(=liVEUkgKO^`a=<(^>0fD_S`dn z=Cb_3UtZ+Z13pt`>G1>1^6&Dh*XWc^jK z?iGsWPFt6M_VFJ!;pS=opb6T+%?Ekbo1DLCVs-b{ojm7-L&^It+{W8Ax*21>CI4Z^ zpkmOj575>b6L~T}Yf^DFV>{2LX%$T#vQBM|H~=xPDjxG-+-M==!bhKOuHXH93`pb6 z7!Uoo4LnVpRJV!DJ9H znKX&QcOe{-!0wlK=otQnc6gOBwrl!iCYRBR@qgN05KVr^qdS>r3?2gznGYV^^_J5{ zJxab9J`WY9hZ70|5MI3Lx};^%)-;*0QE1@L2F(wO!bOtzn9w+u=i;LlzQ)5Yqk{HC z%?ExAHw>KGKAoh0gbTxvHmqO6W5xi084O-^FFw9l!B5jgdI6qojD5TV$LO654zDVU z$K*?fncCPvp0<4$wc&{q#%;@ZiO)TWHoT}fo*yTttWnGjUf107Ywb8S<7#i2fs@r_sc zv19Ep2~VLWj%S1vqxoqDHV60_H&^}9XSeUBb23PqM=8&nF>)53gc0gO2jpG9d9#JQ z3{v$s&(i%5GNL;C@o@EGkhzjq>&Kt{qH#pNRaq=se?af%u~IOX58l)i+-o@x*)-`#uz`!^KpB45Z{esryr8*xnxbv>G3~D@fr(IX>EQFRfL?#V+n|`=o z#AZDmxUR-bZHK9L@RNh z-@VCbcYpz(mnWGo3_@^E)eg{>x(vPWTXkeSf1DwhvC%kB2n)P+mc@-iwBe&Ng^{uN zb0q`015zhkOS*Errd!x$oO~D#XOhjc$=17L`ijO02QF-GByZ#hy~nT{9kt&Zd8A&s z_Hnxxu1&p9ROhsKZLM<<@`^>fL$lLr>F&&Id(kV7+{CUat1lO?{LwBaC>xMbN%x#Hh=${ug0)P-*6+#lD=gg^dn;YN$u6A0REdPUyp~e~~ddKVc zY3+Z7Lea*aG>*!ModBIZ*{+M^sVvD!{kw(W%KM`~`s30)8(yyQas69qgkUpH`1-5s z^-qk>RjbV82_E`fMotU#&t6teGPo9t;!^*~Kls|2y0Js(e_my9#w!XIUT~%Vc&?BB z?U_E;LJ&hKm=;`Sv5ww~e|95)ckaxES;#rp!in%ouhK92G=~2V3)^H|apT6fz1djxx&x}p<)XcLUp2O9g?*`z)0J>~6gm*e7@_6{E0y&pbxVR0h? 
z96l%;PKOJLCA^UKc<1x@0&!)(Nhfi_@FCY0 zwGT9QpLq&IEA#w?4`wmQA@RTb^>2b}fv`G_o-Dl2`OEQsoe!WtJrOR?$y4pcc!tih z@L=v_fzH9EYg}ewiv!5gg=EV)BHm5-*61A0Rtuv>q(uyhnF3{$@_ZS1y3Hs7K$6!@ zY5Ptks{ge+e zllEPPa!lcqb&M?P8|>M(iB{-Fn;48t`O*gsvz1@r!-JujdnN6jNr&E^y6*kZ3c~8D z&hkWqDR-N1wO7E}1;lKxR`@Y8kU-+(OCZ$IW=6dXv0q7(d zB6{p$Ak?{E0zZe^c_#1tA$xB8qLEx)$fI6hlBb@juUF_-1EZP8;Nt#!o|Xn*SsSL@ zVDwJ$!`x^vc=c}hH;5aUfIo@)gSTplw#vsZWxMt|`t>{5^~>N6zx`Qv>=IavGoj}I z06+jqL_t((cJROe5kA$iybLyq!#_&TfPcIS@u@W0mI2WiVHg+@s(Z#O8M~{B;nWop zjchAq)vWR%rGDz(7u+c~Sou{tfylLg^SPdL0audozw@t z5GCEwKe<=i3feHbq?1M+798o9;d2ct2X9oJ%m7FS_Il)=5j_prvwqdDPPHU`dbBQF!43PlUcdzdAC!yWE!pA4$ZqL=}-a=L(}ednMU>`sJwVNVDS;& zgL{9_p#Q$32jJcyy42M{`CdvkPqyWvl~Vf|p+Z@-Dy{VMY+X+F`7>a8=+__g=NR%7k1zoHnrJQ=XBx@{AsDfH`^=U%^xFp`Sart%bDFTVPGgoXi!9hCu}1AgjSi zqwT?GK+%^OrO5G6nvM+Fy6eiOS2E7qRZ;dahSDF3ubdTfW!1@`Q?Vlx(G2X#w5>D4 zfPNGQt4y!^7v0KHvPuRNExb#oE`3xR-ojainA#dy8@*OiI1b#>1<$~du0qqL0|tEM z^qV1U%4%Q;mK=kh(U3CS!Y5qZ&VwFK%E6&mQ%9BDex+9?e70kQCC6J`P@U%Az%T9a ztRMANM9)VzY<1oDaJ>!o=)d5PSIG>>DhDHm0e-^Ob>*+nUEZUhcW_cjkK+3(bETCf z0bMwB2}k+RaE$sg4X#^0DbxGD%0?mmx@RN40}#+4t)E`>lH7ysBOk>2j9~=rj7zH5 zO5#^N%GxtL9NNfU{T4rW@PZ1~wp~pd0;FeC?_MoFED`_#_OuV_)MM(jtqXmsa?#rO z&7k{bgIE1F5>6YG2ObT6UpA<{`Q>lg;(Tp$v6$VTv{l~Z?+d7~s_W1G+3!t# zgr8uMc%AnZXOi&_iYNFl&+8^<_Z~mreET5py1c}Bzk41!8Al(6wynlI-48Xv>rkq^ z^3~5)Y%HVYnRV&E@miaY_kB8RwTk>ggeQ& zMmE9^eIB$c;7Jo0Mh&4d?q&pd#xPoj*{+`9^hJEo=h~hR53wJY$7{@r?&1<(y>xzf z%W%iz-Peba+@yT3$-7@hG`h~N0+Vj?X)=w4dK@wAmBG;J5ZBcvv_F%jwXn*#Zc=7D zFvI)k05sN4%IludP%PbLJk=gU)g-a|p~SCg`_)bAoss-b-s8{m zroT}vcZM}y&f=__lbCdxw3@hC*cJwXag8wnCDeZRi&PJ9-%QquncM<1-NM71 zF+(T|eKfD)gd-tlD2|8POqDa~e#Kxz-s9uoiMhRFr0_9;wzGf-t-k0`@%9fFm-l!^ zpb_}~?vIvW&?}7Yv?WJLAL7sBWjl>+bI*8|03T*&XbMmun)8XBj}Wz3MMU zFhiQyvJ5To)+mji=CIewnm!_&8U1FFsXCjWkI^8$YJ(O`%yo@Vj^cl~4^I~>GcZ{M zX>p^v7!Da*E4rx@!|U)TDC#|-WXj`UIe8hIRA3n( zcp!R?C)GjTelOOx@%f3xnabB6(=F-@7*lVD{V|YRkT{+}RSaZa&JJ?cku&h(6+P)= zOmwP)ppT_H&u3)6dhOaQaNH=~KSLKIr3ejE(hIGmk*Gczd!?F-NLDkN21L#dfRF6}-faBhZ|8sW(c zH%AX>|L-d6Suw`%eS4#Y?iTT4hQ+Y=wR?tN3ugBoKAnTl8CKDqq1ev3wd=-CDT~M} zG!yxEC~t+D=1e)Gb0Y5TyEg}a_|hHp!5{qoPbW+fI$21jFBufqL59n)&C`2!?!|wG zb+Y!G&%fCGRUY@e);}l?`Q<`pT`bhmxfXiK^^+%iQwH5euF*puW5)qqz8C-X6DLoe z89g%22JM34{i{E=xK0lUO(Gq+qD!<3_DF_bxN2|krFTfYaflsM#skum4F+$E7#3$f z&ai!=e#F9-b0WU{-Pf~FAvBzR<5-4wGIy`o<8-b5QH*Pghju>*$s%Ny-72^5-DzBB z+>joRcE+I&o^69dX#w z@6)E^6**F$X~5CJII5qaE1f=agF+*h7RBru$w0i7N3!VU^H)OSg-`tOo-8|i=gGr+ z>BYt%6*2D=6))&4`V^f>b3JE2SDD8I&Gk~JP^JHt;;Cw-b3oXDW+TZom2C=Gnn zJ=>Mo`zkgbkfoda1fyqT$nm7|#%O`F?o%YA9pKn~>|FVDQgtBXy$;uyxn8Uv3KkLL z$dtL>ZOe3nJN`r0MFQ_}>%;wcPvCsET^J)?w3@+N@6>qS(8}AS1Ir(u(!f~Nrzir9 zX5Fucy`Qx32B{a571zLqPgn9yox$@Q0BILB7Og0iEe}d$;IRzlyRJ2Ulx6rR`#=3e zp8+|%b_wn>mMwf_+|poxbX{44i$6Zq71-g>PM(JLO?spy58L~dPMwrfL+ZhhK`{f* zh3|Skjk`v>UQVf9Dxchzrawa~_>b3I8D;h(8(4GGk2KrWCNJmUK##kYZ15jsl#@bc zu^>NVxRor~LoagBKZZu}m-f{G!eX}3Cx`00jD!YkB~!_Gk|6i((_~=`8yz?`*IOP> z8rMK;7s`Rk$WqT!`V*c-X`!Yq;la?2>?dLU0nwUI7%UX{P6MI?>6f+))6E4 zZfQJe6z0E|<(>2kPd7^klrZ{|RU?%(Dc$9pq6RN?+7i1fTZYOrJc^&{qrA!{8&i30 zJViH$yarub`e(sVmUgMl?w&!e=VT`O4NUE>Jm`f!XfD5$!_&drwFhwU4vm9D$mFXN zMtkqqZiVQeou%Nk$8Ku3M3Q3l(q!;gT`nIi`BBEtc<~b z!5A8JiJr>bX0EJm(1+}Rku16$9!VFy>n2wk@3g^Pd+M%?-FsghrV}4C%CxB7z*|H)su!v{}GGvSZW8ho-Nt@hQsCA0dJ!Eo3U(1Jg+ufx?u14F@{>j z8L-S}mA~kNb_?#{vgL6Arrg1EXsj0Gz#y<+w~|b5w>_|UII>$^(S$6JPYIPhvbxPC zJ;WJn!O<2Lzy`x&*!Lw&R^x35E0e-K0^44?tB*f>G z)X0H+TUi+$S>Z5}cw`!n3Q>YTlRsR>m|UWrPEe1Y0^VoUUprh2V!Z?3<53{e;L*DO zz7?T~vgfy##uh_2!~nbRDAvi@H9WSjyx+C0N#{l1kv*5niylDCzn=9(D)d`4>3(2( zP(sBHT)*3*<&~)%AdXs>X6QMbJkz+$em<=ilBRKAde5dE%qXn`_0u{#6FQXBj7+Z9ShH 
zqI)fm@1qS!57&N7()XHpn6w@-v2B3+I79AUd3X@r$zXCYJ>k$vL#A)Le^f|{-+uk= zu;IfGuS^V1V#sKISXhn*-0FS8R`?G(q_PRw)8bBUj^{nsq~`d=52{O)?fHugLp)6N z1;#%P+*FA%YDEVVd+lZnu|0AT4Q+MjFz2rO#yh(-3>3{3Ya;ZR_M*&fx!QI2qnAcKBtzhM22hREB%Bdi>T)IH>4Rvb%-7|66%b{-&U^Ayo`5HsC~h*Q7-RDM+hpFq+Mzo6?QO>LujG7!jizogwT4y=@!0MKikv7@2s8 z8Y@|lQE#99rTOB^uQtECem&3R3!AGMoJ`n@Br^5oNz23bSl+PO0t3WsdpT1e#2h}Ullhcx4^Uf#{)o6(5jgE9SL?U6y`SVkdj zf4oQ0nocY~8u^NUJO+-8>Wm%Fo`O#-%q`7tT*3=cv&bDB8I^FpudjI?X?fVggn(cWbZ*2U&JX<~ z7rad!G|Vt*(dS4G6UWnMRbk}Ejzl_*Jd$OLc)~>Sz8#}a4Doyn=*b|%sy+>0rb+de z{LTUr*^fr(u7DZul#-!{anvHnrr7_)f1DSSS?!;ZbNcVnGDIGreDZc*I4wgLxwN3d z$gDlkz2uiS>yi3`7rejwr$4kvzS@i~ate!P3s3Ejtogt}-{4@!6@U6^ecbU5*Av>K znBqszjr=&&(=MFr-`z-#90=OtP<+yE7^h~3(5lqvNOb3MYw_Aau@-w~K|>!}+d0K( zO~021kMG{;K-ZjDYAVs}-Gmu?lrgiLp(CArBICJspp`amkb7siKmUW_=TCn8lQBYm_08AeR@;gXilJNfxBGxjMFVmw><%OSeuvyf zw*x}0L<@_t-cNk&_@VD`I44QAm>kJdj7t}hr}vE^=^}c!VPyO(gkAD1AD%c|ojwpM z#F*ORx<#|M$?wTL_|XjCJhPy4@lv}+@_I(kIaHKE_l z3;FZt(fw(w`eEZlJ5}fq{fF__kqo!OA>qUO7q#!&tg%tdsg_F@jqWIaLK@L}-gRO> zI&E~zi~*L;g5S|R!KXdMJBwtKxbz$})*L(iztv>{9DL9YTzyCAja-9uzOgJvm50|m zza!j?v(S)txA(mIpFC+HpzHT^xIO`{Y*5L17xRx+qeHKK_-QB@H>8Ji1~7Je5su^m z42JEyEkJ1>7n-x!{bAfM$jghj(2>lxY|g(M_Zw*Zoi~MKgxOdA1N6LrWE{ z1x0w%J0cvxwt@i3Ibpp#Xd!pxp42DOZUA>jky`w;3}yy6}}ihh8X2d-GVjGOIq4h zzH7+|?7~-Frf%gin58%p?=5t9JqM4G#EO}Ez_I^HO|RsWM_Q@zL;b^((0f(-F^L54JK^tnBJg znM=+>uS{@WxNU!;^X@&FS_1_<9HcYPPd$fTJkjx6x^)Mqp&hzRqJcT`uj0{6SfHta0Z9BA;|!ciK6M&<*;PTe@)cSqdh&^8!u*u6+S9dFrr$90SbI2TcrydOZA;FgTN3nZY7Y zGF9HZDc{gLKn6d&O3U*r*z2)%!i)Q%l+;Zd9zSAvMDXPP8w~Xe7m&X zRgYyTCnu7^wKAIw4gCJDdcn2l^K_dQ%pL#1s@_t_KOU6szvtg%4UP2M=&2=lLE7S@ z%v~LN8o5`SldtCycIgLofp&N*6VASLOTSA;Rws3kZ*cT$00y6GHP7WCH!|(`vrX@j z*8w&$8XgToJwc?Nk;<;Zxi6S~wK-{R=^f)28IdGh#}j#5_RtZHhEGs~tN-x(M>=cE zp6}oB2(WbK$d`AOHavpJ_8xAd!_;7Ovjrh3qYqjrtB!BlD0rpbKY6#Zk2VIZJ)1%6 z)E|#BGS$INe^Px$?x#bxq90uS4t!?f7eWVvgJ%Y<6{%bknHl&_BPmV!yi&&@s|&it zZ~tW^Ka!7ZxW+ReNB>8!@LY>xfGB|OE+4%*3(N8X24?RILoxjznD|i{)Bjjt>Dka0 zzp7inX22l3cS{aj^sPT#89dj9lvftzj2@nwUIqt%{?CM8+UO|PR61Du(J0v&c^`r5 z7HuRc@8}*nF8Lj(J!e2!3$f@m;0NT;X>dnHwNrL~+bY~lX3`D;T>+*LKijWzoe42H z@xD{+CWUllqdWgr+a&J;WLxJQuPZD4JUsPh^7=*7?Po4Q?gC%v^ewbWfta%RalON{ z%Y_F3aFV*bTRD+_rF9|A!b$%?{wA&2jk-8c)*~_(zSD=6XY>R5N*0)zxT3|RR%Q>k z_{ctTF*=1jqtPJYRmqmY=->VMf7$%$&wszk?v)t@F;IUw(08CPnlMuWAM_TXk>Vez^JW-lNUGs*KBdIiD{s z?8~-@^3FlKiK~0{bRIcPcJs7uZn1gS_WNfU=TBZ)17VXjlYt7D2@pdxuku$-NFDw| zN)8`jAgcAITaIqG-=h)((4mapCu+A}-@Kdcrb9oQC~Xd$-#mWsqJjE@;<2aEUhXAQ zZHI0$c(x1nEn@$55!riV?;_NAYOH(8<4 zkddePP#ZlW@AQImzLO^CqLZxWXo9lS{>XgQ5T9QN~+Ld{W!X zdB@u3yKDCNr)@Ew9E~5^-iyYZ=;eUXF?Z3lf&^z-s5fa-e=n|=L!%qcponO%JGZ?&+}}at?R+1&X%Wd#)H}b0rt1Mtjkwc=ey?eM>vtGo0$@mTp#mMi|D^8Dr7|$=bUNl0t*LI>6|{ zh?@RHD{>jq3@ESKA#mhqIt8@KM3ZypTczlUZT$=co zn4wI)dF4LVwhm`-N}dO3vb2{W$Jq^x-{PhUmBM&Av9qV#)!m`-+#IJxCK-65ii&HiE-|9i$WF_TI2%pO^O{cBHGOI^)VBmimEh~P! 
z2S2|0K_NEImd|0QJf9Ctp903CM|a}){>_)yZ^aAVz4bN88jnW0h9MQL(1-44#3u(r zjA*0enGyJWhWO=C{cgp(jZRN$r()OhNIrk$Y-RH_w^Ly&uk}59(cePI$-K}RYSQq5 zBul|frpd{S&8uV8nK3Q8&p5n`sJz3@V4x_tRUedY5)n{>8C3(Gg}=G|FaBJ*V6 zaQfZCdHZQ8DKA1tB9PKaMA{jBp63XIH=X;O`G3bxh?hiya zecJiF$RD+MzIJ+~doy(N(yhx}2C`{LHSG!$Pdsbw(1<6px{!HaI@RK-wyb{{Ww{U> zdTC1*9q2)aH5v<8>}2rVv*=tKw6jN;2YOR^!m^xh;bnMMJ;~=Rpu`7ZX`Cry&PjHp ze_T9vGKUWP_odJ3JKh$?=Gk7x^bF_vRBcDSpSLJIhbe}z!!ZHS7p;ZG4ogfA(P_%# zE&kVk{a*%ehdA3|q3zSX#s?PS9Ucj93*5rsXgiG3`p)x(Ji^Q0ef3p4F0O8l-d?+6 zgco6$=AkQ$*La4LTYXwE={O_|t;w%EdWKPz49{Xvi&A877Ka*ZS^Oi*`>H$J+wh`X z=2qG!!zN?y^D(GL=X52W_@K>aKlr_5zwk-Piu9-2UHH6(5Ihzlk3KtA`zHH$?%tk- z4h{$>@^ru7*$wccM;UA_?jETQ4mKCoX%-uk5Ip|)<4-qVe*L?~f)B<+oZbdZKwQY-|0C8|I9wFyXo;@hN9TxFS9nvvkqc46g zJ!V1GnLRJ+|LL>y7p}Izd$t(&*GIpY7d>vwKYdVTo;YjRqAOrIa&aPjV4$ zo>5#Q$<%V0NO$jqvY4@+&^cckOTe(ZnG#zki z47dbr_q`2rd4e&WN|$43gcHRD?}#B!Ip^NIGRjYWcrC+f58+-~&mfun^eK<$lW9u_ z<sulj?(JV1k^Yz^1jE!`SYg1TTOKyJps-1|wVZVHSr!<>ZLt?U{>f985%qZ42Q zyLVGB>1EhKD`U#tyD?65;zW1K%Y-RdKBLN&3bc5A*Y5`^WW19h+r!(kUoC9=d1O4<>*RAxG4otX`Av74A zraU+;x-G+lGW(wbQzssq<7IfDvXVD4J-AAwEQNF}Y{JMucW|n?_6q^2%BK;zADQv6 zx3!CU6}cB~UJovnfk%>IdF4)y$|9j-u=BF?C7k?I27J38!>JPHUG3(d@`+qbx+O1z z;K*Sag1tUqESk9wlU{kJ?meG}EB{=3qEki-G#b3%+{H>fP{A9uuj`rrgwTc;x*8JO)j#rYV$8 z<-wlz(Pj8gDT7ashX#1+d7tj5++Yk&aFEAO<;};ySexq~n$D}C$v~KA(oJ2$YxL6q z@ZMVN!a>_a1M8%tryxCWMr=a`Psj@yG~um{EF|Z5cs})l>B7mNX7K9CtPn4h%CC;v zV!z^wH+TXIe^=YnKFb+hR@>{f^6ThSXz5r)LA}Z#`4$Z5?(7Q=Q*1h4IvX+iOF7hM&ww=$RYM6@`7Istu-VFiqr0>W~ zd6tfXFZ@f6&Qs=8Jfy)b`w%kPG@zbNGrUKy>||uCv+;cO9qAcJUD_0C*zO=ab$RKk#LIx zd`7oy@$za5Yt9$})Mx6ug+B-fprQxnBv8M~8_;yPeCpHBTqiSKPThOBpn92H#~Vq| z;Q8Leyzw%0=K=b41NcY9Iu;Yyvs-sE}cEL`N>CDHoyGl)`b5!*rfMf6YDW5 zH<&e1papDUJy;tsi9Q}avvs=)@G@ikG+}t2XV&{B5wDZSL&uwh=W+S+ksSktY58wo zZhr4i|7`Q7a4sglLcs``g!uc)`;OtP&!G%;Z+PouR3RZ^kD`nLvbO0EEd;`+o8Nvn zM$69%1F-M!KOf_vMZv?zPjBwzRrTa)TV?ZLa~P+Cy-cPq6o%$_aXOzDBb=<(RFe0M zM0uhgJ`vB7^`lMT88Y7Gy~aa`?1>4=Sbj9)B3UxQTpo8zzwy9+p8<3Z)+*1_cy#5{ zA8jt4Jh!>`i(kz)`QQHT+YaZu+=Oiz566hv4>Ra7Y%sR*s8JEGciB$YGVKspnBT40ngDqbC z{uTkph}XpN{*w;-di0{*2MmaLAiriXZCiD8VdR@;5iX3GWR{$~ILufBJM;;|64BkS zveMB7<5e7q@qGQ2Mw$BYns0bB!i$e;GIgd2e#BnwwS07`N#cHn*6zPmM(O0oOzJjugOpOrc8RWk6f9RP2>_$GWieDB2CN_;LpDW3tHH=H(c=x_#~ z`o~B!JhcEI&NyRF+G1#IgrF_zHy9^oVFdQkg8a_Gu^Bzd&Dwd#K=U#~N|@(T8`qeZ9!o_gBe7@L%xL6W!llP2QAj*vUF zLko*Bhudj^J`64l20ITl1{Ct7Kb}Rs<_3%~^cDl^Y^%=*wG1LW)ynT1`&;R%j zCrrV^3@hT0GvxA;7PtAMj86{4y?OiAw3CYHHv=}KZ!~69qa%z1Dt11^eGcDECkxwS z@de#{7`a~5ZmVY3jNZ#TT^)IfzD-V|?(ouquP0mB)TYN^**jj@C)1aD8@dB+d1;!< ziIx0Gha-P+wJSNn~y%clKZq5A{RoO9v(kL&OMk<;s=Km){7H;NZ->n!|GrZ3{L- z4-a1R%xMqFhh0qcz~Kzf2M!ADV*w_Z(NddmAe^oFcwqrgs0GF#yI*b>+UQhay?*k; zYg4w+Fu5lVF6fOe0BYk}nX?6|{^;8oi+LJD^wDF4aWAPl_hP&k!}&nEaddCGlYX0d zb8s1i=u&+IxW43ddFy+hUP$if)y4N@wf6D4g#meZ>1sIJui$0ukp;GJ_9; zQ-Jjw`Vp}4ny$c8=_lM-dB{Bd`796Y{W);ZXQ7mS!oS)?buwo{i%XX~M3>xj&z1yl ziv$fwHfN*1^WB9rdEEHSSZ(^3bUC9m!ymcO7dnjgL5I?+7smn%R7c{k-AD-bxW(q@ zg_p6z?NOm#Zr}bkjKV3ts}Ef$sZAcJQu-dW)V{{MJbJx;*TO=$lVdg-bydEP zet+g&Q%Vo2lihs6lA-JD+^IbDVCXg;xY+@y4xlui5a#6S2On&{_~OgW?ZPHmwA5x9 zRUdS|4&AEh@^*LVDQC=6C-P8FKNhUEJ#9PMRb7pMP1{j`OA^X&ue1XwgE>{GJpfZ69c8q6V< z0n;Y!LVVpAlWf0!ZMTI!Q}gLhIB8oJFZ4r44&!5EnrOf{|GHgh>#*NieRV`r^7pL1 zPTv(J2M_W!3&cp?d-Wij+ThIP3_W}*Qa?!U$==cAUEgfSftcY}E_X<7?M$dFIK|G! 
z;C7s4q>kqaSPRE%L0P+>c~gDKc-6N?i(VHlTxxg4z3{I8k47)*_k@R;a{_X3IMlc0 z+CT|+rH#?kja%cDw)8mNB5cyne*ELjk3RcsbL-CC&0qZ2zpam|ZB`#N(5~e}U+q`l zQPrk?vrzLs&x@H@GMZ=%6u}T&cdPRK05&z$IL(G5Xo9?`X8=8#a-}8MsM@26>58y1 zXht^-F5~@L2g%l;91@8EXcI+b>N)gc_3a@%C)~pT2Qeb} zubl9!a^WcL5NF`8e2Ae_Q`V}FYZMq7DQ)Pna2ewu@Zd`0JMepiH!(Vi8KG3O!Lz~S zfG>534ozIH&q{;L>V`Pep@Uhbf$8OXJN0xuD}3<8Otg>>F7auucKORW&((4GolJyI zU0B~`AOmmY$Yb=YqnKv{bBhNYwq>Q(y{_z?@vdw4>W`159Wm~9jG6IyrH4ah&dcDD z>-7TWtLS;Rt;;+Eq>J!h@>%_o!3pb;OyI-N4&JUMg-8E~=Wvm?I_)QyELN$AA27v*y zi_wqd7~i%Z^qIoE-?qus$jYlOz(&s@X7}MXe5Q})b-aj2ws?(f@Ny=v@qyezV6c(C zk8%~ak3JW(d*1h;I*z=M_jJ$>7QHuz+%~D#(T=P(U~gs?kPt{lM=ygMt(WW=5Gfiz z$%?k6)E<(@k%0#7WW36cXR&vPHR<{Rw`YSrk%`s|LVqi_X+`fUfAW~91Z%Q`H-w32 zr6up%oXXwtg+AJqjREVL+?Hai>t=*I>A(w?A3EBnvKB1(bkjg%_}A0nRnHa;O4~rY zQAqDgi?$6obnwGRZQOV2QCi>#UBB}g8hVb->F;T^l?Slp4=D1g9T{D`507cM@^5=)lYI4& zRM7*w65^{g^Fki7BGG{FUcKgLa?CsWXcjjTX;`jsJXNmzQzbGRUq*>&UjQTr(>B2h zcRW?>s>3RO#Wi0rhgf6>KyoQ@moqk+dK1ZFU_QPz@v8se+(+SYaPuGk{4X}Y`R3bV zzniXvIZ`|2j zF1Gdm@JB!1{EMG_lxNwS&CfEB|1baMPc|?9_y4rH@%Y8&YDPbk$8YmAW}v^=VQ;^8 z>dNM1-g>+N&t|9)eqpacxA>g@>Yx8~^TQu}*hKHy=F#m(n-4#}nukuFv5AA&&fk9d z-R8{YLbtrl&`Ig=c-A$SirU&8}l;gYDTY` zJn@<|UUmSVN#w|Mq^w$_!tZ4q=56j^Iq@lZC)tW#i)wNeZSNId_x_X3$@3jtR-J77 zWpvA|R$27R;*|;PfhLIz=0dc+YMU?}#{-?w*2i{GlLgy`dB&dY&@pu)eQmkZ$AIxcNwcXY^-`R$ehXWdGq$2&HWd}u`j3C>*SL6@QD*b$K+{Q*+ML+C)%tn zxOoRg_v?jQ*lW`1@KAbuLMc=>&tenqQ^)cEIv`%>I(!SB3;_oZba-GftVe`Lv}*_2 zW_gJC(F`w)^V4s(aHIFyOJ3i+U59Bg)-p<(U_HxdX#B*ODE1{7bST3noausT|J|E3 z9*)-Xdv9_@w~3ou@guX-9?NGE|KctEm7yV{8l(LF(yAAOu8dGa#eS;dBojz}UoKr2-Zi#LH!fP~LNkCdnlOF2ka>QccEd^hJhI-t|XouWxxm z*4EGf&lo@Hp*OWd`5n534m?nA@`UXDZtXAWr*!cjsQ` zOLS;po{IG8nHEv@GCJQcB$NX(X)<2Tqu=A#%&p(J)2779Mk;2Dd;rEhtJ zbE1D47msG-vTH~vCpZbyVaGraGMJYseBR`ZJ06e8JmVSuF{aNBiPDjMi?0?&(8;0; znSPQ!Pt-=9W+5gX3qd7Zh_(bDUhO-bppWI@K4C%swLqnD6#^unh|lE#d0E0hRNbn;G*vpX^XS=|%^pG! 
z)jRD&U)-#7_zpCzb13b*AXxwrvex9lCeo(w=Mo;~kMJ;r#Ql+&x zu_=Vnrx_2w9iz8?%oxsC$oWa?Vxi|1xBE~gtlxO#aO(�+Sg5)C8Q$p} zC(VFEo({Ex?qZ7o^tZ4|XEXMz`@@X+#x?5xx_K9RS)}Q9#u>cHExI5q4`FrHE_Dep zIn;$W$_b)&Vn>(-Ewbc#KY5JqtIud5A$-0l2KkSFXd&x)yB*fz9o|1`JR`i9!T5MM zH?FcfMEkQNi}7?VmbDWlU19OS?k5YKXIeP8Rv0MftlTW*$H~TS_uu6qo(?6)+9P`C zV;Eu|xBEfcoc=diA(g&(^C%fnc?-Oc9!EzqeY%B__{X@tw-=t#N9^$Rp)2X>%9?Pr z$&h|9i1G@TMn?5XZ*nFf|7fM`=~qi<4)KQWowtcv{61E@hm`SEM3^Oh{hu)>xz#d9 zuT_r#^q+V3UUJr}uC+&pc<#dGXg~Cc-9dBCOljZbsl9AZsvKV{cXoaZ^x9AN95(C} ze3qe`{(SWG{ybyPnL}(FCmcQ6!cDXz3u4EU54$*wv9uGrgZ|~8{maeOix)S)_{A@B z?D;y`u5V6G$Q^v?(ix|h_u=dusC;wXcH;<9bkLxI;+b#<2ryoQ(WpjgF!GOHm=Q7{ zb&cy^#>t7M=u4V5`!y6X}TUmG2>>?yj~$FKvvlrmP2UDKko@| zPZ=lI1luH3=iTE`Lcy%z?Nv4G+c)e7cZGJQi?|HUOjF!QlhCM*gcCt;nxsKNFtCz_Lyww4|loKU36de?m z)n|+E{Eo0HzXZ|@y|!s~aNGumjSTpy4EO_QGWF1N%?d1KhI2w)YxyTIC0fEXS5tP` zJdFF@vE?6#ZU_0wEk8wuhU2X`^rIlVpYp)M+dlZjfU0iY z4vmM`@j+cDuk>Ex1I!dAeOK{s%K4sV+qCm880w-*vh?vYMm;0xc?enwQjB9frno`g z(NP0SzAX+Z0{h|QSDQ*3tb35*C&pWOr~b;CLZzX+@TqiL`Gwec-Hr?{xU16P6s*Bx zRk@dF3Es9o;7*+(#DuWuKI!0DI=E5PGkjBQpE10v8$J#_E5N7~tog6*CGP#CpTC~L zJz1JOXcnFF3eQ^Fb)Wu>jv1q`wg!V>_kui3-ht7RsawxSF9o|!`TLy!r5se?F!|Td z+@fop_~1e}0PCylx!L7euo+2diIq^f^ab4`OZ1X?i+2PWJn56sP28N44-kcR@{0jP zX6U1y81jz@c+s7&zy0E`e*+UBP~`CoKIC03Jf<{h=Q%lC^lS7nFh)0*owpAdc!?Gh zKee>8s0WrZrtDw?q2Am2F8#D{oZTyR)b3y*&Z(l;eegzin7l2$J!t2FJ+R1!Y^pN# zf=`cXSBn)8Wf<>ad{zh4sG-f@DMkN8Cg{x&9sX#4T^rD?JjoJX_~4r|X28}3x$T2z z3>DMvx}H^R^__M>&P(fg|K5)WdF8=vIG zsHz7J{p$Sx0F%BP@EtjY zf4Gcb!iv1WO`86EFWw&XMccqhK7E~QPxJ*{54-fKssP2wf>+g{zavxXUm5NvLHX9D zx|hNK?RVkWYY+O2oK2;}d*mz_@p6n`wb!A0z}CXa$}{lf1v{GXOn&n0QDV^HOkXuQ zZgBeNzx=XTqPI8y>%aN$Hm@^+eDU3#&4tfC%xmz{=9>(@YInAY!Uq|K?`BlEnK#b) zJOF?H(~ma4&RhI9w;pVM@$dfc&HwzTf7k?q$H$?e~>2{&*bZ0-QOH*vhhjr$hGmeO+pxucw79B#xr(IhUt z!GM<`#6+8{m=umT8adP+W77VNU?y6azmrvyty5<&R>x)3(QF+e%IIP8_V{UKMJE$; z+o$2hFm2MLZnobuNT1Az&k*`N92~T1vQ|MEgBU{+a`4klv$ZyQ-@Tu=RgKeO9;~|dj8T(Ey|PTG zrXR2F3^qJ=rwhO>kWb-WAh6uHwP%{g)X^JFuJI?fOr&(O<4 z&KQB?h5C-sBAUF*06#{WXnyec!SQIprJax{;o!hbvIoB3F{pQ~A7H@McEJ4ln;X+EujQpp-|?1Y z$YW$b7C#TY=b#`=L8I34PdxbO;p~AiyjNEqkU|5T%QJj*R7RlpwQnE#11;$V9-oZw zcD7VKxu=FVm$g!W+;DG zpUs<+PO~G$&Kqs;NPLC^9c01db^7#NeZ;5fbZv!U`do{u=ZouWOaJ3%uZr3HOrJ?*eM!G}kx zojcwgjWT`hs-RrZH@#qT1 z%r%R>j3@dW`jRZpLJyr*13c7@2RXI1G4=;spDI6@0~2q-U@&55aIc)B48PIn__0&T zv3Y{7LUZWer7>9 zfuU=`@&c`R;EGMogWa`|Lk^lnv!faHcmf++963@bmN!pZkj-N~7z~bvU2oGb;L-&a zpmrX&LZloxS(^CFkZ5t+c;tKjL`ffXW^gxVpi9>K+8o`~SbBvNs{MuUEME1y47~BY z6lkbVv53b@THj^y3V-M#{nG3@NM0B+U$(gaD0!ua6V2%hp4Enh{VD4hJ{isJ0yx@& z3cOyuD+JeryYWnDw-%C&O&&j-zW&;^PYf@rce*fuPm0q_0vUkmBn!=NUN;7cUgF7H z40Vt;W8%ccj&5jRVf1*k((Z*5Svs{MtDD^kW4nqcyta=P0?T{jD!2-dqn!(vq0Ol~ znbbEhY)^k5QVi$DF48m_n*N_Y6AG#1(ZnKA@?APG(BAl@!si#=8D<%}wVO{qDNc0? 
z;CI(=Y;NDVy}6Y4_Jlzx>-O4^n4&DY=LZGLfc{rS_{Q+lI(v>kmxpJ>-N z8xLCuJJQZYy7Q}VzRCgO-q7jek3ZR5yYj*2)i>8i=dn{+SbLaf`oV+7VztF6Zvo+C zeA9OE%kCEo?sWEd-+jIL=KAN;XQ;46TJmBHiWgBRd=iQnN)D&rPEPzoqoKzEGQl~Z zVS%>1zlFW}_U9R>8Jv^oo~5D8PbwE({h5$5@nfI`Qy*rruaQB0V|u>4c#8k88V62W zNaxLhTw%eqFHQ=xONDNMU;pMm`0@`<`tB9eT{~T$-d7gz4DHhw#NVM$x&jgufK~qXPp*Ev=pTZb}8^}Wx<@H>aZFx24@*tRUVj}zy46JYjQy#d7PU+xU z#pkAygX`Vk(11mw3W1(@wjN8rl${5qpLz!eP2>mDuliNqGDyg?V2r_kDjO8_m~rtC zqxAhIJo~rrp*xgJnGoE;SGbhYOy@O)1VoO`G$ zCs<(a3YyZwH#$HSEg&;SBXylywD#Uh*ZuT|aml@F>E;j3FM%==?BV}ZZZfTMQW6HA zo{SvK)sm}`u>e7DP0HXLEb<5R{9E}6*_HQLNMaZf{T}Z1_it!k5gC=`}GO&k^SNCx`qS!*+MIgdXR&KpFA_!XE};D zvl}2#b9MovYdS_A>4s)quXc?NJ?`(o+8WEghYz1VV@M|#KJi^9Ed=ie4>GKb`BVM94hoI1$|nDWtR$4_N|y!}O+X#=agh3>*zn+wDKEsZ?`8#$xTC`-GM&=*bmhp@9d zg&v;5;}f1~bof}hZl^p0Hu{)ukw=Cx>PU+&@Yd#~>Av24l>x45DsyB6bv&Dg+h2vO zbgRr=UJvVs8az${1C{x}KHPM!KJ*iM8HDtFXh=tknGK%?sbj=XeplUtTk$Hl)AirO zikE0Q^&a@$>&Hf)!$QVB+N}DhhYuXQUBWiiFKWBW z-lw*sxPl3;I?uf~UDD5$Gj#B)9wP(N&Ho<3m|U#~RM23!8dNv6q>>LjFqIJw3n#E$ z_q_5IZ7`?bU$VaPEjEBE0Z4uNnONdos-_3?=6k%76@Z4X5=+!?b(&hKFT}drX0QBY zaf|o7ns%^YFW^xFC~_*Rw(cm7hiQSpir-;Dw7SjHkvguy|ZpSb7I)Z~l-#$s&4tj+zrydULpdH#dTNym$fZ#jp}!r{Z>&YMU%>};g4dh0GGzH|J$89y1t8QvcE<&kItc_Dh- z%ZUE+RWvQ*82ru%@v+I9l$YYtGjI|6Q4IL+Zr|&W(;FFNcrzE5wF9wC1R1**zgob^ zqwEh_nE9Xn<)7wF^kVaG|BpX!!J&nWywe#P7$6xT7}XA-^;QkfgeQ4t1bx*+iJbDF zpY6*%AH#Lb=Uq6H^6I1~)gDaT#yA{pP3)D!NXaW}9Sok~BYfAvj*R0~xO!@E>Zh)5 z86=Do8KBVoV8+{LjQ&S1H6d=Iz2&WtIB4x4K6z!)>@?hK11n%113JDYqYg6Ju zzkXY}{*k|onnnj`lFeYehZzOL5zVcu9T+D=3$AJH`Xo4(abzBy?EK(8{5H=tF|1EU zd;0xchti>m*s(nKcnIn<3|IZ>gwqL@UY94OG|p6D+~EPuFhLFp!IKQ#4&YQK+8xVN{#bedObe|R zt8OK)=*SQ%%mRH#7kK{Nod=^cv`=+ETc2<`dSyqg&r!8=}BVG(ZU3C)X|a)qmVSjCXgtf%#TbbuWbt_6 zM$km8WX^O)Pw|9LCc+2J=L^GRaA=pVWdxeA92H;ziu?(g(K1@;l0FIb{R(v%uVxJ2 z(3|1Vf|L4cGY2wukL{)8y)tth!w>`hfzmv$ZzaEWZs|+p*9K$q^zUcl3EDFd(t%@C z#fNBi_s#vL4JQ1pA zEnXeV5J|qLO5w{8meh{!Tyx779WsQSXjek$PrP)8-yTeVntMnC7kq)IeuokAdf_yF z|Hq$=ewcO~pUAOuQ{L~Po4#KgN!JI5flVk4p`V0eqN9e;=oU`;2(s>DoTHyHHeu90 zR{tfxj?b8oP6am&9*DmEo3>OM21EvIi$sB(=lUG-_&&K6CgW~>@OOO+d@~1l>PHx^ zpFLfM>U9Rmq1sfkpALCeKXB)M@w9VtuxP*=m|j_7o*a6NrYk%K1G2io!5B)YrzaWH z8HyQV=}pF7UetRIyJUDQolqYq8vo1=huYxi?xFuO{yXFrOnta>V9?GY;)HOkk3|;? 
zZVbYvB}enR)<@9A_{y-pKYIw{wlM%b_&l%gml-b|w60Cz%fWO${Wo!^(Y113L^pc< zaDBz}wdq}6j`zWYbQSWhdU9yy}e^?F=#o;N5Qrz`OUIw~>z0kBeEK z$Tq&!|Lf~p{EY6`u6>sNDGXr?gZkf>(P74gWHmX4FL|<{VXmkR9qbGeizV+`d>Z|Z zPJoAlvfmeO#R3FeYdDpg9CSHj7m4VSXgTNJbPc|79=WzqOpj}CNq*^8-(oDPMbJHP z1cM=5x$?_1-_lGdo5{DvNP~y`!31X(&9ZqICm1u^#qqLo8Q{eex091|h_-y;;??x( znQ?MCT-_&@|BEUU9*1UUVFvZu%=N;4-DpvResC6y_Bo4?Rl&k0nL|s5?7nI&qMaDO z9c|ulT5R}ah-_Bzs#&MHuPvJG6HDRpa%o^Ler$s}2&ex0HK^*Fx5!#CN~n8BJr0a* zx)PRVR7{sOx$+?DTqE!*cFG(hh_ZDS0|!cru|xg{_$piJh z5d>WFJ>?<7`Yc|e6PV>nv5bLb>e>D9WLvl7VT@osTl6oFGQxwbt!lW2N53`PB@1K( ze!cPsa&V=7N~_aXjylcnA?Dl1XD zS$TH2hJ$t)T@CgItw9iO%0l0M{GvHy4B$TfE(6`9=-MwGFfU6BMmK{mSn_n=Kw#t| z{?Gj?VWpL*vIf?`bHA$x7~#-&H}TmiXmuFb9J(lVOWPqt`L<=KDDxw}lI6(UB|+4l2FXPwK-!*CTaUWqRg+_wzhh(4A+1 zd%wNe{wfDjL;KM!-8SG|GLNRxSAKK`r=-DD!uHj8z;`d5j~D7xStG~Ma5~^#Ed5x5 zku5`S@~J$s7NWh18`V@^WkcQ_TB2e1@qc80>b&SbaJMpS+qW3xyZ%D~ix-t2EdMMr z4UHJwwaY=fzgxWVUp^nXMt=@2UHdQ3$hj=~6)@5q+E3SOaa2MKk9K}#?Bm*DakC?9 z`lM~YG=ko{Y1b?5WI)FzJu_hn2h`a%>tPMLxR|g^y(fL+i8TEPnwGwL)k#l(Gsbdd z`3?=~P1lu%|E{&Qp+VPu=I{L3w&_XJ12U*@0ju=*yb}Yk=7V2=t4Ck|&_LGd=Vr)+ z*Q=zRzE&#ZF}WI;b6uM8CQ@_{lAC=d^T{KW=1(+`Z!*cK%=xWO79S_Odr*+1-TF@o5H{WV`1A}b)COc1Y8$wCrR0qPcFdsuCIQQtiZ$bXt~Pk z(e^Wo)v}f5$k7vTsm`P^Mh-rH)oFQn*)b< zi+y(ae4f2+w-4^qCJ5&m+@HxPaVF#H-C~+w$WVOgoVd0b%QMcs4*#4=o4~hiJEIWK zvm@n)FJs&2%`joig!SnrfG>)5dFE6hPI`9t(X+g2UvIwafVL-(Y^yf$c(*xo<@^k8 zfAlZ^Y&<$W> zp0~ZTi4I+}m*MV6`L1=yC=8__)b6&t?-Vp5$bP2k2fCqcerr zIng4;Ou{mTUCv;7+GHhKF!(aCGYCGcJc}kK%|anC5SW08S*5C7v2&TszYkAAO7WnM&MM9)jJi49{Kd0vYS@hka9 zZ!J^Vq?+opAi-e#H2lOrW`NU&@KiEE^HC;GxRJeJnv8qTH~>Ca(~dNy;Pd|NGT5VW z*6Z#k#6pugaM8erQNg5GgXV3|6IQHfxSvhNg{ZJ7bT^*8$QU#-S6fk{L(}d(Xi=l~ zbKZet@$(VmNqvZ|*N@@_xn>Y~P~9B9s4liOGwfgQ5Z%B1+g}Cu)p(Dey7<8u$QiEZ zK;`Pf({^_l-yDt>UU0B8;Ye-zc!m*jJvE)ahn+x7T4VlNMYMj^&Wz_7x|GepXE*TS zV`7vyX~*yAKL>_J7e>F(&6touKYrb-U4QY#^~xac!on19JL3UE21A;0>dfJ`G=~e? zEx6$s3=0X=uEXZewndt8aCAxqG0reZ+V0K(NOpH&Io3FI?Uv9^=mll-lB?Lk36D9Z zP``K%7EfkI3w&jmwYA>lnl9NAYs!;`LDoe7Rr*e>aa;ApK_;VOD4W2WoHGQ0%hUIG z#u*ZDBqPPC+ThNc&;h&J8za<&jzCC~(vR>jLPyO4NAzJx8)JR+(cXq;;Wu)LF2gHa zjh5m0JmcNW392RC?3s5AlnjBxgXlK~!k#NvTW0{r_cEl{Gt_i_g5e@L;648Btvj2W zeR*-C-P4Rn*9z4@-+XsV$c2on?OwRnE~4{!ij%vC>C<0)em!|kN8G(P?PK}YEWB&5VS!IRi+7%j>;B9lOmgDGlNqnpSe8^p z&CyxwTH75Qy{LV^iC=5P&~4>u@4^|Fo9XuPhNrTa`szv69kgj-(*hCMd|51N#w+w- zq+gsbPqrFz3is*%dgI=Qm$GX*Qz$X@I8&czr;&qq&lIvr9qAi8SDe9dzQt0;$$hN? 
zi{bpdFcIpa&d*;wDJJx#B&Qicv?Vuc?7&a*!5f^>YK*$m&)xX zaA(T)j12YDPMvQzx4!;rbK`fv-F*Gqzu$cEo4;=-+{4=93Xuc8*zNd&M*1;w#&GYD zP^i)QcN*WkY7v;M9!pm%!y?tf&+f6en~PU2x3F|3nR>ms{oT#Vc`*6OokMlW7vu5e zOIOlQFYB)}#6}wjb=uK!IHM|gcvLu#M-N+Y>bcLQE0=OkI9)&A!K?Aq81ZzD666Ry zEXdepz{tz( zVv*ekuk7sm^anp2gSRkDXs)l)=3;3E_WHZ>l0BS5mo?vR0j+1}+Oc4KMz51+azw`T zyFdNWkIJ_U$qyP=-0kdvv+edfRomuqPdD9fr<*?Zd^9^89|;z@@7SzyzqlEoE@P)}(tv0PirTuSFuv`CQt)K3mBNDD+zCO*+go3L>GHJ}GdLM!s}6`V=Zp*b zt1_Rr3t+FE64Gj4ddMf~7HQ?N+fN&wg{m0`Sx9!)gRxh7qH-MmiRaqJeuo*^iZk2h zd5a5m0(%C-?c~1}Ty6Sx;L!WNp6}0y&ZzFB(*2ABDXb`5nh0D$h(^H26O(snLS8r0 zvy#mF6l4Yjp4PFZYEYQrfx@)@Oc=W!V@dg7KQIvnTSLJ;`T4~n3TL$$p*wB*l z_e{rMrv3&a90|yL6ft%8Od7~`DKDJnX8o+P;E=GX(4++*HkE!1E0Zq(=%8$MNeI?Q zqkx0_%Ig*_BvaQKk}qD!G5O&#_4Wfda6Rk4cinw|c%Zw|%*1-~tg;4A8ESYHBvaSw zRvo8vSpy?99--~cfZoxln=y32dzD^_;0!KO;+_BgY;pH`p7pv9dJWAczj}~0=yaV2 zEnJ3YQz`G|uWtSNrWLMuQPo4Wrt);g()BS=620rgnzym%~ zuK~)`22b&^MxG{`H1m8EO}FJAqkrv!3@^TwVhm_f`u1-NcVK;wL>D9fz1UN7V$hjO<-!BN*~c&n2dI8*-rN7a4xT%II}eLm&Aw_WYKXLfp) zTr*3G#8M>WNJtPMLf=52jkmg>pc?@cK?std6-9E*aCXPHEz?z9-uo*3p2+j|EKk*W z&);N5Mn*(NW=2Lvl8?U8Z;Y7Q!x&G?J+e?%a)|e6BTx7ZCL_DO>PAU((fb)bpbtKc zFnfm<+f-W^J9J&S7hR@}VdJE!T)*?FJ}J$04-TcPtcAOJhCqhy7z+ygR?sL1dk3G{NsHCa)Shem%+2&WjF*Ym>xr+>)8dA!CAeh zeSyUS$unipERVWQebIPfxGDSqyqbj#4in;pJ`@nTdZ~fxVK+HLYYTr0pM{gQRZZE0 z48HD^Gia5pXJ~1VJ8}wXGDI(RsqFbx0KQb%l(pLBk~z;-%Edbgwt7MO^}lC*;4ytn z?N`MId4g9z6>i!Xyr<8abYS=0$^>-WX;;It0ge{IDBY5KdCz|5a*yPk#F1|L&)gM#snAD;9GKI&Gny3{ z{pB)7cVC^QTY6R=X}8~A!(oa6Yv7@m54li3<*qcdATF_VgI~$|RoCfvTn)Nj4v>Cb zE3hB;v|NYaOzx&>)29_BBnsl8xoo9CAJcbSfeS`3CGIYS~;|^)NkO9yj*o2*d z!FKzD4OAI#ALfzw5fAT-G3dhR!Hea7hR?%I#>HmF#^e3kR(~z%==LoLA`3Ki=^KCzpPCy)v>W+rr4}JhYxRnIxv?Gn|R} z`Q_Cc#l-*3=JVp*lNlcQ_~8)91IY&YV+h`v z>N&cvJGx$e&&jo2KCjWlZib%YEu-Pg8^Vn&4?m+B>7Xl( zE7U0vJ%V9+GM+wcamFG580xBD7p_1El*jSZL6SVwwUO@)?v=@7);P+-vru!{szfsS;jME_VO}ye?Sl2opPJ?%erq(Jk1*o;+_CkFx`Y z|5I0?%3egXHC7RmpJ5qoX3@TJo$&)*KK)m6BF%(`NnT$zR(X`%;03-})H-qY)aGOh zDo5)JcgOQFq*fLYG!BrKbMx z4jr;aIOtsUr(1se%~v@T2mzPVMEpsjW+#cU&f(qui5v>l5xvLwwm^R%tor8`&?6>~9 z^fTt^m)tU@D}$aSJK(Iver>6~H2$m}U_b}H`|>ISJ;=h6q2Nnj8_<6AG1Jc&Z%F7< z-f=pHNcHP~W9}9*ZhaR$9`#*^e;#d7j(qcQr`M*QWf`5Bj64>%J^nDNGklu^2tTOZ z8~;O0SR?IZ#)REFWYi*(ws0UG%|W;EcMSBki7ea(XG5xmF6y{~JMHYf5yvP>@0J3O zOjDU8d6>2)foq^M1Yl4>tOkw1ERQBc0V_B?W4u}hy6%DYnLI1apfEqX^X6W$77Hc(^D_nm&w6 zTUy2URo==}9=L4j6WmdI3?^U>8uBJA1FGxrM#tKIWkug!jN(;JnWor&SLlZ&gyo1W z2GejMEM5DBO!=wqL8{E|236p~%aifi2)7;M9l;(Wod!}0VCOfwD(m~tP9=Ie)Sra@ zY||$fE6(~n*gg&ButB=;-y*YqgHOI-)nG$mb?d7>!P;p|YXDn`@=m&7kstUCyp?wu zFu{m-V_+DfmUh)bS>xRqojh?Co_dV200YW71|lyN(a*}erNb~I9_{ddm>;E*mC0K< zj3YaP&=P_Wuh6x^;?v+f`L=vQlSvmQd*HX~Hbz|K^}&k}o2!wnE=L*7^I(%-AJ6?M z2V?Mf%Dbby)ENw~qtCRzq03NWK&6j%7N9P`#2eQVuDT8X2ba?IE-2+m-goLF9UAVGJ9$gg{m?_X zJ_B!2J3>!+6nG_Vt4XHtQu@U^dC1Iw?&-*If5*@WE+g-h5|qkausm1q=mduy{sTJ+ z`mGG9*WcQIWi&GD`3}6EsYiHBJqM?i1D$k&3z(w1CR5Uc|CBk9cj%(vExhWzbO4%1 z@7mDli|_FQB?AM4x)SgN{^2Hx)IK96^w!`9m+C7A+T$h+(kW4P!vFbv9Eg2{w98Id_gv;=)3-t!5`@wBwsoRpSz9&S6 zU-WX%$~?b#1U``d$S2)^lA^CNO7}h7SK&Y};fM0n5$kE*7k%!_{M37ee8~AfUqYYD-_pC`-M-?tbc2nw=CQRAnDM zTVZ1?M@xXf>W_TuS4MbrudMm_g?DXQ{(0Q}Hr=9mN#FrY38w!?O9c#RE)kv%$oRPFKb%>$80Ol zUR~Poy$%)?&+>e6(`O5I6CV@FZ+`vT*8cbB<$AmcW3hDGmU`~|nbASwVeV^EZfo%2 zw&?PtVzhjIFQeA(^z*(vnDQ#y6HOjJFH}Xim~1_5(&AvQk3Y&Fkyw~Oo5(+HV$OJT ztce+qH3R=Wdo#*r#CVu7iO1Uh(lb6VYB2H$0dYSA7SXzL?M9Qj4sJ@S9K=PQd3s$q zf1xU7oQWp*#0ylI0y_eZgtG+>+j6yOlYU!Z8Jd)XwnDQ^D=Uxq(k5hd01qH#PeLc|2NU{HE4T zA}Ov&XL|9J)|zDiqU+3bNTws&F_8sLG^sI`6ybRO#a7fIa(b^4;_kr z$U<(WztOJ;ixCK9`i|jYMnR?D-5hYd=OO?e%E;q$7OR5sD6hP)zPd7`;1##89Vw~R 
zfo+m(=R@<>czQAq!;CmW?Igd9<@llwXA8Y_c)PoV2jd)$d${l%m36R5Ef3$Je;8;} zK1+V`!mj@0;r$OkoXPKtgBe6JMlvLf7qN;YHx`EgS!2d`n_7^B_En1vUzrtR1{vF~{YYevsf4G!?6Pqe9-c-I!m0V9&SGGJSb2bbsLp>Suw zc^Hk(7t0oo7EE|gzPNiQ*=T_xW2?nR=K=iTkAE}^l=x|TKbT2wJxni%wS6dK2&3Vt41aV1-VHAyTYpJb7(*Vm zs7ps4Nw?8W4CuErERuVMyC)g(p0$X0u?0EX`n_lTW#D|Cp#@#hjNuQ>EqcJ?#q;z~ z@Njw3)Lx#}2Wly^@X{5W3<2npm(d;%4#dA#8Q&hoo1-UNV2p;ZibKsXHDg!QG-im_ z7820fyl{D+hD+}z>{9bZcrhqyr}Ua}KK;j&cyAv1__d6NEuv&x-&-7e;l+d}@th&v z7)2ZBdCbFdb{!=bJY0EyN@s^aRi8yP3*6d2qw<-v7oz3O%5BzAKTWPGGCqV;^%CEn z%C@h$3q2I5@&4Ywzc!Sd z39YfK!dGJLm_=~nTp#egTG zW2n<6tM|z>=Qdyc@|UxyD~#6DJb#}RPVQ-PD}0JJYNaL&{7bfCq&I-6;CaJI-ze_2p+eoI5XDNBeFa(TFMq_Uq^$35{@ zdj(JXc-?~7opzPInK4!QO24aqOo+6l>pHXK`Qsco+TE0VzA1$0^kD(CSau_a4P!53 zTzp_$)p(3$Zf6W9TNmEBIK8)p7dzwZ7Q>@&zW%zt@^s_NciI_uZ$bw>kAKRY812!{ zZXi2Kj1}ydu-JF21-nDZ4Z7dRoBl#x<7dyDjitsC>4O%tc)-hdAI|B)H_2s4jy&ls zEpTWz;nb zdi-p4X~F$b-r?G2<=cH2Q-&Tx$7E&*7ETu9;CS-%+446Qt3NVE9C*Dj0-}>jH6M#d z7LMMAe?<-$_hXE&7|%vOPnl~THR`H&UTSlbslR*kmZx7F@Jz0G%0JuFIJmxIXp!8E z-Yb&@3;pZ;c&6{N&|?>kMUUs{9b+%;!cK-KFCWFbc!hQmV0L&FhQ{K8cU;23D?#AjltqO?0HLwGS&)nsbZa~12vU$&PWW5(`0$xK8Di1!Q! zWAy9(O%vZ`v__2x4AlJgxA(|CpD{`|D5@MR7|+k`z@lMw4UB>_P^z3dcep{dhjYCU zQm=JCLK9w?J4v>wewTjBQMe2pCP%3>zf3$nVEYd!;SAg=oW&t!y=K{`k)zd9>(WI*z+#|Yq0 z@8&`2pfG$#GBPFqWZa?QqWI8Y@Qqe}2P5ehO;G>F{JQw0YT^OYp+O~M1XDx6r^lZ_73NBB{49G_=C_wP# zN1zG-UkA+VeCUvT_j>)ohww-q#H*fg8Tb<}(-yY$TZdGFXTZC_4nE2MYBLL$DGTVz zR@?dbf%Du)M;ff;Tf%jLwhJ|FdQ$cq^}6nxuJCLO%F@YRrBmx&z|#ZY=;c#gRb4w7 zdb{kit`R&wMc-x2@tj;NIz)hZJ=s&lK65?zR!09lA)j>4@L_Q7zBCl~e6-Ik4PeT^ zw2a=>b<2~5yE1wveOCrptFAlsNs@F3fCn3v!)MpS8#1xzqufQ8g~PO)5wNX1yM_j& zfseKZ3vRz}TRk_p(B9CCR}dkE(~x;&GFh7Y26VHau<8mTG>3E5XDyt8Pp*4D3qWhY zI&}|5iT&@BTv_;x{%eu0JcCF1r)^0S9wEDMlCeC=#_lFfWPlC_w~;|<;S!8GH?sRi z+tKH!1lnl}!*^9x*K(kb&|vQ1KNUt>&^+$F`jh#|t1WEv4vbQvRs5X^rrj%LMiXtt zf+G&<`xssamGZfmMJMgg>vA_lnYvA}lS|IkPCOfV0k@|l!rQ^OGPfk&0MXb>QaNl{p#jlttJyUMgpK`$0vbOSxK7efdwrQh1*kUbL@6ic;B<&64 zCwfOj0Xx*+l!eIx*mi`Jv-+q)gI=+5U-rbF5 z;D`d?F64UNyCv7*1t)k^$oG6v_TaCqG7gb<@GDQxrtL`=J;!KIKBL)!haNjP1zwtw zJ+xotb)ndWvodzdgK=^^rH#&<%7;R=n5=7%{U3k!=bL}=zx=DsKltniZIv%X$>p1s z(^%`kiOsLRxwSd@d*?Rqyw?`To6k01{Pdg6hwrzAG*9mb&u=x!571K=YWiOvLj z-@#(jHlW|zB$_ejL;%gfA~FnkWnpzhq#R=AYO~yWxK1M5(c#}7Yv! 
z#(ni4+Aw1eBa9FU^5KD);Q!el{;-Ks-mA$wzCvuq2E9j5`DT(_?O(r6vzO82%j9qR z@Z?2nG!dJAoXpe^$+2{dG!{J`Wqe|g|Mb1{o10g@YSQPhF5C4T4tB8Z*k>~mwD`f; z5N&3cO#&n@?_vu*wzZ?HCJ%p;`%7nfmp9(y`1>%!@a1IfNwUpQdiMO$;sT$Vw!x@m z^398US00A8|0jS=+>6P~*c3bh|Fp^Zsp5U|0;T^gd>qVJy<O2%h!2a zpNN>@HL&m{THwtXOSGTxH>qVPQ#T%$cnxnj;(<^h;~fW3FsAR-kh|vH_pEDjK>mLh zcXfK!&HiL@yx$tL^Twah71fy`Y<=Q$b+MCT&3!6!a9W4c9dD6p7T1j@sxLWnh#R?Q z>}2$Fo&o&6`s%AZY{m37F^@KRXCAIT`2L^}PrTwk`@tt;(BEnshzs`v-5b-s_$RKkha)0T)_crgmb7|;wAj2ikd&XfQYc8BSKZ_GzXV`k4UgSNfk2{`m9gi7G z7<(Der>)j#@P-~cRU2XqWJKO`hyig|i*fhIpg0E>N5@dj_gnWBa@|_IlAY)1XIY8-G%8I zeV5^v9fdqtoY8i8@!I{tu&v$GV`xCfKJ4(m7uTL|&eyJwpXk8M@ZiO77lB0);TRa6 z#PZcQ?%sJ_3p zE?m_8hlNim{LFYL2Z1N;{(TQK-d>mmlZPFsE@U13N5*H7yMBLM|WDFKDYV7r=M>A=C6MiEiEP{$2mOA0j4noeJz@N z*W&KE4l+EF;q^`jgyIJ}(=qBsZ;9Xi1nn|TA9^O-+@AX96UlBndeR#PsVihhpmBYV7-@#hcLs$*=i`_OD;F@WIggs>Muwpxt|~3tPA3d&I6VX(yv?bcoj$cMhgQ zA2vpYggn|b+8XEiz%4a3W*@YxHHTk2@#zW*`c;hV)S8x4;e$x2QxP~kb%hXJ(K^*S&!v@rn__%P3ok}ttr=@XZM)2(NRX!Gnm^B?foDE&@=T53uFeDloz=B_ zO2LP~tqHMG5)=mg;YrUWQ0MS>w+tnf4Gvlo1MtvB9TOPO`!ji`qXeVR!0NAZhX>UG zslmjrsq@eVOth{e9^4poR(<@1Ab66 z49{em@&*-+;`y$4LcRxwXfMz5+FbP<{wfN7$@Q)}iX|UG89Fajv}hm9{viii4u(@+ zw+wK25MEePwg z$4eh-@6iOVE!+r!#mjn(*wGhPCY*^8K}qIMY2UuJO?!DX86;1$*LL**lSkt2$x1vF zQfu*?E~v~Q>Y~lyfi&bTtdOv-x!>};nZZiA>)09O1K4l0Q>YG48a+0Hc(7OB(4YV& zgDX^)P8xXOJN!Y5ZMz&%DcPn_U>kKC{k_c}B%wj3ZA1ro*CNHXozv-4cQuDz@6ikV zS%lr^qy?PHv| zz<>d0FfT!QqV@FsSlegy<>X`0KwTH4dC+Ba@@NVOFj^>Qr(Q#AIaXbVvh+SA`d$4; zmvAr^17gdtsRL+=iZ5?U&sg8aGLIX=4~r3<&$1C^*#U zVMZT}JUiaxcQR2@nzmihcfIvx_vW4Lm?o?bR7 zaX_3l=D;3~B$c7<$n6fZi|)G`+~2$NaC1}$nv4esVhcA9C~cTQoaZ>6Jh|Utm`$4S z?o9>?hi94)Pz$&xy@}DG79YO+`r1swO*%}Nw0OAV>8j-lM-^+*#p6Us+T;(qy? zFDG`numKZexOTwEsZ0mc9SaYiqZul#aZ%|G@e`1|l+t5@MP9Kq|Z=Ww%r$MdsE z>cJ-aAAfkMHkUD(fvGxQxqhQXr}y#(KRCM;@LJn8IaUX;xlh%mz~W)go7tpK+3#LD zmz*Bk{QB$fHeY;uyEdk;-qkLO8=F7*$@wu5?9aGJu2u|VUcZNmRsQYfOW^JJTx%*?ks|Eo2~fi1w`b%yf^tV;WYkZh{h{*VlWiK zhzG3@Nc0W^+j!kq7lsXVew_jDMH5us;JY#$%|T+ytPZnKo@~$9E7~wtzUucuGHgOy zqA}Q+AMpZ%!+3xJ6j2HIW-FCFl0goTZ8~k7#$*@?mF>+0anT9L3Iso%%I+1aeJ|kSG zG6FNkKg%ddrgQshS3XW zkJaA9cjpEDwgc{N6*lQ&@%8U!K#z9euO;ySNAuhFge_R1fsVJZ^I?mEc36COvrs%4 zjSgvF{l9as1;h6~977nL$MAFW_T_2gjPrsu-996G$eg-HjPF587ZjSSD?V)}v+ zbap3&FTJwn4Gi;SmC-L_cQhntR9q;?h$3gu?^ z(8I5;He?sl=Gelm^5`A9APFulPv;DlWfmsGn1*igeijOPuYFX==*!U$m1p7WAi90-3USi0=AF{fb!IhYs2um5ysSEgC!Q8(a^h;Z#pxhiBaBOw`@ zyzH5F_ayq-kvLTj$8jVn*^CeHp=)&6L&=l~kF+c8%)}rGnl#3$oKEOfpS8VECX;@q>wkosP+#OY44#VyRzmkx)0(Y+8Y zPsOvZZf}w+&(Kh)l^G|{4|GcM@~quX;+nhmX%})TtnV_Iw7RmV!SaMyAt<5keT5~e zdHH34*?EtUW9=*%i!QeRsj!Q zlXvA=D>;*6G7dhKJ5I&TtY=#}eAH1r1|RABxSxulZw&{I+xi!rz66Ul3y%e3j0yut zezcKp=i2YKF7hTPP@QMPn+glJMPU>e#`V55>eqL8t$|K`u_Jl_m@-kE)wz_|)JOem?6RTK0^h z)2_PiVtseyqvr-)(@rNJkEsfozj$08Fn#fZK+k*f1q%bAZ5?F8zltzv19VE8{FMQX zt_S;Y5~gUCRlfML(ss=ttZv}-8Tr~O>&ZU`b`#Zb>3SuD*A%(x5>CS>g_41(Z)xB# zDZ{%026kzt27`cneFN%t^xDW6T9gXjaI4-1+4BwGF;JtmcVw*VBL4L?*ht@wERY|a zrgv)q$q=prbXCCmr9XT2rp_5&z4IMdo^?HVbg^(&Z+X?tlLj*0(OcR*-5x_2hyVmFK%?*CgL~guNF}eUOD)B!{~!$+eb5-KOFQ^O1>VsFME|4Tgc$0* znW%E~6C=md9>c+coAwZ`h6B_7)Cr&bEIOf4Baq4rN|$tma+heAwl>gHueN#RU+*Vm zM)}l`5g#6KWgIj4oCyiA)4qCH)&XSTj&?q!n};LEF2gJMC_kT)&4RI8-;sswjdvan zEOeA(YP+7hQvifOd%v&A*e{tSh<@jJd8R$Otrp0my6D}{{3+>rxTV3C9t#cxdQJz+ zGerdltphYUBtZ*}m|Xen&;zg42mW58y^Fao|HwFm`T#!fgIbbigA|Ig{YO_|;dN zpLc+!Nf)CW?{5R@7kRWE%_AHx>^;0^d71Jw7H9Wh6Z}U_xEOI5`|dVjbGRE%W5(+9 zXF3S1!LR|f*rbO~o^4Bd6V;3yj@t=v+k$!O?8;NRiC&YhS4|e*h|kKXRI_pDB9PKNwt>XKe@YveRD`tOrQSVs>I3Y}q=i&VD!QIV` z%U7F7)+07{z5Ml!4A5T(KjU1+EBfnx2Wj&3W(d`aPc*5$pU2C-2Ky#BCTzxOFB`Or 
[base85-encoded git binary patch data]
z(Y{NMzij1D9$)32#mUOcm5uopM(JDWjh8LP8MEDMk(9d)^91dYzAJEJA-ez`PWyWy zY|r*3Uv^qJE91a{Ds*s%#zeU-d6WFTY%xZAX917LcFXNf#_^HW?7nzZDYHyHT)(lm z!&k2jP-$#|KX}V^*riKfkB)cd1q)#ZR?=g3BG@fvv4hJZ3pv`V#Zo(R7#%rqk-RuZ zgPXKljYllF*r73fSn2FL&;mmXmJaZAmcxgid^`*M`kVLOJs&T!e(&5H^I9^>UE1q_ zN8sMGXOA|&`R(uP3j;QRaq-lQ#G-@w(imFANBq}69((&l{O+L5WSknVwS+o8^=IjS$HzU7N=?|ty2`kr=h#AE%iKH><_ zynN~T>*aB%_I9@28rlpi_6|14();b)^5i0^e|G}FA3S1EQommP3U{Qjm;UZ{HUEpvmCN7MpGIS4yd&}+i$+y z{Nc0Tjx5-TqV8U{8293-@rQb)Q~YXoxerX9-tLfF06F(Ntf+5%pCQBaE9zHU29noT z#MhUN%NPzwVDPcZw*65jzVwuF?7sYt)3ftGz1D#vV$FEUczbvB(Tn;UyGX{ZNHi>5 zA^PG8Id3U;+7h{%4~E7!@?{QBSL=@+RgX`T(|KMHyKD>QfBda4N~Y$6{vPUc@niHO z%(QE+R%ZTIx#?Kt@tK9K@+>yQ)hvZ9!R`s55Lrh1Eh9G@V0`uE8Yx6d$bBT&(!No z*oCYCmd>J}Fe#78C&svz$0&^Q#9ZJIMoDLZ2jWT)ja{ZLJnzqX&T7%)ZTP2m1Jw19 z@}9DZIP{eEmbKkf22got^vf3*IKyhxMf0TZS?T)`A0N78eS`M7?sn+uZs_o;e~SQg5bTUkix%a_E5x|2UJ#Jrja7OArmg_DxH@G@ zU{-29Pw-q%J&LHzTXZhaLHxoLrjW8VNW^9 zuEZR8IT!!I&(K^Fk}2K~U=cZ$ds%6KBeONETIC8H-;khwtccg73qz^mYoCc&3f0>H zV$*)AXQ>j!-fQp!NTD|w9YB`+%UAyDoI+X1_e!>Sb5LT7LGqu4tf4O&)M52T8Igaq z6(BM4y!HG;%5UBq$OZdeHq${N$~iqnvwRealPst-wh=cfDbQQ2hvm$n9h&TsPITu+bl&ua)M>O(B z=QU8R`z}UuIPx)xblaq$ik)=PK=$J2EXX7;vq*!cRqDwT8w=3?MfWOG5?wm8qh*S+ zcq!%-3ke8!Tlwc&UG#5s+jhYT**xtxYus6b7A^iqS?~H)&MxO*An8T7gGI@qu4RI0 zW9n#Ibb74VUXGO@{vyWv9m~pb+|_hHfDWC$g7+XQtH8;B8bO&P&#$#B`74J3&Qwef zR%MI5u1jbw&~#teB?_fE@lez!xg9x|9@;(iJ75$&gU~*H(?iqd#F3bM^;bUV93G0_ zz3V9pK8srfd>J11*G{YA&XWl*pGmaeuBU)o_hfzUdo}dU-yRjeG*_zL6|DLm8pW9h zTiL1vY4jd_E6e_(Y3r4KY{DoP9oT=bh4uVVV8PWUIVbWdE|Trik6+x}ynFI^hg0oo z@^e3oegULhh&Z&yAPfl0AGsYAuE*+?4v9be8?fU#kZrOpU4tI%a~fQqVgP;o z?Sq?r(RDP-rFYL{(Htmzu)&$b&)yEy{XUE8_c}P!1n~Hg1096Z_@MqCn0+{lsV7f6 zSgE>kkmv91$KSL706+jqL_t))zP!26A)UZm0KxU*Dz^ckGg9{C`sAk_x_P_Bfg}0K zKKb^6%^$waI?s(<=^S9T>r{n44`9Y4J2ajKD{^h^<&E}u{@+V_maj*%< z?=F3}`SRNB&0qZDC&^@!jC9Mbtis7JOWj>ffcG`2dGEs62GB1z|MFk``^`5u0zfRn zC3}y0b}Nhhqk(-t{qbLpKITehe_;9VKff4AetPp&7G+$beE8nG0rsrNO;n$6-aYru zOooAo`aWCLZ{K;a`TFveIpEON&D(*lCs>A5Kir^v_05&)`Rq(USjMjjMxa%4%!-WF zl3x=>lPCk=SAk&;D5Y1lS}sL)HQ4~7D5D7%%S=`~2f#A)IFc{X(SvAd(nE0Ny=zB{Apy}W z8pl5@>XJP!RIXkP6v!gya=hUFq^e0}NLnLz$0|Mah;>)GZ< zfdZd@{zVJiO_-APXVuZAwwnXrSxJ7}gyzz>m&ZRpEBTiJ@ULFy8@R3ZfCPCifMT^B z{WP7dU&UZRG{5`QmAYX)PKMQ|cF*O@!P*Bkw_F8qDbV6mGA5vU(@spd256-Vdrk+L zc=A`v#RJg81eog#ZQB!SDH+dw}VaS>~gr3+lHITrTkc;;roFA z9lGJhZLS2ODSbhcOM{B$ul%>a`y*8~WwL8yU!dCEfE~I=pZ74(h2LJ={kcrR4;D__ z`UN<|e>w0A;Q6#&Bqra_YR7v50#2L=RA8-|4!xH@(eJNbDNI)7=@r(0;`7OAdp4N? zSnXjU%-?=;dOV9!6K;!5Ky?%C*}k4K9-sHg>$ozi3{R7%T++tlv|e=5eJoC&2Si!e z0Cezw3OLY@Sp-s^*(#k2sxewf7mh0L#TtjS*pQ4o$-nRZ{L<<%UbeugTVkQRH~wpH z5-O?H<}1gJ(izYS_+LFcvjNW?oO+{r+MBSD0cGMc0v^zQ;Xak%jd*sB}+YZq^ z)!p=6=_2jB;JK-)+{SasBbsJGuCUq^ssJHCNei-b9z*R@f9t&oIvr#>@BPlbc1zSI zs>1;Y;y26EnZHE`0D{gS>%5ockbL`Hs`O#tL@u(k2Q{eZiUpn^w{bZ`P_82%HkL<4FpZLn<8`W3tq~3P;UI#cIQD^#G zx<0)&GLmI-GO;(E#X9+M<*wLWoTe-_1u>z5r=nRR+H{mzA4;w4*T;J^KU|981ydV3Bn#Am$SQyYHK*y6ic z;BVnq`I6l?;SLtTXi#l`fbA3>o~SErPu)%}PQ4`$JulgG<>StixgJPvVQ-=@BcQMGn}~aVLLC*Y`*^bV!{v|)w{Cg^?UW^LxMfn`4tIwY?K>4HBjtysQ*kSd&KAC$a>5z4c+Qwu13P)Hiz`vdE`ezxO z$Ola?(%lvpEhOK(VdtL3yT_aJ@4h#d>|BN%vg7gjxm?}-ZWbiPr3+Y3KWf~dcfcp< z^=D+-<)P~SZe^J^*K^}3mi^>wc0Xu8r3E8s!{D?h)v0e7e4+PvFmh0wXVPHpymz^V zDOB>K+~XWMKQ^YciC)Yd>fjcSOzogrjrubw~IK zJ;eQ%v{~2p6DyZRIo_G~J_^`BoZB2&ihfAjGDflZXy!hVFhhs3Sf(WIV`{G#qx$Xr zgimw@Tp(VBN|;r|KQg|XxnHSGI->WHhxEMP2D>KgR94KFwgI#EGOqwCOI`*AUc8AZ zV^xZV{w+HFmGnw`6(oVu;P`CcOn(00LE-0WBe{`&&_l&Y~)EY#LBV-V*BJ zjt;MIHLhC*OpK0sAS;ndFM(R%u>6LEert3E9N4^RU}tpyXEv;dbgu{zLOb-pX&X z7kDBL_wiDc>TB)=pAY4*%ItR0x@2uiP&rrL{njn{SRimM)_S>!8=58s&&zx2yYyq! 
zD$B&^xqN)K{;r=M<4@oCILe7kxKAD?2p+k>TV<0%_p;eaO49M5heNb$?-yt?z~@%{ zsl2T;MusF-IP53aD}9Ogf8tDlqQ!qoYC8}0jR0wB3zA6*?db3qZDAIIdN_x+c-y2w zeZL+=Fy%;L2s?aUK|E_gwbqN;YP4kO0Z6KweD#Gnt+H=uq%NjNX_8@yzqW zMyGd^ZkI#;3hD7mcX+WB?h4)W_>Yp27w@H4N zOGd=)6HnJAy1MVr(kYQEUvw95>afcaD-L?r1Zi4N@kWo3@pv~bY-T7mxuU%IrQcV^ z1NzH#?uC&sn&jr7%2j*@LAwN@aMyc!uU34aQe>R~EVjJtIf*z?bS|OHWPSQ>O*d z$(O(nI&tz@Iv;@krpawD7cI(b6y1vOqG^jC^LY5* zJ3Ne6MVq`t-j>mciBFYD+5^;EV9>v9rK$YuM04p&Sh&)|f~`J99ZVY-CXtjBI07{9vr*-E1zeF{W=(Okx!2_|pbr*m()dD+7|0ZIS<^Q&3u&D*TUZ*5Kl zM4ilv^t&ecCz^PDlfP{P^@D-Q&r9!imf5VmS!F#jVJ@Bh4fG7o9CSuk7|0P5`U(wz zEWWR~fjAxL{pL{h{v2So*__J~>Cby^HE7J5ra@!enL+>Ge|>GU^Xsdd|KhKImbLKi z%|HD#(7Z|hv1pSfK#nCTOXvT`KMS;OAbI7ENyeVdwVU_Ha@+*wY8HQIPX+unnP+v) zHN$s-wC{9C82_^evs^wC_zu)!MRXzX^NTM#fHF(_-(_k1=JU&&v$-aDrwQx3*N<*4 zR{ovUh2wyZo?li*4n_l#zVo9CoB#U5Pvicp&BYEM+;{N!X4mZ|0vRKaC=>0s1Ij+m zV*QhRsoU1hy~N`ucg8A-(&$=~&%aoQirvgw z5U{{Sk3+~_X9ch2a3N!|{h*1Vg_9jkz*ttFIeRL(h*JJC4^TierW>?Q6D*S#2Yb?A6@7*2-2gbf0|Wuz zpO0VKrDIH-cGqUK&1WnZrB%IB7wTL+*u}s%sl#;vXh2c)1Af#6n$U(4GO??EA9%;Q z+k_XOp)M_M>CMhvd@z+>6`_{2efS79=`MwRBEr*@uR`O(0Dq0VCSUgZzj-ZF@Pr zaW5IL;9%?cU;Of?qjR4II@9nE1KZD>Ne<%`Kc(6SUB>c!eB|dx`SAljpqEwaEZpl# z;=vqZ7|$K(OXuMW5O4sq$_u~*Htn*IV(eLkkS%35Rve!9Z~%yD194qH81J{oYcGKt ziuEb~#sDqug>L89_-x=D-C+^ZVyH1K z;CXotATe(<{lLKctumn9BE&53L ze((E#xOsH81>f6WZ!QM_-|8Uh*MX{NxplW4bH9H*x>CQ$J&6SizRYjckE;iLJRXu6 zyL>EIJgL70)}1)X5C4P4J@q5;_GC1jwMd^lO*_tT=O;h;Nekoc40^@_x>l==SEm-K zkvKaJ(oM?hY>z#~nNjR0sCKeF@9dj?$fh<5j({TpXHl-!~*AKr&|AK6^IaIyYN z-_innZm;O5uP%R88tKX8(=Hx|XgaIG4!7g!54z)<4)x{p-wuLUI(lNp(aG0XA6Hg7 zm~L=yOcgtt?lF!?X0@?oL!AO_jSaYUu?tGSXPox1vCye>mbRr&epLC%8NCGTw|ht0 zpMUkcaUo)o_M<=k(^+U|HEtY37tzZAe8A}6efke`kn5>a=L1&n#tRF^7NF`i5h!nr ziZ<27THM&~={5Qzotb<;$|_r(In47>zRZp3mAxqOqMt4XoUILcMh^N~@9I|=m$~OJ zoBI^k_^xfeyD#89u%BC{H??E^!%Om$d`%xyyP-dfW#3fE@;_ObRAAI*{ZNdUEca09ST?Xx2RPZPO*X4+EGzUdHSW0YB3Dn2kDf9P}hXkSO$b%mqz)4vpd z|6C|Vp-OK)8mdqvL=hx_nQJft%(F!oqZg&AM! 
z=8YuS?%jlp@nm4h?us*$(P$oype12gx^{uFOYp=ir)?%X z*E7M|=Db8uJ(tEX!2nXer9AP_+XLkrfO}C+4u=ookWb&=^0b`d>$DBJIg89m8!b~| z+miKa%Iq7+91-hH-=(+O#}H8-(kvhCZR7?`^6oS3zxTDtNpJK>@#wy3w*sl}$|?Wk zeNye8TeSHaxstX!b<{*+^jLacm~Uyvi%GArF=-Vknj5Su^Yo8s+R9=POQ$?~rv2~K zej}=)OItFfUvdLXN5kr4xBQ5&f~`f5CAW*-WEBs{a5R$Df|Y)1*e7-CJ!dE*!QRb|BpU-zwOnwh(6q0y7zSRC?J~eP=m*Vftq}G@9*07 z;e&z6K)lZ{UE7?_&+f@Az|R)`M3%fJttSJ~{_@ZMVDmryFMqqak!zR_&vBR0pfU?K zzHkAR{0`IU58L7kECi5#*!FtiJ~-RNar%Ha1Be4%fh2%)R@oQw^><+Zmz$se>hsO{ z69+cm`#1k$bMgGC&42#QmCe)34>l*m>i;BA9~f~a0PyyMC!7E7|NcKVFnzuG*Z=Y- z0ow0vo_u|LY-9YzNeAp9+{-Ym#Z@|9?0d&93Ivy~7 zHftx=$R=Q?n{e)LaH~CyPwUD^P#sP=R%*4|Z=MA@2SOhS%pkWPy#HY!N`4@#2Zx{m z(VhqLeDUe$o8wLL59eo5S%EVq?W|3V<2_g~DGXEFh3P!8ZKcLdLxWLr$(>g92K@#nxnK1a0~ zz)*9k0l2T`hx~qRgP%{J4j_gQ120*H9&ZAB;@E*E)B$m|3+@b#C6`A6&cC{JY4hQC zJ`A*K;iozQBy!JTJFbIB54BAjAa*zR5kLky<8kHUw&LmI0EBp`+$KD%%#7fHN!oCPUgkJ$yVX!lzZ__maxh3z1(kHYDWmQf2_qBWqF*hUBH6!A4@+1 zM_&Y{jKybh#=0?Gbu!mHuL4KkHfc10H(4D2=*jBaeElCNJ-S`}zw?k4San;w04B`t zmikdvmDf^%KD-Ek5}%J<3sAdvpB@@55E)0g)iU`A^LVddeE>l9;5 zt`gpv$$EJhJC-SsE`o+9Kx*z8v@1GfU*I#NApV*VRR{2+d@fVxhx#>da_ytbB06q?h z2GVf(r7i$->XDv4pFeXJ*koTH2Q0D^5<6;Ex1%zbX z{BGZ5&JGN+!rIoD`9WZqZ!&No{bvyfAQ#I=*Gf~F)cGv>#J!{VQ2_nH?&f8)m|lJ2 z$qvXuPX6deKgyEX!XLf^ytp3ez~HXP5*_>~cN*M&*a4$&DT!E;tQQ^;qua`C7Fo*g zpFy?4B|Bu04p{Sqh~CPkw&8pPWm6su4$Ec{G`Lrc+ZUiS`VF_F&41&0~VF;+yOSyk^9G$+lk}vSN6s* zgtoN!dIpH<3%PcCQawGak97tKOLQRY_rCja7R+~|Jz*HVuv|?YZE=Jza|;6MJ#yyS zF2X05uWr64ubhB7{;^dhD4Ie+cKm5ldpx?=}@696F%V@FaXrbX}|M_nQW8i8- zyKA^IGN=9POMu_pDH;EealF81GUEWkBw>I&2Tp4{+Qr4jKOcY8VW9W#Ok90W!?k3y zb7AV2fu-k0=sk3XgVYy(|(VzZT>Hg9wud`>*k1kQ4cLTL+MVptoD>N2+Q5|yS$LIMI{^`qG zeim#2!gicl1oVtPU4V267Y-o)Is;mDMV|m+cXuAa%nu8)({B`j_|9j1Y1d}7C%aa( zBjIL|xIA_T(31-b;j@dZg>D~uVMj*D(C&0GgUA=P7dk?HwH!NoYS%7178^Tl^Arz_ z4T&aS{n3oWloL~Xx2w6HS@|^U>f_xGdo>mUCgah&2ajaza<+ETZjkij{Ri!AtPYIr zv?FfE6msy5(zH0KT>Aq50o`O}jg4EJY0PF9nSOK1p5D?|Sa>XfSx|k}nFab`yF=*^ zG6o#CC?h>ZBcIZkaqphSU)m2{I?o@+L;6eKYGISXQ`eQ{VB<;pnVufb#J{)csz>d# zY~zqd*f7PLDNtXo1?^(2Gv_v?6ayy(5-Z*eI086EU#*lXq0mF{B;-B;@MHkBNn+(W z^;K(Ihu=jM37(k5W&P}=HDS9MkR^d;nY+LpKtpi_?4vAe(%dz|@)0Jbx2+mX+MfCJ zJX5b3n2q&mm0Im5FK+1p@f$YN$!y3Rmp2#dEq)~PuwLWOr@}TP}^St-2FdDkc zcs$YT)I6U!G|tX-=~Id;RoAYzJk^j&GtofCq=g4_9X<1?e{(NI7d!kPu)6!gFLwb2 zm}-fF;?4(fIzWuLJsDc%-FtOjT9WLyfAK%z>fwa*wRj>uafELW-}5OYUW~vhLt#eX z=Fx;xPQcZ-)g$E*tYSxu&O%D_y`^h7D&I06y87pP!p(!~L|7A=8q+#_lRDT*hipov zcw4z0f~7DrRY=8~G$}FVFSG_U1)i+>=PqUrucf^%=fazvw8X=ZVf><_y@Umhzgw$s z8O}t*WAz^WU96A1r>>NDN}-os?GlkGAi<~Cns8_>?Gy{1pl3Q=y&8Fm-T@}3T$QiR za(t71H)HS_Rn3R1ZDfekSxzu!)Pl1-1zyFZ68N?$A|ih4D68XJJU0 z3O4mWspDl2hIjH<6Y^C@+x9v%O%98Gd{R%7HlE8TS)`L>M>Y{Y6UTJV6k=J?p+#Qx zO_P^*BxWXteNVheFCN8LZF=gu7d!PcJR(Dtm-LPcmv~HOM!q9LtssV6kA5ak^KoBV zlAn4=hWZ||6$=luLYaan%)C&5(u{YF!pb%sJ9+hZ5xm0VE8w~KvyfA8@uwf?-3r^c z4?Qz4eMMV!fa=LZcIY2D=(%)~sbTlf+&el&IRs75Hq9w~@rQSPll9IAz@w#iyHCc4 z1`_~J$tSw#LD`kc;_Vc=2Yqk#*~s+JAP)rhxh zn;`0R>Y(k_ZL!_8d;jL%;}@IzxguaO_WITH4y6pd&Yi{2KD(T(To|j;6DRYTedAt( zKL;T#%PQ?07^D6SHn~J$nFCnC!+pucHwK)wCH7SSC_v@@!1Id%ao4@$UA9R4{vCZeXsn`Jm{_gje%k!PhPxAr(^UJ^Lz%*O44{m<> z+b^;>%?(16h4=DD{M#!X&>3KGBH;Jd13qyL#Px0OZ2sW8A9irxy{yVlY|fuPo=c9C zn~yHE#rXcc2DR^OZe-=UH(=muR)*)B6#nuzp9PZd+5GSS>z{1?Lle0F^!J~QKiZdM zurj^hV!~eAS1XOK{+Icw{a=5PuiJpwUu6aVuYdZd@g-Z7X&k%epiE#0>$nVp+Oqol z(WSc+l<$7;y8$NqH&?HHJ`;8mfcpWx`bK=2iANLlD*?IR`QU>N%Dg@9V{CyR>&Kh7 z8jv>;ewpPF85s_5@~Qd$EN*aBv3SAX?51+$pSCu}!Ww8{;7&q`8le}~^LLXCZPxYU z>d@gthufw*?pX#bY0^?&d#W3j$l|l8mpu^hT=nXps_Qqt?(fz#Ei%G)G%Lj0x&6_n znf#dG9BJ~;LJu%Wo`AmO4Y$3`<`&8K+Ukr4_X1hD*?9Fj%g!b${s0BXj~{9g<6(Zs?`}T% 
z_`_{`;*upF|Fx+zr#g@fa8kakJWX)8t621Ey0wq={Y;iw%~duQN$)l(YKdS#2Vk;x zIzQEec4E?##%jSKfWR<3T9t|2p5c_giq$ zZcU`mygQ8w(86leSr8(u;QgYl`r)p{Q=!yC(|Z*GodOSdC-wFHt7c7zEcL} zTW7z}+ZI&30~WAW1*T_J^^EoAtIc2j>5rp3dG3q=vc&K5#VppYUcZ%v z87Q&9wO#w&7A(dpI-ZUNU2$e{v$U$>($I&RXDnS#R$^HCv}V}#{`f*SkP{u4XLdxq zF?kJa)c4Nc=z8sBD|e;2#zugb4veeJLd!2UmQJ#!!0Aaq7MTawSp=bfp2mlqVwNMQPx+G+FwCFz4XGXWpcYkTAQPJcg(LF#_SK>6FQJ)KWan2f7;ZcAR4 z27u*FyyJT3=#gBaJT*SVSg!=v!ZRz+K?h%i8|l`a0!wh&6o=)MANG&)KB_2sxg0t~|a@ci+m*!Ozy3;{?Tyd#y{++dLGIS}6SQhDr)0K7!0g_q6kDjSKcnx$k9;1Kg z5{oYu8Mq`Ioe*F4#EZ_sowh(P1CgH8@7Trk!H4f}Zd|`y9jUVbpThHHZBdD3G0Uq9>mwXLcyD?APD36y0BX+TJ_OOSxLIbAjF`gwh}v+LZXO8(SDX&q7xEP&)ub z+O@_4n4U(XgI$4;7E_!D@w;}gSuo&AN8k7$8je;M2l}J`9p-wkdZw@O-h#;4Q!Tug z2ibYrA9ot8qt7+Q0*=vx#zPP8-pV!5_0ePeTz@ZJb@SHk+!4LE`Q8tIxHIR zA&cJfw~#i)%Xjzj(f!&ueROvJ+092EeZ2YAKmLC6yD##8Pw&N>89Nk*75VgJs;PQr znSbYYyCu$^-0aN|LLKi-Zf@kC8@TvRVaSNZghRRbu#ixtG`)gJ)z3x98q3o?!|^FI z@@A)7ZP@_pP=3Lk4`Qc_g+5lr^tA;e`q*Ol*ST9d+#;BIvRHKJ{E;m9Sva>C72b92 z>npXTwL3~#uU@^JUbA~}>6p*I`2FVigA>!gIvc_`h41Q%xyJKhMQ@=MXl#K;ooGkC zjoZ*~A=hpXG$@0I)7O)g-m~Q0o6L7cWD@7%+9~p@v^2&9$vhB67QV_g2=bC_~ZN+?RVUy(Wa&6^?&)% zo=a|(Yr-#s0R8ri2euKM`Ln0-(k$S{U*l?F7^j$b;1N0j)w`k*eQ!G;{8ftu;?O;G z+KgM$!*&DE5&AE#R?a6wB!j#dOKQjJlMiwFe$G>AF7)V8hcm}px=`6>A*J+ZPL|x5 zr{R>iCvnPP$`67lSx8QoQ3hne$U0P4GR1a(&%pYQ(7q_$^1LakJl*C1&%7C=wKnH5Ib zT>&#Djd@xuiS`7|!UAOFIf4BP9I+}imtrE^pAH3>l}&9r)6IE5 z{%2Euq-ZFKHvAb&RxLZ`C)wM4U9JilUac5Ir@RLg7AIZ~?Nc@b(#7{4_Ca4cp|wV{ ze5tP}e3C25Xp}nrDX-9_C)r_ibc#w~)YAYDlVT4kadjs!{)i*JEj{aj`rqOGmd9Gq z1jU2+Gc@*vmq2&);+@0_JDundK~7}iPkqQv9lPp-Z}?gRST7fk(l&*WF21A?`avR) zQL;WGU){{R(xZ&NEr+o3>3eeWUYsd&PYOzIrYeLGx0~T(kp@&MU$0iViacrcuQCt+ z5HT{YVSQT@^Gx6=WAPU&i&whcu5^Z1#Zeyh)z7Mn6~+NN9*hiZ`A`>9ni@W$q=R>h z=Of3%8)cBG()ttz;UbV7`3#>VFA+iIxsBIAVXW-u`5KIq?+Gct;*gDPirufeU$hiH z+LnyVcjY&vxRnU{i@l>~M(*X0b^!6vHUWn&k+wCr?n|3E8)#BbHG1n`fS-hbGy}--l6}L3GRn8S|wF6<2iJ?c9LQ?)sB!*6HonDiU zOtd9rmtOTuw#Lk*)F{&MGxgvvRz4Sn({S+pp7KU1g0 zpXwPJS9!GeQ5nOh(w_Xw57nhH3v04QXTRt~&e91}0II9zkX%L&pT6pOziSsbGr_LTRfBFcMdM@uF{Pf8?Ta43D?G>-Dr5NsC_|vY2aW;FWw_ z&%N(1)E%A=JzLrYN3(GALH`7uIPoj^}UpUTNIT)xn>B_x;V8d|Cg${`yxPICgAv=gQZG-@WzU1^{D^*W<3TjN-TZMN{+C%do5cR7zxmZn{!9{p z9ITFK(3v&o_#IC^`Oa9OUkeB{LChOWgXHCo;2fwL80gTcuP$FL|8K_kwZk-l z5?8K0ZSbCjQxntgk~nv@$bDr zs~d%t*ORPWEfVqRd9VqQ0~sHe2djFE2hWm6GCzDNU6T>{j!{KrR>oUdu03r5XLplv z7Uve50jt`p37|GvGi`#>A#M7B_{!>uWH#N@ z*csh)-GE|AES=A~R^N#yzO}*gXHIo!>X-4+w)$1Sv+cS`zCNbk4r4pKx!3~L^Y~*i z!Vvh2udZy)23#D@>iv0O=e2g5=tnKSA=>u$vj>lF&IX7)&w`gF_pa)ZjJ?zM+ua?^ z7pP#t^%h-wRSV#htIjEO>+p;};Rlnl&V}c(gvtUtXo0rukkQ^{?A>vr) zJn;MbBuh<9A`f%eY56~DF>rK4%AU38@U{H7!a>SdDl13n12i|g79c@R_offziw>YL zz~^~e)bR~a0Z6BN)!C_2X9C4f2BtO(dVOf~;N`EUl~OtJkj9e_YAp_S68p2h#cGQ4a#ESx<9!0SxB~M0lY6yMeVDzw!@=pPZ`$ zn)O)yBtTGYSnN9%?G~igVjUSNWOBk{)^07JK6gpv1z5@Lk+y#C@%=f}QJYTWMvols zf;_OOj=ceB@$3yyhwjToSF+AE63`c|G~0~-vPNWuXfesc#Ori1nFJ*IzR`j&y+v+- z=P%v?YE_!@B8e8}hL5#XvOz}yc3!pvj$YL+0c8tBOQ%+@*G#peX)i+*0zyuw>;^(^ zfo=6I@eJt69nx5Fw|GHj^{LCHg7J-mDC-Z-Us&a$94Dp)D(+V37Y8BMzOt}A*kOX| zcu(^TJXIIZwe`}~<}I4@(R?eQlJ%(tDTDU`t_n*}>8HMF2jA0nVx4Vlbi73^7RzsI zgM3&!D*`Vq3OY2LjInN)7O?PH`QK>qg^X-ASH|l7da|a!ru*-9Na&Lc4Sw{4?~nEP z)3y~lQ1Se^Q?u)-KqEILTe<3cpVdZ_cfQ2cRW)020x07tkn};mj_=-k*n#Em)rM=w zFB2D(a%4rzijx`p`zC||HuE~{~8#5a&zH)zR*vf-u(XaFE%%Gk@uoHWsOXp zSqF2MvUFyEC~T+tHcp;CDp{}qH-KsV5*g6v1Eno&9gp|=l)G8j_o{Jey52aVG{`Ys zK}x@F0sm;gzr`o@`Jmlo6@U6Y2XZ?|@xlimjtpy4cW&RFaR}oQZnMsuI$Jw!herLy z?c2GeN!FZYaw=C8_wU@Au;2USlUW?w+Zg12ApIVIY2~qSI%DVB&dI_%bnoN zoACRm&UKh*2aWbzzxli0|5LkEjz`m}dZ(_8CmJW{*YqbAU9_(zHN$)H_qM3=s(z(L)0C?EHLj@Q93W}o0`c^_w(~lJ 
zt(m`-&*)eBU3&dmIFrZoXRE)1pAB$c9`a8FYcCnpG~S@s=3I#Q{U)It(0uY}oK-k{ zGH)-~lt-TOA#d~z*?Ac)@8&M*$gwk1FZ&%LTAthuab?CG%)S;G@Za<0Uaj;>*CH+* z^fui#^Axh0?9yR$umhLXjW%$xok*X5{&~9PX!4P4Mu+xc=cF{WPi_KVy>gfRmS}WATk~XpcG*l&i6(ECj)6rVb>ti zq&wPI$`i!!b#;6OgzjWBK;%S4LyEjK<}ki(uEKb}0?R@-6H^kNIh>Fw$_~JQ#)NV* z-U%nC4!c6*Ow4P{WA-!+!KzKl{%wPK#7>;TwK_!TDX9VJ<~#b5utj&@BV@(z$qpb% z*gUW03>_=H8AWgZpu~#{^@#5dN0N2oopE~d% zU{ad*@~_d3C8^du#P}``L@9sIU5gK6*CJ*(|`G~ttPGMSoL^0qC>P}>K=cFpYF@QRo%B)4+u}*CUZjTQ<^J}bvt}2fVArI;%8~#$F$#`*AY(LNDDn# zosa%2J)PJBYrWGMmnMBz$HNuRN}Ec@`|W4)DA*9Ufb^0cy5u?jqYE=KqZ5~B9olk# zB3!ghr!G?0Q?BBX0oR3JEs4VXcKXG zF6<;Mp-F$W<+&|fiB&L`=hMFlBTspBJ#AwZypZ@GZ9^>Cnu|$jEfT0td98RWzrsid ze~P){;923&U;CLpQd;hPd)HrgVt6rEQ%_xu9F`s$C%kwoqPHtvI$`pk=RGXUP$gdU z(4Vr>58B=f_&lG#kPlBC-8>GAIM(3k{PFy4z82Z9e_tQfW5etj-KHu>0b{W*gG`bivLKu zbhYihZGm>k6h4uU`+JOHcF<{(Ft0tZOV zL6me(nXxGH9uP&2zizuS7cgjGF-Bew2LveR6udTRLTEy6%Q2AKmUi1e9h`TsN!yvz z+~s67*FYXv4A5E!D58tc%Xpofo3!44aHINrK8pu@ay}2p0qC%}yq`=MV{p*`w8rNX zf$c!DJr9~lmB zju8FQ-sFK*Cg1A(?W-pMBCrX7ai+zI`&ozr5@sR01POX zQA!(dpecpR!kGo5v@I&oD<*8{pD{@^jNZ||_=&$P3e6wX697s7G=?A*n}7B)(Gh0MmEQ9p9YKEeGHAm%jLFcDHaRLYAM`=FaEW zn)?^JVPAlhNu`M=xn$)S;*8(lm}D2`{y>Ra z0X@e8-wvlM$jbe;)zA2;C}fUaA-90{idR{)@~fWJ8Qr!6K9F(qgaP&ovwVTGICkPo1v4R_5Y;=i+*kHg<5U2ZK90X|fRsfCZDkJCu1@tABLgi=YIFQ! zt8><|EIKWSI&)z8_qX^}=>GZMPxl18pqYyo3ylCnb&j3``SCs482M%60AQx{@nT%6 zRen~rM_QcXUWPz=Z$W2v3|04bl~w4f`JHHgII^NIqzAb3VUaA!Z~C|T0gEbuLFtnJ z?QefR{mTzN`Do-so7kV6kUR2d9s;Cxu(J=@po8fQ#Jkw=mBV>5iohUHbJM79{~I0-^jzbB$Fvp002M$Nklh~IHP zt6h5Q?&k9^zKA~^cp4ATK#w|1)q*0c=jYvPI|z82@R3D}vdX2ud7i$R_EcnidEMX5 zb~5PO%BID?`ozKk*0@aJ=IQK5pG@1|liRc3eEPfIuYSF` zcI8?wA>MAjZb!nbD@Nr$GEka)%>%!s?_f?oif`Sy*J4ZZ8m}G3sBOP!45z)lXcy9+ z#$8;FEY}O_^iA$Zt~Um$zf51!>15YxY6{N|Kcwi+jPiwhl~H! zU;kpw(URw5jYVE&?f$HB<^nR;jwL$sRofBNwZ2B(8>=|{b{5w3hk?tFJ7a)_{o&kG z0CoTNum5_^fM_;2;|YDcg#vvz&|N)Wy>cl&qqoD9(>E5W_q3zPxPpNP%lISt@O~Ab zc)o=lZS_zGw1fP(=(>61=G6JQEPn4t+w17JNUFWtsRL-_>Q0+D$Aw>uP5NX0rY~K( zK6QE`ek+d_OAk2Y+2ODjsL2M5%faMy4c)N6dOu@K70=WUkdrGW-{P~zHqJu__dIm8 zvEGiGnl)B!tdL9}&HuVxG!I(<&{!;5?ylda`z$Kn%iYR=r^m92FHZ|XWCzdiYUVEZ zmyC%&3uEO+KN-$MOy4#`McC+@x+v)4f5!WH;E8g^l zl_^F}C}mjyf4YS+arEum-FWr%`|-a84?oG%lNS$4vv|F?1K`lnbF)x>|MqfkM+ayR zabfjU)!n2Om~I+4i|5IZ!_$qe`CPwr#{h5oY8CmV0GEXQYh#hCSZ zWwWsKyo0AKY{Hi+-h{(r2PzvAlMy?b#^p`zzQ&Mj*-`my^YM2--n*l0P$lp-$^s#KmXaObi{+vTjI<_) zi7$gL`W@V@E@7>&_{1l%mD-^cxkQ?OYy|%*Zq{oSLqEs+ccBek>SZp z;Aok)*LC6gAB}j@2aOmZSd)uc1X2d+3ya5yuOpiUtfDL{adjt>KlLQd2735Wns_=2 zv+t2<`2l`)U`VfwW2uRU?&B@sbLwpPES-K`&qqD%Tt`<*OW1kRzvcR7mF?TIqK)j= zSq$F_r`$bVDQc5_rWoE$rMT!Jo_VJ(hS%HtJ(sR9eTJqjjnY}4Egi%5%J=P4X%-{- zoHTS~r6qd9=-w$$Hv|3_Hl;lEMCVivGL4N=7jDSy+V2iQBS%xlLPXFiuYCI>XVuS& z+qE>*bp-6l7yYPsz3Okp_h8y>;ode#s5k4K&TkcX%02bH?x+20ZwC9q%6E#ajD7ZQ zHCw@F;bkjp+R3zX&Q$d2C(6h%Nl+D$j@CD@kTccJ&P~ei$o@!VR+>j;S^22D-&NUQ|T2Rtv=g28Qs;hT}`4N z2A-PO9m*={-7Lq?9y_!-TWZJi8-A#X!B2ne0LZ(W&vJwEbr!21Jv!4seOWua8;}lw zdoOE}6IrNTx^`oLZGOy6FqP}ze!yDRE={Dy6+$2-aQNhr51TOW+uUvPb32P8gS4w1 z80D}+mbfQ7XiEIiAIS_r?{t&4i)W8!1)d9)K$k=1e>MR7_tzV=_x`=Cyl>qx(F_P^ zEOI2kho5gUeJ}s$tjP~GnJ{^>o%iRTe%ZveL1*!Ir58+i9|Tl9%^Hw@Y!)x9vVPm7 z^T$8>!R9x=`KP!XNY*5YOxbJe7NRU>S(4r>yn!-0fcJOX9{KsDEBP2@ zEek+=n(x}vfkFWdCW-know|$;t;)dIV4B2{EfaMEb`unnO`!n4e50~l;dVs0*+N|% znON|7{Z$iUz>L8)FrI}COW}h};vAS{;s(#emp>d$rvBNwljbE<>diCxk}(~^?qOv|M7TiLd;6~Mibm4S@^RqGATV3 zaQGn51W;_U@Hh~Ctew*R{D-m_WgW~aU7I_e+&)UCj|JWWaDMx%-$Yk#VM_mGX$hll zCePlR_`MvkI18&L(Rj>7fx`uxUYA#soyuT*2#}>S$I_^Y7+>qGr&*PnVB+)ev&q=; zKz%X-2;;As(GWlZfBZw0cYw& z{kpaoLtfBKPwcn^(PaVjfr0=TGDaVNlN%6Wl`YNF0@nS_|NJ-qu=zjy`+pm_oSsUb 
z0gdRLy~(P>(hls()LD-D-*~-0z4Q>6SDo-xtRA(qfAMF3l3w1u`OW8F1<+mZyoL+Y zjx4Mlj7G9T-*IvA@pnELAPtzMjU7BvKN1VL+s0pJ*pka^(8K!$7&p<d@JrF|Bx4LLr(>i zBa?}8VdF<}fbZmM`jIGlTEBVz;)SfHk9D1ebMYKZY#|L8Dox{9?$_~P=l&8f5R z$Faq8i+~nh?U-2Pf?WUlYF6YOZdo{EhW9`GFrCx6ILV>D$fCpk=&*B(UN%-c764yWt+`FI zeJ5EXhsF=;>hOWidQk`Q?n!w7ykF)&-nfAaliTf_c=k5HGo4_^fZZ$lZ8~`tSTaVk z0DnM$zoP;`Zwy3ty~xTP-E%;;IcTCpKXSLHZPP{CuCa=`e9{gX?c!j5&gmSB1oVT$Ums@Z!_8UM zKY%~b{CTwS8*d@@PGcA}jb3f+Y2gxI)RXpN9IxN9NCK26ga7pDKaD#}^Ce2&0vNYv zYYbuzWI-_Q(OsDB2N(Qk>`M`*7P|F;JFRd!4G&SU;v|t<#xJJBj;2CHnfMC`Six1& z_I<)mAS%T+?M8M@>?YNSMj#++O_n9peP2W4+`5sNjCxyI1Yio*V^=1C6TX!ZTk&_{ zT$`39G{x9eSqY0|diE`dSOI*;mOoN|3n=yS{x})ng*}&WN2DtFXZH_0lON4 z=4+cTKrG%E><@p)>{bp|y7RyES6z@s3mw3EB8W-?LW;^@v4WdW$(i_imgsdI3-EyHT>Udyi+qm)JV ztok!?=~4rkvPGj{lh(-dqSe5QqL<9jwbdtd)snX^#2e*BSmM_rWXd%i+QgKAJSLNZ zS6;LZ6Y*MIdas-X#VeKTPx-bqtybl|K;9`19;5EX@S~<7wdp?=yO>_p?Z`Er&jB)klG<%WummPVN_G5o^-#wfD7?^;vl! zN&Lw}-pPpefhI*oC!MlrUNIGNrBXeSL$W;&x^93tiQ$9G(wW7Bsc3qw4|=v=$~PIs zi`6c@@HWCqbNVCs_i!GMyr*^1=Ye?ME&{f6EuPJriQ2#6+lsT~e%1TVq7_anft$+BbrWUs5ld zNCOBB#DDekUv3^YkvY+OgUvnV!LQ|er)-C3mE1vg0Y-oLVJ=nzg|!uf_k#iGRnkm0 zUo1XtI{8Q>}p3Wi80<$h%pP-kW-2 zO{iV~iQGjzOg>CBj|3LU3yA+t2V4oq-!c$u>bP$ce}1?D!58v%db>rJ$AJzXy#GO9 zn>ueYQ5f~SteLni*ve#e1avegw>{h>>RCKIxgVI)h%i}jSexyvuUJSn0X322J2q3Y zCJYX2EN=C7EsIZ+;Tu`qIY=mJ-P2@eDMfr$=WTCM>CfvR(9j`h^8taIb#nxrqE&FTw~ zs9d(u>Z_H*LIrD5x`tKMxWE{>$!aUPVhOk_8F}7<6HDR8g$JGw?E$qWUHE#xt>QJR z3D1g>OtSW62@j}$cYkB>+O_uQ&!oyk6gb6Kr7`Wx`1dXlpnBU!R$n*q&Kmqc^zE%Z zj1_fttqiRBW?>*&ZNDen1B9l}KK}50WooRJdyLzGg$`EL_DsToe@>bGw=%Xg zR+#>)_*@DA8{gKBw0+}f$k#gf zbb+zlHtZiB({BT6(HYeht9L&Rtiw|aR~K@vqq}ov0$u~>52WK+-dd2n^B@_`ozQ1r z=5nW9K0xeQ{Ij5Sx&_hrq#p)Oyn6AtGH9=Xn(-Y#YT-(sZDEm*>)&6xR2%Mq=lZsP z{N--~1#fSD^u3R#&1rYQ)9>Ub+ku#ubHQ^f7hh-2U6}si>pLw92X>L&BLPoz0-*Ux zmX%!o*hv5cwBRt~mCE>Ft5><1;d7cBC^BN93+RBye9r1K-@IwZR*P-gI{8%>Uj%Bh z76eKh4j{1$gjFs8_DF7DlntNxJGVfB28+_>RRA@Xz;x;E`avM(Zv#KLDN}z(+qomJ z-C4!DXSGk~-nexuYx+xx!`{yAcrO`z+o6o@+&W6vu!w~b0KWn-lVLuK=@BwXx2t1$ z6P%HW>FbtWQ&;pI5Swo2^PHZgpZM!n2gXQ4SF{1M=HR>HJo~6K}JVc z_PvH%z${~x#-pV-EgZgqqSCP#d?xo`fbCuNQRi|Ia{pfE3>5z5+jihImYDfYqQ5^& zON+VEVA*Ns%~*f~lBXTk_9_Pp-+i@53oZIl_42a%e^nnqKjYc-p_TP*I_FKG^PY4g z`CRkMGKhY^1G2?G?VaWSiS(wi9XBf$hTG4xIdJ7li(6+ZQ@a3r|7XYkXmhK2(>}@E zja;l9t1o-+{r5M2QeW`bfA{wtmU(s#XXhSAA8tXE906_tYj}v4$~8Dc_W*Nvj>Q7= zS8eEKMg}SS(KWlC^gLbsxHCasW+l(%gdI=%gnh{mwPO6~fYUenkj=w>Sa^b=UEsC8=MsAI49XN1w7P$D8-&5S#fIFWL#2 z<5L`;fb;HOy~-WYc3dB+@p54Vc2JP#UG1>7SatGL1}yP{(CD|H)z)8zx7p2c_x8;e zZ%&WgSzs>N#^A|Y@c^L~A|44BcRfJ=J;1 z)OT21Uvo)|8Tvf$wG(`L7AQRnbOM*R`g!#x8LQ2s!wib+G;<$1W`*HqN1xi;5<_!T zmZ~XEV-`O$>0=zKt?%7;u!WiyU~98pAo>=s=m@LdtIB&gQ++I^! 
z@jY8{C!tAfh4EBn`LAWGpi$5htLtsVvCdWTi-4)<0S$KIicgi7wn6ITs zXWZyBgQ>0u)Sy|UGxU3pW9!+n2s8i|B*oa^o$yRqQ~1+6Ee@~vTi-zgM%G#Q%%orX z5jExN+D~FyY2~*S)UA+hgHHG_mRk0JTT+>{<82LpS?ez#Y(Oz_Cs_@A)$=OHR)%zN5?4n8_Kz%}VoF@{^L(E5 zPhL|v;SgRr+xry|UkkIPeb~AqZ=Q-yX-(X19z9<&?Xdu|=%2KQzfsb?CyNQvS$VGn zd)Lcv!CQ4uy(TUz-qfK98<}6}_GWwSzVLO_IcBVvl8}(S9{@9+Ro5&T{yAzi_H0@8 zmsJYK4|<3H4T=%l&>O{lN=@2*FIH^JIDu+Y_|YeI+5fH^w5!YEj{4bQDVn6Q3fq-D z3pM=g`Fxbe_4d24>OcpuKHcaEw`H6&1G*l*;Av5XACVGa!j(pJ%mNKkCtT0^29yXh zk;u-(DINK4$xsdhr@3vVVCkyD5-jDD#>8{!8XewuO(#T*3E57VUUunuor1PCWX>aADq~12b6MST5=LfKMecG!(Ab{#BOp7@C%L$Iqd4 z-WR727L80=E z`Ci1V{;3B;Z(-%PQl0G3t9E3v4sGj++Y%u&+qc65vS)BHdcf#hKw+i;;<(io;)Qza zGkF!vAP8Y-6?|L=pna0?e1cE>S&WnR@M<_Pc?jA=iS{SK?$pnu(RFoU@pzH39!~iu zoo#Xj*A8`+3oI|(5aizVO8?uoSmO98(c$Z)9=*vsUdRhA6NbJhjVWqx3boP}WCdJx z7>%!^_2HfTdFOAI59d4iwEXjb9udMJ5%%{AiFNZz7 zJbKV~?pGS*y()hPY58+_naLjT2cfh06G?}+mJBEbE^&o&?)0$$?tI@iK`|hH*+k*Y z>HOt3Q2eIKyV+J->g!zeu!u3pW-Z2Yt{F+-{mXH?0GRl}cRvUOZE)RW;3&)AHwQL1 z0x(!g9m}=FtNqDZbQ**oYC!+q`BV8e&T_g5vpOh~@-cOo{zG60izr4EnSAY4Zg*zLto`qemCroVl%mgN|{##Z)cjFm%E=?Me2X%V9vX6BjSxz9w znm4|LH*Nm@AAemLpAFDUhSdo$3c$lciYp7g!IkCkp$^vUuspttP5fD(vH~?>8NjPH zVuE%x%Ob$fk@EdK7ZoNEXR=K7J-}{sefIg6Gnq08e-b!#BsV@EeQlcqk^@%OEZ6t%cR1|x>iq727JvECPv#I)i!~;3mo8r!0MlV$76Z5fxbXhP zwtG9IGC5E7=b)}!$Lz>03m=yECdm6uAnO}g6Y>rJv^r}QKr0*9DJ=b2%4;XrN&`Ut zy^r4?l>u--|Faf&PFDXcO$V^3yxP{$Cc)>#Gil)u zmHy#J($;tGEAAyj%D61e>c2|A&aJffu1Rj`1F}sVjc0r!r}hxg9$|c%(>-(xw;Lv* z&jJ(yH2Qh;o9w(!UdXxhwJB-b4~*oi)_c~fEL8#ebihpL%NGy52X-xtwqe6=Rk3%Et+T99c$Bun-E z>6Ej{^0bv$J_j5pM|bkCuk%@!ejWH2@bLMitD8Uk!AH}5J1p%$bZ7%Cv4M}@B-4+x zO!U!P+C>1MQd6rvl>t7g+v!IN)1-AQy5r5O^E@A_^vRJDf%g>>kTq5u>ZtZiA?ds9!(B{d$f3c>JMXnxKz3Lg%c>rxMjgv-p zDxZT_@3#1}7Cf@j4S=!xNdIKAPYziYzphUL;sFn|u?Jbe+1*l=1hn4Y97-=%)LBTe zxcIYQ{AP2tMN4yqUw`^phYFtEoNTAg^gWei7K+dVbdMe~*;flXS!O0{V?BrJbjkRg z4MqIei-{f-Q))w@%>WT&XxpQa75}R*h zI|A;u(?Ixt`t;YM6V9DE7vObf^f0hdA7FP1@P*Dg{O*xS2Ovb|Y?HqoV5%Pgkn?YQ zro~LIrGS^mb7}D+c~r?3M^B$ie%@ORs7Qh5O}-CuFQ4hjPuDZPQ-j9e&6U}x2YSG82S#{gp(6$zcwMYJ| zEsn+8c6gJ=8N0u-E3v*)|6W@-n65i^;`Gp_J}opIX_0_z%)(4Op{rlLV1*m_SYN}c zS{u-RvHHG~`wPMK1Kr!HlCSq!AgevG zAV1N98Xg;G*lj{T@6W}bG4j&UwX^E;_U-78?rYJEw_IhBXKid2y^h^WA(D>oy3w>Q->g#7bQ@hqru$o$H+^1nx+Ig+tn~Gr|KKXGvHjE@9Gfw(P5+@b^!13h@QD}<;v#6 z58j_K(zr&U%VyDawUGnSWf4i62Xym?Z(Kd&!*of^iMnA^q8-GVX`u6SOoOef4}R}#|a+sStWfN|BZeO_wAa03e=D_4)Y8X5_hG}pT!eqQ(7 zV5>Cz>`Hi38ReN^1n|8R;GGnd&MNy@f4Y`{d`ZvF_oN`p!goCcOtk0(WXYd3enLA! 
zEW8d>@VPdG?5fHq44w^86)&dD5-jedD~vo0`iiBP7`1$_SH^iDJ*6uD>AciMu~NFj ziv)S-=WeF>-br#ANjC;xtDJpJe%&Qhim}3%&h|N?h7F>`lj*E0o0Vf}PR)4U>y-eW zpj(E0rl zH;~9}8yGns8jFHrH1*Bee!itSuX-lUp`$nGik>AD+p?timn^u)n?ADZZ`#Aulc(Fi zXqYtX;OE)+JdbxxY$uWdT0HS?4t*3&K_yt(kn+}KJULJ{`~t@NjQ*7R#8IE6-jV64m*NZ4v$38hcO&cN0Q^puFa1#%019wZUG!OUAlxGMBNy}BBlW7h!bqg_rjAznOP8(0$)hy<;LYeEJn=jFhkQ>G>z^>gBZQPs(WTG7 z{j>pLJx-Sf+%0i`gYL^O_G&J zQA!s4nIqCWUDmP|0>H$Y8D7Y>)lK| zll?~lXw9)UcONg`sjSED1UjB=z|VU8U;{I$I)rd{AOOG&n82cc zUv5}TTK6~UM=CI*`EP^YEae(p?F#^5G0S~|Nq?bcpl1Sgu*nF3@=kGn+!op|vpU+7 zEZxozt;wVT@_{`sHm3u??g#$;`42wWJZ|#w#kCI7%L49Vme5C}(Inw;6B-uiAH9Em z^VwI|Hot5@`u^DfY=FUBZ`{pg#<5ca)*Lzh-LWqEvMtOHn<%~4MDEqgzl+anG6qbVLrY5w zFP^oKU_yT(V4jb2vI4vXD)Vva9aj+F{UD#l$*V2V^u(zqErY6$`u6OnxMZMh{S^J0n5g$y2La22jp=V zU{Zt+CjE3jD_d>MmVX8l51ah#2?)V6fDd1`CW^N1`pCl|nZQa1=wk5Bijajo@EB+g zuwp6s?*0>5LtZL;zE_*%5MvfuGx;UbluvEL7Eo5)EEyfzh}S@D8T#t-^#P?=PLHqFX#MCrA8tO+2mig?shrvRo#$$0z`~gO^39VL zIu2zqc02H|g@SagMHb+L35B{C3#BF|tRO8;aNh#lGJ&;V0DQ}4Cq8SP@uu&+wMQ1r z@efH2Nj_#H5Y8l5rPN-#c4=E0e`2fSny4Hv=QT?qIj~^XUqlF1z6o zYvYHt4}PmJWbymu$bKs5+kseG^#L%e%8$vxpQCYNR+d);=sNSqPeJ^Den3on_L`BHXSTeGl0&b~7yM51X6Ft_vFuT%3sM0fINIhROyU-R<~2< zr9-a{-Nojzf~<2d0DLFYECP4!!Sg2kmDBDF<+Z~Jjdt1KDH*0+e*T-!TG+hQvo+S% z|9$$!mCe8T%RlJ=K#L9fWPP7;u5p7lv@Dx-wH9-9zum0lC1Bni7e)a2r zYQgZAn?rxoXTNF}&o4Fyj^rn}`>R<|)3efBbCD>nJQj5ve5q~j z$P?g#9$^iCul^SA9g4hb*K(J@^#Z+!$M5lP+(IlG0ZfkoPw5PK0ho?=aNg}s%BM20PL z=$Fp4lVhy#qxDI#Z84pOMJ0N=0mR6ywl=R^;Lf6D0`aI(F1rk=aig!chY$`cUQ`W z77HyFu2`*~Krzx(yCHXnWOyIFnKcP9(_A&aXQzw`d=?z)y`^xpcy z6M>s&0@S&H1LVI={}~716rO8`+JTR{H+TQtFMqYU_|eA$E~95p*4^lR+X9LH=EjW< zt}Q**u6CO7TR*fWE9fAn`Vtnn4!BnLb}aq-fA{ZecYoJ{cZ&;+Jpib;Zni6@#d|WX zuJ^udCrW(oa{C=$(i^I4=BT}+N6g`7>|6ce``|e#KRmQ!iV9UHHv@mkB$pOILcXhy z)%I^(zdC$iD8lj^Pj0mc-=Ji`$;+1?=Eml$79&n)m~nI#6UunTXb0b9R8U*y-x^qM z@nLjVX;^4pe$P9UINg4`MTz_E+S*e;u1=5S4rb}CK=a$R&FZ=q6-~=M2VH^}fXyBK zt{;BcqP_lv?pJR8v_f(%CGYFkFL$QK2kn0B0QK4)z*C##4(DEHei)lry!70-l?={U ztu}S?#F_L}ZV2Puw9)8!Q8_f7Z!#VMdf#h)@x1eSjP>xrc?epNdNPI}C;MwN+`G^> z>T(B&-d8^-&!;bLRw#=~@HyjXMV=0DZzw7=#oKKdf=$Ixxp(6PX8i+|HECN~z_ zPPTj3;=guf(SnSR{!Bj_AL_F$e!qFtm?$|m27mRcz9b&)Yd&}O=!KEntJVAU&iEO3 zh8hX(l&?0VU2&siL3ZS-cXYV1w7x+c{pa=C!l~rO0+&VFJGmjecI@b)p=1!SzxV9;>Bcv-lb3hdpiB`4O`=BBvE)PLZz7iCbe6o&;#@)=w0+z zoYF>zRiL44XfFerS}44VaM9M2{@1v~pP>HtXc~>{fcR-d4Z_lly{ddQ1_KLx7cMQj z%`2XCn?!Z*y`*{|qskBPKw}sE^{~#wORslmSc9gXOdKyHQ}{kBT=5h7q_mj%yVAmQXYm1i(S=wiVs;NqoZKA3$Y59N|Ul z_3#akeI{RDN}!w~c@YnzbzYOx5?d1t{9Sc{hWX5we?{zTZlg*55*VtvYCzh2N@EGY z%C~^i*-cDo7>GyBR8$e>x>S5uhVkKS5l8)+MR;!TzU501Czk=$3iB-(Hw!ks>v!7f zqO==jCG!h>9iGWnxPGLz_%Mv=IU1`IFT88J)6{`_NC64;wyj3|=uZk0u74%MFI!#F zTn}gPJ^I&Y=X;y5=j3)C_pEDea~L^1+P)KRD^KKWh@Ck8MSt&=V)WWfY|M)`tZ7T?Hy0r9v;sUlqz|G+8k44===43;?z=R|n}@YG>3Nz z->ZO$>;=AT`7!b!(WOrfz{#Eg6rC#$AL4cQ<&mriDo$@Elbtpq-j?>^U-ZvPePx}r z=so;Jns(D?>Y(S+QnW?W`o=19)WZVOZD=SD{erSrU!-v2cvslTCm>@OBE6!oL)~QB zFTEE5);UYCf7rI=k9^SRL!rvrKg@Mq_@!qHuYTwKFum73tBzNUUX;|he(92{ly$xV z8StP#Us9j@uC_c_i%rTF&Pp*Y=l{`z7T&Tw#86HqyQb?1g;L5K} zteEqB+GFob3a&ShdD@KdM1bcyD5^ndddxv~SF@dUKL zH^GP}A7$bFra|>ce)d7+3?6!^Z?f5{qo%H^d`&m@-wVJQT^zMTuV(2}S+mw9`?f|rCe;8;8 z=ziLO(IKI>(f;(0zQ4H^Ao9DfzTW)wFaB(EBrx=6|MSlpSO?6P|C_>??6R&u5>NTZ zl~3d9IPrROHo)V0fQ06M{bv5Qv&7kz%Q^#GV30+M53&xoZT9`XO?t0>eKYXSTLV248*Y79XBE5R5<(^;3klhJ_aN!V~-P+CT;PARpD6J z*XP}@T>M#IyK<$;e{Nr@w;7PvxDExR7jpm|vS?w&P6G#;It=qkV2?B%lt?e2=XP#! 
z#wsh9CAJmI|7cb(EX2+OG7Iz7S661i=hVql9dvYgXy%&3!KViPCct=TQqQWD#qgVe zZS9Q<6>WiEa_xpSAxl<=Bmw`PRL@)!{O+^g6*~6_E!14d>eU2<)gjRGP?p!XZ?<*$ zXcL`id-y18^FV(*ypiP{mqvT5S0IqK0u*FDOCR$MYL~~p0FGx_J*x{qzOrL7!0%#P z!>>1SF^S!ug{KJ<{#ab_VWq~eI6xBMyf1xlumuymnzoSDCV+{ng9Ekc*}7f&fN5T%VaoR^U3=kri0eDd3+|X1Dr)KR}1{~UcJ>KTD;-^_tK54)31>`3vyg_VATWG zt?}?-2PV=R>ekK(!1ldpb|9w$&Vo-7(5j74(I?L>XSZ^r@4wA_?vjAkpMep; zGuGF9{J)p=_Pqg|TQCkxQR)Fwl7?UY?u*Tv!v5oDpO4iw9Z1Ht6Jr@WGXP9zz#whx zVeVAu0_{Tk0UnGsAD+h>Ag20x`1sxcug6*>K0-ev_u7aBXV!&ZT>5hJ@kbwR-tGS4 zg^Qch={N0`tDob6ov&UQe+A+tr)Q3yP1f_n+z;#3m*pK(tG8v@cn*?B5x%i*@~46eK@(Q~4g5EIr_ov5W)0EkFRuEbvBB z=3vGor4K0Xn(Vw}v0T02Ir-9>r7vH1fTofC(%6+%IbHa0+)pGASuF$GfzA)+z{hV| z80gSjyN=OZSsZ$+ZXu(1_^-WGHa{Y%fREw+p2PYFiFfu)E9jXWJs~C&{CiU6V zc4}w(Qqv;H|}biwpYq)2HKkq&!LQdCx_czCfF*$^x^lB}Taf0LWQTs$DNX?MtS6JY}pHv+KjchD9oi zLnRUo)m8c9?UT-L*mXa@*BuTx7U(^5Ap8kndkNvYymr;LPPcHQKjL!YKtSz(```XI z0j~D~MUQO$`mcVr`S~w@z1qv@sfdXF;eYcFvNL+7^ws0+{HorjPRT=Y)P1prHgg0& ztnyjzKFlq}>&A>+Af0Xz_12BA>xaIoELmZH`TP7vJCEgFK>qz?KG!L2z0 z7QN{c3#j@cp!Cu7J3allaS&a(1C$aaWhMvoKHY}TKm75Zv>?&Wm0XaJLl)%trHv3C z*Id1+GrLRwe_Z|7)1^m}pm!dmKx&-;5}D9dKy`_(?jc8zGvv@n?r`T~%=Th!%~tVh z|FbnCX|}t(StBooyA(amR#UP|LjwsUkXj(MKqjA`$N92oJfAqPi16_6aQBGt@bDO; zYx&3#W3@%-F-F&4%-o?6 zqdf188`aHu9X^K9Xc$^8EK^=P_~95XDd_ZZZ}&p5?`9QhYYMxV>5%p?17yA@^^SbQU_ zivGv~GPzMV3mFzx4%LsGpp&CHe!3M8C<9*_Gn!X2cE4E2IAaxfn71hV!q*q0ZM#kD z`yDns^b0m)xG-OPb~pbCt`L8>>Nk0q)^H$W;R=>E?SiH|F)qjjK_mu~)MO9@|){KIl_@Ld+b(f_@?;WpP9n{K;&fyU5 zjK11waI79v-yvMrlfTzq2FFL|Fjww-zuMS~$@Ir>?{y@_jT*e%kMVkdEs&E9U4mKu zQ8Zf&L7Relz92{QZ5e1XHJPd|+KeH|WzQHs7az&F{?x^@86HO?s4pWlScDy3KHjZ7 zI_PcTL!lMM;E(VYO~S7-L6LS^O( zX{qLVYS&$mcW>!R_Te&%SqN7ibO%parA;K~S_xC0_JT^-5RP+0i0x`osGC@zV#FSP1B<xrhUMNfAp}>Sr4{l z`>y*9^_70`L?d)ovC05zp7#Yuz`asm@(vyEz4M_L;cddyK(K_9WpQ~+3KSbC8l!JP2B;agAi*0EG>XNl3ddmaKg!>=Y@4s zvN@m>EdZ6@doltC{6I?!5ldVFD^{Yw6N`bdOmqd4`%~Eyt+Vp20*a- z+0psao;y6Hep_4^p5X3Y6>OLOdy|d4l6fEV5VIg<;CrPB;Ju77Zx@Qf#6^L;GkCT* zFe^Y4Pt)KWsysKLLcHVHFl-=Sof%pU^cir(8zwupoASc5W%__;!FCX$LAnDBZ6$or z0F05sAxyh7zQEx|-bH((jjhhN8$8<@&KSl(a=5Ldd-8rYfaO)nX!STl_N69PM;n}J z2cZ~vTHmSsyKSkxb)zsXpLN(}O-*{$NCHd%Nr1F%W{i53~I=JmzMa;u3c zdOFnYZZX>T?!G^v8Q{cVc&dZAc=%ad82)YGZrikwCcpXZhj~=Jvbp%h*JBJY0WmQU z(!_yScyw=z9J=k>8L!EMe&}Xp{^5gXFH!@pE!Zo){yA9^CtUZ`tq`0_kv*CmyYI2tzp||DCq!>!VDl z7`=FcK8a_aJrj0geHcKLN$0FjGHucpOx{l8-5tLkiNE*P=6IViy-ZCC9mch1^&Q)0bgY|K|{}w_ZCp6U*H#-kCh}K2t|KTdu|{Wb#CY0_PGi4t<<= z_R(U--!8!Z)eJz+IWPg_A*j!%{rze`F>!C@+3zr7yA~L^=qi)fy9~qW?nEHNL-}jh ziQid1{6@fe-_fbgF4)%?kufQSGL+Fr-3f;a?M!*HRp@75Ix!0~D;7IlwTy(k1?9We zqRHR?{?qSv4scq4D90pgrkcr*<9CX)$e>C3NHO-Z7d8 zyF!04Fw7XPbT8KSE3XS;Vbb#U49Ss3XV1F1z1Wm{knvPVC0=Y1`Duc zuUBUWyFREa^*WR{E8Y1`;nl?X{rvpr?J&C$KmB-kTD&m^ zI&|{f=IF`#nd zhm9ZT^@r(|Jsk>4)?R#IhnWSy#=3bu>$B-93;W78;nPmW-E_J`hF@wi<#)fkknXYg zWPy4cYN~4E8Ak9|Un#uZotyEw1H8!vT-RbLJt?0yxL5zG?HRWjammJ&D;KA|j~{=j zaqrpiZ0A;e(%3kXF}g|pcMRLkq1j_GuZq%l^Zps9gBGFVg$d)6jQCh!W!yj1_(~sk z{Z`}L>JK)VBFh#ZzD@VzKMRTE8jkDRMTX{RX#VbulEZm=gLf$T&cv~~{V3i|2RS>! 
z;hpA%V3Xq)>lf$)3kE(w`!EJ8#2g3yN*x2HSFenn6pC3$bSTsc!@@=uq85DGc z1*(zn5D|Nx+)dp5#!~7?CVWo6+&L&MZ0!plX)YDwX~tK<=b1lYo+@v~g|$1r8Xa1{ z`eM!;@*HYG>cU0gqAZHFt13R**F4CWTYq7OX~9OF$@9HD>W{rx7@%;s+u-X97phZZ z^4jB-7WM6Nyi+|ama+?IU-ZEL``dvmY}fS`H1@0pjz}6c0AmQ7Zxh)b&Ck>`LM{p~ z&%@P@^JcZC#~cRfI%5ad-iILF-8*R!=sT%qW+a`l{1v&4hQLS})Kru&#i z`YC72A10-%ij$(UhPKM6{mxJ_X@@<+qy`O=OOe$ zD}D0swB>kc_rlNccgbF86%!nu4;0?&05LtISwKcs7EU9pJzxB`rJc6xk3P~0=r(2c zY&@6IGB^`Psd^6GdbiRTtjSTnXizeY4G+ez8*t*I&SePT;mbx!e{hMa;7T70}53(dR5<5yJg^iI=mO`-s7>w zn=&E{nyRA;!>#r%4H@o9uftE$(oEjLQ?k+yk>Fp=w9yDToffYn;l#r4Ftc%+8X1X{eEK`dlrgxLM!N*V*L++| zTS|i_>&5W?PLlZs1P^RsZ+UEyH|`rbsGgGbPkhT?{^eh7KI<^KqYaey6;tl>LKBF` zd*;NE&0BfCnn)aJU^5e+%6*t#V3`aR45Pv&lR4+cZ1Ja3;p-C=t7 zHuoBw-YP7@P_pOu+N!!Y-WV^E27T&syNL$_I%e=gR-IMykN*4z;kRe=vtNEVo++0z z&I_Buc=grgtD86Sem9`!A-KCk%U`U2FlYyP9#*@7;Z8me2)Vp@#@L_9md(jfnmnh*E!Iv zJhPY(?HRm;PkHV2SMv^yZ{wS*ZTn;}K6JRli<)3Fl-58E$d3)*8Q{OrWQCD|cg@69 z4;F&}96tNvLgnAt?7p67W)m3`cZMc?EQ9}%CY>gS_&n)d2K&5Rd2^anoxkv_G3<>& zqJ^CA6k_FY6V2DozA{E{vBviYhsQeEyH(tLGIjZC2ar{16REoy?2J!r;eNrQM&B1R zn6ASE^SV4-jOq+RyB;?Y%=jUOuWipu&#c~M+B3MzE0pnPEqFOV4DQpGwYUW;lQ5Gs z?GEpI(f?w*O0KpzcCmw6->we->>vMwne^^s1WESs6(fWAqZ7VkjLzxP>SUor-*7iW zrjy9e=7n?*UZ&hfXhs6qA1gcywY zF*Ub0o*UWDkn^j`itbr zZ1GX!A-rfJ`~u^4H}ognUa>9iHJj+3*Ce|RI`e|RT==Df#Uwx5LBJ-r>LV7pumj^^ zUHuuIzD*CX8TWRvgR5sU$Nc09@gOnJuGHEkTddbo5=ZC}|Y-18}23OpR>?ar-8F40R2 zdFk;hSUeM_&fzy>0qv48g6`L*H`8wn4nO+-_cmYO_<6<%ae1Xl=VYvO5Fw-bt#tpP z7E(Jxub9SXYx9gdg^>C2Kl=~z&OKhJq|Y|*fAHZL>GAF?VuXVO>OTDBv(5kO|NE~q z`V@w&ZSU9GT7DwEgWn&flYje%k1KLlWu1r@+8q<$GYD6K>MSHe#`@?eOv1}Ay)^kA zq+cA!te*SQrDJ3)2mQj(O;(R|76^mG%Nax7d-vVVuYdj9F$y26-7OwnOMj5Hx88Uw zsjx^U97*lEVM4e@LHInj}8{T=R|z<*=L{D|L$)GPrBY* zCK-CvSnpVij12nsGHCI9Wa!ss!&kvp7)LmBO@9`J4q&iojQX(%xbio-t10 z{AKJ#4$!}B;}JQF`1ms&Yu5~e7#j6@+PBLwj9P%W-8ji2zdn`zx|yfBF`0h*d*6L; zbjW>PrO6rDQHI4?#yp z!{}dOM|MYp6ED4*eh<-jTHIwJ(u}ts=k05p5EWX8%+TIE(D@xVTlm-R4#5@Hj;^7v z85tQ-xxZXu{brd z5+UGoyig&h;sdhlK;f^x{$gl^uj%FyhYrrXa{-H%g@rEW5})l946==IhqEqch%(%*UP~T&Rz@q29aDVm;jpOqc z1#T5O@1+)u>BeSw(Vx7aOFQ3se)65~)-IREVTCTD7xul_LMT05zpX#O18LbBqzeNR zAB_vBZyy7+Qq1*yYvlm?j9QOi009C{nOB|xAl=Hn;03L`lWtVgg0q-x{U27uAZ3`h zV{n1R03RE|q*P9S3qm+iUOv7;$eZUV%*1^Mpn3+a&U6ZS{y)YV3z2W{F-1xfF7v5_ z5(Ag#Cr_pbsxS@mygn}Kc*n4jVAKmF5ik!1VN+*ibp1RJET_JGqpPx?Kk8!Ww5ql0 znDQJNde_I-;0WhM+cAa!;@y0$&NIZGm$g$X$`C0g&Tc}Wqgc3>QBS*VYla!X25)Hg zYAJVxY(LMRbus(|r=6GKHcZ#MZI2jyb-T(g9|aq)_)HmrSVmywRyV{Zw+e`FCJYFC9Dut@ z?R{+m#3Yih>ook(zk%1>_inXejoM`Wbo#pyrq|N9Ly1 z-3~!knV{4*cJ!layPsj4I<{pXRwj(0 z6aBljz%jDl)8VfOF**W`w5R%OqB^$us5rXyH(&D2Ge9L?cqw`zq%?nvM~9atV}&Q< zV|?xHs+anK-7jGbM_}N-^7TE)fLEQSE#R23Niubs4Bhl(RJ?FV(^%0BA9|0EOi=28 zdns`+s?S_5x_d^4%?Ittf$FJaFlHe@`q33T6BfY1QP#YiLBW)vy%o?vVBswtzS<%@ zI=3W)U)kqz^qInfpS+A-?)h_Er(R1pD351&P**uyLaZk}!w;(Feb>)-f2;~lo*lT- zOuFI8(oVgD1mDTuBxJq=RsQER*N39dl(XnG6`6dcq<@~n*0yj3VW2eF-7_8lfBo}6 zbS-qo3!4uv7Mh?zyATFqqn^&Q(uCAxjOx3Xckc=9BD$vJCAYt>4?S(6Z3V3@bA9$ zgE2zixFx>i{tjt;vjMjO^`p%@Z=Kux{{2s;jZMN%9xv29s$z`~+E%SH=7wyCtJ)W=`jPA5}@gR@g zr%jTC_z?Dj$D~Qa92Qq78J>tHO^2Ig^8~ye-JfR2yj|WShk0(ckWu@tiAIyU@D}5n zr;Le^{1!PF%Nb(8znQnTt{7c-pE4%#impjA+H^o>+gbPSSqlOC8|*)B%lw@tw0rix zQP_qrHXnWR$>s;|eZQ^6yqX=Fndk7ML-9!Q-7^Fo&8z>)^=q4RcVEeC{X(9>=Q5Dr z$k26TXsWIZ7QD#t!<9VwUJ4H$hj{OW7gmgV-dhu5AigC2LZn!5d32xWV{%ReOOKy- zlm$7vl>yx1ff^gf1TIY8y%SY?wc78quMnv;Ip8sn zXftg(8Z$l^16eZ9aLI$yZUJSP_}?zXNQybi8iq6}tsP%@!2*p?0r6#qM22XxW*o&Z zzB{7NR{hGg^_{nx_CiZWO??CJb2~T2&{h9+AbGfQ{c`0#On$CzZpAmwZIG7%mb{s) zJ&4X_58G7_?M(+*1Y@wVGlci~;YZcA{63@G`ZnP+wpkO`bagaPz6Fbug#%zf-rs~- z%=laYS2?}E)1xEj|niHLSDzvSiUmH 
zP*EkgyASYM3%AlSObA@DYa<*9Ymfyco}4`0c=%nC`(i*&-8zq>vY4 zrQ(Mp@#Mkqn(u%_kaDl(u@1FN>-6?nA}9e7E*M96pfaRs2l4+M0w~U}F@rWxC+}}txK5AW%kaz4 z>%-t|494g-p6hbe#&qv2#C3zu*5b(WR(00M7#3?&d?@_PvHAr%mM11dtj4tig<-|P zhZazF=ViJl4@ay9fBNoV`ayrg&@2Xe;ynF^zIqn0>Vv?DRmQVXo#=(9t&lJr;yb!X zd1MB28dr_11b66|4C(LhS4Re+hZig);n~uamvQowPd~2C`!+APu>8I6|0tNrP_o28 zH3p+XAUsHSkt-q~BnsWc(-W_d7Yoni3fw1+oz%|O7Xw09_sxDtC4BpB%r-|imV}%;oRX1s`Xp5DAjSjrS$60hOFkVQd9c4b z?M%?-t>1TkMGF++Fh;g$BD4q|vZ(z<;}Ybc+agsut46PZ=e|5X`W6lh7Oc>X@i8(E zUg+zC2YE)nScnxg@_FzmqyCoO#*5njR-sp3fAifIq%z1BDniU@p{5wmUw`|%jXN4U zRGzd#S{-j(_f=smZnl8++UswmLk_pl(1DD>)%RLVnf|Ub$&52R!>eRQjDIw9PRhA< zcHGWud@XF?)7`}gZ-HRsEyH@_Rblz`?>u^+<^XlLont(x$(8Zlv}MFG26-{u7M_a^ zcuBpFpL{tw-D<377g-B(#i7U3#*vS^*C!h@;{DB&g@?WX@n1jpTH%AfE!3Qye933} zfl-%}AbW$)SY9B3*1;E5C&&JEu4k@cw?>0`iE?0MlPDU2yPF{d41t!*+9o zu6W+qMPA182}_hr(BH;S=8CTE&=D?$oRO~yiCQj(f6vYo0_f$_uS_2xeAw+4>5RF_ zF+L}+hm5V_tLueP{N`3;>)H~%@RK&Z2hYab{5%~jmLdV_j~@vW z?tY}0kAYGbgSTe@_v|^Y(kfn}I4F@>_<2;?@yzYn+NQq-M~Oi%m7mh-ARa<)YwsMC zDSx;vr5dyLMnkRwDkz0Coqmi2vq_i0Pkn3bRmIA!5zAbPq^g&G)M$sE^Da7%H!yl5 zoCKpVE2qXJuoSbZ!D@_3=;1G%ciIu;xsKUaowx9S3pVgt>5qoyEsY28f=>}bUZ7}uqtoLAzVFt|VHv>|yx@GL#2!$by_sXd} zHv^~4j3SIcgMwUTQQx75%6T!*OY5?a@`iri>%3BOt1i-eCr$q-KV|D+2LUg7*4I4m zduTZH8PMy6Y;c!uK9F~7GMLD3^Uop;Tzfrb(sF$k?)VP9cIe)Hc#NmepyU1e@KjE7 zOq~E?93Gm&dp;Ei@9u{Og1K|QP%MubHe4Vwje@S~5T*Nm+8}of-Gfj3g>Kbx(uB88 z9=^k~$;7PSQ#kQzqs8EsXGBGPxVh@K%ZI41M5g>WqKpqpd4rZQu0-o&$MaFC%!eF-7;Ua?~Lzbx8)$ z*8ASYqw{=l*wU%%q0ziwc#S8t;(Ip6>|oL-%b2Zf55Nbz!cDz8eb`}RB8T={#4cRlX=Ij#gw zR{E~aa*VZ*>e;Gi>D|Cj^(t$MQ%29GtQmlWr+O)OD&BpASM4Y1z!;$3tMoja=W{hY z;CVGD3AO8D8U5)KaKc;rSI*$k)hx7>)Mx2+nPm-^sc&f++e@o;O+u#ZlFiNu<@t%$ zOUEtUHM%A}Ipu=mGdiUD*Woi8Kx67R&x74_>FE_TpY%I;dls+a{fd)u(nzaCw~X5J z(gk{vZCmFlXtm>nMj5~CxMe{vh@1y7&?*_x9Zda2c2Inveeh<5tIFuh%%f$ zezXJmPH%31n#Ws{{JRul}y|Pco{# zvw8pHFE^)}knOtJWULXW*w$--p^0Lj2YEam%m8NM$rFV^laY|ziPvrtdN|_|!~OX$ zzi7gCbMw|)Z#9^&?~9|JW+)JvM67cMY%$7NXrM!M0m3(oOyl=x_awfXfja|a{3jF) z-1n!uOsJmZ;luO(P@X|z51+VlBZJoM35UWvlJ_IP42mW~F^Ack~pa+Cw;<4!fDSf;9+ttY-o%?D#3q*(W zfOn{un3wy)4ez0)$sbjGARcHs+BM$H@Y*xV;$Vv)LcuT+G6q>}X6&Xb#$b?FAp@yJ zu4~utw&l8m$inq{M&^=*7o(p^VX!L*SpOUx$4j4~h>n`H@y565zbn!BKnBsL86W6+ zVGqc+@fy9)_~x(Qf#unu7Wqz`Xd+oVi=A#<$EYdPR4K#90mdQJ-J#x-H~3Gy&fbra zD!F+7{qvjmKPyi5??2i6KmPOoYj6){Xt|Vu<@n~6v#)P1UHD@9PYX)VY6B?P!NOy) zY6Pi?Zu;Zf_!iBKwdpdrGly3!SLddK1QUBN9xIIS_lyz zeHobxZ1Vr0&=X&cKAM9CGYXK;!D8Caztsma#Lyuc8~;rI03ed!Rr*E05uegDCfJ zr{CqpUu5&*rAwQi|NXDy(Lz9FOgLOyk}nGn3{U${7gDBj#7RF~9QiM=UysUms2tz? z^rt`B{LlaPH*=t@1JNF3RN=XOEFEfmVi660>Fq!I@WbwlN8DlvIrYqCN4|l^F+ls?rbpcy8Ao zyi6I=#gf-|R?v9O@}7Uz`EtB~EmGm5SI@k*+1uiKtyp@C;}1u-CZffCf3Z4c;0^Y* z>m(86ZDH` zI#~Bu`s6RaI6wUvV<;Z62xP(R=Rg1H8b8&Phgt|z{*CmCycP+BPU21e+4)aLSMrWF z9)Op6#e(s$Nkb27>mis^v(6z#$Ea#V>D;;Fwh!@!QEl&oFtwU#ew(r z92(j<_Sn(JLA9H63-GJHJpk=kX(wAebndP1Oy5CYT)kE-_MYqS91c%+3qeDNJZm0D z*W-aZ8JNGh-Qoax*Op0R*sngBN4R|d>(8`TY1M;XFu^n)u&_gZ{2h*#VvM5$zFoUQ z@}6&D-l5<^gY`Jv`}IEEr0fF+onymuyneF24Ikd_kKvzj+yRw%&Em4qINy2q-OZ;T ze~{j^^O2n}{Hqr^L`U;di+JGd@6dZ=m;;65QO?nJa9RL0uIIIF9A%sYpP75tcIIQw zv3Yd==E&wO%rcNCBhDp&Gd*gaqtDj&(AVHEls-^pt6^cVpPdx?q@!w+4)SzF$R#8Po;yp5H8;e z?*KxNs0GN+*A`kYOzH zf+6Vnd<9RcsW(vVe76V`mg+uo~k%B^VtBx8TV? 
z^{Twlj`T!>Jc6GH$UbSuVB4!zb`LR8Z&Zab71loRM&=<-LP)i)=VjT_e@l*%7$i&j z$2Md}B?1vV>DRq0IM3a{ zLH`oWwH$qRz`2H-2lL*G3XRv`H`1{4uG^_+%LP{m^GfsQq~T4lEN}vLqc|s9=V4@DQPQvbr3Z zAdAw#dn)8R8jd{SWpJb=6Md$>!J4v^%XkMDhi;(JSAClu6XU$PX%@vP0=X)MvlW>|1qB(9zmShVI(iluvIr$g3{adne;vL z(5HOE8$$fWRNM=jzxmsLySe<$qs^HNiDFV; zy_$ZTYhYbVKe@dl80n+O~&UZCL`?a=;w}m**!$VCp-g)EI29g=m!gmbwO@2;S55`kQF!3G@ zB4@x}no~_+9>+g-ZWezqL#Kl|#s20&F#~v$^=QH%L?7B7$=hBmLq<%8@$nSCmDi}a z-3-wCo5&o^fcLNg5q9zU!_AxL&SoGzS?tI zCT~Ymakq~&asRdnk%h3wd4uoDm zl#%U9u}EK@dJE@ZLcwsulhoLDwy8%G#whLeO@>^G%68}{wM*<+xw@&^sJym}Gw3h` z-^lBy#v7x|i+P?gXfWhG%FF7zZ=Xs2#l=6`j)gBHaR#7t!GrMQCB)OwB9RHHnC!FN zJszc381d+9ddN3~;4i zxEJra_Pip_eDcm%KwqITTo~$hM;pC_v4qKL1&r=6Ryx%p){D(q4tI#GFf4l8z4fQ) zbhXeg7U&o*kGHUL;Kg;&C?0?t{o~-RpLBraU;XS?g=@LJdFOlI+r0O~KP{}z3Y#I_ z!3-D2f^+NIrOt}DzIn9;JzWk@E$z0m2te;o-K(=n{~B+pLt3Eb;%Uve#Nbwv(apRT zd30(Q`VIeT2NUR{C)e2NQ4@0XePP zeJFOljDXslT%#*NmEQu+w@;SNkr$1tYWN4UA9duV%@{}~^k+YKFQaaKkpqp-wdI?U z{7$^8nI@ z=<36uGcaNjZONcAbxH0R-55O8^;*U`dQOYnZb5>v=j_>Y^oReFpDCdch*W({`2NS;k_1b8bdd)?q8WozuS%be4p%r$SfD9m z=^oUhUxba@`wZ{5XdXZE5EhP#3|MrxC_(>NV7OlRfx`zfh-FZ_RsTaCW+s zrSlyYicb=Qp$8)3wVg67N{lg)OnDbh`V-&0nC*-cx=DX~uD(?$P z=k3CXugT8C^v1n!ZZr4z!V~{<~zb!hW0Vf1H#f}wx57Ch| zd|I1!N@n=kHL-Rr=(icA!9_oEV0Xx_4)wlg2UK)ghy6AttWMuFw!3!iN@Kg@!y~3x zr(2Y<POc-|bB%A2v>(asMp3?P@IhX$}h48aJ$6O*Q|t1$8ye zrJ?%F|0;&w+L0cez9_-D-$9ZLuoeO=^uV3BxSeO(mG}CSyrG{8-5h`G-*rG&?0tpKN*ngx zf^>aRXH1~y?y)mipo;&>cQ_rlx3ruQEga6QpvtSq%%{TD$HM)Nj$L{#paG_z(2g$N zQ`tkfncOnC1Z3#>oJieMke(xQ-+%n{#Kc$OJyfYN3QQ6eS;FvqaxCNX^4#>4yXT=Jz)tvgSzVo&fQw$G7=@Trl-fXe!7%8WI>t0}NMMhLF-tMV1H@M+ zbvMuer!=cv7h&H6j4(KBelOuSjj7v`KlqhBv?{j_y59x!IZYRc)db~RfItGU`}O5w z@~5=cVQ{)+L+m~~aKo9vmtrT=dKt{&8IK2uI{8fQzVTZxx8(OmM^Uc%gWK@l(9!i4 ztptC{TF7rP4oK}c_)lG;Lqe+PWz2W6Lnj4IVg2ia)-TW|`|ZaToGWboUjW$zm= z>6F#%t|+eJra_#UlEsT=ZL1!2hy#OigRuO<8R4M-j)8TGha-L-RLZNJbSe$nlRI2E z<##pj00L{$ha-B=GtWI-pWzW18w3u2mTu{?$`AGe3Z`rbTDgH3Ji3^P)qpC+Akeqn zB+u&-ZarUIs%(E{AVc^}ox+Dc=yG_E?wqz%R`u#`N|-~LdWY8Voi>|#^%|~t$K-7x zw|IRT1ca%#x|L^A z_P@?(bP?R7qkGgF-ctq`>Iu^=e(;*#%I|s%+>-Qx(x!ppT%$FxXb8diR=1t@Lt6*^ zX;U<<+|m18#yfw^Te|eblKWNmqGd3<9WwN7urY83->wIQHdA)r(n`FeOXk5K+L*>7;CeTouA%0+b{xFAHu%hdIC-}4WFGmk9llrMUFsK} zpGC3dnJL6|xIb@e_wrQGv_VA-f54aQ^=>|;TRa)Q9^%K^g6>^J@_sTec#HJGTmaN+ zuKG8$95gzRGq1Ft{pN!vPz}OLEAIC7jO6_7PiO3WL#?Bv~{I_e>o_sPPskb4t!={n5P% zy6xU*aQeWp>PH?k$VR7^Gc@n1eQYaz@X3WK_d9QP2vf%7pZxs?sn!lWJ5?OgWbEzQ z>Duil8)0E?G~qkd;Y^=>@l^{Coj@P#Jx$0h2K?^hPdDFt?|aj?-OPyaN(Noty1J(T|-WFDPLi30^d?*8D z^fYjPoY(xFc2n3UYvN7Lo@7`PK0~~46JbV%qwOO2^88nuPd@*A36^jRKqXNTq{ z_1a_Np9g_a*jClQ`m4X0N$j~ZFXzqmuu15ZjBLMY3-?>4c|2b5CVs-neAC2SJFdlo zyjwFuaKN~o$26lDqpGdXjD(xK-JVZ~n^eW3BMwbEGeleNlO>R--zDZQ_!n1Q0TlOrEF=f-aCXd?cL5GLgp+Vku z#~1kOs}^cb9eZac*wErR$_QoR&Ok-B9d0e0J|{ajFU6jG5`d^bL7nC>v)Q2D9qQ%X`JTHlgnRo2O$;rRFE@ z`nMKpM{)We-RNN3<%^pf-_y&hyyg9lPmi_W$7>D`nY3D9`SRtT>cB>z;Wbnt#)1q^+F|IZB(W0U=qU^k$Gybd0zEN*X+uuVI1yo!!sH1FSQ`Zr~#hE+0g~j;z9KB z9R73y;~HZj8l#g1ft_|CQ*_dk^vC5ZElP#MTW`KK9;|oMFATo^KKuN$;cw?Cy!pnP z!~6Q&m*a~omoHA}5xP)l4l+P4EU4^n3y(T6*eQ49EIHNwJJYlk3+i*KMbI}g`FibhNbGR-*{R-AS4#ge0kv#BJoLwf=2^kmefU< z1c&k2ZGkU@8e>Qn@?%&17yQOh)U!3dAj1~T7;wY2Jm7)tT0f~ijG1D|Ge(Os%os-} z-pEL=&zb|t>p$$oI~x6G7fv`&{}x|7iQm7z@I~i+Jlq_J7UmZ`mUpGw$(KbCi+n;n zy^u%#krtz$RQ|&bZoHJC`+5!tp)~yWECWA}VRec~6#MY`z2Y_ByLh*-d7pOB?MvZY z9g}ee_m4mMxc+Q~$oTs5jST;V7t&VRsej+ZgD&v#@}6HtN#m`VE2j?_JIR)VI;W3s zk@=xRI0=m#FM7I?w8qliWuFZ#+L8}vLS#L$dKp0sd&Bm?B379hp##~0tm zlghdqU8Ua}e;sOUDeL_QN9z;s1)_zo%GQPsIDN>tA6-wKIz5K>v+X2wet_|v!#o}C zYSGC~KY1NCPFLfFlkF6uTR7Kb(+0xxN&mF*FkUiNTQSHxoc#2; 
ztv-ownitbaXk=F?9L*7D!7-lF*WNUqm@z=(r226SA;ymu*%c^UjDAJm#sF()#LK5& z+I;!N=QE}-ekkGa{<$-+*B4)(MQnQS*3HheiANZ>8C*GhIPiDT&3vYxm}=&-@!l+M zg`bchGoRCcXrtt58u#0@LVf%T8EPN28syOTs?k_OxF>1!T3Av&X^{2V+xTaHa(d$E z>2%=Q1z^$SQaiPF!cEO0Me=p9bnPs`TR1dq0}G!l%B$LKH;KG4pzrq z&5s$%^*x1~2)q|>HUMu<) zHl*-wNb<+~9YP0mHS>DEGk8FtX>?MulXnM%d!YMX#&B1D(3T=VZ0{L0q|u>xAQKf* z+K}$e2q1zC4m*YqDK%06!SsGL+7!C3b!qc{`%J=N{@@!K_I?aF`86#-(jzLPRRom3 zpYawP5R6tSj@~YG#nfAqR&1As9s7@+P>C8M|sp+8s#gm5B$+%Titm!^@eva#%K~oL(lR}oobV5OLSKM+G30& z8!ig48+`#L3>9^He&wK4Nh7k4e6DM#nv2R^nRe>7XcPwo zW2o(QpW)&8#>Xkso^M|TZ<1_bcVi~veSgzd@@-QO)TxuJ1!3sF1qRf3dk1uAJI!95 z(Nr;m7iis0;{O%osY@BucUmH#DaDl#GQo^~6z{YFW48&MSN)}urpzfH4Jk`yZh2vs z&=`Ze47F341{P~Nj1WTm3~rNiieBMe6fX~BiOx3pQ+AWTNi_K>;BcB!{Z|giOHPIc zJ$nA}?)km6X`bjs$&>e z&%ix1n2J`g?F|%s4K6FpO}H!8!!DkO{-~Lz40W4DmGa0oazMBi%z-g)$QapBuig%w zyBI}P0o~81GNx{BH0st9dPh#RTMtV^uKJZ8MESdJ*db%T%ISWau7>IkUOg3?S`z8z zSzq%U2x;OQH0%%WjK1m8GoRsnS*HW5y!E*6Rkxk*=V5`S@uq~;CU~$<$*1K8jo{5f zi#nvAE3Cg&M>=}lmkHj}UR_7`B?rClmzshbn#cnF(0S59c(w27oB>5A!Ua=!JrtWV zCM_5{dS9|BW69KN$6%5%bSd4!w`UC+v;`x$g&4?$f=B(s1KZCa*_{XTvhsU|Tq{v^ z@p^DvxU1i^ndg-|ygb>53%c5Z+w#)_z3tBfjE^i1PTO`pxKBfu4U-{s@T>usOjDH2BPnFsO7((k$K`*H*hv# z)>aONq>dOI#h$iZRgD;>c9#ZqP4cKO6VqK0?!b#pKpHe&x^cBh*nvEv_Dom^aTMWf za?BvgsLP{A%x&#sFn{OSz0J#ctX#{mY9gq5j76uLSTLFyG#Xf*DIA8$9;1|Trqk(P zf8|6TyVsh0Wk8685AznkbS+PeV563asHVBwWRkbrw~ZUr&m`{QOyo1hH|Tx!<nz>J0owI&tCrECy7hmWNLq&I1~nK5m5v?3!6ng`2o zGR;dvSQ_C~E?xbif$W~mTd%(|6DMhH@%{Mo3qw=wbt7YvL%Z-NLG;-(uPA;D z8QM?`=;uBLpN zl>w;u-Bt9-qn9$=J}6fAr96_3%_Iij9<2;p#Z9(s!RKN6Qac}B&G36A@2p)-)X#Kn zL4%RUw$GPN*Uou*((OFRu3Y+}iCqS(@cQhF3!Cq~``#FZ7ztm=_{1PDC z6<@qv49iF+R)1b^5%kx;{e6@74$Es2b0Z_KcI7Qbj)kG&6~#D9ZiV16@isvxhv-4D z$QSvyb(Ys<;y8K%k_^EgpZ_ZUH{ngzd&kIYCx%4{6Dy%$B2fIC7g%yERKT5HXCdtd&$3@n{YFXd5JpYb^Tm1%ko8l66;G8pZ~+faJCsd_uKR_~8?7N+T? z&x_}OXy0p_Bel743E&o_?lh6S*n;GN+V^OliLC)-)XZx=-S#M)K0JSM^M_AAkAIGA z{xAQ>f3^AX55K?p@BiX&HR^Y%pc$=(-@@0uvB4O<`u5@ZyF26OQ(LOb zw+sdh)J^oO_puIUbI9I<>N#<|X)I%)@R`5pHj4|@TYgV>Jny_W{GrDf#l-1mM45SV zGN4VhEkooiHf01u?=kL-K^|@5-QX}fx<1lwfUDQO+5DUT>2IU^3a$0tciv8C9Btx% zzr~yW!qvgcj1upE_W5|7AInhwn?Do|C2!e3{r@)4W|dmj(U$bVC{oDc*o*Z1mKnW>G{vb@TcxV z1TZ|%1rET~^`GnTU;L?Ur=N;n$spsN7`-Rb-8YjtyRF8fwZ?q0g_4KS)c8WIX}f9| zO~^9YrzzB%7dfLNnYmpEFS_e+viEg|L24WIv$%L9P>MJ7 zT}SeYobCA4o#CDX1bro~KMOT<1|DUwp2de2ChRm=Oc`0qW3hF`#KljY>5zeELKQ~C z8T%)n$2&9SOgp9Ma{b7OcBBaHVG(f_^GmBAup5gh7T%0+{U-OzDE=h=GJe@#-9jvb znS-+PPOdEuw1CPumL^FKG9p!`{)w(Mp1EJx0*i%T=HXM_ZJ_{rHGhN8lA2nWMs(@q@WBMxOe zefgC$(cVRRy$z-#t#hOlqapT{m@WDqPZ_a;qe)HXT z-yU7g=rkAxf+Ryn5@iBgxxh)QgUYtVk7u!-=c=MUze+`fai*Y|P|OPJUGTOY`N$ z3&WAdD;Ca${J4C%-COA(yFZqLM`Nb=zfu>SyOH*Bva%~@7V;n0HikLgPLo&iP`5K- zg|;IP^u*}*uI*+(>w25vP5nm|^E>r?(76b7DC75o2X}%~Tl!gD*U)Lk7?r60FBS$x zd(z9w5K7NDm%&`WXp!$gZDd}7e)_@D(ZT$tP%(Ei?9ZZed?ZBDAObRe>PP?JnKl$WAG9jT<1`KuRuC-m|_328? 
zY2}ADzQJQmgX&NM?sGf!ay@V6;Zho&VCa=exdUd(=wFYO4`0`% zbEznK;O03Tx_>tD%C{{|@V&!Vl5O!Eib~rLMLZ>!ZoDFsrlEtU(oEh-QJJNcZV(KC z`R*^^){izA%sM?-P9i<(-J?Z(n zoon@h&nO7^dO!Tq^|sBQwc0;pn+K| z3kj1)x~og2P8+#dG#Olb&agLz?)Z4hfx{Ss0V}=VEsq1nz&M{lx7HztcyZgY4(G|k z_&fYooz-=STUyAgZa>4n-oeoClr=BAUbLFL2TO?4gNfNyvy zf9}Cn?!c6$0c3e3hr7a`<1+Xxx_L7ngR`OaqRA|dZ1Jz#hI)WNQ$K0CLYK-}`nblS9 zU-TJzXlro$opK}qujli>f6sZn>%qZypKu#G;42NGz8cU+yKLbO9eVD5WNpgb(z~RK zPQGQTO(tD%rY^APaX&*BaKdkRXG`yqY5fK|OmeuOJL>e=^8F6oJloMrtKOc}O5tUK zGeGcHAg4OOs$1_S-*dwBygbiwfmiPa#3G>tz29bV9r(p(km%+)%3k!W&p?a@tG-KC zy@Csa*B4)&-~7!lf44c*;NQfVVcVAIKRJ7H^M@<9Hy3WKLme4){@D-S-u&19=4TC9 z*T9E2m%*8~Ho3c6jPSs2b{%e#5k5R_A7*6Rol*Ee6Rf83O`Ho4k)cODOakv_AmE*Z zPsHJV_0=;y&uEvy_&dd!{=zo-N5$BU2~2jzh*ueps_Xu|M~>y`&ijl(?@$v^19-UH zzn@XAK_3tFOP8+9Wb#B4fpdA7^DMo3>;4#}E@wnxC_0=$h1dD{OBol+cOnDFl7=Qg z86vaDH#N?4xHR`OsPh75jK7vg_oZv+$5_nMX5yPSP=C|_@T1SaE~Lvxo5LB)7_5aT z(1r}E-z(I{r=MLYR_No+?>n6ITAts^6=s7m;f=Fr^3FN6IsetgG1^`#J~uJq*n8_v2*4B!R(+;Z}Uh_rCk~7+*~! zPrX!3)No^rofzb$L4$w$vtJZb{jJ1$ zd`4-L6>W9$_~9{<|L)hn&KvUX=Ewi;KWJikd%RmNTq?{?1_1RQT@lX+P4nvOXEK^! z-JJjMldWv+o(asERxHGIynOj;;a1w_Up*Q6j=t!?q`coUmYd`ph+ebRbS8(zwvT5S zf=#&9NywW0O@hUW<$a%E&Lrnp?IKRP$s753k`aiESP0qMWLk`Qp;Z_^c~+wJ$vn@l zUcE5}7_v68BI)efgu#em_T-)nanZ(Pi9Aie7`-fN_>7UI2`QZ;+{FD1dM67dWO67T z`{>n_!M)0U6m8XS@Cb(Yj1&&9Wq71BF6B*1F$wp>NK(;57kspH&VaHfq(S^@GA5Sv zH+M61M86+?@4a}gE%aYs3%4OpEgWYDpP?y`n_qwYaXhlw{G%WLaPzl6`*|T`(o5;@ zF&YK$R=mcL^qsffo_c=q)mJn52P1R_S2UdT)k%l~hKlhHOy@Bi!iyX-WH8)81ufvt zL*4`R!_)ZhQS=et7#}|lrv3!J2QqTN)u_+>0bTF3Sn}bgU#C;gZH_0SGpWpp;p0!w zm&z9U7I?yk{)fx`Y*x_ZKgoQBSB0 zhT4e-YNA>wi*Gty%!Kw~25`m>a?48)P7II6nnJ!?_j5ndRz%>Sn=rw#C3}K;Oh~72h=szBjx8j|d@b{iRMnmuzU{{EWN~sQG z;0zY|_Ji(d0?)*5o{vGjIX`_KnBnE|z6~!loc^qTjG4Mh_)e_T>h<|omnQ~1FE%?+ z{_3Z{>;UA~Mt6UevEss|i))c0Sz-`bv9^UEI=cA>KmL>2N#9Vs3ZI+cGO&sCcL7R5ql@Ig;6V=T`lKce|>nxSi+ebcJZ7(U1*QwhNtx2 z#$;hIyx;)L>o?tTs9@x-j)ku|*^Z23 z$rD~RLcd-elt~_(V{#*-?Cln7=+4_2Ga1-Vm5)xkdUcI$9pFjcd5TZ0>+-6l@tn51 zl|uuaVhr=R{v0jnT=Hh89o_iu_r5o>c;lIv>Z;M$B^f$hJAd`nmlKCw-@yyqn20{( zSYwBYv8P^?m$U^g=UiAM!VBt+fAQ&|LN8sta%uGCk#;Ixi~k&CDU^+I)r*bkd8n_x zi~c9G$$jlcPf7zhXPj`n*=7FPE~TAtGk5Mfi>5YK`$(-0V4ykOi>?mjw!o;Jgp+ax z2YvaZet&;FWBJjo#7Kwzl!x(F!Y;T9HpG_|v zn}zK-CS!7A{tWCEarR}X7u#DqS^#h+g!bSBlGJbZmu3!B4<=qyuKCX_V5M8>QVWvC z!^(RUeatbgw<~D8z=zcuPj2^}d@atLOd&sYRBYWv{jvZfr`RRmh zvdb!U-^bf&ChXL$4jzU3h1@p5Tup*`*gvDw|4o{K;C;;g+#p(-EqC{|6agk#a90Ti zq*`_Ikc0qBvaaTCCHLG@WzSXDl{KF+Sk2?!1kd|{wNt#-S#3&9!Q+KypoZ$hk@_g4Gi$5hnn}wC{!DR;z z-=&@U!D%I|P2f|W?$>u0wUz4uwo`^$d4@gU)$<8CF^F`Jg{GQtmURJK^3j7b&U5hk zef|mP;6Kj-(EH&JiC2axb0O^Y)S!3U1bwaa3qQ(aaGS-N=%Hd$pAv3qF*uK({4VFpjyIzJU8 zTM{gzb$nIlH05;?_aGOZ^?+T*2l%7TV}};X92i?3fz!aAZ@eJEU?}1CPK7-VBX#U& z=mANe1z>Pm@2ewAyXWPZ5Htf*y-PNARnPL0i_$4^#r`f!3VzB$%zkmfv?0DjPZ+2$ z{ZJ7LXNYtayfXK^bW=8?Kslt9WK#FOXX0@SJ2kS|z4ue2?Zv{gXOlKWWKkvmnqUnc zAvpREu4CPY!xS`d2m5fHj=gqWg-GBT%SgSvovi z`UP|T!2vG}?8<;DFxnKoClwsMC~1GaSTN%mUf^WNA=d~v>lc7aw}m}7!y^Eo2c7Nx z6tu1zC=Y0O57X)or0OZUmCs>SzyA5pHviRs^Z!pLG?{G@{%s4QPvhyM8J=%6c|X=5 z=0g6ICjBqANcK;@|K{dzij{k;2@=n{!%fn7DZ|Z#l3~My!9>AU?z;^t83OmWrOqIX zcdNmWtao$vn#qXDG4a^D`_Sg}+1G-}VAL*yj37rU`%$rlKl%KN%`bob+wqh& z$T#_6$Y4Z2ns+`U&U@c^XY=>Jdw*zUgn>t95|sf%xEBYsov7{3fBEIq_e7p5`|*8~ zp9`0-k6}!}SH`hjO)SL4fAh_An-eWUeDe9_;5FF|rU{8y?~Ftan7dp!29t@iufEcR zHZQ9t9q&b(JJFCKkqj{^-pb4CRuf%(iw>9aASY_#zniEqEQqbn7=*t}PQLE2z(4=t zd$CJ!k{6T4nHa~ryx$pI9OyI?^?2H3h(0C5Yx1tm89()N4%}*@*N1_9#jj75^gf2H z{QCOZ+#(j)V657iBvM-52RQ>#JRT1)Z288gw%v-!JQJk!!Nf%m?ycINwmIJ9P#ohY z-xm5P*$>qw#A~}Po&+1ORCJ*_j*Uz*EL&Kyd*qwsYeH{SH%1Wpn9kr`#xw1k!raUu 
zuT9XF!Mz%#-b_kOx_$0+7#H2iV8uXzCUg**k}n=tFw+kBW*5Uo?MMFrF^2Kd)rnOj zdGrw(o62Wv|9EbNzi0Y+VJFb-K*pwf85zLcyMGy#={UwY`YS-eV|+>O^88IjjyyBs z2`3=*#M1j?yKItuZ}aotzuyAmH=BR&AO3jrfB(PzpBlz%UpQF^Y_Y2aJ0WP$b__qs zIiur~jP@2v@Fbe^b{qp?c+Po9)ejHN7Vhf8_#hqQ{ur)Luz>f@0FINTP+{r&YkS<~Tvalm`4G(>K`L%T4SLxDg z$uu1&o<4)}J8$G=oAKz%)fVWIBl0i?*>JOiX*VZi zOPx7!xWzRFXuLCrT~-H&t9q{8EoL}a5v^v|MK!0F@1{#F7+9R6`}WiaT)uU6jEHm% z8tDrx?)~m}f5?OP%+&MaPtH%+qL&M|12)}&pN}0rk?|)_%4Cs|3B7s43y;7^Zh=K5 z(8DR`l>VQRN3-}C7#bl(F+XRcavoXEylAw^(FLy-EVN=1^Vo13-YhMX{g+j z`rByb0AjS!4niE!17uDD<5XdD#Bet5n{hf3tj{0Eo$7nHg))3|JOdYe8BxiDYkKis zv6Mgm{L|X+!RBB6XaCvejl8(U)1Occ)fdglNI>fvE%XTYWC1RnA1(K1%zq(%y&H~c zgUx|sr#AbJoCy2+nD9C%T!enMKJ9ut|Ljx{eoW}4fAnWRTH}EhvcJ68xG3GgxNC>O zPk#Q3&7b}7PsZ!?M7)Dv$R6B3umAps|M;J7-pJtg!3Q61KKl6M&7c0@`-+uG$%`30Gw)yhQuhMUMXUE%UZ&!jHLwJ`=Fc{CmX?5eVtIwoY zE&R@~xOCccXi^`5VTFxh(ABOyoJX&=7-ftyvRg6>6b_|4lS7BN`CndOKyFM{dGwcs zRU-vwqBtD$$sRkv^cl5b3ken%=orKN^A|pEOx415i~A>f?n937pz~lXzMX9$^cR2s zix#F%<`iPF{c`RNsXmvR~8XJI9n4_YKQCSr#W^5*4N zUfo=Kd?gv|z|q=HKggqc#xIS5E!0cTknO$DaLQVahR-@c`qjxxMLc@w~fQtvhf1tUFKXSUg0QyqR550od108S zny>!3n?|3<{d+#6mQpjuDA7um{!(W;!(-p}Z6LEQqa>J5%0?E~hN)P?8kc4{zr*LMHOIrzlRWvU@C zi@%+^D6wnadeYcR(|p;}7$fS;z!vo0Wf&HtQC>zt9_tjMRg%G!uuYx&juA$Jl+$3H zheaU6wS4NA67lUbqyca6@=&>x_>b)pE<1kbTmDn@oSB{6$sKAN1o*}(I7n8+}iX1e>oA2q2N+Fenq`__SRR$dDj29k*Y%o*k>LLH&)4wLSN}5`0 z1NGb1ZSum+$Zz2GnyjwWJyWvxef2T9+fs4Tc>$I@lW6L`@~=YFwFCpl^$u@VLci!@ zz`+w<-QY9w*7F^{)`s1sTcqz__x__BY^Dy;Z|FW1qi1@a>=m8HFkHBY=E@bBY+;=q#AY~bO?;g$P5aekaxb<=eFW&=; zTxo{^uzimX(Rk4RU#R-e=gW^Qzw^5&6w0Bz0jhw4?{1){&2;z7kTXNZ&T_OOW2sn% zBJ^PYH9gcLJy^xABx+5>lDnd9u1Jn;51?Vnp$g@F6;MX+*U9@sb8prC-LFY=^5n^r znV&p)a$Ae&mQDl=@1g9zxMS5WJEsNC|m*C_tM0rTRE7$Ys!K5?kAzIe)oLpl7 zg1RQFv(rRQ+oaq9}G#Zq38?PXf?ud<>iKkCQYwy$py$f77I3F6MDzSpEEM+$7 zu7}=tFTMnaMW|V~Q)c0nZ)Mnv9w4INJI{uGLZmJWJMOKb#dH5|1<*XmFZj`et3FMl z4D64eII?+M-*_Y!Biw21eW}4?>3)2AxuGeI0H%y>a=4rhG=tT%w?{~1W}>G=-iYk>AfmfFA!G_e}~=-kCXfc={{ zPrtjloK^LWfNQ`YaBnOWDj%R5u+8r*%icq6C;j%DSvo}TiK77)`L#x$$>>{W&tw6f zuS6Dem60ol6P-N&$3OmH^EbcxbgVlc<${LJ0G61ze417Jqb5Bjhoe^-{PFs|#v~2s z;~9S0P7B;&ZRMSb{fXRyu&i;Y5um_C_HZC4*DZ9#ovcp}WN{9>n(ev2{cLkN%VQwC zN%f&7malaH9dH|H=|D=pq+burwYBEDhgUt~3JQUhRO*@CyrcXi-$V4{KlwHvnw zpa7Ty`qhnX$anHTeJ*R(*Rx`;R>#_ACUfeq$(Bj2ZNRpk-wPxlSF9y1Vm#Xi0Bekr z41Cp??&VkW8Q%c;t1JtjHhBRku<|w;GjV^l3Ee9vPtG9zMu!vfCvCE8>#qYS`QQZ1 z9Lf^jqQ>)fBBpOofAOnL_S>7A0rnQ09t1+Er?m}x zPrh|q^2xXR)vGMUUv8q#(ldGkuK-r~1ME3_de(P-rc+sU??w( zUI3mKi|=~cLn-5>$s3vA8{EQ_19Oj7j{r&lIqTK5V7pf)?QRw_y1w+~&CM}81rF_Q^4M1V;o0(! 
z?8#M$Nx!4H6tqjR(L$ifJ_~AA<`#!Xn#A+F%160{6TB$;F$dDmk;TF(^AGH={C-C1 zULT+c0o%g=P7AXyD38MPP_Az9Crj|-Ez+Jmq)c5_V%qy~^?UhTzT?}R`0V1fX?~Vq zU!>r@p5pgF3KuxI{L7|-aTEO|BX>gUmqvI@W8r5T?i z9rr-$GEeP=kJd&L|C=nGx62 z7J(K;6thS79h?4y@9;e9Lz_Z1MCFA{IOs2wT>1e8{ec$7+7(leBCdFgYBGXV)@&G+ zm+Kyz66TL^DEavP_u^Qy(<@|@P0*@ScO<-sMDC3Cwy5NiC(CY&+rdeB9mP+1MCRk& z8Pj@~W)!Qv7diBK`m|KKd3i!>@KcvFU1w+eaHb5kDbfiM3g6ms&ynPQGLE&R zMLD^H?k)>NUsn@x+k;j+7W8N~GCsc9(TCUhE}bq8=v`ykLJFq4Jis^wEW)t?{0qf zt6z_`G}#{T;jP&K*R0X$K_^uH@^62#`Iq@c|Mc=FBkTU7KlB$qX>v3?Dk z7WR1M__5ra)KAd%cE{H5cPSWUF`4aN39~f{Z%P5YCl3X-a^-sveU0Ggz~}kC!}`?M zb5e;+qy)kLJDW(SI+=7l*CnGAukRj7-jGAT{piDJPVwEQasA_2d~+vDdEoFTR4^UsEr2ve)i0n!E>jtk>BT2thwxvg2zUVK1W%C`VWh46vnr* z*gG3uHhXP`Q2tPavj(I%dQw?m=J)k@8;cZ8+^?uJMF-_bXoc^sg-7l6D7m+1U!gA! z<{~Or&y#EKbM@{U-{o;S}Z?HN4#NvLJr<*Jo&QC=%4(y`z8ARgCG3S z=KCLfXY)$3eoy@AveJW%fn4m|?^K#~%2-2(r+JX|`OP*Zo*!#NXXYiPRlxNI6=&Qp%1}Iu{;I z)iP?hkqe|h&j8^;eelf4x=UGkem`Z^;lnPfCQrln=9kH<`?&^txed4v=M<9@6^~M~ zYd;(CZ@zwh^T*$NuX_dF-JCY=#Wx1){VC@4DLl3>-p5E)X>g;IX28JJ9>t+eFp9`q zl)tB4`c5txleo*H6x=hLF*llH*=*8x%v<$83c4pL$)i*PfQW1iM9|?0^9J*yT%RyJ&=0u`!e^tuXx(uDZ`UV`C(*X% z#5?D*wsuPHUUG1MGT>zMTt2tQCwTaIjALBiRMfO}Zy7n@f#h?t(VOy~rN1+`G?tqO z%%(>)(!?z8_w30Cr!my}Q5qNE>Iy8nS!f%FMxh+d-HD@L;>|z>`qz0SV;qT`pl6@2cM^P7Fmqg|{TOHNs< zZf(frGUiYldB$XLFMjsPoK|rcg?@!sZ6$|AG$1^6)xu?cmxlXsNu-)zoSri+}(i8jqFI^(JVg0~2I6q$F8ZdM>x zni^$w-!-9|7y@xC6y!sYQTP_QCWhjJ;5(oL2kZ7;K7wW&*(av6kXuFikSrtypPlb= z6&PHK0!$4*VG{f9Q*-veGG?vs%NG8CgmbunF=+*sZz)r}nm80FO&^2jB$zF=C(B?sf)o63GiZ4R=DbO_a9B(HMZ4KR7!g_AiY{S7vG;GEIxqax)k0}U!9GeD zAaRg6UzyBPZAOUV=jPjdQD9?gBSXRyAg_Z92c~-WevGUYWia~t92_1#) zWWo=DTXm_7K#CupMzDcCN%{xg_Pv;eHypk9wO&VFJtm-vm0ula;xd6zN%ex58geKdfp4G^I5!@IG8}dN5)YS0*98< zLFI+m!X;^0DT`j}KO13N+?4Nk=%TO}W|?|XGU`k41_N)Zf9c?>U8X)O?+};6N%ew9f_}z zgS~2xuKKLD*}-FF8y*m@9+frpPaZa{t?0t0;^?^2PhXsRD0P@1bc@ygL$ku7I}EiG z(I3>l3&DFU&jKjXBHEHiNc#Fmb!ed+SDX827d)g_uCH!{tL2~NYgYtGF`m(CI5ZTgwHiHJqN&YRa;CU7lZ)NUFm@RbmnJqqp zYw3#_9*_rCo+*BKM_@1tK!fGJNUD``_z@j7I^5)+%%v5ie0f(@R~%j;GqyC7XPVtV z^!3@VXJE9j`0b}xHlN+-cx{XP$4y@6@~Yw~$xr1*!{nZiQ?5TAH4(QcW#zzE^;jPV z5XQ?*LVl-HemY8Mam>#(Ib$K^k7e9gu~j(Bm`^X>*u4MlJN>;~{aGS@G0>EAD~lnQ zwYjM8t0r-Wn$W*U*+7|kr3>Czu)C-f9s(mv~m5 zdmku{`3UCPg*DiN%4Y5PbT zZ%4xUCa%3k;S?=xOkByj^~$BsXA?m`c=L@nN6{LEst0!xS@A}b8!27heB*rhrC^S) zEXWUy(x*jQi#c}!SXkW8g^Ud!KBsL6pc8(uxM9ubg59Sny7`Bt;Grm)eo+|~v6RLX z)hya9p740RyuR)z`PPaqI>BxFe@cg4^?yoV^p@s83Ijr9&1EdCNG=v*QY0_hEK1jr zVusj>!v{9{E=8g-VdfvvWB557^_8)#8=1DObf0v5z3lTS-z%JE@fLpl4j!zqC|K0T zyT4IZ6g^{VpKzS(YDNxL=h~h;(+9W&arB(R4jg$XO~9j&pa?B+=sAk{aL~{7p-EF^ zeX6@pGKgY_tEzo1h)4Nf+p$U{U-s|IGAe!}L&?@>wJ&8oCC`kdrOe_X!u#cXUj-hc9B)>z>{=`rpFA|~aoc5pIi>KMO2Op$Mn zGV{XP7hTB9C-+jQgrjzL{F<^L@i(UlPIceG)lMn=1`N#!Y-VY1uqX)d zOXS>q@b=qnPQ}A-zMf3@X7kY}pN#?-{V8((m;e3WCZ7&%{;Z2Q$rqRHUQ0RjcIiL; z-KSaW)`wCuurU6rg*0FEaJ!TBFZo0cx$sNV;2F&^O1bcw&6vVix@jZ4W?}xgHrlm~ zS4Z^c+J_sZ(&Z*aH8CDWeG#Vc1f?;UTYA#7uT+&>_6S*foJP(DZP{N%D$VzP#>kBIeh47_^;D3#$0^r z3OX85MpCxnC-vSH?KJ-5_{L_GcBZ&B zXMOaXRkaN{ioxZ3+X)2Y+pMu0(_BEvs{eMhBa^kalY;uUv71uZyRn7B%{+*_G!Ei7 zZT`ZpV~=;ZuzpsqO`uW2;nQe|-|*w4i8q9e;;;sqbxrbvd^6_Z5$$Cz00xU*8=BGL|bGOyX(X`FfQ{SP*O_3!@e=Hrk5eq6et z`O|3fqSK7VE-pV@=(>y@%t^WR%MDHs*I6$eI}?5CON}TtAv`(WK zSZ%~uH_o^h-z0EnJf88eI%y|k2G=Xb-L;{$+7F)iPG29{5T898d4wyRK3#H!n<4j& z*ocFlK48ND_neFN+L`<*Y82h(1LhItuE&#s`rxyN@nh{{Jrom;d^?)FLT`Q6iJ`BO z&*#p*nhU@;W@8quosxU_qB(B(OOHP(y=_PwOjeQg3bwIguHZzEdG?GewGAcsadN6e z_io=P+#Hw7!A{?iTcu?@aHR3+owwf!kB6IUxkwoc^{pH`Ssm1!;`{8$#;4@W%{Hu^ zU_FwR{HwVS{M2W~Q@Dud!po zfgfgDGzgca=$*pK-9R(1;DGrD_{zI|RylqDgL*9iFU^9buIp`CyU4n}2Y0;$zVzWm 
z*u!tq6b8za5nKLY(ut2=lcI=n4vIU9t8ej@3I6lhN1uaDok$jC&IDK2m^!S|c4$4g z3}`T9rqt-Ov^&ZhU<_iTh*MDSFiKMNDYmrXU~xAJ#F8w3W8%Yn;BA#9=&+@)LCW#k zZ`#pWVOpuo12Y)l_HE+OY)iwXg(+5<4G0z5yZV;;_u0$n%O8AHCj%d>Mrky0L)lf9 z`p?_cZooMXM$tH+{euR$rNw{qUiesAqKmr1dRsL5FWwAa<%TSJ>AlBeTQdoM6E&!R)*vS4lcbnnOjQf z>LXpx!fIZ&$%>eEfH8pmHbEU%DFNT5er4We1Fth$Q27?v)FC<1#BBP=^s@lab0+;-l5j zB6w3PDC1}F7y1KeNr7!0gAWQEyI zqmF31aQv3Xs|s4}{Zj4yjQaMg>`wIx6;2T{Zihn z1_kFr%kyoT3Jd46TslH3ZNKz`>oe=&GA}Rxi8Jp{-T&#SgX(Yz|?2L|MAlDr2Rc@pTN=tzK2}`}#Qg#*be3Wk z-Nt2BNEVnT$){58Sa9BK!No#`;_u58KW9&$9?K?+Ll#h6T{%5>=XMu@a;=jxn>F-j zmp{#l=}}>P%w`GFV!$!bLxG0d%U6@S-{!GthsfQ1rr(D3}cd{TihM^EeI9{Z93)A=~Weplc zW7fVlaco}XB{kYpzOZ;o-W5Kwk*tVEEo3bE_tef~-7RnJM!$dt#sCnW#M`5osI8nJ zxZA>JEXAV%Imh~J$0xdU1&<|@6Af^3ks1Df9zWf?d$oDPVGC`KksT$;| zkUz-h?O*@wSDO!0q#r%{+o_vH`F9Gxk$kwAi;E}qGfnLwztElqpYe?hJABZl8FxQz zawLn@n?n3_u99wL>ANFO<=Ne79sHv$_GY~|%DO7Cb79@V>N!f@%6KvDmkdsM#NV_& z<1~uJKDtuMa&`0Gd+!Z>SXJV^Pd?3pzP_RV*_3q(=V-E-Qj=UoD~fC~@YJc?f2H_z z!7)ohyeZ$zh3hXAy808>TEe7j1^{f6fmyh1sL5C6DY~7J1P*4 zA@g$9@ds0obG4(d;t!k6aATcKNlU(7OA)w_;YYv!PiFl6 z&in5-zSkb%uiam?TWL*b74SSu&`TFT-~8=Qe;V8un?L)f|Jf)2DU`=Tw$D?!A~Tkg z0j%Ng-Mzl~#m66RE_`+=-e{gsKH~wma4wMk-utyvW0X5I@RE%{ZZ9a$oQ6`5x$NEd z@G!T!d-rDVDG8(RjP=R5`|-o$6yOxPsp)b#m2$YYrD!J1;fIgz#)Fj1p%)*uk?N)2 zr|~0M@AUNCjf3{d2Fhw<2W6(dxT`d4epNfg1Ms2HOJvO-{rq6u#*@cvwEcealQli#ArtD9jsb29fV`e(>bTj|$`pdV`=N?2~Oj(1Av zR{UySawIsfq$KB>$r$~ckABnUR;SY%Ti$&4{pq_bp!a9hOKJb2K7NPdvi^v!#y*>0 zufF!i=3+jtfAia4&D;wO*LtY&q`8Rxu)C9R=g+?zkG0{MDcvQ5adxusICufDu8^58`{?2X3g^E~-}_uj4O zzS*2R|4wx{+2-<@tjPbv%#FC9(f7G~yZzOT_`DN^@$tp07u)R0-C-L<%9(j!~MbyAGh)hHulwu~qB7K70d$2|k z(YIAxafR&^Pe4u<@xJWf7()&{3Ll*NHZ7#Mo>fcm*EdMXKLcg-n(}ARly(OM?6g54 z;y{7Rd=4d7`2!TZfVTrWu)I$N$}DlTul63xQZy~o)SMz1l;Xhm zGX=sQ4AE0ig)T&9aaPZo^!Q~gTS<%(qwJ7H+MO(5_Z_Sk@ab2v7I5%FU&T!Qrv1Fj zBCYy@+oP>^-X)M}5LBw{GRd-Vn9rRs$(09YiL}S!v%XHIkWBysH|<)o0WScIp6Soq zN-1kyXsJLTR))#5<*C7X%I_`kXg@IYCv*UF_`G<<_=k?euk{agMVpC(n-=MdqJ|0d zwE@$=)t3i;`KD}>2zjR(A&7rBt4!9sq0qQe=7Mw+Zosx<` zlggF0Kd7KtAAL^kC*SsM^`-C`v`Q0JtKH>S4lsRAmA3pL@zALRQ{5?D2|bfmORPp4 zxu;FE?P7*44TIYtJ+i(+6gSTRG=B65L~l6sRn+=a$RC8u-@>Sna7Dj)i?n1$zmsYH zt(aa>WYRVvnH&_~+9e)_6Y&07pjh7YPo_n$UWYyUw`&yU)lL1#xvGs`Lr`OrlD6OH zBRI<260|twmbZ!E;<4(1CzLnzE@{b^Y(b}B#!sTxzxA@l!19IboZ!;d`Zm;?cwPGZ z+UC>icSoVXs(=FHPK%RfN}-xkr!*D;luong*|iqCCi~;3Z~s#ITBwcXNQ$GwEgT)U zv^bxIW2Lix<5$$AkfIL?tbtxS-eUQP3uIUux?rzz!qI}z;!W$}TUNv_l=4nt%hGpe z(!@0fV(@|Zq)D=cnokE%Nx;#zz_)oPk zW!-HNHSG{g$^w?hL+9$Djy8xbwkW#8CVW`Necdth+m&%F3*;+JLf6G!Uk9HXhkZ@* z_y%kSJbu-0rR1?dJ{%1!L|E@x{O#Gje_&8hXh+J~Gg)7t_sRUVTL20-!L?xF1KkCE zKmEmrDf*f;N8fu*Zdo`|+!#y1aG4c_3d`jJgSTkQ_dErQ$+L@gT%_eF|Lf;poy(hU zc65Fw&nZYflsA+V`=h@LRI%jcOJ6jh&ayK8;mYM&R{j)sHd82IjyLH&dAtpR+Lxsv zOG!%5V_A%93l`C=16k=fj&CtQ!SSR;76qlgVv*&dH5U`IuqHE3ru<_m?VVDdLWFAx z3nX}*Xc0IH%lLOhZFEB)w6v%iUI?bm1T^4YC87?`Y4;r^{jC;<`ht8f#mAJ2@;c@` zmqkUxYuB#D8~NW(fzDFQBCJG{Zd`!GOZ-rN|BwGP<#-F@(tg=xY1|WOCd%Z;Z4BY< zyDf-6zj$@?*6Xj0=$ZQgO2?A)PK$@JW{X$39ynDUFQqKx%UEBZi(^wTs5kzCr$wX9 zF#Xtf5;z8*1uI1&`6GR34DIoTem0B2+WGlIE_U{2{c~g%$QDvyQEs3McosxgWpQ4p7&CWIz1uZ=@b`VeEC(1hKrjMfAsz7 zXOs)8os5*>T;X{9PQJq}=+V^3#6le&`%*T4*RVEo0#MaK0tE+XEQE3Q4F9AnqBL($yc{eZ~pl|`C)za#OBKX z_b*4eJ@epl{pzES;+>~0puemhzo?HNZ*%GO+8zD0?c>dle);!(CIgIjcmnT6Cx5Fw z+k~iZ5&{(B_*gpaC(YANldzziu|!`kKb*Py3O5FZa@yY=fVYjxflsI?e*oCH|nn`9a0=#?B0MY`DlHsQzXU(r+~QpfHTGT zT*^yMQ|KoPPoM3Q%V=uzm$mTiHqmxAur3Gd{)7D3Hl|QKoj!Xi+*6xHf17D9zPT41 z8zU^{7Y$~-NM^8x<`doLuKE(oVf|I#1ee0rDUpM_vn~uanW62pBc<}i%b(Bi$YN4G zS=PRml9byv^*~D}iO|*D-UbAv+KflhRG-!p$t6m>+58DM^{x9tZ2oCq^M}{Zza9)+ 
zW*oMWwm0`*mr@oU&*jG}&AaM*`a^ANY_{paYSVZPcbl&_@}16-@{MqOy*x*<%KRuk zb8+&qte#zjceo4mY}9E-icqIIBHhe`$a~`jD`dW|chz@U>;Lu7ez`fFlKz#ftBrf+ zlg5BO&2ygbi%;-MVO4z5u>R)qU;PCv7Sc90=b~dp#pC3LvCH^F4n+28Q)BM%XaPP) z!bID({<2Z#QqRVo`tVC_#ve}c^iqnhX*v}_r^>ha_sQ=*+Wh%n{Z)R7kBp_cQwBD7 z7!>UeKbf=}BZ%km^5>sj7~a3}MLgHoqK(O<*K&{dRxTS}K6i4IaVI-f!kEXV2u=Ln zEP`jFR$EqI{m|(qn>Vw89UnYN8OUOKHgDtgLv5Vk6SCiY?8WX*pB;o-Gzlkdnlzem zm%FiJ=kn34lEop6b}#$1HyRi_ZEm?#kV`;3qCeuvJ@NjUi!NoqQySzd#jP^P3&tF9 z16=ujK>4LmT`|CLih~=ZOptY}^><$SsOjw7J;^1IY^4$K`b?G(&;2Cv%TfpV{vO8a(^6 z$;oAkx*u;I=|s`7K98Tu=XDUi$R&*V^1JW88%;Vz6b-o5LXQKxI*E~78n+zHOXoC1 zvV!ZkkAMBs+0dFbe01kp=g8qU^Be2IMR#*PvesN%Tg)8t>#V}@dwjcZzj1iEsG$UZ zc=O)yiunbjwwZfJ4;u|GE0;!p;8sfi(Jthp_UiJY&At{}=4RyLn#bADD609P{-w<^ zD7k23KCYQxu12_aGS@^i@bn4o$c-T0d2aJD`QlW_OS#({zOAnMpRpxm3TwkM#yC{^ zF)ED?(wR@%+&PkQ3)yDe|RKt$#KRetxx(K_iM>&hOyM}PLuTJyXd{NtO4^;dI-qn#2Sw`hMpS{Tw<0r!z8zPr4Ump4!Lv1pl zy>*iIG{!p_x%-i^w#&pb9|pl(ef(#Ro4kt4(piOYlr_efLah+k84cOFHRC683#5 z>uZFC*5E#bDX*ia8gDw=yb+`U>+jq8)mhYcThbQ4m2AQ$4g%;~I{AlYeE_wkf#zAr z!cBTv`sWW#P1t0ZOg>>ClV?ju@Z=LcjJHh}4r)Re?y$jpQrcxbxD>o2t%H~neMZ^p zMQDTzx(-jJOw-op5;hEp2dCvf^ZsL4|79ZxwG7xXN36z48NGmeQ|`Q)XpMdFZDK zJ>%D3pv2p>^DgadT5atYlYyg#)<7b?UMB9!~R20VUH8 z<(st1NN`s`G+zW8CBF956{6#;U(H>-<>vsj_{#TpYrrE@O&jlrc{Aj81qrO zex~1*_}c|9xi#%vqRF{F`?CEGlVoH+lfUw296-N{82l=8%AN1vZPV(P-`06h9T?hb z(MQrCJ4f4q4&acxwwg-W1oJ8eT}$R;%2kIQT1`7{!?pXQ#c;4*cusR6EqcOJ8nD)v zDQ>&MSY>Qs&cw4+fARAVH`hDr%>Be{uvX_=xs2c&Ro(ZzX!2J&7JElbGAlzd+QnjL z_wKA(3Zc|q#kVL%zzIGZx`Mi#+* zd{fZO@xR*VNIu3dUb?oqk^$-rJ|M zq-I&N+Q}u4PqHj#?WEsd{W9z2`govfsTWP|J7U_z)Mc6`e^^s{QAS~t8Q&ug>nr}P zFMNJ+^Tw;Mj1Qolm!ecppstNA7ES}Yu1HxIz`bNxuwZ{N*fDg#hnz8-3`PG;H0>X5>vEr_sQi>u{=rQTe7+oUk*r<5cU^beD3xU<%woU&oC z@5!POR|4PM$Yn_j$eAzI?)aL8i-|hoT}cUjIz<8{l(FS#iyRBQfgcu_fn^TbT9_S) zt`xcWm;%Y7>Sn%EPvttoLbt9o?p@X;M&@ZQgS)ddfsr^5=7t;{?Fb zBh|P*^x)UO$%6OI=)scHC4$R}kb>Doq?9#JoS-P&Mgs*k3%Wf{w!|;2_H8hcdD5`3 z!zcKi=&%qb2Pw(+)lPeI52T(hK1JV$omf~fQ!vEOo>?3g2R9j!;*B z$_>hQiYuEr`ZRd+$X0rEp1F2?#lv^oc+mCR%LrTodY+Qd;R&3f4ce2haYeL>3{qmcX{T^n@g8n^1FNF z!qNKIspL3oFT6bd@5@R7wTsfPjl`;!LL>Q6TT^14PS$W=#jVcwt*R$n|p8B9OHYS+Sj6ll@G zg88$Qv-~A@fn$7nlwxLzoW5~r+J#F4`6;pQJ-S^P z?mp;rPnLO$~pEA8VHVa1WFHj!G~ zvRR#*hc<0k8v1@C-`O)JAKAS*lkA}!wqe4R$J)?d8@D!>=VTMbZ1ImeCB>rL*y>V1 z3W!@NY2M5_pWJhKChOAFI$b{e@+kfu#h)*NPjUS5g-@b&JW;ziS>c`puDbYICp+Ly z8O4g+=B#JN=hDYSqxhrrp>%%wwef_)lM+wZac` z@wj-pnOj+b{>5MZ#pbVm^f#N6XI|O-*?;039BFR2W}(viMdEPepP$x8|FTtlQpbM zdX1wm#Z#S1YpnCwAYrV)T^SzG=IU(|!DgLJrCx@I;AyN>j=t~FZ?)g8<~(FGC8YLc zWMJ%k)O>`cwv85>CU~%1<)=hWS(nls-_4W!(H0cOly&&syk*_n5>1EpvadAS0S#ZY zu6|tq!YAC5oNW{AY;H7e-nx;zPIlGr$#61vHt!DX9#?_GZ;iQ>h{{qYF5UQy=H|&* zYHmSBef>0F_tj_bo-W)B-?`woIfMBZ1@@&Um!n#JBi>pYWzAO`JN5-XviDazm%v&H zZ=7ud@ABuLCi~CT#%IUH5#yW}%B)|QQ$2}xAAS5$8%{TqiQL=eGOtS^jiuJbHd#ch z7Bf<;xtTJ){POz18TsFO*QOZx|5ENXz$YinH6N#xM<175bK!vo6xlWl*5$`tX5QGu zk2@Ds<|AZ*rS~)XjBtPkx;n|Fjk%K|H}DO*lRXFa=Y9-7hHuH|L34i2XO13U;X(0x z-9yyoN%nZ{`7MH>?@p4ZON!pRErITANvqT?qCP&Oi&)UGeS zuWzGVoM;GOOkXI(BLZn8M5pg#;^sfNHR?7_(@r_yPdfQ`GAJMFg%pJ0XAmjrEc&-` zMU(~{1HhVM!q&6%Ir)8?7okDz9b@g(Zm083E?ha8!7bfFpz2hH$=Jk&pE47ozt1cE z@57a+diH(5 z1#9q+mOg_m{|==_*)`$C^tpX49z81;&XYbk+90H-zQwKrq}vt|ZN{3Dz#3SUSGK8* ze~RD2wcwj@!Cw3UH3&o@S(T-ZcJzwU){feD`!?UzYu?prC2H*neyC8*4 zM=fUp4Ze$q)RUmyg52`3@3R>Kmwu*R+Q3i6PbqMo_-*EC3vqBSOg!57tQg-19YxCm zO26w{K-JCn{*>w&Jhy2kkAI`YUbrY{>b@-t;MG=@rOYiHZzfcVSkNnP;=xfs|BLj# za9=rBTSOnD!ac-4h5!IS07*naRLWdL@9I@$&}AMUXsT&^I$0nd6531M@v{Vz1tR3a znl$Y2`IO(}1U}Mjq0guM-~=hqr3oJs8vS`H8;?wWskiYvaC|Cp>b9USn1!-$)SnkU zB-)lb<-s?>Jb8mYyjKty+s2$a^?eFZPIX>w4$o~`IaDbjj3nd2BK-AZX$(pdXaiIl 
zJip~9G2ywxr(g{alw}7;pU=|4(`xL{sO0M;nMd- zAav*t4N?#-c*Pgg@2bKSFdQ?m7W@4^4Ob4+rZQ-cox+D66{IYnqsziUg7A_&T(H@k zCaF+uhlmzK^T16Qgyz}8mkceoa7xfyfJV6scIj7P>$C5Rr}RT<3)W5pTasd-4Z`c|G6j^J01Neh1Ickoi@RVQVL>fZvez7GPz zm0RB&qBd!6fp8`3zd!%g=S^ChNLF8ckAlS_+d_AD6Zt)*f0QEmag*8;O-K*y&wWFa zL{<*`URzvPtiaa<%wu9lxkHgeiDy9pHxp?`BrVMOn^wakE?=umFrbE(HfH4mmGYHB z_0uaq*_>?w#pm>+$1gT_a$&*Zac`5(H(z^g^I8^7_a0oxy@XTSVK z6V5J4YZ7eXb1sW%7HD5rFPD*0Jo7dEAO-5t@S?<~aC!VBS0ACg=0AHD{qZSF6sHEBKI`Jq6a$}L_#}mN7Go{Y&bDx(z%tpUSf{kIu>9n+ zi?hh7g=gY@uuEQ9WX~cn`m;==T;IF*$>xhM@{fA{_05F~ACI*@n0&ZX%v|r-zsdOd z^RHFsU85*7d4DC#KYq^ffyp(w=raG4H*=5)ZqW^f#Z69FlY@XGw|yRT=qpKpFg)B{G2TEOBvrviL0+2r?5K{|?! z7Ue8dEzI!^I)Fb4sKU?$&f<5qVAjs)M^Ua1*c_p>=fBz!@bR--e8 zMTvD2UZH1F4&#MGU9@O@^OryQ@#g#Qy;*tM0)1CM8T!3w^X)(~tV=2%wWv?WoI4kH z52WbMBAfLjJ~tmcSUKAAZ_^$lV zYIC%gb}a2uIvq|X+L-#{>Gj4z3K1~T34NM36)@JYKC9>2M2vpn^|CYX zl<)OLn}7Ta1{ypBTjL=H`?NfJGWiv*9OX&({s@#Ydssi zPb%-t#@e6#{O8dN?Uu4m+$R6|pMLh~v@K;F{L!A9qVK%-!Hj34Y^;wTNr7Vn03Xa; zDPZowxSI@Q**z|W3bV<^%?t&hO$_~oQtU~m&{#!Npi$5~&y@w`)hNqiD)PvQ8a(k* zmteYg!~?&@~5HpbIts88LBDW0KJ}#WOHQ)TWu*F#(+B#QO@3&&Kj}g4 zZ(KwSXQvhQHA-|ee9_5T~x7+Z&^-ZS$^z~#8 zEAhAAdM6$|y7}<8zesj-x3%UOg9{mCM6jjo6SM8sP@rc=AMIO zJL8zWh2u3FaaZHV=P%wE{=ug+e%ow~e{4WJ%ul^_(_RP@ZegH|-@g|IDH2zQ)`5M; z=o1btfvUB#_O3!;0VKTm1TUsqS`QK6ZKI0s+bW}&NiS=8SigM>e?Uq%*$1D21DDFf z=+i-V5)3GVg@OV!b&(y6{wBP*sY?@SXbmhMJMkl#itq@V|MPFsD|1Co{7z?E`9pZ% zp%>)q+@8q*=l|!U$OTujROq;}WrJAxQ!=Sjn zPg<}CxVn~a>d*(8&et+To1OMGm08@SKH38w$_R)IBM=1^HgST5`!cDNIPgvCJp{XP zz$wpaFLdlFL*^hWmuueEKFQ~z10mWE9JtpsW3Qs z$S?K7TV~1Ix(@EjoU)1sQ=GgzKnHR#z{1nzA9x_4DD_>k(O4P)6`Xzr?!x=8{_exgt&Y*Vq>SRpB+kV^CMWy%r<_f>LOQ61 ziLyx~D-a7a6GaP@wJ>G9v@AkQdRZ`zvM8Q~j;fhb%|arji^cPuEGR5sPv-+vUtqCF zDO|!PP8|hrQfZ+`VeMitO02v2pf-8@`%f;m7;195`@kq%{^VwU6kE5m2EKj0iC~k; zQ&}Uh)cx?ng%$<}vyezp9R3snds5JtL|Sn0!Ods4MS;mZg$MlLwv;Z5bgbj|TfBvz z1q@4DRd=C*2`{ArEAd+?Gv+wB1wwqX<_p+jDQX{PB{xS~o4DR@ zQtf!-(I)KT&!0Ot3-O(HgAX?x`~y=UoY<8=$*hyn<8HoyPj&Q?^5uSIv&N#lA}3Cz zK%uaD)iBMPo@>{b#c$ zWhF(tg%)MZv}b?BWl%7&Ais0_?&j*%EOS!QoIihltl3#aQ}%K-aiVs> zW1AFV7C=v*F6HlBG8J#h$F+u|wI{P0)n11WrRb|nbYvZdpZ8|f_TZlKlb`hwi_`Ca z=R5gVzcKxyJ;Rh}w{m6B#kAZ2RhP$=&kFf=INr`O{X`1;gYn{l{rNa=(Xn^;!Ohit zI?u7@>a=U$vPztV@e&myQz&iu=jXCy@7@;9Hshk;lNN{i^ozzWigJBZJCK`jr*P+v zphjJNJED_EaYlT#jzLpE%4ajoqaX8;JTj{9W7(Bl;bO*S*4NQc-(2%Ii&je0+Q0gf zkCZrM7Wl_gJRInV`hWee|BEa?H(mUAag>T9X9m~ul(DSCHx^)I=!8TM3-LatPwdXR ze&kmHtiSZnrIb*qm4}Q|7p^5dEcqz7DDyTQ6ZgZqb^10K@f|tf)WIkUk~8iPu+f2M z4jni)u8r``)m(}=g6<)gEZoU|n`KUoghKFA$_4`u!$ZHZn7xs2&(~f(yZIOY{6DF! z$$g33t0Gm_8kBqiZMc0o$x{%`RMw@*HHfP!#Dao`ee9+Nm z9;;TZ&EEdQk3ODoa!{MhjJ+=}_+4gv)d%^tN)r~Gc!k2wM&Z#EOn>~H4^npA*nHG! zAox9xe&gDt)RVU`byqHH?2BEVO9>hB@saTeZ#1GrCw#+t*)t~*k~PCeeOJdBvs&Cg z>BP>?7|DmP4Z6*Nu#C64=s{a}Qb!E;4nhg5k%D7we+Qwf5^`#ZEan8AT?aJVn8o z5<1>Ill3^+ccWwW{DIGAT6AUQNA^bQ&F?P!t~R^g25Bym+6;5--$j=$e0$@~^K&N% zWe#^(N85Cy_%TN4E83Xb8}f;aJanjg6si|RHY;>;dR&@SzvKD%-MxEVWNTw?+%<*g zZ5Q;#x8ug_33(D-qUX_0mD#{FX7ly_#r3apKlJ6ukW;5wou&Y6lfY)tiN-?Xpgw%4 zF+T>$MF30D+zjR7!6w|1!dXbWn0huRqWqrPd-@ihiyrDtF^oT4Fn27sNjk2!;#c~! 
zi1;7({RI;;=q3)0|C^wfFKN)|owa?0WV&;Rjn+YmUs`B(qtzph;#Mjw}% zW+ZU+(p>ImgXdYNt0-_?kT|q$EIhE^Mp`ro=gV#2x={N>JaV!P-B)wTX2am(rORW1 zeE4Ydqvml*zG+j6adXN8Db1gC@5F#}CbQNxS8KSO@%hy0_%s~X z95R_5k5V?er-zXO>-uYLx>F49=TgEL7Jlfj{cNtdWRT3@I_2W$pS9_9EZ%o9`_14s zPpkj){XFs^8b3%e=`MwF)m9tRpzp|j@^UFgjpH^QS;|wQn)kFxmu#2j8`j~`er;YI zYm@z*#=wW0pZ@gcxxhKuSRK!i!@E+_*0=QMuj{Wa+B8P8wl{YoyAMXU8C&94uF||u zJ11+^bv9b-^Fudt+1h^k7J2i=yMyP$+zEcwMiHHl_Cssq*7@YX{$vXJa3S?&^5jna z>&oR%!!Kivc+$y7pIin0-Ov8Edr&^CP0h(0$NJlSD5YqI(qE<6#AthJLt_-B^4@qx zncAI84!DuUPTL*GC5QEpI}Pp^$CV!$VV!IP?|JJ3ie&w4^@X4}&d(gF5S#9GM@0_J zjkO7vNq_9{OSw~eEkl*#%?p0Fc^;1&KWs!D$_32f{)`!D|ET#k`Fc3zE@K`emEK}Q zgCbqq!pCEbz>CIHa%P?4ux46=rTMS=qL=n}^5D$bSAvsknLSy_x0X4cwf@WJUY#-@ z*pR7@a9w4S>g~7R?bObzn+LUlIkNI*eNx+#1$fU1y?TB4cEM`~CJa%2^Y_2laDnvp zyWiWKY2)iur$gLpq9d92eD&2=<$sa2dh<-1u<@0##>Ju1!Jn%oe8pIUV%)fc_G=7S z{ees{CSGrY!loq}4E<}O{k7-OWcI$=pI`DfawVoO>96LEGcTI{Q~#{IF*sVwyxA;x z@Su~V#ZOC82q@O#uB-oXv(Z2oRNo+Y`UI@1t;-rzI@BOrt7f1kRJ3G6~z zniVloeKGJ$2`jPvMFjax7+A7NGvWRJ{j<_qtgi(&CI(twsS>@tHjnPH>u++T^ZV`} z^+Q4O3+kEA%A4#0!eK(UbybJKX+pyZ@NMFmig_P=9r`wOg5Z==$k1nph6T^_ zZOJimop50^MZkNf{5WjAiyzv_Y~rGmbP_}pNRm#4u zw}Q8I8saTPrrf51MMVGWC|M@X0t33V<8z%0zNThbb3d>5vQ9E>%F&~Fl{h+h}_b>u$6iz zPj#-C!C~-RfTsNb4aB6}eolD!3{7x|GCT?B;WPE!22JteqCeWGpLSXA(v=YJ&g2Gd zD{0CseCJbs{Jan;9Pji|9s||D+$LX{Cb}}jK?F=?0oYRk!>{_%wu~($rtDQ804s9{ zBmc@f941^_0F4fC?|opDZ0evKby0wFdY?K^`}Zy$OgIkEl4yfyGnYa3hG(Wv6+hXA z2Bk}J27tbY=EED{fdeqO^{FX$-r*-t@CIh(hxfo9W%XbK;5>HY$M?5y@@OCV>TmLC zGK)9KrzTstzRd$Ck>!K%@6`cuGScd2IKa{bnW&ANZUrJxw}a>$1AjCy%x${$egVrTFB2gp!c*-6d5P4kqa@ zn*5uDQu5501W>=|Y(B!R$?Yc1*P_+6EQBe#O}Ot?pU2_CvT&4+xynKt3lDBbxH_?E zfsQwB<~K3^9K}=Zgzo%yPCu)x>4VXjlH9|!%ii+2Y|Em{;_Ph7n**(We(=5TjD^$B zfAy=0d+)usM`5d6JjZJ1NUj_#x`a{8S)}Vq7JnB$`#gp7_oK14qU76?-{c>6D&TyJ zj{|dL)@A}eWf8?Uq>GBw<56KI=@i7;X^#F@SMU5dTi{WQvC=gUpeS~6kp;a)fbM|* zs&M$R0B5nE0wdnRzZ4j(orx9-E1Lz}o8ZUCEr5kNs_nZYuq@YH=yfDzx1R5aI18=0 zY%&~J>R9CAf5(xzS77C6-b$H{vw5g^1poA7~sF@4lUt8|A{m>XQ|A{B*mv<9q&WiYNHH6Ttl3 z1_jwU`xIQcfWnvRWs`DWV*rcPOV_guY&YRh{<06} zriDx}hrx57b`siu(iU!MQ0ujJs41rJ8K)>`uZL^BIeNNKulghW(6IJL zkD(im@?Cqz9ep96$b_@Gabm0L;y*4^%nvA*$jP<{rvBR7IB+} zd@$RI{7BnqHs0_h%O#S>y$*qyJ`^OOjQPj8K32Nm#=32lhwt%V8-dfcVq{CQWQBjZq<^*U#0Lm^33Itt@vZk zhs|Y-JuW=G*9I>e_Y$fl{VPEy7}dlNx}QHO+<4I^D;cOtYUM)6V>j= z_Y`xi&hh!0`;uLqFg$jm@9Pw#F?~92gf=GGc$%>*{3d_&d+EfP%_pC9YVOOcvo>%! 
z;qY+0wf7|#mp5iVy0To(B(H3Gn14BeHI}{A*SyK8LhaQ5^7+=?8}U;YZL6a-dxWuxaQGxk@=j}n^H&ePnzp}wBbRw8E5G>Fm0T^} z+Wg?V|6t_))vK3tm2z!!@#1Gg0~XirE5MmtSlO_!X4n^sSt3oO>$3=`n_`0LhE@s&B>{!WF>MmsqZya%nr_$@cD(FTk<7!Q?Uc*iCk z1-bd}lPA_`#gT*6=Wy=gj&_MRIf^&b^;m7qZA}eDv`t?#CZW~i)=UQ*JM^tJ|8AYt zSga2l-)FpvMlMw5YG!djMkTX(vJ5FEO>5GoCd1QZUIVQKxrKdYRapN<6!tz^Y7x$3UGZVC#>)@_D95=u=?GA~R zhDUV_+^4`Qug3!s+YO<9ic~s=SsWaz0gb(PfmQ>fU5heNQ6asuLY2#=8(R zct1M*VA3Nue4kf^Jg#A*SOBkyY?SA#gMtYO^{96cR8dLj1;~a@Gakzu?3J(QlX2xm zi;>0f3=X>a8DM50FO8V0dXBeM0B*0zJ8kV5{+1C#4G8)AGahwPZ;)GW;U`aZOkYhM zix-FSWXGpsZcm*Htg`T#djf*gqk)Is>K?i%mqK)2eV#Id>8g{gQ1$#DSy2u!<J95q5rDB5G(e1N{eGHcx|MgvhLdN7ID$4G(pvK8k=Aj)Hm#J}JeS=3ZV_q$ zKfF~v=w|`C=XsmD7KvEi<_}DpOwFlKAu6u#{M$eO>C7{fRAO%1=zG~5PaUcX2?9aFWM*vFEkbdQe1b+s%PQ-uvqffi$yNJ@cDPn zj4Y7%LB<1w=h=r>cyb>*n$ofjm%d+0$rJ;%fl|aK@oAfPo-|?-KS;@Nyx!p9($*{HuQ2|m?Dt|E@Y&|t(cIH1 zZncFWhD6dQ`f#0=Tbo04ryv-`PW`$zx9V#XOZisXvy>ncCMTNfH%cc4Ez)olnT;JA z__Y~j>?(iXZ48g4_~ZF`Kir^x_Ea8vm)|MW&)3PDrJ#g6zVuRmRZbs*XA{Odk4&*S zbV;jm&M0Bbj>qNDT_}qbIOXH%?%1#wIijpPO2DQBMFX&H_PoZ+Y{ojnM>wD%She-| z@xo^G!^0Q-T%gH|nmGP21>@z*7kB>pFMpNMB@e=sX5xqQoD|Pg9Nh;gKY1kE{8J{u z|DXTm*E?UQ{Q2om3L}t$h({dn=#CAH@?CvoB+o%1F3z1jIXwHI@pCGCD9Uf&d9d?0 z|L{*ck0==8<^NGApFbC`6_<(FKY8oFP-1Hk{A8$aMM|4DdX{EF={a-yq7}adAN(` z>u7p&jgH0e^+mn0nbX#cZXNI-7UCDHMb{@gD`kfjrOG8fo) zUKH6pvz<`5o&oMqAqqyxQ*BGBy-vQ6cdwHnaK9IS@2ejUC11Zw!N%BXF2Wmk^Eh@w zj1iM^4-b-qyx?~oGXEO?$%b$)dq$hHc}JcbZ|gfQlB*BSr35oapw)Ok*4MW)9`f|Q z6L0-B^9X%i(qFr04PHoN%!>5BeH!8yROIR-W015+>1^Qylyn9>Y5>Vg3B`Pj@c; zq&dCt5Ra1S*Sk2DLe~5(Y{n1C2}&@sf$>_1E_1-1-SJ7|(Z!wm-CX>*xW>li!N!@e z6jwg^-D+EWGDgf`xjTfx&OCYi*hwg+gzIw73`OFiGmJBan}sN|#a0$N?WMTS%}>^p z))KG#b4mGxj4AW0S_l(#oLBEcfQXT8Jx1h{?aLS@{<@QgXx?9R z+m)g1T;9wR?$6q_v1{Hx+lf2!?`Yvr$SN0>AB#WErsQQXn6Mbpk5T7VCm}cr)Qvl* zqoH---jv1HuV<{Q?`}7i&gD65T;IBLV`MW~U>>*?t*>AIYR2u6l$h#06dl}KgD=Sx z_rN%nb~}Uq{o=#le~@9lvFM`D@2*`Pd>QO1lkbL$5H^&SN5jQ>fkK(V%}Fd?*E*S} zw|fZCXm~doGZ?)0{!crZak0A|_U?T7#TVU~@y(F+QOfwo$u9hB4Ov_3FT z3spDs=^wje=8TzhGr~M}F=gvFA)4H0!I+B<QI)l0O23~p^KjSm& zBg2znkz&|B%!w9tIE`|qkb>IJxOV00m*LF(jFuURV}KK#+^}XaCmz@>Jk;KuufF_r z`0W>e_SY%eJIUFYd8d#C|1_R3rC@{aNZfQYafwFcoTSNT7Mr|bfQS zeEBG@^xuz(28zf1oBBr)Si4G?exCX(pxcHvKKNV#{e%X=1Y^>I0=z+vmP_%f8B$f- z;s?yYXzPzL_vdN<;wNn$epy8n8hjSLwNte{95}{Eyx2g;kJG2%D*Goq)t9MZlKbZ! 
z5;Me%?b5Gi5mn!JPUN)ObQqtI*`6D^Qri@zH1#!^~&uY@+2Y4uT=2c_z0 zv9x%k^0b3%fK9DKch3ir>cC5$Xs*2R__V3Sh5st1emN;JRIEG@!!rXwfqtuN%Fpz) z=`%bG^ybMx)Hp?gH|>z2-$^XrkF*23^4hc^NKb2fn zKKwq4!YaTkppYvYDpZ#W{V56T=G}LFViE*v9)ef8EQ)}pUlx&jCZC53H#iQVdp>=; zsdvV>d_!Ac%HP;rxb)pKMVpL9Mh=eB&>9uAdEq^MGT*Bs7@kcV0q{_^@_GaR$Iwgq zhyoVOcz5daFEskU`dS%Z(7+2lv=xmey?@~Nsttas2!HCEsZYC%$+8U&#)&kq-WOj@ zfy#pKrR*ku-~LF~dARCpQeKMQGRm)@3g8x;A-x1?i(Q8I7v80LP5wV^aP>=@21OU2 z>(imp^p*DYz2_r8WYy;br+>`Lwf(2vg}az4z&(c%k~I`YIX9dNjb~^`$p`+Q%Qln*yhl;k$the_bVc za9qz8AMmbtz4K|_*E|b$hnoeEMZRznypwI79aDUo(pN0pAz|+~Zg;f!h?uM{UZeD> zfP=?&VS_gjL7AyXd9w6t!M9PUZ?qLmPRUG7Y4p+r8H@ z&v>phn;?r)^mr64cjaOAeX%}YR`$haJh*i9)QO`l!n+N=O~K6;XopjZ*~p%x5Dw?@ zx@=>=Kjnsny*AjOKIQa=lDvIg?#X-RS#%PUTSySz^`CwDZ5zzI?}PL7wfKQ8 zildJOx&8qW$&z_!G zsWyRutIfQfgvjW<`qUE_Pdjah%n&vXHXJ;(ZGvqI&YW0_d!b2qyxlEEEhE9Zzxc_{ zfB&a{(WN^de(?T;KY`0nFJH_viC6QQqisehioKiXjv zFC1@?{&otsZJbsf$vlwvQW&&R@p4ppe+B|xhhh`kbP56UqY)=c3J1e82k$*A>Iz2@(O0u;9<-w^Wo!+E!VE)ef3_3iffbpvkyNSWya0IY}|X0LaR1v z@9jKP&!0Ot?c6B@kJAn0hzlYql62;SHo0rQD>nJY>uU8m51zs^GzdSYfzuIoJ8xCx)txFc4Zbi>@18%i^G=Fmr)l(yF+-*ar*tI#aTzWccQaOz4cf=hwxuVjyr+1b zHuE-rV<<3b%sOTRFS_6ziYuEvvhLiuv)zYqd_qLMPD#6Dd9pFSicD=r*Qo;+EK;tV zedoQM-M6l1EP2od^1$3Z@bTY$mLm7~&cFMsU&Uu_T5J2W;?){IJjlr%mvoXFcne+U z$ax`w@S#}f423r-V&Y#0kP9jLLO#Xy_d6f__Y5B~j^UnlF@Xs39Yi_&8YW1sAx z6c&bLcgNKCG*0vld8+)3;Hi_WtIptRzdIMU7t89u6Djwu7IKKk>pLAy$7c*k?i>Ky zxY6D^FgUF}lVhL@=krCO8Aef2x{KO)+lg0vG^HRuFpkJ&ZRUl0Aey{+ld`n7Fu+~x zgv05S;D7tyew`vQWprbYvcxfThBS(Squs-?zY{KaowpyH$uhX%L&^pGZEZoBXFk2r zxaVa~flo%F7n$f1(4(CS5Qd52h|*D~kWDTNWZ-rgGo>Y(U%q^K=i^UOCTD;%hgs7e zOV(XYsr=2=Z$?fZ%p15_CFR(c@yOy+Mj=P)8G6xV#&$eHS~}rlj(L_*&>a%S)cK1S zQ>^E48GQUr$sAEAMGSM0s^`ngQ~Oa$Iq)gTz4qojfQAfP=J>hTuez;&CZtQiDAtT^bC6iTPWV{k z96H&2UcVnYd1m-{&z|Pk#x41cPEG`k_iP~~cV(P6*0kH)?&3!sJ*U*7p$nTAKQYMF zXUVer(R=T~W69FDhd*}J_I=?x3n21|QZ0u9@{5ciLk$Z`MC+L0#rRJzQ&t_0*C=)}aR_#y*O4XGa=jF%}-b|G6L_rN0jtSyA! 
z^174aw<;x%k>R_-K^;GRHhw?2^Vz4LXMFDtlN7(au`gb_JQp956J*xNL&oXaDb_!p zADu?H+S^zUn&TKQT$(NxHAAT|c_((An0U}UX0>Yw-Xfj+;<3FaB_$q!*sS|Ut_ioc zPForqCr_VocscIrx*(;BhvvZhAYqxDfjLj34GIAy%GL=5QYTr!q91?zw)(pX3|)d^e*Z zrML?@t>L~buC=lh!3Xj*zVi7eoj7=V#=tT*FqoW-C!1Fq)Ak1x+)u@aPOjK1xoCP% zJV%B<8bAobKynC+q;Fd`Bo^#qsfsm^e{G6(71^J#D3rE^W?MiXFQ~% z3w%1OH9W&G?c@&`W{oJkpY;xh1vrj!yf*W&HXo8>+JW|Xo8iTthtXqqZ5escIAKh( zW?+1O-MF?6eO~{4ckSy!A6*;1x6XW0IGQu{9sLNU|FK{SrF8Ym7ty5{=G`f9EZ9QZ zoH=!7<|iKdPFIeRDLT!##xv1XJKx0f>RbDGC;TYRDfqjPJ=uugtZffR7@y;@;S>JK`c#~*%hNOX*z#1(c#tan)hvJn(uN}GH!Otn?8ywx$v zqRRR{>8invaG>-Jb)(WBtg8QtLbp*yT4UVmT&{BAE4`v;G)qs&s4bJmO6Rs z^x-Q!+Jbtu#&A9U03i=xxkAO&NrUh{m@Pm^!qHZ-o3 zs`R|J`I{b`w#f{m{;#+BT$Ytk;>zK_QVqaK*_PVCjeZm+>Ras!U+^~VoVtg0o9BTu zNhrNuVE1jxZ}K#-7tfY&7BZ{9v}Mr(?Ua^||G-;tWSG7rbd?3tGkh8V-MwCF^@=Xj z_kFDFf>I_7yG2o3g0+6i2DZKfqCDGd3s-RZ>)$A|<{_L`e-T`**fx2TRnNk0!K*+Y zhUdTlEWCz1J|luL>2qcE>KHG4^-LSpIW$L`Udnq%r}?07d(!9Oi=jz*%NKzD23U1^ z5Ps4uo=YG3VFXa*%P)K5RgpJuiLZu_6;ne(ZxEl_%Dbt55G;?3^SL_8x5glPsI#dR z{Qg$D{3AA1Qn(9_L`KNE>RYrMl z_5B8mOVV~ zD35nM6&?VAfez?4_)eAmQ+!jVl|WxN@CVNxPCnT-H0%4Se+n+x8+s0`DY7z|gyHQd zH!(JSH<|hzFAk1JWchqmk$F#G>z}z|aGJ!3G(wXgDaTFVr?Tc<#59^(%ccpm&X)Iwx24m&Z%~O&yCt zYFD(Mv=(5~U!G4Mc~>Fhe+-Wjf-!O3Y3hd`Yr*8JjfR^}3M{3--IU6oecyt;MYcl| zyxtZZ$KzbYR6Nl(4_oDp&7;19rX#Bs%NB$->=uYN>&5{^w95%!w6Jyboy@T*pd1up z#R3^_6v{UK;u6}JIr40A$J^z3p;R2bw|St9IF(ZAZeG*8>Bx##$q|d@z2X7ao>%4P zrFHx6qm%;ohYn{>7iTXzQy`8faf{PWzWBNe#@=m_o1*8F$Ky3G+F;(!^N=!} z*Wo;?FBvh86^~NiD=#*?KBAPc8Q`U)eH3!HioyELwQurZdzb=RU(~)}*zAq>P<^j_ zV1{nIaCqsR@AxU2+3-G0Au-2HH)9X3LYfR~m*xUHMhl9xeZ_mlSN9)_lbM>w35W5x zw0Ue(S3Aawrl-8f+uKGKmtN_=6oL1zw)wBEHFke=I9f;(3U^0AZAf{+Q5X;Gl#RkR z9D23lsk#IfU%KRv$BahVOup)9=~uY8L~}gLsNud%n9d7;W{NzZv6gHqW)`rxd#1CJNu6zjw!j%|buCS)mXd zeRsFP^WwCNvwSdx)tgS>@(eOTpc|M=5t4!{MFoX}P$xW>$1^WJ&_6a;#^Sx&PhQwq z8iN#yw+oAOWPhOn8Z!^`8pS(a>#bRheAP$EES~?zj*>~;ZIDbtOYww{et3E3XBk?) 
zy^)7+^3XWJ^Y5nEvq6U&WeR%kYcoz+RF>*!jMfIcK64eh7y=aXlngviW#k>pXhQ)* zUI~No=Cv5WOTHY)u;NYy0+|wk=co;>ONQ+$_odLo;JcGG6ao}2`}RFe5t2u5GUcy+ z^-*D44u=;Wf6;|k84dD?JbZZ3jgdxO&kFzYlDv%n#D^YlxMVWrIsEUmnSBv2+$~Jg z&;H_<$&>vZRsU+|@&%i7v=m#vxTN*D>4OaB5$BEwH2z`a3R2s=k~t|73+cj((%3V$ z*7|VGE4<1-{qjoQrVmEWQ1nio+V5G!tbZuTohV815lwl}QySyds+m5Zklo*=+DQen z!JQKL||S<#s0`RFpw^Ti(CpSG3k#Mj~&Q{rClvegS03Qtl# zZ6pK91?_y^xHo?&W3HvhJCfmya`<6}24O8I;q;+#WDE+u#QWVGhWFs$j|V8)YLBD% zGtPrSF?{pZ&5Vjm=7RyZhlTkf^DbVvu=79v^>-&5SH=JUKmbWZK~#m&q6o_9%ZOzx zMSpS|Z25SIYWJ*?N-!75|3gMKAv=Eg^IyiZl+L_@>%+pC2!C-pr5%}x_V1j(ILg3> zlzsIf1xU$xy%ys(g9M&(^5b?&_aE1dQQTS~j9RZSTnWi$y>fKs(X8mR1|b_>#qXz* z{l>0SOAI)cVNS@LNSVVM(&?MM^#SEN1s(nt%ceKdQsHHBPM) z86WZZT3<$}KK)<d`~8m z!<6%Z9zN9<`%-Lk0(kw3!p{kiyu70!FLK7W`^}3>p)FqXy6l`H4s!Te-y07M+vqW` zXavUz$(6wcKd5u9e>o|Tr)ah2u%mei*E8dAAtU8Gg&zkDFEn+U#QOMTif5->j+b`6 z6Jk7O8QQk%FGhveEzob5|I5!l**V(1ItN>Ot+0KK-HdqF-gDV(gNNL|d{J1u>s9^M z&ed4tc4fqM3XnmV%wR-9RVNn^RUcVjqyJF5e|XK> zpTmfgnJ&V$dL@RYk5dXCC?tw8Eo2M2Q)p`ox>z4^Y&mxPbnA%r6pe{j)h}EPLykVy zUlz3bY2DIwwXZ&l#%mv9%}C~04;*-%b4Gn)?fl)f zZ#r3cK85+2G0F-rNRINr{^rUTwcotzM5238TC)YurK8*RianrHAb9&|WAH$9w%>VO zA6nNLUpG3nb2{1yZ{#vphCOptSyOP^SHqH+rg^Uvu{2OYbYXMHm4?AeR$9||=Yjt9zr?DWx{@4x-JXPy3w zH)oAe{ow2w#Vz3)Kpm+wAcBGB!)VrGofInJ;6eY&Fs~7#!?43#o78!eZD1)oUy{VV z_Y3|!>-k$*I4}pHHP5h1jT(i5@*^mE7NHb0`3y0_*J5hntn9*Rc^WK1xfFZlpJy%B zrdDMXoW{(XC!_2DygKBAPfP8UR|oW30vvO}akD7fDB8B?TcK&Jc7h!Zt6F_q zT$aLYsNQo0jHo3{H>s7Wd>!V62`n1lG&sjIvrhaYy($Yg`TW(MAN#S?0l!gpsRj~3d zpsSxY=_&`?*Zzr1XzApeC+Y{0gd?0$V&F+!#e7|~^i1yP^J9I0cqQDWf;xRQ?U&MP z!yn)&ubUP&<=@mZ&r(2mKFX}hFKEF~|Fk1CSM?Qd%)x_XmO)MZP8A!TR*S@`V)JbH zpjM7TDwwN&c_!C@Qvd&7IW}Md4i{MU(grvpSVdQ9&wHx>`kA2_snuy`<;U-qXDIx8 z@Z)z2)76ynH<3`14)F(ipX;9~4nDq^mz4EdptlGujQn4fdb-J5;*irr{S*z!jltUI zK21BOJB+E)6j097Dcsj>=WY6G3#(Z%z}mLx4Bo=6q+VMXs-BtE_|YC?q2F!afT!&I z{@C##tlk>l2d{B6GzM?e#JAuuc$eE)AF^y8_6W`!zFA|fvicp`gRgc5P~XjO|G=Ba z{qO61^n9CNmVSO<{neJTZF(bEecg`rp=HmFcckjINUF}Dm2bpjZHB^7LETeVnYDLQ z|8zlpHWaaY-{#q5n7#_=;@jSbrqws?SMjQR<`VU8+Bo#w;yH9$d8Z!jRPIkDY59axxg--Hm#VTYm0>;4cv=8&yJZ4O6@Lug+^jUlZZUyz>mgdS1?5Sm*t7T~J z!}ObHy#{{okT&k8QT^AZ7N5WV?E22P_n(bo`aqtm7Veb5lq3{VHX!?5h?Vl0(y?L< zr7k#Y!Ta{vHbCKWB(DN=deP#u3fr9UertmkbMXSH;;Eab)uY!fw(w`=EQ-~&yt7+? 
zmDkZ^={y%?v#5I(ES@fuCpYihU!Jk$(U0&|myNYef=#xg@d_i?!xncBiX3{3X7i=df@YKuH^~z-PGf9z{k8M zs+02gP#Z(arkgFI-@ACec&a>q9UDdR73Rc+qUgF7pwTAf?}-cnWCP`ejR;%p-X_k5=lb>UW>HVx@k(>y8HLY#moAT@>}E1y&oRHoAAZTobQ=Bdpt{6fZmr-+%YL=u>!rlwLO06i(0c$`f+OMh%bJ3r%sOn;bf-=Y&!k%9JJ8@+uB3;fKh-} zo+sL`{_N+YObW^26Us^(1AXT*x)Uj&Y@jJjAGPRLH=Ysy+_=WGTO-k}n|H_X^j@Cd zp519vYXh)vpO7J|t#7@R$9gVD4Jx}A?3mwDdWy@H__(Yju zqx2xp-=i0fj6(2U9@VaNTgpT-Ej95y0vhPjL^io`zcVA<@ z`n7G>ZpJKSTf5bXV`x*RHhP(dX5-vDM3Ezm4VUa=GC38yzu^?`{z_T%m0@AwmL6lBJ?%GFJY3HPTd5?pdiNu%sRo7ei#$pfL9zWORd z8wEnNB5xb1^Z8|CYJYTay5Y&A3}uWt$pBuHcdZqFUt}yr*mt*5aP;%IF)6Hw)n){UKbJz}?CDdZ1l6Z5 z>|Arlk}KrQ^OS~om7!;MiZFE8T|W!au|Il&MJYqUN(q8bCX`hh_afX($OSN;7kXva zF5Y(gcRsj$ap%^fMn>@0C6x1ZE{GUj+@|t{|M>81uxreGu-WHA_TH$$hlDQ0ilq_?hY@c0- z%#esqC+mM+v=bXR#f*NeKy4NtO+5HDck;UD{Vm;mcq4CFClnrL;4|)c+4H>qx;D6L zgluALFz+904kS~o|NPy#lUz-XlSOAUE*kr{G8hP3bI>@BH{O^lnxiQ3tPL3~tX0Xn zw|EoO59VT_0?4l!yWz&GdsnjF$*jW}EO?2cTbw`ghBrSRf+L=G0?pjf!<6;=M|s9` z{&0Qo-jr$#=Vg2ty+V)z5Qy3n4uOgf>X+J3)=~bExr_r(x`>^kc$B`S%_$(`Fy0ng zje-3|{lcw{VE~1ygIF@|>zs|!lY)w~S|U1y7IRx;cV!P(OHXZo!ufmVJo|jh)&@&Pl_Y54wPx(vlHTTxUvn z7dP@q*B5tMPwB&ldEoQtK6~Qq$kP(z%{=8R!&CUw$&+JDV;GzHBHTaw!|%p`bN>8$ z)&Hh7>$lVPCwXi?@06XjpYe66kQ-#9Qvpv38Ka$dlMyEhyW)hMmp->%wXXj7(t4&N7kUV zJsQt(a$AU#1D$H15dWbQUFXhSntcYPFZsdnz=7s@amLLV+VMQY1;ZX0F3i=fPQx$` z8+V@V&N#h4IjSz}%#(#wVPyOHN57givanT$TlZPB-pn|4q`n9HS<2*Zzb!OX^gxU; z3?vU3euQbkJL1Okx+ni1*T<1ew@e$}$_PlVs@KJybHuIMMwU*T4O`;=IrcWwyaLZfctLjRZ8{`<;cDq)Fer$%Hc|GbULZ^%0OUJR!E8 zZwp#T0#JIDnmm-)R8iY44?NqvHXl7l7#hf_zQy|_Ro`s(as2>M9t8Nqz4Q$pt6tCK z-Jq-*d9+hkO?xC(e$wUr)684kl^IO>Px-1XQWEUpxx805I1Jp&(4lk5vfl*Pf5=Z$ziT?FmL(!pfkrG?2B}Dwjg2zLOyA$-yrDo zysgSSR-IxSxGOKrgCXyx{HmkWX@oMKPX(*6diBTjL+{}FuDnWmpUSmsgT%lZoO*6N zLc}w#$!8qs^OZi3;+vIkAj-JlPqEsJudCbw0R3hRgE#H#9o^wR(U<+h2>ALv4&K9+BvH%CyhF zzP5At=RYZIMhmt*DHie!`Z^_Q0D8`Ykyj|yn1yDF(iV~0YJsH*>a&rYxT7iXD3fO+ z+(Swdi%R({$|;J(qqCWKnpY&{GsXAgHhix$Y`*h$3-vt1uYcdo?=2XGLs?!d#el62 z9ve2F>Oa&%bvFOe>RHN@$0@uhcZHsiKAyz!#D(+c!mmwPxKWB)MBm9{ZGQ@jLn*QJ z)w33$d%EZfv&;sx1@MG@m_?(@k9N1PT^B;xIM`@9GEYGQ84E`X_5*0uLeRo-Ldeu7 zaG8tX>Sys3Eu595gtzeipcvq{+Ek(6p}b}&wf62k-v-R$u}yW0R|K>G7aGB0c$7R< z;&}NP7jdSbuyGOQgh$FRKl*SM;*1~^*OcJrqXSQD8@(&ve1knEzV{P-8hy?d5BPWs zaL2ddi$!@cA3kXl)G_vqD%!-WllSqpE@#yjlq3uiJWvl5`i7#J=jFlrl?T0XN0G_P zXLzM_o9R)Um+#e!;nIz6yx9)a$F+f%9!0Hw|L$tWs!j;FEY`+QoKQ#X@i(uyZ@(83qYZT% zXG$o1sqW(wt|6sscFu@j!-U$OD}B3-pU?}W)IpZJB5lr z^(Xjf^diNPzL+&Z3JCQvz)^hr%u5K5k%#&cKN}}DY2<`FJZH(TXKff~ey`&^v#Foy zq&`yL_7ZpL6z%`<-~Uy8@Mh<~{KG%SFK63K7Sp{;VeyFiC;`#Y1+q>u;2#Pw3N4Ch z%4fKY62E>ohm9hs{Zdk){Obe>`6s*xdQs?5)In^FHSOxiHZeP$O2BiJJD$y@hS6_M z@5EzbL7M{Z-ufm(MX`Jn%r3*?vGzZI`^nC~|BDauPV3^*udf!OqqwfsLBXmW3j zP6QcePBw)6$T^BxVIQ1Mq0GE^?!d^aFRy-^e7`vcI;S%JsXFW-|NMi?d7C9uW~>XD z#A`3bWb}BRBKwao^TLr}zslg30u!y`l{LSnpoj+#Rt}#(%8Mk2@aUZ|Y-m zZ;TeaOQQwa%yc5MKEW##n+KA4lq_3AiGE@{pj^S{ z`u+WP-`zQtm*{`{{g+*iSLly$H!lbSLxD;TSVd4s&De?-V366TIyyf};`q`#e!rd) z7!35#FBDKN^mIz<`LlJGgnS=lKsl9%?#&Dy<}>4!JX42oVdMja*QHDEc9CdGyI@g7 zfAh`Nou4F6UuJxq@GdES)|gsGXZ=Fq4t^ z@}xP~7$@7UODNOPk3Z?TjHJ(m~_v`rANiQ+g$)xX600|Yu1D=BZPO_R&*7!Nu znm}lmM|q;3%CPb#o)?D3JrQ_+6xk?H|4?${eRT_Sf>!rBJ>diqrLpm&Q7(MlmE0A| zf(#R$%=pF643OWYq<4zx=Rf)BjJ18yv*!0#J@LzgH>y7K-*#QLl;X`JWyI@@FcgOq z?y|O+-^}~=4CL<^*OOr{<85oKw;J=apzy!vvpK3?(ctyV=s$90dHhl~Gi>i^j)9jp znY7n@)hDvCHPxPscYC@3S^uDgxlOnX-x=8%6AoqUVL)@S?xVbn#U?+L0YHD=EPRH& z2V<{$6MpsQzZ!4k{lymsfk5 zL&v^7Z+5==>JK~r^?&?R=Q-TB@oeY+{J;NZPJtXecxp~#SU2K9-op&2`q_FyTaA++ zgo%m@`nzSz$gVa2@O))(U36uH(}QxGW1J`$=Ysm(Tm_!B>5t)8YEM+0E~(1$I&qKz zygW`%=uhiXa(-_z#rm28_=_(;9sYb19bMW;jylmoZkQ*9KXM{PEPWx>C|1v(y)@%P 
zJH_bNmM4W6VrV5pGKtL^<{6%=PQ0TH!YFX+s+Ow_*C&onezVpt7 zLLFVNEn-c#W=@9k0)COAd*;U4d2c+-pJ$lJ>$$eBOBr>ewLdOHCw=5H-cF;n7GO|B zdvqmltb(-1TrGqfBb2h()7bYKgG0P%yu5BKulvPXTQ+~~>f`_;#muF(lRSHrk%mzL z59x=!(Zo7^jpcYVIs4%Go#vQh$!tb&#?00^$!?A&jFH-a4iwWA`{)D5BL{_P$>AV* z18*l57#PV{y`2E zSFV1wXc?avhwcwJoD!WE_`&8#7lNKS(_JFfY2D6v`Y;C*CynUs?%lc3eu=?-=~k@Y z#)IBkE%I7doJf{gA22F190*}Tt_yiZwvlmq!urYcJMrMm-^w%W1b>XSwe@v(2Qd6) z7HEFqFy!=;xut%Yy@_^27cwG#dRa_=-tQ+eR$DXL8+G(1dDQwfd?NXr6rtQVhRA;N zhk1W@u+Lxo>5K!07PRz}7}f1wG13fH-xoWn7<(D`vF8wBeX?MTW)V7G>lKtq@VOjY zkUb|jSHZR514lA}>Gh}o;j-d&NfmlLpkSs6f{ zI+x<4WRJGE`0Q^gTmy0p5|v-HQ+ksJ%uPD@`V1|#30+nf^nDbZ0h~v2EWB%6pKK~t z%3BnPS%ZghODp@lM#<85`PHZRC^~W85>wP>1t83?ry(rH_|r*7^(zp78IpT@>bFDu=^pG@`Z zP-Mo0K3=p5k)ET&^t;@i0p^3JP(hCkN|OlY&}C)YX8+O4d+?NH17l&nG7tRKF9S=K z+PT25c5G?B5$R{w2dH60dx2Z$ke$fPJ=2E;L%gMjW z0^XfaV%G6<{FFqfxRG0kQ*OtkQYmhuD&n&wZ z8z-}uJo=G_8s`OVI8CGeh{G%jJdAcD??Y`{=eByNj(pspJa5xreZFv9ys^z-%mITh zsmPWqS@V=YgLPS2w60FxI5u4rL_$|kRKM)_?1ZVPAcYCVwj+BsvJ{KYQivZa z#0ZYy*@=$I2+{G;`{(n7;USy1AIenLX75o8H;N&6+0?Y%OQF+ZHRV6g(7O*hPOGjq zlSfmC@S?qz2eDY#7J@cX6a{F&n~Y-KC5^}1+&pffca%b<%N{8&;G!O0p>Td3ZG-`M zR$F&BPx4Z!R|X&PVV~cn;A!*IViTVGY?Nqek`ZD^-fAP{vPSfy=&?znbfFxynLN}6 z^W3h}LmNjj9jg|1691l0asByMUsdKsAuxFQlBpiyMH_U-``ZY$iQHf4gZ+7ej^}a< z;v=QoSO_2D__lBmltcOe{x(`iqBZXph6~DueJ!qS94Ntrvw5rih7pvWZDNu0-lI6? zfhsnwH6TUXn>Q(dVlv(`zy8hdcmCa9|K)6MCt@kqDeKljejzCUhZo`R%B|IL&f z6sI<8qevIVB)YkbRU1c1U2hEUwsFDwM$qv*ql`rx9WrsnW?XS>9W$m}qs%r&;Pl;% zn{%n3iw|usC`=x94D=QJ8w0$z#r?ir$e2-Hlx~xfdZKoK+nD%)qG?wfvHCn-V2t1e zyT6Mg_i+Ao@Q0e9?E_s&M9ksZY3)gb8{P4GlWq=qQil^*^17!!Fx~u zPBu_%v96^th8F z6bI-Fu6YgL@-qMX|NhT;G9TFaKmODIGV<$2baApoU(UD*S9#&4{On_N(C|?n-E9zc zptXrOo;*fJMyscBJL{n-FC@%Tn8Mp7o2LrB!ML@r0fTqRxD)Xr`JjFJY+{DjANP_8 zZx^>5zcNO+toYpd^RrR^E+ZixbLo}1_cw19qq;t#oTNnGwLh5{|B)?}1<)Taz6J-H zISRhN&^{jVHe>v`nYXb`DMQGa6wgOr98S(W>B#lH#^Rx!E8pGT`N{cHBX21wK6vk9 zr;xr0E_p*9#)ZA}^y3BoJm5o5@(9DnN9G&~c`}#WA%h<0RaZiW6CR|6oQOY-BQ(2t zdyRjg1R`(n==d{O#v9(n?57OE&j)+Rb6yyTg9nml@x_JY<&~>nj|^hqpoGQeE=&Ez z2S44pnjvqLVR_w=GunkFM~lNv29w1<{IEZno#$)3`&AdD9*n17G_SywY~giiE+Tsv zja{B8P%8eWNO_j*bn)-eqc?UQMtjQJ(kC7}2LAECt`KKp#=Lso%rEPR)-%>|wKm|@R}kvn-r zU%t>?6L~Q{jgQ=YFrkPtM420hS06N|Me~|k-@NXk(eAIH{Hw3Ydvlnw`}JvaI3vTU zLUE|a+&%poO=9|;=dC*^X@nEIUERh3c@2)SN9G7i!Q=5+eKv9An`2K#A4-$&GbUO$ zETdJ*%y`!N>}MbTV#dv5VaIlyyjZ8p%-dZc5=@tfi_3m`jKu1?di9&=wZHkxiL3gt zGDF|!SyM;0S#>?YJB%;!r*L$C`~UpcV%(prie; zp8l#Gi+2{C_EWe;7g&T$FSJRi7d`5Tp_fg8JhOBwn|9lH=qKx0YbW(9ejtO};ds^j z_%H>ZHMk2it%1J$^0S>A-H~98K{da>P=SwI-?+?nVtg}R*T?2p^T*i?MsT+lWSBhn z&iRow`56xumKq-FeE%p)}QHg2uBRca{MW-yt^^* z@V%7Kj8XSXdsx3(S5P#YYmT;-H8<^zmSXV>KSj1P;3L+H#>vatLuNhg(-TdlpaXE9)G%F_uq= zmhvzTFqC`l1d6^fmkYV4-<(}twD+b*dO>de+jx7{IZLDu)+o_qnZjzb;~#~Wg0eJd z6N?K*`IMD=<&E9u%S>G*#=xuE^2qD87Cb9o%CZK*s-K_%w=8uebf*MGS7qiie0pii zl;7s*=@t_M2#y5MF#k->eV($F>-!?bAiBQyQ5z?J@E7($lqEQ`SdPsXPB4Jxro7y6 zChaF}10@9E1p5H^KO}9+E4>ASI;Ay9jp9je2{GwR9`mdOcyWo9zc-U&3Bg3$pf5u9 zS-UrMf*qvfslHJHf?Xc;X{3NYq#pS4HVKR}vCnJj(L!{r{uoN?(7kd4R(&xv3$fx; zUwH;Mbtt{Tb*cte8+tUP(m!xVr|1KQ*W_9MR^8}3ZG@v=Z5Yh@pP1FJ8`?`>K>YW4 z+mrHd()uu|9#2{CE4TmCPW34;w2-P7YiB<#6Z))uUfSey@A_h13x}!C$EgP`!R~(# zG|#^uR)>~H8~Rgd_}$}89|C|@-iHpbj|K=fMDrX?dJP@b0WS$^{3EU^Um(JzWNZch z;ZhGaWhT>n22!ixUi*SOq|w$Ym@M-zJ{Ve-Zz@ptCeJX-pi+L%H4m|TdnmPv&@2j2}3w>?2s@%eOAfeIFQdND1_uvf9hHlC!rsm1tcX*X? 
z#$xcc3c@KXj~6(=`-kA0aoUGJ(n*%a0az9U=!ox!s#`F=4+zGCDX;}xJ~$e~GE~>-oF_?>|v~5Tt4_P&h>mS_VJyhu9=JSA-v+$y@p;W+U`rC$$0_W2& zua&0_cJUM)hqaKNOQ_7e_%FYiMJMH{x+rVUoOFR3-r@yesTR!}Pn3Focpmc17e>a>Y2%-Qj%S{*7r*-D zN5zr;bQa5>e(}}L6N~yb{D)I^YDZwYK<#>7o*hk$9=G#&rJ&|neE-FRxu}c+!;#aW zRf;hR5lR!A!9#6&gd4eA8{WNqaW)gTQhLGVy~`IyaVkWG@FUNkSiCQ8ym;xt^fzV5 z!#q7Hk13VE{q9Ce)srp8pN+!*ObXls_2ap-r~97rW)CIP>VpXnGn;jr%`Seey~mGr zDNwxe&9&=!SY8|jlS}0o8qiBvfRk%2>N`5vyHjom#RuYKc~YDS9LtXA*7 zj-pbQ%&CLu!~0l&QHo=8{UX*rkHh7$xnhHBC#I+1;?b(#C=YospRG+4P1*{AIL{Gx z=gQaD=8{F5hxZGmVxu`B3)*=4d@?T_p5ZTE-Wd;CicQ7`8!UD3u76Wo7+}z76ug;p z!~}m3&c>4Z{MZx=@1t!LUp%xKP`>U6Sw%kK$n`DtJdaV*FNg=B3Wl4W8!bb!qM5LG5P{!egY0um_Yk2l5uA zm@jD*do>`QEL2Z($C)->^7ERV4P8Xn$CSR58pgB~1_%mAhMbA_UYWEyjXj=n3^WTL z`w6ll(j=3*M6nOrtqxw9ZCm4)l+p3KQz14_HkUSA@H=uKWpW{Dj-{kNml46n?29W` zgU{>UQR`$vb=WwHLCPz6WYKKa@~UqP_&&6*UUajW8|7<#GmZrquD(UXEv{%srn*Rz zCpTUj@sx0XnPN?^&&7f9sf+F$S3ep}_I2F(?3t8;DZX~sFUGZSBoq{lu<)x-g#YN< z%^MxjKYk>m=AkjnP_pv08iQo-(+6ElsvN_Za1k#2)Q^{v<>c_sBhn{dev=~jyPZc2 zr}ZIPJnWtYGWPAlQXD-Je|Id~yzY4Y%jj)%)})qAjZu9PeqUd^I)*AT>vf8Z*$}t+ z_3Uu`Kv7_VyA;l68f%O+FZRDm5&!+pf##THh{`xyd#%aQi;^*vg2A|WDr4^cIbCt% z_O+CADTBz}`p*U$?RbH@{L-99M)EQ|RNtG|^dZIT7*rc)%Uj>PlXBqzkGvEM^*iI= z9ib#@*NtQ^!`JDv?}WPWNGY?D1$fMJ{DK!Ij92i{++6uLfB#Q2es@xEx)k%`g?DFz zZ!9ct;uTH|KVI*$wzGw)pwxP}tFU6tYZM(6>GwJ*L%~hfm~SZ)@tAQc-tM7{pl44N zx+a`|_q&gqb28v051o|a1^-zWq{81xFfb{uDNPwP(DO-kDa;u6+uwiE92BmF)4;FV zBHz>6JH~<1NNl(;3JdQr%EF;RitXeH-a%ttfDFXi!0<`YLC%}Ig=teKnL2BM#spc7 zQOQjPR7$kx@!XN-oiO^Y1h`|Zi;`G$?7l4)CU;|oPOc$c?^El!Dyr})nKeJOpnlN4!@tm z^Kl-$2Rn^GKD~&h6AmI8mz zc~_f<$TZ&iFH*2gtl#htnZ}B~qS#(a`yY~B>L5S7jH%m&RuKw>kq0lB8}27#_cgy5 z7boKj@vm>?xy*QSD&rgDk@0OFn6XJFnsb6_TIQ`gp;?l9E}#8*@8(^|Wk|E+r|Bt4(-7h&*2Jjf9N ztzo2;u^Or8HBJS=-#SCcfKAB5S+k}cJzcEmm(5`mmlWV)Vl%wm?Sjfvr<)fmgFeD} zjT1|DSM!xqSYU|1&Ct)O0&GV8FRpyK^Zl(t0!2d?hjOAIxA45Rwv#E^VSE{*PP37D z)&%Abq1nh^p*<)&fA-Oz=RJPDdui7C=zej>-5213U#Au4RX#M@&U^TKQn6& z@}tKyuNebF_2e~!AlW1T(%%%`tmi_6Q0Sk1`%(rSW2ZT}I?#!x9}tk1kbwv13&ZBhZ7^(gL^mX%R;EspYCHh zmm*yM2q8HkfnF^yeS@D2W;mnBXGlk1mr))%bUeA+Nxtw9(uVTii3962GPwrL7-AH` zyJNs=Y`P-=kKRwdzpAWphab*fD9(NJ{oRz__7Ifs)(Pr=oHNA1PNf>t)}KQD;3M>$ zxZ>^ctaYumb_uJ}UKC%L1W*`IGOS+-0%dWk~N785ASCfET43Sj}DutrT-yZSE+F_Oztq4 zjc_Tm@!otZV~kz?A*OF$+lG}4lNt4^{1APUXXV`H>3swR2?4Fm8H<9S`Km22j*m6#8J8 zgZ9n0Nmwuz+yD-3)xHGvdRj)b>op4IDP10E{f>fV@bCp=)l!TqZIz1tO&Z!ha7u-j z7Z9b7r{|Oeuh-BEyuMXTcaH#_hYRwMM0vC%X@fJZr=-#!Lt_QduQxNu$}wn+;tS7& z)6fJ!3i81NUbgOdu2rw4&~=HHZr<8hLvZi1b&Og0Xj5Mb|B1xYqcN#62oa|6$YFB9sFb;0D5l0M$=JvPTRqU zbx#MU8m&%UF?A}lTD65!!UiIkUh8SgJZ;f_rK8mphG$N_AMj9UaoQ@S=_3Vr8$oP%tT=I$>i3Fk z{B_C}M;nbJURC2A7u<_)cZp7NhX;cNsZBwdS|BcuQ=vU(ZVH}-@1r)97H2m67LXJR zZi}BIrj?^eTbstUSW5-hB6fKy*kC*I@0d9cgzsRwQmtpQpfoiOrYeUW>6F!cqi{(y9&v2w4 z@C8paW6s)T>Wbb+j)+C<2yyUgKZOChk*Sme_!o|pjOavJ?BXXO2=FV9!X76#gf?1= zXC9zG)b0muh~B+)F)v=>6CAf)8#A2yi=SR9JWLnrcKn~>>;L?}{yw~&3kjJ20mVo9 zlG1;7v9&Mr7EU&O`R%P4$GioPhQB;E$;P2_hw6{IA)39}`Si-S!_$wGE#xNtb=lsl z6i4Lgl1-#3_Mu>M5~MiO(HbwzuzWZj9Q+D4X>S_|Loli9DDOD#y^E$>G@K4?4M02#ijnRi6lo4cVeMolM%2D zbp{6fOtE=jA3m-fcqv+-+rE&#m+n9<$PomkURkY^^-3q&ij$^0!+r$M-#Wc zenv-A5IF8{z_WXjd9P}}X4(T))r^IA&Yg?Kl%vJ|jmQ*__KX*k5e!*(GQN%B>_}dz zjpf}(Q^=`m5Fj@sGb8Do&V^x2%lJJ)MF$D_%OJnSjT z?)7X>#5r>K_~2t~kwN;3qE;IyMbyIso!nXTtJZ{fy!*6%;n{}wdD999;w7KD7z!!m zD7q*J)@hdJz4AP7jybB0d5w~tkHAY|LJ`4GMLwdbc(N1Xq0hcM(k_Gq0~N(5`co#8 zh3F|(vT;u7ojIbr3C`?%`uXR{-0ydO@$-+8uZMQt$rwVq{UODolOyk5x|EDdNz)&? 
zxpRViQpO1ur!CZRHgET}w@TK=uXxr9WqZ#E#f$a97_snIxLFV4d$ElTtcmHL{6Jg$ zN;clOabslu#fz6lmhl!h-{?6%e({me6%@{tePXn~==9jNJo$;CpZ)B^`aoRk3zx;}ax z9w9KH3DD(tpZ;Ozv#-D2`CtFTe;5TCV;z}Xp)m$nH|P_5$RJE!k{9M>%U%~Tn{R|r z5N2rF9Y2CYflVG6WA4#-aHrEC8BXA1rAL`ACitT~PK7Km<``ujq}==J+BaQteQp$i zYrf4$Q7meP(S0fG)Jayad8aU^j+{n0agxkx; zDsNnCn(Tmew;FmA)-MxbRwtGXk>i(C?zR> zUqzFH85hLv|99PC!Xf3q{QZC5`S|m%$H;WBxfktTq)g{|{P&;VD&4d zJ0k`D(gubI+L|Yg-DzukOFIpoWClYJO zU43UHV^ACeZ}`-J8CMLhQzt8+sIqfwMoH*Rcul6^bJj}@y<`*ws4u{s- z85-}WtR4^aWZkjW_l(~99{+?wr@7j%yyy-EeEYP}T+OxklX1(q(H3)reI7YQCais` z($-Y1koo9`GXnM<*7Z;*@I>!ws2>9(W2}8ROKWpX_BGKD^ibD2_fEpE19+ z!t`NubMnPlbcfHIWRkV3o}U+{9|;=Z4R@?`|2Ii1VQse8gyliZF2l5E^Uhn+E11KW zKBsubrt7ty6W9xQXi)NbENu%b=|_0YgD6CZ%yXZ8sNwz^GLt9T*Eu~2@OlpZBo4@_ zZX>KBeQDcgk+X?sA)xYLdYe*Jp`Q6He;)xV?MDUGB%;fv-Yrd5#wlwdr|2w>4CG$G z3?0LHyXc*MDCdZeCIh9UE!_KtQ=ZEz_~#%rlkF!+G0aQmV- zE^+lgOx0hSpJ+Gmx8nmUQ?F?q6u?%W?h5bu3fI6-HT^Tp{7oB%radPpYtN8G-pRjc zH>VrmmoS3toVPpO1 z>twBMTH3Si>&N~Za4WB)*fai;E9=xq;|d+%H2A<{wHKdJAfh>W;h2Anl_OQ{SFaNJT;{aa1z-Ool2q^tu>FPGd;5%b-ken)4zHpeT;RDtntm2@| zckQgNhOffc*A4i2mvug<7$0xGTX+Kuq;_szlNYV(b9^-DFARphTiEg;`@H5u?dv<* zO`++7#X|#A`~1i=Kov_Z&ic`ukzqgf&s0>RC)yrtN#=~fy>x@B zXPc_xw;)+)tfuu09(~!=y?8*IOKA}X^wiutUVZlb&CWl3^6gv#bht$jGF$BM#^b4M zp{`vN2`;}9$5dQr8^Wa|w9u_R;c70i`M2R-$Nt+;g!gvX!po+bLKKzuSm5+yb1F>L z0R`Rw06+jqL_t&nI%^8KE_@g?J7wGZAs5r*Dori5WUFdl`r8#AVO*BP{jWxxExVe-zVy3R1O|DeG zc8(I=VlG9CKi*X?0Hg>*FLb8hp&Xb+Z3}nf$_7l^E~G#@mqKI0pj38W3sCeK?=O5@ zJ=#pEHr|gZQFtC(pjvp|%)89$=wRNd!oiG}bnJ1UjRVDkHtS2_3U1uY`!I}nSlRqh z06$4N_|L!pY?Ms;LVlZ73Qfw(ufG0v=jyd@I~FVy!}0=u87&{CjNr|6=XM@9@dv)Z zRXo#=@=$D3BkarFjq*zWQXo<$>mQpYG@Q+2Z9b6a{{H$O-Ur(pkXa`>UaVa{^HMFha-Kc=nlxSVzJ#4){ye&QuSBvG3J+#u)}!iJ$8;$b zEV{LY5|tv49B6w1Z*=Q3+X~#Q>f@U^3m9Y^UMX$|98#W$zHl-70pi9BfqoUEn>Y7Js zo6zc>c;2;z!h*v5NlGuFJ0PO(ZFpSNMGhedJSldJ*9jG1yhJPY82`M$tz$?v1Nz`7 zW2>{id>M_#xCK<@j%IXtr{neH#KbJ!jJL=>{QPhK;?D~abaLlE|EK@BbNT$)kvWvM z_;+7?dp0k*L;HpP$>XYY%6*`nTCusB&2aVD#50=ltiGMX^=1l03Vuq2kt^{Q?>h<( zZ8Yx0VrB3UkN)QEHl7)NC{SNTNB!)m^~fn3uf_^S8^zV6cg(o*gt*Ng{+$>42cL%= z&osPE$$|IOIbMy%b+FKw>`)%h!s~}lV}0_)m7QPz{tr7}6$||Qxp&6k_R&W_oxI}z zugzrHtBWB-Z&UnJa%!)!ZC-f#;)g<`Jn3|ZOWslp#jBBO#)~WrLa@dY?hO!jWKO>X zZ}Qej|C$H!(`1Br@{i*i@k_nNpcwtX`S!-Fl4;C-rxu~$dLs$K#Z}ge4AI2l4 z!0}ywAv_?FYis1!!ainLu$b3=bowdF%A4v%#ax!jrt%lNs?LgHm5pz*CIb z@K+{Q5c8GTkB=xn0#iyI2RvMgqoAQ$QlgD6Z~)Wnvgl3 zNuyjErE2Yfue!~56gEyfQOa1@eDt$l6iVow_^L24DXhrfF#^TY_v%+2=HkjT86I@U zoh&*|QN4I3mT%z)W(>COsBg(dr#ghZVzhR0%6PDT8n5BndU ze~1sP8yKVX1KILcv3Bp=E*wuvSozFBH}k^${Hw2be){hFI~UHL8G6my!+3{N`H<>` z6Bd2IPsY6U5o4}?Bx4yP7}RHt6}HP!pt2}gn=mMu^fDYd7!Vicf>#+GDHeaggUL+& zfo@%^ zKK$v{GL)XL@L)JT<&m1=)-y5p&v(j%v7Q30ijQ~8;{dqxJCPv%0w%7Ve@|2QK#g*`qvf8pZl^ZM<*%Rkw95xzq4xb9{&o zU*{1$vFz*H+3z%1?K@sIU*Bu&ylh^4lspq=WqSh3e)#E=`;GT5eU2ArUe3sX$?^Qb z3_2%|zrAy+@Gr*4SjuXJw0Wt%IHP!9L&Yk;r zYTIUAk-Wq=`|K;?3v(#hF`;g%y9Q&!Nwc4FD#Lg;$C!`It;UDBhCH`EF!uG6(?iy8 z_A4&Awb#?1a2lx9i({=Tj4SelaT6^|9a$^f;26VZG-zc-6$4;4zqauCJ;d_!}HHgj>osq9eg$ zSFUqq?QbUC)d)?P&f9z)M$?`px+=FBc+qS=gRu3CoWg^IC6ZT}@FPjP+?q^~lp8)F+9Hsx}eGqaD>| zB{Fq|>;UO)2o^YWnmqFMS%1hBv(ESJXYl8m8dwY9){}cYBP$F37zLJgQ593! 
z!lk-A*OmpGRuz5K=c~O_K*s)XGMr?ciBK|joCfqjA5Gu)zIko& z83d-y9t6AWs$2AgXNB>$3cS=gZyUI1Hw6Zfh0E}SjM_L<3tfD+(pCkYR&d(11q)Vr zWL46Fsyss{WmVL3;5SA4u&E~@yCL!h)m8W8oAyu9+O^E{HE$Dv?;Is(S7m{KKOpYoy=ruFh+4d zjnhugjrCvw=0kKCejO@ps_3a!E%`K+u9Qh!8?T;i!B=3BCAl~ z?w(8w7auo5ZJtwtE4$6I`YqU-z8G2LDcqGE8cl6|SvW|RvN{qTo3=8*YC~lvUbY z9pz^p$~Lf?04s}48)%D9N(Bm87g&w=T6NnDQQE-cZuMJ)iStU)a^Hw2SE~SiU6Wj**oi>%cjd)A)4z}oa0g^?g#juXEkrXF( z;uQA$bjl$f-B-T-zD?7~@qRm$r!0C#;-KsyrOqS%7d zysj;;je*)Xik7~AcjMMv{5PypS>-9D#JsdQI(_E!cuK#lJg+~Ov)#+PNIsrPVoN(R zYH>>;h-Tp1z(tDc*`2rJoxC@1*1D7?@rq5w+w~LW1+RKax41sVmHFeX`mR{+EtKEx z=%v2pX-G+9v;DY@HM;FivG$B|p~XH$21SI|C@1t^9cFWBlSC12lYnM6$QJJSk78Uu z9BShuZtjed;OJ{g7W~I!ni7L2mW|@^LIRCfWxU4=pOVip_~R+^^r4FYZAP`5?4poU zm-UE{JWd}l(mbkt_w%0q*-t+hZ*uMCIiydwFMVcQ)I0;Cc8L4$wZfhps2xJ)P_$Dd z;)7>TE0bc1;?IWZe&c`=^)ZSo{d6x>$U>s5oDkna)4dl!C+*rGt zCqBGx-6}q2?Y!{L+4`dIU8t)MKK|nBD6sdZC_j|~RwxNdC59^sXI@0=b-bK6)``L= zJbU3nF`=KTtCJuQfd6*G|LQekS-gNw)v>33Qs zu~~=LyPph1&P*X|%0flr8#&ve#V##w+8|laxo_eSa%{ z>4Z%Ri~VrOh(W>YbPvT88j)v09~r;qoJY-VlrLh$TUC%lj16Qy8Z%btV8&D9%Egc8 z&*f2EpB>KQnz7Q{Vf-mi>0k_#!4#gF#mm~fiiY@#MBDGlmq6dl zOA$Xfc@zr{4H%8}$7V~o^Tp?#n2OJgL-L!~;TZI)gE8k-v=a;6SfC`u2f|Y^1epu* zu>LbwH}d1dD(Z!vhqu<&e| zMH=Uw7SdOhpFXNC2B+<7d6fsTiYK115H^=z?cD1AiSMu9nE8Q2$)1DF+vU;rS+DnQ z{NfS)sO^(p%-?-PKiH-POx*I2N>%{s4Yq7zXsh zi(pu`C5jeFks1zX>6zYYU%K|ay2}4w%^qCgDU%h-f;{wJEtb*-EdeC_1$|b0iM5DE=zGv9pgdJG2H5eE&S!QG!|6GRDAjaR2B3%m3VB;@9me>AVtnrc}G1 zHM3n7PwHFIydsQD&~}U^e>f5xeRAUf^=rWbKTyIgcX&gA_{n&=dP^P}>nzui7NhWV zEjql^5i4x%&qCF(QG;{!8meO@`}#%J>GeMpdD=6c)3=CA_E7ZQWwels8V5zPcsBMt zm(j<$v+Y7DKl!McyVU_+K);9e>oW$+g8g-I^iQWxoyl#++9AmL8>;)OYjvP)^O>*B zJkBE5PBC(eY$6l2RWgxWwKy_9^DEoaES4$1DRzITe=BY*C2lEksySR-}8mBRG zxSjHxY(Eh%S$ySy=5_o#isxv4I75Y7xxSlStkJhduHh;YXsV9%d5aZG)+Jp1Du`0JS#uo48f>9i8E2f(;GAyZ$MSpi;1fOZB zMQ}kD@*q^+c`tm&u;LH&8j4H^ruh|aCjRb<028;ciB~@LtU!bAMg#172wRB@1jGB{ z7Kw6ga1bmmVG>byILz?0ck3cncS2|7BYfab1Ywi&>Li3o+eb?9S9uqFlrae*EEdu$ z?L0=KT2~+C1!U!iNeA)5C%x}^-S0Lz0pl75Q6KUh2t@xt05j zdZm9?_u@<*18kc@8Kjw)eU(bZi83*w2e71lDIm*HStb5Db(Vd?yL`MIuNSGCjayqIxm$!>r#e_@S341psaLVyNM%u- zp!w@@@>C4*x9>#ue(+ik^G=;Xed=UnhKK^d?N{FL7{n@bFm-5PO(|ryR0=fhhTBLCI>-o=Bc0RXzt~;CG+IhU~Jb%%DtIS@#Hm7wpjz-XGivkH9Ny#B<&25C)1UzE?iCLt_GPoxZ%?icwGM?Zr{ z6C+l8O95ni@={pNAkpGhc^GixV+vOOzbWyazIr;A-3`NLF#mq;Ias5bOp!D6T8n3w z(!gMiLe``d4Au?DQvk7E{H}?mLHZ|`K5XKDcD8O)%2QsMkW$KrZn&isq13OQ4dPUX z$v6duDl@4vQDEV+ln1uhzRoJ+!fZLd_`%7|S6NcyUkWH&x(_x`Jl^*8W_;yQ%L|Z+ zm*4T5-eCS#mSz-VCg>(bW1SWbew((8wIQq77cZNjq}$pr?g>F zL>YJa()rD|SH8=2PLuYJK1d06XLCO#y2~Km0d#oA7Uq4`)%BY%&co$AUBEf`PIL;odCvq+`W=EEhfa5#y^XTZ0cD z;$xk{Axo?#119hIK^b?G=(A0@XGcK;^qG8R+4gA*6_hs=MttQ{##8LF=20$u;jkZ# zrvJ*rS~FfvfyBbdWClN9OBrGzl$bb|(&_Bk)1$PXynD~0S$n{1FPrS%yq&9>=yfdq zd(akY2Ojd*{<5;z!mfDQI)x0ym&wti=p(*K{a9m$5Z<&EmGZ^pLgSbN{ckadm+H5FB)0vRO@2sRKR}SPhNE@YWhSv|_Nf9~9 zquT3>JxyW`Q}(pmqRHLq_>yY{ic6MB!~508ShSWmI+_5nM%5dcWFL(0_ExsH>bblv zFx}1~lq;rR{`yLJHKA)lDi4;!_me&N^k$29$}(hY5>DxNp&bPlH}0k|z;_gPPaeO? 
zJjTtutUjyTwakQvS1e(O3Y#QU37v34aHvIhf4C)t=i zU9CO#v-z+7*?%^!IPSJPMEkh-(fJh679h&Ig_f9hCiYx!P@>=$3mN#=V#wWxl`*Q{ zxfe}3SAYeu@ei4*ETi-22R` z0*@c=wx~$%ux9(^mtSpu|Fe&B&6R98@M`m4{ulpb^WXjV|9r}3feKCaY3k4fTmLX_ zEDEF)F&?$kM)@gnuE(SJM%&}}yCzkeSPId%Z;#C(vknbCpOvb17i87Cb_uDQ%JwW@ z`xHI+%S8GOt2F#-GuHweFi1yDgjqCMDF}QI>F{K;COV%EqM# zbDYUq;(QSv_A$b{|UN)?#IgGupddGcO-ngzkXqa6WkO-1H55s@oH%9(I`JwUl#@ zXTj4!qYmSvgtCZDMme+@j_^_~%5q4bo?;aLAPY<3^B;cFLe{AcV7@*(n(Siv^z$#O zx5^sc4r`{+)TRL@dwt03oj-QGqMtk0!nz$j4&o;7%pJ%9)`{M~sbL>a;d10Cmt9=7 zq`V40?~LpCO~-G_ZwyG`Ipc)dw?#+Fu!5(K#dElyIuHkb`XBBH-nRSWWQr*l<<*;m z$q#?Yh&j~0_ZC`|2ahVxSZx<(_rZIk(5=0cnZ@X5&nZi(d1~9mw-W*G#uf>+EHFEm zxin9!SG(+<fevt+2-gG#pTPShhq&joZ;pu2_JB9DHoB2Kt=LZ=C z7&~6Qb}fI%_cs6SfBoOo-yG`w%^9QYx6oM|JrnM?;&oQC6t&u?_VOePR=&XT7nesb zvfw{{Ah&1H{ZIe!4>mvj(dV11x9@KL{Aa(~+=~CCcjiC-&pNZu{H z1?&43fABsUqqzk#bh!I{yI=0!++4qQb@Ru6{HKj;QYKbc#x`&#AWqI0>-FZ^^%tXH zJ+nPyKs&Qg?*L`)%HGfCxw8ux5*&;tzxn!?BUAApBZhn5-xw=#V_aonDa^=Ze~SxT z15&bcSL0C08_&OvR!=uKZ(X0Uma*luoA`rF?*XfK;RQQhoFznQUHQDPoH{b+tynl7 z-B7X7^<@hyGfXPVhaX-7V%j!?li~I4LIFdaSN%i z{`RMsEho)3yxQH8d~u*GLx+izJbIhV7vbd(9WLAfr$ z2723K17$>6RL1(yc+nz<@#we?s{Hyeyk>C@>;v)fp~iAg8Ap_Nz8Gh0roh^Ug-9+` zD4_MFUwrZ78ACp+%~%+}+xU^ukVV}Y>&F*pt)H*}OTmtI>e870c%N~*SJ_|HXWY8_ zZS|QkPI)oLd7CkWgI(1-en3YHrk)-4O&++$FsVA!_V?{eAzd8xt8UR+`!HT5pXOco zQE(Tbawp$Lp0@bgc)<=RZhgtTL-Ck?*ch3+8M~^m5xJ_ZTl64H?N%`_8X`_Db{lA8 z@q@F7Q9G*70h9bTw=pLm2enH$T0~8HVu9Tigq|_yC{aQ59K!xsY>xuRQ^^YUcHImJ zqPge7y`O8D8}1(Z^?ni-NBlJqSZPA6Ke>(snN)i50r@WNbW9DnmgV|_wUU6n22`Wa zBp?fd(=;S}!5w&Dag+AW1K6dKSfO!!RlEs#a$LjE4$C49tE#sR*4FINaG zTp$ug1xdf&O}!5e35UYH8U-+LRnU$m9Y+XVk8 zSh^n`n7WvZDxUaoBAm)!dAGR5wNeCN_v;Z{X^U5uE9y3mcY`!Mg(**;i3En=?Ys3b zN_=v;Sc`nZC!M&$`;_adh?BXxwZsC=4 zlTfOgKgCk^=i~}dy!yCPe|6YRx zf?e3Vb|W8|E#O5%dSM6GIRO2ysea) z=Or_gSNgJ2EVPhzCG``CEzPIhz^~9P?ZKa(v+@rNb>E-hdeHR_kjX?K+*HrR*v3_s zZJK#2UQkM7Al6)cNV;UW`yf=mQ`ga22eWXJPW?#R?eIbG)JKCUJUBa7!eQ5r(@Cs* z^&C!n@aYgX7?TEQ<;PFK3D2qP1_L7l@a2@Ycavvdf+(*eBRqu%S&6#$ts;WgL17D4 z;ZuZdIf}jLQQY!Is^Pm`y#O7YRfZlWu}CrmcjY+~de^Nl+RjsU8RaZP?&Ya0w?K+OE9!Z0_7|5Z2(M5$R0G?xcif z>F}eEFEj{iFxQ0o>nr(qu6_+hSghF|zqeCTMrqVQS=}fe_A}_+(;!FyB@TBY1~&#v zry7{CzC4)H=V(gPomDh6jakriZF4t}a*)ikQFiee%$oO9mIz2n!FMY6Di=}~vWzfk z=E@2UX5w7l{Lr3l(5Brv5bwPvk*p0_v#}^r$MTo~ch4pTUuV^MDnI085&t}Ad3m@Yw zghmNW;Z?p4xuWnf*`;hb7;dPGwk-B+83s?8pR?lmp@Dl4v4Dvm*lWBM8{%4vUkw zEsz`=%W?zaQ(0M?MBvHnF|@@rPqs zrY`v3eVX#_@}*1DRw!aj2>Fh`mC|0hD8tX4Y0?`VNBIESkYg)cyaTiFFtD$LD{h; z(-asc2M>{RMbUCAR}t#M1i|ElTM`p6 z{Ai+f;9wJt;+ycktUN3w@u0&yeJ}*x#&0jRw+gu|l1$>7JT{&?(u57|SYT7&;c<0u z61HbgN}c#^EN08c1Q3n<{?LSSXr_&nUi&pk8VlCiTSbj`i&G?uLX%xG&Y}?OTo%0A zLQ=K1u&jOTNRhX+r>jef-EXek+C0so3!Pa0gC!r<=P#QeJ&Z5qsZE=FQoiH2idTNo zr^(pqlxB}wu(BvMxQFktEy@lT& z|G|%&{Ne3qvuMKF`sB%!#xFZyuXgW)PnVKlO+a53Xkq4Avf{uCG9lVG8TGujBsZxY zexej>avL5hn8KgpSD*ZT3suKcYQm3eK?@6beSGdmlaD*h^Ya#*{^D@U@!Zl?DpQrpl-_6Q#_@S~= z<~p>K%tso&GzzE7MXS#EU6% zEWp7JK6~rOhlk=9a_n^d2fPVnZP_or(*7u9V)iUazmJ|-0ViMW`dCU~i$%{z`2}aN zSapB%mBlj*pigv0MB}jdjp7Rp^_Qc>Egw5&PA8XHu2N7ON{OOuHTzsCu-tT5TS~DP zS+R$QE&t9OID6(;ee!$d_2npVjLC1bYe0W@qlM86xdl6vH6JD4tNJ8L3hXrWO(A9O zcJ#1vvrtX>(OCc#;4Bh9_~7C!u&`LPFmt$gXX88d^EeA)N^lpsiw-VrT=|@BF=cOT zKqlLHLqiF;>3W z3bln^7raT5Q?m%gngC;IwKQC3kNFC;GiPsTJ)W~s<+Th`Q3RkwN?Gu-V|EO z>wwPvEhzobXFtiZ_Hm2qH_L|w_M7SBX8|f1`bU58$0LWy*uVMPpKUILD@7yzJ)Dx3 zrS<+6DBtAH=4i@5?qBrla5a8ema83L`FzzQ#UR1|VT;b!T3BTD&2T^+l8f5zi}-7e z-790Tjhif76nS`gl$z?WzGWB)UDacST}pDY($19n;m!{-{vG#!<}MgcpH?}^8_#%l z;FQJm#kF6`$RjQIb60b`g;KI+EVXM(7GrPRyw>iFGc&H>UdH^)0+PiLW4@z}2PpjT z9!05f{PnDB_5Cc8$C|gr3p#-tDYSq3lRp{0;d0{iseior7ys-3)H~;;bk;*WLW#U~ 
[binary patch data omitted]
z77p9C0amUGsd_+{FV^1x?zN`$eO{WdwXP7-7| z@~vOMDAHphLjJ+mTb1?fU!w?SluN>9`}id(*B8_E*ZEKJ$K6R$}=Fe5w7| z8tE*|XA&J~UiDs?;BF_^p^(BGdaW8@+@JC)GJOq3*`}_3%+Kiq#O-PGZs0M__8GpO zU)qD~L0x4B_4&QQNj@;Pn-cPF=^eaPa!~KTGDbStWx)r1ZAj#WT2?l>{Dzk}@sn|w z0t{~|8<4dildn7jTa63G!0dayDRqmd3=2^2YOCtJo4i{*WZ%M`h5#@_-S_l|S!VjBZ@7c2UU2$1 zbQ!pntHP;%psarTAwkLE_l4rXf#4KaeW5K=S7}RDtHbYYoj_C)pKtIO+V>tx+fCAW zSK8|TCAVe3|HIGVRrPENDX6VmIRtJzLcEU58N2~6-PdGaIQcoqtwD)$Qikm7KhMTv zU-(tK0*p>u{H^&4;+0jXX7!RSbe;q>4tZqJvjOS zEE1B00IoY#@~_=_a8pp`NyxW zrT`W8qXi~o3&rD?zx#5$&nUcSQbri)cvJC$dW#}5Jl-)|3U%$BBe5weylgB`EG8|S zU4-dkq4CmS5X->&{e-5dP0^YGj`#P24Br;wV!PVhW#q+6XpO`SF%0SyB;K-)fAiS8 zeJ8J<7XH|T;X9I~NWPh)tM~K#6OP5g#c^N9#l>c}aA$ycTzw4U7Q^?xPdVOrrnxkY zLF?dwHHLW6i5dCMcr4}xd++-eeHruOS6)mOmlj>Tbs0BrW~d&oDss4f6rR9hlF@2+ z8wAIXWMs)r{@r3%GoI{Aapz@rpvBXJOIL!|LL`OTqDK63UUle%&J3&S*)C)(^yL$B zg<(g1E3u@N8i6LzIlJ9z0nyj_G%V;WCA0o@nq3$w3!Ly zw|HQH|GK{W_~VbKea0s*MIQH#Iv;NF{r-C^6bu6)!_SpswyQw@F%IJqn>6AUKj;!A zm$E&6*)i4qN2c#B0*t4_ZTJWqqAeB$#yh-@XY^)#c#wga;hVvCj`7E*40?=U_(RAb zi$})0`_b@p#zdiN&_aL9!2?TqMo9X+vm(HOB1 zf{#U4YthQ$V;gtEbBLKN25bD)GsCu{{tWpQnsGDnUwgJ-WE|hUD}#1~(zY_qSQ~j$ zA2FEm*k|l#^mDSH3TBKmwpi#ZXCsc0&%)gT*+z+a8ITyDc$JYY3~~4aPxDS@L}f^% zd!m;)Ok7;XW&FY5%j1rxw2ia<<>7HTM&bCIx4k;auzQ8KdHa|J8>78VgSbC>kC!tX zs@LWY&sa29-)+?h_oTp6Rg%^tEt zKaFvwem;{CklckYSuuvM*Sy8Vu+F%e5!2ZB9&Lt)!jm!gMR3TPyBUCPWeC>er%t>( zGMq=PIP~tH_+S3l|Hsas7vhSyJ5S#ujW3&3M~ht@+suZVwmaEjVaz~sqxj%>%jp{@ zd_Md1Dd2 znNu^2nNRY9C6mOOHWtY)a@xhy#({Co5cBP&%OjV`eEd2g!IIVH6XR>boHfRrYT?Z- zl!HyE3m<q%k9?%XLXLvzCGj1_byG1190hUZ&(z<&H8gL%Zyo`aR zG6#+03?BH_MjHMx*BBQ<&iww%Z|0d?IMc@a%{Bm@w`up$M<36H#KI!I@03<@if1hz zGAAExUQeuaGNh17uM7Kxe}Zi@$AyG0RW--kxO0bsc*&k+}WY*Vt;u`}WcdCf@ z@vavEYuu1S#{B+xL^v!PcJrb>^f3nA_uyU)C+D6f|9|nD-|hUjU;eTaRo~4ig#Y`$ z|DSd~s;?g0y*WnhiR;e5SAKedHn|wuDH5BC>N9_s#~AwX0KzgN8H-LIkO9Vw`Gx$V zM=m`mq~j}k03P$f>kk_%`on36_b+@_n2!wg_1lY#liEO@laqK+I-OiwZNwOh57PUd z7o(UV&U>5njHeT`yE?{bU0J6B_STl0IRISCV8v7PqvFW(rY2t)iVxcW$cvLv>Pb2p z{YY36_zSZ^e$57S@EFWJ?@j&?VF5@UCljIx9MQaj(*}I-AcL`3g9tk*(S7v ziy&{ub3*Mfl0!l}-fsNj*#k#-Yp;`5Xz{9z1>XMT!~LRGMhn6J-$@X^SG>sK#^ixVUa*@e-m&&Ud=fwa{r6K!=VBfV72 zX`55lDBc~)*sgs}n&>n0@?3ft9k=T>VLcue-f%WJGngJaR2VBfR9>@&3*qFJ1~xo7j1I+suFegHLxpif1{O7}sBZ@w?%9 zn>5RbV6}aIM+de5^PQ6{PJg&S+zAD=yLbD>&{+r~<96>{WZJt<7Vsj6yEc%4F5NbU z)_7beJ+;XRra3`HzD09#5g(sBdtr3r=kX5wuQ%4{KL<0uAIw-RWQ&!Td7nPH^z(GX z@FX|P*9`tVuy;4Vj1!0a(a9Rl9QwL2gJ`GiPCk$i+TuO|Qs8x)U=O=z!%(qFMxUqC zyp9G(j+|}t>fDUggT)|^Y6Hhu5URxn5nXXkj|JOFNe%>i(?iFTyZ49NSVUo)OpM~z zGW2V@8+j`P5&6HrdDghVceB}4+URE0g%|MEwX2t-H>@K@A9J@hzrCyQfXxRs6CM_W zeDu{eS+8FHI^D2)C#vhmKmE%&HD$f@rZMR<<&!5)jh=S2JU4D%pFTKKSREU*=DO*} zWX!&IYG?g}H}UXAYR~ z9nvje{jP0Qj82R@DjhoYUj2iUx~AC>lE@g>cmI_2JzmocZd0dseFonxoDH2+5jn=z z4%YP1g0cEZInQW5Ak?z?MUjaq2S9KKU)ig-MvXy7d*t8Hx?C%K1pFueGPbOy%Belz zhaa>TV*RdM$@9J+ej^;(`OJ5}J#Oi@%1aAwFvn;s<7Ap*^A2o0I6Q)X$~*K|M;~>r z`tV?%gy6r1A#r$p@DBLY<$IN$yaNLc0Qzs`o4lLnYUxqGcpnWT%A9K0;Gv(V{SkBU zDc5A(e658|P=9fcn$+WaXrtsEIz$k8y~>y2Y7ig|82^s2gW8X#`hrT z#M)AF=!buk%(j@E;M&Y#6Ib7xgmX5uSg3cET`eztaGM;x?A@x|ugwRn4PKQS+Hc-0 z{@>>78x7}C8L(ht%wfPj2~_<#u+e_n6yCB+^Zj&xe-PJW!4k~^HsI)g+45!~c*XF^c-xy&byLe}46^!096iu7; zMWr{;s)Lu`)KaP!WSzYI?t8{D_>&+*zj}>MTmCe@B`MOtnO{K8*ze5(r)?$9>v`F; zmM?m5C-6~7ri?~cX44PKExZ8mfe}2(o#522**HVDo@W9zVNM@S0onR^yT!KefnQsO zKH9rHN*3i;U0YePc?!n=KW_|eY9#g@QflxZyuS3w+PJK(=sPs8ZE!8GE}J}XD{ZwC zmc0NgzML$Rt%ZRWlst89@(gaqQ}AEcK{Fqg-|uN|zkNm?Y-lnxH@C?%RaI_ql5uJe zKyB^ijO}UH8Y7GD;3=qfbK(Z~si!orYO7B*%sj^+cJa!sor`yPK%@+{&_0wBcPm33 ze);g+dpp1R;>+>Au&^6^g1ftgu)ehTjpc`@(?pZI-GmoQNT@#Ob; 
z-f4wJHqRx>)LeF1^B6Sf0v30#UZ#`>)k5PuCn%f=Gb-a-$(Rd=&4h!zL-`<;X_`BH{5Rdi|FYP?yc&S{!k%vcZwm7p0vyp+W zJn9&7TC2CQ7slV4^9d{W)uEJK5ajArpvf8*zf}6fn=j%c@Kwne9tqN zk&j2+#k}a%$+*o+=3vMD!L*U{%{Nyw{Ir-0hFHJ6{e?rYz-6?Xx`W4X`gpg~1{wFq z2*gv}0>6CG|Hf^`tqevjO4N&v3>c5|c6zVlvo8H(?74aK*6;{Jh;RzVEy>PcrTwO8 zm*`DcCGsL#GyXCh8lx5yj^I9Ok!v9}2G&G}tYRcHPQj_QfBeNu(6Qn*&dG570*-W& z_;7IbiL~uv%f^s=_u~79(PnQOMhr?kmqzCcuKt+?Px!-eU`Ef9YfZ-A`skfDQOE@y z!W(gaJY%CL(v0y&zZnxeekH!n==`M6JZOG=cSelhns4x&wirvirP0o~K;xP^{jYw; z2>faR=X9XHGWN8eT(U87rv<2!E;Vo3{j$wC?=89+6?xth^FlE6cha|d!y0Gmw5VK* zY~FH?nomDOUvc}jOS>5{$l{?BMCnj^y9yXDUv&y_E>aCq^j`gC!{BxCw-01=eO_$g zJRfOI(O|qx!_7CGjWuQTAK7&D&chFnRmoM;CVf_y73+O)j0!c=KNUEViup z@8aA##@`X|WU{fm?;RU2g$Kw`D{RK;(N^%%)KB>QK zRFF$9ZTyQr|MTIauPahStBvAbOo_ zlMNk&u%bVlKmY#FPskK>eb~H9w(i^4T;Cio?(*3@NPqj~S95~QxO1AyfHG$tIc$?= z-ML^)3+H0KHYS|pI)AQAJrEx+n~VT`iB7=?=aXhCLPMy*};x!KN+!aU!kq^emIaLWa-=K2Cq6<<%HQ7 zX2UbI`d4~m-fU(3jt*8TeGLETg9jOkUZ#t^Nv1t+zB+oWdn>|YowjQXHonHt9UT~< zc<`6B3`WtGu7yEovn`t79Y#O>iFU>?V;rOKsV?gL{TII;`FSXQqI=rlF{bA}k!VG3 z@cbr!cpuxy-x}x9{6(^HSN4~gYt7ZcM;D>sbjF%b%2#_HJV>rLUW7IJbv*jrjWz_r z1s>>fvx_M!lJ{-8Uz;C7l-@3!Ns?}8bRYw1%429QIkwtKCZ@wyel{Z*v4@&(?VNw_ ze7tmb=E++(3a3$97H?)a3|GQ#`XIqGcZtM{BUi$SK0EyI_>U8|gE0{AJ5~9paB|1< zOn%(lbK>Ob*?1D0o^gL?Uvp@D?1ZKeM&IAhVIaEWtJ{V2(9aBkzy9Ta!@dpqrLt#BOX!o%_C z-Zl$_qM@ee8D}%>yYDWi!>+qT(2WlDGQqZ| zF@CUd^6MDh`7~1 z4Q>jWvQS;h(JBcwO4Y!Ww_NjoFZ`KYHF(POG)Lw)`QA#n&S$OlYp}tik(;ssp8CL7 zkFf@B|G-_fl>;3Z5(e3lA+(She10&NC^VxJKm&9GVv{!QFUdGYrTIaMEhv3eS9u4_ zq``AhaxOrPtlBAU{kP~nMwJRp8=}RADkW_5PC=zso^aoyHi@VZP-zS4z6UQLsL+ej zJYyHdf@d&QU@(~WZR-VOAoaa~E>aEFk&QB6so^d4grf}d#uE@3!=3LTVDA^0`JQ~> zYa3emxv2*f82XGCs`vfgl2~5&nlR=AaG${soJrQMl!{HAn@U%cwus1A27+rK{E!*2 zEu4O8TS)_VGWJqsjC#X{tA9sWMvsX#UH|C=RV=;WkOv2z{qzeBRirlU^ui8p>Ee0yG_QuTwRiF)SY&5h z+{Wu(V}Ek5c5P`n-_?;|+|)Sj89XPu|Ef>EsSRLs@TYfEPycOreLKFTFHhQj$O|`e ztf#f44FM~Q=WTf4 z!u}49wPha4H@Hb36c!Cl3X^4n;@~wj1A3?w%qiKodTW!saGZjGZ3++F)-&3zJ`4t> z*Te+BdA(rZZ%BfUQj8IejOrz47otJ#L)LF{4gaQOPL*L$-C92g&#g#HH7y@ z4#)}Kc5JJA=sCPr{ag6*fjW3;8{UPRnl?1s_QR^K_N*2Sp87`TgF_DcUH&Pl4t<|N z@pqTLTk*(KPWSeY@#=EQ)q8oDPK>vTsn;S|*aLO@gY0nI9o&i0m$6Dd%JRIQ-syO9 z28-Q=GFaZEj-+-RGG&Ef>7Cll3zOl40>dk4yesPmQ$>#{zA3jE0uSbS?N}_24BiYD z`4+}Jx_Fa+(4v`A&$Hw?V?>KI-s57Fo;`Cig*Su5yDn%eR$q&J#%*5h2l7M|Pn2;- z4CHq^j`+8K_1EKl#ygvmdHv??i51IObs!Hyo?-X*wy}_+$U{(D7#!inush13zG@s< zC{TPE*lJN_GUVlUc}`#Od$&babP+O!5y8cE42JrWrxm3WUgG&$q+I## z`gjuZShg_bX}|Bx-a;K@cnQw2+Rqpb{_Fb2oNi%fLxCab_g{TAbalzkixlXa8Hn&@ zTQJj)#)r;jxMSen*J9%Niwt7btv}{ddo8AY7!?_2CvIkVTdY_l-L20*Dc&$T{o$)G zWDG6*YmvO)*w*F`AWR&o&joK=# z#f1!K7cb|9R(btufu-LkHaw5Z`V7x9@QWjhE;c>pGNcw8!%y)WFFjuSq~-n=a>gA) z8eGI$Rj<0MAajA@t6u_dqKkz@q#L@#+AP8gVQAuI5aIIcFAczYf@T1?AuWn^4N z+T{TavT-2)#3tok7rZeJ8)?b5(ci*Dx!G(B7jTSi##jctr@suiq5H0^IPe*XB1oY)*|#jDHxJg4)*ki-+htGu_p%)3yy9Y17~G)K>H0+jO_m(boLM5E!_j{TSg&(*Zo! 
zTuRK4&%3v7N@nEk+2UQ_h0@?9-;)d`U;N?gkq}Q_Z{DThz&QM) z3m-N98QI?M9)jgb{^doRm@X3*aw`Ll_WD!XocejwrWO7*L|!*`gm)1eTKJD|^KvFn z$Qz!kV~DP;?u5`6?{=ERn7MTM+ri<(k3JcCeDlrM)5k}X$Bg0MeEZE9F?qBfBq#SA zER;(Iq6|fclf~lR1|T`ahW9L$CS7 zJr-nwO}kU4->u);Na?w%XATmUg`7If08%@6iXYhDg{yh}yRcZW;}!bmt>_Tgr+KkHekeZiit}s!SsSq#2%D=X3`XPW@|Ej5|K*pzsm}^+ zf#0h8&;IO3JD=tG+K5jcWN6A!=9{bEGzJ+i^U%(SV-9(*K4p~TbYz2a&pT~;^^JeY zq>%~qrF17oXoj5K^&g|Gu}dBh;Ik=AK12g=!wEXJbC`7`%8=!2HiJ>A9ZSK!aF3hw9VKx2l^>26B^c(p`$VSx(np>1s?pe z@J@D-Pn|qjh>E+5Z=!?KV(vcCHx;iRyQn(;!(-;Ue)OHay9)~$Bvp1l-Ru4HA59xy zM`v~Mdgjgj`1a$%7@iy5Q3ws25jM!4r@#K<=l^H0#Asi0=#r86S7?~EY0kNXlrcZ^ zZZ{T(R`J)q<{jhbX{XTW+sBW0A|ykhd5u0a`dI1exP7ZT0Q!9Ob&T$1oN+VC3k9ZJ=5+e>n+(bf&_XylO-f#yQyInSBtPwP$7|nR*}0lr z6&~eT_{{v**w|g4aSR$dHpYz=^OVy@#*34Sd*V6cOZrcL`q!PBXp=1(GY;c_I6j~k zhSQNFov>{)g0qV`YMcl9_Tk0euy13ujh6Vtvoii}btC-xES}rDzfH&Js*T3Z(|F*~ z1M+L#J7lwxF`d45v{>#g#S{u?&Ll{J*j{qg>GYN|~Gp>mEVt9RadA4*g2^Ty1B zHlfh~UVAF{ap6qR$au%xPGag~@|Vo8hSMsZ+i;+hgUtzrd^?=yx;W?i(;+52Ve2Cs zvDQA1(lf{u_hcA{k1~v3`1D7~jXdRFfZxg7yRBC;ZvX7R{~wx%{vmxdIz+pnYvY!4 z;PDLN=E$s*xdK+jjZDGsvV3 zpjB)`n|av2FW0m!K=KnlQ>Ai4F}eG_?F-<3fV1E)npk94ue?Ko{sXJgJMSBqW($LV z8s9(}!_Wu=)KQ1368g%61P9qAm>dfCfwz>Gb^Rxuu>^#0ScDDpo+eXoHt?oCw3)JA zR;C9jlQz$_Y5P%kc+M;rz4g(++QOYaStYkc09q;izoq@~+{&@=3nys_QIqo&*wSU_ zzG+|h%zN~nf8o;K!J}tc<+su;7%R2>xcBRkM>&K0|&hZ)9wGg2B$A*!DWyuZ8l%fsrL9i zlLsWHu5yOs^v{&p;5alazenlwU6FnSu|IY{HMEcVv<>PdP5afWj4V@DpKr!hNfHOn z;JhL7299*^7q3(pY-!5VwD30?l{R&3-p_NXlXcpMmw+nYB1-wmuhoj)^ih}h3nQ>+ zyaO|3dKZ0cexT>{FTU_k<&ER5VLFL6$R+;EgWpsG*5b3_ z8TqskZ3mwE`#EFRkJ6{ovH>`M0a|tAH+5*ql%iQX$W4ZtH)blx?SuXG>>Hdr!j z+F5@N{NADef;cs-bU33y`PH@R){hG>a%1G1_dnoJ(Q;a!e0jay|CHLj8TC%)G54^= zuto6)@Bes?*i+<$nb`ZLjnd5VLUm=G#l7 z%>LUhT)R}vaNgY(>(5fM=0sSK#%LYh^qq6>pW3;8`|GHLUozl!%w6aNF+wSc6uWQo z#yeXGlArwe^PT_p^IxUJh4ro#O5sqRKiYeJV^kN55FC_Np2AL1KX54Jw}t47PR6Y< zunb@NW){-5jlo*n=!Z`n zcEmAy#{I(LOn8NGKbo<{QTL;Hm)5Dp6+fMa6tDaYvl;BZyDDaAeHw3Atj{H3Ehv|f z)4afYlrhni+d{ZL-Ip>hYy%Hlgw}_t?mPQa#L<(XgCT|j@2KgmHZd^UvBCkp30E5) zcWxImG}?{w-RS`A#*_TO-a>1%c!0lSvhQbnVmN(bQze)@?-?$@Z~+$F?zG6Y@Oe}I%a{7&yF>opXXKN z63e+1@Pv!lEF!`+`tmdr?^TRW^tVXHAKqlfXNzVhyAyeZ7Y!Gpy!tzJBZ2gxMkh_|Xu42v*2h&pUQkz@GMxVI)4 zV^)1lu2`U3IGM&4?=FMItR2cb8%xn=j_;T0+Kn5H(H6wv^d@7JjUfws#_~sveKcj@ z5N{PvTj0;eU1P`Cf)lSN7fv!H=@7bw(+$QXk5hC#(qfUZPhR63e9z;R9o{1^qTT)s zJzGPulLb6K$y*z}e9-@Bi+#tjZB7_hOZO8yHxJMk%cGD%fe~kaGD*C6-rD=#c~boFykw&t zo@UH6Rz@ESVFuO%<;Oq9vUvu5URRI)8^dMz*^IIt`N_|IJa<7zw;6XYzQv0?%(R`M z8}H%;{}^Iy2E2HYoay6VBd*! z-;9yzQT<-8?ELE2zu7sKC$~^%jM8U1rDGH9Rq})(+r|aHGDZ((;I;{SzYRNcFONaS zWgda!g&n^(KfMfy2!gBa5^v6GkNZ! zDdTWMZs%ov^fK8Wu)%||_<6F^9A)&qN+$f~i$CoAqL4e^w9!a&5%2xhty|4C$D?(g zoQ+ju=Q)FC27AVH@@pB|&BrUQH$$R+cOjt8rC~nkod@IWb^q|YJm3j$D#koI7$X&# zbO%oPJc5OhXH zIr7v-@Zdp2Rc8z}^xR3uB&+C2WC+6{1Kr+e@|$1$x5C1h+vy9u>zj*;&5XyMJuyEy zxg?%=8xZM}+JE9yq0XvL=nLVU=#V_0wG;7l5zPtKuyd15_NsWyjbx-fQC+CE4f=h?wBvx|9?1%9l$BO2{Vr-v0c6v5Rw9lA0 zcKlSh*+g*~PB@_|s-S*%i7LE5{^+y8*QJu?7Dn>Nk28oj4&DxL8$0Gw`p(|{={x4! 
zAo-A+bZ(mk=8rL=SH|VZ#@+3Bg`V|xdX%*hxnchQ*?;$co6R$x_U;xTQ|Ke+f!Q1z zK3H}5-^TdJ{P-;Gp!(Beo5%KLoE1Vt7@cG3p!mjId@#p~r;W=8weeWSU3~YjlZr<> z2}uUMt}SS3oSBp8rSA1H-#mJ}V#SjWzy8fH>T6CH(OtRn^0sFX=ABOufVX+vcr(ZA zvN=6byXD7k^h)$2%h7Ul6q051yJcT{_O#1s`zZ6OO=N5NhWp^OJHd7A=&8KE&&1n2 z(b*@=`!wmZk!L2U5w-O;md6jjFCSAR(#rrSNEjnI!W6eT^-X4*r|j9HjnfFV(&4o5r8S0 z>z&+$N5x8*g>K9^1_tSyvYtnf)qntN!Mw9zds~mj5Xw$SkKhdSElhByGA|;7GF!+V z6zr9WV?J89trxE$nsQPDG=^SWHU_QoLSNIpn@s(#?K2rEUz&fV$W%F*O;9V{pa|%C zU%MfSREE z;Me=1LC@2+)yw#D=(3^Xlmid>QcAq<*PxXI-{7{s2IqOZq1UD^HEhSo@YaTQ zP>?ZPXG~B2-fzmtxcGQg{ub-Y7NcmTwgEQf)>xJ_vcOw)nB1!f-4=ND_pXO6t^U;4 z-l1o(w>*o^s`ag1Lz_5#8rTbbBd>auE9(y*{R!TD79D5o_pFJ7KRT*llNWtW>Z8c2 zg8{sny`GP@%#Q_^m(E)$uYdZ~==s1A0pwnj`-T9?`bZuEQ-2-ddoVBiY8HG_PLjW4 zl!cGBBMtCnC2Dxzi(_-JrjQ$)fqRA;ea@ z*Z%?B09dWqK1tvHo?ksoKf`}9MCDc`BYW#R`G>Dl^k(xSn&Fm_7g{(p4BBR_sd&;R zT|d6gX2-%QKfCje1Mhi@?x)42)SLhCv%g9iKbhk5qJ^GN2^j@bhSkAH@gknR zb+3!!GH5(!Jg>atp+qgket$;IpZw_4ovSU5u4jP0dcBK{`g5)}SV)M^KJ##N6I;^; zg2js?zr572k9-zc3=0+j3{CnG&c=@gt@r5leG6E5E@dG3=U@H41#^bVj6V$7;+`|) z?mv)cjD=K$J9hNo&bbrsR|l_Z#tX(73b*)swIgNJv0t%kd3v8dc?xe)ry2kNKmbWZ zK~zSg$m>rFFN=GNNypXS<5?1RVsJA=pg+Uh^&7Y9o8mR*&3C1kqwqPH9Oh*McbBLh z?~=CbHwwQKZFLUsKn96ZpB`(0_-N;szy9^kCl@|x5q)b68tR#m8SN&B*xfc7Fbg-^PE%fkuPs!=DuV;fuZ>$6GvI7>O)^y%&f4`pcUwc8j+eub{c` z1b4(Zymmc!#ShK+I`N_7@z97r^DYX8k-|9QWDmaORc-NfByUmPjo0$1w;XfIfPv`L zNtdv?$Sx~OeQzk?$IiM!G-!ZaHlPFFh&<+AsF+ga*Ye} zOP`GnQ2kR+azOiT$Imx!-5eaXfuWNjO?=of7L?ZCG345Sc-o>=K86vSHs)Fe5OJ!- zfqg-1DBc#$6mtHpfslYw?lj=Ym0xVG-faI>oo zVRGeuGW+!Dqsh*~q$E?&m!Xf`IFvkL=yl}$M#e5ORvd6Ahipa)iG^=?-0{HGCxILu zzn3wA!4-2d;vPu0FebVuz{weM_xPz}UXQ_oCnp2DF(sr3BZv@5Co&+vSNm=xZ`>!r zpyQu%#$ZoFf19S_qBFQ%C_eY=jEvXv@SM%4`U@Y3Ic?+RK=O*5=biVgc-0g4w=r<5 zun9K4@a)$aJILox+i-l-SRez$Gk0eKK4lztS|Ic z@BHIG{o}|abba*r`Ob|t6n^~KXUW)3jMy~FqgSl>n;C9|3nJqg2MHQ;^Xv2+N0jmSR2=5Lwz@EV6vk>gL8eZJUs|s8w=yvnO<`A%)9a9v7(OTY2TbA zztb;cpsjyxB+uq-?GYY=-bgl>XBigB@iETVH^-Wj?-z#Sc=F+!F3q!M*X!e$ei_2ePS+y)AsX>OzUA8>e09I;b87o-}mXa zHvh~6`q)^ax03seyH1W=y!2fVM(EE8kVsY?LX$adkU=$mpG~{QJefdd>06f&G78o` zAs~F3jRAUXx*4NxM*~2y+q#sMZecOgp6aCI-pZ`^IxzKu@2zZc3@v(Heqestl zfKXF9;Xn zqzb%dL%Z@%GLWy8$WHU%>5=6^_ZdUGyCjnQ*3Mmpi80@!ADv?LcOL$Eq2KHj-Hrii~N81F=G3W5W83dB3YLgKP6>@x3|J{Easb zhco@);CqUHCK}PS4?RlU%*u7zEqqB11OWHhANY)_+bdju&+Lu5cVSQLMr6 zIr;8FT?Scwd9+X@=<}vI>HGSF?BhXhtb=ju_Klf`trP0N8MEZ?_s_cHC%8-nSHHVB zCwTV0?IQFwhwTcF1IaOZ4tdWD+@+lKP#f{)J!AdsxleMkxDXCI-7R!=PX3H6sFy-& zbVL1Xo_-uZ-Ru%f;dX2aejgpoDNY**n_{En*vU>OH73P{Kl1d%$Qhdi&#KcJ18>q% zUN`PUj-yZAQIm zE+a2bwApsH@L}hhkIZY2VF*JRVbH=OJ&+8BRJqI3R zFXxxSv)Qn{pUgOv9#4niIP|i4bH@g0r|7I9mWEh&sfy6iAQi{T{oANhgwW=>{UFKn zZku@>zc0@=z1&Nf2T0%a-`lwVUwtg+E#_AZ7^ZwxzKm}3@Iy88Ze=KwA-SXprr#O@ zRxRrt##|M{N#1HHZ5cnNl^F-%D7^_mz5VgM;Ha$U!5JM&*SNvApOepH?Hc$7r!xNA z)IGm9DKtr+$)o&0FQHnc#Y`|)bRKITC|kUsG5C5Pqr(+50p!no(4}`_GARt*YiR~@ zZScN7=r#3EdCGCP_1ojTXyBsRgoc>9N``)9oO(lMvaVV~UA@vE2`6CGv-)%aF0)q~ zGAK%c-qLaMC)5V7$`LT?K%AjZaFx|k#-Jov4+i>byMAJv-&DWKs$QQ>9|G2AXwWy{ z{;AN{YJ5LRE^Si(RocotuQz4FYlE-yaY3L^emy~CLD3HQ{{N`D6R$gua)JBZ zcj>OKuCA`GuCA^|V-u2o{2@fkKQs!bsZ)>TK}lD`!wLDRl)894j8MKE`07_$+S1T+ z>bkI4#iHLf%{=bU_Vrx!YA{XUO##CTB}j;cR7ti4d#wt+Uj{hE!gPxM~%17Z8&42Yj>} z{_1*Yyb6_}_u3AvcIe=GrCZN;+7{M*n)vy6y4tRLc=cK7Jzr%{x!S+BT+Mefrd_}eck~&)T_yL-edW_Mf(2YT!x+vkd#7Cnb;y*)wX!CGn`i>R?pM*P{4s

    OVT;nMKnr)>C&=%>1rD+h;xBR;CAQ6dQW@Gxe#ye&d1Z|5I1*B;S_PTZ;R% zOTbmrVLkqlNhk}iS2PXwP&UP5u-FoOo^XTjlu@t|*`=?*dM1>MaNkRL=8O-!ec<`DOqxHl#{Du$@j~-ICD;cztG>eAjWBniVb&mG1a zMobDXk0`KT%gS}XL+1YL-~Vd7IZd>AcsO*-c>aA#;e(X8#V0-4n-M8vI`3VEyg6ho zV+dM)^4s4v*(`o{@J$%b<^66F!9ZXVNjYR7<&m`e!w>Q(JhXW$1I39#57-Wi4Zghi z#pceft20@AlHy3wok?wcVA7yG^%0kw=O_c?&Ack{B;ysH<-u+-fCm|)rMSUj;+{JB zW`^o3lV&`S^5lN`A_H0zH}%77p7R2_QF>#|!NM(QPrnSlCO3v4lV7}IYcYd>zChuQ zaR*PZ9JKf-1^rG+^OX!!jMlcKGi);wyrf5E#4+jS6>8G;{CNj(1#gA?$tx}%-k+jw zf^>cN^|p8~&z__4kGhWcPwdR2NZ1K|4=xr<7zi%BPto5yhVxlih+fk2eEX&txxx*o z!;W#3f$mU;8J^2%$KW%@v1dG!^4{s+&h}UF*7XIYIrLOt;Xyk5+N48TBH-KWD@MKe z|2)}vw%v($gdu1tp!#Km%S+lq#K5i}nAn~^<~F^^z0*_+{k zcb-0@olLyZi?MFnn^$J(pEY?jfnK9RhSKU_GHya?QD=0w{%S{tr>DVTEEC=Vp1j&k z;%3qoY=$#l_~JF=*%KLt$UB}&Jc0FJ#sT$%C;Bn8GfwLh;%Jj4czBFH%51S=PkHns z3q&tp3VD;kcl41pAs_Kix@XiUvl*(4VHFt-7`Il)3AB9DfrYQ9L-(qbefy&w$T**o zv(d*(c+6PZb)(_DdtAGdM-P%=+7mw?&Y)(2Mt?+4w8C4yc%7V8HhHOByLxye|EnLI zOP1$_nQ?<*jSQ3)Pcn{+y=zepzvC0~LU}3%FZER3?R%YDkf*R+3Gbb?*z|C7@$%Kp z=N&A`IIG{o`FP%c+JiA+U*Rkm8XfLP_8s2ac@ZtV8qcohb$$0PI-<>Ja+}YK8UEq< z_Y7D=H#=eu#|QeUkR12&Fh6$W;O564p3eyN=Ex9+*ryqho;F@tfV5!zVP2na9B}BH zog!DObKarRMH7&J7MxD?ZWe-*BNqRD_p4t|AH@Uuqc$2kpm#K2XkobdHlqf^Nd(GB zutI1sP8ypmATvY=eKm%VYVq#bbCbvJmkaN{+cx-i6;)q{GM{K6lhK=JxPJ5?xqhQD zgg0mprr$Xj^cTPURdW5o=KO^ZmVB&S#x!}f`J8`{k?(j$q~|Yr%pWU!%eAS)*)!+H z6Y=ZIw=$d_-|Vh!ESTK5k=J+as=jYU%X=+W;#kHfeEOoX-;Nc0i|4c{UaMl!{`AOj z2SNKBZ~SmTAp`537P)y};vKu$)O8LejCNpYBjI)&>Sm{m@%wZO=T~ZvOYLm3ko!*K z=Hom|-$bdTOA-#%7lc=P9I5nb&*aKNsG9iH$LW~H%+Lj(_AG=2B%`Kgl- zXAUK_BTYJ<52A7c6d(JM=Z)zC5<|ZS|dTF1ifd_y{jPFD)9AYZg~_ z4BEo^y>Yk$L~GRvQCGQO|F^&X&&l<-qD|ra2KPb$RaXn*#@s#WXyySoZ(NDb+j&-a zsy8}JRD0X)X2Hb0onIDT4!xdpPIa^3dg|m`6Q{rCYgbEpaq_@o2;D$^82I68tfs$V zTJ?Y2ZkGpn=FxKn!i8T#4vsy>&$_&ge&oE-<`$%ACnTN&=r zN9aoZ8gJQgVJ=`{3H`_$26_D*{&yeToCTcS$yEyw_}yX-4|sDNeP~CgIZ^v)qcepf zdhde^)wK{&%{z@}-V156ui z4GT4<*jY)9m+gK9lztWWabl50^k6 zP|BOPr3iklzRHhS160O7BM|4R)RjH01Iw6-^1NP^SgUPITX2ITJ(>+nDb=n9>Rx^4 zN%;uhArQ~PzIPoD6m$s=&oJy(G+-QwA! 
z-mHA^n0!1+;8vO~jAf9Et_c;!Kjn_03VlO*aFL_@QwK%$YRX(_ELaUFE*7n(*kHLM z{=91ts7xlEAs=A1k+!Nv17rALi|afCfAX&UtKy{(7w@NjXqq6Jg@gqdp_jo<8R|W7 zT!+ajYUnXImG>P}=C^sdNliNDJ;5^U=xM^2a1yt+wAa zM$IiO_zm0zb?{RUultTMUz@NhK}Z5Py=sHry{e-;z3>T!HhsmLV#6z6yY{<`f0MFD z`pSYohWy}{Ae@(iH1*!*lgfwiUhUBh8mL-*{|{+tGvNuY`uFRf-zghqdOUm%)&O)s zi@)~sDh-^rbl-XFo?=X$-K zI!{_}Bntu{lczsB0O#HI@#GJdX8~L=7TBTFR8&DL z{TS$nmXmzRgCXro{fe}C>f$>^E)Qn+6waQ6JC8`^nJf&wdkezw&CuR+ZPC*P-|1tJ z)Yg+1;AB^M+z;FyEqYXzvK(ofQ%H{-310Xxj`7+!l%b#Z8V@)gfQOG{5b65c4##|!a>sj1!-}Km0Hk04`gfza zIY^A*hu5Ci#$RVBxt9@AeV;$!d9?K+OmQ|T7WXx-;vPIz*-}u{NU8;+54NXm8 zDAttY*P6&^$Ky>z!C+ir{CQlR_vf*8scnS3D15%XdS%kQf9^~ZR)^EIg}=0DFATyD z-aFsK^7T24^-$jUyYk56iS^`B%5&ujZ=&wnQQ4B>kioY0$CQHiqF$vNx;^&UnT!q*zfTzJni~CKg84Ayyd3z3x zyIXh$hB%WlAze%?pJV`A#+5wGGSr+bJj}$3)y7Rg@Ug9-2kH|K9yF=0OvW-3Od zGN?VQ%~vd3{olb#CO*9DpVVjdOEgHxRHfIl4MJqdWd$z`2~!!*lWr?r5yf8d}C!5`Mg$)MG6YB~^p# zoyHi^8!bp+F^DbNJbT8Fhtt$JSy7#}8C`kFOpHdNFyW*Jf}su_R9{esvp7EmGk%Ar;!a(0j%g-q`4?ZSc#zwyE>Z z0vH|0B4vO7{j(01T-)03Tr4y}I4Vn9;U|0`zfe){ojbevtH1sE)Dx}YI&=?*2N}m5 zBE~@e$3OXa^Iox}zr1v1^VwII@-loUV@=^PqFdm{tN&K=m1i~s<GEiKi}OwD2u| z`-@-XJ$7qz`gk$MGitw?r?s$A$GgA#z;^t#5OE?|Mi$*|VauF?9J!Wv{h_=9-|5T> zbh_nW-DoCu@{{Upk>hZoBc3-_E!x?NZf8r2FD){VkH)h-h2^?h-lMyZ7W4eeX;TYS z7VXFnJdM5rFU+ol+V5)Ph_+XaR||Vo-?o!N zh^8~A&o=MhyZQa6zisjJ=E#$GGj52teEr+YdDiD~UjA=0WLv0uD`WJV8K}t+v3139 zzLCKzfj(o@sk~xu=Z&s!@+!8_&fD{NVR0}ho$2(s_r_?c58x9EvBF*HG5+bFjOe$5 zBXpJpQpS-_FMbwpo^FBrN^tC`S&M4=3AuEo=LeGM;-!-p&oiX#4^Lb6A7#}2>5qR> z5xX|OE)KuN4vXvLBmTJD&Z4(F{PS!(gx+e==2XrB$LiyF??wE6Bm)w>lr_5z!hLV! zAYRiiYLL2h9*_E0d6biBc!)|>{|-Q8_(f}8%64zHqC9xmJ+ZHPGf>k}^+WX0w}dBQNGBuj z$5Z(4X!7G1KmXg!Z+`QO`sl+kRtl~0v^KIZsLmW&?&WpPbKTBi*y$J%aeL`>}`K<-_9uC&au;PzZVViU=J^cJMLm-tZZZJ z@L9Y`b}En2{c!S%{$Y&SA70P978*ude_tJ+CL5l;;k=p7R48sY@AK&^@y)@E-{vFN zu71-3!o4vtJ3q%l;u~+|!5#giJ&~un#RPhdnApx{Fy@~9sDmHtdw28tzLtC?L-m7M zBns^_?_Q`4)6Y2(I3&7!hcaSwlwqKi7wv!b&;K}WVPQnOIPe=yEuspkWwAxbnmfrt z3t~QalHP_N$7>o5^58yt+z!uN71ICc=X4szdvti3{`T}~4j$3L0wYGjZ+olL>C@4@ zzJNdRt}sZ#h0IP}5@b8h(15W3Z&^zGvrqXFa~L6*Fk=Qd;X(Tw*NRPfJZ z`t*Y@XD7nmeJ#qiK(MF&eKjv~5~?mR3k={|kT5>oyxwk6hW+I4g9o>AIw};_^xejq zWVLgK_T?C}x4MhpKaLVDFrGPct}`8e(wNrHung*C;w&PD@4a>(Fs^^UufEzG=Shn` z5P0kC**xO!4DZlw)(+TS#G~qKUO^Ycx8Hp6>E>$V(!m@gUaL>ispnp+&Zpppsz;NTb=b*l-$I!uh<@Rp)qF;$(sFm3B zQj}kAcuTPTnd>3Vyajs_mIuvNv5H=w+H%V1UBZH~Y*GwTfhw<6uyw08z;_4cEBK37 z))TKvuO8C)U-Fd`XsN*H+vOPKK;M?Z;M83M#JQs^|S5T(^a8X|7x^WbAkT zgqQU23%-Bz27b|gD4^*|>rTHTkJcM-dL!LV{iNI8ce#ZI;)1aHk=x*--L_66#r1EQ zMlMj6wt2uFnyvEI(=9%p`_{om2B-tReU+Y!*yd}{&=0i7@Wf8vDo>C9@Bwc7Azy|^ z+=Hz+@cIu=Sgf0_gFk#FZ3zYkSLDmKgrV{9OgYg~(rCEqG(ab>$0P5i-^|^sKP&C3 z2Ydiwc%Do2oJZZkckbZOg>qt-)xT5d8@)y%PM$x=aCsJ(U{=1+S&I)>Ve`(V&ph%+ z0V^M~?i4(78GrVEi{DDreL#jjuVkc3C|TMaKtt&Tw2EpYYg`NX+yz6nZ7WpCNu|F+ zvX?vU<9AVL;i-_dh}eB>(QWVLS-*;zwgO+;I_B)ykY->jf9HBz){HOe)&1mI&y_oq zEcw(sB|_HuQ~l>sp5E!*3A^adc+DuVk_}(MVWlqVuFaK{jxY1Nwx#rVUdAR3Vo38e zIB5HDN(&C9gn2jPc06iA!YlD^1`CRbi8vW;lJ3KkgfWk;3aS=Io@A(c(IVd6;7kAD zQ4`c=Y{x5me}-s=F&-Wcax`HdFZ%41jHdfCwm7u&M4oCC$m)8tv z&ei5TL-vQ`vvi=l*RNH#b(m=+n)cbBVr{#mt)mP=`%H?a{kQLVRYV)gFPiph+Kd5) zk#4-W!`Fld4S26fV*+%rnO&>|DBwfbojaYc{Trk{U(Ii-5v zcsRcC*f!Y=-@Iuv2>d)x!e9O7w)@~6>6 z-6BltgUM}iIa7vtk(+EXbnv$2)pxst54AP?d~J1CFWzV4wN*nfklrfYsbhJ8GbYe4 zlQaS`6b zL2?>q!ZWnwdybSvO4S2CL#Kq1)0|#y*8dc z@WT5H>Eib?#9N$EA0dAB6uv=U5-I>+IV@3sVl1*fl(EMG%%i+mc`uJQbu?jkV359* z*XtO!qdnswq$Tb6Q1360vyjsu>?h%>85n=Y3lq%f!K;0gtV@8xw^ ztO+^Usef5K8LzhZiPzSt6L@t0?Bu~GCe3g&S!E=F8-73i{=1tmGI(FR-Z>G~oyVdv zWENKngK+Cs#+PvX(|`5D&7c4HqtV@e*UlDU2M*>1`#9bqc`b&F=T~)OoVc;|W>zL{ 
zKG@>l^*qIHT))0~yD%Q--+r@6dhxppgY?O7Ki$0FHtze6GNS*(&o_VcqaP0+f1B(l zi+spIfgGffcfFQb|XpmBF|3!^Jwiu{_x}-d7nY* zVGHNa;v+nwf8eptzxX0Ke7CXj&gT7Oo-**Rc;Vp-m$Pt3hLne|)bnV6hg^;a7?*Y@L+}e@W@^Rg)fQflWGo~r zoijk*-^#FUr^uUU-`^Z8q{7R}Yl$o))Wc|(fv=Dp7C!8Tkk;IS;mqRyjMK?&GS8U| zjC#CqFZ|%A5F37X9?ik+oP+94%-+$l2T{PHNz5nX3|7vry@!{a1#*_M>{&X|`AYYNx zPJq4D$;YCVJ~roT#P{e)PWY6xuSGfwh|Y-Mg&N($aSY_)2Hqr3h6x#?F8&#-jb|1} zZ{57HIe+H;>S#htow9zT)8Vs-0_ zyzWoEd#arvhZ{2+`{N0Vtiq^$dGXT;sp4Qly7cjMUc2w6Hq{Tp^vXl&zvQ#+~eMIPTr$EKzyQH!SoYIYx%G5I_C(;TX9s!Z5U6yK;H+_2rAviX!4!uuRK{^sV*^xB6xZahtPI?(sm z(Ey$3K^9%#Y!~C>XmqFXZx;4@&xjuL4v*S>iTAa^%^QUvtiS2Q4CF5gchc%z`cUJZ z^QCGZGKQ0cod-wbRiSa{EOv(3@pS&9A0+FARn$-7{SFc(6Uq5HRG#tooi(QoBnNqN_Zb*0a!;H6OEH!M;`|^)7z3V zOE9foPZ1mb@^_4NQFba6i}wVldqe0{&ACl*_A8avWRQs(IKSITEcMD;BT?>Z^PVL1 zyC$es)}Ho%a)i^)?|MlvCYZqWkrd}lhEhVnU7aoUuq>6>R|+3WwG?yTpAm-hV5ja< zNweUrpROmZcP`b-`|{}7uGP0Ut1LzclcMdXMz9LDEp^h5_ar#cWfyI^Yr>2{bbS_W z{s3f)x8kb5G7S{)+NsmZx5cM=qUXc~4^16njCANZZK0FEchvrX0aqeP)>-oUm z(s1W)cp==i<`@d6eR|t7G#*+Fk8F7YF1=Y9l%wQ+wtNKc+*ke>qTC`odb-9>y@TH1 zi(Zpw3m^UiVdrW0GQz8}*22#=#nf?|9&B(LToww^%VpJAWrLv~RE`%5M9->ExAF`) z10p3rf2&;2h8Ej_k_Z2HGfCdQA3zF`4-EA~&y{!j*S4_-)~*C6xaa|gEdx_V7EwKe zOz*>EwNJMYRF+r6Ywp#fHcHSBD1WwPEm_h%-UlyXJmn2NfPp7Y1a+uVn$iIa7xn3- z`Yie{FvI6ylxz#7-|l3Y%oVxqpDVP831#o`hlPcH`d_t!uW<$+`Mn3BYqD)VUHjj= zF;=g}lsk|M|4D!zJ83Ehs3kxD0C%NZ{my$hO}$DopA@+e7~EDp7EJ~YTJ)K0{wQmQ z&qlU*KW_ukb@;FTe#oQ%awma@}B?X{&Cg zZXN=zT(7omzQu5ofT9lI;lJ`P*s#cu>~&CF%($z+&I6CY4Hv&#mYx2(eUC*xxqN+d z_h~UGgN>xR7(*+xD1^L=M`7H0e%6%-XUgcUVs|q-9nM(7qw+=l(&30aEA%G{Dnqjg z&7(X5zP);T^U;SF@=S2(q;1X=K#F9_UIwWLd8;x6n+!7i9y!#O>u@&#Fs*-KW*RIl$(V-~Dbp zhwj}=AxpL~m@v*U$UbVqNa1B*pj`j{U6Tk5ykNI~FJ7xG751eqD zt)aZCYG1NY48|kT-l2oi;IVrd?X~~S>i_YF9}T`}acobXR`JvDQGEIFPkz#5qZsRj zUSI^aHTl7vN1HQwquVM9N5;5AO}OA_Q9=L0Q}>$K?5{7pl|hh)Fwa>#J!~`OLB$|| zukizeEL!;7Z|kf{pNS76EpJPc>op0tNX0l{vc6)qj*&Pxt8I$^nbCs0owf`YaqhLL z1+(GhXe4A#A|-mGQ7sqUm4PRCbTgDO9-9a3$!MoPkv~58XZly|zOP9uV;^s+@xiRT zBZcJ=253ir;_;?1{gFq!Lkpd+a4!R{7@zpYdlTOf=@&1)HlDfU&YSfuF=q8U-fFyd zO?dT9b)`;McAgF}Xi__pLEJXq*4_Km0`#=M1;)2P(ZBoGKOO`AfB7$eT^!Zpo72VbzgQe#Mx;ie+C84g zIQ{*j6;qm#a)^+VnSP@G((Ckv3+GOc*YWQ1JV~CMIC*Twzi&HdgY5jpC%@W!c>cmH zM5%kE*_?F< z_~uIEj2$EsPAQr5&!7Be`pfzE-=F?xp_>Q#-Hb5C0^Yh0!{_un?^aJSo->X{*$f!T zmJE0;cGZa{E;bKV2fZ9YW zosd0q&>&7oc4i5$|97x!vTY2X^-+3*82B^a)F|~Mi<{0RFyrHu%HTvc`{;|r%ZyiD zJgeWlj0c3WqX)rh!lw}eT?_eRkz!wCx{X6fS zpMGM8k6kbpy!T|767!qUtA-CpGN>L?T^MZ18QL;As_oX|S7p%$lks#na>7D+q^kWp zx8)yyF$PHnPu=ce#>uEBmEs_=&Jih9c))7OwXIqnE;DZ_ z{nF`@Rpm>tta@}!2cOvP)saWBGey84L*T%0>p;SP_}Skkui9DFv!m@6GX^{?ydHRO zoq4AaM5ptN&(L2!^&~UTyz|cJWn}W*c%JTL*TgsJ0dxqS=V#yjFyrD+)BS`F*|Yii z&;Mq|-g|d%jf`R7HpZ)8t1??UXRM94ge2iDJ_gC;k=Be6N@EWV= z9(V5+CZ>jPe#40l)wD1tbk39S+6hrw3p(%=4$J}$1GL7sYlM!CuHRh#VvMc&E}1AC z6Qed>7Ul`A7H0R<2kcatMWFc9IAL*@@%%^&peNpZXU0;-a=SImFOAXf7bhD%7zYANq^KO6eQMB=xZXVElMiKM`Y}5!lWg3w>9=xlC9LJne5$!(o|p-1Fyt z5byAQPiGI-f#v|lc;hn9_LHYh&d$+ix9lKl+^!z@()$;cb^Yp{XcqspV1DZK`_-YH zx{V8m;xSPQ$X0#WqTt;;)-4bo>Hc8+wZ_qSUcYR-Azk*D|NH9wUFBGey!y?hC12vt z*KRboXrW429`;ZRtu_%b`vYrESG^xlOrP9LoeEySHE2L0?}3HST>Fny z6Yw5qbxk6UDUT;Hb`;{oFUed|$U>fK$QhF(u$xw6Q3M-_Yej z4ki`@c+`9T`W*b1qM~6CENDVMz+BSMV8pfh;r z8Pa&PhJ;s$!n7pvqs^kJxALI?ei=G+HHJFrAu?G2ouo5>NjmiEAufZ<6e{s5qT6X1 zCG_dx;2++87r2$Q?Lp&tE#Ke*CLFpi-5=n&+#;cp+v|S`tA{;aX;jrE9*-3&b3GqA zN!AN_r`o-9>4OSG(w!$>1Z<`4rk4%%B@9dN5(0yxG~Es{ck)7FKHfnHrIp}0I6ZaS z)u8A7wx4)5c?Q0MN~2#^zOTZ^U@e_A3ieZ9N{aTrbbTo>%YfHwh4}3JP8p@ITyJ*h zJLU9b_#{C%fD>?J!9W1Ff0Lfz4BpUpr3?r4^L{;;Y9khUly+xfXmFIK&+ukR27WXe zTvbyR@YE5Y`olm553DIWOuR#z!41H^?g1~yE4b+99$;-T>FPfzc4I(C<3M@7$lS#+ 
z#m+PLUczP30bPa`aGH|BF}hFw2Jw}?SG}8hqNThNY(G_F-VIs{+3KvHmuuc>quQ$b z*^x0IR(`Y}dQIY9Oq2C)@aY#%$=JX>vPqfB=n?rc?L5f#P+?ueW6G6vrY{Oqj=a4r z^T+^nzytH7zw$}q9-QG1hTUGE{m6=5mSUyxZ0D}RfDY!uQ(Jhy+6yd7!axUaTP>MD zo2)>iZT(@P3Ai`GtiUA;dcWnTN|>B8UiJRf*aA-Z2BvZ#>;2#|FBPjXrhg62(P{Wl zLE!d&NmuEIko|4}^)rU=Nwd<=SO8Ce`#)`3dg;3DL3J-FjHeyk#ET<8CmqQdFKe&T z4%C&gOqCx(Soi0j6<#F8=|mGtle3#`O}~DF(w*l+xlG(BVBRT>fs%LYgS;6W+yo8J zzz2|r5*agwzv#-K#%MK)3kbbK?_vAk@dgps{Q1i#O|IMSnCIBRwz1l>eKZBpL7O}} zMjUsw)NC#KdIC_SGFDlCT<-SYFj8lkcB9*M#|QN?PG1QWE#= z%VR6U;k67swqD+E0^@K_hmbOgBd~BC>R@3&JZOB%>jZ5~5Gmu@g7J|Do-N*t$65=W zml1MJ))>syiGc}y9XKdd35C|Ped${tWKgi^!645lI{hLd8%ysL7w}$_;C)S+>2i#) zCjJL9mYis-FPs^+T~n~Xx^#Ia?IsY)I`&5K)jRBoSFc4EMos*QHWf8($y3aP$pp*R zbh`^~wd+8G+r|&?WmqmG!+2%$$Z3Mm1o_j87sm)R-c+>_0}P|f@J}>!_@Vf*CYZW{ zkO4mWG()Z4VN!xl%Q$JG$BT)=Z2M-R5K_*M)<*guBLpu{We5ksfT{h(;$|$i0EFI@ zXwEatArm9bNy>yhDK#F_GpLi>#z7wKVe8ha}E%;e8c;wGXt9FICK%c7-knp za-{wkpNPl%ULg)nL=PX6|1oZd3!3x(eb!=^17j^*DeqA9<-yPE=}0g`k4sMqDj{#+h8*$SX>et8ckh`0e&yfD288? zPlmzqX0ES0jPAt?@mrT%8sm0#Hwo0ocq1K%eiOo={);crjHf6=hnUVh?o9UdTVKXY z{eLY6EtxwWuhDzSIA<7`u*SpblP20v`jTU_-M$Ik$Ru>{LtcU@y*jxk8`VcU@vJu~ z=MgntThRlK+t`T;L-mAZCaSb(()2~SL@k1Eci z7JY3^f38m?tBrNq2`!%FZN2~Cv3RaN690=|%)rlBt8eT(aIF065Wz9v_srO7*UvY3 zFyceWF3 z3lE^Jg|V3MWYN+AtIiRysOR(Xh4Y)U@!!qP7UA8lzY3A^)z{x_PM$o~n0RN-8hG>6 zi3u;KkI_@`=cD56Gt@tS(Rm5=IpJ}hUN4@0ha{eE!S&>cQ$z2Q8KN&;{(5sHo;{O+ z*?97@HZU$T9?x7N-qI!$a;pA6@~8G<_|up5X&F9^84dIhA4eR6E)}Z2{rS z&wpEg%^+CCtNrG?>bpyaH4?Dfkp1znI*ZguVMpOU%35ffTCB(k60GRDqioooFuqy`?{ zg*>>QF^!&MC*63c1_!Sg59wO+qXCawGUQM^Y^R2HaKNTT96WmD*r`b~3oz||dYyif zL&*v`b34QMnGPyEKl%^&J29}wYrB4Xy@jNw;ctxRbaDQ|52FZ~!$bDK=0f+s{p6oU zt}ue4|Ldi|4exupYscU1CHZexrD<7vAM%=v?0SHEP9^EMpuC1J-Lf1Ct@hqmFz1~=hFYLB> zQmB$yoQU=e=kK2XC|#z*pc{AW+|y2Q5KFxtwc0J+fpMGMcMgU34iy*v=ESMf?T8Rd zyM?!m;@9Gj?|SbYy74%stIWt54+>8*J2>JW{g5uhkm~xxiMKZoGoIc{uKc_I{(s8j z`D7tGzD|bL)_5UVL5IdN`_jY2%BD9v!-byi!+T$xd9ZwE6H<8RIk4;p`NH>R3zaU43F?6PC=PGK4KYddn(4{gwc?)dFD7dPL2 z_4)AH$>i_l8&?Z0bb0!%zRWS0&L&LU*dKegE55UMu)7dTZ=E@}Ih8!di^tM=#eToj z&VYxF6}K|bJFK&6PJicwL59%{8RRWE+jW4BcQTGU!1>#+zv|HY%Pr!OsTL0EkHNHi zX?C21JHbo;qvJik{c!X4+h=F|d;BOPdE+Nt?Y;LuXi+&i67H{$0gYl&rHAUSR}*N@ zZhFuwPc`=TX6~1PAR$s~5zzCxzZiGG`70(XB0UG1f&a)bEEKT5q)KEq8@(&}p ze)UON^t^xL{XoY0Fb+>K0R^v&uO+Cae!X6}OSo#M2;jq7YJ#R|UpAJ+^%UDj*ov1nGg+q%8-`UktT!=uw4-5bDb=lM*%x&nW^oqG>C?`w-eY}=0L z_)1apep|2U4`nA*we#RLZKdo!!(T3Z)MbbNgQ4Qy_e&U4Du()9O6$2K@ExPFNembu z`#vvF^_pUszoRmt{mk<4?t|KdgTfGLa2Am}JtMZ37WXZsX z#{VDo@NaJyzWr7oSPcQH*A%C1r!B()9m3fUehK?Yvl5nCHShpB@6c)?T@pAh_{yD} z3YbjMqw>JrzF9bI^TI2D{jYwLYPGSl5Cvc1nU%)h1!?*?{Cz!D(Rr(s9&0ndzA0H# z+|GDYo#D7(m0+84`nI+NYZcSa7RmtFN!#mH$YhgRW`v#?!fJ&(x7ZqV5w)M#sZXD6y=g5ZL>m*h!v{OiEd`IU%7nz0TA#;xfb9ETY~cNQ zigfT;Mw|N?X76Tv?>B_mE;o(3E zK1CRxNQZtrh%9EDJ$rUMSnfUGMVA33?@%!odHnJ)|Kf{_rGHU~grj2|G_jMP7c3*t zYsDfxnRgRoEBfk3Xs<7td@@Q5KWC6Sk(cPRCmAoIjRR@nZ=$bl#IrZS;9a)Ur+F%$ zYQf3G0gdfgv7o18j7jK) z_iuOj5>H8Sjv0x^+dpjdv1!NNq0!Yx*E6hjh+hj*CejIp+Biea<7gI*&_@4555~iT z``@Vj;x87Rp7~D?CT|bcz9uTfqVdD^zPvY|XIR}GOva^|z{hJA<$U0xaV%O5-NH$H zcI{@8PG*?2>%W05{YdHQ$LMHb>FHyhZsdLPD|nCcqP+a|<%x^>s2J13Z^;@)8xwK{ zPjZ$=ZH%;eJwt~?+EiE>G7cYzr_R6+CXvk0ML#14P1dDhD4#`);Ehfe8sr)S0K=_G zAv&9c;#pKvzO)YJB@4f+&*)1QLZ873@R+qo$3QAPlnMBYml;h0rcbSM)*>Qr zDh5pDnDAN*K^q1}_@bVjDGt;0+~U$ss0yBVd!z5)|NTF1{`dd;?=xU9sJ9qWE(T@g z>(|xbQUjc_&%UK>T(F)3|5|hIinnFIv2# z8(VC3a38*9nBaZ+X@)vGDIVobxB1?Iq)o8nH{oke*4Hl-R)$C6-~8;Coq^!cw2Tz- z$%*56_tw|psC_I-FhCxtZ-Rd)TW8`sAs1KQq??SD%j}J@3fOjCn zK7PO-%3}aJkb&Y#p+UfO4u!VAodN1e#%AG?7|O|9JZh(i{$at)PNIFmLC+69_;6_Q 
zyTa65E9NYo@R@J{d8$8XfmXa%I2*t5EIECULEkG^%HvI?n_x3jp(jDmI|;B^b22U>LJNo*{hdXz`r z-~9I1n;YR}*VWUEX!McKKKuR13iY!nexvd6WPD+_oE;B%53MYsI2XfMHvO$S!RckP z}uDl0CC9 z7S2NZpkLXOq47of$-|7=&K{WqC!1u@g2@8(v{MLw@G3Xcc^SUXpObmGk2~U7O!u{A}}g|M>HPeey>?ngz>0`lCM%X8av5%_2(uWnX#@!?+!IkCSzF)4oW~ z;Bhgb-@owD$f2883ZHc4%gx6>{^{oNy}~A_l??Z_`3G4{O{upV_w+L z;!dN$ECh_4scoDOhh}8#p1m2R8@mh!+8BMvcWFQS{9>|BynH%hdS46H-?gwX<4PTw zY!vFmf~vNp13qif2*2=x)$fEAA$yGR`tI^P!h24*~SpXD*EPQrTFHG+2?sQS7P8ucy4M|vX5%<0o-YRYzi)kka+EqWJ!@q_vq7Q-|N~Rqs z9Z&K*x2})wMmN<~`oK4rzS^8S|6%EJ00^&#%>&4fV|nlEmpl5v-gIpq-Hh@++JE%p zWVx^_yvg5grydw|2Tl)PUHmlKZ}QAepG`M<_JX0``22e9czDJne20b)dT*RHCL1ex zS0Bj{g+7E==y8XthsB3~`lo-W{y7cRMi#rps=s~fO7FyY&q?Xdjm?qxPMDH9F}{PUjUIFLaWSBk{<& zcg}4d9&KT>&@0-N(SJ|n8vh>jmk%bYver}{IYG_tK%wHZwUpw0#aKx!z zcKvXQkVk*EBZqUulA-$H%gyyG-{fuo%`BiC82!M!th&`6Cr`ZDxO%+tt6gezGRkK) z3xWPsiDe}82iOg6AVrBp6unaybziPP^Gf_tm{DIuaz6ilkN7gGjcI_QppeKJq$Ea<(M zP6y`Tx>HB^!V&bTL+OJ9PgFs(^(kWn9^+z-JM{3kw1e0-zZJr%OMe$s`O$hzaVy_y zlu+t<>Ba!45J`b6?ZO%i_uCiJfZ$=wIpH8C#Y!`&<|P~k)YJvwDRS!73&qYyIw@CL zX~qy1eDySHuaFbn!(ofJ`VC)f@6`>2;jf+z#C~^TtScYVl1n-7Lt?JcOIw=^dC+H+ z>Fv#GXZXw2_g~Ov04-(+R+$IK@KBI+;lIjt-*@W2g~ht5j3$kK2E6ho9Xz!FWP&lY zn7c`fr&levaDgt(w3A100bxfvG*}JMgUtvC@X(_7i)Zx|AA|mC95lf{`sRd!2;}hE zlnj6e(o2_s>3)oq+jawO`W!x&M!h^!l1B`v4ZKr7X($h9G4u=vZ90NbCjH$bR9Ssk z4SO>Gq2dyU_Y-gS^{h$`uc)k#AGid5#YwN1RknXS^c-1{@Kd+>j2zIW0Fn=`w|Mtt z>MVbBTWz*QMp-U>yet2tnP*-5H_E5iUB`cTVU;5jXua!?uu)THzv{4z%-cG3xE$1w zcgoPY`ahYctgd-I^qobysn@F8Y7*@?V7G0wYSE*CH|^v4RVtW!P=Be-hEChIojw`p zVSlN1=;RsxguV&|Ysr-Ep;GpqnS*dp9ncAdE zOg|pf@WD3yO1kPWHSOJ!lit)$Q@Q?x?~GN!TK#HXl%z_$B6tXH>ppMid0@6lLEMf) zQbFSo@me(%|A9hwaNS+t$@(!U0mXw>hN zv&C_TkI`(A5lvQI^)Ypvalp$6v2fhsf^dMUhkfTM`1ieUq&QN_ z)JECjBu~3V6UwcL+n#-GTg^ytG$r|F1_3dTd32k&nP5=fd5|))@NRf)oW?UK6tE{L zbLg?F`ttU`+hdH1UbeoQP)W=C@ zuV+&307><5km^Sn$$8NlD=u{)oQc4xJna@OACHF}?~j+{S#a)VOt_PxJaotizPE{x z#ezuD7Fk9X6Hi;=Gg5`~f#Lz1G@|>#41B_XSVW<~9zV)cC-2|z)egdzh*d6Z3i=NZ zRS)!ItV53{8KuP!#*c8|*{7}#Q>ul@K}&`kiwAF>azNahGvVM##50CRBc2mF!GW9) zGJXj0@LfDjxn?8+lZP^oT#FWB-%i+>=tY(r%}hiL4U9Q29;~oKjIa#EYpZR>q496meJ2U-*(P2W0l$Ifwyn(MUP0Q0TT_kvRe~5jHKP-fhCya%R zdI!SWM;{h`NgtP2J8Fl+JlboE>v>L^JTYv5F~;W-<1-7b>gFIwhFW+CA0SqWgYoFhJ_SG157%X4gwc;$3r{vTu zJ_duqhVh^gB--%$YQZwO_Uq5TuH0=M8crDXonOPLIA7Qp1g4-7Elf!sve zN1T0L*7xi8JTB>#r#e`W%>23?Gq-!U=(ovh{ZtEI#r(|(j_!BFC6BMZz1HGSGEr>v z6D?E@Pu>rY#)K~}UEcikhd*plGhVI?9`g7>r~-=zb{z4pwpewmzBu!y+Rj4affmfV zoOZJihJK7Oyia`{G>KLYs>TcV=DdLnMPx?x)2=U~0lm`r;@Q$Qzn?zN$i=ge_a+Kd47}y3+p=dJ1L`$**FBPIo5{=*n@f2blmGgl z(Q6hutHk|qCA)VWd2RE{fBt2Zf6>D1+ws8XoA>gHefR9y&CmYnm+e&IU9W7O{e?nG zwkn5ln&JOxUZUFgTKF?(ZvM^3AJT4z7a8U3aC-fK5HZ`3N!{bq;rVnoa+*O*etnlrx7%(M+>kcs zk>2>Y>Y33rn$Q7e@tR#E{9=cJ@qF|Lx?TM+oPEd#J3jCT1Js@t9-Nutybx_#jF8o;Z1GWG4p%JMoO0 z=3;La^Z&qs79l?QZ3pcZ13n)6w}0}};5UATk3PE-+KQ2v5u2{3eekq+!E?4pAsO~( zjr`@$ezy7a%P(^hczg55fASYC`k$IU_OMVQb~@~DvDLyEuYdB4;S%kfX@eI=U(3*F z(U%T^AIM`N`YgCGva2IG#ZXOV!;3NA82<3Vl1seF$G}?pF#kpY%FTe?KfidH_w>QWAv&Ud=cAwdI85~o;X2|qi~c;wAND1qm2X@o zlkCu;Ys1snK;Ly}qOpaS_RXu8hbLl&WQs+z4xKDt!!lN=tum6wI2H|zUBa@Un*%?M z8y16Revv$@yAJOhI&^$wiSQ;2^A>PpK*Pw$b7SGyXyT(;$cP8OZ;l{T-WX1!&3EBP z=FBdE=s*W~k$f^XsmI}CZ*R_?|G2()dh_k&ug1u1d^puX&18`U_dE676Q|yp?>hAK zQ1uhTI(ewCR(~GtCmXX*ojA4m>a*Wfw>yoW4hpuILg!yQSl5C}4iL4S80Wjg>E#P^ zn&pr%{5sdX)eC(U-~Zz0f4BMMSHBp&oD|$VP0q$g;7e~2W9Bx|Lr6aP^ixgDD|=#! 
zhbM;{?Ry|S{ZI=K%70#8h;^e8IhbdC`J7Lqt<}%mPurMF7^Ba&BY}*+*+OUFr#oax!_*yr(cCwKu_w#ts7g<}0Bm+bPz9&B+`|&b@m+Z}P%i)sLQbkIzoOb8ho2 zJn`tR`VJ?O8lfu?D(`g0!#Y$nc>?W}b!hxUSw7)!L&cr+2`BClUiY9g!%seeU z_(XIum$}hmkND~qUG!`6mi{lKkONfVW#I#!LQJ9O;m*l0_M$0=17qGuiZw~Ak!y(Y zm?crFHbGJpEEGr@T^WKt)cs zofc+GQyQ8crczu?|1GSNXO{3Lp${K4iYYEoCvw{B>>dN%~XTZ zvr=#0rzk?9fAHR7JHOIRUC>9pjF#F*8Kq^qnr4){i?RRiz<6aDk)mvRW{Gz2UWPUU zFok@;hcnc-_{0lS?>gS9dv{>Z!vGGA;5Xps`7Q&qWYVC`;O%|s7PNUL%fiPih%@#s zzFY9Y>A6?n_}>eyH6bRzA9(V+-hNzH2a|zGQhq{hN}ob|KlItA>)FC=l`-$;efPlu zd)tbWZV`U9(-@z_Y4Fv=JG?OMyZB)YVL_O70RgdmA?LC@+H#Dj9!!gRzi5w#@vA)Q zKWY2pcf8f*moIn&d(c?VmN&aHdgueZYxUZ4&4S%+gR6A2xBwyN7qx(5PxfEUeCtJ++&|<6?Kt+%4GPhY z_-gC*4s1{`!fNZXPTTeicj@~(*XrK2#X_`L?c!Rx(lm%QB9+Fy+Lu}f?q>(L=_k`} z;ixUW)4zs317)fXggi@b1Z_(LDHiPN>{hKHjHBMw2KcDSk9TWV3c1(6 zk`6ylp6zv67QS2jp*(4WO+L%?3NMNKUl}7wwY#>nzzZQ23%1Gl^}CNY|JyGw&ZL3S zz#(D|d%|N3qEk1p`=@Q=idA-Sy z^d`F|OK-I6<6-f?ZKb}ESI4akCr1wD?a%>^-%-%YX9D=V?S>S8VJ8fwj2mNAY@A|D zdHk&3r`M(%BORsez`-Nq`7?3TDSa8y8RhRcdFG9KEiZb;p8Lg$JdlUb!46zQXWn+= zkl)MuhXI5VmQ`(F@wRx}w(R@&uFhnIQfq?iJA60w41wC4lD8%f4E-6wo1orLAvTFM zY3Fsv*u}$?!PlhHKb{{9I1U&!8Pr{?&YG-*A7j55l7}+Zo4~YeFnHgMc8soXwY|}Y zL51OSLd?`|yk+)fX%o$3D@NVSKG$iVna$YrIJxVg9%dt=3xfpCihFO z!6$g-WdN9M|D_Y|fF~FOIu9B~ET_u<=%bIui1q8=f1W}2#pa#1XODMlZHG>J5I))) z&+rz0+H<^14`q}(9dC?*zJ@hvHC9~DQ+9d4QO@I=CVzBWTQVI=M4u5KA8(9jVE8f6 z@F?=N^?XAN8h`0Pjlf_GUz}zU@f(ZsA z&dnwg4%L(&BQW}jEjs)bJ|+(LU*3miJ?Y_9TNWC)>gHpD#|Ulrf`tjjg}_vH zscM^8xAgUs@`5o3EY|V$ntz^yqcKe(@cBma3r-zOoB~!jkh8%Aq`b~!CUFuUl z{PH9NiEKg_4mL0 zi$B|Z{#gf<)z2IrXknF{a^U9Ym%fh9ypHwZWn@0pLhy%YPbb?C&+d`^O_J$@-+#+s zX7QzS2cpl_c2enkhvR*VDRv6|@sEErp5>o*#sT<@f8s#fZhs~t&2Bq8;uYSj+QET~ z4C%bSEl%yK`x;-xY1enr|LsCmym{)>=JSi6x2?ZjF!c+KWEY0_(|Q+R<0-i z(3oLGD2tmdGTLc{*YJ=KEYJIWaN}lAzMpUb_z^zG!_7SFCqMXj#^+BjUaX()PX^y< z!Rh2?e{KI}bpPh-jGEs)Y|QF#?#7M1oy8H@Xc9jpKab{d$f$TCI_yinUjE{``E~Za^V2VR+FrZ5o0I_@oaT!4@->0*T_jmQ3F)0~9z8Q1yzcGw+g0cR)-N}_;lT~EP z?oRDwIDJyzW$@KMzN<|cgI=${+fDEygY@X(mA5~==WvT9jJ&`7{cnfP76$1EWSGUw z!-w~5ZhrGsvFtzF{Qh^J%$V@@nF}LNW^p;(g<3du`fME54yzWXzs{)r>8HPsW-m79 z&c5HXld~W&i;LlHaaNt`z2TdDuJ1mpehiBAx#!6ja?ir99S#@2_%a=|T@o21|NUS5 zWrv)8zxn#>uLp*ZaJNdce_sagCM&$Yqegt4?v%muyTbaca9q4_(bP^Ddf*%JO7b^4 zAtG8E2bUbj$xEhMXwiom135*YH`!$|Y!)lSe-5*+AAfe~;w(4>tnxD)23j9|JsOK+ z|3=1fO(jf*ws%M%cn(SxZis$xGWt3HgN%M!nd~|iEEvwoT4^6&c`~y1Rt7<1)xpLr zGEjJ;i-otDT_G7Y;lKlTjXv~y-uYtpcW@;hs{ET7o$;H6IdaA#1!Mh_yuu+%9@Ckh zyeQ;Q>9rC5@y{Xwn~?ECKC$=5Aq4NItuf}3JH}=^2OP9J^nbk=;-%G|*S`ItHfX#& z^5*8x|Ly;n9(XpH+CiD=p}Xt1`wu#(S@=A2Mmt+tXs>Ngbymdf+l7Zo#}ZQRL9*#_ zAzOG}>+^Qd;IrE;hA~WAEEMvF*SL^Vqcg;F8h}jQRr^?InT5IJhQ8|z8v6Q??%zK1 zUNm;jPT?ezO~ya`!=Svz;1=UcH*~50(YMfN>XvRspV#+<`#9Jxp?5k*=$o&<3|@!8 z-)ON%8SlQ+IYKw0Sh9ewmX7BjRejMyu|>k+pX$YUZ5+OTZ_N!X;?3@z80*j*#V)_g z%ioy_7CX(88q;bQ^JsDL_tsyu)#=6?^PC$ahrTb|VrLR$wB}GS;SQq@+;<;n@g)A? 
z`(<3)-MC1%-c$IL2rve9G{vuY&iu|iNJtv`zQezd2Q!hCZqcGui_R1iP9^AP9C}$F zI#e^yZk5WRYfRmvgIx~a-o8BYXU-_-*@X{39`F1cg{$F+bvL+n_8bU~e)Z+G-)~M= zm%H^7_jVrWi#uU5v*33Z7UYRb8CsGwr2 z40Tn9p=D)zI<-({Syw6#mEE&3sBMh}EKrlP{6lx*M@B#g@Z@`STN=g)!W>+JC^lO3 z_Qd--giCJ*R|x~Pj4tzP9>7hyRSeuDD!*b47`5*Rx_)_-dND@Je!m}&(y8UEJcE9~ z2LD;9mIoqz(Ol)Lt|F%_WhkuwKD=FAu6A1Y2L8$54KVWqf{*k8gwN2n`}wlsFisbeab!fzN$-U&_u?`()Ijb@S(?| zpS$hD^}N5hc`#nZEfVO1!^5yvc;8pdq=Z3e!P??Eprz}xLxESc9X!eveWpE2Gmmys zPqv+0+cyKNzOsF{y8J>?imC79Z=x_2t&dC}hHkL7xJ*d{vbNQ{Kty{@g-)*)-iCe^ zX7G>4cCx7VG*y4TPtXqU)<4l?mC&fel&212HghkE%lvU^}vx8ZxwLGcNu`b;_N$spYO zDK$LS`<`~~8H)ZP`?^9}AcoLxMi~n#B;aklnq0l}I;6L?D~$)eM$W0s7M60Rz2q+& z8i6$pHbAy`gkvwgHp!od;KAL^PI`ruzb7kA_fw~o6_ew613nn?PfGX3!=U5a%F#4q zfRCmW8sI0b-)>Cce)0Kdo0}PY_hqZ@LQ9@;+W_dXD4jcz-F*00fE93DL$paZFRD~Dbzy8rjZM|&5 zUzz%}5G)i}wNK3&Pfu}3O;Q;_Ur&+#=5k6^3fs9eXEH{2*iZGinKI2l!aJNtFe9J| z#+9o##!$@CHJe9ySEfi(7RRWX?lWazmHrtUA2Ae1*C9xMO#o(MSKk>=*=WdP zF@~B+$!cE);pGuE3mW0W!}iGG1EX-GBct52V(z|`hb7}`^X%ECh#%jJZvUJSo52Cz zJpRWpl40RQ9>QpZ=+mE zi3JB?JeqAy*aH(b9}`g9$kAy$Y0@dsk+Fz@l#%=4^@kaf-YgVD@fe$=@f2qyqEMR< zS7gS`Z!=)MQ@gD#!f+nrZ4=dT{0RP(7Y|kSJWRiy2~gEq9wqU*#fziIGMF|o^5F@~ z$g9n6WlXwUD3Y^|*6q{O}tixQY7R7B<8N-j&Q1W8LJ70h@u3{>ez-jDQzSA|09p ze_oM{5-*zcDcdATy=~96n82%wVTm{APACWXYft?Wz4p9Of7c$tLqqbKOkyD5S;QD& zL2DYeI^u`v4_$*fGA=$5U;pHZWJ3VOI@}X0P zCdLMRf4uCs^x$Rn`ZjdN%O>VizuJ(IhupXGhQW6kdeCFV4YgQ+Zg6EleDLsoypw@E z9%LLLgT;u&Pdup(L?a&ui`mt}E7atbXBFAV`*tR)r9D_1@i2wkY^N^oS`f8Zxo`8s zkKW&WbS|S|_56o_`d!}Ic{ny!z*AYT)!qz}+T919jFYzuxgi9Qup^9+5plehe|X{S z=A9P!@Z=ZYTpt7GYsu|{P4a~m(Y`O9H_^>|{ijWufAId<&8NTrG}<~mb@BO`yj%C= zQF-%To|(yhlYe19gzUO<{Z@x+9@*r}94^6020owgdAr8b{8V1f%3uI;UWImIjQ!C^ z7viyFoB!kg`m4>4fAC>rU~;Un1Pck3vFo)C9E_~@@{+uH^F~JVVqf2>4NJiIp^o}B z`BsyT2k&>amqRp#OtKiY#?JakaJ0Som%cEDvka=>&bb>sCkKbW>Yw`f+2nzC8L$8P zVFGse!T59}gP22Z@%LK1wBWoJ6K3Hj8vNwPKiyo)6PS_SqQEoT^=m`)F&60Kv)d;A zI+~oqOJ803a=SRzvuLp|I$UdKf#$s0_-a?h)o-tFK6vj;p1gLiSfJgvIhpaqVl5dh z?37(C39H(NJYpcuXgV@r#qk}v7tehC&87NoM%VDOps4@sk0kz#(#{2KS zH%7Ae3mK{S|n^djHmzhXMbNfql>dB z@IU;!zs#^!D7-wLjmN^MF}_2Tv5)M2Qb?1XMM#Tp7Gv=d*{dH|?Ds+0S)i<6y)Zu2 zx0lmKV?g~3jp-N;hb3E|1c#?ST9O;uO(=@J@tQ-iKY0J#c)wn|a%FSo-<;cAyx306 zJH)6F+OtRpC0M~C^so(uc2g=@@GY;_48yWNQhQoQ^ z$WcRmg}Ff|#!c-lR`!i_kZ&$sOppC~#xt?7-zan(V=tNILzY`G;z8~`11fnWlnU95 zM#fIw*2XRiWWsxp-4=3YH$bSqEDk*HVQ|bb7>((OLh6vA#^o2g3c&~WjJtR5-rW4z zAOF#K^nUi$zdGAI?IOSn>O!tz9@41uW3tX&f;o>~GxB zr^Tk{kf2?jRHx?|WZT=49C1E_MZwMHaywu25gH!3WiifJFWiTi;&f;WYTDJjV%oQ| z=#IuEyCv``xoY>H#efGb$lSSgwKljL?J6aHnX{L=)Fv}0?D|nL-!1r5z|hKA{QUbB z*ZoCwdl*ldTatAarcQqNqq&CDv14yeTZx@t@xvz$o75Kisu=A;gWy4neEPdZOO@W~ z|MEWT>!Xdy0(^b2@Gj6YzdZH!JN4N@D4(qnBWk z!=A+G-BREd`B|Byef8M^+M{{e{TOws97d6J1i!%?h3A(rofkbbz<8IZ#5O%hOX#RK z{7R#E-##m=(iRN(c`1$5(#S*x-x%Eh`Iq&R#aD4YextEN zG?-@QGw+nsAU6rhF)(2>7;VdwdIylJ^>4vgurSxkOmOeuhHl<3=sL@SAK4%VpL+KVzW8?)WMg9>ZgPcmC}BGS+T8t}-UJed(v15m>JJO22Khu$gE6EEIY<_*Sok z&1-r6PWq|emS=`10qTD84FUrPp6d(8Ezo`zPb#+ezQcH{rMs()5S$8*<(|2^=TKDMRPz?am! zZYFPdC=Y*+(zEkQ&7|oQ{V4s`Q|1ZQoVW7!4IgxwZaHt1ASI+^celNE?>q74R=TNc zX@)uCgD$q;es=mC8v2eblkZ>f^2n5u;vZQQ74s=|%AM!>(5m?)g14koZWq(03$DI5 z251Srm(s7F?$Cezx`yv6NB^CLuimZ!wqI?f+`e*+%qz#-58bMVHtN}=n>GykPPzJ8 zFW2^7kAvbhxcaY;c8M;_nBP6*dN(ko>#KdI+fEkGO5-nPp zqFV9+%t5SI(hQ$LWWmuslOM#2nKGtKFnhM`e+$|EZt>iqrbLsxq)XHBjZ

    500;dr2y-gahnMFVu!#wa|VCr#!(*`G4LC)zOpjQ3ylU{rLd z=20FO$ybvab$^h7$f2N&#rIN>A2z8pE~6i1UOUd>U9_~d6m8(gU~59bsDTCyyAT{!tURA1!`SUT_(` zh0Q?!!?lw*xr~AMmbVqW)Lm$cnKT9a{k*4t@}r;jdp`9c5AJBoXlijm+wtW8s-w8*JY4VJzddckIJ6c87;UR#Y4D5*Cr{>M8RW!R zvBqLS}4i7OR@&so1@G(A+L3F;r$46`XK3$g_(ibdHNy+=2Si}?4KKgwmB^T_H zu=_=58*)tSQ{MllGAvH|5;9Ean+&@8j`pa!8Cx>~bTP6k_|La=>2f}^_!Uh{FoqHJ z8Y52q&Lk3z@!d+-eXzi00Aq|GE1$GTNw)AvX1v%jLR-KcufNNBdcrd?SxwUjNzwe;rF^-fD7xC4BM2?D~inM><0RegFQSf1Q!M z5C`&S{CN_dsa@z6va?hY(-c<*;tznO7{H|ewZ!Q!PlY4_v3*N4CP=9`Qw z_4Dd3)X6L`#4F&6_d5oxuJs9V*yTNY_RNgQ#yj;Tr_hNJ$^pJ-I*?OYLT*^7#zXol zorQNP!D^1|$SFi#Z!{bWWLW3Bd`eoodVXCCI^uTQ~K zKfZk7)9F{*TWol-&UJ2vKnF!KY`LfFXpfU;GK|(w9z47kZ!WLPgL#wU$L+!)IX}CD zHe=8)uUy%=(t_`~j9wPk8QR2g=52Z-k7SGdUtamyETsJ67r)&3>%aN84Z8 zuKE}~^hi9YZ;%J%!~PB_-;+Khj0OTZSXo=F1sih20te6Zbd&TAJK5+z7TL6IXiWQ+ ztdBD%G6;tI!jB$i!7rm@ykTr$jIBv_UN z5E%ZoDA^!6G$VV^iF{-*N3UtGO+T|xx@|XdlFX6-u1h}V)hxbvG6;OcW;Bup9`Ggy^xa!G zzTf%&yL7o9THsHXFrr#eI9Qv~tKkmkw;2${nkI*h9lN84_}}dz3jC**| z+wItppB?Ub#?Pn4v_@ZY9-nhWnYjGPWx79p7E+4tU`NQSzH|`!oxX=A#5Hv^elMnF1dEXUr$J zj2Yiw{qyKoqr(R;v?`;q#TcsZ)K}m`4wFf<_<#7=@S(ZE;p%XxFP+~wdH(z-J70YM zdEYC^;k=T|M<1r|>vxoXdN3TcmxX_DYp|h<^yd4oIv3&kci#@3oa10e9i3Bu@vL7UU(E+#vV%@|E5vB-6xzJhgrd%yzea=hWRV#i;)O7kP3 zc1%OxWOv5i0QS^yD;}p?Z zRMYumn609Ruo{x!P$=ST4bbDIPyypPm@%qslZsM3aB2j-ONDeAaJuLL^Aw2Sc3T<* zazFXJozAIiZp-b`d(wIyRYq_IuNn&UUiQv9DLhwR&&N(ij*V)(%2}n$RCBerfy!2(v}Ur1K;(GL_u~vFx7WupL|{KF7X&ar+x~7$%2cvKB&V;HE==N zz}O(y6A#w&l@^pbocwT@x4nmZNDeJ~ZUQ$(+upDGmlhsqB^$&Ct`fFzdL~UmuuT0& zk3Kq~XOnVMHl`gA93O0yOy8aAt2Y9J^*^32xfg3svZrxWGJiq8F^(trh zYJ<;~hI1SE^$BRg66`V1^$YIen?;L3wZzb25TOu3RT)CX%qS7i>2s66w<9Fr8?322 z{_o|saOvj0=hH;-)Ih|8P0;2}Aqw3T?pgUf8-5%fLN5=eLHdpU^F&!IEglKEZYJ~a z?^FgHz5NxhqPfOj~MIotLax^zFE(tK8s5H|2w+7#Nw<@yDr477S*5C>r zoFs5v9)0m+JC}6owka3CjTdwo@UeF*zYB2tDeb@jbnYc_(erKoUQAso+XQb6_B^yI zdo2`h>aoE2+Xs^1Y^^I{dzR@(Y+}ISSG_fQ08Dw%4ZWn6W7O&3egflOg7=O>J#@+ z=&C*i)|^oczV|7?w(Os6vOZqu84ks~MVH(CG6>oNE-VXUhw~ENrz8yWUYQt=Vv z0*@xj+oTy9W#nxV#T!qnj@MC0P{QUDjt=G=`5rZRT8X15)XFwA9mA$c`xwESmzdBn zBI*(7pe&Omo>UK;2;e6Z7@=Th5>ewb8uI|;p~=8$;>N&fcfkMhpZ?R%#U{JI|L^~} zbL(y=$=5DIzU*m|ZL)kaZ|FVYQ&+7Gc6GSV!?uGKUVAu8002M$NklCzjyeXvXu#M@MxZb6 z*rs#CUvFOGlZa4RLOVo(ncyI&c=ZfFCV<+&wKg$nIao|a1{$%58Fwevcjc-ZgQqq^ zM`=yAc}2f`u{<~Rb4ojdxW2@bltJhE%^xPD!jWL#FYF9YOl`v$ZQ=r!_Ax;}csNg{ zj9J2`n8Z@pO`@-S@#Pq14i%^W@}A~92wg^2V~z+%Rx>`Eh^Pmntah1+SGd0^ z)I~&^b`~$)#L$FTpX8aWUzw=j=`{fidb$EVf zmo3hjN{8<9YZpdx?Mla3dImXLTZRjR3Ynl@jNWger5!Dy)>xoV6{aejp5p`fc5Tb~ zv11uA!l`2C-sF}ZWAZD#{s_*OUwjddU5}QhM_xZEe($kDSKPV#sL(3+E02m+UF#bb zWGpHjYI2NrVnFYJ)RWTQ{>1K!0a^3oKV z%J_f|KEeo*DLlD96c3jSz>D8p&%3f~eahm;TOn7Xhq=bZ!lazadksC&ODKfr$->?B zyA#La<@zNV4VHNQt)&FBuw)sv_eZ~9{rcA#tiO(4ymBqB?sNvkjh#<2Aj8GZolm>| z{)c<@VTbWDL^;%1jQi-&{rwC+Wcts)xH5)eMhlC#+K~~8@kT$L^Dl2`Bl(QmM>96y^#gWWyzhBN4Sn{Xe*e!q*E8~8 za9~~Sf^OtJ7KaCUf3n2_3%SGh@df&S{pUX=Yo2FN?tGE9=39JZ#HTl?x5ZiGi!mCV z?JuU%;=7j6yx=2l}6dJzlV{Z#5nkCTjRcA7~6_biQ<}qWJsDF093}-!$cx0PJa^u9rlGhNmhQ6ym zF@VFX)2C2ztj? 
zfBb6Br}*1{^Y7YOl5srGVll3biP~B_sG~X)e{|M&Z)psR_KcxC)ejxE6UGi1p)i)r zqrc7~arHq@?J;duD}cvieqV;Qkr8j!{8wMY(?9$C^60uk8Cgi8I~bpgG32owT;f9G z9Xq#pZ423>y&aJ2j0An^(6PJ8!X$&Q z1p(J&tNwE8z|qET2UoT$Ex9e<^Sqm1WZb6T9ggnO8Pnf{$DVc;zt65_Va5zX$QQZ* zdZ5kI`rO+*_20asEBDx&_!`ey)X)ZFG_UWVJxbC;ALJeXvoEjAIPt(P#`uTfRR4YS z@b2)zCl{}Te~vHF&JHK^xc{IqFzGbU4%)@a=|*@Ci==B;*`4H_rry=_^YMdStqk(K z+AkK~fa{`g__GvmSQc-c5m{Yxv}^^E(aaE1#n zemf4{_MX?gao>WX1#|t)M|%qyCcZm)Z43pY6DMZwC5>@s?I3DSRh^$D)96Qb&1iQz zy$?A+evuo((lAIrDQ@}u+R*{E^d>vHv;~9yoJG`t`{2Qy3C)L2`nNdaOMb7#A$c8o zO4h$Fe9q}ppEX8z)=#^|^cCYSon0H6UmmG!Vg2xcbBwF;LIUAPx>o+shSd4|y*ZKLM&ZG5Xe z@bH^>$vH#H8ahT>2YkLRoXUe1mMrin`~2y%J8$FtI~~#~1kdZ7AdDHOPPH&l=pMZH zvW2W#W#?!+VvP~{5ZT0oJwespk3{DhJ~hYoqw}lD5)xxon3}mK$a@z@`N44O{U)s! zKHirHvE4voJ(k{o45%E>S7%&hO4VQ>4FeeX((oLQAR2k5{#}m|3_g9f2InbDHjn#V z_-;$@cHnKx^o`njIcnYF3Uf7 zPv;!OrfRClK;g+GT35Z>)T{fC1_f#0@BhXiD9<#3vb1IShT0nfpqb~Z-YVf5?)_uP z4WBVaPdpGYV`|KcKN9$~R$0 zwC5Jrl7Rx(T@wPr-J;DHfm~007Eax+3@*S&+p>79T!8!anM{6tds+RIw@o*68k}U5W{cS7)?;Oa$M)S^SGGq0YtNF* z`z>CxVqPI*7z0uogau|L{dkX0;fW6xViI(nVmSRqJ!PzrS!nnOUR`7E!uiaMY2~ zr7jtbOEx^Kj_U0}gVJi7u9dGGbeTRG{8`x8lz~5$HSjleb+7%lze6|W2EKNmdb!a* zDtApH7mUd_=}X&3mZ3dZ8#&@?y3^*}>5nS~KJl!l>MAE3Ce^yt*WkRKmjZdFj4=F5qfOSqx%}G&wo-m9 z7yk`ER|Yva<;g!q&r;MXqh~zHw#N6>n*E(NT==Wc21xBVg>+vX$(yO#rapdF5v%M~ z?e(yBTORjd_F~i4=rr{L3N6qI&-fi!A31CTvoym~;iT>=E$K9*>oWE6cfE_|1E**B zu{_)NgF~=}R|W{WFWUHveu|QR7N#q5wXp)L2T;RL{kp;9r5OD1>GTzuH_u_!Wcv@_ z3-D6Qgq{&xdx0otX$#NfRXpY4KF&$#5hi-twa52^XGC zEKX)f6chGDh6+_SVVIA0>>qDe6H-QXp+e3z85Y0Jgi{y_@Q`6*Er*kO!(W&UvC(;V zj*&6?Fk(_nOjMpyH7i&icv^i8# z8&k&RV~kf%IFWn2OeuOi^cWbm?Xi?U##YV#{Kfqy3>j`K$2LqJ<&^*5{QB2B7d|`R zHq`HT?qmeE6<++(cjv+}Ryf=C#9b$M?|ggh>drS;zl~0(Mu8WLmLb3aYfZstf;aKQ zpYz~nwCix46ne&lBfHuX9p5mrhI${n0#21KU9P+qQcgyH#wT0YR~i$lyimu$adc;I zf`Rwti?*~L%6o(%e~i#Uuxo&U<6#qOMwjsfYXV^c$>_&38J!uv;jp(u1K&oo*R>gM z=0i<(EwGGNUTx#BHy)xEG#qdwZXzSjtJhB^HZJ2KLnH4Xp4IsOc19#jJ>GcLkpY)6 zSY_a8G7UZh0FT43zqvYwSO!y*Wkyg&Zw6Jg(iil9eb}UmCp}Lf1_&OTIy(>JWylhW zrJWD)z!#Sbr;@SyTHA`fV_+110KWL!f`IxmcxVcfAz@VZ)Yom_ex>c=H3wdfOo>*H z3OQi0AN%+$-` zma~QM^@BoJ96M&)e(hHNi5nh$@k_lWT#>l843>osv@;RJ@185yA}+jZqUzlQyTymY z#lX}zOlY;4e&g_8-hKKw%Aeq%0m@;G4#Q zsSjF+(stAk_?loEJJ11a^b))M-OKs_c@__B^_TJ5D*YIGgR719=gp*yS-^-U@D$^D zcQ6?C;NiV~5B~6817OWl<3z#A#hdS1fRPT4FY{jOrgvb3YrhOVCdK%R?#<{XR|y`4=D zGxS{g?9+I-_|+YJ95JKU?ww0*U1xmyFaOVfEf)7L#ITnr7(7 z8;+MCgRY%Xr+Uv=R`c%MzoT9mKB6HRDOUfRmEJ0sYmdH(e` z-$eg|J6~SOJ2OL+SorD>Pm8NU;xJ}D%xI=RKWxF4=eRIO({F1Rp$IGjS+u2t>$CWF zb_mqo@Dz?~dl0W{e9Y)?EOO3)1FVBG1wn^j!`RcON{Modlnp zJu}Y_#0Q5m?jFgwW=GbjkI`6|rbC6UusBOrU5($h!{v4sT>9clJ0h-5+2i0-hlHE{ zPgauQCk}KVbh1P|?wdEhpGC4y8ovkM;QXu|8m@ngC-MKic7z#^d4N8+pAj|OE%ea2 ze#{6)pEO>PkN9VQ@=(b&x zS8%rQ?H6u#iU~7w>{PoPdQX>Ol($F^KjZuKyJCCeBjNe9>w$yOq_(RJMLPV@;SG$OuZNKYo~rVN?EK+R-&Eg>yX7PMo=-xBRT3%7MLww9jX5Ky1;nDS9SN9 zBUpqGhHo7t-y(MOynFA5;IEw#;44?VkTDJjJ>KpP`rXNPu8@!C&tGT(K3HrjEyp7IF1&Jo}-!;i^_wz*gX8p#J!f9_a2uQE9i8SYzgXRAgF^+ zfm;PQC9m|nT*vcaUM`GAVqh*GgSz4axQt-LQ{_akc^$zBg~79Q7-xzeoR-q%36bop z0qbq+cK~-^eoQp!02pABG_aJ%zo{?S;Q<5n>`%BYx}d=rDh8?b2gs>kPzFbJ=_9|) z>8Kw+F4t3ew882mgN?eAqUyN{9h#X`uR2jfy^;nXe_-AxPl|;@ck|yn1YJLqX4*k5 z)n&EMy6)|2$iP)n*(d^YU0GXn(B?zRl*z&1176U;Hffs|Q=Yb}h(LHLE+JllGt@m? zbOOlpzMf5+lwcB7XY`n7D-V1Ip475!9z*y;LT>VJ@%7GUaF{|jwC&j#O6RRo%cpE* z4fw%Fj)C8==Yyg$`ws69$oVsXRu!t}RC48AsLO`w<40caH~nWT8`fklNSdafY2DJU zcJ9Gq?ZFQyDb~S~r3oI~79RDJDMyl_%A_8k^JogMoKkDoO7M5#q~2XFfIZv-@?M`# z!HEe2+b7yY+obBkw|sx;Lp_%$_(R4?FV9Lpd@u$@={)bVL3)hp!Bv|jyEdq?^p25! 
zlLd~Uuq%PP`g8Yv7C)7@0c&K=&`>$>uCTeEqzkt$$MbjaSoGcyG+NZh(Xc%5S=XC> zuee2sf!S*hHsyCUecrP^5q0$&^eEx*SUADOwY>A4T2Gd#jc3Xp+2pYYRa#vlEUfgs z?Rc=#feUZRRv9Z_k39?UhEj7g@?jDOM|-8v%Kx!l6{}2Tdk$ym12)(W@-TtR7Jg}D zlWg8}K>;nTK0NQ@o7EoDtdG7m&ph$ay~|0k%5|;gaGIj$&w94-3de0eV2wqCLqBlu zdioqV^7;Zg$On1gdyIDd_FyGha@Lrw7A87C$+zW?X_^q8aeeSA!L(hP?${6hu{ zlRm~q6DboS#uSaMX&D23(0+_}m5mo4Wc2)@n3zIyFlLz8@xC5fmzP1Ax9+2iGPg1? z@y@)T(OCOEV}wb;=W#CN!nYZ(?_|W}dHTi0Ccqn>VpMpQcew55l=pWIwKBQrdRLQM z{bVM*ZDF^inBhzwo_em)0G$u#b-p`gKx|wFwC4^aO>su&{o%7WTp3px8h8+ynB#+~ zOOsC9jK!80PxUD;MTUbCWQu6JcplYFC=TS^js}c-@0zTlGh;R*lC7c^UH~;YdX)0d zTMW-q@)=bCG$A+PF#+RYIG!#|CO!Z2*WWdH%1{ylK+TBmRDpk$0UVHa;Q`}ovNyB|A(#~4I2`0_9dKF?JB++lfEyHv6UeT`VMY4HjJw zWvm7JMLcY*xp?8T+Wv4xtqxp_NAypz%8xO2#={mao(Wl04M}WmA`Z56ms?oW=NOj$ z^{;-FtgxFPgJ#D1w;A+>H(@}%)>y*R&2A@r^wrmSHYZoE|8TeZ7aFK`VK8&hw1wTP z9dLLw!w;``ysb=Q5qV;aJ(}^vBA)hy*NijuV=~0_^I1k4VQS!pH_=BKvp81&*FX35 ztBqe5?q=e^)?SQg6W6!)V4x5J#dvbG@!|P_wZQpl2k2RxefqSpE5T$~KGwMt$B)?6 zP}_%)F~uT}kTAv^Azy?L(;4ugg-U(+$K;O(8`rN-9AJkG^7bVcN;>`TNj$*l z&nSMdes%mfuX~|((ibZ0u=vf!&SbEKCkAz))ArO~l{Mkt(nA;r$&l&O@c{jctXn%U z8sko%nVm{PX`YIY86fFd7DLH@<ZnR)#Y^#KF#wpFC(WF1@0<(AffRX%<{oc>Yd?Lxez41~w`BgA)1PN7y%#^>@8%dBa_9!5(bv;o-47 z?O$Y!kG1Lx7QU98!fX06+2Xl{W%3sseRhB4JbZX>JjD+mY(c7fvP2$-TGM}Lw`607 zzGHz&jBmQBLqPSHJMqEVb-)X~G0)groz}RFA6k?;eQxJSGTOLyvPEto%8r!Z!rX*& zDGwfSMig1Sd-(T7M&-_(Np{@6akV;ZcGUc(NXC z$VYPz?Pynqw4Zdiw{e%l2t&HXBMTX1r}5NyY>tLL#%=Qs3v%jZyu)YHXQK()s<(qe zjXnC^`SX`XKQmXoaQRB}kh3ku-H0~NlG8crq%$3e{%`lD=Qc*sA?q>on;IO4*KsiW z^n;jYT6X$`3Q4RddEW25PWi9X`Dk<H|fOB8v>_~hEhum6*-iv@I^56m)X z`l-!4kVc{{Uuh@GWPGR&!)9m#)WUs0mThYk9~#Ir?eE3Ot>=r1-Uq8ki(+cO z(m`o#r3r1>hL=9(b>Dld1Q-@t0by(%`c*Nnh6F=KiNTG3pa9^!l*c^)E-Jl(875aN zxGt$A`m`HaCoLM34LLHY~^l9Dvg5rf``TfwYu~f!*I{h zWZFnNNu^bvo=@LHvj)fk-C(gi&NmG+1L3G#wG}vAJ=PQve(+;w?H1>O+w3 zH+(WONx72-wD8cdg9CilXUZR#!LE$e)>Go(t@d6^k9M0DoM+7-Cg;GaT(G8(OntiA zlr@bNKMYO!opdrGhdjy!zdE=YR_r-`6-WOs^pR;5J@u2fI?6Qw089`}KMB@?hX$@o z3qITym}%L0K4q5|7-=TKN(UFPCyT`B*k5V3^q?5se(-GP_jkmIev#sq7g;ByALo@_ z`fsk?Nb!F$-l-A6xt>(v+Xh-<4G-@iHGM*>x^q+?2d9C0w$ddD2#Uw zIieK5Ns&Eu>R3=0cE%x-G)6Dp!`H5VpR%_rLvF^dCbbkYa2Z>{`zeFSu3a4<=76^h z?mXtiMday7k+nq;eQafA4CEba3o)-chQ-qv9G+8N2k!Db;JHU3<0)t2NlD!otW|%6 zOz}=ZyO(FveDH6u{hNdJHhsz#Z~XM4$vV22{AkOWpq1Z5gggX~7oT^>GN8bb!PsPy zqPsU22NNh;MrQ&xig4j*Zr^TLTd@P>-+!pdNHn1=YjLz>Bzl@R$HNZkGMSqRLAZbu z`STvTJCmsgDU6H}ynX-b7e60dZe@VSJ3N>ep^Xp>5_Vw-31TwtFrAxixfUK~wyTEw zl`k%rlF=lumT00K?%wNQsWWr8h43Lc$^d4oHp3``-{>*5zez%1hW2nX34Pjx1C1TdY(&-Z&@f7bdG?z%SjWXV28XPmQsXp-SJfXoBAvfyCfs9GE^ABI0Yl{mFt_ zMa1tGU|wacU=-9hcs?;`PP*D<#{6)>SL3CZR+61(O?su{-Ot!+ z(#{aX0M1Kvj6>zuUl<)s;0;&un?TQkS$rTIf{%q5eQsZqcx`2|42~v=b{>GEy~i*Y z-L=^m$?7xRj{MYiKJdYt3|#okXB+c!cF$xqG|7h7^yl;`{c#Na!QfG?4@?M?ctVBY z=7aAnRQM$~KV(2-I8`=T?Sq%9&A^sU8Qz<4^NO2KFtxJ}KGIhhwi)huID+-6kR#fJ zEIANNw6!Seok{!$bHLz_S6WD_6JtXXYo4!o$c2X(14U&dyM?X#!#CG6Dxb|*adPK( zU*A-zog)l#86)Uc_?XenqNfQr89;v91;mJSC_KLS{8F<1=7cDE`ixhm7^3=82GaO) z*Y1pw8DH@K;X;MrlUXRu!Kllg7g{63Era#F!hMm&ChUy=$D53kiI+Y-w{yO+>(dN@ z`tGZYZ2J3yhZ!ilCogAbOMODfkYNISF1(*T%?r3Ply&Fk^*nURl>Kvf@4}6W%|WvE$*x`vXth)=(V-9FMkFdCnS(z&n+3 zLtoU^@Vocu-i$9{I{gywqa#C;G-UgUWEr{VYz7{t`W9m=gYnH9H>WNKlO6h_c4zP= zqHnSHJvO zJlvt!KRBo4LyG~MMX_-B{4!%*GCq~lPK=v*GoQ+Ej0QJu-Utu7914q(Ayr>8M%=Yy zq2~_PbZ>zYKULjnbKC7LmK-kJm2hfwboIyg&z`K%1ZbyE%)$?XRG&*Npjb@Y)i`Fc zo53Z98@@9x?>o4sy8LRquwVSc-)F>r(U}#ObTRC1kuKf&I~(v@nHR3bw*BX z_}7y$wzinFyY%`S`FXf8N+^V%l8f4&Vba()J6uX*(QMy=!Z9WP84k&XxAh(4{?2Wu9u73aB(5dV@fBl=^>|DNldFLNG(DTl{c9)czToKzE&BbLdcW_sH^3kl( z0*?e>4tNYUFJgMJ^CTEb$re0eyrf6?SWqD!XVE0tIrHLn+fCd2IjbL&^7PD^cZ3uDYV_pV 
[GIT binary patch payload omitted — base85-encoded delta data for a binary file in this vendored-dependency update; not human-readable and reproduced here only as a placeholder]
z$MW1Mr2LAx3SB%aBwZtxXhf?{508dEVS6q<{uw;UqxUP#?k7QsxIxR33E+k&LyAxD zec3)3H~7jY?S7}8%YXEZnEfuEp;=h~1bvcQPsGug?a(s$OQ*Ne896PGJSOZ`UJCc7 z4rPHr>IKc}qx-FV%WuiKxZ1k>7hlSu@OaYi)Lq}F?mf5Fvh-Z&SC^BwuR(3Hn*pbH z+ZU5|U!^&LC*r)9>(af#Zj&7OjiP8)qTRz!A=k612kG=X&-^dxqKzC%eX{XFC%ZO?U&_^2z6*dYjz|yKLi@>MA!%$&T<#R(9@J`ko^NNt3^<1u2!O z-}J@EULnwrj?tm$nQ)VG|L}I(<|Z%cD?owNqdG3trIXZPKilx`x0O+V>Z~}}KUc(! zZl`x*fU*IpzPo&H^UckN1CFyoITFas@`)c`lb><1P-ut60FP05Fu=lA&~sTAJn56q z=9?&sSZscn6dLnbd&+m_g^r zM(+)@(Z0vTu6J15&^%SNT$p&rTJlyFG3s|Lc3v%691AbNY=b!TxD4W?+&<bgJLHa8&g-^A8fKLpP9@%d!})OaTsurFG#L=4Cu+f37H8k%N8Jx zICudlwR>W$&BjexliTD>i&XaKxes_2D5Sl-5ieP{5lh_!$&L^U8?1Z)pq?3@0PFCOtdV~-E4zcv zSo7LquboPFEQ{4^6Pf4nb^#;lirTe_xrvrO+Jc<6Y-d49P92kB^`;UXPUXFJ3t(k+ zr>STQCcJn>_U;EBj(&-k=(WIS0;zohW2&J6CkL?Hvkn?Gfda&PH=tg$qd|Uj7(mP* z%h=1pXEhLgK+<>>sP?$SLibfKd;fKx>oEu7BTO4t&vYNXAl~>CPc{KB z+W5GQY3#6Evt-p<+jdCw50@@&KKtnX>L;u579Cm4tJjgE%5eN-i!g!d7FpHh^wH6% zPSoct7FM_VNBpv@M_*-B;cNkI%pqf}{#ajA&hkEU>clKUif7UDeiLUdwxmfXqUYL; z+wtgei*xTzTe+0g+lL*r$n^_fZMaEhIe*SbNZ7;<+rm@prc62ma_!fh1r2=GWZ=zOtkb*l;cYa1qc0 z9R1{zPpAC%TIjlU`{u|#9jI@&!{Uc4S7%$jGHd^L?%bHgKLEw|m%gd2=LSRo$_<#4 z?vIuE?0&qtQN3DhJAQNvFhuL8AAdSv27U3v6R$+L40xcjoWzFfMbpR2bIFMc@uvMcCdyC&?S0le*R_u8xU zA70Wqd-t?RRo!bh4(UvqD2z!D4crCMKXwq@=%Cw&eH#6dtpn*ZI-LOlkmXv3Nm~S< zKR#}uz3{jz_1nF5g|f%z=5)|2SPPeHa5^ z_}#1|?UVo}?hbgPumE0mRp|e@O?nhi{osLvl-m^)u=8%f*shZuVZkcXezlziW#x2s(+4_I|p1%5v-+r~Zl|DB1ut3#_Bpo-M z!HiYtTjei<-mJdC!u{X;{a+=^fpiD?LT@3lHnH?Y3lp_Re=NFZ!B9O{C-g9|)nbmM zjLR&rK7YzhPzD_J%@+LBzxetv{*M!e(g$)T-1FLEm_DIHKZWn2)96CuCHMM^qwVZ? zRGGw~`{*Q#?E8B+2UnIKqV^wXv8r;iDlV&ud$+dsW3u$PMTM(Zz7swDz(Ty;aLK`q z8(ID)*LQATAF!Xn02ewIe)xf2eW@{Aoe6A5Z2=U@%jrqJ>Kpkt}O=hs2-o!rnulR#&}-epq~A$-GY%{8(MmV+|j$~ znJm)r*`k4d&G=C}HNF)_pJ`!r`k300y1M-RH=Ao$zR#cgiS*achu$B)?EuI4a`xN> z?3@M5S=>lR=-bREUppl9&dqpzWb^Ys_`{JEiv`@s%npg#!l{$zljrnfV5@P8vmIWy zWA9`-^HyU?eapMGQ;X(y95JfF8;V5Vpp3I9RJpHT`(gC$haZ350&-=p92Ob4pPBK! 
zK0iJJ`^nz<_dcG!k&MhZB)&XrY^;tf+8s+r+}V3;blLUJ$2fcbT=I3I#gkklMT7EM zkTlNJzDGY6&(46e@8#=0P@Un41p#~m%$ej#B9Xn1R)Sr&ALwezZ+yOGg_~F~Y^wPL}>sor>Sr_$}782fhx3n)ec5O#) zeICAi_Os8Yu734*f3>;xef{j|{K;202aYxtc~UzxMr};vGM?j3Qu{Q1(N668+FhGb zzTFu&kT?06ck0_|dj?B0*Qi|>E72!(KI0_vQ-WI=NHF5NS zv3q|i9mh>pcYx&us(HG7KI*aHGf`j4NM4iH-1Mx@UY>-$M7t$WJ@$Q+)rz-(oE39G zi-dg&QG9^Z(B1t&E)s}YD`6SAepdz{BqqsdNb}nLJnc>Im2Ut$N)#=El+U``(yS!H z&F4@qP{Gl$u3pdk`5r&FDXq_hn~N3|xPZ~p?Gi>vWts0Hm-q0(&t3c!(A#OeLR3h??2Y9n& zOi6ZRIjTmO3N}FtJNfRUwP;sQ|}U-U})RcukU2i zwFAcY?6rYYY5lG(c-Wu3lWq##qh2MSwaIDweVeM^q4S^cU;-=Cycnci;p`+DgDCuQxqkbq7-K( zqYmPMxLr$zRyhR}X1-Cfg+bxa*S%K*{0^<%OJeGGrG_5@2{{0|2>oqKBeQiceqqV2 zYh=ggwoT8|$*h#-Q=0DmRmYPk8(VSFxqYwbMHt|@v{#>iU*7jho@m?Ffrrx%)gH#x z%p|q=H4)Nrt4zHZo++nDh+2G^bo;gf_O>{EiTmY5M^5JAY=Z;@Pnwf~WVR{n$m^uF zWJ42ca!J|pKInht~&wLb^%#uKLJqkS9bucjefbFfNRh=oKLG+Ub z=msqY*TAyZ?E-PHs3uP{>58sXZ4b9apVg?y21>30Zh9}QNfHUh!*O}gq!3L8^?)6N z(6K@ppl`GJ=;DP2Pq~$7uypbKdkvyb&(`(7{`^x_#&NOfpPHZN)agR+9#k zEb-*2J)pdIMO4gwp{x3J4{ae+oADLWG#K=MmFYoc*!j{c9{l8j01axcOH z?oIua!aX+mj8|M3?0T8}Cbn}RW0Nvl?T5BFO$_<*HA%jC^Y-S-mCNH>5U6l2t2Ptg zd$}nv0ptG~SY&%Q`D6KPFbl}TPy8bjB^e*q0svW2Upu^I#W*_{;^Xe)EVfywHrXrg z-u+pk2RzJVIoakbnIC8A1A)i{xqcbYJ_|d=13^7aJ{Fq zefRd=nOrFkE4JgEf&|1p5pck&XZHcJ7qA;2)E58<0LDGa;lL7ySpjqRy;=)H+#I>o zopuJ8|IKeZ^stH8@Bh3>L(l!0xZpKQa|dvll-wRm;pKLN3kO#3%L3D&zllzDPX}mI zD-W)avWyD+0$iO+A6U@h>sdQv?a3-wJ7qO~__f270I$hSR(fhg8>GV>>ZQzfiu{;0 z<>+FSN3WHuNv-yQZkKvn+sqgEVDbu(pPe1a9iY^tR{I$Lt4&u{nP~0f+e1Cl>$`Csde%U09@{K=r{QuYY~)8`hL1L>d-y_ltm3XlJ)A7 zEX7A(;XoPTudUGP&6aa=`X_(+w@qG;51+IjR$X|m%!bm3n!E#7fIiZ9AfvM@EK1oS zu65X*`566~ z{IKQ*QnO?`ed<(;S}klPd)#4E+yTE1*FOMQZ{2Ilc~*B85cH`>Z7qtjFUK9rlV z#-I9R`dpvG0+C+%DR5cc+HP+Pxdm)eO;2A9pwO3n`@;_dZt24;GH4%$OFr!uv(P$= z8q?;Ik-}J@q5JU>Xaw9~xp%rn7na2Mcr4ilUZ03>|NbxjJPYRchIbbHrH)Ly`>NLwexDpcXBvmBo?+7#E-NSEO`y!Q5VJ`&YgJgy-#X$ zdp5uP7k{*Qx5ERUw43gWUmu8nm&Xd!LZrn3hp3YI1A*Db49<_ROHNsL+~qVfm~tHr zMC7h2#WkQ1I*p|OnB>ZQ@OS`_T^?MFISiTKa_+Hi-Mv3n;Q+ojAHUfAZ~yM!#n*OQ zWHkxM{||rh7n}d?zy0qwd&5Sax7&aMpwswpwmZq8kQPbqw}@dx1lZPBbC(9#w4)0M zeJkKxn*f0S@(+G7b^H6D|K8?r{_bx(pXS!)SHJpo{B8`D{=A;ta9Lmx_~)N~JkRy# zb|m7xTr#g6;LDy@&Ih)BHjK%5E^k!+jD9pV(*Q@7tF&1vdW6?0KG|HdOp>LP| zu3$6BFTe7b{;qoBGylyUz^0U z-(|)B{pbnG09gH--~8R=J7)#dpI*55@zja&>ZA0%@kBy9Z4;2FFXihQc)c%qJMhfT z!1_YvRSt#j(q9-m3(ul<7PGY~{Oz#Jci;UWUL>#et;bjg_ip-}c&H2jcZVlhxUviC zN%{_0$ub+Dt{Kt250h8@&*7u(CMoTHCqMkhM&t0|dHt}RNduN^>*X}LfWE5rZcrSXVY-S2$ZJF#sHwamDhOVLG6#ux)?zI zIR5Q=(3u{!S1!J|P%;+KKI{@O29nM@^?&-EZ@&FHV}#oS;^U9LP+uD>8vC9a*Ffap z;)m&hbh-A#t(#o~+`TXWxOwx+#4&bp+3~q+cRIK_IoO!ucx5h0~iAzCX9 z;Y)MCk?s=$=?z%4VsHN!uXhVDlOVN;n$^e$#2A4sR}Ftl<4_^1Eu*(&Ox(Bh4T`hg z*a9~B?}Qlirm`wu>F!*HxWbJEVDG5$GTC|U)zC0x%wq}_?}YQQ@)^ZHfh*5E>z#02 zN+#tY0be9m8HbnK@=v)}V5v-uRTmm$>AdBIyjPtJttC2amt;RH@;2Y-Sb%N`x>t7L zr9bzQ@@&POH~-{2a;D`@Iiy*c3p;d889a(Uoy{aN35{qe9J<%@q9|`!z0G^Vmmc7+ zWJXDAd0YNZHtWsg=exYtHL1?6xWmX5VbLdTapmcKWy0$LZilYkDa!9ERj9>_Ma)VE zITPO3Z5q-gKYr^ird+~^x0C1kGJKeP{fkFLX7ZeeMey0L3#ALx-ad=x^J*(gt0eM~ z=Xy6;%2Vw5D8>$LQ#N@_^2395x8>nB8^SZPIRU*}vg4U=D~|#z6MBUsLn9ON^?mzX zc(-B+4_G0$ljKTw-o7ora1)`AK6hx|<~0DhdwFg1nRZfG9X|cA;&@-U@{_V)xVyqO(3P%c;%u@K%cFK1x2+XVA0(j`OEj{80)5mIa ztAm|hMsh+T#ch{Zb1zOmlg{Mp<<8qeD9@JO`80CbmS^&r_`(RR0X&bYlOA43uUG4zFPe7dRrR(k+e{{GVWyq; zIc%1u@_0TEs#83mzc8@4lh%areTAL2GUf3CzXXpS-=-|6>FUA6Sef-e8M+p+#p%Yc z@4fPNXGhX{xJ{se(w(0kZT|Pa{dFN4L^YYS>d(jXHvU&uF0ee}h zv5YiPXK8>K&jCeT6pUYPw zaES*z9Pk?`6t9EZawYI+^M1Zpuiv;<9h6Vyc;2J|h@`I0oIc$o{7Uhgv^O{fG#R93 zpw@u??9fF=vuffGp3WpA0GMl8N?#A?u-Nd+Klp>vvz3#pih#=q|EYmJ3o#d8U;ZXQ 
z0O*vZeYElMK5d|M4+Z!=&2rP^3)pfa{s7I%J`iCDk3PV@%Yq8NtABhMS4icL24FZ# zOcNErxrtiUT=}w2?%j_8ncNoL4aC#_dNmVdd5?8faq(of^9FFKN88Hxl%8EBCdsTX z0nh-Q2Ti;jh?L|_J<~I_?g93szpr>EdVBB3|H1>vxElcsMOC^ecQNr@SiI-=G!hC2 zbg1a%58x%IGl6M5)o)>IA6bub+rS!ZfZ)KJS*Ykcou}{PzjDVFk+!wKT{{8D)Y5b0 z$#ZoF@KrwiK%WV(dI$Q@F~i8AtgYwer|pv`@?~K}+=H_-XMr5F0^XBx%qFuY(8_DU zLiy-m=N{Y(xI)Ly0+%+zGWyWLlYwZroA3uV))pV<7nM+qONiR4^pyqpWwNb3vL-d@ zBd_Gyf)Bvm0h*5=bFTv^%8!0+3Sh;@F1fK#_vEJ*WRiQb!qQ9A8Vmj8ht-vX$5?6u zr%t`Y^$Wi28n77Pd8SG4m$eW40x;qA*WZ1&`Q`6@w)vC4_{*8tT9EwqhfA{~W_A{& zS5McLxkho`#jS6zj1E?Z-{nIZ0C}oEZ2*`9)W&;d+#C3)%rDv<0ED`pK0g+yGjqGz z@bpi`Gagb`Tpqlxp2_D;#B4HKorXD^Ff0o8?V!04hxE?`IY6d=3k# z=1~?K(XAi*F$+{a#PRc3fHi>mPS$-~ZD{)r>2xjyJ;}c`7cMO0`I8@Dw>;@Mf_SES z({3JDk0XQekFI;Y?^Qm6TNq4^EKsQLM|W;+KK|&#ks0m5ZXtTiV#)FRE`Rpf=d;s+ z>#QfW6FdNPbAfWTvms9BA_oAh{qa3~^l%I4Tg!Nd&Pi_$%}zJ$+ZE7Q-=)7BtNGf? z<;&maU-;7+J%IE{W!sz8{cpc*X?TqwpcK^Uc0+>^IGX!s5lV+kJTUC-u(RYpJ&;5 zr@ru1E=*2Oom;Th$6NHIpAKa8zQ4r*fSdmMc)QW~z}EgBKKgNUJ03rIvKF|`W;M>0 zm4z(sRP4Y(A3w{tuHBg3SS%%h-7H)0EZ`rYPwRgSlj>QS6K#lB(*~POtQ9<#f@Pb|k%OZ1teUtE27w0mk0G zmkW(_s6z-}q%XhCk2`Z9;fe`1X7tQ=)!8BV{y?zzC?91g5aT@IKj{4DM#i^enbIk$@1fJJ@oO8}Sp8{npc zVT~vB=kzOH1Lg6-7!lvc%}eRVI^>Nn>NvFS1COs>$!Mp=t~2M}PY<4M_u+RqTbhlX zviN@3g6fTA3VWPo`x^gfdv74aMAO|^v{xSfGJl>WZ-9?|FT7i7VImGF&5rDe15OCvfNSu zy^V{GcX~p7 zx!?O1iv$Z6kjPn0yF_y`Jb>}NQcyE9ouvVEYOxcDo*^{ z=CQ(SvXJ$^~VE_@B5Qz+e2>w0Q zdxOXO4f`LutE;Q4tE;Q4tH)5Ww)6+B*K9PSq=wXZPuK7=_!*;)MDyv}U=X|+Ok?C_ z)Hj(xi`ca~Vy+$xepaKFR0u=3^qK!P2)v{I;0D(z5sgYGjnc>N0e|-^`}*$L7*~|n zJ6^nH@C(I(5y#85u7CVmfGU5RZXiq>tpYqB*n>eJe9&@A>JmONV*loMnuI6rh%}Eq z-{4m|6V9;`Ouj!Z17Jg?x(8jIR5I{W6k604%9CphWJ5P;;>Lh)aUW+YSJp9-%Zi0jG@n_@bV9w5|w@6G1e#(-Q`ur zz)LtLH8%9>`Svq>;U*kQJN27<{`mj^__i(FPa!47-*D9yld^Q>tB$M2Jws3E^w=|y zrtKh%W>bF8=X)uc(x@1^tsl4DgCQN%Di8>$tQRu%aqmwbO~Ce2ruhbUb`>n5l{dP3 zUD_4*S~`jP9%6Jo74T5P+GbPHLAPX6&t8oz+~ga2xZb{>nr!M`MtJ!1-_NQeApM09 z{=xf5w@@!d?^gd&|E?!_^hyR8G$w2PaO8w{>OUofM0EDE@BzC|fTu2lvnRcuHR^~l*B!ja8?vpEO*nwnR<<_Z%u~r>mXvQ3aE5Uc+yi^{nNoERD>QO# zvd{3iJbBSrsvweQ-BD;v?2wZZ=MT;k(xHj8G?Yz7AQLr8He(+VpnQ`c3b)n&>-TuC zuGoJjgsXmeGvq-whS9v#80?yBg|B#Y(KR?vAEQUOXrJ)lDYrkR_08hjnuPOgCq7O7 zpzZIQw%M1w$zkzQYpYR~QW_cKfBm~(r|f2kXhQlZrT3jCe3ZJ+E`Kp~SJv==x<%i; z0iv*>3uA&w24lz=&eH>=*I#+%e17Tk>X3miMHWpM%#LOR;wcG#bvmB0m8XSmI_U}D9csR_{gp>mR;`KbxqS5|BlJFRR z7Je^w6=SnW9=IlPXw1`jcp)xE-wA0F4E-6up~s#kT_-c>@#N%@GZVyO-V3Q=0;3JC z-?|aZ3{QFO>}i|)$b*EIIM}CZn@iWOPRJ5{a=dseTRYurvS0y33~1i&r!(X*4m;44 z*R#cjBPsS@wP+wNx_)*i;~K*SgWLFD)Enhv>3A%<&qU zOz4_Aj`Sul_*~lvH4wpvzl8d@*CLtzeYU>GLkXXg2_{O1Y9sX%mZI$?895i8yMr(8 zEF&M8!>DB^gNd2~8FP41Wk(6e`lw0J_e}^hyfLWELP}}&9Lzg9TJn~%$TWvs<{9dP z|L?~q@R#0XQ=9TgnjJE9hKkdlwFh2d7+qmpn#?!0FzVoEwB_vxKOumof7iy0=je1G zk0-P_)T9_K8C1{!ohA-(Fc=$!{$OB*&(Vy(>S@BJP8OZCvE4olG1A?>dut}9>X1~~ z7`K;fba*ENgLf4(vSdOE#V^KX~`z&f6_Oevy$} z*qgANg_}1!)bmBK9iGU;-!6_Dx2}(|TwP8SpL}njHSqDlo;Re(HD{!{oCXW8ejLNATwR>CVduA@H|7z1NYX_d99j4e=#JDFGu8>Y+K&w5CN&5Jm zVoMv(@R)I1+w!n>_#1rjqg?{pYU0Z$6D{B!Z2|XbZL0lwM(V%h=kEH50|f8hy&c_7 z%-DU4;l6Rp#z0u;jH}QzpyS7r;ET`{qTo z#;c6erm8~&JYBy$T>t<4i%aq5uXld>(MLP~`9J(W)28pfe{mcIjH7t?b_ODczcDx) zCqDey&*QszDLfc-YQ(-D56q|aL3N}LS*X^xo=3|XZu%-C^xyo~|FpBK@$${LE{-B{ zphc2B;q%~ryA6^rjL=_ZjQsO|@@G5$=fC}{-W{5dEyt4&caojw;@y`Sx<0@9_0Hw+ zcQ`C&IeY444h=1mR<4~T6dvK1w7rE!lG_;>_{y#au#7!duUy;t^oL6$bAR*kC-FJM z*m8WJFIr%;%VQSR6To(uSR6fjI>&{|c8(2MLbgy$@VEX(@3N4j4`Fb0Nn*~mGidb3 zHTSJe!_N+dSyYYY78EU~DJ&k+__7C-M`j0Vo=b!jai#Gcj7Jdr%7Cnw+=w_h&_}70|Z2eoy*n9g%e4R7Gx%Xz=X7ocRG}zYr zVEU1C5&D)z=0o)@eV;s8 z<6jG&jVb%$uR|@skSiQIw4=qVUAtOvNk-7qG7C?A4yDg)L%SO7qa#nTPhZiG_de{Chhm>q{tn*KULGx6{VQxcJ#89}n;7Tlh%0 
z83&w_&%0jUZm}UguHKwJPP}nucDPs&ocToMlXuT&{AzqXa;Vtew;QjX4BefHVL^T7 zg~R!gI~>uTF_y0MZs9-labXq74YKBNy7~RPbnqNi4kUZp!EvWHWE9_j&_UA1mf9uV z>~6HS*uxM#vZ%iHxG}(@qj}Q7L(ZXC@v{$BkC)Hd!Ik0qwL>o_%#-sw$U!@2zsdpQ zW?_>~cjg0+Yq9Nx>Eb+s51%HB$%})f#rxWnqlraP=hJ+fqsysw#)v(xq7NJMjjv>u zaZgAfhI5BM7~@qyz&{R<;%s8KxKM7X+YU7!|^7~fz$st{J ztl`(QE`MxGFBg8>XNr8)e%f+?>XzZx{VFR=QmmIDOnu?d_s}o^+d6v&;Ci`e=Q@Gp z2aeMw+GF0IJhgUeA6FRx~MX|H2elE%n)uB z(YwhKNcCsj^8yXZS-SPSe#6VntEtNdH?Wi!&ePB0V{{xotzOe_hFGZ4^U|UFz*Lg! z&HI5kH$9kdg-NSECEk3{&R_#^ya=a&yPXR>=;6BOv4}=PwBV43s|{D`(oHezy<#Vv zk2+4_VGUe^&&cF0pXTX1{HFfW_2<9Ow*RjD^B;OWCl@P!+NSa<*nm99Zz};B+7HfJ z$|uCwEap0~(&=erEDg_Teo1)Z{ZqC@|VWT1o#C2OthFYl=* zCWQmLEp<((9;eJsn*~7re1lT5R+x$HbJXz58-h-9FU9nJlQ{I{UF3syJjD*A^z)LU z;2+6Tj`!t(l=%tW@u02y;fxf#7xXVNo;B0ewjJ|;ny@jYfHUuo+lAF&Qe^n%JPdBz=Y@?r9QJB9v}Pd{lAU@>DETX1g^R=qk71u}C?d3CJa*_B4@#ai z$HoxYu8QXjkV~m&4BW$r9X`ASZ&z1+LZ~C&(zYm@3^OQ^gEQ9DA-sF^Qg{HdHS1U5 zcPO3_!}ql&C=3SrzDdb=J?X3ABV?aN4Ib#|Z8D{uwTsCMxQ`F4_|@pl+vs{;jV7XK z@a^@R!)ro+;MwirofdK}>X>}LXu|hA`jagOs=EXB&|>6DxO>hptj{w>qWKTOVmL5? ze*7f)Jc(r}s#92Q^EA%iq#J?8r_Ei?SKrRbY zFkWH3zn!5|y5*h96TICCO+0Zi+<24jOAeDo`j_!Q`96zwV#+U>VWH1%fnEE_fMp!Q zFM7qZ@DTdMVOzX&d4Hh`xZ2s6A$IZICY)m^s4l#^EqogX^sNb3)xyzdm%d2$7WP3~ z_UGNnj70eMjY2YriF_^Z@UPo3A+Eaf3Eu1(&&2CD?(TeW@#4;({^BQip_yjol4GkV6?y><5F$W%sb2E$YJD-(F*2=6~0$Gm=j z^5KU~AHi83WDzgEEL2Zz^EhJ^?{Nkk3%z73uWxj@e~%Y>X`;6{+(*Jk7z?V$!wka4 z3*$U)2~^(HU(JEbYl}ZQ-hxH_geM$+d=_4eV{f&s{#+it_*wjJi;=f8VqhZM*tG?N z@$n2IWEf*7Pf55iE`I&x)p#>c#o7xp7PVhb&hyA-z!mqm%8mg6ZrT@u&&Hdl9ag=sqRAAR`l80X}@b9=Ohk*9B+ zyHJ}adox<`xU~p(G!I?zqj@0XLm^%sWx#$>xT0Ojh}YYl_F6Jk`y7kb6lO+~OP4S2 z>`f;S0~qfp>sCg43-&+z*`EwfUtha2Ax3!J3Y%f!13ipQ4znkhd7sLwJs)S(qOX#j z`u?d?Z|>ZSug<-dw{y?-XE5K@a|=!{lTD}crhPM>jqP%1xHDmj82^qJDoaSDQ>Wh8 zx$;FGo7b+*4nf}37BcWho{+P!{e5!9qP-n}yW>q`lbGnn*W)c}oH*IR*@cad zK-_JNvlxA_(0C3JoLwz(*b6#!bz!^{Dr{GZ4hF+0{KjeHP776f*|p1; z(y=U-Eg5Ym%8B@vaS@-gZ-Fxg^1{y;7lm5TA2?a8FkZ>T>dCOT^qR8P9uBdE@5D3@ zFT0@}+%2YOB@_D!ZU62Cq=3YyM>Tn2OUYt5sDx@*T| zJMt{5&(4P0>0taf9^JuXytjkFcx!w)ok3f?eB&15taKK*l}GobTlRAJCULv-MzWl) zL)Uel!;NpQPAqGS%@;3xl(+CX$;T+I-&^!xV1E1D`DFdo(YMqqwRLb5liGrJf_dsL z^ptT6U4({ty?1*n?^wKH+(e_twbSA76z+@sun9zNgjZiki>ClxPX9%W$U}`ezzz+_|Y+PwvMJMOVJbUKsA|Y9fr?q)~ z&w?#p#3S?#PC;*V=D~OAh_`P!(9!~RI}WZ~o<3#l`KHiWCv!%bg^R|TZwhH-0Y?A% z^wZz({Ih@lzl=P%m7c48>?V2l;>8TZUw4keFQdu6iSsVMMO(Uv_PJMo=S=XYzx<2( z%ZcKXU*Ea@{a1~9-zB#@JE(qZvBLrkx{|iW-I{SN#zcRi@|GUam4&L0GF8?^aTY3v^51Ze8e(%$0-k6@;*Kg>p_+y+Rf=8d!=jyiM zP#ycDu?&Nb!6smCmPe&dZ@7(f`W;Nn5a!7m5G7xe{m`gmjWJ4MFmPPY2qjSFad#NX zEB$yu??yFz&4*#Zjl8Pox<45b%*tTY(BXN{C}Rvvb1fyn(w0sV_Ypz|T2mrVyy*FS zx>ii@$G}VY!@B`;_4QSjJeVEC!C^qSU(cpe18-5yWhl(QK__HvJzx1137c)&MU%l* zilJSQl>zrHs(nvsJ>SBd2gsLVgD&euKn9L~l~G*`#1z=6du98Te9G$Sz@2}}P(Z)h zY8BLz^=ybWbtymE`>CSv)J8*AWlkF*fX}MmfSUUXMqD3dKm>f~Gj2{Bz@ldW4&GhX z+`|X*ACw9KXFlc#qBTmH?b!OA;EzA^M! 
z8ik|A?rT2q902N%7GtWLa+K#&ldR9&`WM_SO%mAPtwYOt)qsyB)o;i#IS2E}gIudT z1nf6Lp{c$(M6ImVHzo8@Ua$rSl^Fo53fgc|_`j}JTXySy`r)Qs17OmSrqCPt`af+_ z%Hg-ET;-H+Q^qoSL$Y6w`Ix@Doo_XuY(W%)f4-;2O>V6`vm8Rl*!WzvTtMrd{z-nYv)oKDxz+$77?aQ0DIN7 zENW1quDhBCm7%`9+mt$KyiyiC0hkowvGVtJ5`eM69%@s@rcH;Ii`NHdbSlpaOoy+) z$G5JOlLE02D*qtnRnHM{=noG(*<<)`@S3~}tKNHA`allJ=HBh*LFJU9>T9d6hhBru zG%vavw4|2~ylpWfyYckk)EhX0AtAmPz$=&HnpN~p8rQy~U0*9-7loF=FW_x@=1RX1 zpFE|S>^kbIgL_ZWc%F0(2eeU8A7%8F>d-g5R!+sk<4v)9;!XGyzEYVu14VG_MB6 zFW!95>z9=88Z!kakHNFWHcbf(LIUJ~~HXRS6>Sbcb zI4<9KZb$!<8Hq;*RVT(TZR`NK*=u}cEdT;%sNrQ>Q7?+DViB3s^ z;WLIn1}c*-lTG+AKEGD^+HZdobdxzeYeD0F9&*?6eEG}2_{%Zg@x(uzmlMy$tw##a zY4C<;qP_OOK0M;k=~fX|GY!gZj5n5`zi-ca2YT3c+8~i zacxOln+)yAn6pAxkTI{djoTJ-My!+Lc|HcT+SVBfRd+M#Xe6(0E40gMUwrlAg~KU} zLrKQY&q}8peR~W%O}4)eDF*uY-+gZuF?iq^w~ka#Mn{ttiyQifSicNZbO(kWv6{~` zv2$qK?Yx~rDf%QqA}+%Xqm2HHro4k0;d!uWE7uma@Hl)d`i#Lp=wuYy;v4;KCchR* z;&l@?6L)gd!iq_#KKCpgQ@8}zLW*q9LohKnfx26}GgJum!q{SA+aYQWZPcfpJjF-|pZFrL`0a<|2})5R25t}%y^(4w-wHQVf4SUPx^;U?KxTi_irWF4l7 zPmkw~edl)Z> zce^ySgYY*(w^X~lp(|V8>W5)uSAEBpe0_l|)wl5O;!&v@eM-;JcKnUgV}NA5urmoC z)U~E<96HGB&v;}(k)c(%w;P4yz@Oyc?PSjF(e8^}*7K|}5#c9U$ z%Dq#YFs>d-_8MOqpL{c*ioeZ}IJnej8N)8V_ud@T_-B9e%bow@|N6HxzU=NW-`j;M zxP0}>gg|j9Ctm$8fAiNPXWoAM?fB?}=~H-tzKM@~P(nK~<~@qv9zSHfDXdbSuM9E2 z__M!kr`m5ORE+WcWqhxV#@L#XC=oQW@0>e-KEC1Mo8DD_w{z?#KmW6x@2+3l z`5?Ui@X2SvIUZjN)j?lrEPK+yp@nDFOp%z$pK|e`$%s7$fIB(+z^)+&svHX7TL?VE%{_J2y<)j6nz(!^+!_>@uVhVjm zpCl{sArDdEu{Z_@XLGNEg-r$yB!8c0DdqKQz9967g=gU(9!FCPH-QM(`YV|h7IaSw zH;-BvS&L{EbQ)=r0Y^79XEhGk zF?YPhrrF_8qX~b+XlTdbj3rUAh8el@5)Z@|=zyQ?ROchsbbBaZ#Ru`+Ici+b10LXQzxYG6YiB~ww+;^E zd*x}$O(y_=ObRpZw9S#W=wrW6}9}Utq(hKNCTOC9jXuFr~ znBbv|$I)=+M2)RW*xyrE2iO5be`fAz6c1loh)kssvT?9kB;Hv zU{reN!DQ1nm#;-CizMj-Ei#kU7KiPsw3unpQeT)wrP`XlB|MCg>d@h%_3Z^SReMcX zP5fya)D#_0rPtwmvUD@=!=lS&;p=&TJ=S4Z^5cMku_&eh`G`{l?7PGSS2sQLuAE&n z_DtIT`^+uDSsHH^49wSkFEPf#Y6;f`e;F)XOSh^A{>JbU9;59J)RhK|A#P<2K8%x- zd%+)EGrSR!We#lh^BGu`FyGw|lJ3hF4aS($JCCbvkGrm+hDjznddVimqQwZqN!_FQ ztmg#Kf>ixz>x+B=&A8IdK$tX@KQLE&tmpnib3rIWpPu;+r?>-hJwiM0eAdNO4U$tH zIG(RIoD}HV#qy5sLOb>|@5;Okw>>FcJ$vXpbO6JT^3fG@!_V~~(YN9jq^{u(Zt2GO zvwhPu{Ip3vHQeMI9OS_(Q$O&0lrvuo`jppw4_!|7{)g@Kik^12`iBhDFW}wHD}1Wk zmRn|V3G*^W`Xn$l*%$-|m%(y!FT5&2VU>?eX_Y13SfR@(-F5XC+IsCG z_)~8scRTGk#mnbGH-Q;Gk*3>W2|Ne4Aq>P3rq8_W+pFF!m~%UZt<`D)-0*A}r<`Rh z1=H`6A<~SoUOU0tzadJKIpryP+R>M^5CB8p26y2h9lU$dZ-4Rf$WiI%MX-7{hT%yz ziF>z!C7I{SAbYCgCPnbbB(LCuZ`U^tn(W%OpI4~B+1>p1ZUG)NEIf?|xr{y=Vvn3s z%Fw*JtxV-*{*<)lW*`}IeOR&W5Je=PUq zmEnOuWy^}TlSo>vwXmBc7|mt_u)0U$~F_HO?#rHIsv==Y>Z!Pt{A&4G<>(g0lj$3Z5JHWpD}jYPrdPX_-I3hH4}aZj~;7+x~qf6Zf69s z2qJ#)`A)lcpyHF?t$4t;J2D2E(0$j0kl}i~w1S2Ij;8eOYZ5#v*ziFLe7Njtk%Mxs z-SNkGmhIMVJU@45kUd)7*XOXbFTZK4B-qhFTZ>(bE;=EPb;fT-1$}t7!v>GCX%c!S zgNun8gXir$Un#N{O?VORsyqkL9XxO*<4|EnQuG;3zrFcwM&et8m$oqh6syx=U&>;f z#TVLZ40)wF5WVy%acVtRj&_!g=NyG!TwWwFd1h$ix#AwbMv~!gla*byr_cp$itQc4 zipkuXaG~%0`iM5=X~RSGcsTRQzMtVKRE9>{)B%IM=`DQmx1t3 z2Jn+}K$HcO40N^agK%XC6vGxQe8cd%ChU3Wm)Fh*@Ae%u32qUgdNC9l;qaP;3r1-% z+wIga$+qyZtA#fP3m(9cVEU4f5*7}4Kx*^ZWe|;doJabp%XpQ=!f3|G!x-jIo8bj9 zGx0NHn098k9r;oM@EAzY(-@(zlH2&x#2#)IoAf;%j|}s8%gzLoFXud%L{9wH5HeBL z&n$kR&4FSpYhUoSt%FyE0zwBmB{&YW9m7;K_~BWe;kBEE0RVD9jlaCkz(|JiVB`@t za-g>I!EYmvhmVCMNFIfYKZ{%|#1MIuoNH2C<)(e`Jg-rcPF{;(PAH0a!MIL_?FEC; zKAHEt-3&Y~(M-FTT#6AcKTpAtAB{iov=Z0_e1;!Z*dHR5@>c&_G z5MidF?bXE*{SqFANMG zAonaZy>*%#680wV?G8i~D;)JMynVj=8#89?UB(Du@)%~NG9oL0VWw?kr^ ztKNAZ7GmH)hIZir@yER;{VR5Ii_E3BsKHzJ@oO!Fgx8Z6?4G}jH)}gId)Bz>ki|zC zwvS{05dHyeo_yasl)pgrW>;=F4+0T+Q zZTZimeD$La5j>Iich4rQ1KD2R`1sRH8JqIvuK+T@oaDPa?uW*e8tWF$p|B(B`}n(^ 
z>n)@T!$scdTfBV9sb}@E>({@DKlaZe#j*J3$YDF3-WU(^Im@MY;(Z$r{?Nh7UtPPh z^Ja?~Kl|y=^XU9C9{P5!?G9pCvdiL! zg>5>+?CvV9_`mwei<~#~aWUcMa_m`plbvBN;%z#FGVZlVdoNhW8ZV!Aq+}uV_?=Cd>H9AZZS>za5wku_TLMwmAG<_}skl^^B9ohOVT%i|w^(uO_KnIaK79IxI*pLP$Kh=Of;T%}GB!FF#)7^DSPlksfCqW)Ctx$4 z7teh9ZYTUUMnqFPZ%;{n7&O zHdG*IkYlx_`P77LqBqu-p*Z}YjW{uo!49t7gSj-UzqoT6RR`P}xVn_LPZgEUr92*% zX-&|Tf*1-~M3JnAr7?-`TV_4K&oWBQ<&bX*SO5pa81^=$#*r&eopp?!V|A?4mENVn zbc*eJP=ebMA>D8A>w6`Vfw{7Jt7wuROGz-RF3mA5iaGIat74sdIA zZr!eg^9(MDCod+`%7P|7N*VfjH?gV-lZ;jFl@ScWV+yHWQ}nby`Cu=?*R?q0wGQG_o}}E;d;{eE^oNc zqn3)c*(F@GfMfIr&y$`H+~JX6tCJ$@P(H5=Sq+fm0X}6yQaO{P=W5;u$@`kt9rz`Q z|4@GMm)5Mzp>0>u%s@O98CU_=smI{g_xgoIX@ik~UG#Qcwt=Ij-Y+B3!18E5knI+!VPI%`;Z*4H}bSn^*d>RbGU!;BNb)i6TJSU_KfS zLosNh-@IF}6|&?Hfb#VRM{xhB%(X+L?9~sB=mZwx47|3iuW$5e01nnxu83nv!QkP5 z=8+PB!Ea#Aoo7C9>0DR2E*O@3Q?kkK|0k6IW&fwkKZ908RrRU6yWU-IB zOn&vm+n#G1h(WrqMWe2jGi7af!aGTOy2W{e-8@`0E631feGdY_Na%aMR<6|sy{c|Q zza9^dtz=N2Y%8BUp7ac=(=P)U&eK#w@VIo-)~h`4qUH3HGS<)SAo}6yYdin@zxls+ zzHS>WZ^%Q1+z`L@ZVHbT_OovsYdc+&j+7J18n2{78MD5*-r-FtF^oox06R@;#H8by zvoED{ur2@LCho>)iX&r_i5cS&T+xl;@lXa2+v>mm_U25Ag%bF_Nz1M($D0K#lLOoF z93;nE>G9*IO&%U+ylLBB*F4oJIhv2r$)TNM*pa&q0>g{1JG7^XEkiz%F;GZ9UXbCj zl+g@_J){f@HSmy^V2J7$k$NWkL;cFwS6xlo9X1(9r#LR}bc+i2^MEI_#cj8>Q)rI; zZQHKm{i+WVG=Zp`AHt3Nr1T3@GVRqj&wg8KcQyV{P;0OzXv>&H!Trte{*b&`-ih)t ziki$Z%&F548JmT}amdb_r%yH^Hp${a6kP^i9$rtP`;EbYcOmZ+%KL%5kj7&*9EE7H z?eeoPE{$OtY~fH$5GbB%g?0?WkBjqu>ddK(N*BWY`FN`_7?_lO^uhb%p~~};@rWS> z&5j&B+yt;T@A+5Xel-g18yRC5)`iQtdiC1Qxr|U2UWAqy17yaone4=m!~t)cH*50) zclTrfSch_vt-`GsqZwTp=?*tBGjWG2!z?e_{Y|hqbUz*hW$B|D1MMfetb5Qq$VmWuCth$ z#h|}@X%u{hQhcqSnPk54#_63euU)G?wz%gB)_7)tNu5qMp=R`d?WIF^g+N-9bKbIy zzdW&RH9g#2iJT36U`Y<#o0Fz)Q6uoIlzBM&@4Ktx4Qn& z7S-GB4zLKp@XqLI!phLZ(_0*FURHRN%$l7O5f%^I`CwZ#11K-sCEu1Tyqo-CJTzf7 z88@-jca$Nd3>l)G#mhAj)8|cy=Y3^Qy())1G`Zx>%vifMRx=XH$E!+Pki{mv@HC0? zjlWGKUE}+zJb$k3HbEEsg+IuN@l-1uPxK+usmfMJTj)0cZ?U&gCPL!w{BgZ?a3C! z{Jq>m^6JQXc1BH$BBKF>%jnI~(E(m#EV2c@Nhlf1Qxshp&mQ;O zSn~MEi;RkA`d){e3PVJ;OdReEx)x{kE#6{{3e$(a%`kWI-FJ8P-WGR1!$);vKs(k! zpS<|A`(ZLwyCvh$9KD>?pe_uTpjyQ7?DVN(um*b$B5kZQ;Wj~kqwl)ljpGLjZ&CSTGCwU`2xF4=XK)mj z%R#Sk$IzZJpZq{ii)9R2&tE*sD}0^Ra6P({i)dWQ<Wq!W0%reG*JJ}bjZkZC4)`;wJVCTe}!POU}BsnN9q()2cD<9@^JQ!G5uIFo(JZW+HcRZ z`blyC4@}JD^4US8W%q{f(TpWz!p$4s^j*Ee&%!Tf77I_WXGG!U$zWw6^!q*Wz=iW; zXt{Rn%V;P3OgvU!yH$JrzkmCy;qiAGr|`Ukw|#^RVO0C{(&z2!c)9c8d+!e&9U#as zFY;V7=rV~`@UDdc4!`>{zo5vm;o|R@aT=s@4WYJX&|8=R@t$|>%GeZ*dE&n4z}A=bH4E@D;rffF5W_vt=QT} zj-1%};i9`S^FgY1hOuSY7?{%bj!WZaNvypUub>vW20!F*~$g zCX4aigW8__v*3V6$|GYYELePJ*A$s~>eSiF&UhSsKK|t6+Vx~(SKhqgM8Al^M<0PF zxJ0MLklk(baP81s{hRl?K8ABwy^L8I8{-%Kh+LF|yqD%-^}C;E=lKio&7#hub};I{ zPoC7j!r4wleM0&0QwQUe4f__k=(~42Otl2V$7m(iKmHW%V%IK<-O1K)GRFEgKh~$5 zg+Yg+ARYuCmf zcK-fXe?3M-_%Mh&i{SOzO=y>@G7CQzmItp4k@S^W6iD9_%U(D#^>Zf5%yG)2-sGeH zXc5+!c)xL4h`@^<{v;e~&lU$1z@uHe+pQoRi@d@RlY#n}amBM)6e%6OBo%YUK8L=O z|93N*nxUZ2T1;r1Cbw%xW2-r;GSE}ltOs`=bO`6Qp_O*K@XiOdp-?uBclC)seEPeL zve)CQ&S1!+{!DSR;Vhh8l^h+7UJC!+H88#7{`2oDGea%?r+4_zSYemN-NJ(4T{@CQ z81n31y!LvA+B>&yWMppfB6%zi1GaM+mYraYsbsb>`9U7l!u*^$dv16ZP7j+qISAFl zBO0->i~~aQQkfw>{H|Z#PiOh&>nl^{3H_)s<=YP4ee+y>vUD7l7;4eMf|@=+#@Q{) z=|ttpB@Q2OvPfky^-1ARH z-)?cqfwkw}d9O1V-rD)QU;VcZ`ur&Vxj4FuzQb|K9PazhXL8UsJII_1v5~KM!!9wr z@T~K#>a`<>$eGy@5D&pyf8?}aVTV`$iw=)97fl+)fqgVSg8>C}yeWI86IAvvytm7! 
zQ*W8HeQ!T8Zd1T@SA{j~{7JHjZ)VaSfMN2=T)ZK633!rb0)TIeRKU2@1Et<_mi8eUHqw-eyo4lU&dH}h` zRf9yiHaMy8`Y1Vk#`ycHhLg@4`4(?%+hd@FJ5xIq61Zno3?>ZaL{7f zIbk2@KK^;p7ry$V&o;f9OnXO{XP0pUr``~!mjaTzE&y_ikYZq-a z*Ztq{-z*SKEB6djOs5HQ$Y>)!!p%KT|EN^Fvkt54rL-zIm%b|x zh<>KsR_d`rlj-hDC6daTCPcGly`K#Z3 zK6PZ^HgPzZq5M1B(_<$}j>)c=dz4?>9VwJNoOy~dsF;WdWwNJ955tQI4_feqFe$QP zehJ$CV7!mk#C(iU;mK(4bEw4ulf>I?1*W`fqBWsvw2a@h z2P2-ir6yL-n`KyJ(Awx_SSeA)geGh$+&l;;-g9li1CnR|w>NGFB}$bTz5Os&FbbJa zIh0SI7d}Fn`WXWb1Z@6FFs@5R3<8N-j1FWZw(t zi9^`L^jPtNdCBlTyLjc!BAA9xcpBH@ue8Alj`CwxtH z@mw34{D~9Kn~?_)&sX{1IC*m7MCwo4PTltL2#y}Yk-V%=@syn{w8AiSkRe_*zjAQZ zGCbuOpGVXE34{Pfv4_PveSD23joZiSteX91>CwR(C-$>TSqSZWn zqO}E)VZY$QQJXz~-b8E`or*Kw0*W}tCVsrUUcPL5ete}*F)oa(j5!$fc6Ugst-IlB z;(e~qjjS^c>BkJ@)sO78xaELe22#5jrf&xKVG~|`k8x%UoWb0V#^G{k?i6}x9-Cq91n!nt{0w;tXDYmBl(XEMOzN?zs}baq7}3uKI__>Yl? z95hK)p2Z1asL&8kF@QMO6)&LI!3-U|;_NJOupuvEaHsFrx3zoZMBHS*5Cp-GHUchy zxi`LHT($7w;IpIk8&zg}A&aM++F%)u)@0fa0PPxT)pKw0lTYsa(?9#=&VT#we>dK= zm)pS*#pZC$8^uk=W2fFYHS}Uo!hhq{SKGixdwv(~7-`8gY2Zj^3xUEL*LbBZ@t1Z) zC!W;Dsuv@we$9iNA@W#defs;4YugSCee3NEdY?>MlVL{cAL4NbfI2YkPUF>`7V_v= zZ=E|A9fazstPX7EX~qc7cu3yzhG!(V2xdM&ZrVM9_6$R4p%gZ9;T?#HvnJoLQL z?xxops~BU@67TNX?I22v1D)GY8H}$c@^_M<-hUk((S#xKXp76g?#zXAT>~rwxkN96q-G&*DpXbEdG!A_U7Ulx2hyjsd>#SRoy1x9ZJf(PD!5 z&EPSX8aML|oBj?C@BIB4Z^(g1(?2sZWvE48tZdA1eL4fsgN%qrT2LYHA096(OmUBS zOHhRH@}IW-kyF! z&#_zXK{C+wehVVC3;9VJTjEXm@G zvw2lp7|}KsfVC?fjO?F~J%t>(|ERGt8cf)p_yoQTz@eF($oSr3(~*Pi@CaA^SBJxg z+i`Bfs6>ZF=X9-TMt<$j$SC}YHW=OQX_%H*Us^kWc#FfQ@905)9NC#ZO%E}Ks+X}r zQ%-sEqhB!0du|r~;_{Wx2hqp%(+P*N|HaPlfAe=kS1{-s7Ln+nCr+N7h600Eyf7gF z9@zSXwvfkr{DW4;-5$(1C@dGx-r2bpKOIOHviQ&Y_fm1HkG8NOEoXwmdootW7*D&m z$Vv|-CpaRY$1L8Ur?#qGVUI2$H#=POP1Q?hYQZkO^MjxKV(0zWe$lgC9eDb|I3?Vv919kQ>Nggr zw2!%fMY4zKI^+y~719O|;d6@===5EB=^;kvXrO+K$@mn%k>lExQPfx{AK1o{MAXPM zd?z1y#pxh+n6Vu%J^G=YV9~`x?XV}fbFgJ)(K#J3%J};AlWTJj=7SD|Bun9?pFEot z2Mcxw!>e7F)eTOEGT@`Xoe##K6X~&HrfW0K6m)>mTPm0SFWeJ6Z{O~Wh86`lew@jW zeD`KMO3t>3S3kv{--pYUOP@Ce6z(qE(Ctz3PPn4sv2+q+1)cf*iy!WM`q>{wz8Ig- zUp#saSH>w0NMe2S;3jX$wZXXsyxT24+_Vd{Hf9Xp*J1@;(%)Y1>?-_usIlTyXAqgI z2n#d26nbuvk-j>%uCuEVfxzqS(oTr0udSM56tp#a%Db24; z1Vmr+>7s_jIrB*3xFylBdEHY?HF;t%H`{koly3}I1F*aUM1~*p6EGh8oBZ2%CG%!( zW6JTwoU$YaYn7*hGEZ0liMyUh%5)KY55Zpv|M)rl+ttR6B}{YZakGuDBmjG>oj zQ;Pf2Gr0AHm;5rmz^RXX!8BDQ12!ZWgT!1@MyB3_%m!j~oR?^yz)?SWr!Ei(17AIy zGU`NAr@8Azxnvl)T@U_K_+(nC)U&b&=Wx_f)P3mTJ|-TrDHO={Zw{oAjO!%^1Pw@Kl1$^~c_3K8Kz9^8M zkEC_o-^=Z;g~3a?K7g*jsV~VqP`7w)3YoNB%Ysh>$p87m=j3|{4(MVq13W>`R&wCjaK$-Rfd z27hWrG_ZyhfOq&e>7K}-{=?9B8i+=`Quoes%qv@2POyq&jpqLDIv7S#F{G77z zx1WEu^CX4tU<%xe;v||V+M0Ib=G{>Ug)6z!ALY+PMSNv3T(5V?(Re^6`{dcRI|YKF zs4~;BDx0DChY8ujJAwjr_wMrgzTWnC^bl_2{kPAie6+P zPc~`iJtF0NE1mEd2M;hZzc!wmwo@vPCmoe3L-HcuSw z$f(Q6Z|kKAjP4-L?41ngj8j4#0I=GQr*9Lwc!{^04{3!D_P@x$n{%Z0;caTWI?q@h z+1f^GG-J3I+f=B72N}qCvm6R91~~>khk5EF^nl}e-HEOIZCm+0<0YCx$p`?uU+UQs`)7NfffIrvP{|GonkK?>@ z<8L^d9B#es$qWlc;{KwI_~!ZzuXUc$c+R9!7zB%2?w5hu7Wcf$(6^ca?kV>^o&L$d*vS%wy?&yP%QfW(KwtK*?1S)Re+{E>Mvipx^wQ_*;f;}Meq7X##eMbmLb7{1TBLx z9Zmc>#PNqhRV?@!>!UdXLY--3J9R+q+34Py`Gp4W84SG=AWt@PpT0+B^f zMpLm*r;f=AZEE4c0h8Z+)2;yZt6wqpF|wFQn^-E34#hBp=RLQGdZ_et^0Vz;`RV)b z>>Nrr{@rJJ%WZZee0cHwWbzOfDYf$roz10cdnxK<|XHLf}wLKY1=1dp^ z{92<3DP$4qL9vxjosPfib6{8)Bdbp|vDOCrsw0Du$vWB`ZL2+g!DDZ|_14e`^N>M0 z18=+=g{vS(|NNIfFC;>TkG61Z0d|Zk$;`X;Q3q(MJKklW=k+H)8f(Y9_lkF(@##j3 znfQUBevE3p6VG&4p&Cw~ej^N*=Q_g@57D1|^wZ?g{W;KBe*Lmyl3(}Z*K;!_k&kdj zXMK%9!onQGF2mL=v_>2FXYY}mD`9m zw73$!LO)ofv9N*HkF_{!k;Q`0*LSbSr#DMq*p1{mS{{j3qx)3Ox$_s|^QSw1|C`^| z-i4iuR@%=vfi{N=Rbwpus)J@{VKY2mZ>N+H2||y3@x>QA!XJp~`vi|hZ?SLfZaI*l 
zNE~0@z~nN1Wi%4w`*~&3MHny58}~;~ye51KWACkY_FcJhWkLjr&Hh@R&U>R5&u$B2 z@3zq4Y!v)3y9=r}nNAl)JH~y6-Y>3QuI~vK#HeXZv-7Nl+UVd9J+&d(aN*qfbf6Y_ z5wn)$?GW%kywJ z)Of_0|NGzlx^Onmw!5Kq>ayHM5TX|XAYCxA1{tV7g@*xiyibhx}cCC z+7B$US6DG)B>l|JnXig(jxU83M8|Ini6mT!F^>*xaiN-y{2JZ8_SFwv&v;YamK?5( za0P!t`&1_WVmHtpcn@!(7adMrU(XpsXc1%5cXzH&|3Mq`Ao!8{jOov#?cTm09_Xy6 z<{@P1gZjtW4sLdK2w0~&pNI}-oOzh8|5|d=843=@{QR?zGh82Rq40d+_c?3I002M$ zNklvY%L${U9=q+B>G`e3X7@+i?PEN@^u<@D&2kt{_~hit z6%x!&4kSQ#-upbn)%DPEhbXtG7!K%VT)Wu<M_Jm;a!)Gw(LFq zw8sQI7^_awdF~%TeSuLHY36_LUJJ(H5&%huXf6iOCYdyIAN_}h5~5pJRJQBMs;bp< zad}t0pMfPDOFil4fB$ANV}Qz}QG?~Hw8QJ_wis2zUgMlQIy>#;RJD=^h8)@bX zHKaiw^zy17@1s4e{GSj0nLY!NJ`sH2dq4G*yT63lT9ANG_u6pk;B~8zemv@X_&_;~ z%_(bsR{a(pit%Wk%RYS7g9Q2PWY{DCUaGb5q`&=9ny&S;1t>_tm;;cc+2ljpk$)O! zQ$4Sc5w7c5dDXvLyf^LT-M}eRga5Rp_UVTbH9Xq&)CoV9cIrY#)m{*wNE^WC-L$R# z4{$I>1}L{W4L|o(UW!lI!IIJyM5P(Ixxr!HB@?xSiI#V-K7)68B&uFRL(ev4uQu$S zd;nKj!SPx3DDRLp*vj^5{`$88Lm`{GOhe-bFTyf7lfGN9UJKB)nb(6$b?n-$PU9OK z#cyjoWyO==D0cgAWC5H<_Q@Wh@b={0)?scI{DBxg=o&7IXS)P+fcMPzrl6^hobzbY z2fV}g@EE8ISBVCnq1&bwQ>j5IK1^A7{CwxXe0)jDvaZ8x#NK4|ro@_*J}hj=n)Is{ zBlE!wgC}-=pXb!QCMXnzdk-FuXNHL;B|0=z<$LqOm$tD}(=30rr*MXaq7o2ss zgK>^EiL}+0Vlqa*ws;;$`KOTW&v-rtm<&O;@{D>TPvuXqeA5K0wvTQU;HR&xSf21x z_OZgNP|TiZOylWo(j+Z|>G71e7a8ltp}XA#*JO@io8pOHCgZ|+0OXm;5l=-5wn;Z* z_S4?;xbmJNNO8wr2WDc>_Q#Y~p#)6m9AL`$bmmlBkei_5W1iuR+6Su}ytLl43HP-pIE7AAGsuKAOy!9DMoBjh#PyetDGn{V98AG7eCR#rqzjLde z!WkTSpbOQ*Xg=OBwP&2(p+l!r@K5Gd*CBA5MFMdc9cX9rEpD*3WMqC;NC`V$j-)h? z*L662Sn{{cQ2X(+n`$-}tvbdW#)AyBYG9dn+E$ zE@LomoYM9tsuLqSKE81N{PbTFbV7+iaX!H^c{VAV3HHu1vhs@6ZsfkWlQ(bPh!ito z4Bws2vyq`5n1j{rhsslL_*&4>cV3V77MR{DX5{sZ{9`1np1j(Pr6%C`)ZwX&O2f02 z%h+7tGwfNIP&0i}AH^H$VFAsy*|##x zGJuO^uACEj4^2#OhPVC#U;P|H*w*6UkO>yA-;4&@gpr*gTVL>JVk-29xZM^#eJs$ zuI+GOuu#@AjN}cPoF~7*#up4Qa}Zgy(C42tIlNK)(trLx{*zh!yHf}&-qaR}$Tc3s zyo+tiM@RG;uT0oSc)G?c#>51SFGpW@Ag41c?$yT}wpvW?jM%qYc)+&|ZVclLEZ6e{ z)GrxP!L^e~Ub2AaHQ8`4Pe;Z&ivts$r*w1^GLu0RKiQUVffl{svKB2?h!%@U7D?dB z*mkeQ(W5P*ju(1-VsdZMQQwwt&2vmEUQF4%pv93s*|@`4ZX3GLKohbnIvbzyc>tnq z3v}TJ=IxBIw!Lc?G9NGLI~HQmMt@^SdolgXm|Y{su*X^)csFmotCufjJj+wIJa97Z z9&KmL{dnw+y9jA!Ls>n8mjGzeUM|wbz43yy90s zy;Hfo@?SR2Sady-A?0B8;z|CTj4hA;%KKaRgTup{dv-p0>w~!GQoP#&Yx2`r@%O*^ zeKPLU&cFDd|HaO?-+Vc{3I6fp-_1C-VxiY}qVKPN``c;5H#4k1eq1<{c*cVF`FQ%) z^|yBZ_8&gpIe-40=(!e_-z?0~o5fTA*Z=xo7YDfA6>nb1qw$;SwZ^Owp3(eK_2Mz? 
zGYgi5&d`^IS->Y22F)G5`ub{K#V1>|zP@wy+U1?=d5ND&w@{Z$m#>Udf`0np`|pq5 z;6O^U*8%GjlBIEFJ0?n}O|;R``r3`gJqxGiM8d>jndwk&o}hGwM;lM_nbTDE9R8O<)7V81M$gg3ALemsMoIU)nNKMP36JvvIp^I`H$Ut6-fXWdUf zs!u;@cNE@w|DE$Yzqebfyl6)b(b-NXPyXW1{&eSezpsDaPv@)uDZ`?lc5uGL?F@}} zCmBy_mgH^5*v20ViW+L)?iNy`%jGXVsr`t;cU%M<40 zWn%y*xOd+F$(+B!ZbEMtw&Qp@m+??P_4)eCtJCMM=Z!Bk9Y>T2)dasuW9QkC(OK_R zUxwNDfBMVeyW!h-(E^SG9rdw8Ge)*Jlb%g>PKcM}$+2`qI)fPE#y~q2;fOD1+%a~f zOW7r2;SYata3G7Hclf5U=W%~a4yKDlAB!x!>FI2-!r*lD?h;G?Y$1H?bFBB6;|c#3xy-eJD-8v zV$_|y$?=b$2kGd?TXs72^xOk^ zhVR)c26-Ou(c@TceZM&y-Q`d)d1IrSIOD?Cj7*QtbKq_~M^72KQ5~GMz|$Pfg;z5c zmLZ&Tw5B2bKm0bM9TPK05aU6<*S8>Na%>GPpyh z4Q|pzjOlmtt}JSx?&aCyM=s4rnoXJBHx>;&SG|D$k)umi{{9odG6ll|muqzxeom0i zbdmVKbCP(7-!$?h-;{;+J=b5oQr7&=GX|{gGjvZ2 z02G|ES1jC^AbTWD_sbAlT_z7YFL(>Ce%-TZ_=;{lSvUo2(gIwXDF9st>_#?8Q(pX=H1PEZ8!$tl?F;9-@;+S9M#t+pZj2K{|8l- zB_G({xb_)xOdaN5`Bq)X!v&+XRSpF`3+usYJQe|s_S62WI&-}ij)T|xkv#z#xS$LY z8_cGFO)2wYBQxyiQF_S1Vqvz$xOV|vccoDbc_XFt(-0~)m=25t(!xcbm``{BUmlNk zrSx98^6k#Q`R~8q`P(ZU%0zCp8gDYm>l9p`L`U;%q69y9ltQI$+LuK0oO=aA0i z4NnwCJ(C{rg?-?SiB={+ytMbEj5DZgx6EyZ&7Cj5&C{cH{<<(MCV`B?wl^zlCf#@- zyzdpx{knxd%$LN9g&KJ+~`mGy@{t5_|G8n@t>EmG&-cOo{oa{hK zabJBXYwGo+e+*E(E=>5~!y8mTc*3|27Nt9!qQoeZ6xtx~Fc%(4iDaNJ`r2EPz}+AR}h?eca4W% zb+r(pu4ttGX#TJh>rJXmY>WdY_G8d*k%i#~{*0UGE8GFYdqu`09V8iE<0Ts|GcNS( z%;}SbYdABw{o`+cSNk?GZ4!h2(xN3#WfO)MFNGq}c5NN)8Nf-+V69%ryZBM*_f$TE z5zm$J)UimAx2Luhw&Y|Jdq!pL`uqnIUL#_Y)B4}yo664EC8p+AO&m^VG(FN5SUYSU zRIb>-_)zylFO#~jGLkyX5^c~#$OH>Zyo&LPL;p<5DB=u=CXZ!7K(uVFn`;1|bNJHR<7{eY18vo!4gFcMf zn1l%7i>~7Hf7j$O&K>>`k^sIF(=~V|N@%e1^fgS1ubveu>Uu^@huakb zp$U@yKt|#B8yVcmqQi%H+~?t0dDm~=7*FA&8RU6Ak3LjhG__dB@Pg*c&}`R3#_Z@G zA*SB?7oL5#DMS77G`?Xlx*uIlnjgnI4!MLYuT*tlq$Ka9w@^hEk{dihM-ETDc8?e1 z;XF$Jhd=pf=fD5zA7?WD;8A>D84k$ufusCjj)6RwRbuLa(CRazPme7&?QddxB*W(% zeid)oMPzYElh{pT!H74mNjMLC*A~afn-j<0?l8))GVo=LZM!sl^$A973k(WcMmb)e z`}3?^3z){6D_5`W{QPHmVQ<=qY}?95~AadKyWPmh2^5oOs4)2YBDwmg=c>}&Ubcif?G;;BIeZzv} ziIdBN{Ygd%d~eIWmU$8_d49if>doZEEO64#j##{LY_#uEls9(MYKP!qgLwXwxXOhfv5;+yVZdC5?u@m`2Oh+>_*eKmznd(#PpLAH`*>e|mzPx^8 zbRhB08Am_6bZO_;e^^GqfAzopuRH(#-~IcYKl{^P%wi24;cQ0smoI<2^Ot}A=R0q8 z{h$Ba-;A6*lM#}vcv0N-GiToH43KYk-v8kJ#tnwA>0ra7`WEA$e)1@JPPf}v+5hpc z|2keT9(6CsQBDkZYA*|fc8HnsO}LYIZWff{54(wKqFJ1Az6T? zvz5)8+gQM$I*Y5-RNwl|uYWUi;AyKbFrW!7F^gNh$Adgf?IN&PEIgXAhavje^X2GZ zkp%tZ*}M11@E}=nJHsI-2QrUlJ$*Wzc2{~A4^#cbZk<^qt4to~1;RhX8!BH{*Pn$ z)34YvURIAskE2EW&5%v!v~%lZGD~c3@`D^OzB0DEXCPM(+7CMNYM#Zf+Ju)s8VOHD zk0N`-i2mT>d$oV`X(#6Y^cR1+^H=}jKMjwdvk)3~Htg;oR=Nx)j@dES1dkjOI!e7Q zCX9z|Y+zx;;lbp=zQz@cK|)!?X2T2kPyLMv*9+glaBAFA&$H*=**TYNI2Mc58q^<|UE&fCM0QYVcB@Mil8XXd7 zo1{wLJnucA;PoCvG#`W1mTm?kboVMF$4f3w<)2#AnLMBN^etT#bNLDuI(Mi3_+tj~ zgu#@%Kuzk!!#&0u;7p#y-;;lyF^ctK%IzZ9CN?Up&EO>M7$Cexhy|)|{YCyweYyi| zXr-mM3seh^E3EH%R!(glUJgi;OX#| zZhoh%aF}-WY~iI(4{)$5L>;H$hgZry?OSHgtHao52Wn+_7o6eY%A0S9N4sTMcH`+_ z+qJ97@?)6^bbK>?sPab!skiICwx0Fyn(7W6d(l1U{)v%3by&AUFRvt;_9@Zo*PbmL z07KK&zc&21u$wfCcG0GEBX7{7_wxA0!_#jjHK^+&ya>F}s#3d<$9I22>Mbqelip3+ z)+RG1;Opstp5wXl4$ZqR?~Jalz@2&oOP!}G>z$%Ub}FYzp%FfsI#gU! zwpG4zhsN=hx~F7HB%S=P`q4b>DO7lZy^=w}o_h6s(=RPBm}qQjH+hsiwHUyz`fTbj zm+;&CEc3;p?{@rr+P32Vng{t`{o(Q$J5!0Ki&2D?@6bsTABH?34Thf4iT40c$Lo2? 
zd3Uq0BR&+ICwX!zmHekBF{BNT$D`oqg$^4?@HV%WpFGhtG0X_v>t@_=EKgg81;#PONM zf+rdGc-U)G+X+o-c<`-0Ehe(?6nO}T$DBFh|G&~m1uE|Y_qJ7>%A%%bc!w(OJ=7=S$cKbeFx%c!zO8VQ=XHB?;6EU$fNj0fG zcP{U@jAXoz-?>mMXFOJo@t^}))ol#s@vO#yi;xE<_jm`N@!V$IFhQ{O`feUDCQ;|l zUD&ysagAa5e)lFOZ}Hw~vU9NXZ>LZ*6n*~r=e=LsFkh>!(C~|^SMorkh$m~`S=N|uQKRnLU)Z~)E zZ;Tbe(5_;>^H{)7N5IBtNCc!6LT)5o?Rp$W{4aK;8md?4SMYa#$|8OR~ zkE@e~A^l_R!f@6>6TdwAOypm-03qxOqx(#@Gn%hm7^~lzjIa2x870Ml&a0A#m2qZ_ zXSI=h7FxvoW$0LA{#uAqH?ocwt%VCaJ9rv12A#?ym`sx9Ool86J6Zg~*o?8WfRrs3 z-SLgOj=W6>%&v@Rws#*xObau$ss6~A?2i%Xr|&n(@8IEY+ggk#czJ3|GLkHYE1A)4 zb%;O_DQP%u<{fzhwph=P9@UR4-{*OTyBcn1S29%}CntGbX-@`nb%?#De}a$y30+&q zdGZfmR0lg}CO-tKGXv-BK*$Jy9v0nik}tKb!)E>M-kU*WQm+L!0&&UPHKI&^56|OG zWdHPMALg<6R-S4W6!PrDf3+1peKT4xN-{LxX|j9v)Ehe|lQ~D@OJ34;uUK$9n0F}k z$V6D^2lAJ^)HeExZQTrFLIdCj@*ckb_}ky^T)g=IF?FX+mmSGr-!ITWFX(+mZ`cNy zg^OsBl4Uz=heP2P3We>D;1B5+b~tQ@LbNE+6h&%CViqiewb6~<_Z{8v|Ce=db58et z?z!hwWo2b$Wo2b$Wpzkm{bfyhUkuNFclk;(yF>cMGmlW`p)Y3Z>Stt$E$!r;1>ibk zeDS1nGKvG-yWQ>~ZSk}T`P0S;;|h789_nXd=f=$&(YHQSTb$2>*!ZWf>yP>ZK6)7) z^o5aK?U*9NjSE7=NNe#+KV)1vaD?G654ZTlR(=h9AX*ul?%m7FyZ)me-K+j^W)LEe z_4yNpk$F+S6e8tGo|Niv@>m9#Jo@zG+u?dL??KccH!WaU=wbvTqs5=qt_SmMH||)h zaK^x`3=s_I49xGn`(C(Z9BOBUI?lprW$TON1CPYR8C{&Cz(`^-mr>7sjacmAysz`x zW~Aq3xv#!u@oWycEZv)X+i{XSXE4Otj8ks(h>Fj+cqIc?oLO54!7^h&MvI%(?P6og zwZd%N$tZu}!o_6v?ahgHz8uZS@u)CH1Fvpx?75Qnx`X0Q$N%Vexw@as3tz||2HKpj8buy8H>$H1Tdb+@rV+JgQIW9^cF{j7ng!O;iNi&I z@P2x9j5xSgCksyp(lZ!*$y+?C&5Ym59KBbFzkr^o?+F{iVE5T?KaK}-Sg0>EobKjv zLh2nko)>v#!(0ERBheZ3ccDqp122&g6aN`)`;4Ady0v;GtHs4*jy!qgrf4=!&{`J4w{QJN8IKEqY@PX(`AF-%@|7AQ4u zI(p?W9MWNh)w>z}dNFCVJ6TBXsP~M~X%ZYK92|*19Mq{|@}D{X&gR^OjFZOTkX%*hS;?JRn$ zKI$-RJ8;-k%nweVJh}Pgliv*A*pXqJw4nOOfApuN-5=f@YTEe|)@R$v^0YpKm+h#) zOUBq)v}){s>Lf!2a5`=L&cC0f4qAeOdYreRDZ-=qYTotXz z)pQf%Gk%Od7F_S--R)3cw9oIBP3hc zx|jahnK#uN9>yqRqR=t;d?GBDu2HGh%J_l7MhmM}%XqUGWxZD%<=T*oZqRla+$duS zn|Xqny}{-yMK|ky9?7q~zWTjH!|$b*ruPJcMB_XMw_p5l1lyQ>Ia677e+V?nZTRNJ1-ab3A<7wdZu9r zwuMsZ`n|%klrd$9Asd+P2HM(oq0DMO^<7k$!lel=>QR7&NI-MaLO55CP=z^)1VSa!w@@{ESg%mOH z7Yo6u{P1t$y6REwFz?{fSJy!5y}UjPM|4y6?kd&12Pdf2qpz-QUC?>hv*&R1PMwB! zldSC0f)fqEhDV)qo~Z+#9H6KZ4Eeh#KSO^oJ%U=_+v?6UIVNibk8!cQes4=0zChD? 
zSNVgW1h3jd+ETxIw=G+NXjhr?Kw(v`JA=Q94kD9a-4CRNs^m$8{!}KA0|V{Uw^~jy z-Juicy;nDhr;gxHsl6XVYQG1L^17M#efvKZb|a5hrGaDDdL>VnLWkehPm01^K*MA5 z?8*s`Zl}G}LL#ZFmoBPk(6;$BS!GX2E-@{{{oU zQd;Ky^aYuBSMcVl?>8};;!3kKAgdd)NCg{r`u$?iV2688h6XE-#HAZLEI5?&0n!&5 z{$sk*_P6Vw3~K#>bBgQgC+OSnT^Z6&@`W2-VB9h242+RE-6}{D^eFGh zlpf4NSpbX=LtyU#?0bseeeXcb!v$v=v)d&LR(5cwPVvQvhRH`3EjlgZI#l4(vt4=G zPRFRNhNj*PP9*~p9+ka{FO8f2z}EkHApyGg-vjlOLR(31e3>^rho)Bpdv5ya;jZoD zg^?f1HIM~!aF}fRo9y5Zm?>+Dstjd$?%KWgaNv8mHPjOXgHw5KUixl4jmD!b{7lwN zj%}@D6cIm}=ZUZdluHIwh8JGo2xC$uG=gn(lwSwGDBUF11fK^OBL#(!S2-hsgC8lE zlpHuSLSUUkDIgGcXw=@VEaxc&K96o5E#t*dXc7h<9*N>A-^z3IX$I#XGm3}OOlT<6 z>dkZ0cJTZ63z3jVuKsOHrb#s8(ir~gqsHJd;NyeH&LC_H`SZ3)nvlQ??zZDIn632H zw|esiTgo2IBH9LkLXRf!6sBXm+UwKfd6lv$F7)@;iiuqrLTWHdPoq`N-IP3HgE#Dj z!U%YGC+|02d3c@&657gO!oQ1YO`+teBqlYFUY<6T%Hh>;*FNGfBhr-)&0#oZl!3dl z8QDxC-;8FXEcT4?i4xBUEUWhxQ0y{bys>TitS$d3=nNw>aSon%=9KCQ_fWc7KnPZeBJ2@T-q&03M6Sr+kxa#@ppd(qTPK z%EdCpAL2_s%P5>AAO15L@ zktX6gBHF8;$;4OR7Jo5#7tWm@uNo6{MhEfC@jK(6un5QClCo{`uHOmgBcBPw^uYuH zo`5s_cmg{hj&~~$tLsg+F2`TmUY~QIEINxR{e4E5V@+zbBLkW05yQ0MLmOI}RxAFZXTEPmYQEUOig~ zAsg_4{=^Vr0)3}R8SgW4(zTD#Gd|#@DVFi`Jq#8Ow7s94IWzt4a>nti#rqU8#_9uJ9(Z%O<9JFRkvCW^yu(m=6Jc+1e@qxVYEC%Y&@O$_$5A5(bGP0mWxF&A* z_Xv4oGFrasEHndNc>nzm=lSfAseX(dwg+42(_XWaCHZ`~uqZqLuV*m$^s7rv&RZo2 zmVP!14#8v`QI}oVp4r^dPtg$%YJzj8P1sLc9F-*MFZeC$GE~I1ayc zv@zgmMyOkxr{^*{)Q)g4KJZN5*J6YaJ9y{Nfx_za+!+2w>D)6EITU^tih}>?=f9hA z=_l{MHwF?0UIq#J&Meef5NUyEM%mcgxP>2h1m3)Rb93T#2g-^^{vgBf$$0I?=F)fH zWNgYSvi=S?Jj-ZwCmDGBgdIBf$MD3^qOBa@`?r7p%grzT;Ey*K-uqzl+2^0uzRx%B z|Kx*0Ff8xcmyLzTTM+!oM?Y-=?OyMGzWFDAe6{c^zsdM$F8F4<2EMCb6$5*Ar)<5G zt2;jY#V>xb`TFa4Ke;<+1JriUl8H8>zpJjrfsWpX ztN+U!G==c8)4&+zu0(|w#*Db7Ek5>y( zQM)iGk{!3gr$UBivpASRoc<@qzc!;wuf>UQC?DDGAj-$j@RS(j^~q%X;X)Z+`L6o* zk3MN}W&aIv-^sspgZjuESbxVZ$saZs)2$tHJ07wv2#mon`JpY&952LBhTX>@O5XjD zo{R*CGicj2X-q~lhhH1l_ZM>I$Hrb2W55=MM`#e?jLr}RLK9vrHn+tCC=r@)rDt2X9eT*jwdZ9qs74%6ZMbTfl-6P z**Nfe0vj&QoKPp<$Bv#ZePLA^Tk)3zyAgvfAq>g07SqostLTWrnK3H!EWc+FI$1*o zTF`%z%;Y6~qOpTqeNwD+i*|vW1yc*OzxnmwZGQcmkMmf+IN_*1|MIgL@0_JT9?je= znQk%R{P_zlGIWr9v>|h!$H(Y*<+vLI+c>MXaYnBStT6g?2Nx#>d7>^w2la#ysZU&8wTs4{kL!StN>a zb!@#g9@H<<(jwQPJlWZ^=bVpd@?9Zj%m=lvF+hEhObsxE)(;P6 zI@a?%u^4nr#uZ*hLfq}(LfC}Zydw=Hf#?)4qaWatr_SBlBd_cch|fN?NLSXRT!QPdso`lNKrcSbO)1Vr15FeQ6JcqFQ+2 zVUlT&o+0_lF|RN)1qAgbgK`F-*UIyJM>|-m<1T?hdIxr8thDm=yEN!qZy9_r>OfJq z-Yp3I-61*nF3kM(KCsof@*CCXH-14|MRa+u97*OqbmtB(O6yn8=20+S8-q&UCN0wj zGWO~IAKSh6gEty(GXt&>x1a_dz!@8qHwLchXn~5B3MAkLwqN>=Op20YV8OA+l{NI} z+0ZAj2G2?9TiU*V9K&U|(Gz-unPlDpJQZEP>%qMbe}s`3m|j6`8Sr;y_pCaOL3A*a z==ZPkuac&mm39iN*vFI^m)fwSN1yq+UJ*VAzx-$A(9j2&7&q?bo+|Wl| zCVe#pe@o-h_NIPijxSaxo{W=PIWppTTjp2y0jmci&q6$bw=-N!Ii72~lnOK*zJPlO zj=gbJte@-sU=Jt1hY)a8~4M<>QK5@iOc6V`8;(D_I7MRbJx6w3;E{X^f~YR0(+&K{xN)# zu``3{2Ny1Go`3w!D8K2wC0`zBv)woa>QobRG~-oBai92c8E5vUaPj)&rN&^o$-!+5 zk||%pgPxxVM%X(UJQ-nzZoS}%{dx*xuWLR^D$fa?AbAFaBZC^l6oVEHo70iI3UnK(VKKQq+mA0`$0Jw@0=WE~2+3`^R?1lH$v#wj0d_+B1>KLqbm z-k+4x@uaVgbf($q0Efz9n8GjetG@~T)vGr*Uqs6%DbAGhnY_lULM3S{a!cNQDfK2f zjHzPS3W0Jxj^HskVi3IfG~9YMd~xZ@xA8+puF6FZ{Jd*lZ7goT{>Z?GPvEPcJ$qWe zczqpq#!jeDn&95b@crQjAB-`Np`Q`fBt*OL=r{3I-u{fk4A5w+?iO#v?PTzK>JgW6s{EwgMtFsvOBK)GQcJ_S6l4vo8l)lGSFzRYwuqqz4~Bc~G!x?J&j`np>s&^1F@YIh zcwN1F@#56~MKG3rm6v}!;Na6YDoeX*FL*ru5nWNaJ|0goEHRqTKp=nr==x;h@#DP0 zGj=ukg~z=nj|^q_&BE4`=%~LRYeLOXqHhaTAk2^hZpjWj#WV2eiMEpO4SN0c&;R{j zZ~o>VK522nVuAz4ew<11_3O67$0Ge>VA2Qc5z}s-qc3@berU-%mBI4D#dA}R5EZ}s z=G)DG@hAVZ$^6mHFaP1!wg15;=ZA`w{B}mh&!!&af$Mf-1>OTspWs2Kogzce#2PNV zP~|!N*R|*w` z?#4m=&q9}eKVwz639<6{X&t(YzBtCc-itMEk?h*F>tk4y_hIc|EHq|XEVJkbHo1mh 
z@GrhKMi`%_p2=oBEB3$sW8uO?{K!j(Ju|$ukXoO;UOD*vT;aZWK=QnOm~nxptph8S z!K04{*+ZVN&#UMDpj7E$! zuAlz&7n`rXZpTgrVezF;Ww60*!}E9_zO@zOF+Mn+A>6K#;~CMeUi&Vib7Ncl{$@t; zopJS}4}UiCg^wORxp}RS3*^Vi415;!ED{>?_U?^NXb_JWW61Dx^;xp%y>~wtFHQ>; zuVvt}V@3E4hO&=-`t!|y{^fs~ar96B*?$x5I>atr>HK@|$H#k%*L!ac)ME_e!=C_~xHXc5-*;l(fj8ET;myFTRGi*I>A>v>(i+MJ`{N+DvzRE-VaYnX7 z?Xu8@>OTu+(L}81&>#9>4vXsA#%{QKEgsxEY;mu}-#lZTKY~w<_;&gH)!+WrgpT^d zpZ!5$Vb11N{`MF+#bo~Kcb{)Q`SOd+pVeRS$jFzy4~NH{4YBK|%5=S1=oDj2L5Mde zj+`2~#~FZJd;8qk>eoV9GURp(2gWAnHY|gp-4Aq=?E#W#6c3}7IMPSzvlb7OXJ^RO z`X`>yK0;F6Y8N9z8y;S$h@ZZHb`1#4bK+!mipRdY^i}VdPDKaNca1$yo*1{|$@=h} z7FJFs!~ci>?q3#$@av@?hHJw`W5617^mpT;ao$)YT#8uKXsSPx<@;+pJI*W+KYd9@ zTw}?l`o!KmTW5UU;^J^Zi%AZ9<#h{wDmCF!`F6auORGaYjag(HZ&rE~!?9%OGKONm zlHbM_qJ#)mD`+$?07 z9bI%b=Z73Wd3N*uPkve(uSG>=lBZu4+x*KfJ|A8nUrr_$ntC+;+0EAB?js)y(-uG6 z>!97QzWD9*a|ZiU8T#!;`u_4aqtm@S*nzy4AC%^k7SRuFzWm)ME%IF-4}bZ;`TENi zh`*oM@5kc7wa}iPTwBbdN3@vvJi^z0^cbNyzAway`W|fImDj(sY#hpY;llgz)U`R- zm;=KcBAYQ9FYPg3>01~V2ku&6Ug0xWI|%h>9zzegTYFva_tEM^ubQ2F@v1O{c-ms7 z@Q6aP{n0OeK4Xn@Ki;ws(pcaCUta9yAoA~N&U9Lzyx9Szg*3yv4C$N?Lat}u`l+J=xYqoKaZyq z-_99^aUMVHNnX^gW-<2NWaiz*WxM?jJ3}BG;9-G|j5i0f14bCX*_m5kpaXGE!E>CG z$Zca49h!qqNL4-uubdH*sYf!zKWHpkx^a4POznxF=8*B`>g;w-wWF(@Ep{Be zolf`Nx8J1etsM%+m)9+HC2+?0UcbTDZ;TgUiqjG_RZ^ds*2qh`<^rY9WR|crDP|Lpt#eq>;U|(JHur!PSz{vCJy_|6BU+Lz4msy(7fQbnU zFP8b0!1V{0+_+LCdAgEgHeAtjW8rbLGE6s23V#V9Ta(KjAR| z2KY98^+?D8)y>dKJ^UZ#TKVh8yq^~dcIXZXz;+>OM$y-bU8J(gnhTuD>}&V4*F(S( z?O?COp1T{|2S}6T@-8n&C8K#MlofN1C+SqKM}8|ICSL~A%B&O31z+@-@1E?C+2Ju| zp=YosDVh$2d!xMWm9qu6YSZuFc)t$uQ@%8S!?XXARPG!cI1d(p6}qIH;cAeWtF*F> z0nzhU0KDiqcvFYo^~h6ceg9ae?(h8HA!&fzLg^bv$xv!`B>e~NC$o3m0J>7G-`nI< zriA)~B;BueSa8zfg4N^sQ-T3Q*+&`W0P9cqOxg1JG>BPf>D?Gx`yPCHwGwQJ4X5a_ z=(>C7djPxXcE|g{7$m3o?YluJ*xUAze98~MKzIt$D$`vr2Vd```MvkOHgTDDLes50 znYw~GeXtvR_o_18C&+i=VrZh^xukIIjjyoP+EEJ@f1;A%2TT=Bb!xYzd2E}<*3hxGNbv5a8nfwoW z{Xr?kz;G`0@867X9`$l9@B|zUCSxxkqm9r>GWi$K`K7&$6?kL%v_w;>-q1rC&dT5ft#$9lr!`zGa;LS3)JjrAu8WuX+V=ux~<##UC`0 z`ak~uyzE ziX3agi$20gyb0GPBs?G)KPYtgj`ytXj3#i8es~aHSPV>|4S${xtB>N#jGe&6#G{g z7j4Zjg{}-qGs(qoeN4JtV3^z}(?Nc>GCaT456X*lciLBP&CbU2_3+U z4<(uRmO9x!%e&Adypp?D$p8RA07*naRH@_Osn6&a4$xCUhPv5hu%|7g2gFd1Cu@K7 zvmKsc3{BNlSQL{S`CTTac=L7!d=m$7^buhTc-Y*lj4_7hjfPLi3UoKYQ12CYo=2bU z&x~13K+uYZKgoFc=l|g^%g5{X(&lGB`)C%s z#M@q8y`9&PY|%zM26;YLXgt(Hfc~F6)}~{qul>l#xAqr$q5h)J@Z8i-81u&XTzcUg z9Gt4nZ`RL`XFy=2vF-j)^6T!+I~{iTeSFCfY7#)RwVTjTW!cE*SRw)L72je3e zPZ)*z?6vAB4ACF`;%D7wn244_E;05mFL-11(gDZxqYS3 z2Mi^~1s=EJc%u*AI-8LrRvvl68(n=FaK+O-9=;)19NF!`oX_u$A#4`Wl7|PAK|-qF z)pZCeUMcQxJ0=*7Ebjc%KmF$!R`0|=dGiK)fArp8KXk?d-IwPi*(O9)uV>MP{CXNv zWRP$L$HLj!1dMPmqhD2?MHwbV{hJ))2m~*?lbr4F>E~aLSL2~#`tzJM9`kCXWBA4g zG@0kMn_XdKiJdWasXQtq)9H*c3$VX9H${VdP1ttPF-41{ek-2JllEn}2Bh`SAS=WG%Yx zNvF7R>(Lza{KtRrN3+=S;BKCr#qQW&+aihuvm4)E&KPrcb2Tq>#u~=JG=X-;JQ$h8 zv)Pz?Ai6Mge}9!1ZN}PoSZFba;~tAwjh!cso!;Cp4)CQ6U=}YOer_=te>$+6cc#S! 
z^*ESuQ;cDH!LL5~ZL$3SU>2D!<>5?Ru0xG2eyxRCGTL}Jju7!5-5MS)W5OGGk?WU7 zT8JZC-paw?Xm|;EVNu}Mzy5V}e>QzizY)fOq3z8U*~!79hmTi3JFt?Sy{AXbe5JOY z!+kS0>WgSj4j+og$k3%1Fyii=gKFt=+Gobh`Uly`yPOC8=+7~&Z%zw%NNi-|=zsMu z{@c!E_}ghai+Gf0sx`h7BB?6(DE^!N+Ove5g`3?)KJU%>VMeHalw#z>h4UBMtx*W4 zC!J>ren28 zNQZclet4vDeSbS%ZX_d|tw28RNsoE!fJOFpT-E0ZN3=$-gYl_4ky-lH>=NDgT8886 z6JiA&%^~)uuh^0CR%4n)e~Y>L?#I9WyX4Bl*-0Zj%-Qf+^ z{a%ZfNw%AwuUCpS^7=0v}pnX35?XOic{uXQdUjO~H`DLQK-%D;w{pZd(oVM+dG<{>75ONHuITAWGs%}4mLXXoDRn!Cv zz{yCcBnH;K)UMJwA|k>~I&gfnOz$R5zh8MmR!Rv=+PB-eC>VbBM}~R6doQmC3kSr4 z+cHps4|n}`GWA_2j;Mo|doNuviEHrke(KaWJi(v*rQQC7-{3u4$J_AN;oVK*ZKiFR^VOqy8NOTSQ@^RhHu>Z=!%+<=m2ix`-p_A!u7EUj zG?^>len~L(f!nsgF=&SGns{tWMXMdXw)w#EcGYFw4b8mABfG0t0}K3V`|XQ?Ctnv- zruX%G9(k}nvdP!4RWG2y8Ct@7;lKbmfO;^nU^eewJs#MrzWAi)+7KSW^K%S#>!u%I z2h{g=04T}tu><1D8{wlP;pgW-n!H>7^n75>>+U@tNCSIz4NPD0s&_kHDVrSr4S@NA z4|v;jQ^$ZQLm`6)e$&#P`E2o=@7;Z=>%+S~QY9w8{`C7i(!+T*=uVm;vdr^paDh(0 zrv9Gpigg38c^}ltvwa!MyX%Ve*x(OeUhc3s!J+t$R4Eau$8O>$7e0 zNw_+0@G(hc!kg04XBLP&mas>&h_KBJD!itD?@C2i_!wn_+wlI6Mf7-aFMuE}+$i{q zMULi6K@z+Y@<_H?Pkl=0HZ7JSLiS9!?2%z?#cnhDOG7 zhK$*cocD;|44_i*4q>o5R6ZWvE{1g;WzUi$BY$ce%H#eNF5aV*Uz32DaM#BuwI=yu zKd&Za*vh~fOkRDAN+uBMq|GUswmS=HuoNtYzzp`$z(kKnBmC5#((89UW^kbhGHz=} z-eMLNz!0ZfT~T>rX7`=}0B=(I=p=AdXTMD@d3Tu9t_%NTzl;k_IHd!B8PVej#>?v9 zJr84t65>CeqUy(BMN#(5M2#Vs2iTdseU2Qk1vFaYgX)DY<3SV-j6ImjqzsL=cpF!w$q|vuo+I&@#?i}L(^%9kYN-S=E0U#h7ulfyeLgh z&;~4ZFu6xdhaK_$w%~Fw&$6Q#VHtVww@LHyyjR4<6^2E5_z-{a61=ADsYk}+XsMm< z)ef^uAbxQ;>Cb=mhr!EW)5Ju(n$V-A{)9#*X7IA0#n3l6R))#6iNsM8f+n^2%*2)l zl!-W5%Ybc6G>Ih_G~w9{t`36b#ST7WyaSTN)_m4)NO*2SEOs-)*BCfwlFj?B9U9d` zT_*;4IFliaZIIxtC620tlJJ}M_gZc!^V&EZjyVvNv!WnqTunJu~T)QxgU@|*0@GpGGN|RUcEQXn1k;`@ujXa4* z zH8LoEbkHi9W}%WSkjJ94ux(@@Z_HtYFkWNi-sJ7u{mOtJEqJQZ7xXFZ@@9+H`!asO z<7S@s+JpzXb2x zFNFO#7QADH7WlBZ@|O!i;p`2Iz*pO8gC1AoWzWbhJ2*HEC|mtqOIBIfx8Rb&EdBC! zbk>I$u#eT>OP*I|p{e3KX6*Lo~LY-#ptv?H8pzq(R9gXeex&;;e z508+IjE?%o$O_Na(|Ip5jdyO61mkzy)Z#{fi|HqTFTPwaL4}>O}alZvV zav06cG3-pVkSXL;4~z5teG6L;qqVR#=^g1yV8ToN=w)`x=}WVSTAl;x1RNcVD?+O< z4qJ%88{{`xtRHDVyFAE0GUwX&SEg;0!=w9y7EQ@n&H)afcR3K;d~WpUbSik$Y3Wb; zmz_{gOK<*h`O;ULufO;-8FZ{LNPkfL_G9(qyqTl3ahq%~CYa;cF?jLa537?qz-((lc+F+Td}XYojjRN=)l+#&-xK1S&HTVJv8^XccmZlUhd zEZiC&-@bTZ!rM6C!ot3KS$HJl^qZxltUh$Auq!S)Jif78NeD@cIl_Et2l4VPoLfAz z2yL99quNEo2p^-=2iyIX2Rohv$F3^+9S0hhh3R9*-)_O6okclLI}p{vkMWbvW08F6 zkSqPtohzgEbzrSDKKSE}bvRoumjx0XrUT4GIwQGEuO)PSYs_vW7^F4A7>D~UU;Spj zr*$NoW|qcu*o19O4AvWumQW&02|L+`eRpssmp}5l1}L!WSNX@AkYJxQ;KT9vCy7BX z{h<8{r}DWE;ikJsLa#e4Q)IJtMuD90Qaxoy&KF2VDO~% zZkxs1bxkQTrC-V)D)e?i)G;L;9O_I%rNND`?`IuuirD7hP8zFChn{){PkW-VJC{Lr zpvTlEjSHs%w(2vfN;gPtF`d`SET=m74PHMMf2^N7yoc~`leSl@R?0Wn!K8fPmqr3! 
zIAhS0(1V2LpljSlUj^2QLbtbTQnBy=BWRSa`Su%(u7Iuu43B!=XVT2QM2q&TF1_8t z>84ku2g=K68rG#o|K4riPupzyW4yJdF7j>jmKqR3ap(f!E??lS8!aKrHr=*7Y(3>k zEWI}Goqv;0S?+y$H~9x6(%w}bxx*rg6wbXVrkRh_HXDqkI!;m7NRU%QT835U=Wo&%@E7KFjEXe&u=DbF_~iC{OE| z%7R<^p%HliZgeRBj|^%s8fu*CrJd;ra# z;bjj3qNcM*0iP+fByhsN!}IImv<=?IR}$`!!!JFbRQ|Y|zPj3K()H{WJ<0F><6mjf zVL;Xn=rEM`YAU(DcRy$06C~-Tp3sm`UBYYKt-K&jx;2Qebn3g%UulCJ9_Tj*4J&A2 z8W~F|u_xNabC0C$g@5h_`Tpv=6j+t>W-_B=cZ1ujeErmYy4nqFbqntSB_DW`Y{7L4ncXu2&_#aF z^WX({cw}I0=^Q@}jg%Px%I;tM{F<3MFFoILO-bzst zE@ctcBqk%*-ojVpMb2aH=A91h-1mAhGLKDtCZs`aB0jqbk2<;zH?d{VQ>Wv30X|O2 z&194^UVSOh!diqUd>#~&^0k!H-9*MgZM^5S!>zn)Z0}@Txu1f0D$fdrx5GU{|GOE< z7|+k0;{Cp8Db_4Q2YzOxcD;Df7SueHo-_$EIm53EV3gD`8b{Zgd2|VVaOCWT%_WDF zWmKS;4*%hfZp1qu@7Lh0*tHo{BK7M{rcCfK01qn$^oM!)jn_|z!v9JK^*DjvHf;xz zGE(iEc4z3!)1v}-cQ9H#PEjN)O``cXKmsp{%EeoSA%zG0OgMWftPTXp>#Dfy)dQknvg5B(4;A*5`(9Uq7i}H{ePx>b7CMuM09^EHT9&h5- zB%(?Ao>o9!49!8yAfLjne%?vH=XG&OGvv*rEX1qB$dS_WK7VCM&iJP9DSs9f>o6uQ z45uc5p4nE)(0Hhcfbtn;7#4Yw-OuaDB#MU+0}Q$_NV^z`g?11ZcMgwi0&602r?!@d zSM8yf#mwzDqX};zGK%rRL6O>xlK-H|72_0~@Boi3i&YOY449DF>A;(QCTe+FY5#jB zSn=Y7(}*5-f4DpR#CY`erLSi4WI`Yu0;3{)T8il-BujB9n_Mb~{HN^m`XfKc00yIO z2cPx5U8t#V=r?%9B-^C^SaRV&-buE_3I#w8+a^!Pv4~<~&3jS#Z{(q+4i?Vz69#dU zJdV==l zvoU^WkYZ>!*~FX?$wcvX?M;9%wwVZ<{F8&mZVN-Nzpft|L+dYIs2i7lj!5jPF8lK#lRon7~lMTb9x7NX9<$kMaEC`SVTuj^&BE z;+TH=yU*g++Nen?e8AvgClt*)@0{!Gglo|`V@GnsWFFoweTod>In7huV8QtNp#CA8 z)0^!^8kw06fDRTmPq*9X$K=J8Z@-S#LO=1;dbasbg_QWoyYI%g--ZLJ(gZX*GMwR4 zM$~#g;TTP->VbId?L6FH*1mSvT)p=F=ADc0Y3u;5(V{c1E+=Uun7jp@d+G5*IZ!V;M`&^e4f2cidL z$GNHg`5M77Xpx75LOK*m9Hw6DMZBK+2F9CBe68R8Q%`(X=K z^h0s$@rA<&D|imXwn(5~iknOhSoGS>7vXU@!yZ1MOB`yafWG%E8U6BE@;%s*yM>t+ zuBrz)OGeyp9K3YB@FgvX{rzvhsE;uiwxc1wcE-qqywdOYdHs$md;uI?lK`U)3LF+aY%G&1FQ#@M4L zEQm(m@9#ENwada`k!U~$wiEBia~AHRcm2*e7#6F{_gd-(8)k=Yg~=-+ndyjD9oK=*7i^yQ0R zwaC(W1hqNb={mfj;bol1%V^4(fF3|j8aIFb$A6Xs(4h_qzFZ%7283Om4>#w3@{`TG zAAB&oWb{=Z^C=;j^c6U2M{^C1C@!A$-+Y^AeEsFZ&;BUc+FYr!jUP#bk<}-ozkYTp zzP{c#cK+PO>J`o6cL#@)%N9R4A=w4zP*ewaGGx;s_P^#_4h|VByro@UXoc^OHLv_S zT?>C3!}JY17aWp(`c!9i#INXYM;2b?JaY7{Lh{_WHubV#3?H&(ERlK=1j`WR*8{l5_HuVz`b?pC?HIo|IDj(c(Z4=70=rQf%Cjxj)1FKU_{pzsl zFb1#MdKFShb1VI1nDVAAH0YE+*aRbCp}iziK0_!seZL!E*0tz0_obQVb=bjGqXtUK zrI2ga-Y8=@w-<`)kFutQVE07dl1lw30t?qkG3759yT|C*XUm(bZ%C^b$QuSzwVtU= z&!%0`tk+t?(*;wzmQ!7Nr}6{G?|w=+ZSTHITGx2JPkIm6^VMbxj$()0EAh|)d};c- zUY2p%+;=!n4SJ95$d3h}AGL3iwcllJ;1cHM_34{{54GLMq=0qrXRj748C~dBIV?re z#v`L_RF*#IQdifkr|q6=`+>WIzx_r}NY!TQkyg;ps5kirN`F*l{!evQ$F00qXf9Zb ztkG_6dN#yEOZVXRA8zP0@46qH0@OR(Sf>Y1g%zuSo(x@AJ{Zjv-UAjbwlLQVbchdk zZ^eC83cM3o|Vw=o=v@a?ULD_ z_1}+0;~uF0BnQVOs|R5AV+_z++@%mB6!OqS$E|K_( zwxLp%7@!!HO{5vG_GIj_IK@kvd^4dGUv;U_xV07vpej)@3!H!jw15-X@ceQt(Wee#kp3ym(boY{iit z`C8mvlc7;|!;>MIf_5`6K}OrRqk#jBuB22t{K&+0#(?C_z7%1L0_r{oLDsi<*O)Xj zwiaDuzTYShqt$`7JYLJVcRFR#gvNMxH$`_(6GRhB9xZ6V0HBF^b~3&n&HL(6lTF;? 
zU`-y?`YexJadB}2B^ck0_dh7XHWAw2H_uhZ3iM$V%KAJw^FkdOWMnhhMcXmRk)s+S zBwl7*L{pP8229T#4#+EZ40O?*CyV}Mfj~dEdmvxN_+)v@3T?6swU;XAwG0<|L-ucs zg27=ZH%V1C#Zoi6=S^+W#?x5H4dWkWRX!6YhE)qV43IqPDDVud6#O4E=um=%2yv+= zImPp zu%exJ%}!Rd-LwS(?L}@d=De)kO}tD9@fL&1-RQT((h6T<2ZqTCT8cl(OJ1TO;+%zzGxys4oql?QmF?6_}zz1aDpQarO6>4wp+v^4c(n= z#)nVt;V1Exc`O#wIbK}hD0q=CU&}}Gjj8QKA0hW4it?BJf!s(W8y57)o$a#*KatEmw3!YJUu8Tfa6$b}Ap}#a;4{qWnp1IrNPE9sEGGP!hYQfXEEBwxT@8@M3 zEwwM6$3Nb?o<^sK$xC!Y2@9JRN66@!V+>aN_S+GVK_?iq8>jqu*+kf++JqV{$G{UE z8J~~mg-3=n>iafiG4fiBI@SBz-+wiH_BMDf^#ND%||?FAZ%9JOYDzG=r&efFV>H z-^hRk{s*0d;K1H%*KX8*#Cl%s0PZ6TPSMDCffg3^w6At015KpKv86weX&uIQ?N&5z z!6;tE_l!>3W|%MeMjlz5(ht5*F4*zmFkqh9Wb5@7GAIM*duD9VP8I|7D@G8!$y1au z$^nITGo3t{H*S5E;Y&HpB9-cco-jEW=!&q_uiR|U{`9(tC#P^_ZiRY3l{J! ztmr>zapL6ZjrhQ`xbRvsA-XbjFrF|nGAPZ$bbLl`ILz(l&1;j63?K_{#cLM4)y)D9 z-d86Vd2Wn%E(FisBN5-+$ zuxP9v4E6Ywd_Zu!g1}I}9!ws_(shS~A@#f05>{i9@FI})-%*tul^N0_T5$;kJ?GrGNeNzU~hk6LKHm#h}@<;1DH z^;`U+2U&2wn_=#L21vNjA&o&iHBZ%dBj4sp^5IsV>*V2~C;&~f3i>?kdEhBF6^8gY1;%yglZK4WatZ^1Uch$*iB zlj{~37)@v1R6cyDJVs*+5iiNv_!mv=d;>3Fv+z#ed(c8GFMWrpf)6orpWbN^hfGE1 z(P83cjs`+h(EI5a!uHt_MTdB;1@pU)3K#UzPe(tBaMOnqSF3LE)wh?v9l33{m;-X{ zXy63EGv1CoeS}O{{W~4Ke)c#3m%sf28tt&W&_c+$Xqetp+tH;sC0O{>N68j^e)?29 zF@x#60tQzLh@TcmTzEkXQTH;29&I7){KXH#r`Yw4B`S{}w8QRP9|wzG>d-}WC4(9D z>0SEx{in(2+S6GLdy^Xn53j{y#_eglaHk`#Y^KNq9NC zcAh>fyiOH{&p3~CKl&XVecHG&q`qg|m__a8G0HrcRUa(cJ;f>sWTSR#ZF)IlQ;LLD@1F0_@88Wcev{4CDAc}d5FK;p_X5c9{Sq74(m;a zfBWqV>9?n+E$Qfc(#?&>^xgyM6vuxQp6cxMWghDIeH=G|J!_1;AwA zedz`x=>Uuf3W|mMPEd`oDn*QE;B>++*sd{Z?6Q{woCSMckI)|@NHE8+>wOJ62Bawm zkY%`7?}6yOGL_fA!KL5x-0MEB5qi>ir(V5qZ&E5P#*qfzU<|-*?lGJ+V0b?G$s$?l zx7F~gXH>*hHrfHT@KR>Kw)9w_mJ&M|)prtuvCRh#2%b;T^82}9P5B`^isC%q(ZA&C zH|+tR6qw>Iqw^dXSwY%-fN7V0?-oVmK?5lkLR*R|r?j49Ooo0ShL+O83E|XP+R7ZV zx+j4ohEo-{Wn$g;SlMW(T-gU7v;d<|zt<$Bm(y+%0S8`i;2L~C<8r0QMS88@L+BoY zR@&9py;mly-jop>K>c(#z_vw%$GpOOmGgRKzTzW^OFQ(Et~yTv%28L<+yMn-SN=TP z{@(JU-0G*3gSYxvG#M0U6br)EKp)_#)-1Nf4JMTUs0O0DCf}AW;QBZ9U9ShQwwj;P zp`(1I8#=5unAhq!Mfg*qgs}pqzfWDa`Lwh4m48S&fTmvZ4csYnQDqs0OM?bGkV`}ED>v-0$OU??KlIBiDp3LkHj5C4VN4py)OIj{z&AVLQ$gq#19Yvmm% zN~zN7+CPw?wi0^>;Fi7~dpT(h9Hq{PYQaqXXCmykOIGOZ?tRZy8Kp&6kio*wydY+x z*duK>SipH(zV;b%O(z4s6!SoX!5ggk>u&0-Y-AXjiq6un3P|#MWef-p`X9UnZSWe3 z#D}u2qTQ_WrZn#Y>#U36o9?TQB(Lh>Hvn*6H1li0C|OVa?a3DZf$NXQU{x^m=g-uk z`@wPLp29+6;FJ*Lm0q&aO#T4I9K%;`f#~uZ-3L2KgF|7S@g+H@PJ2@v{@4HUUv7SP z@BiF0_Xz z@ZD%a!N_V7%m9xqMw4wi|Vi?N2czkYxYjKwoCh%eTO z^5rf4^sVSY;pBB<0-{%&q@wel*R|`~x{YIu*XpMo7!Op7VHmv_6yU|H`E*8$#}2%z z?x8tGDibmWHWR-C(Uc+7&O*koIl!}Ya5P!{zOC=G^|#D{7}_7pXmc&&Dq}3%l<$B` z?a4?n1(hCs7(~#VCn`@x6D@5B7#Tr1mR~3d-fY@jxeR2(wc4pX6n`-vwSh^+Gh1bA z8(HI&t2n{JsvJ%6=1qJhgO@fl zLFWAjC*CoP#5`i~pO~49VE79jj0JcfpIyy}_V(#BYm%2C@JvQQ#;2V@1h~Yq;FFRo}>fH=P86$Lbad7Pzz@i;mn81yckLR@VnjqV0<~1QOLrE$vuxeA)X#~$fg||ASwkTHe-nysYpxKTbHf!p79+uHCpEUGlyR_qQ5vIRr2=@8q6Cf-OvTBtR_f?1zk5 z)rGNm!Zh`m+^|TiZ#uY=aEIsRw&|bCP_?J_XJCJ}r7wOrhS)))?eM)huW|NaG~X8v z+UbW3vy6IT5szb-bjeZ7$2cnfIo`k4?g}HePGFp6)Y2CjuoypiyUqef?P6hM7KBSn z#*ueqq=Tp#_31?nYn&GB0ubwbZ*g?(5W8Hg@fD|9f6u|Ff8Q3)=eTjV;-4oo7}9yQ z-tREn{bWMB{obg)_``dPS7eThA$R1=o;+VG&tfJ6CPOE$PCRsf#)uy>Qj>3R$9rJnBMW>_o?IK5 z?d%*pz*BW}GS2=U!3$GU+esv=)_n` z-dkvJ(D0cSWA0|~E@-$QEsCw@Fo*3t#I$Pvet2$zCZ`$qgc z{;$>d*Mhd)59FM(w99y|*XE3?4pzpO7Sm`h;Av7>w|FFaJT4jl6vi&(wRJlS;uSiv9S!;h*(>(Bama{Pd4vk9y(^J0$oHowkk&F| z?4I|Ymnz{z`Jk*UqZDOCI^wyj_pk0J4KS79rM&$cp;ex`zalDmp7wfrL$au{6FA;c z?9y6$7mRRH)+z%?I47`78Wh1V{VF6Zw=g^(!%zsI%Ot5IxU|jCY%1+R zpI!S_4vgS|zS9QkEm7~llx>wMS@cPgO}j5Z)9%9`3501AL{P;kubkVu3?B@h;XV9f zbr4-Wl&5?3@98X3C|7;7Q+4-T?WTSEwQ%U+j#fSMz6X7P3|5&IUFOMxjc8N1;7=Pv 
z>3FK2nyliP?S$IUlKWSA?`vBgLt-~F zx*4;l1I>gO&_DDS-bwP}*~L)Z{Ytoq;?nc(h&SShn;ZbX0BN6o{P9d!OgMoYfG(47 zakz!`&{jNc8PdQ-ATi+6XTX8RFB1r50Ve_B4uM0%qw?|BWc}LrHv(9#4?|8av2Ra{Ev%E>0Ow{(gpH0SjPXh2yr4vj-v>A`PM=jVf=AWdS zj#!)snE%O7|Gc{F+x+3H-%ZFH`i!2FpF_;<=6!nO$y&?-{NOKdq45l^jd=H(TOGb=|-(4TGOTr<) z$MTp4q?v#p=rFDi0)>SEIZ&vPhYt%oQeA+By=b}t`%Cxql`|63u_L$ft^PORHLAP5bZEO}xL4vI&wL9fdjp^^PhYfe!-@?zj8r4}tRk!+-PNmZpPuumA0UUr+Cf)!Ag%!X(*k0RFzv z8-Nj>#lnm{P0s^5BXHt&h6e)43Z!ucPn2H+tpa17UUQ_m7=9;Giyjkr&^q zSwICAs`HZvVzM8aK8jcPdav#A?1IP1!0E<8-mc=oKWW@LOqN@0JrhX(vO3<2pJaM( zyl3M9i+S2N;?@M(cgec?03)W&tDi&Tgz@@Sp-w*f;KTG~3oDJIC;oq2{n^i@N0R6H zo!BE7`^E@H1j*o@$=s^CDyvIV)3agJ7)%S0ppgJUcY+3(e@AzME~P772!bv&tr4cx zQ?2PnEm@VBmB~z!NirCWec$(0-;X`#Wfyc}InQ|>f85Q@&CSh!Zf+h>5__!$;XTwAEL`irZ2fa~zogTEKil#yw545`7$Dxg zbQJn6gXIUx@0 z*rCf_G=A{b98m71wz<=|oy^pa=p1bY1b*ImEJ9eFd}}$_81SV|4h*Dg=o0aP#dxQO z^({M0c#7hu#aMvQi`w>bG2qo-AJ8U%n4kar&#P}9vW>6$(Sr`bH6H`;lbL&?_jgo= zL;kW84!=1z(u_0Fr~FZW^=tZ$OlnUHdBPKIsm|#9?(8=mBzUe}7{^yX`1EImFWR#D z@{7+_r{Di@^~Znwr_r2E7BCEK+!Xysj}%M2^2i6CvxU*}-I;F&SZ$=jtQOp&1*ZPr>BMxQ`~gp6no0_P+WfojCl>6Z1wp1}P6cvB$fFD>?CVl7@bde)r9Y$uNX-j<4mv|G?|He!0;Wf58UQcK` z`lx!)Nz1{UOMcpn-MTfNu+=Z0KU05gcWU#I9mU`WXzxnr?$8ffpm^uK4+BoG);9ta zvk@9e%Q0RhpOsem_m(jtdX2r_v1q9OJZcW}?0F0L%_%Ij5gi`->dkhvgY|BU%eR|{ zp~*tnkweEu|A-rJ*BV`87YF&dQ8+r`rqCx3;W*GC%+Va?n%6!WZrli5udj{1j1YR2 zt%H|$DsRyR)gzl%AEs}P7MmPDfu#+$&8cAQLTtn!$}fatq*<7Pb2C*oIQgpgo&JG|FU1qX~OLrBU^ z&y(m9ny2~9oF^ID6CZY@Pb~I4Y+h!8Tpy(H#(wL1Tk^NFxfoeC<`~nOO2r$yiFUV8 zZ2s9?uW<|icO^&kn}wK-aon5$FKaDLy%-UQkzl0C_Xt7+>Q(E`?&;jGl5PV$sE`4E z`sJS2DD`wpIZB9KDk}Ho;Q%`8JM*Z-OL0ASBY~0G{E`JSJedOI6|LPsah&P*#xt*Z z-b&U0;q8tOrk;TDfecS?HWHq-rfr84sAYXkgW)a#NS0Tk;}AEe25;VK?%0Q*x;^x^@khP=L~tg^xe zKYf;1(SCMwSoDwHStlG?G-RokHj`^~$jAN3E2as?>z?=4?eRK=GtV3RJd?17{bmYd_`S z!+1~2OdaN3Nl>Jm63+#?=Egj8Q#O-d15z5L>*_}1P5w)M(rtj>%YBMp-tv{2J9%?T|PQjRkuN$Cj44GnTr>SQNmu8TzJ|xFzpLUXk z_QlWsC}6(RF$!Gxi)XI!s-)7bp90oRNtgCoYW6){L!sQ3JKra&&8Hs8$h3#{njF{f zkBqp3YrPv!UZ$?GevGFJ|H*LleV_l*3X^YHEWiFyIod#*DsyO< zL<@fuNKV)CJtEcPNh_zW$vN8pId#@1b+4O{_21G zpa1hFSq+qAqx=~$4;tL>WB@#Eg`VfnOq%2ruaGhgun1+jt4#OF=(-Y%PN=SEh+am( z#RljbO<3EmnnB3(iB~Mo$d>^aCM+}FM8tsr7M@a%Gj@*Up|Cf@?a3Qrn!XNn+PylQ zLCP?ckGcb-{_eM*n{AA7^B_Qm+)Y<0KlOSeV@KL&&+cS^IW)2DeHkshjq%9>Bam>6 z-CnqvVcxCbF~02!6yqrhSbs2{e#&pFX7ceQzVi%RfVl-QCf5ut?*ijYM7BUhez~OJgk^pv0W`KilHrY6l*P$_59>Ra?S85Z<^^`WAbhq^EunpZ~ z!oXwedLV~`?sNpQMBZLbU!C4EGGQSEFL-mg3#_3>ckDPg^r{O3+vL} zn#`N*GO%Y7nRoD$`sU8~gvUZ1m}KCKcP$dp~`) zurlG8Nk(*z4#@Z?m+~DgS((N=lQ`fWaFj>z=FPxy=L@vZlT19SZF$VxD6a37JoKJL z+nzTzfT|@N$)Z)WxU)|IXk^S{%l(-cr31P@9a;I!@5q@pwuomp96oF4kMdJdPJvCag{Vqqbl z0s@q&o;sucNKexb+8RjVrt=Q;GmC=81cG7l@jv*JpRfMg|KVTN-vVBGPksOeo~Hm? zTf+e+%4=I~eLEeNu9o&{;~1ciQ-X!24G%>V!IybbS|k{{D$ADlN2UGCzxta2j^29f zL+SV#Y*%U5SSn;j6z8c(z9*KVx# z<@rrc=)Ec)=s?FmXeZHE(|#{r-XCz!BHzSsPrmKmxq9VFN9i_c55T&cm$#h+GJ5OS zk=5Dr=c`MAP_itZI-R_yT|sZO;Ck)W-SMj19-psYzdk^k#k9kP8Udv0O9|q%D_Pes zEfn5ujQ{xb>D8wne^foQGvlX4HlC35k1>T#!e;=6e?0GQ@5^Q=9S_evg}D)$jvuW~xv$nS1ES!lvUSum6vmPdQ#-bnp=*Q#vdOh0%xOe*FALJx(XF$L$ zwb#x7GIB%5I%L!ViU7Vh3(0cx#-(XrhxQtmKK$s1tB-&D^VN~s^mTMx&fA^dvara@ zaK@476oP8sp~ExwSghpWM)oc4@&;rJkmYOXIr`_Vlczf{6S%bWbGyQL$s0X^+B4Qv zU*4qG0&(1sX%i78UOW$Q{hyc=Jf!lw`j4w$MBw>5?_!FnDmHzojXW_c0gQka! 
zFGAu7b)|0^Q}kChC7{rufN#BZYW49Celj%gINoAJy5oQRAOFqjoyM6T{@@4W$zPRP zm?*qWy8^aUF3)vwy2--+zIjUO?ltRcWAzvR;eS5%*ZsWKEnMn%KtsIJ-_WDaD_{62eA3Rwf~_sMtMB#~ zJ64C{_wB;leER8+M<=s6$noz#`(5(c0!4Lgd0+rQBhTzCpy%p8n>&D*PGlbppj~@g zOvV$s-y!h6`+qm6uk#SUvdk`7=i=~q0~jX};#iJfEL=ua~@ ztXdmjJs=JB&Uh_|LM_Z^xAPxsjN`sn=!0n0;A z3oY#edEU4PH0K1MO&kh3{ow@}P4AZ0Le74As~t(4sGJ8vcMuls{$}yh&3}Np!qc&N zII=jGU~~MF@9C3;J!2o?MseN-QDxI5i5_Fi&eVRi|rWv7E3D-RnpZU$I) zPDlEPOimcS+EdsY3wIo1EY=;0Rtu08x-7=80Kbjf11iV2hlP6CpwUjr8hB~r%6X+Rh1MJO?Uu;CN#J)lRZ&EiBsciHGEkjSWPn3$-1u z{d>+bTIKDYox_>NT!bv_&&E6Z<(2qjS4lcD{y)hkQ&&z4)9BG^A;1Bt>SMu2XfNek zT%hWB!E?I7=c+X0nl_-jEPUyAPkLqsW(V1gTaDY*!6J=?WQzmnI}?XIKO*gCUdT6*-%B5ez54~5vP?x7kC zTMbk1NOAhzjWKK&u({v!>1G7&{pw-f#Yty)wq9jhpYJiX$hG9wRZsYKdalgb}|WwTsHndEw5`P8y~_15G&NqRmieBEdzrpzjU5XfU%rZl7BWHV2f?~Q`w zDew7g@Ik`m#yp!Gq?1$yx?bvCA~ZD{Lmu9KgG{U?y;GaB8tsc`{-j}@2Fxh=CYu?)^qXa%1 zvO7N(8YYh^NSZQNwy#kU-jAMnFGYoR_4nx!y47Q;^YVLXRdvCUc-f!Hb^28_DX{V; zn`j+gD8J4=axC$>ori`YaHFh^wvSH>kwc0yG80g<mlwhL_JRsx!9&i(-l}F7^dk;Ml zyWRbHZ+?8ACoV2!x<4tK3SafB*S&BarjoojtcY`R38p?Tn?5i+OFkC9jfWd9DGo%5sd- zX9m^=?a8M)OutGF7ak}lUQOnehfV!ibdR#I;g7B!mU%BV5JzSmr zfBmb~fsBzI0bt*py*%5R-(Uzw1JA8*E;qUFaKv31Uk+#i3LegTuIc z?=gOp3kS>bWHP}qw(i?kEbqJ`01p#8HhKZi>I)$M#czJ!qC=iK@d$YOBBJDvwjFt7 zXfuWh!w3LKe_hF2(E(}XQ#ySHKMn*y05pd`m3KSZ|KWF^ul}Q-|C3dY0mWJk{IBld zEfW&2;^B;2dZ{ovq1U(Kc!vnmr zBf+AFH1aml^`71_9%a#P5)XLfi7Ni&yQg?rHu=p%7D)B5iN_Cq@S_e%JG%PU|N1YJ z<7EtzpMMNoW7w3P(azhSbiLUG+u>@DnrIyjL_T}&Z1P{5B#R~&M-CpzPzDqi${}!p zCnw_{cz3%A&C5Wh4^F>7yuThGGaf|iHhkP-3@rmFJP?p_=KR?Jjb#gHYed7hG7g~X zFhqYM6d(1U86eiY;8CV+jEqm)Ww9r4l1YM};z8eOl4TMttb+bdju`U0Td>k+cwg;m z@^Uz!?P?x&vNs{NfIsnM6AeD}gqX1O8{I?~ONTycWcB?ct0Fu}V& zVVsFP0k!Bbz-f7qe;^xr^+jIcOIVk-man~;bwJ@0URS@D#ai8#P4e_ro?Fp118)WE zhyIzw0!GY(c%}<`W1_!fcfeG%Xji(@J$g#WD*%%%xu?JNHvpPf{c`B9addA0w>|~n{>^8ft*&;+;h+7{kEh-K z{*ZwB)Jg9{l<9$a$YToE-{I5UVNw@#xkrS;)J!e-;l826|q-abwyt0UGbqCr>W4u+ii^*#}t9R`h;+_sd@e1QedB z{s}}R_iw)ScDqKdPCe+4Dy4VKJE!0KAYkiwJWDUu#^R(t3E;gS@TJciYeH|MH*enT z5uV=Gldgj*k%PmG+9NlTrXvX569SuBx&;t0S@4gw(y24gl0(LAESer}W9r*)&&;;{Teq%FTP}b{3*NN}`5LF9+7__B>rfz0 z0L-1v3=vL<>J``hAzObK4D=`zT2|f#9yBUWo5-j<+F*N2kZ&4M&2);KRfbq^5i?^ zLLP+)D6~Rjm9c*VXtk>cO^bItr;PduC!Xp>zlrJVus`*Em=4!hfa&_Lv3Ww})EVe3 z<1@#R8Mk|`jR(JpU;3qm2qDtgc^`iG!^%qc)*r5wBO^>U#V>4lllSY z=xV!w=+)o;_LsB30K^5XTJS@!&`O*~Ea1Lum%@Y}jE+O?j4)0E2;XQYf<*-(Qpm`U zfBci7`DzC}4(OFY2`#5wdRS0l3*F|64IiV60LZ{MW5j}0#jofezSR!^`|Mu}5ElP@ z*jb~G8b&s!+X3Z9bGw#-rN(ayG1H%FFXNvD7+$q0uo*L_yyU}cUr!kphydB|ocd_! 
zeUVo+J+%AS-s&G;)7$s+wzj*JY$8It7)Q0klRJ;bPQBKyfy0N7)d!A@Ei>b7^`MI$ z2l@(`M}EkrcGkyc$6x)#$Dx>eg&N8J0stEq4(DK_PrRy6$HlQ_NFq=h|2CH&EP_4H zOI#iClkBj!*m7H&tI;RM93Z{WYUmDzt-Z3L%4@fFh-U}i*f(XfL0+YEUd3}^0*y}& z>wJ_`{vvFLS8)=Ie{K zg>wlCO?ULj==?9g`28#r(Sa5hg`(QFqea0ML+!Y|di_fA!#~N6?r_k?G$Cg2(gNV2 z^qc-;XPq4ac3B)}W2MjcE3$w$yLKMRfg-1x%6!xq z&B=;xC!ZF_7Ytyr!L&`Vc66pY8s83P=WT0&NguXr%Gl&!;IU=Q32{DN9St9F@TZW2 z!Ud5F@{5-R#xcq9p=ePcrn$Xi45YI!tR`>`Q$&=#$43G31qtEE$4B7M?G8Igmv|4{jk3ne`WMv|pk!?=Wgycs>9n z?vn{=yEg-O`J;D0QaqosmiLzXuGgP+yDg0*lm1l!3Uz2~Vl;dhzOCQW`Os9^Xcz&Q ze3skVWn`(mK#_4ku1j6#s(j}CY00V0@N?cv>6hn5eTK^Jj|@z)OO8WZ|1EV}$Tsjs zdG`#@m9Z{n=`R~U(1My$l<#t>t_>93=0)H=887dZI$9~gd8gXsFfPGTHkv*6j`o{6 z^kedn3MI?C%RT&xr({5V$k*`J55L`CmraXKxl6uEMe;&6Iu`)ni_1dN-0J64Wbz(H zl0jvVH59FN*o3QbUmqDN@O_#+{|ishjMy{N%07IPj0_ zQIRs6GQ5T{rCkg%-}AV8JxqW3XjlKwLseN`>7u$zrVL3Zzy6cHwwyYUiI}Y&rZ2gg z@-|-ZQar~}ysB*$9qtYJ$ZsidXm;t=#_cYb#zdIf^bb!|MPzlNXNHq0a0!T61~6^?=v`J&dBGQST!c-^F4L0((z*2r*cu$s6L0OTx&3Y zm8@LJU-g69is}&*KM?c-$5`UU)%0|h&cg1o|tHu)I7?75}TX6 znpB{j_k#wN2QOHQ7$$s-;Rl6bSUevVkeK%tK*`}p>W4apBQO7(0W`J=BWXgiRF+T^ z;)p*FoE7`ncnFMOyps`N0S`Jr=&j@z4daofg7q=&$Y>pK61Y`eU0blQy?6nI^R9Z) zfp)dAZM^Elpa+8B;l^Q(CgTi9z#32-K=Gp={BX9`I#BjrM)fcL?r$nD?UU>iG9J|? z3b?a30CwV-H)%1EIn|_}7tweDX9)AMad0VvmDi^Q9TV;a)Z8+jh_o6Wt%|#Ur+y0H znKfz$K%oCX%NtZZKm)&CV;#CI}iQl**ege$# z9hhXXVwq?chyLodkqrwtZ|30!08wv%2$|9D-%sYc#v2FJqLB^*J3UNx9RxceJZeX~ zHhjnwy+gMto7{{(>n2i|JWV@D3w z9}5K(o#YQqGxkKMK16Stc#o%3^$)ffKJdgACv`ye`Ux*obkNUa%a&S`BsAF>aWYWR z0*#p10DgMxMTY|GZ#)Ibu>O2Iu$;%3Ykky2aYN?-PiBFxcf>IT*3tWhr`Ofd;)Gow zhXZNx0oZPYA~z2MFbT(*bLSQr>%BR=Eq%hfR=(mkVBNG=ceK)bMZ=~b3_v}ol z4s_n##`^b8zS%etNYjMG8s?~PsfUAqoGeE03roox5dx5?_R`d9#d=R4d@#~QL&IFQ%^8tHwN?EdkO ze^gy~5-#D2Op?ij&@|%1zW@H|35UgQvJ*o{6^jS^_5+Foow_fi6P*Rr)0fGX`~V&f zLgSI`nup)^>Y?9#^wCEF6hEDQFYoa-uI(%W01|ii-r;~K9S9%jA9_>2xpV9K>SBwF z78CV#AdL_oZ@>Hg>Uw2e`u3}AvM*WybMM^&6aZP|siMb=5}2{6z5p~8CP6&pgXt{W z*lp=Q*20>_8;3pf$^{_33NS*ec3^`%4E)e%9Ok%f7B+d>ZmUgCj$WW!jB68ij2xw7 z4;?+Z`q|zio%wLQyk4xn`TcLEuhDaxN^|=3`__9>s60f|8}-{+h);0=GA)1#(dDBr z&Ei(H@G8|$EH3UTES52s%o>;3Cp>h8y18?s#e(`DnPbcEX<=Y-04QE>_pe-kIAQcA z>{fkQ|2!2aPv>ZB-qS$R)9-)S0@8tLOLjOjespt-H~I!$flm{1CHb0gBH0QS7laA} zy7LaEJ2(%`;m?it0A+GX|LE^_0&=uaSD}vfCHKZ^`bXPq5AOi_fcs*~)5q*UJlU1J z-z-ewEReKtV3*sL7Wpj1M{;E>P8*q6iwsvNFjfB(CWXhX`0>0o**6CQO^+Wvk^OdM zbn3BVodc3*?Twp-AgWyU?X-715#FOFXs1#-sc+uaK(})jzKe%j#~v`RvG^%=zVI{t z@p9IdGf(OHty@>3e@`Jljz>cWHK!8}2AqHR!3X7;ZeJfnt>4kjCr+NM4&Mdr3tvXx zTYyieRY|)lcnWV%UX|q?_R5PE>%_CBlXkrnTFDO54yAwALB;fLtTN|W##bPIb*;}Q zMAaq7k^SU~Q^>NoEbNeRi|zR!JqZNVCh9fzQ4Aq>Wxe|BAO3Cyl%B(ZJ(2}?nz?5j_2uRf{r7QVk+FT%F~Q2oxG`)05giwf*=4hHnM@l`>>Ox&vv>?xe& ztGkOWt6ql=9GSyF0ngX7@n0vKfW=Pe!@Wi(OpuORv>`&oh%ty?f?g^(Ngj60on>O#9cCxz{ z$jdewJS(|l+w^1f1^i|f1!@!Hg8qh=^sTk0Yvzcqu zadTMdfaE}Hyk$4j>&7#7?Y?N>c*Bn2b#CVfhm;+SN$fyg<~xq%#mxO^jjc?{tTWfo}3W4+ZER52~N%`$cH=4_ckI(zvxWyh6d)~a3LyK|dPJLrv^9Ft9 zP>U06-g=HMGUd$o^rXa)0Y_;X7)HBkUn%PFh}YTU&Hx?*T5jaC&SieG#$zCYHEe&U zIC(n^(hvE#(xE1QiMp7_GwPV2M^D*|H{kj`iDU=e1r7~8UR;+aZ_h_5{r`W<&mYNq zW+iYbeJQ(J>$)!&CNFEU$`DAjQC`mxJv4RO+}`hv+?LE|QsS=rZvFdD`NmI;?Zvt7 z{iRctdU!IF`hnufUmj`?pqp6aQ|5BLfbIBNBH4OxgTC5ByxP#V$}E@3Q+-MouNUyp zJ7_?U2kUEP2wme#v{uN5b;*yZg(+M!A&N0;D67jQG3oVCa|| zOFjcc;<<9rCFAm&JY_F$kLHR0Cz&KYEN^x8nKDMj4u3|Tx;f>R?|3=mm48&RGAneE z?{3LZ`%fKxp?!UO1XS9}4@8=JqES{9;#`we>3eU=D8HqXt?#e%FU!2>`G0(`}Mp@IJ%X7%-zo}CL*gDPs{onp#^{+nrdUg3; ztMm6CuP!!l|E$5QY1HI5G;aeF_c%ZFMzx~e2A&@TuYI&Ojem?v1>$Z}AFh&7T=SEvG z?-YXLy^}vI-#h{{q}RYp2^yta-Vk4Un~?7{rX}2N|jkl>t6J$SYU5_wr(N!0D@G`am?? 
z2}sc&js}wNYhmJEZH`8|g`xgV3n?aGkK*^8%Bt`w9}lz{*=C3VL0>sHq&7mMNglrI zCX7B4LBJ&M$h|ETFg_V~wl(#>Sif)P(w9+`}IfWY3}ZNZOMykh?2zxa>x9Jw-Wb2`sMJP;1R zSg|>}K7RiPSq4`#6uXnm(W_ZH6pxY`TPfe&;8{Xu^2rmHpG{y>xCg}Jz%9u)ckAW9w` zQf*fPy67(a-QPHSbJe&Qz2gyG20RaWm0fA9Psm4CzuRK(w=EEO|D98(CN8bX?D580 z?QCnm$*P6a`%TOp*a_q`Y5w5-_ZpvXuYU0lziTJg{nbYuO6tH#ljTL$18@Vbgd#H@ zUMlhP7Wi-0YZxeSr8pOBd4Gm-f)Cf!RDt;V(^Q}^^I*71^;E*{7 zz>%RV=~ZLr>UH780(*stJJNXE!_G>$FyP9eH;ZL{=*Xns6&*lD`s`4i%Qy49+mgp0 znYy1&0hFA6|KqAirvNn4z0vmI`n6;}&*s{Rj->l|)B*s>&Z8E{$g?p9UnI+$HFJ44b#>bOU^hP*m~_U3(AzXOWADjWl^*6r!(2`Q2MZjYuiik>`M z+ZmxRkpZ6W#)uiL(ph&KJ9gJr=gyx?*Y|#Pbs+Gb7U(S^+wo&jgSsM@H*ejT1Muka zpZ@fx0gC5G291Re+fitT4#x$~Wnu{%FP3mREtt0O_Hf_g8BM;8uiLBFJ0EA-hQ{IO_F*5}ZHPzbE}qREjfcs- z4}1J&Gy?w*9ek^@vQ_H4WK8=`2*q@)FclWtb`{H-?&iJ!*2&WY$j;(RWeAyJD!e5b z0r1~Q4rZ}0-s+2pGY`R&OBVyRldJ6q==mqBFTVKA>YaB!n0edT@4lJyrHmt-eLO#Y z;@zna{o*`^Sl4c!{e^x!G~qVx+`K;Vsd;zNwR`E37# zdk@bdf>1fP(@ol#m%RDLVoy7$yl{|E;QxhEP^LB)3gu9*|9(#>AV*? zg6!?A5B(Vr$cOgTM$Q=!Z=22J%nl1xe&L2X^3HdE!lR|z$Q%1| z79i8RhX2d{_5ky@8@q&a;nC0Y z`~Ip>oGnP$F*UM~9??F}D_b3;aj17IqtmaniLn%54OHhaG9lhN&xEcvW*G0B^RP4C z+`oUT@#=%@pw821K^nfL4d^qDJ$o9T^!5Aq0r2U=cxEhSr)!V1=g-cVY$3&%dM{hS zH^&paqDw1q#tm}5v`u}+SQq#EY)C}HQ9?g;s-&*(ite5cGs@2W?v5epX%G6fyrXd^ zFVjDrGyUOd-wbgEn~G1~{jY0#HlLBou4<6^_;RlVlP41Wn~%Yz3u%2fXjFib#;bsl z?Dl+mXI@>WU&?c{G|{A9OF8T0vZVCnpzP#$(o8*~bC;%cNvY4$ zZ2CZS;`@5=TQXk~)P6dP$uHNbYp{GIC&*6XfJXD}(rHPte_ub^O zawqQ|trM>6QK6;52eK+mzhhk-P4DHnQX0 zA$$G*#(UmH?V3*IbZhGBKUt>#=0>zmRlG7~y694!dVhE`GDZu~o_W7Ro|LBMI2Qb`ykoQ^>3q<|V z+b8k{UBH#$NB326X?xezDLUs~nVKCi~c||@CTs8SPb$tKor|%qFz1cvy zFB(lKh}?^eq03G7F9pW`<*$CT`Y(U}CvyO$%(N}}Su$UbEtNfk-GHWzJ@H+rmPYB- z&SIUv5rEX$nya%{ZnT}fZCu9=cPQvL0}h$Uo_;G&?grIoFC2yzP|>Tu{{2@`y#O*m z3ZXNeHsCY783Y56XoDy*AZyDJ%?9I*5Euq+Ug18|mC)Tgj4)t}$>6@czkl?}4_BXm z@rMBT;-K$)IH4Qv1~6RAs54;)lJjh$4;g{HB!$QTwjDcKyu6GgalECu7r5~9RmNur zQhj&f!Zg5+wk^(<(#n^g_`4(HH8ud}UWWng+|^-kop{2qej_7FJi5mjCObRw`&kBy ziSyJWp4@DkIr(M8T)JNTaH3hg81{f-v@!O{Aw!fG?!$)}JYDmgV;t=b_z((&>OHjg zjSSuZqm0+*fZ!O( z!*^on_CC*aeVkzkMfPHNu_O<%p8y&&ZXJ^K|@&V#JXrEL^yyN-7bI+la z-v&sWIeR8B>hxkvcaNta5bI|@`pI~c36XNG_`3MPEA`bYp7eRw*0vtZg<2 zRv*Y~YD<$|@`IH;1o2Qme%Tl#J}IEYBC_W^Hpg(!BM^_PUclY;0I#EK!(3RLWI~^= zEV_;N@xzy5wm(jO7qHnRTUZzM-XC2+d~$AKgggOYd2?F~=x3l&;Za(oW7r?sW;YF) zW5hRBM})qr4*I$S6t~xg?(2iI7#FP;X2Rjeo5?PLlL4B5gMIPRR@v(}^Y#ni;Ne9! 
z0el0(1iF)-?hHTiVRJST5M^JBD9@j_aF{o_ogBQE?PyVdvO}LeNN=HQZ0fb0QMX## zTQVCFo^K$ydeM*S#)J66jr z?<|Jt|4)+LNE~3@A`}@1IzKF5y25p@%>toq)BD;+Z#(&;JIf-<@4x(_d|m|bo|^Gz zysUfIJKCDpt9r+PCXV|9aq>j19rTySEg;fqyoycu0GVV02uqiU!>sSy?e*gye6spo zW6FS^F=@Q>lc}m#KGh*UZjL@}X|XJ^PDejzp{7GjYp26aM)j-9ogs3oek*ju)h5y> z+6Mhz2k7#GHXiK_Fc1!9SHPOEISyr2#?`VdLQRNM&x35+_Pxo-p*fiKYT;XWCx86G2eVLpCHvrMdR9mmK=#Y$H}Y)UH9DAw z?TI|Z&tJSa?QpF6Td@6y&wo4NU3MiqZ{-nx_1g7WB)HWAAfW4RU<9vni%92s7YO?8 z*$e4JahZ8CnuuFCYjLrKD|+SBskc{OeEIuAt#nXfK#c=~^%XV(y?7$ec!0S2Uk_N& zAILdCh_1Ko9l)xcAJxV{ag+M1f&4te$pX)FdTe`OBm3mg;bO&inDpJknH)HJWcBv5 z)2nl5zv%!#o|8*xG2?)KDGs>M8uXQMKpgI;)rDN_+7%$4UjDXs4<2r(O84{uZ9?v6 z(XVoaBEb`f-Z@MZ{SKcz5_oRmnWy3>KRlU-U*QZ2Up5DZ2FP3wuy?kByzE5K9zXbr z1(9Ss0A?1p1GvnU*y^`$T+2(d${(z zQTpA5i88N!_)gyRg`c2XdB&a$97M~O>cP{P4lt&$3-%Reh|B@8fvXM; z<8)t|e&mguT4csYL^R{^V>8GD$usvJ-`mrUd!XEnDUSmBjpO(42Euh%H2&@1=MX{cX^}JC>^oNWVZ$_r*H%L4jBQwcoEU(j zPtwO?M{7&A&F=K31t8$1es0|0$Z_SucdLsR&h~rT>h04X1U?>`acktSu@T?Mp|Q$0 z9S>B#bpE^5nQwmIVponNm8supQ`1xB?CT)Qy)8(+P5{hHHXL&FKepWK zOpW@Cg`G`dg`a#>2)z~%&jfI@!Hygh`tp7}S{54Zkht9f6g~pCmk@@@e81U?`uFFb z|7I5PuC{{@f42w1vTKf=JQe?ix zFTJ(3wloft2mk0vbT}&jf7!XW?kqcO+3yy{Hg6I8{?&Nw8-wUBDp6uj^ z38U-mib-Y~XX!}xg)%H~4*zOP_tC);I{m>%EeI4ArLs91(DTMopFFlAUp2s7`5_v0HhvYeRh-|Jhf@K#n|Y3X5>h z(z7qSXZI1BotI*sA;OeeA7X%3zy7`o>%(8 z!-uE8z!KPfKm5^8C!gEd7vxkMzsgS8k&R+eNq>AET^0cFl8tSrtT>ys2#psc(P2_cqPg^(;P(0#RTR zQ8M?W_tb|qOnZ2{=QXtJ(I<6MUU7baygcq=8VI;4Z_lQr7&-6st)c+g!QCdoa!^?=UH!7;Tdph6IGti(9)IPt~VYiNCPc@WeoZ0I{~dgJXo@j03H2~E=IyUk$x=; z3v9JyCqKr_d`IwLrR9g4>kNCZ0crAFZVhl<+RX-D`TDnz+biysX=TrS&(OW`nRZ&= zo7bY%WB2{`p1MTqfNg;jzNa5_f0CANc)ZSY?oO7yC$sObC18a0iBISzlw@`at&Hy% zmSo(=7bQC!(3mNXN#8r}$&B(`c+{IS03fDLGKW(o|96E^HRU$HMfVl0ruzF z!jq{j-gOUGef+<)3EAtHL>SVO(G6~_@2#sj&!zc6Vs z?-D=};M5@Y=O4Y-!1H?b_ZM%hK0TUIn&Esa56}a7f-Q9@VT03r%6BNJU3mIjzIAo= zFaGJzR~N5eU;RUqfPedkGc$00ba>zD!zSQgpZ#u(_gzi8O@vH5c)9VAynHk7(g1@E zxn6itS=HxhhRK%32a~>s4T6s!br9#FHv@2+U~Vf8bOyy+AAGpllvl>NYd0FGi`RDM za+AS4T5GVqO@{7gFiHPB-aQE|FyTK~c@C%C9{(79j1|DBgX$KK4lCS@Hz5V^Eh9HZ z`LnsQHu~nUTU#!Sk3YcVfC`{gli&Z%|LVUQgO~v=AH1SV)SaQn8|-fX91=uVE+>Qs z&Xf=1_0-7|4fYQLYuiQ$sE8kPGJEaddV7;B2h8xWV{nM|`l@_QgdEy3hSKDBeEnr?dnQ)kJ zp1p9hSaEp^G@-TSjbS6ai3!D#1A73cKv=(OX9qGiS<7$&aOo>Q|D&H~xSL=;%P=dx zV&0%zn}nN001M|+I@f@_^_?E!UM+k(~RT+BGoe*0fK-UKfSvtgp_RAiSbqkiSp9oB_ZZD z4=kSF>bq;F*sdL#)#Qxn-fMK)rSlhtcix*xLT!4pNx|KF zfd_$RVr#$i_UYBP-+ecR@9sBvOxjY}gu6+m_7cv)0+GHl9>A4zGyq;-Qh(bWO-O~? 
zIC}g*3s)!R9^;*U8eOifGQv%CObo~p-GC-KidUgY1+O*wf{+4<7~8zKl8o|F$Hw`t zfaBvu9Zk|Q?q|Z%xcw@lA5H3EA_+K97wwL(Pdi}KBzc+0K5ZdO7z*P?o;c|p_h*t8 zefKiN(SJDLdc0_AXOjnc4PYKGvU-O1JP4mW2hf;&(;+;T*B(u!pL$isc#l7Lb_1e+!-BTZlwghQO?ZpsN}Wy4 zfCBiW4o}E+I#DrSMYK<5D?!3hMsRp=F^>ORUX=0 zTfR*G6Q1r0F<|1vmdLV*S51@Wx3CuEfZSNr0a)QT*)fSEzW`v9KktgO38Vp1JHYU* zuw@jdT?7$ikz3^}*@W1L}$4Z%n&?KU=f=WdF@#nRm)y z;bV>kg4?uc98 z40w9+!ujDZ0P9$KQmpbjH!iQ92=9?cBYD1b{!E}vfKv6Hohvnk1qvX2M9u>GgZk8` zKmDV`Y14#pVdHFR{5uxlD?T(Z!~!JHlJ}m_7e2;oyf;pW9jkBAo!WveD}MMJg|&P1 zFc0h<@gX^->)0T4k1+!Ht#iL%6s3Q}wY{3{cJ!@N1DXJd>E%1OAFaO4PWbklGreP6 zG9Fu$PEXYqyltslae&!J!X)6YvmS(kdLAHmI}jB}YtiX=iwy??>k6u{`r_-aRy))2 zCr_MO?WyjEk8cgA{kAr_yE+wLjhpm3;20RkW7Hz{v*hTF76*Z_!gc|GzWnO<9m*Nl zRQ>hK_doi<>Qe1Ay9{gR!^wacz~b|x<8_O6yZ2k%>@d-21L`}UgJ&@iX@LD$lUf`A zRmcT)4KS6R#HZ9E$Bo9614oPB+q38C=u4L>vjvVCa<#X%5z@!J;YQ%|jMpugyj7UF z{Q<6by9X#DTXn_iR%oHp-wVhzU(>$Ybr#a&1G|n6Iqj~*5NDH*^I z3!s}e1E#jmA<_CI5D}nsIeP_Qz9$geu15lLxt(Pe%tjYB_8e%ZfW;j8)b5>o$p>BM z;8r2(#DKPo;l|B(#?HEQRc99ShhzH6ZfzjoQa}#OB`s$n{hG)1ualc|GUltG`Tm#-3^4$eg?_+OUO%-J-W= zQCpbE+I9m-+*xR?d0(3H#i#p)IU{%Y&K?0?+wr+6IWY$4H}o}GTh4-LG0!~e)Tsd$ z^=shksdwHhM9ZZK*(7$iG0|>d3kUj*#T>R19mipTuA(z;-39DgTsu0vrPqZ2V1I4v zox{lxha?@9uGVIE(}h6Q`+=uIte_j{YWx5mKWJ>To9E?AXAvY{*ZI}=Vw`#3s80tUd zn|)%uzW?3b857Xyuu{Ax`!DyT-y1KK`JjA^LB?y&1Z0$LZC*wOIFPW5?gI47b9-ac z%jZk@7W@_V5|Hj=$I0wQs~dAZdKSIpWB7|M{VlzsjM-%x(>D%jcFspk&#JQ;22K<=P6!0!nqi=VP$d=!i5} zyXm#euJwEFk>?HbyQgaedq7dCj{pHiSRQ5b%s!IWzGBfkN-O26=< z-{pr6pZV^_C|mER5MoPjQ?4#HXj^C*8l_9PURUlAuKnDbI=RL(=}k75fo$4FC5NXC zT$2Kg-Rs>A8r|>s^`+cNrEXnJe!hAmo-QD>2c;k0%4hkx@Mn>)0iN+deQK-W^VBIG z(|%sVukMxUl1JIZ)+Mt>-vvYr%u}yPSKivaT-AC=^XmMl9OcEIB|rI2Eu|dE9bWt4 zegrLxG`-JCNk>km-Iu#khQ?t+yjT}F^^$bj9(QHe8%qTz-RnH!m*3IEBirH;{+D{d zaXeYmJ!MUu)u;r!JGPswscKC`|S!H zZxEWgX{L4B zY0Gu}(IV~AU)Q{vXZRUy^AL%zR;N!KU0rUH@PGZq?^dS|?Opw_3EXz{DtB#6(9;g2 zdY#d^BTp1E;-vJ?1I53)_r>a7Y5(}c4nHd7$$`QieAuBiOI+}_J?EMEsL6&yk4`pu zz4v#&F6_$f)#<~5iu5`@RSDbcY}o>M|J`r?e)Y-wAGbZN@FT_TKG%x;&p*uoD2_2h z`)|KKyZULTrvLZ9Zjw}70qd)~zJeug3={bxV=bUc#p1PVM1lx1wA zkD;Nt<*CJ1Co%w!$}==Pk;Zvn_BY9~pY%ITkO` zwYAuX&pYUk@yTP-$RC9o~h;PQw*JNc|GZNc=^+}+#HZ^t#yAgKM zCw&s1352@u2-MqN9~zLLLbjEcFflV<>Gz_0g<+w614(*!(q@B^Lw(GmB7H$m&{uS_ zb29b@1_@zcvh?z0i#PFKyRz{ks(jWU$+}+Od$+Th`-47u0NCuHT46Y@g!Dk>B2tR5?@aRm-#Im7hPqey>br{paVE))5zku;RS&d}Cv_n^=uC?QLN3t5(-)FU zz^Oj;v_q_~wsYuU{THAOP_rdpc$Yn;p^JbH0Cl>~fvrc=e-`x6d@|MItBI2F&h(if4U+a91bO8(OdOKm37~<)h$~6J!`f~c6 zcLyjG-#VJ$8NHKO;+#cNzXG=yYndbv6DvpW{Wd`2|DB_KmF5y>bFP7zY&1^G@zUJ zIS)+z%y=cug+{J_`|cu3m8iGv6IJHp)%P- zbToVXMQ!ycK#z?moQA$b_Zhyc>$Cq}YRDZH1#F$V{7D?z}i(5x)<3^%U z&iVsg!Q=YM#R~)03f(aFUv#mHsVgCIUeyL{2V?x^=4O#RZS(m4yM?Hr%O4j4>2Fv6 z>3{ZLjK0_jsGKoxQy%K~M}HiD>v$n7;#0Dy{e%*;BWOea1Cj0tz0vr)v;3F2W;!^X zs%*N0T;UhHX-A7HBx6_M4IV$vW=;?5&ztL~76qO*23ml&^U3Z9b6K{5!|!ZWde)(u z#!#{)-_g_MtSmqz0MfybJUlJ9Uum(;$04~sfBjc~8BOg{=|K57GITw!ohmA4l4ZPb zV5WbmpWz3FE@2S@g_Gy6&wMrQAw14x#sRRrcq8wQKl=|nozO_JCLTM(C9l)!$Od4O z*=Yxi1B~w6zUX3IdE-foFG8vTlZ7(eRNhr;#wvOx z1u*>Ajt|e$jWw4?%3U8+tKuj`L(g0*^yxZLBP6yr|IdXjL6lVvC8Erg%D^F#4+KGm57Dp^#=`Xj^30H3v zZX?=-D6=5tn(XXqS15bX7*A$-M{iB%In&9)opHlLFnKv}IB>Ln2SmRg$V=`{G#@lx z$!~!2Dq^RFcICb8@XlR!yrthQ2I}(w-6h;jV0m^Ypxv&Mlc(O9#T%hT*gg6z=K?l> z{)JAvJ%s>Ko-y1rI%l!hvYBcJ_5yj_9xvI@H*d6KB~ba;;Unq!c6bHW542XE!gbM) zRiXFlw97IxFTFeN$T?*~8oxV}<&xx$LkZ?l^vzOwUckmh9f+lh2&=FdW~-Du1?fAPCX ztHJFeK%bo%bj8c&KCe4-W)9DcH%}S|_4#Ypm&4Za&7x>k8GDVhi1ET_cEH68-svD0 z4qkUUGhzFq;*%GP-(tvn#aDMI?^kEOp0SZVXCZHI{VOuc_jS7&?$sZgJJQe9d2~?n zOWtPpLG5q;cVKrR)skC>ePS+Pf7i|yf!Imu5xXQb46pR3&z4<^4-5ZlCz8UlHYvEF 
zeg0DbBHT1s+0qa1SO6eGG!#Z+4JsWa>95vLkzn|6#!Hi6o%y`CfDLome-rCidNaIXDSj(4lLT)eV)PW$CXDP>%Tn*953 zb{h=sGTM-nB{TIOUO-;dY1*#$ORs$T7}DyNYh zZ!FIz5AV-=(#cD{-RdL1{)?`S^zvUn$LuMh}LzM@JrxWoZ}X`>EnxY6pK(fcPyY`MiT~_$N>I z%46YAPy0)+%|+k9hmpbFF+pFd7?p~WcFIDhwpgAoeA8~;SI)YwUE#}yE*c)G>jvL9 z_?=-Ij9cP`e$Q1?eddEqMe66Spr*+gf*d-^aTyy*szY)x?wXkog8bRBIt_$-C4hm7ia@wEFT=F&yK|zx?~pRzD0h z|DXNEf7q6)w&>z}t4dj}hZAjsNmL`=73U^@neoG-Xg`^!=;PzUTn9 zSF3Mt+*p0pMD+hDtjec9Sp76Fnq7I?oy}|H?&IgHzy9{t>fp|U#oFf4y#S$K=S{RT zL+N@T%x`-CLGtj&0SK4#)V%jWy#ZzV4H##PfBNH}t$y;eKVAJV{;U7xc>Mj_FF#v7%Oe{obtKR3{ed-KpE(1XS%0$w)}T~nw>{QDL@!_7Z88#2THP4%yygMi z@4o-x)ZG?U0EKWrCieI*zk5xtUB7cGpdn-W`E?$%Cegg#^Hc=zS2fqon(0MDbV;-~z1R?b0H)o;s|ClhMM&Wv~R*BvbA~U=%hOM5sO|y(#&DT5>aGvZxEKBNUhj`bd2Q$& zyqYnr%z?ppGlElUpxUO+FCf1>@qnotfG~Zn4i@jU1>TN*Q62Q50oN;^KJlEl;v7iY zm{#KirtAwCxz|B!-{x)4JE~m{`cdWEBFwYOA3LA0Z{PmF!JQW-L&47%(Z;9rPxTY70|8{@I_*qA@+>utZ?o z?|;|M1H03vKZyt3SU-SU<451mlhHR%oEk6?hz8ivN5z9@Hw*u>v5UvpNADX8E?hV} z-zRG`W6;P(a!T&beEZ$%=YN)-uFPBQws=rTyp8dK{&`rKKX%2@bosFs#>{CRq}!Kq zC4g(>BA_>YK!(gP<{XLSXXXLXY=@&WZDP`l9r$C>cuVrMHE&|!4J>K_yV&0jB%~)t zMgtLV1bVXFc<5?>bQwqR>~f%!L$mdFI?!0XGrRK9t8CB0t>EvD_+VVH*hANi9hq*v z*V#av#oU{@OZQ?FMYV)%=v`yuGJVM|$tIh0(Ot)xXIo#f`?Er-=PYE%qmP9ny3z=s z-xx zS$LEj7(3-D2Kk=m2?z}qrca?Udt;pju&y(kk zuzv^AN5(4Px^dx1;i2^1TZJKe5`7NqM$b5!)XyHbI7@dPZeflL9;}`0Vqt&c^@H+J zX|jdy#;6CS*PoxqlgBwa)FWF^xV<{_)t4<$6#6iFj6(++w=JyNtuy_)K46T!m!pNS zIf;O(lpfC)(Vg+i{r=IXcGYO>ofg9@lg*?)77xWBfBW4J>K8wpGeurzTb}v$%y@$z z%APQwAvrZ$y1year5O)|xlvb(sCjSL;5uWn zEbAI0EYlqo(P#0-T&~>*PEE$RS_c{#np?Q6IQL4Y?1g~7%Phvoa)fS8{|Iw8HHN$- zpEL=Fi^ZVr0=-pyGL>eYcRz)#&I^szp)^C|y66oWG^}#v{rxBU2WXpumb;_iqiqyB z;{5ZeL6@@2qgw-b4jq*?X_mapvD{1$kRQE;GZ@{LRxoJ?lNd<@9rT_v*qUge>LE%x3wyPEaE4ArCK< zL+XC67a-6bZM1$iY4FW`WcDt_(I8|H)!b8G&!ny1b4Z`0dTRHkeJYbQ%4lg<@2xYH zf!s%SdOp9`H9=?3=0M2$0n&UFweW64WvLEY{E4om5bwIVu3w#Yh@E`OsF=$m*OO@g z0|Ln+y37dYAwHv^|HxYMI(*+KqO^0+Y{|WWkV#weA*$!%1D zhvsV3xAI2Th9K`(?gnpqXPN|G=a2M2=<)>U_6HA^k9$ihj9cz^PukHheMR4rA7V-| z>Fp?JK%WO|h7S|a+Pkj%cvl*VTz*R*?ZN1vlDQ`dK2BX!EE-dqY15vq^O{6mCsXSA zeHm!+yZop6>-uRMd{Z*s0f&ZP(vSXFWD{RzyJcyHuV{5|sMmn%vF26JCeJlZxQKFc zK0H~s%e=Z#m+xsPrF-kF@OpzcZpwG^nu5t-7n4WdL$Bw(FV&>!9emP8WOki)Cah$& zxq9o!fz|){-?Z&4 zWBJd2^23TL&-lSO{jQbvBS0c`j3&m#G%1gT zGiSdY1Dw~{g*-mQEZ0oHlRbHE9mvatx9{tKHV=5xsmtBk?N`71a`hK~_VWyb4qw?O z{`hjhSxE+L1deQ( zqsM`^G3SjYkwA&Jj#^CN4VA}2^#R;6PXBP`+tmjj zo*pk%;Q@f@4Dd>9VqAGGv)I-L1YxU~n3<%M*~lD^BH?ps0qCcXRj6&t=dv6rq~tUHja zO~nTOG{cN{|Gq$n0|P#p1U-#6jM`fnqbCCm-YF(GIl^C9wzhcHq9R~9bF_(RV9nOO ztM}jgp!TS}nmF-(<9#BQH*g$~!mwXJmQAZSj~t$fj)~WJQB@a{H8rOE#%80SC4*?9umT+lv{q zJU$u6CI<|CF>iNel!{%*ND5O*|55&xPc_7RUZLdOGK*Z%zLuV>Qm zar$Uq6AbP6YI}=0(M%?uRL+(>&aS0<9tRljX=j9b@=`a+u#m#2N8A1;KXkMSxk(gG1kpbWCg`NID|7&yv|M08;XRT$vGb`m@u`n>Xxo4}(Q?0u zzJ5zAgbKM)oa);FKPJh?k|8wm;HUS@FQ#6l1wx8RzO(N=m4PQFW^6hD3(sehQz1*H z|2A%zu{>Aa-W$lvP9Xz(^DM&)Uaxegg-T=Q26%s|#b%(Zh0vLxCSwntEuLOH&eV%u z`c|8u)mSJFDKM^S^U%4awlk^q>|t_DA8NB(Hv&DPiG6&tuv|-MmL042PrtqT z=FGWbn=f&=(fL+7XTtK-*LkGBc#%gfpelK|nQk;@Un>vgn0y~fhRFQ6a~D=;E}W0{ zLZIAeBAgzwOUUl1=?|3;z}l53;{XIelJY%txC6P%%T5@88ZSe@%JurQF^cD;gVJvA zch*KWaQp9M8jnU^IKLV%G^a=;T0SVI?{4D?Y)3u$&A-OY>c>C+AZ zY#bnSfFwI>i~|;1Zr27pHGv_(ppz%x9$jc`1KR!UpZwFxNIvuIdtQF`8UxU5>^~ET z#VghV%AJ5oW0n}MmoA(eodXO~&ZFYBK4K?U{?C5;)73{Gempc-*tO_ohs-CR{$xUf z07h;l=MRz_meG;oXQRVsS8})WAWutrq<-7tgT!JRp65C-1%I z?6Zfp*Is+=-&%X^)+)eiy8-MBGJiLkr~Qj_SQ0Z`4-oXCz67wE`?mCFH{!K;!8rZm z%$KX*|ARjWh`v5!Z(@(v1@~@V%Sh4Agy;u2b-s-G`BD8pG_C&_l_$hNJT>v8n?D)- z9H{+#4AlT6Ay4QdjHt<)#=)zN<0bS%(_74jkM;lV!Xa&3^N77u|JsqsbCt2-tMha{ 
zhM?DHPJ7T?N!DGtc5(DF@|qk3E}HYU=E-baq8t8TRN|G4_rK2gjK|I6e2`w`m97%% zv3R*h05Yrg&3e5D<|Fjx5=+cGgXS|D__xxeT8}e^&2L--t>@cP{&%#`3%+RN;^&Hk|UPceVq4F32fqwu^ ze`w8cW-g~x4sh4KgMn`!{q@fZ|8XG0$Dyf*b7{uFRMqK<^mBb2F~$Q|!^i{rFI~@W z6m+qRgt0JK5T65S)IB5<#bM@uU|7z4(Ql;kB`6Rfz&=VIZ|`7ofadJpsJ`Y|p`jR!$XIj{u7+H{9jIv@BMbB)orxj9IHH@> zHLJ(1X~gk1ww*m8c-@KH2pdQk=;UcvsX6R>5{G<@6&rYJh=3kzSbVb zTs$)Kiop?`@YnwMh5mfMH5j9`FlS?^{5wwsH5+moT#?nglR;bX_fQ2XVtKdGI`kmjVD@&3aAb)NA&`F9is>PGrI zL&xYp(S;srhuo{3t$@vh2z#7E5btXSj&<2G#-Z9nw?;STicx-m);z|mexcS5j-G$= z&37teUpuERtR6U#iX}%}bIFZUk$#^6x5!hOz)WNNn* z;rWsm$}I0FtH1u2qhFJ+yVV0z#%=R#AyfDD=)5nrG>D`Qlg*MtzrO{}w8zV984o?A zpSR`V%knbv%@Ys1rrpx^h5jLvX^-#yt9Q-?`=P6o)pw(|4Z> z5H=5%chRbhv||asvDCTOm$pqIy=y|MeI|^dZ#18GCfAr zG^spUV;;OXg-r$4-tD`zXfto%#kIB$pRVOSZAT-&tK+xt^j5dl$ELbZeJgU3F8?fb zP%%|OPXlWGl4aNI8fowmyx-EKXCnusuS|6uP|*Ec_6Mj{)>8R-K7`|vZK1g2+)L}U z>h^l4Pn#AxO~oWezh0dXHvLvsccQEB6fWuYi5tpSc7>PD$JA%im#$|jG-GetyY$hw z572yQA(IM~cNs4H*Ne4x`cfWb)X=gY(^q}c@DwtG#MIx5dRI#2_RUw9I(2Iv^|2hpH_6oc)0VE89_vr}?@>#dV1T-WP} zS5}u(oId;UUlk+z)ar0sd3SdZ<==UC@esb+MERqD;>XQH&+;%Io{HAMh4FM8ip6op zV%OcgBwpLxma~+yG@Mb`#eC%rX%QqOfQ6giNJwIAYpa(zj-q4Xp#SiKJaxlA{zw0K z^?&@2|1YaQ{^LJh{qh$-Ssi=pR7(EF)&JxF{@<*A@$+A#=)COkvO9s5g_)_2f0<|f zo-F#m6QF)Pk5-`NU;XlzDWiD}-~VJ31&a-b;{hK&{`GGHge+=`J(zO;dJ8Ux;oTW! z`Nrzxu@kHJ-+8-**x}yY5U2pu?4Z9q)qzU|n4kJq~GnP_v^F9Fh%)sg08c<&NJ#aSq@=%ip8c~dA@mO`}oOX znbwE3?@Bu@zACmQub3fPCU#*TGJKiESV7NTo+dbP>^FRvRvycO#0bChf z^s9PZ$aVG598}k#Ie7(H>^`zM4X_l`)#3v%FIGG+J^dp*ldZ zVm`PL=w#vd-rMgM3m>Q{rqbFCOxc7Hj z+#E@!T7Ze$I&H2_b5LvieIwBC@utQ^yr5ikuwdXdM6Uo$2Q#c*iI3M~Z}M0KR`Igo zjjUYt+EpJLGuwF}76NL3`9Qt*0@yC(VfmZSznpRCP*$GLr38;#=B>3L1SZuLCfOam>S@w{D84!2E9^vBc7?e zyzz%!EeG?g=k32GBa?%H0m#ocB(GvbyvUo1j?BB-w)-}MEb~P9S#;8M9yXrEhi9M# z=I(CH@!mDg)(u=fcLpvzzm?1)3~Mia4NoaYm>j^eun0Q?3I5>k{+$VV;=BUwoOrCY zL;nkVbg{8ZCZBlYaDe6;9W;5fL(!J`3?ECcoRKZ%KQXAIC+|5tR>33ZU)H~yI;aed z#H$Cc+0}!uYG?pXfPD1YI}6?TvPJGe;A!&ztMeDjvD(*FPoLkL1D$!8(^kb;*Jfu* zeDv8D6RzRa`j4F0)Z*FVSnOOLWe(W2(}Z|8XB!WP(lvJM+Sfs$9e`OooYTM{OIGa4 zh)Q1T6LGZ9oI4g3L$9O@^&`HUaBa_@fly}z4`d@Up0?&8~CSY5d!4))GD z#Ozr>Ca>XllW}jqc`~_qpzuq3qD!Dha%WRxfnfn?A?%C3zL{}(4mY%$pmAYmGeC^~ zVE2G9T4Jp?X%++A`4ElEK=VZU=qJ4SfN_BTgBfie71{~NMRsXB&)#}z4%HOW;?CuB ztBudvh0wUM(}!o{Yw0YXeDP_3*uB*O^Hg)GkQo3$XJIhrnL{3=I~^*nyl??A`2g}{ z!im~?E$>x;n6Vrg39z? z1@Hw(-^&f%z zWxV+5y28ixsP-n`D~r+UTDv?Q^llDf%(%#<--D^k8%({Kkn( zD5C<&jU2a2i2RZdXA3dP?GPZV$uV;Vo&}KKy43-}y-SA1BoiB-AxS?dQz(~B(ecel z{75FywTxe9c~~ci&%L8D!K+x?$sn#V@yE$V zhNL+}vc3ea8;fZCl5P;M3%N4m?|%JNJtFt$qt=jcltKDiYB|0gU_O`v8D{xijMlnP(^GGPbol{j5ffo?1s6!rF59=!uzgtPg~%Ir7HIXxA?1qOrSQ8NnM{r34J8$qdpei1s8Pehql6(HBi#=}X~Z{PnWPXjDU zEo0Kx$!{_%t(Uu1dSBP`W#ymT=6OF_s4vfaN?3bhv}uR5+C3E@TzV#nTXU%+;odd$ z`L+fnn!9sD+r8(W6Si^7g}1PMb72=_623>i>2IkdQSz`(yX^@UMwH!M@FfY zvbv^hUwUT&{FJ8#@-3Yg{8o>-+wWQ|cOc?rAvi36524TiCzdo=mhA2J5H?mp!ux}|euB9E7QeFedO5}b6 zT7?GGRgQ5oHC(7Wb#!YkZPq?Fz;1u*TCaR{@aAFwlD0JShmLD4AKIyFw-P!YPkOx7 z_oX-!^^(^lkd3-aJQZF(fDqHSZud+M3qNV``goJqQg2llTKKogcE*=FER|TR(-bmw z8?KshH3^nNrX54&_!_@ds`gL$)n^t0OIwE)^H#qrXJ#ygX*@o3MNhXzA3JF@@D>VNzGsVwAe zHEqH7uR2^%Jj(rhuFL}dn-=iGmiT*=a{Z_UvV|U6Z-^fH$fA3D?*bbDa>8QpP@r%l z-&4wapX+m|bk`}l-2|Xe$bmpS;?mjz<{z*Ay?^)LD^BO$wyj=TJ-dHv^$%0ZckSF? z`*y8<@Av7`zxizS z;rrhi!0XDj>jRcKNRhH0xaI`>9ZG$373_>_WbhKU(|TAHk#E=<6OHbO+0RI z2h4xdHqJK-;efGj)^`@S7Op&D^}TJbWDYP4D19g3B-MTZumf%7KYRXCX#*4T44Q4? 
z(b%xy@usb}p9Vx**rr(5Wz&B^2YuiMo_`!T_FCRm`iF-Q9$Zh0Ul*`x;c*WD4Z?Cz zssOb>XF!WKQeqDt*q?H>#IW3~&Anqrkhl2E#_hfoHZfOos;!OjeT$n1Dd)fV_~XEv z;;pvG=J_tP3#I+Sg$s)^Ts{*cv4x!WUFblh&F41H!Ez4%dzixab>l%(ZQG4(1LE^C z1?pH>ZQ9ZzH$@jWFjz9`8o)oBt|+#TTE&IEN>3Xel38= z;t1F}-usak4>=&TDp!^*o_XB2F!|RM$09^ZTGo$-Lt zQMR*M)ZGlUFxC$2vv6Csy>1XKs6Lo*9yOYnd0AS3!6?pc#pWGqv1Oa((7c6#z5t5j zuPrTX7?bqlc$`+=%>ZNDP#Jto)7M)FS}gJ|JRW$C7XV8D9AbPIo>t-M4h z)KPiijZ+x|0ea^yo}0xJqXN03?4aH8ezibSC!R@S(=!b1+Z$N6D{t*;f*y=Z^v9A- zUGB9wKb&{(t~{8Q_?h)v-b=jKzNu`Xw)B;-PU^=i^u1H3#tRroqI>|816hF{HMs>a zPqI77j=~xA$FQ^*w9qU~qIv_M)d#5E%>l2--o7q$=pD~_R`I@d?LSbA|73^qEH;ZP zv)WWYFJ%i41@w5-cx(K(!;W0jcXlU`75f8O7`+@cHV490cOK!|Z5Eu6GRb=K0GKge z@A1pF7K{uw#uFfYPjehurGFff#j{@iKyqyZJcw63^J)YUlbbP9zHwCilh^5W+k$!X zb|_v48CwKix_W&;Ig3vVO`fp4rfelPhW=(vwipqr{?yy<3=72FI}QMmKyJSc_$B@Q z#?>6~)#1Cm%+P_n1_JEO)9+w&>ctBeTiotg{lV{jfAy1$wD%umEKZ)FFCd*v05UH8 z5a?CAEKWyHtlP*zae(cnz}Mouj~7}raQNt_Uz`i1zcSGx)d87H&zp>ea-83`@+uU#+hZ1pf6?>fsNzA%T8Y50SYL)rRAKLEt( z6vwt4r>KmFC*y=&Av8>eLT7L=0P{lTjd?i$Q@iwG%ot$6E3X|vft-&Y=gl0b$q))? z5@+??=}%|eSl9~*a^mD$fjswD7Yl=O<#;S8^@bP$>M(5i% zudZ%gy|_Bnp`VNi4@=LR9}S$Vapvsl)o*?H-SM)ete6KKsx5TU-GEfG%ozf=Zxl|V z^!sba-3$o4j5jnMc{l=ifMH*qJ2$$b9cU*qhOQfk&9CMQ>qO(2xU`GL9HE|11Bu9I zdK2At4hN6W%WkK|fHv{C11dQU2_tCt7@7DqAn*Q_yY-o!9@~0{(cn?KMd4|x-`;r2 z4vabI9EjN;0gtwf?%UdVc)D@$#5^2-ne)ggI>va3_noe|fNONLBw|D%$2KHiL)uJ| zrA{YEwrJy`qjjh8VI0pK*K@%2@i*SeYrn8#?Nap~P_!&ZUIFpx^#EB$8)Fn;efZGv z(F3pL#p4KyRQqz3F*I65+HQ6h4j^<%b8I^#gP_7YOwM!R^g?tZnVL zxTj9NHRY2%3^6a`KXo=1M#pH}3%_|JJ=NM^VsO_sJjiGu3>P_UXNOQO&ON$v{@jcy zGMoNoN9e~t|LN*VyA<>}J(cceJddNo^kegI)pRz-<{UeCyB|4nEdFe*kin7uN2epZ z`@iz-s@yPZc>3?_N8ad}Jkoip11B2>tLq)2$w2iu!wSQkvW<6RiC+9#^m3r?!xyay zo6F7Nc+0wmtd&mx3LA8>dHQZ+91od`x3)W*EIN`w^2^g-j6N@(xq7{5%sk9ELavZI z#un!teDkz6Em{ZGEpwGdjDF1L0A%iU&30`wV9#g4SsHI(M1T*Xz3lt?HQ%cARp{vxiZ|-|h!qUk5Z>1-cT18A*lh35-miov`ww|ul@7sEp?>v@Xwm$kk z_iF##EARC@CYy2^AZ_UEPU%b>Ug{rJlZTg<^!+O@mm77ycIv0)9d%!OKKJ?_O@#57 zD#*)=CZ9eQ&uI60t8@eQ%mZ{T-zg-`X5N#?*94-x%fp*e`l+)_f(F3UtKSR%EP1UZ zlEpkUXQ4oH%p^V%JFmBjY-BWUb))e`_sqshSaj3cbCuQ z``TSUma3^pZJWBU(O{_~aA&Q&1b{f-Q$`y4QzerZ4J0`KoBw z)T7@XO~X~GuS<3Ht=ryS1DJX`VPK{$(k);YuZjW2r;Mczgwgbn{*sX*CMOlrZ+b5p zSC{3puT5q%R(n^amT_u9&wEg1N~?|X8~QHo7(N_&cn^`>NBRDcrEf}`Z)^Rs(7;_> zQO$;a`eR8wyyqF(dUx_H$I70TmCxi@daq0q^{e`=<$)aYVENMba_ali_MT1k)zf{C z*UFJ*?ylE+GMe}1fuFt%fAl$7thH4=8;?uVm$of+ucgTolk&(w1x}$VAp>JpzU!A7 zq`ahz{Kc<+v--dO>0fm!eLa;o?76EqS9`ZzTm3s9oGPm(w-$$={QBe7C+G8`$}`~5 z(Gz(F0pyfXJ}GMVQ^>yvB))y0vXeEqZR~$HFZ3NLU`GmDa4-dBbM?E}cIQ9-d*)1woI4Q`BLBGsR4QV=&!o1F%0j`^DaB9@p2KQxq+P(7<>YS@X@);<0uzeO>?* z_FcP$b!gFgZ2>IaYqIdnp+vjBpJ_VTarQar=DS6yap&uytYHO$)?{hd{Wyz23fk(E+6Tk(EwQxYM z*4g?IP|Q2jngSg!mT2Bf|k}_F+#0b6?-7PW?2I?*#vhm6b zKJSN=Rtq)x-w3b;&L7Pq$b>0$OG8|h^GX3HCuyQH24JYXxsK^xef$Qnv`Mf z6}o_7V(+dNVAast5|lA6xc$ zC)w`X&DwoG#r^7dxE$W!R@3G*?X}fb_#7UP5ku97-RuH#AnyDmM0$WuFAIaPcr|BC zs-J*$!u*gGx7yY}9=DALV9@Yhb(_UO^2(=$vu*lhh=uT;%AG}KyoWz|NddJPSib+^ zZ?}8oajytz7mId87U5)Y~E z0ma6k2HKf$Fhdo7-dwpnws`oEH-Ij43I1Ic|81>rp~0g&*k$@3rHoPGu^7dSxbv5m zLy(mP@DXpnBLf4$z59*(cv;;TxUOBhF?plu_W1e48;4e( zefiaZz!swe5LH2k!tvx+S3Jo0b1zWdV%3=DZAVsGz*pTCmTMq)<$Uz3Uk&Z|?s+{5 zV`%_R;0yyB`4&Bd?y|@PEIA`$ce?{Nw2KBH!vh?Bd6aEySAj();CuYh&J5V|Y z;t9QC$I5FvGZ2+t9CaZ1oq*iydFt-iePH$B@BDsS?eh{GfTB5YYX+(M->!xGfxTbH ze+~^?yki4W0)QH7)$dwmub(5aePoIq3wGqtPqh>1!ja%R#Z?CED4QH2OYVK`FyOpf z;~`_prO)jm8eZ=J+${k`7tg12M051Hc;RfZZExrCd?cO;Y^qJ>)w|uhoL+ePs}2$k zV48#PlHQEJ#@+M6s<@6H&r>x(MEMRa#l+f2_jf>{`;6kl*VSJe^!4_wi+p5+VWicG?tdLE#jw44;j01ZP6Y7XzJ7gVwtUb^ zn42*!#Jjg{-X4I5@scdIw*D;ep279c{+B;r9n1*wfBrxH2P03(03pY=?X%ma`Jpz8 
zeNO(N9m5+VsdW#abaV{$YOGs1G3FmT8V>+v8>@Ew(OIU z6|ioc0a@?nykUOjRlhB8Sg0p@8bg_|3BX&so`74A0z7$>(sN$qcycS_H)F9daOCj< zDtF*)GD-e)HQksOpzreBT3l7`~GZI7V|ndhDI_GzVcj ztiA(k3l(7<;=4A|ivas}M7(Ie^_DgQDjzgHm%|1b7p;-;j6?Ss_=HR=y*a|zwL6h#K46w1^YVq$%>lPpZ#L%aOn3efP?0C?gUaTytskyk z`D#F9vO@oCw(|y^npYhZzda)~KBg0#eCzGTLPqY|YM!6<1)VeA*mXBfeIqff*uGq=mHe8^6q zH;-|K+is7XXQ;G`0PTe^!SDAoycz>|&YwAfft2U5bo4onALe|983$DkJ4bMO-JF3@ zLb|DP@L=pb@8P={3u}DGF~$zOd*|CNo$T9};bN!|O@%=Drnwm%9rlX%H$^+4jO?-# zQsi**_QuW5+KNwaXB@fC7*~&x7f0VXk^a!(tQimO1lY5ObHpMW(91!_vrb5UzEYn7 z!tnvQqb!E{EuF(NMvv|})RpX*wMKj_Exnfv*WdJAbFQ5{LX=Hdp7_bGBXWZLavV6eTOwRWKiBzS5rcT})FswYL=(RHCF+ z(Q&er)m#zTqIxcMmq$OAHURQ0$h2d<{I&Y6zk?QC%3j*EB%GE;?B!X%)J5I{xJuLS zwOUNO)@!7!N$Xv?tfif-CWGa9!d1~rJtgW|UYYhx`IB+9M%E!ncWaK;uD4TJZkU(| z7n#=T$;`LuKKCLTC89Uy*;002{a$XF!J5FT1OiW{ed@2uu-F-39_>q~^>cZ5F7Nbe z?V9|m;Pk2Y&|#`no13_ntGXy>vMCe9oi=)^4way74T5Q-yKc)|Q9V<%*YU;>#0~fR zN%5v3IxM8=X6<uqgS-I{AATnV-{jb|SQ^6tD}IWyjt43}q9u0+)j@61~ZaprFSxaa?A zPjz0;w>PFP-_~bjPi08HWLdrl;thG0lMnJco~EEIebUo9Y5@H;9-A_?U;XBtUYiE? zL!FH0wYsUt{7{C+%2rHuJ=+0U`6INj*tL2R7`StLiqdMw>Z9|QSO4@ce_j}mQ!V~q zU7fzs0b9jdzI^HO>U%BncfE0}ygJCEMZ}XA_f~(ISND&;y0-fC?9~N)j{dh>SiJYa z+pTsSOx8BCCR`pcpPVZudFAtPJ94o2Wo;dIDCogGE&i+3!CZA6u=)9wJ9&k+5 zZ)cUi(W1x1DsDEJDDLCTqtSYRtQLGv1Iad66r|`2OAr+`%{nr4FB1G_Mhh^_Sx$H`TzQFS}g5tL4JI7wF9*PnUwdd zDTb{2c$en`rRD9n-dtU{c)7*bzS(APyY}wc!u!gE(*Vd^yX7#&yu(udd6ZfZ0ye~7 ze-f}LRv_?b@ro3tV+lp@_ya(*ISmwdw&|01ryW8pqd~z5PS{aAxH9Aq_*09${ z`Vx)ABs|>#wE!Tr0I;f~%fimM(&nAJcTAjp9!v9Hv>$~v-m`G4bGpr2vhXF*(q%UQ zFlS@3gVMOzOAM1d)tA!j8&;Lx|6>!xe_;!lC#pB+#+s^{Dvhf43&$e7K zpn2<|E$|v$9zMJ?b)5Mw(hMNcxO?7e{Aq{&0_C*>K!XOuBfSq;B=2mSJ%91y#E@r% zAU1QL*wWh(c1VEj$u5PMh~%RhEXsOJbD}7uh(~`k^2E#V=QQF@Vet^O6I<( zE;!nsvMlba{Loci_|Kx=^9|Jry@6(9P$FBS*VLypueO5;P+;+I>}?Ae8&A8+V)T0w zaJ@Hq1fT`(p(i?Nhl8eHf87D73jh)X5benfhiP^o-Yl$nUu~P%|Hj{9HU+(W`*(T6u;CUM+%(+2)Dp~Jp;Yn*Sz1q`ttL@nuC3JAlMr4 zR>w08+8wi_zI{D;XGa4d^qn>TXE{fB|5AMp2*HCFlE3!@nHZ$rDwKqI`q}fZk}ZrM(Jj8i zKXfu+i$j;O2;)0rxP!6B2w1*i@6r_<&c--otUYPm;HQ{}Xq~+M z&z|k?%$bqN`fo>a_h~xi^?O%W+ivGw`p!G6I~jBvjQurt#}@-8m!J8KygFJ)2!NP6 z*g*z>CSNWWl2F*R-}>;kCL9^V4^WqWPVV9J=jkrShMgFT0r%Nze+~mYWtVk#jjrCn zzL{SdANtc7IMy5k@CC5*;C!*MI%@O!7+jNe%PqP`U}NJP9qh(<5^suCZ+y@2t4_Sy z0lQ=^uYMpB9paVOit(IW5fh(Wp&QwqMc>|*u5jwDcb7v_YZC)uwfOcTs{#0BUmKd| z2$=qt#yVyS_uVug^h)XJ*%kuUP2{yY?mKw2ouQpm5}o}a?!>&l5f=3P54 zw2M*5`VY7d=;z?_YQW&EiRx49!dLO-%u|I(NgtLM4{R6R$NT1~(NW?-)zfce_|PmF zfk(7wW4r6L%^y(nPBdbSy^*o)Zblb*?8G$`r|=*K#-H@vQqMuA-n)DER)&kK zjdMD8yCsuJ#*l-Q&AHYp=P#U#-onIeT3u@%BapdC{XDD22C;IvGXQr)qQvII@f0{hl-?|>(q5bXJt#6Dyzy&_r zwv%y}j$tLY6#ZMs*$q z!m|dWtFNW=kLUd=`I0g6b(PnA>DdA}Nw-#)DMStX>-MzSXLsgZ-^y=N%xnFFo(pjl z)bIWjTKUU+=+bYKh7={&-pM};t!r}cy#dYF+cfoGdnN^%Es58_b)~rzt<+9>KUGxO zek*rwtffIdG>N{L9PmA$v&u|5AHJ*qQs0Hbz0=~DaOlBIgxaHz)_(jX+$24u-(1Hz?jC`!sRa?E&Z|_(jB)ui< zazFZNyZq4KZC@wn{&hozrh-j`)n)N`UT)3(sZ-^+Fv4V_m%ZA%%9t6y8_J*OmEGrL zt={O+dV0A*#CK%eww?XRTMR|bCRhK@ZmF3?F??v>FLql~F@6L)FW?sn5y0_?gq zDd(N#eW^yo^>4~AUYhz5Uh6r?c*$<~O+y0tbNZmZm~rbx>0Q%5eP0uV zT8|gWBMQ<|Mztx)uz|Yt8hIj)>o+Na+7955qk3!4@Y?)Bdo}DWeM63!82TL?paN6I zlES^-pSI{9V43pzZwmE~&QeZSf;|a7>8Fj|(%vPl1f^1`{_)>Jh{(oGJ958r}HkF1Grk??cC9Vv2Cr_18^T7D(2;@EeN-!G=JTpL%?;w zbrU2J-aE@jaXJ#+zu%>ePv&qik+V3BH@R+Vwo~H&!mNVox3k zj~)R{3we`wBu_wNky1M%`2i;r9u8^E$%dF)tN$m2;o@G@`1ojdaestn`)LZ0h~ z3irau0?c&yk^_TyGuXlm?A1?3^J^`l^-22ZTeQ8{=I=98Eh&;`zg}D`t`@f#J?BlbF#MP)k&eQ zPUZ{cni^k=#`^-Gu4Lq~UDU9#yWlntpYjvid0!r)<~ha%U>_!%#bAB+y!>ooT{lGd zV z{OueJ{qprRj`2d6kjKq04(CdyMt`8~A{U}>JE}G%!+C}S06zN7ug43P7obkFfWwm$ zd%yZK=mD6{o~y0ZY2w=U{Ca>aqpb5hgmu~3Vwv&K&Xv*Oq5}|yj0Ru=&R%T+O4uFT 
zyJL0a5B^TxWxTSYMfqL55pY;PFbEqvypqk~K+rAafk)AZLE}aBdYX|0u!jcsGLn7v z#hG@U>}ni$_*ruZBY`mh4720lS>@cydwpX7)Ar5^k+-epLd6KfLk+_RWCbP$lAwjU zEZo0gb*7z3cLPEh8h6IGXVW3hWQ?cJY)gJD^UCwpyQfah;jMq2(fCGXGHeN>6$cI< z*l}_&Z$A3QW{`k!5<6r%1_2!A=^?ir1=6Sosg3X&dKtZSh<7aON z7~jm}7#O*?9d>p~J;+P$)eTz$p$-R#zcF);e%_l`;GVq0J%1dZ0D2wXwzcu&x5MD7 zdU9sl>eK+Tg)s=25VM^Ax9lJR7Iqlh<#WR?`}Z{t0}!-%F&3x3XM99VD8ad<62Y~3^Q;Va37>b2!L5NQGU7Z6{VmD&<-0d#Ef ze-wYZgkITHdjRQ-C}gQQfAqY@Pf}!z7h)I-aWTADUcf77S>U}xIS@?$kxPvwyF?CU z=-{b%>FihSj_ZK*bR3}Y7;<`_Y}(k)6|xC%F23@M%60&&n8?P*<#fqSTfdx8X69F6 zE!9u3L9E)Yqd zC9hq*3Ja1&*UrKo(J|0epJ~&M#=N;pduDw)?JS&KGLfNh>KZT3oL1YPJX?1C;RkC= z??^)rVr1H$;aiB0hYxZpX^sNYa(KaS=Ehf&EyB^*`LdxtFdyUXC&`lAx02;#Y~x3L z7}jJ?-qUA<{}BUPgM=>MB~rvFH5ZDW3? zPv7W1Z)x(?7`T1wYKF#7%lFXgd!3`eAW0tE{bT$9aQ9}g8c}WG6%Uia^e(aO2ioknyBe8Mn@ehgRcSx1<6$r6S&;5B-0ZXKXq=uc!Mf6$1X>5bZ|@bNUopD0mJDK2bw3yN#k$tK4Y}A6Vgqb5ptmQZ`Gc;`c}I--rR9A z`aE3y*`NMTtH1vb{;k;&MV44Qm2tpuI%jv+)EMyt;zv-z%kw>Nfwsl}338+x*|YyIUCuF-O# zqc_V3-TF4qrVsk%v(?|+p8}So=!s53eayqPa=gA!*6pQEGr&D?)c}3%>AdgW8qs)g z`>`CDk)Y`NEA9yM}zkDctsh|A3x_0@hZ-(@L>)v|1zJWv`q=Yi_)%U4KKhXyG z>8AUx;hR1^>RNUp^kd#t6Xh-G*)n7`@2{m_p2cHc7}|Rv^R++uuRor9(V=wn`g$Eo zJ@uYK@kJCI1}#fXn&K&<+In|j`=Diu`-8b41a6sS`Y-7qc z^r6$dEWgTFikq^1pQgE2!l6ky`8g(jl!mo#cS<9)3@ zBd=6m&DZL00X}1Bij_}S`HgXdVQ6Tw)OXscm(+hrskY0tG%ULGWd2{P&9qaJ@}9(K z>`TAb`cnF7$MA(`rD?*M)o1w)t-7n!#W=P`jd{QHb|S1_%3o{mQXb_k05o2Bu_kx$ z#ZnoczM^k=MmNg$yYIfUy49q1BSq}D+wu(by3wLi)?ZxRvHIWr_Isn`^UgkzC-)&< zUD5NK7H4{m8>KA!` z-zbK)iSMmrM^XSDtj=7#F|_>Z@~v6GUjO>#>SrH+ng1{x1_Cz$3BGwcVm+Ci0$9c3##tN(Tj*LTjN#@f5qxR(&y; zKY0H;tH1uZ)5rtHETTqnY5Y(;EnqA%9guKTn7p+@B|02VyFBLIRYxt%4J7 zpoP?-j2X{U=G0aHilJ+}`S#v_R=aqrQ%ZK{feP$A@y5{!=Q1E+_0o0=AAb}eTUZ^; zM~`@z2ic|;Y80rOH`@+*7nqvDS-L0ngM5{#4^02c;Ng?jHv9zOa2Ts$DOUX~Qc{Sv z2I}9qd1v+Z$x|H+dT@31W@(}~#r$RO0+;jzfO6)I`g>i`P0uP`eTq`;{CUb|nPp-ne}|#Wv7lLkpc~@+d{#oQMV%MO(K5x`o)# z4l<$oR3~+@wVOBHrGOz|@WB=tV;o6#Y_4DJM3}`z{k4q5cqjgN8J+MRy7P{C*?6@C z{-#5DDB#Ak{DF>mNSZCho27u`cL!|VZ4BbUsu>6p*!iTzT8bh-#UY>;3flrS*G4{3 z6A-$kFbI1qqwqp)!9B2l--Il%5Jh|7;)4J^iGiN?ib+Ya-4&o*|f-qgSp4sc{UHu~pjw@_s-SuO`oJJ4eSS zErfz?Rxk6^!|Rmz0T~qJ8W`aURZ#8i*9mwcy()rth{Z-r++nhsLp^*Qz&D>pZtq| zmYmqR`tEnXmn?sGb+5(P*Y|Ga#cNl@<1qw|(XRAKm*HOrKw5}sgE|Y*;~5V%vPix3 zgEttEXN*Tzp6bTT-RcI=v<@JrcobRS0aL{Qb$BHj$P;gVok!8L40}AmEzFkf@PPY( z;+~T$+v{iiuHVM%I*?zyYJIGpw*8jf)PFtZIb+DZjEiI%1JU&xwr?MfM#U8kTmXI_ z&2xKVk;l^vP~-VlK8sOlS?rsy@T9g_#M+`wHUfVwl&?3o943kv%}M%az}jdBWTo5b zt91Z!b92vC3$wzI`cZel=rRpt>-6xcP~b5e8qSymN~xGU&5_?=j~$|SMLmv zLAISbadP!9{+AyWr+0~~uAj_1#b95Adk0hi%qzMbFz+4hHcmb}b0%5P zR_=I9|1l<#58waZZ?E?4-aekkXpJ{+RzGtk*&@F4hCFcJednEaD?AA3zEj>i17BJo zCzoEf;3lu=nb&HMaXm)O0o{04-kC%OfN&4|c zGX6Ke`lNS@-G3CI!BEiQa{)-|$P1W9pnlvI*dng(-P*?668Olw@AJ=o-EM(n1F9Gw zr`~#h7VnF()j1M*`zK#`f;txg2W)9RVgP(49qI9QhfE6Blkqt~4-iVml9$$9SM!X1 z@SvSJYru?t1j5*M{-AyX7C1zixA>##`)YX^(SSkn5r8Ddw-7FP_=$dv_G0(%D2#cy?2ZY@s~@&z^qaXb8t@RmlDyeb+bd$mz5e$u{mJeMI*U0O z&pm4%AK4wB96Rw=p@#~6c=|U(J0W9EfBD(eX#mp6&YG+d7`--@mm*^MxdtHjt>5NR z^9uPfx9S`3ExH`NfS$f(;*c{`KhBd~KjAaHA|2-z2e#v*hmSI7rn8ybUui7jV{4kb z9jd9GV{l2QBoiJtZqbtLple7^mwT-+hz_<_r^HP4S=I-ARGu~k0!FJxk(fM@F8lJdDdzpPRcmKQM-H*w`O`;)gR|ex9>K zXJ5qcNrTE?)&O)rq3dXdTX*ufc9u(XWrhO^z!*9*u87HQ4SC`GR}=c_#G9ukM9$rK zcnm4g?@lQCljyp4*MZJ?N=DS+Tg?-8!5q$G+d$mDH9)vB7;4GG{f!+4d`?Z~75dYT z=6bY#SQ>mZ#-sSgTti>C626zrCFAwg5ptsT@~-Fk4_M{YLVsS)sETIQ&-%qY&v-<) zpy!(Vg*#bis@dI?gTupsVf19Q!bby;&$w_Pe0|n^0J*+kfUu_8G`l~dfAgDqlTqX_ z-Vu5U04ogM2fy{<(4S#dylA`9 z?e*pYA=%K;IJ0wLTVs?_1%EKqF&Nx$>>Ixhpxmy$@!xLov>VsA(j_=h2yx~xYck(i zJ=XpoeE8kf7oUB)`pX~v`RbqiAO6F3eRl3Z{falt8x~vJ1NO&}AVwrpl)t^Pu$=P~ 
zf0wpK`@fy5g@B<~SALx`I*s+33ooI!LuGkJ0HvSg9CzzxPHmO7vwD$98S{8Im3lU(MXcxs75IH9$m(Juw zWhW>m4sleO^pjEdX|l3u=2_IbNm!ai(Vxe??DvwO=c+OFk*DtLw>*}bm9~jL={V)Y za08C7_r_Ye^1|repR|){zo!yw<-v@m;*&W^H}x=(C%c~cZ>>VzQ@8&7zjk{enHy@^ zH8+&oz4DvH8tJ8ysLSNH+*y))XUdV@*FN3yzDD}q^NI1g_Gr?NQde0PC+IP+uh+Pj zX4X+2Ri`%f?OVF{15;wGr7>P!CY32uIaB{`yQUt!Gz6`zm}uVUzQxe8aKj7BH4~Zg zhh7Wa6AMez*4tMeLpO^0y5Q*9>@1ivRSnS;zvQgUiMieHDa{QD*7H=PI!#Tg*Yb4X zt@viP%uhedGzKVpEkRp!OggXSC8Hi2Flal)pjf}JA?b~nM`vjI7ywYCi3DX0IYUG5v@Q|ARhnfl4Kd(&8T_Pqc6 zzkKhJw)e5zOkfS4F6AW2WI1_!n_qRAK0pUImwzUQ0fXmW?+hS~+HPo5SMN|b7yAfN z7-@MBlO6owRH1}mnUm|)faWddkiN1RK_wk(DrWu z`QDrUq=2ozyJY3bH!9!Dl!8g>-g^Eroxbz#TK>{$UU?0tEUeq_?yH}A$w(FZ&ls8Z z%uRO|-tlw5@RE+ens@#4x33DCytJ{>e6Ns&cYI%{s0?0V;=h?#OpR6fUT`?d$wMdB4t6O|<29Lsx7GVm6VB-i=T~Q&pijN|cC5c)_1(AM zT7B~6nbo-)_X1?^tUmm~4_5bk?MJ6CrpP^C{gqG*DRi%G%X;6I!W$1;Kt5_qVqnL5 zUtQ_F(;W`>pvA|NybRuX>zx6|&jf}(NZH*~9e9ibB`Bi%T3r3T@4UbI|NZ2rt2-$J zHtRp96jYx-`iqYm((!d;?VtR;@8xl@b@lg8o?QLOUv{`v6ZYakP=6V>U5mvz&%Bu^ zY&F!Afilr;DhODMf=?RTD6EVfc@zt;fzOf10yi~k6mE2N@e-hXnAf~%;p`d!u=Wsv z^H1Ge79$HN+Zc)x$s0iEmEZaP@3eTi*7mUL;oZ^L!w#6NPdbEYHn6|u=Qb?;W7TuGwaw`zx#POrw#-H7n z65}Co~Hdfo!t-RemBn#i)UUGd-6&oS8dh%!yo=&b^83J8r=dgPcEJcfL>+k-z_5t zqEmUVd&COQLb=nn0p#jSKrNu_^3y`rdm>hN3t}tkVdKK*zBSVKu~|YO&LET zbU=Nuc&TUjj9ZK`^kvZCrDmKwxi5}qeP8*!HD}Qf?Jf9A7HxXg7@`or_G+F>r3Fyp z;mtg)>Z6N!t_l?~ihg`YheQKP{sfFE(}e`sQ$lTs0Q{*`NH` zEMi|2>sj9bQvuL;id9B7HQw`-S>OAY{B7KyyppZ884&@INuDB~0D04w74%8W5aSQlV3ulO<8 zG|p^O=gD^U`t=!)znAkYHVc5&f{j7JN3G@RX_)x%sfUivKroqb`U+pYSs zP-DOOY|6FhHAc4eKCe_`P8bJ$${X0Z6ThiX(fo}-jQ8JpJ77Ru@BoY|WxMor*!-KU zNGH-~fQ18j)87lYw#}XQ7Z4S{@9iMz{p58r`0|x&&EGB78#BN_-lXbr;>3yKlrP)a zU)|V_ujHI@YOWy@$UAbGVGMs*;G@6x0>c36#t$&yl~*3kp{0x`v;98GFc``YkPmpXxs`A-H@L9b;fET2goM79B!unINXuJc~>CRo_591!-dk4#$n$8 zTpq5QEBlp@HGr30L~{UO^tpby9UytQ?GBWEH`>bo>9azyWN2h~0fGX$dQl zpYZOT&PSl%kWsVkf98lhtMzaFYP{aQc42j~Gf(V76DERuM`woG0lecgGH^nj_Ka+Q zP#^6n58qK0+} zS10YP29J!nlhKdIIN4y!KZDE78#khBpjCMSwdr!t;=RrB`r({4#3>#FaNzKPcB?VW zF^JsCz#;_9TW`O!dhgx$R{#8;{n6@|pZszRUw}647RKX2dJo#t}OR$WG@~>`UHU%b0ejvFAYf-}>OgiNh<@*^%moN6ig0uQ&e5 zcJu-E;mw(cDnmG*1*>7?r*l=YY9}-efR_woG#LHAN*hxEW4u3}ssX8AoiDz6^&oq0 zCi}@n;M$@aq=VI;)*67n*?C=E?BEU z#1b31xitPT#>|f;Y>sW76wanTnRCS|W-$^i9<=6f?HajXK2>a~d(EJ)JPl0NHW$$S zMSXQEnd2ZzvcR8r^v&x>PDJP1(YEo`y)htE9=+DCwViAASazf?`EO_*F{fH1a4y+@ zpgFrXZ%#L%=a>`As(He>7Iac)b108au{bZl7VIjx7eIdR>sv$00AWC$zYV?r-uoYB z^v(l2qt?mff;E}4?65mjIb@jL5-Now!?9y08h3%X>7!M9jx9IsBbBeiCzc=kR z=C)@reE01SlB+wL1Cuq=pNsxO_kMBTE=D_f>00eBY|b74A8;GZ%(2Fga5%y_YLf$+ z$ywvW4SCXsjF~k0k%Q`d{bt7Yv!~<39dkgiUDd)znHz!lcDC{ypU_?Lv+-g*?u-f! 
zu>fFb4X4LP&Ch#xFKa&r?rS-%?9Z!yk@s)|^r-yueWki!HXsx+$93ok&-?+n5S0oLCQtyS!3*%k<1`(`I%c!;>HE9q?1m5~@1Eu$d zkp{FO91PP2+q~&{jcqv*(Wa}X!wt#>?8a`=_kZt@aV9>5D3)ncXdge)=ekYqN3M2VZS-#JE-H16Gr6Uvw#L3SySIoIcH|_U(dFqk(W#^ku zkEO8%q10v7=Mbo;5|p?5lV}nutJ~c|%PB`&=dK6+`7voHgXM`VN(by4(AB$BIgi)g zo?G5ovhn#hG%wA1d6Lv`z~r7$$0In zn;&_+>+T_vOo9f@QZh=>^jihAb_%& z{XLj8CIs)V{oidhD({4dEX7!x&_-yg91=j=`o~!D9y$!Yv};n;o=Lxu`P+W;8v6C$ z)N^R=SLLnIV)|v!JQId(>Rd-q>*MiSh{r}*19<2V|zxNLYJY6ax z``X#W^yz0`tp4nauVxYj3|!8;48RRQIFwTOMw2t8=|Wzi*X};b+i2tJ%=v4p!yVlC z)``4XTNHnt1^xf~uYOYe@Vv89009V(Qs)2ZA9aXPi*k$TqrX1C`oq8TVT-}r1K^s_ za_h(oyS{9K{o+cCkKI>hh){kvZg?=@%TT5?;#Z1Oe)`(o)yV^G8GUx8_{VJv?_G<2 z3YSnJ=m8wFsG@LC2#jCh16(u5AZ=rG78%WX7WJ0`LM%G*7v82U55U#C6qbosT!<{8 z2fEc?GNS$-FsweK>`;U)vX(MxSLrMco)&K8c^+E8Acqfr{HtH50Ctd0lee^tlQXAH zm$!&mhFu;@H)gTFzMvr`Kl;tDhC_e_6kcIBY~|a$DKI&u@5ev=+3NSd_hIn^Emi^3 z_oDY}<3V@r`pv>?98R&cO^rfiE8!xSDd0dM*TrkMh8BQ8prCxjem|Zk_S>h9v|udM zOV5n6LwP0FEycWUOK5FH1H&G@D6%}D0er9K`DJ^*Ev&Wek*U(;y2cgr9MkY z3?K(yAUSwI{N=y-*P|5k$ac7!xc2I3yZEK6*Yj)-_>OPUz@kc@2xIf4FT7NrwOt>x zT~ArmuZIsGY+)Se5|99V=W)s~0W|;o^k++ZqQ~L9bLNV7fS?Q^8#c5(Js=Rj0t00% z9xPBOE*`+xHeuyh0I7_H2~SSmOuTvSJQq8+IE*Q?074)${*)&vA+97)=gTu^s!L#W z^5TcT`-4%CXDejm^<3dPPQ3BP@YR8X`zQ9}rVdwp>(ud4;wkl;fJojz|Ch0R^8Y9B%2agOGjGq8`K+z3_`y&VRf%*Dbiyz~Tr?EW6K(?EL$EJLg zKXXz56|iZH<@L{;4-g&cjSeijm**eZrtbjTyf!VQ?waT^!vS*6Rzw?G-R#ea>#A{)7f(77eK4ZWV62UnG#xu~v;&#hxg@@X@w-5(~X{qn+Cb6B_A)ppZJiINk zgi;}Yd3f(mPT3J-(I&ix>Ir#4?vVZJseCc-zitj1W5I-3Df~*{7~OL~rFeBqhCX9m zzmqZQ#@h*)U;!@Sl85P@tFl=kq+aL*~oVXR2QnA-_u_pS}&b|_*dsHRE0cWqcyqs;$?@xMJGlLp=j{%%oXv+j_nTIDsJ@E8>8Pc zusN4w;(nHH7UZ?rf>XFA@@0U#+C@%1`})}!$BZe!pmJtjtUhEU&Zl?SwSfizdi)Ml zs(ICW*(z>~Gv*1WqMi<`W@rVXIJnh;vOoO6_s8(?>(4$LS{*<6RzT8XPBkD4>E1HbBocFCpvlwudr)ZAxRo%boSr+!SCg%cV+d>*)s#K zl8s`ds<1=vtV00OQtwJ$d*6PQLv7lhqfut_8M= zJKXN1bO^e^5C7mF#_!MaPVWG;Vv_4`o^yZ$@wZ((r;Sm-18>;P3AV?1pnv+wPvi4; zam1rz+^vl~Zh=K){Qc&7hcH?=kYjduwK1u&l@a7612to9bI`FoP18T{93B|Gr@qwJ z+WRu_g%|pji{~@AG(R>s19ATNpa1jK*+6FJe_V_wjiJ4jhwfs=9zAw^Kn&r zJlpN&@H#pnFV*$mV9hD;~MBKKDVv+^pWqp_x{xNP#_TlFtA?!2QzFu4)i1q#jt+w zy$?pO6t=)Ya;7l6~{a7!`KLclukow;E^4b_j1wHZ&m9h+}-Vefz%h>`>RnB|v!bW~F3DuR?5i z_nYWoojYe6Bm)4D=*GLA{$aP9P#D7bZ7Zg=IauDDJ!coip1i`-EyUp8)Ogcshkg$D zxpY$D(JrPV9$M|$Sf5u?V{a(lCSj5;4AQ!er*e1>e#&J6tG9X>QdcHY&OJ{+Y+Y5P?T-1lJ z^_{-BDSgOpSOEO}bmI%1&121A+|v6_o_u?C^6hsE6?AFB#f(-`{{25=f?lC->6zMS zUdLC~ICdE+Lue-TaybkWkZx|&&&B{_Fj@CJ*@G8c^b9=i{2UHVqjScK!r9zN|1vHa z;dboFc+^;Zo*vjRsJ^FGpI3kVhc0BsvmD0Ek*01HE8FVUpZ{zf%U;HeC z`myxFtLZHniZXKZSf4d!G!mZCcYyb+&cy$g_a z2{vTL4U+1I9{Ux_PP39#31UBv5I<`sK!?@XC@_J)|!uZBY+d zd4J06{h^QaZW+kcVfdpPr61sZYTfNg)$P*BcPJu_`<1cOu+OQ0Nn8uoF6H;tznU_D zaPLSUjdEv^qmGkO;!;oFEVR)EJiol84$4x;zK_zdyfXl~bQLu1j_$LfD=$KWbY1#K z!TPEE8_eUiK5bp&8AVOM)Nk5cS)Oe4y;sy_1KRdql4i*l{kj%fE}tG(kbk`9?{@5F)PfftjlMa8a^&zC6)L!M|sVHRiljR1h7Z|+?E_ka2q)2CaT z$Vqy@%NJSFPc}JzC(Hkj&gAXZw#a|`x4*yIzb$Xudrwy9uD89bh1-#1M^|55Y>RpM zrk=*HDZGIqSMx^SnRnjhmp4Yid%L=S6(GLhd2K5#a0%Z(sJTDAaH+QP<|-V>4nP_u z0!{@Kqz3$#ihL5Io|lN?=4$V@g*YoDH-*xmCT zwdRZ6EtCFKU7V}9DoV>H^>=CA(vhN8$r41fk$+MBm08FT(h;crT_qkg-e zGW78mXGihjvAQSY$o&T?yBXnlHR`81+#AnE0?Ns~3<4A}zb*0|EJu+Yz@_{xnD+7H zZqWw}*uJ$betA0l`tu7ZgG;;2E4&ND0H*}L(mU$^qD2iQUZWW-_6E#y1hDwwDStB$ zMIeMY-jrF)PD!IA571s8@fc+I>Cu47FK#X%=jN0++jHeH0D13Ix_z(X`vcAjHBwzt z=z<`Ybny*vXKx0OqlJ4Qm&A&-m6oDuyR*Z20K49&Cq;eoDA_C zE%3A7>aahW;m@0aBiS|rn(mIGdbc>;<9SlKBNL6OarM0jfv4OyY-KfupbQ&l9Z$)s?(&EckP2pZf8n{-0)W2!@=W7z$MDLY;=T&v*=#-NYGh+>mqhm+=m(4DuLXsQ>UlG7k+N z1?X%@*4`^b0r?`P^=kn$;?LuA3u@!oMb7D3#w~!y=JKD|qKP{+a1bmnN5GK&*&bLb 
zyofncJ**SSuf6*KAsI%<)t*hroMg}NZHpv^Q{LbA@-E{w4qU(YG%wOZlpN0!(n8*$ zch?J#z$=oBAkUFgUrj91#tc3bCT7BIwZN0$wPYU}+-cE#s|DJrlW&bd?n-klPwmCn zu!I-68{hL_o81q!mE0k_^dWCXo?pBn#n;{(Zw&uNLmsbm1B**?Qa=E5g_6kkfA|`` zIWGO^uRcny#_t0dJ+0=yP6c4w?P6{!#93wNlZD^g0h7o74}bV~lN0+^zxd?Wv%BY=-ZB0k ztQo&1=4!P8Br)(9yS#CM9NXhXV_SX5D|4p!?{rMOza!&}vko4mH6Sp>Cf|U z&N$sX$yiFx(+hYO8yAlQC&&~=s^`hNS2pB585m?|?dEg{`O;vGOL7iqEgYAFEa%|R zyS1fw=XiyI0ZhxNbu4iAl^2Cp=v})7=q*Pw^g6J2|LbpPLOir3nw=Wn!B1OuZ&`hH z|MZ&dZHs(7{-QkY-MKM_iv@s7GtA)7Gro*LjA11LiyOugB$->s{ZV0uPe*% z{2me$2@(J?gO~^soHLn~%B-wGRZ>`Lsiml}Jcj4!%Z~89BYf>U{{g=7ji(FyuEUnC zE;+ioq-u4|#bgdljv$DcnCFQg0np#iI`7Mp9}LfV&e>-VYp=cbTED&a+7`zEg(o`X z9Pq!lIf;j$c<1yehnvDr}6lY z-xtuaa~YyBk7|10w~zBUZ|vD6M=yO89{`BxN{<@*^hqGn-F64Q@Qj1ySx^9Sx=W4< z79pcS2}f33zHn}l2RjQdw3&B)b6Mm6VhhE>x9H!I7j&>d%IBM7z76nJ?^)D}8ORRw zHI>m>ziH~WzeT=7>iHJ zC`q5n?6>hj-t2$*SPMceI@C|(*MoKdN`^;`Eo}jmlR+=!nZ0=nP`Ab5#=ejuWVr)x zwdHYq1DxhLzIjW)W_0HnPj@$u7=PNfy}5;5K-HPEL81}Q)n(p`hvQ=aG5KK*CcE52 ztKqeHOkWERB4m~JKYiMwUp&XF{BimN4{-B}@%fEd<&`H!vqP}~d$ap38Dw!xS>^z| zWno=g&;X5QfxeQL-8s#T zbNS-w8MC`~?awBBX}o`(z0fcm*|UAZ%5?bRwX5ZQFbdNU4l(CcIEo0Ooq}>G1(?QIs^ug#(k0yV$*++lsW4eNG=QGX0_4R>y z1YccmA#HT?c<;c0L*v*4%-!CC7TuMcSme{KiKBib*>~a8=@wIW&ph_+V<2rjkzPk0njnyxI`HPt|x3@4vPty+c7=9{W3y>2wuyXXXG0CatPJIDzr~m11 z`qH-g%1$BS7wwLq{M=D+R{?(=hQmdDzo$=HJ? zrMRH{^yTtYQf2vGD`m=)T>0~Fek}iu7xcWaJgWs$&=fzyTAi2tWIE-@2W{M+vgPgC zJ@r`XvXr%UO`5N#MQ>pCJZjRM_t3=+x25g3Kfb#^_3FNRQ`iv8FU0khbS^<;K25gL zeE+G;eoqky-aD!~W%@mNm;ZE>49i7*d*^#_TsnoUwPmiQ)Tc7^Y`%Rh_337`0wOJG z&|8)LnzU>0yQ7>*rCfhh!)Iw&U%j*T?|WMNy*8XmNttq)5hU+KIw>HB{nmm6L6C8tF`$XPqx{QG|+6PB0e{sMfc8%1q- zSDTl*O@{w}^qny3@wlB&U&Bez()FCWlYl`u9_Tu@~sau7P=&+bKj~&m@e6afOk`hWh#->p76ab_lNK=-32IN-FixexDdYgS6--}Rib z|9l=_|J5(rrkXOhHxS}4Kl(J!mg3uGO!Bg{yhjtnow$4^-vOSY699Vt zlumnll%hach-ZxP-gBTN5Z!N?4e$0n<>>H{3!dS*D%^(xOF0JYh@tGWHH8?cOu+^` zs_S?w8JE=&uqe;_DGxv}_4?}%elZG%Fb49{e^qm6bv>T3HJBIe-T?3o?TGpMn?S_C z*D*YbyE-65iWbBDl_SOX%q#4hZ=S9`KY4DvD_=i$cy;2`g*+X__pBc?0`dNMc~mJL zBgP1pTL730-VVDwc4*(w7|nRsKWq``#F_K;S^ZGkv}b$DxLCHM%nmTXs4UOQ>2dv4 zeEE2r!N>5qmO>A_5*EP+ZN^)IG8&Ib&m-wp#=ttSkYiP?1%|dYW(e^VyL=@8H<}JW z9=+AY@8Q+bQt-4-|5!*;A8{5b{X5#Kxi5uMyX>04YrHIsZ$q473J~=hp!>9~%j(Y1 za*!Z?dZ9%No+b=NN-MyJ(#q@lUVYDlO{fP44Nh2vXoEj_)Z;xXnmg#69#0f)0 z`rQHRJZXFwNtA4HF2%>(gtuxluQJ<*EtioW`b)VyPId&A8c)08Gh1Z=Df(n{UWh`1 z7&DA#veC7?#aOkV0sJM`jCXnX$J^2P(8oaa$Ht+7Umv))Zjg-8Q$37#TYx1o#)So9 z1dJDLUdj&P@HH4GYRYl zwS(o{#mfU2KZ<6z3p?>Bo@Hb$AZ{Ms^}_%Iy{GNochD00I3yHsIy*J;5avzIkOq#< z0hjT}wTy9<0L&T>`iBQLgUI2xj8lAvCgc&&1Rn$X7EofL5Bf3$(TpLwIbI>_$!MNO z7E6`Oi;OIFKrw^rUh%mfG*=uf?l0b%a|@#Kp2FoQgBS4KfM@ZCc^v^G)WzZ;*`|Mu zF9*E>1a@YS0quq3;N5=i+}YKg4Dh$!erI*<(xuLCI8OjnPVEDL*A_b&@WrzfBW~mw0b*`*&)y`?{9l`hW^RZXNT58a-oH>4jgzXgO@A? 
zI151nfCYe)E&2{^cI92UbPOczbJQ9f~gD#PNh(B@dH<v4*DCBFBaBhU%>8_JOC|x^D+l6%5xKaq4~z5fP*282cEK3xVWu3B;;{;_ROgP zK`&pvoR{v62}vS6&$c`hkF;3u^4?vmd)F^S|8F~xwQ~;I+P=5;;NhSA_V4DYy2QS> zGYvowOa)S2y>@A64J2eYFnQ3&oJuAbH#)xC(~jAtRKFc;aqISg)^q@%j*vO{uf@)I z*VzCs2Jr2vZ?*)A96ETo3<6*pW8^+S4%j$4RrCTnDx6cn=yb_oyln2n7q7f>Z05kb z(O4d@z41nEZGK6w<{dBe3SP6Q#Jd(>YmdIymTfx%;!FSG$G;w31pm`1%<&F|2L|Ie zhq`{-1YmrbUwGk@t7X=LN-`|q)b1Jp^@pE)G_lkj2q?s&xbfr2QNO2J4^Ijq!Bf1( zMe{|E#>Z>|<5K?|Xc5D2a%w$92zzICF4PAE6<+iq^Iu4w7|(R0mk%DDx%Z0`pQR(- zE@^3_B{?NVyYc0Z_8M0{V;@GF`FLJk>A@u%`8}OYW<(Pn!TxRQFx!3Ab3&Jm{K$5X zDa?G!;$mm>w9{|;pyy>(nGQ+5KRaH?Kw+zf4%NeZFU-!gHBda>-&7lC=UGq5oadW6 zERgf$w-aV`v}nlT#KDs07xO=z!??b7<*UN;?WrE^nxsQEcUv4VM~PJpG*@Zf!)P&l z8!wwz@jBji_@}cC>=q!qdGMMKc-@k-4;$xXu6vsUZjB?fe(>OSw7Js`xih6H4)-_r z16>0#tM8%1$J!OQbM?xrZ?0ZF{#HD5V2$=UX;j|+0Nz($ePjCALfDxzUpCKOPd4RP zQvKD{+-J8QIeh-B3-R=F5VJa}mz|AtBfJu4%>3l{^U)Ly0NQxLm_2vqM7#BzGxOr= z@GEac*8u;@*k1o=3)#9SIMD8)|};eo7Zt}M^#6M$U0Ei#N?jLMmCrUf%~j_1Yu8$6gr0N6xsfC zRe&?OqAZIuLP^=}MUYv=fds0~g=#BWM`*qZSRle&i zDJFe+nvmAtkPSpw9ZX)5O+89iRr<_>xo>b0D5^PW*Xkil2?o$Iskx!JwKC_c_m*_( zJjvJU(C_7KFZSNr?@3falh0bYp8G`TiCtB@26XP0xNh@wEs11PpYc-eM|7YNFR^{s zD$x5VF(uCYxAw9+!1tCXs8YKr2)#FL??#8x6_RzSOL==V|D;9(@AN-m(Z%xVdjVd% z*Rv_Ne21=UL=i^B$CgtAWr)?+c$QZ$6Wfw`xmSp+z5n;hZa1gGlZ*eBawP7b{*!*G zWO=E3DTYVT7oEB@6X?9T_S>WCq&$oPxSdav)AzsE`o|knjt=i$zarZ*VNNfp$GnW! zBVxaoI?22IkRYYen|142{$1c9zx@Y=lM98&mHPD8_Y&6{{k&T`_tDuqlgC241?aCX z>bUm)+Dns%*XJ`?N!PT4=gO}ied8w&*X}I^b-Vle&u5J$XoOd1`*~$~KH040g9cMw z|EsV6=3S{=^JwZ)zH5&>nIFD-xMYRG^Y*j}@nnzlRpuuuhkyol8MW8y$M54&GDON|`=4KHXjCE$#70 zDl*Myk#{bJ7;ELOJ*(^`9Fy1NwcJNDwHR7x51!~_W!r={GE@2~Pkp-Zl{}s6>6>m0 zjpQqja-1}kG;Nl&>n0t>sYCG21PuT57)6NkWktLsO-PTM9Cimx*^czLpMO@q->%*} zC?4x8t3$gEtp0U?_T}P+o47Y^5>0YOxr>tw=oH ze*e_v)x(cZ4}dq`+|`LPX2LH;6D|OJ@vuWuo;>P3-UJo67;5!jbQaR#N%@Y)cfTn% zlndTt-d*dGdDk^I{Dv~rA6^LZpm0$-c`Q&i2Dq*+b8vHQlHd2AX>j#f;~6F3mr1df zjMB%8Kt07Pp7^{Oe*@}>*&K~*4;}@ucPO*GIeA0~Z-5pKG=2MxS34Z^#Okb2HigaM zgo+gxIUgj_FZVPw)bm@AYZ?#R8wd2^q1LGBN@BVT)=d&qbPujYzZz)7y zUAf*C#l?ss^LSM9eA*U2-np9**F9rrsq^C5$500vkaY~-dnv&uPUZd79O$z~hThWM z(K*9m?&_bEzi$KbGkN1>{ThAyCw?*Tkq5(TuLRK6kK)XZw@`Tum{~eT$g}YhP)`~_ zggR0Vfh?Euq>~@7B|J=dLuYbApBtl_IxO?Cn6&W@I&6&YB+z&kHWmQ#V+fY7gU~47 z6gI{M06~Zi2df!p4pGEA#t>r*KjWwMw$DOX{V<-q)lt7vJcZHW(NZrh20(e3vjG*~ zcDSlOeDd6)cr%j8UFF%@sS(FR z@?3Zq{C6V&a$Dh@#CiuLeQW2-jdmoYF9CEcTmS|icUa@|8G~C}oU41n$9y8;7m zU%%FYR|}x}AkRqiF2hlbUOfGEo=fVeF93Dp-5Q}hn{CgHOCZCu86oC*{RZe{*h>Sv;HyP?Z`YaFk=N=~~ldTR$tlE>WK1JgOK;b zMM@!>E@ZgwX^wDEpT#QxI{rsg;J2}azIX#o^y}R~Y>P;Eg~u|l9=lK&eHQY_Pvb{h zM!$}>+Ha9af7}VkV{BPS0-8VRzH;>`@cnlDxvlUR3}?n11Nr&Not+L{^Mtw=2sJw` z>Z|9<0|*FYF@7G$OAKqWi!lkvjmc&_yC>w8n4hn{`s(Wayif6g_T7&zVv~2IKx3si z>ZR58Z#UUy-9`T_R_H_h4!}Bd=JZ;70V&Iw5tEYP7(lZ>LsWW;brymqF8P;*0WlYq zhcHhTJJ;(<-oN(RD}|7_o*cK!Vwp>K?krws?IXu-<=y)7zMZT8?!Wk_9WHn>x(k!@ z!o)ep^XJ=@z|a;(MA`HKUURc(6kT|MlS>w>ZnWD&Y+?X6<}rsHJ^XUsxtFFdX+9Gw zp>a)lR99nDh$%8|+lwv8)aT>?p5SGR&$lPH)$5g)kFQ>T_1J{+xE@e#OdUORbQaUE z-M_uslT0!P4j7p3YvIWFd`@_RuD86fxjEtYSH$p$J8V=bk4cYX=~Oy@!{8F5<8QBIu~b!hisct?%;e{UQJsV0iTC zv5^PzF^<%S4RXH4N?`x~0Cr(Z^dm2Hy9f8=Novtl_zG>|;rg53{Cai1U3**82ZUW2 zS_DpzwRU}N%hOR9lMR)>slItG&~XlwC1-1s`R88pLMRy?-gGN+;$qGkuO5GUwKcHI zeSP9w7-RLB8fzY~$VQG@OsAKbcZI()FK%xE1_5^L+B@$9knR`OY-@)X-+Yv(W3&h6 z;wg*eLP?N6PrC+0(kXaODtG2TH15v}^&=49*acb%?_{z1?YG|zP;F;iGUWC8+*~iD zjz!>ojd=^GLJt5KFBgJMA3T>AvvK}h-sn&52JD9gDe}(%^bD3D&%gcPgF?a_8M(L| zBpkpN?JT;@ft=OS!o=+0tDfowu-jkQHr}1vT4;Hkd=$!}hr>@6oh|-2cy)UqKe=E` z0sQ8m#`s*=5B+4p4XB4^<{dlt$YmiR$Vs#z!^{^$x8w=F*LP=5f4TY~FiNaxV43;K z&K%yc{@R{-#3}BIq(u?3q-mCMo3C>N=Y;K`q-^g0y1qcsd 
zA>|q`S{wK9-#pGpoJaC;0@@qU?~SiQe@ z`|7{`=l^2$&7C`|pJyQdAOF8!tscMHy87K8eLVSMFBLKLh;kV&bPI-A45L3K(-(R zYfSJ(e?vN-!AcCZuXcwn5l1OC1g%?1ZiV9IYO-_SCi?hF&;K>urWaTK_(V!XW5 z(PE6iv)fy|!0RZP4e;PJe8MoG7?5$C$QIETzJTvB4);7$U=LDSQ;NW7F}JxiAJK~v zUd_g^@Qcm5je@dhM^-RCi{kG?bSuI?{PcBoG63hexW85`ZN^Oj62O>8nTk&KV@@I* zh&FpVxuDNaKaH;c6VWYgw%UZyEk5+zKx8rHx}s zNYlUa(J|N;=STGcH~nVB4-ar!_d|Odvf7O(LDlCx`*4TwVF*Dxn|BO+&kxL## zR6GC{^k!Itf4~e+SgVbr@8S6UGw-kd_#gdIAao@B!dK(Ma)v{EYY8Y{{3(pvB01<;vA)R}@Jb5A4;CaM=L=laiv*sFe!!kd1uICQL6-_+DUvNp0wvpMfgK@38 zh=K&k4e%XJ(RAWOM3X2D%KF0$=jIp3*6ox?n|Cx4ttRqKM2BOnR%yzi2rSMoV?7jJ zL~FSel_Nn8R7MQ_K6+#WpOGTo5J@!ixA|a`pac9MN4A+i>qdAGA!q|IsXm5=MLi{3 z%z4qNADla%!dj$Gn~~6&lAL1OhDo&0gJch*oci}t+~-(w20)QA3}42KPTSjDGgOLL zaz6kTW$f8jw26&?c|CF9KTE7z`MFuog} zY^3*=p5@5Q8#3Lv&3Qi?@*U5CQ{);MbT@+t2f(3p2~l*6BsSXHgO%KdubESgom@74 zYxa)rAi-CWV`!(&80Mp|xd_frzq&l*c)O_xz@CftGFElG(B4TDYqfmOoGgr<&+GTV zu|uU^IyiR{;8}UV&0)OuAbIlBpMN^^B5%fM4QWAS(|W%?I20KfUAAj(JDKvq+4ojI zZ_`Y!Xb%{-?$%#M2G1C*7|j;vdYk_v$KsFeW1JI}Wu0{jMpriFhuuSv;lf=TyLWZr zS_b%g=u^4LR~v-n2U!Sbr%s-L&W^p`9R41sc{C4Q!`q_HIU!z!6H$}q3z@=cu&1bH zKmX}pR`%KI_{lRF zc2Cr1sWM&{ky&(kj6P$1^ZfPdP6jIScW=@1&l|_9<|dl|&hP%ibwu));b~V!yHd5x zJkO}yoMHsKbLV!(jHjzZjQWkki6KV#G3GH$kRSGl(U@bxn#^c$NHk@HeU?G?&gVDE zr+sbp-uFHpS!>Rq!%&Lu zLyCgu8O_f&pGvcF?NV^chkT?u47WK2GaNBeyhz_hBQTSTxB}nHpO05KtvzE%#xJ6i z$v*Qg26B3J~t9t1RBJ?NSN@GZ}e5aditkqpDoH46uG z`CvM;a&)-obfa)a_M78yN`CrFM!*vt%NoW(He;`!mt82j`DHrO$YBQQV^iPyhfmN- z+3j21h0+DFIi^1T_{YtyL!~du6<;4t(cN!8(7RrCe*j#Djrog@HOQdPnP@$?2H5wI z*Klf$p_5vpM^FYd%t-n7D){cDYdmc2v;OZ*-eVFG$Zt9+#pvq<6`blz&TVbbXI|GI zbNm11{> zJ{P-_F(T8g8zPY9j-~)vYei6#=hhJ?u{a|q-+Tqo<$ibexE}}J@YBhY?~T!xA=Mtk zy!__c)zufD{wf3cg>;s^tD~i1;>d|?&#Q*?g$5r|1Jf`17z}^^vcnFYKuNX&nA2EFuam! z>|*rv(HYb61yMa}t&w(%u0!v&zmvAg+?mMfa3J6M!PZ>+Ks1LB82`mOdIPYdcOtx~w(h^lt+xaahOL&<5l`-{K)vv_n*>+FM~ z+RsheCnrHT{SNfADY+BkTOX3c;li3~9Oi(ib9+RFd%WTF5BY2i#%EofRH^9Ld^KkU zT5*6Zdy7tY)USEp1aAIzx=p?K^+@Zpy%V}x5Ag_{Vtf7HE?te!@~L0F_}TR9t_bvE zNT-c zDlgqEe~n8$u)=DMjxoJAdg@;r8g=_hjiJZ1Y1l3D$|>5r<<+=6*r16r)08%<4@9_! 
z-}=(oMUUc!W=l_nyOMas+#i4~N|SYT@*KTN;L~#{d9807*naRKun0 zx<75M5j5>j-wBT4VZhbyGVWlW_eSD*KicqYqa0_zz^(wG{8^9F7+O&DrP^YWRzUlH z_r2!4U$-|+4V=ci?zftY@gzl3^>uBE%NXOAzCLB!KC;KZC3JxQ0PX>-JTeu^IRJna_Q)I6q1F{dfPz|Fin*&n_+oe`V_XO$|SP z!7#&2n)m!}5g^g+fBwnmtKT_ye0AdNSF3;f**B}7UHGPmv=)5P=tUm!zTasR(9|5b z&BBQ;twYA6w-n&M8lywXk4=p^zBw9*2C>1gDJPTA2>29MjoJ8a$dOUFTJWX%8T|Hl zb5XR|i17`->BC~YKjX^xKX^aG&Gw=I+ywq&^@9&TT0QQseAc(_-t1WhQLpA8_(sO| z*Mb6XU?6KOP}6>jH@Y#9F<78IoDVI5+dIk_c$KGc?MiuK6dNOn>C`hD*2Q>GZF8gk zEsUe^q-4Vxg=Z^ILc>F9v=faBlUvNg`d?tC=({*A1*m!=@GKKEqJgh5z-fiyO&JCl z_||}yDBU8;m*-Q9qdd&K3kc&Lehbc7 zGYu*jeD6E0^(1~|R5M?f@8Er2@jN*+Mz;*9#-*HbOl#Oht%IM3FJor}hSr+FzUnZ~*(Y53c^%gn#ncuiF@GEf@FM)nESf zZ!#X<9;F9f7bEV=)!Eahirnj{arh85v7-yPjvqa;x_J5W&>D?xtR3OM(b4o%r;4hn zzWzjl;toop9NMBNDPE#RZ309OdT~@{DhCdr7eQ9%)xcE%;^RwF9 zmBPE6kTP~tOf*q*H@w^E7_)gmG2gl+BX0_`XcfHdsH@AyCM899v;o_{@6Z_d}5 zgAe~`PhAT2>lA-R)z{^KXV}vZ#gTzp`Zx2|#g}B7qlmBDI8t^+^B9XwEG1LK%`bla z%b80e@@)d8krEj;xnaYfh$*Jgy%{CUd5W)SQJW9>0gn~|Wj^npW3*jJmjPx|y5N;7 zm*;XJh6rUCkkFm+!2G^d#L|fqMf655X|WhoL=c%jqD=H-ji5M7YlSxEr3j^nGMk(s zV2rG+vBH7&#&90n%X7-1a!nXmDnC@K;1)>UJ* zv9N|qYlP32(_G2S_{s(lEe?mfP2u;i{^~CVx3dX~-sZW;`p`1GEIP>CC2!Eed@f0io7L5jC7(tlPRBkbuoiZ z`Fi6Sn`r&1j~C_pUe>F0fbcAee^2rU+#I%x^XAR!@};lG*vkmUfQMgfekXlZIO}6D zm#Z{+=JILt$Czw5$*W_{*IOCs@P-W}2bK%H$iG?Ns*m3e<&@voiHDCrI+r8j>gwwo zx5uG$u!wZ0UoMoEgrSGAoDpJpC;Ex%W2g~98pAYik{=n44kz11aX!q*<;e8pVaJ=L zC}C)N8?NEd2@SkP2Hq@U51gASx4ZFf3KzyB679w|+i^v%AMYL|;#=tB;ZI@G;H`SMp z1wZM-ntkP~FDA7Rg9aUh(dc1@7X6vSQi6F|pU{CbluTj7kh@y6?4>JL)2p_Ptmn|? zFr1St)xCN1dInu5_t0HxmFk6W^JH^yaAZtCNpsj*X^nzwX%Ad5_xACGJP;)F+**c* z$zk(>9A_YNqL1o{w>NEK=t;gNOYrp0Xt+2-8&NVI{MuO7{oaI!accG#48~1Fd5(w2 zpj8FD<}{4xV(B4zGW|=>VU1_l-?=v#(Y(g*^%XwZEZjTicC^ zEF&*uKVH_7I2{~~bKq-@rS$FGU34b+<8OK&94>(_jBqyw*YLadqhvs*wnWo|<5&(M zGVWG-tiI?E^qu{yL*d{X=xW{`Xw4LDn!F4a@^gO%Ec(`xdwWTzL1vQ6J9o)H-MU4{ z&&-T&qoYTs;MV5mY%=)3o<^{b{ejax2M^@1tIo?;rT5^RYpgBZW^(ZhEOME!V2oQgJ0;URdm2BRpC`|qUtPX- zd1M=TtY7kj!F19ar86GSF?y#xo7~tl@4|PepEUuWJZ!uNGD`2^_)CxCfW7hc<<-Tn zJ`3Naj7eU6@5A5f-i7nSx9^mWkL-Mtk(o30>tuS3m8!^zyL7;CW1l1Hnxov@#f#Rg zNlz1eWC%Xmx^>}Yj;hDWQnCW>on~PeHwI&(d%cOj$U5I&zIr9z63EagsrZtta7xVD zDT4V`I-zIw*C*jI86^kyq-|=RC|C7?Rcc2Fxu31zNzQ0%-^=DU`FJ;8qHFKY0R?~H zCxaiQAHk()=+p0=Of;K+;?3eM)qj>pVPD>{~mXnHj&HCeH>;C5P&Hf4s7}>l1_?%>c|B>CT z;}~G4d$6WGyCt?6ckV0e13&PHg&;S<+n6 zSB;JC{7!c~pwqMTYq{3VrEimq_-pj99*AcwE{Sh^LPV_Wiiis}4gAY+>tqg3=o)IQELkV|bcl<$32 zDwk9@5td(5-hx5NVUX&Yc7uYDX!0f99$|*SQO%r|vkChh;@zeCN z47D%U#TA>y%Ogh1G@uRP&DydUMfFib@KQaW1_$u2?LICF=;E~!wM$W$mwpUlI+`Y? 
z?;iHI0k3iKU0JOUo&Ehy|N8b4C9HvE%ZyZY$MjYw(rDp4)VfLHA{hNUe)FznzcEiY{^gzb|KYJ3@2 z1;UGVmL|eEo(Ogm%xm~*oIk1!aeG=F!cGBrPcY1OQ@iu+`uTc5<9wO+f(bs?IGG&i z+5m&L;b|H*R^@81KMP%YqxJrL*9SOeEKT(3tFPfIkT!5lee}zm`}9{oXu-|d^Z_-HuK$=N=`rM8nDuS>yrj-Ja$wi${Hdd>Q{@`m)TZjc{a^pIjnnZqy5*5B zQhWctJkjNQyPZehMU=Xcl7ZtJDlLMeKnAF}VUY#H!so^oCy|lWlVcjD$C%g{UQfC3 zolFFi!M^F}kqwE+GNv-dS{;46MLQj>t(=^a`mx#k>CgVQ1vq0`b&eLrCGYU=6flu$ zHul#t6fi=l#{k3#f#CCe6s+f=*W|3?)a|AOzsN?2ODC!XF$JfNgQ~iK1@ffI}xr zq0JU}Y{b!Zl(hQZYI7Rz*f42x42~H~Kxe&-LcLoy@^vwW5^a9gCiHF>zfN?-#=6&Y z>!;`g$K+0AC_kRjl=3g40bf7*Ht+vzgoAfBd&|5W9M(R?9312><-JG4YvzLWw8mI_ z*7Fb7^?koMLKl4Fw~92TnT^Avi#+i?Ui5EjG@-AYX) zK3?%tBl@r*)E>k1U;OlMS3mjdzn)F4W{(GylCr^&Cviq{abqZ$l z?`npRiBhc(io4v2F0`8Qa@f`v#gUOkbg@k~1HsEK-F0c?#S}TviIgxN$}Z*AnBWy1 z&D{pmcMRZ5E_Dhr8h~9y3ECSGBOm1qo@V}5bs}pT10J_Y^4cewE&T3FVWd1VFpDe^ z)nEg|ki@`eW4Lqk5^3tl9CRis0SdvBO@OW=F?(cem&r zG-jw_T#|czqW4pl;dNwnL>jph4;ven5(P32q38?Oa+Qia9)65389YR~iI%e&fqNI+ z$z@ItG#waF%x5orzq1Ry@W<`h@G(+%p-^~aOh#)_?(!(UiXX_^Jt_H&jhupbob1@y zILAOfo0m35)%TJY++2Ixg-GPq!wlDRk|Frz5|y7_Ut2qJB44jaA{%k(eava{$G8`n z*PC80Kek?4AINKlp^3c2_tokDI8Bzzu<#LseB;fCqy3R*;X;I@&8B~EN`0Z;7>`?R zW)t6fDYcb6xR1+m8@a}i`{HeeoT8varlO%W%6t+1F0cO1S2+u!A>521&8DOcJ^ntB zq1wqI^NxXp&dyM2(-gInu-3T%@onoeS!+WwbFZIq)eAg1%8f&w+Y-l+zcJ6d}YewyGEW+~Y&5Ys+ zW|#9ZvWfn6$+DL*;sG)Z@1y0;`o_y%^a}InT9MZb$nGqFPIolShBLe?OdfKUbChr# zGL~{CERJvvxK4h=zZXkOCqjO2?J(fOy|sONZM^MRzw{>PXETh(j69 zNyeQeZ#~|=+ol^z&~4}(+?!rDzUn@EW=c>C+ zo!UIH`Z}ZAwJR4oGGCN&n2@iW+?CSc(S5v7tqe8hq0_6P+UZGfVXYoxM(dLkE02qe z0os;jK z8y%2N!^nnL$&}MQhjT_Pb6@n|fg?qL!*=pxOS(`G#{l8-X>>vd>w-wuiR_C{&YV3z zymJ5Utx{VYOYRgs+Z?A4F6DC~#qTamrW23DBYe_-M$efJlY96zS>dD^T;pHB$catg z0n=s|CWl8azdfh-(*MbB?a`AwCojObtmE}JZ6xoTC*P(^x-65ydhRy~5%jW&ny)1W zxrj8lVB{LTsQzexWC7#PvuHp*lHp+P&x{XjhGY)9l$qlhO?`e5ObmU-0!}ZfyIj6! 
zz8k+c9B||T*-7@I>%lJKy<1urx&QI$!a-!};xr=v@G?g=dH67yULyfh8F*>@qyO|w zz~kQZ3l4axHeMI#LDw9d#Y>MKx`?pvj=1N;+_RrraQUEikC$r4oVa%F^59q>93?NL zZE9}4mS#*|{mO8}Jj$qF?*$C3FBR_@2T&ljKFj@`6S_-1trz4K+H!hZ&(MLA(V(s+lv#VeK>KCO2x>gE~Q=y1# zbmFZj;b)~b%AhReiqkjNSkdF!-13q?~w?(fdIuaiogj#J$iS!uSuKIk-A{@Mx!_yQ3;Zz_VP}^pN}M zRgcnR$c#yw5iBv}IA1x%(>tZ zz(aQ1j~(j{1blP#>ZLJ`(#1JX_XY!=y`4jl6Lfp0U&w9y7N=W)L^Yj?EqvaWccqdly7LRW3&7`}c{{d>tyI=9mp z))Z07=8`=W*qzuwr+cMhJNv=8>SrLW9VyaWYHi-!zTe#-(MFzcdqPOQo3Y#J5o_n3 zjH(ARwsIokUwGWnDdDH^IQkVGO%C`6v#yXk?lvJm=VV~|n|%a5nxXjXt5?GB&Uhdl zzVUF>zH=&m2p3Y)a5C6WKkHNmdXQK2TXYooLl4|ZmcvKw9_fTCN7LgB!8dc_OEYC| zN=0*|G+-~@O(!nxjFU<3pn&hMuU)L&F7r%(hu_2Pt76&4LvAL&nycjE+`rg-Bjfh8 zJ~E8oDxl@l&r7?LBg;u*2JfBOOe`Mny?1tGwe|S$(Ln;*|wA1B3Bdjt{e)?j?j zNza*qcGs?5$yxtSIPaqAaZqpYYj3!OIlxYQ{Et*z!fp8ECY`CqOfB?^)?jY zjUiafrl@wcr+W|jNYS8F2efz7tqJYxpzTR7uxms8CgH2p1oduzNlIsL6q$_h}Yy(Y27q)34~ys_q- z1-==0h3jwN?=wuMVt{5E(atpJFZ}hm|4aMExQ2E5i6;6c4A+QQz+4{oXg#<-J zQq`973C3wa`KhC=$~BRu)Ah$8SP#BUKrDvszL>EEeQnJA(fk_!qQ%FE+q0QB@TC3% z-O@Hboc=B9zSFh7(X=N$7`n|ji;{*1U1qB*kLRy9cuWKH(pEib8@|->9G(2d$3wU3 zEDd=~Mzb;ojAARdJWp${2b+v0Q#j^8$F6FHd+N0NaQAurKn0@ zXVeJ)6bo%SKJM>3dB#t@bDTc0`qzK{H>+>%7v-?Yk*kDi3y#gwy?YO5vplk-@#x>M zZBiL)43SbG3eM#tf?qV_7K%%Yq{aOHW4Y}5vavi!*`btKur?HQts4Fq9l{Ozjxn>p%hC7Rji2NU z-qwP_n?KFS*SK`?r2>2J6869FjuL;q#a!G%wjP1ds3E#ny_ zrt#>^}L1lMJCQxe0K#ry5^jENbR;XqWn0>(=@ z8$76h5&Ob=yqxUm;Sg#q8fh~{>L4MyNtwbpOGY?COL?N?EfKoQ2AT2aWycjok%&O23?C`C;`W$# z$@!ZRcnt$M844VO72RXwPa%|}ep}-sD=4WHO1Rscq0YwUb?u{paVRswv9T>UkU}F~ zuf9u*4wSOt`qj%5&Gf2`^w%kI`gP$I9%l5F8(0*ejngVy0tH{Y15f?95J^OY%aPwC zZ+`C&|4I3czgd0q%fBn_!NWN=J8d;~N4H&~HBP$fYQwl0VqNTWSgGvKZItqB& zo#MLWeqS74bxaZyEzzi`)%xvG3V%^+ZMH96{AxBVBH3)hZhd{Z&EV6Nk9@@2yBp7*a-_d}d2is-KSkag z5V?ZRHmk<}ASc8aO`7lWxXZbXZ)`-)o&BJw%1bk@iR5f-v$?8$Ij8p&sf3oZnQk61 zj*H&s)Ig)R$xA2b_iZTg(TrT-ytCP0AhU6wjU}-df^5oMI>mu$lR$=&5e%LBp0#Q{ zKjYAmzw%9z`HW-Mi7^sK4>(d@eKL`eUo^3ei?jpah6j#3GM`cRNQMbaF~;@k<0}ph z?To?`l9^e8_+y> zqSTxQ8U!|;i&u>^BNCa>T0bX#nuCwZG0OmkMxx{z+?^e6-E%hsxQPo5eX`^&%m%Yn_B zV?Mt~4%1m~W@LJm40L%rnIjU>g=Cy0jCI>~FAuDyuLf$VmLq((cP`sKcZ^rK|HxdzAHcZXvJ@lGm)D|+_Z z9BMBzj=zat?slYHI>EV^thwgBi#Q)-EPdKt5D~n~_Krt`GJOZeMDO^CLx$1nWhq>w2&79$ANFQ@CkI+L7?9@FQR^>!SZg>Y8GS~l zt_^DkJ(;dXw;^jd+u@w-5lKkTa{_Jjp5_->Y7U!gBA&N5-W~CP~4k4i~KRPV$bC>&e6REDuUu)O_Yp*c$JV zE94y=7Z1KD6%j+YF}Xn0>4!0Dx9*$&kT)kFTRV(pWB!UE}0SM@NuyD=rk-Iax#iR*IfkmSE7Lrbh3xsaPso#i7n}87fbO`&_cXuoj;P}j=__2 z;&F~(@{~N|tQGNnJzbj)W<4>dto>5w2r$``1Mp?*3VJ?E{_Ly|^l>43T1dL=#ew~! 
zkGq63UH)kX&`V$bx@h(5t&MLAo;bbw$N%`BO>h7Q$Cl)hwb|V{j~lam`e=!tcQ(FI zG`b=E9&YjcoJwjOvEV$r&viYOi-AkvmF3B(dZSvvhv7=$7yu%EQPaaQ0} zbBcq(08>D$zuIPf;V=-K!r1+!d7w{*UIBNv!YLX2ZkI#vOzz(AB-hh)leg_F$s~ql zIOHgnN{qai`Q5YoMRRl3?I?g_dwiTC7457;uU2P1KC}9p(xrU-@o!C<8&3M;`iLKz z%EPN1zfKTcZ@oHJP|1N#0BlLN)2ZBtF*)+%2dRTb*NL{~v)~^4Sn0AzHo4uM-h@!| z=u!N=@5RB%$KH-%cA;~+kqk~|(+SOE>-yo0`y%YoNQ%(MPty%_&gLV)eKUffa&j_n9>g!@N8_S(#l=dlpH3GwJ}qq+ zy$OtswtBmCi7V%WO?mar2C(R>cH{ZFs9uYTXCT=a!qV8f{CD5u>0q3~OC8mWXo0Ts zUbU&nKvW;g@YjZfKB(ihfZ~%vDomvXhDr^L8{YxQ_wbXj>y0LsetW#BMdDiw?fc5r z;*1UM=4A{s9Q_+}tm&)zwO^Po6%bhxZl+8h=jc&JmVp{AYDO{r6S~vieD15NF4*Zi zWxsoco$xREed-RpmG@bDz1pJmt1_x>JPW|7SEY^imanFx1wWMs>(GCrZsVV*YlRmi z54O-~D5XwiX8e8Qy$wX2t^3wcKWMTbsn6@T{hi|QrpVIB)SI!-7;B50Nk8yZ8BK4{ zLNrg3aT;3Ms2{MVr~`EPQH3>6Y&bo%H_eRH(Z)2)4St%*QnP6;)v9CPO&Pxd5@ZWv*6;fRH=cRTThQZO$|kC< z;ha$^<8wdrO>{w%sxQqN>E#>i z#WKNs{Aj5jPOd(`cxe{ixr_|RwQqxIgAWe9613W1p z8Rtf&L_~~1LbGH+?J?j_lxooln>uRA7zaj|TI_Ej(~rfB#M+Te*+#i)!IANc{y{|=is%`!Ij3~ zQX}+QKu(4>A7`UvT}smhtADe3xA~}^&+^jgdkjiFq?G8xJex?&@JI1dhVtj7El)55 zybTVTTNI|H|H@UL5{8#VF3wyunhNNb0wup5dTN)E)G=xbC$p)iW7rWy5D6Jye)wWX#GheGlaf-7lzRVAN zv!qa{x1MQ-vZdp&H46W!qG^tGOpsG*6yfkMg72#}JHy1F@Pm%lim0}MaWoNMpM2+Z=@sM#E)uGFk7wir zv~jap+!({?WRYQW`T^t7(;_17q?n6V+#Ih_09`)B32>^-p~z$M_5j%G-?CfN@AjmoTzxWFUDGrEIFuv%IQgCp`J<)Brk0 z@!@aEJOhtQ>=+jJcdSzs?UJ%02T{(+O`9{21^1)UUXT;!8>6fFfDet~X)^G65xLTR zcpZtyMaw*Sx){R9-%YIIMp7G&dTWaV7k&)SI)S(P^xX>{M68*I$}F-E{Os zJjN399*jGZXN>;jql=)Qp-XN0(tMe*1Qc3<9S*IP;DP@n(ZFQR_-s7-L*u8l%Ryt) z&tSm0h`*d#dKL^z#Al~)nnRNpKLf3ooY7uni(cG+@Zd=i+|`vw_D=MdKI>Sim={lJFIIxF6M2x#qqPll93MU7r=UcKn9fpK(%Q?f5=j_g?Y z20vyn4_6}c(EQbAG%9C){pd@YCk94NamHu_0_m>cCvTp{b2gCsf`y@AjNQ!%26}0> z&YXI0q7ScLzg&8S+rbbHGfLlVqiXyd`ExHrdf3oBcn)z|g+H>*iux#d#mF*gg<8Am z0oF6)F;^CcQuuBB3`yHE8q#~n3#S+0NS(=cq?u#B|tJDFl zghiMgh;O==ft*POEt~wJ*h-Ib;nSb5PQCxZ=q@groT$bk+2G%taL3Z_-HQ`Kq+qC; zwSIdv+1p7&*dx1dmR5p8hv7sa8*_Ym=Kb$y#A=O8UL4A3$C%<$?&rz!7KIE<E=&6Km> zq-AtqwA&p%NXUdHLeN4uk-zC075>I4CKiiY&GaYQ}Ixd=DW znT$4v%!Ra!;g@YW>z&Gxw(Y^4<)jBW^rnlBovM6Mki_jf8BJS{%*Cos9~*l(GEsCs zK6=#;o_4~56rDX}(7VR~js^QXZ{3s{3v3*7WCR>JaYVjtt(?H3!Dq7lW#u1c&?fKj z(-`T~2ag?pKbYH-G?E?7UvLSYBYQ*yKWWa%VayQzBHTQGDw^JSmUSBrtgAg7JkZHT z-lbO^PDXH~NndjCP{!&`w$WX@@S&e7#TSc{odW}IaZV&3+2)kY82KC5(V~{EJLt2m z_N8$Gi>*A|J!s7)|J+;Rf@ETKF4!1Ym&tmL^Euft>smRNJ9SLQ zWOQdV6yXZ*RT%^3yJtQaK66qAUyidfN5qcpU3wa?-~q;2w2*6?9&op4<)^{C=px}S z7_VKsn*2Yq4rD5A$^=&kJleMU@sECc^>;t}Y4YdB%yWEC9-lmUIw$er+LRtC2Y<-p z0Op9a2JdMeyWkOQS2}qm9SxplD3`8f3Cvn9ZhoBe?QZZa`)>r%JSK)bdpFZ<+prwl`IWk@B%YkKoNk62Ipwq3=G2UoiOB*PH8jm_n=MqP0 zxniJBi>I5l&d|f~g|%T$9|VKfmMu9W8jDjwWV-#pgLwQ#GGuaaN7p;IZjGbBS|A5D z*zVu!;@(~&{OMEHcx$;ih3A*~w{vy!^qFxCajw32B-K!klz8%Oyk_4{zkd6s3sIA8 zyY{Y6W>UW#Ygex%m)j3C=B>>E(*0?;B{#pkcDYlo zE?aHhJ>9fAw&$JE+3brrk?rR>E|0d~ICbju*j4OjM77hk^=U6G9U>aq!`~|Z`TbkT z$@uj^GM2O8K+ajZBIm=F=jpZaK6%f`PY!c7O1B92*RNlh_2Ph2tjUnA;qT0Q=fZls z%kXc#oISnz?H!y1qYK(!kr%gfG`RfN<-EHx>^}>a@a`gAjt2oG9EXX4I3ou+qBbT3K7Gb_BK*mEN!dUGhNg##cbMI9 z?>C$Qy$!x~r9O-id|eLgQ$ZyLeV^yMfne!##u3cZurGV{ObNivap4w^`S|~Hbze=F zCdpynPfvUAd#2rZvpZ{mg&{zKASe+e#S2L{y3}{jSJ6jNC_*7BL=TX_E_N4d#+x3~ zX1b@n_ucyc@;UD;LZ|yZ=P_lnva+%=v$C?(y=0v_$Drs{X)Er+6ne7ZN++^PH*RK^6GPN@$O#e zrT5ck{X}zP$d*Tw7vv2*18VuEq($;r?8J(nHcAd_fgL`;iwUfqzK>!ZnObuBCpE_fe0l@-Cn5*5$}IFleB^B}2c` z$H04Jack%ljZ8_XamlyTTcR~xsi&TvRKtt({S@^(!Xh;OAAj}VR#(Hx7w68U;O$J& zy;Tv7&-#tfv*C@gWk*VSv@!;^CNEw^FKi)DPw?nSopsv4@*TdbgV4<2HeO^<8^9)Z$_=H5!F=HDnqs=((WI(jw)6KMe%JlB z@a0A5pac-I17apnLJcKkj55KZ%_buX&VxKcDCltJld3gs7=foeCUH&R`9@IDW+2#l zN^qBM+7}KSbR{u*8AEOF=J2d38Qbz6AC{*3gb9ad8g6!pj6xM&g0yL8F}(A}*)Yz? 
zRPqsymqY5xD}Vo#Hw7Cc&7XQQ^H9_1;)$ajge$+&!QUum;g{0O;6o`P2r6&Gd-8b( zMcy?@e}%MYo11!S-x!|5Gf&QqJjTpyO|I`=*cK)e0K(krh5B_oWP9(Q{{-A)d97L5 zzn@TwzO-SCYElI!_5gJN_B z-Gnz@WBcnR9IItC7-@svlXt*4Y>Dv7) z9wIykLEboI9M%`R6DC#6Q2D5kKImzT1mSfIRQ)m@8mHT~7P=&Z2%5dU>DI)c+?kQv z*l2mGZKp4dKz_SEBHZhHMk0#!$vj@vyES-?1@GX~^}UqHs~K5dN||%`+r_-Mcv9m@ zhw=&{j581!i?jVb0~Q5<$K378=ZUkEA*cKMkEBhpcK2P`pYQZSS?iv{%oTvH?& zC!RgY(4NwGFGJDZgl@)teTD=cj}nZRF7dA^Blv0^KSmlJ^E~!0T)dF7mseD@`Y_Kf zY2bu+k!KdA9=5I9u9=FBK9(_05ax-)2)k>0G1S9__=0%MsS~HC|Ac=zkg;>&hmDo` znb*sVyV{PUG6KN2#mYxQfJC#ripHQ=dOR*Lh9((CjRS`LZ}a#;A83<+ZrtgkZH-5~ znRoFlzLuGnu4nGAHO6@~QCcma41R)1pX!Tc>o=NC32YoFQy-vDG>5L{5aEHH=U|F@ zUU$YxpbWhULt<+&qXb&sFhXE}%7aJ-1%F!``yQZ^C(!_2zszks)vLd{IZIql!L#V6 zy6oh`a5X=DgLW_J`3vGi3z4YR-@f~fbJ>lmpP@`{h7-QzQ8Fd&97?>&ecEtVW7e7lmzp{Gkhrc&;eyj1tkN_rqyt{O~ zUYF2X`eT`kj0MgZ=;=TUVV{2Rn=yvi9dqDdju9#SB=`e)h3?9rjHk|4_2`3qCRCVy z(hjumkUn9W#Cc`#VVvEZ(s2V{9iG|(!`-BhV0OT@5GklW6G}h3{4wV+if9A$$+yc&) zVBfyAn7iTVPM*o=n`pc#d_PTgxZC&a!Tkc0AeR~O@}usgSE zD;Wm=uyDvz^?G&i7)E~#Yzv=V_&YuYm%Lzkh+YXl9174Vx%Ni+_Ovi-Zesj)up(S@ z`oI@P9#d}P!MMRMuU?5?hsQ-OUBZSv04*j~O8uyQa}9aIIMDCLiA5d!S(vr+Uw<{c;cwN)1JxoCquz`u zyCO>?jG_EO)oBY^d=7SuM_By5dim;%i6`Y_e0}A$H{)-@WffOHTDcVt&<2m!?HL@& zj&Q^%eEas*(e19qJM6k4R~+4YdJN__Z(i;`J2HhC!oGh-ksEIF_2N*E%hf8^U^I|_-m zZy^8+ymzYC{JnG6k$A4S@$up4?A-aY?L@lXuERH%MT+u_V_EPC2RhDP(azGwT_bnJ zei|Q-l0SE?UTQ1||7B+;`ViL}ztMNj_R%g*BlxSu4!blQoVm-q9Q@|s1Ig>cC+Y(W zHR#Gh_RSVg?=~+6%FOx7o!u<8?|!=g#7f7P*%#5R@^{DIv|GO^AFn@o`t<5fi+a=M z@B}S*lm$&Z`D)>3=4_$Tiw&+F9B59z@@o9zK!)!ttCvo^+~KMR3$b`~yy@ve90%-p zxY(kMT?H>j*UGkork{bE*ZaBiUv*IUt#+-p5Oa4qaM`X?{Cei_`Wk=7+h08X(rVi^ zi|fn$z~lb&Pd_g7O1qz$Q!YgN7D1gO!W;fhp8Ma_wg(L6@hl5~Lgk%{-w0#FXm7j= zLuSYC&iZ`jgLsy9K8)||Y>|&f@?tA%Y=K5J#7~yo* zBZnb_MXMgh3dn(474KmY}Qq^W)vVR{jglD?O+fK?r(nj(}s>4JBDn+yo}WBkPV zl{3J~;cqJc?up5S;bw%nN#bit?bl~9ka(gA^y3Yqpp^1$tiCFHlzh*Xp)I}lv!qd1 z*HhSnUu7)s6z;$F_AWvmRF>sCL*(G34Boe5KM7O=hMDEuc;CAe2@4jCs|-lf2g;oH zy_4Pb8qP%PxgT6r{Nw=7Y02lhsY|T z_hSh5JMC2eMtec(Md|DN7h-tIF^SAVh+_H!;}j|N;Moh~w{8m%?O!@YJ}H%(6578P zhCq5W?OWq~Z~%txl?g6*P|g~hrOcj`Zz(cf(aV82Tu$>c0O*!Uv%IMs^-CIp<`>K* z(%&18Tzr==Wcu`a7KYYv^m@u(K$2J@P{Chx1V^sd0IT++GheMPw1snX%F7M!HS5qtntg%s*hf(HJ9HYnn!l?ApLbJdehFz^km(HM^cldt|H{KNAY3#ZSt zg}2(GFlrZ^7^dJ;HDI(A?jJP?Z74qk4BOn$v34fw>b>qfWE0US+gu50$BewKi`EJjpKb@Tdw%Nb7rsn__^I3QB#Nz(y7SGr^rI0BQ> z2;>S14}rO zODPIFTlCtJcP}FhJn}vuaPpKmcKXx^gA{PB9VI8hatvkxWeiHIFLxzWF*@8$k%51T zr1bjKc6`D;Wd&_8Y6(Lz1HW<^GNpN*@K4Z1XA~@_*E3KF#j}0;V)$gt5`qKGh|6u) z1Vac9?$ddN^K`+%Ep%;-=4>NG{9;JnzLNnt1;psRn;^_!c{}0orEqsOqrRQN+dr$M2&eQxrXBdcS{>rPb4==C?#)wG3w9PsMjR(_>wrdi` zc`BcN<#h1~KaPA;xD%lFXPCAv^xDQl<*f#{k62@S%La2dR>90+?P9Bj)#g7%Cn zIOX|S6Y9tHLQQ=M7leHa8fcL+JVut%+xj^M{@zifZQW%Ez{`#vPSMd9plb{}^lBC( z!j*AF;l&p$iZLP>PYjV1SBoEGl-k7WxwKh)@Inihlvno-V8jkBT+F>LR$o_xlxzO{3K7w4vj)qy9J7x)Y`l=qQS&}Lh8MYh(wW*yhcI#*#TH?Vc9=VVL z7R-!q{m1Kw5&6KOwi;)|x%}yeQ@^dodv@n-6@2E$>y1Ooxb64&mW6>a*hVMt0&a8g z=Ek{skCKTd&^yE3a^NXG$$(8Z;AP9(bMZ#|_kZ)#)z=*gXHm{Lwh&kEWCYu_cn&s4 zFbKda!Jg3#jq4#^#AxK{({@0hvHIEq3p%1aA3h*v_ua;oL-*P>l7WYDLmPNZJ+F<3 z;bKM!JQ5ERMqp=-1iJLr+91IrpTTeVc4gaHHr{6$?ipm&$w-1{^T=1u&gk4CJf4T1 zW(Qn-;vHVfxB=mM9=~&ip7Gpw-+W~ZFgz1~{lO=LD}0z?0iSR9OnvCgnB3Gj7Y5;R z1~qfA`YecUie|6o8Mpu7(Y(km=ZStK`b*)@U~x0P^@E@M?40B2kGbnZFTzIqVNxQbTpmO}> zSF7jTc(j=l4<0VwdHwUW{*=bSnnI3g4;f5O8T`byHRyVJ709<__^QMFc zVLN$J;yw5=B=SrinJ8L~%ocvw1usq+jj4%QouNhB#im9tPva@P@2`gob1xpl8Oj(l zr;UKn*dx!OA@d}Duyc2FOl^H0ztPq`8H`SxdbMr&?@t@PDXfKs<(rKgABQS_we{=O zsnaiyK6X2qWlshbvZ1gUS2`GR->a|H4|(He$lQJ<<7u$-vfa0BUq-@dTqqz{N|`0<1AU 
zyDr1i-P+?oSZ4&_g~MU-+4_5P@N9lzTYP~RX^TDK3*V!AjGP+o@Lk3tF@VWJJh?4| zkar*7Z9$@X!Fu4}u?&12BpdA+(B^A$Ir)g5vgGCQ%8dl9n;TnS#D9`+yQhQURcH%u ztec{b#!~g8b!}p_q}m=je7rIiT@B6|+{s2lve-p5^K0+PxqY>QCm&fTw^yl2TQ_2V0FzZH`?&~C=_=RQsLyEU<> zYkYY)Nfi1j9=#`T+baVPC!@|Sh_~InT_}rWV2c8FNgQb*%3|18@h!ZaVcKEoy!&~D z<4exaz)Kiq$vLmR@#ciT`~34yr(bqtM8xCpBRoOaq%#9Tk~ z*>77IJzp4%x8oVbua4%-tDH}irLN!p>ZfCb7yH`Zn{T}{vd8`AU~{_pSX+5Y8w)<@ zh#Y`lS@0(N=yU1z?rjHO@RM;lAqZWwjGq_U&Gbq;5A7t)NSyq;xAbTk&m$AfyxQUc z9zhlYr(Hdq749@IIAGR-0NLB(yF~*$WAkQbP1ps;m1QhymDwCw-!hI%$7l;#<_LU+ z%;?_2p}B|yiJcg1E9AEq8XrQ8j9yZ^(XB&-Po6w6dLW~-uv6%07LnHTJ{+NSJSQ@2 z-py+~df)Q2U2Tl$;SW!h&h8~+##k^~Uh`rLE_dxj zYu>g%*ycSZvP@V zvt5;T(+doZ5I(nEQ@0{my|8{J!#vjudE4Mz1kk~9=@cIQ6f2gWM~n`^E4Kp1d%CAf z>An>*uV-Q&jH-O#=`DtiD0=kWXMmHYCru1d+EQF~c%>c#f-u_8)Jb5RwoTF=dNJvg zT^%uJu&B0+kIFvjuI=s>oJOsQCqo;uHhCgdk>%g zulJ#86t_hN03=JsU}ubP_Da zlJZ9Ai!rxO9i%S;;gYZRgWv46Zto~?UBjnGixE<}l{F9G+hxy;Ol5XGDWZe+Bq@zH;Al08bUzx3lPnZ z!8u@pr_T_AzF0m3eYC)HeLfYT3G<1ze2|g}dT=##pg{i@xJdoo_F$d4Y#WzLKXzZC z-_fze(*T+J%)Pq0aP8B3YNtH^kk&K?&0|6UU0?;9jMBqjBWT_&?es>MK+j-7du0r7 zi8htfTTD^qQ()zOcirPYgOX{K^hzvsLJ|0sKCt$UHol|lNvGT?+1uq}?w2x_nmkuV z(A9Uwt~T=~Ip1MO|L%jYo6Is|^zLyyV_REvDF+Hu${3NmLO%pSo>I4)z$q}chg1CY z;n1ygcwKaFA|=;33=gQt$U&wK)-jnYe?#z`Ex{#l&>HVEb;1<`k4Y6I!*l8v9ytS+m!Isse0~B@Rosp0CC7_Hw!U*F6VcYz{5JCZbS{!wCZSP<} z3KyI%<%hHI0=^!V20zZ=z81Q*kwJ{XW5Lp(EhxfCF!o>7(@uC8+yxu5)VG6U<=5^l zc@{9(FoMIoI;0|Aphlm|1Ven?mzs$t6%>1 zx2rF{{xX3rgK0vQLwH`z6GHifcpd$`z9andTw=gEcKEQ$f>EfB49D>X%Cxw}gv5g> zCbn*0`}|DrGYBQ<@!b9UQuG57CKxA zBD89+Py-ZghB`*NgDIqY!x2U6()q6vH1m*8Su=+SQ*t00ZGj#i-^}RWV6r7u8j2QUg)-pSGe$1T z#_V!1zhQRzs&~p;ytB(tE{5&iAI`@36inI*4me|wQ2*=( zC?UmCZz2wJi1};oOD~^TzvJ~-pD@HxsDuqWfBt-m+vOB4USKy?zk2`Ia~SNM!dATd z=39BKF8MBAxm--ui!){{z)%P;Uix&5I}C1@uUyHuDbM%tz1lJ1vnmb#;Szrk%bAxJLq9{HcJipl9rW+B zFYOxIm(e99w&!-jyz#O_Mfa|L`QgVipG+SI*NjzjPvychZ@7Q`=l?Wg$G5AW{`$8= zL%2A@IIn4KInecuLI~hXcYX$U#azCgamGAN-nySxF#aIq11~`# zBOZ(MoB@rYhF9J2+n}}ZakP*W`V?O@KQcNoIODx&YBNW!V6@;0|M=oJXFJ$7+~2Jo z`ub1*`M+BI@cTdNyb)f(%`JK6(v#4#y^>T(mJgSm2IDb23eEQ-0 z6VLcZzyF8jZLTZsx_O6Zrh}m!xMt^FLm^x&J7;R{$a)d@%yaAr77G}90(t0KzFoB& zf7G5AA`&~S%;P6domhQ#=8FuLFOE_E><7PI_+)M3(K`6wzThQJJ!Eh)!}(wThkqO2 z$`iEt&Tax}yRE&+6!D%P{`mJ>bbfjDv%meD)s_0}M*U=gbYwfYY#^`|eA>e9fBWnI zoKgMK>fij2|I_L({`{Y&Lx(fAI`G*dA$W&3N6*ibL-A8|y0@6!FSeT^ zmKv<)^9)7d)-U6raed_QOVe-i8+Y^RJs!m=V_(MPLkEvcUz!Jmg)yU`bp~+V`@RVaii!=4x^+I-l4-Uwu#u+bV zGX1vduzLrcd5#9jn9>RpWC7EdH<#~-cRD0>d+)Br-_aaz`9tVDzNZX4_Hr=Td4)y} zHaC!M@D?GNgsR|8jYrx+qx}{v&V2UKI831zdcppk#qmeW?&HJv?iXJ@+0LA(Ei*p{ z$AcD(7*OGwbAlZW_cDI&Z;n~od1>ZGuzq#sv*?i&{%m#PWS+a>hhAYZ|BKH)TwT6& zZuQ!mKPq2gvZCQ@*9r@otbx!zc>lK}Gm!Z%B+H@?hm@1&@gw&ZO)j-4D>RT0O&898 zv!0XVZ^_}tiSQj4T3FDZ@Ib$%zgi4ucC*Oz=)vvPm!JOq>dYs__H$=`N@Hb2!S^4QjJ z+^hP7{*m!C{9bMR(9;=3XV+|0as1ecJl9{$0piBcw0HRA@SA7@?HVI!#j|5aTdauo z$f5z%j-tz>&pKCQIcFd`Y&@V;~x0LV)oQ#ZDOq!jt=3@glN48ggjuwW}BN z-oBDPakOXgzw+uYI*+*%ZCOx#RQODDinf!luU?He1n1Qjuo;NA)z>`tjUS(FyrNsc z;xWGPfM}+1JJ+BJUO2uMk2Kze??L+(8^rm>Gx2pio*Zkj;O5P|sG}VV5auEw9G}L^ ze6F-3=~71LWkKZCI&5)XxZa}Z@e?P?Cv=Pj!Qy|1&u2}rm#=mfNc5!dbguDbe%RNp zt$mGgbuaP`?`{Tsi?XY5wSRC<*02!tLOTcB#x)-HT~&8HASO@Fu)Bxs_}M2POvo1X zkk9PazSs^@i);ePg z8{v#W%|jDM|CUaPfCI#5?wi6-^Mfh`_>2;c{*9iLJ9Q7#80HY%q*@r{ zdN3F?^K^YN2Iz=u@~N@sW2B1^=2LZu4`W)&U+O`^8)XG#g39t<+J#|DURg84_Q>0*YSNy|{NRHnc# zr{d}{kxpEX%Bl`4P~Zl~f{T(4lPQyOWStLy+?6h3nNPP1uGHnG$)@%%ce)ru>)7Ciia_|86le1s-z**0LFzt!aR_N3% zk0QPEw-}A}L3hgguI~qufsf~zPf(X8dR&OG>%sU4vy7TkUxiOUh3lzFdGM?H{`sAB zeY*z>82nCH8FiqQF;evluEAqtLb{E<)lLH!vAFKtV!&VEm~y?-pI%pvT^>_=Me=IL z{053OjDs%L)q+H#~Q`Ivi)DeAa|8 
zZL7X90yp89s2PY%@L;9*@a|y@*G?0yRbAH)(AFcyI}>qf)XA7zrBff=^f!5Gt32xS zerprt7;XnV+egPZUB1l>+NJ63 z)W^_8ScIGTRL^{RFypct{YfxGPv|8C3|^#z3*I-{^jeA>!Ro#D-yeaJv451olpi~7 zc$1(3TtkDs9Ud3{haQa!IM*gAmEHZY#=z>$JQHrk2anU{UEB7o{>4B0ixy+<=Jm2| zVn^D(cZKRKVVsghk-wNX7e(>s zzf5>fP#tf)l(F;YzaG5kAKniLa)!2=MRNxiGxNBXi5@K}ye~ z#*spmEeyzm2O0A+7VX|=!H=Q2Q|1#2ZKEV}y_xZw7a`h1+IX?8)DElMv3=PJ&ye-( z`MuSd&p#dESLlG@k-5M`Q zj*R^3Jay{T1nvtn{>Q+Q^25Ue4KUaZh14FnWuOy3aD@M0wFTOaANSk$=QUe8hYInQ z7pE5e?m5_&?BKk5?(5ZyCthwG*%n_ojRbDS5xCPY_|$4gJU-Zj%-3J=VYkkm7xF?X zt-ezqMc;x2+7(mW0@(&_+d4`C#eWuB&p46tq7FRQ0*$!5Xm+FDcr@>iF7?kCR!cWL zH7M}K#Td};1M!vnM^VC)c+D-I+l{s8hbR3wu7p<}m*aIA9vQj@mf%KD;I>%f*F0vW zZs@s7e3@}WSeJ(G%pLgC7_sq@kixqsWKTK^R{GIGGQ*j1P}dGmRHk;A>v-4RZJ{7Q zbm=T4;BO4i4DRry?edrhluv;*1}USI?b(?UPQ)EI_P&kgguVEaKl_+V~|B)Zjg?SvT@X6?Lx3U>=7&qWm8?@WvtKAQ9V{Csn?0U zZnSWA_x_ooVKDI;z1SErw_0d0QSfX$ak8B{Cr+(C%ouO%ow@bpv=2UJXH!Xempg}L z7BZrpX;gAV&hFLgZ@tr8a(MdE z!Y%%Uc8x3ZFr7*tgv_BC2J3Cn;oc1K(t_1ut1a9<594KU$_T>w<;THp{b*usvj@{AVe&V9A|;o(0VCSc z+gdM#NyBYIuATexO#IRFRymywJKB)d>2ShgbCVboI_Vg;)C7wMjE}6iBVy20>nq zpRf5XW9qJqreCc7_h0>DJVM`n_s7k3yOW152b8#!{p-GDTH4%-gh?F?SFK*U4nUcHU{<|e|hFUu;UxLYUY6Y zBXXOv@D@;!wRx@MF%r_r%vt{KUKKywLUnplZL`R!PxSS@_^B-LAv6u02Q6E0zuI^| z)($iJ#q|t^b}4e0IM^Y6(%m7a#YHY=7!-HenJc8v(TVg|^^LqyJsf53-@TFXu|=X_ z7@cKcU2uBu-mP#DO;*>g-3O9w3+E6{%(t^kxpH55C7RgV;fyU}9jp%>NNcV*SR8+$ zP_AV(WnAR>itfm?&J(zL_0sGN&{qyp+_rUZ;a9#HK5G#}yz)0+e=GU;z!+^Wb+!yc zES_T#hU{bU4~}U`AAk7ELb?1l1N!sgLch~l zI;V4ldXUjxc&oyHL^mIQ`0J5dUU}o4;bG1=AWwagjA$1HXBFPc=ze=L-lePUAPvvb zp=}GD+{V;zOb7T&k+`kj7#ppm*`GpXf$o1=6d+EyF)&U^%f`? zhw&!7o=#wK!}a0Dz8#M@#mR0wFo<8N50MRS+_G_izDwqS&IpWntx6m zKRrC(ZVrpsRX=SV{V~2z*N3kM>F40R+q`BWPsk^*;EOlwQz2LF5;L#p-<{>LXkw1z zjeo!VWJJz97tXPHMDf^CeShKn*BPO2MmylT04{SB=OXmzoC;&wJZ}!i6YPF)5H5=+a+R-$5+v0 znpvEq#xGu@(>Hw)R4A`O<}SpK@!0Hv)J7j(7&*ctJjyf1zmXG4-+e4DtoEi3nRF4b zNAp#~mYaD~U6hh_olCXlueRV*(v)ArFr?*L`T(vQ45AgFS|}v}1tpodjU%cdjM&&hz;{ZBe&p>hWIRPfNR@UWpJJws z>Rr;w1wS+CgJBX?aKGLWR+hBux(1G}Atwf*F^f^Ca)N0<%6Q>3aC+b`G@v~faIwy~ zq?tN<60R+TEbxG>PGw7;o&$3}G5$%xSfm|8SK8Ry z`q$Vpo=n!-ytJmW>T7ojlGN9t%=HMqbp#yBA(^tn_k@j^^tDSR@UzD6l-KrljByIw z7fv6_Ke5YjaEzUVAE`caFbTWX^ThjEox#R7hOt@aRllT=pE zn&9p=iB}?wRi}D_w|8BIqQ%&Mzg2Sv=fyb0qntM`gVNG_WhzfzAx&0{)EWW4HKj~> z`a+#U7xmc-+pDoQGWIEEjPJ5y3*9dQHjv<-cckrYdBy|NI0|<{V~D{HuSQmndbi-2 zfr62$D(XYVstR|AWw0{9F@SAq3+pm2Dl7bH?|1Z5y$s0mjmpC9U3++|CD4Z_Tc;_=yZ1V% zGo`xS7eaDq7a^247#bq9Jt+Mg{Mz`D*B1TVmHX0>gF~YhHiSvonQ*-`L5|`;Sk+%! zgY9`9CHoKL$zFTZaV!rwiVww=hq{>ZJCv8Q$n#2E&8PL74_;6!2A>RiXoXNoFl8j! 
zRXMz#7tO{ocK6qa272NV&V%fkogtb~rE8;M9Nk*^?Ad-c;fgZZ04E z;h9@Aj!Za=2!p``FJXX~c{-YL@Lz+ncXx&k(Hd{3=i!03nEt>=4(vZR27EZ$k@7DT zf#wKGkJ{nChKJSkv3G}uj^ZeeR1$1L>F~0Ce zhx6(a-DUhFUDQ_@>w?!Ipfk+Z@9t-`rF@H}t&hP*d71@}#_dKKdY&>bud?yPst#bg;QwVRR!kK!X6D@B~F&x*m`_||u}hcnW^ zGc~+ykWeKQDpeK`6v|5f^5PH!w!ZzF*#$6lrTye{D!Q7cju0HbU3=5a&253 zOkouZcvH0hRR#@wXJ@h1jde7p6VaYOWA}x|{(t!EzwMCYePiUcO&^UZ>srdEc92&X z{Vt_=-UEOAghpOGa=1laGKo1JeLrn+tJunIdA`{eXt7)I5o7YvJU_U`G>%DmW9U9 zf9IXIS08`;af*4dcB2=9q(#iH^UO4-Ic#yfn)4KV6m8zU_h|LjyFZ#f!jm4}F62$L zH*;R;wFB;b=2`9V5jNt*47mGiD;W&`VXV28C*jBxjm49BhvJ_ffAmqt#M`T*$4`yS zcJch#jB&@>Ns|}&$R%SI zqv^t|e0k>6G02=gbvmQixz&v;8Oe%Ie(d=7YkOPuGoW6{ndJD%Q;nsz!PoxBk8dpe zxxT5g;Tu6ggy?(W0~21N@eu;bdvbN|i;w0kjQuV4{NzvmthsN!pg*vaRWjbxC&q@h z@anTri}vm|Z(VJkVob7VeW>~5-QWL56GxW3$?$dWUZK8P1U>fB>3)|u({mmMxv+-N zm3YM0fEmvYo5e%OmgHD;NS3f8$auql;R%1o1Fv+D-`;3v_}8-*)VIbXlFx*zVyqj^ zSQ+To;yaI9yz*Xjd+pk{Ex0^bz4hkX8K{SQ5n-e|#qv5$ajOzzrMIHeXA==;x; zMeXLYQ|-kT8SLN~SulB)(?OYrf8hNN5CjiTOriG}*6;}PwYk7JGcL@#46n~ye1fA5 zd1Fua=)inrK}f81ePH(hzT;3!Jk^5Omgtpy_}Z%{R=?pb9zWe*9(cUp!Vnw^`LbQ! z+5qO=#rYPBX9>v^uSgd|KQo4E0OOjzWiY-UT`(RVI&>`hOv{L-_r=4;)3c>M?8TiQh@ zq}bJ~-%fkCho_Uldp{Y(*+LHrsiN)hcl^bZBX9EXr#IZnz)Nm@_j^BxZ@e_}45P1p zwKI>=`FcAE&7Wj{yM4rUKJ)pebRiAPGcH`byx4?LNtFc@dzzg2yQk{uba z_tyq;5IW_Y@l8fd+{fH0?8dWnQU{INF-2Y%YKE+1XC;`$Xctlme{t}o`AUdAb=zLllz@XaAU^Ei}7IwaB?CdCx2_V`P+^p`pcdcDD>?(XV6oVJ%sk*O!nvz zT`k9%`p7s(5Edl;Ub_-~e{EA31XW{eJC!G}C-EL%K(e5=2B!tv%xi&$nXi`aIQ&YTjz z5u(K0d-Lk0(GwV`!Zs4zUhD`gYI3;*&R?ggZ0-0!Sn)G{e& z9+bHcMlm>$x4$WC5=_!b^Iak8@sV_LY3TB7U{jVVYMVAHW(<5Pox63rri`8i`#cA0 zxSn<;OiUROKX|m_J4mLT8+}(<(_V#@LfVopxRS3=Wx>(^3F^Bkf*`yEc*rfLn`y$I%VmA1?yKrPoxne$#>YM3-WlfU1Md$MQmC%f_n1__tX zLVE+k)FIuJ*CaX*5M6oXT<}+0!|Br8o_jxSa$V)q{ezszJ_&mcf77-y4rp~lXe-Sc z?tXx-uSz@<-jr9R@-4}_P+iIgp=Vxfyh9sfyzjTf(#!u{f6QaJ?6oRNJ@nM?0tgt< zfVTQ4Md=cFngIv4H4w}5rJ)0durJ_dKx-;r%AU-N0#HrWMUjKku7B5{ZUd$qGhPO$ z9)Vvv_jrZ$-7iqD6M3$YgCFf(<78dF5{8c90IbTKwz*VBpBYcl+D4zu<8|Nl+H)W6 z0>|5LytR6o(d5$gwvVP@Sor(vpM5YB3;ZcRZ*29RpP- zX-cnt_?do>HVFQs7}wrgYsG)W_Yzw5W^{a-z@SbZ)&acGN%@|q@EZrS=v&=e8bEU$ zj3!-18OleQ!heGK8o#|56oCssGrsD}EiFV)Ou&!)3c1hKlibkZ<1 zb)6Aq!dJwf;1)09RZ7@_oQ-)$nfj?79;HqgD2F`hdkQxc6Ve_BgSeL6n#k*;(uX(i zeWbU2aG5r`MpIJve{kTv#64f=rcAZTRz0*oNzs0f88OfasO%!DfmP60=$9dE*-qNq zX;+N}eLsvK0itxpLshAX~%BYm;UhpW*wFDl<7vp1TR5WV)?zE%w2BImT z>voIe4MVtj=dHI^ube!Qk!IV(>m)>e(P1qV9_0#saWrF_`%@W9cVa)xl-PiNmyb#~tGx81C=nCC3|;0?N>| zt27KxOZbiFDX}M3-)88)nhOZV^&SkjyC9z?UZ12 zUU^dw7?nE&!K2(FO>l-+>A)nu=CusP`oxyn?fX0QH5_T5#T*L(aAKiT8%s6@P-P9D z>B)ls#Srx5@$EQ4#@*6dz&6L+y?1>SX$A|%hbPSg=06_hyoWYti093I>*j@w7rSPT z{pj;grwzQpcv{`!QJqo59D#@63ycVM`cP;O=Q+p-<~h&1FTehB_0i{_q-fqPy*u_cu9sM!cs)EG#0%mv6mN<$ zdJ?Wi9tcH)-~Zk_s~uZ@(A@X`bbi6b;V07RBV(GOz&Nz6`*HBS`~B~)P8X~5uYdZB z3?_$G-)2m+NGB$^`3aBuQAW1+Klxmcacs*m&&HCY3IGj*DrD2GGztbt;3?RJD^$&~*4`B?UJBt@+ZsdUa6K|!E z!ZldU9q)hm>FUJM!{MZ@|IwRv3T;HrV#s>=-~&Q4jw&z`t><*^3KiM;rH$= zisA3Z@1_iJJnF!1>@6M6HoGk3?Z{9cu;C;FTJ5!n&B(^tMq3US2E{oI`0J6E7^e5E zzB%_r?;11FeuJwx$F{gPKEfk8LREGqICRVclXCEk(QCrHvHYOCn;$clR(E0L%<&eR zuV=jA)yOa_G{erk=>O;c<$oQT7P914yQsz(R2>#dEV3OdT-NZ|>YT-l_|?QcudX>C ztNIxD(4n+neDvPx@BZ7rFQmiCp};+^N0mqvC{2+x|E@r zVZ#FOkN)5v=S6>TjFdcG@k+?%tvrk7U6ZM_d6Bz{yWY6CkN@>LvHYVYfTnO*RncY@s#yo6J!_v$}aBplgWHD+>&w+yn`2LK&$`-?U z7FCOhY=;8_{%&WQgbQVvyM-~~C2PQ#gN0ea1KJ=TBv2fpw)gQ-~E+dJ43+(Px>G2q}uj=!NfbJEjo$#Bs_jT$2|Bf5F<{Q0jZ zrhM7!Pdk)QMaJVt3!lcEwg=0><|3g-Zgps^g`S(Y3i}Zs=X_v}8oPnHwsC6?J$UeF z3p=MW&K2wW$+hV-#@pS|4zJ_kv1qq(3zu8BSODS;Zx_zZ)!DB<8-B5K45M_g2Q$XV zm)c71+n2Ywy3x$xF#yKX<6C48=fvp0^XJaiB8!s3qolYj%2%i(fP&mU#vd+xbQ>4LC3S0u_qozF2N6O)W;U9ENt7&NQSDl 
zGw(gCoP&pjcZ#=1r{;gV&^Rub*N%7SrNv8&t|Kc4@55x?Z!gFO?ltlNp zq8mKZoNbp9WZ+-Mtwl8H&9N3f@Un-Eq3ex3`Wm`hg2iz<4-l1bhhL-SGyT;})Pkbqz;BLDKzDRyB&e5cHIuLhH3qP|nFF5Jc z<~@d5WA*IUpSL)0yq&yTS1-2U;LynzlZ%}pv$>t`f`hYx(UbKT}xR5~K zUWr%s3{J)IrgPFi)Nfaq&)pVt9zHlQiy-3&6YjMOEEl5(XDu+`n-k#oLAXB{?<2F2 zpXd;DAP3=+7%VVZR3o3^aU38noVy+ji`*+T%VPgq9IF`q9Z)+v*~)L>1P|KXj!?A! zynfdQ=n_$y=iv97X!ke2_~q)Yx86=3JF@!ZO;cf6fx)@q|Sh-sW-9%Vm?d3zeKbiKr6b{#$UQ-Nu4DM$r7xe-3{)MwzFCw`2$~|+m^0sE~bv<$ws!xv94#SQhT&yY1=e@-Dqk3O=TM? z6z9_mU@XSx`m6R*E~fcj!ttc7eDDn|tznUG(z@@j`aq71(ApffIe7D4S<+NSaJr9T zJYARDl@3YBn32@;rO5gfeV2C`*rm%Rq^v0wNU}~NWa>tr&E-0ervnh6B=(>`$^81v zV-KR2=wp(t+cr4ez)K)LoAS#Ld1$B!b^5rzUJ9(<>B|x;e_&fYzf`_dxd1UJnYw+g z0fRtg4sig9Kn}Ms7Jdf78^EheyOydpzL)!Tdp0Uju_`K8X+HXIuH8<*ta*dykWgJq zTKHBEIMvcu{jEq3rj%)OZ_q(>Bh9=i4;UA|QOYLGTdf>!%UkSL%`=0 zFoMh!59cvRlN^C;!ywMc1UKVR+xsyv)J6;#jSzy8wY#Y;g{k4Ove0QGcMBd71~7c^ zf;9oPE+22Kr`7qScaNU(Fs$vtrC^gBVVP0KHcpehILku2C=XgNe)SwGtc2B6UU9bC zT9xNLU>n|NpMN&_bm8N^UaJW4$M3cHKXq2F~H-SoM0!nrARb{U81tUYAH2My2C9Dt@2_h9S z?Z$bh?fNPn$B?RTzy?oc9#r=t2p3~6fSJCh?8@N@%{z12&~M!m&MT|DGQ-z+_tdf4 zyU>+BPZ5Ck>OtmpNE0=jcGI)vJdX2jwcVk>H3)~Qt9%*Yz2ak5fX$E-r z_>%HX^3MyqLs;QSd8<6NaOgt+gNZWhBfb0av~l0KrNC;=-~l{kLa$cv2_qC<_*jJc z@KoKSa8~~uD5J#E>jPs|%S9F2fsFI58ApPjr{t(-{!@4c)TnwUAdaaWS+JO zhrE$reWlor2}u^XzW?qIMtPuQI#|-7ZMG@;yxq1?c>DCTPsZ4CqH!c}?TebYNKcHiOaK6cdI{3f;xLUQV5CAtEIQO*2eB zOJKip`PFFia(XrtqKI`L>h7h;M&ZXtz1;$dyw(XN2SBUgB$JZ4fB>;llILUy5_D0662 z9WQK-hT{npHSKLAjlzhRAIRu42OmZg6m7;u@gyg7N4Z-(yl&&0VgWzQ+s4@xby1@bc{bMiV<3FwAk1h9%m&Nwz1}ex@C>K)jnY#Z22gZr zCxwx$y*&Kv641X4SQ}%|)^LZ-X(O&^4A{<2feRrcQi&@MD6}Ik=Xl#SAHnyw>P5#l zZ?%Ifqsk8b*I3Zc!-s04`J4e7FQRBMgyIi>^Pmh7 z_rLj7T$7kwQm=J$p3 z86hrJ9hrdEKB%d(A7)wHJ9ZONZa{2P=L_22oW$?69=udwC z4`*Ra+OrqGZK`;dLH$JaJa7B@--j2AD|sp&iGQ^f{QiS_G+Z%MocZFj@vs|XcCzBW zgN*&f>~6lY(`Dj02O%Tv(bfPp82>?rzA3JZ05^}&0;UY#mT$mv&T=68mEWh(zk{Ee|xIIEFS8aKw) zELt{p8AA8Q_tC_R`{;`N@O6feT|4h|klx?t*_UB0-h1}TGx4XwPON1(V;k?3@2*AM zo*Uylg2_#fZHXT|*u9n!GlTR`-~aXM#Oc?<<^C2}is{^V+>_B_464zsc^XadHUy(K zFy4?mEKD+X@op9?eNTKAjq&(4p2!xwx$QD!;O8JAKf@apOKW>iOx%$=5|E7hQ4y%rbZ14{} zb8~GP-9DayS_sAykiOm@zCuCID8??NP>ty?? 
z#?Rh>!;f?u(w!o{@m9?YZhbh8bXC)G*!Z-5hQqB}Ioh;H)vgPDMZT~|XxyP`^V&;!q%&;tq~=BLK+gRw z(B5iZw<~LRJi^#w1i#RNBN}oLEzjb!UwqX&bGEkbpSckY#QBr4t~F=lC>b&vlMJoj zhKo;&*=`KJ`tqv-gD@Isb$fJgXQXjrQ8H3b2H!c0RTfCl-5Be`v^l|_F(&-gy@$(U zfP>l5f_a3a(4ma*2P%u)?a$mpn-r#sr}O#dR||?0-#!?~Iq=E>gFfq=tUWCj;DdLX zd(nbWR}8@LO(tRVja)|Fg&XoT`V_|PYDREz_MIC;4nBYWtJV1y*uTAaZg{hu1LUf` z2aYB?9vd1CT?@aid<(LNo6C+SgDwk|4^~HVWs~Y#H0eVI(uo1LS1366fuhe+QQtl<(Irvf1r+LR|c|U5{Op zD=fI#NuW*(B5VxCCuB6}Yus zjO0S&+}ueQ!q?0v^b6%2e&P7g`?Z{u$j!@Q7Dp9c?{@#%1vj4LP`n<|CB!%w6?oVn zc!FCPtw&2U*9Lt#4 z3Km!}6J_|4U)k#t2VU0$h9u>cXzDN7QclX}a>Z?55$Rb5N{n&I^@aJceLR#)(#c=f9XZoHdh3%rvT?mU|t*OhHxhx54}1oj#% z>vUr<9T+xn*o)<5`KHn;`v1Y`y>C&nHw|^aD0^sM)%u13&F^?)E7x;v=<(F=JskN=eJ-?F(!NWp^!5LB zo^BiX;FNXBQD*f{KG_=3-z|Qui`Pf!S6Ok(exyd{?#u6IYU|A)tS8^$zxK`bI_312 zm+OIb0hH&>KmXHzRJS}|{nz(D&2uxw zJqEp_He#$|kP?=GKBJ+@lR!TTq~{iH&`ar;3RM-1P2%%`!}tP0T)PPfym-OqaL5<7 z87B$(@LEdto?^*#h= ze@$P}AmhtGS2nAyrE9VooR!xHoC1bL%#tTwB6tu%T0A0WdfbQp2u;d4?Xfq4WbsI2bZR zPQ9`~ICb~F+whj6U8on6GohlaQ>NIE@TrYstXfDl^eyIPk&|s@83tMViukZw_={1%rElsRK~vD?%FS*c^2}LF()` z9a@)>NE*rmf%aehi+{N~bLR6rQohY#+yN~ajc*t3fU1k%M@Z5d;{!*~_lztbVeF@UEG{0Z7TW+{yi<83!G(v7hgzegB-(4vq| zynA?>Q~d4*gDviiBo+^GB+3vR`9+VEJvg#;S6smvON{}Fm6+DNvtBwA0sUR6Jc+(|dH?EAeThYbr48Z53$;a`ZogGZ34OQHtYhIx7GrR|n zz>^tLXWZdUm1&DC&t0+WeJm<1p6cPO!wMCo55|BUUt_>g_N@%|3~lBh^BLpNwQy#P zjj<~Hk4KSrDcp?N&r5?=84oR>+)KGbr?%#555;$s(cWnn;%;!Wl!e04mT zD6O>{jXo^iy?(L?Q^vv5-SX-e#t2=g4aOh1D4X!WvshpA^x<{K;ORZOV8k`7ct(Tq zMqW&w85g4fW^fX}RGDbrZj*^0n&NZji!WwAWV{y|39kSvFQF=)g;vja&+XqQgg{DS zX}>!A^(d0Y{un}2==Y}Zo4YVA9?*FS>0Aij*ZPS94L*x%K6a4U_AIpA)8;Zb!>cLC z3}VK_wRni#CUg%Ah4{+WLdm@S+AFKiGRp9L#KRamO*Eq94%%jw=lVDp$w!Qk!h;ASvMWzPi-LP@(gREDCSJ05K{uBxv98{fy${ zVH#hwV8j5XEsMc5gGfA^(Nf%QyvP_oe(Lr3ZE?wOUym+7%UHk@k;gA@?wgJGLf5>0 zF!Btc;S+7yV();ndyT_G$4+LzHLjbV>Vx?W&Lv!tPz{T*hLO7jBU9k1I?$Ysrto{a zEZ__;Ge6COL$qr=?hW_CJ~YD*+(L6*{q|7@-d!8RmA3H2$1ms%_>A(Dqi=l3Yu-O> zSERZvD(-8Zf9HE|uRiXUzxc~Ba+*WYrZ6euG8>x~Za3X2j6;0_PT^Me zHg8%OzMLm82OA+G_V3-@oLtD77Kj-A_s56+{-=MnIvYSH<^! 
zEpO6z_J&Pm7KXzkPiBW3E=Ey{#flBm>Yas{`tM;L<7kqeKI11kc^ds0dvuBkFDPR$ zk9^$k)*X8akMr($nLlYRHFhk19%*Ni-5m^ecH^Nx?Hi+DG`VVrApOeX3|`@y0~H*Z z-?bHT9ya&bjk32z2>B1xUTr4NV~_hWK1(yj}ub;B9{K)r%vU#xCqoE6H0E@)@#EX0Su ztlBtd7Q{&@}qx)N60V|6ACoEk_S!@J$!HpUiJwp;Is%Tf{T3>})Zf4q@(J zYy+HJo}^R5VLMji2b-Et&1K{iT*cT$FLsQfAq#2~k|i2=P#--j9bKnar3LCd z|DneS&Gslybu{LX=dl??V8$lHz4GoP-+xo6%gf0+Cy$?+`F?LW-cu-?%Z0AHeT~<@ zT{neV47b8b32haU*E~FK*!bBI9}PkAtn|?4TX4|J@u|JhD?5Vukc(ow4Yy6<|J;-XCJ zmFbwCFYn4rsjm!qyqu!l*Lh1?^_0TvDX-+~dJPoiW8jEEv3`^>M&52H)HBJ;t*Yf~ z+SlFGvs9oy<$*~7(yG|C8)@f9i~Cfb`FgpT^z3Uv9(=!hIAwaeQMaQX{q>cOomkOZfZ4ZgvYuy2BVrR|X?j_hB3_&MbvL2G;PNm;t>G2RzX7N;sB$+-ksMxQ5}*L+H` z!OgphjRf2nxRrVak%7BLZCt}ELj-2bSTOIaXfe1fc^LEFe)ZKR;k|i1U92p9v+&qa zu$%5yA95{zo;0HZa z<`_)ti{X3y@)Vo4mY2{^_^+VeCy3V0i2+$X=tEkI9lQ**(KsyWd>k-2#{2TmJO}1* zj}|m?ypa+vmhq%~UUK2;i58-5?J4aHgYebm@Wt?K1Wve;O49!Eu3y(ZxoexaoZ~58 z{WFU3qn_2Q5yp6jJ}ShANuQ85AMg7M%<`8socVZMA47j%-+1i#rQmzF@y5|*(aY&yZ2-$<1xwWjc5Cz1afgpw`|F~ zu)*-c3mIg>g?P-jZWVr^`g|U?NMKwTKSKlksz@8)4=r1a-j|UI?QDB753TBU7}qG3 zDSE>F%x;Qh=q)s@n94H#8DwwPhiFBd%~GNqcq1DGlYXOc;MI5&&cY~tIQnc=v?X{?3l7HXcs%DFd^!4{_)zBm==c9%wXH=U_z{XFfCf(&+ZDkeKuP79bufke zM?d+4=5e!QlOb^cp@zZs;Jt+BFsB46F8?G$P8VV?`9QTk)Y_N|^r2M;nVUTXn|mz=t; zUio(TP$O#i(yMR08r^4f7@m~(bz_RR*o|W2-nswH&~{aiAw^tpH2bR$K8m04?o6%; zZ;ZQQdkYojnYo%59{A9rK0kiqWaIPN)WeJaXtem%dF4OOaMBKf;bAYV#EfN#R~Ej) z@OUBm;(d-*Er|Gh&|#$d3m*`N{(K9Vzj^Qd`eoDV$3OZ}VK2VS(0Ow8XMg_BS08@x z-t_U2aD(47u#tlp8W{lbYsOTxZ;s<_DemsIfnP_7fXAEk?J1bLtv&dR%WoQq&gA;?_IBSN@Ahs;* z-Y>6lER@f&7KER~E1vD`FW%}9N5-#jE_^wCbfZ4spCQv6aiDVde3C>6H+5`2^fBJX-b{0>C{IHP8*#Gf|@2$S~!{1xIc>GjtxA4=B z6Z1~j!cH+z?myV#aXb-U<)O%s_~3z%AQ`n9lV4ppH@ku?45D{)zwtjhX>GijQEqd> zAp;1boH=C1LJOKBU(|+u!6)|kM<4tqkIBw3C_I#duJ<&D7%Su@3m+GqWl|Y-sfY`0 zq495j{?pah9e`+&1FtyLpPeYXGn${sIR5UNEk3mH;y~2@`qTe32K^Ix?m8pjl~-Sn z4|nKeA;_+ESoSY}{_`n&Umm&UaCE%AIxHN`Zb^NajK4kpvnxZNFf<1a9$S6yM}IIf zm#_tl<2=NLpdp}2M?caT+}Dk;cS>Dy^%3OcJJ}H1oW=A?N0t@xD0>z%%SS0PcT4| zJ2=eXk?>B=RraRnR39jS$Fwkb4532ekuL(dzYSd@d^Mk#SG9-1jSRjsnkG%&Pd?he z_edqRi#0gNGs4d~2L-QEFE3_e`+0|TBPuXFo}FyTR<+Scy3eaSI#u(7LajW_I4TX} z{iEj(D!ub4%GaA!4&s5sB{892WGgWp?SNuNc3<}dWQDOnBwz0r=yz>iO5$c%TIvp6Z3 zhwjur$6rk5&d6_9;nhNxFx=~}BaOTL!G*tWZ}G?6L&hEcn5ulW4{isP08BjKeVq!hh$BB-;O`KI?ezp&E$+4;cUz&my=7x&89nx zBaVmK9e}>CUcDS{YIppStWR#VOZ-B-G3!G87DDUP!A~vW*=Z0@MqPGItSU^)` zt8p$!e&P`Fu$>#^T{8-Uw*^1D4$Q~zo&B-}=+lLjIyPhfd~M!v1Tw#Hrr~wIBYg_r z#Gg1(kRQd!7j_m5919-rToy9uQrmYX3q+&ij1xYe6mAt>9b~HBaWJTSIum(PpOH_u zCs(jdTQoj;;-$os5@E`yZ=zXIkvBU*#o;aC{RVvwc824#g{uJMNe)iEiB(&06=a7l~B z^sdi_QF>ahF6H%HS-f>eQ1`e$&mlrJQ$Y{MXh!c|=kvZE3RLBIFOSS#_*>wdT0EOV zwSytUP4$jq!+@av0G~t(@+r*%z*1*zoOVx|%JU!GQ|Ct7ezm>aT0Z%wF7NyA^-}Ui z(d#ljUze{=iKN?rbMAuA0FbgDTX5!6dNt0ovidBBe>l;mKGO6j2E6u9Iqvme9X7vN zn->$1LI4Yna3kGRr5^b6TwdwGtiB#kqUBw8OS+VRF&67TW%MQI?>^G0->PWCvi|UX zy0EKB+sy*|y2i>X*F01I)TMq8l@Y;@v0l3%d%c|e!>T&And}l~7-1Rq;hwQOPCtSZa0qwffl()2%y@OSNq1rdmX5+^ z%i7cvO+LzNthsy?QWKcUs%FwNbQ5B`9SC?u=_?8cWgq<0ez2By>8GyYrDwbYc_{EE z5Ux9j)po5ehMt*UWR?`$#slkDlN?y!!gfQ(5eh6p zKznLJMnVS?t$BS}!8_DZ5e#uA%NZ9vg$D|QkUR`y%J*!6v%1UAQhj@7`*i%zN)Szq8HSYp=cba(it?46rz!^{R_|rcia}Sa7g$ zjLsa^zLX;HkB-pv0!Q#`c<>!BE~1bCLTF!(n>FYvprESFe2;;+@0)~RT-KQH#w*^K z2z>+s&7!yUz0z|$Xz;16`EFUacoZQTY0|NsZ!FGQsHAHbU_UulfVC=b^ zz$pU8eB3Bi3A&;r5aM=ckZ?5JzQyom9XaY5@@CD5mO~HQQ$DQ+dGjdM@KmAN5IrNZ zXff6?=qzhV`idp0T=cEz3W~TnUW`_v+f!Z=aJv?78DBVKyhG!Z!7SFZb}hVyGlnQ| zQ*48ne0%t65v(sVYBE%vJaam{oEu|;bwl|Vsl{oz zF=fWOG@oA9iipss-NCSFV~#MRo_<8zFN&Xa;Th#aj!jB~`Ls5y1*tzc8PElVP1)tr zO3~qKsUX7j82{q? 
z7~L*M^jv&ZUlg3Ok&L0=`p11lDc--w=<~K{r!LLP5OnzPp;1yfCDAteyVFIG`%V^uiRhs7hVA-W8t`C!0hATJMde- z@rdSjCNzNL2t{L8;&r#eEgtsAKluJAs}b6im(vwr!5=p>8dL^s=+nL?;+=C}6uPx^ zuM>ZFc0O7i4jwa)M%BezzWUjSL2U+Q_%Nz4F2vIn;7fRxp{JO zJckmi~EgGCt7j0cz%Z2~q zc~Nd0B_YL0ERGxz(+s~ZUS7`~4SspY?R99>9S)3l?w+uJioCSmA7_-5(#bxctrr=Z z7&Tlc<{En(VvJ#Cy(&EBVD*G&DV2D`~ zsuQY>;|HI98gMzm+gJYbuYWT5IM79L_7Za3!T5{*@#VQVFWlfy3^?GQ6ge4f{FRpH z!%x3AyzYz7e=&x#ALMjE14E0ow4lMXi7k*hCd3m!R(EoMFrv)HM%`s_(5pgao~`oZ()5(tSb z#32ArjEydIRYq!~-MbHjnLun_%@uhLzB$sRdIT#&oKB^AB6o6_;iKfgJ9jwb7Gvh_ zQp7N{O}d`$F?c6~Cj*+(8f4Vx$-?l^uj#W!hxUcNIH_pQu~&i{y#!Tlbb(-S!ZG-y zt}sq~YRB&KcE?Z1n2#&Z$z?9>yqr2xhCL}__lw8OZ?9Po$NM(WWWN6*677<)e`sKseT^5p%D&c%3wQz z6lka)^F}V*m0ZlZ#SwEioEp2dFZ5_6zUxr@yY}&vrfJrZh;A|yL2fYb7aOlJ( zzJL360dI|wY%E}eftA;Nq;maQLD5ncqienjU?wfMCD_q>4yF@zi7yZ~j@HOQa^x>GSw=aR(OvKM1t72ifl8#;oe^O1NP ztdhfk=kL-g;I%Y;X%>}u*5CQ2`UIb)@%~Kdis;i~2vZdDdETUt@qO7*0}~8j>uWG$ z-rAow*Pi(^pFLmehgdXY40)Q?Kd`jZYankic@A#v4<6RW*bFV@`oaS8Jv>bPser95 zc%5gOQb;F@mtoq~?V9x`eCW$2l|nV;Vk3ROPE=}w$5)z~Zq^Z|QIP zP``3R1oaY46IQ={4Un}j*R+RV!eESd{qq!7uQy}Fs~wC74qB{(i#3>rLk`R;^?k@0 zJP#hKw?0;QTq`nQYuiA=!Qzbap!#d&mN7LJc$uo8^G3PqEE7=u1pS!Y^6GndSE9FX zC;>V{6Vrw-{21^0v)|*-@!-MA+FT8dsWA9MbFdVY*7>xjFaLUO=$h{NhPUW&tu5cF z)6Y_18Iw9oVPEuu+gW(aV|^{{SwCo3q2(DAtn&;C-w(=}d;M0&j9bH(u9yFc5Kwsx z@|VB;yr{L5>^3_`$-pwBtkHM6kcpr{VSvab^2jF9$J!wTq7kV( zIM#Ulr3!#^eLN|P zv%M+UL}$Cah4MBA{mR3Mb*@i9P|!CPX}v_qo8h6dzJfafbmMNGtCn&sS|;JH@#BZ1 zB-FNCY=j>3u%5OB0x2BuZyeefgMLFINW%dGE&*^}!MZ3NE!PxX#&sTlBsM&`_NTp! zLr@zXBRlo2Ap|LSf@0uE4M>i z%Lo$I_GL_Ze;h%NQ(Oq}lQTJ^3*p0np&H%5nLf2SANo`GJ?z(fiEh9b^Z|;NeJ65l znJ;CVzsjoeCcewqISn>X^YTUXxV~R3^Z}Dh zh$8%nVBUCtnMZ~b^nE1b(d~|sI_@v}2E86GAL^w2NT}Z5_Y6xE8^#IIQxA()aSV|W zL^~83YsZD2>uHxnLyC4ka(Fo+i56d^xG&LkXfuO)26z|Kpjk(^3BZgM=1lv${8Uy@}dwsDYH2xcX7O+rRCUry*Hm|@=1Jol;0F9FU~@SmRAp# z3qGa1pa=(6sd6$ZHfM|uCr+LoqZ9>S-cu2M7cXTz50}WKX$&9Mv-LfM)6tGc+gxpx{sq_BMluNGa_(w(_*f~anLzFf>op?2p5#S)J|H?#Ue_ZTf3 z#ux_TmwjEmq5w07N|SN^!lfC5i28}N8HI%bF(+BZux&+UQ~o$PmZPY0r!RcxS?f_> zoMIXl4Fpa`Py3j4AjhfPk(17<4e%Y@NL-xBY==z8t{a!=a6f(WCi2(>(&;{EL4!h9`XH7r*+| z{Z5#@+d9W+7&qkFmap_&272pJggl&YiB?3n zi^zHz&Su?MmurS?M!8Xldj`+;rDkg8$h$^+33Xj_p-<1S9rEu*9?8~s57oyFJCj~z5(Mz zBd@eKZ_EIR-Z+$`>~Xruy8PXjUk2mG)%#n^t6iPz84`BppyCif!xt}KY~9_Qb$0yN zu~Gh=6p@lk^qaMM@%%TdAO6vw#@kmui`-b`qASYd(9=ov-vfn@FNm(oFt9M zer6A`L?(qv3^|9$8?9Zmy0xgwox8iI!95dXpuVGXxSS6^Jy0}ds^HAKyvldu z7e|ks3WlQ8+pjnRH)pV8AmAVqh`^DEzC_?MKI|_KG9%6y?3*j;S3-XMEyjLyH0gIT z#B6DAz^7bO$avtSialZ+x$y%yR-W5;j0zTekq-;&)nEULrU&ohP&jk=1 zNk+PSy>Sg348Bqlxv(#TW$VlR2=HpJ!$X~dVU%+c0-wfH@ehV6eEvx35>K{=-MxNk zoT*ZriOjy%o-sN>lb{WN3%2@o6~pJ%>D)$(`?(^ZVTc@%rQyt9hbJ zli&B0UwYF!Z&sJiC;xn1oMJ(&RmSj}E|ieEyrSCVkM4{^&nmefPV6Ft~m~ zr&A(4-HC_TYw&tbDQQ>)F$h>X)QJUY-{9?TJklwKeqDCFJk!Q0|H?&5gd9{;9i z(l>qgJ3m}~^3kV@)2lsU8Jl!k?fcOBtTpe6M|=sPg85vz~JgT==v775$Dg ztg)!WIdCLiaHQz_B?^8C+>nBYtVU?gHvi>cRbK_oSY%cST7qk#*gCPjNT%@$rbn{8Gn(ri?WU{*#q(6 zv**svynMIFad*T>y>qYmb1CTUg5j)V25k-wa=tZkFdm^lr*C$>*gdqwIKO*qeFfu_ z90_!0vMzaxO#XI@(`lvq33q}@tY0UL&>n}ew1kZ4E{<)HtjQ_SaJaEP8P31?`ghY8 znz|Jo*((LvxzzQ&qo>Ar&v8J0VavdK$zj%_6B6X|b$f^J?XA+L>}VgjQ~&NCVQ44! 
zI=MxkVaRsERgj>0<}~MkB5U6*>K#8Dr)YeNylzhELPtv<23O>5m!00Yd3Dk)aSV`C z07tGL3jq&-1TiV?lmwISd@zRoc#m0qTU-x;V+;eMx9~FMwG(tp^|ikV zQd6Oa;OYIetHH)Q%Ifr4AJcnkP~Qy0wzz<$9C-1Xl44`BVBZkBnN`ueP$rMcM>7TTBx^pgA4DNtM2REryM0s8*)^wXsiy$}hCKUQatUG_=xRZTeY$ z)qZeT;C;W|%Q9z(L>s*-GjrMpIJBnM`&E5lHMaiQ#P^p!^XEVNNjZyeB>=P`wHXb( z!GU?pw<*{4f%Gt<`jS#wi31R>ao+!bfsYfay7Q_*bq9a5_XJ!x5lOVEv?w;oZEe67zx4y=G1fPpO$4hhGaB!A z?acb}cYthe+f?z4`B+_RN={OW{kOCTe~k%k%>5L7fo9+2fFNj?V?&;`8eC}BW^JEb zPTiQ2a8{jlJ`WCc8`d~+-Z3}mGdwa-np6FA5H)eLp2ndSznU@HHmA?Z*nesdDHBu} zHz}(xi=KIyU_0_b6E(*~*XHvA1NMR~K$bv>qBh(NT5FY_$Ui2Bo|Hkbb zb0OB5Qw%sc4LXHkOy)(T7Xg?e$B00IN5djU;O>6L5|OfuLxfy{DPj9zMvXC02D4X9 zjRH);G-r$R?M}wna$bs%?E8gg7w4C#OA%H_jvkF*=)+OYr6PFQ8n~bEZ6DD$ z!CaKmmV|22%oG*|4LO33XS5ORCx58u9jRZ!#zad=8ABj}15y>b>8E0_D-(5O~ z5AKPUC^(B;SHx;{jurKEq1@3?aP#wa(#PD)&@)Ep_*lbge@dZ_wnY=$8yHdW-=S)-Uhhn(w`ElDmqe*-oO*5WQLSDQ~@d{`T5^anE5>L2&vlKfSUY(>61uVkXcXF!Y z@6Vrd0CkyMFful}c#NYG{ES!9@9fFo>vWEC`%YUfV+>ZI{fv`NN}6LI9A16r!;`DO`P;vpIo`js(_+o( z&gwehB2w}FQbN4PQTMROtKbu<%~5wh`W=H{bI6!1SH4|<14<r6N#kIXhb57!zUvNq1YwM!QpSEp)n z3Nn^%Y*90k+k41Z95-aHD;c9i-@8oHJ|aT<$+OBuqYTF)(AQ;VJf6Wz)ThYrmzQ2- z)G8l(hOd454ozey1BdY($`K~Y*Pg&|EZqd2!Kj93T3f}9C&S9wo6{~@+>zY&-mwpZ z=V0}^Sg^Dz))1UYt)%ZaZ=bGy{Nq0w8Nq(~xcz2x{CR>enms!PNtz+Z_$+50e)znTi}pZz!=^16JNHYiQdFsZCHP)7ujYyI84txP z$-!ht^+ZA2gN)(KC!bClnWy21k@o2OUEG;Li7{+aiq-$%rnd0i&Ee!=#vg{a)$39? zWUvdsDenXp=ir6Q7gry=cYLCRt#fpXUPDvw$t>*$(G4T1fR`o3P-WsPDJx?XU-XD+ zdSG?$LnD|mIHfzOPijXrFP?I+vD{)fZz4FL@ffm@y=7VXt%3IUmdV!>u(Z z14L=Rt~}Y8!4n?OUpN;Yw#`2F-S2+4HZ!^=Zw$RhE1XBS;)`=PL&j2p4A!xed{PK$ zn-h&uli?P9xvXAlGAAtY&Ud5l=h5CcKU<3@PJF!j{!6FJd1!AFg6#@SfF^yK;Dx#L4*d1#$=VlO9?EqYVIWvyia zD^lL!tK`xepZ4X%W{5v}^hEp8-qII!@+JCpion_vq=E*Y4nK`1!?S(c4?{8M;F;24 zUA}Z-h#amSJX!Ab;Z(=tHoc5edj9wk-3_e?N{|jGLwk6{&o~IdZ)_9D)%xC<{PC>4 zcLHl#EBN2L?bq_>yFmO(eM-~x_HBm#D;LA#vm)e=O{!6RlAemD8Kj*IlMcseMGhBH z;SB!=l2_&Re%UDpd#5oyd5pJ|&;52e(;F}6jkG1RPZos^&g-45+FU!h+^mer$PGYCG?6$qjVts962!oJ0zO!^qYV-m5T<$arC#UmV60!poxD&0f> zaN)`Eq)h)POgdq_wa~-mFz4P)U4Og~;_}k-+E)tAM{R1W2c8)~eQRIZ0uW69!Ln5F zmw{Me!hXs@b?57zOqqdqKFm1P2b{4Ot4(crt-sM{H5>9`Y|@7Z)AjsXd$K&&&Kk@! 
zvY1^|S$Ut<>h%teXFT(#U(*2?JgB_V1GqZX9US!CKmo7n_?&SCs(U}i9FflEsJZ)g zFz}{tzm=PJey>08S#Nb=GxVA{?eoB}2Kw?v-^@H{Pg_--X4diSdnKzh^?cA!KLfAA z;o006rjK^;HT(=5^SD{`ow~I%2B$W&b%WJ1zV$LoGs_^%3@8AhMMusYS4N&H=_v$wmZv{wCbSr#&se^zj)9PST#mVk>S_ZBhJm$T z{tu&f)zrflOK zC3bCKWAuzhf~Ix`9ZUb=&_?eCZiaL`W%yn17-sZu^EAGp;qYgZi(}VkN?Lu_aG#1k zY2Kcf6V;_@TedRZCV2Hdni|7thHoX&&y1_T=#=oi&`o0spS3zLG3qHukn<8PNO@%N zrP#p1I9Ig^4zIow<*SSwa_|f9A`#IcybnzT`|ySkGLbyZaYzfdy2SGs#mqe-s~_`M zo8Uwn1UtBdqcPB^hbBka0k`=HXZ@I`9!y;zE^7=Xs^qyz3lDIqWP==B!;$s}WsS+p zk;KCXJvj=rX-pKwQC<|vDC=mY zh$K<)j6g;{&Y5U$Gd!Bh;Co#M=A6Omb;2Qo5P@gGk5PalJXQyu%!4&}v19lrPQ1T5 z``e$d{>lIHKNb1*c_rVh{?%Xno7E?ue6srS_Y=G}*K6HsTSQ_ltll};WqdECU`Vjd zu_NM8-d;ykHJ&!uF?OaJ2H zvdQ;ZTXj9rWQ^wggmy`H_rzFip{NDdwa# zNSI`NMBmn_2n=bQtQ9Zwp$@~YahU@O%)H`5_G1wx48{1P2=|+}u8-ioKcmm~9nV+i z&YvB+LsQ1JH{}=~d-3YgL?OLf7k4upiP|woltOqpfBu{D5PvX2Y9GgFA_5W(Q+z4Y z@>DJaw5$`x#tp6a4UHYVnp0Z+oM3AC7D~IzGpLr*q&iH*V1LvGzu-1GRk~n4G zwyw+rL&2+XU@g2zIRPIBnH-V$40t#Iv{ALyrOOwiuN1519DkwEkmICffa_%+Z*R&e z6cx<6Kpn$dOi*|hsq?bELcVY`fz~P83{{r@7hiuBPn7Nk?nLCpr&E&9*z&3k{dcE^ zJqumydsjwLaNd2{QG5O1L2Kj2v>0?m!nr`r+5$WL-2Lq@QYtUk&a$72^gNRB+i400 zix>Qv4FVe0b*g>PW`YTSe?`&XBfagIuHW($)%N6#3cy zw;L`){Im9aXcJM(kdFuA%)6Syo!eU1^&=we;QqZc_kkA984Ve;@#juAv_I}E=e}{# zxz1j=Jdyiz&ry9W=_Xv@mq9PuVSr&w!0I_LoccJL5&UYoW*0~4I}>q!;%GT!cgp!$ z)K_EKmEyZ4{&`?;sTNYc-%C-xenWIK=YEES=+rBaXYJ#&=tIOLyq)NDj|efeYtW1z zBFYaHS;+`_xztqGQo?^zp79%{c#`jTKchf~p<|r}_Xn1Q*UXG~u zGpeEE{iS2U!#R4NMZ=SFBAI{qcDxJEbjjP(XyJpSM^{%;>KPkEs#3%mo5x_5GmcSU zj-H2fr;qR~PS=<5hBwKCyAK}EwrCwKyoPh~*3}EEZ$A6gYHP*@O8ndC2VJ9GNAusw zz{B{tCpe$h2VCLhH==KRPs*(aMNLZkgXhetnvpN9sg0%lI#N6Mc(PVwLF4H3&aG>! zt&d7&lCxvrd5F*9b0{FBy< zTxE?-v}OD3jJx$O#TlGVsG#?1e$z#w9}FMDYw>ZkWzOv(BMX|Z z_=kv6{K_c^FY=;2ma%eDJ~bEaEwGor30F1IT6I^AI|*F$CsN;KpZH@2+i+)m=Bbuy zE1oP5a{a%PqKuciVEjS!ek((sz1}6H_9Cfs;C6e4WOOYR0;dES%6b6z#^#tqbt4B? 
ze0Z^N~HU|TuQ-}~Uh4Ay_JI(7QP!P{ewVR#AW z70HPGpngjom)t4H;6R3ZCl<-047!{__io#ZIw==kIrcdU{AKXpMc!+IoytHk?s_!(GhSzac zHMZx;MJ|0L6FS8}e&W#QSawQefs2utGq#`=>!dxyS~z<2Sf^KRt?sMo zr$78;_|CJO+l<%bceW`3034YdbJAAcyLYpG8%JZ~6y37D`x)XBWI<_7IO`u4wSWBB z@p#D2IlX0#(4B@_ z;xX-1f(N^Xr>r`&A@_-aK$%4`>w8V~Yyj-7HpYNFJ^M~?>+oSEVDqe}wXX+t2gVl6 z(j)aw+db9SdK-kZX$pMm)SBHuTfq;msi(~HwC`)H$zO)Qz^bm()7a*R^zuqd=< zl$-fngK05bfvfU0H6yCr@><4cV_0YhEv&&?kx4fbAfihRC6+RHn0Yb2`W&d|edPw| z+Um3R7MevH0M=*ER5SMaQFrZsC<#VI=5Ox@bv-Z@1E0SN1Roexg*bqr(*)I)#(cN* zrD$WGhOC*jIRnkCQ#7JN4b_bGy|j)Vv^&tv`*l7vd_z0_DN?_a4rbb}tr|douiAu8 zrab@ZY(h?by-7gZo8ZGtdhbOSX$7;J>-TpYjW_-Jt(zTJ?g&+a&m1pK;4?TO*W-XKkD|p!KMh1?_F@z4<*IR;d>VMyT!$g^st?hAGmMSD7FR_MzwK zieSILqm~3jM?#+`L=a{GLC9q&AICv+33hV|H$xLl+Kt0Uk+VZu(Kep!1r8gzQz+ig z)0#FG#tgJG`v`b?_9BH$goHVun2lkpGT?{9Hwjb}ZNd^^zV1`@ZLS$}GBOw&V63^Y zI>;D@Zl6+*N1z86BZ09D+07hl*pQQ9jM{yMcl{C)dFnan zWTVTw8DpELuDqd~uw zU7Ksh8Dm}2wSXH9EHu8p&M6CwVhb%;gHeaES_?VotLN17(V~{Y(m)O)xZ18zi!QDlq$02^3@w7*!|HTe7DGt zqOS(2;Xm9_@^&Vyze)h!wWmmrXL9T_78M=Wn!nny>c=96qisU;JB^uOm_s=c zi8;fIp1mnbFuwJ{`^N@PE?pFPM`1)uTS~=1*+VPVF5EDRh{BP}o59dLEtdo{$}RK5 zNF{{^9`GPzB*WREj#bCfC$ddm$9GeLHq_2UVkCT0q8J(EN1d@Jyi;gzCm=scnLsx# zHl;*SOc;cGHqky?vr;^W$`n-vu2G7D{b}o*Gtm6tsSMhTYR1EuLz&r6?SNo|v$eKY?5!yo;<3>6duCtR8v zaM$0HlnRQud9~KeCEf&H^z77t=pFMX7qqwYeIrF0Z$dxJ z_jsqvpvECmTlk!gH2qGpoOvAn+h0f7DW|gsKw{&=uQtDZH--CJ>k)wEG);*yR;OFu zzFnf6)(yP7i?-1|K4yIypP!-A>V~n_j6EmbRC9ZqA9;7h0>V9GFyqLfjH33!>p2wO zYoB!y=cJ&pc2TYMfVcM#uhpOWqsYW-tn+7$=XrDN^*SE>&K7*EbzFb=(=-zvL=u`4 z#-Xpi`D$?OBp7-+eIg@MM&nJL)Dqn(n$rcMjw>(sbG%vYOJRMRVT{5=d3xG@ICGd{ zD|htma$aw44k>3+;fU}yKj!UKhHpxuH9LD>u;NenkFi+y);|T1vZfw~E<-hg_UY3{ zd%u14#n}rf$X!ZXMD)E!o!)3Yi#Xg;I*l`@P8894F`O*>!K3g4`DDm_2ljW$CG3ZX zZ8;jo_|QDv=Flv1@6@T2DTdK(@H+`4a-D(v?1c-fPe1u6n&6xyU%Xx2D&m$SpAq4M z_fCyLc5-LOgE;RwdtAIn$(9CXXYd}34{%Dj@Kw4chU2@Xm149cmmH2qSa(}8oVzSm zsw!i%&mqX|8TdI-%)gxVXzlPpQQjFG!#{tD{Ft{nBNN2ljVjpwD+ZmBZkB=Hu@UY|;t5Y)+3JJGICWAyL#g=#czHQFMYoH_z(VYSr2Rbs?#K#V2mx>4|gdq*)Y1kd+W+->&EzT zj)Fa1Y{#h_=<(3_Y~z1fRHBnmd*XQvCYLXsjW=FcUG1{?1MiBK^JVO)xNhUpgRTw#uWw(rzn2)^S_?-M+{?}ok!k( zFZ|q|3u+myZ{}Qj-26^@uI5FWEBWy4755(89;2G~;~Y#TB;$^gyD}pB8G+#CNoxsj zl&U3^w2sJiOG*$96Y?kCfj2OWyEvS4P!4F0>Q|MO`}xGtQ>$P9{BKv68n+9TMdzX; zMwYKW|80HcCtFnG_1PC+k74O(r#n7AUG!r(ly-;F1wHJ@Ny?D@w&w?<*Dt^6B0oWe z{inB*EsP2N55@l&(#;#Ek9EXY^&&pxUIljzSXaYaTW5Qs7qXL6Eyp@3Vf-SV?-zxA zptel#q(G_uI?q8^!!x1-`FQ0^<|z2>*{`Pz$Ar2Jl4#&?4kb~(_NbAYTXzj$#%yhJ zrg4~X6mbljJ2HauPM$F~3T|r=I|tR_Bgco9hKF^j?vtWvop5p5%%05H zPNsghz3xq^`F8BwKeCb28}9Cr>cf1v_?46EMYy?e_Ol5Zu!lbl9(;)7+WK@NV@DC{ z%er~6`uwY}+G`#b4SZ;H2Req}9CDnC(%-4?yBYDo&4~a0u}_9C+3(!tf`@?*f5Ts| z=hU1v7VqY0+m(@+oP?ImCubpfX=kz;xt8H@;60G@NqV>k z@m2f@A0o5fPqw4;xX6CPp2b-tDjUtCS7Y4MXD7DlI(U`jh4W|8+{rU%!V!|~lJ@io z>m6M=&9i#eI)$golihY^fE9(0Ur05w?x1pNNPc<7^OGl}iYX#Ic?>^&n2f+N`t*5x zK{ZKcanD>&V|s{A7e4ugIw)Y2K?(AZE?)9i|$A^ebrjG9}7qz zZ^NlI=VXnG$2Yv)Gjwl0Hikzs0N%DeooFJ|+ap{+Y@Cnp6fDx5TC46sqPtlC4~kqz zKX}58s~5Vr=E^wxZsfFm*dBr|Il=^tz>U*c|L))Z%kb{biruS|r$6qb9DSkF2(8^; zwugQ5<>y7^?_2%wdq0k!q;s_1VgWOsBZJi!$H(6N`{y}XnX~0pbT5zrzVX_{j*1=` z?wtw|jn5JOvb{eZ*R-_&V$rp36CVRq64)4xdtkn2)+1m_)0#%z^CllP1jrOpRs;S= zoE9JB!HCwh-M;H98cE6tfsfQak87S-(W}3j(<{dLZGHV{n*=a@D6Rjc>%qXZwUl8r z)rwMzPxUFg)bT|h*Wj(UghRrY@i3|_-KpQE5S$>HcB(UtR$wtA8-sHCSQ|qNmqFS> z*RJ~0jvjSdLxVeigL6`h^c*^-%}Nh!(=ObmF0}zqxvGB)2q5c+9jz^_$ICz$LwPXu ze7e&_U&+&kh^H1L_X5YXHzVwG&%r(~@T}1WgiQN=VNMo3i`a1TMT#$Z7q*HpRWf|^}RMm zm8Tu`mFfFgT|Jz?>tpEqrM~`_I`hEuHV5sYpBbMb+Fe^8jC(s0R(B+nGx+T6ukY@( z+1pH?#{4&LmLp5P;iADZhPc80U>n|NUw+wUwHQ(`P{Ol|{}`C=rKB+QV#0wj1~Ti! 
zdW^%BC%S`1KGEEcj)0@MwYxhwTuXVCFMsj3RXvVMoeUe>lsUn%m`oS4R_ib(8px~* ze3_~l<-@~dQc?%eAiqYe0iSJPDUZ{6Air?efb&=1@n!vRYt`4VJ|6I?Q`w*pVaVXA zdWJNpym0Hcj(6d0{~Djs1L)@jL3s}Xgv&$`*Vq;Q^6cmaEJU`My0$nE3@SKw`#AtE zItY|U;>{jetQxIu`xh@gA7hmay3x@Qw!ohvt96p`F<{rsH7AI_vCd(t9n1^(8pQB^ zYl6C~&}T&Vbi%nuM>k~$7^84`hLInKC^WC(d9>T0$W;+k8EF=e+1c_2aFsQ4tlcbe+Pd{K%49CrcwtKPwdMB zE*m(QNyAbdBKHGZJmBe1SW0HfmpJ1kIbb7{tWmAQY>}*0d4+nOwQ9xvlig;BD)Yz? zB~#l8xKl#?s2*C}rM{LptKO_3Znk*|cp}5myn|;vJMw^sx$eS1LsVQm*QX( zCIB3e6I9gsfD@(gOb@gy+n&{7Z{4}$vao~a5)Me{1t+uP)!`&=n>6wQBUsp(jVU~9 zu_a8PF>}0!vQ=gYws7{34xn?RIP$)! z?yB2?jyJ2hZ|}hA z*e3EZ-z}ljSbYGy_uG>hxd_w8bIsH*OMCVmmw~EFM2xCG(K%R@SbG zel2-1Ff<%zoQ?wzLA>?5UsRL#?^n+rzfMP%1e)YMu4R3F zIaJpP^bed#_MY!&rf{^tleL@P|KK~>jz7m3TFf*C*Q8hZf;(bdPy1CzFmq9zBYZ6U zR9qBKpbubWfLpp1aIT}ck#Yp|C%o*}T_v$;;A8cJz!^HR%!*?;IZGeF@9;xa}(roYR&Crs-Hb~Av~gY zkgQH%qWluy1hl(Y)rED~xHj$%@QfA%t!Mbkxi_7GXbPV zvix;gy}(KQo8SG5$d&P@PomQp?6S;(dY^p7XNw!kH_KD0gXmQ1!**8}-@uOYP(Eb? z8XmlG4!NcbLblP5_K2k&oOg#~K^7_-KojnV_rMd>3Aw_Ubm|kB=p8y` zrcP+U&mpyBRQO0>|XBckbRrj~hvLzkI+_ zuLf@n;F)1>ITX*uFoF6}pE7Q@KeS!Om#1+1g%c@o@K*HPZK!*|u&#MgT0!XTW_ z(E+TU;=%G81PbbpQHlqcNK*#sHV7hEz6(Y|U+%(sRXwEbkN8(_3ZQwIUB(cXz$%81_Kb zP4t``jEi2gyg=Xu+2F1#>2q-be-yYczMG8M*~3Bf17`>*FIL+dD}h-pJ~DTPn0>ss zAg^;>KRV--m2b-uV-MA$Xx|RtISW z|HdCb%&yea@N(DPond_>8@)j9vE;0L`#9}9Sk&EC`-7JCBikIC&Sw||bLWEmTpeir z1rBd?FmYKuQ+!jND&NF+aZ?&hA_wd8kmp+-EeDTszp_C6HGV(nG5AJ%_4p&KDF}5P|>4R?dTNmZ@_YLTp@%RX>PvyM#dBHdSrGLBkm!m5U=!Z8tnr)We z_!UqnJkm^92#uo`pM*u}g}*$H|8idYuwj8S?+-f-Jb4D7%C(F$ogX@P>?=2OJN@{| ze3_^j#yUw{?Ae*=>L2{A!a+>pf8E5h3JY|Ce~$)vkGL^aI`L&Q9X@W z6)&C5mM~GMZoYKus|E;>Xc+8ZUl)oDhn&Dvc2It&VKipJ94QhiMHcy&A)oEfq_}GNCkWbhl4n#Gj0)Kp+Nf6AN>#kI1e-O03x<0fuw6F z=_;d7CTC&V)Ki3MX(-7Q3=%ZKFkB6vXBLBw(S8h`^P{J85Gf6-dk>$)kzpOr%muST6lfRR8mvtCm3lS|byeafg@th3k0+h*Sl)c+eXJn&^{<`iE=59~8kj1ij#k`^(*2XW+q5 z#-dxK1Yzg|+?7_1yQQr(>Ag;rH%H$IS6_j>v_v}}tU{8Pc*W2(LUzHs3d10{_zdAAZ2&U{p1FMHMj(f-o@zADc2nYEr zB!Uv&Tn*q9d^9=DvzEcln(FrLdt|1&eU|!(tfPjD3Z)8&{khi<-0_asTC}3 z*@uR+4^dI@F@;ZN)R}{L=v?C11_rLb;}cKBKXKdd-3G6tJYzTHOgrJ4Te;8Yz%77a z3A8w9pA~%L`Qn6gwSP|{&zKo^+OIdN2`ZR((RCf)BcNYh~^|0=*)6Qq9jC$CzJ9d_-7wI87kH;s-lGAa0qMDfD ztPAFD@aQaABljLXf;Kp``5fESajd++nV|#G!&6&uRL1%8{}J_GzkQ`?p5GxUQ6$Bj z#Uv)BQk5!oRj2Cho(9I_#qPow!+;l9z<_TIUl=gp3j_B5VDH}eYR3k44P)UQyLSfT zp6#CQnXaBr6;&$b$Q;C+Ge!CH`5v-{3jBq0-t&ehexK)kUX1VGfA%GY66KBPmxq15 z!U*>O+ZQj!*A%IAV!JLrCS&=386Ql*oFtYK^rTu8vaxXzx<@D7K_{mn0ahCz>BKD{Tj z!^QHUa!ZHHogry}`=r}#_= z8tR>Uc$OfnhV1B@?-%ovFLEH6!SCiv!%P|hmpy2T8pG!ayh~>~A7+T0uSWhJuhM56 z+hqdW1S$P~4}jf?*Y**RQMBlM7??9aue>&ps9q1Bcr`B#K)GWle4cqB@Jc_Ge*&)Z zj=+IC@;V75B*T;BfNE&?Qco`d2AnWttFqK=?e^9V>=d_9qsycd|G>Zg`0NdWL6(#d zSe4c+^|J;rfA7IV4xDQ(zVrR>6pI8w8nYFeL*<|c0iUg1fLIPfb7zutRFBiE;jc5J zy`m$mP3?ECx9@p0>t0d3;2Z%n&jld_0JQJ6Uy;3!ywKq`AR>=29y&3*O&yYqX6!EK(TqfGIomW7RWRi2$928nHeA|sz|y;0=`A-9{X_qB z(A!xXgJRmb?EnO>=`QoOEOOt zL3uM8bq+npW|ei)VRhG@HBjS_12m{}HG+B@*jF|Ey^-BZiypGxz`=V{$%fO$FF?LKKP&Ei`D^M!a~R~r zGv>M~HK<)TgDIm!)$TAGg zm%9Tdu?QqG&Biuu#_tdu+9Ie}&)h2a1>BV3i%<~PbS!-7FvnTfgU~)_NAp|U)vWCib?GytQka+Aa%K_LAOHh{d=Wg9L z^u-A~yXL|d(qtVzCJW5An>>hB( zfIipI&~S0%>L_#VVRofl^1w_3GA8OMvs_zc^xN-!H$&*{8K&F>0T9;6jC(E%*lKwg z0c?SBgPE852h%@gb}N2@8OKS$;0A4jx!LjTKwIHlEfWMRo3@6W78p6f&oE&{S`&OR z!%?Pup=oxTXfS=ZP1Dh`9eb%3keYb3^yeXb8Vba^;(u)c{h zCOpIPJC=ix9_rz9fkZ(h|0#0>;?`GR78k2;6@R$b1&e<%a&3{YX|h{`|ajLT5dITz%|L4t;IGy7snqz_jw5C~eR> z0QPxw!4yF_vs?|7x_*|VnVYwi7`A42U(v{}5gr-6rO3CAxd9UcjQa4(N$JtKlup}` zQ8|ABStM^;>cU+q);535_0j!IN==;rJEiIf3MmWK3rCBVMq($9G8Oq<0S;jM+V=~{ zN_BuiJN1%Am@-TH*a8pXOL#)O0bGP}KYDmC{qPwAcJ8_~P}sse2u>=;w9lVBdr&Mv 
zJFcob7M#zQ=zf9rb^K4|gh`7gc9{_4g=H`R&>UK}7PNgZ7@*8I;ZHiV9|Gvof_ljS zkYy}1a_aGu`vxw@zO%%QzKC;YhP@!N2Ov9c4?or$Y3sPeJ**M-<>u;AJJ!v!g5&p6SiOHVU zw>Pmtz`v z*~O_fpI+-XVeeNTBlnufpzdJZySe9Ff(6=io=c#uq`mGggKe-IWu^%M23)LDA6kG< z5-dVyYO`xcD67mKw?6p_cFz3FB>c|2AoDHxr5!UlJr(;w|5snzfS3Q|R|AgLX77I8 zmr^Pa0JKSB5_(2G;0_3;I%QS#O|>fxV4p*#S&qif9zJ=9Y!UdUj=laQeYc#`CNw9H zi`!`%8FVv1qwa|RjGjA(UvKi2ex3fbvdGdng5|YGJP$pr95AC_yV9VTKL!BhP4&D7 z0qajn*Xks3GfjC>aLD<0#nFBCXf5wFNECy?dYSi!2C9F3e7SH`cm3a@w^}d{xFrHWryiM-p1f8 zCqze~EF~hiQWEq@jKWOW=^ft0zf)sT1#_bG;>Z8bCeiP`!3Agad09S>8R5h7JKK>V z{mMnlGt)m_=8imwHfX|^b}2tL%!{*MzUGb3%Ex?;w&Xv13gnV;r%py%ne@W z_4bnQuvSXoo{zjv0A8oVZ_mb+{^n+HE8FsWI?EF9?-Z!aSiRtzE@yGPC%^|Ud}iKf zp!UV@N~dCecDeXpfBJcGdyJYsY}IW-Fbf)QK%{Of*MJ%s7>PjNAo{0+B5~SwHH2sc z&ey3ChvM=eI0?(h>lzsydCzqkea)?H5$J1dH-EXjj8M}lb1ZI4#<8TGb%d!0MO~my z_|V`$ar^OPn5zW37$fZnGz#=u&(7yq;zj11d%0BnhyU*v)ST)AJn4pE*22s>1QM(Q zLSWi;-opvBrmQ6dtWV@2V*!k~#>~wfvW>5fTqYB^2e5?i1jw>W3$(+(+=vm8wFL33!;`Dxi>O_|KlyA%bGmMf><&m`E3d{W7c6w#5 zrv0=BE%I&nm>c}6yw2CIUGA|B8|P~$hkNoy%g(uzwz*Q!4ljq=-aEx7lvjDYgK>qR zx7?0k*&;Mz6j|jQkV7XPCDnvNuS00kI1Uzd^r6kt(9!rWX>*@v7`=9DyqU0`dafax zEEt}y63{`gZ8NGY3(?-zSv-3D5WtMO-P8=;+_J7#J%Nla=%L)M<<5Z!dII4d)FI#P z5*78ptMdqPY1{*G+y!B#f=;eZh1=M4RNW$)=sCYPFg#QYW>wUqP>yH|%RKYd43F$X z4;rNMlS-qUTj$vMLq$8staZRo*3Zv1QqGZ`8$%8n@Dl%XZ6gzz7wZHH_P7q^=cO|t zZ7WYSutjyF2R#vD7eCco(1>rezVrd|2Uc}XTH zG~hg1)6-6*72F#=y(stga>D7sMsg@#da)T6P}~5ewVrCb3fB5 z$ovPu;Cer!UHcLF#n@>r?aIf}r|Yc4=eBae5M`C~7-xi0Rzcs^PV;b1f0RqMCAj#_ zJVP%={{1-%Ry##HVXjRaI=+)DohviqUGtIQ(c;4ozZKT+V=I3S?Lmha<-bGLVmCn@ zGr%-R)JZ%1mZb%a%-DA4(hdP4x5drl)Wk^zP?0|S01hqt^OD1m&1?|l>7^E|j=tCK z(TCObm(g7sdxImR#XXF)*PyklU7 zUYK`SBD5ggyKb(#B1an2sm<1Bj+4 zEr2hrsY|!WXwoUri8s(BD6SmQne#KVu`Dm36Xn5U)g%;LyMB|b*@uw_&aHIrnt4`5 zY$s(b|9qylj`HpjAfy?!j_X-gO06a%?0JH!R?&-ET$n8>@LShWkZByfTrAiW)RoRE4RLDd$^d}Fx z6WlOca*z4lXJ^Cc$WZa#+czVNtmAud=+^4(V)*Ly;#a@>6@i>509%W3!UYPNS=&l# zm>S&F&d?clewnhYw@$6=VDQiEP<5=fhC4f5%g9UUqz&-iavbW7`GvV+U|={26Vjfb z-8`VXB{4iS(kx;fZ>^+7YcY3p7hTwNZC$-&(@|=Rc4o#2hBD9Y>adpV()guIb5pOG=8KB>euv`%gfS-@uQ>&Hm1mM3s$%w<4@ zbpYPh_I6|+^=Gla)rZOz$9`58!tw~KfXxC-+F1b?XjdSjv!j>slk{=#HhPxQC-h%A zXfWI$_sr}R{^m-ugj>4?ke3ABS#JYhv-8u)p8a9~K>AYOKr!+BS#jmY&Crf?e*!(i zD4`vBvx>Fbr|&HUv)l0_J$FJmEUmc=Xr{l%d@% zq%UY6EoV~;uUJB$8hLDqMGvktJKUWq0$TfAZ!lGvCOw(4s_ZvFurM>3jA40JnP3T) zmbSL&pSfw)8lJE|d&(9dr#-M!AC>xI&_zozG4`NnSM?^$AGZucgdiyY&3D?UywWubD=bZD>KRpT68 zB-p6UYS2KQRZr-zx^vw%&|iFg${&tPFl-h6a94=DQU*Tki+zN5b-Q`IHN2f~!CDW@ zoP_VR(|dc^1%m(ce1af3^3iftOG`_keajvRhMJ9U|MiWOn*p(DGd35rRt?}@0dFWj z%O^vUb8ueP@X@G9v0-aFsQ_HassO$B~Tn~M7%f~8nYYE;p zvID|$g_c$GFj;rS2vB;Rv~qwRaJPUwYl#a>lXwuWdQg0+hV@B?HTN82r`k99L7LYm zmPT9`pP?K$ zrafg)H8j9l>W^Dya2FZs!K2zf1|Dh|W8d)gfUO3oESXYIfT9|Cbpd&LeBv$wWCiqn z5Lzb4i;Qc)Z$HM~+#_%zy-Ekp^?CJ`ExTdV3LnI)S$XN%cL@ORRD7Iv^KE`HU>7dbK^sMAN*80^AL`TTPGcLTnbDcCR3<51pxe*X((U& zviyK={8IvAXPKjDIqsu9dqiiwRC86vPbP~(8p)U@on3$=7d%MHS z?(*I=&3~m==NN<_{9 z2NqQqH->tOK1+nE)+=`@OE1}9TnQ!`} zL$tnqSbTDu+JqRZ8d@raC@+U}D!?=Vjrt7r_of{k)*T|n&tFW!1a9l(`4GQR@pA}~ zlvzO$y6xC)Q)Ui7c|KSC;gipc58l6p(E?zKpgV^UcC7tmU(+27^ah-P4nWrNC`dEM z-PWywbL*!Lh<*1tE`?6;-u_xnW;VmrEcWvp;^JJ`U?pqp+W5a?)kww7<2_?!ZWS>8 zra8tXBho7We4c`-iinO`AU>-OprIOPP@!$M{p;5r>e7OvhVe{08ppS%sW-^Dy4xQY z@4R!1yLeXS(H3~Yb&^aT$VVkB@WRLue|>iO9dsmp$~SqsUzgVeSfm~MT+(wn!t~uH z{PRtFJdF!W9)cz_P{Ece9qA&KXm-%bQk+t-RGj12YQDT2nq8@eB_)>4Im>jOrv}>_HSAlt{6jtC@Kk{?@xwA+*I9RL9D6P<43K-^8nroxrxoNwvdxlzeF=2whEMUs=bb)b~#Tq=n98s%sG zw$OU+=N8w(cBB*6g88e#Nj4w_QSu>%2)uMyRn~wlBiZmH^K^j>_hSy&)i<3qWiF<15}td`fI8+nX-E15fqJ-aj1 z7j~ataZKzpA1KrPkx%WfhP-s2OG9^z%WM3b01dzSQh8b`Q)n8{@ff+_+^HL+)?|}I 
zKMW>w<2;9NxWoX0^O!!;wmkt3=Y9ZdWL&a4c!=wojxcr?uXJ41RovrRIX@Z*wjdvI zy|gPxkl>PSULunEm8xPKXg_FIGajzrqw%S%QybUHTEU$8SmNmx9de3GU4N{Z* zJWg42OILA|YdaZtF95p+r)#i;VJE0B-=`!C0J7PzFEN^2KzUaFSmPkI)pjzynQKe0 zOiaxbH?DKw7W3IoIWe7*dw0K1KRkS~1;?wuzYqDqws63gacT}u@=v;HL5D!Z0C}&A zr(-XutA00zq}jPCGsN%ML9Da$KRylUfK2J`wwC8m@n`?XzYah8 z9QC?~^GGw&xf&z7HvGz5A(xy_dn(P*8$b@S@R&=1rGdUaGUx}w4`1KA7l2qGOef4> zo4@7b9)My&L3MZ=#{L@4mbCmf8SM)MAO7wae+{ix6{7_4YZzzm=#ApXfBG-zo4|5e zT1_A-KmdH8?f9CVUmCiC*V18EUw`rP1tqE7C}j=>{uXhrU)TZe9Y6M2mDWcI7TjLtwafQP@w$33X;Do)@K4(JX$@7QKba3f$PwzJuU(vE9n zpPs%LgVx3pzzPkm?)CSO? zP3<=Io4|>@UDne@9@3(6K6V|vRa!|>5j2EtY1Sr#%pAH)qZ1j`c3K;B72qtuEaS00 zvU8EFdvu$!-e8G(Sea`8p#%j9RzUCClZtoGCQx^(>$C?h5ArgjUmu7`T}2gtX_4<96pz7ycu8n7$KXAiQs&iFRE z%gp;`l2WYk+ky?Qy|u}jx|P3+z{dL@e3#t{cZf#|FcveH8@j{&M5KKeRxO<8;Q;nyS|{G-f~ zYiuT|0J+)e`XUH@Kr?_lcuSyqV`Z*boS()PnTpNiq)RWZRqRG|gsNQ2(=|N@n8gb+ zf9Ghblo`s6GQmU}Q#yc{Vu|8!AWhOGdqq7IO-x~hY`gI;tFS;Nbl=VnZ3;ki% zx3XLt!8z5(uS903caN*F|KVTf$dZk!n^l4lCI}s}7M5^0FkuZd=R$p%Or+nwN>IbK z@<70TY|-qFqiuCffzGrXo_4Arn>3T%BeXBA=YT@y)BY(>5;Q^fX$QNr)c}z}aOJc> zt2>br+*w)5dN<*FZ@ykfCNYytkO6sh_tk4}B%^jka8FuOo-%(q3>5y{5$p!+69CTn zMDT!obx0e{EgVefjv4xD@`6DXXWQ~B{EGgym$3<4Ixq4_ zBfh$Ji}NGR3cl*A9D?~=U^k++m^PZWr}Sg_#ope&9BQeK)E1J4SJev{Iv^j7zk94w~$Pv-K{;K zucN%$M*hTiL-Y$6zlNW*mz15A1jjtgyB$!S{pO*0=WCB$m!8*Dd5*w3e5p)pq7>wd zrw<8QvpeDGW7f8`(@fOR{?T7*AjoZCzq`95I#*xAbu@Fn4LRLK!joruVbu%x0PSo|9)rSwzLz= zl5xonhu-ybEM4h!%Fozs$OCavmLqM;3k)ngk;7D>40F^$JzYbOcllbzH z5%=fWd6?Jpg8qEd2ix^7KS=lfq3^cnjkf6+%O4km*Qf7GFrT+Rz&`kS`cn4GOYNh- zGalyz>EmUKWZ?bzZ|*{1M)hD8jFuG~V>7t~%Z=u=AzWt3kh9cEjCbcIZPG05*rpD` zTi1uU<^bjW4C5XFjev;~Vg2p@G55j#5WbO}@^pIJ(=j^Ww^M1;8r#ec#_&4A0_MZ0 zSW@Z1AU@bXIcI}m_`iL zE2VLm-Z;ii^+o8C&RsdGLg6=gSDKeV0nJq**`b z$bbS)H}lkh^J=}*y$X!MUFs^ys9ZcdJIUu}cPVs;LXsU+Re!noS3mut`0tpDZ+-BN z$XVu6vg3us;eY4=o=0cO@IV+`RF@5|k% z-_u8VNm4v}e#u*YU?cqVd)oERvR$5$F47XTSsLp0iN`YsM+67%Jf1H;9iJ(V$V90) zPxc)2+=4Tvf%OR)gKqD3EfxA^6SSkyTLI<#$%)YlSd17Cs?-Q*YNQ??&stdEJ^WE- zxzRP|eu0d1jTe8U71!VFum%j$bIgT6$j86=9S#EJgZg^OT3~A^%1H-QBYC)QI8K>L z^y)P-T<_g2CZVx*vXv~glA515x@L~Ego%!7=osvgW~`50(iMkKuwx_Z!-P7YI!A)X zs|35O-|Qr#%`Pf*TxO$8@ zWkn2U+B?Acm$x*e2wbG11wE*up`{@*O7`dt7Nf*lNZ7A}X zx1mA47r2zy{AW<&EL%MNMi-n8A#)X;;2F-B{CN7XyjB)DM-|LL`p35du#wGP%S|cW z0?}uhc`n^xtOkF}e#vXXYVJTv1UuuD!?;5|n|6>zw3YRthL*drqP(3W^)Ao!Kw&cl z%B*+-4>)I~Bj9@bEIvaQoFH2b4mqw8Y`2??gFd)J#*D}RNfy=FW{e};(L@$FhkiaJpqHt*JNgqg=1aREp`FvOtiqC{RE&MkR3iWdZoDa z;pL)wwg!ih8g>}MmQd0_bMECSgJ9jYUDo{;z-c??X${m%xI&sD91v2-^4X zSp##SW5%b$;8-x-VrSVmL1nY2r2VmRwya`YJGcFPeSqdYtbJYa!rIu}fBov!VgkDG zIgja+nLC#UhobX32}THJ)+5_rjF-Al$9R#9lb7^+dzTC?0+Jv7=syhrCZJfBv(TuP zAIiDX3D#~k^JnPo?-vjM@QdO+vfm734}+uQk+m103A4GJeC4gq$pruecPSWf9TS9wHK5w{KgWT(Ye%LX{8oN`_d22n_jQm@`(MAu` zFJ-L`mvkXeW4RWcTR}o;EC+|9m!wkVb5-O&05&#*!H5huA z@ViC-&%alU6OeUxH<1B4i3Z{|F zN(HoS_}U=k474CkSs&ZOX)lbwDc1F%py0BXe4hsie6wAppRJdjqK9N4TbWCbZeiVTqQNGh5N~GE5RWhx`y&IFG$t(l_!8nJLX= z-0*=iMOslexUJkQUJtsL*R}`@Fr?T`Oi_4~%UzoAmjK>6TkQ4y+`ZKf&@9-lt>}3T zhx;ZLsVgc7P6INtu6<;8Yg@R(%f{QdNS zvW$W9)4+gPo7#^0`1(_VF0WX>9l&`r<^%Kr2sK~_c^Kype)Qww0rq@k0Q2Aun)8(B zbVrNxBB&$}%9r|!9w=zm=KRzobHz?j=G%aQNihw`F%PbsnR=9Lb?07?(A`l1(`mzO z(quE!Pvx!jW=5&CcAq?dgzP{bLRSJm(tvtgd1xP$9pW9XxBQ}=4{`B|*L;#g;2C#} zYkwh!Tx|V$X)yL6*Tt8hQFj*Q5Ibm6VAS^ckMHqE;m_1D#(s_eMo$FHy~aayOPNH8 zChS53mHPe4A(J&)(FG>JX~(Op{9NondO;1LA1G%9&somHx$GO@z;^;!uG=PZtjsJ| z-|WCwoc%HjEilF|f~(?5lN^?+ycURd-I~$030z=Ds(+j?=SaP(Kjb{FLc7kP^S27! zi%UeVM_1z5Fvr?t+N1-3p1a#*;$z=h54okKJAqPn{WunRPT6dbOkRoK;XI=+l!4N! 
z8P*=ms$C>qI)_{snYV#HUF2}tHEba7(Z}?x8DIMhWwv1R(CGDI`1-Z%%CoGG<)7+F zbQI*PCfQLrqHfU+5x|$mv>A_BYiZSfIHp$k%Xt(4wVwFo_zPrQQ$T6$D$ldCcD6p6 zy3I9K*K9He+8|4dv$R8Cxu-unB~18I&m00c*3qX~)i*2pAF znAtx|aOk_(zq-~oDFPir=}~-+mX;3u^cLu#r0GNWbphaB`^i9U*ZE7uw7Yz;>jYXI zzxuVDe-5WbPpRv*pOlYVy;M4Y?=20}+(Kduptng9>Y}|J`hXs?T$ldt!3loIrgO$_ zK4`L+v096MZ*n^|;61=JatEE`P9+a&y~z42Pa=zuT`ld{e32pUa@&tRr~M~gv%sMP zc|!2kwb@z4{zYynC$-f)f5Xx!27-2wTlIk1%a(LvSJ=S7TRG!r9KE7Ez4O)Qj2}M+ ze9S~kOHXzXknD$c+wkkeMD(?$C#OOKR_U;WEle5vei_=cte@q?1c%+B=e$LJG6&n( zUiL-$&e=QAx4U&L%jAv%0~hZ6+up8jx=mhVU*J#UZeeOB45twD(Z&aKL_FxsZMBvB zo?n!Bi(-N;lIv`<&_FlqK`=q5q?20ZT^u|y0f3;hx+^i$kP+)zwx;O#t^h)t)ubY| zj_GA0ftPwZQI2feV?gJbROeuX%)s~Fyjt9Q^gN1HWnBSK+(2JnQj=2VNp=LY<3XX; zow6hgc09tLZCeO|Rshh8I5w?h^tru$jrz3$pVHOJ@)`_K3K)f{4t#IVrDO~o(=W6C zJ%?bd^3tfApo>rT$G$kuJc+dn7|7?LHEzjo{`2;9kb5;i1mx0t zKeylc+UK6$#65YIZz0sP+vTfV>@Pa#bMs?Ac%J|qbNX_1yZH3kTycAi?4g!RMF;C; z@C1p@K^hgs6w&Y0KSeob+bJ|IEqf@;CeDpJ3zE?f_$KYTeNE%9l>?>LIBdc)DeVAm zD`dg$?3P=$lYIxjls}FLo^`DLWS=KCe*DX^;wRDet6AffQw$*X5rw-bIyG{jm+&)3E#|BbX~PyYh1` z;d=UBrE;}hK2HWE*V!6(NAWraX}nTBVH@dBIsfH)+7J8aSIi&un? z)A^CE?WV?o{V-$7zLuHgE&^@QQmKD<<@0#&_udd%@Hd5ty;`&p-Ree}Yj{!!~c^|JYdZ*_U4;?=KdA^n*X5{K`TM`D@oa zb-uZ{|Ktfe;ec#~OE?E)ZWCPkv;Y0y#2Bswi2Lq$KSECdwxTC=YOL97HgbZnfEbp} z>4LsAfR)7tr@sHuN7S1e3b5>!SapgZnLwTMJ3!eL={*@S7>P|}g9Vob#fhde{Wx$TG!vR z8y1&Wk!1yCU)~PiBvTK%mKV*I6*yj518it-F7Dm_JT;ta$mAX#y;6+ay2ZMBU?H`u z$&m7V6YWz0e4nl3-O}Axy!pL|u4Sw2f zD8WkmZ#gZUHMuvLLM+TVf>W){8rhUFKz{F07Q-={MJynw^KQ`7^+8y~0oK{_u-Q7= zy1+Bc#BC**A7^K$5ir)8kQ}HE$Rka8h}%WL^_?v;&HxgQ0rjnkU)m%W@m)MnMTMwT;1x$R& zb80DLL53={B41d8y^kFTo_+I@odkP(X1P-?< z4}y!cZ8jHjW0B;O@C!U(nKauF(6uy)cAoWi&1}@RQzl!oP|h~Z3av01aL#nFE0U-um)xiyNm7Ctv7RM@fz7(I{DfS-q%WYmZcy(i%0sibP7X= zjoV2{65BNc(?hCQ+02E(Qh~N1?C&0cXdT1Ow)SGWrc8dZG>`r8oCwF}A<360em~Ckj>2PXxmW;HCm(4>T?bvNP z;Nd^$XMAFuHVIO&_8tJJOcHpror#%=Vr*ubU9xM?GrIx#Es*ifyB}uP&C8j|fVQKf z*Er;m1Pga#LIWOvC*W{+uvtvg{w5_h25_+d{lER!%nI550>X^@z4zZMMy_0?hWFD` zkfE9RDR(HC&FtEnk>4q3%-R~3P{wou(g|W|6PjgiX5QxRTI@diZ;3{e3F-k1%&0T_ zU!X}}&-L{1SwWO70z2}ahbgJ?0sw*|2;5RlM* z5L~M|f(H4cj@3tzSM-AnZa1CtY#zW4o}>IU@FbYg3OMDVeDiZNfW?;nsUw5&Qs%~k zQtFO%5F72Z-+ttka zR(78>wYI0Ug9kX8p{Sg5$A@)?&8QV%wBOks0xu@G0pIowT_yRdh5GbgW^O&$e0=;V zz}jXJ+M*2u=O$z6CwlN**QI{!7S@h7%WPZAJE~u?W$3py`PcXE#NQThGq7eqA9L{I zhuA}#$XDftGC5cTa{KvMhUp@W)_t5@j^h`Kt{hnA+1%kQusg z{`7;K8xz~4CH<*GvPv!Y(Li zL)(x&=fQ6_>4U=mfNNb}pI?2N;apuwAg2aQg85gr7~oQ;>eE&ed^D+IYVt+&a(~|- zeJ%TEU|z7$a;_X&n9nUO=4boXXh#NK0(=#O6}OSzUF*FA0OO50>@v=JAb{A|f(*tE z9Uzd_0PkeS2Yp=QencQPAoRm;efb2i(XU77QM1U^_m<&RPNjY|{Plpu9g`cbUAxA0 zBsv_r>y-IqY@dJr3AR2ZlbAyTx$=iLnmb(wE)67T+|=C8TvZ|?E=4Y{k=YzNKu#Ng z(e5`eDZS`lx?@q^Hrssxowd)75dBGmgRPV*i!s8SdBEx-yK2-`-s^cI(z2kmyU3KC zNetk7OX$hJ9z^N0+6V*~%raP3+TrXD!`_v4wY4o1bFh!Sh0L)G=^^vqfbV8d&j4P) zX#o50|KN`!6FnQmbJ}dvYYc{Xu(vvlg^J&xY|zhe)fx!go2+JEwejtL;|0!1;CagZ z?(QD=fMC$c%g7gTqeJ9ZHTH!G9c-sXW)rx5rQEY@kEMFR=hTG;B;n%~bclg*Z3b|hqUHV`0(k68I)?4qy9&tC1#dYeDPub0a{0zN8mfBLvI)kE?!u0GK zdBA|X|KtC*Is*J*0n4hPezUDd`;(jSTz?ZKOvNC|4OaMEGHJ9*_gezF%1NlGi^Gaabjxu07 zW(Quwp=RCLao;D~)q(hR?k(F;vF3*_^102@);G_#-`;N@%Z`=TYf+yy#LG+qAEbne z@A)9Vm~@{}8O7w7_=eBYU#_>A&8mo*sL96Ux^Ec_FG3xZ!~+1jPRODF^Z ziS6Rbh?y+ZGbcjowo+?t841-{rsh&gO4vu0zS&$S$TqXyYir8dzM8dyEXUMgiy;fY zx+*%1u8-MKuh3{sG3M5H z!2z?V1#5JGtXr(}Zv_7k;Hix|o^DhAZ!F!AEX~MwU#l2 z*WK!lY~kUI1zKeMZsU{J&I4Mg{54(#K|-by7}ra!8YyLG`Ha72YoCGGr=&*@`JpY# zU!0vU5gKOQ^IYlNKlIUy1wEVx`REMXr~t5&cdS*Gh_-#@kJtEEq(=ZuAEm5yT`Zwt z_HNECpzD5?=g^lZWX7TMoQLvYLA}Tjo}*E9!W;-_`eT2Tzu{l`kN12@dpw76n7yAY z91JYiL?#p{@_w^Y%s%e9)Qf|$70~P7XIWXKM9cW;fLRj 
zA+FQegn|74W2}qp%coDDC9`~;z~ku1mEt@=pR{xPtGfYOMy_6=#KyZ^gS-WBo}c9~ z-p7y0Vw?p~W$QIg`uZkWwak+NEq6%>@~UG6Y^-5CJ~oDo1{A_UH9%xWm|ORn0c&aj z9?j6PYcfDIyYX}D8u<=jh?lt#1*RJ!4bf@zNjk=;5T`4o>-13O% z4Ze}yTr1DesN`T7bx&DK^weqQVh_s|2t2GN^B!Ye0xNZlj;G}o2r4sQf|@#$8jTwC z9?m6DVkWl$;u&pbCj&GRKn0qJ@yK)RyMROZ3fW;F{ihsM#za?YT)S^vracLS+wl&|uofl9S)!=^A2CTL>2=2mzIztkwPLMW_z*{#ou*Ev+bjB9KR#-Nf3S(VegLNEQgSmo?>YgK< zv}5?xzI%_gZ?!W7rPnwpUEr_*d+-?D>-s7K1TC_SAK7~bp177~_nIxK&Q8!AN9te& zhmrg0dFBEAF_U5~rB6T+(6OHYM-DruphajeQx%a8hpFe-0=3!p7$4pXSM<36(9{K`7 zBQw#hP3sOLeT1vm-b!{{e-CF-kkPDvAl+QMc7uT31oMHtfehGOUBuqOrbo7V@TuV4 z7himyb+VRe-Noi)OHVP5i80P>VBXv{)zK%a=UBo@W?mz$y84l;=a zlHElRAeQHPrp2p817`wLl%Io7-CZ(;e2PpZU~Gn}fNu1cv!m{bwGx`^CTZz+o{ZPCU~QM3B%Xh%$-gZk-~Ls^&|hi+L5XjeLS2cLbh zZtnWp68?$oc)$p1)JUR=DO~muv3b;x1aXOc{7-x4bwk# z4S8p+V`U3jsnZkrt{Qn_GK?9UFUg2jzPo$Z;Gu!HjE%OGsg|MC#?iiZ#CAt|F;HVL z)t$K7g!L*V*-+-9z+UTg)Jk zS-Hwg=eh#l!ZE2UlT1efL2OFr5}oXzy^e`N-<15S9p*ib+wcAT=)vu@xy<+sw7K(X zV}(*u=v>#&^8u8521c}3^<@RJm7&go$t2eOPL?(A&B)I#Mfy52I#T@NZ+?a?-$rmj zAk;FYWj$@n!w5WP7YgGw809>gF)pw64~|0nT_oC&pr&q({yr==7T94%JFV@P5)gJL z(Go6mUt3>#*S68-)u-2{Ht|J!BI^rNnN_S$=|Rcz$SO9YHjr!RnNQk> zVoc75>t+Ds#7uM6r=^YZQRoA0Z`a-uHtt$*O_VG83$B$xP_a|b*FhlgJc60%B7;0@ zE1Z8+-;(*1ZcREc5NA0WF%5kRfp&xK=@08Jf4L6YNR8+U`Fw(NT{^mZi-CUREILR1 z(bwA-{j-l5zFXF( z3?k`!lY|FYzXN#R*4lxLcLygxH2mv+J~;xKc48+oeT*8GZdGH6Rb(1av+TPWGLJv42o}bKJ1ISB42BGF<{^17sOkFd8eOqPk#75;5LNrbNr|Clow2vkBG?j z1#LKnXjguj{o-3*@s;T~zO68Sy{D@~pxcb%E z`}9U5`0QE!<~cn3c%M^>iQ*^(JU}jv*@@sFCLM&D=qp0N>^MPz4l;Tq@|PUq;z_(d zOXbxFz~^?b{R6;r1d#@TYn*Mg45-{1itqz$(Ya`YX;R;bF(~-V^tgThJ_5yh+d3Ek z*z{?zcO*{ZG#R+|#Xf0_TaO7^E^gm@90SX&L8oJUY&Ly$^cwdLc?VEZ0p<5*yz8I} zqF66*;kAlJMFIDwe-D=ria-DO>*9YJ z9$^g;PCQ#?*O^Vg2HK^AnZuHAd@GSkdVK!P4}IpZy_R22*^zI#*86QovdulG?fII& z`QDrJ!xg!nkMmJEKfcU-_#$ukoPNHd-O1Ud;_qLq7SFcHcEL~|?re$DGIP|r#e;p+ zLPY3$qW2=fl$>mif{^FkmMq;h5Zo|pZxdsAXCI>)<4EUDr$8Pt>oA}gV^{@XGb&o& zKm+te`Zltwg0oQ-vb7_XX{#JW<&OqN4vl+VW@>J43mUWA0>kj_TknPsE&=EalKG&+ zZ5F$=i)ZKOV?A{u!*Z5mZN+Bqm=31Dqu zG4pJei`RC*SJI<~nAsW{|7Jd_aPB_1AAq%$oeotDpq^*CMaw=UTMs2_2F-c0Ni1!o zgJcGcN>C%=BIR^679DRdpwT`6jR*ayuq}h*7Jp@%O55KK{Cu1(oJSmQ`O~Z_0h1^; zddxSht;VVISsE|_nfc5)ICS!mK@tEYa{A-kIJc#vC2eSoNcXm3BtLVf~JyyKWPTJ2Yb@{ufHpP4?YL*>2Pqd`_?4{M+ZYJ1?TeSu5} zSo)sZ=&SG5WdXW;mW;<-C;$$#(9idi$4%a6Ah9;jqdlp%@&q?Sy*FM{c ziGE{z`_O@zE|l+x!;1F>|L2r$a|bp8Cm8{@;`Y zxN-eD!2XS#6=A)2vl7)sa~reJOf&OKi7lQd&91KLn2Aau&>+9{s>21a!!%4!Z{>#7mDRR2H7#4JI_98hoIbNyl^F&A{YTUA8=xrlI4rfH;jaVXGn|L$ zEap%>hv|&YX6B%qk~VR|_?*^?y7P7Da-30|5CcflWRTVXb`1{>BQuaW^vNAo(!H`s z#q#v&la!`$F4nLm4zMNEpBu;#v+b5}2<|_60&tE!6S@`3Qlh1Jnv$c`%schfR}(q|fdjLCogZa{4vcn}Hd=y`Jol>eFgR5F>bJkn+6qbx z4N+1LJ?Me9Rj&wQV)<)dNf?6SWi~QE2f-zU&LJ{C01EonENkUg&%mH$6k7DR;L&;3 z#O%Bm6I1lZt=!0&a~=Zf7JT>{IITS5835lZTDH|uZ}Eh%aeGx;30j{=}z(ieH`okgLGrC{ono%#SsY-#cOC5KxTG!3R}cdOM>3`IP)`HyHi|}Qd#3#?4)G363`Niao2Pm zeuVTZ!0r5MQ)=}nw_I02h4aW;*UT~^(*HhvFtDdjX9l+!#o7%Xw5WY2jVu5Vx&zla zw)CE|w2{m@gSYi$`$e`wF7@aU`C<)F`SokEjj?UsdjF&1*}X53DKC-JlzG4&v~IDP zw}LVzW8{1rzvmapkVNb@bCL0n*t!h)-is?gW1X;Ftcl zb10{ozT+>RWxYL&vZn)NvJO(82K8%&jSPX8%zWdc;SJOYnE2<@H;H8*aeFm z`fApqJEgUaq~}^~S?02Z0Ip>ee3rkR zb7|CdYlCNsHan;A!`1m72>AHneM%XvYbLiP$>POLvX#yG@odiS)Lg?dK8bRMdKcbS1W%b(}yr}R37X7w6`^0oG3TU#6Uu<-&|fx&%kYh|W8C77`QnvSoohMkJgzH5-VWUbYG$HyK#j;|M4 zsD#n>S8K3D&MSj-{`3=E-|RwU`CP-uDdf862dHx{0c1-GmU(J`E|&=&nq};|>4(rn z*2@wqX81~D$_V9>zNB`s`g#F9Iy?#%f{aq1Nf!$Osql@vmrX7a9M#Wsr$Fph?)Q3m zW_aW(b`$XFtkr+jRGpwt|Or>g(vcEw6B(IyA3LS3bEOFam39@S(H?j$euUsKYEZh3!N_ zsXE0HEVkj2swbJI(k|9NO#AQwyEfMdoLbr82w-^?fZRYsFFPSzyP5G9$sAXX8(i|h 
zX(lnYyuPnVRoX3r=a%Df2Z3dbwNH+YIm?IO^|PlBXy5bHI6xJ?)$aE&SxZbA1aV&V z51Q#~8^I;#%AM$0EBLM+8^XjmZ6wPQn~=JW3>)A%(ylaNQi(yvoEN}YkHO?jRMant zF9WSfBi2Z__W8@XN%S7`Cb`pw^P$fWFdg}0@Td#juRgK-)ge5NIfk4Uw_sWAr5Etm)8uVea3Sw$$IE3b1vWES-YQtAyovksxqUpn z*SPA5C3ILJdSMscz^nuFT+?~nZPX#{slSgv>KU`k6;dgG+ z%zS0;R9f~cchC-hGZtTPol-dC_j}te@6Ck>ma|WNR2uSw)BZ{ws%O_i;q}Yke_8zP z?Pp{U5HZ8)tvu1FFBMV*6!)o&s{m<@5NixrwU3&p;(8ED*!OU#jT0gMn^M`FD9mOh^@O>8fyxlso8la zh(76nSjvR$hB&utd>#M*qA-J4BU{JOtwVbV$tsOjn9yrIYJ^;DOK?mJBnsa2& ztpKtbxf{fIm$^LrL!f02r&N002BiGX`)?M1`Ll&&aW$f(SIBxyz@W)YSn8Kg`yB#? z*Jff#qZ$vAm`sYlnI*FpC|_X|{R7>U6U0g}W*OAeynDJ^5*atMNT6Su*i5@9jI4v( zqfkaP=9tYcHj-&%RI-8yl6j*Nt>NJnfTHIS(ryKeqbR{JHplx-5XgnHbFLRZ(hSY` zs=P;@v2FM+m!9&C*LmAF_X4JPtF-6Gkp;BrJHb)u2SO?*#_xSBzrC`2Ro*2q3^C%Q z?~y$|Uwpa5;Z^9OPO{>FjEgOdZiTjoO1Ztq0MP8>WE{6`l<3p(i5NW^Ud<@*%Br2> zgZI9Lk%J+^*!OW7H07jIw~cnR6N!di9P$FY2No&E)2>2>2j=FcnPax36OlB^>FZ9i zT8v1lys{Q5TJH4#sdfavV6N-p*3enDikngH;dRn|Gh52cW`FhqWk=9NW7tfLi#QPV z99-B;_ADJv78#Z!01?@k7&_Tf#TKHr7P3=OLZw6H;ZPnz<@7PTbV2|mazoj{2li_R zo~?lxrBggY=#v_jfGZ)F@DK(PS%)VAA@oP!U|Zt{J`&Kd2H+WwIsOG`StmwESHn|B zLt5O~;lMe*Z)Wal$+*Ux96I#Y$mxhx)$451_ zWJ{r}<$b|@Hf9B!(&5&S6hKq%=oD(asniX)DFZcZTt4Y-)^ zX^tO3`_d7iM~pS{gX|m)q7umWDzBlPFq_Z!8Oj;P>ksY8TP0AA@f`V6g76yR0bS(3 z@GrfGzN9VrPa`CmzuwPW(KO`4wYI@$8qNa0o~4jEhc6wQWwfG?xz_A{`)UTZqCxOg z+FvE(`?D|ZP=Eg=TPS;z-IEMX2v&gf8UaY1hbv^MoB>-up(N29JY!bR@bF0SU;Y<= zTKvr~{ubkzOm+I#hz^)tT;@59gPj8N1TZIO$nM6Ga@()=K~7Ys_Zr0>2Bv}K!IX#4 zu|X*Ny=aY8K#nGyU3Zq43F58UNmtE05J7=YXzP{Tu)nXiJr|E&0et(}YL8s}H1(l6rRxRT6wR+NdeW~Adv zMk>acPJ#};I#n8R&6AZ!Uws`jhR>BD22;#{GN2_;q=8w9!R_t{Z4Mpd0B|tk$+6qu zaS-@iAh`f+i3P z{XZ$*eB%b0WYYm~F87ZH_sIC`i|J?giz9TfStu2(*%6MD+4NfGhk$BXIDn=W0wW%% zB=}X=LgpZPVFxhALukh)$%ru!#5Fp>>R;`r-+%cPqoEAi+wVk%l}fWbOmKlHxVG$`WlB4_VY!l!vunE?mD!@P3X`V+8H#)5FIP!lRd3DSJVnQ5&Tm@Tna>xVE)-vfumpsusp6{X>1^)QoX{w=)9Tj=}x0Qvb4-Ld&TozPqTo$#)Jf&4(UPL z$v}eJx4-z}Gqz&47T0gwfEIDM(Sz2!G<(hQPR~we2cN+-0khZ{j7j-yd4*20VAY*E zdhTed#vV0r(t;xlDv6WNyd%3du)(s_%!L_EH5UXIuu*YV_E;k8AIl%%btp$!>Uu?9 z(GMMbv$IV;@Nh`a;<-9J5>U7br%I>NAb`NQyATAhLtD}tbe`Q1wCC%pDt2w)ptYj& zNI=G>ptJ;bgEFuQ9qTsZ0QU4-hpITC)F4@@Oi1QGHYs|~0U21TV%OR+fQ?{`a@ut( zcXHJeIGVwRW$p{||{ZUp3ILX8Auy9A#wJSr&p`n1;T^+qeZzqQ%GS)4E zA5&8k*^wDAn$Eb>;}Lx5A)(7;G8%YRr`KVdTphiE?yckS;s*p*uzAqwU){Y+mzDkM z#fswko42sB-YY)+^tS|r1|p{|$6$hqWfR%~WBN%FI6+3&z_TyLC&R1KpSs#MqNBAd zsF!P2u7Ok!Ue>0wYzvphCh>F2_6WwC0O7%h$I!JpNq%{STxKeZciDp7K|2#18h0L= zGJvmbK6>?90Kn*^4}R_DtiK1?{~V~!Jgg%l<&*OO9rYK<5JMN5Ij0`* z07rF=nWX}z1`rpI<`@MTz38-NKyEY2UlFiuAj`TD+s!1Jb=FJXba$yVsmv2x`rh|` zoCDTcTRH$!dm|U7r^k@fWDKI)1TWgG*Q(BEU8(>{m9>^iI9H9`*wz`q*np4O-PRR1 zxLQT9$pi#}D&?@r2|D*~OV_`!?3o#b4ai0Hw{)`sI5bR9acgw}zo`VHr3LlJgL}v> z7Q%ztx+t&HK)>7xW!7LD*^xKjc)R%IcfW$x@rCO3-LRXXPi@4mo?ZfbWbxCM8H(y_ z4}3IG7TtvHsNZr9yS)}YuddYYRK}-F1#{+39RWt|ZvpZe?PYk!+SryxSzykVUrrYd zS8hhW3xFFO5;*A_zKX2ophebA`CxFuzIf)3b-gXsWR|Dko_6j5bNO;^8h#=Hi$1kN zAH9P^#r)J1!O_9uZ+`k8vW}Is;cfsr65V^wU<99;Aqv^$Op88(0|Hf>E7acBw`P6< z=0GFvAW%o#`SRCfdyXUTImMxI36>bt*hTMH63n17EQLG=J3vmV2g)EWHUs=1Fzy-| zq)~Qwo`w3v?CfQHKka<^PTinvmv&5W*v9`b=+x9erf9bbCP#q0bNCU|Y^HP8=vVH$ zv(bTe0=nswMF#RrZGkRi)-mh<<>#LgWLYVG{Kx+kkd1oN0DR8FKDtp_c6{3M%}E>} zxXqYNDB2_d_xeOzh`P&0jUEc#Y? zZpJHy;m7;I{T&u7NQ!kgMr9;}|yyY$OIV@ec32aZu&+ud3A z?&1-;lQq+yuM?Aj{$Dds&bRh}r8>-3-$5^W*tg0{`TAN#ilI3@JClULHf$RCLpvCC zPf-5~>qe3Xw4wZ;p8-2UXHB5fE`bMp@7o_`N1*|V@fXk8tuPltHu=q6Kc4F#AbY}? 
zmG9ag`vfvVN6fE>NDH8=uLZ;Px8>)(Ep~aV%`=ZAp%8%CJ=}p#T}N~`_F)a>g#>Zk zDW1%B-WxzqB9e9ivd=RaOfa{d`OE8(NW(9?#qg{Rz$Y~b9Wj&xl z_1i@PJMyvoXz*y1vQ(~tcH0WU8Rs~`cWBBoLhjNKoYr@kA<+2n{vG=4PS@sQ@bXCL zxfz~t$D7G=5|(zK`cd5IjhkfQE;W7So4Ki|`pozN=200hN`+9o% z!;9-=?XMERti?`IKVD=9ihi8(s2!Rz@nn{@+h$ziefsgPzu@{Yi5VyO>(XW2Icw9S zo3KMI>*5(U&FED7;x1Tqo_eY*uY~Ud|1x2$Ip!94Pw7i*|6v~>Gv$Nz)g|bgAh9xr zAf`NG0))Cz+gp4?es|qH=T4rpZ}!6yO$kydSNT!)Ykz6SdtI4kQGR#Y5pNT8uu}+A zg({x}{M7(b67W6dWge>Qtx1%R`R!c+>%5mQ%$}piw8MQ0dfEzLPKW$nXVIJZl>bh_ zzVydu^1|<#424IY;pL~F2PCA$bin_-j^8?dXZLB~*tA>g`Q;bo_!!vP0f&5$Pcu%z z?9+D&ymUnZ&KvvU?39LNh0O0DxauDOdPd^4e#+(LtJCZK%)X;(^Q<(^cbTZNZ5M_M zxQ=;6Fl$JLfoROB`1lWB7ysK&zk-=CSa|Lsb7sa+ZjAW#{;bP zc66A`=w=Hl-&NHa^`i|K;gx_R2AdHh6}AH0?KP_^lL!tEyYhtelNe$OV~s24S|Dy6 zphPCM{KDhs&x=hA2Qvd+!SsSp=Kw%l>#QL|v=7#Z?IsFj#CZj9!nPA3W*oH;d$uAm z8)gDFA(X9+ZU$|4JFgM;lhZTw6=8z_?CUEJlC#F1;JXKA2#UG&-;>KVBF!wvPfP7Y zWEBdyF8_AuiwA32YblXJ`lFGp5Q=l={7A!e-qWL{u|u}2hP0SR12Cau`tNr#qRy|5 zrv{;pkAl){0gVuOQ33A!okxHRG$~XxC!7q8xMUXgYx4h45Y=h6TM(Lmz(*~b8P@myT zUK>=)*!jSw!q+x=$``gJg$uNyp_7sLd}68i{p3mtP53GJ_=WGpvx2*_=8{~B2>9yFM&8t7mF1675kQDIh=HNJbgQNYyY6fi%e zgo2rB26!g6P}+>mkt#3+%yl3P4k_@Rd)L}1s`Y?Z$THu;fR~1H7$b|OaMnpth<|DdBjXw{9RTo+W}x-}z?1QB#-;6Ns}}Wcb$aSh?yZ1xy9jBGB_r%d z3U=s31&NCc9RfzvR%!SmOIg27_;48qaetqKwE#gnP_jA$0vP*bV>(9r-q+WU0s)-H zkkXKAL=kS$SLK5AtYQ}|ivs0&W?25<`#;2xA({^{O|hNJxe{PEV1@{dgKjO->!Vlb zpIf8J=*L+;qN8m~OQ|>7%vN#dO>ofdjc?t!#vEfDvpxd2I`Hzk*&}8dxC5jI{#Fh= zd4clg4?_seNv|4xuaP6tUo|qz@7&TF;)@LC3;KKZNBZ%*=nP()i6<#5Q#BZQ$yyj? zhF{e~@NfA&cS73IiM;8&$fu`doh+eJ4{;HX$Pd@rv1BmVj_pCa>LQ&p4Fogkq+bmp z#}?&I|DwMfC!eL3p)!G{RR>Yp@;Lp7Q;96{QFH0#-;$Y zEDu5k0c-66x9TaK;g@WUE%i}WlF!M*K(APz&&)jKqoC+MUrbJLcpT*pH2U0T>3IP1 ziAJ6~DjbMBGaew-z=+_Jpziqq_42LGkorX4u*SXP*2r zT5H^^Z*=rrc-LJzJV2L6qC-d;*Jjl?PtthBIp&0ZC<8P&H8Aa4Xk414zqVm!M-B4G zd!$cyzS!kWo^_G-%>1-0n|)Vz^$%Re_=m0p>}KYYCF<(0qiD@87w}3@38PkhCfJ}e zsf_?9hHlmU(UH6x>F)!y5yU+z9z1>$gI_TDTkpID00dxt_mkq$o!_(Fw7Yom{CRQb z>#rlLl#|!4j28ddfBw&l_rCul4pwU}`YyE+BR*;V{GG5aK0ni-nD5IS#b+p;BH?F@?^pJ7o zZjf!{xOLA*hX;#dk~J%=jZe8NfWYM?YDELKlo?DOUPw7T41a-t1D>16ef3=5Kwt5@ z-~EUUyd*z#1JHjAMEK?wtg*&BAeam5V3e8z9}^1AO8>$ew2aA|vOS z7I>)&+mmhch5$^#002M$Nklpm+7y- zR2w_#Y~$l!|1ERCSVoZLtEKF5LX%zIj@z za;S$LBCey(yZ~G+^1xjW+A{`P=H_Qg5CU2<3wwe0(&X7Tb^#J)&3eDJVAPF40TyIHG(ZSWqFe3i*El%-?G*H`~`fbOMM8dtt50Tup?oX`7w~{&J`(H z(5se=%vyL@(9r-^-@p~-z9!^!Y0Ah8p5L3~UXi~ehy<(a%L z6W;ik@|#K8;xlCmzYsnAM_aaOrl)PnXM)uI0BY*ob^5GClQ-S%DlpiHU8s-ZJqu*s zYoj}NvbS9`tG0t=1LgY~b3&eus~6d8f1EevmS+xl{#m3TeO6YO%wYdlSs!;jYNvUR zwv%@Xux@bHikarhA?{JukR-*=JXCW_jtB$aCIuI{SthI?$!l7V4N0K>101crkE34$QV ztGvn^19@R2c5EbuoghGf1hL`4Y~Ai2_cA?IJ+*bIRN5Dk;wtVUDT=#D=KHx0RpTQ) ze9m*u|9}7Q<+`uscP;n(?{=2KcV|sxirl`Dei(hw)i-B{+vn%YSYXf5#qe%=BYjrH zwRw{jOBSvxVt!ThGJ0BbN2gU1#DTRn9Y1y`Z~QME@_OfESrE32%zs(4oYQ2NJ-pPl zm$prs+So?#z5n(&Av{l3BXAeUd4rRoJ*aK=#q|JTPN3~MmIQ9>ZExa9bYQ&H-g~0! z>B2kPdpFb3>=R{_(6)JAI-bj-%wZ!M*^7?M)<+NH5U&6D6o)&#`F8d$yh1jTjEg0H zQ?^xqI|Ip@2xz3+UXLdMn~ywmWN5%17~Q=-!Da4t|GCpAqs40-AbM!=;@4kYJo~~+ zQ>t`J{MzRyPkb^?rzpEV6ckW@IQKZm*QQ$-yUc_omO{RUAGCH8FxMP^l|8|>j5=caR!@G|{4d*;;Wt{k8GH|u+^@^9Uz zmifcbR_HI zi(4?n=C(bv6%UGMB(Z0n=}p_|7tOaS0?wl0AkcRa|F_|X?Ujk9@A>vN2d+{wN~1(v z(ejPo@Gt=ofO51z&t}ubhKAOxtvxt4T796`;a`1Jn>lP67n(J2G|XPTzcHPr-i~2} zxiqr5z=H=cONqqzrj=S6L%D|ijd5G#spsk`X3Ot@v0CaqKtOZMwLa^s0V6!3FGQ~U zo~NxDTz>CqeK zJ7d^xsY95|PwWmwR5{ zFPrJtS8e#}@jSS_x%}+hslc`4d3vPjZeRT5gR_hO=byc|xLO2q*yBWb+I06)$RBk` zA7LyyN^NZhuGAt*Gw?}NC!)T=h{%v(G!Y061=^Q#MF>(}5qA@qx4c^50qW+6f^XQ? 
zn^*B%(NWsM^iqRo84%UOM@0peI;Cbg56m#=<`gpoYHf>o6T~9$9|e@EGt1Did3L5) zFe-Qlfe0&NzMJdQjRYUY>;&qEirFGhsAs+Utx)GNivjRo`K#RRn8Y>X3$ zRokM+?iE?#*+2gH%HkjX*-sb0|Ba^`_rdyVf1xRKNvUe@bv+}{$B%qJ`!-?h@99DN zE>LIw7+P#F&GYPEn@3xHIZ>#Osnj#j0fE|VbWQj48hV~LOQth4t+^Z{pLJNu$7jwi zes#G>ghwJkFD>>INhTHUjdwq2T>{OThl7F1YVJ}jccfG}%;9{rbT4FvBE2r-(uvoi z@I-7}DvF`)68$8IJHS(v+5T$MZ*`b!??ohUYad*^P>O9m9DqBTl70uI1>lOvm0rGb z)s@BG(wAj7yzt_4&3(~%MXI95ODWdYld?}}JxI8TvJ|l-B9#)!3%#xbEH2+$f)5m7 z!szoaw#*)KNFrhL#mzvOlo{#!JNxYO=uDkWV8EUBoq4o>9g<@2P%?1~-hPH2u&`O3 zIRGZT@XVpb>tB0iaWQ9s`pJLr!#`O37rz;5FjKRZ&LLc<|3yY z?&ou5Gzg#V2}XO>I!H-nsM_P3168Hcoh$9_(+<~FCTVxZ5U1waA|eqs1)30&imKGi zmaQW5a~jmf4Ute!te^d_RE(5=wJLAsP%uU{*+ks|P?WGy`*ikS^9-yd43|7~305$= z7)|a)z?3lb4KJ5sMrlF6+Q0|AAE#hz2#}c_>l(WWsr#fqr>{{aZ+&y| z_J(!1suy0?W=x2+or-~v7 zG;=81bI%+ug8WJj(}0Si=gt%)?!JVT9n-N2VT z8y$VwTC8nvu1Thgay)wcc$qZ^;|V)@j>E5(<&~stQjyG%AF|WlvJRADG9D=KL~G6w zGfn{W<@&~ZzNl58GF^*&r_BHOr$39Xi-Ha?zZ8vp@~3|?YX}qtx-)qH=2yR7yz|a` zi`T#X?QxzzefaR=5B|>I@35(-0y;Wivbj>vR{H+Uc+tI^*NP@A6X<3%THlwctkvT^ zdn^EP=R~23@DqIwK=~v(*MH2z*(Gb@kN>Ox{r_R{JHP$y43!IW5ZA5Gi#p3N0U9$< z7wK$3rl}7aja?V5-q-;aDB$ir4wmS$^*a8^(TtwG6Cr*&5a3L*^6&lIe{b}(x|cDP zEJ=1mh(%=WSbVQYMVSWgy#3~^2l_kp>FF|jUT97?FTV4=_DcGQJ+iI6s3zuvWXYuf zwbN%#F23{a?~a3XoWfnB9_{LkyQdHCFVe4lSpVmYm)3?>kFPM!y-+NfLC2ryoaR>g zvQO0E^mW#Eoa-U#V4mvN86t?0`f)5`7#O(Xrg{8qmjP6Z|X)b>uP>h$u&HM?7{FS@~K7R zl=YE_H2jSt+IIu2#2d(0WpY05z{97CQYBZO$?+s=-98z6tNYhQPv7o9Pr)tr(v8M@ zG1>n+zxCbW2Ol0iKD_I8ywuK95B{@s7zXE&BS#jea>Cq?znrXG;r;d_AVi>m6#LIQ zR|2TLyFEe`gie0O4Y^`^vs=FOGirZUFe^#fz_eGmzzYx=)8ijxBR%@zbCFb>Q&6p1C3?>&YH1 zRl5vZJgBzr7epblkS=qVUDAI~?k_qz*@6CkST@SP_=`VHPg+K*?$2=tv{oW!^!uK( z@6eGlAucxF9@BG-TQuN>qTRPgYv<0K9^UWsFPje_xVqbT`J6G)T!_S1dSz`vJNUIy zFQT2#wui|R^RYU4?JzicjPs>LVBcT(oECw<2N>KYwIO zh21!>G)!4nmoG*?52nt$eWi4REX|eCG*DNm6!UCu0Padz*n8w6nzt8}7_|54MF2YX zi!x;#;OsKaI9udx z*R}DN>x=z|(p9R}?q20cIr@Qu90h_;%+-^Xh@p+I4X|6ic1goPdQQ2CVl8TWY{=$) zSJAKZ+bLz(I*ZbkQP~tWU%<_Wb3ipfhGbN#g{^V^;-_U~9IuaE*AF>5P9lG__eXbZ z?nmDD5gMXb4BzVgGIBU7j!&a+Hfpzd=)A|LMXf8n3IJBy{rIt?qZ2nR!=C|oyLMy; z#PfYWr75&gPw~?q|JDCcTPqj;;NSnB_q#r{KiTi}LcguQ=Pc)%^QFMNqG1K!8Q9v^ zQ7{BO96z3eJ)@7Q^WS|yZO%Pyv8~uV9C6z#u_73Nj>93dD}L#GHDd(!ZtDlnM0>{O z2d!F@EzyCfU_AK!_upE4qH&^fM#|p$Je`)KSZNd>FMyn5 z@U!%Gy5p2mYM;q!ruQqw0SK14X>1@;TxYYAUCKN; zP+3$vu$WGQ=LiU(3!-n)`pO!i4RT}E{jw!L{;>A}yvZq1(33eFUF_L^Xz8%A=USh) zfAzC4qDN&?z0&$`FPLuqoSEcoH9DKN@f|hJKWnehA+~Y|*0)Rb-QlZiYnQ%wyC6cD zvu%;Z?lLF^rpB5^r^8?DD-Hd|kDKFNQ2hqe4HJ&(gyIhR|>4ttM{@-RmRnF!wzpg4ATYs8se)6H9J-dvhL zIEgWf45u3{vsvub=C+IV2|tw$hKCuKm+#iLsjRp26|5nh(SAqo?QoZkgdawX8LD;J zE*>Fe0=s(*D!;!1E2Oofpyl@?E^BA`&?j2Nhp4d!_+~gtUom#34Ug>Q>OSeZ{%z9c z?o?D7 zF=toQrsg!geCLnG)425CYZyrP&zM}?PhINs>3ARXdtQ&nV-EZ5H|;(CcKTh@o;%=9 zZ(aF$jO@SuCx5y4>krQ_wj>DE)BNz<7ZFAZN+5x#2Ev19a$`)9;3J&W9p$}McX4G# zyeL+pQUo6Zf+2x1ECE0U5bsO^D`n(niqQzTEL+Tm_ed1#rzw0qK8Fq*=m3n@tLb_! 
z;Te$9nAep$L7-ufB24dQl;}UgGgivN-8?d@8~?geUv~tCKdAlBicpZ&cROabI&@1v zUVZuH#ar*aw>WpXL#I+cp3G3Z(B=l%@LU62Tz3}}NKv=p&jqMa;wg&`O59r?(S|f* z2lZe)40#cdJvb$rvb0R~8plD5WkqxJMX6#8m;0eL=Px8gYJ=fqO?d1HLd6{D1lM!Pw`=is@gS8ct7|Dvv zeB}wgMr-|pmY}Va=tafP5G6H=~sov60fw1dZZ;{ax zAy(Uj>Aky`qUW2{$Vy3Qjju)P49Axm-P=K7 zDPAJzMYjXqtp(*5rxkfe;3)5LF+~~Be;}ik01`PMLPGlDm5fG_%xaSpWcC~-3XM|Q z9w5?wfCUsJN{jCtb~x9@^L#$E9sS~OOTu1)$*yI5QGh1haRg$t+7|$D`^3HOu}_!6 zbIG?U#l^VIwlMFM%PzDi?eb)czL8(Ox3|3*K04^nl{t!x1rX#Rgf8#^A0epcv-Kpr#3Uz^X4MvL%6GZUK(I`|9vfv^KBD^I*)tR`W9oZ(Sg(E?%kLXZ(hv zN6D0vXV0`J))nzv6jO6ALg#$*DN1Mh(9^z>8TdSUmAP@}Zi;i`?(O2{wt)5aMGKRG zQEj-MyRr22GexYOOD3R+uQF!L`RWvz%k=}OX=)gE3_G>Tzl`4jptq$Yaon0|2jo6{ zxIF`P&EmD!UagL&QXgk3F>q=WW&4JrZ;#wB^F;kfp!4TB&!mQX-sjiVUazn53(6Q8 zx>>5PwmGS+%kxK`iDz6{9C_~W;=AAfZfUZo7XQ_M_5YZ%1tQG#g{-u$0|bq}VAid8 z-=YmaE|__5P22BSrTt}%tT9Dx=w=j0)D^E!86H{L9#Aq0otd9|?PL6za_Laq70u$jFlwEhnL`Q3l6 z^=_io3~knFN&iY*(OdRZX3CiiWb7xkcXm7AN$-!tNStim7FK{5_ znl*cJ-rO&p`E&0?RF0`Nts`BCqmdIC7$MWp+S)Dj9L_iU$i5t#qjm*qC}UwQ zyKHgv*zs|y;WV-o9yZ>W{vNBJAx&eAK?)90|OaOyuT|pGc zkldaly+zQ!% z6Z>1=dVPX+-g@t&#nFIjymoI9amSAzAN^f6>Gc3oyyAAX*q^M74B&5qZkkJ=n>wX{ zwJl|39De5cS^K>K5)PMr`k5EHHytC;h@+E{94U{^@B9g+EdZu$0tes9$dqMD{z3%q zq)*{b_~M@U*1;ps4ygJd+6Ceq48(ON3!0Mka^%_Pqr*ruI$EFof#1_N9H<98;R__EdUlQFN`* zf#!Ti(eq2&XKBC5Uic~<9ZktPJ$vTl00i2TwRg33t zD;Y3{;YQ!+44~N3F0N%0TupYr_Kj~hZyn^J^=d2>;b|aK6to@%_8zE z+hQ$6(!TTFn-HA%ak8ogMMs`#4+47d_=#jr4gvhx$g&l*OCL}U34gp>=BvP`?KyOu zvv4F@qFbwlZ;d=mpV^k?oQzV|>diNQzWA;0{Z3J`@w)h!fRPAnc;p0q#GgiwYs~l} z(8>4g4182p8=KEqMz^RhfX@Hm|!XJnAOM^K%^M%7}c zn+-iR@0&Y|LqH3?{z^`&E$Mjnh4M&(!O(;NEg&FZPhD`fFZpV}cqVA>dt@nw|+N$<5K;M$0W?B4E1=1J@dpb5IVYJDrTDa~2z; z_QxsKydI231!q0E@9AvKgUxLvnsRW60zOyv3Sgge&>V3vnESfZI_Bugmb`kon&s_B za?IZxU-m0!zw8Uo!e5lK`t)qUE$!{C0q}Hn>!fVS(&9r zLw)c3BbjF^8_@CLQV(+cDzn02eK$I@4(n?JZdLB<$PXQvY_#?R8(8Is+vztqJ}>aC z2zoqCJ$=~#vt9aof>`Pwr-!wIyOF=_!og=cq}bkqBhZyjRjYq3FuzPLbR3Dbh6VK{ z&k@osvNd@* z^rvN~zxd7Hn!R-y+Gr1P4? zCo4f!?|N=Rv~{-stv7vQ0@E7%o#ga|c<1xay%0~>(tO5~qt z=pnqaET%mN##zqsg)WRo=C5dRj$kFx;4t_Vtb-$ZHK$_zBERWM=8Zn@8JxF*AwLgS zvM92-oSO#AWGC3S5BmPg=ocojW~KV+Y<+e(8*HhkC_RcZ({XOWHH-Qmy|O*449qI! 
z7`k*8bbhvTeaijns)s-2wdN5nx642@6*=_o0|DCSB+7NJ)mYhdipc%X-(VH zDGhdgzW^ROcQ?iHFW&fY@z37+EJY!MF~Rud<)Tmt$L_hECxDYQH^2U5& z@anfBbWG8sF)%&Ad{?z71fdOXUDrnNhpSlx5Zg6Cbep^l0)V;|0lNm%X9$ZD*cgN1 zNxK%+y%M-GgFE>eN9vMTT$=;we79_j~0fAeBKnGvs6B4#8q z?_#xd8PtrQ6DLmu%D=GqZqa6Mz4Km7Q;qPVUi-P#901cr>oU{{j$sxJo)Kr<){X*j z|AB}d%(XGJxmGv#`->#7DJek|HZ(wRi?*=-7`e!=NxA87VC#mAwfnd0d-w4mVp0sP zSi^wnvj$zerth@zsMFxzxNv*%-~IK`#ecYCnF?6L(IBOhWq^l&}(1&ev#iDqW5Wsa>eX-X9Ruyg%?t$JGJ`Erx{eI+D*vl_pX!F%+qoL!Ka5_$Ac6Ynh4T~P z@l{&Q#^{X^WPgaT`Q+4@);jPwQ0dDD+cKhgV>`T3>gLD|4yU5bLpdmRA}HKzz8T1r zIZEZ;z-XZ9m(f6H=`EhP)6Kmwb?L~QJOB%|mB0LQYiaY(j8SnpAYScLsac|f>?!n0 zI86PoK5&2_r`MnV=x5RKrxU>gtokhAac}!fs-%5<;qv97rF|JDPR|}FsdlC&SHfpk zMhXEgf|FOCcUHUBk?~JZ@vfphQJXJ3``m!c>XPnFxB?gmq#HLow6w@Z9$cWi>nT=% z>F1s~vN-)|^`8@r=HmxNyfHri{4f6H;!boy*jq<^L#jXte7;NLu>h_1b|oUl0cCG9 zYMbi;#^zcc>m~4PzcDJzbhi&}+Ph2A?4I8I=6%=B1MPvX#n34cj70o!_E=wt!6s0B z?=X+?i1#`EEh~WX(#Q45)`_%IV({U^c$mMn(LMGHg?e6%PoL2!N7h$mHb@35 zKL_zP<~c7U8j!NbP}dHALviSjxla4;Gx8PqVZ2wA%P9AKrnWMgFphh*gKxk0@kgUX zJr#YZ?@i$m@n>IBzTHo$5N&4f4L@tlqHc`Q-ZVB5h-x$9+0O(JNFTh~9x+Y;a6pg0 ziVj{Th4yOE0&7xMKQE&~`{+cSU`EWL_B0u0U1d1v$34!~_%?;**?@vW2Rb+`{&@7` zkE16v(?Kuo1xAOIdAwQ_o_<{oRBEs(Zl%qx5*=PNYHf*t{XC^}|NaA|?q(Dv-$ZMm z3lWOD+Y>TYe*MNT7yqaKk>bSP%v-g8esT{P(@MJHcbe42qaMnTt7teso4jIdc6%jkiAHX#%}P$GCQv!$M}$p2d#Z|Mcw1 z#lQT?&j+w%SfGh-edoJ5e=jZm7yrS(Uo`OU#ee!={NLy7lyy6*$-V2v`t^KZN)hDO zE6>v!z8XK1ZhrdcN6|uan@+?*%O{udGL_ByiZNE zEq&$It=hPAZHy`g^|?zT^;S;w8s2VRfh3}CeQ!Si+t8ZSREJG&UP;ylwr*_SMtf?C z?(P|Q10&{s#@NjpSEuGE1O85QYhNmD6UQ3BlT(!5Bx?p>f}STLzrM&$vgWIb5Z#@l zIii&!=DRFJkwl=Z@6J_E3Fv=vHJoK@awt=1oB6`U!%h-1rM#RoX6 zK0SRZohv@voIRONA$!MxyTJYZMTX1nI&Lb4syVHY@~G=M1To41yF z!Lu)XJ$>rJ;=}jeCItg8Z%tiW4iTmAM5C#FeycqVK-w5j*%(b?CLF;}e7<6F`qCy3kH8edThFc)m8hmDuBg-IydlH>I9XBS6qqHRE@jDu@M&I7tR z^_0yxRG;Tse->@LxqGgqZ_eaI`)ZT3#gu7cCtGiIo&kpT9-Ww8aP*`1+Jm1IX$pK; zId&F%h=V|sta`ivQnlUzsQaIJF?#Ay@NArsv(1OSBqAAzPClJKcQTqSlkyM$PU{a0 zdSda>`|tJH<*6S(b%LYG2`CGI6!j@1^j09z%If6)lmFlUyNKTR0$sM}1Sv=--Pe3_ zJOT!vN$*7u>|wwn9bd}75;y3NZfni(!R>)e+W$xY-~T!g`b6W;Vc0&qb0-?;8o=kg znp+MVdp=TK{Q9lGnV>FXrX-Gx#B)Z+O?G#kc?9~R$0sZO@>Dwer)AsXT^!e5987o$ zy^2gRXEN*DrxrdtbeYvrE%Cl*2Pt19&;Upe?7CD$I~($gc-3UgJh80hYz^&Adh*!< z2-d$CziCa!`5Oi@WNUWcjTQDVKEJee_JmQa ziQlj*l7OQ_nU`gDz}Cs;GXI_@$l=q|r$>hzusfT|`iyfn87R0<022N5sr1^<+s{|7 zT%0T_QN3I3v1ne9k1{44LYLT0tvmaN+`JvX9UhflV&AYgR(9{$dhvYOYcj+~7c|fD z8oI4M&lwBt7aZ?)cBK35G2s5E@$VytkK~Xjqquoe%2m0e*Ixhb?BkQCJ}H~48t~1r zXV!`Kdpc|MwlSR<$k=pO4?6jgyui!pJ<2x8mcE<(#CNPYzIvuL6PzOJ3Ye~cC)}ptgLQ_pmh;FS1d!izpnG(wL@NH~&{+DBgVG%|X?(U3 zo-_SvK9|qvVAtpp-QDXfu$y0W*yQJBFlGPPbeuP^Fz?}3^hoyKo!bSIb}bntgXc}GFHyf4D9Wcu5qWR^FwRID|CUPiV9MwPT$Q=51zS?>U3>!6eq zfP71iBl6T9RyW_=bD+}G1RLPl?B_X1w!aS*hyho+Tz~1Z_9#q`{-h0&+8-bNaOCO! z&JtjIkahGzPD9z|a0@gnTVEg=0Da>|+3)F7Ev&LyoYkKfRHXbbN2jQ5`qa5kPlr2P znUYNSkYJ1ZtucBdH_V5^{9Gy3rT9EL{?@>faOW@7khwa3pcS(iyC? 
zY@!`ITZ{GwhCZhZB3NldW2n{RrLk^*0T*KIp4@#i z8sGb%4w)1N<*WxeleeVv!j9|H}2HtoC4g|+K7ZMRrX z37)Rtz@9bV^lKP*AH)|75HmKO0SzDbU4H=>yrbPP*ZE32GcWV|t4A*HU6zQcPd(Rz zDdBEd&QW7*(M*Kr&BS)iME-VF7q$8i7k~1z_ZR<{fA-4^tGw6=Jt=d`hc1oGI|+8t zB3ojpz*dnS=H0<4Qq}eqsX>W8A43$KvOfiQSH$;4%wubZUv17PxLSI}tqcz1nM3_z zWCIrHSsRJB>rxEw!j<#0ZVpwTWT``iVT;%sZ(#%t47-!!&-?ae2TQ6MYt*7d2;rx8 z?+jSUyL&tEzRlL5v6C9IqEZA`@_3Ps`f)el@j?bRLifSVfX?xZ#T2A^AZ(k3=k>_l z{fljVwsUtW_$lF1`YDGT2f!vZXw8|(Q4C#VAy40cT+3-5=71sV>oB8hw-%Q~(L_`0 zd7-0o5fQ6P^Ux0wlr=ewrdxjVB1ka@6u0hNF?^1hXNZDtN zfkN?Rd;CmwE`NIT+~VWgDH92XOJ@VS6OQVQQgYOif9ZwqHQxKBSbZ3yzL9|5my;kN zn()HIq{cF;%}p@N(C?<0*kAwbuRG7@;f#MhkkjW_}1X zg7s?YJCtsRUzks|D1qc6ex7XYfRCF>BNnZu?zXhskB)vE!+b6wTTQs=opXoL^yPyT z#sns?X$<)~V(!tD^}Sux>a9G?qE{UBv^7PWakeGnQGL`W6G*(glmXxlL8K<2^_?`o z+9E)fdpl=KZ35B^_?aU|W*GBn?VXRE2`nQhGUcy_JY4lN3ZIUn6nIfV6 zFEgGUzUq1AWoWx-@~L_5>iCHE(aFk`Fv_dGyL-}fTURoM>_RhJ8oOHe*HVnkr?GTt z{eMsm#)~O&MHIG{%BnC}4;?r(N*7R}%cdWjQ&_fcJs9t==4;>#ut0Q%L)fg(lNtZd zz4YSZlTWH0cnAA z(7gN1!Kxg;^d$hh!-zQ;0KoVW*=}EG$7>ldFdZ5?o_@xBfC;;I>{-12jc<$tZ}BKz zUmEnC2RY=+miXx7ql?o;cX8khPtn_&v40Ni-@iEY^x;6j!=)sjU%c_lpUuABcVOS* z-}(FhP6u?pxp?Qp?1k@s z{gZpp4x^o$x27O^7U61huWIk zx23z*uh+i)-B~9_p1A@Rd=>z>DsX1U{-;~{cu4=Ecje`@r7u7;hPnBP&`~znsPJb`-#AogZmGZo_%>4@z@?X)@SDX@S%OtUXk#pP879PP(*8@ zeBt#n&;%-M3p8f5GVJa3(VODkqV?!efEoZ89ZYb;=0Jfq;qUlX47=LDmpWi>oJ;t2 z{jkT@_I>;WLL5`I7b>px4Mdue#;+USWcYHrv&TLZ`*hLnqJSJzr}E*Sza=M)j7 z;5yFVB{;|yDN{C3;C8`69DEy-4JSYOIJ#XG!01rYbl=v63=zS)W&7?ja1Td2(Q5O< zc9J=vZt<0?7sqB3A&u_M(-VzT2FqtfZ#&O{-mxLwK#+=g)+Rnp2UDUzfDD@EAb9`X zUk`l&;qDfZDEe?`0MM(ier=%H(G$Y!7zI@7+|>}o}e{+7M+i=X_<#hY)xljGv4#!*m5dIpgF zTqUl6l>>}NU+iCW zMph4KSHG;Kd)bxOb5N3f^dn^;0osDdoDJiQBD6~fqq{qBdtU)JD_bjvt9H|@hvv?l zE<+`qt;Y|IqX+UsP!WD&F4qctz{MIVJx$YjIE=^(O5>@70X;m^uOxL^Hk^KIT_ zky}UfNgw^9vvfx1uMKj=I-Nd!viaT79t<#TUeP#%z}~ROwyjl;#X+Roa)|sY;I_K+ z>6PS)%=@}F*~)Q(+E|_?hKQY0#wM~BfL`?HbL9&4&wG=Nf4L6$=S%}7nY`$D=L5rU zS8n68_7WQDqrubm>^-pf>Cb;Ua$v{Kz1_EKoRKp0K0SMCK+OGJL&q4fy}9Lt;2?3v zf^u=IS~EO_yoi8iE%eKo0Ww3C8$p}r&YbG`;X$tLd*zMn&9A@m+Cv4|IWL zaANe`GAV_I&nxLV&i(j}gVBBIx>f5oXRAILjgh0W#?%{+=La}1F}2nzx&TZ&a1#xh zOA*T30#IN7b~Zr{S#x49%W~_IvGLVB$K#aE+OTnZGHS;-R?Oj7_U$-3n}bd97-axg zU9Hqs^UUURi0$p0=`7heDj{$%dM;e{Y$dgng5ul+s5^&{vv2_UbZSnpCV%+lELHRg zdjOu_PVW!^xNqNq0k+B1eFZY8Za~hl9lCYKNd|4-xu<=yIr=HsG~V>JS6`n)xIb^r zPn|pwMpzbpXGfS*4kq~Hr7$b@8HcP=UT9Ee@TK10#fx#Df^3IpCX+nSc?`;k(HXB~ zV*$u*6mx^`uV0~BM!~(w$6N6LJW60KXI9-^tluSwpa7uO)tZw7qS-lHJV$9S^td8D z5q^N!$x&qDE66;ThNKV z#=X-prP8`~Ui})N-(`KRrxtyAtJZs~A2CuN&eQsv!Qk&D1oODH1~%Y``{xBLA!c0L zYi{YE+FJJ71D9iH1(wg9CwA#fovTtdrW2l_f&T6Jw~JhP^Gr2TZvZ*e+mSvZ3Z@9n zl-%f`2#q#f^_8aHb<9Jzc30#8p=m9*6`eu2z4^{t14>@cX`n=q z4Wc~6#!{`-Be!t~Zc#zT#fVzcuf+pMWdL@KzAMAQ1NCtVX%~%fr{qyu0fHi@ZkC3< z1Unvs4-vtN*44(HvNF-}>lsXk4;D39P18^Fr1CoQ_+dPZJ`a2NFr_#nNbSsj{?YBl zmjC{zi~r5P_jku*o&;!qmUD!*mh|xtO-;j_;UvsSzQ+SO7rn^%T7 z0|;G3V$(5^Llj3*qf0@R5>qW<`yeu2Jp1hPLo+v;Pu_RY)*>V>Hs2!Blxe`s&z?Rz z*QqNKPZq)W84YT1_s$eOX4+=BpT@Ozg?3WT>f_F$UW@TcDJUv3#cW^m^7O%k zRrB#-5jG<8lo`PMd7F8A8MVM&AfBk)TY=f<^K3i)d;k7Go$eQLB--k;#&QS9*_=55 zMC6__4WcQA4tswkp{8`iM26OPeH1OT^mgabjc-t)8K@i$A_2x}6^&eJUtDjm5jvLw zNu<&4*s(k3*|o)nq4!k@+$Trz=B2;hSkx63O~kO2rg{Eihe{<(5AT0wfO}CWfRjnR z59mR+1A;e~OFHdM_fnt$jc5}H%8NY4Vb5VC49NYM=12Gix>=xy31j!#vuo$G=(zhS zLE7=MhXIWg9iI(NcP|b;ijf`E{l*=3H*=~!+UnIAwDISuOC6uFKOdHXqu;iXeP!`S z5+IKLp7+2(Bk8jNMY~9OgRwXsUo(Jt?~Q8+%k}*LNUa^ip_yp?suvu8_Oinn50vU9 z$m;&~bbJ^Om7)#Ip{RfPFd#DiW>YKMCA!#HDRF4pL9-$NfUKez(E-H-NJNf`X1EdW zSi7dokiZZgRLNPt>K;Uydyilbknhel-c4z~k`nJU zc71oKvWV_Ub#5jo`XE;BO0h(fV>x{i*`mQ84_O7G+Eqa&vT8>fvVjVUEn~q-y9J1 
zoCl)(7!h+&XWy-e{>Mqx`s>%UJrC1&pl{z>%v#h}d(7fK5l{L&rJPaZT#<>WYftDi zIy6pWH}6ZGXeoO%?a&}SBli-5DYUYJdUkCKZpaTm>vZmHr3W&)(Ji3Ebdc+HbAB|v z+&kUNQ0YHL7WzGU?4!mT&2;bXz^u#ZE(OzCgaPJBM|sWgdXplj4?cKnz`MOqA6b0=cYiNKINIL5D}#N@;@PjglFnE9>WQ)=+B4?^9zQ;M zbZY4f%RQK zm!+jD;So?^{q+UlGRAZJShU{mT{}ltyB*Md{?e7o@Rgn)O{eSaO&joaZ0*ZnIz8eTl7zIf{f`uWeotUK(wFytDlUVJ(pwT(4oVvdo-J2_f&k2 z!xUdWAUeHtdD%*wr1Z|c2lhp?Mb##&f&BOHE*-Zbo}Y@B4tO*2zBK7u*W*={PHax> z&C!|CzeP;#jF*bi-%%vn3opJnhA`t8sCE34W9h$F7SA3|FOBYQru(D4Z4CEl=JM%N zvtOT#UPWH*j3z|%?n-W)O8#F?PutVy#vrd$x+6X&af6fT& zqg;ct1q3pg3*Z32Vs(Kr`1bzh9|(1+Gc&$Q=_gy1FOiD<)V`;49PA$;&pNU_%qJSy z^sw5-MYGZ+1!LR@AltR?$n32Fi(0oU$&2kBIJYJlx+0q1y|?oma?A-h5U~t=l|oL3 zJ5Xe^jK$+0zEfa9a~Ex{TU}IqYkfWt{&wZfW-f1wZY~h0HN0@W@)O;&rnTDFST`hZ zP9Fa#+oZ_f=AFEkp+VOp(>DYb()W~6p%2odH*F3CPM?%6Pq+LsU4b5|w1KjbY#4RE zIq08x{<(21d>WtrQU-E7$b5=e#gFj)UAuNI-hK0@@r9GKPsrV8UwC=(-S2+C=xe}o zz*Wy(9iZRki`C8DvfM`wtK=9bnlyWI$-em_fbM1hy=dcSo_lH5?PAW)twp5b6Yc>b z0e0{=&iSnYH9Ok3^s@n&I{FLe#1$j1~eI?_O%iibu`fSM6}*~w%74EAnC=<$l@F(L+u3skN_8L$?^ge16rpn zOmsr8+SIdUs4V$=d%foiRJj^&ud=nJoEw2d+VJymplwDY_Koz<7$Jq&mZhwMx^4af{w^Fjw6`^X9A}8KDoa@gj>UB z1P965IDh#}eR`PP{ABw}cD7P}1?JQzKpvU0bNezA3ScV;XLEAL`5%|t*MM`GHP)Kk z768E+@_FF6^Ctf3AODlZp1{@DUirr8kLE!=<+C|9_6T%pkCSmG#u-$Xuf#XPONQTO z*U<~`*R5Ng4miG&T*#r=ezgWN$jvF82i{^mI1cD9ve-CV>>~%znkXj8ib^U1;jZ4u z;oV+xeu8#61|}OLo^j<;<)A9_VeQwoF68__`qMwFuMZYa9e#ewvutm_|MW+HRuNltP*tpFh;F4V}sBi7g)f1pnQ>}T~13=@hw}(9kaB42u+RB>I|H&+lB$+S*ZSddM zU;lP`^pg|)y}OCA_my5EpQkiXHWS(P*00`}V1%7vJZqZg$*yQ_R~5`)OrOV_I9fSV z>A>_?Ag?Iqaa5K$#=%05x88V`wQF#5zXP4Al-o9UeqDBqy~Bg~!A0tc2OIhh07gqwP1^Jwd|H3}5#=Gz(I=6s@ zMDpkl`e!cCDOm)w!cXxWfb_<60s%R2G7fw}F?6byb%6FSn;-X1`JmQRMv@=)J$d`E zwL|k+^Fv!3H>7LDn>pa=rUT^nEc^boS6>^w!@=Hy;bgu6!r2~z9B;PfbX3^dhV@~7 z$tF&)waq7xTvWKd%a+E6(TRT0J;zqFcWaAtm4kUhHYwYhLs!723IbzmV{I-{gDfH1FZwV3<}g!8l{;wi$Du_>7VGM9(oc z&!6$vpGN~)JRX<+^_pmQ?PzcM+%KPJOikareR*m3&kdB}>C*IN(&l|$8<>UPUapxG zwKne0-urOz<9AL}wnJoco3#dE!oKHe*|#UfuUkJYy2YVZgpR}2c#9F9L#ZxgBmv9z z6h$D4lu|S4*jeD00y8Pld)izSAhWe{jO!wOV@x8`ZPq4Yyu!vx(d5aKx@{A=#tW#Y zUrJ99Dx##U(CR{{CK5VfA<}nS(d$3_-QQXK<3Ik>jE&F63l5N>NHaRFr0@_XQo#wv z_W2O#cU~=8wmEqJ@!@X-DkcJK7rnEw)1^hK>@Mw9ZE^sT8gLs+PX?~6Zj;o#SrmtE z+50O3rA2j1)4!I|t6sE2FE%!|l3>QGbiQaE0uqU#3+-->L9cA%Nz2k?Uo_sBdTa40 zLBi<5(Dh4t7I1R)Drv$+9rQdkB+&*YHm@2PP$F=kHz}8}m*%%Wch>*!eDl@C&)@iU z%m^5!W?+Y&)+c>XFWTmH$}uDG{+BBkfBbGW4*%>Yi$Clz&;5sP5FaG9C z0{T(qc`|TsUN3?)VMMj{la$8qe)HP{0CNt_p>EMDu#-1g76N8Zfb)t!%<$4jyzTydAQ~VMl+|3O%&G&g z{uCq0K{X;Fcx+`%pusOvD*xtJzYK^k^{%z#Oxn9AVh-+>Z=ZaLURs}$U7cQiY&c;zrEZTR+piX znLC1Ql*W`Mr+ouC9<(<4Zmjx)CN@WpD+6xOBx4l6QLp{;_5?*iZL$#v%`;$2l-;>X z(b#Vr>lbI1(oF!hHPsZmRHWc2p3MzI7JV}s)e#r%!AQrePM$iqc>Bh&l<%U$im0XN zT)bG>C=n#01scT*M~oxjDpWna!*ILj~!9?W9_Z#c_N^`6k*udM1E1K z#@W?e4NcZ2zR>?8=tUKnfBy_1S>G7@D_1R*zooH2uC*^(H*1hOo)BGfAZ*#de5NHnH)xG|sP!PTkJ12nBVStPQe8>0mtgi9iF4TWdaiEloIIH?1@I4;- zc+A5^`;HOg>SZ`q+566 z>qR>0&sWZAM(Pvwi=n0L)7M^jX^fQdF*X7=g3@4wSnRs@7TU;m{JmVuRH zb>qhNYJmFfXi@}|{WlJm#%*4cinAUfRm?X94Ug)>=@+@@JxcbUJt>=|)#-W5^9O#T zceSEJKW=Hw8Qe=y-fE-eXlvw)%MbxTIYBp*`T5=A46kO-7C9jv^Xe&_(DUg# zplpMEJrC=*e$1Y0ZOBSvR4&BUaNy9)Qf)241`c`78__qqqQmI`0Y~@l?Hgy!)j)Ck z(|lb@8TKf13Np{G*T3lWr~cmvS%bLb|kh65WF5-b{x5?Z5N47vK8U_hvoz9z47_98k11 zT}55#bEi)?Yk?N;f4F%6=f4c_*&aQWrk>unU_kxa-)^7nOSdS3<_Tj@_K8}` zAln`gc(wCmPUNKLF#Y)BkJ9ss)T)oG)^=@1>!jE>i+67Z>}8y+E;{EvIodcG9{^q% zCwQQeCbFKCz?wAIN5BfvpOuekpY5y0>05r8c_ zuo%ewAimF#2I@Opnxg>F{;+iff+2D8l|kxy*<2gr59FLaqC0JIr~wG@C_p0G0E*xn z8N)CvM70B@izw_NWC+MvYq&c_ltq(apwobH5J^^$^S2=(YD0!vv zK6og-n*IThS{`&3V~F-2PRh6E`i|p-e6~+HCIuNW9`V5YJf^0Ts_3 
zIkNcYfAP=L8J7+?l5R&p(euw98BqW2XxSWY3aoOFok%(M$CP!nRt(bi^^mn|m1oo(&LF5|I;j$Bw7sZw2uLsL@G2yK*h?b6LMtITBIwGJiyXZVANU zWXx@s?z1_)DCb2^gC=|c1%|eR?REy(K6Rj4(t-K@woVTMn9Ir2fmzY zes|6OyOAD*H;CRQ3q`O2g3Rq2e55%cYdAzi9ZK-5NA z6!rb3SHC?x=uYxSFbBu@_U7@6_MUP$$}2hGUMm0+*(09^`qKN&6P@-}0Ybm}#gCJp zrza|15YO5NIhrCQ@fVka$R{qnqN{3^PSDr?5${(lU8;udB%~?UT2rBUl)Lzo$eg9)nNz%&Nzt# z->?DgHNd>c>yb~%Tl{WIz^PI}qm$XIt^2y@O#62OKDVtYVmO|nmiERCIey|>!0X{b zTbBE*&DT04BLh&M+_!J>WSRbZo;n1fu9ozfaUzTZt$D$-(a7d#ivtK(@)obClkHg~sjP;sp5uZ$=B3y6=d(HE!N^PPFY>2*60-?dZTgznul@NCZ8 zFUp+e$l&NyLe;^}oGog9bG8h2QKPa=ISy)m@Bj`mC1&kUI{%bCi=I4(>@yCfj`kfW z)1mzhpw`E?-}!a?rYyC12_5bJ{mz7lA8kpF$UekppNNkM_PF1?D_LxP>%-VkDl1dbV@XgE(Ai+rRc?YfHphR%AK{+@mO#QDXazLR&ZzZuOKx(mkx+QI?{&s|swL}RA# z?X0@S^Xnyu)nJ+E69}K!6A-)C?4Bw>E%fBH9`cAVuntjmn@O#%PYZ z-;9W)#jLI;Xhdl-3V<@pIg(zbjf|-x32Z!RYCQPrt{pvbY4OU7&n{m1+DnVKKlo&E z`TDuMt_tL<9lDHynJF;E^$(EkKu@I^4(?Oa@M_Gr2!<3Gn;AfJz6g?yfhv@5ULiFk ztq~)I&^0as&U*@hH6S#W^Tw03vNXH*Q$n7smev>JmHge`{;eV!i#Cb2T+c{kFavou zry%g^i=_1#(0I9eG1uM^k_^WsJ(-IoeJihV+r9}5;iY`Bdg_+~(yx8Kj3|GQp*zt> zLsN~vwy&f(s-OJoYp(?2pN}zjf_egrBI8<$5zrWe;N5-{uu;(Y;=lQuvx|36U0D1_ ze|M3$@YzbQ*i?j@iCuz%%d8E|sT8*a;JWoO^2YfkM$)}I(7ta(u*JO>byj=JhLH1-(Mb?Q! zLzISyB?{irW0h&>0E$g1o`kCCG4pZwz~PwX^~Kj;tj23XT7<^XZR1iaj8j5(!@+|G z?7G%+3ETnLHm9`h&SO0WW^IXVXH4;uh2?xST!5o8X-+7t3^y-*0OkPODK{S-JyzR+?eU76guLe&c|p!m&Rsttx^z39Gx zx!$=So#HYh4eAo2TFPGz)UL&NyI2_t`_lhD@Bev`&G^3vU)ev(Z!HbHctQ%lyQJ0a z-nn)0^p@PJ|XP`i3ZroG#=BnfBS0&oj1);Awv<^{{pO=EbIJ z70WOZS;=|%Y|(g}B;?(z5bBU)KHSTk6fL!b~ji${#i8A<}iXs@T&mTVX))W(e_{RVI`$PqhmK$`xt z&r8~31T%Kbjc4K;Uma$qzNO}Y2}9dGhf+JGA>#KpGoI}8nb-KccBkx0V|MuU_$o$3} z%C&2Iiu%3niy!>%50htKB%{uaUi0c}uXWDGnZ@t_8-IJTDd2O}*8PjSMX%i}n`~vy z_v;R*URPRbQN)j$ul+B5Z}H^w@$`&XLswWy- z85k!&Io`S~gEW>=N$Mnbu$y^kF73w#P}=vN)}V3->0Z)<4+d1IgRZ2})}pw?#hAY@ zd$z;Y)vX3DUpRjyTDg~jcrJkUaM4$lamfe<`n~by2kDDv1G3T`qZc|H-Hwx*4rH&5 zqx2EbG#L?%t!R@P?$6K66!)ti6%>BB>lFTC- zgVnrGuK?!b4I>BYzvwLI`Y>>TTIk4kOM`VOv*|=fKmIU2xPI|;8F;cg?OQTXU!<-B zcGWc96zIS}#tW>)7xfv<0?Fy|9M<;&OO+s5@(j8#N5j&&fR;Jd?>7IZ&YWDl{OYUq zDOzZafRw;rr2*dh)i1_qRyswS&NUF}cIwR8#{S&2NjDw`Lk@sXKA=fFLu_xhp^V@n01QeW0@fzHg`2pWA^VMMlP#Io+-nY_KW) z?n|~pXV}<`=Eqzyq_18T7!zO|{hc`R$?&3;m0h}Y3se7R`$7-DniY@MRilY{5a zof@8_1~56s9wQUX>!WyWP~YND|KuMB7G7=# zJ+b(E|KNYN_}w4=aB=0#(YgM5jtutVs`w+`qA%NkV)0hM?3WJ&gZ0>Ww`hIyW)C`h zXU|gympXUr4nTECqGtmx*ToA&eX8+oQKv02v=xDViYpiT6hee-lADsJCE%B{6 ziNhm$&Q*X|*9iQ;2lN-w$r|B^r4n%7tlA8G`zkx2tik5vi}>%_wb>{EylfMDnau>F zSkg>QHI8x4wfCGua_Q=&=DM+UaHDJBB%)6@#e48Rgw75fxzj?m%JVn6bXc}BLPLx7 zQM5ANi${HO^7zQI#pN)8_Ws&<)Vg&J*{tMcbV6%j5AMyOtQPgQ-8;*0cxrL;a)BTQ zX$~6u3opDhYwGz^+NsY3r2wY|kXQp{X)ac>79Vi;pBeW!Q)m9$N~{cAfG@`lH+!+wbPhWMQ=TQULLHzWu$%QmLi> zzLA{7H&zx5LMOV+cF>P#m-A9K3lQ>1;}T%P87;u_%V<;(r{I>`0i^U>nPHss0?V{{ zwtmuG$Y!P3n#AnxvKToAYGXqIG8{U1*Z`yL*(*iv)7$Z6>uDbG$<5pLB`>qFn+olW zZk@bSf=P*~9i2C^yDYe?Wx_v@(NiD+9wK{cZ5aupL$}9{9es}`RUh}%HwTnfs`31( zu$6-Jnmf-$E9Sy}GA`H%T^0~a7eYfKq?Hr&vn34A+|PMz&7Bg6+qUm&FW+vz?o6-o ztd)xo-hC_3JG`SoT#m1@Y4#m_n(7_RbY@ySRJO2#&1Ps~rrF=Vte}mu1@DyjfItWi;6*Y$H+V z*3U$9!r&zjTSM(r-K~j1_)VtZdz@5ignQPq&Uz8-*>r2_#O58XbUjxg*&EGC-E_~g;%g1%yven1;J|=|u z(DQuhpSi~`V`5y5IsBXPO1T{KCX5g>rp((c0M-BDM?YPB=Nn&}^}6_3Y4EkRDTRxNHVXy8JSop`Jdm8B z|Jn96{o*{?fNpZsvfj&}gj(4Duy5(a;WK2k-zz}T&*H}}an2u8X0PhTZ zLg(QAr((3#hpem5A3m`7#T(U`s(+Ov-cC~ zTQb_$1q_r{d%yb`-l@rpZ)9izoUMnHNa>9`W6%_RK*LojWHBJdql2^5U;gOm(J=%! 
zCj3Ns-A*um@7v!Qz-mjtfVAr?SI)GqIeemLG;BRE>HYimSG#;q%4P~g3dqz#H7C&^ zI=q;n%yRZ0|EQZLy- ztLRnK&;2_o(Fq}S!K?`etc=j+O7`sCyK8aa;K4b;`_?e>u4^5(HV*{p<;&*=AsXPbrGfK+sovZ^_(cZ6c+5$ z9)9V?mvVf3p3!}740+DDr2^ge@g>)79{@J5dP*vVX7)TluJ%?HWq^k&H;&2s;F`5mm22y0{Y`< z0Gd#WDY6V!wd>YxD7`Ye;0QEcih5lf$J(YcZMFr1ts$FM6q(R}z-Pb~@Kq@UhM;T{ zDdM5QLvN?goJn?WimyJs_~G}zS7c7Lkh|yfC&!W}YZm|QKm2bNKki^gV86*{(BP3A z5h%b~1Bhf?apsVTJ|81>)+1hTVfqm*VPeT!yc6&&N(sPfUC{-Dd+OmepOaQv`(D<0 z)~EM01<~(h^;dd>b(db-;D+DWquQj8Eo)w9QbwzP^)LS-nE)g!QYv}1DMkIW3{GI% z&{FsK-6cH-eBrtM?lZa`1%19@ENMSrmch?yjE9riN(e;3?fEiuI5nf~wdoRgoAKjU z4R!Pa`@p=JA0tHrvv+zNnKCrqo{`mL=IY%%f56>@v+?6stLg{i^*8NAv)aOY&6_pz z`-$ku`3d$hJqlmL6YWvajUsLAeZV&7+nf#1ea7bH^X1-M%339WDChdtW%bwIi8jo? zan(P%z^#lp5vK!CXQ(TWf@e&$OVFm%o?HBv|K)!%&MapK;7_-11zNSQL~F`g zI`ZtZiy!{p?=Svu|M`Db*_IC$|MCC%KT9X5Uh)^RV{&lBgMbm2FDJ*1E17%nrHtte z&pXM8GoQRy=2C}21Bhz>i4{3PWv$RBGm2{sOoD$QPVlzgoIu5)LM8Q`Q; z!L!e;TfFz-2i37YnVjENbQ2?g!{XS969LO*c4Y{vJ&R5mbpTODls#iVSYI=b9{Qku z+6PN;PUf8`Yu8yMbYQLeWA3+i=wx4(`Lnb6m1)Aj!_Sw-B7?2k$sZmq=piRqH%q}+ zR$>XbE)S=oe_RQ4-WDCH>3Jo-hnFwULCLAu-rijVnsa5yQ2@yom71adH~?{?=vFqq zA|dfub&}^SlEy@r(C3dnKDKzd%oVah-tUtyur?0HNt(7X^3~@jso~GS< zJ#+n5^u3}8!eeDBoQc;4M70;_2a6Te+)OWPh8E8p*w+*dBM4a8o1Rr>Tk|vqcVL)j zzV^DaWR?I7e(~YkZ!G@&{rB2SIns;De*Kkij1IsdX=zRu$ifx?&~bKHLs6{^Yx0Wh z-nETWWo7&7M(Yx=m5z4v=9&*gZ&YnHr_;SRw z2fy{L?@ngYvc7DfZ=fqMUQ{(BS_C5+k^!I|vFP5l?L+11fJ}67Jmh-7$d+V}C`{4B zJ9qC-zYY9|F81#`7%f#kq^L|0z)u}|wvu{JlpbFhoBGbE2Jjv1Iy@V{jC$L1ov#s4 zbV~-t)6Wl0ugpQsS&H`P1FPF_&X}S9y!7gKTJM4p=)nQ)z`;%FyRx0eCJa3G8a*`L ztL{5qJkUhhol3PFs&DkQgNI*S?A`x#(c~x6f#ZMpYJ6yOAn9UdXV2u&5aDi*d>PXc z9lg}e+cV7v-S1IrN59k0m96Qz`ol)uwg2GQo3cdc=Yn4B4doZe>^srhU;X7Dmw^*d z-`s!exBqbQ2mjXpWO3u->DF09dZY*c%+Fg4zyb z2ROi8(3vu*=p3L5Bp0#F!Hg~hIZ;joCa@}%527>eF55!{G@VZ=O+g_qzwlz?%rO#O z0+8>w)=Fjp{l{6)R%|cQ@11}1Nsb-a=E_3IwsXKET@(G{*Bs)2GwbTdI&Nv7z)JLu zL((@9- zn}203WI)SC=y*?`J-Jp26f&2@FL)1}Po{hS;0 zb^z-2`bF<#d$LiU+E+QZ`1JV#+RV2Sy#UZ_pNZzqVG?g}7%L!NFe00BPg$V13MS)_ z$KzlGw+jT24bb8T;1(5L)(+hj82m;032hXww)Zv^0WGQ@ILqF)yX=9L50WL7UfH=T zJG+N#mtB3m{iviT`7DC`PFNf{=ej-nD(4e_(bt&A?Ei6Gt_q)MeQpHei+CrGA2bF| zDDA+BWKH4`>&eOJ-l3VW48g@OJokLp;mZPGE92BNWF4{{hiA5K=JP_1FlVY=Z9QaS z0p!WRx8C|?&YAF(XaRt~a^;@Jl#W}2XnSq*<3(;eM~4nf@8+npXLhuIloGt&9B~+2 zxGeZC$4H;KcFLTzC+2L0=!H{&9)TyVN`Aqc=-6NB?3^0fXD_c=R|Zmg$+>L(>+Kl< zU;+ST>^c8M0FA&Whw*xGuBs++zDg2gJBO>idgSnt;a?|Co|v^VU)BW=9-FIGmw7b- zZ3V=@Eq7PW4v*)2GB0pb>mNNn2GROb<1SwQW*9CRFu{l+EdS}v|MRR5_ze8mHa$i65s;@ECwDft-^mF>U zY-tRp<&)-q2H-U4_Q&mOh64Dnzgs@TC7w?4)!+HlqeqDK(R9K0kNfnS>s_;)H`mPg zA78ud_I%Pc-Pd(Mp+>Oy`LWLyM^6`N+TRn++4lsT&E#4P-)~W+2uzCH6Kw{@GG@*w z(&MiT3WAHbXbk=Cd-y<&a?eiKq)afd7J;)nc5YjoCNjmAyE)@5vFMM~@y$P&!O;+hSF<%%w1%&Va>ywMoIg-CT-@ zWCTgsFh7{c(7X37xpfCLG1#QhD@(-SG3x-iTNx54V@=Vt4qdtvEEh6^AtbxMQ|(6pi~0^ec|GjIg#Ctdg}jA z)qnk5o~?;}->#fPnr?>662pGvW+~LrRuJktQHnHemb8Fl<29|6yO)H-=%_ z@P%PRAYeh3I2;T|;snmjnUizpP&stv>dLvw-_NtVhVbrw>wWj$zu)?;^{gkSRYrn? z5-3(DPMoRjNQdAwNB8I-{U80|KNv>#>9J21fA9zYFs7Viy*)8c5zkTH+TXoB*WTeb z%J3LEPnx$Z3K_+nKT=7)2>v-54B=c;};N5v{J=Tv=;l$P(cYmT~@YjF3vkGa&TPzh3F?>|xq>0(A4Ws{SKA<{x;e=c8>iTh{-UmKO8wW$*SQ)75hm>EPAQyEQ! 
[GIT binary patch payload omitted: base85-encoded literal data (unreadable in text form); apply the patch with `git apply` to reconstruct the binary file.]
z`stkk{Woj%7QE<}uE)a{O?NTyX71+a=6Uin7%>dNs!x^h%PbnFWbc)xE=C@CxbSDK zf3%O%5~-yW9J{{y+yC%yR{!F!|7H%)#ox%pFQ4R~Z%~JKp+g2K`nCs|x9N=n-|^u1 zz2~PD4PDWTj5}2~@x@yV?W)tBl)3G5d*>V$ZTSQl_vH`)gR^JP$44Je=JZbsD7lXQ ztM9Zu8bhe>!E|(`_h?qLD;kdy9Nn6(?BJ~lX!cx5 z0r>FtZ3X$#SyWIrCtiM~eRy$XmcX*s zUVDN&876ULqc&^J9p*^HxKg_h2$wc7`p3+-zXeHTj!(P8-ji)NbI{RK=v1;AIUYID zz;)UyR*&?0?Sa?-Z&=T0(Z7IkFOrrCpdV+S6R<+41Ut5rqsSY6F>rY_Sc=e zS66TU?l+@@N*vl#Ldc=xudMdupx>CT^49l$P!{;Pf-&!7SUqpu>g4g0qa#WvSnNLi z>sv?o1ZH=_UA#u?NP9DD>@T(oM+zEKUa!p{F4%LWE$MZ4>{6QDabJAyr;u)IMcbn04%3sHJ$`U?96q~9D)*N&_xJR zOm}fq4qxvc>mGkh@M8m@X|}I%TXK#v2O@77-E@&D5|(zP6F3KrzOJQEJ?k5v&gR<1 z!I^~ES`Z^0SQ{KN()DlxTsSVl9$q_8Q~QZ~RWI*5aAbm*sut+ed-m>0Z!gf-z4{lQ zKJ&@yNc>BpqI>O+9{y>I`^tDpYG--O>)#Fxn8JSI+mvPvQ)9zdwz z_17+5%f_bvtxIqFXtk;Ko>vQ)s@Uf|mAt|)boA))kq=tpfYqb;pWx5OB}Zs~I5u-F zedt_$-76W@UQOT;&kv`lA6Mgl9Fr29Ch#8Y=@4X*xks4Y3D1|`ZB_TusrU)nPlHDr zxtUxBKYP1#Q5$C=i0@xte=edF+!ak_d=qwjs`Xj$`bRiRY!P&Su>~b_7SCBCr7gWU?ILPVG_?#fLVEosuP49i}jqc>%38R=`ja}d=7*BE` zS=l!ZzjZ*cKs>x6pO^33gXkNL;RCuM_@2X=EW|HlMvqPz-osTmHBRVYgy+!l}&e9Wt7|F>NhNM%tPFfM)p*1aQfWTk$Y~Xga{?Ck4mf zEt&D(hHw6``Mp~{yf&+iA1!Hj-~PP(+1D${Hah0~i5eB=j&r51P*+>QVX;WRu|df>f<;?weUIBg|dy z^&dw!#%1jJ)n6mIL!f!@IUx)ndC+oge@gM`*`CGF!syJy5?#~&`P68A9}rz`h(24c z@wX3qfuQF-`vlW*&9c@$^+)5me_qXnb9%9H`mQV<>kSKC+;CmkHQRH@y#C6!SMOYZ zy!wCtDo1jR=}m$3|j=RWPqa+LVS})1A-xJFInF*2q<#* zVzf&2AN2k=zkesix@c<8aj(*s*H6A2Bf6h4vXr1%zZNANc(M$YSnevL9C2>X0d%8& z=Ejp<_lKMz?)7??hxgS#1r@YwJ6!#6Lr-Yj35Lj1x0vF%igqbCPq0 zoV!paK|j~ZB0qECd`uz7TyR&)u7}g!1G{tJ>{|Wex4&BiQggg;;rciUwToczQ<5l* z!}!~QZ82fJqmLdr(j1o)yhYxx_l$Gr%ldDOtEK9Rif_y@`oel;D>)z{y5GO^hk+l$ zRMrJ;low%)ls?K8MIK>K;03Pq-BTG-0}bpPJ}h`7_~Wom``1u0?e?iX9HnhO3)g+o z&1enV_xDmZ4`utZ|1bfE2oc8cn${sAMwwwCxd*f7ycz|iA!iP*?}rlLqW)YxjR`Hv z5ohMk`P^O*5-JzaA*#a+Wg{%vL(N&!)E@m(PG$#v?FcvYS#zMGwmGLypC7iOOh%T) z+Ao(fS!Fm5ui3D^h-CXU4yg7(N!=G^G0VR2EMpTOQ;wNXx~k_orC^l&l(lf$W!5E_ zV!h*-2}%U8*Q~G4iV1w+ z&~p7!_9i>Jvcbk{=qYam`g=K_?%t`5O>k9Kx~X;8ZS&@sQohR&VXzT8VBr9i{rk!Q zDl7b9ftxjD>)x+tHC!gZKlr$whuyz8KGx)%+?t`KR8q#m7wubzH8matl_7TR=9SgH zl=}M*HQDdd&wI|@oX#JNF~dOM{5f{?<&?D!0LxjuErm^%w;({OM?sFu z6P#`Kqe$wXEi;{C^l%OeEegn-{k1{*xc=V)N_%$Kqqgjhy$ANByuY?O7k+31aP4~8 zR{ps|=yOJ?*H|2>z;!v*3W&GbO zv+nbpSYFDdIrVQSq0UjZwLM2;ZRE_6^5aDY$%NBpfaBrZIfp^l?xf5ge0OI7+O=iJ zKZ;H%_V_q{up>pD!M3#^HW}%V(`jcBPekwbbdl8stRrB~EIriSd!a1Quj=KGr!zb^ zGmcYq7Xt-PUlZTkUvKUXcZohI!zvT-zjfW?@EUyi?gIxOW(X#OuHU-4DB9s9eDi$H zE%X0&Evi&PFnAc>@Q<>sboE446F>gZk80!aqjpDOc=Z&SXl9seD@(6WXTMn`=HcJrb$#!GtC$dh%oJ98a5 zVuJqB&RpZw-7}tmtrusj$xJzS8}&`j#d)R?`<%SAAJ&3zI52Jm!|Xu!TSRM%;QD@< zi?vNZ^>koTR>jTY%x@%w?cVN#U)|Zu{~J3K&|1kv!4%_|x@V-(4#3PG|5@V)oab4s zzj2M?w?I01HRCq%Wjr^kAT5Ov*aHCb=Y;`dZE=y{Bf<0A!o zb65#-ahiPk*{PWm{H7a_qm19DH_Wm=p~2{TKl+oA8NdC<-%Q&DNkfNffpg|eZOr0P zoF?evdV5Y^+nIiI@ZiyL^wXKQy|61@R`yu%QC;U+ykk3(N3hx!b?>V3(c|P)~>^!3a|v{9c6XPq<09a;HO|SzWH?l z1@iif+KZk3^u3We7==nCyiZ%^AKdk3+TF@=_A38Gniaocd zg0pN|8Pl%Wm9v5_D>#6+SvOjz-!0X8B@*pFjJec8C|k3>`zOEio#s&so@aq0*?$Mp zX)a&-yx_^@)q6SkUn-NF4LKEW=|J0SNwj^>a-cPbr_5+P{EO&E^%vaG{~!M&chKZ2 zz3_Lx`uW(*=wN;N_kkm?wx*X?_ikQa{qDWDvw;elv`%fsZuOb0Z8$?GLn~}wHn(Td z;pqYRrXT`(#1rYz9Eh@N$!C=w618TG`1Sg=9by`+>>vG6AOJ2NI&>`9El$Qqx4tL< zcW7k3mP_;nj?_!%vLT}>M>$$oClvABDe zin;yGa7}#30qIve|K#(&>x`EZ-x^0^#N0jW>mz=qmKA4*Uf62_oS5Y&9mK23bh0s9 zFK6MpaByeq(=Ll%z^>&S6ugJ;&nx55=ZhsuOma$c*qo=@JsM+cOod2%8w0?jruwD* zL=S_L$#&Iw(@(nR;SCSU9B!D^qbEH(9zcdT99>WfPST+c6>Pj4kD%)cYSN84$k75@ z5YINgdEV}<3V~?tcT|V$+Ep8y-~rJ58cX23^K&kot@1qGG5Wuq?h5e_A33>t?b~mS z_1FC z()-O(q9WS_yzkz;nxEuqfxg#P+p{Iuuw*Hoarnsbc2k3agTip$r+&EOf1sL<1* 
zRSUtJ@#tGBH^Rs9hjqcwS-f>mvP~O{ZYcStO7NRi=r>2O`QkygX)whTB~^?K`-~pw z3X^=)X5ee~Y;=u2XXAhmda%H8_V>Mu=KM5rrsOwxbME}9a3YzQoI!8sWavHQDj{7O9+KY0G4$KZD~ox&}_FWL@2(H4N?X~smuct1Z9K7u|}%+po)TdYx5 zc$!1HMDi`%#V6PVg4&WlCn>J$TnuO0wt6=!O84Kx@BF z$IihU$!Aqv52JB3ucR(=039&IN$rQd-{%Z4YP0U3>JN& zA)47B$bDsBKRk1F^>2Uik8}9dj+EphFO}^Vk^iwQb3F$gx<;VNWEG`?R99+;;Q6uz z7?~6v0+m8QaZ-9_{*a9TN7S3zbdC^C3m`gn>~Jv1*)|!PeF8?!)w~Eb0!ho3mr@)l zS_}py3;Nq?{x2HD@h+o98v#nxroNBM#+i7yve;Y;l*6k7Iq~#R*AM(o#>dUO83sEX5PG{T@3Qq{WZPpZ zoY9OW3dVz*4yMe252o6$z!Uw*H|6-_%>3lk=?uLxn}gS`GUBgXxs{O4D2jNe|d>|noi$tN*-s^o*X@wa#CP?=s?sG16qua=9bfn zVT_rY)bsr{KXdWosO!pM?ih*dWKwzdIDcdAj3ndszloBKgS`38^8v+BT}k_ z#8+cv!Xn!k(^2j;dC)hD&2q|q^^wxt{23dkq-Pt|XLm+=$pJEjVduubH zhrANp7sDaO$A)#k>roEra1i6wVaN=-Z5>);JT#@ucY-xN-<+S0LmD$}F-=O}d(2;O z0gbF%w`6b-(xR1G&M)`FG4T{AS&9#D8F z>hO`A6X3cVyx{OC4)J|XYvP0hRa+tongdPXls!PH6n587V6rSAiq*EYvfQrCvo;04 zWa}8RJ2DRUq(EG0A7q&m{QGK&vSZig)y@+qCp!d>`TDEdWsw|fe^LaBa68mas{rL| zvLycEPyS+hgmM_^B@54<2cvGvrfV%n4<8*T4w@D%fB)kT$N8#)fx&ULeY(S$6%HRf z)FHR={WsrwBf713Y&0l~NKozL&Ag=p`5NX0)OwvLK^ODhDvxLyqUIIq%6q@WL;~ zF%|y85!q|nig1+5u6*rVZ>-+>&KubqWet^0TCajKQcq{B%bKe!IHg`D%@Zc)j1TDF$TI!{LpM(GA5OW>%V6oC~3|)xRU~_4GQPAwdfCguRYk9W5qcP z-+%Kv1LqHd5q^=IarIBX{pBd;)~H(K&9}Z+DfYL=S@!nZe;A&^&~@IzPrhFZvgQWI zWmIfPQCCTE`s{_~%OH?llc5^E9*h^sep-{9`dX&aGd{}DlwHm-Lnbmrm%f3?wqPvS zgSV-4K*!$W!~SDbI;4=H*oQMV8h$36;$sU9kF$ArFdB`1l?}F5`x2$jxMSq_yZPX4 zbAT(EcN6*Y zT%T^#aoFI6#+$3)`TT833s0N%gLUsYh@MwqHXW1db8^yj`hQ>^hj_%Y7|4y$WH|mU z3r{ge4ng0?Qgaw@S$*f*udn{@AAT`C`OkhHFZW&lWEuVV2R{j?JNW&=>2M|)#+Z7x zXLv4p{U(|Q*B90obm|%8R)fTAIT~fFE$|t~cX)s3;F0vS%gMoTu(>eqW%{5ywExw# z8VRo9F5ZL2TEVor*qI*802JuhoW5-x<{+>v0l*=bsyXcES5=g3i${=!c>ex%xVz-Mh8Qo+DMAoLK$tcfT74)Y%K? z;ui&IlNPeW9-rsZ4e4_nmzXYmcc?b!{Pbz=*?V*3 zI7}GU!aT;Rxl5YBgE&M+&q-!|{OKpHd7%*(9j@B#XFvJj>VN#V|KA?1_j5*g2Z_D; z#&=hL_`~nUF+N$s?YB&DZAr*56^CptnPv7s8x1mGU$DogJeSQiNZqX9ges^Tn3RE|#Pl*Rr+ z@W~vRO27}_`E`!WGWE*nQvt-OBsjp?vT5_eGaiTI^jJOAFXtGReJ7i9Pi+d&HvGm1 zcL!@c{~pInG6lYP$+mqVcuzHCGNxOi@q))W(C9CmU*Pz4Fy0o7R4wf;Fl6oU`u_FX ztMg?b?=L&~QO+3cR_<2W>iy9ZuLpNYFWno?=-qwnmDg7fx&}Ry9h?zt>n~-LnoBbA zPW_efc=Om6?|A=rzZxA_R;u93$=BYfitgax)W!2>YOipq9qC}()`;$|)UH90ABUxH zv$MfsY^27ur^7!j7Trf5WXqwuwUyx_A#9QOHUjNR_<63>yW%YX2#q;MH^KK5&9BAo4c*K>y=UDPG zCrKsR8SZY6RQw3|ah}md=*F@Hh9<%p@`bEY(V)7A9R#1v|HXo9YUS;JKD% zL%YEiE=<4ZY&r18%k6>n;zthXhUc8~Z1u52`~E;dKDy;+r_T((+Zv9K7SNpFGn?Ye zvi3QNzllDK%OheR`v{- zojCE@$UQpuv<=D5z)J*8Wo7S4uX?E@ktG2l7{yaml1M7z)ZSW)M)C&z8RMI9;!**O zUD;=QOGJ6t`c)dKY<=T9KUf_*a$*wiuEbwe+}^Gw)4`XH3|mqe0`B}Os#d<}9*Jj5 zV7T_T1yi+c;l!ttfgw~SEFg^%$Iz)fpX`KyBeMu7vF_Iy`pb9hvNP1n;^9{1yK zyhR%x_-1kFZPBk(-mxVfEbzr2v~e4mK>v$&O8}hUN%(@!$Ou6U zeAj{90?_!jxsu!CgzlM7!%;!aN5KG}L05tx&i->~_KkFj1j?+#m~fh{qphb_G-Ss& z(aH8?J^TDYbNuv^4@LM_+cqq8mjoF4+`QS^yFZxPe?hJ;AGu|pRAs1Of};`!4jq2E z%A8B_rvlgD7LTy?lT_P0;LF!N6D`SdR{=Od?go7>8}P`7@i+WrZ^@Euso@~O2@klH zoVb27ep4a<`3apk!|}6$NB2L<(f>kUSpSpkL_W>YuX{e6xF^vldV-6$Zd@)H1ZI+p z3n(|{@YRwH_$1k1f`sUvZtdW3ue(+6)AM$A(E853dsdfowo727za2bytfaNevuE_) z$M;JdKmg&zU|D|-QH-MA;urO-|3A0#JZY|>WFwdUxv4jc)44yrZ@?j@xu|Qpc3#U( z0in9K8Vy7iyu81rxCy*CH{?>qA{7jGjl zg2!W*0Koh;WBIE;W-W_Defb3OxON=%jb*(pwAuHs=Ut6y{mWncadqz6-5jl3ihS;l z8Esm<^Zw`c7Jo1VgfWOJIiY?_jEcg>KqN4x0gPnFfJaX6(+grnP0*xwFGvLP*&JvR$uU zx*1{L%5ct^5EFo82w;6Dw(r^1fp8HR1gEU+Z{IndD@YyzBMaT3a)UTAs4G`H7%9a; zrtQw2@mk8u!%AVtp^-q9A+&A#=GDiaUkHK=6EM%)eLnN;K1_*&fJ0UZw{lh4Mh9B6 zvdtye;2V-1%%tGiLWvVki2xba{y_1ck)SnVB@(eB$ z*^MAM$3H)JdG+CkpAMmNHW3z_G0Hv=`Te_P36|Lo*0TEqHQ>0?5<;Ru6g<|M}1UY6RbzGv`OR6CBpE^Ks>u3G+40U*GEK z;f}7Fw{{bzHqNH6WLfE58B=Y1LWKk4g$xmb4pU}~jsiUSaOSnZ$R0DI&B;{Fd2;u_ 
zsFz=HmK3wr*=m!;^?rgdvY6IUJFb#yX9E|jsUU;BnT8O7m<2i(rs zQR*w}9P9*6c$idg5DYR%9R7%LbAn^^43l*)RDRSvo(7w7tR?WCR#D>6lQrS2pq4rM z(f;U44o=RCDbbDgz=xwx@L+q+Kn|`ka=MqWn@DJDVT|8Q2y@I{xpp(>=%Hw<14|RS z#=UUmTGxgn@sbmpPLz48|M-KH|1#(bs$Ft;V>6|!?cRB?tnjNTb-P!$b0knW2tDnK zD7*XjY>$qY1ODzO02#wF1{kjTlx=A|0_x=P->??G@hqvIE!w+S<;StMyeUDEx z9$Z=sws=M3f;FB(2|R!P>^RZ~=fl?_fgD)YPp~tD9O5|m8V{kM$-qOicOTxXb<{6& zA{3ZtfTKrGl+CgnI&$q=L8JJHAjcQ=spbfxEZwjFo3i&^!RlMDzY!mLsp_ONDTrI! z$K9*TIWaGMcCqouscz|?R#*6*Y!mm+xZxRR&j%?~0usuS4}|R6wVY3mymI`=D4G|m z+MyJ34sp~9C?3j*&iQ`!^Rop&XGDBJ77!`giXUj8*zyR>`>z4oybL z)Y1Y)Ur<3LvI?JrZ79vNst9 zbFfT~0Qkc(!hm-O7J3=Otj`>3x3QkVoF-CX={vF_%IO-1f-P;f$k54RZ$es(*bDE6<{E?1WPaT~<2U5+ z$wtesVLU3Xr}Vjb^h1_DzaGBO*IYn1m>G2un7dYBVZb!s^?qh(FmOr6dG-9O|MA0*W}mfiLbqB`v12wzcj!VuRL~AxqD2{H%DlCD zV61O0sCngjvbMP#J94P%s&v&1;0+sO9z`SZ>W%Siefl|14#YR;$dXUU0mhmnjnB>| zGmXRdbqVay0cD`7O3|;E96TRyA&>BR<8d?y z5`59MoF{;2e>u~&mH7T!-(UUv|Mc%y+mh7+V|Odf9!Eyw-|l<^2{n6ihBDF*9j@)# z+0PdlUv|2z&hu3@$+m@?bPX>~pxwLoPB2z5R|VzpvvDfi89BfyO7`Dt&bP``W7JQ= zNb9k`V~aJWwQ37T_aXnruy1ehmXVbf(~A!Eum08lZR`#~zU?_Ge)_ZjYIQG%=>%H3 z-dfmK^ki9+;09(Jl4JH+lF*(UdkzCNZ$0A$S?SMo>78Zfe*GZYPM4(TaNe1x@dCiS zf710kvm0K2_0{aPoWJ4n`S$qpbLU5A6>Q+J{h$2Levr<)D;Qs!L-gR9R2}q#<{u;< zzOHYo!)G@Ze7$<{v(-oS%LWJZqNOF+sMLjn4*b)vd2{>D!3{sQ>sQ_90zl-w!27+N zeEN_3;cU9!^F8}fYn50cAjE-%2FYOi>RLMdOp)fosNtoqsjk==|b8h z;(>I`4PR^9upG?xMF*RI@ZlfB?;Vpk@Pj}9t7(nFF2lq1k3ZToWX^LKbCkeoElN*T zEg~`O{Q2{fpZIA5tQFu5|{qyoT>|7?7L*u7t#3|Kf&eHCm&2^H~hm( zLxO&4b5z?Qx{kGQWL>y?EgD6Fb--g$SDn<^gPSap2=~bl(AfZ+AfkH8)-2YIsvC%Mkwq}lSK0NldY-xIn zv1Cn?DcXLhh>^e)psMO9C>-_(wg^r?ejy&&{hY{~q7it5r;yWez}2nJc+x(V++Y_z zh!?85cGeIbia&_Xyd=M>Fi$2X@2_PO4u6j?aGKLaw?!NDQ)}=ud?5OY2xiVIbL{o4 z;BvIHBm^!$|Ma8gFX>^?L+Jf{1Mc70`p7&LY+@IwQzhcZSoPSJphWY`&5mC;{NorLx|8hBFrNH38u9Yqbs=WBj!n^HKKPb<<0~ zz)m4D)!}BY#v&^-> z(CEy$ai72DK+isBto6jMUgkAp`oulvk4pae<%VT^AGr@a{QvpC5t7SC<5c|b|KX3T zw=dj}5p>#d&S=Kz&wusq>S_mG5=xK=gA$=y_l*)%!Xx5H;5iK7!CkE<7Dwc^1i+mb z@}}mjY>*>?LttY_;b+&*11ZY8V-Cxq6qj?X$bOTp_)S@%oFJo&hA<+&4&S_Y`+f@A z{nbl{Qi6+^Fnq4v(1*5+wvb%59z>d!2CWov?`k4{~x{83V>X6JYwaj^jFbT)uicM{~i02t-EKfHHiW{#_9o z<#M$>$9zhHYr27l^;av|yRSdw@@a;SUcCvm;1M+}(XFw326k-gJFt)D~UG7>&n_iO?twFHn)%G`EB~KU<1%Lt$|E)Zt>srnNzX>Bw zwY?pt!{Nk`u?Jcw=r0Wir*#X3J>10*F&6?;nH55|FNu}L?vi=m1Q_7xq(4X5AG+?7 zKEMRJY4DLSK|zEt=F_87M4dvvadj!?7C=s1^g z_4UtxzB+pRWQNzpaey^^IFqp!9*KIx@h=h*=tOG>Fm-q)$ECo7L~cef;kK?U=0+bu zs+6rJH7TA0ZjHro3g9UrI6lm8=MMmc##T7ns$6mWR%7$X?b7~v(O zn*n?)rz(SNPvgQH_zahnK#IO{IKWjn!_3}1y*+S!JMlPa@0~R_1bhU!t399 zZOJg^)aaim@~KmwmXUa29Hw6;(5#P92wwAdf1zCtBMvAcYzYRozv0q1jY0A1&lG(n zRMBK}l%?FIIS9j#ZbBO92JUfY^k;irFtniE-~Hm}tJ9yI9%r(^iI;Wvzi=RDP56R_ z@JwT&_c6fwUSR*fz z>sd0fE(eE;=g)OW<=cawJ5rE$rf}YGFBsGuPjF~s3ONODGIqivbaJ!<#2ns;c0T>& zREo!j$@>1`_urZ|z|HgLFXSBU0O#QUr$7G51dXoTNck+=;p8i2iC12Y2T-)W_no&e zjBx;c{#geOrA(0*vIFdnGY|Bg`@s)?)OrtvM;%J{*{3;t%YNu3+Y>E(S)fcS5iiP` z!;s0dYZ-cXY6+uF7gz@DsdRZ%yQTNudnY*M@CokU1hYBE!G5gGc^Yq7b>J8{>`D2t zFZdN&6HI!NT*c_8?{0hz?{$WSAnI0h+jrNmWn?zqom%6NE1Rowk^xHQe3@dW+?$g1 zLVZ`AL9y`kY`ExRjEzQ7K1^PI@q$dJlzF@&rFT=oA4;1Hl`&`>PLvY^-dwzNw!?|v ziAI-__3JlfDi$ONt|5I@2!quXM2W6YfF#6e_{psqJldpCt)q6A6!74(| zoGUw|`){Y9?thX|o+79%k=7Bdh2pvEHp9lELvoYlTGA{EUPhGq6rRILBzG~Gf7zCTH6}EgfOwFp?QH9 zG>W##xo%$I8hq^4T-99HhOgj`PuMGXZO=J<=eYe(GSu#QHlvM|-;|GGoUdtFTb-B;n=Ios2ul{E~ zZC+3AX)_0x`*wi`!;xMw`b1-Zul**42zN0b6G;NILkk z;FopIvwQ3~E(;{tQ!<+@!y9Mf;M5;cSUjvg)6qKp%J$C~J;T$q@ldsBp4wcsMHyvf z;@z(uTeQC5LkDLTaD3%x=Rd^T1)k|3V1fs7N#c&v*K(9>k4Fj6j{_(t8NF8vzuQ$^qKO-|8zY0!ow9Ve%~+-bk?rt?&7hUp z)73vUGhi*_E4YCN-SOzrqw@^w1_wHmO!%|$Y=)z=6zqp!o84EnxFL8>yQb(>;=;$D 
zd>kzoRD{co*)ZwpC1j*0p8DjIjKf71Uu{kHMVUMLfhJ!+uHrO0px^CmPw8o3;zcjH zQ%g9Nv$EOOINzbYmX%4*u@7F(z90`rXAA}czIY2=2X9!i|JnHhZfH0B4PjKX`hl7+ z#n*oD;~!7O+uOhSyZG6|Y^#D7@e~17`acJWwj<7V7-PC;(<7}}VAgMr7TNlX&b}I4 z0+72U!)S-V$*win7ddk6&)62>=z)EE$Nu0Hxt+W{^Lc^U_Q7j8bg{!(bIN%xr=aXf zS?rwRYs2$LWy4)M^V#6TjZ0^T*1rxXI2Z64a$tfI;h^?)_Skw<$n6Xq0-e^#0rK!^ zZPGaBlGg%1kCIJ?!V@;v{mu>f@ag975!~(sscndkoLlJ^gC+i=Y3S=DEP^dq4Wgl8v7}qOxya2a*2ipZ#@h2MWTs zca@s9E_+i(Vz%hgwko|ke728d804`A&sYKw$)M)+AcxOl-!FU<4wxq$3q0_NNumfx zhrhM&9Kv3B4SRzJz*g^OPM{^Z# z9DBF!bS6;v;moV1*B-IA?CBCr5|BVUH|}?T_i}dc=m6LQ2ak+iCYTQ{U)uk^mo05g z_#L|7xZ0IJVEg8>h2zTbiajofry5|#?!D1>JRmsHkBkAP=##@8EXV>p%K`VhcW3{? zkDkdcaW>WFa7(MrwsLr!WICIS-VzLj{y^f8}`7uFK4suivQXd zZCAHN#U1{2ha?%LA9e-#>E7wD%)x%P$lzlsR_SNjPEWr830(9P(g<76E^ z&8{xdD7_pHYpcU6*TMU!rXSpVeI`R4+}uyMd-51RSo}zAZwGh_8oQnY zSvwyxO_I?+w=C&;_JaFVIowHB+%3t4<68i0D(6~@mY0`LpN=+`*kk_0FUWe_G54)8*TQl&u(N(;RP4s*V_wvkq?r5 z_&C^hcW*Cp63uq|$am{e4I~IGdB|E-yY0x1Q-SIkW6$r@zHZS|?l;e?wb$HHGM)f# zj)c(x;k#b`Do_Z9?|h{%7+=OT&Nvz(=Pzo5}nT(o9* zAo*%X0Y`o$S|k1{AWFv2DLBprtM3<>_7VgfJvLiM%O!X(!BB;l>S&2VJ9cIJB##eP zT`vG~^Lp(u7lH{oj0FEdToZ@A{oO~iX@gNiA$=e47xbQQJ%1FVFs55V5FZ<0h^(88 zH$>V^@s*v{$Mb9->0G(4b!##?b9~JcRm{@Yu3Z^-dy``K3|6R@ELbL?53h_ z5OY5D3~QLzh_vT#=H|sPTP~Q{%=){NwbF|~y;SDq-=DgiQ?saj$|wicuiyD3#rj@- zvh^M-vp59fxS}{wKsY}jzm0=nkz(Z&DG&Ek0E#Aw*c>RE1Tzq6lyQ2XGOowXn-l6$ zeNko3ie}0VA4g&lRtFZ`%$SE17RI@53@r)>u>CpLPrh6kL3866-q9TFFM&nafwe5` zeS3GW_LXsdDMz<5e=xdy=~`_`C{vx5p76qyy|g{KQ0Ad%GiJ=NAXs+i*r7Oc?ooU( z_{GVPQ5_+1TIoML4$%=zjhB!c$7`F(DF;S+${u`a-|E}192-VYXvhfJSr(zz61Out zwpDV^d2s#igGD)B_hQ7IvC#MSicbC%P}o#~i^# z8I#f$I6&cM*j(#b4g=biA+qi~qjo8S92mAAt`+&Wr)d@Qdr`b&SY6AoSW3xY>!LV~ z(b?PGIl1(ovOd|h98fYzAH)RR=MWlnM$z_LP~wF(N_u4#D9uk{j$wZh{ADkLH)aug z%;-&GhO&f)--Po*FyTBw2d~@YVerx7WSqB$ZN?Zklsb%bPfW}8stcYzRVGL9T5H2L zCxl1gdeUK!b3g)N9V{FK_4Sj}eOIP&iWg_G66AH2Dqp$Lezref)ziHt50lad4$~yq zy<~?m7EoPk!5GpqlAVSwbBW^RWnGJqBKE+T61c3l0R+2Uq9ClwkDibW2xlfx1oPks5$dRMt zY^6+5q;MSuC`W)DBMWxECuXMmh<1uh3DJ`ym$c+T6%;C*B z$8R<6>Go?~JfI4S@a&aW9Ew}SIDFKjRis`^kQY*FF2@_ch>kWjzdhkJ2foY=EjUD- zuiw~Q+3kVg$pM;TRpeSGp}yr?i_jC~6ehjdWho2L5S0479>}qG{o1wFxt#3UREPqD zwLRtxmnCjf?ayTJP=$Iw!MtEK851cZzNaix>N&Yb(d+%(K6XC`LS@Y<5MR|l@6>0f zN4{`oYFjZl_1vM>e(O>AxC0I+O5gL=tXmul_Uv$FlgEy{)PCKJe`?nw+gA`In=`oJ ztJ+a;)9;J_+ZT@Eqlb$A2mjke+%MF^>3WKeo~H0#3A<_*ya%79a0xu&$CM1bXq+X@ zWp6DsjvYM`9`-+A-<14CIoyOSd_#T@KRvHJY;f(y~xhwr^Nv;fDI zyq^x=cO*X#hPU^=${CRpZCm%EM`eB-HUe8|fPU`_GmZAQ)fB*NjEjzNh@367Z1`ijk-W|@$5TPh< zi@%a<0=8tS7l+!{wb~#nzD@z-0MTm@pP+z~{pj@UxzEF$rSI#y7rvZw<|BuXj?-GU z{KlMGa1Xzj-;c_|$^k+aQC1xw2~H=E9uD?pPlP{vQ&u^0Z{+aD0|a<>mL2)0fAS}* zox69HCH`T2DZ?VCk)R)u1i$XY6V7!Is~*MfFfmhcbTm8eYf0eNcpIcXRmYsF% z*ue=L7bhBrx;cP1`W3W>!{$5uw>9HIc$xc|u9?Ck_ZHo$Tcb4w$MWK&==xgH(8tDT zNl&qtYkJN%;hS;D&F2})g(^p$pw})OX9(Un3Ucqw9oHXOWVgE?UE9KTjPI} z=OuG`xgJe0#=2=P9>ZV3eYtC8h`gk+jP^^&a(EeVjG+%+wKoZBsWx&&0QH@hahL0sbsK^Fm9?|B zEZIZ*j}6|)T(tMwl06Uc?u;O<4P1|3apHjG;^>Jd_c?=dCIBV=K{Y1Sy~a@Pp;c@0bK1& z=4O<(2^3@+C2 z)?3arxVif6FaO=@*ukR%H_qp!onr8bC!sGmCj)UDQOW8Z1tH9-V0Id%He2@bsR`kPbzM<&xZ^z|Iwuz8>*_rnGK#ivbE&aEy`}@P=cO7z9lo7o_~#|L{Mq{^{MfM^6@f;2@K!wxj*} z_~Z8`5b5yF2dzUEGr1}|Q($j&_vp@;^exUVGV02$OWn6CJnD>-*275!K3Zg;4~}DU zP{P1RAHF{sbY%Hd5d{||-AdWrFTsQ3_UmX@V#n39r&eFL#`ixc2%7UiHZ*>*we|et zA3iAH))U%8FqSanFl;*3`s^8>J?_xkC+Pq(3a?*Hhv}armirxO2G=D!J?@#e%4&Y0 zWRo+e-p}#+@#}PNuMB^(2SF|KtDif1dsH{ln;a$L{pbDzrEn1!Fh^Hqwu|Knulo;!Db^gQhj98!p%?@8a`h!PCrJQc*W-}XrI6x(51>ybNs&S?WTCRwHh z%Ya=zi;pB{^vx&xA4HcNRQ47=4k5Sa3$O2^VB`fh642(7zIVUFX7LStZgco9sDt0# zXz#qvpG)WMGf6`9^>OGm&&_MLdcpI 
z*;bsb?8XplXoVwhZ1cucG4LSWk*@wY`|+(ezn3mu)poogK+HVQI9QN7oV09iIL6UL z-*fOKhu6y|UYQJGm6K=)u3Rmd0{&`8g3oSWQ&ssJZ*||Dc;UI`h3CZWqs2|lk=(=A zIpTnBocN99^E=@_TY>F}W@SRdMLL|89QMjMT0q(>*~l{d*>Stuf30!p7WCq^_(jOJ zw`x`Xj=RBJPjZfA&O&F8;fZ&`Av&fOc0<#x$ryqkbXLKLV=uot8TiK8nmif6TdQL-FnS=&$R;PZp|%0avp6 zVYEX2Or~{kkvwrT{oQW)n=VEV!mG*B>+!!9kvwu(WOi3Rr2Pe#%=c>hY;M+ymbAs8 z3)wGz9X+Y-5g3ER$lS&n+_0FgomRzW+4pT`nC4QNzK&L}n;U?6^+ZR1Rdlq{7;@szR@Y0UdoWafjPs&Vo*d!S9RgnRB zHNBHhPbv+0u%kVCIY;X0hrL*Rtvn;{={dmvyR z1&@`Gvn!=@GnM*j)>;4ZEVjM^hpC-5>mTbo&eEJ{xDV`RQn| zxiRez9;K2~&|js<=yA{B(U1wBmZ6RBz+vk;b?Q{#%aWHgRCUYnmHY&45WmmpR?%{gT5vD= zyT`E&znjksCGeqjx}&rI*dL=uXXmI2gNqLf2JhUpoS#6Buou26*aIgdDY2E=_4bwz zXm31O@JJ$uN-aUGZ@>A2)tS?uL}%BBkGYP#$^+AulV&t-vxF?EsW1lP8{$Z)Jm0V- zGhk61TsfE(&=9X{7(lRdXvqcM&gXMQAM{;&NJx1Wg!9?F?_!Vli5I1eQ+$NevoR(+ zr+K<>nUhD%yZ%`2ob@!0@1JKEEfX?k<0GKu5vJ;?&))TSu&@L#*UXSzWDOiGA6=|9 zO18|?l(q9^GAxH&mW7eQ43RiFl^iVdV_cM-+Cw?c3ARWiL}M&gQn7#Ul%zvM3L+$& zEW8kuz|vj?k;|AsSQk3<>~>|Dj06hI{jw0o$O!(f2RnjIR^Qs3#qRZdrJOfAVAH`s z95oyX3~J1flYCRHR+{qaPKvAp@EB63&mru3FgF%O+YfS_JF}MjZ9B(NDbmaxEgR zt*)Q`yb|752fly%pZ_ifS>|=lvu!ad8G`yH&mOjMvKeIODw#cdp)#}-3K_S1Q`Bte z*N<=IWN?^8?JQz~j26NJ?4DWDjA&H~P#QU5to{6@^TUK4U~?%a7U##{T=03^+V-c2-YyU`PKgeN z;zSW$-I_2u_^dDSho?RbqnbnTWh}sX#t9A8FS7$9bCodx*WjRo{bXLi0cAvJj4*Ni zhaZ13>!Z+3)j?x%qH+!`8z0-?9FMWiz_Z0m7B-rFVSNtdlncH1UP#H!1wEI^ig{Z0 zrNaR!$TD@bMZhEpCB_9>Y2e0ZY;l^EF%oie#xr&&L$0}+?-+YsI}udtY)!MteiElU>Go{9`5*ZN}%;uJzJm)!&hq69lvukBoY~6M|THHLD=yzK4wr!PT z=Lor5r3?rDkt2tPmjCdFKcw*77(D%=c1#RG)iF1_hoR~)LGXAl6xvYq$rIX7)J9E|o^nOrwjTQn#CC^M96+YdjUcyGVMn<>5R8;Ti) z!BzH8xJA$goMi|-$7k(#He`UJVI{Kgb9?Z*8C)nk6x6+)m%wOdXnW3*$(LNpu!g6~ zvhURc@py)X3n%dP_{0%rxz=ozEzOowb#Bi z_^4XqSHJz;IL%KSe`VlEF*(>?DwTuRzy9s7!~c4Qp7j0Gsd_ou;i3Q3LC~i^|73Nt z^}>}K7tXBy?(cp+O8Cn!pBRORQug3+vMwHYxhzkJef-4n@N!o?@Z4l1Dnq<_^~NZY zOAC_>j-E3umfByihA(&odb95`rt~ai$hFk)rYwRKO@`O{99bt%92+H2Su!Wsd8O0g zh~VCXc=WX!SLOgw?RW$ov zh`~wxBU#FE^>#q-9Un}#L{N%VopSv=mUwkpTuJqDcE;b*TXyk-yB!`dc zAOGPGQ1-?VN5=%Vhyc$*}pJ6xPXnoFXBmPgh9*2IP;pJZ7EdXF|@qazCjY)3MJM04llsqF<7i#&WS|yWV>;*>_}pre)uoCw+N_ZJhov z9slzur0IK56ztGC1W&?r2cBI6AJH)0GV|;+c<+9O33!v+aBIc{v#uYb)*l>teP#`_ zitbsrB_0;um?ije$jwa+fPc*WjXO)}8#2?&K6s3mIWe{cA34?CBvYyf%|6bMCe-Ki zFWT5y<=C`t>0@)}^!8hz%@6`i@EE<|zq216)(Dem(4h1*PF>I+cy~R$ZEv!Jktt)8 zlbln5BmSi_eJ>YK!tWVZ_RIZ@zP$xx$yEs?uBYS8UbkLyj?T9X_=b9m4>2jd(F zS0y;;|NGh53#(5~m*ti1u&zoI^g$=33l6@uZ*-}TPMsQ^k6hgl|DwBT`DScug-iSP zO18MS`op^ySO59%ev#v$jJnppxpr_I)hfznuYy0hW=?4HQSI%{oja4vuUfG8s>OQJpAR)|I_NzQ=g=xyj&1)_v|U#LQiB4#`_t| z@abExq%Y)ny?1Z1rMCB(wc?cyETvCA2tMxB-UUBpgEind3h2K0u@(u=>^Oh^-0GuG zPnAf~!Opug#@TSKpNw627yQr3akCaQ(b?5xnasW07e8O!&j|nU;}7Cn@%|TUC05&k zhv5P{!rJ#8J{%rj8Yh7BFfJC%jYX8{$LaJqCuiBOWoMg@y#{Cc>a_yF7cQKcG3>+M zV7-=;{B%zF@bt*>S5`ar9$M`>aAI}h&F`i+1dH%Vf&yFO(&-PA=T#KtxY*vBmexR> zYm>7Dk5P?qt9CHI|K-15eVI)Bvbmh;ng9F$pCQ`Z0Tv18c|tJPoq*Z=c^ z40mQ-&H=eo@SYw?cbCcdv%mPOlDaO0!zGF&^C!D}_z|8RuL>{l+~--Ucy3^rR;YX& z!%V|zdbkW{a)(1(ma0QWIo;MI-@tfyb?Y?1_!&H>G3Xgyfo;JFa-<*a2s}@}Z1#{b zZWr*-lXz2l3H+v8a@66mJ$$u`vp=Wz`7_ye!Tiq9cMjL~opTB;pvSA}o^(cIa3bBw z!RlUo)tM%2sc*ma{nfGbsrO52_^Po5wJwCqGO|@t3k>Ya32c5cK=GujwK5tVB_G1+ z_J(tkBbLJp-w=4m%lQ^mUA%O(^J&<&&1nv2&nc+lX#atN9Azq78;7`k__71}B&m^W z_DO&Y&j3HVD|+Sk7`?5za7Oc+@EZthvQ;_wr&5-E8SdhNFV!1g6;JHFM8kUS_nqJa z2ld*u4=0eqz7;622aD{6T^yAUW^Dpm=HS_I>t2p#&(?Q;Wb6yIGclOO+i=l}dZUf&rqB?Zx& zoX>@iu@_Y_+3RuEClko%J{a4<{p7@)0T3O@^0z1BaE?asIPGh+>yYq)r>f-HRB&oz z`^F2vk&7n5gM*tF?;(489<(Gl&>uN9zslbQpCp{^&*t5{DP6mLbbzOQe_?%rsP;&r z=9c&iT*hZMCX>;fGtbZyKZ@!WvQ&_Q&I3Q-wSbro3)cM6M<0flFaDRrw0I$&2G2Rj 
zuND+=aHY)TAO7eEtIf$pEe9oLUAr1Br{B_h&<32Qi=an#8G7UE(HTH6KH|^fHd-bCq|1=pA4u*&1 zmmBClO}#y3_ofHdVp1j6&iKNTFor*;H;2D)(=#UPy=R!e%4d5{pSWJqn;?Q@oEz}G z>jkj1ce{S7E(6&WyLRtueYHx?#+3QK_rS5C6}W64*~I*M0#h53x!TszVIPD5dO(H0 zv+$Mt)#~Koz2t6dq)V%g;BOjxzIk~~>!WZ-n^^SG;s$Re5O3VLZT0Q?`_rLLfBNz4 z0Zn^-)&15;?`1o^*jx`CIy$^htFCw6{&l{ZGRnJ(&OpABi*Q0hU<2hhEdkI30M!SB zDDI%jx4>ypz*7{WxoY(F{x`p8=c$`s z8AP>3cPQ&39uKkeT2%I6OiY6Tv|$6K_DNX^ zDN~I{m|aT2zm#+8Y8j!jK{;oXXUittzUAd{I(_uXCuK2~{Stw4ux~88;NioNv1efH zsb@<@HgE(7FcIC9UBgKv3K=ct9E;(#&KHj#m2Q zk}*I5gi8pU(})wHRW#lU3Bp@BAYYnt{f|$59`mbgR3=;wJiTsZc;CG9u&m3X#Ig4a zW!PY9hs#csdH3#e8Suu01BtRsXwsak2-=(OE-dB*m@z}WCB0|xe$U->`re_+6{7cgKfum(Jf?VXiwTixnV($A{r zB~tfH!mZXfuHCTOA0D1oySMSIi*fBQ74ocFTaB4gC$mAFb$ET_hEvCj9O+qWjzKfd zf$OCrhMw*W0)nlS%SneCCYBPgTFNPjCIWYi(E1SAIDtYe)U&j=86%8c9n+JQxjy*-HR@URg z-{+jG6jG7TgmVYkIE)L;!U*5W$SUnqv=@3M&3!|L5beM6@+%3urDaDmZpE!PWYjWyv5+|k}+?JL5N$g|BkAor!{ZA#cXlJR9c+;y-fT6Q}{i-Ew|4>udD zd*6tZh_(=3MFH>Ix39G0J?f43Z#pN>Pj#YbqDb@GMmiKSJBomgW>a`yi#Uy|eM! z`qkTSzq$JO)6ZwV4ygX!haVMPxoh^&i6YO`jL+O2&JoyMJ_Sy)rjTL7AZ4$;O7$nTz$9Lx8$&tP^N4?{TifBhTAo#$xlw(^?pkfj?X~ zZgc9Xe%wiBh!>;b`swvq%YJp&YmK9}Mgr~c;aBray!4?!?ttofssCwX_&L~3=f;Y+ zcBl8&be)_9mwYAye_CI7m}h&VcktYM42V{Kg?ezs%h%WNEo8pYFZGNz2WYDeKwzt2Z9Vga8 z2f$SW%bdLf*BISIx}kNP9C!r<6mH{f42m2=3>j;ihnkwFs{I)@%(%gW;zRBu4^YNA z??gM2-}I^6OTaAKqiC=kluD49Hy+IxPATMY`sl;o)oNoh#l};U@&C^Dxpq9-pCL2j z>G4x%qV+HI{+iVrue=<+yRkZczsT?Uvk%|{-2lzAzroCzKJH+St6AkeLPuj@Eh*7K5gQM>z-jXBMYa*ucQa#)ss1{-u>>6SKs}?AFrp_QD^=_%ln4mx!HAumKbhQ>w! zTb|k)YTMtn-&Zu--)b}KV*-|%BpgRF>Fr&fzU@PY;^JANPuG?SZ9ksPDD`5|m$D2G z9r!HWx^kfP#gPaHJnt+7`{aC1eVl+H68>H;T9o1Ext?Pz7e)TFKmCi z-SFDlzGF|#+$(~2Ip@G$u{S>d==ZCi{Mo;**E1M%r`-XM`eu|C-G;z4GvmEb>qn#aMmG)0s;Na$p6H;WUSUeDGClD*X$8` zBc3{RIK1DQgKb9+&|B^QqlY`RH$&@=ogFl~bz3-^{;+j%@Ccq`(=fkv1xhJXa%K2g zwx8yN-rp?H6Wll=(Cer0;qKuF_cP?)DiCAs)@14S!;#|$>SuROmK`&nC9}JXfb>{< z>-32Z@hx&&84WegU)sNaN)F2svk!Og+PylpIyw6tjk6b*3@RLh9L?ESDa^(T@rF0W zw;s!=O0McsV>KMGm|iJ6#4}__4Nfh2SibA3wNur85d1TpGA9 z6$Zlb?kjJJ*I&7KE}cDIQkg5IOthses6&gLQ$WVK+k9jQ!&iKP94({$V&@8OXtgrL$7e^zh$wJaGRBYm84m)N{V%^USu11(wmmcs&JX>`aNWa$v*?iP_$Y_V@XPcB za6BFTg_~y@H(XKKA|<==RC_+>>Du;clMSYeJq^7-eE87dzq1#>h)i*-KwQqMh;DFh z&*f0?o&$kpT&Surx>N9gAK=9fbU8XQGE8$Mqgp5#^G~POIcx1ex+r;R0vUQ2et`ix z=?sGB3kD)5(&Ge5ywG9N=c1!*G=eYiql@8HuTJ#;rR0yH4+O#BQ!BJ{+u93LsUe`4IuM3qT7cJz3{AmhN|xp}s<_uV^R>@cq*tIaQTm|$bLk*A*ErnVti zQA(bTX}wUIplG>s3EqUxNwH|$5cu0pr+s=o<41FEKAS3YvuF42N<26qXE>5yG^$xkLUUf0gMCRoD)&;&Fa-xH>|$*-a8o=q!FpxzNN^>ug6KC z)WMS-^e3{P(!pSL?8K=IY)2!EOC%ATf~OPR0yeYep+TE(LAN$^f596JW)yNO2j9Kh zb;bSPN_g}ATt-RWVnhk7MZHpzzHR--dlp0FjM!DlnC;uP#^AR_bZ%EB;?zWlQeGa_ z{L{br6Br)rumZTT5VlfTg2nbqOdwMK{K1FABnc75)e_Cvy-!GfW#219NM77g1bs^4 zM@0=FGIwKmYB)bq&=FCT9)90#OunmGW-ZM{!(c!eosE(p>}iB-w%%|G!k|sJv@`ED znt8pkVFtQ!BC=E2jZ|x#IT*31cGn1#>T|DsB13TaMX@m+xQx*SmJEKFcaik1bAx=? 
zQezKy?O%P(_V3>8*`9z`)|)}aT<)}feVfrbiT$ltUr7MWI8=Rl-bJS&e^WFXd>|Nc zPB=S&Fd~|S0(dPrUd?ceo>(ubVUJzRK$>7<9Fx}Am?mngLv9I3cYBwYTc3nj21-Dp zbWT)6^x=sN-M{_#lLXHgWsHh41rB~+mnBl02omFrsLRKm4CcX>k?ZT2l6DyTc-%3u zG1lcsfqxW1p599dD-SXrGFGdpxjDFqbpPky{HC!iC6Rck9RlOfK;CL6c<;zi{_9`= zX7$6o^s-)!MF(ljGF7$ zSDvH2L>ar1u;zd*LhLs=5N#R)ff{=1g)qECqZu56$DPLTdXACze(?PqH2X)1KYjW{>;7C0%Z;l)eDFbd|J3RizxY}GmKJ>f zM0{c0IB3y#&RPn>=~HO@o8t_CAD>0LzWnOriPB&^vX?oo2vwY?iB#igaaM&jb*)F1 z^=h5b$ZyVPbPB(Ro`;izLkY5s<>t*{Y`r-FBN2lOlxPM5bYM}<(RG5DXi+>t3g+#cO?W7ypAugi;Z0dxJ9bv{E{C>$zbSHyqtl_0 zW87{p!#4^W;rT{*tS`nPMkkIwG~(Lz4q|F=qA3hf4D9AWaaOm{x^k@kyTAIY!BMBc zb3$yZ9g(xwa;B0c4t{mG2)-hGWC{fj6tf%gN*t*_P|xuBL2aYQ6b?#+z3Be%aD1aV zgO8etgl)!HANyz8LH}y~LEq6R?GoUO!G6R);jHgIl(p%-aSUHtsQs+F9Xg-(=Qze{ zSKk~P=)LC&@EDhCX0`C1SV`++WVqWx)0e);U`rcaPNrM>*B1UbO*H<5joKc*(?!7( zjLi+db$4qSGlRV`QtF1L^sF{0>gc&IYl|Gd6Fy9olkpo<{o2P%hs^|X_wnw?OqsBZ zYl;3btLD}73w)YKbMP7GK=8j~P3NO^RE?q692Pz~X`Nkv1h?f`PxX%+4R*^iHlzN{ zaJm4z8AoPZvpzHV83(0%T{K-Fh`C)2e;q=0{kpPHl_}g%RCnW~_%kdpR=^?U39Lut zwTb8}BXu;Aj5WZiPeyA8*2}0nnj;l2XJX$RZ9VnP5i^-FF*cWb-?K_nEGg6E3QkV& zS0j3nACkW*sNw7Jk3XHU*vkSyo{JY9iv}Gyd}8(MvSi>szG|!taxF-Gad<1$kRn-| z^nmR-rj!{GT?OOS9wk#77~Uki4W6Aid3;&N@C&|(DD;6dGRD+(hm+%gjaK0W4lsnr zWKiu5sCplsT*%=*ybBJs+|On>x>>}s2<(4b(mm% zd~-G$lQAsh9{7Qcb9l%?9N)%<=e*cEWbSV&*+DVCo$@PS0e&txD(h@X*+xIm40s3b zJz~=hz6{L>=I+C{Z&UCuWblInN{i8ZUMdUbPO=ol+txTaN4_pfl5=CB*U1ZLrhUl4db&(4Yk*FY`RtkNUB7ni(#+%5&1$G7e}S`zzpD&| z%@eYsxXG?n@9TMG8AZkZ(T{(&`t?8l%_#T|h{gj1?wlhF_ROnqz8ei%_7yp2 z+s>C(8#1W1vEmB_SOi=8{+;Ol`6Acty~iHg*1PK3hoc!;M2j07*na zR0qEJpvdrRtMC8hFRC|NjppX_bh6Ra^v6w`JB&8CfBDgG+ZUBNO78jLkAJ>;??*qY zluP{UJ8M^875s22`2bD`oFIoe;PB^v`j@MJ_>X_R+WOk|VB5|QkH8ZiTr4O_L@@*X z(W7M>R1V>-H{V(Prws0K`r29|Ru>rKGYikaf&2sa+xN;y3m(?(QiqAw?L!*u=|R8N zsK4si?u&HIe)Vmk!HjS@YU1Vij{^nmwe>lyCOWkB5wXtz%)rZG#<u<^vv*Nvk2kdmH9?)-@apaII28KM(zUPu3q?h_22$C|Mw|L zAuwWNGCQ5Y{Er^`YGh@0feqn~sN0RL(a~cElCQ3=zWD5ektrB&=>_ZBOB%+UHWKMJ`eaJc~WdLBxsvbLfuznAAF3rz_LGx|Tg7LZY1u}$(qSDV7kYoKAQtcfX z){J-PHJiyCyqHYoBVzh;G(iMD#{>FfFAFT10GIZWpbqo_t$NV@<1}!7gEn&I7~oI33*w35@5k$7;a{mM{n?C3xEkzFt6cztf$wlY6oonR0diu#{}GvluU zvd%>BE@a?;BKU5ImK?6+3WwLF_P8le&p2g%amd4Sa~@o3EaYzM!f~u0 zP9CsjY_*@)JR$noI}2WstCU1uUpq1(I*BVhVr{>r?_qPCB%%OH+%Ha<9a@IYnYau&OAxBDf1OrOfY%A|O% zvb6XF*cuz>;7j2+s($oHrO8gEYc?KpoBuUm`0DQt?j!_1>KGa8(*m)!_eakR`{)_p z``{WuZX&_8*t5Mojfv2~AsN<{E%EMr5^7D%X3)&^qF?Xzh-SPu%xB&iQFrb$mK)y~ z-ZfX-eodeLs-L;mujN`dUH6`rdw2QnJx}z1*IMA=b)KDPJl4p}VaD#Q#yCv{hsO8U zzdg7*@=c26-KAcRQ|X;fe=)CGJYt&Bw%Vv0Loh}Ly+5}#2HxgoJY$el?+b$Q91^l` zWEjK1AGCoU+!0+W1x6%6rv;}-Q*6y<(l$Cclnb0E|^J)x+!iQlI5DD*$1&Zfydo_VDWBYRO zW;GBG92|tc5J6O*-TED?zWSDoMQ`q3QYU`*`H}iARU~iZxeQm*#wY-YqeZqUz=ndf zI7C`mYi-hf2EzvjC}?m#l(L?+;b5je`oRy{pp~0xo=-;rp2^ra#48411IMn4qDdX9 zpC`f}5yi^WNMm=H-R6xEfo*RMp<%e-;YO^c-}+ixo41S5r3?`!5lDw#D0zc;xwieq zl(yg~gWz;RKV{LR)HU4r^b@PyMcKcwE3aa>V-B;RF$r_gPmR`&{TDo%9H!uh5)n+ zUv0!P=h|dP^xc{f9`#RfWw27ae~58^Yul3X;y+oc7QvoDieY^jBc-)>8;81Ogh{Zr zr!heC-_>|3;_q{yES7Rjz z@qH)YM`sdT60qaPp#VRWz;ZW96NS& z_37uLkd|nai-m^YYi-UKy>Fh%91vEnQXeEDjTSmDS-&o*CCF%I`p zYfnZ31IwmoUYbaX{rg|3Ht_SS-~ILvtGC~HBl^5;_22wI{~zJN+SQ-^`JZtT56vg=GpsY@i-g#@bI;W8KXm9o zI2X^1=Mjkg;0t5?LLcH!9IE$g(|Q;)+J7`-HkuCK@!j=hUnv73^9EfaKvQbao{3s+ z@!`PmkbW^jqnQulagR60%NYiq2=Cyb$a{*x)H1Jc&Pg@&_U+m`bp5L%2emqaDMeFS zo^F`s}8I9vn{cZ27 z%Lm5jF}e=6woyPFI_}Q}PF?E#Mb3*W7C9C$gB!zafAoQaJv6lbvD%!hr3~fc5DY>L zrK7O49z*YgcW?Ago3nO&LCc>iYKKF|Gj%?{@wR&tH31tKT!$gey~TK#Q6s#YcC=|N zz3;zSXP3wZX1B-sGv|Ik8Xx(8bbv?e(whVBa05*oo?TOv5qp~)FUtzby#4e z8SFUq88~G8jZ?oq81lgM#rTp8GLV#o=KKaxphH_u9ImH9sNg|TfhWN`ymWnF5Z=Rg 
z^R|8*XOwSmaS^!QEC**H#*8%jC$I>v|K|PoYx~aXr$7CZWb!r9uF7U5uLyb((T!hC zztcwg$FbvMSa`ddzYOe$j~p49Ss!ra@o4o#<+grgJ}KY#?=N(&M+OHU?|Ou3j9z3j zG6%dhNBhp$dop~J(w}aXZgb+;;nl^Af1K82XZq388QJ!{w0+*w_nl-T(VgRPNPZLO z0Ds^ECj%O}u}C+iGn71GsC*|Q^|><5@VsqXU&_I9V&?0;twpk~ji##;&Uj8Y5LK$K z_nKr~Mo#UDIz9NwA4czFFd%osDJk%04t>7*@Rxs^%=SgNO?|tw4s|%+qeRuM!)rTMcVd&n@jDhd|=m#0GstupsN@fDE zCmZM1#)gONSpK@e)K#nxP)dh zAl-f}V{EXG!?b6^gYGS)&inn{ULt?@zl=g*D`aTh?#0Wm-RSw+v9CPC0EmZr-(JRx z;DIq(1A680xLX4Pe$wMB^#1)-+uS|*%&(CpmSejQgS*SIdmwQ{7j`HUKqHe z&rg)4$%&vu>$NMvtbUz;v$6dwP;aS89zAc~!`m7ootQE5$>z_wARCmylGBJ>2tNfR z_>2q{9UdAQA}L`sM$y&<{Y0$8~?bYGAT_h)3tIf4*J*_*Q^X;7u01mI|&Fo6ol3npx z*(%2J=G*U03BF6|u=?`Mr7{wxjr7*4*T=wdQ=s%pscVw^9jh<@#IDGI_a*V)F z_zFIpy&|jOEq#$=!}xn}b!6XT0*;99i8S`U~gqSbWZYJXwa6 z5@F=*34ZGyr_;^!1bB$&;I-Dn839VojV{(&Z0tE_6ME+HyUQ~Z2pnyKhj-gwhYuZW zjo%%bBna@r;R~&Egmq+*VRMTt_Ec*byuoQoo3u=qvZ>LNWss4r$YgZkHR;S>r}N)z zt(tr`zMW;zy}mke>e$>DUA{ONvh`)dVkelOulASu-)_86+230?Z%Y?ee)s-r&yKR< zn>YCY9x4&~by@p2(7NHfte?s2 zKrOfHZF~ZV4NTZqGGuE6+oD}>(KX3HZZr6~tL@swh%@D_E?=(Od}py$eDz8~7p41BU(gd**%s3VI2^wmv+50his@1_^aP`3U!W#@4fTZY|`I<@>vR5L>m!*$XxY> zQ9Z&fn;B!@_Waf{dL21&e3%a}>eInoR1O$#e|~31$Z9@iz+{j@^f@?e&%-B9rnqE8 z3-&u+*gFd0mxqrGtRCU__9E7Q{^Rdv9NAMk{bxmye>}KHNg8|({^lssXZ7tO7+5Di z=C5r8Y=M^H7wwcs`pIycvjM|nT*tUa@dB@&u?JxUrPs?0aWLG6Pymj+qj2+H21N%^ zsaMvZ2HtU?)*IgSeD5ImKH3;VhK++ZxE})~NQEU!yX`TIM9no32aM86RhSco)*p?x zy%O`-o02U1;P8oK8N(CQ%BHwmHU+^-M8l2tt^MeEjL>+&mI8dcJ!T#bE~O+->8BVL zf~GGFTutZAx3{cbE28<;efy{EgFPd?=y+>>IN^6oO2G&X?K?s-rmpVq=4WqLLS%bH zIip^4yjwPg=nxT3gk=#@A}=ufg?{A#YraauaE^_&C5CJ*`ft`5QzGE{eldlK;B9|M z6DE8(Vf;vj)bs68(IsCeSTg%CWQ~C+7*psOI+bqv`OkhD6Uz{s!S-$fm>QFW8ujfk zbVg;-Ko0-9kx+too++C{~l_&%s?Y_uTR)xII*^9f{YeX*_4fYcb?8r{^HDAEjsi~M9AB3WwrZp#cT*D{oX6Mdb1Ea4p0u!#^c=PoX zNREsrN6`D^lh22~iV##M_0>1t%%J+r>VrT0ZuN1>skJ%Z;XZ_%s$@k+64+BD(OPSG zb?TtzxXi$?Ift8lg=c+prUQy|#PaNa+p~mFN|f!)A@xMj_?N=NmtWdfJLqi=hWOsm zBgf(cw^uK}_HyG|8=h}o{rR8%c|xl4T8!Y;zdU?!QgmN`{ms?Bz56Dk=yX{FgdRrq z@4Wj?Fi9Y6-Qnu5-~UznD%BKGXt~!t)#0sMx3}z-)Jh;b8+~VBpcr1i(U>2E-#ur| z@1`8s|H^N`d1c=4p(iReBb}S^(_yNzbS}h`tuxr@i%@2*WoZm;?LGCDz~s^Q;T6G) z;sIN}E&`n5NH|q%lF%!X#lB)_5>*PX{g5XJ{g>L;4C`oz2{J~*OZbO>;Nzmb;hcy` z3G%j*w*O$S}_(#cTf*dc# z4<|Ie&=2hghbC$~xTXmh2bQ_i_U?-LcW>z*-9hUY+3n#Fnk(K1aNv~et&LfS#o!{V z0&N<;Y~GF0n(C{jX8ho7=zjZ4KZ9Jg<72-J9=7)LNOQJdW-S^>P_UGJ@N@3*)>j+d zGdFO>L&1bn46+nRdvbx(a?hOL^S9*Y-bZJxr7_RAfTO=1Xa&#FL5C(^y^_Kg4O`b* z(D8ogE8B5JR-L>K0z1vjgePdVq^rO!{Sbev{rC)z_U+;D3bo~04zxhpaRK~vO z*Oi><4%w6m*6?Rsb^9;-go7Q-)~t!Qv^SJ{pi?OIhE{+6#V7TLW;dtSVRJb7##`SV zhbbMwa}GVkb5ET*F$W*+d}&{6D$4QPL~lQlesnfj6TMZ6;qiFN_8ohM9zPb|Zpx^1 zHU9Et2JY=0P_1MEnS#;Rnr@7jKiQbbVq=W$x)Km%J;5R)(}rg!iypp_fo0pv8KyJ9 zf0IFpPIc&u&sP8VAO3&IY|pITdZ%(K;eIcL|4)_iv}^P#bW*@i>RUB&kFU<0JWvUb zgYA=btB>FR^=d;h_Lk>&t^VTQ{GXC<<9nN*pNP@B_sa-AaU2mEUh-Tr%9+#0iu69T z`pyr2I&#F*n=_I>a1i#I)i3|^e_kE!`HMNMl-%H8Lqixtt&lt}H*e*uw(Z3QS00U^ST}FIe+2nS+hOuoh!=t#7j12{C8lyOkhsgi)A9AVfvV$rufO$qPi!L zV{lzrsWRyMw<9@PfRBSIIW@+?mz-k_w?}&%JbEcdjk7t7!>&`59=3#y9Zj zYKG+a?d^g@@j84CKNtML@j=g!O{)Z>di!tyACwVEMs;qEJp(qf&%oe(JnZV_bIA(l zI{#RdLUH|wGt6TRUIo!jc#^GJa$5tUT`N-iz9Zvaf4#(}`+S!qPCbN<3 zmu=>;_=mNBJh;Mfwk>tStr>fUI`>~^4DeSL%jr|cRtG!S+Zub19p=RyyH@Y*ekHx~L^@L$ z^U*BXxq>O@rB>yMQH;zCP@A~Ti0QjNGj8Gf+O_WKDdPeLvD{mPTlX#87?o>uiIf=Z z^LFo8et-3^XBT6KcOG8t;-uEMNQOcJ!ndYbut2f2Fc)gaI9yvU%sUftqixr`vot@y zm-%=_`^KV!p3{zAT-Tm)Sb+K0-!U+Kef-hqtN+`7_m3&K5$X`;en$J}w!WA#xyXw) z;=CWzOLa@o&KMHGxgNukp2v%YC~wV!Vbd+JN??fLGEfqhFfoQC1`&}jXELr3Mt}9& z>J(wq^-~c5QM6-Q;wiL0Z1UdTEM$S`GzlZRt7dUTfRPG(H*VHW z(K#{S%SA7WKAiW9oIZQD+Kp`rDU{0rMQ#3whH)9kI8|!j+A&@dFrJTaGkjPrn-fCV 
zaB363nBjT*wk_jrycP2mnUe2fh~@T(IwOO&%$FjyIhgYBhPkUx5^4kbZ0-%~W0DV< z)rR2ez=^}vZhQC5H&!oK3;3(UYcgU=?YuPQB-CK~I^!bY`lU)Dd{e}TTC)TLcryPU z;xKF%`K@-_|NQ=k)!8~0+^SRU0G>AT<^)(#;Rw#9qLm0wjM3nstve1Q1)Khdi4qo1 zg%_V6I8gtT;*{wg|Ky7=CT;Cce)7ZBrw5Pz3&Rf_ji9Yp`;?a%Uj*7mo9@m3jJNIJ z3t}uH7lS|m=Y9Jcv8^*SVNryaF?O~77M#Wie8$k){Cu5z-N&eX2Bh zQgNjCe0xI;F~$dD#!G^y(KKFE06a801`>0OE-cskfA5chpsUP5-z1pW^E)~?iSdB& ztB&4VZ@yiLrY|Nn?B3liMV?;HA@ag%3qh?u9|Q-?3&X#2y?R~E1O3>1tJK80$1n(O zH><6WR?NQaJLYaZnPo(lqYXUC4AB9ny<3D_c&P+~QbB}1OR4sG+^hN^q%N=lbKDwr1Mvre&5ML}xkCBftk8y!e1TRIciJBe3Km556 z?Rxd)mxE)*{=>(|_;4>8W6p2BUd_~)+nL(4e$OOO{NfkCSiS!Gn-d*>@X(!ucMS(~D^JL5@dqZu!?p$_|%BDi;D zbaAk*v8;P)fAjA^v8$ExNol)#hj3k;z9Kfj%Avy!N_u(!{?+%s`<;mjy_>MLzQ_lq zH3%i^uRS>ehC1=jN8g@KKz7L57hipu&{qFtmYh$ScrHQa=YR6^)%(ABKc4kb!bd_v zv|4#A{8erKE74{=hEYHy+unV9TjT!_9C_k{M{{S?nmy4RmCe~vMED#K+xWM&M);!W zmKWm>>gk_N;QCL0^LIra6kU;k{8TvUfGD-)KmOui_<3{nCx7y1IX^Bm*K;{(j%4sC z0(zgQ=Uo|SJL4h2^^4Cx%Xofr`o)fvkfb%$~}bCtAxsVgNqVUf~47(MKL=z9Vl&htM!I z^-(s>I8EA%Xe1h~bP}bUz}a9X5{s}$fXB~>!cP_5w}bJdXW@qE;zzVs{Xw;I8NJcE zJ;8^f#~FFD8m*ix6w@6Ex|GMY(FL^;pWTv!sxf@<;fKj21iY?axWC$wQNo@%aPUCC zJIf4KKDft`xXj1=DW(l4>Oa+6O~D&0D>6w}|_7jdx_GV2sAh zS_H!xVow_fUc4B2$rRXC2 z{NjMUdS!{;6IshaPL|%AVL|$=^lyhSazx97VGLOlo}k+dN9%L)JsD5F)jP^Ooj7@{ zvQ8gI|JB8g$ENUZNaJWM>9k~4ZtFKJ@z5VSVVF$?LD-OM;w7>Pn>%-5kkAZkr zWCB#?K+@j1k^&76jpXZsOM?CpHH(SIH6!ESh*{(UlzIS2LoK6$fYGtZQA?HjuE z&70v;uxdQ#&zuVH*2Ke#cx(@HzW6(&%mRI5^-q8MKXlmMA7uo3vcpFAuHO9a_f~)X z|NVcJb=g@sz0V2$_?mSElH}xRY>dYfg-VBO%^cjQ6zh9s?vW=?XJkf$_5`=L-~RsU zgWvyhb?ERH?eEF}6oLMA?c6K?g^@k|6LXK}Vh& z95$73q{pnseEL1-mL+qK@vV#K3x@-mi;q~ht(9Uxj|cZ_`&N7In`9oyMn@&{GfKW! z{rB|+2sm6&rUd%@Y`PJ6(g{djP*NA3luCE_#VBbuK6E7++=gV|HbvfIbteEIUR3BIt8 zm1I$(jHAk!ZUh?<^MW3fi!?VhRn2TR4t-w??>wvR?HP7uy0W2ohb&<3_T~CuuWm7T zIn0k!@y@py(1WuwT6DAx!HD4!uiH?6@Y+6PuXvE*6pnHpFd8oQvm8y)bs2{oTQBU` z)AfRg%HVl=V|y|AO@#W!=j9OJ#+_mCt99ve~_sW}ssj9d(l@H$-XZIQU^gUOmTWrJnp zr?q@$_(8OzZpekZ+`jNr^)^2Gh9A9^6I*rB-Rp7;c%Eh z!)T37NWY@1nkyTN{b=uu%wOO3y7LC?%MBZqZroxi+9M7dB}4WlSg&6n)P$O967GYVM~d`_EPe|$Qg~5UM+|X zFLgd6CzVV!wA=vgVHq!i#jLeR`p4TVxVH#z@a8PM)SPb$_G=wQhm(&+Zf@Pio*S;g zX}s+5XqnUxN1$JH5>pOR+rP`ou6Rjl;m?ib1uy#OaW>?%R9n6W=6*q7Z%+A@Cj6MN zU)Le?w5fsK^J)mHZ-1tfddr0&26J%+*3P z@(kVQHM4j(GMH$eq35ZFuJ*SX+^*jZ389QeHnEf-hXdUW=65n$U`SF~7)&4*W+;-J zLVG)=BH9y77;<@g)WLe;x$0^+Cz0b;=u*#}$yh9{4HBJ<=zn(b!RlC%H6mXwT`6Th z!ZnA#^$sCTbdJ>0b9sN2d@zrexxR?Bep5)7^wv*5|FVtuV4@f>2C0mM0SmV+qQi)A zqQ3s-AAdbcvXqQnous`v0l{GgjDrX<$1Q zU^tV&a%v2Ur-}f*5urU@jc*5Tiv0Qbi!TTE82Y)4QldjK?sI3)gwhex2mpKs6C3(c zHK_jKm%kaq8Nru=L-FHX<_Iy)CA~a`7x8=V?RO^nQf=o^1Y0vg;mvQat-dH-)OtSh zTIT1R^$sYjq@VO7r=PMYmjwDQ?~tA$Hh^`jjaS9X!hr%^>*3>u*$YB*89%Z*E&M+8sE2a2V8u z-p2^WfL3JmlV4{rAY|%0gM=FC1hoy7aM=)EYg-h?LbR49dJWQQiA1jLY`WRoHo4EQjCpJxtfhJw->h;Wzu*=GgD?Cd+dePrJ|I- zJalw$+`17ITFEg~-;9WHK2gBFICvn(!Iwiz2-=h&88otQ7*x*2sCa~p%YLQ|5cK@s zkU@BRwZ*I*A^*)cpPIwUu4Y6y*Lx>R?{rY1nv|YDl~K`w9t;!e4LU*o{rA^oF#NJT z8rHPV=+$!>ei`ts_fz#dkr@eWN+5lmQ8%hRxTzkv(jcc!oJnB2URjmntLK0Cy$sPC zirzh5nsCuASDZ9|v|8@pt^K0sGa?*n?=t%HY%_2|CDC`=dGt9f`@B@C5j^A2p+m#m zH*b9*oKa7{sDTXXBE*$MfnO&woZpP5u8HpK-S^7s&;P5xjK1Gn{pPp7UH$$KA2!dO z<4inu?09QiG(&TwC{1KZ{i6*^G@yY5&)X@_&u$@DGf;_=5KS{2INr1&FMbB`3`Ni9 zF#P<}PolBUkAh^pBI%9otJd}9mv&Ev)4@Xr!;vD{dKVr&AOHC3tAiPEKaOvlNm=TY z@dT62n;c*kgUew4#mDcj{^lS4uD!h+dVS$ma0yUo2}3e^F~*&QEzvHs$D8w-l%{8Q z#$UfaF~YQW2v-hJr8Kp~wcqCM`kI8)^Eq5L7l|mE)SIkRiW^Jc^ zNZ1cK(mbc#-t}F7lo*Byq}%~^`Zh++&l$kHyR_uy@UQ3?K4ibQf?Z<*b1x+_LxLWc z{%d01@q6iCPykx#*~N%b$KL2ZaM+(@71Lr+ZZcZydl=Ju4o+d+@yeT#xr!JMT2v#zy3sna(wHbIn-6l@hqX 
zbK(1Lg46KCny{XItL^pxy3zens@h-0C=R_Ig>O3a0yyB~968}nduztsv}Ll9;qIq# zsJ)F>KX?$WjTd{5JW4sFj0r5+yd@cSeTr514o4hTh37nz;}1>vI-~WDctN?u$<3Q` zL>D0&ow^F0zl(h_*F)v_~M9qy)V*Ml%_tfXC(9DJnMIG zS>(!O?fQTRj0^hrA@8EK)&fl6E2GQYZ+_RSr)wW6L%^}`K*C)`dwm@LhQ#`DP~DBI zl<6h+11WndouFwO_`T1R?tqe!|wh2$6ze7 zT0Lkq{)I|d*h4on_FO3aTlDv(oV+bvYmnZMz9uTxSY9YH`*!$$H{FBrPCb9dn43A3 zMYB(>&W z>neG1^1$JA&oeoUo~{0H<$}6*ps2gAGn9VRUUPQBwd6);n=n-D+Vkd=clq&O{_j>V zzWnCM_}SW4r;i`6&Gg}dgYZSPOFi{l!Q@`B{`EioZTgg&;fGiI-ua`|TR;Bk7-|01 z|M9OQZMc6 zfYRtpbYb+`+Cg8@Li8RT5=2ISgtt@jqp{1tv$mW_mok3CC*@wpARW9mB|oDD^jh@b zY*EY(XcVoz{e@j~kIcHKzeR|eC%hGbtqd#K01ZLU$W!gPN;jX*FwKJ7i z#Y50obZAS_%XHvPVA(um1FPSBB|Jp~_qNxa@qusMOg05?^q8@Jc4fHBscv5|W?PsY z)uHEbRSsnI<#aw{1Ftc9MpqfM$)jW8P z$i4pY&Qy9%CLVf57A)W)dN6WM>wp$No6-43M)BLx2lVn%QQOCk9$3Bl`rGaGH50x2 z>KpH6K&-q~G@ilniC{_(ekPg)2vdB<_WsNAqb&g^fZmhUaB-;;S-<(8Be}=9>zzGG^n` z=dA}fnr}{q_%t0+)`Cc5fnE5BAi!rf?i&8bR)p>f22%cnjZA%L?^{>12UTDHqX z!rM>nJ@wVWT=`#_2IK~O1B;h>9=2g@$u?sKmW7U@4osnUb1^^gw7wt z%g`Bo=kobWjp5$vwSBLR{z&eNW4AZTY%2Ipy>I>6Z_dmb+<7FoCMS~Nlvg`^u(SP= zDV!B$%=8ktry9n{!9A-SC*APL;nfIaI&1WUVfnEq3b3ee`x4GW%HBnSFcFl(fg;Pw-%obk8rrjh^6sx)^uDv{xlG5}!V~C8 zXvgEhO{NR_HIDan;K9kp=~gM$aHQA9QdZ1Md~W^VC@1U9>*q$~kF8p! zJN(hlj9()|+!F*IZ$TazfG`u@+Cd6E_nqb5_Ze}GG#3*5AX+!H)cebC$R{n-)Hm;qpHc0~Wpc$(Y$pC4NN>S%_oh=5w-ZQm5@A~!RV8V@9gXfu^s zzmY(Mcw!o&61RlF>T_QSAs@4_<%X-@wJ3*oSNr$x4~|{i*v4&3ZOsVwLXpy<%x+!F z_`7RIk&}DJ%QVqUJp<%6+6bU28+RfS+h2SkqMN{4zoG;gR4Kj)AjU@VpuDO_^n5jX zTHORKhZ`))AOr92HYb7p@R6g#u%v8KtPueWhd{y$rwwpK6rYPIZzIqq%zl3G%Vmv% z3t?W0>86A(HRh}W;Y#}I5EbjxC+N4CdA}NW&)mAj*nV~x_0t>jVr3NHP&C5D^OrLS zTpmQep7Hq~fAjm*`G}~J4G&@}YS&>P1Wkn1cty*cPVhT;_~4?z3~nqdg)mE%w;?Dx zm#-CR(HI=CbNJY)1lI_1umE$+a)hH^eX?4pm?#C{>h(1_FWyU_**R%__liDbWEwmT zW|S@j;-x)%W~Q7I2s00$x=3ThNkHAYq4lmtvnWf3vBfx%pc7+9q`$pbW&)xLM)nk# z%?enBzRkQWy_8we0~uzlS!-@i#xqJ#kHbxKVSG+wDY~fWn268vu|Hr20Wm> znGX&O2lkYeT1L@#{oAX3Kz5#O4LtKikx|~E2pcz=LV3`rXoMbVty1LdyT&u6gBlxL z@dNh8$)V!l+z@ZZ)oVkb?-Zd5zh5M5ZtdWQ_9%t+oQQ}bWJbtpZyFe740)IthMNpY zqhRVvy@vjC!giew4GT&maDt!yhK87bum%H;A4ZdbwfQ$U2i@|R+WT`Vd}F(kAV(OT zs4)Uj`{!(e%jOPC0Ye`old^;nUQA)yw{!36fANd|RrDkL3~rxRX5-V3KOEdwQfLIF z-lfk^ zRE`Wxf)}50eo`n?@;7IsV9>tTp59U|b_$VlQ1E(F&ONxOP8}gY^wzoz9a65r%Avh$ zirjDj2Ssv;NOg$uy44pspynEZD3}x2eCO#Ek!*d|K2d~SFuk7<(>=Ajdvs_sL-H42 z9SF6G`tVHcVB*qmm5t))I)36r>;6>6lox_gG{>s8-ZEaEYmOJAxkJ~3EwB9fZ_bZ# zQ72D5!BfAc`&T9{5zV;US}0-SkY1_wU!M`Fi`f<#-(kp46qPEq-fn@XBC;@Mu-wso zB}tx(*3b5B9$$U&c|!P>)jQw${wU;|H*Z}1hyU?^=$-qkeMNk4iO#A=4<_j7JKy=v z>IXmgQSVhZG9&h%{>h&Ps}z{_6kJ!9YfKZR*<6nnpu^ABCgB%dJ%0+FFDj@4mPC!H<5>&yPm={ky;Yk12ak zuYUKt-_3i^X1IDWflRdL`pU3;|GV$5zW@F2txk5(t1N`iKKo>KC?)>H$y2CJ>#N)p z;fKI`XAH%!y|OdqO}$}73J^W{SJ*jK4a~% zp$&xM8CSUIEPyM)iE@d?8KKWY>l?LcM$c+y_9gl*jhmwd{Ti9h9DBsxw|CJD4pDrD zfvFq+<$i4qPwH8tBMgtgwP0$G;uqSkKR|mpo~3LbUf2DFATRX0@1o%u-v-xfbBtW| zNr7yj15<0U7gX(a`)_sOf4p;P@j9~4TIX0xZKfi`Q4#c`1 zEt!eh#vAdTMUDbX0Oz}ep!4ln`p%qPUJ#@GjUYaAgFR%4nFNagHj0HN;SNoOX zzKnmGH;AR(-WA=-NTVI?N*B+|+_*ebgZ}FtUe$~JTRx(5^(%sJy6s&KDj#@0{H_~} zGs=ujy8kFT@$=*%Ysi3u9vd5{!2=edrf092FQ>G4&>Voggq3NJw#tRu!pM7~~WI!-wz~tN$`6u$~sRtRL ziT-a@Z|+AN;K^Im0>$MtPc{w&&>+@Q_@< zxWrJlc3sN6k^w!-a00i<63&{!zsWt194=6iVa+q+P-$*T>iIs(rp4-K_D2|CP2eBQ zUX#M9l;qW5{dG#_)Kk??W0&nl-ZrnrnY74f)|(!HUT6c1&@mBS3@i8UR?jqJC>jh7 zWCYLor=B#vcj(Z;3~lGeC~`MvF8J6R$`*WE)GK9O1RfaP&uIx)??wO2!M)*Y-sxYz zRv&-*Wi{+ey>CrF`|7KqKlb4*Yr7`+Kioq-KKsRf1(q6|alif6>#NPrJk`2t zZd{2U#XmAI{Nzvm)#~-PzMDMWVQ}&DExY!vUfj2DU}(=?Yd^_kkv{M6cDUgX$Oq9+ zb$dTO{N>1CyNiI|`|gic-~ZX4HqTbOdr3;G|NcK_Ag^uFv(DE*TUy1Q0iOc7k_X=V z;UBGDd+oK=-~CVjZq9;1pElIr<@E6v%6dB_i?M(9gK;ipozN!^xn#oT*$@Xtmeo2B 
z--3VP&CtTu+TLTJ@<7~cVNpiYpOYm9DT48$dwH%Kea03LelEJp;wbkhgFPo1ey+B> ztRy8iM4`%d-Mr<+)pvjJv*>c=<#KjhFM5)}bY1cgU6Wm9$$H|Lu6#%`lKo9bG~Bh1 zE77w%m^*i7Q}g_`LpTqnYkU)~Y>QX)Ycr7>?=~I>jI2nG2WtWD=%eUtk*HsXWAFg(Kh^c&XT3Lu+hn12WTfy&nGpunTjxa; zSCXe^>3uijF$~uJK2snNxrEVwqJ&$=r=O_|R!#`(Z*Sqp3vJ>oZtsnr$evT?MYL?9 zW@|&%(=$&$J8R2EGCEiMk3p5g%BuT~<}S1|es}3iwwOvRr30PLA-bcW6;bz`i&Ms- zm%jMq_XYe3_=(>He`Bii)ldHHFIR^@|8la!&s;uR|K_~tGhi|FHFP_B{+prQ;CQ|@ zz7t(p7cH?CoFV7KvrCO53LLEQS2Qpgwz)lBRscC$&GEHmEJY?qRw8pkrZwqw;3}(O zZTO^&4q8qwr%SQ{sV&c_C_}&=Ah9_Q>gD~fPG+Z!P^ERB*${rl?AJe|G)iR&Gb6A}h@8an55YI)j(?FQ))zp9SIf4s zN0s**$5gy=Q~XI;nWq{L9A`jY&*5mT!x1u@muz=DPsBGGaW|dFSxk(_Xv*#683Am@ z&)#q^cz^ZfXYHdL!O<$R6P@WwyuS&R33Oo$-pa>3)7m}=j^<21!?OgRF(h+1Tf_JN z`JX6lwZYkwC_K!&e(4eOe^> zSBDRDK2;8(Xb3q59LbzAP{3AsQERV^u;9t%?3Kn9KY}CZ=H2ujwD(p#&6>#27>8ib z`-}~;IgkP9`W&d%Z)CA>7v7;^#*N0YJF;0i!-#VbuOP=ba8*Xn#Y*y6!zstu^NY;R zd17A4tQMfXGfI$FF(v;Ij>ZV>fdO83Vj{J+JoE6mX59T|;xX&;@OpCX4ANpPe|oP= zJqnrU%ko0+Ar;-NAJ5Lc0IIKl8Ju>EV{Ur1zuoSI|I(bXJZxa*RC^wJ_}u+E_vc)A z_&!6v`FpNm^mp$xF5^VZ51s8a^m8{>yAnohP=J0EfyH>^`vAE1cv>_8p$W3uhT|0{^>{%W%2JDR+)=gcwuM_mIWH`Ed^~p)y*T9vVR}yN@tllm%^o62jAfrf6 zvlm5T7XH4^=!{6zx8n9sZceFd9=t5PclYisscJ}=GBKD4g?nuVl8Z&GGp27B(N|R6 z%xhpR_h3dy~mg`q5&GSs}QL01~9Xyg3 zD5c`nQW0ScL-|w9nfFVnWzU(gMn+8<)+ zVnVQpj>SL_0g5?jW7pr5X zCbP6p%z_~4q;K^VC!M}`9z^vBocAhqVNRkDF-l`IOLHMCaT2aeA;QQIcq3CcoB@TB z1)niM(JIER?k1+r`!0guR)^NW1%fj?g&#NWrqDGmxDMameEsz)wejIcAJ3k8_pNtU zKd-*Es9gtxHiQhg1gu-D6UR@5&lGDBO*x(#hiErUf&$IJ`^L+=Ga6h>sjn1D^W>RT zb8u6_%9YCwN!&6DrE!UDA`F76@93=qNkox0^VTaIjz(CT4P~)dduptD&S>Ga_JT-a zj6@A`k-@SrcJ16g(Z>`uB~LDt&dk%S59L96dG-nA*kNu`*6|iJd>%I+Vyo*;5UeA`2=0LMCNG#}? zlk+bCBG;es1-H?|pyugFpI76L#q5men6V{9yItpZp|T+MDtEXsO39O$~lL z=X!#%nw?W8(*d#>oWK9W@2>v4uRa@mCAgfsr3PHlD;Li;uFAbcZyyOxXU~5%j-rar1q-#w8Pedn5;WWdfB6@GHiwsO%1CdIfFH-v zCmG3a+`JTTVuVX!3715B`*@$z=v>kE4xE0fy{jzMGo@MrHQ`4?6d7xT-l%ga%R(l_ zICFb&P3y#fxV0!Ij)Z^y)o?N0)GJPdz$CZ&KdsT_?x&Ct8;xtE&nE0YN_0yi3w zM2^2RXhJxVuIeLGM785OWf2}t6m4*TPmky(C%N+(q_eMEAAa>bCDeg8N~fJYUu0a7 ziVP}@XpC1R!Z=t>RG2sziMWZGX zHTX@$uen8I(OF|>)Bs!2clgmnfrEOD=yT>4Oo@beP+j!te&4&?_l2gqF^2H2wJ7s0 zng(z2EOd2*^Vz#S6by}lL*Kh&wCM84Si!*9RvoyiE6;@{miq!(f(P>0x5hrhuffIrksrF(xJ0Gt%kxXSJ**_n^ygYK zONt*QPK0%@w^k0Z>+F>bHu{*p`U4orh~P4gRp#d{hs#aiCQk)cCj(W@PCb(W~_jinfe~JRXkv zY|ls{<=Uaw-xOJSHNztV(*UA5XlGynM({KxzQ5vWWdOx5Ufch6fdJ3Nr!G}e;q=Hx zc;TjuETZWc?B^jygw+{YeoY{UhVf-JHd6`Ri{e z#W_W*|LuK7=4($}&G1vUsXf3R1%9Qw+|WVqHnQsGcqn-?K`?l4TK)Mi{tv6| zd*A3lLb_;%^s=_r#xJ+++}T*s5@oj1IWC+@PE6jbk8d0Q3mr0jKV#j+GNu0T+h498 zduqe#fB#?qUsum(T;IBVM|+DFw&2*ga~D>xW;{RKIVU?eJs)|j7Ia1ox)r)(-&e^aVrN}@A7&Heuw#oY+`URJwdGWV}=AsvH9qu5& zC~zO|%^S<}@XEVz(Jvm#MOT6>z~JegZLD+)W5$n_aFQAJZL$uY&zKuWo$NY>;RDr) zr$_92<*j6{aI`fdbHW*=2+k%C>wuhYpU@pSN60~p-b%_WnK|emnf=?@pGS@y90Q^f z7~6O3iT-SzvPBbJ-TXQJ>>=f+Mh6W49)NN5?$D91y1sSw%ByeBz84@Na#%T+7mIM8 z%vthB`&Zvbj~^amw^BE+y#CJU8|VSLxV6Y@B{FvJeL17BTF5zkn%mZ`yPNmobTc>d z2pvqV#V zRbuHv8KnYrl(O2Cb40lzG*A@#wTyCKwu!+>Qr<8g`OV{S) zg~RHTpEz-B*(<^M#&tEj8Q>XY;|V$0j311Z^%CSq&$NfxlCBh?&!Ne&v!RS$?@T~g z`-hDGtuxLZZ#~;1_VThLWmkw`7g5eRQq*%eCAh^}lkv2_e3EOUc{S4YPN zAl&=%{s~NaAsTkO3|m2evc?dxojdmoA6F8|y1;jVo~Mrs9-_N8AixY4|3&6*jw7o! 
z#|c_)Y;Z<7%sqQvYM+Oj*%s-E>_BXUQJ~P!% zCFf_M$ClF^Iafx<2#hVvjD2d76Fzj{w_mes9x{~q>eg_YZZ%<#4PLc*gG^tjZGHT& z`{W!8KhaiAO6%HesPm9_#}GY@%#C?(UgQ?iK#QxbTGLqn_nfx+l_2Pu88u9UMAJ4z z@aMz+W^8?*C#Qv;^?)l{sx3Vv%uSayvu5Vky%rRaKUGcd7xVIN*_6jEA&X+adv^wp zgx({^PF9=s!g$yi&PCDi5JR3&e|5Em5Y@J~!?h6Ln;C|Av%-mtc{m8#3x;Op!)# z?+yW=I$zyq2q$DgY6d!3jvx`Pr5(Qh`YRc>6KF(4053tJ_oTjHp5P_&kx>$|5~}Y- zY%V2~5VR2F9XobSBO=yQ15T20#Hg6##L0!*Advehk|qCRK8+O z#APY3F13!2&)qc9@RM`q&7ar-Lzt zaPe|sT(FCg-3CP9Dh}ylZ&~Oj&AW(ys4fa^4Ve@$M3S*W8 zaeYvXeHP)*UhC!X-4LO>XBbbc*KFs6O+q1}hanK`5ae|!a0t^f*PczuHC6_IdwB(U zn)mE_X(CpSo;ccj!L9ceZIqagGi zgH%gBTNqr;Wl^;1pgy&SUic_^0RL{?ih1^(P|Dz?1PKR

    xdufEp&GNcE$lO0f` zW;57QNbg~6k7$J6+JE|+d%;jN9HGqtug6Xv zFY4v^L;;NPud({r0}d}C*umo_Fw9_0eR+bS>7@95)wO>2y|)K8UmQGC>fnnLDJw$s zvFGz9w-1c_wU{Smn4r2Q$G>`ET1Ag&^ zZAIi(pE_nMjm@E@2Z9G<(#tQ~`)ZGM_CoX+J#eD-)f6%KVP8oBWt_PbE&Cvc1E!A- zO2eE(Y?>=UP+ylb3IwQ7ptQ~K15`Kw>@7(8 z^V1`sixh*)zkmOCMLK4vY3|pX*Ar!FY$!wEaoc0zSQ+<^MO!IqPFqKR z7{9NiJUq2CMW8jLD4)qFXMUG%Tw0yUi7}3XYLM?PQ$%zpJbX4jD*DD)j32MIzfolM z4~Gs9ZK5;;{9vp<*D(sb(SEG4fujW=7kS@v@PPB3Gb?d5nzX>k{t%fvsk7i2K6!TP zkq7%W=+Jii#vZd4wK)5Bi84pS8F?+Z!S+ngE>49W>v0A?blXQnbkiLg5LZ~nSO4B; z{BFhPx3$;L&s5T-xw7*Iyvr-X1qI>(dJ^b z@!PxAnogeF_-y^umgrn`XPdPB#v>F;Hjw=ViOc9Hg?P|2)}HRNJzY=6w5nH~7XD9@;3Q^K*K8MRHR z7a6H`ZC{6%E%@wt^70}Vh`x7y=s{z!BhWH!*?ZRT zm0^P+<9^Rwx^isw`;6n?oH!gWJk-JU->yFT_@mk=7@+ZO*}ix6?hk*q+P8bh)M@5C zN{I^Z%5n-;Y9C)ohI!>Xug*DEjJn`-FDJp3q8qPVD0t#>HHe$Qmoyicv`C)RIX z{oDWZ|7G=ba?1aossHM}^UC%+zs&%WAOIqR$T=`pv2y7oS!#urrDLl{cMo6qqF+GY z_2@g__yzc7_{LTbd&pKx9@5b{I#T5-vREu;B1a-;kO&0(`piT>){_r_5r@CT?1L_%*@b2Q-{^j6uT z%3Ydcdt*zH{r8iD4fG<9U{5OF$etFx zOb)%$9^BmfJ2~diq61smMEJ6C0GaWF=h_=DzObyHZIi5t@5u}?r*sm!51Bys0C)iz ztpULBhd=rIIVOsVwpUy4;pNG!tAVArqXod`iv`r#93@T6U9^g`u%Zp1tBB3V^xZ?Dc-#F=rck|ctO*5vGx88}&B^&S6GvgO5i z{ba;6j{D8GwPMSLn=j6n75PC9Ftw(4#wXr>#trZSSI{IcDHp|lheSsJ)2Ye zn{_p_Z_FXxd>DcahGBeeq?&yvS!pL5q4b8shgY7Xg_c?bisDA~=xzO-d!}92_Iu>pXUpe! z1jE3mqe8hSLY`*4@2jV6&Exy*$tUSqeyL9y{r~H?E7YW#pY$=$Yhed;Z(2uD^QA7- zsJT*pW_vYwzlo3m8}3od)q!%Gi}Jn|6XHdbIx*2BJ$q7i5+HyfB?zuwEpj1&4Xja5 z&vg$Ut*eIXtJNi4Q;Nc=NhzW9PFaM9JgCo3irm*NKEd~(2-a1rQdau>LGvK>MHC6Z z;9gAn;r%>_F-CJJg6z2rPXIl77Y!j*zWFd+2p%PZAH zu9OKyj1@h!MZ`9kj{Fcd(-9g&TmE?n&G>H|rOV+ic%N#0|SBi|U%ixJ7M9v9(Dtr5g#g zTPY9UB$yH1gIj?T2@NF)?#HYgUN}Zi<3bYpfwZhol4ukO>AGu6-4*49ZrjPjIBjg3 z$a*a47q4DAP#%|ZPw6MvMeYwkVeT8Bv@azM(CG^qJ~rv2B9Z~&gx|is`%9@S!nXGe zRNvpdTZTaZp~Do%yWd(&gmQ}6(`O@33IYX(!@@WyB}(=HKpE?+Qyy>SykSr?x`)v< z=AqpfBt_E!qVB&F)4QB9r0+HtL)v~8i9~Uki0PEDJGTS4Y76_BL%Z6QJktXpv`2c_ zFqw&Fnhi-QHP87`Qx0D2nSexIOxqJ+>U*F0oj@5IsdfRkyu|2G)Ft6Rpu1&h&1Ov9 zM`+a;V5~<37m-@E12x+wVFv0XvDdm(yF9(9+ z1NIn#V7Oep;t-q_DJ&Z*{jt0HUN2_2f3^7Cdmr>{eF{iTd3B2Y&cJEqTF{G{=)ltB z$4*4k9mcZ0XrxyH6$0__(t90vdg02=#nxw*LhIGdTW1VF_jBhjwZE2*iIqhZ-RgNa zt7FF)^1PQA*X{YKkNrZQwFb}QqZ>BIBOXs3W>mVS2=dns9S9^pk;1!j@sI!Fr?c)~ zbg0^|fA@!FX{qMr@ePNwt!~^!6$;kae=I>Miq-PEBvkx%nQ297ExoLg02?bu zvn62WlP^A-$N`R(ug;#$AnH7d__s(22ii&Rf4H@1#LJA{N~z?HKX>j-;P>V^$VYa~ z+M+Vlti<=w=fT5=izZtgfSLm}Kz47ASRj%!8$?w&EL!!8mp3b0~prMpZ(+~i~qy_{=Z(9qG`|ktN-V}YCm2beuNJ2XZ&#@ zV*6=d&+InemVS+WQ(AeekWRt!pMeRDnrun(v-3L-_Jb`+2+Of-hR7C`zs@h97MCDNW06mMPA;`7`q#|Z{#n> zqm`yv-#H_c;_EjuOfFyRV5Dk9-m84qj%`zZ;FhEAt&Ru1jWFT^^S#L_Cc^aK9aEen7e>0Y6a-i57BTM3y-?Se94g;F^e*W~i zawtFs`U-jEr4O>ddUR{EV z=319}ZeO@{-z*}Ec@mz`tS4GIYsZb59s2ILA32aGrFI3K6AFSBI-B(zoZg$7)>+}cQPi&iPphU zKoDa%bN>9|!k1+$rMIZxZIKwh9K6bR^q4WU=}8j_7N}qxYPsV5Ue;&%Z>FgK0=eTG z=E!Ry1d$*c%cZcTq5 z7qzDE9BpXD8qiNU#EgPoiyi?gLld=cKcF9W$TL8X^c<1fX$`Xm4SHzAJS^wN6KbnD zJ>S_mBSO0o(8vq%7s#8qHeR<*XQr0xr8#h+^M?h51e!u|Fi$Dn&i7GhxYm6`uPC%=xg1&&F$Zu8;#>k5z3;<*9U&dFcuW>O#sZLN_$+t zX57j`RyH)ybaZGkw)Ogl|Kh)mMmTrkzpV#dttoBJ1#B#B!3P2Szxvs~pYvs`^#rxF z7Uc4#K-I5`VE;G(a!oqpTW`KIk(Iyw?JokcR;4qaYwz4%ymsVp>GsBnERl=y|+uzNNX|uL2vriKe{|#h}gz?>APO|Bv77A^BSJpHRP_FMYn4M zxO}xff0Og#H^2F1HM(~!uBJyjlkCOx*EVItJ2pkuJ<2bK37WsWUJ7?SW-SM*mHky+Q7veMR+Y9GU7f^F{)>6s8 z_1Wq4kLOp*%&t^Y`Xh`0?^-(7<0tj~+Z*;l0bhaD57Mh-ot#M@1Qf68+0SQxZ0{_c z4bhTd24zC+7tzRqRXDchfIhlzj+Mz|tgm>JzTHSD5pa?6v+WUrPR651jdl%5&e(Q0IvnMJ|bmnZj`P*eH z?VYoM9GH2xsP5<6@9b@v0huP6E*%w1XS44f)OP^-{jZF*b7s0XDOTGXrVP|`>9%MWP3S+T zE1AG)080YG`rV_v~N)ls1Dwu@?ZQbcpADM<-5Kq7TrgoP|fm2?*3THQC2z@3{h` 
z+VB(gZhh@NIyD_fsT`FI*hnJC1+EAxxm?fxoS@ew$;_`FP2-@G_;olpEp5Ik5Y{U;&&T=z?zius+jA9>&kv zN$lu5$&-80`t9ghU-3nJ`l%i)IAPs}S0@8SnKAebISY>%e5uKig>C<#wIx214PStk zIZ&G%{n^Z_WC(J&aXop~zMshS==9mvZgjMG4AyLI$<}Yv3*lAtiIvS01>Ao3?3XIt zsl6qFZ~j9B@~<_sJ8Mt@~AD{q$9%H8=B_-`jMJug`3w-gsG% zQk8w?T1;?SZGePaGg3d7y{-fEyB{;UX}kY4r7u38&s^qBKW(jPXZgK0Z(h5-xOn4k zgprb$5q&cuZjz4|QF1vz8Dqk{MM_e<^amj38eVVXaRNMIaA@B(p5fk`!xb#PnAo;0 zrQnq^@VnoC(EV-fHt(|u6}4i`AEQas8ifi0TTq7Z>Hv^L_(XdGSzqjW(J5*?D+2%& zdj6YI{@R=l!F$+A*y`3g*?m=j%k>xrW}zFDj{AZA_jqnRvzB>+6UD;J2#3XZz}=GsOJ!2&;WSVQ@z`U7UhAFt*og23!NBjj-D!qBIdVj zTp!4{1dGE2N0B|UZ|~wqKX_*pk-z@gFY3EAs}%Lx1}ck&G(QJ+?^}HQ#TN;5wVgx< zTi&O%2~1k9Eu-2vbG)_QO9jSv$bmyaEmt7=1qb3KkSD783C%B+Y`u0f#hfDE+rBq0 zA5#WJvGVqA$O8}5bdc(nO{)XTZZ5v_#*w0#FI57fQ>}}rpcG0MTT>~It(&B7wcgc3 z6cKQvh>z>nI~XtqhRF=`_H?wV&l#7}o0Q?XQ1q#C7O$4l=YRz@`3@dDRGqBP7ysd} z|9$ro7p-UajS^UUA~*J=++$K#EA8+~j1!HbI}F7fw(>(~4nkCRfpIVZR%^iU8Ya@% z%{4*2WPlX@p4I1=m^o*-qb2(Q?LHGgqb=o0o^Q-!WcR&2$~Z8uQzO6mM0?w}?#qC> znE}#aPR)Vx0}KzQx^&E0n`p|uH$L3Li2Eg=>!ro6YB=uLwmI+ls>Qn>1P(`wJmu@w zZ;no@E2%6((Ixt4o+fIc`MDPpKV90}^H-h=06)Jtw0GY)ht?F?`AWibb0s50tor@_ z!%b78donuW2lgi;+&nRM?1`F>{sJ}3M{Ds-yktk|rXqb$2K-*?S=Lar?SpvVP0o{) zg|(IE5ETlvzV+PQ#s0nfi(WsFSO4+==&uAsA3AU_Ww^d3*xrfG{^X}WUi|VmznueP zH>b?}cp?DPes7Dw?sdyBfd9fnj&_+bitiFq<~Q%HOfd(@HTbrTSpG$aM-+f|HDrfCrfh% zdW+sacI>moPk;2IA}3Gw{)@KLGttVf0jLfi+&`skL;?KtN8hauX=gqZ5qb3Z(Uivw zmz*6OPMiuKe)N9E(%F$8r_Y{WoDS?+f{E?-9D!}T(%S+i@D%&E2_56PpHnlry>c%Z zwW5AA4%coib-&u1K>E#_HVy!F?BvmrKLDJGhQ_z+@A4neF@6y9mr3zLGz^#*t+_SF z_nqW=6x(yvJZ|r$grP@K55QiLJLu|#q7`noM>xdE_BCZ3IXno6DKdg{o1sN+^!Ulh z*u8UGb;qw%dS`2EP~HCI%EupmwB!ZxHfKz5P*cDGNE{khim+km${10jym)m&)L#xsEWHiKQ@J;-hv|E$YM%_wh0w7)h;v9

    v#OW4lt_eJ=Saq@><8G48s4lL3aX@ zn1iIpd^`?P#=PU!7n6 z?C!&z)8HJ53zx1&uMaz80e@Knz4TRvoC7j9=WP7$n{Ujae}I~o19`s9Afrp&3ZPus zIUQFr0GIl_!@9~8%johfhUNP9+^^bi&nIsLQ9O8oesYfn@5!ItM_Cz48MQ8C~wB<_t8J2 zC;VgfLG5T0Em&82+t7bw1g`dD=J@3TOCB_Dw~LmPx({%nM?B?s=>7RNr!@Kh@Hc-u zHWddZ9{JWgKNx*qX7R55M`pYUwrI9PtFyVz0wl)3zgL-+vbX}5Hdls%UPK?l=h%lp zND;jHc(&Twr%xSQ9M~^4ya>|{=lkOG&uVXZAoi7j#Z#w_H!o{OKmGcvo9STmz-**| z_*2J^H-BaSMmx_$x5g|Y@@!d^>dSB2zN<1qmCx9*bFuIBcNQCSes9^azy9xE+`f9H zLp0AWP8|JY4!9j1D0)19;`91e9rm5u=itn5(vQw0_m3Ar`Y?Uu$m`#YW(6(<{KNxJ zA5Grn#1I@H`|VNmDw~2m{CrLg7}4P=jak^AE;i#}#A&Hm}j+5^_1S2{;quNUx#^K=RHiRuM}%V0Qv=FH;x-Fp{r zzw^Vu$1`)lD;?zfKmG^lB%2n0c<-0(y$+hI&BFozbjgbsP7aQ+q~-QvbEQn--AXFV zde%4Hym0>PvOn1}$|PDF0H};U0d2-Df>qFw5;REtN?CrQgUuB>xp-lzpV3|NW;CV( zXj%)wdLptNa_cZ!WkFZf{^ds(hnCFArmfpY$3K1ISo0-QxxLos^dU}!aV)SG;?eVJ zeq|Y~tJe9sKmMSl5?!nH<4u%-y~{yBB|a|JRi?VrRxfeXSEJ1l$lB)YuQ z$Fo1yERMYPX3aJ?$yM@teZiI8y7=IOcc)J>1->}eSr2{37PkNJ5Bl-NoQKYN6SXd= z>p|m1$8ZYkN9VDx&Yn9nQO#ck2yqgccNq(#$2Ml|*>gmn0LqbL$oIL*?)Dr`S;2mE zv**IV=x?K=WuMVIt*wk`a>hOw5V$#3bwJi7S+lbKaY9cvQj{m!+X1=CW^yY1>SzBc z{3co4eVmQ5oV4ex6g06ZzOO8ZXQd3)(Pb9Gior|LXVa;ap~FwqOMjuD9k`u+{Ki{v zFMj*GU(dW)|H-21a|eF2Ax95t1_0!gR{PZ=2+HI%CJP?6f5*WSO}7eDa>=?2l=}8@ zS=a(gIjNfuWlP9QrJ0oZS!PtxBhxFRBm3u6=M>(#b-C-8GWOe?TU}5u459VFf8$X* z_8d;;ubp5f0bg^bT)Yu)uzsTXWjmO2S!puB*9BCgCH=FueCvSFXscwk}LZKQMMTKiyh$=VQXahTi5pcT-OhZ_mdl^ z&E;fHL(`_yXP4328mfhP%#(I$=sw^HsQu-0U1!mj*Xqu^j{ue%rpp+^t}+YLMAz%9 zCgzLzdj1=ONFzf&E=i$E@XU3F;ggh&Uw(dZak6^k-()ZoIzW~I1T%Urr{t}S&@t{g zAW>gh6zL*f<4mY8&m{B;`s^xEn4JzBLEFgd0;+9d9@Up4)a~1<=M>nbt%)q|`V*(m z1{A(JMEHE^X-XJiqN|H=yI7>?r$Sc*i1&U)DBGg1;d>!+na!;B_3i(%oJgZ+$IWCpaT04~Pm%sc? zk)&55;Kq>AZni{#eD|$4>-UPq2NyqTu%&^tF)*e%j3FYVxKlo#D{99*6zp|@^Xxq( z2BgVSWPVUz|JgtOuSQAv^ytY^Y$>7yJwVtCSOvi4>D;tw{o=%#lNnc09eP1~EhI(E ziXs8g^Bd}`wY%{^J!L>HnzpgqOl@+T*xWkZTJ1vX;`gV!+0UNB8A2FI+i93n z;zC{Q4QumEw~2P|3b5X{Gr^EY|J>O?@*)UR1}+pK>PDif)o(GOnGxqD)0qo>3OY>HJJLEzz_GOIno?35Kba`?3| zkgndklEZ3I$(G9j@TI#3Y%_cSb(b!87*WD@9AoVd9U%uIaeODGA9zEW7fnM=~s|hF$H?+%OT+yBa$VtOg!sJl~HgFuB`On%* zkK4I>dq(h${@omaHa6ExRc9>zfiH-b+}J*R3Xsyb8Gf84ufDpqwRm>1Gl#|20K+rY ztF-@sx%LBt5ukhO^obmc%ZM}kK(yzW^Y~{VrF}?IQl^IR0}y^4Prg}{&H%Up_?K(v ze0+gS03`3(wYSvA4~9o#4S3Dnuev|Fqj=aCkBb6)DH?q_9w?=-FFR;*>+syqkA0r; zx*-KA1-j=`5IET;(knqO^%el8#Fl3Qn{I$}*&n@2hZe@5bUGCY{?!1swE~9Bj_31^K?5JJ!U@6Pr$yq9@<&zFF+fg4c zb&nK3`{j6m`Tyhp^}l4GJXrkqfBBCVKl{~tBQtGZ;CT#Z ziWVcDu5c^)suuCC__o@5qA$(8Xcz`FtJbGn$}+ngKqU%&qQK&3=AAQ(aT$@4hyw_55HZlcyl{Fza0chn zfGol_T7Hc8*1nhNw`a&qAQ@iCIYFjt(7x~M_QMj)o>B;8;pkuS(tdJ`YNHMf&lv@~ z5A@H=Qky%x#JWwbZtuGW&5YeX-|J87+WP^&y8UMJY%eT9LeJ<5%ymBP|0jCn03|aR zPh380WPgttp1d4Pk7TGNSLzfX-Tn4FhmU=?1fQeTXp5fZSyAjb5QbAdd4}=M+!&+2 z&wTXZe2*vM6TUK@_Ve^pU(!kXeALtY?uCXm<(fo2#DNn9+de1DXKV%JBfYjGrTgtoAU!_lOyao-WseK1T*< zBU*W)w*VRS;od!mH3dMnH`KRZT_hm60AOUy7^d-VT>;Jj06+jqL_t(AcqUbv)2wHa z%k;LlUVCHOzj7_GEd6nKNjy#F-iCCb3(0YPWyA_Fpl|EX+5kZ`YreMxEQ)NUYnUf( za1zJ<>z|B|bkKOOXenT>Oiy&hKz*&}i`2TCL(Ta+GI_{j04Lqk*+J%qzDb^l@^ZEe z$H4l^JK2Zme0{PSFe5vKZijYdiH!_uyyn-O(J8Fy*8wl~o?x2+>$=2z>bC%g2VaxZ z%RM;JS?TLmN5mK%rfD6W$|>L(^W5u<|Aj*f!R&{d+<=E5~ol!*?gcK`UKuG;~E}??<{qE zGMWBtF7Zx0aDc6NpXbU77JzamTC}%Ecbka2GJCR1L>roS4j)Fg!^y5*?eNmZwXRGn zAouYz=LdM~Aw|~iU+mtyH#&Df*)ocb1L44dO^ZLi`^y{xS4Rhy-6{a$;+1O?6{~Ii zq_^I<);S){|7(X|Z@!*g{OAAlSGD(2v=_J@*meP;He2;}?i@7=Gxwb&X!F7xU-S>Rs177alvoHBlW;P9KKX&xf#ooYk zx;;Ai`7eGpr9AFsFTeer9|vSE51W(~g%(7&Ds#a`V~S&H@_2Y7Zej;#oK_h=T0AQy${F30sy<$=XljMK&#-A7Hw6* zHTKQj8}}ze6#qtRoQWb~@l#9ua$q|E3{MyM!@=Ckw zS_Ay7C2TJ$pH*N*!B^z6EN+Lmj+|@_$b4DR%GV@{XKZ9Sc_cUzUC8|3wzV@i1`L(S 
[GIT binary patch: base85-encoded literal data omitted]
zX$j@#AjzEpq#{h(OI)o4RYu&oOP6pn5-zt>FP<`?bi%U~_>I<9Z&{AnWdFB6ocO-0 z=Q~6~Acj*T>zT16yGc+)Uvh0#^vIQMa{K1(#ee=!|JmY~zxq`U-T8?qGvK^be|>$c z8BSvW=7e%AI7rL`&i0yw;Ebd3hz_rNm66t-lYM&kKAJkHZv<|iwMsoBa3rw8A(1i#8CKtUf8Qy2rQy%Vp!)ewf7b6AMBjW?Ipl?&Df3k@ zq|8c2STwQ*`X1l;>Wdk_{Y-Ri`#E&A z!N~BvUrG1=w>zXR1?2iGRf(vG*miLL&|g67;Cn|qtfWI;-zuXhhXr`%Yyg0eu|0WK zAH8__@nZ*7%FN9OOF`T7o+X$gyAIEDVtuwXXKsIOO!Pf~1WyUwdn(=hx=;6h&<42Q^wczL(KPmXaV9~Bcuk%)Sv8PIfciufwU*-K7 zlLf7!fh@>J$)>Z>MJDt9{hg#9NN0bkn%P;eXZt{gJpU7tZy?&4kx1-8h7th zs+UnmhPob@55yp60S?o)3--0g1Ac%lgPOes}NOHT0jX%;fER z(MaXb<>c|5$@qD-c<;Rvi^I{F@xuV22c~UF42AOE2L zfSJv8);~IU7RSf+ucM*ANBH*J{NA&CO&-dQ;nc3GJegBJ-?y(<1gNd7wb2;JWoQc` zc%r`KuLZG_0nY^vX%7ay?p8X*fM&P-+K^f9RUTSwEsDO;Y{zZXMKEyu;2UM zJITB)i+}y^{@uKnX*+W_nZ7q?C8HbO5_I7v@NJT}?Mcr2%EorNIqctapydhhX)F#U z;`Bc(xbphR60DzRjYrssXP>Gtt7Mm)v>we<#!#F+v~rl4_ADId`_19o^vr5J-rB|n zL+jD;{Z@5&_cbp6A4|N^Ip{afo~yA-0lol;UNp{~{bdi#tfIO7vjlDe0Nc-gcmHJ5 zcAdvyvvFMf;*Gu1bL>aXb_1NO+mSENyQc5`{>EWl1_|P?czA+#U2n_+0w$&^#vG?* zM&tB=uAb-8m;)dqz>Li33@dpUzX3^Q{kYAa;UAB$?HDK3k27=p$kwBWfK_aZN9iUu z^z@iMa%^h@r}>eQl959G4EvCvC~w(0TpCc%e=(j^u4~L+qdqW@d7ercqS6( z83JYYxd1ym0S!qHnMs^I{4TgFV<;vYAcW0vJ4cCKb0@tlb8Jb2_DIihXgLRU^QM&r zqIV9cq->nSWS%4E7fO5!if`CFhb3!|s4bpD=#-M17k(5}WlM3geJBfAAboWi^<<=W za`w}s$Je9HGMK95-MGm|XB@EY_#^ZQz|LA@eKpWf&$Rlq!rhEYb%xT(AW?MYzY?cW!v<|*E4tvL10Cw4%-i`kB!0dIjYs}Lsq4ja* zh?~h?K*UslHV3v7kp5P@Jb`12JUqVK8%pY(wWONngO7fY?VF4{N0$n~tXLeaf3Y?S z4{LXFtHV7l(Z|34buxH)*q*%(SOWH1M`OAz%loe$#D`BN>1Aht*@nO~_Ni(N!3w%c zD~0th0p;1Y$=xiMK%Cu+yEo3)C;fMeE6MPeuWQ{?TdTc?-W^)pxN)tuu9y7rfT4}q zCB0W$wA=X@bEZ*rknN}mn-(F5Klt%tYuV;1`uJBe%<-$X30E#($o@FdmHa?^>&3K{ zP|3qLA_wjSMgqU~1afHUL6$}0)NS9Tryc}+ejB*|*MIrHwXan1#Y6EZIk^nH&bI3F zBwOq0d!VvFEZf(QL(U^kdwF|V(*-y3c?9y=B@(kFG4MU;8`;ljzBYYAW{@wQ*Ow!c z_#yV(+Ma=b1*YfRn_5jcNS1suBGoI_k`K8qyLZF-ZSmouK>x2Bd+kEL18n#x)`rFx z$hlci;X!umRMPfNeg{)89V}y0RpQoItG(OpJHCmv5dU!Y)Qa)TR{}Q0~%VB8#k{P zjH-Wl`<)MTw%`C?a&u#pp*yy4GXN69Y51ak*Y}$4Rz2#ubO?VCP1ztL**}sRuEamuXux*Xq(k4*;y8ISK79c@_6$Cfjc;Gc{N}^iA9#@;=RMX- z_0QT8V^)-`wyb({XFghMJ-i4g?;di=zo#8Zd~>y6Cf;Gw0!{@a**aQbng8=-64~>z z!#>YlIJ-Df&>XKUGw@5OEm@77F9ThBmkB?-v-OD|A9IM#-XRjo%x}*n#8@ zd7#x3eU5(xNp@y~yy_glB@Wj9Fpl_hGSj1}K#x6vuO&uGIN~E(pOH^JZ(3|+S4bE$ zuZ^4AC&^#7Kl>W(wCW+ZfXd6CC4MLAQ6Oae3bN7t>9lXJSA7so8_WcZ@PUl^*U|3C z(WBE|bpj}jRZ@ZqcYOXbpU|^hPXiVms`Sq8sqc)ncL|@`raa0XMQz*p1ydP zUf>fuQ_LaictX_#Tf^B?Wc0anXOds^vP6peCEV>z_I#V)ayFVTZI;PWAGs6%G`HcU z*4nywr^OlUDStzN4gzE}M>`*G_0ptgSj9?izzyI14q4~qbZtMr$Hay?&+fOrFtYjn zyT7`Bo)xpoj&Tnr9U$Sm>*x2bdy}Q!FLU!4cQ|PtnDOWSc}s8l?&F5mCvUFsxhwkl zZp?nq&+Q53+wXt(n?Rl-?J473JGWIXwQI5W;L#DR7fyXuB(zc5vVrEeTE;-qZ82_fLjn#@3WSCD`;G$2Vh0Mti3lqqVY#w*uY|bg_) zXjAi3+H~RK>A4<%U`teN%S!B%KxLJ8mpKnc>K6$!p z1PEhYDSFFT%bOc1^U7yh-_z&X{0UtExPGdP(jijgq=Xm(O`-qEYI?J+1KX@^YXlgj zfXIUP-+6cT!>QJnfakP5Zq6%L)t{}u_aEFJ9Wu7#abTaZ>X`_XkPUn1=B>cqE3;mA z1A%2Xp%>u^MD;_Ny?j|oYR|{}oKqPo3>OaTgMbTrim*pFyAn^~l~;{_z>Mfm4FV6= z_Z?y`_i)eexj-WV^=S%@@D(^3M+R`!M9CKO1z2oJiCn7mOt0^UPdh-cy+H=QGWSGQffOO{)Jbzx+LPnpA=mWLM zxqEMI<9e9Wxl}kZ!?tWWUItJ>wbuE_u@mj3t+P*OU80XUDk}r}3&wr*+SByB zYh|I^NBCp^!GrZsz7%J72wTdP@wFxcjWQP5chIK!poj8Qj>CXv*r0{sKnifm-qY{0 zB_FyxE&GZ?v!;?*hSJ0K9Vc43IHOLRhT})IDq_$SL_y<>9ThsVFv)L0q<7!>X+eTt z=LD~6otFWTD+SL^y!YW`x-nK>q&p`2Il5sfl|I|Im0g&ez$2O$o@mK-quxR&#BX=C)lu|46dL5Nm*>Y2OZH)uz0WKYc%dQyOx=XM>b%??02I!He~RS5VMU3M{ok|DAQ80LZ+yLZ|XWo$GjnHk5A z?O%KlID5YKQa}N{6gh8iRqYj-GEA4{b7bPn^pWq(54eL4WGX35mJT?b>}YZSPufn| zU4{eO#gG37E+Wume`BB?NDq)zvm|{+htRL{)4LfT18BwTFY2xAJ357NYEJ;>tT7q( z#!$nr^Iq>tXOf*$9hB~t@#YYz=MVKK1(*bc^sA?m*`WO!qDsn&d2JK``bY{l|YcZK3}Dw-@G|$)|zUckVZ*=EcF(i~rW0 
zZx{dkfBc`up%{R*xe1`ri)h9i#IIz9iZ0HTfE*xn0tc%Kr1VHm$@xoXqDnfsef;Nt z_Kz2T_~RctXX9ElTRI42@JzOA=3qkaYEL~MC%2b*p`U1sZ(p?+9B#RI7#$nW*dXX4 z%es4IwmF#AB{Agafw>(MHfHjPb81h^p7wsgn!_0%ugpOh}NKP>KZU^NzkH*v=?`0oxApIEE z>~9O1-lfCovRmhFjx&Z=1i63#?2s#0E|0#Qb3+<8=YLO!-)fB{`_No~lk4j#Pmjr{ z^0&Dw_3rC*K>hZ!fA7lZ3V?W)bESkHuq0!M%{-20HjJR33QsN1*zSNz{IjWktuhf0 z=JaZVV7>S4KQsY20aP|7K#r5a`14^#YQ|t^t51@&C0BpFE zDqU8*E|DUdsVbz8fFRBvd7Q4YE{HLIM-w2I zHU`fh=Y;l7z!F{`xz_h&Ez`*lJuofZq9dNbdz@X3qS|GWv|&K3Xfd(EiXKkZp`l@QZ!H z=?8i{PsAf64eUO&zvK{yGPl|K5#0QCr6Bb(Af?)9+m`gW?DXD^CHWQh8oT0oW48|M zYhaOev2HSC8}#g12M*3>&2>vaxGEd%AL(U1@@2cRNmWbUpQM8&nD%DxJA>jkfA?35 zfBMh<>+!G59WUrx%=SF=?)Rs)%JaaBTlsfa>PM?8P;kq>+*7~%0R#$^*q`og85W=X z{_o?-?9~3-p3dBv?Xat?ScmalzIZNLIUmZw=auKTH=ef_>5*+a_g00lzwFpEi(=}9$xlaj95{GnK&nT9D8RiV zCqC-Eho=|$i}=?eqcY^j?vC#d96CCH9UBs8Z!fBx9veQ}O)v=W0f+Xq-^Op5Pb4_p zpK^OL?$b{{nIxU528;Lb9e|fEpMZP!uxH4;dv|XPKRCxj@|1nak5jpL;K2UC!h4Iq z_~kEKvod0vpEjlvyH>;#d`J3{T`8%-AC(Mae?-$sQivyHX9H!e2f6CqS|;dAtt#v7 z=E9G#2iR%s#cLNXw2xLVjvW7B>`WD1<8#LIPn+wl+sgzLev@-HfN9$c%+VcyC*Ui& z3^XR|rq6nImf+jfOBd7C0h;M`feSQs5aW!yby)LyWAooJbSFa}1;PpnT&|_crOW37 zL3b8>dwT%wbFC}c`Cj&_;ExPhHuU;_mw=$1E5A+u@iSk4F?(}tj>fHR1^%>2@!)Iw zG1}Ajcmh2H8`+fhn8VQV1YH2kBy-py0wgbA)L;8SZF%xT?D6#uJM^>2&y4Z^;}8xk|v4XT0cB|30*>+3LYI@}?Y?6^nSI(Us=Nbb*%SgL6_O|LBs?izZmC9fbga^EFf%IPBIBJ+P;00CvL2o3SgFz5da? z*~}V|Rk|Di$>*TYc+no;v8%2F?Sp%@QkfqAJ^TE*)3f$ti?)vX#6Ktj#~zgYr6n5d zNYc>L=gGV1N1oCTStDc9*mHuqj|;p^5<{|G@EzUopLLB?!>4S#N&aGr(L;Uzs$?*h zH%5`{fLEU6w|^V>y=BMY^!erqc+mN)k{@*Lrg#Pq;}KxEGfbSFw!Zb3?8tT($Ydwo z%8z5)v%e$&J<5mmLvBdmL2v%K>YGU%%6{0Ae=ITO>Xmb~ZAz}J$-Zic_tG=ozq$9o zHYO0ph8})yQm$?Oc;ZH_o1gaF@70P&)!Bv>RdFP5ced^`@fBel;aYU}*v%box*|CP zYg@an07h1ki5-0Dh z$GKBqFTO2_j=d>>rJKdsvuA4AybO}TL$-zgvHvb!_$Iww6+>&~c~NlYX`i9D1U$Ar z{$^T# zzGnFq!_a-6F<&lU^}6w|jKF@L#H4?Jw&-jIIndS|Md+seLv!`Fh1SPaCe@XURDc18 z0mJOu_15C2AH5%g1c-fovPHNX=;^Sw!)@3a{ioEF=E<_#Uet7Z08LwHjOtYh;~r9J z*)meIp>`3JyB^1)X)9K5SbXr|>xo?43WPGgEg93>%lh6Mqfjz$jeuIcI8*Bdw4rEa zhVO_eMQ%p%uKczv=aU&?WzHn%Ij8Nb88by2Ps~P>@w;l(v-%O&C#|w>S>BY(wdTxF zT%S?7J>Z5TuOyY>Ao@=j-cLA=ff%#S;goUJ%f|`$HrbCp{BSazrwlh?%0cI_d>gGf zIkIa$eDB@G=U<-60Z5^?v<|hU$z=o5Zphp>H zArojabus{^F@M*{Uf!f0avgj;s`PCo4 z8vdmOfUDR|Cd&XC@rlUX?F==U0>CbGzU;cEzG56O9J0SV4 z0v_60>`wW~V08e~!}?BBaMncm;Pc1IUQ_zl+DWcXuyFCm&~Rmh}!Sw zrQ~u;pd{hNur@dR#<(WL@Bl^Tz!dz@9xj7Do^7&pBHx{?NgcGKHTN z0J~qH1i$=)TKAkdesu8{zxv%$0i|FVP6fYu8?!{&WHp*UD(o!;aHvU7s}eb7#+u;(OW}t<4}LN6#0KKtoy3 zdm0~kF$yv|T`l96lgfw%SnkZ=egFOM&6+)_AGVBxv!_nB?#gH#Uc5TvwF4-77XW5! 
z!7ZT6#rRZ}3`3rrm6dX>!!Knd?%iGg!v0M;l6x&je6<7u1pDy5_1K(pcb#>0u;`6= zoKave$cld*EdbDDp3FMIl1Bv-Md0I-B@bn2{a63hf4cZr|MK5N(_IA%a#~W(n{!xY zMq6J7;T#bOBo#_@DmakK^=mHf6pzUjWa_-2Bzw{$ppRh zfR@4ZH^2Vd#h?HApJf>8Ihf(p_6DqIS@W>KgZ*P)a$=4i*wz(}QnibK^4`3G{2?!y(1Y=br(LFC~ zJHTET!O-5AG4~vD;&p3FPScSiL*fJPFbDF^RU*Ub~o~T(F{h+hsGSsi25Y zRFD9X(3wtqP&qsPv2ON2t2z7BC^*8}(mA(+^LP7JCGy>4kgmsbh6{`#U(NqnS(48J z9|TF!z-NBQc6!CW7t~)Luqk^4=*Kqcfv7a}AQuFyWt^^wk3RYAlf|W+wx@wL_9}f3 z$e?GZggl4mdG_0j_?y1BcJ}zt(|Y@_vB)-N>~qld>7Mm$Y_kSsY1@b1?LPGO4z%il zLqqGuf%ksBQRmD(v;MuK%!B5>d?!O&)_elLYrCdrFQ06!DVKRmAK@bZKU^^X zKVL?V)wQ1Cz|%v+8(q4rW}2Iq&q2F3i>DaBAN2M^Ms)M+NpG0-=rfLwX_D(rjd2hYnIeubs_Uy?4PB}GWtL8YML+$d~Lz~i} zyLW0kr-fgj(%913>Fov8qvo+Yc@1P_Z{lh9(+YT`4?s1mO#|~_x+0AUcsmAGDz|nDjCc}RIE?njZSPO8pm7@R zw9v>dN25uY>6&Se+57Mr{<7Xpy}$EO;$iv^C~AM%XTZy2C*I9Ys`9g+yVuUoUf!1- zDnaIH2_ld4alZca?*j+|le0DG3;JvS{($4?C-LRcgJos0{Xj==ijICq-)pKB3n24F zmFbgRe~2^ToySkAAT@^1)A%561hlbFr>hDIK_+9r#xEWFB)o z)4JTbwzyKybToe58vp+9{&MEGs}?qU4jilP!@dFf1pVzB3x-xQaPg7(<7Y_*Ks!La zvAh-d%ubgv=pape0hqL}WuRgffdm<~2lLf_^s`?q{+ECL|1J=IX84jn&Xz~y0ntWw zvniKrfgiNqvG4W-7H`X6RXrsu(_Wj*@@{_e{8oO=u94~ACTrMv5+B&<5+%lWZXMW= zw{K_n24wmXPz6@Xnx*TsLfD#~l!Su+d()9uJbSxAWcTva5kvYu0=-I(qNhhpIL`IJ8AaV7VZN49>9| zpDkM!$b_~8^y-%F(XcVi`nRSsQ}-lG`2}|i47!(`XPawFvMT#Eh-&P=HH{Cy1Iv<- z^WzPSi~s;Y07*naR7|!2Anjp(DE|TY=$du)I9|%llEwm8ox618@bQ@&{*y$2{(yD- zp;aVV?VJSd6UPoq_Z~g^&aCa-+FQ~4>?RshR;i$-y{pwpR?XO5o(oHP++6R}mWbX# ze_5&A3e4icpNP-&n8{T_+-@)X@-Q^m^ zW9zb^wPJze@JCLb{HjBDFAPBG+yIG%umt}d-hX)3WOy$u$=a%#zTSSFc8#?m(PO^L zXME(yqp%KuI@!r@(>Gt$4SA&{jVk7oUw<7>>d&u|ahd#L|MN#OKf;6ZJ=-$^+p`cB zBR&=+vVPC=AxUSykL*u>YSW^H3j5few?6o1b*aAAW@Q1t7x`sugb8f*$b=CwGe4`# z`j&}Krn6C1!Mw;vkxaHVd1!3xBl>atg5Ld;AOCb2!08&M#nymq^6lK2Q&j|YFm?Y4 zQt*q(wPyt~*udZW-bc~k^QpvSGi-}4D(H5Vz_d$sL&2--`O9cztZZaSoz^{#9bLn< z;)khDM2RpFw)CxQchVoplCVWV=A#8PCkY{0g;&!4lRPKk5soTZ>skCy*Tc=mM&8hV zXwO?z$t~!E&%Kxad!CO;u4reZHPW^)3TNx_GkuR&*#!cN_A}bEkLXo48hWsA&^NK4zW~X_Q}`s^(tIZIqkGMVZ2$wISC1V( zviP%q{Ertq!=a*#ks+4b8)(1#F~M;*=dmV$nAkQ_vgF;!G;NY#kdsSa((e?n~vf6(3qxa{u z^;4(MPPXp0gNMd&BhW1dCS*inOqpqqGvGut(;R`_dzv1=bP4D8C9Qu_4VR=ZP*t%$`3Mx7$9rQ+*?@*kPXqAr!2}s3{b#a&ns^L zfQSY%{wY5jRBs(d-GJzgwZV>K@F=6_enuIvXjR}kgJWA6>4ZGTaeJW6M7W#J_R9Q> zAFxM}doBb2dH|a2HQ6h^lX0$GLCefAVpl0)e04jV6q0neKoXMxqU* z*OA_nn~UNdJ9Hpp?^Qzkn>Huq94OIy4;T5p+ZY`FwIxID?LZdI`>&-yfo2rp!7^2E zrsx2|{y5xa2^Q+-nvs^FC>lsv$XEf+tZ0+@jNEvgQjDvx-(>hOU~OnwKj<)kL*oWK zUcMHP9Y5`JU`3mZkd`4e?GQ3%ds^jncgki6ybsJ5wFQoF;vHDRLC}Mq1G6kSuWUG) zZ&;IH$#Cy0TyPQJ>R3QXct?ildE8%PJq8-PV{2|;pT<+mO zJ$@Bv77b-1*$*RTq7MbCJBo}AG(+m}z8}{`0n`PXFvUMj%o)q zW{^8|{mO;&0nYbFd1so$pH7lzjD5TKaqm@P{`uD@$KYdxI&6~?wP&pPfvR389flvG z;m5~ijnE$l4j)WrX(dy+VslZdc2gUXj5)1=lnov_bhLh{46Fi>8F09z1}s z566JmUPSuV?J{=@CXiRl)-lU=4h-K70hWjBHAi;{US5qqWsNgR=@*U+&>AoP?)QI~ zJu2&(u>izcX6cjy-5PJnpjEknIq^NClp*_Gdrz>eM1YZL``V9W`Iel)HBUOIFlTU6 z#wp{@p1U3`WtXh)_cehf*REWcwXip2!{{0K+u!}ZgI@#i8;j2YTAY3TW|!RG-o*Fu zyh<)cEcTH_$mw*D3G1LIM$@l{~OiyVUk1*P-?y&S(iOINH- z&j950!Lx@O_Rx|sH#W?7Rv6=_k0uKfzqEn7K7H5?4&I#ayOoR@dDUJYJHtJ^qvoVk zTy`lV+H17eQg*nKafQ#9CSTw6ZM^k=8uaO0?*?FvozmA!u)fQBE`5L}0Q-Vt`snR? zwtE5vTKBwHiz97&7|18jo?Xhl*4bK+Vk4iT>x(kA$+t1w1En4ZkOQY;>M?}Phg>5I zCxFl#$=xf-o2TtDa#o2nAVP3_oX?!b0bV270ITTPzVuhKYWzUF4D??7ytNuxie~Y- z^Ec+Y-obv8EHU6)<3=I}>)n~2xtE6?<2Yq-chSs?`~}!rn>Qu3>>4)NNansb_u<~? zJ$6ojI=w#cYHb<7bcTBd#I_K(PcSE1RGF$X3EPt}|ZiZx2|XAwvHKboSTKd~9lO^^Tk%e77PX&9$yu63BDma-fa3tb z%~9*Op>;O_!@O4y;C(se57>O((`0Vnx!#_ybz5?8`ulhNqio17PYx}?SOEic0!-

    @Z0qKi^lyFUp7W#=!*HBW5yn0&y9|0JZJ$_Iu_`* zw;sJmj~yF8fzHuCS@s`0%D!~pr3--v$!&6az^U}H{;vQZdW2m@PI0_m)JNO_Xb#m< zA?clL-wDz;CLor*RQS62mr=E~Ha+no$Di#O2@3R)<$wKQOvx{wy&a)r7 zMxb>|K;kcc@{`GiJzE#`CVj11ggMP#ZU9 zZ)T$aahLl$-j6N(Tl*fFul6YL0S&#I^FG%{`!_xzn|2B4EfX?~iS29esuWq0#U4b} zC2yVm=Ih0WKgy}j_GK3w=+I5`c~sW*rBh!nP9_UZe{*VR_c)oprKBPKsFyNBtIPNx z{w7D7*aSq`8WMp3FSCEWt@*D>|62QHwyHh9VsWsH^0+jqzx}=Q7K9xyJlQ z?IX6=HU&>U&9=FE;Y@(S7b9cW1rqLl`^e&ZKloX8K?ybS|K`T$;5NV#kkQ({DA@Q` z{vf@)EuMIhOd?zDmz5bSWVW^*n@VhWmaPgf)speVM?Z@0fzr)^UHBw>;$~GQ?DviN zMbDy-3dIc_%y}pu?{=Ancakk}as1Oe`W}7s{ol}-dd>LVf(Dyvb)vFOrtGTdFF8aN zrXLx=GE&K9d~OX1LVjuwpFif-E1D~ScFA*VWGhCq<(kAB{TsOCus%Ga7x>l77sn4i z65zS1_1u)tOx8I_djNR0Ph(NFy$nDF{N`_I$;ijsb8zdd?F1K^UxS=`(;TzDf{lU^ z+Irrq6^=b};PBDbrz)*%LGpQ9ej9Lze7Sz@LW-?Kk=xf6Uwrw=$cDZ94^Ck6M)uOy zWXsO%H1go`rGTh#2;lIlz_3jrAN;k#Go$+-XKV1?C8XK=z(F<(-6yd5R-in=a5Jz_ zHO8iR!e+aZZzJn@|B-iQjmg>w)*fSHm1x&9MohH^cwi1MOrG#{$ux3oRsJq{B0zcM z&^yzg7!3gV69{eDREOXrwjo|50|xv{PFjn@M~^3Ct56MRdDQo04}VI_I`b50diT8_ z&7LRWoF!r31E9%3dru|w?VI(ePtHrqdy)^ON>!%)_P$3C0Rs2&+wjlJm#P;EJ_anp zVXoK0k?q0`R#oxwAASpQPM7TK`ehqTfoX{>T3!H}m%dp6ismVJx;ZcmK>rly}eBj zc|Uw3Z5O|)R$F2-_RNi{rEj%IjNA2O{j|6vZPF_$x@IhmlRn|sqNyqgh&at)Z7WAwYKS@x&Ez7ZuF1kJMzSpm^OOkJEo8Z!SXoij3d|yY4F}25jURJ#L z?(6(o=Tq+6vvYD{B3%(3Rsro<_N~lf$8{3}Un-~AlFMcwMb>+s*7-HMvy%Wc3 zuW)`q7VR>&r4Y}ZyVz#UVd!0i%*M)vlvn`(?*vu?J9h3r)Je;-L+UZUDJ9hg;7kzs zJ3B>t94>}?L#8BbXeBy(Iw~m94X0S**u+Sztas2r3s#2L3F(qN99SxwA640COcuv#p zN=&ZQ=MSNQYcEsA7)Tbtj`eMZ?!%KC+gz`1MbEC$r%_4C^IL7`X#Aq*zI>%THahm4 zTNx5JGUhm00Pdp)mx@{222Sk=ApD6RyWXKA?Wh7GGGoikOo=N$<_G{TzB%>P@WB8_ zjfE0@)8`uxYw@x^=j&-@Lr>x#K^+eU1q>);6U6Vbff?bKuUxIq^WIU;dj5(=5;zPN zS*>~#3T#+kM%=C)0deu~tpGT!Uhu-hc*+}Y-Yz3N;WvA;|HJb#P=_a?^Ac=ez^_{z zsHFMS$*)t!wHAs`?QPp_9QYYxzco(A#27X`k8;;@i}S z9FhgXX=222E}rzq^}c`dEJ2v^9l6l^0a4~LAW+XH;AI+)(U$z+bWkdc;#VYR>#fSc zbG^fYb1{J@4vb)Q>D|xp2ApwjMOy_LZlrCi;uWAUG-xP$cfPgw&;QB)eT>5Y-@pIscw#BLga+O|uz&Hp zKYr3YIv^uifOnjd{-D+}Em485N-~oL_wIii_^vYGZ~C{asX&e9rbqg-%GLh*SHB+c z`@jG1{)ff?^uPRn%HsNZ0%lj@A*H@6${@8?TJt%Z>xRWAAAcUWP=-># zuuL9-r<143P73_pU%$K^83BSJloi=@_H;%@yzNwPN)K>x`$AK1Tt_PqV$W|bzg+P-Q0YvM^VX3y>&bKpu^Zyc1(8v}1;R3!5q%Ey7mBla== z0lx0e@zbB}a=`We{RjH4tgZH7@%H9hR@whS)PMfod8c`v-yuax%o3BBDN3SJDO6D9 z?y8;+(_Rn8o?T#Jz`$Z*_`&Y~;D5mn7JgvZfFC%(>m9?M85r%|+3xD;uIa9*N-C9d zU=Cu=Nlbpft|PV&Me+AL=RWuS4OjeJ;oJSsmf5uUYqU_uUt(zDX z4%=k39j(>Ks@@}KjvPN;*4??`8RsMb%LV!b0(x}w>%gdj9h`p-lx!Q>aSY?@Wn=(R zw)Oehtjjb&bWK(*`AT2#3jwt<@$Pr<*R=v0XU=?43FW8bjI2%uF;Ihy7r%S^{nolb z5BbqEw-vaMQBGDf5E;1%iqR9+R-j8|jccCXnS&NU#E9hV=nLu^VDQ#$TXW*;z1OwZ z(@h&U7Q{>#7^ia`qQv2od-rd}U)LAcZe1B&&%qk~*7`9Hd$Y$g=;LeqeLW!0;MLZi&4BjE9QR`sG8$#5n$P@~&U=_4 z)mM9UEn%2!S^apDyJrHd#>kJJ7?~W9b2)dLYVRiF^f$lx#o{mj{7>WUfWqztBv?GG zO+b7osKWST6RcbrU>GfOQh|Tg0MNwg0`Ahko=2CEXL@InS(nlm{$&&4PXlemnj1qq z+8{ss-81MZ&e?L4O}Wn+6A;s12DG)t2iCMDpXbec{mfRo7ifzv^!Q&F~4D9BzVh)jR4O$bn?<3%^q0G%jH?`-!Zyf;NW8+ey^K;`5+%_s; zcmMQ$w3aOPFaEQ>_GTz_)~@mT(>+vWSUdI|(tZ%{8pE^&>R$AoiFqa&$pMl@>-*mG zvR4LgUV@Q(pWUlfW@H% zlB`{u-H{HrW)sj`pVJj@k*Rp-5s=R5?a{4q2T1NJIDR=ED1g5ynWew2d(m&!OZC%d z$#I!k9K^BTTSHlc^dLLJ^X{~E?2#$G&bA~k?AgeQ=0DYLJ(DaRUY$cg|2PCt8?{bfD9(3o`sT6{q-v~CUrr$2{=`oFVtJP#OYd~4cQ^ln_^ zKP6ut%O>5ps$RG~=T6sZt8=^ecjn0d;k|bzc>clre@L$suxO9=@7p~=0sV3Xup5HW z%zx}8vKugO4JJr8v_w8M$7oN25>WK+1da16dM2BVjvIb*O@9H#OZzcd5FY`|wbNjC z2=rb|p3~7PQ2`iy5+ETNArOrd0hPw_m?#t$iI+T2X86Y+zSs92Up&2QSHa}kfCLPE zQpV`T3+MCoZp`73O2ZwXPA*?5kfa~5=K%O_2dvPKbolIdbG}vbRHNE-0j>k_Os|Ct40qSooUV828W#d(rALE@qSzEI}?#(6iEM??!k5{eCzKlk%L>o`_ zTuDCDmgnBGs&C!KZHbq%i34HDv#DE6{|LI>r6q&IqU4T+gZjEN*R}W@?S$}*?2azKWo3q4~eP4ZHK+>D- 
zA0EQP50j&F(5K(yYgw0UIqksM=hq8RkZC)b4_{1hdF{G@*p2bat-!r}+dcKeMR%VC zb_=ZOMJ)+Ipk9j@^Wj?yW=@4?_w-y?#)r}3{^t%RQ)+uzAm23(7xjJ89nU`ZVh3(! zHwyX?#mgX2LB4>z7P@}z8y4H*o7>r}Y!PyEQ#P$&h_j+pSblK_Si7?J72BInAXq@Q zJ=ztp%?7fro3aV7b-dY0TMRWNe`0oEN?~+IT$ELxr4lw3ZN`i3yfWui; zvj~uFSY5_@dm?CuPxw^)jGgtqC*yA2Y(CM~1Qi7j3IaZ0QsS# zwbV@RZhC5a_guF)emo3jFckLTgQ@)H6P`F85E~Eff9|D#x}5`VlQmBy?@yP3Os6$yk!D@=fWLyFwpaxeuY8p+4tPy;7c!MgLimqd*(eE`A_fIm8{Edj*-X^ z`VVbtsgibVZv$rYMbHeNLxS35;eM$brh>e=s5#-{rI%lwWF2F8&|K}v%q4%s zeo6FLXMW)@cELf(37*Qmk>9pDXfwl`zL_B}UkstrH;IUAQb_;IV+b9VFU)}F1!e;vwAhQKt2Zdy7# zj-IYp*?s8KLyMpN8*>5F+>gy@z8}%gSN+nD z%P%c&W$uV%9F%^ZkDCl|Y7B8->>_`BYLWYN{& z$9Zt?8=FxnKG0?9Q)Ti@X4ZR#&V?A zBp2(CXJdWu^5vr5F)l;nb-shrrG@TzQu-44sQS{hz?KLlkkmTFP;Bv z<{@KqZN&di4o;(;KIIMIl~-Pg$n}<5v-tBr{=xJMJsqfWr2|?RN9K;fwfJC&18orB z+Df?0n}QmGZEloeWYXI9QDZz+I|f;-O7R&w0Ckx+qJ8Vj-d?j-6wU!y^$zNAMA=-; zdJF_}5jABTS{DLnZOrdyOu#WabRo#wYkzbik4C z_u-VoD*17+$sD4Zpz5Mwh6!Rs#DG`Pe_$?QynpYW#R~`arI0Q!u9tl@z3O62j5R&y zQkYucP>|;@oE_kRz$Qchk=Ga)(J8}@L9}EpEe5m&h{;;-wY5iC{bUj11c~d+D6C8bA7Th#jL*)`{)&K832p z%=_rK%Tr)Q{U~-j-*`EPoVd1f_7*cRSP5bPwE6hV$JV7q?vkF>J4+)t1{!vdzx)270LU`T3P70yfLuoH`V8Ll>(R*<9W(!nx890=RxJMV z&;Hrsum1c0u~MfaEj`6@Yl0rzQqrnK9t8G0#VE>H^zZs;byf21LZHNj0%do5w)P_a z0_mK5J>Xk&yjrjlu%py$|1-~IWCTXEFAPz@?z9(ae>TNKdQCDyb_Pm5cVO@Ek~Jk? z0pnyWKF05QA~Il{Hm=Vi&~o?QU5n$#j@QcN$N<$VpQ7bx4n|vL(Y0GS*}-93wm7`# z%pAO;?Do*%Ph0=847zf;7iG3ZHw^Hrt-tJ^Qyn^LZSa&{vm7C7!+CYBAeGE}{j>F9 z*D3=@qNMYBz%zOvpY<0M9J*ZA{^{g05JIq1`;(Qeqf+KQd-f!g*VKRfv&v7;l{FyC zA{h{qkf$=^?Kiq>DRb_fiGbegf0yC)!G|A>K{tkF6GvBoNHpCPql0usnJTy9C5K4= zH~;(p>_E(C4xCbP41}X)W)7pZ#lm#xh`3GMvNR zkcLjlMZNr9Pv;*yexkm~yB9CNaB%V72OoA|@5SWusl~tkH~;T3ewCq19FU<&wz;oo z$lhV_Ei*UU-+N0ZqGcaC<~)zC?YsVH=fmzW9!U~FvrWuH)kAxdpu(-^Q5ks77)JC8a{G%(hAExY3 zY=Z2eC#yvI%7CUD;{A93@cVe$p0LLnQ)5-J@xsBEW`EF_a|yH&11`?l3e9!0?y{3q z>ygFi(R?OLnT~HBOmut)5b`-)Yi&lCw0Y=%#p>oZ*91Uy4I7)IgR}q`4wwbtp<^wD z1cCrFY25Uwj8Jm_>79Gp$FgAqp|<6`p1yXf^CeW9ES)~UzRm2m z^#`tPUHXIW(q@X&NA9r^_U+x9olx*OT}ywip1=y4V$&qQvOx$o_JeaF$g`$4V|72g z2*7Wv^EKE<0wwnXX|i0(7uJ^-N|Z5xbbQAAfPJy72q}wdCw8t-|S$*_!6+bJ|`XsD2tD6j&^`%^r)Vy*S`D1b|8MsR`~T#{&wu{Z+!a)i|ZH9MJw6f-OC)= z2dgWezj^WV#gBjVZx)-sr;yXY1i`KV&?ypubz`V`%HGi^1fuW1$c|H4z z&eXbxkHmgb zfU!q*-jPo}tbIyg(os)f; zoMq8PfK|Z>l^vhe51ec;cY18s?q`}e9uEi$bdxs%P zr{{fsPtV#^h2-#b`zC0q;?e7g0)phexsjbkx<_}UCK{h)g%W46vb?F%fgYT8Ek z;=rt*_FH6HTn zM(cn_P9~36wP$=2`h2k>AFcZ^70iJ@!>);~q6_j1Cjue6mST(J1S?eH!wLAP_KwV2 zR&OjZH{uVzB0Ju_fTw^tmdI90UG&lZd#JJ?r(S}GEpaUtoFoYBh2 zLEi4QSQ&V0t!Te!;K17r$v(1+j=GTSgWb`0bm}tErr*;; z)(AMfvmgcxh@NMs^PeBwEIZ%(@r$Yz)kZy!7;NJg*svXAJ>{`c z9|hv$&a&!{k4`vN5;NT?IDts)nbxiBQL^^2D!}o_V35&@{kzq{*bl=Y$DeIY1S{E@ zA00Y6e(6$+&ag6+8Y2NT2I>sfw1*sxeFzu0jUkq=FTcd`;z;1;gXTu$G7^+8t4PRnS=M1^XZo_ zf4_J|Pou`rB3j6CniK8-f=S!&x&wPUxPf7e*={QUi^mSXh+Ovl?D7S|yT2^i5U5hd$p4(E%yV@&Ns zu0(JShS`yWfPerWcLEb`rC>zYG4^8#HYIb@ZlpbELMhA2{3BA}m=nLPhth5!h&5mo z*%4q9Wjac@^)$vI$c&YV>Z6n9j?XZYY@IKP9uCN#p$a4#g~VBnA$PQ{`#0|!+F(e_ zhCW%@-W>E6m|;K86)`3-Wd(6^%yC`wux=a<0@rD|qFi2#VG40mJ<0E8`~m3AN9M=f zF%DDYltBB(seL@UaWDi2Ly%$&$X1ro1xVaf_La3mXK9pq-Ut^0rVXIn>^;NA`?UKeX2U=`+j*<`Q>t41$e3g_zxL{@MX)b-_+~oIGvLx-XO6krAF2z47x7QQoUu`TL z@lOsPng9uY0swBv7~8Qm1=xC=yWD|`@xP1$M!s=bONU-+#d5t&=M80$J^~-NcJGfq z`nWbK#-}aCve&pMwo)x6%N)o6qCj20zUx2#@O1otzijiJ6Zlb~V!zOkXA6u9Y6^C2i1+oYrTjTsXcXw_&&66dAuC{t ztXnefN{*v!c$r8HHLZ;BH4v8^bC{OTWQcF=d<7@W1G)t=w2vXTf#l@8DjoZxYGd!7 z-Su}r-rC)-#nr9U>d9&4hDT>&f(L$i*TGQec0pYM=hXCbV4 zLQpY~C1=xG07q}rUod zjHd9wWST@vbYsI?>cD7nY?am{VK&YgLv0M--T`#%pB^E}f`nYD@H+ zx8F(R=A?CS=k3)6ZknU-nb0F+6h9Haz^sNfKhty2W9u-_EjR)c_M%(OcLLmW%9r!e 
z3!kkpSw8JQd841{*h|ghu>yq3?eQ?0f7qP6Yvap_YK#M7rc2C_!#0_}o@s-uS8FT4 zMduBW-gVY$dB{?7Ast~}GP>-M$|-Wt&$EdApPZg+Mh`9b6rjNwSk5&*4M^jROd!Y; z^7hRUbu10!?A`8Zoc4=MU)g%lxA>l8Z+t@&U+Re#ex7aKYyhp|@YI8xLgN|7vG-?2 z&DH%UbKGWItL|s63DfMwuIsN~(Q04nuQ~QczZ=(l2aOF0i3$O3L!zwQ)Nt@%^n0C(2LtT(9dnjz&GFg&SV5#2^inE zXIGhymHwu)R;*}GqIIRSod2FQv}mmKNJ};}VcdfD54!%*Sbc7K*1}pk#1v1&Or|n> zN5_$GWD*;N9^TcVO{+^(5Cqc3Mcak**>nKS?FF1=K+@5_d;6{VuIji3lU%7!Y=A&N zrU&*+FqE#@vUTe?@y9RHA?5+AwZ*86EWVB>o=`%Ja9l_QEM5>~?RtE9mBOUns{BvXbS;xzQ$gLXQM%Rz_ zH^M~{vv$^e0PfaI#fWvjm)+VFN7idW;Cu9)eSPuy7y5f}@snS^HGW8!^$hnz7d@wY z3}0o_qT8wN>T}~q(fHQ^dh{OKa^C7~&iG9S6!)_|G+oX-0Paj+Gx;M+Z9wAmith{X z?n=IG-;#6Rx;PkEhchM(*1>N7G%UYu)FD*YeiE|eM#m{N^vOHW(t4H6n|LAON_V&WU3IYQ#{S2T7 zs>r<7$6ofR1RO2UetzW_Qx$bKJqNHL7i3D(rz0~P2fG4|JF~uE_w1015{OMVGJ79Ei#&L-;3!2+xo;9TXx-DC_}VGmERtUYJv%8F%! zqA4_wzR#WM?2+W(_CQ78`pRVa(g#}lU`xNa=QTfa<@71%5LFGCuSbKg&%edHd><{v zw8V0N{pb#IuV({xZ=5X8L~P3a;gkr`fGcc>YtxDJaUTdurB*n+eFDJV1v*MYOL z;j`1yY1)C26Dm^apJ$(WwzZ3nTMx2p)4gz-i~@T#_J4D8{?e7?gWl%&;A(R~hwdlQ zaWcCtBm{CCKa@IYL&thTV@hCt0s?J{7%hO#exwqr=$5 z=7Y}MpRAwin$`h$YcJ8>w1kXi2Xlxh#+L3oQEYrCDP;-T+ADSiT#262F<|W6k^ton$<23RVeDh;)QHE= zHHK*s*nSIA3T$nT#wNg2!s(+IK1s4KzWc+!SUkOXN8ewu`0(A|7Noqe*jw@@JwM3} zVIO*uKV4$Py+H0yKRVW$6!-}%wLa{4`d)=H8k*<#?E9jA8K-pj;6BliIqL8r5Tiwx zZ2gQWmdbpt@r>EP`K(3%@{Rs=i{(T6VXhe?q>Y^K`^>9< z2)kLe2$VFNd-;su^zwQCo6Gv#H!bv+AM`$^oOfLq0T8&1p7}>|!{ffHIAY}}%Mcf=ns2IL<@s|};@0nncZ=sxIqmr@`~zJQzhV;L)= zU)f)P4`t)v&63!em zhhZ?_5h>%5@*M}V>tu~!c0GuUnb0!^a{!FXe6e1Hjy4Os)-t#@ ztgk>`|XSpFNVDF5hm8OA&)>KfE_%!B|!AtrL*Huav0Eu z12U%vRe=4K`mh4Wwmi9gv17~AWqWs`c#8UiPd{njWj`{ga~PvNKrjP*OKmMM{g!RA zXq<@bAfz&O&Yh1wI}~u0laoEaQ+^Y)4#W}pKABN`?D&zzE3dv7t(Nh9ZkbK4B?aSC z00VDQ?v%*kqH)U;u&YQ&u%OG~ldcm%93UnJ#=|SxbL+BG)o-;{e(~$yrsVXUE>a%9p;^jYpul~@BNpEZ09#SHHD2RbbDqzhJ2%gK?X_3y z8U34a6u(x6-|x$$6(F*{Z~gA~^$f{^4fDvtP_3CC2Dy|i_ z$hbQL4Y)_3%Rt&!min2px2_d@a+na{Q_GdT@%V{jC)*PTMXsq8)Bd@y7EX6+V>IQM z-4np}AOesLA^V-7A@HJP)*-}z?#Fxn0A|tTD=$62ct>XWOR$VTzd%0!SZ6FkWDskta7&^a3qp2HhZQ^OHkIMm9g)UfWlJ%PY}<64TQoS*%}Pve>BE&bzQL;C8)dX?(+eLS8N@VgYa^x#&}_GIC<0vcCaA4UTj zmA!+nW#rOH04jRNUbO*@o7~UX=sx5I{uMxBkfCL7$c*f#F;E#}14efLt9=&;K`&g7 zt}o}<_{#Ioy}UT^+`;tKngI0rS*G9g!r#01Ks;a2HX1&6_15D5Ugd)|!KQld`wRo% zXS^&cfQ>)~#`UH%vW0%*Sd*z7NBfHBNhY#noWp{G?lE9$v@8pg9A(%oLB%H2eILDg z%-jQ5N#?I$l%{i+K&W9cR+CxyDPY-p;E@3dn)E*b`i*@u*V<18aZ59D!ML`@OPc~o z{$~##U%dU!?+d_wK7TXXX71LOj1f@mYx7=GJe|?Z{2c+E{^X2A4=VxA?c0=@+PMA- z+R#I?gH>K6x))b-j>!$i>@vfJ3{H2(dlpsup8;#TK{K57wEwQ9`(z8cj{Nmn&b{A_ zyEU=)UI0dG;yRyk+}Lj9H~J+z=kso}{5RXq!uLA|t*>-V87HkjP>CFNQ14ve|6O3* z$fOHLw>|bx-!~ptW`jiY21)k;+9x}xpYg&ngV8g`(rweN^bBBV7hlh>^N60&S{vh1 zkIB$!ubUH^ANKIf1~jYVHhph4Z}vX@8I6&xzJrcEx67iToWI_)-}8>^{OR{cFW0!E z>)fBtv`*HtUuO(T#n-Q`#l)WdQ>k-5`F8DknU`%m=Y4PB8G1f+__M{qK-G2G8%NI` zj&2{%o~eafpoZ!X6*NJl>8EHUy?E=Eb`VRs@MzB_p{BK1&!K9sB~Spd@R>QvD2wD8 zuK*VgYu09A-|pqPI=^~1U@+k7MzW2(WpmMc zkJb}^$`oUZ;t{rn(s(u*Dxeo+PLDI)Sb+zC8NXjm-u{p223hv(NAzeu=4>y=ZbaiH zGXSQ6WcHKJvR3RS8%1xik0iSEYh#Pn@n6?IvAUppJVQ@9WO#RLcB#YdRf@5dH$&MXA9;k3SK%xYS=xUOE*nKyawiJ5S&y>2l|8N=1 z;4?{did=@a=DGiUpEL8Rxi+ zPHj97S|cEv%;WX(-5P*vfzsoL-p&sEaIx-7J4TS;$3Ob3#lQMr{?{|7EAHJNEnU{6 zXPhA-aB;i?CDDY6fhP-&PiErvvx`IgiP8W7KmbWZK~$^fPtKV$Q(@3L+qX${iRRFq zb{lBU-c7Po`$pcmm!O#}*lX#s@k0YA?IX!eZYcNHU%4-g?3v`R>_s|+KaJP-W=E_1 z^RlOS516(iK>T8BF4Odh4(1g2Srw1)Gme(^oBE5MwPdM+xBcGQz8R-#S;>G?X;pyX z?8#%TqgJZBdM~@TX8^|TUtI!At`ktYP_otL+BUrW+SkV3-gn@o_{#z1@l)Ti7q_yX z@X1rHpDOmZfBTEE8=osk!Nyk6IoZ@@iIYb_Sh`nGL8h(w4)7YCPunw8*_umun;+Zr zdjWJ9{mFdO?YsBQdVGHD(Ab@?zxl?@k>AJGxtlB_ZywqA>_fnrmw7%Bp978WB==PP 
zI2Y$({@t2n-s6pdtxD&Q9{EzvvN^PpdFS_UExz*ln_t?>$%1HF6~dF7woXt$utTNs zCO~rmcCuggxOP``039MgDoD(Rm6iJGCm$@Hd-?U@|INuJcDL3*coX1zO>36uYi+>U zz5d1f>~leghB?{w7cQJFn|FT+c(w5f1HtS3_p2q)JRSakcJH@G?|sm@PknCvpLynm z>GjO#U0tB*bU}daC5s$<;pG95-+%uPi&ww?y#k5*;?J_yqoI4}lUoS^bR{?dZ2tJ8 z&Ju{8f&9Q)dX{}D>;Gy2r&nHibpjN8Nca!`LZXYRn|=EZEI#@4!)(Txum;ii@=HKGc<5)Rp^#ncB5+IFa3UV zbT-5!$@Hxu=>g7zY>)AeRgQ<#X z&i9ik=v5HnV)NP&&#Io@l?{Rp-g)Pp#U~$s6iB|a@l=@<9SStKsmaAK;x?)UVn=p= z0im(UeU_{tQxUmqmv(gjw&wR9hA*Zs=_z!2x%p0ajNaQ+Q1r=g1??uCwdY{l!Km6h zk7AFEMcOm@;^Ycjz_Zw_r|hQ1|Gjoe+oX=J$1;YIh`AGQq7wnvV+J+`y~d5FpN-|tMC)_0b_ zxMF-VU;d}hd-u!F-H0Gt-n942c)~<;BOm(Sm;Vsx^NBI^gU$S>XZLe&EJ0X5Ef%57pZ-M~n1LEo?A&u; z7I&f^Exa)})%WC?bMy1s2!RsV5|D(TI1WlaS4RkXvRPakAfu2nFj6fJ&K^GWrIGzb zS-tf?OrYoyC9BYw9%KYPnV|Q)3uV+bdWlB?W}l2>`cFjNM&7G5763;eDce8*oHh|m zh93uA^!Di@>r;}Eq5GiuG4c`Bn)>G*JyM&90C|7_(BZsWR(D?f9E5PG3?WJcxaOLVi}c!e?LThcaZ+`? z_}V8FSc8F#EF(*Ws$($J>x{r?Bwu_7VR)FJ%KPz zc?_ma4ggbsv@c11@oLP?my;6u$P+2`G2a)R zr+B)T0L;cswX|q#zf%;wla09*Pa8p(Aw&?-7`?0k$yQq&8g?ICddFFCB=0$1$^b|$FXN1PxN;=W$&O9 zf=0BPBR;{a?hklj7&?4N`SVka*WrLyidG&!eQcER2;FFM9636!`%tR((mmU%_$0ZI zp!34JORxyI&@&QN){BJFQ)_3+W<&e|9Ceu1ea29Vk+bWbgo6-7;?a zuF4MmI)RKFZbrQx&)1`QWqm*T=}#9gJp26OUfDRu%9xgU>^a(|96fp@{`g|?%8LgB zp6eYRZ4+th*XSk8B7u6R)x-9Kk29vC=l<9$^!@Q?hiCmA^0~F|Gr$18oN*5P?SexO?*|YRWtTxB zi-97>_zV+ewCgfb9&0>%%N*1Ss3DBw^Xb~PFz{aLa7Arz$UXFU_{foX=FZ~fuY9GoR$b$u zu=VYqikN;5C{e~O)8-XA%7V)3mvzqWX;F&%1c1;j3; zYyRcG{1=PA|NDRFImx%ibos*7`qgb+eDmAiUi|K@Uk&}r6e6pCS@33KhW!BCt&zQ2 z>cM5^__cz^fP2{oiMI*D0;&OE-fwea%yd}9NQaxV5=Ouhoh1N*QyiQ{Z&`aX=t2C# zXlHZ@+)kxr|5K9dU{q~cR7v0wh=Vd#bAi82!Qbcw+6E+8@34#^RX+2h#fv z%DZ27(aD--$*|FiEQ2w5@YSz>wZ5Pi7C-*+kLox2VOb#^^m8#MIHwfoGtP`r#^am` z&IRyFTcfdu$h5}d=T$3JSGi8Wj{}m9ZanNUSz+FrlJ&+yMl;yi78p_XSl?_ft!aUY z31ar#FV|O(zzGESz3XR<(xL7%_J!Fuj-}NJU|Z6N@q3Pqa$mg=?U_ANCO$3b8arCD zpI(o$0MVR10sKMv*$fl#n9Im-PixH4E_;=nnp<=)`?K7~Bz^0)x!$;Rz-1XXeGiSJ z8+FI7do21L+p2fTzR`zWFr(_HK+BCdWVURq2VEw>yrJ`0WQmih)?tz%nvR3J$OMilkiutdYjz3RR3SBXbr14~ z!)R>mrn3QwN}*k=J&a_A%h_F8b?7q*Akfy~M1U5+gyRMjZ~z}$P-eGaF8)x$3rql3 zx5CRHckA2q+L!jAsgi*JF+F0Z`YAnk=2RuWK_g*^q5w#!?~?nTi=G9I$x5wA zPN%odoGS>iS!E--h}Jl9_PlEW;v7x9B_PxT%5ds_U47sFHV5t3WZxOTL*Y4p=xFTG z#xDpmt?XLU6%I&`9`M}KUkHe9U4o^%4?kdOLo~(qGil~o(XcZ??pz9t%$CwNp-0bH z_(|H+05)R{*i@uH9H0kuotwxvhNam$U=OIF433ylMRh@-fy%m9Z)bG z+&zKZs>95a-(g;CMj6$ddUA_jqlYiK!0E=GId=05_RYwh_G19q=DjI9|6bk*$cqQ9eZo(vTfIZEe?9zx%WVR?wa~SSDgejd}31|;pL%` z)vYO=0PK7Wokw$Itg+F}0&_P3N~`WK{^qa#bu7V$URHHE|KzPZk|XT> z>WBI$w-4zt?GUYrwY-(RjTTe~oR3bHx+PiMXY?)_QTcSMW#0cx_Hwu17D&m)r@LQz z<@M;izTxfNvQ=k?qMtgp_~_l=G=|3W%!{>My)eGo)~ewoX#mIgx&i<)>;dm90|5b+ zYqITDtk~W&%P7yT6;LFejl-CpZXE{biti-deO~K}UHOFj+Q%#HFWENjFayOsLq;_k zzg4A$gc$4dt6%(dv90Xp7hisDuF)IZc`o!FJO9be+eiMll66{7f7ZQfE)EapTmpqmvKvf%cQ56 z1P1_+PvJvkNk-BZAJ z)hZe6wdiS$+<$4yB@fYQo6nXZHzoO?i5tnH+f}b>*Rq_0%H)z++0SgC?s1o{eq77X z0`uv1_JR4)8(Y#t(*^}?nw$1@!Us~SUa8-ArtTq7HM0k-jk)+yNpww8@(Dq3NIe_~_HqOQ5qQK^3CG29CWgxQa;X`Gdt}D3u z@Mi1Sy=P2aBarcA_MPfWb^={sUD+cIrZri;*njYq(LVx1j|+^qrh;z9A<@kEBo}H; zBsjIP`#mVZ?|1LMmz=-c{x194<9R$ac2Cdnuk#_S#rOiz4^p}_6>#~c$%)mgw`Dgi z?W?=DmNq~nw#=Ac?wH5>+y7gfnOLMtF}iTspPaLhYBHhTW4u#T2-a5^QSL#Y_`cU8 z$88M#Mm6{A8j~2SWWMhKOqk`fUE}-X$n@j<);-4{b87_FoAYD;duXwj-vzHe~&X)OyFxpJUx6Qp*^zqp}n-~AdU;c5-o*@{Mu34Mmyz1^^_kkCu z9ES3heRuNoxtQlh;Ao54bElu;EYQ-B+E(A$T#*6Gm1qqZ(KD6@c%-~@`YEqD7^$%< z_Y~1ZC>IlKqQ0w-9bf$Upa1iivULB58~}1RMn6yR1&#v781oneQHqdn*t+aNemO#s zQDH9_4^M9181rl&huIt`06n8NCp<+4Wo9K`ILcumWS=gh$cxhBga~%17_gt#U)hWUInHD>DrHi( zDJ$r+BZo$b>?srD(?cJ3kWHmsDgAAMj|Yknp9%m&L;y8{T_11MD)NnzjSay6RKP9g 
z;9-J8c^6PjbXjC;Q_dnm3S_sIoMMqZ&fWg{m_kFTL>G2x;r_ejwnnqo>O#PB{kv z$uh_L#;->=S{)$!K@tD%Wsh#?`5BnW870(4A>#Yli`Jm|*%S9?-0j`8uU^5IDsMa; zjR=k`fwhSc_Z{mvS+100d*XM8qES|McL0L^WypcTtmY4lHN zw(O$>Wpkdp_<4N!MeS`4Cl4-8*{ucs?{OEwDujiBjI<(oi z7C&=-(GlRDVIqL>o3{gkkC^{AOUJK+d7VM^jK60AP z^3qGqt>8>+Lq=&$!>MI>S}QG7u4a6o-KN?24y3a_FBoug@%+JOY(qgRq8^ocVl zR%dkGN=|)VWC&|%`he_wA@&%2vRA%1<<3!@P2e4 zEB8+Ph7Rn5GHP^)CO8?Kx;?x0EuMewg~rq&w&}IYWfv+{9V4zF%+1c+@B)_U_rGj^ z6rTi20?51d=m*&kg3!h;gLo+;@HBbd_NsK!>h;1AX|^SrR*3z;0}!ypv3_7baKtS?>D~gKKJGb3Q)}YL}SmE zUGu%~d}rqL;Ro+G*XSldK`R=WFaj#Tv=?&x4wP~F&;RsK3Z@;c$NtCTkk||5g(j`_ zh030^XAdVs$yR#Zr2NNL3+!kOr~h{2mEAPingyIf=DWUf`dUWN^Bf+sZw`!XA`Hw2 zogD%+aX)iNiAC?pZ~M@L$Y#O&0aeU-)*0Y0L#{IUXzjLjM##Mw`41`ao+qmoeGXs{ zkC5m0%Y4Ltll{-0v!FAJ_P+7lsqI)>*giJa{s>~2r$B-Edzp*xwCnxbUwhPjMvwJf zw`9}W1T-L!+U0X~@6P88dzptmmwkUH<9x2^-kv+QuKOUqxxa4$-j)I3$KnBeg7#(( zt-aPqex~=PZB1Ww&tX8%H|~~c96dK{Pv^Uh)jrLbqiNZ3)*XO4YtUXS!GwN`FFDru z*kNAU_R(#ZsvcP{aN1a9K1DX&JKdR0=6nE! zr>^JX13L2Y0&1$M)|IioexsnKO34DCt$~24*V6^D=_rmU8&Ob=u36f=S|x9+Gkc%E0`xPGZq;|~BY&uS^%*`{z8|0v;I+QWOYH_E z%K$r9r5C^T-9KLZ@+bc|d`$iUGOdl>CyUUk=b|N*vgizI((c4+K}LlnumS{{XSQ9MlRtRrN%+09VjDpaI~DL}z}iRExPkV)kH_$|)^k&J7w>9! zqpIs#b`SrvOKQad?yHd*^=Wjjt($^MS%36FpAcn6XsRhstOk0&L z*@lfRcCofK6A*~@ffHu0@m0T~#2- z&CW13>$=nhDwJ<0cd|2jpWbsW%=is0EYOmjfzPx9n8RnIJz(Lrz|Y7bUaOM0pn-K( zfw{dzvd=#KAigY-B)a8GZ`)CIY`k>p__@VfKmT#Q)WOB?fBTE}tw7w%Urm;58T&?` zc0Lr_$l(bm^0gcQ}}>q)5rb=jgT1Pr*;57p=8K#|;I3oi)UMR#DO0?S&STf$@R*4u2&t z;W?ik`Yhit+^P0DbpN(J`?CF)<0ebU%MH;4fSG-&P06$S4@{;x4B*%Ay)*j-Bq!&z zB-A?W-DqQf*v)41Dv`xmBIc{VGMe7KXK$IhRahm*mSA_jL+hl1=UV&4KX|r4xZnef z=U#y+ty$QgbkO*6@%8%F_v*bWGm0e9F;7%6g^zCre)C70dNlJzK=1m+!EE}I$-U3( z{Z8-Ps?tUJ`x~_hnr!&inr){#egaj||K)<=_#B8oZ9Af!r1b#N%X5LE$LNzRp1#+u zp*=#sw^qQ1B;Yu!p~RVZm#(lO5`rFq_{ONZVEfZ`e28AnLDkI#pm(n)3SyBRlN1rX zoV%5;-gRWOWRq*{r!m1nw4b5dBr$3wbLV!tCR$}%u!s5idU*5Y{H|T;>u-FuH7`h7 zWdT1ch*E;AXZex!FEM1)Smi_uq@& z0^$p-Y%73;hFQ0>evKc7bf^6s+ao?c7f+C@z<6tRUhlM%PVZZhDwZvMTt9yk_ssYnidqE_4&G#Mk^A z)s171f#=l*XhGmNS6X0--axzw4q4s-!MdO~`@wZ~nFHxZ*N?&PUdtQJ1m`nk7)qoWjb7*GMQ+9U( zbfV`jX`?ePH<#f<=%4P{6QHm}EzlSh5eZOmX|zUcjr~TT1je`DTefW-XA2V$s05CZ zC(pMAw?uRMmx2b8>F37qFxN3Ao0IZgyJ3!{9n48<_$BnP5{XrqWlFD zWHDU`AU}8hOaY1<$Qd!ZKnD7cUz_Z94ke>lH2U{{_`{SQS*r*2Cfb}56j9rda3x@r zFX>}+Gr@_T3C=z1cei%YZuB;QUP_Y#v2ktAV06OKCsDSHh*XLK+ zq^PsKkdX?|cPNkjXXI#?035algh6mtbGtBfY0v-chd*5W@CQFk5pO9o{^;Uw{=?rc zKK=BwWzePjef#TQOPL*991DDt1*3##CeZ&GP%ESB`}I5P_8CZ3Ni-(F*`xi&ms%Mq zO-+o#GhpAF%Pnv;a{ug*Gl;IX5)s*$-0u4-1m5 zkR39#kO0jP@ND$P&;bB(B1J$|6cC(xQB#0g-!OIr6n1cO#K#~6C5Y}yThyz93IB$O4R|GRy?0(6K6DDBD0Y;ZCsC_PS;YluCWWA`3( z0BiC=*2BI%&orqNX%3vdRGND*L+bRYlkrw9VUlrE3fOq;D|%wcZYaC(i@P^EOzU)k zog&UTaΝUhAo?$WukHPcQcG%F#-Gh~&y96)3z~782vcSe9i_E8@2@lLFrDygiaR zi^tISmBw)A!S%)8{j8pyIV{%$VgA*B{-2lezjblCOidu_`GDalCYh13%FuaOR&lL1 z^i^Mibq_OYFQ>FG)F$QGvd^}U%)Zs$KIjBDa(iRrTxPEnq^erxiPp^C-@F$1*E7i6 z8yT=a{_&3&e|Yb`$rP5cf~5Z4U;Te4tM67>aP}COcs0Z4dI0mbZ9Aij46d>eWOHi? 
z6QhruiaaxfYg2IOX!1k9{|?Mez6wa3NR~62wnrC$q9;~&u<=>_T30NdO@5Pk4k^_m z{$6XexeAbN&Fgz>zqRxr|6J{q3D?p&Cbz-UIue&EP$mv3YHAAj4NMgj0R+ zg_mZGjEdVCC>(U9q5v3~27tb#PICsfEDZ;;{fqW|4CKZ}_KgnhXS&s?@OWNz0ps5I zhX(te!%<}Va}-=_p0QHjmua(fklmlG6;wNW?re0ce`5sO!k@lUulo2-Fo-_7o2~)A zaZvQK?B@ag4glH$jQFY*z8qgm%K_&_B$SWEZN-KqaOz>?|pg{ zZ7K1l*U2Dz&-uVm)@y11H2>y9&XQ^P(Og~YJ6fT!KV;gC;o5ru%#nfp?7_xl>F8A% zpU)h4X6%AfWsm*#H*d|^8uZ?`zWMFNfBC=qUstBSyZhc>eE8lU7KcB2yS7LF7>~&u zZ$8nA@iDX9w2=jbe3`}CdM)`6DAybYU^e$2-gvusbd)uS3KsY4N7!4-xwodPm&2X% z&t))?!Q0b%&b=}&y^J%~n)$CxCX9XN8b*6Owmj^zH~Mct+8pr*=@MY%*b&(Xo5|7ZWdoYyve$WEUbVD$Idkk80H+|P z@tdnXTYYPZihuMfxiRq!n*mm?X(Pq#M@gy3wFRu3tPKKb7_gv$) zn{pg>@7mjV1Tw1h$T@tnK;4O>htf5da^QBB0a**K9Mx5;YVD95))qnrExYDM2_erv z_uOQcfBexWtzO9&=>|6^UpYQ4S@ztrx3?e{zSy=|R@~;~jcTZ-rDcUg{}eISyQ&1c2)R~)4Hi^jAwK2z4QB< zi{HQf>-g*W?B3-wWdh$yR|}r2#4-=T}y@P0<8k~`ePwP;{6591H#(6~m+3E$zP*bj0Du!*lGgEtUsOYUEf zO(cJq_xLI33n1PY*kAmV$Fc`K6MSK+N^IF2_@(O88nU<8^6VB)@4mfzv&B4j>C~0v=*29C0mXoYWGh->D|t<@wegvUX8?-X%YaIWO2)RP`#)%I z7Im`W3SOc$)fUc?S+{ZP;=6zLKc7CW0!{m)?{|Oq^Aeu|Y}-d!g=^QZs?Yb1#op(? zR@PyMZ#I8(Tid>|Bj`(+m$6s=YZ>J6v5fb9&%HEHSk+o<2fy<9a-{$0C z^c!dEZ~o5D#};{d_r6-Z9Pjx8IaS6U*uOaZ;fI}P@F&w|W$U&Q6k21n?%WCi!~F-p zk`J|darFIPE`D?FWUVhgPPQzAI@XemYq!QH>X&pudb)LTpFW!LJvv<028`TmjmdQh zSbR=ZvE&LFa<2dbx}h7`bh~%%&p+JVnG6@RRjL%oKinH=g$JyUEc&bYGrRWeU%dT` zUkt!bK9K{0{&*7T#5Vl=IvKC*Ui-TX39EL(9=lH=tIpD&)NwOnj4 zqKEu+&r=0o^gu_0vbF)_rwWA2#HP>hm$-(;o~$zVb~e$4^PR&}U=vpG>tFwDWbN|+ z^7iQT$-|9xV{#%e?&7({uKZ2`#P$7kptcplyH_rptDW19e6A%u-ff?Ox)E^S-%!h= z!xPxqd*FpIjV*z=>A(`NB&Gl&wI7jqU_9tUHNf=%ar)1puk12F*&H4mVbei$pwG2> zv?qFR0~fDn2eM58$@u+Fwm05Xk&Xw*o;8yd9DQUT0^8W{_+BNYN{}th^L znXB?NJRwT2Y>fw6h-xfV+;VH{}0z~$xs{zR9?q)tNyy1z) z%f>w4+Vbc4pD+w(6Yz@!mg#Kv@1ajWo;G1*-zqpqH{4Y)V@G}fu#=2stH1W@SEHHQ zHs#mQg;Q;n-1)E!^h+g6!H^`W>E#XBJK5f_q2E0C^2j^ExRWPNj7@LQyQ3e$P=O7> zxvR+p&yp}Hu;k37t@$Q=rsF4$jeozRDuiwE>BZ!(YIOTRZ`}wV82$Wkd4`7jm?Ixv zKk20{liteiKX$a_uExTLbEvjJ;@7_QC(&6lsI`~ig^uZO|1J}s(pM4PtuK~UxzkRS z&)92dAvq;zK{nFEl0L`ib#{hw<6h z)>XnOKS$6_yBfdWYp?0P^97q!xk}6ftg}_=0=VGDXn1^=Wb)Yh4bXh(B_Q#wD(*h| z=)(yRImlD{QufUr=DxhG}GWn6sAR)lnGiw^tyn4p!_@?7F20!9aB^Z;8 z5#Unkw{5*OfNP&wu%)>04}$AMTi~DmT{;+FzV99l6u>i}d)F+D{_a1o2JYYP<5vIl zyyZjYcaxZ#yT;Z2?Y<*nmN&^*@P5CvQD!_ta*r?XGud2S;19)o{`{52kKQ|4L}1w` za0oDFCq#B`UOzzOU;p)w>cQCIHm|&qaJ?1iTn66@UrE@ln-TyHG{a^~%wc2CU%ax| zv$vj6fu>5*MVmDK-?{&}4CnP3uPL}Zx?A;0B0SG^{i%9AU%6C?Gy;EO&k*jKN;e-6SC0WvROm%fLE4P)>QG$>mAMZ$qHznhY`KVxvt zx}$^ouT^?Q@oM@HzyN-Y5ep1$41|?dE6a#qPI5Ft!9Ujei?-|O$7!Q{IGAYaRzjCS zv?S{GxcB;~`*h$(V3iUhEl}>QEIOaDvbD@_QSci%P6Wv~{|N)ei&7avRr&ADN>(@) z7zYW76r001hba~%rJM`E=!Fak0@R@#8{6mS%Rv2$AAUEd=JPVy>o*>g5F~!5>sBrH_Z;OTlmouh ze{g`>_ECxL0H&?Y>~)6u3fb!cCl2S6jdJ?biN&!1@>8eIR?hnFI8Fqkz{BytaR=_n zdcrq^XAkM$B@N9XrS0|fq-~w76*jZy_>O?2ytWsSe&Ya%Kz6^?*UNG|-@$voN?0z- zu-%IjB0Esk$;g6e(7HNs3FBTWJ3TEma%Wi9{TmA+eFWx=(JMNhv*M%4lI&viHT)YgJQ#Ngt< ztA*_UKbv`D5{Qx2=3!;L#=&56=?7yth6{zCxphl_8#`R0O= zzklD(?t6BrJTo+&3P7eDSI3t$h&@{QFn&ylD*f78<&HJf_Jpy1rhR7Uk^gf*>ba_i zjvndf0MPbSCePUnIN+!KKXl}Hl|i=_KluLF@b#n3!efrt7fw8~)_U}m_)F=PvKc5VSk3KHw(WLM$$|kFx=&Xvn0dq2`@Sn_O z{6*gTm!mUIW#3sE8qsp&!xzSnzO4-hwgnhNe02^wph88(=pS;w`?(%H-f!(?u@uo* z90(u=hMp=*byI7h0_k|!mDlR?XFw>N%w~kO0Ygs%*3?h^UbKWR&6N}3`U%h(T;IHt zp>53QW`G>H5?<`OK5$WAiL;v9{2VQN(N#)wCwgVvxHKFxh+0a7wryQVuiqrdv; zKQ7+*>Q`I0jL(}_7yrZG{A~_S8J3L+Z+-NHS^MRC|W6=9ayf8XNw^%fY098w4wGu0S3n1 z49E5YW<0#pz*+|~@t*qqc${-=T;vwJ$~*xK)@yX?(BcfN`}L>iO*@#baeuTj!5?$$ zclMC6&7b6n8Oo%3wU=ladGlm}w_UsU7NC1<@$T<`Q+uw<lX1#$7N>UwA2iSZ4)o(<%TPGXMc@B452Ve*1`7Xz7 zM!jN!py$hGl1U}0b?m~0(rlzfS2%jC76$&Fj_2^(f>W$Cv9k?5asw0m#-k=BEQj%V36 
zcwJjJ`{gxb%Kl<&v)9lTpqP1MKDh7Wk3Q;5p|_*aed7%Ksx{vocw;RjDH)>#1yx5^ z0|v+yK9E)@GMSCpvsG!G3rKu2;P`F_)sY+5TN^Uf_|Q!oIP)}*X$^C)QS=@;Jvyaq zY>qsiZe9Ceo|Apu^|D9N!iv_<`mi?y#r1Pvf`i$Q?c3OWP2}+c03OOmy4O0XdYK_~ zjSTjC**ctZ`WH>HRq0h=h&`7z?~D%X56pX|Lm6-7FKrH7a0t1UU0XH>BxF-Q9<6NH zyuC42RS-?Gx%gcUMBN;yuJXzuo)%B0tl-eaDpOVDjZ7uR(gP|8v>4f#?RDa_k7@_e zJ@dKr5Z=6P%i@Dye!TdbAN@@>+K&9O5)}fW&t+S!h)oZ^_(BIr{$Zha^|pEuA3OAZ zfu!S$H^2RzNTT%!hVh3$%x} z_e}xD4s)bWFJ%{A&p+K;Pik|$QPr)s3R;2e?;uCCXd?Pqvw2)|zRFMm!>z5E7K+Yn zktwYO-RW#_6;3a`^xCxkI9A1~0|Pg=|3?mgQb48ZnE_q%$KpHmD8tlVeUTg-aI5va znXhy?a1-rHf|1a1{KV14H@^MDvQ=wunY^%;$B!I}hN~)Wl>9;Zfhny^GiDmk;ufg+GO9u?o4O5}j_t|u0HXX}uAuGry)e>k;@B@w5v+H3w zboYS3_j_hK>Egvx{XG*V^y>H;K=uK;o6D2W9UQRyVdLWmKV5|a8{zoL;{&JyE0gAd zj#WIR{{X%4p@uaHToP8uGqj*8%2@>3Cmjh4C%f^ZDhTItT)oy|qb0>0c>cu++LDbY zdk&on^U*t63s9}p?nG-(MBjj2+3YID$SQ{jOYl4y#zp_qdCwnwKK)Tpsa07Qy|r3% zs5d~{_+>L2iv+%9MRR-9IxcO$Dqf=PL!X`*xz8>W1XVJRocmcHdgU;nw3gmhY z+67|XtlFBsn${mZ4`cG#JR2Vl=F~k^&#@o<87zXm*BBy@wlY~!fNoPkMD#U&;f*rD z89+UZ&LY1hg`q*?LSt*gW5!PG8aDZ~uk4pF_PgZHm0>K}y?Ws%0Vw(y zI4uEbZTlp!^C0;oaQn-*e%+Yw75v(nPj+C&t_7C4UyIKL)*YyMKh}pc!3Li$u*uJ~ z*7TjqQTBv{H0KxX%#ONvwiZ3S#>W!6Zv| z4gQt<;JfzOIOsruK{kPL3Q)0M*)k5K1(?HUFQ?nRw``4;U08rj1@@Zs)tdUrZ``nL z>>B&Zml=9!ZiBh4U)oXb!!8gY@C^8gy%TsR2eb+%Ge$N=1LRv5E!NkGLju(*PX2%j z5eZW+tui;73vqFO-MG2-LZ060m)_%av>r40F9gcUl4UmPgYiAia2}k|fjvPIl;z$v;nI3oaEVth=qxt@8-Zk_rqQ+w5hqV5@?Lx>TDwOw&H1=CnDjXvYIZa2;jJwou9#!@%P zQ7(Im$OEGNE;4UVL|MlevtaFo$R_Yjgy&j<{@&dfH3R31oMlmW_XWt0quM-_Wm*Hx z_c0-bjwx1WF+nY{xvCj{12GPZ7Q26TJI zaG)4P!Y{Vp4q;r^`m9U<$$S+#=bT_1rK~p#PMm&tzEg}V*X`iTZ-4V^8JzVRzjS>f zwT#fo_UN9=3?$%w&oPqmo&g~f=2p>I0>JC@^A{Fp zFV;IP`ec9)(BR(fC))I~??(=Q)>}*A##m!O;rS`qjV6^yoepppm4EiY!S1ImLI+DE zH0J=F0w(rN*3_f#u8*$(`8NV;IbIB6b22vykia(%Z}}g`i^6HB^B%yV2aSN!W@!Q# z_1-dvO*u5%>q~e3{KXv4&l~D8ayj?Jiz%EjlA7v(Bh7_SlYQsr%Jc>*K?f)&RAMm#J{(O_m^{9_BFj-)c8TpK4W5Kr%rop3C{!CUK@i0 zuno*x?g7~`lkA!{bqqagBVc81JcQvd!`pXc4v7$J@pG#{3B|K#@1Du{@G#M5-vQ>2 zvye=oB&@l;Y?M>-zHDU?9VRcAQCJ{>7C}kO=#yb5cs2?%BWkA-u&yOXpRASA6VZpj zh!QkL*V+uTyw5=Af`#q5z}cDP8ly)!Eg8=!{NUKB#rpyLdw2Y7@q<79<9gxh%~oH= zWRrjy!x#^)P64NdGW=^Jbnsx$%`hv{f4X3h5=+6#SHAM<(2D)%kbd&fhXcOs*;7ec zGFFM(Cm($peVob=)Tb{1B6+|lICJh|W6dawh8fp@t))Yzg+TisQP4XavKu$r2iH?tSvfr-9(-s(`q#IPmPV-7`LK?de&D>Hss0h^=L5CX;^h4?kJ_;Gg|q@vZNF zXYp3q`KlKVefHS^28RwEnV!OUn4_sw-QmdQA*&w^(|dC$XX{KB=}Al$;dh6s(Y<&; zfC1ezEc|O-(DSfRhUo-1jj?r-Nzm=*wbbFQQFJ!8Lhs53AlKF;KmOSte|rF%|L4E? 
zcV)X&lG`&U)3mAxm(GsL(Ok<0o)7DWI&Wg(N&}P2&yidF_ zlH=-RN^dgOGWaHAvg^li@;pr5%l-T)pkb{BB=08Vk3I0-^eazx&|ChZr8UvP^XYV_ zuIr`$MC*ah@TScF34C=QeC1pV)n6B%s7*sOtv!OOun-`|rLxI)J>n zTA=DidzAF(nJc4%9XV`TY+W0DyP~ysm>ONuK*vZmBlCGIaP#S12XkyI6YhHRd!!TS zj-h38&3u!EBZqn>+3hdm){U0gGOc4gLN3vBlNs4Fe9ka7R%l>SqhUmEfRFy5B|J2s zzUOz{$avQ`pxK9L%>7nJCuniT(%NEdV=k^YbMr92_e=LlZuh(I4SlvpZaH?!@*)@Z z9)o=egvxHA{~p}uv@iFzTl8Q5i+)Bo^h|o;Zq)^DC@b#97xlip5WtYG@8h9U{J@U3 zo`8Rk93U~UW@)QNZ`nf=kcfun8GV@??+e|#^=#~De%kzuVcWgC*B3eI*4iFWe*@0V zIGal}?mf?DcPz*8*kaedXFDA1+2nYowgvQ_%BI|w9kDSm{Azkih2PvSKHxlXnAsGk z1+V(fRX}KypaC3HvAU0iywv`5)7vg{t9*~W`TBpf)$;bQX6JX}w| zu&>xhfA{x4n!f7J){%rE>yvyw_SvVCTp??eojC2<8t>Rw=_=0bl9uVZzJul)@lyfr z4#$%SV~^R7#*`;N0aIDr0?_u=n%P&hXKk0S)}|kw^WeZ?*}1al%~c=r`^ivyhqoqK zp)nZ;9W?Y4&!AQ9jAT*XEJ$chewXEE4+P_Y1LFrpQ-W=P5^{QV_WP6V|Mi=v0=&x3 z?2llXmQ(;B^fPu%-#0hc$l@Ql)*4KLOnOfitrlAwI?Vf;w#K^B0yCngEnl7f&zV`j7v9 zsuzH%kLTAsxoyt`%U1Us_K_s5rK~GsF5Z;oyS4U%^w-V6H+xNXY=|bbuj6xVXzlU* znsmRavoBglz6CqwPX5FCO$W1sY7v;Pc{_0U{Q_-*7?Q1i@{fNPEm>PtYu6V482^!* z|LuSKe_H%k|C|5q?AwhiXBYpk|MUN@0LGTZz5wBSSIW-4e5TfdM;2e$gLd)p=Fi>- zR`u4%lT{^Y;Kczl+BbZGuLKli5(5VXgFL&QUyX+@4QT6zz!f$NkOaN(1=vxSTF*1= z%kBY?)IRY{e%PTypDe!qtv?zN|Mc;r(cYTH2k*bVc;g$r7p*+Kqio}7{bV-h_HD-( z@BH@U(DSxH+BNamnUg0*w*ft;?{~UihUS9@RYP7=HB&2x^G60?2U?lOi!Z(2Ue)vX z!r8Ief&A7T-JkD0Kp7C}$l;GCz^Ix<6|=tUm&Jas2ojC1v)fckCX2wzwTQf3v?0HBh^@ zf$3p$Mt4O;URt@5C)ca6Co}G8Eya3Mz1uqATbRlIgB6h^(+^kk{Yre5SOatePM@}e z7~p*OZk6dfc8_k;%FC|TZDALi$wvM>I`X$a{UkE zOYA#%c&hrJXUEAF9RSY&lJjw$ne^zI13}|i`XgB*lRJ0r8o%P3I=g&w^0WMn;*R1z ztz0B)&sRVK{0HtH%Er^bbTnK=3mf<1`-xxtW#gt;GhAa!t+V(f)}DSpefIQxm#*2L zuRsT~&DwZKh^YEiRo?`LKH6&Biv|uMMNhhmU-wOPwGQV_UnrKa%ZabnE9YB64KJ`= z`t&Ozxf-s4uhE>YH)cGloeLdE&-ci91@;o{!)Evn{*I5u!1R2!ZIp%;ZN13N&Yk-w ziJzA+v|*QEO~Cin$<(HJj&5LQ_jZoey}J_n6*`8U;Y0ozoQdq&T?NQhyhdSfN$w<< zPoF&*NPTGZx8ly-`81bGHW)XbkUy^2(e7m!l=TsBT+6rm?BnIGWrb8r5V&@rt^Ky0 z*}dXJ@^Le|tK!}k&+{3*cemt-b>Uk}5?R}A`Eb|s?KW-6CSN?axOo12XLucooMylD z-6R*C*@f0hw@z?C zX8~wy`r$_(EWZDvUrVR2UA%JQJJZU>{LI_SI;i$HH~#Y2jC}ajE7Dc*IG>6ihqjVp z+DUyAuPyyhW5$DZ9BFQc4js~NW$DzT7{YmR%H!C%+4$=QkWko0Y|XpJ{Es2_`L|=6 ze_l97bCJ(_m=zi@+ZWxZf3#LeGjzY6+k3|0HGxzABCMB*TR8WdN6q*8z`wmmaAr)d zaxY&R0?6cCUp(1;Z1D$w_D)2oMaH&-I^_#Eb*R$%0!oB%1HGG8R|WaS$y1BBI)KfA zoR#QKHOhs+9oO7UxvTc`+R`>$PNB-2ZDsUTC+fksYhMm>O54^B%#0yU=K$){Bnv%r z4@@#!!2|~gz)GQ0en1P#czu;L02hbd06Wi>?YH0pZ0d|K+Ri)+8y$=V6UyMssquxV z>vI@5^TV(y86b+FVON zGMvCPp#7S@BYQs8lz~Y50?4%%a32aDcwzgH>KG?Kuei6Y)I+ZewVRNYX4tR|MpEE&^G0jmoQ+5CKRqm8&QWxzoXjh}M$7wE z%37ZR`kN1jTM#scgMsPu4?p?5dp;~HyKiW(U!MMf>(^O7>$bUSu7dDrd*kNST6O5V z82Hk70E*UToh@m#rg-Dk*9S~To9QbXzoCPk>zsJ4Htxnpk{29FYbRhLPk<~A8!j9L zRm91CvPjJRL>yiP;1!pZ-M&xd?1gQRIS@|2^eDKkGDVj`fYvXz5 zW(%KcroFY1kn#Sst!NZ4o?ikj#JOiRHb9RpvmOhe0+xD_^;y8?^7@H4qT>OYAtz=1 z;{EqOm~BDJD+{u#jzIHnI`>=x@XIj?+?(w16@};}r~&v!r1>m086BEy4uk`n^6R_UZ=>%2Z4x_zXGEC(Me(uqub6z=iY$|X7eu8(E2?Bb$RG+|=1-%kv zUv;oCngRjI*{0TdvecZ)WE3z@*0dz@qMx4i-kI8uT>kg}etPtuIPqrs(%ArO7T2mW zK<5?lsP7-n$<%+-=a(*CEVxmjxUYj@jz){+Sp*Sw^uqSQ`)xUddXHZ$u({APKKtTi zrd6uU^MiR9%@Y;uSktHf_3<-Um*lp9;qB zARzw%P&+lvFf6?q9oERNHRfX(y0C zsjR&zG}TciVmRWuWTC?*CHYkSc!d5;(p-lRV*Q zJa}?<94<+X`&}cuj@|U8w-boG&X&B^W%Ok{ =iA<@gx?vK|CM}yA5N9GL##%tz3 zL4QAyCkaP9Cn#9061DF*nf)*R;=krlIvns+yq5zn2<{pCO1j#XxVC|5Pi4LWG0Byw zwu=5EkKMDkMt;6A_MZPuTlka4`Pntgc1)g+-;%ZLJXz%kvcXd+!^ZR-APnC1b2rF= zO^3(VtCGEyeJ03Q*{+=~z&3OJ)2cwpCSE$6^9(?g@Hnt3#H!A2*Vg7Z`2j*!XC16t> zGk|SFGB0Bp;Lv!ZJzjq*NNHUA4;`uB>ZW$reBSy1+gnrDG%n8Tn)EY2$T8t!9W%)ai`4wAk7OJ9C(G{Td=_nU#$Gfp$s3>9 z*@DJ`PdHqW!a{1UgEB7^)0bD8-VV}H=(Df`pQdZ9O)F+Ov4 
zVd&KNjcTQKU&fjL)z`W&e)aQ2^4Rp4H^2)WVV}~7j#TE)O&*um2 zt*B!H?*OrTJ%=p-@CaVK1f)QLJAJluS2`&_Xp$|)+8ohNLTO#+%;0T437eq`oaDy8V)uQlN-;brnX^oifJM6*!r-pUo}#q7+LD*+bS*4-tG@0EA}*4lN2 zhejS6C;y)QYX)-;%wuiHn&b6$m8$BylYNkYc>axDQ@gu=wvepFPwO_IVRofw4yf6E z0JM1XalR*=i&g@A+x_{Pt}_nB2!LC%;H;XR?YwlxkT=IzBR*8Z)p^f)g)zNeTi;;$*tOUvndS;oY-d;`bHJWoHMSDiph1qKO?FJKy_s4jt@Y^On@Y z&(?Zd#brmbZTI3yw6JqQ^72Npjdev&GR1G*+q#KmZwIpC(dB%Lb?Vve-jWHscI=I= z#hZ=UfvWn(voDy)Vb7~pEa~*3eI>j1AiCd3cJ1<5+xluNHmxJ*@#d>&QSrdp^JkOA zl8)(3soFj~O>cT}AvqmcW~=*53k-Z~{q0yecj4^BS=P>ZO_C~P z8Q(7ll~32$wQjU~#u`|6>jV2CH{vBd(9O%5(o>s?g*TNvao?e7I?vC$b{`lz8IxT& z;#G1w#euyC!?;#VNQdkRG=~*A9F=cvS2s}q`SWZ^YLxGH^7D_<313Eum5T%I*qC!y zdcK!kU>kd;q>6Y=+&gF3q(`*8!NYdUNw_Ogb6_gp8-2(XUA+G5=oC%f{pDXZr>%{- zqROvMN3&ati{{?W%(s8^8_DgObbt6x_Vh~YuPuq>?#G{fJZq^~G?p5Yj-6~y{6^S? z1h$yyaS1bcDc_OZQ5dCQgzRa{M<)F|{!n+C0@@e{_6Mwn%iKw}#C^_xgNup@VKfR* z-1kX-jqBap?`zwEDA7E2J2~U)%pslG-lxT)iY(BC4Hi55j1HTmXz$w@X2%b@@GTza z_gy@HI(})qHgnsOCLl-n4 zDUwsGr)T$&)KYs;|3qO+ucmA7ko?Zkjwgj9F#J*RiETWT$kzo>D%UOi^()sixa$_*{qA=Hx3-MaxFO|=-k9`3 zih5N+wLZ7j5!j{LSQUmbW2h$y3Fkmd1HY>_aDYIRZmUqt+1gkie{YwK(;M14T7+k1 z>@9<_2nas?{L`4S!!%;fS(h&JnZFNX!T~QDk3f-P!$_1Er)rZf4FI@py#+=abDoXm z!o{x^r%#_LSlqSfe8MqCGGfd2Dmhvafl^uXyscCj5em+h>PtY6GIN`9WPbkhzn*OO zwVO-BB*;=(!GYm~p`SHcc{e&_d~fI6-zizJJI52fWqemY?K@?n_4j7@rj>xL}UZ>u2r0Q8(k2^V{+1^y(92*!{X(`$EvER^-p|j-snwu=>QdbUb6ezOmO@GB>gb_jAwr1 z2S2DffB)jcc<){c6+h|ukN&rFWPohsnL)*G{^!r=wVq>a=;fI{#>aCSd*2sOOyHbh zm$~f8Glu~!S|4;vlT6Yuv20!`=j7^HK;klx1`t%W(Y%**4j58RZhZnEJa&|2ccu>- z$c$T1L)IN;N_ODLXy<)_owiBz1u*#0lPa;2@9Eu~T+gnD=E{KtCaSn_`m=TW(c5p8 zJh_q){k*`>IRFC20>tEm5w(R&0#HSq3J%Wia_yJfqPCY5;;ejL0@0Q~Tdjb!s&Epm zoapnDkU<~x<&d>UVUm5a^?)PJus`GIbyD`F+Asl$ zn%O)9ZH(<{5yJJ$k_!Py1+TWcsy@OiPYS4QzuQ=qr^LgJczACKJOLbj{OpsDY9;kj zv)k2~7nc?*_8$zyxU=}H^M782aa;X5H)2Jgok{~+Wr4JJ3x-uN*ZE{7V>x;D^f$)h=&XTNBT9xs0XPcF{&{)XpT z38>~Rfz?<5DLrZ&G`u>z6j4`Yvj9-4ya(XyBI8$~!X|J+Cs0c8=qNJSe+8sV=g4^J z-j-{~BH#h80FA970@05Ssvf>@e+%4ua2?;ZP4LBy>>Tl%0H-5)WN}pDHDL( z&2YNF{6dv$d*a<=$6v0W*Y|v>)9uKxF=g7!1~~$j~1721!hF^aX539wMV(0 zZ$e3o9ZsL&&6Twc_$GPNf{lK&e&pE>pL3^6(xy}CCbTp$;0qa?z`h?GYL0Ges=5Fa zOd_v20*2^>uP1A_lyiX?y~M9FUVMV6e&&b|F?(a^Iu-7XJ33lVpRq~fm?rnG!C#h{ zPX;`fT$?#1hr#Mgz1jDy6+LMUt&N`(fzcV0$Y;-5XST%s4aJb}aM*4&hLE~J3zRSL#>8jS^z@eAh)_WkGQew0~$ynyz z@v`c?3+GO!Yx#!kPD#1^&pqvGk<7jNRX{izGysQ}1J~&_RfB8$yJ@GSOwVoJC%;U7 z`)o_T!nBZxF9gKv*8#pAkk?_#`D+7`=i}Y1b=;-^s3;a63WV@o{#jt>)__v&gYG@a z7tVhI^Z{-jKk59K^q)Bc3eAztKqLU=jXr&Src(iLdOF9CX~ezluu^^?^d$arS)>BsL+@bu~% z-|6}imFe>}tp$J_Uwcge+I!D)5VRJ1R-udgk$-;oiPv81K<=~iJYd0qL5)q4NYM$~ z_mHp9Isv}vME2Eu$6odC0;4$|#r)TNqkx#rPBN2t@20C4UZeFIpe36kQuZS-t#_`g*v&&tR z)g-XFHW_yG2keFMKapiDH1r{d~B(8*9IukEWW9g2M~;}wRv-3Mz)SWLPzY% zPvBP`KYVZ!JLa>yc5Fc9LkC}L%|4#O2?=nt1YnHZunXwd`fm&X6i?Vr|JrMB*H8P{ z%-xw984A>m+Xiq^|><$1^ZL8<%gZu)#w7K}jTyJYflsv5~F(JkwpIS7yhbm#Z zNV}Pxfgq9>su-`I{&aEii}wfQ+8rqT>8Br$pG{YL#gdb-Ig_pVt$*@AFLta?hiyp5 z9eio=H-GuT;$pqs=^482X+=QRmHuU;9Q3R0o%L|QyX4gP3?1C*`WQ65)FHKaHGcCQ ze2g!R9r&oalb>g|%*}uspdWp3|7p8^`Kx_q_x4YI^Y2#;|JV7b%Uv_{-uCFCrQXLM zU2ok2YYLU#dj0#!$MfF5SMTsI7H6yI7VDqAaBA^N>2OnHg-fk*m zz8#46o4@ss2mD`A`zRo{N_5G^eFu&%wgfgRbg&!)EX6mHfpig>%|uWt0lqu>H{BQC z0kHsmcxqREuY?{Op=w+ltZ%ZjE5^t8R!8>LY(NRru3h%y$EWQ+uV~@q$qTdVepdwq zUhll~v)YBcF*?O_wrtaus7m_4&g&J^D5!z^#3Yks5pzk3sd5*S4kon50iPA4DO8F6 zcJy5m7OE9~0UcIUT-K=^$n+7BJg>3?(O%1yZN*SpeRRVCe0}H9 zDzehaU~~FP5}=1Qk8IWkMM=Q@;_T!N264X~PvpnAH>G#k=S#)RyLNR(RTzUr9X<3U zU*-7Auhf?F-AUS9x_G7}WeM2sVKLTaBX{g9CTNmctz0@EZx=7*wT*uw?pv(9GIq!r zYUa-FZrzgql5ce(oyG3nOYe${A{Ccs_O7mFnIgi5L-#8xVav`2 
zfa8M$2agonb)b6wf^i%?bf_wDg{Swze6Eh4r??m%HGX$%3Ae(NcEQ`#Vh5#yxgUP- z?GE`pviRBGytBC0TG~lP7hA(wM`KPuOQg|@TZ$`J0~RvkaE) zM7I;Hzy9UB14lgkfhqlHaws% zN8G_W)5j_Y$~JugS#OLJqPn^Vk8&|Sie^vOFfb2OyvDbC&+fKzd^q>snKHc@z{jv{ zO;A*Zbk0mX^kDIafAas9z_>cbjf0@=#?I|~a>6!bI1hB_$+?k*38s@TOswbT>Xr4& zs&d>~P6j(JdYE%b$-9Q4rVlynl);WFR~Y>bo336)XwP4s@ zds(*rL5YTQi#J|5yx5ZS`&YmCxC)djB|FNh_h(Nj5sYJzjX8XEMP3f_LEYqWtdNA#f={LDcA;eYwl#nFRDCmD9XLqf>>y5_v8b~s-( zPnWu$0p@(HP~}C3^h|t6rf2I?_XDai)(+302kf>NBnlq0|Kmv6E-aXL&u{Q?BS|DC zka4Xx9aH_0;{bp+*U@S{lcPL9R?nU}#Q!f8-V5D2pm@_HM;w9y2UHl2ekD&p#}Xa! z9|L?pKG+gZ;e9%U%r2`h0v8*dYL0t3yb>tye;hcG>;Uh+sVBG|mMZ)_)2@?80kyZz zpIzKtVOKzvayi;$dw<(4Rh3=4el-W9bxRhn3Ot$z0&vh&5~#p3J~{!7Ip&sZc?f8r za>j^-|NX_`4!+!(L-6KXuQ!ek{rc>)(JR(t zOGXd40MOW+zT%V|Nd6e_KmUuL){lGHf@G3a>4&Lqh+aSwJhXXwmUVDimCV{3O$BM= zpi5Lqunns8OxF169z03j&R_Vdb%;M&2TrV)CPv%S1NfM8J*`ZVNlAvQSFbHjUpSq^ zerb9GAI<@@)%uOsU#-e=>*7ED=l_yD(Pm_^_~O$~7eD!l?PTqE2oQagLnlC19eBB2 zJ-c=m%(cGfty#S!KmY90#c%w*zrXnI4}R3zRi)G#zW?3_&2M9ex$dumqn?!)E{s0E zS%Q&6DbX>zmzs;JjJ&2{=t%^{zulyH(DCW*s`IRGza*S=Cs|m6y=ZMDHEij0 zc0w-f(m!lm-WS9Ymbpte=&R8%Ubf?b9?AzZ*Oe*rr5#@C6~I2bcTX@dcE8)qIMAg9 zYMs6KIZOUo>a_t4&5OU9E&%AUU1;mG7vOgv44nX6ex4Q{`HI$U`J|-`%($D2XZYKc zdb<1eyy!pIHXM(j6{j-KPtjuZs|Dz`&vaM^cC8m?WTN=qU`u}+1kVw;;-8AU=8^hyLWA09Ija8y?5Vf zJlT=_OjWYC3WT?*Xzgd)bX8kTGLp_{Zgi`}tYnyKI^)w~$~?5&P*H7rx%LNIf=m^D ze6*@L^5-1p+^rgSKv>VYm5sCWl3qA->hl4xBzyrvXgs|~(J}gJRdO+1?|@V`j&28F ztte2%7x-kE^Z>#I0K_9I^XZn4-}`x0OI1|n7XcWLz4pd{HUivD%}47J@{u%H_F1>3 zuN=6Xf|zw`9j_KMEIqW91>qEgiDAJs;b)b;$^_tm+P(0Aq9}Kf;{x0{LUV-CIKZcER^@ z97|SY`+(PWqmof}`f62pKrj>bNAbdP-0^g>xnaYW{L~I)i~c7&&>C3geM#)EuH2kn z_1fkXwj~vnH1^pYiwV-ds?Ahg0=58~66JOf0to0- zw*G!R>30X?k=2#!0xF}sopTA%{MA~)JODyRj=s{Z135}4Y|N*!xAJ~7qF*n4Y_!h903sNnw|T0wC%mhR5DxQ);4q?%85dw z18>E6%Rbv<80Pm4k)z7l5XSYC&yw~blM!9*>XNG;8g{xc*i;@HrgA& zFP2g`B*p~-ZwU++W0Rkq{R~6~z5=qo%HIHz*v0Vnul~Kofn#svXFOcI_SO&TC4PMP zORsb`P#kkR0P=b9uO9r{+F5h-l@rOyvYpla#yhRh@MJQGj(P^8x&HcmWzPi615)Yr zvsJ1dJb0}2)AHilR9oADM|Sz1bLKo;{%f7fth3STu7Misc^KjH0TES z!ZJBQhp9*xgH+949RH*>1)wgI8OwHIPgcfHtrc-bSNaYg^vJ12`x@Co?+?3;L7@`rA)gPV-`zWc@v#e2Jk&b}+gMqg_q9++3-lr$m>DXOuj z{9-(Gxq~h31iIuf;-^|(?yKs$Gg6W*=Ud@H_WH|HpDf<%tOPAibOkt;EoiYv?!?#l zo2?VGUa24kpP>nzwu}R5!u*S9DrveS9e*p|>eW|Ibja>cit$2@cug`WX~-zzHmCb6x>%Ex9cT( zb|ee>)1$@R2X~Ui3J2pCYrTxqTHAPDGR7BIt#Ve|YY$n`l-5CFTd{?bK=G8_ z3Vdrn^Nqguogb!e+bNyhgIjEpFepy_I-Acf=ola-XVclG@jV*PwCG7EtZt20wH=z@ ztssK0xI5j0yxPa1fn>{^yI7!d=G2$T{MOM6&Js9(_H=v$+%6Fr9VMp~JMdN5Kf4$1 zH@*kySIMXoufN_}JqaNH`Lt5H8|L^pzEcDulHk3GgXm5ETH$|%C$*LR-~Z|F{4)!T z5r(;j+nR{?`hxfRsed^o{Y!Ir?^|Q%GOxKucb(t8hgbggv%cFym)G{@i}%MtS$@~| zz3u<=)w#wq`wPW4ygv7u>rzI3nBd9}DFxdQ&Q~G)!O71Tum7FjUHtQZ{f8X}^GO-$ z^AHKsr@p7p6-SA2)yRKGiqQde=L0@2TsYT#a&8;913qo{*c6x`5uln!kKoNI;EgG{ zZ8;Ad1A=k+TEN$ptBoZ?(e-G`pNbl$2QVut2#C0A@g$zqfHWM*X;)(ye-ZS0FhMRy!8X)$S4< zM-J_q?@|K5@xupd<#6m!eZ=dr+5Ok|-us_@F~A6cTOs;tQnu0orS*4exRc00BTgQn z6Krw`fefk~mf1SNj_oK*iaC0vQULJPKABbc*_R)#;px zH(oim_|A!!tDjr+S^p&N+wiK|bYQEQQg_|7)ajA+DJ)mDy zl>n~2MxIRqpTwJf$D@8m6BFcUbD9L%))qi7^C)O~XJ|RaC>v?Wvg^@v89vol zzRb6SZ7&6yyz%Di$<@~MaIt;Mlp?dC~-1x2=e>-+XMfAcqs-}_hpYVlwH?9azI z|9AiJzn!(-mGSM7$;+~8?m;?d$zy>P@goqHo&w-#sWNS-8t0+IhpYJAKL9oRK|Z|b zksa}swjmsZwK;}%v^W5XQ^q;i9q5UVUVrsiG~rx6TYT~5>2^BYtJm4l=&`cO#LGk9 zx65$9TS9fJHF6;A@Oi1q$d5n#a2z(l)MY)ayFbVJ<>SXjPm#+bhYqx@_i8k`F-`>i zzOyysqyag1?b*3_@BMeP2LWS&w}M%Lk+m4h*SuySCP3-Oew{!LFSbS;oF(7JV11_7 zn>RRgR@3JKCHzXS0o#1eX>_f?$E&X1VRlb<-PvWYl-E6UqPFhpy?~ zWEicDnJh`nkT2u<_QH{AP98p{XX(1@fsT^qc6{6P0?B&Gb3e}-^?mmlecbo#d_wEUw50jB-$#?tU+5iA=u68# zmW1xa-11SWmv%md%2tL9nX{@?m86L8JH>n2$3 
zYso1zdHm#?wL`kM_~iY+j1IGf^M}{7d(-FlZkI3?t#3?@ZAgCO$vZc%)n+PtDZf?= zRPu36a#Qd@HG>2Z`=_lCU<#Nd@M^!vNfjqASLT~XheJ#0$d#@A)8y`>)^^YOqiuVj zRG0$VTLQC7`$5+C-6vP8mN*+0qlHJhyn6z-@1?&4NDf6S53{=~se16Y?qqAo(yw|t=-FN8dV)wx#vu6VK0 z>r=b-9S@W}G|yCl%(s5Io3!$YgI=5zBkvp|PW3}D94KEpV^we{>b+CX#G#SiqtUr;rS zE@mE$@y)ltGXb)-^~An=S4*p9B^;V(leH}K<(_Qs$x!c=sZ#5GHtMtNA8VjLz1GWj zTcan@2inxPe_wj-T7Bnlg?BpacUv-0K#87~w6`buB+1ddm$qtjI|*O$ZF2ibbhCMN zmBq<0@?Ax~>P4kmvR5g2l;r)nMk%4?wHd-&oM{(`;CLFKJ|KyKGrD)u$i- zb?YQ>UM6}DC4CN*-&C-YUQn&~`jx}qYrT|y;Lq9%`!>8i3&qY^6VAFPJ%4TJZjyib zlf7@RhE^hQbT}FN@z(lqZb4M@Cxah$rk9HB{!C_PPbQPuTqUX{)N%++f{_i-@bD+S ziC8cR0;cf3j%c5^(R(rokDsjPPyiiaHus1bwrmZ!$+1&jjFa}%Blpd>-YaUlJB3pA zYzyEpz%%F0C9ob8rOz0QnSiwpJ|mph21d!myA$|*G|*ztfy0Zx_#gh^7+JCoIVal# zQ2x{Z=Wuy)GKG~@$Z45#&DnHZ5(*jY(PCDLHKdZ|Z&js9ESjfh4%!U*RLMV+>Rvu%-;ams> z=(1~1hE;1ocxgjIaLNWWP_06M>*GW;25#Su`fXEwgcg7ZL*7gXG29(^xqA<#Q}nd= z7=RBWVgZBuWl(+#kYiXz8^_Z@j=TDt(_8Nc023|)cry4j4|k3l=!Zo{^w+ zjl&H%AX@^C4`d+fhyNnu+gbp7j;P<&lBkU0$jcBgR+;!lE?QW?j*eIf82@}b4WwjF!ppH8#>{*SAG*j<_Jhl?Ks zE}nSf_yCyt-D7%=99jh6$zbrMy$8DIn8+9b$ZYPXzHGi20m?6dB2Nl_Xqh3(y|(MN zwssEO)OG`?`MeY2Zx@YMu`xz+pO;xp(Z6_*EQu!+x5$WUIy0vA8K0EEZM2#|y>~mO z%!8K$o&egbKCf*O#c`*=gCGxs;^eU-!&8i#g9i$-)f(n*GG*J=4U3=t_&c3v@LucX zppvq4T2u5y$UaC}IJ6d+fHnlSE?v15T~syEo>8vP%eLi6ZEURC>fF5JFwGrfe2`K2 zYGsb;*7kcxErvMC#x+5w*4i3wQ}VUu!5Boq+S;#87U1z5oJWD~f`IS7`(70xrzf}| zD`XUX_c2x_yXTWtl+69c%-Iv@1I#=PV7+wd$~cEWDo*09+6Zx$t@q2X3rxy5>EFuw z6;pt&)6zx@9?#J~{ZMjKS^1eu7o)F~ZgdI=7od<~PYFN&I%Uy*cZ)uk;tw?ej%-Yc z$xQM^UM>&%k^Lw`J|`s};DqT*#|c{(I)XR$R4NSMyMFWX;`1**TfBAhjcGIRd3~Zi zn+!@6l_B*Q2LN5cNpnU|WD6PHpY2zf#mOQ@7UTD~fBYMZW9dXR_^9!G-FPgDKG)kBB9$C6 z{$wOM<^E9R+s-W5fAGlS@BKgi^PHs)Y)y9nIDi7m&loFY#{PgRhP1ur*!rn*060Z1 z`Wk1r{hMHD&k;;nlZ-b{V+X#Ww>dwg^#Kku&>X79A*RFdEBYi)BAVu(VLI|bI|!MI zoYD2u0?7d^j1z`VVrI$r^D}yE|3)@ucyXRNy=ZU_YmNW_S?Cl%j=p&M93gv%Mi2?- z-;K2bv5u1k+x+z1YauOcFLr6PF&WI=Z$NOiY#&$x<8HnLdx8I};vw|sz8ozyMn+6_ zLa*tM4fq;Ew!4sPbkFm^jL%PhQTD~oS#S4{7$BRji3AXJXjOBv)&fiPES@Cy0DP*)EMqL%^D zR@<|B!mPRZGG)^$z2o)f(Y$<_dN&?<=nuL|(9D_xG~I{6J{L9yPM5%*2QLE*nQ9&G z{vv&mB^qZ{<$@972JbJ;-dM-MuH46P2m zCpYLv`{Xlth#_q~8s6-g_c48e3I|CzmpieC4hWz?=jW|DB)3Ka@ zsbYyv07LZHR9~dy@s%V82hgz*(5S3)0Z()Db948WNsd?XKizN6KuJzBdlsLwyS5bQ zri54nI~}-d z?h+loo%hh2&&Mwp0-%=8cy>=?lzkO3bnR@@+ zGRtsvw)w6JHV@FXbNB4ynrzMCZTuHcO+czZhP?vHShG6;yByvN7tSuZ`WuEqhp*x{ zIz+azD#il=(ps8L#bo+{{y;M==dAIii)R;S+Do$Dyj2-L&M%Bi8IsddPI3_1mE;8Y z1~%EB3ujJOg?FKS0+6>WeOqHm1uDGwANK7bKL?NJ93MZlzxQf|m`=UQ#!F^$Q0bV_ z34LDnzQx0@+8MA(@EKjEy5QurFFPL)SS4Gujz9G5eEloh+N}%EBv84~~AhtO$ynP`1%z0&VO#5Vmt(b#7fA#18 zdU5f}xd9-r2C|(g%O9u+EZleC@PM?ZPL;i%Y%q2`h@CHjFJ46-$BrCm?c<4BdyWim z?PNs{ptMp3VX{8n82%&6<2_lO*4sV_LOG0=E%W#O;O`D-@yoxiP0*T>gbIwKYjPZa zka&D0a+EDIay&X5n}ML{Ke~kfWldlKWb58+9a*-5d0vaa$JVC*wwHK9ZV0-`w&z;{ zI3!F7#yI=PUSGc@ktSPrM*-{hyPI^+T4Vs&(!17XZMNO>=;UQS%ZMF)5G3g>*v4sz|jGeZe43{T6Z#6`yRYQehETYcWbNd-@b!~ zmO;`0?tR&allk9T1L@fkHiHab%EGXL!FHCxBsQGj-2_zQ5xyMxq*BCQ3v`lQ>}i!C zWWxYO?JY3=()qK)N0HS4pnzHJ9}(x3DE`XtRT_N_^{n>^63(z#AUnTfTfr-el)JrGE=T zM{Df99m(2#$t-QX=1iMiTBy`7x%J-D8Vb~C=j1@^FTeb1YzH_@BtPR|qb_|58P~IH zBg@dbvCQVTmaRnhgimO5ytDSK`=Ykuzh}$N&dtc~N9Wd& zE=A|~pFcYGVtj!fB~idWRLHFDeE}SLOhuFZo{HerwO&oXN!01X`Q>#?XhX=Ru^-q4 zWNWx2Ft-s@|LyOnHNR{m(&N!GM6!&7)->}Y%2SlAw_i`YAY zdJ{NX+!^}8C)R1&)MUSHXx#-^B^L3sU_&YZ;AqnpD4ImqXoyUd-~=b2>#fhy7i6Pk zn(>X zeSZ4Xa-S)w{)+(^+e^olA?6_j2IEo|%hRs8_uz5H)`7)Oe)@Z3&^(QRcI?hb;=Hv{ zS~R?9Gl7bG0}cPiZ~wIS_oZBbTWx%U){j+RsbghmB5qj)4@&JfNr2pBME30Oedqg? 
zChuKb4M4kECf~-0>()1A>L=bf(xR^jM5$+TP6OuE;62u9qbf(paNyL6;xXnfU%in* zk|7g#f2Kp}ZUyeG&IsIhK!4jZyA!qu_*;-c7PpN78g5D1J}%ma=o5>ryTk2dq4u%X zNx7`G(gV_35`x)^V~~ib0yF z*^K%TYJmUXVI_hQ0R|`3wgm3TnkQIZ<&xLn`6D>mC)cdW6Lo6+ofJM$kpV9g9BBFU$;QP8AAK}!DSVCt_jpClPV*B1 zI)CnB>#<^5NQ}~HwTxS!<5}%!lvas~GNuPaYQ8c?mgUMZBY{m2#0Z=_OnoTYJ9GX* zOerH$R(9pb?e)WN6uf8?Wnc}P%m}y1Kxl206Myu@sc)FAgA6J~!w?SgB;x?%`*-aK zfZDgXkb&*kX(g%ycP(odmg-;Lq;(#b?*{ ztSNc!SsPm`fDXqOz@}sPosn!6*{SxB(fl|-Ql$9lv-<@LI&3n- z3|N33WJEQg=y0iM033T#;!dW&7C%xj3^2SVs?E7kB81N<9!|309U;sBk(o=$&Sv$K zqOo2F0?75bWYesR_kQEMCl}xU!S~CssEtWNZv}vGWdM4{CGZlh?A=#3R&=A4$m3d+ zT)%NE1E~Iz0oxZYU(et!Al6!+ICydZZ50Z4>JKeTlL5v6mGOV@PyveW_s2i~%Yqgi z9^2XhExaZmBZJ8Gl*hy9!2v|Ca$fG-eY|-0o%iCIhXw4;moa*`^;lWUlwAYHUk{l7 z__I&qhxWgD4h-ZudG`MYR21{86<&TU^AV2KWiev#pZ5+79K#q|MfGL1DS6Q9NzyqTcg4uKMGl9~lRf2m03zh0%V06pKD>DeRuRIfwCK zH2tVCGVT!bt5;=sb;!r#XaOB>s~?<0bkAQoog(_GfI@-5Ubpvvo0e?x)!7TR*}6GZ zAP$p#US_y}$N)wg%8aN)jNyzfr(I3J68fNU0W@3EJ6~r|zgNJ>=l}TUe;qBQgqxXG zMgph|SQ)-2-#A(3#pT7n{mY-l>vyIk`N8x|ELaweTy8CHjwgEtJ2Zv1Q zTYLlziu&6FMjLr%%NRB(<~j8A)#BriKgf_p|IuvkalR?*oqLn5@!XT>690$Rc9_%; zqmy^jLt7RnPMlnP_Q|IcD7<4&a=JLLJ9AP1CX;c3h1Z+o z{(>;iUp$%;SD*!XHsEA(*RwIL*CtCjL(5ozN;9_TOLn{Im=y)Ju zL+^8CnnceJ{{BB${KtRrpS0Hp#y)cJ^WK8~=P&1Qcj2;!wjgTHOg_wO@gC!NoD);S z!3|u%d~Aed-vC!p=(p>VdQMJA5CHUNlsA8?3ZyiaP3Z(YIs4uk7$gJKjoBF1zYmQr zU~D5?K|Jqsmh~k!wy|qw6*;{EI1E-5dFgid==lN`KH0+0^$d9znv-i@B^Tz)hh5t~ z(er}#4l1|ek8(!YlTRMCXFZqGv)ogG7W>g&wI1$kOe>n3hjKo~Zi)WQapU*j1xThN=vBAo z)UVH8?AN*Xl)WcYHz%v;U(PmJGyM~*QZe`YITbJNj`SH@R@NKvW7-Bpo5p9&^pa+8 z-Mo3dj9u;3miDGf6D=6n0neX58b{^i$+waT&lVs5`Y#vP3a$c$SH*|N-#EDJwVWKh z5wNoiCOBB|i_zN$4joxsDe%fELaQtK45tW)pwh&|1hnh{KG_^F=I~E;G@d7)fo%AP z)yNT&b;S005MU)JPsgoEXHFGjhiPla#`%&gLZ_kWscOg{Ay4K&!e~~8+On<545PoZ zyL$$?IYETh=+fo0>FC?TO} zXU~0EQ2lbctVE5#C~J;yjTeZ|-=g2is_oGaz$Ky{-;NG^+PDO2ZU@|7zI1tXp|SPx z(e-Rp+7hrgiOxC{4{`qz_9p5VU{qB?G8GSP+p>P~;V=Ju@zcNccjEmA`9TL4|Ih#X zf4TVMfBwHN{^cM2vnsj+M;e2S_xt(bcgiduFsQvxnslDhyY0cM#n1oz7mb&#Cg5KS zzt(|GFuJa5tuK=Fjc&ZoJ|U+cJ1o5KAOGMtCZqjN|LoWE3_1_(NZJDOjjYS|N1JRE zK0jMudxxdJ(KFgJ31ic8qD_=>%8mf=&{b?Y_UF3R0@%#nkN|K%kH7uOUZyLYpCs_N z1YFjX)qN?s5pDL_wj{X`Xc8SrB%*`Zjai@LUkHMsn@w8-Ht)1|?a|g9yOTxrAwE>n zNJ#_0)h06Z`63|w-nAQxD;K`Z-ai&y?MUu+5O4B{zr(iHqkTi;qB~TYYH@ev%K61d zAC>{$TFA)dzkXciIQsG#EruNOJDH~O^pobS#f9oH_J9BYu$lbePv5Ar-I+f-OB_`F zup+wwFs7F`n-)+fLBu%<2al9NpB*l6I(BvIg7@tSU4yUKg!~_V#nWhCbrDb(HleMi z-r@kH=kfWSk^=3m1QX!7AQ4?^Y?2vo_bo$}uWs%eva>&Iu9xCz0Rwi1!$wuej6Kpi zX>mz*96o%w#Kkk=LXQ`Fw(lR^M-DGR-KG7tYhQXVez+GOqDg1TxhY$%|AsHw8vswV z>zdK?y$SHO22HBW>o+C|;$}9yOlXx(QO(d^u5S8!36!(*$ZxV_?1uC|xuN1}8Q`Ze z+Q-(C?Qg7--}qlWxG~9+4=3n+Jaz~_7HFw0nChK=y)9$4eg~JT#J_$~$$9q`dbYqH#C0w39^HrS_K3cq8<+OmH1WdA%pFIKb_We$H z8vFI@5}@t{QnCg3;7xI89{;Wt^y|Jyjugxf>)I18clB!id~<%547hvu+I)_`piPPZ z5r0^MRHTz!&~l^eyB8WIo0j%<6{Pwv z{WSJS>o9&rJjd7h`ZXUqerql!hpyNx<|B}Rr_79APdv`1M0@PIp4y8l6WMBu zX{E;gUB7Ny;Ow5-J>|FM-}1ZZ7&g80;LL$Mm>^DTEzs;ftFuYbnDLq~c?sw8?9JhO z2Mgvp5A5Im;%CF>K=S?d?>C-l(G;=s54Blc6-@}z+Y@%D)R|^7^CTn4>=A`M$1iw! 
z?8jx{@Q8gPdUG*NGXHN@+XnVEVgB+eh$TC(a8^$uG!Z@28x{~4Yphejp$o12| z`L_eUcXwcg(wm4l0LAI#SFU%kV?fI8-L+wiBHgxQ$h|K#HYYFX!u`xXl^Y$_7CCxF{{I0`Z|l{493$`^<| z!Y3v%2gI~ac|pd>(IbZf2$xQvd9GL$*bw1S*s`>$^jjKRW%-U_88I*W`7xqvqtaX$ zds|w}hh+mk&L|cUOf=4XZNk?P2NjUc`Ak@J55oC*;5Xvh z&~+QOB*gm6lZc5RdXaEg8c55}5D)g012%`&wHC&-3jybx=19ZgI`w=ok!_GtuJ`|NX*iJxFvn?d3r zkt;WEmGygh#!QKeH0tRN%#~Fodjip~TGihfcY4!*@7;F-$+k{OqE0_S%bZJD{_&swETI)hm{K`=;Yu@hl5o$8PVN^hGQI;Uoir*+*2BeG%{jV50EywH+ zAX)EG63NNLkE6>M(SyHb>kFCykF?*|(0txJaU#Qc|Kbn-(f>4SCn zF3@r_kl$J|aD5Sq0949ecBK8DawWVA)(OI&95o8z>-h10#{0oT2jlblre|Pr zm;uRJ3;C>8CfDmpeeq(MLABR-OiJl34`_E|{nypRKg$w5fOjc$t5M>RNSZmXxd|t+r za=D#5mf0}wEwZ|l6EdR9ZjO#*yvw@LLvvg0sBT`rwK&@P8PDmnXIq1KtA8Tp0$b-A8QRa1TtGAt*~T=P5QhMn*;BIMSXDWCf8VWb&chw;XL|0t-}_#& zbJw&Z({uRjxif)B8y2@)bD2qGm<%cW+|~?ly4SjOm4pV$p`AG_CEjFUsgOVm3^>N$ z%jb=2RUk~X!l<#&#xHA&@n>y(3qptZ#7h~7WibdoeVri(xMnv1wmjea4hXuFqXh&! zdN}%-wMYiFGxQ4Qf-D2J4e(^Y`n-R`EWOY089=?)8OZb*4eQ{+En67nfnqtTJ|eKn za07BN0?{4^ZZcF_f40V+WS;|>zx=8|8@nT!#0hFLb6;Td%Jy`IFvF?bzcD&|)Bak! zT?HPr1;|~RRs#21f4b9Y=;SBHz@x4u znK?B6xLnJ+|Lq_Dv&HZI&QBNL$rfU`?FsnVUB=FeSw|;2#z3UTMH|5 z@tRTiA_F;d01ZIR*irGY=aZ@Pfqs9ML*+gWcV@d)DFB9_zxesTS==c2cCc*nTa}|K*=HjO_*zJnIL`HP*dT<8jmnloHyp9;RYE#! zNrtFIJ8^~pnhJK3z-UTojFd)DSr<8|hYlHaZQ2+Qin z?&YR^)2pib4jwq1^I6t;W8k3DgJc=-86TRLaa*QycApmh5*4huYePC z-I(8Czec|WFH6$u8%Js|ElLUB7fT0K4R+ z=Dzv?x-I}uj<#NV4kxF3#H+0-&wW3qxARoAMzc(f`9|8)EEdU33*K;!Ax_R;#s z8~IJzE(kX2pM5EMM*m-(`nbv!EqT~^@qU5n6L0-+@y6TlHd^~ynRhx$h1uqS#jig3 zi|C}*VcAE&@mv4?;*b9D|F!tVpZwFFS7KST_xVS^uHx)K`f=TWVCV!b^M(A>U-pwv zKgnj2fUtTp2ifnFI1vr8TL>>$K#!j>*eA40#^@)$>d7*Ycq1OW{IhG(cG`>fLy(y7kQ%#&yDz07BSpIIdn;4#SwfK>Vc7y=x-e*JP| zN~UGElZ$l7v(CUGJJ9xM)6zQW54$#S%xl=W$p^L^P|(?5OP9YKZAr*~$ua2TWqM*H zT-}fqk!>Dd1Fj|2T%12w@=)s$e_n~#*k1O4EdsblLn~M5qrY@=;K;Xz9y1=g=kom3 zJiYcLyYgNDz4lV%_mynG^QTT0q^Y_lKD{mJra5lgwXf^=8B<;*s?pfyDkk}E>&UWX zkE~2I&u3GSy)GMiB%Z{hPvhU=1Eq@_)Zo+AliH=EN1i<7-NK!dJzy=rnn&}UBq`_@SAsjXysM=KaD zg~)$BoPqXyK{~@hoC0M4Ce`rgFU++jfmhL@VCVA=qh#?6NG%X8Rd&Ob*rQ^qW zO7S~+POby);2z|otYY$r?sk6_mbcnZba$!tT@pZMX0~--RcQJ?3ls}{ zu~dQ#ncTdmc6;|Elaj3}O|KRpv;Nz*?-=W)Kl(@n>sj6x2e z$*O$G;t}|6kyh3%0UtSANQKaqDq&`31I^PrH*VB*A)6ol%y*yfy!*Wp&*sY@|1ur0 zp(LM@Q+l2~dr0p$PJH|dq;25s&#~|XE|oqBs5Y**#Zrew%2TKYxydfWJ$8% zX^Ci()&Q}(IV=e$W8?Gx*h`OVKTC(9SwN}vC!6V0_8r~pp4zxADyd+3KBjvCakbJ( znhiVXc4uFWbAY1xXWz)RHoP%vZC1)tcb4?mTi!T*%($Cq&(ON?~HOF(j< zJ}gjT(^Ovj3l{dtKHD{X9D#X(mtjjsKiknrymqIr-8*(o_8d4kp6jfzalkj*wf!p7 zC%RMVQGh!c0OMdcqhsSZQ7@7k`g12;z_ zFjKs+xk_z1l)Xe((%%x$Br4D!f|?C*%-?=?o4%TVvo!<>fzG5sM|^TY83|u>9ngMC zT4P0$X9YX+=bfRpt;8RP{XRJNsy?c6=&BO zivg(XT090aLtKdr@MRcJP253n77vjr$-h>X%1lhMbLWoUZ=qT=hUihP;xL;cTR|_3>WNy*3wtMRyw#0P7OYlp+xC z#?8yaV3k#~B7bOAPPmdKIaPpqAR(ZdPnL zBac2k{f>z?Y}hoE^_g1Dw95&oJ0lC5(a!1M=nW;%xG9Yg(Qdo%j<_6z`|=P0vBe1kEY2ZNZf14OMZ zi{W`j?I5n!5qQ2Vuw3Nj?EKk!K(3nn^oQ?Ee)sWbDTlIiGi1Fm3Ifx|d!`I1y|)~i zffuI2XlcOi&N277xzmlb2lq$xVKE6m#{7~T9uYqR5(9B4WQzJcCu~HUdLKx2zbL1D z6=~hr-oJ5lfBoopRn}TLSo4;#xodTi>oS>#BJ6py8yFYQCf|AMtvuy2CfBT(+zJ>w zbM9OMYk7O9|7C|Y#1FtAz^kv@F(tt2=xayEfk2}#acd!~d3HlaL#JRb%oyXa&c={R z_e6$e?(6*zsL;Adgqaa3AmEhcnVC(k{-Vjbb0>S|iWqp;=o`19zz{m41gwipiU$OF z&cXU3!vFHm{?9UAF10=@CnsvLV*Swqfc(YFvg@ut4A`vae(OmQbI=%_TQ_b`8I+wK zpnmqu=>%%vLtCKD1RnbO`0)YUz5CsFbMm+MnGUk8UC(>J`gKNnhtb?DgPcRye!XZ8 z49Y>%8E;w;D7QgT48x59J)R}_bT0!9p9zFL4v;3u2}-4?s#;bjptY6w>@$HRTjMZ_ z06ov?8EabWO&va}{Lq1$>r>3%d;9g_3=xn7AVKX~M&;V>OQ~*3UNE9ISJm@>lYQvvZ|tn~{IVD0($qUUv;ukc^}gP%rwkLxddsrh77LM}>^p5(K`hzAGghO(2n&l|H+S3w9bva=87 z)Rb+W3?b7PRo62P_U_&>B*WE>k6X2HjEGfW_j1cTj9HTLwAa 
zc-0$y1UA}-;MF1b{O#rQo*Q2e0JU)$$Cex${bA2toSOz7qWeVw>-Kf%EOH4iWKaX? z$6>9>Qo0V$`J1_s$=cT#8(y8w=0U^P10V1UJ#jYKz$rUfhRMYau03+(P!&zJ`6-zC z=_emGz9|51-OZh@l$nD+jKzAsdie}vc?5A7m;Hf(-3qs1ewdD*%ospe_Zo(Jf3n^U zw7q*30Gwt|S*Sif_Gkp(*dnbLSeee)-@zuE4H-pg9kK~h#Z(yGu%>YC9u+G-p-3>Tl$lwHl6I!iJq>trnz579|^|phMT?s5H#(>8rK#j9%7u z)0Wv0{2jr+bnvERoFoUy59o^XIQFjV$angALBS2O#{C5V^cr@+FPqOHgfi0wfhD1+ zP~fZsiRf4WlDXkIAPb$04%oav1_PDR?(jx*2y~?Ttch%OZB72;zx#{Hjhyo>0UQE9 zO}PxS+9C9;;jHvoZA~tgp^UE30CtSdhZFn_+y+zJ; zXfIl4`%iwmo~*#rqhwnGMDj^+ksopAb|7EZu?yL9mjnK_)Nr^l8dI$ysLsh{GYAY? zN3!3ZPHC>aiH6Ld-SV(Rkgtz@KI{cNf?uuEv;w4-NZlFTtSr-6D>hY#M-CqtRh$AQ zH*OS=u!dcmqU#yaNY`7N6$LKQD(6c^@c^IQznAQ2i8MeCpL$=cAJ; zj|uze4{A+(@nhn`!5jEZNk6ec*Wfvh^;chiJvn*$^xzRb23`8B@u25*>Ev4lPJugk z_g=DrBYO2(i6O1!>VRbc-?jUfdrtOuG(F@O`O*afXh4vD7_`tW$6i`fGU8GC+qp!8 z*Si0%y$9084<~c={$3J)qQA@6t`AT^UHlGytj7*RKt|x+Z^R z`C5rSJ$nuNxhlWyyUVJqTA92FJe7!XxBH@>+5RS*5^xS|lWh*5n;!58@XFo|w3!05 zIp8-6o_0E}xmXmE_k-X5T}fi6Cx_nt(d6J;+ex_oR%OwRvUJmTY^2l2epfL4=;Xin zqklO0-~QMC*^-Rc)AXsNt#$dfH}hTChGaMX0K9$p$=M-~v?Y5SAHAyL;QzhS7XbCC zT*Qt*o}M26`2kJ73ea4>cCCz2?JfdIqeIUCmXVrdE4uWz%h94BgwL>@t-+(@lY8N3 z{LLP=4`dS%3lGZbbLhHEKEM!A))`aImqHPM!KUf2CwILd9F&lrO^+2;z|DFwTtIE%re`F4(oa5q=gM1N2RMu zDeIy-cTu>8zzAJ!ZwGb~ec*XuwjeQnzY@5G=62VYUS)tbGb0HhI~(0D&(9;9w0Z^b z(T(ibr+xNW@>D|9x_I2-g4)}%l?7FeX=}F8px1cOy8FWW>^@cB0xkoXY`uq0ZKqUo z8N>5c$vo@O`f1_i{i{hn?R%H+OjH^*$>z99YM09r~=fEe&H>Qu5#OWTX4s ztu@fI0C>rN10-#Z_K|KSE64k-nRV7C^3L5Wlf!SlU1x)YRXlntd2>H|@#2MZfzut* z*n8+)g+IN&I2mJp_D996^PQf?JM43MK$}tHvku-9P+ZmLvj#?csp}R)94>>%AytgMI+j6YNv}Q zv>TNKrzOR^1r)9o1iKivB2cBBj4`@SBB2-kKY(s}FZ!pR_|4?3&yx9GL)QdSYc0Ad zSt7ypisT~COm;qeAX%ykhx9MLMdup}FzPTcfZA@newAQ&_DTD7FS|@)>9EP#MgL$A zc#wP|)1EZW$Bk=Ut!!L3fOhgAouD1ZNCq&%n=eaf++A{>Lr(Jp^F!?Zco^L!+o5LTtXuhjM~)!m#VnhA#GZ@0$gWSyPycBg$hKYuQJd&f?j^rQ4$wniA`jmuRd zM5pZgMe&;XN>o!BGm=URHdse?fU(gpWV&?+@@qqe;-1H&&$FS}LUadtPS)y^zbamH zohnjt*_zFCb{9S*Q|S|D6L>9I5FXInANpJ0(d3dQ24A~Z5XgB0f}eZ#A53;SC+7KN zXWa*8)~}8~RAVp7rrDS+a4Ws|Vyp#uBapRbt~2s()j?qMjGotJc{OP#vQq*hLny@D zA)U&_b|2V3dHdV%qzoYOPh!Z+MMldG$>?q@ZQ#0LSOcX1#G863hYyH0oTXtnPKAqX zH9dB5Dx$H$GN}(AK0J)B;rPS|YZ3@wpE^DH>XXkWn`2mtX(-b`n77`1eFzhZkI+>L zPvGHoiePo1?)?V|))d!{q6DHv3`z{RK7+qiODF}fW?QWYa3D-52Q6C$fYZ1L{nosb zoi8=6B9blO%Svu-nv(Q;_ot=*Hj_h*LuLcQ2tLk=ECCskB6~JY6!^oBJ{-zzV?y!t z*+7RBtVkK+1?mkDHYK}Z#fAv&Nr1_05ur6@s9Y=abS_70OY>w{)b6Fh$>f~rnMy2W ziF2GBGI%GDN5tu23KmGidDYjGL*|f1pdn={Qf40*86xMLK|N+E0ZPK!E@~eMG{TV) zPe>~xl|^G7moLsw{_1akRYbW`uLQUC;n*-Do_6116ty?Xe`GI=9`w-&0Db3Cp%^kpe`C7(I=LM`>QHErnBjaV_$oitlE*E^#%gq5YoFjoG zeeeOqcgynFRc74hUz8Qlo`}9uR1|_fcMc62*=^lNRC)WhUE}W@-v@UC)!SdROlhvj zAfoKh>%#tiFCbX9(np_vl9IkUoVLOLm7!9+T^dgV1D00O_(^=SX7a=Des{8O|Na5M zo44}C;aoMZ)hYeedZtBLsk}j>C(q315TESu-?xfr)>p5LCOyd+Yv_paB3tISAAQw0 z%Xll00<`_f4}MrV)7IYqB*Fe*WGlUhC(hTdLyz)pf$!(e&$WML^yZ)gCG>oyk+g0Q z5P9QU-yYc!tD{GX-TT1lO;5`xPoZqc!2i(?e-t=>e)44pla5vs@%P}%Q5hoB$QY-k zL87M)ly#ngK6Zk2cOS|w&mrr~1kT9yoF}|OsWZM-6}UOLw>^ocRZDOzWy$ETE^<5i zb7iz>+hcraiV|i>DK(cdygs=KWG7Dm*(wAC+#aRe$fski@2zO;hu{6y$jDw_VC{Lb z)xMD>ZOxGXvb=+HN9h;+qNn%(xfwxy=)k^x@yoOLMYdbcRN&8&bdh_chGRxz?8Ri? 
z?n({|;@CfW;8FmLgP;ER@Bd+d`t`|)(4piz8E@NzrTQhfe4Zk@!@{GeqnMT3-_VsNUdoazcPKu9HE1 zy9`nWl^38`A5@tvm39ri!f)iTVwMxXdadhYi#r@)~t z_F-h!H%~^|>A7>A({X=fXAjV;_tC}_Bm;&MUpe{dbk8H-1q?_-j#O%97*=1NI?+D# ztma7n0KhnCk>xPH48otRuS^F>G+Tg^;Aa_4WHaFD7jk)=~>*6&=vvIqRN*MZX ztkFYR%Q*lU-SmXN51O<;u)qF||C>YCxG!D-<_!?NYerj_3^pA(I*j$+z$+tb zrT5L>{m3CbuH7DwvVF`!5M+7Ire)RcPZGrI2an|JJnT$`tK;BKHW#Ns5Sjc2Ognf< zpK`X2RC@Q&9`DTAlLNFcR>r%QBjnWj^t5tg3X$!{QIf^%nfm7nY8f-xwK)eKum~g+ z2zUir2ud5X^_Z68)($K_`tWyOjc2ODTM+NB?EAuG!;0^{>_#mBo+NhwaY~MX zS+iSr1r9p%BRe8MUCA?^3Bj4F_Y9Xcs<=}A3O zzpgz8UjR@;E)6-+fE>n42ct=qp(_h60D9O|M-Ltt&p9(+Kj>tL1b}hSb@LcbVxvJj z4jE;$>EA1Op^Xp6)tawPM#^+YKc0cNJdaEqEhw6g&kiRz{mvGbv@%+*WSeZQm4Pa& zd)YkOx9|Lh2dk`zrZDOH&P@SmOQ7%qR$j@6lZB|_);^8w(`evMYi*6tAYNdz54tAU z(ivO2Kl#6YHlHkd`}C7fva!!&nD{9t>oA>{Y94xPSB6ClrNtz7+yO$vg_dfec5%{ zE7!Hu<4Y8{G?nIRtv3bKJq+AlwzPAj;>q1dN){*s9B4~EENjht?&*VC`J~&)i4iO= z)9vm8=fD(vTEDS*m$e-5wIW?CFoW*bHEw+NxP9g4uE`E%OaAzcZ%>YY+F|6^+k^Ju zWwylKz>j5JJJaEwssh=L)@Nq6?8<$+C!hTKFDHNgfBY|#y?ghMWC~SoQ#~7@wc4M5 z`q7{-@~~ZqCi{QF7A+~`mnEk{b^u~ybEZKlvH(~nfaPVI$MrR6d*8_J zXjj{vtvh#(`42fCeF=Wt${rOUIdt@mTI&>WiC=)}>=z)cbzt*q19PQ}WI;ngEIhV! zX%z=$N6Kzrw*g&DTan`ZwSkE%pH&5uuEM((TUFemt{aI5?G8R+oAC!;#%F96Ht~l1 zFJSoH_EBPtdtI+TJHFVx|3LF15#ynDpB!FV#-AXy#2S1fQP1A0HZTW1z=HN%1@L@x z0E*E`J-T4(hJs2vM<%`YF#@l^bQLRb5lJ@o*dDPh(SY_#Uw-yM$t;%!#sctHCB7y8 zLMSWwls)@S7{OGhs$zK>V0<=kz65~{;WW*O&xRgkc$zoA{mR8!QeK-+-lm7!w~_pn zd=PwF8;#&+>x53VR(f$iI*-02d@X3N7-{SABpY`^Jy`aLDU34ctT+w|Zk0Gb+caI#wY5YEjBmQ+z<*Fr}sTFQz z?FE-V`k*#V-EVdF1fA?KUSk*m=k`JSsnIvR{qvpeB|SR>nF||L0*3AmBe?;iB%>H`BWpSN5646+Y{?PmueXZ8{=hHu zY4}{$WW30wp$q$T@?SN?vpd;&Y`xavTA0R_OXo)=$b5Fn_H8=`9qq2CIQ{V50v`@v zy&2#vSa_@8?e+Xo@`tTX;%oI|J|iiLOloxS97!td0sbJ_0-IWy4!v}_jBS2ud$2nF zq7{#fa+LPw+=Et3K7Q|41>!dj4392kpQ&uvnk~Sd zRLOt2mS=2~XX)l)2ecRTGr6&1Wo@roPZ)s!`TFQ+?p){FT)#SSBtdzBz%^lBEBhNC z$sDcW^S)ZmA#d%$l-CO|w~p49?-oKUIc)Rx&)>bB zn(Pn2rhL&3!N|baUSBl*!)=0tAOQ>;Q|JIE04=44Aqrx|m0Yh$3#-htlV`gZBb%_( zhNv=$ zA+WmVlavo4pkU4GZj2|((Z=0Q$lX45ca+jN*_u-`z_6I<+&GM-(uT5JW$^>v*QTHd z;8=Y`O|*f*$OsT%^B}76@HRPIoegoS4^X59DjlN_|xjlp@LNb5L!vk3F`MOTaVOj~bUxmM(f5cQnCdjVor z1{zc7_H~pzN1QTPWv(z@Iik;=W(>9`FIq#!JU3jXbbQsmr(jYzU!6D|op!*<7S3ce zp2D;SDknsw2sW7sj9PP?3)mC9*_^Qou-u)~$ALFMJ-76gWY8$fx?7g{++0~W35O5* zn%%f=e2rdCOFQ(W@%+u-{#|?^yR;1d3+F~Z|JfW}0NUk*A7|y2A)==oMS%zO6`n@d zrvlg^ZW}kwM5AXXO9S_TE&?wLpQfa$vM_f%H=?Vpl{uho29dpudM7tKft*21d0b1V zF*qCq=-dJHxHgBC^59G@x)uFLAAqraTlcke@lXaVW%AxHe$in_WyY0li&h6%)H7u; ztz1*-jEKLomLakj(isb_F^Atlt&|%>UOO1PyE@~h!9Sn zIn$VyBzsa|30ixv_qfAx1)(;_r*m_ak*CBQ3ViQQB}J{v-d$xs7j0&o>fipLF)qol zxODZ}fF7y zW^yW!oDuWUN57lg&sqE@|HJF=I0JWUHAS{l zEXFd;yp8K}^tE0gQ_agE#B|xMcwFF(Y#>JkqS~5_m3VD=nPoknelZ@hX$q_)ANn20 z;S~0N_aFW;XKQo8`ST+%Z2SN}**1(qt!Gqm_&j4AIMCt`nMRHrKRsBH47q1A()*Xx9r|nVZ0o`K90qkin-~2x zg^^*t@Tw_^ru*`o%AEGzu)l2S{k>?B;SL`{%-? 
z8_ARxWzL>&&04!?Pak!eC1m#K(H>0-W==C8j*IyMn#ISZzT#2&xR12G;G^ zR;Exg=Bs00XKR)`63@y!l$|A@52*HB@}h+ra?JXo6D>0IA;#ybIt0x)<{oaPZ5!H^ zu)ra{nL{pkYitpfISR{W5p@hP^l(n(RY;lD|Io<Kii>_R-wc6yRLxt;S5ASWNoLmo6}Ujzs6oNPPt9^IG|fD(wg zBwJ8&g4YOw^nFotr+;mr!1_QrFiY{Z>a)f&&W zk_j$dxzT6J93%sy$ze;!L+BKpyAH5)FOX|_JpC}gLV#R$sx~^t?Rn@&R=E8ifO*eC zz@s8SkTQ9~CR$Xr3!90&1FX%TKby=i%fDyx=hhD3k=>DvQ_w{UL^Q@owy$1!LyMeJboFQX<_ltn1{l#;1m9_7~gXi%5_+0BS z1=g$jJ5eP-cEh2!-yJ}emJloROKwNo)(1GCP02J^8<3Ljdyt-85zlI?fXB3Ep_hjI z%AWFilx_e_kuyu^-G5G&2R3&GO1NUl zG1j~_MxqG;bl_#3p0&SZzWovScc9+ofJ-1EzmA-?me$Zewy&4<#|zrSS1V;Py0!HU@xreq5b=#jj9M**X7Au_qcOCps?#kd!6L$d|6-FJh?9XnhNvgDOcBaLZ$<93oI;4KzxSw@r^FMEl{9--09g@1Nj9P#^|DUa{jV%9= z?K*UEvWIT$TfyjPX4tr`E#BxqNdp%G_Fz3b;+dPb!g(Z{#4Gkf+mybI^>jGnmT)X< zqW|{E<6lhY(EO+JJRhueyO|yqz_kZ@#_!p?Z^);qoG56VZMPtK>38%_CaZ`Q1YwKy z!=O(=C+$OK!=MDWhyH224pY?cosVu^7j8;chFcB$CqCj!;%Tk_9C-TL(KlPeXf(PV z;BvI53Y8Bl(P`c4Y0@J82gb_U#oNykadd$N7+Jhgl(kk32|&Jc&%w3X3a!V20*Wd? zB~z{nqo5|)*wb?kv?@(Eq?6FtSgYpRN2ao;v68Dg6wna~xG1+|$YdR|M~6P$6vmpZ zDl2_+mGop6xizxVyN95GKotAIJ+v|+)AK5;JXznl8m9yKH@9c`YRLyxzT}YJ-E3k3 zZoxM`qPBwM(v!zP+;Eick7lNP*%l>Z>=(aKL{TkyjVe$lxL1@5AKvX)#P zl_agPt`_4QsDkJ4Dsx%NZtFg^zl*R-c9-`JspClN#>i59FeTM)6~ef|cUqYniinf2%)oTti$4J97tuS?!uRYmmX*5zJ4D16WvE?P+GWI#_#CRA<7kG?N( z7w+@o#i_~S+kvb`GKi@`J<9o8l_CH3Ye!4@-!lL!4kh75AQ1i!yV=>(CnEMvOg(U@ zp?k5C4 zt>l}L%$NXPVTe5ebFzf=M7>#7js;@SX+Lsq?sWZQFOMwPty}8f6wxSI)%OOY>g%?e zg390*t$q-ovc9Ze+42~A-@XHZZ?%RANOC%^Jz)4g3|vIyo=<2f6PcY^JxZkU#D>7> z^#PB!TA#!~ia-z8C1tU1Z=Tx{a<%)YBt};5XC{rjl>~i2B1+Fw3bG4gg6m>9N8+$vzT{{pU+0E;>hu{W4V@#3AHu1E6Nt2JO@CLKIFhQ5wr&1DXLNVvQto5sxt?6`cEh@M1&DW2P zXrMiK9-*TJ5!j#o=sT5^tegDy(=QWxj|L#7tp_F}46dfw^%Q?uZz^+lw?@{_(PPZ6 zoal$&`+H?rzccyE|L|8M$U-3!j%!!eTA}CNjb1Uk6LFR2Xu-f(5V5tmBEWa*!LF3T zh7JHB`h3#Y-RR-w^($rmUQUo~nY{bUf_&KI=cES;X4tMsgrp#<}(vo|KM9e#82=38%7 zNLh=Ez_7C?&tz=nD7Q~r>W%JTtb6ezvmE#gw3SK3@EK)U3Bt|aD+{@-^e;}H${5k# zwC88wb3Cjeg9eSn7L98PfZ5yI_ijw1?9stPM$v-=e8ASD*5F}%1;6_Gi=O?cKEiui z_uC^FBA}qB=&pE`b9tpUHJ^X}aZdV;$O zsTXB6dc_6^Kh5dT#$o^7z424VQ}>Y_yoiEoZ?0G7vU+U>8^9sQKn9*n28J41yqs~x zxc$?A_|wVJH{Q(YEf~;TO5?MJI*lZ->z>y+Tmo!e`purc1K4a>$H z)j;EQ1E80gR(4Za*nr73wUz>U&Q_wcPMOXKUQNq!9nQKt8Tc3f`rqVpSIU&&e;TOv ztgKCk0^?yBx#((hl_2{M>>pr?9{Zfi1!Z3^OgW;FN72s!yW`pObMwiJ!1nfY!{+uq z1IodDs$yi^%ghmc0Ph+|1&)LCW>u0$o^IJz21>vm1-G(i0Z$nfF9OQZ_P!j3 z8`03=!>>ilW$h$u9%XdNEWUO9@&F}O2{??A!>N50Fj$Ki;Fl6^>;9-UdHyVVYTc%e zJemw7YYZrQ&%Q$iXI7L|(E&Dnc4UAqoGMNVZ{Oad$*!wqmYjMtf&Z&hW%F*|zr&?Hv* z5&9=pM<|(B_IqJ|ZWs^=x-lLGC3!4RaldEYo4=C$-ah$zKmKvSrdpeP^!en?citL% z>u{h`=2pMS=NYD7eEHerZ<9Y43ZU&fcp$o(W*W6L(OdmSE3+i)7$Ew-m-ab1aNyuD zR${Y$UpO+;(WXH5&70GT0{XkLAHX49L8s99&uinxqgmW|hwkaR zlxw}Y`=N2;Rr~j{Ii5IkdUCukpe$oQt{Fz!lMx)8x|V=(hH4`i*q4BuzNA+RFzD0w z{CN(4;{dW)LwhVE#JD(OGFs5CYJ_D{o~V za{zUY?&|c$Y%Q?>RGf1MHm=CdmHiBme={KR*yo>&v0Vr-lsT!){BPg?c{*oRwA@&e z^|AnlKzYA|tdo89d_B1VHd;dAv3>iG1Waye9MddjpvnF8rnVOBD0=MC!}cX7ZateI zy8&$j-O-MNRmDC@r%RCGPdJ-_{RVtSS8N3VMhOMg=}$iYw6Qg(zAZ;J*!1(U$r@quvM}`_k6q{i~Pak=-_$)LDi)KkI^YW zYMN+K@BOMDNCD;L=gyxRCG8IHmPqg@+Mcgs0dFsCJ@1!cs2adSHkIgLeo4w`Uxg)k z)!rnRm*tyn*t~r*TcW`7e1@HS54Uz&Tm)t}Cshm!7U1!EJ6}2*4d&ZkI5zpsfBv&! 
zccamT*{WpwfkTJ8hwCH(ar%=n(bSc*Ur#>$-Csv%wopZqd<%EoM; zwR#&euo1qJztb})0J_f8($}{niss_)_G=1`_FM0vmEIc&$S!@ zE($(enLk_NN|nz&Lsm37vo!F0&(>W7$URnD2z)pyN4qCXgm0(i4;xl1ItRU?6Y^jt z9r)lTfDTO=YxLJWx9r$E*}m_{p!uf(Tc^INzkWQmws9X{| z@daxOxI~L+$oV+P9nZa}8V^`{JspLPwX(Vv0HT6Hrar5Q?$<+{Er}MjQ*l7)zP$&s zucvFpZq^Est;u$fMUD^9wDwx;*hTU1gKDDu{;JjJh52)X=Cnj|9@5lKPliS}w~_&4 zzVQ$HZezMx1}sqY?RUOAc!_=Q%sYo53d~zGRWty4!3#8Yzx&MAGk@vwwXI19ao()u z%>WhKzia}@3KDI!anTwE#<1n!?jaARty1}ao1)8lL^l@t176_}a;+d2G%xLa(LTFN zU}NaAb{!oFlnG+!C!ZpSm#U`jKGy0XS!5sL`vtM6fCc_XGi>&!RSA$mqhg@v&?QrQ zUO=p*xW{C!ijXx0MAnAORrQuVCOD&QG<`en&o>Xp5bPTL&B;=lXLML9OK5dDR!Ir8Sa;g-{2-w0B*PV{jax(*~&ceU1Z3jiPekHA=Z?q#@-e)uY)1ymj;JMa=) zABZpUYW13pgBS6c@r}TV!S)mWm0ixa@)o%>{H)g8f%D|vy#hJ*$N3C&Du3l=gtezi z^ED+^!i3-%QDXroRS#is$#-_=Of8SmGCLdZz$(&u9KnQVWUxHKI=F9NJuqJzW%eF~ z;gkn_{@G_y@AHv;cQv7am<}F3GTJr_rP>5+*myO$K|H+|HK6(?l7SF&o;-bOD7Kw} zafH;vZ-5;_0t1>kCVJQ*W7n@0;m?=`W+<~|6>5yi3OE_2LTEPl#vU_d zFvzmF6Og#Jh2Oh-@8rPV`UW+A%rYOyV(gUNuAQ4(mzjvOgWt;ba{BtJ`py32C*K=c z7_!N42F?I^pFS-EDM28jw=iP$vU)wVM`m=SBpCrm4!l`e#`dw9XHS*BcvCUQnW*&4n2Rs>$ZEZ8E-IUd)3k_e*1)*4`>@8ZTl($g`a{o{n0 z-_i(_pkYv8{@0EkiMcL~QsB9{PN^?Dddrkh8DEW68Cd2(UkR7) z`Rr+I;Q8$b#mn z)L8cM-Fx+7VtDrXTTg4r(%xbUVvmAEOY>JNhpNAR1DkBkAsyK-Efm1v>yxJ^AAk1o zFs>O@GdXjkEV=32Y&?`?+9`!pS=i;Pql)3gxw6X>6w{u4t!cDv?XRbdjOFV68yzBd zJmVt;*IIMV98SrILA#=)_H=qU(UQrVN55b8;Dzo-5tN-dKOZfpke*iBo6?zPEb3FA zp)%IAY^$f5w%z1j|}?k-!HJ?*uT;jkW|bpe|m>Aju$j9`CHm zaoV%16V(%FqVOk<9ZS&4(0DZY)vte*Q0vg1XvDs4%CJ!~4oC&$$joC{uE{}`$wshG z+jg|?5d)`QwgTa<1Xmx&X%^FTtooi&OrRHZ;95Zwk-jSl>)T}l{Nq3R{mJir|2rcq zdr|Ag35a=zGM&jNCcB29?)hXHP-7b1t_NBm10f3IlM$e&8XnnQcJa=wl@b@6ICuJN zO0oX2vkCNqKgM_e-s8#N{_;1?yL&drgS9|dl#KE`S$u@9vjaE?r#lQwyApKx$!DMU z`>8S$t|o8e37|*wx?ZNYa|cA@_12X6VO?Z+Ux_B|$&R<)7=r&^&foF{Wg<2X^yHN5 zlgGcTMNef>{SG9nzA&09Xw&rs+xhvk&3jtPr<{zlxFJQnCIvhjZ*fX)W<=dDc*TIC zJY@LaX-{P+%gEclb6fk_3GT_5YduSwrYmJP?CTtgQO*}pjwmjJDgGLVgSFpn-{2+X zq1$)v9Li_$k}}+LKvgRU7CsK-WBjZQ><3I;%$ZaTM2?Zi3=FdOVnD#j)~|^s*Hf0Q zorv{c|LreFpZ}&mnQcvFFLEelvvanm*)z49$w08iWFuM>u)I|dS^d|~R8_#K9T{-y zKYFG$+>qhh(^~HiF|s zKVCp{GSCHu+;jA)Ozyk4Aj;YE=LSt{ogg6P%#RlzE}ZP$zi0C1o397B2d+JXC!DM83^_Ymf#pEle|S56TB(0BjEvy? 
zD2-H_b}m3xxjv(N&CdNJOGp-%_Ea1kL8UWi&vY%>+`hKhOO}<*(ZSV2P9;m8Cc`;n z3o>pQ!W;~aBm-0iuOO2>(~`%yUevybJ|^pKT(9Ij5lKcfxL(#mWq5?%?8S&iU*z`a zFWvt8=g))Z+xvki_>MNpT4Gev?E=PvM1tND0UG4!Njbpl?qTn4i z1z?a~Z2XM{CE1AKKKN>(m$f;e!7^Q2+`t>LG?-uP{wDj_utTxxU(A?Tj>tbE; zD>>y_bK?|D*V6^fxo6r(HUgUP_mKpEmD4Mp<=H?fjwa`2VfU1|WX;g`V)CQgIRF$b zS!cGGvAAD1&t?D=vC&k@M2r6Q_bFUYPLxT!P*(gDB=@`lT*QBz(BUkZbT7KHp-=DM z4-VHhU*j=P&fxgn=erMH7|ubLaL#&cI!XV-oR;{3U2QB|3x<(7r%s(3*>bb}t_+!O zQ~?Aqb-nCxI+~ne{{pH{oG9Sm9uD9%XG101`LiebT=J%MHy`>&Rm+li>g%t+80Ws= zJHXv^?OTKBeymwTAA#A%MJLOu0}z>)o~s-vS)c$>cGbo*9oMDT1PX4{M_ESC$Vl&c zl}!%TV|!`g2ORj$5C33t;nY{rTe@shnYJanNcdP^maMVJ5WSi$JUz6xio-PFRNBb8p%#E{yv_ZIwxl@Dxhi#qAV}Gb9lms(C z$7l?l4_@i#h3vKVOV3s^BNe;-mLR zYd)>G@Grf{mhl2`D7RJx!NylzhyPSnc#(PL=0G}?!lzFhpM3VkXIzrjc8euBpC$)=kuC|KKivrQUUd=&3R+e5ar&tqT8vVA3g z%zX5;jqiu^9c=@Zfra>NbJ^}P1wa1e^TCs+I-loBp9S2p$Jxl*?(92Q)@5r&AC0P( z1`a&TWIhuB06+jqL_t(MS0G{4?DoLM?8E4W?W{5l&9V8&_0=^J61X%DELuo%XBh&E`a2lEp1ab0r^LK}Y$Qt4Bb-$&-H3;fq6ssM_`O5`bvHo3HHfEEEE*|!hMZnuZ)TPt#$eWl7+ zc7CF%eJEJceaIE-MgQ{Ghkw{heKMVlsF3cjIsJ0p5}wb?#YVHt#^JxzA%84J+piT z+oq(QfLAirINgBk)>4q&27nzI*xiTSF}1M-9;#+(E%gJ(bHF;7fy~ZZ$+Iir7VH%~ z&G&K6#D?^N^hZfpk{9h6dT^cAC;Z}dvm5&kAkFVi&!Gd?;sc2{16Yo}MxsGHv#Ovc z9O!9laPiFX;TQ2|_-^}Nn`u4MImslgo$TM&Uwk~d-h2B-^-QYrWkcOwOJ@K3TnC(~!|@GA|Vjbm{aJ=a8@T{F8j?0DkRRtn2pl z)2QO=gQFc+`;LzQctBcq+pYjML8H6HQH< zEDBlx?Ho}Qe^|B-yD0&ZRVuKnrK_UQ1TBZF_h`s(x6JK+5r-)G2X zd>l>jLnR22m0C3|@A->YRN;L;e=oVj9$eBsZA*qZuSjKrYI8bXQXZScp70x1`@L&b zh3SOA1{6r)=M7uCy@KaGdseV9p2o}L?4AZfmKLfU<9r-FKVQPG3e-;jjCP;-Qxen! z(9qrEJ5|XxXDzNghpZ*T`C!I^-&84h&vW>zyY?Pz()n=7EP>l`E?)CzuRS7v$h*ek zOPe6_>Twl2_T4j_nL>X*Np~)NI^}a`-wnXGb+U$ZhL*B?ak|bI9v-^0ad?p*fN>R6 zUL^GJ8Tvl`T$OYM+yt!6k-Wu=&Y-w@=i=mpDzz@x5=}c330^8QMxsvRLoa0b2zn+j zx0MiN{$%Tds@FrDqk{BEeONO7jaHgcrB9zVK1k0H;;G3FeP89%hX0%9yEn5m7(O!ncnf@b8*qdx@iM2lWG@VWlbVc(}X;N2<+N`LcT3WDLWr2hl z^tqt~M;Hp{G4!zW|2X?wNsk+rcCw+balv6 zi^0jCtMp3=qQf3-!1jP>EmKwpwCvtXu|WJ&ylwhaEr0X9jUKNCPoP?$OBn+q4SNH} zckbNTz#fgV%pq_SAhVTr6Ur81bHwm}|LcFBaI5SlqNZ5&C||q0!wrjc*=&cE4WqP4 zAVJ0yP{jdXH!~)HRx29kV8AxdN#S}76Q;7A^&$tH*~|eXL=eNVOOUwXlg5X*7$PeJ za1i5@vfnV~!6RGyOcBzA1_6rLwv-_x!zVL*l=3XdSa=fC-EaN}_8%ByK9nE@P|G^` zcYpPF36PkqO+r`#0YlMoa+?nah{D>ie(mJVH{Ph##Eo&7Bmy~n`0(Ux40Pps21SB^ z!H-KB+{RAv6G}?x5om*IKGTvDK$P-s&W1g^+4_xMsC@>pVWJH=2Kt(=Ey80fl`Aqv zL&D1) zh||;vp7yUEeI{Es?(XS8qEc5{ktuUO4`fEvjCvq$Lr$qY4#2C7a)5-B0doGJ9>1R- zJ3S)nTXv)f5VdODT(g;Jyy$T#%h###Tu;)q&5pemab-Z5?NMC!Ue0WmO;qwfW zs|BT+Lj09+9AD@|DEm;h+|GcxZQFKrHbDK>%1m9<`%1Jo6_o=9I&tKTt z*#UDSI3)v{Qao{Hz8?1%6Tr(R`*-asc=*L=+jOI-^lb)a2Q3~sw12XqN)cIFctn(( zA%EdQQQDLj!$+Gbfer`w;{OL_LRDCo?7BJ>+5kYzwG})5<(JL9XlvuO2^8VwYnLWl zQ!4kmuQnw!8)qi_c5SZ(!<8~m>sxw%!%#x9h|mqek19?LA+1BSRAH45Op$ zkJsKfJc1urF4xO6M;^Gny8Ah>^~CYx$%feh96D^5VYRM3!QT#X##dRY$%y0`10zL; zR@SFeROmP?lUx$OS=rp_g_Ri}DvV@W=wr-Kx_wLgkRtZ2H~s{a>#6rwzx+jWNip{R z*5vn&Z8MVtd$!eo{qf|z4?eDH<4RHYy^|mQR+`^ zcLV=_5kJqzZ;vMr;sGrK9>yz5iOEW>5S+*kc(M;IYtJh3oeuis;`xz@BjS%%0Dvza zsBV~6)aZ5WFl4geUh)Nyr601-$`08Wa76}@V_J0id2x=)`as!(&FxIVBYjpE);o~1 zqxyprJk?yyk1LgICl~S7qemU4-Tkex-f^65&fWbo2UK)1?0Z=I3ZU(A(K-5`61zF& zurTWB?$!d}iqvG84F|9JjaSzI=&k4NoV4p1mK=Y30H`MqEtGkW%<=v{>QbgBx>S5E|W|`by4ZguVii=y|Os_U?LX zvM1j9e70|v-)G1#a*@Fehz9Tu zBfGt@S!A!nY?Y5&Fa_by0~Gl=^Bokr|-H)?>|d6x0>CH^V#*$8awAn zya!0|{+C!eJ+&Kn>)fhEi!z@$E~{6~jQg{7@e=2iQvrY(+0ngk4)!L(rfAn*8kE10duYoj=_+kbM&IW2KHy?WpV^!);=J1{pGULC$R@!A4#fh(yv*@q z2R>`v9@e`XNT{*{Sb)X=8Z!M_l2Mw^4>At+Bp#YSUtit!=TV@M12Q)TA_F+t!$3>P z2=p*t2=HzV(94UQ6u!WM_)0)mzxS`c{9w+K=mgoCB5PY-?=vw??eC7fSL%;aw zi2$^_lZE{Q(Agh+=L{eFHno?hegu02y#qkTWcokfOi;|+M({Zr?8osr0WZ%M9OqQ? 
zAC8|q5&f>u{{jpIW|jzmRy}j`<{hnhL8@q9nLFoNMd{AK)Mo_<*O!G#-yrMBly_(t=CBI2z0k@0_N*xly}!6<3)35OjqX1nrj@}_Z*q5zu(`|3#Y#R zbl9O}+oo)6$t9Q0pPu~oZ}W3EY?^G^`P$?s2alA|7tnX_Vf&|@q8`j;W;fR*$tSYL z8l$_!`LLA+sN6a&$kuPAh?CYUyT3IVuYQ&#Y!R(_x9BxKG}hAZ4SBprV+?uPH-T4o z9P%RHi_D;l*h1#L7Fg1I*YZcvvQLwx0=4dM%>93i73csMcW+}ERmIUN`W>xS1UsTj z1Hun@+*tVH_n&LiQW8w}_6!Hv8Jpm_IUYZKs-Fw)7u@)RfArsFyS`R7=Bgo21mS?K z3AxD^pL7U%I+TC9bN9XhegS4tl;jPGOuKgPAA!2Lb0?F#D$&Y%&Zm*3K6FX6@3pz4 z25a`X@v(KychGZk3{c~sO8~tLcswKHn2a_lRScn((RNSnegXmLoDHWv;glj*K0t1A{Be8N^alM)a-jJV2ibSYHh@*|4sbfq{Cm((T8cKuPjL#Sp%K zSu!R%Bl7|Ku3fmGdj?ECdU$8}8#`(RhQ7!(NmC<9FM?G$1|UZXz+V-ueAh+sAN{P& zAzh=b+2i;I0LceYJz!nPEDJ?W<0<>KvA+{@<2lg--3ZvVZm!kt>eT_=DvCqNvk_KJ zKK|%~0mADr&;-VqZniIEj5bqz%p2L!4q{blysUj931B~he+#3xO|P66n^mhejIp!v&?Rh#9AYo%rH(%(AE5!&0zhozrw=4m#Ub&2_(pb+ zOmQFdH-XcUJkUN2IUaq)5Up$3yxoJns?`t74xs#^d*C&}8P`e@+Z;wI&~+)S#kxqa zn`WYyiC?FI>4B};tF5)O0q6nAM#k!ePG7d}BVgV*1x1}(f=+d%fIYS7{anyLSu<>w z_MCix-=O)y&oTH2@Fq0JLF%>FG**>JkJ-xU`TJE>+xv02ZgYkIkx9m@4Ha8cpbu6D zM|u2cegpt?^tgX7doX!3ed08*wqyIQZ03Rmjb++-qiMy*>@`00z!|gO>3hj4TGk3q zu)WxQ7xEj)AbzRyV^m~m$s_ywUM+XnsO*_;8ZS=+;^e=j!%N%3k+ossM{f#0B3nj_ znD~LsXD@NP!SrtSOMmJHy>0gaIU;R8GAqROb9tQ+IEAK8;J$MAsRAGXmM>&@=nJhJV_F`o_cBE<%W0ZbF?E6gL-!&= z2dTV>k(?T|5D9FUyQaE0hW+NLehv(EiI>Tp!b7M47Bj zd;luSMK70`ek;LFNir@*RJ-5-a7~+x)92>e8_~AfL-gEZ#~0OJVMlvWG%LDffXM9j zdR7EV81p!aaQOP|EM#D3t+<$npc^*hk>i!m0@bWS(_Tg1Y~u{8P}Z>-Iy2 z!ZZ`F-}?;Wa2T#OK+_&xz`)9Z6ST@>C<0XKP%#oZM7Z&%4;#h@nzgNtn+z+1H2#$qM-%U>T9|pnbFDf^~PFV zpzNnVe!F1BPg3*&;KSg|sVgJTdNOF09RY~QCS$mh(A6W7^CweoS+YR;mSN0BXB5Fu zn(_C!DncGK$K3^1{@|xSnk+19+(E?*B0-{)XGBofjI$RUbokL1Uq??D6T%7m#SV+e zn26q#5ZSxi(XGsCy!d>1bTjEdpMZbsX1g3F5Ih~Im z#-lR@{Q5kaADOK!Jb=Kt5a^r3s+|f#+Y(r-`r>Yi72vzP-k-~>!r8TB=h!FJCFXGR z>#v5)*0yBd-W`*@JGKnxJoq8H2^dSMtX^HDxqT%A-gy0u##7(^0B1Dv*=L_jUVr^a z%BY@SDQ|L7}J+aJjhyXixn1qv(g@{(SPr(bq=x3$SJFx~aY~4}m4X zCSJTN$P-{VPvq z1mb^uHXA(%=w2yM!C{vP;<{Idp{Wm&RM<1S60^ix~yphJhWZd%p?@Ub7598JpU10AA3* zg6Q_XLph&w4g<-v2?mJT4hVz+Rd3vy_VQy0x@KfB+tbExTrK?2ZGG$d?vt@Dle1d{ z_9$VD&RT091PJ-uaH@OuL(Wcbm=+`SS;6h6eScXtxNH~aj|@k@&jDEV)EsSyvP;et z7c>DBEE==IJ~T3)*G)4QIG3edak@X%DA{s-VsXlP|vd zBA{*kiPnOhgM)_~usWB<)$yHm5Zey#CvB>M~-#GHJQH=Hld z3P*pG##Z9${$nj#7h_OPPUeoDkm*g&q7UthK$^YxdFzW$0B0Wjtg}oQw!mk`{L}P= z`D=^!;4YwOkU`GFn$CIH8Ut4;!szW9d&7AiHfYahZ+H$mur_QKP6ECFe&M|rffvpa zaLvkv>5fo!t*5M!3^IDy`U1iMlmkTUe)f3;7+Yr`3^_8^9_>Ug4ph_YR94jP4(G*> z_itVsZLBzTYu0a!{$yblVC)J2G7xC#qotFJwdmt0uTI_#Cp5MEL3X*Zvqu2SktT|kx=W_h#oDc_0$EHIT%53Cpu!A^7)<}8y08V?RY$l+$ z3O~GU{e0Hv7Pcmn}`+4Ejq(Ip4%#^yswBnDS?K z9*4|(+PsYtXC_Vm20dYj=6x@{^4UkfZC|I(FMA7!#R=qq4_(?^$+m_Tj|CR5+f)Ki zAX)y%Z$J99Kw-9f`V0-ATlNINQ?iO+IOop4x$|d+eTQBIG`DW6)nq`d^=GS~Cuii0 zQR2Ji#(%QjXwvguWYY;UXpIA820VE0{MlTQuHo&KJ8QNyg%P^zg#8}a%>t)WZO)ozv zApJbs3sAssbLb`-9pDL>63;J*j#u}!p=6MR){r%-qlPSqmzM=*fA!(d%V6#No;|Mx zjHeR=8~8rs5VeQ3s|!SxdCqq|bLPzC?b;x`@pe7RqumED?bqL{mD`=kzy0_BynXHv z^IA**F!+IjHY%}zQuI4{fnL~*PZ~2)Cs)wEc2`YrNN{qy#hZThnenbUxd1WsCo&ej znlD)i1OnW$hVUev?WI!GW%`T*O~>3<@(=r5`{g;~zI_Lz0uMa`fJlA-pV%w{U3c39 z&l&PP-k~>T7YY)12<@Yd6Ce`U1<)o31)YIHGVR$Y+xG75vH3w=KN~2gb<2Yr7bhRR z_t)V9+2ZXxI%79T40ybtWRv_c?WA5lb>=~5XjSOZcn!W9yE6W}HtK;FR3Phh*K{`LoHD5{8nIGLv`jJv8*M z1M{@Dm32?I4?b;gWtcVQ(0U}O$Em_0!c)_1v}tOO;19uP+$)8V)Riu5qrXYcSs z-CNZ)03O!yFl-5*(A_(??{0}YFT$F&Ui6i(OX+|1#FkC@d+E~MVM#|0zdouZ&V{LM z?D?Jtqxvd4a{io_G!C^bGdbT5hJx2_->Kp@;QHFNYlFWogq6U=)>RE-e9j?b->+UX zTL+0ZCbJTn(seK6OR^09LT}@%LH~_)cJ1b2Z%NM5#%0(9eO>|;Ta+Gsm~I<^*JO!6 z)~Gz{+7a-LFUYKMCJT_dpV|DXbAjQow2@@d=byD!qXlu#=kxfSziTaN9*L51J(?ny 
zmSt}(kH4HTLRSe++)S3M_8@=w@YWmskpZg;I$Wb?evaPus;pJByi6anDfaB$U!~xe(OcE!(ao;fq@w53r%qgzgR+l;wd zKR5tmbvIZZfv?D+8K700?SKoy#UXmz8;}V7h57SiFaR~gvOEwLFmyh^iBrA*;GqF( za8L$7-U7(>+?1inSs=I^0`$B^&yp{jb3Ub2wnLF7LJ63{=-$0?UnXtNeBdQ;z#%)i zTI}2|t7rG71XS2jf7GFY8jYYckCIb)81^cAGUsoW;j%hgHdth$m^r$Aeut_FB6uRG9Zs1KUQ=& zCqGB*&4W7=svA3W?$gPGHA^P19Xc{O8AyA*y|~pE0zaJK(v$VU15T>!qbZ;Qq+4ZX;vc`S= zrmVpXo-&CUxyk}>WIPBu>?ry|c&{&*A~RDjVi~il4K4@lC=ohw@>JKRINDFWjsMv{ z|MN0d1KqgQ1u8hZvjJv|NLkFXy8*pUuXoC_ z2q8nuJkX}#hpbslbT!)f;Nt+m_S^xbgD%@g3VCsRvmz$D8HhEUlLR9MLkASWm&Z;v z-|iFBcFO?{FKwJN_01$SfD*ET^nshMqn0HJvxLOuYtzF5h7;X5hH=^&Q%1uWY^;wH zBA$WabRe=$m!3*V=4w*slTTjgKIY7Vzcr_0t0`wO0T@p*7`WN+}O!n2X49 zn$FIjO_&B^#&n#r*_q9OjyI#9XJuU1W@w|npo>$Y)BaaW1Id9<*3BvAvIF<--DjId zdGu`8-OP#B!){~Q^{%^+9MXp4VuskpdgI+LBS%EuAb_V7;9Slo+4BAGe}B*xa5>S_ zn(5cPH3eN(e;FB{XVC3TW}=1p%DivgeHdt5kLeWiy}R{gs|3<%(r3<|9j#=Z6~)t1 z#Kv|>H1#auU0aY0{6QxvN^PY6$N%O}t1|fQ#%h z@9r(lAxAKN_`&zyo&3j}KQHUzR`(DvTQk~iIJ8l_B0c9QWCx7=n}7W;=(zA{; zHrj7Y@x^h^0Y;@L@#FiyeLwnleg!$%9<>I5-T?wPH@buT;_TkJ9WcFNPqM6vr3eF` z4kg{Z??$`EEov?xef`GGWd7!|KIW@ZI5+yFJ$^I|E(uI*+dMn@-gn>4IsVS5QuyV2 z??vB5)mu{;fDEs@88tvX;2m8iLub)q2hb={lhGNt6m7o>1Ttnkf79kS(nT5H?Xfbm zhdD@`gyBXf@90RpFIXoyJ^-VhM?V9J97HI?gdUYKrZgXY`TfB?Ao|oHkV%AoAKwlf zkG4i;arYl5#V41{0pQQkY9VtZa@t!r>rXBjkC*#02nTqIE0UEnwLaQbw#!_7{OCg0 zaxNI_cuN}$#ty@=-^TsOee>er3OsS5l-sRpj)J}_94=N`%BgS<`$cvRP|!fSm$4Ww zK*Jbip#Zp^>2=<71_4I&?(s67ALk&9Ik;z^5Qy%&M~%b0Wj{W={kTlQGG&t)yV8>l zI{uhR-p|&OZhHk(H)|VqHGNuDTKf1#2J)#YI<8&o9FMUcizXj_^ilGm0Dhnxjbq$& z*%ZWR9<3_}WlNbI0`^~Je+az0o~_o@rmwN&Q(J-)NLqPoI64`r10*fbwJ5$EPIu2? zNV|L-+S=Lz{f2SP0Z(o}tL@5@DrD$J_u+7pgNrNopT4ri+AHf~-3>-xRN(&N^gsQG z7t!nf&hyw6uiwh9xOwABcICCnkG}t-;S>pMn+xZF9isvd@B%cS^6PXzoGGA1)8d)t zhSr-W+3GU_Zh$g+RfPh_d$fXU?}h_p$gP8aQ_#G?RL+{uB_$9NPvhpIW0g+f$9-NA+wUxvw`4fknFmB{bD-x>TqUcf3X{# zS7MVmgQ^hNQM-2MV3pZvjDQmBFtTIf6Pe;u@I>&U1HXGV+C?(}OhL94@uxkVg0{4e z>jMCz&C!Oiy}TapUFwjwmZtR!Pz}7WcbCgVl7K+|NvhzW=zXgN!>jXE*b&1PYRnQ{ zoEgBrpDC~>AmGL3;d}{b-49gMAJh5*lK=-=TX1yFmYuGhz}OY*&>Ew4$t2`c1k{|& zwN?+&Z( z;NhDzribzOt%5-K2%ux{^b)?2jileIj466X#f^l55u8mYsJ>gBi~>>sr|&dg_g$4u z^Tx{8N0QUk_-W8wdi{D?(+=>qjvE4V3cnA#=KT3{!@dJR0X7%47VHb-AWz8yKkMC2 z=c;~c*u$3PW&x1%J;>5Tz>rzZda^7)0w|_3?&dXM=elgp0Qh(rV0G@R-{kLo(fa1N zM~{5Qd)G@W%I1jxhM&Y9@%qC*`6t6Kx>4rrvSr%q+>8ec61Er3y5}p0cfL#@W9zhJ z{nB(pywjNJB(@UTP!U2NO&0|B_Ocn#j`yF`(^gOf&y%@RS{>`mnLs(~;bfKD?97zu*Cx zoNj2oeQ*RJ=!M~*z3E*qKgbn`3tE@s`ne{$*?CaK~Gn)dX&mI42 za=J|A-TM!h;MSQjRggSRR?cWIryRd$I-}>}*;B1Fd0$WEXl8dl-vGlJhq2HnBS4X! 
zTh!e7JPu%975^>Cz7qU@lI~g9*ql+p7LW)83}ov|n$rS~E;o1jo}5%k=9$`Z^s&JM z_8UL1S+7-AYcm)j{oLB9Hr>>DXZ$ezecycZyMr$Tmb|Rb+I4Kf0QSa%&-ln2vc35T zR|`s>IQB{7FF=qT&d)y7;jt<&j(zdrur+NX9kMx$2ADMt+-%GaMZ8hw>FPc^crRV` zG`XeS2$_33zt}vc)j82w@ zF6eqNIeM=)WnbpwEuZX)zP4`NG3G#jNTi|zM$3-$VJtD8C6kuj@q?fJNp{qxDml*7 z3ill1&gQ<9t`TIeiZNMku?17vmvp>Li0kq zo}Z4I7LMc-p3VZ``BgMm({+ z)|UbeXvp=F*A^|(-baGr(<%^-wU4VN2M)iUk6rKg_K*#!f?CgX_j{Q>mJsB;I&?0O zqN0mDd!Pyezx723vX9obwukoaE+|oJ*mzfUN5dQI;0D&u?_?TXl=d3y5a|!uJU$cc z#O}lIAy3iUs@2(nCCmx*;!U=zm$qNK_wE@qK6!aRIe#f0-7@)qdAhGAyRS9B?;AM- zjhsOQk^sYDI-CPICx1U{6VoI}bobu>z1Fjy_7|A#+m9{)F;?VZVqdq*dK<4^xI zeJ}|-JyRU?tjg#4_^HAJD;Rqqf6gF~I(-KO#|)-$tcy3ytT*1u{f@VvO|dJ#M~C6N zRp2C9bWajGnz4QT*6!k))|?J#LnSdAqtvq$;IKb*-Nj3@eWnqziaZo=wBO|T3V$ei zlQT&eMPCX6CS8V1S3G-K92igZA@OsC(Op&z(|VT9C9C92#XjF`y2n-%J<*-dh=z(2 zV3j(qmg37|SA}|X;D+dZ;rit<7K#4bl}*Lhbe*o5h3&VOPtIq{bWIp(yZFm5znH8( zb)tZ>U^wHgjc`@3GRsePMHQaV1|U6ChTX-TK#g@d0T-)Y`R>~zqw-Exlrygt;6{f9 z)Vg~8S~Gt4PL`$2fOamhBvd<7Dx<9;BDi?$N&wm*T#LO}OZ#8`>XT88r)|i^eVp@s zyLV3BdHc=DH{X3XIdS^ja7^BL|J~6($eHAj;zCA5d#9OlFI#LK`fzf3je`&%kpRLs zgqpB+XpbHgIHZ^z27xrQMuoJ)W_&jX7Ml~MV(Ze}seqqPZBMYDI`d2d`OGrRCY0$fW@bbfX3_Tlht?!ZoEz`rfi;^I9SyiEmLcLKI$ zsI{u7%=LL|a?CK&gT~K8sWncu2too?b?7UP`60)TU zlzEr3otW(8$rAzaTY7z2M!`%<-!pFqUOEf(*jCjzP)(-pQqCF&?c48;2O4!rOM$-( z_?4LP(pA8B+2|d+MtpdXE_{d5Gu@*KCPy~h|6 zpxd*u<_wq$1$5Jqz?mCBeEehHC1rKyYyfu(yAO$%w1A`_6M9ZR6+&BT%Nab@8Yx4L zA|?9LP#6a^z?7al0<@G20aryu8JVqb4)(QPXMWif5GzxCJK7~Pes6{rqi6o!sS-?6 zXS)xHuK`6vAv*Nd1gKorIZ{|3y!+uue4ILSs&l&A=RF)_lpX}+Ud;fs+`d$-rpS8F z)~aL#e6}V)p@i=~$XU%fUKR~?2ne*y-MX0UkIS)ohA|KT#JRb9wft5|6)3f0dB8w? zjHUwwHD-qr<5WZ-yk{T4ij7s>Zjr6ec{q1=cCu~5s>wh2J3lXJ^!iYMDnB@`%UX}X zQt|;0k?Fqe`zqX?EtniTajtPON5%+@QyG(=DK|56WnhjmEGk9be*0~#Gvxl@ zp*P~4ZeK1t4;0+qjQiEAH%~r0d^9=~AZL&lB+pl_bt!9As+?o8zqUYlLsinM#)0y5 z(w&SGLn|4m${mO#akytkt((a+c~}_lY-*g!Kl#W1NzUl4$wwc5GL*iN6k6>B>MPR8 zfV9uPI9!%KgIE=v`wYfg>%F}{k4vJR?>!HbDb*)S3$8Pi z^JV4JGm>8RL5HpiXrC#$crT-(mD`9E=W>4b+;G6?z3n@8jFAr~PoFMOxs;Q;H=YlC zi}x8Xi3f&<st!WoZ>>^hFZgehL&YTfl~Z=-|OalVAPf z-;9fPM=)ILn*<8OdVcoOe|~ znav2ml6g9b{Sy!juPsW@0|F6&`v6Z`FHlZp0vk)maN@PYt0*51aI585yGMXb0+JS` z&z{v5X&ugw&elcYA76mfmyX%C0-jw}bA5B{y8!~evTOI`{rBFR{L6py=Tm2^*N#A# zdtA&)fg%M;_J*g7Wf>J1Y*0SIi}V8802Y@f|1Sz0*&&w}d%*xd(qo)Uf44S&?2YbU z+}ix#T3@6?hdjqu!{%n6Vu_xYPnYgl*zc9jcx^kaQcnk3!G9fhY&kpVsl!L>qnsUn z7H|UW=qTq5kd}y&XxVq*P#rc~Cg)0)UA!jdm zl|0i5(f&I2s6vRZYrOzIekB0P-271h(C07eZkg9B3>b4`^A~6GW$?nSfNTjEpCeg& zGyfQ^T;j~JHv&$uhL`Qdm%mGz^S#XbbRi+%q1xNQ?<(PSD!SVPAbIeO9~Ai98dc^l zBceCPquQfjfSQ&G(Z0)q$mg5Z`M*cQK~*jzmK5Ho`Dg={@-b9p-F-PyxlEG@VN@Ue+CQ%)Yow|BY}$DJ56b^Opxx(7B? zwcH0!R;_QoWp=0!4Ro({9pB zCyrDhnoUn1>0VQ)L0_o2icJSk(iM0{;zE~)%N5Pcrvr^l;?K`zQ|=`PX7>UO_~~@W zgXD~!Q86Vsc&kM2wQE=6yKGHoBfJ!R&IGKg%9YI2k;ezKx5dfqi?%%OONvHh^Fay9 z`vGhGaL*ZCi|vu@-pL2yhpIf4)OQQX)?yTUqZ93g?!d1q$M{$0PL;fG{%2ZzoQ*lF zf=B0lv*6rCRqn-DnBH^I_}R0klZQq>r2jn=AFv58=d%JPeEBSXio~kdvE#smoqG?A z_sr*yD@b6w2hiS{1nYPPy%#4lqi7(Zo9gFj;7g_k@5OH|v2MMoKH+ELUHTJE*cLVi z@N~OGw6RHa;*w%jx<|L~!8hI?;GAF>NMlyM>Q>JuaH?(}Jub*E7I2xWnfRk3KHYI& zu~EDTBtuJaLJzLvDv+pmyFg6Lx0qgTtq~#lzS8G*@nL6p{`85-E3dyd`N4;OIQbX< z+y5Hx)Tx?1*Y1C^(M6j#t(&~{_PbS$znJ{$&;R%0zCDvY`wrD@v8-y{Gx1gPxATY1 zHD3H^dpr&^m^w;w#cF1^RLhw zxZ*s~&_2b|l1@6emvx5I@mtdhQQ+gFhipgNPqq|`;T0WK5@P%ivES5&i^19FVxIU? 
z@=MH39@&j42tV~Te6Bh6)~c8Se~&n!^P+pum<yng7Ham)(XqA*=PpgY4X`z8q1yVfCi1_d(JN^7^;e(t z`bHP*+c^r}$ga&QX2E0l$T&9T#--Z8@E&is{% z$?E8j?^O=3Z;yOhz5`vx-aYE>7TsF(ye?#2`bMklsk`}cfB)ilzZ!`|@&ix8gM;VD zLeHT)_)*q`!SLbfQ^zY9?b6?L{L+oZFWE|QYlInkSYqSGeBBE@&-`kjt&x@ruiW)P z&ZZp((=IK%u)Rb?3(p>0E#5RAn~ymeAf@q0#@ISvy4qf5vh^EE&fz7O^=>QPat{SVqG{1U&+rD0-z&IZpY5PlyHkibVr;u^u^-*YkM#~ zLe;gH)7e-Hf9HOc%Eq0pEB2xP#MJ1hm|_$*bPz@iDN=lzOr`|Jv(t|GWC81~c|QN^ zelo(wE$#Ud)_~FB<0o@gqYLCg{w!f*%e>#e*5NNclYj6uJd6&YH&%8=d~-U<_x$$t z8sYw>rs2Y=z$ z7b|tErZYqry?$A~sl?cm3V*F7xS`jFW2B>vL^c&-Ngav`NyL?_v(L+xY-~-@qBA3# zE|XVGLH2d^D};VroZep=!Pi|C=PUfZ36p#=dFP$?#niy?G7UMi<~}a(VGc|X;Z}5q zdOK#CUeKVbrRu8`JYe)x0@_YTkk28%o&n-$yKqz6-J1bVT7y-RJihs)mXX~


+
+                      > **Note**: `CredentialSpec.File` and `CredentialSpec.Registry` are mutually exclusive.
+                  Registry:
+                    type: "string"
+                    description: |
+                      Load credential spec from this value in the Windows registry. The specified registry value must be
+                      located in:
+
+                      `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs`
+
+                      > **Note**: `CredentialSpec.File` and `CredentialSpec.Registry` are mutually exclusive.
+              SELinuxContext:
+                type: "object"
+                description: "SELinux labels of the container"
+                properties:
+                  Disable:
+                    type: "boolean"
+                    description: "Disable SELinux"
+                  User:
+                    type: "string"
+                    description: "SELinux user label"
+                  Role:
+                    type: "string"
+                    description: "SELinux role label"
+                  Type:
+                    type: "string"
+                    description: "SELinux type label"
+                  Level:
+                    type: "string"
+                    description: "SELinux level label"
+          TTY:
+            description: "Whether a pseudo-TTY should be allocated."
+            type: "boolean"
+          OpenStdin:
+            description: "Open `stdin`"
+            type: "boolean"
+          ReadOnly:
+            description: "Mount the container's root filesystem as read only."
+            type: "boolean"
+          Mounts:
+            description: "Specification for mounts to be added to containers created as part of the service."
+            type: "array"
+            items:
+              $ref: "#/definitions/Mount"
+          StopSignal:
+            description: "Signal to stop the container."
+            type: "string"
+          StopGracePeriod:
+            description: "Amount of time to wait for the container to terminate before forcefully killing it."
+            type: "integer"
+            format: "int64"
+          HealthCheck:
+            $ref: "#/definitions/HealthConfig"
+          Hosts:
+            type: "array"
+            description: |
+              A list of hostnames/IP mappings to add to the container's `/etc/hosts` file.
+              The format of extra hosts on swarmkit is specified in:
+              http://man7.org/linux/man-pages/man5/hosts.5.html
+                IP_address canonical_hostname [aliases...]
+            items:
+              type: "string"
+          DNSConfig:
+            description: "Specification for DNS related configurations in resolver configuration file (`resolv.conf`)."
+            type: "object"
+            properties:
+              Nameservers:
+                description: "The IP addresses of the name servers."
+                type: "array"
+                items:
+                  type: "string"
+              Search:
+                description: "A search list for host-name lookup."
+                type: "array"
+                items:
+                  type: "string"
+              Options:
+                description: "A list of internal resolver variables to be modified (e.g., `debug`, `ndots:3`, etc.)."
+                type: "array"
+                items:
+                  type: "string"
+          Secrets:
+            description: "Secrets contains references to zero or more secrets that will be exposed to the service."
+            type: "array"
+            items:
+              type: "object"
+              properties:
+                File:
+                  description: "File represents a specific target that is backed by a file."
+                  type: "object"
+                  properties:
+                    Name:
+                      description: "Name represents the final filename in the filesystem."
+                      type: "string"
+                    UID:
+                      description: "UID represents the file UID."
+                      type: "string"
+                    GID:
+                      description: "GID represents the file GID."
+                      type: "string"
+                    Mode:
+                      description: "Mode represents the FileMode of the file."
+                      type: "integer"
+                      format: "uint32"
+                SecretID:
+                  description: "SecretID represents the ID of the specific secret that we're referencing."
+                  type: "string"
+                SecretName:
+                  description: |
+                    SecretName is the name of the secret that this references, but this is just provided for
+                    lookup/display purposes. The secret in the reference will be identified by its ID.
+                  type: "string"
+          Configs:
+            description: "Configs contains references to zero or more configs that will be exposed to the service."
+            type: "array"
+            items:
+              type: "object"
+              properties:
+                File:
+                  description: "File represents a specific target that is backed by a file."
+                  type: "object"
+                  properties:
+                    Name:
+                      description: "Name represents the final filename in the filesystem."
+                      type: "string"
+                    UID:
+                      description: "UID represents the file UID."
+                      type: "string"
+                    GID:
+                      description: "GID represents the file GID."
+                      type: "string"
+                    Mode:
+                      description: "Mode represents the FileMode of the file."
+                      type: "integer"
+                      format: "uint32"
+                ConfigID:
+                  description: "ConfigID represents the ID of the specific config that we're referencing."
+                  type: "string"
+                ConfigName:
+                  description: |
+                    ConfigName is the name of the config that this references, but this is just provided for
+                    lookup/display purposes. The config in the reference will be identified by its ID.
+                  type: "string"
+
+      Resources:
+        description: "Resource requirements which apply to each individual container created as part of the service."
+        type: "object"
+        properties:
+          Limits:
+            description: "Define resources limits."
+            $ref: "#/definitions/ResourceObject"
+          Reservation:
+            description: "Define resources reservation."
+            $ref: "#/definitions/ResourceObject"
+      RestartPolicy:
+        description: "Specification for the restart policy which applies to containers created as part of this service."
+        type: "object"
+        properties:
+          Condition:
+            description: "Condition for restart."
+            type: "string"
+            enum:
+              - "none"
+              - "on-failure"
+              - "any"
+          Delay:
+            description: "Delay between restart attempts."
+            type: "integer"
+            format: "int64"
+          MaxAttempts:
+            description: "Maximum attempts to restart a given container before giving up (default value is 0, which is ignored)."
+            type: "integer"
+            format: "int64"
+            default: 0
+          Window:
+            description: "Windows is the time window used to evaluate the restart policy (default value is 0, which is unbounded)."
+            type: "integer"
+            format: "int64"
+            default: 0
+      Placement:
+        type: "object"
+        properties:
+          Constraints:
+            description: "An array of constraints."
+            type: "array"
+            items:
+              type: "string"
+          Preferences:
+            description: "Preferences provide a way to make the scheduler aware of factors such as topology. They are provided in order from highest to lowest precedence."
+            type: "array"
+            items:
+              type: "object"
+              properties:
+                Spread:
+                  type: "object"
+                  properties:
+                    SpreadDescriptor:
+                      description: "label descriptor, such as engine.labels.az"
+                      type: "string"
+          Platforms:
+            description: "An array of supported platforms."
+            type: "array"
+            items:
+              type: "object"
+              properties:
+                Architecture:
+                  type: "string"
+                OS:
+                  type: "string"
+      ForceUpdate:
+        description: "A counter that triggers an update even if no relevant parameters have been changed."
+        type: "integer"
+      Runtime:
+        description: "Runtime is the type of runtime specified for the task executor."
+        type: "string"
+      Networks:
+        type: "array"
+        items:
+          type: "object"
+          properties:
+            Target:
+              type: "string"
+            Aliases:
+              type: "array"
+              items:
+                type: "string"
+      LogDriver:
+        description: "Specifies the log driver to use for tasks created from this spec. If not present, the default one for the swarm will be used, finally falling back to the engine default if not specified."
+        type: "object"
+        properties:
+          Name:
+            type: "string"
+          Options:
+            type: "object"
+            additionalProperties:
+              type: "string"
+  TaskState:
+    type: "string"
+    enum:
+      - "new"
+      - "allocated"
+      - "pending"
+      - "assigned"
+      - "accepted"
+      - "preparing"
+      - "ready"
+      - "starting"
+      - "running"
+      - "complete"
+      - "shutdown"
+      - "failed"
+      - "rejected"
+  Task:
+    type: "object"
+    properties:
+      ID:
+        description: "The ID of the task."
+        type: "string"
+      Version:
+        $ref: "#/definitions/ObjectVersion"
+      CreatedAt:
+        type: "string"
+        format: "dateTime"
+      UpdatedAt:
+        type: "string"
+        format: "dateTime"
+      Name:
+        description: "Name of the task."
+        type: "string"
+      Labels:
+        description: "User-defined key/value metadata."
+        type: "object"
+        additionalProperties:
+          type: "string"
+      Spec:
+        $ref: "#/definitions/TaskSpec"
+      ServiceID:
+        description: "The ID of the service this task is part of."
+        type: "string"
+      Slot:
+        type: "integer"
+      NodeID:
+        description: "The ID of the node that this task is on."
+        type: "string"
+      AssignedGenericResources:
+        $ref: "#/definitions/GenericResources"
+      Status:
+        type: "object"
+        properties:
+          Timestamp:
+            type: "string"
+            format: "dateTime"
+          State:
+            $ref: "#/definitions/TaskState"
+          Message:
+            type: "string"
+          Err:
+            type: "string"
+          ContainerStatus:
+            type: "object"
+            properties:
+              ContainerID:
+                type: "string"
+              PID:
+                type: "integer"
+              ExitCode:
+                type: "integer"
+      DesiredState:
+        $ref: "#/definitions/TaskState"
+    example:
+      ID: "0kzzo1i0y4jz6027t0k7aezc7"
+      Version:
+        Index: 71
+      CreatedAt: "2016-06-07T21:07:31.171892745Z"
+      UpdatedAt: "2016-06-07T21:07:31.376370513Z"
+      Spec:
+        ContainerSpec:
+          Image: "redis"
+        Resources:
+          Limits: {}
+          Reservations: {}
+        RestartPolicy:
+          Condition: "any"
+          MaxAttempts: 0
+        Placement: {}
+      ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz"
+      Slot: 1
+      NodeID: "60gvrl6tm78dmak4yl7srz94v"
+      Status:
+        Timestamp: "2016-06-07T21:07:31.290032978Z"
+        State: "running"
+        Message: "started"
+        ContainerStatus:
+          ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035"
+          PID: 677
+      DesiredState: "running"
+      NetworksAttachments:
+        - Network:
+            ID: "4qvuz4ko70xaltuqbt8956gd1"
+            Version:
+              Index: 18
+            CreatedAt: "2016-06-07T20:31:11.912919752Z"
+            UpdatedAt: "2016-06-07T21:07:29.955277358Z"
+            Spec:
+              Name: "ingress"
+              Labels:
+                com.docker.swarm.internal: "true"
+              DriverConfiguration: {}
+              IPAMOptions:
+                Driver: {}
+                Configs:
+                  - Subnet: "10.255.0.0/16"
+                    Gateway: "10.255.0.1"
+            DriverState:
+              Name: "overlay"
+              Options:
+                com.docker.network.driver.overlay.vxlanid_list: "256"
+            IPAMOptions:
+              Driver:
+                Name: "default"
+              Configs:
+                - Subnet: "10.255.0.0/16"
+                  Gateway: "10.255.0.1"
+          Addresses:
+            - "10.255.0.10/16"
+      AssignedGenericResources:
+        - DiscreteResourceSpec:
+            Kind: "SSD"
+            Value: 3
+        - NamedResourceSpec:
+            Kind: "GPU"
+            Value: "UUID1"
+        - NamedResourceSpec:
+            Kind: "GPU"
+            Value: "UUID2"
+  ServiceSpec:
+    description: "User modifiable configuration for a service."
+    properties:
+      Name:
+        description: "Name of the service."
+        type: "string"
+      Labels:
+        description: "User-defined key/value metadata."
+        type: "object"
+        additionalProperties:
+          type: "string"
+      TaskTemplate:
+        $ref: "#/definitions/TaskSpec"
+      Mode:
+        description: "Scheduling mode for the service."
+        type: "object"
+        properties:
+          Replicated:
+            type: "object"
+            properties:
+              Replicas:
+                type: "integer"
+                format: "int64"
+          Global:
+            type: "object"
+      UpdateConfig:
+        description: "Specification for the update strategy of the service."
+        type: "object"
+        properties:
+          Parallelism:
+            description: "Maximum number of tasks to be updated in one iteration (0 means unlimited parallelism)."
+            type: "integer"
+            format: "int64"
+          Delay:
+            description: "Amount of time between updates, in nanoseconds."
+            type: "integer"
+            format: "int64"
+          FailureAction:
+            description: "Action to take if an updated task fails to run, or stops running during the update."
+            type: "string"
+            enum:
+              - "continue"
+              - "pause"
+              - "rollback"
+          Monitor:
+            description: "Amount of time to monitor each updated task for failures, in nanoseconds."
+            type: "integer"
+            format: "int64"
+          MaxFailureRatio:
+            description: "The fraction of tasks that may fail during an update before the failure action is invoked, specified as a floating point number between 0 and 1."
+            type: "number"
+            default: 0
+          Order:
+            description: "The order of operations when rolling out an updated task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down."
+            type: "string"
+            enum:
+              - "stop-first"
+              - "start-first"
+      RollbackConfig:
+        description: "Specification for the rollback strategy of the service."
+        type: "object"
+        properties:
+          Parallelism:
+            description: "Maximum number of tasks to be rolled back in one iteration (0 means unlimited parallelism)."
+            type: "integer"
+            format: "int64"
+          Delay:
+            description: "Amount of time between rollback iterations, in nanoseconds."
+            type: "integer"
+            format: "int64"
+          FailureAction:
+            description: "Action to take if an rolled back task fails to run, or stops running during the rollback."
+            type: "string"
+            enum:
+              - "continue"
+              - "pause"
+          Monitor:
+            description: "Amount of time to monitor each rolled back task for failures, in nanoseconds."
+            type: "integer"
+            format: "int64"
+          MaxFailureRatio:
+            description: "The fraction of tasks that may fail during a rollback before the failure action is invoked, specified as a floating point number between 0 and 1."
+            type: "number"
+            default: 0
+          Order:
+            description: "The order of operations when rolling back a task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down."
+            type: "string"
+            enum:
+              - "stop-first"
+              - "start-first"
+      Networks:
+        description: "Array of network names or IDs to attach the service to."
+        type: "array"
+        items:
+          type: "object"
+          properties:
+            Target:
+              type: "string"
+            Aliases:
+              type: "array"
+              items:
+                type: "string"
+      EndpointSpec:
+        $ref: "#/definitions/EndpointSpec"
+  EndpointPortConfig:
+    type: "object"
+    properties:
+      Name:
+        type: "string"
+      Protocol:
+        type: "string"
+        enum:
+          - "tcp"
+          - "udp"
+      TargetPort:
+        description: "The port inside the container."
+        type: "integer"
+      PublishedPort:
+        description: "The port on the swarm hosts."
+        type: "integer"
+  EndpointSpec:
+    description: "Properties that can be configured to access and load balance a service."
+    type: "object"
+    properties:
+      Mode:
+        description: "The mode of resolution to use for internal load balancing
+          between tasks."
+        type: "string"
+        enum:
+          - "vip"
+          - "dnsrr"
+        default: "vip"
+      Ports:
+        description: "List of exposed ports that this service is accessible on from the outside. Ports can only be provided if `vip` resolution mode is used."
+        type: "array"
+        items:
+          $ref: "#/definitions/EndpointPortConfig"
+  Service:
+    type: "object"
+    properties:
+      ID:
+        type: "string"
+      Version:
+        $ref: "#/definitions/ObjectVersion"
+      CreatedAt:
+        type: "string"
+        format: "dateTime"
+      UpdatedAt:
+        type: "string"
+        format: "dateTime"
+      Spec:
+        $ref: "#/definitions/ServiceSpec"
+      Endpoint:
+        type: "object"
+        properties:
+          Spec:
+            $ref: "#/definitions/EndpointSpec"
+          Ports:
+            type: "array"
+            items:
+              $ref: "#/definitions/EndpointPortConfig"
+          VirtualIPs:
+            type: "array"
+            items:
+              type: "object"
+              properties:
+                NetworkID:
+                  type: "string"
+                Addr:
+                  type: "string"
+      UpdateStatus:
+        description: "The status of a service update."
+        type: "object"
+        properties:
+          State:
+            type: "string"
+            enum:
+              - "updating"
+              - "paused"
+              - "completed"
+          StartedAt:
+            type: "string"
+            format: "dateTime"
+          CompletedAt:
+            type: "string"
+            format: "dateTime"
+          Message:
+            type: "string"
+    example:
+      ID: "9mnpnzenvg8p8tdbtq4wvbkcz"
+      Version:
+        Index: 19
+      CreatedAt: "2016-06-07T21:05:51.880065305Z"
+      UpdatedAt: "2016-06-07T21:07:29.962229872Z"
+      Spec:
+        Name: "hopeful_cori"
+        TaskTemplate:
+          ContainerSpec:
+            Image: "redis"
+          Resources:
+            Limits: {}
+            Reservations: {}
+          RestartPolicy:
+            Condition: "any"
+            MaxAttempts: 0
+          Placement: {}
+          ForceUpdate: 0
+        Mode:
+          Replicated:
+            Replicas: 1
+        UpdateConfig:
+          Parallelism: 1
+          Delay: 1000000000
+          FailureAction: "pause"
+          Monitor: 15000000000
+          MaxFailureRatio: 0.15
+        RollbackConfig:
+          Parallelism: 1
+          Delay: 1000000000
+          FailureAction: "pause"
+          Monitor: 15000000000
+          MaxFailureRatio: 0.15
+        EndpointSpec:
+          Mode: "vip"
+          Ports:
+            -
+              Protocol: "tcp"
+              TargetPort: 6379
+              PublishedPort: 30001
+      Endpoint:
+        Spec:
+          Mode: "vip"
+          Ports:
+            -
+              Protocol: "tcp"
+              TargetPort: 6379
+              PublishedPort: 30001
+        Ports:
+          -
+            Protocol: "tcp"
+            TargetPort: 6379
+            PublishedPort: 30001
+        VirtualIPs:
+          -
+            NetworkID: "4qvuz4ko70xaltuqbt8956gd1"
+            Addr: "10.255.0.2/16"
+          -
+            NetworkID: "4qvuz4ko70xaltuqbt8956gd1"
+            Addr: "10.255.0.3/16"
+  ImageDeleteResponseItem:
+    type: "object"
+    properties:
+      Untagged:
+        description: "The image ID of an image that was untagged"
+        type: "string"
+      Deleted:
+        description: "The image ID of an image that was deleted"
+        type: "string"
+  ServiceUpdateResponse:
+    type: "object"
+    properties:
+      Warnings:
+        description: "Optional warning messages"
+        type: "array"
+        items:
+          type: "string"
+    example:
+      Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found"
+  ContainerSummary:
+    type: "array"
+    items:
+      type: "object"
+      properties:
+        Id:
+          description: "The ID of this container"
+          type: "string"
+          x-go-name: "ID"
+        Names:
+          description: "The names that this container has been given"
+          type: "array"
+          items:
+            type: "string"
+        Image:
+          description: "The name of the image used when creating this container"
+          type: "string"
+        ImageID:
+          description: "The ID of the image that this container was created from"
+          type: "string"
+        Command:
+          description: "Command to run when starting the container"
+          type: "string"
+        Created:
+          description: "When the container was created"
+          type: "integer"
+          format: "int64"
+        Ports:
+          description: "The ports exposed by this container"
+          type: "array"
+          items:
+            $ref: "#/definitions/Port"
+        SizeRw:
+          description: "The size of files that have been created or changed by this container"
+          type: "integer"
+          format: "int64"
+        SizeRootFs:
+          description: "The total size of all the files in this container"
+          type: "integer"
+          format: "int64"
+        Labels:
+          description: "User-defined key/value metadata."
+          type: "object"
+          additionalProperties:
+            type: "string"
+        State:
+          description: "The state of this container (e.g. `Exited`)"
+          type: "string"
+        Status:
+          description: "Additional human-readable status of this container (e.g. `Exit 0`)"
+          type: "string"
+        HostConfig:
+          type: "object"
+          properties:
+            NetworkMode:
+              type: "string"
+        NetworkSettings:
+          description: "A summary of the container's network settings"
+          type: "object"
+          properties:
+            Networks:
+              type: "object"
+              additionalProperties:
+                $ref: "#/definitions/EndpointSettings"
+        Mounts:
+          type: "array"
+          items:
+            $ref: "#/definitions/Mount"
+  SecretSpec:
+    type: "object"
+    properties:
+      Name:
+        description: "User-defined name of the secret."
+        type: "string"
+      Labels:
+        description: "User-defined key/value metadata."
+        type: "object"
+        additionalProperties:
+          type: "string"
+      Data:
+        description: "Base64-url-safe-encoded secret data"
+        type: "array"
+        items:
+          type: "string"
+  Secret:
+    type: "object"
+    properties:
+      ID:
+        type: "string"
+      Version:
+        $ref: "#/definitions/ObjectVersion"
+      CreatedAt:
+        type: "string"
+        format: "dateTime"
+      UpdatedAt:
+        type: "string"
+        format: "dateTime"
+      Spec:
+        $ref: "#/definitions/SecretSpec"
+  ConfigSpec:
+    type: "object"
+    properties:
+      Name:
+        description: "User-defined name of the config."
+        type: "string"
+      Labels:
+        description: "User-defined key/value metadata."
+        type: "object"
+        additionalProperties:
+          type: "string"
+      Data:
+        description: "Base64-url-safe-encoded config data"
+        type: "array"
+        items:
+          type: "string"
+  Config:
+    type: "object"
+    properties:
+      ID:
+        type: "string"
+      Version:
+        $ref: "#/definitions/ObjectVersion"
+      CreatedAt:
+        type: "string"
+        format: "dateTime"
+      UpdatedAt:
+        type: "string"
+        format: "dateTime"
+      Spec:
+        $ref: "#/definitions/ConfigSpec"
+
+paths:
+  /containers/json:
+    get:
+      summary: "List containers"
+      description: |
+        Returns a list of containers. For details on the format, see [the inspect endpoint](#operation/ContainerInspect).
+
+        Note that it uses a different, smaller representation of a container than inspecting a single container. For example,
+        the list of linked containers is not propagated .
+      operationId: "ContainerList"
+      produces:
+        - "application/json"
+      parameters:
+        - name: "all"
+          in: "query"
+          description: "Return all containers. By default, only running containers are shown"
+          type: "boolean"
+          default: false
+        - name: "limit"
+          in: "query"
+          description: "Return this number of most recently created containers, including non-running ones."
+          type: "integer"
+        - name: "size"
+          in: "query"
+          description: "Return the size of container as fields `SizeRw` and `SizeRootFs`."
+          type: "boolean"
+          default: false
+        - name: "filters"
+          in: "query"
+          description: |
+            Filters to process on the container list, encoded as JSON (a `map[string][]string`). For example, `{"status": ["paused"]}` will only return paused containers.
Available filters:
+
+            - `ancestor`=(`<image-name>[:<tag>]`, `<image id>`, or `<image@digest>`)
+            - `before`=(`<container id>` or `<container name>`)
+            - `expose`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`)
+            - `exited=<int>` containers with exit code of `<int>`
+            - `health`=(`starting`|`healthy`|`unhealthy`|`none`)
+            - `id=<ID>` a container's ID
+            - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only)
+            - `is-task=`(`true`|`false`)
+            - `label=key` or `label="key=value"` of a container label
+            - `name=<name>` a container's name
+            - `network`=(`<network id>` or `<network name>`)
+            - `publish`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`)
+            - `since`=(`<container id>` or `<container name>`)
+            - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`)
+            - `volume`=(`<volume name>` or `<mount point destination>`)
+          type: "string"
+      responses:
+        200:
+          description: "no error"
+          schema:
+            $ref: "#/definitions/ContainerSummary"
+          examples:
+            application/json:
+              - Id: "8dfafdbc3a40"
+                Names:
+                  - "/boring_feynman"
+                Image: "ubuntu:latest"
+                ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82"
+                Command: "echo 1"
+                Created: 1367854155
+                State: "Exited"
+                Status: "Exit 0"
+                Ports:
+                  - PrivatePort: 2222
+                    PublicPort: 3333
+                    Type: "tcp"
+                Labels:
+                  com.example.vendor: "Acme"
+                  com.example.license: "GPL"
+                  com.example.version: "1.0"
+                SizeRw: 12288
+                SizeRootFs: 0
+                HostConfig:
+                  NetworkMode: "default"
+                NetworkSettings:
+                  Networks:
+                    bridge:
+                      NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812"
+                      EndpointID: "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f"
+                      Gateway: "172.17.0.1"
+                      IPAddress: "172.17.0.2"
+                      IPPrefixLen: 16
+                      IPv6Gateway: ""
+                      GlobalIPv6Address: ""
+                      GlobalIPv6PrefixLen: 0
+                      MacAddress: "02:42:ac:11:00:02"
+                Mounts:
+                  - Name: "fac362...80535"
+                    Source: "/data"
+                    Destination: "/data"
+                    Driver: "local"
+                    Mode: "ro,Z"
+                    RW: false
+                    Propagation: ""
+              - Id: "9cd87474be90"
+                Names:
+                  - "/coolName"
+                Image: "ubuntu:latest"
+                ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82"
+                Command: "echo 222222"
+                Created: 1367854155
+                State: "Exited"
+                Status: "Exit 0"
+                Ports: []
+                Labels: {}
+                SizeRw: 12288
+                SizeRootFs: 0
+                HostConfig:
+                  NetworkMode: "default"
+                NetworkSettings:
+                  Networks:
+                    bridge:
+                      NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812"
+                      EndpointID: "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a"
+                      Gateway: "172.17.0.1"
+                      IPAddress: "172.17.0.8"
+                      IPPrefixLen: 16
+                      IPv6Gateway: ""
+                      GlobalIPv6Address: ""
+                      GlobalIPv6PrefixLen: 0
+                      MacAddress: "02:42:ac:11:00:08"
+                Mounts: []
+              - Id: "3176a2479c92"
+                Names:
+                  - "/sleepy_dog"
+                Image: "ubuntu:latest"
+                ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82"
+                Command: "echo 3333333333333333"
+                Created: 1367854154
+                State: "Exited"
+                Status: "Exit 0"
+                Ports: []
+                Labels: {}
+                SizeRw: 12288
+                SizeRootFs: 0
+                HostConfig:
+                  NetworkMode: "default"
+                NetworkSettings:
+                  Networks:
+                    bridge:
+                      NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812"
+                      EndpointID: "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d"
+                      Gateway: "172.17.0.1"
+                      IPAddress: "172.17.0.6"
+                      IPPrefixLen: 16
+                      IPv6Gateway: ""
+                      GlobalIPv6Address: ""
+                      GlobalIPv6PrefixLen: 0
+                      MacAddress: "02:42:ac:11:00:06"
+                Mounts: []
+              - Id: "4cb07b47f9fb"
+                Names:
+                  - "/running_cat"
+                Image: "ubuntu:latest"
+                ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82"
+                Command: "echo 444444444444444444444444444444444"
+                Created: 1367854152
+                State: "Exited"
+                Status: "Exit 0"
+                Ports: []
+                Labels: {}
+                SizeRw: 12288
+                SizeRootFs: 0
+                HostConfig:
+                  NetworkMode: "default"
NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.5" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:05" + Mounts: [] + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/create: + post: + summary: "Create a container" + operationId: "ContainerCreate" + consumes: + - "application/json" + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "name" + in: "query" + description: "Assign the specified name to the container. Must match `/?[a-zA-Z0-9_-]+`." + type: "string" + pattern: "/?[a-zA-Z0-9_-]+" + - name: "body" + in: "body" + description: "Container to create" + schema: + allOf: + - $ref: "#/definitions/ContainerConfig" + - type: "object" + properties: + HostConfig: + $ref: "#/definitions/HostConfig" + NetworkingConfig: + description: "This container's networking configuration." + type: "object" + properties: + EndpointsConfig: + description: "A mapping of network name to endpoint configuration for that network." + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + example: + Hostname: "" + Domainname: "" + User: "" + AttachStdin: false + AttachStdout: true + AttachStderr: true + Tty: false + OpenStdin: false + StdinOnce: false + Env: + - "FOO=bar" + - "BAZ=quux" + Cmd: + - "date" + Entrypoint: "" + Image: "ubuntu" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + Volumes: + /volumes/data: {} + WorkingDir: "" + NetworkDisabled: false + MacAddress: "12:34:56:78:9a:bc" + ExposedPorts: + 22/tcp: {} + StopSignal: "SIGTERM" + StopTimeout: 10 + HostConfig: + Binds: + - "/tmp:/tmp" + Links: + - "redis3:redis" + Memory: 0 + MemorySwap: 0 + MemoryReservation: 0 + KernelMemory: 0 + NanoCPUs: 500000 + CpuPercent: 80 + CpuShares: 512 + CpuPeriod: 100000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpuQuota: 50000 + CpusetCpus: "0,1" + CpusetMems: "0,1" + MaximumIOps: 0 + MaximumIOBps: 0 + BlkioWeight: 300 + BlkioWeightDevice: + - {} + BlkioDeviceReadBps: + - {} + BlkioDeviceReadIOps: + - {} + BlkioDeviceWriteBps: + - {} + BlkioDeviceWriteIOps: + - {} + MemorySwappiness: 60 + OomKillDisable: false + OomScoreAdj: 500 + PidMode: "" + PidsLimit: -1 + PortBindings: + 22/tcp: + - HostPort: "11022" + PublishAllPorts: false + Privileged: false + ReadonlyRootfs: false + Dns: + - "8.8.8.8" + DnsOptions: + - "" + DnsSearch: + - "" + VolumesFrom: + - "parent" + - "other:ro" + CapAdd: + - "NET_ADMIN" + CapDrop: + - "MKNOD" + GroupAdd: + - "newgroup" + RestartPolicy: + Name: "" + MaximumRetryCount: 0 + AutoRemove: true + NetworkMode: "bridge" + Devices: [] + Ulimits: + - {} + LogConfig: + Type: "json-file" + Config: {} + SecurityOpt: [] + StorageOpt: {} + CgroupParent: "" + VolumeDriver: "" + ShmSize: 67108864 + NetworkingConfig: + EndpointsConfig: + isolated_nw: + IPAMConfig: + IPv4Address: "172.20.30.33" + IPv6Address: "2001:db8:abcd::3033" + LinkLocalIPs: + - "169.254.34.68" + - "fe80::3468" + Links: + - "container_1" + - "container_2" + Aliases: + - "server_x" + - "server_y" + + required: true + responses: + 201: + description: "Container created successfully" + schema: + 
type: "object" + required: [Id, Warnings] + properties: + Id: + description: "The ID of the created container" + type: "string" + x-nullable: false + Warnings: + description: "Warnings encountered when creating the container" + type: "array" + x-nullable: false + items: + type: "string" + examples: + application/json: + Id: "e90e34656806" + Warnings: [] + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 406: + description: "impossible to attach" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/{id}/json: + get: + summary: "Inspect a container" + description: "Return low-level information about a container." + operationId: "ContainerInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + Id: + description: "The ID of the container" + type: "string" + Created: + description: "The time the container was created" + type: "string" + Path: + description: "The path to the command being run" + type: "string" + Args: + description: "The arguments to the command being run" + type: "array" + items: + type: "string" + State: + description: "The state of the container." + type: "object" + properties: + Status: + description: | + The status of the container. For example, `"running"` or `"exited"`. + type: "string" + enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"] + Running: + description: | + Whether this container is running. + + Note that a running container can be _paused_. The `Running` and `Paused` + booleans are not mutually exclusive: + + When pausing a container (on Linux), the cgroups freezer is used to suspend + all processes in the container. Freezing the process requires the process to + be running. As a result, paused containers are both `Running` _and_ `Paused`. + + Use the `Status` field instead to determine if a container's state is "running". + type: "boolean" + Paused: + description: "Whether this container is paused." + type: "boolean" + Restarting: + description: "Whether this container is restarting." + type: "boolean" + OOMKilled: + description: "Whether this container has been killed because it ran out of memory." + type: "boolean" + Dead: + type: "boolean" + Pid: + description: "The process ID of this container" + type: "integer" + ExitCode: + description: "The last exit code of this container" + type: "integer" + Error: + type: "string" + StartedAt: + description: "The time when this container was last started." + type: "string" + FinishedAt: + description: "The time when this container last exited." 
+ type: "string" + Image: + description: "The container's image" + type: "string" + ResolvConfPath: + type: "string" + HostnamePath: + type: "string" + HostsPath: + type: "string" + LogPath: + type: "string" + Node: + description: "TODO" + type: "object" + Name: + type: "string" + RestartCount: + type: "integer" + Driver: + type: "string" + MountLabel: + type: "string" + ProcessLabel: + type: "string" + AppArmorProfile: + type: "string" + ExecIDs: + type: "string" + HostConfig: + $ref: "#/definitions/HostConfig" + GraphDriver: + $ref: "#/definitions/GraphDriverData" + SizeRw: + description: "The size of files that have been created or changed by this container." + type: "integer" + format: "int64" + SizeRootFs: + description: "The total size of all the files in this container." + type: "integer" + format: "int64" + Mounts: + type: "array" + items: + $ref: "#/definitions/MountPoint" + Config: + $ref: "#/definitions/ContainerConfig" + NetworkSettings: + $ref: "#/definitions/NetworkConfig" + examples: + application/json: + AppArmorProfile: "" + Args: + - "-c" + - "exit 9" + Config: + AttachStderr: true + AttachStdin: false + AttachStdout: true + Cmd: + - "/bin/sh" + - "-c" + - "exit 9" + Domainname: "" + Env: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Hostname: "ba033ac44011" + Image: "ubuntu" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + MacAddress: "" + NetworkDisabled: false + OpenStdin: false + StdinOnce: false + Tty: false + User: "" + Volumes: + /volumes/data: {} + WorkingDir: "" + StopSignal: "SIGTERM" + StopTimeout: 10 + Created: "2015-01-06T15:47:31.485331387Z" + Driver: "devicemapper" + HostConfig: + MaximumIOps: 0 + MaximumIOBps: 0 + BlkioWeight: 0 + BlkioWeightDevice: + - {} + BlkioDeviceReadBps: + - {} + BlkioDeviceWriteBps: + - {} + BlkioDeviceReadIOps: + - {} + BlkioDeviceWriteIOps: + - {} + ContainerIDFile: "" + CpusetCpus: "" + CpusetMems: "" + CpuPercent: 80 + CpuShares: 0 + CpuPeriod: 100000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + Devices: [] + IpcMode: "" + LxcConf: [] + Memory: 0 + MemorySwap: 0 + MemoryReservation: 0 + KernelMemory: 0 + OomKillDisable: false + OomScoreAdj: 500 + NetworkMode: "bridge" + PidMode: "" + PortBindings: {} + Privileged: false + ReadonlyRootfs: false + PublishAllPorts: false + RestartPolicy: + MaximumRetryCount: 2 + Name: "on-failure" + LogConfig: + Type: "json-file" + Sysctls: + net.ipv4.ip_forward: "1" + Ulimits: + - {} + VolumeDriver: "" + ShmSize: 67108864 + HostnamePath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname" + HostsPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts" + LogPath: "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log" + Id: "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39" + Image: "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2" + MountLabel: "" + Name: "/boring_euclid" + NetworkSettings: + Bridge: "" + SandboxID: "" + HairpinMode: false + LinkLocalIPv6Address: "" + LinkLocalIPv6PrefixLen: 0 + SandboxKey: "" + SecondaryIPAddresses: null + SecondaryIPv6Addresses: null + EndpointID: "" + Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + IPAddress: "" + IPPrefixLen: 0 + IPv6Gateway: "" + MacAddress: "" + Networks: + bridge: + NetworkID: 
"7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:12:00:02" + Path: "/bin/sh" + ProcessLabel: "" + ResolvConfPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf" + RestartCount: 1 + State: + Error: "" + ExitCode: 9 + FinishedAt: "2015-01-06T15:47:32.080254511Z" + OOMKilled: false + Dead: false + Paused: false + Pid: 0 + Restarting: false + Running: true + StartedAt: "2015-01-06T15:47:32.072697474Z" + Status: "running" + Mounts: + - Name: "fac362...80535" + Source: "/data" + Destination: "/data" + Driver: "local" + Mode: "ro,Z" + RW: false + Propagation: "" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "size" + in: "query" + type: "boolean" + default: false + description: "Return the size of container as fields `SizeRw` and `SizeRootFs`" + tags: ["Container"] + /containers/{id}/top: + get: + summary: "List processes running inside a container" + description: "On Unix systems, this is done by running the `ps` command. This endpoint is not supported on Windows." + operationId: "ContainerTop" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + Titles: + description: "The ps column titles" + type: "array" + items: + type: "string" + Processes: + description: "Each process running in the container, where each is process is an array of values corresponding to the titles" + type: "array" + items: + type: "array" + items: + type: "string" + examples: + application/json: + Titles: + - "UID" + - "PID" + - "PPID" + - "C" + - "STIME" + - "TTY" + - "TIME" + - "CMD" + Processes: + - + - "root" + - "13642" + - "882" + - "0" + - "17:03" + - "pts/0" + - "00:00:00" + - "/bin/bash" + - + - "root" + - "13735" + - "13642" + - "0" + - "17:06" + - "pts/0" + - "00:00:00" + - "sleep 10" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "ps_args" + in: "query" + description: "The arguments to pass to `ps`. For example, `aux`" + type: "string" + default: "-ef" + tags: ["Container"] + /containers/{id}/logs: + get: + summary: "Get container logs" + description: | + Get `stdout` and `stderr` logs from a container. + + Note: This endpoint works only for containers with the `json-file` or `journald` logging driver. 
+ operationId: "ContainerLogs" + responses: + 101: + description: "logs returned as a stream" + schema: + type: "string" + format: "binary" + 200: + description: "logs returned as a string in response body" + schema: + type: "string" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "follow" + in: "query" + description: | + Return the logs as a stream. + + This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines." + type: "string" + default: "all" + tags: ["Container"] + /containers/{id}/changes: + get: + summary: "Get changes on a container’s filesystem" + description: | + Returns which files in a container's filesystem have been added, deleted, + or modified. The `Kind` of modification can be one of: + + - `0`: Modified + - `1`: Added + - `2`: Deleted + operationId: "ContainerChanges" + produces: ["application/json"] + responses: + 200: + description: "The list of changes" + schema: + type: "array" + items: + type: "object" + x-go-name: "ContainerChangeResponseItem" + required: [Path, Kind] + properties: + Path: + description: "Path to file that has changed" + type: "string" + x-nullable: false + Kind: + description: "Kind of change" + type: "integer" + format: "uint8" + enum: [0, 1, 2] + x-nullable: false + examples: + application/json: + - Path: "/dev" + Kind: 0 + - Path: "/dev/kmsg" + Kind: 1 + - Path: "/test" + Kind: 1 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/export: + get: + summary: "Export a container" + description: "Export the contents of a container as a tarball." 
+ operationId: "ContainerExport" + produces: + - "application/octet-stream" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/stats: + get: + summary: "Get container stats based on resource usage" + description: | + This endpoint returns a live stream of a container’s resource usage + statistics. + + The `precpu_stats` is the CPU statistic of last read, which is used + for calculating the CPU usage percentage. It is not the same as the + `cpu_stats` field. + + If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is + nil then for compatibility with older daemons the length of the + corresponding `cpu_usage.percpu_usage` array should be used. + operationId: "ContainerStats" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + type: "object" + examples: + application/json: + read: "2015-01-08T22:57:31.547920715Z" + pids_stats: + current: 3 + networks: + eth0: + rx_bytes: 5338 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 36 + tx_bytes: 648 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 8 + eth5: + rx_bytes: 4641 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 26 + tx_bytes: 690 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 9 + memory_stats: + stats: + total_pgmajfault: 0 + cache: 0 + mapped_file: 0 + total_inactive_file: 0 + pgpgout: 414 + rss: 6537216 + total_mapped_file: 0 + writeback: 0 + unevictable: 0 + pgpgin: 477 + total_unevictable: 0 + pgmajfault: 0 + total_rss: 6537216 + total_rss_huge: 6291456 + total_writeback: 0 + total_inactive_anon: 0 + rss_huge: 6291456 + hierarchical_memory_limit: 67108864 + total_pgfault: 964 + total_active_file: 0 + active_anon: 6537216 + total_active_anon: 6537216 + total_pgpgout: 414 + total_cache: 0 + inactive_anon: 0 + active_file: 0 + pgfault: 964 + inactive_file: 0 + total_pgpgin: 477 + max_usage: 6651904 + usage: 6537216 + failcnt: 0 + limit: 67108864 + blkio_stats: {} + cpu_stats: + cpu_usage: + percpu_usage: + - 8646879 + - 24472255 + - 36438778 + - 30657443 + usage_in_usermode: 50000000 + total_usage: 100215355 + usage_in_kernelmode: 30000000 + system_cpu_usage: 739306590000000 + online_cpus: 4 + throttling_data: + periods: 0 + throttled_periods: 0 + throttled_time: 0 + precpu_stats: + cpu_usage: + percpu_usage: + - 8646879 + - 24350896 + - 36438778 + - 30657443 + usage_in_usermode: 50000000 + total_usage: 100093996 + usage_in_kernelmode: 30000000 + system_cpu_usage: 9492140000000 + online_cpus: 4 + throttling_data: + periods: 0 + throttled_periods: 0 + throttled_time: 0 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "stream" + in: "query" + description: "Stream the output. If false, the stats will be output once and then it will disconnect." 
+ type: "boolean" + default: true + tags: ["Container"] + /containers/{id}/resize: + post: + summary: "Resize a container TTY" + description: "Resize the TTY for a container. You must restart the container for the resize to take effect." + operationId: "ContainerResize" + consumes: + - "application/octet-stream" + produces: + - "text/plain" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "cannot resize container" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "h" + in: "query" + description: "Height of the tty session in characters" + type: "integer" + - name: "w" + in: "query" + description: "Width of the tty session in characters" + type: "integer" + tags: ["Container"] + /containers/{id}/start: + post: + summary: "Start a container" + operationId: "ContainerStart" + responses: + 204: + description: "no error" + 304: + description: "container already started" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." + type: "string" + tags: ["Container"] + /containers/{id}/stop: + post: + summary: "Stop a container" + operationId: "ContainerStop" + responses: + 204: + description: "no error" + 304: + description: "container already stopped" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/restart: + post: + summary: "Restart a container" + operationId: "ContainerRestart" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/kill: + post: + summary: "Kill a container" + description: "Send a POSIX signal to a container, defaulting to killing to the container." 
+ operationId: "ContainerKill" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: "Signal to send to the container as an integer or string (e.g. `SIGINT`)" + type: "string" + default: "SIGKILL" + tags: ["Container"] + /containers/{id}/update: + post: + summary: "Update a container" + description: "Change various configuration options of a container without having to recreate it." + operationId: "ContainerUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "The container has been updated." + schema: + type: "object" + properties: + Warnings: + type: "array" + items: + type: "string" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "update" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + example: + BlkioWeight: 300 + CpuShares: 512 + CpuPeriod: 100000 + CpuQuota: 50000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpusetCpus: "0,1" + CpusetMems: "0" + Memory: 314572800 + MemorySwap: 514288000 + MemoryReservation: 209715200 + KernelMemory: 52428800 + RestartPolicy: + MaximumRetryCount: 4 + Name: "on-failure" + tags: ["Container"] + /containers/{id}/rename: + post: + summary: "Rename a container" + operationId: "ContainerRename" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "name already in use" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "name" + in: "query" + required: true + description: "New name for the container" + type: "string" + tags: ["Container"] + /containers/{id}/pause: + post: + summary: "Pause a container" + description: | + Use the cgroups freezer to suspend all processes in a container. + + Traditionally, when suspending a process the `SIGSTOP` signal is used, which is observable by the process being suspended. With the cgroups freezer the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed. 
+ operationId: "ContainerPause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/unpause: + post: + summary: "Unpause a container" + description: "Resume a container which has been paused." + operationId: "ContainerUnpause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/attach: + post: + summary: "Attach to a container" + description: | + Attach to a container to read its output or send it input. You can attach to the same container multiple times and you can reattach to containers that have been detached. + + Either the `stream` or `logs` parameter must be `true` for this endpoint to do anything. + + See [the documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) for more details. + + ### Hijacking + + This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, and `stderr` on the same socket. + + This is the response from the daemon for an attach request: + + ``` + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + [STREAM] + ``` + + After the headers and two new lines, the TCP connection can now be used for raw, bidirectional communication between the client and server. + + To hint potential proxies about connection hijacking, the Docker client can also optionally send connection upgrade headers. + + For example, the client sends this request to upgrade the connection: + + ``` + POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1 + Upgrade: tcp + Connection: Upgrade + ``` + + The Docker daemon will respond with a `101 UPGRADED` response, and will similarly follow with the raw stream: + + ``` + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + [STREAM] + ``` + + ### Stream format + + When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), the stream over the hijacked connected is multiplexed to separate out `stdout` and `stderr`. The stream consists of a series of frames, each containing a header and a payload. + + The header contains the information which the stream writes (`stdout` or `stderr`). It also contains the size of the associated frame encoded in the last four bytes (`uint32`). + + It is encoded on the first eight bytes like this: + + ```go + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + ``` + + `STREAM_TYPE` can be: + + - 0: `stdin` (is written on `stdout`) + - 1: `stdout` + - 2: `stderr` + + `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size encoded as big endian. + + Following the header is the payload, which is the specified number of bytes of `STREAM_TYPE`. 
+
+        The simplest way to implement this protocol is the following:
+
+        1. Read 8 bytes.
+        2. Choose `stdout` or `stderr` depending on the first byte.
+        3. Extract the frame size from the last four bytes.
+        4. Read the extracted size and output it on the correct output.
+        5. Goto 1.
+
+        ### Stream format when using a TTY
+
+        When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), the stream is not multiplexed. The data exchanged over the hijacked connection is simply the raw data from the process PTY and client's `stdin`.
+
+      operationId: "ContainerAttach"
+      produces:
+        - "application/vnd.docker.raw-stream"
+      responses:
+        101:
+          description: "no error, hints proxy about hijacking"
+        200:
+          description: "no error, no upgrade header found"
+        400:
+          description: "bad parameter"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        404:
+          description: "no such container"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+          examples:
+            application/json:
+              message: "No such container: c2ada9df5af8"
+        500:
+          description: "server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "id"
+          in: "path"
+          required: true
+          description: "ID or name of the container"
+          type: "string"
+        - name: "detachKeys"
+          in: "query"
+          description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`."
+          type: "string"
+        - name: "logs"
+          in: "query"
+          description: |
+            Replay previous logs from the container.
+
+            This is useful for attaching to a container that has started and you want to output everything since the container started.
+
+            If `stream` is also enabled, once all the previous output has been returned, it will seamlessly transition into streaming current output.
+          type: "boolean"
+          default: false
+        - name: "stream"
+          in: "query"
+          description: "Stream attached streams from the time the request was made onwards"
+          type: "boolean"
+          default: false
+        - name: "stdin"
+          in: "query"
+          description: "Attach to `stdin`"
+          type: "boolean"
+          default: false
+        - name: "stdout"
+          in: "query"
+          description: "Attach to `stdout`"
+          type: "boolean"
+          default: false
+        - name: "stderr"
+          in: "query"
+          description: "Attach to `stderr`"
+          type: "boolean"
+          default: false
+      tags: ["Container"]
+  /containers/{id}/attach/ws:
+    get:
+      summary: "Attach to a container via a websocket"
+      operationId: "ContainerAttachWebsocket"
+      responses:
+        101:
+          description: "no error, hints proxy about hijacking"
+        200:
+          description: "no error, no upgrade header found"
+        400:
+          description: "bad parameter"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        404:
+          description: "no such container"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+          examples:
+            application/json:
+              message: "No such container: c2ada9df5af8"
+        500:
+          description: "server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "id"
+          in: "path"
+          required: true
+          description: "ID or name of the container"
+          type: "string"
+        - name: "detachKeys"
+          in: "query"
+          description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,`, or `_`."
+ type: "string" + - name: "logs" + in: "query" + description: "Return logs" + type: "boolean" + default: false + - name: "stream" + in: "query" + description: "Return stream" + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/wait: + post: + summary: "Wait for a container" + description: "Block until a container stops, then returns the exit code." + operationId: "ContainerWait" + produces: ["application/json"] + responses: + 200: + description: "The container has exit." + schema: + type: "object" + required: [StatusCode] + properties: + StatusCode: + description: "Exit code of the container" + type: "integer" + x-nullable: false + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "condition" + in: "query" + description: "Wait until a container state reaches the given condition, either 'not-running' (default), 'next-exit', or 'removed'." + type: "string" + default: "not-running" + tags: ["Container"] + /containers/{id}: + delete: + summary: "Remove a container" + operationId: "ContainerDelete" + responses: + 204: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "You cannot remove a running container: c2ada9df5af8. Stop the container before attempting removal or force remove" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "v" + in: "query" + description: "Remove the volumes associated with the container." + type: "boolean" + default: false + - name: "force" + in: "query" + description: "If the container is running, kill it before removing it." + type: "boolean" + default: false + - name: "link" + in: "query" + description: "Remove the specified link associated with the container." + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/archive: + head: + summary: "Get information about files in a container" + description: "A response header `X-Docker-Container-Path-Stat` is return containing a base64 - encoded JSON object with some filesystem header information about the path." + operationId: "ContainerArchiveInfo" + responses: + 200: + description: "no error" + headers: + X-Docker-Container-Path-Stat: + type: "string" + description: "TODO" + 400: + description: "Bad parameter" + schema: + allOf: + - $ref: "#/definitions/ErrorResponse" + - type: "object" + properties: + message: + description: "The error message. 
Either \"must specify path parameter\" (path cannot be empty) or \"not a directory\" (path was asserted to be a directory but exists as a file)." + type: "string" + x-nullable: false + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + get: + summary: "Get an archive of a filesystem resource in a container" + description: "Get a tar archive of a resource in the filesystem of container id." + operationId: "ContainerArchive" + produces: ["application/x-tar"] + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + allOf: + - $ref: "#/definitions/ErrorResponse" + - type: "object" + properties: + message: + description: "The error message. Either \"must specify path parameter\" (path cannot be empty) or \"not a directory\" (path was asserted to be a directory but exists as a file)." + type: "string" + x-nullable: false + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + put: + summary: "Extract an archive of files or folders to a directory in a container" + description: "Upload a tar archive to be extracted to a path in the filesystem of container id." + operationId: "PutContainerArchive" + consumes: ["application/x-tar", "application/octet-stream"] + responses: + 200: + description: "The content was extracted successfully" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "Permission denied, the volume or container rootfs is marked as read-only." + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such container or path does not exist inside the container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Path to a directory in the container to extract the archive’s contents into. " + type: "string" + - name: "noOverwriteDirNonDir" + in: "query" + description: "If “1”, “true”, or “True” then it will be an error if unpacking the given content would cause an existing directory to be replaced with a non-directory and vice versa." + type: "string" + - name: "inputStream" + in: "body" + required: true + description: "The input stream must be a tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." 
+          schema:
+            type: "string"
+      tags: ["Container"]
+  /containers/prune:
+    post:
+      summary: "Delete stopped containers"
+      produces:
+        - "application/json"
+      operationId: "ContainerPrune"
+      parameters:
+        - name: "filters"
+          in: "query"
+          description: |
+            Filters to process on the prune list, encoded as JSON (a `map[string][]string`).
+
+            Available filters:
+            - `until=<timestamp>` Prune containers created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time.
+            - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune containers with (or without, in case `label!=...` is used) the specified labels.
+          type: "string"
+      responses:
+        200:
+          description: "No error"
+          schema:
+            type: "object"
+            properties:
+              ContainersDeleted:
+                description: "Container IDs that were deleted"
+                type: "array"
+                items:
+                  type: "string"
+              SpaceReclaimed:
+                description: "Disk space reclaimed in bytes"
+                type: "integer"
+                format: "int64"
+        500:
+          description: "Server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      tags: ["Container"]
+  /images/json:
+    get:
+      summary: "List Images"
+      description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image."
+      operationId: "ImageList"
+      produces:
+        - "application/json"
+      responses:
+        200:
+          description: "Summary image data for the images matching the query"
+          schema:
+            type: "array"
+            items:
+              $ref: "#/definitions/ImageSummary"
+          examples:
+            application/json:
+              - Id: "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8"
+                ParentId: ""
+                RepoTags:
+                  - "ubuntu:12.04"
+                  - "ubuntu:precise"
+                RepoDigests:
+                  - "ubuntu@sha256:992069aee4016783df6345315302fa59681aae51a8eeb2f889dea59290f21787"
+                Created: 1474925151
+                Size: 103579269
+                VirtualSize: 103579269
+                SharedSize: 0
+                Labels: {}
+                Containers: 2
+              - Id: "sha256:3e314f95dcace0f5e4fd37b10862fe8398e3c60ed36600bc0ca5fda78b087175"
+                ParentId: ""
+                RepoTags:
+                  - "ubuntu:12.10"
+                  - "ubuntu:quantal"
+                RepoDigests:
+                  - "ubuntu@sha256:002fba3e3255af10be97ea26e476692a7ebed0bb074a9ab960b2e7a1526b15d7"
+                  - "ubuntu@sha256:68ea0200f0b90df725d99d823905b04cf844f6039ef60c60bf3e019915017bd3"
+                Created: 1403128455
+                Size: 172064416
+                VirtualSize: 172064416
+                SharedSize: 0
+                Labels: {}
+                Containers: 5
+        500:
+          description: "server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "all"
+          in: "query"
+          description: "Show all images. Only images from a final layer (no children) are shown by default."
+          type: "boolean"
+          default: false
+        - name: "filters"
+          in: "query"
+          description: |
+            A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters:
+
+            - `before`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`)
+            - `dangling=true`
+            - `label=key` or `label="key=value"` of an image label
+            - `reference`=(`<image-name>[:<tag>]`)
+            - `since`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`)
+          type: "string"
+        - name: "digests"
+          in: "query"
+          description: "Show digest information as a `RepoDigests` field on each image."
+          type: "boolean"
+          default: false
+      tags: ["Image"]
+  /build:
+    post:
+      summary: "Build an image"
+      description: |
+        Build an image from a tar archive with a `Dockerfile` in it.
+
+        The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter.
[See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/). + + The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output. + + The build is canceled if the client drops the connection by quitting or being killed. + operationId: "ImageBuild" + consumes: + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "inputStream" + in: "body" + description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." + schema: + type: "string" + format: "binary" + - name: "dockerfile" + in: "query" + description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`." + type: "string" + default: "Dockerfile" + - name: "t" + in: "query" + description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters." + type: "string" + - name: "extrahosts" + in: "query" + description: "Extra hosts to add to /etc/hosts" + type: "string" + - name: "remote" + in: "query" + description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball." + type: "string" + - name: "q" + in: "query" + description: "Suppress verbose build output." + type: "boolean" + default: false + - name: "nocache" + in: "query" + description: "Do not use the cache when building the image." + type: "boolean" + default: false + - name: "cachefrom" + in: "query" + description: "JSON array of images used for build cache resolution." + type: "string" + - name: "pull" + in: "query" + description: "Attempt to pull the image even if an older image exists locally." + type: "string" + - name: "rm" + in: "query" + description: "Remove intermediate containers after a successful build." + type: "boolean" + default: true + - name: "forcerm" + in: "query" + description: "Always remove intermediate containers, even upon failure." + type: "boolean" + default: false + - name: "memory" + in: "query" + description: "Set memory limit for build." + type: "integer" + - name: "memswap" + in: "query" + description: "Total memory (memory + swap). Set as `-1` to disable swap." + type: "integer" + - name: "cpushares" + in: "query" + description: "CPU shares (relative weight)." + type: "integer" + - name: "cpusetcpus" + in: "query" + description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)." + type: "string" + - name: "cpuperiod" + in: "query" + description: "The length of a CPU period in microseconds." + type: "integer" + - name: "cpuquota" + in: "query" + description: "Microseconds of CPU time that the container can get in a CPU period." + type: "integer" + - name: "buildargs" + in: "query" + description: "JSON map of string pairs for build-time variables. Users pass these values at build-time. 
Docker uses the buildargs as the environment context for commands run via the `Dockerfile` RUN instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for passing secret values. [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg)" + type: "integer" + - name: "shmsize" + in: "query" + description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB." + type: "integer" + - name: "squash" + in: "query" + description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*" + type: "boolean" + - name: "labels" + in: "query" + description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs." + type: "string" + - name: "networkmode" + in: "query" + description: "Sets the networking mode for the run commands during + build. Supported standard values are: `bridge`, `host`, `none`, and + `container:`. Any other value is taken as a custom network's + name to which this container should connect to." + type: "string" + - name: "Content-type" + in: "header" + type: "string" + enum: + - "application/x-tar" + default: "application/x-tar" + - name: "X-Registry-Config" + in: "header" + description: | + This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to. + + The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example: + + ``` + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + ``` + + Only the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API. + type: "string" + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /build/prune: + post: + summary: "Delete builder cache" + produces: + - "application/json" + operationId: "BuildPrune" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /images/create: + post: + summary: "Create an image" + description: "Create an image by either pulling it from a registry or importing it." + operationId: "ImageCreate" + consumes: + - "text/plain" + - "application/octet-stream" + produces: + - "application/json" + responses: + 200: + description: "no error" + 404: + description: "repository does not exist or no read access" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "fromImage" + in: "query" + description: "Name of the image to pull. The name may include a tag or digest. This parameter may only be used when pulling an image. The pull is cancelled if the HTTP connection is closed." + type: "string" + - name: "fromSrc" + in: "query" + description: "Source to import. 
The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image." + type: "string" + - name: "repo" + in: "query" + description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image." + type: "string" + - name: "tag" + in: "query" + description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled." + type: "string" + - name: "inputImage" + in: "body" + description: "Image content if the value `-` has been specified in fromSrc query parameter" + schema: + type: "string" + required: false + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration. [See the authentication section for details.](#section/Authentication)" + type: "string" + tags: ["Image"] + /images/{name}/json: + get: + summary: "Inspect an image" + description: "Return low-level information about an image." + operationId: "ImageInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Image" + examples: + application/json: + Id: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" + Container: "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a" + Comment: "" + Os: "linux" + Architecture: "amd64" + Parent: "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" + ContainerConfig: + Tty: false + Hostname: "e611e15f9c9d" + Domainname: "" + AttachStdout: false + PublishService: "" + AttachStdin: false + OpenStdin: false + StdinOnce: false + NetworkDisabled: false + OnBuild: [] + Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" + User: "" + WorkingDir: "" + MacAddress: "" + AttachStderr: false + Labels: + com.example.license: "GPL" + com.example.version: "1.0" + com.example.vendor: "Acme" + Env: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Cmd: + - "/bin/sh" + - "-c" + - "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" + DockerVersion: "1.9.0-dev" + VirtualSize: 188359297 + Size: 0 + Author: "" + Created: "2015-09-10T08:30:53.26995814Z" + GraphDriver: + Name: "aufs" + Data: {} + RepoDigests: + - "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + RepoTags: + - "example:1.0" + - "example:latest" + - "example:stable" + Config: + Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" + NetworkDisabled: false + OnBuild: [] + StdinOnce: false + PublishService: "" + AttachStdin: false + OpenStdin: false + Domainname: "" + AttachStdout: false + Tty: false + Hostname: "e611e15f9c9d" + Cmd: + - "/bin/bash" + Env: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Labels: + com.example.vendor: "Acme" + com.example.version: "1.0" + com.example.license: "GPL" + MacAddress: "" + AttachStderr: false + WorkingDir: "" + User: "" + RootFS: + Type: "layers" + Layers: + - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" + - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: someimage (tag: latest)" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + 
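
> A hedged Go sketch of calling this inspect endpoint and decoding just a few fields of the response above (socket path, host placeholder, and the `ubuntu:latest` name are assumptions):

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
)

func main() {
	c := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}
	resp, err := c.Get("http://docker/images/ubuntu:latest/json")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	// Decode only the handful of fields we care about; the full Image
	// schema carries far more.
	var img struct {
		Id       string
		RepoTags []string
		Os       string
	}
	if err := json.NewDecoder(resp.Body).Decode(&img); err != nil {
		panic(err)
	}
	fmt.Println(img.Id, img.RepoTags, img.Os)
}
```
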
parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + tags: ["Image"] + /images/{name}/history: + get: + summary: "Get the history of an image" + description: "Return parent layers of an image." + operationId: "ImageHistory" + produces: ["application/json"] + responses: + 200: + description: "List of image layers" + schema: + type: "array" + items: + type: "object" + x-go-name: HistoryResponseItem + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false + examples: + application/json: + - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" + Created: 1398108230 + CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /" + Tags: + - "ubuntu:lucid" + - "ubuntu:10.04" + Size: 182964289 + Comment: "" + - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8" + Created: 1398108222 + CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/" + Tags: [] + Size: 0 + Comment: "" + - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" + Created: 1371157430 + CreatedBy: "" + Tags: + - "scratch12:latest" + - "scratch:latest" + Size: 0 + Comment: "Imported from -" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + tags: ["Image"] + /images/{name}/push: + post: + summary: "Push an image" + description: | + Push an image to a registry. + + If you wish to push an image onto a private registry, that image must already have a tag which references the registry. For example, `registry.example.com/myimage:latest`. + + The push is cancelled if the HTTP connection is closed. + operationId: "ImagePush" + consumes: + - "application/octet-stream" + responses: + 200: + description: "No error" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID." + type: "string" + required: true + - name: "tag" + in: "query" + description: "The tag to associate with the image on the registry." + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration. [See the authentication section for details.](#section/Authentication)" + type: "string" + required: true + tags: ["Image"] + /images/{name}/tag: + post: + summary: "Tag an image" + description: "Tag an image so that it becomes part of a repository." 
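A sketch of the push flow above, including the required `X-Registry-Auth` header. Everything here is illustrative: the daemon address, registry, image name, and credentials are placeholders, and the URL-safe base64 variant is an assumption about what the daemon accepts:

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"io"
	"net/http"
	"os"
)

func main() {
	const daemon = "http://localhost:2375" // assumption: daemon on a TCP socket

	// X-Registry-Auth carries a base64-encoded JSON auth configuration as
	// described in the authentication section; these credentials are fake.
	auth, _ := json.Marshal(map[string]string{
		"username":      "janedoe",
		"password":      "hunter2",
		"serveraddress": "registry.example.com",
	})

	// The image must already be tagged for the target registry,
	// e.g. registry.example.com/myimage:latest.
	req, err := http.NewRequest("POST",
		daemon+"/images/registry.example.com/myimage/push?tag=latest", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("X-Registry-Auth", base64.URLEncoding.EncodeToString(auth))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Push progress is streamed as JSON lines; closing the connection
	// cancels the push, as the spec notes.
	io.Copy(os.Stdout, resp.Body)
}
```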
+ operationId: "ImageTag" + responses: + 201: + description: "No error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID to tag." + type: "string" + required: true + - name: "repo" + in: "query" + description: "The repository to tag in. For example, `someuser/someimage`." + type: "string" + - name: "tag" + in: "query" + description: "The name of the new tag." + type: "string" + tags: ["Image"] + /images/{name}: + delete: + summary: "Remove an image" + description: | + Remove an image, along with any untagged parent images that were + referenced by that image. + + Images can't be removed if they have descendant images, are being + used by a running container or are being used by a build. + operationId: "ImageDelete" + produces: ["application/json"] + responses: + 200: + description: "The image was deleted successfully" + schema: + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + examples: + application/json: + - Untagged: "3e2f21a89f" + - Deleted: "3e2f21a89f" + - Deleted: "53b4f83ac9" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "force" + in: "query" + description: "Remove the image even if it is being used by stopped containers or has other tags" + type: "boolean" + default: false + - name: "noprune" + in: "query" + description: "Do not delete untagged parent images" + type: "boolean" + default: false + tags: ["Image"] + /images/search: + get: + summary: "Search images" + description: "Search for an image on Docker Hub." + operationId: "ImageSearch" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + type: "object" + properties: + description: + type: "string" + is_official: + type: "boolean" + is_automated: + type: "boolean" + name: + type: "string" + star_count: + type: "integer" + examples: + application/json: + - description: "" + is_official: false + is_automated: false + name: "wma55/u1210sshd" + star_count: 0 + - description: "" + is_official: false + is_automated: false + name: "jdswinbank/sshd" + star_count: 0 + - description: "" + is_official: false + is_automated: false + name: "vgauthier/sshd" + star_count: 0 + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "term" + in: "query" + description: "Term to search" + type: "string" + required: true + - name: "limit" + in: "query" + description: "Maximum number of results to return" + type: "integer" + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: + + - `is-automated=(true|false)` + - `is-official=(true|false)` + - `stars=` Matches images that have at least 'number' stars. 
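The tag and remove endpoints above pair naturally: tag an image under a new name, then delete a reference. A minimal Go sketch, assuming a daemon on `tcp://localhost:2375`; the image ID and repository names are placeholders taken from the examples above:

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	const daemon = "http://localhost:2375" // assumption: daemon on a TCP socket

	// POST /images/{name}/tag: give image 3e2f21a89f (a placeholder ID)
	// the additional name someuser/someimage:v1.
	q := url.Values{}
	q.Set("repo", "someuser/someimage")
	q.Set("tag", "v1")
	resp, err := http.Post(daemon+"/images/3e2f21a89f/tag?"+q.Encode(), "application/json", nil)
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	fmt.Println("tag:", resp.Status) // 201 on success

	// DELETE /images/{name}: remove that reference again; force and noprune
	// keep their documented defaults (false).
	req, _ := http.NewRequest("DELETE", daemon+"/images/someuser/someimage:v1", nil)
	resp, err = http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	fmt.Println("delete:", resp.Status) // 200 with Untagged/Deleted entries in the body
}
```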
+ type: "string" + tags: ["Image"] + /images/prune: + post: + summary: "Delete unused images" + produces: + - "application/json" + operationId: "ImagePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: + + - `dangling=` When set to `true` (or `1`), prune only + unused *and* untagged images. When set to `false` + (or `0`), all unused images are pruned. + - `until=` Prune images created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune images with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + ImagesDeleted: + description: "Images that were deleted" + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /auth: + post: + summary: "Check auth configuration" + description: "Validate credentials for a registry and, if available, get an identity token for accessing the registry without a password." + operationId: "SystemAuth" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "An identity token was generated successfully." + schema: + type: "object" + required: [Status] + properties: + Status: + description: "The status of the authentication" + type: "string" + x-nullable: false + IdentityToken: + description: "An opaque token used to authenticate a user after a successful login" + type: "string" + x-nullable: false + examples: + application/json: + Status: "Login Succeeded" + IdentityToken: "9cbaf023786cd7..." 
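The `dangling` filter semantics for `/images/prune` are worth a concrete illustration. A minimal Go sketch, again assuming a daemon on `tcp://localhost:2375`:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	// Prune only dangling (unused *and* untagged) images, per the filter
	// semantics documented above. filters is itself a JSON-encoded
	// map[string][]string.
	q := url.Values{}
	q.Set("filters", `{"dangling":["true"]}`)

	resp, err := http.Post("http://localhost:2375/images/prune?"+q.Encode(), "application/json", nil)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Mirrors the documented 200 response: ImagesDeleted + SpaceReclaimed.
	var report struct {
		ImagesDeleted []struct {
			Untagged string
			Deleted  string
		}
		SpaceReclaimed int64
	}
	if err := json.NewDecoder(resp.Body).Decode(&report); err != nil {
		panic(err)
	}
	fmt.Printf("reclaimed %d bytes across %d entries\n",
		report.SpaceReclaimed, len(report.ImagesDeleted))
}
```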
+ 204: + description: "No error" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "authConfig" + in: "body" + description: "Authentication to check" + schema: + $ref: "#/definitions/AuthConfig" + tags: ["System"] + /info: + get: + summary: "Get system information" + operationId: "SystemInfo" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + Architecture: + type: "string" + Containers: + type: "integer" + ContainersRunning: + type: "integer" + ContainersStopped: + type: "integer" + ContainersPaused: + type: "integer" + CpuCfsPeriod: + type: "boolean" + CpuCfsQuota: + type: "boolean" + Debug: + type: "boolean" + DiscoveryBackend: + type: "string" + DockerRootDir: + type: "string" + Driver: + type: "string" + DriverStatus: + type: "array" + items: + type: "array" + items: + type: "string" + SystemStatus: + type: "array" + items: + type: "array" + items: + type: "string" + Plugins: + type: "object" + properties: + Volume: + type: "array" + items: + type: "string" + Network: + type: "array" + items: + type: "string" + Log: + type: "array" + items: + type: "string" + ExperimentalBuild: + type: "boolean" + HttpProxy: + type: "string" + HttpsProxy: + type: "string" + ID: + type: "string" + IPv4Forwarding: + type: "boolean" + Images: + type: "integer" + IndexServerAddress: + type: "string" + InitPath: + type: "string" + InitSha1: + type: "string" + KernelVersion: + type: "string" + Labels: + type: "array" + items: + type: "string" + MemTotal: + type: "integer" + GenericResources: + $ref: "#/definitions/GenericResources" + MemoryLimit: + type: "boolean" + NCPU: + type: "integer" + NEventsListener: + type: "integer" + NFd: + type: "integer" + NGoroutines: + type: "integer" + Name: + type: "string" + NoProxy: + type: "string" + OomKillDisable: + type: "boolean" + OSType: + type: "string" + OomScoreAdj: + type: "integer" + OperatingSystem: + type: "string" + RegistryConfig: + type: "object" + properties: + IndexConfigs: + type: "object" + additionalProperties: + type: "object" + properties: + Mirrors: + type: "array" + items: + type: "string" + Name: + type: "string" + Official: + type: "boolean" + Secure: + type: "boolean" + InsecureRegistryCIDRs: + type: "array" + items: + type: "string" + SwapLimit: + type: "boolean" + SystemTime: + type: "string" + ServerVersion: + type: "string" + examples: + application/json: + Architecture: "x86_64" + ClusterStore: "etcd://localhost:2379" + CgroupDriver: "cgroupfs" + Containers: 11 + ContainersRunning: 7 + ContainersStopped: 3 + ContainersPaused: 1 + CpuCfsPeriod: true + CpuCfsQuota: true + Debug: false + DockerRootDir: "/var/lib/docker" + Driver: "btrfs" + DriverStatus: + - + - "" + ExperimentalBuild: false + HttpProxy: "http://test:test@localhost:8080" + HttpsProxy: "https://test:test@localhost:8080" + ID: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" + IPv4Forwarding: true + Images: 16 + IndexServerAddress: "https://index.docker.io/v1/" + InitPath: "/usr/bin/docker" + InitSha1: "" + KernelMemory: true + KernelVersion: "3.12.0-1-amd64" + Labels: + - "storage=ssd" + MemTotal: 2099236864 + MemoryLimit: true + NCPU: 1 + NEventsListener: 0 + NFd: 11 + NGoroutines: 21 + Name: "prod-server-42" + NoProxy: "9.81.1.160" + OomKillDisable: true + OSType: "linux" + OperatingSystem: "Boot2Docker" + Plugins: + Volume: + - "local" + Network: + - "null" + - "host" + - "bridge" + RegistryConfig: + IndexConfigs: + docker.io: + 
Name: "docker.io" + Official: true + Secure: true + InsecureRegistryCIDRs: + - "127.0.0.0/8" + SecurityOptions: + - Key: "Name" + Value: "seccomp" + - Key: "Profile" + Value: "default" + - Key: "Name" + Value: "apparmor" + - Key: "Name" + Value: "selinux" + - Key: "Name" + Value: "userns" + ServerVersion: "1.9.0" + SwapLimit: false + SystemStatus: + - + - "State" + - "Healthy" + SystemTime: "2015-03-10T11:11:23.730591467-07:00" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /version: + get: + summary: "Get version" + description: "Returns the version of Docker that is running and various information about the system that Docker is running on." + operationId: "SystemVersion" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + Version: + type: "string" + ApiVersion: + type: "string" + MinAPIVersion: + type: "string" + GitCommit: + type: "string" + GoVersion: + type: "string" + Os: + type: "string" + Arch: + type: "string" + KernelVersion: + type: "string" + Experimental: + type: "boolean" + BuildTime: + type: "string" + examples: + application/json: + Version: "17.04.0" + Os: "linux" + KernelVersion: "3.19.0-23-generic" + GoVersion: "go1.7.5" + GitCommit: "deadbee" + Arch: "amd64" + ApiVersion: "1.27" + MinAPIVersion: "1.12" + BuildTime: "2016-06-14T07:09:13.444803460+00:00" + Experimental: true + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /_ping: + get: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." + operationId: "SystemPing" + produces: ["text/plain"] + responses: + 200: + description: "no error" + schema: + type: "string" + example: "OK" + headers: + API-Version: + type: "string" + description: "Max API Version the server supports" + Docker-Experimental: + type: "boolean" + description: "If the server is running with experimental mode enabled" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /commit: + post: + summary: "Create a new image from a container" + operationId: "ImageCommit" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "containerConfig" + in: "body" + description: "The container configuration" + schema: + $ref: "#/definitions/ContainerConfig" + - name: "container" + in: "query" + description: "The ID or name of the container to commit" + type: "string" + - name: "repo" + in: "query" + description: "Repository name for the created image" + type: "string" + - name: "tag" + in: "query" + description: "Tag name for the create image" + type: "string" + - name: "comment" + in: "query" + description: "Commit message" + type: "string" + - name: "author" + in: "query" + description: "Author of the image (e.g., `John Hannibal Smith `)" + type: "string" + - name: "pause" + in: "query" + description: "Whether to pause the container before committing" + type: "boolean" + default: true + - name: "changes" + in: "query" + description: "`Dockerfile` instructions to apply while committing" + type: "string" 
+ tags: ["Image"] + /events: + get: + summary: "Monitor events" + description: | + Stream real-time events from the server. + + Various objects within Docker report events when something happens to them. + + Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, and `update` + + Images report these events: `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, and `untag` + + Volumes report these events: `create`, `mount`, `unmount`, and `destroy` + + Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, and `remove` + + The Docker daemon reports these events: `reload` + + Services report these events: `create`, `update`, and `remove` + + Nodes report these events: `create`, `update`, and `remove` + + Secrets report these events: `create`, `update`, and `remove` + + Configs report these events: `create`, `update`, and `remove` + + operationId: "SystemEvents" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + Type: + description: "The type of object emitting the event" + type: "string" + Action: + description: "The type of event" + type: "string" + Actor: + type: "object" + properties: + ID: + description: "The ID of the object emitting the event" + type: "string" + Attributes: + description: "Various key/value attributes of the object, depending on its type" + type: "object" + additionalProperties: + type: "string" + time: + description: "Timestamp of event" + type: "integer" + timeNano: + description: "Timestamp of event, with nanosecond accuracy" + type: "integer" + format: "int64" + examples: + application/json: + Type: "container" + Action: "create" + Actor: + ID: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Attributes: + com.example.some-label: "some-label-value" + image: "alpine" + name: "my-container" + time: 1461943101 + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "since" + in: "query" + description: "Show events created since this timestamp then stream new events." + type: "string" + - name: "until" + in: "query" + description: "Show events created until this timestamp then stop streaming." + type: "string" + - name: "filters" + in: "query" + description: | + A JSON encoded value of filters (a `map[string][]string`) to process on the event list. 
Available filters: + + - `container=` container name or ID + - `daemon=` daemon name or ID + - `event=` event type + - `image=` image name or ID + - `label=` image or container label + - `network=` network name or ID + - `plugin`= plugin name or ID + - `scope`= local or swarm + - `type=` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service` or `secret` + - `volume=` volume name or ID + type: "string" + tags: ["System"] + /system/df: + get: + summary: "Get data usage information" + operationId: "SystemDataUsage" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + LayersSize: + type: "integer" + format: "int64" + Images: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + Containers: + type: "array" + items: + $ref: "#/definitions/ContainerSummary" + Volumes: + type: "array" + items: + $ref: "#/definitions/Volume" + example: + LayersSize: 1092588 + Images: + - + Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + ParentId: "" + RepoTags: + - "busybox:latest" + RepoDigests: + - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6" + Created: 1466724217 + Size: 1092588 + SharedSize: 0 + VirtualSize: 1092588 + Labels: {} + Containers: 1 + Containers: + - + Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148" + Names: + - "/top" + Image: "busybox" + ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + Command: "top" + Created: 1472592424 + Ports: [] + SizeRootFs: 1092588 + Labels: {} + State: "exited" + Status: "Exited (0) 56 minutes ago" + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + IPAMConfig: null + Links: null + Aliases: null + NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92" + EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a" + Gateway: "172.18.0.1" + IPAddress: "172.18.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:12:00:02" + Mounts: [] + Volumes: + - + Name: "my-volume" + Driver: "local" + Mountpoint: "/var/lib/docker/volumes/my-volume/_data" + Labels: null + Scope: "local" + Options: null + UsageData: + Size: 10920104 + RefCount: 2 + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /images/{name}/get: + get: + summary: "Export an image" + description: | + Get a tarball containing all images and metadata for a repository. + + If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced. + + ### Image tarball format + + An image tarball contains one directory per image layer (named using its long ID), each containing these files: + + - `VERSION`: currently `1.0` - the file format version + - `json`: detailed layer information, similar to `docker inspect layer_id` + - `layer.tar`: A tarfile containing the filesystem changes in this layer + + The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. 
+ + If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. + + ```json + { + "hello-world": { + "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" + } + } + ``` + operationId: "ImageGet" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + tags: ["Image"] + /images/get: + get: + summary: "Export several images" + description: | + Get a tarball containing all images and metadata for several image repositories. + + For each value of the `names` parameter: if it is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned; if it is an image ID, similarly only that image (and its parents) are returned and there would be no names referenced in the 'repositories' file for this image ID. + + For details on the format, see [the export image endpoint](#operation/ImageGet). + operationId: "ImageGetAll" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "names" + in: "query" + description: "Image names to filter by" + type: "array" + items: + type: "string" + tags: ["Image"] + /images/load: + post: + summary: "Import images" + description: | + Load a set of images and tags into a repository. + + For details on the format, see [the export image endpoint](#operation/ImageGet). + operationId: "ImageLoad" + consumes: + - "application/x-tar" + produces: + - "application/json" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "imagesTarball" + in: "body" + description: "Tar archive containing images" + schema: + type: "string" + format: "binary" + - name: "quiet" + in: "query" + description: "Suppress progress details during load." + type: "boolean" + default: false + tags: ["Image"] + /containers/{id}/exec: + post: + summary: "Create an exec instance" + description: "Run a command inside a running container." + operationId: "ContainerExec" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "container is paused" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execConfig" + in: "body" + description: "Exec configuration" + schema: + type: "object" + properties: + AttachStdin: + type: "boolean" + description: "Attach to `stdin` of the exec command." + AttachStdout: + type: "boolean" + description: "Attach to `stdout` of the exec command." + AttachStderr: + type: "boolean" + description: "Attach to `stderr` of the exec command." + DetachKeys: + type: "string" + description: "Override the key sequence for detaching a container. 
Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + Env: + description: "A list of environment variables in the form `[\"VAR=value\", ...]`." + type: "array" + items: + type: "string" + Cmd: + type: "array" + description: "Command to run, as a string or array of strings." + items: + type: "string" + Privileged: + type: "boolean" + description: "Runs the exec process with extended privileges." + default: false + User: + type: "string" + description: "The user, and optionally, group to run the exec process inside the container. Format is one of: `user`, `user:group`, `uid`, or `uid:gid`." + example: + AttachStdin: false + AttachStdout: true + AttachStderr: true + DetachKeys: "ctrl-p,ctrl-q" + Tty: false + Cmd: + - "date" + Env: + - "FOO=bar" + - "BAZ=quux" + required: true + - name: "id" + in: "path" + description: "ID or name of container" + type: "string" + required: true + tags: ["Exec"] + /exec/{id}/start: + post: + summary: "Start an exec instance" + description: "Starts a previously set up exec instance. If detach is true, this endpoint returns immediately after starting the command. Otherwise, it sets up an interactive session with the command." + operationId: "ExecStart" + consumes: + - "application/json" + produces: + - "application/vnd.docker.raw-stream" + responses: + 200: + description: "No error" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Container is stopped or paused" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execStartConfig" + in: "body" + schema: + type: "object" + properties: + Detach: + type: "boolean" + description: "Detach from the command." + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + example: + Detach: false + Tty: false + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + /exec/{id}/resize: + post: + summary: "Resize an exec instance" + description: "Resize the TTY session used by an exec instance. This endpoint only works if `tty` was specified as part of creating and starting the exec instance." + operationId: "ExecResize" + responses: + 201: + description: "No error" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + - name: "h" + in: "query" + description: "Height of the TTY session in characters" + type: "integer" + - name: "w" + in: "query" + description: "Width of the TTY session in characters" + type: "integer" + tags: ["Exec"] + /exec/{id}/json: + get: + summary: "Inspect an exec instance" + description: "Return low-level information about an exec instance." + operationId: "ExecInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + ID: + type: "string" + Running: + type: "boolean" + ExitCode: + type: "integer" + ProcessConfig: + $ref: "#/definitions/ProcessConfig" + OpenStdin: + type: "boolean" + OpenStderr: + type: "boolean" + OpenStdout: + type: "boolean" + ContainerID: + type: "string" + Pid: + type: "integer" + description: "The system process ID for the exec process." 
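The exec endpoints are a two-step protocol: create an exec instance on a running container, then start it. A minimal Go sketch assuming a TCP daemon socket; the container name is a placeholder, and stream demultiplexing is deliberately skipped:

```go
package main

import (
	"bytes"
	"encoding/json"
	"io"
	"net/http"
	"os"
)

func main() {
	const daemon = "http://localhost:2375" // assumption: daemon on a TCP socket
	const container = "my-container"       // placeholder container name

	// Step 1: create the exec instance (POST /containers/{id}/exec).
	cfg, _ := json.Marshal(map[string]interface{}{
		"AttachStdout": true,
		"AttachStderr": true,
		"Cmd":          []string{"date"},
	})
	resp, err := http.Post(daemon+"/containers/"+container+"/exec",
		"application/json", bytes.NewReader(cfg))
	if err != nil {
		panic(err)
	}
	var created struct{ Id string } // the documented IdResponse
	json.NewDecoder(resp.Body).Decode(&created)
	resp.Body.Close()

	// Step 2: start it (POST /exec/{id}/start). With Detach false the
	// response body carries the command's output.
	start := bytes.NewReader([]byte(`{"Detach": false, "Tty": false}`))
	resp, err = http.Post(daemon+"/exec/"+created.Id+"/start",
		"application/json", start)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// With Tty false the raw stream is multiplexed with frame headers;
	// a real client would demultiplex instead of copying raw bytes.
	io.Copy(os.Stdout, resp.Body)
}
```

A 409 from either step indicates the container is paused (or stopped, for start), per the responses above.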
+ examples: + application/json: + CanRemove: false + ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126" + DetachKeys: "" + ExitCode: 2 + ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b" + OpenStderr: true + OpenStdin: true + OpenStdout: true + ProcessConfig: + arguments: + - "-c" + - "exit 2" + entrypoint: "sh" + privileged: false + tty: true + user: "1000" + Running: false + Pid: 42000 + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + + /volumes: + get: + summary: "List volumes" + operationId: "VolumeList" + produces: ["application/json"] + responses: + 200: + description: "Summary volume data that matches the query" + schema: + type: "object" + required: [Volumes, Warnings] + properties: + Volumes: + type: "array" + x-nullable: false + description: "List of volumes" + items: + $ref: "#/definitions/Volume" + Warnings: + type: "array" + x-nullable: false + description: "Warnings that occurred when fetching the list of volumes" + items: + type: "string" + + examples: + application/json: + Volumes: + - CreatedAt: "2017-07-19T12:00:26Z" + Name: "tardis" + Driver: "local" + Mountpoint: "/var/lib/docker/volumes/tardis" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Scope: "local" + Options: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" + Warnings: [] + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to + process on the volumes list. Available filters: + + - `dangling=` When set to `true` (or `1`), returns all + volumes that are not in use by a container. When set to `false` + (or `0`), only volumes that are in use by one or more + containers are returned. + - `driver=` Matches volumes based on their driver. + - `label=` or `label=:` Matches volumes based on + the presence of a `label` alone or a `label` and a value. + - `name=` Matches all or part of a volume name. + type: "string" + format: "json" + tags: ["Volume"] + + /volumes/create: + post: + summary: "Create a volume" + operationId: "VolumeCreate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 201: + description: "The volume was created successfully" + schema: + $ref: "#/definitions/Volume" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "volumeConfig" + in: "body" + required: true + description: "Volume configuration" + schema: + type: "object" + properties: + Name: + description: "The new volume's name. If not specified, Docker generates a name." + type: "string" + x-nullable: false + Driver: + description: "Name of the volume driver to use." + type: "string" + default: "local" + x-nullable: false + DriverOpts: + description: "A mapping of driver options and values. These options are passed directly to the driver and are driver specific." + type: "object" + additionalProperties: + type: "string" + Labels: + description: "User-defined key/value metadata." 
+ type: "object" + additionalProperties: + type: "string" + example: + Name: "tardis" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Driver: "custom" + tags: ["Volume"] + + /volumes/{name}: + get: + summary: "Inspect a volume" + operationId: "VolumeInspect" + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Volume" + 404: + description: "No such volume" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + tags: ["Volume"] + + delete: + summary: "Remove a volume" + description: "Instruct the driver to remove the volume." + operationId: "VolumeDelete" + responses: + 204: + description: "The volume was removed" + 404: + description: "No such volume or volume driver" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Volume is in use and cannot be removed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + - name: "force" + in: "query" + description: "Force the removal of the volume" + type: "boolean" + default: false + tags: ["Volume"] + /volumes/prune: + post: + summary: "Delete unused volumes" + produces: + - "application/json" + operationId: "VolumePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune volumes with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + VolumesDeleted: + description: "Volumes that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Volume"] + /networks: + get: + summary: "List networks" + description: | + Returns a list of networks. For details on the format, see [the network inspect endpoint](#operation/NetworkInspect). + + Note that it uses a different, smaller representation of a network than inspecting a single network. For example, + the list of containers attached to the network is not propagated in API versions 1.28 and up. 
+ operationId: "NetworkList" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Network" + examples: + application/json: + - Name: "bridge" + Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566" + Created: "2016-10-19T06:21:00.416543526Z" + Scope: "local" + Driver: "bridge" + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: + - + Subnet: "172.17.0.0/16" + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + - Name: "none" + Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "null" + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + - Name: "host" + Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "host" + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to process on the networks list. Available filters: + + - `driver=` Matches a network's driver. + - `id=` Matches all or part of a network ID. + - `label=` or `label==` of a network label. + - `name=` Matches all or part of a network name. + - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`). + - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. 
+ type: "string" + tags: ["Network"] + + /networks/{id}: + get: + summary: "Inspect a network" + operationId: "NetworkInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Network" + 404: + description: "Network not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "verbose" + in: "query" + description: "Detailed inspect output for troubleshooting" + type: "boolean" + default: false + - name: "scope" + in: "query" + description: "Filter the network by scope (swarm, global, or local)" + type: "string" + tags: ["Network"] + + delete: + summary: "Remove a network" + operationId: "NetworkDelete" + responses: + 204: + description: "No error" + 403: + description: "operation not supported for pre-defined networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such network" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + tags: ["Network"] + + /networks/create: + post: + summary: "Create a network" + operationId: "NetworkCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "No error" + schema: + type: "object" + properties: + Id: + description: "The ID of the created network." + type: "string" + Warning: + type: "string" + example: + Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" + Warning: "" + 403: + description: "operation not supported for pre-defined networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "plugin not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "networkConfig" + in: "body" + description: "Network configuration" + required: true + schema: + type: "object" + required: ["Name"] + properties: + Name: + description: "The network's name." + type: "string" + CheckDuplicate: + description: "Check for networks with duplicate names. Since Network is primarily keyed based on a random ID and not on the name, and network name is strictly a user-friendly alias to the network which is uniquely identified using ID, there is no guaranteed way to check for duplicates. CheckDuplicate is there to provide best-effort checking of any networks that have the same name, but it is not guaranteed to catch all name collisions." + type: "boolean" + Driver: + description: "Name of the network driver plugin to use." + type: "string" + default: "bridge" + Internal: + description: "Restrict external access to the network." + type: "boolean" + Attachable: + description: "Globally scoped network is manually attachable by regular containers from workers in swarm mode." + type: "boolean" + Ingress: + description: "Ingress network is the network which provides the routing-mesh in swarm mode." + type: "boolean" + IPAM: + description: "Optional custom IP scheme for the network." + $ref: "#/definitions/IPAM" + EnableIPv6: + description: "Enable IPv6 on the network." + type: "boolean" + Options: + description: "Network specific options to be used by the drivers." 
+ type: "object" + additionalProperties: + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + Name: "isolated_nw" + CheckDuplicate: false + Driver: "bridge" + EnableIPv6: true + IPAM: + Driver: "default" + Config: + - Subnet: "172.20.0.0/16" + IPRange: "172.20.10.0/24" + Gateway: "172.20.10.11" + - Subnet: "2001:db8:abcd::/64" + Gateway: "2001:db8:abcd::1011" + Options: + foo: "bar" + Internal: true + Attachable: false + Ingress: false + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + tags: ["Network"] + + /networks/{id}/connect: + post: + summary: "Connect a container to a network" + operationId: "NetworkConnect" + consumes: + - "application/octet-stream" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + properties: + Container: + type: "string" + description: "The ID or name of the container to connect to the network." + EndpointConfig: + $ref: "#/definitions/EndpointSettings" + example: + Container: "3613f73ba0e4" + EndpointConfig: + IPAMConfig: + IPv4Address: "172.24.56.89" + IPv6Address: "2001:db8::5689" + tags: ["Network"] + + /networks/{id}/disconnect: + post: + summary: "Disconnect a container from a network" + operationId: "NetworkDisconnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + properties: + Container: + type: "string" + description: "The ID or name of the container to disconnect from the network." + Force: + type: "boolean" + description: "Force the container to disconnect from the network." + tags: ["Network"] + /networks/prune: + post: + summary: "Delete unused networks" + produces: + - "application/json" + operationId: "NetworkPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `until=` Prune networks created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. 
+ - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune networks with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + NetworksDeleted: + description: "Networks that were deleted" + type: "array" + items: + type: "string" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Network"] + /plugins: + get: + summary: "List plugins" + operationId: "PluginList" + description: "Returns information about installed plugins." + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Plugin" + example: + - Id: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" + Name: "tiborvass/sample-volume-plugin" + Tag: "latest" + Active: true + Settings: + Env: + - "DEBUG=0" + Args: null + Devices: null + Config: + Description: "A sample volume plugin for Docker" + Documentation: "https://docs.docker.com/engine/extend/plugins/" + Interface: + Types: + - "docker.volumedriver/1.0" + Socket: "plugins.sock" + Entrypoint: + - "/usr/bin/sample-volume-plugin" + - "/data" + WorkDir: "" + User: {} + Network: + Type: "" + Linux: + Capabilities: null + AllowAllDevices: false + Devices: null + Mounts: null + PropagatedMount: "/data" + Env: + - Name: "DEBUG" + Description: "If set, prints debug messages" + Settable: null + Value: "0" + Args: + Name: "args" + Description: "command line arguments" + Settable: null + Value: [] + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the plugin list. Available filters: + + - `capability=` + - `enable=|` + tags: ["Plugin"] + + /plugins/privileges: + get: + summary: "Get plugin privileges" + operationId: "GetPluginPrivileges" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + description: "Describes a permission the user has to accept upon installing the plugin." + type: "object" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + tags: + - "Plugin" + + /plugins/pull: + post: + summary: "Install a plugin" + operationId: "PluginPull" + description: | + Pulls and installs a plugin. After the plugin is installed, it can be enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). + produces: + - "application/json" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: | + Remote reference for plugin to install. + + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "name" + in: "query" + description: | + Local name for the pulled plugin. 
+ + The `:latest` tag is optional, and is used as the default if omitted. + required: false + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)" + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + description: "Describes a permission accepted by the user upon installing the plugin." + type: "object" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/{name}/json: + get: + summary: "Inspect a plugin" + operationId: "PluginInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + tags: ["Plugin"] + /plugins/{name}: + delete: + summary: "Remove a plugin" + operationId: "PluginDelete" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "force" + in: "query" + description: "Disable the plugin before removing. This may result in issues if the plugin is in use by a container." + type: "boolean" + default: false + tags: ["Plugin"] + /plugins/{name}/enable: + post: + summary: "Enable a plugin" + operationId: "PluginEnable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "timeout" + in: "query" + description: "Set the HTTP client timeout (in seconds)" + type: "integer" + default: 0 + tags: ["Plugin"] + /plugins/{name}/disable: + post: + summary: "Disable a plugin" + operationId: "PluginDisable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." 
+ required: true + type: "string" + tags: ["Plugin"] + /plugins/{name}/upgrade: + post: + summary: "Upgrade a plugin" + operationId: "PluginUpgrade" + responses: + 204: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "remote" + in: "query" + description: | + Remote reference to upgrade to. + + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)" + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + description: "Describes a permission accepted by the user upon installing the plugin." + type: "object" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/create: + post: + summary: "Create a plugin" + operationId: "PluginCreate" + consumes: + - "application/x-tar" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "query" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "tarContext" + in: "body" + description: "Path to tar containing plugin rootfs and manifest" + schema: + type: "string" + format: "binary" + tags: ["Plugin"] + /plugins/{name}/push: + post: + summary: "Push a plugin" + operationId: "PluginPush" + description: | + Push a plugin to the registry. + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + responses: + 200: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /plugins/{name}/set: + post: + summary: "Configure a plugin" + operationId: "PluginSet" + consumes: + - "application/json" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." 
+ required: true + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + type: "string" + example: ["DEBUG=1"] + responses: + 204: + description: "No error" + 404: + description: "Plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /nodes: + get: + summary: "List nodes" + operationId: "NodeList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Node" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `id=` + - `label=` + - `membership=`(`accepted`|`pending`)` + - `name=` + - `role=`(`manager`|`worker`)` + type: "string" + tags: ["Node"] + /nodes/{id}: + get: + summary: "Inspect a node" + operationId: "NodeInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Node" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + tags: ["Node"] + delete: + summary: "Delete a node" + operationId: "NodeDelete" + responses: + 200: + description: "no error" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + - name: "force" + in: "query" + description: "Force remove a node from the swarm" + default: false + type: "boolean" + tags: ["Node"] + /nodes/{id}/update: + post: + summary: "Update a node" + operationId: "NodeUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID of the node" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/NodeSpec" + - name: "version" + in: "query" + description: "The version number of the node object being updated. This is required to avoid conflicting writes." + type: "integer" + format: "int64" + required: true + tags: ["Node"] + /swarm: + get: + summary: "Inspect swarm" + operationId: "SwarmInspect" + responses: + 200: + description: "no error" + schema: + allOf: + - $ref: "#/definitions/ClusterInfo" + - type: "object" + properties: + JoinTokens: + description: "The tokens workers and managers need to join the swarm." 
+ type: "object" + properties: + Worker: + description: "The token workers can use to join the swarm." + type: "string" + Manager: + description: "The token managers can use to join the swarm." + type: "string" + example: + CreatedAt: "2016-08-15T16:00:20.349727406Z" + Spec: + Dispatcher: + HeartbeatPeriod: 5000000000 + Orchestration: + TaskHistoryRetentionLimit: 10 + CAConfig: + NodeCertExpiry: 7776000000000000 + Raft: + LogEntriesForSlowFollowers: 500 + HeartbeatTick: 1 + SnapshotInterval: 10000 + ElectionTick: 3 + TaskDefaults: {} + EncryptionConfig: + AutoLockManagers: false + Name: "default" + JoinTokens: + Worker: "SWMTKN-1-1h8aps2yszaiqmz2l3oc5392pgk8e49qhx2aj3nyv0ui0hez2a-6qmn92w6bu3jdvnglku58u11a" + Manager: "SWMTKN-1-1h8aps2yszaiqmz2l3oc5392pgk8e49qhx2aj3nyv0ui0hez2a-8llk83c4wm9lwioey2s316r9l" + ID: "70ilmkj2f6sp2137c753w2nmt" + UpdatedAt: "2016-08-15T16:32:09.623207604Z" + Version: + Index: 51 + RootRotationInProgress: false + TLSInfo: + TrustRoot: | + -----BEGIN CERTIFICATE----- + MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw + EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 + MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH + A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf + 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB + Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO + PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz + pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H + -----END CERTIFICATE----- + CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" + CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" + 404: + description: "no such swarm" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/init: + post: + summary: "Initialize a new swarm" + operationId: "SwarmInit" + produces: + - "application/json" + - "text/plain" + responses: + 200: + description: "no error" + schema: + description: "The node ID" + type: "string" + example: "7v2t30z9blmxuhnyo6s4cpenp" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + properties: + ListenAddr: + description: "Listen address used for inter-manager communication, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the default swarm listening port is used." + type: "string" + AdvertiseAddr: + description: "Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible." 
+ type: "string" + DataPathAddr: + description: | + Address or interface to use for data path traffic (format: `<ip|interface>`), for example, `192.168.1.1`, + or an interface, like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr` + is used. + + The `DataPathAddr` specifies the address that global scope network drivers will publish towards other + nodes in order to reach the containers running on this node. Using this parameter it is possible to + separate the container data traffic from the management traffic of the cluster. + type: "string" + ForceNewCluster: + description: "Force creation of a new swarm." + type: "boolean" + Spec: + $ref: "#/definitions/SwarmSpec" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + ForceNewCluster: false + Spec: + Orchestration: {} + Raft: {} + Dispatcher: {} + CAConfig: {} + EncryptionConfig: + AutoLockManagers: false + tags: ["Swarm"] + /swarm/join: + post: + summary: "Join an existing swarm" + operationId: "SwarmJoin" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + properties: + ListenAddr: + description: "Listen address used for inter-manager communication if the node gets promoted to manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP)." + type: "string" + AdvertiseAddr: + description: "Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible." + type: "string" + DataPathAddr: + description: | + Address or interface to use for data path traffic (format: `<ip|interface>`), for example, `192.168.1.1`, + or an interface, like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr` + is used. + + The `DataPathAddr` specifies the address that global scope network drivers will publish towards other + nodes in order to reach the containers running on this node. Using this parameter it is possible to + separate the container data traffic from the management traffic of the cluster. + + type: "string" + RemoteAddrs: + description: "Addresses of manager nodes already participating in the swarm." + type: "string" + JoinToken: + description: "Secret token for joining this swarm." + type: "string" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + RemoteAddrs: + - "node1:2377" + JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + tags: ["Swarm"] + /swarm/leave: + post: + summary: "Leave a swarm" + operationId: "SwarmLeave" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "force" + description: "Force leave swarm, even if this is the last manager or if it will break the cluster."
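`SwarmInit` returns only the new node's ID; the join tokens that `/swarm/join` needs are fetched separately via the `/swarm` inspect endpoint. A sketch under the same assumptions as the previous example, reusing its `cli` and `ctx`:

```go
// Bootstrap a swarm and read the worker join token.
req := swarm.InitRequest{
	ListenAddr:    "0.0.0.0:2377",
	AdvertiseAddr: "192.168.1.1:2377",
}
nodeID, err := cli.SwarmInit(ctx, req)
if err != nil {
	panic(err)
}
fmt.Println("manager node ID:", nodeID)

// The join tokens are not returned by init; fetch them via inspect.
sw, err := cli.SwarmInspect(ctx)
if err != nil {
	panic(err)
}
fmt.Println("worker join token:", sw.JoinTokens.Worker)
```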
+ in: "query" + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/update: + post: + summary: "Update a swarm" + operationId: "SwarmUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + $ref: "#/definitions/SwarmSpec" + - name: "version" + in: "query" + description: "The version number of the swarm object being updated. This is required to avoid conflicting writes." + type: "integer" + format: "int64" + required: true + - name: "rotateWorkerToken" + in: "query" + description: "Rotate the worker join token." + type: "boolean" + default: false + - name: "rotateManagerToken" + in: "query" + description: "Rotate the manager join token." + type: "boolean" + default: false + - name: "rotateManagerUnlockKey" + in: "query" + description: "Rotate the manager unlock key." + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/unlockkey: + get: + summary: "Get the unlock key" + operationId: "SwarmUnlockkey" + consumes: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/unlock: + post: + summary: "Unlock a locked manager" + operationId: "SwarmUnlock" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /services: + get: + summary: "List services" + operationId: "ServiceList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Service" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the services list. Available filters: + + - `id=<service id>` + - `label=<service label>` + - `mode=["replicated"|"global"]` + - `name=<service name>` + tags: ["Service"] + /services/create: + post: + summary: "Create a service" + operationId: "ServiceCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + type: "object" + properties: + ID: + description: "The ID of the created service."
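Rotating the manager unlock key above is a two-step flow: post the current version and spec to `/swarm/update` with `rotateManagerUnlockKey=true`, then read the new key from `/swarm/unlockkey`. A sketch, reusing `cli` and `ctx` from the first example:

```go
// Rotate the manager unlock key, then fetch the new key.
sw, err := cli.SwarmInspect(ctx)
if err != nil {
	panic(err)
}
err = cli.SwarmUpdate(ctx, sw.Version, sw.Spec, swarm.UpdateFlags{
	RotateManagerUnlockKey: true,
})
if err != nil {
	panic(err)
}
key, err := cli.SwarmGetUnlockKey(ctx)
if err != nil {
	panic(err)
}
fmt.Println("new unlock key:", key.UnlockKey)
```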
+ type: "string" + Warning: + description: "Optional warning message" + type: "string" + example: + ID: "ak7w3gjqoa3kuz8xcpnyy0pvl" + Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "network is not eligible for services" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "name conflicts with an existing service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "web" + TaskTemplate: + ContainerSpec: + Image: "nginx:alpine" + Mounts: + - + ReadOnly: true + Source: "web-data" + Target: "/usr/share/nginx/html" + Type: "volume" + VolumeOptions: + DriverConfig: {} + Labels: + com.example.something: "something-value" + Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"] + User: "33" + DNSConfig: + Nameservers: ["8.8.8.8"] + Search: ["example.org"] + Options: ["timeout:3"] + Secrets: + - + File: + Name: "www.example.org.key" + UID: "33" + GID: "33" + Mode: 384 + SecretID: "fpjqlhnwb19zds35k8wn80lq9" + SecretName: "example_org_domain_key" + LogDriver: + Name: "json-file" + Options: + max-file: "3" + max-size: "10M" + Placement: {} + Resources: + Limits: + MemoryBytes: 104857600 + Reservations: {} + RestartPolicy: + Condition: "on-failure" + Delay: 10000000000 + MaxAttempts: 10 + Mode: + Replicated: + Replicas: 4 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Ports: + - + Protocol: "tcp" + PublishedPort: 8080 + TargetPort: 80 + Labels: + foo: "bar" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)" + type: "string" + tags: ["Service"] + /services/{id}: + get: + summary: "Inspect a service" + operationId: "ServiceInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Service" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "insertDefaults" + in: "query" + description: "Fill empty fields with default values." 
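A service create request is just a `ServiceSpec` body plus an optional `X-Registry-Auth` header. The sketch below builds a replicated service roughly matching the example body above; the names and values are illustrative, and `cli`/`ctx` come from the first example:

```go
// Create a replicated nginx service publishing port 8080 -> 80.
replicas := uint64(4)
spec := swarm.ServiceSpec{
	Annotations: swarm.Annotations{Name: "web"},
	TaskTemplate: swarm.TaskSpec{
		ContainerSpec: swarm.ContainerSpec{Image: "nginx:alpine"},
	},
	Mode: swarm.ServiceMode{
		Replicated: &swarm.ReplicatedService{Replicas: &replicas},
	},
	EndpointSpec: &swarm.EndpointSpec{
		Ports: []swarm.PortConfig{{
			Protocol:      swarm.PortConfigProtocolTCP,
			TargetPort:    80,
			PublishedPort: 8080,
		}},
	},
}
resp, err := cli.ServiceCreate(ctx, spec, types.ServiceCreateOptions{})
if err != nil {
	panic(err)
}
fmt.Println("service ID:", resp.ID, "warnings:", resp.Warnings)
```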
+ type: "boolean" + default: false + tags: ["Service"] + delete: + summary: "Delete a service" + operationId: "ServiceDelete" + responses: + 200: + description: "no error" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + tags: ["Service"] + /services/{id}/update: + post: + summary: "Update a service" + operationId: "ServiceUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ServiceUpdateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "top" + TaskTemplate: + ContainerSpec: + Image: "busybox" + Args: + - "top" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + + - name: "version" + in: "query" + description: "The version number of the service object being updated. This is required to avoid conflicting writes." + required: true + type: "integer" + - name: "registryAuthFrom" + in: "query" + type: "string" + description: "If the X-Registry-Auth header is not specified, this + parameter indicates where to find registry authorization credentials. The + valid values are `spec` and `previous-spec`." + default: "spec" + - name: "rollback" + in: "query" + type: "string" + description: "Set this parameter to `previous` to cause a + server-side rollback to the previous service spec. The supplied spec will be + ignored in this case." + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)" + type: "string" + + tags: ["Service"] + /services/{id}/logs: + get: + summary: "Get service logs" + description: | + Get `stdout` and `stderr` logs from a service. + + **Note**: This endpoint works only for services with the `json-file` or `journald` logging drivers.
+ operationId: "ServiceLogs" + produces: + - "application/vnd.docker.raw-stream" + - "application/json" + responses: + 101: + description: "logs returned as a stream" + schema: + type: "string" + format: "binary" + 200: + description: "logs returned as a string in response body" + schema: + type: "string" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such service: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the service" + type: "string" + - name: "details" + in: "query" + description: "Show service context and extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: | + Return the logs as a stream. + + This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines." 
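Service logs arrive as the same multiplexed raw stream the attach endpoint uses, so a client has to demultiplex stdout and stderr. A sketch, assuming `cli`/`ctx` as before plus the `os` and `github.com/docker/docker/pkg/stdcopy` imports, and a hypothetical service named `web`:

```go
// Stream and demultiplex service logs.
rd, err := cli.ServiceLogs(ctx, "web", types.ContainerLogsOptions{
	ShowStdout: true,
	ShowStderr: true,
	Follow:     true,
	Tail:       "100",
})
if err != nil {
	panic(err)
}
defer rd.Close()

// The raw stream interleaves stdout/stderr frames; stdcopy splits them.
if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, rd); err != nil {
	panic(err)
}
```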
+ type: "string" + default: "all" + tags: ["Service"] + /tasks: + get: + summary: "List tasks" + operationId: "TaskList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Task" + example: + - ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + - ID: "1yljwbmlr8er2waf8orvqpwms" + Version: + Index: 30 + CreatedAt: "2016-06-07T21:07:30.019104782Z" + UpdatedAt: "2016-06-07T21:07:30.231958098Z" + Name: "hopeful_cori" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:30.202183143Z" + State: "shutdown" + Message: "shutdown" + ContainerStatus: + ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" + DesiredState: "shutdown" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.5/16" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the tasks list. 
Available filters: + + - `desired-state=(running | shutdown | accepted)` + - `id=<task id>` + - `label=key` or `label="key=value"` + - `name=<task name>` + - `node=<node id or name>` + - `service=<service name>` + tags: ["Task"] + /tasks/{id}: + get: + summary: "Inspect a task" + operationId: "TaskInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Task" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID of the task" + required: true + type: "string" + tags: ["Task"] + /tasks/{id}/logs: + get: + summary: "Get task logs" + description: | + Get `stdout` and `stderr` logs from a task. + + **Note**: This endpoint works only for services with the `json-file` or `journald` logging drivers. + operationId: "TaskLogs" + produces: + - "application/vnd.docker.raw-stream" + - "application/json" + responses: + 101: + description: "logs returned as a stream" + schema: + type: "string" + format: "binary" + 200: + description: "logs returned as a string in response body" + schema: + type: "string" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such task: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID of the task" + type: "string" + - name: "details" + in: "query" + description: "Show task context and extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: | + Return the logs as a stream. + + This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines."
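Task listing is the usual filtered collection endpoint; combining the `service` and `desired-state` filters above yields the running tasks of one service. A sketch, with `cli`/`ctx` as before and the service name made up:

```go
// List the running tasks of the "web" service.
f := filters.NewArgs()
f.Add("service", "web")
f.Add("desired-state", "running")
tasks, err := cli.TaskList(ctx, types.TaskListOptions{Filters: f})
if err != nil {
	panic(err)
}
for _, t := range tasks {
	fmt.Printf("%s slot=%d node=%s state=%s\n",
		t.ID, t.Slot, t.NodeID, t.Status.State)
}
```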
+ type: "string" + default: "all" + /secrets: + get: + summary: "List secrets" + operationId: "SecretList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Secret" + example: + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the secrets list. Available filters: + + - `id=<secret id>` + - `label=<key> or label=<key>=value` + - `name=<secret name>` + - `names=<secret name>` + tags: ["Secret"] + /secrets/create: + post: + summary: "Create a secret" + operationId: "SecretCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + type: "object" + properties: + ID: + description: "The ID of the created secret." + type: "string" + example: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/SecretSpec" + - type: "object" + example: + Name: "app-key.crt" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + tags: ["Secret"] + /secrets/{id}: + get: + summary: "Inspect a secret" + operationId: "SecretInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Secret" + examples: + application/json: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + delete: + summary: "Delete a secret" + operationId: "SecretDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + /secrets/{id}/update: + post: + summary: "Update a Secret" + operationId: "SecretUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such secret" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref:
"#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the secret" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/SecretSpec" + description: "The spec of the secret to update. Currently, only the Labels field can be updated. All other fields must remain unchanged from the [SecretInspect endpoint](#operation/SecretInspect) response values." + - name: "version" + in: "query" + description: "The version number of the secret object being updated. This is required to avoid conflicting writes." + type: "integer" + format: "int64" + required: true + tags: ["Secret"] + /configs: + get: + summary: "List configs" + operationId: "ConfigList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Config" + example: + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "server.conf" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the configs list. Available filters: + + - `id=` + - `label= or label==value` + - `name=` + - `names=` + tags: ["Config"] + /configs/create: + post: + summary: "Create a config" + operationId: "ConfigCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + type: "object" + properties: + ID: + description: "The ID of the created config." 
+ type: "string" + example: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/ConfigSpec" + - type: "object" + example: + Name: "server.conf" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + tags: ["Config"] + /configs/{id}: + get: + summary: "Inspect a config" + operationId: "ConfigInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Config" + examples: + application/json: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + 404: + description: "config not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the config" + tags: ["Config"] + delete: + summary: "Delete a config" + operationId: "ConfigDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "config not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the config" + tags: ["Config"] + /configs/{id}/update: + post: + summary: "Update a Config" + operationId: "ConfigUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such config" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the config" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/ConfigSpec" + description: "The spec of the config to update. Currently, only the Labels field can be updated. All other fields must remain unchanged from the [ConfigInspect endpoint](#operation/ConfigInspect) response values." + - name: "version" + in: "query" + description: "The version number of the config object being updated. This is required to avoid conflicting writes." + type: "integer" + format: "int64" + required: true + tags: ["Config"] + /distribution/{name}/json: + get: + summary: "Get image information from the registry" + description: "Return image digest and platform information by contacting the registry." 
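The distribution endpoint below returns the registry's view of an image: its content digest and the platforms its manifest supports. A sketch, assuming the Go client exposes this as `DistributionInspect` with an optional base64-encoded auth argument, and `cli`/`ctx` as before:

```go
// Query the registry for an image's digest and supported platforms.
info, err := cli.DistributionInspect(ctx, "nginx:alpine", "")
if err != nil {
	panic(err)
}
fmt.Println("digest:", info.Descriptor.Digest)
for _, p := range info.Platforms {
	fmt.Println("platform:", p.OS+"/"+p.Architecture)
}
```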
+ operationId: "DistributionInspect" + produces: + - "application/json" + responses: + 200: + description: "descriptor and platform information" + schema: + type: "object" + x-go-name: DistributionInspect + required: [Descriptor, Platforms] + properties: + Descriptor: + type: "object" + description: "A descriptor struct containing digest, media type, and size" + properties: + MediaType: + type: "string" + Size: + type: "integer" + format: "int64" + Digest: + type: "string" + URLs: + type: "array" + items: + type: "string" + Platforms: + type: "array" + description: "An array containing all platforms supported by the image" + items: + type: "object" + properties: + Architecture: + type: "string" + OS: + type: "string" + OSVersion: + type: "string" + OSFeatures: + type: "array" + items: + type: "string" + Variant: + type: "string" + Features: + type: "array" + items: + type: "string" + examples: + application/json: + Descriptor: + MediaType: "application/vnd.docker.distribution.manifest.v2+json" + Digest: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" + Size: 3987495 + URLs: + - "" + Platforms: + - Architecture: "amd64" + OS: "linux" + OSVersion: "" + OSFeatures: + - "" + Variant: "" + Features: + - "" + 401: + description: "Failed authentication or no image found" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: someimage (tag: latest)" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or id" + type: "string" + required: true + tags: ["Distribution"] + /session: + post: + summary: "Initialize interactive session" + description: | + Start a new interactive session with a server. Session allows the server to call back to the client for advanced capabilities. + + > **Note**: This endpoint is *experimental* and only available if the daemon is started with experimental + > features enabled. The specifications for this endpoint may still change in a future version of the API. + + ### Hijacking + + This endpoint hijacks the HTTP connection to an HTTP/2 transport that allows the client to expose gRPC services on that connection.
+ + For example, the client sends this request to upgrade the connection: + + ``` + POST /session HTTP/1.1 + Upgrade: h2c + Connection: Upgrade + ``` + + The Docker daemon will respond with a `101 UPGRADED` response followed by the raw stream: + + ``` + HTTP/1.1 101 UPGRADED + Connection: Upgrade + Upgrade: h2c + ``` + operationId: "Session" + produces: + - "application/vnd.docker.raw-stream" + responses: + 101: + description: "no error, hijacking successful" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Session (experimental)"] diff --git a/vendor/github.com/moby/moby/api/templates/server/operation.gotmpl b/vendor/github.com/moby/moby/api/templates/server/operation.gotmpl new file mode 100644 index 000000000..3ff63ef94 --- /dev/null +++ b/vendor/github.com/moby/moby/api/templates/server/operation.gotmpl @@ -0,0 +1,26 @@ +package {{ .Package }} + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +import ( + "net/http" + + context "golang.org/x/net/context" + + {{ range .DefaultImports }}{{ printf "%q" . }} + {{ end }} + {{ range $key, $value := .Imports }}{{ $key }} {{ printf "%q" $value }} + {{ end }} +) + + +{{ range .ExtraSchemas }} +// {{ .Name }} {{ template "docstring" . }} +// swagger:model {{ .Name }} +{{ template "schema" . }} +{{ end }} diff --git a/vendor/github.com/moby/moby/api/types/auth.go b/vendor/github.com/moby/moby/api/types/auth.go new file mode 100644 index 000000000..056af6b84 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/auth.go @@ -0,0 +1,22 @@ +package types + +// AuthConfig contains authorization information for connecting to a Registry +type AuthConfig struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + // Email is an optional value associated with the username. + // This field is deprecated and will be removed in a later + // version of docker. + Email string `json:"email,omitempty"` + + ServerAddress string `json:"serveraddress,omitempty"` + + // IdentityToken is used to authenticate the user and get + // an access token for the registry. + IdentityToken string `json:"identitytoken,omitempty"` + + // RegistryToken is a bearer token to be sent to a registry + RegistryToken string `json:"registrytoken,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/backend/backend.go b/vendor/github.com/moby/moby/api/types/backend/backend.go new file mode 100644 index 000000000..74cea5003 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/backend/backend.go @@ -0,0 +1,106 @@ +// Package backend includes types to send information to server backends. +package backend + +import ( + "io" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" +) + +// ContainerAttachConfig holds the streams to use when connecting to a container to view logs.
+type ContainerAttachConfig struct { + GetStreams func() (io.ReadCloser, io.Writer, io.Writer, error) + UseStdin bool + UseStdout bool + UseStderr bool + Logs bool + Stream bool + DetachKeys string + + // Used to signify that streams are multiplexed and therefore need a StdWriter to encode stdout/stderr messages accordingly. + // TODO @cpuguy83: This shouldn't be needed. It was only added so that http and websocket endpoints can use the same function, and the websocket function was not using a stdwriter prior to this change... + // HOWEVER, the websocket endpoint is using a single stream and SHOULD be encoded with stdout/stderr as is done for HTTP since it is still just a single stream. + // Since such a change is an API change unrelated to the current changeset we'll keep it as is here and change separately. + MuxStreams bool +} + +// LogMessage is a data structure that represents a piece of output produced by some +// container. The Line member is a slice of an array whose contents can be +// changed after a log driver's Log() method returns. +// Changes to this struct need to be reflected in the reset method in +// daemon/logger/logger.go +type LogMessage struct { + Line []byte + Source string + Timestamp time.Time + Attrs []LogAttr + Partial bool + + // Err is an error associated with a message. Completeness of a message + // with Err is not expected, though it may be partially complete (fields may + // be missing, gibberish, or nil) + Err error +} + +// LogAttr is used to hold the extra attributes available in the log message. +type LogAttr struct { + Key string + Value string +} + +// LogSelector is a list of services and tasks that should be returned as part +// of a log stream. It is similar to swarmapi.LogSelector, with the difference +// that the names don't have to be resolved to IDs; this is mostly to avoid +// accidents later where a swarmapi LogSelector might have been incorrectly +// used verbatim (and to avoid the handler having to import swarmapi types) +type LogSelector struct { + Services []string + Tasks []string +} + +// ContainerStatsConfig holds information for configuring the runtime +// behavior of a backend.ContainerStats() call. +type ContainerStatsConfig struct { + Stream bool + OutStream io.Writer + Version string +} + +// ExecInspect holds information about a running process started +// with docker exec. +type ExecInspect struct { + ID string + Running bool + ExitCode *int + ProcessConfig *ExecProcessConfig + OpenStdin bool + OpenStderr bool + OpenStdout bool + CanRemove bool + ContainerID string + DetachKeys []byte + Pid int +} + +// ExecProcessConfig holds information about the exec process +// running on the host. +type ExecProcessConfig struct { + Tty bool `json:"tty"` + Entrypoint string `json:"entrypoint"` + Arguments []string `json:"arguments"` + Privileged *bool `json:"privileged,omitempty"` + User string `json:"user,omitempty"` +} + +// ContainerCommitConfig is a wrapper around +// types.ContainerCommitConfig that also +// transports configuration changes for a container.
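The `Partial` flag on `LogMessage` above is how a log driver marks one logical line that was split across several messages. A purely illustrative emitter (the `emit` helper, its channel shape, and the 16 KiB cutoff are inventions for this sketch, not part of the package):

```go
// emit splits oversized lines into partial messages, mirroring how the
// Partial field is intended to be consumed by log drivers.
func emit(line []byte, src string, out chan<- *backend.LogMessage) {
	const max = 16 * 1024 // illustrative cutoff, not a daemon constant
	for len(line) > max {
		out <- &backend.LogMessage{
			Line:      line[:max],
			Source:    src,
			Timestamp: time.Now(),
			Partial:   true, // more of this logical line follows
		}
		line = line[max:]
	}
	out <- &backend.LogMessage{Line: line, Source: src, Timestamp: time.Now()}
}
```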
+type ContainerCommitConfig struct { + types.ContainerCommitConfig + Changes []string + // TODO: ContainerConfig is only used by the dockerfile Builder, so remove it + // once the Builder has been updated to use a different interface + ContainerConfig *container.Config +} diff --git a/vendor/github.com/moby/moby/api/types/backend/build.go b/vendor/github.com/moby/moby/api/types/backend/build.go new file mode 100644 index 000000000..300d35896 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/backend/build.go @@ -0,0 +1,44 @@ +package backend + +import ( + "io" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/streamformatter" +) + +// PullOption defines different modes for accessing images +type PullOption int + +const ( + // PullOptionNoPull only returns local images + PullOptionNoPull PullOption = iota + // PullOptionForcePull always tries to pull a ref from the registry first + PullOptionForcePull + // PullOptionPreferLocal uses local image if it exists, otherwise pulls + PullOptionPreferLocal +) + +// ProgressWriter is a data object to transport progress streams to the client +type ProgressWriter struct { + Output io.Writer + StdoutFormatter io.Writer + StderrFormatter io.Writer + AuxFormatter *streamformatter.AuxFormatter + ProgressReaderFunc func(io.ReadCloser) io.ReadCloser +} + +// BuildConfig is the configuration used by a BuildManager to start a build +type BuildConfig struct { + Source io.ReadCloser + ProgressWriter ProgressWriter + Options *types.ImageBuildOptions +} + +// GetImageAndLayerOptions are the options supported by GetImageAndReleasableLayer +type GetImageAndLayerOptions struct { + PullOption PullOption + AuthConfig map[string]types.AuthConfig + Output io.Writer + Platform string +} diff --git a/vendor/github.com/moby/moby/api/types/blkiodev/blkio.go b/vendor/github.com/moby/moby/api/types/blkiodev/blkio.go new file mode 100644 index 000000000..931ae10ab --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/blkiodev/blkio.go @@ -0,0 +1,23 @@ +package blkiodev + +import "fmt" + +// WeightDevice is a structure that holds device:weight pair +type WeightDevice struct { + Path string + Weight uint16 +} + +func (w *WeightDevice) String() string { + return fmt.Sprintf("%s:%d", w.Path, w.Weight) +} + +// ThrottleDevice is a structure that holds device:rate_per_second pair +type ThrottleDevice struct { + Path string + Rate uint64 +} + +func (t *ThrottleDevice) String() string { + return fmt.Sprintf("%s:%d", t.Path, t.Rate) +} diff --git a/vendor/github.com/moby/moby/api/types/client.go b/vendor/github.com/moby/moby/api/types/client.go new file mode 100644 index 000000000..18a1263f1 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/client.go @@ -0,0 +1,389 @@ +package types + +import ( + "bufio" + "io" + "net" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + units "github.com/docker/go-units" +) + +// CheckpointCreateOptions holds parameters to create a checkpoint from a container +type CheckpointCreateOptions struct { + CheckpointID string + CheckpointDir string + Exit bool +} + +// CheckpointListOptions holds parameters to list checkpoints for a container +type CheckpointListOptions struct { + CheckpointDir string +} + +// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container +type CheckpointDeleteOptions struct { + CheckpointID string + CheckpointDir string +} + +// ContainerAttachOptions holds parameters to attach to a container. 
+type ContainerAttachOptions struct { + Stream bool + Stdin bool + Stdout bool + Stderr bool + DetachKeys string + Logs bool +} + +// ContainerCommitOptions holds parameters to commit changes into a container. +type ContainerCommitOptions struct { + Reference string + Comment string + Author string + Changes []string + Pause bool + Config *container.Config +} + +// ContainerExecInspect holds information returned by exec inspect. +type ContainerExecInspect struct { + ExecID string + ContainerID string + Running bool + ExitCode int + Pid int +} + +// ContainerListOptions holds parameters to list containers with. +type ContainerListOptions struct { + Quiet bool + Size bool + All bool + Latest bool + Since string + Before string + Limit int + Filters filters.Args +} + +// ContainerLogsOptions holds parameters to filter logs with. +type ContainerLogsOptions struct { + ShowStdout bool + ShowStderr bool + Since string + Timestamps bool + Follow bool + Tail string + Details bool +} + +// ContainerRemoveOptions holds parameters to remove containers. +type ContainerRemoveOptions struct { + RemoveVolumes bool + RemoveLinks bool + Force bool +} + +// ContainerStartOptions holds parameters to start containers. +type ContainerStartOptions struct { + CheckpointID string + CheckpointDir string +} + +// CopyToContainerOptions holds information +// about files to copy into a container +type CopyToContainerOptions struct { + AllowOverwriteDirWithFile bool + CopyUIDGID bool +} + +// EventsOptions holds parameters to filter events with. +type EventsOptions struct { + Since string + Until string + Filters filters.Args +} + +// NetworkListOptions holds parameters to filter the list of networks with. +type NetworkListOptions struct { + Filters filters.Args +} + +// HijackedResponse holds connection information for a hijacked request. +type HijackedResponse struct { + Conn net.Conn + Reader *bufio.Reader +} + +// Close closes the hijacked connection and reader. +func (h *HijackedResponse) Close() { + h.Conn.Close() +} + +// CloseWriter is an interface that implements structs +// that close input streams to prevent from writing. +type CloseWriter interface { + CloseWrite() error +} + +// CloseWrite closes a readWriter for writing. +func (h *HijackedResponse) CloseWrite() error { + if conn, ok := h.Conn.(CloseWriter); ok { + return conn.CloseWrite() + } + return nil +} + +// ImageBuildOptions holds the information +// necessary to build images. +type ImageBuildOptions struct { + Tags []string + SuppressOutput bool + RemoteContext string + NoCache bool + Remove bool + ForceRemove bool + PullParent bool + Isolation container.Isolation + CPUSetCPUs string + CPUSetMems string + CPUShares int64 + CPUQuota int64 + CPUPeriod int64 + Memory int64 + MemorySwap int64 + CgroupParent string + NetworkMode string + ShmSize int64 + Dockerfile string + Ulimits []*units.Ulimit + // BuildArgs needs to be a *string instead of just a string so that + // we can tell the difference between "" (empty string) and no value + // at all (nil). See the parsing of buildArgs in + // api/server/router/build/build_routes.go for even more info. + BuildArgs map[string]*string + AuthConfigs map[string]AuthConfig + Context io.Reader + Labels map[string]string + // squash the resulting image's layers to the parent + // preserves the original image and creates a new one from the parent with all + // the changes applied to a single layer + Squash bool + // CacheFrom specifies images that are used for matching cache. 
Images + // specified here do not need to have a valid parent chain to match cache. + CacheFrom []string + SecurityOpt []string + ExtraHosts []string // List of extra hosts + Target string + SessionID string + + // TODO @jhowardmsft LCOW Support: This will require extending to include + // `Platform string`, but is omitted for now as it's hard-coded temporarily + // to avoid API changes. +} + +// ImageBuildResponse holds information +// returned by a server after building +// an image. +type ImageBuildResponse struct { + Body io.ReadCloser + OSType string +} + +// ImageCreateOptions holds information to create images. +type ImageCreateOptions struct { + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry +} + +// ImageImportSource holds source information for ImageImport +type ImageImportSource struct { + Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this. + SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute. +} + +// ImageImportOptions holds information to import images from the client host. +type ImageImportOptions struct { + Tag string // Tag is the name to tag this image with. This attribute is deprecated. + Message string // Message is the message to tag the image with + Changes []string // Changes are the raw changes to apply to this image +} + +// ImageListOptions holds parameters to filter the list of images with. +type ImageListOptions struct { + All bool + Filters filters.Args +} + +// ImageLoadResponse returns information to the client about a load process. +type ImageLoadResponse struct { + // Body must be closed to avoid a resource leak + Body io.ReadCloser + JSON bool +} + +// ImagePullOptions holds information to pull images. +type ImagePullOptions struct { + All bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + PrivilegeFunc RequestPrivilegeFunc +} + +// RequestPrivilegeFunc is a function interface that +// clients can supply to retry operations after +// getting an authorization error. +// This function returns the registry authentication +// header value in base 64 format, or an error +// if the privilege request fails. +type RequestPrivilegeFunc func() (string, error) + +// ImagePushOptions holds information to push images. +type ImagePushOptions ImagePullOptions + +// ImageRemoveOptions holds parameters to remove images. +type ImageRemoveOptions struct { + Force bool + PruneChildren bool +} + +// ImageSearchOptions holds parameters to search images with. +type ImageSearchOptions struct { + RegistryAuth string + PrivilegeFunc RequestPrivilegeFunc + Filters filters.Args + Limit int +} + +// ResizeOptions holds parameters to resize a tty. +// It can be used to resize container ttys and +// exec process ttys too. +type ResizeOptions struct { + Height uint + Width uint +} + +// NodeListOptions holds parameters to list nodes with. +type NodeListOptions struct { + Filters filters.Args +} + +// NodeRemoveOptions holds parameters to remove nodes with. +type NodeRemoveOptions struct { + Force bool +} + +// ServiceCreateOptions contains the options to use when creating a service. +type ServiceCreateOptions struct { + // EncodedRegistryAuth is the encoded registry authorization credentials to + // use when updating the service. + // + // This field follows the format of the X-Registry-Auth header.
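The `BuildArgs` comment above is worth making concrete: a non-nil pointer to an empty string sends an explicit empty value, while a nil pointer declares the arg without setting it, so the Dockerfile default applies. A sketch, assuming `cli`/`ctx` as before and a tar build context in `buildContext` (an `io.Reader`):

```go
// Demonstrate the nil-vs-empty distinction in BuildArgs.
empty := ""
opts := types.ImageBuildOptions{
	Tags:       []string{"example:latest"},
	Dockerfile: "Dockerfile",
	BuildArgs: map[string]*string{
		"HTTP_PROXY": &empty, // passed to the builder as an empty string
		"VERSION":    nil,    // declared but unset: the Dockerfile default applies
	},
}
resp, err := cli.ImageBuild(ctx, buildContext, opts)
if err != nil {
	panic(err)
}
defer resp.Body.Close()
```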
+ EncodedRegistryAuth string + + // QueryRegistry indicates whether the service update requires + // contacting a registry. A registry may be contacted to retrieve + // the image digest and manifest, which in turn can be used to update + // platform or other information about the service. + QueryRegistry bool +} + +// ServiceCreateResponse contains the information returned to a client +// on the creation of a new service. +type ServiceCreateResponse struct { + // ID is the ID of the created service. + ID string + // Warnings is a set of non-fatal warning messages to pass on to the user. + Warnings []string `json:",omitempty"` +} + +// Values for RegistryAuthFrom in ServiceUpdateOptions +const ( + RegistryAuthFromSpec = "spec" + RegistryAuthFromPreviousSpec = "previous-spec" +) + +// ServiceUpdateOptions contains the options to be used for updating services. +type ServiceUpdateOptions struct { + // EncodedRegistryAuth is the encoded registry authorization credentials to + // use when updating the service. + // + // This field follows the format of the X-Registry-Auth header. + EncodedRegistryAuth string + + // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate + // into this field. While it does open API users up to racy writes, most + // users may not need that level of consistency in practice. + + // RegistryAuthFrom specifies where to find the registry authorization + // credentials if they are not given in EncodedRegistryAuth. Valid + // values are "spec" and "previous-spec". + RegistryAuthFrom string + + // Rollback indicates whether a server-side rollback should be + // performed. When this is set, the provided spec will be ignored. + // The valid values are "previous" and "none". An empty value is the + // same as "none". + Rollback string + + // QueryRegistry indicates whether the service update requires + // contacting a registry. A registry may be contacted to retrieve + // the image digest and manifest, which in turn can be used to update + // platform or other information about the service. + QueryRegistry bool +} + +// ServiceListOptions holds parameters to list services with. +type ServiceListOptions struct { + Filters filters.Args +} + +// ServiceInspectOptions holds parameters related to the "service inspect" +// operation. +type ServiceInspectOptions struct { + InsertDefaults bool +} + +// TaskListOptions holds parameters to list tasks with. +type TaskListOptions struct { + Filters filters.Args +} + +// PluginRemoveOptions holds parameters to remove plugins. +type PluginRemoveOptions struct { + Force bool +} + +// PluginEnableOptions holds parameters to enable plugins. +type PluginEnableOptions struct { + Timeout int +} + +// PluginDisableOptions holds parameters to disable plugins. +type PluginDisableOptions struct { + Force bool +} + +// PluginInstallOptions holds parameters to install a plugin. +type PluginInstallOptions struct { + Disabled bool + AcceptAllPermissions bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + RemoteRef string // RemoteRef is the plugin name on the registry + PrivilegeFunc RequestPrivilegeFunc + AcceptPermissionsFunc func(PluginPrivileges) (bool, error) + Args []string +} + +// SwarmUnlockKeyResponse contains the response for Engine API: +// GET /swarm/unlockkey +type SwarmUnlockKeyResponse struct { + // UnlockKey is the unlock key in ASCII-armored format. + UnlockKey string +} + +// PluginCreateOptions hold all options to plugin create. 
+type PluginCreateOptions struct { + RepoName string +} diff --git a/vendor/github.com/moby/moby/api/types/configs.go b/vendor/github.com/moby/moby/api/types/configs.go new file mode 100644 index 000000000..e4d2ce6e3 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/configs.go @@ -0,0 +1,70 @@ +package types + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" +) + +// configs holds structs used for internal communication between the +// frontend (such as an http server) and the backend (such as the +// docker daemon). + +// ContainerCreateConfig is the parameter set to ContainerCreate() +type ContainerCreateConfig struct { + Name string + Config *container.Config + HostConfig *container.HostConfig + NetworkingConfig *network.NetworkingConfig + AdjustCPUShares bool + Platform string +} + +// ContainerRmConfig holds arguments for the container remove +// operation. This struct is used to tell the backend what operations +// to perform. +type ContainerRmConfig struct { + ForceRemove, RemoveVolume, RemoveLink bool +} + +// ContainerCommitConfig contains build configs for commit operation, +// and is used when making a commit with the current state of the container. +type ContainerCommitConfig struct { + Pause bool + Repo string + Tag string + Author string + Comment string + // merge container config into commit config before commit + MergeConfigs bool + Config *container.Config +} + +// ExecConfig is a small subset of the Config struct that holds the configuration +// for the exec feature of docker. +type ExecConfig struct { + User string // User that will run the command + Privileged bool // Is the container in privileged mode + Tty bool // Attach standard streams to a tty. + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStderr bool // Attach the standard error + AttachStdout bool // Attach the standard output + Detach bool // Execute in detach mode + DetachKeys string // Escape keys for detach + Env []string // Environment variables + Cmd []string // Execution commands and args +} + +// PluginRmConfig holds arguments for plugin remove. +type PluginRmConfig struct { + ForceRemove bool +} + +// PluginEnableConfig holds arguments for plugin enable +type PluginEnableConfig struct { + Timeout int +} + +// PluginDisableConfig holds arguments for plugin disable. +type PluginDisableConfig struct { + ForceDisable bool +} diff --git a/vendor/github.com/moby/moby/api/types/container/config.go b/vendor/github.com/moby/moby/api/types/container/config.go new file mode 100644 index 000000000..55a03fc98 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/config.go @@ -0,0 +1,69 @@ +package container + +import ( + "time" + + "github.com/docker/docker/api/types/strslice" + "github.com/docker/go-connections/nat" +) + +// MinimumDuration puts a minimum on user configured duration. +// This is to prevent API error on time unit. For example, API may +// set 3 as healthcheck interval with intention of 3 seconds, but +// Docker interprets it as 3 nanoseconds. +const MinimumDuration = 1 * time.Millisecond + +// HealthConfig holds configuration settings for the HEALTHCHECK feature. +type HealthConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. 
+ // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `json:",omitempty"` + + // Zero means to inherit. Durations are expressed as integer nanoseconds. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries start to count down. + + // Retries is the number of consecutive failures needed to consider a container as unhealthy. + // Zero means inherit. + Retries int `json:",omitempty"` +} + +// Config contains the configuration data about a container. +// It should hold only portable information about the container. +// Here, "portable" means "independent from the host we are running on". +// Non-portable information *should* appear in HostConfig. +// All fields added to this struct must be marked `omitempty` to keep getting +// predictable hashes from the old `v1Compatibility` configuration. +type Config struct { + Hostname string // Hostname + Domainname string // Domainname + User string // User that will run the command(s) inside the container, also supports user:group + AttachStdin bool // Attach the standard input, making user interaction possible + AttachStdout bool // Attach the standard output + AttachStderr bool // Attach the standard error + ExposedPorts nat.PortSet `json:",omitempty"` // List of exposed ports + Tty bool // Attach standard streams to a tty, including stdin if it is not closed. + OpenStdin bool // Open stdin + StdinOnce bool // If true, close stdin after one attached client disconnects. + Env []string // List of environment variables to set in the container + Cmd strslice.StrSlice // Command to run when starting the container + Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy + ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) + Image string // Name of the image as it was passed by the operator (e.g.
could be symbolic) + Volumes map[string]struct{} // List of volumes (mounts) used for the container + WorkingDir string // Current directory (PWD) in which the command will be launched + Entrypoint strslice.StrSlice // Entrypoint to run when starting the container + NetworkDisabled bool `json:",omitempty"` // Is network disabled + MacAddress string `json:",omitempty"` // Mac Address of the container + OnBuild []string // ONBUILD metadata that were defined in the image Dockerfile + Labels map[string]string // List of labels set on this container + StopSignal string `json:",omitempty"` // Signal to stop a container + StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container + Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT +} diff --git a/vendor/github.com/moby/moby/api/types/container/container_changes.go b/vendor/github.com/moby/moby/api/types/container/container_changes.go new file mode 100644 index 000000000..767945a53 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/container_changes.go @@ -0,0 +1,21 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// ContainerChangeResponseItem container change response item +// swagger:model ContainerChangeResponseItem +type ContainerChangeResponseItem struct { + + // Kind of change + // Required: true + Kind uint8 `json:"Kind"` + + // Path to file that has changed + // Required: true + Path string `json:"Path"` +} diff --git a/vendor/github.com/moby/moby/api/types/container/container_create.go b/vendor/github.com/moby/moby/api/types/container/container_create.go new file mode 100644 index 000000000..c95023b81 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/container_create.go @@ -0,0 +1,21 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// ContainerCreateCreatedBody container create created body +// swagger:model ContainerCreateCreatedBody +type ContainerCreateCreatedBody struct { + + // The ID of the created container + // Required: true + ID string `json:"Id"` + + // Warnings encountered when creating the container + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/moby/moby/api/types/container/container_top.go b/vendor/github.com/moby/moby/api/types/container/container_top.go new file mode 100644 index 000000000..78bc37ee5 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/container_top.go @@ -0,0 +1,21 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// ContainerTopOKBody container top o k body +// swagger:model ContainerTopOKBody +type ContainerTopOKBody struct { + + // Each process running in the container, where each process is an array of values corresponding to the titles + // Required: true + 
Processes [][]string `json:"Processes"` + + // The ps column titles + // Required: true + Titles []string `json:"Titles"` +} diff --git a/vendor/github.com/moby/moby/api/types/container/container_update.go b/vendor/github.com/moby/moby/api/types/container/container_update.go new file mode 100644 index 000000000..2339366fb --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/container_update.go @@ -0,0 +1,17 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// ContainerUpdateOKBody container update o k body +// swagger:model ContainerUpdateOKBody +type ContainerUpdateOKBody struct { + + // warnings + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/moby/moby/api/types/container/container_wait.go b/vendor/github.com/moby/moby/api/types/container/container_wait.go new file mode 100644 index 000000000..77ecdbaf7 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/container_wait.go @@ -0,0 +1,17 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// ContainerWaitOKBody container wait o k body +// swagger:model ContainerWaitOKBody +type ContainerWaitOKBody struct { + + // Exit code of the container + // Required: true + StatusCode int64 `json:"StatusCode"` +} diff --git a/vendor/github.com/moby/moby/api/types/container/host_config.go b/vendor/github.com/moby/moby/api/types/container/host_config.go new file mode 100644 index 000000000..9fea9eb04 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/host_config.go @@ -0,0 +1,380 @@ +package container + +import ( + "strings" + + "github.com/docker/docker/api/types/blkiodev" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/go-connections/nat" + "github.com/docker/go-units" +) + +// Isolation represents the isolation technology of a container. The supported +// values are platform specific +type Isolation string + +// IsDefault indicates the default isolation technology of a container. On Linux this +// is the native driver. On Windows, this is a Windows Server Container. +func (i Isolation) IsDefault() bool { + return strings.ToLower(string(i)) == "default" || string(i) == "" +} + +// IpcMode represents the container ipc stack. +type IpcMode string + +// IsPrivate indicates whether the container uses its private ipc stack. +func (n IpcMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// IsHost indicates whether the container uses the host's ipc stack. +func (n IpcMode) IsHost() bool { + return n == "host" +} + +// IsContainer indicates whether the container uses a container's ipc stack. +func (n IpcMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// Valid indicates whether the ipc stack is valid. 
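+// For example, "", "host", and "container:web" are valid values, while +// "container:" (missing a container name) is not.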
+func (n IpcMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + case "container": + if len(parts) != 2 || parts[1] == "" { + return false + } + default: + return false + } + return true +} + +// Container returns the name of the container whose ipc stack is going to be used. +func (n IpcMode) Container() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// NetworkMode represents the container network stack. +type NetworkMode string + +// IsNone indicates whether the container isn't using a network stack. +func (n NetworkMode) IsNone() bool { + return n == "none" +} + +// IsDefault indicates whether the container uses the default network stack. +func (n NetworkMode) IsDefault() bool { + return n == "default" +} + +// IsPrivate indicates whether the container uses its private network stack. +func (n NetworkMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// IsContainer indicates whether the container uses a container network stack. +func (n NetworkMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// ConnectedContainer returns the id of the container whose network this container is connected to. +func (n NetworkMode) ConnectedContainer() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// UserDefined returns the name of the network if it is user-created. +func (n NetworkMode) UserDefined() string { + if n.IsUserDefined() { + return string(n) + } + return "" +} + +// UsernsMode represents userns mode in the container. +type UsernsMode string + +// IsHost indicates whether the container uses the host's userns. +func (n UsernsMode) IsHost() bool { + return n == "host" +} + +// IsPrivate indicates whether the container uses a private userns. +func (n UsernsMode) IsPrivate() bool { + return !(n.IsHost()) +} + +// Valid indicates whether the userns is valid. +func (n UsernsMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + default: + return false + } + return true +} + +// CgroupSpec represents the cgroup to use for the container. +type CgroupSpec string + +// IsContainer indicates whether the container is using another container's cgroup +func (c CgroupSpec) IsContainer() bool { + parts := strings.SplitN(string(c), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// Valid indicates whether the cgroup spec is valid. +func (c CgroupSpec) Valid() bool { + return c.IsContainer() || c == "" +} + +// Container returns the name of the container whose cgroup will be used. +func (c CgroupSpec) Container() string { + parts := strings.SplitN(string(c), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// UTSMode represents the UTS namespace of the container. +type UTSMode string + +// IsPrivate indicates whether the container uses its private UTS namespace. +func (n UTSMode) IsPrivate() bool { + return !(n.IsHost()) +} + +// IsHost indicates whether the container uses the host's UTS namespace. +func (n UTSMode) IsHost() bool { + return n == "host" +} + +// Valid indicates whether the UTS namespace is valid. +func (n UTSMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + default: + return false + } + return true +} + +// PidMode represents the pid namespace of the container.
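+// The accepted values mirror IpcMode, as the methods below suggest: "" for a +// private namespace, "host" for the host's namespace, or "container:<name|id>" +// to share another container's pid namespace.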
+type PidMode string + +// IsPrivate indicates whether the container uses its own new pid namespace. +func (n PidMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// IsHost indicates whether the container uses the host's pid namespace. +func (n PidMode) IsHost() bool { + return n == "host" +} + +// IsContainer indicates whether the container uses a container's pid namespace. +func (n PidMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// Valid indicates whether the pid namespace is valid. +func (n PidMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + case "container": + if len(parts) != 2 || parts[1] == "" { + return false + } + default: + return false + } + return true +} + +// Container returns the name of the container whose pid namespace is going to be used. +func (n PidMode) Container() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// DeviceMapping represents the device mapping between the host and the container. +type DeviceMapping struct { + PathOnHost string + PathInContainer string + CgroupPermissions string +} + +// RestartPolicy represents the restart policies of the container. +type RestartPolicy struct { + Name string + MaximumRetryCount int +} + +// IsNone indicates whether the container has the "no" restart policy. +// This means the container will not automatically restart when exiting. +func (rp *RestartPolicy) IsNone() bool { + return rp.Name == "no" || rp.Name == "" +} + +// IsAlways indicates whether the container has the "always" restart policy. +// This means the container will automatically restart regardless of the exit status. +func (rp *RestartPolicy) IsAlways() bool { + return rp.Name == "always" +} + +// IsOnFailure indicates whether the container has the "on-failure" restart policy. +// This means the container will automatically restart if it exits with a non-zero exit status. +func (rp *RestartPolicy) IsOnFailure() bool { + return rp.Name == "on-failure" +} + +// IsUnlessStopped indicates whether the container has the +// "unless-stopped" restart policy. This means the container will +// automatically restart unless the user has put it into a stopped state. +func (rp *RestartPolicy) IsUnlessStopped() bool { + return rp.Name == "unless-stopped" +} + +// IsSame compares two RestartPolicy values to see if they are the same +func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool { + return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount +} + +// LogMode is a type to define the available modes for logging. +// These modes affect how logs are handled when log messages start piling up. +type LogMode string + +// Available logging modes +const ( + LogModeUnset = "" + LogModeBlocking LogMode = "blocking" + LogModeNonBlock LogMode = "non-blocking" +) + +// LogConfig represents the logging configuration of the container. +type LogConfig struct { + Type string + Config map[string]string +} + +// Resources contains a container's resources (cgroups config, ulimits...) +type Resources struct { + // Applicable to all platforms + CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers) + Memory int64 // Memory limit (in bytes) + NanoCPUs int64 `json:"NanoCpus"` // CPU quota in units of 10^-9 CPUs. + + // Applicable to UNIX platforms + CgroupParent string // Parent cgroup.
+ BlkioWeight uint16 // Block IO weight (relative weight vs. other containers) + BlkioWeightDevice []*blkiodev.WeightDevice + BlkioDeviceReadBps []*blkiodev.ThrottleDevice + BlkioDeviceWriteBps []*blkiodev.ThrottleDevice + BlkioDeviceReadIOps []*blkiodev.ThrottleDevice + BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice + CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period + CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota + CPURealtimePeriod int64 `json:"CpuRealtimePeriod"` // CPU real-time period + CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"` // CPU real-time runtime + CpusetCpus string // CpusetCpus 0-2, 0,1 + CpusetMems string // CpusetMems 0-2, 0,1 + Devices []DeviceMapping // List of devices to map inside the container + DeviceCgroupRules []string // List of rules to be added to the device cgroup + DiskQuota int64 // Disk limit (in bytes) + KernelMemory int64 // Kernel memory limit (in bytes) + MemoryReservation int64 // Memory soft limit (in bytes) + MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap + MemorySwappiness *int64 // Tuning container memory swappiness behaviour + OomKillDisable *bool // Whether to disable OOM Killer or not + PidsLimit int64 // Pids limit for a container + Ulimits []*units.Ulimit // List of ulimits to be set in the container + + // Applicable to Windows + CPUCount int64 `json:"CpuCount"` // CPU count + CPUPercent int64 `json:"CpuPercent"` // CPU percent + IOMaximumIOps uint64 // Maximum IOps for the container system drive + IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive +} + +// UpdateConfig holds the mutable attributes of a Container. +// Those attributes can be updated at runtime. +type UpdateConfig struct { + // Contains container's resources (cgroups, ulimits) + Resources + RestartPolicy RestartPolicy +} + +// HostConfig is the non-portable Config structure of a container. +// Here, "non-portable" means "dependent of the host we are running on". +// Portable information *should* appear in Config.
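+// +// A minimal sketch of constructing a HostConfig from this package (values are +// illustrative only, not defaults): +// +// hc := HostConfig{ +// NetworkMode: "bridge", +// RestartPolicy: RestartPolicy{Name: "on-failure", MaximumRetryCount: 3}, +// Resources: Resources{Memory: 64 * 1024 * 1024}, +// }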
+type HostConfig struct { + // Applicable to all platforms + Binds []string // List of volume bindings for this container + ContainerIDFile string // File (path) where the containerId is written + LogConfig LogConfig // Configuration of the logs for this container + NetworkMode NetworkMode // Network mode to use for the container + PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host + RestartPolicy RestartPolicy // Restart policy to be used for the container + AutoRemove bool // Automatically remove container when it exits + VolumeDriver string // Name of the volume driver used to mount volumes + VolumesFrom []string // List of volumes to take from other containers + + // Applicable to UNIX platforms + CapAdd strslice.StrSlice // List of kernel capabilities to add to the container + CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container + DNS []string `json:"Dns"` // List of DNS servers to use + DNSOptions []string `json:"DnsOptions"` // List of DNS options to use + DNSSearch []string `json:"DnsSearch"` // List of DNS search domains to use + ExtraHosts []string // List of extra hosts + GroupAdd []string // List of additional groups that the container process will run as + IpcMode IpcMode // IPC namespace to use for the container + Cgroup CgroupSpec // Cgroup to use for the container + Links []string // List of links (in the name:alias form) + OomScoreAdj int // Container preference for OOM-killing + PidMode PidMode // PID namespace to use for the container + Privileged bool // Is the container in privileged mode + PublishAllPorts bool // Should docker publish all exposed ports for the container + ReadonlyRootfs bool // Is the container root filesystem read-only + SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux. + StorageOpt map[string]string `json:",omitempty"` // Storage driver options per container. + Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container + UTSMode UTSMode // UTS namespace to use for the container + UsernsMode UsernsMode // The user namespace to use for the container + ShmSize int64 // Total shm memory usage + Sysctls map[string]string `json:",omitempty"` // List of namespaced sysctls used for the container + Runtime string `json:",omitempty"` // Runtime to use with this container + + // Applicable to Windows + ConsoleSize [2]uint // Initial console size (height,width) + Isolation Isolation // Isolation technology of the container (e.g. default, hyperv) + + // Contains container's resources (cgroups, ulimits) + Resources + + // Mounts specs used by the container + Mounts []mount.Mount `json:",omitempty"` + + // Run a custom init inside the container; if null, use the daemon's configured settings + Init *bool `json:",omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/container/hostconfig_unix.go b/vendor/github.com/moby/moby/api/types/container/hostconfig_unix.go new file mode 100644 index 000000000..2d664d1c9 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/hostconfig_unix.go @@ -0,0 +1,41 @@ +// +build !windows + +package container + +// IsValid indicates if an isolation technology is valid +func (i Isolation) IsValid() bool { + return i.IsDefault() +} + +// NetworkName returns the name of the network stack.
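+// For example, NetworkMode("container:web").NetworkName() returns "container", +// while the name of a user-defined network is returned unchanged.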
+func (n NetworkMode) NetworkName() string { + if n.IsBridge() { + return "bridge" + } else if n.IsHost() { + return "host" + } else if n.IsContainer() { + return "container" + } else if n.IsNone() { + return "none" + } else if n.IsDefault() { + return "default" + } else if n.IsUserDefined() { + return n.UserDefined() + } + return "" +} + +// IsBridge indicates whether the container uses the bridge network stack +func (n NetworkMode) IsBridge() bool { + return n == "bridge" +} + +// IsHost indicates whether the container uses the host network stack. +func (n NetworkMode) IsHost() bool { + return n == "host" +} + +// IsUserDefined indicates a user-created network +func (n NetworkMode) IsUserDefined() bool { + return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer() +} diff --git a/vendor/github.com/moby/moby/api/types/container/hostconfig_windows.go b/vendor/github.com/moby/moby/api/types/container/hostconfig_windows.go new file mode 100644 index 000000000..469923f7e --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/hostconfig_windows.go @@ -0,0 +1,54 @@ +package container + +import ( + "strings" +) + +// IsBridge indicates whether the container uses the bridge network stack; +// on Windows it is given the name NAT +func (n NetworkMode) IsBridge() bool { + return n == "nat" +} + +// IsHost indicates whether the container uses the host network stack. +// It returns false as this is not supported on Windows. +func (n NetworkMode) IsHost() bool { + return false +} + +// IsUserDefined indicates a user-created network +func (n NetworkMode) IsUserDefined() bool { + return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer() +} + +// IsHyperV indicates the use of a Hyper-V partition for isolation +func (i Isolation) IsHyperV() bool { + return strings.ToLower(string(i)) == "hyperv" +} + +// IsProcess indicates the use of process isolation +func (i Isolation) IsProcess() bool { + return strings.ToLower(string(i)) == "process" +} + +// IsValid indicates if an isolation technology is valid +func (i Isolation) IsValid() bool { + return i.IsDefault() || i.IsHyperV() || i.IsProcess() +} + +// NetworkName returns the name of the network stack. +func (n NetworkMode) NetworkName() string { + if n.IsDefault() { + return "default" + } else if n.IsBridge() { + return "nat" + } else if n.IsNone() { + return "none" + } else if n.IsContainer() { + return "container" + } else if n.IsUserDefined() { + return n.UserDefined() + } + + return "" +} diff --git a/vendor/github.com/moby/moby/api/types/container/waitcondition.go b/vendor/github.com/moby/moby/api/types/container/waitcondition.go new file mode 100644 index 000000000..64820fe35 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/waitcondition.go @@ -0,0 +1,22 @@ +package container + +// WaitCondition is a type used to specify a container state for which +// to wait. +type WaitCondition string + +// Possible WaitCondition Values. +// +// WaitConditionNotRunning (default) is used to wait for any of the non-running +// states: "created", "exited", "dead", "removing", or "removed". +// +// WaitConditionNextExit is used to wait for the next time the state changes +// to a non-running state. If the state is currently "created" or "exited", +// this would cause Wait() to block until either the container runs and exits +// or is removed. +// +// WaitConditionRemoved is used to wait for the container to be removed.
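+// +// A hedged usage sketch, not part of this package: with a Docker Go client +// whose ContainerWait accepts a condition (recent API versions), a caller +// might wait like this: +// +// okCh, errCh := cli.ContainerWait(ctx, containerID, container.WaitConditionNotRunning) +// select { +// case body := <-okCh: +// fmt.Println("exit code:", body.StatusCode) +// case err := <-errCh: +// log.Fatal(err) +// }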
+const ( + WaitConditionNotRunning WaitCondition = "not-running" + WaitConditionNextExit WaitCondition = "next-exit" + WaitConditionRemoved WaitCondition = "removed" +) diff --git a/vendor/github.com/moby/moby/api/types/error_response.go b/vendor/github.com/moby/moby/api/types/error_response.go new file mode 100644 index 000000000..dc942d9d9 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/error_response.go @@ -0,0 +1,13 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ErrorResponse Represents an error. +// swagger:model ErrorResponse +type ErrorResponse struct { + + // The error message. + // Required: true + Message string `json:"message"` +} diff --git a/vendor/github.com/moby/moby/api/types/events/events.go b/vendor/github.com/moby/moby/api/types/events/events.go new file mode 100644 index 000000000..e292565b6 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/events/events.go @@ -0,0 +1,52 @@ +package events + +const ( + // ContainerEventType is the event type that containers generate + ContainerEventType = "container" + // DaemonEventType is the event type that the daemon generates + DaemonEventType = "daemon" + // ImageEventType is the event type that images generate + ImageEventType = "image" + // NetworkEventType is the event type that networks generate + NetworkEventType = "network" + // PluginEventType is the event type that plugins generate + PluginEventType = "plugin" + // VolumeEventType is the event type that volumes generate + VolumeEventType = "volume" + // ServiceEventType is the event type that services generate + ServiceEventType = "service" + // NodeEventType is the event type that nodes generate + NodeEventType = "node" + // SecretEventType is the event type that secrets generate + SecretEventType = "secret" + // ConfigEventType is the event type that configs generate + ConfigEventType = "config" +) + +// Actor describes something that generates events, +// like a container, a network, or a volume. +// It has a defined name and a set of attributes. +// The container attributes are its labels; other actors +// can generate these attributes from other properties. +type Actor struct { + ID string + Attributes map[string]string +} + +// Message represents the information an event contains +type Message struct { + // Deprecated information from JSONMessage. + // With data only in container events. + Status string `json:"status,omitempty"` + ID string `json:"id,omitempty"` + From string `json:"from,omitempty"` + + Type string + Action string + Actor Actor + // Engine events are local scope. Cluster events are swarm scope. + Scope string `json:"scope,omitempty"` + + Time int64 `json:"time,omitempty"` + TimeNano int64 `json:"timeNano,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/filters/parse.go b/vendor/github.com/moby/moby/api/types/filters/parse.go new file mode 100644 index 000000000..beec3d494 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/filters/parse.go @@ -0,0 +1,310 @@ +// Package filters provides helper functions to parse and handle command-line +// filters, used for example in docker ps or docker images commands. +package filters + +import ( + "encoding/json" + "errors" + "fmt" + "regexp" + "strings" + + "github.com/docker/docker/api/types/versions" +) + +// Args stores filter arguments as map key:{map key: bool}.
+// It contains an aggregation of the map of arguments (which are in the form +// of -f 'key=value') based on the key, and stores values for the same key +// in a map with string keys and boolean values. +// e.g. given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu' +// the args will be {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}} +type Args struct { + fields map[string]map[string]bool +} + +// NewArgs initializes a new Args struct. +func NewArgs() Args { + return Args{fields: map[string]map[string]bool{}} +} + +// ParseFlag parses the argument to the filter flag. Like +// +// `docker ps -f 'created=today' -f 'image.name=ubuntu*'` +// +// If the prev map is provided, it is appended to and returned. By default a new +// map is created. +func ParseFlag(arg string, prev Args) (Args, error) { + filters := prev + if len(arg) == 0 { + return filters, nil + } + + if !strings.Contains(arg, "=") { + return filters, ErrBadFormat + } + + f := strings.SplitN(arg, "=", 2) + + name := strings.ToLower(strings.TrimSpace(f[0])) + value := strings.TrimSpace(f[1]) + + filters.Add(name, value) + + return filters, nil +} + +// ErrBadFormat is an error returned in case of bad format for a filter. +var ErrBadFormat = errors.New("bad format of filter (expected name=value)") + +// ToParam packs the Args into a string for easy transport from client to server. +func ToParam(a Args) (string, error) { + // this way we don't URL encode {}, just empty space + if a.Len() == 0 { + return "", nil + } + + buf, err := json.Marshal(a.fields) + if err != nil { + return "", err + } + return string(buf), nil +} + +// ToParamWithVersion packs the Args into a string for easy transport from client to server. +// The generated string will depend on the specified version (corresponding to the API version). +func ToParamWithVersion(version string, a Args) (string, error) { + // this way we don't URL encode {}, just empty space + if a.Len() == 0 { + return "", nil + } + + // for daemons older than v1.10, filter must be of the form map[string][]string + var buf []byte + var err error + if version != "" && versions.LessThan(version, "1.22") { + buf, err = json.Marshal(convertArgsToSlice(a.fields)) + } else { + buf, err = json.Marshal(a.fields) + } + if err != nil { + return "", err + } + return string(buf), nil +} + +// FromParam unpacks the filter Args. +func FromParam(p string) (Args, error) { + if len(p) == 0 { + return NewArgs(), nil + } + + r := strings.NewReader(p) + d := json.NewDecoder(r) + + m := map[string]map[string]bool{} + if err := d.Decode(&m); err != nil { + r.Seek(0, 0) + + // Allow parsing old arguments in slice format, + // because other libraries might be sending them in this format. + deprecated := map[string][]string{} + if deprecatedErr := d.Decode(&deprecated); deprecatedErr == nil { + m = deprecatedArgs(deprecated) + } else { + return NewArgs(), err + } + } + return Args{m}, nil +} + +// Get returns the list of values associated with a field. +// It returns a slice of strings to keep backwards compatibility with old code. +func (filters Args) Get(field string) []string { + values := filters.fields[field] + if values == nil { + return make([]string, 0) + } + slice := make([]string, 0, len(values)) + for key := range values { + slice = append(slice, key) + } + return slice +} + +// Add adds a new value to a filter field.
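+// For example, the Args shown in the type comment above can be built by hand: +// +// args := NewArgs() +// args.Add("label", "label1=1") +// args.Add("label", "label2=2") +// args.Add("image.name", "ubuntu")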
+func (filters Args) Add(name, value string) { + if _, ok := filters.fields[name]; ok { + filters.fields[name][value] = true + } else { + filters.fields[name] = map[string]bool{value: true} + } +} + +// Del removes a value from a filter field. +func (filters Args) Del(name, value string) { + if _, ok := filters.fields[name]; ok { + delete(filters.fields[name], value) + if len(filters.fields[name]) == 0 { + delete(filters.fields, name) + } + } +} + +// Len returns the number of fields in the arguments. +func (filters Args) Len() int { + return len(filters.fields) +} + +// MatchKVList returns true if the values for the specified field match the ones +// from the sources. +// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name': {'ubuntu'}}, +// field is 'label' and sources are {'label1': '1', 'label2': '1'} +// it returns true. +func (filters Args) MatchKVList(field string, sources map[string]string) bool { + fieldValues := filters.fields[field] + + // do not filter if there is no filter set or cannot determine filter + if len(fieldValues) == 0 { + return true + } + + if len(sources) == 0 { + return false + } + + for name2match := range fieldValues { + testKV := strings.SplitN(name2match, "=", 2) + + v, ok := sources[testKV[0]] + if !ok { + return false + } + if len(testKV) == 2 && testKV[1] != v { + return false + } + } + + return true +} + +// Match returns true if the values for the specified field match the source string +// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name': {'ubuntu'}}, +// field is 'image.name' and source is 'ubuntu' +// it returns true. +func (filters Args) Match(field, source string) bool { + if filters.ExactMatch(field, source) { + return true + } + + fieldValues := filters.fields[field] + for name2match := range fieldValues { + match, err := regexp.MatchString(name2match, source) + if err != nil { + continue + } + if match { + return true + } + } + return false +} + +// ExactMatch returns true if the source matches exactly one of the filters. +func (filters Args) ExactMatch(field, source string) bool { + fieldValues, ok := filters.fields[field] + // do not filter if there is no filter set or cannot determine filter + if !ok || len(fieldValues) == 0 { + return true + } + + // try to match full name value to avoid O(N) regular expression matching + return fieldValues[source] +} + +// UniqueExactMatch returns true if there is only one filter and the source matches exactly this one. +func (filters Args) UniqueExactMatch(field, source string) bool { + fieldValues := filters.fields[field] + // do not filter if there is no filter set or cannot determine filter + if len(fieldValues) == 0 { + return true + } + if len(filters.fields[field]) != 1 { + return false + } + + // try to match full name value to avoid O(N) regular expression matching + return fieldValues[source] +} + +// FuzzyMatch returns true if the source matches exactly one of the filters, +// or the source has one of the filters as a prefix. +func (filters Args) FuzzyMatch(field, source string) bool { + if filters.ExactMatch(field, source) { + return true + } + + fieldValues := filters.fields[field] + for prefix := range fieldValues { + if strings.HasPrefix(source, prefix) { + return true + } + } + return false +} + +// Include returns true if the name of the field to filter is in the filters. +func (filters Args) Include(field string) bool { + _, ok := filters.fields[field] + return ok +} + +// Validate ensures that all the fields in the filter are valid.
+// It returns an error as soon as it finds an invalid field. +func (filters Args) Validate(accepted map[string]bool) error { + for name := range filters.fields { + if !accepted[name] { + return fmt.Errorf("Invalid filter '%s'", name) + } + } + return nil +} + +// WalkValues iterates over the list of filtered values for a field. +// It stops the iteration if it finds an error and it returns that error. +func (filters Args) WalkValues(field string, op func(value string) error) error { + if _, ok := filters.fields[field]; !ok { + return nil + } + for v := range filters.fields[field] { + if err := op(v); err != nil { + return err + } + } + return nil +} + +func deprecatedArgs(d map[string][]string) map[string]map[string]bool { + m := map[string]map[string]bool{} + for k, v := range d { + values := map[string]bool{} + for _, vv := range v { + values[vv] = true + } + m[k] = values + } + return m +} + +func convertArgsToSlice(f map[string]map[string]bool) map[string][]string { + m := map[string][]string{} + for k, v := range f { + values := []string{} + for kk := range v { + if v[kk] { + values = append(values, kk) + } + } + m[k] = values + } + return m +} diff --git a/vendor/github.com/moby/moby/api/types/filters/parse_test.go b/vendor/github.com/moby/moby/api/types/filters/parse_test.go new file mode 100644 index 000000000..ccd1684a0 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/filters/parse_test.go @@ -0,0 +1,417 @@ +package filters + +import ( + "errors" + "testing" +) + +func TestParseArgs(t *testing.T) { + // equivalent of `docker ps -f 'created=today' -f 'image.name=ubuntu*' -f 'image.name=*untu'` + flagArgs := []string{ + "created=today", + "image.name=ubuntu*", + "image.name=*untu", + } + var ( + args = NewArgs() + err error + ) + for i := range flagArgs { + args, err = ParseFlag(flagArgs[i], args) + if err != nil { + t.Errorf("failed to parse %s: %s", flagArgs[i], err) + } + } + if len(args.Get("created")) != 1 { + t.Error("failed to set this arg") + } + if len(args.Get("image.name")) != 2 { + t.Error("the args should have collapsed") + } +} + +func TestParseArgsEdgeCase(t *testing.T) { + var filters Args + args, err := ParseFlag("", filters) + if err != nil { + t.Fatal(err) + } + if args.Len() != 0 { + t.Fatalf("Expected an empty Args (map), got %v", args) + } + if args, err = ParseFlag("anything", args); err == nil || err != ErrBadFormat { + t.Fatalf("Expected ErrBadFormat, got %v", err) + } +} + +func TestToParam(t *testing.T) { + fields := map[string]map[string]bool{ + "created": {"today": true}, + "image.name": {"ubuntu*": true, "*untu": true}, + } + a := Args{fields: fields} + + _, err := ToParam(a) + if err != nil { + t.Errorf("failed to marshal the filters: %s", err) + } +} + +func TestToParamWithVersion(t *testing.T) { + fields := map[string]map[string]bool{ + "created": {"today": true}, + "image.name": {"ubuntu*": true, "*untu": true}, + } + a := Args{fields: fields} + + str1, err := ToParamWithVersion("1.21", a) + if err != nil { + t.Errorf("failed to marshal the filters with version < 1.22: %s", err) + } + str2, err := ToParamWithVersion("1.22", a) + if err != nil { + t.Errorf("failed to marshal the filters with version >= 1.22: %s", err) + } + if str1 != `{"created":["today"],"image.name":["*untu","ubuntu*"]}` && + str1 != `{"created":["today"],"image.name":["ubuntu*","*untu"]}` { + t.Errorf("incorrectly marshaled the filters: %s", str1) + } + if str2 != `{"created":{"today":true},"image.name":{"*untu":true,"ubuntu*":true}}` && + str2 != 
`{"created":{"today":true},"image.name":{"ubuntu*":true,"*untu":true}}` { + t.Errorf("incorrectly marshaled the filters: %s", str2) + } +} + +func TestFromParam(t *testing.T) { + invalids := []string{ + "anything", + "['a','list']", + "{'key': 'value'}", + `{"key": "value"}`, + } + valid := map[*Args][]string{ + {fields: map[string]map[string]bool{"key": {"value": true}}}: { + `{"key": ["value"]}`, + `{"key": {"value": true}}`, + }, + {fields: map[string]map[string]bool{"key": {"value1": true, "value2": true}}}: { + `{"key": ["value1", "value2"]}`, + `{"key": {"value1": true, "value2": true}}`, + }, + {fields: map[string]map[string]bool{"key1": {"value1": true}, "key2": {"value2": true}}}: { + `{"key1": ["value1"], "key2": ["value2"]}`, + `{"key1": {"value1": true}, "key2": {"value2": true}}`, + }, + } + + for _, invalid := range invalids { + if _, err := FromParam(invalid); err == nil { + t.Fatalf("Expected an error with %v, got nothing", invalid) + } + } + + for expectedArgs, matchers := range valid { + for _, json := range matchers { + args, err := FromParam(json) + if err != nil { + t.Fatal(err) + } + if args.Len() != expectedArgs.Len() { + t.Fatalf("Expected %v, go %v", expectedArgs, args) + } + for key, expectedValues := range expectedArgs.fields { + values := args.Get(key) + + if len(values) != len(expectedValues) { + t.Fatalf("Expected %v, go %v", expectedArgs, args) + } + + for _, v := range values { + if !expectedValues[v] { + t.Fatalf("Expected %v, go %v", expectedArgs, args) + } + } + } + } + } +} + +func TestEmpty(t *testing.T) { + a := Args{} + v, err := ToParam(a) + if err != nil { + t.Errorf("failed to marshal the filters: %s", err) + } + v1, err := FromParam(v) + if err != nil { + t.Errorf("%s", err) + } + if a.Len() != v1.Len() { + t.Error("these should both be empty sets") + } +} + +func TestArgsMatchKVListEmptySources(t *testing.T) { + args := NewArgs() + if !args.MatchKVList("created", map[string]string{}) { + t.Fatalf("Expected true for (%v,created), got true", args) + } + + args = Args{map[string]map[string]bool{"created": {"today": true}}} + if args.MatchKVList("created", map[string]string{}) { + t.Fatalf("Expected false for (%v,created), got true", args) + } +} + +func TestArgsMatchKVList(t *testing.T) { + // Not empty sources + sources := map[string]string{ + "key1": "value1", + "key2": "value2", + "key3": "value3", + } + + matches := map[*Args]string{ + {}: "field", + {map[string]map[string]bool{ + "created": {"today": true}, + "labels": {"key1": true}}, + }: "labels", + {map[string]map[string]bool{ + "created": {"today": true}, + "labels": {"key1=value1": true}}, + }: "labels", + } + + for args, field := range matches { + if args.MatchKVList(field, sources) != true { + t.Fatalf("Expected true for %v on %v, got false", sources, args) + } + } + + differs := map[*Args]string{ + {map[string]map[string]bool{ + "created": {"today": true}}, + }: "created", + {map[string]map[string]bool{ + "created": {"today": true}, + "labels": {"key4": true}}, + }: "labels", + {map[string]map[string]bool{ + "created": {"today": true}, + "labels": {"key1=value3": true}}, + }: "labels", + } + + for args, field := range differs { + if args.MatchKVList(field, sources) != false { + t.Fatalf("Expected false for %v on %v, got true", sources, args) + } + } +} + +func TestArgsMatch(t *testing.T) { + source := "today" + + matches := map[*Args]string{ + {}: "field", + {map[string]map[string]bool{ + "created": {"today": true}}, + }: "today", + {map[string]map[string]bool{ + "created": {"to*": 
true}}, + }: "created", + {map[string]map[string]bool{ + "created": {"to(.*)": true}}, + }: "created", + {map[string]map[string]bool{ + "created": {"tod": true}}, + }: "created", + {map[string]map[string]bool{ + "created": {"anything": true, "to*": true}}, + }: "created", + } + + for args, field := range matches { + if args.Match(field, source) != true { + t.Fatalf("Expected true for %v on %v, got false", source, args) + } + } + + differs := map[*Args]string{ + {map[string]map[string]bool{ + "created": {"tomorrow": true}}, + }: "created", + {map[string]map[string]bool{ + "created": {"to(day": true}}, + }: "created", + {map[string]map[string]bool{ + "created": {"tom(.*)": true}}, + }: "created", + {map[string]map[string]bool{ + "created": {"tom": true}}, + }: "created", + {map[string]map[string]bool{ + "created": {"today1": true}, + "labels": {"today": true}}, + }: "created", + } + + for args, field := range differs { + if args.Match(field, source) != false { + t.Fatalf("Expected false for %v on %v, got true", source, args) + } + } +} + +func TestAdd(t *testing.T) { + f := NewArgs() + f.Add("status", "running") + v := f.fields["status"] + if len(v) != 1 || !v["running"] { + t.Fatalf("Expected to include a running status, got %v", v) + } + + f.Add("status", "paused") + if len(v) != 2 || !v["paused"] { + t.Fatalf("Expected to include a paused status, got %v", v) + } +} + +func TestDel(t *testing.T) { + f := NewArgs() + f.Add("status", "running") + f.Del("status", "running") + v := f.fields["status"] + if v["running"] { + t.Fatal("Expected to not include a running status filter, got true") + } +} + +func TestLen(t *testing.T) { + f := NewArgs() + if f.Len() != 0 { + t.Fatal("Expected to not include any field") + } + f.Add("status", "running") + if f.Len() != 1 { + t.Fatal("Expected to include one field") + } +} + +func TestExactMatch(t *testing.T) { + f := NewArgs() + + if !f.ExactMatch("status", "running") { + t.Fatal("Expected to match `running` when there are no filters, got false") + } + + f.Add("status", "running") + f.Add("status", "pause*") + + if !f.ExactMatch("status", "running") { + t.Fatal("Expected to match `running` with one of the filters, got false") + } + + if f.ExactMatch("status", "paused") { + t.Fatal("Expected to not match `paused` with one of the filters, got true") + } +} + +func TestOnlyOneExactMatch(t *testing.T) { + f := NewArgs() + + if !f.UniqueExactMatch("status", "running") { + t.Fatal("Expected to match `running` when there are no filters, got false") + } + + f.Add("status", "running") + + if !f.UniqueExactMatch("status", "running") { + t.Fatal("Expected to match `running` with one of the filters, got false") + } + + if f.UniqueExactMatch("status", "paused") { + t.Fatal("Expected to not match `paused` with one of the filters, got true") + } + + f.Add("status", "pause") + if f.UniqueExactMatch("status", "running") { + t.Fatal("Expected to not match only `running` with two filters, got true") + } +} + +func TestInclude(t *testing.T) { + f := NewArgs() + if f.Include("status") { + t.Fatal("Expected to not include a status key, got true") + } + f.Add("status", "running") + if !f.Include("status") { + t.Fatal("Expected to include a status key, got false") + } +} + +func TestValidate(t *testing.T) { + f := NewArgs() + f.Add("status", "running") + + valid := map[string]bool{ + "status": true, + "dangling": true, + } + + if err := f.Validate(valid); err != nil { + t.Fatal(err) + } + + f.Add("bogus", "running") + if err := f.Validate(valid); err == nil { + 
t.Fatal("Expected to return an error, got nil") + } +} + +func TestWalkValues(t *testing.T) { + f := NewArgs() + f.Add("status", "running") + f.Add("status", "paused") + + f.WalkValues("status", func(value string) error { + if value != "running" && value != "paused" { + t.Fatalf("Unexpected value %s", value) + } + return nil + }) + + err := f.WalkValues("status", func(value string) error { + return errors.New("return") + }) + if err == nil { + t.Fatal("Expected to get an error, got nil") + } + + err = f.WalkValues("foo", func(value string) error { + return errors.New("return") + }) + if err != nil { + t.Fatalf("Expected to not iterate when the field doesn't exist, got %v", err) + } +} + +func TestFuzzyMatch(t *testing.T) { + f := NewArgs() + f.Add("container", "foo") + + cases := map[string]bool{ + "foo": true, + "foobar": true, + "barfoo": false, + "bar": false, + } + for source, match := range cases { + got := f.FuzzyMatch("container", source) + if got != match { + t.Fatalf("Expected %v, got %v: %s", match, got, source) + } + } +} diff --git a/vendor/github.com/moby/moby/api/types/graph_driver_data.go b/vendor/github.com/moby/moby/api/types/graph_driver_data.go new file mode 100644 index 000000000..4d9bf1c62 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/graph_driver_data.go @@ -0,0 +1,17 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// GraphDriverData Information about a container's graph driver. +// swagger:model GraphDriverData +type GraphDriverData struct { + + // data + // Required: true + Data map[string]string `json:"Data"` + + // name + // Required: true + Name string `json:"Name"` +} diff --git a/vendor/github.com/moby/moby/api/types/id_response.go b/vendor/github.com/moby/moby/api/types/id_response.go new file mode 100644 index 000000000..7592d2f8b --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/id_response.go @@ -0,0 +1,13 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// IDResponse Response to an API call that returns just an Id +// swagger:model IdResponse +type IDResponse struct { + + // The id of the newly created object. 
+ // Required: true + ID string `json:"Id"` +} diff --git a/vendor/github.com/moby/moby/api/types/image/image_history.go b/vendor/github.com/moby/moby/api/types/image/image_history.go new file mode 100644 index 000000000..0dd30c729 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/image/image_history.go @@ -0,0 +1,37 @@ +package image + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// HistoryResponseItem history response item +// swagger:model HistoryResponseItem +type HistoryResponseItem struct { + + // comment + // Required: true + Comment string `json:"Comment"` + + // created + // Required: true + Created int64 `json:"Created"` + + // created by + // Required: true + CreatedBy string `json:"CreatedBy"` + + // Id + // Required: true + ID string `json:"Id"` + + // size + // Required: true + Size int64 `json:"Size"` + + // tags + // Required: true + Tags []string `json:"Tags"` +} diff --git a/vendor/github.com/moby/moby/api/types/image_delete_response_item.go b/vendor/github.com/moby/moby/api/types/image_delete_response_item.go new file mode 100644 index 000000000..b9a65a0d8 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/image_delete_response_item.go @@ -0,0 +1,15 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ImageDeleteResponseItem image delete response item +// swagger:model ImageDeleteResponseItem +type ImageDeleteResponseItem struct { + + // The image ID of an image that was deleted + Deleted string `json:"Deleted,omitempty"` + + // The image ID of an image that was untagged + Untagged string `json:"Untagged,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/image_summary.go b/vendor/github.com/moby/moby/api/types/image_summary.go new file mode 100644 index 000000000..e145b3dcf --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/image_summary.go @@ -0,0 +1,49 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ImageSummary image summary +// swagger:model ImageSummary +type ImageSummary struct { + + // containers + // Required: true + Containers int64 `json:"Containers"` + + // created + // Required: true + Created int64 `json:"Created"` + + // Id + // Required: true + ID string `json:"Id"` + + // labels + // Required: true + Labels map[string]string `json:"Labels"` + + // parent Id + // Required: true + ParentID string `json:"ParentId"` + + // repo digests + // Required: true + RepoDigests []string `json:"RepoDigests"` + + // repo tags + // Required: true + RepoTags []string `json:"RepoTags"` + + // shared size + // Required: true + SharedSize int64 `json:"SharedSize"` + + // size + // Required: true + Size int64 `json:"Size"` + + // virtual size + // Required: true + VirtualSize int64 `json:"VirtualSize"` +} diff --git a/vendor/github.com/moby/moby/api/types/mount/mount.go b/vendor/github.com/moby/moby/api/types/mount/mount.go new file mode 100644 index 000000000..2744f85d6 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/mount/mount.go @@ -0,0 +1,128 @@ +package mount + +import ( + "os" +) + +// Type represents the type of a mount. 
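+// A Mount using one of these types might look like the following sketch +// (field names as defined below; values are illustrative only): +// +// m := Mount{Type: TypeBind, Source: "/var/data", Target: "/data", ReadOnly: true}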
+type Type string + +// Type constants +const ( + // TypeBind is the type for mounting host dir + TypeBind Type = "bind" + // TypeVolume is the type for remote storage volumes + TypeVolume Type = "volume" + // TypeTmpfs is the type for mounting tmpfs + TypeTmpfs Type = "tmpfs" +) + +// Mount represents a mount (volume). +type Mount struct { + Type Type `json:",omitempty"` + // Source specifies the name of the mount. Depending on mount type, this + // may be a volume name or a host path, or even ignored. + // Source is not supported for tmpfs (must be an empty value) + Source string `json:",omitempty"` + Target string `json:",omitempty"` + ReadOnly bool `json:",omitempty"` + Consistency Consistency `json:",omitempty"` + + BindOptions *BindOptions `json:",omitempty"` + VolumeOptions *VolumeOptions `json:",omitempty"` + TmpfsOptions *TmpfsOptions `json:",omitempty"` +} + +// Propagation represents the propagation of a mount. +type Propagation string + +const ( + // PropagationRPrivate RPRIVATE + PropagationRPrivate Propagation = "rprivate" + // PropagationPrivate PRIVATE + PropagationPrivate Propagation = "private" + // PropagationRShared RSHARED + PropagationRShared Propagation = "rshared" + // PropagationShared SHARED + PropagationShared Propagation = "shared" + // PropagationRSlave RSLAVE + PropagationRSlave Propagation = "rslave" + // PropagationSlave SLAVE + PropagationSlave Propagation = "slave" +) + +// Propagations is the list of all valid mount propagations +var Propagations = []Propagation{ + PropagationRPrivate, + PropagationPrivate, + PropagationRShared, + PropagationShared, + PropagationRSlave, + PropagationSlave, +} + +// Consistency represents the consistency requirements of a mount. +type Consistency string + +const ( + // ConsistencyFull guarantees bind-mount-like consistency + ConsistencyFull Consistency = "consistent" + // ConsistencyCached mounts can cache read data and FS structure + ConsistencyCached Consistency = "cached" + // ConsistencyDelegated mounts can cache read and written data and structure + ConsistencyDelegated Consistency = "delegated" + // ConsistencyDefault provides "consistent" behavior unless overridden + ConsistencyDefault Consistency = "default" +) + +// BindOptions defines options specific to mounts of type "bind". +type BindOptions struct { + Propagation Propagation `json:",omitempty"` +} + +// VolumeOptions represents the options for a mount of type volume. +type VolumeOptions struct { + NoCopy bool `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + DriverConfig *Driver `json:",omitempty"` +} + +// Driver represents a volume driver. +type Driver struct { + Name string `json:",omitempty"` + Options map[string]string `json:",omitempty"` +} + +// TmpfsOptions defines options specific to mounts of type "tmpfs". +type TmpfsOptions struct { + // Size sets the size of the tmpfs, in bytes. + // + // This will be converted to an operating system specific value + // depending on the host. For example, on linux, it will be converted to + // use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with + // docker, uses a straight byte value. + // + // Percentages are not supported. + SizeBytes int64 `json:",omitempty"` + // Mode of the tmpfs upon creation + Mode os.FileMode `json:",omitempty"` + + // TODO(stevvooe): There are several more tmpfs flags, specified in the + // daemon, that are accepted. Only the most basic are added for now. 
+ // + // From docker/docker/pkg/mount/flags.go: + // + // var validFlags = map[string]bool{ + // "": true, + // "size": true, X + // "mode": true, X + // "uid": true, + // "gid": true, + // "nr_inodes": true, + // "nr_blocks": true, + // "mpol": true, + // } + // + // Some of these may be straightforward to add, but others, such as + // uid/gid have implications in a clustered system. +} diff --git a/vendor/github.com/moby/moby/api/types/network/network.go b/vendor/github.com/moby/moby/api/types/network/network.go new file mode 100644 index 000000000..7c7dbacc8 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/network.go @@ -0,0 +1,108 @@ +package network + +// Address represents an IP address +type Address struct { + Addr string + PrefixLen int +} + +// IPAM represents IP Address Management +type IPAM struct { + Driver string + Options map[string]string //Per network IPAM driver options + Config []IPAMConfig +} + +// IPAMConfig represents IPAM configurations +type IPAMConfig struct { + Subnet string `json:",omitempty"` + IPRange string `json:",omitempty"` + Gateway string `json:",omitempty"` + AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` +} + +// EndpointIPAMConfig represents IPAM configurations for the endpoint +type EndpointIPAMConfig struct { + IPv4Address string `json:",omitempty"` + IPv6Address string `json:",omitempty"` + LinkLocalIPs []string `json:",omitempty"` +} + +// Copy makes a copy of the endpoint ipam config +func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig { + cfgCopy := *cfg + cfgCopy.LinkLocalIPs = make([]string, 0, len(cfg.LinkLocalIPs)) + cfgCopy.LinkLocalIPs = append(cfgCopy.LinkLocalIPs, cfg.LinkLocalIPs...) + return &cfgCopy +} + +// PeerInfo represents one peer of an overlay network +type PeerInfo struct { + Name string + IP string +} + +// EndpointSettings stores the network endpoint details +type EndpointSettings struct { + // Configurations + IPAMConfig *EndpointIPAMConfig + Links []string + Aliases []string + // Operational data + NetworkID string + EndpointID string + Gateway string + IPAddress string + IPPrefixLen int + IPv6Gateway string + GlobalIPv6Address string + GlobalIPv6PrefixLen int + MacAddress string + DriverOpts map[string]string +} + +// Task carries the information about one backend task +type Task struct { + Name string + EndpointID string + EndpointIP string + Info map[string]string +} + +// ServiceInfo represents service parameters with the list of service's tasks +type ServiceInfo struct { + VIP string + Ports []string + LocalLBIndex int + Tasks []Task +} + +// Copy makes a deep copy of `EndpointSettings` +func (es *EndpointSettings) Copy() *EndpointSettings { + epCopy := *es + if es.IPAMConfig != nil { + epCopy.IPAMConfig = es.IPAMConfig.Copy() + } + + if es.Links != nil { + links := make([]string, 0, len(es.Links)) + epCopy.Links = append(links, es.Links...) + } + + if es.Aliases != nil { + aliases := make([]string, 0, len(es.Aliases)) + epCopy.Aliases = append(aliases, es.Aliases...) 
+ } + return &epCopy +} + +// NetworkingConfig represents the container's networking configuration for each of its interfaces +// Carries the networking configs specified in the `docker run` and `docker network connect` commands +type NetworkingConfig struct { + EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network +} + +// ConfigReference specifies the source which provides a network's configuration +type ConfigReference struct { + Network string +} diff --git a/vendor/github.com/moby/moby/api/types/plugin.go b/vendor/github.com/moby/moby/api/types/plugin.go new file mode 100644 index 000000000..ed3c2c26e --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/plugin.go @@ -0,0 +1,200 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Plugin A plugin for the Engine API +// swagger:model Plugin +type Plugin struct { + + // config + // Required: true + Config PluginConfig `json:"Config"` + + // True when the plugin is running. False when the plugin is not running, only installed. + // Required: true + Enabled bool `json:"Enabled"` + + // Id + ID string `json:"Id,omitempty"` + + // name + // Required: true + Name string `json:"Name"` + + // plugin remote reference used to push/pull the plugin + PluginReference string `json:"PluginReference,omitempty"` + + // settings + // Required: true + Settings PluginSettings `json:"Settings"` +} + +// PluginConfig The config of a plugin. +// swagger:model PluginConfig +type PluginConfig struct { + + // args + // Required: true + Args PluginConfigArgs `json:"Args"` + + // description + // Required: true + Description string `json:"Description"` + + // Docker Version used to create the plugin + DockerVersion string `json:"DockerVersion,omitempty"` + + // documentation + // Required: true + Documentation string `json:"Documentation"` + + // entrypoint + // Required: true + Entrypoint []string `json:"Entrypoint"` + + // env + // Required: true + Env []PluginEnv `json:"Env"` + + // interface + // Required: true + Interface PluginConfigInterface `json:"Interface"` + + // ipc host + // Required: true + IpcHost bool `json:"IpcHost"` + + // linux + // Required: true + Linux PluginConfigLinux `json:"Linux"` + + // mounts + // Required: true + Mounts []PluginMount `json:"Mounts"` + + // network + // Required: true + Network PluginConfigNetwork `json:"Network"` + + // pid host + // Required: true + PidHost bool `json:"PidHost"` + + // propagated mount + // Required: true + PropagatedMount string `json:"PropagatedMount"` + + // user + User PluginConfigUser `json:"User,omitempty"` + + // work dir + // Required: true + WorkDir string `json:"WorkDir"` + + // rootfs + Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"` +} + +// PluginConfigArgs plugin config args +// swagger:model PluginConfigArgs +type PluginConfigArgs struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // value + // Required: true + Value []string `json:"Value"` +} + +// PluginConfigInterface The interface between Docker and the plugin +// swagger:model PluginConfigInterface +type PluginConfigInterface struct { + + // socket + // Required: true + Socket string `json:"Socket"` + + // types + // Required: true + Types []PluginInterfaceType `json:"Types"` +} + +// PluginConfigLinux 
plugin config linux +// swagger:model PluginConfigLinux +type PluginConfigLinux struct { + + // allow all devices + // Required: true + AllowAllDevices bool `json:"AllowAllDevices"` + + // capabilities + // Required: true + Capabilities []string `json:"Capabilities"` + + // devices + // Required: true + Devices []PluginDevice `json:"Devices"` +} + +// PluginConfigNetwork plugin config network +// swagger:model PluginConfigNetwork +type PluginConfigNetwork struct { + + // type + // Required: true + Type string `json:"Type"` +} + +// PluginConfigRootfs plugin config rootfs +// swagger:model PluginConfigRootfs +type PluginConfigRootfs struct { + + // diff ids + DiffIds []string `json:"diff_ids"` + + // type + Type string `json:"type,omitempty"` +} + +// PluginConfigUser plugin config user +// swagger:model PluginConfigUser +type PluginConfigUser struct { + + // g ID + GID uint32 `json:"GID,omitempty"` + + // UID + UID uint32 `json:"UID,omitempty"` +} + +// PluginSettings Settings that can be modified by users. +// swagger:model PluginSettings +type PluginSettings struct { + + // args + // Required: true + Args []string `json:"Args"` + + // devices + // Required: true + Devices []PluginDevice `json:"Devices"` + + // env + // Required: true + Env []string `json:"Env"` + + // mounts + // Required: true + Mounts []PluginMount `json:"Mounts"` +} diff --git a/vendor/github.com/moby/moby/api/types/plugin_device.go b/vendor/github.com/moby/moby/api/types/plugin_device.go new file mode 100644 index 000000000..569901067 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/plugin_device.go @@ -0,0 +1,25 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginDevice plugin device +// swagger:model PluginDevice +type PluginDevice struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // path + // Required: true + Path *string `json:"Path"` + + // settable + // Required: true + Settable []string `json:"Settable"` +} diff --git a/vendor/github.com/moby/moby/api/types/plugin_env.go b/vendor/github.com/moby/moby/api/types/plugin_env.go new file mode 100644 index 000000000..32962dc2e --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/plugin_env.go @@ -0,0 +1,25 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginEnv plugin env +// swagger:model PluginEnv +type PluginEnv struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // value + // Required: true + Value *string `json:"Value"` +} diff --git a/vendor/github.com/moby/moby/api/types/plugin_interface_type.go b/vendor/github.com/moby/moby/api/types/plugin_interface_type.go new file mode 100644 index 000000000..c82f204e8 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/plugin_interface_type.go @@ -0,0 +1,21 @@ +package types + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// PluginInterfaceType plugin interface type +// swagger:model PluginInterfaceType +type PluginInterfaceType struct { + + // capability + // Required: true + Capability string `json:"Capability"` + + // prefix + // Required: true + Prefix string `json:"Prefix"` + + // version + // Required: true + Version string `json:"Version"` +} diff --git a/vendor/github.com/moby/moby/api/types/plugin_mount.go b/vendor/github.com/moby/moby/api/types/plugin_mount.go new file mode 100644 index 000000000..5c031cf8b --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/plugin_mount.go @@ -0,0 +1,37 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginMount plugin mount +// swagger:model PluginMount +type PluginMount struct { + + // description + // Required: true + Description string `json:"Description"` + + // destination + // Required: true + Destination string `json:"Destination"` + + // name + // Required: true + Name string `json:"Name"` + + // options + // Required: true + Options []string `json:"Options"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // source + // Required: true + Source *string `json:"Source"` + + // type + // Required: true + Type string `json:"Type"` +} diff --git a/vendor/github.com/moby/moby/api/types/plugin_responses.go b/vendor/github.com/moby/moby/api/types/plugin_responses.go new file mode 100644 index 000000000..1c6461f2d --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/plugin_responses.go @@ -0,0 +1,79 @@ +package types + +import ( + "encoding/json" + "fmt" + "sort" +) + +// PluginsListResponse contains the response for the Engine API +type PluginsListResponse []*Plugin + +const ( + authzDriver = "AuthzDriver" + graphDriver = "GraphDriver" + ipamDriver = "IpamDriver" + networkDriver = "NetworkDriver" + volumeDriver = "VolumeDriver" +) + +// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType +func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error { + versionIndex := len(p) + prefixIndex := 0 + if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' { + return fmt.Errorf("%q is not a plugin interface type", p) + } + p = p[1 : len(p)-1] +loop: + for i, b := range p { + switch b { + case '.': + prefixIndex = i + case '/': + versionIndex = i + break loop + } + } + t.Prefix = string(p[:prefixIndex]) + t.Capability = string(p[prefixIndex+1 : versionIndex]) + if versionIndex < len(p) { + t.Version = string(p[versionIndex+1:]) + } + return nil +} + +// MarshalJSON implements json.Marshaler for PluginInterfaceType +func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) { + return json.Marshal(t.String()) +} + +// String implements fmt.Stringer for PluginInterfaceType +func (t PluginInterfaceType) String() string { + return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version) +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. 
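+//
+// PluginPrivileges (below) implements sort.Interface; note that Swap also
+// sorts the Value slices of the two elements it exchanges, so sorting
+// canonicalizes the exchanged entries as well. A small sketch with made-up
+// values:
+//
+//	privs := PluginPrivileges{
+//		{Name: "network", Value: []string{"host"}},
+//		{Name: "mount", Value: []string{"/var", "/etc"}},
+//	}
+//	sort.Sort(privs)
+//	// privs[0].Name == "mount", privs[0].Value == []string{"/etc", "/var"}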
+type PluginPrivilege struct { + Name string + Description string + Value []string +} + +// PluginPrivileges is a list of PluginPrivilege +type PluginPrivileges []PluginPrivilege + +func (s PluginPrivileges) Len() int { + return len(s) +} + +func (s PluginPrivileges) Less(i, j int) bool { + return s[i].Name < s[j].Name +} + +func (s PluginPrivileges) Swap(i, j int) { + sort.Strings(s[i].Value) + sort.Strings(s[j].Value) + s[i], s[j] = s[j], s[i] +} diff --git a/vendor/github.com/moby/moby/api/types/plugins/logdriver/entry.pb.go b/vendor/github.com/moby/moby/api/types/plugins/logdriver/entry.pb.go new file mode 100644 index 000000000..5d7d8b4c4 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/plugins/logdriver/entry.pb.go @@ -0,0 +1,449 @@ +// Code generated by protoc-gen-gogo. +// source: entry.proto +// DO NOT EDIT! + +/* + Package logdriver is a generated protocol buffer package. + + It is generated from these files: + entry.proto + + It has these top-level messages: + LogEntry +*/ +package logdriver + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type LogEntry struct { + Source string `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"` + TimeNano int64 `protobuf:"varint,2,opt,name=time_nano,json=timeNano,proto3" json:"time_nano,omitempty"` + Line []byte `protobuf:"bytes,3,opt,name=line,proto3" json:"line,omitempty"` + Partial bool `protobuf:"varint,4,opt,name=partial,proto3" json:"partial,omitempty"` +} + +func (m *LogEntry) Reset() { *m = LogEntry{} } +func (m *LogEntry) String() string { return proto.CompactTextString(m) } +func (*LogEntry) ProtoMessage() {} +func (*LogEntry) Descriptor() ([]byte, []int) { return fileDescriptorEntry, []int{0} } + +func (m *LogEntry) GetSource() string { + if m != nil { + return m.Source + } + return "" +} + +func (m *LogEntry) GetTimeNano() int64 { + if m != nil { + return m.TimeNano + } + return 0 +} + +func (m *LogEntry) GetLine() []byte { + if m != nil { + return m.Line + } + return nil +} + +func (m *LogEntry) GetPartial() bool { + if m != nil { + return m.Partial + } + return false +} + +func init() { + proto.RegisterType((*LogEntry)(nil), "LogEntry") +} +func (m *LogEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LogEntry) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Source) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintEntry(dAtA, i, uint64(len(m.Source))) + i += copy(dAtA[i:], m.Source) + } + if m.TimeNano != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintEntry(dAtA, i, uint64(m.TimeNano)) + } + if len(m.Line) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintEntry(dAtA, i, uint64(len(m.Line))) + i += copy(dAtA[i:], m.Line) + } + if m.Partial { + dAtA[i] = 0x20 + i++ + if m.Partial { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func encodeFixed64Entry(dAtA []byte, offset int, v uint64) 
int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Entry(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintEntry(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *LogEntry) Size() (n int) { + var l int + _ = l + l = len(m.Source) + if l > 0 { + n += 1 + l + sovEntry(uint64(l)) + } + if m.TimeNano != 0 { + n += 1 + sovEntry(uint64(m.TimeNano)) + } + l = len(m.Line) + if l > 0 { + n += 1 + l + sovEntry(uint64(l)) + } + if m.Partial { + n += 2 + } + return n +} + +func sovEntry(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozEntry(x uint64) (n int) { + return sovEntry(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *LogEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LogEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LogEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEntry + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Source = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeNano", wireType) + } + m.TimeNano = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimeNano |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Line", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthEntry + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Line = append(m.Line[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Line == nil { + m.Line = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Partial", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Partial = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipEntry(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEntry + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEntry(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEntry + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEntry + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEntry + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthEntry + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEntry + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipEntry(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthEntry = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEntry = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("entry.proto", fileDescriptorEntry) } + +var fileDescriptorEntry = []byte{ + // 149 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x4e, 0xcd, 0x2b, 0x29, + 0xaa, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0xca, 0xe5, 0xe2, 0xf0, 0xc9, 0x4f, 0x77, 0x05, + 0x89, 0x08, 0x89, 0x71, 0xb1, 0x15, 0xe7, 0x97, 0x16, 0x25, 0xa7, 0x4a, 0x30, 0x2a, 0x30, 0x6a, + 0x70, 0x06, 0x41, 0x79, 0x42, 0xd2, 0x5c, 0x9c, 0x25, 0x99, 0xb9, 0xa9, 0xf1, 0x79, 0x89, 0x79, + 0xf9, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x1c, 0x20, 0x01, 0xbf, 0xc4, 0xbc, 0x7c, 0x21, + 0x21, 0x2e, 0x96, 0x9c, 0xcc, 0xbc, 0x54, 0x09, 0x66, 0x05, 0x46, 0x0d, 0x9e, 0x20, 0x30, 0x5b, + 0x48, 0x82, 0x8b, 0xbd, 0x20, 0xb1, 0xa8, 0x24, 0x33, 0x31, 0x47, 0x82, 0x45, 0x81, 0x51, 0x83, + 0x23, 0x08, 0xc6, 0x75, 
0xe2, 0x39, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, + 0xe4, 0x18, 0x93, 0xd8, 0xc0, 0x6e, 0x30, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x2d, 0x24, 0x5a, + 0xd4, 0x92, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/moby/moby/api/types/plugins/logdriver/entry.proto b/vendor/github.com/moby/moby/api/types/plugins/logdriver/entry.proto new file mode 100644 index 000000000..a4e96ea5f --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/plugins/logdriver/entry.proto @@ -0,0 +1,8 @@ +syntax = "proto3"; + +message LogEntry { + string source = 1; + int64 time_nano = 2; + bytes line = 3; + bool partial = 4; +} diff --git a/vendor/github.com/moby/moby/api/types/plugins/logdriver/gen.go b/vendor/github.com/moby/moby/api/types/plugins/logdriver/gen.go new file mode 100644 index 000000000..068f98732 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/plugins/logdriver/gen.go @@ -0,0 +1,3 @@ +//go:generate protoc --gogofast_out=import_path=github.com/docker/docker/api/types/plugins/logdriver:. entry.proto + +package logdriver diff --git a/vendor/github.com/moby/moby/api/types/plugins/logdriver/io.go b/vendor/github.com/moby/moby/api/types/plugins/logdriver/io.go new file mode 100644 index 000000000..8c1ed49dd --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/plugins/logdriver/io.go @@ -0,0 +1,87 @@ +package logdriver + +import ( + "encoding/binary" + "io" +) + +const binaryEncodeLen = 4 + +// LogEntryEncoder encodes a LogEntry to a protobuf stream +// The stream should look like: +// +// [uint32 binary encoded message size][protobuf message] +// +// To decode an entry, read the first 4 bytes to get the size of the entry, +// then read `size` bytes from the stream. +type LogEntryEncoder interface { + Encode(*LogEntry) error +} + +// NewLogEntryEncoder creates a protobuf stream encoder for log entries. +// This is used to write out log entries to a stream. +func NewLogEntryEncoder(w io.Writer) LogEntryEncoder { + return &logEntryEncoder{ + w: w, + buf: make([]byte, 1024), + } +} + +type logEntryEncoder struct { + buf []byte + w io.Writer +} + +func (e *logEntryEncoder) Encode(l *LogEntry) error { + n := l.Size() + + total := n + binaryEncodeLen + if total > len(e.buf) { + e.buf = make([]byte, total) + } + binary.BigEndian.PutUint32(e.buf, uint32(n)) + + if _, err := l.MarshalTo(e.buf[binaryEncodeLen:]); err != nil { + return err + } + _, err := e.w.Write(e.buf[:total]) + return err +} + +// LogEntryDecoder decodes log entries from a stream +// It is expected that the wire format is as defined by LogEntryEncoder. 
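+//
+// A minimal round-trip sketch of the framing described above; the
+// bytes.Buffer and the field values are illustrative, not part of this
+// package's API:
+//
+//	var buf bytes.Buffer
+//	enc := NewLogEntryEncoder(&buf)
+//	_ = enc.Encode(&LogEntry{Source: "stdout", Line: []byte("hello")})
+//
+//	dec := NewLogEntryDecoder(&buf)
+//	var entry LogEntry
+//	_ = dec.Decode(&entry) // entry.Line is now []byte("hello")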
+type LogEntryDecoder interface { + Decode(*LogEntry) error +} + +// NewLogEntryDecoder creates a new stream decoder for log entries +func NewLogEntryDecoder(r io.Reader) LogEntryDecoder { + return &logEntryDecoder{ + lenBuf: make([]byte, binaryEncodeLen), + buf: make([]byte, 1024), + r: r, + } +} + +type logEntryDecoder struct { + r io.Reader + lenBuf []byte + buf []byte +} + +func (d *logEntryDecoder) Decode(l *LogEntry) error { + _, err := io.ReadFull(d.r, d.lenBuf) + if err != nil { + return err + } + + size := int(binary.BigEndian.Uint32(d.lenBuf)) + if len(d.buf) < size { + d.buf = make([]byte, size) + } + + if _, err := io.ReadFull(d.r, d.buf[:size]); err != nil { + return err + } + return l.Unmarshal(d.buf[:size]) +} diff --git a/vendor/github.com/moby/moby/api/types/port.go b/vendor/github.com/moby/moby/api/types/port.go new file mode 100644 index 000000000..ad52d46d5 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/port.go @@ -0,0 +1,23 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Port An open port on a container +// swagger:model Port +type Port struct { + + // IP + IP string `json:"IP,omitempty"` + + // Port on the container + // Required: true + PrivatePort uint16 `json:"PrivatePort"` + + // Port exposed on the host + PublicPort uint16 `json:"PublicPort,omitempty"` + + // type + // Required: true + Type string `json:"Type"` +} diff --git a/vendor/github.com/moby/moby/api/types/registry/authenticate.go b/vendor/github.com/moby/moby/api/types/registry/authenticate.go new file mode 100644 index 000000000..42cac4430 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/registry/authenticate.go @@ -0,0 +1,21 @@ +package registry + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// AuthenticateOKBody authenticate o k body +// swagger:model AuthenticateOKBody +type AuthenticateOKBody struct { + + // An opaque token used to authenticate a user after a successful login + // Required: true + IdentityToken string `json:"IdentityToken"` + + // The status of the authentication + // Required: true + Status string `json:"Status"` +} diff --git a/vendor/github.com/moby/moby/api/types/registry/registry.go b/vendor/github.com/moby/moby/api/types/registry/registry.go new file mode 100644 index 000000000..b98a943a1 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/registry/registry.go @@ -0,0 +1,119 @@ +package registry + +import ( + "encoding/json" + "net" + + "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ServiceConfig stores daemon registry services configuration. 
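+//
+// A rough illustration of the JSON shape (all values here are made up; the
+// CIDR entries serialize as strings via NetIPNet below):
+//
+//	{
+//	  "AllowNondistributableArtifactsCIDRs": ["10.0.0.0/8"],
+//	  "AllowNondistributableArtifactsHostnames": ["registry.example.com"],
+//	  "InsecureRegistryCIDRs": ["127.0.0.0/8"],
+//	  "IndexConfigs": {"docker.io": {"Name": "docker.io", "Secure": true, "Official": true}},
+//	  "Mirrors": []
+//	}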
+type ServiceConfig struct {
+	AllowNondistributableArtifactsCIDRs     []*NetIPNet
+	AllowNondistributableArtifactsHostnames []string
+	InsecureRegistryCIDRs                   []*NetIPNet           `json:"InsecureRegistryCIDRs"`
+	IndexConfigs                            map[string]*IndexInfo `json:"IndexConfigs"`
+	Mirrors                                 []string
+}
+
+// NetIPNet is the net.IPNet type, which can be marshalled and
+// unmarshalled to JSON
+type NetIPNet net.IPNet
+
+// String returns the CIDR notation of ipnet
+func (ipnet *NetIPNet) String() string {
+	return (*net.IPNet)(ipnet).String()
+}
+
+// MarshalJSON returns the JSON representation of the IPNet
+func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) {
+	return json.Marshal((*net.IPNet)(ipnet).String())
+}
+
+// UnmarshalJSON sets the IPNet from a byte array of JSON
+func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) {
+	var ipnetStr string
+	if err = json.Unmarshal(b, &ipnetStr); err == nil {
+		var cidr *net.IPNet
+		if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil {
+			*ipnet = NetIPNet(*cidr)
+		}
+	}
+	return
+}
+
+// IndexInfo contains information about a registry
+//
+// RepositoryInfo Examples:
+// {
+//   "Index" : {
+//     "Name" : "docker.io",
+//     "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"],
+//     "Secure" : true,
+//     "Official" : true,
+//   },
+//   "RemoteName" : "library/debian",
+//   "LocalName" : "debian",
+//   "CanonicalName" : "docker.io/debian",
+//   "Official" : true,
+// }
+//
+// {
+//   "Index" : {
+//     "Name" : "127.0.0.1:5000",
+//     "Mirrors" : [],
+//     "Secure" : false,
+//     "Official" : false,
+//   },
+//   "RemoteName" : "user/repo",
+//   "LocalName" : "127.0.0.1:5000/user/repo",
+//   "CanonicalName" : "127.0.0.1:5000/user/repo",
+//   "Official" : false,
+// }
+type IndexInfo struct {
+	// Name is the name of the registry, such as "docker.io"
+	Name string
+	// Mirrors is a list of mirrors, expressed as URIs
+	Mirrors []string
+	// Secure is set to false if the registry is part of the list of
+	// insecure registries. Insecure registries accept HTTP and/or accept
+	// HTTPS with certificates from unknown CAs.
+	Secure bool
+	// Official indicates whether this is an official registry
+	Official bool
+}
+
+// SearchResult describes a search result returned from a registry
+type SearchResult struct {
+	// StarCount indicates the number of stars this repository has
+	StarCount int `json:"star_count"`
+	// IsOfficial is true if the result is from an official repository.
+ IsOfficial bool `json:"is_official"` + // Name is the name of the repository + Name string `json:"name"` + // IsAutomated indicates whether the result is automated + IsAutomated bool `json:"is_automated"` + // Description is a textual description of the repository + Description string `json:"description"` +} + +// SearchResults lists a collection search results returned from a registry +type SearchResults struct { + // Query contains the query string that generated the search results + Query string `json:"query"` + // NumResults indicates the number of results the query returned + NumResults int `json:"num_results"` + // Results is a slice containing the actual results for the search + Results []SearchResult `json:"results"` +} + +// DistributionInspect describes the result obtained from contacting the +// registry to retrieve image metadata +type DistributionInspect struct { + // Descriptor contains information about the manifest, including + // the content addressable digest + Descriptor v1.Descriptor + // Platforms contains the list of platforms supported by the image, + // obtained by parsing the manifest + Platforms []v1.Platform +} diff --git a/vendor/github.com/moby/moby/api/types/seccomp.go b/vendor/github.com/moby/moby/api/types/seccomp.go new file mode 100644 index 000000000..7d62c9a43 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/seccomp.go @@ -0,0 +1,93 @@ +package types + +// Seccomp represents the config for a seccomp profile for syscall restriction. +type Seccomp struct { + DefaultAction Action `json:"defaultAction"` + // Architectures is kept to maintain backward compatibility with the old + // seccomp profile. + Architectures []Arch `json:"architectures,omitempty"` + ArchMap []Architecture `json:"archMap,omitempty"` + Syscalls []*Syscall `json:"syscalls"` +} + +// Architecture is used to represent a specific architecture +// and its sub-architectures +type Architecture struct { + Arch Arch `json:"architecture"` + SubArches []Arch `json:"subArchitectures"` +} + +// Arch used for architectures +type Arch string + +// Additional architectures permitted to be used for system calls +// By default only the native architecture of the kernel is permitted +const ( + ArchX86 Arch = "SCMP_ARCH_X86" + ArchX86_64 Arch = "SCMP_ARCH_X86_64" + ArchX32 Arch = "SCMP_ARCH_X32" + ArchARM Arch = "SCMP_ARCH_ARM" + ArchAARCH64 Arch = "SCMP_ARCH_AARCH64" + ArchMIPS Arch = "SCMP_ARCH_MIPS" + ArchMIPS64 Arch = "SCMP_ARCH_MIPS64" + ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32" + ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL" + ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64" + ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32" + ArchPPC Arch = "SCMP_ARCH_PPC" + ArchPPC64 Arch = "SCMP_ARCH_PPC64" + ArchPPC64LE Arch = "SCMP_ARCH_PPC64LE" + ArchS390 Arch = "SCMP_ARCH_S390" + ArchS390X Arch = "SCMP_ARCH_S390X" +) + +// Action taken upon Seccomp rule match +type Action string + +// Define actions for Seccomp rules +const ( + ActKill Action = "SCMP_ACT_KILL" + ActTrap Action = "SCMP_ACT_TRAP" + ActErrno Action = "SCMP_ACT_ERRNO" + ActTrace Action = "SCMP_ACT_TRACE" + ActAllow Action = "SCMP_ACT_ALLOW" +) + +// Operator used to match syscall arguments in Seccomp +type Operator string + +// Define operators for syscall arguments in Seccomp +const ( + OpNotEqual Operator = "SCMP_CMP_NE" + OpLessThan Operator = "SCMP_CMP_LT" + OpLessEqual Operator = "SCMP_CMP_LE" + OpEqualTo Operator = "SCMP_CMP_EQ" + OpGreaterEqual Operator = "SCMP_CMP_GE" + OpGreaterThan Operator = "SCMP_CMP_GT" + OpMaskedEqual Operator = 
"SCMP_CMP_MASKED_EQ" +) + +// Arg used for matching specific syscall arguments in Seccomp +type Arg struct { + Index uint `json:"index"` + Value uint64 `json:"value"` + ValueTwo uint64 `json:"valueTwo"` + Op Operator `json:"op"` +} + +// Filter is used to conditionally apply Seccomp rules +type Filter struct { + Caps []string `json:"caps,omitempty"` + Arches []string `json:"arches,omitempty"` +} + +// Syscall is used to match a group of syscalls in Seccomp +type Syscall struct { + Name string `json:"name,omitempty"` + Names []string `json:"names,omitempty"` + Action Action `json:"action"` + Args []*Arg `json:"args"` + Comment string `json:"comment"` + Includes Filter `json:"includes"` + Excludes Filter `json:"excludes"` +} diff --git a/vendor/github.com/moby/moby/api/types/service_update_response.go b/vendor/github.com/moby/moby/api/types/service_update_response.go new file mode 100644 index 000000000..74ea64b1b --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/service_update_response.go @@ -0,0 +1,12 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ServiceUpdateResponse service update response +// swagger:model ServiceUpdateResponse +type ServiceUpdateResponse struct { + + // Optional warning messages + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/moby/moby/api/types/stats.go b/vendor/github.com/moby/moby/api/types/stats.go new file mode 100644 index 000000000..7ca76a5b6 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/stats.go @@ -0,0 +1,181 @@ +// Package types is used for API stability in the types and response to the +// consumers of the API stats endpoint. +package types + +import "time" + +// ThrottlingData stores CPU throttling stats of one running container. +// Not used on Windows. +type ThrottlingData struct { + // Number of periods with throttling active + Periods uint64 `json:"periods"` + // Number of periods when the container hits its throttling limit. + ThrottledPeriods uint64 `json:"throttled_periods"` + // Aggregate time the container was throttled for in nanoseconds. + ThrottledTime uint64 `json:"throttled_time"` +} + +// CPUUsage stores All CPU stats aggregated since container inception. +type CPUUsage struct { + // Total CPU time consumed. + // Units: nanoseconds (Linux) + // Units: 100's of nanoseconds (Windows) + TotalUsage uint64 `json:"total_usage"` + + // Total CPU time consumed per core (Linux). Not used on Windows. + // Units: nanoseconds. + PercpuUsage []uint64 `json:"percpu_usage,omitempty"` + + // Time spent by tasks of the cgroup in kernel mode (Linux). + // Time spent by all container processes in kernel mode (Windows). + // Units: nanoseconds (Linux). + // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers. + UsageInKernelmode uint64 `json:"usage_in_kernelmode"` + + // Time spent by tasks of the cgroup in user mode (Linux). + // Time spent by all container processes in user mode (Windows). + // Units: nanoseconds (Linux). + // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers + UsageInUsermode uint64 `json:"usage_in_usermode"` +} + +// CPUStats aggregates and wraps all CPU related info of container +type CPUStats struct { + // CPU Usage. Linux and Windows. + CPUUsage CPUUsage `json:"cpu_usage"` + + // System Usage. Linux only. + SystemUsage uint64 `json:"system_cpu_usage,omitempty"` + + // Online CPUs. Linux only. 
+	OnlineCPUs uint32 `json:"online_cpus,omitempty"`
+
+	// Throttling Data. Linux only.
+	ThrottlingData ThrottlingData `json:"throttling_data,omitempty"`
+}
+
+// MemoryStats aggregates all memory stats since container inception on Linux.
+// Windows returns stats for commit and private working set only.
+type MemoryStats struct {
+	// Linux Memory Stats
+
+	// current res_counter usage for memory
+	Usage uint64 `json:"usage,omitempty"`
+	// maximum usage ever recorded.
+	MaxUsage uint64 `json:"max_usage,omitempty"`
+	// TODO(vishh): Export these as stronger types.
+	// all the stats exported via memory.stat.
+	Stats map[string]uint64 `json:"stats,omitempty"`
+	// number of times memory usage hits limits.
+	Failcnt uint64 `json:"failcnt,omitempty"`
+	Limit   uint64 `json:"limit,omitempty"`
+
+	// Windows Memory Stats
+	// See https://technet.microsoft.com/en-us/magazine/ff382715.aspx
+
+	// committed bytes
+	Commit uint64 `json:"commitbytes,omitempty"`
+	// peak committed bytes
+	CommitPeak uint64 `json:"commitpeakbytes,omitempty"`
+	// private working set
+	PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"`
+}
+
+// BlkioStatEntry is one small entity to store a piece of Blkio stats
+// Not used on Windows.
+type BlkioStatEntry struct {
+	Major uint64 `json:"major"`
+	Minor uint64 `json:"minor"`
+	Op    string `json:"op"`
+	Value uint64 `json:"value"`
+}
+
+// BlkioStats stores all IO service stats for data read and write.
+// This is a Linux-specific structure, as the differences between expressing
+// block I/O on Windows and Linux are significant enough that attempting to
+// merge them into a combined structure would make little sense.
+type BlkioStats struct {
+	// number of bytes transferred to and from the block device
+	IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"`
+	IoServicedRecursive     []BlkioStatEntry `json:"io_serviced_recursive"`
+	IoQueuedRecursive       []BlkioStatEntry `json:"io_queue_recursive"`
+	IoServiceTimeRecursive  []BlkioStatEntry `json:"io_service_time_recursive"`
+	IoWaitTimeRecursive     []BlkioStatEntry `json:"io_wait_time_recursive"`
+	IoMergedRecursive       []BlkioStatEntry `json:"io_merged_recursive"`
+	IoTimeRecursive         []BlkioStatEntry `json:"io_time_recursive"`
+	SectorsRecursive        []BlkioStatEntry `json:"sectors_recursive"`
+}
+
+// StorageStats is the disk I/O stats for read/write on Windows.
+type StorageStats struct {
+	ReadCountNormalized  uint64 `json:"read_count_normalized,omitempty"`
+	ReadSizeBytes        uint64 `json:"read_size_bytes,omitempty"`
+	WriteCountNormalized uint64 `json:"write_count_normalized,omitempty"`
+	WriteSizeBytes       uint64 `json:"write_size_bytes,omitempty"`
+}
+
+// NetworkStats aggregates the network stats of one container
+type NetworkStats struct {
+	// Bytes received. Windows and Linux.
+	RxBytes uint64 `json:"rx_bytes"`
+	// Packets received. Windows and Linux.
+	RxPackets uint64 `json:"rx_packets"`
+	// Received errors. Not used on Windows. Note that we don't `omitempty` this
+	// field as it is expected in the >=v1.21 API stats structure.
+	RxErrors uint64 `json:"rx_errors"`
+	// Incoming packets dropped. Windows and Linux.
+	RxDropped uint64 `json:"rx_dropped"`
+	// Bytes sent. Windows and Linux.
+	TxBytes uint64 `json:"tx_bytes"`
+	// Packets sent. Windows and Linux.
+	TxPackets uint64 `json:"tx_packets"`
+	// Sent errors. Not used on Windows. Note that we don't `omitempty` this
+	// field as it is expected in the >=v1.21 API stats structure.
+	TxErrors uint64 `json:"tx_errors"`
+	// Outgoing packets dropped.
Windows and Linux.
+	TxDropped uint64 `json:"tx_dropped"`
+	// Endpoint ID. Not used on Linux.
+	EndpointID string `json:"endpoint_id,omitempty"`
+	// Instance ID. Not used on Linux.
+	InstanceID string `json:"instance_id,omitempty"`
+}
+
+// PidsStats contains the stats of a container's pids
+type PidsStats struct {
+	// Current is the number of pids in the cgroup
+	Current uint64 `json:"current,omitempty"`
+	// Limit is the hard limit on the number of pids in the cgroup.
+	// A "Limit" of 0 means that there is no limit.
+	Limit uint64 `json:"limit,omitempty"`
+}
+
+// Stats is the ultimate struct aggregating all types of stats of one container
+type Stats struct {
+	// Common stats
+	Read    time.Time `json:"read"`
+	PreRead time.Time `json:"preread"`
+
+	// Linux specific stats, not populated on Windows.
+	PidsStats  PidsStats  `json:"pids_stats,omitempty"`
+	BlkioStats BlkioStats `json:"blkio_stats,omitempty"`
+
+	// Windows specific stats, not populated on Linux.
+	NumProcs     uint32       `json:"num_procs"`
+	StorageStats StorageStats `json:"storage_stats,omitempty"`
+
+	// Shared stats
+	CPUStats    CPUStats    `json:"cpu_stats,omitempty"`
+	PreCPUStats CPUStats    `json:"precpu_stats,omitempty"` // "Pre"="Previous"
+	MemoryStats MemoryStats `json:"memory_stats,omitempty"`
+}
+
+// StatsJSON is Stats extended with the container name, ID, and, for API
+// version >=1.21, the per-network stats.
+type StatsJSON struct {
+	Stats
+
+	Name string `json:"name,omitempty"`
+	ID   string `json:"id,omitempty"`
+
+	// Networks request version >=1.21
+	Networks map[string]NetworkStats `json:"networks,omitempty"`
+}
diff --git a/vendor/github.com/moby/moby/api/types/strslice/strslice.go b/vendor/github.com/moby/moby/api/types/strslice/strslice.go
new file mode 100644
index 000000000..bad493fb8
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/strslice/strslice.go
@@ -0,0 +1,30 @@
+package strslice
+
+import "encoding/json"
+
+// StrSlice represents a string or an array of strings.
+// We need to override the json decoder to accept both options.
+type StrSlice []string
+
+// UnmarshalJSON decodes the byte slice whether it's a string or an array of
+// strings. This method is needed to implement json.Unmarshaler.
+func (e *StrSlice) UnmarshalJSON(b []byte) error {
+	if len(b) == 0 {
+		// With no input, we preserve the existing value by returning nil and
+		// leaving the target alone. This allows defining default values for
+		// the type.
+		return nil
+	}
+
+	p := make([]string, 0, 1)
+	if err := json.Unmarshal(b, &p); err != nil {
+		var s string
+		if err := json.Unmarshal(b, &s); err != nil {
+			return err
+		}
+		p = append(p, s)
+	}
+
+	*e = p
+	return nil
+}
diff --git a/vendor/github.com/moby/moby/api/types/strslice/strslice_test.go b/vendor/github.com/moby/moby/api/types/strslice/strslice_test.go
new file mode 100644
index 000000000..1163b3652
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/strslice/strslice_test.go
@@ -0,0 +1,86 @@
+package strslice
+
+import (
+	"encoding/json"
+	"reflect"
+	"testing"
+)
+
+func TestStrSliceMarshalJSON(t *testing.T) {
+	for _, testcase := range []struct {
+		input    StrSlice
+		expected string
+	}{
+		// MADNESS(stevvooe): No clue why nil would be "" but empty would be
+		// "null". Had to make a change here that may affect compatibility.
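+		// For reference: encoding/json marshals a nil slice to "null" and an
+		// initialized empty slice to "[]", which is what the cases below assert.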
+		{input: nil, expected: "null"},
+		{StrSlice{}, "[]"},
+		{StrSlice{"/bin/sh", "-c", "echo"}, `["/bin/sh","-c","echo"]`},
+	} {
+		data, err := json.Marshal(testcase.input)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if string(data) != testcase.expected {
+			t.Fatalf("%#v: expected %v, got %v", testcase.input, testcase.expected, string(data))
+		}
+	}
+}
+
+func TestStrSliceUnmarshalJSON(t *testing.T) {
+	parts := map[string][]string{
+		"":                        {"default", "values"},
+		"[]":                      {},
+		`["/bin/sh","-c","echo"]`: {"/bin/sh", "-c", "echo"},
+	}
+	for json, expectedParts := range parts {
+		strs := StrSlice{"default", "values"}
+		if err := strs.UnmarshalJSON([]byte(json)); err != nil {
+			t.Fatal(err)
+		}
+
+		actualParts := []string(strs)
+		if !reflect.DeepEqual(actualParts, expectedParts) {
+			t.Fatalf("%#v: expected %v, got %v", json, expectedParts, actualParts)
+		}
+
+	}
+}
+
+func TestStrSliceUnmarshalString(t *testing.T) {
+	var e StrSlice
+	echo, err := json.Marshal("echo")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := json.Unmarshal(echo, &e); err != nil {
+		t.Fatal(err)
+	}
+
+	if len(e) != 1 {
+		t.Fatalf("expected 1 element after unmarshal: %q", e)
+	}
+
+	if e[0] != "echo" {
+		t.Fatalf("expected `echo`, got: %q", e[0])
+	}
+}
+
+func TestStrSliceUnmarshalSlice(t *testing.T) {
+	var e StrSlice
+	echo, err := json.Marshal([]string{"echo"})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := json.Unmarshal(echo, &e); err != nil {
+		t.Fatal(err)
+	}
+
+	if len(e) != 1 {
+		t.Fatalf("expected 1 element after unmarshal: %q", e)
+	}
+
+	if e[0] != "echo" {
+		t.Fatalf("expected `echo`, got: %q", e[0])
+	}
+}
diff --git a/vendor/github.com/moby/moby/api/types/swarm/common.go b/vendor/github.com/moby/moby/api/types/swarm/common.go
new file mode 100644
index 000000000..54af82b31
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/swarm/common.go
@@ -0,0 +1,40 @@
+package swarm
+
+import "time"
+
+// Version represents the internal object version.
+type Version struct {
+	Index uint64 `json:",omitempty"`
+}
+
+// Meta is a base object inherited by most of the other ones.
+type Meta struct {
+	Version   Version   `json:",omitempty"`
+	CreatedAt time.Time `json:",omitempty"`
+	UpdatedAt time.Time `json:",omitempty"`
+}
+
+// Annotations represents how to describe an object.
+type Annotations struct {
+	Name   string            `json:",omitempty"`
+	Labels map[string]string `json:"Labels"`
+}
+
+// Driver represents a driver (network, logging).
+type Driver struct {
+	Name    string            `json:",omitempty"`
+	Options map[string]string `json:",omitempty"`
+}
+
+// TLSInfo represents the TLS information about what CA certificate is trusted,
+// and who the issuer for a TLS certificate is
+type TLSInfo struct {
+	// TrustRoot is the trusted CA root certificate in PEM format
+	TrustRoot string `json:",omitempty"`
+
+	// CertIssuerSubject is the raw subject bytes of the issuer
+	CertIssuerSubject []byte `json:",omitempty"`
+
+	// CertIssuerPublicKey is the raw public key bytes of the issuer
+	CertIssuerPublicKey []byte `json:",omitempty"`
+}
diff --git a/vendor/github.com/moby/moby/api/types/swarm/config.go b/vendor/github.com/moby/moby/api/types/swarm/config.go
new file mode 100644
index 000000000..0fb021ce9
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/swarm/config.go
@@ -0,0 +1,31 @@
+package swarm
+
+import "os"
+
+// Config represents a config.
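+//
+// A Config wraps a ConfigSpec (defined below). An illustrative, made-up spec:
+//
+//	spec := ConfigSpec{
+//		Annotations: Annotations{Name: "app-config", Labels: map[string]string{"env": "prod"}},
+//		Data:        []byte("key=value\n"),
+//	}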
+type Config struct {
+	ID string
+	Meta
+	Spec ConfigSpec
+}
+
+// ConfigSpec represents a config specification from a config in swarm
+type ConfigSpec struct {
+	Annotations
+	Data []byte `json:",omitempty"`
+}
+
+// ConfigReferenceFileTarget is a file target in a config reference
+type ConfigReferenceFileTarget struct {
+	Name string
+	UID  string
+	GID  string
+	Mode os.FileMode
+}
+
+// ConfigReference is a reference to a config in swarm
+type ConfigReference struct {
+	File       *ConfigReferenceFileTarget
+	ConfigID   string
+	ConfigName string
+}
diff --git a/vendor/github.com/moby/moby/api/types/swarm/container.go b/vendor/github.com/moby/moby/api/types/swarm/container.go
new file mode 100644
index 000000000..6f8b45f6b
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/swarm/container.go
@@ -0,0 +1,72 @@
+package swarm
+
+import (
+	"time"
+
+	"github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/api/types/mount"
+)
+
+// DNSConfig specifies DNS-related configurations in the resolver configuration
+// file (resolv.conf). Detailed documentation is available at:
+// http://man7.org/linux/man-pages/man5/resolv.conf.5.html
+// `nameserver`, `search`, and `options` are supported.
+// TODO: `domain` is not supported yet.
+type DNSConfig struct {
+	// Nameservers specifies the IP addresses of the name servers
+	Nameservers []string `json:",omitempty"`
+	// Search specifies the search list for host-name lookup
+	Search []string `json:",omitempty"`
+	// Options allows certain internal resolver variables to be modified
+	Options []string `json:",omitempty"`
+}
+
+// SELinuxContext contains the SELinux labels of the container.
+type SELinuxContext struct {
+	Disable bool
+
+	User  string
+	Role  string
+	Type  string
+	Level string
+}
+
+// CredentialSpec for managed service account (Windows only)
+type CredentialSpec struct {
+	File     string
+	Registry string
+}
+
+// Privileges defines the security options for the container.
+type Privileges struct {
+	CredentialSpec *CredentialSpec
+	SELinuxContext *SELinuxContext
+}
+
+// ContainerSpec represents the spec of a container.
+type ContainerSpec struct {
+	Image           string                  `json:",omitempty"`
+	Labels          map[string]string       `json:",omitempty"`
+	Command         []string                `json:",omitempty"`
+	Args            []string                `json:",omitempty"`
+	Hostname        string                  `json:",omitempty"`
+	Env             []string                `json:",omitempty"`
+	Dir             string                  `json:",omitempty"`
+	User            string                  `json:",omitempty"`
+	Groups          []string                `json:",omitempty"`
+	Privileges      *Privileges             `json:",omitempty"`
+	StopSignal      string                  `json:",omitempty"`
+	TTY             bool                    `json:",omitempty"`
+	OpenStdin       bool                    `json:",omitempty"`
+	ReadOnly        bool                    `json:",omitempty"`
+	Mounts          []mount.Mount           `json:",omitempty"`
+	StopGracePeriod *time.Duration          `json:",omitempty"`
+	Healthcheck     *container.HealthConfig `json:",omitempty"`
+	// The format of extra hosts on swarmkit is specified in:
+	// http://man7.org/linux/man-pages/man5/hosts.5.html
+	//    IP_address canonical_hostname [aliases...]
+	Hosts     []string           `json:",omitempty"`
+	DNSConfig *DNSConfig         `json:",omitempty"`
+	Secrets   []*SecretReference `json:",omitempty"`
+	Configs   []*ConfigReference `json:",omitempty"`
+}
diff --git a/vendor/github.com/moby/moby/api/types/swarm/network.go b/vendor/github.com/moby/moby/api/types/swarm/network.go
new file mode 100644
index 000000000..97c484e14
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/swarm/network.go
@@ -0,0 +1,119 @@
+package swarm
+
+import (
+	"github.com/docker/docker/api/types/network"
+)
+
+// Endpoint represents an endpoint.
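+//
+// A minimal sketch of an endpoint spec publishing container port 80 on
+// swarm port 8080 (values are illustrative):
+//
+//	spec := EndpointSpec{
+//		Mode: ResolutionModeVIP,
+//		Ports: []PortConfig{{
+//			Protocol:      PortConfigProtocolTCP,
+//			TargetPort:    80,
+//			PublishedPort: 8080,
+//			PublishMode:   PortConfigPublishModeIngress,
+//		}},
+//	}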
+type Endpoint struct { + Spec EndpointSpec `json:",omitempty"` + Ports []PortConfig `json:",omitempty"` + VirtualIPs []EndpointVirtualIP `json:",omitempty"` +} + +// EndpointSpec represents the spec of an endpoint. +type EndpointSpec struct { + Mode ResolutionMode `json:",omitempty"` + Ports []PortConfig `json:",omitempty"` +} + +// ResolutionMode represents a resolution mode. +type ResolutionMode string + +const ( + // ResolutionModeVIP VIP + ResolutionModeVIP ResolutionMode = "vip" + // ResolutionModeDNSRR DNSRR + ResolutionModeDNSRR ResolutionMode = "dnsrr" +) + +// PortConfig represents the config of a port. +type PortConfig struct { + Name string `json:",omitempty"` + Protocol PortConfigProtocol `json:",omitempty"` + // TargetPort is the port inside the container + TargetPort uint32 `json:",omitempty"` + // PublishedPort is the port on the swarm hosts + PublishedPort uint32 `json:",omitempty"` + // PublishMode is the mode in which port is published + PublishMode PortConfigPublishMode `json:",omitempty"` +} + +// PortConfigPublishMode represents the mode in which the port is to +// be published. +type PortConfigPublishMode string + +const ( + // PortConfigPublishModeIngress is used for ports published + // for ingress load balancing using routing mesh. + PortConfigPublishModeIngress PortConfigPublishMode = "ingress" + // PortConfigPublishModeHost is used for ports published + // for direct host level access on the host where the task is running. + PortConfigPublishModeHost PortConfigPublishMode = "host" +) + +// PortConfigProtocol represents the protocol of a port. +type PortConfigProtocol string + +const ( + // TODO(stevvooe): These should be used generally, not just for PortConfig. + + // PortConfigProtocolTCP TCP + PortConfigProtocolTCP PortConfigProtocol = "tcp" + // PortConfigProtocolUDP UDP + PortConfigProtocolUDP PortConfigProtocol = "udp" +) + +// EndpointVirtualIP represents the virtual ip of a port. +type EndpointVirtualIP struct { + NetworkID string `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// Network represents a network. +type Network struct { + ID string + Meta + Spec NetworkSpec `json:",omitempty"` + DriverState Driver `json:",omitempty"` + IPAMOptions *IPAMOptions `json:",omitempty"` +} + +// NetworkSpec represents the spec of a network. +type NetworkSpec struct { + Annotations + DriverConfiguration *Driver `json:",omitempty"` + IPv6Enabled bool `json:",omitempty"` + Internal bool `json:",omitempty"` + Attachable bool `json:",omitempty"` + Ingress bool `json:",omitempty"` + IPAMOptions *IPAMOptions `json:",omitempty"` + ConfigFrom *network.ConfigReference `json:",omitempty"` + Scope string `json:",omitempty"` +} + +// NetworkAttachmentConfig represents the configuration of a network attachment. +type NetworkAttachmentConfig struct { + Target string `json:",omitempty"` + Aliases []string `json:",omitempty"` + DriverOpts map[string]string `json:",omitempty"` +} + +// NetworkAttachment represents a network attachment. +type NetworkAttachment struct { + Network Network `json:",omitempty"` + Addresses []string `json:",omitempty"` +} + +// IPAMOptions represents ipam options. +type IPAMOptions struct { + Driver Driver `json:",omitempty"` + Configs []IPAMConfig `json:",omitempty"` +} + +// IPAMConfig represents ipam configuration. 
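+//
+// For example (the subnet values are illustrative):
+//
+//	opts := IPAMOptions{
+//		Driver:  Driver{Name: "default"},
+//		Configs: []IPAMConfig{{Subnet: "10.0.9.0/24", Gateway: "10.0.9.1"}},
+//	}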
+type IPAMConfig struct { + Subnet string `json:",omitempty"` + Range string `json:",omitempty"` + Gateway string `json:",omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/swarm/node.go b/vendor/github.com/moby/moby/api/types/swarm/node.go new file mode 100644 index 000000000..28c6851e9 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/swarm/node.go @@ -0,0 +1,115 @@ +package swarm + +// Node represents a node. +type Node struct { + ID string + Meta + // Spec defines the desired state of the node as specified by the user. + // The system will honor this and will *never* modify it. + Spec NodeSpec `json:",omitempty"` + // Description encapsulates the properties of the Node as reported by the + // agent. + Description NodeDescription `json:",omitempty"` + // Status provides the current status of the node, as seen by the manager. + Status NodeStatus `json:",omitempty"` + // ManagerStatus provides the current status of the node's manager + // component, if the node is a manager. + ManagerStatus *ManagerStatus `json:",omitempty"` +} + +// NodeSpec represents the spec of a node. +type NodeSpec struct { + Annotations + Role NodeRole `json:",omitempty"` + Availability NodeAvailability `json:",omitempty"` +} + +// NodeRole represents the role of a node. +type NodeRole string + +const ( + // NodeRoleWorker WORKER + NodeRoleWorker NodeRole = "worker" + // NodeRoleManager MANAGER + NodeRoleManager NodeRole = "manager" +) + +// NodeAvailability represents the availability of a node. +type NodeAvailability string + +const ( + // NodeAvailabilityActive ACTIVE + NodeAvailabilityActive NodeAvailability = "active" + // NodeAvailabilityPause PAUSE + NodeAvailabilityPause NodeAvailability = "pause" + // NodeAvailabilityDrain DRAIN + NodeAvailabilityDrain NodeAvailability = "drain" +) + +// NodeDescription represents the description of a node. +type NodeDescription struct { + Hostname string `json:",omitempty"` + Platform Platform `json:",omitempty"` + Resources Resources `json:",omitempty"` + Engine EngineDescription `json:",omitempty"` + TLSInfo TLSInfo `json:",omitempty"` +} + +// Platform represents the platform (Arch/OS). +type Platform struct { + Architecture string `json:",omitempty"` + OS string `json:",omitempty"` +} + +// EngineDescription represents the description of an engine. +type EngineDescription struct { + EngineVersion string `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + Plugins []PluginDescription `json:",omitempty"` +} + +// PluginDescription represents the description of an engine plugin. +type PluginDescription struct { + Type string `json:",omitempty"` + Name string `json:",omitempty"` +} + +// NodeStatus represents the status of a node. +type NodeStatus struct { + State NodeState `json:",omitempty"` + Message string `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// Reachability represents the reachability of a node. +type Reachability string + +const ( + // ReachabilityUnknown UNKNOWN + ReachabilityUnknown Reachability = "unknown" + // ReachabilityUnreachable UNREACHABLE + ReachabilityUnreachable Reachability = "unreachable" + // ReachabilityReachable REACHABLE + ReachabilityReachable Reachability = "reachable" +) + +// ManagerStatus represents the status of a manager. +type ManagerStatus struct { + Leader bool `json:",omitempty"` + Reachability Reachability `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// NodeState represents the state of a node. 
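+//
+// A minimal sketch of consuming node state; the nodes slice is assumed to
+// come from an Engine API client and is not part of this package:
+//
+//	for _, n := range nodes {
+//		if n.Status.State == NodeStateReady {
+//			// the node can accept tasks
+//		}
+//	}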
+type NodeState string + +const ( + // NodeStateUnknown UNKNOWN + NodeStateUnknown NodeState = "unknown" + // NodeStateDown DOWN + NodeStateDown NodeState = "down" + // NodeStateReady READY + NodeStateReady NodeState = "ready" + // NodeStateDisconnected DISCONNECTED + NodeStateDisconnected NodeState = "disconnected" +) diff --git a/vendor/github.com/moby/moby/api/types/swarm/runtime.go b/vendor/github.com/moby/moby/api/types/swarm/runtime.go new file mode 100644 index 000000000..c4c731dc8 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/swarm/runtime.go @@ -0,0 +1,19 @@ +package swarm + +// RuntimeType is the type of runtime used for the TaskSpec +type RuntimeType string + +// RuntimeURL is the proto type url +type RuntimeURL string + +const ( + // RuntimeContainer is the container based runtime + RuntimeContainer RuntimeType = "container" + // RuntimePlugin is the plugin based runtime + RuntimePlugin RuntimeType = "plugin" + + // RuntimeURLContainer is the proto url for the container type + RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer" + // RuntimeURLPlugin is the proto url for the plugin type + RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin" +) diff --git a/vendor/github.com/moby/moby/api/types/swarm/runtime/gen.go b/vendor/github.com/moby/moby/api/types/swarm/runtime/gen.go new file mode 100644 index 000000000..47ae234ef --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/swarm/runtime/gen.go @@ -0,0 +1,3 @@ +//go:generate protoc -I . --gogofast_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. plugin.proto + +package runtime diff --git a/vendor/github.com/moby/moby/api/types/swarm/runtime/plugin.pb.go b/vendor/github.com/moby/moby/api/types/swarm/runtime/plugin.pb.go new file mode 100644 index 000000000..1fdc9b043 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/swarm/runtime/plugin.pb.go @@ -0,0 +1,712 @@ +// Code generated by protoc-gen-gogo. +// source: plugin.proto +// DO NOT EDIT! + +/* + Package runtime is a generated protocol buffer package. + + It is generated from these files: + plugin.proto + + It has these top-level messages: + PluginSpec + PluginPrivilege +*/ +package runtime + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// PluginSpec defines the base payload which clients can specify for creating +// a service with the plugin runtime. 
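+//
+// An illustrative payload (the name and remote reference are made up):
+//
+//	spec := PluginSpec{
+//		Name:   "my-volume-plugin",
+//		Remote: "example.com/my/volume-plugin:latest",
+//		Privileges: []*PluginPrivilege{
+//			{Name: "network", Description: "host networking access", Value: []string{"host"}},
+//		},
+//	}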
+type PluginSpec struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"` + Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges" json:"privileges,omitempty"` + Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"` +} + +func (m *PluginSpec) Reset() { *m = PluginSpec{} } +func (m *PluginSpec) String() string { return proto.CompactTextString(m) } +func (*PluginSpec) ProtoMessage() {} +func (*PluginSpec) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} } + +func (m *PluginSpec) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PluginSpec) GetRemote() string { + if m != nil { + return m.Remote + } + return "" +} + +func (m *PluginSpec) GetPrivileges() []*PluginPrivilege { + if m != nil { + return m.Privileges + } + return nil +} + +func (m *PluginSpec) GetDisabled() bool { + if m != nil { + return m.Disabled + } + return false +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. +type PluginPrivilege struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + Value []string `protobuf:"bytes,3,rep,name=value" json:"value,omitempty"` +} + +func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} } +func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) } +func (*PluginPrivilege) ProtoMessage() {} +func (*PluginPrivilege) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} } + +func (m *PluginPrivilege) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PluginPrivilege) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *PluginPrivilege) GetValue() []string { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*PluginSpec)(nil), "PluginSpec") + proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege") +} +func (m *PluginSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Remote) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote))) + i += copy(dAtA[i:], m.Remote) + } + if len(m.Privileges) > 0 { + for _, msg := range m.Privileges { + dAtA[i] = 0x1a + i++ + i = encodeVarintPlugin(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Disabled { + dAtA[i] = 0x20 + i++ + if m.Disabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *PluginPrivilege) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) + i += 
copy(dAtA[i:], m.Name) + } + if len(m.Description) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description))) + i += copy(dAtA[i:], m.Description) + } + if len(m.Value) > 0 { + for _, s := range m.Value { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func encodeFixed64Plugin(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Plugin(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *PluginSpec) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + l = len(m.Remote) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + if len(m.Privileges) > 0 { + for _, e := range m.Privileges { + l = e.Size() + n += 1 + l + sovPlugin(uint64(l)) + } + } + if m.Disabled { + n += 2 + } + return n +} + +func (m *PluginPrivilege) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + l = len(m.Description) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + if len(m.Value) > 0 { + for _, s := range m.Value { + l = len(s) + n += 1 + l + sovPlugin(uint64(l)) + } + } + return n +} + +func sovPlugin(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozPlugin(x uint64) (n int) { + return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *PluginSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PluginSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PluginSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType) + } + var stringLen uint64 + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Remote = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Privileges = append(m.Privileges, &PluginPrivilege{}) + if err := m.Privileges[len(m.Privileges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Disabled = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PluginPrivilege: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PluginPrivilege: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 
0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPlugin(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthPlugin + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipPlugin(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) } + +var fileDescriptorPlugin = []byte{ + // 196 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0xc8, 0x29, 0x4d, + 0xcf, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0x6a, 0x63, 0xe4, 0xe2, 0x0a, 0x00, 0x0b, + 0x04, 
0x17, 0xa4, 0x26, 0x0b, 0x09, 0x71, 0xb1, 0xe4, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, + 0x6a, 0x70, 0x06, 0x81, 0xd9, 0x42, 0x62, 0x5c, 0x6c, 0x45, 0xa9, 0xb9, 0xf9, 0x25, 0xa9, 0x12, + 0x4c, 0x60, 0x51, 0x28, 0x4f, 0xc8, 0x80, 0x8b, 0xab, 0xa0, 0x28, 0xb3, 0x2c, 0x33, 0x27, 0x35, + 0x3d, 0xb5, 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x40, 0x0f, 0x62, 0x58, 0x00, 0x4c, + 0x22, 0x08, 0x49, 0x8d, 0x90, 0x14, 0x17, 0x47, 0x4a, 0x66, 0x71, 0x62, 0x52, 0x4e, 0x6a, 0x8a, + 0x04, 0x8b, 0x02, 0xa3, 0x06, 0x47, 0x10, 0x9c, 0xaf, 0x14, 0xcb, 0xc5, 0x8f, 0xa6, 0x15, 0xab, + 0x63, 0x14, 0xb8, 0xb8, 0x53, 0x52, 0x8b, 0x93, 0x8b, 0x32, 0x0b, 0x4a, 0x32, 0xf3, 0xf3, 0xa0, + 0x2e, 0x42, 0x16, 0x12, 0x12, 0xe1, 0x62, 0x2d, 0x4b, 0xcc, 0x29, 0x4d, 0x05, 0xbb, 0x88, 0x33, + 0x08, 0xc2, 0x71, 0xe2, 0x39, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, + 0x18, 0x93, 0xd8, 0xc0, 0x9e, 0x37, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb8, 0x84, 0xad, 0x79, + 0x0c, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/moby/moby/api/types/swarm/runtime/plugin.proto b/vendor/github.com/moby/moby/api/types/swarm/runtime/plugin.proto new file mode 100644 index 000000000..06eb7ba65 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/swarm/runtime/plugin.proto @@ -0,0 +1,18 @@ +syntax = "proto3"; + +// PluginSpec defines the base payload which clients can specify for creating +// a service with the plugin runtime. +message PluginSpec { + string name = 1; + string remote = 2; + repeated PluginPrivilege privileges = 3; + bool disabled = 4; +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. +message PluginPrivilege { + string name = 1; + string description = 2; + repeated string value = 3; +} diff --git a/vendor/github.com/moby/moby/api/types/swarm/secret.go b/vendor/github.com/moby/moby/api/types/swarm/secret.go new file mode 100644 index 000000000..f9b1e9266 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/swarm/secret.go @@ -0,0 +1,32 @@ +package swarm + +import "os" + +// Secret represents a secret. +type Secret struct { + ID string + Meta + Spec SecretSpec +} + +// SecretSpec represents a secret specification from a secret in swarm +type SecretSpec struct { + Annotations + Data []byte `json:",omitempty"` + Driver *Driver `json:",omitempty"` // name of the secrets driver used to fetch the secret's value from an external secret store +} + +// SecretReferenceFileTarget is a file target in a secret reference +type SecretReferenceFileTarget struct { + Name string + UID string + GID string + Mode os.FileMode +} + +// SecretReference is a reference to a secret in swarm +type SecretReference struct { + File *SecretReferenceFileTarget + SecretID string + SecretName string +} diff --git a/vendor/github.com/moby/moby/api/types/swarm/service.go b/vendor/github.com/moby/moby/api/types/swarm/service.go new file mode 100644 index 000000000..fa31a7ec8 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/swarm/service.go @@ -0,0 +1,124 @@ +package swarm + +import "time" + +// Service represents a service. +type Service struct { + ID string + Meta + Spec ServiceSpec `json:",omitempty"` + PreviousSpec *ServiceSpec `json:",omitempty"` + Endpoint Endpoint `json:",omitempty"` + UpdateStatus *UpdateStatus `json:",omitempty"` +} + +// ServiceSpec represents the spec of a service. +type ServiceSpec struct { + Annotations + + // TaskTemplate defines how the service should construct new tasks when + // orchestrating this service. 
+ TaskTemplate TaskSpec `json:",omitempty"` + Mode ServiceMode `json:",omitempty"` + UpdateConfig *UpdateConfig `json:",omitempty"` + RollbackConfig *UpdateConfig `json:",omitempty"` + + // Networks field in ServiceSpec is deprecated. The + // same field in TaskSpec should be used instead. + // This field will be removed in a future release. + Networks []NetworkAttachmentConfig `json:",omitempty"` + EndpointSpec *EndpointSpec `json:",omitempty"` +} + +// ServiceMode represents the mode of a service. +type ServiceMode struct { + Replicated *ReplicatedService `json:",omitempty"` + Global *GlobalService `json:",omitempty"` +} + +// UpdateState is the state of a service update. +type UpdateState string + +const ( + // UpdateStateUpdating is the updating state. + UpdateStateUpdating UpdateState = "updating" + // UpdateStatePaused is the paused state. + UpdateStatePaused UpdateState = "paused" + // UpdateStateCompleted is the completed state. + UpdateStateCompleted UpdateState = "completed" + // UpdateStateRollbackStarted is the state with a rollback in progress. + UpdateStateRollbackStarted UpdateState = "rollback_started" + // UpdateStateRollbackPaused is the state with a paused rollback. + UpdateStateRollbackPaused UpdateState = "rollback_paused" + // UpdateStateRollbackCompleted is the state with a completed rollback. + UpdateStateRollbackCompleted UpdateState = "rollback_completed" +) + +// UpdateStatus reports the status of a service update. +type UpdateStatus struct { + State UpdateState `json:",omitempty"` + StartedAt *time.Time `json:",omitempty"` + CompletedAt *time.Time `json:",omitempty"` + Message string `json:",omitempty"` +} + +// ReplicatedService is a kind of ServiceMode. +type ReplicatedService struct { + Replicas *uint64 `json:",omitempty"` +} + +// GlobalService is a kind of ServiceMode. +type GlobalService struct{} + +const ( + // UpdateFailureActionPause PAUSE + UpdateFailureActionPause = "pause" + // UpdateFailureActionContinue CONTINUE + UpdateFailureActionContinue = "continue" + // UpdateFailureActionRollback ROLLBACK + UpdateFailureActionRollback = "rollback" + + // UpdateOrderStopFirst STOP_FIRST + UpdateOrderStopFirst = "stop-first" + // UpdateOrderStartFirst START_FIRST + UpdateOrderStartFirst = "start-first" +) + +// UpdateConfig represents the update configuration. +type UpdateConfig struct { + // Maximum number of tasks to be updated in one iteration. + // 0 means unlimited parallelism. + Parallelism uint64 + + // Amount of time between updates. + Delay time.Duration `json:",omitempty"` + + // FailureAction is the action to take when an update fails. + FailureAction string `json:",omitempty"` + + // Monitor indicates how long to monitor a task for failure after it is + // created. If the task fails by ending up in one of the states + // REJECTED, COMPLETED, or FAILED, within Monitor from its creation, + // this counts as a failure. If it fails after Monitor, it does not + // count as a failure. If Monitor is unspecified, a default value will + // be used. + Monitor time.Duration `json:",omitempty"` + + // MaxFailureRatio is the fraction of tasks that may fail during + // an update before the failure action is invoked. Any task created by + // the current update which ends up in one of the states REJECTED, + // COMPLETED or FAILED within Monitor from its creation counts as a + // failure. The number of failures is divided by the number of tasks + // being updated, and if this fraction is greater than + // MaxFailureRatio, the failure action is invoked.
+ // + // If the failure action is CONTINUE, there is no effect. + // If the failure action is PAUSE, no more tasks will be updated until + // another update is started. + MaxFailureRatio float32 + + // Order indicates the order of operations when rolling out an updated + // task. Either the old task is shut down before the new task is + // started, or the new task is started before the old task is shut down. + Order string +} diff --git a/vendor/github.com/moby/moby/api/types/swarm/swarm.go b/vendor/github.com/moby/moby/api/types/swarm/swarm.go new file mode 100644 index 000000000..b65fa86da --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/swarm/swarm.go @@ -0,0 +1,217 @@ +package swarm + +import "time" + +// ClusterInfo represents info about the cluster for outputting in "info". +// It contains the same information as "Swarm", but without the JoinTokens. +type ClusterInfo struct { + ID string + Meta + Spec Spec + TLSInfo TLSInfo + RootRotationInProgress bool +} + +// Swarm represents a swarm. +type Swarm struct { + ClusterInfo + JoinTokens JoinTokens +} + +// JoinTokens contains the tokens workers and managers need to join the swarm. +type JoinTokens struct { + // Worker is the join token workers may use to join the swarm. + Worker string + // Manager is the join token managers may use to join the swarm. + Manager string +} + +// Spec represents the spec of a swarm. +type Spec struct { + Annotations + + Orchestration OrchestrationConfig `json:",omitempty"` + Raft RaftConfig `json:",omitempty"` + Dispatcher DispatcherConfig `json:",omitempty"` + CAConfig CAConfig `json:",omitempty"` + TaskDefaults TaskDefaults `json:",omitempty"` + EncryptionConfig EncryptionConfig `json:",omitempty"` +} + +// OrchestrationConfig represents orchestration configuration. +type OrchestrationConfig struct { + // TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or + // node. If negative, never remove completed or failed tasks. + TaskHistoryRetentionLimit *int64 `json:",omitempty"` +} + +// TaskDefaults parameterizes cluster-level task creation with default values. +type TaskDefaults struct { + // LogDriver selects the log driver to use for tasks created in the + // orchestrator if unspecified by a service. + // + // Updating this value will only have an effect on new tasks. Old tasks + // will continue to use their previously configured log driver until + // recreated. + LogDriver *Driver `json:",omitempty"` +} + +// EncryptionConfig controls at-rest encryption of data and keys. +type EncryptionConfig struct { + // AutoLockManagers specifies whether or not managers' TLS keys and raft data + // should be encrypted at rest in such a way that they must be unlocked + // before the manager node starts up again. + AutoLockManagers bool +} + +// RaftConfig represents raft configuration. +type RaftConfig struct { + // SnapshotInterval is the number of log entries between snapshots. + SnapshotInterval uint64 `json:",omitempty"` + + // KeepOldSnapshots is the number of snapshots to keep beyond the + // current snapshot. + KeepOldSnapshots *uint64 `json:",omitempty"` + + // LogEntriesForSlowFollowers is the number of log entries to keep + // around to sync up slow followers after a snapshot is created. + LogEntriesForSlowFollowers uint64 `json:",omitempty"` + + // ElectionTick is the number of ticks that a follower will wait for a message + // from the leader before becoming a candidate and starting an election. + // ElectionTick must be greater than HeartbeatTick.
+ // + // A tick currently defaults to one second, so these translate directly to + // seconds currently, but this is NOT guaranteed. + ElectionTick int + + // HeartbeatTick is the number of ticks between heartbeats. Every + // HeartbeatTick ticks, the leader will send a heartbeat to the + // followers. + // + // A tick currently defaults to one second, so these translate directly to + // seconds currently, but this is NOT guaranteed. + HeartbeatTick int +} + +// DispatcherConfig represents dispatcher configuration. +type DispatcherConfig struct { + // HeartbeatPeriod defines how often an agent should send heartbeats to + // the dispatcher. + HeartbeatPeriod time.Duration `json:",omitempty"` +} + +// CAConfig represents CA configuration. +type CAConfig struct { + // NodeCertExpiry is the duration certificates should be issued for + NodeCertExpiry time.Duration `json:",omitempty"` + + // ExternalCAs is a list of CAs to which a manager node will make + // certificate signing requests for node certificates. + ExternalCAs []*ExternalCA `json:",omitempty"` + + // SigningCACert and SigningCAKey specify the desired signing root CA and + // root CA key for the swarm. When inspecting the cluster, the key will + // be redacted. + SigningCACert string `json:",omitempty"` + SigningCAKey string `json:",omitempty"` + + // If this value changes, and there is no specified signing cert and key, + // then the swarm is forced to generate a new root certificate and key. + ForceRotate uint64 `json:",omitempty"` +} + +// ExternalCAProtocol represents the type of external CA. +type ExternalCAProtocol string + +// ExternalCAProtocolCFSSL CFSSL +const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl" + +// ExternalCA defines an external CA to be used by the cluster. +type ExternalCA struct { + // Protocol is the protocol used by this external CA. + Protocol ExternalCAProtocol + + // URL is the URL where the external CA can be reached. + URL string + + // Options is a set of additional key/value pairs whose interpretation + // depends on the specified CA type. + Options map[string]string `json:",omitempty"` + + // CACert specifies which root CA is used by this external CA. This certificate must + // be in PEM format. + CACert string +} + +// InitRequest is the request used to init a swarm. +type InitRequest struct { + ListenAddr string + AdvertiseAddr string + DataPathAddr string + ForceNewCluster bool + Spec Spec + AutoLockManagers bool + Availability NodeAvailability +} + +// JoinRequest is the request used to join a swarm. +type JoinRequest struct { + ListenAddr string + AdvertiseAddr string + DataPathAddr string + RemoteAddrs []string + JoinToken string // accept by secret + Availability NodeAvailability +} + +// UnlockRequest is the request used to unlock a swarm. +type UnlockRequest struct { + // UnlockKey is the unlock key in ASCII-armored format. + UnlockKey string +} + +// LocalNodeState represents the state of the local node. +type LocalNodeState string + +const ( + // LocalNodeStateInactive INACTIVE + LocalNodeStateInactive LocalNodeState = "inactive" + // LocalNodeStatePending PENDING + LocalNodeStatePending LocalNodeState = "pending" + // LocalNodeStateActive ACTIVE + LocalNodeStateActive LocalNodeState = "active" + // LocalNodeStateError ERROR + LocalNodeStateError LocalNodeState = "error" + // LocalNodeStateLocked LOCKED + LocalNodeStateLocked LocalNodeState = "locked" +) + +// Info represents generic information about swarm.
+type Info struct { + NodeID string + NodeAddr string + + LocalNodeState LocalNodeState + ControlAvailable bool + Error string + + RemoteManagers []Peer + Nodes int `json:",omitempty"` + Managers int `json:",omitempty"` + + Cluster *ClusterInfo `json:",omitempty"` +} + +// Peer represents a peer. +type Peer struct { + NodeID string + Addr string +} + +// UpdateFlags contains flags for SwarmUpdate. +type UpdateFlags struct { + RotateWorkerToken bool + RotateManagerToken bool + RotateManagerUnlockKey bool +} diff --git a/vendor/github.com/moby/moby/api/types/swarm/task.go b/vendor/github.com/moby/moby/api/types/swarm/task.go new file mode 100644 index 000000000..ff11b07e7 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/swarm/task.go @@ -0,0 +1,184 @@ +package swarm + +import ( + "time" + + "github.com/docker/docker/api/types/swarm/runtime" +) + +// TaskState represents the state of a task. +type TaskState string + +const ( + // TaskStateNew NEW + TaskStateNew TaskState = "new" + // TaskStateAllocated ALLOCATED + TaskStateAllocated TaskState = "allocated" + // TaskStatePending PENDING + TaskStatePending TaskState = "pending" + // TaskStateAssigned ASSIGNED + TaskStateAssigned TaskState = "assigned" + // TaskStateAccepted ACCEPTED + TaskStateAccepted TaskState = "accepted" + // TaskStatePreparing PREPARING + TaskStatePreparing TaskState = "preparing" + // TaskStateReady READY + TaskStateReady TaskState = "ready" + // TaskStateStarting STARTING + TaskStateStarting TaskState = "starting" + // TaskStateRunning RUNNING + TaskStateRunning TaskState = "running" + // TaskStateComplete COMPLETE + TaskStateComplete TaskState = "complete" + // TaskStateShutdown SHUTDOWN + TaskStateShutdown TaskState = "shutdown" + // TaskStateFailed FAILED + TaskStateFailed TaskState = "failed" + // TaskStateRejected REJECTED + TaskStateRejected TaskState = "rejected" +) + +// Task represents a task. +type Task struct { + ID string + Meta + Annotations + + Spec TaskSpec `json:",omitempty"` + ServiceID string `json:",omitempty"` + Slot int `json:",omitempty"` + NodeID string `json:",omitempty"` + Status TaskStatus `json:",omitempty"` + DesiredState TaskState `json:",omitempty"` + NetworksAttachments []NetworkAttachment `json:",omitempty"` + GenericResources []GenericResource `json:",omitempty"` +} + +// TaskSpec represents the spec of a task. +type TaskSpec struct { + // ContainerSpec and PluginSpec are mutually exclusive. + // PluginSpec will only be used when the `Runtime` field is set to `plugin`. + ContainerSpec *ContainerSpec `json:",omitempty"` + PluginSpec *runtime.PluginSpec `json:",omitempty"` + + Resources *ResourceRequirements `json:",omitempty"` + RestartPolicy *RestartPolicy `json:",omitempty"` + Placement *Placement `json:",omitempty"` + Networks []NetworkAttachmentConfig `json:",omitempty"` + + // LogDriver specifies the LogDriver to use for tasks created from this + // spec. If not present, the cluster default on swarm.Spec will be used, + // finally falling back to the engine default if not specified. + LogDriver *Driver `json:",omitempty"` + + // ForceUpdate is a counter that triggers an update even if no relevant + // parameters have been changed. + ForceUpdate uint64 + + Runtime RuntimeType `json:",omitempty"` +} + +// Resources represents resources (CPU/Memory).
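+// +// As a hypothetical illustration (values invented, not from the upstream +// documentation), a reservation of two CPUs and 1GiB of memory would be +// expressed in nanoCPUs and bytes: +// +// res := Resources{NanoCPUs: 2000000000, MemoryBytes: 1 << 30}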
+type Resources struct { + NanoCPUs int64 `json:",omitempty"` + MemoryBytes int64 `json:",omitempty"` + GenericResources []GenericResource `json:",omitempty"` +} + +// GenericResource represents a "user defined" resource which can +// be either an integer (e.g: SSD=3) or a string (e.g: SSD=sda1) +type GenericResource struct { + NamedResourceSpec *NamedGenericResource `json:",omitempty"` + DiscreteResourceSpec *DiscreteGenericResource `json:",omitempty"` +} + +// NamedGenericResource represents a "user defined" resource which is defined +// as a string. +// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) +// Value is used to identify the resource (GPU="UUID-1", FPGA="/dev/sdb5", ...) +type NamedGenericResource struct { + Kind string `json:",omitempty"` + Value string `json:",omitempty"` +} + +// DiscreteGenericResource represents a "user defined" resource which is defined +// as an integer +// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) +// Value is used to count the resource (SSD=5, HDD=3, ...) +type DiscreteGenericResource struct { + Kind string `json:",omitempty"` + Value int64 `json:",omitempty"` +} + +// ResourceRequirements represents resources requirements. +type ResourceRequirements struct { + Limits *Resources `json:",omitempty"` + Reservations *Resources `json:",omitempty"` +} + +// Placement represents orchestration parameters. +type Placement struct { + Constraints []string `json:",omitempty"` + Preferences []PlacementPreference `json:",omitempty"` + + // Platforms stores all the platforms that the image can run on. + // This field is used in the platform filter for scheduling. If empty, + // then the platform filter is off, meaning there are no scheduling restrictions. + Platforms []Platform `json:",omitempty"` +} + +// PlacementPreference provides a way to make the scheduler aware of factors +// such as topology. +type PlacementPreference struct { + Spread *SpreadOver +} + +// SpreadOver is a scheduling preference that instructs the scheduler to spread +// tasks evenly over groups of nodes identified by labels. +type SpreadOver struct { + // label descriptor, such as engine.labels.az + SpreadDescriptor string +} + +// RestartPolicy represents the restart policy. +type RestartPolicy struct { + Condition RestartPolicyCondition `json:",omitempty"` + Delay *time.Duration `json:",omitempty"` + MaxAttempts *uint64 `json:",omitempty"` + Window *time.Duration `json:",omitempty"` +} + +// RestartPolicyCondition represents when to restart. +type RestartPolicyCondition string + +const ( + // RestartPolicyConditionNone NONE + RestartPolicyConditionNone RestartPolicyCondition = "none" + // RestartPolicyConditionOnFailure ON_FAILURE + RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure" + // RestartPolicyConditionAny ANY + RestartPolicyConditionAny RestartPolicyCondition = "any" +) + +// TaskStatus represents the status of a task. +type TaskStatus struct { + Timestamp time.Time `json:",omitempty"` + State TaskState `json:",omitempty"` + Message string `json:",omitempty"` + Err string `json:",omitempty"` + ContainerStatus ContainerStatus `json:",omitempty"` + PortStatus PortStatus `json:",omitempty"` +} + +// ContainerStatus represents the status of a container. 
+type ContainerStatus struct { + ContainerID string `json:",omitempty"` + PID int `json:",omitempty"` + ExitCode int `json:",omitempty"` +} + +// PortStatus represents the port status of a task's host ports, for tasks +// whose service has published host ports +type PortStatus struct { + Ports []PortConfig `json:",omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/time/duration_convert.go b/vendor/github.com/moby/moby/api/types/time/duration_convert.go new file mode 100644 index 000000000..63e1eec19 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/time/duration_convert.go @@ -0,0 +1,12 @@ +package time + +import ( + "strconv" + "time" +) + +// DurationToSecondsString converts the specified duration to the number of +// seconds it represents, formatted as a string. +func DurationToSecondsString(duration time.Duration) string { + return strconv.FormatFloat(duration.Seconds(), 'f', 0, 64) +} diff --git a/vendor/github.com/moby/moby/api/types/time/duration_convert_test.go b/vendor/github.com/moby/moby/api/types/time/duration_convert_test.go new file mode 100644 index 000000000..869c08f86 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/time/duration_convert_test.go @@ -0,0 +1,26 @@ +package time + +import ( + "testing" + "time" +) + +func TestDurationToSecondsString(t *testing.T) { + cases := []struct { + in time.Duration + expected string + }{ + {0 * time.Second, "0"}, + {1 * time.Second, "1"}, + {1 * time.Minute, "60"}, + {24 * time.Hour, "86400"}, + } + + for _, c := range cases { + s := DurationToSecondsString(c.in) + if s != c.expected { + t.Errorf("wrong value for input `%v`: expected `%s`, got `%s`", c.in, c.expected, s) + t.Fail() + } + } +} diff --git a/vendor/github.com/moby/moby/api/types/time/timestamp.go b/vendor/github.com/moby/moby/api/types/time/timestamp.go new file mode 100644 index 000000000..9aa9702da --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/time/timestamp.go @@ -0,0 +1,124 @@ +package time + +import ( + "fmt" + "math" + "strconv" + "strings" + "time" +) + +// These are additional predefined layouts for use in Time.Format and Time.Parse +// with --since and --until parameters for `docker logs` and `docker events` +const ( + rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone + rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone + dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00 + dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00 +) + +// GetTimestamp tries to parse the given string as a golang duration, +// then as RFC3339 time and finally as a Unix timestamp. If +// any of these were successful, it returns a Unix timestamp +// as a string; otherwise it returns the given value back. +// In case of duration input, the returned timestamp is computed +// as the given reference time minus the amount of the duration.
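+// +// For example, matching the cases exercised in timestamp_test.go: +// +// GetTimestamp("2006-01-02T15:04:05Z", time.Now()) // "1136214245.000000000", nil +// GetTimestamp("1m", now) // now minus one minute, as a Unix-seconds string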
+func GetTimestamp(value string, reference time.Time) (string, error) { + if d, err := time.ParseDuration(value); value != "0" && err == nil { + return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil + } + + var format string + var parseInLocation bool + + // if the string has a Z or a +, or contains three dashes, use Parse; otherwise use ParseInLocation + parseInLocation = !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3) + + if strings.Contains(value, ".") { + if parseInLocation { + format = rFC3339NanoLocal + } else { + format = time.RFC3339Nano + } + } else if strings.Contains(value, "T") { + // we want the number of colons in the T portion of the timestamp + tcolons := strings.Count(value, ":") + // if parseInLocation is off and we have a +/- zone offset (not Z) then + // there will be an extra colon in the input for the tz offset, so subtract + // that colon from the tcolons count + if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 { + tcolons-- + } + if parseInLocation { + switch tcolons { + case 0: + format = "2006-01-02T15" + case 1: + format = "2006-01-02T15:04" + default: + format = rFC3339Local + } + } else { + switch tcolons { + case 0: + format = "2006-01-02T15Z07:00" + case 1: + format = "2006-01-02T15:04Z07:00" + default: + format = time.RFC3339 + } + } + } else if parseInLocation { + format = dateLocal + } else { + format = dateWithZone + } + + var t time.Time + var err error + + if parseInLocation { + t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone())) + } else { + t, err = time.Parse(format, value) + } + + if err != nil { + // if there is a `-` then it's an RFC3339-like timestamp; otherwise assume a Unix timestamp + if strings.Contains(value, "-") { + return "", err // was probably an RFC3339-like timestamp but the parser failed with an error + } + return value, nil // Unix timestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server) + } + + return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil +} + +// ParseTimestamps returns seconds and nanoseconds from a timestamp that has the +// format "%d.%09d", time.Unix(), int64(time.Nanosecond())) +// if the incoming nanosecond portion is longer or shorter than 9 digits it is +// converted to nanoseconds. The expectation is that the seconds and +// nanoseconds will be used to create a time variable.
For example: + // seconds, nanoseconds, err := ParseTimestamps("1136073600.000000001",0) + // if err == nil since := time.Unix(seconds, nanoseconds) + // returns def as the seconds value if value == "" +func ParseTimestamps(value string, def int64) (int64, int64, error) { + if value == "" { + return def, 0, nil + } + sa := strings.SplitN(value, ".", 2) + s, err := strconv.ParseInt(sa[0], 10, 64) + if err != nil { + return s, 0, err + } + if len(sa) != 2 { + return s, 0, nil + } + n, err := strconv.ParseInt(sa[1], 10, 64) + if err != nil { + return s, n, err + } + // should already be in nanoseconds but just in case convert n to nanoseconds + n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1])))) + return s, n, nil +} diff --git a/vendor/github.com/moby/moby/api/types/time/timestamp_test.go b/vendor/github.com/moby/moby/api/types/time/timestamp_test.go new file mode 100644 index 000000000..a1651309d --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/time/timestamp_test.go @@ -0,0 +1,93 @@ +package time + +import ( + "fmt" + "testing" + "time" +) + +func TestGetTimestamp(t *testing.T) { + now := time.Now().In(time.UTC) + cases := []struct { + in, expected string + expectedErr bool + }{ + // Partial RFC3339 strings get parsed with second precision + {"2006-01-02T15:04:05.999999999+07:00", "1136189045.999999999", false}, + {"2006-01-02T15:04:05.999999999Z", "1136214245.999999999", false}, + {"2006-01-02T15:04:05.999999999", "1136214245.999999999", false}, + {"2006-01-02T15:04:05Z", "1136214245.000000000", false}, + {"2006-01-02T15:04:05", "1136214245.000000000", false}, + {"2006-01-02T15:04:0Z", "", true}, + {"2006-01-02T15:04:0", "", true}, + {"2006-01-02T15:04Z", "1136214240.000000000", false}, + {"2006-01-02T15:04+00:00", "1136214240.000000000", false}, + {"2006-01-02T15:04-00:00", "1136214240.000000000", false}, + {"2006-01-02T15:04", "1136214240.000000000", false}, + {"2006-01-02T15:0Z", "", true}, + {"2006-01-02T15:0", "", true}, + {"2006-01-02T15Z", "1136214000.000000000", false}, + {"2006-01-02T15+00:00", "1136214000.000000000", false}, + {"2006-01-02T15-00:00", "1136214000.000000000", false}, + {"2006-01-02T15", "1136214000.000000000", false}, + {"2006-01-02T1Z", "1136163600.000000000", false}, + {"2006-01-02T1", "1136163600.000000000", false}, + {"2006-01-02TZ", "", true}, + {"2006-01-02T", "", true}, + {"2006-01-02+00:00", "1136160000.000000000", false}, + {"2006-01-02-00:00", "1136160000.000000000", false}, + {"2006-01-02-00:01", "1136160060.000000000", false}, + {"2006-01-02Z", "1136160000.000000000", false}, + {"2006-01-02", "1136160000.000000000", false}, + {"2015-05-13T20:39:09Z", "1431549549.000000000", false}, + + // unix timestamps returned as is + {"1136073600", "1136073600", false}, + {"1136073600.000000001", "1136073600.000000001", false}, + // Durations + {"1m", fmt.Sprintf("%d", now.Add(-1*time.Minute).Unix()), false}, + {"1.5h", fmt.Sprintf("%d", now.Add(-90*time.Minute).Unix()), false}, + {"1h30m", fmt.Sprintf("%d", now.Add(-90*time.Minute).Unix()), false}, + + // String fallback + {"invalid", "invalid", false}, + } + + for _, c := range cases { + o, err := GetTimestamp(c.in, now) + if o != c.expected || + (err == nil && c.expectedErr) || + (err != nil && !c.expectedErr) { + t.Errorf("wrong value for '%s'. 
expected:'%s' got:'%s' with error: `%s`", c.in, c.expected, o, err) + t.Fail() + } + } +} + +func TestParseTimestamps(t *testing.T) { + cases := []struct { + in string + def, expectedS, expectedN int64 + expectedErr bool + }{ + // unix timestamps + {"1136073600", 0, 1136073600, 0, false}, + {"1136073600.000000001", 0, 1136073600, 1, false}, + {"1136073600.0000000010", 0, 1136073600, 1, false}, + {"1136073600.00000001", 0, 1136073600, 10, false}, + {"foo.bar", 0, 0, 0, true}, + {"1136073600.bar", 0, 1136073600, 0, true}, + {"", -1, -1, 0, false}, + } + + for _, c := range cases { + s, n, err := ParseTimestamps(c.in, c.def) + if s != c.expectedS || + n != c.expectedN || + (err == nil && c.expectedErr) || + (err != nil && !c.expectedErr) { + t.Errorf("wrong values for input `%s` with default `%d` expected:'%d'seconds and `%d`nanosecond got:'%d'seconds and `%d`nanoseconds with error: `%s`", c.in, c.def, c.expectedS, c.expectedN, s, n, err) + t.Fail() + } + } +} diff --git a/vendor/github.com/moby/moby/api/types/types.go b/vendor/github.com/moby/moby/api/types/types.go new file mode 100644 index 000000000..f7ac77297 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/types.go @@ -0,0 +1,575 @@ +package types + +import ( + "errors" + "fmt" + "io" + "os" + "strings" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/go-connections/nat" +) + +// RootFS returns Image's RootFS description including the layer IDs. +type RootFS struct { + Type string + Layers []string `json:",omitempty"` + BaseLayer string `json:",omitempty"` +} + +// ImageInspect contains response of Engine API: +// GET "/images/{name:.*}/json" +type ImageInspect struct { + ID string `json:"Id"` + RepoTags []string + RepoDigests []string + Parent string + Comment string + Created string + Container string + ContainerConfig *container.Config + DockerVersion string + Author string + Config *container.Config + Architecture string + Os string + OsVersion string `json:",omitempty"` + Size int64 + VirtualSize int64 + GraphDriver GraphDriverData + RootFS RootFS + Metadata ImageMetadata +} + +// ImageMetadata contains engine-local data about the image +type ImageMetadata struct { + LastTagTime time.Time `json:",omitempty"` +} + +// Container contains response of Engine API: +// GET "/containers/json" +type Container struct { + ID string `json:"Id"` + Names []string + Image string + ImageID string + Command string + Created int64 + Ports []Port + SizeRw int64 `json:",omitempty"` + SizeRootFs int64 `json:",omitempty"` + Labels map[string]string + State string + Status string + HostConfig struct { + NetworkMode string `json:",omitempty"` + } + NetworkSettings *SummaryNetworkSettings + Mounts []MountPoint +} + +// CopyConfig contains request body of Engine API: +// POST "/containers/"+containerID+"/copy" +type CopyConfig struct { + Resource string +} + +// ContainerPathStat is used to encode the header from +// GET "/containers/{name:.*}/archive" +// "Name" is the file or directory name. 
+type ContainerPathStat struct { + Name string `json:"name"` + Size int64 `json:"size"` + Mode os.FileMode `json:"mode"` + Mtime time.Time `json:"mtime"` + LinkTarget string `json:"linkTarget"` +} + +// ContainerStats contains response of Engine API: +// GET "/stats" +type ContainerStats struct { + Body io.ReadCloser `json:"body"` + OSType string `json:"ostype"` +} + +// Ping contains response of Engine API: +// GET "/_ping" +type Ping struct { + APIVersion string + OSType string + Experimental bool +} + +// Version contains response of Engine API: +// GET "/version" +type Version struct { + Version string + APIVersion string `json:"ApiVersion"` + MinAPIVersion string `json:"MinAPIVersion,omitempty"` + GitCommit string + GoVersion string + Os string + Arch string + KernelVersion string `json:",omitempty"` + Experimental bool `json:",omitempty"` + BuildTime string `json:",omitempty"` +} + +// Commit holds the Git-commit (SHA1) that a binary was built from, as reported +// in the version-string of external tools, such as containerd or runC. +type Commit struct { + ID string // ID is the actual commit ID of external tool. + Expected string // Expected is the commit ID of external tool expected by dockerd as set at build time. +} + +// Info contains response of Engine API: +// GET "/info" +type Info struct { + ID string + Containers int + ContainersRunning int + ContainersPaused int + ContainersStopped int + Images int + Driver string + DriverStatus [][2]string + SystemStatus [][2]string + Plugins PluginsInfo + MemoryLimit bool + SwapLimit bool + KernelMemory bool + CPUCfsPeriod bool `json:"CpuCfsPeriod"` + CPUCfsQuota bool `json:"CpuCfsQuota"` + CPUShares bool + CPUSet bool + IPv4Forwarding bool + BridgeNfIptables bool + BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` + Debug bool + NFd int + OomKillDisable bool + NGoroutines int + SystemTime string + LoggingDriver string + CgroupDriver string + NEventsListener int + KernelVersion string + OperatingSystem string + OSType string + Architecture string + IndexServerAddress string + RegistryConfig *registry.ServiceConfig + NCPU int + MemTotal int64 + GenericResources []swarm.GenericResource + DockerRootDir string + HTTPProxy string `json:"HttpProxy"` + HTTPSProxy string `json:"HttpsProxy"` + NoProxy string + Name string + Labels []string + ExperimentalBuild bool + ServerVersion string + ClusterStore string + ClusterAdvertise string + Runtimes map[string]Runtime + DefaultRuntime string + Swarm swarm.Info + // LiveRestoreEnabled determines whether containers should be kept + // running when the daemon is shut down or upon daemon start if + // running containers are detected + LiveRestoreEnabled bool + Isolation container.Isolation + InitBinary string + ContainerdCommit Commit + RuncCommit Commit + InitCommit Commit + SecurityOptions []string +} + +// KeyValue holds a key/value pair +type KeyValue struct { + Key, Value string +} + +// SecurityOpt contains the name and options of a security option +type SecurityOpt struct { + Name string + Options []KeyValue +} + +// DecodeSecurityOptions decodes a security options string slice to a type-safe +// SecurityOpt +func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) { + so := []SecurityOpt{} + for _, opt := range opts { + // support output from a < 1.13 docker daemon + if !strings.Contains(opt, "=") { + so = append(so, SecurityOpt{Name: opt}) + continue + } + secopt := SecurityOpt{} + split := strings.Split(opt, ",") + for _, s := range split { + kv := strings.SplitN(s, "=", 2) + if len(kv) 
!= 2 { + return nil, fmt.Errorf("invalid security option %q", s) + } + if kv[0] == "" || kv[1] == "" { + return nil, errors.New("invalid empty security option") + } + if kv[0] == "name" { + secopt.Name = kv[1] + continue + } + secopt.Options = append(secopt.Options, KeyValue{Key: kv[0], Value: kv[1]}) + } + so = append(so, secopt) + } + return so, nil +} + +// PluginsInfo is a temp struct holding the plugin names +// registered with the docker daemon. It is used by the Info struct +type PluginsInfo struct { + // List of Volume plugins registered + Volume []string + // List of Network plugins registered + Network []string + // List of Authorization plugins registered + Authorization []string + // List of Log plugins registered + Log []string +} + +// ExecStartCheck is a temp struct used by execStart +// Config fields are part of ExecConfig in the runconfig package +type ExecStartCheck struct { + // ExecStart will first check if it's detached + Detach bool + // Check if there's a tty + Tty bool +} + +// HealthcheckResult stores information about a single run of a healthcheck probe +type HealthcheckResult struct { + Start time.Time // Start is the time this check started + End time.Time // End is the time this check ended + ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe + Output string // Output from last check +} + +// Health states +const ( + NoHealthcheck = "none" // Indicates there is no healthcheck + Starting = "starting" // Starting indicates that the container is not yet ready + Healthy = "healthy" // Healthy indicates that the container is running correctly + Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem +) + +// Health stores information about the container's healthcheck results +type Health struct { + Status string // Status is one of Starting, Healthy or Unhealthy + FailingStreak int // FailingStreak is the number of consecutive failures + Log []*HealthcheckResult // Log contains the last few results (oldest first) +} + +// ContainerState stores a container's running state. +// It's part of ContainerJSONBase and is returned by the "inspect" command +type ContainerState struct { + Status string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead" + Running bool + Paused bool + Restarting bool + OOMKilled bool + Dead bool + Pid int + ExitCode int + Error string + StartedAt string + FinishedAt string + Health *Health `json:",omitempty"` +} + +// ContainerNode stores information about the node that a container +// is running on. 
It's only available in Docker Swarm. +type ContainerNode struct { + ID string + IPAddress string `json:"IP"` + Addr string + Name string + Cpus int + Memory int64 + Labels map[string]string +} + +// ContainerJSONBase contains response of Engine API: +// GET "/containers/{name:.*}/json" +type ContainerJSONBase struct { + ID string `json:"Id"` + Created string + Path string + Args []string + State *ContainerState + Image string + ResolvConfPath string + HostnamePath string + HostsPath string + LogPath string + Node *ContainerNode `json:",omitempty"` + Name string + RestartCount int + Driver string + Platform string + MountLabel string + ProcessLabel string + AppArmorProfile string + ExecIDs []string + HostConfig *container.HostConfig + GraphDriver GraphDriverData + SizeRw *int64 `json:",omitempty"` + SizeRootFs *int64 `json:",omitempty"` +} + +// ContainerJSON is a newly used struct along with MountPoint +type ContainerJSON struct { + *ContainerJSONBase + Mounts []MountPoint + Config *container.Config + NetworkSettings *NetworkSettings +} + +// NetworkSettings exposes the network settings in the API +type NetworkSettings struct { + NetworkSettingsBase + DefaultNetworkSettings + Networks map[string]*network.EndpointSettings +} + +// SummaryNetworkSettings provides a summary of container's networks +// in /containers/json +type SummaryNetworkSettings struct { + Networks map[string]*network.EndpointSettings +} + +// NetworkSettingsBase holds basic information about networks +type NetworkSettingsBase struct { + Bridge string // Bridge is the Bridge name the network uses (e.g. `docker0`) + SandboxID string // SandboxID uniquely represents a container's network stack + HairpinMode bool // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface + LinkLocalIPv6Address string // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix + LinkLocalIPv6PrefixLen int // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address + Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port + SandboxKey string // SandboxKey identifies the sandbox + SecondaryIPAddresses []network.Address + SecondaryIPv6Addresses []network.Address +} + +// DefaultNetworkSettings holds network information +// during the two-release deprecation period. +// It will be removed in Docker 1.11. +type DefaultNetworkSettings struct { + EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox + Gateway string // Gateway holds the gateway address for the network + GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address + GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address + IPAddress string // IPAddress holds the IPv4 address for the network + IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address + IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6 + MacAddress string // MacAddress holds the MAC address for the network +} + +// MountPoint represents a mount point configuration inside the container. +// This is used for reporting the mountpoints in use by a container. 
+type MountPoint struct { + Type mount.Type `json:",omitempty"` + Name string `json:",omitempty"` + Source string + Destination string + Driver string `json:",omitempty"` + Mode string + RW bool + Propagation mount.Propagation +} + +// NetworkResource is the body of the "get network" HTTP response message +type NetworkResource struct { + Name string // Name is the requested name of the network + ID string `json:"Id"` // ID uniquely identifies a network on a single machine + Created time.Time // Created is the time the network was created + Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level) + Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`) + EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6 + IPAM network.IPAM // IPAM is the network's IP Address Management + Internal bool // Internal represents if the network is used internally only + Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. + Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster. + ConfigFrom network.ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network. + ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services. + Containers map[string]EndpointResource // Containers contains endpoints belonging to the network + Options map[string]string // Options holds the network-specific options to use when creating the network + Labels map[string]string // Labels holds metadata specific to the network being created + Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network + Services map[string]network.ServiceInfo `json:",omitempty"` +} + +// EndpointResource contains network resources allocated and used for a container in a network +type EndpointResource struct { + Name string + EndpointID string + MacAddress string + IPv4Address string + IPv6Address string +} + +// NetworkCreate is the expected body of the "create network" HTTP request message +type NetworkCreate struct { + // Check for networks with duplicate names. + // Network is primarily keyed based on a random ID and not on the name. + // Network name is strictly a user-friendly alias to the network + // which is uniquely identified using ID. + // And there is no guaranteed way to check for duplicates. + // Option CheckDuplicate is there to provide a best-effort check for any networks + // which have the same name, but it is not guaranteed to catch all name collisions. + CheckDuplicate bool + Driver string + Scope string + EnableIPv6 bool + IPAM *network.IPAM + Internal bool + Attachable bool + Ingress bool + ConfigOnly bool + ConfigFrom *network.ConfigReference + Options map[string]string + Labels map[string]string +} + +// NetworkCreateRequest is the request message sent to the server for the network create call.
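+// +// A minimal request (the field values here are illustrative only) could look like: +// +// req := NetworkCreateRequest{ +// Name: "app-net", +// NetworkCreate: NetworkCreate{Driver: "overlay", CheckDuplicate: true}, +// }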
// NetworkCreateRequest is the request message sent to the server for the network create call. +type NetworkCreateRequest struct { + NetworkCreate + Name string +} +
+// NetworkCreateResponse is the response message sent by the server for the network create call +type NetworkCreateResponse struct { + ID string `json:"Id"` + Warning string +} +
+// NetworkConnect represents the data to be used to connect a container to the network +type NetworkConnect struct { + Container string + EndpointConfig *network.EndpointSettings `json:",omitempty"` +} +
+// NetworkDisconnect represents the data to be used to disconnect a container from the network +type NetworkDisconnect struct { + Container string + Force bool +} +
+// NetworkInspectOptions holds parameters to inspect a network +type NetworkInspectOptions struct { + Scope string + Verbose bool +} +
+// Checkpoint represents the details of a checkpoint +type Checkpoint struct { + Name string // Name is the name of the checkpoint +} +
+// Runtime describes an OCI runtime +type Runtime struct { + Path string `json:"path"` + Args []string `json:"runtimeArgs,omitempty"` +} +
+// DiskUsage contains response of Engine API: +// GET "/system/df" +type DiskUsage struct { + LayersSize int64 + Images []*ImageSummary + Containers []*Container + Volumes []*Volume + BuilderSize int64 +} +
+// ContainersPruneReport contains the response for Engine API: +// POST "/containers/prune" +type ContainersPruneReport struct { + ContainersDeleted []string + SpaceReclaimed uint64 +} +
+// VolumesPruneReport contains the response for Engine API: +// POST "/volumes/prune" +type VolumesPruneReport struct { + VolumesDeleted []string + SpaceReclaimed uint64 +} +
+// ImagesPruneReport contains the response for Engine API: +// POST "/images/prune" +type ImagesPruneReport struct { + ImagesDeleted []ImageDeleteResponseItem + SpaceReclaimed uint64 +} +
+// BuildCachePruneReport contains the response for Engine API: +// POST "/build/prune" +type BuildCachePruneReport struct { + SpaceReclaimed uint64 +} +
+// NetworksPruneReport contains the response for Engine API: +// POST "/networks/prune" +type NetworksPruneReport struct { + NetworksDeleted []string +} +
+// SecretCreateResponse contains the information returned to a client +// on the creation of a new secret. +type SecretCreateResponse struct { + // ID is the id of the created secret. + ID string +} +
+// SecretListOptions holds parameters to list secrets +type SecretListOptions struct { + Filters filters.Args +} +
+// ConfigCreateResponse contains the information returned to a client +// on the creation of a new config. +type ConfigCreateResponse struct { + // ID is the id of the created config. + ID string +} +
+// ConfigListOptions holds parameters to list configs +type ConfigListOptions struct { + Filters filters.Args +} +
+// PushResult contains the tag, manifest digest, and manifest size from the +// push. It's used to signal this information to the trust code in the client +// so it can sign the manifest if necessary. +type PushResult struct { + Tag string + Digest string + Size int +} +
+// BuildResult contains the image ID of a successful build +type BuildResult struct { + ID string +} diff --git a/vendor/github.com/moby/moby/api/types/versions/README.md b/vendor/github.com/moby/moby/api/types/versions/README.md new file mode 100644 index 000000000..1ef911edb --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/versions/README.md @@ -0,0 +1,14 @@ +# Legacy API type versions +
+This package includes types for legacy API versions. The stable versions of the API types live in `api/types/*.go`.
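As a hedged illustration of the pattern these packages enable (the comparison helpers come from the `versions` package shown below; the selection function itself is assumed for the example, not excerpted from the daemon):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/versions"
	"github.com/docker/docker/api/types/versions/v1p19"
	"github.com/docker/docker/api/types/versions/v1p20"
)

// inspectResponseFor picks the response shape for a negotiated API version.
func inspectResponseFor(apiVersion string) interface{} {
	switch {
	case versions.LessThan(apiVersion, "1.20"):
		return &v1p19.ContainerJSON{} // legacy shape, valid for API <= 1.19
	case versions.LessThan(apiVersion, "1.21"):
		return &v1p20.ContainerJSON{} // shape for API 1.20
	default:
		return nil // current types live in api/types
	}
}

func main() {
	fmt.Printf("%T\n", inspectResponseFor("1.19")) // *v1p19.ContainerJSON
}
```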
+ +Consider moving a type here when you need to keep backwards compatibility in the API. These legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal to `1.19`. Types in the `v1p20` package are valid for API version `1.20`, since versions below that use the legacy types in `v1p19`. +
+## Package name conventions +
+The package name convention is to use `v` as a prefix for the version number and `p` (patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention: +
+1. We cannot use `.` because it's interpreted by the language; think of `v1.20.CallFunction`. +2. We cannot use `_` because golint complains about it. The code would be valid, but it reads more awkwardly: `v1_20.CallFunction`. +
+For instance, if you want to modify a type that was available in version `1.21` of the API but will have different fields in version `1.22`, you want to create a new package under `api/types/versions/v1p21`. diff --git a/vendor/github.com/moby/moby/api/types/versions/compare.go b/vendor/github.com/moby/moby/api/types/versions/compare.go new file mode 100644 index 000000000..611d4fed6 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/versions/compare.go @@ -0,0 +1,62 @@ +package versions +
+import ( + "strconv" + "strings" +) +
+// compare compares two version strings numerically, segment by segment; +// it returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise. +func compare(v1, v2 string) int { + var ( + currTab = strings.Split(v1, ".") + otherTab = strings.Split(v2, ".") + ) +
+ max := len(currTab) + if len(otherTab) > max { + max = len(otherTab) + } + for i := 0; i < max; i++ { + var currInt, otherInt int +
+ if len(currTab) > i { + currInt, _ = strconv.Atoi(currTab[i]) + } + if len(otherTab) > i { + otherInt, _ = strconv.Atoi(otherTab[i]) + } + if currInt > otherInt { + return 1 + } + if otherInt > currInt { + return -1 + } + } + return 0 +} +
+// LessThan checks if a version is less than another +func LessThan(v, other string) bool { + return compare(v, other) == -1 +} +
+// LessThanOrEqualTo checks if a version is less than or equal to another +func LessThanOrEqualTo(v, other string) bool { + return compare(v, other) <= 0 +} +
+// GreaterThan checks if a version is greater than another +func GreaterThan(v, other string) bool { + return compare(v, other) == 1 +} +
+// GreaterThanOrEqualTo checks if a version is greater than or equal to another +func GreaterThanOrEqualTo(v, other string) bool { + return compare(v, other) >= 0 +} +
+// Equal checks if a version is equal to another +func Equal(v, other string) bool { + return compare(v, other) == 0 +} diff --git a/vendor/github.com/moby/moby/api/types/versions/compare_test.go b/vendor/github.com/moby/moby/api/types/versions/compare_test.go new file mode 100644 index 000000000..c2b96869f --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/versions/compare_test.go @@ -0,0 +1,26 @@ +package versions +
+import ( + "testing" +) +
+func assertVersion(t *testing.T, a, b string, result int) { + if r := compare(a, b); r != result { + t.Fatalf("Unexpected version comparison result.
Found %d, expected %d", r, result) + } +} + +func TestCompareVersion(t *testing.T) { + assertVersion(t, "1.12", "1.12", 0) + assertVersion(t, "1.0.0", "1", 0) + assertVersion(t, "1", "1.0.0", 0) + assertVersion(t, "1.05.00.0156", "1.0.221.9289", 1) + assertVersion(t, "1", "1.0.1", -1) + assertVersion(t, "1.0.1", "1", 1) + assertVersion(t, "1.0.1", "1.0.2", -1) + assertVersion(t, "1.0.2", "1.0.3", -1) + assertVersion(t, "1.0.3", "1.1", -1) + assertVersion(t, "1.1", "1.1.1", -1) + assertVersion(t, "1.1.1", "1.1.2", -1) + assertVersion(t, "1.1.2", "1.2", -1) +} diff --git a/vendor/github.com/moby/moby/api/types/versions/v1p19/types.go b/vendor/github.com/moby/moby/api/types/versions/v1p19/types.go new file mode 100644 index 000000000..dc1315054 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/versions/v1p19/types.go @@ -0,0 +1,35 @@ +// Package v1p19 provides specific API types for the API version 1, patch 19. +package v1p19 + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/versions/v1p20" + "github.com/docker/go-connections/nat" +) + +// ContainerJSON is a backcompatibility struct for APIs prior to 1.20. +// Note this is not used by the Windows daemon. +type ContainerJSON struct { + *types.ContainerJSONBase + Volumes map[string]string + VolumesRW map[string]bool + Config *ContainerConfig + NetworkSettings *v1p20.NetworkSettings +} + +// ContainerConfig is a backcompatibility struct for APIs prior to 1.20. +type ContainerConfig struct { + *container.Config + + MacAddress string + NetworkDisabled bool + ExposedPorts map[nat.Port]struct{} + + // backward compatibility, they now live in HostConfig + VolumeDriver string + Memory int64 + MemorySwap int64 + CPUShares int64 `json:"CpuShares"` + CPUSet string `json:"Cpuset"` +} diff --git a/vendor/github.com/moby/moby/api/types/versions/v1p20/types.go b/vendor/github.com/moby/moby/api/types/versions/v1p20/types.go new file mode 100644 index 000000000..94a06d745 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/versions/v1p20/types.go @@ -0,0 +1,40 @@ +// Package v1p20 provides specific API types for the API version 1, patch 20. +package v1p20 + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/go-connections/nat" +) + +// ContainerJSON is a backcompatibility struct for the API 1.20 +type ContainerJSON struct { + *types.ContainerJSONBase + Mounts []types.MountPoint + Config *ContainerConfig + NetworkSettings *NetworkSettings +} + +// ContainerConfig is a backcompatibility struct used in ContainerJSON for the API 1.20 +type ContainerConfig struct { + *container.Config + + MacAddress string + NetworkDisabled bool + ExposedPorts map[nat.Port]struct{} + + // backward compatibility, they now live in HostConfig + VolumeDriver string +} + +// StatsJSON is a backcompatibility struct used in Stats for APIs prior to 1.21 +type StatsJSON struct { + types.Stats + Network types.NetworkStats `json:"network,omitempty"` +} + +// NetworkSettings is a backward compatible struct for APIs prior to 1.21 +type NetworkSettings struct { + types.NetworkSettingsBase + types.DefaultNetworkSettings +} diff --git a/vendor/github.com/moby/moby/api/types/volume.go b/vendor/github.com/moby/moby/api/types/volume.go new file mode 100644 index 000000000..b5ee96a50 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/volume.go @@ -0,0 +1,69 @@ +package types + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// Volume volume +// swagger:model Volume +type Volume struct { + + // Date/Time the volume was created. + CreatedAt string `json:"CreatedAt,omitempty"` + + // Name of the volume driver used by the volume. + // Required: true + Driver string `json:"Driver"` + + // User-defined key/value metadata. + // Required: true + Labels map[string]string `json:"Labels"` + + // Mount path of the volume on the host. + // Required: true + Mountpoint string `json:"Mountpoint"` + + // Name of the volume. + // Required: true + Name string `json:"Name"` + + // The driver specific options used when creating the volume. + // Required: true + Options map[string]string `json:"Options"` + + // The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level. + // Required: true + Scope string `json:"Scope"` + + // Low-level details about the volume, provided by the volume driver. + // Details are returned as a map with key/value pairs: + // `{"key":"value","key2":"value2"}`. + // + // The `Status` field is optional, and is omitted if the volume driver + // does not support this feature. + // + Status map[string]interface{} `json:"Status,omitempty"` + + // usage data + UsageData *VolumeUsageData `json:"UsageData,omitempty"` +} + +// VolumeUsageData Usage details about the volume. This information is used by the +// `GET /system/df` endpoint, and omitted in other endpoints. +// +// swagger:model VolumeUsageData +type VolumeUsageData struct { + + // The number of containers referencing this volume. This field + // is set to `-1` if the reference-count is not available. + // + // Required: true + RefCount int64 `json:"RefCount"` + + // Amount of disk space used by the volume (in bytes). This information + // is only available for volumes created with the `"local"` volume + // driver. For volumes created with other volume drivers, this field + // is set to `-1` ("not available") + // + // Required: true + Size int64 `json:"Size"` +} diff --git a/vendor/github.com/moby/moby/api/types/volume/volumes_create.go b/vendor/github.com/moby/moby/api/types/volume/volumes_create.go new file mode 100644 index 000000000..9f70e43ca --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/volume/volumes_create.go @@ -0,0 +1,29 @@ +package volume + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// VolumesCreateBody volumes create body +// swagger:model VolumesCreateBody +type VolumesCreateBody struct { + + // Name of the volume driver to use. + // Required: true + Driver string `json:"Driver"` + + // A mapping of driver options and values. These options are passed directly to the driver and are driver specific. + // Required: true + DriverOpts map[string]string `json:"DriverOpts"` + + // User-defined key/value metadata. + // Required: true + Labels map[string]string `json:"Labels"` + + // The new volume's name. If not specified, Docker generates a name. 
+ // Required: true + Name string `json:"Name"` +} diff --git a/vendor/github.com/moby/moby/api/types/volume/volumes_list.go b/vendor/github.com/moby/moby/api/types/volume/volumes_list.go new file mode 100644 index 000000000..833dad933 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/volume/volumes_list.go @@ -0,0 +1,23 @@ +package volume +
+// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- +
+import "github.com/docker/docker/api/types" +
+// VolumesListOKBody volumes list o k body +// swagger:model VolumesListOKBody +type VolumesListOKBody struct { +
+ // List of volumes + // Required: true + Volumes []*types.Volume `json:"Volumes"` +
+ // Warnings that occurred when fetching the list of volumes + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/moby/moby/builder/builder.go b/vendor/github.com/moby/moby/builder/builder.go new file mode 100644 index 000000000..e480601d4 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/builder.go @@ -0,0 +1,105 @@ +// Package builder defines interfaces for any Docker builder to implement. +// +// Historically, only server-side Dockerfile interpreters existed. +// This package allows for other implementations of Docker builders. +package builder +
+import ( + "io" +
+ "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + containerpkg "github.com/docker/docker/container" + "github.com/docker/docker/layer" + "golang.org/x/net/context" +) +
+const ( + // DefaultDockerfileName is the default filename with Docker commands, read by docker build + DefaultDockerfileName string = "Dockerfile" +) +
+// Source defines a location that can be used as a source for the ADD/COPY +// instructions in the builder. +type Source interface { + // Root returns the root path for accessing the source + Root() string + // Close signals that the filesystem tree won't be used anymore. + // For Context implementations using a temporary directory, it is recommended to + // delete the temporary directory in Close(). + Close() error + // Hash returns a checksum for a file + Hash(path string) (string, error) +} +
+// Backend abstracts calls to a Docker Daemon. +type Backend interface { + ImageBackend + ExecBackend +
+ // Commit creates a new Docker image from an existing Docker container. + Commit(string, *backend.ContainerCommitConfig) (string, error) + // ContainerCreateWorkdir creates the workdir + ContainerCreateWorkdir(containerID string) error +
+ CreateImage(config []byte, parent string, platform string) (Image, error) +
+ ImageCacheBuilder +} +
+// ImageBackend are the interface methods required from an image component +type ImageBackend interface { + GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (Image, ReleaseableLayer, error) +} +
+// ExecBackend contains the interface methods required for executing containers +type ExecBackend interface { + // ContainerAttachRaw attaches to a container.
+ ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool, attached chan struct{}) error + // ContainerCreate creates a new Docker container and returns potential warnings + ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) + // ContainerRm removes a container specified by `id`. + ContainerRm(name string, config *types.ContainerRmConfig) error + // ContainerKill stops the container execution abruptly. + ContainerKill(containerID string, sig uint64) error + // ContainerStart starts a new container + ContainerStart(containerID string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error + // ContainerWait stops processing until the given container is stopped. + ContainerWait(ctx context.Context, name string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error) +} +
+// Result is the output produced by a Builder +type Result struct { + ImageID string + FromImage Image +} +
+// ImageCacheBuilder represents a generator for a stateful image cache. +type ImageCacheBuilder interface { + // MakeImageCache creates a stateful image cache. + MakeImageCache(cacheFrom []string, platform string) ImageCache +} +
+// ImageCache abstracts an image cache. +// (parent image, child runconfig) -> child image +type ImageCache interface { + // GetCache returns a reference to a cached image whose parent equals `parent` + // and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error. + GetCache(parentID string, cfg *container.Config) (imageID string, err error) +} +
+// Image represents a Docker image used by the builder. +type Image interface { + ImageID() string + RunConfig() *container.Config + MarshalJSON() ([]byte, error) +} +
+// ReleaseableLayer is an image layer that can be mounted and released +type ReleaseableLayer interface { + Release() error + Mount() (string, error) + Commit(platform string) (ReleaseableLayer, error) + DiffID() layer.DiffID +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/bflag.go b/vendor/github.com/moby/moby/builder/dockerfile/bflag.go new file mode 100644 index 000000000..d84966162 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/bflag.go @@ -0,0 +1,183 @@ +package dockerfile +
+import ( + "fmt" + "strings" +) +
+// FlagType is the type of the build flag +type FlagType int +
+const ( + boolType FlagType = iota + stringType +) +
+// BFlags contains all flag information for the builder +type BFlags struct { + Args []string // actual flags/args from cmd line + flags map[string]*Flag + used map[string]*Flag + Err error +} +
+// Flag contains all information for a flag +type Flag struct { + bf *BFlags + name string + flagType FlagType + Value string +} +
+// NewBFlags returns a new BFlags struct +func NewBFlags() *BFlags { + return &BFlags{ + flags: make(map[string]*Flag), + used: make(map[string]*Flag), + } +} +
+// NewBFlagsWithArgs returns a new BFlags struct with Args set to args +func NewBFlagsWithArgs(args []string) *BFlags { + flags := NewBFlags() + flags.Args = args + return flags +}
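The intended flow is define-then-parse: register flags with the AddXXX helpers below, then call Parse once; definition errors surface at Parse time. A small illustrative sketch, written as it might appear in a test inside this package (the flag names are made up for the example):

```go
package dockerfile

import "testing"

// TestBFlagsSketch is illustrative only, not part of the vendored code.
func TestBFlagsSketch(t *testing.T) {
	bf := NewBFlagsWithArgs([]string{"--from=builder", "--retry"})
	flFrom := bf.AddString("from", "")
	flRetry := bf.AddBool("retry", false)

	// Errors from AddString/AddBool (e.g. a duplicate flag) surface here.
	if err := bf.Parse(); err != nil {
		t.Fatal(err)
	}
	if flFrom.Value != "builder" || !flRetry.IsTrue() {
		t.Fatalf("got %q / %v", flFrom.Value, flRetry.IsTrue())
	}
}
```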
// AddBool adds a bool flag to BFlags +// Note, any error will be generated when Parse() is called (see Parse). +func (bf *BFlags) AddBool(name string, def bool) *Flag { + flag := bf.addFlag(name, boolType) + if flag == nil { + return nil + } + if def { + flag.Value = "true" + } else { + flag.Value = "false" + } + return flag +} +
+// AddString adds a string flag to BFlags +// Note, any error will be generated when Parse() is called (see Parse). +func (bf *BFlags) AddString(name string, def string) *Flag { + flag := bf.addFlag(name, stringType) + if flag == nil { + return nil + } + flag.Value = def + return flag +} +
+// addFlag is a generic func used by the other AddXXX() funcs +// to add a new flag to the BFlags struct. +// Note, any error will be generated when Parse() is called (see Parse). +func (bf *BFlags) addFlag(name string, flagType FlagType) *Flag { + if _, ok := bf.flags[name]; ok { + bf.Err = fmt.Errorf("Duplicate flag defined: %s", name) + return nil + } +
+ newFlag := &Flag{ + bf: bf, + name: name, + flagType: flagType, + } + bf.flags[name] = newFlag +
+ return newFlag +} +
+// IsUsed checks if the flag is used +func (fl *Flag) IsUsed() bool { + if _, ok := fl.bf.used[fl.name]; ok { + return true + } + return false +} +
+// IsTrue checks if a bool flag is true +func (fl *Flag) IsTrue() bool { + if fl.flagType != boolType { + // Should never get here + panic(fmt.Errorf("Trying to use IsTrue on a non-boolean: %s", fl.name)) + } + return fl.Value == "true" +} +
+// Parse parses and checks if the BFlags is valid. +// Any error noticed during the AddXXX() funcs will be generated/returned +// here. We do this because an error during AddXXX() is more like a +// compile-time error; it doesn't matter much when we stop processing, +// as long as we do stop. This allows the code around AddXXX() to be just: +// defFlag := AddString("description", "") +// without needing to add an if-statement around each one. +func (bf *BFlags) Parse() error { + // If there was an error while defining the possible flags, + // go ahead and bubble it back up here, since we didn't do it + // earlier in the processing + if bf.Err != nil { + return fmt.Errorf("Error setting up flags: %s", bf.Err) + } +
+ for _, arg := range bf.Args { + if !strings.HasPrefix(arg, "--") { + return fmt.Errorf("Arg should start with -- : %s", arg) + } +
+ if arg == "--" { + return nil + } +
+ arg = arg[2:] + value := "" +
+ index := strings.Index(arg, "=") + if index >= 0 { + value = arg[index+1:] + arg = arg[:index] + } +
+ flag, ok := bf.flags[arg] + if !ok { + return fmt.Errorf("Unknown flag: %s", arg) + } +
+ if _, ok = bf.used[arg]; ok { + return fmt.Errorf("Duplicate flag specified: %s", arg) + } +
+ bf.used[arg] = flag +
+ switch flag.flagType { + case boolType: + // value == "" is only ok if no "=" was specified + if index >= 0 && value == "" { + return fmt.Errorf("Missing a value on flag: %s", arg) + } +
+ lower := strings.ToLower(value) + if lower == "" { + flag.Value = "true" + } else if lower == "true" || lower == "false" { + flag.Value = lower + } else { + return fmt.Errorf("Expecting boolean value for flag %s, not: %s", arg, value) + } +
+ case stringType: + if index < 0 { + return fmt.Errorf("Missing a value on flag: %s", arg) + } + flag.Value = value +
+ default: + panic("No idea what kind of flag we have!
Should never get here!") + } + + } + + return nil +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/bflag_test.go b/vendor/github.com/moby/moby/builder/dockerfile/bflag_test.go new file mode 100644 index 000000000..ac07e48c1 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/bflag_test.go @@ -0,0 +1,187 @@ +package dockerfile + +import ( + "testing" +) + +func TestBuilderFlags(t *testing.T) { + var expected string + var err error + + // --- + + bf := NewBFlags() + bf.Args = []string{} + if err := bf.Parse(); err != nil { + t.Fatalf("Test1 of %q was supposed to work: %s", bf.Args, err) + } + + // --- + + bf = NewBFlags() + bf.Args = []string{"--"} + if err := bf.Parse(); err != nil { + t.Fatalf("Test2 of %q was supposed to work: %s", bf.Args, err) + } + + // --- + + bf = NewBFlags() + flStr1 := bf.AddString("str1", "") + flBool1 := bf.AddBool("bool1", false) + bf.Args = []string{} + if err = bf.Parse(); err != nil { + t.Fatalf("Test3 of %q was supposed to work: %s", bf.Args, err) + } + + if flStr1.IsUsed() == true { + t.Fatal("Test3 - str1 was not used!") + } + if flBool1.IsUsed() == true { + t.Fatal("Test3 - bool1 was not used!") + } + + // --- + + bf = NewBFlags() + flStr1 = bf.AddString("str1", "HI") + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test4 of %q was supposed to work: %s", bf.Args, err) + } + + if flStr1.Value != "HI" { + t.Fatal("Str1 was supposed to default to: HI") + } + if flBool1.IsTrue() { + t.Fatal("Bool1 was supposed to default to: false") + } + if flStr1.IsUsed() == true { + t.Fatal("Str1 was not used!") + } + if flBool1.IsUsed() == true { + t.Fatal("Bool1 was not used!") + } + + // --- + + bf = NewBFlags() + flStr1 = bf.AddString("str1", "HI") + bf.Args = []string{"--str1"} + + if err = bf.Parse(); err == nil { + t.Fatalf("Test %q was supposed to fail", bf.Args) + } + + // --- + + bf = NewBFlags() + flStr1 = bf.AddString("str1", "HI") + bf.Args = []string{"--str1="} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + expected = "" + if flStr1.Value != expected { + t.Fatalf("Str1 (%q) should be: %q", flStr1.Value, expected) + } + + // --- + + bf = NewBFlags() + flStr1 = bf.AddString("str1", "HI") + bf.Args = []string{"--str1=BYE"} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + expected = "BYE" + if flStr1.Value != expected { + t.Fatalf("Str1 (%q) should be: %q", flStr1.Value, expected) + } + + // --- + + bf = NewBFlags() + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{"--bool1"} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + if !flBool1.IsTrue() { + t.Fatal("Test-b1 Bool1 was supposed to be true") + } + + // --- + + bf = NewBFlags() + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{"--bool1=true"} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + if !flBool1.IsTrue() { + t.Fatal("Test-b2 Bool1 was supposed to be true") + } + + // --- + + bf = NewBFlags() + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{"--bool1=false"} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + if flBool1.IsTrue() { + t.Fatal("Test-b3 Bool1 was supposed to be false") + } + + // --- + + bf = NewBFlags() + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{"--bool1=false1"} + + if 
err = bf.Parse(); err == nil { + t.Fatalf("Test %q was supposed to fail", bf.Args) + } + + // --- + + bf = NewBFlags() + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{"--bool2"} + + if err = bf.Parse(); err == nil { + t.Fatalf("Test %q was supposed to fail", bf.Args) + } + + // --- + + bf = NewBFlags() + flStr1 = bf.AddString("str1", "HI") + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{"--bool1", "--str1=BYE"} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + if flStr1.Value != "BYE" { + t.Fatalf("Test %s, str1 should be BYE", bf.Args) + } + if !flBool1.IsTrue() { + t.Fatalf("Test %s, bool1 should be true", bf.Args) + } +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/buildargs.go b/vendor/github.com/moby/moby/builder/dockerfile/buildargs.go new file mode 100644 index 000000000..e0daf9a77 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/buildargs.go @@ -0,0 +1,148 @@ +package dockerfile + +import ( + "fmt" + "io" + + "github.com/docker/docker/runconfig/opts" +) + +// builtinAllowedBuildArgs is list of built-in allowed build args +// these args are considered transparent and are excluded from the image history. +// Filtering from history is implemented in dispatchers.go +var builtinAllowedBuildArgs = map[string]bool{ + "HTTP_PROXY": true, + "http_proxy": true, + "HTTPS_PROXY": true, + "https_proxy": true, + "FTP_PROXY": true, + "ftp_proxy": true, + "NO_PROXY": true, + "no_proxy": true, +} + +// buildArgs manages arguments used by the builder +type buildArgs struct { + // args that are allowed for expansion/substitution and passing to commands in 'run'. + allowedBuildArgs map[string]*string + // args defined before the first `FROM` in a Dockerfile + allowedMetaArgs map[string]*string + // args referenced by the Dockerfile + referencedArgs map[string]struct{} + // args provided by the user on the command line + argsFromOptions map[string]*string +} + +func newBuildArgs(argsFromOptions map[string]*string) *buildArgs { + return &buildArgs{ + allowedBuildArgs: make(map[string]*string), + allowedMetaArgs: make(map[string]*string), + referencedArgs: make(map[string]struct{}), + argsFromOptions: argsFromOptions, + } +} + +// WarnOnUnusedBuildArgs checks if there are any leftover build-args that were +// passed but not consumed during build. Print a warning, if there are any. +func (b *buildArgs) WarnOnUnusedBuildArgs(out io.Writer) { + leftoverArgs := []string{} + for arg := range b.argsFromOptions { + _, isReferenced := b.referencedArgs[arg] + _, isBuiltin := builtinAllowedBuildArgs[arg] + if !isBuiltin && !isReferenced { + leftoverArgs = append(leftoverArgs, arg) + } + } + if len(leftoverArgs) > 0 { + fmt.Fprintf(out, "[Warning] One or more build-args %v were not consumed\n", leftoverArgs) + } +} + +// ResetAllowed clears the list of args that are allowed to be used by a +// directive +func (b *buildArgs) ResetAllowed() { + b.allowedBuildArgs = make(map[string]*string) +} + +// AddMetaArg adds a new meta arg that can be used by FROM directives +func (b *buildArgs) AddMetaArg(key string, value *string) { + b.allowedMetaArgs[key] = value +} + +// AddArg adds a new arg that can be used by directives +func (b *buildArgs) AddArg(key string, value *string) { + b.allowedBuildArgs[key] = value + b.referencedArgs[key] = struct{}{} +} + +// IsReferencedOrNotBuiltin checks if the key is a built-in arg, or if it has been +// referenced by the Dockerfile. 
Returns true if the arg is not a builtin or +// if the builtin has been referenced in the Dockerfile. +func (b *buildArgs) IsReferencedOrNotBuiltin(key string) bool { + _, isBuiltin := builtinAllowedBuildArgs[key] + _, isAllowed := b.allowedBuildArgs[key] + return isAllowed || !isBuiltin +} +
+// GetAllAllowed returns a mapping with all the allowed args +func (b *buildArgs) GetAllAllowed() map[string]string { + return b.getAllFromMapping(b.allowedBuildArgs) +} +
+// GetAllMeta returns a mapping with all the meta args +func (b *buildArgs) GetAllMeta() map[string]string { + return b.getAllFromMapping(b.allowedMetaArgs) +} +
+func (b *buildArgs) getAllFromMapping(source map[string]*string) map[string]string { + m := make(map[string]string) +
+ keys := keysFromMaps(source, builtinAllowedBuildArgs) + for _, key := range keys { + v, ok := b.getBuildArg(key, source) + if ok { + m[key] = v + } + } + return m +} +
+// FilterAllowed returns all allowed args without the filtered args +func (b *buildArgs) FilterAllowed(filter []string) []string { + envs := []string{} + configEnv := opts.ConvertKVStringsToMap(filter) +
+ for key, val := range b.GetAllAllowed() { + if _, ok := configEnv[key]; !ok { + envs = append(envs, fmt.Sprintf("%s=%s", key, val)) + } + } + return envs +} +
+func (b *buildArgs) getBuildArg(key string, mapping map[string]*string) (string, bool) { + defaultValue, exists := mapping[key] + // Return the override from options if one is defined + if v, ok := b.argsFromOptions[key]; ok && v != nil { + return *v, ok + } +
+ if defaultValue == nil { + if v, ok := b.allowedMetaArgs[key]; ok && v != nil { + return *v, ok + } + return "", false + } + return *defaultValue, exists +} +
+func keysFromMaps(source map[string]*string, builtin map[string]bool) []string { + keys := []string{} + for key := range source { + keys = append(keys, key) + } + for key := range builtin { + keys = append(keys, key) + } + return keys +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/buildargs_test.go b/vendor/github.com/moby/moby/builder/dockerfile/buildargs_test.go new file mode 100644 index 000000000..241bc8447 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/buildargs_test.go @@ -0,0 +1,100 @@ +package dockerfile +
+import ( + "bytes" + "testing" +
+ "github.com/stretchr/testify/assert" +) +
+func strPtr(source string) *string { + return &source +} +
+func TestGetAllAllowed(t *testing.T) { + buildArgs := newBuildArgs(map[string]*string{ + "ArgNotUsedInDockerfile": strPtr("fromopt1"), + "ArgOverriddenByOptions": strPtr("fromopt2"), + "ArgNoDefaultInDockerfileFromOptions": strPtr("fromopt3"), + "HTTP_PROXY": strPtr("theproxy"), + }) +
+ buildArgs.AddMetaArg("ArgFromMeta", strPtr("frommeta1")) + buildArgs.AddMetaArg("ArgFromMetaOverridden", strPtr("frommeta2")) + buildArgs.AddMetaArg("ArgFromMetaNotUsed", strPtr("frommeta3")) +
+ buildArgs.AddArg("ArgOverriddenByOptions", strPtr("fromdockerfile2")) + buildArgs.AddArg("ArgWithDefaultInDockerfile", strPtr("fromdockerfile1")) + buildArgs.AddArg("ArgNoDefaultInDockerfile", nil) + buildArgs.AddArg("ArgNoDefaultInDockerfileFromOptions", nil) + buildArgs.AddArg("ArgFromMeta", nil) + buildArgs.AddArg("ArgFromMetaOverridden", strPtr("fromdockerfile3")) +
+ all := buildArgs.GetAllAllowed() + expected := map[string]string{ + "HTTP_PROXY": "theproxy", + "ArgOverriddenByOptions": "fromopt2", + "ArgWithDefaultInDockerfile": "fromdockerfile1", + "ArgNoDefaultInDockerfileFromOptions": "fromopt3", + "ArgFromMeta": "frommeta1",
"ArgFromMetaOverridden": "fromdockerfile3", + } + assert.Equal(t, expected, all) +} + +func TestGetAllMeta(t *testing.T) { + buildArgs := newBuildArgs(map[string]*string{ + "ArgNotUsedInDockerfile": strPtr("fromopt1"), + "ArgOverriddenByOptions": strPtr("fromopt2"), + "ArgNoDefaultInMetaFromOptions": strPtr("fromopt3"), + "HTTP_PROXY": strPtr("theproxy"), + }) + + buildArgs.AddMetaArg("ArgFromMeta", strPtr("frommeta1")) + buildArgs.AddMetaArg("ArgOverriddenByOptions", strPtr("frommeta2")) + buildArgs.AddMetaArg("ArgNoDefaultInMetaFromOptions", nil) + + all := buildArgs.GetAllMeta() + expected := map[string]string{ + "HTTP_PROXY": "theproxy", + "ArgFromMeta": "frommeta1", + "ArgOverriddenByOptions": "fromopt2", + "ArgNoDefaultInMetaFromOptions": "fromopt3", + } + assert.Equal(t, expected, all) +} + +func TestWarnOnUnusedBuildArgs(t *testing.T) { + buildArgs := newBuildArgs(map[string]*string{ + "ThisArgIsUsed": strPtr("fromopt1"), + "ThisArgIsNotUsed": strPtr("fromopt2"), + "HTTPS_PROXY": strPtr("referenced builtin"), + "HTTP_PROXY": strPtr("unreferenced builtin"), + }) + buildArgs.AddArg("ThisArgIsUsed", nil) + buildArgs.AddArg("HTTPS_PROXY", nil) + + buffer := new(bytes.Buffer) + buildArgs.WarnOnUnusedBuildArgs(buffer) + out := buffer.String() + assert.NotContains(t, out, "ThisArgIsUsed") + assert.NotContains(t, out, "HTTPS_PROXY") + assert.NotContains(t, out, "HTTP_PROXY") + assert.Contains(t, out, "ThisArgIsNotUsed") +} + +func TestIsUnreferencedBuiltin(t *testing.T) { + buildArgs := newBuildArgs(map[string]*string{ + "ThisArgIsUsed": strPtr("fromopt1"), + "ThisArgIsNotUsed": strPtr("fromopt2"), + "HTTPS_PROXY": strPtr("referenced builtin"), + "HTTP_PROXY": strPtr("unreferenced builtin"), + }) + buildArgs.AddArg("ThisArgIsUsed", nil) + buildArgs.AddArg("HTTPS_PROXY", nil) + + assert.True(t, buildArgs.IsReferencedOrNotBuiltin("ThisArgIsUsed")) + assert.True(t, buildArgs.IsReferencedOrNotBuiltin("ThisArgIsNotUsed")) + assert.True(t, buildArgs.IsReferencedOrNotBuiltin("HTTPS_PROXY")) + assert.False(t, buildArgs.IsReferencedOrNotBuiltin("HTTP_PROXY")) +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/builder.go b/vendor/github.com/moby/moby/builder/dockerfile/builder.go new file mode 100644 index 000000000..fb1786225 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/builder.go @@ -0,0 +1,420 @@ +package dockerfile + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "runtime" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerfile/command" + "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/builder/fscache" + "github.com/docker/docker/builder/remotecontext" + "github.com/docker/docker/client/session" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" + "golang.org/x/net/context" + "golang.org/x/sync/syncmap" +) + +var validCommitCommands = map[string]bool{ + "cmd": true, + "entrypoint": true, + "healthcheck": true, + "env": true, + "expose": true, + "label": true, + "onbuild": true, + "user": true, + "volume": true, + "workdir": true, +} + +// SessionGetter is object used to get access 
to a session by uuid +type SessionGetter interface { + Get(ctx context.Context, uuid string) (session.Caller, error) +} + +// BuildManager is shared across all Builder objects +type BuildManager struct { + archiver *archive.Archiver + backend builder.Backend + pathCache pathCache // TODO: make this persistent + sg SessionGetter + fsCache *fscache.FSCache +} + +// NewBuildManager creates a BuildManager +func NewBuildManager(b builder.Backend, sg SessionGetter, fsCache *fscache.FSCache, idMappings *idtools.IDMappings) (*BuildManager, error) { + bm := &BuildManager{ + backend: b, + pathCache: &syncmap.Map{}, + sg: sg, + archiver: chrootarchive.NewArchiver(idMappings), + fsCache: fsCache, + } + if err := fsCache.RegisterTransport(remotecontext.ClientSessionRemote, NewClientSessionTransport()); err != nil { + return nil, err + } + return bm, nil +} + +// Build starts a new build from a BuildConfig +func (bm *BuildManager) Build(ctx context.Context, config backend.BuildConfig) (*builder.Result, error) { + buildsTriggered.Inc() + if config.Options.Dockerfile == "" { + config.Options.Dockerfile = builder.DefaultDockerfileName + } + + source, dockerfile, err := remotecontext.Detect(config) + if err != nil { + return nil, err + } + defer func() { + if source != nil { + if err := source.Close(); err != nil { + logrus.Debugf("[BUILDER] failed to remove temporary context: %v", err) + } + } + }() + + // TODO @jhowardmsft LCOW support - this will require rework to allow both linux and Windows simultaneously. + // This is an interim solution to hardcode to linux if LCOW is turned on. + if dockerfile.Platform == "" { + dockerfile.Platform = runtime.GOOS + if dockerfile.Platform == "windows" && system.LCOWSupported() { + dockerfile.Platform = "linux" + } + } + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + if src, err := bm.initializeClientSession(ctx, cancel, config.Options); err != nil { + return nil, err + } else if src != nil { + source = src + } + + builderOptions := builderOptions{ + Options: config.Options, + ProgressWriter: config.ProgressWriter, + Backend: bm.backend, + PathCache: bm.pathCache, + Archiver: bm.archiver, + Platform: dockerfile.Platform, + } + + return newBuilder(ctx, builderOptions).build(source, dockerfile) +} + +func (bm *BuildManager) initializeClientSession(ctx context.Context, cancel func(), options *types.ImageBuildOptions) (builder.Source, error) { + if options.SessionID == "" || bm.sg == nil { + return nil, nil + } + logrus.Debug("client is session enabled") + + ctx, cancelCtx := context.WithTimeout(ctx, sessionConnectTimeout) + defer cancelCtx() + + c, err := bm.sg.Get(ctx, options.SessionID) + if err != nil { + return nil, err + } + go func() { + <-c.Context().Done() + cancel() + }() + if options.RemoteContext == remotecontext.ClientSessionRemote { + st := time.Now() + csi, err := NewClientSessionSourceIdentifier(ctx, bm.sg, options.SessionID) + if err != nil { + return nil, err + } + src, err := bm.fsCache.SyncFrom(ctx, csi) + if err != nil { + return nil, err + } + logrus.Debugf("sync-time: %v", time.Since(st)) + return src, nil + } + return nil, nil +} + +// builderOptions are the dependencies required by the builder +type builderOptions struct { + Options *types.ImageBuildOptions + Backend builder.Backend + ProgressWriter backend.ProgressWriter + PathCache pathCache + Archiver *archive.Archiver + Platform string +} + +// Builder is a Dockerfile builder +// It implements the builder.Backend interface. 
+type Builder struct { + options *types.ImageBuildOptions +
+ Stdout io.Writer + Stderr io.Writer + Aux *streamformatter.AuxFormatter + Output io.Writer +
+ docker builder.Backend + clientCtx context.Context +
+ archiver *archive.Archiver + buildStages *buildStages + disableCommit bool + buildArgs *buildArgs + imageSources *imageSources + pathCache pathCache + containerManager *containerManager + imageProber ImageProber +
+ // TODO @jhowardmsft LCOW Support. This will be moved to options at a later + // stage; however, that cannot be done now, as it would affect the public API. + platform string +} +
+// newBuilder creates a new Dockerfile builder from an optional dockerfile and an Options. +// TODO @jhowardmsft LCOW support: Eventually platform can be moved into the builder +// options, however, that would be an API change as it shares types.ImageBuildOptions. +func newBuilder(clientCtx context.Context, options builderOptions) *Builder { + config := options.Options + if config == nil { + config = new(types.ImageBuildOptions) + } +
+ // @jhowardmsft LCOW Support. For the time being, this is interim. Eventually + // will be moved to types.ImageBuildOptions, but it can't for now as that would + // be an API change. + if options.Platform == "" { + options.Platform = runtime.GOOS + } + if options.Platform == "windows" && system.LCOWSupported() { + options.Platform = "linux" + } +
+ b := &Builder{ + clientCtx: clientCtx, + options: config, + Stdout: options.ProgressWriter.StdoutFormatter, + Stderr: options.ProgressWriter.StderrFormatter, + Aux: options.ProgressWriter.AuxFormatter, + Output: options.ProgressWriter.Output, + docker: options.Backend, + archiver: options.Archiver, + buildArgs: newBuildArgs(config.BuildArgs), + buildStages: newBuildStages(), + imageSources: newImageSources(clientCtx, options), + pathCache: options.PathCache, + imageProber: newImageProber(options.Backend, config.CacheFrom, options.Platform, config.NoCache), + containerManager: newContainerManager(options.Backend), + platform: options.Platform, + } +
+ return b +} +
+// build runs the Dockerfile builder by parsing the Dockerfile and executing +// the instructions from the file. +func (b *Builder) build(source builder.Source, dockerfile *parser.Result) (*builder.Result, error) { + defer b.imageSources.Unmount() +
+ addNodesForLabelOption(dockerfile.AST, b.options.Labels) +
+ if err := checkDispatchDockerfile(dockerfile.AST); err != nil { + buildsFailed.WithValues(metricsDockerfileSyntaxError).Inc() + return nil, err + } +
+ dispatchState, err := b.dispatchDockerfileWithCancellation(dockerfile, source) + if err != nil { + return nil, err + } +
+ if b.options.Target != "" && !dispatchState.isCurrentStage(b.options.Target) { + buildsFailed.WithValues(metricsBuildTargetNotReachableError).Inc() + return nil, errors.Errorf("failed to reach build target %s in Dockerfile", b.options.Target) + } +
+ dockerfile.PrintWarnings(b.Stderr) + b.buildArgs.WarnOnUnusedBuildArgs(b.Stderr) +
+ if dispatchState.imageID == "" { + buildsFailed.WithValues(metricsDockerfileEmptyError).Inc() + return nil, errors.New("No image was generated.
Is your Dockerfile empty?") + } + return &builder.Result{ImageID: dispatchState.imageID, FromImage: dispatchState.baseImage}, nil +} + +func emitImageID(aux *streamformatter.AuxFormatter, state *dispatchState) error { + if aux == nil || state.imageID == "" { + return nil + } + return aux.Emit(types.BuildResult{ID: state.imageID}) +} + +func (b *Builder) dispatchDockerfileWithCancellation(dockerfile *parser.Result, source builder.Source) (*dispatchState, error) { + shlex := NewShellLex(dockerfile.EscapeToken) + state := newDispatchState() + total := len(dockerfile.AST.Children) + var err error + for i, n := range dockerfile.AST.Children { + select { + case <-b.clientCtx.Done(): + logrus.Debug("Builder: build cancelled!") + fmt.Fprint(b.Stdout, "Build cancelled") + buildsFailed.WithValues(metricsBuildCanceled).Inc() + return nil, errors.New("Build cancelled") + default: + // Not cancelled yet, keep going... + } + + // If this is a FROM and we have a previous image then + // emit an aux message for that image since it is the + // end of the previous stage + if n.Value == command.From { + if err := emitImageID(b.Aux, state); err != nil { + return nil, err + } + } + + if n.Value == command.From && state.isCurrentStage(b.options.Target) { + break + } + + opts := dispatchOptions{ + state: state, + stepMsg: formatStep(i, total), + node: n, + shlex: shlex, + source: source, + } + if state, err = b.dispatch(opts); err != nil { + if b.options.ForceRemove { + b.containerManager.RemoveAll(b.Stdout) + } + return nil, err + } + + fmt.Fprintf(b.Stdout, " ---> %s\n", stringid.TruncateID(state.imageID)) + if b.options.Remove { + b.containerManager.RemoveAll(b.Stdout) + } + } + + // Emit a final aux message for the final image + if err := emitImageID(b.Aux, state); err != nil { + return nil, err + } + + return state, nil +} + +func addNodesForLabelOption(dockerfile *parser.Node, labels map[string]string) { + if len(labels) == 0 { + return + } + + node := parser.NodeFromLabels(labels) + dockerfile.Children = append(dockerfile.Children, node) +} + +// BuildFromConfig builds directly from `changes`, treating it as if it were the contents of a Dockerfile +// It will: +// - Call parse.Parse() to get an AST root for the concatenated Dockerfile entries. +// - Do build by calling builder.dispatch() to call all entries' handling routines +// +// BuildFromConfig is used by the /commit endpoint, with the changes +// coming from the query parameter of the same name. +// +// TODO: Remove? +func BuildFromConfig(config *container.Config, changes []string) (*container.Config, error) { + if len(changes) == 0 { + return config, nil + } + + b := newBuilder(context.Background(), builderOptions{ + Options: &types.ImageBuildOptions{NoCache: true}, + }) + + dockerfile, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n"))) + if err != nil { + return nil, err + } + + // TODO @jhowardmsft LCOW support. For now, if LCOW enabled, switch to linux. + // Also explicitly set the platform. Ultimately this will be in the builder + // options, but we can't do that yet as it would change the API. 
+ if dockerfile.Platform == "" { + dockerfile.Platform = runtime.GOOS + } + if dockerfile.Platform == "windows" && system.LCOWSupported() { + dockerfile.Platform = "linux" + } + b.platform = dockerfile.Platform + + // ensure that the commands are valid + for _, n := range dockerfile.AST.Children { + if !validCommitCommands[n.Value] { + return nil, fmt.Errorf("%s is not a valid change command", n.Value) + } + } + + b.Stdout = ioutil.Discard + b.Stderr = ioutil.Discard + b.disableCommit = true + + if err := checkDispatchDockerfile(dockerfile.AST); err != nil { + return nil, err + } + dispatchState := newDispatchState() + dispatchState.runConfig = config + return dispatchFromDockerfile(b, dockerfile, dispatchState, nil) +} + +func checkDispatchDockerfile(dockerfile *parser.Node) error { + for _, n := range dockerfile.Children { + if err := checkDispatch(n); err != nil { + return errors.Wrapf(err, "Dockerfile parse error line %d", n.StartLine) + } + } + return nil +} + +func dispatchFromDockerfile(b *Builder, result *parser.Result, dispatchState *dispatchState, source builder.Source) (*container.Config, error) { + shlex := NewShellLex(result.EscapeToken) + ast := result.AST + total := len(ast.Children) + + for i, n := range ast.Children { + opts := dispatchOptions{ + state: dispatchState, + stepMsg: formatStep(i, total), + node: n, + shlex: shlex, + source: source, + } + if _, err := b.dispatch(opts); err != nil { + return nil, err + } + } + return dispatchState.runConfig, nil +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/builder_test.go b/vendor/github.com/moby/moby/builder/dockerfile/builder_test.go new file mode 100644 index 000000000..5fedca0fd --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/builder_test.go @@ -0,0 +1,34 @@ +package dockerfile + +import ( + "strings" + "testing" + + "github.com/docker/docker/builder/dockerfile/parser" + "github.com/stretchr/testify/assert" +) + +func TestAddNodesForLabelOption(t *testing.T) { + dockerfile := "FROM scratch" + result, err := parser.Parse(strings.NewReader(dockerfile)) + assert.NoError(t, err) + + labels := map[string]string{ + "org.e": "cli-e", + "org.d": "cli-d", + "org.c": "cli-c", + "org.b": "cli-b", + "org.a": "cli-a", + } + nodes := result.AST + addNodesForLabelOption(nodes, labels) + + expected := []string{ + "FROM scratch", + `LABEL "org.a"='cli-a' "org.b"='cli-b' "org.c"='cli-c' "org.d"='cli-d' "org.e"='cli-e'`, + } + assert.Len(t, nodes.Children, 2) + for i, v := range nodes.Children { + assert.Equal(t, expected[i], v.Original) + } +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/builder_unix.go b/vendor/github.com/moby/moby/builder/dockerfile/builder_unix.go new file mode 100644 index 000000000..5ea63da82 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/builder_unix.go @@ -0,0 +1,7 @@ +// +build !windows + +package dockerfile + +func defaultShellForPlatform(platform string) []string { + return []string{"/bin/sh", "-c"} +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/builder_windows.go b/vendor/github.com/moby/moby/builder/dockerfile/builder_windows.go new file mode 100644 index 000000000..7bfef3238 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/builder_windows.go @@ -0,0 +1,8 @@ +package dockerfile + +func defaultShellForPlatform(platform string) []string { + if platform == "linux" { + return []string{"/bin/sh", "-c"} + } + return []string{"cmd", "/S", "/C"} +} diff --git 
a/vendor/github.com/moby/moby/builder/dockerfile/clientsession.go b/vendor/github.com/moby/moby/builder/dockerfile/clientsession.go new file mode 100644 index 000000000..a7709ce51 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/clientsession.go @@ -0,0 +1,77 @@ +package dockerfile +
+import ( + "time" +
+ "github.com/docker/docker/builder/fscache" + "github.com/docker/docker/builder/remotecontext" + "github.com/docker/docker/client/session" + "github.com/docker/docker/client/session/filesync" + "github.com/pkg/errors" + "golang.org/x/net/context" +) +
+const sessionConnectTimeout = 5 * time.Second +
+// ClientSessionTransport is a transport for copying files from docker client +// to the daemon. +type ClientSessionTransport struct{} +
+// NewClientSessionTransport returns a new ClientSessionTransport instance +func NewClientSessionTransport() *ClientSessionTransport { + return &ClientSessionTransport{} +} +
+// Copy copies data from a remote to a destination directory. +func (cst *ClientSessionTransport) Copy(ctx context.Context, id fscache.RemoteIdentifier, dest string, cu filesync.CacheUpdater) error { + csi, ok := id.(*ClientSessionSourceIdentifier) + if !ok { + return errors.New("invalid identifier for client session") + } +
+ return filesync.FSSync(ctx, csi.caller, filesync.FSSendRequestOpt{ + IncludePatterns: csi.includePatterns, + DestDir: dest, + CacheUpdater: cu, + }) +} +
+// ClientSessionSourceIdentifier is an identifier that can be used for requesting +// files from a remote client +type ClientSessionSourceIdentifier struct { + includePatterns []string + caller session.Caller + sharedKey string + uuid string +} +
+// NewClientSessionSourceIdentifier returns a new ClientSessionSourceIdentifier instance +func NewClientSessionSourceIdentifier(ctx context.Context, sg SessionGetter, uuid string) (*ClientSessionSourceIdentifier, error) { + csi := &ClientSessionSourceIdentifier{ + uuid: uuid, + } + caller, err := sg.Get(ctx, uuid) + if err != nil { + return nil, errors.Wrapf(err, "failed to get session for %s", uuid) + } +
+ csi.caller = caller + return csi, nil +} +
+// Transport returns the transport identifier for the remote identifier +func (csi *ClientSessionSourceIdentifier) Transport() string { + return remotecontext.ClientSessionRemote +} +
+// SharedKey returns the shared key for the remote identifier. The shared key +// is used to find the base for a repeated transfer. +func (csi *ClientSessionSourceIdentifier) SharedKey() string { + return csi.caller.SharedKey() +} +
+// Key returns the unique key for the remote identifier. Requests with the +// same key return the same data. +func (csi *ClientSessionSourceIdentifier) Key() string { + return csi.uuid +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/command/command.go b/vendor/github.com/moby/moby/builder/dockerfile/command/command.go new file mode 100644 index 000000000..f23c6874b --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/command/command.go @@ -0,0 +1,46 @@ +// Package command contains the set of Dockerfile commands.
+package command +
+// Define constants for the command strings +const ( + Add = "add" + Arg = "arg" + Cmd = "cmd" + Copy = "copy" + Entrypoint = "entrypoint" + Env = "env" + Expose = "expose" + From = "from" + Healthcheck = "healthcheck" + Label = "label" + Maintainer = "maintainer" + Onbuild = "onbuild" + Run = "run" + Shell = "shell" + StopSignal = "stopsignal" + User = "user" + Volume = "volume" + Workdir = "workdir" +) +
+// Commands is the list of all Dockerfile commands +var Commands = map[string]struct{}{ + Add: {}, + Arg: {}, + Cmd: {}, + Copy: {}, + Entrypoint: {}, + Env: {}, + Expose: {}, + From: {}, + Healthcheck: {}, + Label: {}, + Maintainer: {}, + Onbuild: {}, + Run: {}, + Shell: {}, + StopSignal: {}, + User: {}, + Volume: {}, + Workdir: {}, +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/containerbackend.go b/vendor/github.com/moby/moby/builder/dockerfile/containerbackend.go new file mode 100644 index 000000000..7b241f3d3 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/containerbackend.go @@ -0,0 +1,144 @@ +package dockerfile +
+import ( + "fmt" + "io" +
+ "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + containerpkg "github.com/docker/docker/container" + "github.com/docker/docker/pkg/stringid" + "github.com/pkg/errors" + "golang.org/x/net/context" +) +
+type containerManager struct { + tmpContainers map[string]struct{} + backend builder.ExecBackend +} +
+// newContainerManager creates a new container backend +func newContainerManager(docker builder.ExecBackend) *containerManager { + return &containerManager{ + backend: docker, + tmpContainers: make(map[string]struct{}), + } +} +
+// Create creates a container +func (c *containerManager) Create(runConfig *container.Config, hostConfig *container.HostConfig, platform string) (container.ContainerCreateCreatedBody, error) { + container, err := c.backend.ContainerCreate(types.ContainerCreateConfig{ + Config: runConfig, + HostConfig: hostConfig, + Platform: platform, + }) + if err != nil { + return container, err + } + c.tmpContainers[container.ID] = struct{}{} + return container, nil +} +
+var errCancelled = errors.New("build cancelled") +
+// Run runs a container by ID +func (c *containerManager) Run(ctx context.Context, cID string, stdout, stderr io.Writer) (err error) { + attached := make(chan struct{}) + errCh := make(chan error) + go func() { + errCh <- c.backend.ContainerAttachRaw(cID, nil, stdout, stderr, true, attached) + }() + select { + case err := <-errCh: + return err + case <-attached: + } +
+ finished := make(chan struct{}) + cancelErrCh := make(chan error, 1) + go func() { + select { + case <-ctx.Done(): + logrus.Debugln("Build cancelled, killing and removing container:", cID) + c.backend.ContainerKill(cID, 0) + c.removeContainer(cID, stdout) + cancelErrCh <- errCancelled + case <-finished: + cancelErrCh <- nil + } + }() +
+ if err := c.backend.ContainerStart(cID, nil, "", ""); err != nil { + close(finished) + logCancellationError(cancelErrCh, "error from ContainerStart: "+err.Error()) + return err + } +
+ // Block on reading output from container, stop on err or chan closed + if err := <-errCh; err != nil { + close(finished) + logCancellationError(cancelErrCh, "error from errCh: "+err.Error()) + return err + } +
+ waitC, err := c.backend.ContainerWait(ctx, cID, containerpkg.WaitConditionNotRunning) + if err != nil { + close(finished) + logCancellationError(cancelErrCh, fmt.Sprintf("unable to
+		return err
+	}
+
+	if status := <-waitC; status.ExitCode() != 0 {
+		close(finished)
+		logCancellationError(cancelErrCh,
+			fmt.Sprintf("a non-zero code from ContainerWait: %d", status.ExitCode()))
+		return &statusCodeError{code: status.ExitCode(), err: status.Err()}
+	}
+
+	close(finished)
+	return <-cancelErrCh
+}
+
+func logCancellationError(cancelErrCh chan error, msg string) {
+	if cancelErr := <-cancelErrCh; cancelErr != nil {
+		logrus.Debugf("Build cancelled (%v): %s", cancelErr, msg)
+	}
+}
+
+type statusCodeError struct {
+	code int
+	err  error
+}
+
+func (e *statusCodeError) Error() string {
+	// err may be nil when the container simply exited non-zero without a
+	// daemon-side error; guard against a nil pointer dereference here.
+	if e.err == nil {
+		return fmt.Sprintf("exit code %d", e.code)
+	}
+	return e.err.Error()
+}
+
+func (e *statusCodeError) StatusCode() int {
+	return e.code
+}
+
+func (c *containerManager) removeContainer(containerID string, stdout io.Writer) error {
+	rmConfig := &types.ContainerRmConfig{
+		ForceRemove:  true,
+		RemoveVolume: true,
+	}
+	if err := c.backend.ContainerRm(containerID, rmConfig); err != nil {
+		fmt.Fprintf(stdout, "Error removing intermediate container %s: %v\n", stringid.TruncateID(containerID), err)
+		return err
+	}
+	return nil
+}
+
+// RemoveAll removes all containers managed by this container manager
+func (c *containerManager) RemoveAll(stdout io.Writer) {
+	for containerID := range c.tmpContainers {
+		if err := c.removeContainer(containerID, stdout); err != nil {
+			return
+		}
+		delete(c.tmpContainers, containerID)
+		fmt.Fprintf(stdout, "Removing intermediate container %s\n", stringid.TruncateID(containerID))
+	}
+}
diff --git a/vendor/github.com/moby/moby/builder/dockerfile/copy.go b/vendor/github.com/moby/moby/builder/dockerfile/copy.go
new file mode 100644
index 000000000..c7db943f5
--- /dev/null
+++ b/vendor/github.com/moby/moby/builder/dockerfile/copy.go
@@ -0,0 +1,444 @@
+package dockerfile
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/docker/docker/builder"
+	"github.com/docker/docker/builder/remotecontext"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/docker/docker/pkg/ioutils"
+	"github.com/docker/docker/pkg/progress"
+	"github.com/docker/docker/pkg/streamformatter"
+	"github.com/docker/docker/pkg/symlink"
+	"github.com/docker/docker/pkg/system"
+	"github.com/docker/docker/pkg/urlutil"
+	"github.com/pkg/errors"
+)
+
+type pathCache interface {
+	Load(key interface{}) (value interface{}, ok bool)
+	Store(key, value interface{})
+}
+
+// copyInfo is a data object which stores the metadata about each source file in
+// a copyInstruction
+type copyInfo struct {
+	root         string
+	path         string
+	hash         string
+	noDecompress bool
+}
+
+func (c copyInfo) fullPath() (string, error) {
+	return symlink.FollowSymlinkInScope(filepath.Join(c.root, c.path), c.root)
+}
+
+func newCopyInfoFromSource(source builder.Source, path string, hash string) copyInfo {
+	return copyInfo{root: source.Root(), path: path, hash: hash}
+}
+
+func newCopyInfos(copyInfos ...copyInfo) []copyInfo {
+	return copyInfos
+}
+
+// copyInstruction is a fully parsed COPY or ADD command that is passed to
+// Builder.performCopy to copy files into the image filesystem
+type copyInstruction struct {
+	cmdName                 string
+	infos                   []copyInfo
+	dest                    string
+	allowLocalDecompression bool
+}
+
+// copier reads a raw COPY or ADD command, fetches remote sources using a downloader,
+// and creates a copyInstruction
+type copier struct {
+	imageSource *imageMount
+	source      builder.Source
+	pathCache   pathCache
+	download    sourceDownloader
+	tmpPaths    []string
+}
+
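+// The pieces above fit together as follows. This is an illustrative sketch
+// only, not part of this file; it mirrors what the ADD/COPY dispatchers in
+// dispatchers.go actually do: build a copier from the dispatch request, turn
+// the raw instruction arguments into a copyInstruction, and hand that to the
+// builder:
+//
+//	copier := copierFromDispatchRequest(req, errOnSourceDownload, nil)
+//	defer copier.Cleanup()
+//	inst, err := copier.createCopyInstruction(req.args, "COPY")
+//	if err == nil {
+//		err = req.builder.performCopy(req.state, inst)
+//	}
+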
+func copierFromDispatchRequest(req dispatchRequest, download sourceDownloader, imageSource *imageMount) copier {
+	return copier{
+		source:      req.source,
+		pathCache:   req.builder.pathCache,
+		download:    download,
+		imageSource: imageSource,
+	}
+}
+
+func (o *copier) createCopyInstruction(args []string, cmdName string) (copyInstruction, error) {
+	inst := copyInstruction{cmdName: cmdName}
+	last := len(args) - 1
+
+	// Work in daemon-specific filepath semantics
+	inst.dest = filepath.FromSlash(args[last])
+
+	infos, err := o.getCopyInfosForSourcePaths(args[0:last])
+	if err != nil {
+		return inst, errors.Wrapf(err, "%s failed", cmdName)
+	}
+	if len(infos) > 1 && !strings.HasSuffix(inst.dest, string(os.PathSeparator)) {
+		return inst, errors.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
+	}
+	inst.infos = infos
+	return inst, nil
+}
+
+// getCopyInfosForSourcePaths iterates over the source paths and calculates the
+// info needed to copy (e.g. hash value if cached)
+func (o *copier) getCopyInfosForSourcePaths(sources []string) ([]copyInfo, error) {
+	var infos []copyInfo
+	for _, orig := range sources {
+		subinfos, err := o.getCopyInfoForSourcePath(orig)
+		if err != nil {
+			return nil, err
+		}
+		infos = append(infos, subinfos...)
+	}
+
+	if len(infos) == 0 {
+		return nil, errors.New("no source files were specified")
+	}
+	return infos, nil
+}
+
+func (o *copier) getCopyInfoForSourcePath(orig string) ([]copyInfo, error) {
+	if !urlutil.IsURL(orig) {
+		return o.calcCopyInfo(orig, true)
+	}
+	remote, path, err := o.download(orig)
+	if err != nil {
+		return nil, err
+	}
+	o.tmpPaths = append(o.tmpPaths, remote.Root())
+
+	hash, err := remote.Hash(path)
+	ci := newCopyInfoFromSource(remote, path, hash)
+	ci.noDecompress = true // data from http shouldn't be extracted even on ADD
+	return newCopyInfos(ci), err
+}
+
+// Cleanup removes any temporary directories created as part of downloading
+// remote files.
+func (o *copier) Cleanup() {
+	for _, path := range o.tmpPaths {
+		os.RemoveAll(path)
+	}
+	o.tmpPaths = []string{}
+}
+
+// TODO: allowWildcards can probably be removed by refactoring this function further.
+func (o *copier) calcCopyInfo(origPath string, allowWildcards bool) ([]copyInfo, error) {
+	imageSource := o.imageSource
+	if err := validateCopySourcePath(imageSource, origPath); err != nil {
+		return nil, err
+	}
+
+	// Work in daemon-specific OS filepath semantics
+	origPath = filepath.FromSlash(origPath)
+	origPath = strings.TrimPrefix(origPath, string(os.PathSeparator))
+	origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator))
+
+	// TODO: do this when creating the copier. Requires validateCopySourcePath
+	// (and others below) to be aware of the different sources. Why is it only
+	// done on image Source?
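+	// When the source is another image or build stage (COPY --from=...),
+	// o.source is resolved lazily from the image mount below; otherwise it
+	// stays the build context captured in copierFromDispatchRequest.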
+ if imageSource != nil { + var err error + o.source, err = imageSource.Source() + if err != nil { + return nil, errors.Wrapf(err, "failed to copy from %s", imageSource.ImageID()) + } + } + + if o.source == nil { + return nil, errors.Errorf("missing build context") + } + + // Deal with wildcards + if allowWildcards && containsWildcards(origPath) { + return o.copyWithWildcards(origPath) + } + + if imageSource != nil && imageSource.ImageID() != "" { + // return a cached copy if one exists + if h, ok := o.pathCache.Load(imageSource.ImageID() + origPath); ok { + return newCopyInfos(newCopyInfoFromSource(o.source, origPath, h.(string))), nil + } + } + + // Deal with the single file case + copyInfo, err := copyInfoForFile(o.source, origPath) + switch { + case err != nil: + return nil, err + case copyInfo.hash != "": + o.storeInPathCache(imageSource, origPath, copyInfo.hash) + return newCopyInfos(copyInfo), err + } + + // TODO: remove, handle dirs in Hash() + subfiles, err := walkSource(o.source, origPath) + if err != nil { + return nil, err + } + + hash := hashStringSlice("dir", subfiles) + o.storeInPathCache(imageSource, origPath, hash) + return newCopyInfos(newCopyInfoFromSource(o.source, origPath, hash)), nil +} + +func (o *copier) storeInPathCache(im *imageMount, path string, hash string) { + if im != nil { + o.pathCache.Store(im.ImageID()+path, hash) + } +} + +func (o *copier) copyWithWildcards(origPath string) ([]copyInfo, error) { + var copyInfos []copyInfo + if err := filepath.Walk(o.source.Root(), func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + rel, err := remotecontext.Rel(o.source.Root(), path) + if err != nil { + return err + } + + if rel == "." { + return nil + } + if match, _ := filepath.Match(origPath, rel); !match { + return nil + } + + // Note we set allowWildcards to false in case the name has + // a * in it + subInfos, err := o.calcCopyInfo(rel, false) + if err != nil { + return err + } + copyInfos = append(copyInfos, subInfos...) + return nil + }); err != nil { + return nil, err + } + return copyInfos, nil +} + +func copyInfoForFile(source builder.Source, path string) (copyInfo, error) { + fi, err := remotecontext.StatAt(source, path) + if err != nil { + return copyInfo{}, err + } + + if fi.IsDir() { + return copyInfo{}, nil + } + hash, err := source.Hash(path) + if err != nil { + return copyInfo{}, err + } + return newCopyInfoFromSource(source, path, "file:"+hash), nil +} + +// TODO: dedupe with copyWithWildcards() +func walkSource(source builder.Source, origPath string) ([]string, error) { + fp, err := remotecontext.FullPath(source, origPath) + if err != nil { + return nil, err + } + // Must be a dir + var subfiles []string + err = filepath.Walk(fp, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + rel, err := remotecontext.Rel(source.Root(), path) + if err != nil { + return err + } + if rel == "." 
{ + return nil + } + hash, err := source.Hash(rel) + if err != nil { + return nil + } + // we already checked handleHash above + subfiles = append(subfiles, hash) + return nil + }) + if err != nil { + return nil, err + } + + sort.Strings(subfiles) + return subfiles, nil +} + +type sourceDownloader func(string) (builder.Source, string, error) + +func newRemoteSourceDownloader(output, stdout io.Writer) sourceDownloader { + return func(url string) (builder.Source, string, error) { + return downloadSource(output, stdout, url) + } +} + +func errOnSourceDownload(_ string) (builder.Source, string, error) { + return nil, "", errors.New("source can't be a URL for COPY") +} + +func downloadSource(output io.Writer, stdout io.Writer, srcURL string) (remote builder.Source, p string, err error) { + u, err := url.Parse(srcURL) + if err != nil { + return + } + filename := filepath.Base(filepath.FromSlash(u.Path)) // Ensure in platform semantics + if filename == "" { + err = errors.Errorf("cannot determine filename from url: %s", u) + return + } + + resp, err := remotecontext.GetWithStatusError(srcURL) + if err != nil { + return + } + + // Prepare file in a tmp dir + tmpDir, err := ioutils.TempDir("", "docker-remote") + if err != nil { + return + } + defer func() { + if err != nil { + os.RemoveAll(tmpDir) + } + }() + tmpFileName := filepath.Join(tmpDir, filename) + tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if err != nil { + return + } + + progressOutput := streamformatter.NewJSONProgressOutput(output, true) + progressReader := progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Downloading") + // Download and dump result to tmp file + // TODO: add filehash directly + if _, err = io.Copy(tmpFile, progressReader); err != nil { + tmpFile.Close() + return + } + // TODO: how important is this random blank line to the output? 
+ fmt.Fprintln(stdout) + + // Set the mtime to the Last-Modified header value if present + // Otherwise just remove atime and mtime + mTime := time.Time{} + + lastMod := resp.Header.Get("Last-Modified") + if lastMod != "" { + // If we can't parse it then just let it default to 'zero' + // otherwise use the parsed time value + if parsedMTime, err := http.ParseTime(lastMod); err == nil { + mTime = parsedMTime + } + } + + tmpFile.Close() + + if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil { + return + } + + lc, err := remotecontext.NewLazySource(tmpDir) + return lc, filename, err +} + +type copyFileOptions struct { + decompress bool + archiver *archive.Archiver +} + +func performCopyForInfo(dest copyInfo, source copyInfo, options copyFileOptions) error { + srcPath, err := source.fullPath() + if err != nil { + return err + } + destPath, err := dest.fullPath() + if err != nil { + return err + } + + archiver := options.archiver + + src, err := os.Stat(srcPath) + if err != nil { + return errors.Wrapf(err, "source path not found") + } + if src.IsDir() { + return copyDirectory(archiver, srcPath, destPath) + } + if options.decompress && archive.IsArchivePath(srcPath) && !source.noDecompress { + return archiver.UntarPath(srcPath, destPath) + } + + destExistsAsDir, err := isExistingDirectory(destPath) + if err != nil { + return err + } + // dest.path must be used because destPath has already been cleaned of any + // trailing slash + if endsInSlash(dest.path) || destExistsAsDir { + // source.path must be used to get the correct filename when the source + // is a symlink + destPath = filepath.Join(destPath, filepath.Base(source.path)) + } + return copyFile(archiver, srcPath, destPath) +} + +func copyDirectory(archiver *archive.Archiver, source, dest string) error { + if err := archiver.CopyWithTar(source, dest); err != nil { + return errors.Wrapf(err, "failed to copy directory") + } + return fixPermissions(source, dest, archiver.IDMappings.RootPair()) +} + +func copyFile(archiver *archive.Archiver, source, dest string) error { + rootIDs := archiver.IDMappings.RootPair() + + if err := idtools.MkdirAllAndChownNew(filepath.Dir(dest), 0755, rootIDs); err != nil { + return errors.Wrapf(err, "failed to create new directory") + } + if err := archiver.CopyFileWithTar(source, dest); err != nil { + return errors.Wrapf(err, "failed to copy file") + } + return fixPermissions(source, dest, rootIDs) +} + +func endsInSlash(path string) bool { + return strings.HasSuffix(path, string(os.PathSeparator)) +} + +// isExistingDirectory returns true if the path exists and is a directory +func isExistingDirectory(path string) (bool, error) { + destStat, err := os.Stat(path) + switch { + case os.IsNotExist(err): + return false, nil + case err != nil: + return false, err + } + return destStat.IsDir(), nil +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/copy_test.go b/vendor/github.com/moby/moby/builder/dockerfile/copy_test.go new file mode 100644 index 000000000..aee225b5f --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/copy_test.go @@ -0,0 +1,45 @@ +package dockerfile + +import ( + "testing" + + "github.com/docker/docker/pkg/testutil/tempfile" + "github.com/stretchr/testify/assert" +) + +func TestIsExistingDirectory(t *testing.T) { + tmpfile := tempfile.NewTempFile(t, "file-exists-test", "something") + defer tmpfile.Remove() + tmpdir := tempfile.NewTempDir(t, "dir-exists-test") + defer tmpdir.Remove() + + var testcases = []struct { + doc string + path string + expected bool + }{ 
+ { + doc: "directory exists", + path: tmpdir.Path, + expected: true, + }, + { + doc: "path doesn't exist", + path: "/bogus/path/does/not/exist", + expected: false, + }, + { + doc: "file exists", + path: tmpfile.Name(), + expected: false, + }, + } + + for _, testcase := range testcases { + result, err := isExistingDirectory(testcase.path) + if !assert.NoError(t, err) { + continue + } + assert.Equal(t, testcase.expected, result, testcase.doc) + } +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/copy_unix.go b/vendor/github.com/moby/moby/builder/dockerfile/copy_unix.go new file mode 100644 index 000000000..326d95bb3 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/copy_unix.go @@ -0,0 +1,36 @@ +// +build !windows + +package dockerfile + +import ( + "os" + "path/filepath" + + "github.com/docker/docker/pkg/idtools" +) + +func fixPermissions(source, destination string, rootIDs idtools.IDPair) error { + skipChownRoot, err := isExistingDirectory(destination) + if err != nil { + return err + } + + // We Walk on the source rather than on the destination because we don't + // want to change permissions on things we haven't created or modified. + return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error { + // Do not alter the walk root iff. it existed before, as it doesn't fall under + // the domain of "things we should chown". + if skipChownRoot && source == fullpath { + return nil + } + + // Path is prefixed by source: substitute with destination instead. + cleaned, err := filepath.Rel(source, fullpath) + if err != nil { + return err + } + + fullpath = filepath.Join(destination, cleaned) + return os.Lchown(fullpath, rootIDs.UID, rootIDs.GID) + }) +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/copy_windows.go b/vendor/github.com/moby/moby/builder/dockerfile/copy_windows.go new file mode 100644 index 000000000..78f5b0945 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/copy_windows.go @@ -0,0 +1,8 @@ +package dockerfile + +import "github.com/docker/docker/pkg/idtools" + +func fixPermissions(source, destination string, rootIDs idtools.IDPair) error { + // chown is not supported on Windows + return nil +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/dispatchers.go b/vendor/github.com/moby/moby/builder/dockerfile/dispatchers.go new file mode 100644 index 000000000..1f7424124 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/dispatchers.go @@ -0,0 +1,884 @@ +package dockerfile + +// This file contains the dispatchers for each command. Note that +// `nullDispatch` is not actually a command, but support for commands we parse +// but do nothing with. +// +// See evaluator.go for a higher level discussion of the whole evaluator +// package. + +import ( + "bytes" + "fmt" + "regexp" + "runtime" + "sort" + "strconv" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/system" + "github.com/docker/go-connections/nat" + "github.com/pkg/errors" +) + +// ENV foo bar +// +// Sets the environment variable foo to bar, also makes interpolation +// in the dockerfile available from the next statement on via ${foo}. 
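+// Both Dockerfile spellings reach this dispatcher as the same name/value
+// pairs, so, for example, the following are equivalent here:
+//
+//	ENV foo bar
+//	ENV foo=bar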
+// +func env(req dispatchRequest) error { + if len(req.args) == 0 { + return errAtLeastOneArgument("ENV") + } + + if len(req.args)%2 != 0 { + // should never get here, but just in case + return errTooManyArguments("ENV") + } + + if err := req.flags.Parse(); err != nil { + return err + } + + runConfig := req.state.runConfig + commitMessage := bytes.NewBufferString("ENV") + + for j := 0; j < len(req.args); j += 2 { + if len(req.args[j]) == 0 { + return errBlankCommandNames("ENV") + } + name := req.args[j] + value := req.args[j+1] + newVar := name + "=" + value + commitMessage.WriteString(" " + newVar) + + gotOne := false + for i, envVar := range runConfig.Env { + envParts := strings.SplitN(envVar, "=", 2) + compareFrom := envParts[0] + if equalEnvKeys(compareFrom, name) { + runConfig.Env[i] = newVar + gotOne = true + break + } + } + if !gotOne { + runConfig.Env = append(runConfig.Env, newVar) + } + } + + return req.builder.commit(req.state, commitMessage.String()) +} + +// MAINTAINER some text +// +// Sets the maintainer metadata. +func maintainer(req dispatchRequest) error { + if len(req.args) != 1 { + return errExactlyOneArgument("MAINTAINER") + } + + if err := req.flags.Parse(); err != nil { + return err + } + + maintainer := req.args[0] + req.state.maintainer = maintainer + return req.builder.commit(req.state, "MAINTAINER "+maintainer) +} + +// LABEL some json data describing the image +// +// Sets the Label variable foo to bar, +// +func label(req dispatchRequest) error { + if len(req.args) == 0 { + return errAtLeastOneArgument("LABEL") + } + if len(req.args)%2 != 0 { + // should never get here, but just in case + return errTooManyArguments("LABEL") + } + + if err := req.flags.Parse(); err != nil { + return err + } + + commitStr := "LABEL" + runConfig := req.state.runConfig + + if runConfig.Labels == nil { + runConfig.Labels = map[string]string{} + } + + for j := 0; j < len(req.args); j++ { + name := req.args[j] + if name == "" { + return errBlankCommandNames("LABEL") + } + + value := req.args[j+1] + commitStr += " " + name + "=" + value + + runConfig.Labels[name] = value + j++ + } + return req.builder.commit(req.state, commitStr) +} + +// ADD foo /path +// +// Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling +// exist here. If you do not wish to have this automatic handling, use COPY. +// +func add(req dispatchRequest) error { + if len(req.args) < 2 { + return errAtLeastTwoArguments("ADD") + } + + if err := req.flags.Parse(); err != nil { + return err + } + + downloader := newRemoteSourceDownloader(req.builder.Output, req.builder.Stdout) + copier := copierFromDispatchRequest(req, downloader, nil) + defer copier.Cleanup() + copyInstruction, err := copier.createCopyInstruction(req.args, "ADD") + if err != nil { + return err + } + copyInstruction.allowLocalDecompression = true + + return req.builder.performCopy(req.state, copyInstruction) +} + +// COPY foo /path +// +// Same as 'ADD' but without the tar and remote url handling. 
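+// COPY also accepts a --from flag for copying out of an earlier build stage
+// or another image (handled by getImageMount below), for example:
+//
+//	COPY --from=0 /app/binary /usr/local/bin/binary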
+// +func dispatchCopy(req dispatchRequest) error { + if len(req.args) < 2 { + return errAtLeastTwoArguments("COPY") + } + + flFrom := req.flags.AddString("from", "") + if err := req.flags.Parse(); err != nil { + return err + } + + im, err := req.builder.getImageMount(flFrom) + if err != nil { + return errors.Wrapf(err, "invalid from flag value %s", flFrom.Value) + } + + copier := copierFromDispatchRequest(req, errOnSourceDownload, im) + defer copier.Cleanup() + copyInstruction, err := copier.createCopyInstruction(req.args, "COPY") + if err != nil { + return err + } + + return req.builder.performCopy(req.state, copyInstruction) +} + +func (b *Builder) getImageMount(fromFlag *Flag) (*imageMount, error) { + if !fromFlag.IsUsed() { + // TODO: this could return the source in the default case as well? + return nil, nil + } + + var localOnly bool + imageRefOrID := fromFlag.Value + stage, err := b.buildStages.get(fromFlag.Value) + if err != nil { + return nil, err + } + if stage != nil { + imageRefOrID = stage.ImageID() + localOnly = true + } + return b.imageSources.Get(imageRefOrID, localOnly) +} + +// FROM imagename[:tag | @digest] [AS build-stage-name] +// +func from(req dispatchRequest) error { + stageName, err := parseBuildStageName(req.args) + if err != nil { + return err + } + + if err := req.flags.Parse(); err != nil { + return err + } + + req.builder.imageProber.Reset() + image, err := req.builder.getFromImage(req.shlex, req.args[0]) + if err != nil { + return err + } + if err := req.builder.buildStages.add(stageName, image); err != nil { + return err + } + req.state.beginStage(stageName, image) + req.builder.buildArgs.ResetAllowed() + if image.ImageID() == "" { + // Typically this means they used "FROM scratch" + return nil + } + + return processOnBuild(req) +} + +func parseBuildStageName(args []string) (string, error) { + stageName := "" + switch { + case len(args) == 3 && strings.EqualFold(args[1], "as"): + stageName = strings.ToLower(args[2]) + if ok, _ := regexp.MatchString("^[a-z][a-z0-9-_\\.]*$", stageName); !ok { + return "", errors.Errorf("invalid name for build stage: %q, name can't start with a number or contain symbols", stageName) + } + case len(args) != 1: + return "", errors.New("FROM requires either one or three arguments") + } + + return stageName, nil +} + +// scratchImage is used as a token for the empty base image. +var scratchImage builder.Image = &image.Image{} + +func (b *Builder) getFromImage(shlex *ShellLex, name string) (builder.Image, error) { + substitutionArgs := []string{} + for key, value := range b.buildArgs.GetAllMeta() { + substitutionArgs = append(substitutionArgs, key+"="+value) + } + + name, err := shlex.ProcessWord(name, substitutionArgs) + if err != nil { + return nil, err + } + + var localOnly bool + if stage, ok := b.buildStages.getByName(name); ok { + name = stage.ImageID() + localOnly = true + } + + // Windows cannot support a container with no base image unless it is LCOW. 
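+	// api.NoBaseImageSpecifier is the literal "scratch"; it is special-cased
+	// below so that it never hits the image store and instead yields the
+	// empty scratchImage token defined above.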
+ if name == api.NoBaseImageSpecifier { + if runtime.GOOS == "windows" { + if b.platform == "windows" || (b.platform != "windows" && !system.LCOWSupported()) { + return nil, errors.New("Windows does not support FROM scratch") + } + } + return scratchImage, nil + } + imageMount, err := b.imageSources.Get(name, localOnly) + if err != nil { + return nil, err + } + return imageMount.Image(), nil +} + +func processOnBuild(req dispatchRequest) error { + dispatchState := req.state + // Process ONBUILD triggers if they exist + if nTriggers := len(dispatchState.runConfig.OnBuild); nTriggers != 0 { + word := "trigger" + if nTriggers > 1 { + word = "triggers" + } + fmt.Fprintf(req.builder.Stderr, "# Executing %d build %s...\n", nTriggers, word) + } + + // Copy the ONBUILD triggers, and remove them from the config, since the config will be committed. + onBuildTriggers := dispatchState.runConfig.OnBuild + dispatchState.runConfig.OnBuild = []string{} + + // Reset stdin settings as all build actions run without stdin + dispatchState.runConfig.OpenStdin = false + dispatchState.runConfig.StdinOnce = false + + // parse the ONBUILD triggers by invoking the parser + for _, step := range onBuildTriggers { + dockerfile, err := parser.Parse(strings.NewReader(step)) + if err != nil { + return err + } + + for _, n := range dockerfile.AST.Children { + if err := checkDispatch(n); err != nil { + return err + } + + upperCasedCmd := strings.ToUpper(n.Value) + switch upperCasedCmd { + case "ONBUILD": + return errors.New("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") + case "MAINTAINER", "FROM": + return errors.Errorf("%s isn't allowed as an ONBUILD trigger", upperCasedCmd) + } + } + + if _, err := dispatchFromDockerfile(req.builder, dockerfile, dispatchState, req.source); err != nil { + return err + } + } + return nil +} + +// ONBUILD RUN echo yo +// +// ONBUILD triggers run when the image is used in a FROM statement. +// +// ONBUILD handling has a lot of special-case functionality, the heading in +// evaluator.go and comments around dispatch() in the same file explain the +// special cases. search for 'OnBuild' in internals.go for additional special +// cases. +// +func onbuild(req dispatchRequest) error { + if len(req.args) == 0 { + return errAtLeastOneArgument("ONBUILD") + } + + if err := req.flags.Parse(); err != nil { + return err + } + + triggerInstruction := strings.ToUpper(strings.TrimSpace(req.args[0])) + switch triggerInstruction { + case "ONBUILD": + return errors.New("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") + case "MAINTAINER", "FROM": + return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction) + } + + runConfig := req.state.runConfig + original := regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(req.original, "") + runConfig.OnBuild = append(runConfig.OnBuild, original) + return req.builder.commit(req.state, "ONBUILD "+original) +} + +// WORKDIR /tmp +// +// Set the working directory for future RUN/CMD/etc statements. +// +func workdir(req dispatchRequest) error { + if len(req.args) != 1 { + return errExactlyOneArgument("WORKDIR") + } + + err := req.flags.Parse() + if err != nil { + return err + } + + runConfig := req.state.runConfig + // This is from the Dockerfile and will not necessarily be in platform + // specific semantics, hence ensure it is converted. 
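+	// For example, on Windows "WORKDIR /foo" normalises to `C:\foo`, and a
+	// relative "WORKDIR bar" is joined onto the current working directory
+	// (see normaliseWorkdir in dispatchers_windows.go).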
+ runConfig.WorkingDir, err = normaliseWorkdir(runConfig.WorkingDir, req.args[0]) + if err != nil { + return err + } + + // For performance reasons, we explicitly do a create/mkdir now + // This avoids having an unnecessary expensive mount/unmount calls + // (on Windows in particular) during each container create. + // Prior to 1.13, the mkdir was deferred and not executed at this step. + if req.builder.disableCommit { + // Don't call back into the daemon if we're going through docker commit --change "WORKDIR /foo". + // We've already updated the runConfig and that's enough. + return nil + } + + comment := "WORKDIR " + runConfig.WorkingDir + runConfigWithCommentCmd := copyRunConfig(runConfig, withCmdCommentString(comment, req.builder.platform)) + containerID, err := req.builder.probeAndCreate(req.state, runConfigWithCommentCmd) + if err != nil || containerID == "" { + return err + } + if err := req.builder.docker.ContainerCreateWorkdir(containerID); err != nil { + return err + } + + return req.builder.commitContainer(req.state, containerID, runConfigWithCommentCmd) +} + +// RUN some command yo +// +// run a command and commit the image. Args are automatically prepended with +// the current SHELL which defaults to 'sh -c' under linux or 'cmd /S /C' under +// Windows, in the event there is only one argument The difference in processing: +// +// RUN echo hi # sh -c echo hi (Linux and LCOW) +// RUN echo hi # cmd /S /C echo hi (Windows) +// RUN [ "echo", "hi" ] # echo hi +// +func run(req dispatchRequest) error { + if !req.state.hasFromImage() { + return errors.New("Please provide a source image with `from` prior to run") + } + + if err := req.flags.Parse(); err != nil { + return err + } + + stateRunConfig := req.state.runConfig + args := handleJSONArgs(req.args, req.attributes) + if !req.attributes["json"] { + args = append(getShell(stateRunConfig, req.builder.platform), args...) + } + cmdFromArgs := strslice.StrSlice(args) + buildArgs := req.builder.buildArgs.FilterAllowed(stateRunConfig.Env) + + saveCmd := cmdFromArgs + if len(buildArgs) > 0 { + saveCmd = prependEnvOnCmd(req.builder.buildArgs, buildArgs, cmdFromArgs) + } + + runConfigForCacheProbe := copyRunConfig(stateRunConfig, + withCmd(saveCmd), + withEntrypointOverride(saveCmd, nil)) + hit, err := req.builder.probeCache(req.state, runConfigForCacheProbe) + if err != nil || hit { + return err + } + + runConfig := copyRunConfig(stateRunConfig, + withCmd(cmdFromArgs), + withEnv(append(stateRunConfig.Env, buildArgs...)), + withEntrypointOverride(saveCmd, strslice.StrSlice{""})) + + // set config as already being escaped, this prevents double escaping on windows + runConfig.ArgsEscaped = true + + logrus.Debugf("[BUILDER] Command to be executed: %v", runConfig.Cmd) + cID, err := req.builder.create(runConfig) + if err != nil { + return err + } + if err := req.builder.containerManager.Run(req.builder.clientCtx, cID, req.builder.Stdout, req.builder.Stderr); err != nil { + if err, ok := err.(*statusCodeError); ok { + // TODO: change error type, because jsonmessage.JSONError assumes HTTP + return &jsonmessage.JSONError{ + Message: fmt.Sprintf( + "The command '%s' returned a non-zero code: %d", + strings.Join(runConfig.Cmd, " "), err.StatusCode()), + Code: err.StatusCode(), + } + } + return err + } + + return req.builder.commitContainer(req.state, cID, runConfigForCacheProbe) +} + +// Derive the command to use for probeCache() and to commit in this container. +// Note that we only do this if there are any build-time env vars. 
Also, we +// use the special argument "|#" at the start of the args array. This will +// avoid conflicts with any RUN command since commands can not +// start with | (vertical bar). The "#" (number of build envs) is there to +// help ensure proper cache matches. We don't want a RUN command +// that starts with "foo=abc" to be considered part of a build-time env var. +// +// remove any unreferenced built-in args from the environment variables. +// These args are transparent so resulting image should be the same regardless +// of the value. +func prependEnvOnCmd(buildArgs *buildArgs, buildArgVars []string, cmd strslice.StrSlice) strslice.StrSlice { + var tmpBuildEnv []string + for _, env := range buildArgVars { + key := strings.SplitN(env, "=", 2)[0] + if buildArgs.IsReferencedOrNotBuiltin(key) { + tmpBuildEnv = append(tmpBuildEnv, env) + } + } + + sort.Strings(tmpBuildEnv) + tmpEnv := append([]string{fmt.Sprintf("|%d", len(tmpBuildEnv))}, tmpBuildEnv...) + return strslice.StrSlice(append(tmpEnv, cmd...)) +} + +// CMD foo +// +// Set the default command to run in the container (which may be empty). +// Argument handling is the same as RUN. +// +func cmd(req dispatchRequest) error { + if err := req.flags.Parse(); err != nil { + return err + } + + runConfig := req.state.runConfig + cmdSlice := handleJSONArgs(req.args, req.attributes) + if !req.attributes["json"] { + cmdSlice = append(getShell(runConfig, req.builder.platform), cmdSlice...) + } + + runConfig.Cmd = strslice.StrSlice(cmdSlice) + // set config as already being escaped, this prevents double escaping on windows + runConfig.ArgsEscaped = true + + if err := req.builder.commit(req.state, fmt.Sprintf("CMD %q", cmdSlice)); err != nil { + return err + } + + if len(req.args) != 0 { + req.state.cmdSet = true + } + + return nil +} + +// parseOptInterval(flag) is the duration of flag.Value, or 0 if +// empty. An error is reported if the value is given and less than minimum duration. +func parseOptInterval(f *Flag) (time.Duration, error) { + s := f.Value + if s == "" { + return 0, nil + } + d, err := time.ParseDuration(s) + if err != nil { + return 0, err + } + if d < time.Duration(container.MinimumDuration) { + return 0, fmt.Errorf("Interval %#v cannot be less than %s", f.name, container.MinimumDuration) + } + return d, nil +} + +// HEALTHCHECK foo +// +// Set the default healthcheck command to run in the container (which may be empty). +// Argument handling is the same as RUN. 
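+// Two forms are accepted: "HEALTHCHECK NONE" disables any inherited check,
+// while "HEALTHCHECK [OPTIONS] CMD command" configures one, for example:
+//
+//	HEALTHCHECK --interval=5m --timeout=3s CMD curl -f http://localhost/ || exit 1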
+// +func healthcheck(req dispatchRequest) error { + if len(req.args) == 0 { + return errAtLeastOneArgument("HEALTHCHECK") + } + runConfig := req.state.runConfig + typ := strings.ToUpper(req.args[0]) + args := req.args[1:] + if typ == "NONE" { + if len(args) != 0 { + return errors.New("HEALTHCHECK NONE takes no arguments") + } + test := strslice.StrSlice{typ} + runConfig.Healthcheck = &container.HealthConfig{ + Test: test, + } + } else { + if runConfig.Healthcheck != nil { + oldCmd := runConfig.Healthcheck.Test + if len(oldCmd) > 0 && oldCmd[0] != "NONE" { + fmt.Fprintf(req.builder.Stdout, "Note: overriding previous HEALTHCHECK: %v\n", oldCmd) + } + } + + healthcheck := container.HealthConfig{} + + flInterval := req.flags.AddString("interval", "") + flTimeout := req.flags.AddString("timeout", "") + flStartPeriod := req.flags.AddString("start-period", "") + flRetries := req.flags.AddString("retries", "") + + if err := req.flags.Parse(); err != nil { + return err + } + + switch typ { + case "CMD": + cmdSlice := handleJSONArgs(args, req.attributes) + if len(cmdSlice) == 0 { + return errors.New("Missing command after HEALTHCHECK CMD") + } + + if !req.attributes["json"] { + typ = "CMD-SHELL" + } + + healthcheck.Test = strslice.StrSlice(append([]string{typ}, cmdSlice...)) + default: + return fmt.Errorf("Unknown type %#v in HEALTHCHECK (try CMD)", typ) + } + + interval, err := parseOptInterval(flInterval) + if err != nil { + return err + } + healthcheck.Interval = interval + + timeout, err := parseOptInterval(flTimeout) + if err != nil { + return err + } + healthcheck.Timeout = timeout + + startPeriod, err := parseOptInterval(flStartPeriod) + if err != nil { + return err + } + healthcheck.StartPeriod = startPeriod + + if flRetries.Value != "" { + retries, err := strconv.ParseInt(flRetries.Value, 10, 32) + if err != nil { + return err + } + if retries < 1 { + return fmt.Errorf("--retries must be at least 1 (not %d)", retries) + } + healthcheck.Retries = int(retries) + } else { + healthcheck.Retries = 0 + } + + runConfig.Healthcheck = &healthcheck + } + + return req.builder.commit(req.state, fmt.Sprintf("HEALTHCHECK %q", runConfig.Healthcheck)) +} + +// ENTRYPOINT /usr/sbin/nginx +// +// Set the entrypoint to /usr/sbin/nginx. Will accept the CMD as the arguments +// to /usr/sbin/nginx. Uses the default shell if not in JSON format. +// +// Handles command processing similar to CMD and RUN, only req.runConfig.Entrypoint +// is initialized at newBuilder time instead of through argument parsing. +// +func entrypoint(req dispatchRequest) error { + if err := req.flags.Parse(); err != nil { + return err + } + + runConfig := req.state.runConfig + parsed := handleJSONArgs(req.args, req.attributes) + + switch { + case req.attributes["json"]: + // ENTRYPOINT ["echo", "hi"] + runConfig.Entrypoint = strslice.StrSlice(parsed) + case len(parsed) == 0: + // ENTRYPOINT [] + runConfig.Entrypoint = nil + default: + // ENTRYPOINT echo hi + runConfig.Entrypoint = strslice.StrSlice(append(getShell(runConfig, req.builder.platform), parsed[0])) + } + + // when setting the entrypoint if a CMD was not explicitly set then + // set the command to nil + if !req.state.cmdSet { + runConfig.Cmd = nil + } + + return req.builder.commit(req.state, fmt.Sprintf("ENTRYPOINT %q", runConfig.Entrypoint)) +} + +// EXPOSE 6667/tcp 7000/tcp +// +// Expose ports for links and port mappings. This all ends up in +// req.runConfig.ExposedPorts for runconfig. 
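+// A port may be given with an explicit protocol; a bare "EXPOSE 80" defaults
+// to "80/tcp". The resulting port list is sorted below so the commit message,
+// and therefore the build cache key, stays stable across builds.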
+// +func expose(req dispatchRequest) error { + portsTab := req.args + + if len(req.args) == 0 { + return errAtLeastOneArgument("EXPOSE") + } + + if err := req.flags.Parse(); err != nil { + return err + } + + runConfig := req.state.runConfig + if runConfig.ExposedPorts == nil { + runConfig.ExposedPorts = make(nat.PortSet) + } + + ports, _, err := nat.ParsePortSpecs(portsTab) + if err != nil { + return err + } + + // instead of using ports directly, we build a list of ports and sort it so + // the order is consistent. This prevents cache burst where map ordering + // changes between builds + portList := make([]string, len(ports)) + var i int + for port := range ports { + if _, exists := runConfig.ExposedPorts[port]; !exists { + runConfig.ExposedPorts[port] = struct{}{} + } + portList[i] = string(port) + i++ + } + sort.Strings(portList) + return req.builder.commit(req.state, "EXPOSE "+strings.Join(portList, " ")) +} + +// USER foo +// +// Set the user to 'foo' for future commands and when running the +// ENTRYPOINT/CMD at container run time. +// +func user(req dispatchRequest) error { + if len(req.args) != 1 { + return errExactlyOneArgument("USER") + } + + if err := req.flags.Parse(); err != nil { + return err + } + + req.state.runConfig.User = req.args[0] + return req.builder.commit(req.state, fmt.Sprintf("USER %v", req.args)) +} + +// VOLUME /foo +// +// Expose the volume /foo for use. Will also accept the JSON array form. +// +func volume(req dispatchRequest) error { + if len(req.args) == 0 { + return errAtLeastOneArgument("VOLUME") + } + + if err := req.flags.Parse(); err != nil { + return err + } + + runConfig := req.state.runConfig + if runConfig.Volumes == nil { + runConfig.Volumes = map[string]struct{}{} + } + for _, v := range req.args { + v = strings.TrimSpace(v) + if v == "" { + return errors.New("VOLUME specified can not be an empty string") + } + runConfig.Volumes[v] = struct{}{} + } + return req.builder.commit(req.state, fmt.Sprintf("VOLUME %v", req.args)) +} + +// STOPSIGNAL signal +// +// Set the signal that will be used to kill the container. +func stopSignal(req dispatchRequest) error { + if len(req.args) != 1 { + return errExactlyOneArgument("STOPSIGNAL") + } + + sig := req.args[0] + _, err := signal.ParseSignal(sig) + if err != nil { + return err + } + + req.state.runConfig.StopSignal = sig + return req.builder.commit(req.state, fmt.Sprintf("STOPSIGNAL %v", req.args)) +} + +// ARG name[=value] +// +// Adds the variable foo to the trusted list of variables that can be passed +// to builder using the --build-arg flag for expansion/substitution or passing to 'run'. +// Dockerfile author may optionally set a default value of this variable. +func arg(req dispatchRequest) error { + if len(req.args) != 1 { + return errExactlyOneArgument("ARG") + } + + var ( + name string + newValue string + hasDefault bool + ) + + arg := req.args[0] + // 'arg' can just be a name or name-value pair. Note that this is different + // from 'env' that handles the split of name and value at the parser level. + // The reason for doing it differently for 'arg' is that we support just + // defining an arg and not assign it a value (while 'env' always expects a + // name-value pair). If possible, it will be good to harmonize the two. 
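+	// So "ARG name" merely declares the build arg, while "ARG name=value"
+	// declares it and records a default that --build-arg can override.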
+ if strings.Contains(arg, "=") { + parts := strings.SplitN(arg, "=", 2) + if len(parts[0]) == 0 { + return errBlankCommandNames("ARG") + } + + name = parts[0] + newValue = parts[1] + hasDefault = true + } else { + name = arg + hasDefault = false + } + + var value *string + if hasDefault { + value = &newValue + } + req.builder.buildArgs.AddArg(name, value) + + // Arg before FROM doesn't add a layer + if !req.state.hasFromImage() { + req.builder.buildArgs.AddMetaArg(name, value) + return nil + } + return req.builder.commit(req.state, "ARG "+arg) +} + +// SHELL powershell -command +// +// Set the non-default shell to use. +func shell(req dispatchRequest) error { + if err := req.flags.Parse(); err != nil { + return err + } + shellSlice := handleJSONArgs(req.args, req.attributes) + switch { + case len(shellSlice) == 0: + // SHELL [] + return errAtLeastOneArgument("SHELL") + case req.attributes["json"]: + // SHELL ["powershell", "-command"] + req.state.runConfig.Shell = strslice.StrSlice(shellSlice) + default: + // SHELL powershell -command - not JSON + return errNotJSON("SHELL", req.original) + } + return req.builder.commit(req.state, fmt.Sprintf("SHELL %v", shellSlice)) +} + +func errAtLeastOneArgument(command string) error { + return fmt.Errorf("%s requires at least one argument", command) +} + +func errExactlyOneArgument(command string) error { + return fmt.Errorf("%s requires exactly one argument", command) +} + +func errAtLeastTwoArguments(command string) error { + return fmt.Errorf("%s requires at least two arguments", command) +} + +func errBlankCommandNames(command string) error { + return fmt.Errorf("%s names can not be blank", command) +} + +func errTooManyArguments(command string) error { + return fmt.Errorf("Bad input to %s, too many arguments", command) +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/dispatchers_test.go b/vendor/github.com/moby/moby/builder/dockerfile/dispatchers_test.go new file mode 100644 index 000000000..b3672fce1 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/dispatchers_test.go @@ -0,0 +1,525 @@ +package dockerfile + +import ( + "fmt" + "runtime" + "testing" + + "bytes" + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/testutil" + "github.com/docker/go-connections/nat" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type commandWithFunction struct { + name string + function func(args []string) error +} + +func withArgs(f dispatcher) func([]string) error { + return func(args []string) error { + return f(dispatchRequest{args: args}) + } +} + +func withBuilderAndArgs(builder *Builder, f dispatcher) func([]string) error { + return func(args []string) error { + return f(defaultDispatchReq(builder, args...)) + } +} + +func defaultDispatchReq(builder *Builder, args ...string) dispatchRequest { + return dispatchRequest{ + builder: builder, + args: args, + flags: NewBFlags(), + shlex: NewShellLex(parser.DefaultEscapeToken), + state: &dispatchState{runConfig: &container.Config{}}, + } +} + +func newBuilderWithMockBackend() *Builder { + mockBackend := &MockBackend{} + ctx := context.Background() + b := &Builder{ + options: &types.ImageBuildOptions{}, + docker: mockBackend, + buildArgs: 
newBuildArgs(make(map[string]*string)), + Stdout: new(bytes.Buffer), + clientCtx: ctx, + disableCommit: true, + imageSources: newImageSources(ctx, builderOptions{ + Options: &types.ImageBuildOptions{}, + Backend: mockBackend, + }), + buildStages: newBuildStages(), + imageProber: newImageProber(mockBackend, nil, runtime.GOOS, false), + containerManager: newContainerManager(mockBackend), + } + return b +} + +func TestCommandsExactlyOneArgument(t *testing.T) { + commands := []commandWithFunction{ + {"MAINTAINER", withArgs(maintainer)}, + {"WORKDIR", withArgs(workdir)}, + {"USER", withArgs(user)}, + {"STOPSIGNAL", withArgs(stopSignal)}, + } + + for _, command := range commands { + err := command.function([]string{}) + assert.EqualError(t, err, errExactlyOneArgument(command.name).Error()) + } +} + +func TestCommandsAtLeastOneArgument(t *testing.T) { + commands := []commandWithFunction{ + {"ENV", withArgs(env)}, + {"LABEL", withArgs(label)}, + {"ONBUILD", withArgs(onbuild)}, + {"HEALTHCHECK", withArgs(healthcheck)}, + {"EXPOSE", withArgs(expose)}, + {"VOLUME", withArgs(volume)}, + } + + for _, command := range commands { + err := command.function([]string{}) + assert.EqualError(t, err, errAtLeastOneArgument(command.name).Error()) + } +} + +func TestCommandsAtLeastTwoArguments(t *testing.T) { + commands := []commandWithFunction{ + {"ADD", withArgs(add)}, + {"COPY", withArgs(dispatchCopy)}} + + for _, command := range commands { + err := command.function([]string{"arg1"}) + assert.EqualError(t, err, errAtLeastTwoArguments(command.name).Error()) + } +} + +func TestCommandsTooManyArguments(t *testing.T) { + commands := []commandWithFunction{ + {"ENV", withArgs(env)}, + {"LABEL", withArgs(label)}} + + for _, command := range commands { + err := command.function([]string{"arg1", "arg2", "arg3"}) + assert.EqualError(t, err, errTooManyArguments(command.name).Error()) + } +} + +func TestCommandsBlankNames(t *testing.T) { + builder := newBuilderWithMockBackend() + commands := []commandWithFunction{ + {"ENV", withBuilderAndArgs(builder, env)}, + {"LABEL", withBuilderAndArgs(builder, label)}, + } + + for _, command := range commands { + err := command.function([]string{"", ""}) + assert.EqualError(t, err, errBlankCommandNames(command.name).Error()) + } +} + +func TestEnv2Variables(t *testing.T) { + b := newBuilderWithMockBackend() + + args := []string{"var1", "val1", "var2", "val2"} + req := defaultDispatchReq(b, args...) + err := env(req) + require.NoError(t, err) + + expected := []string{ + fmt.Sprintf("%s=%s", args[0], args[1]), + fmt.Sprintf("%s=%s", args[2], args[3]), + } + assert.Equal(t, expected, req.state.runConfig.Env) +} + +func TestEnvValueWithExistingRunConfigEnv(t *testing.T) { + b := newBuilderWithMockBackend() + + args := []string{"var1", "val1"} + req := defaultDispatchReq(b, args...) 
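+	// Seed the config with an existing value for var1 so the dispatcher's
+	// overwrite path, rather than its append path, is exercised.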
+	req.state.runConfig.Env = []string{"var1=old", "var2=fromenv"}
+	err := env(req)
+	require.NoError(t, err)
+
+	expected := []string{
+		fmt.Sprintf("%s=%s", args[0], args[1]),
+		"var2=fromenv",
+	}
+	assert.Equal(t, expected, req.state.runConfig.Env)
+}
+
+func TestMaintainer(t *testing.T) {
+	maintainerEntry := "Some Maintainer <maintainer@example.com>"
+
+	b := newBuilderWithMockBackend()
+	req := defaultDispatchReq(b, maintainerEntry)
+	err := maintainer(req)
+	require.NoError(t, err)
+	assert.Equal(t, maintainerEntry, req.state.maintainer)
+}
+
+func TestLabel(t *testing.T) {
+	labelName := "label"
+	labelValue := "value"
+
+	labelEntry := []string{labelName, labelValue}
+	b := newBuilderWithMockBackend()
+	req := defaultDispatchReq(b, labelEntry...)
+	err := label(req)
+	require.NoError(t, err)
+
+	require.Contains(t, req.state.runConfig.Labels, labelName)
+	assert.Equal(t, req.state.runConfig.Labels[labelName], labelValue)
+}
+
+func TestFromScratch(t *testing.T) {
+	b := newBuilderWithMockBackend()
+	req := defaultDispatchReq(b, "scratch")
+	err := from(req)
+
+	if runtime.GOOS == "windows" && !system.LCOWSupported() {
+		assert.EqualError(t, err, "Windows does not support FROM scratch")
+		return
+	}
+
+	require.NoError(t, err)
+	assert.True(t, req.state.hasFromImage())
+	assert.Equal(t, "", req.state.imageID)
+	// Windows does not set the default path. TODO @jhowardmsft LCOW support. This will need revisiting as we get further into the implementation
+	expected := "PATH=" + system.DefaultPathEnv(runtime.GOOS)
+	if runtime.GOOS == "windows" {
+		expected = ""
+	}
+	assert.Equal(t, []string{expected}, req.state.runConfig.Env)
+}
+
+func TestFromWithArg(t *testing.T) {
+	tag, expected := ":sometag", "expectedthisid"
+
+	getImage := func(name string) (builder.Image, builder.ReleaseableLayer, error) {
+		assert.Equal(t, "alpine"+tag, name)
+		return &mockImage{id: "expectedthisid"}, nil, nil
+	}
+	b := newBuilderWithMockBackend()
+	b.docker.(*MockBackend).getImageFunc = getImage
+
+	require.NoError(t, arg(defaultDispatchReq(b, "THETAG="+tag)))
+	req := defaultDispatchReq(b, "alpine${THETAG}")
+	err := from(req)
+
+	require.NoError(t, err)
+	assert.Equal(t, expected, req.state.imageID)
+	assert.Equal(t, expected, req.state.baseImage.ImageID())
+	assert.Len(t, b.buildArgs.GetAllAllowed(), 0)
+	assert.Len(t, b.buildArgs.GetAllMeta(), 1)
+}
+
+func TestFromWithUndefinedArg(t *testing.T) {
+	tag, expected := "sometag", "expectedthisid"
+
+	getImage := func(name string) (builder.Image, builder.ReleaseableLayer, error) {
+		assert.Equal(t, "alpine", name)
+		return &mockImage{id: "expectedthisid"}, nil, nil
+	}
+	b := newBuilderWithMockBackend()
+	b.docker.(*MockBackend).getImageFunc = getImage
+	b.options.BuildArgs = map[string]*string{"THETAG": &tag}
+
+	req := defaultDispatchReq(b, "alpine${THETAG}")
+	err := from(req)
+	require.NoError(t, err)
+	assert.Equal(t, expected, req.state.imageID)
+}
+
+func TestFromMultiStageWithScratchNamedStage(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		t.Skip("Windows does not support scratch")
+	}
+	b := newBuilderWithMockBackend()
+	req := defaultDispatchReq(b, "scratch", "AS", "base")
+
+	require.NoError(t, from(req))
+	assert.True(t, req.state.hasFromImage())
+
+	req.args = []string{"base"}
+	require.NoError(t, from(req))
+	assert.True(t, req.state.hasFromImage())
+}
+
+func TestOnbuildIllegalTriggers(t *testing.T) {
+	triggers := []struct{ command, expectedError string }{
+		{"ONBUILD", "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed"},
+		{"MAINTAINER", "MAINTAINER isn't allowed as an ONBUILD trigger"},
+		{"FROM", "FROM isn't allowed as an ONBUILD trigger"}}
+
+	for _, trigger := range triggers {
+		b := newBuilderWithMockBackend()
+
+		err := onbuild(defaultDispatchReq(b, trigger.command))
+		testutil.ErrorContains(t, err, trigger.expectedError)
+	}
+}
+
+func TestOnbuild(t *testing.T) {
+	b := newBuilderWithMockBackend()
+
+	req := defaultDispatchReq(b, "ADD", ".", "/app/src")
+	req.original = "ONBUILD ADD . /app/src"
+	req.state.runConfig = &container.Config{}
+
+	err := onbuild(req)
+	require.NoError(t, err)
+	assert.Equal(t, "ADD . /app/src", req.state.runConfig.OnBuild[0])
+}
+
+func TestWorkdir(t *testing.T) {
+	b := newBuilderWithMockBackend()
+	workingDir := "/app"
+	if runtime.GOOS == "windows" {
+		workingDir = "C:\\app"
+	}
+
+	req := defaultDispatchReq(b, workingDir)
+	err := workdir(req)
+	require.NoError(t, err)
+	assert.Equal(t, workingDir, req.state.runConfig.WorkingDir)
+}
+
+func TestCmd(t *testing.T) {
+	b := newBuilderWithMockBackend()
+	command := "./executable"
+
+	req := defaultDispatchReq(b, command)
+	err := cmd(req)
+	require.NoError(t, err)
+
+	var expectedCommand strslice.StrSlice
+	if runtime.GOOS == "windows" {
+		expectedCommand = strslice.StrSlice(append([]string{"cmd"}, "/S", "/C", command))
+	} else {
+		expectedCommand = strslice.StrSlice(append([]string{"/bin/sh"}, "-c", command))
+	}
+
+	assert.Equal(t, expectedCommand, req.state.runConfig.Cmd)
+	assert.True(t, req.state.cmdSet)
+}
+
+func TestHealthcheckNone(t *testing.T) {
+	b := newBuilderWithMockBackend()
+
+	req := defaultDispatchReq(b, "NONE")
+	err := healthcheck(req)
+	require.NoError(t, err)
+
+	require.NotNil(t, req.state.runConfig.Healthcheck)
+	assert.Equal(t, []string{"NONE"}, req.state.runConfig.Healthcheck.Test)
+}
+
+func TestHealthcheckCmd(t *testing.T) {
+	b := newBuilderWithMockBackend()
+
+	args := []string{"CMD", "curl", "-f", "http://localhost/", "||", "exit", "1"}
+	req := defaultDispatchReq(b, args...)
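+	// Without the json attribute this is the shell form, which should be
+	// recorded as CMD-SHELL with the arguments joined into one command string.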
+ err := healthcheck(req) + require.NoError(t, err) + + require.NotNil(t, req.state.runConfig.Healthcheck) + expectedTest := []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"} + assert.Equal(t, expectedTest, req.state.runConfig.Healthcheck.Test) +} + +func TestEntrypoint(t *testing.T) { + b := newBuilderWithMockBackend() + entrypointCmd := "/usr/sbin/nginx" + + req := defaultDispatchReq(b, entrypointCmd) + err := entrypoint(req) + require.NoError(t, err) + require.NotNil(t, req.state.runConfig.Entrypoint) + + var expectedEntrypoint strslice.StrSlice + if runtime.GOOS == "windows" { + expectedEntrypoint = strslice.StrSlice(append([]string{"cmd"}, "/S", "/C", entrypointCmd)) + } else { + expectedEntrypoint = strslice.StrSlice(append([]string{"/bin/sh"}, "-c", entrypointCmd)) + } + assert.Equal(t, expectedEntrypoint, req.state.runConfig.Entrypoint) +} + +func TestExpose(t *testing.T) { + b := newBuilderWithMockBackend() + + exposedPort := "80" + req := defaultDispatchReq(b, exposedPort) + err := expose(req) + require.NoError(t, err) + + require.NotNil(t, req.state.runConfig.ExposedPorts) + require.Len(t, req.state.runConfig.ExposedPorts, 1) + + portsMapping, err := nat.ParsePortSpec(exposedPort) + require.NoError(t, err) + assert.Contains(t, req.state.runConfig.ExposedPorts, portsMapping[0].Port) +} + +func TestUser(t *testing.T) { + b := newBuilderWithMockBackend() + userCommand := "foo" + + req := defaultDispatchReq(b, userCommand) + err := user(req) + require.NoError(t, err) + assert.Equal(t, userCommand, req.state.runConfig.User) +} + +func TestVolume(t *testing.T) { + b := newBuilderWithMockBackend() + + exposedVolume := "/foo" + + req := defaultDispatchReq(b, exposedVolume) + err := volume(req) + require.NoError(t, err) + + require.NotNil(t, req.state.runConfig.Volumes) + assert.Len(t, req.state.runConfig.Volumes, 1) + assert.Contains(t, req.state.runConfig.Volumes, exposedVolume) +} + +func TestStopSignal(t *testing.T) { + b := newBuilderWithMockBackend() + signal := "SIGKILL" + + req := defaultDispatchReq(b, signal) + err := stopSignal(req) + require.NoError(t, err) + assert.Equal(t, signal, req.state.runConfig.StopSignal) +} + +func TestArg(t *testing.T) { + b := newBuilderWithMockBackend() + + argName := "foo" + argVal := "bar" + argDef := fmt.Sprintf("%s=%s", argName, argVal) + + err := arg(defaultDispatchReq(b, argDef)) + require.NoError(t, err) + + expected := map[string]string{argName: argVal} + assert.Equal(t, expected, b.buildArgs.GetAllAllowed()) +} + +func TestShell(t *testing.T) { + b := newBuilderWithMockBackend() + + shellCmd := "powershell" + req := defaultDispatchReq(b, shellCmd) + req.attributes = map[string]bool{"json": true} + + err := shell(req) + require.NoError(t, err) + + expectedShell := strslice.StrSlice([]string{shellCmd}) + assert.Equal(t, expectedShell, req.state.runConfig.Shell) +} + +func TestParseOptInterval(t *testing.T) { + flInterval := &Flag{ + name: "interval", + flagType: stringType, + Value: "50ns", + } + _, err := parseOptInterval(flInterval) + testutil.ErrorContains(t, err, "cannot be less than 1ms") + + flInterval.Value = "1ms" + _, err = parseOptInterval(flInterval) + require.NoError(t, err) +} + +func TestPrependEnvOnCmd(t *testing.T) { + buildArgs := newBuildArgs(nil) + buildArgs.AddArg("NO_PROXY", nil) + + args := []string{"sorted=nope", "args=not", "http_proxy=foo", "NO_PROXY=YA"} + cmd := []string{"foo", "bar"} + cmdWithEnv := prependEnvOnCmd(buildArgs, args, cmd) + expected := strslice.StrSlice([]string{ + "|3", "NO_PROXY=YA", 
"args=not", "sorted=nope", "foo", "bar"}) + assert.Equal(t, expected, cmdWithEnv) +} + +func TestRunWithBuildArgs(t *testing.T) { + b := newBuilderWithMockBackend() + b.buildArgs.argsFromOptions["HTTP_PROXY"] = strPtr("FOO") + b.disableCommit = false + + runConfig := &container.Config{} + origCmd := strslice.StrSlice([]string{"cmd", "in", "from", "image"}) + cmdWithShell := strslice.StrSlice(append(getShell(runConfig, runtime.GOOS), "echo foo")) + envVars := []string{"|1", "one=two"} + cachedCmd := strslice.StrSlice(append(envVars, cmdWithShell...)) + + imageCache := &mockImageCache{ + getCacheFunc: func(parentID string, cfg *container.Config) (string, error) { + // Check the runConfig.Cmd sent to probeCache() + assert.Equal(t, cachedCmd, cfg.Cmd) + assert.Equal(t, strslice.StrSlice(nil), cfg.Entrypoint) + return "", nil + }, + } + + mockBackend := b.docker.(*MockBackend) + mockBackend.makeImageCacheFunc = func(_ []string, _ string) builder.ImageCache { + return imageCache + } + b.imageProber = newImageProber(mockBackend, nil, runtime.GOOS, false) + mockBackend.getImageFunc = func(_ string) (builder.Image, builder.ReleaseableLayer, error) { + return &mockImage{ + id: "abcdef", + config: &container.Config{Cmd: origCmd}, + }, nil, nil + } + mockBackend.containerCreateFunc = func(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) { + // Check the runConfig.Cmd sent to create() + assert.Equal(t, cmdWithShell, config.Config.Cmd) + assert.Contains(t, config.Config.Env, "one=two") + assert.Equal(t, strslice.StrSlice{""}, config.Config.Entrypoint) + return container.ContainerCreateCreatedBody{ID: "12345"}, nil + } + mockBackend.commitFunc = func(cID string, cfg *backend.ContainerCommitConfig) (string, error) { + // Check the runConfig.Cmd sent to commit() + assert.Equal(t, origCmd, cfg.Config.Cmd) + assert.Equal(t, cachedCmd, cfg.ContainerConfig.Cmd) + assert.Equal(t, strslice.StrSlice(nil), cfg.Config.Entrypoint) + return "", nil + } + + req := defaultDispatchReq(b, "abcdef") + require.NoError(t, from(req)) + b.buildArgs.AddArg("one", strPtr("two")) + + req.args = []string{"echo foo"} + require.NoError(t, run(req)) + + // Check that runConfig.Cmd has not been modified by run + assert.Equal(t, origCmd, req.state.runConfig.Cmd) +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/dispatchers_unix.go b/vendor/github.com/moby/moby/builder/dockerfile/dispatchers_unix.go new file mode 100644 index 000000000..62ee371df --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/dispatchers_unix.go @@ -0,0 +1,34 @@ +// +build !windows + +package dockerfile + +import ( + "errors" + "fmt" + "os" + "path/filepath" +) + +// normaliseWorkdir normalises a user requested working directory in a +// platform semantically consistent way. +func normaliseWorkdir(current string, requested string) (string, error) { + if requested == "" { + return "", errors.New("cannot normalise nothing") + } + current = filepath.FromSlash(current) + requested = filepath.FromSlash(requested) + if !filepath.IsAbs(requested) { + return filepath.Join(string(os.PathSeparator), current, requested), nil + } + return requested, nil +} + +func errNotJSON(command, _ string) error { + return fmt.Errorf("%s requires the arguments to be in JSON form", command) +} + +// equalEnvKeys compare two strings and returns true if they are equal. On +// Windows this comparison is case insensitive. 
+func equalEnvKeys(from, to string) bool { + return from == to +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/dispatchers_unix_test.go b/vendor/github.com/moby/moby/builder/dockerfile/dispatchers_unix_test.go new file mode 100644 index 000000000..4aae6b460 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/dispatchers_unix_test.go @@ -0,0 +1,33 @@ +// +build !windows + +package dockerfile + +import ( + "testing" +) + +func TestNormaliseWorkdir(t *testing.T) { + testCases := []struct{ current, requested, expected, expectedError string }{ + {``, ``, ``, `cannot normalise nothing`}, + {``, `foo`, `/foo`, ``}, + {``, `/foo`, `/foo`, ``}, + {`/foo`, `bar`, `/foo/bar`, ``}, + {`/foo`, `/bar`, `/bar`, ``}, + } + + for _, test := range testCases { + normalised, err := normaliseWorkdir(test.current, test.requested) + + if test.expectedError != "" && err == nil { + t.Fatalf("NormaliseWorkdir should return an error %s, got nil", test.expectedError) + } + + if test.expectedError != "" && err.Error() != test.expectedError { + t.Fatalf("NormaliseWorkdir returned wrong error. Expected %s, got %s", test.expectedError, err.Error()) + } + + if normalised != test.expected { + t.Fatalf("NormaliseWorkdir error. Expected %s for current %s and requested %s, got %s", test.expected, test.current, test.requested, normalised) + } + } +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/dispatchers_windows.go b/vendor/github.com/moby/moby/builder/dockerfile/dispatchers_windows.go new file mode 100644 index 000000000..71f7c9288 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/dispatchers_windows.go @@ -0,0 +1,93 @@ +package dockerfile + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "regexp" + "strings" + + "github.com/docker/docker/pkg/system" +) + +var pattern = regexp.MustCompile(`^[a-zA-Z]:\.$`) + +// normaliseWorkdir normalises a user requested working directory in a +// platform semantically consistent way. +func normaliseWorkdir(current string, requested string) (string, error) { + if requested == "" { + return "", errors.New("cannot normalise nothing") + } + + // `filepath.Clean` will replace "" with "." so skip in that case + if current != "" { + current = filepath.Clean(current) + } + if requested != "" { + requested = filepath.Clean(requested) + } + + // If either current or requested in Windows is: + // C: + // C:. + // then an error will be thrown as the definition for the above + // refers to `current directory on drive C:` + // Since filepath.Clean() will automatically normalize the above + // to `C:.`, we only need to check the last format + if pattern.MatchString(current) { + return "", fmt.Errorf("%s is not a directory. If you are specifying a drive letter, please add a trailing '\\'", current) + } + if pattern.MatchString(requested) { + return "", fmt.Errorf("%s is not a directory. If you are specifying a drive letter, please add a trailing '\\'", requested) + } + + // Target semantics is C:\somefolder, specifically in the format: + // UPPERCASEDriveLetter-Colon-Backslash-FolderName. We are already + // guaranteed that `current`, if set, is consistent. 
This allows us to + // cope correctly with any of the following in a Dockerfile: + // WORKDIR a --> C:\a + // WORKDIR c:\\foo --> C:\foo + // WORKDIR \\foo --> C:\foo + // WORKDIR /foo --> C:\foo + // WORKDIR c:\\foo \ WORKDIR bar --> C:\foo --> C:\foo\bar + // WORKDIR C:/foo \ WORKDIR bar --> C:\foo --> C:\foo\bar + // WORKDIR C:/foo \ WORKDIR \\bar --> C:\foo --> C:\bar + // WORKDIR /foo \ WORKDIR c:/bar --> C:\foo --> C:\bar + if len(current) == 0 || system.IsAbs(requested) { + if (requested[0] == os.PathSeparator) || + (len(requested) > 1 && string(requested[1]) != ":") || + (len(requested) == 1) { + requested = filepath.Join(`C:\`, requested) + } + } else { + requested = filepath.Join(current, requested) + } + // Upper-case drive letter + return (strings.ToUpper(string(requested[0])) + requested[1:]), nil +} + +func errNotJSON(command, original string) error { + // For Windows users, give a hint if it looks like it might contain + // a path which hasn't been escaped such as ["c:\windows\system32\prog.exe", "-param"], + // as JSON must be escaped. Unfortunate... + // + // Specifically looking for quote-driveletter-colon-backslash, there's no + // double backslash and a [] pair. No, this is not perfect, but it doesn't + // have to be. It's simply a hint to make life a little easier. + extra := "" + original = filepath.FromSlash(strings.ToLower(strings.Replace(strings.ToLower(original), strings.ToLower(command)+" ", "", -1))) + if len(regexp.MustCompile(`"[a-z]:\\.*`).FindStringSubmatch(original)) > 0 && + !strings.Contains(original, `\\`) && + strings.Contains(original, "[") && + strings.Contains(original, "]") { + extra = fmt.Sprintf(`. It looks like '%s' includes a file path without an escaped back-slash. JSON requires back-slashes to be escaped such as ["c:\\path\\to\\file.exe", "/parameter"]`, original) + } + return fmt.Errorf("%s requires the arguments to be in JSON form%s", command, extra) +} + +// equalEnvKeys compare two strings and returns true if they are equal. On +// Windows this comparison is case insensitive. +func equalEnvKeys(from, to string) bool { + return strings.ToUpper(from) == strings.ToUpper(to) +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/dispatchers_windows_test.go b/vendor/github.com/moby/moby/builder/dockerfile/dispatchers_windows_test.go new file mode 100644 index 000000000..3319c0658 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/dispatchers_windows_test.go @@ -0,0 +1,40 @@ +// +build windows + +package dockerfile + +import "testing" + +func TestNormaliseWorkdir(t *testing.T) { + tests := []struct{ current, requested, expected, etext string }{ + {``, ``, ``, `cannot normalise nothing`}, + {``, `C:`, ``, `C:. is not a directory. If you are specifying a drive letter, please add a trailing '\'`}, + {``, `C:.`, ``, `C:. is not a directory. If you are specifying a drive letter, please add a trailing '\'`}, + {`c:`, `\a`, ``, `c:. is not a directory. If you are specifying a drive letter, please add a trailing '\'`}, + {`c:.`, `\a`, ``, `c:. is not a directory. 
If you are specifying a drive letter, please add a trailing '\'`}, + {``, `a`, `C:\a`, ``}, + {``, `c:\foo`, `C:\foo`, ``}, + {``, `c:\\foo`, `C:\foo`, ``}, + {``, `\foo`, `C:\foo`, ``}, + {``, `\\foo`, `C:\foo`, ``}, + {``, `/foo`, `C:\foo`, ``}, + {``, `C:/foo`, `C:\foo`, ``}, + {`C:\foo`, `bar`, `C:\foo\bar`, ``}, + {`C:\foo`, `/bar`, `C:\bar`, ``}, + {`C:\foo`, `\bar`, `C:\bar`, ``}, + } + for _, i := range tests { + r, e := normaliseWorkdir(i.current, i.requested) + + if i.etext != "" && e == nil { + t.Fatalf("TestNormaliseWorkingDir Expected error %s for '%s' '%s', got no error", i.etext, i.current, i.requested) + } + + if i.etext != "" && e.Error() != i.etext { + t.Fatalf("TestNormaliseWorkingDir Expected error %s for '%s' '%s', got %s", i.etext, i.current, i.requested, e.Error()) + } + + if r != i.expected { + t.Fatalf("TestNormaliseWorkingDir Expected '%s' for '%s' '%s', got '%s'", i.expected, i.current, i.requested, r) + } + } +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/envVarTest b/vendor/github.com/moby/moby/builder/dockerfile/envVarTest new file mode 100644 index 000000000..946b27859 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/envVarTest @@ -0,0 +1,121 @@ +A|hello | hello +A|he'll'o | hello +A|he'llo | error +A|he\'llo | he'llo +A|he\\'llo | error +A|abc\tdef | abctdef +A|"abc\tdef" | abc\tdef +A|"abc\\tdef" | abc\tdef +A|'abc\tdef' | abc\tdef +A|hello\ | hello +A|hello\\ | hello\ +A|"hello | error +A|"hello\" | error +A|"hel'lo" | hel'lo +A|'hello | error +A|'hello\' | hello\ +A|'hello\there' | hello\there +A|'hello\\there' | hello\\there +A|"''" | '' +A|$. | $. +A|$1 | +A|he$1x | hex +A|he$.x | he$.x +# Next one is different on Windows as $pwd==$PWD +U|he$pwd. | he. +W|he$pwd. | he/home. +A|he$PWD | he/home +A|he\$PWD | he$PWD +A|he\\$PWD | he\/home +A|"he\$PWD" | he$PWD +A|"he\\$PWD" | he\/home +A|he\${} | he${} +A|he\${}xx | he${}xx +A|he${} | he +A|he${}xx | hexx +A|he${hi} | he +A|he${hi}xx | hexx +A|he${PWD} | he/home +A|he${.} | error +A|he${XXX:-000}xx | he000xx +A|he${PWD:-000}xx | he/homexx +A|he${XXX:-$PWD}xx | he/homexx +A|he${XXX:-${PWD:-yyy}}xx | he/homexx +A|he${XXX:-${YYY:-yyy}}xx | heyyyxx +A|he${XXX:YYY} | error +A|he${XXX:+${PWD}}xx | hexx +A|he${PWD:+${XXX}}xx | hexx +A|he${PWD:+${SHELL}}xx | hebashxx +A|he${XXX:+000}xx | hexx +A|he${PWD:+000}xx | he000xx +A|'he${XX}' | he${XX} +A|"he${PWD}" | he/home +A|"he'$PWD'" | he'/home' +A|"$PWD" | /home +A|'$PWD' | $PWD +A|'\$PWD' | \$PWD +A|'"hello"' | "hello" +A|he\$PWD | he$PWD +A|"he\$PWD" | he$PWD +A|'he\$PWD' | he\$PWD +A|he${PWD | error +A|he${PWD:=000}xx | error +A|he${PWD:+${PWD}:}xx | he/home:xx +A|he${XXX:-\$PWD:}xx | he$PWD:xx +A|he${XXX:-\${PWD}z}xx | he${PWDz}xx +A|안녕하세요 | 안녕하세요 +A|안'녕'하세요 | 안녕하세요 +A|안'녕하세요 | error +A|안녕\'하세요 | 안녕'하세요 +A|안\\'녕하세요 | error +A|안녕\t하세요 | 안녕t하세요 +A|"안녕\t하세요" | 안녕\t하세요 +A|'안녕\t하세요 | error +A|안녕하세요\ | 안녕하세요 +A|안녕하세요\\ | 안녕하세요\ +A|"안녕하세요 | error +A|"안녕하세요\" | error +A|"안녕'하세요" | 안녕'하세요 +A|'안녕하세요 | error +A|'안녕하세요\' | 안녕하세요\ +A|안녕$1x | 안녕x +A|안녕$.x | 안녕$.x +# Next one is different on Windows as $pwd==$PWD +U|안녕$pwd. | 안녕. +W|안녕$pwd. | 안녕/home. 
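+# Format note, inferred from the cases in this file (not upstream
+# documentation): each line reads FLAG|input | expected, where FLAG is
+# A (all platforms), U (Unix only) or W (Windows only), and an expected
+# value of "error" means the word must fail to resolve. Judging from the
+# expectations above, the test harness is assumed to set PWD=/home,
+# SHELL=bash and KOREAN=한국어.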
+A|안녕$PWD | 안녕/home +A|안녕\$PWD | 안녕$PWD +A|안녕\\$PWD | 안녕\/home +A|안녕\${} | 안녕${} +A|안녕\${}xx | 안녕${}xx +A|안녕${} | 안녕 +A|안녕${}xx | 안녕xx +A|안녕${hi} | 안녕 +A|안녕${hi}xx | 안녕xx +A|안녕${PWD} | 안녕/home +A|안녕${.} | error +A|안녕${XXX:-000}xx | 안녕000xx +A|안녕${PWD:-000}xx | 안녕/homexx +A|안녕${XXX:-$PWD}xx | 안녕/homexx +A|안녕${XXX:-${PWD:-yyy}}xx | 안녕/homexx +A|안녕${XXX:-${YYY:-yyy}}xx | 안녕yyyxx +A|안녕${XXX:YYY} | error +A|안녕${XXX:+${PWD}}xx | 안녕xx +A|안녕${PWD:+${XXX}}xx | 안녕xx +A|안녕${PWD:+${SHELL}}xx | 안녕bashxx +A|안녕${XXX:+000}xx | 안녕xx +A|안녕${PWD:+000}xx | 안녕000xx +A|'안녕${XX}' | 안녕${XX} +A|"안녕${PWD}" | 안녕/home +A|"안녕'$PWD'" | 안녕'/home' +A|'"안녕"' | "안녕" +A|안녕\$PWD | 안녕$PWD +A|"안녕\$PWD" | 안녕$PWD +A|'안녕\$PWD' | 안녕\$PWD +A|안녕${PWD | error +A|안녕${PWD:=000}xx | error +A|안녕${PWD:+${PWD}:}xx | 안녕/home:xx +A|안녕${XXX:-\$PWD:}xx | 안녕$PWD:xx +A|안녕${XXX:-\${PWD}z}xx | 안녕${PWDz}xx +A|$KOREAN | 한국어 +A|안녕$KOREAN | 안녕한국어 diff --git a/vendor/github.com/moby/moby/builder/dockerfile/evaluator.go b/vendor/github.com/moby/moby/builder/dockerfile/evaluator.go new file mode 100644 index 000000000..ba4315940 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/evaluator.go @@ -0,0 +1,327 @@ +// Package dockerfile is the evaluation step in the Dockerfile parse/evaluate pipeline. +// +// It incorporates a dispatch table based on the parser.Node values (see the +// parser package for more information) that are yielded from the parser itself. +// Calling newBuilder with the BuildOpts struct can be used to customize the +// experience for execution purposes only. Parsing is controlled in the parser +// package, and this division of responsibility should be respected. +// +// Please see the jump table targets for the actual invocations, most of which +// will call out to the functions in internals.go to deal with their tasks. +// +// ONBUILD is a special case, which is covered in the onbuild() func in +// dispatchers.go. +// +// The evaluator uses the concept of "steps", which are usually each processable +// line in the Dockerfile. Each step is numbered and certain actions are taken +// before and after each step, such as creating an image ID and removing temporary +// containers and images. Note that ONBUILD creates a kinda-sorta "sub run" which +// includes its own set of steps (usually only one of them). +package dockerfile + +import ( + "bytes" + "fmt" + "runtime" + "strings" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerfile/command" + "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/runconfig/opts" + "github.com/pkg/errors" +) + +// Environment variable interpolation will happen on these statements only. +var replaceEnvAllowed = map[string]bool{ + command.Env: true, + command.Label: true, + command.Add: true, + command.Copy: true, + command.Workdir: true, + command.Expose: true, + command.Volume: true, + command.User: true, + command.StopSignal: true, + command.Arg: true, +} + +// Certain commands are allowed to have their args split into more +// words after env var replacements. Meaning: +// ENV foo="123 456" +// EXPOSE $foo +// should result in the same thing as: +// EXPOSE 123 456 +// and not treat "123 456" as a single word. +// Note that: EXPOSE "$foo" and EXPOSE $foo are not the same thing. +// Quotes will cause it to still be treated as single word. 
+var allowWordExpansion = map[string]bool{ + command.Expose: true, +} + +type dispatchRequest struct { + builder *Builder // TODO: replace this with a smaller interface + args []string + attributes map[string]bool + flags *BFlags + original string + shlex *ShellLex + state *dispatchState + source builder.Source +} + +func newDispatchRequestFromOptions(options dispatchOptions, builder *Builder, args []string) dispatchRequest { + return dispatchRequest{ + builder: builder, + args: args, + attributes: options.node.Attributes, + original: options.node.Original, + flags: NewBFlagsWithArgs(options.node.Flags), + shlex: options.shlex, + state: options.state, + source: options.source, + } +} + +type dispatcher func(dispatchRequest) error + +var evaluateTable map[string]dispatcher + +func init() { + evaluateTable = map[string]dispatcher{ + command.Add: add, + command.Arg: arg, + command.Cmd: cmd, + command.Copy: dispatchCopy, // copy() is a go builtin + command.Entrypoint: entrypoint, + command.Env: env, + command.Expose: expose, + command.From: from, + command.Healthcheck: healthcheck, + command.Label: label, + command.Maintainer: maintainer, + command.Onbuild: onbuild, + command.Run: run, + command.Shell: shell, + command.StopSignal: stopSignal, + command.User: user, + command.Volume: volume, + command.Workdir: workdir, + } +} + +func formatStep(stepN int, stepTotal int) string { + return fmt.Sprintf("%d/%d", stepN+1, stepTotal) +} + +// This method is the entrypoint to all statement handling routines. +// +// Almost all nodes will have this structure: +// Child[Node, Node, Node] where Child is from parser.Node.Children and each +// node comes from parser.Node.Next. This forms a "line" with a statement and +// arguments and we process them in this normalized form by hitting +// evaluateTable with the leaf nodes of the command and the Builder object. +// +// ONBUILD is a special case; in this case the parser will emit: +// Child[Node, Child[Node, Node...]] where the first node is the literal +// "onbuild" and the child entrypoint is the command of the ONBUILD statement, +// such as `RUN` in ONBUILD RUN foo. There is special case logic in here to +// deal with that, at least until it becomes more of a general concern with new +// features. +func (b *Builder) dispatch(options dispatchOptions) (*dispatchState, error) { + node := options.node + cmd := node.Value + upperCasedCmd := strings.ToUpper(cmd) + + // To ensure the user is given a decent error message if the platform + // on which the daemon is running does not support a builder command. + if err := platformSupports(strings.ToLower(cmd)); err != nil { + buildsFailed.WithValues(metricsCommandNotSupportedError).Inc() + return nil, err + } + + msg := bytes.NewBufferString(fmt.Sprintf("Step %s : %s%s", + options.stepMsg, upperCasedCmd, formatFlags(node.Flags))) + + args := []string{} + ast := node + if cmd == command.Onbuild { + var err error + ast, args, err = handleOnBuildNode(node, msg) + if err != nil { + return nil, err + } + } + + runConfigEnv := options.state.runConfig.Env + envs := append(runConfigEnv, b.buildArgs.FilterAllowed(runConfigEnv)...) + processFunc := createProcessWordFunc(options.shlex, cmd, envs) + words, err := getDispatchArgsFromNode(ast, processFunc, msg) + if err != nil { + buildsFailed.WithValues(metricsErrorProcessingCommandsError).Inc() + return nil, err + } + args = append(args, words...) 
+ + fmt.Fprintln(b.Stdout, msg.String()) + + f, ok := evaluateTable[cmd] + if !ok { + buildsFailed.WithValues(metricsUnknownInstructionError).Inc() + return nil, fmt.Errorf("unknown instruction: %s", upperCasedCmd) + } + options.state.updateRunConfig() + err = f(newDispatchRequestFromOptions(options, b, args)) + return options.state, err +} + +type dispatchOptions struct { + state *dispatchState + stepMsg string + node *parser.Node + shlex *ShellLex + source builder.Source +} + +// dispatchState is a data object which is modified by dispatchers +type dispatchState struct { + runConfig *container.Config + maintainer string + cmdSet bool + imageID string + baseImage builder.Image + stageName string +} + +func newDispatchState() *dispatchState { + return &dispatchState{runConfig: &container.Config{}} +} + +func (s *dispatchState) updateRunConfig() { + s.runConfig.Image = s.imageID +} + +// hasFromImage returns true if the builder has processed a `FROM ` line +func (s *dispatchState) hasFromImage() bool { + return s.imageID != "" || (s.baseImage != nil && s.baseImage.ImageID() == "") +} + +func (s *dispatchState) isCurrentStage(target string) bool { + if target == "" { + return false + } + return strings.EqualFold(s.stageName, target) +} + +func (s *dispatchState) beginStage(stageName string, image builder.Image) { + s.stageName = stageName + s.imageID = image.ImageID() + + if image.RunConfig() != nil { + s.runConfig = image.RunConfig() + } else { + s.runConfig = &container.Config{} + } + s.baseImage = image + s.setDefaultPath() +} + +// Add the default PATH to runConfig.ENV if one exists for the platform and there +// is no PATH set. Note that Windows containers on Windows won't have one as it's set by HCS +func (s *dispatchState) setDefaultPath() { + // TODO @jhowardmsft LCOW Support - This will need revisiting later + platform := runtime.GOOS + if system.LCOWSupported() { + platform = "linux" + } + if system.DefaultPathEnv(platform) == "" { + return + } + envMap := opts.ConvertKVStringsToMap(s.runConfig.Env) + if _, ok := envMap["PATH"]; !ok { + s.runConfig.Env = append(s.runConfig.Env, "PATH="+system.DefaultPathEnv(platform)) + } +} + +func handleOnBuildNode(ast *parser.Node, msg *bytes.Buffer) (*parser.Node, []string, error) { + if ast.Next == nil { + return nil, nil, errors.New("ONBUILD requires at least one argument") + } + ast = ast.Next.Children[0] + msg.WriteString(" " + ast.Value + formatFlags(ast.Flags)) + return ast, []string{ast.Value}, nil +} + +func formatFlags(flags []string) string { + if len(flags) > 0 { + return " " + strings.Join(flags, " ") + } + return "" +} + +func getDispatchArgsFromNode(ast *parser.Node, processFunc processWordFunc, msg *bytes.Buffer) ([]string, error) { + args := []string{} + for i := 0; ast.Next != nil; i++ { + ast = ast.Next + words, err := processFunc(ast.Value) + if err != nil { + return nil, err + } + args = append(args, words...) 
+ msg.WriteString(" " + ast.Value) + } + return args, nil +} + +type processWordFunc func(string) ([]string, error) + +func createProcessWordFunc(shlex *ShellLex, cmd string, envs []string) processWordFunc { + switch { + case !replaceEnvAllowed[cmd]: + return func(word string) ([]string, error) { + return []string{word}, nil + } + case allowWordExpansion[cmd]: + return func(word string) ([]string, error) { + return shlex.ProcessWords(word, envs) + } + default: + return func(word string) ([]string, error) { + word, err := shlex.ProcessWord(word, envs) + return []string{word}, err + } + } +} + +// checkDispatch does a simple check for syntax errors of the Dockerfile. +// Because some of the instructions can only be validated through runtime, +// arg, env, etc., this syntax check will not be complete and could not replace +// the runtime check. Instead, this function is only a helper that allows +// user to find out the obvious error in Dockerfile earlier on. +func checkDispatch(ast *parser.Node) error { + cmd := ast.Value + upperCasedCmd := strings.ToUpper(cmd) + + // To ensure the user is given a decent error message if the platform + // on which the daemon is running does not support a builder command. + if err := platformSupports(strings.ToLower(cmd)); err != nil { + return err + } + + // The instruction itself is ONBUILD, we will make sure it follows with at + // least one argument + if upperCasedCmd == "ONBUILD" { + if ast.Next == nil { + buildsFailed.WithValues(metricsMissingOnbuildArgumentsError).Inc() + return errors.New("ONBUILD requires at least one argument") + } + } + + if _, ok := evaluateTable[cmd]; ok { + return nil + } + buildsFailed.WithValues(metricsUnknownInstructionError).Inc() + return errors.Errorf("unknown instruction: %s", upperCasedCmd) +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/evaluator_test.go b/vendor/github.com/moby/moby/builder/dockerfile/evaluator_test.go new file mode 100644 index 000000000..72d7ce10e --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/evaluator_test.go @@ -0,0 +1,210 @@ +package dockerfile + +import ( + "io/ioutil" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/builder/remotecontext" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" +) + +type dispatchTestCase struct { + name, dockerfile, expectedError string + files map[string]string +} + +func init() { + reexec.Init() +} + +func initDispatchTestCases() []dispatchTestCase { + dispatchTestCases := []dispatchTestCase{{ + name: "copyEmptyWhitespace", + dockerfile: `COPY + quux \ + bar`, + expectedError: "COPY requires at least two arguments", + }, + { + name: "ONBUILD forbidden FROM", + dockerfile: "ONBUILD FROM scratch", + expectedError: "FROM isn't allowed as an ONBUILD trigger", + files: nil, + }, + { + name: "ONBUILD forbidden MAINTAINER", + dockerfile: "ONBUILD MAINTAINER docker.io", + expectedError: "MAINTAINER isn't allowed as an ONBUILD trigger", + files: nil, + }, + { + name: "ARG two arguments", + dockerfile: "ARG foo bar", + expectedError: "ARG requires exactly one argument", + files: nil, + }, + { + name: "MAINTAINER unknown flag", + dockerfile: "MAINTAINER --boo joe@example.com", + expectedError: "Unknown flag: boo", + files: nil, + }, + { + name: "ADD multiple files to file", + dockerfile: "ADD file1.txt file2.txt test", + expectedError: "When using ADD with more than 
one source file, the destination must be a directory and end with a /", + files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, + }, + { + name: "JSON ADD multiple files to file", + dockerfile: `ADD ["file1.txt", "file2.txt", "test"]`, + expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, + }, + { + name: "Wildcard ADD multiple files to file", + dockerfile: "ADD file*.txt test", + expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, + }, + { + name: "Wildcard JSON ADD multiple files to file", + dockerfile: `ADD ["file*.txt", "test"]`, + expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, + }, + { + name: "COPY multiple files to file", + dockerfile: "COPY file1.txt file2.txt test", + expectedError: "When using COPY with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, + }, + { + name: "JSON COPY multiple files to file", + dockerfile: `COPY ["file1.txt", "file2.txt", "test"]`, + expectedError: "When using COPY with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, + }, + { + name: "ADD multiple files to file with whitespace", + dockerfile: `ADD [ "test file1.txt", "test file2.txt", "test" ]`, + expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"test file1.txt": "test1", "test file2.txt": "test2"}, + }, + { + name: "COPY multiple files to file with whitespace", + dockerfile: `COPY [ "test file1.txt", "test file2.txt", "test" ]`, + expectedError: "When using COPY with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"test file1.txt": "test1", "test file2.txt": "test2"}, + }, + { + name: "COPY wildcard no files", + dockerfile: `COPY file*.txt /tmp/`, + expectedError: "COPY failed: no source files were specified", + files: nil, + }, + { + name: "COPY url", + dockerfile: `COPY https://index.docker.io/robots.txt /`, + expectedError: "source can't be a URL for COPY", + files: nil, + }, + { + name: "Chaining ONBUILD", + dockerfile: `ONBUILD ONBUILD RUN touch foobar`, + expectedError: "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed", + files: nil, + }, + { + name: "Invalid instruction", + dockerfile: `foo bar`, + expectedError: "unknown instruction: FOO", + files: nil, + }} + + return dispatchTestCases +} + +func TestDispatch(t *testing.T) { + testCases := initDispatchTestCases() + + for _, testCase := range testCases { + executeTestCase(t, testCase) + } +} + +func executeTestCase(t *testing.T, testCase dispatchTestCase) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test") + defer cleanup() + + for filename, content := range testCase.files { + createTestTempFile(t, contextDir, filename, content, 0777) + } + + tarStream, err := archive.Tar(contextDir, archive.Uncompressed) + + if err != nil { + t.Fatalf("Error when creating tar stream: %s", err) + } + + defer func() { + if err = 
tarStream.Close(); err != nil {
+			t.Fatalf("Error when closing tar stream: %s", err)
+		}
+	}()
+
+	context, err := remotecontext.FromArchive(tarStream)
+
+	if err != nil {
+		t.Fatalf("Error when creating tar context: %s", err)
+	}
+
+	defer func() {
+		if err = context.Close(); err != nil {
+			t.Fatalf("Error when closing tar context: %s", err)
+		}
+	}()
+
+	r := strings.NewReader(testCase.dockerfile)
+	result, err := parser.Parse(r)
+
+	if err != nil {
+		t.Fatalf("Error when parsing Dockerfile: %s", err)
+	}
+
+	options := &types.ImageBuildOptions{
+		BuildArgs: make(map[string]*string),
+	}
+
+	b := &Builder{
+		options:   options,
+		Stdout:    ioutil.Discard,
+		buildArgs: newBuildArgs(options.BuildArgs),
+	}
+
+	shlex := NewShellLex(parser.DefaultEscapeToken)
+	n := result.AST
+	state := &dispatchState{runConfig: &container.Config{}}
+	opts := dispatchOptions{
+		state:   state,
+		stepMsg: formatStep(0, len(n.Children)),
+		node:    n.Children[0],
+		shlex:   shlex,
+		source:  context,
+	}
+	state, err = b.dispatch(opts)
+
+	if err == nil {
+		t.Fatalf("No error when executing test %s", testCase.name)
+	}
+
+	if !strings.Contains(err.Error(), testCase.expectedError) {
+		t.Fatalf("Wrong error message. Should be \"%s\". Got \"%s\"", testCase.expectedError, err.Error())
+	}
+
+}
diff --git a/vendor/github.com/moby/moby/builder/dockerfile/evaluator_unix.go b/vendor/github.com/moby/moby/builder/dockerfile/evaluator_unix.go
new file mode 100644
index 000000000..28fd5b156
--- /dev/null
+++ b/vendor/github.com/moby/moby/builder/dockerfile/evaluator_unix.go
@@ -0,0 +1,9 @@
+// +build !windows
+
+package dockerfile
+
+// platformSupports is a short-term function to give users a quality error
+// message if a Dockerfile uses a command not supported on the platform.
+func platformSupports(command string) error {
+	return nil
+}
diff --git a/vendor/github.com/moby/moby/builder/dockerfile/evaluator_windows.go b/vendor/github.com/moby/moby/builder/dockerfile/evaluator_windows.go
new file mode 100644
index 000000000..72483a2ec
--- /dev/null
+++ b/vendor/github.com/moby/moby/builder/dockerfile/evaluator_windows.go
@@ -0,0 +1,13 @@
+package dockerfile
+
+import "fmt"
+
+// platformSupports gives users a quality error message if a Dockerfile uses
+// a command not supported on the platform.
+func platformSupports(command string) error {
+	switch command {
+	case "stopsignal":
+		return fmt.Errorf("The daemon on this platform does not support the command '%s'", command)
+	}
+	return nil
+}
diff --git a/vendor/github.com/moby/moby/builder/dockerfile/imagecontext.go b/vendor/github.com/moby/moby/builder/dockerfile/imagecontext.go
new file mode 100644
index 000000000..64b2572b8
--- /dev/null
+++ b/vendor/github.com/moby/moby/builder/dockerfile/imagecontext.go
@@ -0,0 +1,211 @@
+package dockerfile
+
+import (
+	"strconv"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api/types/backend"
+	"github.com/docker/docker/builder"
+	"github.com/docker/docker/builder/remotecontext"
+	dockerimage "github.com/docker/docker/image"
+	"github.com/pkg/errors"
+	"golang.org/x/net/context"
+)
+
+type buildStage struct {
+	id string
+}
+
+func newBuildStage(imageID string) *buildStage {
+	return &buildStage{id: imageID}
+}
+
+func (b *buildStage) ImageID() string {
+	return b.id
+}
+
+func (b *buildStage) update(imageID string) {
+	b.id = imageID
+}
+
+// buildStages tracks each stage of a build so they can be retrieved by index
+// or by name.
+type buildStages struct { + sequence []*buildStage + byName map[string]*buildStage +} + +func newBuildStages() *buildStages { + return &buildStages{byName: make(map[string]*buildStage)} +} + +func (s *buildStages) getByName(name string) (*buildStage, bool) { + stage, ok := s.byName[strings.ToLower(name)] + return stage, ok +} + +func (s *buildStages) get(indexOrName string) (*buildStage, error) { + index, err := strconv.Atoi(indexOrName) + if err == nil { + if err := s.validateIndex(index); err != nil { + return nil, err + } + return s.sequence[index], nil + } + if im, ok := s.byName[strings.ToLower(indexOrName)]; ok { + return im, nil + } + return nil, nil +} + +func (s *buildStages) validateIndex(i int) error { + if i < 0 || i >= len(s.sequence)-1 { + if i == len(s.sequence)-1 { + return errors.New("refers to current build stage") + } + return errors.New("index out of bounds") + } + return nil +} + +func (s *buildStages) add(name string, image builder.Image) error { + stage := newBuildStage(image.ImageID()) + name = strings.ToLower(name) + if len(name) > 0 { + if _, ok := s.byName[name]; ok { + return errors.Errorf("duplicate name %s", name) + } + s.byName[name] = stage + } + s.sequence = append(s.sequence, stage) + return nil +} + +func (s *buildStages) update(imageID string) { + s.sequence[len(s.sequence)-1].update(imageID) +} + +type getAndMountFunc func(string, bool) (builder.Image, builder.ReleaseableLayer, error) + +// imageSources mounts images and provides a cache for mounted images. It tracks +// all images so they can be unmounted at the end of the build. +type imageSources struct { + byImageID map[string]*imageMount + mounts []*imageMount + getImage getAndMountFunc + cache pathCache // TODO: remove +} + +// TODO @jhowardmsft LCOW Support: Eventually, platform can be moved to options.Options.Platform, +// and removed from builderOptions, but that can't be done yet as it would affect the API. 
+func newImageSources(ctx context.Context, options builderOptions) *imageSources { + getAndMount := func(idOrRef string, localOnly bool) (builder.Image, builder.ReleaseableLayer, error) { + pullOption := backend.PullOptionNoPull + if !localOnly { + if options.Options.PullParent { + pullOption = backend.PullOptionForcePull + } else { + pullOption = backend.PullOptionPreferLocal + } + } + return options.Backend.GetImageAndReleasableLayer(ctx, idOrRef, backend.GetImageAndLayerOptions{ + PullOption: pullOption, + AuthConfig: options.Options.AuthConfigs, + Output: options.ProgressWriter.Output, + Platform: options.Platform, + }) + } + + return &imageSources{ + byImageID: make(map[string]*imageMount), + getImage: getAndMount, + } +} + +func (m *imageSources) Get(idOrRef string, localOnly bool) (*imageMount, error) { + if im, ok := m.byImageID[idOrRef]; ok { + return im, nil + } + + image, layer, err := m.getImage(idOrRef, localOnly) + if err != nil { + return nil, err + } + im := newImageMount(image, layer) + m.Add(im) + return im, nil +} + +func (m *imageSources) Unmount() (retErr error) { + for _, im := range m.mounts { + if err := im.unmount(); err != nil { + logrus.Error(err) + retErr = err + } + } + return +} + +func (m *imageSources) Add(im *imageMount) { + switch im.image { + case nil: + im.image = &dockerimage.Image{} + default: + m.byImageID[im.image.ImageID()] = im + } + m.mounts = append(m.mounts, im) +} + +// imageMount is a reference to an image that can be used as a builder.Source +type imageMount struct { + image builder.Image + source builder.Source + layer builder.ReleaseableLayer +} + +func newImageMount(image builder.Image, layer builder.ReleaseableLayer) *imageMount { + im := &imageMount{image: image, layer: layer} + return im +} + +func (im *imageMount) Source() (builder.Source, error) { + if im.source == nil { + if im.layer == nil { + return nil, errors.Errorf("empty context") + } + mountPath, err := im.layer.Mount() + if err != nil { + return nil, errors.Wrapf(err, "failed to mount %s", im.image.ImageID()) + } + source, err := remotecontext.NewLazySource(mountPath) + if err != nil { + return nil, errors.Wrapf(err, "failed to create lazycontext for %s", mountPath) + } + im.source = source + } + return im.source, nil +} + +func (im *imageMount) unmount() error { + if im.layer == nil { + return nil + } + if err := im.layer.Release(); err != nil { + return errors.Wrapf(err, "failed to unmount previous build image %s", im.image.ImageID()) + } + im.layer = nil + return nil +} + +func (im *imageMount) Image() builder.Image { + return im.image +} + +func (im *imageMount) Layer() builder.ReleaseableLayer { + return im.layer +} + +func (im *imageMount) ImageID() string { + return im.image.ImageID() +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/imageprobe.go b/vendor/github.com/moby/moby/builder/dockerfile/imageprobe.go new file mode 100644 index 000000000..3433612de --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/imageprobe.go @@ -0,0 +1,63 @@ +package dockerfile + +import ( + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" +) + +// ImageProber exposes an Image cache to the Builder. It supports resetting a +// cache. 
+type ImageProber interface { + Reset() + Probe(parentID string, runConfig *container.Config) (string, error) +} + +type imageProber struct { + cache builder.ImageCache + reset func() builder.ImageCache + cacheBusted bool +} + +func newImageProber(cacheBuilder builder.ImageCacheBuilder, cacheFrom []string, platform string, noCache bool) ImageProber { + if noCache { + return &nopProber{} + } + + reset := func() builder.ImageCache { + return cacheBuilder.MakeImageCache(cacheFrom, platform) + } + return &imageProber{cache: reset(), reset: reset} +} + +func (c *imageProber) Reset() { + c.cache = c.reset() + c.cacheBusted = false +} + +// Probe checks if cache match can be found for current build instruction. +// It returns the cachedID if there is a hit, and the empty string on miss +func (c *imageProber) Probe(parentID string, runConfig *container.Config) (string, error) { + if c.cacheBusted { + return "", nil + } + cacheID, err := c.cache.GetCache(parentID, runConfig) + if err != nil { + return "", err + } + if len(cacheID) == 0 { + logrus.Debugf("[BUILDER] Cache miss: %s", runConfig.Cmd) + c.cacheBusted = true + return "", nil + } + logrus.Debugf("[BUILDER] Use cached version: %s", runConfig.Cmd) + return cacheID, nil +} + +type nopProber struct{} + +func (c *nopProber) Reset() {} + +func (c *nopProber) Probe(_ string, _ *container.Config) (string, error) { + return "", nil +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/internals.go b/vendor/github.com/moby/moby/builder/dockerfile/internals.go new file mode 100644 index 000000000..c0d6081d0 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/internals.go @@ -0,0 +1,300 @@ +package dockerfile + +// internals for handling commands. Covers many areas and a lot of +// non-contiguous functionality. Please read the comments. 
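+//
+// A rough sketch of the main flow, inferred from the code below rather than
+// upstream documentation: a dispatcher that needs to snapshot a step calls
+//
+//	if err := b.commit(dispatchState, "ENV FOO=bar"); err != nil { ... }
+//
+// commit() probes the build cache via probeCache(); on a miss it creates a
+// container with create() and snapshots it with commitContainer(), which
+// updates dispatchState.imageID and b.buildStages.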
+ +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/stringid" + "github.com/pkg/errors" +) + +func (b *Builder) commit(dispatchState *dispatchState, comment string) error { + if b.disableCommit { + return nil + } + if !dispatchState.hasFromImage() { + return errors.New("Please provide a source image with `from` prior to commit") + } + + runConfigWithCommentCmd := copyRunConfig(dispatchState.runConfig, withCmdComment(comment, b.platform)) + hit, err := b.probeCache(dispatchState, runConfigWithCommentCmd) + if err != nil || hit { + return err + } + id, err := b.create(runConfigWithCommentCmd) + if err != nil { + return err + } + + return b.commitContainer(dispatchState, id, runConfigWithCommentCmd) +} + +func (b *Builder) commitContainer(dispatchState *dispatchState, id string, containerConfig *container.Config) error { + if b.disableCommit { + return nil + } + + commitCfg := &backend.ContainerCommitConfig{ + ContainerCommitConfig: types.ContainerCommitConfig{ + Author: dispatchState.maintainer, + Pause: true, + // TODO: this should be done by Commit() + Config: copyRunConfig(dispatchState.runConfig), + }, + ContainerConfig: containerConfig, + } + + // Commit the container + imageID, err := b.docker.Commit(id, commitCfg) + if err != nil { + return err + } + + dispatchState.imageID = imageID + b.buildStages.update(imageID) + return nil +} + +func (b *Builder) exportImage(state *dispatchState, imageMount *imageMount, runConfig *container.Config) error { + newLayer, err := imageMount.Layer().Commit(b.platform) + if err != nil { + return err + } + + // add an image mount without an image so the layer is properly unmounted + // if there is an error before we can add the full mount with image + b.imageSources.Add(newImageMount(nil, newLayer)) + + parentImage, ok := imageMount.Image().(*image.Image) + if !ok { + return errors.Errorf("unexpected image type") + } + + newImage := image.NewChildImage(parentImage, image.ChildConfig{ + Author: state.maintainer, + ContainerConfig: runConfig, + DiffID: newLayer.DiffID(), + Config: copyRunConfig(state.runConfig), + }, parentImage.OS) + + // TODO: it seems strange to marshal this here instead of just passing in the + // image struct + config, err := newImage.MarshalJSON() + if err != nil { + return errors.Wrap(err, "failed to encode image config") + } + + exportedImage, err := b.docker.CreateImage(config, state.imageID, parentImage.OS) + if err != nil { + return errors.Wrapf(err, "failed to export image") + } + + state.imageID = exportedImage.ImageID() + b.imageSources.Add(newImageMount(exportedImage, newLayer)) + b.buildStages.update(state.imageID) + return nil +} + +func (b *Builder) performCopy(state *dispatchState, inst copyInstruction) error { + srcHash := getSourceHashFromInfos(inst.infos) + + // TODO: should this have been using origPaths instead of srcHash in the comment? 
+ runConfigWithCommentCmd := copyRunConfig( + state.runConfig, + withCmdCommentString(fmt.Sprintf("%s %s in %s ", inst.cmdName, srcHash, inst.dest), b.platform)) + hit, err := b.probeCache(state, runConfigWithCommentCmd) + if err != nil || hit { + return err + } + + imageMount, err := b.imageSources.Get(state.imageID, true) + if err != nil { + return errors.Wrapf(err, "failed to get destination image %q", state.imageID) + } + destInfo, err := createDestInfo(state.runConfig.WorkingDir, inst, imageMount) + if err != nil { + return err + } + + opts := copyFileOptions{ + decompress: inst.allowLocalDecompression, + archiver: b.archiver, + } + for _, info := range inst.infos { + if err := performCopyForInfo(destInfo, info, opts); err != nil { + return errors.Wrapf(err, "failed to copy files") + } + } + return b.exportImage(state, imageMount, runConfigWithCommentCmd) +} + +func createDestInfo(workingDir string, inst copyInstruction, imageMount *imageMount) (copyInfo, error) { + // Twiddle the destination when it's a relative path - meaning, make it + // relative to the WORKINGDIR + dest, err := normaliseDest(workingDir, inst.dest) + if err != nil { + return copyInfo{}, errors.Wrapf(err, "invalid %s", inst.cmdName) + } + + destMount, err := imageMount.Source() + if err != nil { + return copyInfo{}, errors.Wrapf(err, "failed to mount copy source") + } + + return newCopyInfoFromSource(destMount, dest, ""), nil +} + +// For backwards compat, if there's just one info then use it as the +// cache look-up string, otherwise hash 'em all into one +func getSourceHashFromInfos(infos []copyInfo) string { + if len(infos) == 1 { + return infos[0].hash + } + var hashs []string + for _, info := range infos { + hashs = append(hashs, info.hash) + } + return hashStringSlice("multi", hashs) +} + +func hashStringSlice(prefix string, slice []string) string { + hasher := sha256.New() + hasher.Write([]byte(strings.Join(slice, ","))) + return prefix + ":" + hex.EncodeToString(hasher.Sum(nil)) +} + +type runConfigModifier func(*container.Config) + +func copyRunConfig(runConfig *container.Config, modifiers ...runConfigModifier) *container.Config { + copy := *runConfig + for _, modifier := range modifiers { + modifier(©) + } + return © +} + +func withCmd(cmd []string) runConfigModifier { + return func(runConfig *container.Config) { + runConfig.Cmd = cmd + } +} + +// withCmdComment sets Cmd to a nop comment string. See withCmdCommentString for +// why there are two almost identical versions of this. +func withCmdComment(comment string, platform string) runConfigModifier { + return func(runConfig *container.Config) { + runConfig.Cmd = append(getShell(runConfig, platform), "#(nop) ", comment) + } +} + +// withCmdCommentString exists to maintain compatibility with older versions. +// A few instructions (workdir, copy, add) used a nop comment that is a single arg +// where as all the other instructions used a two arg comment string. This +// function implements the single arg version. +func withCmdCommentString(comment string, platform string) runConfigModifier { + return func(runConfig *container.Config) { + runConfig.Cmd = append(getShell(runConfig, platform), "#(nop) "+comment) + } +} + +func withEnv(env []string) runConfigModifier { + return func(runConfig *container.Config) { + runConfig.Env = env + } +} + +// withEntrypointOverride sets an entrypoint on runConfig if the command is +// not empty. The entrypoint is left unmodified if command is empty. 
+//
+// The dockerfile RUN instruction expects to run without an entrypoint
+// so the runConfig entrypoint needs to be modified accordingly. ContainerCreate
+// will change a []string{""} entrypoint to nil, so we probe the cache with the
+// nil entrypoint.
+func withEntrypointOverride(cmd []string, entrypoint []string) runConfigModifier {
+	return func(runConfig *container.Config) {
+		if len(cmd) > 0 {
+			runConfig.Entrypoint = entrypoint
+		}
+	}
+}
+
+// getShell is a helper function which gets the right shell for prefixing the
+// shell-form of RUN, ENTRYPOINT and CMD instructions
+func getShell(c *container.Config, platform string) []string {
+	if 0 == len(c.Shell) {
+		return append([]string{}, defaultShellForPlatform(platform)[:]...)
+	}
+	return append([]string{}, c.Shell[:]...)
+}
+
+func (b *Builder) probeCache(dispatchState *dispatchState, runConfig *container.Config) (bool, error) {
+	cachedID, err := b.imageProber.Probe(dispatchState.imageID, runConfig)
+	if cachedID == "" || err != nil {
+		return false, err
+	}
+	fmt.Fprint(b.Stdout, " ---> Using cache\n")
+
+	dispatchState.imageID = string(cachedID)
+	b.buildStages.update(dispatchState.imageID)
+	return true, nil
+}
+
+var defaultLogConfig = container.LogConfig{Type: "none"}
+
+func (b *Builder) probeAndCreate(dispatchState *dispatchState, runConfig *container.Config) (string, error) {
+	if hit, err := b.probeCache(dispatchState, runConfig); err != nil || hit {
+		return "", err
+	}
+	// Set a log config to override any default value set on the daemon
+	hostConfig := &container.HostConfig{LogConfig: defaultLogConfig}
+	container, err := b.containerManager.Create(runConfig, hostConfig, b.platform)
+	return container.ID, err
+}
+
+func (b *Builder) create(runConfig *container.Config) (string, error) {
+	hostConfig := hostConfigFromOptions(b.options)
+	container, err := b.containerManager.Create(runConfig, hostConfig, b.platform)
+	if err != nil {
+		return "", err
+	}
+	// TODO: could this be moved into containerManager.Create() ?
+ for _, warning := range container.Warnings { + fmt.Fprintf(b.Stdout, " ---> [Warning] %s\n", warning) + } + fmt.Fprintf(b.Stdout, " ---> Running in %s\n", stringid.TruncateID(container.ID)) + return container.ID, nil +} + +func hostConfigFromOptions(options *types.ImageBuildOptions) *container.HostConfig { + resources := container.Resources{ + CgroupParent: options.CgroupParent, + CPUShares: options.CPUShares, + CPUPeriod: options.CPUPeriod, + CPUQuota: options.CPUQuota, + CpusetCpus: options.CPUSetCPUs, + CpusetMems: options.CPUSetMems, + Memory: options.Memory, + MemorySwap: options.MemorySwap, + Ulimits: options.Ulimits, + } + + return &container.HostConfig{ + SecurityOpt: options.SecurityOpt, + Isolation: options.Isolation, + ShmSize: options.ShmSize, + Resources: resources, + NetworkMode: container.NetworkMode(options.NetworkMode), + // Set a log config to override any default value set on the daemon + LogConfig: defaultLogConfig, + ExtraHosts: options.ExtraHosts, + } +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/internals_test.go b/vendor/github.com/moby/moby/builder/dockerfile/internals_test.go new file mode 100644 index 000000000..8073cc671 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/internals_test.go @@ -0,0 +1,131 @@ +package dockerfile + +import ( + "fmt" + "runtime" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/remotecontext" + "github.com/docker/docker/pkg/archive" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestEmptyDockerfile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test") + defer cleanup() + + createTestTempFile(t, contextDir, builder.DefaultDockerfileName, "", 0777) + + readAndCheckDockerfile(t, "emptyDockerfile", contextDir, "", "the Dockerfile (Dockerfile) cannot be empty") +} + +func TestSymlinkDockerfile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test") + defer cleanup() + + createTestSymlink(t, contextDir, builder.DefaultDockerfileName, "/etc/passwd") + + // The reason the error is "Cannot locate specified Dockerfile" is because + // in the builder, the symlink is resolved within the context, therefore + // Dockerfile -> /etc/passwd becomes etc/passwd from the context which is + // a nonexistent file. 
+ expectedError := fmt.Sprintf("Cannot locate specified Dockerfile: %s", builder.DefaultDockerfileName) + + readAndCheckDockerfile(t, "symlinkDockerfile", contextDir, builder.DefaultDockerfileName, expectedError) +} + +func TestDockerfileOutsideTheBuildContext(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test") + defer cleanup() + + expectedError := "Forbidden path outside the build context: ../../Dockerfile ()" + + readAndCheckDockerfile(t, "DockerfileOutsideTheBuildContext", contextDir, "../../Dockerfile", expectedError) +} + +func TestNonExistingDockerfile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test") + defer cleanup() + + expectedError := "Cannot locate specified Dockerfile: Dockerfile" + + readAndCheckDockerfile(t, "NonExistingDockerfile", contextDir, "Dockerfile", expectedError) +} + +func readAndCheckDockerfile(t *testing.T, testName, contextDir, dockerfilePath, expectedError string) { + tarStream, err := archive.Tar(contextDir, archive.Uncompressed) + require.NoError(t, err) + + defer func() { + if err = tarStream.Close(); err != nil { + t.Fatalf("Error when closing tar stream: %s", err) + } + }() + + if dockerfilePath == "" { // handled in BuildWithContext + dockerfilePath = builder.DefaultDockerfileName + } + + config := backend.BuildConfig{ + Options: &types.ImageBuildOptions{Dockerfile: dockerfilePath}, + Source: tarStream, + } + _, _, err = remotecontext.Detect(config) + assert.EqualError(t, err, expectedError) +} + +func TestCopyRunConfig(t *testing.T) { + defaultEnv := []string{"foo=1"} + defaultCmd := []string{"old"} + + var testcases = []struct { + doc string + modifiers []runConfigModifier + expected *container.Config + }{ + { + doc: "Set the command", + modifiers: []runConfigModifier{withCmd([]string{"new"})}, + expected: &container.Config{ + Cmd: []string{"new"}, + Env: defaultEnv, + }, + }, + { + doc: "Set the command to a comment", + modifiers: []runConfigModifier{withCmdComment("comment", runtime.GOOS)}, + expected: &container.Config{ + Cmd: append(defaultShellForPlatform(runtime.GOOS), "#(nop) ", "comment"), + Env: defaultEnv, + }, + }, + { + doc: "Set the command and env", + modifiers: []runConfigModifier{ + withCmd([]string{"new"}), + withEnv([]string{"one", "two"}), + }, + expected: &container.Config{ + Cmd: []string{"new"}, + Env: []string{"one", "two"}, + }, + }, + } + + for _, testcase := range testcases { + runConfig := &container.Config{ + Cmd: defaultCmd, + Env: defaultEnv, + } + runConfigCopy := copyRunConfig(runConfig, testcase.modifiers...) + assert.Equal(t, testcase.expected, runConfigCopy, testcase.doc) + // Assert the original was not modified + assert.NotEqual(t, runConfig, runConfigCopy, testcase.doc) + } + +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/internals_unix.go b/vendor/github.com/moby/moby/builder/dockerfile/internals_unix.go new file mode 100644 index 000000000..f4784e1cc --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/internals_unix.go @@ -0,0 +1,42 @@ +// +build !windows + +package dockerfile + +import ( + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/system" +) + +// normaliseDest normalises the destination of a COPY/ADD command in a +// platform semantically consistent way. 
+func normaliseDest(workingDir, requested string) (string, error) { + dest := filepath.FromSlash(requested) + endsInSlash := strings.HasSuffix(requested, string(os.PathSeparator)) + if !system.IsAbs(requested) { + dest = filepath.Join(string(os.PathSeparator), filepath.FromSlash(workingDir), dest) + // Make sure we preserve any trailing slash + if endsInSlash { + dest += string(os.PathSeparator) + } + } + return dest, nil +} + +func containsWildcards(name string) bool { + for i := 0; i < len(name); i++ { + ch := name[i] + if ch == '\\' { + i++ + } else if ch == '*' || ch == '?' || ch == '[' { + return true + } + } + return false +} + +func validateCopySourcePath(imageSource *imageMount, origPath string) error { + return nil +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/internals_windows.go b/vendor/github.com/moby/moby/builder/dockerfile/internals_windows.go new file mode 100644 index 000000000..bb3285925 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/internals_windows.go @@ -0,0 +1,95 @@ +package dockerfile + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" +) + +// normaliseDest normalises the destination of a COPY/ADD command in a +// platform semantically consistent way. +func normaliseDest(workingDir, requested string) (string, error) { + dest := filepath.FromSlash(requested) + endsInSlash := strings.HasSuffix(dest, string(os.PathSeparator)) + + // We are guaranteed that the working directory is already consistent, + // However, Windows also has, for now, the limitation that ADD/COPY can + // only be done to the system drive, not any drives that might be present + // as a result of a bind mount. + // + // So... if the path requested is Linux-style absolute (/foo or \\foo), + // we assume it is the system drive. If it is a Windows-style absolute + // (DRIVE:\\foo), error if DRIVE is not C. And finally, ensure we + // strip any configured working directories drive letter so that it + // can be subsequently legitimately converted to a Windows volume-style + // pathname. + + // Not a typo - filepath.IsAbs, not system.IsAbs on this next check as + // we only want to validate where the DriveColon part has been supplied. + if filepath.IsAbs(dest) { + if strings.ToUpper(string(dest[0])) != "C" { + return "", fmt.Errorf("Windows does not support destinations not on the system drive (C:)") + } + dest = dest[2:] // Strip the drive letter + } + + // Cannot handle relative where WorkingDir is not the system drive. + if len(workingDir) > 0 { + if ((len(workingDir) > 1) && !system.IsAbs(workingDir[2:])) || (len(workingDir) == 1) { + return "", fmt.Errorf("Current WorkingDir %s is not platform consistent", workingDir) + } + if !system.IsAbs(dest) { + if string(workingDir[0]) != "C" { + return "", fmt.Errorf("Windows does not support relative paths when WORKDIR is not the system drive") + } + dest = filepath.Join(string(os.PathSeparator), workingDir[2:], dest) + // Make sure we preserve any trailing slash + if endsInSlash { + dest += string(os.PathSeparator) + } + } + } + return dest, nil +} + +func containsWildcards(name string) bool { + for i := 0; i < len(name); i++ { + ch := name[i] + if ch == '*' || ch == '?' 
|| ch == '[' { + return true + } + } + return false +} + +var pathBlacklist = map[string]bool{ + "c:\\": true, + "c:\\windows": true, +} + +func validateCopySourcePath(imageSource *imageMount, origPath string) error { + // validate windows paths from other images + if imageSource == nil { + return nil + } + origPath = filepath.FromSlash(origPath) + p := strings.ToLower(filepath.Clean(origPath)) + if !filepath.IsAbs(p) { + if filepath.VolumeName(p) != "" { + if p[len(p)-2:] == ":." { // case where clean returns weird c:. paths + p = p[:len(p)-1] + } + p += "\\" + } else { + p = filepath.Join("c:\\", p) + } + } + if _, blacklisted := pathBlacklist[p]; blacklisted { + return errors.New("copy from c:\\ or c:\\windows is not allowed on windows") + } + return nil +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/internals_windows_test.go b/vendor/github.com/moby/moby/builder/dockerfile/internals_windows_test.go new file mode 100644 index 000000000..b4c8d4b3c --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/internals_windows_test.go @@ -0,0 +1,53 @@ +// +build windows + +package dockerfile + +import ( + "fmt" + "testing" + + "github.com/docker/docker/pkg/testutil" + "github.com/stretchr/testify/assert" +) + +func TestNormaliseDest(t *testing.T) { + tests := []struct{ current, requested, expected, etext string }{ + {``, `D:\`, ``, `Windows does not support destinations not on the system drive (C:)`}, + {``, `e:/`, ``, `Windows does not support destinations not on the system drive (C:)`}, + {`invalid`, `./c1`, ``, `Current WorkingDir invalid is not platform consistent`}, + {`C:`, ``, ``, `Current WorkingDir C: is not platform consistent`}, + {`C`, ``, ``, `Current WorkingDir C is not platform consistent`}, + {`D:\`, `.`, ``, "Windows does not support relative paths when WORKDIR is not the system drive"}, + {``, `D`, `D`, ``}, + {``, `./a1`, `.\a1`, ``}, + {``, `.\b1`, `.\b1`, ``}, + {``, `/`, `\`, ``}, + {``, `\`, `\`, ``}, + {``, `c:/`, `\`, ``}, + {``, `c:\`, `\`, ``}, + {``, `.`, `.`, ``}, + {`C:\wdd`, `./a1`, `\wdd\a1`, ``}, + {`C:\wde`, `.\b1`, `\wde\b1`, ``}, + {`C:\wdf`, `/`, `\`, ``}, + {`C:\wdg`, `\`, `\`, ``}, + {`C:\wdh`, `c:/`, `\`, ``}, + {`C:\wdi`, `c:\`, `\`, ``}, + {`C:\wdj`, `.`, `\wdj`, ``}, + {`C:\wdk`, `foo/bar`, `\wdk\foo\bar`, ``}, + {`C:\wdl`, `foo\bar`, `\wdl\foo\bar`, ``}, + {`C:\wdm`, `foo/bar/`, `\wdm\foo\bar\`, ``}, + {`C:\wdn`, `foo\bar/`, `\wdn\foo\bar\`, ``}, + } + for _, testcase := range tests { + msg := fmt.Sprintf("Input: %s, %s", testcase.current, testcase.requested) + actual, err := normaliseDest(testcase.current, testcase.requested) + if testcase.etext == "" { + if !assert.NoError(t, err, msg) { + continue + } + assert.Equal(t, testcase.expected, actual, msg) + } else { + testutil.ErrorContains(t, err, testcase.etext) + } + } +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/metrics.go b/vendor/github.com/moby/moby/builder/dockerfile/metrics.go new file mode 100644 index 000000000..5aa953aa7 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/metrics.go @@ -0,0 +1,44 @@ +package dockerfile + +import ( + "github.com/docker/go-metrics" +) + +var ( + buildsTriggered metrics.Counter + buildsFailed metrics.LabeledCounter +) + +// Build metrics prometheus messages, these values must be initialized before +// using them. See the example below in the "builds_failed" metric definition. 
+const ( + metricsDockerfileSyntaxError = "dockerfile_syntax_error" + metricsDockerfileEmptyError = "dockerfile_empty_error" + metricsCommandNotSupportedError = "command_not_supported_error" + metricsErrorProcessingCommandsError = "error_processing_commands_error" + metricsBuildTargetNotReachableError = "build_target_not_reachable_error" + metricsMissingOnbuildArgumentsError = "missing_onbuild_arguments_error" + metricsUnknownInstructionError = "unknown_instruction_error" + metricsBuildCanceled = "build_canceled" +) + +func init() { + buildMetrics := metrics.NewNamespace("builder", "", nil) + + buildsTriggered = buildMetrics.NewCounter("builds_triggered", "Number of triggered image builds") + buildsFailed = buildMetrics.NewLabeledCounter("builds_failed", "Number of failed image builds", "reason") + for _, r := range []string{ + metricsDockerfileSyntaxError, + metricsDockerfileEmptyError, + metricsCommandNotSupportedError, + metricsErrorProcessingCommandsError, + metricsBuildTargetNotReachableError, + metricsMissingOnbuildArgumentsError, + metricsUnknownInstructionError, + metricsBuildCanceled, + } { + buildsFailed.WithValues(r) + } + + metrics.Register(buildMetrics) +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/mockbackend_test.go b/vendor/github.com/moby/moby/builder/dockerfile/mockbackend_test.go new file mode 100644 index 000000000..adc22762e --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/mockbackend_test.go @@ -0,0 +1,130 @@ +package dockerfile + +import ( + "encoding/json" + "io" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + containerpkg "github.com/docker/docker/container" + "github.com/docker/docker/layer" + "golang.org/x/net/context" +) + +// MockBackend implements the builder.Backend interface for unit testing +type MockBackend struct { + containerCreateFunc func(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) + commitFunc func(string, *backend.ContainerCommitConfig) (string, error) + getImageFunc func(string) (builder.Image, builder.ReleaseableLayer, error) + makeImageCacheFunc func(cacheFrom []string, platform string) builder.ImageCache +} + +func (m *MockBackend) ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool, attached chan struct{}) error { + return nil +} + +func (m *MockBackend) ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) { + if m.containerCreateFunc != nil { + return m.containerCreateFunc(config) + } + return container.ContainerCreateCreatedBody{}, nil +} + +func (m *MockBackend) ContainerRm(name string, config *types.ContainerRmConfig) error { + return nil +} + +func (m *MockBackend) Commit(cID string, cfg *backend.ContainerCommitConfig) (string, error) { + if m.commitFunc != nil { + return m.commitFunc(cID, cfg) + } + return "", nil +} + +func (m *MockBackend) ContainerKill(containerID string, sig uint64) error { + return nil +} + +func (m *MockBackend) ContainerStart(containerID string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error { + return nil +} + +func (m *MockBackend) ContainerWait(ctx context.Context, containerID string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error) { + return nil, nil +} + +func (m *MockBackend) ContainerCreateWorkdir(containerID string) error { + return nil +} + +func (m *MockBackend) 
CopyOnBuild(containerID string, destPath string, srcRoot string, srcPath string, decompress bool) error { + return nil +} + +func (m *MockBackend) GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (builder.Image, builder.ReleaseableLayer, error) { + if m.getImageFunc != nil { + return m.getImageFunc(refOrID) + } + + return &mockImage{id: "theid"}, &mockLayer{}, nil +} + +func (m *MockBackend) MakeImageCache(cacheFrom []string, platform string) builder.ImageCache { + if m.makeImageCacheFunc != nil { + return m.makeImageCacheFunc(cacheFrom, platform) + } + return nil +} + +func (m *MockBackend) CreateImage(config []byte, parent string, platform string) (builder.Image, error) { + return nil, nil +} + +type mockImage struct { + id string + config *container.Config +} + +func (i *mockImage) ImageID() string { + return i.id +} + +func (i *mockImage) RunConfig() *container.Config { + return i.config +} + +func (i *mockImage) MarshalJSON() ([]byte, error) { + type rawImage mockImage + return json.Marshal(rawImage(*i)) +} + +type mockImageCache struct { + getCacheFunc func(parentID string, cfg *container.Config) (string, error) +} + +func (mic *mockImageCache) GetCache(parentID string, cfg *container.Config) (string, error) { + if mic.getCacheFunc != nil { + return mic.getCacheFunc(parentID, cfg) + } + return "", nil +} + +type mockLayer struct{} + +func (l *mockLayer) Release() error { + return nil +} + +func (l *mockLayer) Mount() (string, error) { + return "mountPath", nil +} + +func (l *mockLayer) Commit(string) (builder.ReleaseableLayer, error) { + return nil, nil +} + +func (l *mockLayer) DiffID() layer.DiffID { + return layer.DiffID("abcdef") +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/dumper/main.go b/vendor/github.com/moby/moby/builder/dockerfile/parser/dumper/main.go new file mode 100644 index 000000000..ea6205073 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/dumper/main.go @@ -0,0 +1,32 @@ +package main + +import ( + "fmt" + "os" + + "github.com/docker/docker/builder/dockerfile/parser" +) + +func main() { + var f *os.File + var err error + + if len(os.Args) < 2 { + fmt.Println("please supply filename(s)") + os.Exit(1) + } + + for _, fn := range os.Args[1:] { + f, err = os.Open(fn) + if err != nil { + panic(err) + } + defer f.Close() + + result, err := parser.Parse(f) + if err != nil { + panic(err) + } + fmt.Println(result.AST.Dump()) + } +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/json_test.go b/vendor/github.com/moby/moby/builder/dockerfile/parser/json_test.go new file mode 100644 index 000000000..d4489191d --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/json_test.go @@ -0,0 +1,59 @@ +package parser + +import ( + "testing" +) + +var invalidJSONArraysOfStrings = []string{ + `["a",42,"b"]`, + `["a",123.456,"b"]`, + `["a",{},"b"]`, + `["a",{"c": "d"},"b"]`, + `["a",["c"],"b"]`, + `["a",true,"b"]`, + `["a",false,"b"]`, + `["a",null,"b"]`, +} + +var validJSONArraysOfStrings = map[string][]string{ + `[]`: {}, + `[""]`: {""}, + `["a"]`: {"a"}, + `["a","b"]`: {"a", "b"}, + `[ "a", "b" ]`: {"a", "b"}, + `[ "a", "b" ]`: {"a", "b"}, + ` [ "a", "b" ] `: {"a", "b"}, + `["abc 123", "♥", "☃", "\" \\ \/ \b \f \n \r \t \u0000"]`: {"abc 123", "♥", "☃", "\" \\ / \b \f \n \r \t \u0000"}, +} + +func TestJSONArraysOfStrings(t *testing.T) { + for json, expected := range validJSONArraysOfStrings { + d := NewDefaultDirective() + + if node, _, err := 
parseJSON(json, d); err != nil { + t.Fatalf("%q should be a valid JSON array of strings, but wasn't! (err: %q)", json, err) + } else { + i := 0 + for node != nil { + if i >= len(expected) { + t.Fatalf("expected result is shorter than parsed result (%d vs %d+) in %q", len(expected), i+1, json) + } + if node.Value != expected[i] { + t.Fatalf("expected %q (not %q) in %q at pos %d", expected[i], node.Value, json, i) + } + node = node.Next + i++ + } + if i != len(expected) { + t.Fatalf("expected result is longer than parsed result (%d vs %d) in %q", len(expected), i+1, json) + } + } + } + for _, json := range invalidJSONArraysOfStrings { + d := NewDefaultDirective() + + if _, _, err := parseJSON(json, d); err != errDockerfileNotStringArray { + t.Fatalf("%q should be an invalid JSON array of strings, but wasn't!", json) + } + } +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/line_parsers.go b/vendor/github.com/moby/moby/builder/dockerfile/parser/line_parsers.go new file mode 100644 index 000000000..d0e182e8e --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/line_parsers.go @@ -0,0 +1,399 @@ +package parser + +// line parsers are dispatch calls that parse a single unit of text into a +// Node object which contains the whole statement. Dockerfiles have varied +// (but not usually unique, see ONBUILD for a unique example) parsing rules +// per-command, and these unify the processing in a way that makes it +// manageable. + +import ( + "encoding/json" + "errors" + "fmt" + "sort" + "strings" + "unicode" + "unicode/utf8" + + "github.com/docker/docker/builder/dockerfile/command" +) + +var ( + errDockerfileNotStringArray = errors.New("When using JSON array syntax, arrays must be comprised of strings only.") +) + +const ( + commandLabel = "LABEL" +) + +// ignore the current argument. This will still leave a command parsed, but +// will not incorporate the arguments into the ast. +func parseIgnore(rest string, d *Directive) (*Node, map[string]bool, error) { + return &Node{}, nil, nil +} + +// used for onbuild. Could potentially be used for anything that represents a +// statement with sub-statements. +// +// ONBUILD RUN foo bar -> (onbuild (run foo bar)) +// +func parseSubCommand(rest string, d *Directive) (*Node, map[string]bool, error) { + if rest == "" { + return nil, nil, nil + } + + child, err := newNodeFromLine(rest, d) + if err != nil { + return nil, nil, err + } + + return &Node{Children: []*Node{child}}, nil, nil +} + +// helper to parse words (i.e space delimited or quoted strings) in a statement. +// The quotes are preserved as part of this function and they are stripped later +// as part of processWords(). 
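+//
+// For example (mirroring TestParseWords in parser_test.go), with the default
+// escape token:
+//
+//	parseWords(`foo bar 'abc xyz'`, d) // => []string{"foo", "bar", "'abc xyz'"}
+//	parseWords(`foo=bar`, d)           // => []string{"foo=bar"}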
+func parseWords(rest string, d *Directive) []string {
+	const (
+		inSpaces = iota // looking for start of a word
+		inWord
+		inQuote
+	)
+
+	words := []string{}
+	phase := inSpaces
+	word := ""
+	quote := '\000'
+	blankOK := false
+	var ch rune
+	var chWidth int
+
+	for pos := 0; pos <= len(rest); pos += chWidth {
+		if pos != len(rest) {
+			ch, chWidth = utf8.DecodeRuneInString(rest[pos:])
+		}
+
+		if phase == inSpaces { // Looking for start of word
+			if pos == len(rest) { // end of input
+				break
+			}
+			if unicode.IsSpace(ch) { // skip spaces
+				continue
+			}
+			phase = inWord // found it, fall through
+		}
+		if (phase == inWord || phase == inQuote) && (pos == len(rest)) {
+			if blankOK || len(word) > 0 {
+				words = append(words, word)
+			}
+			break
+		}
+		if phase == inWord {
+			if unicode.IsSpace(ch) {
+				phase = inSpaces
+				if blankOK || len(word) > 0 {
+					words = append(words, word)
+				}
+				word = ""
+				blankOK = false
+				continue
+			}
+			if ch == '\'' || ch == '"' {
+				quote = ch
+				blankOK = true
+				phase = inQuote
+			}
+			if ch == d.escapeToken {
+				if pos+chWidth == len(rest) {
+					continue // just skip an escape token at end of line
+				}
+				// If we're not quoted and we see an escape token, then always just
+				// add the escape token plus the char to the word, even if the char
+				// is a quote.
+				word += string(ch)
+				pos += chWidth
+				ch, chWidth = utf8.DecodeRuneInString(rest[pos:])
+			}
+			word += string(ch)
+			continue
+		}
+		if phase == inQuote {
+			if ch == quote {
+				phase = inWord
+			}
+			// The escape token is special except for ' quotes - can't escape anything for '
+			if ch == d.escapeToken && quote != '\'' {
+				if pos+chWidth == len(rest) {
+					phase = inWord
+					continue // just skip the escape token at end
+				}
+				pos += chWidth
+				word += string(ch)
+				ch, chWidth = utf8.DecodeRuneInString(rest[pos:])
+			}
+			word += string(ch)
+		}
+	}
+
+	return words
+}
+
+// parse environment-like statements. Note that this does *not* handle
+// variable interpolation, which will be handled in the evaluator.
+func parseNameVal(rest string, key string, d *Directive) (*Node, error) {
+	// This is kind of tricky because we need to support the old
+	// variant:   KEY name value
+	// as well as the new one:    KEY name=value ...
+	// The trigger to know which one is being used will be whether we hit
+	// a space or = first.  space ==> old, "=" ==> new
+
+	words := parseWords(rest, d)
+	if len(words) == 0 {
+		return nil, nil
+	}
+
+	// Old format (KEY name value)
+	if !strings.Contains(words[0], "=") {
+		parts := tokenWhitespace.Split(rest, 2)
+		if len(parts) < 2 {
+			return nil, fmt.Errorf("%s must have two arguments", key)
+		}
+		return newKeyValueNode(parts[0], parts[1]), nil
+	}
+
+	var rootNode *Node
+	var prevNode *Node
+	for _, word := range words {
+		if !strings.Contains(word, "=") {
+			return nil, fmt.Errorf("Syntax error - can't find = in %q. Must be of the form: name=value", word)
+		}
+
+		parts := strings.SplitN(word, "=", 2)
+		node := newKeyValueNode(parts[0], parts[1])
+		rootNode, prevNode = appendKeyValueNode(node, rootNode, prevNode)
+	}
+
+	return rootNode, nil
+}
+
+func newKeyValueNode(key, value string) *Node {
+	return &Node{
+		Value: key,
+		Next:  &Node{Value: value},
+	}
+}
+
+func appendKeyValueNode(node, rootNode, prevNode *Node) (*Node, *Node) {
+	if rootNode == nil {
+		rootNode = node
+	}
+	if prevNode != nil {
+		prevNode.Next = node
+	}
+
+	prevNode = node.Next
+	return rootNode, prevNode
+}
+
+func parseEnv(rest string, d *Directive) (*Node, map[string]bool, error) {
+	node, err := parseNameVal(rest, "ENV", d)
+	return node, nil, err
+}
+
+func parseLabel(rest string, d *Directive) (*Node, map[string]bool, error) {
+	node, err := parseNameVal(rest, commandLabel, d)
+	return node, nil, err
+}
+
+// NodeFromLabels returns a Node for the injected labels
+func NodeFromLabels(labels map[string]string) *Node {
+	keys := []string{}
+	for key := range labels {
+		keys = append(keys, key)
+	}
+	// Sort the labels to have a repeatable order
+	sort.Strings(keys)
+
+	labelPairs := []string{}
+	var rootNode *Node
+	var prevNode *Node
+	for _, key := range keys {
+		value := labels[key]
+		labelPairs = append(labelPairs, fmt.Sprintf("%q='%s'", key, value))
+		// Value must be single quoted to prevent env variable expansion
+		// See https://github.com/docker/docker/issues/26027
+		node := newKeyValueNode(key, "'"+value+"'")
+		rootNode, prevNode = appendKeyValueNode(node, rootNode, prevNode)
+	}
+
+	return &Node{
+		Value:    command.Label,
+		Original: commandLabel + " " + strings.Join(labelPairs, " "),
+		Next:     rootNode,
+	}
+}
+
+// parses a statement containing one or more keyword definition(s) and/or
+// value assignments, like `name1 name2= name3="" name4=value`.
+// Note that this is a stricter format than the old format of assignment,
+// allowed by parseNameVal(), in a way that this only allows assignment of the
+// form `keyword=[]` like `name2=`, `name3=""`, and `name4=value` above.
+// In addition, a keyword definition alone is of the form `keyword` like `name1`
+// above. And the assignments `name2=` and `name3=""` are equivalent and
+// assign an empty value to the respective keywords.
+func parseNameOrNameVal(rest string, d *Directive) (*Node, map[string]bool, error) {
+	words := parseWords(rest, d)
+	if len(words) == 0 {
+		return nil, nil, nil
+	}
+
+	var (
+		rootnode *Node
+		prevNode *Node
+	)
+	for i, word := range words {
+		node := &Node{}
+		node.Value = word
+		if i == 0 {
+			rootnode = node
+		} else {
+			prevNode.Next = node
+		}
+		prevNode = node
+	}
+
+	return rootnode, nil, nil
+}
+
+// parses a whitespace-delimited set of arguments. The result is effectively a
+// linked list of string arguments.
+func parseStringsWhitespaceDelimited(rest string, d *Directive) (*Node, map[string]bool, error) {
+	if rest == "" {
+		return nil, nil, nil
+	}
+
+	node := &Node{}
+	rootnode := node
+	prevnode := node
+	for _, str := range tokenWhitespace.Split(rest, -1) { // use regexp
+		prevnode = node
+		node.Value = str
+		node.Next = &Node{}
+		node = node.Next
+	}
+
+	// XXX to get around regexp.Split *always* providing an empty string at the
+	// end due to how our loop is constructed, nil out the last node in the
+	// chain.
+	prevnode.Next = nil
+
+	return rootnode, nil, nil
+}
+
+// parseString just takes the rest of the line and returns it as the value of
+// a single working node.
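+// For example, `MAINTAINER John Smith` produces a node that dumps as
+// (maintainer "John Smith"); per the dispatch table in parser.go, MAINTAINER,
+// USER, STOPSIGNAL and WORKDIR are all parsed this way.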
+func parseString(rest string, d *Directive) (*Node, map[string]bool, error) {
+	if rest == "" {
+		return nil, nil, nil
+	}
+	n := &Node{}
+	n.Value = rest
+	return n, nil, nil
+}
+
+// parseJSON converts JSON arrays to an AST.
+func parseJSON(rest string, d *Directive) (*Node, map[string]bool, error) {
+	rest = strings.TrimLeftFunc(rest, unicode.IsSpace)
+	if !strings.HasPrefix(rest, "[") {
+		return nil, nil, fmt.Errorf(`Error parsing "%s" as a JSON array`, rest)
+	}
+
+	var myJSON []interface{}
+	if err := json.NewDecoder(strings.NewReader(rest)).Decode(&myJSON); err != nil {
+		return nil, nil, err
+	}
+
+	var top, prev *Node
+	for _, str := range myJSON {
+		s, ok := str.(string)
+		if !ok {
+			return nil, nil, errDockerfileNotStringArray
+		}
+
+		node := &Node{Value: s}
+		if prev == nil {
+			top = node
+		} else {
+			prev.Next = node
+		}
+		prev = node
+	}
+
+	return top, map[string]bool{"json": true}, nil
+}
+
+// parseMaybeJSON determines if the argument appears to be a JSON array. If
+// so, passes to parseJSON; if not, returns the rest of the line as a single
+// node.
+func parseMaybeJSON(rest string, d *Directive) (*Node, map[string]bool, error) {
+	if rest == "" {
+		return nil, nil, nil
+	}
+
+	node, attrs, err := parseJSON(rest, d)
+
+	if err == nil {
+		return node, attrs, nil
+	}
+	if err == errDockerfileNotStringArray {
+		return nil, nil, err
+	}
+
+	node = &Node{}
+	node.Value = rest
+	return node, nil, nil
+}
+
+// parseMaybeJSONToList determines if the argument appears to be a JSON array. If
+// so, passes to parseJSON; if not, attempts to parse it as a whitespace-
+// delimited string.
+func parseMaybeJSONToList(rest string, d *Directive) (*Node, map[string]bool, error) {
+	node, attrs, err := parseJSON(rest, d)
+
+	if err == nil {
+		return node, attrs, nil
+	}
+	if err == errDockerfileNotStringArray {
+		return nil, nil, err
+	}
+
+	return parseStringsWhitespaceDelimited(rest, d)
+}
+
+// The HEALTHCHECK command is like parseMaybeJSON, but has an extra type argument.
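+// For example, for the shell form
+//
+//	HEALTHCHECK CMD curl -f http://localhost/ || exit 1
+//
+// the type argument is "CMD" and the remainder is handed to parseMaybeJSON,
+// so the exec form HEALTHCHECK CMD ["curl", "-f", "http://localhost/"] is
+// accepted as well (a sketch of the shape, not taken from this file's tests).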
+func parseHealthConfig(rest string, d *Directive) (*Node, map[string]bool, error) {
+	// Find end of first argument
+	var sep int
+	for ; sep < len(rest); sep++ {
+		if unicode.IsSpace(rune(rest[sep])) {
+			break
+		}
+	}
+	next := sep
+	for ; next < len(rest); next++ {
+		if !unicode.IsSpace(rune(rest[next])) {
+			break
+		}
+	}
+
+	if sep == 0 {
+		return nil, nil, nil
+	}
+
+	typ := rest[:sep]
+	cmd, attrs, err := parseMaybeJSON(rest[next:], d)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return &Node{Value: typ, Next: cmd}, attrs, err
+}
diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/line_parsers_test.go b/vendor/github.com/moby/moby/builder/dockerfile/parser/line_parsers_test.go
new file mode 100644
index 000000000..cf0b21bb5
--- /dev/null
+++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/line_parsers_test.go
@@ -0,0 +1,74 @@
+package parser
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestParseNameValOldFormat(t *testing.T) {
+	directive := Directive{}
+	node, err := parseNameVal("foo bar", "LABEL", &directive)
+	assert.NoError(t, err)
+
+	expected := &Node{
+		Value: "foo",
+		Next:  &Node{Value: "bar"},
+	}
+	assert.Equal(t, expected, node)
+}
+
+func TestParseNameValNewFormat(t *testing.T) {
+	directive := Directive{}
+	node, err := parseNameVal("foo=bar thing=star", "LABEL", &directive)
+	assert.NoError(t, err)
+
+	expected := &Node{
+		Value: "foo",
+		Next: &Node{
+			Value: "bar",
+			Next: &Node{
+				Value: "thing",
+				Next: &Node{
+					Value: "star",
+				},
+			},
+		},
+	}
+	assert.Equal(t, expected, node)
+}
+
+func TestNodeFromLabels(t *testing.T) {
+	labels := map[string]string{
+		"foo":   "bar",
+		"weird": "first' second",
+	}
+	expected := &Node{
+		Value:    "label",
+		Original: `LABEL "foo"='bar' "weird"='first' second'`,
+		Next: &Node{
+			Value: "foo",
+			Next: &Node{
+				Value: "'bar'",
+				Next: &Node{
+					Value: "weird",
+					Next: &Node{
+						Value: "'first' second'",
+					},
+				},
+			},
+		},
+	}
+
+	node := NodeFromLabels(labels)
+	assert.Equal(t, expected, node)
+
+}
+
+func TestParseNameValWithoutVal(t *testing.T) {
+	directive := Directive{}
+	// In Config.Env, a variable without `=` is removed from the environment. (#31634)
+	// However, in Dockerfile, we don't allow "unsetting" an environment variable. (#11922)
+	_, err := parseNameVal("foo", "ENV", &directive)
+	assert.Error(t, err, "ENV must have two arguments")
+}
diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/parser.go b/vendor/github.com/moby/moby/builder/dockerfile/parser/parser.go
new file mode 100644
index 000000000..7f07ff215
--- /dev/null
+++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/parser.go
@@ -0,0 +1,355 @@
+// Package parser implements a parser and parse tree dumper for Dockerfiles.
+package parser
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"regexp"
+	"runtime"
+	"strconv"
+	"strings"
+	"unicode"
+
+	"github.com/docker/docker/builder/dockerfile/command"
+	"github.com/docker/docker/pkg/system"
+	"github.com/pkg/errors"
+)
+
+// Node is a structure used to represent a parse tree.
+//
+// In the node there are three fields, Value, Next, and Children. Value is the
+// current token's string value. Next is always the next non-child token, and
+// children contains all the children. Here's an example:
+//
+// (value next (child child-next child-next-next) next-next)
+//
+// This data structure is frankly pretty lousy for handling complex languages,
+// but lucky for us the Dockerfile isn't very complicated. This structure
+// works a little more effectively than a "proper" parse tree for our needs.
+//
+type Node struct {
+	Value      string          // actual content
+	Next       *Node           // the next item in the current sexp
+	Children   []*Node         // the children of this sexp
+	Attributes map[string]bool // special attributes for this node
+	Original   string          // original line used before parsing
+	Flags      []string        // only top Node should have this set
+	StartLine  int             // the line in the original dockerfile where the node begins
+	endLine    int             // the line in the original dockerfile where the node ends
+}
+
+// Dump dumps the AST defined by `node` as a list of sexps.
+// Returns a string suitable for printing.
+func (node *Node) Dump() string {
+	str := ""
+	str += node.Value
+
+	if len(node.Flags) > 0 {
+		str += fmt.Sprintf(" %q", node.Flags)
+	}
+
+	for _, n := range node.Children {
+		str += "(" + n.Dump() + ")\n"
+	}
+
+	for n := node.Next; n != nil; n = n.Next {
+		if len(n.Children) > 0 {
+			str += " " + n.Dump()
+		} else {
+			str += " " + strconv.Quote(n.Value)
+		}
+	}
+
+	return strings.TrimSpace(str)
+}
+
+func (node *Node) lines(start, end int) {
+	node.StartLine = start
+	node.endLine = end
+}
+
+// AddChild adds a new child node, and updates line information
+func (node *Node) AddChild(child *Node, startLine, endLine int) {
+	child.lines(startLine, endLine)
+	if node.StartLine < 0 {
+		node.StartLine = startLine
+	}
+	node.endLine = endLine
+	node.Children = append(node.Children, child)
+}
+
+var (
+	dispatch             map[string]func(string, *Directive) (*Node, map[string]bool, error)
+	tokenWhitespace      = regexp.MustCompile(`[\t\v\f\r ]+`)
+	tokenEscapeCommand   = regexp.MustCompile(`^#[ \t]*escape[ \t]*=[ \t]*(?P<escapechar>.).*$`)
+	tokenPlatformCommand = regexp.MustCompile(`^#[ \t]*platform[ \t]*=[ \t]*(?P<platform>.*)$`)
+	tokenComment         = regexp.MustCompile(`^#.*$`)
+)
+
+// DefaultEscapeToken is the default escape token
+const DefaultEscapeToken = '\\'
+
+// defaultPlatformToken is the platform assumed for the build if not explicitly provided
+var defaultPlatformToken = runtime.GOOS
+
+// Directive is the structure used during a build run to hold the state of
+// parsing directives.
+type Directive struct {
+	escapeToken           rune           // Current escape token
+	platformToken         string         // Current platform token
+	lineContinuationRegex *regexp.Regexp // Current line continuation regex
+	processingComplete    bool           // Whether we are done looking for directives
+	escapeSeen            bool           // Whether the escape directive has been seen
+	platformSeen          bool           // Whether the platform directive has been seen
+}
+
+// setEscapeToken sets the default token for escaping characters in a Dockerfile.
+func (d *Directive) setEscapeToken(s string) error {
+	if s != "`" && s != "\\" {
+		return fmt.Errorf("invalid ESCAPE '%s'. Must be ` or \\", s)
+	}
+	d.escapeToken = rune(s[0])
+	d.lineContinuationRegex = regexp.MustCompile(`\` + s + `[ \t]*$`)
+	return nil
+}
+
+// setPlatformToken sets the default platform for pulling images in a Dockerfile.
+func (d *Directive) setPlatformToken(s string) error {
+	s = strings.ToLower(s)
+	valid := []string{runtime.GOOS}
+	if system.LCOWSupported() {
+		valid = append(valid, "linux")
+	}
+	for _, item := range valid {
+		if s == item {
+			d.platformToken = s
+			return nil
+		}
+	}
+	return fmt.Errorf("invalid PLATFORM '%s'. Must be one of %v", s, valid)
+}
+
+// possibleParserDirective looks for one or more parser directives '# escapeToken=<token>' and
+// '# platform=<platform>'. Parser directives must precede any builder instruction
+// or other comments, and cannot be repeated.
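+//
+// For example, a Dockerfile that begins with
+//
+//	# escape=`
+//	FROM microsoft/nanoserver
+//
+// switches the escape (line-continuation) token from the default \ to `,
+// which is convenient on Windows where \ is the path separator. Similarly,
+// when LCOW is supported, `# platform=linux` selects the platform images
+// are pulled for.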
+func (d *Directive) possibleParserDirective(line string) error { + if d.processingComplete { + return nil + } + + tecMatch := tokenEscapeCommand.FindStringSubmatch(strings.ToLower(line)) + if len(tecMatch) != 0 { + for i, n := range tokenEscapeCommand.SubexpNames() { + if n == "escapechar" { + if d.escapeSeen == true { + return errors.New("only one escape parser directive can be used") + } + d.escapeSeen = true + return d.setEscapeToken(tecMatch[i]) + } + } + } + + // TODO @jhowardmsft LCOW Support: Eventually this check can be removed, + // but only recognise a platform token if running in LCOW mode. + if system.LCOWSupported() { + tpcMatch := tokenPlatformCommand.FindStringSubmatch(strings.ToLower(line)) + if len(tpcMatch) != 0 { + for i, n := range tokenPlatformCommand.SubexpNames() { + if n == "platform" { + if d.platformSeen == true { + return errors.New("only one platform parser directive can be used") + } + d.platformSeen = true + return d.setPlatformToken(tpcMatch[i]) + } + } + } + } + + d.processingComplete = true + return nil +} + +// NewDefaultDirective returns a new Directive with the default escapeToken token +func NewDefaultDirective() *Directive { + directive := Directive{} + directive.setEscapeToken(string(DefaultEscapeToken)) + directive.setPlatformToken(defaultPlatformToken) + return &directive +} + +func init() { + // Dispatch Table. see line_parsers.go for the parse functions. + // The command is parsed and mapped to the line parser. The line parser + // receives the arguments but not the command, and returns an AST after + // reformulating the arguments according to the rules in the parser + // functions. Errors are propagated up by Parse() and the resulting AST can + // be incorporated directly into the existing AST as a next. + dispatch = map[string]func(string, *Directive) (*Node, map[string]bool, error){ + command.Add: parseMaybeJSONToList, + command.Arg: parseNameOrNameVal, + command.Cmd: parseMaybeJSON, + command.Copy: parseMaybeJSONToList, + command.Entrypoint: parseMaybeJSON, + command.Env: parseEnv, + command.Expose: parseStringsWhitespaceDelimited, + command.From: parseStringsWhitespaceDelimited, + command.Healthcheck: parseHealthConfig, + command.Label: parseLabel, + command.Maintainer: parseString, + command.Onbuild: parseSubCommand, + command.Run: parseMaybeJSON, + command.Shell: parseMaybeJSON, + command.StopSignal: parseString, + command.User: parseString, + command.Volume: parseMaybeJSONToList, + command.Workdir: parseString, + } +} + +// newNodeFromLine splits the line into parts, and dispatches to a function +// based on the command and command arguments. A Node is created from the +// result of the dispatch. 
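+//
+// For instance, the line `ENV foo=bar` dispatches "env" to parseEnv and
+// yields a node that dumps as (env "foo" "bar"), while an unrecognised
+// instruction falls through to parseIgnore, which leaves the command parsed
+// but drops its arguments from the AST.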
+func newNodeFromLine(line string, directive *Directive) (*Node, error) {
+	cmd, flags, args, err := splitCommand(line)
+	if err != nil {
+		return nil, err
+	}
+
+	fn := dispatch[cmd]
+	// Ignore invalid Dockerfile instructions
+	if fn == nil {
+		fn = parseIgnore
+	}
+	next, attrs, err := fn(args, directive)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Node{
+		Value:      cmd,
+		Original:   line,
+		Flags:      flags,
+		Next:       next,
+		Attributes: attrs,
+	}, nil
+}
+
+// Result is the result of parsing a Dockerfile
+type Result struct {
+	AST         *Node
+	EscapeToken rune
+	Platform    string
+	Warnings    []string
+}
+
+// PrintWarnings to the writer
+func (r *Result) PrintWarnings(out io.Writer) {
+	if len(r.Warnings) == 0 {
+		return
+	}
+	fmt.Fprintf(out, "%s\n", strings.Join(r.Warnings, "\n"))
+}
+
+// Parse reads lines from a Reader, parses the lines into an AST, and returns
+// the AST along with the escape token and any warnings.
+func Parse(rwc io.Reader) (*Result, error) {
+	d := NewDefaultDirective()
+	currentLine := 0
+	root := &Node{StartLine: -1}
+	scanner := bufio.NewScanner(rwc)
+	warnings := []string{}
+
+	var err error
+	for scanner.Scan() {
+		bytesRead := scanner.Bytes()
+		if currentLine == 0 {
+			// First line, strip the byte-order-marker if present
+			bytesRead = bytes.TrimPrefix(bytesRead, utf8bom)
+		}
+		bytesRead, err = processLine(d, bytesRead, true)
+		if err != nil {
+			return nil, err
+		}
+		currentLine++
+
+		startLine := currentLine
+		line, isEndOfLine := trimContinuationCharacter(string(bytesRead), d)
+		if isEndOfLine && line == "" {
+			continue
+		}
+
+		var hasEmptyContinuationLine bool
+		for !isEndOfLine && scanner.Scan() {
+			bytesRead, err := processLine(d, scanner.Bytes(), false)
+			if err != nil {
+				return nil, err
+			}
+			currentLine++
+
+			if isEmptyContinuationLine(bytesRead) {
+				hasEmptyContinuationLine = true
+				continue
+			}
+
+			continuationLine := string(bytesRead)
+			continuationLine, isEndOfLine = trimContinuationCharacter(continuationLine, d)
+			line += continuationLine
+		}
+
+		if hasEmptyContinuationLine {
+			warning := "[WARNING]: Empty continuation line found in:\n    " + line
+			warnings = append(warnings, warning)
+		}
+
+		child, err := newNodeFromLine(line, d)
+		if err != nil {
+			return nil, err
+		}
+		root.AddChild(child, startLine, currentLine)
+	}
+
+	if len(warnings) > 0 {
+		warnings = append(warnings, "[WARNING]: Empty continuation lines will become errors in a future release.")
+	}
+	return &Result{
+		AST:         root,
+		Warnings:    warnings,
+		EscapeToken: d.escapeToken,
+		Platform:    d.platformToken,
+	}, nil
+}
+
+func trimComments(src []byte) []byte {
+	return tokenComment.ReplaceAll(src, []byte{})
+}
+
+func trimWhitespace(src []byte) []byte {
+	return bytes.TrimLeftFunc(src, unicode.IsSpace)
+}
+
+func isEmptyContinuationLine(line []byte) bool {
+	return len(trimComments(trimWhitespace(line))) == 0
+}
+
+var utf8bom = []byte{0xEF, 0xBB, 0xBF}
+
+func trimContinuationCharacter(line string, d *Directive) (string, bool) {
+	if d.lineContinuationRegex.MatchString(line) {
+		line = d.lineContinuationRegex.ReplaceAllString(line, "")
+		return line, false
+	}
+	return line, true
+}
+
+// TODO: remove stripLeftWhitespace after deprecation period. It seems silly
+// to preserve whitespace on continuation lines. Why is that done?
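+//
+// A rough sketch of processLine's behaviour (assuming no parser directive is
+// matched):
+//
+//	processLine(d, []byte("# a comment"), true)    // => []byte(""), nil
+//	processLine(d, []byte("  RUN echo hi"), true)  // => []byte("RUN echo hi"), nil
+//	processLine(d, []byte("  RUN echo hi"), false) // => []byte("  RUN echo hi"), nil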
+func processLine(d *Directive, token []byte, stripLeftWhitespace bool) ([]byte, error) { + if stripLeftWhitespace { + token = trimWhitespace(token) + } + return trimComments(token), d.possibleParserDirective(string(token)) +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/parser_test.go b/vendor/github.com/moby/moby/builder/dockerfile/parser/parser_test.go new file mode 100644 index 000000000..bb057ecab --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/parser_test.go @@ -0,0 +1,154 @@ +package parser + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const testDir = "testfiles" +const negativeTestDir = "testfiles-negative" +const testFileLineInfo = "testfile-line/Dockerfile" + +func getDirs(t *testing.T, dir string) []string { + f, err := os.Open(dir) + require.NoError(t, err) + defer f.Close() + + dirs, err := f.Readdirnames(0) + require.NoError(t, err) + return dirs +} + +func TestParseErrorCases(t *testing.T) { + for _, dir := range getDirs(t, negativeTestDir) { + dockerfile := filepath.Join(negativeTestDir, dir, "Dockerfile") + + df, err := os.Open(dockerfile) + require.NoError(t, err, dockerfile) + defer df.Close() + + _, err = Parse(df) + assert.Error(t, err, dockerfile) + } +} + +func TestParseCases(t *testing.T) { + for _, dir := range getDirs(t, testDir) { + dockerfile := filepath.Join(testDir, dir, "Dockerfile") + resultfile := filepath.Join(testDir, dir, "result") + + df, err := os.Open(dockerfile) + require.NoError(t, err, dockerfile) + defer df.Close() + + result, err := Parse(df) + require.NoError(t, err, dockerfile) + + content, err := ioutil.ReadFile(resultfile) + require.NoError(t, err, resultfile) + + if runtime.GOOS == "windows" { + // CRLF --> CR to match Unix behavior + content = bytes.Replace(content, []byte{'\x0d', '\x0a'}, []byte{'\x0a'}, -1) + } + assert.Equal(t, result.AST.Dump()+"\n", string(content), "In "+dockerfile) + } +} + +func TestParseWords(t *testing.T) { + tests := []map[string][]string{ + { + "input": {"foo"}, + "expect": {"foo"}, + }, + { + "input": {"foo bar"}, + "expect": {"foo", "bar"}, + }, + { + "input": {"foo\\ bar"}, + "expect": {"foo\\ bar"}, + }, + { + "input": {"foo=bar"}, + "expect": {"foo=bar"}, + }, + { + "input": {"foo bar 'abc xyz'"}, + "expect": {"foo", "bar", "'abc xyz'"}, + }, + { + "input": {`foo bar "abc xyz"`}, + "expect": {"foo", "bar", `"abc xyz"`}, + }, + { + "input": {"àöû"}, + "expect": {"àöû"}, + }, + { + "input": {`föo bàr "âbc xÿz"`}, + "expect": {"föo", "bàr", `"âbc xÿz"`}, + }, + } + + for _, test := range tests { + words := parseWords(test["input"][0], NewDefaultDirective()) + assert.Equal(t, test["expect"], words) + } +} + +func TestParseIncludesLineNumbers(t *testing.T) { + df, err := os.Open(testFileLineInfo) + require.NoError(t, err) + defer df.Close() + + result, err := Parse(df) + require.NoError(t, err) + + ast := result.AST + assert.Equal(t, 5, ast.StartLine) + assert.Equal(t, 31, ast.endLine) + assert.Len(t, ast.Children, 3) + expected := [][]int{ + {5, 5}, + {11, 12}, + {17, 31}, + } + for i, child := range ast.Children { + msg := fmt.Sprintf("Child %d", i) + assert.Equal(t, expected[i], []int{child.StartLine, child.endLine}, msg) + } +} + +func TestParseWarnsOnEmptyContinutationLine(t *testing.T) { + dockerfile := bytes.NewBufferString(` +FROM alpine:3.6 + +RUN something \ + + following \ + + more + +RUN another \ + + thing + `) + + 
result, err := Parse(dockerfile) + require.NoError(t, err) + warnings := result.Warnings + assert.Len(t, warnings, 3) + assert.Contains(t, warnings[0], "Empty continuation line found in") + assert.Contains(t, warnings[0], "RUN something following more") + assert.Contains(t, warnings[1], "RUN another thing") + assert.Contains(t, warnings[2], "will become errors in a future release") +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/split_command.go b/vendor/github.com/moby/moby/builder/dockerfile/parser/split_command.go new file mode 100644 index 000000000..171f454f6 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/split_command.go @@ -0,0 +1,118 @@ +package parser + +import ( + "strings" + "unicode" +) + +// splitCommand takes a single line of text and parses out the cmd and args, +// which are used for dispatching to more exact parsing functions. +func splitCommand(line string) (string, []string, string, error) { + var args string + var flags []string + + // Make sure we get the same results irrespective of leading/trailing spaces + cmdline := tokenWhitespace.Split(strings.TrimSpace(line), 2) + cmd := strings.ToLower(cmdline[0]) + + if len(cmdline) == 2 { + var err error + args, flags, err = extractBuilderFlags(cmdline[1]) + if err != nil { + return "", nil, "", err + } + } + + return cmd, flags, strings.TrimSpace(args), nil +} + +func extractBuilderFlags(line string) (string, []string, error) { + // Parses the BuilderFlags and returns the remaining part of the line + + const ( + inSpaces = iota // looking for start of a word + inWord + inQuote + ) + + words := []string{} + phase := inSpaces + word := "" + quote := '\000' + blankOK := false + var ch rune + + for pos := 0; pos <= len(line); pos++ { + if pos != len(line) { + ch = rune(line[pos]) + } + + if phase == inSpaces { // Looking for start of word + if pos == len(line) { // end of input + break + } + if unicode.IsSpace(ch) { // skip spaces + continue + } + + // Only keep going if the next word starts with -- + if ch != '-' || pos+1 == len(line) || rune(line[pos+1]) != '-' { + return line[pos:], words, nil + } + + phase = inWord // found something with "--", fall through + } + if (phase == inWord || phase == inQuote) && (pos == len(line)) { + if word != "--" && (blankOK || len(word) > 0) { + words = append(words, word) + } + break + } + if phase == inWord { + if unicode.IsSpace(ch) { + phase = inSpaces + if word == "--" { + return line[pos:], words, nil + } + if blankOK || len(word) > 0 { + words = append(words, word) + } + word = "" + blankOK = false + continue + } + if ch == '\'' || ch == '"' { + quote = ch + blankOK = true + phase = inQuote + continue + } + if ch == '\\' { + if pos+1 == len(line) { + continue // just skip \ at end + } + pos++ + ch = rune(line[pos]) + } + word += string(ch) + continue + } + if phase == inQuote { + if ch == quote { + phase = inWord + continue + } + if ch == '\\' { + if pos+1 == len(line) { + phase = inWord + continue // just skip \ at end + } + pos++ + ch = rune(line[pos]) + } + word += string(ch) + } + } + + return "", words, nil +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfile-line/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfile-line/Dockerfile new file mode 100644 index 000000000..c7601c9f6 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfile-line/Dockerfile @@ -0,0 +1,35 @@ +# ESCAPE=\ + + + +FROM brimstone/ubuntu:14.04 + + +# TORUN -v 
/var/run/docker.sock:/var/run/docker.sock + + +ENV GOPATH \ +/go + + + +# Install the packages we need, clean up after them and us +RUN apt-get update \ + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ + + + && apt-get install -y --no-install-recommends git golang ca-certificates \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists \ + + && go get -v github.com/brimstone/consuldock \ + && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \ + + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ + && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ + && rm /tmp/dpkg.* \ + && rm -rf $GOPATH + + + + diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile new file mode 100644 index 000000000..1d6557879 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile @@ -0,0 +1,3 @@ +FROM busybox + +ENV PATH diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile new file mode 100644 index 000000000..d1be4596c --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile @@ -0,0 +1 @@ +CMD [ "echo", [ "nested json" ] ] diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile new file mode 100644 index 000000000..035b4e8bb --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile @@ -0,0 +1,11 @@ +FROM ubuntu:14.04 +LABEL maintainer Seongyeol Lim + +COPY . /go/src/github.com/docker/docker +ADD . / +ADD null / +COPY nullfile /tmp +ADD [ "vimrc", "/tmp" ] +COPY [ "bashrc", "/tmp" ] +COPY [ "test file", "/tmp" ] +ADD [ "test file", "/tmp/test file" ] diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result new file mode 100644 index 000000000..d1f71ecc5 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result @@ -0,0 +1,10 @@ +(from "ubuntu:14.04") +(label "maintainer" "Seongyeol Lim ") +(copy "." "/go/src/github.com/docker/docker") +(add "." 
"/") +(add "null" "/") +(copy "nullfile" "/tmp") +(add "vimrc" "/tmp") +(copy "bashrc" "/tmp") +(copy "test file" "/tmp") +(add "test file" "/tmp/test file") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile new file mode 100644 index 000000000..9c0952acb --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile @@ -0,0 +1,26 @@ +#escape=\ +FROM brimstone/ubuntu:14.04 + +LABEL maintainer brimstone@the.narro.ws + +# TORUN -v /var/run/docker.sock:/var/run/docker.sock + +ENV GOPATH /go + +# Set our command +ENTRYPOINT ["/usr/local/bin/consuldock"] + +# Install the packages we need, clean up after them and us +RUN apt-get update \ + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ + && apt-get install -y --no-install-recommends git golang ca-certificates \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists \ + + && go get -v github.com/brimstone/consuldock \ + && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \ + + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ + && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ + && rm /tmp/dpkg.* \ + && rm -rf $GOPATH diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/brimstone-consuldock/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/brimstone-consuldock/result new file mode 100644 index 000000000..3b45db62b --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/brimstone-consuldock/result @@ -0,0 +1,5 @@ +(from "brimstone/ubuntu:14.04") +(label "maintainer" "brimstone@the.narro.ws") +(env "GOPATH" "/go") +(entrypoint "/usr/local/bin/consuldock") +(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates && apt-get clean && rm -rf /var/lib/apt/lists \t&& go get -v github.com/brimstone/consuldock && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile new file mode 100644 index 000000000..25ae35216 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile @@ -0,0 +1,52 @@ +FROM brimstone/ubuntu:14.04 + +CMD [] + +ENTRYPOINT ["/usr/bin/consul", "agent", "-server", "-data-dir=/consul", "-client=0.0.0.0", "-ui-dir=/webui"] + +EXPOSE 8500 8600 8400 8301 8302 + +RUN apt-get update \ + && apt-get install -y unzip wget \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists + +RUN cd /tmp \ + && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \ + -O web_ui.zip \ + && unzip web_ui.zip \ + && mv dist /webui \ + && rm web_ui.zip + +RUN apt-get update \ + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ + && apt-get install -y --no-install-recommends unzip wget \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists \ + + && cd /tmp \ + && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \ + -O web_ui.zip \ + && unzip web_ui.zip \ + && 
mv dist /webui \ + && rm web_ui.zip \ + + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ + && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ + && rm /tmp/dpkg.* + +ENV GOPATH /go + +RUN apt-get update \ + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ + && apt-get install -y --no-install-recommends git golang ca-certificates build-essential \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists \ + + && go get -v github.com/hashicorp/consul \ + && mv $GOPATH/bin/consul /usr/bin/consul \ + + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ + && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ + && rm /tmp/dpkg.* \ + && rm -rf $GOPATH diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/brimstone-docker-consul/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/brimstone-docker-consul/result new file mode 100644 index 000000000..16492e516 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/brimstone-docker-consul/result @@ -0,0 +1,9 @@ +(from "brimstone/ubuntu:14.04") +(cmd) +(entrypoint "/usr/bin/consul" "agent" "-server" "-data-dir=/consul" "-client=0.0.0.0" "-ui-dir=/webui") +(expose "8500" "8600" "8400" "8301" "8302") +(run "apt-get update && apt-get install -y unzip wget \t&& apt-get clean \t&& rm -rf /var/lib/apt/lists") +(run "cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip") +(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends unzip wget && apt-get clean && rm -rf /var/lib/apt/lists && cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.*") +(env "GOPATH" "/go") +(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates build-essential && apt-get clean && rm -rf /var/lib/apt/lists \t&& go get -v github.com/hashicorp/consul \t&& mv $GOPATH/bin/consul /usr/bin/consul \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/continue-at-eof/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/continue-at-eof/Dockerfile new file mode 100644 index 000000000..a8ec369ad --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/continue-at-eof/Dockerfile @@ -0,0 +1,3 @@ +FROM alpine:3.5 + +RUN something \ \ No newline at end of file diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/continue-at-eof/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/continue-at-eof/result new file mode 100644 index 000000000..14e4f0932 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/continue-at-eof/result @@ -0,0 +1,2 @@ +(from "alpine:3.5") +(run "something") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/continueIndent/Dockerfile 
b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/continueIndent/Dockerfile new file mode 100644 index 000000000..42b324e77 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/continueIndent/Dockerfile @@ -0,0 +1,36 @@ +FROM ubuntu:14.04 + +RUN echo hello\ + world\ + goodnight \ + moon\ + light\ +ning +RUN echo hello \ + world +RUN echo hello \ +world +RUN echo hello \ +goodbye\ +frog +RUN echo hello \ +world +RUN echo hi \ + \ + world \ +\ + good\ +\ +night +RUN echo goodbye\ +frog +RUN echo good\ +bye\ +frog + +RUN echo hello \ +# this is a comment + +# this is a comment with a blank line surrounding it + +this is some more useful stuff diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/continueIndent/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/continueIndent/result new file mode 100644 index 000000000..268ae073c --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/continueIndent/result @@ -0,0 +1,10 @@ +(from "ubuntu:14.04") +(run "echo hello world goodnight moon lightning") +(run "echo hello world") +(run "echo hello world") +(run "echo hello goodbyefrog") +(run "echo hello world") +(run "echo hi world goodnight") +(run "echo goodbyefrog") +(run "echo goodbyefrog") +(run "echo hello this is some more useful stuff") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile new file mode 100644 index 000000000..8ccb71a57 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile @@ -0,0 +1,54 @@ +FROM cpuguy83/ubuntu +ENV NAGIOS_HOME /opt/nagios +ENV NAGIOS_USER nagios +ENV NAGIOS_GROUP nagios +ENV NAGIOS_CMDUSER nagios +ENV NAGIOS_CMDGROUP nagios +ENV NAGIOSADMIN_USER nagiosadmin +ENV NAGIOSADMIN_PASS nagios +ENV APACHE_RUN_USER nagios +ENV APACHE_RUN_GROUP nagios +ENV NAGIOS_TIMEZONE UTC + +RUN sed -i 's/universe/universe multiverse/' /etc/apt/sources.list +RUN apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx +RUN ( egrep -i "^${NAGIOS_GROUP}" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i "^${NAGIOS_CMDGROUP}" /etc/group || groupadd $NAGIOS_CMDGROUP ) +RUN ( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER ) + +ADD http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3 /tmp/nagios.tar.gz +RUN cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf +ADD http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz /tmp/ +RUN cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install + +RUN sed -i.bak 
's/.*\=www\-data//g' /etc/apache2/envvars +RUN export DOC_ROOT="DocumentRoot $(echo $NAGIOS_HOME/share)"; sed -i "s,DocumentRoot.*,$DOC_ROOT," /etc/apache2/sites-enabled/000-default + +RUN ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo + +RUN echo "use_timezone=$NAGIOS_TIMEZONE" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo "SetEnv TZ \"${NAGIOS_TIMEZONE}\"" >> /etc/apache2/conf.d/nagios.conf + +RUN mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs +RUN echo "cfg_dir=${NAGIOS_HOME}/etc/conf.d" >> ${NAGIOS_HOME}/etc/nagios.cfg +RUN echo "cfg_dir=${NAGIOS_HOME}/etc/monitor" >> ${NAGIOS_HOME}/etc/nagios.cfg +RUN download-mibs && echo "mibs +ALL" > /etc/snmp/snmp.conf + +RUN sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && \ + sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg +RUN cp /etc/services /var/spool/postfix/etc/ + +RUN mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix +ADD nagios.init /etc/sv/nagios/run +ADD apache.init /etc/sv/apache/run +ADD postfix.init /etc/sv/postfix/run +ADD postfix.stop /etc/sv/postfix/finish + +ADD start.sh /usr/local/bin/start_nagios + +ENV APACHE_LOCK_DIR /var/run +ENV APACHE_LOG_DIR /var/log/apache2 + +EXPOSE 80 + +VOLUME ["/opt/nagios/var", "/opt/nagios/etc", "/opt/nagios/libexec", "/var/log/apache2", "/usr/share/snmp/mibs"] + +CMD ["/usr/local/bin/start_nagios"] diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/cpuguy83-nagios/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/cpuguy83-nagios/result new file mode 100644 index 000000000..25dd3ddfe --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/cpuguy83-nagios/result @@ -0,0 +1,40 @@ +(from "cpuguy83/ubuntu") +(env "NAGIOS_HOME" "/opt/nagios") +(env "NAGIOS_USER" "nagios") +(env "NAGIOS_GROUP" "nagios") +(env "NAGIOS_CMDUSER" "nagios") +(env "NAGIOS_CMDGROUP" "nagios") +(env "NAGIOSADMIN_USER" "nagiosadmin") +(env "NAGIOSADMIN_PASS" "nagios") +(env "APACHE_RUN_USER" "nagios") +(env "APACHE_RUN_GROUP" "nagios") +(env "NAGIOS_TIMEZONE" "UTC") +(run "sed -i 's/universe/universe multiverse/' /etc/apt/sources.list") +(run "apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx") +(run "( egrep -i \"^${NAGIOS_GROUP}\" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i \"^${NAGIOS_CMDGROUP}\" /etc/group || groupadd $NAGIOS_CMDGROUP )") +(run "( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER )") +(add "http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3" "/tmp/nagios.tar.gz") +(run "cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp 
sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf") +(add "http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz" "/tmp/") +(run "cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install") +(run "sed -i.bak 's/.*\\=www\\-data//g' /etc/apache2/envvars") +(run "export DOC_ROOT=\"DocumentRoot $(echo $NAGIOS_HOME/share)\"; sed -i \"s,DocumentRoot.*,$DOC_ROOT,\" /etc/apache2/sites-enabled/000-default") +(run "ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo") +(run "echo \"use_timezone=$NAGIOS_TIMEZONE\" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo \"SetEnv TZ \\\"${NAGIOS_TIMEZONE}\\\"\" >> /etc/apache2/conf.d/nagios.conf") +(run "mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs") +(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/conf.d\" >> ${NAGIOS_HOME}/etc/nagios.cfg") +(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/monitor\" >> ${NAGIOS_HOME}/etc/nagios.cfg") +(run "download-mibs && echo \"mibs +ALL\" > /etc/snmp/snmp.conf") +(run "sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg") +(run "cp /etc/services /var/spool/postfix/etc/") +(run "mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix") +(add "nagios.init" "/etc/sv/nagios/run") +(add "apache.init" "/etc/sv/apache/run") +(add "postfix.init" "/etc/sv/postfix/run") +(add "postfix.stop" "/etc/sv/postfix/finish") +(add "start.sh" "/usr/local/bin/start_nagios") +(env "APACHE_LOCK_DIR" "/var/run") +(env "APACHE_LOG_DIR" "/var/log/apache2") +(expose "80") +(volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs") +(cmd "/usr/local/bin/start_nagios") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/docker/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/docker/Dockerfile new file mode 100644 index 000000000..5153453ff --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/docker/Dockerfile @@ -0,0 +1,102 @@ +# This file describes the standard way to build Docker, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker . +# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py +# +# # Publish a release: +# docker run --privileged \ +# -e AWS_S3_BUCKET=baz \ +# -e AWS_ACCESS_KEY=foo \ +# -e AWS_SECRET_KEY=bar \ +# -e GPG_PASSPHRASE=gloubiboulga \ +# docker hack/release.sh +# +# Note: AppArmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. 
+# + +FROM ubuntu:14.04 +LABEL maintainer Tianon Gravi (@tianon) + +# Packaged dependencies +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \ + apt-utils \ + aufs-tools \ + automake \ + btrfs-tools \ + build-essential \ + curl \ + dpkg-sig \ + git \ + iptables \ + libapparmor-dev \ + libcap-dev \ + mercurial \ + pandoc \ + parallel \ + reprepro \ + ruby1.9.1 \ + ruby1.9.1-dev \ + s3cmd=1.1.0* \ + --no-install-recommends + +# Get lvm2 source for compiling statically +RUN git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103 +# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags +# note: we don't use "git clone -b" above because it then spews big nasty warnings about 'detached HEAD' state that we can't silence as easily as we can silence them using "git checkout" directly + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper +# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +# Install Go +RUN curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz +ENV PATH /usr/local/go/bin:$PATH +ENV GOPATH /go:/go/src/github.com/docker/docker/vendor +RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1 + +# Compile Go for cross compilation +ENV DOCKER_CROSSPLATFORMS \ + linux/386 linux/arm \ + darwin/amd64 darwin/386 \ + freebsd/amd64 freebsd/386 freebsd/arm +# (set an explicit GOARM of 5 for maximum compatibility) +ENV GOARM 5 +RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done' + +# Grab Go's cover tool for dead-simple code coverage testing +RUN go get golang.org/x/tools/cmd/cover + +# TODO replace FPM with some very minimal debhelper stuff +RUN gem install --no-rdoc --no-ri fpm --version 1.0.2 + +# Get the "busybox" image source so we can build locally instead of pulling +RUN git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox + +# Setup s3cmd config +RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor selinux + +# Wrap all commands in the "docker-in-docker" script to allow nested containers +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . 
/go/src/github.com/docker/docker diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/docker/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/docker/result new file mode 100644 index 000000000..0c2f22991 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/docker/result @@ -0,0 +1,24 @@ +(from "ubuntu:14.04") +(label "maintainer" "Tianon Gravi (@tianon)") +(run "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \tapt-utils \taufs-tools \tautomake \tbtrfs-tools \tbuild-essential \tcurl \tdpkg-sig \tgit \tiptables \tlibapparmor-dev \tlibcap-dev \tmercurial \tpandoc \tparallel \treprepro \truby1.9.1 \truby1.9.1-dev \ts3cmd=1.1.0* \t--no-install-recommends") +(run "git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103") +(run "cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper") +(run "curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz") +(env "PATH" "/usr/local/go/bin:$PATH") +(env "GOPATH" "/go:/go/src/github.com/docker/docker/vendor") +(run "cd /usr/local/go/src && ./make.bash --no-clean 2>&1") +(env "DOCKER_CROSSPLATFORMS" "linux/386 linux/arm \tdarwin/amd64 darwin/386 \tfreebsd/amd64 freebsd/386 freebsd/arm") +(env "GOARM" "5") +(run "cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'") +(run "go get golang.org/x/tools/cmd/cover") +(run "gem install --no-rdoc --no-ri fpm --version 1.0.2") +(run "git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox") +(run "/bin/echo -e '[default]\\naccess_key=$AWS_ACCESS_KEY\\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg") +(run "git config --global user.email 'docker-dummy@example.com'") +(run "groupadd -r docker") +(run "useradd --create-home --gid docker unprivilegeduser") +(volume "/var/lib/docker") +(workdir "/go/src/github.com/docker/docker") +(env "DOCKER_BUILDTAGS" "apparmor selinux") +(entrypoint "hack/dind") +(copy "." 
"/go/src/github.com/docker/docker") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/env/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/env/Dockerfile new file mode 100644 index 000000000..08fa18ace --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/env/Dockerfile @@ -0,0 +1,23 @@ +FROM ubuntu +ENV name value +ENV name=value +ENV name=value name2=value2 +ENV name="value value1" +ENV name=value\ value2 +ENV name="value'quote space'value2" +ENV name='value"double quote"value2' +ENV name=value\ value2 name2=value2\ value3 +ENV name="a\"b" +ENV name="a\'b" +ENV name='a\'b' +ENV name='a\'b'' +ENV name='a\"b' +ENV name="''" +# don't put anything after the next line - it must be the last line of the +# Dockerfile and it must end with \ +ENV name=value \ + name1=value1 \ + name2="value2a \ + value2b" \ + name3="value3a\n\"value3b\"" \ + name4="value4a\\nvalue4b" \ diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/env/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/env/result new file mode 100644 index 000000000..ba0a6dd7c --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/env/result @@ -0,0 +1,16 @@ +(from "ubuntu") +(env "name" "value") +(env "name" "value") +(env "name" "value" "name2" "value2") +(env "name" "\"value value1\"") +(env "name" "value\\ value2") +(env "name" "\"value'quote space'value2\"") +(env "name" "'value\"double quote\"value2'") +(env "name" "value\\ value2" "name2" "value2\\ value3") +(env "name" "\"a\\\"b\"") +(env "name" "\"a\\'b\"") +(env "name" "'a\\'b'") +(env "name" "'a\\'b''") +(env "name" "'a\\\"b'") +(env "name" "\"''\"") +(env "name" "value" "name1" "value1" "name2" "\"value2a value2b\"" "name3" "\"value3a\\n\\\"value3b\\\"\"" "name4" "\"value4a\\\\nvalue4b\"") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape-after-comment/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape-after-comment/Dockerfile new file mode 100644 index 000000000..18e9a474f --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape-after-comment/Dockerfile @@ -0,0 +1,9 @@ +# Comment here. Should not be looking for the following parser directive. +# Hence the following line will be ignored, and the subsequent backslash +# continuation will be the default. +# escape = ` + +FROM image +LABEL maintainer foo@bar.com +ENV GOPATH \ +\go \ No newline at end of file diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape-after-comment/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape-after-comment/result new file mode 100644 index 000000000..9ab119c41 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape-after-comment/result @@ -0,0 +1,3 @@ +(from "image") +(label "maintainer" "foo@bar.com") +(env "GOPATH" "\\go") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape-nonewline/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape-nonewline/Dockerfile new file mode 100644 index 000000000..366ee3c36 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape-nonewline/Dockerfile @@ -0,0 +1,7 @@ +# escape = `` +# There is no white space line after the directives. This still succeeds, but goes +# against best practices. 
+FROM image +LABEL maintainer foo@bar.com +ENV GOPATH ` +\go \ No newline at end of file diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape-nonewline/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape-nonewline/result new file mode 100644 index 000000000..9ab119c41 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape-nonewline/result @@ -0,0 +1,3 @@ +(from "image") +(label "maintainer" "foo@bar.com") +(env "GOPATH" "\\go") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape/Dockerfile new file mode 100644 index 000000000..a515af152 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape/Dockerfile @@ -0,0 +1,6 @@ +#escape = ` + +FROM image +LABEL maintainer foo@bar.com +ENV GOPATH ` +\go \ No newline at end of file diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape/result new file mode 100644 index 000000000..9ab119c41 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escape/result @@ -0,0 +1,3 @@ +(from "image") +(label "maintainer" "foo@bar.com") +(env "GOPATH" "\\go") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escapes/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escapes/Dockerfile new file mode 100644 index 000000000..03062394a --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escapes/Dockerfile @@ -0,0 +1,14 @@ +FROM ubuntu:14.04 +LABEL maintainer Erik \\Hollensbe \" + +RUN apt-get \update && \ + apt-get \"install znc -y +ADD \conf\\" /.znc + +RUN foo \ + +bar \ + +baz + +CMD [ "\/usr\\\"/bin/znc", "-f", "-r" ] diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escapes/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escapes/result new file mode 100644 index 000000000..98e3e3b73 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/escapes/result @@ -0,0 +1,6 @@ +(from "ubuntu:14.04") +(label "maintainer" "Erik \\\\Hollensbe \\\"") +(run "apt-get \\update && apt-get \\\"install znc -y") +(add "\\conf\\\\\"" "/.znc") +(run "foo bar baz") +(cmd "/usr\\\"/bin/znc" "-f" "-r") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/flags/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/flags/Dockerfile new file mode 100644 index 000000000..2418e0f06 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/flags/Dockerfile @@ -0,0 +1,10 @@ +FROM scratch +COPY foo /tmp/ +COPY --user=me foo /tmp/ +COPY --doit=true foo /tmp/ +COPY --user=me --doit=true foo /tmp/ +COPY --doit=true -- foo /tmp/ +COPY -- foo /tmp/ +CMD --doit [ "a", "b" ] +CMD --doit=true -- [ "a", "b" ] +CMD --doit -- [ ] diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/flags/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/flags/result new file mode 100644 index 000000000..4578f4cba --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/flags/result @@ -0,0 +1,10 @@ +(from "scratch") +(copy "foo" "/tmp/") +(copy ["--user=me"] "foo" "/tmp/") +(copy ["--doit=true"] "foo" "/tmp/") +(copy ["--user=me" 
"--doit=true"] "foo" "/tmp/") +(copy ["--doit=true"] "foo" "/tmp/") +(copy "foo" "/tmp/") +(cmd ["--doit"] "a" "b") +(cmd ["--doit=true"] "a" "b") +(cmd ["--doit"]) diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/health/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/health/Dockerfile new file mode 100644 index 000000000..081e44288 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/health/Dockerfile @@ -0,0 +1,10 @@ +FROM debian +ADD check.sh main.sh /app/ +CMD /app/main.sh +HEALTHCHECK +HEALTHCHECK --interval=5s --timeout=3s --retries=3 \ + CMD /app/check.sh --quiet +HEALTHCHECK CMD +HEALTHCHECK CMD a b +HEALTHCHECK --timeout=3s CMD ["foo"] +HEALTHCHECK CONNECT TCP 7000 diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/health/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/health/result new file mode 100644 index 000000000..092924f88 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/health/result @@ -0,0 +1,9 @@ +(from "debian") +(add "check.sh" "main.sh" "/app/") +(cmd "/app/main.sh") +(healthcheck) +(healthcheck ["--interval=5s" "--timeout=3s" "--retries=3"] "CMD" "/app/check.sh --quiet") +(healthcheck "CMD") +(healthcheck "CMD" "a b") +(healthcheck ["--timeout=3s"] "CMD" "foo") +(healthcheck "CONNECT" "TCP 7000") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/influxdb/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/influxdb/Dockerfile new file mode 100644 index 000000000..587fb9b54 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/influxdb/Dockerfile @@ -0,0 +1,15 @@ +FROM ubuntu:14.04 + +RUN apt-get update && apt-get install wget -y +RUN wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb +RUN dpkg -i influxdb_latest_amd64.deb +RUN rm -r /opt/influxdb/shared + +VOLUME /opt/influxdb/shared + +CMD /usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml + +EXPOSE 8083 +EXPOSE 8086 +EXPOSE 8090 +EXPOSE 8099 diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/influxdb/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/influxdb/result new file mode 100644 index 000000000..0998e87e6 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/influxdb/result @@ -0,0 +1,11 @@ +(from "ubuntu:14.04") +(run "apt-get update && apt-get install wget -y") +(run "wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb") +(run "dpkg -i influxdb_latest_amd64.deb") +(run "rm -r /opt/influxdb/shared") +(volume "/opt/influxdb/shared") +(cmd "/usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml") +(expose "8083") +(expose "8086") +(expose "8090") +(expose "8099") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile new file mode 100644 index 000000000..39fe27d99 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile @@ -0,0 +1 @@ +CMD "[\"echo\", \"Phew, I just managed to escaped those double quotes\"]" diff --git 
a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result new file mode 100644 index 000000000..afc220c2a --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result @@ -0,0 +1 @@ +(cmd "\"[\\\"echo\\\", \\\"Phew, I just managed to escaped those double quotes\\\"]\"") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile new file mode 100644 index 000000000..eaae081a0 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile @@ -0,0 +1 @@ +CMD '["echo", "Well, JSON in a string is JSON too?"]' diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result new file mode 100644 index 000000000..484804e2b --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result @@ -0,0 +1 @@ +(cmd "'[\"echo\", \"Well, JSON in a string is JSON too?\"]'") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile new file mode 100644 index 000000000..c3ac63c07 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile @@ -0,0 +1 @@ +CMD ['echo','single quotes are invalid JSON'] diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result new file mode 100644 index 000000000..614789120 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result @@ -0,0 +1 @@ +(cmd "['echo','single quotes are invalid JSON']") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile new file mode 100644 index 000000000..5fd4afa52 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile @@ -0,0 +1 @@ +CMD ["echo", "Please, close the brackets when you're done" diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result new file mode 100644 index 000000000..1ffbb8ff8 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result @@ -0,0 +1 @@ +(cmd "[\"echo\", \"Please, close the brackets when you're done\"") diff --git 
a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile new file mode 100644 index 000000000..30cc4bb48 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile @@ -0,0 +1 @@ +CMD ["echo", "look ma, no quote!] diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result new file mode 100644 index 000000000..32048147b --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result @@ -0,0 +1 @@ +(cmd "[\"echo\", \"look ma, no quote!]") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/json/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/json/Dockerfile new file mode 100644 index 000000000..a58691711 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/json/Dockerfile @@ -0,0 +1,8 @@ +CMD [] +CMD [""] +CMD ["a"] +CMD ["a","b"] +CMD [ "a", "b" ] +CMD [ "a", "b" ] +CMD [ "a", "b" ] +CMD ["abc 123", "♥", "☃", "\" \\ \/ \b \f \n \r \t \u0000"] diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/json/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/json/result new file mode 100644 index 000000000..c6553e6e1 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/json/result @@ -0,0 +1,8 @@ +(cmd) +(cmd "") +(cmd "a") +(cmd "a" "b") +(cmd "a" "b") +(cmd "a" "b") +(cmd "a" "b") +(cmd "abc 123" "♥" "☃" "\" \\ / \b \f \n \r \t \x00") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile new file mode 100644 index 000000000..728ec9a78 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile @@ -0,0 +1,7 @@ +FROM ubuntu:14.04 +LABEL maintainer James Turnbull "james@example.com" +ENV REFRESHED_AT 2014-06-01 +RUN apt-get update +RUN apt-get -y install redis-server redis-tools +EXPOSE 6379 +ENTRYPOINT [ "/usr/bin/redis-server" ] diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result new file mode 100644 index 000000000..e774bc4f9 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result @@ -0,0 +1,7 @@ +(from "ubuntu:14.04") +(label "maintainer" "James Turnbull \"james@example.com\"") +(env "REFRESHED_AT" "2014-06-01") +(run "apt-get update") +(run "apt-get -y install redis-server redis-tools") +(expose "6379") +(entrypoint "/usr/bin/redis-server") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile new file mode 100644 index 000000000..27f28cb92 --- /dev/null +++ 
b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile @@ -0,0 +1,48 @@ +FROM busybox:buildroot-2014.02 + +LABEL maintainer docker + +ONBUILD RUN ["echo", "test"] +ONBUILD RUN echo test +ONBUILD COPY . / + + +# RUN Commands \ +# linebreak in comment \ +RUN ["ls", "-la"] +RUN ["echo", "'1234'"] +RUN echo "1234" +RUN echo 1234 +RUN echo '1234' && \ + echo "456" && \ + echo 789 +RUN sh -c 'echo root:testpass \ + > /tmp/passwd' +RUN mkdir -p /test /test2 /test3/test + +# ENV \ +ENV SCUBA 1 DUBA 3 +ENV SCUBA "1 DUBA 3" + +# CMD \ +CMD ["echo", "test"] +CMD echo test +CMD echo "test" +CMD echo 'test' +CMD echo 'test' | wc - + +#EXPOSE\ +EXPOSE 3000 +EXPOSE 9000 5000 6000 + +USER docker +USER docker:root + +VOLUME ["/test"] +VOLUME ["/test", "/test2"] +VOLUME /test3 + +WORKDIR /test + +ADD . / +COPY . copy diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result new file mode 100644 index 000000000..8a499ff94 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result @@ -0,0 +1,29 @@ +(from "busybox:buildroot-2014.02") +(label "maintainer" "docker ") +(onbuild (run "echo" "test")) +(onbuild (run "echo test")) +(onbuild (copy "." "/")) +(run "ls" "-la") +(run "echo" "'1234'") +(run "echo \"1234\"") +(run "echo 1234") +(run "echo '1234' && echo \"456\" && echo 789") +(run "sh -c 'echo root:testpass > /tmp/passwd'") +(run "mkdir -p /test /test2 /test3/test") +(env "SCUBA" "1 DUBA 3") +(env "SCUBA" "\"1 DUBA 3\"") +(cmd "echo" "test") +(cmd "echo test") +(cmd "echo \"test\"") +(cmd "echo 'test'") +(cmd "echo 'test' | wc -") +(expose "3000") +(expose "9000" "5000" "6000") +(user "docker") +(user "docker:root") +(volume "/test") +(volume "/test" "/test2") +(volume "/test3") +(workdir "/test") +(add "." "/") +(copy "." 
"copy") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/mail/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/mail/Dockerfile new file mode 100644 index 000000000..f64c1168c --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/mail/Dockerfile @@ -0,0 +1,16 @@ +FROM ubuntu:14.04 + +RUN apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y +ADD .muttrc / +ADD .offlineimaprc / +ADD .tmux.conf / +ADD mutt /.mutt +ADD vim /.vim +ADD vimrc /.vimrc +ADD crontab /etc/crontab +RUN chmod 644 /etc/crontab +RUN mkdir /Mail +RUN mkdir /.offlineimap +RUN echo "export TERM=screen-256color" >/.zshenv + +CMD setsid cron; tmux -2 diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/mail/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/mail/result new file mode 100644 index 000000000..a0efcf04b --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/mail/result @@ -0,0 +1,14 @@ +(from "ubuntu:14.04") +(run "apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y") +(add ".muttrc" "/") +(add ".offlineimaprc" "/") +(add ".tmux.conf" "/") +(add "mutt" "/.mutt") +(add "vim" "/.vim") +(add "vimrc" "/.vimrc") +(add "crontab" "/etc/crontab") +(run "chmod 644 /etc/crontab") +(run "mkdir /Mail") +(run "mkdir /.offlineimap") +(run "echo \"export TERM=screen-256color\" >/.zshenv") +(cmd "setsid cron; tmux -2") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/multiple-volumes/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/multiple-volumes/Dockerfile new file mode 100644 index 000000000..57bb5976a --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/multiple-volumes/Dockerfile @@ -0,0 +1,3 @@ +FROM foo + +VOLUME /opt/nagios/var /opt/nagios/etc /opt/nagios/libexec /var/log/apache2 /usr/share/snmp/mibs diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/multiple-volumes/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/multiple-volumes/result new file mode 100644 index 000000000..18dbdeeaa --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/multiple-volumes/result @@ -0,0 +1,2 @@ +(from "foo") +(volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/mumble/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/mumble/Dockerfile new file mode 100644 index 000000000..5b9ec06a6 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/mumble/Dockerfile @@ -0,0 +1,7 @@ +FROM ubuntu:14.04 + +RUN apt-get update && apt-get install libcap2-bin mumble-server -y + +ADD ./mumble-server.ini /etc/mumble-server.ini + +CMD /usr/sbin/murmurd diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/mumble/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/mumble/result new file mode 100644 index 000000000..a0036a943 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/mumble/result @@ -0,0 +1,4 @@ +(from "ubuntu:14.04") +(run "apt-get update && apt-get install libcap2-bin mumble-server -y") +(add "./mumble-server.ini" "/etc/mumble-server.ini") +(cmd "/usr/sbin/murmurd") diff --git 
a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/nginx/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/nginx/Dockerfile new file mode 100644 index 000000000..0a35e2c6b --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/nginx/Dockerfile @@ -0,0 +1,14 @@ +FROM ubuntu:14.04 +LABEL maintainer Erik Hollensbe + +RUN apt-get update && apt-get install nginx-full -y +RUN rm -rf /etc/nginx +ADD etc /etc/nginx +RUN chown -R root:root /etc/nginx +RUN /usr/sbin/nginx -qt +RUN mkdir /www + +CMD ["/usr/sbin/nginx"] + +VOLUME /www +EXPOSE 80 diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/nginx/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/nginx/result new file mode 100644 index 000000000..a895fadbb --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/nginx/result @@ -0,0 +1,11 @@ +(from "ubuntu:14.04") +(label "maintainer" "Erik Hollensbe ") +(run "apt-get update && apt-get install nginx-full -y") +(run "rm -rf /etc/nginx") +(add "etc" "/etc/nginx") +(run "chown -R root:root /etc/nginx") +(run "/usr/sbin/nginx -qt") +(run "mkdir /www") +(cmd "/usr/sbin/nginx") +(volume "/www") +(expose "80") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/tf2/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/tf2/Dockerfile new file mode 100644 index 000000000..72b79bdd7 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/tf2/Dockerfile @@ -0,0 +1,23 @@ +FROM ubuntu:12.04 + +EXPOSE 27015 +EXPOSE 27005 +EXPOSE 26901 +EXPOSE 27020 + +RUN apt-get update && apt-get install libc6-dev-i386 curl unzip -y +RUN mkdir -p /steam +RUN curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam +ADD ./script /steam/script +RUN /steam/steamcmd.sh +runscript /steam/script +RUN curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf +RUN curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf +ADD ./server.cfg /steam/tf2/tf/cfg/server.cfg +ADD ./ctf_2fort.cfg /steam/tf2/tf/cfg/ctf_2fort.cfg +ADD ./sourcemod.cfg /steam/tf2/tf/cfg/sourcemod/sourcemod.cfg +RUN rm -r /steam/tf2/tf/addons/sourcemod/configs +ADD ./configs /steam/tf2/tf/addons/sourcemod/configs +RUN mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en +RUN cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en + +CMD cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/tf2/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/tf2/result new file mode 100644 index 000000000..d4f94cd8b --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/tf2/result @@ -0,0 +1,20 @@ +(from "ubuntu:12.04") +(expose "27015") +(expose "27005") +(expose "26901") +(expose "27020") +(run "apt-get update && apt-get install libc6-dev-i386 curl unzip -y") +(run "mkdir -p /steam") +(run "curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam") +(add "./script" "/steam/script") +(run "/steam/steamcmd.sh +runscript /steam/script") +(run "curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | 
tar vxz -C /steam/tf2/tf") +(run "curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf") +(add "./server.cfg" "/steam/tf2/tf/cfg/server.cfg") +(add "./ctf_2fort.cfg" "/steam/tf2/tf/cfg/ctf_2fort.cfg") +(add "./sourcemod.cfg" "/steam/tf2/tf/cfg/sourcemod/sourcemod.cfg") +(run "rm -r /steam/tf2/tf/addons/sourcemod/configs") +(add "./configs" "/steam/tf2/tf/addons/sourcemod/configs") +(run "mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en") +(run "cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en") +(cmd "cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/weechat/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/weechat/Dockerfile new file mode 100644 index 000000000..484208816 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/weechat/Dockerfile @@ -0,0 +1,9 @@ +FROM ubuntu:14.04 + +RUN apt-get update -qy && apt-get install tmux zsh weechat-curses -y + +ADD .weechat /.weechat +ADD .tmux.conf / +RUN echo "export TERM=screen-256color" >/.zshenv + +CMD zsh -c weechat diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/weechat/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/weechat/result new file mode 100644 index 000000000..c3abb4c54 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/weechat/result @@ -0,0 +1,6 @@ +(from "ubuntu:14.04") +(run "apt-get update -qy && apt-get install tmux zsh weechat-curses -y") +(add ".weechat" "/.weechat") +(add ".tmux.conf" "/") +(run "echo \"export TERM=screen-256color\" >/.zshenv") +(cmd "zsh -c weechat") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/znc/Dockerfile b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/znc/Dockerfile new file mode 100644 index 000000000..626b126d8 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/znc/Dockerfile @@ -0,0 +1,7 @@ +FROM ubuntu:14.04 +LABEL maintainer Erik Hollensbe + +RUN apt-get update && apt-get install znc -y +ADD conf /.znc + +CMD [ "/usr/bin/znc", "-f", "-r" ] diff --git a/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/znc/result b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/znc/result new file mode 100644 index 000000000..bfc7f6513 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/parser/testfiles/znc/result @@ -0,0 +1,5 @@ +(from "ubuntu:14.04") +(label "maintainer" "Erik Hollensbe ") +(run "apt-get update && apt-get install znc -y") +(add "conf" "/.znc") +(cmd "/usr/bin/znc" "-f" "-r") diff --git a/vendor/github.com/moby/moby/builder/dockerfile/shell_parser.go b/vendor/github.com/moby/moby/builder/dockerfile/shell_parser.go new file mode 100644 index 000000000..b72ac291d --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/shell_parser.go @@ -0,0 +1,344 @@ +package dockerfile + +import ( + "bytes" + "strings" + "text/scanner" + "unicode" + + "github.com/pkg/errors" +) + +// ShellLex performs shell word splitting and variable expansion. +// +// ShellLex takes a string and an array of env variables and +// process all quotes (" and ') as well as $xxx and ${xxx} env variable +// tokens. Tries to mimic bash shell process. 
+// It doesn't support all flavors of ${xx:...} formats but new ones can +// be added by adding code to the "special ${} format processing" section +type ShellLex struct { + escapeToken rune +} + +// NewShellLex creates a new ShellLex which uses escapeToken to escape quotes. +func NewShellLex(escapeToken rune) *ShellLex { + return &ShellLex{escapeToken: escapeToken} +} + +// ProcessWord will use the 'env' list of environment variables, +// and replace any env var references in 'word'. +func (s *ShellLex) ProcessWord(word string, env []string) (string, error) { + word, _, err := s.process(word, env) + return word, err +} + +// ProcessWords will use the 'env' list of environment variables, +// and replace any env var references in 'word' then it will also +// return a slice of strings which represents the 'word' +// split up based on spaces - taking into account quotes. Note that +// this splitting is done **after** the env var substitutions are done. +// Note, each one is trimmed to remove leading and trailing spaces (unless +// they are quoted", but ProcessWord retains spaces between words. +func (s *ShellLex) ProcessWords(word string, env []string) ([]string, error) { + _, words, err := s.process(word, env) + return words, err +} + +func (s *ShellLex) process(word string, env []string) (string, []string, error) { + sw := &shellWord{ + envs: env, + escapeToken: s.escapeToken, + } + sw.scanner.Init(strings.NewReader(word)) + return sw.process(word) +} + +type shellWord struct { + scanner scanner.Scanner + envs []string + escapeToken rune +} + +func (sw *shellWord) process(source string) (string, []string, error) { + word, words, err := sw.processStopOn(scanner.EOF) + if err != nil { + err = errors.Wrapf(err, "failed to process %q", source) + } + return word, words, err +} + +type wordsStruct struct { + word string + words []string + inWord bool +} + +func (w *wordsStruct) addChar(ch rune) { + if unicode.IsSpace(ch) && w.inWord { + if len(w.word) != 0 { + w.words = append(w.words, w.word) + w.word = "" + w.inWord = false + } + } else if !unicode.IsSpace(ch) { + w.addRawChar(ch) + } +} + +func (w *wordsStruct) addRawChar(ch rune) { + w.word += string(ch) + w.inWord = true +} + +func (w *wordsStruct) addString(str string) { + var scan scanner.Scanner + scan.Init(strings.NewReader(str)) + for scan.Peek() != scanner.EOF { + w.addChar(scan.Next()) + } +} + +func (w *wordsStruct) addRawString(str string) { + w.word += str + w.inWord = true +} + +func (w *wordsStruct) getWords() []string { + if len(w.word) > 0 { + w.words = append(w.words, w.word) + + // Just in case we're called again by mistake + w.word = "" + w.inWord = false + } + return w.words +} + +// Process the word, starting at 'pos', and stop when we get to the +// end of the word or the 'stopChar' character +func (sw *shellWord) processStopOn(stopChar rune) (string, []string, error) { + var result bytes.Buffer + var words wordsStruct + + var charFuncMapping = map[rune]func() (string, error){ + '\'': sw.processSingleQuote, + '"': sw.processDoubleQuote, + '$': sw.processDollar, + } + + for sw.scanner.Peek() != scanner.EOF { + ch := sw.scanner.Peek() + + if stopChar != scanner.EOF && ch == stopChar { + sw.scanner.Next() + break + } + if fn, ok := charFuncMapping[ch]; ok { + // Call special processing func for certain chars + tmp, err := fn() + if err != nil { + return "", []string{}, err + } + result.WriteString(tmp) + + if ch == rune('$') { + words.addString(tmp) + } else { + words.addRawString(tmp) + } + } else { + // Not special, just 
add it to the result + ch = sw.scanner.Next() + + if ch == sw.escapeToken { + // '\' (default escape token, but ` allowed) escapes, except end of line + ch = sw.scanner.Next() + + if ch == scanner.EOF { + break + } + + words.addRawChar(ch) + } else { + words.addChar(ch) + } + + result.WriteRune(ch) + } + } + + return result.String(), words.getWords(), nil +} + +func (sw *shellWord) processSingleQuote() (string, error) { + // All chars between single quotes are taken as-is + // Note, you can't escape ' + // + // From the "sh" man page: + // Single Quotes + // Enclosing characters in single quotes preserves the literal meaning of + // all the characters (except single quotes, making it impossible to put + // single-quotes in a single-quoted string). + + var result bytes.Buffer + + sw.scanner.Next() + + for { + ch := sw.scanner.Next() + switch ch { + case scanner.EOF: + return "", errors.New("unexpected end of statement while looking for matching single-quote") + case '\'': + return result.String(), nil + } + result.WriteRune(ch) + } +} + +func (sw *shellWord) processDoubleQuote() (string, error) { + // All chars up to the next " are taken as-is, even ', except any $ chars + // But you can escape " with a \ (or ` if escape token set accordingly) + // + // From the "sh" man page: + // Double Quotes + // Enclosing characters within double quotes preserves the literal meaning + // of all characters except dollarsign ($), backquote (`), and backslash + // (\). The backslash inside double quotes is historically weird, and + // serves to quote only the following characters: + // $ ` " \ . + // Otherwise it remains literal. + + var result bytes.Buffer + + sw.scanner.Next() + + for { + switch sw.scanner.Peek() { + case scanner.EOF: + return "", errors.New("unexpected end of statement while looking for matching double-quote") + case '"': + sw.scanner.Next() + return result.String(), nil + case '$': + value, err := sw.processDollar() + if err != nil { + return "", err + } + result.WriteString(value) + default: + ch := sw.scanner.Next() + if ch == sw.escapeToken { + switch sw.scanner.Peek() { + case scanner.EOF: + // Ignore \ at end of word + continue + case '"', '$', sw.escapeToken: + // These chars can be escaped, all other \'s are left as-is + // Note: for now don't do anything special with ` chars. + // Not sure what to do with them anyway since we're not going + // to execute the text in there (not now anyway). + ch = sw.scanner.Next() + } + } + result.WriteRune(ch) + } + } +} + +func (sw *shellWord) processDollar() (string, error) { + sw.scanner.Next() + + // $xxx case + if sw.scanner.Peek() != '{' { + name := sw.processName() + if name == "" { + return "$", nil + } + return sw.getEnv(name), nil + } + + sw.scanner.Next() + name := sw.processName() + ch := sw.scanner.Peek() + if ch == '}' { + // Normal ${xx} case + sw.scanner.Next() + return sw.getEnv(name), nil + } + if ch == ':' { + // Special ${xx:...} format processing + // Yes it allows for recursive $'s in the ... 
spot + + sw.scanner.Next() // skip over : + modifier := sw.scanner.Next() + + word, _, err := sw.processStopOn('}') + if err != nil { + return "", err + } + + // Grab the current value of the variable in question so we + // can use it to determine what to do based on the modifier + newValue := sw.getEnv(name) + + switch modifier { + case '+': + if newValue != "" { + newValue = word + } + return newValue, nil + + case '-': + if newValue == "" { + newValue = word + } + return newValue, nil + + default: + return "", errors.Errorf("unsupported modifier (%c) in substitution", modifier) + } + } + return "", errors.Errorf("missing ':' in substitution") +} + +func (sw *shellWord) processName() string { + // Read in a name (alphanumeric or _) + // If it starts with a digit, consume and return just that single digit + var name bytes.Buffer + + for sw.scanner.Peek() != scanner.EOF { + ch := sw.scanner.Peek() + if name.Len() == 0 && unicode.IsDigit(ch) { + ch = sw.scanner.Next() + return string(ch) + } + if !unicode.IsLetter(ch) && !unicode.IsDigit(ch) && ch != '_' { + break + } + ch = sw.scanner.Next() + name.WriteRune(ch) + } + + return name.String() +} + +func (sw *shellWord) getEnv(name string) string { + for _, env := range sw.envs { + i := strings.Index(env, "=") + if i < 0 { + if equalEnvKeys(name, env) { + // Should probably never get here, but just in case treat + // it like "var" and "var=" are the same + return "" + } + continue + } + compareName := env[:i] + if !equalEnvKeys(name, compareName) { + continue + } + return env[i+1:] + } + return "" +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/shell_parser_test.go b/vendor/github.com/moby/moby/builder/dockerfile/shell_parser_test.go new file mode 100644 index 000000000..c4f7e0efd --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/shell_parser_test.go @@ -0,0 +1,151 @@ +package dockerfile + +import ( + "bufio" + "os" + "runtime" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestShellParser4EnvVars(t *testing.T) { + fn := "envVarTest" + lineCount := 0 + + file, err := os.Open(fn) + assert.NoError(t, err) + defer file.Close() + + shlex := NewShellLex('\\') + scanner := bufio.NewScanner(file) + envs := []string{"PWD=/home", "SHELL=bash", "KOREAN=한국어"} + for scanner.Scan() { + line := scanner.Text() + lineCount++ + + // Trim comments and blank lines + i := strings.Index(line, "#") + if i >= 0 { + line = line[:i] + } + line = strings.TrimSpace(line) + + if line == "" { + continue + } + + words := strings.Split(line, "|") + assert.Len(t, words, 3) + + platform := strings.TrimSpace(words[0]) + source := strings.TrimSpace(words[1]) + expected := strings.TrimSpace(words[2]) + + // Key W=Windows; A=All; U=Unix + if platform != "W" && platform != "A" && platform != "U" { + t.Fatalf("Invalid tag %s at line %d of %s.
Must be W, A or U", platform, lineCount, fn) + } + + if ((platform == "W" || platform == "A") && runtime.GOOS == "windows") || + ((platform == "U" || platform == "A") && runtime.GOOS != "windows") { + newWord, err := shlex.ProcessWord(source, envs) + if expected == "error" { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, expected, newWord) + } + } + } +} + +func TestShellParser4Words(t *testing.T) { + fn := "wordsTest" + + file, err := os.Open(fn) + if err != nil { + t.Fatalf("Can't open '%s': %s", fn, err) + } + defer file.Close() + + shlex := NewShellLex('\\') + envs := []string{} + scanner := bufio.NewScanner(file) + lineNum := 0 + for scanner.Scan() { + line := scanner.Text() + lineNum = lineNum + 1 + + if strings.HasPrefix(line, "#") { + continue + } + + if strings.HasPrefix(line, "ENV ") { + line = strings.TrimLeft(line[3:], " ") + envs = append(envs, line) + continue + } + + words := strings.Split(line, "|") + if len(words) != 2 { + t.Fatalf("Error in '%s' (line %d) - should be exactly one | in: %q", fn, lineNum, line) + } + test := strings.TrimSpace(words[0]) + expected := strings.Split(strings.TrimLeft(words[1], " "), ",") + + result, err := shlex.ProcessWords(test, envs) + + if err != nil { + result = []string{"error"} + } + + if len(result) != len(expected) { + t.Fatalf("Error on line %d. %q was supposed to result in %q, but got %q instead", lineNum, test, expected, result) + } + for i, w := range expected { + if w != result[i] { + t.Fatalf("Error on line %d. %q was supposed to result in %q, but got %q instead", lineNum, test, expected, result) + } + } + } +} + +func TestGetEnv(t *testing.T) { + sw := &shellWord{envs: nil} + + sw.envs = []string{} + if sw.getEnv("foo") != "" { + t.Fatal("2 - 'foo' should map to ''") + } + + sw.envs = []string{"foo"} + if sw.getEnv("foo") != "" { + t.Fatal("3 - 'foo' should map to ''") + } + + sw.envs = []string{"foo="} + if sw.getEnv("foo") != "" { + t.Fatal("4 - 'foo' should map to ''") + } + + sw.envs = []string{"foo=bar"} + if sw.getEnv("foo") != "bar" { + t.Fatal("5 - 'foo' should map to 'bar'") + } + + sw.envs = []string{"foo=bar", "car=hat"} + if sw.getEnv("foo") != "bar" { + t.Fatal("6 - 'foo' should map to 'bar'") + } + if sw.getEnv("car") != "hat" { + t.Fatal("7 - 'car' should map to 'hat'") + } + + // Make sure we grab the first 'car' in the list + sw.envs = []string{"foo=bar", "car=hat", "car=bike"} + if sw.getEnv("car") != "hat" { + t.Fatal("8 - 'car' should map to 'hat'") + } +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/support.go b/vendor/github.com/moby/moby/builder/dockerfile/support.go new file mode 100644 index 000000000..e87588910 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/support.go @@ -0,0 +1,19 @@ +package dockerfile + +import "strings" + +// handleJSONArgs parses the command passed to a CMD, ENTRYPOINT, RUN or SHELL instruction in a Dockerfile. +// For the exec (JSON array) form it returns the args slice untouched; +// for the shell form it returns the concatenated args as the sole element of a slice. +func handleJSONArgs(args []string, attributes map[string]bool) []string { + if len(args) == 0 { + return []string{} + } + + if attributes != nil && attributes["json"] { + return args + } + + // literal string command, not an exec array + return []string{strings.Join(args, " ")} +}
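+ +// Illustrative usage (a sketch, not part of the vendored file): the parser +// sets attributes["json"] when an instruction used the JSON-array form. +// +// handleJSONArgs([]string{"echo", "hi"}, map[string]bool{"json": true}) +// // exec form: []string{"echo", "hi"}, returned untouched +// handleJSONArgs([]string{"echo", "hi"}, nil) +// // shell form: []string{"echo hi"}, later run via the default shell
diff --git a/vendor/github.com/moby/moby/builder/dockerfile/support_test.go b/vendor/github.com/moby/moby/builder/dockerfile/support_test.go new file mode 100644 index 000000000..7cc6fe9dc --- /dev/null +++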
b/vendor/github.com/moby/moby/builder/dockerfile/support_test.go @@ -0,0 +1,65 @@ +package dockerfile + +import "testing" + +type testCase struct { + name string + args []string + attributes map[string]bool + expected []string +} + +func initTestCases() []testCase { + testCases := []testCase{} + + testCases = append(testCases, testCase{ + name: "empty args", + args: []string{}, + attributes: make(map[string]bool), + expected: []string{}, + }) + + jsonAttributes := make(map[string]bool) + jsonAttributes["json"] = true + + testCases = append(testCases, testCase{ + name: "json attribute with one element", + args: []string{"foo"}, + attributes: jsonAttributes, + expected: []string{"foo"}, + }) + + testCases = append(testCases, testCase{ + name: "json attribute with two elements", + args: []string{"foo", "bar"}, + attributes: jsonAttributes, + expected: []string{"foo", "bar"}, + }) + + testCases = append(testCases, testCase{ + name: "no attributes", + args: []string{"foo", "bar"}, + attributes: nil, + expected: []string{"foo bar"}, + }) + + return testCases +} + +func TestHandleJSONArgs(t *testing.T) { + testCases := initTestCases() + + for _, test := range testCases { + arguments := handleJSONArgs(test.args, test.attributes) + + if len(arguments) != len(test.expected) { + t.Fatalf("In test \"%s\": length of returned slice is incorrect. Expected: %d, got: %d", test.name, len(test.expected), len(arguments)) + } + + for i := range test.expected { + if arguments[i] != test.expected[i] { + t.Fatalf("In test \"%s\": element at position %d is incorrect. Expected: %s, got: %s", test.name, i, test.expected[i], arguments[i]) + } + } + } +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/utils_test.go b/vendor/github.com/moby/moby/builder/dockerfile/utils_test.go new file mode 100644 index 000000000..80a3f1bab --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/utils_test.go @@ -0,0 +1,50 @@ +package dockerfile + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +// createTestTempDir creates a temporary directory for testing. +// It returns the created path and a cleanup function which is meant to be used as a deferred call. +// When an error occurs, it terminates the test. +func createTestTempDir(t *testing.T, dir, prefix string) (string, func()) { + path, err := ioutil.TempDir(dir, prefix) + + if err != nil { + t.Fatalf("Error when creating directory %s with prefix %s: %s", dir, prefix, err) + } + + return path, func() { + err = os.RemoveAll(path) + + if err != nil { + t.Fatalf("Error when removing directory %s: %s", path, err) + } + } +}
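+ +// Typical usage (a sketch; the prefix string is illustrative): +// +// dir, cleanup := createTestTempDir(t, "", "builder-test") +// defer cleanup() // removes the directory even if the test fails part-way + +// createTestTempFile creates a temporary file within dir with specific contents and permissions.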
+// When an error occurs, it terminates the test +func createTestTempFile(t *testing.T, dir, filename, contents string, perm os.FileMode) string { + filePath := filepath.Join(dir, filename) + err := ioutil.WriteFile(filePath, []byte(contents), perm) + + if err != nil { + t.Fatalf("Error when creating %s file: %s", filename, err) + } + + return filePath +} + +// createTestSymlink creates a symlink file within dir which points to oldname +func createTestSymlink(t *testing.T, dir, filename, oldname string) string { + filePath := filepath.Join(dir, filename) + if err := os.Symlink(oldname, filePath); err != nil { + t.Fatalf("Error when creating %s symlink to %s: %s", filename, oldname, err) + } + + return filePath +} diff --git a/vendor/github.com/moby/moby/builder/dockerfile/wordsTest b/vendor/github.com/moby/moby/builder/dockerfile/wordsTest new file mode 100644 index 000000000..1fd9f1943 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerfile/wordsTest @@ -0,0 +1,30 @@ +hello | hello +hello${hi}bye | hellobye +ENV hi=hi +hello${hi}bye | hellohibye +ENV space=abc def +hello${space}bye | helloabc,defbye +hello"${space}"bye | helloabc defbye +hello "${space}"bye | hello,abc defbye +ENV leading= ab c +hello${leading}def | hello,ab,cdef +hello"${leading}" def | hello ab c,def +hello"${leading}" | hello ab c +hello${leading} | hello,ab,c +# next line MUST have 3 trailing spaces, don't erase them! +ENV trailing=ab c +hello${trailing} | helloab,c +hello${trailing}d | helloab,c,d +hello"${trailing}"d | helloab c d +# next line MUST have 3 trailing spaces, don't erase them! +hel"lo${trailing}" | helloab c +hello" there " | hello there +hello there | hello,there +hello\ there | hello there +hello" there | error +hello\" there | hello",there +hello"\\there" | hello\there +hello"\there" | hello\there +hello'\\there' | hello\\there +hello'\there' | hello\there +hello'$there' | hello$there diff --git a/vendor/github.com/moby/moby/builder/dockerignore/dockerignore.go b/vendor/github.com/moby/moby/builder/dockerignore/dockerignore.go new file mode 100644 index 000000000..cc2238133 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerignore/dockerignore.go @@ -0,0 +1,64 @@ +package dockerignore + +import ( + "bufio" + "bytes" + "fmt" + "io" + "path/filepath" + "strings" +) + +// ReadAll reads a .dockerignore file and returns the list of file patterns +// to ignore. Note this will trim whitespace from each line as well +// as use GO's "clean" func to get the shortest/cleanest path for each. +func ReadAll(reader io.Reader) ([]string, error) { + if reader == nil { + return nil, nil + } + + scanner := bufio.NewScanner(reader) + var excludes []string + currentLine := 0 + + utf8bom := []byte{0xEF, 0xBB, 0xBF} + for scanner.Scan() { + scannedBytes := scanner.Bytes() + // We trim UTF8 BOM + if currentLine == 0 { + scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom) + } + pattern := string(scannedBytes) + currentLine++ + // Lines starting with # (comments) are ignored before processing + if strings.HasPrefix(pattern, "#") { + continue + } + pattern = strings.TrimSpace(pattern) + if pattern == "" { + continue + } + // normalize absolute paths to paths relative to the context + // (taking care of '!' prefix) + invert := pattern[0] == '!' 
+ if invert { + pattern = strings.TrimSpace(pattern[1:]) + } + if len(pattern) > 0 { + pattern = filepath.Clean(pattern) + pattern = filepath.ToSlash(pattern) + if len(pattern) > 1 && pattern[0] == '/' { + pattern = pattern[1:] + } + } + if invert { + pattern = "!" + pattern + } + + excludes = append(excludes, pattern) + } + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("Error reading .dockerignore: %v", err) + } + return excludes, nil +} diff --git a/vendor/github.com/moby/moby/builder/dockerignore/dockerignore_test.go b/vendor/github.com/moby/moby/builder/dockerignore/dockerignore_test.go new file mode 100644 index 000000000..bda38745c --- /dev/null +++ b/vendor/github.com/moby/moby/builder/dockerignore/dockerignore_test.go @@ -0,0 +1,69 @@ +package dockerignore + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func TestReadAll(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "dockerignore-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + di, err := ReadAll(nil) + if err != nil { + t.Fatalf("Expected not to have error, got %v", err) + } + + if diLen := len(di); diLen != 0 { + t.Fatalf("Expected to have zero dockerignore entries, got %d", diLen) + } + + diName := filepath.Join(tmpDir, ".dockerignore") + content := fmt.Sprintf("test1\n/test2\n/a/file/here\n\nlastfile\n# this is a comment\n! /inverted/abs/path\n!\n! \n") + err = ioutil.WriteFile(diName, []byte(content), 0777) + if err != nil { + t.Fatal(err) + } + + diFd, err := os.Open(diName) + if err != nil { + t.Fatal(err) + } + defer diFd.Close() + + di, err = ReadAll(diFd) + if err != nil { + t.Fatal(err) + } + + if len(di) != 7 { + t.Fatalf("Expected 7 entries, got %v", len(di)) + } + if di[0] != "test1" { + t.Fatal("First element is not test1") + } + if di[1] != "test2" { // according to https://docs.docker.com/engine/reference/builder/#dockerignore-file, /foo/bar should be treated as foo/bar + t.Fatal("Second element is not test2") + } + if di[2] != "a/file/here" { // according to https://docs.docker.com/engine/reference/builder/#dockerignore-file, /foo/bar should be treated as foo/bar + t.Fatal("Third element is not a/file/here") + } + if di[3] != "lastfile" { + t.Fatal("Fourth element is not lastfile") + } + if di[4] != "!inverted/abs/path" { + t.Fatal("Fifth element is not !inverted/abs/path") + } + if di[5] != "!" { + t.Fatalf("Sixth element is not !, but %s", di[5]) + } + if di[6] != "!"
{ + t.Fatalf("Seventh element is not !, but %s", di[6]) + } +} diff --git a/vendor/github.com/moby/moby/builder/fscache/fscache.go b/vendor/github.com/moby/moby/builder/fscache/fscache.go new file mode 100644 index 000000000..63331091a --- /dev/null +++ b/vendor/github.com/moby/moby/builder/fscache/fscache.go @@ -0,0 +1,609 @@ +package fscache + +import ( + "encoding/json" + "os" + "path/filepath" + "sort" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/boltdb/bolt" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/remotecontext" + "github.com/docker/docker/client/session/filesync" + "github.com/docker/docker/pkg/directory" + "github.com/docker/docker/pkg/stringid" + "github.com/pkg/errors" + "github.com/tonistiigi/fsutil" + "golang.org/x/net/context" + "golang.org/x/sync/singleflight" +) + +const dbFile = "fscache.db" +const cacheKey = "cache" +const metaKey = "meta" + +// Backend is a backing implementation for FSCache +type Backend interface { + Get(id string) (string, error) + Remove(id string) error +} + +// FSCache allows syncing remote resources to cached snapshots +type FSCache struct { + opt Opt + transports map[string]Transport + mu sync.Mutex + g singleflight.Group + store *fsCacheStore +} + +// Opt defines options for initializing FSCache +type Opt struct { + Backend Backend + Root string // for storing local metadata + GCPolicy GCPolicy +} + +// GCPolicy defines policy for garbage collection +type GCPolicy struct { + MaxSize uint64 + MaxKeepDuration time.Duration +} + +// NewFSCache returns a new FSCache object +func NewFSCache(opt Opt) (*FSCache, error) { + store, err := newFSCacheStore(opt) + if err != nil { + return nil, err + } + return &FSCache{ + store: store, + opt: opt, + transports: make(map[string]Transport), + }, nil +} + +// Transport defines a method for syncing remote data to FSCache +type Transport interface { + Copy(ctx context.Context, id RemoteIdentifier, dest string, cs filesync.CacheUpdater) error +} + +// RemoteIdentifier identifies a transfer request +type RemoteIdentifier interface { + Key() string + SharedKey() string + Transport() string +} + +// RegisterTransport registers a new transport method +func (fsc *FSCache) RegisterTransport(id string, transport Transport) error { + fsc.mu.Lock() + defer fsc.mu.Unlock() + if _, ok := fsc.transports[id]; ok { + return errors.Errorf("transport %v already exists", id) + } + fsc.transports[id] = transport + return nil +} + +// SyncFrom returns a source based on a remote identifier +func (fsc *FSCache) SyncFrom(ctx context.Context, id RemoteIdentifier) (builder.Source, error) { // cacheOpt + transportID := id.Transport() + fsc.mu.Lock() + transport, ok := fsc.transports[id.Transport()] + if !ok { + fsc.mu.Unlock() + return nil, errors.Errorf("invalid transport %s", transportID) + } + + logrus.Debugf("SyncFrom %s %s", id.Key(), id.SharedKey()) + fsc.mu.Unlock() + sourceRef, err, _ := fsc.g.Do(id.Key(), func() (interface{}, error) { + var sourceRef *cachedSourceRef + sourceRef, err := fsc.store.Get(id.Key()) + if err == nil { + return sourceRef, nil + } + + // check for unused shared cache + sharedKey := id.SharedKey() + if sharedKey != "" { + r, err := fsc.store.Rebase(sharedKey, id.Key()) + if err == nil { + sourceRef = r + } + } + + if sourceRef == nil { + var err error + sourceRef, err = fsc.store.New(id.Key(), sharedKey) + if err != nil { + return nil, errors.Wrap(err, "failed to create remote context") + } + } + + if err := syncFrom(ctx, sourceRef, transport, id); err != nil {
+ sourceRef.Release() + return nil, err + } + if err := sourceRef.resetSize(-1); err != nil { + return nil, err + } + return sourceRef, nil + }) + if err != nil { + return nil, err + } + ref := sourceRef.(*cachedSourceRef) + if ref.src == nil { // failsafe + return nil, errors.Errorf("invalid empty pull") + } + wc := &wrappedContext{Source: ref.src, closer: func() error { + ref.Release() + return nil + }} + return wc, nil +} + +// DiskUsage reports how much data is allocated by the cache +func (fsc *FSCache) DiskUsage() (int64, error) { + return fsc.store.DiskUsage() +} + +// Prune allows manually cleaning up the cache +func (fsc *FSCache) Prune(ctx context.Context) (uint64, error) { + return fsc.store.Prune(ctx) +} + +// Close stops the gc and closes the persistent db +func (fsc *FSCache) Close() error { + return fsc.store.Close() +} + +func syncFrom(ctx context.Context, cs *cachedSourceRef, transport Transport, id RemoteIdentifier) (retErr error) { + src := cs.src + if src == nil { + src = remotecontext.NewCachableSource(cs.Dir()) + } + + if !cs.cached { + if err := cs.storage.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(id.Key())) + dt := b.Get([]byte(cacheKey)) + if dt != nil { + if err := src.UnmarshalBinary(dt); err != nil { + return err + } + } else { + return errors.Wrap(src.Scan(), "failed to scan cache records") + } + return nil + }); err != nil { + return err + } + } + + dc := &detectChanges{f: src.HandleChange} + + // todo: probably send a bucket to `Copy` and let it return source + // but need to make sure that tx is safe + if err := transport.Copy(ctx, id, cs.Dir(), dc); err != nil { + return errors.Wrapf(err, "failed to copy to %s", cs.Dir()) + } + + if !dc.supported { + if err := src.Scan(); err != nil { + return errors.Wrap(err, "failed to scan cache records after transfer") + } + } + cs.cached = true + cs.src = src + return cs.storage.db.Update(func(tx *bolt.Tx) error { + dt, err := src.MarshalBinary() + if err != nil { + return err + } + b := tx.Bucket([]byte(id.Key())) + return b.Put([]byte(cacheKey), dt) + }) +} + +type fsCacheStore struct { + root string + mu sync.Mutex + sources map[string]*cachedSource + db *bolt.DB + fs Backend + gcTimer *time.Timer + gcPolicy GCPolicy +} + +// CachePolicy defines policy for keeping a resource in cache +type CachePolicy struct { + Priority int + LastUsed time.Time +} + +func defaultCachePolicy() CachePolicy { + return CachePolicy{Priority: 10, LastUsed: time.Now()} +} + +func newFSCacheStore(opt Opt) (*fsCacheStore, error) { + if err := os.MkdirAll(opt.Root, 0700); err != nil { + return nil, err + } + p := filepath.Join(opt.Root, dbFile) + db, err := bolt.Open(p, 0600, nil) + if err != nil { + return nil, errors.Wrapf(err, "failed to open database file %s", p) + } + s := &fsCacheStore{db: db, sources: make(map[string]*cachedSource), fs: opt.Backend, gcPolicy: opt.GCPolicy} + db.View(func(tx *bolt.Tx) error { + return tx.ForEach(func(name []byte, b *bolt.Bucket) error { + dt := b.Get([]byte(metaKey)) + if dt == nil { + return nil + } + var sm sourceMeta + if err := json.Unmarshal(dt, &sm); err != nil { + return err + } + dir, err := s.fs.Get(sm.BackendID) + if err != nil { + return err // TODO: handle gracefully + } + source := &cachedSource{ + refs: make(map[*cachedSourceRef]struct{}), + id: string(name), + dir: dir, + sourceMeta: sm, + storage: s, + } + s.sources[string(name)] = source + return nil + }) + }) + + s.gcTimer = s.startPeriodicGC(5 * time.Minute) + return s, nil +} + +func (s *fsCacheStore)
startPeriodicGC(interval time.Duration) *time.Timer { + var t *time.Timer + t = time.AfterFunc(interval, func() { + if err := s.GC(); err != nil { + logrus.Errorf("build gc error: %v", err) + } + t.Reset(interval) + }) + return t +} + +func (s *fsCacheStore) Close() error { + s.gcTimer.Stop() + return s.db.Close() +} + +func (s *fsCacheStore) New(id, sharedKey string) (*cachedSourceRef, error) { + s.mu.Lock() + defer s.mu.Unlock() + var ret *cachedSource + if err := s.db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte(id)) + if err != nil { + return err + } + backendID := stringid.GenerateRandomID() + dir, err := s.fs.Get(backendID) + if err != nil { + return err + } + source := &cachedSource{ + refs: make(map[*cachedSourceRef]struct{}), + id: id, + dir: dir, + sourceMeta: sourceMeta{ + BackendID: backendID, + SharedKey: sharedKey, + CachePolicy: defaultCachePolicy(), + }, + storage: s, + } + dt, err := json.Marshal(source.sourceMeta) + if err != nil { + return err + } + if err := b.Put([]byte(metaKey), dt); err != nil { + return err + } + s.sources[id] = source + ret = source + return nil + }); err != nil { + return nil, err + } + return ret.getRef(), nil +} + +func (s *fsCacheStore) Rebase(sharedKey, newid string) (*cachedSourceRef, error) { + s.mu.Lock() + defer s.mu.Unlock() + var ret *cachedSource + for id, snap := range s.sources { + if snap.SharedKey == sharedKey && len(snap.refs) == 0 { + if err := s.db.Update(func(tx *bolt.Tx) error { + if err := tx.DeleteBucket([]byte(id)); err != nil { + return err + } + b, err := tx.CreateBucket([]byte(newid)) + if err != nil { + return err + } + snap.id = newid + snap.CachePolicy = defaultCachePolicy() + dt, err := json.Marshal(snap.sourceMeta) + if err != nil { + return err + } + if err := b.Put([]byte(metaKey), dt); err != nil { + return err + } + delete(s.sources, id) + s.sources[newid] = snap + return nil + }); err != nil { + return nil, err + } + ret = snap + break + } + } + if ret == nil { + return nil, errors.Errorf("no candidate for rebase") + } + return ret.getRef(), nil +} + +func (s *fsCacheStore) Get(id string) (*cachedSourceRef, error) { + s.mu.Lock() + defer s.mu.Unlock() + src, ok := s.sources[id] + if !ok { + return nil, errors.Errorf("not found") + } + return src.getRef(), nil +} + +// DiskUsage reports how much data is allocated by the cache +func (s *fsCacheStore) DiskUsage() (int64, error) { + s.mu.Lock() + defer s.mu.Unlock() + var size int64 + + for _, snap := range s.sources { + if len(snap.refs) == 0 { + ss, err := snap.getSize() + if err != nil { + return 0, err + } + size += ss + } + } + return size, nil +} + +// Prune allows manually cleaning up the cache +func (s *fsCacheStore) Prune(ctx context.Context) (uint64, error) { + s.mu.Lock() + defer s.mu.Unlock() + var size uint64 + + for id, snap := range s.sources { + select { + case <-ctx.Done(): + logrus.Debugf("Cache prune operation cancelled, pruned size: %d", size) + // when the context is cancelled, only return current size and nil + return size, nil + default: + } + if len(snap.refs) == 0 { + ss, err := snap.getSize() + if err != nil { + return size, err + } + if err := s.delete(id); err != nil { + return size, errors.Wrapf(err, "failed to delete %s", id) + } + size += uint64(ss) + } + } + return size, nil +} + +// GC runs a garbage collector on FSCache +func (s *fsCacheStore) GC() error { + s.mu.Lock() + defer s.mu.Unlock() + var size uint64 + + cutoff := time.Now().Add(-s.gcPolicy.MaxKeepDuration) + var blacklist []*cachedSource + + for id, 
snap := range s.sources { + if len(snap.refs) == 0 { + if cutoff.After(snap.CachePolicy.LastUsed) { + if err := s.delete(id); err != nil { + return errors.Wrapf(err, "failed to delete %s", id) + } + } else { + ss, err := snap.getSize() + if err != nil { + return err + } + size += uint64(ss) + blacklist = append(blacklist, snap) + } + } + } + + sort.Sort(sortableCacheSources(blacklist)) + for _, snap := range blacklist { + if size <= s.gcPolicy.MaxSize { + break + } + ss, err := snap.getSize() + if err != nil { + return err + } + if err := s.delete(snap.id); err != nil { + return errors.Wrapf(err, "failed to delete %s", snap.id) + } + size -= uint64(ss) + } + return nil +} + +// keep mu while calling this +func (s *fsCacheStore) delete(id string) error { + src, ok := s.sources[id] + if !ok { + return nil + } + if len(src.refs) > 0 { + return errors.Errorf("can't delete %s because it has active references", id) + } + delete(s.sources, id) + if err := s.db.Update(func(tx *bolt.Tx) error { + return tx.DeleteBucket([]byte(id)) + }); err != nil { + return err + } + if err := s.fs.Remove(src.BackendID); err != nil { + return err + } + return nil +} + +type sourceMeta struct { + SharedKey string + BackendID string + CachePolicy CachePolicy + Size int64 +} + +type cachedSource struct { + sourceMeta + refs map[*cachedSourceRef]struct{} + id string + dir string + src *remotecontext.CachableSource + storage *fsCacheStore + cached bool // keep track if cache is up to date +} + +type cachedSourceRef struct { + *cachedSource +} + +func (cs *cachedSource) Dir() string { + return cs.dir +} + +// hold storage lock before calling +func (cs *cachedSource) getRef() *cachedSourceRef { + ref := &cachedSourceRef{cachedSource: cs} + cs.refs[ref] = struct{}{} + return ref +} + +// hold storage lock before calling +func (cs *cachedSource) getSize() (int64, error) { + if cs.sourceMeta.Size < 0 { + ss, err := directory.Size(cs.dir) + if err != nil { + return 0, err + } + if err := cs.resetSize(ss); err != nil { + return 0, err + } + return ss, nil + } + return cs.sourceMeta.Size, nil +} + +func (cs *cachedSource) resetSize(val int64) error { + cs.sourceMeta.Size = val + return cs.saveMeta() +} +func (cs *cachedSource) saveMeta() error { + return cs.storage.db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(cs.id)) + dt, err := json.Marshal(cs.sourceMeta) + if err != nil { + return err + } + return b.Put([]byte(metaKey), dt) + }) +} + +func (csr *cachedSourceRef) Release() error { + csr.cachedSource.storage.mu.Lock() + defer csr.cachedSource.storage.mu.Unlock() + delete(csr.cachedSource.refs, csr) + if len(csr.cachedSource.refs) == 0 { + go csr.cachedSource.storage.GC() + } + return nil +} + +type detectChanges struct { + f fsutil.ChangeFunc + supported bool +} + +func (dc *detectChanges) HandleChange(kind fsutil.ChangeKind, path string, fi os.FileInfo, err error) error { + if dc == nil { + return nil + } + return dc.f(kind, path, fi, err) +} + +func (dc *detectChanges) MarkSupported(v bool) { + if dc == nil { + return + } + dc.supported = v +} + +type wrappedContext struct { + builder.Source + closer func() error +} + +func (wc *wrappedContext) Close() error { + if err := wc.Source.Close(); err != nil { + return err + } + return wc.closer() +} + +type sortableCacheSources []*cachedSource + +// Len is the number of elements in the collection. +func (s sortableCacheSources) Len() int { + return len(s) +} + +// Less reports whether the element with +// index i should sort before the element with index j. 
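+// For sortableCacheSources this orders snapshots by CachePolicy.LastUsed, oldest first, so the GC pass above evicts the least recently used snapshots until the cache fits under gcPolicy.MaxSize.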
+func (s sortableCacheSources) Less(i, j int) bool { + return s[i].CachePolicy.LastUsed.Before(s[j].CachePolicy.LastUsed) +} + +// Swap swaps the elements with indexes i and j. +func (s sortableCacheSources) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} diff --git a/vendor/github.com/moby/moby/builder/fscache/fscache_test.go b/vendor/github.com/moby/moby/builder/fscache/fscache_test.go new file mode 100644 index 000000000..2532a218c --- /dev/null +++ b/vendor/github.com/moby/moby/builder/fscache/fscache_test.go @@ -0,0 +1,131 @@ +package fscache + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + "time" + + "github.com/docker/docker/client/session/filesync" + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestFSCache(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "fscache") + assert.Nil(t, err) + defer os.RemoveAll(tmpDir) + + backend := NewNaiveCacheBackend(filepath.Join(tmpDir, "backend")) + + opt := Opt{ + Root: tmpDir, + Backend: backend, + GCPolicy: GCPolicy{MaxSize: 15, MaxKeepDuration: time.Hour}, + } + + fscache, err := NewFSCache(opt) + assert.Nil(t, err) + + defer fscache.Close() + + err = fscache.RegisterTransport("test", &testTransport{}) + assert.Nil(t, err) + + src1, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo", "data", "bar"}) + assert.Nil(t, err) + + dt, err := ioutil.ReadFile(filepath.Join(src1.Root(), "foo")) + assert.Nil(t, err) + assert.Equal(t, string(dt), "data") + + // same id doesn't recalculate anything + src2, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo", "data2", "bar"}) + assert.Nil(t, err) + assert.Equal(t, src1.Root(), src2.Root()) + + dt, err = ioutil.ReadFile(filepath.Join(src1.Root(), "foo")) + assert.Nil(t, err) + assert.Equal(t, string(dt), "data") + assert.Nil(t, src2.Close()) + + src3, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo2", "data2", "bar"}) + assert.Nil(t, err) + assert.NotEqual(t, src1.Root(), src3.Root()) + + dt, err = ioutil.ReadFile(filepath.Join(src3.Root(), "foo2")) + assert.Nil(t, err) + assert.Equal(t, string(dt), "data2") + + s, err := fscache.DiskUsage() + assert.Nil(t, err) + assert.Equal(t, s, int64(0)) + + assert.Nil(t, src3.Close()) + + s, err = fscache.DiskUsage() + assert.Nil(t, err) + assert.Equal(t, s, int64(5)) + + // new upload with the same shared key should overwrite + src4, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo3", "data3", "bar"}) + assert.Nil(t, err) + assert.NotEqual(t, src1.Root(), src3.Root()) + + dt, err = ioutil.ReadFile(filepath.Join(src3.Root(), "foo3")) + assert.Nil(t, err) + assert.Equal(t, string(dt), "data3") + assert.Equal(t, src4.Root(), src3.Root()) + assert.Nil(t, src4.Close()) + + s, err = fscache.DiskUsage() + assert.Nil(t, err) + assert.Equal(t, s, int64(10)) + + // this one goes over the GC limit + src5, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo4", "datadata", "baz"}) + assert.Nil(t, err) + assert.Nil(t, src5.Close()) + + // GC happens async + time.Sleep(100 * time.Millisecond) + + // only last insertion after GC + s, err = fscache.DiskUsage() + assert.Nil(t, err) + assert.Equal(t, s, int64(8)) + + // prune deletes everything + released, err := fscache.Prune(context.TODO()) + assert.Nil(t, err) + assert.Equal(t, released, uint64(8)) + + s, err = fscache.DiskUsage() + assert.Nil(t, err) + assert.Equal(t, s, int64(0)) +} + +type testTransport struct { +} + +func (t *testTransport) Copy(ctx context.Context, id RemoteIdentifier, dest string, cs filesync.CacheUpdater)
error { + testid := id.(*testIdentifier) + return ioutil.WriteFile(filepath.Join(dest, testid.filename), []byte(testid.data), 0600) +} + +type testIdentifier struct { + filename string + data string + sharedKey string +} + +func (t *testIdentifier) Key() string { + return t.filename +} +func (t *testIdentifier) SharedKey() string { + return t.sharedKey +} +func (t *testIdentifier) Transport() string { + return "test" +} diff --git a/vendor/github.com/moby/moby/builder/fscache/naivedriver.go b/vendor/github.com/moby/moby/builder/fscache/naivedriver.go new file mode 100644 index 000000000..f40ee570f --- /dev/null +++ b/vendor/github.com/moby/moby/builder/fscache/naivedriver.go @@ -0,0 +1,28 @@ +package fscache + +import ( + "os" + "path/filepath" + + "github.com/pkg/errors" +) + +// NewNaiveCacheBackend is a basic backend implementation for fscache +func NewNaiveCacheBackend(root string) Backend { + return &naiveCacheBackend{root: root} +} + +type naiveCacheBackend struct { + root string +} + +func (tcb *naiveCacheBackend) Get(id string) (string, error) { + d := filepath.Join(tcb.root, id) + if err := os.MkdirAll(d, 0700); err != nil { + return "", errors.Wrapf(err, "failed to create tmp dir for %s", d) + } + return d, nil +} +func (tcb *naiveCacheBackend) Remove(id string) error { + return errors.WithStack(os.RemoveAll(filepath.Join(tcb.root, id))) +} diff --git a/vendor/github.com/moby/moby/builder/remotecontext/archive.go b/vendor/github.com/moby/moby/builder/remotecontext/archive.go new file mode 100644 index 000000000..f48cafecd --- /dev/null +++ b/vendor/github.com/moby/moby/builder/remotecontext/archive.go @@ -0,0 +1,128 @@ +package remotecontext + +import ( + "io" + "os" + "path/filepath" + + "github.com/docker/docker/builder" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/tarsum" + "github.com/pkg/errors" +) + +type archiveContext struct { + root string + sums tarsum.FileInfoSums +} + +func (c *archiveContext) Close() error { + return os.RemoveAll(c.root) +} + +func convertPathError(err error, cleanpath string) error { + if err, ok := err.(*os.PathError); ok { + err.Path = cleanpath + return err + } + return err +} + +type modifiableContext interface { + builder.Source + // Remove deletes the entry specified by `path`. + // Removing a directory entry usually removes all of its subentries. + Remove(path string) error +} + +// FromArchive returns a build source from a tar stream. +// +// It extracts the tar stream to a temporary folder that is deleted as soon as +// the Context is closed. +// As the extraction happens, a tarsum is calculated for every file, and the set of +// all those sums then becomes the source of truth for all operations on this Context. +// +// Closing tarStream has to be done by the caller. +func FromArchive(tarStream io.Reader) (builder.Source, error) { + root, err := ioutils.TempDir("", "docker-builder") + if err != nil { + return nil, err + } + + tsc := &archiveContext{root: root} + + // Make sure we clean up upon error.
In the happy case, the caller + // is expected to manage the clean-up + defer func() { + if err != nil { + tsc.Close() + } + }() + + decompressedStream, err := archive.DecompressStream(tarStream) + if err != nil { + return nil, err + } + + sum, err := tarsum.NewTarSum(decompressedStream, true, tarsum.Version1) + if err != nil { + return nil, err + } + + err = chrootarchive.Untar(sum, root, nil) + if err != nil { + return nil, err + } + + tsc.sums = sum.GetSums() + + return tsc, nil +} + +func (c *archiveContext) Root() string { + return c.root +} + +func (c *archiveContext) Remove(path string) error { + _, fullpath, err := normalize(path, c.root) + if err != nil { + return err + } + return os.RemoveAll(fullpath) +} + +func (c *archiveContext) Hash(path string) (string, error) { + cleanpath, fullpath, err := normalize(path, c.root) + if err != nil { + return "", err + } + + rel, err := filepath.Rel(c.root, fullpath) + if err != nil { + return "", convertPathError(err, cleanpath) + } + + // Use the checksum of the followed path (not the possible symlink) because + // this is the file that is actually copied. + if tsInfo := c.sums.GetFile(filepath.ToSlash(rel)); tsInfo != nil { + return tsInfo.Sum(), nil + } + // We set sum to path by default for the case where GetFile returns nil. + // The usual case is an empty relative path. + return path, nil // backwards compat TODO: see if really needed +} + +func normalize(path, root string) (cleanPath, fullPath string, err error) { + cleanPath = filepath.Clean(string(os.PathSeparator) + path)[1:] + fullPath, err = symlink.FollowSymlinkInScope(filepath.Join(root, path), root) + if err != nil { + return "", "", errors.Wrapf(err, "forbidden path outside the build context: %s (%s)", path, cleanPath) + } + if _, err := os.Lstat(fullPath); err != nil { + return "", "", errors.WithStack(convertPathError(err, path)) + } + return +} diff --git a/vendor/github.com/moby/moby/builder/remotecontext/detect.go b/vendor/github.com/moby/moby/builder/remotecontext/detect.go new file mode 100644 index 000000000..434573680 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/remotecontext/detect.go @@ -0,0 +1,184 @@ +package remotecontext + +import ( + "bufio" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/builder/dockerignore" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/urlutil" + "github.com/pkg/errors" +) + +// ClientSessionRemote is the identifier for the client-session context transport +const ClientSessionRemote = "client-session" + +// Detect returns a context and dockerfile from a remote location or a local +// archive. progressReader is only used if remoteURL is actually a URL +// (not empty, and not a Git endpoint).
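+// Dispatch below: an empty remoteURL means config.Source already holds a tar archive; ClientSessionRemote means config.Source is the Dockerfile itself and no context source is returned; git URLs are cloned; other URLs are downloaded, with progressReader wrapping the response body.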
+func Detect(config backend.BuildConfig) (remote builder.Source, dockerfile *parser.Result, err error) { + remoteURL := config.Options.RemoteContext + dockerfilePath := config.Options.Dockerfile + + switch { + case remoteURL == "": + remote, dockerfile, err = newArchiveRemote(config.Source, dockerfilePath) + case remoteURL == ClientSessionRemote: + res, err := parser.Parse(config.Source) + if err != nil { + return nil, nil, err + } + return nil, res, nil + case urlutil.IsGitURL(remoteURL): + remote, dockerfile, err = newGitRemote(remoteURL, dockerfilePath) + case urlutil.IsURL(remoteURL): + remote, dockerfile, err = newURLRemote(remoteURL, dockerfilePath, config.ProgressWriter.ProgressReaderFunc) + default: + err = fmt.Errorf("remoteURL (%s) could not be recognized as URL", remoteURL) + } + return +} + +func newArchiveRemote(rc io.ReadCloser, dockerfilePath string) (builder.Source, *parser.Result, error) { + defer rc.Close() + c, err := FromArchive(rc) + if err != nil { + return nil, nil, err + } + + return withDockerfileFromContext(c.(modifiableContext), dockerfilePath) +} + +func withDockerfileFromContext(c modifiableContext, dockerfilePath string) (builder.Source, *parser.Result, error) { + df, err := openAt(c, dockerfilePath) + if err != nil { + if os.IsNotExist(err) { + if dockerfilePath == builder.DefaultDockerfileName { + lowercase := strings.ToLower(dockerfilePath) + if _, err := StatAt(c, lowercase); err == nil { + return withDockerfileFromContext(c, lowercase) + } + } + return nil, nil, errors.Errorf("Cannot locate specified Dockerfile: %s", dockerfilePath) // backwards compatible error + } + c.Close() + return nil, nil, err + } + + res, err := readAndParseDockerfile(dockerfilePath, df) + if err != nil { + return nil, nil, err + } + + df.Close() + + if err := removeDockerfile(c, dockerfilePath); err != nil { + c.Close() + return nil, nil, err + } + + return c, res, nil +} + +func newGitRemote(gitURL string, dockerfilePath string) (builder.Source, *parser.Result, error) { + c, err := MakeGitContext(gitURL) // TODO: change this to NewLazySource + if err != nil { + return nil, nil, err + } + return withDockerfileFromContext(c.(modifiableContext), dockerfilePath) +} + +func newURLRemote(url string, dockerfilePath string, progressReader func(in io.ReadCloser) io.ReadCloser) (builder.Source, *parser.Result, error) { + var dockerfile io.ReadCloser + dockerfileFoundErr := errors.New("found-dockerfile") + c, err := MakeRemoteContext(url, map[string]func(io.ReadCloser) (io.ReadCloser, error){ + mimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) { + dockerfile = rc + return nil, dockerfileFoundErr + }, + // fallback handler (tar context) + "": func(rc io.ReadCloser) (io.ReadCloser, error) { + return progressReader(rc), nil + }, + }) + switch { + case err == dockerfileFoundErr: + res, err := parser.Parse(dockerfile) + return nil, res, err + case err != nil: + return nil, nil, err + } + return withDockerfileFromContext(c.(modifiableContext), dockerfilePath) +} + +func removeDockerfile(c modifiableContext, filesToRemove ...string) error { + f, err := openAt(c, ".dockerignore") + // Note that a missing .dockerignore file isn't treated as an error + switch { + case os.IsNotExist(err): + return nil + case err != nil: + return err + } + excludes, err := dockerignore.ReadAll(f) + if err != nil { + f.Close() + return err + } + f.Close() + filesToRemove = append([]string{".dockerignore"}, filesToRemove...) 
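+ // any context file matching an ignore pattern is deleted from the context; removal failures are only logged, they do not abort the build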
+ for _, fileToRemove := range filesToRemove { + if rm, _ := fileutils.Matches(fileToRemove, excludes); rm { + if err := c.Remove(fileToRemove); err != nil { + logrus.Errorf("failed to remove %s: %v", fileToRemove, err) + } + } + } + return nil +} + +func readAndParseDockerfile(name string, rc io.Reader) (*parser.Result, error) { + br := bufio.NewReader(rc) + if _, err := br.Peek(1); err != nil { + if err == io.EOF { + return nil, errors.Errorf("the Dockerfile (%s) cannot be empty", name) + } + return nil, errors.Wrap(err, "unexpected error reading Dockerfile") + } + return parser.Parse(br) +} + +func openAt(remote builder.Source, path string) (*os.File, error) { + fullPath, err := FullPath(remote, path) + if err != nil { + return nil, err + } + return os.Open(fullPath) +} + +// StatAt is a helper for calling Stat on a path from a source +func StatAt(remote builder.Source, path string) (os.FileInfo, error) { + fullPath, err := FullPath(remote, path) + if err != nil { + return nil, err + } + return os.Stat(fullPath) +} + +// FullPath is a helper for getting a full path for a path from a source +func FullPath(remote builder.Source, path string) (string, error) { + fullPath, err := symlink.FollowSymlinkInScope(filepath.Join(remote.Root(), path), remote.Root()) + if err != nil { + return "", fmt.Errorf("Forbidden path outside the build context: %s (%s)", path, fullPath) // backwards compat with old error + } + return fullPath, nil +} diff --git a/vendor/github.com/moby/moby/builder/remotecontext/detect_test.go b/vendor/github.com/moby/moby/builder/remotecontext/detect_test.go new file mode 100644 index 000000000..6b47ac227 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/remotecontext/detect_test.go @@ -0,0 +1,123 @@ +package remotecontext + +import ( + "errors" + "io/ioutil" + "os" + "path/filepath" + "sort" + "testing" + + "github.com/docker/docker/builder" +) + +const ( + dockerfileContents = "FROM busybox" + dockerignoreFilename = ".dockerignore" + testfileContents = "test" +) + +const shouldStayFilename = "should_stay" + +func extractFilenames(files []os.FileInfo) []string { + filenames := make([]string, len(files)) + + for i, file := range files { + filenames[i] = file.Name() + } + + return filenames +} + +func checkDirectory(t *testing.T, dir string, expectedFiles []string) { + files, err := ioutil.ReadDir(dir) + + if err != nil { + t.Fatalf("Could not read directory: %s", err) + } + + if len(files) != len(expectedFiles) { + t.Fatalf("Directory should contain exactly %d file(s), got %d", len(expectedFiles), len(files)) + } + + filenames := extractFilenames(files) + sort.Strings(filenames) + sort.Strings(expectedFiles) + + for i, filename := range filenames { + if filename != expectedFiles[i] { + t.Fatalf("File %s should be in the directory, got: %s", expectedFiles[i], filename) + } + } +} + +func executeProcess(t *testing.T, contextDir string) { + modifiableCtx := &stubRemote{root: contextDir} + + err := removeDockerfile(modifiableCtx, builder.DefaultDockerfileName) + + if err != nil { + t.Fatalf("Error when executing Process: %s", err) + } +} + +func TestProcessShouldRemoveDockerfileDockerignore(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerignore-process-test") + defer cleanup() + + createTestTempFile(t, contextDir, shouldStayFilename, testfileContents, 0777) + createTestTempFile(t, contextDir, dockerignoreFilename, "Dockerfile\n.dockerignore", 0777) + createTestTempFile(t, contextDir, builder.DefaultDockerfileName,
dockerfileContents, 0777) + + executeProcess(t, contextDir) + + checkDirectory(t, contextDir, []string{shouldStayFilename}) + +} + +func TestProcessNoDockerignore(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerignore-process-test") + defer cleanup() + + createTestTempFile(t, contextDir, shouldStayFilename, testfileContents, 0777) + createTestTempFile(t, contextDir, builder.DefaultDockerfileName, dockerfileContents, 0777) + + executeProcess(t, contextDir) + + checkDirectory(t, contextDir, []string{shouldStayFilename, builder.DefaultDockerfileName}) + +} + +func TestProcessShouldLeaveAllFiles(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerignore-process-test") + defer cleanup() + + createTestTempFile(t, contextDir, shouldStayFilename, testfileContents, 0777) + createTestTempFile(t, contextDir, builder.DefaultDockerfileName, dockerfileContents, 0777) + createTestTempFile(t, contextDir, dockerignoreFilename, "input1\ninput2", 0777) + + executeProcess(t, contextDir) + + checkDirectory(t, contextDir, []string{shouldStayFilename, builder.DefaultDockerfileName, dockerignoreFilename}) + +} + +// TODO: remove after moving to a separate pkg +type stubRemote struct { + root string +} + +func (r *stubRemote) Hash(path string) (string, error) { + return "", errors.New("not implemented") +} + +func (r *stubRemote) Root() string { + return r.root +} +func (r *stubRemote) Close() error { + return errors.New("not implemented") +} +func (r *stubRemote) Remove(p string) error { + return os.Remove(filepath.Join(r.root, p)) +} diff --git a/vendor/github.com/moby/moby/builder/remotecontext/filehash.go b/vendor/github.com/moby/moby/builder/remotecontext/filehash.go new file mode 100644 index 000000000..417230297 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/remotecontext/filehash.go @@ -0,0 +1,45 @@ +package remotecontext + +import ( + "archive/tar" + "crypto/sha256" + "hash" + "os" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/tarsum" +) + +// NewFileHash returns new hash that is used for the builder cache keys +func NewFileHash(path, name string, fi os.FileInfo) (hash.Hash, error) { + var link string + if fi.Mode()&os.ModeSymlink != 0 { + var err error + link, err = os.Readlink(path) + if err != nil { + return nil, err + } + } + hdr, err := archive.FileInfoHeader(name, fi, link) + if err != nil { + return nil, err + } + if err := archive.ReadSecurityXattrToTarHeader(path, hdr); err != nil { + return nil, err + } + tsh := &tarsumHash{hdr: hdr, Hash: sha256.New()} + tsh.Reset() // initialize header + return tsh, nil +} + +type tarsumHash struct { + hash.Hash + hdr *tar.Header +} + +// Reset resets the Hash to its initial state. +func (tsh *tarsumHash) Reset() { + // comply with hash.Hash and reset to the state hash had before any writes + tsh.Hash.Reset() + tarsum.WriteV1Header(tsh.hdr, tsh.Hash) +} diff --git a/vendor/github.com/moby/moby/builder/remotecontext/generate.go b/vendor/github.com/moby/moby/builder/remotecontext/generate.go new file mode 100644 index 000000000..0b52d4992 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/remotecontext/generate.go @@ -0,0 +1,3 @@ +package remotecontext + +//go:generate protoc --gogoslick_out=. 
tarsum.proto diff --git a/vendor/github.com/moby/moby/builder/remotecontext/git.go b/vendor/github.com/moby/moby/builder/remotecontext/git.go new file mode 100644 index 000000000..158bb5ad4 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/remotecontext/git.go @@ -0,0 +1,29 @@ +package remotecontext + +import ( + "os" + + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/remotecontext/git" + "github.com/docker/docker/pkg/archive" +) + +// MakeGitContext returns a Context from gitURL that is cloned in a temporary directory. +func MakeGitContext(gitURL string) (builder.Source, error) { + root, err := git.Clone(gitURL) + if err != nil { + return nil, err + } + + c, err := archive.Tar(root, archive.Uncompressed) + if err != nil { + return nil, err + } + + defer func() { + // TODO: print errors? + c.Close() + os.RemoveAll(root) + }() + return FromArchive(c) +} diff --git a/vendor/github.com/moby/moby/builder/remotecontext/git/gitutils.go b/vendor/github.com/moby/moby/builder/remotecontext/git/gitutils.go new file mode 100644 index 000000000..b94d462cd --- /dev/null +++ b/vendor/github.com/moby/moby/builder/remotecontext/git/gitutils.go @@ -0,0 +1,159 @@ +package git + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/url" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/urlutil" + "github.com/pkg/errors" +) + +type gitRepo struct { + remote string + ref string + subdir string +} + +// Clone clones a repository into a newly created directory which +// will be under "docker-build-git" +func Clone(remoteURL string) (string, error) { + repo, err := parseRemoteURL(remoteURL) + + if err != nil { + return "", err + } + + fetch := fetchArgs(repo.remote, repo.ref) + + root, err := ioutil.TempDir("", "docker-build-git") + if err != nil { + return "", err + } + + if out, err := gitWithinDir(root, "init"); err != nil { + return "", errors.Wrapf(err, "failed to init repo at %s: %s", root, out) + } + + // Add origin remote for compatibility with previous implementation that + // used "git clone" and also to make sure local refs are created for branches + if out, err := gitWithinDir(root, "remote", "add", "origin", repo.remote); err != nil { + return "", errors.Wrapf(err, "failed add origin repo at %s: %s", repo.remote, out) + } + + if output, err := gitWithinDir(root, fetch...); err != nil { + return "", errors.Wrapf(err, "error fetching: %s", output) + } + + return checkoutGit(root, repo.ref, repo.subdir) +} + +func parseRemoteURL(remoteURL string) (gitRepo, error) { + repo := gitRepo{} + + if !isGitTransport(remoteURL) { + remoteURL = "https://" + remoteURL + } + + var fragment string + if strings.HasPrefix(remoteURL, "git@") { + // git@.. 
is not a URL, so it cannot be parsed as one + parts := strings.SplitN(remoteURL, "#", 2) + + repo.remote = parts[0] + if len(parts) == 2 { + fragment = parts[1] + } + repo.ref, repo.subdir = getRefAndSubdir(fragment) + } else { + u, err := url.Parse(remoteURL) + if err != nil { + return repo, err + } + + repo.ref, repo.subdir = getRefAndSubdir(u.Fragment) + u.Fragment = "" + repo.remote = u.String() + } + return repo, nil +} + +func getRefAndSubdir(fragment string) (ref string, subdir string) { + refAndDir := strings.SplitN(fragment, ":", 2) + ref = "master" + if len(refAndDir[0]) != 0 { + ref = refAndDir[0] + } + if len(refAndDir) > 1 && len(refAndDir[1]) != 0 { + subdir = refAndDir[1] + } + return +} + +func fetchArgs(remoteURL string, ref string) []string { + args := []string{"fetch", "--recurse-submodules=yes"} + shallow := true + + if urlutil.IsURL(remoteURL) { + res, err := http.Head(fmt.Sprintf("%s/info/refs?service=git-upload-pack", remoteURL)) + if err != nil || res.Header.Get("Content-Type") != "application/x-git-upload-pack-advertisement" { + shallow = false + } + } + + if shallow { + args = append(args, "--depth", "1") + } + + return append(args, "origin", ref) +} + +func checkoutGit(root, ref, subdir string) (string, error) { + // Try checking out by ref name first. This will work on branches and sets + // .git/HEAD to the current branch name + if output, err := gitWithinDir(root, "checkout", ref); err != nil { + // If checking out by branch name fails, check out the last fetched ref + if _, err2 := gitWithinDir(root, "checkout", "FETCH_HEAD"); err2 != nil { + return "", errors.Wrapf(err, "error checking out %s: %s", ref, output) + } + } + + if subdir != "" { + newCtx, err := symlink.FollowSymlinkInScope(filepath.Join(root, subdir), root) + if err != nil { + return "", errors.Wrapf(err, "error setting git context, %q not within git root", subdir) + } + + fi, err := os.Stat(newCtx) + if err != nil { + return "", err + } + if !fi.IsDir() { + return "", errors.Errorf("error setting git context, not a directory: %s", newCtx) + } + root = newCtx + } + + return root, nil +} + +func gitWithinDir(dir string, args ...string) ([]byte, error) { + a := []string{"--work-tree", dir, "--git-dir", filepath.Join(dir, ".git")} + return git(append(a, args...)...) +} + +func git(args ...string) ([]byte, error) { + return exec.Command("git", args...).CombinedOutput() +} + +// isGitTransport returns true if the provided str is a git transport by inspecting +// the prefix of the string for known protocols used in git.
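+// Under this test, "git://host/repo", "git@host:repo", and http(s) URLs count as git transports, while a bare "github.com/user/repo" does not (see TestValidGitTransport).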
+func isGitTransport(str string) bool { + return urlutil.IsURL(str) || strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@") +} diff --git a/vendor/github.com/moby/moby/builder/remotecontext/git/gitutils_test.go b/vendor/github.com/moby/moby/builder/remotecontext/git/gitutils_test.go new file mode 100644 index 000000000..c638a498f --- /dev/null +++ b/vendor/github.com/moby/moby/builder/remotecontext/git/gitutils_test.go @@ -0,0 +1,238 @@ +package git + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestParseRemoteURL(t *testing.T) { + dir, err := parseRemoteURL("git://github.com/user/repo.git") + require.NoError(t, err) + assert.NotEmpty(t, dir) + assert.Equal(t, gitRepo{"git://github.com/user/repo.git", "master", ""}, dir) + + dir, err = parseRemoteURL("git://github.com/user/repo.git#mybranch:mydir/mysubdir/") + require.NoError(t, err) + assert.NotEmpty(t, dir) + assert.Equal(t, gitRepo{"git://github.com/user/repo.git", "mybranch", "mydir/mysubdir/"}, dir) + + dir, err = parseRemoteURL("https://github.com/user/repo.git") + require.NoError(t, err) + assert.NotEmpty(t, dir) + assert.Equal(t, gitRepo{"https://github.com/user/repo.git", "master", ""}, dir) + + dir, err = parseRemoteURL("https://github.com/user/repo.git#mybranch:mydir/mysubdir/") + require.NoError(t, err) + assert.NotEmpty(t, dir) + assert.Equal(t, gitRepo{"https://github.com/user/repo.git", "mybranch", "mydir/mysubdir/"}, dir) + + dir, err = parseRemoteURL("git@github.com:user/repo.git") + require.NoError(t, err) + assert.NotEmpty(t, dir) + assert.Equal(t, gitRepo{"git@github.com:user/repo.git", "master", ""}, dir) + + dir, err = parseRemoteURL("git@github.com:user/repo.git#mybranch:mydir/mysubdir/") + require.NoError(t, err) + assert.NotEmpty(t, dir) + assert.Equal(t, gitRepo{"git@github.com:user/repo.git", "mybranch", "mydir/mysubdir/"}, dir) +} + +func TestCloneArgsSmartHttp(t *testing.T) { + mux := http.NewServeMux() + server := httptest.NewServer(mux) + serverURL, _ := url.Parse(server.URL) + + serverURL.Path = "/repo.git" + + mux.HandleFunc("/repo.git/info/refs", func(w http.ResponseWriter, r *http.Request) { + q := r.URL.Query().Get("service") + w.Header().Set("Content-Type", fmt.Sprintf("application/x-%s-advertisement", q)) + }) + + args := fetchArgs(serverURL.String(), "master") + exp := []string{"fetch", "--recurse-submodules=yes", "--depth", "1", "origin", "master"} + assert.Equal(t, exp, args) +} + +func TestCloneArgsDumbHttp(t *testing.T) { + mux := http.NewServeMux() + server := httptest.NewServer(mux) + serverURL, _ := url.Parse(server.URL) + + serverURL.Path = "/repo.git" + + mux.HandleFunc("/repo.git/info/refs", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain") + }) + + args := fetchArgs(serverURL.String(), "master") + exp := []string{"fetch", "--recurse-submodules=yes", "origin", "master"} + assert.Equal(t, exp, args) +} + +func TestCloneArgsGit(t *testing.T) { + args := fetchArgs("git://github.com/docker/docker", "master") + exp := []string{"fetch", "--recurse-submodules=yes", "--depth", "1", "origin", "master"} + assert.Equal(t, exp, args) +} + +func gitGetConfig(name string) string { + b, err := git([]string{"config", "--get", name}...) + if err != nil { + // since we are only interested in whether the string is empty or non-empty, + // we can safely ignore the err here.
+ return "" + } + return strings.TrimSpace(string(b)) +} + +func TestCheckoutGit(t *testing.T) { + root, err := ioutil.TempDir("", "docker-build-git-checkout") + require.NoError(t, err) + defer os.RemoveAll(root) + + autocrlf := gitGetConfig("core.autocrlf") + if !(autocrlf == "true" || autocrlf == "false" || + autocrlf == "input" || autocrlf == "") { + t.Logf("unknown core.autocrlf value: \"%s\"", autocrlf) + } + eol := "\n" + if autocrlf == "true" { + eol = "\r\n" + } + + gitDir := filepath.Join(root, "repo") + _, err = git("init", gitDir) + require.NoError(t, err) + + _, err = gitWithinDir(gitDir, "config", "user.email", "test@docker.com") + require.NoError(t, err) + + _, err = gitWithinDir(gitDir, "config", "user.name", "Docker test") + require.NoError(t, err) + + err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch"), 0644) + require.NoError(t, err) + + subDir := filepath.Join(gitDir, "subdir") + require.NoError(t, os.Mkdir(subDir, 0755)) + + err = ioutil.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 5000"), 0644) + require.NoError(t, err) + + if runtime.GOOS != "windows" { + if err = os.Symlink("../subdir", filepath.Join(gitDir, "parentlink")); err != nil { + t.Fatal(err) + } + + if err = os.Symlink("/subdir", filepath.Join(gitDir, "absolutelink")); err != nil { + t.Fatal(err) + } + } + + _, err = gitWithinDir(gitDir, "add", "-A") + require.NoError(t, err) + + _, err = gitWithinDir(gitDir, "commit", "-am", "First commit") + require.NoError(t, err) + + _, err = gitWithinDir(gitDir, "checkout", "-b", "test") + require.NoError(t, err) + + err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 3000"), 0644) + require.NoError(t, err) + + err = ioutil.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM busybox\nEXPOSE 5000"), 0644) + require.NoError(t, err) + + _, err = gitWithinDir(gitDir, "add", "-A") + require.NoError(t, err) + + _, err = gitWithinDir(gitDir, "commit", "-am", "Branch commit") + require.NoError(t, err) + + _, err = gitWithinDir(gitDir, "checkout", "master") + require.NoError(t, err) + + type singleCase struct { + frag string + exp string + fail bool + } + + cases := []singleCase{ + {"", "FROM scratch", false}, + {"master", "FROM scratch", false}, + {":subdir", "FROM scratch" + eol + "EXPOSE 5000", false}, + {":nosubdir", "", true}, // missing directory error + {":Dockerfile", "", true}, // not a directory error + {"master:nosubdir", "", true}, + {"master:subdir", "FROM scratch" + eol + "EXPOSE 5000", false}, + {"master:../subdir", "", true}, + {"test", "FROM scratch" + eol + "EXPOSE 3000", false}, + {"test:", "FROM scratch" + eol + "EXPOSE 3000", false}, + {"test:subdir", "FROM busybox" + eol + "EXPOSE 5000", false}, + } + + if runtime.GOOS != "windows" { + // Windows GIT (2.7.1 x64) does not support parentlink/absolutelink. 
Sample output below + // git --work-tree .\repo --git-dir .\repo\.git add -A + // error: readlink("absolutelink"): Function not implemented + // error: unable to index file absolutelink + // fatal: adding files failed + cases = append(cases, singleCase{frag: "master:absolutelink", exp: "FROM scratch" + eol + "EXPOSE 5000", fail: false}) + cases = append(cases, singleCase{frag: "master:parentlink", exp: "FROM scratch" + eol + "EXPOSE 5000", fail: false}) + } + + for _, c := range cases { + ref, subdir := getRefAndSubdir(c.frag) + r, err := checkoutGit(gitDir, ref, subdir) + + if c.fail { + assert.Error(t, err) + continue + } + + b, err := ioutil.ReadFile(filepath.Join(r, "Dockerfile")) + require.NoError(t, err) + assert.Equal(t, c.exp, string(b)) + } +} + +func TestValidGitTransport(t *testing.T) { + gitUrls := []string{ + "git://github.com/docker/docker", + "git@github.com:docker/docker.git", + "git@bitbucket.org:atlassianlabs/atlassian-docker.git", + "https://github.com/docker/docker.git", + "http://github.com/docker/docker.git", + "http://github.com/docker/docker.git#branch", + "http://github.com/docker/docker.git#:dir", + } + incompleteGitUrls := []string{ + "github.com/docker/docker", + } + + for _, url := range gitUrls { + if !isGitTransport(url) { + t.Fatalf("%q should be detected as valid Git prefix", url) + } + } + + for _, url := range incompleteGitUrls { + if isGitTransport(url) { + t.Fatalf("%q should not be detected as valid Git prefix", url) + } + } +} diff --git a/vendor/github.com/moby/moby/builder/remotecontext/lazycontext.go b/vendor/github.com/moby/moby/builder/remotecontext/lazycontext.go new file mode 100644 index 000000000..b29c413fa --- /dev/null +++ b/vendor/github.com/moby/moby/builder/remotecontext/lazycontext.go @@ -0,0 +1,101 @@ +package remotecontext + +import ( + "encoding/hex" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/builder" + "github.com/docker/docker/pkg/pools" + "github.com/pkg/errors" +) + +// NewLazySource creates a new LazyContext. LazyContext defines a hashed build +// context based on a root directory. Individual files are hashed the first time +// they are requested. It is not safe to call methods of LazyContext concurrently.
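+// Computed sums are memoized in lazySource.sums, keyed by path relative to root, so repeated Hash calls for the same path hash the file only once.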
+func NewLazySource(root string) (builder.Source, error) { + return &lazySource{ + root: root, + sums: make(map[string]string), + }, nil +} + +type lazySource struct { + root string + sums map[string]string +} + +func (c *lazySource) Root() string { + return c.root +} + +func (c *lazySource) Close() error { + return nil +} + +func (c *lazySource) Hash(path string) (string, error) { + cleanPath, fullPath, err := normalize(path, c.root) + if err != nil { + return "", err + } + + fi, err := os.Lstat(fullPath) + if err != nil { + return "", errors.WithStack(err) + } + + relPath, err := Rel(c.root, fullPath) + if err != nil { + return "", errors.WithStack(convertPathError(err, cleanPath)) + } + + sum, ok := c.sums[relPath] + if !ok { + sum, err = c.prepareHash(relPath, fi) + if err != nil { + return "", err + } + } + + return sum, nil +} + +func (c *lazySource) prepareHash(relPath string, fi os.FileInfo) (string, error) { + p := filepath.Join(c.root, relPath) + h, err := NewFileHash(p, relPath, fi) + if err != nil { + return "", errors.Wrapf(err, "failed to create hash for %s", relPath) + } + if fi.Mode().IsRegular() && fi.Size() > 0 { + f, err := os.Open(p) + if err != nil { + return "", errors.Wrapf(err, "failed to open %s", relPath) + } + defer f.Close() + if _, err := pools.Copy(h, f); err != nil { + return "", errors.Wrapf(err, "failed to copy file data for %s", relPath) + } + } + sum := hex.EncodeToString(h.Sum(nil)) + c.sums[relPath] = sum + return sum, nil +} + +// Rel makes a path relative to base path. Same as `filepath.Rel` but can also +// handle UUID paths in Windows. +func Rel(basepath, targpath string) (string, error) { + // filepath.Rel can't handle UUID paths in Windows + if runtime.GOOS == "windows" { + pfx := basepath + `\` + if strings.HasPrefix(targpath, pfx) { + p := strings.TrimPrefix(targpath, pfx) + if p == "" { + p = "." + } + return p, nil + } + } + return filepath.Rel(basepath, targpath) +} diff --git a/vendor/github.com/moby/moby/builder/remotecontext/mimetype.go b/vendor/github.com/moby/moby/builder/remotecontext/mimetype.go new file mode 100644 index 000000000..083d60997 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/remotecontext/mimetype.go @@ -0,0 +1,27 @@ +package remotecontext + +import ( + "mime" + "net/http" +) + +// mimeTypes stores the MIME content types used by this package. +var mimeTypes = struct { + TextPlain string + OctetStream string +}{"text/plain", "application/octet-stream"} + +// detectContentType returns a best guess representation of the MIME +// content type for the bytes at c. The value detected by +// http.DetectContentType is never empty, defaulting to +// application/octet-stream when a better guess cannot be made. The +// result of this detection is then run through mime.ParseMediaType() +// which separates the actual MIME string from any parameters.
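+// As a hypothetical example, detectContentType([]byte("FROM busybox")) would return "text/plain" together with a parameter map such as {"charset": "utf-8"}.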
+func detectContentType(c []byte) (string, map[string]string, error) { + ct := http.DetectContentType(c) + contentType, args, err := mime.ParseMediaType(ct) + if err != nil { + return "", nil, err + } + return contentType, args, nil +} diff --git a/vendor/github.com/moby/moby/builder/remotecontext/mimetype_test.go b/vendor/github.com/moby/moby/builder/remotecontext/mimetype_test.go new file mode 100644 index 000000000..8c00ec286 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/remotecontext/mimetype_test.go @@ -0,0 +1,16 @@ +package remotecontext + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDetectContentType(t *testing.T) { + input := []byte("That is just a plain text") + + contentType, _, err := detectContentType(input) + require.NoError(t, err) + assert.Equal(t, "text/plain", contentType) +} diff --git a/vendor/github.com/moby/moby/builder/remotecontext/remote.go b/vendor/github.com/moby/moby/builder/remotecontext/remote.go new file mode 100644 index 000000000..4afd516be --- /dev/null +++ b/vendor/github.com/moby/moby/builder/remotecontext/remote.go @@ -0,0 +1,134 @@ +package remotecontext + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "regexp" + + "github.com/docker/docker/builder" + "github.com/pkg/errors" +) + +// When downloading remote contexts, limit the amount (in bytes) +// to be read from the response body in order to detect its Content-Type +const maxPreambleLength = 100 + +const acceptableRemoteMIME = `(?:application/(?:(?:x\-)?tar|octet\-stream|((?:x\-)?(?:gzip|bzip2?|xz)))|(?:text/plain))` + +var mimeRe = regexp.MustCompile(acceptableRemoteMIME) + +// MakeRemoteContext downloads a context from remoteURL and returns it. +// +// If contentTypeHandlers is non-nil, then the Content-Type header is read along with a maximum of +// maxPreambleLength bytes from the body to help detect the MIME type. +// Look at acceptableRemoteMIME for more details. +// +// If a match is found, then the body is sent to the contentType handler and a (potentially compressed) tar stream is expected +// to be returned. If no match is found, it is assumed the body is a tar stream (compressed or not). +// In either case, an (assumed) tar stream is passed to FromArchive whose result is returned. +func MakeRemoteContext(remoteURL string, contentTypeHandlers map[string]func(io.ReadCloser) (io.ReadCloser, error)) (builder.Source, error) { + f, err := GetWithStatusError(remoteURL) + if err != nil { + return nil, fmt.Errorf("error downloading remote context %s: %v", remoteURL, err) + } + defer f.Body.Close() + + var contextReader io.ReadCloser + if contentTypeHandlers != nil { + contentType := f.Header.Get("Content-Type") + clen := f.ContentLength + + contentType, contextReader, err = inspectResponse(contentType, f.Body, clen) + if err != nil { + return nil, fmt.Errorf("error detecting content type for remote %s: %v", remoteURL, err) + } + defer contextReader.Close() + + // This loop tries to find a content-type handler for the detected content-type. + // If it could not find one from the caller-supplied map, it tries the empty content-type `""` + // which is interpreted as a fallback handler (usually used for raw tar contexts).
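+ // newURLRemote in detect.go is one such caller: it registers a text/plain handler for bare Dockerfiles and an "" fallback that wraps the body in a progress reader.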
+ for _, ct := range []string{contentType, ""} { + if fn, ok := contentTypeHandlers[ct]; ok { + defer contextReader.Close() + if contextReader, err = fn(contextReader); err != nil { + return nil, err + } + break + } + } + } + + // Pass through - this is a pre-packaged context, presumably + // with a Dockerfile with the right name inside it. + return FromArchive(contextReader) +} + +// GetWithStatusError does an http.Get() and returns an error if the +// status code is 4xx or 5xx. +func GetWithStatusError(url string) (resp *http.Response, err error) { + if resp, err = http.Get(url); err != nil { + return nil, err + } + if resp.StatusCode < 400 { + return resp, nil + } + msg := fmt.Sprintf("failed to GET %s with status %s", url, resp.Status) + body, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + // msg is built from the URL and may contain % characters, so don't use it as a format string + return nil, errors.Wrap(err, msg+": error reading body") + } + return nil, errors.Errorf("%s: %s", msg, bytes.TrimSpace(body)) +} + +// inspectResponse looks into the http response data at r to determine whether its +// content-type is on the list of acceptable content types for remote build contexts. +// This function returns: +// - a string representation of the detected content-type +// - an io.Reader for the response body +// - an error value which will be non-nil either when something goes wrong while +// reading bytes from r or when the detected content-type is not acceptable. +func inspectResponse(ct string, r io.ReadCloser, clen int64) (string, io.ReadCloser, error) { + plen := clen + if plen <= 0 || plen > maxPreambleLength { + plen = maxPreambleLength + } + + preamble := make([]byte, plen) + rlen, err := r.Read(preamble) + if rlen == 0 { + return ct, r, errors.New("empty response") + } + if err != nil && err != io.EOF { + return ct, r, err + } + + preambleR := bytes.NewReader(preamble[:rlen]) + bodyReader := ioutil.NopCloser(io.MultiReader(preambleR, r)) + // Some web servers will use application/octet-stream as the default + // content type for files without an extension (e.g.
'Dockerfile') + // so if we receive this value we better check for text content + contentType := ct + if len(ct) == 0 || ct == mimeTypes.OctetStream { + contentType, _, err = detectContentType(preamble) + if err != nil { + return contentType, bodyReader, err + } + } + + contentType = selectAcceptableMIME(contentType) + var cterr error + if len(contentType) == 0 { + cterr = fmt.Errorf("unsupported Content-Type %q", ct) + contentType = ct + } + + return contentType, bodyReader, cterr +} + +func selectAcceptableMIME(ct string) string { + return mimeRe.FindString(ct) +} diff --git a/vendor/github.com/moby/moby/builder/remotecontext/remote_test.go b/vendor/github.com/moby/moby/builder/remotecontext/remote_test.go new file mode 100644 index 000000000..c698726e8 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/remotecontext/remote_test.go @@ -0,0 +1,263 @@ +package remotecontext + +import ( + "bytes" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/docker/docker/builder" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var binaryContext = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} //xz magic + +func TestSelectAcceptableMIME(t *testing.T) { + validMimeStrings := []string{ + "application/x-bzip2", + "application/bzip2", + "application/gzip", + "application/x-gzip", + "application/x-xz", + "application/xz", + "application/tar", + "application/x-tar", + "application/octet-stream", + "text/plain", + } + + invalidMimeStrings := []string{ + "", + "application/octet", + "application/json", + } + + for _, m := range invalidMimeStrings { + if len(selectAcceptableMIME(m)) > 0 { + t.Fatalf("Should not have accepted %q", m) + } + } + + for _, m := range validMimeStrings { + if str := selectAcceptableMIME(m); str == "" { + t.Fatalf("Should have accepted %q", m) + } + } +} + +func TestInspectEmptyResponse(t *testing.T) { + ct := "application/octet-stream" + br := ioutil.NopCloser(bytes.NewReader([]byte(""))) + contentType, bReader, err := inspectResponse(ct, br, 0) + if err == nil { + t.Fatal("Should have generated an error for an empty response") + } + if contentType != "application/octet-stream" { + t.Fatalf("Content type should be 'application/octet-stream' but is %q", contentType) + } + body, err := ioutil.ReadAll(bReader) + if err != nil { + t.Fatal(err) + } + if len(body) != 0 { + t.Fatal("response body should remain empty") + } +} + +func TestInspectResponseBinary(t *testing.T) { + ct := "application/octet-stream" + br := ioutil.NopCloser(bytes.NewReader(binaryContext)) + contentType, bReader, err := inspectResponse(ct, br, int64(len(binaryContext))) + if err != nil { + t.Fatal(err) + } + if contentType != "application/octet-stream" { + t.Fatalf("Content type should be 'application/octet-stream' but is %q", contentType) + } + body, err := ioutil.ReadAll(bReader) + if err != nil { + t.Fatal(err) + } + if len(body) != len(binaryContext) { + t.Fatalf("Wrong response size %d, should be == len(binaryContext)", len(body)) + } + for i := range body { + if body[i] != binaryContext[i] { + t.Fatalf("Corrupted response body at byte index %d", i) + } + } +} + +func TestResponseUnsupportedContentType(t *testing.T) { + content := []byte(dockerfileContents) + ct := "application/json" + br := ioutil.NopCloser(bytes.NewReader(content)) + contentType, bReader, err := inspectResponse(ct, br, int64(len(dockerfileContents))) + + if err == nil { + 
t.Fatal("Should have returned an error on content-type 'application/json'") + } + if contentType != ct { + t.Fatalf("Should not have altered content-type: orig: %s, altered: %s", ct, contentType) + } + body, err := ioutil.ReadAll(bReader) + if err != nil { + t.Fatal(err) + } + if string(body) != dockerfileContents { + t.Fatalf("Corrupted response body %s", body) + } +} + +func TestInspectResponseTextSimple(t *testing.T) { + content := []byte(dockerfileContents) + ct := "text/plain" + br := ioutil.NopCloser(bytes.NewReader(content)) + contentType, bReader, err := inspectResponse(ct, br, int64(len(content))) + if err != nil { + t.Fatal(err) + } + if contentType != "text/plain" { + t.Fatalf("Content type should be 'text/plain' but is %q", contentType) + } + body, err := ioutil.ReadAll(bReader) + if err != nil { + t.Fatal(err) + } + if string(body) != dockerfileContents { + t.Fatalf("Corrupted response body %s", body) + } +} + +func TestInspectResponseEmptyContentType(t *testing.T) { + content := []byte(dockerfileContents) + br := ioutil.NopCloser(bytes.NewReader(content)) + contentType, bodyReader, err := inspectResponse("", br, int64(len(content))) + if err != nil { + t.Fatal(err) + } + if contentType != "text/plain" { + t.Fatalf("Content type should be 'text/plain' but is %q", contentType) + } + body, err := ioutil.ReadAll(bodyReader) + if err != nil { + t.Fatal(err) + } + if string(body) != dockerfileContents { + t.Fatalf("Corrupted response body %s", body) + } +} + +func TestUnknownContentLength(t *testing.T) { + content := []byte(dockerfileContents) + ct := "text/plain" + br := ioutil.NopCloser(bytes.NewReader(content)) + contentType, bReader, err := inspectResponse(ct, br, -1) + if err != nil { + t.Fatal(err) + } + if contentType != "text/plain" { + t.Fatalf("Content type should be 'text/plain' but is %q", contentType) + } + body, err := ioutil.ReadAll(bReader) + if err != nil { + t.Fatal(err) + } + if string(body) != dockerfileContents { + t.Fatalf("Corrupted response body %s", body) + } +} + +func TestMakeRemoteContext(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + createTestTempFile(t, contextDir, builder.DefaultDockerfileName, dockerfileContents, 0777) + + mux := http.NewServeMux() + server := httptest.NewServer(mux) + serverURL, _ := url.Parse(server.URL) + + serverURL.Path = "/" + builder.DefaultDockerfileName + remoteURL := serverURL.String() + + mux.Handle("/", http.FileServer(http.Dir(contextDir))) + + remoteContext, err := MakeRemoteContext(remoteURL, map[string]func(io.ReadCloser) (io.ReadCloser, error){ + mimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) { + dockerfile, err := ioutil.ReadAll(rc) + if err != nil { + return nil, err + } + + r, err := archive.Generate(builder.DefaultDockerfileName, string(dockerfile)) + if err != nil { + return nil, err + } + return ioutil.NopCloser(r), nil + }, + }) + + if err != nil { + t.Fatalf("Error when executing DetectContextFromRemoteURL: %s", err) + } + + if remoteContext == nil { + t.Fatal("Remote context should not be nil") + } + + h, err := remoteContext.Hash(builder.DefaultDockerfileName) + if err != nil { + t.Fatalf("failed to compute hash %s", err) + } + + if expected, actual := "7b6b6b66bee9e2102fbdc2228be6c980a2a23adf371962a37286a49f7de0f7cc", h; expected != actual { + t.Fatalf("There should be file named %s %s in fileInfoSums", expected, actual) + } +} + +func TestGetWithStatusError(t *testing.T) { + var testcases = []struct { + err error + 
statusCode int + expectedErr string + expectedBody string + }{ + { + statusCode: 200, + expectedBody: "THE BODY", + }, + { + statusCode: 400, + expectedErr: "with status 400 Bad Request: broke", + expectedBody: "broke", + }, + } + for _, testcase := range testcases { + ts := httptest.NewServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + buffer := bytes.NewBufferString(testcase.expectedBody) + w.WriteHeader(testcase.statusCode) + w.Write(buffer.Bytes()) + }), + ) + defer ts.Close() + response, err := GetWithStatusError(ts.URL) + + if testcase.expectedErr == "" { + require.NoError(t, err) + + body, err := testutil.ReadBody(response.Body) + require.NoError(t, err) + assert.Contains(t, string(body), testcase.expectedBody) + } else { + testutil.ErrorContains(t, err, testcase.expectedErr) + } + } +} diff --git a/vendor/github.com/moby/moby/builder/remotecontext/tarsum.go b/vendor/github.com/moby/moby/builder/remotecontext/tarsum.go new file mode 100644 index 000000000..3ae9d8242 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/remotecontext/tarsum.go @@ -0,0 +1,174 @@ +package remotecontext + +import ( + "fmt" + "os" + "path/filepath" + "sync" + + "github.com/docker/docker/pkg/symlink" + iradix "github.com/hashicorp/go-immutable-radix" + "github.com/pkg/errors" + "github.com/tonistiigi/fsutil" +) + +type hashed interface { + Hash() string +} + +// CachableSource is a source that contains cache records for its contents +type CachableSource struct { + mu sync.Mutex + root string + tree *iradix.Tree + txn *iradix.Txn +} + +// NewCachableSource creates new CachableSource +func NewCachableSource(root string) *CachableSource { + ts := &CachableSource{ + tree: iradix.New(), + root: root, + } + return ts +} + +// MarshalBinary marshals current cache information to a byte array +func (cs *CachableSource) MarshalBinary() ([]byte, error) { + b := TarsumBackup{Hashes: make(map[string]string)} + root := cs.getRoot() + root.Walk(func(k []byte, v interface{}) bool { + b.Hashes[string(k)] = v.(*fileInfo).sum + return false + }) + return b.Marshal() +} + +// UnmarshalBinary decodes cache information for presented byte array +func (cs *CachableSource) UnmarshalBinary(data []byte) error { + var b TarsumBackup + if err := b.Unmarshal(data); err != nil { + return err + } + txn := iradix.New().Txn() + for p, v := range b.Hashes { + txn.Insert([]byte(p), &fileInfo{sum: v}) + } + cs.mu.Lock() + defer cs.mu.Unlock() + cs.tree = txn.Commit() + return nil +} + +// Scan rescans the cache information from the file system +func (cs *CachableSource) Scan() error { + lc, err := NewLazySource(cs.root) + if err != nil { + return err + } + txn := iradix.New().Txn() + err = filepath.Walk(cs.root, func(path string, info os.FileInfo, err error) error { + if err != nil { + return errors.Wrapf(err, "failed to walk %s", path) + } + rel, err := Rel(cs.root, path) + if err != nil { + return err + } + h, err := lc.Hash(rel) + if err != nil { + return err + } + txn.Insert([]byte(rel), &fileInfo{sum: h}) + return nil + }) + if err != nil { + return err + } + cs.mu.Lock() + defer cs.mu.Unlock() + cs.tree = txn.Commit() + return nil +} + +// HandleChange notifies the source about a modification operation +func (cs *CachableSource) HandleChange(kind fsutil.ChangeKind, p string, fi os.FileInfo, err error) (retErr error) { + cs.mu.Lock() + if cs.txn == nil { + cs.txn = cs.tree.Txn() + } + if kind == fsutil.ChangeKindDelete { + cs.txn.Delete([]byte(p)) + cs.mu.Unlock() + return + } + + h, ok := fi.(hashed) + if !ok 
{ + cs.mu.Unlock() + return errors.Errorf("invalid fileinfo: %s", p) + } + + hfi := &fileInfo{ + sum: h.Hash(), + } + cs.txn.Insert([]byte(p), hfi) + cs.mu.Unlock() + return nil +} + +func (cs *CachableSource) getRoot() *iradix.Node { + cs.mu.Lock() + if cs.txn != nil { + cs.tree = cs.txn.Commit() + cs.txn = nil + } + t := cs.tree + cs.mu.Unlock() + return t.Root() +} + +// Close closes the source +func (cs *CachableSource) Close() error { + return nil +} + +func (cs *CachableSource) normalize(path string) (cleanpath, fullpath string, err error) { + cleanpath = filepath.Clean(string(os.PathSeparator) + path)[1:] + fullpath, err = symlink.FollowSymlinkInScope(filepath.Join(cs.root, path), cs.root) + if err != nil { + return "", "", fmt.Errorf("Forbidden path outside the context: %s (%s)", path, fullpath) + } + _, err = os.Lstat(fullpath) + if err != nil { + return "", "", convertPathError(err, path) + } + return +} + +// Hash returns a hash for a single file in the source +func (cs *CachableSource) Hash(path string) (string, error) { + n := cs.getRoot() + sum := "" + // TODO: check this for symlinks + v, ok := n.Get([]byte(path)) + if !ok { + sum = path + } else { + sum = v.(*fileInfo).sum + } + return sum, nil +} + +// Root returns a root directory for the source +func (cs *CachableSource) Root() string { + return cs.root +} + +type fileInfo struct { + sum string +} + +func (fi *fileInfo) Hash() string { + return fi.sum +} diff --git a/vendor/github.com/moby/moby/builder/remotecontext/tarsum.pb.go b/vendor/github.com/moby/moby/builder/remotecontext/tarsum.pb.go new file mode 100644 index 000000000..561a7f636 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/remotecontext/tarsum.pb.go @@ -0,0 +1,525 @@ +// Code generated by protoc-gen-gogo. +// source: tarsum.proto +// DO NOT EDIT! + +/* +Package remotecontext is a generated protocol buffer package. + +It is generated from these files: + tarsum.proto + +It has these top-level messages: + TarsumBackup +*/ +package remotecontext + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type TarsumBackup struct { + Hashes map[string]string `protobuf:"bytes,1,rep,name=Hashes" json:"Hashes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *TarsumBackup) Reset() { *m = TarsumBackup{} } +func (*TarsumBackup) ProtoMessage() {} +func (*TarsumBackup) Descriptor() ([]byte, []int) { return fileDescriptorTarsum, []int{0} } + +func (m *TarsumBackup) GetHashes() map[string]string { + if m != nil { + return m.Hashes + } + return nil +} + +func init() { + proto.RegisterType((*TarsumBackup)(nil), "remotecontext.TarsumBackup") +} +func (this *TarsumBackup) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*TarsumBackup) + if !ok { + that2, ok := that.(TarsumBackup) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if len(this.Hashes) != len(that1.Hashes) { + return false + } + for i := range this.Hashes { + if this.Hashes[i] != that1.Hashes[i] { + return false + } + } + return true +} +func (this *TarsumBackup) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&remotecontext.TarsumBackup{") + keysForHashes := make([]string, 0, len(this.Hashes)) + for k, _ := range this.Hashes { + keysForHashes = append(keysForHashes, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHashes) + mapStringForHashes := "map[string]string{" + for _, k := range keysForHashes { + mapStringForHashes += fmt.Sprintf("%#v: %#v,", k, this.Hashes[k]) + } + mapStringForHashes += "}" + if this.Hashes != nil { + s = append(s, "Hashes: "+mapStringForHashes+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringTarsum(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *TarsumBackup) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TarsumBackup) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Hashes) > 0 { + for k, _ := range m.Hashes { + dAtA[i] = 0xa + i++ + v := m.Hashes[k] + mapSize := 1 + len(k) + sovTarsum(uint64(len(k))) + 1 + len(v) + sovTarsum(uint64(len(v))) + i = encodeVarintTarsum(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintTarsum(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintTarsum(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func encodeFixed64Tarsum(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Tarsum(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintTarsum(dAtA 
[]byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *TarsumBackup) Size() (n int) { + var l int + _ = l + if len(m.Hashes) > 0 { + for k, v := range m.Hashes { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTarsum(uint64(len(k))) + 1 + len(v) + sovTarsum(uint64(len(v))) + n += mapEntrySize + 1 + sovTarsum(uint64(mapEntrySize)) + } + } + return n +} + +func sovTarsum(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozTarsum(x uint64) (n int) { + return sovTarsum(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *TarsumBackup) String() string { + if this == nil { + return "nil" + } + keysForHashes := make([]string, 0, len(this.Hashes)) + for k, _ := range this.Hashes { + keysForHashes = append(keysForHashes, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHashes) + mapStringForHashes := "map[string]string{" + for _, k := range keysForHashes { + mapStringForHashes += fmt.Sprintf("%v: %v,", k, this.Hashes[k]) + } + mapStringForHashes += "}" + s := strings.Join([]string{`&TarsumBackup{`, + `Hashes:` + mapStringForHashes + `,`, + `}`, + }, "") + return s +} +func valueToStringTarsum(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *TarsumBackup) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTarsum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TarsumBackup: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TarsumBackup: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hashes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTarsum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTarsum + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTarsum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTarsum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTarsum + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Hashes == nil { + m.Hashes = 
make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTarsum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTarsum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTarsum + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Hashes[mapkey] = mapvalue + } else { + var mapvalue string + m.Hashes[mapkey] = mapvalue + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTarsum(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTarsum + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTarsum(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTarsum + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTarsum + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTarsum + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthTarsum + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTarsum + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipTarsum(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthTarsum = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTarsum = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("tarsum.proto", fileDescriptorTarsum) } + +var fileDescriptorTarsum = []byte{ + // 196 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x49, 
0x2c, 0x2a, + 0x2e, 0xcd, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x2d, 0x4a, 0xcd, 0xcd, 0x2f, 0x49, + 0x4d, 0xce, 0xcf, 0x2b, 0x49, 0xad, 0x28, 0x51, 0xea, 0x62, 0xe4, 0xe2, 0x09, 0x01, 0xcb, 0x3b, + 0x25, 0x26, 0x67, 0x97, 0x16, 0x08, 0xd9, 0x73, 0xb1, 0x79, 0x24, 0x16, 0x67, 0xa4, 0x16, 0x4b, + 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x1b, 0xa9, 0xeb, 0xa1, 0x68, 0xd0, 0x43, 0x56, 0xac, 0x07, 0x51, + 0xe9, 0x9a, 0x57, 0x52, 0x54, 0x19, 0x04, 0xd5, 0x26, 0x65, 0xc9, 0xc5, 0x8d, 0x24, 0x2c, 0x24, + 0xc0, 0xc5, 0x9c, 0x9d, 0x5a, 0x29, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x19, 0x04, 0x62, 0x0a, 0x89, + 0x70, 0xb1, 0x96, 0x25, 0xe6, 0x94, 0xa6, 0x4a, 0x30, 0x81, 0xc5, 0x20, 0x1c, 0x2b, 0x26, 0x0b, + 0x46, 0x27, 0x9d, 0x0b, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, 0x94, 0x63, 0xf8, 0xf0, 0x50, 0x8e, 0xb1, + 0xe1, 0x91, 0x1c, 0xe3, 0x8a, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, + 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x8b, 0x47, 0x72, 0x0c, 0x1f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c, + 0xc7, 0x90, 0xc4, 0x06, 0xf6, 0x90, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x89, 0x57, 0x7d, 0x3f, + 0xe0, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/moby/moby/builder/remotecontext/tarsum.proto b/vendor/github.com/moby/moby/builder/remotecontext/tarsum.proto new file mode 100644 index 000000000..cb94240ba --- /dev/null +++ b/vendor/github.com/moby/moby/builder/remotecontext/tarsum.proto @@ -0,0 +1,7 @@ +syntax = "proto3"; + +package remotecontext; // no namespace because only used internally + +message TarsumBackup { + map<string, string> Hashes = 1; +} \ No newline at end of file diff --git a/vendor/github.com/moby/moby/builder/remotecontext/tarsum_test.go b/vendor/github.com/moby/moby/builder/remotecontext/tarsum_test.go new file mode 100644 index 000000000..8a9d69bb7 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/remotecontext/tarsum_test.go @@ -0,0 +1,157 @@ +package remotecontext + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/docker/docker/builder" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" + "github.com/pkg/errors" +) + +const ( + filename = "test" + contents = "contents test" +) + +func init() { + reexec.Init() +} + +func TestCloseRootDirectory(t *testing.T) { + contextDir, err := ioutil.TempDir("", "builder-tarsum-test") + defer os.RemoveAll(contextDir) + if err != nil { + t.Fatalf("Error when creating temporary directory: %s", err) + } + + src := makeTestArchiveContext(t, contextDir) + err = src.Close() + + if err != nil { + t.Fatalf("Error while executing Close: %s", err) + } + + _, err = os.Stat(src.Root()) + + if !os.IsNotExist(err) { + t.Fatal("Directory should not exist at this point") + } +} + +func TestHashFile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + createTestTempFile(t, contextDir, filename, contents, 0755) + + tarSum := makeTestArchiveContext(t, contextDir) + + sum, err := tarSum.Hash(filename) + + if err != nil { + t.Fatalf("Error when executing Stat: %s", err) + } + + if len(sum) == 0 { + t.Fatalf("Hash returned empty sum") + } + + expected := "1149ab94af7be6cc1da1335e398f24ee1cf4926b720044d229969dfc248ae7ec" + + if actual := sum; expected != actual { + t.Fatalf("invalid checksum.
expected %s, got %s", expected, actual) + } +} + +func TestHashSubdir(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + contextSubdir := filepath.Join(contextDir, "builder-tarsum-test-subdir") + err := os.Mkdir(contextSubdir, 0755) + if err != nil { + t.Fatalf("Failed to make directory: %s", contextSubdir) + } + + testFilename := createTestTempFile(t, contextSubdir, filename, contents, 0755) + + tarSum := makeTestArchiveContext(t, contextDir) + + relativePath, err := filepath.Rel(contextDir, testFilename) + + if err != nil { + t.Fatalf("Error when getting relative path: %s", err) + } + + sum, err := tarSum.Hash(relativePath) + + if err != nil { + t.Fatalf("Error when executing Stat: %s", err) + } + + if len(sum) == 0 { + t.Fatalf("Hash returned empty sum") + } + + expected := "d7f8d6353dee4816f9134f4156bf6a9d470fdadfb5d89213721f7e86744a4e69" + + if actual := sum; expected != actual { + t.Fatalf("invalid checksum. expected %s, got %s", expected, actual) + } +} + +func TestStatNotExisting(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + src := makeTestArchiveContext(t, contextDir) + _, err := src.Hash("not-existing") + if !os.IsNotExist(errors.Cause(err)) { + t.Fatalf("This file should not exist: %s", err) + } +} + +func TestRemoveDirectory(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + contextSubdir := createTestTempSubdir(t, contextDir, "builder-tarsum-test-subdir") + + relativePath, err := filepath.Rel(contextDir, contextSubdir) + + if err != nil { + t.Fatalf("Error when getting relative path: %s", err) + } + + src := makeTestArchiveContext(t, contextDir) + + tarSum := src.(modifiableContext) + + err = tarSum.Remove(relativePath) + if err != nil { + t.Fatalf("Error when executing Remove: %s", err) + } + + _, err = src.Hash(contextSubdir) + + if !os.IsNotExist(errors.Cause(err)) { + t.Fatal("Directory should not exist at this point") + } +} + +func makeTestArchiveContext(t *testing.T, dir string) builder.Source { + tarStream, err := archive.Tar(dir, archive.Uncompressed) + if err != nil { + t.Fatalf("error: %s", err) + } + defer tarStream.Close() + tarSum, err := FromArchive(tarStream) + if err != nil { + t.Fatalf("Error when executing FromArchive: %s", err) + } + return tarSum +} diff --git a/vendor/github.com/moby/moby/builder/remotecontext/utils_test.go b/vendor/github.com/moby/moby/builder/remotecontext/utils_test.go new file mode 100644 index 000000000..1e23ab4f7 --- /dev/null +++ b/vendor/github.com/moby/moby/builder/remotecontext/utils_test.go @@ -0,0 +1,55 @@ +package remotecontext + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +// createTestTempDir creates a temporary directory for testing. +// It returns the created path and a cleanup function which is meant to be used as deferred call. +// When an error occurs, it terminates the test. +func createTestTempDir(t *testing.T, dir, prefix string) (string, func()) { + path, err := ioutil.TempDir(dir, prefix) + + if err != nil { + t.Fatalf("Error when creating directory %s with prefix %s: %s", dir, prefix, err) + } + + return path, func() { + err = os.RemoveAll(path) + + if err != nil { + t.Fatalf("Error when removing directory %s: %s", path, err) + } + } +} + +// createTestTempSubdir creates a temporary directory for testing. 
+// It returns the created path but doesn't provide a cleanup function, +// so createTestTempSubdir should be used only for creating temporary subdirectories +// whose parent directories are properly cleaned up. +// When an error occurs, it terminates the test. +func createTestTempSubdir(t *testing.T, dir, prefix string) string { + path, err := ioutil.TempDir(dir, prefix) + + if err != nil { + t.Fatalf("Error when creating directory %s with prefix %s: %s", dir, prefix, err) + } + + return path +} + +// createTestTempFile creates a temporary file within dir with specific contents and permissions. +// When an error occurs, it terminates the test +func createTestTempFile(t *testing.T, dir, filename, contents string, perm os.FileMode) string { + filePath := filepath.Join(dir, filename) + err := ioutil.WriteFile(filePath, []byte(contents), perm) + + if err != nil { + t.Fatalf("Error when creating %s file: %s", filename, err) + } + + return filePath +} diff --git a/vendor/github.com/moby/moby/cli/cobra.go b/vendor/github.com/moby/moby/cli/cobra.go new file mode 100644 index 000000000..c7bb39c43 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/cobra.go @@ -0,0 +1,150 @@ +package cli + +import ( + "fmt" + "strings" + + "github.com/docker/docker/pkg/term" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +// SetupRootCommand sets default usage, help, and error handling for the +// root command. +func SetupRootCommand(rootCmd *cobra.Command) { + cobra.AddTemplateFunc("hasSubCommands", hasSubCommands) + cobra.AddTemplateFunc("hasManagementSubCommands", hasManagementSubCommands) + cobra.AddTemplateFunc("operationSubCommands", operationSubCommands) + cobra.AddTemplateFunc("managementSubCommands", managementSubCommands) + cobra.AddTemplateFunc("wrappedFlagUsages", wrappedFlagUsages) + + rootCmd.SetUsageTemplate(usageTemplate) + rootCmd.SetHelpTemplate(helpTemplate) + rootCmd.SetFlagErrorFunc(FlagErrorFunc) + rootCmd.SetHelpCommand(helpCommand) + + rootCmd.PersistentFlags().BoolP("help", "h", false, "Print usage") + rootCmd.PersistentFlags().MarkShorthandDeprecated("help", "please use --help") +} + +// FlagErrorFunc prints an error message which matches the format of the +// docker/docker/cli error messages +func FlagErrorFunc(cmd *cobra.Command, err error) error { + if err == nil { + return nil + } + + usage := "" + if cmd.HasSubCommands() { + usage = "\n\n" + cmd.UsageString() + } + return StatusError{ + Status: fmt.Sprintf("%s\nSee '%s --help'.%s", err, cmd.CommandPath(), usage), + StatusCode: 125, + } +} + +var helpCommand = &cobra.Command{ + Use: "help [command]", + Short: "Help about the command", + PersistentPreRun: func(cmd *cobra.Command, args []string) {}, + PersistentPostRun: func(cmd *cobra.Command, args []string) {}, + RunE: func(c *cobra.Command, args []string) error { + cmd, args, e := c.Root().Find(args) + if cmd == nil || e != nil || len(args) > 0 { + return errors.Errorf("unknown help topic: %v", strings.Join(args, " ")) + } + + helpFunc := cmd.HelpFunc() + helpFunc(cmd, args) + return nil + }, +} + +func hasSubCommands(cmd *cobra.Command) bool { + return len(operationSubCommands(cmd)) > 0 +} + +func hasManagementSubCommands(cmd *cobra.Command) bool { + return len(managementSubCommands(cmd)) > 0 +} + +func operationSubCommands(cmd *cobra.Command) []*cobra.Command { + cmds := []*cobra.Command{} + for _, sub := range cmd.Commands() { + if sub.IsAvailableCommand() && !sub.HasSubCommands() { + cmds = append(cmds, sub) + } + } + return cmds +} + +func wrappedFlagUsages(cmd 
*cobra.Command) string { + width := 80 + if ws, err := term.GetWinsize(0); err == nil { + width = int(ws.Width) + } + return cmd.Flags().FlagUsagesWrapped(width - 1) +} + +func managementSubCommands(cmd *cobra.Command) []*cobra.Command { + cmds := []*cobra.Command{} + for _, sub := range cmd.Commands() { + if sub.IsAvailableCommand() && sub.HasSubCommands() { + cmds = append(cmds, sub) + } + } + return cmds +} + +var usageTemplate = `Usage: + +{{- if not .HasSubCommands}} {{.UseLine}}{{end}} +{{- if .HasSubCommands}} {{ .CommandPath}} COMMAND{{end}} + +{{ .Short | trim }} + +{{- if gt .Aliases 0}} + +Aliases: + {{.NameAndAliases}} + +{{- end}} +{{- if .HasExample}} + +Examples: +{{ .Example }} + +{{- end}} +{{- if .HasFlags}} + +Options: +{{ wrappedFlagUsages . | trimRightSpace}} + +{{- end}} +{{- if hasManagementSubCommands . }} + +Management Commands: + +{{- range managementSubCommands . }} + {{rpad .Name .NamePadding }} {{.Short}} +{{- end}} + +{{- end}} +{{- if hasSubCommands .}} + +Commands: + +{{- range operationSubCommands . }} + {{rpad .Name .NamePadding }} {{.Short}} +{{- end}} +{{- end}} + +{{- if .HasSubCommands }} + +Run '{{.CommandPath}} COMMAND --help' for more information on a command. +{{- end}} +` + +var helpTemplate = ` +{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` diff --git a/vendor/github.com/moby/moby/cli/config/configdir.go b/vendor/github.com/moby/moby/cli/config/configdir.go new file mode 100644 index 000000000..de2257781 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/config/configdir.go @@ -0,0 +1,25 @@ +package config + +import ( + "os" + "path/filepath" + + "github.com/docker/docker/pkg/homedir" +) + +var ( + configDir = os.Getenv("DOCKER_CONFIG") + configFileDir = ".docker" +) + +// Dir returns the path to the configuration directory as specified by the DOCKER_CONFIG environment variable. +// TODO: this was copied from cli/config/configfile and should be removed once cmd/dockerd moves +func Dir() string { + return configDir +} + +func init() { + if configDir == "" { + configDir = filepath.Join(homedir.Get(), configFileDir) + } +} diff --git a/vendor/github.com/moby/moby/cli/debug/debug.go b/vendor/github.com/moby/moby/cli/debug/debug.go new file mode 100644 index 000000000..51dfab2a9 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/debug/debug.go @@ -0,0 +1,26 @@ +package debug + +import ( + "os" + + "github.com/Sirupsen/logrus" +) + +// Enable sets the DEBUG env var to true +// and makes the logger log at debug level. +func Enable() { + os.Setenv("DEBUG", "1") + logrus.SetLevel(logrus.DebugLevel) +} + +// Disable sets the DEBUG env var to false +// and makes the logger log at info level. +func Disable() { + os.Setenv("DEBUG", "") + logrus.SetLevel(logrus.InfoLevel) +} + +// IsEnabled checks whether the debug flag is set or not.
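+// A minimal usage sketch, assuming the caller also uses logrus: +// +// if debug.IsEnabled() { +// logrus.Debug("extra diagnostics") +// }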
+func IsEnabled() bool { + return os.Getenv("DEBUG") != "" +} diff --git a/vendor/github.com/moby/moby/cli/debug/debug_test.go b/vendor/github.com/moby/moby/cli/debug/debug_test.go new file mode 100644 index 000000000..ad8412a94 --- /dev/null +++ b/vendor/github.com/moby/moby/cli/debug/debug_test.go @@ -0,0 +1,43 @@ +package debug + +import ( + "os" + "testing" + + "github.com/Sirupsen/logrus" +) + +func TestEnable(t *testing.T) { + defer func() { + os.Setenv("DEBUG", "") + logrus.SetLevel(logrus.InfoLevel) + }() + Enable() + if os.Getenv("DEBUG") != "1" { + t.Fatalf("expected DEBUG=1, got %s\n", os.Getenv("DEBUG")) + } + if logrus.GetLevel() != logrus.DebugLevel { + t.Fatalf("expected log level %v, got %v\n", logrus.DebugLevel, logrus.GetLevel()) + } +} + +func TestDisable(t *testing.T) { + Disable() + if os.Getenv("DEBUG") != "" { + t.Fatalf("expected DEBUG=\"\", got %s\n", os.Getenv("DEBUG")) + } + if logrus.GetLevel() != logrus.InfoLevel { + t.Fatalf("expected log level %v, got %v\n", logrus.InfoLevel, logrus.GetLevel()) + } +} + +func TestEnabled(t *testing.T) { + Enable() + if !IsEnabled() { + t.Fatal("expected debug enabled, got false") + } + Disable() + if IsEnabled() { + t.Fatal("expected debug disabled, got true") + } +} diff --git a/vendor/github.com/moby/moby/cli/error.go b/vendor/github.com/moby/moby/cli/error.go new file mode 100644 index 000000000..62f62433b --- /dev/null +++ b/vendor/github.com/moby/moby/cli/error.go @@ -0,0 +1,33 @@ +package cli + +import ( + "fmt" + "strings" +) + +// Errors is a list of errors. +// Useful in a loop when you don't want to return the error right away, +// but want to display all the errors that happened during the loop after it finishes. +type Errors []error + +func (errList Errors) Error() string { + if len(errList) < 1 { + return "" + } + + out := make([]string, len(errList)) + for i := range errList { + out[i] = errList[i].Error() + } + return strings.Join(out, ", ") +} + +// StatusError reports an unsuccessful exit by a command. +type StatusError struct { + Status string + StatusCode int +} + +func (e StatusError) Error() string { + return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode) +} diff --git a/vendor/github.com/moby/moby/cli/required.go b/vendor/github.com/moby/moby/cli/required.go new file mode 100644 index 000000000..d56bc213a --- /dev/null +++ b/vendor/github.com/moby/moby/cli/required.go @@ -0,0 +1,27 @@ +package cli + +import ( + "strings" + + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +// NoArgs validates args and returns an error if there are any args +func NoArgs(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return nil + } + + if cmd.HasSubCommands() { + return errors.Errorf("\n" + strings.TrimRight(cmd.UsageString(), "\n")) + } + + return errors.Errorf( + "\"%s\" accepts no argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", + cmd.CommandPath(), + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) +} diff --git a/vendor/github.com/moby/moby/client/README.md b/vendor/github.com/moby/moby/client/README.md new file mode 100644 index 000000000..059dfb3ce --- /dev/null +++ b/vendor/github.com/moby/moby/client/README.md @@ -0,0 +1,35 @@ +# Go client for the Docker Engine API + +The `docker` command uses this package to communicate with the daemon. It can also be used by your own Go applications to do anything the command-line interface does – running containers, pulling images, managing swarms, etc.
+ +For example, to list running containers (the equivalent of `docker ps`): + +```go +package main + +import ( + "context" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" +) + +func main() { + cli, err := client.NewEnvClient() + if err != nil { + panic(err) + } + + containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) + if err != nil { + panic(err) + } + + for _, container := range containers { + fmt.Printf("%s %s\n", container.ID[:10], container.Image) + } +} +``` + +[Full documentation is available on GoDoc.](https://godoc.org/github.com/docker/docker/client) diff --git a/vendor/github.com/moby/moby/client/build_prune.go b/vendor/github.com/moby/moby/client/build_prune.go new file mode 100644 index 000000000..ccab115d3 --- /dev/null +++ b/vendor/github.com/moby/moby/client/build_prune.go @@ -0,0 +1,30 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// BuildCachePrune requests the daemon to delete unused cache data +func (cli *Client) BuildCachePrune(ctx context.Context) (*types.BuildCachePruneReport, error) { + if err := cli.NewVersionError("1.31", "build prune"); err != nil { + return nil, err + } + + report := types.BuildCachePruneReport{} + + serverResp, err := cli.post(ctx, "/build/prune", nil, nil, nil) + if err != nil { + return nil, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return nil, fmt.Errorf("Error retrieving build cache prune report: %v", err) + } + + return &report, nil +} diff --git a/vendor/github.com/moby/moby/client/checkpoint_create.go b/vendor/github.com/moby/moby/client/checkpoint_create.go new file mode 100644 index 000000000..0effe498b --- /dev/null +++ b/vendor/github.com/moby/moby/client/checkpoint_create.go @@ -0,0 +1,13 @@ +package client + +import ( + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// CheckpointCreate creates a checkpoint from the given container with the given name +func (cli *Client) CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error { + resp, err := cli.post(ctx, "/containers/"+container+"/checkpoints", nil, options, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/checkpoint_create_test.go b/vendor/github.com/moby/moby/client/checkpoint_create_test.go new file mode 100644 index 000000000..96e518761 --- /dev/null +++ b/vendor/github.com/moby/moby/client/checkpoint_create_test.go @@ -0,0 +1,73 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestCheckpointCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.CheckpointCreate(context.Background(), "nothing", types.CheckpointCreateOptions{ + CheckpointID: "noting", + Exit: true, + }) + + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestCheckpointCreate(t *testing.T) { + expectedContainerID := "container_id" + expectedCheckpointID := "checkpoint_id" + expectedURL := "/containers/container_id/checkpoints" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if
!strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + + createOptions := &types.CheckpointCreateOptions{} + if err := json.NewDecoder(req.Body).Decode(createOptions); err != nil { + return nil, err + } + + if createOptions.CheckpointID != expectedCheckpointID { + return nil, fmt.Errorf("expected CheckpointID to be 'checkpoint_id', got %v", createOptions.CheckpointID) + } + + if !createOptions.Exit { + return nil, fmt.Errorf("expected Exit to be true") + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.CheckpointCreate(context.Background(), expectedContainerID, types.CheckpointCreateOptions{ + CheckpointID: expectedCheckpointID, + Exit: true, + }) + + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/checkpoint_delete.go b/vendor/github.com/moby/moby/client/checkpoint_delete.go new file mode 100644 index 000000000..e6e75588b --- /dev/null +++ b/vendor/github.com/moby/moby/client/checkpoint_delete.go @@ -0,0 +1,20 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// CheckpointDelete deletes the checkpoint with the given name from the given container +func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, options types.CheckpointDeleteOptions) error { + query := url.Values{} + if options.CheckpointDir != "" { + query.Set("dir", options.CheckpointDir) + } + + resp, err := cli.delete(ctx, "/containers/"+containerID+"/checkpoints/"+options.CheckpointID, query, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/checkpoint_delete_test.go b/vendor/github.com/moby/moby/client/checkpoint_delete_test.go new file mode 100644 index 000000000..a78b05048 --- /dev/null +++ b/vendor/github.com/moby/moby/client/checkpoint_delete_test.go @@ -0,0 +1,54 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestCheckpointDeleteError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.CheckpointDelete(context.Background(), "container_id", types.CheckpointDeleteOptions{ + CheckpointID: "checkpoint_id", + }) + + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestCheckpointDelete(t *testing.T) { + expectedURL := "/containers/container_id/checkpoints/checkpoint_id" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.CheckpointDelete(context.Background(), "container_id", types.CheckpointDeleteOptions{ + CheckpointID: "checkpoint_id", + }) + + if err != nil { + t.Fatal(err) + } +} diff --git 
a/vendor/github.com/moby/moby/client/checkpoint_list.go b/vendor/github.com/moby/moby/client/checkpoint_list.go new file mode 100644 index 000000000..ffe44bc97 --- /dev/null +++ b/vendor/github.com/moby/moby/client/checkpoint_list.go @@ -0,0 +1,32 @@ +package client + +import ( + "encoding/json" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// CheckpointList returns the checkpoints of the given container in the docker host +func (cli *Client) CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) { + var checkpoints []types.Checkpoint + + query := url.Values{} + if options.CheckpointDir != "" { + query.Set("dir", options.CheckpointDir) + } + + resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil) + if err != nil { + if resp.statusCode == http.StatusNotFound { + return checkpoints, containerNotFoundError{container} + } + return checkpoints, err + } + + err = json.NewDecoder(resp.body).Decode(&checkpoints) + ensureReaderClosed(resp) + return checkpoints, err +} diff --git a/vendor/github.com/moby/moby/client/checkpoint_list_test.go b/vendor/github.com/moby/moby/client/checkpoint_list_test.go new file mode 100644 index 000000000..388465715 --- /dev/null +++ b/vendor/github.com/moby/moby/client/checkpoint_list_test.go @@ -0,0 +1,68 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestCheckpointListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.CheckpointList(context.Background(), "container_id", types.CheckpointListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestCheckpointList(t *testing.T) { + expectedURL := "/containers/container_id/checkpoints" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal([]types.Checkpoint{ + { + Name: "checkpoint", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + checkpoints, err := client.CheckpointList(context.Background(), "container_id", types.CheckpointListOptions{}) + if err != nil { + t.Fatal(err) + } + if len(checkpoints) != 1 { + t.Fatalf("expected 1 checkpoint, got %v", checkpoints) + } +} + +func TestCheckpointListContainerNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, err := client.CheckpointList(context.Background(), "unknown", types.CheckpointListOptions{}) + if err == nil || !IsErrContainerNotFound(err) { + t.Fatalf("expected a containerNotFound error, got %v", err) + } +} diff --git a/vendor/github.com/moby/moby/client/client.go b/vendor/github.com/moby/moby/client/client.go new file mode 100644 index 000000000..7e1453172 --- /dev/null +++ b/vendor/github.com/moby/moby/client/client.go @@ -0,0 +1,314 @@ +/* +Package client is a Go client for the Docker Engine API. 
+ +The "docker" command uses this package to communicate with the daemon. It can also +be used by your own Go applications to do anything the command-line interface does +- running containers, pulling images, managing swarms, etc. + +For more information about the Engine API, see the documentation: +https://docs.docker.com/engine/reference/api/ + +Usage + +You use the library by creating a client object and calling methods on it. The +client can be created either from environment variables with NewEnvClient, or +configured manually with NewClient. + +For example, to list running containers (the equivalent of "docker ps"): + + package main + + import ( + "context" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + ) + + func main() { + cli, err := client.NewEnvClient() + if err != nil { + panic(err) + } + + containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) + if err != nil { + panic(err) + } + + for _, container := range containers { + fmt.Printf("%s %s\n", container.ID[:10], container.Image) + } + } + +*/ +package client + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" + "golang.org/x/net/context" +) + +// ErrRedirect is the error returned by checkRedirect when the request is non-GET. +var ErrRedirect = errors.New("unexpected redirect in response") + +// Client is the API client that performs all operations +// against a docker server. +type Client struct { + // scheme sets the scheme for the client + scheme string + // host holds the server address to connect to + host string + // proto holds the client protocol i.e. unix. + proto string + // addr holds the client address. + addr string + // basePath holds the path to prepend to the requests. + basePath string + // client used to send and receive http requests. + client *http.Client + // version of the server to talk to. + version string + // custom http headers configured by users. + customHTTPHeaders map[string]string + // manualOverride is set to true when the version was set by users. + manualOverride bool +} + +// CheckRedirect specifies the policy for dealing with redirect responses: +// If the request is non-GET return `ErrRedirect`. Otherwise use the last response. +// +// Go 1.8 changes behavior for HTTP redirects (specifically 301, 307, and 308) in the client . +// The Docker client (and by extension docker API client) can be made to to send a request +// like POST /containers//start where what would normally be in the name section of the URL is empty. +// This triggers an HTTP 301 from the daemon. +// In go 1.8 this 301 will be converted to a GET request, and ends up getting a 404 from the daemon. +// This behavior change manifests in the client in that before the 301 was not followed and +// the client did not generate an error, but now results in a message like Error response from daemon: page not found. +func CheckRedirect(req *http.Request, via []*http.Request) error { + if via[0].Method == http.MethodGet { + return http.ErrUseLastResponse + } + return ErrRedirect +} + +// NewEnvClient initializes a new API client based on environment variables. +// Use DOCKER_HOST to set the url to the docker server. +// Use DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest. 
+// Use DOCKER_CERT_PATH to load the TLS certificates from. +// Use DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default. +func NewEnvClient() (*Client, error) { + var client *http.Client + if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" { + options := tlsconfig.Options{ + CAFile: filepath.Join(dockerCertPath, "ca.pem"), + CertFile: filepath.Join(dockerCertPath, "cert.pem"), + KeyFile: filepath.Join(dockerCertPath, "key.pem"), + InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "", + } + tlsc, err := tlsconfig.Client(options) + if err != nil { + return nil, err + } + + client = &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsc, + }, + CheckRedirect: CheckRedirect, + } + } + + host := os.Getenv("DOCKER_HOST") + if host == "" { + host = DefaultDockerHost + } + version := os.Getenv("DOCKER_API_VERSION") + if version == "" { + version = api.DefaultVersion + } + + cli, err := NewClient(host, version, client, nil) + if err != nil { + return cli, err + } + if os.Getenv("DOCKER_API_VERSION") != "" { + cli.manualOverride = true + } + return cli, nil +} + +// NewClient initializes a new API client for the given host and API version. +// It uses the given http client as transport. +// It also initializes the custom http headers to add to each request. +// +// It won't send any version information if the version number is empty. It is +// highly recommended that you set a version or your client may break if the +// server is upgraded. +func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) { + proto, addr, basePath, err := ParseHost(host) + if err != nil { + return nil, err + } + + if client != nil { + if _, ok := client.Transport.(*http.Transport); !ok { + return nil, fmt.Errorf("unable to verify TLS configuration, invalid transport %v", client.Transport) + } + } else { + transport := new(http.Transport) + sockets.ConfigureTransport(transport, proto, addr) + client = &http.Client{ + Transport: transport, + CheckRedirect: CheckRedirect, + } + } + + scheme := "http" + tlsConfig := resolveTLSConfig(client.Transport) + if tlsConfig != nil { + // TODO(stevvooe): This isn't really the right way to write clients in Go. + // `NewClient` should probably only take an `*http.Client` and work from there. + // Unfortunately, the model of having a host-ish/url-thingy as the connection + // string has us confusing protocol and transport layers. We continue doing + // this to avoid breaking existing clients but this should be addressed. + scheme = "https" + } + + return &Client{ + scheme: scheme, + host: host, + proto: proto, + addr: addr, + basePath: basePath, + client: client, + version: version, + customHTTPHeaders: httpHeaders, + }, nil +} + +// Close ensures that the transport used by the Client is closed. This is +// especially needed when NewClient was called with a nil *http.Client, for example: +// client.NewClient("unix:///var/run/docker.sock", "v1.18", nil, map[string]string{"User-Agent": "engine-api-cli-1.0"}) +func (cli *Client) Close() error { + if t, ok := cli.client.Transport.(*http.Transport); ok { + t.CloseIdleConnections() + } + return nil +} + +// getAPIPath returns the versioned request path to call the api. +// It appends the query parameters to the path if they are not empty.
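+// A sketch of the resulting shape (version "1.30" and an empty basePath are illustrative assumptions): +// +// cli.getAPIPath("/containers/json", url.Values{"all": {"1"}}) +// // version "1.30": "/v1.30/containers/json?all=1" +// // empty version: "/containers/json?all=1"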
+func (cli *Client) getAPIPath(p string, query url.Values) string { + var apiPath string + if cli.version != "" { + v := strings.TrimPrefix(cli.version, "v") + apiPath = cli.basePath + "/v" + v + p + } else { + apiPath = cli.basePath + p + } + + u := &url.URL{ + Path: apiPath, + } + if len(query) > 0 { + u.RawQuery = query.Encode() + } + return u.String() +} + +// ClientVersion returns the version string associated with this +// instance of the Client. Note that this value can be changed +// via the DOCKER_API_VERSION env var. +// This operation doesn't acquire a mutex. +func (cli *Client) ClientVersion() string { + return cli.version +} + +// NegotiateAPIVersion updates the version string associated with this +// instance of the Client to match the latest version the server supports +func (cli *Client) NegotiateAPIVersion(ctx context.Context) { + ping, _ := cli.Ping(ctx) + cli.NegotiateAPIVersionPing(ping) +} + +// NegotiateAPIVersionPing updates the version string associated with this +// instance of the Client to match the latest version the server supports +func (cli *Client) NegotiateAPIVersionPing(p types.Ping) { + if cli.manualOverride { + return + } + + // try the latest version before versioning headers existed + if p.APIVersion == "" { + p.APIVersion = "1.24" + } + + // if the client is not initialized with a version, start with the latest supported version + if cli.version == "" { + cli.version = api.DefaultVersion + } + + // if server version is lower than the maximum version supported by the Client, downgrade + if versions.LessThan(p.APIVersion, api.DefaultVersion) { + cli.version = p.APIVersion + } +} + +// DaemonHost returns the host associated with this instance of the Client. +// This operation doesn't acquire a mutex. +func (cli *Client) DaemonHost() string { + return cli.host +} + +// ParseHost verifies that the given host string is valid and returns the +// protocol, address, and base path. +func ParseHost(host string) (string, string, string, error) { + protoAddrParts := strings.SplitN(host, "://", 2) + if len(protoAddrParts) == 1 { + return "", "", "", fmt.Errorf("unable to parse docker host `%s`", host) + } + + var basePath string + proto, addr := protoAddrParts[0], protoAddrParts[1] + if proto == "tcp" { + parsed, err := url.Parse("tcp://" + addr) + if err != nil { + return "", "", "", err + } + addr = parsed.Host + basePath = parsed.Path + } + return proto, addr, basePath, nil +} + +// CustomHTTPHeaders returns the custom http headers associated with this +// instance of the Client. This operation doesn't acquire a mutex. +func (cli *Client) CustomHTTPHeaders() map[string]string { + m := make(map[string]string) + for k, v := range cli.customHTTPHeaders { + m[k] = v + } + return m +} + +// SetCustomHTTPHeaders updates the custom http headers associated with this +// instance of the Client. This operation doesn't acquire a mutex.
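+// A usage sketch (the header name and value are illustrative): +// +// cli.SetCustomHTTPHeaders(map[string]string{"User-Agent": "my-app/1.0"})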
+func (cli *Client) SetCustomHTTPHeaders(headers map[string]string) { + cli.customHTTPHeaders = headers +} diff --git a/vendor/github.com/moby/moby/client/client_mock_test.go b/vendor/github.com/moby/moby/client/client_mock_test.go new file mode 100644 index 000000000..0ab935d53 --- /dev/null +++ b/vendor/github.com/moby/moby/client/client_mock_test.go @@ -0,0 +1,45 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types" +) + +func newMockClient(doer func(*http.Request) (*http.Response, error)) *http.Client { + return &http.Client{ + Transport: transportFunc(doer), + } +} + +func errorMock(statusCode int, message string) func(req *http.Request) (*http.Response, error) { + return func(req *http.Request) (*http.Response, error) { + header := http.Header{} + header.Set("Content-Type", "application/json") + + body, err := json.Marshal(&types.ErrorResponse{ + Message: message, + }) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: statusCode, + Body: ioutil.NopCloser(bytes.NewReader(body)), + Header: header, + }, nil + } +} + +func plainTextErrorMock(statusCode int, message string) func(req *http.Request) (*http.Response, error) { + return func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: statusCode, + Body: ioutil.NopCloser(bytes.NewReader([]byte(message))), + }, nil + } +} diff --git a/vendor/github.com/moby/moby/client/client_test.go b/vendor/github.com/moby/moby/client/client_test.go new file mode 100644 index 000000000..bc911c0c4 --- /dev/null +++ b/vendor/github.com/moby/moby/client/client_test.go @@ -0,0 +1,344 @@ +package client + +import ( + "bytes" + "net/http" + "net/url" + "os" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/stretchr/testify/assert" +) + +func TestNewEnvClient(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("skipping unix only test for windows") + } + cases := []struct { + envs map[string]string + expectedError string + expectedVersion string + }{ + { + envs: map[string]string{}, + expectedVersion: api.DefaultVersion, + }, + { + envs: map[string]string{ + "DOCKER_CERT_PATH": "invalid/path", + }, + expectedError: "Could not load X509 key pair: open invalid/path/cert.pem: no such file or directory", + }, + { + envs: map[string]string{ + "DOCKER_CERT_PATH": "testdata/", + }, + expectedVersion: api.DefaultVersion, + }, + { + envs: map[string]string{ + "DOCKER_CERT_PATH": "testdata/", + "DOCKER_TLS_VERIFY": "1", + }, + expectedVersion: api.DefaultVersion, + }, + { + envs: map[string]string{ + "DOCKER_CERT_PATH": "testdata/", + "DOCKER_HOST": "https://notaunixsocket", + }, + expectedVersion: api.DefaultVersion, + }, + { + envs: map[string]string{ + "DOCKER_HOST": "host", + }, + expectedError: "unable to parse docker host `host`", + }, + { + envs: map[string]string{ + "DOCKER_HOST": "invalid://url", + }, + expectedVersion: api.DefaultVersion, + }, + { + envs: map[string]string{ + "DOCKER_API_VERSION": "anything", + }, + expectedVersion: "anything", + }, + { + envs: map[string]string{ + "DOCKER_API_VERSION": "1.22", + }, + expectedVersion: "1.22", + }, + } + + env := envToMap() + defer mapToEnv(env) + for _, c := range cases { + mapToEnv(env) + mapToEnv(c.envs) + apiclient, err := NewEnvClient() + if c.expectedError != "" { + assert.Error(t, err) + assert.Equal(t, c.expectedError, err.Error()) + } else { + assert.NoError(t, err) + version := 
apiclient.ClientVersion() + assert.Equal(t, c.expectedVersion, version) + } + + if c.envs["DOCKER_TLS_VERIFY"] != "" { + // pedantic checking that this is handled correctly + tr := apiclient.client.Transport.(*http.Transport) + assert.NotNil(t, tr.TLSClientConfig) + assert.Equal(t, tr.TLSClientConfig.InsecureSkipVerify, false) + } + } +} + +func TestGetAPIPath(t *testing.T) { + cases := []struct { + v string + p string + q url.Values + e string + }{ + {"", "/containers/json", nil, "/containers/json"}, + {"", "/containers/json", url.Values{}, "/containers/json"}, + {"", "/containers/json", url.Values{"s": []string{"c"}}, "/containers/json?s=c"}, + {"1.22", "/containers/json", nil, "/v1.22/containers/json"}, + {"1.22", "/containers/json", url.Values{}, "/v1.22/containers/json"}, + {"1.22", "/containers/json", url.Values{"s": []string{"c"}}, "/v1.22/containers/json?s=c"}, + {"v1.22", "/containers/json", nil, "/v1.22/containers/json"}, + {"v1.22", "/containers/json", url.Values{}, "/v1.22/containers/json"}, + {"v1.22", "/containers/json", url.Values{"s": []string{"c"}}, "/v1.22/containers/json?s=c"}, + {"v1.22", "/networks/kiwl$%^", nil, "/v1.22/networks/kiwl$%25%5E"}, + } + + for _, cs := range cases { + c, err := NewClient("unix:///var/run/docker.sock", cs.v, nil, nil) + if err != nil { + t.Fatal(err) + } + g := c.getAPIPath(cs.p, cs.q) + assert.Equal(t, g, cs.e) + + err = c.Close() + assert.NoError(t, err) + } +} + +func TestParseHost(t *testing.T) { + cases := []struct { + host string + proto string + addr string + base string + err bool + }{ + {"", "", "", "", true}, + {"foobar", "", "", "", true}, + {"foo://bar", "foo", "bar", "", false}, + {"tcp://localhost:2476", "tcp", "localhost:2476", "", false}, + {"tcp://localhost:2476/path", "tcp", "localhost:2476", "/path", false}, + } + + for _, cs := range cases { + p, a, b, e := ParseHost(cs.host) + // if we expected an error to be returned... 
+ if cs.err {
+ assert.Error(t, e)
+ }
+ assert.Equal(t, cs.proto, p)
+ assert.Equal(t, cs.addr, a)
+ assert.Equal(t, cs.base, b)
+ }
+}
+
+func TestNewEnvClientSetsDefaultVersion(t *testing.T) {
+ env := envToMap()
+ defer mapToEnv(env)
+
+ envMap := map[string]string{
+ "DOCKER_HOST": "",
+ "DOCKER_API_VERSION": "",
+ "DOCKER_TLS_VERIFY": "",
+ "DOCKER_CERT_PATH": "",
+ }
+ mapToEnv(envMap)
+
+ client, err := NewEnvClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+ assert.Equal(t, client.version, api.DefaultVersion)
+
+ expected := "1.22"
+ os.Setenv("DOCKER_API_VERSION", expected)
+ client, err = NewEnvClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+ assert.Equal(t, expected, client.version)
+}
+
+// TestNegotiateAPIVersionEmpty asserts that client.Client can
+// negotiate a compatible APIVersion when omitted
+func TestNegotiateAPIVersionEmpty(t *testing.T) {
+ env := envToMap()
+ defer mapToEnv(env)
+
+ envMap := map[string]string{
+ "DOCKER_API_VERSION": "",
+ }
+ mapToEnv(envMap)
+
+ client, err := NewEnvClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ping := types.Ping{
+ APIVersion: "",
+ OSType: "linux",
+ Experimental: false,
+ }
+
+ // set our version to something new
+ client.version = "1.25"
+
+ // if no version from server, expect the earliest
+ // version before APIVersion was implemented
+ expected := "1.24"
+
+ // test downgrade
+ client.NegotiateAPIVersionPing(ping)
+ assert.Equal(t, expected, client.version)
+}
+
+// TestNegotiateAPIVersion asserts that client.Client can
+// negotiate a compatible APIVersion with the server
+func TestNegotiateAPIVersion(t *testing.T) {
+ client, err := NewEnvClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expected := "1.21"
+
+ ping := types.Ping{
+ APIVersion: expected,
+ OSType: "linux",
+ Experimental: false,
+ }
+
+ // set our version to something new
+ client.version = "1.22"
+
+ // test downgrade
+ client.NegotiateAPIVersionPing(ping)
+ assert.Equal(t, expected, client.version)
+}
+
+// TestNegotiateAPIVersionOverride asserts that we honor
+// the environment variable DOCKER_API_VERSION when negotiating versions
+func TestNegotiateAPIVersionOverride(t *testing.T) {
+ env := envToMap()
+ defer mapToEnv(env)
+
+ envMap := map[string]string{
+ "DOCKER_API_VERSION": "9.99",
+ }
+ mapToEnv(envMap)
+
+ client, err := NewEnvClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ping := types.Ping{
+ APIVersion: "1.24",
+ OSType: "linux",
+ Experimental: false,
+ }
+
+ expected := envMap["DOCKER_API_VERSION"]
+
+ // test that we honored the env var
+ client.NegotiateAPIVersionPing(ping)
+ assert.Equal(t, expected, client.version)
+}
+
+// mapToEnv takes a map of environment variables and sets them
+func mapToEnv(env map[string]string) {
+ for k, v := range env {
+ os.Setenv(k, v)
+ }
+}
+
+// envToMap returns a map of environment variables
+func envToMap() map[string]string {
+ env := make(map[string]string)
+ for _, e := range os.Environ() {
+ // SplitN (not SplitAfterN) so the key does not keep the "=",
+ // which would make the mapToEnv restore above a silent no-op
+ kv := strings.SplitN(e, "=", 2)
+ env[kv[0]] = kv[1]
+ }
+
+ return env
+}
+
+type roundTripFunc func(*http.Request) (*http.Response, error)
+
+func (rtf roundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) {
+ return rtf(req)
+}
+
+type bytesBufferClose struct {
+ *bytes.Buffer
+}
+
+func (bbc bytesBufferClose) Close() error {
+ return nil
+}
+
+func TestClientRedirect(t *testing.T) {
+ client := &http.Client{
+ CheckRedirect: CheckRedirect,
+ Transport: roundTripFunc(func(req *http.Request) (*http.Response, error) {
+ if req.URL.String() == "/bla" {
+ return
&http.Response{StatusCode: 404}, nil + } + return &http.Response{ + StatusCode: 301, + Header: map[string][]string{"Location": {"/bla"}}, + Body: bytesBufferClose{bytes.NewBuffer(nil)}, + }, nil + }), + } + + cases := []struct { + httpMethod string + expectedErr error + statusCode int + }{ + {http.MethodGet, nil, 301}, + {http.MethodPost, &url.Error{Op: "Post", URL: "/bla", Err: ErrRedirect}, 301}, + {http.MethodPut, &url.Error{Op: "Put", URL: "/bla", Err: ErrRedirect}, 301}, + {http.MethodDelete, &url.Error{Op: "Delete", URL: "/bla", Err: ErrRedirect}, 301}, + } + + for _, tc := range cases { + req, err := http.NewRequest(tc.httpMethod, "/redirectme", nil) + assert.NoError(t, err) + resp, err := client.Do(req) + assert.Equal(t, tc.expectedErr, err) + assert.Equal(t, tc.statusCode, resp.StatusCode) + } +} diff --git a/vendor/github.com/moby/moby/client/client_unix.go b/vendor/github.com/moby/moby/client/client_unix.go new file mode 100644 index 000000000..89de892c8 --- /dev/null +++ b/vendor/github.com/moby/moby/client/client_unix.go @@ -0,0 +1,6 @@ +// +build linux freebsd solaris openbsd darwin + +package client + +// DefaultDockerHost defines os specific default if DOCKER_HOST is unset +const DefaultDockerHost = "unix:///var/run/docker.sock" diff --git a/vendor/github.com/moby/moby/client/client_windows.go b/vendor/github.com/moby/moby/client/client_windows.go new file mode 100644 index 000000000..07c0c7a77 --- /dev/null +++ b/vendor/github.com/moby/moby/client/client_windows.go @@ -0,0 +1,4 @@ +package client + +// DefaultDockerHost defines os specific default if DOCKER_HOST is unset +const DefaultDockerHost = "npipe:////./pipe/docker_engine" diff --git a/vendor/github.com/moby/moby/client/config_create.go b/vendor/github.com/moby/moby/client/config_create.go new file mode 100644 index 000000000..bc4a952b2 --- /dev/null +++ b/vendor/github.com/moby/moby/client/config_create.go @@ -0,0 +1,25 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// ConfigCreate creates a new Config. 
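All of the config endpoints introduced below are gated on API version 1.30 through NewVersionError. A self-contained sketch of that gate, assuming a simplified `lessThan` comparator in place of the vendored versions.LessThan (both names here are illustrative):

```go
package main

import "fmt"

// lessThan is a simplified stand-in for versions.LessThan: it compares
// "major.minor" API versions numerically, segment by segment.
func lessThan(v, other string) bool {
	var vMaj, vMin, oMaj, oMin int
	fmt.Sscanf(v, "%d.%d", &vMaj, &vMin)
	fmt.Sscanf(other, "%d.%d", &oMaj, &oMin)
	if vMaj != oMaj {
		return vMaj < oMaj
	}
	return vMin < oMin
}

// newVersionError mimics the client-side gate used by ConfigCreate below:
// fail fast when the negotiated version is older than the feature requires.
func newVersionError(clientVersion, required, feature string) error {
	if clientVersion != "" && lessThan(clientVersion, required) {
		return fmt.Errorf("%q requires API version %s, but the Docker daemon API version is %s", feature, required, clientVersion)
	}
	return nil
}

func main() {
	fmt.Println(newVersionError("1.29", "1.30", "config create"))
	// "config create" requires API version 1.30, but the Docker daemon API version is 1.29
	fmt.Println(newVersionError("1.30", "1.30", "config create")) // <nil>
}
```

The exact message printed first is what the *Unsupported tests in this patch assert against.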
+func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) { + var response types.ConfigCreateResponse + if err := cli.NewVersionError("1.30", "config create"); err != nil { + return response, err + } + resp, err := cli.post(ctx, "/configs/create", nil, config, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/moby/moby/client/config_create_test.go b/vendor/github.com/moby/moby/client/config_create_test.go new file mode 100644 index 000000000..000eaf1cb --- /dev/null +++ b/vendor/github.com/moby/moby/client/config_create_test.go @@ -0,0 +1,69 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestConfigCreateUnsupported(t *testing.T) { + client := &Client{ + version: "1.29", + client: &http.Client{}, + } + _, err := client.ConfigCreate(context.Background(), swarm.ConfigSpec{}) + assert.EqualError(t, err, `"config create" requires API version 1.30, but the Docker daemon API version is 1.29`) +} + +func TestConfigCreateError(t *testing.T) { + client := &Client{ + version: "1.30", + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ConfigCreate(context.Background(), swarm.ConfigSpec{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestConfigCreate(t *testing.T) { + expectedURL := "/v1.30/configs/create" + client := &Client{ + version: "1.30", + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + b, err := json.Marshal(types.ConfigCreateResponse{ + ID: "test_config", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusCreated, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + r, err := client.ConfigCreate(context.Background(), swarm.ConfigSpec{}) + if err != nil { + t.Fatal(err) + } + if r.ID != "test_config" { + t.Fatalf("expected `test_config`, got %s", r.ID) + } +} diff --git a/vendor/github.com/moby/moby/client/config_inspect.go b/vendor/github.com/moby/moby/client/config_inspect.go new file mode 100644 index 000000000..ebb6d636c --- /dev/null +++ b/vendor/github.com/moby/moby/client/config_inspect.go @@ -0,0 +1,37 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// ConfigInspectWithRaw returns the config information with raw data +func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.Config, []byte, error) { + if err := cli.NewVersionError("1.30", "config inspect"); err != nil { + return swarm.Config{}, nil, err + } + resp, err := cli.get(ctx, "/configs/"+id, nil, nil) + if err != nil { + if resp.statusCode == http.StatusNotFound { + return swarm.Config{}, nil, configNotFoundError{id} + } + return swarm.Config{}, nil, err + } + defer ensureReaderClosed(resp) + + 
body, err := ioutil.ReadAll(resp.body) + if err != nil { + return swarm.Config{}, nil, err + } + + var config swarm.Config + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&config) + + return config, body, err +} diff --git a/vendor/github.com/moby/moby/client/config_inspect_test.go b/vendor/github.com/moby/moby/client/config_inspect_test.go new file mode 100644 index 000000000..010b18841 --- /dev/null +++ b/vendor/github.com/moby/moby/client/config_inspect_test.go @@ -0,0 +1,78 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestConfigInspectUnsupported(t *testing.T) { + client := &Client{ + version: "1.29", + client: &http.Client{}, + } + _, _, err := client.ConfigInspectWithRaw(context.Background(), "nothing") + assert.EqualError(t, err, `"config inspect" requires API version 1.30, but the Docker daemon API version is 1.29`) +} + +func TestConfigInspectError(t *testing.T) { + client := &Client{ + version: "1.30", + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.ConfigInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestConfigInspectConfigNotFound(t *testing.T) { + client := &Client{ + version: "1.30", + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, _, err := client.ConfigInspectWithRaw(context.Background(), "unknown") + if err == nil || !IsErrConfigNotFound(err) { + t.Fatalf("expected a configNotFoundError error, got %v", err) + } +} + +func TestConfigInspect(t *testing.T) { + expectedURL := "/v1.30/configs/config_id" + client := &Client{ + version: "1.30", + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(swarm.Config{ + ID: "config_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + configInspect, _, err := client.ConfigInspectWithRaw(context.Background(), "config_id") + if err != nil { + t.Fatal(err) + } + if configInspect.ID != "config_id" { + t.Fatalf("expected `config_id`, got %s", configInspect.ID) + } +} diff --git a/vendor/github.com/moby/moby/client/config_list.go b/vendor/github.com/moby/moby/client/config_list.go new file mode 100644 index 000000000..8483ca14d --- /dev/null +++ b/vendor/github.com/moby/moby/client/config_list.go @@ -0,0 +1,38 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// ConfigList returns the list of configs. 
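ConfigList, implemented next, sends its filters as JSON in the query string. A sketch of that wire format; the plain map stands in for the vendored filters.Args builder:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/url"
)

func main() {
	// filters.ToParam marshals a field -> value -> bool map; two Add calls
	// for the same field collapse into one inner map, as asserted by
	// TestConfigList below.
	args := map[string]map[string]bool{
		"label": {"label1": true, "label2": true},
	}
	b, err := json.Marshal(args)
	if err != nil {
		panic(err)
	}
	q := url.Values{}
	q.Set("filters", string(b))
	fmt.Println(string(b)) // {"label":{"label1":true,"label2":true}}
	fmt.Println("/configs?" + q.Encode())
}
```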
+func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) { + if err := cli.NewVersionError("1.30", "config list"); err != nil { + return nil, err + } + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/configs", query, nil) + if err != nil { + return nil, err + } + + var configs []swarm.Config + err = json.NewDecoder(resp.body).Decode(&configs) + ensureReaderClosed(resp) + return configs, err +} diff --git a/vendor/github.com/moby/moby/client/config_list_test.go b/vendor/github.com/moby/moby/client/config_list_test.go new file mode 100644 index 000000000..4fe05469f --- /dev/null +++ b/vendor/github.com/moby/moby/client/config_list_test.go @@ -0,0 +1,106 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestConfigListUnsupported(t *testing.T) { + client := &Client{ + version: "1.29", + client: &http.Client{}, + } + _, err := client.ConfigList(context.Background(), types.ConfigListOptions{}) + assert.EqualError(t, err, `"config list" requires API version 1.30, but the Docker daemon API version is 1.29`) +} + +func TestConfigListError(t *testing.T) { + client := &Client{ + version: "1.30", + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ConfigList(context.Background(), types.ConfigListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestConfigList(t *testing.T) { + expectedURL := "/v1.30/configs" + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + + listCases := []struct { + options types.ConfigListOptions + expectedQueryParams map[string]string + }{ + { + options: types.ConfigListOptions{}, + expectedQueryParams: map[string]string{ + "filters": "", + }, + }, + { + options: types.ConfigListOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": `{"label":{"label1":true,"label2":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + version: "1.30", + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. 
Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]swarm.Config{ + { + ID: "config_id1", + }, + { + ID: "config_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + configs, err := client.ConfigList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(configs) != 2 { + t.Fatalf("expected 2 configs, got %v", configs) + } + } +} diff --git a/vendor/github.com/moby/moby/client/config_remove.go b/vendor/github.com/moby/moby/client/config_remove.go new file mode 100644 index 000000000..726b5c853 --- /dev/null +++ b/vendor/github.com/moby/moby/client/config_remove.go @@ -0,0 +1,13 @@ +package client + +import "golang.org/x/net/context" + +// ConfigRemove removes a Config. +func (cli *Client) ConfigRemove(ctx context.Context, id string) error { + if err := cli.NewVersionError("1.30", "config remove"); err != nil { + return err + } + resp, err := cli.delete(ctx, "/configs/"+id, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/config_remove_test.go b/vendor/github.com/moby/moby/client/config_remove_test.go new file mode 100644 index 000000000..f2a9feea7 --- /dev/null +++ b/vendor/github.com/moby/moby/client/config_remove_test.go @@ -0,0 +1,59 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestConfigRemoveUnsupported(t *testing.T) { + client := &Client{ + version: "1.29", + client: &http.Client{}, + } + err := client.ConfigRemove(context.Background(), "config_id") + assert.EqualError(t, err, `"config remove" requires API version 1.30, but the Docker daemon API version is 1.29`) +} + +func TestConfigRemoveError(t *testing.T) { + client := &Client{ + version: "1.30", + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.ConfigRemove(context.Background(), "config_id") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestConfigRemove(t *testing.T) { + expectedURL := "/v1.30/configs/config_id" + + client := &Client{ + version: "1.30", + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.ConfigRemove(context.Background(), "config_id") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/config_update.go b/vendor/github.com/moby/moby/client/config_update.go new file mode 100644 index 000000000..823751bb8 --- /dev/null +++ b/vendor/github.com/moby/moby/client/config_update.go @@ -0,0 +1,21 @@ +package client + +import ( + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// ConfigUpdate attempts to update a Config +func (cli *Client) ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error { + if err := cli.NewVersionError("1.30", "config update"); 
err != nil {
+ return err
+ }
+ query := url.Values{}
+ query.Set("version", strconv.FormatUint(version.Index, 10))
+ resp, err := cli.post(ctx, "/configs/"+id+"/update", query, config, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/moby/moby/client/config_update_test.go b/vendor/github.com/moby/moby/client/config_update_test.go
new file mode 100644
index 000000000..799e544da
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/config_update_test.go
@@ -0,0 +1,60 @@
+package client
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "strings"
+ "testing"
+
+ "github.com/docker/docker/api/types/swarm"
+ "github.com/stretchr/testify/assert"
+ "golang.org/x/net/context"
+)
+
+func TestConfigUpdateUnsupported(t *testing.T) {
+ client := &Client{
+ version: "1.29",
+ client: &http.Client{},
+ }
+ err := client.ConfigUpdate(context.Background(), "config_id", swarm.Version{}, swarm.ConfigSpec{})
+ assert.EqualError(t, err, `"config update" requires API version 1.30, but the Docker daemon API version is 1.29`)
+}
+
+func TestConfigUpdateError(t *testing.T) {
+ client := &Client{
+ version: "1.30",
+ client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
+ }
+
+ err := client.ConfigUpdate(context.Background(), "config_id", swarm.Version{}, swarm.ConfigSpec{})
+ if err == nil || err.Error() != "Error response from daemon: Server error" {
+ t.Fatalf("expected a Server Error, got %v", err)
+ }
+}
+
+func TestConfigUpdate(t *testing.T) {
+ expectedURL := "/v1.30/configs/config_id/update"
+
+ client := &Client{
+ version: "1.30",
+ client: newMockClient(func(req *http.Request) (*http.Response, error) {
+ if !strings.HasPrefix(req.URL.Path, expectedURL) {
+ return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL)
+ }
+ if req.Method != "POST" {
+ return nil, fmt.Errorf("expected POST method, got %s", req.Method)
+ }
+ return &http.Response{
+ StatusCode: http.StatusOK,
+ Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))),
+ }, nil
+ }),
+ }
+
+ err := client.ConfigUpdate(context.Background(), "config_id", swarm.Version{}, swarm.ConfigSpec{})
+ if err != nil {
+ t.Fatal(err)
+ }
+}
diff --git a/vendor/github.com/moby/moby/client/container_attach.go b/vendor/github.com/moby/moby/client/container_attach.go
new file mode 100644
index 000000000..0fdf3ed0c
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/container_attach.go
@@ -0,0 +1,57 @@
+package client
+
+import (
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// ContainerAttach attaches a connection to a container in the server.
+// It returns a types.HijackedResponse with the hijacked connection
+// and a reader to get output. It's up to the caller to close
+// the hijacked connection by calling types.HijackedResponse.Close.
+//
+// The stream format on the response will be in one of two formats:
+//
+// If the container is using a TTY, there is only a single stream (stdout), and
+// data is copied directly from the container output stream, no extra
+// multiplexing or headers.
+//
+// If the container is *not* using a TTY, streams for stdout and stderr are
+// multiplexed.
+// The format of the multiplexed stream is as follows:
+//
+// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT}
+//
+// STREAM_TYPE can be 1 for stdout and 2 for stderr
+//
+// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian.
+// This is the size of OUTPUT.
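A hand-rolled sketch of consuming the frame layout just described; the supported decoder is stdcopy.StdCopy, referenced in the comment that follows:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// demux walks the multiplexed attach stream: each frame is an 8-byte
// header (stream type, three zero bytes, big-endian uint32 payload size)
// followed by the payload itself.
func demux(r io.Reader) error {
	header := make([]byte, 8)
	for {
		if _, err := io.ReadFull(r, header); err != nil {
			if err == io.EOF {
				return nil // clean end of stream
			}
			return err
		}
		payload := make([]byte, binary.BigEndian.Uint32(header[4:8]))
		if _, err := io.ReadFull(r, payload); err != nil {
			return err
		}
		fmt.Printf("stream %d: %s", header[0], payload)
	}
}

func main() {
	var stream bytes.Buffer
	stream.Write([]byte{1, 0, 0, 0, 0, 0, 0, 6}) // stdout frame, 6-byte payload
	stream.WriteString("hello\n")
	if err := demux(&stream); err != nil {
		fmt.Println("demux error:", err)
	}
}
```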
+// +// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this +// stream. +func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) { + query := url.Values{} + if options.Stream { + query.Set("stream", "1") + } + if options.Stdin { + query.Set("stdin", "1") + } + if options.Stdout { + query.Set("stdout", "1") + } + if options.Stderr { + query.Set("stderr", "1") + } + if options.DetachKeys != "" { + query.Set("detachKeys", options.DetachKeys) + } + if options.Logs { + query.Set("logs", "1") + } + + headers := map[string][]string{"Content-Type": {"text/plain"}} + return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers) +} diff --git a/vendor/github.com/moby/moby/client/container_commit.go b/vendor/github.com/moby/moby/client/container_commit.go new file mode 100644 index 000000000..531d796ee --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_commit.go @@ -0,0 +1,55 @@ +package client + +import ( + "encoding/json" + "errors" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerCommit applies changes into a container and creates a new tagged image. +func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) { + var repository, tag string + if options.Reference != "" { + ref, err := reference.ParseNormalizedNamed(options.Reference) + if err != nil { + return types.IDResponse{}, err + } + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return types.IDResponse{}, errors.New("refusing to create a tag with a digest reference") + } + ref = reference.TagNameOnly(ref) + + if tagged, ok := ref.(reference.Tagged); ok { + tag = tagged.Tag() + } + repository = reference.FamiliarName(ref) + } + + query := url.Values{} + query.Set("container", container) + query.Set("repo", repository) + query.Set("tag", tag) + query.Set("comment", options.Comment) + query.Set("author", options.Author) + for _, change := range options.Changes { + query.Add("changes", change) + } + if options.Pause != true { + query.Set("pause", "0") + } + + var response types.IDResponse + resp, err := cli.post(ctx, "/commit", query, options.Config, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/moby/moby/client/container_commit_test.go b/vendor/github.com/moby/moby/client/container_commit_test.go new file mode 100644 index 000000000..6947ed386 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_commit_test.go @@ -0,0 +1,96 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestContainerCommitError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerCommit(context.Background(), "nothing", types.ContainerCommitOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerCommit(t *testing.T) { + expectedURL := "/commit" + expectedContainerID := "container_id" + specifiedReference := "repository_name:tag" + 
expectedRepositoryName := "repository_name"
+ expectedTag := "tag"
+ expectedComment := "comment"
+ expectedAuthor := "author"
+ expectedChanges := []string{"change1", "change2"}
+
+ client := &Client{
+ client: newMockClient(func(req *http.Request) (*http.Response, error) {
+ if !strings.HasPrefix(req.URL.Path, expectedURL) {
+ return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL)
+ }
+ query := req.URL.Query()
+ containerID := query.Get("container")
+ if containerID != expectedContainerID {
+ return nil, fmt.Errorf("container id not set in URL query properly. Expected '%s', got %s", expectedContainerID, containerID)
+ }
+ repo := query.Get("repo")
+ if repo != expectedRepositoryName {
+ return nil, fmt.Errorf("container repo not set in URL query properly. Expected '%s', got %s", expectedRepositoryName, repo)
+ }
+ tag := query.Get("tag")
+ if tag != expectedTag {
+ return nil, fmt.Errorf("container tag not set in URL query properly. Expected '%s', got '%s'", expectedTag, tag)
+ }
+ comment := query.Get("comment")
+ if comment != expectedComment {
+ return nil, fmt.Errorf("container comment not set in URL query properly. Expected '%s', got '%s'", expectedComment, comment)
+ }
+ author := query.Get("author")
+ if author != expectedAuthor {
+ return nil, fmt.Errorf("container author not set in URL query properly. Expected '%s', got '%s'", expectedAuthor, author)
+ }
+ pause := query.Get("pause")
+ if pause != "0" {
+ return nil, fmt.Errorf("container pause not set in URL query properly. Expected '0', got '%s'", pause)
+ }
+ changes := query["changes"]
+ if len(changes) != len(expectedChanges) {
+ return nil, fmt.Errorf("expected container changes size to be '%d', got %d", len(expectedChanges), len(changes))
+ }
+ b, err := json.Marshal(types.IDResponse{
+ ID: "new_container_id",
+ })
+ if err != nil {
+ return nil, err
+ }
+ return &http.Response{
+ StatusCode: http.StatusOK,
+ Body: ioutil.NopCloser(bytes.NewReader(b)),
+ }, nil
+ }),
+ }
+
+ r, err := client.ContainerCommit(context.Background(), expectedContainerID, types.ContainerCommitOptions{
+ Reference: specifiedReference,
+ Comment: expectedComment,
+ Author: expectedAuthor,
+ Changes: expectedChanges,
+ Pause: false,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if r.ID != "new_container_id" {
+ t.Fatalf("expected `new_container_id`, got %s", r.ID)
+ }
+}
diff --git a/vendor/github.com/moby/moby/client/container_copy.go b/vendor/github.com/moby/moby/client/container_copy.go
new file mode 100644
index 000000000..30ba6803f
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/container_copy.go
@@ -0,0 +1,102 @@
+package client
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "path/filepath"
+ "strings"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/types"
+)
+
+// ContainerStatPath returns Stat information about a path inside the container filesystem.
+func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) {
+ query := url.Values{}
+ query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API.
+
+ urlStr := "/containers/" + containerID + "/archive"
+ response, err := cli.head(ctx, urlStr, query, nil)
+ if err != nil {
+ return types.ContainerPathStat{}, err
+ }
+ defer ensureReaderClosed(response)
+ return getContainerPathStatFromHeader(response.header)
+}
+
+// CopyToContainer copies content into the container filesystem.
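As the next comment line notes, the content reader passed to CopyToContainer must carry a tar archive. A minimal in-memory way to produce one with the standard library:

```go
package main

import (
	"archive/tar"
	"bytes"
	"fmt"
)

// tarSingleFile packs one file into an in-memory tar archive, the shape of
// reader CopyToContainer expects for its content argument.
func tarSingleFile(name string, data []byte) (*bytes.Buffer, error) {
	buf := &bytes.Buffer{}
	tw := tar.NewWriter(buf)
	hdr := &tar.Header{Name: name, Mode: 0600, Size: int64(len(data))}
	if err := tw.WriteHeader(hdr); err != nil {
		return nil, err
	}
	if _, err := tw.Write(data); err != nil {
		return nil, err
	}
	if err := tw.Close(); err != nil {
		return nil, err
	}
	return buf, nil
}

func main() {
	buf, err := tarSingleFile("hello.txt", []byte("hi\n"))
	if err != nil {
		panic(err)
	}
	// buf can now serve as the content reader for CopyToContainer.
	fmt.Println("archive size:", buf.Len())
}
```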
+// Note that `content` must be a Reader for a TAR +func (cli *Client) CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error { + query := url.Values{} + query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. + // Do not allow for an existing directory to be overwritten by a non-directory and vice versa. + if !options.AllowOverwriteDirWithFile { + query.Set("noOverwriteDirNonDir", "true") + } + + if options.CopyUIDGID { + query.Set("copyUIDGID", "true") + } + + apiPath := "/containers/" + container + "/archive" + + response, err := cli.putRaw(ctx, apiPath, query, content, nil) + if err != nil { + return err + } + defer ensureReaderClosed(response) + + if response.statusCode != http.StatusOK { + return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) + } + + return nil +} + +// CopyFromContainer gets the content from the container and returns it as a Reader +// to manipulate it in the host. It's up to the caller to close the reader. +func (cli *Client) CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { + query := make(url.Values, 1) + query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API. + + apiPath := "/containers/" + container + "/archive" + response, err := cli.get(ctx, apiPath, query, nil) + if err != nil { + return nil, types.ContainerPathStat{}, err + } + + if response.statusCode != http.StatusOK { + return nil, types.ContainerPathStat{}, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) + } + + // In order to get the copy behavior right, we need to know information + // about both the source and the destination. The response headers include + // stat info about the source that we can use in deciding exactly how to + // copy it locally. Along with the stat info about the local destination, + // we have everything we need to handle the multiple possibilities there + // can be when copying a file/dir from one location to another file/dir. 
+ stat, err := getContainerPathStatFromHeader(response.header) + if err != nil { + return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err) + } + return response.body, stat, err +} + +func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) { + var stat types.ContainerPathStat + + encodedStat := header.Get("X-Docker-Container-Path-Stat") + statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat)) + + err := json.NewDecoder(statDecoder).Decode(&stat) + if err != nil { + err = fmt.Errorf("unable to decode container path stat header: %s", err) + } + + return stat, err +} diff --git a/vendor/github.com/moby/moby/client/container_copy_test.go b/vendor/github.com/moby/moby/client/container_copy_test.go new file mode 100644 index 000000000..c84f82e9f --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_copy_test.go @@ -0,0 +1,244 @@ +package client + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +func TestContainerStatPathError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerStatPath(context.Background(), "container_id", "path") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestContainerStatPathNoHeaderError(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + _, err := client.ContainerStatPath(context.Background(), "container_id", "path/to/file") + if err == nil { + t.Fatalf("expected an error, got nothing") + } +} + +func TestContainerStatPath(t *testing.T) { + expectedURL := "/containers/container_id/archive" + expectedPath := "path/to/file" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "HEAD" { + return nil, fmt.Errorf("expected HEAD method, got %s", req.Method) + } + query := req.URL.Query() + path := query.Get("path") + if path != expectedPath { + return nil, fmt.Errorf("path not set in URL query properly") + } + content, err := json.Marshal(types.ContainerPathStat{ + Name: "name", + Mode: 0700, + }) + if err != nil { + return nil, err + } + base64PathStat := base64.StdEncoding.EncodeToString(content) + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + Header: http.Header{ + "X-Docker-Container-Path-Stat": []string{base64PathStat}, + }, + }, nil + }), + } + stat, err := client.ContainerStatPath(context.Background(), "container_id", expectedPath) + if err != nil { + t.Fatal(err) + } + if stat.Name != "name" { + t.Fatalf("expected container path stat name to be 'name', got '%s'", stat.Name) + } + if stat.Mode != 0700 { + t.Fatalf("expected container path stat mode to be 0700, got '%v'", stat.Mode) + } +} + +func TestCopyToContainerError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := 
client.CopyToContainer(context.Background(), "container_id", "path/to/file", bytes.NewReader([]byte("")), types.CopyToContainerOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestCopyToContainerNotStatusOKError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNoContent, "No content")), + } + err := client.CopyToContainer(context.Background(), "container_id", "path/to/file", bytes.NewReader([]byte("")), types.CopyToContainerOptions{}) + if err == nil || err.Error() != "unexpected status code from daemon: 204" { + t.Fatalf("expected an unexpected status code error, got %v", err) + } +} + +func TestCopyToContainer(t *testing.T) { + expectedURL := "/containers/container_id/archive" + expectedPath := "path/to/file" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "PUT" { + return nil, fmt.Errorf("expected PUT method, got %s", req.Method) + } + query := req.URL.Query() + path := query.Get("path") + if path != expectedPath { + return nil, fmt.Errorf("path not set in URL query properly, expected '%s', got %s", expectedPath, path) + } + noOverwriteDirNonDir := query.Get("noOverwriteDirNonDir") + if noOverwriteDirNonDir != "true" { + return nil, fmt.Errorf("noOverwriteDirNonDir not set in URL query properly, expected true, got %s", noOverwriteDirNonDir) + } + + content, err := ioutil.ReadAll(req.Body) + if err != nil { + return nil, err + } + if err := req.Body.Close(); err != nil { + return nil, err + } + if string(content) != "content" { + return nil, fmt.Errorf("expected content to be 'content', got %s", string(content)) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + err := client.CopyToContainer(context.Background(), "container_id", expectedPath, bytes.NewReader([]byte("content")), types.CopyToContainerOptions{ + AllowOverwriteDirWithFile: false, + }) + if err != nil { + t.Fatal(err) + } +} + +func TestCopyFromContainerError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, _, err := client.CopyFromContainer(context.Background(), "container_id", "path/to/file") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestCopyFromContainerNotStatusOKError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNoContent, "No content")), + } + _, _, err := client.CopyFromContainer(context.Background(), "container_id", "path/to/file") + if err == nil || err.Error() != "unexpected status code from daemon: 204" { + t.Fatalf("expected an unexpected status code error, got %v", err) + } +} + +func TestCopyFromContainerNoHeaderError(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + _, _, err := client.CopyFromContainer(context.Background(), "container_id", "path/to/file") + if err == nil { + t.Fatalf("expected an error, got nothing") + } +} + +func TestCopyFromContainer(t *testing.T) { + expectedURL := 
"/containers/container_id/archive" + expectedPath := "path/to/file" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "GET" { + return nil, fmt.Errorf("expected GET method, got %s", req.Method) + } + query := req.URL.Query() + path := query.Get("path") + if path != expectedPath { + return nil, fmt.Errorf("path not set in URL query properly, expected '%s', got %s", expectedPath, path) + } + + headercontent, err := json.Marshal(types.ContainerPathStat{ + Name: "name", + Mode: 0700, + }) + if err != nil { + return nil, err + } + base64PathStat := base64.StdEncoding.EncodeToString(headercontent) + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("content"))), + Header: http.Header{ + "X-Docker-Container-Path-Stat": []string{base64PathStat}, + }, + }, nil + }), + } + r, stat, err := client.CopyFromContainer(context.Background(), "container_id", expectedPath) + if err != nil { + t.Fatal(err) + } + if stat.Name != "name" { + t.Fatalf("expected container path stat name to be 'name', got '%s'", stat.Name) + } + if stat.Mode != 0700 { + t.Fatalf("expected container path stat mode to be 0700, got '%v'", stat.Mode) + } + content, err := ioutil.ReadAll(r) + if err != nil { + t.Fatal(err) + } + if err := r.Close(); err != nil { + t.Fatal(err) + } + if string(content) != "content" { + t.Fatalf("expected content to be 'content', got %s", string(content)) + } +} diff --git a/vendor/github.com/moby/moby/client/container_create.go b/vendor/github.com/moby/moby/client/container_create.go new file mode 100644 index 000000000..6841b0b28 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_create.go @@ -0,0 +1,56 @@ +package client + +import ( + "encoding/json" + "net/url" + "strings" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/versions" + "golang.org/x/net/context" +) + +type configWrapper struct { + *container.Config + HostConfig *container.HostConfig + NetworkingConfig *network.NetworkingConfig +} + +// ContainerCreate creates a new container based in the given configuration. +// It can be associated with a name, but it's not mandatory. 
+func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error) { + var response container.ContainerCreateCreatedBody + + if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil { + return response, err + } + + // When using API 1.24 and under, the client is responsible for removing the container + if hostConfig != nil && versions.LessThan(cli.ClientVersion(), "1.25") { + hostConfig.AutoRemove = false + } + + query := url.Values{} + if containerName != "" { + query.Set("name", containerName) + } + + body := configWrapper{ + Config: config, + HostConfig: hostConfig, + NetworkingConfig: networkingConfig, + } + + serverResp, err := cli.post(ctx, "/containers/create", query, body, nil) + if err != nil { + if serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") { + return response, imageNotFoundError{config.Image} + } + return response, err + } + + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/vendor/github.com/moby/moby/client/container_create_test.go b/vendor/github.com/moby/moby/client/container_create_test.go new file mode 100644 index 000000000..3ab608c21 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_create_test.go @@ -0,0 +1,118 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/container" + "golang.org/x/net/context" +) + +func TestContainerCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerCreate(context.Background(), nil, nil, nil, "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error while testing StatusInternalServerError, got %v", err) + } + + // 404 doesn't automatically means an unknown image + client = &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + _, err = client.ContainerCreate(context.Background(), nil, nil, nil, "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error while testing StatusNotFound, got %v", err) + } +} + +func TestContainerCreateImageNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "No such image")), + } + _, err := client.ContainerCreate(context.Background(), &container.Config{Image: "unknown_image"}, nil, nil, "unknown") + if err == nil || !IsErrImageNotFound(err) { + t.Fatalf("expected an imageNotFound error, got %v", err) + } +} + +func TestContainerCreateWithName(t *testing.T) { + expectedURL := "/containers/create" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + name := req.URL.Query().Get("name") + if name != "container_name" { + return nil, fmt.Errorf("container name not set in URL query properly. 
Expected `container_name`, got %s", name) + } + b, err := json.Marshal(container.ContainerCreateCreatedBody{ + ID: "container_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + r, err := client.ContainerCreate(context.Background(), nil, nil, nil, "container_name") + if err != nil { + t.Fatal(err) + } + if r.ID != "container_id" { + t.Fatalf("expected `container_id`, got %s", r.ID) + } +} + +// TestContainerCreateAutoRemove validates that a client using API 1.24 always disables AutoRemove. When using API 1.25 +// or up, AutoRemove should not be disabled. +func TestContainerCreateAutoRemove(t *testing.T) { + autoRemoveValidator := func(expectedValue bool) func(req *http.Request) (*http.Response, error) { + return func(req *http.Request) (*http.Response, error) { + var config configWrapper + + if err := json.NewDecoder(req.Body).Decode(&config); err != nil { + return nil, err + } + if config.HostConfig.AutoRemove != expectedValue { + return nil, fmt.Errorf("expected AutoRemove to be %v, got %v", expectedValue, config.HostConfig.AutoRemove) + } + b, err := json.Marshal(container.ContainerCreateCreatedBody{ + ID: "container_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + } + } + + client := &Client{ + client: newMockClient(autoRemoveValidator(false)), + version: "1.24", + } + if _, err := client.ContainerCreate(context.Background(), nil, &container.HostConfig{AutoRemove: true}, nil, ""); err != nil { + t.Fatal(err) + } + client = &Client{ + client: newMockClient(autoRemoveValidator(true)), + version: "1.25", + } + if _, err := client.ContainerCreate(context.Background(), nil, &container.HostConfig{AutoRemove: true}, nil, ""); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/container_diff.go b/vendor/github.com/moby/moby/client/container_diff.go new file mode 100644 index 000000000..884dc9fee --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_diff.go @@ -0,0 +1,23 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types/container" + "golang.org/x/net/context" +) + +// ContainerDiff shows differences in a container filesystem since it was started. 
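A sketch of consuming the change list that ContainerDiff (below) decodes; mapping kinds 0/1/2 to the C/A/D letters of `docker diff` is my reading of the docker conventions, not something this file states:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// changeItem mirrors the shape of ContainerChangeResponseItem decoded below.
type changeItem struct {
	Kind uint8  `json:"Kind"`
	Path string `json:"Path"`
}

func main() {
	payload := []byte(`[{"Kind":0,"Path":"/path/1"},{"Kind":1,"Path":"/path/2"}]`)
	var changes []changeItem
	if err := json.Unmarshal(payload, &changes); err != nil {
		panic(err)
	}
	// Assumed kind letters: 0 = changed, 1 = added, 2 = deleted.
	letters := map[uint8]string{0: "C", 1: "A", 2: "D"}
	for _, c := range changes {
		fmt.Printf("%s %s\n", letters[c.Kind], c.Path)
	}
	// C /path/1
	// A /path/2
}
```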
+func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]container.ContainerChangeResponseItem, error) { + var changes []container.ContainerChangeResponseItem + + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil) + if err != nil { + return changes, err + } + + err = json.NewDecoder(serverResp.body).Decode(&changes) + ensureReaderClosed(serverResp) + return changes, err +} diff --git a/vendor/github.com/moby/moby/client/container_diff_test.go b/vendor/github.com/moby/moby/client/container_diff_test.go new file mode 100644 index 000000000..57dd73e66 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_diff_test.go @@ -0,0 +1,61 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/container" + "golang.org/x/net/context" +) + +func TestContainerDiffError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerDiff(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } + +} + +func TestContainerDiff(t *testing.T) { + expectedURL := "/containers/container_id/changes" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + b, err := json.Marshal([]container.ContainerChangeResponseItem{ + { + Kind: 0, + Path: "/path/1", + }, + { + Kind: 1, + Path: "/path/2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + changes, err := client.ContainerDiff(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } + if len(changes) != 2 { + t.Fatalf("expected an array of 2 changes, got %v", changes) + } +} diff --git a/vendor/github.com/moby/moby/client/container_exec.go b/vendor/github.com/moby/moby/client/container_exec.go new file mode 100644 index 000000000..0665c54fb --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_exec.go @@ -0,0 +1,54 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerExecCreate creates a new exec configuration to run an exec process. +func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) { + var response types.IDResponse + + if err := cli.NewVersionError("1.25", "env"); len(config.Env) != 0 && err != nil { + return response, err + } + + resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil) + if err != nil { + return response, err + } + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} + +// ContainerExecStart starts an exec process already created in the docker host. +func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error { + resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil) + ensureReaderClosed(resp) + return err +} + +// ContainerExecAttach attaches a connection to an exec process in the server. 
+// It returns a types.HijackedResponse with the hijacked connection
+// and a reader to get output. It's up to the caller to close
+// the hijacked connection by calling types.HijackedResponse.Close.
+func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) {
+ headers := map[string][]string{"Content-Type": {"application/json"}}
+ return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers)
+}
+
+// ContainerExecInspect returns information about a specific exec process on the docker host.
+func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) {
+ var response types.ContainerExecInspect
+ resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil)
+ if err != nil {
+ return response, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&response)
+ ensureReaderClosed(resp)
+ return response, err
+}
diff --git a/vendor/github.com/moby/moby/client/container_exec_test.go b/vendor/github.com/moby/moby/client/container_exec_test.go
new file mode 100644
index 000000000..0e296a50a
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/container_exec_test.go
@@ -0,0 +1,157 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "strings"
+ "testing"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/types"
+)
+
+func TestContainerExecCreateError(t *testing.T) {
+ client := &Client{
+ client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
+ }
+ _, err := client.ContainerExecCreate(context.Background(), "container_id", types.ExecConfig{})
+ if err == nil || err.Error() != "Error response from daemon: Server error" {
+ t.Fatalf("expected a Server Error, got %v", err)
+ }
+}
+
+func TestContainerExecCreate(t *testing.T) {
+ expectedURL := "/containers/container_id/exec"
+ client := &Client{
+ client: newMockClient(func(req *http.Request) (*http.Response, error) {
+ if !strings.HasPrefix(req.URL.Path, expectedURL) {
+ return nil, fmt.Errorf("expected URL '%s', got '%s'", expectedURL, req.URL)
+ }
+ if req.Method != "POST" {
+ return nil, fmt.Errorf("expected POST method, got %s", req.Method)
+ }
+ // FIXME validate the content is the given ExecConfig ?
+ if err := req.ParseForm(); err != nil { + return nil, err + } + execConfig := &types.ExecConfig{} + if err := json.NewDecoder(req.Body).Decode(execConfig); err != nil { + return nil, err + } + if execConfig.User != "user" { + return nil, fmt.Errorf("expected an execConfig with User == 'user', got %v", execConfig) + } + b, err := json.Marshal(types.IDResponse{ + ID: "exec_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + r, err := client.ContainerExecCreate(context.Background(), "container_id", types.ExecConfig{ + User: "user", + }) + if err != nil { + t.Fatal(err) + } + if r.ID != "exec_id" { + t.Fatalf("expected `exec_id`, got %s", r.ID) + } +} + +func TestContainerExecStartError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerExecStart(context.Background(), "nothing", types.ExecStartCheck{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerExecStart(t *testing.T) { + expectedURL := "/exec/exec_id/start" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if err := req.ParseForm(); err != nil { + return nil, err + } + execStartCheck := &types.ExecStartCheck{} + if err := json.NewDecoder(req.Body).Decode(execStartCheck); err != nil { + return nil, err + } + if execStartCheck.Tty || !execStartCheck.Detach { + return nil, fmt.Errorf("expected execStartCheck{Detach:true,Tty:false}, got %v", execStartCheck) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.ContainerExecStart(context.Background(), "exec_id", types.ExecStartCheck{ + Detach: true, + Tty: false, + }) + if err != nil { + t.Fatal(err) + } +} + +func TestContainerExecInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerExecInspect(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerExecInspect(t *testing.T) { + expectedURL := "/exec/exec_id/json" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + b, err := json.Marshal(types.ContainerExecInspect{ + ExecID: "exec_id", + ContainerID: "container_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + inspect, err := client.ContainerExecInspect(context.Background(), "exec_id") + if err != nil { + t.Fatal(err) + } + if inspect.ExecID != "exec_id" { + t.Fatalf("expected ExecID to be `exec_id`, got %s", inspect.ExecID) + } + if inspect.ContainerID != "container_id" { + t.Fatalf("expected ContainerID `container_id`, got %s", inspect.ContainerID) + } +} diff --git a/vendor/github.com/moby/moby/client/container_export.go 
b/vendor/github.com/moby/moby/client/container_export.go new file mode 100644 index 000000000..52194f3d3 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_export.go @@ -0,0 +1,20 @@ +package client + +import ( + "io" + "net/url" + + "golang.org/x/net/context" +) + +// ContainerExport retrieves the raw contents of a container +// and returns them as an io.ReadCloser. It's up to the caller +// to close the stream. +func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) { + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil) + if err != nil { + return nil, err + } + + return serverResp.body, nil +} diff --git a/vendor/github.com/moby/moby/client/container_export_test.go b/vendor/github.com/moby/moby/client/container_export_test.go new file mode 100644 index 000000000..5849fe925 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_export_test.go @@ -0,0 +1,50 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerExportError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerExport(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerExport(t *testing.T) { + expectedURL := "/containers/container_id/export" + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + body, err := client.ContainerExport(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } + defer body.Close() + content, err := ioutil.ReadAll(body) + if err != nil { + t.Fatal(err) + } + if string(content) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(content)) + } +} diff --git a/vendor/github.com/moby/moby/client/container_inspect.go b/vendor/github.com/moby/moby/client/container_inspect.go new file mode 100644 index 000000000..17f180974 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_inspect.go @@ -0,0 +1,54 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerInspect returns the container information. +func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) { + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return types.ContainerJSON{}, containerNotFoundError{containerID} + } + return types.ContainerJSON{}, err + } + + var response types.ContainerJSON + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} + +// ContainerInspectWithRaw returns the container information and its raw representation. 
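+//
+// A minimal usage sketch, assuming a configured *Client named cli, a
+// context ctx, and a placeholder container ID; the raw bytes are useful
+// when a caller needs fields the typed struct does not expose:
+//
+//	c, raw, err := cli.ContainerInspectWithRaw(ctx, "container_id", true)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(c.ID, len(raw))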
+func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) { + query := url.Values{} + if getSize { + query.Set("size", "1") + } + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return types.ContainerJSON{}, nil, containerNotFoundError{containerID} + } + return types.ContainerJSON{}, nil, err + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return types.ContainerJSON{}, nil, err + } + + var response types.ContainerJSON + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/moby/moby/client/container_inspect_test.go b/vendor/github.com/moby/moby/client/container_inspect_test.go new file mode 100644 index 000000000..98f83bd8d --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_inspect_test.go @@ -0,0 +1,125 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestContainerInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ContainerInspect(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerInspectContainerNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, err := client.ContainerInspect(context.Background(), "unknown") + if err == nil || !IsErrContainerNotFound(err) { + t.Fatalf("expected a containerNotFound error, got %v", err) + } +} + +func TestContainerInspect(t *testing.T) { + expectedURL := "/containers/container_id/json" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: "container_id", + Image: "image", + Name: "name", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + r, err := client.ContainerInspect(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } + if r.ID != "container_id" { + t.Fatalf("expected `container_id`, got %s", r.ID) + } + if r.Image != "image" { + t.Fatalf("expected `image`, got %s", r.Image) + } + if r.Name != "name" { + t.Fatalf("expected `name`, got %s", r.Name) + } +} + +func TestContainerInspectNode(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + content, err := json.Marshal(types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: "container_id", + Image: "image", + Name: "name", + Node: &types.ContainerNode{ + ID: "container_node_id", + Addr: "container_node", + Labels: map[string]string{"foo": "bar"}, + }, + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: 
ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + r, err := client.ContainerInspect(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } + if r.ID != "container_id" { + t.Fatalf("expected `container_id`, got %s", r.ID) + } + if r.Image != "image" { + t.Fatalf("expected `image`, got %s", r.Image) + } + if r.Name != "name" { + t.Fatalf("expected `name`, got %s", r.Name) + } + if r.Node.ID != "container_node_id" { + t.Fatalf("expected `container_node_id`, got %s", r.Node.ID) + } + if r.Node.Addr != "container_node" { + t.Fatalf("expected `container_node`, got %s", r.Node.Addr) + } + foo, ok := r.Node.Labels["foo"] + if foo != "bar" || !ok { + t.Fatalf("expected `bar` for label `foo`") + } +} diff --git a/vendor/github.com/moby/moby/client/container_kill.go b/vendor/github.com/moby/moby/client/container_kill.go new file mode 100644 index 000000000..29f80c73a --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_kill.go @@ -0,0 +1,17 @@ +package client + +import ( + "net/url" + + "golang.org/x/net/context" +) + +// ContainerKill terminates the container process but does not remove the container from the docker host. +func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error { + query := url.Values{} + query.Set("signal", signal) + + resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/container_kill_test.go b/vendor/github.com/moby/moby/client/container_kill_test.go new file mode 100644 index 000000000..9477b0abd --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_kill_test.go @@ -0,0 +1,46 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerKillError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerKill(context.Background(), "nothing", "SIGKILL") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerKill(t *testing.T) { + expectedURL := "/containers/container_id/kill" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + signal := req.URL.Query().Get("signal") + if signal != "SIGKILL" { + return nil, fmt.Errorf("signal not set in URL query properly. Expected 'SIGKILL', got %s", signal) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.ContainerKill(context.Background(), "container_id", "SIGKILL") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/container_list.go b/vendor/github.com/moby/moby/client/container_list.go new file mode 100644 index 000000000..439891219 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_list.go @@ -0,0 +1,56 @@ +package client + +import ( + "encoding/json" + "net/url" + "strconv" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// ContainerList returns the list of containers in the docker host. 
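+//
+// A minimal sketch, assuming a configured *Client named cli and a context
+// ctx; All includes stopped containers, and Filters is optional:
+//
+//	containers, err := cli.ContainerList(ctx, types.ContainerListOptions{All: true})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, c := range containers {
+//		fmt.Println(c.ID)
+//	}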
+func (cli *Client) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
+	query := url.Values{}
+
+	if options.All {
+		query.Set("all", "1")
+	}
+
+	if options.Limit != -1 {
+		query.Set("limit", strconv.Itoa(options.Limit))
+	}
+
+	if options.Since != "" {
+		query.Set("since", options.Since)
+	}
+
+	if options.Before != "" {
+		query.Set("before", options.Before)
+	}
+
+	if options.Size {
+		query.Set("size", "1")
+	}
+
+	if options.Filters.Len() > 0 {
+		filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters)
+
+		if err != nil {
+			return nil, err
+		}
+
+		query.Set("filters", filterJSON)
+	}
+
+	resp, err := cli.get(ctx, "/containers/json", query, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	var containers []types.Container
+	err = json.NewDecoder(resp.body).Decode(&containers)
+	ensureReaderClosed(resp)
+	return containers, err
+}
diff --git a/vendor/github.com/moby/moby/client/container_list_test.go b/vendor/github.com/moby/moby/client/container_list_test.go
new file mode 100644
index 000000000..e41c6874b
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/container_list_test.go
@@ -0,0 +1,96 @@
+package client
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"testing"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/filters"
+	"golang.org/x/net/context"
+)
+
+func TestContainerListError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
+	}
+	_, err := client.ContainerList(context.Background(), types.ContainerListOptions{})
+	if err == nil || err.Error() != "Error response from daemon: Server error" {
+		t.Fatalf("expected a Server Error, got %v", err)
+	}
+}
+
+func TestContainerList(t *testing.T) {
+	expectedURL := "/containers/json"
+	expectedFilters := `{"before":{"container":true},"label":{"label1":true,"label2":true}}`
+	client := &Client{
+		client: newMockClient(func(req *http.Request) (*http.Response, error) {
+			if !strings.HasPrefix(req.URL.Path, expectedURL) {
+				return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL)
+			}
+			query := req.URL.Query()
+			all := query.Get("all")
+			if all != "1" {
+				return nil, fmt.Errorf("all not set in URL query properly. Expected '1', got %s", all)
+			}
+			limit := query.Get("limit")
+			if limit != "0" {
+				return nil, fmt.Errorf("limit should not be present in the query. Expected '0', got %s", limit)
+			}
+			since := query.Get("since")
+			if since != "container" {
+				return nil, fmt.Errorf("since not set in URL query properly. Expected 'container', got %s", since)
+			}
+			before := query.Get("before")
+			if before != "" {
+				return nil, fmt.Errorf("before should not be present in the query, got %s", before)
+			}
+			size := query.Get("size")
+			if size != "1" {
+				return nil, fmt.Errorf("size not set in URL query properly. Expected '1', got %s", size)
+			}
+			filters := query.Get("filters")
+			if filters != expectedFilters {
+				return nil, fmt.Errorf("expected filters '%v', got '%v'", expectedFilters, filters)
+			}
+
+			b, err := json.Marshal([]types.Container{
+				{
+					ID: "container_id1",
+				},
+				{
+					ID: "container_id2",
+				},
+			})
+			if err != nil {
+				return nil, err
+			}
+
+			return &http.Response{
+				StatusCode: http.StatusOK,
+				Body:       ioutil.NopCloser(bytes.NewReader(b)),
+			}, nil
+		}),
+	}
+
+	filters := filters.NewArgs()
+	filters.Add("label", "label1")
+	filters.Add("label", "label2")
+	filters.Add("before", "container")
+	containers, err := client.ContainerList(context.Background(), types.ContainerListOptions{
+		Size:    true,
+		All:     true,
+		Since:   "container",
+		Filters: filters,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(containers) != 2 {
+		t.Fatalf("expected 2 containers, got %v", containers)
+	}
+}
diff --git a/vendor/github.com/moby/moby/client/container_logs.go b/vendor/github.com/moby/moby/client/container_logs.go
new file mode 100644
index 000000000..0f32e9f12
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/container_logs.go
@@ -0,0 +1,72 @@
+package client
+
+import (
+	"io"
+	"net/url"
+	"time"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/docker/api/types"
+	timetypes "github.com/docker/docker/api/types/time"
+)
+
+// ContainerLogs returns the logs generated by a container in an io.ReadCloser.
+// It's up to the caller to close the stream.
+//
+// The stream format on the response will be in one of two formats:
+//
+// If the container is using a TTY, there is only a single stream (stdout), and
+// data is copied directly from the container output stream, no extra
+// multiplexing or headers.
+//
+// If the container is *not* using a TTY, streams for stdout and stderr are
+// multiplexed.
+// The format of the multiplexed stream is as follows:
+//
+//	[8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT}
+//
+// STREAM_TYPE can be 1 for stdout and 2 for stderr
+//
+// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian.
+// This is the size of OUTPUT.
+//
+// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this
+// stream.
+func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) {
+	query := url.Values{}
+	if options.ShowStdout {
+		query.Set("stdout", "1")
+	}
+
+	if options.ShowStderr {
+		query.Set("stderr", "1")
+	}
+
+	if options.Since != "" {
+		ts, err := timetypes.GetTimestamp(options.Since, time.Now())
+		if err != nil {
+			return nil, err
+		}
+		query.Set("since", ts)
+	}
+
+	if options.Timestamps {
+		query.Set("timestamps", "1")
+	}
+
+	if options.Details {
+		query.Set("details", "1")
+	}
+
+	if options.Follow {
+		query.Set("follow", "1")
+	}
+	query.Set("tail", options.Tail)
+
+	resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil)
+	if err != nil {
+		return nil, err
+	}
+	return resp.body, nil
+}
diff --git a/vendor/github.com/moby/moby/client/container_logs_test.go b/vendor/github.com/moby/moby/client/container_logs_test.go
new file mode 100644
index 000000000..99e31842c
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/container_logs_test.go
@@ -0,0 +1,133 @@
+package client
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"net/http"
+	"os"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/docker/docker/api/types"
+
+	"golang.org/x/net/context"
+)
+
+func TestContainerLogsError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
+	}
+	_, err := client.ContainerLogs(context.Background(), "container_id", types.ContainerLogsOptions{})
+	if err == nil || err.Error() != "Error response from daemon: Server error" {
+		t.Fatalf("expected a Server Error, got %v", err)
+	}
+	_, err = client.ContainerLogs(context.Background(), "container_id", types.ContainerLogsOptions{
+		Since: "2006-01-02TZ",
+	})
+	if err == nil || !strings.Contains(err.Error(), `parsing time "2006-01-02TZ"`) {
+		t.Fatalf("expected a 'parsing time' error, got %v", err)
+	}
+}
+
+func TestContainerLogs(t *testing.T) {
+	expectedURL := "/containers/container_id/logs"
+	cases := []struct {
+		options             types.ContainerLogsOptions
+		expectedQueryParams map[string]string
+	}{
+		{
+			expectedQueryParams: map[string]string{
+				"tail": "",
+			},
+		},
+		{
+			options: types.ContainerLogsOptions{
+				Tail: "any",
+			},
+			expectedQueryParams: map[string]string{
+				"tail": "any",
+			},
+		},
+		{
+			options: types.ContainerLogsOptions{
+				ShowStdout: true,
+				ShowStderr: true,
+				Timestamps: true,
+				Details:    true,
+				Follow:     true,
+			},
+			expectedQueryParams: map[string]string{
+				"tail":       "",
+				"stdout":     "1",
+				"stderr":     "1",
+				"timestamps": "1",
+				"details":    "1",
+				"follow":     "1",
+			},
+		},
+		{
+			options: types.ContainerLogsOptions{
+				// A completely invalid date, timestamp or Go duration will be
+				// passed as is
+				Since: "invalid but valid",
+			},
+			expectedQueryParams: map[string]string{
+				"tail":  "",
+				"since": "invalid but valid",
+			},
+		},
+	}
+	for _, logCase := range cases {
+		client := &Client{
+			client: newMockClient(func(r *http.Request) (*http.Response, error) {
+				if !strings.HasPrefix(r.URL.Path, expectedURL) {
+					return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL)
+				}
+				// Check query parameters
+				query := r.URL.Query()
+				for key, expected := range logCase.expectedQueryParams {
+					actual := query.Get(key)
+					if actual != expected {
+						return nil, fmt.Errorf("%s not set in URL query properly.
Expected '%s', got %s", key, expected, actual) + } + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + body, err := client.ContainerLogs(context.Background(), "container_id", logCase.options) + if err != nil { + t.Fatal(err) + } + defer body.Close() + content, err := ioutil.ReadAll(body) + if err != nil { + t.Fatal(err) + } + if string(content) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(content)) + } + } +} + +func ExampleClient_ContainerLogs_withTimeout() { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + client, _ := NewEnvClient() + reader, err := client.ContainerLogs(ctx, "container_id", types.ContainerLogsOptions{}) + if err != nil { + log.Fatal(err) + } + + _, err = io.Copy(os.Stdout, reader) + if err != nil && err != io.EOF { + log.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/container_pause.go b/vendor/github.com/moby/moby/client/container_pause.go new file mode 100644 index 000000000..412067a78 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_pause.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// ContainerPause pauses the main process of a given container without terminating it. +func (cli *Client) ContainerPause(ctx context.Context, containerID string) error { + resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/container_pause_test.go b/vendor/github.com/moby/moby/client/container_pause_test.go new file mode 100644 index 000000000..0ee2f05d7 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_pause_test.go @@ -0,0 +1,41 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerPauseError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerPause(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerPause(t *testing.T) { + expectedURL := "/containers/container_id/pause" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + err := client.ContainerPause(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/container_prune.go b/vendor/github.com/moby/moby/client/container_prune.go new file mode 100644 index 000000000..b58217086 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_prune.go @@ -0,0 +1,36 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// ContainersPrune requests the daemon to delete unused data +func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) { + var report types.ContainersPruneReport + + if 
err := cli.NewVersionError("1.25", "container prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/containers/prune", query, nil, nil) + if err != nil { + return report, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/moby/moby/client/container_prune_test.go b/vendor/github.com/moby/moby/client/container_prune_test.go new file mode 100644 index 000000000..8a1c63897 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_prune_test.go @@ -0,0 +1,124 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestContainersPruneError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + version: "1.25", + } + + filters := filters.NewArgs() + + _, err := client.ContainersPrune(context.Background(), filters) + assert.EqualError(t, err, "Error response from daemon: Server error") +} + +func TestContainersPrune(t *testing.T) { + expectedURL := "/v1.25/containers/prune" + + danglingFilters := filters.NewArgs() + danglingFilters.Add("dangling", "true") + + noDanglingFilters := filters.NewArgs() + noDanglingFilters.Add("dangling", "false") + + danglingUntilFilters := filters.NewArgs() + danglingUntilFilters.Add("dangling", "true") + danglingUntilFilters.Add("until", "2016-12-15T14:00") + + labelFilters := filters.NewArgs() + labelFilters.Add("dangling", "true") + labelFilters.Add("label", "label1=foo") + labelFilters.Add("label", "label2!=bar") + + listCases := []struct { + filters filters.Args + expectedQueryParams map[string]string + }{ + { + filters: filters.Args{}, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": "", + }, + }, + { + filters: danglingFilters, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": `{"dangling":{"true":true}}`, + }, + }, + { + filters: danglingUntilFilters, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": `{"dangling":{"true":true},"until":{"2016-12-15T14:00":true}}`, + }, + }, + { + filters: noDanglingFilters, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": `{"dangling":{"false":true}}`, + }, + }, + { + filters: labelFilters, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": `{"dangling":{"true":true},"label":{"label1=foo":true,"label2!=bar":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + assert.Equal(t, expected, actual) + } + content, err := json.Marshal(types.ContainersPruneReport{ + ContainersDeleted: []string{"container_id1", "container_id2"}, + SpaceReclaimed: 9999, + }) + if err != nil { + return nil, err + 
} + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + version: "1.25", + } + + report, err := client.ContainersPrune(context.Background(), listCase.filters) + assert.NoError(t, err) + assert.Len(t, report.ContainersDeleted, 2) + assert.Equal(t, uint64(9999), report.SpaceReclaimed) + } +} diff --git a/vendor/github.com/moby/moby/client/container_remove.go b/vendor/github.com/moby/moby/client/container_remove.go new file mode 100644 index 000000000..3a79590ce --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_remove.go @@ -0,0 +1,27 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerRemove kills and removes a container from the docker host. +func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error { + query := url.Values{} + if options.RemoveVolumes { + query.Set("v", "1") + } + if options.RemoveLinks { + query.Set("link", "1") + } + + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/container_remove_test.go b/vendor/github.com/moby/moby/client/container_remove_test.go new file mode 100644 index 000000000..798c08b33 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_remove_test.go @@ -0,0 +1,59 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestContainerRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerRemove(context.Background(), "container_id", types.ContainerRemoveOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerRemove(t *testing.T) { + expectedURL := "/containers/container_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + volume := query.Get("v") + if volume != "1" { + return nil, fmt.Errorf("v (volume) not set in URL query properly. Expected '1', got %s", volume) + } + force := query.Get("force") + if force != "1" { + return nil, fmt.Errorf("force not set in URL query properly. 
Expected '1', got %s", force) + } + link := query.Get("link") + if link != "" { + return nil, fmt.Errorf("link should have not be present in query, go %s", link) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.ContainerRemove(context.Background(), "container_id", types.ContainerRemoveOptions{ + RemoveVolumes: true, + Force: true, + }) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/container_rename.go b/vendor/github.com/moby/moby/client/container_rename.go new file mode 100644 index 000000000..0e718da7c --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_rename.go @@ -0,0 +1,16 @@ +package client + +import ( + "net/url" + + "golang.org/x/net/context" +) + +// ContainerRename changes the name of a given container. +func (cli *Client) ContainerRename(ctx context.Context, containerID, newContainerName string) error { + query := url.Values{} + query.Set("name", newContainerName) + resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/container_rename_test.go b/vendor/github.com/moby/moby/client/container_rename_test.go new file mode 100644 index 000000000..732ebff5f --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_rename_test.go @@ -0,0 +1,46 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerRenameError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerRename(context.Background(), "nothing", "newNothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerRename(t *testing.T) { + expectedURL := "/containers/container_id/rename" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + name := req.URL.Query().Get("name") + if name != "newName" { + return nil, fmt.Errorf("name not set in URL query properly. Expected 'newName', got %s", name) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.ContainerRename(context.Background(), "container_id", "newName") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/container_resize.go b/vendor/github.com/moby/moby/client/container_resize.go new file mode 100644 index 000000000..66c3cc194 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_resize.go @@ -0,0 +1,29 @@ +package client + +import ( + "net/url" + "strconv" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerResize changes the size of the tty for a container. +func (cli *Client) ContainerResize(ctx context.Context, containerID string, options types.ResizeOptions) error { + return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width) +} + +// ContainerExecResize changes the size of the tty for an exec process running inside a container. 
+func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error { + return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width) +} + +func (cli *Client) resize(ctx context.Context, basePath string, height, width uint) error { + query := url.Values{} + query.Set("h", strconv.Itoa(int(height))) + query.Set("w", strconv.Itoa(int(width))) + + resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/container_resize_test.go b/vendor/github.com/moby/moby/client/container_resize_test.go new file mode 100644 index 000000000..5b2efecdc --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_resize_test.go @@ -0,0 +1,82 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestContainerResizeError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerResize(context.Background(), "container_id", types.ResizeOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerExecResizeError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerExecResize(context.Background(), "exec_id", types.ResizeOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerResize(t *testing.T) { + client := &Client{ + client: newMockClient(resizeTransport("/containers/container_id/resize")), + } + + err := client.ContainerResize(context.Background(), "container_id", types.ResizeOptions{ + Height: 500, + Width: 600, + }) + if err != nil { + t.Fatal(err) + } +} + +func TestContainerExecResize(t *testing.T) { + client := &Client{ + client: newMockClient(resizeTransport("/exec/exec_id/resize")), + } + + err := client.ContainerExecResize(context.Background(), "exec_id", types.ResizeOptions{ + Height: 500, + Width: 600, + }) + if err != nil { + t.Fatal(err) + } +} + +func resizeTransport(expectedURL string) func(req *http.Request) (*http.Response, error) { + return func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + h := query.Get("h") + if h != "500" { + return nil, fmt.Errorf("h not set in URL query properly. Expected '500', got %s", h) + } + w := query.Get("w") + if w != "600" { + return nil, fmt.Errorf("w not set in URL query properly. Expected '600', got %s", w) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + } +} diff --git a/vendor/github.com/moby/moby/client/container_restart.go b/vendor/github.com/moby/moby/client/container_restart.go new file mode 100644 index 000000000..74d7455f0 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_restart.go @@ -0,0 +1,22 @@ +package client + +import ( + "net/url" + "time" + + timetypes "github.com/docker/docker/api/types/time" + "golang.org/x/net/context" +) + +// ContainerRestart stops and starts a container again. 
+// It makes the daemon wait for the container to be up again for
+// a specific amount of time, given the timeout.
+func (cli *Client) ContainerRestart(ctx context.Context, containerID string, timeout *time.Duration) error {
+	query := url.Values{}
+	if timeout != nil {
+		query.Set("t", timetypes.DurationToSecondsString(*timeout))
+	}
+	resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/vendor/github.com/moby/moby/client/container_restart_test.go b/vendor/github.com/moby/moby/client/container_restart_test.go
new file mode 100644
index 000000000..8c3cfd6a6
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/container_restart_test.go
@@ -0,0 +1,48 @@
+package client
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"testing"
+	"time"
+
+	"golang.org/x/net/context"
+)
+
+func TestContainerRestartError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
+	}
+	timeout := 0 * time.Second
+	err := client.ContainerRestart(context.Background(), "nothing", &timeout)
+	if err == nil || err.Error() != "Error response from daemon: Server error" {
+		t.Fatalf("expected a Server Error, got %v", err)
+	}
+}
+
+func TestContainerRestart(t *testing.T) {
+	expectedURL := "/containers/container_id/restart"
+	client := &Client{
+		client: newMockClient(func(req *http.Request) (*http.Response, error) {
+			if !strings.HasPrefix(req.URL.Path, expectedURL) {
+				return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL)
+			}
+			t := req.URL.Query().Get("t")
+			if t != "100" {
+				return nil, fmt.Errorf("t (timeout) not set in URL query properly. Expected '100', got %s", t)
+			}
+			return &http.Response{
+				StatusCode: http.StatusOK,
+				Body:       ioutil.NopCloser(bytes.NewReader([]byte(""))),
+			}, nil
+		}),
+	}
+	timeout := 100 * time.Second
+	err := client.ContainerRestart(context.Background(), "container_id", &timeout)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/vendor/github.com/moby/moby/client/container_start.go b/vendor/github.com/moby/moby/client/container_start.go
new file mode 100644
index 000000000..b1f08de41
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/container_start.go
@@ -0,0 +1,24 @@
+package client
+
+import (
+	"net/url"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/docker/api/types"
+)
+
+// ContainerStart sends a request to the docker daemon to start a container.
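+//
+// A minimal sketch, assuming a configured *Client named cli, a context ctx,
+// and an already created container; the checkpoint fields are only relevant
+// for the experimental checkpoint/restore feature:
+//
+//	if err := cli.ContainerStart(ctx, "container_id", types.ContainerStartOptions{}); err != nil {
+//		log.Fatal(err)
+//	}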
+func (cli *Client) ContainerStart(ctx context.Context, containerID string, options types.ContainerStartOptions) error { + query := url.Values{} + if len(options.CheckpointID) != 0 { + query.Set("checkpoint", options.CheckpointID) + } + if len(options.CheckpointDir) != 0 { + query.Set("checkpoint-dir", options.CheckpointDir) + } + + resp, err := cli.post(ctx, "/containers/"+containerID+"/start", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/container_start_test.go b/vendor/github.com/moby/moby/client/container_start_test.go new file mode 100644 index 000000000..5826fa8bc --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_start_test.go @@ -0,0 +1,58 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +func TestContainerStartError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerStart(context.Background(), "nothing", types.ContainerStartOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerStart(t *testing.T) { + expectedURL := "/containers/container_id/start" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + // we're not expecting any payload, but if one is supplied, check it is valid. + if req.Header.Get("Content-Type") == "application/json" { + var startConfig interface{} + if err := json.NewDecoder(req.Body).Decode(&startConfig); err != nil { + return nil, fmt.Errorf("Unable to parse json: %s", err) + } + } + + checkpoint := req.URL.Query().Get("checkpoint") + if checkpoint != "checkpoint_id" { + return nil, fmt.Errorf("checkpoint not set in URL query properly. Expected 'checkpoint_id', got %s", checkpoint) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.ContainerStart(context.Background(), "container_id", types.ContainerStartOptions{CheckpointID: "checkpoint_id"}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/container_stats.go b/vendor/github.com/moby/moby/client/container_stats.go new file mode 100644 index 000000000..4758c66e3 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_stats.go @@ -0,0 +1,26 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerStats returns near realtime stats for a given container. +// It's up to the caller to close the io.ReadCloser returned. 
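+//
+// A minimal sketch of reading a single snapshot, assuming a configured
+// *Client named cli and a context ctx; with stream set to false the daemon
+// sends one JSON object and then closes the body:
+//
+//	stats, err := cli.ContainerStats(ctx, "container_id", false)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer stats.Body.Close()
+//	var v types.StatsJSON
+//	if err := json.NewDecoder(stats.Body).Decode(&v); err != nil {
+//		log.Fatal(err)
+//	}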
+func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) { + query := url.Values{} + query.Set("stream", "0") + if stream { + query.Set("stream", "1") + } + + resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) + if err != nil { + return types.ContainerStats{}, err + } + + osType := getDockerOS(resp.header.Get("Server")) + return types.ContainerStats{Body: resp.body, OSType: osType}, err +} diff --git a/vendor/github.com/moby/moby/client/container_stats_test.go b/vendor/github.com/moby/moby/client/container_stats_test.go new file mode 100644 index 000000000..7414f135c --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_stats_test.go @@ -0,0 +1,70 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerStatsError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerStats(context.Background(), "nothing", false) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerStats(t *testing.T) { + expectedURL := "/containers/container_id/stats" + cases := []struct { + stream bool + expectedStream string + }{ + { + expectedStream: "0", + }, + { + stream: true, + expectedStream: "1", + }, + } + for _, c := range cases { + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + + query := r.URL.Query() + stream := query.Get("stream") + if stream != c.expectedStream { + return nil, fmt.Errorf("stream not set in URL query properly. Expected '%s', got %s", c.expectedStream, stream) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + resp, err := client.ContainerStats(context.Background(), "container_id", c.stream) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + content, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + if string(content) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(content)) + } + } +} diff --git a/vendor/github.com/moby/moby/client/container_stop.go b/vendor/github.com/moby/moby/client/container_stop.go new file mode 100644 index 000000000..b5418ae8c --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_stop.go @@ -0,0 +1,21 @@ +package client + +import ( + "net/url" + "time" + + timetypes "github.com/docker/docker/api/types/time" + "golang.org/x/net/context" +) + +// ContainerStop stops a container without terminating the process. +// The process is blocked until the container stops or the timeout expires. 
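+//
+// A minimal sketch, assuming a configured *Client named cli and a context
+// ctx; a nil timeout lets the daemon apply its default grace period:
+//
+//	timeout := 10 * time.Second
+//	if err := cli.ContainerStop(ctx, "container_id", &timeout); err != nil {
+//		log.Fatal(err)
+//	}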
+func (cli *Client) ContainerStop(ctx context.Context, containerID string, timeout *time.Duration) error { + query := url.Values{} + if timeout != nil { + query.Set("t", timetypes.DurationToSecondsString(*timeout)) + } + resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/container_stop_test.go b/vendor/github.com/moby/moby/client/container_stop_test.go new file mode 100644 index 000000000..c32cd691c --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_stop_test.go @@ -0,0 +1,48 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + "time" + + "golang.org/x/net/context" +) + +func TestContainerStopError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + timeout := 0 * time.Second + err := client.ContainerStop(context.Background(), "nothing", &timeout) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerStop(t *testing.T) { + expectedURL := "/containers/container_id/stop" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + t := req.URL.Query().Get("t") + if t != "100" { + return nil, fmt.Errorf("t (timeout) not set in URL query properly. Expected '100', got %s", t) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + timeout := 100 * time.Second + err := client.ContainerStop(context.Background(), "container_id", &timeout) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/container_top.go b/vendor/github.com/moby/moby/client/container_top.go new file mode 100644 index 000000000..9689123a4 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_top.go @@ -0,0 +1,28 @@ +package client + +import ( + "encoding/json" + "net/url" + "strings" + + "github.com/docker/docker/api/types/container" + "golang.org/x/net/context" +) + +// ContainerTop shows process information from within a container. 
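+//
+// A minimal sketch, assuming a configured *Client named cli and a context
+// ctx; the arguments are joined with spaces and passed through as ps
+// arguments:
+//
+//	top, err := cli.ContainerTop(ctx, "container_id", []string{"aux"})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(top.Titles)
+//	for _, process := range top.Processes {
+//		fmt.Println(process)
+//	}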
+func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (container.ContainerTopOKBody, error) { + var response container.ContainerTopOKBody + query := url.Values{} + if len(arguments) > 0 { + query.Set("ps_args", strings.Join(arguments, " ")) + } + + resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/moby/moby/client/container_top_test.go b/vendor/github.com/moby/moby/client/container_top_test.go new file mode 100644 index 000000000..68ccef505 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_top_test.go @@ -0,0 +1,74 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strings" + "testing" + + "github.com/docker/docker/api/types/container" + "golang.org/x/net/context" +) + +func TestContainerTopError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerTop(context.Background(), "nothing", []string{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerTop(t *testing.T) { + expectedURL := "/containers/container_id/top" + expectedProcesses := [][]string{ + {"p1", "p2"}, + {"p3"}, + } + expectedTitles := []string{"title1", "title2"} + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + args := query.Get("ps_args") + if args != "arg1 arg2" { + return nil, fmt.Errorf("args not set in URL query properly. 
Expected 'arg1 arg2', got %v", args) + } + + b, err := json.Marshal(container.ContainerTopOKBody{ + Processes: [][]string{ + {"p1", "p2"}, + {"p3"}, + }, + Titles: []string{"title1", "title2"}, + }) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + processList, err := client.ContainerTop(context.Background(), "container_id", []string{"arg1", "arg2"}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(expectedProcesses, processList.Processes) { + t.Fatalf("Processes: expected %v, got %v", expectedProcesses, processList.Processes) + } + if !reflect.DeepEqual(expectedTitles, processList.Titles) { + t.Fatalf("Titles: expected %v, got %v", expectedTitles, processList.Titles) + } +} diff --git a/vendor/github.com/moby/moby/client/container_unpause.go b/vendor/github.com/moby/moby/client/container_unpause.go new file mode 100644 index 000000000..5c7621125 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_unpause.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// ContainerUnpause resumes the process execution within a container +func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error { + resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/container_unpause_test.go b/vendor/github.com/moby/moby/client/container_unpause_test.go new file mode 100644 index 000000000..2c4272719 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_unpause_test.go @@ -0,0 +1,41 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerUnpauseError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerUnpause(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerUnpause(t *testing.T) { + expectedURL := "/containers/container_id/unpause" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + err := client.ContainerUnpause(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/container_update.go b/vendor/github.com/moby/moby/client/container_update.go new file mode 100644 index 000000000..5082f22df --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_update.go @@ -0,0 +1,22 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types/container" + "golang.org/x/net/context" +) + +// ContainerUpdate updates resources of a container +func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) { + var response container.ContainerUpdateOKBody + serverResp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil) + if err != nil { + return response, err + } + 
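+	// A successful update may still carry warnings from the daemon in the
+	// response body, so decode it before closing the reader.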
+ err = json.NewDecoder(serverResp.body).Decode(&response) + + ensureReaderClosed(serverResp) + return response, err +} diff --git a/vendor/github.com/moby/moby/client/container_update_test.go b/vendor/github.com/moby/moby/client/container_update_test.go new file mode 100644 index 000000000..715bb7ca2 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_update_test.go @@ -0,0 +1,58 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/container" + "golang.org/x/net/context" +) + +func TestContainerUpdateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerUpdate(context.Background(), "nothing", container.UpdateConfig{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerUpdate(t *testing.T) { + expectedURL := "/containers/container_id/update" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + b, err := json.Marshal(container.ContainerUpdateOKBody{}) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + _, err := client.ContainerUpdate(context.Background(), "container_id", container.UpdateConfig{ + Resources: container.Resources{ + CPUPeriod: 1, + }, + RestartPolicy: container.RestartPolicy{ + Name: "always", + }, + }) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/container_wait.go b/vendor/github.com/moby/moby/client/container_wait.go new file mode 100644 index 000000000..854c6c053 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_wait.go @@ -0,0 +1,84 @@ +package client + +import ( + "encoding/json" + "net/url" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/versions" +) + +// ContainerWait waits until the specified container is in a certain state +// indicated by the given condition, either "not-running" (default), +// "next-exit", or "removed". +// +// If this client's API version is before 1.30, condition is ignored and +// ContainerWait will return immediately with the two channels, as the server +// will wait as if the condition were "not-running". +// +// If this client's API version is at least 1.30, ContainerWait blocks until +// the request has been acknowledged by the server (with a response header), +// then returns two channels on which the caller can wait for the exit status +// of the container or an error if there was a problem either beginning the +// wait request or in getting the response. This allows the caller to +// synchronize ContainerWait with other calls, such as specifying a +// "next-exit" condition before issuing a ContainerStart request. 
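+//
+// A minimal synchronization sketch, assuming a configured *Client named cli
+// (API 1.30+), a context ctx, and a placeholder container ID; registering
+// the wait before starting the container ensures the exit cannot be missed:
+//
+//	waitC, errC := cli.ContainerWait(ctx, "container_id", container.WaitConditionNextExit)
+//	if err := cli.ContainerStart(ctx, "container_id", types.ContainerStartOptions{}); err != nil {
+//		log.Fatal(err)
+//	}
+//	select {
+//	case res := <-waitC:
+//		fmt.Println("exit status:", res.StatusCode)
+//	case err := <-errC:
+//		log.Fatal(err)
+//	}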
+func (cli *Client) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) { + if versions.LessThan(cli.ClientVersion(), "1.30") { + return cli.legacyContainerWait(ctx, containerID) + } + + resultC := make(chan container.ContainerWaitOKBody) + errC := make(chan error, 1) + + query := url.Values{} + query.Set("condition", string(condition)) + + resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", query, nil, nil) + if err != nil { + defer ensureReaderClosed(resp) + errC <- err + return resultC, errC + } + + go func() { + defer ensureReaderClosed(resp) + var res container.ContainerWaitOKBody + if err := json.NewDecoder(resp.body).Decode(&res); err != nil { + errC <- err + return + } + + resultC <- res + }() + + return resultC, errC +} + +// legacyContainerWait returns immediately and doesn't have an option to wait +// until the container is removed. +func (cli *Client) legacyContainerWait(ctx context.Context, containerID string) (<-chan container.ContainerWaitOKBody, <-chan error) { + resultC := make(chan container.ContainerWaitOKBody) + errC := make(chan error) + + go func() { + resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil) + if err != nil { + errC <- err + return + } + defer ensureReaderClosed(resp) + + var res container.ContainerWaitOKBody + if err := json.NewDecoder(resp.body).Decode(&res); err != nil { + errC <- err + return + } + + resultC <- res + }() + + return resultC, errC +} diff --git a/vendor/github.com/moby/moby/client/container_wait_test.go b/vendor/github.com/moby/moby/client/container_wait_test.go new file mode 100644 index 000000000..7b8c9f096 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_wait_test.go @@ -0,0 +1,74 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "log" + "net/http" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types/container" + + "golang.org/x/net/context" +) + +func TestContainerWaitError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + resultC, errC := client.ContainerWait(context.Background(), "nothing", "") + select { + case result := <-resultC: + t.Fatalf("expected to not get a wait result, got %d", result.StatusCode) + case err := <-errC: + if err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } + } +} + +func TestContainerWait(t *testing.T) { + expectedURL := "/containers/container_id/wait" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + b, err := json.Marshal(container.ContainerWaitOKBody{ + StatusCode: 15, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + resultC, errC := client.ContainerWait(context.Background(), "container_id", "") + select { + case err := <-errC: + t.Fatal(err) + case result := <-resultC: + if result.StatusCode != 15 { + t.Fatalf("expected a status code equal to '15', got %d", result.StatusCode) + } + } +} + +func ExampleClient_ContainerWait_withTimeout() { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + client, _ := NewEnvClient() + _, errC := 
client.ContainerWait(ctx, "container_id", "") + if err := <-errC; err != nil { + log.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/disk_usage.go b/vendor/github.com/moby/moby/client/disk_usage.go new file mode 100644 index 000000000..03c80b39a --- /dev/null +++ b/vendor/github.com/moby/moby/client/disk_usage.go @@ -0,0 +1,26 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// DiskUsage requests the current data usage from the daemon +func (cli *Client) DiskUsage(ctx context.Context) (types.DiskUsage, error) { + var du types.DiskUsage + + serverResp, err := cli.get(ctx, "/system/df", nil, nil) + if err != nil { + return du, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&du); err != nil { + return du, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return du, nil +} diff --git a/vendor/github.com/moby/moby/client/disk_usage_test.go b/vendor/github.com/moby/moby/client/disk_usage_test.go new file mode 100644 index 000000000..51e622233 --- /dev/null +++ b/vendor/github.com/moby/moby/client/disk_usage_test.go @@ -0,0 +1,55 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestDiskUsageError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.DiskUsage(context.Background()) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestDiskUsage(t *testing.T) { + expectedURL := "/system/df" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + du := types.DiskUsage{ + LayersSize: int64(100), + Images: nil, + Containers: nil, + Volumes: nil, + } + + b, err := json.Marshal(du) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + if _, err := client.DiskUsage(context.Background()); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/distribution_inspect.go b/vendor/github.com/moby/moby/client/distribution_inspect.go new file mode 100644 index 000000000..aa5bc6a6c --- /dev/null +++ b/vendor/github.com/moby/moby/client/distribution_inspect.go @@ -0,0 +1,35 @@ +package client + +import ( + "encoding/json" + "net/url" + + registrytypes "github.com/docker/docker/api/types/registry" + "golang.org/x/net/context" +) + +// DistributionInspect returns the image digest with full Manifest +func (cli *Client) DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registrytypes.DistributionInspect, error) { + // Contact the registry to retrieve digest and platform information + var distributionInspect registrytypes.DistributionInspect + + if err := cli.NewVersionError("1.30", "distribution inspect"); err != nil { + return distributionInspect, err + } + var headers map[string][]string + + if encodedRegistryAuth != "" { + headers = map[string][]string{ + "X-Registry-Auth": {encodedRegistryAuth}, + } + } + + resp, err := cli.get(ctx, "/distribution/"+image+"/json", url.Values{}, headers) + 
if err != nil {
+		return distributionInspect, err
+	}
+
+	err = json.NewDecoder(resp.body).Decode(&distributionInspect)
+	ensureReaderClosed(resp)
+	return distributionInspect, err
+}
diff --git a/vendor/github.com/moby/moby/client/distribution_inspect_test.go b/vendor/github.com/moby/moby/client/distribution_inspect_test.go
new file mode 100644
index 000000000..eff28d7ca
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/distribution_inspect_test.go
@@ -0,0 +1,18 @@
+package client
+
+import (
+	"net/http"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"golang.org/x/net/context"
+)
+
+func TestDistributionInspectUnsupported(t *testing.T) {
+	client := &Client{
+		version: "1.29",
+		client:  &http.Client{},
+	}
+	_, err := client.DistributionInspect(context.Background(), "foobar:1.0", "")
+	assert.EqualError(t, err, `"distribution inspect" requires API version 1.30, but the Docker daemon API version is 1.29`)
+}
diff --git a/vendor/github.com/moby/moby/client/errors.go b/vendor/github.com/moby/moby/client/errors.go
new file mode 100644
index 000000000..fc7df9f1e
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/errors.go
@@ -0,0 +1,300 @@
+package client
+
+import (
+	"fmt"
+
+	"github.com/docker/docker/api/types/versions"
+	"github.com/pkg/errors"
+)
+
+// errConnectionFailed implements an error returned when a connection attempt fails.
+type errConnectionFailed struct {
+	host string
+}
+
+// Error returns a string representation of an errConnectionFailed
+func (err errConnectionFailed) Error() string {
+	if err.host == "" {
+		return "Cannot connect to the Docker daemon. Is the docker daemon running on this host?"
+	}
+	return fmt.Sprintf("Cannot connect to the Docker daemon at %s. Is the docker daemon running?", err.host)
+}
+
+// IsErrConnectionFailed returns true if the error is caused by a failed connection.
+func IsErrConnectionFailed(err error) bool {
+	_, ok := errors.Cause(err).(errConnectionFailed)
+	return ok
+}
+
+// ErrorConnectionFailed returns an error with the host in the error message when connection to the docker daemon failed.
+func ErrorConnectionFailed(host string) error {
+	return errConnectionFailed{host: host}
+}
+
+type notFound interface {
+	error
+	NotFound() bool // Is the error a NotFound error
+}
+
+// IsErrNotFound returns true if the error is raised because an
+// object (image, container, network, volume, …) is not found in the docker host.
+func IsErrNotFound(err error) bool {
+	te, ok := err.(notFound)
+	return ok && te.NotFound()
+}
+
+// imageNotFoundError implements an error returned when an image is not in the docker host.
+type imageNotFoundError struct {
+	imageID string
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e imageNotFoundError) NotFound() bool {
+	return true
+}
+
+// Error returns a string representation of an imageNotFoundError
+func (e imageNotFoundError) Error() string {
+	return fmt.Sprintf("Error: No such image: %s", e.imageID)
+}
+
+// IsErrImageNotFound returns true if the error is caused
+// when an image is not found in the docker host.
+func IsErrImageNotFound(err error) bool {
+	return IsErrNotFound(err)
+}
+
+// containerNotFoundError implements an error returned when a container is not in the docker host.
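+// Callers typically detect it with IsErrContainerNotFound; a hypothetical
+// sketch (cli is an assumed *Client):
+//
+//	if _, err := cli.ContainerInspect(ctx, "container_id"); IsErrContainerNotFound(err) {
+//		// the container does not exist on the daemon
+//	}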
+type containerNotFoundError struct { + containerID string +} + +// NotFound indicates that this error type is of NotFound +func (e containerNotFoundError) NotFound() bool { + return true +} + +// Error returns a string representation of a containerNotFoundError +func (e containerNotFoundError) Error() string { + return fmt.Sprintf("Error: No such container: %s", e.containerID) +} + +// IsErrContainerNotFound returns true if the error is caused +// when a container is not found in the docker host. +func IsErrContainerNotFound(err error) bool { + return IsErrNotFound(err) +} + +// networkNotFoundError implements an error returned when a network is not in the docker host. +type networkNotFoundError struct { + networkID string +} + +// NotFound indicates that this error type is of NotFound +func (e networkNotFoundError) NotFound() bool { + return true +} + +// Error returns a string representation of a networkNotFoundError +func (e networkNotFoundError) Error() string { + return fmt.Sprintf("Error: No such network: %s", e.networkID) +} + +// IsErrNetworkNotFound returns true if the error is caused +// when a network is not found in the docker host. +func IsErrNetworkNotFound(err error) bool { + return IsErrNotFound(err) +} + +// volumeNotFoundError implements an error returned when a volume is not in the docker host. +type volumeNotFoundError struct { + volumeID string +} + +// NotFound indicates that this error type is of NotFound +func (e volumeNotFoundError) NotFound() bool { + return true +} + +// Error returns a string representation of a volumeNotFoundError +func (e volumeNotFoundError) Error() string { + return fmt.Sprintf("Error: No such volume: %s", e.volumeID) +} + +// IsErrVolumeNotFound returns true if the error is caused +// when a volume is not found in the docker host. +func IsErrVolumeNotFound(err error) bool { + return IsErrNotFound(err) +} + +// unauthorizedError represents an authorization error in a remote registry. +type unauthorizedError struct { + cause error +} + +// Error returns a string representation of an unauthorizedError +func (u unauthorizedError) Error() string { + return u.cause.Error() +} + +// IsErrUnauthorized returns true if the error is caused +// when a remote registry authentication fails +func IsErrUnauthorized(err error) bool { + _, ok := err.(unauthorizedError) + return ok +} + +// nodeNotFoundError implements an error returned when a node is not found. +type nodeNotFoundError struct { + nodeID string +} + +// Error returns a string representation of a nodeNotFoundError +func (e nodeNotFoundError) Error() string { + return fmt.Sprintf("Error: No such node: %s", e.nodeID) +} + +// NotFound indicates that this error type is of NotFound +func (e nodeNotFoundError) NotFound() bool { + return true +} + +// IsErrNodeNotFound returns true if the error is caused +// when a node is not found. +func IsErrNodeNotFound(err error) bool { + _, ok := err.(nodeNotFoundError) + return ok +} + +// serviceNotFoundError implements an error returned when a service is not found. +type serviceNotFoundError struct { + serviceID string +} + +// Error returns a string representation of a serviceNotFoundError +func (e serviceNotFoundError) Error() string { + return fmt.Sprintf("Error: No such service: %s", e.serviceID) +} + +// NotFound indicates that this error type is of NotFound +func (e serviceNotFoundError) NotFound() bool { + return true +} + +// IsErrServiceNotFound returns true if the error is caused +// when a service is not found. 
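+// (Unlike the image/container/network/volume helpers above, which delegate to
+// IsErrNotFound, this asserts the concrete serviceNotFoundError type.)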
+func IsErrServiceNotFound(err error) bool {
+	_, ok := err.(serviceNotFoundError)
+	return ok
+}
+
+// taskNotFoundError implements an error returned when a task is not found.
+type taskNotFoundError struct {
+	taskID string
+}
+
+// Error returns a string representation of a taskNotFoundError
+func (e taskNotFoundError) Error() string {
+	return fmt.Sprintf("Error: No such task: %s", e.taskID)
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e taskNotFoundError) NotFound() bool {
+	return true
+}
+
+// IsErrTaskNotFound returns true if the error is caused
+// when a task is not found.
+func IsErrTaskNotFound(err error) bool {
+	_, ok := err.(taskNotFoundError)
+	return ok
+}
+
+type pluginPermissionDenied struct {
+	name string
+}
+
+func (e pluginPermissionDenied) Error() string {
+	return "Permission denied while installing plugin " + e.name
+}
+
+// IsErrPluginPermissionDenied returns true if the error is caused
+// when a user denies a plugin's permissions
+func IsErrPluginPermissionDenied(err error) bool {
+	_, ok := err.(pluginPermissionDenied)
+	return ok
+}
+
+// NewVersionError returns an error if the API version required by a feature
+// is greater than the client's currently configured version.
+func (cli *Client) NewVersionError(APIrequired, feature string) error {
+	if cli.version != "" && versions.LessThan(cli.version, APIrequired) {
+		return fmt.Errorf("%q requires API version %s, but the Docker daemon API version is %s", feature, APIrequired, cli.version)
+	}
+	return nil
+}
+
+// secretNotFoundError implements an error returned when a secret is not found.
+type secretNotFoundError struct {
+	name string
+}
+
+// Error returns a string representation of a secretNotFoundError
+func (e secretNotFoundError) Error() string {
+	return fmt.Sprintf("Error: no such secret: %s", e.name)
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e secretNotFoundError) NotFound() bool {
+	return true
+}
+
+// IsErrSecretNotFound returns true if the error is caused
+// when a secret is not found.
+func IsErrSecretNotFound(err error) bool {
+	_, ok := err.(secretNotFoundError)
+	return ok
+}
+
+// configNotFoundError implements an error returned when a config is not found.
+type configNotFoundError struct {
+	name string
+}
+
+// Error returns a string representation of a configNotFoundError
+func (e configNotFoundError) Error() string {
+	return fmt.Sprintf("Error: no such config: %s", e.name)
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e configNotFoundError) NotFound() bool {
+	return true
+}
+
+// IsErrConfigNotFound returns true if the error is caused
+// when a config is not found.
+func IsErrConfigNotFound(err error) bool {
+	_, ok := err.(configNotFoundError)
+	return ok
+}
+
+// pluginNotFoundError implements an error returned when a plugin is not in the docker host.
+type pluginNotFoundError struct {
+	name string
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e pluginNotFoundError) NotFound() bool {
+	return true
+}
+
+// Error returns a string representation of a pluginNotFoundError
+func (e pluginNotFoundError) Error() string {
+	return fmt.Sprintf("Error: No such plugin: %s", e.name)
+}
+
+// IsErrPluginNotFound returns true if the error is caused
+// when a plugin is not found in the docker host.
+func IsErrPluginNotFound(err error) bool { + return IsErrNotFound(err) +} diff --git a/vendor/github.com/moby/moby/client/events.go b/vendor/github.com/moby/moby/client/events.go new file mode 100644 index 000000000..af47aefa7 --- /dev/null +++ b/vendor/github.com/moby/moby/client/events.go @@ -0,0 +1,102 @@ +package client + +import ( + "encoding/json" + "net/url" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + timetypes "github.com/docker/docker/api/types/time" +) + +// Events returns a stream of events in the daemon. It's up to the caller to close the stream +// by cancelling the context. Once the stream has been completely read an io.EOF error will +// be sent over the error channel. If an error is sent all processing will be stopped. It's up +// to the caller to reopen the stream in the event of an error by reinvoking this method. +func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) { + + messages := make(chan events.Message) + errs := make(chan error, 1) + + started := make(chan struct{}) + go func() { + defer close(errs) + + query, err := buildEventsQueryParams(cli.version, options) + if err != nil { + close(started) + errs <- err + return + } + + resp, err := cli.get(ctx, "/events", query, nil) + if err != nil { + close(started) + errs <- err + return + } + defer resp.body.Close() + + decoder := json.NewDecoder(resp.body) + + close(started) + for { + select { + case <-ctx.Done(): + errs <- ctx.Err() + return + default: + var event events.Message + if err := decoder.Decode(&event); err != nil { + errs <- err + return + } + + select { + case messages <- event: + case <-ctx.Done(): + errs <- ctx.Err() + return + } + } + } + }() + <-started + + return messages, errs +} + +func buildEventsQueryParams(cliVersion string, options types.EventsOptions) (url.Values, error) { + query := url.Values{} + ref := time.Now() + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, ref) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Until != "" { + ts, err := timetypes.GetTimestamp(options.Until, ref) + if err != nil { + return nil, err + } + query.Set("until", ts) + } + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cliVersion, options.Filters) + if err != nil { + return nil, err + } + query.Set("filters", filterJSON) + } + + return query, nil +} diff --git a/vendor/github.com/moby/moby/client/events_test.go b/vendor/github.com/moby/moby/client/events_test.go new file mode 100644 index 000000000..ba82d2f54 --- /dev/null +++ b/vendor/github.com/moby/moby/client/events_test.go @@ -0,0 +1,165 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" +) + +func TestEventsErrorInOptions(t *testing.T) { + errorCases := []struct { + options types.EventsOptions + expectedError string + }{ + { + options: types.EventsOptions{ + Since: "2006-01-02TZ", + }, + expectedError: `parsing time "2006-01-02TZ"`, + }, + { + options: types.EventsOptions{ + Until: "2006-01-02TZ", + }, + expectedError: `parsing time "2006-01-02TZ"`, + }, + } + for _, e := range errorCases { + client := &Client{ + client: 
newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, errs := client.Events(context.Background(), e.options) + err := <-errs + if err == nil || !strings.Contains(err.Error(), e.expectedError) { + t.Fatalf("expected an error %q, got %v", e.expectedError, err) + } + } +} + +func TestEventsErrorFromServer(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, errs := client.Events(context.Background(), types.EventsOptions{}) + err := <-errs + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestEvents(t *testing.T) { + + expectedURL := "/events" + + filters := filters.NewArgs() + filters.Add("type", events.ContainerEventType) + expectedFiltersJSON := fmt.Sprintf(`{"type":{"%s":true}}`, events.ContainerEventType) + + eventsCases := []struct { + options types.EventsOptions + events []events.Message + expectedEvents map[string]bool + expectedQueryParams map[string]string + }{ + { + options: types.EventsOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": expectedFiltersJSON, + }, + events: []events.Message{}, + expectedEvents: make(map[string]bool), + }, + { + options: types.EventsOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": expectedFiltersJSON, + }, + events: []events.Message{ + { + Type: "container", + ID: "1", + Action: "create", + }, + { + Type: "container", + ID: "2", + Action: "die", + }, + { + Type: "container", + ID: "3", + Action: "create", + }, + }, + expectedEvents: map[string]bool{ + "1": true, + "2": true, + "3": true, + }, + }, + } + + for _, eventsCase := range eventsCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + + for key, expected := range eventsCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + + buffer := new(bytes.Buffer) + + for _, e := range eventsCase.events { + b, _ := json.Marshal(e) + buffer.Write(b) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(buffer), + }, nil + }), + } + + messages, errs := client.Events(context.Background(), eventsCase.options) + + loop: + for { + select { + case err := <-errs: + if err != nil && err != io.EOF { + t.Fatal(err) + } + + break loop + case e := <-messages: + _, ok := eventsCase.expectedEvents[e.ID] + if !ok { + t.Fatalf("event received not expected with action %s & id %s", e.Action, e.ID) + } + } + } + } +} diff --git a/vendor/github.com/moby/moby/client/hijack.go b/vendor/github.com/moby/moby/client/hijack.go new file mode 100644 index 000000000..346c74ae8 --- /dev/null +++ b/vendor/github.com/moby/moby/client/hijack.go @@ -0,0 +1,206 @@ +package client + +import ( + "bufio" + "crypto/tls" + "fmt" + "net" + "net/http" + "net/http/httputil" + "net/url" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/tlsconfig" + "github.com/docker/go-connections/sockets" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// tlsClientCon holds tls information and a dialed connection. 
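+// The raw connection is kept alongside the tls.Conn so that the write side of
+// the connection can be closed independently, which tls.Conn itself does not
+// expose (see CloseWrite below).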
+type tlsClientCon struct {
+	*tls.Conn
+	rawConn net.Conn
+}
+
+func (c *tlsClientCon) CloseWrite() error {
+	// Go standard tls.Conn doesn't provide the CloseWrite() method so we do it
+	// on its underlying connection.
+	if conn, ok := c.rawConn.(types.CloseWriter); ok {
+		return conn.CloseWrite()
+	}
+	return nil
+}
+
+// postHijacked sends a POST request and hijacks the connection.
+func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) {
+	bodyEncoded, err := encodeData(body)
+	if err != nil {
+		return types.HijackedResponse{}, err
+	}
+
+	apiPath := cli.getAPIPath(path, query)
+	req, err := http.NewRequest("POST", apiPath, bodyEncoded)
+	if err != nil {
+		return types.HijackedResponse{}, err
+	}
+	req = cli.addHeaders(req, headers)
+
+	conn, err := cli.setupHijackConn(req, "tcp")
+	if err != nil {
+		return types.HijackedResponse{}, err
+	}
+
+	return types.HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn)}, err
+}
+
+func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) {
+	return tlsDialWithDialer(new(net.Dialer), network, addr, config)
+}
+
+// We need to copy Go's implementation of tls.Dial (crypto/tls/tls.go) in
+// order to return our custom tlsClientCon struct which holds both the tls.Conn
+// object _and_ its underlying raw connection. The rationale for this is that
+// we need to be able to close the write end of the connection when attaching,
+// which tls.Conn does not provide.
+func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) {
+	// We want the Timeout and Deadline values from dialer to cover the
+	// whole process: TCP connection and TLS handshake. This means that we
+	// also need to start our own timers now.
+	timeout := dialer.Timeout
+
+	if !dialer.Deadline.IsZero() {
+		deadlineTimeout := dialer.Deadline.Sub(time.Now())
+		if timeout == 0 || deadlineTimeout < timeout {
+			timeout = deadlineTimeout
+		}
+	}
+
+	var errChannel chan error
+
+	if timeout != 0 {
+		errChannel = make(chan error, 2)
+		time.AfterFunc(timeout, func() {
+			// an empty error is the sentinel for a handshake timeout
+			errChannel <- errors.New("")
+		})
+	}
+
+	proxyDialer, err := sockets.DialerFromEnvironment(dialer)
+	if err != nil {
+		return nil, err
+	}
+
+	rawConn, err := proxyDialer.Dial(network, addr)
+	if err != nil {
+		return nil, err
+	}
+	// When we set up a TCP connection for hijack, there could be long periods
+	// of inactivity (a long running command with no output) that in certain
+	// network setups may cause ECONNTIMEOUT, leaving the client in an unknown
+	// state. Setting TCP KeepAlive on the socket connection will prohibit
+	// ECONNTIMEOUT unless the socket connection truly is broken
+	if tcpConn, ok := rawConn.(*net.TCPConn); ok {
+		tcpConn.SetKeepAlive(true)
+		tcpConn.SetKeepAlivePeriod(30 * time.Second)
+	}
+
+	colonPos := strings.LastIndex(addr, ":")
+	if colonPos == -1 {
+		colonPos = len(addr)
+	}
+	hostname := addr[:colonPos]
+
+	// If no ServerName is set, infer the ServerName
+	// from the hostname we're connecting to.
+	if config.ServerName == "" {
+		// Make a copy to avoid polluting argument or default.
+		config = tlsconfig.Clone(config)
+		config.ServerName = hostname
+	}
+
+	conn := tls.Client(rawConn, config)
+
+	if timeout == 0 {
+		err = conn.Handshake()
+	} else {
+		go func() {
+			errChannel <- conn.Handshake()
+		}()
+
+		err = <-errChannel
+	}
+
+	if err != nil {
+		rawConn.Close()
+		return nil, err
+	}
+
+	// This is where Docker differs from the standard crypto/tls package: we
+	// return a wrapper which holds both the TLS and raw connections.
+	return &tlsClientCon{conn, rawConn}, nil
+}
+
+func dial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) {
+	if tlsConfig != nil && proto != "unix" && proto != "npipe" {
+		// Note that this isn't the standard library's tls.Dial function
+		return tlsDial(proto, addr, tlsConfig)
+	}
+	if proto == "npipe" {
+		return sockets.DialPipe(addr, 32*time.Second)
+	}
+	return net.Dial(proto, addr)
+}
+
+func (cli *Client) setupHijackConn(req *http.Request, proto string) (net.Conn, error) {
+	req.Host = cli.addr
+	req.Header.Set("Connection", "Upgrade")
+	req.Header.Set("Upgrade", proto)
+
+	conn, err := dial(cli.proto, cli.addr, resolveTLSConfig(cli.client.Transport))
+	if err != nil {
+		return nil, errors.Wrap(err, "cannot connect to the Docker daemon. Is 'docker daemon' running on this host?")
+	}
+
+	// When we set up a TCP connection for hijack, there could be long periods
+	// of inactivity (a long running command with no output) that in certain
+	// network setups may cause ECONNTIMEOUT, leaving the client in an unknown
+	// state. Setting TCP KeepAlive on the socket connection will prohibit
+	// ECONNTIMEOUT unless the socket connection truly is broken
+	if tcpConn, ok := conn.(*net.TCPConn); ok {
+		tcpConn.SetKeepAlive(true)
+		tcpConn.SetKeepAlivePeriod(30 * time.Second)
+	}
+
+	clientconn := httputil.NewClientConn(conn, nil)
+	defer clientconn.Close()
+
+	// Server hijacks the connection, error 'connection closed' expected
+	resp, err := clientconn.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	if resp.StatusCode != http.StatusSwitchingProtocols {
+		resp.Body.Close()
+		return nil, fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode)
+	}
+
+	c, br := clientconn.Hijack()
+	if br.Buffered() > 0 {
+		// If there is buffered content, wrap the connection
+		c = &hijackedConn{c, br}
+	} else {
+		br.Reset(nil)
+	}
+
+	return c, nil
+}
+
+type hijackedConn struct {
+	net.Conn
+	r *bufio.Reader
+}
+
+func (c *hijackedConn) Read(b []byte) (int, error) {
+	return c.r.Read(b)
+}
diff --git a/vendor/github.com/moby/moby/client/image_build.go b/vendor/github.com/moby/moby/client/image_build.go
new file mode 100644
index 000000000..44a215f90
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/image_build.go
@@ -0,0 +1,128 @@
+package client
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/container"
+)
+
+// ImageBuild sends a request to the daemon to build images.
+// The Body in the response implements an io.ReadCloser and it's up to the caller to
+// close it.
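+//
+// A hypothetical call sketch (cli is an assumed *Client and buildCtx an
+// assumed tar stream of the build context; both names are illustrative):
+//
+//	resp, err := cli.ImageBuild(ctx, buildCtx, types.ImageBuildOptions{
+//		Tags:       []string{"example:latest"},
+//		Dockerfile: "Dockerfile",
+//	})
+//	if err == nil {
+//		defer resp.Body.Close()
+//		_, _ = io.Copy(os.Stdout, resp.Body) // stream the daemon's build output
+//	}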
+func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { + query, err := cli.imageBuildOptionsToQuery(options) + if err != nil { + return types.ImageBuildResponse{}, err + } + + headers := http.Header(make(map[string][]string)) + buf, err := json.Marshal(options.AuthConfigs) + if err != nil { + return types.ImageBuildResponse{}, err + } + headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) + headers.Set("Content-Type", "application/x-tar") + + serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers) + if err != nil { + return types.ImageBuildResponse{}, err + } + + osType := getDockerOS(serverResp.header.Get("Server")) + + return types.ImageBuildResponse{ + Body: serverResp.body, + OSType: osType, + }, nil +} + +func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) { + query := url.Values{ + "t": options.Tags, + "securityopt": options.SecurityOpt, + "extrahosts": options.ExtraHosts, + } + if options.SuppressOutput { + query.Set("q", "1") + } + if options.RemoteContext != "" { + query.Set("remote", options.RemoteContext) + } + if options.NoCache { + query.Set("nocache", "1") + } + if options.Remove { + query.Set("rm", "1") + } else { + query.Set("rm", "0") + } + + if options.ForceRemove { + query.Set("forcerm", "1") + } + + if options.PullParent { + query.Set("pull", "1") + } + + if options.Squash { + if err := cli.NewVersionError("1.25", "squash"); err != nil { + return query, err + } + query.Set("squash", "1") + } + + if !container.Isolation.IsDefault(options.Isolation) { + query.Set("isolation", string(options.Isolation)) + } + + query.Set("cpusetcpus", options.CPUSetCPUs) + query.Set("networkmode", options.NetworkMode) + query.Set("cpusetmems", options.CPUSetMems) + query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10)) + query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10)) + query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10)) + query.Set("memory", strconv.FormatInt(options.Memory, 10)) + query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10)) + query.Set("cgroupparent", options.CgroupParent) + query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10)) + query.Set("dockerfile", options.Dockerfile) + query.Set("target", options.Target) + + ulimitsJSON, err := json.Marshal(options.Ulimits) + if err != nil { + return query, err + } + query.Set("ulimits", string(ulimitsJSON)) + + buildArgsJSON, err := json.Marshal(options.BuildArgs) + if err != nil { + return query, err + } + query.Set("buildargs", string(buildArgsJSON)) + + labelsJSON, err := json.Marshal(options.Labels) + if err != nil { + return query, err + } + query.Set("labels", string(labelsJSON)) + + cacheFromJSON, err := json.Marshal(options.CacheFrom) + if err != nil { + return query, err + } + query.Set("cachefrom", string(cacheFromJSON)) + if options.SessionID != "" { + query.Set("session", options.SessionID) + } + + return query, nil +} diff --git a/vendor/github.com/moby/moby/client/image_build_test.go b/vendor/github.com/moby/moby/client/image_build_test.go new file mode 100644 index 000000000..1e18b7bda --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_build_test.go @@ -0,0 +1,233 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + 
"github.com/docker/docker/api/types/container" + "github.com/docker/go-units" +) + +func TestImageBuildError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImageBuild(context.Background(), nil, types.ImageBuildOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageBuild(t *testing.T) { + v1 := "value1" + v2 := "value2" + emptyRegistryConfig := "bnVsbA==" + buildCases := []struct { + buildOptions types.ImageBuildOptions + expectedQueryParams map[string]string + expectedTags []string + expectedRegistryConfig string + }{ + { + buildOptions: types.ImageBuildOptions{ + SuppressOutput: true, + NoCache: true, + Remove: true, + ForceRemove: true, + PullParent: true, + }, + expectedQueryParams: map[string]string{ + "q": "1", + "nocache": "1", + "rm": "1", + "forcerm": "1", + "pull": "1", + }, + expectedTags: []string{}, + expectedRegistryConfig: emptyRegistryConfig, + }, + { + buildOptions: types.ImageBuildOptions{ + SuppressOutput: false, + NoCache: false, + Remove: false, + ForceRemove: false, + PullParent: false, + }, + expectedQueryParams: map[string]string{ + "q": "", + "nocache": "", + "rm": "0", + "forcerm": "", + "pull": "", + }, + expectedTags: []string{}, + expectedRegistryConfig: emptyRegistryConfig, + }, + { + buildOptions: types.ImageBuildOptions{ + RemoteContext: "remoteContext", + Isolation: container.Isolation("isolation"), + CPUSetCPUs: "2", + CPUSetMems: "12", + CPUShares: 20, + CPUQuota: 10, + CPUPeriod: 30, + Memory: 256, + MemorySwap: 512, + ShmSize: 10, + CgroupParent: "cgroup_parent", + Dockerfile: "Dockerfile", + }, + expectedQueryParams: map[string]string{ + "remote": "remoteContext", + "isolation": "isolation", + "cpusetcpus": "2", + "cpusetmems": "12", + "cpushares": "20", + "cpuquota": "10", + "cpuperiod": "30", + "memory": "256", + "memswap": "512", + "shmsize": "10", + "cgroupparent": "cgroup_parent", + "dockerfile": "Dockerfile", + "rm": "0", + }, + expectedTags: []string{}, + expectedRegistryConfig: emptyRegistryConfig, + }, + { + buildOptions: types.ImageBuildOptions{ + BuildArgs: map[string]*string{ + "ARG1": &v1, + "ARG2": &v2, + "ARG3": nil, + }, + }, + expectedQueryParams: map[string]string{ + "buildargs": `{"ARG1":"value1","ARG2":"value2","ARG3":null}`, + "rm": "0", + }, + expectedTags: []string{}, + expectedRegistryConfig: emptyRegistryConfig, + }, + { + buildOptions: types.ImageBuildOptions{ + Ulimits: []*units.Ulimit{ + { + Name: "nproc", + Hard: 65557, + Soft: 65557, + }, + { + Name: "nofile", + Hard: 20000, + Soft: 40000, + }, + }, + }, + expectedQueryParams: map[string]string{ + "ulimits": `[{"Name":"nproc","Hard":65557,"Soft":65557},{"Name":"nofile","Hard":20000,"Soft":40000}]`, + "rm": "0", + }, + expectedTags: []string{}, + expectedRegistryConfig: emptyRegistryConfig, + }, + { + buildOptions: types.ImageBuildOptions{ + AuthConfigs: map[string]types.AuthConfig{ + "https://index.docker.io/v1/": { + Auth: "dG90bwo=", + }, + }, + }, + expectedQueryParams: map[string]string{ + "rm": "0", + }, + expectedTags: []string{}, + expectedRegistryConfig: "eyJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOnsiYXV0aCI6ImRHOTBid289In19", + }, + } + for _, buildCase := range buildCases { + expectedURL := "/build" + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, 
fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + // Check request headers + registryConfig := r.Header.Get("X-Registry-Config") + if registryConfig != buildCase.expectedRegistryConfig { + return nil, fmt.Errorf("X-Registry-Config header not properly set in the request. Expected '%s', got %s", buildCase.expectedRegistryConfig, registryConfig) + } + contentType := r.Header.Get("Content-Type") + if contentType != "application/x-tar" { + return nil, fmt.Errorf("Content-type header not properly set in the request. Expected 'application/x-tar', got %s", contentType) + } + + // Check query parameters + query := r.URL.Query() + for key, expected := range buildCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + + // Check tags + if len(buildCase.expectedTags) > 0 { + tags := query["t"] + if !reflect.DeepEqual(tags, buildCase.expectedTags) { + return nil, fmt.Errorf("t (tags) not set in URL query properly. Expected '%s', got %s", buildCase.expectedTags, tags) + } + } + + headers := http.Header{} + headers.Add("Server", "Docker/v1.23 (MyOS)") + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + Header: headers, + }, nil + }), + } + buildResponse, err := client.ImageBuild(context.Background(), nil, buildCase.buildOptions) + if err != nil { + t.Fatal(err) + } + if buildResponse.OSType != "MyOS" { + t.Fatalf("expected OSType to be 'MyOS', got %s", buildResponse.OSType) + } + response, err := ioutil.ReadAll(buildResponse.Body) + if err != nil { + t.Fatal(err) + } + buildResponse.Body.Close() + if string(response) != "body" { + t.Fatalf("expected Body to contain 'body' string, got %s", response) + } + } +} + +func TestGetDockerOS(t *testing.T) { + cases := map[string]string{ + "Docker/v1.22 (linux)": "linux", + "Docker/v1.22 (windows)": "windows", + "Foo/v1.22 (bar)": "", + } + for header, os := range cases { + g := getDockerOS(header) + if g != os { + t.Fatalf("Expected %s, got %s", os, g) + } + } +} diff --git a/vendor/github.com/moby/moby/client/image_create.go b/vendor/github.com/moby/moby/client/image_create.go new file mode 100644 index 000000000..4436abb0d --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_create.go @@ -0,0 +1,34 @@ +package client + +import ( + "io" + "net/url" + + "golang.org/x/net/context" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" +) + +// ImageCreate creates a new image based in the parent options. +// It returns the JSON content in the response body. 
+func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) { + ref, err := reference.ParseNormalizedNamed(parentReference) + if err != nil { + return nil, err + } + + query := url.Values{} + query.Set("fromImage", reference.FamiliarName(ref)) + query.Set("tag", getAPITagFromNamedRef(ref)) + resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) + if err != nil { + return nil, err + } + return resp.body, nil +} + +func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/images/create", query, nil, headers) +} diff --git a/vendor/github.com/moby/moby/client/image_create_test.go b/vendor/github.com/moby/moby/client/image_create_test.go new file mode 100644 index 000000000..5c2edd2ad --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_create_test.go @@ -0,0 +1,76 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +func TestImageCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImageCreate(context.Background(), "reference", types.ImageCreateOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestImageCreate(t *testing.T) { + expectedURL := "/images/create" + expectedImage := "test:5000/my_image" + expectedTag := "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + expectedReference := fmt.Sprintf("%s@%s", expectedImage, expectedTag) + expectedRegistryAuth := "eyJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOnsiYXV0aCI6ImRHOTBid289IiwiZW1haWwiOiJqb2huQGRvZS5jb20ifX0=" + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + registryAuth := r.Header.Get("X-Registry-Auth") + if registryAuth != expectedRegistryAuth { + return nil, fmt.Errorf("X-Registry-Auth header not properly set in the request. Expected '%s', got %s", expectedRegistryAuth, registryAuth) + } + + query := r.URL.Query() + fromImage := query.Get("fromImage") + if fromImage != expectedImage { + return nil, fmt.Errorf("fromImage not set in URL query properly. Expected '%s', got %s", expectedImage, fromImage) + } + + tag := query.Get("tag") + if tag != expectedTag { + return nil, fmt.Errorf("tag not set in URL query properly. 
Expected '%s', got %s", expectedTag, tag) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + createResponse, err := client.ImageCreate(context.Background(), expectedReference, types.ImageCreateOptions{ + RegistryAuth: expectedRegistryAuth, + }) + if err != nil { + t.Fatal(err) + } + response, err := ioutil.ReadAll(createResponse) + if err != nil { + t.Fatal(err) + } + if err = createResponse.Close(); err != nil { + t.Fatal(err) + } + if string(response) != "body" { + t.Fatalf("expected Body to contain 'body' string, got %s", response) + } +} diff --git a/vendor/github.com/moby/moby/client/image_history.go b/vendor/github.com/moby/moby/client/image_history.go new file mode 100644 index 000000000..7b4babcba --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_history.go @@ -0,0 +1,22 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types/image" + "golang.org/x/net/context" +) + +// ImageHistory returns the changes in an image in history format. +func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]image.HistoryResponseItem, error) { + var history []image.HistoryResponseItem + serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil) + if err != nil { + return history, err + } + + err = json.NewDecoder(serverResp.body).Decode(&history) + ensureReaderClosed(serverResp) + return history, err +} diff --git a/vendor/github.com/moby/moby/client/image_history_test.go b/vendor/github.com/moby/moby/client/image_history_test.go new file mode 100644 index 000000000..101bffd0c --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_history_test.go @@ -0,0 +1,60 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/image" + "golang.org/x/net/context" +) + +func TestImageHistoryError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImageHistory(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestImageHistory(t *testing.T) { + expectedURL := "/images/image_id/history" + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + b, err := json.Marshal([]image.HistoryResponseItem{ + { + ID: "image_id1", + Tags: []string{"tag1", "tag2"}, + }, + { + ID: "image_id2", + Tags: []string{"tag1", "tag2"}, + }, + }) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + imageHistories, err := client.ImageHistory(context.Background(), "image_id") + if err != nil { + t.Fatal(err) + } + if len(imageHistories) != 2 { + t.Fatalf("expected 2 containers, got %v", imageHistories) + } +} diff --git a/vendor/github.com/moby/moby/client/image_import.go b/vendor/github.com/moby/moby/client/image_import.go new file mode 100644 index 000000000..d7dedd823 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_import.go @@ -0,0 +1,37 @@ +package client + +import ( + "io" + "net/url" + + "golang.org/x/net/context" + + 
"github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" +) + +// ImageImport creates a new image based in the source options. +// It returns the JSON content in the response body. +func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) { + if ref != "" { + //Check if the given image name can be resolved + if _, err := reference.ParseNormalizedNamed(ref); err != nil { + return nil, err + } + } + + query := url.Values{} + query.Set("fromSrc", source.SourceName) + query.Set("repo", ref) + query.Set("tag", options.Tag) + query.Set("message", options.Message) + for _, change := range options.Changes { + query.Add("changes", change) + } + + resp, err := cli.postRaw(ctx, "/images/create", query, source.Source, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/moby/moby/client/image_import_test.go b/vendor/github.com/moby/moby/client/image_import_test.go new file mode 100644 index 000000000..370ad5fbe --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_import_test.go @@ -0,0 +1,81 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestImageImportError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImageImport(context.Background(), types.ImageImportSource{}, "image:tag", types.ImageImportOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestImageImport(t *testing.T) { + expectedURL := "/images/create" + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + query := r.URL.Query() + fromSrc := query.Get("fromSrc") + if fromSrc != "image_source" { + return nil, fmt.Errorf("fromSrc not set in URL query properly. Expected 'image_source', got %s", fromSrc) + } + repo := query.Get("repo") + if repo != "repository_name:imported" { + return nil, fmt.Errorf("repo not set in URL query properly. Expected 'repository_name:imported', got %s", repo) + } + tag := query.Get("tag") + if tag != "imported" { + return nil, fmt.Errorf("tag not set in URL query properly. Expected 'imported', got %s", tag) + } + message := query.Get("message") + if message != "A message" { + return nil, fmt.Errorf("message not set in URL query properly. Expected 'A message', got %s", message) + } + changes := query["changes"] + expectedChanges := []string{"change1", "change2"} + if !reflect.DeepEqual(expectedChanges, changes) { + return nil, fmt.Errorf("changes not set in URL query properly. 
Expected %v, got %v", expectedChanges, changes) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + importResponse, err := client.ImageImport(context.Background(), types.ImageImportSource{ + Source: strings.NewReader("source"), + SourceName: "image_source", + }, "repository_name:imported", types.ImageImportOptions{ + Tag: "imported", + Message: "A message", + Changes: []string{"change1", "change2"}, + }) + if err != nil { + t.Fatal(err) + } + response, err := ioutil.ReadAll(importResponse) + if err != nil { + t.Fatal(err) + } + importResponse.Close() + if string(response) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(response)) + } +} diff --git a/vendor/github.com/moby/moby/client/image_inspect.go b/vendor/github.com/moby/moby/client/image_inspect.go new file mode 100644 index 000000000..b3a64ce2f --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_inspect.go @@ -0,0 +1,33 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ImageInspectWithRaw returns the image information and its raw representation. +func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) { + serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return types.ImageInspect{}, nil, imageNotFoundError{imageID} + } + return types.ImageInspect{}, nil, err + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return types.ImageInspect{}, nil, err + } + + var response types.ImageInspect + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/moby/moby/client/image_inspect_test.go b/vendor/github.com/moby/moby/client/image_inspect_test.go new file mode 100644 index 000000000..74a4e4980 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_inspect_test.go @@ -0,0 +1,71 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestImageInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.ImageInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageInspectImageNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, _, err := client.ImageInspectWithRaw(context.Background(), "unknown") + if err == nil || !IsErrImageNotFound(err) { + t.Fatalf("expected an imageNotFound error, got %v", err) + } +} + +func TestImageInspect(t *testing.T) { + expectedURL := "/images/image_id/json" + expectedTags := []string{"tag1", "tag2"} + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(types.ImageInspect{ + ID: "image_id", + RepoTags: 
expectedTags, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + imageInspect, _, err := client.ImageInspectWithRaw(context.Background(), "image_id") + if err != nil { + t.Fatal(err) + } + if imageInspect.ID != "image_id" { + t.Fatalf("expected `image_id`, got %s", imageInspect.ID) + } + if !reflect.DeepEqual(imageInspect.RepoTags, expectedTags) { + t.Fatalf("expected `%v`, got %v", expectedTags, imageInspect.RepoTags) + } +} diff --git a/vendor/github.com/moby/moby/client/image_list.go b/vendor/github.com/moby/moby/client/image_list.go new file mode 100644 index 000000000..f26464f67 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_list.go @@ -0,0 +1,45 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/versions" + "golang.org/x/net/context" +) + +// ImageList returns a list of images in the docker host. +func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) { + var images []types.ImageSummary + query := url.Values{} + + optionFilters := options.Filters + referenceFilters := optionFilters.Get("reference") + if versions.LessThan(cli.version, "1.25") && len(referenceFilters) > 0 { + query.Set("filter", referenceFilters[0]) + for _, filterValue := range referenceFilters { + optionFilters.Del("reference", filterValue) + } + } + if optionFilters.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, optionFilters) + if err != nil { + return images, err + } + query.Set("filters", filterJSON) + } + if options.All { + query.Set("all", "1") + } + + serverResp, err := cli.get(ctx, "/images/json", query, nil) + if err != nil { + return images, err + } + + err = json.NewDecoder(serverResp.body).Decode(&images) + ensureReaderClosed(serverResp) + return images, err +} diff --git a/vendor/github.com/moby/moby/client/image_list_test.go b/vendor/github.com/moby/moby/client/image_list_test.go new file mode 100644 index 000000000..7c4a46414 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_list_test.go @@ -0,0 +1,159 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +func TestImageListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ImageList(context.Background(), types.ImageListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageList(t *testing.T) { + expectedURL := "/images/json" + + noDanglingfilters := filters.NewArgs() + noDanglingfilters.Add("dangling", "false") + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + filters.Add("dangling", "true") + + listCases := []struct { + options types.ImageListOptions + expectedQueryParams map[string]string + }{ + { + options: types.ImageListOptions{}, + expectedQueryParams: map[string]string{ + "all": "", + "filter": "", + "filters": "", + }, + }, + { + options: types.ImageListOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "all": "", + 
"filter": "", + "filters": `{"dangling":{"true":true},"label":{"label1":true,"label2":true}}`, + }, + }, + { + options: types.ImageListOptions{ + Filters: noDanglingfilters, + }, + expectedQueryParams: map[string]string{ + "all": "", + "filter": "", + "filters": `{"dangling":{"false":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]types.ImageSummary{ + { + ID: "image_id2", + }, + { + ID: "image_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + images, err := client.ImageList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(images) != 2 { + t.Fatalf("expected 2 images, got %v", images) + } + } +} + +func TestImageListApiBefore125(t *testing.T) { + expectedFilter := "image:tag" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + query := req.URL.Query() + actualFilter := query.Get("filter") + if actualFilter != expectedFilter { + return nil, fmt.Errorf("filter not set in URL query properly. Expected '%s', got %s", expectedFilter, actualFilter) + } + actualFilters := query.Get("filters") + if actualFilters != "" { + return nil, fmt.Errorf("filters should have not been present, were with value: %s", actualFilters) + } + content, err := json.Marshal([]types.ImageSummary{ + { + ID: "image_id2", + }, + { + ID: "image_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + version: "1.24", + } + + filters := filters.NewArgs() + filters.Add("reference", "image:tag") + + options := types.ImageListOptions{ + Filters: filters, + } + + images, err := client.ImageList(context.Background(), options) + if err != nil { + t.Fatal(err) + } + if len(images) != 2 { + t.Fatalf("expected 2 images, got %v", images) + } +} diff --git a/vendor/github.com/moby/moby/client/image_load.go b/vendor/github.com/moby/moby/client/image_load.go new file mode 100644 index 000000000..77aaf1af3 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_load.go @@ -0,0 +1,30 @@ +package client + +import ( + "io" + "net/url" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +// ImageLoad loads an image in the docker host from the client host. +// It's up to the caller to close the io.ReadCloser in the +// ImageLoadResponse returned by this function. 
+func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) { + v := url.Values{} + v.Set("quiet", "0") + if quiet { + v.Set("quiet", "1") + } + headers := map[string][]string{"Content-Type": {"application/x-tar"}} + resp, err := cli.postRaw(ctx, "/images/load", v, input, headers) + if err != nil { + return types.ImageLoadResponse{}, err + } + return types.ImageLoadResponse{ + Body: resp.body, + JSON: resp.header.Get("Content-Type") == "application/json", + }, nil +} diff --git a/vendor/github.com/moby/moby/client/image_load_test.go b/vendor/github.com/moby/moby/client/image_load_test.go new file mode 100644 index 000000000..68dc14ff2 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_load_test.go @@ -0,0 +1,95 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestImageLoadError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ImageLoad(context.Background(), nil, true) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageLoad(t *testing.T) { + expectedURL := "/images/load" + expectedInput := "inputBody" + expectedOutput := "outputBody" + loadCases := []struct { + quiet bool + responseContentType string + expectedResponseJSON bool + expectedQueryParams map[string]string + }{ + { + quiet: false, + responseContentType: "text/plain", + expectedResponseJSON: false, + expectedQueryParams: map[string]string{ + "quiet": "0", + }, + }, + { + quiet: true, + responseContentType: "application/json", + expectedResponseJSON: true, + expectedQueryParams: map[string]string{ + "quiet": "1", + }, + }, + } + for _, loadCase := range loadCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + contentType := req.Header.Get("Content-Type") + if contentType != "application/x-tar" { + return nil, fmt.Errorf("content-type not set in URL headers properly. Expected 'application/x-tar', got %s", contentType) + } + query := req.URL.Query() + for key, expected := range loadCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. 
Expected '%s', got %s", key, expected, actual) + } + } + headers := http.Header{} + headers.Add("Content-Type", loadCase.responseContentType) + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(expectedOutput))), + Header: headers, + }, nil + }), + } + + input := bytes.NewReader([]byte(expectedInput)) + imageLoadResponse, err := client.ImageLoad(context.Background(), input, loadCase.quiet) + if err != nil { + t.Fatal(err) + } + if imageLoadResponse.JSON != loadCase.expectedResponseJSON { + t.Fatalf("expected a JSON response, was not.") + } + body, err := ioutil.ReadAll(imageLoadResponse.Body) + if err != nil { + t.Fatal(err) + } + if string(body) != expectedOutput { + t.Fatalf("expected %s, got %s", expectedOutput, string(body)) + } + } +} diff --git a/vendor/github.com/moby/moby/client/image_prune.go b/vendor/github.com/moby/moby/client/image_prune.go new file mode 100644 index 000000000..5ef98b7f0 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_prune.go @@ -0,0 +1,36 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// ImagesPrune requests the daemon to delete unused data +func (cli *Client) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (types.ImagesPruneReport, error) { + var report types.ImagesPruneReport + + if err := cli.NewVersionError("1.25", "image prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/images/prune", query, nil, nil) + if err != nil { + return report, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/moby/moby/client/image_prune_test.go b/vendor/github.com/moby/moby/client/image_prune_test.go new file mode 100644 index 000000000..453f84ade --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_prune_test.go @@ -0,0 +1,119 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestImagesPruneError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + version: "1.25", + } + + filters := filters.NewArgs() + + _, err := client.ImagesPrune(context.Background(), filters) + assert.EqualError(t, err, "Error response from daemon: Server error") +} + +func TestImagesPrune(t *testing.T) { + expectedURL := "/v1.25/images/prune" + + danglingFilters := filters.NewArgs() + danglingFilters.Add("dangling", "true") + + noDanglingFilters := filters.NewArgs() + noDanglingFilters.Add("dangling", "false") + + labelFilters := filters.NewArgs() + labelFilters.Add("dangling", "true") + labelFilters.Add("label", "label1=foo") + labelFilters.Add("label", "label2!=bar") + + listCases := []struct { + filters filters.Args + expectedQueryParams map[string]string + }{ + { + filters: filters.Args{}, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": "", + }, + }, + { + filters: danglingFilters, + expectedQueryParams: map[string]string{ + 
"until": "", + "filter": "", + "filters": `{"dangling":{"true":true}}`, + }, + }, + { + filters: noDanglingFilters, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": `{"dangling":{"false":true}}`, + }, + }, + { + filters: labelFilters, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": `{"dangling":{"true":true},"label":{"label1=foo":true,"label2!=bar":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + assert.Equal(t, expected, actual) + } + content, err := json.Marshal(types.ImagesPruneReport{ + ImagesDeleted: []types.ImageDeleteResponseItem{ + { + Deleted: "image_id1", + }, + { + Deleted: "image_id2", + }, + }, + SpaceReclaimed: 9999, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + version: "1.25", + } + + report, err := client.ImagesPrune(context.Background(), listCase.filters) + assert.NoError(t, err) + assert.Len(t, report.ImagesDeleted, 2) + assert.Equal(t, uint64(9999), report.SpaceReclaimed) + } +} diff --git a/vendor/github.com/moby/moby/client/image_pull.go b/vendor/github.com/moby/moby/client/image_pull.go new file mode 100644 index 000000000..a72b9bf7f --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_pull.go @@ -0,0 +1,61 @@ +package client + +import ( + "io" + "net/http" + "net/url" + + "golang.org/x/net/context" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" +) + +// ImagePull requests the docker host to pull an image from a remote registry. +// It executes the privileged function if the operation is unauthorized +// and it tries one more time. +// It's up to the caller to handle the io.ReadCloser and close it properly. +// +// FIXME(vdemeester): there is currently used in a few way in docker/docker +// - if not in trusted content, ref is used to pass the whole reference, and tag is empty +// - if in trusted content, ref is used to pass the reference name, and tag for the digest +func (cli *Client) ImagePull(ctx context.Context, refStr string, options types.ImagePullOptions) (io.ReadCloser, error) { + ref, err := reference.ParseNormalizedNamed(refStr) + if err != nil { + return nil, err + } + + query := url.Values{} + query.Set("fromImage", reference.FamiliarName(ref)) + if !options.All { + query.Set("tag", getAPITagFromNamedRef(ref)) + } + + resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) + if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + return nil, privilegeErr + } + resp, err = cli.tryImageCreate(ctx, query, newAuthHeader) + } + if err != nil { + return nil, err + } + return resp.body, nil +} + +// getAPITagFromNamedRef returns a tag from the specified reference. +// This function is necessary as long as the docker "server" api expects +// digests to be sent as tags and makes a distinction between the name +// and tag/digest part of a reference. 
+func getAPITagFromNamedRef(ref reference.Named) string { + if digested, ok := ref.(reference.Digested); ok { + return digested.Digest().String() + } + ref = reference.TagNameOnly(ref) + if tagged, ok := ref.(reference.Tagged); ok { + return tagged.Tag() + } + return "" +} diff --git a/vendor/github.com/moby/moby/client/image_pull_test.go b/vendor/github.com/moby/moby/client/image_pull_test.go new file mode 100644 index 000000000..ab49d2d34 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_pull_test.go @@ -0,0 +1,199 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +func TestImagePullReferenceParseError(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return nil, nil + }), + } + // An empty reference is an invalid reference + _, err := client.ImagePull(context.Background(), "", types.ImagePullOptions{}) + if err == nil || !strings.Contains(err.Error(), "invalid reference format") { + t.Fatalf("expected an error, got %v", err) + } +} + +func TestImagePullAnyError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImagePullStatusUnauthorizedError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + _, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{}) + if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { + t.Fatalf("expected an Unauthorized Error, got %v", err) + } +} + +func TestImagePullWithUnauthorizedErrorAndPrivilegeFuncError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + privilegeFunc := func() (string, error) { + return "", fmt.Errorf("Error requesting privilege") + } + _, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() != "Error requesting privilege" { + t.Fatalf("expected an error requesting privilege, got %v", err) + } +} + +func TestImagePullWithUnauthorizedErrorAndAnotherUnauthorizedError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + privilegeFunc := func() (string, error) { + return "a-auth-header", nil + } + _, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { + t.Fatalf("expected an Unauthorized Error, got %v", err) + } +} + +func TestImagePullWithPrivilegedFuncNoError(t *testing.T) { + expectedURL := "/images/create" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + auth := req.Header.Get("X-Registry-Auth") + if auth == "NotValid" { + return &http.Response{ + StatusCode: http.StatusUnauthorized, + Body: ioutil.NopCloser(bytes.NewReader([]byte("Invalid 
credentials"))), + }, nil + } + if auth != "IAmValid" { + return nil, fmt.Errorf("Invalid auth header : expected %s, got %s", "IAmValid", auth) + } + query := req.URL.Query() + fromImage := query.Get("fromImage") + if fromImage != "myimage" { + return nil, fmt.Errorf("fromimage not set in URL query properly. Expected '%s', got %s", "myimage", fromImage) + } + tag := query.Get("tag") + if tag != "latest" { + return nil, fmt.Errorf("tag not set in URL query properly. Expected '%s', got %s", "latest", tag) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("hello world"))), + }, nil + }), + } + privilegeFunc := func() (string, error) { + return "IAmValid", nil + } + resp, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{ + RegistryAuth: "NotValid", + PrivilegeFunc: privilegeFunc, + }) + if err != nil { + t.Fatal(err) + } + body, err := ioutil.ReadAll(resp) + if err != nil { + t.Fatal(err) + } + if string(body) != "hello world" { + t.Fatalf("expected 'hello world', got %s", string(body)) + } +} + +func TestImagePullWithoutErrors(t *testing.T) { + expectedURL := "/images/create" + expectedOutput := "hello world" + pullCases := []struct { + all bool + reference string + expectedImage string + expectedTag string + }{ + { + all: false, + reference: "myimage", + expectedImage: "myimage", + expectedTag: "latest", + }, + { + all: false, + reference: "myimage:tag", + expectedImage: "myimage", + expectedTag: "tag", + }, + { + all: true, + reference: "myimage", + expectedImage: "myimage", + expectedTag: "", + }, + { + all: true, + reference: "myimage:anything", + expectedImage: "myimage", + expectedTag: "", + }, + } + for _, pullCase := range pullCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + fromImage := query.Get("fromImage") + if fromImage != pullCase.expectedImage { + return nil, fmt.Errorf("fromimage not set in URL query properly. Expected '%s', got %s", pullCase.expectedImage, fromImage) + } + tag := query.Get("tag") + if tag != pullCase.expectedTag { + return nil, fmt.Errorf("tag not set in URL query properly. Expected '%s', got %s", pullCase.expectedTag, tag) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(expectedOutput))), + }, nil + }), + } + resp, err := client.ImagePull(context.Background(), pullCase.reference, types.ImagePullOptions{ + All: pullCase.all, + }) + if err != nil { + t.Fatal(err) + } + body, err := ioutil.ReadAll(resp) + if err != nil { + t.Fatal(err) + } + if string(body) != expectedOutput { + t.Fatalf("expected '%s', got %s", expectedOutput, string(body)) + } + } +} diff --git a/vendor/github.com/moby/moby/client/image_push.go b/vendor/github.com/moby/moby/client/image_push.go new file mode 100644 index 000000000..410d2fb91 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_push.go @@ -0,0 +1,56 @@ +package client + +import ( + "errors" + "io" + "net/http" + "net/url" + + "golang.org/x/net/context" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" +) + +// ImagePush requests the docker host to push an image to a remote registry. +// It executes the privileged function if the operation is unauthorized +// and it tries one more time. 
+// It's up to the caller to handle the io.ReadCloser and close it properly.
+func (cli *Client) ImagePush(ctx context.Context, image string, options types.ImagePushOptions) (io.ReadCloser, error) {
+	ref, err := reference.ParseNormalizedNamed(image)
+	if err != nil {
+		return nil, err
+	}
+
+	if _, isCanonical := ref.(reference.Canonical); isCanonical {
+		return nil, errors.New("cannot push a digest reference")
+	}
+
+	tag := ""
+	name := reference.FamiliarName(ref)
+
+	if nameTaggedRef, isNamedTagged := ref.(reference.NamedTagged); isNamedTagged {
+		tag = nameTaggedRef.Tag()
+	}
+
+	query := url.Values{}
+	query.Set("tag", tag)
+
+	resp, err := cli.tryImagePush(ctx, name, query, options.RegistryAuth)
+	if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil {
+		newAuthHeader, privilegeErr := options.PrivilegeFunc()
+		if privilegeErr != nil {
+			return nil, privilegeErr
+		}
+		resp, err = cli.tryImagePush(ctx, name, query, newAuthHeader)
+	}
+	if err != nil {
+		return nil, err
+	}
+	return resp.body, nil
+}
+
+func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (serverResponse, error) {
+	headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
+	return cli.post(ctx, "/images/"+imageID+"/push", query, nil, headers)
+}
diff --git a/vendor/github.com/moby/moby/client/image_push_test.go b/vendor/github.com/moby/moby/client/image_push_test.go
new file mode 100644
index 000000000..f93debf5b
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/image_push_test.go
@@ -0,0 +1,180 @@
+package client
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"testing"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/docker/api/types"
+)
+
+func TestImagePushReferenceError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(func(req *http.Request) (*http.Response, error) {
+			return nil, nil
+		}),
+	}
+	// An empty reference is an invalid reference
+	_, err := client.ImagePush(context.Background(), "", types.ImagePushOptions{})
+	if err == nil || !strings.Contains(err.Error(), "invalid reference format") {
+		t.Fatalf("expected an error, got %v", err)
+	}
+	// A canonical reference cannot be pushed
+	_, err = client.ImagePush(context.Background(), "repo@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", types.ImagePushOptions{})
+	if err == nil || err.Error() != "cannot push a digest reference" {
+		t.Fatalf("expected an error, got %v", err)
+	}
+}
+
+func TestImagePushAnyError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
+	}
+	_, err := client.ImagePush(context.Background(), "myimage", types.ImagePushOptions{})
+	if err == nil || err.Error() != "Error response from daemon: Server error" {
+		t.Fatalf("expected a Server Error, got %v", err)
+	}
+}
+
+func TestImagePushStatusUnauthorizedError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")),
+	}
+	_, err := client.ImagePush(context.Background(), "myimage", types.ImagePushOptions{})
+	if err == nil || err.Error() != "Error response from daemon: Unauthorized error" {
+		t.Fatalf("expected an Unauthorized Error, got %v", err)
+	}
+}
+
+func TestImagePushWithUnauthorizedErrorAndPrivilegeFuncError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")),
+	}
+	privilegeFunc := func() (string, error) {
+		return "",
fmt.Errorf("Error requesting privilege") + } + _, err := client.ImagePush(context.Background(), "myimage", types.ImagePushOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() != "Error requesting privilege" { + t.Fatalf("expected an error requesting privilege, got %v", err) + } +} + +func TestImagePushWithUnauthorizedErrorAndAnotherUnauthorizedError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + privilegeFunc := func() (string, error) { + return "a-auth-header", nil + } + _, err := client.ImagePush(context.Background(), "myimage", types.ImagePushOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { + t.Fatalf("expected an Unauthorized Error, got %v", err) + } +} + +func TestImagePushWithPrivilegedFuncNoError(t *testing.T) { + expectedURL := "/images/myimage/push" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + auth := req.Header.Get("X-Registry-Auth") + if auth == "NotValid" { + return &http.Response{ + StatusCode: http.StatusUnauthorized, + Body: ioutil.NopCloser(bytes.NewReader([]byte("Invalid credentials"))), + }, nil + } + if auth != "IAmValid" { + return nil, fmt.Errorf("Invalid auth header : expected %s, got %s", "IAmValid", auth) + } + query := req.URL.Query() + tag := query.Get("tag") + if tag != "tag" { + return nil, fmt.Errorf("tag not set in URL query properly. Expected '%s', got %s", "tag", tag) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("hello world"))), + }, nil + }), + } + privilegeFunc := func() (string, error) { + return "IAmValid", nil + } + resp, err := client.ImagePush(context.Background(), "myimage:tag", types.ImagePushOptions{ + RegistryAuth: "NotValid", + PrivilegeFunc: privilegeFunc, + }) + if err != nil { + t.Fatal(err) + } + body, err := ioutil.ReadAll(resp) + if err != nil { + t.Fatal(err) + } + if string(body) != "hello world" { + t.Fatalf("expected 'hello world', got %s", string(body)) + } +} + +func TestImagePushWithoutErrors(t *testing.T) { + expectedOutput := "hello world" + expectedURLFormat := "/images/%s/push" + pullCases := []struct { + reference string + expectedImage string + expectedTag string + }{ + { + reference: "myimage", + expectedImage: "myimage", + expectedTag: "", + }, + { + reference: "myimage:tag", + expectedImage: "myimage", + expectedTag: "tag", + }, + } + for _, pullCase := range pullCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + expectedURL := fmt.Sprintf(expectedURLFormat, pullCase.expectedImage) + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + tag := query.Get("tag") + if tag != pullCase.expectedTag { + return nil, fmt.Errorf("tag not set in URL query properly. 
Expected '%s', got %s", pullCase.expectedTag, tag) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(expectedOutput))), + }, nil + }), + } + resp, err := client.ImagePush(context.Background(), pullCase.reference, types.ImagePushOptions{}) + if err != nil { + t.Fatal(err) + } + body, err := ioutil.ReadAll(resp) + if err != nil { + t.Fatal(err) + } + if string(body) != expectedOutput { + t.Fatalf("expected '%s', got %s", expectedOutput, string(body)) + } + } +} diff --git a/vendor/github.com/moby/moby/client/image_remove.go b/vendor/github.com/moby/moby/client/image_remove.go new file mode 100644 index 000000000..6921209ee --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_remove.go @@ -0,0 +1,31 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ImageRemove removes an image from the docker host. +func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) { + query := url.Values{} + + if options.Force { + query.Set("force", "1") + } + if !options.PruneChildren { + query.Set("noprune", "1") + } + + resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) + if err != nil { + return nil, err + } + + var dels []types.ImageDeleteResponseItem + err = json.NewDecoder(resp.body).Decode(&dels) + ensureReaderClosed(resp) + return dels, err +} diff --git a/vendor/github.com/moby/moby/client/image_remove_test.go b/vendor/github.com/moby/moby/client/image_remove_test.go new file mode 100644 index 000000000..985631130 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_remove_test.go @@ -0,0 +1,95 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestImageRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ImageRemove(context.Background(), "image_id", types.ImageRemoveOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageRemove(t *testing.T) { + expectedURL := "/images/image_id" + removeCases := []struct { + force bool + pruneChildren bool + expectedQueryParams map[string]string + }{ + { + force: false, + pruneChildren: false, + expectedQueryParams: map[string]string{ + "force": "", + "noprune": "1", + }, + }, { + force: true, + pruneChildren: true, + expectedQueryParams: map[string]string{ + "force": "1", + "noprune": "", + }, + }, + } + for _, removeCase := range removeCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + query := req.URL.Query() + for key, expected := range removeCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. 
Expected '%s', got %s", key, expected, actual) + } + } + b, err := json.Marshal([]types.ImageDeleteResponseItem{ + { + Untagged: "image_id1", + }, + { + Deleted: "image_id", + }, + }) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + imageDeletes, err := client.ImageRemove(context.Background(), "image_id", types.ImageRemoveOptions{ + Force: removeCase.force, + PruneChildren: removeCase.pruneChildren, + }) + if err != nil { + t.Fatal(err) + } + if len(imageDeletes) != 2 { + t.Fatalf("expected 2 deleted images, got %v", imageDeletes) + } + } +} diff --git a/vendor/github.com/moby/moby/client/image_save.go b/vendor/github.com/moby/moby/client/image_save.go new file mode 100644 index 000000000..ecac880a3 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_save.go @@ -0,0 +1,22 @@ +package client + +import ( + "io" + "net/url" + + "golang.org/x/net/context" +) + +// ImageSave retrieves one or more images from the docker host as an io.ReadCloser. +// It's up to the caller to store the images and close the stream. +func (cli *Client) ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) { + query := url.Values{ + "names": imageIDs, + } + + resp, err := cli.get(ctx, "/images/get", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/moby/moby/client/image_save_test.go b/vendor/github.com/moby/moby/client/image_save_test.go new file mode 100644 index 000000000..8f0cf8864 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_save_test.go @@ -0,0 +1,58 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "testing" + + "golang.org/x/net/context" + + "strings" +) + +func TestImageSaveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImageSave(context.Background(), []string{"nothing"}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestImageSave(t *testing.T) { + expectedURL := "/images/get" + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + query := r.URL.Query() + names := query["names"] + expectedNames := []string{"image_id1", "image_id2"} + if !reflect.DeepEqual(names, expectedNames) { + return nil, fmt.Errorf("names not set in URL query properly. 
Expected %v, got %v", names, expectedNames) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + saveResponse, err := client.ImageSave(context.Background(), []string{"image_id1", "image_id2"}) + if err != nil { + t.Fatal(err) + } + response, err := ioutil.ReadAll(saveResponse) + if err != nil { + t.Fatal(err) + } + saveResponse.Close() + if string(response) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(response)) + } +} diff --git a/vendor/github.com/moby/moby/client/image_search.go b/vendor/github.com/moby/moby/client/image_search.go new file mode 100644 index 000000000..b0fcd5c23 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_search.go @@ -0,0 +1,51 @@ +package client + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/registry" + "golang.org/x/net/context" +) + +// ImageSearch makes the docker host to search by a term in a remote registry. +// The list of results is not sorted in any fashion. +func (cli *Client) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) { + var results []registry.SearchResult + query := url.Values{} + query.Set("term", term) + query.Set("limit", fmt.Sprintf("%d", options.Limit)) + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + if err != nil { + return results, err + } + query.Set("filters", filterJSON) + } + + resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth) + if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + return results, privilegeErr + } + resp, err = cli.tryImageSearch(ctx, query, newAuthHeader) + } + if err != nil { + return results, err + } + + err = json.NewDecoder(resp.body).Decode(&results) + ensureReaderClosed(resp) + return results, err +} + +func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.get(ctx, "/images/search", query, headers) +} diff --git a/vendor/github.com/moby/moby/client/image_search_test.go b/vendor/github.com/moby/moby/client/image_search_test.go new file mode 100644 index 000000000..b17bbd834 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_search_test.go @@ -0,0 +1,165 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "encoding/json" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/registry" +) + +func TestImageSearchAnyError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageSearchStatusUnauthorizedError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + _, err := client.ImageSearch(context.Background(), 
"some-image", types.ImageSearchOptions{}) + if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { + t.Fatalf("expected an Unauthorized Error, got %v", err) + } +} + +func TestImageSearchWithUnauthorizedErrorAndPrivilegeFuncError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + privilegeFunc := func() (string, error) { + return "", fmt.Errorf("Error requesting privilege") + } + _, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() != "Error requesting privilege" { + t.Fatalf("expected an error requesting privilege, got %v", err) + } +} + +func TestImageSearchWithUnauthorizedErrorAndAnotherUnauthorizedError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + privilegeFunc := func() (string, error) { + return "a-auth-header", nil + } + _, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { + t.Fatalf("expected an Unauthorized Error, got %v", err) + } +} + +func TestImageSearchWithPrivilegedFuncNoError(t *testing.T) { + expectedURL := "/images/search" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + auth := req.Header.Get("X-Registry-Auth") + if auth == "NotValid" { + return &http.Response{ + StatusCode: http.StatusUnauthorized, + Body: ioutil.NopCloser(bytes.NewReader([]byte("Invalid credentials"))), + }, nil + } + if auth != "IAmValid" { + return nil, fmt.Errorf("Invalid auth header : expected 'IAmValid', got %s", auth) + } + query := req.URL.Query() + term := query.Get("term") + if term != "some-image" { + return nil, fmt.Errorf("term not set in URL query properly. Expected 'some-image', got %s", term) + } + content, err := json.Marshal([]registry.SearchResult{ + { + Name: "anything", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + privilegeFunc := func() (string, error) { + return "IAmValid", nil + } + results, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{ + RegistryAuth: "NotValid", + PrivilegeFunc: privilegeFunc, + }) + if err != nil { + t.Fatal(err) + } + if len(results) != 1 { + t.Fatalf("expected 1 result, got %v", results) + } +} + +func TestImageSearchWithoutErrors(t *testing.T) { + expectedURL := "/images/search" + filterArgs := filters.NewArgs() + filterArgs.Add("is-automated", "true") + filterArgs.Add("stars", "3") + + expectedFilters := `{"is-automated":{"true":true},"stars":{"3":true}}` + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + term := query.Get("term") + if term != "some-image" { + return nil, fmt.Errorf("term not set in URL query properly. 
Expected 'some-image', got %s", term) + } + filters := query.Get("filters") + if filters != expectedFilters { + return nil, fmt.Errorf("filters not set in URL query properly. Expected '%s', got %s", expectedFilters, filters) + } + content, err := json.Marshal([]registry.SearchResult{ + { + Name: "anything", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + results, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{ + Filters: filterArgs, + }) + if err != nil { + t.Fatal(err) + } + if len(results) != 1 { + t.Fatalf("expected a result, got %v", results) + } +} diff --git a/vendor/github.com/moby/moby/client/image_tag.go b/vendor/github.com/moby/moby/client/image_tag.go new file mode 100644 index 000000000..8924f71eb --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_tag.go @@ -0,0 +1,37 @@ +package client + +import ( + "net/url" + + "github.com/docker/distribution/reference" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// ImageTag tags an image in the docker host +func (cli *Client) ImageTag(ctx context.Context, source, target string) error { + if _, err := reference.ParseAnyReference(source); err != nil { + return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", source) + } + + ref, err := reference.ParseNormalizedNamed(target) + if err != nil { + return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", target) + } + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return errors.New("refusing to create a tag with a digest reference") + } + + ref = reference.TagNameOnly(ref) + + query := url.Values{} + query.Set("repo", reference.FamiliarName(ref)) + if tagged, ok := ref.(reference.Tagged); ok { + query.Set("tag", tagged.Tag()) + } + + resp, err := cli.post(ctx, "/images/"+source+"/tag", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/image_tag_test.go b/vendor/github.com/moby/moby/client/image_tag_test.go new file mode 100644 index 000000000..f7a0ee331 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_tag_test.go @@ -0,0 +1,143 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestImageTagError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.ImageTag(context.Background(), "image_id", "repo:tag") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +// Note: this is not testing all the InvalidReference as it's the responsibility +// of distribution/reference package. 
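+// A few of the mappings exercised below, for orientation (illustrative):
+//
+//	"repository:tag1"    -> repo="repository",         tag="tag1"
+//	"another_repository" -> repo="another_repository", tag="latest"
+//	"aa/asdf$$^/aa"      -> rejected: invalid reference format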
+func TestImageTagInvalidReference(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.ImageTag(context.Background(), "image_id", "aa/asdf$$^/aa") + if err == nil || err.Error() != `Error parsing reference: "aa/asdf$$^/aa" is not a valid repository/tag: invalid reference format` { + t.Fatalf("expected ErrReferenceInvalidFormat, got %v", err) + } +} + +func TestImageTagInvalidSourceImageName(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.ImageTag(context.Background(), "invalid_source_image_name_", "repo:tag") + if err == nil || err.Error() != "Error parsing reference: \"invalid_source_image_name_\" is not a valid repository/tag: invalid reference format" { + t.Fatalf("expected Parsing Reference Error, got %v", err) + } +} + +func TestImageTagHexSource(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusOK, "OK")), + } + + err := client.ImageTag(context.Background(), "0d409d33b27e47423b049f7f863faa08655a8c901749c2b25b93ca67d01a470d", "repo:tag") + if err != nil { + t.Fatalf("got error: %v", err) + } +} + +func TestImageTag(t *testing.T) { + expectedURL := "/images/image_id/tag" + tagCases := []struct { + reference string + expectedQueryParams map[string]string + }{ + { + reference: "repository:tag1", + expectedQueryParams: map[string]string{ + "repo": "repository", + "tag": "tag1", + }, + }, { + reference: "another_repository:latest", + expectedQueryParams: map[string]string{ + "repo": "another_repository", + "tag": "latest", + }, + }, { + reference: "another_repository", + expectedQueryParams: map[string]string{ + "repo": "another_repository", + "tag": "latest", + }, + }, { + reference: "test/another_repository", + expectedQueryParams: map[string]string{ + "repo": "test/another_repository", + "tag": "latest", + }, + }, { + reference: "test/another_repository:tag1", + expectedQueryParams: map[string]string{ + "repo": "test/another_repository", + "tag": "tag1", + }, + }, { + reference: "test/test/another_repository:tag1", + expectedQueryParams: map[string]string{ + "repo": "test/test/another_repository", + "tag": "tag1", + }, + }, { + reference: "test:5000/test/another_repository:tag1", + expectedQueryParams: map[string]string{ + "repo": "test:5000/test/another_repository", + "tag": "tag1", + }, + }, { + reference: "test:5000/test/another_repository", + expectedQueryParams: map[string]string{ + "repo": "test:5000/test/another_repository", + "tag": "latest", + }, + }, + } + for _, tagCase := range tagCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + query := req.URL.Query() + for key, expected := range tagCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. 
Expected '%s', got %s", key, expected, actual) + } + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + err := client.ImageTag(context.Background(), "image_id", tagCase.reference) + if err != nil { + t.Fatal(err) + } + } +} diff --git a/vendor/github.com/moby/moby/client/info.go b/vendor/github.com/moby/moby/client/info.go new file mode 100644 index 000000000..ac0796122 --- /dev/null +++ b/vendor/github.com/moby/moby/client/info.go @@ -0,0 +1,26 @@ +package client + +import ( + "encoding/json" + "fmt" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// Info returns information about the docker server. +func (cli *Client) Info(ctx context.Context) (types.Info, error) { + var info types.Info + serverResp, err := cli.get(ctx, "/info", url.Values{}, nil) + if err != nil { + return info, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil { + return info, fmt.Errorf("Error reading remote info: %v", err) + } + + return info, nil +} diff --git a/vendor/github.com/moby/moby/client/info_test.go b/vendor/github.com/moby/moby/client/info_test.go new file mode 100644 index 000000000..79f23c8af --- /dev/null +++ b/vendor/github.com/moby/moby/client/info_test.go @@ -0,0 +1,76 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestInfoServerError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.Info(context.Background()) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestInfoInvalidResponseJSONError(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("invalid json"))), + }, nil + }), + } + _, err := client.Info(context.Background()) + if err == nil || !strings.Contains(err.Error(), "invalid character") { + t.Fatalf("expected a 'invalid character' error, got %v", err) + } +} + +func TestInfo(t *testing.T) { + expectedURL := "/info" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + info := &types.Info{ + ID: "daemonID", + Containers: 3, + } + b, err := json.Marshal(info) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + info, err := client.Info(context.Background()) + if err != nil { + t.Fatal(err) + } + + if info.ID != "daemonID" { + t.Fatalf("expected daemonID, got %s", info.ID) + } + + if info.Containers != 3 { + t.Fatalf("expected 3 containers, got %d", info.Containers) + } +} diff --git a/vendor/github.com/moby/moby/client/interface.go b/vendor/github.com/moby/moby/client/interface.go new file mode 100644 index 000000000..acd4de1db --- /dev/null +++ b/vendor/github.com/moby/moby/client/interface.go @@ -0,0 +1,194 @@ +package client + +import ( + "io" + "net" + "time" + + "github.com/docker/docker/api/types" + 
"github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/image" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/swarm" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" +) + +// CommonAPIClient is the common methods between stable and experimental versions of APIClient. +type CommonAPIClient interface { + ConfigAPIClient + ContainerAPIClient + DistributionAPIClient + ImageAPIClient + NodeAPIClient + NetworkAPIClient + PluginAPIClient + ServiceAPIClient + SwarmAPIClient + SecretAPIClient + SystemAPIClient + VolumeAPIClient + ClientVersion() string + DaemonHost() string + ServerVersion(ctx context.Context) (types.Version, error) + NegotiateAPIVersion(ctx context.Context) + NegotiateAPIVersionPing(types.Ping) + DialSession(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) +} + +// ContainerAPIClient defines API client methods for the containers +type ContainerAPIClient interface { + ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) + ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) + ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error) + ContainerDiff(ctx context.Context, container string) ([]container.ContainerChangeResponseItem, error) + ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) + ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) + ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) + ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error + ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error + ContainerExport(ctx context.Context, container string) (io.ReadCloser, error) + ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error) + ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error) + ContainerKill(ctx context.Context, container, signal string) error + ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) + ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) + ContainerPause(ctx context.Context, container string) error + ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error + ContainerRename(ctx context.Context, container, newContainerName string) error + ContainerResize(ctx context.Context, container string, options types.ResizeOptions) error + ContainerRestart(ctx context.Context, container string, timeout *time.Duration) error + ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error) + ContainerStats(ctx context.Context, container string, stream bool) (types.ContainerStats, error) + ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error + ContainerStop(ctx 
context.Context, container string, timeout *time.Duration) error + ContainerTop(ctx context.Context, container string, arguments []string) (container.ContainerTopOKBody, error) + ContainerUnpause(ctx context.Context, container string) error + ContainerUpdate(ctx context.Context, container string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) + ContainerWait(ctx context.Context, container string, condition container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) + CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) + CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error + ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) +} + +// DistributionAPIClient defines API client methods for the registry +type DistributionAPIClient interface { + DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registry.DistributionInspect, error) +} + +// ImageAPIClient defines API client methods for the images +type ImageAPIClient interface { + ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) + BuildCachePrune(ctx context.Context) (*types.BuildCachePruneReport, error) + ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) + ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error) + ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) + ImageInspectWithRaw(ctx context.Context, image string) (types.ImageInspect, []byte, error) + ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) + ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) + ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) + ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) + ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) + ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) + ImageSave(ctx context.Context, images []string) (io.ReadCloser, error) + ImageTag(ctx context.Context, image, ref string) error + ImagesPrune(ctx context.Context, pruneFilter filters.Args) (types.ImagesPruneReport, error) +} + +// NetworkAPIClient defines API client methods for the networks +type NetworkAPIClient interface { + NetworkConnect(ctx context.Context, networkID, container string, config *network.EndpointSettings) error + NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) + NetworkDisconnect(ctx context.Context, networkID, container string, force bool) error + NetworkInspect(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, error) + NetworkInspectWithRaw(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) + NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) + NetworkRemove(ctx context.Context, networkID string) error + NetworksPrune(ctx context.Context, 
pruneFilter filters.Args) (types.NetworksPruneReport, error) +} + +// NodeAPIClient defines API client methods for the nodes +type NodeAPIClient interface { + NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) + NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) + NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error + NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error +} + +// PluginAPIClient defines API client methods for the plugins +type PluginAPIClient interface { + PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) + PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error + PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error + PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error + PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) + PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) + PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) + PluginSet(ctx context.Context, name string, args []string) error + PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) + PluginCreate(ctx context.Context, createContext io.Reader, options types.PluginCreateOptions) error +} + +// ServiceAPIClient defines API client methods for the services +type ServiceAPIClient interface { + ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) + ServiceInspectWithRaw(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error) + ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) + ServiceRemove(ctx context.Context, serviceID string) error + ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) + ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) + TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) + TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) + TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) +} + +// SwarmAPIClient defines API client methods for the swarm +type SwarmAPIClient interface { + SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) + SwarmJoin(ctx context.Context, req swarm.JoinRequest) error + SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) + SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error + SwarmLeave(ctx context.Context, force bool) error + SwarmInspect(ctx context.Context) (swarm.Swarm, error) + SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error +} + +// SystemAPIClient defines API client methods for the system +type SystemAPIClient interface { + Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) + Info(ctx context.Context) (types.Info, error) + RegistryLogin(ctx context.Context, auth types.AuthConfig) 
(registry.AuthenticateOKBody, error) + DiskUsage(ctx context.Context) (types.DiskUsage, error) + Ping(ctx context.Context) (types.Ping, error) +} + +// VolumeAPIClient defines API client methods for the volumes +type VolumeAPIClient interface { + VolumeCreate(ctx context.Context, options volumetypes.VolumesCreateBody) (types.Volume, error) + VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) + VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) + VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumesListOKBody, error) + VolumeRemove(ctx context.Context, volumeID string, force bool) error + VolumesPrune(ctx context.Context, pruneFilter filters.Args) (types.VolumesPruneReport, error) +} + +// SecretAPIClient defines API client methods for secrets +type SecretAPIClient interface { + SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) + SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) + SecretRemove(ctx context.Context, id string) error + SecretInspectWithRaw(ctx context.Context, name string) (swarm.Secret, []byte, error) + SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error +} + +// ConfigAPIClient defines API client methods for configs +type ConfigAPIClient interface { + ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) + ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) + ConfigRemove(ctx context.Context, id string) error + ConfigInspectWithRaw(ctx context.Context, name string) (swarm.Config, []byte, error) + ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error +} diff --git a/vendor/github.com/moby/moby/client/interface_experimental.go b/vendor/github.com/moby/moby/client/interface_experimental.go new file mode 100644 index 000000000..51da98ecd --- /dev/null +++ b/vendor/github.com/moby/moby/client/interface_experimental.go @@ -0,0 +1,17 @@ +package client + +import ( + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +type apiClientExperimental interface { + CheckpointAPIClient +} + +// CheckpointAPIClient defines API client methods for the checkpoints +type CheckpointAPIClient interface { + CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error + CheckpointDelete(ctx context.Context, container string, options types.CheckpointDeleteOptions) error + CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) +} diff --git a/vendor/github.com/moby/moby/client/interface_stable.go b/vendor/github.com/moby/moby/client/interface_stable.go new file mode 100644 index 000000000..cc90a3cbb --- /dev/null +++ b/vendor/github.com/moby/moby/client/interface_stable.go @@ -0,0 +1,10 @@ +package client + +// APIClient is an interface that clients that talk with a docker server must implement. +type APIClient interface { + CommonAPIClient + apiClientExperimental +} + +// Ensure that Client always implements APIClient. 
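+// The assignment below is the usual Go compile-time assertion: it fails the
+// build if *Client ever stops satisfying APIClient. The same idiom in
+// miniature (illustrative):
+//
+//	var _ io.Reader = (*bytes.Buffer)(nil)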
+var _ APIClient = &Client{} diff --git a/vendor/github.com/moby/moby/client/login.go b/vendor/github.com/moby/moby/client/login.go new file mode 100644 index 000000000..79219ff59 --- /dev/null +++ b/vendor/github.com/moby/moby/client/login.go @@ -0,0 +1,29 @@ +package client + +import ( + "encoding/json" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/registry" + "golang.org/x/net/context" +) + +// RegistryLogin authenticates the docker server with a given docker registry. +// It returns unauthorizedError when the authentication fails. +func (cli *Client) RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) { + resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil) + + if resp.statusCode == http.StatusUnauthorized { + return registry.AuthenticateOKBody{}, unauthorizedError{err} + } + if err != nil { + return registry.AuthenticateOKBody{}, err + } + + var response registry.AuthenticateOKBody + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/moby/moby/client/network_connect.go b/vendor/github.com/moby/moby/client/network_connect.go new file mode 100644 index 000000000..c022c17b5 --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_connect.go @@ -0,0 +1,18 @@ +package client + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + "golang.org/x/net/context" +) + +// NetworkConnect connects a container to an existent network in the docker host. +func (cli *Client) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error { + nc := types.NetworkConnect{ + Container: containerID, + EndpointConfig: config, + } + resp, err := cli.post(ctx, "/networks/"+networkID+"/connect", nil, nc, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/network_connect_test.go b/vendor/github.com/moby/moby/client/network_connect_test.go new file mode 100644 index 000000000..91b1a7667 --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_connect_test.go @@ -0,0 +1,111 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" +) + +func TestNetworkConnectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.NetworkConnect(context.Background(), "network_id", "container_id", nil) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkConnectEmptyNilEndpointSettings(t *testing.T) { + expectedURL := "/networks/network_id/connect" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + + var connect types.NetworkConnect + if err := json.NewDecoder(req.Body).Decode(&connect); err != nil { + return nil, err + } + + if connect.Container != "container_id" { + return nil, fmt.Errorf("expected 'container_id', got %s", connect.Container) + } + + 
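+			// NetworkConnect below is called with a nil *network.EndpointSettings,
+			// so the decoded request body must not carry an endpoint config either.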
if connect.EndpointConfig != nil { + return nil, fmt.Errorf("expected connect.EndpointConfig to be nil, got %v", connect.EndpointConfig) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.NetworkConnect(context.Background(), "network_id", "container_id", nil) + if err != nil { + t.Fatal(err) + } +} + +func TestNetworkConnect(t *testing.T) { + expectedURL := "/networks/network_id/connect" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + + var connect types.NetworkConnect + if err := json.NewDecoder(req.Body).Decode(&connect); err != nil { + return nil, err + } + + if connect.Container != "container_id" { + return nil, fmt.Errorf("expected 'container_id', got %s", connect.Container) + } + + if connect.EndpointConfig == nil { + return nil, fmt.Errorf("expected connect.EndpointConfig to be not nil, got %v", connect.EndpointConfig) + } + + if connect.EndpointConfig.NetworkID != "NetworkID" { + return nil, fmt.Errorf("expected 'NetworkID', got %s", connect.EndpointConfig.NetworkID) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.NetworkConnect(context.Background(), "network_id", "container_id", &network.EndpointSettings{ + NetworkID: "NetworkID", + }) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/network_create.go b/vendor/github.com/moby/moby/client/network_create.go new file mode 100644 index 000000000..4067a541f --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_create.go @@ -0,0 +1,25 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// NetworkCreate creates a new network in the docker host. 
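+//
+// A minimal usage sketch, assuming an initialized *Client named cli and a
+// context ctx; the network name and driver here are illustrative only:
+//
+//	resp, err := cli.NetworkCreate(ctx, "mynet", types.NetworkCreate{Driver: "bridge"})
+//	if err == nil {
+//		fmt.Println(resp.ID, resp.Warning)
+//	}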
+func (cli *Client) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) {
+	networkCreateRequest := types.NetworkCreateRequest{
+		NetworkCreate: options,
+		Name:          name,
+	}
+	var response types.NetworkCreateResponse
+	serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil)
+	if err != nil {
+		return response, err
+	}
+
+	err = json.NewDecoder(serverResp.body).Decode(&response)
+	ensureReaderClosed(serverResp)
+	return response, err
+}
diff --git a/vendor/github.com/moby/moby/client/network_create_test.go b/vendor/github.com/moby/moby/client/network_create_test.go
new file mode 100644
index 000000000..0e2457f89
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/network_create_test.go
@@ -0,0 +1,72 @@
+package client
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"testing"
+
+	"github.com/docker/docker/api/types"
+	"golang.org/x/net/context"
+)
+
+func TestNetworkCreateError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
+	}
+
+	_, err := client.NetworkCreate(context.Background(), "mynetwork", types.NetworkCreate{})
+	if err == nil || err.Error() != "Error response from daemon: Server error" {
+		t.Fatalf("expected a Server Error, got %v", err)
+	}
+}
+
+func TestNetworkCreate(t *testing.T) {
+	expectedURL := "/networks/create"
+
+	client := &Client{
+		client: newMockClient(func(req *http.Request) (*http.Response, error) {
+			if !strings.HasPrefix(req.URL.Path, expectedURL) {
+				return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL)
+			}
+
+			if req.Method != "POST" {
+				return nil, fmt.Errorf("expected POST method, got %s", req.Method)
+			}
+
+			content, err := json.Marshal(types.NetworkCreateResponse{
+				ID:      "network_id",
+				Warning: "warning",
+			})
+			if err != nil {
+				return nil, err
+			}
+			return &http.Response{
+				StatusCode: http.StatusOK,
+				Body:       ioutil.NopCloser(bytes.NewReader(content)),
+			}, nil
+		}),
+	}
+
+	networkResponse, err := client.NetworkCreate(context.Background(), "mynetwork", types.NetworkCreate{
+		CheckDuplicate: true,
+		Driver:         "mydriver",
+		EnableIPv6:     true,
+		Internal:       true,
+		Options: map[string]string{
+			"opt-key": "opt-value",
+		},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if networkResponse.ID != "network_id" {
+		t.Fatalf("expected networkResponse.ID to be 'network_id', got %s", networkResponse.ID)
+	}
+	if networkResponse.Warning != "warning" {
+		t.Fatalf("expected networkResponse.Warning to be 'warning', got %s", networkResponse.Warning)
+	}
+}
diff --git a/vendor/github.com/moby/moby/client/network_disconnect.go b/vendor/github.com/moby/moby/client/network_disconnect.go
new file mode 100644
index 000000000..24b58e3c1
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/network_disconnect.go
@@ -0,0 +1,14 @@
+package client
+
+import (
+	"github.com/docker/docker/api/types"
+	"golang.org/x/net/context"
+)
+
+// NetworkDisconnect disconnects a container from an existent network in the docker host.
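+//
+// A minimal sketch, assuming an initialized *Client named cli and a context
+// ctx; the IDs are illustrative:
+//
+//	err := cli.NetworkDisconnect(ctx, "network_id", "container_id", false)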
+func (cli *Client) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error { + nd := types.NetworkDisconnect{Container: containerID, Force: force} + resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, nd, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/network_disconnect_test.go b/vendor/github.com/moby/moby/client/network_disconnect_test.go new file mode 100644 index 000000000..b54a2b1cc --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_disconnect_test.go @@ -0,0 +1,64 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestNetworkDisconnectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.NetworkDisconnect(context.Background(), "network_id", "container_id", false) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkDisconnect(t *testing.T) { + expectedURL := "/networks/network_id/disconnect" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + + var disconnect types.NetworkDisconnect + if err := json.NewDecoder(req.Body).Decode(&disconnect); err != nil { + return nil, err + } + + if disconnect.Container != "container_id" { + return nil, fmt.Errorf("expected 'container_id', got %s", disconnect.Container) + } + + if !disconnect.Force { + return nil, fmt.Errorf("expected Force to be true, got %v", disconnect.Force) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.NetworkDisconnect(context.Background(), "network_id", "container_id", true) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/network_inspect.go b/vendor/github.com/moby/moby/client/network_inspect.go new file mode 100644 index 000000000..848c9799f --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_inspect.go @@ -0,0 +1,50 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// NetworkInspect returns the information for a specific network configured in the docker host. +func (cli *Client) NetworkInspect(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, error) { + networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID, options) + return networkResource, err +} + +// NetworkInspectWithRaw returns the information for a specific network configured in the docker host and its raw representation. 
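+//
+// A minimal sketch, assuming an initialized *Client named cli; Verbose adds
+// per-service detail for swarm-scoped networks:
+//
+//	res, raw, err := cli.NetworkInspectWithRaw(ctx, "network_id", types.NetworkInspectOptions{Verbose: true})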
+func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) { + var ( + networkResource types.NetworkResource + resp serverResponse + err error + ) + query := url.Values{} + if options.Verbose { + query.Set("verbose", "true") + } + if options.Scope != "" { + query.Set("scope", options.Scope) + } + resp, err = cli.get(ctx, "/networks/"+networkID, query, nil) + if err != nil { + if resp.statusCode == http.StatusNotFound { + return networkResource, nil, networkNotFoundError{networkID} + } + return networkResource, nil, err + } + defer ensureReaderClosed(resp) + + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return networkResource, nil, err + } + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&networkResource) + return networkResource, body, err +} diff --git a/vendor/github.com/moby/moby/client/network_inspect_test.go b/vendor/github.com/moby/moby/client/network_inspect_test.go new file mode 100644 index 000000000..9bfb55d74 --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_inspect_test.go @@ -0,0 +1,107 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestNetworkInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.NetworkInspect(context.Background(), "nothing", types.NetworkInspectOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkInspectContainerNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, err := client.NetworkInspect(context.Background(), "unknown", types.NetworkInspectOptions{}) + if err == nil || !IsErrNetworkNotFound(err) { + t.Fatalf("expected a networkNotFound error, got %v", err) + } +} + +func TestNetworkInspect(t *testing.T) { + expectedURL := "/networks/network_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "GET" { + return nil, fmt.Errorf("expected GET method, got %s", req.Method) + } + + var ( + content []byte + err error + ) + if strings.Contains(req.URL.RawQuery, "scope=global") { + return &http.Response{ + StatusCode: http.StatusNotFound, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + } + + if strings.Contains(req.URL.RawQuery, "verbose=true") { + s := map[string]network.ServiceInfo{ + "web": {}, + } + content, err = json.Marshal(types.NetworkResource{ + Name: "mynetwork", + Services: s, + }) + } else { + content, err = json.Marshal(types.NetworkResource{ + Name: "mynetwork", + }) + } + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + r, err := client.NetworkInspect(context.Background(), "network_id", types.NetworkInspectOptions{}) + if err != nil { + t.Fatal(err) + } + if r.Name != "mynetwork" { + t.Fatalf("expected `mynetwork`, got %s", r.Name) + } + + r, err = 
client.NetworkInspect(context.Background(), "network_id", types.NetworkInspectOptions{Verbose: true}) + if err != nil { + t.Fatal(err) + } + if r.Name != "mynetwork" { + t.Fatalf("expected `mynetwork`, got %s", r.Name) + } + _, ok := r.Services["web"] + if !ok { + t.Fatalf("expected service `web` missing in the verbose output") + } + + _, err = client.NetworkInspect(context.Background(), "network_id", types.NetworkInspectOptions{Scope: "global"}) + assert.EqualError(t, err, "Error: No such network: network_id") +} diff --git a/vendor/github.com/moby/moby/client/network_list.go b/vendor/github.com/moby/moby/client/network_list.go new file mode 100644 index 000000000..e566a93e2 --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_list.go @@ -0,0 +1,31 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// NetworkList returns the list of networks configured in the docker host. +func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) { + query := url.Values{} + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + var networkResources []types.NetworkResource + resp, err := cli.get(ctx, "/networks", query, nil) + if err != nil { + return networkResources, err + } + err = json.NewDecoder(resp.body).Decode(&networkResources) + ensureReaderClosed(resp) + return networkResources, err +} diff --git a/vendor/github.com/moby/moby/client/network_list_test.go b/vendor/github.com/moby/moby/client/network_list_test.go new file mode 100644 index 000000000..4d443496a --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_list_test.go @@ -0,0 +1,108 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +func TestNetworkListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.NetworkList(context.Background(), types.NetworkListOptions{ + Filters: filters.NewArgs(), + }) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkList(t *testing.T) { + expectedURL := "/networks" + + noDanglingFilters := filters.NewArgs() + noDanglingFilters.Add("dangling", "false") + + danglingFilters := filters.NewArgs() + danglingFilters.Add("dangling", "true") + + labelFilters := filters.NewArgs() + labelFilters.Add("label", "label1") + labelFilters.Add("label", "label2") + + listCases := []struct { + options types.NetworkListOptions + expectedFilters string + }{ + { + options: types.NetworkListOptions{ + Filters: filters.NewArgs(), + }, + expectedFilters: "", + }, { + options: types.NetworkListOptions{ + Filters: noDanglingFilters, + }, + expectedFilters: `{"dangling":{"false":true}}`, + }, { + options: types.NetworkListOptions{ + Filters: danglingFilters, + }, + expectedFilters: `{"dangling":{"true":true}}`, + }, { + options: types.NetworkListOptions{ + Filters: labelFilters, + }, + expectedFilters: `{"label":{"label1":true,"label2":true}}`, + }, + } + + for _, listCase := range listCases { + client := 
&Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "GET" { + return nil, fmt.Errorf("expected GET method, got %s", req.Method) + } + query := req.URL.Query() + actualFilters := query.Get("filters") + if actualFilters != listCase.expectedFilters { + return nil, fmt.Errorf("filters not set in URL query properly. Expected '%s', got %s", listCase.expectedFilters, actualFilters) + } + content, err := json.Marshal([]types.NetworkResource{ + { + Name: "network", + Driver: "bridge", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + networkResources, err := client.NetworkList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(networkResources) != 1 { + t.Fatalf("expected 1 network resource, got %v", networkResources) + } + } +} diff --git a/vendor/github.com/moby/moby/client/network_prune.go b/vendor/github.com/moby/moby/client/network_prune.go new file mode 100644 index 000000000..7352a7f0c --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_prune.go @@ -0,0 +1,36 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// NetworksPrune requests the daemon to delete unused networks +func (cli *Client) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (types.NetworksPruneReport, error) { + var report types.NetworksPruneReport + + if err := cli.NewVersionError("1.25", "network prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/networks/prune", query, nil, nil) + if err != nil { + return report, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving network prune report: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/moby/moby/client/network_prune_test.go b/vendor/github.com/moby/moby/client/network_prune_test.go new file mode 100644 index 000000000..3e4f5d041 --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_prune_test.go @@ -0,0 +1,112 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestNetworksPruneError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + version: "1.25", + } + + filters := filters.NewArgs() + + _, err := client.NetworksPrune(context.Background(), filters) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworksPrune(t *testing.T) { + expectedURL := "/v1.25/networks/prune" + + danglingFilters := filters.NewArgs() + danglingFilters.Add("dangling", "true") + + noDanglingFilters := filters.NewArgs() + noDanglingFilters.Add("dangling", "false") + + labelFilters := filters.NewArgs() + labelFilters.Add("dangling", "true") + 
labelFilters.Add("label", "label1=foo") + labelFilters.Add("label", "label2!=bar") + + listCases := []struct { + filters filters.Args + expectedQueryParams map[string]string + }{ + { + filters: filters.Args{}, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": "", + }, + }, + { + filters: danglingFilters, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": `{"dangling":{"true":true}}`, + }, + }, + { + filters: noDanglingFilters, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": `{"dangling":{"false":true}}`, + }, + }, + { + filters: labelFilters, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": `{"dangling":{"true":true},"label":{"label1=foo":true,"label2!=bar":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + assert.Equal(t, expected, actual) + } + content, err := json.Marshal(types.NetworksPruneReport{ + NetworksDeleted: []string{"network_id1", "network_id2"}, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + version: "1.25", + } + + report, err := client.NetworksPrune(context.Background(), listCase.filters) + assert.NoError(t, err) + assert.Len(t, report.NetworksDeleted, 2) + } +} diff --git a/vendor/github.com/moby/moby/client/network_remove.go b/vendor/github.com/moby/moby/client/network_remove.go new file mode 100644 index 000000000..6bd674892 --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_remove.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// NetworkRemove removes an existent network from the docker host. 
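+//
+// A minimal sketch, assuming an initialized *Client named cli and an
+// illustrative network ID:
+//
+//	err := cli.NetworkRemove(ctx, "network_id")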
+func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error { + resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/network_remove_test.go b/vendor/github.com/moby/moby/client/network_remove_test.go new file mode 100644 index 000000000..2a7b9640c --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_remove_test.go @@ -0,0 +1,47 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestNetworkRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.NetworkRemove(context.Background(), "network_id") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkRemove(t *testing.T) { + expectedURL := "/networks/network_id" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.NetworkRemove(context.Background(), "network_id") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/node_inspect.go b/vendor/github.com/moby/moby/client/node_inspect.go new file mode 100644 index 000000000..abf505d29 --- /dev/null +++ b/vendor/github.com/moby/moby/client/node_inspect.go @@ -0,0 +1,33 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// NodeInspectWithRaw returns the node information. 
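+//
+// A minimal sketch, assuming an initialized *Client named cli; the second
+// return value is the raw JSON body of the response:
+//
+//	node, raw, err := cli.NodeInspectWithRaw(ctx, "node_id")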
+func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) { + serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return swarm.Node{}, nil, nodeNotFoundError{nodeID} + } + return swarm.Node{}, nil, err + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return swarm.Node{}, nil, err + } + + var response swarm.Node + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/moby/moby/client/node_inspect_test.go b/vendor/github.com/moby/moby/client/node_inspect_test.go new file mode 100644 index 000000000..dca16a8cd --- /dev/null +++ b/vendor/github.com/moby/moby/client/node_inspect_test.go @@ -0,0 +1,65 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestNodeInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.NodeInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNodeInspectNodeNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, _, err := client.NodeInspectWithRaw(context.Background(), "unknown") + if err == nil || !IsErrNodeNotFound(err) { + t.Fatalf("expected a nodeNotFoundError error, got %v", err) + } +} + +func TestNodeInspect(t *testing.T) { + expectedURL := "/nodes/node_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(swarm.Node{ + ID: "node_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + nodeInspect, _, err := client.NodeInspectWithRaw(context.Background(), "node_id") + if err != nil { + t.Fatal(err) + } + if nodeInspect.ID != "node_id" { + t.Fatalf("expected `node_id`, got %s", nodeInspect.ID) + } +} diff --git a/vendor/github.com/moby/moby/client/node_list.go b/vendor/github.com/moby/moby/client/node_list.go new file mode 100644 index 000000000..3e8440f08 --- /dev/null +++ b/vendor/github.com/moby/moby/client/node_list.go @@ -0,0 +1,36 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// NodeList returns the list of nodes. 
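+//
+// A minimal sketch, assuming an initialized *Client named cli; the "role"
+// filter shown here is illustrative:
+//
+//	f := filters.NewArgs()
+//	f.Add("role", "manager")
+//	nodes, err := cli.NodeList(ctx, types.NodeListOptions{Filters: f})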
+func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/nodes", query, nil) + if err != nil { + return nil, err + } + + var nodes []swarm.Node + err = json.NewDecoder(resp.body).Decode(&nodes) + ensureReaderClosed(resp) + return nodes, err +} diff --git a/vendor/github.com/moby/moby/client/node_list_test.go b/vendor/github.com/moby/moby/client/node_list_test.go new file mode 100644 index 000000000..0251b5cce --- /dev/null +++ b/vendor/github.com/moby/moby/client/node_list_test.go @@ -0,0 +1,94 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestNodeListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.NodeList(context.Background(), types.NodeListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNodeList(t *testing.T) { + expectedURL := "/nodes" + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + + listCases := []struct { + options types.NodeListOptions + expectedQueryParams map[string]string + }{ + { + options: types.NodeListOptions{}, + expectedQueryParams: map[string]string{ + "filters": "", + }, + }, + { + options: types.NodeListOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": `{"label":{"label1":true,"label2":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]swarm.Node{ + { + ID: "node_id1", + }, + { + ID: "node_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + nodes, err := client.NodeList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(nodes) != 2 { + t.Fatalf("expected 2 nodes, got %v", nodes) + } + } +} diff --git a/vendor/github.com/moby/moby/client/node_remove.go b/vendor/github.com/moby/moby/client/node_remove.go new file mode 100644 index 000000000..0a77f3d57 --- /dev/null +++ b/vendor/github.com/moby/moby/client/node_remove.go @@ -0,0 +1,21 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + + "golang.org/x/net/context" +) + +// NodeRemove removes a Node. 
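+//
+// A minimal sketch, assuming an initialized *Client named cli:
+//
+//	err := cli.NodeRemove(ctx, "node_id", types.NodeRemoveOptions{Force: false})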
+func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error { + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/node_remove_test.go b/vendor/github.com/moby/moby/client/node_remove_test.go new file mode 100644 index 000000000..f2f8adc4a --- /dev/null +++ b/vendor/github.com/moby/moby/client/node_remove_test.go @@ -0,0 +1,69 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + + "golang.org/x/net/context" +) + +func TestNodeRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.NodeRemove(context.Background(), "node_id", types.NodeRemoveOptions{Force: false}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNodeRemove(t *testing.T) { + expectedURL := "/nodes/node_id" + + removeCases := []struct { + force bool + expectedForce string + }{ + { + expectedForce: "", + }, + { + force: true, + expectedForce: "1", + }, + } + + for _, removeCase := range removeCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + force := req.URL.Query().Get("force") + if force != removeCase.expectedForce { + return nil, fmt.Errorf("force not set in URL query properly. expected '%s', got %s", removeCase.expectedForce, force) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.NodeRemove(context.Background(), "node_id", types.NodeRemoveOptions{Force: removeCase.force}) + if err != nil { + t.Fatal(err) + } + } +} diff --git a/vendor/github.com/moby/moby/client/node_update.go b/vendor/github.com/moby/moby/client/node_update.go new file mode 100644 index 000000000..3ca976028 --- /dev/null +++ b/vendor/github.com/moby/moby/client/node_update.go @@ -0,0 +1,18 @@ +package client + +import ( + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// NodeUpdate updates a Node. 
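+//
+// The version must come from a current inspect, or the swarm rejects the
+// update. A minimal sketch, assuming an initialized *Client named cli:
+//
+//	node, _, err := cli.NodeInspectWithRaw(ctx, "node_id")
+//	if err == nil {
+//		node.Spec.Availability = swarm.NodeAvailabilityDrain
+//		err = cli.NodeUpdate(ctx, "node_id", node.Version, node.Spec)
+//	}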
+func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error {
+	query := url.Values{}
+	query.Set("version", strconv.FormatUint(version.Index, 10))
+	resp, err := cli.post(ctx, "/nodes/"+nodeID+"/update", query, node, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/vendor/github.com/moby/moby/client/node_update_test.go b/vendor/github.com/moby/moby/client/node_update_test.go
new file mode 100644
index 000000000..613ff104e
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/node_update_test.go
@@ -0,0 +1,49 @@
+package client
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"testing"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/docker/api/types/swarm"
+)
+
+func TestNodeUpdateError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
+	}
+
+	err := client.NodeUpdate(context.Background(), "node_id", swarm.Version{}, swarm.NodeSpec{})
+	if err == nil || err.Error() != "Error response from daemon: Server error" {
+		t.Fatalf("expected a Server Error, got %v", err)
+	}
+}
+
+func TestNodeUpdate(t *testing.T) {
+	expectedURL := "/nodes/node_id/update"
+
+	client := &Client{
+		client: newMockClient(func(req *http.Request) (*http.Response, error) {
+			if !strings.HasPrefix(req.URL.Path, expectedURL) {
+				return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL)
+			}
+			if req.Method != "POST" {
+				return nil, fmt.Errorf("expected POST method, got %s", req.Method)
+			}
+			return &http.Response{
+				StatusCode: http.StatusOK,
+				Body:       ioutil.NopCloser(bytes.NewReader([]byte("body"))),
+			}, nil
+		}),
+	}
+
+	err := client.NodeUpdate(context.Background(), "node_id", swarm.Version{}, swarm.NodeSpec{})
+	if err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/vendor/github.com/moby/moby/client/parse_logs.go b/vendor/github.com/moby/moby/client/parse_logs.go
new file mode 100644
index 000000000..e427f80a7
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/parse_logs.go
@@ -0,0 +1,41 @@
+package client
+
+// parse_logs.go contains utility helpers for getting information out of docker
+// log lines. Really, it only contains ParseLogDetails right now. Maybe in the
+// future there will be some desire to parse log messages back into a struct;
+// that would go here if we did.
+
+import (
+	"net/url"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+// ParseLogDetails takes a details string of key-value pairs in the form
+// "k=v,l=w", where the keys and values are url query escaped and each pair
+// is separated by a comma. It returns the pairs as a map, or an error if
+// the details string is not in a valid format.
+// The exact form of the details encoding is implemented in
+// api/server/httputils/write_log_stream.go.
+func ParseLogDetails(details string) (map[string]string, error) {
+	pairs := strings.Split(details, ",")
+	detailsMap := make(map[string]string, len(pairs))
+	for _, pair := range pairs {
+		p := strings.SplitN(pair, "=", 2)
+		// if there is no equals sign, we will only get 1 part back
+		if len(p) != 2 {
+			return nil, errors.New("invalid details format")
+		}
+		k, err := url.QueryUnescape(p[0])
+		if err != nil {
+			return nil, err
+		}
+		v, err := url.QueryUnescape(p[1])
+		if err != nil {
+			return nil, err
+		}
+		detailsMap[k] = v
+	}
+	return detailsMap, nil
+}
diff --git a/vendor/github.com/moby/moby/client/parse_logs_test.go b/vendor/github.com/moby/moby/client/parse_logs_test.go
new file mode 100644
index 000000000..ac7f61679
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/parse_logs_test.go
@@ -0,0 +1,36 @@
+package client
+
+import (
+	"reflect"
+	"testing"
+
+	"github.com/pkg/errors"
+)
+
+func TestParseLogDetails(t *testing.T) {
+	testCases := []struct {
+		line     string
+		expected map[string]string
+		err      error
+	}{
+		{"key=value", map[string]string{"key": "value"}, nil},
+		{"key1=value1,key2=value2", map[string]string{"key1": "value1", "key2": "value2"}, nil},
+		{"key+with+spaces=value%3Dequals,asdf%2C=", map[string]string{"key with spaces": "value=equals", "asdf,": ""}, nil},
+		{"key=,=nothing", map[string]string{"key": "", "": "nothing"}, nil},
+		{"=", map[string]string{"": ""}, nil},
+		{"errors", nil, errors.New("invalid details format")},
+	}
+	for _, tc := range testCases {
+		tc := tc // capture range variable
+		t.Run(tc.line, func(t *testing.T) {
+			t.Parallel()
+			res, err := ParseLogDetails(tc.line)
+			if err != nil && (err.Error() != tc.err.Error()) {
+				t.Fatalf("unexpected error parsing logs:\nExpected:\n\t%v\nActual:\n\t%v", tc.err, err)
+			}
+			if !reflect.DeepEqual(tc.expected, res) {
+				t.Errorf("result does not match expected:\nExpected:\n\t%#v\nActual:\n\t%#v", tc.expected, res)
+			}
+		})
+	}
+}
diff --git a/vendor/github.com/moby/moby/client/ping.go b/vendor/github.com/moby/moby/client/ping.go
new file mode 100644
index 000000000..a4c2e2c4d
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/ping.go
@@ -0,0 +1,32 @@
+package client
+
+import (
+	"github.com/docker/docker/api/types"
+	"golang.org/x/net/context"
+)
+
+// Ping pings the server and returns the value of the "Docker-Experimental", "OSType" & "API-Version" headers
+func (cli *Client) Ping(ctx context.Context) (types.Ping, error) {
+	var ping types.Ping
+	req, err := cli.buildRequest("GET", cli.basePath+"/_ping", nil, nil)
+	if err != nil {
+		return ping, err
+	}
+	serverResp, err := cli.doRequest(ctx, req)
+	if err != nil {
+		return ping, err
+	}
+	defer ensureReaderClosed(serverResp)
+
+	if serverResp.header != nil {
+		ping.APIVersion = serverResp.header.Get("API-Version")
+
+		if serverResp.header.Get("Docker-Experimental") == "true" {
+			ping.Experimental = true
+		}
+		ping.OSType = serverResp.header.Get("OSType")
+	}
+
+	err = cli.checkResponseErr(serverResp)
+	return ping, err
+}
diff --git a/vendor/github.com/moby/moby/client/ping_test.go b/vendor/github.com/moby/moby/client/ping_test.go
new file mode 100644
index 000000000..7a4a1a902
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/ping_test.go
@@ -0,0 +1,82 @@
+package client
+
+import (
+	"errors"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"golang.org/x/net/context"
+)
+
+// TestPingFail tests that, when a server sends a non-successful response, we
+// can still grab API details when they are set.
+// Some of this is just exercising the code paths to make sure there are no
+// panics.
+func TestPingFail(t *testing.T) {
+	var withHeader bool
+	client := &Client{
+		client: newMockClient(func(req *http.Request) (*http.Response, error) {
+			resp := &http.Response{StatusCode: http.StatusInternalServerError}
+			if withHeader {
+				resp.Header = http.Header{}
+				resp.Header.Set("API-Version", "awesome")
+				resp.Header.Set("Docker-Experimental", "true")
+			}
+			resp.Body = ioutil.NopCloser(strings.NewReader("some error with the server"))
+			return resp, nil
+		}),
+	}
+
+	ping, err := client.Ping(context.Background())
+	assert.Error(t, err)
+	assert.Equal(t, false, ping.Experimental)
+	assert.Equal(t, "", ping.APIVersion)
+
+	withHeader = true
+	ping2, err := client.Ping(context.Background())
+	assert.Error(t, err)
+	assert.Equal(t, true, ping2.Experimental)
+	assert.Equal(t, "awesome", ping2.APIVersion)
+}
+
+// TestPingWithError tests the case where there is a protocol error in the ping.
+// This test is mostly just testing that there are no panics in this code path.
+func TestPingWithError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(func(req *http.Request) (*http.Response, error) {
+			resp := &http.Response{StatusCode: http.StatusInternalServerError}
+			resp.Header = http.Header{}
+			resp.Header.Set("API-Version", "awesome")
+			resp.Header.Set("Docker-Experimental", "true")
+			resp.Body = ioutil.NopCloser(strings.NewReader("some error with the server"))
+			return resp, errors.New("some error")
+		}),
+	}
+
+	ping, err := client.Ping(context.Background())
+	assert.Error(t, err)
+	assert.Equal(t, false, ping.Experimental)
+	assert.Equal(t, "", ping.APIVersion)
+}
+
+// TestPingSuccess tests that we are able to get the expected API headers/ping
+// details on success.
+func TestPingSuccess(t *testing.T) {
+	client := &Client{
+		client: newMockClient(func(req *http.Request) (*http.Response, error) {
+			resp := &http.Response{StatusCode: http.StatusOK}
+			resp.Header = http.Header{}
+			resp.Header.Set("API-Version", "awesome")
+			resp.Header.Set("Docker-Experimental", "true")
+			resp.Body = ioutil.NopCloser(strings.NewReader(""))
+			return resp, nil
+		}),
+	}
+	ping, err := client.Ping(context.Background())
+	assert.NoError(t, err)
+	assert.Equal(t, true, ping.Experimental)
+	assert.Equal(t, "awesome", ping.APIVersion)
+}
diff --git a/vendor/github.com/moby/moby/client/plugin_create.go b/vendor/github.com/moby/moby/client/plugin_create.go
new file mode 100644
index 000000000..27954aa57
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/plugin_create.go
@@ -0,0 +1,26 @@
+package client
+
+import (
+	"io"
+	"net/http"
+	"net/url"
+
+	"github.com/docker/docker/api/types"
+	"golang.org/x/net/context"
+)
+
+// PluginCreate creates a plugin
+func (cli *Client) PluginCreate(ctx context.Context, createContext io.Reader, createOptions types.PluginCreateOptions) error {
+	headers := http.Header(make(map[string][]string))
+	headers.Set("Content-Type", "application/x-tar")
+
+	query := url.Values{}
+	query.Set("name", createOptions.RepoName)
+
+	resp, err := cli.postRaw(ctx, "/plugins/create", query, createContext, headers)
+	if err != nil {
+		return err
+	}
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/vendor/github.com/moby/moby/client/plugin_disable.go b/vendor/github.com/moby/moby/client/plugin_disable.go
new file mode 100644
index 000000000..30467db74
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/plugin_disable.go
@@ -0,0 +1,19 @@
+package client
+
+import (
+	"net/url"
+
+	"github.com/docker/docker/api/types"
+	"golang.org/x/net/context"
+)
+
+// PluginDisable disables a plugin
+func (cli *Client) PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error {
+	query := url.Values{}
+	if options.Force {
+		query.Set("force", "1")
+	}
+	resp, err := cli.post(ctx, "/plugins/"+name+"/disable", query, nil, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/vendor/github.com/moby/moby/client/plugin_disable_test.go b/vendor/github.com/moby/moby/client/plugin_disable_test.go
new file mode 100644
index 000000000..a4de45be2
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/plugin_disable_test.go
@@ -0,0 +1,48 @@
+package client
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"testing"
+
+	"github.com/docker/docker/api/types"
+	"golang.org/x/net/context"
+)
+
+func TestPluginDisableError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
+	}
+
+	err := client.PluginDisable(context.Background(), "plugin_name", types.PluginDisableOptions{})
+	if err == nil || err.Error() != "Error response from daemon: Server error" {
+		t.Fatalf("expected a Server Error, got %v", err)
+	}
+}
+
+func TestPluginDisable(t *testing.T) {
+	expectedURL := "/plugins/plugin_name/disable"
+
+	client := &Client{
+		client: newMockClient(func(req *http.Request) (*http.Response, error) {
+			if !strings.HasPrefix(req.URL.Path, expectedURL) {
+				return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL)
+			}
+			if req.Method != "POST" {
+				return nil, fmt.Errorf("expected POST method, got %s", req.Method)
+			}
+			return &http.Response{
+				StatusCode: http.StatusOK,
+				Body:
ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.PluginDisable(context.Background(), "plugin_name", types.PluginDisableOptions{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/plugin_enable.go b/vendor/github.com/moby/moby/client/plugin_enable.go new file mode 100644 index 000000000..95517c4b8 --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_enable.go @@ -0,0 +1,19 @@ +package client + +import ( + "net/url" + "strconv" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// PluginEnable enables a plugin +func (cli *Client) PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error { + query := url.Values{} + query.Set("timeout", strconv.Itoa(options.Timeout)) + + resp, err := cli.post(ctx, "/plugins/"+name+"/enable", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/plugin_enable_test.go b/vendor/github.com/moby/moby/client/plugin_enable_test.go new file mode 100644 index 000000000..b27681348 --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_enable_test.go @@ -0,0 +1,48 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestPluginEnableError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.PluginEnable(context.Background(), "plugin_name", types.PluginEnableOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginEnable(t *testing.T) { + expectedURL := "/plugins/plugin_name/enable" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.PluginEnable(context.Background(), "plugin_name", types.PluginEnableOptions{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/plugin_inspect.go b/vendor/github.com/moby/moby/client/plugin_inspect.go new file mode 100644 index 000000000..89f39ee2c --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_inspect.go @@ -0,0 +1,32 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// PluginInspectWithRaw inspects an existing plugin +func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) { + resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil) + if err != nil { + if resp.statusCode == http.StatusNotFound { + return nil, nil, pluginNotFoundError{name} + } + return nil, nil, err + } + + defer ensureReaderClosed(resp) + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return nil, nil, err + } + var p types.Plugin + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&p) + return &p, body, err +} diff --git a/vendor/github.com/moby/moby/client/plugin_inspect_test.go 
b/vendor/github.com/moby/moby/client/plugin_inspect_test.go new file mode 100644 index 000000000..fae407eb9 --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_inspect_test.go @@ -0,0 +1,54 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestPluginInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.PluginInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginInspect(t *testing.T) { + expectedURL := "/plugins/plugin_name" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(types.Plugin{ + ID: "plugin_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + pluginInspect, _, err := client.PluginInspectWithRaw(context.Background(), "plugin_name") + if err != nil { + t.Fatal(err) + } + if pluginInspect.ID != "plugin_id" { + t.Fatalf("expected `plugin_id`, got %s", pluginInspect.ID) + } +} diff --git a/vendor/github.com/moby/moby/client/plugin_install.go b/vendor/github.com/moby/moby/client/plugin_install.go new file mode 100644 index 000000000..ce3e0506e --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_install.go @@ -0,0 +1,113 @@ +package client + +import ( + "encoding/json" + "io" + "net/http" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// PluginInstall installs a plugin +func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { + query := url.Values{} + if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { + return nil, errors.Wrap(err, "invalid remote reference") + } + query.Set("remote", options.RemoteRef) + + privileges, err := cli.checkPluginPermissions(ctx, query, options) + if err != nil { + return nil, err + } + + // set name for plugin pull, if empty should default to remote reference + query.Set("name", name) + + resp, err := cli.tryPluginPull(ctx, query, privileges, options.RegistryAuth) + if err != nil { + return nil, err + } + + name = resp.header.Get("Docker-Plugin-Name") + + pr, pw := io.Pipe() + go func() { // todo: the client should probably be designed more around the actual api + _, err := io.Copy(pw, resp.body) + if err != nil { + pw.CloseWithError(err) + return + } + defer func() { + if err != nil { + delResp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil) + ensureReaderClosed(delResp) + } + }() + if len(options.Args) > 0 { + if err := cli.PluginSet(ctx, name, options.Args); err != nil { + pw.CloseWithError(err) + return + } + } + + if options.Disabled { + pw.Close() + return + } + + enableErr := cli.PluginEnable(ctx, name, types.PluginEnableOptions{Timeout: 0}) + pw.CloseWithError(enableErr) + }() + return pr, nil +} + +func (cli *Client) tryPluginPrivileges(ctx context.Context, query url.Values, registryAuth 
string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.get(ctx, "/plugins/privileges", query, headers) +} + +func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileges types.PluginPrivileges, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/plugins/pull", query, privileges, headers) +} + +func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, options types.PluginInstallOptions) (types.PluginPrivileges, error) { + resp, err := cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) + if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + // todo: do inspect before to check existing name before checking privileges + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + ensureReaderClosed(resp) + return nil, privilegeErr + } + options.RegistryAuth = newAuthHeader + resp, err = cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) + } + if err != nil { + ensureReaderClosed(resp) + return nil, err + } + + var privileges types.PluginPrivileges + if err := json.NewDecoder(resp.body).Decode(&privileges); err != nil { + ensureReaderClosed(resp) + return nil, err + } + ensureReaderClosed(resp) + + if !options.AcceptAllPermissions && options.AcceptPermissionsFunc != nil && len(privileges) > 0 { + accept, err := options.AcceptPermissionsFunc(privileges) + if err != nil { + return nil, err + } + if !accept { + return nil, pluginPermissionDenied{options.RemoteRef} + } + } + return privileges, nil +} diff --git a/vendor/github.com/moby/moby/client/plugin_list.go b/vendor/github.com/moby/moby/client/plugin_list.go new file mode 100644 index 000000000..3acde3b96 --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_list.go @@ -0,0 +1,32 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// PluginList returns the installed plugins +func (cli *Client) PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) { + var plugins types.PluginsListResponse + query := url.Values{} + + if filter.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, filter) + if err != nil { + return plugins, err + } + query.Set("filters", filterJSON) + } + resp, err := cli.get(ctx, "/plugins", query, nil) + if err != nil { + return plugins, err + } + + err = json.NewDecoder(resp.body).Decode(&plugins) + ensureReaderClosed(resp) + return plugins, err +} diff --git a/vendor/github.com/moby/moby/client/plugin_list_test.go b/vendor/github.com/moby/moby/client/plugin_list_test.go new file mode 100644 index 000000000..6887079b4 --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_list_test.go @@ -0,0 +1,107 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +func TestPluginListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.PluginList(context.Background(), filters.NewArgs()) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", 
err) + } +} + +func TestPluginList(t *testing.T) { + expectedURL := "/plugins" + + enabledFilters := filters.NewArgs() + enabledFilters.Add("enabled", "true") + + capabilityFilters := filters.NewArgs() + capabilityFilters.Add("capability", "volumedriver") + capabilityFilters.Add("capability", "authz") + + listCases := []struct { + filters filters.Args + expectedQueryParams map[string]string + }{ + { + filters: filters.NewArgs(), + expectedQueryParams: map[string]string{ + "all": "", + "filter": "", + "filters": "", + }, + }, + { + filters: enabledFilters, + expectedQueryParams: map[string]string{ + "all": "", + "filter": "", + "filters": `{"enabled":{"true":true}}`, + }, + }, + { + filters: capabilityFilters, + expectedQueryParams: map[string]string{ + "all": "", + "filter": "", + "filters": `{"capability":{"authz":true,"volumedriver":true}}`, + }, + }, + } + + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]*types.Plugin{ + { + ID: "plugin_id1", + }, + { + ID: "plugin_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + plugins, err := client.PluginList(context.Background(), listCase.filters) + if err != nil { + t.Fatal(err) + } + if len(plugins) != 2 { + t.Fatalf("expected 2 plugins, got %v", plugins) + } + } +} diff --git a/vendor/github.com/moby/moby/client/plugin_push.go b/vendor/github.com/moby/moby/client/plugin_push.go new file mode 100644 index 000000000..1e5f96325 --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_push.go @@ -0,0 +1,17 @@ +package client + +import ( + "io" + + "golang.org/x/net/context" +) + +// PluginPush pushes a plugin to a registry +func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, headers) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/moby/moby/client/plugin_push_test.go b/vendor/github.com/moby/moby/client/plugin_push_test.go new file mode 100644 index 000000000..d9f70cdff --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_push_test.go @@ -0,0 +1,51 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestPluginPushError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.PluginPush(context.Background(), "plugin_name", "") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginPush(t *testing.T) { + expectedURL := "/plugins/plugin_name" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + 
return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + auth := req.Header.Get("X-Registry-Auth") + if auth != "authtoken" { + return nil, fmt.Errorf("Invalid auth header : expected 'authtoken', got %s", auth) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + _, err := client.PluginPush(context.Background(), "plugin_name", "authtoken") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/plugin_remove.go b/vendor/github.com/moby/moby/client/plugin_remove.go new file mode 100644 index 000000000..b017e4d34 --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_remove.go @@ -0,0 +1,20 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// PluginRemove removes a plugin +func (cli *Client) PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error { + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/plugins/"+name, query, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/plugin_remove_test.go b/vendor/github.com/moby/moby/client/plugin_remove_test.go new file mode 100644 index 000000000..b2d515793 --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_remove_test.go @@ -0,0 +1,49 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + + "golang.org/x/net/context" +) + +func TestPluginRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.PluginRemove(context.Background(), "plugin_name", types.PluginRemoveOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginRemove(t *testing.T) { + expectedURL := "/plugins/plugin_name" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.PluginRemove(context.Background(), "plugin_name", types.PluginRemoveOptions{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/plugin_set.go b/vendor/github.com/moby/moby/client/plugin_set.go new file mode 100644 index 000000000..3260d2a90 --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_set.go @@ -0,0 +1,12 @@ +package client + +import ( + "golang.org/x/net/context" +) + +// PluginSet modifies settings for an existing plugin +func (cli *Client) PluginSet(ctx context.Context, name string, args []string) error { + resp, err := cli.post(ctx, "/plugins/"+name+"/set", nil, args, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/plugin_set_test.go b/vendor/github.com/moby/moby/client/plugin_set_test.go new file mode 100644 index 000000000..245025446 --- /dev/null +++ 
b/vendor/github.com/moby/moby/client/plugin_set_test.go @@ -0,0 +1,47 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestPluginSetError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.PluginSet(context.Background(), "plugin_name", []string{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginSet(t *testing.T) { + expectedURL := "/plugins/plugin_name/set" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.PluginSet(context.Background(), "plugin_name", []string{"arg1"}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/plugin_upgrade.go b/vendor/github.com/moby/moby/client/plugin_upgrade.go new file mode 100644 index 000000000..049ebfa2a --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_upgrade.go @@ -0,0 +1,39 @@ +package client + +import ( + "io" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// PluginUpgrade upgrades a plugin +func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { + if err := cli.NewVersionError("1.26", "plugin upgrade"); err != nil { + return nil, err + } + query := url.Values{} + if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { + return nil, errors.Wrap(err, "invalid remote reference") + } + query.Set("remote", options.RemoteRef) + + privileges, err := cli.checkPluginPermissions(ctx, query, options) + if err != nil { + return nil, err + } + + resp, err := cli.tryPluginUpgrade(ctx, query, privileges, name, options.RegistryAuth) + if err != nil { + return nil, err + } + return resp.body, nil +} + +func (cli *Client) tryPluginUpgrade(ctx context.Context, query url.Values, privileges types.PluginPrivileges, name, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/plugins/"+name+"/upgrade", query, privileges, headers) +} diff --git a/vendor/github.com/moby/moby/client/request.go b/vendor/github.com/moby/moby/client/request.go new file mode 100644 index 000000000..3e7d43fea --- /dev/null +++ b/vendor/github.com/moby/moby/client/request.go @@ -0,0 +1,262 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/pkg/errors" + "golang.org/x/net/context" + "golang.org/x/net/context/ctxhttp" +) + +// serverResponse is a wrapper for http API responses. +type serverResponse struct { + body io.ReadCloser + header http.Header + statusCode int + reqURL *url.URL +} + +// head sends an http request to the docker API using the method HEAD. 
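+//
+// Like the other verb helpers below (get, post, put, delete), head is a thin
+// wrapper over sendRequest; a sketch of the call chain as laid out in this
+// file:
+//
+//	cli.head(ctx, path, query, headers)
+//	  -> cli.sendRequest(ctx, "HEAD", path, query, nil, headers)
+//	     -> cli.buildRequest / cli.doRequest / cli.checkResponseErr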
+func (cli *Client) head(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, "HEAD", path, query, nil, headers) +} + +// get sends an http request to the docker API using the method GET with a specific Go context. +func (cli *Client) get(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, "GET", path, query, nil, headers) +} + +// post sends an http request to the docker API using the method POST with a specific Go context. +func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { + body, headers, err := encodeBody(obj, headers) + if err != nil { + return serverResponse{}, err + } + return cli.sendRequest(ctx, "POST", path, query, body, headers) +} + +func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, "POST", path, query, body, headers) +} + +// put sends an http request to the docker API using the method PUT. +func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { + body, headers, err := encodeBody(obj, headers) + if err != nil { + return serverResponse{}, err + } + return cli.sendRequest(ctx, "PUT", path, query, body, headers) +} + +// putRaw sends an http request to the docker API using the method PUT. +func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, "PUT", path, query, body, headers) +} + +// delete sends an http request to the docker API using the method DELETE. +func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, "DELETE", path, query, nil, headers) +} + +type headers map[string][]string + +func encodeBody(obj interface{}, headers headers) (io.Reader, headers, error) { + if obj == nil { + return nil, headers, nil + } + + body, err := encodeData(obj) + if err != nil { + return nil, headers, err + } + if headers == nil { + headers = make(map[string][]string) + } + headers["Content-Type"] = []string{"application/json"} + return body, headers, nil +} + +func (cli *Client) buildRequest(method, path string, body io.Reader, headers headers) (*http.Request, error) { + expectedPayload := (method == "POST" || method == "PUT") + if expectedPayload && body == nil { + body = bytes.NewReader([]byte{}) + } + + req, err := http.NewRequest(method, path, body) + if err != nil { + return nil, err + } + req = cli.addHeaders(req, headers) + + if cli.proto == "unix" || cli.proto == "npipe" { + // For local communications, it doesn't matter what the host is. We just + // need a valid and meaningful host name. 
(See #189) + req.Host = "docker" + } + + req.URL.Host = cli.addr + req.URL.Scheme = cli.scheme + + if expectedPayload && req.Header.Get("Content-Type") == "" { + req.Header.Set("Content-Type", "text/plain") + } + return req, nil +} + +func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers headers) (serverResponse, error) { + req, err := cli.buildRequest(method, cli.getAPIPath(path, query), body, headers) + if err != nil { + return serverResponse{}, err + } + resp, err := cli.doRequest(ctx, req) + if err != nil { + return resp, err + } + if err := cli.checkResponseErr(resp); err != nil { + return resp, err + } + return resp, nil +} + +func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResponse, error) { + serverResp := serverResponse{statusCode: -1, reqURL: req.URL} + + resp, err := ctxhttp.Do(ctx, cli.client, req) + if err != nil { + if cli.scheme != "https" && strings.Contains(err.Error(), "malformed HTTP response") { + return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err) + } + + if cli.scheme == "https" && strings.Contains(err.Error(), "bad certificate") { + return serverResp, fmt.Errorf("The server probably has client authentication (--tlsverify) enabled. Please check your TLS client certification settings: %v", err) + } + + // Don't decorate context sentinel errors; users may be comparing to + // them directly. + switch err { + case context.Canceled, context.DeadlineExceeded: + return serverResp, err + } + + if nErr, ok := err.(*url.Error); ok { + if nErr, ok := nErr.Err.(*net.OpError); ok { + if os.IsPermission(nErr.Err) { + return serverResp, errors.Wrapf(err, "Got permission denied while trying to connect to the Docker daemon socket at %v", cli.host) + } + } + } + + if err, ok := err.(net.Error); ok { + if err.Timeout() { + return serverResp, ErrorConnectionFailed(cli.host) + } + if !err.Temporary() { + if strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") { + return serverResp, ErrorConnectionFailed(cli.host) + } + } + } + + // Although there's not a strongly typed error for this in go-winio, + // lots of people are using the default configuration for the docker + // daemon on Windows where the daemon is listening on a named pipe + // `//./pipe/docker_engine`, and the client must be running elevated. + // Give users a clue rather than the not overly useful message + // such as `error during connect: Get http://%2F%2F.%2Fpipe%2Fdocker_engine/v1.26/info: + // open //./pipe/docker_engine: The system cannot find the file specified.`. + // Note we can't string compare "The system cannot find the file specified" as + // this is localised - for example in French the error would be + // `open //./pipe/docker_engine: Le fichier spécifié est introuvable.` + if strings.Contains(err.Error(), `open //./pipe/docker_engine`) { + err = errors.New(err.Error() + " In the default daemon configuration on Windows, the docker client must be run elevated to connect.
This error may also indicate that the docker daemon is not running.") + } + + return serverResp, errors.Wrap(err, "error during connect") + } + + if resp != nil { + serverResp.statusCode = resp.StatusCode + serverResp.body = resp.Body + serverResp.header = resp.Header + } + return serverResp, nil +} + +func (cli *Client) checkResponseErr(serverResp serverResponse) error { + if serverResp.statusCode >= 200 && serverResp.statusCode < 400 { + return nil + } + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return err + } + if len(body) == 0 { + return fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), serverResp.reqURL) + } + + var ct string + if serverResp.header != nil { + ct = serverResp.header.Get("Content-Type") + } + + var errorMessage string + if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) && ct == "application/json" { + var errorResponse types.ErrorResponse + if err := json.Unmarshal(body, &errorResponse); err != nil { + return fmt.Errorf("Error reading JSON: %v", err) + } + errorMessage = errorResponse.Message + } else { + errorMessage = string(body) + } + + return fmt.Errorf("Error response from daemon: %s", strings.TrimSpace(errorMessage)) +} + +func (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request { + // Add CLI Config's HTTP Headers BEFORE we set the Docker headers + // so the user can't change OUR headers + for k, v := range cli.customHTTPHeaders { + if versions.LessThan(cli.version, "1.25") && k == "User-Agent" { + continue + } + req.Header.Set(k, v) + } + + if headers != nil { + for k, v := range headers { + req.Header[k] = v + } + } + return req +} + +func encodeData(data interface{}) (*bytes.Buffer, error) { + params := bytes.NewBuffer(nil) + if data != nil { + if err := json.NewEncoder(params).Encode(data); err != nil { + return nil, err + } + } + return params, nil +} + +func ensureReaderClosed(response serverResponse) { + if response.body != nil { + // Drain up to 512 bytes and close the body to let the Transport reuse the connection + io.CopyN(ioutil.Discard, response.body, 512) + response.body.Close() + } +} diff --git a/vendor/github.com/moby/moby/client/request_test.go b/vendor/github.com/moby/moby/client/request_test.go new file mode 100644 index 000000000..63908aec4 --- /dev/null +++ b/vendor/github.com/moby/moby/client/request_test.go @@ -0,0 +1,92 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// TestSetHostHeader verifies that a fake host is set for local communications +// and that the real host is kept for normal communications.
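+//
+// The table below assumes ParseHost splits an endpoint into
+// (proto, addr, basePath); for instance, per the first and third cases:
+//
+//	ParseHost("unix:///var/run/docker.sock") // proto "unix", addr "/var/run/docker.sock"
+//	ParseHost("tcp://0.0.0.0:4243")          // proto "tcp",  addr "0.0.0.0:4243"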
+func TestSetHostHeader(t *testing.T) { + testURL := "/test" + testCases := []struct { + host string + expectedHost string + expectedURLHost string + }{ + { + "unix:///var/run/docker.sock", + "docker", + "/var/run/docker.sock", + }, + { + "npipe:////./pipe/docker_engine", + "docker", + "//./pipe/docker_engine", + }, + { + "tcp://0.0.0.0:4243", + "", + "0.0.0.0:4243", + }, + { + "tcp://localhost:4243", + "", + "localhost:4243", + }, + } + + for c, test := range testCases { + proto, addr, basePath, err := ParseHost(test.host) + if err != nil { + t.Fatal(err) + } + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, testURL) { + return nil, fmt.Errorf("Test Case #%d: Expected URL %q, got %q", c, testURL, req.URL) + } + if req.Host != test.expectedHost { + return nil, fmt.Errorf("Test Case #%d: Expected host %q, got %q", c, test.expectedHost, req.Host) + } + if req.URL.Host != test.expectedURLHost { + return nil, fmt.Errorf("Test Case #%d: Expected URL host %q, got %q", c, test.expectedURLHost, req.URL.Host) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(([]byte("")))), + }, nil + }), + + proto: proto, + addr: addr, + basePath: basePath, + } + + _, err = client.sendRequest(context.Background(), "GET", testURL, nil, nil, nil) + if err != nil { + t.Fatal(err) + } + } +} + +// TestPlainTextError tests the server returning an error in plain text for +// backwards compatibility with API versions <1.24. All other tests use +// errors returned as JSON +func TestPlainTextError(t *testing.T) { + client := &Client{ + client: newMockClient(plainTextErrorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerList(context.Background(), types.ContainerListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} diff --git a/vendor/github.com/moby/moby/client/secret_create.go b/vendor/github.com/moby/moby/client/secret_create.go new file mode 100644 index 000000000..4354afea6 --- /dev/null +++ b/vendor/github.com/moby/moby/client/secret_create.go @@ -0,0 +1,25 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SecretCreate creates a new Secret. 
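+//
+// A minimal usage sketch (ctx, the secret name, and the payload are
+// illustrative assumptions, not part of this file):
+//
+//	resp, err := cli.SecretCreate(ctx, swarm.SecretSpec{
+//		Annotations: swarm.Annotations{Name: "my_secret"},
+//		Data:        []byte("s3cr3t"),
+//	})
+//	// On success, resp.ID identifies the newly created secret.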
+func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) { + var response types.SecretCreateResponse + if err := cli.NewVersionError("1.25", "secret create"); err != nil { + return response, err + } + resp, err := cli.post(ctx, "/secrets/create", nil, secret, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/moby/moby/client/secret_create_test.go b/vendor/github.com/moby/moby/client/secret_create_test.go new file mode 100644 index 000000000..ccc734900 --- /dev/null +++ b/vendor/github.com/moby/moby/client/secret_create_test.go @@ -0,0 +1,69 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestSecretCreateUnsupported(t *testing.T) { + client := &Client{ + version: "1.24", + client: &http.Client{}, + } + _, err := client.SecretCreate(context.Background(), swarm.SecretSpec{}) + assert.EqualError(t, err, `"secret create" requires API version 1.25, but the Docker daemon API version is 1.24`) +} + +func TestSecretCreateError(t *testing.T) { + client := &Client{ + version: "1.25", + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.SecretCreate(context.Background(), swarm.SecretSpec{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSecretCreate(t *testing.T) { + expectedURL := "/v1.25/secrets/create" + client := &Client{ + version: "1.25", + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + b, err := json.Marshal(types.SecretCreateResponse{ + ID: "test_secret", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusCreated, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + r, err := client.SecretCreate(context.Background(), swarm.SecretSpec{}) + if err != nil { + t.Fatal(err) + } + if r.ID != "test_secret" { + t.Fatalf("expected `test_secret`, got %s", r.ID) + } +} diff --git a/vendor/github.com/moby/moby/client/secret_inspect.go b/vendor/github.com/moby/moby/client/secret_inspect.go new file mode 100644 index 000000000..9b602972b --- /dev/null +++ b/vendor/github.com/moby/moby/client/secret_inspect.go @@ -0,0 +1,37 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SecretInspectWithRaw returns the secret information with raw data +func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.Secret, []byte, error) { + if err := cli.NewVersionError("1.25", "secret inspect"); err != nil { + return swarm.Secret{}, nil, err + } + resp, err := cli.get(ctx, "/secrets/"+id, nil, nil) + if err != nil { + if resp.statusCode == http.StatusNotFound { + return swarm.Secret{}, nil, secretNotFoundError{id} + } + return swarm.Secret{}, nil, err + } + defer ensureReaderClosed(resp) + + 
body, err := ioutil.ReadAll(resp.body) + if err != nil { + return swarm.Secret{}, nil, err + } + + var secret swarm.Secret + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&secret) + + return secret, body, err +} diff --git a/vendor/github.com/moby/moby/client/secret_inspect_test.go b/vendor/github.com/moby/moby/client/secret_inspect_test.go new file mode 100644 index 000000000..1581da101 --- /dev/null +++ b/vendor/github.com/moby/moby/client/secret_inspect_test.go @@ -0,0 +1,78 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestSecretInspectUnsupported(t *testing.T) { + client := &Client{ + version: "1.24", + client: &http.Client{}, + } + _, _, err := client.SecretInspectWithRaw(context.Background(), "nothing") + assert.EqualError(t, err, `"secret inspect" requires API version 1.25, but the Docker daemon API version is 1.24`) +} + +func TestSecretInspectError(t *testing.T) { + client := &Client{ + version: "1.25", + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.SecretInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSecretInspectSecretNotFound(t *testing.T) { + client := &Client{ + version: "1.25", + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, _, err := client.SecretInspectWithRaw(context.Background(), "unknown") + if err == nil || !IsErrSecretNotFound(err) { + t.Fatalf("expected a secretNotFoundError error, got %v", err) + } +} + +func TestSecretInspect(t *testing.T) { + expectedURL := "/v1.25/secrets/secret_id" + client := &Client{ + version: "1.25", + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(swarm.Secret{ + ID: "secret_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + secretInspect, _, err := client.SecretInspectWithRaw(context.Background(), "secret_id") + if err != nil { + t.Fatal(err) + } + if secretInspect.ID != "secret_id" { + t.Fatalf("expected `secret_id`, got %s", secretInspect.ID) + } +} diff --git a/vendor/github.com/moby/moby/client/secret_list.go b/vendor/github.com/moby/moby/client/secret_list.go new file mode 100644 index 000000000..0d33ecfbc --- /dev/null +++ b/vendor/github.com/moby/moby/client/secret_list.go @@ -0,0 +1,38 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SecretList returns the list of secrets. 
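+//
+// A sketch of a filtered listing (the filter key and value are illustrative
+// assumptions):
+//
+//	f := filters.NewArgs()
+//	f.Add("name", "my_secret")
+//	secrets, err := cli.SecretList(ctx, types.SecretListOptions{Filters: f})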
+func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) { + if err := cli.NewVersionError("1.25", "secret list"); err != nil { + return nil, err + } + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/secrets", query, nil) + if err != nil { + return nil, err + } + + var secrets []swarm.Secret + err = json.NewDecoder(resp.body).Decode(&secrets) + ensureReaderClosed(resp) + return secrets, err +} diff --git a/vendor/github.com/moby/moby/client/secret_list_test.go b/vendor/github.com/moby/moby/client/secret_list_test.go new file mode 100644 index 000000000..67a94d3df --- /dev/null +++ b/vendor/github.com/moby/moby/client/secret_list_test.go @@ -0,0 +1,106 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestSecretListUnsupported(t *testing.T) { + client := &Client{ + version: "1.24", + client: &http.Client{}, + } + _, err := client.SecretList(context.Background(), types.SecretListOptions{}) + assert.EqualError(t, err, `"secret list" requires API version 1.25, but the Docker daemon API version is 1.24`) +} + +func TestSecretListError(t *testing.T) { + client := &Client{ + version: "1.25", + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.SecretList(context.Background(), types.SecretListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSecretList(t *testing.T) { + expectedURL := "/v1.25/secrets" + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + + listCases := []struct { + options types.SecretListOptions + expectedQueryParams map[string]string + }{ + { + options: types.SecretListOptions{}, + expectedQueryParams: map[string]string{ + "filters": "", + }, + }, + { + options: types.SecretListOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": `{"label":{"label1":true,"label2":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + version: "1.25", + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. 
Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]swarm.Secret{ + { + ID: "secret_id1", + }, + { + ID: "secret_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + secrets, err := client.SecretList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(secrets) != 2 { + t.Fatalf("expected 2 secrets, got %v", secrets) + } + } +} diff --git a/vendor/github.com/moby/moby/client/secret_remove.go b/vendor/github.com/moby/moby/client/secret_remove.go new file mode 100644 index 000000000..c5e37af17 --- /dev/null +++ b/vendor/github.com/moby/moby/client/secret_remove.go @@ -0,0 +1,13 @@ +package client + +import "golang.org/x/net/context" + +// SecretRemove removes a Secret. +func (cli *Client) SecretRemove(ctx context.Context, id string) error { + if err := cli.NewVersionError("1.25", "secret remove"); err != nil { + return err + } + resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/secret_remove_test.go b/vendor/github.com/moby/moby/client/secret_remove_test.go new file mode 100644 index 000000000..bb41edfe5 --- /dev/null +++ b/vendor/github.com/moby/moby/client/secret_remove_test.go @@ -0,0 +1,59 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestSecretRemoveUnsupported(t *testing.T) { + client := &Client{ + version: "1.24", + client: &http.Client{}, + } + err := client.SecretRemove(context.Background(), "secret_id") + assert.EqualError(t, err, `"secret remove" requires API version 1.25, but the Docker daemon API version is 1.24`) +} + +func TestSecretRemoveError(t *testing.T) { + client := &Client{ + version: "1.25", + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.SecretRemove(context.Background(), "secret_id") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSecretRemove(t *testing.T) { + expectedURL := "/v1.25/secrets/secret_id" + + client := &Client{ + version: "1.25", + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.SecretRemove(context.Background(), "secret_id") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/secret_update.go b/vendor/github.com/moby/moby/client/secret_update.go new file mode 100644 index 000000000..875a4c901 --- /dev/null +++ b/vendor/github.com/moby/moby/client/secret_update.go @@ -0,0 +1,21 @@ +package client + +import ( + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SecretUpdate attempts to update a Secret +func (cli *Client) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error { + if err := cli.NewVersionError("1.25", "secret update"); 
err != nil { + return err + } + query := url.Values{} + query.Set("version", strconv.FormatUint(version.Index, 10)) + resp, err := cli.post(ctx, "/secrets/"+id+"/update", query, secret, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/secret_update_test.go b/vendor/github.com/moby/moby/client/secret_update_test.go new file mode 100644 index 000000000..4a791328e --- /dev/null +++ b/vendor/github.com/moby/moby/client/secret_update_test.go @@ -0,0 +1,60 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestSecretUpdateUnsupported(t *testing.T) { + client := &Client{ + version: "1.24", + client: &http.Client{}, + } + err := client.SecretUpdate(context.Background(), "secret_id", swarm.Version{}, swarm.SecretSpec{}) + assert.EqualError(t, err, `"secret update" requires API version 1.25, but the Docker daemon API version is 1.24`) +} + +func TestSecretUpdateError(t *testing.T) { + client := &Client{ + version: "1.25", + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.SecretUpdate(context.Background(), "secret_id", swarm.Version{}, swarm.SecretSpec{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSecretUpdate(t *testing.T) { + expectedURL := "/v1.25/secrets/secret_id/update" + + client := &Client{ + version: "1.25", + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.SecretUpdate(context.Background(), "secret_id", swarm.Version{}, swarm.SecretSpec{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/service_create.go b/vendor/github.com/moby/moby/client/service_create.go new file mode 100644 index 000000000..a36839443 --- /dev/null +++ b/vendor/github.com/moby/moby/client/service_create.go @@ -0,0 +1,156 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// ServiceCreate creates a new Service. 
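+//
+// A minimal sketch of a call (the image name is an illustrative assumption;
+// digest pinning and platform discovery below only run when
+// options.QueryRegistry is set):
+//
+//	spec := swarm.ServiceSpec{
+//		TaskTemplate: swarm.TaskSpec{
+//			ContainerSpec: &swarm.ContainerSpec{Image: "nginx:alpine"},
+//		},
+//	}
+//	resp, err := cli.ServiceCreate(ctx, spec, types.ServiceCreateOptions{})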
+func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) { + var distErr error + + headers := map[string][]string{ + "version": {cli.version}, + } + + if options.EncodedRegistryAuth != "" { + headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth} + } + + // Make sure containerSpec is not nil when no runtime is set or the runtime is set to container + if service.TaskTemplate.ContainerSpec == nil && (service.TaskTemplate.Runtime == "" || service.TaskTemplate.Runtime == swarm.RuntimeContainer) { + service.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{} + } + + if err := validateServiceSpec(service); err != nil { + return types.ServiceCreateResponse{}, err + } + + // ensure that the image is tagged + var imgPlatforms []swarm.Platform + if service.TaskTemplate.ContainerSpec != nil { + if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" { + service.TaskTemplate.ContainerSpec.Image = taggedImg + } + if options.QueryRegistry { + var img string + img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.ContainerSpec.Image, options.EncodedRegistryAuth) + if img != "" { + service.TaskTemplate.ContainerSpec.Image = img + } + } + } + + // ensure that the image is tagged + if service.TaskTemplate.PluginSpec != nil { + if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" { + service.TaskTemplate.PluginSpec.Remote = taggedImg + } + if options.QueryRegistry { + var img string + img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.PluginSpec.Remote, options.EncodedRegistryAuth) + if img != "" { + service.TaskTemplate.PluginSpec.Remote = img + } + } + } + + if service.TaskTemplate.Placement == nil && len(imgPlatforms) > 0 { + service.TaskTemplate.Placement = &swarm.Placement{} + } + if len(imgPlatforms) > 0 { + service.TaskTemplate.Placement.Platforms = imgPlatforms + } + + var response types.ServiceCreateResponse + resp, err := cli.post(ctx, "/services/create", nil, service, headers) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + + if distErr != nil { + response.Warnings = append(response.Warnings, digestWarning(service.TaskTemplate.ContainerSpec.Image)) + } + + ensureReaderClosed(resp) + return response, err +} + +func imageDigestAndPlatforms(ctx context.Context, cli *Client, image, encodedAuth string) (string, []swarm.Platform, error) { + distributionInspect, err := cli.DistributionInspect(ctx, image, encodedAuth) + imageWithDigest := image + var platforms []swarm.Platform + if err != nil { + return "", nil, err + } + + imageWithDigest = imageWithDigestString(image, distributionInspect.Descriptor.Digest) + + if len(distributionInspect.Platforms) > 0 { + platforms = make([]swarm.Platform, 0, len(distributionInspect.Platforms)) + for _, p := range distributionInspect.Platforms { + platforms = append(platforms, swarm.Platform{ + Architecture: p.Architecture, + OS: p.OS, + }) + } + } + return imageWithDigest, platforms, err +} + +// imageWithDigestString takes an image string and a digest, and updates +// the image string if it didn't originally contain a digest. It returns +// an empty string if there are no updates. 
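+//
+// For instance (digest value shortened for illustration, mirroring the
+// pinning test cases further down):
+//
+//	imageWithDigestString("alpine:edge", dgst)       // "alpine:edge@sha256:..."
+//	imageWithDigestString("alpine@sha256:...", dgst) // "" (already canonical)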
+func imageWithDigestString(image string, dgst digest.Digest) string { + namedRef, err := reference.ParseNormalizedNamed(image) + if err == nil { + if _, isCanonical := namedRef.(reference.Canonical); !isCanonical { + // ensure that image gets a default tag if none is provided + img, err := reference.WithDigest(namedRef, dgst) + if err == nil { + return reference.FamiliarString(img) + } + } + } + return "" +} + +// imageWithTagString takes an image string, and returns a tagged image +// string, adding a 'latest' tag if one was not provided. It returns an +// empty string if a canonical reference was provided +func imageWithTagString(image string) string { + namedRef, err := reference.ParseNormalizedNamed(image) + if err == nil { + return reference.FamiliarString(reference.TagNameOnly(namedRef)) + } + return "" +} + +// digestWarning constructs a formatted warning string using the +// image name that could not be pinned by digest. The formatting +// is hardcoded, but could be made smarter in the future +func digestWarning(image string) string { + return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest. Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image) +} + +func validateServiceSpec(s swarm.ServiceSpec) error { + if s.TaskTemplate.ContainerSpec != nil && s.TaskTemplate.PluginSpec != nil { + return errors.New("must not specify both a container spec and a plugin spec in the task template") + } + if s.TaskTemplate.PluginSpec != nil && s.TaskTemplate.Runtime != swarm.RuntimePlugin { + return errors.New("mismatched runtime with plugin spec") + } + if s.TaskTemplate.ContainerSpec != nil && (s.TaskTemplate.Runtime != "" && s.TaskTemplate.Runtime != swarm.RuntimeContainer) { + return errors.New("mismatched runtime with container spec") + } + return nil +} diff --git a/vendor/github.com/moby/moby/client/service_create_test.go b/vendor/github.com/moby/moby/client/service_create_test.go new file mode 100644 index 000000000..6915d636e --- /dev/null +++ b/vendor/github.com/moby/moby/client/service_create_test.go @@ -0,0 +1,210 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/swarm" + "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestServiceCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ServiceCreate(context.Background(), swarm.ServiceSpec{}, types.ServiceCreateOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestServiceCreate(t *testing.T) { + expectedURL := "/services/create" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + b, err := json.Marshal(types.ServiceCreateResponse{ + ID: "service_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ +
StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + r, err := client.ServiceCreate(context.Background(), swarm.ServiceSpec{}, types.ServiceCreateOptions{}) + if err != nil { + t.Fatal(err) + } + if r.ID != "service_id" { + t.Fatalf("expected `service_id`, got %s", r.ID) + } +} + +func TestServiceCreateCompatiblePlatforms(t *testing.T) { + client := &Client{ + version: "1.30", + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if strings.HasPrefix(req.URL.Path, "/v1.30/services/create") { + var serviceSpec swarm.ServiceSpec + + // check if the /distribution endpoint returned correct output + err := json.NewDecoder(req.Body).Decode(&serviceSpec) + if err != nil { + return nil, err + } + + assert.Equal(t, "foobar:1.0@sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96", serviceSpec.TaskTemplate.ContainerSpec.Image) + assert.Len(t, serviceSpec.TaskTemplate.Placement.Platforms, 1) + + p := serviceSpec.TaskTemplate.Placement.Platforms[0] + b, err := json.Marshal(types.ServiceCreateResponse{ + ID: "service_" + p.OS + "_" + p.Architecture, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + } else if strings.HasPrefix(req.URL.Path, "/v1.30/distribution/") { + b, err := json.Marshal(registrytypes.DistributionInspect{ + Descriptor: v1.Descriptor{ + Digest: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96", + }, + Platforms: []v1.Platform{ + { + Architecture: "amd64", + OS: "linux", + }, + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + } else { + return nil, fmt.Errorf("unexpected URL '%s'", req.URL.Path) + } + }), + } + + spec := swarm.ServiceSpec{TaskTemplate: swarm.TaskSpec{ContainerSpec: &swarm.ContainerSpec{Image: "foobar:1.0"}}} + + r, err := client.ServiceCreate(context.Background(), spec, types.ServiceCreateOptions{QueryRegistry: true}) + assert.NoError(t, err) + assert.Equal(t, "service_linux_amd64", r.ID) +} + +func TestServiceCreateDigestPinning(t *testing.T) { + dgst := "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" + dgstAlt := "sha256:37ffbf3f7497c07584dc9637ffbf3f7497c0758c0537ffbf3f7497c0c88e2bb7" + serviceCreateImage := "" + pinByDigestTests := []struct { + img string // input image provided by the user + expected string // expected image after digest pinning + }{ + // default registry returns familiar string + {"docker.io/library/alpine", "alpine:latest@" + dgst}, + // provided tag is preserved and digest added + {"alpine:edge", "alpine:edge@" + dgst}, + // image with provided alternative digest remains unchanged + {"alpine@" + dgstAlt, "alpine@" + dgstAlt}, + // image with provided tag and alternative digest remains unchanged + {"alpine:edge@" + dgstAlt, "alpine:edge@" + dgstAlt}, + // image on alternative registry does not result in familiar string + {"alternate.registry/library/alpine", "alternate.registry/library/alpine:latest@" + dgst}, + // unresolvable image does not get a digest + {"cannotresolve", "cannotresolve:latest"}, + } + + client := &Client{ + version: "1.30", + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if strings.HasPrefix(req.URL.Path, "/v1.30/services/create") { + // reset and set image received by the service create endpoint + serviceCreateImage = "" + var service swarm.ServiceSpec 
+ if err := json.NewDecoder(req.Body).Decode(&service); err != nil { + return nil, fmt.Errorf("could not parse service create request") + } + serviceCreateImage = service.TaskTemplate.ContainerSpec.Image + + b, err := json.Marshal(types.ServiceCreateResponse{ + ID: "service_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + } else if strings.HasPrefix(req.URL.Path, "/v1.30/distribution/cannotresolve") { + // unresolvable image + return nil, fmt.Errorf("cannot resolve image") + } else if strings.HasPrefix(req.URL.Path, "/v1.30/distribution/") { + // resolvable images + b, err := json.Marshal(registrytypes.DistributionInspect{ + Descriptor: v1.Descriptor{ + Digest: digest.Digest(dgst), + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + } + return nil, fmt.Errorf("unexpected URL '%s'", req.URL.Path) + }), + } + + // run pin by digest tests + for _, p := range pinByDigestTests { + r, err := client.ServiceCreate(context.Background(), swarm.ServiceSpec{ + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{ + Image: p.img, + }, + }, + }, types.ServiceCreateOptions{QueryRegistry: true}) + + if err != nil { + t.Fatal(err) + } + + if r.ID != "service_id" { + t.Fatalf("expected `service_id`, got %s", r.ID) + } + + if p.expected != serviceCreateImage { + t.Fatalf("expected image %s, got %s", p.expected, serviceCreateImage) + } + } +} diff --git a/vendor/github.com/moby/moby/client/service_inspect.go b/vendor/github.com/moby/moby/client/service_inspect.go new file mode 100644 index 000000000..d7e051e3a --- /dev/null +++ b/vendor/github.com/moby/moby/client/service_inspect.go @@ -0,0 +1,38 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// ServiceInspectWithRaw returns the service information and the raw data. 
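+//
+// A sketch of a call (the service ID is an illustrative assumption):
+//
+//	svc, raw, err := cli.ServiceInspectWithRaw(ctx, "service_id",
+//		types.ServiceInspectOptions{InsertDefaults: true})
+//	// svc is the decoded swarm.Service; raw holds the daemon's JSON bytes.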
+func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, opts types.ServiceInspectOptions) (swarm.Service, []byte, error) { + query := url.Values{} + query.Set("insertDefaults", fmt.Sprintf("%v", opts.InsertDefaults)) + serverResp, err := cli.get(ctx, "/services/"+serviceID, query, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return swarm.Service{}, nil, serviceNotFoundError{serviceID} + } + return swarm.Service{}, nil, err + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return swarm.Service{}, nil, err + } + + var response swarm.Service + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/moby/moby/client/service_inspect_test.go b/vendor/github.com/moby/moby/client/service_inspect_test.go new file mode 100644 index 000000000..d53f583e9 --- /dev/null +++ b/vendor/github.com/moby/moby/client/service_inspect_test.go @@ -0,0 +1,66 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestServiceInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.ServiceInspectWithRaw(context.Background(), "nothing", types.ServiceInspectOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestServiceInspectServiceNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, _, err := client.ServiceInspectWithRaw(context.Background(), "unknown", types.ServiceInspectOptions{}) + if err == nil || !IsErrServiceNotFound(err) { + t.Fatalf("expected a serviceNotFoundError error, got %v", err) + } +} + +func TestServiceInspect(t *testing.T) { + expectedURL := "/services/service_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(swarm.Service{ + ID: "service_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + serviceInspect, _, err := client.ServiceInspectWithRaw(context.Background(), "service_id", types.ServiceInspectOptions{}) + if err != nil { + t.Fatal(err) + } + if serviceInspect.ID != "service_id" { + t.Fatalf("expected `service_id`, got %s", serviceInspect.ID) + } +} diff --git a/vendor/github.com/moby/moby/client/service_list.go b/vendor/github.com/moby/moby/client/service_list.go new file mode 100644 index 000000000..c29e6d407 --- /dev/null +++ b/vendor/github.com/moby/moby/client/service_list.go @@ -0,0 +1,35 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// ServiceList returns the list of services. 
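+//
+// A sketch mirroring the label filters used in the tests below (the label
+// values are illustrative assumptions):
+//
+//	f := filters.NewArgs()
+//	f.Add("label", "label1")
+//	services, err := cli.ServiceList(ctx, types.ServiceListOptions{Filters: f})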
+func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/services", query, nil) + if err != nil { + return nil, err + } + + var services []swarm.Service + err = json.NewDecoder(resp.body).Decode(&services) + ensureReaderClosed(resp) + return services, err +} diff --git a/vendor/github.com/moby/moby/client/service_list_test.go b/vendor/github.com/moby/moby/client/service_list_test.go new file mode 100644 index 000000000..213981ef7 --- /dev/null +++ b/vendor/github.com/moby/moby/client/service_list_test.go @@ -0,0 +1,94 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestServiceListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ServiceList(context.Background(), types.ServiceListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestServiceList(t *testing.T) { + expectedURL := "/services" + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + + listCases := []struct { + options types.ServiceListOptions + expectedQueryParams map[string]string + }{ + { + options: types.ServiceListOptions{}, + expectedQueryParams: map[string]string{ + "filters": "", + }, + }, + { + options: types.ServiceListOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": `{"label":{"label1":true,"label2":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]swarm.Service{ + { + ID: "service_id1", + }, + { + ID: "service_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + services, err := client.ServiceList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(services) != 2 { + t.Fatalf("expected 2 services, got %v", services) + } + } +} diff --git a/vendor/github.com/moby/moby/client/service_logs.go b/vendor/github.com/moby/moby/client/service_logs.go new file mode 100644 index 000000000..24384e3ec --- /dev/null +++ b/vendor/github.com/moby/moby/client/service_logs.go @@ -0,0 +1,52 @@ +package client + +import ( + "io" + "net/url" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + timetypes "github.com/docker/docker/api/types/time" +) + +// ServiceLogs returns the logs generated by a service in an io.ReadCloser. 
+// It's up to the caller to close the stream.
+func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) {
+ query := url.Values{}
+ if options.ShowStdout {
+ query.Set("stdout", "1")
+ }
+
+ if options.ShowStderr {
+ query.Set("stderr", "1")
+ }
+
+ if options.Since != "" {
+ ts, err := timetypes.GetTimestamp(options.Since, time.Now())
+ if err != nil {
+ return nil, err
+ }
+ query.Set("since", ts)
+ }
+
+ if options.Timestamps {
+ query.Set("timestamps", "1")
+ }
+
+ if options.Details {
+ query.Set("details", "1")
+ }
+
+ if options.Follow {
+ query.Set("follow", "1")
+ }
+ query.Set("tail", options.Tail)
+
+ resp, err := cli.get(ctx, "/services/"+serviceID+"/logs", query, nil)
+ if err != nil {
+ return nil, err
+ }
+ return resp.body, nil
+}
diff --git a/vendor/github.com/moby/moby/client/service_logs_test.go b/vendor/github.com/moby/moby/client/service_logs_test.go
new file mode 100644
index 000000000..a6d002ba7
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/service_logs_test.go
@@ -0,0 +1,133 @@
+package client
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "os"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/docker/docker/api/types"
+
+ "golang.org/x/net/context"
+)
+
+func TestServiceLogsError(t *testing.T) {
+ client := &Client{
+ client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
+ }
+ _, err := client.ServiceLogs(context.Background(), "service_id", types.ContainerLogsOptions{})
+ if err == nil || err.Error() != "Error response from daemon: Server error" {
+ t.Fatalf("expected a Server Error, got %v", err)
+ }
+ _, err = client.ServiceLogs(context.Background(), "service_id", types.ContainerLogsOptions{
+ Since: "2006-01-02TZ",
+ })
+ if err == nil || !strings.Contains(err.Error(), `parsing time "2006-01-02TZ"`) {
+ t.Fatalf("expected a 'parsing time' error, got %v", err)
+ }
+}
+
+func TestServiceLogs(t *testing.T) {
+ expectedURL := "/services/service_id/logs"
+ cases := []struct {
+ options types.ContainerLogsOptions
+ expectedQueryParams map[string]string
+ }{
+ {
+ expectedQueryParams: map[string]string{
+ "tail": "",
+ },
+ },
+ {
+ options: types.ContainerLogsOptions{
+ Tail: "any",
+ },
+ expectedQueryParams: map[string]string{
+ "tail": "any",
+ },
+ },
+ {
+ options: types.ContainerLogsOptions{
+ ShowStdout: true,
+ ShowStderr: true,
+ Timestamps: true,
+ Details: true,
+ Follow: true,
+ },
+ expectedQueryParams: map[string]string{
+ "tail": "",
+ "stdout": "1",
+ "stderr": "1",
+ "timestamps": "1",
+ "details": "1",
+ "follow": "1",
+ },
+ },
+ {
+ options: types.ContainerLogsOptions{
+ // A completely invalid date, timestamp or Go duration will be
+ // passed as is
+ Since: "invalid but valid",
+ },
+ expectedQueryParams: map[string]string{
+ "tail": "",
+ "since": "invalid but valid",
+ },
+ },
+ }
+ for _, logCase := range cases {
+ client := &Client{
+ client: newMockClient(func(r *http.Request) (*http.Response, error) {
+ if !strings.HasPrefix(r.URL.Path, expectedURL) {
+ return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL)
+ }
+ // Check query parameters
+ query := r.URL.Query()
+ for key, expected := range logCase.expectedQueryParams {
+ actual := query.Get(key)
+ if actual != expected {
+ return nil, fmt.Errorf("%s not set in URL query properly. 
Expected '%s', got %s", key, expected, actual) + } + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + body, err := client.ServiceLogs(context.Background(), "service_id", logCase.options) + if err != nil { + t.Fatal(err) + } + defer body.Close() + content, err := ioutil.ReadAll(body) + if err != nil { + t.Fatal(err) + } + if string(content) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(content)) + } + } +} + +func ExampleClient_ServiceLogs_withTimeout() { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + client, _ := NewEnvClient() + reader, err := client.ServiceLogs(ctx, "service_id", types.ContainerLogsOptions{}) + if err != nil { + log.Fatal(err) + } + + _, err = io.Copy(os.Stdout, reader) + if err != nil && err != io.EOF { + log.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/service_remove.go b/vendor/github.com/moby/moby/client/service_remove.go new file mode 100644 index 000000000..a9331f92c --- /dev/null +++ b/vendor/github.com/moby/moby/client/service_remove.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// ServiceRemove kills and removes a service. +func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error { + resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/service_remove_test.go b/vendor/github.com/moby/moby/client/service_remove_test.go new file mode 100644 index 000000000..8e2ac259c --- /dev/null +++ b/vendor/github.com/moby/moby/client/service_remove_test.go @@ -0,0 +1,47 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestServiceRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.ServiceRemove(context.Background(), "service_id") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestServiceRemove(t *testing.T) { + expectedURL := "/services/service_id" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.ServiceRemove(context.Background(), "service_id") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/service_update.go b/vendor/github.com/moby/moby/client/service_update.go new file mode 100644 index 000000000..8764f299a --- /dev/null +++ b/vendor/github.com/moby/moby/client/service_update.go @@ -0,0 +1,92 @@ +package client + +import ( + "encoding/json" + "net/url" + "strconv" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// ServiceUpdate updates a Service. 
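+// A minimal call-site sketch (illustrative only, not part of this patch):
+// the caller passes the last known swarm.Version so the daemon can detect
+// concurrent modifications, and reads any registry warnings off the response.
+// Here spec is assumed to be an existing swarm.ServiceSpec:
+//
+//	resp, err := cli.ServiceUpdate(ctx, "service_id",
+//		swarm.Version{Index: 10}, spec, types.ServiceUpdateOptions{})
+//	if err == nil {
+//		for _, w := range resp.Warnings {
+//			fmt.Println(w)
+//		}
+//	}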
+func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) { + var ( + query = url.Values{} + distErr error + ) + + headers := map[string][]string{ + "version": {cli.version}, + } + + if options.EncodedRegistryAuth != "" { + headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth} + } + + if options.RegistryAuthFrom != "" { + query.Set("registryAuthFrom", options.RegistryAuthFrom) + } + + if options.Rollback != "" { + query.Set("rollback", options.Rollback) + } + + query.Set("version", strconv.FormatUint(version.Index, 10)) + + if err := validateServiceSpec(service); err != nil { + return types.ServiceUpdateResponse{}, err + } + + var imgPlatforms []swarm.Platform + // ensure that the image is tagged + if service.TaskTemplate.ContainerSpec != nil { + if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" { + service.TaskTemplate.ContainerSpec.Image = taggedImg + } + if options.QueryRegistry { + var img string + img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.ContainerSpec.Image, options.EncodedRegistryAuth) + if img != "" { + service.TaskTemplate.ContainerSpec.Image = img + } + } + } + + // ensure that the image is tagged + if service.TaskTemplate.PluginSpec != nil { + if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" { + service.TaskTemplate.PluginSpec.Remote = taggedImg + } + if options.QueryRegistry { + var img string + img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.PluginSpec.Remote, options.EncodedRegistryAuth) + if img != "" { + service.TaskTemplate.PluginSpec.Remote = img + } + } + } + + if service.TaskTemplate.Placement == nil && len(imgPlatforms) > 0 { + service.TaskTemplate.Placement = &swarm.Placement{} + } + if len(imgPlatforms) > 0 { + service.TaskTemplate.Placement.Platforms = imgPlatforms + } + + var response types.ServiceUpdateResponse + resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + + if distErr != nil { + response.Warnings = append(response.Warnings, digestWarning(service.TaskTemplate.ContainerSpec.Image)) + } + + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/moby/moby/client/service_update_test.go b/vendor/github.com/moby/moby/client/service_update_test.go new file mode 100644 index 000000000..76bea176b --- /dev/null +++ b/vendor/github.com/moby/moby/client/service_update_test.go @@ -0,0 +1,77 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" +) + +func TestServiceUpdateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ServiceUpdate(context.Background(), "service_id", swarm.Version{}, swarm.ServiceSpec{}, types.ServiceUpdateOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestServiceUpdate(t *testing.T) { + expectedURL := "/services/service_id/update" + + updateCases := []struct { + swarmVersion swarm.Version + expectedVersion string 
+ }{
+ {
+ expectedVersion: "0",
+ },
+ {
+ swarmVersion: swarm.Version{
+ Index: 0,
+ },
+ expectedVersion: "0",
+ },
+ {
+ swarmVersion: swarm.Version{
+ Index: 10,
+ },
+ expectedVersion: "10",
+ },
+ }
+
+ for _, updateCase := range updateCases {
+ client := &Client{
+ client: newMockClient(func(req *http.Request) (*http.Response, error) {
+ if !strings.HasPrefix(req.URL.Path, expectedURL) {
+ return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL)
+ }
+ if req.Method != "POST" {
+ return nil, fmt.Errorf("expected POST method, got %s", req.Method)
+ }
+ version := req.URL.Query().Get("version")
+ if version != updateCase.expectedVersion {
+ return nil, fmt.Errorf("version not set in URL query properly, expected '%s', got %s", updateCase.expectedVersion, version)
+ }
+ return &http.Response{
+ StatusCode: http.StatusOK,
+ Body: ioutil.NopCloser(bytes.NewReader([]byte("{}"))),
+ }, nil
+ }),
+ }
+
+ _, err := client.ServiceUpdate(context.Background(), "service_id", updateCase.swarmVersion, swarm.ServiceSpec{}, types.ServiceUpdateOptions{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+}
diff --git a/vendor/github.com/moby/moby/client/session.go b/vendor/github.com/moby/moby/client/session.go
new file mode 100644
index 000000000..8ee916213
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/session.go
@@ -0,0 +1,19 @@
+package client
+
+import (
+ "net"
+ "net/http"
+
+ "golang.org/x/net/context"
+)
+
+// DialSession returns a connection that can be used for communication with the daemon
+func (cli *Client) DialSession(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) {
+ req, err := http.NewRequest("POST", "/session", nil)
+ if err != nil {
+ return nil, err
+ }
+ req = cli.addHeaders(req, meta)
+
+ return cli.setupHijackConn(req, proto)
+}
diff --git a/vendor/github.com/moby/moby/client/session/filesync/diffcopy.go b/vendor/github.com/moby/moby/client/session/filesync/diffcopy.go
new file mode 100644
index 000000000..533847acd
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/session/filesync/diffcopy.go
@@ -0,0 +1,31 @@
+package filesync
+
+import (
+ "time"
+
+ "google.golang.org/grpc"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/tonistiigi/fsutil"
+)
+
+func sendDiffCopy(stream grpc.Stream, dir string, includes, excludes []string, progress progressCb) error {
+ return fsutil.Send(stream.Context(), stream, dir, &fsutil.WalkOpt{
+ ExcludePatterns: excludes,
+ IncludePaths: includes, // TODO: rename IncludePatterns
+ }, progress)
+}
+
+func recvDiffCopy(ds grpc.Stream, dest string, cu CacheUpdater) error {
+ st := time.Now()
+ defer func() {
+ logrus.Debugf("diffcopy took: %v", time.Since(st))
+ }()
+ var cf fsutil.ChangeFunc
+ if cu != nil {
+ cu.MarkSupported(true)
+ cf = cu.HandleChange
+ }
+
+ return fsutil.Receive(ds.Context(), ds, dest, cf)
+}
diff --git a/vendor/github.com/moby/moby/client/session/filesync/filesync.go b/vendor/github.com/moby/moby/client/session/filesync/filesync.go
new file mode 100644
index 000000000..9a2ffc857
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/session/filesync/filesync.go
@@ -0,0 +1,183 @@
+package filesync
+
+import (
+ "os"
+ "strings"
+
+ "github.com/docker/docker/client/session"
+ "github.com/pkg/errors"
+ "github.com/tonistiigi/fsutil"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/metadata"
+)
+
+const (
+ keyOverrideExcludes = "override-excludes"
+ keyIncludePatterns = "include-patterns"
+)
+
+type fsSyncProvider struct {
+ root string
+ excludes
[]string + p progressCb + doneCh chan error +} + +// NewFSSyncProvider creates a new provider for sending files from client +func NewFSSyncProvider(root string, excludes []string) session.Attachable { + p := &fsSyncProvider{ + root: root, + excludes: excludes, + } + return p +} + +func (sp *fsSyncProvider) Register(server *grpc.Server) { + RegisterFileSyncServer(server, sp) +} + +func (sp *fsSyncProvider) DiffCopy(stream FileSync_DiffCopyServer) error { + return sp.handle("diffcopy", stream) +} +func (sp *fsSyncProvider) TarStream(stream FileSync_TarStreamServer) error { + return sp.handle("tarstream", stream) +} + +func (sp *fsSyncProvider) handle(method string, stream grpc.ServerStream) error { + var pr *protocol + for _, p := range supportedProtocols { + if method == p.name && isProtoSupported(p.name) { + pr = &p + break + } + } + if pr == nil { + return errors.New("failed to negotiate protocol") + } + + opts, _ := metadata.FromContext(stream.Context()) // if no metadata continue with empty object + + var excludes []string + if len(opts[keyOverrideExcludes]) == 0 || opts[keyOverrideExcludes][0] != "true" { + excludes = sp.excludes + } + includes := opts[keyIncludePatterns] + + var progress progressCb + if sp.p != nil { + progress = sp.p + sp.p = nil + } + + var doneCh chan error + if sp.doneCh != nil { + doneCh = sp.doneCh + sp.doneCh = nil + } + err := pr.sendFn(stream, sp.root, includes, excludes, progress) + if doneCh != nil { + if err != nil { + doneCh <- err + } + close(doneCh) + } + return err +} + +func (sp *fsSyncProvider) SetNextProgressCallback(f func(int, bool), doneCh chan error) { + sp.p = f + sp.doneCh = doneCh +} + +type progressCb func(int, bool) + +type protocol struct { + name string + sendFn func(stream grpc.Stream, srcDir string, includes, excludes []string, progress progressCb) error + recvFn func(stream grpc.Stream, destDir string, cu CacheUpdater) error +} + +func isProtoSupported(p string) bool { + // TODO: this should be removed after testing if stability is confirmed + if override := os.Getenv("BUILD_STREAM_PROTOCOL"); override != "" { + return strings.EqualFold(p, override) + } + return true +} + +var supportedProtocols = []protocol{ + { + name: "diffcopy", + sendFn: sendDiffCopy, + recvFn: recvDiffCopy, + }, + { + name: "tarstream", + sendFn: sendTarStream, + recvFn: recvTarStream, + }, +} + +// FSSendRequestOpt defines options for FSSend request +type FSSendRequestOpt struct { + IncludePatterns []string + OverrideExcludes bool + DestDir string + CacheUpdater CacheUpdater +} + +// CacheUpdater is an object capable of sending notifications for the cache hash changes +type CacheUpdater interface { + MarkSupported(bool) + HandleChange(fsutil.ChangeKind, string, os.FileInfo, error) error +} + +// FSSync initializes a transfer of files +func FSSync(ctx context.Context, c session.Caller, opt FSSendRequestOpt) error { + var pr *protocol + for _, p := range supportedProtocols { + if isProtoSupported(p.name) && c.Supports(session.MethodURL(_FileSync_serviceDesc.ServiceName, p.name)) { + pr = &p + break + } + } + if pr == nil { + return errors.New("no fssync handlers") + } + + opts := make(map[string][]string) + if opt.OverrideExcludes { + opts[keyOverrideExcludes] = []string{"true"} + } + + if opt.IncludePatterns != nil { + opts[keyIncludePatterns] = opt.IncludePatterns + } + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client := NewFileSyncClient(c.Conn()) + + var stream grpc.ClientStream + + ctx = metadata.NewContext(ctx, opts) + + switch pr.name { 
+ case "tarstream": + cc, err := client.TarStream(ctx) + if err != nil { + return err + } + stream = cc + case "diffcopy": + cc, err := client.DiffCopy(ctx) + if err != nil { + return err + } + stream = cc + } + + return pr.recvFn(stream, opt.DestDir, opt.CacheUpdater) +} diff --git a/vendor/github.com/moby/moby/client/session/filesync/filesync.pb.go b/vendor/github.com/moby/moby/client/session/filesync/filesync.pb.go new file mode 100644 index 000000000..c6ed66638 --- /dev/null +++ b/vendor/github.com/moby/moby/client/session/filesync/filesync.pb.go @@ -0,0 +1,575 @@ +// Code generated by protoc-gen-gogo. +// source: filesync.proto +// DO NOT EDIT! + +/* +Package filesync is a generated protocol buffer package. + +It is generated from these files: + filesync.proto + +It has these top-level messages: + BytesMessage +*/ +package filesync + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import bytes "bytes" + +import strings "strings" +import reflect "reflect" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// BytesMessage contains a chunk of byte data +type BytesMessage struct { + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *BytesMessage) Reset() { *m = BytesMessage{} } +func (*BytesMessage) ProtoMessage() {} +func (*BytesMessage) Descriptor() ([]byte, []int) { return fileDescriptorFilesync, []int{0} } + +func (m *BytesMessage) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func init() { + proto.RegisterType((*BytesMessage)(nil), "moby.filesync.v1.BytesMessage") +} +func (this *BytesMessage) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*BytesMessage) + if !ok { + that2, ok := that.(BytesMessage) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if !bytes.Equal(this.Data, that1.Data) { + return false + } + return true +} +func (this *BytesMessage) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&filesync.BytesMessage{") + s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringFilesync(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for FileSync service + +type FileSyncClient interface { + DiffCopy(ctx context.Context, opts ...grpc.CallOption) (FileSync_DiffCopyClient, error) + TarStream(ctx context.Context, opts ...grpc.CallOption) (FileSync_TarStreamClient, error) +} + +type fileSyncClient struct { + cc *grpc.ClientConn +} + +func NewFileSyncClient(cc *grpc.ClientConn) FileSyncClient { + return &fileSyncClient{cc} +} + +func (c *fileSyncClient) DiffCopy(ctx context.Context, opts ...grpc.CallOption) (FileSync_DiffCopyClient, error) { + stream, err := grpc.NewClientStream(ctx, &_FileSync_serviceDesc.Streams[0], c.cc, "/moby.filesync.v1.FileSync/DiffCopy", opts...) + if err != nil { + return nil, err + } + x := &fileSyncDiffCopyClient{stream} + return x, nil +} + +type FileSync_DiffCopyClient interface { + Send(*BytesMessage) error + Recv() (*BytesMessage, error) + grpc.ClientStream +} + +type fileSyncDiffCopyClient struct { + grpc.ClientStream +} + +func (x *fileSyncDiffCopyClient) Send(m *BytesMessage) error { + return x.ClientStream.SendMsg(m) +} + +func (x *fileSyncDiffCopyClient) Recv() (*BytesMessage, error) { + m := new(BytesMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *fileSyncClient) TarStream(ctx context.Context, opts ...grpc.CallOption) (FileSync_TarStreamClient, error) { + stream, err := grpc.NewClientStream(ctx, &_FileSync_serviceDesc.Streams[1], c.cc, "/moby.filesync.v1.FileSync/TarStream", opts...) + if err != nil { + return nil, err + } + x := &fileSyncTarStreamClient{stream} + return x, nil +} + +type FileSync_TarStreamClient interface { + Send(*BytesMessage) error + Recv() (*BytesMessage, error) + grpc.ClientStream +} + +type fileSyncTarStreamClient struct { + grpc.ClientStream +} + +func (x *fileSyncTarStreamClient) Send(m *BytesMessage) error { + return x.ClientStream.SendMsg(m) +} + +func (x *fileSyncTarStreamClient) Recv() (*BytesMessage, error) { + m := new(BytesMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for FileSync service + +type FileSyncServer interface { + DiffCopy(FileSync_DiffCopyServer) error + TarStream(FileSync_TarStreamServer) error +} + +func RegisterFileSyncServer(s *grpc.Server, srv FileSyncServer) { + s.RegisterService(&_FileSync_serviceDesc, srv) +} + +func _FileSync_DiffCopy_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(FileSyncServer).DiffCopy(&fileSyncDiffCopyServer{stream}) +} + +type FileSync_DiffCopyServer interface { + Send(*BytesMessage) error + Recv() (*BytesMessage, error) + grpc.ServerStream +} + +type fileSyncDiffCopyServer struct { + grpc.ServerStream +} + +func (x *fileSyncDiffCopyServer) Send(m *BytesMessage) error { + return x.ServerStream.SendMsg(m) +} + +func (x *fileSyncDiffCopyServer) Recv() (*BytesMessage, error) { + m := new(BytesMessage) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _FileSync_TarStream_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(FileSyncServer).TarStream(&fileSyncTarStreamServer{stream}) +} + +type FileSync_TarStreamServer interface { + Send(*BytesMessage) error + Recv() (*BytesMessage, error) + grpc.ServerStream +} + +type fileSyncTarStreamServer struct { + grpc.ServerStream +} + +func (x *fileSyncTarStreamServer) Send(m *BytesMessage) error { + return x.ServerStream.SendMsg(m) +} + +func (x *fileSyncTarStreamServer) Recv() 
(*BytesMessage, error) { + m := new(BytesMessage) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _FileSync_serviceDesc = grpc.ServiceDesc{ + ServiceName: "moby.filesync.v1.FileSync", + HandlerType: (*FileSyncServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "DiffCopy", + Handler: _FileSync_DiffCopy_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "TarStream", + Handler: _FileSync_TarStream_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "filesync.proto", +} + +func (m *BytesMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BytesMessage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Data) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintFilesync(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + return i, nil +} + +func encodeFixed64Filesync(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Filesync(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintFilesync(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *BytesMessage) Size() (n int) { + var l int + _ = l + l = len(m.Data) + if l > 0 { + n += 1 + l + sovFilesync(uint64(l)) + } + return n +} + +func sovFilesync(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozFilesync(x uint64) (n int) { + return sovFilesync(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *BytesMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BytesMessage{`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `}`, + }, "") + return s +} +func valueToStringFilesync(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *BytesMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFilesync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BytesMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BytesMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFilesync + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthFilesync + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipFilesync(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthFilesync + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipFilesync(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFilesync + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFilesync + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFilesync + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthFilesync + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFilesync + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipFilesync(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthFilesync = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowFilesync = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("filesync.proto", fileDescriptorFilesync) } + +var fileDescriptorFilesync = []byte{ + // 198 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x4b, 0xcb, 0xcc, 0x49, + 0x2d, 0xae, 0xcc, 0x4b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0xc8, 0xcd, 0x4f, 0xaa, + 0xd4, 0x83, 0x0b, 0x96, 0x19, 0x2a, 0x29, 0x71, 0xf1, 0x38, 0x55, 0x96, 0xa4, 0x16, 0xfb, 0xa6, + 0x16, 0x17, 0x27, 0xa6, 0xa7, 0x0a, 0x09, 0x71, 0xb1, 0xa4, 0x24, 0x96, 0x24, 0x4a, 0x30, 0x2a, + 0x30, 0x6a, 0xf0, 0x04, 0x81, 0xd9, 0x46, 0xab, 0x19, 0xb9, 0x38, 0xdc, 0x32, 0x73, 0x52, 0x83, + 0x2b, 0xf3, 0x92, 0x85, 0xfc, 0xb8, 0x38, 0x5c, 0x32, 0xd3, 0xd2, 0x9c, 0xf3, 0x0b, 0x2a, 0x85, + 0xe4, 0xf4, 0xd0, 0xcd, 0xd3, 0x43, 0x36, 0x4c, 0x8a, 0x80, 0xbc, 0x06, 0xa3, 0x01, 0xa3, 0x90, + 0x3f, 0x17, 0x67, 0x48, 0x62, 0x51, 0x70, 0x49, 
0x51, 0x6a, 0x62, 0x2e, 0x35, 0x0c, 0x74, 0x32, + 0xbb, 0xf0, 0x50, 0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x0f, 0x0f, 0xe5, 0x18, 0x1b, 0x1e, 0xc9, + 0x31, 0xae, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, + 0xc9, 0x31, 0xbe, 0x78, 0x24, 0xc7, 0xf0, 0xe1, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x51, + 0x1c, 0x30, 0xb3, 0x92, 0xd8, 0xc0, 0x41, 0x64, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x5f, 0x0c, + 0x8d, 0xc5, 0x34, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/moby/moby/client/session/filesync/filesync.proto b/vendor/github.com/moby/moby/client/session/filesync/filesync.proto new file mode 100644 index 000000000..2fd5b3ec8 --- /dev/null +++ b/vendor/github.com/moby/moby/client/session/filesync/filesync.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; + +package moby.filesync.v1; + +option go_package = "filesync"; + +service FileSync{ + rpc DiffCopy(stream BytesMessage) returns (stream BytesMessage); + rpc TarStream(stream BytesMessage) returns (stream BytesMessage); +} + +// BytesMessage contains a chunk of byte data +message BytesMessage{ + bytes data = 1; +} \ No newline at end of file diff --git a/vendor/github.com/moby/moby/client/session/filesync/filesync_test.go b/vendor/github.com/moby/moby/client/session/filesync/filesync_test.go new file mode 100644 index 000000000..b48c08b82 --- /dev/null +++ b/vendor/github.com/moby/moby/client/session/filesync/filesync_test.go @@ -0,0 +1,71 @@ +package filesync + +import ( + "context" + "io/ioutil" + "path/filepath" + "testing" + + "github.com/docker/docker/client/session" + "github.com/docker/docker/client/session/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" +) + +func TestFileSyncIncludePatterns(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "fsynctest") + require.NoError(t, err) + + destDir, err := ioutil.TempDir("", "fsynctest") + require.NoError(t, err) + + err = ioutil.WriteFile(filepath.Join(tmpDir, "foo"), []byte("content1"), 0600) + require.NoError(t, err) + + err = ioutil.WriteFile(filepath.Join(tmpDir, "bar"), []byte("content2"), 0600) + require.NoError(t, err) + + s, err := session.NewSession("foo", "bar") + require.NoError(t, err) + + m, err := session.NewManager() + require.NoError(t, err) + + fs := NewFSSyncProvider(tmpDir, nil) + s.Allow(fs) + + dialer := session.Dialer(testutil.TestStream(testutil.Handler(m.HandleConn))) + + g, ctx := errgroup.WithContext(context.Background()) + + g.Go(func() error { + return s.Run(ctx, dialer) + }) + + g.Go(func() (reterr error) { + c, err := m.Get(ctx, s.UUID()) + if err != nil { + return err + } + if err := FSSync(ctx, c, FSSendRequestOpt{ + DestDir: destDir, + IncludePatterns: []string{"ba*"}, + }); err != nil { + return err + } + + _, err = ioutil.ReadFile(filepath.Join(destDir, "foo")) + assert.Error(t, err) + + dt, err := ioutil.ReadFile(filepath.Join(destDir, "bar")) + if err != nil { + return err + } + assert.Equal(t, "content2", string(dt)) + return s.Close() + }) + + err = g.Wait() + require.NoError(t, err) +} diff --git a/vendor/github.com/moby/moby/client/session/filesync/generate.go b/vendor/github.com/moby/moby/client/session/filesync/generate.go new file mode 100644 index 000000000..261e87627 --- /dev/null +++ b/vendor/github.com/moby/moby/client/session/filesync/generate.go @@ -0,0 +1,3 @@ +package filesync + +//go:generate protoc --gogoslick_out=plugins=grpc:. 
filesync.proto diff --git a/vendor/github.com/moby/moby/client/session/filesync/tarstream.go b/vendor/github.com/moby/moby/client/session/filesync/tarstream.go new file mode 100644 index 000000000..da139ebf5 --- /dev/null +++ b/vendor/github.com/moby/moby/client/session/filesync/tarstream.go @@ -0,0 +1,83 @@ +package filesync + +import ( + "io" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/pkg/errors" + "google.golang.org/grpc" +) + +func sendTarStream(stream grpc.Stream, dir string, includes, excludes []string, progress progressCb) error { + a, err := archive.TarWithOptions(dir, &archive.TarOptions{ + ExcludePatterns: excludes, + }) + if err != nil { + return err + } + + size := 0 + buf := make([]byte, 1<<15) + t := new(BytesMessage) + for { + n, err := a.Read(buf) + if err != nil { + if err == io.EOF { + break + } + return err + } + t.Data = buf[:n] + + if err := stream.SendMsg(t); err != nil { + return err + } + size += n + if progress != nil { + progress(size, false) + } + } + if progress != nil { + progress(size, true) + } + return nil +} + +func recvTarStream(ds grpc.Stream, dest string, cs CacheUpdater) error { + + pr, pw := io.Pipe() + + go func() { + var ( + err error + t = new(BytesMessage) + ) + for { + if err = ds.RecvMsg(t); err != nil { + if err == io.EOF { + err = nil + } + break + } + _, err = pw.Write(t.Data) + if err != nil { + break + } + } + if err = pw.CloseWithError(err); err != nil { + logrus.Errorf("failed to close tar transfer pipe") + } + }() + + decompressedStream, err := archive.DecompressStream(pr) + if err != nil { + return errors.Wrap(err, "failed to decompress stream") + } + + if err := chrootarchive.Untar(decompressedStream, dest, nil); err != nil { + return errors.Wrap(err, "failed to untar context") + } + return nil +} diff --git a/vendor/github.com/moby/moby/client/session/grpc.go b/vendor/github.com/moby/moby/client/session/grpc.go new file mode 100644 index 000000000..0f20b1504 --- /dev/null +++ b/vendor/github.com/moby/moby/client/session/grpc.go @@ -0,0 +1,62 @@ +package session + +import ( + "net" + "time" + + "github.com/Sirupsen/logrus" + "github.com/pkg/errors" + "golang.org/x/net/context" + "golang.org/x/net/http2" + "google.golang.org/grpc" + "google.golang.org/grpc/health/grpc_health_v1" +) + +func serve(ctx context.Context, grpcServer *grpc.Server, conn net.Conn) { + go func() { + <-ctx.Done() + conn.Close() + }() + logrus.Debugf("serving grpc connection") + (&http2.Server{}).ServeConn(conn, &http2.ServeConnOpts{Handler: grpcServer}) +} + +func grpcClientConn(ctx context.Context, conn net.Conn) (context.Context, *grpc.ClientConn, error) { + dialOpt := grpc.WithDialer(func(addr string, d time.Duration) (net.Conn, error) { + return conn, nil + }) + + cc, err := grpc.DialContext(ctx, "", dialOpt, grpc.WithInsecure()) + if err != nil { + return nil, nil, errors.Wrap(err, "failed to create grpc client") + } + + ctx, cancel := context.WithCancel(ctx) + go monitorHealth(ctx, cc, cancel) + + return ctx, cc, nil +} + +func monitorHealth(ctx context.Context, cc *grpc.ClientConn, cancelConn func()) { + defer cancelConn() + defer cc.Close() + + ticker := time.NewTicker(500 * time.Millisecond) + defer ticker.Stop() + healthClient := grpc_health_v1.NewHealthClient(cc) + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + <-ticker.C + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + _, err := healthClient.Check(ctx, 
&grpc_health_v1.HealthCheckRequest{}) + cancel() + if err != nil { + return + } + } + } +} diff --git a/vendor/github.com/moby/moby/client/session/manager.go b/vendor/github.com/moby/moby/client/session/manager.go new file mode 100644 index 000000000..9523e6f31 --- /dev/null +++ b/vendor/github.com/moby/moby/client/session/manager.go @@ -0,0 +1,202 @@ +package session + +import ( + "net" + "net/http" + "strings" + "sync" + + "github.com/pkg/errors" + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +// Caller can invoke requests on the session +type Caller interface { + Context() context.Context + Supports(method string) bool + Conn() *grpc.ClientConn + Name() string + SharedKey() string +} + +type client struct { + Session + cc *grpc.ClientConn + supported map[string]struct{} +} + +// Manager is a controller for accessing currently active sessions +type Manager struct { + sessions map[string]*client + mu sync.Mutex + updateCondition *sync.Cond +} + +// NewManager returns a new Manager +func NewManager() (*Manager, error) { + sm := &Manager{ + sessions: make(map[string]*client), + } + sm.updateCondition = sync.NewCond(&sm.mu) + return sm, nil +} + +// HandleHTTPRequest handles an incoming HTTP request +func (sm *Manager) HandleHTTPRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) error { + hijacker, ok := w.(http.Hijacker) + if !ok { + return errors.New("handler does not support hijack") + } + + uuid := r.Header.Get(headerSessionUUID) + + proto := r.Header.Get("Upgrade") + + sm.mu.Lock() + if _, ok := sm.sessions[uuid]; ok { + sm.mu.Unlock() + return errors.Errorf("session %s already exists", uuid) + } + + if proto == "" { + sm.mu.Unlock() + return errors.New("no upgrade proto in request") + } + + if proto != "h2c" { + sm.mu.Unlock() + return errors.Errorf("protocol %s not supported", proto) + } + + conn, _, err := hijacker.Hijack() + if err != nil { + sm.mu.Unlock() + return errors.Wrap(err, "failed to hijack connection") + } + + resp := &http.Response{ + StatusCode: http.StatusSwitchingProtocols, + ProtoMajor: 1, + ProtoMinor: 1, + Header: http.Header{}, + } + resp.Header.Set("Connection", "Upgrade") + resp.Header.Set("Upgrade", proto) + + // set raw mode + conn.Write([]byte{}) + resp.Write(conn) + + return sm.handleConn(ctx, conn, r.Header) +} + +// HandleConn handles an incoming raw connection +func (sm *Manager) HandleConn(ctx context.Context, conn net.Conn, opts map[string][]string) error { + sm.mu.Lock() + return sm.handleConn(ctx, conn, opts) +} + +// caller needs to take lock, this function will release it +func (sm *Manager) handleConn(ctx context.Context, conn net.Conn, opts map[string][]string) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + h := http.Header(opts) + uuid := h.Get(headerSessionUUID) + name := h.Get(headerSessionName) + sharedKey := h.Get(headerSessionSharedKey) + + ctx, cc, err := grpcClientConn(ctx, conn) + if err != nil { + sm.mu.Unlock() + return err + } + + c := &client{ + Session: Session{ + uuid: uuid, + name: name, + sharedKey: sharedKey, + ctx: ctx, + cancelCtx: cancel, + done: make(chan struct{}), + }, + cc: cc, + supported: make(map[string]struct{}), + } + + for _, m := range opts[headerSessionMethod] { + c.supported[strings.ToLower(m)] = struct{}{} + } + sm.sessions[uuid] = c + sm.updateCondition.Broadcast() + sm.mu.Unlock() + + defer func() { + sm.mu.Lock() + delete(sm.sessions, uuid) + sm.mu.Unlock() + }() + + <-c.ctx.Done() + conn.Close() + close(c.done) + + return nil +} + +// Get returns a 
session by UUID
+func (sm *Manager) Get(ctx context.Context, uuid string) (Caller, error) {
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ go func() {
+ select {
+ case <-ctx.Done():
+ sm.updateCondition.Broadcast()
+ }
+ }()
+
+ var c *client
+
+ sm.mu.Lock()
+ for {
+ select {
+ case <-ctx.Done():
+ sm.mu.Unlock()
+ return nil, errors.Wrapf(ctx.Err(), "no active session for %s", uuid)
+ default:
+ }
+ var ok bool
+ c, ok = sm.sessions[uuid]
+ if !ok || c.closed() {
+ sm.updateCondition.Wait()
+ continue
+ }
+ sm.mu.Unlock()
+ break
+ }
+
+ return c, nil
+}
+
+func (c *client) Context() context.Context {
+ return c.context()
+}
+
+func (c *client) Name() string {
+ return c.name
+}
+
+func (c *client) SharedKey() string {
+ return c.sharedKey
+}
+
+func (c *client) Supports(url string) bool {
+ _, ok := c.supported[strings.ToLower(url)]
+ return ok
+}
+func (c *client) Conn() *grpc.ClientConn {
+ return c.cc
+}
diff --git a/vendor/github.com/moby/moby/client/session/session.go b/vendor/github.com/moby/moby/client/session/session.go
new file mode 100644
index 000000000..147486a75
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/session/session.go
@@ -0,0 +1,117 @@
+package session
+
+import (
+ "net"
+
+ "github.com/docker/docker/pkg/stringid"
+ "github.com/pkg/errors"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/health"
+ "google.golang.org/grpc/health/grpc_health_v1"
+)
+
+const (
+ headerSessionUUID = "X-Docker-Expose-Session-Uuid"
+ headerSessionName = "X-Docker-Expose-Session-Name"
+ headerSessionSharedKey = "X-Docker-Expose-Session-Sharedkey"
+ headerSessionMethod = "X-Docker-Expose-Session-Grpc-Method"
+)
+
+// Dialer returns a connection that can be used by the session
+type Dialer func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error)
+
+// Attachable defines a feature that can be exposed on a session
+type Attachable interface {
+ Register(*grpc.Server)
+}
+
+// Session is a long-running connection between the client and the daemon
+type Session struct {
+ uuid string
+ name string
+ sharedKey string
+ ctx context.Context
+ cancelCtx func()
+ done chan struct{}
+ grpcServer *grpc.Server
+}
+
+// NewSession returns a new long-running session
+func NewSession(name, sharedKey string) (*Session, error) {
+ uuid := stringid.GenerateRandomID()
+ s := &Session{
+ uuid: uuid,
+ name: name,
+ sharedKey: sharedKey,
+ grpcServer: grpc.NewServer(),
+ }
+
+ grpc_health_v1.RegisterHealthServer(s.grpcServer, health.NewServer())
+
+ return s, nil
+}
+
+// Allow enables a given service to be reachable through the grpc session
+func (s *Session) Allow(a Attachable) {
+ a.Register(s.grpcServer)
+}
+
+// UUID returns the unique identifier for the session
+func (s *Session) UUID() string {
+ return s.uuid
+}
+
+// Run activates the session
+func (s *Session) Run(ctx context.Context, dialer Dialer) error {
+ ctx, cancel := context.WithCancel(ctx)
+ s.cancelCtx = cancel
+ s.done = make(chan struct{})
+
+ defer cancel()
+ defer close(s.done)
+
+ meta := make(map[string][]string)
+ meta[headerSessionUUID] = []string{s.uuid}
+ meta[headerSessionName] = []string{s.name}
+ meta[headerSessionSharedKey] = []string{s.sharedKey}
+
+ for name, svc := range s.grpcServer.GetServiceInfo() {
+ for _, method := range svc.Methods {
+ meta[headerSessionMethod] = append(meta[headerSessionMethod], MethodURL(name, method.Name))
+ }
+ }
+ conn, err := dialer(ctx, "h2c", meta)
+ if err != nil {
+ return errors.Wrap(err, "failed to dial gRPC")
+ }
+
serve(ctx, s.grpcServer, conn)
+ return nil
+}
+
+// Close closes the session
+func (s *Session) Close() error {
+ if s.cancelCtx != nil && s.done != nil {
+ s.cancelCtx()
+ <-s.done
+ }
+ return nil
+}
+
+func (s *Session) context() context.Context {
+ return s.ctx
+}
+
+func (s *Session) closed() bool {
+ select {
+ case <-s.context().Done():
+ return true
+ default:
+ return false
+ }
+}
+
+// MethodURL returns a gRPC method URL for service and method name
+func MethodURL(s, m string) string {
+ return "/" + s + "/" + m
+}
diff --git a/vendor/github.com/moby/moby/client/session/testutil/testutil.go b/vendor/github.com/moby/moby/client/session/testutil/testutil.go
new file mode 100644
index 000000000..2e145d900
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/session/testutil/testutil.go
@@ -0,0 +1,70 @@
+package testutil
+
+import (
+ "io"
+ "net"
+ "time"
+
+ "github.com/Sirupsen/logrus"
+ "golang.org/x/net/context"
+)
+
+// Handler is a function called to handle an incoming connection
+type Handler func(ctx context.Context, conn net.Conn, meta map[string][]string) error
+
+// Dialer is a function for dialing an outgoing connection
+type Dialer func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error)
+
+// TestStream creates an in-memory session dialer for a handler function
+func TestStream(handler Handler) Dialer {
+ s1, s2 := sockPair()
+ return func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) {
+ go func() {
+ err := handler(context.TODO(), s1, meta)
+ if err != nil {
+ logrus.Error(err)
+ }
+ s1.Close()
+ }()
+ return s2, nil
+ }
+}
+
+func sockPair() (*sock, *sock) {
+ pr1, pw1 := io.Pipe()
+ pr2, pw2 := io.Pipe()
+ return &sock{pw1, pr2, pw1}, &sock{pw2, pr1, pw2}
+}
+
+type sock struct {
+ io.Writer
+ io.Reader
+ io.Closer
+}
+
+func (s *sock) LocalAddr() net.Addr {
+ return dummyAddr{}
+}
+func (s *sock) RemoteAddr() net.Addr {
+ return dummyAddr{}
+}
+func (s *sock) SetDeadline(t time.Time) error {
+ return nil
+}
+func (s *sock) SetReadDeadline(t time.Time) error {
+ return nil
+}
+func (s *sock) SetWriteDeadline(t time.Time) error {
+ return nil
+}
+
+type dummyAddr struct {
+}
+
+func (d dummyAddr) Network() string {
+ return "tcp"
+}
+
+func (d dummyAddr) String() string {
+ return "localhost"
+}
diff --git a/vendor/github.com/moby/moby/client/swarm_get_unlock_key.go b/vendor/github.com/moby/moby/client/swarm_get_unlock_key.go
new file mode 100644
index 000000000..be28d3262
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/swarm_get_unlock_key.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// SwarmGetUnlockKey retrieves the swarm's unlock key.
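+// Illustrative usage (a sketch; assumes an autolock-enabled swarm, and the
+// key shown is a placeholder):
+//
+//	resp, err := cli.SwarmGetUnlockKey(context.Background())
+//	if err == nil {
+//		fmt.Println(resp.UnlockKey) // e.g. "SWMKEY-1-..."
+//	}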
+func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) { + serverResp, err := cli.get(ctx, "/swarm/unlockkey", nil, nil) + if err != nil { + return types.SwarmUnlockKeyResponse{}, err + } + + var response types.SwarmUnlockKeyResponse + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/vendor/github.com/moby/moby/client/swarm_get_unlock_key_test.go b/vendor/github.com/moby/moby/client/swarm_get_unlock_key_test.go new file mode 100644 index 000000000..8dd08d95f --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_get_unlock_key_test.go @@ -0,0 +1,60 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/net/context" +) + +func TestSwarmGetUnlockKeyError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.SwarmGetUnlockKey(context.Background()) + testutil.ErrorContains(t, err, "Error response from daemon: Server error") +} + +func TestSwarmGetUnlockKey(t *testing.T) { + expectedURL := "/swarm/unlockkey" + unlockKey := "SWMKEY-1-y6guTZNTwpQeTL5RhUfOsdBdXoQjiB2GADHSRJvbXeE" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "GET" { + return nil, fmt.Errorf("expected GET method, got %s", req.Method) + } + + key := types.SwarmUnlockKeyResponse{ + UnlockKey: unlockKey, + } + + b, err := json.Marshal(key) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + resp, err := client.SwarmGetUnlockKey(context.Background()) + require.NoError(t, err) + assert.Equal(t, unlockKey, resp.UnlockKey) +} diff --git a/vendor/github.com/moby/moby/client/swarm_init.go b/vendor/github.com/moby/moby/client/swarm_init.go new file mode 100644 index 000000000..9e65e1cca --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_init.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SwarmInit initializes the swarm. 
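+// The decoded string is the ID of the newly created manager node. A minimal
+// sketch, mirroring the request used in the tests below:
+//
+//	nodeID, err := cli.SwarmInit(context.Background(), swarm.InitRequest{
+//		ListenAddr: "0.0.0.0:2377",
+//	})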
+func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) { + serverResp, err := cli.post(ctx, "/swarm/init", nil, req, nil) + if err != nil { + return "", err + } + + var response string + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/vendor/github.com/moby/moby/client/swarm_init_test.go b/vendor/github.com/moby/moby/client/swarm_init_test.go new file mode 100644 index 000000000..811155aff --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_init_test.go @@ -0,0 +1,54 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" +) + +func TestSwarmInitError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.SwarmInit(context.Background(), swarm.InitRequest{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSwarmInit(t *testing.T) { + expectedURL := "/swarm/init" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(`"body"`))), + }, nil + }), + } + + resp, err := client.SwarmInit(context.Background(), swarm.InitRequest{ + ListenAddr: "0.0.0.0:2377", + }) + if err != nil { + t.Fatal(err) + } + if resp != "body" { + t.Fatalf("Expected 'body', got %s", resp) + } +} diff --git a/vendor/github.com/moby/moby/client/swarm_inspect.go b/vendor/github.com/moby/moby/client/swarm_inspect.go new file mode 100644 index 000000000..77e72f846 --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_inspect.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SwarmInspect inspects the swarm. 
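+// Illustrative usage: the returned swarm.Swarm embeds ClusterInfo, whose ID
+// the test below asserts on.
+//
+//	sw, err := cli.SwarmInspect(context.Background())
+//	if err == nil {
+//		fmt.Println(sw.ID)
+//	}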
+func (cli *Client) SwarmInspect(ctx context.Context) (swarm.Swarm, error) { + serverResp, err := cli.get(ctx, "/swarm", nil, nil) + if err != nil { + return swarm.Swarm{}, err + } + + var response swarm.Swarm + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/vendor/github.com/moby/moby/client/swarm_inspect_test.go b/vendor/github.com/moby/moby/client/swarm_inspect_test.go new file mode 100644 index 000000000..6432d172b --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_inspect_test.go @@ -0,0 +1,56 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestSwarmInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.SwarmInspect(context.Background()) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSwarmInspect(t *testing.T) { + expectedURL := "/swarm" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(swarm.Swarm{ + ClusterInfo: swarm.ClusterInfo{ + ID: "swarm_id", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + swarmInspect, err := client.SwarmInspect(context.Background()) + if err != nil { + t.Fatal(err) + } + if swarmInspect.ID != "swarm_id" { + t.Fatalf("expected `swarm_id`, got %s", swarmInspect.ID) + } +} diff --git a/vendor/github.com/moby/moby/client/swarm_join.go b/vendor/github.com/moby/moby/client/swarm_join.go new file mode 100644 index 000000000..19e5192b9 --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_join.go @@ -0,0 +1,13 @@ +package client + +import ( + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SwarmJoin joins the swarm. 
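+// Illustrative usage (addresses and token are placeholders; a real join
+// needs a token issued by a manager):
+//
+//	err := cli.SwarmJoin(context.Background(), swarm.JoinRequest{
+//		ListenAddr:  "0.0.0.0:2377",
+//		RemoteAddrs: []string{"192.168.1.1:2377"},
+//		JoinToken:   "SWMTKN-1-...",
+//	})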
+func (cli *Client) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error { + resp, err := cli.post(ctx, "/swarm/join", nil, req, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/swarm_join_test.go b/vendor/github.com/moby/moby/client/swarm_join_test.go new file mode 100644 index 000000000..31ef2a76e --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_join_test.go @@ -0,0 +1,51 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" +) + +func TestSwarmJoinError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.SwarmJoin(context.Background(), swarm.JoinRequest{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSwarmJoin(t *testing.T) { + expectedURL := "/swarm/join" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.SwarmJoin(context.Background(), swarm.JoinRequest{ + ListenAddr: "0.0.0.0:2377", + }) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/swarm_leave.go b/vendor/github.com/moby/moby/client/swarm_leave.go new file mode 100644 index 000000000..3a205cf3b --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_leave.go @@ -0,0 +1,18 @@ +package client + +import ( + "net/url" + + "golang.org/x/net/context" +) + +// SwarmLeave leaves the swarm. 
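+// Illustrative usage: force must be set for a manager whose departure would
+// break quorum, mirroring "docker swarm leave --force".
+//
+//	err := cli.SwarmLeave(context.Background(), true)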
+func (cli *Client) SwarmLeave(ctx context.Context, force bool) error { + query := url.Values{} + if force { + query.Set("force", "1") + } + resp, err := cli.post(ctx, "/swarm/leave", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/swarm_leave_test.go b/vendor/github.com/moby/moby/client/swarm_leave_test.go new file mode 100644 index 000000000..c96dac812 --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_leave_test.go @@ -0,0 +1,66 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestSwarmLeaveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.SwarmLeave(context.Background(), false) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSwarmLeave(t *testing.T) { + expectedURL := "/swarm/leave" + + leaveCases := []struct { + force bool + expectedForce string + }{ + { + expectedForce: "", + }, + { + force: true, + expectedForce: "1", + }, + } + + for _, leaveCase := range leaveCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + force := req.URL.Query().Get("force") + if force != leaveCase.expectedForce { + return nil, fmt.Errorf("force not set in URL query properly. expected '%s', got %s", leaveCase.expectedForce, force) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.SwarmLeave(context.Background(), leaveCase.force) + if err != nil { + t.Fatal(err) + } + } +} diff --git a/vendor/github.com/moby/moby/client/swarm_unlock.go b/vendor/github.com/moby/moby/client/swarm_unlock.go new file mode 100644 index 000000000..9ee441fed --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_unlock.go @@ -0,0 +1,13 @@ +package client + +import ( + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SwarmUnlock unlocks a locked swarm. 
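+//
+// Illustrative call (editor's sketch; the unlock key is hypothetical):
+//
+//	err := cli.SwarmUnlock(context.Background(), swarm.UnlockRequest{
+//		UnlockKey: "SWMKEY-1-...",
+//	})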
+func (cli *Client) SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error { + serverResp, err := cli.post(ctx, "/swarm/unlock", nil, req, nil) + ensureReaderClosed(serverResp) + return err +} diff --git a/vendor/github.com/moby/moby/client/swarm_unlock_test.go b/vendor/github.com/moby/moby/client/swarm_unlock_test.go new file mode 100644 index 000000000..c242d4178 --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_unlock_test.go @@ -0,0 +1,49 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" +) + +func TestSwarmUnlockError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.SwarmUnlock(context.Background(), swarm.UnlockRequest{"SWMKEY-1-y6guTZNTwpQeTL5RhUfOsdBdXoQjiB2GADHSRJvbXeU"}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSwarmUnlock(t *testing.T) { + expectedURL := "/swarm/unlock" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.SwarmUnlock(context.Background(), swarm.UnlockRequest{"SWMKEY-1-y6guTZNTwpQeTL5RhUfOsdBdXoQjiB2GADHSRJvbXeU"}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/swarm_update.go b/vendor/github.com/moby/moby/client/swarm_update.go new file mode 100644 index 000000000..7245fd4e3 --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_update.go @@ -0,0 +1,22 @@ +package client + +import ( + "fmt" + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SwarmUpdate updates the swarm. 
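+//
+// Because the update is guarded by the cluster version, the usual pattern is
+// read-modify-write (editor's sketch):
+//
+//	sw, err := cli.SwarmInspect(ctx)
+//	if err == nil {
+//		err = cli.SwarmUpdate(ctx, sw.Version, sw.Spec, swarm.UpdateFlags{
+//			RotateWorkerToken: true,
+//		})
+//	}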
+func (cli *Client) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error { + query := url.Values{} + query.Set("version", strconv.FormatUint(version.Index, 10)) + query.Set("rotateWorkerToken", fmt.Sprintf("%v", flags.RotateWorkerToken)) + query.Set("rotateManagerToken", fmt.Sprintf("%v", flags.RotateManagerToken)) + query.Set("rotateManagerUnlockKey", fmt.Sprintf("%v", flags.RotateManagerUnlockKey)) + resp, err := cli.post(ctx, "/swarm/update", query, swarm, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/swarm_update_test.go b/vendor/github.com/moby/moby/client/swarm_update_test.go new file mode 100644 index 000000000..3b23db078 --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_update_test.go @@ -0,0 +1,49 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" +) + +func TestSwarmUpdateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.SwarmUpdate(context.Background(), swarm.Version{}, swarm.Spec{}, swarm.UpdateFlags{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSwarmUpdate(t *testing.T) { + expectedURL := "/swarm/update" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.SwarmUpdate(context.Background(), swarm.Version{}, swarm.Spec{}, swarm.UpdateFlags{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/client/task_inspect.go b/vendor/github.com/moby/moby/client/task_inspect.go new file mode 100644 index 000000000..bc8058fc3 --- /dev/null +++ b/vendor/github.com/moby/moby/client/task_inspect.go @@ -0,0 +1,34 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types/swarm" + + "golang.org/x/net/context" +) + +// TaskInspectWithRaw returns the task information and its raw representation. 
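+//
+// Illustrative call (editor's sketch; the task ID is hypothetical):
+//
+//	task, raw, err := cli.TaskInspectWithRaw(ctx, "task_id")
+//	// task is the decoded swarm.Task; raw is the undecoded JSON body.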
+func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) { + serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return swarm.Task{}, nil, taskNotFoundError{taskID} + } + return swarm.Task{}, nil, err + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return swarm.Task{}, nil, err + } + + var response swarm.Task + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/moby/moby/client/task_inspect_test.go b/vendor/github.com/moby/moby/client/task_inspect_test.go new file mode 100644 index 000000000..148cdad3a --- /dev/null +++ b/vendor/github.com/moby/moby/client/task_inspect_test.go @@ -0,0 +1,54 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestTaskInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.TaskInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestTaskInspect(t *testing.T) { + expectedURL := "/tasks/task_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(swarm.Task{ + ID: "task_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + taskInspect, _, err := client.TaskInspectWithRaw(context.Background(), "task_id") + if err != nil { + t.Fatal(err) + } + if taskInspect.ID != "task_id" { + t.Fatalf("expected `task_id`, got %s", taskInspect.ID) + } +} diff --git a/vendor/github.com/moby/moby/client/task_list.go b/vendor/github.com/moby/moby/client/task_list.go new file mode 100644 index 000000000..66324da95 --- /dev/null +++ b/vendor/github.com/moby/moby/client/task_list.go @@ -0,0 +1,35 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// TaskList returns the list of tasks. 
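+//
+// Results can be narrowed with filters (editor's sketch; "service" is assumed
+// here to be one of the filter keys the /tasks endpoint accepts):
+//
+//	f := filters.NewArgs()
+//	f.Add("service", "web")
+//	tasks, err := cli.TaskList(ctx, types.TaskListOptions{Filters: f})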
+func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/tasks", query, nil) + if err != nil { + return nil, err + } + + var tasks []swarm.Task + err = json.NewDecoder(resp.body).Decode(&tasks) + ensureReaderClosed(resp) + return tasks, err +} diff --git a/vendor/github.com/moby/moby/client/task_list_test.go b/vendor/github.com/moby/moby/client/task_list_test.go new file mode 100644 index 000000000..2a9a4c434 --- /dev/null +++ b/vendor/github.com/moby/moby/client/task_list_test.go @@ -0,0 +1,94 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestTaskListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.TaskList(context.Background(), types.TaskListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestTaskList(t *testing.T) { + expectedURL := "/tasks" + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + + listCases := []struct { + options types.TaskListOptions + expectedQueryParams map[string]string + }{ + { + options: types.TaskListOptions{}, + expectedQueryParams: map[string]string{ + "filters": "", + }, + }, + { + options: types.TaskListOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": `{"label":{"label1":true,"label2":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]swarm.Task{ + { + ID: "task_id1", + }, + { + ID: "task_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + tasks, err := client.TaskList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(tasks) != 2 { + t.Fatalf("expected 2 tasks, got %v", tasks) + } + } +} diff --git a/vendor/github.com/moby/moby/client/task_logs.go b/vendor/github.com/moby/moby/client/task_logs.go new file mode 100644 index 000000000..2ed19543a --- /dev/null +++ b/vendor/github.com/moby/moby/client/task_logs.go @@ -0,0 +1,52 @@ +package client + +import ( + "io" + "net/url" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + timetypes "github.com/docker/docker/api/types/time" +) + +// TaskLogs returns the logs generated by a task in an io.ReadCloser. +// It's up to the caller to close the stream. 
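+//
+// Illustrative call (editor's sketch; io and os are the caller's imports):
+//
+//	rc, err := cli.TaskLogs(ctx, "task_id", types.ContainerLogsOptions{
+//		ShowStdout: true,
+//		Follow:     true,
+//	})
+//	if err == nil {
+//		defer rc.Close()
+//		io.Copy(os.Stdout, rc)
+//	}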
+func (cli *Client) TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + query.Set("tail", options.Tail) + + resp, err := cli.get(ctx, "/tasks/"+taskID+"/logs", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/moby/moby/client/transport.go b/vendor/github.com/moby/moby/client/transport.go new file mode 100644 index 000000000..401ab15d3 --- /dev/null +++ b/vendor/github.com/moby/moby/client/transport.go @@ -0,0 +1,25 @@ +package client + +import ( + "crypto/tls" + "net/http" +) + +// transportFunc allows us to inject a mock transport for testing. We define it +// here so we can detect the tlsconfig and return nil for only this type. +type transportFunc func(*http.Request) (*http.Response, error) + +func (tf transportFunc) RoundTrip(req *http.Request) (*http.Response, error) { + return tf(req) +} + +// resolveTLSConfig attempts to resolve the TLS configuration from the +// RoundTripper. +func resolveTLSConfig(transport http.RoundTripper) *tls.Config { + switch tr := transport.(type) { + case *http.Transport: + return tr.TLSClientConfig + default: + return nil + } +} diff --git a/vendor/github.com/moby/moby/client/utils.go b/vendor/github.com/moby/moby/client/utils.go new file mode 100644 index 000000000..f3d8877df --- /dev/null +++ b/vendor/github.com/moby/moby/client/utils.go @@ -0,0 +1,34 @@ +package client + +import ( + "net/url" + "regexp" + + "github.com/docker/docker/api/types/filters" +) + +var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`) + +// getDockerOS returns the operating system based on the server header from the daemon. +func getDockerOS(serverHeader string) string { + var osType string + matches := headerRegexp.FindStringSubmatch(serverHeader) + if len(matches) > 0 { + osType = matches[1] + } + return osType +} + +// getFiltersQuery returns a url query with "filters" query term, based on the +// filters provided. +func getFiltersQuery(f filters.Args) (url.Values, error) { + query := url.Values{} + if f.Len() > 0 { + filterJSON, err := filters.ToParam(f) + if err != nil { + return query, err + } + query.Set("filters", filterJSON) + } + return query, nil +} diff --git a/vendor/github.com/moby/moby/client/version.go b/vendor/github.com/moby/moby/client/version.go new file mode 100644 index 000000000..933ceb4a4 --- /dev/null +++ b/vendor/github.com/moby/moby/client/version.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ServerVersion returns information of the docker client and server host. 
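+//
+// Illustrative call (editor's sketch; Version and APIVersion are fields of
+// types.Version):
+//
+//	v, err := cli.ServerVersion(context.Background())
+//	if err == nil {
+//		fmt.Println(v.Version, v.APIVersion)
+//	}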
+func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) { + resp, err := cli.get(ctx, "/version", nil, nil) + if err != nil { + return types.Version{}, err + } + + var server types.Version + err = json.NewDecoder(resp.body).Decode(&server) + ensureReaderClosed(resp) + return server, err +} diff --git a/vendor/github.com/moby/moby/client/volume_create.go b/vendor/github.com/moby/moby/client/volume_create.go new file mode 100644 index 000000000..9620c87cb --- /dev/null +++ b/vendor/github.com/moby/moby/client/volume_create.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" +) + +// VolumeCreate creates a volume in the docker host. +func (cli *Client) VolumeCreate(ctx context.Context, options volumetypes.VolumesCreateBody) (types.Volume, error) { + var volume types.Volume + resp, err := cli.post(ctx, "/volumes/create", nil, options, nil) + if err != nil { + return volume, err + } + err = json.NewDecoder(resp.body).Decode(&volume) + ensureReaderClosed(resp) + return volume, err +} diff --git a/vendor/github.com/moby/moby/client/volume_create_test.go b/vendor/github.com/moby/moby/client/volume_create_test.go new file mode 100644 index 000000000..9f1b2540b --- /dev/null +++ b/vendor/github.com/moby/moby/client/volume_create_test.go @@ -0,0 +1,75 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" +) + +func TestVolumeCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.VolumeCreate(context.Background(), volumetypes.VolumesCreateBody{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestVolumeCreate(t *testing.T) { + expectedURL := "/volumes/create" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + + content, err := json.Marshal(types.Volume{ + Name: "volume", + Driver: "local", + Mountpoint: "mountpoint", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + volume, err := client.VolumeCreate(context.Background(), volumetypes.VolumesCreateBody{ + Name: "myvolume", + Driver: "mydriver", + DriverOpts: map[string]string{ + "opt-key": "opt-value", + }, + }) + if err != nil { + t.Fatal(err) + } + if volume.Name != "volume" { + t.Fatalf("expected volume.Name to be 'volume', got %s", volume.Name) + } + if volume.Driver != "local" { + t.Fatalf("expected volume.Driver to be 'local', got %s", volume.Driver) + } + if volume.Mountpoint != "mountpoint" { + t.Fatalf("expected volume.Mountpoint to be 'mountpoint', got %s", volume.Mountpoint) + } +} diff --git a/vendor/github.com/moby/moby/client/volume_inspect.go b/vendor/github.com/moby/moby/client/volume_inspect.go new file mode 100644 index 000000000..3860e9b22 --- /dev/null +++ 
b/vendor/github.com/moby/moby/client/volume_inspect.go @@ -0,0 +1,38 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// VolumeInspect returns the information about a specific volume in the docker host. +func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) { + volume, _, err := cli.VolumeInspectWithRaw(ctx, volumeID) + return volume, err +} + +// VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation +func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) { + var volume types.Volume + resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil) + if err != nil { + if resp.statusCode == http.StatusNotFound { + return volume, nil, volumeNotFoundError{volumeID} + } + return volume, nil, err + } + defer ensureReaderClosed(resp) + + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return volume, nil, err + } + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&volume) + return volume, body, err +} diff --git a/vendor/github.com/moby/moby/client/volume_inspect_test.go b/vendor/github.com/moby/moby/client/volume_inspect_test.go new file mode 100644 index 000000000..0d1d11882 --- /dev/null +++ b/vendor/github.com/moby/moby/client/volume_inspect_test.go @@ -0,0 +1,76 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestVolumeInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.VolumeInspect(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestVolumeInspectNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, err := client.VolumeInspect(context.Background(), "unknown") + if err == nil || !IsErrVolumeNotFound(err) { + t.Fatalf("expected a volumeNotFound error, got %v", err) + } +} + +func TestVolumeInspect(t *testing.T) { + expectedURL := "/volumes/volume_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "GET" { + return nil, fmt.Errorf("expected GET method, got %s", req.Method) + } + content, err := json.Marshal(types.Volume{ + Name: "name", + Driver: "driver", + Mountpoint: "mountpoint", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + v, err := client.VolumeInspect(context.Background(), "volume_id") + if err != nil { + t.Fatal(err) + } + if v.Name != "name" { + t.Fatalf("expected `name`, got %s", v.Name) + } + if v.Driver != "driver" { + t.Fatalf("expected `driver`, got %s", v.Driver) + } + if v.Mountpoint != "mountpoint" { + t.Fatalf("expected `mountpoint`, got %s", v.Mountpoint) + } +} diff --git a/vendor/github.com/moby/moby/client/volume_list.go b/vendor/github.com/moby/moby/client/volume_list.go new file mode 100644 index 
000000000..32247ce11 --- /dev/null +++ b/vendor/github.com/moby/moby/client/volume_list.go @@ -0,0 +1,32 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types/filters" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" +) + +// VolumeList returns the volumes configured in the docker host. +func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumesListOKBody, error) { + var volumes volumetypes.VolumesListOKBody + query := url.Values{} + + if filter.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, filter) + if err != nil { + return volumes, err + } + query.Set("filters", filterJSON) + } + resp, err := cli.get(ctx, "/volumes", query, nil) + if err != nil { + return volumes, err + } + + err = json.NewDecoder(resp.body).Decode(&volumes) + ensureReaderClosed(resp) + return volumes, err +} diff --git a/vendor/github.com/moby/moby/client/volume_list_test.go b/vendor/github.com/moby/moby/client/volume_list_test.go new file mode 100644 index 000000000..f29639be2 --- /dev/null +++ b/vendor/github.com/moby/moby/client/volume_list_test.go @@ -0,0 +1,98 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" +) + +func TestVolumeListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.VolumeList(context.Background(), filters.NewArgs()) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestVolumeList(t *testing.T) { + expectedURL := "/volumes" + + noDanglingFilters := filters.NewArgs() + noDanglingFilters.Add("dangling", "false") + + danglingFilters := filters.NewArgs() + danglingFilters.Add("dangling", "true") + + labelFilters := filters.NewArgs() + labelFilters.Add("label", "label1") + labelFilters.Add("label", "label2") + + listCases := []struct { + filters filters.Args + expectedFilters string + }{ + { + filters: filters.NewArgs(), + expectedFilters: "", + }, { + filters: noDanglingFilters, + expectedFilters: `{"dangling":{"false":true}}`, + }, { + filters: danglingFilters, + expectedFilters: `{"dangling":{"true":true}}`, + }, { + filters: labelFilters, + expectedFilters: `{"label":{"label1":true,"label2":true}}`, + }, + } + + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + actualFilters := query.Get("filters") + if actualFilters != listCase.expectedFilters { + return nil, fmt.Errorf("filters not set in URL query properly. 
Expected '%s', got %s", listCase.expectedFilters, actualFilters) + } + content, err := json.Marshal(volumetypes.VolumesListOKBody{ + Volumes: []*types.Volume{ + { + Name: "volume", + Driver: "local", + }, + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + volumeResponse, err := client.VolumeList(context.Background(), listCase.filters) + if err != nil { + t.Fatal(err) + } + if len(volumeResponse.Volumes) != 1 { + t.Fatalf("expected 1 volume, got %v", volumeResponse.Volumes) + } + } +} diff --git a/vendor/github.com/moby/moby/client/volume_prune.go b/vendor/github.com/moby/moby/client/volume_prune.go new file mode 100644 index 000000000..2e7fea774 --- /dev/null +++ b/vendor/github.com/moby/moby/client/volume_prune.go @@ -0,0 +1,36 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// VolumesPrune requests the daemon to delete unused data +func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (types.VolumesPruneReport, error) { + var report types.VolumesPruneReport + + if err := cli.NewVersionError("1.25", "volume prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/volumes/prune", query, nil, nil) + if err != nil { + return report, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving volume prune report: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/moby/moby/client/volume_remove.go b/vendor/github.com/moby/moby/client/volume_remove.go new file mode 100644 index 000000000..6c26575b4 --- /dev/null +++ b/vendor/github.com/moby/moby/client/volume_remove.go @@ -0,0 +1,21 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types/versions" + "golang.org/x/net/context" +) + +// VolumeRemove removes a volume from the docker host. 
+func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool) error { + query := url.Values{} + if versions.GreaterThanOrEqualTo(cli.version, "1.25") { + if force { + query.Set("force", "1") + } + } + resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/moby/moby/client/volume_remove_test.go b/vendor/github.com/moby/moby/client/volume_remove_test.go new file mode 100644 index 000000000..1fe657349 --- /dev/null +++ b/vendor/github.com/moby/moby/client/volume_remove_test.go @@ -0,0 +1,47 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestVolumeRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.VolumeRemove(context.Background(), "volume_id", false) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestVolumeRemove(t *testing.T) { + expectedURL := "/volumes/volume_id" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.VolumeRemove(context.Background(), "volume_id", false) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/cmd/dockerd/README.md b/vendor/github.com/moby/moby/cmd/dockerd/README.md new file mode 100644 index 000000000..a8c20b354 --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/README.md @@ -0,0 +1,3 @@ +docker.go contains Docker daemon's main function. + +This file provides first line CLI argument parsing and environment variable setting. 
diff --git a/vendor/github.com/moby/moby/cmd/dockerd/config.go b/vendor/github.com/moby/moby/cmd/dockerd/config.go new file mode 100644 index 000000000..11084ec8d --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/config.go @@ -0,0 +1,76 @@ +package main + +import ( + "runtime" + + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/opts" + "github.com/spf13/pflag" +) + +const ( + // defaultShutdownTimeout is the default shutdown timeout for the daemon + defaultShutdownTimeout = 15 + // defaultTrustKeyFile is the default filename for the trust key + defaultTrustKeyFile = "key.json" +) + +// installCommonConfigFlags adds flags to the pflag.FlagSet to configure the daemon +func installCommonConfigFlags(conf *config.Config, flags *pflag.FlagSet) { + var maxConcurrentDownloads, maxConcurrentUploads int + + conf.ServiceOptions.InstallCliFlags(flags) + + flags.Var(opts.NewNamedListOptsRef("storage-opts", &conf.GraphOptions, nil), "storage-opt", "Storage driver options") + flags.Var(opts.NewNamedListOptsRef("authorization-plugins", &conf.AuthorizationPlugins, nil), "authorization-plugin", "Authorization plugins to load") + flags.Var(opts.NewNamedListOptsRef("exec-opts", &conf.ExecOptions, nil), "exec-opt", "Runtime execution options") + flags.StringVarP(&conf.Pidfile, "pidfile", "p", defaultPidFile, "Path to use for daemon PID file") + flags.StringVarP(&conf.Root, "graph", "g", defaultDataRoot, "Root of the Docker runtime") + + // "--graph" is "soft-deprecated" in favor of "data-root". This flag was added + // before Docker 1.0, so won't be removed, only hidden, to discourage its usage. + flags.MarkHidden("graph") + + flags.StringVar(&conf.Root, "data-root", defaultDataRoot, "Root directory of persistent Docker state") + + flags.BoolVarP(&conf.AutoRestart, "restart", "r", true, "--restart on the daemon has been deprecated in favor of --restart policies on docker run") + flags.MarkDeprecated("restart", "Please use a restart policy on docker run") + + // Windows doesn't support setting the storage driver - there is no choice as to which ones to use. 
+ if runtime.GOOS != "windows" { + flags.StringVarP(&conf.GraphDriver, "storage-driver", "s", "", "Storage driver to use") + } + + flags.IntVar(&conf.Mtu, "mtu", 0, "Set the containers network MTU") + flags.BoolVar(&conf.RawLogs, "raw-logs", false, "Full timestamps without ANSI coloring") + flags.Var(opts.NewListOptsRef(&conf.DNS, opts.ValidateIPAddress), "dns", "DNS server to use") + flags.Var(opts.NewNamedListOptsRef("dns-opts", &conf.DNSOptions, nil), "dns-opt", "DNS options to use") + flags.Var(opts.NewListOptsRef(&conf.DNSSearch, opts.ValidateDNSSearch), "dns-search", "DNS search domains to use") + flags.Var(opts.NewNamedListOptsRef("labels", &conf.Labels, opts.ValidateLabel), "label", "Set key=value labels to the daemon") + flags.StringVar(&conf.LogConfig.Type, "log-driver", "json-file", "Default driver for container logs") + flags.Var(opts.NewNamedMapOpts("log-opts", conf.LogConfig.Config, nil), "log-opt", "Default log driver options for containers") + flags.StringVar(&conf.ClusterAdvertise, "cluster-advertise", "", "Address or interface name to advertise") + flags.StringVar(&conf.ClusterStore, "cluster-store", "", "URL of the distributed storage backend") + flags.Var(opts.NewNamedMapOpts("cluster-store-opts", conf.ClusterOpts, nil), "cluster-store-opt", "Set cluster store options") + flags.StringVar(&conf.CorsHeaders, "api-cors-header", "", "Set CORS headers in the Engine API") + flags.IntVar(&maxConcurrentDownloads, "max-concurrent-downloads", config.DefaultMaxConcurrentDownloads, "Set the max concurrent downloads for each pull") + flags.IntVar(&maxConcurrentUploads, "max-concurrent-uploads", config.DefaultMaxConcurrentUploads, "Set the max concurrent uploads for each push") + flags.IntVar(&conf.ShutdownTimeout, "shutdown-timeout", defaultShutdownTimeout, "Set the default shutdown timeout") + + flags.StringVar(&conf.SwarmDefaultAdvertiseAddr, "swarm-default-advertise-addr", "", "Set default address or interface for swarm advertised address") + flags.BoolVar(&conf.Experimental, "experimental", false, "Enable experimental features") + + flags.StringVar(&conf.MetricsAddress, "metrics-addr", "", "Set default address and port to serve the metrics api on") + + flags.StringVar(&conf.NodeGenericResources, "node-generic-resources", "", "user defined resources (e.g. fpga=2;gpu={UUID1,UUID2,UUID3})") + + // "--deprecated-key-path" is to allow configuration of the key used + // for the daemon ID and the deprecated image signing. It was never + // exposed as a command line option but is added here to allow + // overriding the default path in configuration. 
+ flags.Var(opts.NewQuotedString(&conf.TrustKeyPath), "deprecated-key-path", "Path to key file for ID and image signing") + flags.MarkHidden("deprecated-key-path") + + conf.MaxConcurrentDownloads = &maxConcurrentDownloads + conf.MaxConcurrentUploads = &maxConcurrentUploads +} diff --git a/vendor/github.com/moby/moby/cmd/dockerd/config_common_unix.go b/vendor/github.com/moby/moby/cmd/dockerd/config_common_unix.go new file mode 100644 index 000000000..b29307b59 --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/config_common_unix.go @@ -0,0 +1,34 @@ +// +build solaris linux freebsd + +package main + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/opts" + "github.com/spf13/pflag" +) + +var ( + defaultPidFile = "/var/run/docker.pid" + defaultDataRoot = "/var/lib/docker" + defaultExecRoot = "/var/run/docker" +) + +// installUnixConfigFlags adds command-line options to the top-level flag parser for +// the current process that are common across Unix platforms. +func installUnixConfigFlags(conf *config.Config, flags *pflag.FlagSet) { + conf.Runtimes = make(map[string]types.Runtime) + + flags.StringVarP(&conf.SocketGroup, "group", "G", "docker", "Group for the unix socket") + flags.StringVar(&conf.BridgeConfig.IP, "bip", "", "Specify network bridge IP") + flags.StringVarP(&conf.BridgeConfig.Iface, "bridge", "b", "", "Attach containers to a network bridge") + flags.StringVar(&conf.BridgeConfig.FixedCIDR, "fixed-cidr", "", "IPv4 subnet for fixed IPs") + flags.Var(opts.NewIPOpt(&conf.BridgeConfig.DefaultGatewayIPv4, ""), "default-gateway", "Container default gateway IPv4 address") + flags.Var(opts.NewIPOpt(&conf.BridgeConfig.DefaultGatewayIPv6, ""), "default-gateway-v6", "Container default gateway IPv6 address") + flags.BoolVar(&conf.BridgeConfig.InterContainerCommunication, "icc", true, "Enable inter-container communication") + flags.Var(opts.NewIPOpt(&conf.BridgeConfig.DefaultIP, "0.0.0.0"), "ip", "Default IP when binding container ports") + flags.Var(opts.NewNamedRuntimeOpt("runtimes", &conf.Runtimes, config.StockRuntimeName), "add-runtime", "Register an additional OCI compatible runtime") + flags.StringVar(&conf.DefaultRuntime, "default-runtime", config.StockRuntimeName, "Default OCI runtime for containers") + +} diff --git a/vendor/github.com/moby/moby/cmd/dockerd/config_experimental.go b/vendor/github.com/moby/moby/cmd/dockerd/config_experimental.go new file mode 100644 index 000000000..355a29e85 --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/config_experimental.go @@ -0,0 +1,9 @@ +package main + +import ( + "github.com/docker/docker/daemon/config" + "github.com/spf13/pflag" +) + +func attachExperimentalFlags(conf *config.Config, cmd *pflag.FlagSet) { +} diff --git a/vendor/github.com/moby/moby/cmd/dockerd/config_solaris.go b/vendor/github.com/moby/moby/cmd/dockerd/config_solaris.go new file mode 100644 index 000000000..582211c6c --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/config_solaris.go @@ -0,0 +1,19 @@ +package main + +import ( + "github.com/docker/docker/daemon/config" + runconfigopts "github.com/docker/docker/runconfig/opts" + units "github.com/docker/go-units" + "github.com/spf13/pflag" +) + +// installConfigFlags adds flags to the pflag.FlagSet to configure the daemon +func installConfigFlags(conf *config.Config, flags *pflag.FlagSet) { + // First handle install flags which are consistent cross-platform + installCommonConfigFlags(conf, flags) + + // Then install flags common to 
unix platforms + installUnixConfigFlags(conf, flags) + + attachExperimentalFlags(conf, flags) +} diff --git a/vendor/github.com/moby/moby/cmd/dockerd/config_unix.go b/vendor/github.com/moby/moby/cmd/dockerd/config_unix.go new file mode 100644 index 000000000..ba37121e9 --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/config_unix.go @@ -0,0 +1,52 @@ +// +build linux,!solaris freebsd,!solaris + +package main + +import ( + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/opts" + units "github.com/docker/go-units" + "github.com/spf13/pflag" +) + +// installConfigFlags adds flags to the pflag.FlagSet to configure the daemon +func installConfigFlags(conf *config.Config, flags *pflag.FlagSet) { + // First handle install flags which are consistent cross-platform + installCommonConfigFlags(conf, flags) + + // Then install flags common to unix platforms + installUnixConfigFlags(conf, flags) + + conf.Ulimits = make(map[string]*units.Ulimit) + + // Set default value for `--default-shm-size` + conf.ShmSize = opts.MemBytes(config.DefaultShmSize) + + // Then platform-specific install flags + flags.BoolVar(&conf.EnableSelinuxSupport, "selinux-enabled", false, "Enable selinux support") + flags.Var(opts.NewNamedUlimitOpt("default-ulimits", &conf.Ulimits), "default-ulimit", "Default ulimits for containers") + flags.BoolVar(&conf.BridgeConfig.EnableIPTables, "iptables", true, "Enable addition of iptables rules") + flags.BoolVar(&conf.BridgeConfig.EnableIPForward, "ip-forward", true, "Enable net.ipv4.ip_forward") + flags.BoolVar(&conf.BridgeConfig.EnableIPMasq, "ip-masq", true, "Enable IP masquerading") + flags.BoolVar(&conf.BridgeConfig.EnableIPv6, "ipv6", false, "Enable IPv6 networking") + flags.StringVar(&conf.ExecRoot, "exec-root", defaultExecRoot, "Root directory for execution state files") + flags.StringVar(&conf.BridgeConfig.FixedCIDRv6, "fixed-cidr-v6", "", "IPv6 subnet for fixed IPs") + flags.BoolVar(&conf.BridgeConfig.EnableUserlandProxy, "userland-proxy", true, "Use userland proxy for loopback traffic") + flags.StringVar(&conf.BridgeConfig.UserlandProxyPath, "userland-proxy-path", "", "Path to the userland proxy binary") + flags.BoolVar(&conf.EnableCors, "api-enable-cors", false, "Enable CORS headers in the Engine API, this is deprecated by --api-cors-header") + flags.MarkDeprecated("api-enable-cors", "Please use --api-cors-header") + flags.StringVar(&conf.CgroupParent, "cgroup-parent", "", "Set parent cgroup for all containers") + flags.StringVar(&conf.RemappedRoot, "userns-remap", "", "User/Group setting for user namespaces") + flags.StringVar(&conf.ContainerdAddr, "containerd", "", "Path to containerd socket") + flags.BoolVar(&conf.LiveRestoreEnabled, "live-restore", false, "Enable live restore of docker when containers are still running") + flags.IntVar(&conf.OOMScoreAdjust, "oom-score-adjust", -500, "Set the oom_score_adj for the daemon") + flags.BoolVar(&conf.Init, "init", false, "Run an init in the container to forward signals and reap processes") + flags.StringVar(&conf.InitPath, "init-path", "", "Path to the docker-init binary") + flags.Int64Var(&conf.CPURealtimePeriod, "cpu-rt-period", 0, "Limit the CPU real-time period in microseconds") + flags.Int64Var(&conf.CPURealtimeRuntime, "cpu-rt-runtime", 0, "Limit the CPU real-time runtime in microseconds") + flags.StringVar(&conf.SeccompProfile, "seccomp-profile", "", "Path to seccomp profile") + flags.Var(&conf.ShmSize, "default-shm-size", "Default shm size for containers") + flags.BoolVar(&conf.NoNewPrivileges, 
"no-new-privileges", false, "Set no-new-privileges by default for new containers") + + attachExperimentalFlags(conf, flags) +} diff --git a/vendor/github.com/moby/moby/cmd/dockerd/config_unix_test.go b/vendor/github.com/moby/moby/cmd/dockerd/config_unix_test.go new file mode 100644 index 000000000..99b2f90b4 --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/config_unix_test.go @@ -0,0 +1,26 @@ +// +build linux,!solaris freebsd,!solaris + +package main + +import ( + "runtime" + "testing" + + "github.com/docker/docker/daemon/config" + "github.com/spf13/pflag" + "github.com/stretchr/testify/assert" +) + +func TestDaemonParseShmSize(t *testing.T) { + if runtime.GOOS == "solaris" { + t.Skip("ShmSize not supported on Solaris\n") + } + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + + conf := &config.Config{} + installConfigFlags(conf, flags) + // By default `--default-shm-size=64M` + assert.Equal(t, int64(64*1024*1024), conf.ShmSize.Value()) + assert.NoError(t, flags.Set("default-shm-size", "128M")) + assert.Equal(t, int64(128*1024*1024), conf.ShmSize.Value()) +} diff --git a/vendor/github.com/moby/moby/cmd/dockerd/config_windows.go b/vendor/github.com/moby/moby/cmd/dockerd/config_windows.go new file mode 100644 index 000000000..79cdd2504 --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/config_windows.go @@ -0,0 +1,25 @@ +package main + +import ( + "os" + "path/filepath" + + "github.com/docker/docker/daemon/config" + "github.com/spf13/pflag" +) + +var ( + defaultPidFile string + defaultDataRoot = filepath.Join(os.Getenv("programdata"), "docker") +) + +// installConfigFlags adds flags to the pflag.FlagSet to configure the daemon +func installConfigFlags(conf *config.Config, flags *pflag.FlagSet) { + // First handle install flags which are consistent cross-platform + installCommonConfigFlags(conf, flags) + + // Then platform-specific install flags. 
+ flags.StringVar(&conf.BridgeConfig.FixedCIDR, "fixed-cidr", "", "IPv4 subnet for fixed IPs") + flags.StringVarP(&conf.BridgeConfig.Iface, "bridge", "b", "", "Attach containers to a virtual switch") + flags.StringVarP(&conf.SocketGroup, "group", "G", "", "Users or groups that can access the named pipe") +} diff --git a/vendor/github.com/moby/moby/cmd/dockerd/daemon.go b/vendor/github.com/moby/moby/cmd/dockerd/daemon.go new file mode 100644 index 000000000..215f9c2d5 --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/daemon.go @@ -0,0 +1,570 @@ +package main + +import ( + "context" + "crypto/tls" + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/uuid" + "github.com/docker/docker/api" + apiserver "github.com/docker/docker/api/server" + buildbackend "github.com/docker/docker/api/server/backend/build" + "github.com/docker/docker/api/server/middleware" + "github.com/docker/docker/api/server/router" + "github.com/docker/docker/api/server/router/build" + checkpointrouter "github.com/docker/docker/api/server/router/checkpoint" + "github.com/docker/docker/api/server/router/container" + distributionrouter "github.com/docker/docker/api/server/router/distribution" + "github.com/docker/docker/api/server/router/image" + "github.com/docker/docker/api/server/router/network" + pluginrouter "github.com/docker/docker/api/server/router/plugin" + sessionrouter "github.com/docker/docker/api/server/router/session" + swarmrouter "github.com/docker/docker/api/server/router/swarm" + systemrouter "github.com/docker/docker/api/server/router/system" + "github.com/docker/docker/api/server/router/volume" + "github.com/docker/docker/builder/dockerfile" + "github.com/docker/docker/builder/fscache" + "github.com/docker/docker/cli/debug" + "github.com/docker/docker/client/session" + "github.com/docker/docker/daemon" + "github.com/docker/docker/daemon/cluster" + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/libcontainerd" + dopts "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/authorization" + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/pkg/listeners" + "github.com/docker/docker/pkg/pidfile" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/plugin" + "github.com/docker/docker/registry" + "github.com/docker/docker/runconfig" + "github.com/docker/go-connections/tlsconfig" + swarmapi "github.com/docker/swarmkit/api" + "github.com/pkg/errors" + "github.com/spf13/pflag" +) + +// DaemonCli represents the daemon CLI. 
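+//
+// Editor's sketch of the flow in this file: a DaemonCli is built with
+// NewDaemonCli and driven by start, which loads configuration, serves the
+// API, and blocks until shutdown:
+//
+//	cli := NewDaemonCli()
+//	err := cli.start(opts) // opts is the *daemonOptions parsed from flags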
+type DaemonCli struct { + *config.Config + configFile *string + flags *pflag.FlagSet + + api *apiserver.Server + d *daemon.Daemon + authzMiddleware *authorization.Middleware // authzMiddleware enables to dynamically reload the authorization plugins +} + +// NewDaemonCli returns a daemon CLI +func NewDaemonCli() *DaemonCli { + return &DaemonCli{} +} + +func (cli *DaemonCli) start(opts *daemonOptions) (err error) { + stopc := make(chan bool) + defer close(stopc) + + // warn from uuid package when running the daemon + uuid.Loggerf = logrus.Warnf + + opts.SetDefaultOptions(opts.flags) + + if cli.Config, err = loadDaemonCliConfig(opts); err != nil { + return err + } + cli.configFile = &opts.configFile + cli.flags = opts.flags + + if cli.Config.Debug { + debug.Enable() + } + + if cli.Config.Experimental { + logrus.Warn("Running experimental build") + } + + logrus.SetFormatter(&logrus.TextFormatter{ + TimestampFormat: jsonlog.RFC3339NanoFixed, + DisableColors: cli.Config.RawLogs, + }) + + if err := setDefaultUmask(); err != nil { + return fmt.Errorf("Failed to set umask: %v", err) + } + + if len(cli.LogConfig.Config) > 0 { + if err := logger.ValidateLogOpts(cli.LogConfig.Type, cli.LogConfig.Config); err != nil { + return fmt.Errorf("Failed to set log opts: %v", err) + } + } + + // Create the daemon root before we create ANY other files (PID, or migrate keys) + // to ensure the appropriate ACL is set (particularly relevant on Windows) + if err := daemon.CreateDaemonRoot(cli.Config); err != nil { + return err + } + + if cli.Pidfile != "" { + pf, err := pidfile.New(cli.Pidfile) + if err != nil { + return fmt.Errorf("Error starting daemon: %v", err) + } + defer func() { + if err := pf.Remove(); err != nil { + logrus.Error(err) + } + }() + } + + // TODO: extract to newApiServerConfig() + serverConfig := &apiserver.Config{ + Logging: true, + SocketGroup: cli.Config.SocketGroup, + Version: dockerversion.Version, + EnableCors: cli.Config.EnableCors, + CorsHeaders: cli.Config.CorsHeaders, + } + + if cli.Config.TLS { + tlsOptions := tlsconfig.Options{ + CAFile: cli.Config.CommonTLSOptions.CAFile, + CertFile: cli.Config.CommonTLSOptions.CertFile, + KeyFile: cli.Config.CommonTLSOptions.KeyFile, + ExclusiveRootPools: true, + } + + if cli.Config.TLSVerify { + // server requires and verifies client's certificate + tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert + } + tlsConfig, err := tlsconfig.Server(tlsOptions) + if err != nil { + return err + } + serverConfig.TLSConfig = tlsConfig + } + + if len(cli.Config.Hosts) == 0 { + cli.Config.Hosts = make([]string, 1) + } + + cli.api = apiserver.New(serverConfig) + + var hosts []string + + for i := 0; i < len(cli.Config.Hosts); i++ { + var err error + if cli.Config.Hosts[i], err = dopts.ParseHost(cli.Config.TLS, cli.Config.Hosts[i]); err != nil { + return fmt.Errorf("error parsing -H %s : %v", cli.Config.Hosts[i], err) + } + + protoAddr := cli.Config.Hosts[i] + protoAddrParts := strings.SplitN(protoAddr, "://", 2) + if len(protoAddrParts) != 2 { + return fmt.Errorf("bad format %s, expected PROTO://ADDR", protoAddr) + } + + proto := protoAddrParts[0] + addr := protoAddrParts[1] + + // It's a bad idea to bind to TCP without tlsverify. + if proto == "tcp" && (serverConfig.TLSConfig == nil || serverConfig.TLSConfig.ClientAuth != tls.RequireAndVerifyClientCert) { + logrus.Warn("[!] 
DON'T BIND ON ANY IP ADDRESS WITHOUT setting --tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING [!]") + } + ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig) + if err != nil { + return err + } + ls = wrapListeners(proto, ls) + // If we're binding to a TCP port, make sure that a container doesn't try to use it. + if proto == "tcp" { + if err := allocateDaemonPort(addr); err != nil { + return err + } + } + logrus.Debugf("Listener created for HTTP on %s (%s)", proto, addr) + hosts = append(hosts, protoAddrParts[1]) + cli.api.Accept(addr, ls...) + } + + registryService := registry.NewService(cli.Config.ServiceOptions) + containerdRemote, err := libcontainerd.New(cli.getLibcontainerdRoot(), cli.getPlatformRemoteOptions()...) + if err != nil { + return err + } + signal.Trap(func() { + cli.stop() + <-stopc // wait for daemonCli.start() to return + }) + + // Notify that the API is active, but before daemon is set up. + preNotifySystem() + + pluginStore := plugin.NewStore() + + if err := cli.initMiddlewares(cli.api, serverConfig, pluginStore); err != nil { + logrus.Fatalf("Error creating middlewares: %v", err) + } + + if system.LCOWSupported() { + logrus.Warnln("LCOW support is enabled - this feature is incomplete") + } + + d, err := daemon.NewDaemon(cli.Config, registryService, containerdRemote, pluginStore) + if err != nil { + return fmt.Errorf("Error starting daemon: %v", err) + } + + d.StoreHosts(hosts) + + // Validate after NewDaemon has restored enabled plugins. Don't change the order. + if err := validateAuthzPlugins(cli.Config.AuthorizationPlugins, pluginStore); err != nil { + return fmt.Errorf("Error validating authorization plugin: %v", err) + } + + // TODO: move into startMetricsServer() + if cli.Config.MetricsAddress != "" { + if !d.HasExperimental() { + return fmt.Errorf("metrics-addr is only supported when experimental is enabled") + } + if err := startMetricsServer(cli.Config.MetricsAddress); err != nil { + return err + } + } + + // TODO: createAndStartCluster() + name, _ := os.Hostname() + + // Use a buffered channel to pass changes from the store watch API to the daemon. + // The buffer allows the store watch API and daemon processing to proceed without waiting on each other. + watchStream := make(chan *swarmapi.WatchMessage, 32) + + c, err := cluster.New(cluster.Config{ + Root: cli.Config.Root, + Name: name, + Backend: d, + PluginBackend: d.PluginManager(), + NetworkSubnetsProvider: d, + DefaultAdvertiseAddr: cli.Config.SwarmDefaultAdvertiseAddr, + RuntimeRoot: cli.getSwarmRunRoot(), + WatchStream: watchStream, + }) + if err != nil { + logrus.Fatalf("Error creating cluster component: %v", err) + } + d.SetCluster(c) + err = c.Start() + if err != nil { + logrus.Fatalf("Error starting cluster component: %v", err) + } + + // Restart all autostart containers which have a swarm endpoint + // and are not yet running, now that we have successfully + // initialized the cluster. 
+ d.RestartSwarmContainers() + + logrus.Info("Daemon has completed initialization") + + cli.d = d + + routerOptions, err := newRouterOptions(cli.Config, d) + if err != nil { + return err + } + routerOptions.api = cli.api + routerOptions.cluster = c + + initRouter(routerOptions) + + // process cluster change notifications + watchCtx, cancel := context.WithCancel(context.Background()) + defer cancel() + go d.ProcessClusterNotifications(watchCtx, watchStream) + + cli.setupConfigReloadTrap() + + // The serve API routine never exits unless an error occurs. + // We need to start it as a goroutine and wait on it so the + // daemon doesn't exit. + serveAPIWait := make(chan error) + go cli.api.Wait(serveAPIWait) + + // after the daemon is done setting up we can notify the systemd api + notifySystem() + + // Daemon is fully initialized and handling API traffic. + // Wait for the serve API to complete. + errAPI := <-serveAPIWait + c.Cleanup() + shutdownDaemon(d) + containerdRemote.Cleanup() + if errAPI != nil { + return fmt.Errorf("Shutting down due to ServeAPI error: %v", errAPI) + } + + return nil +} + +type routerOptions struct { + sessionManager *session.Manager + buildBackend *buildbackend.Backend + buildCache *fscache.FSCache + daemon *daemon.Daemon + api *apiserver.Server + cluster *cluster.Cluster +} + +func newRouterOptions(config *config.Config, daemon *daemon.Daemon) (routerOptions, error) { + opts := routerOptions{} + sm, err := session.NewManager() + if err != nil { + return opts, errors.Wrap(err, "failed to create sessionmanager") + } + + builderStateDir := filepath.Join(config.Root, "builder") + + buildCache, err := fscache.NewFSCache(fscache.Opt{ + Backend: fscache.NewNaiveCacheBackend(builderStateDir), + Root: builderStateDir, + GCPolicy: fscache.GCPolicy{ // TODO: expose this in config + MaxSize: 1024 * 1024 * 512, // 512MB + MaxKeepDuration: 7 * 24 * time.Hour, // 1 week + }, + }) + if err != nil { + return opts, errors.Wrap(err, "failed to create fscache") + } + + manager, err := dockerfile.NewBuildManager(daemon, sm, buildCache, daemon.IDMappings()) + if err != nil { + return opts, err + } + + bb, err := buildbackend.NewBackend(daemon, manager, buildCache) + if err != nil { + return opts, errors.Wrap(err, "failed to create buildmanager") + } + + return routerOptions{ + sessionManager: sm, + buildBackend: bb, + buildCache: buildCache, + daemon: daemon, + }, nil +} + +func (cli *DaemonCli) reloadConfig() { + reload := func(config *config.Config) { + + // Revalidate and reload the authorization plugins + if err := validateAuthzPlugins(config.AuthorizationPlugins, cli.d.PluginStore); err != nil { + logrus.Fatalf("Error validating authorization plugin: %v", err) + return + } + cli.authzMiddleware.SetPlugins(config.AuthorizationPlugins) + + if err := cli.d.Reload(config); err != nil { + logrus.Errorf("Error reconfiguring the daemon: %v", err) + return + } + + if config.IsValueSet("debug") { + debugEnabled := debug.IsEnabled() + switch { + case debugEnabled && !config.Debug: // disable debug + debug.Disable() + case config.Debug && !debugEnabled: // enable debug + debug.Enable() + } + + } + } + + if err := config.Reload(*cli.configFile, cli.flags, reload); err != nil { + logrus.Error(err) + } +} + +func (cli *DaemonCli) stop() { + cli.api.Close() +} + +// shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case +// d.Shutdown() is waiting too long to kill containers or, worse, is +// blocked there +func shutdownDaemon(d *daemon.Daemon) { + shutdownTimeout := d.ShutdownTimeout() + ch := 
+	go func() {
+		d.Shutdown()
+		close(ch)
+	}()
+	if shutdownTimeout < 0 {
+		<-ch
+		logrus.Debug("Clean shutdown succeeded")
+		return
+	}
+	select {
+	case <-ch:
+		logrus.Debug("Clean shutdown succeeded")
+	case <-time.After(time.Duration(shutdownTimeout) * time.Second):
+		logrus.Error("Force shutdown daemon")
+	}
+}
+
+func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) {
+	conf := opts.daemonConfig
+	flags := opts.flags
+	conf.Debug = opts.Debug
+	conf.Hosts = opts.Hosts
+	conf.LogLevel = opts.LogLevel
+	conf.TLS = opts.TLS
+	conf.TLSVerify = opts.TLSVerify
+	conf.CommonTLSOptions = config.CommonTLSOptions{}
+
+	if opts.TLSOptions != nil {
+		conf.CommonTLSOptions.CAFile = opts.TLSOptions.CAFile
+		conf.CommonTLSOptions.CertFile = opts.TLSOptions.CertFile
+		conf.CommonTLSOptions.KeyFile = opts.TLSOptions.KeyFile
+	}
+
+	if conf.TrustKeyPath == "" {
+		conf.TrustKeyPath = filepath.Join(
+			getDaemonConfDir(conf.Root),
+			defaultTrustKeyFile)
+	}
+
+	if flags.Changed("graph") && flags.Changed("data-root") {
+		return nil, fmt.Errorf(`cannot specify both "--graph" and "--data-root" option`)
+	}
+
+	if opts.configFile != "" {
+		c, err := config.MergeDaemonConfigurations(conf, flags, opts.configFile)
+		if err != nil {
+			if flags.Changed("config-file") || !os.IsNotExist(err) {
+				return nil, fmt.Errorf("unable to configure the Docker daemon with file %s: %v\n", opts.configFile, err)
+			}
+		}
+		// The merged configuration can be nil if the config file didn't exist.
+		// Leave the current configuration as it is when that happens.
+		if c != nil {
+			conf = c
+		}
+	}
+
+	if err := config.Validate(conf); err != nil {
+		return nil, err
+	}
+
+	if !conf.V2Only {
+		logrus.Warnf(`The "disable-legacy-registry" option is deprecated and will be removed in Docker v17.12. Interacting with legacy (v1) registries will no longer be supported in Docker v17.12`)
+	}
+
+	if flags.Changed("graph") {
+		logrus.Warnf(`The "-g / --graph" flag is deprecated. Please use "--data-root" instead`)
+	}
+
+	// Labels of the docker engine used to allow multiple values associated
+	// with the same key. This was deprecated in 1.13 and will be removed
+	// after 3 release cycles. The following checks for conflicting labels
+	// and reports a deprecation warning.
+ // + // TODO: After 3 release cycles (17.12) an error will be returned, and labels will be + // sanitized to consolidate duplicate key-value pairs (config.Labels = newLabels): + // + // newLabels, err := daemon.GetConflictFreeLabels(config.Labels) + // if err != nil { + // return nil, err + // } + // config.Labels = newLabels + // + if _, err := config.GetConflictFreeLabels(conf.Labels); err != nil { + logrus.Warnf("Engine labels with duplicate keys and conflicting values have been deprecated: %s", err) + } + + // Regardless of whether the user sets it to true or false, if they + // specify TLSVerify at all then we need to turn on TLS + if conf.IsValueSet(FlagTLSVerify) { + conf.TLS = true + } + + // ensure that the log level is the one set after merging configurations + setLogLevel(conf.LogLevel) + + return conf, nil +} + +func initRouter(opts routerOptions) { + decoder := runconfig.ContainerDecoder{} + + routers := []router.Router{ + // we need to add the checkpoint router before the container router or the DELETE gets masked + checkpointrouter.NewRouter(opts.daemon, decoder), + container.NewRouter(opts.daemon, decoder), + image.NewRouter(opts.daemon, decoder), + systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildCache), + volume.NewRouter(opts.daemon), + build.NewRouter(opts.buildBackend, opts.daemon), + sessionrouter.NewRouter(opts.sessionManager), + swarmrouter.NewRouter(opts.cluster), + pluginrouter.NewRouter(opts.daemon.PluginManager()), + distributionrouter.NewRouter(opts.daemon), + } + + if opts.daemon.NetworkControllerEnabled() { + routers = append(routers, network.NewRouter(opts.daemon, opts.cluster)) + } + + if opts.daemon.HasExperimental() { + for _, r := range routers { + for _, route := range r.Routes() { + if experimental, ok := route.(router.ExperimentalRoute); ok { + experimental.Enable() + } + } + } + } + + opts.api.InitRouter(routers...) 
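+	// At this point every router is registered with the API server; requests
+	// are only served once start() launches cli.api.Wait in a goroutine.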
+} + +// TODO: remove this from cli and return the authzMiddleware +func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config, pluginStore *plugin.Store) error { + v := cfg.Version + + exp := middleware.NewExperimentalMiddleware(cli.Config.Experimental) + s.UseMiddleware(exp) + + vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion) + s.UseMiddleware(vm) + + if cfg.EnableCors || cfg.CorsHeaders != "" { + c := middleware.NewCORSMiddleware(cfg.CorsHeaders) + s.UseMiddleware(c) + } + + cli.authzMiddleware = authorization.NewMiddleware(cli.Config.AuthorizationPlugins, pluginStore) + cli.Config.AuthzMiddleware = cli.authzMiddleware + s.UseMiddleware(cli.authzMiddleware) + return nil +} + +// validates that the plugins requested with the --authorization-plugin flag are valid AuthzDriver +// plugins present on the host and available to the daemon +func validateAuthzPlugins(requestedPlugins []string, pg plugingetter.PluginGetter) error { + for _, reqPlugin := range requestedPlugins { + if _, err := pg.Get(reqPlugin, authorization.AuthZApiImplements, plugingetter.Lookup); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/moby/moby/cmd/dockerd/daemon_freebsd.go b/vendor/github.com/moby/moby/cmd/dockerd/daemon_freebsd.go new file mode 100644 index 000000000..6d013b810 --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/daemon_freebsd.go @@ -0,0 +1,9 @@ +package main + +// preNotifySystem sends a message to the host when the API is active, but before the daemon is +func preNotifySystem() { +} + +// notifySystem sends a message to the host when the server is ready to be used +func notifySystem() { +} diff --git a/vendor/github.com/moby/moby/cmd/dockerd/daemon_linux.go b/vendor/github.com/moby/moby/cmd/dockerd/daemon_linux.go new file mode 100644 index 000000000..a909ee4fb --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/daemon_linux.go @@ -0,0 +1,15 @@ +// +build linux + +package main + +import systemdDaemon "github.com/coreos/go-systemd/daemon" + +// preNotifySystem sends a message to the host when the API is active, but before the daemon is +func preNotifySystem() { +} + +// notifySystem sends a message to the host when the server is ready to be used +func notifySystem() { + // Tell the init daemon we are accepting requests + go systemdDaemon.SdNotify("READY=1") +} diff --git a/vendor/github.com/moby/moby/cmd/dockerd/daemon_solaris.go b/vendor/github.com/moby/moby/cmd/dockerd/daemon_solaris.go new file mode 100644 index 000000000..dab4d4aaa --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/daemon_solaris.go @@ -0,0 +1,89 @@ +// +build solaris + +package main + +import ( + "fmt" + "net" + "os" + "path/filepath" + + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/system" + "golang.org/x/sys/unix" +) + +const defaultDaemonConfigFile = "" + +// currentUserIsOwner checks whether the current user is the owner of the given +// file. 
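+// A minimal usage sketch (illustrative only; the path is hypothetical, the
+// default pidfile location is configuration-dependent):
+//
+//	if currentUserIsOwner("/var/run/docker.pid") { /* safe to reuse */ }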
+func currentUserIsOwner(f string) bool {
+	if fileInfo, err := system.Stat(f); err == nil && fileInfo != nil {
+		if int(fileInfo.UID()) == os.Getuid() {
+			return true
+		}
+	}
+	return false
+}
+
+// setDefaultUmask sets the umask to 0022 to avoid problems
+// caused by a custom umask
+func setDefaultUmask() error {
+	desiredUmask := 0022
+	unix.Umask(desiredUmask)
+	if umask := unix.Umask(desiredUmask); umask != desiredUmask {
+		return fmt.Errorf("failed to set umask: expected %#o, got %#o", desiredUmask, umask)
+	}
+
+	return nil
+}
+
+func getDaemonConfDir(_ string) string {
+	return "/etc/docker"
+}
+
+// setupConfigReloadTrap is a no-op on Solaris; reloading the configuration
+// via a signal is not supported here.
+func (cli *DaemonCli) setupConfigReloadTrap() {
+}
+
+// preNotifySystem sends a message to the host when the API is active, but
+// before the daemon is set up.
+func preNotifySystem() {
+}
+
+// notifySystem sends a message to the host when the server is ready to be used
+func notifySystem() {
+}
+
+func (cli *DaemonCli) getPlatformRemoteOptions() []libcontainerd.RemoteOption {
+	opts := []libcontainerd.RemoteOption{}
+	if cli.Config.ContainerdAddr != "" {
+		opts = append(opts, libcontainerd.WithRemoteAddr(cli.Config.ContainerdAddr))
+	} else {
+		opts = append(opts, libcontainerd.WithStartDaemon(true))
+	}
+	return opts
+}
+
+// getLibcontainerdRoot gets the root directory for libcontainerd/containerd to
+// store their state.
+func (cli *DaemonCli) getLibcontainerdRoot() string {
+	return filepath.Join(cli.Config.ExecRoot, "libcontainerd")
+}
+
+// getSwarmRunRoot gets the root directory for swarm to store runtime state,
+// for example the control socket.
+func (cli *DaemonCli) getSwarmRunRoot() string {
+	return filepath.Join(cli.Config.ExecRoot, "swarm")
+}
+
+func allocateDaemonPort(addr string) error {
+	return nil
+}
+
+// notifyShutdown is called after the daemon shuts down but before the process exits.
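+// On Solaris this is a no-op; the Windows build, by contrast, reports the
+// shutdown result to the service control manager (see daemon_windows.go).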
+func notifyShutdown(err error) { +} + +func wrapListeners(proto string, ls []net.Listener) []net.Listener { + return ls +} diff --git a/vendor/github.com/moby/moby/cmd/dockerd/daemon_test.go b/vendor/github.com/moby/moby/cmd/dockerd/daemon_test.go new file mode 100644 index 000000000..7ae91fe1a --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/daemon_test.go @@ -0,0 +1,145 @@ +package main + +import ( + "testing" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/pkg/testutil" + "github.com/docker/docker/pkg/testutil/tempfile" + "github.com/spf13/pflag" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func defaultOptions(configFile string) *daemonOptions { + opts := newDaemonOptions(&config.Config{}) + opts.flags = &pflag.FlagSet{} + opts.InstallFlags(opts.flags) + installConfigFlags(opts.daemonConfig, opts.flags) + opts.flags.StringVar(&opts.configFile, "config-file", defaultDaemonConfigFile, "") + opts.configFile = configFile + return opts +} + +func TestLoadDaemonCliConfigWithoutOverriding(t *testing.T) { + opts := defaultOptions("") + opts.Debug = true + + loadedConfig, err := loadDaemonCliConfig(opts) + require.NoError(t, err) + require.NotNil(t, loadedConfig) + if !loadedConfig.Debug { + t.Fatalf("expected debug to be copied from the common flags, got false") + } +} + +func TestLoadDaemonCliConfigWithTLS(t *testing.T) { + opts := defaultOptions("") + opts.TLSOptions.CAFile = "/tmp/ca.pem" + opts.TLS = true + + loadedConfig, err := loadDaemonCliConfig(opts) + require.NoError(t, err) + require.NotNil(t, loadedConfig) + assert.Equal(t, "/tmp/ca.pem", loadedConfig.CommonTLSOptions.CAFile) +} + +func TestLoadDaemonCliConfigWithConflicts(t *testing.T) { + tempFile := tempfile.NewTempFile(t, "config", `{"labels": ["l3=foo"]}`) + defer tempFile.Remove() + configFile := tempFile.Name() + + opts := defaultOptions(configFile) + flags := opts.flags + + assert.NoError(t, flags.Set("config-file", configFile)) + assert.NoError(t, flags.Set("label", "l1=bar")) + assert.NoError(t, flags.Set("label", "l2=baz")) + + _, err := loadDaemonCliConfig(opts) + testutil.ErrorContains(t, err, "as a flag and in the configuration file: labels") +} + +func TestLoadDaemonCliConfigWithTLSVerify(t *testing.T) { + tempFile := tempfile.NewTempFile(t, "config", `{"tlsverify": true}`) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + opts.TLSOptions.CAFile = "/tmp/ca.pem" + + loadedConfig, err := loadDaemonCliConfig(opts) + require.NoError(t, err) + require.NotNil(t, loadedConfig) + assert.Equal(t, loadedConfig.TLS, true) +} + +func TestLoadDaemonCliConfigWithExplicitTLSVerifyFalse(t *testing.T) { + tempFile := tempfile.NewTempFile(t, "config", `{"tlsverify": false}`) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + opts.TLSOptions.CAFile = "/tmp/ca.pem" + + loadedConfig, err := loadDaemonCliConfig(opts) + require.NoError(t, err) + require.NotNil(t, loadedConfig) + assert.True(t, loadedConfig.TLS) +} + +func TestLoadDaemonCliConfigWithoutTLSVerify(t *testing.T) { + tempFile := tempfile.NewTempFile(t, "config", `{}`) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + opts.TLSOptions.CAFile = "/tmp/ca.pem" + + loadedConfig, err := loadDaemonCliConfig(opts) + require.NoError(t, err) + require.NotNil(t, loadedConfig) + assert.False(t, loadedConfig.TLS) +} + +func TestLoadDaemonCliConfigWithLogLevel(t *testing.T) { + tempFile := tempfile.NewTempFile(t, "config", 
`{"log-level": "warn"}`) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + require.NoError(t, err) + require.NotNil(t, loadedConfig) + assert.Equal(t, "warn", loadedConfig.LogLevel) + assert.Equal(t, logrus.WarnLevel, logrus.GetLevel()) +} + +func TestLoadDaemonConfigWithEmbeddedOptions(t *testing.T) { + content := `{"tlscacert": "/etc/certs/ca.pem", "log-driver": "syslog"}` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + require.NoError(t, err) + require.NotNil(t, loadedConfig) + assert.Equal(t, "/etc/certs/ca.pem", loadedConfig.CommonTLSOptions.CAFile) + assert.Equal(t, "syslog", loadedConfig.LogConfig.Type) +} + +func TestLoadDaemonConfigWithRegistryOptions(t *testing.T) { + content := `{ + "allow-nondistributable-artifacts": ["allow-nondistributable-artifacts.com"], + "registry-mirrors": ["https://mirrors.docker.com"], + "insecure-registries": ["https://insecure.docker.com"] + }` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + require.NoError(t, err) + require.NotNil(t, loadedConfig) + + assert.Len(t, loadedConfig.AllowNondistributableArtifacts, 1) + assert.Len(t, loadedConfig.Mirrors, 1) + assert.Len(t, loadedConfig.InsecureRegistries, 1) +} diff --git a/vendor/github.com/moby/moby/cmd/dockerd/daemon_unix.go b/vendor/github.com/moby/moby/cmd/dockerd/daemon_unix.go new file mode 100644 index 000000000..7909d98da --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/daemon_unix.go @@ -0,0 +1,125 @@ +// +build !windows,!solaris + +package main + +import ( + "fmt" + "net" + "os" + "os/signal" + "path/filepath" + "strconv" + + "github.com/docker/docker/cmd/dockerd/hack" + "github.com/docker/docker/daemon" + "github.com/docker/docker/libcontainerd" + "github.com/docker/libnetwork/portallocator" + "golang.org/x/sys/unix" +) + +const defaultDaemonConfigFile = "/etc/docker/daemon.json" + +// setDefaultUmask sets the umask to 0022 to avoid problems +// caused by custom umask +func setDefaultUmask() error { + desiredUmask := 0022 + unix.Umask(desiredUmask) + if umask := unix.Umask(desiredUmask); umask != desiredUmask { + return fmt.Errorf("failed to set umask: expected %#o, got %#o", desiredUmask, umask) + } + + return nil +} + +func getDaemonConfDir(_ string) string { + return "/etc/docker" +} + +// setupConfigReloadTrap configures the USR2 signal to reload the configuration. 
+func (cli *DaemonCli) setupConfigReloadTrap() { + c := make(chan os.Signal, 1) + signal.Notify(c, unix.SIGHUP) + go func() { + for range c { + cli.reloadConfig() + } + }() +} + +func (cli *DaemonCli) getPlatformRemoteOptions() []libcontainerd.RemoteOption { + opts := []libcontainerd.RemoteOption{ + libcontainerd.WithDebugLog(cli.Config.Debug), + libcontainerd.WithOOMScore(cli.Config.OOMScoreAdjust), + } + if cli.Config.ContainerdAddr != "" { + opts = append(opts, libcontainerd.WithRemoteAddr(cli.Config.ContainerdAddr)) + } else { + opts = append(opts, libcontainerd.WithStartDaemon(true)) + } + if daemon.UsingSystemd(cli.Config) { + args := []string{"--systemd-cgroup=true"} + opts = append(opts, libcontainerd.WithRuntimeArgs(args)) + } + if cli.Config.LiveRestoreEnabled { + opts = append(opts, libcontainerd.WithLiveRestore(true)) + } + opts = append(opts, libcontainerd.WithRuntimePath(daemon.DefaultRuntimeBinary)) + return opts +} + +// getLibcontainerdRoot gets the root directory for libcontainerd/containerd to +// store their state. +func (cli *DaemonCli) getLibcontainerdRoot() string { + return filepath.Join(cli.Config.ExecRoot, "libcontainerd") +} + +// getSwarmRunRoot gets the root directory for swarm to store runtime state +// For example, the control socket +func (cli *DaemonCli) getSwarmRunRoot() string { + return filepath.Join(cli.Config.ExecRoot, "swarm") +} + +// allocateDaemonPort ensures that there are no containers +// that try to use any port allocated for the docker server. +func allocateDaemonPort(addr string) error { + host, port, err := net.SplitHostPort(addr) + if err != nil { + return err + } + + intPort, err := strconv.Atoi(port) + if err != nil { + return err + } + + var hostIPs []net.IP + if parsedIP := net.ParseIP(host); parsedIP != nil { + hostIPs = append(hostIPs, parsedIP) + } else if hostIPs, err = net.LookupIP(host); err != nil { + return fmt.Errorf("failed to lookup %s address in host specification", host) + } + + pa := portallocator.Get() + for _, hostIP := range hostIPs { + if _, err := pa.RequestPort(hostIP, "tcp", intPort); err != nil { + return fmt.Errorf("failed to allocate daemon listening port %d (err: %v)", intPort, err) + } + } + return nil +} + +// notifyShutdown is called after the daemon shuts down but before the process exits. 
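+// On these unix platforms it is a no-op; any startup error is surfaced
+// through the process exit status in main() (see docker.go).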
+func notifyShutdown(err error) { +} + +func wrapListeners(proto string, ls []net.Listener) []net.Listener { + switch proto { + case "unix": + ls[0] = &hack.MalformedHostHeaderOverride{ls[0]} + case "fd": + for i := range ls { + ls[i] = &hack.MalformedHostHeaderOverride{ls[i]} + } + } + return ls +} diff --git a/vendor/github.com/moby/moby/cmd/dockerd/daemon_unix_test.go b/vendor/github.com/moby/moby/cmd/dockerd/daemon_unix_test.go new file mode 100644 index 000000000..ebe73362f --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/daemon_unix_test.go @@ -0,0 +1,114 @@ +// +build !windows,!solaris + +// TODO: Create new file for Solaris which tests config parameters +// as described in daemon/config_solaris.go + +package main + +import ( + "testing" + + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/pkg/testutil/tempfile" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestLoadDaemonCliConfigWithDaemonFlags(t *testing.T) { + content := `{"log-opts": {"max-size": "1k"}}` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + opts.Debug = true + opts.LogLevel = "info" + assert.NoError(t, opts.flags.Set("selinux-enabled", "true")) + + loadedConfig, err := loadDaemonCliConfig(opts) + require.NoError(t, err) + require.NotNil(t, loadedConfig) + + assert.True(t, loadedConfig.Debug) + assert.Equal(t, "info", loadedConfig.LogLevel) + assert.True(t, loadedConfig.EnableSelinuxSupport) + assert.Equal(t, "json-file", loadedConfig.LogConfig.Type) + assert.Equal(t, "1k", loadedConfig.LogConfig.Config["max-size"]) +} + +func TestLoadDaemonConfigWithNetwork(t *testing.T) { + content := `{"bip": "127.0.0.2", "ip": "127.0.0.1"}` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + require.NoError(t, err) + require.NotNil(t, loadedConfig) + + assert.Equal(t, "127.0.0.2", loadedConfig.IP) + assert.Equal(t, "127.0.0.1", loadedConfig.DefaultIP.String()) +} + +func TestLoadDaemonConfigWithMapOptions(t *testing.T) { + content := `{ + "cluster-store-opts": {"kv.cacertfile": "/var/lib/docker/discovery_certs/ca.pem"}, + "log-opts": {"tag": "test"} +}` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + require.NoError(t, err) + require.NotNil(t, loadedConfig) + assert.NotNil(t, loadedConfig.ClusterOpts) + + expectedPath := "/var/lib/docker/discovery_certs/ca.pem" + assert.Equal(t, expectedPath, loadedConfig.ClusterOpts["kv.cacertfile"]) + assert.NotNil(t, loadedConfig.LogConfig.Config) + assert.Equal(t, "test", loadedConfig.LogConfig.Config["tag"]) +} + +func TestLoadDaemonConfigWithTrueDefaultValues(t *testing.T) { + content := `{ "userland-proxy": false }` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + require.NoError(t, err) + require.NotNil(t, loadedConfig) + + assert.False(t, loadedConfig.EnableUserlandProxy) + + // make sure reloading doesn't generate configuration + // conflicts after normalizing boolean values. 
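+	// (config.Reload re-parses the file on disk and merges it with the flag
+	// set, so the explicit `"userland-proxy": false` above must survive the
+	// round trip.)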
+ reload := func(reloadedConfig *config.Config) { + assert.False(t, reloadedConfig.EnableUserlandProxy) + } + assert.NoError(t, config.Reload(opts.configFile, opts.flags, reload)) +} + +func TestLoadDaemonConfigWithTrueDefaultValuesLeaveDefaults(t *testing.T) { + tempFile := tempfile.NewTempFile(t, "config", `{}`) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + require.NoError(t, err) + require.NotNil(t, loadedConfig) + + assert.True(t, loadedConfig.EnableUserlandProxy) +} + +func TestLoadDaemonConfigWithLegacyRegistryOptions(t *testing.T) { + content := `{"disable-legacy-registry": false}` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + require.NoError(t, err) + require.NotNil(t, loadedConfig) + assert.False(t, loadedConfig.V2Only) +} diff --git a/vendor/github.com/moby/moby/cmd/dockerd/daemon_windows.go b/vendor/github.com/moby/moby/cmd/dockerd/daemon_windows.go new file mode 100644 index 000000000..2e9598d4c --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/daemon_windows.go @@ -0,0 +1,98 @@ +package main + +import ( + "fmt" + "net" + "os" + "path/filepath" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/system" + "golang.org/x/sys/windows" +) + +var defaultDaemonConfigFile = "" + +// currentUserIsOwner checks whether the current user is the owner of the given +// file. +func currentUserIsOwner(f string) bool { + return false +} + +// setDefaultUmask doesn't do anything on windows +func setDefaultUmask() error { + return nil +} + +func getDaemonConfDir(root string) string { + return filepath.Join(root, `\config`) +} + +// preNotifySystem sends a message to the host when the API is active, but before the daemon is +func preNotifySystem() { + // start the service now to prevent timeouts waiting for daemon to start + // but still (eventually) complete all requests that are sent after this + if service != nil { + err := service.started() + if err != nil { + logrus.Fatal(err) + } + } +} + +// notifySystem sends a message to the host when the server is ready to be used +func notifySystem() { +} + +// notifyShutdown is called after the daemon shuts down but before the process exits. +func notifyShutdown(err error) { + if service != nil { + if err != nil { + logrus.Fatal(err) + } + service.stopped(err) + } +} + +// setupConfigReloadTrap configures a Win32 event to reload the configuration. +func (cli *DaemonCli) setupConfigReloadTrap() { + go func() { + sa := windows.SecurityAttributes{ + Length: 0, + } + ev := "Global\\docker-daemon-config-" + fmt.Sprint(os.Getpid()) + if h, _ := system.CreateEvent(&sa, false, false, ev); h != 0 { + logrus.Debugf("Config reload - waiting signal at %s", ev) + for { + windows.WaitForSingleObject(h, windows.INFINITE) + cli.reloadConfig() + } + } + }() +} + +func (cli *DaemonCli) getPlatformRemoteOptions() []libcontainerd.RemoteOption { + return nil +} + +// getLibcontainerdRoot gets the root directory for libcontainerd to store its +// state. The Windows libcontainerd implementation does not need to write a spec +// or state to disk, so this is a no-op. 
+func (cli *DaemonCli) getLibcontainerdRoot() string { + return "" +} + +// getSwarmRunRoot gets the root directory for swarm to store runtime state +// For example, the control socket +func (cli *DaemonCli) getSwarmRunRoot() string { + return "" +} + +func allocateDaemonPort(addr string) error { + return nil +} + +func wrapListeners(proto string, ls []net.Listener) []net.Listener { + return ls +} diff --git a/vendor/github.com/moby/moby/cmd/dockerd/docker.go b/vendor/github.com/moby/moby/cmd/dockerd/docker.go new file mode 100644 index 000000000..8a5c8f543 --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/docker.go @@ -0,0 +1,109 @@ +package main + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/cli" + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/docker/pkg/term" + "github.com/spf13/cobra" +) + +func newDaemonCommand() *cobra.Command { + opts := newDaemonOptions(config.New()) + + cmd := &cobra.Command{ + Use: "dockerd [OPTIONS]", + Short: "A self-sufficient runtime for containers.", + SilenceUsage: true, + SilenceErrors: true, + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + opts.flags = cmd.Flags() + return runDaemon(opts) + }, + } + cli.SetupRootCommand(cmd) + + flags := cmd.Flags() + flags.BoolVarP(&opts.version, "version", "v", false, "Print version information and quit") + flags.StringVar(&opts.configFile, "config-file", defaultDaemonConfigFile, "Daemon configuration file") + opts.InstallFlags(flags) + installConfigFlags(opts.daemonConfig, flags) + installServiceFlags(flags) + + return cmd +} + +func runDaemon(opts *daemonOptions) error { + if opts.version { + showVersion() + return nil + } + + daemonCli := NewDaemonCli() + + // Windows specific settings as these are not defaulted. + if runtime.GOOS == "windows" { + if opts.daemonConfig.Pidfile == "" { + opts.daemonConfig.Pidfile = filepath.Join(opts.daemonConfig.Root, "docker.pid") + } + if opts.configFile == "" { + opts.configFile = filepath.Join(opts.daemonConfig.Root, `config\daemon.json`) + } + } + + // On Windows, this may be launching as a service or with an option to + // register the service. + stop, runAsService, err := initService(daemonCli) + if err != nil { + logrus.Fatal(err) + } + + if stop { + return nil + } + + // If Windows SCM manages the service - no need for PID files + if runAsService { + opts.daemonConfig.Pidfile = "" + } + + err = daemonCli.start(opts) + notifyShutdown(err) + return err +} + +func showVersion() { + fmt.Printf("Docker version %s, build %s\n", dockerversion.Version, dockerversion.GitCommit) +} + +func main() { + if reexec.Init() { + return + } + + // Set terminal emulation based on platform as required. + _, stdout, stderr := term.StdStreams() + + // @jhowardmsft - maybe there is a historic reason why on non-Windows, stderr is used + // here. However, on Windows it makes no sense and there is no need. 
+ if runtime.GOOS == "windows" { + logrus.SetOutput(stdout) + } else { + logrus.SetOutput(stderr) + } + + cmd := newDaemonCommand() + cmd.SetOutput(stdout) + if err := cmd.Execute(); err != nil { + fmt.Fprintf(stderr, "%s\n", err) + os.Exit(1) + } +} diff --git a/vendor/github.com/moby/moby/cmd/dockerd/docker_windows.go b/vendor/github.com/moby/moby/cmd/dockerd/docker_windows.go new file mode 100644 index 000000000..19c5587cb --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/docker_windows.go @@ -0,0 +1,18 @@ +package main + +import ( + "sync/atomic" + + _ "github.com/docker/docker/autogen/winresources/dockerd" +) + +//go:cgo_import_dynamic main.dummy CommandLineToArgvW%2 "shell32.dll" + +var dummy uintptr + +func init() { + // Ensure that this import is not removed by the linker. This is used to + // ensure that shell32.dll is loaded by the system loader, preventing + // go#15286 from triggering on Nano Server TP5. + atomic.LoadUintptr(&dummy) +} diff --git a/vendor/github.com/moby/moby/cmd/dockerd/hack/malformed_host_override.go b/vendor/github.com/moby/moby/cmd/dockerd/hack/malformed_host_override.go new file mode 100644 index 000000000..e7175caa0 --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/hack/malformed_host_override.go @@ -0,0 +1,121 @@ +// +build !windows + +package hack + +import "net" + +// MalformedHostHeaderOverride is a wrapper to be able +// to overcome the 400 Bad request coming from old docker +// clients that send an invalid Host header. +type MalformedHostHeaderOverride struct { + net.Listener +} + +// MalformedHostHeaderOverrideConn wraps the underlying unix +// connection and keeps track of the first read from http.Server +// which just reads the headers. +type MalformedHostHeaderOverrideConn struct { + net.Conn + first bool +} + +var closeConnHeader = []byte("\r\nConnection: close\r") + +// Read reads the first *read* request from http.Server to inspect +// the Host header. If the Host starts with / then we're talking to +// an old docker client which send an invalid Host header. To not +// error out in http.Server we rewrite the first bytes of the request +// to sanitize the Host header itself. +// In case we're not dealing with old docker clients the data is just passed +// to the server w/o modification. +func (l *MalformedHostHeaderOverrideConn) Read(b []byte) (n int, err error) { + // http.Server uses a 4k buffer + if l.first && len(b) == 4096 { + // This keeps track of the first read from http.Server which just reads + // the headers + l.first = false + // The first read of the connection by http.Server is done limited to + // DefaultMaxHeaderBytes (usually 1 << 20) + 4096. + // Here we do the first read which gets us all the http headers to + // be inspected and modified below. 
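+		// Illustration of the rewrite performed below: a request beginning
+		//
+		//	GET /foo
+		//	Host: /var/run/docker.sock
+		//
+		// has its Host value stripped and `Connection: close` injected,
+		// becoming
+		//
+		//	GET /foo
+		//	Host: \r\nConnection: close
+		//
+		// which net/http accepts instead of rejecting with 400 Bad Request.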
+ c, err := l.Conn.Read(b) + if err != nil { + return c, err + } + + var ( + start, end int + firstLineFeed = -1 + buf []byte + ) + for i := 0; i <= c-1-7; i++ { + if b[i] == '\n' && firstLineFeed == -1 { + firstLineFeed = i + } + if b[i] != '\n' { + continue + } + + if b[i+1] == '\r' && b[i+2] == '\n' { + return c, nil + } + + if b[i+1] != 'H' { + continue + } + if b[i+2] != 'o' { + continue + } + if b[i+3] != 's' { + continue + } + if b[i+4] != 't' { + continue + } + if b[i+5] != ':' { + continue + } + if b[i+6] != ' ' { + continue + } + if b[i+7] != '/' { + continue + } + // ensure clients other than the docker clients do not get this hack + if i != firstLineFeed { + return c, nil + } + start = i + 7 + // now find where the value ends + for ii, bbb := range b[start:c] { + if bbb == '\n' { + end = start + ii + break + } + } + buf = make([]byte, 0, c+len(closeConnHeader)-(end-start)) + // strip the value of the host header and + // inject `Connection: close` to ensure we don't reuse this connection + buf = append(buf, b[:start]...) + buf = append(buf, closeConnHeader...) + buf = append(buf, b[end:c]...) + copy(b, buf) + break + } + if len(buf) == 0 { + return c, nil + } + return len(buf), nil + } + return l.Conn.Read(b) +} + +// Accept makes the listener accepts connections and wraps the connection +// in a MalformedHostHeaderOverrideConn initializing first to true. +func (l *MalformedHostHeaderOverride) Accept() (net.Conn, error) { + c, err := l.Listener.Accept() + if err != nil { + return c, err + } + return &MalformedHostHeaderOverrideConn{c, true}, nil +} diff --git a/vendor/github.com/moby/moby/cmd/dockerd/hack/malformed_host_override_test.go b/vendor/github.com/moby/moby/cmd/dockerd/hack/malformed_host_override_test.go new file mode 100644 index 000000000..1a0a60baf --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/hack/malformed_host_override_test.go @@ -0,0 +1,124 @@ +// +build !windows + +package hack + +import ( + "bytes" + "io" + "net" + "strings" + "testing" +) + +type bufConn struct { + net.Conn + buf *bytes.Buffer +} + +func (bc *bufConn) Read(b []byte) (int, error) { + return bc.buf.Read(b) +} + +func TestHeaderOverrideHack(t *testing.T) { + tests := [][2][]byte{ + { + []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\n"), + []byte("GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\r\n\r\n"), + }, + { + []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\nFoo: Bar\r\n"), + []byte("GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\nFoo: Bar\r\n"), + }, + { + []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\ntest something!"), + []byte("GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\r\n\r\ntest something!"), + }, + { + []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\ntest something! " + strings.Repeat("test", 15000)), + []byte("GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\r\n\r\ntest something! 
" + strings.Repeat("test", 15000)), + }, + { + []byte("GET /foo\nFoo: Bar\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\n"), + []byte("GET /foo\nFoo: Bar\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\n"), + }, + } + + // Test for https://github.com/docker/docker/issues/23045 + h0 := "GET /foo\nUser-Agent: Docker\r\n\r\n" + h0 = h0 + strings.Repeat("a", 4096-len(h0)-1) + "\n" + tests = append(tests, [2][]byte{[]byte(h0), []byte(h0)}) + + for _, pair := range tests { + read := make([]byte, 4096) + client := &bufConn{ + buf: bytes.NewBuffer(pair[0]), + } + l := MalformedHostHeaderOverrideConn{client, true} + + n, err := l.Read(read) + if err != nil && err != io.EOF { + t.Fatalf("read: %d - %d, err: %v\n%s", n, len(pair[0]), err, string(read[:n])) + } + if !bytes.Equal(read[:n], pair[1][:n]) { + t.Fatalf("\n%s\n%s\n", read[:n], pair[1][:n]) + } + } +} + +func BenchmarkWithHack(b *testing.B) { + client, srv := net.Pipe() + done := make(chan struct{}) + req := []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\n") + read := make([]byte, 4096) + b.SetBytes(int64(len(req) * 30)) + + l := MalformedHostHeaderOverrideConn{client, true} + go func() { + for { + if _, err := srv.Write(req); err != nil { + srv.Close() + break + } + l.first = true // make sure each subsequent run uses the hack parsing + } + close(done) + }() + + for i := 0; i < b.N; i++ { + for i := 0; i < 30; i++ { + if n, err := l.Read(read); err != nil && err != io.EOF { + b.Fatalf("read: %d - %d, err: %v\n%s", n, len(req), err, string(read[:n])) + } + } + } + l.Close() + <-done +} + +func BenchmarkNoHack(b *testing.B) { + client, srv := net.Pipe() + done := make(chan struct{}) + req := []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\n") + read := make([]byte, 4096) + b.SetBytes(int64(len(req) * 30)) + + go func() { + for { + if _, err := srv.Write(req); err != nil { + srv.Close() + break + } + } + close(done) + }() + + for i := 0; i < b.N; i++ { + for i := 0; i < 30; i++ { + if _, err := client.Read(read); err != nil && err != io.EOF { + b.Fatal(err) + } + } + } + client.Close() + <-done +} diff --git a/vendor/github.com/moby/moby/cmd/dockerd/metrics.go b/vendor/github.com/moby/moby/cmd/dockerd/metrics.go new file mode 100644 index 000000000..0c8860408 --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/metrics.go @@ -0,0 +1,27 @@ +package main + +import ( + "net" + "net/http" + + "github.com/Sirupsen/logrus" + metrics "github.com/docker/go-metrics" +) + +func startMetricsServer(addr string) error { + if err := allocateDaemonPort(addr); err != nil { + return err + } + l, err := net.Listen("tcp", addr) + if err != nil { + return err + } + mux := http.NewServeMux() + mux.Handle("/metrics", metrics.Handler()) + go func() { + if err := http.Serve(l, mux); err != nil { + logrus.Errorf("serve metrics api: %s", err) + } + }() + return nil +} diff --git a/vendor/github.com/moby/moby/cmd/dockerd/options.go b/vendor/github.com/moby/moby/cmd/dockerd/options.go new file mode 100644 index 000000000..629b0223e --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/options.go @@ -0,0 +1,123 @@ +package main + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/Sirupsen/logrus" + cliconfig "github.com/docker/docker/cli/config" + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/opts" + "github.com/docker/go-connections/tlsconfig" + "github.com/spf13/pflag" +) + +const ( + // DefaultCaFile is the default filename for the CA pem file + DefaultCaFile = "ca.pem" + // 
DefaultKeyFile is the default filename for the key pem file + DefaultKeyFile = "key.pem" + // DefaultCertFile is the default filename for the cert pem file + DefaultCertFile = "cert.pem" + // FlagTLSVerify is the flag name for the TLS verification option + FlagTLSVerify = "tlsverify" +) + +var ( + dockerCertPath = os.Getenv("DOCKER_CERT_PATH") + dockerTLSVerify = os.Getenv("DOCKER_TLS_VERIFY") != "" +) + +type daemonOptions struct { + version bool + configFile string + daemonConfig *config.Config + flags *pflag.FlagSet + Debug bool + Hosts []string + LogLevel string + TLS bool + TLSVerify bool + TLSOptions *tlsconfig.Options +} + +// newDaemonOptions returns a new daemonFlags +func newDaemonOptions(config *config.Config) *daemonOptions { + return &daemonOptions{ + daemonConfig: config, + } +} + +// InstallFlags adds flags for the common options on the FlagSet +func (o *daemonOptions) InstallFlags(flags *pflag.FlagSet) { + if dockerCertPath == "" { + dockerCertPath = cliconfig.Dir() + } + + flags.BoolVarP(&o.Debug, "debug", "D", false, "Enable debug mode") + flags.StringVarP(&o.LogLevel, "log-level", "l", "info", `Set the logging level ("debug"|"info"|"warn"|"error"|"fatal")`) + flags.BoolVar(&o.TLS, "tls", false, "Use TLS; implied by --tlsverify") + flags.BoolVar(&o.TLSVerify, FlagTLSVerify, dockerTLSVerify, "Use TLS and verify the remote") + + // TODO use flag flags.String("identity"}, "i", "", "Path to libtrust key file") + + o.TLSOptions = &tlsconfig.Options{ + CAFile: filepath.Join(dockerCertPath, DefaultCaFile), + CertFile: filepath.Join(dockerCertPath, DefaultCertFile), + KeyFile: filepath.Join(dockerCertPath, DefaultKeyFile), + } + tlsOptions := o.TLSOptions + flags.Var(opts.NewQuotedString(&tlsOptions.CAFile), "tlscacert", "Trust certs signed only by this CA") + flags.Var(opts.NewQuotedString(&tlsOptions.CertFile), "tlscert", "Path to TLS certificate file") + flags.Var(opts.NewQuotedString(&tlsOptions.KeyFile), "tlskey", "Path to TLS key file") + + hostOpt := opts.NewNamedListOptsRef("hosts", &o.Hosts, opts.ValidateHost) + flags.VarP(hostOpt, "host", "H", "Daemon socket(s) to connect to") +} + +// SetDefaultOptions sets default values for options after flag parsing is +// complete +func (o *daemonOptions) SetDefaultOptions(flags *pflag.FlagSet) { + // Regardless of whether the user sets it to true or false, if they + // specify --tlsverify at all then we need to turn on TLS + // TLSVerify can be true even if not set due to DOCKER_TLS_VERIFY env var, so we need + // to check that here as well + if flags.Changed(FlagTLSVerify) || o.TLSVerify { + o.TLS = true + } + + if !o.TLS { + o.TLSOptions = nil + } else { + tlsOptions := o.TLSOptions + tlsOptions.InsecureSkipVerify = !o.TLSVerify + + // Reset CertFile and KeyFile to empty string if the user did not specify + // the respective flags and the respective default files were not found. 
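+		// (Presumably this lets TLS be enabled with only a CA file when the
+		// default cert/key pair is absent, rather than failing on the
+		// missing files.)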
+ if !flags.Changed("tlscert") { + if _, err := os.Stat(tlsOptions.CertFile); os.IsNotExist(err) { + tlsOptions.CertFile = "" + } + } + if !flags.Changed("tlskey") { + if _, err := os.Stat(tlsOptions.KeyFile); os.IsNotExist(err) { + tlsOptions.KeyFile = "" + } + } + } +} + +// setLogLevel sets the logrus logging level +func setLogLevel(logLevel string) { + if logLevel != "" { + lvl, err := logrus.ParseLevel(logLevel) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to parse logging level: %s\n", logLevel) + os.Exit(1) + } + logrus.SetLevel(lvl) + } else { + logrus.SetLevel(logrus.InfoLevel) + } +} diff --git a/vendor/github.com/moby/moby/cmd/dockerd/options_test.go b/vendor/github.com/moby/moby/cmd/dockerd/options_test.go new file mode 100644 index 000000000..c3298a0ac --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/options_test.go @@ -0,0 +1,43 @@ +package main + +import ( + "path/filepath" + "testing" + + cliconfig "github.com/docker/docker/cli/config" + "github.com/docker/docker/daemon/config" + "github.com/spf13/pflag" + "github.com/stretchr/testify/assert" +) + +func TestCommonOptionsInstallFlags(t *testing.T) { + flags := pflag.NewFlagSet("testing", pflag.ContinueOnError) + opts := newDaemonOptions(&config.Config{}) + opts.InstallFlags(flags) + + err := flags.Parse([]string{ + "--tlscacert=\"/foo/cafile\"", + "--tlscert=\"/foo/cert\"", + "--tlskey=\"/foo/key\"", + }) + assert.NoError(t, err) + assert.Equal(t, "/foo/cafile", opts.TLSOptions.CAFile) + assert.Equal(t, "/foo/cert", opts.TLSOptions.CertFile) + assert.Equal(t, opts.TLSOptions.KeyFile, "/foo/key") +} + +func defaultPath(filename string) string { + return filepath.Join(cliconfig.Dir(), filename) +} + +func TestCommonOptionsInstallFlagsWithDefaults(t *testing.T) { + flags := pflag.NewFlagSet("testing", pflag.ContinueOnError) + opts := newDaemonOptions(&config.Config{}) + opts.InstallFlags(flags) + + err := flags.Parse([]string{}) + assert.NoError(t, err) + assert.Equal(t, defaultPath("ca.pem"), opts.TLSOptions.CAFile) + assert.Equal(t, defaultPath("cert.pem"), opts.TLSOptions.CertFile) + assert.Equal(t, defaultPath("key.pem"), opts.TLSOptions.KeyFile) +} diff --git a/vendor/github.com/moby/moby/cmd/dockerd/service_unsupported.go b/vendor/github.com/moby/moby/cmd/dockerd/service_unsupported.go new file mode 100644 index 000000000..e67ad474b --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/service_unsupported.go @@ -0,0 +1,14 @@ +// +build !windows + +package main + +import ( + "github.com/spf13/pflag" +) + +func initService(daemonCli *DaemonCli) (bool, bool, error) { + return false, false, nil +} + +func installServiceFlags(flags *pflag.FlagSet) { +} diff --git a/vendor/github.com/moby/moby/cmd/dockerd/service_windows.go b/vendor/github.com/moby/moby/cmd/dockerd/service_windows.go new file mode 100644 index 000000000..017216951 --- /dev/null +++ b/vendor/github.com/moby/moby/cmd/dockerd/service_windows.go @@ -0,0 +1,430 @@ +package main + +import ( + "bytes" + "errors" + "fmt" + "io/ioutil" + "log" + "os" + "os/exec" + "path/filepath" + "time" + "unsafe" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/system" + "github.com/spf13/pflag" + "golang.org/x/sys/windows" + "golang.org/x/sys/windows/svc" + "golang.org/x/sys/windows/svc/debug" + "golang.org/x/sys/windows/svc/eventlog" + "golang.org/x/sys/windows/svc/mgr" +) + +var ( + flServiceName *string + flRegisterService *bool + flUnregisterService *bool + flRunService *bool + + setStdHandle = 
windows.NewLazySystemDLL("kernel32.dll").NewProc("SetStdHandle") + oldStderr windows.Handle + panicFile *os.File + + service *handler +) + +const ( + // These should match the values in event_messages.mc. + eventInfo = 1 + eventWarn = 1 + eventError = 1 + eventDebug = 2 + eventPanic = 3 + eventFatal = 4 + + eventExtraOffset = 10 // Add this to any event to get a string that supports extended data +) + +func installServiceFlags(flags *pflag.FlagSet) { + flServiceName = flags.String("service-name", "docker", "Set the Windows service name") + flRegisterService = flags.Bool("register-service", false, "Register the service and exit") + flUnregisterService = flags.Bool("unregister-service", false, "Unregister the service and exit") + flRunService = flags.Bool("run-service", false, "") + flags.MarkHidden("run-service") +} + +type handler struct { + tosvc chan bool + fromsvc chan error + daemonCli *DaemonCli +} + +type etwHook struct { + log *eventlog.Log +} + +func (h *etwHook) Levels() []logrus.Level { + return []logrus.Level{ + logrus.PanicLevel, + logrus.FatalLevel, + logrus.ErrorLevel, + logrus.WarnLevel, + logrus.InfoLevel, + logrus.DebugLevel, + } +} + +func (h *etwHook) Fire(e *logrus.Entry) error { + var ( + etype uint16 + eid uint32 + ) + + switch e.Level { + case logrus.PanicLevel: + etype = windows.EVENTLOG_ERROR_TYPE + eid = eventPanic + case logrus.FatalLevel: + etype = windows.EVENTLOG_ERROR_TYPE + eid = eventFatal + case logrus.ErrorLevel: + etype = windows.EVENTLOG_ERROR_TYPE + eid = eventError + case logrus.WarnLevel: + etype = windows.EVENTLOG_WARNING_TYPE + eid = eventWarn + case logrus.InfoLevel: + etype = windows.EVENTLOG_INFORMATION_TYPE + eid = eventInfo + case logrus.DebugLevel: + etype = windows.EVENTLOG_INFORMATION_TYPE + eid = eventDebug + default: + return errors.New("unknown level") + } + + // If there is additional data, include it as a second string. + exts := "" + if len(e.Data) > 0 { + fs := bytes.Buffer{} + for k, v := range e.Data { + fs.WriteString(k) + fs.WriteByte('=') + fmt.Fprint(&fs, v) + fs.WriteByte(' ') + } + + exts = fs.String()[:fs.Len()-1] + eid += eventExtraOffset + } + + if h.log == nil { + fmt.Fprintf(os.Stderr, "%s [%s]\n", e.Message, exts) + return nil + } + + var ( + ss [2]*uint16 + err error + ) + + ss[0], err = windows.UTF16PtrFromString(e.Message) + if err != nil { + return err + } + + count := uint16(1) + if exts != "" { + ss[1], err = windows.UTF16PtrFromString(exts) + if err != nil { + return err + } + + count++ + } + + return windows.ReportEvent(h.log.Handle, etype, 0, eid, 0, count, 0, &ss[0], nil) +} + +func getServicePath() (string, error) { + p, err := exec.LookPath(os.Args[0]) + if err != nil { + return "", err + } + return filepath.Abs(p) +} + +func registerService() error { + p, err := getServicePath() + if err != nil { + return err + } + m, err := mgr.Connect() + if err != nil { + return err + } + defer m.Disconnect() + + depends := []string{} + + // This dependency is required on build 14393 (RS1) + // it is added to the platform in newer builds + if system.GetOSVersion().Build == 14393 { + depends = append(depends, "ConDrv") + } + + c := mgr.Config{ + ServiceType: windows.SERVICE_WIN32_OWN_PROCESS, + StartType: mgr.StartAutomatic, + ErrorControl: mgr.ErrorNormal, + Dependencies: depends, + DisplayName: "Docker Engine", + } + + // Configure the service to launch with the arguments that were just passed. 
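+	// For example (illustrative), registering with
+	//	dockerd --register-service -H npipe://
+	// stores the service command line
+	//	dockerd --run-service -H npipe://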
+ args := []string{"--run-service"} + for _, a := range os.Args[1:] { + if a != "--register-service" && a != "--unregister-service" { + args = append(args, a) + } + } + + s, err := m.CreateService(*flServiceName, p, c, args...) + if err != nil { + return err + } + defer s.Close() + + // See http://stackoverflow.com/questions/35151052/how-do-i-configure-failure-actions-of-a-windows-service-written-in-go + const ( + scActionNone = 0 + scActionRestart = 1 + scActionReboot = 2 + scActionRunCommand = 3 + + serviceConfigFailureActions = 2 + ) + + type serviceFailureActions struct { + ResetPeriod uint32 + RebootMsg *uint16 + Command *uint16 + ActionsCount uint32 + Actions uintptr + } + + type scAction struct { + Type uint32 + Delay uint32 + } + t := []scAction{ + {Type: scActionRestart, Delay: uint32(60 * time.Second / time.Millisecond)}, + {Type: scActionRestart, Delay: uint32(60 * time.Second / time.Millisecond)}, + {Type: scActionNone}, + } + lpInfo := serviceFailureActions{ResetPeriod: uint32(24 * time.Hour / time.Second), ActionsCount: uint32(3), Actions: uintptr(unsafe.Pointer(&t[0]))} + err = windows.ChangeServiceConfig2(s.Handle, serviceConfigFailureActions, (*byte)(unsafe.Pointer(&lpInfo))) + if err != nil { + return err + } + + return eventlog.Install(*flServiceName, p, false, eventlog.Info|eventlog.Warning|eventlog.Error) +} + +func unregisterService() error { + m, err := mgr.Connect() + if err != nil { + return err + } + defer m.Disconnect() + + s, err := m.OpenService(*flServiceName) + if err != nil { + return err + } + defer s.Close() + + eventlog.Remove(*flServiceName) + err = s.Delete() + if err != nil { + return err + } + return nil +} + +// initService is the entry point for running the daemon as a Windows +// service. It returns an indication to stop (if registering/un-registering); +// an indication of whether it is running as a service; and an error. +func initService(daemonCli *DaemonCli) (bool, bool, error) { + if *flUnregisterService { + if *flRegisterService { + return true, false, errors.New("--register-service and --unregister-service cannot be used together") + } + return true, false, unregisterService() + } + + if *flRegisterService { + return true, false, registerService() + } + + if !*flRunService { + return false, false, nil + } + + interactive, err := svc.IsAnInteractiveSession() + if err != nil { + return false, false, err + } + + h := &handler{ + tosvc: make(chan bool), + fromsvc: make(chan error), + daemonCli: daemonCli, + } + + var log *eventlog.Log + if !interactive { + log, err = eventlog.Open(*flServiceName) + if err != nil { + return false, false, err + } + } + + logrus.AddHook(&etwHook{log}) + logrus.SetOutput(ioutil.Discard) + + service = h + go func() { + if interactive { + err = debug.Run(*flServiceName, h) + } else { + err = svc.Run(*flServiceName, h) + } + + h.fromsvc <- err + }() + + // Wait for the first signal from the service handler. 
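+	// (handler.Execute sends nil on fromsvc as soon as the service control
+	// manager begins executing the handler, unblocking the receive below.)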
+ err = <-h.fromsvc + if err != nil { + return false, false, err + } + return false, true, nil +} + +func (h *handler) started() error { + // This must be delayed until daemonCli initializes Config.Root + err := initPanicFile(filepath.Join(h.daemonCli.Config.Root, "panic.log")) + if err != nil { + return err + } + + h.tosvc <- false + return nil +} + +func (h *handler) stopped(err error) { + logrus.Debugf("Stopping service: %v", err) + h.tosvc <- err != nil + <-h.fromsvc +} + +func (h *handler) Execute(_ []string, r <-chan svc.ChangeRequest, s chan<- svc.Status) (bool, uint32) { + s <- svc.Status{State: svc.StartPending, Accepts: 0} + // Unblock initService() + h.fromsvc <- nil + + // Wait for initialization to complete. + failed := <-h.tosvc + if failed { + logrus.Debug("Aborting service start due to failure during initialization") + return true, 1 + } + + s <- svc.Status{State: svc.Running, Accepts: svc.AcceptStop | svc.AcceptShutdown | svc.Accepted(windows.SERVICE_ACCEPT_PARAMCHANGE)} + logrus.Debug("Service running") +Loop: + for { + select { + case failed = <-h.tosvc: + break Loop + case c := <-r: + switch c.Cmd { + case svc.Cmd(windows.SERVICE_CONTROL_PARAMCHANGE): + h.daemonCli.reloadConfig() + case svc.Interrogate: + s <- c.CurrentStatus + case svc.Stop, svc.Shutdown: + s <- svc.Status{State: svc.StopPending, Accepts: 0} + h.daemonCli.stop() + } + } + } + + removePanicFile() + if failed { + return true, 1 + } + return false, 0 +} + +func initPanicFile(path string) error { + var err error + panicFile, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0) + if err != nil { + return err + } + + st, err := panicFile.Stat() + if err != nil { + return err + } + + // If there are contents in the file already, move the file out of the way + // and replace it. + if st.Size() > 0 { + panicFile.Close() + os.Rename(path, path+".old") + panicFile, err = os.Create(path) + if err != nil { + return err + } + } + + // Update STD_ERROR_HANDLE to point to the panic file so that Go writes to + // it when it panics. Remember the old stderr to restore it before removing + // the panic file. + sh := windows.STD_ERROR_HANDLE + h, err := windows.GetStdHandle(uint32(sh)) + if err != nil { + return err + } + + oldStderr = h + + r, _, err := setStdHandle.Call(uintptr(sh), uintptr(panicFile.Fd())) + if r == 0 && err != nil { + return err + } + + // Reset os.Stderr to the panic file (so fmt.Fprintf(os.Stderr,...) actually gets redirected) + os.Stderr = os.NewFile(uintptr(panicFile.Fd()), "/dev/stderr") + + // Force threads that panic to write to stderr (the panicFile handle now), otherwise it will go into the ether + log.SetOutput(os.Stderr) + + return nil +} + +func removePanicFile() { + if st, err := panicFile.Stat(); err == nil { + if st.Size() == 0 { + sh := windows.STD_ERROR_HANDLE + setStdHandle.Call(uintptr(sh), uintptr(oldStderr)) + panicFile.Close() + os.Remove(panicFile.Name()) + } + } +} diff --git a/vendor/github.com/moby/moby/container/archive.go b/vendor/github.com/moby/moby/container/archive.go new file mode 100644 index 000000000..56e6598b9 --- /dev/null +++ b/vendor/github.com/moby/moby/container/archive.go @@ -0,0 +1,76 @@ +package container + +import ( + "os" + "path/filepath" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/system" +) + +// ResolvePath resolves the given path in the container to a resource on the +// host. 
Returns a resolved path (absolute path to the resource on the host),
+// the absolute path to the resource relative to the container's rootfs, and
+// an error if the path points to outside the container's rootfs.
+func (container *Container) ResolvePath(path string) (resolvedPath, absPath string, err error) {
+	// Check if a drive letter is supplied; if so, it must be the system
+	// drive. This is a no-op except on Windows.
+	path, err = system.CheckSystemDriveAndRemoveDriveLetter(path)
+	if err != nil {
+		return "", "", err
+	}
+
+	// Consider the given path as an absolute path in the container.
+	absPath = archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path)
+
+	// Split the absPath into its Directory and Base components. We will
+	// resolve the dir in the scope of the container then append the base.
+	dirPath, basePath := filepath.Split(absPath)
+
+	resolvedDirPath, err := container.GetResourcePath(dirPath)
+	if err != nil {
+		return "", "", err
+	}
+
+	// resolvedDirPath will have been cleaned (no trailing path separators) so
+	// we can manually join it with the base path element.
+	resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
+
+	return resolvedPath, absPath, nil
+}
+
+// StatPath stats the filesystem resource at the given resolvedPath. Locks and
+// mounts should be acquired before calling this method, and the given path
+// should be fully resolved to a path on the host corresponding to the given
+// absolute path inside the container.
+func (container *Container) StatPath(resolvedPath, absPath string) (stat *types.ContainerPathStat, err error) {
+	lstat, err := os.Lstat(resolvedPath)
+	if err != nil {
+		return nil, err
+	}
+
+	var linkTarget string
+	if lstat.Mode()&os.ModeSymlink != 0 {
+		// Fully evaluate the symlink in the scope of the container rootfs.
+		hostPath, err := container.GetResourcePath(absPath)
+		if err != nil {
+			return nil, err
+		}
+
+		linkTarget, err = filepath.Rel(container.BaseFS, hostPath)
+		if err != nil {
+			return nil, err
+		}
+
+		// Make it an absolute path.
+ linkTarget = filepath.Join(string(filepath.Separator), linkTarget) + } + + return &types.ContainerPathStat{ + Name: filepath.Base(absPath), + Size: lstat.Size(), + Mode: lstat.Mode(), + Mtime: lstat.ModTime(), + LinkTarget: linkTarget, + }, nil +} diff --git a/vendor/github.com/moby/moby/container/container.go b/vendor/github.com/moby/moby/container/container.go new file mode 100644 index 000000000..86e011144 --- /dev/null +++ b/vendor/github.com/moby/moby/container/container.go @@ -0,0 +1,1058 @@ +package container + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + containertypes "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + networktypes "github.com/docker/docker/api/types/network" + swarmtypes "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/container/stream" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/jsonfilelog" + "github.com/docker/docker/daemon/network" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/restartmanager" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/volume" + "github.com/docker/go-connections/nat" + "github.com/docker/go-units" + "github.com/docker/libnetwork" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/options" + "github.com/docker/libnetwork/types" + agentexec "github.com/docker/swarmkit/agent/exec" + "golang.org/x/net/context" +) + +const configFileName = "config.v2.json" + +const ( + // DefaultStopTimeout is the timeout (in seconds) for the syscall signal used to stop a container. + DefaultStopTimeout = 10 +) + +var ( + errInvalidEndpoint = fmt.Errorf("invalid endpoint while building port map info") + errInvalidNetwork = fmt.Errorf("invalid network settings while building port map info") +) + +// Container holds the structure defining a container object. +type Container struct { + StreamConfig *stream.Config + // embed for Container to support states directly. + *State `json:"State"` // Needed for Engine API version <= 1.11 + Root string `json:"-"` // Path to the "home" of the container, including metadata. 
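+	// (Root above holds on-disk metadata such as config.v2.json, whereas
+	// BaseFS below is the mounted root filesystem of the container itself.)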
+ BaseFS string `json:"-"` // Path to the graphdriver mountpoint + RWLayer layer.RWLayer `json:"-"` + ID string + Created time.Time + Managed bool + Path string + Args []string + Config *containertypes.Config + ImageID image.ID `json:"Image"` + NetworkSettings *network.Settings + LogPath string + Name string + Driver string + Platform string + // MountLabel contains the options for the 'mount' command + MountLabel string + ProcessLabel string + RestartCount int + HasBeenStartedBefore bool + HasBeenManuallyStopped bool // used for unless-stopped restart policy + MountPoints map[string]*volume.MountPoint + HostConfig *containertypes.HostConfig `json:"-"` // do not serialize the host config in the json, otherwise we'll make the container unportable + ExecCommands *exec.Store `json:"-"` + DependencyStore agentexec.DependencyGetter `json:"-"` + SecretReferences []*swarmtypes.SecretReference + ConfigReferences []*swarmtypes.ConfigReference + // logDriver for closing + LogDriver logger.Logger `json:"-"` + LogCopier *logger.Copier `json:"-"` + restartManager restartmanager.RestartManager + attachContext *attachContext + + // Fields here are specific to Unix platforms + AppArmorProfile string + HostnamePath string + HostsPath string + ShmPath string + ResolvConfPath string + SeccompProfile string + NoNewPrivileges bool + + // Fields here are specific to Windows + NetworkSharedContainerID string `json:"-"` + SharedEndpointList []string `json:"-"` +} + +// NewBaseContainer creates a new container with its +// basic configuration. +func NewBaseContainer(id, root string) *Container { + return &Container{ + ID: id, + State: NewState(), + ExecCommands: exec.NewStore(), + Root: root, + MountPoints: make(map[string]*volume.MountPoint), + StreamConfig: stream.NewConfig(), + attachContext: &attachContext{}, + } +} + +// FromDisk loads the container configuration stored in the host. +func (container *Container) FromDisk() error { + pth, err := container.ConfigPath() + if err != nil { + return err + } + + jsonSource, err := os.Open(pth) + if err != nil { + return err + } + defer jsonSource.Close() + + dec := json.NewDecoder(jsonSource) + + // Load container settings + if err := dec.Decode(container); err != nil { + return err + } + + // Ensure the platform is set if blank. Assume it is the platform of the + // host OS if not, to ensure containers created before multiple-platform + // support are migrated + if container.Platform == "" { + container.Platform = runtime.GOOS + } + + return container.readHostConfig() +} + +// toDisk saves the container configuration on disk and returns a deep copy. +func (container *Container) toDisk() (*Container, error) { + var ( + buf bytes.Buffer + deepCopy Container + ) + pth, err := container.ConfigPath() + if err != nil { + return nil, err + } + + // Save container settings + f, err := ioutils.NewAtomicFileWriter(pth, 0644) + if err != nil { + return nil, err + } + defer f.Close() + + w := io.MultiWriter(&buf, f) + if err := json.NewEncoder(w).Encode(container); err != nil { + return nil, err + } + + if err := json.NewDecoder(&buf).Decode(&deepCopy); err != nil { + return nil, err + } + deepCopy.HostConfig, err = container.WriteHostConfig() + if err != nil { + return nil, err + } + + return &deepCopy, nil +} + +// CheckpointTo makes the Container's current state visible to queries, and persists state. +// Callers must hold a Container lock. 
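+// The container is first serialized to disk via toDisk, which also produces
+// a deep copy through a JSON encode/decode round trip; that copy is what is
+// saved into the given ViewDB.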
+func (container *Container) CheckpointTo(store ViewDB) error { + deepCopy, err := container.toDisk() + if err != nil { + return err + } + return store.Save(deepCopy) +} + +// readHostConfig reads the host configuration from disk for the container. +func (container *Container) readHostConfig() error { + container.HostConfig = &containertypes.HostConfig{} + // If the hostconfig file does not exist, do not read it. + // (We still have to initialize container.HostConfig, + // but that's OK, since we just did that above.) + pth, err := container.HostConfigPath() + if err != nil { + return err + } + + f, err := os.Open(pth) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + defer f.Close() + + if err := json.NewDecoder(f).Decode(&container.HostConfig); err != nil { + return err + } + + container.InitDNSHostConfig() + + return nil +} + +// WriteHostConfig saves the host configuration on disk for the container, +// and returns a deep copy of the saved object. Callers must hold a Container lock. +func (container *Container) WriteHostConfig() (*containertypes.HostConfig, error) { + var ( + buf bytes.Buffer + deepCopy containertypes.HostConfig + ) + + pth, err := container.HostConfigPath() + if err != nil { + return nil, err + } + + f, err := ioutils.NewAtomicFileWriter(pth, 0644) + if err != nil { + return nil, err + } + defer f.Close() + + w := io.MultiWriter(&buf, f) + if err := json.NewEncoder(w).Encode(&container.HostConfig); err != nil { + return nil, err + } + + if err := json.NewDecoder(&buf).Decode(&deepCopy); err != nil { + return nil, err + } + return &deepCopy, nil +} + +// SetupWorkingDirectory sets up the container's working directory as set in container.Config.WorkingDir +func (container *Container) SetupWorkingDirectory(rootIDs idtools.IDPair) error { + if container.Config.WorkingDir == "" { + return nil + } + + container.Config.WorkingDir = filepath.Clean(container.Config.WorkingDir) + + pth, err := container.GetResourcePath(container.Config.WorkingDir) + if err != nil { + return err + } + + if err := idtools.MkdirAllAndChownNew(pth, 0755, rootIDs); err != nil { + pthInfo, err2 := os.Stat(pth) + if err2 == nil && pthInfo != nil && !pthInfo.IsDir() { + return fmt.Errorf("Cannot mkdir: %s is not a directory", container.Config.WorkingDir) + } + + return err + } + + return nil +} + +// GetResourcePath evaluates `path` in the scope of the container's BaseFS, with proper path +// sanitisation. Symlinks are all scoped to the BaseFS of the container, as +// though the container's BaseFS was `/`. +// +// The BaseFS of a container is the host-facing path which is bind-mounted as +// `/` inside the container. This method is essentially used to access a +// particular path inside the container as though you were a process in that +// container. +// +// NOTE: The returned path is *only* safely scoped inside the container's BaseFS +// if no component of the returned path changes (such as a component +// symlinking to a different path) between using this method and using the +// path. See symlink.FollowSymlinkInScope for more details. +func (container *Container) GetResourcePath(path string) (string, error) { + // IMPORTANT - These are paths on the OS where the daemon is running, hence + // any filepath operations must be done in an OS agnostic way. 
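+	// For example (illustrative paths): with a BaseFS of
+	// "/var/lib/docker/overlay2/<id>/merged", a request for
+	// "/foo/../../etc/passwd" is cleaned to "/etc/passwd" and then resolved
+	// strictly inside BaseFS, so the result cannot escape the rootfs.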
+ + cleanPath := cleanResourcePath(path) + r, e := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, cleanPath), container.BaseFS) + + // Log this here on the daemon side as there's otherwise no indication apart + // from the error being propagated all the way back to the client. This makes + // debugging significantly easier and clearly indicates the error comes from the daemon. + if e != nil { + logrus.Errorf("Failed to FollowSymlinkInScope BaseFS %s cleanPath %s path %s %s\n", container.BaseFS, cleanPath, path, e) + } + return r, e +} + +// GetRootResourcePath evaluates `path` in the scope of the container's root, with proper path +// sanitisation. Symlinks are all scoped to the root of the container, as +// though the container's root was `/`. +// +// The root of a container is the host-facing configuration metadata directory. +// Only use this method to safely access the container's `container.json` or +// other metadata files. If in doubt, use container.GetResourcePath. +// +// NOTE: The returned path is *only* safely scoped inside the container's root +// if no component of the returned path changes (such as a component +// symlinking to a different path) between using this method and using the +// path. See symlink.FollowSymlinkInScope for more details. +func (container *Container) GetRootResourcePath(path string) (string, error) { + // IMPORTANT - These are paths on the OS where the daemon is running, hence + // any filepath operations must be done in an OS agnostic way. + cleanPath := filepath.Join(string(os.PathSeparator), path) + return symlink.FollowSymlinkInScope(filepath.Join(container.Root, cleanPath), container.Root) +} + +// ExitOnNext signals to the monitor that it should not restart the container +// after we send the kill signal. +func (container *Container) ExitOnNext() { + container.RestartManager().Cancel() +} + +// HostConfigPath returns the path to the container's JSON hostconfig +func (container *Container) HostConfigPath() (string, error) { + return container.GetRootResourcePath("hostconfig.json") +} + +// ConfigPath returns the path to the container's JSON config +func (container *Container) ConfigPath() (string, error) { + return container.GetRootResourcePath(configFileName) +} + +// CheckpointDir returns the directory checkpoints are stored in +func (container *Container) CheckpointDir() string { + return filepath.Join(container.Root, "checkpoints") +} + +// StartLogger starts a new logger driver for the container. 
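+// The driver type and its options are taken from HostConfig.LogConfig. A
+// minimal usage sketch (illustrative only; error handling elided):
+//
+//	l, err := container.StartLogger()
+//	if err != nil {
+//		return err
+//	}
+//	defer l.Close()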
+func (container *Container) StartLogger() (logger.Logger, error) { + cfg := container.HostConfig.LogConfig + initDriver, err := logger.GetLogDriver(cfg.Type) + if err != nil { + return nil, fmt.Errorf("failed to get logging factory: %v", err) + } + info := logger.Info{ + Config: cfg.Config, + ContainerID: container.ID, + ContainerName: container.Name, + ContainerEntrypoint: container.Path, + ContainerArgs: container.Args, + ContainerImageID: container.ImageID.String(), + ContainerImageName: container.Config.Image, + ContainerCreated: container.Created, + ContainerEnv: container.Config.Env, + ContainerLabels: container.Config.Labels, + DaemonName: "docker", + } + + // Set logging file for "json-logger" + if cfg.Type == jsonfilelog.Name { + info.LogPath, err = container.GetRootResourcePath(fmt.Sprintf("%s-json.log", container.ID)) + if err != nil { + return nil, err + } + } + + l, err := initDriver(info) + if err != nil { + return nil, err + } + + if containertypes.LogMode(cfg.Config["mode"]) == containertypes.LogModeNonBlock { + bufferSize := int64(-1) + if s, exists := cfg.Config["max-buffer-size"]; exists { + bufferSize, err = units.RAMInBytes(s) + if err != nil { + return nil, err + } + } + l = logger.NewRingLogger(l, info, bufferSize) + } + return l, nil +} + +// GetProcessLabel returns the process label for the container. +func (container *Container) GetProcessLabel() string { + // even if we have a process label return "" if we are running + // in privileged mode + if container.HostConfig.Privileged { + return "" + } + return container.ProcessLabel +} + +// GetMountLabel returns the mounting label for the container. +// This label is empty if the container is privileged. +func (container *Container) GetMountLabel() string { + return container.MountLabel +} + +// GetExecIDs returns the list of exec commands running on the container. +func (container *Container) GetExecIDs() []string { + return container.ExecCommands.List() +} + +// ShouldRestart decides whether the daemon should restart the container or not. +// This is based on the container's restart policy. +func (container *Container) ShouldRestart() bool { + shouldRestart, _, _ := container.RestartManager().ShouldRestart(uint32(container.ExitCode()), container.HasBeenManuallyStopped, container.FinishedAt.Sub(container.StartedAt)) + return shouldRestart +} + +// AddMountPointWithVolume adds a new mount point configured with a volume to the container. 
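+// For example (assuming vol is an existing volume.Volume),
+//
+//	container.AddMountPointWithVolume("/data", vol, true)
+//
+// registers a writable mount of vol at /data inside the container.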
+func (container *Container) AddMountPointWithVolume(destination string, vol volume.Volume, rw bool) {
+	container.MountPoints[destination] = &volume.MountPoint{
+		Type:        mounttypes.TypeVolume,
+		Name:        vol.Name(),
+		Driver:      vol.DriverName(),
+		Destination: destination,
+		RW:          rw,
+		Volume:      vol,
+		CopyData:    volume.DefaultCopyMode,
+	}
+}
+
+// UnmountVolumes unmounts all volumes
+func (container *Container) UnmountVolumes(volumeEventLog func(name, action string, attributes map[string]string)) error {
+	var errors []string
+	for _, volumeMount := range container.MountPoints {
+		if volumeMount.Volume == nil {
+			continue
+		}
+
+		if err := volumeMount.Cleanup(); err != nil {
+			errors = append(errors, err.Error())
+			continue
+		}
+
+		attributes := map[string]string{
+			"driver":    volumeMount.Volume.DriverName(),
+			"container": container.ID,
+		}
+		volumeEventLog(volumeMount.Volume.Name(), "unmount", attributes)
+	}
+	if len(errors) > 0 {
+		return fmt.Errorf("error while unmounting volumes for container %s: %s", container.ID, strings.Join(errors, "; "))
+	}
+	return nil
+}
+
+// IsDestinationMounted checks whether a path is mounted on the container or not.
+func (container *Container) IsDestinationMounted(destination string) bool {
+	return container.MountPoints[destination] != nil
+}
+
+// StopSignal returns the signal used to stop the container.
+func (container *Container) StopSignal() int {
+	var stopSignal syscall.Signal
+	if container.Config.StopSignal != "" {
+		stopSignal, _ = signal.ParseSignal(container.Config.StopSignal)
+	}
+
+	if int(stopSignal) == 0 {
+		stopSignal, _ = signal.ParseSignal(signal.DefaultStopSignal)
+	}
+	return int(stopSignal)
+}
+
+// StopTimeout returns the timeout (in seconds) used to stop the container.
+func (container *Container) StopTimeout() int {
+	if container.Config.StopTimeout != nil {
+		return *container.Config.StopTimeout
+	}
+	return DefaultStopTimeout
+}
+
+// InitDNSHostConfig ensures that the DNS fields are never nil.
+// New containers don't ever have those fields nil,
+// but pre-created containers can still have those nil values.
+// The non-recommended host configuration in the start API can
+// make these fields nil again; this corrects that issue until
+// we remove that behavior for good.
+// See https://github.com/docker/docker/pull/17779
+// for a more detailed explanation on why we don't want that.
+func (container *Container) InitDNSHostConfig() {
+	container.Lock()
+	defer container.Unlock()
+	if container.HostConfig.DNS == nil {
+		container.HostConfig.DNS = make([]string, 0)
+	}
+
+	if container.HostConfig.DNSSearch == nil {
+		container.HostConfig.DNSSearch = make([]string, 0)
+	}
+
+	if container.HostConfig.DNSOptions == nil {
+		container.HostConfig.DNSOptions = make([]string, 0)
+	}
+}
+
+// GetEndpointInNetwork returns the container's endpoint in the provided network.
+func (container *Container) GetEndpointInNetwork(n libnetwork.Network) (libnetwork.Endpoint, error) {
+	endpointName := strings.TrimPrefix(container.Name, "/")
+	return n.EndpointByName(endpointName)
+}
+
+func (container *Container) buildPortMapInfo(ep libnetwork.Endpoint) error {
+	if ep == nil {
+		return errInvalidEndpoint
+	}
+
+	networkSettings := container.NetworkSettings
+	if networkSettings == nil {
+		return errInvalidNetwork
+	}
+
+	if len(networkSettings.Ports) == 0 {
+		pm, err := getEndpointPortMapInfo(ep)
+		if err != nil {
+			return err
+		}
+		networkSettings.Ports = pm
+	}
+	return nil
+}
+
+func getEndpointPortMapInfo(ep libnetwork.Endpoint) (nat.PortMap, error) {
+	pm := nat.PortMap{}
+	driverInfo, err := ep.DriverInfo()
+	if err != nil {
+		return pm, err
+	}
+
+	if driverInfo == nil {
+		// It is not an error for driverInfo to be nil
+		return pm, nil
+	}
+
+	if expData, ok := driverInfo[netlabel.ExposedPorts]; ok {
+		if exposedPorts, ok := expData.([]types.TransportPort); ok {
+			for _, tp := range exposedPorts {
+				natPort, err := nat.NewPort(tp.Proto.String(), strconv.Itoa(int(tp.Port)))
+				if err != nil {
+					return pm, fmt.Errorf("Error parsing Port value (%v): %v", tp.Port, err)
+				}
+				pm[natPort] = nil
+			}
+		}
+	}
+
+	mapData, ok := driverInfo[netlabel.PortMap]
+	if !ok {
+		return pm, nil
+	}
+
+	if portMapping, ok := mapData.([]types.PortBinding); ok {
+		for _, pp := range portMapping {
+			natPort, err := nat.NewPort(pp.Proto.String(), strconv.Itoa(int(pp.Port)))
+			if err != nil {
+				return pm, err
+			}
+			natBndg := nat.PortBinding{HostIP: pp.HostIP.String(), HostPort: strconv.Itoa(int(pp.HostPort))}
+			pm[natPort] = append(pm[natPort], natBndg)
+		}
+	}
+
+	return pm, nil
+}
+
+// GetSandboxPortMapInfo retrieves the current port-mapping programmed for the given sandbox
+func GetSandboxPortMapInfo(sb libnetwork.Sandbox) nat.PortMap {
+	pm := nat.PortMap{}
+	if sb == nil {
+		return pm
+	}
+
+	for _, ep := range sb.Endpoints() {
+		pm, _ = getEndpointPortMapInfo(ep)
+		if len(pm) > 0 {
+			break
+		}
+	}
+	return pm
+}
+
+// BuildEndpointInfo sets endpoint-related fields on container.NetworkSettings based on the provided network and endpoint.
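+// The network and endpoint IDs are recorded, and, when present, the
+// endpoint's MAC address and IPv4/IPv6 addresses (with prefix lengths) are
+// copied into the per-network settings.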
+func (container *Container) BuildEndpointInfo(n libnetwork.Network, ep libnetwork.Endpoint) error { + if ep == nil { + return errInvalidEndpoint + } + + networkSettings := container.NetworkSettings + if networkSettings == nil { + return errInvalidNetwork + } + + epInfo := ep.Info() + if epInfo == nil { + // It is not an error to get an empty endpoint info + return nil + } + + if _, ok := networkSettings.Networks[n.Name()]; !ok { + networkSettings.Networks[n.Name()] = &network.EndpointSettings{ + EndpointSettings: &networktypes.EndpointSettings{}, + } + } + networkSettings.Networks[n.Name()].NetworkID = n.ID() + networkSettings.Networks[n.Name()].EndpointID = ep.ID() + + iface := epInfo.Iface() + if iface == nil { + return nil + } + + if iface.MacAddress() != nil { + networkSettings.Networks[n.Name()].MacAddress = iface.MacAddress().String() + } + + if iface.Address() != nil { + ones, _ := iface.Address().Mask.Size() + networkSettings.Networks[n.Name()].IPAddress = iface.Address().IP.String() + networkSettings.Networks[n.Name()].IPPrefixLen = ones + } + + if iface.AddressIPv6() != nil && iface.AddressIPv6().IP.To16() != nil { + onesv6, _ := iface.AddressIPv6().Mask.Size() + networkSettings.Networks[n.Name()].GlobalIPv6Address = iface.AddressIPv6().IP.String() + networkSettings.Networks[n.Name()].GlobalIPv6PrefixLen = onesv6 + } + + return nil +} + +// UpdateJoinInfo updates network settings when container joins network n with endpoint ep. +func (container *Container) UpdateJoinInfo(n libnetwork.Network, ep libnetwork.Endpoint) error { + if err := container.buildPortMapInfo(ep); err != nil { + return err + } + + epInfo := ep.Info() + if epInfo == nil { + // It is not an error to get an empty endpoint info + return nil + } + if epInfo.Gateway() != nil { + container.NetworkSettings.Networks[n.Name()].Gateway = epInfo.Gateway().String() + } + if epInfo.GatewayIPv6().To16() != nil { + container.NetworkSettings.Networks[n.Name()].IPv6Gateway = epInfo.GatewayIPv6().String() + } + + return nil +} + +// UpdateSandboxNetworkSettings updates the sandbox ID and Key. +func (container *Container) UpdateSandboxNetworkSettings(sb libnetwork.Sandbox) error { + container.NetworkSettings.SandboxID = sb.ID() + container.NetworkSettings.SandboxKey = sb.Key() + return nil +} + +// BuildJoinOptions builds endpoint Join options from a given network. +func (container *Container) BuildJoinOptions(n libnetwork.Network) ([]libnetwork.EndpointOption, error) { + var joinOptions []libnetwork.EndpointOption + if epConfig, ok := container.NetworkSettings.Networks[n.Name()]; ok { + for _, str := range epConfig.Links { + name, alias, err := opts.ParseLink(str) + if err != nil { + return nil, err + } + joinOptions = append(joinOptions, libnetwork.CreateOptionAlias(name, alias)) + } + for k, v := range epConfig.DriverOpts { + joinOptions = append(joinOptions, libnetwork.EndpointOptionGeneric(options.Generic{k: v})) + } + } + + return joinOptions, nil +} + +// BuildCreateEndpointOptions builds endpoint options from a given network. 
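+// Depending on the configuration, the options cover IPAM addresses,
+// network-scoped aliases, driver options, swarm service settings, exposed
+// ports and port bindings, and DNS servers.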
+func (container *Container) BuildCreateEndpointOptions(n libnetwork.Network, epConfig *networktypes.EndpointSettings, sb libnetwork.Sandbox, daemonDNS []string) ([]libnetwork.EndpointOption, error) {
+	var (
+		bindings      = make(nat.PortMap)
+		pbList        []types.PortBinding
+		exposeList    []types.TransportPort
+		createOptions []libnetwork.EndpointOption
+	)
+
+	defaultNetName := runconfig.DefaultDaemonNetworkMode().NetworkName()
+
+	if (!container.EnableServiceDiscoveryOnDefaultNetwork() && n.Name() == defaultNetName) ||
+		container.NetworkSettings.IsAnonymousEndpoint {
+		createOptions = append(createOptions, libnetwork.CreateOptionAnonymous())
+	}
+
+	if epConfig != nil {
+		ipam := epConfig.IPAMConfig
+
+		if ipam != nil {
+			var (
+				ipList          []net.IP
+				ip, ip6, linkip net.IP
+			)
+
+			for _, ips := range ipam.LinkLocalIPs {
+				if linkip = net.ParseIP(ips); linkip == nil && ips != "" {
+					return nil, fmt.Errorf("Invalid link-local IP address: %s", ips)
+				}
+				ipList = append(ipList, linkip)
+
+			}
+
+			if ip = net.ParseIP(ipam.IPv4Address); ip == nil && ipam.IPv4Address != "" {
+				return nil, fmt.Errorf("Invalid IPv4 address: %s", ipam.IPv4Address)
+			}
+
+			if ip6 = net.ParseIP(ipam.IPv6Address); ip6 == nil && ipam.IPv6Address != "" {
+				return nil, fmt.Errorf("Invalid IPv6 address: %s", ipam.IPv6Address)
+			}
+
+			createOptions = append(createOptions,
+				libnetwork.CreateOptionIpam(ip, ip6, ipList, nil))
+
+		}
+
+		for _, alias := range epConfig.Aliases {
+			createOptions = append(createOptions, libnetwork.CreateOptionMyAlias(alias))
+		}
+		for k, v := range epConfig.DriverOpts {
+			createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(options.Generic{k: v}))
+		}
+	}
+
+	if container.NetworkSettings.Service != nil {
+		svcCfg := container.NetworkSettings.Service
+
+		var vip string
+		if svcCfg.VirtualAddresses[n.ID()] != nil {
+			vip = svcCfg.VirtualAddresses[n.ID()].IPv4
+		}
+
+		var portConfigs []*libnetwork.PortConfig
+		for _, portConfig := range svcCfg.ExposedPorts {
+			portConfigs = append(portConfigs, &libnetwork.PortConfig{
+				Name:          portConfig.Name,
+				Protocol:      libnetwork.PortConfig_Protocol(portConfig.Protocol),
+				TargetPort:    portConfig.TargetPort,
+				PublishedPort: portConfig.PublishedPort,
+			})
+		}
+
+		createOptions = append(createOptions, libnetwork.CreateOptionService(svcCfg.Name, svcCfg.ID, net.ParseIP(vip), portConfigs, svcCfg.Aliases[n.ID()]))
+	}
+
+	if !containertypes.NetworkMode(n.Name()).IsUserDefined() {
+		createOptions = append(createOptions, libnetwork.CreateOptionDisableResolution())
+	}
+
+	// Configs that are applicable only to the endpoint in the network
+	// to which the container was connected on docker run.
+ // Ideally all these network-specific endpoint configurations must be moved under + // container.NetworkSettings.Networks[n.Name()] + if n.Name() == container.HostConfig.NetworkMode.NetworkName() || + (n.Name() == defaultNetName && container.HostConfig.NetworkMode.IsDefault()) { + if container.Config.MacAddress != "" { + mac, err := net.ParseMAC(container.Config.MacAddress) + if err != nil { + return nil, err + } + + genericOption := options.Generic{ + netlabel.MacAddress: mac, + } + + createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(genericOption)) + } + + } + + // Port-mapping rules belong to the container & applicable only to non-internal networks + portmaps := GetSandboxPortMapInfo(sb) + if n.Info().Internal() || len(portmaps) > 0 { + return createOptions, nil + } + + if container.HostConfig.PortBindings != nil { + for p, b := range container.HostConfig.PortBindings { + bindings[p] = []nat.PortBinding{} + for _, bb := range b { + bindings[p] = append(bindings[p], nat.PortBinding{ + HostIP: bb.HostIP, + HostPort: bb.HostPort, + }) + } + } + } + + portSpecs := container.Config.ExposedPorts + ports := make([]nat.Port, len(portSpecs)) + var i int + for p := range portSpecs { + ports[i] = p + i++ + } + nat.SortPortMap(ports, bindings) + for _, port := range ports { + expose := types.TransportPort{} + expose.Proto = types.ParseProtocol(port.Proto()) + expose.Port = uint16(port.Int()) + exposeList = append(exposeList, expose) + + pb := types.PortBinding{Port: expose.Port, Proto: expose.Proto} + binding := bindings[port] + for i := 0; i < len(binding); i++ { + pbCopy := pb.GetCopy() + newP, err := nat.NewPort(nat.SplitProtoPort(binding[i].HostPort)) + var portStart, portEnd int + if err == nil { + portStart, portEnd, err = newP.Range() + } + if err != nil { + return nil, fmt.Errorf("Error parsing HostPort value(%s):%v", binding[i].HostPort, err) + } + pbCopy.HostPort = uint16(portStart) + pbCopy.HostPortEnd = uint16(portEnd) + pbCopy.HostIP = net.ParseIP(binding[i].HostIP) + pbList = append(pbList, pbCopy) + } + + if container.HostConfig.PublishAllPorts && len(binding) == 0 { + pbList = append(pbList, pb) + } + } + + var dns []string + + if len(container.HostConfig.DNS) > 0 { + dns = container.HostConfig.DNS + } else if len(daemonDNS) > 0 { + dns = daemonDNS + } + + if len(dns) > 0 { + createOptions = append(createOptions, + libnetwork.CreateOptionDNS(dns)) + } + + createOptions = append(createOptions, + libnetwork.CreateOptionPortMapping(pbList), + libnetwork.CreateOptionExposedPorts(exposeList)) + + return createOptions, nil +} + +// UpdateMonitor updates monitor configure for running container +func (container *Container) UpdateMonitor(restartPolicy containertypes.RestartPolicy) { + type policySetter interface { + SetPolicy(containertypes.RestartPolicy) + } + + if rm, ok := container.RestartManager().(policySetter); ok { + rm.SetPolicy(restartPolicy) + } +} + +// FullHostname returns hostname and optional domain appended to it. +func (container *Container) FullHostname() string { + fullHostname := container.Config.Hostname + if container.Config.Domainname != "" { + fullHostname = fmt.Sprintf("%s.%s", fullHostname, container.Config.Domainname) + } + return fullHostname +} + +// RestartManager returns the current restartmanager instance connected to container. 
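+// The instance is created lazily on first use from the container's restart
+// policy and current restart count.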
+func (container *Container) RestartManager() restartmanager.RestartManager { + if container.restartManager == nil { + container.restartManager = restartmanager.New(container.HostConfig.RestartPolicy, container.RestartCount) + } + return container.restartManager +} + +// ResetRestartManager initializes new restartmanager based on container config +func (container *Container) ResetRestartManager(resetCount bool) { + if container.restartManager != nil { + container.restartManager.Cancel() + } + if resetCount { + container.RestartCount = 0 + } + container.restartManager = nil +} + +type attachContext struct { + ctx context.Context + cancel context.CancelFunc + mu sync.Mutex +} + +// InitAttachContext initializes or returns existing context for attach calls to +// track container liveness. +func (container *Container) InitAttachContext() context.Context { + container.attachContext.mu.Lock() + defer container.attachContext.mu.Unlock() + if container.attachContext.ctx == nil { + container.attachContext.ctx, container.attachContext.cancel = context.WithCancel(context.Background()) + } + return container.attachContext.ctx +} + +// CancelAttachContext cancels attach context. All attach calls should detach +// after this call. +func (container *Container) CancelAttachContext() { + container.attachContext.mu.Lock() + if container.attachContext.ctx != nil { + container.attachContext.cancel() + container.attachContext.ctx = nil + } + container.attachContext.mu.Unlock() +} + +func (container *Container) startLogging() error { + if container.HostConfig.LogConfig.Type == "none" { + return nil // do not start logging routines + } + + l, err := container.StartLogger() + if err != nil { + return fmt.Errorf("failed to initialize logging driver: %v", err) + } + + copier := logger.NewCopier(map[string]io.Reader{"stdout": container.StdoutPipe(), "stderr": container.StderrPipe()}, l) + container.LogCopier = copier + copier.Run() + container.LogDriver = l + + // set LogPath field only for json-file logdriver + if jl, ok := l.(*jsonfilelog.JSONFileLogger); ok { + container.LogPath = jl.LogPath() + } + + return nil +} + +// StdinPipe gets the stdin stream of the container +func (container *Container) StdinPipe() io.WriteCloser { + return container.StreamConfig.StdinPipe() +} + +// StdoutPipe gets the stdout stream of the container +func (container *Container) StdoutPipe() io.ReadCloser { + return container.StreamConfig.StdoutPipe() +} + +// StderrPipe gets the stderr stream of the container +func (container *Container) StderrPipe() io.ReadCloser { + return container.StreamConfig.StderrPipe() +} + +// CloseStreams closes the container's stdio streams +func (container *Container) CloseStreams() error { + return container.StreamConfig.CloseStreams() +} + +// InitializeStdio is called by libcontainerd to connect the stdio. +func (container *Container) InitializeStdio(iop libcontainerd.IOPipe) error { + if err := container.startLogging(); err != nil { + container.Reset(false) + return err + } + + container.StreamConfig.CopyToPipe(iop) + + if container.StreamConfig.Stdin() == nil && !container.Config.Tty { + if iop.Stdin != nil { + if err := iop.Stdin.Close(); err != nil { + logrus.Warnf("error closing stdin: %+v", err) + } + } + } + + return nil +} + +// SecretMountPath returns the path of the secret mount for the container +func (container *Container) SecretMountPath() string { + return filepath.Join(container.Root, "secrets") +} + +// SecretFilePath returns the path to the location of a secret on the host. 
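+// The result is SecretMountPath joined with the secret's ID, i.e.
+// <container.Root>/secrets/<SecretID>.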
+func (container *Container) SecretFilePath(secretRef swarmtypes.SecretReference) string { + return filepath.Join(container.SecretMountPath(), secretRef.SecretID) +} + +func getSecretTargetPath(r *swarmtypes.SecretReference) string { + if filepath.IsAbs(r.File.Name) { + return r.File.Name + } + + return filepath.Join(containerSecretMountPath, r.File.Name) +} + +// ConfigsDirPath returns the path to the directory where configs are stored on +// disk. +func (container *Container) ConfigsDirPath() string { + return filepath.Join(container.Root, "configs") +} + +// ConfigFilePath returns the path to the on-disk location of a config. +func (container *Container) ConfigFilePath(configRef swarmtypes.ConfigReference) string { + return filepath.Join(container.ConfigsDirPath(), configRef.ConfigID) +} + +// CreateDaemonEnvironment creates a new environment variable slice for this container. +func (container *Container) CreateDaemonEnvironment(tty bool, linkedEnv []string) []string { + // Setup environment + // TODO @jhowardmsft LCOW Support. This will need revisiting later. + platform := container.Platform + if platform == "" { + platform = runtime.GOOS + } + env := []string{} + if runtime.GOOS != "windows" || (system.LCOWSupported() && platform == "linux") { + env = []string{ + "PATH=" + system.DefaultPathEnv(platform), + "HOSTNAME=" + container.Config.Hostname, + } + if tty { + env = append(env, "TERM=xterm") + } + env = append(env, linkedEnv...) + } + + // because the env on the container can override certain default values + // we need to replace the 'env' keys where they match and append anything + // else. + env = ReplaceOrAppendEnvValues(env, container.Config.Env) + return env +} diff --git a/vendor/github.com/moby/moby/container/container_linux.go b/vendor/github.com/moby/moby/container/container_linux.go new file mode 100644 index 000000000..4d4c16b56 --- /dev/null +++ b/vendor/github.com/moby/moby/container/container_linux.go @@ -0,0 +1,9 @@ +package container + +import ( + "golang.org/x/sys/unix" +) + +func detachMounted(path string) error { + return unix.Unmount(path, unix.MNT_DETACH) +} diff --git a/vendor/github.com/moby/moby/container/container_notlinux.go b/vendor/github.com/moby/moby/container/container_notlinux.go new file mode 100644 index 000000000..768c762d2 --- /dev/null +++ b/vendor/github.com/moby/moby/container/container_notlinux.go @@ -0,0 +1,23 @@ +// +build solaris freebsd + +package container + +import ( + "golang.org/x/sys/unix" +) + +func detachMounted(path string) error { + //Solaris and FreeBSD do not support the lazy unmount or MNT_DETACH feature. + // Therefore there are separate definitions for this. 
+	return unix.Unmount(path, 0)
+}
+
+// SecretMounts returns the mounts for the secret path
+func (container *Container) SecretMounts() []Mount {
+	return nil
+}
+
+// UnmountSecrets unmounts the fs for secrets
+func (container *Container) UnmountSecrets() error {
+	return nil
+}
diff --git a/vendor/github.com/moby/moby/container/container_unit_test.go b/vendor/github.com/moby/moby/container/container_unit_test.go
new file mode 100644
index 000000000..9ba2991be
--- /dev/null
+++ b/vendor/github.com/moby/moby/container/container_unit_test.go
@@ -0,0 +1,68 @@
+package container
+
+import (
+	"path/filepath"
+	"testing"
+
+	"github.com/docker/docker/api/types/container"
+	swarmtypes "github.com/docker/docker/api/types/swarm"
+	"github.com/docker/docker/pkg/signal"
+)
+
+func TestContainerStopSignal(t *testing.T) {
+	c := &Container{
+		Config: &container.Config{},
+	}
+
+	def, err := signal.ParseSignal(signal.DefaultStopSignal)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	s := c.StopSignal()
+	if s != int(def) {
+		t.Fatalf("Expected %v, got %v", def, s)
+	}
+
+	c = &Container{
+		Config: &container.Config{StopSignal: "SIGKILL"},
+	}
+	s = c.StopSignal()
+	if s != 9 {
+		t.Fatalf("Expected 9, got %v", s)
+	}
+}
+
+func TestContainerStopTimeout(t *testing.T) {
+	c := &Container{
+		Config: &container.Config{},
+	}
+
+	s := c.StopTimeout()
+	if s != DefaultStopTimeout {
+		t.Fatalf("Expected %v, got %v", DefaultStopTimeout, s)
+	}
+
+	stopTimeout := 15
+	c = &Container{
+		Config: &container.Config{StopTimeout: &stopTimeout},
+	}
+	s = c.StopTimeout()
+	if s != 15 {
+		t.Fatalf("Expected 15, got %v", s)
+	}
+}
+
+func TestContainerSecretReferenceDestTarget(t *testing.T) {
+	ref := &swarmtypes.SecretReference{
+		File: &swarmtypes.SecretReferenceFileTarget{
+			Name: "app",
+		},
+	}
+
+	d := getSecretTargetPath(ref)
+	expected := filepath.Join(containerSecretMountPath, "app")
+	if d != expected {
+		t.Fatalf("expected secret dest %q; received %q", expected, d)
+	}
+}
diff --git a/vendor/github.com/moby/moby/container/container_unix.go b/vendor/github.com/moby/moby/container/container_unix.go
new file mode 100644
index 000000000..327f950ff
--- /dev/null
+++ b/vendor/github.com/moby/moby/container/container_unix.go
@@ -0,0 +1,475 @@
+// +build linux freebsd solaris
+
+package container
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api/types"
+	containertypes "github.com/docker/docker/api/types/container"
+	mounttypes "github.com/docker/docker/api/types/mount"
+	"github.com/docker/docker/pkg/chrootarchive"
+	"github.com/docker/docker/pkg/mount"
+	"github.com/docker/docker/pkg/stringid"
+	"github.com/docker/docker/pkg/symlink"
+	"github.com/docker/docker/pkg/system"
+	"github.com/docker/docker/volume"
+	"github.com/opencontainers/selinux/go-selinux/label"
+	"golang.org/x/sys/unix"
+)
+
+const (
+	containerSecretMountPath = "/run/secrets"
+)
+
+// ExitStatus provides exit reasons for a container.
+type ExitStatus struct {
+	// The exit code with which the container exited.
+	ExitCode int
+
+	// Whether the container encountered an OOM.
+ OOMKilled bool +} + +// TrySetNetworkMount attempts to set the network mounts given a provided destination and +// the path to use for it; return true if the given destination was a network mount file +func (container *Container) TrySetNetworkMount(destination string, path string) bool { + if destination == "/etc/resolv.conf" { + container.ResolvConfPath = path + return true + } + if destination == "/etc/hostname" { + container.HostnamePath = path + return true + } + if destination == "/etc/hosts" { + container.HostsPath = path + return true + } + + return false +} + +// BuildHostnameFile writes the container's hostname file. +func (container *Container) BuildHostnameFile() error { + hostnamePath, err := container.GetRootResourcePath("hostname") + if err != nil { + return err + } + container.HostnamePath = hostnamePath + return ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644) +} + +// NetworkMounts returns the list of network mounts. +func (container *Container) NetworkMounts() []Mount { + var mounts []Mount + shared := container.HostConfig.NetworkMode.IsContainer() + if container.ResolvConfPath != "" { + if _, err := os.Stat(container.ResolvConfPath); err != nil { + logrus.Warnf("ResolvConfPath set to %q, but can't stat this filename (err = %v); skipping", container.ResolvConfPath, err) + } else { + if !container.HasMountFor("/etc/resolv.conf") { + label.Relabel(container.ResolvConfPath, container.MountLabel, shared) + } + writable := !container.HostConfig.ReadonlyRootfs + if m, exists := container.MountPoints["/etc/resolv.conf"]; exists { + writable = m.RW + } + mounts = append(mounts, Mount{ + Source: container.ResolvConfPath, + Destination: "/etc/resolv.conf", + Writable: writable, + Propagation: string(volume.DefaultPropagationMode), + }) + } + } + if container.HostnamePath != "" { + if _, err := os.Stat(container.HostnamePath); err != nil { + logrus.Warnf("HostnamePath set to %q, but can't stat this filename (err = %v); skipping", container.HostnamePath, err) + } else { + if !container.HasMountFor("/etc/hostname") { + label.Relabel(container.HostnamePath, container.MountLabel, shared) + } + writable := !container.HostConfig.ReadonlyRootfs + if m, exists := container.MountPoints["/etc/hostname"]; exists { + writable = m.RW + } + mounts = append(mounts, Mount{ + Source: container.HostnamePath, + Destination: "/etc/hostname", + Writable: writable, + Propagation: string(volume.DefaultPropagationMode), + }) + } + } + if container.HostsPath != "" { + if _, err := os.Stat(container.HostsPath); err != nil { + logrus.Warnf("HostsPath set to %q, but can't stat this filename (err = %v); skipping", container.HostsPath, err) + } else { + if !container.HasMountFor("/etc/hosts") { + label.Relabel(container.HostsPath, container.MountLabel, shared) + } + writable := !container.HostConfig.ReadonlyRootfs + if m, exists := container.MountPoints["/etc/hosts"]; exists { + writable = m.RW + } + mounts = append(mounts, Mount{ + Source: container.HostsPath, + Destination: "/etc/hosts", + Writable: writable, + Propagation: string(volume.DefaultPropagationMode), + }) + } + } + return mounts +} + +// CopyImagePathContent copies files in destination to the volume. 
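+// That is, it seeds the volume v with whatever the image already contains
+// at the given destination path inside the container; it is a no-op when
+// that path does not exist.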
+func (container *Container) CopyImagePathContent(v volume.Volume, destination string) error { + rootfs, err := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, destination), container.BaseFS) + if err != nil { + return err + } + + if _, err = ioutil.ReadDir(rootfs); err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + id := stringid.GenerateNonCryptoID() + path, err := v.Mount(id) + if err != nil { + return err + } + + defer func() { + if err := v.Unmount(id); err != nil { + logrus.Warnf("error while unmounting volume %s: %v", v.Name(), err) + } + }() + if err := label.Relabel(path, container.MountLabel, true); err != nil && err != unix.ENOTSUP { + return err + } + return copyExistingContents(rootfs, path) +} + +// ShmResourcePath returns path to shm +func (container *Container) ShmResourcePath() (string, error) { + return container.GetRootResourcePath("shm") +} + +// HasMountFor checks if path is a mountpoint +func (container *Container) HasMountFor(path string) bool { + _, exists := container.MountPoints[path] + return exists +} + +// UnmountIpcMounts uses the provided unmount function to unmount shm and mqueue if they were mounted +func (container *Container) UnmountIpcMounts(unmount func(pth string) error) { + if container.HostConfig.IpcMode.IsContainer() || container.HostConfig.IpcMode.IsHost() { + return + } + + var warnings []string + + if !container.HasMountFor("/dev/shm") { + shmPath, err := container.ShmResourcePath() + if err != nil { + logrus.Error(err) + warnings = append(warnings, err.Error()) + } else if shmPath != "" { + if err := unmount(shmPath); err != nil && !os.IsNotExist(err) { + if mounted, mErr := mount.Mounted(shmPath); mounted || mErr != nil { + warnings = append(warnings, fmt.Sprintf("failed to umount %s: %v", shmPath, err)) + } + } + + } + } + + if len(warnings) > 0 { + logrus.Warnf("failed to cleanup ipc mounts:\n%v", strings.Join(warnings, "\n")) + } +} + +// IpcMounts returns the list of IPC mounts +func (container *Container) IpcMounts() []Mount { + var mounts []Mount + + if !container.HasMountFor("/dev/shm") { + label.SetFileLabel(container.ShmPath, container.MountLabel) + mounts = append(mounts, Mount{ + Source: container.ShmPath, + Destination: "/dev/shm", + Writable: true, + Propagation: string(volume.DefaultPropagationMode), + }) + } + + return mounts +} + +// SecretMounts returns the mounts for the secret path. +func (container *Container) SecretMounts() []Mount { + var mounts []Mount + for _, r := range container.SecretReferences { + if r.File == nil { + continue + } + mounts = append(mounts, Mount{ + Source: container.SecretFilePath(*r), + Destination: getSecretTargetPath(r), + Writable: false, + }) + } + + return mounts +} + +// UnmountSecrets unmounts the local tmpfs for secrets +func (container *Container) UnmountSecrets() error { + if _, err := os.Stat(container.SecretMountPath()); err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + return detachMounted(container.SecretMountPath()) +} + +// ConfigMounts returns the mounts for configs. +func (container *Container) ConfigMounts() []Mount { + var mounts []Mount + for _, configRef := range container.ConfigReferences { + if configRef.File == nil { + continue + } + mounts = append(mounts, Mount{ + Source: container.ConfigFilePath(*configRef), + Destination: configRef.File.Name, + Writable: false, + }) + } + + return mounts +} + +// UpdateContainer updates configuration of a container. Callers must hold a Lock on the Container. 
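+// Resource updates are validated first: NanoCPUs and CPUPeriod/CPUQuota are
+// mutually exclusive, and a new memory limit may not exceed an already-set
+// memory+swap limit unless that limit is updated in the same call.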
+func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfig) error { + // update resources of container + resources := hostConfig.Resources + cResources := &container.HostConfig.Resources + + // validate NanoCPUs, CPUPeriod, and CPUQuota + // Because NanoCPU effectively updates CPUPeriod/CPUQuota, + // once NanoCPU is already set, updating CPUPeriod/CPUQuota will be blocked, and vice versa. + // In the following we make sure the intended update (resources) does not conflict with the existing (cResource). + if resources.NanoCPUs > 0 && cResources.CPUPeriod > 0 { + return fmt.Errorf("Conflicting options: Nano CPUs cannot be updated as CPU Period has already been set") + } + if resources.NanoCPUs > 0 && cResources.CPUQuota > 0 { + return fmt.Errorf("Conflicting options: Nano CPUs cannot be updated as CPU Quota has already been set") + } + if resources.CPUPeriod > 0 && cResources.NanoCPUs > 0 { + return fmt.Errorf("Conflicting options: CPU Period cannot be updated as NanoCPUs has already been set") + } + if resources.CPUQuota > 0 && cResources.NanoCPUs > 0 { + return fmt.Errorf("Conflicting options: CPU Quota cannot be updated as NanoCPUs has already been set") + } + + if resources.BlkioWeight != 0 { + cResources.BlkioWeight = resources.BlkioWeight + } + if resources.CPUShares != 0 { + cResources.CPUShares = resources.CPUShares + } + if resources.NanoCPUs != 0 { + cResources.NanoCPUs = resources.NanoCPUs + } + if resources.CPUPeriod != 0 { + cResources.CPUPeriod = resources.CPUPeriod + } + if resources.CPUQuota != 0 { + cResources.CPUQuota = resources.CPUQuota + } + if resources.CpusetCpus != "" { + cResources.CpusetCpus = resources.CpusetCpus + } + if resources.CpusetMems != "" { + cResources.CpusetMems = resources.CpusetMems + } + if resources.Memory != 0 { + // if memory limit smaller than already set memoryswap limit and doesn't + // update the memoryswap limit, then error out. + if resources.Memory > cResources.MemorySwap && resources.MemorySwap == 0 { + return fmt.Errorf("Memory limit should be smaller than already set memoryswap limit, update the memoryswap at the same time") + } + cResources.Memory = resources.Memory + } + if resources.MemorySwap != 0 { + cResources.MemorySwap = resources.MemorySwap + } + if resources.MemoryReservation != 0 { + cResources.MemoryReservation = resources.MemoryReservation + } + if resources.KernelMemory != 0 { + cResources.KernelMemory = resources.KernelMemory + } + + // update HostConfig of container + if hostConfig.RestartPolicy.Name != "" { + if container.HostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() { + return fmt.Errorf("Restart policy cannot be updated because AutoRemove is enabled for the container") + } + container.HostConfig.RestartPolicy = hostConfig.RestartPolicy + } + + return nil +} + +// DetachAndUnmount uses a detached mount on all mount destinations, then +// unmounts each volume normally. 
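+// A detached (lazy) unmount succeeds even while a mount is still busy,
+// which avoids blocking on files that are held open.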
+// This is used from daemon/archive for `docker cp`
+func (container *Container) DetachAndUnmount(volumeEventLog func(name, action string, attributes map[string]string)) error {
+	networkMounts := container.NetworkMounts()
+	mountPaths := make([]string, 0, len(container.MountPoints)+len(networkMounts))
+
+	for _, mntPoint := range container.MountPoints {
+		dest, err := container.GetResourcePath(mntPoint.Destination)
+		if err != nil {
+			logrus.Warnf("Failed to get volume destination path for container '%s' at '%s' while lazily unmounting: %v", container.ID, mntPoint.Destination, err)
+			continue
+		}
+		mountPaths = append(mountPaths, dest)
+	}
+
+	for _, m := range networkMounts {
+		dest, err := container.GetResourcePath(m.Destination)
+		if err != nil {
+			logrus.Warnf("Failed to get volume destination path for container '%s' at '%s' while lazily unmounting: %v", container.ID, m.Destination, err)
+			continue
+		}
+		mountPaths = append(mountPaths, dest)
+	}
+
+	for _, mountPath := range mountPaths {
+		if err := detachMounted(mountPath); err != nil {
+			logrus.Warnf("%s unmountVolumes: Failed to do lazy umount for volume '%s': %v", container.ID, mountPath, err)
+		}
+	}
+	return container.UnmountVolumes(volumeEventLog)
+}
+
+// copyExistingContents copies from the source to the destination and
+// ensures the ownership is appropriately set.
+func copyExistingContents(source, destination string) error {
+	volList, err := ioutil.ReadDir(source)
+	if err != nil {
+		return err
+	}
+	if len(volList) > 0 {
+		dstList, err := ioutil.ReadDir(destination)
+		if err != nil {
+			return err
+		}
+		if len(dstList) == 0 {
+			// If the destination volume is empty, copy files from the source into it
+			if err := chrootarchive.NewArchiver(nil).CopyWithTar(source, destination); err != nil {
+				return err
+			}
+		}
+	}
+	return copyOwnership(source, destination)
+}
+
+// copyOwnership copies the permissions and uid:gid of the source file
+// to the destination file
+func copyOwnership(source, destination string) error {
+	stat, err := system.Stat(source)
+	if err != nil {
+		return err
+	}
+
+	destStat, err := system.Stat(destination)
+	if err != nil {
+		return err
+	}
+
+	// In some cases, even though UID/GID match and it would effectively be a no-op,
+	// this can return a permission denied error... for example if this is an NFS
+	// mount.
+	// Since it's not really an error that we can't chown to the same UID/GID, don't
+	// even bother trying in such cases.
+ if stat.UID() != destStat.UID() || stat.GID() != destStat.GID() { + if err := os.Chown(destination, int(stat.UID()), int(stat.GID())); err != nil { + return err + } + } + + if stat.Mode() != destStat.Mode() { + return os.Chmod(destination, os.FileMode(stat.Mode())) + } + return nil +} + +// TmpfsMounts returns the list of tmpfs mounts +func (container *Container) TmpfsMounts() ([]Mount, error) { + var mounts []Mount + for dest, data := range container.HostConfig.Tmpfs { + mounts = append(mounts, Mount{ + Source: "tmpfs", + Destination: dest, + Data: data, + }) + } + for dest, mnt := range container.MountPoints { + if mnt.Type == mounttypes.TypeTmpfs { + data, err := volume.ConvertTmpfsOptions(mnt.Spec.TmpfsOptions, mnt.Spec.ReadOnly) + if err != nil { + return nil, err + } + mounts = append(mounts, Mount{ + Source: "tmpfs", + Destination: dest, + Data: data, + }) + } + } + return mounts, nil +} + +// cleanResourcePath cleans a resource path and prepares to combine with mnt path +func cleanResourcePath(path string) string { + return filepath.Join(string(os.PathSeparator), path) +} + +// EnableServiceDiscoveryOnDefaultNetwork Enable service discovery on default network +func (container *Container) EnableServiceDiscoveryOnDefaultNetwork() bool { + return false +} + +// GetMountPoints gives a platform specific transformation to types.MountPoint. Callers must hold a Container lock. +func (container *Container) GetMountPoints() []types.MountPoint { + mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) + for _, m := range container.MountPoints { + mountPoints = append(mountPoints, types.MountPoint{ + Type: m.Type, + Name: m.Name, + Source: m.Path(), + Destination: m.Destination, + Driver: m.Driver, + Mode: m.Mode, + RW: m.RW, + Propagation: m.Propagation, + }) + } + return mountPoints +} diff --git a/vendor/github.com/moby/moby/container/container_windows.go b/vendor/github.com/moby/moby/container/container_windows.go new file mode 100644 index 000000000..0f2a45df9 --- /dev/null +++ b/vendor/github.com/moby/moby/container/container_windows.go @@ -0,0 +1,210 @@ +// +build windows + +package container + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/system" +) + +const ( + containerSecretMountPath = `C:\ProgramData\Docker\secrets` + containerInternalSecretMountPath = `C:\ProgramData\Docker\internal\secrets` + containerInternalConfigsDirPath = `C:\ProgramData\Docker\internal\configs` +) + +// ExitStatus provides exit reasons for a container. +type ExitStatus struct { + // The exit code with which the container exited. + ExitCode int +} + +// UnmountIpcMounts unmounts Ipc related mounts. +// This is a NOOP on windows. +func (container *Container) UnmountIpcMounts(unmount func(pth string) error) { +} + +// IpcMounts returns the list of Ipc related mounts. +func (container *Container) IpcMounts() []Mount { + return nil +} + +// CreateSecretSymlinks creates symlinks to files in the secret mount. 
+func (container *Container) CreateSecretSymlinks() error { + for _, r := range container.SecretReferences { + if r.File == nil { + continue + } + resolvedPath, _, err := container.ResolvePath(getSecretTargetPath(r)) + if err != nil { + return err + } + if err := system.MkdirAll(filepath.Dir(resolvedPath), 0, ""); err != nil { + return err + } + if err := os.Symlink(filepath.Join(containerInternalSecretMountPath, r.SecretID), resolvedPath); err != nil { + return err + } + } + + return nil +} + +// SecretMounts returns the mount for the secret path. +// All secrets are stored in a single mount on Windows. Target symlinks are +// created for each secret, pointing to the files in this mount. +func (container *Container) SecretMounts() []Mount { + var mounts []Mount + if len(container.SecretReferences) > 0 { + mounts = append(mounts, Mount{ + Source: container.SecretMountPath(), + Destination: containerInternalSecretMountPath, + Writable: false, + }) + } + + return mounts +} + +// UnmountSecrets unmounts the fs for secrets +func (container *Container) UnmountSecrets() error { + return os.RemoveAll(container.SecretMountPath()) +} + +// CreateConfigSymlinks creates symlinks to files in the config mount. +func (container *Container) CreateConfigSymlinks() error { + for _, configRef := range container.ConfigReferences { + if configRef.File == nil { + continue + } + resolvedPath, _, err := container.ResolvePath(configRef.File.Name) + if err != nil { + return err + } + if err := system.MkdirAll(filepath.Dir(resolvedPath), 0, ""); err != nil { + return err + } + if err := os.Symlink(filepath.Join(containerInternalConfigsDirPath, configRef.ConfigID), resolvedPath); err != nil { + return err + } + } + + return nil +} + +// ConfigMounts returns the mount for configs. +// All configs are stored in a single mount on Windows. Target symlinks are +// created for each config, pointing to the files in this mount. +func (container *Container) ConfigMounts() []Mount { + var mounts []Mount + if len(container.ConfigReferences) > 0 { + mounts = append(mounts, Mount{ + Source: container.ConfigsDirPath(), + Destination: containerInternalConfigsDirPath, + Writable: false, + }) + } + + return mounts +} + +// DetachAndUnmount unmounts all volumes. +// On Windows it only delegates to `UnmountVolumes` since there is nothing to +// force unmount. +func (container *Container) DetachAndUnmount(volumeEventLog func(name, action string, attributes map[string]string)) error { + return container.UnmountVolumes(volumeEventLog) +} + +// TmpfsMounts returns the list of tmpfs mounts +func (container *Container) TmpfsMounts() ([]Mount, error) { + var mounts []Mount + return mounts, nil +} + +// UpdateContainer updates configuration of a container. Callers must hold a Lock on the Container. 
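+// On Windows only the restart policy can be changed; any attempt to update
+// resource limits is rejected.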
+func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfig) error { + resources := hostConfig.Resources + if resources.CPUShares != 0 || + resources.Memory != 0 || + resources.NanoCPUs != 0 || + resources.CgroupParent != "" || + resources.BlkioWeight != 0 || + len(resources.BlkioWeightDevice) != 0 || + len(resources.BlkioDeviceReadBps) != 0 || + len(resources.BlkioDeviceWriteBps) != 0 || + len(resources.BlkioDeviceReadIOps) != 0 || + len(resources.BlkioDeviceWriteIOps) != 0 || + resources.CPUPeriod != 0 || + resources.CPUQuota != 0 || + resources.CPURealtimePeriod != 0 || + resources.CPURealtimeRuntime != 0 || + resources.CpusetCpus != "" || + resources.CpusetMems != "" || + len(resources.Devices) != 0 || + len(resources.DeviceCgroupRules) != 0 || + resources.DiskQuota != 0 || + resources.KernelMemory != 0 || + resources.MemoryReservation != 0 || + resources.MemorySwap != 0 || + resources.MemorySwappiness != nil || + resources.OomKillDisable != nil || + resources.PidsLimit != 0 || + len(resources.Ulimits) != 0 || + resources.CPUCount != 0 || + resources.CPUPercent != 0 || + resources.IOMaximumIOps != 0 || + resources.IOMaximumBandwidth != 0 { + return fmt.Errorf("resource updating isn't supported on Windows") + } + // update HostConfig of container + if hostConfig.RestartPolicy.Name != "" { + if container.HostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() { + return fmt.Errorf("Restart policy cannot be updated because AutoRemove is enabled for the container") + } + container.HostConfig.RestartPolicy = hostConfig.RestartPolicy + } + return nil +} + +// cleanResourcePath cleans a resource path by removing C:\ syntax, and prepares +// to combine with a volume path +func cleanResourcePath(path string) string { + if len(path) >= 2 { + c := path[0] + if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') { + path = path[2:] + } + } + return filepath.Join(string(os.PathSeparator), path) +} + +// BuildHostnameFile writes the container's hostname file. +func (container *Container) BuildHostnameFile() error { + return nil +} + +// EnableServiceDiscoveryOnDefaultNetwork Enable service discovery on default network +func (container *Container) EnableServiceDiscoveryOnDefaultNetwork() bool { + return true +} + +// GetMountPoints gives a platform specific transformation to types.MountPoint. Callers must hold a Container lock. +func (container *Container) GetMountPoints() []types.MountPoint { + mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) + for _, m := range container.MountPoints { + mountPoints = append(mountPoints, types.MountPoint{ + Type: m.Type, + Name: m.Name, + Source: m.Path(), + Destination: m.Destination, + Driver: m.Driver, + RW: m.RW, + }) + } + return mountPoints +} diff --git a/vendor/github.com/moby/moby/container/env.go b/vendor/github.com/moby/moby/container/env.go new file mode 100644 index 000000000..896a384c4 --- /dev/null +++ b/vendor/github.com/moby/moby/container/env.go @@ -0,0 +1,43 @@ +package container + +import ( + "strings" +) + +// ReplaceOrAppendEnvValues returns the defaults with the overrides either +// replaced by env key or appended to the list +func ReplaceOrAppendEnvValues(defaults, overrides []string) []string { + cache := make(map[string]int, len(defaults)) + for i, e := range defaults { + parts := strings.SplitN(e, "=", 2) + cache[parts[0]] = i + } + + for _, value := range overrides { + // Values w/o = means they want this env to be removed/unset. 
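+		// For example, defaults {"HOME=/", "FOO=foo_default"} with overrides
+		// {"HOME=/root", "FOO"} yield {"HOME=/root"}: HOME is replaced and
+		// the bare "FOO" unsets FOO (see TestReplaceAndAppendEnvVars).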
+		if !strings.Contains(value, "=") {
+			if i, exists := cache[value]; exists {
+				defaults[i] = "" // Used to indicate it should be removed
+			}
+			continue
+		}
+
+		// Just do a normal set/update
+		parts := strings.SplitN(value, "=", 2)
+		if i, exists := cache[parts[0]]; exists {
+			defaults[i] = value
+		} else {
+			defaults = append(defaults, value)
+		}
+	}
+
+	// Now remove all entries that we want to "unset"
+	for i := 0; i < len(defaults); i++ {
+		if defaults[i] == "" {
+			defaults = append(defaults[:i], defaults[i+1:]...)
+			i--
+		}
+	}
+
+	return defaults
+}
diff --git a/vendor/github.com/moby/moby/container/env_test.go b/vendor/github.com/moby/moby/container/env_test.go
new file mode 100644
index 000000000..4ebf2640a
--- /dev/null
+++ b/vendor/github.com/moby/moby/container/env_test.go
@@ -0,0 +1,24 @@
+package container
+
+import "testing"
+
+func TestReplaceAndAppendEnvVars(t *testing.T) {
+	var (
+		d = []string{"HOME=/", "FOO=foo_default"}
+		// remove FOO from env
+		// remove BAR from env (nop)
+		o = []string{"HOME=/root", "TERM=xterm", "FOO", "BAR"}
+	)
+
+	env := ReplaceOrAppendEnvValues(d, o)
+	t.Logf("default=%v, override=%v, result=%v", d, o, env)
+	if len(env) != 2 {
+		t.Fatalf("expected len of 2 got %d", len(env))
+	}
+	if env[0] != "HOME=/root" {
+		t.Fatalf("expected HOME=/root got '%s'", env[0])
+	}
+	if env[1] != "TERM=xterm" {
+		t.Fatalf("expected TERM=xterm got '%s'", env[1])
+	}
+}
diff --git a/vendor/github.com/moby/moby/container/health.go b/vendor/github.com/moby/moby/container/health.go
new file mode 100644
index 000000000..31c5600d2
--- /dev/null
+++ b/vendor/github.com/moby/moby/container/health.go
@@ -0,0 +1,50 @@
+package container
+
+import (
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api/types"
+)
+
+// Health holds the current container health-check state
+type Health struct {
+	types.Health
+	stop chan struct{} // Write struct{} to stop the monitor
+}
+
+// String returns a human-readable description of the health-check state
+func (s *Health) String() string {
+	// This happens when the monitor has yet to be set up.
+	if s.Status == "" {
+		return types.Unhealthy
+	}
+
+	switch s.Status {
+	case types.Starting:
+		return "health: starting"
+	default: // Healthy and Unhealthy are clear on their own
+		return s.Status
+	}
+}
+
+// OpenMonitorChannel creates and returns a new monitor channel. If there already is one,
+// it returns nil.
+func (s *Health) OpenMonitorChannel() chan struct{} {
+	if s.stop == nil {
+		logrus.Debug("OpenMonitorChannel")
+		s.stop = make(chan struct{})
+		return s.stop
+	}
+	return nil
+}
+
+// CloseMonitorChannel closes any existing monitor channel.
+func (s *Health) CloseMonitorChannel() {
+	if s.stop != nil {
+		logrus.Debug("CloseMonitorChannel: waiting for probe to stop")
+		close(s.stop)
+		s.stop = nil
+		// Mark unhealthy when the monitor has stopped, for compatibility reasons
+		s.Status = types.Unhealthy
+		logrus.Debug("CloseMonitorChannel done")
+	}
+}
diff --git a/vendor/github.com/moby/moby/container/history.go b/vendor/github.com/moby/moby/container/history.go
new file mode 100644
index 000000000..c80c2aa0c
--- /dev/null
+++ b/vendor/github.com/moby/moby/container/history.go
@@ -0,0 +1,30 @@
+package container
+
+import "sort"
+
+// History is a convenience type for storing a list of containers,
+// sorted by creation date in descending order.
+type History []*Container
+
+// Len returns the number of containers in the history.
+func (history *History) Len() int {
+	return len(*history)
+}
+
+// Less compares two containers and returns true if the second one
+// was created before the first one.
+func (history *History) Less(i, j int) bool {
+	containers := *history
+	return containers[j].Created.Before(containers[i].Created)
+}
+
+// Swap switches containers i and j positions in the history.
+func (history *History) Swap(i, j int) {
+	containers := *history
+	containers[i], containers[j] = containers[j], containers[i]
+}
+
+// sort orders the history by creation date in descending order.
+func (history *History) sort() {
+	sort.Sort(history)
+}
diff --git a/vendor/github.com/moby/moby/container/memory_store.go b/vendor/github.com/moby/moby/container/memory_store.go
new file mode 100644
index 000000000..706407a71
--- /dev/null
+++ b/vendor/github.com/moby/moby/container/memory_store.go
@@ -0,0 +1,95 @@
+package container
+
+import (
+	"sync"
+)
+
+// memoryStore implements a Store in memory.
+type memoryStore struct {
+	s map[string]*Container
+	sync.RWMutex
+}
+
+// NewMemoryStore initializes a new memory store.
+func NewMemoryStore() Store {
+	return &memoryStore{
+		s: make(map[string]*Container),
+	}
+}
+
+// Add appends a new container to the memory store.
+// It overwrites any existing container stored under the same id.
+func (c *memoryStore) Add(id string, cont *Container) {
+	c.Lock()
+	c.s[id] = cont
+	c.Unlock()
+}
+
+// Get returns a container from the store by id.
+func (c *memoryStore) Get(id string) *Container {
+	var res *Container
+	c.RLock()
+	res = c.s[id]
+	c.RUnlock()
+	return res
+}
+
+// Delete removes a container from the store by id.
+func (c *memoryStore) Delete(id string) {
+	c.Lock()
+	delete(c.s, id)
+	c.Unlock()
+}
+
+// List returns a sorted list of containers from the store.
+// The containers are ordered by creation date.
+func (c *memoryStore) List() []*Container {
+	containers := History(c.all())
+	containers.sort()
+	return containers
+}
+
+// Size returns the number of containers in the store.
+func (c *memoryStore) Size() int {
+	c.RLock()
+	defer c.RUnlock()
+	return len(c.s)
+}
+
+// First returns the first container found in the store by a given filter.
+func (c *memoryStore) First(filter StoreFilter) *Container {
+	for _, cont := range c.all() {
+		if filter(cont) {
+			return cont
+		}
+	}
+	return nil
+}
+
+// ApplyAll calls the reducer function with every container in the store.
+// This operation runs concurrently in the memory store.
+// NOTE: Modifications to the store MUST NOT be done by the StoreReducer.
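+// Each reducer call runs in its own goroutine and ApplyAll blocks until all
+// of them return, so the reducer must also be safe for concurrent use. A
+// minimal sketch of a reducer (hypothetical, for illustration only):
+//
+//	store.ApplyAll(func(c *Container) {
+//		fmt.Println(c.ID)
+//	})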
+func (c *memoryStore) ApplyAll(apply StoreReducer) { + wg := new(sync.WaitGroup) + for _, cont := range c.all() { + wg.Add(1) + go func(container *Container) { + apply(container) + wg.Done() + }(cont) + } + + wg.Wait() +} + +func (c *memoryStore) all() []*Container { + c.RLock() + containers := make([]*Container, 0, len(c.s)) + for _, cont := range c.s { + containers = append(containers, cont) + } + c.RUnlock() + return containers +} + +var _ Store = &memoryStore{} diff --git a/vendor/github.com/moby/moby/container/memory_store_test.go b/vendor/github.com/moby/moby/container/memory_store_test.go new file mode 100644 index 000000000..8d26d1a96 --- /dev/null +++ b/vendor/github.com/moby/moby/container/memory_store_test.go @@ -0,0 +1,106 @@ +package container + +import ( + "testing" + "time" +) + +func TestNewMemoryStore(t *testing.T) { + s := NewMemoryStore() + m, ok := s.(*memoryStore) + if !ok { + t.Fatalf("store is not a memory store %v", s) + } + if m.s == nil { + t.Fatal("expected store map to not be nil") + } +} + +func TestAddContainers(t *testing.T) { + s := NewMemoryStore() + s.Add("id", NewBaseContainer("id", "root")) + if s.Size() != 1 { + t.Fatalf("expected store size 1, got %v", s.Size()) + } +} + +func TestGetContainer(t *testing.T) { + s := NewMemoryStore() + s.Add("id", NewBaseContainer("id", "root")) + c := s.Get("id") + if c == nil { + t.Fatal("expected container to not be nil") + } +} + +func TestDeleteContainer(t *testing.T) { + s := NewMemoryStore() + s.Add("id", NewBaseContainer("id", "root")) + s.Delete("id") + if c := s.Get("id"); c != nil { + t.Fatalf("expected container to be nil after removal, got %v", c) + } + + if s.Size() != 0 { + t.Fatalf("expected store size to be 0, got %v", s.Size()) + } +} + +func TestListContainers(t *testing.T) { + s := NewMemoryStore() + + cont := NewBaseContainer("id", "root") + cont.Created = time.Now() + cont2 := NewBaseContainer("id2", "root") + cont2.Created = time.Now().Add(24 * time.Hour) + + s.Add("id", cont) + s.Add("id2", cont2) + + list := s.List() + if len(list) != 2 { + t.Fatalf("expected list size 2, got %v", len(list)) + } + if list[0].ID != "id2" { + t.Fatalf("expected id2, got %v", list[0].ID) + } +} + +func TestFirstContainer(t *testing.T) { + s := NewMemoryStore() + + s.Add("id", NewBaseContainer("id", "root")) + s.Add("id2", NewBaseContainer("id2", "root")) + + first := s.First(func(cont *Container) bool { + return cont.ID == "id2" + }) + + if first == nil { + t.Fatal("expected container to not be nil") + } + if first.ID != "id2" { + t.Fatalf("expected id2, got %v", first) + } +} + +func TestApplyAllContainer(t *testing.T) { + s := NewMemoryStore() + + s.Add("id", NewBaseContainer("id", "root")) + s.Add("id2", NewBaseContainer("id2", "root")) + + s.ApplyAll(func(cont *Container) { + if cont.ID == "id2" { + cont.ID = "newID" + } + }) + + cont := s.Get("id2") + if cont == nil { + t.Fatal("expected container to not be nil") + } + if cont.ID != "newID" { + t.Fatalf("expected newID, got %v", cont.ID) + } +} diff --git a/vendor/github.com/moby/moby/container/monitor.go b/vendor/github.com/moby/moby/container/monitor.go new file mode 100644 index 000000000..f05e72b25 --- /dev/null +++ b/vendor/github.com/moby/moby/container/monitor.go @@ -0,0 +1,46 @@ +package container + +import ( + "time" + + "github.com/Sirupsen/logrus" +) + +const ( + loggerCloseTimeout = 10 * time.Second +) + +// Reset puts a container into a state where it can be restarted again. 
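+// When lock is true, Reset takes the container lock for the duration of the
+// call; pass false only when the caller already holds it.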
+func (container *Container) Reset(lock bool) {
+	if lock {
+		container.Lock()
+		defer container.Unlock()
+	}
+
+	if err := container.CloseStreams(); err != nil {
+		logrus.Errorf("%s: %s", container.ID, err)
+	}
+
+	// Re-create a brand new stdin pipe once the container exited
+	if container.Config.OpenStdin {
+		container.StreamConfig.NewInputPipes()
+	}
+
+	if container.LogDriver != nil {
+		if container.LogCopier != nil {
+			exit := make(chan struct{})
+			go func() {
+				container.LogCopier.Wait()
+				close(exit)
+			}()
+			select {
+			case <-time.After(loggerCloseTimeout):
+				logrus.Warn("Logger didn't exit in time: logs may be truncated")
+			case <-exit:
+			}
+		}
+		container.LogDriver.Close()
+		container.LogCopier = nil
+		container.LogDriver = nil
+	}
+}
diff --git a/vendor/github.com/moby/moby/container/mounts_unix.go b/vendor/github.com/moby/moby/container/mounts_unix.go
new file mode 100644
index 000000000..c52abed2d
--- /dev/null
+++ b/vendor/github.com/moby/moby/container/mounts_unix.go
@@ -0,0 +1,12 @@
+// +build !windows
+
+package container
+
+// Mount contains information for a mount operation.
+type Mount struct {
+	Source      string `json:"source"`
+	Destination string `json:"destination"`
+	Writable    bool   `json:"writable"`
+	Data        string `json:"data"`
+	Propagation string `json:"mountpropagation"`
+}
diff --git a/vendor/github.com/moby/moby/container/mounts_windows.go b/vendor/github.com/moby/moby/container/mounts_windows.go
new file mode 100644
index 000000000..01b327f78
--- /dev/null
+++ b/vendor/github.com/moby/moby/container/mounts_windows.go
@@ -0,0 +1,8 @@
+package container
+
+// Mount contains information for a mount operation.
+type Mount struct {
+	Source      string `json:"source"`
+	Destination string `json:"destination"`
+	Writable    bool   `json:"writable"`
+}
diff --git a/vendor/github.com/moby/moby/container/state.go b/vendor/github.com/moby/moby/container/state.go
new file mode 100644
index 000000000..32f3f5b7a
--- /dev/null
+++ b/vendor/github.com/moby/moby/container/state.go
@@ -0,0 +1,382 @@
+package container
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/go-units"
+)
+
+// State holds the current container state, and has methods to get and
+// set the state. Container has an embed, which allows all of the
+// functions defined against State to run against Container.
+type State struct {
+	sync.Mutex
+	// Note that `Running` and `Paused` are not mutually exclusive:
+	// When pausing a container (on Linux), the cgroups freezer is used to suspend
+	// all processes in the container. Freezing the process requires the process to
+	// be running. As a result, paused containers are both `Running` _and_ `Paused`.
+	Running           bool
+	Paused            bool
+	Restarting        bool
+	OOMKilled         bool
+	RemovalInProgress bool // No need for this to be persistent on disk.
+	Dead              bool
+	Pid               int
+	ExitCodeValue     int    `json:"ExitCode"`
+	ErrorMsg          string `json:"Error"` // contains last known error when starting the container
+	StartedAt         time.Time
+	FinishedAt        time.Time
+	Health            *Health
+
+	waitStop   chan struct{}
+	waitRemove chan struct{}
+}
+
+// StateStatus is used to return container wait results.
+// Implements exec.ExitCode interface.
+// This type is needed as State includes a sync.Mutex field which makes
+// copying it unsafe.
+type StateStatus struct {
+	exitCode int
+	err      error
+}
+
+// ExitCode returns the current exit code for the state.
+func (s StateStatus) ExitCode() int { + return s.exitCode +} + +// Err returns current error for the state. Returns nil if the container had +// exited on its own. +func (s StateStatus) Err() error { + return s.err +} + +// NewState creates a default state object with a fresh channel for state changes. +func NewState() *State { + return &State{ + waitStop: make(chan struct{}), + waitRemove: make(chan struct{}), + } +} + +// String returns a human-readable description of the state +func (s *State) String() string { + if s.Running { + if s.Paused { + return fmt.Sprintf("Up %s (Paused)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) + } + if s.Restarting { + return fmt.Sprintf("Restarting (%d) %s ago", s.ExitCodeValue, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) + } + + if h := s.Health; h != nil { + return fmt.Sprintf("Up %s (%s)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)), h.String()) + } + + return fmt.Sprintf("Up %s", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) + } + + if s.RemovalInProgress { + return "Removal In Progress" + } + + if s.Dead { + return "Dead" + } + + if s.StartedAt.IsZero() { + return "Created" + } + + if s.FinishedAt.IsZero() { + return "" + } + + return fmt.Sprintf("Exited (%d) %s ago", s.ExitCodeValue, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) +} + +// HealthString returns a single string to describe health status. +func (s *State) HealthString() string { + if s.Health == nil { + return types.NoHealthcheck + } + + return s.Health.String() +} + +// IsValidHealthString checks if the provided string is a valid container health status or not. +func IsValidHealthString(s string) bool { + return s == types.Starting || + s == types.Healthy || + s == types.Unhealthy || + s == types.NoHealthcheck +} + +// StateString returns a single string to describe state +func (s *State) StateString() string { + if s.Running { + if s.Paused { + return "paused" + } + if s.Restarting { + return "restarting" + } + return "running" + } + + if s.RemovalInProgress { + return "removing" + } + + if s.Dead { + return "dead" + } + + if s.StartedAt.IsZero() { + return "created" + } + + return "exited" +} + +// IsValidStateString checks if the provided string is a valid container state or not. +func IsValidStateString(s string) bool { + if s != "paused" && + s != "restarting" && + s != "removing" && + s != "running" && + s != "dead" && + s != "created" && + s != "exited" { + return false + } + return true +} + +// WaitCondition is an enum type for different states to wait for. +type WaitCondition int + +// Possible WaitCondition Values. +// +// WaitConditionNotRunning (default) is used to wait for any of the non-running +// states: "created", "exited", "dead", "removing", or "removed". +// +// WaitConditionNextExit is used to wait for the next time the state changes +// to a non-running state. If the state is currently "created" or "exited", +// this would cause Wait() to block until either the container runs and exits +// or is removed. +// +// WaitConditionRemoved is used to wait for the container to be removed. +const ( + WaitConditionNotRunning WaitCondition = iota + WaitConditionNextExit + WaitConditionRemoved +) + +// Wait waits until the container is in a certain state indicated by the given +// condition. A context must be used for cancelling the request, controlling +// timeouts, and avoiding goroutine leaks. Wait must be called without holding +// the state lock. 
+// Returns a channel from which the caller will receive the result. If the
+// container exited on its own, the result's Err() method will be nil and its
+// ExitCode() method will return the container's exit code, otherwise the
+// result's Err() method will return an error indicating why the wait
+// operation failed.
+func (s *State) Wait(ctx context.Context, condition WaitCondition) <-chan StateStatus {
+	s.Lock()
+	defer s.Unlock()
+
+	if condition == WaitConditionNotRunning && !s.Running {
+		// Buffer so we can put it in the channel now.
+		resultC := make(chan StateStatus, 1)
+
+		// Send the current status.
+		resultC <- StateStatus{
+			exitCode: s.ExitCode(),
+			err:      s.Err(),
+		}
+
+		return resultC
+	}
+
+	// If we are waiting only for removal, the waitStop channel should
+	// remain nil and block forever.
+	var waitStop chan struct{}
+	if condition < WaitConditionRemoved {
+		waitStop = s.waitStop
+	}
+
+	// Always wait for removal, just in case the container gets removed
+	// while it is still in a "created" state, in which case it is never
+	// actually stopped.
+	waitRemove := s.waitRemove
+
+	resultC := make(chan StateStatus)
+
+	go func() {
+		select {
+		case <-ctx.Done():
+			// Context timeout or cancellation.
+			resultC <- StateStatus{
+				exitCode: -1,
+				err:      ctx.Err(),
+			}
+			return
+		case <-waitStop:
+		case <-waitRemove:
+		}
+
+		s.Lock()
+		result := StateStatus{
+			exitCode: s.ExitCode(),
+			err:      s.Err(),
+		}
+		s.Unlock()
+
+		resultC <- result
+	}()
+
+	return resultC
+}
+
+// IsRunning returns whether the running flag is set. Used by Container to check whether a container is running.
+func (s *State) IsRunning() bool {
+	s.Lock()
+	res := s.Running
+	s.Unlock()
+	return res
+}
+
+// GetPID returns the process id of the container.
+func (s *State) GetPID() int {
+	s.Lock()
+	res := s.Pid
+	s.Unlock()
+	return res
+}
+
+// ExitCode returns the current exit code for the state. Take the lock before
+// calling if the state may be shared.
+func (s *State) ExitCode() int {
+	return s.ExitCodeValue
+}
+
+// SetExitCode sets the current exit code for the state. Take the lock before
+// calling if the state may be shared.
+func (s *State) SetExitCode(ec int) {
+	s.ExitCodeValue = ec
+}
+
+// SetRunning sets the state of the container to "running".
+func (s *State) SetRunning(pid int, initial bool) {
+	s.ErrorMsg = ""
+	s.Running = true
+	s.Restarting = false
+	if initial {
+		s.Paused = false
+	}
+	s.ExitCodeValue = 0
+	s.Pid = pid
+	if initial {
+		s.StartedAt = time.Now().UTC()
+	}
+}
+
+// SetStopped sets the container state to "stopped" without locking.
+func (s *State) SetStopped(exitStatus *ExitStatus) {
+	s.Running = false
+	s.Paused = false
+	s.Restarting = false
+	s.Pid = 0
+	s.FinishedAt = time.Now().UTC()
+	s.setFromExitStatus(exitStatus)
+	close(s.waitStop) // Fire waiters for stop
+	s.waitStop = make(chan struct{})
+}
+
+// SetRestarting sets the container state to "restarting" without locking.
+// It also sets the container PID to 0.
+func (s *State) SetRestarting(exitStatus *ExitStatus) {
+	// we should consider the container running when it is restarting because of
+	// all the checks in docker around rm/stop/etc
+	s.Running = true
+	s.Restarting = true
+	s.Paused = false
+	s.Pid = 0
+	s.FinishedAt = time.Now().UTC()
+	s.setFromExitStatus(exitStatus)
+	close(s.waitStop) // Fire waiters for stop
+	s.waitStop = make(chan struct{})
+}
+
+// SetError sets the container's error state.
+// This is useful when inspecting the container: it records the error that
+// occurred while the container was transitioning to another state.
+func (s *State) SetError(err error) {
+	s.ErrorMsg = err.Error()
+}
+
+// IsPaused returns whether the container is paused or not.
+func (s *State) IsPaused() bool {
+	s.Lock()
+	res := s.Paused
+	s.Unlock()
+	return res
+}
+
+// IsRestarting returns whether the container is restarting or not.
+func (s *State) IsRestarting() bool {
+	s.Lock()
+	res := s.Restarting
+	s.Unlock()
+	return res
+}
+
+// SetRemovalInProgress sets the container state as being removed.
+// It returns true if the container was already in that state.
+func (s *State) SetRemovalInProgress() bool {
+	s.Lock()
+	defer s.Unlock()
+	if s.RemovalInProgress {
+		return true
+	}
+	s.RemovalInProgress = true
+	return false
+}
+
+// ResetRemovalInProgress resets the RemovalInProgress flag to false.
+func (s *State) ResetRemovalInProgress() {
+	s.Lock()
+	s.RemovalInProgress = false
+	s.Unlock()
+}
+
+// SetDead sets the container state to "dead"
+func (s *State) SetDead() {
+	s.Lock()
+	s.Dead = true
+	s.Unlock()
+}
+
+// SetRemoved assumes this container is already in the "dead" state and
+// closes the internal waitRemove channel to unblock callers waiting for a
+// container to be removed.
+func (s *State) SetRemoved() {
+	s.Lock()
+	close(s.waitRemove) // Unblock those waiting on remove.
+	s.Unlock()
+}
+
+// Err returns an error if there is one.
+func (s *State) Err() error {
+	if s.ErrorMsg != "" {
+		return errors.New(s.ErrorMsg)
+	}
+	return nil
+}
diff --git a/vendor/github.com/moby/moby/container/state_solaris.go b/vendor/github.com/moby/moby/container/state_solaris.go
new file mode 100644
index 000000000..1229650ef
--- /dev/null
+++ b/vendor/github.com/moby/moby/container/state_solaris.go
@@ -0,0 +1,7 @@
+package container
+
+// setFromExitStatus is a platform specific helper function to set the state
+// based on the ExitStatus structure.
+func (s *State) setFromExitStatus(exitStatus *ExitStatus) {
+	s.ExitCodeValue = exitStatus.ExitCode
+}
diff --git a/vendor/github.com/moby/moby/container/state_test.go b/vendor/github.com/moby/moby/container/state_test.go
new file mode 100644
index 000000000..2a90e5541
--- /dev/null
+++ b/vendor/github.com/moby/moby/container/state_test.go
@@ -0,0 +1,168 @@
+package container
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"github.com/docker/docker/api/types"
+)
+
+func TestIsValidHealthString(t *testing.T) {
+	contexts := []struct {
+		Health   string
+		Expected bool
+	}{
+		{types.Healthy, true},
+		{types.Unhealthy, true},
+		{types.Starting, true},
+		{types.NoHealthcheck, true},
+		{"fail", false},
+	}
+
+	for _, c := range contexts {
+		v := IsValidHealthString(c.Health)
+		if v != c.Expected {
+			t.Fatalf("Expected %t, but got %t", c.Expected, v)
+		}
+	}
+}
+
+func TestStateRunStop(t *testing.T) {
+	s := NewState()
+
+	// Begin a wait with WaitConditionRemoved. It should complete
+	// within 200 milliseconds.
+	ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
+	defer cancel()
+	removalWait := s.Wait(ctx, WaitConditionRemoved)
+
+	// Full lifecycle two times.
+	for i := 1; i <= 2; i++ {
+		// A wait with WaitConditionNotRunning should return
+		// immediately since the state is now either "created" (on the
+		// first iteration) or "exited" (on the second iteration). It
+		// shouldn't take more than 50 milliseconds.
+		ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
+		defer cancel()
+		// Expect the exit code to be i-1, since it should be the exit
+		// code from the previous loop or 0 for the created state.
+		if status := <-s.Wait(ctx, WaitConditionNotRunning); status.ExitCode() != i-1 {
+			t.Fatalf("ExitCode %v, expected %v, err %q", status.ExitCode(), i-1, status.Err())
+		}
+
+		// A wait with WaitConditionNextExit should block until the
+		// container has started and exited. It shouldn't take more
+		// than 100 milliseconds.
+		ctx, cancel = context.WithTimeout(context.Background(), 100*time.Millisecond)
+		defer cancel()
+		initialWait := s.Wait(ctx, WaitConditionNextExit)
+
+		// Set the state to "Running".
+		s.Lock()
+		s.SetRunning(i, true)
+		s.Unlock()
+
+		// Assert desired state.
+		if !s.IsRunning() {
+			t.Fatal("State not running")
+		}
+		if s.Pid != i {
+			t.Fatalf("Pid %v, expected %v", s.Pid, i)
+		}
+		if s.ExitCode() != 0 {
+			t.Fatalf("ExitCode %v, expected 0", s.ExitCode())
+		}
+
+		// Now that it's running, a wait with WaitConditionNotRunning
+		// should block until we stop the container. It shouldn't take
+		// more than 100 milliseconds.
+		ctx, cancel = context.WithTimeout(context.Background(), 100*time.Millisecond)
+		defer cancel()
+		exitWait := s.Wait(ctx, WaitConditionNotRunning)
+
+		// Set the state to "Exited".
+		s.Lock()
+		s.SetStopped(&ExitStatus{ExitCode: i})
+		s.Unlock()
+
+		// Assert desired state.
+		if s.IsRunning() {
+			t.Fatal("State is running")
+		}
+		if s.ExitCode() != i {
+			t.Fatalf("ExitCode %v, expected %v", s.ExitCode(), i)
+		}
+		if s.Pid != 0 {
+			t.Fatalf("Pid %v, expected 0", s.Pid)
+		}
+
+		// Receive the initialWait result.
+		if status := <-initialWait; status.ExitCode() != i {
+			t.Fatalf("ExitCode %v, expected %v, err %q", status.ExitCode(), i, status.Err())
+		}
+
+		// Receive the exitWait result.
+		if status := <-exitWait; status.ExitCode() != i {
+			t.Fatalf("ExitCode %v, expected %v, err %q", status.ExitCode(), i, status.Err())
+		}
+	}
+
+	// Set the state to dead and removed.
+	s.SetDead()
+	s.SetRemoved()
+
+	// Wait for removed status or timeout.
+	if status := <-removalWait; status.ExitCode() != 2 {
+		// Should have the final exit code from the loop.
+		t.Fatalf("Removal wait exitCode %v, expected %v, err %q", status.ExitCode(), 2, status.Err())
+	}
+}
+
+func TestStateTimeoutWait(t *testing.T) {
+	s := NewState()
+
+	s.Lock()
+	s.SetRunning(0, true)
+	s.Unlock()
+
+	// Start a wait with a timeout.
+	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
+	defer cancel()
+	waitC := s.Wait(ctx, WaitConditionNotRunning)
+
+	// It should time out *before* this 200ms timer does.
+	select {
+	case <-time.After(200 * time.Millisecond):
+		t.Fatal("Stop callback doesn't fire in 200 milliseconds")
+	case status := <-waitC:
+		t.Log("Stop callback fired")
+		// Should be a timeout error.
+		if status.Err() == nil {
+			t.Fatal("expected timeout error, got nil")
+		}
+		if status.ExitCode() != -1 {
+			t.Fatalf("expected exit code %v, got %v", -1, status.ExitCode())
+		}
+	}
+
+	s.Lock()
+	s.SetStopped(&ExitStatus{ExitCode: 0})
+	s.Unlock()
+
+	// Start another wait with a timeout. This one should return
+	// immediately.
+	ctx, cancel = context.WithTimeout(context.Background(), 100*time.Millisecond)
+	defer cancel()
+	waitC = s.Wait(ctx, WaitConditionNotRunning)
+
+	select {
+	case <-time.After(200 * time.Millisecond):
+		t.Fatal("Stop callback doesn't fire in 200 milliseconds")
+	case status := <-waitC:
+		t.Log("Stop callback fired")
+		if status.ExitCode() != 0 {
+			t.Fatalf("expected exit code %v, got %v, err %q", 0, status.ExitCode(), status.Err())
+		}
+	}
+}
diff --git a/vendor/github.com/moby/moby/container/state_unix.go b/vendor/github.com/moby/moby/container/state_unix.go
new file mode 100644
index 000000000..a2fa5afc2
--- /dev/null
+++ b/vendor/github.com/moby/moby/container/state_unix.go
@@ -0,0 +1,10 @@
+// +build linux freebsd
+
+package container
+
+// setFromExitStatus is a platform specific helper function to set the state
+// based on the ExitStatus structure.
+func (s *State) setFromExitStatus(exitStatus *ExitStatus) {
+	s.ExitCodeValue = exitStatus.ExitCode
+	s.OOMKilled = exitStatus.OOMKilled
+}
diff --git a/vendor/github.com/moby/moby/container/state_windows.go b/vendor/github.com/moby/moby/container/state_windows.go
new file mode 100644
index 000000000..1229650ef
--- /dev/null
+++ b/vendor/github.com/moby/moby/container/state_windows.go
@@ -0,0 +1,7 @@
+package container
+
+// setFromExitStatus is a platform specific helper function to set the state
+// based on the ExitStatus structure.
+func (s *State) setFromExitStatus(exitStatus *ExitStatus) {
+	s.ExitCodeValue = exitStatus.ExitCode
+}
diff --git a/vendor/github.com/moby/moby/container/store.go b/vendor/github.com/moby/moby/container/store.go
new file mode 100644
index 000000000..042fb1a34
--- /dev/null
+++ b/vendor/github.com/moby/moby/container/store.go
@@ -0,0 +1,28 @@
+package container
+
+// StoreFilter defines a function to filter
+// containers in the store.
+type StoreFilter func(*Container) bool
+
+// StoreReducer defines a function to
+// manipulate containers in the store.
+type StoreReducer func(*Container)
+
+// Store defines an interface that
+// any container store must implement.
+type Store interface {
+	// Add appends a new container to the store.
+	Add(string, *Container)
+	// Get returns a container from the store by the identifier it was stored with.
+	Get(string) *Container
+	// Delete removes a container from the store by the identifier it was stored with.
+	Delete(string)
+	// List returns a list of containers from the store.
+	List() []*Container
+	// Size returns the number of containers in the store.
+	Size() int
+	// First returns the first container found in the store by a given filter.
+	First(StoreFilter) *Container
+	// ApplyAll calls the reducer function with every container in the store.
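+	// Implementations may run the reducer concurrently; see memoryStore.ApplyAll.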
+ ApplyAll(StoreReducer) +} diff --git a/vendor/github.com/moby/moby/container/stream/attach.go b/vendor/github.com/moby/moby/container/stream/attach.go new file mode 100644 index 000000000..3dd53d335 --- /dev/null +++ b/vendor/github.com/moby/moby/container/stream/attach.go @@ -0,0 +1,179 @@ +package stream + +import ( + "io" + "sync" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/term" +) + +var defaultEscapeSequence = []byte{16, 17} // ctrl-p, ctrl-q + +// AttachConfig is the config struct used to attach a client to a stream's stdio +type AttachConfig struct { + // Tells the attach copier that the stream's stdin is a TTY and to look for + // escape sequences in stdin to detach from the stream. + // When true the escape sequence is not passed to the underlying stream + TTY bool + // Specifies the detach keys the client will be using + // Only useful when `TTY` is true + DetachKeys []byte + + // CloseStdin signals that once done, stdin for the attached stream should be closed + // For example, this would close the attached container's stdin. + CloseStdin bool + + // UseStd* indicate whether the client has requested to be connected to the + // given stream or not. These flags are used instead of checking Std* != nil + // at points before the client streams Std* are wired up. + UseStdin, UseStdout, UseStderr bool + + // CStd* are the streams directly connected to the container + CStdin io.WriteCloser + CStdout, CStderr io.ReadCloser + + // Provide client streams to wire up to + Stdin io.ReadCloser + Stdout, Stderr io.Writer +} + +// AttachStreams attaches the container's streams to the AttachConfig +func (c *Config) AttachStreams(cfg *AttachConfig) { + if cfg.UseStdin { + cfg.CStdin = c.StdinPipe() + } + + if cfg.UseStdout { + cfg.CStdout = c.StdoutPipe() + } + + if cfg.UseStderr { + cfg.CStderr = c.StderrPipe() + } +} + +// CopyStreams starts goroutines to copy data in and out to/from the container +func (c *Config) CopyStreams(ctx context.Context, cfg *AttachConfig) chan error { + var ( + wg sync.WaitGroup + errors = make(chan error, 3) + ) + + if cfg.Stdin != nil { + wg.Add(1) + } + + if cfg.Stdout != nil { + wg.Add(1) + } + + if cfg.Stderr != nil { + wg.Add(1) + } + + // Connect stdin of container to the attach stdin stream. 
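+	// The copy below unblocks once the client's stdin is exhausted or closed;
+	// io.ErrClosedPipe is deliberately treated as a clean shutdown, not an error.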
+ go func() { + if cfg.Stdin == nil { + return + } + logrus.Debug("attach: stdin: begin") + + var err error + if cfg.TTY { + _, err = copyEscapable(cfg.CStdin, cfg.Stdin, cfg.DetachKeys) + } else { + _, err = pools.Copy(cfg.CStdin, cfg.Stdin) + } + if err == io.ErrClosedPipe { + err = nil + } + if err != nil { + logrus.Errorf("attach: stdin: %s", err) + errors <- err + } + if cfg.CloseStdin && !cfg.TTY { + cfg.CStdin.Close() + } else { + // No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr + if cfg.CStdout != nil { + cfg.CStdout.Close() + } + if cfg.CStderr != nil { + cfg.CStderr.Close() + } + } + logrus.Debug("attach: stdin: end") + wg.Done() + }() + + attachStream := func(name string, stream io.Writer, streamPipe io.ReadCloser) { + if stream == nil { + return + } + + logrus.Debugf("attach: %s: begin", name) + _, err := pools.Copy(stream, streamPipe) + if err == io.ErrClosedPipe { + err = nil + } + if err != nil { + logrus.Errorf("attach: %s: %v", name, err) + errors <- err + } + // Make sure stdin gets closed + if cfg.Stdin != nil { + cfg.Stdin.Close() + } + streamPipe.Close() + logrus.Debugf("attach: %s: end", name) + wg.Done() + } + + go attachStream("stdout", cfg.Stdout, cfg.CStdout) + go attachStream("stderr", cfg.Stderr, cfg.CStderr) + + return promise.Go(func() error { + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + select { + case <-done: + case <-ctx.Done(): + // close all pipes + if cfg.CStdin != nil { + cfg.CStdin.Close() + } + if cfg.CStdout != nil { + cfg.CStdout.Close() + } + if cfg.CStderr != nil { + cfg.CStderr.Close() + } + <-done + } + close(errors) + for err := range errors { + if err != nil { + return err + } + } + return nil + }) +} + +func copyEscapable(dst io.Writer, src io.ReadCloser, keys []byte) (written int64, err error) { + if len(keys) == 0 { + keys = defaultEscapeSequence + } + pr := term.NewEscapeProxy(src, keys) + defer src.Close() + + return pools.Copy(dst, pr) +} diff --git a/vendor/github.com/moby/moby/container/stream/streams.go b/vendor/github.com/moby/moby/container/stream/streams.go new file mode 100644 index 000000000..735bab510 --- /dev/null +++ b/vendor/github.com/moby/moby/container/stream/streams.go @@ -0,0 +1,146 @@ +package stream + +import ( + "fmt" + "io" + "io/ioutil" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/broadcaster" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/pools" +) + +// Config holds information about I/O streams managed together. +// +// config.StdinPipe returns a WriteCloser which can be used to feed data +// to the standard input of the streamConfig's active process. +// config.StdoutPipe and streamConfig.StderrPipe each return a ReadCloser +// which can be used to retrieve the standard output (and error) generated +// by the container's active process. The output (and error) are actually +// copied and delivered to all StdoutPipe and StderrPipe consumers, using +// a kind of "broadcaster". +type Config struct { + sync.WaitGroup + stdout *broadcaster.Unbuffered + stderr *broadcaster.Unbuffered + stdin io.ReadCloser + stdinPipe io.WriteCloser +} + +// NewConfig creates a stream config and initializes +// the standard err and standard out to new unbuffered broadcasters. 
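+//
+// A minimal usage sketch (hypothetical, for illustration only):
+//
+//	cfg := stream.NewConfig()
+//	stdout := cfg.StdoutPipe()    // io.ReadCloser fed by the stdout broadcaster
+//	go io.Copy(os.Stdout, stdout) // must be drained, or writers to stdout block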
+func NewConfig() *Config { + return &Config{ + stderr: new(broadcaster.Unbuffered), + stdout: new(broadcaster.Unbuffered), + } +} + +// Stdout returns the standard output in the configuration. +func (c *Config) Stdout() *broadcaster.Unbuffered { + return c.stdout +} + +// Stderr returns the standard error in the configuration. +func (c *Config) Stderr() *broadcaster.Unbuffered { + return c.stderr +} + +// Stdin returns the standard input in the configuration. +func (c *Config) Stdin() io.ReadCloser { + return c.stdin +} + +// StdinPipe returns an input writer pipe as an io.WriteCloser. +func (c *Config) StdinPipe() io.WriteCloser { + return c.stdinPipe +} + +// StdoutPipe creates a new io.ReadCloser with an empty bytes pipe. +// It adds this new out pipe to the Stdout broadcaster. +// This will block stdout if unconsumed. +func (c *Config) StdoutPipe() io.ReadCloser { + bytesPipe := ioutils.NewBytesPipe() + c.stdout.Add(bytesPipe) + return bytesPipe +} + +// StderrPipe creates a new io.ReadCloser with an empty bytes pipe. +// It adds this new err pipe to the Stderr broadcaster. +// This will block stderr if unconsumed. +func (c *Config) StderrPipe() io.ReadCloser { + bytesPipe := ioutils.NewBytesPipe() + c.stderr.Add(bytesPipe) + return bytesPipe +} + +// NewInputPipes creates new pipes for both standard inputs, Stdin and StdinPipe. +func (c *Config) NewInputPipes() { + c.stdin, c.stdinPipe = io.Pipe() +} + +// NewNopInputPipe creates a new input pipe that will silently drop all messages in the input. +func (c *Config) NewNopInputPipe() { + c.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) +} + +// CloseStreams ensures that the configured streams are properly closed. +func (c *Config) CloseStreams() error { + var errors []string + + if c.stdin != nil { + if err := c.stdin.Close(); err != nil { + errors = append(errors, fmt.Sprintf("error close stdin: %s", err)) + } + } + + if err := c.stdout.Clean(); err != nil { + errors = append(errors, fmt.Sprintf("error close stdout: %s", err)) + } + + if err := c.stderr.Clean(); err != nil { + errors = append(errors, fmt.Sprintf("error close stderr: %s", err)) + } + + if len(errors) > 0 { + return fmt.Errorf(strings.Join(errors, "\n")) + } + + return nil +} + +// CopyToPipe connects streamconfig with a libcontainerd.IOPipe +func (c *Config) CopyToPipe(iop libcontainerd.IOPipe) { + copyFunc := func(w io.Writer, r io.ReadCloser) { + c.Add(1) + go func() { + if _, err := pools.Copy(w, r); err != nil { + logrus.Errorf("stream copy error: %+v", err) + } + r.Close() + c.Done() + }() + } + + if iop.Stdout != nil { + copyFunc(c.Stdout(), iop.Stdout) + } + if iop.Stderr != nil { + copyFunc(c.Stderr(), iop.Stderr) + } + + if stdin := c.Stdin(); stdin != nil { + if iop.Stdin != nil { + go func() { + pools.Copy(iop.Stdin, stdin) + if err := iop.Stdin.Close(); err != nil { + logrus.Warnf("failed to close stdin: %+v", err) + } + }() + } + } +} diff --git a/vendor/github.com/moby/moby/container/view.go b/vendor/github.com/moby/moby/container/view.go new file mode 100644 index 000000000..e865e4d5d --- /dev/null +++ b/vendor/github.com/moby/moby/container/view.go @@ -0,0 +1,498 @@ +package container + +import ( + "errors" + "fmt" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + "github.com/docker/go-connections/nat" + "github.com/hashicorp/go-memdb" +) + +const ( + memdbContainersTable = "containers" + memdbNamesTable = "names" + + memdbIDIndex = "id" + 
memdbContainerIDIndex = "containerid" +) + +var ( + // ErrNameReserved is an error which is returned when a name is requested to be reserved that already is reserved + ErrNameReserved = errors.New("name is reserved") + // ErrNameNotReserved is an error which is returned when trying to find a name that is not reserved + ErrNameNotReserved = errors.New("name is not reserved") +) + +// Snapshot is a read only view for Containers. It holds all information necessary to serve container queries in a +// versioned ACID in-memory store. +type Snapshot struct { + types.Container + + // additional info queries need to filter on + // preserve nanosec resolution for queries + CreatedAt time.Time + StartedAt time.Time + Name string + Pid int + ExitCode int + Running bool + Paused bool + Managed bool + ExposedPorts nat.PortSet + PortBindings nat.PortSet + Health string + HostConfig struct { + Isolation string + } +} + +// nameAssociation associates a container id with a name. +type nameAssociation struct { + // name is the name to associate. Note that name is the primary key + // ("id" in memdb). + name string + containerID string +} + +// ViewDB provides an in-memory transactional (ACID) container Store +type ViewDB interface { + Snapshot() View + Save(*Container) error + Delete(*Container) error + + ReserveName(name, containerID string) error + ReleaseName(name string) error +} + +// View can be used by readers to avoid locking +type View interface { + All() ([]Snapshot, error) + Get(id string) (*Snapshot, error) + + GetID(name string) (string, error) + GetAllNames() map[string][]string +} + +var schema = &memdb.DBSchema{ + Tables: map[string]*memdb.TableSchema{ + memdbContainersTable: { + Name: memdbContainersTable, + Indexes: map[string]*memdb.IndexSchema{ + memdbIDIndex: { + Name: memdbIDIndex, + Unique: true, + Indexer: &containerByIDIndexer{}, + }, + }, + }, + memdbNamesTable: { + Name: memdbNamesTable, + Indexes: map[string]*memdb.IndexSchema{ + // Used for names, because "id" is the primary key in memdb. + memdbIDIndex: { + Name: memdbIDIndex, + Unique: true, + Indexer: &namesByNameIndexer{}, + }, + memdbContainerIDIndex: { + Name: memdbContainerIDIndex, + Indexer: &namesByContainerIDIndexer{}, + }, + }, + }, + }, +} + +type memDB struct { + store *memdb.MemDB +} + +// NoSuchContainerError indicates that the container wasn't found in the +// database. +type NoSuchContainerError struct { + id string +} + +// Error satisfies the error interface. +func (e NoSuchContainerError) Error() string { + return "no such container " + e.id +} + +// NewViewDB provides the default implementation, with the default schema +func NewViewDB() (ViewDB, error) { + store, err := memdb.NewMemDB(schema) + if err != nil { + return nil, err + } + return &memDB{store: store}, nil +} + +// Snapshot provides a consistent read-only View of the database +func (db *memDB) Snapshot() View { + return &memdbView{ + txn: db.store.Txn(false), + } +} + +func (db *memDB) withTxn(cb func(*memdb.Txn) error) error { + txn := db.store.Txn(true) + err := cb(txn) + if err != nil { + txn.Abort() + return err + } + txn.Commit() + return nil +} + +// Save atomically updates the in-memory store state for a Container. +// Only read only (deep) copies of containers may be passed in. 
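+// Callers normally reach Save through Container.CheckpointTo rather than
+// calling it directly (see view_test.go).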
+func (db *memDB) Save(c *Container) error {
+	return db.withTxn(func(txn *memdb.Txn) error {
+		return txn.Insert(memdbContainersTable, c)
+	})
+}
+
+// Delete removes an item by ID
+func (db *memDB) Delete(c *Container) error {
+	return db.withTxn(func(txn *memdb.Txn) error {
+		view := &memdbView{txn: txn}
+		names := view.getNames(c.ID)
+
+		for _, name := range names {
+			txn.Delete(memdbNamesTable, nameAssociation{name: name})
+		}
+
+		if err := txn.Delete(memdbContainersTable, NewBaseContainer(c.ID, c.Root)); err != nil {
+			return err
+		}
+		return nil
+	})
+}
+
+// ReserveName registers a container ID to a name
+// ReserveName is idempotent
+// Attempting to reserve a name that is already reserved for a different
+// container ID results in an `ErrNameReserved`
+// A name reservation is globally unique
+func (db *memDB) ReserveName(name, containerID string) error {
+	return db.withTxn(func(txn *memdb.Txn) error {
+		s, err := txn.First(memdbNamesTable, memdbIDIndex, name)
+		if err != nil {
+			return err
+		}
+		if s != nil {
+			if s.(nameAssociation).containerID != containerID {
+				return ErrNameReserved
+			}
+			return nil
+		}
+
+		if err := txn.Insert(memdbNamesTable, nameAssociation{name: name, containerID: containerID}); err != nil {
+			return err
+		}
+		return nil
+	})
+}
+
+// ReleaseName releases the reserved name
+// Once released, a name can be reserved again
+func (db *memDB) ReleaseName(name string) error {
+	return db.withTxn(func(txn *memdb.Txn) error {
+		if err := txn.Delete(memdbNamesTable, nameAssociation{name: name}); err != nil {
+			return err
+		}
+		return nil
+	})
+}
+
+type memdbView struct {
+	txn *memdb.Txn
+}
+
+// All returns all items in this snapshot. Returned objects must never be modified.
+func (v *memdbView) All() ([]Snapshot, error) {
+	var all []Snapshot
+	iter, err := v.txn.Get(memdbContainersTable, memdbIDIndex)
+	if err != nil {
+		return nil, err
+	}
+	for {
+		item := iter.Next()
+		if item == nil {
+			break
+		}
+		snapshot := v.transform(item.(*Container))
+		all = append(all, *snapshot)
+	}
+	return all, nil
+}
+
+// Get returns an item by id. Returned objects must never be modified.
+func (v *memdbView) Get(id string) (*Snapshot, error) {
+	s, err := v.txn.First(memdbContainersTable, memdbIDIndex, id)
+	if err != nil {
+		return nil, err
+	}
+	if s == nil {
+		return nil, NoSuchContainerError{id: id}
+	}
+	return v.transform(s.(*Container)), nil
+}
+
+// getNames lists all the reserved names for the given container ID.
+func (v *memdbView) getNames(containerID string) []string {
+	iter, err := v.txn.Get(memdbNamesTable, memdbContainerIDIndex, containerID)
+	if err != nil {
+		return nil
+	}
+
+	var names []string
+	for {
+		item := iter.Next()
+		if item == nil {
+			break
+		}
+		names = append(names, item.(nameAssociation).name)
+	}
+
+	return names
+}
+
+// GetID returns the container ID that the passed in name is reserved to.
+func (v *memdbView) GetID(name string) (string, error) {
+	s, err := v.txn.First(memdbNamesTable, memdbIDIndex, name)
+	if err != nil {
+		return "", err
+	}
+	if s == nil {
+		return "", ErrNameNotReserved
+	}
+	return s.(nameAssociation).containerID, nil
+}
+
+// GetAllNames returns all registered names.
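+// The returned map is keyed by container ID; each value lists the names
+// reserved for that container.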
+func (v *memdbView) GetAllNames() map[string][]string {
+	iter, err := v.txn.Get(memdbNamesTable, memdbContainerIDIndex)
+	if err != nil {
+		return nil
+	}
+
+	out := make(map[string][]string)
+	for {
+		item := iter.Next()
+		if item == nil {
+			break
+		}
+		assoc := item.(nameAssociation)
+		out[assoc.containerID] = append(out[assoc.containerID], assoc.name)
+	}
+
+	return out
+}
+
+// transform maps a (deep) copied Container object to what queries need.
+// A lock on the Container is not held because these are immutable deep copies.
+func (v *memdbView) transform(container *Container) *Snapshot {
+	snapshot := &Snapshot{
+		Container: types.Container{
+			ID:      container.ID,
+			Names:   v.getNames(container.ID),
+			ImageID: container.ImageID.String(),
+			Ports:   []types.Port{},
+			Mounts:  container.GetMountPoints(),
+			State:   container.State.StateString(),
+			Status:  container.State.String(),
+			Created: container.Created.Unix(),
+		},
+		CreatedAt:    container.Created,
+		StartedAt:    container.StartedAt,
+		Name:         container.Name,
+		Pid:          container.Pid,
+		Managed:      container.Managed,
+		ExposedPorts: make(nat.PortSet),
+		PortBindings: make(nat.PortSet),
+		Health:       container.HealthString(),
+		Running:      container.Running,
+		Paused:       container.Paused,
+		ExitCode:     container.ExitCode(),
+	}
+
+	if snapshot.Names == nil {
+		// Dead containers will often have no name, so make sure the response isn't null
+		snapshot.Names = []string{}
+	}
+
+	if container.HostConfig != nil {
+		snapshot.Container.HostConfig.NetworkMode = string(container.HostConfig.NetworkMode)
+		snapshot.HostConfig.Isolation = string(container.HostConfig.Isolation)
+		for binding := range container.HostConfig.PortBindings {
+			snapshot.PortBindings[binding] = struct{}{}
+		}
+	}
+
+	if container.Config != nil {
+		snapshot.Image = container.Config.Image
+		snapshot.Labels = container.Config.Labels
+		for exposed := range container.Config.ExposedPorts {
+			snapshot.ExposedPorts[exposed] = struct{}{}
+		}
+	}
+
+	if len(container.Args) > 0 {
+		args := []string{}
+		for _, arg := range container.Args {
+			if strings.Contains(arg, " ") {
+				args = append(args, fmt.Sprintf("'%s'", arg))
+			} else {
+				args = append(args, arg)
+			}
+		}
+		argsAsString := strings.Join(args, " ")
+		snapshot.Command = fmt.Sprintf("%s %s", container.Path, argsAsString)
+	} else {
+		snapshot.Command = container.Path
+	}
+
+	snapshot.Ports = []types.Port{}
+	networks := make(map[string]*network.EndpointSettings)
+	if container.NetworkSettings != nil {
+		for name, netw := range container.NetworkSettings.Networks {
+			if netw == nil || netw.EndpointSettings == nil {
+				continue
+			}
+			networks[name] = &network.EndpointSettings{
+				EndpointID:          netw.EndpointID,
+				Gateway:             netw.Gateway,
+				IPAddress:           netw.IPAddress,
+				IPPrefixLen:         netw.IPPrefixLen,
+				IPv6Gateway:         netw.IPv6Gateway,
+				GlobalIPv6Address:   netw.GlobalIPv6Address,
+				GlobalIPv6PrefixLen: netw.GlobalIPv6PrefixLen,
+				MacAddress:          netw.MacAddress,
+				NetworkID:           netw.NetworkID,
+			}
+			if netw.IPAMConfig != nil {
+				networks[name].IPAMConfig = &network.EndpointIPAMConfig{
+					IPv4Address: netw.IPAMConfig.IPv4Address,
+					IPv6Address: netw.IPAMConfig.IPv6Address,
+				}
+			}
+		}
+		for port, bindings := range container.NetworkSettings.Ports {
+			p, err := nat.ParsePort(port.Port())
+			if err != nil {
+				logrus.Warnf("invalid port map %+v", err)
+				continue
+			}
+			if len(bindings) == 0 {
+				snapshot.Ports = append(snapshot.Ports, types.Port{
+					PrivatePort: uint16(p),
+					Type:        port.Proto(),
+				})
+				continue
+			}
+			for _, binding := range bindings {
+				h, err := nat.ParsePort(binding.HostPort)
+				if err != nil {
+					logrus.Warnf("invalid host port map %+v", err)
+					continue
+				}
+				snapshot.Ports = append(snapshot.Ports, types.Port{
+					PrivatePort: uint16(p),
+					PublicPort:  uint16(h),
+					Type:        port.Proto(),
+					IP:          binding.HostIP,
+				})
+			}
+		}
+	}
+	snapshot.NetworkSettings = &types.SummaryNetworkSettings{Networks: networks}
+
+	return snapshot
+}
+
+// containerByIDIndexer is used to extract the ID field from Container types.
+// memdb.StringFieldIndex cannot be used since ID is a field from an embedded struct.
+type containerByIDIndexer struct{}
+
+// FromObject implements the memdb.SingleIndexer interface for Container objects
+func (e *containerByIDIndexer) FromObject(obj interface{}) (bool, []byte, error) {
+	c, ok := obj.(*Container)
+	if !ok {
+		return false, nil, fmt.Errorf("%T is not a Container", obj)
+	}
+	// Add the null character as a terminator
+	v := c.ID + "\x00"
+	return true, []byte(v), nil
+}
+
+// FromArgs implements the memdb.Indexer interface
+func (e *containerByIDIndexer) FromArgs(args ...interface{}) ([]byte, error) {
+	if len(args) != 1 {
+		return nil, fmt.Errorf("must provide only a single argument")
+	}
+	arg, ok := args[0].(string)
+	if !ok {
+		return nil, fmt.Errorf("argument must be a string: %#v", args[0])
+	}
+	// Add the null character as a terminator
+	arg += "\x00"
+	return []byte(arg), nil
+}
+
+// namesByNameIndexer is used to index container name associations by name.
+type namesByNameIndexer struct{}
+
+func (e *namesByNameIndexer) FromObject(obj interface{}) (bool, []byte, error) {
+	n, ok := obj.(nameAssociation)
+	if !ok {
+		return false, nil, fmt.Errorf(`%T does not have type "nameAssociation"`, obj)
+	}
+
+	// Add the null character as a terminator
+	return true, []byte(n.name + "\x00"), nil
+}
+
+func (e *namesByNameIndexer) FromArgs(args ...interface{}) ([]byte, error) {
+	if len(args) != 1 {
+		return nil, fmt.Errorf("must provide only a single argument")
+	}
+	arg, ok := args[0].(string)
+	if !ok {
+		return nil, fmt.Errorf("argument must be a string: %#v", args[0])
+	}
+	// Add the null character as a terminator
+	arg += "\x00"
+	return []byte(arg), nil
+}
+
+// namesByContainerIDIndexer is used to index container names by container ID.
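+// Unlike the name index, this index is non-unique: a container may hold
+// several names at once.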
+type namesByContainerIDIndexer struct{}
+
+func (e *namesByContainerIDIndexer) FromObject(obj interface{}) (bool, []byte, error) {
+	n, ok := obj.(nameAssociation)
+	if !ok {
+		return false, nil, fmt.Errorf(`%T does not have type "nameAssociation"`, obj)
+	}
+
+	// Add the null character as a terminator
+	return true, []byte(n.containerID + "\x00"), nil
+}
+
+func (e *namesByContainerIDIndexer) FromArgs(args ...interface{}) ([]byte, error) {
+	if len(args) != 1 {
+		return nil, fmt.Errorf("must provide only a single argument")
+	}
+	arg, ok := args[0].(string)
+	if !ok {
+		return nil, fmt.Errorf("argument must be a string: %#v", args[0])
+	}
+	// Add the null character as a terminator
+	arg += "\x00"
+	return []byte(arg), nil
+}
diff --git a/vendor/github.com/moby/moby/container/view_test.go b/vendor/github.com/moby/moby/container/view_test.go
new file mode 100644
index 000000000..09ba34383
--- /dev/null
+++ b/vendor/github.com/moby/moby/container/view_test.go
@@ -0,0 +1,153 @@
+package container
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+
+	containertypes "github.com/docker/docker/api/types/container"
+	"github.com/pborman/uuid"
+	"github.com/stretchr/testify/assert"
+)
+
+var root string
+
+func TestMain(m *testing.M) {
+	var err error
+	root, err = ioutil.TempDir("", "docker-container-test-")
+	if err != nil {
+		panic(err)
+	}
+	defer os.RemoveAll(root)
+
+	os.Exit(m.Run())
+}
+
+func newContainer(t *testing.T) *Container {
+	var (
+		id    = uuid.New()
+		cRoot = filepath.Join(root, id)
+	)
+	if err := os.MkdirAll(cRoot, 0755); err != nil {
+		t.Fatal(err)
+	}
+	c := NewBaseContainer(id, cRoot)
+	c.HostConfig = &containertypes.HostConfig{}
+	return c
+}
+
+func TestViewSaveDelete(t *testing.T) {
+	db, err := NewViewDB()
+	if err != nil {
+		t.Fatal(err)
+	}
+	c := newContainer(t)
+	if err := c.CheckpointTo(db); err != nil {
+		t.Fatal(err)
+	}
+	if err := db.Delete(c); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestViewAll(t *testing.T) {
+	var (
+		db, _ = NewViewDB()
+		one   = newContainer(t)
+		two   = newContainer(t)
+	)
+	one.Pid = 10
+	if err := one.CheckpointTo(db); err != nil {
+		t.Fatal(err)
+	}
+	two.Pid = 20
+	if err := two.CheckpointTo(db); err != nil {
+		t.Fatal(err)
+	}
+
+	all, err := db.Snapshot().All()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if l := len(all); l != 2 {
+		t.Fatalf("expected 2 items, got %d", l)
+	}
+	byID := make(map[string]Snapshot)
+	for i := range all {
+		byID[all[i].ID] = all[i]
+	}
+	if s, ok := byID[one.ID]; !ok || s.Pid != 10 {
+		t.Fatalf("unexpected snapshot for id=%s: %v", one.ID, s)
+	}
+	if s, ok := byID[two.ID]; !ok || s.Pid != 20 {
+		t.Fatalf("unexpected snapshot for id=%s: %v", two.ID, s)
+	}
+}
+
+func TestViewGet(t *testing.T) {
+	var (
+		db, _ = NewViewDB()
+		one   = newContainer(t)
+	)
+	one.ImageID = "some-image-123"
+	if err := one.CheckpointTo(db); err != nil {
+		t.Fatal(err)
+	}
+	s, err := db.Snapshot().Get(one.ID)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if s == nil || s.ImageID != "some-image-123" {
+		t.Fatalf("expected ImageID=some-image-123. Got: %v", s)
+	}
+}
+
+func TestNames(t *testing.T) {
+	db, err := NewViewDB()
+	if err != nil {
+		t.Fatal(err)
+	}
+	assert.NoError(t, db.ReserveName("name1", "containerid1"))
+	assert.NoError(t, db.ReserveName("name1", "containerid1")) // idempotent
+	assert.NoError(t, db.ReserveName("name2", "containerid2"))
+	assert.EqualError(t, db.ReserveName("name2", "containerid3"), ErrNameReserved.Error())
+
+	// Releasing a name allows the name to point to something else later.
+	assert.NoError(t, db.ReleaseName("name2"))
+	assert.NoError(t, db.ReserveName("name2", "containerid3"))
+
+	view := db.Snapshot()
+
+	id, err := view.GetID("name1")
+	assert.NoError(t, err)
+	assert.Equal(t, "containerid1", id)
+
+	id, err = view.GetID("name2")
+	assert.NoError(t, err)
+	assert.Equal(t, "containerid3", id)
+
+	_, err = view.GetID("notreserved")
+	assert.EqualError(t, err, ErrNameNotReserved.Error())
+
+	// Releasing and re-reserving a name doesn't affect the snapshot.
+	assert.NoError(t, db.ReleaseName("name2"))
+	assert.NoError(t, db.ReserveName("name2", "containerid4"))
+
+	id, err = view.GetID("name1")
+	assert.NoError(t, err)
+	assert.Equal(t, "containerid1", id)
+
+	id, err = view.GetID("name2")
+	assert.NoError(t, err)
+	assert.Equal(t, "containerid3", id)
+
+	// GetAllNames
+	assert.Equal(t, map[string][]string{"containerid1": {"name1"}, "containerid3": {"name2"}}, view.GetAllNames())
+
+	assert.NoError(t, db.ReserveName("name3", "containerid1"))
+	assert.NoError(t, db.ReserveName("name4", "containerid1"))
+
+	view = db.Snapshot()
+	assert.Equal(t, map[string][]string{"containerid1": {"name1", "name3", "name4"}, "containerid4": {"name2"}}, view.GetAllNames())
}
diff --git a/vendor/github.com/moby/moby/contrib/README.md b/vendor/github.com/moby/moby/contrib/README.md
new file mode 100644
index 000000000..92b1d9443
--- /dev/null
+++ b/vendor/github.com/moby/moby/contrib/README.md
@@ -0,0 +1,4 @@
+The `contrib` directory contains scripts, images, and other helpful things
+which are not part of the core docker distribution. Please note that they
+could be out of date, since they do not receive the same attention as the
+rest of the repository.
diff --git a/vendor/github.com/moby/moby/contrib/REVIEWERS b/vendor/github.com/moby/moby/contrib/REVIEWERS new file mode 100644 index 000000000..18e05a307 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/REVIEWERS @@ -0,0 +1 @@ +Tianon Gravi (@tianon) diff --git a/vendor/github.com/moby/moby/contrib/apparmor/main.go b/vendor/github.com/moby/moby/contrib/apparmor/main.go new file mode 100644 index 000000000..f4a2978b8 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/apparmor/main.go @@ -0,0 +1,56 @@ +package main + +import ( + "fmt" + "log" + "os" + "path" + "text/template" + + "github.com/docker/docker/pkg/aaparser" +) + +type profileData struct { + Version int +} + +func main() { + if len(os.Args) < 2 { + log.Fatal("pass a filename to save the profile in.") + } + + // parse the arg + apparmorProfilePath := os.Args[1] + + version, err := aaparser.GetVersion() + if err != nil { + log.Fatal(err) + } + data := profileData{ + Version: version, + } + fmt.Printf("apparmor_parser is of version %+v\n", data) + + // parse the template + compiled, err := template.New("apparmor_profile").Parse(dockerProfileTemplate) + if err != nil { + log.Fatalf("parsing template failed: %v", err) + } + + // make sure /etc/apparmor.d exists + if err := os.MkdirAll(path.Dir(apparmorProfilePath), 0755); err != nil { + log.Fatal(err) + } + + f, err := os.OpenFile(apparmorProfilePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + log.Fatal(err) + } + defer f.Close() + + if err := compiled.Execute(f, data); err != nil { + log.Fatalf("executing template failed: %v", err) + } + + fmt.Printf("created apparmor profile for version %+v at %q\n", data, apparmorProfilePath) +} diff --git a/vendor/github.com/moby/moby/contrib/apparmor/template.go b/vendor/github.com/moby/moby/contrib/apparmor/template.go new file mode 100644 index 000000000..e5e1c8bed --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/apparmor/template.go @@ -0,0 +1,268 @@ +package main + +const dockerProfileTemplate = `@{DOCKER_GRAPH_PATH}=/var/lib/docker + +profile /usr/bin/docker (attach_disconnected, complain) { + # Prevent following links to these files during container setup. 
+ deny /etc/** mkl, + deny /dev/** kl, + deny /sys/** mkl, + deny /proc/** mkl, + + mount -> @{DOCKER_GRAPH_PATH}/**, + mount -> /, + mount -> /proc/**, + mount -> /sys/**, + mount -> /run/docker/netns/**, + mount -> /.pivot_root[0-9]*/, + + / r, + + umount, + pivot_root, +{{if ge .Version 209000}} + signal (receive) peer=@{profile_name}, + signal (receive) peer=unconfined, + signal (send), +{{end}} + network, + capability, + owner /** rw, + @{DOCKER_GRAPH_PATH}/** rwl, + @{DOCKER_GRAPH_PATH}/linkgraph.db k, + @{DOCKER_GRAPH_PATH}/network/files/boltdb.db k, + @{DOCKER_GRAPH_PATH}/network/files/local-kv.db k, + @{DOCKER_GRAPH_PATH}/[0-9]*.[0-9]*/linkgraph.db k, + + # For non-root client use: + /dev/urandom r, + /dev/null rw, + /dev/pts/[0-9]* rw, + /run/docker.sock rw, + /proc/** r, + /proc/[0-9]*/attr/exec w, + /sys/kernel/mm/hugepages/ r, + /etc/localtime r, + /etc/ld.so.cache r, + /etc/passwd r, + +{{if ge .Version 209000}} + ptrace peer=@{profile_name}, + ptrace (read) peer=docker-default, + deny ptrace (trace) peer=docker-default, + deny ptrace peer=/usr/bin/docker///bin/ps, +{{end}} + + /usr/lib/** rm, + /lib/** rm, + + /usr/bin/docker pix, + /sbin/xtables-multi rCx, + /sbin/iptables rCx, + /sbin/modprobe rCx, + /sbin/auplink rCx, + /sbin/mke2fs rCx, + /sbin/tune2fs rCx, + /sbin/blkid rCx, + /bin/kmod rCx, + /usr/bin/xz rCx, + /bin/ps rCx, + /bin/tar rCx, + /bin/cat rCx, + /sbin/zfs rCx, + /sbin/apparmor_parser rCx, + +{{if ge .Version 209000}} + # Transitions + change_profile -> docker-*, + change_profile -> unconfined, +{{end}} + + profile /bin/cat (complain) { + /etc/ld.so.cache r, + /lib/** rm, + /dev/null rw, + /proc r, + /bin/cat mr, + + # For reading in 'docker stats': + /proc/[0-9]*/net/dev r, + } + profile /bin/ps (complain) { + /etc/ld.so.cache r, + /etc/localtime r, + /etc/passwd r, + /etc/nsswitch.conf r, + /lib/** rm, + /proc/[0-9]*/** r, + /dev/null rw, + /bin/ps mr, + +{{if ge .Version 209000}} + # We don't need ptrace so we'll deny and ignore the error. + deny ptrace (read, trace), +{{end}} + + # Quiet dac_override denials + deny capability dac_override, + deny capability dac_read_search, + deny capability sys_ptrace, + + /dev/tty r, + /proc/stat r, + /proc/cpuinfo r, + /proc/meminfo r, + /proc/uptime r, + /sys/devices/system/cpu/online r, + /proc/sys/kernel/pid_max r, + /proc/ r, + /proc/tty/drivers r, + } + profile /sbin/iptables (complain) { +{{if ge .Version 209000}} + signal (receive) peer=/usr/bin/docker, +{{end}} + capability net_admin, + } + profile /sbin/auplink flags=(attach_disconnected, complain) { +{{if ge .Version 209000}} + signal (receive) peer=/usr/bin/docker, +{{end}} + capability sys_admin, + capability dac_override, + + @{DOCKER_GRAPH_PATH}/aufs/** rw, + @{DOCKER_GRAPH_PATH}/tmp/** rw, + # For user namespaces: + @{DOCKER_GRAPH_PATH}/[0-9]*.[0-9]*/** rw, + + /sys/fs/aufs/** r, + /lib/** rm, + /apparmor/.null r, + /dev/null rw, + /etc/ld.so.cache r, + /sbin/auplink rm, + /proc/fs/aufs/** rw, + /proc/[0-9]*/mounts rw, + } + profile /sbin/modprobe /bin/kmod (complain) { +{{if ge .Version 209000}} + signal (receive) peer=/usr/bin/docker, +{{end}} + capability sys_module, + /etc/ld.so.cache r, + /lib/** rm, + /dev/null rw, + /apparmor/.null rw, + /sbin/modprobe rm, + /bin/kmod rm, + /proc/cmdline r, + /sys/module/** r, + /etc/modprobe.d{/,/**} r, + } + # xz works via pipes, so we do not need access to the filesystem. 
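+  # Hence the explicit deny rules on /proc and /sys inside the profile below.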
+ profile /usr/bin/xz (complain) { +{{if ge .Version 209000}} + signal (receive) peer=/usr/bin/docker, +{{end}} + /etc/ld.so.cache r, + /lib/** rm, + /usr/bin/xz rm, + deny /proc/** rw, + deny /sys/** rw, + } + profile /sbin/xtables-multi (attach_disconnected, complain) { + /etc/ld.so.cache r, + /lib/** rm, + /sbin/xtables-multi rm, + /apparmor/.null w, + /dev/null rw, + + /proc r, + + capability net_raw, + capability net_admin, + network raw, + } + profile /sbin/zfs (attach_disconnected, complain) { + file, + capability, + } + profile /sbin/mke2fs (complain) { + /sbin/mke2fs rm, + + /lib/** rm, + + /apparmor/.null w, + + /etc/ld.so.cache r, + /etc/mke2fs.conf r, + /etc/mtab r, + + /dev/dm-* rw, + /dev/urandom r, + /dev/null rw, + + /proc/swaps r, + /proc/[0-9]*/mounts r, + } + profile /sbin/tune2fs (complain) { + /sbin/tune2fs rm, + + /lib/** rm, + + /apparmor/.null w, + + /etc/blkid.conf r, + /etc/mtab r, + /etc/ld.so.cache r, + + /dev/null rw, + /dev/.blkid.tab r, + /dev/dm-* rw, + + /proc/swaps r, + /proc/[0-9]*/mounts r, + } + profile /sbin/blkid (complain) { + /sbin/blkid rm, + + /lib/** rm, + /apparmor/.null w, + + /etc/ld.so.cache r, + /etc/blkid.conf r, + + /dev/null rw, + /dev/.blkid.tab rl, + /dev/.blkid.tab* rwl, + /dev/dm-* r, + + /sys/devices/virtual/block/** r, + + capability mknod, + + mount -> @{DOCKER_GRAPH_PATH}/**, + } + profile /sbin/apparmor_parser (complain) { + /sbin/apparmor_parser rm, + + /lib/** rm, + + /etc/ld.so.cache r, + /etc/apparmor/** r, + /etc/apparmor.d/** r, + /etc/apparmor.d/cache/** w, + + /dev/null rw, + + /sys/kernel/security/apparmor/** r, + /sys/kernel/security/apparmor/.replace w, + + /proc/[0-9]*/mounts r, + /proc/sys/kernel/osrelease r, + /proc r, + + capability mac_admin, + } +}` diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/aarch64/build.sh b/vendor/github.com/moby/moby/contrib/builder/deb/aarch64/build.sh new file mode 100755 index 000000000..f8b25067b --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/aarch64/build.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + docker build -t "dockercore/builder-deb:$(basename "$d")" "$d" +done diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/aarch64/debian-jessie/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/deb/aarch64/debian-jessie/Dockerfile new file mode 100644 index 000000000..0e80da4aa --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/aarch64/debian-jessie/Dockerfile @@ -0,0 +1,25 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/aarch64/generate.sh"! +# + +FROM aarch64/debian:jessie + +RUN echo deb http://ftp.debian.org/debian jessie-backports main > /etc/apt/sources.list.d/backports.list +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libsystemd-journal-dev golang-1.6-go libseccomp-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +RUN update-alternatives --install /usr/bin/go go /usr/lib/go-1.6/bin/go 100 + +# Install Go +# aarch64 doesn't have official go binaries, so use the version of go installed from +# the image to build go from source. 
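+# (Go 1.5 and later are themselves written in Go, so make.bash needs an
+# existing toolchain to bootstrap from; GOROOT_BOOTSTRAP below points it
+# at the golang-1.6-go toolchain installed above.)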
+ENV GO_VERSION 1.8.3 +RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \ + && cd /usr/src/go/src \ + && GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash + +ENV PATH /usr/src/go/bin:$PATH + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor selinux seccomp +ENV RUNC_BUILDTAGS apparmor selinux seccomp diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/aarch64/debian-stretch/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/deb/aarch64/debian-stretch/Dockerfile new file mode 100644 index 000000000..caee1917b --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/aarch64/debian-stretch/Dockerfile @@ -0,0 +1,22 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/aarch64/generate.sh"! +# + +FROM aarch64/debian:stretch + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libsystemd-dev golang-go libseccomp-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +# Install Go +# aarch64 doesn't have official go binaries, so use the version of go installed from +# the image to build go from source. +ENV GO_VERSION 1.8.3 +RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \ + && cd /usr/src/go/src \ + && GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash + +ENV PATH /usr/src/go/bin:$PATH + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor selinux seccomp +ENV RUNC_BUILDTAGS apparmor selinux seccomp diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/aarch64/generate.sh b/vendor/github.com/moby/moby/contrib/builder/deb/aarch64/generate.sh new file mode 100755 index 000000000..7c9217d04 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/aarch64/generate.sh @@ -0,0 +1,135 @@ +#!/usr/bin/env bash +set -e + +# This file is used to auto-generate Dockerfiles for making debs via 'make deb' +# +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh ubuntu-trusty +# to only update ubuntu-trusty/Dockerfile +# or: ./generate.sh ubuntu-newversion +# to create a new folder and a Dockerfile within it +# +# Note: non-LTS versions are not guaranteed to work. + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + echo "${versions[@]}" + distro="${version%-*}" + suite="${version##*-}" + from="aarch64/${distro}:${suite}" + + mkdir -p "$version" + echo "$version -> FROM $from" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/aarch64/generate.sh"! 
+	#
+
+	FROM $from
+
+	EOF
+
+	dockerBuildTags='apparmor selinux'
+	runcBuildTags='apparmor selinux'
+
+	# this list is sorted alphabetically; please keep it that way
+	packages=(
+		apparmor # for apparmor_parser for testing the profile
+		bash-completion # for bash-completion debhelper integration
+		btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible)
+		build-essential # "essential for building Debian packages"
+		cmake # tini dep
+		curl ca-certificates # for downloading Go
+		debhelper # for easy ".deb" building
+		dh-apparmor # for apparmor debhelper
+		dh-systemd # for systemd debhelper integration
+		git # for "git commit" info in "docker -v"
+		libapparmor-dev # for "sys/apparmor.h"
+		libdevmapper-dev # for "libdevmapper.h"
+		pkg-config # for detecting things like libsystemd-journal dynamically
+		vim-common # tini dep
+	)
+
+	case "$suite" in
+		trusty)
+			packages+=( libsystemd-journal-dev )
+			# aarch64 doesn't have an official downloadable binary for go.
+			# And gccgo for trusty only includes a Go 1.2 implementation, which
+			# is too old to build current go source; fortunately, trusty has a
+			# golang-1.6-go package that can be used as a bootstrap.
+			packages+=( golang-1.6-go )
+			;;
+		jessie)
+			packages+=( libsystemd-journal-dev )
+			# aarch64 doesn't have an official downloadable binary for go.
+			# And gccgo for jessie only includes a Go 1.2 implementation, which
+			# is too old to build current go source; fortunately, jessie backports
+			# has a golang-1.6-go package that can be used as a bootstrap.
+			packages+=( golang-1.6-go libseccomp-dev )
+
+			dockerBuildTags="$dockerBuildTags seccomp"
+			runcBuildTags="$runcBuildTags seccomp"
+			;;
+		stretch|xenial)
+			packages+=( libsystemd-dev )
+			packages+=( golang-go libseccomp-dev )
+
+			dockerBuildTags="$dockerBuildTags seccomp"
+			runcBuildTags="$runcBuildTags seccomp"
+			;;
+		*)
+			echo "Unsupported distro: $distro:$suite"
+			rm -fr "$version"
+			exit 1
+			;;
+	esac
+
+	case "$suite" in
+		jessie)
+			echo 'RUN echo deb http://ftp.debian.org/debian jessie-backports main > /etc/apt/sources.list.d/backports.list' >> "$version/Dockerfile"
+			;;
+		*)
+			;;
+	esac
+
+	# update and install packages
+	echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile"
+	echo >> "$version/Dockerfile"
+
+	case "$suite" in
+		jessie|trusty)
+			echo 'RUN update-alternatives --install /usr/bin/go go /usr/lib/go-1.6/bin/go 100' >> "$version/Dockerfile"
+			echo >> "$version/Dockerfile"
+			;;
+		*)
+			;;
+	esac
+
+	echo "# Install Go" >> "$version/Dockerfile"
+	echo "# aarch64 doesn't have official go binaries, so use the version of go installed from" >> "$version/Dockerfile"
+	echo "# the image to build go from source."
>> "$version/Dockerfile" + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile.aarch64 >> "$version/Dockerfile" + echo 'RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \' >> "$version/Dockerfile" + echo ' && cd /usr/src/go/src \' >> "$version/Dockerfile" + echo ' && GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash' >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + echo 'ENV PATH /usr/src/go/bin:$PATH' >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + echo "ENV AUTO_GOPATH 1" >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + echo "ENV DOCKER_BUILDTAGS $dockerBuildTags" >> "$version/Dockerfile" + echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" +done diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/aarch64/ubuntu-trusty/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/deb/aarch64/ubuntu-trusty/Dockerfile new file mode 100644 index 000000000..6f4a3e961 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/aarch64/ubuntu-trusty/Dockerfile @@ -0,0 +1,24 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/aarch64/generate.sh"! +# + +FROM aarch64/ubuntu:trusty + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libsystemd-journal-dev golang-1.6-go --no-install-recommends && rm -rf /var/lib/apt/lists/* + +RUN update-alternatives --install /usr/bin/go go /usr/lib/go-1.6/bin/go 100 + +# Install Go +# aarch64 doesn't have official go binaries, so use the version of go installed from +# the image to build go from source. +ENV GO_VERSION 1.8.3 +RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \ + && cd /usr/src/go/src \ + && GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash + +ENV PATH /usr/src/go/bin:$PATH + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/aarch64/ubuntu-xenial/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/deb/aarch64/ubuntu-xenial/Dockerfile new file mode 100644 index 000000000..19a510b24 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/aarch64/ubuntu-xenial/Dockerfile @@ -0,0 +1,22 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/aarch64/generate.sh"! +# + +FROM aarch64/ubuntu:xenial + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libsystemd-dev golang-go libseccomp-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +# Install Go +# aarch64 doesn't have official go binaries, so use the version of go installed from +# the image to build go from source. 
+ENV GO_VERSION 1.8.3 +RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \ + && cd /usr/src/go/src \ + && GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash + +ENV PATH /usr/src/go/bin:$PATH + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor selinux seccomp +ENV RUNC_BUILDTAGS apparmor selinux seccomp diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/amd64/README.md b/vendor/github.com/moby/moby/contrib/builder/deb/amd64/README.md new file mode 100644 index 000000000..20a0ff100 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/amd64/README.md @@ -0,0 +1,5 @@ +# `dockercore/builder-deb` + +This image's tags contain the dependencies for building Docker `.deb`s for each of the Debian-based platforms Docker targets. + +To add new tags, see [`contrib/builder/deb/amd64` in https://github.com/docker/docker](https://github.com/docker/docker/tree/master/contrib/builder/deb/amd64), specifically the `generate.sh` script, whose usage is described in a comment at the top of the file. diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/amd64/build.sh b/vendor/github.com/moby/moby/contrib/builder/deb/amd64/build.sh new file mode 100755 index 000000000..f8b25067b --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/amd64/build.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + docker build -t "dockercore/builder-deb:$(basename "$d")" "$d" +done diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/amd64/debian-jessie/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/deb/amd64/debian-jessie/Dockerfile new file mode 100644 index 000000000..9b6233c92 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/amd64/debian-jessie/Dockerfile @@ -0,0 +1,20 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! +# + +FROM debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/amd64/debian-stretch/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/deb/amd64/debian-stretch/Dockerfile new file mode 100644 index 000000000..d95c194c3 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/amd64/debian-stretch/Dockerfile @@ -0,0 +1,20 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! 
+# + +FROM debian:stretch + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libseccomp-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/amd64/debian-wheezy/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/deb/amd64/debian-wheezy/Dockerfile new file mode 100644 index 000000000..56763a572 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/amd64/debian-wheezy/Dockerfile @@ -0,0 +1,22 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! +# + +FROM debian:wheezy-backports + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list.d/backports.list + +RUN apt-get update && apt-get install -y -t wheezy-backports btrfs-tools --no-install-recommends && rm -rf /var/lib/apt/lists/* +RUN apt-get update && apt-get install -y apparmor bash-completion build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/amd64/generate.sh b/vendor/github.com/moby/moby/contrib/builder/deb/amd64/generate.sh new file mode 100755 index 000000000..5708defc1 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/amd64/generate.sh @@ -0,0 +1,130 @@ +#!/usr/bin/env bash +set -e + +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh debian-jessie +# to only update debian-jessie/Dockerfile +# or: ./generate.sh debian-newversion +# to create a new folder and a Dockerfile within it + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + distro="${version%-*}" + suite="${version##*-}" + from="${distro}:${suite}" + + case "$from" in + debian:wheezy) + # add -backports, like our users have to + from+='-backports' + ;; + esac + + mkdir -p "$version" + echo "$version -> FROM $from" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! 
+ # + + FROM $from + EOF + + echo >> "$version/Dockerfile" + + if [ "$distro" = "debian" ]; then + cat >> "$version/Dockerfile" <<-'EOF' + # allow replacing httpredir or deb mirror + ARG APT_MIRROR=deb.debian.org + RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + EOF + + if [ "$suite" = "wheezy" ]; then + cat >> "$version/Dockerfile" <<-'EOF' + RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list.d/backports.list + EOF + fi + + echo "" >> "$version/Dockerfile" + fi + + extraBuildTags= + runcBuildTags= + + # this list is sorted alphabetically; please keep it that way + packages=( + apparmor # for apparmor_parser for testing the profile + bash-completion # for bash-completion debhelper integration + btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible) + build-essential # "essential for building Debian packages" + cmake # tini dep + curl ca-certificates # for downloading Go + debhelper # for easy ".deb" building + dh-apparmor # for apparmor debhelper + dh-systemd # for systemd debhelper integration + git # for "git commit" info in "docker -v" + libapparmor-dev # for "sys/apparmor.h" + libdevmapper-dev # for "libdevmapper.h" + libseccomp-dev # for "seccomp.h" & "libseccomp.so" + pkg-config # for detecting things like libsystemd-journal dynamically + vim-common # tini dep + ) + # packaging for "sd-journal.h" and libraries varies + case "$suite" in + wheezy) ;; + jessie|trusty) packages+=( libsystemd-journal-dev ) ;; + *) packages+=( libsystemd-dev ) ;; + esac + + # debian wheezy does not have the right libseccomp libs + # debian jessie & ubuntu trusty have a libseccomp < 2.2.1 :( + case "$suite" in + wheezy|jessie|trusty) + packages=( "${packages[@]/libseccomp-dev}" ) + runcBuildTags="apparmor selinux" + ;; + *) + extraBuildTags+=' seccomp' + runcBuildTags="apparmor seccomp selinux" + ;; + esac + + if [ "$suite" = 'wheezy' ]; then + # pull a couple packages from backports explicitly + # (build failures otherwise) + backportsPackages=( btrfs-tools ) + for pkg in "${backportsPackages[@]}"; do + packages=( "${packages[@]/$pkg}" ) + done + echo "RUN apt-get update && apt-get install -y -t $suite-backports ${backportsPackages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" + fi + + echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile >> "$version/Dockerfile" + echo 'RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" + echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + # print build tags in alphabetical order + buildTags=$( echo "apparmor selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) + + echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" + echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" +done diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile new file mode 100644 index 000000000..f6201164f --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile @@ 
-0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! +# + +FROM ubuntu:trusty + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/amd64/ubuntu-xenial/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/deb/amd64/ubuntu-xenial/Dockerfile new file mode 100644 index 000000000..6b9370dce --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/amd64/ubuntu-xenial/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! +# + +FROM ubuntu:xenial + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libseccomp-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/amd64/ubuntu-yakkety/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/deb/amd64/ubuntu-yakkety/Dockerfile new file mode 100644 index 000000000..232c86f21 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/amd64/ubuntu-yakkety/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! +# + +FROM ubuntu:yakkety + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libseccomp-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/amd64/ubuntu-zesty/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/deb/amd64/ubuntu-zesty/Dockerfile new file mode 100644 index 000000000..0b4e45b34 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/amd64/ubuntu-zesty/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! 
+# + +FROM ubuntu:zesty + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libseccomp-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/armhf/debian-jessie/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/deb/armhf/debian-jessie/Dockerfile new file mode 100644 index 000000000..9a50cd17f --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/armhf/debian-jessie/Dockerfile @@ -0,0 +1,20 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! +# + +FROM armhf/debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/armhf/generate.sh b/vendor/github.com/moby/moby/contrib/builder/deb/armhf/generate.sh new file mode 100755 index 000000000..285dbbf52 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/armhf/generate.sh @@ -0,0 +1,139 @@ +#!/usr/bin/env bash +set -e + +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh debian-jessie +# to only update debian-jessie/Dockerfile +# or: ./generate.sh debian-newversion +# to create a new folder and a Dockerfile within it + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + distro="${version%-*}" + suite="${version##*-}" + from="${distro}:${suite}" + + case "$from" in + raspbian:jessie) + from="resin/rpi-raspbian:jessie" + ;; + *) + from="armhf/$from" + ;; + esac + + mkdir -p "$version" + echo "$version -> FROM $from" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! 
+	#
+
+	FROM $from
+	EOF
+
+	echo >> "$version/Dockerfile"
+
+	if [[ "$distro" = "debian" || "$distro" = "raspbian" ]]; then
+		cat >> "$version/Dockerfile" <<-'EOF'
+			# allow replacing httpredir or deb mirror
+			ARG APT_MIRROR=deb.debian.org
+			RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list
+		EOF
+
+		if [ "$suite" = "wheezy" ]; then
+			cat >> "$version/Dockerfile" <<-'EOF'
+				RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list.d/backports.list
+			EOF
+		fi
+
+		echo "" >> "$version/Dockerfile"
+	fi
+
+	extraBuildTags=
+	runcBuildTags=
+
+	# this list is sorted alphabetically; please keep it that way
+	packages=(
+		apparmor # for apparmor_parser for testing the profile
+		bash-completion # for bash-completion debhelper integration
+		btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible)
+		build-essential # "essential for building Debian packages"
+		cmake # tini dep
+		curl ca-certificates # for downloading Go
+		debhelper # for easy ".deb" building
+		dh-apparmor # for apparmor debhelper
+		dh-systemd # for systemd debhelper integration
+		git # for "git commit" info in "docker -v"
+		libapparmor-dev # for "sys/apparmor.h"
+		libdevmapper-dev # for "libdevmapper.h"
+		libseccomp-dev # for "seccomp.h" & "libseccomp.so"
+		pkg-config # for detecting things like libsystemd-journal dynamically
+		vim-common # tini dep
+	)
+	# packaging for "sd-journal.h" and libraries varies
+	case "$suite" in
+		wheezy) ;;
+		jessie|trusty) packages+=( libsystemd-journal-dev ) ;;
+		*) packages+=( libsystemd-dev ) ;;
+	esac
+
+	# debian wheezy does not have the right libseccomp libs
+	# debian jessie & ubuntu trusty have a libseccomp < 2.2.1 :(
+	case "$suite" in
+		wheezy|jessie|trusty)
+			packages=( "${packages[@]/libseccomp-dev}" )
+			runcBuildTags="apparmor selinux"
+			;;
+		*)
+			extraBuildTags+=' seccomp'
+			runcBuildTags="apparmor seccomp selinux"
+			;;
+	esac
+
+	if [ "$suite" = 'wheezy' ]; then
+		# pull a couple packages from backports explicitly
+		# (build failures otherwise)
+		backportsPackages=( btrfs-tools )
+		for pkg in "${backportsPackages[@]}"; do
+			packages=( "${packages[@]/$pkg}" )
+		done
+		echo "RUN apt-get update && apt-get install -y -t $suite-backports ${backportsPackages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile"
+	fi
+
+	echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile"
+
+	echo >> "$version/Dockerfile"
+
+	awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile.armhf >> "$version/Dockerfile"
+	if [ "$distro" == 'raspbian' ];
+	then
+		cat <<EOF >> "$version/Dockerfile"
+# GOARM is the ARM architecture version which is unrelated to the above Golang version
+ENV GOARM 6
+EOF
+	fi
+	echo 'RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile"
+	echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile"
+
+	echo >> "$version/Dockerfile"
+
+	echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile"
+
+	echo >> "$version/Dockerfile"
+
+	# print build tags in alphabetical order
+	buildTags=$( echo "apparmor selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' )
+
+	echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile"
+	echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile"
+done
diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/armhf/raspbian-jessie/Dockerfile
b/vendor/github.com/moby/moby/contrib/builder/deb/armhf/raspbian-jessie/Dockerfile new file mode 100644 index 000000000..810cb2ad3 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/armhf/raspbian-jessie/Dockerfile @@ -0,0 +1,22 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! +# + +FROM resin/rpi-raspbian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.3 +# GOARM is the ARM architecture version which is unrelated to the above Golang version +ENV GOARM 6 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/armhf/ubuntu-trusty/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/deb/armhf/ubuntu-trusty/Dockerfile new file mode 100644 index 000000000..530f9fccd --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/armhf/ubuntu-trusty/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! +# + +FROM armhf/ubuntu:trusty + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/armhf/ubuntu-xenial/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/deb/armhf/ubuntu-xenial/Dockerfile new file mode 100644 index 000000000..a7f79ee3d --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/armhf/ubuntu-xenial/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! 
+# + +FROM armhf/ubuntu:xenial + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libseccomp-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/armhf/ubuntu-yakkety/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/deb/armhf/ubuntu-yakkety/Dockerfile new file mode 100644 index 000000000..69f1bedb4 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/armhf/ubuntu-yakkety/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! +# + +FROM armhf/ubuntu:yakkety + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libseccomp-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/ppc64le/build.sh b/vendor/github.com/moby/moby/contrib/builder/deb/ppc64le/build.sh new file mode 100755 index 000000000..83dbf943f --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/ppc64le/build.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + docker build -t "dockercore/builder-deb:$(basename "$d")" "$d" +done diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/ppc64le/generate.sh b/vendor/github.com/moby/moby/contrib/builder/deb/ppc64le/generate.sh new file mode 100755 index 000000000..bf777ab5d --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/ppc64le/generate.sh @@ -0,0 +1,101 @@ +#!/usr/bin/env bash +set -e + +# This file is used to auto-generate Dockerfiles for making debs via 'make deb' +# +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh ubuntu-xenial +# to only update ubuntu-xenial/Dockerfile +# or: ./generate.sh ubuntu-newversion +# to create a new folder and a Dockerfile within it + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + echo "${versions[@]}" + distro="${version%-*}" + suite="${version##*-}" + from="ppc64le/${distro}:${suite}" + + mkdir -p "$version" + echo "$version -> FROM $from" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/ppc64le/generate.sh"! 
+ # + + FROM $from + + EOF + + extraBuildTags= + runcBuildTags= + + # this list is sorted alphabetically; please keep it that way + packages=( + apparmor # for apparmor_parser for testing the profile + bash-completion # for bash-completion debhelper integration + btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible) + build-essential # "essential for building Debian packages" + cmake # tini dep + curl ca-certificates # for downloading Go + debhelper # for easy ".deb" building + dh-apparmor # for apparmor debhelper + dh-systemd # for systemd debhelper integration + git # for "git commit" info in "docker -v" + libapparmor-dev # for "sys/apparmor.h" + libdevmapper-dev # for "libdevmapper.h" + pkg-config # for detecting things like libsystemd-journal dynamically + vim-common # tini dep + ) + + case "$suite" in + trusty) + packages+=( libsystemd-journal-dev ) + ;; + *) + # libseccomp isn't available until ubuntu xenial and is required for "seccomp.h" & "libseccomp.so" + packages+=( libseccomp-dev ) + packages+=( libsystemd-dev ) + ;; + esac + + # buildtags + case "$suite" in + # trusty has no seccomp package + trusty) + runcBuildTags="apparmor selinux" + ;; + # ppc64le support was backported into libseccomp 2.2.3-2, + # so enable seccomp by default + *) + extraBuildTags+=' seccomp' + runcBuildTags="apparmor seccomp selinux" + ;; + esac + + # update and install packages + echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile.ppc64le >> "$version/Dockerfile" + echo 'RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" + echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + # print build tags in alphabetical order + buildTags=$( echo "apparmor selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) + echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" + echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" +done diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/ppc64le/ubuntu-trusty/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/deb/ppc64le/ubuntu-trusty/Dockerfile new file mode 100644 index 000000000..0841a990f --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/ppc64le/ubuntu-trusty/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/ppc64le/generate.sh"! 
+# + +FROM ppc64le/ubuntu:trusty + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.3 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/ppc64le/ubuntu-xenial/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/deb/ppc64le/ubuntu-xenial/Dockerfile new file mode 100644 index 000000000..f9dd2506e --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/ppc64le/ubuntu-xenial/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/ppc64le/generate.sh"! +# + +FROM ppc64le/ubuntu:xenial + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libseccomp-dev libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.3 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/ppc64le/ubuntu-yakkety/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/deb/ppc64le/ubuntu-yakkety/Dockerfile new file mode 100644 index 000000000..539969df7 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/ppc64le/ubuntu-yakkety/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/ppc64le/generate.sh"! 
+# + +FROM ppc64le/ubuntu:yakkety + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libseccomp-dev libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.3 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/s390x/build.sh b/vendor/github.com/moby/moby/contrib/builder/deb/s390x/build.sh new file mode 100755 index 000000000..f8b25067b --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/s390x/build.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + docker build -t "dockercore/builder-deb:$(basename "$d")" "$d" +done diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/s390x/generate.sh b/vendor/github.com/moby/moby/contrib/builder/deb/s390x/generate.sh new file mode 100755 index 000000000..c04d24ece --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/s390x/generate.sh @@ -0,0 +1,94 @@ +#!/usr/bin/env bash +set -e + +# This file is used to auto-generate Dockerfiles for making debs via 'make deb' +# +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh ubuntu-xenial +# to only update ubuntu-xenial/Dockerfile +# or: ./generate.sh ubuntu-newversion +# to create a new folder and a Dockerfile within it + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + echo "${versions[@]}" + distro="${version%-*}" + suite="${version##*-}" + from="s390x/${distro}:${suite}" + + mkdir -p "$version" + echo "$version -> FROM $from" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/s390x/generate.sh"! 
+ # + + FROM $from + + EOF + + extraBuildTags= + runcBuildTags= + + # this list is sorted alphabetically; please keep it that way + packages=( + apparmor # for apparmor_parser for testing the profile + bash-completion # for bash-completion debhelper integration + btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible) + build-essential # "essential for building Debian packages" + cmake # tini dep + curl ca-certificates # for downloading Go + debhelper # for easy ".deb" building + dh-apparmor # for apparmor debhelper + dh-systemd # for systemd debhelper integration + git # for "git commit" info in "docker -v" + libapparmor-dev # for "sys/apparmor.h" + libdevmapper-dev # for "libdevmapper.h" + libseccomp-dev # for "seccomp.h" & "libseccomp.so" + pkg-config # for detecting things like libsystemd-journal dynamically + libsystemd-dev + vim-common # tini dep + ) + + case "$suite" in + # s390x needs libseccomp 2.3.1 + xenial) + # Ubuntu Xenial has libseccomp 2.2.3 + runcBuildTags="apparmor selinux" + ;; + *) + extraBuildTags+=' seccomp' + runcBuildTags="apparmor selinux seccomp" + ;; + esac + + # update and install packages + echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile.s390x >> "$version/Dockerfile" + echo 'RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" + echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + # print build tags in alphabetical order + buildTags=$( echo "apparmor selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) + + echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" + echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" +done diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/s390x/ubuntu-xenial/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/deb/s390x/ubuntu-xenial/Dockerfile new file mode 100644 index 000000000..2ed0ad3b3 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/s390x/ubuntu-xenial/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/s390x/generate.sh"! +# + +FROM s390x/ubuntu:xenial + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libseccomp-dev pkg-config libsystemd-dev vim-common --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/moby/moby/contrib/builder/deb/s390x/ubuntu-yakkety/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/deb/s390x/ubuntu-yakkety/Dockerfile new file mode 100644 index 000000000..af79ff5af --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/deb/s390x/ubuntu-yakkety/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/s390x/generate.sh"! 
+# + +FROM s390x/ubuntu:yakkety + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libseccomp-dev pkg-config libsystemd-dev vim-common --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor seccomp selinux +ENV RUNC_BUILDTAGS apparmor selinux seccomp diff --git a/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/README.md b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/README.md new file mode 100644 index 000000000..5f2e888c7 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/README.md @@ -0,0 +1,5 @@ +# `dockercore/builder-rpm` + +This image's tags contain the dependencies for building Docker `.rpm`s for each of the RPM-based platforms Docker targets. + +To add new tags, see [`contrib/builder/rpm/amd64` in https://github.com/docker/docker](https://github.com/docker/docker/tree/master/contrib/builder/rpm/amd64), specifically the `generate.sh` script, whose usage is described in a comment at the top of the file. diff --git a/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/amazonlinux-latest/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/amazonlinux-latest/Dockerfile new file mode 100644 index 000000000..7632daf25 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/amazonlinux-latest/Dockerfile @@ -0,0 +1,18 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! +# + +FROM amazonlinux:latest + +RUN yum groupinstall -y "Development Tools" +RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel tar git cmake vim-common + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux + diff --git a/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/build.sh b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/build.sh new file mode 100755 index 000000000..1e3565a34 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/build.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + docker build -t "dockercore/builder-rpm:$(basename "$d")" "$d" +done diff --git a/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/centos-7/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/centos-7/Dockerfile new file mode 100644 index 000000000..82cb9153b --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/centos-7/Dockerfile @@ -0,0 +1,19 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! 
+# + +FROM centos:7 + +RUN yum groupinstall -y "Development Tools" +RUN yum -y swap -- remove systemd-container systemd-container-libs -- install systemd systemd-libs +RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel systemd-devel tar git cmake vim-common + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux + diff --git a/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/fedora-24/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/fedora-24/Dockerfile new file mode 100644 index 000000000..cdb922c16 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/fedora-24/Dockerfile @@ -0,0 +1,19 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! +# + +FROM fedora:24 + +RUN dnf -y upgrade +RUN dnf install -y @development-tools fedora-packager +RUN dnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel systemd-devel tar git cmake vim-common + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux + diff --git a/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/fedora-25/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/fedora-25/Dockerfile new file mode 100644 index 000000000..73e9e9a73 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/fedora-25/Dockerfile @@ -0,0 +1,19 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! 
+#
+
+FROM fedora:25
+
+RUN dnf -y upgrade
+RUN dnf install -y @development-tools fedora-packager
+RUN dnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel systemd-devel tar git cmake vim-common
+
+ENV GO_VERSION 1.8.3
+RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
+ENV PATH $PATH:/usr/local/go/bin
+
+ENV AUTO_GOPATH 1
+
+ENV DOCKER_BUILDTAGS seccomp selinux
+ENV RUNC_BUILDTAGS seccomp selinux
+
diff --git a/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/generate.sh b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/generate.sh
new file mode 100755
index 000000000..15add3f3c
--- /dev/null
+++ b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/generate.sh
@@ -0,0 +1,187 @@
+#!/usr/bin/env bash
+set -e
+
+# usage: ./generate.sh [versions]
+#    ie: ./generate.sh
+#        to update all Dockerfiles in this directory
+#    or: ./generate.sh centos-7
+#        to only update centos-7/Dockerfile
+#    or: ./generate.sh fedora-newversion
+#        to create a new folder and a Dockerfile within it
+
+cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
+
+versions=( "$@" )
+if [ ${#versions[@]} -eq 0 ]; then
+	versions=( */ )
+fi
+versions=( "${versions[@]%/}" )
+
+for version in "${versions[@]}"; do
+	distro="${version%-*}"
+	suite="${version##*-}"
+	from="${distro}:${suite}"
+	installer=yum
+
+	if [[ "$distro" == "fedora" ]]; then
+		installer=dnf
+	fi
+	if [[ "$distro" == "photon" ]]; then
+		installer=tdnf
+	fi
+
+	mkdir -p "$version"
+	echo "$version -> FROM $from"
+	cat > "$version/Dockerfile" <<-EOF
+		#
+		# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"!
+		#
+
+		FROM $from
+	EOF
+
+	echo >> "$version/Dockerfile"
+
+	extraBuildTags=
+	runcBuildTags=
+
+	case "$from" in
+		oraclelinux:6)
+			# We need a known version of the kernel-uek-devel headers to set CGO_CPPFLAGS, so grab the UEKR4 GA version
+			# This requires using yum-config-manager from yum-utils to enable the UEKR4 yum repo
+			echo "RUN yum install -y yum-utils && curl -o /etc/yum.repos.d/public-yum-ol6.repo http://yum.oracle.com/public-yum-ol6.repo && yum-config-manager -q --enable ol6_UEKR4" >> "$version/Dockerfile"
+			echo "RUN yum install -y kernel-uek-devel-4.1.12-32.el6uek" >> "$version/Dockerfile"
+			echo >> "$version/Dockerfile"
+			;;
+		fedora:*)
+			echo "RUN ${installer} -y upgrade" >> "$version/Dockerfile"
+			;;
+		*) ;;
+	esac
+
+	case "$from" in
+		centos:*|amazonlinux:latest)
+			# get "Development Tools" packages and dependencies
+			echo 'RUN yum groupinstall -y "Development Tools"' >> "$version/Dockerfile"
+
+			if [[ "$version" == "centos-7" ]]; then
+				echo 'RUN yum -y swap -- remove systemd-container systemd-container-libs -- install systemd systemd-libs' >> "$version/Dockerfile"
+			fi
+			;;
+		oraclelinux:*)
+			# get "Development Tools" packages and dependencies
+			# we also need yum-utils for yum-config-manager to pull the latest repo file
+			echo 'RUN yum groupinstall -y "Development Tools"' >> "$version/Dockerfile"
+			;;
+		opensuse:*)
+			# get rpm-build and curl packages and dependencies
+			echo 'RUN zypper --non-interactive install ca-certificates* curl gzip rpm-build' >> "$version/Dockerfile"
+			;;
+		photon:*)
+			echo "RUN ${installer} install -y wget curl ca-certificates gzip make rpm-build sed gcc linux-api-headers glibc-devel binutils libseccomp elfutils" >> "$version/Dockerfile"
+			;;
+		*)
+			echo "RUN ${installer} install -y @development-tools fedora-packager" >>
"$version/Dockerfile" + ;; + esac + + packages=( + btrfs-progs-devel # for "btrfs/ioctl.h" (and "version.h" if possible) + device-mapper-devel # for "libdevmapper.h" + glibc-static + libseccomp-devel # for "seccomp.h" & "libseccomp.so" + libselinux-devel # for "libselinux.so" + pkgconfig # for the pkg-config command + selinux-policy + selinux-policy-devel + systemd-devel # for "sd-journal.h" and libraries + tar # older versions of dev-tools do not have tar + git # required for containerd and runc clone + cmake # tini build + vim-common # tini build + ) + + case "$from" in + oraclelinux:7) + # Enable the optional repository + packages=( --enablerepo=ol7_optional_latest "${packages[*]}" ) + ;; + esac + + case "$from" in + oraclelinux:6|amazonlinux:latest) + # doesn't use systemd, doesn't have a devel package for it + packages=( "${packages[@]/systemd-devel}" ) + ;; + esac + + # opensuse & oraclelinx:6 do not have the right libseccomp libs + case "$from" in + opensuse:*|oraclelinux:6) + packages=( "${packages[@]/libseccomp-devel}" ) + runcBuildTags="selinux" + ;; + *) + extraBuildTags+=' seccomp' + runcBuildTags="seccomp selinux" + ;; + esac + + case "$from" in + opensuse:*) + packages=( "${packages[@]/btrfs-progs-devel/libbtrfs-devel}" ) + packages=( "${packages[@]/pkgconfig/pkg-config}" ) + packages=( "${packages[@]/vim-common/vim}" ) + if [[ "$from" == "opensuse:13."* ]]; then + packages+=( systemd-rpm-macros ) + fi + + # use zypper + echo "RUN zypper --non-interactive install ${packages[*]}" >> "$version/Dockerfile" + ;; + photon:*) + packages=( "${packages[@]/pkgconfig/pkg-config}" ) + echo "RUN ${installer} install -y ${packages[*]}" >> "$version/Dockerfile" + ;; + *) + echo "RUN ${installer} install -y ${packages[*]}" >> "$version/Dockerfile" + ;; + esac + + echo >> "$version/Dockerfile" + + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile >> "$version/Dockerfile" + echo 'RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" + echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + # print build tags in alphabetical order + buildTags=$( echo "selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) + + echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" + echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + case "$from" in + oraclelinux:6) + # We need to set the CGO_CPPFLAGS environment to use the updated UEKR4 headers with all the userns stuff. + # The ordering is very important and should not be changed. 
+ echo 'ENV CGO_CPPFLAGS -D__EXPORTED_HEADERS__ \' >> "$version/Dockerfile" + echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/generated/uapi \' >> "$version/Dockerfile" + echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/uapi \' >> "$version/Dockerfile" + echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/generated/uapi \' >> "$version/Dockerfile" + echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/uapi \' >> "$version/Dockerfile" + echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include' >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + ;; + *) ;; + esac + + +done diff --git a/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/opensuse-13.2/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/opensuse-13.2/Dockerfile new file mode 100644 index 000000000..af97fdc1b --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/opensuse-13.2/Dockerfile @@ -0,0 +1,18 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! +# + +FROM opensuse:13.2 + +RUN zypper --non-interactive install ca-certificates* curl gzip rpm-build +RUN zypper --non-interactive install libbtrfs-devel device-mapper-devel glibc-static libselinux-devel libtool-ltdl-devel pkg-config selinux-policy selinux-policy-devel systemd-devel tar git cmake vim systemd-rpm-macros + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS selinux +ENV RUNC_BUILDTAGS selinux + diff --git a/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/oraclelinux-6/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/oraclelinux-6/Dockerfile new file mode 100644 index 000000000..8e1536534 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/oraclelinux-6/Dockerfile @@ -0,0 +1,28 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! +# + +FROM oraclelinux:6 + +RUN yum install -y yum-utils && curl -o /etc/yum.repos.d/public-yum-ol6.repo http://yum.oracle.com/public-yum-ol6.repo && yum-config-manager -q --enable ol6_UEKR4 +RUN yum install -y kernel-uek-devel-4.1.12-32.el6uek + +RUN yum groupinstall -y "Development Tools" +RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel tar git cmake vim-common + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS selinux +ENV RUNC_BUILDTAGS selinux + +ENV CGO_CPPFLAGS -D__EXPORTED_HEADERS__ \ + -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/generated/uapi \ + -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/uapi \ + -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/generated/uapi \ + -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/uapi \ + -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include + diff --git a/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/oraclelinux-7/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/oraclelinux-7/Dockerfile new file mode 100644 index 000000000..7e5a83220 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/oraclelinux-7/Dockerfile @@ -0,0 +1,18 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! 
+# + +FROM oraclelinux:7 + +RUN yum groupinstall -y "Development Tools" +RUN yum install -y --enablerepo=ol7_optional_latest btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel systemd-devel tar git cmake vim-common + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux + diff --git a/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/photon-1.0/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/photon-1.0/Dockerfile new file mode 100644 index 000000000..6b489d7a6 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/rpm/amd64/photon-1.0/Dockerfile @@ -0,0 +1,18 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! +# + +FROM photon:1.0 + +RUN tdnf install -y wget curl ca-certificates gzip make rpm-build sed gcc linux-api-headers glibc-devel binutils libseccomp elfutils +RUN tdnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkg-config selinux-policy selinux-policy-devel systemd-devel tar git cmake vim-common + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux + diff --git a/vendor/github.com/moby/moby/contrib/builder/rpm/armhf/build.sh b/vendor/github.com/moby/moby/contrib/builder/rpm/armhf/build.sh new file mode 100755 index 000000000..1e3565a34 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/rpm/armhf/build.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + docker build -t "dockercore/builder-rpm:$(basename "$d")" "$d" +done diff --git a/vendor/github.com/moby/moby/contrib/builder/rpm/armhf/centos-7/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/rpm/armhf/centos-7/Dockerfile new file mode 100644 index 000000000..f482fa7ac --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/rpm/armhf/centos-7/Dockerfile @@ -0,0 +1,20 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/armhf/generate.sh"! 
+# + +FROM multiarch/centos:7.2.1511-armhfp-clean + +RUN yum install -y yum-plugin-ovl +RUN yum groupinstall --skip-broken -y "Development Tools" +RUN yum -y swap -- remove systemd-container systemd-container-libs -- install systemd systemd-libs +RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim-common + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux + diff --git a/vendor/github.com/moby/moby/contrib/builder/rpm/armhf/generate.sh b/vendor/github.com/moby/moby/contrib/builder/rpm/armhf/generate.sh new file mode 100755 index 000000000..b07231eb9 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/rpm/armhf/generate.sh @@ -0,0 +1,122 @@ +#!/usr/bin/env bash +# vim: set ts=4 sw=4 noet : + +set -e + +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh centos-7 +# to only update centos-7/Dockerfile +# or: ./generate.sh fedora-newversion +# to create a new folder and a Dockerfile within it + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + distro="${version%-*}" + suite="${version##*-}" + from="${distro}:${suite}" + installer=yum + + if [[ "$distro" == "fedora" ]]; then + installer=dnf + fi + + mkdir -p "$version" + + case "$from" in + centos:*) + # get "Development Tools" packages dependencies + image="multiarch/centos:7.2.1511-armhfp-clean" + ;; + *) + image="${from}" + ;; + esac + + echo "$version -> FROM $image" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/armhf/generate.sh"! 
+ # + + FROM $image + EOF + + echo >> "$version/Dockerfile" + + extraBuildTags= + runcBuildTags= + + case "$from" in + fedora:*) + echo "RUN ${installer} -y upgrade" >> "$version/Dockerfile" + ;; + *) ;; + esac + + case "$from" in + centos:*) + # get "Development Tools" packages dependencies + + echo 'RUN yum install -y yum-plugin-ovl' >> "$version/Dockerfile" + + echo 'RUN yum groupinstall --skip-broken -y "Development Tools"' >> "$version/Dockerfile" + + if [[ "$version" == "centos-7" ]]; then + echo 'RUN yum -y swap -- remove systemd-container systemd-container-libs -- install systemd systemd-libs' >> "$version/Dockerfile" + fi + ;; + *) + echo "RUN ${installer} install -y @development-tools fedora-packager" >> "$version/Dockerfile" + ;; + esac + + packages=( + btrfs-progs-devel # for "btrfs/ioctl.h" (and "version.h" if possible) + device-mapper-devel # for "libdevmapper.h" + glibc-static + libseccomp-devel # for "seccomp.h" & "libseccomp.so" + libselinux-devel # for "libselinux.so" + pkgconfig # for the pkg-config command + selinux-policy + selinux-policy-devel + sqlite-devel # for "sqlite3.h" + systemd-devel # for "sd-journal.h" and libraries + tar # older versions of dev-tools do not have tar + git # required for containerd and runc clone + cmake # tini build + vim-common # tini build + ) + + extraBuildTags+=' seccomp' + runcBuildTags="seccomp selinux" + + echo "RUN ${installer} install -y ${packages[*]}" >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile.armhf >> "$version/Dockerfile" + echo 'RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" + echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + # print build tags in alphabetical order + buildTags=$( echo "selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) + + echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" + echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" + echo >> "$version/Dockerfile" +done diff --git a/vendor/github.com/moby/moby/contrib/builder/rpm/ppc64le/build.sh b/vendor/github.com/moby/moby/contrib/builder/rpm/ppc64le/build.sh new file mode 100755 index 000000000..1e3565a34 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/rpm/ppc64le/build.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + docker build -t "dockercore/builder-rpm:$(basename "$d")" "$d" +done diff --git a/vendor/github.com/moby/moby/contrib/builder/rpm/ppc64le/centos-7/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/rpm/ppc64le/centos-7/Dockerfile new file mode 100644 index 000000000..95da27856 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/rpm/ppc64le/centos-7/Dockerfile @@ -0,0 +1,19 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/ppc64le/generate.sh"! 
+# + +FROM ppc64le/centos:7 + +RUN yum groupinstall -y "Development Tools" +RUN yum -y swap -- remove systemd-container systemd-container-libs -- install systemd systemd-libs +RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim-common + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux + diff --git a/vendor/github.com/moby/moby/contrib/builder/rpm/ppc64le/fedora-24/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/rpm/ppc64le/fedora-24/Dockerfile new file mode 100644 index 000000000..70a9f5daf --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/rpm/ppc64le/fedora-24/Dockerfile @@ -0,0 +1,19 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/ppc64le/generate.sh"! +# + +FROM ppc64le/fedora:24 + +RUN dnf -y upgrade +RUN dnf install -y @development-tools fedora-packager +RUN dnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel systemd-devel tar git cmake + +ENV GO_VERSION 1.8.3 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux + diff --git a/vendor/github.com/moby/moby/contrib/builder/rpm/ppc64le/generate.sh b/vendor/github.com/moby/moby/contrib/builder/rpm/ppc64le/generate.sh new file mode 100755 index 000000000..c89f5a341 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/rpm/ppc64le/generate.sh @@ -0,0 +1,139 @@ +#!/usr/bin/env bash +set -e + +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh centos-7 +# to only update centos-7/Dockerfile +# or: ./generate.sh fedora-newversion +# to create a new folder and a Dockerfile within it + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + distro="${version%-*}" + suite="${version##*-}" + from="ppc64le/${distro}:${suite}" + installer=yum + + if [[ "$distro" == "fedora" ]]; then + installer=dnf + fi + + mkdir -p "$version" + echo "$version -> FROM $from" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/ppc64le/generate.sh"! 
+ # + + FROM $from + EOF + + echo >> "$version/Dockerfile" + + extraBuildTags= + runcBuildTags= + + case "$from" in + ppc64le/fedora:*) + echo "RUN ${installer} -y upgrade" >> "$version/Dockerfile" + ;; + *) ;; + esac + + case "$from" in + ppc64le/centos:*) + # get "Development Tools" packages dependencies + echo 'RUN yum groupinstall -y "Development Tools"' >> "$version/Dockerfile" + + if [[ "$version" == "centos-7" ]]; then + echo 'RUN yum -y swap -- remove systemd-container systemd-container-libs -- install systemd systemd-libs' >> "$version/Dockerfile" + fi + ;; + ppc64le/opensuse:*) + # Add the ppc64le repo (hopefully the image is updated soon) + # get rpm-build and curl packages and dependencies + echo "RUN zypper addrepo -n ppc64le-oss -f https://download.opensuse.org/ports/ppc/distribution/leap/${suite}/repo/oss/ ppc64le-oss" >> "$version/Dockerfile" + echo "RUN zypper addrepo -n ppc64le-updates -f https://download.opensuse.org/ports/update/${suite}/ ppc64le-updates" >> "$version/Dockerfile" + echo 'RUN zypper --non-interactive install ca-certificates* curl gzip rpm-build' >> "$version/Dockerfile" + ;; + *) + echo "RUN ${installer} install -y @development-tools fedora-packager" >> "$version/Dockerfile" + ;; + esac + + packages=( + btrfs-progs-devel # for "btrfs/ioctl.h" (and "version.h" if possible) + device-mapper-devel # for "libdevmapper.h" + glibc-static + libseccomp-devel # for "seccomp.h" & "libseccomp.so" + libselinux-devel # for "libselinux.so" + pkgconfig # for the pkg-config command + selinux-policy + selinux-policy-devel + sqlite-devel # for "sqlite3.h" + systemd-devel # for "sd-journal.h" and libraries + tar # older versions of dev-tools do not have tar + git # required for containerd and runc clone + cmake # tini build + vim-common # tini build + ) + + # opensuse does not have the right libseccomp libs + case "$from" in + ppc64le/opensuse:*) + packages=( "${packages[@]/libseccomp-devel}" ) + runcBuildTags="selinux" + ;; + *) + extraBuildTags+=' seccomp' + runcBuildTags="seccomp selinux" + ;; + esac + + case "$from" in + ppc64le/opensuse:*) + packages=( "${packages[@]/btrfs-progs-devel/libbtrfs-devel}" ) + packages=( "${packages[@]/pkgconfig/pkg-config}" ) + packages=( "${packages[@]/vim-common/vim}" ) + if [[ "$from" == "ppc64le/opensuse:13."* ]]; then + packages+=( systemd-rpm-macros ) + fi + + # use zypper + echo "RUN zypper --non-interactive install ${packages[*]}" >> "$version/Dockerfile" + ;; + *) + echo "RUN ${installer} install -y ${packages[*]}" >> "$version/Dockerfile" + ;; + esac + + echo >> "$version/Dockerfile" + + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile.ppc64le >> "$version/Dockerfile" + echo 'RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" + echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + # print build tags in alphabetical order + buildTags=$( echo "selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) + + echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" + echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + +done diff --git a/vendor/github.com/moby/moby/contrib/builder/rpm/ppc64le/opensuse-42.1/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/rpm/ppc64le/opensuse-42.1/Dockerfile new file mode 
100644 index 000000000..d11caa4cb --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/rpm/ppc64le/opensuse-42.1/Dockerfile @@ -0,0 +1,20 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/ppc64le/generate.sh"! +# + +FROM ppc64le/opensuse:42.1 + +RUN zypper addrepo -n ppc64le-oss -f https://download.opensuse.org/ports/ppc/distribution/leap/42.1/repo/oss/ ppc64le-oss +RUN zypper addrepo -n ppc64le-updates -f https://download.opensuse.org/ports/update/42.1/ ppc64le-updates +RUN zypper --non-interactive install ca-certificates* curl gzip rpm-build +RUN zypper --non-interactive install libbtrfs-devel device-mapper-devel glibc-static libselinux-devel libtool-ltdl-devel pkg-config selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS selinux +ENV RUNC_BUILDTAGS selinux + diff --git a/vendor/github.com/moby/moby/contrib/builder/rpm/s390x/build.sh b/vendor/github.com/moby/moby/contrib/builder/rpm/s390x/build.sh new file mode 100755 index 000000000..1e3565a34 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/rpm/s390x/build.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + docker build -t "dockercore/builder-rpm:$(basename "$d")" "$d" +done diff --git a/vendor/github.com/moby/moby/contrib/builder/rpm/s390x/clefos-base-s390x-7/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/rpm/s390x/clefos-base-s390x-7/Dockerfile new file mode 100644 index 000000000..1310b2154 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/rpm/s390x/clefos-base-s390x-7/Dockerfile @@ -0,0 +1,19 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/s390x/generate.sh"! 
+# + +FROM sinenomine/clefos-base-s390x + + +RUN touch /var/lib/rpm/* && yum groupinstall -y "Development Tools" +RUN touch /var/lib/rpm/* && yum install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim-common + +ENV GO_VERSION 1.8.3 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux +RUN ln -s /usr/bin/gcc /usr/bin/s390x-linux-gnu-gcc diff --git a/vendor/github.com/moby/moby/contrib/builder/rpm/s390x/generate.sh b/vendor/github.com/moby/moby/contrib/builder/rpm/s390x/generate.sh new file mode 100755 index 000000000..81246c5dd --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/rpm/s390x/generate.sh @@ -0,0 +1,144 @@ +#!/usr/bin/env bash +set -e + +# This file is used to auto-generate Dockerfiles for making rpms via 'make rpm' +# +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh centos-7 +# to only update centos-7/Dockerfile +# or: ./generate.sh fedora-newversion +# to create a new folder and a Dockerfile within it + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + echo "${versions[@]}" + distro="${version%-*}" + suite="${version##*-}" + case "$distro" in + *opensuse*) + from="opensuse/s390x:tumbleweed" + ;; + *clefos*) + from="sinenomine/${distro}" + ;; + *) + echo No appropriate or supported image available. + exit 1 + ;; + esac + installer=yum + + mkdir -p "$version" + echo "$version -> FROM $from" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/s390x/generate.sh"! + # + + FROM $from + + EOF + + echo >> "$version/Dockerfile" + + extraBuildTags='' + runcBuildTags= + + case "$from" in + *clefos*) + # Fix for RHBZ #1213602 & get "Development Tools" packages dependencies + echo 'RUN touch /var/lib/rpm/* && yum groupinstall -y "Development Tools"' >> "$version/Dockerfile" + ;; + *opensuse*) + echo "RUN zypper ar https://download.opensuse.org/ports/zsystems/tumbleweed/repo/oss/ tumbleweed" >> "$version/Dockerfile" + # get rpm-build and curl packages and dependencies + echo 'RUN zypper --non-interactive install ca-certificates* curl gzip rpm-build' >> "$version/Dockerfile" + ;; + *) + echo No appropriate or supported image available. + exit 1 + ;; + esac + + packages=( + btrfs-progs-devel # for "btrfs/ioctl.h" (and "version.h" if possible) + device-mapper-devel # for "libdevmapper.h" + glibc-static + libseccomp-devel # for "seccomp.h" & "libseccomp.so" + libselinux-devel # for "libselinux.so" + pkgconfig # for the pkg-config command + selinux-policy + selinux-policy-devel + sqlite-devel # for "sqlite3.h" + systemd-devel # for "sd-journal.h" and libraries + tar # older versions of dev-tools do not have tar + git # required for containerd and runc clone + cmake # tini build + vim-common # tini build + ) + + case "$from" in + *clefos*) + extraBuildTags+=' seccomp' + runcBuildTags="seccomp selinux" + ;; + *opensuse*) + packages=( "${packages[@]/libseccomp-devel}" ) + runcBuildTags="selinux" + ;; + *) + echo No appropriate or supported image available.
+ exit 1 + ;; + esac + + case "$from" in + *clefos*) + # Same RHBZ fix needed on all yum lines + echo "RUN touch /var/lib/rpm/* && ${installer} install -y ${packages[*]}" >> "$version/Dockerfile" + ;; + *opensuse*) + packages=( "${packages[@]/btrfs-progs-devel/libbtrfs-devel}" ) + packages=( "${packages[@]/pkgconfig/pkg-config}" ) + packages=( "${packages[@]/vim-common/vim}" ) + + packages+=( systemd-rpm-macros ) # for use of >= opensuse:13.* + + # use zypper + echo "RUN zypper --non-interactive install ${packages[*]}" >> "$version/Dockerfile" + ;; + *) + echo No appropriate or supported image available. + exit 1 + ;; + esac + + echo >> "$version/Dockerfile" + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile.s390x >> "$version/Dockerfile" + echo 'RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" + echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + # print build tags in alphabetical order + buildTags=$( echo "selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) + + echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" + echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" + # TODO: Investigate why "s390x-linux-gnu-gcc" is required + echo "RUN ln -s /usr/bin/gcc /usr/bin/s390x-linux-gnu-gcc" >> "$version/Dockerfile" +done diff --git a/vendor/github.com/moby/moby/contrib/builder/rpm/s390x/opensuse-tumbleweed-1/Dockerfile b/vendor/github.com/moby/moby/contrib/builder/rpm/s390x/opensuse-tumbleweed-1/Dockerfile new file mode 100644 index 000000000..05cad426e --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/builder/rpm/s390x/opensuse-tumbleweed-1/Dockerfile @@ -0,0 +1,20 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/s390x/generate.sh"! +# + +FROM opensuse/s390x:tumbleweed + + +RUN zypper ar https://download.opensuse.org/ports/zsystems/tumbleweed/repo/oss/ tumbleweed +RUN zypper --non-interactive install ca-certificates* curl gzip rpm-build +RUN zypper --non-interactive install libbtrfs-devel device-mapper-devel glibc-static libselinux-devel libtool-ltdl-devel pkg-config selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim systemd-rpm-macros + +ENV GO_VERSION 1.8.3 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS selinux +ENV RUNC_BUILDTAGS selinux +RUN ln -s /usr/bin/gcc /usr/bin/s390x-linux-gnu-gcc diff --git a/vendor/github.com/moby/moby/contrib/check-config.sh b/vendor/github.com/moby/moby/contrib/check-config.sh new file mode 100755 index 000000000..88eb8aa75 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/check-config.sh @@ -0,0 +1,360 @@ +#!/usr/bin/env bash +set -e + +EXITCODE=0 + +# bits of this were adapted from lxc-checkconfig +# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in + +possibleConfigs=( + '/proc/config.gz' + "/boot/config-$(uname -r)" + "/usr/src/linux-$(uname -r)/.config" + '/usr/src/linux/.config' +) + +if [ $# -gt 0 ]; then + CONFIG="$1" +else + : ${CONFIG:="${possibleConfigs[0]}"} +fi + +if ! 
command -v zgrep &> /dev/null; then + zgrep() { + zcat "$2" | grep "$1" + } +fi + +kernelVersion="$(uname -r)" +kernelMajor="${kernelVersion%%.*}" +kernelMinor="${kernelVersion#$kernelMajor.}" +kernelMinor="${kernelMinor%%.*}" + +is_set() { + zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null +} +is_set_in_kernel() { + zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null +} +is_set_as_module() { + zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null +} + +color() { + local codes=() + if [ "$1" = 'bold' ]; then + codes=( "${codes[@]}" '1' ) + shift + fi + if [ "$#" -gt 0 ]; then + local code= + case "$1" in + # see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors + black) code=30 ;; + red) code=31 ;; + green) code=32 ;; + yellow) code=33 ;; + blue) code=34 ;; + magenta) code=35 ;; + cyan) code=36 ;; + white) code=37 ;; + esac + if [ "$code" ]; then + codes=( "${codes[@]}" "$code" ) + fi + fi + local IFS=';' + echo -en '\033['"${codes[*]}"'m' +} +wrap_color() { + text="$1" + shift + color "$@" + echo -n "$text" + color reset + echo +} + +wrap_good() { + echo "$(wrap_color "$1" white): $(wrap_color "$2" green)" +} +wrap_bad() { + echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)" +} +wrap_warning() { + wrap_color >&2 "$*" red +} + +check_flag() { + if is_set_in_kernel "$1"; then + wrap_good "CONFIG_$1" 'enabled' + elif is_set_as_module "$1"; then + wrap_good "CONFIG_$1" 'enabled (as module)' + else + wrap_bad "CONFIG_$1" 'missing' + EXITCODE=1 + fi +} + +check_flags() { + for flag in "$@"; do + echo -n "- "; check_flag "$flag" + done +} + +check_command() { + if command -v "$1" >/dev/null 2>&1; then + wrap_good "$1 command" 'available' + else + wrap_bad "$1 command" 'missing' + EXITCODE=1 + fi +} + +check_device() { + if [ -c "$1" ]; then + wrap_good "$1" 'present' + else + wrap_bad "$1" 'missing' + EXITCODE=1 + fi +} + +check_distro_userns() { + source /etc/os-release 2>/dev/null || /bin/true + if [[ "${ID}" =~ ^(centos|rhel)$ && "${VERSION_ID}" =~ ^7 ]]; then + # this is a CentOS7 or RHEL7 system + grep -q "user_namespace.enable=1" /proc/cmdline || { + # no user namespace support enabled + wrap_bad " (RHEL7/CentOS7" "User namespaces disabled; add 'user_namespace.enable=1' to boot command line)" + EXITCODE=1 + } + fi +} + +if [ ! -e "$CONFIG" ]; then + wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config ..." + for tryConfig in "${possibleConfigs[@]}"; do + if [ -e "$tryConfig" ]; then + CONFIG="$tryConfig" + break + fi + done + if [ ! -e "$CONFIG" ]; then + wrap_warning "error: cannot find kernel config" + wrap_warning " try running this script again, specifying the kernel config:" + wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config" + exit 1 + fi +fi + +wrap_color "info: reading kernel config from $CONFIG ..." 
white +echo + +echo 'Generally Necessary:' + +echo -n '- ' +cgroupSubsystemDir="$(awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' /proc/mounts | head -n1)" +cgroupDir="$(dirname "$cgroupSubsystemDir")" +if [ -d "$cgroupDir/cpu" -o -d "$cgroupDir/cpuacct" -o -d "$cgroupDir/cpuset" -o -d "$cgroupDir/devices" -o -d "$cgroupDir/freezer" -o -d "$cgroupDir/memory" ]; then + echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]" +else + if [ "$cgroupSubsystemDir" ]; then + echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]" + else + echo "$(wrap_bad 'cgroup hierarchy' 'nonexistent??')" + fi + EXITCODE=1 + echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)" +fi + +if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then + echo -n '- ' + if command -v apparmor_parser &> /dev/null; then + echo "$(wrap_good 'apparmor' 'enabled and tools installed')" + else + echo "$(wrap_bad 'apparmor' 'enabled, but apparmor_parser missing')" + echo -n ' ' + if command -v apt-get &> /dev/null; then + echo "$(wrap_color '(use "apt-get install apparmor" to fix this)')" + elif command -v yum &> /dev/null; then + echo "$(wrap_color '(your best bet is "yum install apparmor-parser")')" + else + echo "$(wrap_color '(look for an "apparmor" package for your distribution)')" + fi + EXITCODE=1 + fi +fi + +flags=( + NAMESPACES {NET,PID,IPC,UTS}_NS + CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG + KEYS + VETH BRIDGE BRIDGE_NETFILTER + NF_NAT_IPV4 IP_NF_FILTER IP_NF_TARGET_MASQUERADE + NETFILTER_XT_MATCH_{ADDRTYPE,CONNTRACK,IPVS} + IP_NF_NAT NF_NAT NF_NAT_NEEDED + + # required for bind-mounting /dev/mqueue into containers + POSIX_MQUEUE +) +check_flags "${flags[@]}" +if [ "$kernelMajor" -lt 4 ] || [ "$kernelMajor" -eq 4 -a "$kernelMinor" -lt 8 ]; then + check_flags DEVPTS_MULTIPLE_INSTANCES +fi + +echo + +echo 'Optional Features:' +{ + check_flags USER_NS + check_distro_userns +} +{ + check_flags SECCOMP +} +{ + check_flags CGROUP_PIDS +} +{ + CODE=${EXITCODE} + check_flags MEMCG_SWAP MEMCG_SWAP_ENABLED + if [ -e /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes ]; then + echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)" + EXITCODE=${CODE} + elif is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then + echo " $(wrap_color '(cgroup swap accounting is currently not enabled, you can enable it by setting boot option "swapaccount=1")' bold black)" + fi +} +{ + if is_set LEGACY_VSYSCALL_NATIVE; then + echo -n "- "; wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled' + echo " $(wrap_color '(dangerous, provides an ASLR-bypassing target with usable ROP gadgets.)' bold black)" + elif is_set LEGACY_VSYSCALL_EMULATE; then + echo -n "- "; wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled' + elif is_set LEGACY_VSYSCALL_NONE; then + echo -n "- "; wrap_bad "CONFIG_LEGACY_VSYSCALL_NONE" 'enabled' + echo " $(wrap_color '(containers using eglibc <= 2.13 will not work. Switch to' bold black)" + echo " $(wrap_color ' "CONFIG_VSYSCALL_[NATIVE|EMULATE]" or use "vsyscall=[native|emulate]"' bold black)" + echo " $(wrap_color ' on kernel command line. 
Note that this will disable ASLR for the' bold black)" + echo " $(wrap_color ' VDSO, which may assist in exploiting security vulnerabilities.)' bold black)" + # else Older kernels (prior to 3dc33bd30f3e, released in v4.4-rc1) do + # not have these LEGACY_VSYSCALL options and are effectively + # LEGACY_VSYSCALL_EMULATE. Even older kernels are presumably + # effectively LEGACY_VSYSCALL_NATIVE. + fi +} + +if [ "$kernelMajor" -lt 4 ] || [ "$kernelMajor" -eq 4 -a "$kernelMinor" -le 5 ]; then + check_flags MEMCG_KMEM +fi + +if [ "$kernelMajor" -lt 3 ] || [ "$kernelMajor" -eq 3 -a "$kernelMinor" -le 18 ]; then + check_flags RESOURCE_COUNTERS +fi + +if [ "$kernelMajor" -lt 3 ] || [ "$kernelMajor" -eq 3 -a "$kernelMinor" -le 13 ]; then + netprio=NETPRIO_CGROUP +else + netprio=CGROUP_NET_PRIO +fi + +flags=( + BLK_CGROUP BLK_DEV_THROTTLING IOSCHED_CFQ CFQ_GROUP_IOSCHED + CGROUP_PERF + CGROUP_HUGETLB + NET_CLS_CGROUP $netprio + CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED + IP_VS + IP_VS_NFCT + IP_VS_RR +) +check_flags "${flags[@]}" + +if ! is_set EXT4_USE_FOR_EXT2; then + check_flags EXT3_FS EXT3_FS_XATTR EXT3_FS_POSIX_ACL EXT3_FS_SECURITY + if ! is_set EXT3_FS || ! is_set EXT3_FS_XATTR || ! is_set EXT3_FS_POSIX_ACL || ! is_set EXT3_FS_SECURITY; then + echo " $(wrap_color '(enable these ext3 configs if you are using ext3 as backing filesystem)' bold black)" + fi +fi + +check_flags EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY +if ! is_set EXT4_FS || ! is_set EXT4_FS_POSIX_ACL || ! is_set EXT4_FS_SECURITY; then + if is_set EXT4_USE_FOR_EXT2; then + echo " $(wrap_color 'enable these ext4 configs if you are using ext3 or ext4 as backing filesystem' bold black)" + else + echo " $(wrap_color 'enable these ext4 configs if you are using ext4 as backing filesystem' bold black)" + fi +fi + +echo '- Network Drivers:' +echo ' - "'$(wrap_color 'overlay' blue)'":' +check_flags VXLAN | sed 's/^/ /' +echo ' Optional (for encrypted networks):' +check_flags CRYPTO CRYPTO_AEAD CRYPTO_GCM CRYPTO_SEQIV CRYPTO_GHASH \ + XFRM XFRM_USER XFRM_ALGO INET_ESP INET_XFRM_MODE_TRANSPORT | sed 's/^/ /' +echo ' - "'$(wrap_color 'ipvlan' blue)'":' +check_flags IPVLAN | sed 's/^/ /' +echo ' - "'$(wrap_color 'macvlan' blue)'":' +check_flags MACVLAN DUMMY | sed 's/^/ /' +echo ' - "'$(wrap_color 'ftp,tftp client in container' blue)'":' +check_flags NF_NAT_FTP NF_CONNTRACK_FTP NF_NAT_TFTP NF_CONNTRACK_TFTP | sed 's/^/ /' + +# only fail if no storage drivers available +CODE=${EXITCODE} +EXITCODE=0 +STORAGE=1 + +echo '- Storage Drivers:' +echo ' - "'$(wrap_color 'aufs' blue)'":' +check_flags AUFS_FS | sed 's/^/ /' +if !
is_set AUFS_FS && grep -q aufs /proc/filesystems; then + echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)" +fi +[ "$EXITCODE" = 0 ] && STORAGE=0 +EXITCODE=0 + +echo ' - "'$(wrap_color 'btrfs' blue)'":' +check_flags BTRFS_FS | sed 's/^/ /' +check_flags BTRFS_FS_POSIX_ACL | sed 's/^/ /' +[ "$EXITCODE" = 0 ] && STORAGE=0 +EXITCODE=0 + +echo ' - "'$(wrap_color 'devicemapper' blue)'":' +check_flags BLK_DEV_DM DM_THIN_PROVISIONING | sed 's/^/ /' +[ "$EXITCODE" = 0 ] && STORAGE=0 +EXITCODE=0 + +echo ' - "'$(wrap_color 'overlay' blue)'":' +check_flags OVERLAY_FS | sed 's/^/ /' +[ "$EXITCODE" = 0 ] && STORAGE=0 +EXITCODE=0 + +echo ' - "'$(wrap_color 'zfs' blue)'":' +echo -n " - "; check_device /dev/zfs +echo -n " - "; check_command zfs +echo -n " - "; check_command zpool +[ "$EXITCODE" = 0 ] && STORAGE=0 +EXITCODE=0 + +EXITCODE=$CODE +[ "$STORAGE" = 1 ] && EXITCODE=1 + +echo + +check_limit_over() +{ + if [ $(cat "$1") -le "$2" ]; then + wrap_bad "- $1" "$(cat $1)" + wrap_color " This should be set to at least $2, for example set: sysctl -w kernel/keys/root_maxkeys=1000000" bold black + EXITCODE=1 + else + wrap_good "- $1" "$(cat $1)" + fi +} + +echo 'Limits:' +check_limit_over /proc/sys/kernel/keys/root_maxkeys 10000 +echo + +exit $EXITCODE diff --git a/vendor/github.com/moby/moby/contrib/desktop-integration/README.md b/vendor/github.com/moby/moby/contrib/desktop-integration/README.md new file mode 100644 index 000000000..85a01b9ee --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/desktop-integration/README.md @@ -0,0 +1,11 @@ +Desktop Integration +=================== + +The ./contrib/desktop-integration directory contains examples of typical dockerized +desktop applications. + +Examples +======== + +* Chromium: ./chromium/Dockerfile shows a way to dockerize a common application +* Gparted: ./gparted/Dockerfile shows a way to dockerize a common application with devices diff --git a/vendor/github.com/moby/moby/contrib/desktop-integration/chromium/Dockerfile b/vendor/github.com/moby/moby/contrib/desktop-integration/chromium/Dockerfile new file mode 100644 index 000000000..187281644 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/desktop-integration/chromium/Dockerfile @@ -0,0 +1,36 @@ +# VERSION: 0.1 +# DESCRIPTION: Create chromium container with its dependencies +# AUTHOR: Jessica Frazelle +# COMMENTS: +# This file describes how to build a Chromium container with all +# dependencies installed. It uses the native X11 unix socket. +# Tested on Debian Jessie +# USAGE: +# # Download Chromium Dockerfile +# wget http://raw.githubusercontent.com/docker/docker/master/contrib/desktop-integration/chromium/Dockerfile +# +# # Build chromium image +# docker build -t chromium . +# +# # Run stateful data-on-host chromium.
For ephemeral, remove -v /data/chromium:/data +# docker run -v /data/chromium:/data -v /tmp/.X11-unix:/tmp/.X11-unix \ +# -e DISPLAY=unix$DISPLAY chromium + +# # To run stateful dockerized data containers +# docker run --volumes-from chromium-data -v /tmp/.X11-unix:/tmp/.X11-unix \ +# -e DISPLAY=unix$DISPLAY chromium + +# Base docker image +FROM debian:jessie +LABEL maintainer Jessica Frazelle + +# Install Chromium +RUN apt-get update && apt-get install -y \ + chromium \ + chromium-l10n \ + libcanberra-gtk-module \ + libexif-dev \ + --no-install-recommends + +# Autorun chromium +CMD ["/usr/bin/chromium", "--no-sandbox", "--user-data-dir=/data"] diff --git a/vendor/github.com/moby/moby/contrib/desktop-integration/gparted/Dockerfile b/vendor/github.com/moby/moby/contrib/desktop-integration/gparted/Dockerfile new file mode 100644 index 000000000..8a9b646ee --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/desktop-integration/gparted/Dockerfile @@ -0,0 +1,31 @@ +# VERSION: 0.1 +# DESCRIPTION: Create gparted container with its dependencies +# AUTHOR: Jessica Frazelle +# COMMENTS: +# This file describes how to build a gparted container with all +# dependencies installed. It uses the native X11 unix socket. +# Tested on Debian Jessie +# USAGE: +# # Download gparted Dockerfile +# wget http://raw.githubusercontent.com/docker/docker/master/contrib/desktop-integration/gparted/Dockerfile +# +# # Build gparted image +# docker build -t gparted . +# +# docker run -v /tmp/.X11-unix:/tmp/.X11-unix \ +# --device=/dev/sda:/dev/sda \ +# -e DISPLAY=unix$DISPLAY gparted +# + +# Base docker image +FROM debian:jessie +LABEL maintainer Jessica Frazelle + +# Install Gparted and its dependencies +RUN apt-get update && apt-get install -y \ + gparted \ + libcanberra-gtk-module \ + --no-install-recommends + +# Autorun gparted +CMD ["/usr/sbin/gparted"] diff --git a/vendor/github.com/moby/moby/contrib/docker-device-tool/README.md b/vendor/github.com/moby/moby/contrib/docker-device-tool/README.md new file mode 100644 index 000000000..6c54d5995 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/docker-device-tool/README.md @@ -0,0 +1,14 @@ +Docker device tool for devicemapper storage driver backend +=================== + +The ./contrib/docker-device-tool directory contains a tool for manipulating the devicemapper thin pool.
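+ +Once compiled (see below), the binary exposes the subcommands implemented in device_tool.go: status, list, device <id>, resize <new-pool-size>, snap <new-id> <base-id>, remove <id>, and mount <id> <mountpoint>. A hypothetical session (sizes accept k/m/g/t suffixes; -r selects a Docker root other than the default /var/lib/docker; root privileges are typically required for devicemapper operations): + + $ sudo ./device_tool status + $ sudo ./device_tool resize 100G + $ sudo ./device_tool -r /mnt/docker list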
+ +Compile +======== + + $ make shell + ## inside build container + $ go build contrib/docker-device-tool/device_tool.go + + # if devicemapper version is old and compilation fails, compile with `libdm_no_deferred_remove` tag + $ go build -tags libdm_no_deferred_remove contrib/docker-device-tool/device_tool.go diff --git a/vendor/github.com/moby/moby/contrib/docker-device-tool/device_tool.go b/vendor/github.com/moby/moby/contrib/docker-device-tool/device_tool.go new file mode 100644 index 000000000..fc171666f --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/docker-device-tool/device_tool.go @@ -0,0 +1,176 @@ +// +build !windows,!solaris + +package main + +import ( + "flag" + "fmt" + "os" + "path" + "sort" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver/devmapper" + "github.com/docker/docker/pkg/devicemapper" +) + +func usage() { + fmt.Fprintf(os.Stderr, "Usage: %s [status] | [list] | [device id] | [resize new-pool-size] | [snap new-id base-id] | [remove id] | [mount id mountpoint]\n", os.Args[0]) + flag.PrintDefaults() + os.Exit(1) +} + +func byteSizeFromString(arg string) (int64, error) { + digits := "" + rest := "" + last := strings.LastIndexAny(arg, "0123456789") + if last >= 0 { + digits = arg[:last+1] + rest = arg[last+1:] + } + + val, err := strconv.ParseInt(digits, 10, 64) + if err != nil { + return val, err + } + + rest = strings.ToLower(strings.TrimSpace(rest)) + + var multiplier int64 = 1 + switch rest { + case "": + multiplier = 1 + case "k", "kb": + multiplier = 1024 + case "m", "mb": + multiplier = 1024 * 1024 + case "g", "gb": + multiplier = 1024 * 1024 * 1024 + case "t", "tb": + multiplier = 1024 * 1024 * 1024 * 1024 + default: + return 0, fmt.Errorf("Unknown size unit: %s", rest) + } + + return val * multiplier, nil +} + +func main() { + root := flag.String("r", "/var/lib/docker", "Docker root dir") + flDebug := flag.Bool("D", false, "Debug mode") + + flag.Parse() + + if *flDebug { + os.Setenv("DEBUG", "1") + logrus.SetLevel(logrus.DebugLevel) + } + + if flag.NArg() < 1 { + usage() + } + + args := flag.Args() + + home := path.Join(*root, "devicemapper") + devices, err := devmapper.NewDeviceSet(home, false, nil, nil, nil) + if err != nil { + fmt.Println("Can't initialize device mapper: ", err) + os.Exit(1) + } + + switch args[0] { + case "status": + status := devices.Status() + fmt.Printf("Pool name: %s\n", status.PoolName) + fmt.Printf("Data Loopback file: %s\n", status.DataLoopback) + fmt.Printf("Metadata Loopback file: %s\n", status.MetadataLoopback) + fmt.Printf("Sector size: %d\n", status.SectorSize) + fmt.Printf("Data use: %d of %d (%.1f %%)\n", status.Data.Used, status.Data.Total, 100.0*float64(status.Data.Used)/float64(status.Data.Total)) + fmt.Printf("Metadata use: %d of %d (%.1f %%)\n", status.Metadata.Used, status.Metadata.Total, 100.0*float64(status.Metadata.Used)/float64(status.Metadata.Total)) + break + case "list": + ids := devices.List() + sort.Strings(ids) + for _, id := range ids { + fmt.Println(id) + } + break + case "device": + if flag.NArg() < 2 { + usage() + } + status, err := devices.GetDeviceStatus(args[1]) + if err != nil { + fmt.Println("Can't get device info: ", err) + os.Exit(1) + } + fmt.Printf("Id: %d\n", status.DeviceID) + fmt.Printf("Size: %d\n", status.Size) + fmt.Printf("Transaction Id: %d\n", status.TransactionID) + fmt.Printf("Size in Sectors: %d\n", status.SizeInSectors) + fmt.Printf("Mapped Sectors: %d\n", status.MappedSectors) + fmt.Printf("Highest Mapped Sector: %d\n", 
status.HighestMappedSector) + break + case "resize": + if flag.NArg() < 2 { + usage() + } + + size, err := byteSizeFromString(args[1]) + if err != nil { + fmt.Println("Invalid size: ", err) + os.Exit(1) + } + + err = devices.ResizePool(size) + if err != nil { + fmt.Println("Error resizing pool: ", err) + os.Exit(1) + } + + break + case "snap": + if flag.NArg() < 3 { + usage() + } + + err := devices.AddDevice(args[1], args[2], nil) + if err != nil { + fmt.Println("Can't create snap device: ", err) + os.Exit(1) + } + break + case "remove": + if flag.NArg() < 2 { + usage() + } + + err := devicemapper.RemoveDevice(args[1]) + if err != nil { + fmt.Println("Can't remove device: ", err) + os.Exit(1) + } + break + case "mount": + if flag.NArg() < 3 { + usage() + } + + err := devices.MountDevice(args[1], args[2], "") + if err != nil { + fmt.Println("Can't mount device: ", err) + os.Exit(1) + } + break + default: + fmt.Printf("Unknown command %s\n", args[0]) + usage() + + os.Exit(1) + } + + return +} diff --git a/vendor/github.com/moby/moby/contrib/docker-device-tool/device_tool_windows.go b/vendor/github.com/moby/moby/contrib/docker-device-tool/device_tool_windows.go new file mode 100644 index 000000000..da29a2cad --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/docker-device-tool/device_tool_windows.go @@ -0,0 +1,4 @@ +package main + +func main() { +} diff --git a/vendor/github.com/moby/moby/contrib/docker-machine-install-bundle.sh b/vendor/github.com/moby/moby/contrib/docker-machine-install-bundle.sh new file mode 100755 index 000000000..860598943 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/docker-machine-install-bundle.sh @@ -0,0 +1,111 @@ +#!/usr/bin/env bash +# +# This script installs the bundle to Docker Machine instances, for the purpose +# of testing the latest Docker with Swarm mode enabled. +# Do not use in production. 
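+# +# Example (machine names are placeholders; run from the repository root after +# a build so that bundles/$(cat VERSION) exists): +# ./contrib/docker-machine-install-bundle.sh install machine1 machine2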
+# +# Requirements (on host to run this script) +# - bash is installed +# - Docker Machine is installed +# - GNU tar is installed +# +# Requirements (on Docker machine instances) +# - Docker can be managed via one of `systemctl`, `service`, or `/etc/init.d/docker` +# +set -e +set -o pipefail + +errexit() { + echo "$1" + exit 1 +} + +BUNDLE="bundles/$(cat VERSION)" + +bundle_files(){ + # prefer dynbinary if exists + for f in dockerd docker-proxy; do + if [ -d $BUNDLE/dynbinary-daemon ]; then + echo $BUNDLE/dynbinary-daemon/$f + else + echo $BUNDLE/binary-daemon/$f + fi + done + for f in docker-containerd docker-containerd-ctr docker-containerd-shim docker-init docker-runc; do + echo $BUNDLE/binary-daemon/$f + done + if [ -d $BUNDLE/dynbinary-client ]; then + echo $BUNDLE/dynbinary-client/docker + else + echo $BUNDLE/binary-client/docker + fi +} + +control_docker(){ + m=$1; op=$2 + # NOTE: `docker-machine ssh $m sh -c "foo bar"` does not work + # (but `docker-machine ssh $m sh -c "foo\ bar"` works) + # Anyway, we avoid using `sh -c` here to avoid confusion + cat <<EOF | docker-machine ssh $m sudo sh +if command -v systemctl > /dev/null; then + systemctl $op docker +elif command -v service > /dev/null; then + service docker $op +elif [ -x /etc/init.d/docker ]; then + /etc/init.d/docker $op +else + echo "not sure how to control the docker daemon" + exit 1 +fi +EOF +} + +detect_prefix(){ + m=$1 + script='dirname $(dirname $(which dockerd))' + echo $script | docker-machine ssh $m sh +} + +install_to(){ + m=$1; shift; files=$@ + echo "$m: detecting docker" + prefix=$(detect_prefix $m) + echo "$m: detected docker on $prefix" + echo "$m: stopping docker" + control_docker $m stop + echo "$m: installing docker" + # NOTE: GNU tar is required because we use --transform here + # TODO: compression (should not be default) + tar ch --transform 's/.*\///' $files | docker-machine ssh $m sudo tar Cx $prefix/bin + echo "$m: starting docker" + control_docker $m start + echo "$m: done" +} + +check_prereq(){ + command -v docker-machine > /dev/null || errexit "docker-machine not installed" + ( tar --version | grep GNU > /dev/null ) || errexit "GNU tar not installed" +} + +case "$1" in + "install") + shift; machines=$@ + check_prereq + files=$(bundle_files) + echo "Files to be installed:" + for f in $files; do echo $f; done + pids=() + for m in $machines; do + install_to $m $files & + pids+=($!) + done + status=0 + for pid in ${pids[@]}; do + wait $pid || { status=$?; echo "background process $pid failed with exit status $status"; } + done + exit $status + ;; + *) + errexit "Usage: $0 install MACHINES" + ;; +esac diff --git a/vendor/github.com/moby/moby/contrib/dockerize-disk.sh b/vendor/github.com/moby/moby/contrib/dockerize-disk.sh new file mode 100755 index 000000000..444e243ab --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/dockerize-disk.sh @@ -0,0 +1,118 @@ +#!/usr/bin/env bash +set -e + +if ! command -v qemu-nbd &> /dev/null; then + echo >&2 'error: "qemu-nbd" not found!'
+ exit 1 +fi + +usage() { + echo "Convert disk image to docker image" + echo "" + echo "usage: $0 image-name disk-image-file [ base-image ]" + echo " ie: $0 cirros:0.3.3 cirros-0.3.3-x86_64-disk.img" + echo " $0 ubuntu:cloud ubuntu-14.04-server-cloudimg-amd64-disk1.img ubuntu:14.04" +} + +if [ "$#" -lt 2 ]; then + usage + exit 1 +fi + +CURDIR=$(pwd) + +image_name="${1%:*}" +image_tag="${1#*:}" +if [ "$image_tag" == "$1" ]; then + image_tag="latest" +fi + +disk_image_file="$2" +docker_base_image="$3" + +block_device=/dev/nbd0 + +builddir=$(mktemp -d) + +cleanup() { + umount "$builddir/disk_image" || true + umount "$builddir/workdir" || true + qemu-nbd -d $block_device &> /dev/null || true + rm -rf $builddir +} +trap cleanup EXIT + +# Mount disk image +modprobe nbd max_part=63 +qemu-nbd -rc ${block_device} -P 1 "$disk_image_file" +mkdir "$builddir/disk_image" +mount -o ro ${block_device} "$builddir/disk_image" + +mkdir "$builddir/workdir" +mkdir "$builddir/diff" + +base_image_mounts="" + +# Unpack base image +if [ -n "$docker_base_image" ]; then + mkdir -p "$builddir/base" + docker pull "$docker_base_image" + docker save "$docker_base_image" | tar -xC "$builddir/base" + + image_id=$(docker inspect -f "{{.Id}}" "$docker_base_image") + while [ -n "$image_id" ]; do + mkdir -p "$builddir/base/$image_id/layer" + tar -xf "$builddir/base/$image_id/layer.tar" -C "$builddir/base/$image_id/layer" + + base_image_mounts="${base_image_mounts}:$builddir/base/$image_id/layer=ro+wh" + image_id=$(docker inspect -f "{{.Parent}}" "$image_id") + done +fi + +# Mount work directory +mount -t aufs -o "br=$builddir/diff=rw${base_image_mounts},dio,xino=/dev/shm/aufs.xino" none "$builddir/workdir" + +# Update files +cd $builddir +LC_ALL=C diff -rq disk_image workdir \ + | sed -re "s|Only in workdir(.*?): |DEL \1/|g;s|Only in disk_image(.*?): |ADD \1/|g;s|Files disk_image/(.+) and workdir/(.+) differ|UPDATE /\1|g" \ + | while read action entry; do + case "$action" in + ADD|UPDATE) + cp -a "disk_image$entry" "workdir$entry" + ;; + DEL) + rm -rf "workdir$entry" + ;; + *) + echo "Error: unknown diff line: $action $entry" >&2 + ;; + esac + done + +# Pack new image +new_image_id="$(for i in $(seq 1 32); do printf "%02x" $(($RANDOM % 256)); done)" +mkdir -p $builddir/result/$new_image_id +cd diff +tar -cf $builddir/result/$new_image_id/layer.tar * +echo "1.0" > $builddir/result/$new_image_id/VERSION +cat > $builddir/result/$new_image_id/json <<-EOS +{ "docker_version": "1.4.1" +, "id": "$new_image_id" +, "created": "$(date -u +%Y-%m-%dT%H:%M:%S.%NZ)" +EOS + +if [ -n "$docker_base_image" ]; then + image_id=$(docker inspect -f "{{.Id}}" "$docker_base_image") + echo ", \"parent\": \"$image_id\"" >> $builddir/result/$new_image_id/json +fi + +echo "}" >> $builddir/result/$new_image_id/json + +echo "{\"$image_name\":{\"$image_tag\":\"$new_image_id\"}}" > $builddir/result/repositories + +cd $builddir/result + +# mkdir -p $CURDIR/$image_name +# cp -r * $CURDIR/$image_name +tar -c * | docker load diff --git a/vendor/github.com/moby/moby/contrib/download-frozen-image-v1.sh b/vendor/github.com/moby/moby/contrib/download-frozen-image-v1.sh new file mode 100755 index 000000000..77c91d1f1 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/download-frozen-image-v1.sh @@ -0,0 +1,108 @@ +#!/usr/bin/env bash +set -e + +# hello-world latest ef872312fe1b 3 months ago 910 B +# hello-world latest ef872312fe1bbc5e05aae626791a47ee9b032efa8f3bda39cc0be7b56bfe59b9 3 months ago 910 B + +# debian latest f6fab3b798be 10 weeks ago 85.1 MB +# debian 
latest f6fab3b798be3174f45aa1eb731f8182705555f89c9026d8c1ef230cbf8301dd 10 weeks ago 85.1 MB + +if ! command -v curl &> /dev/null; then + echo >&2 'error: "curl" not found!' + exit 1 +fi + +usage() { + echo "usage: $0 dir image[:tag][@image-id] ..." + echo " ie: $0 /tmp/hello-world hello-world" + echo " $0 /tmp/debian-jessie debian:jessie" + echo " $0 /tmp/old-hello-world hello-world@ef872312fe1bbc5e05aae626791a47ee9b032efa8f3bda39cc0be7b56bfe59b9" + echo " $0 /tmp/old-debian debian:latest@f6fab3b798be3174f45aa1eb731f8182705555f89c9026d8c1ef230cbf8301dd" + [ -z "$1" ] || exit "$1" +} + +dir="$1" # dir for building tar in +shift || usage 1 >&2 + +[ $# -gt 0 -a "$dir" ] || usage 2 >&2 +mkdir -p "$dir" + +# hacky workarounds for Bash 3 support (no associative arrays) +images=() +rm -f "$dir"/tags-*.tmp +# repositories[busybox]='"latest": "...", "ubuntu-14.04": "..."' + +while [ $# -gt 0 ]; do + imageTag="$1" + shift + image="${imageTag%%[:@]*}" + tag="${imageTag#*:}" + imageId="${tag##*@}" + [ "$imageId" != "$tag" ] || imageId= + [ "$tag" != "$imageTag" ] || tag='latest' + tag="${tag%@*}" + + imageFile="${image//\//_}" # "/" can't be in filenames :) + + token="$(curl -sSL -o /dev/null -D- -H 'X-Docker-Token: true' "https://index.docker.io/v1/repositories/$image/images" | tr -d '\r' | awk -F ': *' '$1 == "X-Docker-Token" { print $2 }')" + + if [ -z "$imageId" ]; then + imageId="$(curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/repositories/$image/tags/$tag")" + imageId="${imageId//\"/}" + fi + + ancestryJson="$(curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/images/$imageId/ancestry")" + if [ "${ancestryJson:0:1}" != '[' ]; then + echo >&2 "error: /v1/images/$imageId/ancestry returned something unexpected:" + echo >&2 " $ancestryJson" + exit 1 + fi + + IFS=',' + ancestry=( ${ancestryJson//[\[\] \"]/} ) + unset IFS + + if [ -s "$dir/tags-$imageFile.tmp" ]; then + echo -n ', ' >> "$dir/tags-$imageFile.tmp" + else + images=( "${images[@]}" "$image" ) + fi + echo -n '"'"$tag"'": "'"$imageId"'"' >> "$dir/tags-$imageFile.tmp" + + echo "Downloading '$imageTag' (${#ancestry[@]} layers)..." + for imageId in "${ancestry[@]}"; do + mkdir -p "$dir/$imageId" + echo '1.0' > "$dir/$imageId/VERSION" + + curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/images/$imageId/json" -o "$dir/$imageId/json" + + # TODO figure out why "-C -" doesn't work here + # "curl: (33) HTTP server doesn't seem to support byte ranges. Cannot resume." + # "HTTP/1.1 416 Requested Range Not Satisfiable" + if [ -f "$dir/$imageId/layer.tar" ]; then + # TODO hackpatch for no -C support :'( + echo "skipping existing ${imageId:0:12}" + continue + fi + curl -SL --progress -H "Authorization: Token $token" "https://registry-1.docker.io/v1/images/$imageId/layer" -o "$dir/$imageId/layer.tar" # -C - + done + echo +done + +echo -n '{' > "$dir/repositories" +firstImage=1 +for image in "${images[@]}"; do + imageFile="${image//\//_}" # "/" can't be in filenames :) + + [ "$firstImage" ] || echo -n ',' >> "$dir/repositories" + firstImage= + echo -n $'\n\t' >> "$dir/repositories" + echo -n '"'"$image"'": { '"$(cat "$dir/tags-$imageFile.tmp")"' }' >> "$dir/repositories" +done +echo -n $'\n}\n' >> "$dir/repositories" + +rm -f "$dir"/tags-*.tmp + +echo "Download of images into '$dir' complete." +echo "Use something like the following to load the result into a Docker daemon:" +echo " tar -cC '$dir' . 
| docker load" diff --git a/vendor/github.com/moby/moby/contrib/download-frozen-image-v2.sh b/vendor/github.com/moby/moby/contrib/download-frozen-image-v2.sh new file mode 100755 index 000000000..2dfe247b5 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/download-frozen-image-v2.sh @@ -0,0 +1,307 @@ +#!/usr/bin/env bash +set -eo pipefail + +# hello-world latest ef872312fe1b 3 months ago 910 B +# hello-world latest ef872312fe1bbc5e05aae626791a47ee9b032efa8f3bda39cc0be7b56bfe59b9 3 months ago 910 B + +# debian latest f6fab3b798be 10 weeks ago 85.1 MB +# debian latest f6fab3b798be3174f45aa1eb731f8182705555f89c9026d8c1ef230cbf8301dd 10 weeks ago 85.1 MB + +if ! command -v curl &> /dev/null; then + echo >&2 'error: "curl" not found!' + exit 1 +fi +if ! command -v jq &> /dev/null; then + echo >&2 'error: "jq" not found!' + exit 1 +fi + +usage() { + echo "usage: $0 dir image[:tag][@digest] ..." + echo " $0 /tmp/old-hello-world hello-world:latest@sha256:8be990ef2aeb16dbcb9271ddfe2610fa6658d13f6dfb8bc72074cc1ca36966a7" + [ -z "$1" ] || exit "$1" +} + +dir="$1" # dir for building tar in +shift || usage 1 >&2 + +[ $# -gt 0 -a "$dir" ] || usage 2 >&2 +mkdir -p "$dir" + +# hacky workarounds for Bash 3 support (no associative arrays) +images=() +rm -f "$dir"/tags-*.tmp +manifestJsonEntries=() +doNotGenerateManifestJson= +# repositories[busybox]='"latest": "...", "ubuntu-14.04": "..."' + +# bash v4 on Windows CI requires CRLF separator +newlineIFS=$'\n' +if [ "$(go env GOHOSTOS)" = 'windows' ]; then + major=$(echo ${BASH_VERSION%%[^0.9]} | cut -d. -f1) + if [ "$major" -ge 4 ]; then + newlineIFS=$'\r\n' + fi +fi + +registryBase='https://registry-1.docker.io' +authBase='https://auth.docker.io' +authService='registry.docker.io' + +# https://github.com/moby/moby/issues/33700 +fetch_blob() { + local token="$1"; shift + local image="$1"; shift + local digest="$1"; shift + local targetFile="$1"; shift + local curlArgs=( "$@" ) + + local curlHeaders="$( + curl -S "${curlArgs[@]}" \ + -H "Authorization: Bearer $token" \ + "$registryBase/v2/$image/blobs/$digest" \ + -o "$targetFile" \ + -D- + )" + curlHeaders="$(echo "$curlHeaders" | tr -d '\r')" + if echo "$curlHeaders" | grep -qE "^HTTP/[0-9].[0-9] 3"; then + rm -f "$targetFile" + + local blobRedirect="$(echo "$curlHeaders" | awk -F ': ' 'tolower($1) == "location" { print $2; exit }')" + if [ -z "$blobRedirect" ]; then + echo >&2 "error: failed fetching '$image' blob '$digest'" + echo "$curlHeaders" | head -1 >&2 + return 1 + fi + + curl -fSL "${curlArgs[@]}" \ + "$blobRedirect" \ + -o "$targetFile" + fi +} + +while [ $# -gt 0 ]; do + imageTag="$1" + shift + image="${imageTag%%[:@]*}" + imageTag="${imageTag#*:}" + digest="${imageTag##*@}" + tag="${imageTag%%@*}" + + # add prefix library if passed official image + if [[ "$image" != *"/"* ]]; then + image="library/$image" + fi + + imageFile="${image//\//_}" # "/" can't be in filenames :) + + token="$(curl -fsSL "$authBase/token?service=$authService&scope=repository:$image:pull" | jq --raw-output '.token')" + + manifestJson="$( + curl -fsSL \ + -H "Authorization: Bearer $token" \ + -H 'Accept: application/vnd.docker.distribution.manifest.v2+json' \ + -H 'Accept: application/vnd.docker.distribution.manifest.v1+json' \ + "$registryBase/v2/$image/manifests/$digest" + )" + if [ "${manifestJson:0:1}" != '{' ]; then + echo >&2 "error: /v2/$image/manifests/$digest returned something unexpected:" + echo >&2 " $manifestJson" + exit 1 + fi + + imageIdentifier="$image:$tag@$digest" + + schemaVersion="$(echo 
"$manifestJson" | jq --raw-output '.schemaVersion')" + case "$schemaVersion" in + 2) + mediaType="$(echo "$manifestJson" | jq --raw-output '.mediaType')" + + case "$mediaType" in + application/vnd.docker.distribution.manifest.v2+json) + configDigest="$(echo "$manifestJson" | jq --raw-output '.config.digest')" + imageId="${configDigest#*:}" # strip off "sha256:" + + configFile="$imageId.json" + fetch_blob "$token" "$image" "$configDigest" "$dir/$configFile" -s + + layersFs="$(echo "$manifestJson" | jq --raw-output --compact-output '.layers[]')" + IFS="$newlineIFS" + layers=( $layersFs ) + unset IFS + + echo "Downloading '$imageIdentifier' (${#layers[@]} layers)..." + layerId= + layerFiles=() + for i in "${!layers[@]}"; do + layerMeta="${layers[$i]}" + + layerMediaType="$(echo "$layerMeta" | jq --raw-output '.mediaType')" + layerDigest="$(echo "$layerMeta" | jq --raw-output '.digest')" + + # save the previous layer's ID + parentId="$layerId" + # create a new fake layer ID based on this layer's digest and the previous layer's fake ID + layerId="$(echo "$parentId"$'\n'"$layerDigest" | sha256sum | cut -d' ' -f1)" + # this accounts for the possibility that an image contains the same layer twice (and thus has a duplicate digest value) + + mkdir -p "$dir/$layerId" + echo '1.0' > "$dir/$layerId/VERSION" + + if [ ! -s "$dir/$layerId/json" ]; then + parentJson="$(printf ', parent: "%s"' "$parentId")" + addJson="$(printf '{ id: "%s"%s }' "$layerId" "${parentId:+$parentJson}")" + # this starter JSON is taken directly from Docker's own "docker save" output for unimportant layers + jq "$addJson + ." > "$dir/$layerId/json" <<-'EOJSON' + { + "created": "0001-01-01T00:00:00Z", + "container_config": { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": null, + "Image": "", + "Volumes": null, + "WorkingDir": "", + "Entrypoint": null, + "OnBuild": null, + "Labels": null + } + } + EOJSON + fi + + case "$layerMediaType" in + application/vnd.docker.image.rootfs.diff.tar.gzip) + layerTar="$layerId/layer.tar" + layerFiles=( "${layerFiles[@]}" "$layerTar" ) + # TODO figure out why "-C -" doesn't work here + # "curl: (33) HTTP server doesn't seem to support byte ranges. Cannot resume." + # "HTTP/1.1 416 Requested Range Not Satisfiable" + if [ -f "$dir/$layerTar" ]; then + # TODO hackpatch for no -C support :'( + echo "skipping existing ${layerId:0:12}" + continue + fi + token="$(curl -fsSL "$authBase/token?service=$authService&scope=repository:$image:pull" | jq --raw-output '.token')" + fetch_blob "$token" "$image" "$layerDigest" "$dir/$layerTar" --progress + ;; + + *) + echo >&2 "error: unknown layer mediaType ($imageIdentifier, $layerDigest): '$layerMediaType'" + exit 1 + ;; + esac + done + + # change "$imageId" to be the ID of the last layer we added (needed for old-style "repositories" file which is created later -- specifically for older Docker daemons) + imageId="$layerId" + + # munge the top layer image manifest to have the appropriate image configuration for older daemons + imageOldConfig="$(jq --raw-output --compact-output '{ id: .id } + if .parent then { parent: .parent } else {} end' "$dir/$imageId/json")" + jq --raw-output "$imageOldConfig + del(.history, .rootfs)" "$dir/$configFile" > "$dir/$imageId/json" + + manifestJsonEntry="$( + echo '{}' | jq --raw-output '. 
+ { + Config: "'"$configFile"'", + RepoTags: ["'"${image#library\/}:$tag"'"], + Layers: '"$(echo '[]' | jq --raw-output ".$(for layerFile in "${layerFiles[@]}"; do echo " + [ \"$layerFile\" ]"; done)")"' + }' + )" + manifestJsonEntries=( "${manifestJsonEntries[@]}" "$manifestJsonEntry" ) + ;; + + *) + echo >&2 "error: unknown manifest mediaType ($imageIdentifier): '$mediaType'" + exit 1 + ;; + esac + ;; + + 1) + if [ -z "$doNotGenerateManifestJson" ]; then + echo >&2 "warning: '$imageIdentifier' uses schemaVersion '$schemaVersion'" + echo >&2 " this script cannot (currently) recreate the 'image config' to put in a 'manifest.json' (thus any schemaVersion 2+ images will be imported in the old way, and their 'docker history' will suffer)" + echo >&2 + doNotGenerateManifestJson=1 + fi + + layersFs="$(echo "$manifestJson" | jq --raw-output '.fsLayers | .[] | .blobSum')" + IFS="$newlineIFS" + layers=( $layersFs ) + unset IFS + + history="$(echo "$manifestJson" | jq '.history | [.[] | .v1Compatibility]')" + imageId="$(echo "$history" | jq --raw-output '.[0]' | jq --raw-output '.id')" + + echo "Downloading '$imageIdentifier' (${#layers[@]} layers)..." + for i in "${!layers[@]}"; do + imageJson="$(echo "$history" | jq --raw-output ".[${i}]")" + layerId="$(echo "$imageJson" | jq --raw-output '.id')" + imageLayer="${layers[$i]}" + + mkdir -p "$dir/$layerId" + echo '1.0' > "$dir/$layerId/VERSION" + + echo "$imageJson" > "$dir/$layerId/json" + + # TODO figure out why "-C -" doesn't work here + # "curl: (33) HTTP server doesn't seem to support byte ranges. Cannot resume." + # "HTTP/1.1 416 Requested Range Not Satisfiable" + if [ -f "$dir/$layerId/layer.tar" ]; then + # TODO hackpatch for no -C support :'( + echo "skipping existing ${layerId:0:12}" + continue + fi + token="$(curl -fsSL "$authBase/token?service=$authService&scope=repository:$image:pull" | jq --raw-output '.token')" + fetch_blob "$token" "$image" "$imageLayer" "$dir/$layerId/layer.tar" --progress + done + ;; + + *) + echo >&2 "error: unknown manifest schemaVersion ($imageIdentifier): '$schemaVersion'" + exit 1 + ;; + esac + + echo + + if [ -s "$dir/tags-$imageFile.tmp" ]; then + echo -n ', ' >> "$dir/tags-$imageFile.tmp" + else + images=( "${images[@]}" "$image" ) + fi + echo -n '"'"$tag"'": "'"$imageId"'"' >> "$dir/tags-$imageFile.tmp" +done + +echo -n '{' > "$dir/repositories" +firstImage=1 +for image in "${images[@]}"; do + imageFile="${image//\//_}" # "/" can't be in filenames :) + image="${image#library\/}" + + [ "$firstImage" ] || echo -n ',' >> "$dir/repositories" + firstImage= + echo -n $'\n\t' >> "$dir/repositories" + echo -n '"'"$image"'": { '"$(cat "$dir/tags-$imageFile.tmp")"' }' >> "$dir/repositories" +done +echo -n $'\n}\n' >> "$dir/repositories" + +rm -f "$dir"/tags-*.tmp + +if [ -z "$doNotGenerateManifestJson" ] && [ "${#manifestJsonEntries[@]}" -gt 0 ]; then + echo '[]' | jq --raw-output ".$(for entry in "${manifestJsonEntries[@]}"; do echo " + [ $entry ]"; done)" > "$dir/manifest.json" +else + rm -f "$dir/manifest.json" +fi + +echo "Download of images into '$dir' complete." +echo "Use something like the following to load the result into a Docker daemon:" +echo " tar -cC '$dir' . 
| docker load" diff --git a/vendor/github.com/moby/moby/contrib/editorconfig b/vendor/github.com/moby/moby/contrib/editorconfig new file mode 100644 index 000000000..97eda89a4 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/editorconfig @@ -0,0 +1,13 @@ +root = true + +[*] +end_of_line = lf +insert_final_newline = true +charset = utf-8 +indent_style = tab +indent_size = 4 +trim_trailing_whitespace = true + +[*.md] +indent_size = 2 +indent_style = space diff --git a/vendor/github.com/moby/moby/contrib/gitdm/aliases b/vendor/github.com/moby/moby/contrib/gitdm/aliases new file mode 100644 index 000000000..dd5dd3433 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/gitdm/aliases @@ -0,0 +1,148 @@ +Danny.Yates@mailonline.co.uk danny@codeaholics.org +KenCochrane@gmail.com kencochrane@gmail.com +LÉVEIL thomasleveil@gmail.com +Vincent.Bernat@exoscale.ch bernat@luffy.cx +acidburn@docker.com jess@docker.com +admin@jtlebi.fr jt@yadutaf.fr +ahmetalpbalkan@gmail.com ahmetb@microsoft.com +aj@gandi.net aj@gandi.net +albers@users.noreply.github.com github@albersweb.de +alexander.larsson@gmail.com alexl@redhat.com +amurdaca@redhat.com antonio.murdaca@gmail.com +amy@gandi.net aj@gandi.net +andrew.weiss@microsoft.com andrew.weiss@outlook.com +angt@users.noreply.github.com adrien@gallouet.fr +ankushagarwal@users.noreply.github.com ankushagarwal11@gmail.com +anonymouse2048@gmail.com lheckemann@twig-world.com +anusha@docker.com anusha.ragunathan@docker.com +asarai@suse.com asarai@suse.de +avi.miller@gmail.com avi.miller@oracle.com +bernat@luffy.cx Vincent.Bernat@exoscale.ch +bgoff@cpuguy83-mbp.home cpuguy83@gmail.com +brandon@ifup.co brandon@ifup.org +brent@docker.com brent.salisbury@docker.com +charmes.guillaume@gmail.com guillaume.charmes@docker.com +chenchun.feed@gmail.com ramichen@tencent.com +chooper@plumata.com charles.hooper@dotcloud.com +crosby.michael@gmail.com michael@docker.com +crosbymichael@gmail.com michael@docker.com +cyphar@cyphar.com asarai@suse.de +daehyeok@daehyeok-ui-MacBook-Air.local daehyeok@gmail.com +daehyeok@daehyeokui-MacBook-Air.local daehyeok@gmail.com +daniel.norberg@gmail.com dano@spotify.com +daniel@dotcloud.com daniel.mizyrycki@dotcloud.com +darren@rancher.com darren.s.shepherd@gmail.com +dave@dtucker.co.uk dt@docker.com +dev@vvieux.com victor.vieux@docker.com +dgasienica@zynga.com daniel@gasienica.ch +dnephin@gmail.com dnephin@docker.com +dominikh@fork-bomb.org dominik@honnef.co +dqminh89@gmail.com dqminh@cloudflare.com +dsxiao@dataman-inc.com dxiao@redhat.com +duglin@users.noreply.github.com dug@us.ibm.com +eric.hanchrow@gmail.com ehanchrow@ine.com +erik+github@hollensbe.org github@hollensbe.org +estesp@gmail.com estesp@linux.vnet.ibm.com +ewindisch@docker.com eric@windisch.us +f.joffrey@gmail.com joffrey@docker.com +fkautz@alumni.cmu.edu fkautz@redhat.com +frank.rosquin@gmail.com frank.rosquin+github@gmail.com +gh@mattyw.net mattyw@me.com +git@julienbordellier.com julienbordellier@gmail.com +github@metaliveblog.com github@developersupport.net +github@srid.name sridharr@activestate.com +guillaume.charmes@dotcloud.com guillaume.charmes@docker.com +guillaume@charmes.net guillaume.charmes@docker.com +guillaume@docker.com guillaume.charmes@docker.com +guillaume@dotcloud.com guillaume.charmes@docker.com +haoshuwei24@gmail.com haosw@cn.ibm.com +hollie.teal@docker.com hollie@docker.com +hollietealok@users.noreply.github.com hollie@docker.com +hsinko@users.noreply.github.com 21551195@zju.edu.cn +iamironbob@gmail.com altsysrq@gmail.com +icecrime@gmail.com 
arnaud.porterie@docker.com +jatzen@gmail.com jacob@jacobatzen.dk +jeff@allingeek.com jeff.nickoloff@gmail.com +jefferya@programmerq.net jeff@docker.com +jerome.petazzoni@dotcloud.com jerome.petazzoni@dotcloud.com +jfrazelle@users.noreply.github.com jess@docker.com +jhoward@microsoft.com John.Howard@microsoft.com +jlhawn@berkeley.edu josh.hawn@docker.com +joffrey@dotcloud.com joffrey@docker.com +john.howard@microsoft.com John.Howard@microsoft.com +jp@enix.org jerome.petazzoni@dotcloud.com +justin.cormack@unikernel.com justin.cormack@docker.com +justin.simonelis@PTS-JSIMON2.toronto.exclamation.com justin.p.simonelis@gmail.com +justin@specialbusservice.com justin.cormack@docker.com +katsuta_soshi@cyberagent.co.jp soshi.katsuta@gmail.com +kuehnle@online.de git.nivoc@neverbox.com +kwk@users.noreply.github.com konrad.wilhelm.kleine@gmail.com +leijitang@gmail.com leijitang@huawei.com +liubin0329@gmail.com liubin0329@users.noreply.github.com +lk4d4math@gmail.com lk4d4@docker.com +louis@dotcloud.com kalessin@kalessin.fr +lsm5@redhat.com lsm5@fedoraproject.org +lyndaoleary@hotmail.com lyndaoleary29@gmail.com +madhu@socketplane.io madhu@docker.com +martins@noironetworks.com aanm90@gmail.com +mary@docker.com mary.anthony@docker.com +mastahyeti@users.noreply.github.com mastahyeti@gmail.com +maztaim@users.noreply.github.com taim@bosboot.org +me@runcom.ninja antonio.murdaca@gmail.com +mheon@mheonlaptop.redhat.com mheon@redhat.com +michael@crosbymichael.com michael@docker.com +mohitsoni1989@gmail.com mosoni@ebay.com +moxieandmore@gmail.com mary.anthony@docker.com +moyses.furtado@wplex.com.br moysesb@gmail.com +msabramo@gmail.com marc@marc-abramowitz.com +mzdaniel@glidelink.net daniel.mizyrycki@dotcloud.com +nathan.leclaire@gmail.com nathan.leclaire@docker.com +nathanleclaire@gmail.com nathan.leclaire@docker.com +ostezer@users.noreply.github.com ostezer@gmail.com +peter@scraperwiki.com p@pwaller.net +princess@docker.com jess@docker.com +proppy@aminche.com proppy@google.com +qhuang@10.0.2.15 h.huangqiang@huawei.com +resouer@gmail.com resouer@163.com +roberto_hashioka@hotmail.com roberto.hashioka@docker.com +root@vagrant-ubuntu-12.10.vagrantup.com daniel.mizyrycki@dotcloud.com +runcom@linux.com antonio.murdaca@gmail.com +runcom@redhat.com antonio.murdaca@gmail.com +runcom@users.noreply.github.com antonio.murdaca@gmail.com +s@docker.com solomon@docker.com +shawnlandden@gmail.com shawn@churchofgit.com +singh.gurjeet@gmail.com gurjeet@singh.im +sjoerd@byte.nl sjoerd-github@linuxonly.nl +smahajan@redhat.com shishir.mahajan@redhat.com +solomon.hykes@dotcloud.com solomon@docker.com +solomon@dotcloud.com solomon@docker.com +stefanb@us.ibm.com stefanb@linux.vnet.ibm.com +stevvooe@users.noreply.github.com stephen.day@docker.com +superbaloo+registrations.github@superbaloo.net baloo@gandi.net +tangicolin@gmail.com tangicolin@gmail.com +thaJeztah@users.noreply.github.com github@gone.nl +thatcher@dotcloud.com thatcher@docker.com +thatcher@gmx.net thatcher@docker.com +tibor@docker.com teabee89@gmail.com +tiborvass@users.noreply.github.com teabee89@gmail.com +timruffles@googlemail.com oi@truffles.me.uk +tintypemolly@Ohui-MacBook-Pro.local tintypemolly@gmail.com +tj@init.me tejesh.mehta@gmail.com +tristan.carel@gmail.com tristan@cogniteev.com +unclejack@users.noreply.github.com cristian.staretu@gmail.com +unclejacksons@gmail.com cristian.staretu@gmail.com +vbatts@hashbangbash.com vbatts@redhat.com +victor.vieux@dotcloud.com victor.vieux@docker.com +victor@docker.com victor.vieux@docker.com +victor@dotcloud.com 
victor.vieux@docker.com
+victorvieux@gmail.com victor.vieux@docker.com
+vieux@docker.com victor.vieux@docker.com
+vincent+github@demeester.fr vincent@sbr.pm
+vincent@bernat.im bernat@luffy.cx
+vojnovski@gmail.com viktor.vojnovski@amadeus.com
+whoshuu@gmail.com huu@prismskylabs.com
+xiaods@gmail.com dxiao@redhat.com
+xlgao@zju.edu.cn xlgao@zju.edu.cn
+yestin.sun@polyera.com sunyi0804@gmail.com
+yuchangchun1@huawei.com yuchangchun1@huawei.com
+zjaffee@us.ibm.com zij@case.edu
diff --git a/vendor/github.com/moby/moby/contrib/gitdm/domain-map b/vendor/github.com/moby/moby/contrib/gitdm/domain-map
new file mode 100644
index 000000000..17a287e97
--- /dev/null
+++ b/vendor/github.com/moby/moby/contrib/gitdm/domain-map
@@ -0,0 +1,47 @@
+#
+# Docker
+#
+
+docker.com Docker
+dotcloud.com Docker
+
+aluzzardi@gmail.com Docker
+cpuguy83@gmail.com Docker
+derek@mcgstyle.net Docker
+github@gone.nl Docker
+kencochrane@gmail.com Docker
+mickael.laventure@gmail.com Docker
+sam.alba@gmail.com Docker
+svendowideit@fosiki.com Docker
+svendowideit@home.org.au Docker
+tonistiigi@gmail.com Docker
+
+cristian.staretu@gmail.com Docker < 2015-01-01
+cristian.staretu@gmail.com Cisco
+
+github@hollensbe.org Docker < 2015-01-01
+github@hollensbe.org Cisco
+
+david.calavera@gmail.com Docker < 2016-04-01
+david.calavera@gmail.com (Unknown)
+
+madhu@socketplane.io Docker
+ejhazlett@gmail.com Docker
+ben@firshman.co.uk Docker
+
+vincent@sbr.pm (Unknown) < 2016-10-24
+vincent@sbr.pm Docker
+
+#
+# Others
+#
+
+cisco.com Cisco
+google.com Google
+ibm.com IBM
+huawei.com Huawei
+microsoft.com Microsoft
+
+redhat.com Red Hat
+mrunalp@gmail.com Red Hat
+antonio.murdaca@gmail.com Red Hat
diff --git a/vendor/github.com/moby/moby/contrib/gitdm/generate_aliases.sh b/vendor/github.com/moby/moby/contrib/gitdm/generate_aliases.sh
new file mode 100755
index 000000000..dfff5ff20
--- /dev/null
+++ b/vendor/github.com/moby/moby/contrib/gitdm/generate_aliases.sh
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+
+#
+# This script generates a gitdm compatible email aliases file from a git
+# formatted .mailmap file.
+#
+# Usage:
+#	$> ./generate_aliases <mailmap_file> > aliases
+#
+
+cat $1 | \
+	grep -v '^#' | \
+	sed 's/^[^<]*<\([^>]*\)>/\1/' | \
+	grep '<.*>' | sed -e 's/[<>]/ /g' | \
+	awk '{if ($3 != "") { print $3" "$1 } else {print $2" "$1}}' | \
+	sort | uniq
diff --git a/vendor/github.com/moby/moby/contrib/gitdm/gitdm.config b/vendor/github.com/moby/moby/contrib/gitdm/gitdm.config
new file mode 100644
index 000000000..d9b62b0b4
--- /dev/null
+++ b/vendor/github.com/moby/moby/contrib/gitdm/gitdm.config
@@ -0,0 +1,17 @@
+#
+# EmailAliases lets us cope with developers who use more
+# than one address.
+#
+EmailAliases aliases
+
+#
+# EmailMap does the main work of mapping addresses onto
+# employers.
+#
+EmailMap domain-map
+
+#
+# Use GroupMap to map a file full of addresses to the
+# same employer
+#
+# GroupMap company-Docker Docker
diff --git a/vendor/github.com/moby/moby/contrib/httpserver/Dockerfile b/vendor/github.com/moby/moby/contrib/httpserver/Dockerfile
new file mode 100644
index 000000000..747dc91bc
--- /dev/null
+++ b/vendor/github.com/moby/moby/contrib/httpserver/Dockerfile
@@ -0,0 +1,4 @@
+FROM busybox
+EXPOSE 80/tcp
+COPY httpserver .
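+# The "httpserver" binary copied above is built from server.go in this
+# directory; a minimal sketch, assuming a local Go toolchain:
+#   CGO_ENABLED=0 GOOS=linux go build -o httpserver server.go
+#   docker build -t httpserver .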
+CMD ["./httpserver"] diff --git a/vendor/github.com/moby/moby/contrib/httpserver/Dockerfile.solaris b/vendor/github.com/moby/moby/contrib/httpserver/Dockerfile.solaris new file mode 100644 index 000000000..3d0d691c1 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/httpserver/Dockerfile.solaris @@ -0,0 +1,4 @@ +FROM solaris +EXPOSE 80/tcp +COPY httpserver . +CMD ["./httpserver"] diff --git a/vendor/github.com/moby/moby/contrib/httpserver/server.go b/vendor/github.com/moby/moby/contrib/httpserver/server.go new file mode 100644 index 000000000..a75d5abb3 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/httpserver/server.go @@ -0,0 +1,12 @@ +package main + +import ( + "log" + "net/http" +) + +func main() { + fs := http.FileServer(http.Dir("/static")) + http.Handle("/", fs) + log.Panic(http.ListenAndServe(":80", nil)) +} diff --git a/vendor/github.com/moby/moby/contrib/init/openrc/docker.confd b/vendor/github.com/moby/moby/contrib/init/openrc/docker.confd new file mode 100644 index 000000000..89183de46 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/init/openrc/docker.confd @@ -0,0 +1,23 @@ +# /etc/conf.d/docker: config file for /etc/init.d/docker + +# where the docker daemon output gets piped +# this contains both stdout and stderr. If you need to separate them, +# see the settings below +#DOCKER_LOGFILE="/var/log/docker.log" + +# where the docker daemon stdout gets piped +# if this is not set, DOCKER_LOGFILE is used +#DOCKER_OUTFILE="/var/log/docker-out.log" + +# where the docker daemon stderr gets piped +# if this is not set, DOCKER_LOGFILE is used +#DOCKER_ERRFILE="/var/log/docker-err.log" + +# where docker's pid get stored +#DOCKER_PIDFILE="/run/docker.pid" + +# where the docker daemon itself is run from +#DOCKERD_BINARY="/usr/bin/dockerd" + +# any other random options you want to pass to docker +DOCKER_OPTS="" diff --git a/vendor/github.com/moby/moby/contrib/init/openrc/docker.initd b/vendor/github.com/moby/moby/contrib/init/openrc/docker.initd new file mode 100644 index 000000000..6c968f607 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/init/openrc/docker.initd @@ -0,0 +1,24 @@ +#!/sbin/openrc-run +# Copyright 1999-2013 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 + +command="${DOCKERD_BINARY:-/usr/bin/dockerd}" +pidfile="${DOCKER_PIDFILE:-/run/${RC_SVCNAME}.pid}" +command_args="-p \"${pidfile}\" ${DOCKER_OPTS}" +DOCKER_LOGFILE="${DOCKER_LOGFILE:-/var/log/${RC_SVCNAME}.log}" +DOCKER_ERRFILE="${DOCKER_ERRFILE:-${DOCKER_LOGFILE}}" +DOCKER_OUTFILE="${DOCKER_OUTFILE:-${DOCKER_LOGFILE}}" +start_stop_daemon_args="--background \ + --stderr \"${DOCKER_ERRFILE}\" --stdout \"${DOCKER_OUTFILE}\"" + +start_pre() { + checkpath -f -m 0644 -o root:docker "$DOCKER_LOGFILE" + + ulimit -n 1048576 + + # Having non-zero limits causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. 
+ ulimit -u unlimited + + return 0 +} diff --git a/vendor/github.com/moby/moby/contrib/init/systemd/REVIEWERS b/vendor/github.com/moby/moby/contrib/init/systemd/REVIEWERS new file mode 100644 index 000000000..b9ba55b3f --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/init/systemd/REVIEWERS @@ -0,0 +1,3 @@ +Lokesh Mandvekar (@lsm5) +Brandon Philips (@philips) +Jessie Frazelle (@jfrazelle) diff --git a/vendor/github.com/moby/moby/contrib/init/systemd/docker.service b/vendor/github.com/moby/moby/contrib/init/systemd/docker.service new file mode 100644 index 000000000..517463172 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/init/systemd/docker.service @@ -0,0 +1,34 @@ +[Unit] +Description=Docker Application Container Engine +Documentation=https://docs.docker.com +After=network-online.target docker.socket firewalld.service +Wants=network-online.target +Requires=docker.socket + +[Service] +Type=notify +# the default is not to use systemd for cgroups because the delegate issues still +# exists and systemd currently does not support the cgroup feature set required +# for containers run by docker +ExecStart=/usr/bin/dockerd -H fd:// +ExecReload=/bin/kill -s HUP $MAINPID +LimitNOFILE=1048576 +# Having non-zero Limit*s causes performance problems due to accounting overhead +# in the kernel. We recommend using cgroups to do container-local accounting. +LimitNPROC=infinity +LimitCORE=infinity +# Uncomment TasksMax if your systemd version supports it. +# Only systemd 226 and above support this version. +#TasksMax=infinity +TimeoutStartSec=0 +# set delegate yes so that systemd does not reset the cgroups of docker containers +Delegate=yes +# kill only the docker process, not all processes in the cgroup +KillMode=process +# restart the docker process if it exits prematurely +Restart=on-failure +StartLimitBurst=3 +StartLimitInterval=60s + +[Install] +WantedBy=multi-user.target diff --git a/vendor/github.com/moby/moby/contrib/init/systemd/docker.service.rpm b/vendor/github.com/moby/moby/contrib/init/systemd/docker.service.rpm new file mode 100644 index 000000000..6c60646b5 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/init/systemd/docker.service.rpm @@ -0,0 +1,33 @@ +[Unit] +Description=Docker Application Container Engine +Documentation=https://docs.docker.com +After=network-online.target firewalld.service +Wants=network-online.target + +[Service] +Type=notify +# the default is not to use systemd for cgroups because the delegate issues still +# exists and systemd currently does not support the cgroup feature set required +# for containers run by docker +ExecStart=/usr/bin/dockerd +ExecReload=/bin/kill -s HUP $MAINPID +# Having non-zero Limit*s causes performance problems due to accounting overhead +# in the kernel. We recommend using cgroups to do container-local accounting. +LimitNOFILE=infinity +LimitNPROC=infinity +LimitCORE=infinity +# Uncomment TasksMax if your systemd version supports it. +# Only systemd 226 and above support this version. 
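+# (for example, "systemctl --version" prints the running systemd version, so
+# you can confirm it is 226 or newer before uncommenting TasksMax)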
+#TasksMax=infinity +TimeoutStartSec=0 +# set delegate yes so that systemd does not reset the cgroups of docker containers +Delegate=yes +# kill only the docker process, not all processes in the cgroup +KillMode=process +# restart the docker process if it exits prematurely +Restart=on-failure +StartLimitBurst=3 +StartLimitInterval=60s + +[Install] +WantedBy=multi-user.target diff --git a/vendor/github.com/moby/moby/contrib/init/systemd/docker.socket b/vendor/github.com/moby/moby/contrib/init/systemd/docker.socket new file mode 100644 index 000000000..7dd95098e --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/init/systemd/docker.socket @@ -0,0 +1,12 @@ +[Unit] +Description=Docker Socket for the API +PartOf=docker.service + +[Socket] +ListenStream=/var/run/docker.sock +SocketMode=0660 +SocketUser=root +SocketGroup=docker + +[Install] +WantedBy=sockets.target diff --git a/vendor/github.com/moby/moby/contrib/init/sysvinit-debian/docker b/vendor/github.com/moby/moby/contrib/init/sysvinit-debian/docker new file mode 100755 index 000000000..9c8fa6be7 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/init/sysvinit-debian/docker @@ -0,0 +1,156 @@ +#!/bin/sh +set -e + +### BEGIN INIT INFO +# Provides: docker +# Required-Start: $syslog $remote_fs +# Required-Stop: $syslog $remote_fs +# Should-Start: cgroupfs-mount cgroup-lite +# Should-Stop: cgroupfs-mount cgroup-lite +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Create lightweight, portable, self-sufficient containers. +# Description: +# Docker is an open-source project to easily create lightweight, portable, +# self-sufficient containers from any application. The same container that a +# developer builds and tests on a laptop can run at scale, in production, on +# VMs, bare metal, OpenStack clusters, public clouds and more. +### END INIT INFO + +export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin + +BASE=docker + +# modify these in /etc/default/$BASE (/etc/default/docker) +DOCKERD=/usr/bin/dockerd +# This is the pid file managed by docker itself +DOCKER_PIDFILE=/var/run/$BASE.pid +# This is the pid file created/managed by start-stop-daemon +DOCKER_SSD_PIDFILE=/var/run/$BASE-ssd.pid +DOCKER_LOGFILE=/var/log/$BASE.log +DOCKER_OPTS= +DOCKER_DESC="Docker" + +# Get lsb functions +. /lib/lsb/init-functions + +if [ -f /etc/default/$BASE ]; then + . /etc/default/$BASE +fi + +# Check docker is present +if [ ! -x $DOCKERD ]; then + log_failure_msg "$DOCKERD not present or not executable" + exit 1 +fi + +check_init() { + # see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it directly) + if [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; then + log_failure_msg "$DOCKER_DESC is managed via upstart, try using service $BASE $1" + exit 1 + fi +} + +fail_unless_root() { + if [ "$(id -u)" != '0' ]; then + log_failure_msg "$DOCKER_DESC must be run as root" + exit 1 + fi +} + +cgroupfs_mount() { + # see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount + if grep -v '^#' /etc/fstab | grep -q cgroup \ + || [ ! -e /proc/cgroups ] \ + || [ ! -d /sys/fs/cgroup ]; then + return + fi + if ! mountpoint -q /sys/fs/cgroup; then + mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup + fi + ( + cd /sys/fs/cgroup + for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do + mkdir -p $sys + if ! mountpoint -q $sys; then + if ! 
mount -n -t cgroup -o $sys cgroup $sys; then + rmdir $sys || true + fi + fi + done + ) +} + +case "$1" in + start) + check_init + + fail_unless_root + + cgroupfs_mount + + touch "$DOCKER_LOGFILE" + chgrp docker "$DOCKER_LOGFILE" + + ulimit -n 1048576 + + # Having non-zero limits causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + if [ "$BASH" ]; then + ulimit -u unlimited + else + ulimit -p unlimited + fi + + log_begin_msg "Starting $DOCKER_DESC: $BASE" + start-stop-daemon --start --background \ + --no-close \ + --exec "$DOCKERD" \ + --pidfile "$DOCKER_SSD_PIDFILE" \ + --make-pidfile \ + -- \ + -p "$DOCKER_PIDFILE" \ + $DOCKER_OPTS \ + >> "$DOCKER_LOGFILE" 2>&1 + log_end_msg $? + ;; + + stop) + check_init + fail_unless_root + if [ -f "$DOCKER_SSD_PIDFILE" ]; then + log_begin_msg "Stopping $DOCKER_DESC: $BASE" + start-stop-daemon --stop --pidfile "$DOCKER_SSD_PIDFILE" --retry 10 + log_end_msg $? + else + log_warning_msg "Docker already stopped - file $DOCKER_SSD_PIDFILE not found." + fi + ;; + + restart) + check_init + fail_unless_root + docker_pid=`cat "$DOCKER_SSD_PIDFILE" 2>/dev/null` + [ -n "$docker_pid" ] \ + && ps -p $docker_pid > /dev/null 2>&1 \ + && $0 stop + $0 start + ;; + + force-reload) + check_init + fail_unless_root + $0 restart + ;; + + status) + check_init + status_of_proc -p "$DOCKER_SSD_PIDFILE" "$DOCKERD" "$DOCKER_DESC" + ;; + + *) + echo "Usage: service docker {start|stop|restart|status}" + exit 1 + ;; +esac diff --git a/vendor/github.com/moby/moby/contrib/init/sysvinit-debian/docker.default b/vendor/github.com/moby/moby/contrib/init/sysvinit-debian/docker.default new file mode 100644 index 000000000..c4e93199b --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/init/sysvinit-debian/docker.default @@ -0,0 +1,20 @@ +# Docker Upstart and SysVinit configuration file + +# +# THIS FILE DOES NOT APPLY TO SYSTEMD +# +# Please see the documentation for "systemd drop-ins": +# https://docs.docker.com/engine/admin/systemd/ +# + +# Customize location of Docker binary (especially for development testing). +#DOCKERD="/usr/local/bin/dockerd" + +# Use DOCKER_OPTS to modify the daemon startup options. +#DOCKER_OPTS="--dns 8.8.8.8 --dns 8.8.4.4" + +# If you need Docker to use an HTTP proxy, it can also be specified here. +#export http_proxy="http://127.0.0.1:3128/" + +# This is also a handy place to tweak where Docker's temporary files go. +#export DOCKER_TMPDIR="/mnt/bigdrive/docker-tmp" diff --git a/vendor/github.com/moby/moby/contrib/init/sysvinit-redhat/docker b/vendor/github.com/moby/moby/contrib/init/sysvinit-redhat/docker new file mode 100755 index 000000000..df9b02a2a --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/init/sysvinit-redhat/docker @@ -0,0 +1,153 @@ +#!/bin/sh +# +# /etc/rc.d/init.d/docker +# +# Daemon for docker.com +# +# chkconfig: 2345 95 95 +# description: Daemon for docker.com + +### BEGIN INIT INFO +# Provides: docker +# Required-Start: $network cgconfig +# Required-Stop: +# Should-Start: +# Should-Stop: +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: start and stop docker +# Description: Daemon for docker.com +### END INIT INFO + +# Source function library. +. /etc/rc.d/init.d/functions + +prog="docker" +unshare=/usr/bin/unshare +exec="/usr/bin/dockerd" +pidfile="/var/run/$prog.pid" +lockfile="/var/lock/subsys/$prog" +logfile="/var/log/$prog" + +[ -e /etc/sysconfig/$prog ] && . 
/etc/sysconfig/$prog + +prestart() { + service cgconfig status > /dev/null + + if [[ $? != 0 ]]; then + service cgconfig start + fi + +} + +start() { + if [ ! -x $exec ]; then + if [ ! -e $exec ]; then + echo "Docker executable $exec not found" + else + echo "You do not have permission to execute the Docker executable $exec" + fi + exit 5 + fi + + check_for_cleanup + + if ! [ -f $pidfile ]; then + prestart + printf "Starting $prog:\t" + echo "\n$(date)\n" >> $logfile + "$unshare" -m -- $exec $other_args >> $logfile 2>&1 & + pid=$! + touch $lockfile + # wait up to 10 seconds for the pidfile to exist. see + # https://github.com/docker/docker/issues/5359 + tries=0 + while [ ! -f $pidfile -a $tries -lt 10 ]; do + sleep 1 + tries=$((tries + 1)) + echo -n '.' + done + if [ ! -f $pidfile ]; then + failure + echo + exit 1 + fi + success + echo + else + failure + echo + printf "$pidfile still exists...\n" + exit 7 + fi +} + +stop() { + echo -n $"Stopping $prog: " + killproc -p $pidfile -d 300 $prog + retval=$? + echo + [ $retval -eq 0 ] && rm -f $lockfile + return $retval +} + +restart() { + stop + start +} + +reload() { + restart +} + +force_reload() { + restart +} + +rh_status() { + status -p $pidfile $prog +} + +rh_status_q() { + rh_status >/dev/null 2>&1 +} + + +check_for_cleanup() { + if [ -f ${pidfile} ]; then + /bin/ps -fp $(cat ${pidfile}) > /dev/null || rm ${pidfile} + fi +} + +case "$1" in + start) + rh_status_q && exit 0 + $1 + ;; + stop) + rh_status_q || exit 0 + $1 + ;; + restart) + $1 + ;; + reload) + rh_status_q || exit 7 + $1 + ;; + force-reload) + force_reload + ;; + status) + rh_status + ;; + condrestart|try-restart) + rh_status_q || exit 0 + restart + ;; + *) + echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}" + exit 2 +esac + +exit $? diff --git a/vendor/github.com/moby/moby/contrib/init/sysvinit-redhat/docker.sysconfig b/vendor/github.com/moby/moby/contrib/init/sysvinit-redhat/docker.sysconfig new file mode 100644 index 000000000..0864b3d77 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/init/sysvinit-redhat/docker.sysconfig @@ -0,0 +1,7 @@ +# /etc/sysconfig/docker +# +# Other arguments to pass to the docker daemon process +# These will be parsed by the sysv initscript and appended +# to the arguments list passed to docker daemon + +other_args="" diff --git a/vendor/github.com/moby/moby/contrib/init/upstart/REVIEWERS b/vendor/github.com/moby/moby/contrib/init/upstart/REVIEWERS new file mode 100644 index 000000000..03ee2dde3 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/init/upstart/REVIEWERS @@ -0,0 +1,2 @@ +Tianon Gravi (@tianon) +Jessie Frazelle (@jfrazelle) diff --git a/vendor/github.com/moby/moby/contrib/init/upstart/docker.conf b/vendor/github.com/moby/moby/contrib/init/upstart/docker.conf new file mode 100644 index 000000000..d58f7d6ac --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/init/upstart/docker.conf @@ -0,0 +1,72 @@ +description "Docker daemon" + +start on (filesystem and net-device-up IFACE!=lo) +stop on runlevel [!2345] + +limit nofile 524288 1048576 + +# Having non-zero limits causes performance problems due to accounting overhead +# in the kernel. We recommend using cgroups to do container-local accounting. +limit nproc unlimited unlimited + +respawn + +kill timeout 20 + +pre-start script + # see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount + if grep -v '^#' /etc/fstab | grep -q cgroup \ + || [ ! -e /proc/cgroups ] \ + || [ ! 
-d /sys/fs/cgroup ]; then + exit 0 + fi + if ! mountpoint -q /sys/fs/cgroup; then + mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup + fi + ( + cd /sys/fs/cgroup + for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do + mkdir -p $sys + if ! mountpoint -q $sys; then + if ! mount -n -t cgroup -o $sys cgroup $sys; then + rmdir $sys || true + fi + fi + done + ) +end script + +script + # modify these in /etc/default/$UPSTART_JOB (/etc/default/docker) + DOCKERD=/usr/bin/dockerd + DOCKER_OPTS= + if [ -f /etc/default/$UPSTART_JOB ]; then + . /etc/default/$UPSTART_JOB + fi + exec "$DOCKERD" $DOCKER_OPTS --raw-logs +end script + +# Don't emit "started" event until docker.sock is ready. +# See https://github.com/docker/docker/issues/6647 +post-start script + DOCKER_OPTS= + DOCKER_SOCKET= + if [ -f /etc/default/$UPSTART_JOB ]; then + . /etc/default/$UPSTART_JOB + fi + + if ! printf "%s" "$DOCKER_OPTS" | grep -qE -e '-H|--host'; then + DOCKER_SOCKET=/var/run/docker.sock + else + DOCKER_SOCKET=$(printf "%s" "$DOCKER_OPTS" | grep -oP -e '(-H|--host)\W*unix://\K(\S+)' | sed 1q) + fi + + if [ -n "$DOCKER_SOCKET" ]; then + while ! [ -e "$DOCKER_SOCKET" ]; do + initctl status $UPSTART_JOB | grep -qE "(stop|respawn)/" && exit 1 + echo "Waiting for $DOCKER_SOCKET" + sleep 0.1 + done + echo "$DOCKER_SOCKET is up" + fi +end script diff --git a/vendor/github.com/moby/moby/contrib/mac-install-bundle.sh b/vendor/github.com/moby/moby/contrib/mac-install-bundle.sh new file mode 100755 index 000000000..2110d044d --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/mac-install-bundle.sh @@ -0,0 +1,45 @@ +#!/bin/sh + +set -e + +errexit() { + echo "$1" + exit 1 +} + +[ "$(uname -s)" == "Darwin" ] || errexit "This script can only be used on a Mac" + +[ $# -eq 1 ] || errexit "Usage: $0 install|undo" + +BUNDLE="bundles/$(cat VERSION)" +BUNDLE_PATH="$PWD/$BUNDLE" +CLIENT_PATH="$BUNDLE_PATH/cross/darwin/amd64/docker" +DATABASE="$HOME/Library/Containers/com.docker.docker/Data/database" +DATABASE_KEY="$DATABASE/com.docker.driver.amd64-linux/bundle" + +[ -d "$DATABASE" ] || errexit "Docker for Mac must be installed for this script" + +case "$1" in +"install") + [ -d "$BUNDLE" ] || errexit "cannot find bundle $BUNDLE" + [ -e "$CLIENT_PATH" ] || errexit "you need to run make cross first" + [ -e "$BUNDLE/binary-daemon/dockerd" ] || errexit "you need to build binaries first" + [ -f "$BUNDLE/binary-client/docker" ] || errexit "you need to build binaries first" + git -C "$DATABASE" reset --hard >/dev/null + echo "$BUNDLE_PATH" > "$DATABASE_KEY" + git -C "$DATABASE" add "$DATABASE_KEY" + git -C "$DATABASE" commit -m "update bundle to $BUNDLE_PATH" + rm -f /usr/local/bin/docker + cp "$CLIENT_PATH" /usr/local/bin + echo "Bundle installed. Restart Docker to use. To uninstall, reset Docker to factory defaults." + ;; +"undo") + git -C "$DATABASE" reset --hard >/dev/null + [ -f "$DATABASE_KEY" ] || errexit "bundle not set" + git -C "$DATABASE" rm "$DATABASE_KEY" + git -C "$DATABASE" commit -m "remove bundle" + rm -f /usr/local/bin/docker + ln -s "$HOME/Library/Group Containers/group.com.docker/bin/docker" /usr/local/bin + echo "Bundle removed. Using dev versions may cause issues, a reset to factory defaults is recommended." 
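+	# (the database is a git checkout, so e.g. "git -C $DATABASE log --oneline"
+	# lists the bundle changes committed by this script)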
+ ;; +esac diff --git a/vendor/github.com/moby/moby/contrib/mkimage-alpine.sh b/vendor/github.com/moby/moby/contrib/mkimage-alpine.sh new file mode 100755 index 000000000..a271effaa --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/mkimage-alpine.sh @@ -0,0 +1,90 @@ +#!/bin/sh + +set -e + +[ $(id -u) -eq 0 ] || { + printf >&2 '%s requires root\n' "$0" + exit 1 +} + +usage() { + printf >&2 '%s: [-r release] [-m mirror] [-s] [-c additional repository] [-a arch]\n' "$0" + exit 1 +} + +tmp() { + TMP=$(mktemp -d ${TMPDIR:-/var/tmp}/alpine-docker-XXXXXXXXXX) + ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/alpine-docker-rootfs-XXXXXXXXXX) + trap "rm -rf $TMP $ROOTFS" EXIT TERM INT +} + +apkv() { + curl -sSL $MAINREPO/$ARCH/APKINDEX.tar.gz | tar -Oxz | + grep --text '^P:apk-tools-static$' -A1 | tail -n1 | cut -d: -f2 +} + +getapk() { + curl -sSL $MAINREPO/$ARCH/apk-tools-static-$(apkv).apk | + tar -xz -C $TMP sbin/apk.static +} + +mkbase() { + $TMP/sbin/apk.static --repository $MAINREPO --update-cache --allow-untrusted \ + --root $ROOTFS --initdb add alpine-base +} + +conf() { + printf '%s\n' $MAINREPO > $ROOTFS/etc/apk/repositories + printf '%s\n' $ADDITIONALREPO >> $ROOTFS/etc/apk/repositories +} + +pack() { + local id + id=$(tar --numeric-owner -C $ROOTFS -c . | docker import - alpine:$REL) + + docker tag $id alpine:latest + docker run -i -t --rm alpine printf 'alpine:%s with id=%s created!\n' $REL $id +} + +save() { + [ $SAVE -eq 1 ] || return + + tar --numeric-owner -C $ROOTFS -c . | xz > rootfs.tar.xz +} + +while getopts "hr:m:sc:a:" opt; do + case $opt in + r) + REL=$OPTARG + ;; + m) + MIRROR=$OPTARG + ;; + s) + SAVE=1 + ;; + c) + ADDITIONALREPO=$OPTARG + ;; + a) + ARCH=$OPTARG + ;; + *) + usage + ;; + esac +done + +REL=${REL:-edge} +MIRROR=${MIRROR:-http://nl.alpinelinux.org/alpine} +SAVE=${SAVE:-0} +MAINREPO=$MIRROR/$REL/main +ADDITIONALREPO=$MIRROR/$REL/${ADDITIONALREPO:-community} +ARCH=${ARCH:-$(uname -m)} + +tmp +getapk +mkbase +conf +pack +save diff --git a/vendor/github.com/moby/moby/contrib/mkimage-arch-pacman.conf b/vendor/github.com/moby/moby/contrib/mkimage-arch-pacman.conf new file mode 100644 index 000000000..45fe03dc9 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/mkimage-arch-pacman.conf @@ -0,0 +1,92 @@ +# +# /etc/pacman.conf +# +# See the pacman.conf(5) manpage for option and repository directives + +# +# GENERAL OPTIONS +# +[options] +# The following paths are commented out with their default values listed. +# If you wish to use different paths, uncomment and update the paths. +#RootDir = / +#DBPath = /var/lib/pacman/ +#CacheDir = /var/cache/pacman/pkg/ +#LogFile = /var/log/pacman.log +#GPGDir = /etc/pacman.d/gnupg/ +HoldPkg = pacman glibc +#XferCommand = /usr/bin/curl -C - -f %u > %o +#XferCommand = /usr/bin/wget --passive-ftp -c -O %o %u +#CleanMethod = KeepInstalled +#UseDelta = 0.7 +Architecture = auto + +# Pacman won't upgrade packages listed in IgnorePkg and members of IgnoreGroup +#IgnorePkg = +#IgnoreGroup = + +#NoUpgrade = +#NoExtract = + +# Misc options +#UseSyslog +#Color +#TotalDownload +# We cannot check disk space from within a chroot environment +#CheckSpace +#VerbosePkgLists + +# By default, pacman accepts packages signed by keys that its local keyring +# trusts (see pacman-key and its man page), as well as unsigned packages. 
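+# (for reference: "Required DatabaseOptional" makes package signature checks
+# mandatory, while repository database signatures are verified only if present)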
+SigLevel = Required DatabaseOptional +LocalFileSigLevel = Optional +#RemoteFileSigLevel = Required + +# NOTE: You must run `pacman-key --init` before first using pacman; the local +# keyring can then be populated with the keys of all official Arch Linux +# packagers with `pacman-key --populate archlinux`. + +# +# REPOSITORIES +# - can be defined here or included from another file +# - pacman will search repositories in the order defined here +# - local/custom mirrors can be added here or in separate files +# - repositories listed first will take precedence when packages +# have identical names, regardless of version number +# - URLs will have $repo replaced by the name of the current repo +# - URLs will have $arch replaced by the name of the architecture +# +# Repository entries are of the format: +# [repo-name] +# Server = ServerName +# Include = IncludePath +# +# The header [repo-name] is crucial - it must be present and +# uncommented to enable the repo. +# + +# The testing repositories are disabled by default. To enable, uncomment the +# repo name header and Include lines. You can add preferred servers immediately +# after the header, and they will be used before the default mirrors. + +#[testing] +#Include = /etc/pacman.d/mirrorlist + +[core] +Include = /etc/pacman.d/mirrorlist + +[extra] +Include = /etc/pacman.d/mirrorlist + +#[community-testing] +#Include = /etc/pacman.d/mirrorlist + +[community] +Include = /etc/pacman.d/mirrorlist + +# An example of a custom package repository. See the pacman manpage for +# tips on creating your own repositories. +#[custom] +#SigLevel = Optional TrustAll +#Server = file:///home/custompkgs + diff --git a/vendor/github.com/moby/moby/contrib/mkimage-arch.sh b/vendor/github.com/moby/moby/contrib/mkimage-arch.sh new file mode 100755 index 000000000..f94117712 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/mkimage-arch.sh @@ -0,0 +1,126 @@ +#!/usr/bin/env bash +# Generate a minimal filesystem for archlinux and load it into the local +# docker as "archlinux" +# requires root +set -e + +hash pacstrap &>/dev/null || { + echo "Could not find pacstrap. Run pacman -S arch-install-scripts" + exit 1 +} + +hash expect &>/dev/null || { + echo "Could not find expect. Run pacman -S expect" + exit 1 +} + + +export LANG="C.UTF-8" + +ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/rootfs-archlinux-XXXXXXXXXX) +chmod 755 $ROOTFS + +# packages to ignore for space savings +PKGIGNORE=( + cryptsetup + device-mapper + dhcpcd + iproute2 + jfsutils + linux + lvm2 + man-db + man-pages + mdadm + nano + netctl + openresolv + pciutils + pcmciautils + reiserfsprogs + s-nail + systemd-sysvcompat + usbutils + vi + xfsprogs +) +IFS=',' +PKGIGNORE="${PKGIGNORE[*]}" +unset IFS + +arch="$(uname -m)" +case "$arch" in + armv*) + if pacman -Q archlinuxarm-keyring >/dev/null 2>&1; then + pacman-key --init + pacman-key --populate archlinuxarm + else + echo "Could not find archlinuxarm-keyring. Please, install it and run pacman-key --populate archlinuxarm" + exit 1 + fi + PACMAN_CONF=$(mktemp ${TMPDIR:-/var/tmp}/pacman-conf-archlinux-XXXXXXXXX) + version="$(echo $arch | cut -c 5)" + sed "s/Architecture = armv/Architecture = armv${version}h/g" './mkimage-archarm-pacman.conf' > "${PACMAN_CONF}" + PACMAN_MIRRORLIST='Server = http://mirror.archlinuxarm.org/$arch/$repo' + PACMAN_EXTRA_PKGS='archlinuxarm-keyring' + EXPECT_TIMEOUT=1800 # Most armv* based devices can be very slow (e.g. 
RPiv1) + ARCH_KEYRING=archlinuxarm + DOCKER_IMAGE_NAME="armv${version}h/archlinux" + ;; + *) + PACMAN_CONF='./mkimage-arch-pacman.conf' + PACMAN_MIRRORLIST='Server = https://mirrors.kernel.org/archlinux/$repo/os/$arch' + PACMAN_EXTRA_PKGS='' + EXPECT_TIMEOUT=60 + ARCH_KEYRING=archlinux + DOCKER_IMAGE_NAME=archlinux + ;; +esac + +export PACMAN_MIRRORLIST + +expect < $ROOTFS/etc/locale.gen +arch-chroot $ROOTFS locale-gen +arch-chroot $ROOTFS /bin/sh -c 'echo $PACMAN_MIRRORLIST > /etc/pacman.d/mirrorlist' + +# udev doesn't work in containers, rebuild /dev +DEV=$ROOTFS/dev +rm -rf $DEV +mkdir -p $DEV +mknod -m 666 $DEV/null c 1 3 +mknod -m 666 $DEV/zero c 1 5 +mknod -m 666 $DEV/random c 1 8 +mknod -m 666 $DEV/urandom c 1 9 +mkdir -m 755 $DEV/pts +mkdir -m 1777 $DEV/shm +mknod -m 666 $DEV/tty c 5 0 +mknod -m 600 $DEV/console c 5 1 +mknod -m 666 $DEV/tty0 c 4 0 +mknod -m 666 $DEV/full c 1 7 +mknod -m 600 $DEV/initctl p +mknod -m 666 $DEV/ptmx c 5 2 +ln -sf /proc/self/fd $DEV/fd + +tar --numeric-owner --xattrs --acls -C $ROOTFS -c . | docker import - $DOCKER_IMAGE_NAME +docker run --rm -t $DOCKER_IMAGE_NAME echo Success. +rm -rf $ROOTFS diff --git a/vendor/github.com/moby/moby/contrib/mkimage-archarm-pacman.conf b/vendor/github.com/moby/moby/contrib/mkimage-archarm-pacman.conf new file mode 100644 index 000000000..f4b45f54d --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/mkimage-archarm-pacman.conf @@ -0,0 +1,98 @@ +# +# /etc/pacman.conf +# +# See the pacman.conf(5) manpage for option and repository directives + +# +# GENERAL OPTIONS +# +[options] +# The following paths are commented out with their default values listed. +# If you wish to use different paths, uncomment and update the paths. +#RootDir = / +#DBPath = /var/lib/pacman/ +#CacheDir = /var/cache/pacman/pkg/ +#LogFile = /var/log/pacman.log +#GPGDir = /etc/pacman.d/gnupg/ +HoldPkg = pacman glibc +#XferCommand = /usr/bin/curl -C - -f %u > %o +#XferCommand = /usr/bin/wget --passive-ftp -c -O %o %u +#CleanMethod = KeepInstalled +#UseDelta = 0.7 +Architecture = armv + +# Pacman won't upgrade packages listed in IgnorePkg and members of IgnoreGroup +#IgnorePkg = +#IgnoreGroup = + +#NoUpgrade = +#NoExtract = + +# Misc options +#UseSyslog +#Color +#TotalDownload +# We cannot check disk space from within a chroot environment +#CheckSpace +#VerbosePkgLists + +# By default, pacman accepts packages signed by keys that its local keyring +# trusts (see pacman-key and its man page), as well as unsigned packages. +SigLevel = Required DatabaseOptional +LocalFileSigLevel = Optional +#RemoteFileSigLevel = Required + +# NOTE: You must run `pacman-key --init` before first using pacman; the local +# keyring can then be populated with the keys of all official Arch Linux +# packagers with `pacman-key --populate archlinux`. + +# +# REPOSITORIES +# - can be defined here or included from another file +# - pacman will search repositories in the order defined here +# - local/custom mirrors can be added here or in separate files +# - repositories listed first will take precedence when packages +# have identical names, regardless of version number +# - URLs will have $repo replaced by the name of the current repo +# - URLs will have $arch replaced by the name of the architecture +# +# Repository entries are of the format: +# [repo-name] +# Server = ServerName +# Include = IncludePath +# +# The header [repo-name] is crucial - it must be present and +# uncommented to enable the repo. +# + +# The testing repositories are disabled by default. 
To enable, uncomment the +# repo name header and Include lines. You can add preferred servers immediately +# after the header, and they will be used before the default mirrors. + +#[testing] +#Include = /etc/pacman.d/mirrorlist + +[core] +Include = /etc/pacman.d/mirrorlist + +[extra] +Include = /etc/pacman.d/mirrorlist + +#[community-testing] +#Include = /etc/pacman.d/mirrorlist + +[community] +Include = /etc/pacman.d/mirrorlist + +[alarm] +Include = /etc/pacman.d/mirrorlist + +[aur] +Include = /etc/pacman.d/mirrorlist + +# An example of a custom package repository. See the pacman manpage for +# tips on creating your own repositories. +#[custom] +#SigLevel = Optional TrustAll +#Server = file:///home/custompkgs + diff --git a/vendor/github.com/moby/moby/contrib/mkimage-crux.sh b/vendor/github.com/moby/moby/contrib/mkimage-crux.sh new file mode 100755 index 000000000..3f0bdcae3 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/mkimage-crux.sh @@ -0,0 +1,75 @@ +#!/usr/bin/env bash +# Generate a minimal filesystem for CRUX/Linux and load it into the local +# docker as "cruxlinux" +# requires root and the crux iso (http://crux.nu) + +set -e + +die () { + echo >&2 "$@" + exit 1 +} + +[ "$#" -eq 1 ] || die "1 argument(s) required, $# provided. Usage: ./mkimage-crux.sh /path/to/iso" + +ISO=${1} + +ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/rootfs-crux-XXXXXXXXXX) +CRUX=$(mktemp -d ${TMPDIR:-/var/tmp}/crux-XXXXXXXXXX) +TMP=$(mktemp -d ${TMPDIR:-/var/tmp}/XXXXXXXXXX) + +VERSION=$(basename --suffix=.iso $ISO | sed 's/[^0-9.]*\([0-9.]*\).*/\1/') + +# Mount the ISO +mount -o ro,loop $ISO $CRUX + +# Extract pkgutils +tar -C $TMP -xf $CRUX/tools/pkgutils#*.pkg.tar.gz + +# Put pkgadd in the $PATH +export PATH="$TMP/usr/bin:$PATH" + +# Install core packages +mkdir -p $ROOTFS/var/lib/pkg +touch $ROOTFS/var/lib/pkg/db +for pkg in $CRUX/crux/core/*; do + pkgadd -r $ROOTFS $pkg +done + +# Remove agetty and inittab config +if (grep agetty ${ROOTFS}/etc/inittab 2>&1 > /dev/null); then + echo "Removing agetty from /etc/inittab ..." + chroot ${ROOTFS} sed -i -e "/agetty/d" /etc/inittab + chroot ${ROOTFS} sed -i -e "/shutdown/d" /etc/inittab + chroot ${ROOTFS} sed -i -e "/^$/N;/^\n$/d" /etc/inittab +fi + +# Remove kernel source +rm -rf $ROOTFS/usr/src/* + +# udev doesn't work in containers, rebuild /dev +DEV=$ROOTFS/dev +rm -rf $DEV +mkdir -p $DEV +mknod -m 666 $DEV/null c 1 3 +mknod -m 666 $DEV/zero c 1 5 +mknod -m 666 $DEV/random c 1 8 +mknod -m 666 $DEV/urandom c 1 9 +mkdir -m 755 $DEV/pts +mkdir -m 1777 $DEV/shm +mknod -m 666 $DEV/tty c 5 0 +mknod -m 600 $DEV/console c 5 1 +mknod -m 666 $DEV/tty0 c 4 0 +mknod -m 666 $DEV/full c 1 7 +mknod -m 600 $DEV/initctl p +mknod -m 666 $DEV/ptmx c 5 2 + +IMAGE_ID=$(tar --numeric-owner -C $ROOTFS -c . | docker import - crux:$VERSION) +docker tag $IMAGE_ID crux:latest +docker run -i -t crux echo Success. + +# Cleanup +umount $CRUX +rm -rf $ROOTFS +rm -rf $CRUX +rm -rf $TMP diff --git a/vendor/github.com/moby/moby/contrib/mkimage-pld.sh b/vendor/github.com/moby/moby/contrib/mkimage-pld.sh new file mode 100755 index 000000000..615c2030a --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/mkimage-pld.sh @@ -0,0 +1,73 @@ +#!/bin/sh +# +# Generate a minimal filesystem for PLD Linux and load it into the local docker as "pld". 
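+# (example invocation, assuming a PLD host with poldek available:
+#   sudo ./mkimage-pld.sh && docker run -it --rm pld /bin/sh)
+#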
+# https://www.pld-linux.org/packages/docker
+#
+set -e
+
+if [ "$(id -u)" != "0" ]; then
+	echo >&2 "$0: requires root"
+	exit 1
+fi
+
+image_name=pld
+
+tmpdir=$(mktemp -d ${TMPDIR:-/var/tmp}/pld-docker-XXXXXX)
+root=$tmpdir/rootfs
+install -d -m 755 $root
+
+# to clean up:
+docker rmi $image_name || :
+
+# build
+rpm -r $root --initdb
+
+set +e
+install -d $root/dev/pts
+mknod $root/dev/random c 1 8 -m 644
+mknod $root/dev/urandom c 1 9 -m 644
+mknod $root/dev/full c 1 7 -m 666
+mknod $root/dev/null c 1 3 -m 666
+mknod $root/dev/zero c 1 5 -m 666
+mknod $root/dev/console c 5 1 -m 660
+set -e
+
+poldek -r $root --up --noask -u \
+	--noignore \
+	-O 'rpmdef=_install_langs C' \
+	-O 'rpmdef=_excludedocs 1' \
+	vserver-packages \
+	bash iproute2 coreutils grep poldek
+
+# fix netsharedpath, so containers would be able to install when some paths are mounted
+sed -i -e 's;^#%_netsharedpath.*;%_netsharedpath /dev/shm:/sys:/proc:/dev:/etc/hostname;' $root/etc/rpm/macros
+
+# no need for alternatives
+poldek-config -c $root/etc/poldek/poldek.conf ignore systemd-init
+
+# this makes initscripts to believe network is up
+touch $root/var/lock/subsys/network
+
+# cleanup large optional packages
+remove_packages="ca-certificates"
+for pkg in $remove_packages; do
+	rpm -r $root -q $pkg && rpm -r $root -e $pkg --nodeps
+done
+
+# cleanup more
+rm -v $root/etc/ld.so.cache
+rm -rfv $root/var/cache/hrmib/*
+rm -rfv $root/usr/share/man/man?/*
+rm -rfv $root/usr/share/locale/*/
+rm -rfv $root/usr/share/help/*/
+rm -rfv $root/usr/share/doc/*
+rm -rfv $root/usr/src/examples/*
+rm -rfv $root/usr/share/pixmaps/*
+
+# and import
+tar --numeric-owner --xattrs --acls -C $root -c . | docker import - $image_name
+
+# and test
+docker run -i -u root $image_name /bin/echo Success.
+
+rm -r $tmpdir
diff --git a/vendor/github.com/moby/moby/contrib/mkimage-yum.sh b/vendor/github.com/moby/moby/contrib/mkimage-yum.sh
new file mode 100755
index 000000000..29da17048
--- /dev/null
+++ b/vendor/github.com/moby/moby/contrib/mkimage-yum.sh
@@ -0,0 +1,136 @@
+#!/usr/bin/env bash
+#
+# Create a base CentOS Docker image.
+#
+# This script is useful on systems with yum installed (e.g., building
+# a CentOS image on CentOS). See contrib/mkimage-rinse.sh for a way
+# to build CentOS images on other systems.
+
+set -e
+
+usage() {
+    cat <<EOOPTS
+$(basename $0) [OPTIONS] <name>
+OPTIONS:
+  -p "<packages>"  The list of packages to install in the container.
+                   The default is blank.
+  -g "<groups>"    The groups of packages to install in the container.
+                   The default is "Core".
+  -y <yumconf>     The path to the yum config to install packages from. The
+                   default is /etc/yum.conf for Centos/RHEL and /etc/dnf/dnf.conf for Fedora
+EOOPTS
+    exit 1
+}
+
+# option defaults
+yum_config=/etc/yum.conf
+if [ -f /etc/dnf/dnf.conf ] && command -v dnf &> /dev/null; then
+    yum_config=/etc/dnf/dnf.conf
+    alias yum=dnf
+fi
+install_groups="Core"
+while getopts ":y:p:g:h" opt; do
+    case $opt in
+        y)
+            yum_config=$OPTARG
+            ;;
+        h)
+            usage
+            ;;
+        p)
+            install_packages="$OPTARG"
+            ;;
+        g)
+            install_groups="$OPTARG"
+            ;;
+        \?)
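+            # any other option is unknown: report it and show usage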
+ echo "Invalid option: -$OPTARG" + usage + ;; + esac +done +shift $((OPTIND - 1)) +name=$1 + +if [[ -z $name ]]; then + usage +fi + +target=$(mktemp -d --tmpdir $(basename $0).XXXXXX) + +set -x + +mkdir -m 755 "$target"/dev +mknod -m 600 "$target"/dev/console c 5 1 +mknod -m 600 "$target"/dev/initctl p +mknod -m 666 "$target"/dev/full c 1 7 +mknod -m 666 "$target"/dev/null c 1 3 +mknod -m 666 "$target"/dev/ptmx c 5 2 +mknod -m 666 "$target"/dev/random c 1 8 +mknod -m 666 "$target"/dev/tty c 5 0 +mknod -m 666 "$target"/dev/tty0 c 4 0 +mknod -m 666 "$target"/dev/urandom c 1 9 +mknod -m 666 "$target"/dev/zero c 1 5 + +# amazon linux yum will fail without vars set +if [ -d /etc/yum/vars ]; then + mkdir -p -m 755 "$target"/etc/yum + cp -a /etc/yum/vars "$target"/etc/yum/ +fi + +if [[ -n "$install_groups" ]]; +then + yum -c "$yum_config" --installroot="$target" --releasever=/ --setopt=tsflags=nodocs \ + --setopt=group_package_types=mandatory -y groupinstall $install_groups +fi + +if [[ -n "$install_packages" ]]; +then + yum -c "$yum_config" --installroot="$target" --releasever=/ --setopt=tsflags=nodocs \ + --setopt=group_package_types=mandatory -y install $install_packages +fi + +yum -c "$yum_config" --installroot="$target" -y clean all + +cat > "$target"/etc/sysconfig/network <&2 "warning: cannot autodetect OS version, using '$name' as tag" + version=$name +fi + +tar --numeric-owner -c -C "$target" . | docker import - $name:$version + +docker run -i -t --rm $name:$version /bin/bash -c 'echo success' + +rm -rf "$target" diff --git a/vendor/github.com/moby/moby/contrib/mkimage.sh b/vendor/github.com/moby/moby/contrib/mkimage.sh new file mode 100755 index 000000000..13298c803 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/mkimage.sh @@ -0,0 +1,128 @@ +#!/usr/bin/env bash +set -e + +mkimg="$(basename "$0")" + +usage() { + echo >&2 "usage: $mkimg [-d dir] [-t tag] [--compression algo| --no-compression] script [script-args]" + echo >&2 " ie: $mkimg -t someuser/debian debootstrap --variant=minbase jessie" + echo >&2 " $mkimg -t someuser/ubuntu debootstrap --include=ubuntu-minimal --components=main,universe trusty" + echo >&2 " $mkimg -t someuser/busybox busybox-static" + echo >&2 " $mkimg -t someuser/centos:5 rinse --distribution centos-5" + echo >&2 " $mkimg -t someuser/mageia:4 mageia-urpmi --version=4" + echo >&2 " $mkimg -t someuser/mageia:4 mageia-urpmi --version=4 --mirror=http://somemirror/" + echo >&2 " $mkimg -t someuser/solaris solaris" + exit 1 +} + +scriptDir="$(dirname "$(readlink -f "$BASH_SOURCE")")/mkimage" + +os= +os=$(uname -o) + +# set up path to gnu tools if solaris +[[ $os == "Solaris" ]] && export PATH=/usr/gnu/bin:$PATH +# TODO check for gnu-tar, gnu-getopt + +# TODO requires root/sudo due to some pkg operations. sigh. +[[ $os == "Solaris" && $EUID != "0" ]] && echo >&2 "image create on Solaris requires superuser privilege" + +optTemp=$(getopt --options '+d:t:c:hC' --longoptions 'dir:,tag:,compression:,no-compression,help' --name "$mkimg" -- "$@") +eval set -- "$optTemp" +unset optTemp + +dir= +tag= +compression="auto" +while true; do + case "$1" in + -d|--dir) dir="$2" ; shift 2 ;; + -t|--tag) tag="$2" ; shift 2 ;; + --compression) compression="$2" ; shift 2 ;; + --no-compression) compression="none" ; shift 1 ;; + -h|--help) usage ;; + --) shift ; break ;; + esac +done + +script="$1" +[ "$script" ] || usage +shift + +if [ "$compression" == 'auto' ] || [ -z "$compression" ] +then + compression='xz' +fi + +[ "$compression" == 'none' ] && compression='' + +if [ ! 
-x "$scriptDir/$script" ]; then + echo >&2 "error: $script does not exist or is not executable" + echo >&2 " see $scriptDir for possible scripts" + exit 1 +fi + +# don't mistake common scripts like .febootstrap-minimize as image-creators +if [[ "$script" == .* ]]; then + echo >&2 "error: $script is a script helper, not a script" + echo >&2 " see $scriptDir for possible scripts" + exit 1 +fi + +delDir= +if [ -z "$dir" ]; then + dir="$(mktemp -d ${TMPDIR:-/var/tmp}/docker-mkimage.XXXXXXXXXX)" + delDir=1 +fi + +rootfsDir="$dir/rootfs" +( set -x; mkdir -p "$rootfsDir" ) + +# pass all remaining arguments to $script +"$scriptDir/$script" "$rootfsDir" "$@" + +# Docker mounts tmpfs at /dev and procfs at /proc so we can remove them +rm -rf "$rootfsDir/dev" "$rootfsDir/proc" +mkdir -p "$rootfsDir/dev" "$rootfsDir/proc" + +# make sure /etc/resolv.conf has something useful in it +mkdir -p "$rootfsDir/etc" +cat > "$rootfsDir/etc/resolv.conf" <<'EOF' +nameserver 8.8.8.8 +nameserver 8.8.4.4 +EOF + +tarFile="$dir/rootfs.tar${compression:+.$compression}" +touch "$tarFile" + +( + set -x + tar --numeric-owner --create --auto-compress --file "$tarFile" --directory "$rootfsDir" --transform='s,^./,,' . +) + +echo >&2 "+ cat > '$dir/Dockerfile'" +cat > "$dir/Dockerfile" <> "$dir/Dockerfile" ) + break + fi +done + +( set -x; rm -rf "$rootfsDir" ) + +if [ "$tag" ]; then + ( set -x; docker build -t "$tag" "$dir" ) +elif [ "$delDir" ]; then + # if we didn't specify a tag and we're going to delete our dir, let's just build an untagged image so that we did _something_ + ( set -x; docker build "$dir" ) +fi + +if [ "$delDir" ]; then + ( set -x; rm -rf "$dir" ) +fi diff --git a/vendor/github.com/moby/moby/contrib/mkimage/.febootstrap-minimize b/vendor/github.com/moby/moby/contrib/mkimage/.febootstrap-minimize new file mode 100755 index 000000000..7749e63fb --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/mkimage/.febootstrap-minimize @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +set -e + +rootfsDir="$1" +shift + +( + cd "$rootfsDir" + + # effectively: febootstrap-minimize --keep-zoneinfo --keep-rpmdb --keep-services "$target" + # locales + rm -rf usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive} + # docs and man pages + rm -rf usr/share/{man,doc,info,gnome/help} + # cracklib + rm -rf usr/share/cracklib + # i18n + rm -rf usr/share/i18n + # yum cache + rm -rf var/cache/yum + mkdir -p --mode=0755 var/cache/yum + # sln + rm -rf sbin/sln + # ldconfig + #rm -rf sbin/ldconfig + rm -rf etc/ld.so.cache var/cache/ldconfig + mkdir -p --mode=0755 var/cache/ldconfig +) diff --git a/vendor/github.com/moby/moby/contrib/mkimage/busybox-static b/vendor/github.com/moby/moby/contrib/mkimage/busybox-static new file mode 100755 index 000000000..e15322b49 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/mkimage/busybox-static @@ -0,0 +1,34 @@ +#!/usr/bin/env bash +set -e + +rootfsDir="$1" +shift + +busybox="$(which busybox 2>/dev/null || true)" +if [ -z "$busybox" ]; then + echo >&2 'error: busybox: not found' + echo >&2 ' install it with your distribution "busybox-static" package' + exit 1 +fi +if ! 
ldd "$busybox" 2>&1 | grep -q 'not a dynamic executable'; then + echo >&2 "error: '$busybox' appears to be a dynamic executable" + echo >&2 ' you should install your distribution "busybox-static" package instead' + exit 1 +fi + +mkdir -p "$rootfsDir/bin" +rm -f "$rootfsDir/bin/busybox" # just in case +cp "$busybox" "$rootfsDir/bin/busybox" + +( + cd "$rootfsDir" + + IFS=$'\n' + modules=( $(bin/busybox --list-modules) ) + unset IFS + + for module in "${modules[@]}"; do + mkdir -p "$(dirname "$module")" + ln -sf /bin/busybox "$module" + done +) diff --git a/vendor/github.com/moby/moby/contrib/mkimage/debootstrap b/vendor/github.com/moby/moby/contrib/mkimage/debootstrap new file mode 100755 index 000000000..9f7d8987a --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/mkimage/debootstrap @@ -0,0 +1,251 @@ +#!/usr/bin/env bash +set -e + +mkimgdeb="$(basename "$0")" +mkimg="$(dirname "$0").sh" + +usage() { + echo >&2 "usage: $mkimgdeb rootfsDir suite [debootstrap-args]" + echo >&2 " note: $mkimgdeb meant to be used from $mkimg" + exit 1 +} + +rootfsDir="$1" +if [ -z "$rootfsDir" ]; then + echo >&2 "error: rootfsDir is missing" + echo >&2 + usage +fi +shift + +# we have to do a little fancy footwork to make sure "rootfsDir" becomes the second non-option argument to debootstrap + +before=() +while [ $# -gt 0 ] && [[ "$1" == -* ]]; do + before+=( "$1" ) + shift +done + +suite="$1" +if [ -z "$suite" ]; then + echo >&2 "error: suite is missing" + echo >&2 + usage +fi +shift + +# get path to "chroot" in our current PATH +chrootPath="$(type -P chroot || :)" +if [ -z "$chrootPath" ]; then + echo >&2 "error: chroot not found. Are you root?" + echo >&2 + usage +fi + +rootfs_chroot() { + # "chroot" doesn't set PATH, so we need to set it explicitly to something our new debootstrap chroot can use appropriately! + + # set PATH and chroot away! + PATH='/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin' \ + "$chrootPath" "$rootfsDir" "$@" +} + +# allow for DEBOOTSTRAP=qemu-debootstrap ./mkimage.sh ... +: ${DEBOOTSTRAP:=debootstrap} + +( + set -x + $DEBOOTSTRAP "${before[@]}" "$suite" "$rootfsDir" "$@" +) + +# now for some Docker-specific tweaks + +# prevent init scripts from running during install/update +echo >&2 "+ echo exit 101 > '$rootfsDir/usr/sbin/policy-rc.d'" +cat > "$rootfsDir/usr/sbin/policy-rc.d" <<-'EOF' + #!/bin/sh + + # For most Docker users, "apt-get install" only happens during "docker build", + # where starting services doesn't work and often fails in humorous ways. This + # prevents those failures by stopping the services from attempting to start. + + exit 101 +EOF +chmod +x "$rootfsDir/usr/sbin/policy-rc.d" + +# prevent upstart scripts from running during install/update +( + set -x + rootfs_chroot dpkg-divert --local --rename --add /sbin/initctl + cp -a "$rootfsDir/usr/sbin/policy-rc.d" "$rootfsDir/sbin/initctl" + sed -i 's/^exit.*/exit 0/' "$rootfsDir/sbin/initctl" +) + +# shrink a little, since apt makes us cache-fat (wheezy: ~157.5MB vs ~120MB) +( set -x; rootfs_chroot apt-get clean ) + +# this file is one APT creates to make sure we don't "autoremove" our currently +# in-use kernel, which doesn't really apply to debootstraps/Docker images that +# don't even have kernels installed +rm -f "$rootfsDir/etc/apt/apt.conf.d/01autoremove-kernels" + +# Ubuntu 10.04 sucks... 
:) +if strings "$rootfsDir/usr/bin/dpkg" | grep -q unsafe-io; then + # force dpkg not to call sync() after package extraction (speeding up installs) + echo >&2 "+ echo force-unsafe-io > '$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup'" + cat > "$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup" <<-'EOF' + # For most Docker users, package installs happen during "docker build", which + # doesn't survive power loss and gets restarted clean afterwards anyhow, so + # this minor tweak gives us a nice speedup (much nicer on spinning disks, + # obviously). + + force-unsafe-io + EOF +fi + +if [ -d "$rootfsDir/etc/apt/apt.conf.d" ]; then + # _keep_ us lean by effectively running "apt-get clean" after every install + aptGetClean='"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true";' + echo >&2 "+ cat > '$rootfsDir/etc/apt/apt.conf.d/docker-clean'" + cat > "$rootfsDir/etc/apt/apt.conf.d/docker-clean" <<-EOF + # Since for most Docker users, package installs happen in "docker build" steps, + # they essentially become individual layers due to the way Docker handles + # layering, especially using CoW filesystems. What this means for us is that + # the caches that APT keeps end up just wasting space in those layers, making + # our layers unnecessarily large (especially since we'll normally never use + # these caches again and will instead just "docker build" again and make a brand + # new image). + + # Ideally, these would just be invoking "apt-get clean", but in our testing, + # that ended up being cyclic and we got stuck on APT's lock, so we get this fun + # creation that's essentially just "apt-get clean". + DPkg::Post-Invoke { ${aptGetClean} }; + APT::Update::Post-Invoke { ${aptGetClean} }; + + Dir::Cache::pkgcache ""; + Dir::Cache::srcpkgcache ""; + + # Note that we do realize this isn't the ideal way to do this, and are always + # open to better suggestions (https://github.com/docker/docker/issues). + EOF + + # remove apt-cache translations for fast "apt-get update" + echo >&2 "+ echo Acquire::Languages 'none' > '$rootfsDir/etc/apt/apt.conf.d/docker-no-languages'" + cat > "$rootfsDir/etc/apt/apt.conf.d/docker-no-languages" <<-'EOF' + # In Docker, we don't often need the "Translations" files, so we're just wasting + # time and space by downloading them, and this inhibits that. For users that do + # need them, it's a simple matter to delete this file and "apt-get update". :) + + Acquire::Languages "none"; + EOF + + echo >&2 "+ echo Acquire::GzipIndexes 'true' > '$rootfsDir/etc/apt/apt.conf.d/docker-gzip-indexes'" + cat > "$rootfsDir/etc/apt/apt.conf.d/docker-gzip-indexes" <<-'EOF' + # Since Docker users using "RUN apt-get update && apt-get install -y ..." in + # their Dockerfiles don't go delete the lists files afterwards, we want them to + # be as small as possible on-disk, so we explicitly request "gz" versions and + # tell Apt to keep them gzipped on-disk. + + # For comparison, an "apt-get update" layer without this on a pristine + # "debian:wheezy" base image was "29.88 MB", where with this it was only + # "8.273 MB". 
+ + Acquire::GzipIndexes "true"; + Acquire::CompressionTypes::Order:: "gz"; + EOF + + # update "autoremove" configuration to be aggressive about removing suggests deps that weren't manually installed + echo >&2 "+ echo Apt::AutoRemove::SuggestsImportant 'false' > '$rootfsDir/etc/apt/apt.conf.d/docker-autoremove-suggests'" + cat > "$rootfsDir/etc/apt/apt.conf.d/docker-autoremove-suggests" <<-'EOF' + # Since Docker users are looking for the smallest possible final images, the + # following emerges as a very common pattern: + + # RUN apt-get update \ + # && apt-get install -y \ + # && \ + # && apt-get purge -y --auto-remove + + # By default, APT will actually _keep_ packages installed via Recommends or + # Depends if another package Suggests them, even and including if the package + # that originally caused them to be installed is removed. Setting this to + # "false" ensures that APT is appropriately aggressive about removing the + # packages it added. + + # https://aptitude.alioth.debian.org/doc/en/ch02s05s05.html#configApt-AutoRemove-SuggestsImportant + Apt::AutoRemove::SuggestsImportant "false"; + EOF +fi + +if [ -z "$DONT_TOUCH_SOURCES_LIST" ]; then + # tweak sources.list, where appropriate + lsbDist= + if [ -z "$lsbDist" -a -r "$rootfsDir/etc/os-release" ]; then + lsbDist="$(. "$rootfsDir/etc/os-release" && echo "$ID")" + fi + if [ -z "$lsbDist" -a -r "$rootfsDir/etc/lsb-release" ]; then + lsbDist="$(. "$rootfsDir/etc/lsb-release" && echo "$DISTRIB_ID")" + fi + if [ -z "$lsbDist" -a -r "$rootfsDir/etc/debian_version" ]; then + lsbDist='Debian' + fi + # normalize to lowercase for easier matching + lsbDist="$(echo "$lsbDist" | tr '[:upper:]' '[:lower:]')" + case "$lsbDist" in + debian) + # updates and security! + if curl -o /dev/null -s --head --fail "http://security.debian.org/dists/$suite/updates/main/binary-$(rootfs_chroot dpkg --print-architecture)/Packages.gz"; then + ( + set -x + sed -i " + p; + s/ $suite / ${suite}-updates / + " "$rootfsDir/etc/apt/sources.list" + echo "deb http://security.debian.org $suite/updates main" >> "$rootfsDir/etc/apt/sources.list" + ) + fi + ;; + ubuntu) + # add the updates and security repositories + ( + set -x + sed -i " + p; + s/ $suite / ${suite}-updates /; p; + s/ $suite-updates / ${suite}-security / + " "$rootfsDir/etc/apt/sources.list" + ) + ;; + tanglu) + # add the updates repository + if [ "$suite" != 'devel' ]; then + ( + set -x + sed -i " + p; + s/ $suite / ${suite}-updates / + " "$rootfsDir/etc/apt/sources.list" + ) + fi + ;; + steamos) + # add contrib and non-free if "main" is the only component + ( + set -x + sed -i "s/ $suite main$/ $suite main contrib non-free/" "$rootfsDir/etc/apt/sources.list" + ) + ;; + esac +fi + +( + set -x + + # make sure we're fully up-to-date + rootfs_chroot sh -xc 'apt-get update && apt-get dist-upgrade -y' + + # delete all the apt list files since they're big and get stale quickly + rm -rf "$rootfsDir/var/lib/apt/lists"/* + # this forces "apt-get update" in dependent images, which is also good + + mkdir "$rootfsDir/var/lib/apt/lists/partial" # Lucid... "E: Lists directory /var/lib/apt/lists/partial is missing." +) diff --git a/vendor/github.com/moby/moby/contrib/mkimage/mageia-urpmi b/vendor/github.com/moby/moby/contrib/mkimage/mageia-urpmi new file mode 100755 index 000000000..93fb289ca --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/mkimage/mageia-urpmi @@ -0,0 +1,61 @@ +#!/usr/bin/env bash +# +# Needs to be run from Mageia 4 or greater for kernel support for docker. 
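The sources.list rewrites a few lines up lean on a compact sed idiom: the 'p' command prints each line unchanged, and the following 's///' edits the copy that sed auto-prints at the end of the cycle, so every suite line is kept and an -updates variant is appended after it. A one-line demonstration:

    echo 'deb http://deb.debian.org/debian jessie main' \
        | sed 'p; s/ jessie / jessie-updates /'
    # deb http://deb.debian.org/debian jessie main
    # deb http://deb.debian.org/debian jessie-updates main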
+# +# Mageia 4 does not have docker available in official repos, so please +# install and run the docker binary manually. +# +# Tested working versions are for Mageia 2 onwards (inc. cauldron). +# +set -e + +rootfsDir="$1" +shift + +optTemp=$(getopt --options '+v:,m:' --longoptions 'version:,mirror:' --name mageia-urpmi -- "$@") +eval set -- "$optTemp" +unset optTemp + +installversion= +mirror= +while true; do + case "$1" in + -v|--version) installversion="$2" ; shift 2 ;; + -m|--mirror) mirror="$2" ; shift 2 ;; + --) shift ; break ;; + esac +done + +if [ -z $installversion ]; then + # Attempt to match host version + if [ -r /etc/mageia-release ]; then + installversion="$(sed 's/^[^0-9\]*\([0-9.]\+\).*$/\1/' /etc/mageia-release)" + else + echo "Error: no version supplied and unable to detect host mageia version" + exit 1 + fi +fi + +if [ -z $mirror ]; then + # No mirror provided, default to mirrorlist + mirror="--mirrorlist https://mirrors.mageia.org/api/mageia.$installversion.x86_64.list" +fi + +( + set -x + urpmi.addmedia --distrib \ + $mirror \ + --urpmi-root "$rootfsDir" + urpmi basesystem-minimal urpmi \ + --auto \ + --no-suggests \ + --urpmi-root "$rootfsDir" \ + --root "$rootfsDir" +) + +"$(dirname "$BASH_SOURCE")/.febootstrap-minimize" "$rootfsDir" + +if [ -d "$rootfsDir/etc/sysconfig" ]; then + # allow networking init scripts inside the container to work without extra steps + echo 'NETWORKING=yes' > "$rootfsDir/etc/sysconfig/network" +fi diff --git a/vendor/github.com/moby/moby/contrib/mkimage/rinse b/vendor/github.com/moby/moby/contrib/mkimage/rinse new file mode 100755 index 000000000..75eb4f0d9 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/mkimage/rinse @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +set -e + +rootfsDir="$1" +shift + +# specifying --arch below is safe because "$@" can override it and the "latest" one wins :) + +( + set -x + rinse --directory "$rootfsDir" --arch amd64 "$@" +) + +"$(dirname "$BASH_SOURCE")/.febootstrap-minimize" "$rootfsDir" + +if [ -d "$rootfsDir/etc/sysconfig" ]; then + # allow networking init scripts inside the container to work without extra steps + echo 'NETWORKING=yes' > "$rootfsDir/etc/sysconfig/network" +fi + +# make sure we're fully up-to-date, too +( + set -x + chroot "$rootfsDir" yum update -y +) diff --git a/vendor/github.com/moby/moby/contrib/mkimage/solaris b/vendor/github.com/moby/moby/contrib/mkimage/solaris new file mode 100755 index 000000000..158970e69 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/mkimage/solaris @@ -0,0 +1,89 @@ +#!/usr/bin/env bash +# +# Solaris 12 base image build script. +# +set -e + +# TODO add optional package publisher origin + +rootfsDir="$1" +shift + +# base install +( + set -x + + pkg image-create --full --zone \ + --facet facet.locale.*=false \ + --facet facet.locale.POSIX=true \ + --facet facet.doc=false \ + --facet facet.doc.*=false \ + "$rootfsDir" + + pkg -R "$rootfsDir" set-property use-system-repo true + + pkg -R "$rootfsDir" set-property flush-content-cache-on-success true + + pkg -R "$rootfsDir" install core-os +) + +# Lay in stock configuration, set up milestone +# XXX This all may become optional in a base image +( + # faster to build repository database on tmpfs + REPO_DB=/system/volatile/repository.$$ + export SVCCFG_REPOSITORY=${REPO_DB} + export SVCCFG_DOOR_PATH=$rootfsDir/system/volatile/tmp_repo_door + + # Import base manifests. NOTE These are a combination of basic requirement + # and gleaned from container milestone manifest. They may change. 
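The mageia-urpmi helper above autodetects the host release with a sed capture group; given a typical /etc/mageia-release line it reduces to just the version number (the character class is simplified slightly here):

    echo 'Mageia release 4 (Official) for x86_64' \
        | sed 's/^[^0-9]*\([0-9.]\+\).*$/\1/'
    # 4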
+ for m in $rootfsDir/lib/svc/manifest/system/environment.xml \ + $rootfsDir/lib/svc/manifest/system/svc/global.xml \ + $rootfsDir/lib/svc/manifest/system/svc/restarter.xml \ + $rootfsDir/lib/svc/manifest/network/dns/client.xml \ + $rootfsDir/lib/svc/manifest/system/name-service/switch.xml \ + $rootfsDir/lib/svc/manifest/system/name-service/cache.xml \ + $rootfsDir/lib/svc/manifest/milestone/container.xml ; do + svccfg import $m + done + + # Apply system layer profile, deleting unnecessary dependencies + svccfg apply $rootfsDir/etc/svc/profile/generic_container.xml + + # XXX Even if we keep a repo in the base image, this is definitely optional + svccfg apply $rootfsDir/etc/svc/profile/sysconfig/container_sc.xml + + for s in svc:/system/svc/restarter \ + svc:/system/environment \ + svc:/network/dns/client \ + svc:/system/name-service/switch \ + svc:/system/name-service/cache \ + svc:/system/svc/global \ + svc:/milestone/container ;do + svccfg -s $s refresh + done + + # now copy the built up repository into the base rootfs + mv $REPO_DB $rootfsDir/etc/svc/repository.db +) + +# pkg(1) needs the zoneproxy-client running in the container. +# use a simple wrapper to run it as needed. +# XXX maybe we go back to running this in SMF? +mv "$rootfsDir/usr/bin/pkg" "$rootfsDir/usr/bin/wrapped_pkg" +cat > "$rootfsDir/usr/bin/pkg" <<-'EOF' +#!/bin/sh +# +# THIS FILE CREATED DURING DOCKER BASE IMAGE CREATION +# +# The Solaris base image uses the sysrepo proxy mechanism. The +# IPS client pkg(1) requires the zoneproxy-client to reach the +# remote publisher origins through the host. This wrapper script +# enables and disables the proxy client as needed. This is a +# temporary solution. + +/usr/lib/zones/zoneproxy-client -s localhost:1008 +PKG_SYSREPO_URL=http://localhost:1008 /usr/bin/wrapped_pkg "$@" +pkill -9 zoneproxy-client +EOF +chmod +x "$rootfsDir/usr/bin/pkg" diff --git a/vendor/github.com/moby/moby/contrib/nnp-test/Dockerfile b/vendor/github.com/moby/moby/contrib/nnp-test/Dockerfile new file mode 100644 index 000000000..026d86954 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/nnp-test/Dockerfile @@ -0,0 +1,9 @@ +FROM buildpack-deps:jessie + +COPY . /usr/src/ + +WORKDIR /usr/src/ + +RUN gcc -g -Wall -static nnp-test.c -o /usr/bin/nnp-test + +RUN chmod +s /usr/bin/nnp-test diff --git a/vendor/github.com/moby/moby/contrib/nnp-test/nnp-test.c b/vendor/github.com/moby/moby/contrib/nnp-test/nnp-test.c new file mode 100644 index 000000000..b767da7e1 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/nnp-test/nnp-test.c @@ -0,0 +1,10 @@ +#include +#include +#include + +int main(int argc, char *argv[]) +{ + printf("EUID=%d\n", geteuid()); + return 0; +} + diff --git a/vendor/github.com/moby/moby/contrib/nuke-graph-directory.sh b/vendor/github.com/moby/moby/contrib/nuke-graph-directory.sh new file mode 100755 index 000000000..3d2f49e86 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/nuke-graph-directory.sh @@ -0,0 +1,64 @@ +#!/usr/bin/env bash +set -e + +dir="$1" + +if [ -z "$dir" ]; then + { + echo 'This script is for destroying old /var/lib/docker directories more safely than' + echo ' "rm -rf", which can cause data loss or other serious issues.' + echo + echo "usage: $0 directory" + echo " ie: $0 /var/lib/docker" + } >&2 + exit 1 +fi + +if [ "$(id -u)" != 0 ]; then + echo >&2 "error: $0 must be run as root" + exit 1 +fi + +if [ ! -d "$dir" ]; then + echo >&2 "error: $dir is not a directory" + exit 1 +fi + +dir="$(readlink -f "$dir")" + +echo +echo "Nuking $dir ..." 
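The containment check this script defines just below is pure parameter expansion: stripping the candidate parent from the front of a path changes the string only when the path really lives underneath it. A standalone demonstration:

    dir_in_dir() {
        inner="$1"
        outer="$2"
        [ "${inner#$outer}" != "$inner" ]
    }
    dir_in_dir /var/lib/docker/volumes /var/lib/docker && echo nested
    dir_in_dir /home /var/lib/docker || echo 'not nested'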
+echo ' (if this is wrong, press Ctrl+C NOW!)' +echo + +( set -x; sleep 10 ) +echo + +dir_in_dir() { + inner="$1" + outer="$2" + [ "${inner#$outer}" != "$inner" ] +} + +# let's start by unmounting any submounts in $dir +# (like -v /home:... for example - DON'T DELETE MY HOME DIRECTORY BRU!) +for mount in $(awk '{ print $5 }' /proc/self/mountinfo); do + mount="$(readlink -f "$mount" || true)" + if [ "$dir" != "$mount" ] && dir_in_dir "$mount" "$dir"; then + ( set -x; umount -f "$mount" ) + fi +done + +# now, let's go destroy individual btrfs subvolumes, if any exist +if command -v btrfs > /dev/null 2>&1; then + # Find btrfs subvolumes under $dir checking for inode 256 + # Source: http://stackoverflow.com/a/32865333 + for subvol in $(find "$dir" -type d -inum 256 | sort -r); do + if [ "$dir" != "$subvol" ]; then + ( set -x; btrfs subvolume delete "$subvol" ) + fi + done +fi + +# finally, DESTROY ALL THINGS +( shopt -s dotglob; set -x; rm -rf "$dir"/* ) diff --git a/vendor/github.com/moby/moby/contrib/project-stats.sh b/vendor/github.com/moby/moby/contrib/project-stats.sh new file mode 100755 index 000000000..2691c72ff --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/project-stats.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +## Run this script from the root of the docker repository +## to query project stats useful to the maintainers. +## You will need to install `pulls` and `issues` from +## https://github.com/crosbymichael/pulls + +set -e + +echo -n "Open pulls: " +PULLS=$(pulls | wc -l); let PULLS=$PULLS-1 +echo $PULLS + +echo -n "Pulls alru: " +pulls alru + +echo -n "Open issues: " +ISSUES=$(issues list | wc -l); let ISSUES=$ISSUES-1 +echo $ISSUES + +echo -n "Issues alru: " +issues alru diff --git a/vendor/github.com/moby/moby/contrib/report-issue.sh b/vendor/github.com/moby/moby/contrib/report-issue.sh new file mode 100755 index 000000000..cb54f1a5b --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/report-issue.sh @@ -0,0 +1,105 @@ +#!/bin/sh + +# This is a convenience script for reporting issues that include a base +# template of information. See https://github.com/docker/docker/pull/8845 + +set -e + +DOCKER_ISSUE_URL=${DOCKER_ISSUE_URL:-"https://github.com/docker/docker/issues/new"} +DOCKER_ISSUE_NAME_PREFIX=${DOCKER_ISSUE_NAME_PREFIX:-"Report: "} +DOCKER=${DOCKER:-"docker"} +DOCKER_COMMAND="${DOCKER}" +export DOCKER_COMMAND + +# pulled from https://gist.github.com/cdown/1163649 +function urlencode() { + # urlencode + + local length="${#1}" + for (( i = 0; i < length; i++ )); do + local c="${1:i:1}" + case $c in + [a-zA-Z0-9.~_-]) printf "$c" ;; + *) printf '%%%02X' "'$c" + esac + done +} + +function template() { +# this should always match the template from CONTRIBUTING.md + cat <<- EOM + Description of problem: + + + \`docker version\`: + `${DOCKER_COMMAND} -D version` + + + \`docker info\`: + `${DOCKER_COMMAND} -D info` + + + \`uname -a\`: + `uname -a` + + + Environment details (AWS, VirtualBox, physical, etc.): + + + How reproducible: + + + Steps to Reproduce: + 1. + 2. + 3. + + + Actual Results: + + + Expected Results: + + + Additional info: + + + EOM +} + +function format_issue_url() { + if [ ${#@} -ne 2 ] ; then + return 1 + fi + local issue_name=$(urlencode "${DOCKER_ISSUE_NAME_PREFIX}${1}") + local issue_body=$(urlencode "${2}") + echo "${DOCKER_ISSUE_URL}?title=${issue_name}&body=${issue_body}" +} + + +echo -ne "Do you use \`sudo\` to call docker? 
[y|N]: " +read -r -n 1 use_sudo +echo "" + +if [ "x${use_sudo}" = "xy" -o "x${use_sudo}" = "xY" ]; then + export DOCKER_COMMAND="sudo ${DOCKER}" +fi + +echo -ne "Title of new issue?: " +read -r issue_title +echo "" + +issue_url=$(format_issue_url "${issue_title}" "$(template)") + +if which xdg-open 2>/dev/null >/dev/null ; then + echo -ne "Would like to launch this report in your browser? [Y|n]: " + read -r -n 1 launch_now + echo "" + + if [ "${launch_now}" != "n" -a "${launch_now}" != "N" ]; then + xdg-open "${issue_url}" + fi +fi + +echo "If you would like to manually open the url, you can open this link if your browser: ${issue_url}" + diff --git a/vendor/github.com/moby/moby/contrib/reprepro/suites.sh b/vendor/github.com/moby/moby/contrib/reprepro/suites.sh new file mode 100755 index 000000000..badb34af9 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/reprepro/suites.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash +set -e + +cd "$(dirname "$BASH_SOURCE")/../.." + +targets_from() { + git fetch -q https://github.com/docker/docker.git "$1" + git ls-tree -r --name-only "$(git rev-parse FETCH_HEAD)" contrib/builder/deb/ | grep '/Dockerfile$' | sed -r 's!^contrib/builder/deb/|^contrib/builder/deb/amd64/|-debootstrap|/Dockerfile$!!g' | grep -v / +} + +release_branch=$(git ls-remote --heads https://github.com/docker/docker.git | awk -F 'refs/heads/' '$2 ~ /^release/ { print $2 }' | sort -V | tail -1) +{ targets_from master; targets_from "$release_branch"; } | sort -u diff --git a/vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE b/vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE new file mode 100644 index 000000000..d511905c1 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE @@ -0,0 +1,339 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. 
+ + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. 
+ + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. 
However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. 
Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. 
diff --git a/vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/Makefile b/vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/Makefile new file mode 100644 index 000000000..16df33ef3 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/Makefile @@ -0,0 +1,23 @@ +TARGETS?=docker +MODULES?=${TARGETS:=.pp.bz2} +SHAREDIR?=/usr/share + +all: ${TARGETS:=.pp.bz2} + +%.pp.bz2: %.pp + @echo Compressing $^ -\> $@ + bzip2 -9 $^ + +%.pp: %.te + make -f ${SHAREDIR}/selinux/devel/Makefile $@ + +clean: + rm -f *~ *.tc *.pp *.pp.bz2 + rm -rf tmp *.tar.gz + +man: install + sepolicy manpage --domain ${TARGETS}_t + +install: + semodule -i ${TARGETS} + diff --git a/vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/README.md b/vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/README.md new file mode 100644 index 000000000..7ea3117a8 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/README.md @@ -0,0 +1 @@ +SELinux policy for docker diff --git a/vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/docker.fc b/vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/docker.fc new file mode 100644 index 000000000..d6cb0e579 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/docker.fc @@ -0,0 +1,29 @@ +/root/\.docker gen_context(system_u:object_r:docker_home_t,s0) + +/usr/bin/docker -- gen_context(system_u:object_r:docker_exec_t,s0) +/usr/bin/docker-novolume-plugin -- gen_context(system_u:object_r:docker_auth_exec_t,s0) +/usr/lib/docker/docker-novolume-plugin -- gen_context(system_u:object_r:docker_auth_exec_t,s0) + +/usr/lib/systemd/system/docker.service -- gen_context(system_u:object_r:docker_unit_file_t,s0) +/usr/lib/systemd/system/docker-novolume-plugin.service -- gen_context(system_u:object_r:docker_unit_file_t,s0) + +/etc/docker(/.*)? gen_context(system_u:object_r:docker_config_t,s0) + +/var/lib/docker(/.*)? gen_context(system_u:object_r:docker_var_lib_t,s0) +/var/lib/kublet(/.*)? gen_context(system_u:object_r:docker_var_lib_t,s0) +/var/lib/docker/vfs(/.*)? gen_context(system_u:object_r:svirt_sandbox_file_t,s0) + +/var/run/docker(/.*)? gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker\.pid -- gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker\.sock -s gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker-client(/.*)? gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker/plugins(/.*)? gen_context(system_u:object_r:docker_plugin_var_run_t,s0) + +/var/lock/lxc(/.*)? gen_context(system_u:object_r:docker_lock_t,s0) + +/var/log/lxc(/.*)? gen_context(system_u:object_r:docker_log_t,s0) + +/var/lib/docker/init(/.*)? 
gen_context(system_u:object_r:docker_share_t,s0) +/var/lib/docker/containers/.*/hosts gen_context(system_u:object_r:docker_share_t,s0) +/var/lib/docker/containers/.*/hostname gen_context(system_u:object_r:docker_share_t,s0) +/var/lib/docker/.*/config\.env gen_context(system_u:object_r:docker_share_t,s0) diff --git a/vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/docker.if b/vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/docker.if new file mode 100644 index 000000000..e087e8b98 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/docker.if @@ -0,0 +1,523 @@ + +## The open-source application container engine. + +######################################## +## +## Execute docker in the docker domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_domtrans',` + gen_require(` + type docker_t, docker_exec_t; + ') + + corecmd_search_bin($1) + domtrans_pattern($1, docker_exec_t, docker_t) +') + +######################################## +## +## Execute docker in the caller domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_exec',` + gen_require(` + type docker_exec_t; + ') + + corecmd_search_bin($1) + can_exec($1, docker_exec_t) +') + +######################################## +## +## Search docker lib directories. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_search_lib',` + gen_require(` + type docker_var_lib_t; + ') + + allow $1 docker_var_lib_t:dir search_dir_perms; + files_search_var_lib($1) +') + +######################################## +## +## Execute docker lib directories. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_exec_lib',` + gen_require(` + type docker_var_lib_t; + ') + + allow $1 docker_var_lib_t:dir search_dir_perms; + can_exec($1, docker_var_lib_t) +') + +######################################## +## +## Read docker lib files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_read_lib_files',` + gen_require(` + type docker_var_lib_t; + ') + + files_search_var_lib($1) + read_files_pattern($1, docker_var_lib_t, docker_var_lib_t) +') + +######################################## +## +## Read docker share files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_read_share_files',` + gen_require(` + type docker_share_t; + ') + + files_search_var_lib($1) + list_dirs_pattern($1, docker_share_t, docker_share_t) + read_files_pattern($1, docker_share_t, docker_share_t) + read_lnk_files_pattern($1, docker_share_t, docker_share_t) +') + +###################################### +## +## Allow the specified domain to execute apache +## in the caller domain. +## +## +## +## Domain allowed access. +## +## +# +interface(`apache_exec',` + gen_require(` + type httpd_exec_t; + ') + + can_exec($1, httpd_exec_t) +') + +###################################### +## +## Allow the specified domain to execute docker shared files +## in the caller domain. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_exec_share_files',` + gen_require(` + type docker_share_t; + ') + + can_exec($1, docker_share_t) +') + +######################################## +## +## Manage docker lib files. +## +## +## +## Domain allowed access. 
+## +## +# +interface(`docker_manage_lib_files',` + gen_require(` + type docker_var_lib_t; + ') + + files_search_var_lib($1) + manage_files_pattern($1, docker_var_lib_t, docker_var_lib_t) + manage_lnk_files_pattern($1, docker_var_lib_t, docker_var_lib_t) +') + +######################################## +## +## Manage docker lib directories. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_manage_lib_dirs',` + gen_require(` + type docker_var_lib_t; + ') + + files_search_var_lib($1) + manage_dirs_pattern($1, docker_var_lib_t, docker_var_lib_t) +') + +######################################## +## +## Create objects in a docker var lib directory +## with an automatic type transition to +## a specified private type. +## +## +## +## Domain allowed access. +## +## +## +## +## The type of the object to create. +## +## +## +## +## The class of the object to be created. +## +## +## +## +## The name of the object being created. +## +## +# +interface(`docker_lib_filetrans',` + gen_require(` + type docker_var_lib_t; + ') + + filetrans_pattern($1, docker_var_lib_t, $2, $3, $4) +') + +######################################## +## +## Read docker PID files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_read_pid_files',` + gen_require(` + type docker_var_run_t; + ') + + files_search_pids($1) + read_files_pattern($1, docker_var_run_t, docker_var_run_t) +') + +######################################## +## +## Execute docker server in the docker domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_systemctl',` + gen_require(` + type docker_t; + type docker_unit_file_t; + ') + + systemd_exec_systemctl($1) + init_reload_services($1) + systemd_read_fifo_file_passwd_run($1) + allow $1 docker_unit_file_t:file read_file_perms; + allow $1 docker_unit_file_t:service manage_service_perms; + + ps_process_pattern($1, docker_t) +') + +######################################## +## +## Read and write docker shared memory. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_rw_sem',` + gen_require(` + type docker_t; + ') + + allow $1 docker_t:sem rw_sem_perms; +') + +####################################### +## +## Read and write the docker pty type. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_use_ptys',` + gen_require(` + type docker_devpts_t; + ') + + allow $1 docker_devpts_t:chr_file rw_term_perms; +') + +####################################### +## +## Allow domain to create docker content +## +## +## +## Domain allowed access. 
+## +## +# +interface(`docker_filetrans_named_content',` + + gen_require(` + type docker_var_lib_t; + type docker_share_t; + type docker_log_t; + type docker_var_run_t; + type docker_home_t; + ') + + files_pid_filetrans($1, docker_var_run_t, file, "docker.pid") + files_pid_filetrans($1, docker_var_run_t, sock_file, "docker.sock") + files_pid_filetrans($1, docker_var_run_t, dir, "docker-client") + logging_log_filetrans($1, docker_log_t, dir, "lxc") + files_var_lib_filetrans($1, docker_var_lib_t, dir, "docker") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "config.env") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "hosts") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "hostname") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "resolv.conf") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, dir, "init") + userdom_admin_home_dir_filetrans($1, docker_home_t, dir, ".docker") +') + +######################################## +## +## Connect to docker over a unix stream socket. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_stream_connect',` + gen_require(` + type docker_t, docker_var_run_t; + ') + + files_search_pids($1) + stream_connect_pattern($1, docker_var_run_t, docker_var_run_t, docker_t) +') + +######################################## +## +## Connect to SPC containers over a unix stream socket. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_spc_stream_connect',` + gen_require(` + type spc_t, spc_var_run_t; + ') + + files_search_pids($1) + files_write_all_pid_sockets($1) + allow $1 spc_t:unix_stream_socket connectto; +') + +######################################## +## +## All of the rules required to administrate +## an docker environment +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_admin',` + gen_require(` + type docker_t; + type docker_var_lib_t, docker_var_run_t; + type docker_unit_file_t; + type docker_lock_t; + type docker_log_t; + type docker_config_t; + ') + + allow $1 docker_t:process { ptrace signal_perms }; + ps_process_pattern($1, docker_t) + + admin_pattern($1, docker_config_t) + + files_search_var_lib($1) + admin_pattern($1, docker_var_lib_t) + + files_search_pids($1) + admin_pattern($1, docker_var_run_t) + + files_search_locks($1) + admin_pattern($1, docker_lock_t) + + logging_search_logs($1) + admin_pattern($1, docker_log_t) + + docker_systemctl($1) + admin_pattern($1, docker_unit_file_t) + allow $1 docker_unit_file_t:service all_service_perms; + + optional_policy(` + systemd_passwd_agent_exec($1) + systemd_read_fifo_file_passwd_run($1) + ') +') + +######################################## +## +## Execute docker_auth_exec_t in the docker_auth domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_auth_domtrans',` + gen_require(` + type docker_auth_t, docker_auth_exec_t; + ') + + corecmd_search_bin($1) + domtrans_pattern($1, docker_auth_exec_t, docker_auth_t) +') + +###################################### +## +## Execute docker_auth in the caller domain. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_auth_exec',` + gen_require(` + type docker_auth_exec_t; + ') + + corecmd_search_bin($1) + can_exec($1, docker_auth_exec_t) +') + +######################################## +## +## Connect to docker_auth over a unix stream socket. +## +## +## +## Domain allowed access. 
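These interfaces only take effect once the policy module is compiled and loaded; following the Makefile shipped alongside this policy, a build-and-install pass on a Fedora-style host would look roughly like:

    # build docker.pp from docker.te via the refpolicy devel Makefile,
    # load it, then regenerate the docker_t manpage
    make -f /usr/share/selinux/devel/Makefile docker.pp
    sudo semodule -i docker.pp
    sepolicy manpage --domain docker_t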
+## +## +# +interface(`docker_auth_stream_connect',` + gen_require(` + type docker_auth_t, docker_plugin_var_run_t; + ') + + files_search_pids($1) + stream_connect_pattern($1, docker_plugin_var_run_t, docker_plugin_var_run_t, docker_auth_t) +') + +######################################## +## +## docker domain typebounds calling domain. +## +## +## +## Domain to be typebound. +## +## +# +interface(`docker_typebounds',` + gen_require(` + type docker_t; + ') + + typebounds docker_t $1; +') + +######################################## +## +## Allow any docker_exec_t to be an entrypoint of this domain +## +## +## +## Domain allowed access. +## +## +## +# +interface(`docker_entrypoint',` + gen_require(` + type docker_exec_t; + ') + allow $1 docker_exec_t:file entrypoint; +') diff --git a/vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/docker.te b/vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/docker.te new file mode 100644 index 000000000..423168838 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/selinux-fedora-24/docker-engine-selinux/docker.te @@ -0,0 +1,399 @@ +policy_module(docker, 1.0.0) + +######################################## +# +# Declarations +# + +## +##

+## Determine whether docker can +## connect to all TCP ports. +## +##
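+# Usage sketch (an assumption, not part of the upstream policy; it presumes
+# the standard SELinux userland tools): once this module is loaded, an
+# administrator can enable the boolean at runtime with
+# `setsebool -P docker_connect_any 1`, which activates the
+# tunable_policy(`docker_connect_any', ...) rules near the end of this file.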
    +gen_tunable(docker_connect_any, false) + +type docker_t; +type docker_exec_t; +init_daemon_domain(docker_t, docker_exec_t) +domain_subj_id_change_exemption(docker_t) +domain_role_change_exemption(docker_t) + +type spc_t; +domain_type(spc_t) +role system_r types spc_t; + +type docker_auth_t; +type docker_auth_exec_t; +init_daemon_domain(docker_auth_t, docker_auth_exec_t) + +type spc_var_run_t; +files_pid_file(spc_var_run_t) + +type docker_var_lib_t; +files_type(docker_var_lib_t) + +type docker_home_t; +userdom_user_home_content(docker_home_t) + +type docker_config_t; +files_config_file(docker_config_t) + +type docker_lock_t; +files_lock_file(docker_lock_t) + +type docker_log_t; +logging_log_file(docker_log_t) + +type docker_tmp_t; +files_tmp_file(docker_tmp_t) + +type docker_tmpfs_t; +files_tmpfs_file(docker_tmpfs_t) + +type docker_var_run_t; +files_pid_file(docker_var_run_t) + +type docker_plugin_var_run_t; +files_pid_file(docker_plugin_var_run_t) + +type docker_unit_file_t; +systemd_unit_file(docker_unit_file_t) + +type docker_devpts_t; +term_pty(docker_devpts_t) + +type docker_share_t; +files_type(docker_share_t) + +######################################## +# +# docker local policy +# +allow docker_t self:capability { chown kill fowner fsetid mknod net_admin net_bind_service net_raw setfcap }; +allow docker_t self:tun_socket relabelto; +allow docker_t self:process { getattr signal_perms setrlimit setfscreate }; +allow docker_t self:fifo_file rw_fifo_file_perms; +allow docker_t self:unix_stream_socket create_stream_socket_perms; +allow docker_t self:tcp_socket create_stream_socket_perms; +allow docker_t self:udp_socket create_socket_perms; +allow docker_t self:capability2 block_suspend; + +docker_auth_stream_connect(docker_t) + +manage_files_pattern(docker_t, docker_home_t, docker_home_t) +manage_dirs_pattern(docker_t, docker_home_t, docker_home_t) +manage_lnk_files_pattern(docker_t, docker_home_t, docker_home_t) +userdom_admin_home_dir_filetrans(docker_t, docker_home_t, dir, ".docker") + +manage_dirs_pattern(docker_t, docker_config_t, docker_config_t) +manage_files_pattern(docker_t, docker_config_t, docker_config_t) +files_etc_filetrans(docker_t, docker_config_t, dir, "docker") + +manage_dirs_pattern(docker_t, docker_lock_t, docker_lock_t) +manage_files_pattern(docker_t, docker_lock_t, docker_lock_t) +files_lock_filetrans(docker_t, docker_lock_t, { dir file }, "lxc") + +manage_dirs_pattern(docker_t, docker_log_t, docker_log_t) +manage_files_pattern(docker_t, docker_log_t, docker_log_t) +manage_lnk_files_pattern(docker_t, docker_log_t, docker_log_t) +logging_log_filetrans(docker_t, docker_log_t, { dir file lnk_file }) +allow docker_t docker_log_t:dir_file_class_set { relabelfrom relabelto }; + +manage_dirs_pattern(docker_t, docker_tmp_t, docker_tmp_t) +manage_files_pattern(docker_t, docker_tmp_t, docker_tmp_t) +manage_lnk_files_pattern(docker_t, docker_tmp_t, docker_tmp_t) +files_tmp_filetrans(docker_t, docker_tmp_t, { dir file lnk_file }) + +manage_dirs_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_lnk_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_fifo_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_chr_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_blk_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +allow docker_t docker_tmpfs_t:dir relabelfrom; +can_exec(docker_t, docker_tmpfs_t) +fs_tmpfs_filetrans(docker_t, docker_tmpfs_t, { dir file }) +allow 
docker_t docker_tmpfs_t:chr_file mounton; + +manage_dirs_pattern(docker_t, docker_share_t, docker_share_t) +manage_files_pattern(docker_t, docker_share_t, docker_share_t) +manage_lnk_files_pattern(docker_t, docker_share_t, docker_share_t) +allow docker_t docker_share_t:dir_file_class_set { relabelfrom relabelto }; + +can_exec(docker_t, docker_share_t) +#docker_filetrans_named_content(docker_t) + +manage_dirs_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_chr_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_blk_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_lnk_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +allow docker_t docker_var_lib_t:dir_file_class_set { relabelfrom relabelto }; +files_var_lib_filetrans(docker_t, docker_var_lib_t, { dir file lnk_file }) + +manage_dirs_pattern(docker_t, docker_var_run_t, docker_var_run_t) +manage_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) +manage_sock_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) +manage_lnk_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) +files_pid_filetrans(docker_t, docker_var_run_t, { dir file lnk_file sock_file }) + +allow docker_t docker_devpts_t:chr_file { relabelfrom rw_chr_file_perms setattr_chr_file_perms }; +term_create_pty(docker_t, docker_devpts_t) + +kernel_read_system_state(docker_t) +kernel_read_network_state(docker_t) +kernel_read_all_sysctls(docker_t) +kernel_rw_net_sysctls(docker_t) +kernel_setsched(docker_t) +kernel_read_all_proc(docker_t) + +domain_use_interactive_fds(docker_t) +domain_dontaudit_read_all_domains_state(docker_t) + +corecmd_exec_bin(docker_t) +corecmd_exec_shell(docker_t) + +corenet_tcp_bind_generic_node(docker_t) +corenet_tcp_sendrecv_generic_if(docker_t) +corenet_tcp_sendrecv_generic_node(docker_t) +corenet_tcp_sendrecv_generic_port(docker_t) +corenet_tcp_bind_all_ports(docker_t) +corenet_tcp_connect_http_port(docker_t) +corenet_tcp_connect_commplex_main_port(docker_t) +corenet_udp_sendrecv_generic_if(docker_t) +corenet_udp_sendrecv_generic_node(docker_t) +corenet_udp_sendrecv_all_ports(docker_t) +corenet_udp_bind_generic_node(docker_t) +corenet_udp_bind_all_ports(docker_t) + +files_read_config_files(docker_t) +files_dontaudit_getattr_all_dirs(docker_t) +files_dontaudit_getattr_all_files(docker_t) + +fs_read_cgroup_files(docker_t) +fs_read_tmpfs_symlinks(docker_t) +fs_search_all(docker_t) +fs_getattr_all_fs(docker_t) + +storage_raw_rw_fixed_disk(docker_t) + +auth_use_nsswitch(docker_t) +auth_dontaudit_getattr_shadow(docker_t) + +init_read_state(docker_t) +init_status(docker_t) + +logging_send_audit_msgs(docker_t) +logging_send_syslog_msg(docker_t) + +miscfiles_read_localization(docker_t) + +mount_domtrans(docker_t) + +seutil_read_default_contexts(docker_t) +seutil_read_config(docker_t) + +sysnet_dns_name_resolve(docker_t) +sysnet_exec_ifconfig(docker_t) + +optional_policy(` + rpm_exec(docker_t) + rpm_read_db(docker_t) + rpm_exec(docker_t) +') + +optional_policy(` + fstools_domtrans(docker_t) +') + +optional_policy(` + iptables_domtrans(docker_t) +') + +optional_policy(` + openvswitch_stream_connect(docker_t) +') + +# +# lxc rules +# + +allow docker_t self:capability { dac_override setgid setpcap setuid sys_admin sys_boot sys_chroot sys_ptrace }; + +allow docker_t self:process { getcap setcap setexec setpgid setsched signal_perms }; + +allow docker_t self:netlink_route_socket rw_netlink_socket_perms;; +allow docker_t 
self:netlink_audit_socket create_netlink_socket_perms; +allow docker_t self:unix_dgram_socket { create_socket_perms sendto }; +allow docker_t self:unix_stream_socket { create_stream_socket_perms connectto }; + +allow docker_t docker_var_lib_t:dir mounton; +allow docker_t docker_var_lib_t:chr_file mounton; +can_exec(docker_t, docker_var_lib_t) + +kernel_dontaudit_setsched(docker_t) +kernel_get_sysvipc_info(docker_t) +kernel_request_load_module(docker_t) +kernel_mounton_messages(docker_t) +kernel_mounton_all_proc(docker_t) +kernel_mounton_all_sysctls(docker_t) +kernel_unlabeled_entry_type(spc_t) +kernel_unlabeled_domtrans(docker_t, spc_t) + +dev_getattr_all(docker_t) +dev_getattr_sysfs_fs(docker_t) +dev_read_urand(docker_t) +dev_read_lvm_control(docker_t) +dev_rw_sysfs(docker_t) +dev_rw_loop_control(docker_t) +dev_rw_lvm_control(docker_t) + +files_getattr_isid_type_dirs(docker_t) +files_manage_isid_type_dirs(docker_t) +files_manage_isid_type_files(docker_t) +files_manage_isid_type_symlinks(docker_t) +files_manage_isid_type_chr_files(docker_t) +files_manage_isid_type_blk_files(docker_t) +files_exec_isid_files(docker_t) +files_mounton_isid(docker_t) +files_mounton_non_security(docker_t) +files_mounton_isid_type_chr_file(docker_t) + +fs_mount_all_fs(docker_t) +fs_unmount_all_fs(docker_t) +fs_remount_all_fs(docker_t) +files_mounton_isid(docker_t) +fs_manage_cgroup_dirs(docker_t) +fs_manage_cgroup_files(docker_t) +fs_relabelfrom_xattr_fs(docker_t) +fs_relabelfrom_tmpfs(docker_t) +fs_read_tmpfs_symlinks(docker_t) +fs_list_hugetlbfs(docker_t) + +term_use_generic_ptys(docker_t) +term_use_ptmx(docker_t) +term_getattr_pty_fs(docker_t) +term_relabel_pty_fs(docker_t) +term_mounton_unallocated_ttys(docker_t) + +modutils_domtrans_insmod(docker_t) + +systemd_status_all_unit_files(docker_t) +systemd_start_systemd_services(docker_t) + +userdom_stream_connect(docker_t) +userdom_search_user_home_content(docker_t) +userdom_read_all_users_state(docker_t) +userdom_relabel_user_home_files(docker_t) +userdom_relabel_user_tmp_files(docker_t) +userdom_relabel_user_tmp_dirs(docker_t) + +optional_policy(` + gpm_getattr_gpmctl(docker_t) +') + +optional_policy(` + dbus_system_bus_client(docker_t) + init_dbus_chat(docker_t) + init_start_transient_unit(docker_t) + + optional_policy(` + systemd_dbus_chat_logind(docker_t) + systemd_dbus_chat_machined(docker_t) + ') + + optional_policy(` + firewalld_dbus_chat(docker_t) + ') +') + +optional_policy(` + udev_read_db(docker_t) +') + +optional_policy(` + unconfined_domain(docker_t) + unconfined_typebounds(docker_t) +') + +optional_policy(` + virt_read_config(docker_t) + virt_exec(docker_t) + virt_stream_connect(docker_t) + virt_stream_connect_sandbox(docker_t) + virt_exec_sandbox_files(docker_t) + virt_manage_sandbox_files(docker_t) + virt_relabel_sandbox_filesystem(docker_t) + # for lxc + virt_transition_svirt_sandbox(docker_t, system_r) + virt_mounton_sandbox_file(docker_t) +# virt_attach_sandbox_tun_iface(docker_t) + allow docker_t svirt_sandbox_domain:tun_socket relabelfrom; + virt_sandbox_entrypoint(docker_t) +') + +tunable_policy(`docker_connect_any',` + corenet_tcp_connect_all_ports(docker_t) + corenet_sendrecv_all_packets(docker_t) + corenet_tcp_sendrecv_all_ports(docker_t) +') + +######################################## +# +# spc local policy +# +allow spc_t { docker_var_lib_t docker_share_t }:file entrypoint; +role system_r types spc_t; + +domtrans_pattern(docker_t, docker_share_t, spc_t) +domtrans_pattern(docker_t, docker_var_lib_t, spc_t) +allow docker_t spc_t:process 
{ setsched signal_perms }; +ps_process_pattern(docker_t, spc_t) +allow docker_t spc_t:socket_class_set { relabelto relabelfrom }; +filetrans_pattern(docker_t, docker_var_lib_t, docker_share_t, dir, "overlay") + +optional_policy(` + systemd_dbus_chat_machined(spc_t) +') + +optional_policy(` + dbus_chat_system_bus(spc_t) +') + +optional_policy(` + unconfined_domain_noaudit(spc_t) +') + +optional_policy(` + virt_transition_svirt_sandbox(spc_t, system_r) + virt_sandbox_entrypoint(spc_t) +') + +######################################## +# +# docker_auth local policy +# +allow docker_auth_t self:fifo_file rw_fifo_file_perms; +allow docker_auth_t self:unix_stream_socket create_stream_socket_perms; +dontaudit docker_auth_t self:capability net_admin; + +docker_stream_connect(docker_auth_t) + +manage_dirs_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) +manage_files_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) +manage_sock_files_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) +manage_lnk_files_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) +files_pid_filetrans(docker_auth_t, docker_plugin_var_run_t, { dir file lnk_file sock_file }) + +domain_use_interactive_fds(docker_auth_t) + +kernel_read_net_sysctls(docker_auth_t) + +auth_use_nsswitch(docker_auth_t) + +files_read_etc_files(docker_auth_t) + +miscfiles_read_localization(docker_auth_t) + +sysnet_dns_name_resolve(docker_auth_t) diff --git a/vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE b/vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE new file mode 100644 index 000000000..5b6e7c66c --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE @@ -0,0 +1,340 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. 
+ + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. 
+ + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. 
However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. 
Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Library General +Public License instead of this License. 
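The Makefile added in the next file drives the standard refpolicy build: it compiles docker.te into a docker.pp policy package and then bzip2-compresses it. As a minimal usage sketch (an assumption, not part of this patch; it presumes the selinux-policy devel Makefile is installed at /usr/share/selinux/devel/Makefile):

```sh
# Build the module: %.pp is produced by the refpolicy devel Makefile,
# then compressed to docker.pp.bz2 by the bzip2 rule (the `all` target).
make
# Load the compressed policy package into the running policy store.
sudo semodule -i docker.pp.bz2
```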
diff --git a/vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/Makefile b/vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/Makefile new file mode 100644 index 000000000..1bdc695af --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/Makefile @@ -0,0 +1,16 @@ +TARGETS?=docker +MODULES?=${TARGETS:=.pp.bz2} +SHAREDIR?=/usr/share + +all: ${TARGETS:=.pp.bz2} + +%.pp.bz2: %.pp + @echo Compressing $^ -\> $@ + bzip2 -9 $^ + +%.pp: %.te + make -f ${SHAREDIR}/selinux/devel/Makefile $@ + +clean: + rm -f *~ *.tc *.pp *.pp.bz2 + rm -rf tmp *.tar.gz diff --git a/vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/README.md b/vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/README.md new file mode 100644 index 000000000..7ea3117a8 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/README.md @@ -0,0 +1 @@ +SELinux policy for docker diff --git a/vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.fc b/vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.fc new file mode 100644 index 000000000..467d65960 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.fc @@ -0,0 +1,18 @@ +/root/\.docker gen_context(system_u:object_r:docker_home_t,s0) + +/usr/bin/dockerd -- gen_context(system_u:object_r:docker_exec_t,s0) + +/usr/lib/systemd/system/docker.service -- gen_context(system_u:object_r:docker_unit_file_t,s0) + +/etc/docker(/.*)? gen_context(system_u:object_r:docker_config_t,s0) + +/var/lib/docker(/.*)? gen_context(system_u:object_r:docker_var_lib_t,s0) + +/var/run/docker\.pid -- gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker\.sock -s gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker-client(/.*)? gen_context(system_u:object_r:docker_var_run_t,s0) + +/var/lib/docker/init(/.*)? gen_context(system_u:object_r:docker_share_t,s0) +/var/lib/docker/containers/.*/hosts gen_context(system_u:object_r:docker_share_t,s0) +/var/lib/docker/containers/.*/hostname gen_context(system_u:object_r:docker_share_t,s0) +/var/lib/docker/.*/config\.env gen_context(system_u:object_r:docker_share_t,s0) diff --git a/vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.if b/vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.if new file mode 100644 index 000000000..ca075c05c --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.if @@ -0,0 +1,461 @@ + +## The open-source application container engine. + +######################################## +## +## Execute docker in the docker domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_domtrans',` + gen_require(` + type docker_t, docker_exec_t; + ') + + corecmd_search_bin($1) + domtrans_pattern($1, docker_exec_t, docker_t) +') + +######################################## +## +## Execute docker in the caller domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_exec',` + gen_require(` + type docker_exec_t; + ') + + corecmd_search_bin($1) + can_exec($1, docker_exec_t) +') + +######################################## +## +## Search docker lib directories. +## +## +## +## Domain allowed access. 
+## +## +# +interface(`docker_search_lib',` + gen_require(` + type docker_var_lib_t; + ') + + allow $1 docker_var_lib_t:dir search_dir_perms; + files_search_var_lib($1) +') + +######################################## +## +## Execute docker lib directories. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_exec_lib',` + gen_require(` + type docker_var_lib_t; + ') + + allow $1 docker_var_lib_t:dir search_dir_perms; + can_exec($1, docker_var_lib_t) +') + +######################################## +## +## Read docker lib files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_read_lib_files',` + gen_require(` + type docker_var_lib_t; + ') + + files_search_var_lib($1) + read_files_pattern($1, docker_var_lib_t, docker_var_lib_t) +') + +######################################## +## +## Read docker share files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_read_share_files',` + gen_require(` + type docker_share_t; + ') + + files_search_var_lib($1) + read_files_pattern($1, docker_share_t, docker_share_t) +') + +######################################## +## +## Manage docker lib files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_manage_lib_files',` + gen_require(` + type docker_var_lib_t; + ') + + files_search_var_lib($1) + manage_files_pattern($1, docker_var_lib_t, docker_var_lib_t) + manage_lnk_files_pattern($1, docker_var_lib_t, docker_var_lib_t) +') + +######################################## +## +## Manage docker lib directories. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_manage_lib_dirs',` + gen_require(` + type docker_var_lib_t; + ') + + files_search_var_lib($1) + manage_dirs_pattern($1, docker_var_lib_t, docker_var_lib_t) +') + +######################################## +## +## Create objects in a docker var lib directory +## with an automatic type transition to +## a specified private type. +## +## +## +## Domain allowed access. +## +## +## +## +## The type of the object to create. +## +## +## +## +## The class of the object to be created. +## +## +## +## +## The name of the object being created. +## +## +# +interface(`docker_lib_filetrans',` + gen_require(` + type docker_var_lib_t; + ') + + filetrans_pattern($1, docker_var_lib_t, $2, $3, $4) +') + +######################################## +## +## Read docker PID files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_read_pid_files',` + gen_require(` + type docker_var_run_t; + ') + + files_search_pids($1) + read_files_pattern($1, docker_var_run_t, docker_var_run_t) +') + +######################################## +## +## Execute docker server in the docker domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_systemctl',` + gen_require(` + type docker_t; + type docker_unit_file_t; + ') + + systemd_exec_systemctl($1) + init_reload_services($1) + systemd_read_fifo_file_passwd_run($1) + allow $1 docker_unit_file_t:file read_file_perms; + allow $1 docker_unit_file_t:service manage_service_perms; + + ps_process_pattern($1, docker_t) +') + +######################################## +## +## Read and write docker shared memory. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_rw_sem',` + gen_require(` + type docker_t; + ') + + allow $1 docker_t:sem rw_sem_perms; +') + +####################################### +## +## Read and write the docker pty type. +## +## +## +## Domain allowed access. 
+## +## +# +interface(`docker_use_ptys',` + gen_require(` + type docker_devpts_t; + ') + + allow $1 docker_devpts_t:chr_file rw_term_perms; +') + +####################################### +## +## Allow domain to create docker content +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_filetrans_named_content',` + + gen_require(` + type docker_var_lib_t; + type docker_share_t; + type docker_log_t; + type docker_var_run_t; + type docker_home_t; + ') + + files_pid_filetrans($1, docker_var_run_t, file, "docker.pid") + files_pid_filetrans($1, docker_var_run_t, sock_file, "docker.sock") + files_pid_filetrans($1, docker_var_run_t, dir, "docker-client") + files_var_lib_filetrans($1, docker_var_lib_t, dir, "docker") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "config.env") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "hosts") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "hostname") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "resolv.conf") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, dir, "init") + userdom_admin_home_dir_filetrans($1, docker_home_t, dir, ".docker") +') + +######################################## +## +## Connect to docker over a unix stream socket. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_stream_connect',` + gen_require(` + type docker_t, docker_var_run_t; + ') + + files_search_pids($1) + stream_connect_pattern($1, docker_var_run_t, docker_var_run_t, docker_t) +') + +######################################## +## +## Connect to SPC containers over a unix stream socket. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_spc_stream_connect',` + gen_require(` + type spc_t, spc_var_run_t; + ') + + files_search_pids($1) + files_write_all_pid_sockets($1) + allow $1 spc_t:unix_stream_socket connectto; +') + + +######################################## +## +## All of the rules required to administrate +## an docker environment +## +## +## +## Domain allowed access. 
+## +## +# +interface(`docker_admin',` + gen_require(` + type docker_t; + type docker_var_lib_t, docker_var_run_t; + type docker_unit_file_t; + type docker_lock_t; + type docker_log_t; + type docker_config_t; + ') + + allow $1 docker_t:process { ptrace signal_perms }; + ps_process_pattern($1, docker_t) + + admin_pattern($1, docker_config_t) + + files_search_var_lib($1) + admin_pattern($1, docker_var_lib_t) + + files_search_pids($1) + admin_pattern($1, docker_var_run_t) + + files_search_locks($1) + admin_pattern($1, docker_lock_t) + + logging_search_logs($1) + admin_pattern($1, docker_log_t) + + docker_systemctl($1) + admin_pattern($1, docker_unit_file_t) + allow $1 docker_unit_file_t:service all_service_perms; + + optional_policy(` + systemd_passwd_agent_exec($1) + systemd_read_fifo_file_passwd_run($1) + ') +') + +interface(`domain_stub_named_filetrans_domain',` + gen_require(` + attribute named_filetrans_domain; + ') +') + +interface(`lvm_stub',` + gen_require(` + type lvm_t; + ') +') +interface(`staff_stub',` + gen_require(` + type staff_t; + ') +') +interface(`virt_stub_svirt_sandbox_domain',` + gen_require(` + attribute svirt_sandbox_domain; + ') +') +interface(`virt_stub_svirt_sandbox_file',` + gen_require(` + type svirt_sandbox_file_t; + ') +') +interface(`fs_dontaudit_remount_tmpfs',` + gen_require(` + type tmpfs_t; + ') + + dontaudit $1 tmpfs_t:filesystem remount; +') +interface(`dev_dontaudit_list_all_dev_nodes',` + gen_require(` + type device_t; + ') + + dontaudit $1 device_t:dir list_dir_perms; +') +interface(`kernel_unlabeled_entry_type',` + gen_require(` + type unlabeled_t; + ') + + domain_entry_file($1, unlabeled_t) +') +interface(`kernel_unlabeled_domtrans',` + gen_require(` + type unlabeled_t; + ') + + read_lnk_files_pattern($1, unlabeled_t, unlabeled_t) + domain_transition_pattern($1, unlabeled_t, $2) + type_transition $1 unlabeled_t:process $2; +') +interface(`files_write_all_pid_sockets',` + gen_require(` + attribute pidfile; + ') + + allow $1 pidfile:sock_file write_sock_file_perms; +') +interface(`dev_dontaudit_mounton_sysfs',` + gen_require(` + type sysfs_t; + ') + + dontaudit $1 sysfs_t:dir mounton; +') diff --git a/vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.te b/vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.te new file mode 100644 index 000000000..bad0bb6e4 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.te @@ -0,0 +1,407 @@ +policy_module(docker, 1.0.0) + +######################################## +# +# Declarations +# + +## +##

+## Determine whether docker can +## connect to all TCP ports. +## +##
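+# (Sketch, same tooling assumption as the Fedora policy above: this boolean
+# defaults to off, `getsebool docker_connect_any` reports its current state,
+# and the tunable_policy block later in this file grants the all-TCP-ports
+# rules only while it is enabled.)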
    +gen_tunable(docker_connect_any, false) + +type docker_t; +type docker_exec_t; +init_daemon_domain(docker_t, docker_exec_t) +domain_subj_id_change_exemption(docker_t) +domain_role_change_exemption(docker_t) + +type spc_t; +domain_type(spc_t) +role system_r types spc_t; + +type spc_var_run_t; +files_pid_file(spc_var_run_t) + +type docker_var_lib_t; +files_type(docker_var_lib_t) + +type docker_home_t; +userdom_user_home_content(docker_home_t) + +type docker_config_t; +files_config_file(docker_config_t) + +type docker_lock_t; +files_lock_file(docker_lock_t) + +type docker_log_t; +logging_log_file(docker_log_t) + +type docker_tmp_t; +files_tmp_file(docker_tmp_t) + +type docker_tmpfs_t; +files_tmpfs_file(docker_tmpfs_t) + +type docker_var_run_t; +files_pid_file(docker_var_run_t) + +type docker_unit_file_t; +systemd_unit_file(docker_unit_file_t) + +type docker_devpts_t; +term_pty(docker_devpts_t) + +type docker_share_t; +files_type(docker_share_t) + +######################################## +# +# docker local policy +# +allow docker_t self:capability { chown kill fowner fsetid mknod net_admin net_bind_service net_raw setfcap }; +allow docker_t self:tun_socket relabelto; +allow docker_t self:process { getattr signal_perms setrlimit setfscreate }; +allow docker_t self:fifo_file rw_fifo_file_perms; +allow docker_t self:unix_stream_socket create_stream_socket_perms; +allow docker_t self:tcp_socket create_stream_socket_perms; +allow docker_t self:udp_socket create_socket_perms; +allow docker_t self:capability2 block_suspend; + +manage_files_pattern(docker_t, docker_home_t, docker_home_t) +manage_dirs_pattern(docker_t, docker_home_t, docker_home_t) +manage_lnk_files_pattern(docker_t, docker_home_t, docker_home_t) +userdom_admin_home_dir_filetrans(docker_t, docker_home_t, dir, ".docker") + +manage_dirs_pattern(docker_t, docker_config_t, docker_config_t) +manage_files_pattern(docker_t, docker_config_t, docker_config_t) +files_etc_filetrans(docker_t, docker_config_t, dir, "docker") + +manage_dirs_pattern(docker_t, docker_lock_t, docker_lock_t) +manage_files_pattern(docker_t, docker_lock_t, docker_lock_t) + +manage_dirs_pattern(docker_t, docker_log_t, docker_log_t) +manage_files_pattern(docker_t, docker_log_t, docker_log_t) +manage_lnk_files_pattern(docker_t, docker_log_t, docker_log_t) +logging_log_filetrans(docker_t, docker_log_t, { dir file lnk_file }) +allow docker_t docker_log_t:dir_file_class_set { relabelfrom relabelto }; + +manage_dirs_pattern(docker_t, docker_tmp_t, docker_tmp_t) +manage_files_pattern(docker_t, docker_tmp_t, docker_tmp_t) +manage_lnk_files_pattern(docker_t, docker_tmp_t, docker_tmp_t) +files_tmp_filetrans(docker_t, docker_tmp_t, { dir file lnk_file }) + +manage_dirs_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_lnk_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_fifo_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_chr_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_blk_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +allow docker_t docker_tmpfs_t:dir relabelfrom; +can_exec(docker_t, docker_tmpfs_t) +fs_tmpfs_filetrans(docker_t, docker_tmpfs_t, { dir file }) +allow docker_t docker_tmpfs_t:chr_file mounton; + +manage_dirs_pattern(docker_t, docker_share_t, docker_share_t) +manage_files_pattern(docker_t, docker_share_t, docker_share_t) +manage_lnk_files_pattern(docker_t, docker_share_t, docker_share_t) +allow docker_t 
docker_share_t:dir_file_class_set { relabelfrom relabelto }; + +can_exec(docker_t, docker_share_t) +#docker_filetrans_named_content(docker_t) + +manage_dirs_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_chr_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_blk_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_lnk_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +allow docker_t docker_var_lib_t:dir_file_class_set { relabelfrom relabelto }; +files_var_lib_filetrans(docker_t, docker_var_lib_t, { dir file lnk_file }) + +manage_dirs_pattern(docker_t, docker_var_run_t, docker_var_run_t) +manage_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) +manage_sock_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) +manage_lnk_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) +files_pid_filetrans(docker_t, docker_var_run_t, { dir file lnk_file sock_file }) + +allow docker_t docker_devpts_t:chr_file { relabelfrom rw_chr_file_perms setattr_chr_file_perms }; +term_create_pty(docker_t, docker_devpts_t) + +kernel_read_system_state(docker_t) +kernel_read_network_state(docker_t) +kernel_read_all_sysctls(docker_t) +kernel_rw_net_sysctls(docker_t) +kernel_setsched(docker_t) +kernel_read_all_proc(docker_t) + +domain_use_interactive_fds(docker_t) +domain_dontaudit_read_all_domains_state(docker_t) + +corecmd_exec_bin(docker_t) +corecmd_exec_shell(docker_t) + +corenet_tcp_bind_generic_node(docker_t) +corenet_tcp_sendrecv_generic_if(docker_t) +corenet_tcp_sendrecv_generic_node(docker_t) +corenet_tcp_sendrecv_generic_port(docker_t) +corenet_tcp_bind_all_ports(docker_t) +corenet_tcp_connect_http_port(docker_t) +corenet_tcp_connect_commplex_main_port(docker_t) +corenet_udp_sendrecv_generic_if(docker_t) +corenet_udp_sendrecv_generic_node(docker_t) +corenet_udp_sendrecv_all_ports(docker_t) +corenet_udp_bind_generic_node(docker_t) +corenet_udp_bind_all_ports(docker_t) + +files_read_config_files(docker_t) +files_dontaudit_getattr_all_dirs(docker_t) +files_dontaudit_getattr_all_files(docker_t) + +fs_read_cgroup_files(docker_t) +fs_read_tmpfs_symlinks(docker_t) +fs_search_all(docker_t) +fs_getattr_all_fs(docker_t) + +storage_raw_rw_fixed_disk(docker_t) + +auth_use_nsswitch(docker_t) +auth_dontaudit_getattr_shadow(docker_t) + +init_read_state(docker_t) +init_status(docker_t) + +logging_send_audit_msgs(docker_t) +logging_send_syslog_msg(docker_t) + +miscfiles_read_localization(docker_t) + +mount_domtrans(docker_t) + +seutil_read_default_contexts(docker_t) +seutil_read_config(docker_t) + +sysnet_dns_name_resolve(docker_t) +sysnet_exec_ifconfig(docker_t) + +optional_policy(` + rpm_exec(docker_t) + rpm_read_db(docker_t) + rpm_exec(docker_t) +') + +optional_policy(` + fstools_domtrans(docker_t) +') + +optional_policy(` + iptables_domtrans(docker_t) +') + +optional_policy(` + openvswitch_stream_connect(docker_t) +') + +allow docker_t self:capability { dac_override setgid setpcap setuid sys_admin sys_boot sys_chroot sys_ptrace }; + +allow docker_t self:process { getcap setcap setexec setpgid setsched signal_perms }; + +allow docker_t self:netlink_route_socket rw_netlink_socket_perms;; +allow docker_t self:netlink_audit_socket create_netlink_socket_perms; +allow docker_t self:unix_dgram_socket { create_socket_perms sendto }; +allow docker_t self:unix_stream_socket { create_stream_socket_perms connectto }; + +allow docker_t docker_var_lib_t:dir mounton; +allow docker_t 
docker_var_lib_t:chr_file mounton; +can_exec(docker_t, docker_var_lib_t) + +kernel_dontaudit_setsched(docker_t) +kernel_get_sysvipc_info(docker_t) +kernel_request_load_module(docker_t) +kernel_mounton_messages(docker_t) +kernel_mounton_all_proc(docker_t) +kernel_mounton_all_sysctls(docker_t) +kernel_unlabeled_entry_type(spc_t) +kernel_unlabeled_domtrans(docker_t, spc_t) + +dev_getattr_all(docker_t) +dev_getattr_sysfs_fs(docker_t) +dev_read_urand(docker_t) +dev_read_lvm_control(docker_t) +dev_rw_sysfs(docker_t) +dev_rw_loop_control(docker_t) +dev_rw_lvm_control(docker_t) + +files_getattr_isid_type_dirs(docker_t) +files_manage_isid_type_dirs(docker_t) +files_manage_isid_type_files(docker_t) +files_manage_isid_type_symlinks(docker_t) +files_manage_isid_type_chr_files(docker_t) +files_manage_isid_type_blk_files(docker_t) +files_exec_isid_files(docker_t) +files_mounton_isid(docker_t) +files_mounton_non_security(docker_t) +files_mounton_isid_type_chr_file(docker_t) + +fs_mount_all_fs(docker_t) +fs_unmount_all_fs(docker_t) +fs_remount_all_fs(docker_t) +files_mounton_isid(docker_t) +fs_manage_cgroup_dirs(docker_t) +fs_manage_cgroup_files(docker_t) +fs_relabelfrom_xattr_fs(docker_t) +fs_relabelfrom_tmpfs(docker_t) +fs_read_tmpfs_symlinks(docker_t) +fs_list_hugetlbfs(docker_t) + +term_use_generic_ptys(docker_t) +term_use_ptmx(docker_t) +term_getattr_pty_fs(docker_t) +term_relabel_pty_fs(docker_t) +term_mounton_unallocated_ttys(docker_t) + +modutils_domtrans_insmod(docker_t) + +systemd_status_all_unit_files(docker_t) +systemd_start_systemd_services(docker_t) + +userdom_stream_connect(docker_t) +userdom_search_user_home_content(docker_t) +userdom_read_all_users_state(docker_t) +userdom_relabel_user_home_files(docker_t) +userdom_relabel_user_tmp_files(docker_t) +userdom_relabel_user_tmp_dirs(docker_t) + +optional_policy(` + gpm_getattr_gpmctl(docker_t) +') + +optional_policy(` + dbus_system_bus_client(docker_t) + init_dbus_chat(docker_t) + init_start_transient_unit(docker_t) + + optional_policy(` + systemd_dbus_chat_logind(docker_t) + ') + + optional_policy(` + firewalld_dbus_chat(docker_t) + ') +') + +optional_policy(` + udev_read_db(docker_t) +') + +optional_policy(` + virt_read_config(docker_t) + virt_exec(docker_t) + virt_stream_connect(docker_t) + virt_stream_connect_sandbox(docker_t) + virt_exec_sandbox_files(docker_t) + virt_manage_sandbox_files(docker_t) + virt_relabel_sandbox_filesystem(docker_t) + virt_transition_svirt_sandbox(docker_t, system_r) + virt_mounton_sandbox_file(docker_t) +# virt_attach_sandbox_tun_iface(docker_t) + allow docker_t svirt_sandbox_domain:tun_socket relabelfrom; +') + +tunable_policy(`docker_connect_any',` + corenet_tcp_connect_all_ports(docker_t) + corenet_sendrecv_all_packets(docker_t) + corenet_tcp_sendrecv_all_ports(docker_t) +') + +######################################## +# +# spc local policy +# +domain_entry_file(spc_t, docker_share_t) +domain_entry_file(spc_t, docker_var_lib_t) +role system_r types spc_t; + +domain_entry_file(spc_t, docker_share_t) +domain_entry_file(spc_t, docker_var_lib_t) +domtrans_pattern(docker_t, docker_share_t, spc_t) +domtrans_pattern(docker_t, docker_var_lib_t, spc_t) +allow docker_t spc_t:process { setsched signal_perms }; +ps_process_pattern(docker_t, spc_t) +allow docker_t spc_t:socket_class_set { relabelto relabelfrom }; + +optional_policy(` + dbus_chat_system_bus(spc_t) +') + +optional_policy(` + unconfined_domain_noaudit(spc_t) +') + +optional_policy(` + unconfined_domain(docker_t) +') + +optional_policy(` + 
virt_transition_svirt_sandbox(spc_t, system_r) +') + +######################################## +# +# docker upstream policy +# + +optional_policy(` +# domain_stub_named_filetrans_domain() + gen_require(` + attribute named_filetrans_domain; + ') + + docker_filetrans_named_content(named_filetrans_domain) +') + +optional_policy(` + lvm_stub() + docker_rw_sem(lvm_t) +') + +optional_policy(` + staff_stub() + docker_stream_connect(staff_t) + docker_exec(staff_t) +') + +optional_policy(` + virt_stub_svirt_sandbox_domain() + virt_stub_svirt_sandbox_file() + allow svirt_sandbox_domain self:netlink_kobject_uevent_socket create_socket_perms; + docker_read_share_files(svirt_sandbox_domain) + docker_lib_filetrans(svirt_sandbox_domain,svirt_sandbox_file_t, sock_file) + docker_use_ptys(svirt_sandbox_domain) + docker_spc_stream_connect(svirt_sandbox_domain) + fs_list_tmpfs(svirt_sandbox_domain) + fs_rw_hugetlbfs_files(svirt_sandbox_domain) + fs_dontaudit_remount_tmpfs(svirt_sandbox_domain) + dev_dontaudit_mounton_sysfs(svirt_sandbox_domain) + + tunable_policy(`virt_sandbox_use_fusefs',` + fs_manage_fusefs_dirs(svirt_sandbox_domain) + fs_manage_fusefs_files(svirt_sandbox_domain) + fs_manage_fusefs_symlinks(svirt_sandbox_domain) + ') + gen_require(` + attribute domain; + ') + + dontaudit svirt_sandbox_domain domain:key {search link}; +') + +optional_policy(` + gen_require(` + type pcp_pmcd_t; + ') + docker_manage_lib_files(pcp_pmcd_t) +') diff --git a/vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker_selinux.8.gz b/vendor/github.com/moby/moby/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker_selinux.8.gz new file mode 100644 index 0000000000000000000000000000000000000000..ab5d59445ac1601ca378aaa3e71fb9cff43a1592 GIT binary patch literal 2847 zcmV+)3*hu0iwFo7v)okz17vSwYh`j@b7gF4ZgqGrH~__3TaVke5`I4V6@*`!6l=S| zL4lsU6wa>G7)ZQ6Yo}<61q526ZDJ)-B`NQ^I6wZ(@S=+?ue_UwK6D$4GsAB#9L|h1 zT74p9kjmtNshEi^7cAB+)14KO+rU$c!fk;-5#O zmV?vz9JoRUq(p7=UrB&Q;!Mydm$39gew3ZrB;ilS8)GkXHjhLJ~Z zb`9~dA;BW%P_PmCCQFh~L6RLy9thu%13cK#JwqnV8WL401Q%PfK6v5y10~;YJ{0bIQB&IB4h8PX!L;;nhe>W6UN2*vY){HP=m;wW%^*n~)VL%-lM6=;wQLDcf$Tqah4DzZ&A-OQ5 zpk}9!d<;9LGN)V+s;qrrJQSwpk3}bnLm)E<+bWW&HyOZUN+Z8! zrYvwTu1*6Pt*!k*0iAMYb~43Bh141ajx6w1(;G*YMQ=Hqr`EP^4~)I(9~ggC#Eqs? zD{L+egeI(L1_4dCa15BrIqU}t4{6QdV-7S)QIVWJI5#x+uY;!+G9tCH0HBZ%Sxi)C z8$>lWY$jj8Y28@j&axe-pr4r%%d53zgn1bWHS|f79H5xC)H0jgI zs0uU)bj^^I3>RHe-bFS9yYM?pRYwLc4Vmq2q_6juX;@({ab1JGR{4 zfw)WDORWuVA|@%o>a>8w)TaS@706>x{ypfAMZF9;#_*bFSi{*fL({Pf9G43qVP2w% zIefPU=Gn9DQa}6`GQB;xg;6xIw?0G;TbJ9dJ-0w6?P0F2$a6Y?)YuAX#LrY*3cta9 znbBSK62e9L9P1w1f-7Y@QM`a6_IzSR@_3V?)m{U-#s5;+q3R`&mIcd5CTWU?x6D`% zV8;+6L+lw|cO#sY_EKF!`4838h8Nl%`!hOJ>#s0)&D)<2@%Y(r-jGtktqviMNwER^ z48UzB*EEZ@e$}nh;O;eIRpUakf#QQ=iR`XhC_rrG0lrx?CC@<(%RcL-uQ2I}h+fpL z9caOv&z5Hp3f=+ka%(o(UvEx4okAy2e(WgrYB{7zbvTC@2yGVCyZlv@2@b z=9Ay1H{|2&^EC99RZZMk!DF%LI|5gCWOU6CMOBv8JxJALYABUav}-9d4&F+u4l=Z! 
zt$tIpHaGSo&AtLQ{yMwy<-K68`LOBhW^!G%4q$9F$y%XFlC6?ufnD{##t<;$jUKy4 zmY|~YRRVg>(K3^a{nIz&(T{I`?WEsR6=!_ySm4JPevFGmrwyKZ;Z$C|CJP8Jt~=KX zH>2s6DdEf(n*uUl@{rz-3Z5RXd1H00sjUle)ye1-ZW8H-mPzWaX2Z92 z2)V}{CiL_>nKMVNq%`CEQ5d3}lA>0PK!ac7>?t`f8d{Df`Sy8gn~~aa>{ftd?6kTc zF|j|25>LYg?~WqBj^i1)>7ay0aXYDvzLZeVoOJ<)sByEhPdb$d6PF5P+?nTA#c=PA@scfsdyQ}Y5_8NS&t1%dCZ80_lCjU~9q zjg5~^n4io*sRMUjw&e-&PXRHliX zY20}XrJnZnS;I@=y@9J_Lt)mmQkSDND_Fue2MBz)TLn7JCJNW?r(tTxn%2Mxv7ZB5 zT8-6m%Jsu2I&X4wlF-Qy)}Z;JzP3%3&VP8`3&%{68=F@ZwA>hn8)wGbGNbs`rvOw+Ii6L#f+~(-Xebl9tVa4Q=RZL4J-Fm?7Us&zIL%JG!WlDgL{mh6HshYGQfieib=4EQndS1#f<%XMS1_3R~RknrMThpB*A zCYMWHDccRA1lxy7yBA1<_@xnpti)d@-AL;Gr58s<`kYA`A6_^qr^Q>}F{(R=iy*ms z_mz-vzy~*6=s#M}x=+}dR_%&(wP_tsZHrdF6ek~>)suhy9Ri#~Hp*p+-+449V#y8* z2Vd{B>(20^n+gC1%*l?5Ejz8!n$?sqc{{6|sNQ9TW$Yu)$1I|Q6&gyDs=UJFgs-`Q z0ehv#<~$8HY8O8d4mOJ-J2c8J|4Myuef#ALRG`bj8C+l}nrYeoSfF~{9fp7{rG2HY zM<;c3{cS*>;P9>+Vg|o4pzX0HSg7$y!pS!7-9zUVZUj6|-4GUXvo>%SjTOt~zIt=- z-(4J4q<(_iha62Dz7?pde3zq!?w%O>PqiXYgOcCA&On092;Ebjh1w>3&(N6b`qqva z_kj(bC9a^$msVm=VkX>UOUv6-Ye@!LXc8$>j6$ xb`W`pZ+>}u<]*>" "%[qw]\[[^]]*\]" "%[qw]\$[^$]*\$" "%[qw]\^[^^]*\^" "%[qw]![^!]*!" + +## Strings, double-quoted +color brightwhite ""([^"]|(\\"))*"" "%[QW]?\{[^}]*\}" "%[QW]?\([^)]*\)" "%[QW]?<[^>]*>" "%[QW]?\[[^]]*\]" "%[QW]?\$[^$]*\$" "%[QW]?\^[^^]*\^" "%[QW]?![^!]*!" + +## Single and double quotes +color brightyellow "('|\")" diff --git a/vendor/github.com/moby/moby/contrib/syntax/nano/README.md b/vendor/github.com/moby/moby/contrib/syntax/nano/README.md new file mode 100644 index 000000000..5985208b0 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/syntax/nano/README.md @@ -0,0 +1,32 @@ +Dockerfile.nanorc +================= + +Dockerfile syntax highlighting for nano + +Single User Installation +------------------------ +1. Create a nano syntax directory in your home directory: + * `mkdir -p ~/.nano/syntax` + +2. Copy `Dockerfile.nanorc` to` ~/.nano/syntax/` + * `cp Dockerfile.nanorc ~/.nano/syntax/` + +3. Add the following to your `~/.nanorc` to tell nano where to find the `Dockerfile.nanorc` file + ``` +## Dockerfile files +include "~/.nano/syntax/Dockerfile.nanorc" + ``` + +System Wide Installation +------------------------ +1. Create a nano syntax directory: + * `mkdir /usr/local/share/nano` + +2. Copy `Dockerfile.nanorc` to `/usr/local/share/nano` + * `cp Dockerfile.nanorc /usr/local/share/nano/` + +3. 
+3. Add the following to your `/etc/nanorc`:
+   ```
+## Dockerfile files
+include "/usr/local/share/nano/Dockerfile.nanorc"
+   ```
diff --git a/vendor/github.com/moby/moby/contrib/syntax/textmate/Docker.tmbundle/Preferences/Dockerfile.tmPreferences b/vendor/github.com/moby/moby/contrib/syntax/textmate/Docker.tmbundle/Preferences/Dockerfile.tmPreferences
new file mode 100644
index 000000000..20f0d04ca
--- /dev/null
+++ b/vendor/github.com/moby/moby/contrib/syntax/textmate/Docker.tmbundle/Preferences/Dockerfile.tmPreferences
@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+	<key>name</key>
+	<string>Comments</string>
+	<key>scope</key>
+	<string>source.dockerfile</string>
+	<key>settings</key>
+	<dict>
+		<key>shellVariables</key>
+		<array>
+			<dict>
+				<key>name</key>
+				<string>TM_COMMENT_START</string>
+				<key>value</key>
+				<string># </string>
+			</dict>
+		</array>
+	</dict>
+	<key>uuid</key>
+	<string>2B215AC0-A7F3-4090-9FF6-F4842BD56CA7</string>
+</dict>
+</plist>
diff --git a/vendor/github.com/moby/moby/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage b/vendor/github.com/moby/moby/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage
new file mode 100644
index 000000000..5a273337f
--- /dev/null
+++ b/vendor/github.com/moby/moby/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage
@@ -0,0 +1,160 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+	<key>fileTypes</key>
+	<array>
+		<string>Dockerfile</string>
+	</array>
+	<key>name</key>
+	<string>Dockerfile</string>
+	<key>patterns</key>
+	<array>
+		<dict>
+			<key>captures</key>
+			<dict>
+				<key>1</key>
+				<dict>
+					<key>name</key>
+					<string>keyword.other.special-method.dockerfile</string>
+				</dict>
+				<key>2</key>
+				<dict>
+					<key>name</key>
+					<string>keyword.other.special-method.dockerfile</string>
+				</dict>
+			</dict>
+			<key>match</key>
+			<string>^\s*\b(FROM)\b.*?\b(AS)\b</string>
+		</dict>
+		<dict>
+			<key>captures</key>
+			<dict>
+				<key>1</key>
+				<dict>
+					<key>name</key>
+					<string>keyword.control.dockerfile</string>
+				</dict>
+				<key>2</key>
+				<dict>
+					<key>name</key>
+					<string>keyword.other.special-method.dockerfile</string>
+				</dict>
+			</dict>
+			<key>match</key>
+			<string>^\s*(?:(ONBUILD)\s+)?(ADD|ARG|CMD|COPY|ENTRYPOINT|ENV|EXPOSE|FROM|HEALTHCHECK|LABEL|MAINTAINER|RUN|SHELL|STOPSIGNAL|USER|VOLUME|WORKDIR)\s</string>
+		</dict>
+		<dict>
+			<key>captures</key>
+			<dict>
+				<key>1</key>
+				<dict>
+					<key>name</key>
+					<string>keyword.operator.dockerfile</string>
+				</dict>
+				<key>2</key>
+				<dict>
+					<key>name</key>
+					<string>keyword.other.special-method.dockerfile</string>
+				</dict>
+			</dict>
+			<key>match</key>
+			<string>^\s*(?:(ONBUILD)\s+)?(CMD|ENTRYPOINT)\s</string>
+		</dict>
+		<dict>
+			<key>begin</key>
+			<string>"</string>
+			<key>beginCaptures</key>
+			<dict>
+				<key>1</key>
+				<dict>
+					<key>name</key>
+					<string>punctuation.definition.string.begin.dockerfile</string>
+				</dict>
+			</dict>
+			<key>end</key>
+			<string>"</string>
+			<key>endCaptures</key>
+			<dict>
+				<key>1</key>
+				<dict>
+					<key>name</key>
+					<string>punctuation.definition.string.end.dockerfile</string>
+				</dict>
+			</dict>
+			<key>name</key>
+			<string>string.quoted.double.dockerfile</string>
+			<key>patterns</key>
+			<array>
+				<dict>
+					<key>match</key>
+					<string>\\.</string>
+					<key>name</key>
+					<string>constant.character.escaped.dockerfile</string>
+				</dict>
+			</array>
+		</dict>
+		<dict>
+			<key>begin</key>
+			<string>'</string>
+			<key>beginCaptures</key>
+			<dict>
+				<key>1</key>
+				<dict>
+					<key>name</key>
+					<string>punctuation.definition.string.begin.dockerfile</string>
+				</dict>
+			</dict>
+			<key>end</key>
+			<string>'</string>
+			<key>endCaptures</key>
+			<dict>
+				<key>1</key>
+				<dict>
+					<key>name</key>
+					<string>punctuation.definition.string.end.dockerfile</string>
+				</dict>
+			</dict>
+			<key>name</key>
+			<string>string.quoted.single.dockerfile</string>
+			<key>patterns</key>
+			<array>
+				<dict>
+					<key>match</key>
+					<string>\\.</string>
+					<key>name</key>
+					<string>constant.character.escaped.dockerfile</string>
+				</dict>
+			</array>
+		</dict>
+		<dict>
+			<key>captures</key>
+			<dict>
+				<key>1</key>
+				<dict>
+					<key>name</key>
+					<string>punctuation.whitespace.comment.leading.dockerfile</string>
+				</dict>
+				<key>2</key>
+				<dict>
+					<key>name</key>
+					<string>comment.line.number-sign.dockerfile</string>
+				</dict>
+				<key>3</key>
+				<dict>
+					<key>name</key>
+					<string>punctuation.definition.comment.dockerfile</string>
+				</dict>
+			</dict>
+			<key>comment</key>
+			<string>comment.line</string>
+			<key>match</key>
+			<string>^(\s*)((#).*$\n?)</string>
+		</dict>
+	</array>
+	<key>scopeName</key>
+	<string>source.dockerfile</string>
+	<key>uuid</key>
+	<string>a39d8795-59d2-49af-aa00-fe74ee29576e</string>
+</dict>
+</plist>
diff --git a/vendor/github.com/moby/moby/contrib/syntax/textmate/Docker.tmbundle/info.plist b/vendor/github.com/moby/moby/contrib/syntax/textmate/Docker.tmbundle/info.plist
new file mode 100644
index 000000000..239f4b0a9
--- /dev/null
+++ b/vendor/github.com/moby/moby/contrib/syntax/textmate/Docker.tmbundle/info.plist
@@ -0,0 +1,16 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+	<key>contactEmailRot13</key>
+	<string>germ@andz.com.ar</string>
+	<key>contactName</key>
+	<string>GermanDZ</string>
+	<key>description</key>
+	<string>Helpers for Docker.</string>
+	<key>name</key>
+	<string>Docker</string>
+	<key>uuid</key>
+	<string>8B9DDBAF-E65C-4E12-FFA7-467D4AA535B1</string>
+</dict>
+</plist>
diff --git a/vendor/github.com/moby/moby/contrib/syntax/textmate/README.md b/vendor/github.com/moby/moby/contrib/syntax/textmate/README.md
new file mode 100644
index 000000000..ce611018e
--- /dev/null
+++ b/vendor/github.com/moby/moby/contrib/syntax/textmate/README.md
@@ -0,0 +1,17 @@
+# Docker.tmbundle
+
+Dockerfile syntax highlighting for TextMate and Sublime Text.
+
+## Install
+
+### Sublime Text
+
+Available for Sublime Text under [package control](https://sublime.wbond.net/packages/Dockerfile%20Syntax%20Highlighting).
+Search for *Dockerfile Syntax Highlighting*.
+
+### TextMate 2
+
+You can install this bundle in TextMate by opening the preferences and going to the bundles tab. After installation it will be updated for you automatically.
+
+Enjoy!
+
diff --git a/vendor/github.com/moby/moby/contrib/syntax/textmate/REVIEWERS b/vendor/github.com/moby/moby/contrib/syntax/textmate/REVIEWERS
new file mode 100644
index 000000000..965743df6
--- /dev/null
+++ b/vendor/github.com/moby/moby/contrib/syntax/textmate/REVIEWERS
@@ -0,0 +1 @@
+Asbjorn Enge (@asbjornenge)
diff --git a/vendor/github.com/moby/moby/contrib/syntax/vim/LICENSE b/vendor/github.com/moby/moby/contrib/syntax/vim/LICENSE
new file mode 100644
index 000000000..e67cdabd2
--- /dev/null
+++ b/vendor/github.com/moby/moby/contrib/syntax/vim/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2013 Honza Pokorny
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in the
+   documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/moby/moby/contrib/syntax/vim/README.md b/vendor/github.com/moby/moby/contrib/syntax/vim/README.md
new file mode 100644
index 000000000..5aa9bd825
--- /dev/null
+++ b/vendor/github.com/moby/moby/contrib/syntax/vim/README.md
@@ -0,0 +1,26 @@
+dockerfile.vim
+==============
+
+Syntax highlighting for Dockerfiles
+
+Installation
+------------
+With [pathogen](https://github.com/tpope/vim-pathogen), the usual way...
+
+With [Vundle](https://github.com/gmarik/Vundle.vim):
+
+    Plugin 'docker/docker', {'rtp': '/contrib/syntax/vim/'}
+
+Features
+--------
+
+The syntax highlighting includes:
+
+* The directives (e.g.
`FROM`) +* Strings +* Comments + +License +------- + +BSD, short and sweet diff --git a/vendor/github.com/moby/moby/contrib/syntax/vim/doc/dockerfile.txt b/vendor/github.com/moby/moby/contrib/syntax/vim/doc/dockerfile.txt new file mode 100644 index 000000000..e69e2b7b3 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/syntax/vim/doc/dockerfile.txt @@ -0,0 +1,18 @@ +*dockerfile.txt* Syntax highlighting for Dockerfiles + +Author: Honza Pokorny +License: BSD + +INSTALLATION *installation* + +Drop it on your Pathogen path and you're all set. + +FEATURES *features* + +The syntax highlighting includes: + +* The directives (e.g. FROM) +* Strings +* Comments + + vim:tw=78:et:ft=help:norl: diff --git a/vendor/github.com/moby/moby/contrib/syntax/vim/ftdetect/dockerfile.vim b/vendor/github.com/moby/moby/contrib/syntax/vim/ftdetect/dockerfile.vim new file mode 100644 index 000000000..ee10e5d6a --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/syntax/vim/ftdetect/dockerfile.vim @@ -0,0 +1 @@ +au BufNewFile,BufRead [Dd]ockerfile,Dockerfile.* set filetype=dockerfile diff --git a/vendor/github.com/moby/moby/contrib/syntax/vim/syntax/dockerfile.vim b/vendor/github.com/moby/moby/contrib/syntax/vim/syntax/dockerfile.vim new file mode 100644 index 000000000..a067e6ad4 --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/syntax/vim/syntax/dockerfile.vim @@ -0,0 +1,31 @@ +" dockerfile.vim - Syntax highlighting for Dockerfiles +" Maintainer: Honza Pokorny +" Version: 0.5 + + +if exists("b:current_syntax") + finish +endif + +let b:current_syntax = "dockerfile" + +syntax case ignore + +syntax match dockerfileKeyword /\v^\s*(ONBUILD\s+)?(ADD|ARG|CMD|COPY|ENTRYPOINT|ENV|EXPOSE|FROM|HEALTHCHECK|LABEL|MAINTAINER|RUN|SHELL|STOPSIGNAL|USER|VOLUME|WORKDIR)\s/ +highlight link dockerfileKeyword Keyword + +syntax region dockerfileString start=/\v"/ skip=/\v\\./ end=/\v"/ +highlight link dockerfileString String + +syntax match dockerfileComment "\v^\s*#.*$" +highlight link dockerfileComment Comment + +set commentstring=#\ %s + +" match "RUN", "CMD", and "ENTRYPOINT" lines, and parse them as shell +let s:current_syntax = b:current_syntax +unlet b:current_syntax +syntax include @SH syntax/sh.vim +let b:current_syntax = s:current_syntax +syntax region shLine matchgroup=dockerfileKeyword start=/\v^\s*(RUN|CMD|ENTRYPOINT)\s/ end=/\v$/ contains=@SH +" since @SH will handle "\" as part of the same line automatically, this "just works" for line continuation too, but with the caveat that it will highlight "RUN echo '" followed by a newline as if it were a block because the "'" is shell line continuation... not sure how to fix that just yet (TODO) diff --git a/vendor/github.com/moby/moby/contrib/syscall-test/Dockerfile b/vendor/github.com/moby/moby/contrib/syscall-test/Dockerfile new file mode 100644 index 000000000..f95f1758c --- /dev/null +++ b/vendor/github.com/moby/moby/contrib/syscall-test/Dockerfile @@ -0,0 +1,15 @@ +FROM buildpack-deps:jessie + +COPY . 
/usr/src/
+
+WORKDIR /usr/src/
+
+RUN gcc -g -Wall -static userns.c -o /usr/bin/userns-test \
+	&& gcc -g -Wall -static ns.c -o /usr/bin/ns-test \
+	&& gcc -g -Wall -static acct.c -o /usr/bin/acct-test \
+	&& gcc -g -Wall -static setuid.c -o /usr/bin/setuid-test \
+	&& gcc -g -Wall -static setgid.c -o /usr/bin/setgid-test \
+	&& gcc -g -Wall -static socket.c -o /usr/bin/socket-test \
+	&& gcc -g -Wall -static raw.c -o /usr/bin/raw-test
+
+RUN [ "$(uname -m)" = "x86_64" ] && gcc -s -m32 -nostdlib exit32.s -o /usr/bin/exit32-test || true
diff --git a/vendor/github.com/moby/moby/contrib/syscall-test/acct.c b/vendor/github.com/moby/moby/contrib/syscall-test/acct.c
new file mode 100644
index 000000000..88ac28796
--- /dev/null
+++ b/vendor/github.com/moby/moby/contrib/syscall-test/acct.c
@@ -0,0 +1,16 @@
+#define _GNU_SOURCE
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+int main(int argc, char **argv)
+{
+	int err = acct("/tmp/t");
+	if (err == -1) {
+		fprintf(stderr, "acct failed: %s\n", strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+	exit(EXIT_SUCCESS);
+}
diff --git a/vendor/github.com/moby/moby/contrib/syscall-test/exit32.s b/vendor/github.com/moby/moby/contrib/syscall-test/exit32.s
new file mode 100644
index 000000000..8bbb5c58b
--- /dev/null
+++ b/vendor/github.com/moby/moby/contrib/syscall-test/exit32.s
@@ -0,0 +1,7 @@
+.globl _start
+.text
+_start:
+	xorl %eax, %eax
+	incl %eax
+	movb $0, %bl
+	int $0x80
diff --git a/vendor/github.com/moby/moby/contrib/syscall-test/ns.c b/vendor/github.com/moby/moby/contrib/syscall-test/ns.c
new file mode 100644
index 000000000..624388630
--- /dev/null
+++ b/vendor/github.com/moby/moby/contrib/syscall-test/ns.c
@@ -0,0 +1,63 @@
+#define _GNU_SOURCE
+#include <errno.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+
+#define STACK_SIZE (1024 * 1024)	/* Stack size for cloned child */
+
+struct clone_args {
+	char **argv;
+};
+
+// child_exec is the func that will be executed as the result of clone
+static int child_exec(void *stuff)
+{
+	struct clone_args *args = (struct clone_args *)stuff;
+	if (execvp(args->argv[0], args->argv) != 0) {
+		fprintf(stderr, "failed to execvp arguments %s\n",
+			strerror(errno));
+		exit(-1);
+	}
+	// we should never reach here!
+	exit(EXIT_FAILURE);
+}
+
+int main(int argc, char **argv)
+{
+	struct clone_args args;
+	args.argv = &argv[1];
+
+	int clone_flags = CLONE_NEWNS | CLONE_NEWPID | SIGCHLD;
+
+	// allocate stack for child
+	char *stack;		/* Start of stack buffer */
+	char *child_stack;	/* End of stack buffer */
+	stack =
+	    mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE,
+		 MAP_SHARED | MAP_ANON | MAP_STACK, -1, 0);
+	if (stack == MAP_FAILED) {
+		fprintf(stderr, "mmap failed: %s\n", strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+	child_stack = stack + STACK_SIZE;	/* Assume stack grows downward */
+
+	// the result of this call is that our child_exec will be run in another
+	// process returning its pid
+	pid_t pid = clone(child_exec, child_stack, clone_flags, &args);
+	if (pid < 0) {
+		fprintf(stderr, "clone failed: %s\n", strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+	// let's wait on our child process here before we, the parent, exit
+	if (waitpid(pid, NULL, 0) == -1) {
+		fprintf(stderr, "failed to wait pid %d\n", pid);
+		exit(EXIT_FAILURE);
+	}
+	exit(EXIT_SUCCESS);
+}
diff --git a/vendor/github.com/moby/moby/contrib/syscall-test/raw.c b/vendor/github.com/moby/moby/contrib/syscall-test/raw.c
new file mode 100644
index 000000000..7995a0d3a
--- /dev/null
+++ b/vendor/github.com/moby/moby/contrib/syscall-test/raw.c
@@ -0,0 +1,14 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+
+int main() {
+	if (socket(PF_INET, SOCK_RAW, IPPROTO_UDP) == -1) {
+		perror("socket");
+		return 1;
+	}
+
+	return 0;
+}
diff --git a/vendor/github.com/moby/moby/contrib/syscall-test/setgid.c b/vendor/github.com/moby/moby/contrib/syscall-test/setgid.c
new file mode 100644
index 000000000..df9680c86
--- /dev/null
+++ b/vendor/github.com/moby/moby/contrib/syscall-test/setgid.c
@@ -0,0 +1,11 @@
+#include <stdio.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+int main() {
+	if (setgid(1) == -1) {
+		perror("setgid");
+		return 1;
+	}
+	return 0;
+}
diff --git a/vendor/github.com/moby/moby/contrib/syscall-test/setuid.c b/vendor/github.com/moby/moby/contrib/syscall-test/setuid.c
new file mode 100644
index 000000000..5b939677e
--- /dev/null
+++ b/vendor/github.com/moby/moby/contrib/syscall-test/setuid.c
@@ -0,0 +1,11 @@
+#include <stdio.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+int main() {
+	if (setuid(1) == -1) {
+		perror("setuid");
+		return 1;
+	}
+	return 0;
+}
diff --git a/vendor/github.com/moby/moby/contrib/syscall-test/socket.c b/vendor/github.com/moby/moby/contrib/syscall-test/socket.c
new file mode 100644
index 000000000..d26c82f00
--- /dev/null
+++ b/vendor/github.com/moby/moby/contrib/syscall-test/socket.c
@@ -0,0 +1,30 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+
+int main() {
+	int s;
+	struct sockaddr_in sin;
+
+	s = socket(AF_INET, SOCK_STREAM, 0);
+	if (s == -1) {
+		perror("socket");
+		return 1;
+	}
+
+	sin.sin_family = AF_INET;
+	sin.sin_addr.s_addr = INADDR_ANY;
+	sin.sin_port = htons(80);
+
+	if (bind(s, (struct sockaddr *)&sin, sizeof(sin)) == -1) {
+		perror("bind");
+		return 1;
+	}
+
+	close(s);
+
+	return 0;
+}
diff --git a/vendor/github.com/moby/moby/contrib/syscall-test/userns.c b/vendor/github.com/moby/moby/contrib/syscall-test/userns.c
new file mode 100644
index 000000000..4c5c8d304
--- /dev/null
+++ b/vendor/github.com/moby/moby/contrib/syscall-test/userns.c
@@ -0,0 +1,63 @@
+#define _GNU_SOURCE
+#include <errno.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+
+#define STACK_SIZE (1024 * 1024)	/* Stack size for cloned child */
+
+struct clone_args {
+	char **argv;
+};
+
+// child_exec is the func that will be executed as the result of clone
+static int child_exec(void *stuff)
+{
+	struct clone_args *args = (struct clone_args *)stuff;
+	if (execvp(args->argv[0], args->argv) != 0) {
+		fprintf(stderr, "failed to execvp arguments %s\n",
+			strerror(errno));
+		exit(-1);
+	}
+	// we should never reach here!
+	exit(EXIT_FAILURE);
+}
+
+int main(int argc, char **argv)
+{
+	struct clone_args args;
+	args.argv = &argv[1];
+
+	int clone_flags = CLONE_NEWUSER | SIGCHLD;
+
+	// allocate stack for child
+	char *stack;		/* Start of stack buffer */
+	char *child_stack;	/* End of stack buffer */
+	stack =
+	    mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE,
+		 MAP_SHARED | MAP_ANON | MAP_STACK, -1, 0);
+	if (stack == MAP_FAILED) {
+		fprintf(stderr, "mmap failed: %s\n", strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+	child_stack = stack + STACK_SIZE;	/* Assume stack grows downward */
+
+	// the result of this call is that our child_exec will be run in another
+	// process returning its pid
+	pid_t pid = clone(child_exec, child_stack, clone_flags, &args);
+	if (pid < 0) {
+		fprintf(stderr, "clone failed: %s\n", strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+	// let's wait on our child process here before we, the parent, exit
+	if (waitpid(pid, NULL, 0) == -1) {
+		fprintf(stderr, "failed to wait pid %d\n", pid);
+		exit(EXIT_FAILURE);
+	}
+	exit(EXIT_SUCCESS);
+}
diff --git a/vendor/github.com/moby/moby/contrib/udev/80-docker.rules b/vendor/github.com/moby/moby/contrib/udev/80-docker.rules
new file mode 100644
index 000000000..f934c0175
--- /dev/null
+++ b/vendor/github.com/moby/moby/contrib/udev/80-docker.rules
@@ -0,0 +1,3 @@
+# hide docker's loopback devices from udisks, and thus from user desktops
+SUBSYSTEM=="block", ENV{DM_NAME}=="docker-*", ENV{UDISKS_PRESENTATION_HIDE}="1", ENV{UDISKS_IGNORE}="1"
+SUBSYSTEM=="block", DEVPATH=="/devices/virtual/block/loop*", ATTR{loop/backing_file}=="/var/lib/docker/*", ENV{UDISKS_PRESENTATION_HIDE}="1", ENV{UDISKS_IGNORE}="1"
diff --git a/vendor/github.com/moby/moby/contrib/vagrant-docker/README.md b/vendor/github.com/moby/moby/contrib/vagrant-docker/README.md
new file mode 100644
index 000000000..736c78999
--- /dev/null
+++ b/vendor/github.com/moby/moby/contrib/vagrant-docker/README.md
@@ -0,0 +1,50 @@
+# Vagrant integration
+
+We are currently aware of at least four projects that integrate with
+[Vagrant](http://vagrantup.com/) at different levels. One approach is to use
+Docker as a [provisioner](http://docs.vagrantup.com/v2/provisioning/index.html),
+which means you can create containers and pull base images on VMs using
+Docker's CLI. The other is to use Docker as a
+[provider](http://docs.vagrantup.com/v2/providers/index.html), meaning you can
+use Vagrant to control Docker containers.
+
+
+### Provisioners
+
+* [Vocker](https://github.com/fgrehm/vocker)
+* [Ventriloquist](https://github.com/fgrehm/ventriloquist)
+
+### Providers
+
+* [docker-provider](https://github.com/fgrehm/docker-provider)
+* [vagrant-shell](https://github.com/destructuring/vagrant-shell)
+
+## Setting up Vagrant-docker with the Engine API
+
+The initial Docker upstart script will not work because it binds to `127.0.0.1`, which is not accessible to the host machine. Instead, we need to change the script to bind to `0.0.0.0`.
To do this, modify `/etc/init/docker.conf` to look like this: + +``` +description "Docker daemon" + +start on filesystem +stop on runlevel [!2345] + +respawn + +script + /usr/bin/dockerd -H=tcp://0.0.0.0:2375 +end script +``` + +Once that's done, you need to set up an SSH tunnel between your host machine and the vagrant machine that's running Docker. This can be done by running the following command in a host terminal: + +``` +ssh -L 2375:localhost:2375 -p 2222 vagrant@localhost +``` + +(The first 2375 is what your host can connect to, the second 2375 is what port Docker is running on in the vagrant machine, and the 2222 is the port Vagrant is providing for SSH. If VirtualBox is the VM you're using, you can see what value "2222" should be by going to: Network > Adapter 1 > Advanced > Port Forwarding in the VirtualBox GUI.) + +Note that because the port has been changed, to run docker commands from within the command line you must run them like this: + +``` +sudo docker -H 0.0.0.0:2375 < commands for docker > +``` diff --git a/vendor/github.com/moby/moby/daemon/apparmor_default.go b/vendor/github.com/moby/moby/daemon/apparmor_default.go new file mode 100644 index 000000000..2a418b25c --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/apparmor_default.go @@ -0,0 +1,36 @@ +// +build linux + +package daemon + +import ( + "fmt" + + aaprofile "github.com/docker/docker/profiles/apparmor" + "github.com/opencontainers/runc/libcontainer/apparmor" +) + +// Define constants for native driver +const ( + defaultApparmorProfile = "docker-default" +) + +func ensureDefaultAppArmorProfile() error { + if apparmor.IsEnabled() { + loaded, err := aaprofile.IsLoaded(defaultApparmorProfile) + if err != nil { + return fmt.Errorf("Could not check if %s AppArmor profile was loaded: %s", defaultApparmorProfile, err) + } + + // Nothing to do. + if loaded { + return nil + } + + // Load the profile. + if err := aaprofile.InstallDefault(defaultApparmorProfile); err != nil { + return fmt.Errorf("AppArmor enabled on system but the %s profile could not be loaded: %s", defaultApparmorProfile, err) + } + } + + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/apparmor_default_unsupported.go b/vendor/github.com/moby/moby/daemon/apparmor_default_unsupported.go new file mode 100644 index 000000000..cd2dd9702 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/apparmor_default_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux + +package daemon + +func ensureDefaultAppArmorProfile() error { + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/archive.go b/vendor/github.com/moby/moby/daemon/archive.go new file mode 100644 index 000000000..bd00daca5 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/archive.go @@ -0,0 +1,362 @@ +package daemon + +import ( + "io" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" +) + +// ErrExtractPointNotDirectory is used to convey that the operation to extract +// a tar archive to a directory in a container has failed because the specified +// path does not refer to a directory. 
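+//
+// It is surfaced by ContainerExtractToDir when the resolved target path
+// exists but Lstat reports something other than a directory (see
+// containerExtractToDir below).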
+var ErrExtractPointNotDirectory = errors.New("extraction point is not a directory") + +// ContainerCopy performs a deprecated operation of archiving the resource at +// the specified path in the container identified by the given name. +func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + if res[0] == '/' || res[0] == '\\' { + res = res[1:] + } + + // Make sure an online file-system operation is permitted. + if err := daemon.isOnlineFSOperationPermitted(container); err != nil { + return nil, err + } + + return daemon.containerCopy(container, res) +} + +// ContainerStatPath stats the filesystem resource at the specified path in the +// container identified by the given name. +func (daemon *Daemon) ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + // Make sure an online file-system operation is permitted. + if err := daemon.isOnlineFSOperationPermitted(container); err != nil { + return nil, err + } + + return daemon.containerStatPath(container, path) +} + +// ContainerArchivePath creates an archive of the filesystem resource at the +// specified path in the container identified by the given name. Returns a +// tar archive of the resource and whether it was a directory or a single file. +func (daemon *Daemon) ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, nil, err + } + + // Make sure an online file-system operation is permitted. + if err := daemon.isOnlineFSOperationPermitted(container); err != nil { + return nil, nil, err + } + + return daemon.containerArchivePath(container, path) +} + +// ContainerExtractToDir extracts the given archive to the specified location +// in the filesystem of the container identified by the given name. The given +// path must be of a directory in the container. If it is not, the error will +// be ErrExtractPointNotDirectory. If noOverwriteDirNonDir is true then it will +// be an error if unpacking the given content would cause an existing directory +// to be replaced with a non-directory and vice versa. +func (daemon *Daemon) ContainerExtractToDir(name, path string, copyUIDGID, noOverwriteDirNonDir bool, content io.Reader) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + // Make sure an online file-system operation is permitted. + if err := daemon.isOnlineFSOperationPermitted(container); err != nil { + return err + } + + return daemon.containerExtractToDir(container, path, copyUIDGID, noOverwriteDirNonDir, content) +} + +// containerStatPath stats the filesystem resource at the specified path in this +// container. Returns stat info about the resource. 
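+// The container lock is taken, and the container's rootfs and volumes are
+// mounted, for the duration of the call.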
+func (daemon *Daemon) containerStatPath(container *container.Container, path string) (stat *types.ContainerPathStat, err error) { + container.Lock() + defer container.Unlock() + + if err = daemon.Mount(container); err != nil { + return nil, err + } + defer daemon.Unmount(container) + + err = daemon.mountVolumes(container) + defer container.DetachAndUnmount(daemon.LogVolumeEvent) + if err != nil { + return nil, err + } + + resolvedPath, absPath, err := container.ResolvePath(path) + if err != nil { + return nil, err + } + + return container.StatPath(resolvedPath, absPath) +} + +// containerArchivePath creates an archive of the filesystem resource at the specified +// path in this container. Returns a tar archive of the resource and stat info +// about the resource. +func (daemon *Daemon) containerArchivePath(container *container.Container, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) { + container.Lock() + + defer func() { + if err != nil { + // Wait to unlock the container until the archive is fully read + // (see the ReadCloseWrapper func below) or if there is an error + // before that occurs. + container.Unlock() + } + }() + + if err = daemon.Mount(container); err != nil { + return nil, nil, err + } + + defer func() { + if err != nil { + // unmount any volumes + container.DetachAndUnmount(daemon.LogVolumeEvent) + // unmount the container's rootfs + daemon.Unmount(container) + } + }() + + if err = daemon.mountVolumes(container); err != nil { + return nil, nil, err + } + + resolvedPath, absPath, err := container.ResolvePath(path) + if err != nil { + return nil, nil, err + } + + stat, err = container.StatPath(resolvedPath, absPath) + if err != nil { + return nil, nil, err + } + + // We need to rebase the archive entries if the last element of the + // resolved path was a symlink that was evaluated and is now different + // than the requested path. For example, if the given path was "/foo/bar/", + // but it resolved to "/var/lib/docker/containers/{id}/foo/baz/", we want + // to ensure that the archive entries start with "bar" and not "baz". This + // also catches the case when the root directory of the container is + // requested: we want the archive entries to start with "/" and not the + // container ID. + data, err := archive.TarResourceRebase(resolvedPath, filepath.Base(absPath)) + if err != nil { + return nil, nil, err + } + + content = ioutils.NewReadCloserWrapper(data, func() error { + err := data.Close() + container.DetachAndUnmount(daemon.LogVolumeEvent) + daemon.Unmount(container) + container.Unlock() + return err + }) + + daemon.LogContainerEvent(container, "archive-path") + + return content, stat, nil +} + +// containerExtractToDir extracts the given tar archive to the specified location in the +// filesystem of this container. The given path must be of a directory in the +// container. If it is not, the error will be ErrExtractPointNotDirectory. If +// noOverwriteDirNonDir is true then it will be an error if unpacking the +// given content would cause an existing directory to be replaced with a non- +// directory and vice versa. 
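+// Extraction into a read-only volume fails with ErrVolumeReadonly, and
+// extraction outside any volume fails with ErrRootFSReadOnly when the
+// container was created with a read-only rootfs.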
+func (daemon *Daemon) containerExtractToDir(container *container.Container, path string, copyUIDGID, noOverwriteDirNonDir bool, content io.Reader) (err error) { + container.Lock() + defer container.Unlock() + + if err = daemon.Mount(container); err != nil { + return err + } + defer daemon.Unmount(container) + + err = daemon.mountVolumes(container) + defer container.DetachAndUnmount(daemon.LogVolumeEvent) + if err != nil { + return err + } + + // Check if a drive letter supplied, it must be the system drive. No-op except on Windows + path, err = system.CheckSystemDriveAndRemoveDriveLetter(path) + if err != nil { + return err + } + + // The destination path needs to be resolved to a host path, with all + // symbolic links followed in the scope of the container's rootfs. Note + // that we do not use `container.ResolvePath(path)` here because we need + // to also evaluate the last path element if it is a symlink. This is so + // that you can extract an archive to a symlink that points to a directory. + + // Consider the given path as an absolute path in the container. + absPath := archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path) + + // This will evaluate the last path element if it is a symlink. + resolvedPath, err := container.GetResourcePath(absPath) + if err != nil { + return err + } + + stat, err := os.Lstat(resolvedPath) + if err != nil { + return err + } + + if !stat.IsDir() { + return ErrExtractPointNotDirectory + } + + // Need to check if the path is in a volume. If it is, it cannot be in a + // read-only volume. If it is not in a volume, the container cannot be + // configured with a read-only rootfs. + + // Use the resolved path relative to the container rootfs as the new + // absPath. This way we fully follow any symlinks in a volume that may + // lead back outside the volume. + // + // The Windows implementation of filepath.Rel in golang 1.4 does not + // support volume style file path semantics. On Windows when using the + // filter driver, we are guaranteed that the path will always be + // a volume file path. + var baseRel string + if strings.HasPrefix(resolvedPath, `\\?\Volume{`) { + if strings.HasPrefix(resolvedPath, container.BaseFS) { + baseRel = resolvedPath[len(container.BaseFS):] + if baseRel[:1] == `\` { + baseRel = baseRel[1:] + } + } + } else { + baseRel, err = filepath.Rel(container.BaseFS, resolvedPath) + } + if err != nil { + return err + } + // Make it an absolute path. + absPath = filepath.Join(string(filepath.Separator), baseRel) + + toVolume, err := checkIfPathIsInAVolume(container, absPath) + if err != nil { + return err + } + + if !toVolume && container.HostConfig.ReadonlyRootfs { + return ErrRootFSReadOnly + } + + options := daemon.defaultTarCopyOptions(noOverwriteDirNonDir) + + if copyUIDGID { + var err error + // tarCopyOptions will appropriately pull in the right uid/gid for the + // user/group and will set the options. 
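+		// (The lookup is platform-specific: the Unix implementation resolves
+		// container.Config.User via idtools, while the Windows one simply
+		// returns the defaults; see archive_tarcopyoptions_*.go below.)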
+ options, err = daemon.tarCopyOptions(container, noOverwriteDirNonDir) + if err != nil { + return err + } + } + + if err := chrootarchive.Untar(content, resolvedPath, options); err != nil { + return err + } + + daemon.LogContainerEvent(container, "extract-to-dir") + + return nil +} + +func (daemon *Daemon) containerCopy(container *container.Container, resource string) (rc io.ReadCloser, err error) { + container.Lock() + + defer func() { + if err != nil { + // Wait to unlock the container until the archive is fully read + // (see the ReadCloseWrapper func below) or if there is an error + // before that occurs. + container.Unlock() + } + }() + + if err := daemon.Mount(container); err != nil { + return nil, err + } + + defer func() { + if err != nil { + // unmount any volumes + container.DetachAndUnmount(daemon.LogVolumeEvent) + // unmount the container's rootfs + daemon.Unmount(container) + } + }() + + if err := daemon.mountVolumes(container); err != nil { + return nil, err + } + + basePath, err := container.GetResourcePath(resource) + if err != nil { + return nil, err + } + stat, err := os.Stat(basePath) + if err != nil { + return nil, err + } + var filter []string + if !stat.IsDir() { + d, f := filepath.Split(basePath) + basePath = d + filter = []string{f} + } else { + filter = []string{filepath.Base(basePath)} + basePath = filepath.Dir(basePath) + } + archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{ + Compression: archive.Uncompressed, + IncludeFiles: filter, + }) + if err != nil { + return nil, err + } + + reader := ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + container.DetachAndUnmount(daemon.LogVolumeEvent) + daemon.Unmount(container) + container.Unlock() + return err + }) + daemon.LogContainerEvent(container, "copy") + return reader, nil +} diff --git a/vendor/github.com/moby/moby/daemon/archive_tarcopyoptions.go b/vendor/github.com/moby/moby/daemon/archive_tarcopyoptions.go new file mode 100644 index 000000000..fe7722fdb --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/archive_tarcopyoptions.go @@ -0,0 +1,15 @@ +package daemon + +import ( + "github.com/docker/docker/pkg/archive" +) + +// defaultTarCopyOptions is the setting that is used when unpacking an archive +// for a copy API event. 
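+//
+// The returned options are what containerExtractToDir ultimately passes to
+// chrootarchive.Untar, as in this sketch of the call site in archive.go:
+//
+//	options := daemon.defaultTarCopyOptions(noOverwriteDirNonDir)
+//	if err := chrootarchive.Untar(content, resolvedPath, options); err != nil {
+//		return err
+//	}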
+func (daemon *Daemon) defaultTarCopyOptions(noOverwriteDirNonDir bool) *archive.TarOptions { + return &archive.TarOptions{ + NoOverwriteDirNonDir: noOverwriteDirNonDir, + UIDMaps: daemon.idMappings.UIDs(), + GIDMaps: daemon.idMappings.GIDs(), + } +} diff --git a/vendor/github.com/moby/moby/daemon/archive_tarcopyoptions_unix.go b/vendor/github.com/moby/moby/daemon/archive_tarcopyoptions_unix.go new file mode 100644 index 000000000..83e6fd9e1 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/archive_tarcopyoptions_unix.go @@ -0,0 +1,25 @@ +// +build !windows + +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" +) + +func (daemon *Daemon) tarCopyOptions(container *container.Container, noOverwriteDirNonDir bool) (*archive.TarOptions, error) { + if container.Config.User == "" { + return daemon.defaultTarCopyOptions(noOverwriteDirNonDir), nil + } + + user, err := idtools.LookupUser(container.Config.User) + if err != nil { + return nil, err + } + + return &archive.TarOptions{ + NoOverwriteDirNonDir: noOverwriteDirNonDir, + ChownOpts: &idtools.IDPair{UID: user.Uid, GID: user.Gid}, + }, nil +} diff --git a/vendor/github.com/moby/moby/daemon/archive_tarcopyoptions_windows.go b/vendor/github.com/moby/moby/daemon/archive_tarcopyoptions_windows.go new file mode 100644 index 000000000..535efd222 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/archive_tarcopyoptions_windows.go @@ -0,0 +1,12 @@ +// +build windows + +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/archive" +) + +func (daemon *Daemon) tarCopyOptions(container *container.Container, noOverwriteDirNonDir bool) (*archive.TarOptions, error) { + return daemon.defaultTarCopyOptions(noOverwriteDirNonDir), nil +} diff --git a/vendor/github.com/moby/moby/daemon/archive_unix.go b/vendor/github.com/moby/moby/daemon/archive_unix.go new file mode 100644 index 000000000..d5dfad78c --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/archive_unix.go @@ -0,0 +1,29 @@ +// +build !windows + +package daemon + +import ( + "github.com/docker/docker/container" +) + +// checkIfPathIsInAVolume checks if the path is in a volume. If it is, it +// cannot be in a read-only volume. If it is not in a volume, the container +// cannot be configured with a read-only rootfs. +func checkIfPathIsInAVolume(container *container.Container, absPath string) (bool, error) { + var toVolume bool + for _, mnt := range container.MountPoints { + if toVolume = mnt.HasResource(absPath); toVolume { + if mnt.RW { + break + } + return false, ErrVolumeReadonly + } + } + return toVolume, nil +} + +// isOnlineFSOperationPermitted returns an error if an online filesystem operation +// is not permitted. +func (daemon *Daemon) isOnlineFSOperationPermitted(container *container.Container) error { + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/archive_windows.go b/vendor/github.com/moby/moby/daemon/archive_windows.go new file mode 100644 index 000000000..ab105607d --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/archive_windows.go @@ -0,0 +1,39 @@ +package daemon + +import ( + "errors" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" +) + +// checkIfPathIsInAVolume checks if the path is in a volume. If it is, it +// cannot be in a read-only volume. If it is not in a volume, the container +// cannot be configured with a read-only rootfs. 
+// +// This is a no-op on Windows which does not support read-only volumes, or +// extracting to a mount point inside a volume. TODO Windows: FIXME Post-TP5 +func checkIfPathIsInAVolume(container *container.Container, absPath string) (bool, error) { + return false, nil +} + +// isOnlineFSOperationPermitted returns an error if an online filesystem operation +// is not permitted (such as stat or for copying). Running Hyper-V containers +// cannot have their file-system interrogated from the host as the filter is +// loaded inside the utility VM, not the host. +// IMPORTANT: The container lock must NOT be held when calling this function. +func (daemon *Daemon) isOnlineFSOperationPermitted(container *container.Container) error { + if !container.IsRunning() { + return nil + } + + // Determine isolation. If not specified in the hostconfig, use daemon default. + actualIsolation := container.HostConfig.Isolation + if containertypes.Isolation.IsDefault(containertypes.Isolation(actualIsolation)) { + actualIsolation = daemon.defaultIsolation + } + if containertypes.Isolation.IsHyperV(actualIsolation) { + return errors.New("filesystem operations against a running Hyper-V container are not supported") + } + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/attach.go b/vendor/github.com/moby/moby/daemon/attach.go new file mode 100644 index 000000000..32410393a --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/attach.go @@ -0,0 +1,186 @@ +package daemon + +import ( + "context" + "fmt" + "io" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/container" + "github.com/docker/docker/container/stream" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/docker/pkg/term" +) + +// ContainerAttach attaches to logs according to the config passed in. See ContainerAttachConfig. 
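+//
+// When MuxStreams is set and the container has no TTY, stdout and stderr are
+// interleaved on a single stream using stdcopy framing; a client would
+// typically split them back apart with the matching demuxer (sketch, where
+// resp is the attach stream read on the client side):
+//
+//	_, err := stdcopy.StdCopy(os.Stdout, os.Stderr, resp)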
+func (daemon *Daemon) ContainerAttach(prefixOrName string, c *backend.ContainerAttachConfig) error { + keys := []byte{} + var err error + if c.DetachKeys != "" { + keys, err = term.ToBytes(c.DetachKeys) + if err != nil { + return fmt.Errorf("Invalid detach keys (%s) provided", c.DetachKeys) + } + } + + container, err := daemon.GetContainer(prefixOrName) + if err != nil { + return err + } + if container.IsPaused() { + err := fmt.Errorf("Container %s is paused, unpause the container before attach.", prefixOrName) + return errors.NewRequestConflictError(err) + } + if container.IsRestarting() { + err := fmt.Errorf("Container %s is restarting, wait until the container is running.", prefixOrName) + return errors.NewRequestConflictError(err) + } + + cfg := stream.AttachConfig{ + UseStdin: c.UseStdin, + UseStdout: c.UseStdout, + UseStderr: c.UseStderr, + TTY: container.Config.Tty, + CloseStdin: container.Config.StdinOnce, + DetachKeys: keys, + } + container.StreamConfig.AttachStreams(&cfg) + + inStream, outStream, errStream, err := c.GetStreams() + if err != nil { + return err + } + defer inStream.Close() + + if !container.Config.Tty && c.MuxStreams { + errStream = stdcopy.NewStdWriter(errStream, stdcopy.Stderr) + outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } + + if cfg.UseStdin { + cfg.Stdin = inStream + } + if cfg.UseStdout { + cfg.Stdout = outStream + } + if cfg.UseStderr { + cfg.Stderr = errStream + } + + if err := daemon.containerAttach(container, &cfg, c.Logs, c.Stream); err != nil { + fmt.Fprintf(outStream, "Error attaching: %s\n", err) + } + return nil +} + +// ContainerAttachRaw attaches the provided streams to the container's stdio +func (daemon *Daemon) ContainerAttachRaw(prefixOrName string, stdin io.ReadCloser, stdout, stderr io.Writer, doStream bool, attached chan struct{}) error { + container, err := daemon.GetContainer(prefixOrName) + if err != nil { + return err + } + cfg := stream.AttachConfig{ + UseStdin: stdin != nil, + UseStdout: stdout != nil, + UseStderr: stderr != nil, + TTY: container.Config.Tty, + CloseStdin: container.Config.StdinOnce, + } + container.StreamConfig.AttachStreams(&cfg) + close(attached) + if cfg.UseStdin { + cfg.Stdin = stdin + } + if cfg.UseStdout { + cfg.Stdout = stdout + } + if cfg.UseStderr { + cfg.Stderr = stderr + } + + return daemon.containerAttach(container, &cfg, false, doStream) +} + +func (daemon *Daemon) containerAttach(c *container.Container, cfg *stream.AttachConfig, logs, doStream bool) error { + if logs { + logDriver, logCreated, err := daemon.getLogger(c) + if err != nil { + return err + } + if logCreated { + defer func() { + if err = logDriver.Close(); err != nil { + logrus.Errorf("Error closing logger: %v", err) + } + }() + } + cLog, ok := logDriver.(logger.LogReader) + if !ok { + return logger.ErrReadLogsNotSupported + } + logs := cLog.ReadLogs(logger.ReadConfig{Tail: -1}) + defer logs.Close() + + LogLoop: + for { + select { + case msg, ok := <-logs.Msg: + if !ok { + break LogLoop + } + if msg.Source == "stdout" && cfg.Stdout != nil { + cfg.Stdout.Write(msg.Line) + } + if msg.Source == "stderr" && cfg.Stderr != nil { + cfg.Stderr.Write(msg.Line) + } + case err := <-logs.Err: + logrus.Errorf("Error streaming logs: %v", err) + break LogLoop + } + } + } + + daemon.LogContainerEvent(c, "attach") + + if !doStream { + return nil + } + + if cfg.Stdin != nil { + r, w := io.Pipe() + go func(stdin io.ReadCloser) { + defer w.Close() + defer logrus.Debug("Closing buffered stdin pipe") + io.Copy(w, stdin) + }(cfg.Stdin) + 
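+		// Hand the attach machinery the read end of the pipe below; the
+		// goroutine above keeps draining the caller's stdin into the write
+		// end and closes it when the caller's stream ends.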
cfg.Stdin = r + } + + if !c.Config.OpenStdin { + cfg.Stdin = nil + } + + if c.Config.StdinOnce && !c.Config.Tty { + // Wait for the container to stop before returning. + waitChan := c.Wait(context.Background(), container.WaitConditionNotRunning) + defer func() { + _ = <-waitChan // Ignore returned exit code. + }() + } + + ctx := c.InitAttachContext() + err := <-c.StreamConfig.CopyStreams(ctx, cfg) + if err != nil { + if _, ok := err.(term.EscapeError); ok { + daemon.LogContainerEvent(c, "detach") + } else { + logrus.Errorf("attach failed with error: %v", err) + } + } + + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/auth.go b/vendor/github.com/moby/moby/daemon/auth.go new file mode 100644 index 000000000..f5f4d7bf2 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/auth.go @@ -0,0 +1,13 @@ +package daemon + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/dockerversion" +) + +// AuthenticateToRegistry checks the validity of credentials in authConfig +func (daemon *Daemon) AuthenticateToRegistry(ctx context.Context, authConfig *types.AuthConfig) (string, string, error) { + return daemon.RegistryService.Auth(ctx, authConfig, dockerversion.DockerUserAgent(ctx)) +} diff --git a/vendor/github.com/moby/moby/daemon/bindmount_solaris.go b/vendor/github.com/moby/moby/daemon/bindmount_solaris.go new file mode 100644 index 000000000..87bf3ef72 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/bindmount_solaris.go @@ -0,0 +1,5 @@ +// +build solaris + +package daemon + +const bindMountType = "lofs" diff --git a/vendor/github.com/moby/moby/daemon/bindmount_unix.go b/vendor/github.com/moby/moby/daemon/bindmount_unix.go new file mode 100644 index 000000000..3966babb4 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/bindmount_unix.go @@ -0,0 +1,5 @@ +// +build linux freebsd + +package daemon + +const bindMountType = "bind" diff --git a/vendor/github.com/moby/moby/daemon/build.go b/vendor/github.com/moby/moby/daemon/build.go new file mode 100644 index 000000000..9b518d64f --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/build.go @@ -0,0 +1,196 @@ +package daemon + +import ( + "io" + "runtime" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/builder" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/registry" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +type releaseableLayer struct { + released bool + layerStore layer.Store + roLayer layer.Layer + rwLayer layer.RWLayer +} + +func (rl *releaseableLayer) Mount() (string, error) { + var err error + var chainID layer.ChainID + if rl.roLayer != nil { + chainID = rl.roLayer.ChainID() + } + + mountID := stringid.GenerateRandomID() + rl.rwLayer, err = rl.layerStore.CreateRWLayer(mountID, chainID, nil) + if err != nil { + return "", errors.Wrap(err, "failed to create rwlayer") + } + + return rl.rwLayer.Mount("") +} + +func (rl *releaseableLayer) Commit(platform string) (builder.ReleaseableLayer, error) { + var chainID layer.ChainID + if rl.roLayer != nil { + chainID = rl.roLayer.ChainID() + } + + stream, err := rl.rwLayer.TarStream() + if err != nil { + return nil, err + } + + newLayer, err := rl.layerStore.Register(stream, chainID, layer.Platform(platform)) + if err != nil { + return 
nil, err + } + + if layer.IsEmpty(newLayer.DiffID()) { + _, err := rl.layerStore.Release(newLayer) + return &releaseableLayer{layerStore: rl.layerStore}, err + } + return &releaseableLayer{layerStore: rl.layerStore, roLayer: newLayer}, nil +} + +func (rl *releaseableLayer) DiffID() layer.DiffID { + if rl.roLayer == nil { + return layer.DigestSHA256EmptyTar + } + return rl.roLayer.DiffID() +} + +func (rl *releaseableLayer) Release() error { + if rl.released { + return nil + } + rl.released = true + rl.releaseRWLayer() + return rl.releaseROLayer() +} + +func (rl *releaseableLayer) releaseRWLayer() error { + if rl.rwLayer == nil { + return nil + } + metadata, err := rl.layerStore.ReleaseRWLayer(rl.rwLayer) + layer.LogReleaseMetadata(metadata) + if err != nil { + logrus.Errorf("Failed to release RWLayer: %s", err) + } + return err +} + +func (rl *releaseableLayer) releaseROLayer() error { + if rl.roLayer == nil { + return nil + } + metadata, err := rl.layerStore.Release(rl.roLayer) + layer.LogReleaseMetadata(metadata) + return err +} + +func newReleasableLayerForImage(img *image.Image, layerStore layer.Store) (builder.ReleaseableLayer, error) { + if img == nil || img.RootFS.ChainID() == "" { + return &releaseableLayer{layerStore: layerStore}, nil + } + // Hold a reference to the image layer so that it can't be removed before + // it is released + roLayer, err := layerStore.Get(img.RootFS.ChainID()) + if err != nil { + return nil, errors.Wrapf(err, "failed to get layer for image %s", img.ImageID()) + } + return &releaseableLayer{layerStore: layerStore, roLayer: roLayer}, nil +} + +// TODO: could this use the regular daemon PullImage ? +func (daemon *Daemon) pullForBuilder(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer, platform string) (*image.Image, error) { + ref, err := reference.ParseNormalizedNamed(name) + if err != nil { + return nil, err + } + ref = reference.TagNameOnly(ref) + + pullRegistryAuth := &types.AuthConfig{} + if len(authConfigs) > 0 { + // The request came with a full auth config, use it + repoInfo, err := daemon.RegistryService.ResolveRepository(ref) + if err != nil { + return nil, err + } + + resolvedConfig := registry.ResolveAuthConfig(authConfigs, repoInfo.Index) + pullRegistryAuth = &resolvedConfig + } + + if err := daemon.pullImageWithReference(ctx, ref, platform, nil, pullRegistryAuth, output); err != nil { + return nil, err + } + return daemon.GetImage(name) +} + +// GetImageAndReleasableLayer returns an image and releaseable layer for a reference or ID. +// Every call to GetImageAndReleasableLayer MUST call releasableLayer.Release() to prevent +// leaking of layers. +func (daemon *Daemon) GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (builder.Image, builder.ReleaseableLayer, error) { + if refOrID == "" { + layer, err := newReleasableLayerForImage(nil, daemon.stores[opts.Platform].layerStore) + return nil, layer, err + } + + if opts.PullOption != backend.PullOptionForcePull { + image, err := daemon.GetImage(refOrID) + if err != nil && opts.PullOption == backend.PullOptionNoPull { + return nil, nil, err + } + // TODO: shouldn't we error out if error is different from "not found" ? 
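+		// image stays nil here when GetImage failed with the default pull
+		// option; in that case we fall through to pullForBuilder below.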
+ if image != nil { + layer, err := newReleasableLayerForImage(image, daemon.stores[opts.Platform].layerStore) + return image, layer, err + } + } + + image, err := daemon.pullForBuilder(ctx, refOrID, opts.AuthConfig, opts.Output, opts.Platform) + if err != nil { + return nil, nil, err + } + layer, err := newReleasableLayerForImage(image, daemon.stores[opts.Platform].layerStore) + return image, layer, err +} + +// CreateImage creates a new image by adding a config and ID to the image store. +// This is similar to LoadImage() except that it receives JSON encoded bytes of +// an image instead of a tar archive. +func (daemon *Daemon) CreateImage(config []byte, parent string, platform string) (builder.Image, error) { + if platform == "" { + platform = runtime.GOOS + } + id, err := daemon.stores[platform].imageStore.Create(config) + if err != nil { + return nil, errors.Wrapf(err, "failed to create image") + } + + if parent != "" { + if err := daemon.stores[platform].imageStore.SetParent(id, image.ID(parent)); err != nil { + return nil, errors.Wrapf(err, "failed to set parent %s", parent) + } + } + + return daemon.stores[platform].imageStore.Get(id) +} + +// IDMappings returns uid/gid mappings for the builder +func (daemon *Daemon) IDMappings() *idtools.IDMappings { + return daemon.idMappings +} diff --git a/vendor/github.com/moby/moby/daemon/cache.go b/vendor/github.com/moby/moby/daemon/cache.go new file mode 100644 index 000000000..219b0b38d --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cache.go @@ -0,0 +1,27 @@ +package daemon + +import ( + "github.com/Sirupsen/logrus" + "github.com/docker/docker/builder" + "github.com/docker/docker/image/cache" +) + +// MakeImageCache creates a stateful image cache. +func (daemon *Daemon) MakeImageCache(sourceRefs []string, platform string) builder.ImageCache { + if len(sourceRefs) == 0 { + return cache.NewLocal(daemon.stores[platform].imageStore) + } + + cache := cache.New(daemon.stores[platform].imageStore) + + for _, ref := range sourceRefs { + img, err := daemon.GetImage(ref) + if err != nil { + logrus.Warnf("Could not look up %s for cache resolution, skipping: %+v", ref, err) + continue + } + cache.Populate(img) + } + + return cache +} diff --git a/vendor/github.com/moby/moby/daemon/caps/utils_unix.go b/vendor/github.com/moby/moby/daemon/caps/utils_unix.go new file mode 100644 index 000000000..c99485f51 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/caps/utils_unix.go @@ -0,0 +1,131 @@ +// +build !windows + +package caps + +import ( + "fmt" + "strings" + + "github.com/docker/docker/pkg/stringutils" + "github.com/syndtr/gocapability/capability" +) + +var capabilityList Capabilities + +func init() { + last := capability.CAP_LAST_CAP + // hack for RHEL6 which has no /proc/sys/kernel/cap_last_cap + if last == capability.Cap(63) { + last = capability.CAP_BLOCK_SUSPEND + } + for _, cap := range capability.List() { + if cap > last { + continue + } + capabilityList = append(capabilityList, + &CapabilityMapping{ + Key: "CAP_" + strings.ToUpper(cap.String()), + Value: cap, + }, + ) + } +} + +type ( + // CapabilityMapping maps linux capability name to its value of capability.Cap type + // Capabilities is one of the security systems in Linux Security Module (LSM) + // framework provided by the kernel. 
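+	// For example, capability.CAP_NET_ADMIN is carried here under the
+	// Key "CAP_NET_ADMIN", as built by init() above.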
+ // For more details on capabilities, see http://man7.org/linux/man-pages/man7/capabilities.7.html + CapabilityMapping struct { + Key string `json:"key,omitempty"` + Value capability.Cap `json:"value,omitempty"` + } + // Capabilities contains all CapabilityMapping + Capabilities []*CapabilityMapping +) + +// String returns of CapabilityMapping +func (c *CapabilityMapping) String() string { + return c.Key +} + +// GetCapability returns CapabilityMapping which contains specific key +func GetCapability(key string) *CapabilityMapping { + for _, capp := range capabilityList { + if capp.Key == key { + cpy := *capp + return &cpy + } + } + return nil +} + +// GetAllCapabilities returns all of the capabilities +func GetAllCapabilities() []string { + output := make([]string, len(capabilityList)) + for i, capability := range capabilityList { + output[i] = capability.String() + } + return output +} + +// TweakCapabilities can tweak capabilities by adding or dropping capabilities +// based on the basics capabilities. +func TweakCapabilities(basics, adds, drops []string) ([]string, error) { + var ( + newCaps []string + allCaps = GetAllCapabilities() + ) + + // FIXME(tonistiigi): docker format is without CAP_ prefix, oci is with prefix + // Currently they are mixed in here. We should do conversion in one place. + + // look for invalid cap in the drop list + for _, cap := range drops { + if strings.ToLower(cap) == "all" { + continue + } + + if !stringutils.InSlice(allCaps, "CAP_"+cap) { + return nil, fmt.Errorf("Unknown capability drop: %q", cap) + } + } + + // handle --cap-add=all + if stringutils.InSlice(adds, "all") { + basics = allCaps + } + + if !stringutils.InSlice(drops, "all") { + for _, cap := range basics { + // skip `all` already handled above + if strings.ToLower(cap) == "all" { + continue + } + + // if we don't drop `all`, add back all the non-dropped caps + if !stringutils.InSlice(drops, cap[4:]) { + newCaps = append(newCaps, strings.ToUpper(cap)) + } + } + } + + for _, cap := range adds { + // skip `all` already handled above + if strings.ToLower(cap) == "all" { + continue + } + + cap = "CAP_" + cap + + if !stringutils.InSlice(allCaps, cap) { + return nil, fmt.Errorf("Unknown capability to add: %q", cap) + } + + // add cap if not already in the list + if !stringutils.InSlice(newCaps, cap) { + newCaps = append(newCaps, strings.ToUpper(cap)) + } + } + return newCaps, nil +} diff --git a/vendor/github.com/moby/moby/daemon/changes.go b/vendor/github.com/moby/moby/daemon/changes.go new file mode 100644 index 000000000..fc8cd2752 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/changes.go @@ -0,0 +1,31 @@ +package daemon + +import ( + "errors" + "runtime" + "time" + + "github.com/docker/docker/pkg/archive" +) + +// ContainerChanges returns a list of container fs changes +func (daemon *Daemon) ContainerChanges(name string) ([]archive.Change, error) { + start := time.Now() + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + if runtime.GOOS == "windows" && container.IsRunning() { + return nil, errors.New("Windows does not support diff of a running container") + } + + container.Lock() + defer container.Unlock() + c, err := container.RWLayer.Changes() + if err != nil { + return nil, err + } + containerActions.WithValues("changes").UpdateSince(start) + return c, nil +} diff --git a/vendor/github.com/moby/moby/daemon/checkpoint.go b/vendor/github.com/moby/moby/daemon/checkpoint.go new file mode 100644 index 000000000..d3028f1e2 --- /dev/null +++ 
b/vendor/github.com/moby/moby/daemon/checkpoint.go
@@ -0,0 +1,137 @@
+package daemon
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+
+	"github.com/docker/docker/api"
+	"github.com/docker/docker/api/types"
+)
+
+var (
+	validCheckpointNameChars   = api.RestrictedNameChars
+	validCheckpointNamePattern = api.RestrictedNamePattern
+)
+
+// getCheckpointDir verifies the checkpoint directory for the create, remove, and list options, and checks whether the checkpoint already exists
+func getCheckpointDir(checkDir, checkpointID string, ctrName string, ctrID string, ctrCheckpointDir string, create bool) (string, error) {
+	var checkpointDir string
+	var err2 error
+	if checkDir != "" {
+		checkpointDir = filepath.Join(checkDir, ctrID, "checkpoints")
+	} else {
+		checkpointDir = ctrCheckpointDir
+	}
+	checkpointAbsDir := filepath.Join(checkpointDir, checkpointID)
+	stat, err := os.Stat(checkpointAbsDir)
+	if create {
+		switch {
+		case err == nil && stat.IsDir():
+			err2 = fmt.Errorf("checkpoint with name %s already exists for container %s", checkpointID, ctrName)
+		case err != nil && os.IsNotExist(err):
+			err2 = nil
+		case err != nil:
+			err2 = err
+		case err == nil:
+			err2 = fmt.Errorf("%s exists and is not a directory", checkpointAbsDir)
+		}
+	} else {
+		switch {
+		case err != nil:
+			err2 = fmt.Errorf("checkpoint %s does not exist for container %s", checkpointID, ctrName)
+		case err == nil && stat.IsDir():
+			err2 = nil
+		case err == nil:
+			err2 = fmt.Errorf("%s exists and is not a directory", checkpointAbsDir)
+		}
+	}
+	return checkpointDir, err2
+}
+
+// CheckpointCreate checkpoints the process running in a container with CRIU
+func (daemon *Daemon) CheckpointCreate(name string, config types.CheckpointCreateOptions) error {
+	container, err := daemon.GetContainer(name)
+	if err != nil {
+		return err
+	}
+
+	if !container.IsRunning() {
+		return fmt.Errorf("Container %s not running", name)
+	}
+
+	if !validCheckpointNamePattern.MatchString(config.CheckpointID) {
+		return fmt.Errorf("Invalid checkpoint ID (%s), only %s are allowed", config.CheckpointID, validCheckpointNameChars)
+	}
+
+	checkpointDir, err := getCheckpointDir(config.CheckpointDir, config.CheckpointID, name, container.ID, container.CheckpointDir(), true)
+	if err != nil {
+		return fmt.Errorf("cannot checkpoint container %s: %s", name, err)
+	}
+
+	err = daemon.containerd.CreateCheckpoint(container.ID, config.CheckpointID, checkpointDir, config.Exit)
+	if err != nil {
+		return fmt.Errorf("Cannot checkpoint container %s: %s", name, err)
+	}
+
+	daemon.LogContainerEvent(container, "checkpoint")
+
+	return nil
+}
+
+// CheckpointDelete deletes the specified checkpoint
+func (daemon *Daemon) CheckpointDelete(name string, config types.CheckpointDeleteOptions) error {
+	container, err := daemon.GetContainer(name)
+	if err != nil {
+		return err
+	}
+	checkpointDir, err := getCheckpointDir(config.CheckpointDir, config.CheckpointID, name, container.ID, container.CheckpointDir(), false)
+	if err == nil {
+		return os.RemoveAll(filepath.Join(checkpointDir, config.CheckpointID))
+	}
+	return err
+}
+
+// CheckpointList lists all checkpoints of the specified container
+func (daemon *Daemon) CheckpointList(name string, config types.CheckpointListOptions) ([]types.Checkpoint, error) {
+	var out []types.Checkpoint
+
+	container, err := daemon.GetContainer(name)
+	if err != nil {
+		return nil, err
+	}
+
+	checkpointDir, err := getCheckpointDir(config.CheckpointDir, "", name, container.ID, container.CheckpointDir(), false)
+	if err != nil {
+		return
+	}
+
+	if err := os.MkdirAll(checkpointDir, 0755); err != nil {
+		return nil, err
+	}
+
+	dirs, err := ioutil.ReadDir(checkpointDir)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, d := range dirs {
+		if !d.IsDir() {
+			continue
+		}
+		path := filepath.Join(checkpointDir, d.Name(), "config.json")
+		data, err := ioutil.ReadFile(path)
+		if err != nil {
+			return nil, err
+		}
+		var cpt types.Checkpoint
+		if err := json.Unmarshal(data, &cpt); err != nil {
+			return nil, err
+		}
+		out = append(out, cpt)
+	}
+
+	return out, nil
+}
diff --git a/vendor/github.com/moby/moby/daemon/cluster.go b/vendor/github.com/moby/moby/daemon/cluster.go
new file mode 100644
index 000000000..d22970bcd
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/cluster.go
@@ -0,0 +1,26 @@
+package daemon
+
+import (
+	apitypes "github.com/docker/docker/api/types"
+	lncluster "github.com/docker/libnetwork/cluster"
+)
+
+// Cluster is the interface for github.com/docker/docker/daemon/cluster.(*Cluster).
+type Cluster interface {
+	ClusterStatus
+	NetworkManager
+	SendClusterEvent(event lncluster.ConfigEventType)
+}
+
+// ClusterStatus interface provides information about the Swarm status of the Cluster
+type ClusterStatus interface {
+	IsAgent() bool
+	IsManager() bool
+}
+
+// NetworkManager provides methods to manage networks
+type NetworkManager interface {
+	GetNetwork(input string) (apitypes.NetworkResource, error)
+	GetNetworks() ([]apitypes.NetworkResource, error)
+	RemoveNetwork(input string) error
+}
diff --git a/vendor/github.com/moby/moby/daemon/cluster/cluster.go b/vendor/github.com/moby/moby/daemon/cluster/cluster.go
new file mode 100644
index 000000000..57fc4d2d6
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/cluster/cluster.go
@@ -0,0 +1,441 @@
+package cluster
+
+//
+// ## Swarmkit integration
+//
+// Cluster - static configurable object for accessing everything swarm related.
+// Contains methods for connecting to and controlling the cluster. It always
+// exists, even if swarm mode is not enabled.
+//
+// NodeRunner - manager for starting the swarmkit node. It is present if and
+// only if swarm mode is enabled. It implements a backoff restart loop in case
+// of errors.
+//
+// NodeState - information about the current node status, including access to
+// gRPC clients if a manager is active.
+//
+// ### Locking
+//
+// `cluster.controlMutex` - taken for the whole lifecycle of the processes that
+// can reconfigure the cluster (init/join/leave, etc.). It ensures that one
+// reconfiguration action has fully completed before another can start.
+//
+// `cluster.mu` - taken when the actual changes in cluster configurations
+// happen. Different from `controlMutex` because in some cases we need to
+// access the current cluster state even while a long-running reconfiguration
+// is going on. For example, the network stack may ask for the current cluster
+// state in the middle of the shutdown. Any time the current cluster state is
+// read, take the read lock of `cluster.mu`. If you are writing an API
+// responder that returns synchronously, hold `cluster.mu.RLock()` for the
+// duration of the whole handler function. That ensures that the node will not
+// be shut down until the handler has finished.
+//
+// NodeRunner implements its own internal locks that should not be used outside
+// of the struct. Instead, you should just call the `nodeRunner.State()` method
+// to get the current state of the cluster (you still need `cluster.mu.RLock()`
+// to access the `cluster.nr` reference itself). Most of the changes in
+// NodeRunner happen because of an external event (network problem, unexpected
+// swarmkit error), and Docker shouldn't take any locks that delay these
+// changes from happening.
+//
+
+import (
+	"fmt"
+	"net"
+	"os"
+	"path/filepath"
+	"sync"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api/types/network"
+	types "github.com/docker/docker/api/types/swarm"
+	"github.com/docker/docker/daemon/cluster/controllers/plugin"
+	executorpkg "github.com/docker/docker/daemon/cluster/executor"
+	"github.com/docker/docker/pkg/signal"
+	lncluster "github.com/docker/libnetwork/cluster"
+	swarmapi "github.com/docker/swarmkit/api"
+	swarmnode "github.com/docker/swarmkit/node"
+	"github.com/pkg/errors"
+	"golang.org/x/net/context"
+)
+
+const swarmDirName = "swarm"
+const controlSocket = "control.sock"
+const swarmConnectTimeout = 20 * time.Second
+const swarmRequestTimeout = 20 * time.Second
+const stateFile = "docker-state.json"
+const defaultAddr = "0.0.0.0:2377"
+
+const (
+	initialReconnectDelay = 100 * time.Millisecond
+	maxReconnectDelay     = 30 * time.Second
+	contextPrefix         = "com.docker.swarm"
+)
+
+// errNoSwarm is returned on leaving a cluster that was never initialized
+var errNoSwarm = errors.New("This node is not part of a swarm")
+
+// errSwarmExists is returned on an initialize or join request for a cluster that has already been activated
+var errSwarmExists = errors.New("This node is already part of a swarm. Use \"docker swarm leave\" to leave this swarm and join another one.")
+
+// errSwarmJoinTimeoutReached is returned when a cluster join could not complete before the timeout was reached.
+var errSwarmJoinTimeoutReached = errors.New("Timeout was reached before node was joined. The attempt to join the swarm will continue in the background. Use the \"docker info\" command to see the current swarm status of your node.")
+
+// errSwarmLocked is returned if the swarm is encrypted and needs a key to unlock it.
+var errSwarmLocked = errors.New("Swarm is encrypted and needs to be unlocked before it can be used. Please use \"docker swarm unlock\" to unlock it.")
+
+// errSwarmCertificatesExpired is returned if docker was not started for the
+// whole validity period, so the certificates had no chance to renew
+// automatically.
+var errSwarmCertificatesExpired = errors.New("Swarm certificates have expired. To replace them, leave the swarm and join again.")
+
+// NetworkSubnetsProvider exposes functions for retrieving the subnets
+// of networks managed by Docker, so they can be filtered.
+type NetworkSubnetsProvider interface {
+	Subnets() ([]net.IPNet, []net.IPNet)
+}
+
+// Config provides values for Cluster.
+type Config struct {
+	Root                   string
+	Name                   string
+	Backend                executorpkg.Backend
+	PluginBackend          plugin.Backend
+	NetworkSubnetsProvider NetworkSubnetsProvider
+
+	// DefaultAdvertiseAddr is the default host/IP or network interface to use
+	// if no AdvertiseAddr value is specified.
+	DefaultAdvertiseAddr string
+
+	// RuntimeRoot is the path to store runtime state, such as the swarm control socket
+	RuntimeRoot string
+
+	// WatchStream is a channel to pass watch API notifications to daemon
+	WatchStream chan *swarmapi.WatchMessage
+}
+
+// Cluster provides capabilities to participate in a cluster as a worker or a
+// manager.
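+//
+// Editor's sketch (not part of the vendored code): the read-locking convention
+// documented above, applied in a synchronous read-only accessor:
+//
+//	c.mu.RLock()
+//	defer c.mu.RUnlock()
+//	st := c.currentNodeState() // safe: the read lock is held for the whole handler
+//	_ = st.IsActiveManager()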
+type Cluster struct {
+	mu           sync.RWMutex
+	controlMutex sync.RWMutex // protect init/join/leave user operations
+	nr           *nodeRunner
+	root         string
+	runtimeRoot  string
+	config       Config
+	configEvent  chan lncluster.ConfigEventType // TODO: make this an array and goroutine-safe
+	attachers    map[string]*attacher
+	watchStream  chan *swarmapi.WatchMessage
+}
+
+// attacher manages the in-memory attachment state of a container
+// attachment to a global scope network managed by the swarm manager. It
+// helps in identifying the attachment ID via the taskID and the
+// corresponding attachment configuration obtained from the manager.
+type attacher struct {
+	taskID           string
+	config           *network.NetworkingConfig
+	inProgress       bool
+	attachWaitCh     chan *network.NetworkingConfig
+	attachCompleteCh chan struct{}
+	detachWaitCh     chan struct{}
+}
+
+// New creates a new Cluster instance using the provided config.
+func New(config Config) (*Cluster, error) {
+	root := filepath.Join(config.Root, swarmDirName)
+	if err := os.MkdirAll(root, 0700); err != nil {
+		return nil, err
+	}
+	if config.RuntimeRoot == "" {
+		config.RuntimeRoot = root
+	}
+	if err := os.MkdirAll(config.RuntimeRoot, 0700); err != nil {
+		return nil, err
+	}
+	c := &Cluster{
+		root:        root,
+		config:      config,
+		configEvent: make(chan lncluster.ConfigEventType, 10),
+		runtimeRoot: config.RuntimeRoot,
+		attachers:   make(map[string]*attacher),
+		watchStream: config.WatchStream,
+	}
+	return c, nil
+}
+
+// Start the Cluster instance
+// TODO: the split between New and Start can be joined again once the
+// SendClusterEvent method is no longer required.
+func (c *Cluster) Start() error {
+	root := filepath.Join(c.config.Root, swarmDirName)
+
+	nodeConfig, err := loadPersistentState(root)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil
+		}
+		return err
+	}
+
+	nr, err := c.newNodeRunner(*nodeConfig)
+	if err != nil {
+		return err
+	}
+	c.nr = nr
+
+	select {
+	case <-time.After(swarmConnectTimeout):
+		logrus.Error("swarm component could not be started before timeout was reached")
+	case err := <-nr.Ready():
+		if err != nil {
+			logrus.WithError(err).Error("swarm component could not be started")
+			return nil
+		}
+	}
+	return nil
+}
+
+func (c *Cluster) newNodeRunner(conf nodeStartConfig) (*nodeRunner, error) {
+	if err := c.config.Backend.IsSwarmCompatible(); err != nil {
+		return nil, err
+	}
+
+	actualLocalAddr := conf.LocalAddr
+	if actualLocalAddr == "" {
+		// If localAddr was not specified, resolve it automatically
+		// based on the route to joinAddr. localAddr can only be left
+		// empty on "join".
+		listenHost, _, err := net.SplitHostPort(conf.ListenAddr)
+		if err != nil {
+			return nil, fmt.Errorf("could not parse listen address: %v", err)
+		}
+
+		listenAddrIP := net.ParseIP(listenHost)
+		if listenAddrIP == nil || !listenAddrIP.IsUnspecified() {
+			actualLocalAddr = listenHost
+		} else {
+			if conf.RemoteAddr == "" {
+				// Should never happen except using swarms created by
+				// old versions that didn't save remoteAddr.
+				conf.RemoteAddr = "8.8.8.8:53"
+			}
+			conn, err := net.Dial("udp", conf.RemoteAddr)
+			if err != nil {
+				return nil, fmt.Errorf("could not find local IP address: %v", err)
+			}
+			localHostPort := conn.LocalAddr().String()
+			actualLocalAddr, _, _ = net.SplitHostPort(localHostPort)
+			conn.Close()
+		}
+	}
+
+	nr := &nodeRunner{cluster: c}
+	nr.actualLocalAddr = actualLocalAddr
+
+	if err := nr.Start(conf); err != nil {
+		return nil, err
+	}
+
+	c.config.Backend.DaemonJoinsCluster(c)
+
+	return nr, nil
+}
+
+func (c *Cluster) getRequestContext() (context.Context, func()) { // TODO: not needed when requests don't block on quorum lost
+	return context.WithTimeout(context.Background(), swarmRequestTimeout)
+}
+
+// IsManager returns true if Cluster is participating as a manager.
+func (c *Cluster) IsManager() bool {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	return c.currentNodeState().IsActiveManager()
+}
+
+// IsAgent returns true if Cluster is participating as a worker/agent.
+func (c *Cluster) IsAgent() bool {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	return c.currentNodeState().status == types.LocalNodeStateActive
+}
+
+// GetLocalAddress returns the local address.
+func (c *Cluster) GetLocalAddress() string {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	return c.currentNodeState().actualLocalAddr
+}
+
+// GetListenAddress returns the listen address.
+func (c *Cluster) GetListenAddress() string {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	if c.nr != nil {
+		return c.nr.config.ListenAddr
+	}
+	return ""
+}
+
+// GetAdvertiseAddress returns the remotely reachable address of this node.
+func (c *Cluster) GetAdvertiseAddress() string {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	if c.nr != nil && c.nr.config.AdvertiseAddr != "" {
+		advertiseHost, _, _ := net.SplitHostPort(c.nr.config.AdvertiseAddr)
+		return advertiseHost
+	}
+	return c.currentNodeState().actualLocalAddr
+}
+
+// GetDataPathAddress returns the address to be used for the data path traffic, if specified.
+func (c *Cluster) GetDataPathAddress() string {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	if c.nr != nil {
+		return c.nr.config.DataPathAddr
+	}
+	return ""
+}
+
+// GetRemoteAddressList returns the advertise address for each of the remote
+// managers, if available.
+func (c *Cluster) GetRemoteAddressList() []string {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	return c.getRemoteAddressList()
+}
+
+func (c *Cluster) getRemoteAddressList() []string {
+	state := c.currentNodeState()
+	if state.swarmNode == nil {
+		return []string{}
+	}
+
+	nodeID := state.swarmNode.NodeID()
+	remotes := state.swarmNode.Remotes()
+	addressList := make([]string, 0, len(remotes))
+	for _, r := range remotes {
+		if r.NodeID != nodeID {
+			addressList = append(addressList, r.Addr)
+		}
+	}
+	return addressList
+}
+
+// ListenClusterEvents returns a channel that receives messages on cluster
+// participation changes.
+// TODO: make cancelable and accessible to multiple callers
+func (c *Cluster) ListenClusterEvents() <-chan lncluster.ConfigEventType {
+	return c.configEvent
+}
+
+// currentNodeState should not be called without a read lock
+func (c *Cluster) currentNodeState() nodeState {
+	return c.nr.State()
+}
+
+// errNoManager returns an error describing why manager commands can't be used.
+// Call with read lock.
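+// Editor's note (not part of the vendored code): callers rarely use this
+// directly; lockedManagerAction below bundles the read lock, the
+// active-manager check, and a request context, roughly:
+//
+//	return c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
+//		_, err := state.controlClient.ListNodes(ctx, &swarmapi.ListNodesRequest{})
+//		return err
+//	})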
+func (c *Cluster) errNoManager(st nodeState) error { + if st.swarmNode == nil { + if errors.Cause(st.err) == errSwarmLocked { + return errSwarmLocked + } + if st.err == errSwarmCertificatesExpired { + return errSwarmCertificatesExpired + } + return errors.New("This node is not a swarm manager. Use \"docker swarm init\" or \"docker swarm join\" to connect this node to swarm and try again.") + } + if st.swarmNode.Manager() != nil { + return errors.New("This node is not a swarm manager. Manager is being prepared or has trouble connecting to the cluster.") + } + return errors.New("This node is not a swarm manager. Worker nodes can't be used to view or modify cluster state. Please run this command on a manager node or promote the current node to a manager.") +} + +// Cleanup stops active swarm node. This is run before daemon shutdown. +func (c *Cluster) Cleanup() { + c.controlMutex.Lock() + defer c.controlMutex.Unlock() + + c.mu.Lock() + node := c.nr + if node == nil { + c.mu.Unlock() + return + } + state := c.currentNodeState() + c.mu.Unlock() + + if state.IsActiveManager() { + active, reachable, unreachable, err := managerStats(state.controlClient, state.NodeID()) + if err == nil { + singlenode := active && isLastManager(reachable, unreachable) + if active && !singlenode && removingManagerCausesLossOfQuorum(reachable, unreachable) { + logrus.Errorf("Leaving cluster with %v managers left out of %v. Raft quorum will be lost.", reachable-1, reachable+unreachable) + } + } + } + + if err := node.Stop(); err != nil { + logrus.Errorf("failed to shut down cluster node: %v", err) + signal.DumpStacks("") + } + + c.mu.Lock() + c.nr = nil + c.mu.Unlock() +} + +func managerStats(client swarmapi.ControlClient, currentNodeID string) (current bool, reachable int, unreachable int, err error) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + nodes, err := client.ListNodes(ctx, &swarmapi.ListNodesRequest{}) + if err != nil { + return false, 0, 0, err + } + for _, n := range nodes.Nodes { + if n.ManagerStatus != nil { + if n.ManagerStatus.Reachability == swarmapi.RaftMemberStatus_REACHABLE { + reachable++ + if n.ID == currentNodeID { + current = true + } + } + if n.ManagerStatus.Reachability == swarmapi.RaftMemberStatus_UNREACHABLE { + unreachable++ + } + } + } + return +} + +func detectLockedError(err error) error { + if err == swarmnode.ErrInvalidUnlockKey { + return errors.WithStack(errSwarmLocked) + } + return err +} + +func (c *Cluster) lockedManagerAction(fn func(ctx context.Context, state nodeState) error) error { + c.mu.RLock() + defer c.mu.RUnlock() + + state := c.currentNodeState() + if !state.IsActiveManager() { + return c.errNoManager(state) + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + return fn(ctx, state) +} + +// SendClusterEvent allows to send cluster events on the configEvent channel +// TODO This method should not be exposed. 
+// Currently it is used to notify the network controller that the keys are +// available +func (c *Cluster) SendClusterEvent(event lncluster.ConfigEventType) { + c.mu.RLock() + defer c.mu.RUnlock() + c.configEvent <- event +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/configs.go b/vendor/github.com/moby/moby/daemon/cluster/configs.go new file mode 100644 index 000000000..3d418c140 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/configs.go @@ -0,0 +1,117 @@ +package cluster + +import ( + apitypes "github.com/docker/docker/api/types" + types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/convert" + swarmapi "github.com/docker/swarmkit/api" + "golang.org/x/net/context" +) + +// GetConfig returns a config from a managed swarm cluster +func (c *Cluster) GetConfig(input string) (types.Config, error) { + var config *swarmapi.Config + + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + s, err := getConfig(ctx, state.controlClient, input) + if err != nil { + return err + } + config = s + return nil + }); err != nil { + return types.Config{}, err + } + return convert.ConfigFromGRPC(config), nil +} + +// GetConfigs returns all configs of a managed swarm cluster. +func (c *Cluster) GetConfigs(options apitypes.ConfigListOptions) ([]types.Config, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + state := c.currentNodeState() + if !state.IsActiveManager() { + return nil, c.errNoManager(state) + } + + filters, err := newListConfigsFilters(options.Filters) + if err != nil { + return nil, err + } + ctx, cancel := c.getRequestContext() + defer cancel() + + r, err := state.controlClient.ListConfigs(ctx, + &swarmapi.ListConfigsRequest{Filters: filters}) + if err != nil { + return nil, err + } + + configs := []types.Config{} + + for _, config := range r.Configs { + configs = append(configs, convert.ConfigFromGRPC(config)) + } + + return configs, nil +} + +// CreateConfig creates a new config in a managed swarm cluster. +func (c *Cluster) CreateConfig(s types.ConfigSpec) (string, error) { + var resp *swarmapi.CreateConfigResponse + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + configSpec := convert.ConfigSpecToGRPC(s) + + r, err := state.controlClient.CreateConfig(ctx, + &swarmapi.CreateConfigRequest{Spec: &configSpec}) + if err != nil { + return err + } + resp = r + return nil + }); err != nil { + return "", err + } + return resp.Config.ID, nil +} + +// RemoveConfig removes a config from a managed swarm cluster. +func (c *Cluster) RemoveConfig(input string) error { + return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + config, err := getConfig(ctx, state.controlClient, input) + if err != nil { + return err + } + + req := &swarmapi.RemoveConfigRequest{ + ConfigID: config.ID, + } + + _, err = state.controlClient.RemoveConfig(ctx, req) + return err + }) +} + +// UpdateConfig updates a config in a managed swarm cluster. 
+// Note: this is not exposed to the CLI but is available from the API only.
+func (c *Cluster) UpdateConfig(input string, version uint64, spec types.ConfigSpec) error {
+	return c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
+		config, err := getConfig(ctx, state.controlClient, input)
+		if err != nil {
+			return err
+		}
+
+		configSpec := convert.ConfigSpecToGRPC(spec)
+
+		_, err = state.controlClient.UpdateConfig(ctx,
+			&swarmapi.UpdateConfigRequest{
+				ConfigID: config.ID,
+				ConfigVersion: &swarmapi.Version{
+					Index: version,
+				},
+				Spec: &configSpec,
+			})
+		return err
+	})
+}
diff --git a/vendor/github.com/moby/moby/daemon/cluster/controllers/plugin/controller.go b/vendor/github.com/moby/moby/daemon/cluster/controllers/plugin/controller.go
new file mode 100644
index 000000000..e72edcdd7
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/cluster/controllers/plugin/controller.go
@@ -0,0 +1,261 @@
+package plugin
+
+import (
+	"io"
+	"io/ioutil"
+	"net/http"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/reference"
+	enginetypes "github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/swarm/runtime"
+	"github.com/docker/docker/plugin"
+	"github.com/docker/docker/plugin/v2"
+	"github.com/docker/swarmkit/api"
+	"github.com/gogo/protobuf/proto"
+	"github.com/pkg/errors"
+	"golang.org/x/net/context"
+)
+
+// Controller is the controller for the plugin backend.
+// Plugins are managed as a singleton object with a desired state (different from containers).
+// With the plugin controller, instead of having a strict create->start->stop->remove
+// task lifecycle like containers, we manage the desired state of the plugin and let
+// the plugin manager do what it already does and monitor the plugin.
+// We'll also end up with many tasks all pointing to the same plugin ID.
+//
+// TODO(@cpuguy83): registry auth is intentionally not supported until we work out
+// the right way to pass registry credentials via secrets.
+type Controller struct {
+	backend Backend
+	spec    runtime.PluginSpec
+	logger  *logrus.Entry
+
+	pluginID  string
+	serviceID string
+	taskID    string
+
+	// hook used to signal tests that `Wait()` is actually ready and waiting
+	signalWaitReady func()
+}
+
+// Backend is the interface for interacting with the plugin manager.
+// Controller actions are passed to the configured backend to do the real work.
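+// Editor's sketch (not part of the vendored code): Prepare drives this
+// interface roughly as
+//
+//	pl, err := backend.Get(spec.Name) // existing instance?
+//	if err == nil && pl != nil {
+//		err = backend.Upgrade(ctx, remote, spec.Name, nil, &auth, privs, ioutil.Discard)
+//	} else {
+//		err = backend.Pull(ctx, remote, spec.Name, nil, &auth, privs, ioutil.Discard)
+//	}
+//
+// so a test double only needs to track a single *v2.Plugin (see the mock in
+// controller_test.go).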
+type Backend interface { + Disable(name string, config *enginetypes.PluginDisableConfig) error + Enable(name string, config *enginetypes.PluginEnableConfig) error + Remove(name string, config *enginetypes.PluginRmConfig) error + Pull(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer, opts ...plugin.CreateOpt) error + Upgrade(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error + Get(name string) (*v2.Plugin, error) + SubscribeEvents(buffer int, events ...plugin.Event) (eventCh <-chan interface{}, cancel func()) +} + +// NewController returns a new cluster plugin controller +func NewController(backend Backend, t *api.Task) (*Controller, error) { + spec, err := readSpec(t) + if err != nil { + return nil, err + } + return &Controller{ + backend: backend, + spec: spec, + serviceID: t.ServiceID, + logger: logrus.WithFields(logrus.Fields{ + "controller": "plugin", + "task": t.ID, + "plugin": spec.Name, + })}, nil +} + +func readSpec(t *api.Task) (runtime.PluginSpec, error) { + var cfg runtime.PluginSpec + + generic := t.Spec.GetGeneric() + if err := proto.Unmarshal(generic.Payload.Value, &cfg); err != nil { + return cfg, errors.Wrap(err, "error reading plugin spec") + } + return cfg, nil +} + +// Update is the update phase from swarmkit +func (p *Controller) Update(ctx context.Context, t *api.Task) error { + p.logger.Debug("Update") + return nil +} + +// Prepare is the prepare phase from swarmkit +func (p *Controller) Prepare(ctx context.Context) (err error) { + p.logger.Debug("Prepare") + + remote, err := reference.ParseNormalizedNamed(p.spec.Remote) + if err != nil { + return errors.Wrapf(err, "error parsing remote reference %q", p.spec.Remote) + } + + if p.spec.Name == "" { + p.spec.Name = remote.String() + } + + var authConfig enginetypes.AuthConfig + privs := convertPrivileges(p.spec.Privileges) + + pl, err := p.backend.Get(p.spec.Name) + + defer func() { + if pl != nil && err == nil { + pl.Acquire() + } + }() + + if err == nil && pl != nil { + if pl.SwarmServiceID != p.serviceID { + return errors.Errorf("plugin already exists: %s", p.spec.Name) + } + if pl.IsEnabled() { + if err := p.backend.Disable(pl.GetID(), &enginetypes.PluginDisableConfig{ForceDisable: true}); err != nil { + p.logger.WithError(err).Debug("could not disable plugin before running upgrade") + } + } + p.pluginID = pl.GetID() + return p.backend.Upgrade(ctx, remote, p.spec.Name, nil, &authConfig, privs, ioutil.Discard) + } + + if err := p.backend.Pull(ctx, remote, p.spec.Name, nil, &authConfig, privs, ioutil.Discard, plugin.WithSwarmService(p.serviceID)); err != nil { + return err + } + pl, err = p.backend.Get(p.spec.Name) + if err != nil { + return err + } + p.pluginID = pl.GetID() + + return nil +} + +// Start is the start phase from swarmkit +func (p *Controller) Start(ctx context.Context) error { + p.logger.Debug("Start") + + pl, err := p.backend.Get(p.pluginID) + if err != nil { + return err + } + + if p.spec.Disabled { + if pl.IsEnabled() { + return p.backend.Disable(p.pluginID, &enginetypes.PluginDisableConfig{ForceDisable: false}) + } + return nil + } + if !pl.IsEnabled() { + return p.backend.Enable(p.pluginID, &enginetypes.PluginEnableConfig{Timeout: 30}) + } + return nil +} + +// Wait causes the task to wait until returned +func (p *Controller) Wait(ctx 
context.Context) error { + p.logger.Debug("Wait") + + pl, err := p.backend.Get(p.pluginID) + if err != nil { + return err + } + + events, cancel := p.backend.SubscribeEvents(1, plugin.EventDisable{Plugin: pl.PluginObj}, plugin.EventRemove{Plugin: pl.PluginObj}, plugin.EventEnable{Plugin: pl.PluginObj}) + defer cancel() + + if p.signalWaitReady != nil { + p.signalWaitReady() + } + + if !p.spec.Disabled != pl.IsEnabled() { + return errors.New("mismatched plugin state") + } + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case e := <-events: + p.logger.Debugf("got event %#T", e) + + switch e.(type) { + case plugin.EventEnable: + if p.spec.Disabled { + return errors.New("plugin enabled") + } + case plugin.EventRemove: + return errors.New("plugin removed") + case plugin.EventDisable: + if !p.spec.Disabled { + return errors.New("plugin disabled") + } + } + } + } +} + +func isNotFound(err error) bool { + _, ok := errors.Cause(err).(plugin.ErrNotFound) + return ok +} + +// Shutdown is the shutdown phase from swarmkit +func (p *Controller) Shutdown(ctx context.Context) error { + p.logger.Debug("Shutdown") + return nil +} + +// Terminate is the terminate phase from swarmkit +func (p *Controller) Terminate(ctx context.Context) error { + p.logger.Debug("Terminate") + return nil +} + +// Remove is the remove phase from swarmkit +func (p *Controller) Remove(ctx context.Context) error { + p.logger.Debug("Remove") + + pl, err := p.backend.Get(p.pluginID) + if err != nil { + if isNotFound(err) { + return nil + } + return err + } + + pl.Release() + if pl.GetRefCount() > 0 { + p.logger.Debug("skipping remove due to ref count") + return nil + } + + // This may error because we have exactly 1 plugin, but potentially multiple + // tasks which are calling remove. 
+ err = p.backend.Remove(p.pluginID, &enginetypes.PluginRmConfig{ForceRemove: true}) + if isNotFound(err) { + return nil + } + return err +} + +// Close is the close phase from swarmkit +func (p *Controller) Close() error { + p.logger.Debug("Close") + return nil +} + +func convertPrivileges(ls []*runtime.PluginPrivilege) enginetypes.PluginPrivileges { + var out enginetypes.PluginPrivileges + for _, p := range ls { + pp := enginetypes.PluginPrivilege{ + Name: p.Name, + Description: p.Description, + Value: p.Value, + } + out = append(out, pp) + } + return out +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/controllers/plugin/controller_test.go b/vendor/github.com/moby/moby/daemon/cluster/controllers/plugin/controller_test.go new file mode 100644 index 000000000..17b77cc89 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/controllers/plugin/controller_test.go @@ -0,0 +1,390 @@ +package plugin + +import ( + "errors" + "io" + "io/ioutil" + "net/http" + "strings" + "testing" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + enginetypes "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm/runtime" + "github.com/docker/docker/pkg/pubsub" + "github.com/docker/docker/plugin" + "github.com/docker/docker/plugin/v2" + "golang.org/x/net/context" +) + +const ( + pluginTestName = "test" + pluginTestRemote = "testremote" + pluginTestRemoteUpgrade = "testremote2" +) + +func TestPrepare(t *testing.T) { + b := newMockBackend() + c := newTestController(b, false) + ctx := context.Background() + + if err := c.Prepare(ctx); err != nil { + t.Fatal(err) + } + + if b.p == nil { + t.Fatal("pull not performed") + } + + c = newTestController(b, false) + if err := c.Prepare(ctx); err != nil { + t.Fatal(err) + } + if b.p == nil { + t.Fatal("unexpected nil") + } + if b.p.PluginObj.PluginReference != pluginTestRemoteUpgrade { + t.Fatal("upgrade not performed") + } + + c = newTestController(b, false) + c.serviceID = "1" + if err := c.Prepare(ctx); err == nil { + t.Fatal("expected error on prepare") + } +} + +func TestStart(t *testing.T) { + b := newMockBackend() + c := newTestController(b, false) + ctx := context.Background() + + if err := c.Prepare(ctx); err != nil { + t.Fatal(err) + } + + if err := c.Start(ctx); err != nil { + t.Fatal(err) + } + + if !b.p.IsEnabled() { + t.Fatal("expected plugin to be enabled") + } + + c = newTestController(b, true) + if err := c.Prepare(ctx); err != nil { + t.Fatal(err) + } + if err := c.Start(ctx); err != nil { + t.Fatal(err) + } + if b.p.IsEnabled() { + t.Fatal("expected plugin to be disabled") + } + + c = newTestController(b, false) + if err := c.Prepare(ctx); err != nil { + t.Fatal(err) + } + if err := c.Start(ctx); err != nil { + t.Fatal(err) + } + if !b.p.IsEnabled() { + t.Fatal("expected plugin to be enabled") + } +} + +func TestWaitCancel(t *testing.T) { + b := newMockBackend() + c := newTestController(b, true) + ctx := context.Background() + if err := c.Prepare(ctx); err != nil { + t.Fatal(err) + } + if err := c.Start(ctx); err != nil { + t.Fatal(err) + } + + ctxCancel, cancel := context.WithCancel(ctx) + chErr := make(chan error) + go func() { + chErr <- c.Wait(ctxCancel) + }() + cancel() + select { + case err := <-chErr: + if err != context.Canceled { + t.Fatal(err) + } + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for cancelation") + } +} + +func TestWaitDisabled(t *testing.T) { + b := newMockBackend() + c := newTestController(b, true) + ctx := context.Background() + 
if err := c.Prepare(ctx); err != nil { + t.Fatal(err) + } + if err := c.Start(ctx); err != nil { + t.Fatal(err) + } + + chErr := make(chan error) + go func() { + chErr <- c.Wait(ctx) + }() + + if err := b.Enable("test", nil); err != nil { + t.Fatal(err) + } + select { + case err := <-chErr: + if err == nil { + t.Fatal("expected error") + } + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for event") + } + + if err := c.Start(ctx); err != nil { + t.Fatal(err) + } + + ctxWaitReady, cancelCtxWaitReady := context.WithTimeout(ctx, 30*time.Second) + c.signalWaitReady = cancelCtxWaitReady + defer cancelCtxWaitReady() + + go func() { + chErr <- c.Wait(ctx) + }() + + chEvent, cancel := b.SubscribeEvents(1) + defer cancel() + + if err := b.Disable("test", nil); err != nil { + t.Fatal(err) + } + + select { + case <-chEvent: + <-ctxWaitReady.Done() + if err := ctxWaitReady.Err(); err == context.DeadlineExceeded { + t.Fatal(err) + } + select { + case <-chErr: + t.Fatal("wait returned unexpectedly") + default: + // all good + } + case <-chErr: + t.Fatal("wait returned unexpectedly") + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for event") + } + + if err := b.Remove("test", nil); err != nil { + t.Fatal(err) + } + select { + case err := <-chErr: + if err == nil { + t.Fatal("expected error") + } + if !strings.Contains(err.Error(), "removed") { + t.Fatal(err) + } + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for event") + } +} + +func TestWaitEnabled(t *testing.T) { + b := newMockBackend() + c := newTestController(b, false) + ctx := context.Background() + if err := c.Prepare(ctx); err != nil { + t.Fatal(err) + } + if err := c.Start(ctx); err != nil { + t.Fatal(err) + } + + chErr := make(chan error) + go func() { + chErr <- c.Wait(ctx) + }() + + if err := b.Disable("test", nil); err != nil { + t.Fatal(err) + } + select { + case err := <-chErr: + if err == nil { + t.Fatal("expected error") + } + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for event") + } + + if err := c.Start(ctx); err != nil { + t.Fatal(err) + } + + ctxWaitReady, ctxWaitCancel := context.WithCancel(ctx) + c.signalWaitReady = ctxWaitCancel + defer ctxWaitCancel() + + go func() { + chErr <- c.Wait(ctx) + }() + + chEvent, cancel := b.SubscribeEvents(1) + defer cancel() + + if err := b.Enable("test", nil); err != nil { + t.Fatal(err) + } + + select { + case <-chEvent: + <-ctxWaitReady.Done() + if err := ctxWaitReady.Err(); err == context.DeadlineExceeded { + t.Fatal(err) + } + select { + case <-chErr: + t.Fatal("wait returned unexpectedly") + default: + // all good + } + case <-chErr: + t.Fatal("wait returned unexpectedly") + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for event") + } + + if err := b.Remove("test", nil); err != nil { + t.Fatal(err) + } + select { + case err := <-chErr: + if err == nil { + t.Fatal("expected error") + } + if !strings.Contains(err.Error(), "removed") { + t.Fatal(err) + } + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for event") + } +} + +func TestRemove(t *testing.T) { + b := newMockBackend() + c := newTestController(b, false) + ctx := context.Background() + + if err := c.Prepare(ctx); err != nil { + t.Fatal(err) + } + if err := c.Shutdown(ctx); err != nil { + t.Fatal(err) + } + + c2 := newTestController(b, false) + if err := c2.Prepare(ctx); err != nil { + t.Fatal(err) + } + + if err := c.Remove(ctx); err != nil { + t.Fatal(err) + } + if b.p == nil { + t.Fatal("plugin removed unexpectedly") + } + 
if err := c2.Shutdown(ctx); err != nil { + t.Fatal(err) + } + if err := c2.Remove(ctx); err != nil { + t.Fatal(err) + } + if b.p != nil { + t.Fatal("expected plugin to be removed") + } +} + +func newTestController(b Backend, disabled bool) *Controller { + return &Controller{ + logger: &logrus.Entry{Logger: &logrus.Logger{Out: ioutil.Discard}}, + backend: b, + spec: runtime.PluginSpec{ + Name: pluginTestName, + Remote: pluginTestRemote, + Disabled: disabled, + }, + } +} + +func newMockBackend() *mockBackend { + return &mockBackend{ + pub: pubsub.NewPublisher(0, 0), + } +} + +type mockBackend struct { + p *v2.Plugin + pub *pubsub.Publisher +} + +func (m *mockBackend) Disable(name string, config *enginetypes.PluginDisableConfig) error { + m.p.PluginObj.Enabled = false + m.pub.Publish(plugin.EventDisable{}) + return nil +} + +func (m *mockBackend) Enable(name string, config *enginetypes.PluginEnableConfig) error { + m.p.PluginObj.Enabled = true + m.pub.Publish(plugin.EventEnable{}) + return nil +} + +func (m *mockBackend) Remove(name string, config *enginetypes.PluginRmConfig) error { + m.p = nil + m.pub.Publish(plugin.EventRemove{}) + return nil +} + +func (m *mockBackend) Pull(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer, opts ...plugin.CreateOpt) error { + m.p = &v2.Plugin{ + PluginObj: enginetypes.Plugin{ + ID: "1234", + Name: name, + PluginReference: ref.String(), + }, + } + return nil +} + +func (m *mockBackend) Upgrade(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error { + m.p.PluginObj.PluginReference = pluginTestRemoteUpgrade + return nil +} + +func (m *mockBackend) Get(name string) (*v2.Plugin, error) { + if m.p == nil { + return nil, errors.New("not found") + } + return m.p, nil +} + +func (m *mockBackend) SubscribeEvents(buffer int, events ...plugin.Event) (eventCh <-chan interface{}, cancel func()) { + ch := m.pub.SubscribeTopicWithBuffer(nil, buffer) + cancel = func() { m.pub.Evict(ch) } + return ch, cancel +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/convert/config.go b/vendor/github.com/moby/moby/daemon/cluster/convert/config.go new file mode 100644 index 000000000..6b28712ff --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/convert/config.go @@ -0,0 +1,61 @@ +package convert + +import ( + swarmtypes "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + gogotypes "github.com/gogo/protobuf/types" +) + +// ConfigFromGRPC converts a grpc Config to a Config. +func ConfigFromGRPC(s *swarmapi.Config) swarmtypes.Config { + config := swarmtypes.Config{ + ID: s.ID, + Spec: swarmtypes.ConfigSpec{ + Annotations: annotationsFromGRPC(s.Spec.Annotations), + Data: s.Spec.Data, + }, + } + + config.Version.Index = s.Meta.Version.Index + // Meta + config.CreatedAt, _ = gogotypes.TimestampFromProto(s.Meta.CreatedAt) + config.UpdatedAt, _ = gogotypes.TimestampFromProto(s.Meta.UpdatedAt) + + return config +} + +// ConfigSpecToGRPC converts Config to a grpc Config. 
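+// Editor's note (not part of the vendored code): together with ConfigFromGRPC
+// above, this round-trips the spec fields (Annotations and Data), e.g.
+//
+//	spec := ConfigSpecToGRPC(ConfigFromGRPC(grpcConfig).Spec)
+//	// spec.Annotations.Name == grpcConfig.Spec.Annotations.Name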
+func ConfigSpecToGRPC(s swarmtypes.ConfigSpec) swarmapi.ConfigSpec { + return swarmapi.ConfigSpec{ + Annotations: swarmapi.Annotations{ + Name: s.Name, + Labels: s.Labels, + }, + Data: s.Data, + } +} + +// ConfigReferencesFromGRPC converts a slice of grpc ConfigReference to ConfigReference +func ConfigReferencesFromGRPC(s []*swarmapi.ConfigReference) []*swarmtypes.ConfigReference { + refs := []*swarmtypes.ConfigReference{} + + for _, r := range s { + ref := &swarmtypes.ConfigReference{ + ConfigID: r.ConfigID, + ConfigName: r.ConfigName, + } + + if t, ok := r.Target.(*swarmapi.ConfigReference_File); ok { + ref.File = &swarmtypes.ConfigReferenceFileTarget{ + Name: t.File.Name, + UID: t.File.UID, + GID: t.File.GID, + Mode: t.File.Mode, + } + } + + refs = append(refs, ref) + } + + return refs +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/convert/container.go b/vendor/github.com/moby/moby/daemon/cluster/convert/container.go new file mode 100644 index 000000000..6ac6f331f --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/convert/container.go @@ -0,0 +1,356 @@ +package convert + +import ( + "errors" + "fmt" + "strings" + + "github.com/Sirupsen/logrus" + container "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + types "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + gogotypes "github.com/gogo/protobuf/types" +) + +func containerSpecFromGRPC(c *swarmapi.ContainerSpec) *types.ContainerSpec { + if c == nil { + return nil + } + containerSpec := &types.ContainerSpec{ + Image: c.Image, + Labels: c.Labels, + Command: c.Command, + Args: c.Args, + Hostname: c.Hostname, + Env: c.Env, + Dir: c.Dir, + User: c.User, + Groups: c.Groups, + StopSignal: c.StopSignal, + TTY: c.TTY, + OpenStdin: c.OpenStdin, + ReadOnly: c.ReadOnly, + Hosts: c.Hosts, + Secrets: secretReferencesFromGRPC(c.Secrets), + Configs: configReferencesFromGRPC(c.Configs), + } + + if c.DNSConfig != nil { + containerSpec.DNSConfig = &types.DNSConfig{ + Nameservers: c.DNSConfig.Nameservers, + Search: c.DNSConfig.Search, + Options: c.DNSConfig.Options, + } + } + + // Privileges + if c.Privileges != nil { + containerSpec.Privileges = &types.Privileges{} + + if c.Privileges.CredentialSpec != nil { + containerSpec.Privileges.CredentialSpec = &types.CredentialSpec{} + switch c.Privileges.CredentialSpec.Source.(type) { + case *swarmapi.Privileges_CredentialSpec_File: + containerSpec.Privileges.CredentialSpec.File = c.Privileges.CredentialSpec.GetFile() + case *swarmapi.Privileges_CredentialSpec_Registry: + containerSpec.Privileges.CredentialSpec.Registry = c.Privileges.CredentialSpec.GetRegistry() + } + } + + if c.Privileges.SELinuxContext != nil { + containerSpec.Privileges.SELinuxContext = &types.SELinuxContext{ + Disable: c.Privileges.SELinuxContext.Disable, + User: c.Privileges.SELinuxContext.User, + Type: c.Privileges.SELinuxContext.Type, + Role: c.Privileges.SELinuxContext.Role, + Level: c.Privileges.SELinuxContext.Level, + } + } + } + + // Mounts + for _, m := range c.Mounts { + mount := mounttypes.Mount{ + Target: m.Target, + Source: m.Source, + Type: mounttypes.Type(strings.ToLower(swarmapi.Mount_MountType_name[int32(m.Type)])), + ReadOnly: m.ReadOnly, + } + + if m.BindOptions != nil { + mount.BindOptions = &mounttypes.BindOptions{ + Propagation: mounttypes.Propagation(strings.ToLower(swarmapi.Mount_BindOptions_MountPropagation_name[int32(m.BindOptions.Propagation)])), + } + } + + if m.VolumeOptions != nil { + 
mount.VolumeOptions = &mounttypes.VolumeOptions{ + NoCopy: m.VolumeOptions.NoCopy, + Labels: m.VolumeOptions.Labels, + } + if m.VolumeOptions.DriverConfig != nil { + mount.VolumeOptions.DriverConfig = &mounttypes.Driver{ + Name: m.VolumeOptions.DriverConfig.Name, + Options: m.VolumeOptions.DriverConfig.Options, + } + } + } + + if m.TmpfsOptions != nil { + mount.TmpfsOptions = &mounttypes.TmpfsOptions{ + SizeBytes: m.TmpfsOptions.SizeBytes, + Mode: m.TmpfsOptions.Mode, + } + } + containerSpec.Mounts = append(containerSpec.Mounts, mount) + } + + if c.StopGracePeriod != nil { + grace, _ := gogotypes.DurationFromProto(c.StopGracePeriod) + containerSpec.StopGracePeriod = &grace + } + + if c.Healthcheck != nil { + containerSpec.Healthcheck = healthConfigFromGRPC(c.Healthcheck) + } + + return containerSpec +} + +func secretReferencesToGRPC(sr []*types.SecretReference) []*swarmapi.SecretReference { + refs := make([]*swarmapi.SecretReference, 0, len(sr)) + for _, s := range sr { + ref := &swarmapi.SecretReference{ + SecretID: s.SecretID, + SecretName: s.SecretName, + } + if s.File != nil { + ref.Target = &swarmapi.SecretReference_File{ + File: &swarmapi.FileTarget{ + Name: s.File.Name, + UID: s.File.UID, + GID: s.File.GID, + Mode: s.File.Mode, + }, + } + } + + refs = append(refs, ref) + } + + return refs +} + +func secretReferencesFromGRPC(sr []*swarmapi.SecretReference) []*types.SecretReference { + refs := make([]*types.SecretReference, 0, len(sr)) + for _, s := range sr { + target := s.GetFile() + if target == nil { + // not a file target + logrus.Warnf("secret target not a file: secret=%s", s.SecretID) + continue + } + refs = append(refs, &types.SecretReference{ + File: &types.SecretReferenceFileTarget{ + Name: target.Name, + UID: target.UID, + GID: target.GID, + Mode: target.Mode, + }, + SecretID: s.SecretID, + SecretName: s.SecretName, + }) + } + + return refs +} + +func configReferencesToGRPC(sr []*types.ConfigReference) []*swarmapi.ConfigReference { + refs := make([]*swarmapi.ConfigReference, 0, len(sr)) + for _, s := range sr { + ref := &swarmapi.ConfigReference{ + ConfigID: s.ConfigID, + ConfigName: s.ConfigName, + } + if s.File != nil { + ref.Target = &swarmapi.ConfigReference_File{ + File: &swarmapi.FileTarget{ + Name: s.File.Name, + UID: s.File.UID, + GID: s.File.GID, + Mode: s.File.Mode, + }, + } + } + + refs = append(refs, ref) + } + + return refs +} + +func configReferencesFromGRPC(sr []*swarmapi.ConfigReference) []*types.ConfigReference { + refs := make([]*types.ConfigReference, 0, len(sr)) + for _, s := range sr { + target := s.GetFile() + if target == nil { + // not a file target + logrus.Warnf("config target not a file: config=%s", s.ConfigID) + continue + } + refs = append(refs, &types.ConfigReference{ + File: &types.ConfigReferenceFileTarget{ + Name: target.Name, + UID: target.UID, + GID: target.GID, + Mode: target.Mode, + }, + ConfigID: s.ConfigID, + ConfigName: s.ConfigName, + }) + } + + return refs +} + +func containerToGRPC(c *types.ContainerSpec) (*swarmapi.ContainerSpec, error) { + containerSpec := &swarmapi.ContainerSpec{ + Image: c.Image, + Labels: c.Labels, + Command: c.Command, + Args: c.Args, + Hostname: c.Hostname, + Env: c.Env, + Dir: c.Dir, + User: c.User, + Groups: c.Groups, + StopSignal: c.StopSignal, + TTY: c.TTY, + OpenStdin: c.OpenStdin, + ReadOnly: c.ReadOnly, + Hosts: c.Hosts, + Secrets: secretReferencesToGRPC(c.Secrets), + Configs: configReferencesToGRPC(c.Configs), + } + + if c.DNSConfig != nil { + containerSpec.DNSConfig = 
&swarmapi.ContainerSpec_DNSConfig{ + Nameservers: c.DNSConfig.Nameservers, + Search: c.DNSConfig.Search, + Options: c.DNSConfig.Options, + } + } + + if c.StopGracePeriod != nil { + containerSpec.StopGracePeriod = gogotypes.DurationProto(*c.StopGracePeriod) + } + + // Privileges + if c.Privileges != nil { + containerSpec.Privileges = &swarmapi.Privileges{} + + if c.Privileges.CredentialSpec != nil { + containerSpec.Privileges.CredentialSpec = &swarmapi.Privileges_CredentialSpec{} + + if c.Privileges.CredentialSpec.File != "" && c.Privileges.CredentialSpec.Registry != "" { + return nil, errors.New("cannot specify both \"file\" and \"registry\" credential specs") + } + if c.Privileges.CredentialSpec.File != "" { + containerSpec.Privileges.CredentialSpec.Source = &swarmapi.Privileges_CredentialSpec_File{ + File: c.Privileges.CredentialSpec.File, + } + } else if c.Privileges.CredentialSpec.Registry != "" { + containerSpec.Privileges.CredentialSpec.Source = &swarmapi.Privileges_CredentialSpec_Registry{ + Registry: c.Privileges.CredentialSpec.Registry, + } + } else { + return nil, errors.New("must either provide \"file\" or \"registry\" for credential spec") + } + } + + if c.Privileges.SELinuxContext != nil { + containerSpec.Privileges.SELinuxContext = &swarmapi.Privileges_SELinuxContext{ + Disable: c.Privileges.SELinuxContext.Disable, + User: c.Privileges.SELinuxContext.User, + Type: c.Privileges.SELinuxContext.Type, + Role: c.Privileges.SELinuxContext.Role, + Level: c.Privileges.SELinuxContext.Level, + } + } + } + + // Mounts + for _, m := range c.Mounts { + mount := swarmapi.Mount{ + Target: m.Target, + Source: m.Source, + ReadOnly: m.ReadOnly, + } + + if mountType, ok := swarmapi.Mount_MountType_value[strings.ToUpper(string(m.Type))]; ok { + mount.Type = swarmapi.Mount_MountType(mountType) + } else if string(m.Type) != "" { + return nil, fmt.Errorf("invalid MountType: %q", m.Type) + } + + if m.BindOptions != nil { + if mountPropagation, ok := swarmapi.Mount_BindOptions_MountPropagation_value[strings.ToUpper(string(m.BindOptions.Propagation))]; ok { + mount.BindOptions = &swarmapi.Mount_BindOptions{Propagation: swarmapi.Mount_BindOptions_MountPropagation(mountPropagation)} + } else if string(m.BindOptions.Propagation) != "" { + return nil, fmt.Errorf("invalid MountPropagation: %q", m.BindOptions.Propagation) + } + } + + if m.VolumeOptions != nil { + mount.VolumeOptions = &swarmapi.Mount_VolumeOptions{ + NoCopy: m.VolumeOptions.NoCopy, + Labels: m.VolumeOptions.Labels, + } + if m.VolumeOptions.DriverConfig != nil { + mount.VolumeOptions.DriverConfig = &swarmapi.Driver{ + Name: m.VolumeOptions.DriverConfig.Name, + Options: m.VolumeOptions.DriverConfig.Options, + } + } + } + + if m.TmpfsOptions != nil { + mount.TmpfsOptions = &swarmapi.Mount_TmpfsOptions{ + SizeBytes: m.TmpfsOptions.SizeBytes, + Mode: m.TmpfsOptions.Mode, + } + } + + containerSpec.Mounts = append(containerSpec.Mounts, mount) + } + + if c.Healthcheck != nil { + containerSpec.Healthcheck = healthConfigToGRPC(c.Healthcheck) + } + + return containerSpec, nil +} + +func healthConfigFromGRPC(h *swarmapi.HealthConfig) *container.HealthConfig { + interval, _ := gogotypes.DurationFromProto(h.Interval) + timeout, _ := gogotypes.DurationFromProto(h.Timeout) + startPeriod, _ := gogotypes.DurationFromProto(h.StartPeriod) + return &container.HealthConfig{ + Test: h.Test, + Interval: interval, + Timeout: timeout, + Retries: int(h.Retries), + StartPeriod: startPeriod, + } +} + +func healthConfigToGRPC(h *container.HealthConfig) 
*swarmapi.HealthConfig { + return &swarmapi.HealthConfig{ + Test: h.Test, + Interval: gogotypes.DurationProto(h.Interval), + Timeout: gogotypes.DurationProto(h.Timeout), + Retries: int32(h.Retries), + StartPeriod: gogotypes.DurationProto(h.StartPeriod), + } +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/convert/network.go b/vendor/github.com/moby/moby/daemon/cluster/convert/network.go new file mode 100644 index 000000000..6f8b7938c --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/convert/network.go @@ -0,0 +1,239 @@ +package convert + +import ( + "strings" + + basictypes "github.com/docker/docker/api/types" + networktypes "github.com/docker/docker/api/types/network" + types "github.com/docker/docker/api/types/swarm" + netconst "github.com/docker/libnetwork/datastore" + swarmapi "github.com/docker/swarmkit/api" + gogotypes "github.com/gogo/protobuf/types" +) + +func networkAttachmentFromGRPC(na *swarmapi.NetworkAttachment) types.NetworkAttachment { + if na != nil { + return types.NetworkAttachment{ + Network: networkFromGRPC(na.Network), + Addresses: na.Addresses, + } + } + return types.NetworkAttachment{} +} + +func networkFromGRPC(n *swarmapi.Network) types.Network { + if n != nil { + network := types.Network{ + ID: n.ID, + Spec: types.NetworkSpec{ + IPv6Enabled: n.Spec.Ipv6Enabled, + Internal: n.Spec.Internal, + Attachable: n.Spec.Attachable, + Ingress: IsIngressNetwork(n), + IPAMOptions: ipamFromGRPC(n.Spec.IPAM), + Scope: netconst.SwarmScope, + }, + IPAMOptions: ipamFromGRPC(n.IPAM), + } + + if n.Spec.GetNetwork() != "" { + network.Spec.ConfigFrom = &networktypes.ConfigReference{ + Network: n.Spec.GetNetwork(), + } + } + + // Meta + network.Version.Index = n.Meta.Version.Index + network.CreatedAt, _ = gogotypes.TimestampFromProto(n.Meta.CreatedAt) + network.UpdatedAt, _ = gogotypes.TimestampFromProto(n.Meta.UpdatedAt) + + //Annotations + network.Spec.Annotations = annotationsFromGRPC(n.Spec.Annotations) + + //DriverConfiguration + if n.Spec.DriverConfig != nil { + network.Spec.DriverConfiguration = &types.Driver{ + Name: n.Spec.DriverConfig.Name, + Options: n.Spec.DriverConfig.Options, + } + } + + //DriverState + if n.DriverState != nil { + network.DriverState = types.Driver{ + Name: n.DriverState.Name, + Options: n.DriverState.Options, + } + } + + return network + } + return types.Network{} +} + +func ipamFromGRPC(i *swarmapi.IPAMOptions) *types.IPAMOptions { + var ipam *types.IPAMOptions + if i != nil { + ipam = &types.IPAMOptions{} + if i.Driver != nil { + ipam.Driver.Name = i.Driver.Name + ipam.Driver.Options = i.Driver.Options + } + + for _, config := range i.Configs { + ipam.Configs = append(ipam.Configs, types.IPAMConfig{ + Subnet: config.Subnet, + Range: config.Range, + Gateway: config.Gateway, + }) + } + } + return ipam +} + +func endpointSpecFromGRPC(es *swarmapi.EndpointSpec) *types.EndpointSpec { + var endpointSpec *types.EndpointSpec + if es != nil { + endpointSpec = &types.EndpointSpec{} + endpointSpec.Mode = types.ResolutionMode(strings.ToLower(es.Mode.String())) + + for _, portState := range es.Ports { + endpointSpec.Ports = append(endpointSpec.Ports, swarmPortConfigToAPIPortConfig(portState)) + } + } + return endpointSpec +} + +func endpointFromGRPC(e *swarmapi.Endpoint) types.Endpoint { + endpoint := types.Endpoint{} + if e != nil { + if espec := endpointSpecFromGRPC(e.Spec); espec != nil { + endpoint.Spec = *espec + } + + for _, portState := range e.Ports { + endpoint.Ports = append(endpoint.Ports, swarmPortConfigToAPIPortConfig(portState)) 
+ } + + for _, v := range e.VirtualIPs { + endpoint.VirtualIPs = append(endpoint.VirtualIPs, types.EndpointVirtualIP{ + NetworkID: v.NetworkID, + Addr: v.Addr}) + } + + } + + return endpoint +} + +func swarmPortConfigToAPIPortConfig(portConfig *swarmapi.PortConfig) types.PortConfig { + return types.PortConfig{ + Name: portConfig.Name, + Protocol: types.PortConfigProtocol(strings.ToLower(swarmapi.PortConfig_Protocol_name[int32(portConfig.Protocol)])), + PublishMode: types.PortConfigPublishMode(strings.ToLower(swarmapi.PortConfig_PublishMode_name[int32(portConfig.PublishMode)])), + TargetPort: portConfig.TargetPort, + PublishedPort: portConfig.PublishedPort, + } +} + +// BasicNetworkFromGRPC converts a grpc Network to a NetworkResource. +func BasicNetworkFromGRPC(n swarmapi.Network) basictypes.NetworkResource { + spec := n.Spec + var ipam networktypes.IPAM + if spec.IPAM != nil { + if spec.IPAM.Driver != nil { + ipam.Driver = spec.IPAM.Driver.Name + ipam.Options = spec.IPAM.Driver.Options + } + ipam.Config = make([]networktypes.IPAMConfig, 0, len(spec.IPAM.Configs)) + for _, ic := range spec.IPAM.Configs { + ipamConfig := networktypes.IPAMConfig{ + Subnet: ic.Subnet, + IPRange: ic.Range, + Gateway: ic.Gateway, + AuxAddress: ic.Reserved, + } + ipam.Config = append(ipam.Config, ipamConfig) + } + } + + nr := basictypes.NetworkResource{ + ID: n.ID, + Name: n.Spec.Annotations.Name, + Scope: netconst.SwarmScope, + EnableIPv6: spec.Ipv6Enabled, + IPAM: ipam, + Internal: spec.Internal, + Attachable: spec.Attachable, + Ingress: IsIngressNetwork(&n), + Labels: n.Spec.Annotations.Labels, + } + + if n.Spec.GetNetwork() != "" { + nr.ConfigFrom = networktypes.ConfigReference{ + Network: n.Spec.GetNetwork(), + } + } + + if n.DriverState != nil { + nr.Driver = n.DriverState.Name + nr.Options = n.DriverState.Options + } + + return nr +} + +// BasicNetworkCreateToGRPC converts a NetworkCreateRequest to a grpc NetworkSpec. 
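+// Editor's sketch (not part of the vendored code; field names assume the
+// embedded NetworkCreate struct): a minimal request converts as
+//
+//	ns := BasicNetworkCreateToGRPC(basictypes.NetworkCreateRequest{
+//		Name:          "demo",
+//		NetworkCreate: basictypes.NetworkCreate{Driver: "overlay"},
+//	})
+//	// ns.DriverConfig.Name == "overlay"; ns.IPAM stays nil unless IPAM is set,
+//	// in which case an empty IPAM driver name falls back to "default".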
+func BasicNetworkCreateToGRPC(create basictypes.NetworkCreateRequest) swarmapi.NetworkSpec {
+	ns := swarmapi.NetworkSpec{
+		Annotations: swarmapi.Annotations{
+			Name:   create.Name,
+			Labels: create.Labels,
+		},
+		DriverConfig: &swarmapi.Driver{
+			Name:    create.Driver,
+			Options: create.Options,
+		},
+		Ipv6Enabled: create.EnableIPv6,
+		Internal:    create.Internal,
+		Attachable:  create.Attachable,
+		Ingress:     create.Ingress,
+	}
+	if create.IPAM != nil {
+		driver := create.IPAM.Driver
+		if driver == "" {
+			driver = "default"
+		}
+		ns.IPAM = &swarmapi.IPAMOptions{
+			Driver: &swarmapi.Driver{
+				Name:    driver,
+				Options: create.IPAM.Options,
+			},
+		}
+		ipamSpec := make([]*swarmapi.IPAMConfig, 0, len(create.IPAM.Config))
+		for _, ipamConfig := range create.IPAM.Config {
+			ipamSpec = append(ipamSpec, &swarmapi.IPAMConfig{
+				Subnet:  ipamConfig.Subnet,
+				Range:   ipamConfig.IPRange,
+				Gateway: ipamConfig.Gateway,
+			})
+		}
+		ns.IPAM.Configs = ipamSpec
+	}
+	if create.ConfigFrom != nil {
+		ns.ConfigFrom = &swarmapi.NetworkSpec_Network{
+			Network: create.ConfigFrom.Network,
+		}
+	}
+	return ns
+}
+
+// IsIngressNetwork checks whether the swarm network is an ingress network.
+func IsIngressNetwork(n *swarmapi.Network) bool {
+	if n.Spec.Ingress {
+		return true
+	}
+	// Check if legacy defined ingress network
+	_, ok := n.Spec.Annotations.Labels["com.docker.swarm.internal"]
+	return ok && n.Spec.Annotations.Name == "ingress"
+}
diff --git a/vendor/github.com/moby/moby/daemon/cluster/convert/node.go b/vendor/github.com/moby/moby/daemon/cluster/convert/node.go
new file mode 100644
index 000000000..528ec54a6
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/cluster/convert/node.go
@@ -0,0 +1,94 @@
+package convert
+
+import (
+	"fmt"
+	"strings"
+
+	types "github.com/docker/docker/api/types/swarm"
+	swarmapi "github.com/docker/swarmkit/api"
+	gogotypes "github.com/gogo/protobuf/types"
+)
+
+// NodeFromGRPC converts a grpc Node to a Node.
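+// Editor's note (not part of the vendored code): enum values are lowered to
+// the engine's string form, e.g. a node with DesiredRole
+// swarmapi.NodeRole_MANAGER comes back with Spec.Role == "manager";
+// NodeSpecToGRPC below performs the reverse, uppercase lookup and rejects
+// unknown Role or Availability values with an error:
+//
+//	n := NodeFromGRPC(grpcNode)
+//	// n.Spec.Role == types.NodeRole("manager")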
+func NodeFromGRPC(n swarmapi.Node) types.Node { + node := types.Node{ + ID: n.ID, + Spec: types.NodeSpec{ + Role: types.NodeRole(strings.ToLower(n.Spec.DesiredRole.String())), + Availability: types.NodeAvailability(strings.ToLower(n.Spec.Availability.String())), + }, + Status: types.NodeStatus{ + State: types.NodeState(strings.ToLower(n.Status.State.String())), + Message: n.Status.Message, + Addr: n.Status.Addr, + }, + } + + // Meta + node.Version.Index = n.Meta.Version.Index + node.CreatedAt, _ = gogotypes.TimestampFromProto(n.Meta.CreatedAt) + node.UpdatedAt, _ = gogotypes.TimestampFromProto(n.Meta.UpdatedAt) + + //Annotations + node.Spec.Annotations = annotationsFromGRPC(n.Spec.Annotations) + + //Description + if n.Description != nil { + node.Description.Hostname = n.Description.Hostname + if n.Description.Platform != nil { + node.Description.Platform.Architecture = n.Description.Platform.Architecture + node.Description.Platform.OS = n.Description.Platform.OS + } + if n.Description.Resources != nil { + node.Description.Resources.NanoCPUs = n.Description.Resources.NanoCPUs + node.Description.Resources.MemoryBytes = n.Description.Resources.MemoryBytes + node.Description.Resources.GenericResources = GenericResourcesFromGRPC(n.Description.Resources.Generic) + } + if n.Description.Engine != nil { + node.Description.Engine.EngineVersion = n.Description.Engine.EngineVersion + node.Description.Engine.Labels = n.Description.Engine.Labels + for _, plugin := range n.Description.Engine.Plugins { + node.Description.Engine.Plugins = append(node.Description.Engine.Plugins, types.PluginDescription{Type: plugin.Type, Name: plugin.Name}) + } + } + if n.Description.TLSInfo != nil { + node.Description.TLSInfo.TrustRoot = string(n.Description.TLSInfo.TrustRoot) + node.Description.TLSInfo.CertIssuerPublicKey = n.Description.TLSInfo.CertIssuerPublicKey + node.Description.TLSInfo.CertIssuerSubject = n.Description.TLSInfo.CertIssuerSubject + } + } + + //Manager + if n.ManagerStatus != nil { + node.ManagerStatus = &types.ManagerStatus{ + Leader: n.ManagerStatus.Leader, + Reachability: types.Reachability(strings.ToLower(n.ManagerStatus.Reachability.String())), + Addr: n.ManagerStatus.Addr, + } + } + + return node +} + +// NodeSpecToGRPC converts a NodeSpec to a grpc NodeSpec. +func NodeSpecToGRPC(s types.NodeSpec) (swarmapi.NodeSpec, error) { + spec := swarmapi.NodeSpec{ + Annotations: swarmapi.Annotations{ + Name: s.Name, + Labels: s.Labels, + }, + } + if role, ok := swarmapi.NodeRole_value[strings.ToUpper(string(s.Role))]; ok { + spec.DesiredRole = swarmapi.NodeRole(role) + } else { + return swarmapi.NodeSpec{}, fmt.Errorf("invalid Role: %q", s.Role) + } + + if availability, ok := swarmapi.NodeSpec_Availability_value[strings.ToUpper(string(s.Availability))]; ok { + spec.Availability = swarmapi.NodeSpec_Availability(availability) + } else { + return swarmapi.NodeSpec{}, fmt.Errorf("invalid Availability: %q", s.Availability) + } + + return spec, nil +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/convert/secret.go b/vendor/github.com/moby/moby/daemon/cluster/convert/secret.go new file mode 100644 index 000000000..edbf8e593 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/convert/secret.go @@ -0,0 +1,63 @@ +package convert + +import ( + swarmtypes "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + gogotypes "github.com/gogo/protobuf/types" +) + +// SecretFromGRPC converts a grpc Secret to a Secret. 
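+// Editor's note (not part of the vendored code): like the config converters,
+// this round-trips Annotations and Data with SecretSpecToGRPC; the Driver
+// field additionally passes through driverFromGRPC/driverToGRPC, e.g.
+//
+//	spec := SecretSpecToGRPC(SecretFromGRPC(grpcSecret).Spec)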
+func SecretFromGRPC(s *swarmapi.Secret) swarmtypes.Secret {
+	secret := swarmtypes.Secret{
+		ID: s.ID,
+		Spec: swarmtypes.SecretSpec{
+			Annotations: annotationsFromGRPC(s.Spec.Annotations),
+			Data:        s.Spec.Data,
+			Driver:      driverFromGRPC(s.Spec.Driver),
+		},
+	}
+
+	// Meta
+	secret.Version.Index = s.Meta.Version.Index
+	secret.CreatedAt, _ = gogotypes.TimestampFromProto(s.Meta.CreatedAt)
+	secret.UpdatedAt, _ = gogotypes.TimestampFromProto(s.Meta.UpdatedAt)
+
+	return secret
+}
+
+// SecretSpecToGRPC converts a SecretSpec to a grpc SecretSpec.
+func SecretSpecToGRPC(s swarmtypes.SecretSpec) swarmapi.SecretSpec {
+	return swarmapi.SecretSpec{
+		Annotations: swarmapi.Annotations{
+			Name:   s.Name,
+			Labels: s.Labels,
+		},
+		Data:   s.Data,
+		Driver: driverToGRPC(s.Driver),
+	}
+}
+
+// SecretReferencesFromGRPC converts a slice of grpc SecretReferences to SecretReferences.
+func SecretReferencesFromGRPC(s []*swarmapi.SecretReference) []*swarmtypes.SecretReference {
+	refs := []*swarmtypes.SecretReference{}
+
+	for _, r := range s {
+		ref := &swarmtypes.SecretReference{
+			SecretID:   r.SecretID,
+			SecretName: r.SecretName,
+		}
+
+		if t, ok := r.Target.(*swarmapi.SecretReference_File); ok {
+			ref.File = &swarmtypes.SecretReferenceFileTarget{
+				Name: t.File.Name,
+				UID:  t.File.UID,
+				GID:  t.File.GID,
+				Mode: t.File.Mode,
+			}
+		}
+
+		refs = append(refs, ref)
+	}
+
+	return refs
+}
diff --git a/vendor/github.com/moby/moby/daemon/cluster/convert/service.go b/vendor/github.com/moby/moby/daemon/cluster/convert/service.go
new file mode 100644
index 000000000..f0dee57bd
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/cluster/convert/service.go
@@ -0,0 +1,613 @@
+package convert
+
+import (
+	"fmt"
+	"strings"
+
+	types "github.com/docker/docker/api/types/swarm"
+	"github.com/docker/docker/api/types/swarm/runtime"
+	"github.com/docker/docker/pkg/namesgenerator"
+	swarmapi "github.com/docker/swarmkit/api"
+	"github.com/docker/swarmkit/api/genericresource"
+	"github.com/gogo/protobuf/proto"
+	gogotypes "github.com/gogo/protobuf/types"
+	"github.com/pkg/errors"
+)
+
+var (
+	// ErrUnsupportedRuntime is returned when a service is created with a
+	// runtime that is not supported by the daemon.
+	ErrUnsupportedRuntime = errors.New("unsupported runtime")
+)
+
+// ServiceFromGRPC converts a grpc Service to a Service.
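+//
+// A minimal usage sketch (hypothetical caller, not part of the vendored file;
+// assumes apiService is a swarmapi.Service):
+//
+//	svc, err := convert.ServiceFromGRPC(apiService)
+//	if err != nil {
+//		return err
++//	}
+//	fmt.Println(svc.ID, svc.Spec.Name)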
+func ServiceFromGRPC(s swarmapi.Service) (types.Service, error) { + curSpec, err := serviceSpecFromGRPC(&s.Spec) + if err != nil { + return types.Service{}, err + } + prevSpec, err := serviceSpecFromGRPC(s.PreviousSpec) + if err != nil { + return types.Service{}, err + } + service := types.Service{ + ID: s.ID, + Spec: *curSpec, + PreviousSpec: prevSpec, + + Endpoint: endpointFromGRPC(s.Endpoint), + } + + // Meta + service.Version.Index = s.Meta.Version.Index + service.CreatedAt, _ = gogotypes.TimestampFromProto(s.Meta.CreatedAt) + service.UpdatedAt, _ = gogotypes.TimestampFromProto(s.Meta.UpdatedAt) + + // UpdateStatus + if s.UpdateStatus != nil { + service.UpdateStatus = &types.UpdateStatus{} + switch s.UpdateStatus.State { + case swarmapi.UpdateStatus_UPDATING: + service.UpdateStatus.State = types.UpdateStateUpdating + case swarmapi.UpdateStatus_PAUSED: + service.UpdateStatus.State = types.UpdateStatePaused + case swarmapi.UpdateStatus_COMPLETED: + service.UpdateStatus.State = types.UpdateStateCompleted + case swarmapi.UpdateStatus_ROLLBACK_STARTED: + service.UpdateStatus.State = types.UpdateStateRollbackStarted + case swarmapi.UpdateStatus_ROLLBACK_PAUSED: + service.UpdateStatus.State = types.UpdateStateRollbackPaused + case swarmapi.UpdateStatus_ROLLBACK_COMPLETED: + service.UpdateStatus.State = types.UpdateStateRollbackCompleted + } + + startedAt, _ := gogotypes.TimestampFromProto(s.UpdateStatus.StartedAt) + if !startedAt.IsZero() && startedAt.Unix() != 0 { + service.UpdateStatus.StartedAt = &startedAt + } + + completedAt, _ := gogotypes.TimestampFromProto(s.UpdateStatus.CompletedAt) + if !completedAt.IsZero() && completedAt.Unix() != 0 { + service.UpdateStatus.CompletedAt = &completedAt + } + + service.UpdateStatus.Message = s.UpdateStatus.Message + } + + return service, nil +} + +func serviceSpecFromGRPC(spec *swarmapi.ServiceSpec) (*types.ServiceSpec, error) { + if spec == nil { + return nil, nil + } + + serviceNetworks := make([]types.NetworkAttachmentConfig, 0, len(spec.Networks)) + for _, n := range spec.Networks { + netConfig := types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases, DriverOpts: n.DriverAttachmentOpts} + serviceNetworks = append(serviceNetworks, netConfig) + + } + + taskTemplate, err := taskSpecFromGRPC(spec.Task) + if err != nil { + return nil, err + } + + switch t := spec.Task.GetRuntime().(type) { + case *swarmapi.TaskSpec_Container: + containerConfig := t.Container + taskTemplate.ContainerSpec = containerSpecFromGRPC(containerConfig) + taskTemplate.Runtime = types.RuntimeContainer + case *swarmapi.TaskSpec_Generic: + switch t.Generic.Kind { + case string(types.RuntimePlugin): + taskTemplate.Runtime = types.RuntimePlugin + default: + return nil, fmt.Errorf("unknown task runtime type: %s", t.Generic.Payload.TypeUrl) + } + + default: + return nil, fmt.Errorf("error creating service; unsupported runtime %T", t) + } + + convertedSpec := &types.ServiceSpec{ + Annotations: annotationsFromGRPC(spec.Annotations), + TaskTemplate: taskTemplate, + Networks: serviceNetworks, + EndpointSpec: endpointSpecFromGRPC(spec.Endpoint), + } + + // UpdateConfig + convertedSpec.UpdateConfig = updateConfigFromGRPC(spec.Update) + convertedSpec.RollbackConfig = updateConfigFromGRPC(spec.Rollback) + + // Mode + switch t := spec.GetMode().(type) { + case *swarmapi.ServiceSpec_Global: + convertedSpec.Mode.Global = &types.GlobalService{} + case *swarmapi.ServiceSpec_Replicated: + convertedSpec.Mode.Replicated = &types.ReplicatedService{ + Replicas: &t.Replicated.Replicas, + } 
+ } + + return convertedSpec, nil +} + +// ServiceSpecToGRPC converts a ServiceSpec to a grpc ServiceSpec. +func ServiceSpecToGRPC(s types.ServiceSpec) (swarmapi.ServiceSpec, error) { + name := s.Name + if name == "" { + name = namesgenerator.GetRandomName(0) + } + + serviceNetworks := make([]*swarmapi.NetworkAttachmentConfig, 0, len(s.Networks)) + for _, n := range s.Networks { + netConfig := &swarmapi.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases, DriverAttachmentOpts: n.DriverOpts} + serviceNetworks = append(serviceNetworks, netConfig) + } + + taskNetworks := make([]*swarmapi.NetworkAttachmentConfig, 0, len(s.TaskTemplate.Networks)) + for _, n := range s.TaskTemplate.Networks { + netConfig := &swarmapi.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases, DriverAttachmentOpts: n.DriverOpts} + taskNetworks = append(taskNetworks, netConfig) + + } + + spec := swarmapi.ServiceSpec{ + Annotations: swarmapi.Annotations{ + Name: name, + Labels: s.Labels, + }, + Task: swarmapi.TaskSpec{ + Resources: resourcesToGRPC(s.TaskTemplate.Resources), + LogDriver: driverToGRPC(s.TaskTemplate.LogDriver), + Networks: taskNetworks, + ForceUpdate: s.TaskTemplate.ForceUpdate, + }, + Networks: serviceNetworks, + } + + switch s.TaskTemplate.Runtime { + case types.RuntimeContainer, "": // if empty runtime default to container + if s.TaskTemplate.ContainerSpec != nil { + containerSpec, err := containerToGRPC(s.TaskTemplate.ContainerSpec) + if err != nil { + return swarmapi.ServiceSpec{}, err + } + spec.Task.Runtime = &swarmapi.TaskSpec_Container{Container: containerSpec} + } + case types.RuntimePlugin: + if s.Mode.Replicated != nil { + return swarmapi.ServiceSpec{}, errors.New("plugins must not use replicated mode") + } + + s.Mode.Global = &types.GlobalService{} // must always be global + + if s.TaskTemplate.PluginSpec != nil { + pluginSpec, err := proto.Marshal(s.TaskTemplate.PluginSpec) + if err != nil { + return swarmapi.ServiceSpec{}, err + } + spec.Task.Runtime = &swarmapi.TaskSpec_Generic{ + Generic: &swarmapi.GenericRuntimeSpec{ + Kind: string(types.RuntimePlugin), + Payload: &gogotypes.Any{ + TypeUrl: string(types.RuntimeURLPlugin), + Value: pluginSpec, + }, + }, + } + } + default: + return swarmapi.ServiceSpec{}, ErrUnsupportedRuntime + } + + restartPolicy, err := restartPolicyToGRPC(s.TaskTemplate.RestartPolicy) + if err != nil { + return swarmapi.ServiceSpec{}, err + } + spec.Task.Restart = restartPolicy + + if s.TaskTemplate.Placement != nil { + var preferences []*swarmapi.PlacementPreference + for _, pref := range s.TaskTemplate.Placement.Preferences { + if pref.Spread != nil { + preferences = append(preferences, &swarmapi.PlacementPreference{ + Preference: &swarmapi.PlacementPreference_Spread{ + Spread: &swarmapi.SpreadOver{ + SpreadDescriptor: pref.Spread.SpreadDescriptor, + }, + }, + }) + } + } + var platforms []*swarmapi.Platform + for _, plat := range s.TaskTemplate.Placement.Platforms { + platforms = append(platforms, &swarmapi.Platform{ + Architecture: plat.Architecture, + OS: plat.OS, + }) + } + spec.Task.Placement = &swarmapi.Placement{ + Constraints: s.TaskTemplate.Placement.Constraints, + Preferences: preferences, + Platforms: platforms, + } + } + + spec.Update, err = updateConfigToGRPC(s.UpdateConfig) + if err != nil { + return swarmapi.ServiceSpec{}, err + } + spec.Rollback, err = updateConfigToGRPC(s.RollbackConfig) + if err != nil { + return swarmapi.ServiceSpec{}, err + } + + if s.EndpointSpec != nil { + if s.EndpointSpec.Mode != "" && + s.EndpointSpec.Mode != 
types.ResolutionModeVIP && + s.EndpointSpec.Mode != types.ResolutionModeDNSRR { + return swarmapi.ServiceSpec{}, fmt.Errorf("invalid resolution mode: %q", s.EndpointSpec.Mode) + } + + spec.Endpoint = &swarmapi.EndpointSpec{} + + spec.Endpoint.Mode = swarmapi.EndpointSpec_ResolutionMode(swarmapi.EndpointSpec_ResolutionMode_value[strings.ToUpper(string(s.EndpointSpec.Mode))]) + + for _, portConfig := range s.EndpointSpec.Ports { + spec.Endpoint.Ports = append(spec.Endpoint.Ports, &swarmapi.PortConfig{ + Name: portConfig.Name, + Protocol: swarmapi.PortConfig_Protocol(swarmapi.PortConfig_Protocol_value[strings.ToUpper(string(portConfig.Protocol))]), + PublishMode: swarmapi.PortConfig_PublishMode(swarmapi.PortConfig_PublishMode_value[strings.ToUpper(string(portConfig.PublishMode))]), + TargetPort: portConfig.TargetPort, + PublishedPort: portConfig.PublishedPort, + }) + } + } + + // Mode + if s.Mode.Global != nil && s.Mode.Replicated != nil { + return swarmapi.ServiceSpec{}, fmt.Errorf("cannot specify both replicated mode and global mode") + } + + if s.Mode.Global != nil { + spec.Mode = &swarmapi.ServiceSpec_Global{ + Global: &swarmapi.GlobalService{}, + } + } else if s.Mode.Replicated != nil && s.Mode.Replicated.Replicas != nil { + spec.Mode = &swarmapi.ServiceSpec_Replicated{ + Replicated: &swarmapi.ReplicatedService{Replicas: *s.Mode.Replicated.Replicas}, + } + } else { + spec.Mode = &swarmapi.ServiceSpec_Replicated{ + Replicated: &swarmapi.ReplicatedService{Replicas: 1}, + } + } + + return spec, nil +} + +func annotationsFromGRPC(ann swarmapi.Annotations) types.Annotations { + a := types.Annotations{ + Name: ann.Name, + Labels: ann.Labels, + } + + if a.Labels == nil { + a.Labels = make(map[string]string) + } + + return a +} + +// GenericResourcesFromGRPC converts a GRPC GenericResource to a GenericResource +func GenericResourcesFromGRPC(genericRes []*swarmapi.GenericResource) []types.GenericResource { + var generic []types.GenericResource + for _, res := range genericRes { + var current types.GenericResource + + switch r := res.Resource.(type) { + case *swarmapi.GenericResource_DiscreteResourceSpec: + current.DiscreteResourceSpec = &types.DiscreteGenericResource{ + Kind: r.DiscreteResourceSpec.Kind, + Value: r.DiscreteResourceSpec.Value, + } + case *swarmapi.GenericResource_NamedResourceSpec: + current.NamedResourceSpec = &types.NamedGenericResource{ + Kind: r.NamedResourceSpec.Kind, + Value: r.NamedResourceSpec.Value, + } + } + + generic = append(generic, current) + } + + return generic +} + +func resourcesFromGRPC(res *swarmapi.ResourceRequirements) *types.ResourceRequirements { + var resources *types.ResourceRequirements + if res != nil { + resources = &types.ResourceRequirements{} + if res.Limits != nil { + resources.Limits = &types.Resources{ + NanoCPUs: res.Limits.NanoCPUs, + MemoryBytes: res.Limits.MemoryBytes, + } + } + if res.Reservations != nil { + resources.Reservations = &types.Resources{ + NanoCPUs: res.Reservations.NanoCPUs, + MemoryBytes: res.Reservations.MemoryBytes, + GenericResources: GenericResourcesFromGRPC(res.Reservations.Generic), + } + } + } + + return resources +} + +// GenericResourcesToGRPC converts a GenericResource to a GRPC GenericResource +func GenericResourcesToGRPC(genericRes []types.GenericResource) []*swarmapi.GenericResource { + var generic []*swarmapi.GenericResource + for _, res := range genericRes { + var r *swarmapi.GenericResource + + if res.DiscreteResourceSpec != nil { + r = genericresource.NewDiscrete(res.DiscreteResourceSpec.Kind, 
res.DiscreteResourceSpec.Value) + } else if res.NamedResourceSpec != nil { + r = genericresource.NewString(res.NamedResourceSpec.Kind, res.NamedResourceSpec.Value) + } + + generic = append(generic, r) + } + + return generic +} + +func resourcesToGRPC(res *types.ResourceRequirements) *swarmapi.ResourceRequirements { + var reqs *swarmapi.ResourceRequirements + if res != nil { + reqs = &swarmapi.ResourceRequirements{} + if res.Limits != nil { + reqs.Limits = &swarmapi.Resources{ + NanoCPUs: res.Limits.NanoCPUs, + MemoryBytes: res.Limits.MemoryBytes, + } + } + if res.Reservations != nil { + reqs.Reservations = &swarmapi.Resources{ + NanoCPUs: res.Reservations.NanoCPUs, + MemoryBytes: res.Reservations.MemoryBytes, + Generic: GenericResourcesToGRPC(res.Reservations.GenericResources), + } + + } + } + return reqs +} + +func restartPolicyFromGRPC(p *swarmapi.RestartPolicy) *types.RestartPolicy { + var rp *types.RestartPolicy + if p != nil { + rp = &types.RestartPolicy{} + + switch p.Condition { + case swarmapi.RestartOnNone: + rp.Condition = types.RestartPolicyConditionNone + case swarmapi.RestartOnFailure: + rp.Condition = types.RestartPolicyConditionOnFailure + case swarmapi.RestartOnAny: + rp.Condition = types.RestartPolicyConditionAny + default: + rp.Condition = types.RestartPolicyConditionAny + } + + if p.Delay != nil { + delay, _ := gogotypes.DurationFromProto(p.Delay) + rp.Delay = &delay + } + if p.Window != nil { + window, _ := gogotypes.DurationFromProto(p.Window) + rp.Window = &window + } + + rp.MaxAttempts = &p.MaxAttempts + } + return rp +} + +func restartPolicyToGRPC(p *types.RestartPolicy) (*swarmapi.RestartPolicy, error) { + var rp *swarmapi.RestartPolicy + if p != nil { + rp = &swarmapi.RestartPolicy{} + + switch p.Condition { + case types.RestartPolicyConditionNone: + rp.Condition = swarmapi.RestartOnNone + case types.RestartPolicyConditionOnFailure: + rp.Condition = swarmapi.RestartOnFailure + case types.RestartPolicyConditionAny: + rp.Condition = swarmapi.RestartOnAny + default: + if string(p.Condition) != "" { + return nil, fmt.Errorf("invalid RestartCondition: %q", p.Condition) + } + rp.Condition = swarmapi.RestartOnAny + } + + if p.Delay != nil { + rp.Delay = gogotypes.DurationProto(*p.Delay) + } + if p.Window != nil { + rp.Window = gogotypes.DurationProto(*p.Window) + } + if p.MaxAttempts != nil { + rp.MaxAttempts = *p.MaxAttempts + + } + } + return rp, nil +} + +func placementFromGRPC(p *swarmapi.Placement) *types.Placement { + if p == nil { + return nil + } + r := &types.Placement{ + Constraints: p.Constraints, + } + + for _, pref := range p.Preferences { + if spread := pref.GetSpread(); spread != nil { + r.Preferences = append(r.Preferences, types.PlacementPreference{ + Spread: &types.SpreadOver{ + SpreadDescriptor: spread.SpreadDescriptor, + }, + }) + } + } + + for _, plat := range p.Platforms { + r.Platforms = append(r.Platforms, types.Platform{ + Architecture: plat.Architecture, + OS: plat.OS, + }) + } + + return r +} + +func driverFromGRPC(p *swarmapi.Driver) *types.Driver { + if p == nil { + return nil + } + + return &types.Driver{ + Name: p.Name, + Options: p.Options, + } +} + +func driverToGRPC(p *types.Driver) *swarmapi.Driver { + if p == nil { + return nil + } + + return &swarmapi.Driver{ + Name: p.Name, + Options: p.Options, + } +} + +func updateConfigFromGRPC(updateConfig *swarmapi.UpdateConfig) *types.UpdateConfig { + if updateConfig == nil { + return nil + } + + converted := &types.UpdateConfig{ + Parallelism: updateConfig.Parallelism, + MaxFailureRatio: 
updateConfig.MaxFailureRatio, + } + + converted.Delay = updateConfig.Delay + if updateConfig.Monitor != nil { + converted.Monitor, _ = gogotypes.DurationFromProto(updateConfig.Monitor) + } + + switch updateConfig.FailureAction { + case swarmapi.UpdateConfig_PAUSE: + converted.FailureAction = types.UpdateFailureActionPause + case swarmapi.UpdateConfig_CONTINUE: + converted.FailureAction = types.UpdateFailureActionContinue + case swarmapi.UpdateConfig_ROLLBACK: + converted.FailureAction = types.UpdateFailureActionRollback + } + + switch updateConfig.Order { + case swarmapi.UpdateConfig_STOP_FIRST: + converted.Order = types.UpdateOrderStopFirst + case swarmapi.UpdateConfig_START_FIRST: + converted.Order = types.UpdateOrderStartFirst + } + + return converted +} + +func updateConfigToGRPC(updateConfig *types.UpdateConfig) (*swarmapi.UpdateConfig, error) { + if updateConfig == nil { + return nil, nil + } + + converted := &swarmapi.UpdateConfig{ + Parallelism: updateConfig.Parallelism, + Delay: updateConfig.Delay, + MaxFailureRatio: updateConfig.MaxFailureRatio, + } + + switch updateConfig.FailureAction { + case types.UpdateFailureActionPause, "": + converted.FailureAction = swarmapi.UpdateConfig_PAUSE + case types.UpdateFailureActionContinue: + converted.FailureAction = swarmapi.UpdateConfig_CONTINUE + case types.UpdateFailureActionRollback: + converted.FailureAction = swarmapi.UpdateConfig_ROLLBACK + default: + return nil, fmt.Errorf("unrecognized update failure action %s", updateConfig.FailureAction) + } + if updateConfig.Monitor != 0 { + converted.Monitor = gogotypes.DurationProto(updateConfig.Monitor) + } + + switch updateConfig.Order { + case types.UpdateOrderStopFirst, "": + converted.Order = swarmapi.UpdateConfig_STOP_FIRST + case types.UpdateOrderStartFirst: + converted.Order = swarmapi.UpdateConfig_START_FIRST + default: + return nil, fmt.Errorf("unrecognized update order %s", updateConfig.Order) + } + + return converted, nil +} + +func taskSpecFromGRPC(taskSpec swarmapi.TaskSpec) (types.TaskSpec, error) { + taskNetworks := make([]types.NetworkAttachmentConfig, 0, len(taskSpec.Networks)) + for _, n := range taskSpec.Networks { + netConfig := types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases, DriverOpts: n.DriverAttachmentOpts} + taskNetworks = append(taskNetworks, netConfig) + } + + t := types.TaskSpec{ + Resources: resourcesFromGRPC(taskSpec.Resources), + RestartPolicy: restartPolicyFromGRPC(taskSpec.Restart), + Placement: placementFromGRPC(taskSpec.Placement), + LogDriver: driverFromGRPC(taskSpec.LogDriver), + Networks: taskNetworks, + ForceUpdate: taskSpec.ForceUpdate, + } + + switch taskSpec.GetRuntime().(type) { + case *swarmapi.TaskSpec_Container, nil: + c := taskSpec.GetContainer() + if c != nil { + t.ContainerSpec = containerSpecFromGRPC(c) + } + case *swarmapi.TaskSpec_Generic: + g := taskSpec.GetGeneric() + if g != nil { + switch g.Kind { + case string(types.RuntimePlugin): + var p runtime.PluginSpec + if err := proto.Unmarshal(g.Payload.Value, &p); err != nil { + return t, errors.Wrap(err, "error unmarshalling plugin spec") + } + t.PluginSpec = &p + } + } + } + + return t, nil +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/convert/service_test.go b/vendor/github.com/moby/moby/daemon/cluster/convert/service_test.go new file mode 100644 index 000000000..1b6598974 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/convert/service_test.go @@ -0,0 +1,150 @@ +package convert + +import ( + "testing" + + swarmtypes 
"github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/swarm/runtime" + swarmapi "github.com/docker/swarmkit/api" + google_protobuf3 "github.com/gogo/protobuf/types" +) + +func TestServiceConvertFromGRPCRuntimeContainer(t *testing.T) { + gs := swarmapi.Service{ + Meta: swarmapi.Meta{ + Version: swarmapi.Version{ + Index: 1, + }, + CreatedAt: nil, + UpdatedAt: nil, + }, + SpecVersion: &swarmapi.Version{ + Index: 1, + }, + Spec: swarmapi.ServiceSpec{ + Task: swarmapi.TaskSpec{ + Runtime: &swarmapi.TaskSpec_Container{ + Container: &swarmapi.ContainerSpec{ + Image: "alpine:latest", + }, + }, + }, + }, + } + + svc, err := ServiceFromGRPC(gs) + if err != nil { + t.Fatal(err) + } + + if svc.Spec.TaskTemplate.Runtime != swarmtypes.RuntimeContainer { + t.Fatalf("expected type %s; received %T", swarmtypes.RuntimeContainer, svc.Spec.TaskTemplate.Runtime) + } +} + +func TestServiceConvertFromGRPCGenericRuntimePlugin(t *testing.T) { + kind := string(swarmtypes.RuntimePlugin) + url := swarmtypes.RuntimeURLPlugin + gs := swarmapi.Service{ + Meta: swarmapi.Meta{ + Version: swarmapi.Version{ + Index: 1, + }, + CreatedAt: nil, + UpdatedAt: nil, + }, + SpecVersion: &swarmapi.Version{ + Index: 1, + }, + Spec: swarmapi.ServiceSpec{ + Task: swarmapi.TaskSpec{ + Runtime: &swarmapi.TaskSpec_Generic{ + Generic: &swarmapi.GenericRuntimeSpec{ + Kind: kind, + Payload: &google_protobuf3.Any{ + TypeUrl: string(url), + }, + }, + }, + }, + }, + } + + svc, err := ServiceFromGRPC(gs) + if err != nil { + t.Fatal(err) + } + + if svc.Spec.TaskTemplate.Runtime != swarmtypes.RuntimePlugin { + t.Fatalf("expected type %s; received %T", swarmtypes.RuntimePlugin, svc.Spec.TaskTemplate.Runtime) + } +} + +func TestServiceConvertToGRPCGenericRuntimePlugin(t *testing.T) { + s := swarmtypes.ServiceSpec{ + TaskTemplate: swarmtypes.TaskSpec{ + Runtime: swarmtypes.RuntimePlugin, + PluginSpec: &runtime.PluginSpec{}, + }, + Mode: swarmtypes.ServiceMode{ + Global: &swarmtypes.GlobalService{}, + }, + } + + svc, err := ServiceSpecToGRPC(s) + if err != nil { + t.Fatal(err) + } + + v, ok := svc.Task.Runtime.(*swarmapi.TaskSpec_Generic) + if !ok { + t.Fatal("expected type swarmapi.TaskSpec_Generic") + } + + if v.Generic.Payload.TypeUrl != string(swarmtypes.RuntimeURLPlugin) { + t.Fatalf("expected url %s; received %s", swarmtypes.RuntimeURLPlugin, v.Generic.Payload.TypeUrl) + } +} + +func TestServiceConvertToGRPCContainerRuntime(t *testing.T) { + image := "alpine:latest" + s := swarmtypes.ServiceSpec{ + TaskTemplate: swarmtypes.TaskSpec{ + ContainerSpec: &swarmtypes.ContainerSpec{ + Image: image, + }, + }, + Mode: swarmtypes.ServiceMode{ + Global: &swarmtypes.GlobalService{}, + }, + } + + svc, err := ServiceSpecToGRPC(s) + if err != nil { + t.Fatal(err) + } + + v, ok := svc.Task.Runtime.(*swarmapi.TaskSpec_Container) + if !ok { + t.Fatal("expected type swarmapi.TaskSpec_Container") + } + + if v.Container.Image != image { + t.Fatalf("expected image %s; received %s", image, v.Container.Image) + } +} + +func TestServiceConvertToGRPCGenericRuntimeCustom(t *testing.T) { + s := swarmtypes.ServiceSpec{ + TaskTemplate: swarmtypes.TaskSpec{ + Runtime: "customruntime", + }, + Mode: swarmtypes.ServiceMode{ + Global: &swarmtypes.GlobalService{}, + }, + } + + if _, err := ServiceSpecToGRPC(s); err != ErrUnsupportedRuntime { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/convert/swarm.go b/vendor/github.com/moby/moby/daemon/cluster/convert/swarm.go new file mode 100644 index 000000000..2ea89b968 --- 
/dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/convert/swarm.go @@ -0,0 +1,148 @@ +package convert + +import ( + "fmt" + "strings" + "time" + + types "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/ca" + gogotypes "github.com/gogo/protobuf/types" +) + +// SwarmFromGRPC converts a grpc Cluster to a Swarm. +func SwarmFromGRPC(c swarmapi.Cluster) types.Swarm { + swarm := types.Swarm{ + ClusterInfo: types.ClusterInfo{ + ID: c.ID, + Spec: types.Spec{ + Orchestration: types.OrchestrationConfig{ + TaskHistoryRetentionLimit: &c.Spec.Orchestration.TaskHistoryRetentionLimit, + }, + Raft: types.RaftConfig{ + SnapshotInterval: c.Spec.Raft.SnapshotInterval, + KeepOldSnapshots: &c.Spec.Raft.KeepOldSnapshots, + LogEntriesForSlowFollowers: c.Spec.Raft.LogEntriesForSlowFollowers, + HeartbeatTick: int(c.Spec.Raft.HeartbeatTick), + ElectionTick: int(c.Spec.Raft.ElectionTick), + }, + EncryptionConfig: types.EncryptionConfig{ + AutoLockManagers: c.Spec.EncryptionConfig.AutoLockManagers, + }, + CAConfig: types.CAConfig{ + // do not include the signing CA cert or key (it should already be redacted via the swarm APIs) - + // the key because it's secret, and the cert because otherwise doing a get + update on the spec + // can cause issues because the key would be missing and the cert wouldn't + ForceRotate: c.Spec.CAConfig.ForceRotate, + }, + }, + TLSInfo: types.TLSInfo{ + TrustRoot: string(c.RootCA.CACert), + }, + RootRotationInProgress: c.RootCA.RootRotation != nil, + }, + JoinTokens: types.JoinTokens{ + Worker: c.RootCA.JoinTokens.Worker, + Manager: c.RootCA.JoinTokens.Manager, + }, + } + + issuerInfo, err := ca.IssuerFromAPIRootCA(&c.RootCA) + if err == nil && issuerInfo != nil { + swarm.TLSInfo.CertIssuerSubject = issuerInfo.Subject + swarm.TLSInfo.CertIssuerPublicKey = issuerInfo.PublicKey + } + + heartbeatPeriod, _ := gogotypes.DurationFromProto(c.Spec.Dispatcher.HeartbeatPeriod) + swarm.Spec.Dispatcher.HeartbeatPeriod = heartbeatPeriod + + swarm.Spec.CAConfig.NodeCertExpiry, _ = gogotypes.DurationFromProto(c.Spec.CAConfig.NodeCertExpiry) + + for _, ca := range c.Spec.CAConfig.ExternalCAs { + swarm.Spec.CAConfig.ExternalCAs = append(swarm.Spec.CAConfig.ExternalCAs, &types.ExternalCA{ + Protocol: types.ExternalCAProtocol(strings.ToLower(ca.Protocol.String())), + URL: ca.URL, + Options: ca.Options, + CACert: string(ca.CACert), + }) + } + + // Meta + swarm.Version.Index = c.Meta.Version.Index + swarm.CreatedAt, _ = gogotypes.TimestampFromProto(c.Meta.CreatedAt) + swarm.UpdatedAt, _ = gogotypes.TimestampFromProto(c.Meta.UpdatedAt) + + // Annotations + swarm.Spec.Annotations = annotationsFromGRPC(c.Spec.Annotations) + + return swarm +} + +// SwarmSpecToGRPC converts a Spec to a grpc ClusterSpec. +func SwarmSpecToGRPC(s types.Spec) (swarmapi.ClusterSpec, error) { + return MergeSwarmSpecToGRPC(s, swarmapi.ClusterSpec{}) +} + +// MergeSwarmSpecToGRPC merges a Spec with an initial grpc ClusterSpec +func MergeSwarmSpecToGRPC(s types.Spec, spec swarmapi.ClusterSpec) (swarmapi.ClusterSpec, error) { + // We take the initSpec (either created from scratch, or returned by swarmkit), + // and will only change the value if the one taken from types.Spec is not nil or 0. + // In other words, if the value taken from types.Spec is nil or 0, we will maintain the status quo. 
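+	// For example (hypothetical values): merging a types.Spec that only sets
+	// Raft.SnapshotInterval = 5000 onto a ClusterSpec returned by swarmkit
+	// overwrites just that field and leaves every other field untouched.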
+ if s.Annotations.Name != "" { + spec.Annotations.Name = s.Annotations.Name + } + if len(s.Annotations.Labels) != 0 { + spec.Annotations.Labels = s.Annotations.Labels + } + + if s.Orchestration.TaskHistoryRetentionLimit != nil { + spec.Orchestration.TaskHistoryRetentionLimit = *s.Orchestration.TaskHistoryRetentionLimit + } + if s.Raft.SnapshotInterval != 0 { + spec.Raft.SnapshotInterval = s.Raft.SnapshotInterval + } + if s.Raft.KeepOldSnapshots != nil { + spec.Raft.KeepOldSnapshots = *s.Raft.KeepOldSnapshots + } + if s.Raft.LogEntriesForSlowFollowers != 0 { + spec.Raft.LogEntriesForSlowFollowers = s.Raft.LogEntriesForSlowFollowers + } + if s.Raft.HeartbeatTick != 0 { + spec.Raft.HeartbeatTick = uint32(s.Raft.HeartbeatTick) + } + if s.Raft.ElectionTick != 0 { + spec.Raft.ElectionTick = uint32(s.Raft.ElectionTick) + } + if s.Dispatcher.HeartbeatPeriod != 0 { + spec.Dispatcher.HeartbeatPeriod = gogotypes.DurationProto(time.Duration(s.Dispatcher.HeartbeatPeriod)) + } + if s.CAConfig.NodeCertExpiry != 0 { + spec.CAConfig.NodeCertExpiry = gogotypes.DurationProto(s.CAConfig.NodeCertExpiry) + } + if s.CAConfig.SigningCACert != "" { + spec.CAConfig.SigningCACert = []byte(s.CAConfig.SigningCACert) + } + if s.CAConfig.SigningCAKey != "" { + // do propagate the signing CA key here because we want to provide it TO the swarm APIs + spec.CAConfig.SigningCAKey = []byte(s.CAConfig.SigningCAKey) + } + spec.CAConfig.ForceRotate = s.CAConfig.ForceRotate + + for _, ca := range s.CAConfig.ExternalCAs { + protocol, ok := swarmapi.ExternalCA_CAProtocol_value[strings.ToUpper(string(ca.Protocol))] + if !ok { + return swarmapi.ClusterSpec{}, fmt.Errorf("invalid protocol: %q", ca.Protocol) + } + spec.CAConfig.ExternalCAs = append(spec.CAConfig.ExternalCAs, &swarmapi.ExternalCA{ + Protocol: swarmapi.ExternalCA_CAProtocol(protocol), + URL: ca.URL, + Options: ca.Options, + CACert: []byte(ca.CACert), + }) + } + + spec.EncryptionConfig.AutoLockManagers = s.EncryptionConfig.AutoLockManagers + + return spec, nil +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/convert/task.go b/vendor/github.com/moby/moby/daemon/cluster/convert/task.go new file mode 100644 index 000000000..bedf2dba9 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/convert/task.go @@ -0,0 +1,70 @@ +package convert + +import ( + "strings" + + types "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + gogotypes "github.com/gogo/protobuf/types" +) + +// TaskFromGRPC converts a grpc Task to a Task. 
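+//
+// A minimal usage sketch (hypothetical caller, not part of the vendored file;
+// assumes apiTask is a swarmapi.Task):
+//
+//	task, err := convert.TaskFromGRPC(apiTask)
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(task.ID, task.Status.State, task.DesiredState)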
+func TaskFromGRPC(t swarmapi.Task) (types.Task, error) { + if t.Spec.GetAttachment() != nil { + return types.Task{}, nil + } + containerStatus := t.Status.GetContainer() + taskSpec, err := taskSpecFromGRPC(t.Spec) + if err != nil { + return types.Task{}, err + } + task := types.Task{ + ID: t.ID, + Annotations: annotationsFromGRPC(t.Annotations), + ServiceID: t.ServiceID, + Slot: int(t.Slot), + NodeID: t.NodeID, + Spec: taskSpec, + Status: types.TaskStatus{ + State: types.TaskState(strings.ToLower(t.Status.State.String())), + Message: t.Status.Message, + Err: t.Status.Err, + }, + DesiredState: types.TaskState(strings.ToLower(t.DesiredState.String())), + GenericResources: GenericResourcesFromGRPC(t.AssignedGenericResources), + } + + // Meta + task.Version.Index = t.Meta.Version.Index + task.CreatedAt, _ = gogotypes.TimestampFromProto(t.Meta.CreatedAt) + task.UpdatedAt, _ = gogotypes.TimestampFromProto(t.Meta.UpdatedAt) + + task.Status.Timestamp, _ = gogotypes.TimestampFromProto(t.Status.Timestamp) + + if containerStatus != nil { + task.Status.ContainerStatus.ContainerID = containerStatus.ContainerID + task.Status.ContainerStatus.PID = int(containerStatus.PID) + task.Status.ContainerStatus.ExitCode = int(containerStatus.ExitCode) + } + + // NetworksAttachments + for _, na := range t.Networks { + task.NetworksAttachments = append(task.NetworksAttachments, networkAttachmentFromGRPC(na)) + } + + if t.Status.PortStatus == nil { + return task, nil + } + + for _, p := range t.Status.PortStatus.Ports { + task.Status.PortStatus.Ports = append(task.Status.PortStatus.Ports, types.PortConfig{ + Name: p.Name, + Protocol: types.PortConfigProtocol(strings.ToLower(swarmapi.PortConfig_Protocol_name[int32(p.Protocol)])), + PublishMode: types.PortConfigPublishMode(strings.ToLower(swarmapi.PortConfig_PublishMode_name[int32(p.PublishMode)])), + TargetPort: p.TargetPort, + PublishedPort: p.PublishedPort, + }) + } + + return task, nil +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/executor/backend.go b/vendor/github.com/moby/moby/daemon/cluster/executor/backend.go new file mode 100644 index 000000000..fbe900656 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/executor/backend.go @@ -0,0 +1,64 @@ +package executor + +import ( + "io" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" + swarmtypes "github.com/docker/docker/api/types/swarm" + containerpkg "github.com/docker/docker/container" + clustertypes "github.com/docker/docker/daemon/cluster/provider" + "github.com/docker/docker/plugin" + "github.com/docker/libnetwork" + "github.com/docker/libnetwork/cluster" + networktypes "github.com/docker/libnetwork/types" + "github.com/docker/swarmkit/agent/exec" + "golang.org/x/net/context" +) + +// Backend defines the executor component for a swarm agent. 
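+//
+// In the engine the Docker daemon is the component expected to satisfy this
+// interface; a hypothetical compile-time assertion for an implementation
+// would read:
+//
+//	var _ executor.Backend = (*daemon.Daemon)(nil)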
+type Backend interface { + CreateManagedNetwork(clustertypes.NetworkCreateRequest) error + DeleteManagedNetwork(name string) error + FindNetwork(idName string) (libnetwork.Network, error) + SetupIngress(clustertypes.NetworkCreateRequest, string) (<-chan struct{}, error) + ReleaseIngress() (<-chan struct{}, error) + PullImage(ctx context.Context, image, tag, platform string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error + CreateManagedContainer(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) + ContainerStart(name string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error + ContainerStop(name string, seconds *int) error + ContainerLogs(context.Context, string, *types.ContainerLogsOptions) (<-chan *backend.LogMessage, error) + ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error + ActivateContainerServiceBinding(containerName string) error + DeactivateContainerServiceBinding(containerName string) error + UpdateContainerServiceConfig(containerName string, serviceConfig *clustertypes.ServiceConfig) error + ContainerInspectCurrent(name string, size bool) (*types.ContainerJSON, error) + ContainerWait(ctx context.Context, name string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error) + ContainerRm(name string, config *types.ContainerRmConfig) error + ContainerKill(name string, sig uint64) error + SetContainerDependencyStore(name string, store exec.DependencyGetter) error + SetContainerSecretReferences(name string, refs []*swarmtypes.SecretReference) error + SetContainerConfigReferences(name string, refs []*swarmtypes.ConfigReference) error + SystemInfo() (*types.Info, error) + VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error) + Containers(config *types.ContainerListOptions) ([]*types.Container, error) + SetNetworkBootstrapKeys([]*networktypes.EncryptionKey) error + DaemonJoinsCluster(provider cluster.Provider) + DaemonLeavesCluster() + IsSwarmCompatible() error + SubscribeToEvents(since, until time.Time, filter filters.Args) ([]events.Message, chan interface{}) + UnsubscribeFromEvents(listener chan interface{}) + UpdateAttachment(string, string, string, *network.NetworkingConfig) error + WaitForDetachment(context.Context, string, string, string, string) error + GetRepository(context.Context, reference.Named, *types.AuthConfig) (distribution.Repository, bool, error) + LookupImage(name string) (*types.ImageInspect, error) + PluginManager() *plugin.Manager + PluginGetter() *plugin.Store +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/executor/container/adapter.go b/vendor/github.com/moby/moby/daemon/cluster/executor/container/adapter.go new file mode 100644 index 000000000..7444057c3 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/executor/container/adapter.go @@ -0,0 +1,472 @@ +package container + +import ( + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "runtime" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + containerpkg "github.com/docker/docker/container" + "github.com/docker/docker/daemon/cluster/convert" + executorpkg 
"github.com/docker/docker/daemon/cluster/executor" + "github.com/docker/docker/pkg/system" + "github.com/docker/libnetwork" + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/log" + gogotypes "github.com/gogo/protobuf/types" + "github.com/opencontainers/go-digest" + "golang.org/x/net/context" + "golang.org/x/time/rate" +) + +// containerAdapter conducts remote operations for a container. All calls +// are mostly naked calls to the client API, seeded with information from +// containerConfig. +type containerAdapter struct { + backend executorpkg.Backend + container *containerConfig + dependencies exec.DependencyGetter +} + +func newContainerAdapter(b executorpkg.Backend, task *api.Task, dependencies exec.DependencyGetter) (*containerAdapter, error) { + ctnr, err := newContainerConfig(task) + if err != nil { + return nil, err + } + + return &containerAdapter{ + container: ctnr, + backend: b, + dependencies: dependencies, + }, nil +} + +func (c *containerAdapter) pullImage(ctx context.Context) error { + spec := c.container.spec() + + // Skip pulling if the image is referenced by image ID. + if _, err := digest.Parse(spec.Image); err == nil { + return nil + } + + // Skip pulling if the image is referenced by digest and already + // exists locally. + named, err := reference.ParseNormalizedNamed(spec.Image) + if err == nil { + if _, ok := named.(reference.Canonical); ok { + _, err := c.backend.LookupImage(spec.Image) + if err == nil { + return nil + } + } + } + + // if the image needs to be pulled, the auth config will be retrieved and updated + var encodedAuthConfig string + if spec.PullOptions != nil { + encodedAuthConfig = spec.PullOptions.RegistryAuth + } + + authConfig := &types.AuthConfig{} + if encodedAuthConfig != "" { + if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuthConfig))).Decode(authConfig); err != nil { + logrus.Warnf("invalid authconfig: %v", err) + } + } + + pr, pw := io.Pipe() + metaHeaders := map[string][]string{} + go func() { + // TODO @jhowardmsft LCOW Support: This will need revisiting as + // the stack is built up to include LCOW support for swarm. 
+ platform := runtime.GOOS + if system.LCOWSupported() { + platform = "linux" + } + err := c.backend.PullImage(ctx, c.container.image(), "", platform, metaHeaders, authConfig, pw) + pw.CloseWithError(err) + }() + + dec := json.NewDecoder(pr) + dec.UseNumber() + m := map[string]interface{}{} + spamLimiter := rate.NewLimiter(rate.Every(time.Second), 1) + + lastStatus := "" + for { + if err := dec.Decode(&m); err != nil { + if err == io.EOF { + break + } + return err + } + l := log.G(ctx) + // limit pull progress logs unless the status changes + if spamLimiter.Allow() || lastStatus != m["status"] { + // if we have progress details, we have everything we need + if progress, ok := m["progressDetail"].(map[string]interface{}); ok { + // first, log the image and status + l = l.WithFields(logrus.Fields{ + "image": c.container.image(), + "status": m["status"], + }) + // then, if we have progress, log the progress + if progress["current"] != nil && progress["total"] != nil { + l = l.WithFields(logrus.Fields{ + "current": progress["current"], + "total": progress["total"], + }) + } + } + l.Debug("pull in progress") + } + // sometimes, we get no useful information at all, and add no fields + if status, ok := m["status"].(string); ok { + lastStatus = status + } + } + + // if the final stream object contained an error, return it + if errMsg, ok := m["error"]; ok { + return fmt.Errorf("%v", errMsg) + } + return nil +} + +func (c *containerAdapter) createNetworks(ctx context.Context) error { + for _, network := range c.container.networks() { + ncr, err := c.container.networkCreateRequest(network) + if err != nil { + return err + } + + if err := c.backend.CreateManagedNetwork(ncr); err != nil { // todo name missing + if _, ok := err.(libnetwork.NetworkNameError); ok { + continue + } + + return err + } + } + + return nil +} + +func (c *containerAdapter) removeNetworks(ctx context.Context) error { + for _, nid := range c.container.networks() { + if err := c.backend.DeleteManagedNetwork(nid); err != nil { + switch err.(type) { + case *libnetwork.ActiveEndpointsError: + continue + case libnetwork.ErrNoSuchNetwork: + continue + default: + log.G(ctx).Errorf("network %s remove failed: %v", nid, err) + return err + } + } + } + + return nil +} + +func (c *containerAdapter) networkAttach(ctx context.Context) error { + config := c.container.createNetworkingConfig(c.backend) + + var ( + networkName string + networkID string + ) + + if config != nil { + for n, epConfig := range config.EndpointsConfig { + networkName = n + networkID = epConfig.NetworkID + break + } + } + + return c.backend.UpdateAttachment(networkName, networkID, c.container.networkAttachmentContainerID(), config) +} + +func (c *containerAdapter) waitForDetach(ctx context.Context) error { + config := c.container.createNetworkingConfig(c.backend) + + var ( + networkName string + networkID string + ) + + if config != nil { + for n, epConfig := range config.EndpointsConfig { + networkName = n + networkID = epConfig.NetworkID + break + } + } + + return c.backend.WaitForDetachment(ctx, networkName, networkID, c.container.taskID(), c.container.networkAttachmentContainerID()) +} + +func (c *containerAdapter) create(ctx context.Context) error { + var cr containertypes.ContainerCreateCreatedBody + var err error + if cr, err = c.backend.CreateManagedContainer(types.ContainerCreateConfig{ + Name: c.container.name(), + Config: c.container.config(), + HostConfig: c.container.hostConfig(), + // Use the first network in container create + NetworkingConfig: 
c.container.createNetworkingConfig(c.backend),
+	}); err != nil {
+		return err
+	}
+
+	// Docker daemon currently doesn't support multiple networks in container create
+	// Connect to all other networks
+	nc := c.container.connectNetworkingConfig(c.backend)
+
+	if nc != nil {
+		for n, ep := range nc.EndpointsConfig {
+			if err := c.backend.ConnectContainerToNetwork(cr.ID, n, ep); err != nil {
+				return err
+			}
+		}
+	}
+
+	container := c.container.task.Spec.GetContainer()
+	if container == nil {
+		return errors.New("unable to get container from task spec")
+	}
+
+	if err := c.backend.SetContainerDependencyStore(cr.ID, c.dependencies); err != nil {
+		return err
+	}
+
+	// configure secrets
+	secretRefs := convert.SecretReferencesFromGRPC(container.Secrets)
+	if err := c.backend.SetContainerSecretReferences(cr.ID, secretRefs); err != nil {
+		return err
+	}
+
+	configRefs := convert.ConfigReferencesFromGRPC(container.Configs)
+	if err := c.backend.SetContainerConfigReferences(cr.ID, configRefs); err != nil {
+		return err
+	}
+
+	if err := c.backend.UpdateContainerServiceConfig(cr.ID, c.container.serviceConfig()); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// checkMounts ensures that the provided mounts won't have any host-specific
+// problems at start up. For example, we disallow bind mounts without an
+// existing path, which is slightly different from the container API.
+func (c *containerAdapter) checkMounts() error {
+	spec := c.container.spec()
+	for _, mount := range spec.Mounts {
+		switch mount.Type {
+		case api.MountTypeBind:
+			if _, err := os.Stat(mount.Source); os.IsNotExist(err) {
+				return fmt.Errorf("invalid bind mount source, source path not found: %s", mount.Source)
+			}
+		}
+	}
+
+	return nil
+}
+
+func (c *containerAdapter) start(ctx context.Context) error {
+	if err := c.checkMounts(); err != nil {
+		return err
+	}
+
+	return c.backend.ContainerStart(c.container.name(), nil, "", "")
+}
+
+func (c *containerAdapter) inspect(ctx context.Context) (types.ContainerJSON, error) {
+	cs, err := c.backend.ContainerInspectCurrent(c.container.name(), false)
+	if ctx.Err() != nil {
+		return types.ContainerJSON{}, ctx.Err()
+	}
+	if err != nil {
+		return types.ContainerJSON{}, err
+	}
+	return *cs, nil
+}
+
+// events issues a call to the events API and returns a channel with all
+// events. The stream of events can be shut down by cancelling the context.
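+//
+// A minimal consumption sketch (hypothetical caller inside this package, not
+// part of the vendored file; handle is a placeholder):
+//
+//	evs := c.events(ctx)
+//	for {
+//		select {
+//		case ev := <-evs:
+//			handle(ev)
+//		case <-ctx.Done():
+//			return
+//		}
+//	}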
+func (c *containerAdapter) events(ctx context.Context) <-chan events.Message { + log.G(ctx).Debugf("waiting on events") + buffer, l := c.backend.SubscribeToEvents(time.Time{}, time.Time{}, c.container.eventFilter()) + eventsq := make(chan events.Message, len(buffer)) + + for _, event := range buffer { + eventsq <- event + } + + go func() { + defer c.backend.UnsubscribeFromEvents(l) + + for { + select { + case ev := <-l: + jev, ok := ev.(events.Message) + if !ok { + log.G(ctx).Warnf("unexpected event message: %q", ev) + continue + } + select { + case eventsq <- jev: + case <-ctx.Done(): + return + } + case <-ctx.Done(): + return + } + } + }() + + return eventsq +} + +func (c *containerAdapter) wait(ctx context.Context) (<-chan containerpkg.StateStatus, error) { + return c.backend.ContainerWait(ctx, c.container.nameOrID(), containerpkg.WaitConditionNotRunning) +} + +func (c *containerAdapter) shutdown(ctx context.Context) error { + // Default stop grace period to nil (daemon will use the stopTimeout of the container) + var stopgrace *int + spec := c.container.spec() + if spec.StopGracePeriod != nil { + stopgraceValue := int(spec.StopGracePeriod.Seconds) + stopgrace = &stopgraceValue + } + return c.backend.ContainerStop(c.container.name(), stopgrace) +} + +func (c *containerAdapter) terminate(ctx context.Context) error { + return c.backend.ContainerKill(c.container.name(), uint64(syscall.SIGKILL)) +} + +func (c *containerAdapter) remove(ctx context.Context) error { + return c.backend.ContainerRm(c.container.name(), &types.ContainerRmConfig{ + RemoveVolume: true, + ForceRemove: true, + }) +} + +func (c *containerAdapter) createVolumes(ctx context.Context) error { + // Create plugin volumes that are embedded inside a Mount + for _, mount := range c.container.task.Spec.GetContainer().Mounts { + if mount.Type != api.MountTypeVolume { + continue + } + + if mount.VolumeOptions == nil { + continue + } + + if mount.VolumeOptions.DriverConfig == nil { + continue + } + + req := c.container.volumeCreateRequest(&mount) + + // Check if this volume exists on the engine + if _, err := c.backend.VolumeCreate(req.Name, req.Driver, req.DriverOpts, req.Labels); err != nil { + // TODO(amitshukla): Today, volume create through the engine api does not return an error + // when the named volume with the same parameters already exists. + // It returns an error if the driver name is different - that is a valid error + return err + } + + } + + return nil +} + +func (c *containerAdapter) activateServiceBinding() error { + return c.backend.ActivateContainerServiceBinding(c.container.name()) +} + +func (c *containerAdapter) deactivateServiceBinding() error { + return c.backend.DeactivateContainerServiceBinding(c.container.name()) +} + +func (c *containerAdapter) logs(ctx context.Context, options api.LogSubscriptionOptions) (<-chan *backend.LogMessage, error) { + apiOptions := &types.ContainerLogsOptions{ + Follow: options.Follow, + + // Always say yes to Timestamps and Details. we make the decision + // of whether to return these to the user or not way higher up the + // stack. + Timestamps: true, + Details: true, + } + + if options.Since != nil { + since, err := gogotypes.TimestampFromProto(options.Since) + if err != nil { + return nil, err + } + // print since as this formatted string because the docker container + // logs interface expects it like this. 
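+		// e.g. a Since of Unix()=1500000000 with Nanosecond()=1 is rendered
+		// as "1500000000.000000001".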
+		// see github.com/docker/docker/api/types/time.ParseTimestamps
+		apiOptions.Since = fmt.Sprintf("%d.%09d", since.Unix(), int64(since.Nanosecond()))
+	}
+
+	if options.Tail < 0 {
+		// See protobuf documentation for details of how this works.
+		apiOptions.Tail = fmt.Sprint(-options.Tail - 1)
+	} else if options.Tail > 0 {
+		return nil, errors.New("tail relative to start of logs not supported via docker API")
+	}
+
+	if len(options.Streams) == 0 {
+		// empty == all
+		apiOptions.ShowStdout, apiOptions.ShowStderr = true, true
+	} else {
+		for _, stream := range options.Streams {
+			switch stream {
+			case api.LogStreamStdout:
+				apiOptions.ShowStdout = true
+			case api.LogStreamStderr:
+				apiOptions.ShowStderr = true
+			}
+		}
+	}
+	msgs, err := c.backend.ContainerLogs(ctx, c.container.name(), apiOptions)
+	if err != nil {
+		return nil, err
+	}
+	return msgs, nil
+}
+
+// todo: typed/wrapped errors
+func isContainerCreateNameConflict(err error) bool {
+	return strings.Contains(err.Error(), "Conflict. The name")
+}
+
+func isUnknownContainer(err error) bool {
+	return strings.Contains(err.Error(), "No such container:")
+}
+
+func isStoppedContainer(err error) bool {
+	return strings.Contains(err.Error(), "is already stopped")
+}
diff --git a/vendor/github.com/moby/moby/daemon/cluster/executor/container/attachment.go b/vendor/github.com/moby/moby/daemon/cluster/executor/container/attachment.go
new file mode 100644
index 000000000..54f95a1fb
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/cluster/executor/container/attachment.go
@@ -0,0 +1,81 @@
+package container
+
+import (
+	executorpkg "github.com/docker/docker/daemon/cluster/executor"
+	"github.com/docker/swarmkit/agent/exec"
+	"github.com/docker/swarmkit/api"
+	"golang.org/x/net/context"
+)
+
+// networkAttacherController implements agent.Controller against docker's API.
+//
+// networkAttacherController manages, from the agent's point of view, the
+// lifecycle of the network attachment of an unmanaged docker container that
+// is handled as a task. It provides the attachment information the unmanaged
+// container needs to attach to the network and run.
+type networkAttacherController struct {
+	backend executorpkg.Backend
+	task    *api.Task
+	adapter *containerAdapter
+	closed  chan struct{}
+}
+
+func newNetworkAttacherController(b executorpkg.Backend, task *api.Task, dependencies exec.DependencyGetter) (*networkAttacherController, error) {
+	adapter, err := newContainerAdapter(b, task, dependencies)
+	if err != nil {
+		return nil, err
+	}
+
+	return &networkAttacherController{
+		backend: b,
+		task:    task,
+		adapter: adapter,
+		closed:  make(chan struct{}),
+	}, nil
+}
+
+func (nc *networkAttacherController) Update(ctx context.Context, t *api.Task) error {
+	return nil
+}
+
+func (nc *networkAttacherController) Prepare(ctx context.Context) error {
+	// Make sure all the networks that the task needs are created.
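+	// createNetworks tolerates networks that already exist on the engine
+	// (libnetwork.NetworkNameError is skipped), so this step is idempotent.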
+ if err := nc.adapter.createNetworks(ctx); err != nil { + return err + } + + return nil +} + +func (nc *networkAttacherController) Start(ctx context.Context) error { + return nc.adapter.networkAttach(ctx) +} + +func (nc *networkAttacherController) Wait(pctx context.Context) error { + ctx, cancel := context.WithCancel(pctx) + defer cancel() + + return nc.adapter.waitForDetach(ctx) +} + +func (nc *networkAttacherController) Shutdown(ctx context.Context) error { + return nil +} + +func (nc *networkAttacherController) Terminate(ctx context.Context) error { + return nil +} + +func (nc *networkAttacherController) Remove(ctx context.Context) error { + // Try removing the network referenced in this task in case this + // task is the last one referencing it + if err := nc.adapter.removeNetworks(ctx); err != nil { + return err + } + + return nil +} + +func (nc *networkAttacherController) Close() error { + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/executor/container/container.go b/vendor/github.com/moby/moby/daemon/cluster/executor/container/container.go new file mode 100644 index 000000000..3ca7b5dce --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/executor/container/container.go @@ -0,0 +1,679 @@ +package container + +import ( + "errors" + "fmt" + "net" + "strconv" + "strings" + "time" + + "github.com/Sirupsen/logrus" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + enginecontainer "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + enginemount "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/network" + volumetypes "github.com/docker/docker/api/types/volume" + "github.com/docker/docker/daemon/cluster/convert" + executorpkg "github.com/docker/docker/daemon/cluster/executor" + clustertypes "github.com/docker/docker/daemon/cluster/provider" + "github.com/docker/go-connections/nat" + netconst "github.com/docker/libnetwork/datastore" + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/genericresource" + "github.com/docker/swarmkit/template" + gogotypes "github.com/gogo/protobuf/types" +) + +const ( + // Explicitly use the kernel's default setting for CPU quota of 100ms. + // https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt + cpuQuotaPeriod = 100 * time.Millisecond + + // systemLabelPrefix represents the reserved namespace for system labels. + systemLabelPrefix = "com.docker.swarm" +) + +// containerConfig converts task properties into docker container compatible +// components. +type containerConfig struct { + task *api.Task + networksAttachments map[string]*api.NetworkAttachment +} + +// newContainerConfig returns a validated container config. No methods should +// return an error if this function returns without error. 
+func newContainerConfig(t *api.Task) (*containerConfig, error) { + var c containerConfig + return &c, c.setTask(t) +} + +func (c *containerConfig) setTask(t *api.Task) error { + if t.Spec.GetContainer() == nil && t.Spec.GetAttachment() == nil { + return exec.ErrRuntimeUnsupported + } + + container := t.Spec.GetContainer() + if container != nil { + if container.Image == "" { + return ErrImageRequired + } + + if err := validateMounts(container.Mounts); err != nil { + return err + } + } + + // index the networks by name + c.networksAttachments = make(map[string]*api.NetworkAttachment, len(t.Networks)) + for _, attachment := range t.Networks { + c.networksAttachments[attachment.Network.Spec.Annotations.Name] = attachment + } + + c.task = t + + if t.Spec.GetContainer() != nil { + preparedSpec, err := template.ExpandContainerSpec(nil, t) + if err != nil { + return err + } + c.task.Spec.Runtime = &api.TaskSpec_Container{ + Container: preparedSpec, + } + } + + return nil +} + +func (c *containerConfig) networkAttachmentContainerID() string { + attachment := c.task.Spec.GetAttachment() + if attachment == nil { + return "" + } + + return attachment.ContainerID +} + +func (c *containerConfig) taskID() string { + return c.task.ID +} + +func (c *containerConfig) endpoint() *api.Endpoint { + return c.task.Endpoint +} + +func (c *containerConfig) spec() *api.ContainerSpec { + return c.task.Spec.GetContainer() +} + +func (c *containerConfig) nameOrID() string { + if c.task.Spec.GetContainer() != nil { + return c.name() + } + + return c.networkAttachmentContainerID() +} + +func (c *containerConfig) name() string { + if c.task.Annotations.Name != "" { + // if set, use the container Annotations.Name field, set in the orchestrator. + return c.task.Annotations.Name + } + + slot := fmt.Sprint(c.task.Slot) + if slot == "" || c.task.Slot == 0 { + slot = c.task.NodeID + } + + // fallback to service.slot.id. + return fmt.Sprintf("%s.%s.%s", c.task.ServiceAnnotations.Name, slot, c.task.ID) +} + +func (c *containerConfig) image() string { + raw := c.spec().Image + ref, err := reference.ParseNormalizedNamed(raw) + if err != nil { + return raw + } + return reference.FamiliarString(reference.TagNameOnly(ref)) +} + +func (c *containerConfig) portBindings() nat.PortMap { + portBindings := nat.PortMap{} + if c.task.Endpoint == nil { + return portBindings + } + + for _, portConfig := range c.task.Endpoint.Ports { + if portConfig.PublishMode != api.PublishModeHost { + continue + } + + port := nat.Port(fmt.Sprintf("%d/%s", portConfig.TargetPort, strings.ToLower(portConfig.Protocol.String()))) + binding := []nat.PortBinding{ + {}, + } + + if portConfig.PublishedPort != 0 { + binding[0].HostPort = strconv.Itoa(int(portConfig.PublishedPort)) + } + portBindings[port] = binding + } + + return portBindings +} + +func (c *containerConfig) exposedPorts() map[nat.Port]struct{} { + exposedPorts := make(map[nat.Port]struct{}) + if c.task.Endpoint == nil { + return exposedPorts + } + + for _, portConfig := range c.task.Endpoint.Ports { + if portConfig.PublishMode != api.PublishModeHost { + continue + } + + port := nat.Port(fmt.Sprintf("%d/%s", portConfig.TargetPort, strings.ToLower(portConfig.Protocol.String()))) + exposedPorts[port] = struct{}{} + } + + return exposedPorts +} + +func (c *containerConfig) config() *enginecontainer.Config { + genericEnvs := genericresource.EnvFormat(c.task.AssignedGenericResources, "DOCKER_RESOURCE") + env := append(c.spec().Env, genericEnvs...) 
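+	// Generic-resource assignments surface inside the container as environment
+	// variables with the DOCKER_RESOURCE prefix (e.g. DOCKER_RESOURCE_<KIND>=<value>),
+	// appended after the user-specified env.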
+ + config := &enginecontainer.Config{ + Labels: c.labels(), + StopSignal: c.spec().StopSignal, + Tty: c.spec().TTY, + OpenStdin: c.spec().OpenStdin, + User: c.spec().User, + Env: env, + Hostname: c.spec().Hostname, + WorkingDir: c.spec().Dir, + Image: c.image(), + ExposedPorts: c.exposedPorts(), + Healthcheck: c.healthcheck(), + } + + if len(c.spec().Command) > 0 { + // If Command is provided, we replace the whole invocation with Command + // by replacing Entrypoint and specifying Cmd. Args is ignored in this + // case. + config.Entrypoint = append(config.Entrypoint, c.spec().Command...) + config.Cmd = append(config.Cmd, c.spec().Args...) + } else if len(c.spec().Args) > 0 { + // In this case, we assume the image has an Entrypoint and Args + // specifies the arguments for that entrypoint. + config.Cmd = c.spec().Args + } + + return config +} + +func (c *containerConfig) labels() map[string]string { + var ( + system = map[string]string{ + "task": "", // mark as cluster task + "task.id": c.task.ID, + "task.name": c.name(), + "node.id": c.task.NodeID, + "service.id": c.task.ServiceID, + "service.name": c.task.ServiceAnnotations.Name, + } + labels = make(map[string]string) + ) + + // base labels are those defined in the spec. + for k, v := range c.spec().Labels { + labels[k] = v + } + + // we then apply the overrides from the task, which may be set via the + // orchestrator. + for k, v := range c.task.Annotations.Labels { + labels[k] = v + } + + // finally, we apply the system labels, which override all labels. + for k, v := range system { + labels[strings.Join([]string{systemLabelPrefix, k}, ".")] = v + } + + return labels +} + +func (c *containerConfig) mounts() []enginemount.Mount { + var r []enginemount.Mount + for _, mount := range c.spec().Mounts { + r = append(r, convertMount(mount)) + } + return r +} + +func convertMount(m api.Mount) enginemount.Mount { + mount := enginemount.Mount{ + Source: m.Source, + Target: m.Target, + ReadOnly: m.ReadOnly, + } + + switch m.Type { + case api.MountTypeBind: + mount.Type = enginemount.TypeBind + case api.MountTypeVolume: + mount.Type = enginemount.TypeVolume + case api.MountTypeTmpfs: + mount.Type = enginemount.TypeTmpfs + } + + if m.BindOptions != nil { + mount.BindOptions = &enginemount.BindOptions{} + switch m.BindOptions.Propagation { + case api.MountPropagationRPrivate: + mount.BindOptions.Propagation = enginemount.PropagationRPrivate + case api.MountPropagationPrivate: + mount.BindOptions.Propagation = enginemount.PropagationPrivate + case api.MountPropagationRSlave: + mount.BindOptions.Propagation = enginemount.PropagationRSlave + case api.MountPropagationSlave: + mount.BindOptions.Propagation = enginemount.PropagationSlave + case api.MountPropagationRShared: + mount.BindOptions.Propagation = enginemount.PropagationRShared + case api.MountPropagationShared: + mount.BindOptions.Propagation = enginemount.PropagationShared + } + } + + if m.VolumeOptions != nil { + mount.VolumeOptions = &enginemount.VolumeOptions{ + NoCopy: m.VolumeOptions.NoCopy, + } + if m.VolumeOptions.Labels != nil { + mount.VolumeOptions.Labels = make(map[string]string, len(m.VolumeOptions.Labels)) + for k, v := range m.VolumeOptions.Labels { + mount.VolumeOptions.Labels[k] = v + } + } + if m.VolumeOptions.DriverConfig != nil { + mount.VolumeOptions.DriverConfig = &enginemount.Driver{ + Name: m.VolumeOptions.DriverConfig.Name, + } + if m.VolumeOptions.DriverConfig.Options != nil { + mount.VolumeOptions.DriverConfig.Options = make(map[string]string, 
len(m.VolumeOptions.DriverConfig.Options)) + for k, v := range m.VolumeOptions.DriverConfig.Options { + mount.VolumeOptions.DriverConfig.Options[k] = v + } + } + } + } + + if m.TmpfsOptions != nil { + mount.TmpfsOptions = &enginemount.TmpfsOptions{ + SizeBytes: m.TmpfsOptions.SizeBytes, + Mode: m.TmpfsOptions.Mode, + } + } + + return mount +} + +func (c *containerConfig) healthcheck() *enginecontainer.HealthConfig { + hcSpec := c.spec().Healthcheck + if hcSpec == nil { + return nil + } + interval, _ := gogotypes.DurationFromProto(hcSpec.Interval) + timeout, _ := gogotypes.DurationFromProto(hcSpec.Timeout) + startPeriod, _ := gogotypes.DurationFromProto(hcSpec.StartPeriod) + return &enginecontainer.HealthConfig{ + Test: hcSpec.Test, + Interval: interval, + Timeout: timeout, + Retries: int(hcSpec.Retries), + StartPeriod: startPeriod, + } +} + +func (c *containerConfig) hostConfig() *enginecontainer.HostConfig { + hc := &enginecontainer.HostConfig{ + Resources: c.resources(), + GroupAdd: c.spec().Groups, + PortBindings: c.portBindings(), + Mounts: c.mounts(), + ReadonlyRootfs: c.spec().ReadOnly, + } + + if c.spec().DNSConfig != nil { + hc.DNS = c.spec().DNSConfig.Nameservers + hc.DNSSearch = c.spec().DNSConfig.Search + hc.DNSOptions = c.spec().DNSConfig.Options + } + + c.applyPrivileges(hc) + + // The format of extra hosts on swarmkit is specified in: + // http://man7.org/linux/man-pages/man5/hosts.5.html + // IP_address canonical_hostname [aliases...] + // However, the format of ExtraHosts in HostConfig is + // : + // We need to do the conversion here + // (Alias is ignored for now) + for _, entry := range c.spec().Hosts { + parts := strings.Fields(entry) + if len(parts) > 1 { + hc.ExtraHosts = append(hc.ExtraHosts, fmt.Sprintf("%s:%s", parts[1], parts[0])) + } + } + + if c.task.LogDriver != nil { + hc.LogConfig = enginecontainer.LogConfig{ + Type: c.task.LogDriver.Name, + Config: c.task.LogDriver.Options, + } + } + + if len(c.task.Networks) > 0 { + labels := c.task.Networks[0].Network.Spec.Annotations.Labels + name := c.task.Networks[0].Network.Spec.Annotations.Name + if v, ok := labels["com.docker.swarm.predefined"]; ok && v == "true" { + hc.NetworkMode = enginecontainer.NetworkMode(name) + } + } + + return hc +} + +// This handles the case of volumes that are defined inside a service Mount +func (c *containerConfig) volumeCreateRequest(mount *api.Mount) *volumetypes.VolumesCreateBody { + var ( + driverName string + driverOpts map[string]string + labels map[string]string + ) + + if mount.VolumeOptions != nil && mount.VolumeOptions.DriverConfig != nil { + driverName = mount.VolumeOptions.DriverConfig.Name + driverOpts = mount.VolumeOptions.DriverConfig.Options + labels = mount.VolumeOptions.Labels + } + + if mount.VolumeOptions != nil { + return &volumetypes.VolumesCreateBody{ + Name: mount.Source, + Driver: driverName, + DriverOpts: driverOpts, + Labels: labels, + } + } + return nil +} + +func (c *containerConfig) resources() enginecontainer.Resources { + resources := enginecontainer.Resources{} + + // If no limits are specified let the engine use its defaults. + // + // TODO(aluzzardi): We might want to set some limits anyway otherwise + // "unlimited" tasks will step over the reservation of other tasks. + r := c.task.Spec.Resources + if r == nil || r.Limits == nil { + return resources + } + + if r.Limits.MemoryBytes > 0 { + resources.Memory = r.Limits.MemoryBytes + } + + if r.Limits.NanoCPUs > 0 { + // CPU Period must be set in microseconds. 
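+		// Worked example (illustrative values): cpuQuotaPeriod = 100ms, so
+		// CPUPeriod = 100000µs; a limit of 1.5 CPUs (NanoCPUs = 1500000000)
+		// yields CPUQuota = 1500000000 * 100000 / 1e9 = 150000µs.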
+ resources.CPUPeriod = int64(cpuQuotaPeriod / time.Microsecond) + resources.CPUQuota = r.Limits.NanoCPUs * resources.CPUPeriod / 1e9 + } + + return resources +} + +// Docker daemon supports just 1 network during container create. +func (c *containerConfig) createNetworkingConfig(b executorpkg.Backend) *network.NetworkingConfig { + var networks []*api.NetworkAttachment + if c.task.Spec.GetContainer() != nil || c.task.Spec.GetAttachment() != nil { + networks = c.task.Networks + } + + epConfig := make(map[string]*network.EndpointSettings) + if len(networks) > 0 { + epConfig[networks[0].Network.Spec.Annotations.Name] = getEndpointConfig(networks[0], b) + } + + return &network.NetworkingConfig{EndpointsConfig: epConfig} +} + +// TODO: Merge this function with createNetworkingConfig after daemon supports multiple networks in container create +func (c *containerConfig) connectNetworkingConfig(b executorpkg.Backend) *network.NetworkingConfig { + var networks []*api.NetworkAttachment + if c.task.Spec.GetContainer() != nil { + networks = c.task.Networks + } + // First network is used during container create. Other networks are used in "docker network connect" + if len(networks) < 2 { + return nil + } + + epConfig := make(map[string]*network.EndpointSettings) + for _, na := range networks[1:] { + epConfig[na.Network.Spec.Annotations.Name] = getEndpointConfig(na, b) + } + return &network.NetworkingConfig{EndpointsConfig: epConfig} +} + +func getEndpointConfig(na *api.NetworkAttachment, b executorpkg.Backend) *network.EndpointSettings { + var ipv4, ipv6 string + for _, addr := range na.Addresses { + ip, _, err := net.ParseCIDR(addr) + if err != nil { + continue + } + + if ip.To4() != nil { + ipv4 = ip.String() + continue + } + + if ip.To16() != nil { + ipv6 = ip.String() + } + } + + n := &network.EndpointSettings{ + NetworkID: na.Network.ID, + IPAMConfig: &network.EndpointIPAMConfig{ + IPv4Address: ipv4, + IPv6Address: ipv6, + }, + DriverOpts: na.DriverAttachmentOpts, + } + if v, ok := na.Network.Spec.Annotations.Labels["com.docker.swarm.predefined"]; ok && v == "true" { + if ln, err := b.FindNetwork(na.Network.Spec.Annotations.Name); err == nil { + n.NetworkID = ln.ID() + } + } + return n +} + +func (c *containerConfig) virtualIP(networkID string) string { + if c.task.Endpoint == nil { + return "" + } + + for _, eVip := range c.task.Endpoint.VirtualIPs { + // We only support IPv4 VIPs for now. + if eVip.NetworkID == networkID { + vip, _, err := net.ParseCIDR(eVip.Addr) + if err != nil { + return "" + } + + return vip.String() + } + } + + return "" +} + +func (c *containerConfig) serviceConfig() *clustertypes.ServiceConfig { + if len(c.task.Networks) == 0 { + return nil + } + + logrus.Debugf("Creating service config in agent for t = %+v", c.task) + svcCfg := &clustertypes.ServiceConfig{ + Name: c.task.ServiceAnnotations.Name, + Aliases: make(map[string][]string), + ID: c.task.ServiceID, + VirtualAddresses: make(map[string]*clustertypes.VirtualAddress), + } + + for _, na := range c.task.Networks { + svcCfg.VirtualAddresses[na.Network.ID] = &clustertypes.VirtualAddress{ + // We support only IPv4 virtual IP for now. 
+ IPv4: c.virtualIP(na.Network.ID), + } + if len(na.Aliases) > 0 { + svcCfg.Aliases[na.Network.ID] = na.Aliases + } + } + + if c.task.Endpoint != nil { + for _, ePort := range c.task.Endpoint.Ports { + if ePort.PublishMode != api.PublishModeIngress { + continue + } + + svcCfg.ExposedPorts = append(svcCfg.ExposedPorts, &clustertypes.PortConfig{ + Name: ePort.Name, + Protocol: int32(ePort.Protocol), + TargetPort: ePort.TargetPort, + PublishedPort: ePort.PublishedPort, + }) + } + } + + return svcCfg +} + +// networks returns a list of network names attached to the container. The +// returned name can be used to lookup the corresponding network create +// options. +func (c *containerConfig) networks() []string { + var networks []string + + for name := range c.networksAttachments { + networks = append(networks, name) + } + + return networks +} + +func (c *containerConfig) networkCreateRequest(name string) (clustertypes.NetworkCreateRequest, error) { + na, ok := c.networksAttachments[name] + if !ok { + return clustertypes.NetworkCreateRequest{}, errors.New("container: unknown network referenced") + } + + options := types.NetworkCreate{ + // ID: na.Network.ID, + Labels: na.Network.Spec.Annotations.Labels, + Internal: na.Network.Spec.Internal, + Attachable: na.Network.Spec.Attachable, + Ingress: convert.IsIngressNetwork(na.Network), + EnableIPv6: na.Network.Spec.Ipv6Enabled, + CheckDuplicate: true, + Scope: netconst.SwarmScope, + } + + if na.Network.Spec.GetNetwork() != "" { + options.ConfigFrom = &network.ConfigReference{ + Network: na.Network.Spec.GetNetwork(), + } + } + + if na.Network.DriverState != nil { + options.Driver = na.Network.DriverState.Name + options.Options = na.Network.DriverState.Options + } + if na.Network.IPAM != nil { + options.IPAM = &network.IPAM{ + Driver: na.Network.IPAM.Driver.Name, + Options: na.Network.IPAM.Driver.Options, + } + for _, ic := range na.Network.IPAM.Configs { + c := network.IPAMConfig{ + Subnet: ic.Subnet, + IPRange: ic.Range, + Gateway: ic.Gateway, + } + options.IPAM.Config = append(options.IPAM.Config, c) + } + } + + return clustertypes.NetworkCreateRequest{ + ID: na.Network.ID, + NetworkCreateRequest: types.NetworkCreateRequest{ + Name: name, + NetworkCreate: options, + }, + }, nil +} + +func (c *containerConfig) applyPrivileges(hc *enginecontainer.HostConfig) { + privileges := c.spec().Privileges + if privileges == nil { + return + } + + credentials := privileges.CredentialSpec + if credentials != nil { + switch credentials.Source.(type) { + case *api.Privileges_CredentialSpec_File: + hc.SecurityOpt = append(hc.SecurityOpt, "credentialspec=file://"+credentials.GetFile()) + case *api.Privileges_CredentialSpec_Registry: + hc.SecurityOpt = append(hc.SecurityOpt, "credentialspec=registry://"+credentials.GetRegistry()) + } + } + + selinux := privileges.SELinuxContext + if selinux != nil { + if selinux.Disable { + hc.SecurityOpt = append(hc.SecurityOpt, "label=disable") + } + if selinux.User != "" { + hc.SecurityOpt = append(hc.SecurityOpt, "label=user:"+selinux.User) + } + if selinux.Role != "" { + hc.SecurityOpt = append(hc.SecurityOpt, "label=role:"+selinux.Role) + } + if selinux.Level != "" { + hc.SecurityOpt = append(hc.SecurityOpt, "label=level:"+selinux.Level) + } + if selinux.Type != "" { + hc.SecurityOpt = append(hc.SecurityOpt, "label=type:"+selinux.Type) + } + } +} + +func (c containerConfig) eventFilter() filters.Args { + filter := filters.NewArgs() + filter.Add("type", events.ContainerEventType) + filter.Add("name", c.name()) + 
filter.Add("label", fmt.Sprintf("%v.task.id=%v", systemLabelPrefix, c.task.ID)) + return filter +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/executor/container/controller.go b/vendor/github.com/moby/moby/daemon/cluster/executor/container/controller.go new file mode 100644 index 000000000..7fa4a864d --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/executor/container/controller.go @@ -0,0 +1,690 @@ +package container + +import ( + "fmt" + "os" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + executorpkg "github.com/docker/docker/daemon/cluster/executor" + "github.com/docker/go-connections/nat" + "github.com/docker/libnetwork" + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/log" + gogotypes "github.com/gogo/protobuf/types" + "github.com/pkg/errors" + "golang.org/x/net/context" + "golang.org/x/time/rate" +) + +const defaultGossipConvergeDelay = 2 * time.Second + +// controller implements agent.Controller against docker's API. +// +// Most operations against docker's API are done through the container name, +// which is unique to the task. +type controller struct { + task *api.Task + adapter *containerAdapter + closed chan struct{} + err error + pulled chan struct{} // closed after pull + cancelPull func() // cancels pull context if not nil + pullErr error // pull error, only read after pulled closed +} + +var _ exec.Controller = &controller{} + +// NewController returns a docker exec runner for the provided task. +func newController(b executorpkg.Backend, task *api.Task, dependencies exec.DependencyGetter) (*controller, error) { + adapter, err := newContainerAdapter(b, task, dependencies) + if err != nil { + return nil, err + } + + return &controller{ + task: task, + adapter: adapter, + closed: make(chan struct{}), + }, nil +} + +func (r *controller) Task() (*api.Task, error) { + return r.task, nil +} + +// ContainerStatus returns the container-specific status for the task. +func (r *controller) ContainerStatus(ctx context.Context) (*api.ContainerStatus, error) { + ctnr, err := r.adapter.inspect(ctx) + if err != nil { + if isUnknownContainer(err) { + return nil, nil + } + return nil, err + } + return parseContainerStatus(ctnr) +} + +func (r *controller) PortStatus(ctx context.Context) (*api.PortStatus, error) { + ctnr, err := r.adapter.inspect(ctx) + if err != nil { + if isUnknownContainer(err) { + return nil, nil + } + + return nil, err + } + + return parsePortStatus(ctnr) +} + +// Update tasks a recent task update and applies it to the container. +func (r *controller) Update(ctx context.Context, t *api.Task) error { + // TODO(stevvooe): While assignment of tasks is idempotent, we do allow + // updates of metadata, such as labelling, as well as any other properties + // that make sense. + return nil +} + +// Prepare creates a container and ensures the image is pulled. +// +// If the container has already be created, exec.ErrTaskPrepared is returned. +func (r *controller) Prepare(ctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + // Make sure all the networks that the task needs are created. + if err := r.adapter.createNetworks(ctx); err != nil { + return err + } + + // Make sure all the volumes that the task needs are created. 
+ if err := r.adapter.createVolumes(ctx); err != nil { + return err + } + + if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" { + if r.pulled == nil { + // Fork the pull to a different context to allow pull to continue + // on re-entrant calls to Prepare. This ensures that Prepare can be + // idempotent and not incur the extra cost of pulling when + // cancelled on updates. + var pctx context.Context + + r.pulled = make(chan struct{}) + pctx, r.cancelPull = context.WithCancel(context.Background()) // TODO(stevvooe): Bind a context to the entire controller. + + go func() { + defer close(r.pulled) + r.pullErr = r.adapter.pullImage(pctx) // protected by closing r.pulled + }() + } + + select { + case <-ctx.Done(): + return ctx.Err() + case <-r.pulled: + if r.pullErr != nil { + // NOTE(stevvooe): We always try to pull the image to make sure we have + // the most up to date version. This will return an error, but we only + // log it. If the image truly doesn't exist, the create below will + // error out. + // + // This gives us some nice behavior where we use up to date versions of + // mutable tags, but will still run if the old image is available but a + // registry is down. + // + // If you don't want this behavior, lock down your image to an + // immutable tag or digest. + log.G(ctx).WithError(r.pullErr).Error("pulling image failed") + } + } + } + if err := r.adapter.create(ctx); err != nil { + if isContainerCreateNameConflict(err) { + if _, err := r.adapter.inspect(ctx); err != nil { + return err + } + + // container is already created. success! + return exec.ErrTaskPrepared + } + + return err + } + + return nil +} + +// Start the container. An error will be returned if the container is already started. +func (r *controller) Start(ctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + ctnr, err := r.adapter.inspect(ctx) + if err != nil { + return err + } + + // Detect whether the container has *ever* been started. If so, we don't + // issue the start. + // + // TODO(stevvooe): This is very racy. While reading inspect, another could + // start the process and we could end up starting it twice. + if ctnr.State.Status != "created" { + return exec.ErrTaskStarted + } + + for { + if err := r.adapter.start(ctx); err != nil { + if _, ok := err.(libnetwork.ErrNoSuchNetwork); ok { + // Retry network creation again if we + // failed because some of the networks + // were not found. 
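+				// This loop runs until start succeeds or fails with a
+				// non-network error; recreating the missing networks first
+				// is what makes the retry worthwhile.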
+ if err := r.adapter.createNetworks(ctx); err != nil { + return err + } + + continue + } + + return errors.Wrap(err, "starting container failed") + } + + break + } + + // no health check + if ctnr.Config == nil || ctnr.Config.Healthcheck == nil || len(ctnr.Config.Healthcheck.Test) == 0 || ctnr.Config.Healthcheck.Test[0] == "NONE" { + if err := r.adapter.activateServiceBinding(); err != nil { + log.G(ctx).WithError(err).Errorf("failed to activate service binding for container %s which has no healthcheck config", r.adapter.container.name()) + return err + } + return nil + } + + // wait for container to be healthy + eventq := r.adapter.events(ctx) + + var healthErr error + for { + select { + case event := <-eventq: + if !r.matchevent(event) { + continue + } + + switch event.Action { + case "die": // exit on terminal events + ctnr, err := r.adapter.inspect(ctx) + if err != nil { + return errors.Wrap(err, "die event received") + } else if ctnr.State.ExitCode != 0 { + return &exitError{code: ctnr.State.ExitCode, cause: healthErr} + } + + return nil + case "destroy": + // If we get here, something has gone wrong but we want to exit + // and report anyways. + return ErrContainerDestroyed + case "health_status: unhealthy": + // in this case, we stop the container and report unhealthy status + if err := r.Shutdown(ctx); err != nil { + return errors.Wrap(err, "unhealthy container shutdown failed") + } + // set health check error, and wait for container to fully exit ("die" event) + healthErr = ErrContainerUnhealthy + case "health_status: healthy": + if err := r.adapter.activateServiceBinding(); err != nil { + log.G(ctx).WithError(err).Errorf("failed to activate service binding for container %s after healthy event", r.adapter.container.name()) + return err + } + return nil + } + case <-ctx.Done(): + return ctx.Err() + case <-r.closed: + return r.err + } + } +} + +// Wait on the container to exit. +func (r *controller) Wait(pctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + ctx, cancel := context.WithCancel(pctx) + defer cancel() + + healthErr := make(chan error, 1) + go func() { + ectx, cancel := context.WithCancel(ctx) // cancel event context on first event + defer cancel() + if err := r.checkHealth(ectx); err == ErrContainerUnhealthy { + healthErr <- ErrContainerUnhealthy + if err := r.Shutdown(ectx); err != nil { + log.G(ectx).WithError(err).Debug("shutdown failed on unhealthy") + } + } + }() + + waitC, err := r.adapter.wait(ctx) + if err != nil { + return err + } + + if status := <-waitC; status.ExitCode() != 0 { + exitErr := &exitError{ + code: status.ExitCode(), + } + + // Set the cause if it is knowable. + select { + case e := <-healthErr: + exitErr.cause = e + default: + if status.Err() != nil { + exitErr.cause = status.Err() + } + } + + return exitErr + } + + return nil +} + +func (r *controller) hasServiceBinding() bool { + if r.task == nil { + return false + } + + // service is attached to a network besides the default bridge + for _, na := range r.task.Networks { + if na.Network == nil || + na.Network.DriverState == nil || + na.Network.DriverState.Name == "bridge" && na.Network.Spec.Annotations.Name == "bridge" { + continue + } + return true + } + + return false +} + +// Shutdown the container cleanly. 
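+//
+// Order of operations: cancel any in-flight image pull, deactivate the
+// service binding (if the task is attached to a non-default network) and
+// pause briefly for gossip to converge, then stop the container.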
+func (r *controller) Shutdown(ctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + if r.cancelPull != nil { + r.cancelPull() + } + + if r.hasServiceBinding() { + // remove container from service binding + if err := r.adapter.deactivateServiceBinding(); err != nil { + log.G(ctx).WithError(err).Warningf("failed to deactivate service binding for container %s", r.adapter.container.name()) + // Don't return an error here, because failure to deactivate + // the service binding is expected if the container was never + // started. + } + + // add a delay for gossip converge + // TODO(dongluochen): this delay should be configurable to fit different cluster size and network delay. + time.Sleep(defaultGossipConvergeDelay) + } + + if err := r.adapter.shutdown(ctx); err != nil { + if isUnknownContainer(err) || isStoppedContainer(err) { + return nil + } + + return err + } + + return nil +} + +// Terminate the container, with force. +func (r *controller) Terminate(ctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + if r.cancelPull != nil { + r.cancelPull() + } + + if err := r.adapter.terminate(ctx); err != nil { + if isUnknownContainer(err) { + return nil + } + + return err + } + + return nil +} + +// Remove the container and its resources. +func (r *controller) Remove(ctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + if r.cancelPull != nil { + r.cancelPull() + } + + // It may be necessary to shut down the task before removing it. + if err := r.Shutdown(ctx); err != nil { + if isUnknownContainer(err) { + return nil + } + // This may fail if the task was already shut down. + log.G(ctx).WithError(err).Debug("shutdown failed on removal") + } + + // Try removing networks referenced in this task in case this + // task is the last one referencing it + if err := r.adapter.removeNetworks(ctx); err != nil { + if isUnknownContainer(err) { + return nil + } + return err + } + + if err := r.adapter.remove(ctx); err != nil { + if isUnknownContainer(err) { + return nil + } + + return err + } + return nil +} + +// waitReady waits for a container to be "ready". +// Ready means it's past the started state. +func (r *controller) waitReady(pctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + ctx, cancel := context.WithCancel(pctx) + defer cancel() + + eventq := r.adapter.events(ctx) + + ctnr, err := r.adapter.inspect(ctx) + if err != nil { + if !isUnknownContainer(err) { + return errors.Wrap(err, "inspect container failed") + } + } else { + switch ctnr.State.Status { + case "running", "exited", "dead": + return nil + } + } + + for { + select { + case event := <-eventq: + if !r.matchevent(event) { + continue + } + + switch event.Action { + case "start": + return nil + } + case <-ctx.Done(): + return ctx.Err() + case <-r.closed: + return r.err + } + } +} + +func (r *controller) Logs(ctx context.Context, publisher exec.LogPublisher, options api.LogSubscriptionOptions) error { + if err := r.checkClosed(); err != nil { + return err + } + + // if we're following, wait for this container to be ready. there is a + // problem here: if the container will never be ready (for example, it has + // been totally deleted) then this will wait forever. however, this doesn't + // actually cause any UI issues, and shouldn't be a problem. the stuck wait + // will go away when the follow (context) is canceled. 
+	if options.Follow {
+		if err := r.waitReady(ctx); err != nil {
+			return errors.Wrap(err, "container not ready for logs")
+		}
+	}
+	// If we're not following, don't wait for the container to be ready; just
+	// call logs. If the container isn't ready, the call fails with an error,
+	// which is fine: we only want the logs available right now, with no
+	// follow.
+
+	logsContext, cancel := context.WithCancel(ctx)
+	msgs, err := r.adapter.logs(logsContext, options)
+	defer cancel()
+	if err != nil {
+		return errors.Wrap(err, "failed getting container logs")
+	}
+
+	var (
+		// Use a rate limiter to keep things under control, and to provide
+		// some ability to coalesce messages.
+		limiter = rate.NewLimiter(rate.Every(time.Second), 10<<20) // 10 MB/s
+		msgctx  = api.LogContext{
+			NodeID:    r.task.NodeID,
+			ServiceID: r.task.ServiceID,
+			TaskID:    r.task.ID,
+		}
+	)
+
+	for {
+		msg, ok := <-msgs
+		if !ok {
+			// we're done here, no more messages
+			return nil
+		}
+
+		if msg.Err != nil {
+			// the deferred cancel closes the adapter's log stream
+			return msg.Err
+		}
+
+		// wait here for the limiter to catch up
+		if err := limiter.WaitN(ctx, len(msg.Line)); err != nil {
+			return errors.Wrap(err, "failed rate limiter")
+		}
+		tsp, err := gogotypes.TimestampProto(msg.Timestamp)
+		if err != nil {
+			return errors.Wrap(err, "failed to convert timestamp")
+		}
+		var stream api.LogStream
+		if msg.Source == "stdout" {
+			stream = api.LogStreamStdout
+		} else if msg.Source == "stderr" {
+			stream = api.LogStreamStderr
+		}
+
+		// parse the details out of the Attrs map
+		var attrs []api.LogAttr
+		if len(msg.Attrs) != 0 {
+			attrs = make([]api.LogAttr, 0, len(msg.Attrs))
+			for _, attr := range msg.Attrs {
+				attrs = append(attrs, api.LogAttr{Key: attr.Key, Value: attr.Value})
+			}
+		}
+
+		if err := publisher.Publish(ctx, api.LogMessage{
+			Context:   msgctx,
+			Timestamp: tsp,
+			Stream:    stream,
+			Attrs:     attrs,
+			Data:      msg.Line,
+		}); err != nil {
+			return errors.Wrap(err, "failed to publish log message")
+		}
+	}
+}
+
+// Close the runner and clean up any ephemeral resources.
+func (r *controller) Close() error {
+	select {
+	case <-r.closed:
+		return r.err
+	default:
+		if r.cancelPull != nil {
+			r.cancelPull()
+		}
+
+		r.err = exec.ErrControllerClosed
+		close(r.closed)
+	}
+	return nil
+}
+
+func (r *controller) matchevent(event events.Message) bool {
+	if event.Type != events.ContainerEventType {
+		return false
+	}
+	// We can't filter using the container id, since that has a high chance
+	// of introducing a deadlock; see #33377.
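+	// The task-scoped container name (service.slot.id, see
+	// containerConfig.name) is unique on this node, so matching on it is safe.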
+ return event.Actor.Attributes["name"] == r.adapter.container.name() +} + +func (r *controller) checkClosed() error { + select { + case <-r.closed: + return r.err + default: + return nil + } +} + +func parseContainerStatus(ctnr types.ContainerJSON) (*api.ContainerStatus, error) { + status := &api.ContainerStatus{ + ContainerID: ctnr.ID, + PID: int32(ctnr.State.Pid), + ExitCode: int32(ctnr.State.ExitCode), + } + + return status, nil +} + +func parsePortStatus(ctnr types.ContainerJSON) (*api.PortStatus, error) { + status := &api.PortStatus{} + + if ctnr.NetworkSettings != nil && len(ctnr.NetworkSettings.Ports) > 0 { + exposedPorts, err := parsePortMap(ctnr.NetworkSettings.Ports) + if err != nil { + return nil, err + } + status.Ports = exposedPorts + } + + return status, nil +} + +func parsePortMap(portMap nat.PortMap) ([]*api.PortConfig, error) { + exposedPorts := make([]*api.PortConfig, 0, len(portMap)) + + for portProtocol, mapping := range portMap { + parts := strings.SplitN(string(portProtocol), "/", 2) + if len(parts) != 2 { + return nil, fmt.Errorf("invalid port mapping: %s", portProtocol) + } + + port, err := strconv.ParseUint(parts[0], 10, 16) + if err != nil { + return nil, err + } + + protocol := api.ProtocolTCP + switch strings.ToLower(parts[1]) { + case "tcp": + protocol = api.ProtocolTCP + case "udp": + protocol = api.ProtocolUDP + default: + return nil, fmt.Errorf("invalid protocol: %s", parts[1]) + } + + for _, binding := range mapping { + hostPort, err := strconv.ParseUint(binding.HostPort, 10, 16) + if err != nil { + return nil, err + } + + // TODO(aluzzardi): We're losing the port `name` here since + // there's no way to retrieve it back from the Engine. + exposedPorts = append(exposedPorts, &api.PortConfig{ + PublishMode: api.PublishModeHost, + Protocol: protocol, + TargetPort: uint32(port), + PublishedPort: uint32(hostPort), + }) + } + } + + return exposedPorts, nil +} + +type exitError struct { + code int + cause error +} + +func (e *exitError) Error() string { + if e.cause != nil { + return fmt.Sprintf("task: non-zero exit (%v): %v", e.code, e.cause) + } + + return fmt.Sprintf("task: non-zero exit (%v)", e.code) +} + +func (e *exitError) ExitCode() int { + return int(e.code) +} + +func (e *exitError) Cause() error { + return e.cause +} + +// checkHealth blocks until unhealthy container is detected or ctx exits +func (r *controller) checkHealth(ctx context.Context) error { + eventq := r.adapter.events(ctx) + + for { + select { + case <-ctx.Done(): + return nil + case <-r.closed: + return nil + case event := <-eventq: + if !r.matchevent(event) { + continue + } + + switch event.Action { + case "health_status: unhealthy": + return ErrContainerUnhealthy + } + } + } +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/executor/container/errors.go b/vendor/github.com/moby/moby/daemon/cluster/executor/container/errors.go new file mode 100644 index 000000000..535d9b570 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/executor/container/errors.go @@ -0,0 +1,17 @@ +package container + +import ( + "errors" +) + +var ( + // ErrImageRequired returned if a task is missing the image definition. + ErrImageRequired = errors.New("dockerexec: image required") + + // ErrContainerDestroyed returned when a container is prematurely destroyed + // during a wait call. 
+ ErrContainerDestroyed = errors.New("dockerexec: container destroyed") + + // ErrContainerUnhealthy returned if controller detects the health check failure + ErrContainerUnhealthy = errors.New("dockerexec: unhealthy container") +) diff --git a/vendor/github.com/moby/moby/daemon/cluster/executor/container/executor.go b/vendor/github.com/moby/moby/daemon/cluster/executor/container/executor.go new file mode 100644 index 000000000..f6fb6e55b --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/executor/container/executor.go @@ -0,0 +1,245 @@ +package container + +import ( + "fmt" + "sort" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" + swarmtypes "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/controllers/plugin" + "github.com/docker/docker/daemon/cluster/convert" + executorpkg "github.com/docker/docker/daemon/cluster/executor" + clustertypes "github.com/docker/docker/daemon/cluster/provider" + networktypes "github.com/docker/libnetwork/types" + "github.com/docker/swarmkit/agent" + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/naming" + "golang.org/x/net/context" +) + +type executor struct { + backend executorpkg.Backend + pluginBackend plugin.Backend + dependencies exec.DependencyManager +} + +// NewExecutor returns an executor from the docker client. +func NewExecutor(b executorpkg.Backend, p plugin.Backend) exec.Executor { + return &executor{ + backend: b, + pluginBackend: p, + dependencies: agent.NewDependencyManager(), + } +} + +// Describe returns the underlying node description from the docker client. +func (e *executor) Describe(ctx context.Context) (*api.NodeDescription, error) { + info, err := e.backend.SystemInfo() + if err != nil { + return nil, err + } + + plugins := map[api.PluginDescription]struct{}{} + addPlugins := func(typ string, names []string) { + for _, name := range names { + plugins[api.PluginDescription{ + Type: typ, + Name: name, + }] = struct{}{} + } + } + + // add v1 plugins + addPlugins("Volume", info.Plugins.Volume) + // Add builtin driver "overlay" (the only builtin multi-host driver) to + // the plugin list by default. 
+ addPlugins("Network", append([]string{"overlay"}, info.Plugins.Network...)) + addPlugins("Authorization", info.Plugins.Authorization) + addPlugins("Log", info.Plugins.Log) + + // add v2 plugins + v2Plugins, err := e.backend.PluginManager().List(filters.NewArgs()) + if err == nil { + for _, plgn := range v2Plugins { + for _, typ := range plgn.Config.Interface.Types { + if typ.Prefix != "docker" || !plgn.Enabled { + continue + } + plgnTyp := typ.Capability + switch typ.Capability { + case "volumedriver": + plgnTyp = "Volume" + case "networkdriver": + plgnTyp = "Network" + case "logdriver": + plgnTyp = "Log" + } + + plugins[api.PluginDescription{ + Type: plgnTyp, + Name: plgn.Name, + }] = struct{}{} + } + } + } + + pluginFields := make([]api.PluginDescription, 0, len(plugins)) + for k := range plugins { + pluginFields = append(pluginFields, k) + } + + sort.Sort(sortedPlugins(pluginFields)) + + // parse []string labels into a map[string]string + labels := map[string]string{} + for _, l := range info.Labels { + stringSlice := strings.SplitN(l, "=", 2) + // this will take the last value in the list for a given key + // ideally, one shouldn't assign multiple values to the same key + if len(stringSlice) > 1 { + labels[stringSlice[0]] = stringSlice[1] + } + } + + description := &api.NodeDescription{ + Hostname: info.Name, + Platform: &api.Platform{ + Architecture: info.Architecture, + OS: info.OSType, + }, + Engine: &api.EngineDescription{ + EngineVersion: info.ServerVersion, + Labels: labels, + Plugins: pluginFields, + }, + Resources: &api.Resources{ + NanoCPUs: int64(info.NCPU) * 1e9, + MemoryBytes: info.MemTotal, + Generic: convert.GenericResourcesToGRPC(info.GenericResources), + }, + } + + return description, nil +} + +func (e *executor) Configure(ctx context.Context, node *api.Node) error { + na := node.Attachment + if na == nil { + e.backend.ReleaseIngress() + return nil + } + + options := types.NetworkCreate{ + Driver: na.Network.DriverState.Name, + IPAM: &network.IPAM{ + Driver: na.Network.IPAM.Driver.Name, + }, + Options: na.Network.DriverState.Options, + Ingress: true, + CheckDuplicate: true, + } + + for _, ic := range na.Network.IPAM.Configs { + c := network.IPAMConfig{ + Subnet: ic.Subnet, + IPRange: ic.Range, + Gateway: ic.Gateway, + } + options.IPAM.Config = append(options.IPAM.Config, c) + } + + _, err := e.backend.SetupIngress(clustertypes.NetworkCreateRequest{ + ID: na.Network.ID, + NetworkCreateRequest: types.NetworkCreateRequest{ + Name: na.Network.Spec.Annotations.Name, + NetworkCreate: options, + }, + }, na.Addresses[0]) + + return err +} + +// Controller returns a docker container runner. 
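+//
+// Dispatch is by task runtime: attachment tasks get the network attacher
+// controller, container specs the regular container controller, and generic
+// runtimes (currently only the plugin runtime) their own controllers.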
+func (e *executor) Controller(t *api.Task) (exec.Controller, error) { + dependencyGetter := agent.Restrict(e.dependencies, t) + + if t.Spec.GetAttachment() != nil { + return newNetworkAttacherController(e.backend, t, dependencyGetter) + } + + var ctlr exec.Controller + switch r := t.Spec.GetRuntime().(type) { + case *api.TaskSpec_Generic: + logrus.WithFields(logrus.Fields{ + "kind": r.Generic.Kind, + "type_url": r.Generic.Payload.TypeUrl, + }).Debug("custom runtime requested") + runtimeKind, err := naming.Runtime(t.Spec) + if err != nil { + return ctlr, err + } + switch runtimeKind { + case string(swarmtypes.RuntimePlugin): + c, err := plugin.NewController(e.pluginBackend, t) + if err != nil { + return ctlr, err + } + ctlr = c + default: + return ctlr, fmt.Errorf("unsupported runtime type: %q", r.Generic.Kind) + } + case *api.TaskSpec_Container: + c, err := newController(e.backend, t, dependencyGetter) + if err != nil { + return ctlr, err + } + ctlr = c + default: + return ctlr, fmt.Errorf("unsupported runtime: %q", r) + } + + return ctlr, nil +} + +func (e *executor) SetNetworkBootstrapKeys(keys []*api.EncryptionKey) error { + nwKeys := []*networktypes.EncryptionKey{} + for _, key := range keys { + nwKey := &networktypes.EncryptionKey{ + Subsystem: key.Subsystem, + Algorithm: int32(key.Algorithm), + Key: make([]byte, len(key.Key)), + LamportTime: key.LamportTime, + } + copy(nwKey.Key, key.Key) + nwKeys = append(nwKeys, nwKey) + } + e.backend.SetNetworkBootstrapKeys(nwKeys) + + return nil +} + +func (e *executor) Secrets() exec.SecretsManager { + return e.dependencies.Secrets() +} + +func (e *executor) Configs() exec.ConfigsManager { + return e.dependencies.Configs() +} + +type sortedPlugins []api.PluginDescription + +func (sp sortedPlugins) Len() int { return len(sp) } + +func (sp sortedPlugins) Swap(i, j int) { sp[i], sp[j] = sp[j], sp[i] } + +func (sp sortedPlugins) Less(i, j int) bool { + if sp[i].Type != sp[j].Type { + return sp[i].Type < sp[j].Type + } + return sp[i].Name < sp[j].Name +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/executor/container/health_test.go b/vendor/github.com/moby/moby/daemon/cluster/executor/container/health_test.go new file mode 100644 index 000000000..b6f188557 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/executor/container/health_test.go @@ -0,0 +1,100 @@ +// +build !windows + +package container + +import ( + "testing" + "time" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon" + "github.com/docker/docker/daemon/events" + "github.com/docker/swarmkit/api" + "golang.org/x/net/context" +) + +func TestHealthStates(t *testing.T) { + + // set up environment: events, task, container .... 
+ e := events.New() + _, l, _ := e.Subscribe() + defer e.Evict(l) + + task := &api.Task{ + ID: "id", + ServiceID: "sid", + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Image: "image_name", + Labels: map[string]string{ + "com.docker.swarm.task.id": "id", + }, + }, + }, + }, + Annotations: api.Annotations{Name: "name"}, + } + + c := &container.Container{ + ID: "id", + Name: "name", + Config: &containertypes.Config{ + Image: "image_name", + Labels: map[string]string{ + "com.docker.swarm.task.id": "id", + }, + }, + } + + daemon := &daemon.Daemon{ + EventsService: e, + } + + controller, err := newController(daemon, task, nil) + if err != nil { + t.Fatalf("create controller fail %v", err) + } + + errChan := make(chan error, 1) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // fire checkHealth + go func() { + err := controller.checkHealth(ctx) + select { + case errChan <- err: + case <-ctx.Done(): + } + }() + + // send an event and expect to get expectedErr + // if expectedErr is nil, shouldn't get any error + logAndExpect := func(msg string, expectedErr error) { + daemon.LogContainerEvent(c, msg) + + timer := time.NewTimer(1 * time.Second) + defer timer.Stop() + + select { + case err := <-errChan: + if err != expectedErr { + t.Fatalf("expect error %v, but get %v", expectedErr, err) + } + case <-timer.C: + if expectedErr != nil { + t.Fatal("time limit exceeded, didn't get expected error") + } + } + } + + // events that are ignored by checkHealth + logAndExpect("health_status: running", nil) + logAndExpect("health_status: healthy", nil) + logAndExpect("die", nil) + + // unhealthy event will be caught by checkHealth + logAndExpect("health_status: unhealthy", ErrContainerUnhealthy) +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/executor/container/validate.go b/vendor/github.com/moby/moby/daemon/cluster/executor/container/validate.go new file mode 100644 index 000000000..af17f5b81 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/executor/container/validate.go @@ -0,0 +1,40 @@ +package container + +import ( + "errors" + "fmt" + "path/filepath" + + "github.com/docker/swarmkit/api" +) + +func validateMounts(mounts []api.Mount) error { + for _, mount := range mounts { + // Target must always be absolute + if !filepath.IsAbs(mount.Target) { + return fmt.Errorf("invalid mount target, must be an absolute path: %s", mount.Target) + } + + switch mount.Type { + // The checks on abs paths are required due to the container API confusing + // volume mounts as bind mounts when the source is absolute (and vice-versa) + // See #25253 + // TODO: This is probably not necessary once #22373 is merged + case api.MountTypeBind: + if !filepath.IsAbs(mount.Source) { + return fmt.Errorf("invalid bind mount source, must be an absolute path: %s", mount.Source) + } + case api.MountTypeVolume: + if filepath.IsAbs(mount.Source) { + return fmt.Errorf("invalid volume mount source, must not be an absolute path: %s", mount.Source) + } + case api.MountTypeTmpfs: + if mount.Source != "" { + return errors.New("invalid tmpfs source, source must be empty") + } + default: + return fmt.Errorf("invalid mount type: %s", mount.Type) + } + } + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/executor/container/validate_test.go b/vendor/github.com/moby/moby/daemon/cluster/executor/container/validate_test.go new file mode 100644 index 000000000..9d98e2c00 --- /dev/null +++ 
b/vendor/github.com/moby/moby/daemon/cluster/executor/container/validate_test.go
@@ -0,0 +1,141 @@
+package container
+
+import (
+	"io/ioutil"
+	"os"
+	"strings"
+	"testing"
+
+	"github.com/docker/docker/daemon"
+	"github.com/docker/docker/pkg/stringid"
+	"github.com/docker/swarmkit/api"
+)
+
+func newTestControllerWithMount(m api.Mount) (*controller, error) {
+	return newController(&daemon.Daemon{}, &api.Task{
+		ID:        stringid.GenerateRandomID(),
+		ServiceID: stringid.GenerateRandomID(),
+		Spec: api.TaskSpec{
+			Runtime: &api.TaskSpec_Container{
+				Container: &api.ContainerSpec{
+					Image: "image_name",
+					Labels: map[string]string{
+						"com.docker.swarm.task.id": "id",
+					},
+					Mounts: []api.Mount{m},
+				},
+			},
+		},
+	}, nil)
+}
+
+func TestControllerValidateMountBind(t *testing.T) {
+	// with improper source
+	if _, err := newTestControllerWithMount(api.Mount{
+		Type:   api.MountTypeBind,
+		Source: "foo",
+		Target: testAbsPath,
+	}); err == nil || !strings.Contains(err.Error(), "invalid bind mount source") {
+		t.Fatalf("expected error, got: %v", err)
+	}
+
+	// with non-existing source
+	if _, err := newTestControllerWithMount(api.Mount{
+		Type:   api.MountTypeBind,
+		Source: testAbsNonExistent,
+		Target: testAbsPath,
+	}); err != nil {
+		t.Fatalf("controller should not error at creation: %v", err)
+	}
+
+	// with proper source
+	tmpdir, err := ioutil.TempDir("", "TestControllerValidateMountBind")
+	if err != nil {
+		t.Fatalf("failed to create temp dir: %v", err)
+	}
+	defer os.Remove(tmpdir)
+
+	if _, err := newTestControllerWithMount(api.Mount{
+		Type:   api.MountTypeBind,
+		Source: tmpdir,
+		Target: testAbsPath,
+	}); err != nil {
+		t.Fatalf("expected no error, got: %v", err)
+	}
+}
+
+func TestControllerValidateMountVolume(t *testing.T) {
+	// with improper source
+	if _, err := newTestControllerWithMount(api.Mount{
+		Type:   api.MountTypeVolume,
+		Source: testAbsPath,
+		Target: testAbsPath,
+	}); err == nil || !strings.Contains(err.Error(), "invalid volume mount source") {
+		t.Fatalf("expected error, got: %v", err)
+	}
+
+	// with proper source
+	if _, err := newTestControllerWithMount(api.Mount{
+		Type:   api.MountTypeVolume,
+		Source: "foo",
+		Target: testAbsPath,
+	}); err != nil {
+		t.Fatalf("expected no error, got: %v", err)
+	}
+}
+
+func TestControllerValidateMountTarget(t *testing.T) {
+	tmpdir, err := ioutil.TempDir("", "TestControllerValidateMountTarget")
+	if err != nil {
+		t.Fatalf("failed to create temp dir: %v", err)
+	}
+	defer os.Remove(tmpdir)
+
+	// with improper target
+	if _, err := newTestControllerWithMount(api.Mount{
+		Type:   api.MountTypeBind,
+		Source: testAbsPath,
+		Target: "foo",
+	}); err == nil || !strings.Contains(err.Error(), "invalid mount target") {
+		t.Fatalf("expected error, got: %v", err)
+	}
+
+	// with proper target
+	if _, err := newTestControllerWithMount(api.Mount{
+		Type:   api.MountTypeBind,
+		Source: tmpdir,
+		Target: testAbsPath,
+	}); err != nil {
+		t.Fatalf("expected no error, got: %v", err)
+	}
+}
+
+func TestControllerValidateMountTmpfs(t *testing.T) {
+	// with improper source
+	if _, err := newTestControllerWithMount(api.Mount{
+		Type:   api.MountTypeTmpfs,
+		Source: "foo",
+		Target: testAbsPath,
+	}); err == nil || !strings.Contains(err.Error(), "invalid tmpfs source") {
+		t.Fatalf("expected error, got: %v", err)
+	}
+
+	// with proper (empty) source
+	if _, err := newTestControllerWithMount(api.Mount{
+		Type:   api.MountTypeTmpfs,
+		Target: testAbsPath,
+	}); err != nil {
+		t.Fatalf("expected no error, got: %v", err)
+	}
+}
+
+func TestControllerValidateMountInvalidType(t *testing.T) {
+	// with an invalid mount type
+	if _, err := newTestControllerWithMount(api.Mount{
+		Type:   api.Mount_MountType(9999),
+		Source: "foo",
+		Target: testAbsPath,
+	}); err == nil || !strings.Contains(err.Error(), "invalid mount type") {
+		t.Fatalf("expected error, got: %v", err)
+	}
+}
diff --git a/vendor/github.com/moby/moby/daemon/cluster/executor/container/validate_unix_test.go b/vendor/github.com/moby/moby/daemon/cluster/executor/container/validate_unix_test.go
new file mode 100644
index 000000000..c616eeef9
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/cluster/executor/container/validate_unix_test.go
@@ -0,0 +1,8 @@
+// +build !windows
+
+package container
+
+const (
+	testAbsPath        = "/foo"
+	testAbsNonExistent = "/some-non-existing-host-path/"
+)
diff --git a/vendor/github.com/moby/moby/daemon/cluster/executor/container/validate_windows_test.go b/vendor/github.com/moby/moby/daemon/cluster/executor/container/validate_windows_test.go
new file mode 100644
index 000000000..c346451d3
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/cluster/executor/container/validate_windows_test.go
@@ -0,0 +1,8 @@
+// +build windows
+
+package container
+
+const (
+	testAbsPath        = `c:\foo`
+	testAbsNonExistent = `c:\some-non-existing-host-path\`
+)
diff --git a/vendor/github.com/moby/moby/daemon/cluster/filters.go b/vendor/github.com/moby/moby/daemon/cluster/filters.go
new file mode 100644
index 000000000..efda7dc8d
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/cluster/filters.go
@@ -0,0 +1,123 @@
+package cluster
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/docker/docker/api/types/filters"
+	runconfigopts "github.com/docker/docker/runconfig/opts"
+	swarmapi "github.com/docker/swarmkit/api"
+)
+
+func newListNodesFilters(filter filters.Args) (*swarmapi.ListNodesRequest_Filters, error) {
+	accepted := map[string]bool{
+		"name":       true,
+		"id":         true,
+		"label":      true,
+		"role":       true,
+		"membership": true,
+	}
+	if err := filter.Validate(accepted); err != nil {
+		return nil, err
+	}
+	f := &swarmapi.ListNodesRequest_Filters{
+		NamePrefixes: filter.Get("name"),
+		IDPrefixes:   filter.Get("id"),
+		Labels:       runconfigopts.ConvertKVStringsToMap(filter.Get("label")),
+	}
+
+	for _, r := range filter.Get("role") {
+		if role, ok := swarmapi.NodeRole_value[strings.ToUpper(r)]; ok {
+			f.Roles = append(f.Roles, swarmapi.NodeRole(role))
+		} else if r != "" {
+			return nil, fmt.Errorf("Invalid role filter: '%s'", r)
+		}
+	}
+
+	for _, a := range filter.Get("membership") {
+		if membership, ok := swarmapi.NodeSpec_Membership_value[strings.ToUpper(a)]; ok {
+			f.Memberships = append(f.Memberships, swarmapi.NodeSpec_Membership(membership))
+		} else if a != "" {
+			return nil, fmt.Errorf("Invalid membership filter: '%s'", a)
+		}
+	}
+
+	return f, nil
+}
+
+func newListTasksFilters(filter filters.Args, transformFunc func(filters.Args) error) (*swarmapi.ListTasksRequest_Filters, error) {
+	accepted := map[string]bool{
+		"name":          true,
+		"id":            true,
+		"label":         true,
+		"service":       true,
+		"node":          true,
+		"desired-state": true,
+		// UpToDate is not meant to be exposed to users. It's for
+		// internal use in checking create/update progress. Therefore,
+		// we prefix it with a '_'.
+ "_up-to-date": true, + "runtime": true, + } + if err := filter.Validate(accepted); err != nil { + return nil, err + } + if transformFunc != nil { + if err := transformFunc(filter); err != nil { + return nil, err + } + } + f := &swarmapi.ListTasksRequest_Filters{ + NamePrefixes: filter.Get("name"), + IDPrefixes: filter.Get("id"), + Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), + ServiceIDs: filter.Get("service"), + NodeIDs: filter.Get("node"), + UpToDate: len(filter.Get("_up-to-date")) != 0, + Runtimes: filter.Get("runtime"), + } + + for _, s := range filter.Get("desired-state") { + if state, ok := swarmapi.TaskState_value[strings.ToUpper(s)]; ok { + f.DesiredStates = append(f.DesiredStates, swarmapi.TaskState(state)) + } else if s != "" { + return nil, fmt.Errorf("Invalid desired-state filter: '%s'", s) + } + } + + return f, nil +} + +func newListSecretsFilters(filter filters.Args) (*swarmapi.ListSecretsRequest_Filters, error) { + accepted := map[string]bool{ + "names": true, + "name": true, + "id": true, + "label": true, + } + if err := filter.Validate(accepted); err != nil { + return nil, err + } + return &swarmapi.ListSecretsRequest_Filters{ + Names: filter.Get("names"), + NamePrefixes: filter.Get("name"), + IDPrefixes: filter.Get("id"), + Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), + }, nil +} + +func newListConfigsFilters(filter filters.Args) (*swarmapi.ListConfigsRequest_Filters, error) { + accepted := map[string]bool{ + "name": true, + "id": true, + "label": true, + } + if err := filter.Validate(accepted); err != nil { + return nil, err + } + return &swarmapi.ListConfigsRequest_Filters{ + NamePrefixes: filter.Get("name"), + IDPrefixes: filter.Get("id"), + Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), + }, nil +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/filters_test.go b/vendor/github.com/moby/moby/daemon/cluster/filters_test.go new file mode 100644 index 000000000..fd0c8c369 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/filters_test.go @@ -0,0 +1,102 @@ +package cluster + +import ( + "testing" + + "github.com/docker/docker/api/types/filters" +) + +func TestNewListSecretsFilters(t *testing.T) { + validNameFilter := filters.NewArgs() + validNameFilter.Add("name", "test_name") + + validIDFilter := filters.NewArgs() + validIDFilter.Add("id", "7c9009d6720f6de3b492f5") + + validLabelFilter := filters.NewArgs() + validLabelFilter.Add("label", "type=test") + validLabelFilter.Add("label", "storage=ssd") + validLabelFilter.Add("label", "memory") + + validNamesFilter := filters.NewArgs() + validNamesFilter.Add("names", "test_name") + + validAllFilter := filters.NewArgs() + validAllFilter.Add("name", "nodeName") + validAllFilter.Add("id", "7c9009d6720f6de3b492f5") + validAllFilter.Add("label", "type=test") + validAllFilter.Add("label", "memory") + validAllFilter.Add("names", "test_name") + + validFilters := []filters.Args{ + validNameFilter, + validIDFilter, + validLabelFilter, + validNamesFilter, + validAllFilter, + } + + invalidTypeFilter := filters.NewArgs() + invalidTypeFilter.Add("nonexist", "aaaa") + + invalidFilters := []filters.Args{ + invalidTypeFilter, + } + + for _, filter := range validFilters { + if _, err := newListSecretsFilters(filter); err != nil { + t.Fatalf("Should get no error, got %v", err) + } + } + + for _, filter := range invalidFilters { + if _, err := newListSecretsFilters(filter); err == nil { + t.Fatalf("Should get an error for filter %v, while got nil", filter) + } + } +} 
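+
+// TestNewListTasksFiltersDesiredState is an illustrative sketch added by the
+// editor (not part of upstream): it exercises the same validate-then-translate
+// pattern for task filters, where "desired-state" values are upper-cased and
+// mapped through swarmapi.TaskState_value.
+func TestNewListTasksFiltersDesiredState(t *testing.T) {
+	args := filters.NewArgs()
+	args.Add("desired-state", "running")
+
+	f, err := newListTasksFilters(args, nil)
+	if err != nil {
+		t.Fatalf("Should get no error, got %v", err)
+	}
+	if len(f.DesiredStates) != 1 {
+		t.Fatalf("Expected exactly one desired state, got %v", f.DesiredStates)
+	}
+}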
+ +func TestNewListConfigsFilters(t *testing.T) { + validNameFilter := filters.NewArgs() + validNameFilter.Add("name", "test_name") + + validIDFilter := filters.NewArgs() + validIDFilter.Add("id", "7c9009d6720f6de3b492f5") + + validLabelFilter := filters.NewArgs() + validLabelFilter.Add("label", "type=test") + validLabelFilter.Add("label", "storage=ssd") + validLabelFilter.Add("label", "memory") + + validAllFilter := filters.NewArgs() + validAllFilter.Add("name", "nodeName") + validAllFilter.Add("id", "7c9009d6720f6de3b492f5") + validAllFilter.Add("label", "type=test") + validAllFilter.Add("label", "memory") + + validFilters := []filters.Args{ + validNameFilter, + validIDFilter, + validLabelFilter, + validAllFilter, + } + + invalidTypeFilter := filters.NewArgs() + invalidTypeFilter.Add("nonexist", "aaaa") + + invalidFilters := []filters.Args{ + invalidTypeFilter, + } + + for _, filter := range validFilters { + if _, err := newListConfigsFilters(filter); err != nil { + t.Fatalf("Should get no error, got %v", err) + } + } + + for _, filter := range invalidFilters { + if _, err := newListConfigsFilters(filter); err == nil { + t.Fatalf("Should get an error for filter %v, while got nil", filter) + } + } +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/helpers.go b/vendor/github.com/moby/moby/daemon/cluster/helpers.go new file mode 100644 index 000000000..a74118c42 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/helpers.go @@ -0,0 +1,245 @@ +package cluster + +import ( + "fmt" + + "github.com/docker/docker/api/errors" + swarmapi "github.com/docker/swarmkit/api" + "golang.org/x/net/context" +) + +func getSwarm(ctx context.Context, c swarmapi.ControlClient) (*swarmapi.Cluster, error) { + rl, err := c.ListClusters(ctx, &swarmapi.ListClustersRequest{}) + if err != nil { + return nil, err + } + + if len(rl.Clusters) == 0 { + return nil, errors.NewRequestNotFoundError(errNoSwarm) + } + + // TODO: assume one cluster only + return rl.Clusters[0], nil +} + +func getNode(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Node, error) { + // GetNode to match via full ID. + if rg, err := c.GetNode(ctx, &swarmapi.GetNodeRequest{NodeID: input}); err == nil { + return rg.Node, nil + } + + // If any error (including NotFound), ListNodes to match via full name. + rl, err := c.ListNodes(ctx, &swarmapi.ListNodesRequest{ + Filters: &swarmapi.ListNodesRequest_Filters{ + Names: []string{input}, + }, + }) + if err != nil || len(rl.Nodes) == 0 { + // If any error or 0 result, ListNodes to match via ID prefix. + rl, err = c.ListNodes(ctx, &swarmapi.ListNodesRequest{ + Filters: &swarmapi.ListNodesRequest_Filters{ + IDPrefixes: []string{input}, + }, + }) + } + if err != nil { + return nil, err + } + + if len(rl.Nodes) == 0 { + err := fmt.Errorf("node %s not found", input) + return nil, errors.NewRequestNotFoundError(err) + } + + if l := len(rl.Nodes); l > 1 { + return nil, fmt.Errorf("node %s is ambiguous (%d matches found)", input, l) + } + + return rl.Nodes[0], nil +} + +func getService(ctx context.Context, c swarmapi.ControlClient, input string, insertDefaults bool) (*swarmapi.Service, error) { + // GetService to match via full ID. + if rg, err := c.GetService(ctx, &swarmapi.GetServiceRequest{ServiceID: input, InsertDefaults: insertDefaults}); err == nil { + return rg.Service, nil + } + + // If any error (including NotFound), ListServices to match via full name. 
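+	// Resolution order here (and in this file's sibling helpers): exact ID,
+	// then exact name, then unambiguous ID prefix.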
+ rl, err := c.ListServices(ctx, &swarmapi.ListServicesRequest{ + Filters: &swarmapi.ListServicesRequest_Filters{ + Names: []string{input}, + }, + }) + if err != nil || len(rl.Services) == 0 { + // If any error or 0 result, ListServices to match via ID prefix. + rl, err = c.ListServices(ctx, &swarmapi.ListServicesRequest{ + Filters: &swarmapi.ListServicesRequest_Filters{ + IDPrefixes: []string{input}, + }, + }) + } + if err != nil { + return nil, err + } + + if len(rl.Services) == 0 { + err := fmt.Errorf("service %s not found", input) + return nil, errors.NewRequestNotFoundError(err) + } + + if l := len(rl.Services); l > 1 { + return nil, fmt.Errorf("service %s is ambiguous (%d matches found)", input, l) + } + + if !insertDefaults { + return rl.Services[0], nil + } + + rg, err := c.GetService(ctx, &swarmapi.GetServiceRequest{ServiceID: rl.Services[0].ID, InsertDefaults: true}) + if err == nil { + return rg.Service, nil + } + return nil, err +} + +func getTask(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Task, error) { + // GetTask to match via full ID. + if rg, err := c.GetTask(ctx, &swarmapi.GetTaskRequest{TaskID: input}); err == nil { + return rg.Task, nil + } + + // If any error (including NotFound), ListTasks to match via full name. + rl, err := c.ListTasks(ctx, &swarmapi.ListTasksRequest{ + Filters: &swarmapi.ListTasksRequest_Filters{ + Names: []string{input}, + }, + }) + if err != nil || len(rl.Tasks) == 0 { + // If any error or 0 result, ListTasks to match via ID prefix. + rl, err = c.ListTasks(ctx, &swarmapi.ListTasksRequest{ + Filters: &swarmapi.ListTasksRequest_Filters{ + IDPrefixes: []string{input}, + }, + }) + } + if err != nil { + return nil, err + } + + if len(rl.Tasks) == 0 { + err := fmt.Errorf("task %s not found", input) + return nil, errors.NewRequestNotFoundError(err) + } + + if l := len(rl.Tasks); l > 1 { + return nil, fmt.Errorf("task %s is ambiguous (%d matches found)", input, l) + } + + return rl.Tasks[0], nil +} + +func getSecret(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Secret, error) { + // attempt to lookup secret by full ID + if rg, err := c.GetSecret(ctx, &swarmapi.GetSecretRequest{SecretID: input}); err == nil { + return rg.Secret, nil + } + + // If any error (including NotFound), ListSecrets to match via full name. + rl, err := c.ListSecrets(ctx, &swarmapi.ListSecretsRequest{ + Filters: &swarmapi.ListSecretsRequest_Filters{ + Names: []string{input}, + }, + }) + if err != nil || len(rl.Secrets) == 0 { + // If any error or 0 result, ListSecrets to match via ID prefix. + rl, err = c.ListSecrets(ctx, &swarmapi.ListSecretsRequest{ + Filters: &swarmapi.ListSecretsRequest_Filters{ + IDPrefixes: []string{input}, + }, + }) + } + if err != nil { + return nil, err + } + + if len(rl.Secrets) == 0 { + err := fmt.Errorf("secret %s not found", input) + return nil, errors.NewRequestNotFoundError(err) + } + + if l := len(rl.Secrets); l > 1 { + return nil, fmt.Errorf("secret %s is ambiguous (%d matches found)", input, l) + } + + return rl.Secrets[0], nil +} + +func getConfig(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Config, error) { + // attempt to lookup config by full ID + if rg, err := c.GetConfig(ctx, &swarmapi.GetConfigRequest{ConfigID: input}); err == nil { + return rg.Config, nil + } + + // If any error (including NotFound), ListConfigs to match via full name. 
+ rl, err := c.ListConfigs(ctx, &swarmapi.ListConfigsRequest{ + Filters: &swarmapi.ListConfigsRequest_Filters{ + Names: []string{input}, + }, + }) + if err != nil || len(rl.Configs) == 0 { + // If any error or 0 result, ListConfigs to match via ID prefix. + rl, err = c.ListConfigs(ctx, &swarmapi.ListConfigsRequest{ + Filters: &swarmapi.ListConfigsRequest_Filters{ + IDPrefixes: []string{input}, + }, + }) + } + if err != nil { + return nil, err + } + + if len(rl.Configs) == 0 { + err := fmt.Errorf("config %s not found", input) + return nil, errors.NewRequestNotFoundError(err) + } + + if l := len(rl.Configs); l > 1 { + return nil, fmt.Errorf("config %s is ambiguous (%d matches found)", input, l) + } + + return rl.Configs[0], nil +} + +func getNetwork(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Network, error) { + // GetNetwork to match via full ID. + if rg, err := c.GetNetwork(ctx, &swarmapi.GetNetworkRequest{NetworkID: input}); err == nil { + return rg.Network, nil + } + + // If any error (including NotFound), ListNetworks to match via ID prefix and full name. + rl, err := c.ListNetworks(ctx, &swarmapi.ListNetworksRequest{ + Filters: &swarmapi.ListNetworksRequest_Filters{ + Names: []string{input}, + }, + }) + if err != nil || len(rl.Networks) == 0 { + rl, err = c.ListNetworks(ctx, &swarmapi.ListNetworksRequest{ + Filters: &swarmapi.ListNetworksRequest_Filters{ + IDPrefixes: []string{input}, + }, + }) + } + if err != nil { + return nil, err + } + + if len(rl.Networks) == 0 { + return nil, fmt.Errorf("network %s not found", input) + } + + if l := len(rl.Networks); l > 1 { + return nil, fmt.Errorf("network %s is ambiguous (%d matches found)", input, l) + } + + return rl.Networks[0], nil +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/listen_addr.go b/vendor/github.com/moby/moby/daemon/cluster/listen_addr.go new file mode 100644 index 000000000..993ccb62a --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/listen_addr.go @@ -0,0 +1,302 @@ +package cluster + +import ( + "errors" + "fmt" + "net" +) + +var ( + errNoSuchInterface = errors.New("no such interface") + errNoIP = errors.New("could not find the system's IP address") + errMustSpecifyListenAddr = errors.New("must specify a listening address because the address to advertise is not recognized as a system address, and a system's IP address to use could not be uniquely identified") + errBadNetworkIdentifier = errors.New("must specify a valid IP address or interface name") + errBadListenAddr = errors.New("listen address must be an IP address or network interface (with optional port number)") + errBadAdvertiseAddr = errors.New("advertise address must be a non-zero IP address or network interface (with optional port number)") + errBadDataPathAddr = errors.New("data path address must be a non-zero IP address or network interface (without a port number)") + errBadDefaultAdvertiseAddr = errors.New("default advertise address must be a non-zero IP address or network interface (without a port number)") +) + +func resolveListenAddr(specifiedAddr string) (string, string, error) { + specifiedHost, specifiedPort, err := net.SplitHostPort(specifiedAddr) + if err != nil { + return "", "", fmt.Errorf("could not parse listen address %s", specifiedAddr) + } + // Does the host component match any of the interface names on the + // system? If so, use the address from that interface. 
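+ // (Added note: passing true for isUnspecifiedValid below accepts
+ // unspecified addresses such as 0.0.0.0, which are valid to listen on;
+ // resolveAdvertiseAddr passes false because an unspecified address
+ // cannot be advertised to other nodes.)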
+ specifiedIP, err := resolveInputIPAddr(specifiedHost, true)
+ if err != nil {
+ if err == errBadNetworkIdentifier {
+ err = errBadListenAddr
+ }
+ return "", "", err
+ }
+
+ return specifiedIP.String(), specifiedPort, nil
+}
+
+func (c *Cluster) resolveAdvertiseAddr(advertiseAddr, listenAddrPort string) (string, string, error) {
+ // Approach:
+ // - If an advertise address is specified, use that. Resolve the
+ // interface's address if an interface was specified in
+ // advertiseAddr. Fill in the port from listenAddrPort if necessary.
+ // - If DefaultAdvertiseAddr is not empty, use that with the port from
+ // listenAddrPort. Resolve the interface's address if an interface
+ // name was specified in DefaultAdvertiseAddr.
+ // - Otherwise, try to autodetect the system's address. Use the port in
+ // listenAddrPort with this address if autodetection succeeds.
+
+ if advertiseAddr != "" {
+ advertiseHost, advertisePort, err := net.SplitHostPort(advertiseAddr)
+ if err != nil {
+ // Not a host:port specification
+ advertiseHost = advertiseAddr
+ advertisePort = listenAddrPort
+ }
+ // Does the host component match any of the interface names on the
+ // system? If so, use the address from that interface.
+ advertiseIP, err := resolveInputIPAddr(advertiseHost, false)
+ if err != nil {
+ if err == errBadNetworkIdentifier {
+ err = errBadAdvertiseAddr
+ }
+ return "", "", err
+ }
+
+ return advertiseIP.String(), advertisePort, nil
+ }
+
+ if c.config.DefaultAdvertiseAddr != "" {
+ // Does the default advertise address component match any of the
+ // interface names on the system? If so, use the address from
+ // that interface.
+ defaultAdvertiseIP, err := resolveInputIPAddr(c.config.DefaultAdvertiseAddr, false)
+ if err != nil {
+ if err == errBadNetworkIdentifier {
+ err = errBadDefaultAdvertiseAddr
+ }
+ return "", "", err
+ }
+
+ return defaultAdvertiseIP.String(), listenAddrPort, nil
+ }
+
+ systemAddr, err := c.resolveSystemAddr()
+ if err != nil {
+ return "", "", err
+ }
+ return systemAddr.String(), listenAddrPort, nil
+}
+
+func resolveDataPathAddr(dataPathAddr string) (string, error) {
+ if dataPathAddr == "" {
+ // dataPathAddr is not defined
+ return "", nil
+ }
+ // If a data path flag is specified, try to resolve the IP address.
+ dataPathIP, err := resolveInputIPAddr(dataPathAddr, false)
+ if err != nil {
+ if err == errBadNetworkIdentifier {
+ err = errBadDataPathAddr
+ }
+ return "", err
+ }
+ return dataPathIP.String(), nil
+}
+
+func resolveInterfaceAddr(specifiedInterface string) (net.IP, error) {
+ // Use a specific interface's IP address.
+ intf, err := net.InterfaceByName(specifiedInterface)
+ if err != nil {
+ return nil, errNoSuchInterface
+ }
+
+ addrs, err := intf.Addrs()
+ if err != nil {
+ return nil, err
+ }
+
+ var interfaceAddr4, interfaceAddr6 net.IP
+
+ for _, addr := range addrs {
+ ipAddr, ok := addr.(*net.IPNet)
+
+ if ok {
+ if ipAddr.IP.To4() != nil {
+ // IPv4
+ if interfaceAddr4 != nil {
+ return nil, fmt.Errorf("interface %s has more than one IPv4 address (%s and %s)", specifiedInterface, interfaceAddr4, ipAddr.IP)
+ }
+ interfaceAddr4 = ipAddr.IP
+ } else {
+ // IPv6
+ if interfaceAddr6 != nil {
+ return nil, fmt.Errorf("interface %s has more than one IPv6 address (%s and %s)", specifiedInterface, interfaceAddr6, ipAddr.IP)
+ }
+ interfaceAddr6 = ipAddr.IP
+ }
+ }
+ }
+
+ if interfaceAddr4 == nil && interfaceAddr6 == nil {
+ return nil, fmt.Errorf("interface %s has no usable IPv4 or IPv6 address", specifiedInterface)
+ }
+
+ // In the case that there's exactly one IPv4 address
+ // and exactly one IPv6 address, favor IPv4 over IPv6.
+ if interfaceAddr4 != nil {
+ return interfaceAddr4, nil
+ }
+ return interfaceAddr6, nil
+}
+
+// resolveInputIPAddr tries to resolve an IP address from the input string:
+// - it first tries to match the string to an interface name and, if that
+// succeeds, returns the IP address associated with that interface
+// - otherwise it tries to parse the string as an IP address itself and,
+// if that succeeds, returns the IP address
+func resolveInputIPAddr(input string, isUnspecifiedValid bool) (net.IP, error) {
+ // Try to see if it is an interface name
+ interfaceAddr, err := resolveInterfaceAddr(input)
+ if err == nil {
+ return interfaceAddr, nil
+ }
+ // The string matched an interface name, but its address could not be
+ // resolved unambiguously; surface that error.
+ if err != errNoSuchInterface {
+ return nil, err
+ }
+
+ // The string is not an interface name; check whether it is a valid IP
+ if ip := net.ParseIP(input); ip != nil && (isUnspecifiedValid || !ip.IsUnspecified()) {
+ return ip, nil
+ }
+
+ // No valid IP address found
+ return nil, errBadNetworkIdentifier
+}
+
+func (c *Cluster) resolveSystemAddrViaSubnetCheck() (net.IP, error) {
+ // Use the system's only IP address, or fail if there are
+ // multiple addresses to choose from. Skip interfaces that
+ // are managed by Docker, detected via a subnet check.
+ interfaces, err := net.Interfaces()
+ if err != nil {
+ return nil, err
+ }
+
+ var systemAddr net.IP
+ var systemInterface string
+
+ // List Docker-managed subnets
+ v4Subnets, v6Subnets := c.config.NetworkSubnetsProvider.Subnets()
+
+ifaceLoop:
+ for _, intf := range interfaces {
+ // Skip inactive interfaces and loopback interfaces
+ if (intf.Flags&net.FlagUp == 0) || (intf.Flags&net.FlagLoopback) != 0 {
+ continue
+ }
+
+ addrs, err := intf.Addrs()
+ if err != nil {
+ continue
+ }
+
+ var interfaceAddr4, interfaceAddr6 net.IP
+
+ for _, addr := range addrs {
+ ipAddr, ok := addr.(*net.IPNet)
+
+ // Skip loopback and link-local addresses
+ if !ok || !ipAddr.IP.IsGlobalUnicast() {
+ continue
+ }
+
+ if ipAddr.IP.To4() != nil {
+ // IPv4
+
+ // Ignore addresses in subnets that are managed by Docker.
+ for _, subnet := range v4Subnets {
+ if subnet.Contains(ipAddr.IP) {
+ continue ifaceLoop
+ }
+ }
+
+ if interfaceAddr4 != nil {
+ return nil, errMultipleIPs(intf.Name, intf.Name, interfaceAddr4, ipAddr.IP)
+ }
+
+ interfaceAddr4 = ipAddr.IP
+ } else {
+ // IPv6
+
+ // Ignore addresses in subnets that are managed by Docker.
+ for _, subnet := range v6Subnets { + if subnet.Contains(ipAddr.IP) { + continue ifaceLoop + } + } + + if interfaceAddr6 != nil { + return nil, errMultipleIPs(intf.Name, intf.Name, interfaceAddr6, ipAddr.IP) + } + + interfaceAddr6 = ipAddr.IP + } + } + + // In the case that this interface has exactly one IPv4 address + // and exactly one IPv6 address, favor IPv4 over IPv6. + if interfaceAddr4 != nil { + if systemAddr != nil { + return nil, errMultipleIPs(systemInterface, intf.Name, systemAddr, interfaceAddr4) + } + systemAddr = interfaceAddr4 + systemInterface = intf.Name + } else if interfaceAddr6 != nil { + if systemAddr != nil { + return nil, errMultipleIPs(systemInterface, intf.Name, systemAddr, interfaceAddr6) + } + systemAddr = interfaceAddr6 + systemInterface = intf.Name + } + } + + if systemAddr == nil { + return nil, errNoIP + } + + return systemAddr, nil +} + +func listSystemIPs() []net.IP { + interfaces, err := net.Interfaces() + if err != nil { + return nil + } + + var systemAddrs []net.IP + + for _, intf := range interfaces { + addrs, err := intf.Addrs() + if err != nil { + continue + } + + for _, addr := range addrs { + ipAddr, ok := addr.(*net.IPNet) + + if ok { + systemAddrs = append(systemAddrs, ipAddr.IP) + } + } + } + + return systemAddrs +} + +func errMultipleIPs(interfaceA, interfaceB string, addrA, addrB net.IP) error { + if interfaceA == interfaceB { + return fmt.Errorf("could not choose an IP address to advertise since this system has multiple addresses on interface %s (%s and %s)", interfaceA, addrA, addrB) + } + return fmt.Errorf("could not choose an IP address to advertise since this system has multiple addresses on different interfaces (%s on %s and %s on %s)", addrA, interfaceA, addrB, interfaceB) +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/listen_addr_linux.go b/vendor/github.com/moby/moby/daemon/cluster/listen_addr_linux.go new file mode 100644 index 000000000..3d4f239bd --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/listen_addr_linux.go @@ -0,0 +1,91 @@ +// +build linux + +package cluster + +import ( + "net" + + "github.com/vishvananda/netlink" +) + +func (c *Cluster) resolveSystemAddr() (net.IP, error) { + // Use the system's only device IP address, or fail if there are + // multiple addresses to choose from. + interfaces, err := netlink.LinkList() + if err != nil { + return nil, err + } + + var ( + systemAddr net.IP + systemInterface string + deviceFound bool + ) + + for _, intf := range interfaces { + // Skip non device or inactive interfaces + if intf.Type() != "device" || intf.Attrs().Flags&net.FlagUp == 0 { + continue + } + + addrs, err := netlink.AddrList(intf, netlink.FAMILY_ALL) + if err != nil { + continue + } + + var interfaceAddr4, interfaceAddr6 net.IP + + for _, addr := range addrs { + ipAddr := addr.IPNet.IP + + // Skip loopback and link-local addresses + if !ipAddr.IsGlobalUnicast() { + continue + } + + // At least one non-loopback device is found and it is administratively up + deviceFound = true + + if ipAddr.To4() != nil { + if interfaceAddr4 != nil { + return nil, errMultipleIPs(intf.Attrs().Name, intf.Attrs().Name, interfaceAddr4, ipAddr) + } + interfaceAddr4 = ipAddr + } else { + if interfaceAddr6 != nil { + return nil, errMultipleIPs(intf.Attrs().Name, intf.Attrs().Name, interfaceAddr6, ipAddr) + } + interfaceAddr6 = ipAddr + } + } + + // In the case that this interface has exactly one IPv4 address + // and exactly one IPv6 address, favor IPv4 over IPv6. 
+ if interfaceAddr4 != nil {
+ if systemAddr != nil {
+ return nil, errMultipleIPs(systemInterface, intf.Attrs().Name, systemAddr, interfaceAddr4)
+ }
+ systemAddr = interfaceAddr4
+ systemInterface = intf.Attrs().Name
+ } else if interfaceAddr6 != nil {
+ if systemAddr != nil {
+ return nil, errMultipleIPs(systemInterface, intf.Attrs().Name, systemAddr, interfaceAddr6)
+ }
+ systemAddr = interfaceAddr6
+ systemInterface = intf.Attrs().Name
+ }
+ }
+
+ if systemAddr == nil {
+ if !deviceFound {
+ // If no non-loopback device type interface is found,
+ // fall back to the regular auto-detection mechanism.
+ // This is to cover the case where docker is running
+ // inside a container (eths are in fact veths).
+ return c.resolveSystemAddrViaSubnetCheck()
+ }
+ return nil, errNoIP
+ }
+
+ return systemAddr, nil
+}
diff --git a/vendor/github.com/moby/moby/daemon/cluster/listen_addr_others.go b/vendor/github.com/moby/moby/daemon/cluster/listen_addr_others.go
new file mode 100644
index 000000000..4e845f5c8
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/cluster/listen_addr_others.go
@@ -0,0 +1,9 @@
+// +build !linux,!solaris
+
+package cluster
+
+import "net"
+
+func (c *Cluster) resolveSystemAddr() (net.IP, error) {
+ return c.resolveSystemAddrViaSubnetCheck()
+}
diff --git a/vendor/github.com/moby/moby/daemon/cluster/listen_addr_solaris.go b/vendor/github.com/moby/moby/daemon/cluster/listen_addr_solaris.go
new file mode 100644
index 000000000..57a894b25
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/cluster/listen_addr_solaris.go
@@ -0,0 +1,57 @@
+package cluster
+
+import (
+ "bufio"
+ "fmt"
+ "net"
+ "os/exec"
+ "strings"
+)
+
+func (c *Cluster) resolveSystemAddr() (net.IP, error) {
+ defRouteCmd := "/usr/sbin/ipadm show-addr -p -o addr " +
+ "`/usr/sbin/route get default | /usr/bin/grep interface | " +
+ "/usr/bin/awk '{print $2}'`"
+ out, err := exec.Command("/usr/bin/bash", "-c", defRouteCmd).Output()
+ if err != nil {
+ return nil, fmt.Errorf("cannot get default route: %v", err)
+ }
+
+ defInterface := strings.SplitN(string(out), "/", 2)
+ defInterfaceIP := net.ParseIP(defInterface[0])
+
+ return defInterfaceIP, nil
+}
+
+func listSystemIPs() []net.IP {
+ var systemAddrs []net.IP
+ cmd := exec.Command("/usr/sbin/ipadm", "show-addr", "-p", "-o", "addr")
+ cmdReader, err := cmd.StdoutPipe()
+ if err != nil {
+ return nil
+ }
+
+ if err := cmd.Start(); err != nil {
+ return nil
+ }
+
+ // Scan synchronously so that all output is consumed before cmd.Wait
+ // closes the pipe.
+ scanner := bufio.NewScanner(cmdReader)
+ for scanner.Scan() {
+ text := scanner.Text()
+ nameAddrPair := strings.SplitN(text, "/", 2)
+ // Note: loopback and docker-managed interfaces are not filtered out here.
+ systemAddrs = append(systemAddrs, net.ParseIP(nameAddrPair[0]))
+ }
+
+ if err := scanner.Err(); err != nil {
+ fmt.Printf("error scanning ipadm output: %+v\n", err)
+ }
+
+ if err := cmd.Wait(); err != nil {
+ fmt.Printf("error waiting for ipadm command: %+v\n", err)
+ }
+
+ return systemAddrs
+}
diff --git a/vendor/github.com/moby/moby/daemon/cluster/networks.go b/vendor/github.com/moby/moby/daemon/cluster/networks.go
new file mode 100644
index 000000000..1906c37bd
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/cluster/networks.go
@@ -0,0 +1,317 @@
+package cluster
+
+import (
+ "fmt"
+
+ "github.com/Sirupsen/logrus"
+ apierrors "github.com/docker/docker/api/errors"
+ apitypes "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/network"
+ types "github.com/docker/docker/api/types/swarm"
+ "github.com/docker/docker/daemon/cluster/convert"
+ "github.com/docker/docker/runconfig"
+
swarmapi "github.com/docker/swarmkit/api" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// GetNetworks returns all current cluster managed networks. +func (c *Cluster) GetNetworks() ([]apitypes.NetworkResource, error) { + list, err := c.getNetworks(nil) + if err != nil { + return nil, err + } + removePredefinedNetworks(&list) + return list, nil +} + +func removePredefinedNetworks(networks *[]apitypes.NetworkResource) { + if networks == nil { + return + } + var idxs []int + for i, n := range *networks { + if v, ok := n.Labels["com.docker.swarm.predefined"]; ok && v == "true" { + idxs = append(idxs, i) + } + } + for i, idx := range idxs { + idx -= i + *networks = append((*networks)[:idx], (*networks)[idx+1:]...) + } +} + +func (c *Cluster) getNetworks(filters *swarmapi.ListNetworksRequest_Filters) ([]apitypes.NetworkResource, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + state := c.currentNodeState() + if !state.IsActiveManager() { + return nil, c.errNoManager(state) + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + r, err := state.controlClient.ListNetworks(ctx, &swarmapi.ListNetworksRequest{Filters: filters}) + if err != nil { + return nil, err + } + + networks := make([]apitypes.NetworkResource, 0, len(r.Networks)) + + for _, network := range r.Networks { + networks = append(networks, convert.BasicNetworkFromGRPC(*network)) + } + + return networks, nil +} + +// GetNetwork returns a cluster network by an ID. +func (c *Cluster) GetNetwork(input string) (apitypes.NetworkResource, error) { + var network *swarmapi.Network + + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + n, err := getNetwork(ctx, state.controlClient, input) + if err != nil { + return err + } + network = n + return nil + }); err != nil { + return apitypes.NetworkResource{}, err + } + return convert.BasicNetworkFromGRPC(*network), nil +} + +// GetNetworksByName returns cluster managed networks by name. +// It is ok to have multiple networks here. #18864 +func (c *Cluster) GetNetworksByName(name string) ([]apitypes.NetworkResource, error) { + // Note that swarmapi.GetNetworkRequest.Name is not functional. + // So we cannot just use that with c.GetNetwork. + return c.getNetworks(&swarmapi.ListNetworksRequest_Filters{ + Names: []string{name}, + }) +} + +func attacherKey(target, containerID string) string { + return containerID + ":" + target +} + +// UpdateAttachment signals the attachment config to the attachment +// waiter who is trying to start or attach the container to the +// network. +func (c *Cluster) UpdateAttachment(target, containerID string, config *network.NetworkingConfig) error { + c.mu.Lock() + attacher, ok := c.attachers[attacherKey(target, containerID)] + if !ok || attacher == nil { + c.mu.Unlock() + return fmt.Errorf("could not find attacher for container %s to network %s", containerID, target) + } + if attacher.inProgress { + logrus.Debugf("Discarding redundant notice of resource allocation on network %s for task id %s", target, attacher.taskID) + c.mu.Unlock() + return nil + } + attacher.inProgress = true + c.mu.Unlock() + + attacher.attachWaitCh <- config + + return nil +} + +// WaitForDetachment waits for the container to stop or detach from +// the network. 
+func (c *Cluster) WaitForDetachment(ctx context.Context, networkName, networkID, taskID, containerID string) error { + c.mu.RLock() + attacher, ok := c.attachers[attacherKey(networkName, containerID)] + if !ok { + attacher, ok = c.attachers[attacherKey(networkID, containerID)] + } + state := c.currentNodeState() + if state.swarmNode == nil || state.swarmNode.Agent() == nil { + c.mu.RUnlock() + return errors.New("invalid cluster node while waiting for detachment") + } + + c.mu.RUnlock() + agent := state.swarmNode.Agent() + if ok && attacher != nil && + attacher.detachWaitCh != nil && + attacher.attachCompleteCh != nil { + // Attachment may be in progress still so wait for + // attachment to complete. + select { + case <-attacher.attachCompleteCh: + case <-ctx.Done(): + return ctx.Err() + } + + if attacher.taskID == taskID { + select { + case <-attacher.detachWaitCh: + case <-ctx.Done(): + return ctx.Err() + } + } + } + + return agent.ResourceAllocator().DetachNetwork(ctx, taskID) +} + +// AttachNetwork generates an attachment request towards the manager. +func (c *Cluster) AttachNetwork(target string, containerID string, addresses []string) (*network.NetworkingConfig, error) { + aKey := attacherKey(target, containerID) + c.mu.Lock() + state := c.currentNodeState() + if state.swarmNode == nil || state.swarmNode.Agent() == nil { + c.mu.Unlock() + return nil, errors.New("invalid cluster node while attaching to network") + } + if attacher, ok := c.attachers[aKey]; ok { + c.mu.Unlock() + return attacher.config, nil + } + + agent := state.swarmNode.Agent() + attachWaitCh := make(chan *network.NetworkingConfig) + detachWaitCh := make(chan struct{}) + attachCompleteCh := make(chan struct{}) + c.attachers[aKey] = &attacher{ + attachWaitCh: attachWaitCh, + attachCompleteCh: attachCompleteCh, + detachWaitCh: detachWaitCh, + } + c.mu.Unlock() + + ctx, cancel := c.getRequestContext() + defer cancel() + + taskID, err := agent.ResourceAllocator().AttachNetwork(ctx, containerID, target, addresses) + if err != nil { + c.mu.Lock() + delete(c.attachers, aKey) + c.mu.Unlock() + return nil, fmt.Errorf("Could not attach to network %s: %v", target, err) + } + + c.mu.Lock() + c.attachers[aKey].taskID = taskID + close(attachCompleteCh) + c.mu.Unlock() + + logrus.Debugf("Successfully attached to network %s with task id %s", target, taskID) + + release := func() { + ctx, cancel := c.getRequestContext() + defer cancel() + if err := agent.ResourceAllocator().DetachNetwork(ctx, taskID); err != nil { + logrus.Errorf("Failed remove network attachment %s to network %s on allocation failure: %v", + taskID, target, err) + } + } + + var config *network.NetworkingConfig + select { + case config = <-attachWaitCh: + case <-ctx.Done(): + release() + return nil, fmt.Errorf("attaching to network failed, make sure your network options are correct and check manager logs: %v", ctx.Err()) + } + + c.mu.Lock() + c.attachers[aKey].config = config + c.mu.Unlock() + + logrus.Debugf("Successfully allocated resources on network %s for task id %s", target, taskID) + + return config, nil +} + +// DetachNetwork unblocks the waiters waiting on WaitForDetachment so +// that a request to detach can be generated towards the manager. 
+func (c *Cluster) DetachNetwork(target string, containerID string) error { + aKey := attacherKey(target, containerID) + + c.mu.Lock() + attacher, ok := c.attachers[aKey] + delete(c.attachers, aKey) + c.mu.Unlock() + + if !ok { + return fmt.Errorf("could not find network attachment for container %s to network %s", containerID, target) + } + + close(attacher.detachWaitCh) + return nil +} + +// CreateNetwork creates a new cluster managed network. +func (c *Cluster) CreateNetwork(s apitypes.NetworkCreateRequest) (string, error) { + if runconfig.IsPreDefinedNetwork(s.Name) { + err := fmt.Errorf("%s is a pre-defined network and cannot be created", s.Name) + return "", apierrors.NewRequestForbiddenError(err) + } + + var resp *swarmapi.CreateNetworkResponse + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + networkSpec := convert.BasicNetworkCreateToGRPC(s) + r, err := state.controlClient.CreateNetwork(ctx, &swarmapi.CreateNetworkRequest{Spec: &networkSpec}) + if err != nil { + return err + } + resp = r + return nil + }); err != nil { + return "", err + } + + return resp.Network.ID, nil +} + +// RemoveNetwork removes a cluster network. +func (c *Cluster) RemoveNetwork(input string) error { + return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + network, err := getNetwork(ctx, state.controlClient, input) + if err != nil { + return err + } + + _, err = state.controlClient.RemoveNetwork(ctx, &swarmapi.RemoveNetworkRequest{NetworkID: network.ID}) + return err + }) +} + +func (c *Cluster) populateNetworkID(ctx context.Context, client swarmapi.ControlClient, s *types.ServiceSpec) error { + // Always prefer NetworkAttachmentConfigs from TaskTemplate + // but fallback to service spec for backward compatibility + networks := s.TaskTemplate.Networks + if len(networks) == 0 { + networks = s.Networks + } + for i, n := range networks { + apiNetwork, err := getNetwork(ctx, client, n.Target) + if err != nil { + ln, _ := c.config.Backend.FindNetwork(n.Target) + if ln != nil && runconfig.IsPreDefinedNetwork(ln.Name()) { + // Need to retrieve the corresponding predefined swarm network + // and use its id for the request. + apiNetwork, err = getNetwork(ctx, client, ln.Name()) + if err != nil { + err = fmt.Errorf("could not find the corresponding predefined swarm network: %v", err) + return apierrors.NewRequestNotFoundError(err) + } + goto setid + } + if ln != nil && !ln.Info().Dynamic() { + err = fmt.Errorf("The network %s cannot be used with services. 
Only networks scoped to the swarm can be used, such as those created with the overlay driver.", ln.Name()) + return apierrors.NewRequestForbiddenError(err) + } + return err + } + setid: + networks[i].Target = apiNetwork.ID + } + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/noderunner.go b/vendor/github.com/moby/moby/daemon/cluster/noderunner.go new file mode 100644 index 000000000..b970e7b21 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/noderunner.go @@ -0,0 +1,375 @@ +package cluster + +import ( + "fmt" + "path/filepath" + "runtime" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/executor/container" + lncluster "github.com/docker/libnetwork/cluster" + swarmapi "github.com/docker/swarmkit/api" + swarmnode "github.com/docker/swarmkit/node" + "github.com/pkg/errors" + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +// nodeRunner implements a manager for continuously running swarmkit node, restarting them with backoff delays if needed. +type nodeRunner struct { + nodeState + mu sync.RWMutex + done chan struct{} // closed when swarmNode exits + ready chan struct{} // closed when swarmNode becomes active + reconnectDelay time.Duration + config nodeStartConfig + + repeatedRun bool + cancelReconnect func() + stopping bool + cluster *Cluster // only for accessing config helpers, never call any methods. TODO: change to config struct +} + +// nodeStartConfig holds configuration needed to start a new node. Exported +// fields of this structure are saved to disk in json. Unexported fields +// contain data that shouldn't be persisted between daemon reloads. +type nodeStartConfig struct { + // LocalAddr is this machine's local IP or hostname, if specified. + LocalAddr string + // RemoteAddr is the address that was given to "swarm join". It is used + // to find LocalAddr if necessary. + RemoteAddr string + // ListenAddr is the address we bind to, including a port. + ListenAddr string + // AdvertiseAddr is the address other nodes should connect to, + // including a port. + AdvertiseAddr string + // DataPathAddr is the address that has to be used for the data path + DataPathAddr string + // JoinInProgress is set to true if a join operation has started, but + // not completed yet. + JoinInProgress bool + + joinAddr string + forceNewCluster bool + joinToken string + lockKey []byte + autolock bool + availability types.NodeAvailability +} + +func (n *nodeRunner) Ready() chan error { + c := make(chan error, 1) + n.mu.RLock() + ready, done := n.ready, n.done + n.mu.RUnlock() + go func() { + select { + case <-ready: + case <-done: + } + select { + case <-ready: + default: + n.mu.RLock() + c <- n.err + n.mu.RUnlock() + } + close(c) + }() + return c +} + +func (n *nodeRunner) Start(conf nodeStartConfig) error { + n.mu.Lock() + defer n.mu.Unlock() + + n.reconnectDelay = initialReconnectDelay + + return n.start(conf) +} + +func (n *nodeRunner) start(conf nodeStartConfig) error { + var control string + if runtime.GOOS == "windows" { + control = `\\.\pipe\` + controlSocket + } else { + control = filepath.Join(n.cluster.runtimeRoot, controlSocket) + } + + joinAddr := conf.joinAddr + if joinAddr == "" && conf.JoinInProgress { + // We must have been restarted while trying to join a cluster. + // Continue trying to join instead of forming our own cluster. + joinAddr = conf.RemoteAddr + } + + // Hostname is not set here. 
Instead, it is obtained from + // the node description that is reported periodically + swarmnodeConfig := swarmnode.Config{ + ForceNewCluster: conf.forceNewCluster, + ListenControlAPI: control, + ListenRemoteAPI: conf.ListenAddr, + AdvertiseRemoteAPI: conf.AdvertiseAddr, + JoinAddr: joinAddr, + StateDir: n.cluster.root, + JoinToken: conf.joinToken, + Executor: container.NewExecutor(n.cluster.config.Backend, n.cluster.config.PluginBackend), + HeartbeatTick: 1, + ElectionTick: 3, + UnlockKey: conf.lockKey, + AutoLockManagers: conf.autolock, + PluginGetter: n.cluster.config.Backend.PluginGetter(), + } + if conf.availability != "" { + avail, ok := swarmapi.NodeSpec_Availability_value[strings.ToUpper(string(conf.availability))] + if !ok { + return fmt.Errorf("invalid Availability: %q", conf.availability) + } + swarmnodeConfig.Availability = swarmapi.NodeSpec_Availability(avail) + } + node, err := swarmnode.New(&swarmnodeConfig) + if err != nil { + return err + } + if err := node.Start(context.Background()); err != nil { + return err + } + + n.done = make(chan struct{}) + n.ready = make(chan struct{}) + n.swarmNode = node + if conf.joinAddr != "" { + conf.JoinInProgress = true + } + n.config = conf + savePersistentState(n.cluster.root, conf) + + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + n.handleNodeExit(node) + cancel() + }() + + go n.handleReadyEvent(ctx, node, n.ready) + go n.handleControlSocketChange(ctx, node) + + return nil +} + +func (n *nodeRunner) handleControlSocketChange(ctx context.Context, node *swarmnode.Node) { + for conn := range node.ListenControlSocket(ctx) { + n.mu.Lock() + if n.grpcConn != conn { + if conn == nil { + n.controlClient = nil + n.logsClient = nil + } else { + n.controlClient = swarmapi.NewControlClient(conn) + n.logsClient = swarmapi.NewLogsClient(conn) + // push store changes to daemon + go n.watchClusterEvents(ctx, conn) + } + } + n.grpcConn = conn + n.mu.Unlock() + n.cluster.SendClusterEvent(lncluster.EventSocketChange) + } +} + +func (n *nodeRunner) watchClusterEvents(ctx context.Context, conn *grpc.ClientConn) { + client := swarmapi.NewWatchClient(conn) + watch, err := client.Watch(ctx, &swarmapi.WatchRequest{ + Entries: []*swarmapi.WatchRequest_WatchEntry{ + { + Kind: "node", + Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove, + }, + { + Kind: "service", + Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove, + }, + { + Kind: "network", + Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove, + }, + { + Kind: "secret", + Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove, + }, + { + Kind: "config", + Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove, + }, + }, + IncludeOldObject: true, + }) + if err != nil { + logrus.WithError(err).Error("failed to watch cluster store") + return + } + for { + msg, err := watch.Recv() + if err != nil { + // store watch is broken + logrus.WithError(err).Error("failed to receive changes from store watch API") + return + } + select { + case <-ctx.Done(): + return + case n.cluster.watchStream <- msg: + } + } +} + +func (n *nodeRunner) handleReadyEvent(ctx context.Context, node *swarmnode.Node, ready chan struct{}) { + select { + case <-node.Ready(): + n.mu.Lock() + n.err = nil + if n.config.JoinInProgress { + n.config.JoinInProgress = 
false + savePersistentState(n.cluster.root, n.config) + } + n.mu.Unlock() + close(ready) + case <-ctx.Done(): + } + n.cluster.SendClusterEvent(lncluster.EventNodeReady) +} + +func (n *nodeRunner) handleNodeExit(node *swarmnode.Node) { + err := detectLockedError(node.Err(context.Background())) + if err != nil { + logrus.Errorf("cluster exited with error: %v", err) + } + n.mu.Lock() + n.swarmNode = nil + n.err = err + close(n.done) + select { + case <-n.ready: + n.enableReconnectWatcher() + default: + if n.repeatedRun { + n.enableReconnectWatcher() + } + } + n.repeatedRun = true + n.mu.Unlock() +} + +// Stop stops the current swarm node if it is running. +func (n *nodeRunner) Stop() error { + n.mu.Lock() + if n.cancelReconnect != nil { // between restarts + n.cancelReconnect() + n.cancelReconnect = nil + } + if n.swarmNode == nil { + n.mu.Unlock() + return nil + } + n.stopping = true + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + defer cancel() + n.mu.Unlock() + if err := n.swarmNode.Stop(ctx); err != nil && !strings.Contains(err.Error(), "context canceled") { + return err + } + n.cluster.SendClusterEvent(lncluster.EventNodeLeave) + <-n.done + return nil +} + +func (n *nodeRunner) State() nodeState { + if n == nil { + return nodeState{status: types.LocalNodeStateInactive} + } + n.mu.RLock() + defer n.mu.RUnlock() + + ns := n.nodeState + + if ns.err != nil || n.cancelReconnect != nil { + if errors.Cause(ns.err) == errSwarmLocked { + ns.status = types.LocalNodeStateLocked + } else { + ns.status = types.LocalNodeStateError + } + } else { + select { + case <-n.ready: + ns.status = types.LocalNodeStateActive + default: + ns.status = types.LocalNodeStatePending + } + } + + return ns +} + +func (n *nodeRunner) enableReconnectWatcher() { + if n.stopping { + return + } + n.reconnectDelay *= 2 + if n.reconnectDelay > maxReconnectDelay { + n.reconnectDelay = maxReconnectDelay + } + logrus.Warnf("Restarting swarm in %.2f seconds", n.reconnectDelay.Seconds()) + delayCtx, cancel := context.WithTimeout(context.Background(), n.reconnectDelay) + n.cancelReconnect = cancel + + go func() { + <-delayCtx.Done() + if delayCtx.Err() != context.DeadlineExceeded { + return + } + n.mu.Lock() + defer n.mu.Unlock() + if n.stopping { + return + } + + if err := n.start(n.config); err != nil { + n.err = err + } + }() +} + +// nodeState represents information about the current state of the cluster and +// provides access to the grpc clients. +type nodeState struct { + swarmNode *swarmnode.Node + grpcConn *grpc.ClientConn + controlClient swarmapi.ControlClient + logsClient swarmapi.LogsClient + status types.LocalNodeState + actualLocalAddr string + err error +} + +// IsActiveManager returns true if node is a manager ready to accept control requests. It is safe to access the client properties if this returns true. +func (ns nodeState) IsActiveManager() bool { + return ns.controlClient != nil +} + +// IsManager returns true if node is a manager. +func (ns nodeState) IsManager() bool { + return ns.swarmNode != nil && ns.swarmNode.Manager() != nil +} + +// NodeID returns node's ID or empty string if node is inactive. 
+func (ns nodeState) NodeID() string { + if ns.swarmNode != nil { + return ns.swarmNode.NodeID() + } + return "" +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/nodes.go b/vendor/github.com/moby/moby/daemon/cluster/nodes.go new file mode 100644 index 000000000..839c8f78e --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/nodes.go @@ -0,0 +1,104 @@ +package cluster + +import ( + apierrors "github.com/docker/docker/api/errors" + apitypes "github.com/docker/docker/api/types" + types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/convert" + swarmapi "github.com/docker/swarmkit/api" + "golang.org/x/net/context" +) + +// GetNodes returns a list of all nodes known to a cluster. +func (c *Cluster) GetNodes(options apitypes.NodeListOptions) ([]types.Node, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + state := c.currentNodeState() + if !state.IsActiveManager() { + return nil, c.errNoManager(state) + } + + filters, err := newListNodesFilters(options.Filters) + if err != nil { + return nil, err + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + r, err := state.controlClient.ListNodes( + ctx, + &swarmapi.ListNodesRequest{Filters: filters}) + if err != nil { + return nil, err + } + + nodes := make([]types.Node, 0, len(r.Nodes)) + + for _, node := range r.Nodes { + nodes = append(nodes, convert.NodeFromGRPC(*node)) + } + return nodes, nil +} + +// GetNode returns a node based on an ID. +func (c *Cluster) GetNode(input string) (types.Node, error) { + var node *swarmapi.Node + + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + n, err := getNode(ctx, state.controlClient, input) + if err != nil { + return err + } + node = n + return nil + }); err != nil { + return types.Node{}, err + } + + return convert.NodeFromGRPC(*node), nil +} + +// UpdateNode updates existing nodes properties. +func (c *Cluster) UpdateNode(input string, version uint64, spec types.NodeSpec) error { + return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + nodeSpec, err := convert.NodeSpecToGRPC(spec) + if err != nil { + return apierrors.NewBadRequestError(err) + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + currentNode, err := getNode(ctx, state.controlClient, input) + if err != nil { + return err + } + + _, err = state.controlClient.UpdateNode( + ctx, + &swarmapi.UpdateNodeRequest{ + NodeID: currentNode.ID, + Spec: &nodeSpec, + NodeVersion: &swarmapi.Version{ + Index: version, + }, + }, + ) + return err + }) +} + +// RemoveNode removes a node from a cluster +func (c *Cluster) RemoveNode(input string, force bool) error { + return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + node, err := getNode(ctx, state.controlClient, input) + if err != nil { + return err + } + + _, err = state.controlClient.RemoveNode(ctx, &swarmapi.RemoveNodeRequest{NodeID: node.ID, Force: force}) + return err + }) +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/provider/network.go b/vendor/github.com/moby/moby/daemon/cluster/provider/network.go new file mode 100644 index 000000000..f4c72ae13 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/provider/network.go @@ -0,0 +1,37 @@ +package provider + +import "github.com/docker/docker/api/types" + +// NetworkCreateRequest is a request when creating a network. +type NetworkCreateRequest struct { + ID string + types.NetworkCreateRequest +} + +// NetworkCreateResponse is a response when creating a network. 
+type NetworkCreateResponse struct { + ID string `json:"Id"` +} + +// VirtualAddress represents a virtual address. +type VirtualAddress struct { + IPv4 string + IPv6 string +} + +// PortConfig represents a port configuration. +type PortConfig struct { + Name string + Protocol int32 + TargetPort uint32 + PublishedPort uint32 +} + +// ServiceConfig represents a service configuration. +type ServiceConfig struct { + ID string + Name string + Aliases map[string][]string + VirtualAddresses map[string]*VirtualAddress + ExposedPorts []*PortConfig +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/secrets.go b/vendor/github.com/moby/moby/daemon/cluster/secrets.go new file mode 100644 index 000000000..3947286cb --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/secrets.go @@ -0,0 +1,117 @@ +package cluster + +import ( + apitypes "github.com/docker/docker/api/types" + types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/convert" + swarmapi "github.com/docker/swarmkit/api" + "golang.org/x/net/context" +) + +// GetSecret returns a secret from a managed swarm cluster +func (c *Cluster) GetSecret(input string) (types.Secret, error) { + var secret *swarmapi.Secret + + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + s, err := getSecret(ctx, state.controlClient, input) + if err != nil { + return err + } + secret = s + return nil + }); err != nil { + return types.Secret{}, err + } + return convert.SecretFromGRPC(secret), nil +} + +// GetSecrets returns all secrets of a managed swarm cluster. +func (c *Cluster) GetSecrets(options apitypes.SecretListOptions) ([]types.Secret, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + state := c.currentNodeState() + if !state.IsActiveManager() { + return nil, c.errNoManager(state) + } + + filters, err := newListSecretsFilters(options.Filters) + if err != nil { + return nil, err + } + ctx, cancel := c.getRequestContext() + defer cancel() + + r, err := state.controlClient.ListSecrets(ctx, + &swarmapi.ListSecretsRequest{Filters: filters}) + if err != nil { + return nil, err + } + + secrets := make([]types.Secret, 0, len(r.Secrets)) + + for _, secret := range r.Secrets { + secrets = append(secrets, convert.SecretFromGRPC(secret)) + } + + return secrets, nil +} + +// CreateSecret creates a new secret in a managed swarm cluster. +func (c *Cluster) CreateSecret(s types.SecretSpec) (string, error) { + var resp *swarmapi.CreateSecretResponse + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + secretSpec := convert.SecretSpecToGRPC(s) + + r, err := state.controlClient.CreateSecret(ctx, + &swarmapi.CreateSecretRequest{Spec: &secretSpec}) + if err != nil { + return err + } + resp = r + return nil + }); err != nil { + return "", err + } + return resp.Secret.ID, nil +} + +// RemoveSecret removes a secret from a managed swarm cluster. +func (c *Cluster) RemoveSecret(input string) error { + return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + secret, err := getSecret(ctx, state.controlClient, input) + if err != nil { + return err + } + + req := &swarmapi.RemoveSecretRequest{ + SecretID: secret.ID, + } + + _, err = state.controlClient.RemoveSecret(ctx, req) + return err + }) +} + +// UpdateSecret updates a secret in a managed swarm cluster. 
+// Note: this is not exposed to the CLI; it is available via the API only
+func (c *Cluster) UpdateSecret(input string, version uint64, spec types.SecretSpec) error {
+ return c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
+ secret, err := getSecret(ctx, state.controlClient, input)
+ if err != nil {
+ return err
+ }
+
+ secretSpec := convert.SecretSpecToGRPC(spec)
+
+ _, err = state.controlClient.UpdateSecret(ctx,
+ &swarmapi.UpdateSecretRequest{
+ SecretID: secret.ID,
+ SecretVersion: &swarmapi.Version{
+ Index: version,
+ },
+ Spec: &secretSpec,
+ })
+ return err
+ })
+}
diff --git a/vendor/github.com/moby/moby/daemon/cluster/services.go b/vendor/github.com/moby/moby/daemon/cluster/services.go
new file mode 100644
index 000000000..4f9980206
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/cluster/services.go
@@ -0,0 +1,591 @@
+package cluster
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/distribution/reference"
+ apierrors "github.com/docker/docker/api/errors"
+ apitypes "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/backend"
+ types "github.com/docker/docker/api/types/swarm"
+ timetypes "github.com/docker/docker/api/types/time"
+ "github.com/docker/docker/daemon/cluster/convert"
+ runconfigopts "github.com/docker/docker/runconfig/opts"
+ swarmapi "github.com/docker/swarmkit/api"
+ gogotypes "github.com/gogo/protobuf/types"
+ "github.com/pkg/errors"
+ "golang.org/x/net/context"
+)
+
+// GetServices returns all services of a managed swarm cluster.
+func (c *Cluster) GetServices(options apitypes.ServiceListOptions) ([]types.Service, error) {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+
+ state := c.currentNodeState()
+ if !state.IsActiveManager() {
+ return nil, c.errNoManager(state)
+ }
+
+ // We do the accepted filter check here because the "mode" filter is
+ // processed in the daemon, not in SwarmKit, so it is best to keep the
+ // check in the same file as the filter processing (in the for loop
+ // below).
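+ // (Added illustration, assuming the standard CLI: `docker service ls
+ // --filter mode=global` arrives here as a "mode" filter; it passes the
+ // validation below but is matched against each service's spec in the
+ // loop rather than being forwarded to SwarmKit.)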
+ accepted := map[string]bool{ + "name": true, + "id": true, + "label": true, + "mode": true, + "runtime": true, + } + if err := options.Filters.Validate(accepted); err != nil { + return nil, err + } + + if len(options.Filters.Get("runtime")) == 0 { + // Default to using the container runtime filter + options.Filters.Add("runtime", string(types.RuntimeContainer)) + } + + filters := &swarmapi.ListServicesRequest_Filters{ + NamePrefixes: options.Filters.Get("name"), + IDPrefixes: options.Filters.Get("id"), + Labels: runconfigopts.ConvertKVStringsToMap(options.Filters.Get("label")), + Runtimes: options.Filters.Get("runtime"), + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + r, err := state.controlClient.ListServices( + ctx, + &swarmapi.ListServicesRequest{Filters: filters}) + if err != nil { + return nil, err + } + + services := make([]types.Service, 0, len(r.Services)) + + for _, service := range r.Services { + if options.Filters.Include("mode") { + var mode string + switch service.Spec.GetMode().(type) { + case *swarmapi.ServiceSpec_Global: + mode = "global" + case *swarmapi.ServiceSpec_Replicated: + mode = "replicated" + } + + if !options.Filters.ExactMatch("mode", mode) { + continue + } + } + svcs, err := convert.ServiceFromGRPC(*service) + if err != nil { + return nil, err + } + services = append(services, svcs) + } + + return services, nil +} + +// GetService returns a service based on an ID or name. +func (c *Cluster) GetService(input string, insertDefaults bool) (types.Service, error) { + var service *swarmapi.Service + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + s, err := getService(ctx, state.controlClient, input, insertDefaults) + if err != nil { + return err + } + service = s + return nil + }); err != nil { + return types.Service{}, err + } + svc, err := convert.ServiceFromGRPC(*service) + if err != nil { + return types.Service{}, err + } + return svc, nil +} + +// CreateService creates a new service in a managed swarm cluster. 
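+// (Added note: encodedAuth, when non-empty, is stored on the task as the
+// registry pull credential, and queryRegistry controls whether the image is
+// pinned to a digest before the spec is submitted, as the body below shows.)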
+func (c *Cluster) CreateService(s types.ServiceSpec, encodedAuth string, queryRegistry bool) (*apitypes.ServiceCreateResponse, error) { + var resp *apitypes.ServiceCreateResponse + err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + err := c.populateNetworkID(ctx, state.controlClient, &s) + if err != nil { + return err + } + + serviceSpec, err := convert.ServiceSpecToGRPC(s) + if err != nil { + return apierrors.NewBadRequestError(err) + } + + resp = &apitypes.ServiceCreateResponse{} + + switch serviceSpec.Task.Runtime.(type) { + // handle other runtimes here + case *swarmapi.TaskSpec_Generic: + switch serviceSpec.Task.GetGeneric().Kind { + case string(types.RuntimePlugin): + if s.TaskTemplate.PluginSpec == nil { + return errors.New("plugin spec must be set") + } + } + + r, err := state.controlClient.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec}) + if err != nil { + return err + } + + resp.ID = r.Service.ID + case *swarmapi.TaskSpec_Container: + ctnr := serviceSpec.Task.GetContainer() + if ctnr == nil { + return errors.New("service does not use container tasks") + } + if encodedAuth != "" { + ctnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth} + } + + // retrieve auth config from encoded auth + authConfig := &apitypes.AuthConfig{} + if encodedAuth != "" { + authReader := strings.NewReader(encodedAuth) + dec := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, authReader)) + if err := dec.Decode(authConfig); err != nil { + logrus.Warnf("invalid authconfig: %v", err) + } + } + + // pin image by digest for API versions < 1.30 + // TODO(nishanttotla): The check on "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE" + // should be removed in the future. Since integration tests only use the + // latest API version, so this is no longer required. + if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" && queryRegistry { + digestImage, err := c.imageWithDigestString(ctx, ctnr.Image, authConfig) + if err != nil { + logrus.Warnf("unable to pin image %s to digest: %s", ctnr.Image, err.Error()) + // warning in the client response should be concise + resp.Warnings = append(resp.Warnings, digestWarning(ctnr.Image)) + + } else if ctnr.Image != digestImage { + logrus.Debugf("pinning image %s by digest: %s", ctnr.Image, digestImage) + ctnr.Image = digestImage + + } else { + logrus.Debugf("creating service using supplied digest reference %s", ctnr.Image) + + } + + // Replace the context with a fresh one. + // If we timed out while communicating with the + // registry, then "ctx" will already be expired, which + // would cause UpdateService below to fail. Reusing + // "ctx" could make it impossible to create a service + // if the registry is slow or unresponsive. + var cancel func() + ctx, cancel = c.getRequestContext() + defer cancel() + } + + r, err := state.controlClient.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec}) + if err != nil { + return err + } + + resp.ID = r.Service.ID + } + return nil + }) + + return resp, err +} + +// UpdateService updates existing service to match new properties. 
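+// (Added note: version must match the service's current version index;
+// SwarmKit uses it for optimistic concurrency and rejects updates made
+// against a stale version.)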
+func (c *Cluster) UpdateService(serviceIDOrName string, version uint64, spec types.ServiceSpec, flags apitypes.ServiceUpdateOptions, queryRegistry bool) (*apitypes.ServiceUpdateResponse, error) { + var resp *apitypes.ServiceUpdateResponse + + err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + + err := c.populateNetworkID(ctx, state.controlClient, &spec) + if err != nil { + return err + } + + serviceSpec, err := convert.ServiceSpecToGRPC(spec) + if err != nil { + return apierrors.NewBadRequestError(err) + } + + currentService, err := getService(ctx, state.controlClient, serviceIDOrName, false) + if err != nil { + return err + } + + resp = &apitypes.ServiceUpdateResponse{} + + switch serviceSpec.Task.Runtime.(type) { + case *swarmapi.TaskSpec_Generic: + switch serviceSpec.Task.GetGeneric().Kind { + case string(types.RuntimePlugin): + if spec.TaskTemplate.PluginSpec == nil { + return errors.New("plugin spec must be set") + } + } + case *swarmapi.TaskSpec_Container: + newCtnr := serviceSpec.Task.GetContainer() + if newCtnr == nil { + return errors.New("service does not use container tasks") + } + + encodedAuth := flags.EncodedRegistryAuth + if encodedAuth != "" { + newCtnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth} + } else { + // this is needed because if the encodedAuth isn't being updated then we + // shouldn't lose it, and continue to use the one that was already present + var ctnr *swarmapi.ContainerSpec + switch flags.RegistryAuthFrom { + case apitypes.RegistryAuthFromSpec, "": + ctnr = currentService.Spec.Task.GetContainer() + case apitypes.RegistryAuthFromPreviousSpec: + if currentService.PreviousSpec == nil { + return errors.New("service does not have a previous spec") + } + ctnr = currentService.PreviousSpec.Task.GetContainer() + default: + return errors.New("unsupported registryAuthFrom value") + } + if ctnr == nil { + return errors.New("service does not use container tasks") + } + newCtnr.PullOptions = ctnr.PullOptions + // update encodedAuth so it can be used to pin image by digest + if ctnr.PullOptions != nil { + encodedAuth = ctnr.PullOptions.RegistryAuth + } + } + + // retrieve auth config from encoded auth + authConfig := &apitypes.AuthConfig{} + if encodedAuth != "" { + if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil { + logrus.Warnf("invalid authconfig: %v", err) + } + } + + // pin image by digest for API versions < 1.30 + // TODO(nishanttotla): The check on "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE" + // should be removed in the future. Since integration tests only use the + // latest API version, so this is no longer required. + if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" && queryRegistry { + digestImage, err := c.imageWithDigestString(ctx, newCtnr.Image, authConfig) + if err != nil { + logrus.Warnf("unable to pin image %s to digest: %s", newCtnr.Image, err.Error()) + // warning in the client response should be concise + resp.Warnings = append(resp.Warnings, digestWarning(newCtnr.Image)) + } else if newCtnr.Image != digestImage { + logrus.Debugf("pinning image %s by digest: %s", newCtnr.Image, digestImage) + newCtnr.Image = digestImage + } else { + logrus.Debugf("updating service using supplied digest reference %s", newCtnr.Image) + } + + // Replace the context with a fresh one. 
+ // If we timed out while communicating with the + // registry, then "ctx" will already be expired, which + // would cause UpdateService below to fail. Reusing + // "ctx" could make it impossible to update a service + // if the registry is slow or unresponsive. + var cancel func() + ctx, cancel = c.getRequestContext() + defer cancel() + } + } + + var rollback swarmapi.UpdateServiceRequest_Rollback + switch flags.Rollback { + case "", "none": + rollback = swarmapi.UpdateServiceRequest_NONE + case "previous": + rollback = swarmapi.UpdateServiceRequest_PREVIOUS + default: + return fmt.Errorf("unrecognized rollback option %s", flags.Rollback) + } + + _, err = state.controlClient.UpdateService( + ctx, + &swarmapi.UpdateServiceRequest{ + ServiceID: currentService.ID, + Spec: &serviceSpec, + ServiceVersion: &swarmapi.Version{ + Index: version, + }, + Rollback: rollback, + }, + ) + return err + }) + return resp, err +} + +// RemoveService removes a service from a managed swarm cluster. +func (c *Cluster) RemoveService(input string) error { + return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + service, err := getService(ctx, state.controlClient, input, false) + if err != nil { + return err + } + + _, err = state.controlClient.RemoveService(ctx, &swarmapi.RemoveServiceRequest{ServiceID: service.ID}) + return err + }) +} + +// ServiceLogs collects service logs and writes them back to `config.OutStream` +func (c *Cluster) ServiceLogs(ctx context.Context, selector *backend.LogSelector, config *apitypes.ContainerLogsOptions) (<-chan *backend.LogMessage, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + state := c.currentNodeState() + if !state.IsActiveManager() { + return nil, c.errNoManager(state) + } + + swarmSelector, err := convertSelector(ctx, state.controlClient, selector) + if err != nil { + return nil, errors.Wrap(err, "error making log selector") + } + + // set the streams we'll use + stdStreams := []swarmapi.LogStream{} + if config.ShowStdout { + stdStreams = append(stdStreams, swarmapi.LogStreamStdout) + } + if config.ShowStderr { + stdStreams = append(stdStreams, swarmapi.LogStreamStderr) + } + + // Get tail value squared away - the number of previous log lines we look at + var tail int64 + // in ContainerLogs, if the tail value is ANYTHING non-integer, we just set + // it to -1 (all). i don't agree with that, but i also think no tail value + // should be legitimate. if you don't pass tail, we assume you want "all" + if config.Tail == "all" || config.Tail == "" { + // tail of 0 means send all logs on the swarmkit side + tail = 0 + } else { + t, err := strconv.Atoi(config.Tail) + if err != nil { + return nil, errors.New("tail value must be a positive integer or \"all\"") + } + if t < 0 { + return nil, errors.New("negative tail values not supported") + } + // we actually use negative tail in swarmkit to represent messages + // backwards starting from the beginning. also, -1 means no logs. so, + // basically, for api compat with docker container logs, add one and + // flip the sign. 
we error above if you try to negative tail, which + // isn't supported by docker (and would error deeper in the stack + // anyway) + // + // See the logs protobuf for more information + tail = int64(-(t + 1)) + } + + // get the since value - the time in the past we're looking at logs starting from + var sinceProto *gogotypes.Timestamp + if config.Since != "" { + s, n, err := timetypes.ParseTimestamps(config.Since, 0) + if err != nil { + return nil, errors.Wrap(err, "could not parse since timestamp") + } + since := time.Unix(s, n) + sinceProto, err = gogotypes.TimestampProto(since) + if err != nil { + return nil, errors.Wrap(err, "could not parse timestamp to proto") + } + } + + stream, err := state.logsClient.SubscribeLogs(ctx, &swarmapi.SubscribeLogsRequest{ + Selector: swarmSelector, + Options: &swarmapi.LogSubscriptionOptions{ + Follow: config.Follow, + Streams: stdStreams, + Tail: tail, + Since: sinceProto, + }, + }) + if err != nil { + return nil, err + } + + messageChan := make(chan *backend.LogMessage, 1) + go func() { + defer close(messageChan) + for { + // Check the context before doing anything. + select { + case <-ctx.Done(): + return + default: + } + subscribeMsg, err := stream.Recv() + if err == io.EOF { + return + } + // if we're not io.EOF, push the message in and return + if err != nil { + select { + case <-ctx.Done(): + case messageChan <- &backend.LogMessage{Err: err}: + } + return + } + + for _, msg := range subscribeMsg.Messages { + // make a new message + m := new(backend.LogMessage) + m.Attrs = make([]backend.LogAttr, 0, len(msg.Attrs)+3) + // add the timestamp, adding the error if it fails + m.Timestamp, err = gogotypes.TimestampFromProto(msg.Timestamp) + if err != nil { + m.Err = err + } + + nodeKey := contextPrefix + ".node.id" + serviceKey := contextPrefix + ".service.id" + taskKey := contextPrefix + ".task.id" + + // copy over all of the details + for _, d := range msg.Attrs { + switch d.Key { + case nodeKey, serviceKey, taskKey: + // we have the final say over context details (in case there + // is a conflict (if the user added a detail with a context's + // key for some reason)) + default: + m.Attrs = append(m.Attrs, backend.LogAttr{Key: d.Key, Value: d.Value}) + } + } + m.Attrs = append(m.Attrs, + backend.LogAttr{Key: nodeKey, Value: msg.Context.NodeID}, + backend.LogAttr{Key: serviceKey, Value: msg.Context.ServiceID}, + backend.LogAttr{Key: taskKey, Value: msg.Context.TaskID}, + ) + + switch msg.Stream { + case swarmapi.LogStreamStdout: + m.Source = "stdout" + case swarmapi.LogStreamStderr: + m.Source = "stderr" + } + m.Line = msg.Data + + // there could be a case where the reader stops accepting + // messages and the context is canceled. we need to check that + // here, or otherwise we risk blocking forever on the message + // send. + select { + case <-ctx.Done(): + return + case messageChan <- m: + } + } + } + }() + return messageChan, nil +} + +// convertSelector takes a backend.LogSelector, which contains raw names that +// may or may not be valid, and converts them to an api.LogSelector proto. 
It
+// returns an error if something fails.
+func convertSelector(ctx context.Context, cc swarmapi.ControlClient, selector *backend.LogSelector) (*swarmapi.LogSelector, error) {
+	// don't rely on swarmkit to resolve IDs, do it ourselves
+	swarmSelector := &swarmapi.LogSelector{}
+	for _, s := range selector.Services {
+		service, err := getService(ctx, cc, s, false)
+		if err != nil {
+			return nil, err
+		}
+		c := service.Spec.Task.GetContainer()
+		if c == nil {
+			return nil, errors.New("logs only supported on container tasks")
+		}
+		swarmSelector.ServiceIDs = append(swarmSelector.ServiceIDs, service.ID)
+	}
+	for _, t := range selector.Tasks {
+		task, err := getTask(ctx, cc, t)
+		if err != nil {
+			return nil, err
+		}
+		c := task.Spec.GetContainer()
+		if c == nil {
+			return nil, errors.New("logs only supported on container tasks")
+		}
+		swarmSelector.TaskIDs = append(swarmSelector.TaskIDs, task.ID)
+	}
+	return swarmSelector, nil
+}
+
+// imageWithDigestString takes an image such as name or name:tag
+// and returns the image pinned to a digest, such as name@sha256:34234
+func (c *Cluster) imageWithDigestString(ctx context.Context, image string, authConfig *apitypes.AuthConfig) (string, error) {
+	ref, err := reference.ParseAnyReference(image)
+	if err != nil {
+		return "", err
+	}
+	namedRef, ok := ref.(reference.Named)
+	if !ok {
+		if _, ok := ref.(reference.Digested); ok {
+			return image, nil
+		}
+		return "", errors.Errorf("unknown image reference format: %s", image)
+	}
+	// only query the registry if the reference is not canonical (i.e. it has no digest)
+	if _, ok := namedRef.(reference.Canonical); !ok {
+		namedRef = reference.TagNameOnly(namedRef)
+
+		taggedRef, ok := namedRef.(reference.NamedTagged)
+		if !ok {
+			return "", errors.Errorf("image reference not tagged: %s", image)
+		}
+
+		repo, _, err := c.config.Backend.GetRepository(ctx, taggedRef, authConfig)
+		if err != nil {
+			return "", err
+		}
+		dscrptr, err := repo.Tags(ctx).Get(ctx, taggedRef.Tag())
+		if err != nil {
+			return "", err
+		}
+
+		namedDigestedRef, err := reference.WithDigest(taggedRef, dscrptr.Digest)
+		if err != nil {
+			return "", err
+		}
+		// return the familiar form until the interface is updated to return a typed reference
+		return reference.FamiliarString(namedDigestedRef), nil
+	}
+	// the reference already contains a digest, so just return it
+	return reference.FamiliarString(ref), nil
+}
+
+// digestWarning constructs a formatted warning string
+// using the image name that could not be pinned by digest. The
+// formatting is hardcoded, but could be made smarter in the future.
+func digestWarning(image string) string {
+	return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest.
Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image) +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/swarm.go b/vendor/github.com/moby/moby/daemon/cluster/swarm.go new file mode 100644 index 000000000..ef0596b6c --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/swarm.go @@ -0,0 +1,549 @@ +package cluster + +import ( + "fmt" + "net" + "strings" + "time" + + "github.com/Sirupsen/logrus" + apierrors "github.com/docker/docker/api/errors" + apitypes "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/convert" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/signal" + swarmapi "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/encryption" + swarmnode "github.com/docker/swarmkit/node" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// Init initializes new cluster from user provided request. +func (c *Cluster) Init(req types.InitRequest) (string, error) { + c.controlMutex.Lock() + defer c.controlMutex.Unlock() + if c.nr != nil { + if req.ForceNewCluster { + // Take c.mu temporarily to wait for presently running + // API handlers to finish before shutting down the node. + c.mu.Lock() + c.mu.Unlock() + + if err := c.nr.Stop(); err != nil { + return "", err + } + } else { + return "", errSwarmExists + } + } + + if err := validateAndSanitizeInitRequest(&req); err != nil { + return "", apierrors.NewBadRequestError(err) + } + + listenHost, listenPort, err := resolveListenAddr(req.ListenAddr) + if err != nil { + return "", err + } + + advertiseHost, advertisePort, err := c.resolveAdvertiseAddr(req.AdvertiseAddr, listenPort) + if err != nil { + return "", err + } + + dataPathAddr, err := resolveDataPathAddr(req.DataPathAddr) + if err != nil { + return "", err + } + + localAddr := listenHost + + // If the local address is undetermined, the advertise address + // will be used as local address, if it belongs to this system. + // If the advertise address is not local, then we try to find + // a system address to use as local address. If this fails, + // we give up and ask the user to pass the listen address. 
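+	//
+	// Illustrative example (values assumed, not from the source): with
+	// ListenAddr "0.0.0.0:2377" and AdvertiseAddr "10.0.0.5:2377", the
+	// advertise IP 10.0.0.5 is used as the local address only if it is
+	// assigned to one of this machine's interfaces; otherwise
+	// resolveSystemAddr must pick a usable system address, and if that
+	// fails the user is asked for an explicit listen address.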
+ if net.ParseIP(localAddr).IsUnspecified() { + advertiseIP := net.ParseIP(advertiseHost) + + found := false + for _, systemIP := range listSystemIPs() { + if systemIP.Equal(advertiseIP) { + localAddr = advertiseIP.String() + found = true + break + } + } + + if !found { + ip, err := c.resolveSystemAddr() + if err != nil { + logrus.Warnf("Could not find a local address: %v", err) + return "", errMustSpecifyListenAddr + } + localAddr = ip.String() + } + } + + nr, err := c.newNodeRunner(nodeStartConfig{ + forceNewCluster: req.ForceNewCluster, + autolock: req.AutoLockManagers, + LocalAddr: localAddr, + ListenAddr: net.JoinHostPort(listenHost, listenPort), + AdvertiseAddr: net.JoinHostPort(advertiseHost, advertisePort), + DataPathAddr: dataPathAddr, + availability: req.Availability, + }) + if err != nil { + return "", err + } + c.mu.Lock() + c.nr = nr + c.mu.Unlock() + + if err := <-nr.Ready(); err != nil { + c.mu.Lock() + c.nr = nil + c.mu.Unlock() + if !req.ForceNewCluster { // if failure on first attempt don't keep state + if err := clearPersistentState(c.root); err != nil { + return "", err + } + } + return "", err + } + state := nr.State() + if state.swarmNode == nil { // should never happen but protect from panic + return "", errors.New("invalid cluster state for spec initialization") + } + if err := initClusterSpec(state.swarmNode, req.Spec); err != nil { + return "", err + } + return state.NodeID(), nil +} + +// Join makes current Cluster part of an existing swarm cluster. +func (c *Cluster) Join(req types.JoinRequest) error { + c.controlMutex.Lock() + defer c.controlMutex.Unlock() + c.mu.Lock() + if c.nr != nil { + c.mu.Unlock() + return errSwarmExists + } + c.mu.Unlock() + + if err := validateAndSanitizeJoinRequest(&req); err != nil { + return apierrors.NewBadRequestError(err) + } + + listenHost, listenPort, err := resolveListenAddr(req.ListenAddr) + if err != nil { + return err + } + + var advertiseAddr string + if req.AdvertiseAddr != "" { + advertiseHost, advertisePort, err := c.resolveAdvertiseAddr(req.AdvertiseAddr, listenPort) + // For joining, we don't need to provide an advertise address, + // since the remote side can detect it. + if err == nil { + advertiseAddr = net.JoinHostPort(advertiseHost, advertisePort) + } + } + + dataPathAddr, err := resolveDataPathAddr(req.DataPathAddr) + if err != nil { + return err + } + + nr, err := c.newNodeRunner(nodeStartConfig{ + RemoteAddr: req.RemoteAddrs[0], + ListenAddr: net.JoinHostPort(listenHost, listenPort), + AdvertiseAddr: advertiseAddr, + DataPathAddr: dataPathAddr, + joinAddr: req.RemoteAddrs[0], + joinToken: req.JoinToken, + availability: req.Availability, + }) + if err != nil { + return err + } + + c.mu.Lock() + c.nr = nr + c.mu.Unlock() + + select { + case <-time.After(swarmConnectTimeout): + return errSwarmJoinTimeoutReached + case err := <-nr.Ready(): + if err != nil { + c.mu.Lock() + c.nr = nil + c.mu.Unlock() + if err := clearPersistentState(c.root); err != nil { + return err + } + } + return err + } +} + +// Inspect retrieves the configuration properties of a managed swarm cluster. +func (c *Cluster) Inspect() (types.Swarm, error) { + var swarm *swarmapi.Cluster + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + s, err := getSwarm(ctx, state.controlClient) + if err != nil { + return err + } + swarm = s + return nil + }); err != nil { + return types.Swarm{}, err + } + return convert.SwarmFromGRPC(*swarm), nil +} + +// Update updates configuration of a managed swarm cluster. 
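+//
+// A hedged usage sketch (not from this file; names and values are
+// illustrative): read the current state via Inspect, mutate the returned
+// spec, and pass the version index back so swarmkit can reject concurrent
+// modifications:
+//
+//	sw, _ := c.Inspect()
+//	sw.Spec.Raft.SnapshotInterval = 10000
+//	err := c.Update(sw.Version.Index, sw.Spec, types.UpdateFlags{})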
+func (c *Cluster) Update(version uint64, spec types.Spec, flags types.UpdateFlags) error {
+	return c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
+		swarm, err := getSwarm(ctx, state.controlClient)
+		if err != nil {
+			return err
+		}
+
+		// In Update, the client should provide the complete spec of the swarm,
+		// including Name and Labels. If a field is specified as 0 or nil, its
+		// default value will be used by swarmkit.
+		clusterSpec, err := convert.SwarmSpecToGRPC(spec)
+		if err != nil {
+			return apierrors.NewBadRequestError(err)
+		}
+
+		_, err = state.controlClient.UpdateCluster(
+			ctx,
+			&swarmapi.UpdateClusterRequest{
+				ClusterID: swarm.ID,
+				Spec:      &clusterSpec,
+				ClusterVersion: &swarmapi.Version{
+					Index: version,
+				},
+				Rotation: swarmapi.KeyRotation{
+					WorkerJoinToken:  flags.RotateWorkerToken,
+					ManagerJoinToken: flags.RotateManagerToken,
+					ManagerUnlockKey: flags.RotateManagerUnlockKey,
+				},
+			},
+		)
+		return err
+	})
+}
+
+// GetUnlockKey returns the unlock key for the swarm.
+func (c *Cluster) GetUnlockKey() (string, error) {
+	var resp *swarmapi.GetUnlockKeyResponse
+	if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
+		client := swarmapi.NewCAClient(state.grpcConn)
+
+		r, err := client.GetUnlockKey(ctx, &swarmapi.GetUnlockKeyRequest{})
+		if err != nil {
+			return err
+		}
+		resp = r
+		return nil
+	}); err != nil {
+		return "", err
+	}
+	if len(resp.UnlockKey) == 0 {
+		// no key
+		return "", nil
+	}
+	return encryption.HumanReadableKey(resp.UnlockKey), nil
+}
+
+// UnlockSwarm provides a key to decrypt data that is encrypted at rest.
+func (c *Cluster) UnlockSwarm(req types.UnlockRequest) error {
+	c.controlMutex.Lock()
+	defer c.controlMutex.Unlock()
+
+	c.mu.RLock()
+	state := c.currentNodeState()
+
+	if !state.IsActiveManager() {
+		// When the manager is not active, return the error unless it is
+		// errSwarmLocked, in which case the swarm is locked and unlocking
+		// can proceed.
+		if err := c.errNoManager(state); err != errSwarmLocked {
+			c.mu.RUnlock()
+			return err
+		}
+	} else {
+		// when the manager is active, the swarm is not locked, so return an error
+		c.mu.RUnlock()
+		return errors.New("swarm is not locked")
+	}
+
+	// execution only reaches this point when the swarm is locked
+	nr := c.nr
+	c.mu.RUnlock()
+
+	key, err := encryption.ParseHumanReadableKey(req.UnlockKey)
+	if err != nil {
+		return err
+	}
+
+	config := nr.config
+	config.lockKey = key
+	if err := nr.Stop(); err != nil {
+		return err
+	}
+	nr, err = c.newNodeRunner(config)
+	if err != nil {
+		return err
+	}
+
+	c.mu.Lock()
+	c.nr = nr
+	c.mu.Unlock()
+
+	if err := <-nr.Ready(); err != nil {
+		if errors.Cause(err) == errSwarmLocked {
+			return errors.New("swarm could not be unlocked: invalid key provided")
+		}
+		return fmt.Errorf("swarm component could not be started: %v", err)
+	}
+	return nil
+}
+
+// Leave shuts down Cluster and removes current state.
+func (c *Cluster) Leave(force bool) error {
+	c.controlMutex.Lock()
+	defer c.controlMutex.Unlock()
+
+	c.mu.Lock()
+	nr := c.nr
+	if nr == nil {
+		c.mu.Unlock()
+		return errNoSwarm
+	}
+
+	state := c.currentNodeState()
+
+	c.mu.Unlock()
+
+	if errors.Cause(state.err) == errSwarmLocked && !force {
+		// leaving a locked swarm without --force is not allowed
+		return errors.New("Swarm is encrypted and locked. Please unlock it first or use `--force` to ignore this message.")
+	}
+
+	if state.IsManager() && !force {
+		msg := "You are attempting to leave the swarm on a node that is participating as a manager. 
" + if state.IsActiveManager() { + active, reachable, unreachable, err := managerStats(state.controlClient, state.NodeID()) + if err == nil { + if active && removingManagerCausesLossOfQuorum(reachable, unreachable) { + if isLastManager(reachable, unreachable) { + msg += "Removing the last manager erases all current state of the swarm. Use `--force` to ignore this message. " + return errors.New(msg) + } + msg += fmt.Sprintf("Removing this node leaves %v managers out of %v. Without a Raft quorum your swarm will be inaccessible. ", reachable-1, reachable+unreachable) + } + } + } else { + msg += "Doing so may lose the consensus of your cluster. " + } + + msg += "The only way to restore a swarm that has lost consensus is to reinitialize it with `--force-new-cluster`. Use `--force` to suppress this message." + return errors.New(msg) + } + // release readers in here + if err := nr.Stop(); err != nil { + logrus.Errorf("failed to shut down cluster node: %v", err) + signal.DumpStacks("") + return err + } + + c.mu.Lock() + c.nr = nil + c.mu.Unlock() + + if nodeID := state.NodeID(); nodeID != "" { + nodeContainers, err := c.listContainerForNode(nodeID) + if err != nil { + return err + } + for _, id := range nodeContainers { + if err := c.config.Backend.ContainerRm(id, &apitypes.ContainerRmConfig{ForceRemove: true}); err != nil { + logrus.Errorf("error removing %v: %v", id, err) + } + } + } + + // todo: cleanup optional? + if err := clearPersistentState(c.root); err != nil { + return err + } + c.config.Backend.DaemonLeavesCluster() + return nil +} + +// Info returns information about the current cluster state. +func (c *Cluster) Info() types.Info { + info := types.Info{ + NodeAddr: c.GetAdvertiseAddress(), + } + c.mu.RLock() + defer c.mu.RUnlock() + + state := c.currentNodeState() + info.LocalNodeState = state.status + if state.err != nil { + info.Error = state.err.Error() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + if state.IsActiveManager() { + info.ControlAvailable = true + swarm, err := c.Inspect() + if err != nil { + info.Error = err.Error() + } + + info.Cluster = &swarm.ClusterInfo + + if r, err := state.controlClient.ListNodes(ctx, &swarmapi.ListNodesRequest{}); err != nil { + info.Error = err.Error() + } else { + info.Nodes = len(r.Nodes) + for _, n := range r.Nodes { + if n.ManagerStatus != nil { + info.Managers = info.Managers + 1 + } + } + } + } + + if state.swarmNode != nil { + for _, r := range state.swarmNode.Remotes() { + info.RemoteManagers = append(info.RemoteManagers, types.Peer{NodeID: r.NodeID, Addr: r.Addr}) + } + info.NodeID = state.swarmNode.NodeID() + } + + return info +} + +func validateAndSanitizeInitRequest(req *types.InitRequest) error { + var err error + req.ListenAddr, err = validateAddr(req.ListenAddr) + if err != nil { + return fmt.Errorf("invalid ListenAddr %q: %v", req.ListenAddr, err) + } + + if req.Spec.Annotations.Name == "" { + req.Spec.Annotations.Name = "default" + } else if req.Spec.Annotations.Name != "default" { + return errors.New(`swarm spec must be named "default"`) + } + + return nil +} + +func validateAndSanitizeJoinRequest(req *types.JoinRequest) error { + var err error + req.ListenAddr, err = validateAddr(req.ListenAddr) + if err != nil { + return fmt.Errorf("invalid ListenAddr %q: %v", req.ListenAddr, err) + } + if len(req.RemoteAddrs) == 0 { + return errors.New("at least 1 RemoteAddr is required to join") + } + for i := range req.RemoteAddrs { + req.RemoteAddrs[i], err = validateAddr(req.RemoteAddrs[i]) + if err != nil { + return 
fmt.Errorf("invalid remoteAddr %q: %v", req.RemoteAddrs[i], err) + } + } + return nil +} + +func validateAddr(addr string) (string, error) { + if addr == "" { + return addr, errors.New("invalid empty address") + } + newaddr, err := opts.ParseTCPAddr(addr, defaultAddr) + if err != nil { + return addr, nil + } + return strings.TrimPrefix(newaddr, "tcp://"), nil +} + +func initClusterSpec(node *swarmnode.Node, spec types.Spec) error { + ctx, _ := context.WithTimeout(context.Background(), 5*time.Second) + for conn := range node.ListenControlSocket(ctx) { + if ctx.Err() != nil { + return ctx.Err() + } + if conn != nil { + client := swarmapi.NewControlClient(conn) + var cluster *swarmapi.Cluster + for i := 0; ; i++ { + lcr, err := client.ListClusters(ctx, &swarmapi.ListClustersRequest{}) + if err != nil { + return fmt.Errorf("error on listing clusters: %v", err) + } + if len(lcr.Clusters) == 0 { + if i < 10 { + time.Sleep(200 * time.Millisecond) + continue + } + return errors.New("empty list of clusters was returned") + } + cluster = lcr.Clusters[0] + break + } + // In init, we take the initial default values from swarmkit, and merge + // any non nil or 0 value from spec to GRPC spec. This will leave the + // default value alone. + // Note that this is different from Update(), as in Update() we expect + // user to specify the complete spec of the cluster (as they already know + // the existing one and knows which field to update) + clusterSpec, err := convert.MergeSwarmSpecToGRPC(spec, cluster.Spec) + if err != nil { + return fmt.Errorf("error updating cluster settings: %v", err) + } + _, err = client.UpdateCluster(ctx, &swarmapi.UpdateClusterRequest{ + ClusterID: cluster.ID, + ClusterVersion: &cluster.Meta.Version, + Spec: &clusterSpec, + }) + if err != nil { + return fmt.Errorf("error updating cluster settings: %v", err) + } + return nil + } + } + return ctx.Err() +} + +func (c *Cluster) listContainerForNode(nodeID string) ([]string, error) { + var ids []string + filters := filters.NewArgs() + filters.Add("label", fmt.Sprintf("com.docker.swarm.node.id=%s", nodeID)) + containers, err := c.config.Backend.Containers(&apitypes.ContainerListOptions{ + Filters: filters, + }) + if err != nil { + return []string{}, err + } + for _, c := range containers { + ids = append(ids, c.ID) + } + return ids, nil +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/tasks.go b/vendor/github.com/moby/moby/daemon/cluster/tasks.go new file mode 100644 index 000000000..26706a2fa --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/tasks.go @@ -0,0 +1,86 @@ +package cluster + +import ( + apitypes "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/convert" + swarmapi "github.com/docker/swarmkit/api" + "golang.org/x/net/context" +) + +// GetTasks returns a list of tasks matching the filter options. 
+func (c *Cluster) GetTasks(options apitypes.TaskListOptions) ([]types.Task, error) { + var r *swarmapi.ListTasksResponse + + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + filterTransform := func(filter filters.Args) error { + if filter.Include("service") { + serviceFilters := filter.Get("service") + for _, serviceFilter := range serviceFilters { + service, err := getService(ctx, state.controlClient, serviceFilter, false) + if err != nil { + return err + } + filter.Del("service", serviceFilter) + filter.Add("service", service.ID) + } + } + if filter.Include("node") { + nodeFilters := filter.Get("node") + for _, nodeFilter := range nodeFilters { + node, err := getNode(ctx, state.controlClient, nodeFilter) + if err != nil { + return err + } + filter.Del("node", nodeFilter) + filter.Add("node", node.ID) + } + } + if !filter.Include("runtime") { + // default to only showing container tasks + filter.Add("runtime", "container") + filter.Add("runtime", "") + } + return nil + } + + filters, err := newListTasksFilters(options.Filters, filterTransform) + if err != nil { + return err + } + + r, err = state.controlClient.ListTasks( + ctx, + &swarmapi.ListTasksRequest{Filters: filters}) + return err + }); err != nil { + return nil, err + } + + tasks := make([]types.Task, 0, len(r.Tasks)) + for _, task := range r.Tasks { + t, err := convert.TaskFromGRPC(*task) + if err != nil { + return nil, err + } + tasks = append(tasks, t) + } + return tasks, nil +} + +// GetTask returns a task by an ID. +func (c *Cluster) GetTask(input string) (types.Task, error) { + var task *swarmapi.Task + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + t, err := getTask(ctx, state.controlClient, input) + if err != nil { + return err + } + task = t + return nil + }); err != nil { + return types.Task{}, err + } + return convert.TaskFromGRPC(*task) +} diff --git a/vendor/github.com/moby/moby/daemon/cluster/utils.go b/vendor/github.com/moby/moby/daemon/cluster/utils.go new file mode 100644 index 000000000..93827961e --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/cluster/utils.go @@ -0,0 +1,63 @@ +package cluster + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/ioutils" +) + +func loadPersistentState(root string) (*nodeStartConfig, error) { + dt, err := ioutil.ReadFile(filepath.Join(root, stateFile)) + if err != nil { + return nil, err + } + // missing certificate means no actual state to restore from + if _, err := os.Stat(filepath.Join(root, "certificates/swarm-node.crt")); err != nil { + if os.IsNotExist(err) { + clearPersistentState(root) + } + return nil, err + } + var st nodeStartConfig + if err := json.Unmarshal(dt, &st); err != nil { + return nil, err + } + return &st, nil +} + +func savePersistentState(root string, config nodeStartConfig) error { + dt, err := json.Marshal(config) + if err != nil { + return err + } + return ioutils.AtomicWriteFile(filepath.Join(root, stateFile), dt, 0600) +} + +func clearPersistentState(root string) error { + // todo: backup this data instead of removing? 
+	// rather than deleting the entire swarm directory, delete its contents in order to preserve the inode
+	// (for example, allowing it to remain bind-mounted)
+	files, err := ioutil.ReadDir(root)
+	if err != nil {
+		return err
+	}
+
+	for _, f := range files {
+		if err := os.RemoveAll(filepath.Join(root, f.Name())); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func removingManagerCausesLossOfQuorum(reachable, unreachable int) bool {
+	return reachable-2 <= unreachable
+}
+
+func isLastManager(reachable, unreachable int) bool {
+	return reachable == 1 && unreachable == 0
+}
diff --git a/vendor/github.com/moby/moby/daemon/commit.go b/vendor/github.com/moby/moby/daemon/commit.go
new file mode 100644
index 000000000..084f48858
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/commit.go
@@ -0,0 +1,252 @@
+package daemon
+
+import (
+	"encoding/json"
+	"io"
+	"runtime"
+	"strings"
+	"time"
+
+	"github.com/docker/distribution/reference"
+	"github.com/docker/docker/api/types/backend"
+	containertypes "github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/builder/dockerfile"
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/layer"
+	"github.com/docker/docker/pkg/ioutils"
+	"github.com/pkg/errors"
+)
+
+// merge merges two Configs: the image container configuration (default values)
+// and the user container configuration, either passed by the API or generated
+// by the CLI.
+// It will mutate the specified user configuration (userConf) with the image
+// configuration where the user configuration is incomplete.
+func merge(userConf, imageConf *containertypes.Config) error {
+	if userConf.User == "" {
+		userConf.User = imageConf.User
+	}
+	if len(userConf.ExposedPorts) == 0 {
+		userConf.ExposedPorts = imageConf.ExposedPorts
+	} else if imageConf.ExposedPorts != nil {
+		for port := range imageConf.ExposedPorts {
+			if _, exists := userConf.ExposedPorts[port]; !exists {
+				userConf.ExposedPorts[port] = struct{}{}
+			}
+		}
+	}
+
+	if len(userConf.Env) == 0 {
+		userConf.Env = imageConf.Env
+	} else {
+		for _, imageEnv := range imageConf.Env {
+			found := false
+			imageEnvKey := strings.Split(imageEnv, "=")[0]
+			for _, userEnv := range userConf.Env {
+				userEnvKey := strings.Split(userEnv, "=")[0]
+				if runtime.GOOS == "windows" {
+					// Case insensitive environment variables on Windows
+					imageEnvKey = strings.ToUpper(imageEnvKey)
+					userEnvKey = strings.ToUpper(userEnvKey)
+				}
+				if imageEnvKey == userEnvKey {
+					found = true
+					break
+				}
+			}
+			if !found {
+				userConf.Env = append(userConf.Env, imageEnv)
+			}
+		}
+	}
+
+	if userConf.Labels == nil {
+		userConf.Labels = map[string]string{}
+	}
+	for l, v := range imageConf.Labels {
+		if _, ok := userConf.Labels[l]; !ok {
+			userConf.Labels[l] = v
+		}
+	}
+
+	if len(userConf.Entrypoint) == 0 {
+		if len(userConf.Cmd) == 0 {
+			userConf.Cmd = imageConf.Cmd
+			userConf.ArgsEscaped = imageConf.ArgsEscaped
+		}
+
+		if userConf.Entrypoint == nil {
+			userConf.Entrypoint = imageConf.Entrypoint
+		}
+	}
+	if imageConf.Healthcheck != nil {
+		if userConf.Healthcheck == nil {
+			userConf.Healthcheck = imageConf.Healthcheck
+		} else {
+			if len(userConf.Healthcheck.Test) == 0 {
+				userConf.Healthcheck.Test = imageConf.Healthcheck.Test
+			}
+			if userConf.Healthcheck.Interval == 0 {
+				userConf.Healthcheck.Interval = imageConf.Healthcheck.Interval
+			}
+			if userConf.Healthcheck.Timeout == 0 {
+				userConf.Healthcheck.Timeout = imageConf.Healthcheck.Timeout
+			}
+			if userConf.Healthcheck.StartPeriod == 0 {
userConf.Healthcheck.StartPeriod = imageConf.Healthcheck.StartPeriod + } + if userConf.Healthcheck.Retries == 0 { + userConf.Healthcheck.Retries = imageConf.Healthcheck.Retries + } + } + } + + if userConf.WorkingDir == "" { + userConf.WorkingDir = imageConf.WorkingDir + } + if len(userConf.Volumes) == 0 { + userConf.Volumes = imageConf.Volumes + } else { + for k, v := range imageConf.Volumes { + userConf.Volumes[k] = v + } + } + + if userConf.StopSignal == "" { + userConf.StopSignal = imageConf.StopSignal + } + return nil +} + +// Commit creates a new filesystem image from the current state of a container. +// The image can optionally be tagged into a repository. +func (daemon *Daemon) Commit(name string, c *backend.ContainerCommitConfig) (string, error) { + start := time.Now() + container, err := daemon.GetContainer(name) + if err != nil { + return "", err + } + + // It is not possible to commit a running container on Windows and on Solaris. + if (runtime.GOOS == "windows" || runtime.GOOS == "solaris") && container.IsRunning() { + return "", errors.Errorf("%+v does not support commit of a running container", runtime.GOOS) + } + + if c.Pause && !container.IsPaused() { + daemon.containerPause(container) + defer daemon.containerUnpause(container) + } + + newConfig, err := dockerfile.BuildFromConfig(c.Config, c.Changes) + if err != nil { + return "", err + } + + if c.MergeConfigs { + if err := merge(newConfig, container.Config); err != nil { + return "", err + } + } + + rwTar, err := daemon.exportContainerRw(container) + if err != nil { + return "", err + } + defer func() { + if rwTar != nil { + rwTar.Close() + } + }() + + var parent *image.Image + if container.ImageID == "" { + parent = new(image.Image) + parent.RootFS = image.NewRootFS() + } else { + parent, err = daemon.stores[container.Platform].imageStore.Get(container.ImageID) + if err != nil { + return "", err + } + } + + l, err := daemon.stores[container.Platform].layerStore.Register(rwTar, parent.RootFS.ChainID(), layer.Platform(container.Platform)) + if err != nil { + return "", err + } + defer layer.ReleaseAndLog(daemon.stores[container.Platform].layerStore, l) + + containerConfig := c.ContainerConfig + if containerConfig == nil { + containerConfig = container.Config + } + cc := image.ChildConfig{ + ContainerID: container.ID, + Author: c.Author, + Comment: c.Comment, + ContainerConfig: containerConfig, + Config: newConfig, + DiffID: l.DiffID(), + } + config, err := json.Marshal(image.NewChildImage(parent, cc, container.Platform)) + if err != nil { + return "", err + } + + id, err := daemon.stores[container.Platform].imageStore.Create(config) + if err != nil { + return "", err + } + + if container.ImageID != "" { + if err := daemon.stores[container.Platform].imageStore.SetParent(id, container.ImageID); err != nil { + return "", err + } + } + + imageRef := "" + if c.Repo != "" { + newTag, err := reference.ParseNormalizedNamed(c.Repo) // todo: should move this to API layer + if err != nil { + return "", err + } + if !reference.IsNameOnly(newTag) { + return "", errors.Errorf("unexpected repository name: %s", c.Repo) + } + if c.Tag != "" { + if newTag, err = reference.WithTag(newTag, c.Tag); err != nil { + return "", err + } + } + if err := daemon.TagImageWithReference(id, container.Platform, newTag); err != nil { + return "", err + } + imageRef = reference.FamiliarString(newTag) + } + + attributes := map[string]string{ + "comment": c.Comment, + "imageID": id.String(), + "imageRef": imageRef, + } + 
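+	// Record a "commit" event carrying the attributes assembled above so
+	// that clients watching the daemon's event stream can correlate the
+	// new image ID (and optional tag) with the source container.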
daemon.LogContainerEventWithAttributes(container, "commit", attributes) + containerActions.WithValues("commit").UpdateSince(start) + return id.String(), nil +} + +func (daemon *Daemon) exportContainerRw(container *container.Container) (io.ReadCloser, error) { + if err := daemon.Mount(container); err != nil { + return nil, err + } + + archive, err := container.RWLayer.TarStream() + if err != nil { + daemon.Unmount(container) // logging is already handled in the `Unmount` function + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + archive.Close() + return container.RWLayer.Unmount() + }), + nil +} diff --git a/vendor/github.com/moby/moby/daemon/config/config.go b/vendor/github.com/moby/moby/daemon/config/config.go new file mode 100644 index 000000000..86a1356c5 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/config/config.go @@ -0,0 +1,530 @@ +package config + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "reflect" + "runtime" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + daemondiscovery "github.com/docker/docker/daemon/discovery" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/authorization" + "github.com/docker/docker/pkg/discovery" + "github.com/docker/docker/registry" + "github.com/imdario/mergo" + "github.com/spf13/pflag" +) + +const ( + // DefaultMaxConcurrentDownloads is the default value for + // maximum number of downloads that + // may take place at a time for each pull. + DefaultMaxConcurrentDownloads = 3 + // DefaultMaxConcurrentUploads is the default value for + // maximum number of uploads that + // may take place at a time for each push. + DefaultMaxConcurrentUploads = 5 + // StockRuntimeName is the reserved name/alias used to represent the + // OCI runtime being shipped with the docker daemon package. + StockRuntimeName = "runc" + // DefaultShmSize is the default value for container's shm size + DefaultShmSize = int64(67108864) + // DefaultNetworkMtu is the default value for network MTU + DefaultNetworkMtu = 1500 + // DisableNetworkBridge is the default value of the option to disable network bridge + DisableNetworkBridge = "none" + // DefaultInitBinary is the name of the default init binary + DefaultInitBinary = "docker-init" +) + +// flatOptions contains configuration keys +// that MUST NOT be parsed as deep structures. +// Use this to differentiate these options +// with others like the ones in CommonTLSOptions. +var flatOptions = map[string]bool{ + "cluster-store-opts": true, + "log-opts": true, + "runtimes": true, + "default-ulimits": true, +} + +// LogConfig represents the default log configuration. +// It includes json tags to deserialize configuration from a file +// using the same names that the flags in the command line use. +type LogConfig struct { + Type string `json:"log-driver,omitempty"` + Config map[string]string `json:"log-opts,omitempty"` +} + +// commonBridgeConfig stores all the platform-common bridge driver specific +// configuration. +type commonBridgeConfig struct { + Iface string `json:"bridge,omitempty"` + FixedCIDR string `json:"fixed-cidr,omitempty"` +} + +// CommonTLSOptions defines TLS configuration for the daemon server. +// It includes json tags to deserialize configuration from a file +// using the same names that the flags in the command line use. 
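+//
+// For example, the corresponding keys in a daemon.json file (paths are
+// illustrative, not defaults from this package):
+//
+//	{
+//	  "tlscacert": "/etc/docker/ca.pem",
+//	  "tlscert": "/etc/docker/server-cert.pem",
+//	  "tlskey": "/etc/docker/server-key.pem"
+//	}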
+type CommonTLSOptions struct { + CAFile string `json:"tlscacert,omitempty"` + CertFile string `json:"tlscert,omitempty"` + KeyFile string `json:"tlskey,omitempty"` +} + +// CommonConfig defines the configuration of a docker daemon which is +// common across platforms. +// It includes json tags to deserialize configuration from a file +// using the same names that the flags in the command line use. +type CommonConfig struct { + AuthzMiddleware *authorization.Middleware `json:"-"` + AuthorizationPlugins []string `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins + AutoRestart bool `json:"-"` + Context map[string][]string `json:"-"` + DisableBridge bool `json:"-"` + DNS []string `json:"dns,omitempty"` + DNSOptions []string `json:"dns-opts,omitempty"` + DNSSearch []string `json:"dns-search,omitempty"` + ExecOptions []string `json:"exec-opts,omitempty"` + GraphDriver string `json:"storage-driver,omitempty"` + GraphOptions []string `json:"storage-opts,omitempty"` + Labels []string `json:"labels,omitempty"` + Mtu int `json:"mtu,omitempty"` + Pidfile string `json:"pidfile,omitempty"` + RawLogs bool `json:"raw-logs,omitempty"` + RootDeprecated string `json:"graph,omitempty"` + Root string `json:"data-root,omitempty"` + SocketGroup string `json:"group,omitempty"` + CorsHeaders string `json:"api-cors-header,omitempty"` + EnableCors bool `json:"api-enable-cors,omitempty"` + + // TrustKeyPath is used to generate the daemon ID and for signing schema 1 manifests + // when pushing to a registry which does not support schema 2. This field is marked as + // deprecated because schema 1 manifests are deprecated in favor of schema 2 and the + // daemon ID will use a dedicated identifier not shared with exported signatures. + TrustKeyPath string `json:"deprecated-key-path,omitempty"` + + // LiveRestoreEnabled determines whether we should keep containers + // alive upon daemon shutdown/start + LiveRestoreEnabled bool `json:"live-restore,omitempty"` + + // ClusterStore is the storage backend used for the cluster information. It is used by both + // multihost networking (to store networks and endpoints information) and by the node discovery + // mechanism. + ClusterStore string `json:"cluster-store,omitempty"` + + // ClusterOpts is used to pass options to the discovery package for tuning libkv settings, such + // as TLS configuration settings. + ClusterOpts map[string]string `json:"cluster-store-opts,omitempty"` + + // ClusterAdvertise is the network endpoint that the Engine advertises for the purpose of node + // discovery. This should be a 'host:port' combination on which that daemon instance is + // reachable by other hosts. + ClusterAdvertise string `json:"cluster-advertise,omitempty"` + + // MaxConcurrentDownloads is the maximum number of downloads that + // may take place at a time for each pull. + MaxConcurrentDownloads *int `json:"max-concurrent-downloads,omitempty"` + + // MaxConcurrentUploads is the maximum number of uploads that + // may take place at a time for each push. 
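+	// A nil pointer means the value was not configured; the daemon is
+	// expected to substitute DefaultMaxConcurrentUploads in that case (an
+	// assumption about the wiring elsewhere, not established in this file),
+	// while Validate below only rejects explicit negative values.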
+ MaxConcurrentUploads *int `json:"max-concurrent-uploads,omitempty"` + + // ShutdownTimeout is the timeout value (in seconds) the daemon will wait for the container + // to stop when daemon is being shutdown + ShutdownTimeout int `json:"shutdown-timeout,omitempty"` + + Debug bool `json:"debug,omitempty"` + Hosts []string `json:"hosts,omitempty"` + LogLevel string `json:"log-level,omitempty"` + TLS bool `json:"tls,omitempty"` + TLSVerify bool `json:"tlsverify,omitempty"` + + // Embedded structs that allow config + // deserialization without the full struct. + CommonTLSOptions + + // SwarmDefaultAdvertiseAddr is the default host/IP or network interface + // to use if a wildcard address is specified in the ListenAddr value + // given to the /swarm/init endpoint and no advertise address is + // specified. + SwarmDefaultAdvertiseAddr string `json:"swarm-default-advertise-addr"` + MetricsAddress string `json:"metrics-addr"` + + LogConfig + BridgeConfig // bridgeConfig holds bridge network specific configuration. + registry.ServiceOptions + + sync.Mutex + // FIXME(vdemeester) This part is not that clear and is mainly dependent on cli flags + // It should probably be handled outside this package. + ValuesSet map[string]interface{} + + Experimental bool `json:"experimental"` // Experimental indicates whether experimental features should be exposed or not + + // Exposed node Generic Resources + NodeGenericResources string `json:"node-generic-resources,omitempty"` +} + +// IsValueSet returns true if a configuration value +// was explicitly set in the configuration file. +func (conf *Config) IsValueSet(name string) bool { + if conf.ValuesSet == nil { + return false + } + _, ok := conf.ValuesSet[name] + return ok +} + +// New returns a new fully initialized Config struct +func New() *Config { + config := Config{} + config.LogConfig.Config = make(map[string]string) + config.ClusterOpts = make(map[string]string) + + if runtime.GOOS != "linux" { + config.V2Only = true + } + return &config +} + +// ParseClusterAdvertiseSettings parses the specified advertise settings +func ParseClusterAdvertiseSettings(clusterStore, clusterAdvertise string) (string, error) { + if runtime.GOOS == "solaris" && (clusterAdvertise != "" || clusterStore != "") { + return "", errors.New("Cluster Advertise Settings not supported on Solaris") + } + if clusterAdvertise == "" { + return "", daemondiscovery.ErrDiscoveryDisabled + } + if clusterStore == "" { + return "", errors.New("invalid cluster configuration. 
--cluster-advertise must be accompanied by --cluster-store configuration")
+	}
+
+	advertise, err := discovery.ParseAdvertise(clusterAdvertise)
+	if err != nil {
+		return "", fmt.Errorf("discovery advertise parsing failed (%v)", err)
+	}
+	return advertise, nil
+}
+
+// GetConflictFreeLabels validates Labels for conflicts.
+// In swarm, duplicate labels are removed, so this keeps only conflict-free
+// values: if the same key appears several times with the same value, only
+// the last label is kept, while the same key with conflicting values is an
+// error.
+func GetConflictFreeLabels(labels []string) ([]string, error) {
+	labelMap := map[string]string{}
+	for _, label := range labels {
+		stringSlice := strings.SplitN(label, "=", 2)
+		if len(stringSlice) > 1 {
+			// If there is a conflict we will return an error
+			if v, ok := labelMap[stringSlice[0]]; ok && v != stringSlice[1] {
+				return nil, fmt.Errorf("conflict labels for %s=%s and %s=%s", stringSlice[0], stringSlice[1], stringSlice[0], v)
+			}
+			labelMap[stringSlice[0]] = stringSlice[1]
+		}
+	}
+
+	newLabels := []string{}
+	for k, v := range labelMap {
+		newLabels = append(newLabels, fmt.Sprintf("%s=%s", k, v))
+	}
+	return newLabels, nil
+}
+
+// Reload reads the configuration in the host and reloads the daemon and server.
+func Reload(configFile string, flags *pflag.FlagSet, reload func(*Config)) error {
+	logrus.Infof("Got signal to reload configuration, reloading from: %s", configFile)
+	newConfig, err := getConflictFreeConfiguration(configFile, flags)
+	if err != nil {
+		return err
+	}
+
+	if err := Validate(newConfig); err != nil {
+		return fmt.Errorf("file configuration validation failed (%v)", err)
+	}
+
+	// Labels of the docker engine used to allow multiple values associated with the same key.
+	// This was deprecated in 1.13 and will be removed after 3 release cycles.
+	// The following checks the labels for conflicts and reports a deprecation warning.
+	//
+	// TODO: After 3 release cycles (17.12) an error will be returned, and labels will be
+	// sanitized to consolidate duplicate key-value pairs (config.Labels = newLabels):
+	//
+	// newLabels, err := GetConflictFreeLabels(newConfig.Labels)
+	// if err != nil {
+	//	return err
+	// }
+	// newConfig.Labels = newLabels
+	//
+	if _, err := GetConflictFreeLabels(newConfig.Labels); err != nil {
+		logrus.Warnf("Engine labels with duplicate keys and conflicting values have been deprecated: %s", err)
+	}
+
+	reload(newConfig)
+	return nil
+}
+
+// boolValue is an interface that boolean value flags implement
+// to tell the command line how to make -name equivalent to -name=true.
+type boolValue interface {
+	IsBoolFlag() bool
+}
+
+// MergeDaemonConfigurations reads a configuration file,
+// loads the file configuration in an isolated structure,
+// and merges the configuration provided from flags on top
+// if there are no conflicts.
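+//
+// A hedged usage sketch (flag set and file path are illustrative):
+//
+//	flags := pflag.NewFlagSet("docker", pflag.ContinueOnError)
+//	flags.Bool("debug", false, "")
+//	cfg, err := MergeDaemonConfigurations(&Config{}, flags, "/etc/docker/daemon.json")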
+func MergeDaemonConfigurations(flagsConfig *Config, flags *pflag.FlagSet, configFile string) (*Config, error) { + fileConfig, err := getConflictFreeConfiguration(configFile, flags) + if err != nil { + return nil, err + } + + if err := Validate(fileConfig); err != nil { + return nil, fmt.Errorf("configuration validation from file failed (%v)", err) + } + + // merge flags configuration on top of the file configuration + if err := mergo.Merge(fileConfig, flagsConfig); err != nil { + return nil, err + } + + // We need to validate again once both fileConfig and flagsConfig + // have been merged + if err := Validate(fileConfig); err != nil { + return nil, fmt.Errorf("merged configuration validation from file and command line flags failed (%v)", err) + } + + return fileConfig, nil +} + +// getConflictFreeConfiguration loads the configuration from a JSON file. +// It compares that configuration with the one provided by the flags, +// and returns an error if there are conflicts. +func getConflictFreeConfiguration(configFile string, flags *pflag.FlagSet) (*Config, error) { + b, err := ioutil.ReadFile(configFile) + if err != nil { + return nil, err + } + + var config Config + var reader io.Reader + if flags != nil { + var jsonConfig map[string]interface{} + reader = bytes.NewReader(b) + if err := json.NewDecoder(reader).Decode(&jsonConfig); err != nil { + return nil, err + } + + configSet := configValuesSet(jsonConfig) + + if err := findConfigurationConflicts(configSet, flags); err != nil { + return nil, err + } + + // Override flag values to make sure the values set in the config file with nullable values, like `false`, + // are not overridden by default truthy values from the flags that were not explicitly set. + // See https://github.com/docker/docker/issues/20289 for an example. + // + // TODO: Rewrite configuration logic to avoid same issue with other nullable values, like numbers. + namedOptions := make(map[string]interface{}) + for key, value := range configSet { + f := flags.Lookup(key) + if f == nil { // ignore named flags that don't match + namedOptions[key] = value + continue + } + + if _, ok := f.Value.(boolValue); ok { + f.Value.Set(fmt.Sprintf("%v", value)) + } + } + if len(namedOptions) > 0 { + // set also default for mergeVal flags that are boolValue at the same time. + flags.VisitAll(func(f *pflag.Flag) { + if opt, named := f.Value.(opts.NamedOption); named { + v, set := namedOptions[opt.Name()] + _, boolean := f.Value.(boolValue) + if set && boolean { + f.Value.Set(fmt.Sprintf("%v", v)) + } + } + }) + } + + config.ValuesSet = configSet + } + + reader = bytes.NewReader(b) + if err := json.NewDecoder(reader).Decode(&config); err != nil { + return nil, err + } + + if config.RootDeprecated != "" { + logrus.Warn(`The "graph" config file option is deprecated. Please use "data-root" instead.`) + + if config.Root != "" { + return nil, fmt.Errorf(`cannot specify both "graph" and "data-root" config file options`) + } + + config.Root = config.RootDeprecated + } + + return &config, nil +} + +// configValuesSet returns the configuration values explicitly set in the file. 
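+// Nested maps are flattened one level, so their inner keys are reported as
+// set, unless the key is listed in flatOptions (e.g. "log-opts"), in which
+// case the map is kept as a single opaque value.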
+func configValuesSet(config map[string]interface{}) map[string]interface{} { + flatten := make(map[string]interface{}) + for k, v := range config { + if m, isMap := v.(map[string]interface{}); isMap && !flatOptions[k] { + for km, vm := range m { + flatten[km] = vm + } + continue + } + + flatten[k] = v + } + return flatten +} + +// findConfigurationConflicts iterates over the provided flags searching for +// duplicated configurations and unknown keys. It returns an error with all the conflicts if +// it finds any. +func findConfigurationConflicts(config map[string]interface{}, flags *pflag.FlagSet) error { + // 1. Search keys from the file that we don't recognize as flags. + unknownKeys := make(map[string]interface{}) + for key, value := range config { + if flag := flags.Lookup(key); flag == nil { + unknownKeys[key] = value + } + } + + // 2. Discard values that implement NamedOption. + // Their configuration name differs from their flag name, like `labels` and `label`. + if len(unknownKeys) > 0 { + unknownNamedConflicts := func(f *pflag.Flag) { + if namedOption, ok := f.Value.(opts.NamedOption); ok { + if _, valid := unknownKeys[namedOption.Name()]; valid { + delete(unknownKeys, namedOption.Name()) + } + } + } + flags.VisitAll(unknownNamedConflicts) + } + + if len(unknownKeys) > 0 { + var unknown []string + for key := range unknownKeys { + unknown = append(unknown, key) + } + return fmt.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", ")) + } + + var conflicts []string + printConflict := func(name string, flagValue, fileValue interface{}) string { + return fmt.Sprintf("%s: (from flag: %v, from file: %v)", name, flagValue, fileValue) + } + + // 3. Search keys that are present as a flag and as a file option. + duplicatedConflicts := func(f *pflag.Flag) { + // search option name in the json configuration payload if the value is a named option + if namedOption, ok := f.Value.(opts.NamedOption); ok { + if optsValue, ok := config[namedOption.Name()]; ok { + conflicts = append(conflicts, printConflict(namedOption.Name(), f.Value.String(), optsValue)) + } + } else { + // search flag name in the json configuration payload + for _, name := range []string{f.Name, f.Shorthand} { + if value, ok := config[name]; ok { + conflicts = append(conflicts, printConflict(name, f.Value.String(), value)) + break + } + } + } + } + + flags.Visit(duplicatedConflicts) + + if len(conflicts) > 0 { + return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", ")) + } + return nil +} + +// Validate validates some specific configs. +// such as config.DNS, config.Labels, config.DNSSearch, +// as well as config.MaxConcurrentDownloads, config.MaxConcurrentUploads. 
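+//
+// A hedged usage sketch:
+//
+//	if err := Validate(conf); err != nil {
+//		return fmt.Errorf("invalid daemon configuration: %v", err)
+//	}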
+func Validate(config *Config) error { + // validate DNS + for _, dns := range config.DNS { + if _, err := opts.ValidateIPAddress(dns); err != nil { + return err + } + } + + // validate DNSSearch + for _, dnsSearch := range config.DNSSearch { + if _, err := opts.ValidateDNSSearch(dnsSearch); err != nil { + return err + } + } + + // validate Labels + for _, label := range config.Labels { + if _, err := opts.ValidateLabel(label); err != nil { + return err + } + } + // validate MaxConcurrentDownloads + if config.MaxConcurrentDownloads != nil && *config.MaxConcurrentDownloads < 0 { + return fmt.Errorf("invalid max concurrent downloads: %d", *config.MaxConcurrentDownloads) + } + // validate MaxConcurrentUploads + if config.MaxConcurrentUploads != nil && *config.MaxConcurrentUploads < 0 { + return fmt.Errorf("invalid max concurrent uploads: %d", *config.MaxConcurrentUploads) + } + + // validate that "default" runtime is not reset + if runtimes := config.GetAllRuntimes(); len(runtimes) > 0 { + if _, ok := runtimes[StockRuntimeName]; ok { + return fmt.Errorf("runtime name '%s' is reserved", StockRuntimeName) + } + } + + if _, err := opts.ParseGenericResources(config.NodeGenericResources); err != nil { + return err + } + + if defaultRuntime := config.GetDefaultRuntimeName(); defaultRuntime != "" && defaultRuntime != StockRuntimeName { + runtimes := config.GetAllRuntimes() + if _, ok := runtimes[defaultRuntime]; !ok { + return fmt.Errorf("specified default runtime '%s' does not exist", defaultRuntime) + } + } + + return nil +} + +// ModifiedDiscoverySettings returns whether the discovery configuration has been modified or not. +func ModifiedDiscoverySettings(config *Config, backendType, advertise string, clusterOpts map[string]string) bool { + if config.ClusterStore != backendType || config.ClusterAdvertise != advertise { + return true + } + + if (config.ClusterOpts == nil && clusterOpts == nil) || + (config.ClusterOpts == nil && len(clusterOpts) == 0) || + (len(config.ClusterOpts) == 0 && clusterOpts == nil) { + return false + } + + return !reflect.DeepEqual(config.ClusterOpts, clusterOpts) +} diff --git a/vendor/github.com/moby/moby/daemon/config/config_common_unix.go b/vendor/github.com/moby/moby/daemon/config/config_common_unix.go new file mode 100644 index 000000000..d11cceba2 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/config/config_common_unix.go @@ -0,0 +1,73 @@ +// +build solaris linux freebsd + +package config + +import ( + "net" + + "github.com/docker/docker/api/types" +) + +// CommonUnixConfig defines configuration of a docker daemon that is +// common across Unix platforms. 
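+//
+// For example, the matching daemon.json keys (values are illustrative):
+//
+//	{
+//	  "exec-root": "/var/run/docker",
+//	  "default-runtime": "runc",
+//	  "runtimes": {"custom": {"path": "/usr/local/bin/custom-runc"}}
+//	}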
+type CommonUnixConfig struct { + ExecRoot string `json:"exec-root,omitempty"` + ContainerdAddr string `json:"containerd,omitempty"` + Runtimes map[string]types.Runtime `json:"runtimes,omitempty"` + DefaultRuntime string `json:"default-runtime,omitempty"` + DefaultInitBinary string `json:"default-init,omitempty"` +} + +type commonUnixBridgeConfig struct { + DefaultIP net.IP `json:"ip,omitempty"` + IP string `json:"bip,omitempty"` + DefaultGatewayIPv4 net.IP `json:"default-gateway,omitempty"` + DefaultGatewayIPv6 net.IP `json:"default-gateway-v6,omitempty"` + InterContainerCommunication bool `json:"icc,omitempty"` +} + +// GetRuntime returns the runtime path and arguments for a given +// runtime name +func (conf *Config) GetRuntime(name string) *types.Runtime { + conf.Lock() + defer conf.Unlock() + if rt, ok := conf.Runtimes[name]; ok { + return &rt + } + return nil +} + +// GetDefaultRuntimeName returns the current default runtime +func (conf *Config) GetDefaultRuntimeName() string { + conf.Lock() + rt := conf.DefaultRuntime + conf.Unlock() + + return rt +} + +// GetAllRuntimes returns a copy of the runtimes map +func (conf *Config) GetAllRuntimes() map[string]types.Runtime { + conf.Lock() + rts := conf.Runtimes + conf.Unlock() + return rts +} + +// GetExecRoot returns the user configured Exec-root +func (conf *Config) GetExecRoot() string { + return conf.ExecRoot +} + +// GetInitPath returns the configured docker-init path +func (conf *Config) GetInitPath() string { + conf.Lock() + defer conf.Unlock() + if conf.InitPath != "" { + return conf.InitPath + } + if conf.DefaultInitBinary != "" { + return conf.DefaultInitBinary + } + return DefaultInitBinary +} diff --git a/vendor/github.com/moby/moby/daemon/config/config_common_unix_test.go b/vendor/github.com/moby/moby/daemon/config/config_common_unix_test.go new file mode 100644 index 000000000..8647a2206 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/config/config_common_unix_test.go @@ -0,0 +1,84 @@ +// +build !windows + +package config + +import ( + "testing" + + "github.com/docker/docker/api/types" +) + +func TestCommonUnixValidateConfigurationErrors(t *testing.T) { + testCases := []struct { + config *Config + }{ + // Can't override the stock runtime + { + config: &Config{ + CommonUnixConfig: CommonUnixConfig{ + Runtimes: map[string]types.Runtime{ + StockRuntimeName: {}, + }, + }, + }, + }, + // Default runtime should be present in runtimes + { + config: &Config{ + CommonUnixConfig: CommonUnixConfig{ + Runtimes: map[string]types.Runtime{ + "foo": {}, + }, + DefaultRuntime: "bar", + }, + }, + }, + } + for _, tc := range testCases { + err := Validate(tc.config) + if err == nil { + t.Fatalf("expected error, got nil for config %v", tc.config) + } + } +} + +func TestCommonUnixGetInitPath(t *testing.T) { + testCases := []struct { + config *Config + expectedInitPath string + }{ + { + config: &Config{ + InitPath: "some-init-path", + }, + expectedInitPath: "some-init-path", + }, + { + config: &Config{ + CommonUnixConfig: CommonUnixConfig{ + DefaultInitBinary: "foo-init-bin", + }, + }, + expectedInitPath: "foo-init-bin", + }, + { + config: &Config{ + InitPath: "init-path-A", + CommonUnixConfig: CommonUnixConfig{ + DefaultInitBinary: "init-path-B", + }, + }, + expectedInitPath: "init-path-A", + }, + { + config: &Config{}, + expectedInitPath: "docker-init", + }, + } + for _, tc := range testCases { + initPath := tc.config.GetInitPath() + if initPath != tc.expectedInitPath { + t.Fatalf("expected initPath to be %v, got %v", tc.expectedInitPath, 
initPath) + } + } +} diff --git a/vendor/github.com/moby/moby/daemon/config/config_solaris.go b/vendor/github.com/moby/moby/daemon/config/config_solaris.go new file mode 100644 index 000000000..f4f080270 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/config/config_solaris.go @@ -0,0 +1,29 @@ +package config + +import ( + "github.com/spf13/pflag" +) + +// Config defines the configuration of a docker daemon. +// These are the configuration settings that you pass +// to the docker daemon when you launch it with say: `docker -d -e lxc` +type Config struct { + CommonConfig + + // These fields are common to all unix platforms. + CommonUnixConfig +} + +// BridgeConfig stores all the bridge driver specific +// configuration. +type BridgeConfig struct { + commonBridgeConfig + + // Fields below here are platform specific. + commonUnixBridgeConfig +} + +// IsSwarmCompatible defines if swarm mode can be enabled in this config +func (conf *Config) IsSwarmCompatible() error { + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/config/config_test.go b/vendor/github.com/moby/moby/daemon/config/config_test.go new file mode 100644 index 000000000..cc5f01063 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/config/config_test.go @@ -0,0 +1,391 @@ +package config + +import ( + "io/ioutil" + "os" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/daemon/discovery" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/testutil" + "github.com/spf13/pflag" + "github.com/stretchr/testify/assert" +) + +func TestDaemonConfigurationNotFound(t *testing.T) { + _, err := MergeDaemonConfigurations(&Config{}, nil, "/tmp/foo-bar-baz-docker") + if err == nil || !os.IsNotExist(err) { + t.Fatalf("expected does not exist error, got %v", err) + } +} + +func TestDaemonBrokenConfiguration(t *testing.T) { + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + f.Write([]byte(`{"Debug": tru`)) + f.Close() + + _, err = MergeDaemonConfigurations(&Config{}, nil, configFile) + if err == nil { + t.Fatalf("expected error, got %v", err) + } +} + +func TestParseClusterAdvertiseSettings(t *testing.T) { + if runtime.GOOS == "solaris" { + t.Skip("ClusterSettings not supported on Solaris\n") + } + _, err := ParseClusterAdvertiseSettings("something", "") + if err != discovery.ErrDiscoveryDisabled { + t.Fatalf("expected discovery disabled error, got %v\n", err) + } + + _, err = ParseClusterAdvertiseSettings("", "something") + if err == nil { + t.Fatalf("expected discovery store error, got %v\n", err) + } + + _, err = ParseClusterAdvertiseSettings("etcd", "127.0.0.1:8080") + if err != nil { + t.Fatal(err) + } +} + +func TestFindConfigurationConflicts(t *testing.T) { + config := map[string]interface{}{"authorization-plugins": "foobar"} + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + + flags.String("authorization-plugins", "", "") + assert.NoError(t, flags.Set("authorization-plugins", "asdf")) + + testutil.ErrorContains(t, + findConfigurationConflicts(config, flags), + "authorization-plugins: (from flag: asdf, from file: foobar)") +} + +func TestFindConfigurationConflictsWithNamedOptions(t *testing.T) { + config := map[string]interface{}{"hosts": []string{"qwer"}} + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + + var hosts []string + flags.VarP(opts.NewNamedListOptsRef("hosts", &hosts, opts.ValidateHost), "host", "H", "Daemon socket(s) to connect to") + assert.NoError(t, flags.Set("host", "tcp://127.0.0.1:4444")) 
+ assert.NoError(t, flags.Set("host", "unix:///var/run/docker.sock")) + + testutil.ErrorContains(t, findConfigurationConflicts(config, flags), "hosts") +} + +func TestDaemonConfigurationMergeConflicts(t *testing.T) { + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + f.Write([]byte(`{"debug": true}`)) + f.Close() + + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + flags.Bool("debug", false, "") + flags.Set("debug", "false") + + _, err = MergeDaemonConfigurations(&Config{}, flags, configFile) + if err == nil { + t.Fatal("expected error, got nil") + } + if !strings.Contains(err.Error(), "debug") { + t.Fatalf("expected debug conflict, got %v", err) + } +} + +func TestDaemonConfigurationMergeConcurrent(t *testing.T) { + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + f.Write([]byte(`{"max-concurrent-downloads": 1}`)) + f.Close() + + _, err = MergeDaemonConfigurations(&Config{}, nil, configFile) + if err != nil { + t.Fatalf("expected no error, got %v", err) + } +} + +func TestDaemonConfigurationMergeConcurrentError(t *testing.T) { + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + f.Write([]byte(`{"max-concurrent-downloads": -1}`)) + f.Close() + + _, err = MergeDaemonConfigurations(&Config{}, nil, configFile) + if err == nil { + t.Fatal("expected error, got nil") + } +} + +func TestDaemonConfigurationMergeConflictsWithInnerStructs(t *testing.T) { + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + f.Write([]byte(`{"tlscacert": "/etc/certificates/ca.pem"}`)) + f.Close() + + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + flags.String("tlscacert", "", "") + flags.Set("tlscacert", "~/.docker/ca.pem") + + _, err = MergeDaemonConfigurations(&Config{}, flags, configFile) + if err == nil { + t.Fatal("expected error, got nil") + } + if !strings.Contains(err.Error(), "tlscacert") { + t.Fatalf("expected tlscacert conflict, got %v", err) + } +} + +func TestFindConfigurationConflictsWithUnknownKeys(t *testing.T) { + config := map[string]interface{}{"tls-verify": "true"} + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + + flags.Bool("tlsverify", false, "") + err := findConfigurationConflicts(config, flags) + if err == nil { + t.Fatal("expected error, got nil") + } + if !strings.Contains(err.Error(), "the following directives don't match any configuration option: tls-verify") { + t.Fatalf("expected tls-verify conflict, got %v", err) + } +} + +func TestFindConfigurationConflictsWithMergedValues(t *testing.T) { + var hosts []string + config := map[string]interface{}{"hosts": "tcp://127.0.0.1:2345"} + flags := pflag.NewFlagSet("base", pflag.ContinueOnError) + flags.VarP(opts.NewNamedListOptsRef("hosts", &hosts, nil), "host", "H", "") + + err := findConfigurationConflicts(config, flags) + if err != nil { + t.Fatal(err) + } + + flags.Set("host", "unix:///var/run/docker.sock") + err = findConfigurationConflicts(config, flags) + if err == nil { + t.Fatal("expected error, got nil") + } + if !strings.Contains(err.Error(), "hosts: (from flag: [unix:///var/run/docker.sock], from file: tcp://127.0.0.1:2345)") { + t.Fatalf("expected hosts conflict, got %v", err) + } +} + +func TestValidateConfigurationErrors(t *testing.T) { + minusNumber := -10 + testCases := []struct { + config *Config + }{ + { + config: &Config{ +
CommonConfig: CommonConfig{ + Labels: []string{"one"}, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + Labels: []string{"foo=bar", "one"}, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + DNS: []string{"1.1.1.1o"}, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + DNS: []string{"2.2.2.2", "1.1.1.1o"}, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + DNSSearch: []string{"123456"}, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + DNSSearch: []string{"a.b.c", "123456"}, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + MaxConcurrentDownloads: &minusNumber, + // ValuesSet intentionally disagrees with the typed field above; Validate only checks the typed field. + ValuesSet: map[string]interface{}{ + "max-concurrent-downloads": -1, + }, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + MaxConcurrentUploads: &minusNumber, + // ValuesSet intentionally disagrees with the typed field above; Validate only checks the typed field. + ValuesSet: map[string]interface{}{ + "max-concurrent-uploads": -1, + }, + }, + }, + }, + } + for _, tc := range testCases { + err := Validate(tc.config) + if err == nil { + t.Fatalf("expected error, got nil for config %v", tc.config) + } + } +} + +func TestValidateConfiguration(t *testing.T) { + positiveNumber := 4 + testCases := []struct { + config *Config + }{ + { + config: &Config{ + CommonConfig: CommonConfig{ + Labels: []string{"one=two"}, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + DNS: []string{"1.1.1.1"}, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + DNSSearch: []string{"a.b.c"}, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + MaxConcurrentDownloads: &positiveNumber, + // ValuesSet intentionally disagrees with the typed field above; Validate only checks the typed field. + ValuesSet: map[string]interface{}{ + "max-concurrent-downloads": -1, + }, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + MaxConcurrentUploads: &positiveNumber, + // ValuesSet intentionally disagrees with the typed field above; Validate only checks the typed field.
+ ValuesSet: map[string]interface{}{ + "max-concurrent-uploads": -1, + }, + }, + }, + }, + } + for _, tc := range testCases { + err := Validate(tc.config) + if err != nil { + t.Fatalf("expected no error, got error %v", err) + } + } +} + +func TestModifiedDiscoverySettings(t *testing.T) { + cases := []struct { + current *Config + modified *Config + expected bool + }{ + { + current: discoveryConfig("foo", "bar", map[string]string{}), + modified: discoveryConfig("foo", "bar", map[string]string{}), + expected: false, + }, + { + current: discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}), + modified: discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}), + expected: false, + }, + { + current: discoveryConfig("foo", "bar", map[string]string{}), + modified: discoveryConfig("foo", "bar", nil), + expected: false, + }, + { + current: discoveryConfig("foo", "bar", nil), + modified: discoveryConfig("foo", "bar", map[string]string{}), + expected: false, + }, + { + current: discoveryConfig("foo", "bar", nil), + modified: discoveryConfig("baz", "bar", nil), + expected: true, + }, + { + current: discoveryConfig("foo", "bar", nil), + modified: discoveryConfig("foo", "baz", nil), + expected: true, + }, + { + current: discoveryConfig("foo", "bar", nil), + modified: discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}), + expected: true, + }, + } + + for _, c := range cases { + got := ModifiedDiscoverySettings(c.current, c.modified.ClusterStore, c.modified.ClusterAdvertise, c.modified.ClusterOpts) + if c.expected != got { + t.Fatalf("expected %v, got %v: current config %v, new config %v", c.expected, got, c.current, c.modified) + } + } +} + +func discoveryConfig(backendAddr, advertiseAddr string, opts map[string]string) *Config { + return &Config{ + CommonConfig: CommonConfig{ + ClusterStore: backendAddr, + ClusterAdvertise: advertiseAddr, + ClusterOpts: opts, + }, + } +} diff --git a/vendor/github.com/moby/moby/daemon/config/config_unix.go b/vendor/github.com/moby/moby/daemon/config/config_unix.go new file mode 100644 index 000000000..8f1da5919 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/config/config_unix.go @@ -0,0 +1,63 @@ +// +build linux freebsd + +package config + +import ( + "fmt" + + "github.com/docker/docker/opts" + units "github.com/docker/go-units" +) + +// Config defines the configuration of a docker daemon. +// It includes json tags to deserialize configuration from a file +// using the same names that the flags in the command line uses. +type Config struct { + CommonConfig + + // These fields are common to all unix platforms. + CommonUnixConfig + + // Fields below here are platform specific. + CgroupParent string `json:"cgroup-parent,omitempty"` + EnableSelinuxSupport bool `json:"selinux-enabled,omitempty"` + RemappedRoot string `json:"userns-remap,omitempty"` + Ulimits map[string]*units.Ulimit `json:"default-ulimits,omitempty"` + CPURealtimePeriod int64 `json:"cpu-rt-period,omitempty"` + CPURealtimeRuntime int64 `json:"cpu-rt-runtime,omitempty"` + OOMScoreAdjust int `json:"oom-score-adjust,omitempty"` + Init bool `json:"init,omitempty"` + InitPath string `json:"init-path,omitempty"` + SeccompProfile string `json:"seccomp-profile,omitempty"` + ShmSize opts.MemBytes `json:"default-shm-size,omitempty"` + NoNewPrivileges bool `json:"no-new-privileges,omitempty"` +} + +// BridgeConfig stores all the bridge driver specific +// configuration. +type BridgeConfig struct { + commonBridgeConfig + + // These fields are common to all unix platforms. 
+ commonUnixBridgeConfig + + // Fields below here are platform specific. + EnableIPv6 bool `json:"ipv6,omitempty"` + EnableIPTables bool `json:"iptables,omitempty"` + EnableIPForward bool `json:"ip-forward,omitempty"` + EnableIPMasq bool `json:"ip-masq,omitempty"` + EnableUserlandProxy bool `json:"userland-proxy,omitempty"` + UserlandProxyPath string `json:"userland-proxy-path,omitempty"` + FixedCIDRv6 string `json:"fixed-cidr-v6,omitempty"` +} + +// IsSwarmCompatible defines if swarm mode can be enabled in this config +func (conf *Config) IsSwarmCompatible() error { + if conf.ClusterStore != "" || conf.ClusterAdvertise != "" { + return fmt.Errorf("--cluster-store and --cluster-advertise daemon configurations are incompatible with swarm mode") + } + if conf.LiveRestoreEnabled { + return fmt.Errorf("--live-restore daemon configuration is incompatible with swarm mode") + } + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/config/config_unix_test.go b/vendor/github.com/moby/moby/daemon/config/config_unix_test.go new file mode 100644 index 000000000..9e52cb70f --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/config/config_unix_test.go @@ -0,0 +1,139 @@ +// +build !windows + +package config + +import ( + "testing" + + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/testutil/tempfile" + "github.com/docker/go-units" + "github.com/spf13/pflag" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestGetConflictFreeConfiguration(t *testing.T) { + configFileData := string([]byte(` + { + "debug": true, + "default-ulimits": { + "nofile": { + "Name": "nofile", + "Hard": 2048, + "Soft": 1024 + } + }, + "log-opts": { + "tag": "test_tag" + } + }`)) + + file := tempfile.NewTempFile(t, "docker-config", configFileData) + defer file.Remove() + + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + var debug bool + flags.BoolVarP(&debug, "debug", "D", false, "") + flags.Var(opts.NewNamedUlimitOpt("default-ulimits", nil), "default-ulimit", "") + flags.Var(opts.NewNamedMapOpts("log-opts", nil, nil), "log-opt", "") + + cc, err := getConflictFreeConfiguration(file.Name(), flags) + require.NoError(t, err) + + assert.True(t, cc.Debug) + + expectedUlimits := map[string]*units.Ulimit{ + "nofile": { + Name: "nofile", + Hard: 2048, + Soft: 1024, + }, + } + + assert.Equal(t, expectedUlimits, cc.Ulimits) +} + +func TestDaemonConfigurationMerge(t *testing.T) { + configFileData := string([]byte(` + { + "debug": true, + "default-ulimits": { + "nofile": { + "Name": "nofile", + "Hard": 2048, + "Soft": 1024 + } + }, + "log-opts": { + "tag": "test_tag" + } + }`)) + + file := tempfile.NewTempFile(t, "docker-config", configFileData) + defer file.Remove() + + c := &Config{ + CommonConfig: CommonConfig{ + AutoRestart: true, + LogConfig: LogConfig{ + Type: "syslog", + Config: map[string]string{"tag": "test"}, + }, + }, + } + + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + + var debug bool + flags.BoolVarP(&debug, "debug", "D", false, "") + flags.Var(opts.NewNamedUlimitOpt("default-ulimits", nil), "default-ulimit", "") + flags.Var(opts.NewNamedMapOpts("log-opts", nil, nil), "log-opt", "") + + cc, err := MergeDaemonConfigurations(c, flags, file.Name()) + require.NoError(t, err) + + assert.True(t, cc.Debug) + assert.True(t, cc.AutoRestart) + + expectedLogConfig := LogConfig{ + Type: "syslog", + Config: map[string]string{"tag": "test_tag"}, + } + + assert.Equal(t, expectedLogConfig, cc.LogConfig) + + expectedUlimits := map[string]*units.Ulimit{ + 
"nofile": { + Name: "nofile", + Hard: 2048, + Soft: 1024, + }, + } + + assert.Equal(t, expectedUlimits, cc.Ulimits) +} + +func TestDaemonConfigurationMergeShmSize(t *testing.T) { + data := string([]byte(` + { + "default-shm-size": "1g" + }`)) + + file := tempfile.NewTempFile(t, "docker-config", data) + defer file.Remove() + + c := &Config{} + + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + shmSize := opts.MemBytes(DefaultShmSize) + flags.Var(&shmSize, "default-shm-size", "") + + cc, err := MergeDaemonConfigurations(c, flags, file.Name()) + require.NoError(t, err) + + expectedValue := 1 * 1024 * 1024 * 1024 + if cc.ShmSize.Value() != int64(expectedValue) { + t.Fatalf("expected default shm size %d, got %d", expectedValue, cc.ShmSize.Value()) + } +} diff --git a/vendor/github.com/moby/moby/daemon/config/config_windows.go b/vendor/github.com/moby/moby/daemon/config/config_windows.go new file mode 100644 index 000000000..849acc1ac --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/config/config_windows.go @@ -0,0 +1,52 @@ +package config + +import ( + "github.com/docker/docker/api/types" +) + +// BridgeConfig stores all the bridge driver specific +// configuration. +type BridgeConfig struct { + commonBridgeConfig +} + +// Config defines the configuration of a docker daemon. +// These are the configuration settings that you pass +// to the docker daemon when you launch it with say: `dockerd -e windows` +type Config struct { + CommonConfig + + // Fields below here are platform specific. (There are none presently + // for the Windows daemon.) +} + +// GetRuntime returns the runtime path and arguments for a given +// runtime name +func (conf *Config) GetRuntime(name string) *types.Runtime { + return nil +} + +// GetInitPath returns the configure docker-init path +func (conf *Config) GetInitPath() string { + return "" +} + +// GetDefaultRuntimeName returns the current default runtime +func (conf *Config) GetDefaultRuntimeName() string { + return StockRuntimeName +} + +// GetAllRuntimes returns a copy of the runtimes map +func (conf *Config) GetAllRuntimes() map[string]types.Runtime { + return map[string]types.Runtime{} +} + +// GetExecRoot returns the user configured Exec-root +func (conf *Config) GetExecRoot() string { + return "" +} + +// IsSwarmCompatible defines if swarm mode can be enabled in this config +func (conf *Config) IsSwarmCompatible() error { + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/config/config_windows_test.go b/vendor/github.com/moby/moby/daemon/config/config_windows_test.go new file mode 100644 index 000000000..92ee8e4ac --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/config/config_windows_test.go @@ -0,0 +1,60 @@ +// +build windows + +package config + +import ( + "io/ioutil" + "testing" + + "github.com/docker/docker/opts" + "github.com/spf13/pflag" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDaemonConfigurationMerge(t *testing.T) { + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + + f.Write([]byte(` + { + "debug": true, + "log-opts": { + "tag": "test_tag" + } + }`)) + + f.Close() + + c := &Config{ + CommonConfig: CommonConfig{ + AutoRestart: true, + LogConfig: LogConfig{ + Type: "syslog", + Config: map[string]string{"tag": "test"}, + }, + }, + } + + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + var debug bool + flags.BoolVarP(&debug, "debug", "D", false, "") + flags.Var(opts.NewNamedMapOpts("log-opts", nil, 
nil), "log-opt", "") + + cc, err := MergeDaemonConfigurations(c, flags, configFile) + require.NoError(t, err) + + assert.True(t, cc.Debug) + assert.True(t, cc.AutoRestart) + + expectedLogConfig := LogConfig{ + Type: "syslog", + Config: map[string]string{"tag": "test_tag"}, + } + + assert.Equal(t, expectedLogConfig, cc.LogConfig) +} diff --git a/vendor/github.com/moby/moby/daemon/configs.go b/vendor/github.com/moby/moby/daemon/configs.go new file mode 100644 index 000000000..31da56b2d --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/configs.go @@ -0,0 +1,23 @@ +package daemon + +import ( + "github.com/Sirupsen/logrus" + swarmtypes "github.com/docker/docker/api/types/swarm" +) + +// SetContainerConfigReferences sets the container config references needed +func (daemon *Daemon) SetContainerConfigReferences(name string, refs []*swarmtypes.ConfigReference) error { + if !configsSupported() && len(refs) > 0 { + logrus.Warn("configs are not supported on this platform") + return nil + } + + c, err := daemon.GetContainer(name) + if err != nil { + return err + } + + c.ConfigReferences = refs + + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/configs_linux.go b/vendor/github.com/moby/moby/daemon/configs_linux.go new file mode 100644 index 000000000..af20ad78b --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/configs_linux.go @@ -0,0 +1,7 @@ +// +build linux + +package daemon + +func configsSupported() bool { + return true +} diff --git a/vendor/github.com/moby/moby/daemon/configs_unsupported.go b/vendor/github.com/moby/moby/daemon/configs_unsupported.go new file mode 100644 index 000000000..1a7cbc9dc --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/configs_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux,!windows + +package daemon + +func configsSupported() bool { + return false +} diff --git a/vendor/github.com/moby/moby/daemon/configs_windows.go b/vendor/github.com/moby/moby/daemon/configs_windows.go new file mode 100644 index 000000000..7cb2e9c43 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/configs_windows.go @@ -0,0 +1,7 @@ +// +build windows + +package daemon + +func configsSupported() bool { + return true +} diff --git a/vendor/github.com/moby/moby/daemon/container.go b/vendor/github.com/moby/moby/daemon/container.go new file mode 100644 index 000000000..4c015b70d --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/container.go @@ -0,0 +1,321 @@ +package daemon + +import ( + "fmt" + "os" + "path/filepath" + "time" + + "github.com/docker/docker/api/errors" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/network" + "github.com/docker/docker/image" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/truncindex" + "github.com/docker/docker/runconfig" + "github.com/docker/go-connections/nat" + "github.com/opencontainers/selinux/go-selinux/label" +) + +// GetContainer looks for a container using the provided information, which could be +// one of the following inputs from the caller: +// - A full container ID, which will exact match a container in daemon's list +// - A container name, which will only exact match via the GetByName() function +// - A partial container ID prefix (e.g. 
short ID) of any length that is +// unique enough to only return a single container object +// If none of these searches succeed, an error is returned +func (daemon *Daemon) GetContainer(prefixOrName string) (*container.Container, error) { + if len(prefixOrName) == 0 { + return nil, errors.NewBadRequestError(fmt.Errorf("No container name or ID supplied")) + } + + if containerByID := daemon.containers.Get(prefixOrName); containerByID != nil { + // prefix is an exact match to a full container ID + return containerByID, nil + } + + // GetByName will match only an exact name provided; we ignore errors + if containerByName, _ := daemon.GetByName(prefixOrName); containerByName != nil { + // prefix is an exact match to a full container Name + return containerByName, nil + } + + containerID, indexError := daemon.idIndex.Get(prefixOrName) + if indexError != nil { + // When truncindex defines an error type, use that instead + if indexError == truncindex.ErrNotExist { + err := fmt.Errorf("No such container: %s", prefixOrName) + return nil, errors.NewRequestNotFoundError(err) + } + return nil, indexError + } + return daemon.containers.Get(containerID), nil +} + +// checkContainer makes sure the specified container satisfies the given conditions +func (daemon *Daemon) checkContainer(container *container.Container, conditions ...func(*container.Container) error) error { + for _, condition := range conditions { + if err := condition(container); err != nil { + return err + } + } + return nil +} + +// Exists returns true if a container of the specified ID or name exists, +// false otherwise. +func (daemon *Daemon) Exists(id string) bool { + c, _ := daemon.GetContainer(id) + return c != nil +} + +// IsPaused returns a bool indicating if the specified container is paused. +func (daemon *Daemon) IsPaused(id string) bool { + c, _ := daemon.GetContainer(id) + return c.State.IsPaused() +} + +func (daemon *Daemon) containerRoot(id string) string { + return filepath.Join(daemon.repository, id) +} + +// load reads the contents of a container from disk +// This is typically done at startup. 
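The lookup precedence GetContainer implements above — exact ID, then exact name, then a unique short-ID prefix — can be seen in isolation in the following sketch. It is illustrative only: the store type and its two maps are hypothetical stand-ins for the daemon's container store and its truncindex, not part of the vendored code.

	package main

	import (
		"fmt"
		"strings"
	)

	type store struct {
		byID   map[string]string // full ID -> name
		byName map[string]string // name -> full ID
	}

	// resolve mirrors the daemon's order: exact ID, exact name, unique prefix.
	func (s *store) resolve(prefixOrName string) (string, error) {
		if _, ok := s.byID[prefixOrName]; ok {
			return prefixOrName, nil // exact match on a full container ID
		}
		if id, ok := s.byName[prefixOrName]; ok {
			return id, nil // exact match on a container name
		}
		var matches []string
		for id := range s.byID {
			if strings.HasPrefix(id, prefixOrName) {
				matches = append(matches, id)
			}
		}
		if len(matches) == 1 {
			return matches[0], nil // prefix is unambiguous
		}
		return "", fmt.Errorf("no such container: %s", prefixOrName)
	}

	func main() {
		s := &store{
			byID:   map[string]string{"4c015b70d1ab": "/web"},
			byName: map[string]string{"/web": "4c015b70d1ab"},
		}
		fmt.Println(s.resolve("4c01")) // unique prefix resolves to the full ID
	}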
+func (daemon *Daemon) load(id string) (*container.Container, error) { + container := daemon.newBaseContainer(id) + + if err := container.FromDisk(); err != nil { + return nil, err + } + if err := label.ReserveLabel(container.ProcessLabel); err != nil { + return nil, err + } + + if container.ID != id { + return container, fmt.Errorf("Container %s is stored at %s", container.ID, id) + } + + return container, nil +} + +// Register makes a container object usable by the daemon as +func (daemon *Daemon) Register(c *container.Container) error { + // Attach to stdout and stderr + if c.Config.OpenStdin { + c.StreamConfig.NewInputPipes() + } else { + c.StreamConfig.NewNopInputPipe() + } + + // once in the memory store it is visible to other goroutines + // grab a Lock until it has been checkpointed to avoid races + c.Lock() + defer c.Unlock() + + daemon.containers.Add(c.ID, c) + daemon.idIndex.Add(c.ID) + return c.CheckpointTo(daemon.containersReplica) +} + +func (daemon *Daemon) newContainer(name string, platform string, config *containertypes.Config, hostConfig *containertypes.HostConfig, imgID image.ID, managed bool) (*container.Container, error) { + var ( + id string + err error + noExplicitName = name == "" + ) + id, name, err = daemon.generateIDAndName(name) + if err != nil { + return nil, err + } + + if hostConfig.NetworkMode.IsHost() { + if config.Hostname == "" { + config.Hostname, err = os.Hostname() + if err != nil { + return nil, err + } + } + } else { + daemon.generateHostname(id, config) + } + entrypoint, args := daemon.getEntrypointAndArgs(config.Entrypoint, config.Cmd) + + base := daemon.newBaseContainer(id) + base.Created = time.Now().UTC() + base.Managed = managed + base.Path = entrypoint + base.Args = args //FIXME: de-duplicate from config + base.Config = config + base.HostConfig = &containertypes.HostConfig{} + base.ImageID = imgID + base.NetworkSettings = &network.Settings{IsAnonymousEndpoint: noExplicitName} + base.Name = name + base.Driver = daemon.GraphDriverName(platform) + base.Platform = platform + return base, err +} + +// GetByName returns a container given a name. +func (daemon *Daemon) GetByName(name string) (*container.Container, error) { + if len(name) == 0 { + return nil, fmt.Errorf("No container name supplied") + } + fullName := name + if name[0] != '/' { + fullName = "/" + name + } + id, err := daemon.containersReplica.Snapshot().GetID(fullName) + if err != nil { + return nil, fmt.Errorf("Could not find entity for %s", name) + } + e := daemon.containers.Get(id) + if e == nil { + return nil, fmt.Errorf("Could not find container for entity id %s", id) + } + return e, nil +} + +// newBaseContainer creates a new container with its initial +// configuration based on the root storage from the daemon. +func (daemon *Daemon) newBaseContainer(id string) *container.Container { + return container.NewBaseContainer(id, daemon.containerRoot(id)) +} + +func (daemon *Daemon) getEntrypointAndArgs(configEntrypoint strslice.StrSlice, configCmd strslice.StrSlice) (string, []string) { + if len(configEntrypoint) != 0 { + return configEntrypoint[0], append(configEntrypoint[1:], configCmd...) 
+ } + return configCmd[0], configCmd[1:] +} + +func (daemon *Daemon) generateHostname(id string, config *containertypes.Config) { + // Generate default hostname + if config.Hostname == "" { + config.Hostname = id[:12] + } +} + +func (daemon *Daemon) setSecurityOptions(container *container.Container, hostConfig *containertypes.HostConfig) error { + container.Lock() + defer container.Unlock() + return daemon.parseSecurityOpt(container, hostConfig) +} + +func (daemon *Daemon) setHostConfig(container *container.Container, hostConfig *containertypes.HostConfig) error { + // Do not lock while creating volumes since this could be calling out to external plugins + // Don't want to block other actions, like `docker ps` because we're waiting on an external plugin + if err := daemon.registerMountPoints(container, hostConfig); err != nil { + return err + } + + container.Lock() + defer container.Unlock() + + // Register any links from the host config before starting the container + if err := daemon.registerLinks(container, hostConfig); err != nil { + return err + } + + runconfig.SetDefaultNetModeIfBlank(hostConfig) + container.HostConfig = hostConfig + return container.CheckpointTo(daemon.containersReplica) +} + +// verifyContainerSettings performs validation of the hostconfig and config +// structures. +func (daemon *Daemon) verifyContainerSettings(hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { + + // First perform verification of settings common across all platforms. + if config != nil { + if config.WorkingDir != "" { + config.WorkingDir = filepath.FromSlash(config.WorkingDir) // Ensure in platform semantics + if !system.IsAbs(config.WorkingDir) { + return nil, fmt.Errorf("the working directory '%s' is invalid, it needs to be an absolute path", config.WorkingDir) + } + } + + if len(config.StopSignal) > 0 { + _, err := signal.ParseSignal(config.StopSignal) + if err != nil { + return nil, err + } + } + + // Validate if Env contains empty variable or not (e.g., ``, `=foo`) + for _, env := range config.Env { + if _, err := opts.ValidateEnv(env); err != nil { + return nil, err + } + } + + // Validate the healthcheck params of Config + if config.Healthcheck != nil { + if config.Healthcheck.Interval != 0 && config.Healthcheck.Interval < containertypes.MinimumDuration { + return nil, fmt.Errorf("Interval in Healthcheck cannot be less than %s", containertypes.MinimumDuration) + } + + if config.Healthcheck.Timeout != 0 && config.Healthcheck.Timeout < containertypes.MinimumDuration { + return nil, fmt.Errorf("Timeout in Healthcheck cannot be less than %s", containertypes.MinimumDuration) + } + + if config.Healthcheck.Retries < 0 { + return nil, fmt.Errorf("Retries in Healthcheck cannot be negative") + } + + if config.Healthcheck.StartPeriod != 0 && config.Healthcheck.StartPeriod < containertypes.MinimumDuration { + return nil, fmt.Errorf("StartPeriod in Healthcheck cannot be less than %s", containertypes.MinimumDuration) + } + } + } + + if hostConfig == nil { + return nil, nil + } + + if hostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() { + return nil, fmt.Errorf("can't create 'AutoRemove' container with restart policy") + } + + for _, extraHost := range hostConfig.ExtraHosts { + if _, err := opts.ValidateExtraHost(extraHost); err != nil { + return nil, err + } + } + + for port := range hostConfig.PortBindings { + _, portStr := nat.SplitProtoPort(string(port)) + if _, err := nat.ParsePort(portStr); err != nil { + return nil, fmt.Errorf("invalid 
port specification: %q", portStr) + } + for _, pb := range hostConfig.PortBindings[port] { + _, err := nat.NewPort(nat.SplitProtoPort(pb.HostPort)) + if err != nil { + return nil, fmt.Errorf("invalid port specification: %q", pb.HostPort) + } + } + } + + p := hostConfig.RestartPolicy + + switch p.Name { + case "always", "unless-stopped", "no": + if p.MaximumRetryCount != 0 { + return nil, fmt.Errorf("maximum retry count cannot be used with restart policy '%s'", p.Name) + } + case "on-failure": + if p.MaximumRetryCount < 0 { + return nil, fmt.Errorf("maximum retry count cannot be negative") + } + case "": + // do nothing + default: + return nil, fmt.Errorf("invalid restart policy '%s'", p.Name) + } + + // Now do platform-specific verification + return verifyPlatformContainerSettings(daemon, hostConfig, config, update) +} diff --git a/vendor/github.com/moby/moby/daemon/container_linux.go b/vendor/github.com/moby/moby/daemon/container_linux.go new file mode 100644 index 000000000..2c8771575 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/container_linux.go @@ -0,0 +1,29 @@ +//+build !windows + +package daemon + +import ( + "github.com/docker/docker/container" +) + +func (daemon *Daemon) saveApparmorConfig(container *container.Container) error { + container.AppArmorProfile = "" //we don't care about the previous value. + + if !daemon.apparmorEnabled { + return nil // if apparmor is disabled there is nothing to do here. + } + + if err := parseSecurityOpt(container, container.HostConfig); err != nil { + return err + } + + if !container.HostConfig.Privileged { + if container.AppArmorProfile == "" { + container.AppArmorProfile = defaultApparmorProfile + } + + } else { + container.AppArmorProfile = "unconfined" + } + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/container_operations.go b/vendor/github.com/moby/moby/daemon/container_operations.go new file mode 100644 index 000000000..7c7dcc7ce --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/container_operations.go @@ -0,0 +1,1090 @@ +package daemon + +import ( + "errors" + "fmt" + "net" + "os" + "path" + "runtime" + "strings" + "time" + + "github.com/Sirupsen/logrus" + derr "github.com/docker/docker/api/errors" + containertypes "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/network" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/runconfig" + "github.com/docker/go-connections/nat" + "github.com/docker/libnetwork" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/options" + "github.com/docker/libnetwork/types" +) + +var ( + // ErrRootFSReadOnly is returned when a container + // rootfs is marked readonly. 
+ ErrRootFSReadOnly = errors.New("container rootfs is marked read-only") + getPortMapInfo = container.GetSandboxPortMapInfo +) + +func (daemon *Daemon) getDNSSearchSettings(container *container.Container) []string { + if len(container.HostConfig.DNSSearch) > 0 { + return container.HostConfig.DNSSearch + } + + if len(daemon.configStore.DNSSearch) > 0 { + return daemon.configStore.DNSSearch + } + + return nil +} + +func (daemon *Daemon) buildSandboxOptions(container *container.Container) ([]libnetwork.SandboxOption, error) { + var ( + sboxOptions []libnetwork.SandboxOption + err error + dns []string + dnsOptions []string + bindings = make(nat.PortMap) + pbList []types.PortBinding + exposeList []types.TransportPort + ) + + defaultNetName := runconfig.DefaultDaemonNetworkMode().NetworkName() + sboxOptions = append(sboxOptions, libnetwork.OptionHostname(container.Config.Hostname), + libnetwork.OptionDomainname(container.Config.Domainname)) + + if container.HostConfig.NetworkMode.IsHost() { + sboxOptions = append(sboxOptions, libnetwork.OptionUseDefaultSandbox()) + if len(container.HostConfig.ExtraHosts) == 0 { + sboxOptions = append(sboxOptions, libnetwork.OptionOriginHostsPath("/etc/hosts")) + } + if len(container.HostConfig.DNS) == 0 && len(daemon.configStore.DNS) == 0 && + len(container.HostConfig.DNSSearch) == 0 && len(daemon.configStore.DNSSearch) == 0 && + len(container.HostConfig.DNSOptions) == 0 && len(daemon.configStore.DNSOptions) == 0 { + sboxOptions = append(sboxOptions, libnetwork.OptionOriginResolvConfPath("/etc/resolv.conf")) + } + } else { + // OptionUseExternalKey is mandatory for userns support. + // But optional for non-userns support + sboxOptions = append(sboxOptions, libnetwork.OptionUseExternalKey()) + } + + if err = setupPathsAndSandboxOptions(container, &sboxOptions); err != nil { + return nil, err + } + + if len(container.HostConfig.DNS) > 0 { + dns = container.HostConfig.DNS + } else if len(daemon.configStore.DNS) > 0 { + dns = daemon.configStore.DNS + } + + for _, d := range dns { + sboxOptions = append(sboxOptions, libnetwork.OptionDNS(d)) + } + + dnsSearch := daemon.getDNSSearchSettings(container) + + for _, ds := range dnsSearch { + sboxOptions = append(sboxOptions, libnetwork.OptionDNSSearch(ds)) + } + + if len(container.HostConfig.DNSOptions) > 0 { + dnsOptions = container.HostConfig.DNSOptions + } else if len(daemon.configStore.DNSOptions) > 0 { + dnsOptions = daemon.configStore.DNSOptions + } + + for _, ds := range dnsOptions { + sboxOptions = append(sboxOptions, libnetwork.OptionDNSOptions(ds)) + } + + if container.NetworkSettings.SecondaryIPAddresses != nil { + name := container.Config.Hostname + if container.Config.Domainname != "" { + name = name + "." 
+ container.Config.Domainname + } + + for _, a := range container.NetworkSettings.SecondaryIPAddresses { + sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(name, a.Addr)) + } + } + + for _, extraHost := range container.HostConfig.ExtraHosts { + // allow IPv6 addresses in extra hosts; only split on first ":" + if _, err := opts.ValidateExtraHost(extraHost); err != nil { + return nil, err + } + parts := strings.SplitN(extraHost, ":", 2) + sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(parts[0], parts[1])) + } + + if container.HostConfig.PortBindings != nil { + for p, b := range container.HostConfig.PortBindings { + bindings[p] = []nat.PortBinding{} + for _, bb := range b { + bindings[p] = append(bindings[p], nat.PortBinding{ + HostIP: bb.HostIP, + HostPort: bb.HostPort, + }) + } + } + } + + portSpecs := container.Config.ExposedPorts + ports := make([]nat.Port, len(portSpecs)) + var i int + for p := range portSpecs { + ports[i] = p + i++ + } + nat.SortPortMap(ports, bindings) + for _, port := range ports { + expose := types.TransportPort{} + expose.Proto = types.ParseProtocol(port.Proto()) + expose.Port = uint16(port.Int()) + exposeList = append(exposeList, expose) + + pb := types.PortBinding{Port: expose.Port, Proto: expose.Proto} + binding := bindings[port] + for i := 0; i < len(binding); i++ { + pbCopy := pb.GetCopy() + newP, err := nat.NewPort(nat.SplitProtoPort(binding[i].HostPort)) + var portStart, portEnd int + if err == nil { + portStart, portEnd, err = newP.Range() + } + if err != nil { + return nil, fmt.Errorf("Error parsing HostPort value(%s):%v", binding[i].HostPort, err) + } + pbCopy.HostPort = uint16(portStart) + pbCopy.HostPortEnd = uint16(portEnd) + pbCopy.HostIP = net.ParseIP(binding[i].HostIP) + pbList = append(pbList, pbCopy) + } + + if container.HostConfig.PublishAllPorts && len(binding) == 0 { + pbList = append(pbList, pb) + } + } + + sboxOptions = append(sboxOptions, + libnetwork.OptionPortMapping(pbList), + libnetwork.OptionExposedPorts(exposeList)) + + // Legacy Link feature is supported only for the default bridge network. 
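Before moving on to legacy links: the port-binding loop just above leans on the nat helpers, where a HostPort such as "8080-8082" parses into a range whose bounds land in HostPort and HostPortEnd. A minimal, standalone illustration with the same library (assuming only github.com/docker/go-connections/nat):

	package main

	import (
		"fmt"

		"github.com/docker/go-connections/nat"
	)

	func main() {
		// SplitProtoPort defaults the protocol to "tcp" when none is given.
		proto, port := nat.SplitProtoPort("8080-8082")
		p, err := nat.NewPort(proto, port)
		if err != nil {
			panic(err)
		}
		start, end, err := p.Range()
		if err != nil {
			panic(err)
		}
		fmt.Printf("proto=%s start=%d end=%d\n", p.Proto(), start, end) // proto=tcp start=8080 end=8082
	}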
+ // return if this call to build join options is not for default bridge network + // Legacy Link is only supported by docker run --link + bridgeSettings, ok := container.NetworkSettings.Networks[defaultNetName] + if !ok || bridgeSettings.EndpointSettings == nil { + return sboxOptions, nil + } + + if bridgeSettings.EndpointID == "" { + return sboxOptions, nil + } + + var ( + childEndpoints, parentEndpoints []string + cEndpointID string + ) + + children := daemon.children(container) + for linkAlias, child := range children { + if !isLinkable(child) { + return nil, fmt.Errorf("Cannot link to %s, as it does not belong to the default network", child.Name) + } + _, alias := path.Split(linkAlias) + // allow access to the linked container via the alias, real name, and container hostname + aliasList := alias + " " + child.Config.Hostname + // only add the name if alias isn't equal to the name + if alias != child.Name[1:] { + aliasList = aliasList + " " + child.Name[1:] + } + sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(aliasList, child.NetworkSettings.Networks[defaultNetName].IPAddress)) + cEndpointID = child.NetworkSettings.Networks[defaultNetName].EndpointID + if cEndpointID != "" { + childEndpoints = append(childEndpoints, cEndpointID) + } + } + + for alias, parent := range daemon.parents(container) { + if daemon.configStore.DisableBridge || !container.HostConfig.NetworkMode.IsPrivate() { + continue + } + + _, alias = path.Split(alias) + logrus.Debugf("Update /etc/hosts of %s for alias %s with ip %s", parent.ID, alias, bridgeSettings.IPAddress) + sboxOptions = append(sboxOptions, libnetwork.OptionParentUpdate( + parent.ID, + alias, + bridgeSettings.IPAddress, + )) + if cEndpointID != "" { + parentEndpoints = append(parentEndpoints, cEndpointID) + } + } + + linkOptions := options.Generic{ + netlabel.GenericData: options.Generic{ + "ParentEndpoints": parentEndpoints, + "ChildEndpoints": childEndpoints, + }, + } + + sboxOptions = append(sboxOptions, libnetwork.OptionGeneric(linkOptions)) + return sboxOptions, nil +} + +func (daemon *Daemon) updateNetworkSettings(container *container.Container, n libnetwork.Network, endpointConfig *networktypes.EndpointSettings) error { + if container.NetworkSettings == nil { + container.NetworkSettings = &network.Settings{Networks: make(map[string]*network.EndpointSettings)} + } + + if !container.HostConfig.NetworkMode.IsHost() && containertypes.NetworkMode(n.Type()).IsHost() { + return runconfig.ErrConflictHostNetwork + } + + for s := range container.NetworkSettings.Networks { + sn, err := daemon.FindNetwork(s) + if err != nil { + continue + } + + if sn.Name() == n.Name() { + // Avoid duplicate config + return nil + } + if !containertypes.NetworkMode(sn.Type()).IsPrivate() || + !containertypes.NetworkMode(n.Type()).IsPrivate() { + return runconfig.ErrConflictSharedNetwork + } + if containertypes.NetworkMode(sn.Name()).IsNone() || + containertypes.NetworkMode(n.Name()).IsNone() { + return runconfig.ErrConflictNoNetwork + } + } + + if _, ok := container.NetworkSettings.Networks[n.Name()]; !ok { + container.NetworkSettings.Networks[n.Name()] = &network.EndpointSettings{ + EndpointSettings: endpointConfig, + } + } + + return nil +} + +func (daemon *Daemon) updateEndpointNetworkSettings(container *container.Container, n libnetwork.Network, ep libnetwork.Endpoint) error { + if err := container.BuildEndpointInfo(n, ep); err != nil { + return err + } + + if container.HostConfig.NetworkMode == runconfig.DefaultDaemonNetworkMode() { + 
container.NetworkSettings.Bridge = daemon.configStore.BridgeConfig.Iface + } + + return nil +} + +// UpdateNetwork is used to update the container's network (e.g. when linked containers +// get removed/unlinked). +func (daemon *Daemon) updateNetwork(container *container.Container) error { + var ( + start = time.Now() + ctrl = daemon.netController + sid = container.NetworkSettings.SandboxID + ) + + sb, err := ctrl.SandboxByID(sid) + if err != nil { + return fmt.Errorf("error locating sandbox id %s: %v", sid, err) + } + + // Find if container is connected to the default bridge network + var n libnetwork.Network + for name := range container.NetworkSettings.Networks { + sn, err := daemon.FindNetwork(name) + if err != nil { + continue + } + if sn.Name() == runconfig.DefaultDaemonNetworkMode().NetworkName() { + n = sn + break + } + } + + if n == nil { + // Not connected to the default bridge network; Nothing to do + return nil + } + + options, err := daemon.buildSandboxOptions(container) + if err != nil { + return fmt.Errorf("Update network failed: %v", err) + } + + if err := sb.Refresh(options...); err != nil { + return fmt.Errorf("Update network failed: Failure in refresh sandbox %s: %v", sid, err) + } + + networkActions.WithValues("update").UpdateSince(start) + + return nil +} + +func (daemon *Daemon) findAndAttachNetwork(container *container.Container, idOrName string, epConfig *networktypes.EndpointSettings) (libnetwork.Network, *networktypes.NetworkingConfig, error) { + n, err := daemon.FindNetwork(idOrName) + if err != nil { + // We should always be able to find the network for a + // managed container. + if container.Managed { + return nil, nil, err + } + } + + // If we found a network and if it is not dynamically created + // we should never attempt to attach to that network here. + if n != nil { + if container.Managed || !n.Info().Dynamic() { + return n, nil, nil + } + } + + var addresses []string + if epConfig != nil && epConfig.IPAMConfig != nil { + if epConfig.IPAMConfig.IPv4Address != "" { + addresses = append(addresses, epConfig.IPAMConfig.IPv4Address) + } + + if epConfig.IPAMConfig.IPv6Address != "" { + addresses = append(addresses, epConfig.IPAMConfig.IPv6Address) + } + } + + var ( + config *networktypes.NetworkingConfig + retryCount int + ) + + for { + // In all other cases, attempt to attach to the network to + // trigger attachment in the swarm cluster manager. + if daemon.clusterProvider != nil { + var err error + config, err = daemon.clusterProvider.AttachNetwork(idOrName, container.ID, addresses) + if err != nil { + return nil, nil, err + } + } + + n, err = daemon.FindNetwork(idOrName) + if err != nil { + if daemon.clusterProvider != nil { + if err := daemon.clusterProvider.DetachNetwork(idOrName, container.ID); err != nil { + logrus.Warnf("Could not rollback attachment for container %s to network %s: %v", container.ID, idOrName, err) + } + } + + // Retry network attach again if we failed to + // find the network after successful + // attachment because the only reason that + // would happen is if some other container + // attached to the swarm scope network went down + // and removed the network while we were in + // the process of attaching. 
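The rationale spelled out in the comment above reduces to a pattern: retry only the single recoverable failure, with a hard cap, and surface everything else immediately. A standalone sketch (the sentinel error, the cap of five, and the helper name are illustrative, mirroring the retryCount check that follows):

	package main

	import (
		"errors"
		"fmt"
	)

	// errGone stands in for libnetwork.ErrNoSuchNetwork: the one failure worth
	// retrying, since a concurrent detach can remove the network mid-attach.
	var errGone = errors.New("network disappeared")

	// withRetry retries fn only for the recoverable error, at most five times.
	func withRetry(fn func() error) error {
		for retries := 0; ; retries++ {
			err := fn()
			if err != errGone || retries >= 5 {
				return err
			}
		}
	}

	func main() {
		calls := 0
		err := withRetry(func() error {
			calls++
			if calls < 3 {
				return errGone // transient: vanished between attach and lookup
			}
			return nil
		})
		fmt.Println(calls, err) // 3 <nil>
	}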
+ if config != nil { + if _, ok := err.(libnetwork.ErrNoSuchNetwork); ok { + if retryCount >= 5 { + return nil, nil, fmt.Errorf("could not find network %s after successful attachment", idOrName) + } + retryCount++ + continue + } + } + + return nil, nil, err + } + + break + } + + // This container has attachment to a swarm scope + // network. Update the container network settings accordingly. + container.NetworkSettings.HasSwarmEndpoint = true + return n, config, nil +} + +// updateContainerNetworkSettings updates the network settings +func (daemon *Daemon) updateContainerNetworkSettings(container *container.Container, endpointsConfig map[string]*networktypes.EndpointSettings) { + var n libnetwork.Network + + mode := container.HostConfig.NetworkMode + if container.Config.NetworkDisabled || mode.IsContainer() { + return + } + + networkName := mode.NetworkName() + if mode.IsDefault() { + networkName = daemon.netController.Config().Daemon.DefaultNetwork + } + + if mode.IsUserDefined() { + var err error + + n, err = daemon.FindNetwork(networkName) + if err == nil { + networkName = n.Name() + } + } + + if container.NetworkSettings == nil { + container.NetworkSettings = &network.Settings{} + } + + if len(endpointsConfig) > 0 { + if container.NetworkSettings.Networks == nil { + container.NetworkSettings.Networks = make(map[string]*network.EndpointSettings) + } + + for name, epConfig := range endpointsConfig { + container.NetworkSettings.Networks[name] = &network.EndpointSettings{ + EndpointSettings: epConfig, + } + } + } + + if container.NetworkSettings.Networks == nil { + container.NetworkSettings.Networks = make(map[string]*network.EndpointSettings) + container.NetworkSettings.Networks[networkName] = &network.EndpointSettings{ + EndpointSettings: &networktypes.EndpointSettings{}, + } + } + + // Convert any settings added by client in default name to + // engine's default network name key + if mode.IsDefault() { + if nConf, ok := container.NetworkSettings.Networks[mode.NetworkName()]; ok { + container.NetworkSettings.Networks[networkName] = nConf + delete(container.NetworkSettings.Networks, mode.NetworkName()) + } + } + + if !mode.IsUserDefined() { + return + } + // Make sure to internally store the per network endpoint config by network name + if _, ok := container.NetworkSettings.Networks[networkName]; ok { + return + } + + if n != nil { + if nwConfig, ok := container.NetworkSettings.Networks[n.ID()]; ok { + container.NetworkSettings.Networks[networkName] = nwConfig + delete(container.NetworkSettings.Networks, n.ID()) + return + } + } +} + +func (daemon *Daemon) allocateNetwork(container *container.Container) error { + start := time.Now() + controller := daemon.netController + + if daemon.netController == nil { + return nil + } + + // Cleanup any stale sandbox left over due to ungraceful daemon shutdown + if err := controller.SandboxDestroy(container.ID); err != nil { + logrus.Errorf("failed to cleanup up stale network sandbox for container %s", container.ID) + } + + if container.Config.NetworkDisabled || container.HostConfig.NetworkMode.IsContainer() { + return nil + } + + updateSettings := false + + if len(container.NetworkSettings.Networks) == 0 { + daemon.updateContainerNetworkSettings(container, nil) + updateSettings = true + } + + // always connect default network first since only default + // network mode support link and we need do some setting + // on sandbox initialize for link, but the sandbox only be initialized + // on first network connecting. 
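Restated plainly, the comment above encodes two constraints: the default bridge network must be connected first, because legacy links and the initial sandbox setup happen only on the first connect; and the remaining networks are walked over a copy, because connectToNetwork mutates container.NetworkSettings.Networks while it runs. The shape of that loop, reduced to a standalone sketch with a stand-in connect function:

	package main

	import "fmt"

	func main() {
		networks := map[string]string{"bridge": "ep1", "backend": "ep2"}

		connect := func(name string) {
			// Stand-in for connectToNetwork, which may mutate the networks map.
			fmt.Println("connect", name)
		}

		const defaultNet = "bridge"
		if _, ok := networks[defaultNet]; ok {
			connect(defaultNet) // default network first: link setup depends on it
		}

		// Copy the remaining keys so mutation during connect cannot skew iteration.
		rest := make([]string, 0, len(networks))
		for name := range networks {
			if name != defaultNet {
				rest = append(rest, name)
			}
		}
		for _, name := range rest {
			connect(name)
		}
	}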
+ defaultNetName := runconfig.DefaultDaemonNetworkMode().NetworkName() + if nConf, ok := container.NetworkSettings.Networks[defaultNetName]; ok { + cleanOperationalData(nConf) + if err := daemon.connectToNetwork(container, defaultNetName, nConf.EndpointSettings, updateSettings); err != nil { + return err + } + + } + + // the intermediate map is necessary because "connectToNetwork" modifies "container.NetworkSettings.Networks" + networks := make(map[string]*network.EndpointSettings) + for n, epConf := range container.NetworkSettings.Networks { + if n == defaultNetName { + continue + } + + networks[n] = epConf + } + + for netName, epConf := range networks { + cleanOperationalData(epConf) + if err := daemon.connectToNetwork(container, netName, epConf.EndpointSettings, updateSettings); err != nil { + return err + } + } + + // If the container is not to be connected to any network, + // create its network sandbox now if not present + if len(networks) == 0 { + if nil == daemon.getNetworkSandbox(container) { + options, err := daemon.buildSandboxOptions(container) + if err != nil { + return err + } + sb, err := daemon.netController.NewSandbox(container.ID, options...) + if err != nil { + return err + } + container.UpdateSandboxNetworkSettings(sb) + defer func() { + if err != nil { + sb.Delete() + } + }() + } + + } + + if _, err := container.WriteHostConfig(); err != nil { + return err + } + networkActions.WithValues("allocate").UpdateSince(start) + return nil +} + +func (daemon *Daemon) getNetworkSandbox(container *container.Container) libnetwork.Sandbox { + var sb libnetwork.Sandbox + daemon.netController.WalkSandboxes(func(s libnetwork.Sandbox) bool { + if s.ContainerID() == container.ID { + sb = s + return true + } + return false + }) + return sb +} + +// hasUserDefinedIPAddress returns whether the passed endpoint configuration contains IP address configuration +func hasUserDefinedIPAddress(epConfig *networktypes.EndpointSettings) bool { + return epConfig != nil && epConfig.IPAMConfig != nil && (len(epConfig.IPAMConfig.IPv4Address) > 0 || len(epConfig.IPAMConfig.IPv6Address) > 0) +} + +// User specified ip address is acceptable only for networks with user specified subnets. 
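Note that validateNetworkingConfig below only verifies that the network declares a user-specified pool at all; checking that the requested address actually falls inside the subnet is left to IPAM at allocation time. For contrast, a self-contained membership check with the standard library (illustrative, not the vendored logic):

	package main

	import (
		"fmt"
		"net"
	)

	// subnetAccepts reports whether a requested static IP falls inside one of
	// the user-specified subnets; with none configured, no static IP is valid.
	func subnetAccepts(subnets []string, ip string) bool {
		addr := net.ParseIP(ip)
		for _, s := range subnets {
			if _, cidr, err := net.ParseCIDR(s); err == nil && cidr.Contains(addr) {
				return true
			}
		}
		return false
	}

	func main() {
		fmt.Println(subnetAccepts([]string{"172.20.0.0/16"}, "172.20.0.5")) // true
		fmt.Println(subnetAccepts(nil, "172.20.0.5"))                      // false
	}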
+func validateNetworkingConfig(n libnetwork.Network, epConfig *networktypes.EndpointSettings) error { + if n == nil || epConfig == nil { + return nil + } + if !hasUserDefinedIPAddress(epConfig) { + return nil + } + _, _, nwIPv4Configs, nwIPv6Configs := n.Info().IpamConfig() + for _, s := range []struct { + ipConfigured bool + subnetConfigs []*libnetwork.IpamConf + }{ + { + ipConfigured: len(epConfig.IPAMConfig.IPv4Address) > 0, + subnetConfigs: nwIPv4Configs, + }, + { + ipConfigured: len(epConfig.IPAMConfig.IPv6Address) > 0, + subnetConfigs: nwIPv6Configs, + }, + } { + if s.ipConfigured { + foundSubnet := false + for _, cfg := range s.subnetConfigs { + if len(cfg.PreferredPool) > 0 { + foundSubnet = true + break + } + } + if !foundSubnet { + return runconfig.ErrUnsupportedNetworkNoSubnetAndIP + } + } + } + + return nil +} + +// cleanOperationalData resets the operational data from the passed endpoint settings +func cleanOperationalData(es *network.EndpointSettings) { + es.EndpointID = "" + es.Gateway = "" + es.IPAddress = "" + es.IPPrefixLen = 0 + es.IPv6Gateway = "" + es.GlobalIPv6Address = "" + es.GlobalIPv6PrefixLen = 0 + es.MacAddress = "" + if es.IPAMOperational { + es.IPAMConfig = nil + } +} + +func (daemon *Daemon) updateNetworkConfig(container *container.Container, n libnetwork.Network, endpointConfig *networktypes.EndpointSettings, updateSettings bool) error { + + if !containertypes.NetworkMode(n.Name()).IsUserDefined() { + if hasUserDefinedIPAddress(endpointConfig) && !enableIPOnPredefinedNetwork() { + return runconfig.ErrUnsupportedNetworkAndIP + } + if endpointConfig != nil && len(endpointConfig.Aliases) > 0 && !container.EnableServiceDiscoveryOnDefaultNetwork() { + return runconfig.ErrUnsupportedNetworkAndAlias + } + } else { + addShortID := true + shortID := stringid.TruncateID(container.ID) + for _, alias := range endpointConfig.Aliases { + if alias == shortID { + addShortID = false + break + } + } + if addShortID { + endpointConfig.Aliases = append(endpointConfig.Aliases, shortID) + } + } + + if err := validateNetworkingConfig(n, endpointConfig); err != nil { + return err + } + + if updateSettings { + if err := daemon.updateNetworkSettings(container, n, endpointConfig); err != nil { + return err + } + } + return nil +} + +func (daemon *Daemon) connectToNetwork(container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings, updateSettings bool) (err error) { + start := time.Now() + if container.HostConfig.NetworkMode.IsContainer() { + return runconfig.ErrConflictSharedNetwork + } + if containertypes.NetworkMode(idOrName).IsBridge() && + daemon.configStore.DisableBridge { + container.Config.NetworkDisabled = true + return nil + } + if endpointConfig == nil { + endpointConfig = &networktypes.EndpointSettings{} + } + + n, config, err := daemon.findAndAttachNetwork(container, idOrName, endpointConfig) + if err != nil { + return err + } + if n == nil { + return nil + } + + var operIPAM bool + if config != nil { + if epConfig, ok := config.EndpointsConfig[n.Name()]; ok { + if endpointConfig.IPAMConfig == nil || + (endpointConfig.IPAMConfig.IPv4Address == "" && + endpointConfig.IPAMConfig.IPv6Address == "" && + len(endpointConfig.IPAMConfig.LinkLocalIPs) == 0) { + operIPAM = true + } + + // copy IPAMConfig and NetworkID from epConfig via AttachNetwork + endpointConfig.IPAMConfig = epConfig.IPAMConfig + endpointConfig.NetworkID = epConfig.NetworkID + } + } + + err = daemon.updateNetworkConfig(container, n, endpointConfig, updateSettings) + if err != 
nil { + return err + } + + controller := daemon.netController + sb := daemon.getNetworkSandbox(container) + createOptions, err := container.BuildCreateEndpointOptions(n, endpointConfig, sb, daemon.configStore.DNS) + if err != nil { + return err + } + + endpointName := strings.TrimPrefix(container.Name, "/") + ep, err := n.CreateEndpoint(endpointName, createOptions...) + if err != nil { + return err + } + defer func() { + if err != nil { + if e := ep.Delete(false); e != nil { + logrus.Warnf("Could not rollback container connection to network %s", idOrName) + } + } + }() + container.NetworkSettings.Networks[n.Name()] = &network.EndpointSettings{ + EndpointSettings: endpointConfig, + IPAMOperational: operIPAM, + } + if _, ok := container.NetworkSettings.Networks[n.ID()]; ok { + delete(container.NetworkSettings.Networks, n.ID()) + } + + if err := daemon.updateEndpointNetworkSettings(container, n, ep); err != nil { + return err + } + + if sb == nil { + options, err := daemon.buildSandboxOptions(container) + if err != nil { + return err + } + sb, err = controller.NewSandbox(container.ID, options...) + if err != nil { + return err + } + + container.UpdateSandboxNetworkSettings(sb) + } + + joinOptions, err := container.BuildJoinOptions(n) + if err != nil { + return err + } + + if err := ep.Join(sb, joinOptions...); err != nil { + return err + } + + if !container.Managed { + // add container name/alias to DNS + if err := daemon.ActivateContainerServiceBinding(container.Name); err != nil { + return fmt.Errorf("Activate container service binding for %s failed: %v", container.Name, err) + } + } + + if err := container.UpdateJoinInfo(n, ep); err != nil { + return fmt.Errorf("Updating join info failed: %v", err) + } + + container.NetworkSettings.Ports = getPortMapInfo(sb) + + daemon.LogNetworkEventWithAttributes(n, "connect", map[string]string{"container": container.ID}) + networkActions.WithValues("connect").UpdateSince(start) + return nil +} + +// ForceEndpointDelete deletes an endpoint from a network forcefully +func (daemon *Daemon) ForceEndpointDelete(name string, networkName string) error { + n, err := daemon.FindNetwork(networkName) + if err != nil { + return err + } + + ep, err := n.EndpointByName(name) + if err != nil { + return err + } + return ep.Delete(true) +} + +func (daemon *Daemon) disconnectFromNetwork(container *container.Container, n libnetwork.Network, force bool) error { + var ( + ep libnetwork.Endpoint + sbox libnetwork.Sandbox + ) + + s := func(current libnetwork.Endpoint) bool { + epInfo := current.Info() + if epInfo == nil { + return false + } + if sb := epInfo.Sandbox(); sb != nil { + if sb.ContainerID() == container.ID { + ep = current + sbox = sb + return true + } + } + return false + } + n.WalkEndpoints(s) + + if ep == nil && force { + epName := strings.TrimPrefix(container.Name, "/") + ep, err := n.EndpointByName(epName) + if err != nil { + return err + } + return ep.Delete(force) + } + + if ep == nil { + return fmt.Errorf("container %s is not connected to network %s", container.ID, n.Name()) + } + + if err := ep.Leave(sbox); err != nil { + return fmt.Errorf("container %s failed to leave network %s: %v", container.ID, n.Name(), err) + } + + container.NetworkSettings.Ports = getPortMapInfo(sbox) + + if err := ep.Delete(false); err != nil { + return fmt.Errorf("endpoint delete failed for container %s on network %s: %v", container.ID, n.Name(), err) + } + + delete(container.NetworkSettings.Networks, n.Name()) + + daemon.tryDetachContainerFromClusterNetwork(n, container) + + 
return nil +} + +func (daemon *Daemon) tryDetachContainerFromClusterNetwork(network libnetwork.Network, container *container.Container) { + if daemon.clusterProvider != nil && network.Info().Dynamic() && !container.Managed { + if err := daemon.clusterProvider.DetachNetwork(network.Name(), container.ID); err != nil { + logrus.Warnf("error detaching from network %s: %v", network.Name(), err) + if err := daemon.clusterProvider.DetachNetwork(network.ID(), container.ID); err != nil { + logrus.Warnf("error detaching from network %s: %v", network.ID(), err) + } + } + } + attributes := map[string]string{ + "container": container.ID, + } + daemon.LogNetworkEventWithAttributes(network, "disconnect", attributes) +} + +func (daemon *Daemon) initializeNetworking(container *container.Container) error { + var err error + + if container.HostConfig.NetworkMode.IsContainer() { + // we need to get the hosts files from the container to join + nc, err := daemon.getNetworkedContainer(container.ID, container.HostConfig.NetworkMode.ConnectedContainer()) + if err != nil { + return err + } + + err = daemon.initializeNetworkingPaths(container, nc) + if err != nil { + return err + } + + container.Config.Hostname = nc.Config.Hostname + container.Config.Domainname = nc.Config.Domainname + return nil + } + + if container.HostConfig.NetworkMode.IsHost() { + if container.Config.Hostname == "" { + container.Config.Hostname, err = os.Hostname() + if err != nil { + return err + } + } + } + + if err := daemon.allocateNetwork(container); err != nil { + return err + } + + return container.BuildHostnameFile() +} + +func (daemon *Daemon) getNetworkedContainer(containerID, connectedContainerID string) (*container.Container, error) { + nc, err := daemon.GetContainer(connectedContainerID) + if err != nil { + return nil, err + } + if containerID == nc.ID { + return nil, fmt.Errorf("cannot join own network") + } + if !nc.IsRunning() { + err := fmt.Errorf("cannot join network of a non running container: %s", connectedContainerID) + return nil, derr.NewRequestConflictError(err) + } + if nc.IsRestarting() { + return nil, errContainerIsRestarting(connectedContainerID) + } + return nc, nil +} + +func (daemon *Daemon) releaseNetwork(container *container.Container) { + start := time.Now() + if daemon.netController == nil { + return + } + if container.HostConfig.NetworkMode.IsContainer() || container.Config.NetworkDisabled { + return + } + + sid := container.NetworkSettings.SandboxID + settings := container.NetworkSettings.Networks + container.NetworkSettings.Ports = nil + + if sid == "" { + return + } + + var networks []libnetwork.Network + for n, epSettings := range settings { + if nw, err := daemon.FindNetwork(n); err == nil { + networks = append(networks, nw) + } + + if epSettings.EndpointSettings == nil { + continue + } + + cleanOperationalData(epSettings) + } + + sb, err := daemon.netController.SandboxByID(sid) + if err != nil { + logrus.Warnf("error locating sandbox id %s: %v", sid, err) + return + } + + if err := sb.Delete(); err != nil { + logrus.Errorf("Error deleting sandbox id %s for container %s: %v", sid, container.ID, err) + } + + for _, nw := range networks { + daemon.tryDetachContainerFromClusterNetwork(nw, container) + } + networkActions.WithValues("release").UpdateSince(start) +} + +func errRemovalContainer(containerID string) error { + return fmt.Errorf("Container %s is marked for removal and cannot be connected or disconnected to the network", containerID) +} + +// ConnectToNetwork connects a container to a network +func 
(daemon *Daemon) ConnectToNetwork(container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings) error { + if endpointConfig == nil { + endpointConfig = &networktypes.EndpointSettings{} + } + container.Lock() + defer container.Unlock() + + if !container.Running { + if container.RemovalInProgress || container.Dead { + return errRemovalContainer(container.ID) + } + + n, err := daemon.FindNetwork(idOrName) + if err == nil && n != nil { + if err := daemon.updateNetworkConfig(container, n, endpointConfig, true); err != nil { + return err + } + } else { + container.NetworkSettings.Networks[idOrName] = &network.EndpointSettings{ + EndpointSettings: endpointConfig, + } + } + } else if !daemon.isNetworkHotPluggable() { + return fmt.Errorf(runtime.GOOS + " does not support connecting a running container to a network") + } else { + if err := daemon.connectToNetwork(container, idOrName, endpointConfig, true); err != nil { + return err + } + } + + return container.CheckpointTo(daemon.containersReplica) +} + +// DisconnectFromNetwork disconnects a container from the network identified by networkName. +func (daemon *Daemon) DisconnectFromNetwork(container *container.Container, networkName string, force bool) error { + n, err := daemon.FindNetwork(networkName) + container.Lock() + defer container.Unlock() + + if !container.Running || (err != nil && force) { + if container.RemovalInProgress || container.Dead { + return errRemovalContainer(container.ID) + } + // If networkName was resolved, use n.Name(); this covers the + // case where a network ID was passed instead of a name. + if n != nil { + networkName = n.Name() + } + if _, ok := container.NetworkSettings.Networks[networkName]; !ok { + return fmt.Errorf("container %s is not connected to the network %s", container.ID, networkName) + } + delete(container.NetworkSettings.Networks, networkName) + } else if err == nil && !daemon.isNetworkHotPluggable() { + return fmt.Errorf(runtime.GOOS + " does not support disconnecting a running container from a network") + } else if err == nil { + if container.HostConfig.NetworkMode.IsHost() && containertypes.NetworkMode(n.Type()).IsHost() { + return runconfig.ErrConflictHostNetwork + } + + if err := daemon.disconnectFromNetwork(container, n, false); err != nil { + return err + } + } else { + return err + } + + if err := container.CheckpointTo(daemon.containersReplica); err != nil { + return err + } + + if n != nil { + daemon.LogNetworkEventWithAttributes(n, "disconnect", map[string]string{ + "container": container.ID, + }) + } + + return nil +} + +// ActivateContainerServiceBinding puts this container into load balancer active rotation and DNS response +func (daemon *Daemon) ActivateContainerServiceBinding(containerName string) error { + container, err := daemon.GetContainer(containerName) + if err != nil { + return err + } + sb := daemon.getNetworkSandbox(container) + if sb == nil { + return fmt.Errorf("network sandbox does not exist for container %s", containerName) + } + return sb.EnableService() +} + +// DeactivateContainerServiceBinding removes this container from load balancer active rotation and DNS response +func (daemon *Daemon) DeactivateContainerServiceBinding(containerName string) error { + container, err := daemon.GetContainer(containerName) + if err != nil { + return err + } + sb := daemon.getNetworkSandbox(container) + if sb == nil { + // If the network sandbox is not found, then there is nothing to deactivate + logrus.Debugf("Could not find network sandbox for container %s on service binding deactivation 
request", containerName) + return nil + } + return sb.DisableService() +} diff --git a/vendor/github.com/moby/moby/daemon/container_operations_solaris.go b/vendor/github.com/moby/moby/daemon/container_operations_solaris.go new file mode 100644 index 000000000..c5728d0ee --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/container_operations_solaris.go @@ -0,0 +1,47 @@ +// +build solaris + +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/runconfig" + "github.com/docker/libnetwork" +) + +func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) { + return nil, nil +} + +func (daemon *Daemon) setupIpcDirs(container *container.Container) error { + return nil +} + +func killProcessDirectly(container *container.Container) error { + return nil +} + +func detachMounted(path string) error { + return nil +} + +func isLinkable(child *container.Container) bool { + // A container is linkable only if it belongs to the default network + _, ok := child.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] + return ok +} + +func enableIPOnPredefinedNetwork() bool { + return false +} + +func (daemon *Daemon) isNetworkHotPluggable() bool { + return false +} + +func setupPathsAndSandboxOptions(container *container.Container, sboxOptions *[]libnetwork.SandboxOption) error { + return nil +} + +func (daemon *Daemon) initializeNetworkingPaths(container *container.Container, nc *container.Container) error { + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/container_operations_unix.go b/vendor/github.com/moby/moby/daemon/container_operations_unix.go new file mode 100644 index 000000000..8c1b44b60 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/container_operations_unix.go @@ -0,0 +1,357 @@ +// +build linux freebsd + +package daemon + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/links" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/runconfig" + "github.com/docker/libnetwork" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) { + var env []string + children := daemon.children(container) + + bridgeSettings := container.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] + if bridgeSettings == nil || bridgeSettings.EndpointSettings == nil { + return nil, nil + } + + for linkAlias, child := range children { + if !child.IsRunning() { + return nil, fmt.Errorf("Cannot link to a non running container: %s AS %s", child.Name, linkAlias) + } + + childBridgeSettings := child.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] + if childBridgeSettings == nil || childBridgeSettings.EndpointSettings == nil { + return nil, fmt.Errorf("container %s not attached to default bridge network", child.ID) + } + + link := links.NewLink( + bridgeSettings.IPAddress, + childBridgeSettings.IPAddress, + linkAlias, + child.Config.Env, + child.Config.ExposedPorts, + ) + + env = append(env, link.ToEnv()...) 
+ } + + return env, nil +} + +func (daemon *Daemon) getIpcContainer(container *container.Container) (*container.Container, error) { + containerID := container.HostConfig.IpcMode.Container() + container, err := daemon.GetContainer(containerID) + if err != nil { + // note: container has been reassigned above and is nil on error, + // so the message must reference containerID rather than container.ID + return nil, errors.Wrapf(err, "cannot join IPC of a non running container: %s", containerID) + } + return container, daemon.checkContainer(container, containerIsRunning, containerIsNotRestarting) +} + +func (daemon *Daemon) getPidContainer(container *container.Container) (*container.Container, error) { + containerID := container.HostConfig.PidMode.Container() + container, err := daemon.GetContainer(containerID) + if err != nil { + return nil, errors.Wrapf(err, "cannot join PID of a non running container: %s", containerID) + } + return container, daemon.checkContainer(container, containerIsRunning, containerIsNotRestarting) +} + +func containerIsRunning(c *container.Container) error { + if !c.IsRunning() { + return errors.Errorf("container %s is not running", c.ID) + } + return nil +} + +func containerIsNotRestarting(c *container.Container) error { + if c.IsRestarting() { + return errContainerIsRestarting(c.ID) + } + return nil +} + +func (daemon *Daemon) setupIpcDirs(c *container.Container) error { + var err error + + c.ShmPath, err = c.ShmResourcePath() + if err != nil { + return err + } + + if c.HostConfig.IpcMode.IsContainer() { + ic, err := daemon.getIpcContainer(c) + if err != nil { + return err + } + c.ShmPath = ic.ShmPath + } else if c.HostConfig.IpcMode.IsHost() { + if _, err := os.Stat("/dev/shm"); err != nil { + return fmt.Errorf("/dev/shm is not mounted, but must be for --ipc=host") + } + c.ShmPath = "/dev/shm" + } else { + rootIDs := daemon.idMappings.RootPair() + if !c.HasMountFor("/dev/shm") { + shmPath, err := c.ShmResourcePath() + if err != nil { + return err + } + + if err := idtools.MkdirAllAndChown(shmPath, 0700, rootIDs); err != nil { + return err + } + + shmSize := int64(daemon.configStore.ShmSize) + if c.HostConfig.ShmSize != 0 { + shmSize = c.HostConfig.ShmSize + } + shmproperty := "mode=1777,size=" + strconv.FormatInt(shmSize, 10) + if err := unix.Mount("shm", shmPath, "tmpfs", uintptr(unix.MS_NOEXEC|unix.MS_NOSUID|unix.MS_NODEV), label.FormatMountLabel(shmproperty, c.GetMountLabel())); err != nil { + return fmt.Errorf("mounting shm tmpfs: %s", err) + } + if err := os.Chown(shmPath, rootIDs.UID, rootIDs.GID); err != nil { + return err + } + } + + } + + return nil +} + +func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) { + if len(c.SecretReferences) == 0 { + return nil + } + + localMountPath := c.SecretMountPath() + logrus.Debugf("secrets: setting up secret dir: %s", localMountPath) + + // retrieve possible remapped range start for root UID, GID + rootIDs := daemon.idMappings.RootPair() + // create tmpfs + if err := idtools.MkdirAllAndChown(localMountPath, 0700, rootIDs); err != nil { + return errors.Wrap(err, "error creating secret local mount path") + } + + defer func() { + if setupErr != nil { + // cleanup + _ = detachMounted(localMountPath) + + if err := os.RemoveAll(localMountPath); err != nil { + logrus.Errorf("error cleaning up secret mount: %s", err) + } + } + }() + + tmpfsOwnership := fmt.Sprintf("uid=%d,gid=%d", rootIDs.UID, rootIDs.GID) + if err := mount.Mount("tmpfs", localMountPath, "tmpfs", "nodev,nosuid,noexec,"+tmpfsOwnership); err != nil { + return errors.Wrap(err, "unable to setup secret mount") + } + + if c.DependencyStore == nil { + return fmt.Errorf("secret 
store is not initialized") + } + + for _, s := range c.SecretReferences { + // TODO (ehazlett): use type switch when more are supported + if s.File == nil { + logrus.Error("secret target type is not a file target") + continue + } + + // secrets are created in the SecretMountPath on the host, at a + // single level + fPath := c.SecretFilePath(*s) + if err := idtools.MkdirAllAndChown(filepath.Dir(fPath), 0700, rootIDs); err != nil { + return errors.Wrap(err, "error creating secret mount path") + } + + logrus.WithFields(logrus.Fields{ + "name": s.File.Name, + "path": fPath, + }).Debug("injecting secret") + secret, err := c.DependencyStore.Secrets().Get(s.SecretID) + if err != nil { + return errors.Wrap(err, "unable to get secret from secret store") + } + if err := ioutil.WriteFile(fPath, secret.Spec.Data, s.File.Mode); err != nil { + return errors.Wrap(err, "error injecting secret") + } + + uid, err := strconv.Atoi(s.File.UID) + if err != nil { + return err + } + gid, err := strconv.Atoi(s.File.GID) + if err != nil { + return err + } + + if err := os.Chown(fPath, rootIDs.UID+uid, rootIDs.GID+gid); err != nil { + return errors.Wrap(err, "error setting ownership for secret") + } + } + + label.Relabel(localMountPath, c.MountLabel, false) + + // remount secrets ro + if err := mount.Mount("tmpfs", localMountPath, "tmpfs", "remount,ro,"+tmpfsOwnership); err != nil { + return errors.Wrap(err, "unable to remount secret dir as readonly") + } + + return nil +} + +func (daemon *Daemon) setupConfigDir(c *container.Container) (setupErr error) { + if len(c.ConfigReferences) == 0 { + return nil + } + + localPath := c.ConfigsDirPath() + logrus.Debugf("configs: setting up config dir: %s", localPath) + + // retrieve possible remapped range start for root UID, GID + rootIDs := daemon.idMappings.RootPair() + // create tmpfs + if err := idtools.MkdirAllAndChown(localPath, 0700, rootIDs); err != nil { + return errors.Wrap(err, "error creating config dir") + } + + defer func() { + if setupErr != nil { + if err := os.RemoveAll(localPath); err != nil { + logrus.Errorf("error cleaning up config dir: %s", err) + } + } + }() + + if c.DependencyStore == nil { + return fmt.Errorf("config store is not initialized") + } + + for _, configRef := range c.ConfigReferences { + // TODO (ehazlett): use type switch when more are supported + if configRef.File == nil { + logrus.Error("config target type is not a file target") + continue + } + + fPath := c.ConfigFilePath(*configRef) + + log := logrus.WithFields(logrus.Fields{"name": configRef.File.Name, "path": fPath}) + + if err := idtools.MkdirAllAndChown(filepath.Dir(fPath), 0700, rootIDs); err != nil { + return errors.Wrap(err, "error creating config path") + } + + log.Debug("injecting config") + config, err := c.DependencyStore.Configs().Get(configRef.ConfigID) + if err != nil { + return errors.Wrap(err, "unable to get config from config store") + } + if err := ioutil.WriteFile(fPath, config.Spec.Data, configRef.File.Mode); err != nil { + return errors.Wrap(err, "error injecting config") + } + + uid, err := strconv.Atoi(configRef.File.UID) + if err != nil { + return err + } + gid, err := strconv.Atoi(configRef.File.GID) + if err != nil { + return err + } + + if err := os.Chown(fPath, rootIDs.UID+uid, rootIDs.GID+gid); err != nil { + return errors.Wrap(err, "error setting ownership for config") + } + } + + return nil +} + +func killProcessDirectly(cntr *container.Container) error { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // 
Block until the container stops or the timeout expires. + status := <-cntr.Wait(ctx, container.WaitConditionNotRunning) + if status.Err() != nil { + // Ensure that we don't kill ourselves + if pid := cntr.GetPID(); pid != 0 { + logrus.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", stringid.TruncateID(cntr.ID)) + if err := unix.Kill(pid, 9); err != nil { + if err != unix.ESRCH { + return err + } + e := errNoSuchProcess{pid, 9} + logrus.Debug(e) + return e + } + } + } + return nil +} + +func detachMounted(path string) error { + return unix.Unmount(path, unix.MNT_DETACH) +} + +func isLinkable(child *container.Container) bool { + // A container is linkable only if it belongs to the default network + _, ok := child.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] + return ok +} + +func enableIPOnPredefinedNetwork() bool { + return false +} + +func (daemon *Daemon) isNetworkHotPluggable() bool { + return true +} + +func setupPathsAndSandboxOptions(container *container.Container, sboxOptions *[]libnetwork.SandboxOption) error { + var err error + + container.HostsPath, err = container.GetRootResourcePath("hosts") + if err != nil { + return err + } + *sboxOptions = append(*sboxOptions, libnetwork.OptionHostsPath(container.HostsPath)) + + container.ResolvConfPath, err = container.GetRootResourcePath("resolv.conf") + if err != nil { + return err + } + *sboxOptions = append(*sboxOptions, libnetwork.OptionResolvConfPath(container.ResolvConfPath)) + return nil +} + +func (daemon *Daemon) initializeNetworkingPaths(container *container.Container, nc *container.Container) error { + container.HostnamePath = nc.HostnamePath + container.HostsPath = nc.HostsPath + container.ResolvConfPath = nc.ResolvConfPath + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/container_operations_windows.go b/vendor/github.com/moby/moby/daemon/container_operations_windows.go new file mode 100644 index 000000000..2788f1a7c --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/container_operations_windows.go @@ -0,0 +1,202 @@ +package daemon + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/system" + "github.com/docker/libnetwork" + "github.com/pkg/errors" +) + +func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) { + return nil, nil +} + +func (daemon *Daemon) setupConfigDir(c *container.Container) (setupErr error) { + if len(c.ConfigReferences) == 0 { + return nil + } + + localPath := c.ConfigsDirPath() + logrus.Debugf("configs: setting up config dir: %s", localPath) + + // create local config root + if err := system.MkdirAllWithACL(localPath, 0, system.SddlAdministratorsLocalSystem); err != nil { + return errors.Wrap(err, "error creating config dir") + } + + defer func() { + if setupErr != nil { + if err := os.RemoveAll(localPath); err != nil { + logrus.Errorf("error cleaning up config dir: %s", err) + } + } + }() + + if c.DependencyStore == nil { + return fmt.Errorf("config store is not initialized") + } + + for _, configRef := range c.ConfigReferences { + // TODO (ehazlett): use type switch when more are supported + if configRef.File == nil { + logrus.Error("config target type is not a file target") + continue + } + + fPath := c.ConfigFilePath(*configRef) + + log := logrus.WithFields(logrus.Fields{"name": configRef.File.Name, "path": fPath}) + + log.Debug("injecting config") + config, err := 
c.DependencyStore.Configs().Get(configRef.ConfigID) + if err != nil { + return errors.Wrap(err, "unable to get config from config store") + } + if err := ioutil.WriteFile(fPath, config.Spec.Data, configRef.File.Mode); err != nil { + return errors.Wrap(err, "error injecting config") + } + } + + return nil +} + +// getSize returns real size & virtual size +func (daemon *Daemon) getSize(containerID string) (int64, int64) { + // TODO Windows + return 0, 0 +} + +func (daemon *Daemon) setupIpcDirs(container *container.Container) error { + return nil +} + +// TODO Windows: Fix Post-TP5. This is a hack to allow docker cp to work +// against containers which have volumes. You will still be able to cp +// to somewhere on the container drive, but not to any mounted volumes +// inside the container. Without this fix, docker cp is broken to any +// container which has a volume, regardless of where the file is inside the +// container. +func (daemon *Daemon) mountVolumes(container *container.Container) error { + return nil +} + +func detachMounted(path string) error { + return nil +} + +func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) { + if len(c.SecretReferences) == 0 { + return nil + } + + localMountPath := c.SecretMountPath() + logrus.Debugf("secrets: setting up secret dir: %s", localMountPath) + + // create local secret root + if err := system.MkdirAllWithACL(localMountPath, 0, system.SddlAdministratorsLocalSystem); err != nil { + return errors.Wrap(err, "error creating secret local directory") + } + + defer func() { + if setupErr != nil { + if err := os.RemoveAll(localMountPath); err != nil { + logrus.Errorf("error cleaning up secret mount: %s", err) + } + } + }() + + if c.DependencyStore == nil { + return fmt.Errorf("secret store is not initialized") + } + + for _, s := range c.SecretReferences { + // TODO (ehazlett): use type switch when more are supported + if s.File == nil { + logrus.Error("secret target type is not a file target") + continue + } + + // secrets are created in the SecretMountPath on the host, at a + // single level + fPath := c.SecretFilePath(*s) + logrus.WithFields(logrus.Fields{ + "name": s.File.Name, + "path": fPath, + }).Debug("injecting secret") + secret, err := c.DependencyStore.Secrets().Get(s.SecretID) + if err != nil { + return errors.Wrap(err, "unable to get secret from secret store") + } + if err := ioutil.WriteFile(fPath, secret.Spec.Data, s.File.Mode); err != nil { + return errors.Wrap(err, "error injecting secret") + } + } + + return nil +} + +func killProcessDirectly(container *container.Container) error { + return nil +} + +func isLinkable(child *container.Container) bool { + return false +} + +func enableIPOnPredefinedNetwork() bool { + return true +} + +func (daemon *Daemon) isNetworkHotPluggable() bool { + return false +} + +func setupPathsAndSandboxOptions(container *container.Container, sboxOptions *[]libnetwork.SandboxOption) error { + return nil +} + +func (daemon *Daemon) initializeNetworkingPaths(container *container.Container, nc *container.Container) error { + + if nc.HostConfig.Isolation.IsHyperV() { + return fmt.Errorf("sharing of hyperv containers network is not supported") + } + + container.NetworkSharedContainerID = nc.ID + + if nc.NetworkSettings != nil { + for n := range nc.NetworkSettings.Networks { + sn, err := daemon.FindNetwork(n) + if err != nil { + continue + } + + ep, err := nc.GetEndpointInNetwork(sn) + if err != nil { + continue + } + + data, err := ep.DriverInfo() + if err != nil { + continue + } + + if 
data["GW_INFO"] != nil { + gwInfo := data["GW_INFO"].(map[string]interface{}) + if gwInfo["hnsid"] != nil { + container.SharedEndpointList = append(container.SharedEndpointList, gwInfo["hnsid"].(string)) + } + } + + if data["hnsid"] != nil { + container.SharedEndpointList = append(container.SharedEndpointList, data["hnsid"].(string)) + } + } + } + + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/container_windows.go b/vendor/github.com/moby/moby/daemon/container_windows.go new file mode 100644 index 000000000..6fdd1e678 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/container_windows.go @@ -0,0 +1,11 @@ +//+build windows + +package daemon + +import ( + "github.com/docker/docker/container" +) + +func (daemon *Daemon) saveApparmorConfig(container *container.Container) error { + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/create.go b/vendor/github.com/moby/moby/daemon/create.go new file mode 100644 index 000000000..78070fd29 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/create.go @@ -0,0 +1,326 @@ +package daemon + +import ( + "fmt" + "net" + "runtime" + "strings" + "time" + + "github.com/pkg/errors" + + "github.com/Sirupsen/logrus" + apierrors "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/runconfig" + "github.com/opencontainers/selinux/go-selinux/label" +) + +// CreateManagedContainer creates a container that is managed by a Service +func (daemon *Daemon) CreateManagedContainer(params types.ContainerCreateConfig) (containertypes.ContainerCreateCreatedBody, error) { + return daemon.containerCreate(params, true) +} + +// ContainerCreate creates a regular container +func (daemon *Daemon) ContainerCreate(params types.ContainerCreateConfig) (containertypes.ContainerCreateCreatedBody, error) { + return daemon.containerCreate(params, false) +} + +func (daemon *Daemon) containerCreate(params types.ContainerCreateConfig, managed bool) (containertypes.ContainerCreateCreatedBody, error) { + start := time.Now() + if params.Config == nil { + return containertypes.ContainerCreateCreatedBody{}, fmt.Errorf("Config cannot be empty in order to create a container") + } + + warnings, err := daemon.verifyContainerSettings(params.HostConfig, params.Config, false) + if err != nil { + return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, err + } + + err = daemon.verifyNetworkingConfig(params.NetworkingConfig) + if err != nil { + return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, err + } + + if params.HostConfig == nil { + params.HostConfig = &containertypes.HostConfig{} + } + err = daemon.adaptContainerSettings(params.HostConfig, params.AdjustCPUShares) + if err != nil { + return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, err + } + + container, err := daemon.create(params, managed) + if err != nil { + return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, daemon.imageNotExistToErrcode(err) + } + containerActions.WithValues("create").UpdateSince(start) + + return containertypes.ContainerCreateCreatedBody{ID: container.ID, Warnings: warnings}, nil +} + +// Create creates a new container from 
the given configuration with a given name. +func (daemon *Daemon) create(params types.ContainerCreateConfig, managed bool) (retC *container.Container, retErr error) { + var ( + container *container.Container + img *image.Image + imgID image.ID + err error + ) + + // TODO: @jhowardmsft LCOW support - at a later point, can remove the hard-coding + // to force the platform to be linux. + // Default the platform if not supplied + if params.Platform == "" { + params.Platform = runtime.GOOS + } + if system.LCOWSupported() { + params.Platform = "linux" + } + + if params.Config.Image != "" { + img, err = daemon.GetImage(params.Config.Image) + if err != nil { + return nil, err + } + + if runtime.GOOS == "solaris" && img.OS != "solaris" { + return nil, errors.New("platform on which parent image was created is not Solaris") + } + imgID = img.ID() + + if runtime.GOOS == "windows" && img.OS == "linux" && !system.LCOWSupported() { + return nil, errors.New("platform on which parent image was created is not Windows") + } + } + + // Make sure the platform requested matches the image + if img != nil { + if params.Platform != img.Platform() { + // Ignore this in LCOW mode. @jhowardmsft TODO - This will need revisiting later. + if !system.LCOWSupported() { + return nil, fmt.Errorf("cannot create a %s container from a %s image", params.Platform, img.Platform()) + } + } + } + + if err := daemon.mergeAndVerifyConfig(params.Config, img); err != nil { + return nil, err + } + + if err := daemon.mergeAndVerifyLogConfig(&params.HostConfig.LogConfig); err != nil { + return nil, err + } + + if container, err = daemon.newContainer(params.Name, params.Platform, params.Config, params.HostConfig, imgID, managed); err != nil { + return nil, err + } + defer func() { + if retErr != nil { + if err := daemon.cleanupContainer(container, true, true); err != nil { + logrus.Errorf("failed to cleanup container on create error: %v", err) + } + } + }() + + if err := daemon.setSecurityOptions(container, params.HostConfig); err != nil { + return nil, err + } + + container.HostConfig.StorageOpt = params.HostConfig.StorageOpt + + // Set RWLayer for container after mount labels have been set + if err := daemon.setRWLayer(container); err != nil { + return nil, err + } + + rootIDs := daemon.idMappings.RootPair() + if err := idtools.MkdirAndChown(container.Root, 0700, rootIDs); err != nil { + return nil, err + } + if err := idtools.MkdirAndChown(container.CheckpointDir(), 0700, rootIDs); err != nil { + return nil, err + } + + if err := daemon.setHostConfig(container, params.HostConfig); err != nil { + return nil, err + } + + if err := daemon.createContainerPlatformSpecificSettings(container, params.Config, params.HostConfig); err != nil { + return nil, err + } + + var endpointsConfigs map[string]*networktypes.EndpointSettings + if params.NetworkingConfig != nil { + endpointsConfigs = params.NetworkingConfig.EndpointsConfig + } + // Make sure NetworkMode has an acceptable value. We do this to ensure + // backwards API compatibility. 
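+ // (An empty NetworkMode, as sent by older API clients, is normalized to the "default" network mode below.)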
+ runconfig.SetDefaultNetModeIfBlank(container.HostConfig) + + daemon.updateContainerNetworkSettings(container, endpointsConfigs) + if err := daemon.Register(container); err != nil { + return nil, err + } + stateCtr.set(container.ID, "stopped") + daemon.LogContainerEvent(container, "create") + return container, nil +} + +func toHostConfigSelinuxLabels(labels []string) []string { + for i, l := range labels { + labels[i] = "label=" + l + } + return labels +} + +func (daemon *Daemon) generateSecurityOpt(hostConfig *containertypes.HostConfig) ([]string, error) { + for _, opt := range hostConfig.SecurityOpt { + con := strings.Split(opt, "=") + if con[0] == "label" { + // Caller overrode SecurityOpts + return nil, nil + } + } + ipcMode := hostConfig.IpcMode + pidMode := hostConfig.PidMode + privileged := hostConfig.Privileged + if ipcMode.IsHost() || pidMode.IsHost() || privileged { + return toHostConfigSelinuxLabels(label.DisableSecOpt()), nil + } + + var ipcLabel []string + var pidLabel []string + ipcContainer := ipcMode.Container() + pidContainer := pidMode.Container() + if ipcContainer != "" { + c, err := daemon.GetContainer(ipcContainer) + if err != nil { + return nil, err + } + ipcLabel = label.DupSecOpt(c.ProcessLabel) + if pidContainer == "" { + return toHostConfigSelinuxLabels(ipcLabel), err + } + } + if pidContainer != "" { + c, err := daemon.GetContainer(pidContainer) + if err != nil { + return nil, err + } + + pidLabel = label.DupSecOpt(c.ProcessLabel) + if ipcContainer == "" { + return toHostConfigSelinuxLabels(pidLabel), err + } + } + + if pidLabel != nil && ipcLabel != nil { + for i := 0; i < len(pidLabel); i++ { + if pidLabel[i] != ipcLabel[i] { + return nil, fmt.Errorf("--ipc and --pid containers SELinux labels aren't the same") + } + } + return toHostConfigSelinuxLabels(pidLabel), nil + } + return nil, nil +} + +func (daemon *Daemon) setRWLayer(container *container.Container) error { + var layerID layer.ChainID + if container.ImageID != "" { + img, err := daemon.stores[container.Platform].imageStore.Get(container.ImageID) + if err != nil { + return err + } + layerID = img.RootFS.ChainID() + } + + rwLayerOpts := &layer.CreateRWLayerOpts{ + MountLabel: container.MountLabel, + InitFunc: daemon.getLayerInit(), + StorageOpt: container.HostConfig.StorageOpt, + } + + rwLayer, err := daemon.stores[container.Platform].layerStore.CreateRWLayer(container.ID, layerID, rwLayerOpts) + if err != nil { + return err + } + container.RWLayer = rwLayer + + return nil +} + +// VolumeCreate creates a volume with the specified name, driver, and opts +// This is called directly from the Engine API +func (daemon *Daemon) VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error) { + if name == "" { + name = stringid.GenerateNonCryptoID() + } + + v, err := daemon.volumes.Create(name, driverName, opts, labels) + if err != nil { + return nil, err + } + + daemon.LogVolumeEvent(v.Name(), "create", map[string]string{"driver": v.DriverName()}) + apiV := volumeToAPIType(v) + apiV.Mountpoint = v.Path() + return apiV, nil +} + +func (daemon *Daemon) mergeAndVerifyConfig(config *containertypes.Config, img *image.Image) error { + if img != nil && img.Config != nil { + if err := merge(config, img.Config); err != nil { + return err + } + } + // Reset the Entrypoint if it is [""] + if len(config.Entrypoint) == 1 && config.Entrypoint[0] == "" { + config.Entrypoint = nil + } + if len(config.Entrypoint) == 0 && len(config.Cmd) == 0 { + return fmt.Errorf("No command specified") + } + 
return nil +} + +// Checks if the client set configurations for more than one network while creating a container +// Also checks if the IPAMConfig is valid +func (daemon *Daemon) verifyNetworkingConfig(nwConfig *networktypes.NetworkingConfig) error { + if nwConfig == nil || len(nwConfig.EndpointsConfig) == 0 { + return nil + } + if len(nwConfig.EndpointsConfig) == 1 { + for _, v := range nwConfig.EndpointsConfig { + if v != nil && v.IPAMConfig != nil { + if v.IPAMConfig.IPv4Address != "" && net.ParseIP(v.IPAMConfig.IPv4Address).To4() == nil { + return apierrors.NewBadRequestError(fmt.Errorf("invalid IPv4 address: %s", v.IPAMConfig.IPv4Address)) + } + if v.IPAMConfig.IPv6Address != "" { + n := net.ParseIP(v.IPAMConfig.IPv6Address) + // if the address is an invalid network address (ParseIP == nil) or if it is + // an IPv4 address (To4() != nil), then it is an invalid IPv6 address + if n == nil || n.To4() != nil { + return apierrors.NewBadRequestError(fmt.Errorf("invalid IPv6 address: %s", v.IPAMConfig.IPv6Address)) + } + } + } + } + return nil + } + l := make([]string, 0, len(nwConfig.EndpointsConfig)) + for k := range nwConfig.EndpointsConfig { + l = append(l, k) + } + err := fmt.Errorf("Container cannot be connected to network endpoints: %s", strings.Join(l, ", ")) + return apierrors.NewBadRequestError(err) +} diff --git a/vendor/github.com/moby/moby/daemon/create_unix.go b/vendor/github.com/moby/moby/daemon/create_unix.go new file mode 100644 index 000000000..2501a3374 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/create_unix.go @@ -0,0 +1,81 @@ +// +build !windows + +package daemon + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/Sirupsen/logrus" + containertypes "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/stringid" + "github.com/opencontainers/selinux/go-selinux/label" +) + +// createContainerPlatformSpecificSettings performs platform specific container create functionality +func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error { + if err := daemon.Mount(container); err != nil { + return err + } + defer daemon.Unmount(container) + + rootIDs := daemon.idMappings.RootPair() + if err := container.SetupWorkingDirectory(rootIDs); err != nil { + return err + } + + for spec := range config.Volumes { + name := stringid.GenerateNonCryptoID() + destination := filepath.Clean(spec) + + // Skip volumes for which we already have something mounted on that + // destination because of a --volume-from. + if container.IsDestinationMounted(destination) { + continue + } + path, err := container.GetResourcePath(destination) + if err != nil { + return err + } + + stat, err := os.Stat(path) + if err == nil && !stat.IsDir() { + return fmt.Errorf("cannot mount volume over existing file, file exists %s", path) + } + + v, err := daemon.volumes.CreateWithRef(name, hostConfig.VolumeDriver, container.ID, nil, nil) + if err != nil { + return err + } + + if err := label.Relabel(v.Path(), container.MountLabel, true); err != nil { + return err + } + + container.AddMountPointWithVolume(destination, v, true) + } + return daemon.populateVolumes(container) +} + +// populateVolumes copies data from the container's rootfs into the volume for non-binds. +// this is only called when the container is created. 
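+// Only named volumes are populated: bind and tmpfs mounts are skipped below because mnt.Volume is nil or CopyData is unset for them.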
+func (daemon *Daemon) populateVolumes(c *container.Container) error { + for _, mnt := range c.MountPoints { + if mnt.Volume == nil { + continue + } + + if mnt.Type != mounttypes.TypeVolume || !mnt.CopyData { + continue + } + + logrus.Debugf("copying image data from %s:%s, to %s", c.ID, mnt.Destination, mnt.Name) + if err := c.CopyImagePathContent(mnt.Volume, mnt.Destination); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/create_windows.go b/vendor/github.com/moby/moby/daemon/create_windows.go new file mode 100644 index 000000000..bbf0dbe7b --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/create_windows.go @@ -0,0 +1,80 @@ +package daemon + +import ( + "fmt" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/volume" +) + +// createContainerPlatformSpecificSettings performs platform specific container create functionality +func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error { + // Make sure the host config has the default daemon isolation if not specified by caller. + if containertypes.Isolation.IsDefault(containertypes.Isolation(hostConfig.Isolation)) { + hostConfig.Isolation = daemon.defaultIsolation + } + + for spec := range config.Volumes { + + mp, err := volume.ParseMountRaw(spec, hostConfig.VolumeDriver) + if err != nil { + return fmt.Errorf("Unrecognised volume spec: %v", err) + } + + // If the mountpoint doesn't have a name, generate one. + if len(mp.Name) == 0 { + mp.Name = stringid.GenerateNonCryptoID() + } + + // Skip volumes for which we already have something mounted on that + // destination because of a --volume-from. + if container.IsDestinationMounted(mp.Destination) { + continue + } + + volumeDriver := hostConfig.VolumeDriver + + // Create the volume in the volume driver. If it doesn't exist, + // a new one will be created. + v, err := daemon.volumes.CreateWithRef(mp.Name, volumeDriver, container.ID, nil, nil) + if err != nil { + return err + } + + // FIXME Windows: This code block is present in the Linux version and + // allows the contents to be copied to the container FS prior to it + // being started. However, the function utilizes the FollowSymLinkInScope + // path which does not cope with Windows volume-style file paths. There + // is a separate effort to resolve this (@swernli), so this processing + // is deferred for now. A case where this would be useful is when + // a dockerfile includes a VOLUME statement, but something is created + // in that directory during the dockerfile processing. What this means + // on Windows for TP5 is that in that scenario, the contents will not + // copied, but that's (somewhat) OK as HCS will bomb out soon after + // at it doesn't support mapped directories which have contents in the + // destination path anyway. + // + // Example for repro later: + // FROM windowsservercore + // RUN mkdir c:\myvol + // RUN copy c:\windows\system32\ntdll.dll c:\myvol + // VOLUME "c:\myvol" + // + // Then + // docker build -t vol . + // docker run -it --rm vol cmd <-- This is where HCS will error out. 
+ // + // // never attempt to copy existing content in a container FS to a shared volume + // if v.DriverName() == volume.DefaultDriverName { + // if err := container.CopyImagePathContent(v, mp.Destination); err != nil { + // return err + // } + // } + + // Add it to container.MountPoints + container.AddMountPointWithVolume(mp.Destination, v, mp.RW) + } + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/daemon.go b/vendor/github.com/moby/moby/daemon/daemon.go new file mode 100644 index 000000000..e62ef08b6 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/daemon.go @@ -0,0 +1,1233 @@ +// Package daemon exposes the functions that occur on the host server +// that the Docker daemon is running. +// +// In implementing the various functions of the daemon, there is often +// a method-specific struct for configuring the runtime behavior. +package daemon + +import ( + "context" + "fmt" + "io/ioutil" + "net" + "os" + "path" + "path/filepath" + "runtime" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + containerd "github.com/containerd/containerd/api/grpc/types" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/daemon/discovery" + "github.com/docker/docker/daemon/events" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/opts" + // register graph drivers + _ "github.com/docker/docker/daemon/graphdriver/register" + "github.com/docker/docker/daemon/initlayer" + "github.com/docker/docker/daemon/stats" + dmetadata "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/migrate/v1" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/truncindex" + "github.com/docker/docker/plugin" + refstore "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/docker/runconfig" + volumedrivers "github.com/docker/docker/volume/drivers" + "github.com/docker/docker/volume/local" + "github.com/docker/docker/volume/store" + "github.com/docker/libnetwork" + "github.com/docker/libnetwork/cluster" + nwconfig "github.com/docker/libnetwork/config" + "github.com/docker/libtrust" + "github.com/pkg/errors" +) + +var ( + // DefaultRuntimeBinary is the default runtime to be used by + // containerd if none is specified + DefaultRuntimeBinary = "docker-runc" + + errSystemNotSupported = errors.New("The Docker daemon is not supported on this platform.") +) + +type daemonStore struct { + graphDriver string + imageRoot string + imageStore image.Store + layerStore layer.Store + distributionMetadataStore dmetadata.Store + referenceStore refstore.Store +} + +// Daemon holds information about the Docker daemon. 
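+// A Daemon is constructed by NewDaemon and torn down via Shutdown.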
+type Daemon struct { + ID string + repository string + containers container.Store + containersReplica container.ViewDB + execCommands *exec.Store + downloadManager *xfer.LayerDownloadManager + uploadManager *xfer.LayerUploadManager + trustKey libtrust.PrivateKey + idIndex *truncindex.TruncIndex + configStore *config.Config + statsCollector *stats.Collector + defaultLogConfig containertypes.LogConfig + RegistryService registry.Service + EventsService *events.Events + netController libnetwork.NetworkController + volumes *store.VolumeStore + discoveryWatcher discovery.Reloader + root string + seccompEnabled bool + apparmorEnabled bool + shutdown bool + idMappings *idtools.IDMappings + stores map[string]daemonStore // By container target platform + PluginStore *plugin.Store // todo: remove + pluginManager *plugin.Manager + linkIndex *linkIndex + containerd libcontainerd.Client + containerdRemote libcontainerd.Remote + defaultIsolation containertypes.Isolation // Default isolation mode on Windows + clusterProvider cluster.Provider + cluster Cluster + genericResources []swarm.GenericResource + metricsPluginListener net.Listener + + machineMemory uint64 + + seccompProfile []byte + seccompProfilePath string + + diskUsageRunning int32 + pruneRunning int32 + hosts map[string]bool // hosts stores the addresses the daemon is listening on + startupDone chan struct{} +} + +// StoreHosts stores the addresses the daemon is listening on +func (daemon *Daemon) StoreHosts(hosts []string) { + if daemon.hosts == nil { + daemon.hosts = make(map[string]bool) + } + for _, h := range hosts { + daemon.hosts[h] = true + } +} + +// HasExperimental returns whether the experimental features of the daemon are enabled or not +func (daemon *Daemon) HasExperimental() bool { + if daemon.configStore != nil && daemon.configStore.Experimental { + return true + } + return false +} + +func (daemon *Daemon) restore() error { + containers := make(map[string]*container.Container) + + logrus.Info("Loading containers: start.") + + dir, err := ioutil.ReadDir(daemon.repository) + if err != nil { + return err + } + + for _, v := range dir { + id := v.Name() + container, err := daemon.load(id) + if err != nil { + logrus.Errorf("Failed to load container %v: %v", id, err) + continue + } + + // Ignore the container if it does not support the current driver being used by the graph + currentDriverForContainerPlatform := daemon.stores[container.Platform].graphDriver + if (container.Driver == "" && currentDriverForContainerPlatform == "aufs") || container.Driver == currentDriverForContainerPlatform { + rwlayer, err := daemon.stores[container.Platform].layerStore.GetRWLayer(container.ID) + if err != nil { + logrus.Errorf("Failed to load container mount %v: %v", id, err) + continue + } + container.RWLayer = rwlayer + logrus.Debugf("Loaded container %v", container.ID) + + containers[container.ID] = container + } else { + logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID) + } + } + + removeContainers := make(map[string]*container.Container) + restartContainers := make(map[*container.Container]chan struct{}) + activeSandboxes := make(map[string]interface{}) + for id, c := range containers { + if err := daemon.registerName(c); err != nil { + logrus.Errorf("Failed to register container name %s: %s", c.ID, err) + delete(containers, id) + continue + } + // verify that all volumes are valid and have been migrated from the pre-1.7 layout + if err := daemon.verifyVolumesInfo(c); err != nil { + // don't 
skip the container due to error + logrus.Errorf("Failed to verify volumes for container '%s': %v", c.ID, err) + } + if err := daemon.Register(c); err != nil { + logrus.Errorf("Failed to register container %s: %s", c.ID, err) + delete(containers, id) + continue + } + + // The LogConfig.Type is empty if the container was created before docker 1.12 with default log driver. + // We should rewrite it to use the daemon defaults. + // Fixes https://github.com/docker/docker/issues/22536 + if c.HostConfig.LogConfig.Type == "" { + if err := daemon.mergeAndVerifyLogConfig(&c.HostConfig.LogConfig); err != nil { + logrus.Errorf("Failed to verify log config for container %s: %q", c.ID, err) + continue + } + } + } + + var wg sync.WaitGroup + var mapLock sync.Mutex + for _, c := range containers { + wg.Add(1) + go func(c *container.Container) { + defer wg.Done() + daemon.backportMountSpec(c) + if err := daemon.checkpointAndSave(c); err != nil { + logrus.WithError(err).WithField("container", c.ID).Error("error saving backported mountspec to disk") + } + + daemon.setStateCounter(c) + if c.IsRunning() || c.IsPaused() { + c.RestartManager().Cancel() // manually start containers because some need to wait for swarm networking + if err := daemon.containerd.Restore(c.ID, c.InitializeStdio); err != nil { + logrus.Errorf("Failed to restore %s with containerd: %s", c.ID, err) + return + } + + // we call Mount and then Unmount to get BaseFs of the container + if err := daemon.Mount(c); err != nil { + // The mount is unlikely to fail. However, in case mount fails + // the container should be allowed to restore here. Some functionalities + // (like docker exec -u user) might be missing but container is able to be + // stopped/restarted/removed. + // See #29365 for related information. + // The error is only logged here. + logrus.Warnf("Failed to mount container on getting BaseFs path %v: %v", c.ID, err) + } else { + if err := daemon.Unmount(c); err != nil { + logrus.Warnf("Failed to umount container on getting BaseFs path %v: %v", c.ID, err) + } + } + + c.ResetRestartManager(false) + if !c.HostConfig.NetworkMode.IsContainer() && c.IsRunning() { + options, err := daemon.buildSandboxOptions(c) + if err != nil { + logrus.Warnf("Failed build sandbox option to restore container %s: %v", c.ID, err) + } + mapLock.Lock() + activeSandboxes[c.NetworkSettings.SandboxID] = options + mapLock.Unlock() + } + + } + // fixme: only if not running + // get list of containers we need to restart + if !c.IsRunning() && !c.IsPaused() { + // Do not autostart containers which + // has endpoints in a swarm scope + // network yet since the cluster is + // not initialized yet. We will start + // it after the cluster is + // initialized. + if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint { + mapLock.Lock() + restartContainers[c] = make(chan struct{}) + mapLock.Unlock() + } else if c.HostConfig != nil && c.HostConfig.AutoRemove { + mapLock.Lock() + removeContainers[c.ID] = c + mapLock.Unlock() + } + } + + c.Lock() + if c.RemovalInProgress { + // We probably crashed in the middle of a removal, reset + // the flag. + // + // We DO NOT remove the container here as we do not + // know if the user had requested for either the + // associated volumes, network links or both to also + // be removed. So we put the container in the "dead" + // state and leave further processing up to them. 
+ logrus.Debugf("Resetting RemovalInProgress flag from %v", c.ID) + c.RemovalInProgress = false + c.Dead = true + if err := c.CheckpointTo(daemon.containersReplica); err != nil { + logrus.Errorf("Failed to update container %s state: %v", c.ID, err) + } + } + c.Unlock() + }(c) + } + wg.Wait() + daemon.netController, err = daemon.initNetworkController(daemon.configStore, activeSandboxes) + if err != nil { + return fmt.Errorf("Error initializing network controller: %v", err) + } + + // Now that all the containers are registered, register the links + for _, c := range containers { + if err := daemon.registerLinks(c, c.HostConfig); err != nil { + logrus.Errorf("failed to register link for container %s: %v", c.ID, err) + } + } + + group := sync.WaitGroup{} + for c, notifier := range restartContainers { + group.Add(1) + + go func(c *container.Container, chNotify chan struct{}) { + defer group.Done() + + logrus.Debugf("Starting container %s", c.ID) + + // ignore errors here as this is a best effort to wait for children to be + // running before we try to start the container + children := daemon.children(c) + timeout := time.After(5 * time.Second) + for _, child := range children { + if notifier, exists := restartContainers[child]; exists { + select { + case <-notifier: + case <-timeout: + } + } + } + + // Make sure networks are available before starting + daemon.waitForNetworks(c) + if err := daemon.containerStart(c, "", "", true); err != nil { + logrus.Errorf("Failed to start container %s: %s", c.ID, err) + } + close(chNotify) + }(c, notifier) + + } + group.Wait() + + removeGroup := sync.WaitGroup{} + for id := range removeContainers { + removeGroup.Add(1) + go func(cid string) { + if err := daemon.ContainerRm(cid, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil { + logrus.Errorf("Failed to remove container %s: %s", cid, err) + } + removeGroup.Done() + }(id) + } + removeGroup.Wait() + + // any containers that were started above would already have had this done, + // however we need to now prepare the mountpoints for the rest of the containers as well. + // This shouldn't cause any issue running on the containers that already had this run. + // This must be run after any containers with a restart policy so that containerized plugins + // can have a chance to be running before we try to initialize them. + for _, c := range containers { + // if the container has restart policy, do not + // prepare the mountpoints since it has been done on restarting. + // This is to speed up the daemon start when a restart container + // has a volume and the volume driver is not available. + if _, ok := restartContainers[c]; ok { + continue + } else if _, ok := removeContainers[c.ID]; ok { + // container is automatically removed, skip it. + continue + } + + group.Add(1) + go func(c *container.Container) { + defer group.Done() + if err := daemon.prepareMountPoints(c); err != nil { + logrus.Error(err) + } + }(c) + } + + group.Wait() + + logrus.Info("Loading containers: done.") + + return nil +} + +// RestartSwarmContainers restarts any autostart container which has a +// swarm endpoint. +func (daemon *Daemon) RestartSwarmContainers() { + group := sync.WaitGroup{} + for _, c := range daemon.List() { + if !c.IsRunning() && !c.IsPaused() { + // Autostart all the containers which has a + // swarm endpoint now that the cluster is + // initialized. 
+ if daemon.configStore.AutoRestart && c.ShouldRestart() && c.NetworkSettings.HasSwarmEndpoint { + group.Add(1) + go func(c *container.Container) { + defer group.Done() + if err := daemon.containerStart(c, "", "", true); err != nil { + logrus.Error(err) + } + }(c) + } + } + + } + group.Wait() +} + +// waitForNetworks is used during daemon initialization when starting up containers +// It ensures that all of a container's networks are available before the daemon tries to start the container. +// In practice it just makes sure the discovery service is available for containers which use a network that require discovery. +func (daemon *Daemon) waitForNetworks(c *container.Container) { + if daemon.discoveryWatcher == nil { + return + } + // Make sure if the container has a network that requires discovery that the discovery service is available before starting + for netName := range c.NetworkSettings.Networks { + // If we get `ErrNoSuchNetwork` here, we can assume that it is due to discovery not being ready + // Most likely this is because the K/V store used for discovery is in a container and needs to be started + if _, err := daemon.netController.NetworkByName(netName); err != nil { + if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok { + continue + } + // use a longish timeout here due to some slowdowns in libnetwork if the k/v store is on anything other than --net=host + // FIXME: why is this slow??? + logrus.Debugf("Container %s waiting for network to be ready", c.Name) + select { + case <-daemon.discoveryWatcher.ReadyCh(): + case <-time.After(60 * time.Second): + } + return + } + } +} + +func (daemon *Daemon) children(c *container.Container) map[string]*container.Container { + return daemon.linkIndex.children(c) +} + +// parents returns the names of the parent containers of the container +// with the given name. +func (daemon *Daemon) parents(c *container.Container) map[string]*container.Container { + return daemon.linkIndex.parents(c) +} + +func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error { + fullName := path.Join(parent.Name, alias) + if err := daemon.containersReplica.ReserveName(fullName, child.ID); err != nil { + if err == container.ErrNameReserved { + logrus.Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err) + return nil + } + return err + } + daemon.linkIndex.link(parent, child, fullName) + return nil +} + +// DaemonJoinsCluster informs the daemon has joined the cluster and provides +// the handler to query the cluster component +func (daemon *Daemon) DaemonJoinsCluster(clusterProvider cluster.Provider) { + daemon.setClusterProvider(clusterProvider) +} + +// DaemonLeavesCluster informs the daemon has left the cluster +func (daemon *Daemon) DaemonLeavesCluster() { + // Daemon is in charge of removing the attachable networks with + // connected containers when the node leaves the swarm + daemon.clearAttachableNetworks() + // We no longer need the cluster provider, stop it now so that + // the network agent will stop listening to cluster events. + daemon.setClusterProvider(nil) + // Wait for the networking cluster agent to stop + daemon.netController.AgentStopWait() + // Daemon is in charge of removing the ingress network when the + // node leaves the swarm. Wait for job to be done or timeout. + // This is called also on graceful daemon shutdown. We need to + // wait, because the ingress release has to happen before the + // network controller is stopped. 
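+ // (ReleaseIngress hands back a channel that is closed once the ingress network is removed; the select below caps the wait at five seconds.)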
+ if done, err := daemon.ReleaseIngress(); err == nil { + select { + case <-done: + case <-time.After(5 * time.Second): + logrus.Warnf("timeout while waiting for ingress network removal") + } + } else { + logrus.Warnf("failed to initiate ingress network removal: %v", err) + } +} + +// setClusterProvider sets a component for querying the current cluster state. +func (daemon *Daemon) setClusterProvider(clusterProvider cluster.Provider) { + daemon.clusterProvider = clusterProvider + daemon.netController.SetClusterProvider(clusterProvider) +} + +// IsSwarmCompatible verifies if the current daemon +// configuration is compatible with the swarm mode +func (daemon *Daemon) IsSwarmCompatible() error { + if daemon.configStore == nil { + return nil + } + return daemon.configStore.IsSwarmCompatible() +} + +// NewDaemon sets up everything for the daemon to be able to service +// requests from the webserver. +func NewDaemon(config *config.Config, registryService registry.Service, containerdRemote libcontainerd.Remote, pluginStore *plugin.Store) (daemon *Daemon, err error) { + setDefaultMtu(config) + + // Ensure that we have a correct root key limit for launching containers. + if err := ModifyRootKeyLimit(); err != nil { + logrus.Warnf("unable to modify root key limit, number of containers could be limited by this quota: %v", err) + } + + // Ensure we have compatible and valid configuration options + if err := verifyDaemonSettings(config); err != nil { + return nil, err + } + + // Do we have a disabled network? + config.DisableBridge = isBridgeNetworkDisabled(config) + + // Verify the platform is supported as a daemon + if !platformSupported { + return nil, errSystemNotSupported + } + + // Validate platform-specific requirements + if err := checkSystem(); err != nil { + return nil, err + } + + idMappings, err := setupRemappedRoot(config) + if err != nil { + return nil, err + } + rootIDs := idMappings.RootPair() + if err := setupDaemonProcess(config); err != nil { + return nil, err + } + + // set up the tmpDir to use a canonical path + tmp, err := prepareTempDir(config.Root, rootIDs) + if err != nil { + return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err) + } + realTmp, err := getRealPath(tmp) + if err != nil { + return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err) + } + os.Setenv("TMPDIR", realTmp) + + d := &Daemon{ + configStore: config, + startupDone: make(chan struct{}), + } + // Ensure the daemon is properly shutdown if there is a failure during + // initialization + defer func() { + if err != nil { + if err := d.Shutdown(); err != nil { + logrus.Error(err) + } + } + }() + + if err := d.setGenericResources(config); err != nil { + return nil, err + } + // set up SIGUSR1 handler on Unix-like systems, or a Win32 global event + // on Windows to dump Go routine stacks + stackDumpDir := config.Root + if execRoot := config.GetExecRoot(); execRoot != "" { + stackDumpDir = execRoot + } + d.setupDumpStackTrap(stackDumpDir) + + if err := d.setupSeccompProfile(); err != nil { + return nil, err + } + + // Set the default isolation mode (only applicable on Windows) + if err := d.setDefaultIsolation(); err != nil { + return nil, fmt.Errorf("error setting default isolation mode: %v", err) + } + + logrus.Debugf("Using default logging driver %s", config.LogConfig.Type) + + if err := configureMaxThreads(config); err != nil { + logrus.Warnf("Failed to configure golang's threads limit: %v", err) + } + + if err := ensureDefaultAppArmorProfile(); err != nil 
{ + logrus.Errorf(err.Error()) + } + + daemonRepo := filepath.Join(config.Root, "containers") + if err := idtools.MkdirAllAndChown(daemonRepo, 0700, rootIDs); err != nil && !os.IsExist(err) { + return nil, err + } + + if runtime.GOOS == "windows" { + if err := system.MkdirAll(filepath.Join(config.Root, "credentialspecs"), 0, ""); err != nil && !os.IsExist(err) { + return nil, err + } + } + + // On Windows we don't support the environment variable, or a user supplied graphdriver + // as Windows has no choice in terms of which graphdrivers to use. It's a case of + // running Windows containers on Windows - windowsfilter, running Linux containers on Windows, + // lcow. Unix platforms however run a single graphdriver for all containers, and it can + // be set through an environment variable, a daemon start parameter, or chosen through + // initialization of the layerstore through driver priority order for example. + d.stores = make(map[string]daemonStore) + if runtime.GOOS == "windows" { + d.stores["windows"] = daemonStore{graphDriver: "windowsfilter"} + if system.LCOWSupported() { + d.stores["linux"] = daemonStore{graphDriver: "lcow"} + } + } else { + driverName := os.Getenv("DOCKER_DRIVER") + if driverName == "" { + driverName = config.GraphDriver + } else { + logrus.Infof("Setting the storage driver from the $DOCKER_DRIVER environment variable (%s)", driverName) + } + d.stores[runtime.GOOS] = daemonStore{graphDriver: driverName} // May still be empty. Layerstore init determines instead. + } + + d.RegistryService = registryService + d.PluginStore = pluginStore + logger.RegisterPluginGetter(d.PluginStore) + + metricsSockPath, err := d.listenMetricsSock() + if err != nil { + return nil, err + } + registerMetricsPluginCallback(d.PluginStore, metricsSockPath) + + // Plugin system initialization should happen before restore. Do not change order. 
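The storage-driver selection in the hunk above is easy to misread: Windows pins the driver per platform, while Unix resolves it from the environment first, then the daemon config, and may deliberately leave it empty for the layer store to decide. A minimal sketch of that precedence, standard library only; resolveGraphDriver is a hypothetical helper, not part of this patch:

package main

import (
	"fmt"
	"os"
	"runtime"
)

// resolveGraphDriver applies the precedence shown above: Windows is pinned,
// $DOCKER_DRIVER beats the config value, and an empty result is allowed so
// layer-store initialization can pick a driver by priority order.
func resolveGraphDriver(configured string) string {
	if runtime.GOOS == "windows" {
		return "windowsfilter" // no choice of driver on Windows
	}
	if env := os.Getenv("DOCKER_DRIVER"); env != "" {
		return env // environment overrides daemon configuration
	}
	return configured // may legitimately be ""
}

func main() {
	fmt.Println(resolveGraphDriver("overlay2"))
}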
+ d.pluginManager, err = plugin.NewManager(plugin.ManagerConfig{ + Root: filepath.Join(config.Root, "plugins"), + ExecRoot: getPluginExecRoot(config.Root), + Store: d.PluginStore, + Executor: containerdRemote, + RegistryService: registryService, + LiveRestoreEnabled: config.LiveRestoreEnabled, + LogPluginEvent: d.LogPluginEvent, // todo: make private + AuthzMiddleware: config.AuthzMiddleware, + }) + if err != nil { + return nil, errors.Wrap(err, "couldn't create plugin manager") + } + + var graphDrivers []string + for platform, ds := range d.stores { + ls, err := layer.NewStoreFromOptions(layer.StoreOptions{ + StorePath: config.Root, + MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"), + GraphDriver: ds.graphDriver, + GraphDriverOptions: config.GraphOptions, + IDMappings: idMappings, + PluginGetter: d.PluginStore, + ExperimentalEnabled: config.Experimental, + Platform: platform, + }) + if err != nil { + return nil, err + } + ds.graphDriver = ls.DriverName() // As layerstore may set the driver + ds.layerStore = ls + d.stores[platform] = ds + graphDrivers = append(graphDrivers, ls.DriverName()) + } + + // Configure and validate the kernels security support + if err := configureKernelSecuritySupport(config, graphDrivers); err != nil { + return nil, err + } + + logrus.Debugf("Max Concurrent Downloads: %d", *config.MaxConcurrentDownloads) + lsMap := make(map[string]layer.Store) + for platform, ds := range d.stores { + lsMap[platform] = ds.layerStore + } + d.downloadManager = xfer.NewLayerDownloadManager(lsMap, *config.MaxConcurrentDownloads) + logrus.Debugf("Max Concurrent Uploads: %d", *config.MaxConcurrentUploads) + d.uploadManager = xfer.NewLayerUploadManager(*config.MaxConcurrentUploads) + + for platform, ds := range d.stores { + imageRoot := filepath.Join(config.Root, "image", ds.graphDriver) + ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb")) + if err != nil { + return nil, err + } + + var is image.Store + is, err = image.NewImageStore(ifs, platform, ds.layerStore) + if err != nil { + return nil, err + } + ds.imageRoot = imageRoot + ds.imageStore = is + d.stores[platform] = ds + } + + // Configure the volumes driver + volStore, err := d.configureVolumes(rootIDs) + if err != nil { + return nil, err + } + + trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath) + if err != nil { + return nil, err + } + + trustDir := filepath.Join(config.Root, "trust") + + if err := system.MkdirAll(trustDir, 0700, ""); err != nil { + return nil, err + } + + eventsService := events.New() + + for platform, ds := range d.stores { + dms, err := dmetadata.NewFSMetadataStore(filepath.Join(ds.imageRoot, "distribution"), platform) + if err != nil { + return nil, err + } + + rs, err := refstore.NewReferenceStore(filepath.Join(ds.imageRoot, "repositories.json"), platform) + if err != nil { + return nil, fmt.Errorf("Couldn't create Tag store repositories: %s", err) + } + ds.distributionMetadataStore = dms + ds.referenceStore = rs + d.stores[platform] = ds + + // No content-addressability migration on Windows as it never supported pre-CA + if runtime.GOOS != "windows" { + migrationStart := time.Now() + if err := v1.Migrate(config.Root, ds.graphDriver, ds.layerStore, ds.imageStore, rs, dms); err != nil { + logrus.Errorf("Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. 
We recommend starting over with a clean storage directory if possible.", err) + } + logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds()) + } + } + + // Discovery is only enabled when the daemon is launched with an address to advertise. When + // initialized, the daemon is registered and we can store the discovery backend as it's read-only + if err := d.initDiscovery(config); err != nil { + return nil, err + } + + sysInfo := sysinfo.New(false) + // Check if Devices cgroup is mounted, it is hard requirement for container security, + // on Linux. + if runtime.GOOS == "linux" && !sysInfo.CgroupDevicesEnabled { + return nil, errors.New("Devices cgroup isn't mounted") + } + + d.ID = trustKey.PublicKey().KeyID() + d.repository = daemonRepo + d.containers = container.NewMemoryStore() + if d.containersReplica, err = container.NewViewDB(); err != nil { + return nil, err + } + d.execCommands = exec.NewStore() + d.trustKey = trustKey + d.idIndex = truncindex.NewTruncIndex([]string{}) + d.statsCollector = d.newStatsCollector(1 * time.Second) + d.defaultLogConfig = containertypes.LogConfig{ + Type: config.LogConfig.Type, + Config: config.LogConfig.Config, + } + d.EventsService = eventsService + d.volumes = volStore + d.root = config.Root + d.idMappings = idMappings + d.seccompEnabled = sysInfo.Seccomp + d.apparmorEnabled = sysInfo.AppArmor + + d.linkIndex = newLinkIndex() + d.containerdRemote = containerdRemote + + go d.execCommandGC() + + d.containerd, err = containerdRemote.Client(d) + if err != nil { + return nil, err + } + + if err := d.restore(); err != nil { + return nil, err + } + close(d.startupDone) + + // FIXME: this method never returns an error + info, _ := d.SystemInfo() + + engineInfo.WithValues( + dockerversion.Version, + dockerversion.GitCommit, + info.Architecture, + info.Driver, + info.KernelVersion, + info.OperatingSystem, + info.OSType, + info.ID, + ).Set(1) + engineCpus.Set(float64(info.NCPU)) + engineMemory.Set(float64(info.MemTotal)) + + gd := "" + for platform, ds := range d.stores { + if len(gd) > 0 { + gd += ", " + } + gd += ds.graphDriver + if len(d.stores) > 1 { + gd = fmt.Sprintf("%s (%s)", gd, platform) + } + } + logrus.WithFields(logrus.Fields{ + "version": dockerversion.Version, + "commit": dockerversion.GitCommit, + "graphdriver(s)": gd, + }).Info("Docker daemon") + + return d, nil +} + +func (daemon *Daemon) waitForStartupDone() { + <-daemon.startupDone +} + +func (daemon *Daemon) shutdownContainer(c *container.Container) error { + stopTimeout := c.StopTimeout() + + // If container failed to exit in stopTimeout seconds of SIGTERM, then using the force + if err := daemon.containerStop(c, stopTimeout); err != nil { + return fmt.Errorf("Failed to stop container %s with error: %v", c.ID, err) + } + + // Wait without timeout for the container to exit. + // Ignore the result. + _ = <-c.Wait(context.Background(), container.WaitConditionNotRunning) + return nil +} + +// ShutdownTimeout returns the shutdown timeout based on the max stopTimeout of the containers, +// and is limited by daemon's ShutdownTimeout. +func (daemon *Daemon) ShutdownTimeout() int { + // By default we use daemon's ShutdownTimeout. 
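ShutdownTimeout, whose body continues below, folds every container's stop timeout into one daemon-wide bound. A standalone restatement of that rule under the same semantics (a negative value means unbounded); shutdownTimeout here is a hypothetical free function, not the daemon method:

package main

import "fmt"

// shutdownTimeout grows the daemon-wide timeout to cover the slowest
// container's stop timeout plus a grace period; any negative stop timeout
// makes the whole shutdown unbounded (-1).
func shutdownTimeout(daemonTimeout, grace int, stopTimeouts []int) int {
	out := daemonTimeout
	for _, st := range stopTimeouts {
		if out < 0 {
			break // already unbounded
		}
		if st < 0 {
			out = -1
		} else if st+grace > out {
			out = st + grace
		}
	}
	return out
}

func main() {
	fmt.Println(shutdownTimeout(15, 5, []int{10, 30})) // 35
	fmt.Println(shutdownTimeout(15, 5, []int{10, -1})) // -1
}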
+ shutdownTimeout := daemon.configStore.ShutdownTimeout + + graceTimeout := 5 + if daemon.containers != nil { + for _, c := range daemon.containers.List() { + if shutdownTimeout >= 0 { + stopTimeout := c.StopTimeout() + if stopTimeout < 0 { + shutdownTimeout = -1 + } else { + if stopTimeout+graceTimeout > shutdownTimeout { + shutdownTimeout = stopTimeout + graceTimeout + } + } + } + } + } + return shutdownTimeout +} + +// Shutdown stops the daemon. +func (daemon *Daemon) Shutdown() error { + daemon.shutdown = true + // Keep mounts and networking running on daemon shutdown if + // we are to keep containers running and restore them. + + if daemon.configStore.LiveRestoreEnabled && daemon.containers != nil { + // check if there are any running containers, if none we should do some cleanup + if ls, err := daemon.Containers(&types.ContainerListOptions{}); len(ls) != 0 || err != nil { + // metrics plugins still need some cleanup + daemon.cleanupMetricsPlugins() + return nil + } + } + + if daemon.containers != nil { + logrus.Debugf("start clean shutdown of all containers with a %d seconds timeout...", daemon.configStore.ShutdownTimeout) + daemon.containers.ApplyAll(func(c *container.Container) { + if !c.IsRunning() { + return + } + logrus.Debugf("stopping %s", c.ID) + if err := daemon.shutdownContainer(c); err != nil { + logrus.Errorf("Stop container error: %v", err) + return + } + if mountid, err := daemon.stores[c.Platform].layerStore.GetMountID(c.ID); err == nil { + daemon.cleanupMountsByID(mountid) + } + logrus.Debugf("container stopped %s", c.ID) + }) + } + + if daemon.volumes != nil { + if err := daemon.volumes.Shutdown(); err != nil { + logrus.Errorf("Error shutting down volume store: %v", err) + } + } + + for platform, ds := range daemon.stores { + if ds.layerStore != nil { + if err := ds.layerStore.Cleanup(); err != nil { + logrus.Errorf("Error during layer Store.Cleanup(): %v %s", err, platform) + } + } + } + + // If we are part of a cluster, clean up cluster's stuff + if daemon.clusterProvider != nil { + logrus.Debugf("start clean shutdown of cluster resources...") + daemon.DaemonLeavesCluster() + } + + daemon.cleanupMetricsPlugins() + + // Shutdown plugins after containers and layerstore. Don't change the order. + daemon.pluginShutdown() + + // trigger libnetwork Stop only if it's initialized + if daemon.netController != nil { + daemon.netController.Stop() + } + + if err := daemon.cleanupMounts(); err != nil { + return err + } + + return nil +} + +// Mount sets container.BaseFS +// (is it not set coming in? why is it unset?) +func (daemon *Daemon) Mount(container *container.Container) error { + dir, err := container.RWLayer.Mount(container.GetMountLabel()) + if err != nil { + return err + } + logrus.Debugf("container mounted via layerStore: %v", dir) + + if container.BaseFS != dir { + // The mount path reported by the graph driver should always be trusted on Windows, since the + // volume path for a given mounted layer may change over time. This should only be an error + // on non-Windows operating systems. 
+		if container.BaseFS != "" && runtime.GOOS != "windows" {
+			daemon.Unmount(container)
+			return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')",
+				daemon.GraphDriverName(container.Platform), container.ID, container.BaseFS, dir)
+		}
+	}
+	container.BaseFS = dir // TODO: combine these fields
+	return nil
+}
+
+// Unmount unsets the container base filesystem
+func (daemon *Daemon) Unmount(container *container.Container) error {
+	if err := container.RWLayer.Unmount(); err != nil {
+		logrus.Errorf("Error unmounting container %s: %s", container.ID, err)
+		return err
+	}
+
+	return nil
+}
+
+// Subnets returns the IPv4 and IPv6 subnets of networks that are managed by Docker.
+func (daemon *Daemon) Subnets() ([]net.IPNet, []net.IPNet) {
+	var v4Subnets []net.IPNet
+	var v6Subnets []net.IPNet
+
+	managedNetworks := daemon.netController.Networks()
+
+	for _, managedNetwork := range managedNetworks {
+		v4infos, v6infos := managedNetwork.Info().IpamInfo()
+		for _, info := range v4infos {
+			if info.IPAMData.Pool != nil {
+				v4Subnets = append(v4Subnets, *info.IPAMData.Pool)
+			}
+		}
+		for _, info := range v6infos {
+			if info.IPAMData.Pool != nil {
+				v6Subnets = append(v6Subnets, *info.IPAMData.Pool)
+			}
+		}
+	}
+
+	return v4Subnets, v6Subnets
+}
+
+// GraphDriverName returns the name of the graph driver used by the layer.Store
+func (daemon *Daemon) GraphDriverName(platform string) string {
+	return daemon.stores[platform].layerStore.DriverName()
+}
+
+// prepareTempDir prepares and returns the default directory to use
+// for temporary files.
+// If it doesn't exist, it is created. If it exists, its content is removed.
+func prepareTempDir(rootDir string, rootIDs idtools.IDPair) (string, error) {
+	var tmpDir string
+	if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" {
+		tmpDir = filepath.Join(rootDir, "tmp")
+		newName := tmpDir + "-old"
+		if err := os.Rename(tmpDir, newName); err == nil {
+			go func() {
+				if err := os.RemoveAll(newName); err != nil {
+					logrus.Warnf("failed to delete old tmp directory: %s", newName)
+				}
+			}()
+		} else {
+			logrus.Warnf("failed to rename %s for background deletion: %s. Deleting synchronously", tmpDir, err)
+			if err := os.RemoveAll(tmpDir); err != nil {
+				logrus.Warnf("failed to delete old tmp directory: %s", tmpDir)
+			}
+		}
+	}
+	// We don't remove the content of tmpdir if it's not the default,
+	// it may hold things that do not belong to us.
+	return tmpDir, idtools.MkdirAllAndChown(tmpDir, 0700, rootIDs)
+}
+
+func (daemon *Daemon) setupInitLayer(initPath string) error {
+	rootIDs := daemon.idMappings.RootPair()
+	return initlayer.Setup(initPath, rootIDs)
+}
+
+func (daemon *Daemon) setGenericResources(conf *config.Config) error {
+	genericResources, err := opts.ParseGenericResources(conf.NodeGenericResources)
+	if err != nil {
+		return err
+	}
+
+	daemon.genericResources = genericResources
+
+	return nil
+}
+
+func setDefaultMtu(conf *config.Config) {
+	// do nothing if the config does not have the default 0 value.
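prepareTempDir above avoids blocking daemon startup on a potentially slow recursive delete: it renames the old tmp directory aside and reaps it in a goroutine, deleting synchronously only when the rename fails. A minimal sketch of that rename-then-reap pattern (hypothetical paths; in this toy the goroutine may outlive main):

package main

import (
	"fmt"
	"os"
)

// recycleDir clears dir without making the caller wait for the delete.
func recycleDir(dir string) {
	old := dir + "-old"
	if err := os.Rename(dir, old); err == nil {
		go os.RemoveAll(old) // reap in the background
		return
	}
	// Rename failed (e.g. dir did not exist yet): best-effort sync delete.
	os.RemoveAll(dir)
}

func main() {
	dir := os.TempDir() + "/recycle-demo"
	os.MkdirAll(dir, 0700)
	recycleDir(dir)
	fmt.Println("recycled", dir)
}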
+ if conf.Mtu != 0 { + return + } + conf.Mtu = config.DefaultNetworkMtu +} + +func (daemon *Daemon) configureVolumes(rootIDs idtools.IDPair) (*store.VolumeStore, error) { + volumesDriver, err := local.New(daemon.configStore.Root, rootIDs) + if err != nil { + return nil, err + } + + volumedrivers.RegisterPluginGetter(daemon.PluginStore) + + if !volumedrivers.Register(volumesDriver, volumesDriver.Name()) { + return nil, errors.New("local volume driver could not be registered") + } + return store.New(daemon.configStore.Root) +} + +// IsShuttingDown tells whether the daemon is shutting down or not +func (daemon *Daemon) IsShuttingDown() bool { + return daemon.shutdown +} + +// initDiscovery initializes the discovery watcher for this daemon. +func (daemon *Daemon) initDiscovery(conf *config.Config) error { + advertise, err := config.ParseClusterAdvertiseSettings(conf.ClusterStore, conf.ClusterAdvertise) + if err != nil { + if err == discovery.ErrDiscoveryDisabled { + return nil + } + return err + } + + conf.ClusterAdvertise = advertise + discoveryWatcher, err := discovery.Init(conf.ClusterStore, conf.ClusterAdvertise, conf.ClusterOpts) + if err != nil { + return fmt.Errorf("discovery initialization failed (%v)", err) + } + + daemon.discoveryWatcher = discoveryWatcher + return nil +} + +func isBridgeNetworkDisabled(conf *config.Config) bool { + return conf.BridgeConfig.Iface == config.DisableNetworkBridge +} + +func (daemon *Daemon) networkOptions(dconfig *config.Config, pg plugingetter.PluginGetter, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) { + options := []nwconfig.Option{} + if dconfig == nil { + return options, nil + } + + options = append(options, nwconfig.OptionExperimental(dconfig.Experimental)) + options = append(options, nwconfig.OptionDataDir(dconfig.Root)) + options = append(options, nwconfig.OptionExecRoot(dconfig.GetExecRoot())) + + dd := runconfig.DefaultDaemonNetworkMode() + dn := runconfig.DefaultDaemonNetworkMode().NetworkName() + options = append(options, nwconfig.OptionDefaultDriver(string(dd))) + options = append(options, nwconfig.OptionDefaultNetwork(dn)) + + if strings.TrimSpace(dconfig.ClusterStore) != "" { + kv := strings.Split(dconfig.ClusterStore, "://") + if len(kv) != 2 { + return nil, errors.New("kv store daemon config must be of the form KV-PROVIDER://KV-URL") + } + options = append(options, nwconfig.OptionKVProvider(kv[0])) + options = append(options, nwconfig.OptionKVProviderURL(kv[1])) + } + if len(dconfig.ClusterOpts) > 0 { + options = append(options, nwconfig.OptionKVOpts(dconfig.ClusterOpts)) + } + + if daemon.discoveryWatcher != nil { + options = append(options, nwconfig.OptionDiscoveryWatcher(daemon.discoveryWatcher)) + } + + if dconfig.ClusterAdvertise != "" { + options = append(options, nwconfig.OptionDiscoveryAddress(dconfig.ClusterAdvertise)) + } + + options = append(options, nwconfig.OptionLabels(dconfig.Labels)) + options = append(options, driverOptions(dconfig)...) 
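The cluster-store string parsed in networkOptions above must have the shape KV-PROVIDER://KV-URL. A small sketch of that parse with the same error contract; parseClusterStore is a hypothetical name, not part of the patch:

package main

import (
	"errors"
	"fmt"
	"strings"
)

// parseClusterStore splits "provider://url" exactly as the daemon does,
// rejecting anything that does not split into exactly two parts.
func parseClusterStore(s string) (provider, url string, err error) {
	kv := strings.Split(s, "://")
	if len(kv) != 2 {
		return "", "", errors.New("kv store daemon config must be of the form KV-PROVIDER://KV-URL")
	}
	return kv[0], kv[1], nil
}

func main() {
	p, u, err := parseClusterStore("consul://10.0.0.5:8500")
	fmt.Println(p, u, err) // consul 10.0.0.5:8500 <nil>
}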
+ + if daemon.configStore != nil && daemon.configStore.LiveRestoreEnabled && len(activeSandboxes) != 0 { + options = append(options, nwconfig.OptionActiveSandboxes(activeSandboxes)) + } + + if pg != nil { + options = append(options, nwconfig.OptionPluginGetter(pg)) + } + + return options, nil +} + +func copyBlkioEntry(entries []*containerd.BlkioStatsEntry) []types.BlkioStatEntry { + out := make([]types.BlkioStatEntry, len(entries)) + for i, re := range entries { + out[i] = types.BlkioStatEntry{ + Major: re.Major, + Minor: re.Minor, + Op: re.Op, + Value: re.Value, + } + } + return out +} + +// GetCluster returns the cluster +func (daemon *Daemon) GetCluster() Cluster { + return daemon.cluster +} + +// SetCluster sets the cluster +func (daemon *Daemon) SetCluster(cluster Cluster) { + daemon.cluster = cluster +} + +func (daemon *Daemon) pluginShutdown() { + manager := daemon.pluginManager + // Check for a valid manager object. In error conditions, daemon init can fail + // and shutdown called, before plugin manager is initialized. + if manager != nil { + manager.Shutdown() + } +} + +// PluginManager returns current pluginManager associated with the daemon +func (daemon *Daemon) PluginManager() *plugin.Manager { // set up before daemon to avoid this method + return daemon.pluginManager +} + +// PluginGetter returns current pluginStore associated with the daemon +func (daemon *Daemon) PluginGetter() *plugin.Store { + return daemon.PluginStore +} + +// CreateDaemonRoot creates the root for the daemon +func CreateDaemonRoot(config *config.Config) error { + // get the canonical path to the Docker root directory + var realRoot string + if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) { + realRoot = config.Root + } else { + realRoot, err = getRealPath(config.Root) + if err != nil { + return fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err) + } + } + + idMappings, err := setupRemappedRoot(config) + if err != nil { + return err + } + return setupDaemonRoot(config, realRoot, idMappings.RootPair()) +} + +// checkpointAndSave grabs a container lock to safely call container.CheckpointTo +func (daemon *Daemon) checkpointAndSave(container *container.Container) error { + container.Lock() + defer container.Unlock() + if err := container.CheckpointTo(daemon.containersReplica); err != nil { + return fmt.Errorf("Error saving container state: %v", err) + } + return nil +} + +// because the CLI sends a -1 when it wants to unset the swappiness value +// we need to clear it on the server side +func fixMemorySwappiness(resources *containertypes.Resources) { + if resources.MemorySwappiness != nil && *resources.MemorySwappiness == -1 { + resources.MemorySwappiness = nil + } +} diff --git a/vendor/github.com/moby/moby/daemon/daemon_experimental.go b/vendor/github.com/moby/moby/daemon/daemon_experimental.go new file mode 100644 index 000000000..fb0251d4a --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/daemon_experimental.go @@ -0,0 +1,7 @@ +package daemon + +import "github.com/docker/docker/api/types/container" + +func (daemon *Daemon) verifyExperimentalContainerSettings(hostConfig *container.HostConfig, config *container.Config) ([]string, error) { + return nil, nil +} diff --git a/vendor/github.com/moby/moby/daemon/daemon_linux.go b/vendor/github.com/moby/moby/daemon/daemon_linux.go new file mode 100644 index 000000000..000a04869 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/daemon_linux.go @@ -0,0 +1,93 @@ +package daemon + +import ( + "bufio" + "fmt" + 
"io" + "os" + "regexp" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/mount" +) + +// On Linux, plugins use a static path for storing execution state, +// instead of deriving path from daemon's exec-root. This is because +// plugin socket files are created here and they cannot exceed max +// path length of 108 bytes. +func getPluginExecRoot(root string) string { + return "/run/docker/plugins" +} + +func (daemon *Daemon) cleanupMountsByID(id string) error { + logrus.Debugf("Cleaning up old mountid %s: start.", id) + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return err + } + defer f.Close() + + return daemon.cleanupMountsFromReaderByID(f, id, mount.Unmount) +} + +func (daemon *Daemon) cleanupMountsFromReaderByID(reader io.Reader, id string, unmount func(target string) error) error { + if daemon.root == "" { + return nil + } + var errors []string + + regexps := getCleanPatterns(id) + sc := bufio.NewScanner(reader) + for sc.Scan() { + if fields := strings.Fields(sc.Text()); len(fields) >= 4 { + if mnt := fields[4]; strings.HasPrefix(mnt, daemon.root) { + for _, p := range regexps { + if p.MatchString(mnt) { + if err := unmount(mnt); err != nil { + logrus.Error(err) + errors = append(errors, err.Error()) + } + } + } + } + } + } + + if err := sc.Err(); err != nil { + return err + } + + if len(errors) > 0 { + return fmt.Errorf("Error cleaning up mounts:\n%v", strings.Join(errors, "\n")) + } + + logrus.Debugf("Cleaning up old mountid %v: done.", id) + return nil +} + +// cleanupMounts umounts shm/mqueue mounts for old containers +func (daemon *Daemon) cleanupMounts() error { + return daemon.cleanupMountsByID("") +} + +func getCleanPatterns(id string) (regexps []*regexp.Regexp) { + var patterns []string + if id == "" { + id = "[0-9a-f]{64}" + patterns = append(patterns, "containers/"+id+"/shm") + } + patterns = append(patterns, "aufs/mnt/"+id+"$", "overlay/"+id+"/merged$", "zfs/graph/"+id+"$") + for _, p := range patterns { + r, err := regexp.Compile(p) + if err == nil { + regexps = append(regexps, r) + } + } + return +} + +func getRealPath(path string) (string, error) { + return fileutils.ReadSymlinkedDirectory(path) +} diff --git a/vendor/github.com/moby/moby/daemon/daemon_linux_test.go b/vendor/github.com/moby/moby/daemon/daemon_linux_test.go new file mode 100644 index 000000000..c7d511719 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/daemon_linux_test.go @@ -0,0 +1,104 @@ +// +build linux + +package daemon + +import ( + "strings" + "testing" +) + +const mountsFixture = `142 78 0:38 / / rw,relatime - aufs none rw,si=573b861da0b3a05b,dio +143 142 0:60 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw +144 142 0:67 / /dev rw,nosuid - tmpfs tmpfs rw,mode=755 +145 144 0:78 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=666 +146 144 0:49 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw +147 142 0:84 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw +148 147 0:86 / /sys/fs/cgroup rw,nosuid,nodev,noexec,relatime - tmpfs tmpfs rw,mode=755 +149 148 0:22 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,cpuset +150 148 0:25 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/cpu rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,cpu +151 148 0:27 
/docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/cpuacct rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,cpuacct +152 148 0:28 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,memory +153 148 0:29 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,devices +154 148 0:30 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,freezer +155 148 0:31 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,blkio +156 148 0:32 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,perf_event +157 148 0:33 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,hugetlb +158 148 0:35 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime - cgroup systemd rw,name=systemd +159 142 8:4 /home/mlaventure/gopath /home/mlaventure/gopath rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +160 142 8:4 /var/lib/docker/volumes/9a428b651ee4c538130143cad8d87f603a4bf31b928afe7ff3ecd65480692b35/_data /var/lib/docker rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +164 142 8:4 /home/mlaventure/gopath/src/github.com/docker/docker /go/src/github.com/docker/docker rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +165 142 8:4 /var/lib/docker/containers/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a/resolv.conf /etc/resolv.conf rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +166 142 8:4 /var/lib/docker/containers/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a/hostname /etc/hostname rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +167 142 8:4 /var/lib/docker/containers/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a/hosts /etc/hosts rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +168 144 0:39 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k +169 144 0:12 /14 /dev/console rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 +83 147 0:10 / /sys/kernel/security rw,relatime - securityfs none rw +89 142 0:87 / /tmp rw,relatime - tmpfs none rw +97 142 0:60 / /run/docker/netns/default rw,nosuid,nodev,noexec,relatime - proc proc rw +100 160 8:4 /var/lib/docker/volumes/9a428b651ee4c538130143cad8d87f603a4bf31b928afe7ff3ecd65480692b35/_data/aufs /var/lib/docker/aufs rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +115 100 0:102 / /var/lib/docker/aufs/mnt/0ecda1c63e5b58b3d89ff380bf646c95cc980252cf0b52466d43619aec7c8432 rw,relatime - aufs none rw,si=573b861dbc01905b,dio +116 160 0:107 / 
/var/lib/docker/containers/d045dc441d2e2e1d5b3e328d47e5943811a40819fb47497c5f5a5df2d6d13c37/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k +118 142 0:102 / /run/docker/libcontainerd/d045dc441d2e2e1d5b3e328d47e5943811a40819fb47497c5f5a5df2d6d13c37/rootfs rw,relatime - aufs none rw,si=573b861dbc01905b,dio +242 142 0:60 / /run/docker/netns/c3664df2a0f7 rw,nosuid,nodev,noexec,relatime - proc proc rw +120 100 0:122 / /var/lib/docker/aufs/mnt/03ca4b49e71f1e49a41108829f4d5c70ac95934526e2af8984a1f65f1de0715d rw,relatime - aufs none rw,si=573b861eb147805b,dio +171 142 0:122 / /run/docker/libcontainerd/e406ff6f3e18516d50e03dbca4de54767a69a403a6f7ec1edc2762812824521e/rootfs rw,relatime - aufs none rw,si=573b861eb147805b,dio +310 142 0:60 / /run/docker/netns/71a18572176b rw,nosuid,nodev,noexec,relatime - proc proc rw +` + +func TestCleanupMounts(t *testing.T) { + d := &Daemon{ + root: "/var/lib/docker/", + } + + expected := "/var/lib/docker/containers/d045dc441d2e2e1d5b3e328d47e5943811a40819fb47497c5f5a5df2d6d13c37/shm" + var unmounted int + unmount := func(target string) error { + if target == expected { + unmounted++ + } + return nil + } + + d.cleanupMountsFromReaderByID(strings.NewReader(mountsFixture), "", unmount) + + if unmounted != 1 { + t.Fatal("Expected to unmount the shm (and the shm only)") + } +} + +func TestCleanupMountsByID(t *testing.T) { + d := &Daemon{ + root: "/var/lib/docker/", + } + + expected := "/var/lib/docker/aufs/mnt/03ca4b49e71f1e49a41108829f4d5c70ac95934526e2af8984a1f65f1de0715d" + var unmounted int + unmount := func(target string) error { + if target == expected { + unmounted++ + } + return nil + } + + d.cleanupMountsFromReaderByID(strings.NewReader(mountsFixture), "03ca4b49e71f1e49a41108829f4d5c70ac95934526e2af8984a1f65f1de0715d", unmount) + + if unmounted != 1 { + t.Fatal("Expected to unmount the auf root (and that only)") + } +} + +func TestNotCleanupMounts(t *testing.T) { + d := &Daemon{ + repository: "", + } + var unmounted bool + unmount := func(target string) error { + unmounted = true + return nil + } + mountInfo := `234 232 0:59 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k` + d.cleanupMountsFromReaderByID(strings.NewReader(mountInfo), "", unmount) + if unmounted { + t.Fatal("Expected not to clean up /dev/shm") + } +} diff --git a/vendor/github.com/moby/moby/daemon/daemon_solaris.go b/vendor/github.com/moby/moby/daemon/daemon_solaris.go new file mode 100644 index 000000000..f464ee34b --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/daemon_solaris.go @@ -0,0 +1,533 @@ +// +build solaris,cgo + +package daemon + +import ( + "fmt" + "net" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/sysinfo" + refstore "github.com/docker/docker/reference" + "github.com/docker/libnetwork" + nwconfig "github.com/docker/libnetwork/config" + "github.com/docker/libnetwork/drivers/solaris/bridge" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/netutils" + lntypes "github.com/docker/libnetwork/types" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" +) + +//#include 
+import "C" + +const ( + defaultVirtualSwitch = "Virtual Switch" + platformSupported = true + solarisMinCPUShares = 1 + solarisMaxCPUShares = 65535 +) + +func getMemoryResources(config containertypes.Resources) specs.CappedMemory { + memory := specs.CappedMemory{} + + if config.Memory > 0 { + memory.Physical = strconv.FormatInt(config.Memory, 10) + } + + if config.MemorySwap != 0 { + memory.Swap = strconv.FormatInt(config.MemorySwap, 10) + } + + return memory +} + +func getCPUResources(config containertypes.Resources) specs.CappedCPU { + cpu := specs.CappedCPU{} + + if config.CpusetCpus != "" { + cpu.Ncpus = config.CpusetCpus + } + + return cpu +} + +func (daemon *Daemon) cleanupMountsByID(id string) error { + return nil +} + +func (daemon *Daemon) parseSecurityOpt(container *container.Container, hostConfig *containertypes.HostConfig) error { + return parseSecurityOpt(container, hostConfig) +} + +func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { + //Since config.SecurityOpt is specifically defined as a "List of string values to + //customize labels for MLs systems, such as SELinux" + //until we figure out how to map to Trusted Extensions + //this is being disabled for now on Solaris + var ( + labelOpts []string + err error + ) + + if len(config.SecurityOpt) > 0 { + return errors.New("Security options are not supported on Solaris") + } + + container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts) + return err +} + +func setupRemappedRoot(config *Config) ([]idtools.IDMap, []idtools.IDMap, error) { + return nil, nil, nil +} + +func setupDaemonRoot(config *Config, rootDir string, rootUID, rootGID int) error { + return nil +} + +func (daemon *Daemon) getLayerInit() func(string) error { + return nil +} + +func checkKernel() error { + // solaris can rely upon checkSystem() below, we don't skew kernel versions + return nil +} + +func (daemon *Daemon) getCgroupDriver() string { + return "" +} + +func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { + if hostConfig.CPUShares < 0 { + logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, solarisMinCPUShares) + hostConfig.CPUShares = solarisMinCPUShares + } else if hostConfig.CPUShares > solarisMaxCPUShares { + logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, solarisMaxCPUShares) + hostConfig.CPUShares = solarisMaxCPUShares + } + + if hostConfig.Memory > 0 && hostConfig.MemorySwap == 0 { + // By default, MemorySwap is set to twice the size of Memory. + hostConfig.MemorySwap = hostConfig.Memory * 2 + } + + if hostConfig.ShmSize != 0 { + hostConfig.ShmSize = container.DefaultSHMSize + } + if hostConfig.OomKillDisable == nil { + defaultOomKillDisable := false + hostConfig.OomKillDisable = &defaultOomKillDisable + } + + return nil +} + +// UsingSystemd returns true if cli option includes native.cgroupdriver=systemd +func UsingSystemd(config *Config) bool { + return false +} + +// verifyPlatformContainerSettings performs platform-specific validation of the +// hostconfig and config structures. 
+func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) {
+	fixMemorySwappiness(&hostConfig.Resources)
+	warnings := []string{}
+	sysInfo := sysinfo.New(true)
+	// NOTE: We do not enforce a minimum value for swap limits for zones on Solaris and
+	// therefore we will not do that for Docker containers either.
+	if hostConfig.Memory > 0 && !sysInfo.MemoryLimit {
+		warnings = append(warnings, "Your kernel does not support memory limit capabilities. Limitation discarded.")
+		logrus.Warnf("Your kernel does not support memory limit capabilities. Limitation discarded.")
+		hostConfig.Memory = 0
+		hostConfig.MemorySwap = -1
+	}
+	if hostConfig.Memory > 0 && hostConfig.MemorySwap != -1 && !sysInfo.SwapLimit {
+		warnings = append(warnings, "Your kernel does not support swap limit capabilities, memory limited without swap.")
+		logrus.Warnf("Your kernel does not support swap limit capabilities, memory limited without swap.")
+		hostConfig.MemorySwap = -1
+	}
+	if hostConfig.Memory > 0 && hostConfig.MemorySwap > 0 && hostConfig.MemorySwap < hostConfig.Memory {
+		return warnings, fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage.")
+	}
+	// Solaris NOTE: We allow and encourage setting the swap without setting the memory limit.
+
+	if hostConfig.MemorySwappiness != nil && !sysInfo.MemorySwappiness {
+		warnings = append(warnings, "Your kernel does not support memory swappiness capabilities, memory swappiness discarded.")
+		logrus.Warnf("Your kernel does not support memory swappiness capabilities, memory swappiness discarded.")
+		hostConfig.MemorySwappiness = nil
+	}
+	if hostConfig.MemoryReservation > 0 && !sysInfo.MemoryReservation {
+		warnings = append(warnings, "Your kernel does not support memory soft limit capabilities. Limitation discarded.")
+		logrus.Warnf("Your kernel does not support memory soft limit capabilities. Limitation discarded.")
+		hostConfig.MemoryReservation = 0
+	}
+	if hostConfig.Memory > 0 && hostConfig.MemoryReservation > 0 && hostConfig.Memory < hostConfig.MemoryReservation {
+		return warnings, fmt.Errorf("Minimum memory limit should be larger than memory reservation limit, see usage.")
+	}
+	if hostConfig.KernelMemory > 0 && !sysInfo.KernelMemory {
+		warnings = append(warnings, "Your kernel does not support kernel memory limit capabilities. Limitation discarded.")
+		logrus.Warnf("Your kernel does not support kernel memory limit capabilities. Limitation discarded.")
+		hostConfig.KernelMemory = 0
+	}
+	if hostConfig.CPUShares != 0 && !sysInfo.CPUShares {
+		warnings = append(warnings, "Your kernel does not support CPU shares. Shares discarded.")
+		logrus.Warnf("Your kernel does not support CPU shares. Shares discarded.")
+		hostConfig.CPUShares = 0
+	}
+	if hostConfig.CPUShares < 0 {
+		warnings = append(warnings, "Invalid CPUShares value. Must be positive. Discarding.")
+		logrus.Warnf("Invalid CPUShares value. Must be positive. Discarding.")
+		hostConfig.CPUShares = 0
+	}
+	if hostConfig.CPUShares > 0 && !sysinfo.IsCPUSharesAvailable() {
+		warnings = append(warnings, "Global zone default scheduling class not FSS. Discarding shares.")
+		logrus.Warnf("Global zone default scheduling class not FSS. Discarding shares.")
+		hostConfig.CPUShares = 0
+	}
+
+	// Solaris NOTE: Linux does not do negative checking for CPUShares and Quota here. But it makes sense to.
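The memory checks earlier in this function enforce an ordering between the limits: a finite MemorySwap caps memory plus swap combined, so it may never be smaller than Memory, while -1 opts out of the swap cap entirely. A worked restatement with a hypothetical checkMemorySwap helper:

package main

import (
	"errors"
	"fmt"
)

// checkMemorySwap mirrors the relation enforced above: a finite swap cap
// below the memory limit is invalid, and -1 means unlimited swap.
func checkMemorySwap(memory, memorySwap int64) error {
	if memory > 0 && memorySwap > 0 && memorySwap < memory {
		return errors.New("minimum memoryswap limit should be larger than memory limit")
	}
	return nil
}

func main() {
	fmt.Println(checkMemorySwap(512<<20, 256<<20)) // error: swap cap below memory
	fmt.Println(checkMemorySwap(512<<20, -1))      // <nil>: unlimited swap
}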
+ if hostConfig.CPUPeriod > 0 && !sysInfo.CPUCfsPeriod { + warnings = append(warnings, "Your kernel does not support CPU cfs period. Period discarded.") + logrus.Warnf("Your kernel does not support CPU cfs period. Period discarded.") + if hostConfig.CPUQuota > 0 { + warnings = append(warnings, "Quota will be applied on default period, not period specified.") + logrus.Warnf("Quota will be applied on default period, not period specified.") + } + hostConfig.CPUPeriod = 0 + } + if hostConfig.CPUQuota != 0 && !sysInfo.CPUCfsQuota { + warnings = append(warnings, "Your kernel does not support CPU cfs quota. Quota discarded.") + logrus.Warnf("Your kernel does not support CPU cfs quota. Quota discarded.") + hostConfig.CPUQuota = 0 + } + if hostConfig.CPUQuota < 0 { + warnings = append(warnings, "Invalid CPUQuota value. Must be positive. Discarding.") + logrus.Warnf("Invalid CPUQuota value. Must be positive. Discarding.") + hostConfig.CPUQuota = 0 + } + if (hostConfig.CpusetCpus != "" || hostConfig.CpusetMems != "") && !sysInfo.Cpuset { + warnings = append(warnings, "Your kernel does not support cpuset. Cpuset discarded.") + logrus.Warnf("Your kernel does not support cpuset. Cpuset discarded.") + hostConfig.CpusetCpus = "" + hostConfig.CpusetMems = "" + } + cpusAvailable, err := sysInfo.IsCpusetCpusAvailable(hostConfig.CpusetCpus) + if err != nil { + return warnings, fmt.Errorf("Invalid value %s for cpuset cpus.", hostConfig.CpusetCpus) + } + if !cpusAvailable { + return warnings, fmt.Errorf("Requested CPUs are not available - requested %s, available: %s.", hostConfig.CpusetCpus, sysInfo.Cpus) + } + memsAvailable, err := sysInfo.IsCpusetMemsAvailable(hostConfig.CpusetMems) + if err != nil { + return warnings, fmt.Errorf("Invalid value %s for cpuset mems.", hostConfig.CpusetMems) + } + if !memsAvailable { + return warnings, fmt.Errorf("Requested memory nodes are not available - requested %s, available: %s.", hostConfig.CpusetMems, sysInfo.Mems) + } + if hostConfig.BlkioWeight > 0 && !sysInfo.BlkioWeight { + warnings = append(warnings, "Your kernel does not support Block I/O weight. Weight discarded.") + logrus.Warnf("Your kernel does not support Block I/O weight. Weight discarded.") + hostConfig.BlkioWeight = 0 + } + if hostConfig.OomKillDisable != nil && !sysInfo.OomKillDisable { + *hostConfig.OomKillDisable = false + // Don't warn; this is the default setting but only applicable to Linux + } + + if sysInfo.IPv4ForwardingDisabled { + warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.") + logrus.Warnf("IPv4 forwarding is disabled. Networking will not work") + } + + // Solaris NOTE: We do not allow setting Linux specific options, so check and warn for all of them. 
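Each of the Solaris guards that follow repeats the same warn-log-reset triple. The same behaviour can be expressed table-driven, pairing a predicate with a reset closure; a sketch under that assumption (not how the patch itself structures it):

package main

import "fmt"

func main() {
	ipcMode, pidMode := "host", "host"
	privileged := true

	checks := []struct {
		active bool
		warn   string
		reset  func()
	}{
		{ipcMode != "", "IPC namespace assignment unsupported on Solaris. Discarding IPC setting.", func() { ipcMode = "" }},
		{pidMode != "", "PID namespace setting unsupported on Solaris. Running container in host PID namespace.", func() { pidMode = "" }},
		{privileged, "Privileged mode unsupported on Solaris. Discarding privileged mode setting.", func() { privileged = false }},
	}

	var warnings []string
	for _, c := range checks {
		if c.active {
			warnings = append(warnings, c.warn) // surfaced to the API caller
			c.reset()                           // neutralise the setting
		}
	}
	fmt.Println(len(warnings), ipcMode, pidMode, privileged) // 3  false
}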
+
+	if hostConfig.CapAdd != nil || hostConfig.CapDrop != nil {
+		warnings = append(warnings, "Adding or dropping kernel capabilities unsupported on Solaris. Discarding capabilities lists.")
+		logrus.Warnf("Adding or dropping kernel capabilities unsupported on Solaris. Discarding capabilities lists.")
+		hostConfig.CapAdd = nil
+		hostConfig.CapDrop = nil
+	}
+
+	if hostConfig.GroupAdd != nil {
+		warnings = append(warnings, "Additional groups unsupported on Solaris. Discarding groups lists.")
+		logrus.Warnf("Additional groups unsupported on Solaris. Discarding groups lists.")
+		hostConfig.GroupAdd = nil
+	}
+
+	if hostConfig.IpcMode != "" {
+		warnings = append(warnings, "IPC namespace assignment unsupported on Solaris. Discarding IPC setting.")
+		logrus.Warnf("IPC namespace assignment unsupported on Solaris. Discarding IPC setting.")
+		hostConfig.IpcMode = ""
+	}
+
+	if hostConfig.PidMode != "" {
+		warnings = append(warnings, "PID namespace setting unsupported on Solaris. Running container in host PID namespace.")
+		logrus.Warnf("PID namespace setting unsupported on Solaris. Running container in host PID namespace.")
+		hostConfig.PidMode = ""
+	}
+
+	if hostConfig.Privileged {
+		warnings = append(warnings, "Privileged mode unsupported on Solaris. Discarding privileged mode setting.")
+		logrus.Warnf("Privileged mode unsupported on Solaris. Discarding privileged mode setting.")
+		hostConfig.Privileged = false
+	}
+
+	if hostConfig.UTSMode != "" {
+		warnings = append(warnings, "UTS namespace assignment unsupported on Solaris. Discarding UTS setting.")
+		logrus.Warnf("UTS namespace assignment unsupported on Solaris. Discarding UTS setting.")
+		hostConfig.UTSMode = ""
+	}
+
+	if hostConfig.CgroupParent != "" {
+		warnings = append(warnings, "Specifying Cgroup parent unsupported on Solaris. Discarding cgroup parent setting.")
+		logrus.Warnf("Specifying Cgroup parent unsupported on Solaris. Discarding cgroup parent setting.")
+		hostConfig.CgroupParent = ""
+	}
+
+	if hostConfig.Ulimits != nil {
+		warnings = append(warnings, "Specifying ulimits unsupported on Solaris. Discarding ulimits setting.")
+		logrus.Warnf("Specifying ulimits unsupported on Solaris. Discarding ulimits setting.")
+		hostConfig.Ulimits = nil
+	}
+
+	return warnings, nil
+}
+
+// reloadPlatform updates configuration with platform specific options
+// and updates the passed attributes
+func (daemon *Daemon) reloadPlatform(config *Config, attributes map[string]string) {
+}
+
+// verifyDaemonSettings performs validation of daemon config struct
+func verifyDaemonSettings(config *Config) error {
+
+	if config.DefaultRuntime == "" {
+		config.DefaultRuntime = stockRuntimeName
+	}
+	if config.Runtimes == nil {
+		config.Runtimes = make(map[string]types.Runtime)
+	}
+	stockRuntimeOpts := []string{}
+	config.Runtimes[stockRuntimeName] = types.Runtime{Path: DefaultRuntimeBinary, Args: stockRuntimeOpts}
+
+	// checkSystem validates platform-specific requirements
+	return nil
+}
+
+func checkSystem() error {
+	// check OS version for compatibility, ensure running in global zone
+	var err error
+	var id C.zoneid_t
+
+	if id, err = C.getzoneid(); err != nil {
+		return fmt.Errorf("Exiting. Error getting zone id: %+v", err)
+	}
+	if int(id) != 0 {
+		return fmt.Errorf("Exiting because the Docker daemon is not running in the global zone")
+	}
+
+	v, err := kernel.GetKernelVersion()
+	if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 5, Major: 12, Minor: 0}) < 0 {
+		return fmt.Errorf("Your Solaris kernel version: %s doesn't support Docker.
Please upgrade to 5.12.0", v.String()) + } + return err +} + +// configureMaxThreads sets the Go runtime max threads threshold +// which is 90% of the kernel setting from /proc/sys/kernel/threads-max +func configureMaxThreads(config *Config) error { + return nil +} + +// configureKernelSecuritySupport configures and validate security support for the kernel +func configureKernelSecuritySupport(config *config.Config, driverNames []string) error { + return nil +} + +func (daemon *Daemon) initNetworkController(config *Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { + netOptions, err := daemon.networkOptions(config, daemon.PluginStore, activeSandboxes) + if err != nil { + return nil, err + } + + controller, err := libnetwork.New(netOptions...) + if err != nil { + return nil, fmt.Errorf("error obtaining controller instance: %v", err) + } + + // Initialize default network on "null" + if _, err := controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(false)); err != nil { + return nil, fmt.Errorf("Error creating default 'null' network: %v", err) + } + + if !config.DisableBridge { + // Initialize default driver "bridge" + if err := initBridgeDriver(controller, config); err != nil { + return nil, err + } + } + + return controller, nil +} + +func initBridgeDriver(controller libnetwork.NetworkController, config *Config) error { + if n, err := controller.NetworkByName("bridge"); err == nil { + if err = n.Delete(); err != nil { + return fmt.Errorf("could not delete the default bridge network: %v", err) + } + } + + bridgeName := bridge.DefaultBridgeName + if config.bridgeConfig.Iface != "" { + bridgeName = config.bridgeConfig.Iface + } + netOption := map[string]string{ + bridge.BridgeName: bridgeName, + bridge.DefaultBridge: strconv.FormatBool(true), + netlabel.DriverMTU: strconv.Itoa(config.Mtu), + bridge.EnableICC: strconv.FormatBool(config.bridgeConfig.InterContainerCommunication), + } + + // --ip processing + if config.bridgeConfig.DefaultIP != nil { + netOption[bridge.DefaultBindingIP] = config.bridgeConfig.DefaultIP.String() + } + + var ipamV4Conf *libnetwork.IpamConf + + ipamV4Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} + + nwList, _, err := netutils.ElectInterfaceAddresses(bridgeName) + if err != nil { + return errors.Wrap(err, "list bridge addresses failed") + } + + nw := nwList[0] + if len(nwList) > 1 && config.bridgeConfig.FixedCIDR != "" { + _, fCIDR, err := net.ParseCIDR(config.bridgeConfig.FixedCIDR) + if err != nil { + return errors.Wrap(err, "parse CIDR failed") + } + // Iterate through in case there are multiple addresses for the bridge + for _, entry := range nwList { + if fCIDR.Contains(entry.IP) { + nw = entry + break + } + } + } + + ipamV4Conf.PreferredPool = lntypes.GetIPNetCanonical(nw).String() + hip, _ := lntypes.GetHostPartIP(nw.IP, nw.Mask) + if hip.IsGlobalUnicast() { + ipamV4Conf.Gateway = nw.IP.String() + } + + if config.bridgeConfig.IP != "" { + ipamV4Conf.PreferredPool = config.bridgeConfig.IP + ip, _, err := net.ParseCIDR(config.bridgeConfig.IP) + if err != nil { + return err + } + ipamV4Conf.Gateway = ip.String() + } else if bridgeName == bridge.DefaultBridgeName && ipamV4Conf.PreferredPool != "" { + logrus.Infof("Default bridge (%s) is assigned with an IP address %s. 
Daemon option --bip can be used to set a preferred IP address", bridgeName, ipamV4Conf.PreferredPool) + } + + if config.bridgeConfig.FixedCIDR != "" { + _, fCIDR, err := net.ParseCIDR(config.bridgeConfig.FixedCIDR) + if err != nil { + return err + } + + ipamV4Conf.SubPool = fCIDR.String() + } + + if config.bridgeConfig.DefaultGatewayIPv4 != nil { + ipamV4Conf.AuxAddresses["DefaultGatewayIPv4"] = config.bridgeConfig.DefaultGatewayIPv4.String() + } + + v4Conf := []*libnetwork.IpamConf{ipamV4Conf} + v6Conf := []*libnetwork.IpamConf{} + + // Initialize default network on "bridge" with the same name + _, err = controller.NewNetwork("bridge", "bridge", "", + libnetwork.NetworkOptionDriverOpts(netOption), + libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), + libnetwork.NetworkOptionDeferIPv6Alloc(false)) + if err != nil { + return fmt.Errorf("Error creating default 'bridge' network: %v", err) + } + return nil +} + +// registerLinks sets up links between containers and writes the +// configuration out for persistence. +func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { + return nil +} + +func (daemon *Daemon) cleanupMounts() error { + return nil +} + +// conditionalMountOnStart is a platform specific helper function during the +// container start to call mount. +func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { + return daemon.Mount(container) +} + +// conditionalUnmountOnCleanup is a platform specific helper function called +// during the cleanup of a container to unmount. +func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { + return daemon.Unmount(container) +} + +func restoreCustomImage(is image.Store, ls layer.Store, rs refstore.Store) error { + // Solaris has no custom images to register + return nil +} + +func driverOptions(config *Config) []nwconfig.Option { + return []nwconfig.Option{} +} + +func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { + return nil, nil +} + +// setDefaultIsolation determine the default isolation mode for the +// daemon to run in. 
This is only applicable on Windows +func (daemon *Daemon) setDefaultIsolation() error { + return nil +} + +func rootFSToAPIType(rootfs *image.RootFS) types.RootFS { + return types.RootFS{} +} + +func setupDaemonProcess(config *Config) error { + return nil +} + +func (daemon *Daemon) setupSeccompProfile() error { + return nil +} + +func getRealPath(path string) (string, error) { + return fileutils.ReadSymlinkedDirectory(path) +} diff --git a/vendor/github.com/moby/moby/daemon/daemon_test.go b/vendor/github.com/moby/moby/daemon/daemon_test.go new file mode 100644 index 000000000..13d1059c1 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/daemon_test.go @@ -0,0 +1,306 @@ +// +build !solaris + +package daemon + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + _ "github.com/docker/docker/pkg/discovery/memory" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/truncindex" + "github.com/docker/docker/volume" + volumedrivers "github.com/docker/docker/volume/drivers" + "github.com/docker/docker/volume/local" + "github.com/docker/docker/volume/store" + "github.com/docker/go-connections/nat" +) + +// +// https://github.com/docker/docker/issues/8069 +// + +func TestGetContainer(t *testing.T) { + c1 := &container.Container{ + ID: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57", + Name: "tender_bardeen", + } + + c2 := &container.Container{ + ID: "3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de", + Name: "drunk_hawking", + } + + c3 := &container.Container{ + ID: "3cdbd1aa394fd68559fd1441d6eff2abfafdcba06e72d2febdba229008b0bf57", + Name: "3cdbd1aa", + } + + c4 := &container.Container{ + ID: "75fb0b800922abdbef2d27e60abcdfaf7fb0698b2a96d22d3354da361a6ff4a5", + Name: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57", + } + + c5 := &container.Container{ + ID: "d22d69a2b8960bf7fafdcba06e72d2febdba960bf7fafdcba06e72d2f9008b060b", + Name: "d22d69a2b896", + } + + store := container.NewMemoryStore() + store.Add(c1.ID, c1) + store.Add(c2.ID, c2) + store.Add(c3.ID, c3) + store.Add(c4.ID, c4) + store.Add(c5.ID, c5) + + index := truncindex.NewTruncIndex([]string{}) + index.Add(c1.ID) + index.Add(c2.ID) + index.Add(c3.ID) + index.Add(c4.ID) + index.Add(c5.ID) + + containersReplica, err := container.NewViewDB() + if err != nil { + t.Fatalf("could not create ViewDB: %v", err) + } + + daemon := &Daemon{ + containers: store, + containersReplica: containersReplica, + idIndex: index, + } + + daemon.reserveName(c1.ID, c1.Name) + daemon.reserveName(c2.ID, c2.Name) + daemon.reserveName(c3.ID, c3.Name) + daemon.reserveName(c4.ID, c4.Name) + daemon.reserveName(c5.ID, c5.Name) + + if container, _ := daemon.GetContainer("3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de"); container != c2 { + t.Fatal("Should explicitly match full container IDs") + } + + if container, _ := daemon.GetContainer("75fb0b8009"); container != c4 { + t.Fatal("Should match a partial ID") + } + + if container, _ := daemon.GetContainer("drunk_hawking"); container != c2 { + t.Fatal("Should match a full name") + } + + // c3.Name is a partial match for both c3.ID and c2.ID + if c, _ := daemon.GetContainer("3cdbd1aa"); c != c3 { + t.Fatal("Should match a full name even though it collides with another container's ID") + } + + if container, _ := daemon.GetContainer("d22d69a2b896"); container != c5 { + t.Fatal("Should match a container where the provided 
prefix is an exact match to the its name, and is also a prefix for its ID") + } + + if _, err := daemon.GetContainer("3cdbd1"); err == nil { + t.Fatal("Should return an error when provided a prefix that partially matches multiple container ID's") + } + + if _, err := daemon.GetContainer("nothing"); err == nil { + t.Fatal("Should return an error when provided a prefix that is neither a name or a partial match to an ID") + } +} + +func initDaemonWithVolumeStore(tmp string) (*Daemon, error) { + var err error + daemon := &Daemon{ + repository: tmp, + root: tmp, + } + daemon.volumes, err = store.New(tmp) + if err != nil { + return nil, err + } + + volumesDriver, err := local.New(tmp, idtools.IDPair{UID: 0, GID: 0}) + if err != nil { + return nil, err + } + volumedrivers.Register(volumesDriver, volumesDriver.Name()) + + return daemon, nil +} + +func TestValidContainerNames(t *testing.T) { + invalidNames := []string{"-rm", "&sdfsfd", "safd%sd"} + validNames := []string{"word-word", "word_word", "1weoid"} + + for _, name := range invalidNames { + if validContainerNamePattern.MatchString(name) { + t.Fatalf("%q is not a valid container name and was returned as valid.", name) + } + } + + for _, name := range validNames { + if !validContainerNamePattern.MatchString(name) { + t.Fatalf("%q is a valid container name and was returned as invalid.", name) + } + } +} + +func TestContainerInitDNS(t *testing.T) { + tmp, err := ioutil.TempDir("", "docker-container-test-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + containerID := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e" + containerPath := filepath.Join(tmp, containerID) + if err := os.MkdirAll(containerPath, 0755); err != nil { + t.Fatal(err) + } + + config := `{"State":{"Running":true,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":2464,"ExitCode":0, +"Error":"","StartedAt":"2015-05-26T16:48:53.869308965Z","FinishedAt":"0001-01-01T00:00:00Z"}, +"ID":"d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e","Created":"2015-05-26T16:48:53.7987917Z","Path":"top", +"Args":[],"Config":{"Hostname":"d59df5276e7b","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"", +"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":true,"OpenStdin":true, +"StdinOnce":false,"Env":null,"Cmd":["top"],"Image":"ubuntu:latest","Volumes":null,"WorkingDir":"","Entrypoint":null, +"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":{}},"Image":"07f8e8c5e66084bef8f848877857537ffe1c47edd01a93af27e7161672ad0e95", +"NetworkSettings":{"IPAddress":"172.17.0.1","IPPrefixLen":16,"MacAddress":"02:42:ac:11:00:01","LinkLocalIPv6Address":"fe80::42:acff:fe11:1", +"LinkLocalIPv6PrefixLen":64,"GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"Gateway":"172.17.42.1","IPv6Gateway":"","Bridge":"docker0","Ports":{}}, +"ResolvConfPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/resolv.conf", +"HostnamePath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hostname", +"HostsPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hosts", +"LogPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e-json.log", 
+"Name":"/ubuntu","Driver":"aufs","MountLabel":"","ProcessLabel":"","AppArmorProfile":"","RestartCount":0, +"UpdateDns":false,"Volumes":{},"VolumesRW":{},"AppliedVolumesFrom":null}` + + // Container struct only used to retrieve path to config file + container := &container.Container{Root: containerPath} + configPath, err := container.ConfigPath() + if err != nil { + t.Fatal(err) + } + if err = ioutil.WriteFile(configPath, []byte(config), 0644); err != nil { + t.Fatal(err) + } + + hostConfig := `{"Binds":[],"ContainerIDFile":"","Memory":0,"MemorySwap":0,"CpuShares":0,"CpusetCpus":"", +"Privileged":false,"PortBindings":{},"Links":null,"PublishAllPorts":false,"Dns":null,"DnsOptions":null,"DnsSearch":null,"ExtraHosts":null,"VolumesFrom":null, +"Devices":[],"NetworkMode":"bridge","IpcMode":"","PidMode":"","CapAdd":null,"CapDrop":null,"RestartPolicy":{"Name":"no","MaximumRetryCount":0}, +"SecurityOpt":null,"ReadonlyRootfs":false,"Ulimits":null,"LogConfig":{"Type":"","Config":null},"CgroupParent":""}` + + hostConfigPath, err := container.HostConfigPath() + if err != nil { + t.Fatal(err) + } + if err = ioutil.WriteFile(hostConfigPath, []byte(hostConfig), 0644); err != nil { + t.Fatal(err) + } + + daemon, err := initDaemonWithVolumeStore(tmp) + if err != nil { + t.Fatal(err) + } + defer volumedrivers.Unregister(volume.DefaultDriverName) + + c, err := daemon.load(containerID) + if err != nil { + t.Fatal(err) + } + + if c.HostConfig.DNS == nil { + t.Fatal("Expected container DNS to not be nil") + } + + if c.HostConfig.DNSSearch == nil { + t.Fatal("Expected container DNSSearch to not be nil") + } + + if c.HostConfig.DNSOptions == nil { + t.Fatal("Expected container DNSOptions to not be nil") + } +} + +func newPortNoError(proto, port string) nat.Port { + p, _ := nat.NewPort(proto, port) + return p +} + +func TestMerge(t *testing.T) { + volumesImage := make(map[string]struct{}) + volumesImage["/test1"] = struct{}{} + volumesImage["/test2"] = struct{}{} + portsImage := make(nat.PortSet) + portsImage[newPortNoError("tcp", "1111")] = struct{}{} + portsImage[newPortNoError("tcp", "2222")] = struct{}{} + configImage := &containertypes.Config{ + ExposedPorts: portsImage, + Env: []string{"VAR1=1", "VAR2=2"}, + Volumes: volumesImage, + } + + portsUser := make(nat.PortSet) + portsUser[newPortNoError("tcp", "2222")] = struct{}{} + portsUser[newPortNoError("tcp", "3333")] = struct{}{} + volumesUser := make(map[string]struct{}) + volumesUser["/test3"] = struct{}{} + configUser := &containertypes.Config{ + ExposedPorts: portsUser, + Env: []string{"VAR2=3", "VAR3=3"}, + Volumes: volumesUser, + } + + if err := merge(configUser, configImage); err != nil { + t.Error(err) + } + + if len(configUser.ExposedPorts) != 3 { + t.Fatalf("Expected 3 ExposedPorts, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts)) + } + for portSpecs := range configUser.ExposedPorts { + if portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" { + t.Fatalf("Expected 1111 or 2222 or 3333, found %s", portSpecs) + } + } + if len(configUser.Env) != 3 { + t.Fatalf("Expected 3 env var, VAR1=1, VAR2=3 and VAR3=3, found %d", len(configUser.Env)) + } + for _, env := range configUser.Env { + if env != "VAR1=1" && env != "VAR2=3" && env != "VAR3=3" { + t.Fatalf("Expected VAR1=1 or VAR2=3 or VAR3=3, found %s", env) + } + } + + if len(configUser.Volumes) != 3 { + t.Fatalf("Expected 3 volumes, /test1, /test2 and /test3, found %d", len(configUser.Volumes)) + } + for v := range configUser.Volumes { + if v != "/test1" && 
v != "/test2" && v != "/test3" { + t.Fatalf("Expected /test1 or /test2 or /test3, found %s", v) + } + } + + ports, _, err := nat.ParsePortSpecs([]string{"0000"}) + if err != nil { + t.Error(err) + } + configImage2 := &containertypes.Config{ + ExposedPorts: ports, + } + + if err := merge(configUser, configImage2); err != nil { + t.Error(err) + } + + if len(configUser.ExposedPorts) != 4 { + t.Fatalf("Expected 4 ExposedPorts, 0000, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts)) + } + for portSpecs := range configUser.ExposedPorts { + if portSpecs.Port() != "0" && portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" { + t.Fatalf("Expected %q or %q or %q or %q, found %s", 0, 1111, 2222, 3333, portSpecs) + } + } +} diff --git a/vendor/github.com/moby/moby/daemon/daemon_unix.go b/vendor/github.com/moby/moby/daemon/daemon_unix.go new file mode 100644 index 000000000..ff9eadf75 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/daemon_unix.go @@ -0,0 +1,1325 @@ +// +build linux freebsd + +package daemon + +import ( + "bufio" + "bytes" + "fmt" + "io/ioutil" + "net" + "os" + "path/filepath" + "runtime" + "runtime/debug" + "strconv" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/blkiodev" + pblkiodev "github.com/docker/docker/api/types/blkiodev" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/image" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/volume" + "github.com/docker/libnetwork" + nwconfig "github.com/docker/libnetwork/config" + "github.com/docker/libnetwork/drivers/bridge" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/netutils" + "github.com/docker/libnetwork/options" + lntypes "github.com/docker/libnetwork/types" + "github.com/golang/protobuf/ptypes" + "github.com/opencontainers/runc/libcontainer/cgroups" + rsystem "github.com/opencontainers/runc/libcontainer/system" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" + "github.com/vishvananda/netlink" +) + +const ( + // See https://git.kernel.org/cgit/linux/kernel/git/tip/tip.git/tree/kernel/sched/sched.h?id=8cd9234c64c584432f6992fe944ca9e46ca8ea76#n269 + linuxMinCPUShares = 2 + linuxMaxCPUShares = 262144 + platformSupported = true + // It's not kernel limit, we want this 4M limit to supply a reasonable functional container + linuxMinMemory = 4194304 + // constants for remapped root settings + defaultIDSpecifier string = "default" + defaultRemappedID string = "dockremap" + + // constant for cgroup drivers + cgroupFsDriver = "cgroupfs" + cgroupSystemdDriver = "systemd" +) + +func getMemoryResources(config containertypes.Resources) *specs.LinuxMemory { + memory := specs.LinuxMemory{} + + if config.Memory > 0 { + limit := uint64(config.Memory) + memory.Limit = &limit + } + + if config.MemoryReservation > 0 { + reservation := uint64(config.MemoryReservation) + memory.Reservation = &reservation + } + + if config.MemorySwap > 0 { + swap := uint64(config.MemorySwap) + memory.Swap = &swap + } + + if config.MemorySwappiness != nil { + swappiness 
:= uint64(*config.MemorySwappiness)
+		memory.Swappiness = &swappiness
+	}
+
+	if config.KernelMemory != 0 {
+		kernelMemory := uint64(config.KernelMemory)
+		memory.Kernel = &kernelMemory
+	}
+
+	return &memory
+}
+
+func getCPUResources(config containertypes.Resources) (*specs.LinuxCPU, error) {
+	cpu := specs.LinuxCPU{}
+
+	if config.CPUShares < 0 {
+		return nil, fmt.Errorf("shares: invalid argument")
+	}
+	if config.CPUShares >= 0 {
+		shares := uint64(config.CPUShares)
+		cpu.Shares = &shares
+	}
+
+	if config.CpusetCpus != "" {
+		cpu.Cpus = config.CpusetCpus
+	}
+
+	if config.CpusetMems != "" {
+		cpu.Mems = config.CpusetMems
+	}
+
+	if config.NanoCPUs > 0 {
+		// https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt
+		// NanoCPUs expresses CPUs in units of 1e-9: with the fixed 100ms
+		// (100000us) period, NanoCPUs=2500000000 (2.5 CPUs) yields quota=250000.
+		period := uint64(100 * time.Millisecond / time.Microsecond)
+		quota := config.NanoCPUs * int64(period) / 1e9
+		cpu.Period = &period
+		cpu.Quota = &quota
+	}
+
+	if config.CPUPeriod != 0 {
+		period := uint64(config.CPUPeriod)
+		cpu.Period = &period
+	}
+
+	if config.CPUQuota != 0 {
+		q := config.CPUQuota
+		cpu.Quota = &q
+	}
+
+	if config.CPURealtimePeriod != 0 {
+		period := uint64(config.CPURealtimePeriod)
+		cpu.RealtimePeriod = &period
+	}
+
+	if config.CPURealtimeRuntime != 0 {
+		c := config.CPURealtimeRuntime
+		cpu.RealtimeRuntime = &c
+	}
+
+	return &cpu, nil
+}
+
+func getBlkioWeightDevices(config containertypes.Resources) ([]specs.LinuxWeightDevice, error) {
+	var stat syscall.Stat_t
+	var blkioWeightDevices []specs.LinuxWeightDevice
+
+	for _, weightDevice := range config.BlkioWeightDevice {
+		if err := syscall.Stat(weightDevice.Path, &stat); err != nil {
+			return nil, err
+		}
+		weight := weightDevice.Weight
+		d := specs.LinuxWeightDevice{Weight: &weight}
+		d.Major = int64(stat.Rdev / 256)
+		d.Minor = int64(stat.Rdev % 256)
+		blkioWeightDevices = append(blkioWeightDevices, d)
+	}
+
+	return blkioWeightDevices, nil
+}
+
+func (daemon *Daemon) parseSecurityOpt(container *container.Container, hostConfig *containertypes.HostConfig) error {
+	container.NoNewPrivileges = daemon.configStore.NoNewPrivileges
+	return parseSecurityOpt(container, hostConfig)
+}
+
+func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error {
+	var (
+		labelOpts []string
+		err       error
+	)
+
+	for _, opt := range config.SecurityOpt {
+		if opt == "no-new-privileges" {
+			container.NoNewPrivileges = true
+			continue
+		}
+		if opt == "disable" {
+			labelOpts = append(labelOpts, "disable")
+			continue
+		}
+
+		var con []string
+		if strings.Contains(opt, "=") {
+			con = strings.SplitN(opt, "=", 2)
+		} else if strings.Contains(opt, ":") {
+			con = strings.SplitN(opt, ":", 2)
+			logrus.Warn("Security options with `:` as a separator are deprecated and will be completely unsupported in 17.04, use `=` instead.")
+		}
+		if len(con) != 2 {
+			return fmt.Errorf("invalid --security-opt 1: %q", opt)
+		}
+
+		switch con[0] {
+		case "label":
+			labelOpts = append(labelOpts, con[1])
+		case "apparmor":
+			container.AppArmorProfile = con[1]
+		case "seccomp":
+			container.SeccompProfile = con[1]
+		case "no-new-privileges":
+			noNewPrivileges, err := strconv.ParseBool(con[1])
+			if err != nil {
+				return fmt.Errorf("invalid --security-opt 2: %q", opt)
+			}
+			container.NoNewPrivileges = noNewPrivileges
+		default:
+			return fmt.Errorf("invalid --security-opt 2: %q", opt)
+		}
+	}
+
+	container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts)
+	return err
+}
+
+func getBlkioThrottleDevices(devs []*blkiodev.ThrottleDevice) ([]specs.LinuxThrottleDevice, error) {
+	var throttleDevices 
[]specs.LinuxThrottleDevice + var stat syscall.Stat_t + + for _, d := range devs { + if err := syscall.Stat(d.Path, &stat); err != nil { + return nil, err + } + d := specs.LinuxThrottleDevice{Rate: d.Rate} + d.Major = int64(stat.Rdev / 256) + d.Minor = int64(stat.Rdev % 256) + throttleDevices = append(throttleDevices, d) + } + + return throttleDevices, nil +} + +func checkKernel() error { + // Check for unsupported kernel versions + // FIXME: it would be cleaner to not test for specific versions, but rather + // test for specific functionalities. + // Unfortunately we can't test for the feature "does not cause a kernel panic" + // without actually causing a kernel panic, so we need this workaround until + // the circumstances of pre-3.10 crashes are clearer. + // For details see https://github.com/docker/docker/issues/407 + // Docker 1.11 and above doesn't actually run on kernels older than 3.4, + // due to containerd-shim usage of PR_SET_CHILD_SUBREAPER (introduced in 3.4). + if !kernel.CheckKernelVersion(3, 10, 0) { + v, _ := kernel.GetKernelVersion() + if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" { + logrus.Fatalf("Your Linux kernel version %s is not supported for running docker. Please upgrade your kernel to 3.10.0 or newer.", v.String()) + } + } + return nil +} + +// adaptContainerSettings is called during container creation to modify any +// settings necessary in the HostConfig structure. +func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { + if adjustCPUShares && hostConfig.CPUShares > 0 { + // Handle unsupported CPUShares + if hostConfig.CPUShares < linuxMinCPUShares { + logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, linuxMinCPUShares) + hostConfig.CPUShares = linuxMinCPUShares + } else if hostConfig.CPUShares > linuxMaxCPUShares { + logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, linuxMaxCPUShares) + hostConfig.CPUShares = linuxMaxCPUShares + } + } + if hostConfig.Memory > 0 && hostConfig.MemorySwap == 0 { + // By default, MemorySwap is set to twice the size of Memory. + hostConfig.MemorySwap = hostConfig.Memory * 2 + } + if hostConfig.ShmSize == 0 { + hostConfig.ShmSize = config.DefaultShmSize + if daemon.configStore != nil { + hostConfig.ShmSize = int64(daemon.configStore.ShmSize) + } + } + var err error + opts, err := daemon.generateSecurityOpt(hostConfig) + if err != nil { + return err + } + hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, opts...) + if hostConfig.OomKillDisable == nil { + defaultOomKillDisable := false + hostConfig.OomKillDisable = &defaultOomKillDisable + } + + return nil +} + +func verifyContainerResources(resources *containertypes.Resources, sysInfo *sysinfo.SysInfo, update bool) ([]string, error) { + warnings := []string{} + fixMemorySwappiness(resources) + + // memory subsystem checks and adjustments + if resources.Memory != 0 && resources.Memory < linuxMinMemory { + return warnings, fmt.Errorf("Minimum memory limit allowed is 4MB") + } + if resources.Memory > 0 && !sysInfo.MemoryLimit { + warnings = append(warnings, "Your kernel does not support memory limit capabilities or the cgroup is not mounted. Limitation discarded.") + logrus.Warn("Your kernel does not support memory limit capabilities or the cgroup is not mounted. 
Limitation discarded.") + resources.Memory = 0 + resources.MemorySwap = -1 + } + if resources.Memory > 0 && resources.MemorySwap != -1 && !sysInfo.SwapLimit { + warnings = append(warnings, "Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.") + logrus.Warn("Your kernel does not support swap limit capabilities,or the cgroup is not mounted. Memory limited without swap.") + resources.MemorySwap = -1 + } + if resources.Memory > 0 && resources.MemorySwap > 0 && resources.MemorySwap < resources.Memory { + return warnings, fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage") + } + if resources.Memory == 0 && resources.MemorySwap > 0 && !update { + return warnings, fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage") + } + if resources.MemorySwappiness != nil && !sysInfo.MemorySwappiness { + warnings = append(warnings, "Your kernel does not support memory swappiness capabilities or the cgroup is not mounted. Memory swappiness discarded.") + logrus.Warn("Your kernel does not support memory swappiness capabilities, or the cgroup is not mounted. Memory swappiness discarded.") + resources.MemorySwappiness = nil + } + if resources.MemorySwappiness != nil { + swappiness := *resources.MemorySwappiness + if swappiness < 0 || swappiness > 100 { + return warnings, fmt.Errorf("Invalid value: %v, valid memory swappiness range is 0-100", swappiness) + } + } + if resources.MemoryReservation > 0 && !sysInfo.MemoryReservation { + warnings = append(warnings, "Your kernel does not support memory soft limit capabilities or the cgroup is not mounted. Limitation discarded.") + logrus.Warn("Your kernel does not support memory soft limit capabilities or the cgroup is not mounted. Limitation discarded.") + resources.MemoryReservation = 0 + } + if resources.MemoryReservation > 0 && resources.MemoryReservation < linuxMinMemory { + return warnings, fmt.Errorf("Minimum memory reservation allowed is 4MB") + } + if resources.Memory > 0 && resources.MemoryReservation > 0 && resources.Memory < resources.MemoryReservation { + return warnings, fmt.Errorf("Minimum memory limit can not be less than memory reservation limit, see usage") + } + if resources.KernelMemory > 0 && !sysInfo.KernelMemory { + warnings = append(warnings, "Your kernel does not support kernel memory limit capabilities or the cgroup is not mounted. Limitation discarded.") + logrus.Warn("Your kernel does not support kernel memory limit capabilities or the cgroup is not mounted. Limitation discarded.") + resources.KernelMemory = 0 + } + if resources.KernelMemory > 0 && resources.KernelMemory < linuxMinMemory { + return warnings, fmt.Errorf("Minimum kernel memory limit allowed is 4MB") + } + if resources.KernelMemory > 0 && !kernel.CheckKernelVersion(4, 0, 0) { + warnings = append(warnings, "You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.") + logrus.Warn("You specified a kernel memory limit on a kernel older than 4.0. 
Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.") + } + if resources.OomKillDisable != nil && !sysInfo.OomKillDisable { + // only produce warnings if the setting wasn't to *disable* the OOM Kill; no point + // warning the caller if they already wanted the feature to be off + if *resources.OomKillDisable { + warnings = append(warnings, "Your kernel does not support OomKillDisable. OomKillDisable discarded.") + logrus.Warn("Your kernel does not support OomKillDisable. OomKillDisable discarded.") + } + resources.OomKillDisable = nil + } + + if resources.PidsLimit != 0 && !sysInfo.PidsLimit { + warnings = append(warnings, "Your kernel does not support pids limit capabilities or the cgroup is not mounted. PIDs limit discarded.") + logrus.Warn("Your kernel does not support pids limit capabilities or the cgroup is not mounted. PIDs limit discarded.") + resources.PidsLimit = 0 + } + + // cpu subsystem checks and adjustments + if resources.NanoCPUs > 0 && resources.CPUPeriod > 0 { + return warnings, fmt.Errorf("Conflicting options: Nano CPUs and CPU Period cannot both be set") + } + if resources.NanoCPUs > 0 && resources.CPUQuota > 0 { + return warnings, fmt.Errorf("Conflicting options: Nano CPUs and CPU Quota cannot both be set") + } + if resources.NanoCPUs > 0 && (!sysInfo.CPUCfsPeriod || !sysInfo.CPUCfsQuota) { + return warnings, fmt.Errorf("NanoCPUs can not be set, as your kernel does not support CPU cfs period/quota or the cgroup is not mounted") + } + // The highest precision we could get on Linux is 0.001, by setting + // cpu.cfs_period_us=1000ms + // cpu.cfs_quota=1ms + // See the following link for details: + // https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt + // Here we don't set the lower limit and it is up to the underlying platform (e.g., Linux) to return an error. + // The error message is 0.01 so that this is consistent with Windows + if resources.NanoCPUs < 0 || resources.NanoCPUs > int64(sysinfo.NumCPU())*1e9 { + return warnings, fmt.Errorf("Range of CPUs is from 0.01 to %d.00, as there are only %d CPUs available", sysinfo.NumCPU(), sysinfo.NumCPU()) + } + + if resources.CPUShares > 0 && !sysInfo.CPUShares { + warnings = append(warnings, "Your kernel does not support CPU shares or the cgroup is not mounted. Shares discarded.") + logrus.Warn("Your kernel does not support CPU shares or the cgroup is not mounted. Shares discarded.") + resources.CPUShares = 0 + } + if resources.CPUPeriod > 0 && !sysInfo.CPUCfsPeriod { + warnings = append(warnings, "Your kernel does not support CPU cfs period or the cgroup is not mounted. Period discarded.") + logrus.Warn("Your kernel does not support CPU cfs period or the cgroup is not mounted. Period discarded.") + resources.CPUPeriod = 0 + } + if resources.CPUPeriod != 0 && (resources.CPUPeriod < 1000 || resources.CPUPeriod > 1000000) { + return warnings, fmt.Errorf("CPU cfs period can not be less than 1ms (i.e. 1000) or larger than 1s (i.e. 1000000)") + } + if resources.CPUQuota > 0 && !sysInfo.CPUCfsQuota { + warnings = append(warnings, "Your kernel does not support CPU cfs quota or the cgroup is not mounted. Quota discarded.") + logrus.Warn("Your kernel does not support CPU cfs quota or the cgroup is not mounted. Quota discarded.") + resources.CPUQuota = 0 + } + if resources.CPUQuota > 0 && resources.CPUQuota < 1000 { + return warnings, fmt.Errorf("CPU cfs quota can not be less than 1ms (i.e. 
1000)") + } + if resources.CPUPercent > 0 { + warnings = append(warnings, fmt.Sprintf("%s does not support CPU percent. Percent discarded.", runtime.GOOS)) + logrus.Warnf("%s does not support CPU percent. Percent discarded.", runtime.GOOS) + resources.CPUPercent = 0 + } + + // cpuset subsystem checks and adjustments + if (resources.CpusetCpus != "" || resources.CpusetMems != "") && !sysInfo.Cpuset { + warnings = append(warnings, "Your kernel does not support cpuset or the cgroup is not mounted. Cpuset discarded.") + logrus.Warn("Your kernel does not support cpuset or the cgroup is not mounted. Cpuset discarded.") + resources.CpusetCpus = "" + resources.CpusetMems = "" + } + cpusAvailable, err := sysInfo.IsCpusetCpusAvailable(resources.CpusetCpus) + if err != nil { + return warnings, fmt.Errorf("Invalid value %s for cpuset cpus", resources.CpusetCpus) + } + if !cpusAvailable { + return warnings, fmt.Errorf("Requested CPUs are not available - requested %s, available: %s", resources.CpusetCpus, sysInfo.Cpus) + } + memsAvailable, err := sysInfo.IsCpusetMemsAvailable(resources.CpusetMems) + if err != nil { + return warnings, fmt.Errorf("Invalid value %s for cpuset mems", resources.CpusetMems) + } + if !memsAvailable { + return warnings, fmt.Errorf("Requested memory nodes are not available - requested %s, available: %s", resources.CpusetMems, sysInfo.Mems) + } + + // blkio subsystem checks and adjustments + if resources.BlkioWeight > 0 && !sysInfo.BlkioWeight { + warnings = append(warnings, "Your kernel does not support Block I/O weight or the cgroup is not mounted. Weight discarded.") + logrus.Warn("Your kernel does not support Block I/O weight or the cgroup is not mounted. Weight discarded.") + resources.BlkioWeight = 0 + } + if resources.BlkioWeight > 0 && (resources.BlkioWeight < 10 || resources.BlkioWeight > 1000) { + return warnings, fmt.Errorf("Range of blkio weight is from 10 to 1000") + } + if resources.IOMaximumBandwidth != 0 || resources.IOMaximumIOps != 0 { + return warnings, fmt.Errorf("Invalid QoS settings: %s does not support Maximum IO Bandwidth or Maximum IO IOps", runtime.GOOS) + } + if len(resources.BlkioWeightDevice) > 0 && !sysInfo.BlkioWeightDevice { + warnings = append(warnings, "Your kernel does not support Block I/O weight_device or the cgroup is not mounted. Weight-device discarded.") + logrus.Warn("Your kernel does not support Block I/O weight_device or the cgroup is not mounted. Weight-device discarded.") + resources.BlkioWeightDevice = []*pblkiodev.WeightDevice{} + } + if len(resources.BlkioDeviceReadBps) > 0 && !sysInfo.BlkioReadBpsDevice { + warnings = append(warnings, "Your kernel does not support BPS Block I/O read limit or the cgroup is not mounted. Block I/O BPS read limit discarded.") + logrus.Warn("Your kernel does not support BPS Block I/O read limit or the cgroup is not mounted. Block I/O BPS read limit discarded") + resources.BlkioDeviceReadBps = []*pblkiodev.ThrottleDevice{} + } + if len(resources.BlkioDeviceWriteBps) > 0 && !sysInfo.BlkioWriteBpsDevice { + warnings = append(warnings, "Your kernel does not support BPS Block I/O write limit or the cgroup is not mounted. Block I/O BPS write limit discarded.") + logrus.Warn("Your kernel does not support BPS Block I/O write limit or the cgroup is not mounted. 
Block I/O BPS write limit discarded.") + resources.BlkioDeviceWriteBps = []*pblkiodev.ThrottleDevice{} + } + if len(resources.BlkioDeviceReadIOps) > 0 && !sysInfo.BlkioReadIOpsDevice { + warnings = append(warnings, "Your kernel does not support IOPS Block read limit or the cgroup is not mounted. Block I/O IOPS read limit discarded.") + logrus.Warn("Your kernel does not support IOPS Block I/O read limit in IO or the cgroup is not mounted. Block I/O IOPS read limit discarded.") + resources.BlkioDeviceReadIOps = []*pblkiodev.ThrottleDevice{} + } + if len(resources.BlkioDeviceWriteIOps) > 0 && !sysInfo.BlkioWriteIOpsDevice { + warnings = append(warnings, "Your kernel does not support IOPS Block write limit or the cgroup is not mounted. Block I/O IOPS write limit discarded.") + logrus.Warn("Your kernel does not support IOPS Block I/O write limit or the cgroup is not mounted. Block I/O IOPS write limit discarded.") + resources.BlkioDeviceWriteIOps = []*pblkiodev.ThrottleDevice{} + } + + return warnings, nil +} + +func (daemon *Daemon) getCgroupDriver() string { + cgroupDriver := cgroupFsDriver + + if UsingSystemd(daemon.configStore) { + cgroupDriver = cgroupSystemdDriver + } + return cgroupDriver +} + +// getCD gets the raw value of the native.cgroupdriver option, if set. +func getCD(config *config.Config) string { + for _, option := range config.ExecOptions { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil || !strings.EqualFold(key, "native.cgroupdriver") { + continue + } + return val + } + return "" +} + +// VerifyCgroupDriver validates native.cgroupdriver +func VerifyCgroupDriver(config *config.Config) error { + cd := getCD(config) + if cd == "" || cd == cgroupFsDriver || cd == cgroupSystemdDriver { + return nil + } + return fmt.Errorf("native.cgroupdriver option %s not supported", cd) +} + +// UsingSystemd returns true if cli option includes native.cgroupdriver=systemd +func UsingSystemd(config *config.Config) bool { + return getCD(config) == cgroupSystemdDriver +} + +// verifyPlatformContainerSettings performs platform-specific validation of the +// hostconfig and config structures. +func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { + var warnings []string + sysInfo := sysinfo.New(true) + + warnings, err := daemon.verifyExperimentalContainerSettings(hostConfig, config) + if err != nil { + return warnings, err + } + + w, err := verifyContainerResources(&hostConfig.Resources, sysInfo, update) + + // no matter err is nil or not, w could have data in itself. + warnings = append(warnings, w...) + + if err != nil { + return warnings, err + } + + if hostConfig.ShmSize < 0 { + return warnings, fmt.Errorf("SHM size can not be less than 0") + } + + if hostConfig.OomScoreAdj < -1000 || hostConfig.OomScoreAdj > 1000 { + return warnings, fmt.Errorf("Invalid value %d, range for oom score adj is [-1000, 1000]", hostConfig.OomScoreAdj) + } + + // ip-forwarding does not affect container with '--net=host' (or '--net=none') + if sysInfo.IPv4ForwardingDisabled && !(hostConfig.NetworkMode.IsHost() || hostConfig.NetworkMode.IsNone()) { + warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.") + logrus.Warn("IPv4 forwarding is disabled. 
Networking will not work") + } + // check for various conflicting options with user namespaces + if daemon.configStore.RemappedRoot != "" && hostConfig.UsernsMode.IsPrivate() { + if hostConfig.Privileged { + return warnings, fmt.Errorf("Privileged mode is incompatible with user namespaces") + } + if hostConfig.NetworkMode.IsHost() && !hostConfig.UsernsMode.IsHost() { + return warnings, fmt.Errorf("Cannot share the host's network namespace when user namespaces are enabled") + } + if hostConfig.PidMode.IsHost() && !hostConfig.UsernsMode.IsHost() { + return warnings, fmt.Errorf("Cannot share the host PID namespace when user namespaces are enabled") + } + } + if hostConfig.CgroupParent != "" && UsingSystemd(daemon.configStore) { + // CgroupParent for systemd cgroup should be named as "xxx.slice" + if len(hostConfig.CgroupParent) <= 6 || !strings.HasSuffix(hostConfig.CgroupParent, ".slice") { + return warnings, fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") + } + } + if hostConfig.Runtime == "" { + hostConfig.Runtime = daemon.configStore.GetDefaultRuntimeName() + } + + if rt := daemon.configStore.GetRuntime(hostConfig.Runtime); rt == nil { + return warnings, fmt.Errorf("Unknown runtime specified %s", hostConfig.Runtime) + } + + for dest := range hostConfig.Tmpfs { + if err := volume.ValidateTmpfsMountDestination(dest); err != nil { + return warnings, err + } + } + + return warnings, nil +} + +// reloadPlatform updates configuration with platform specific options +// and updates the passed attributes +func (daemon *Daemon) reloadPlatform(conf *config.Config, attributes map[string]string) { + if conf.IsValueSet("runtimes") { + daemon.configStore.Runtimes = conf.Runtimes + // Always set the default one + daemon.configStore.Runtimes[config.StockRuntimeName] = types.Runtime{Path: DefaultRuntimeBinary} + } + + if conf.DefaultRuntime != "" { + daemon.configStore.DefaultRuntime = conf.DefaultRuntime + } + + if conf.IsValueSet("default-shm-size") { + daemon.configStore.ShmSize = conf.ShmSize + } + + // Update attributes + var runtimeList bytes.Buffer + for name, rt := range daemon.configStore.Runtimes { + if runtimeList.Len() > 0 { + runtimeList.WriteRune(' ') + } + runtimeList.WriteString(fmt.Sprintf("%s:%s", name, rt)) + } + + attributes["runtimes"] = runtimeList.String() + attributes["default-runtime"] = daemon.configStore.DefaultRuntime + attributes["default-shm-size"] = fmt.Sprintf("%d", daemon.configStore.ShmSize) +} + +// verifyDaemonSettings performs validation of daemon config struct +func verifyDaemonSettings(conf *config.Config) error { + // Check for mutually incompatible config options + if conf.BridgeConfig.Iface != "" && conf.BridgeConfig.IP != "" { + return fmt.Errorf("You specified -b & --bip, mutually exclusive options. Please specify only one") + } + if !conf.BridgeConfig.EnableIPTables && !conf.BridgeConfig.InterContainerCommunication { + return fmt.Errorf("You specified --iptables=false with --icc=false. ICC=false uses iptables to function. 
Please set --icc or --iptables to true") + } + if !conf.BridgeConfig.EnableIPTables && conf.BridgeConfig.EnableIPMasq { + conf.BridgeConfig.EnableIPMasq = false + } + if err := VerifyCgroupDriver(conf); err != nil { + return err + } + if conf.CgroupParent != "" && UsingSystemd(conf) { + if len(conf.CgroupParent) <= 6 || !strings.HasSuffix(conf.CgroupParent, ".slice") { + return fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") + } + } + + if conf.DefaultRuntime == "" { + conf.DefaultRuntime = config.StockRuntimeName + } + if conf.Runtimes == nil { + conf.Runtimes = make(map[string]types.Runtime) + } + conf.Runtimes[config.StockRuntimeName] = types.Runtime{Path: DefaultRuntimeBinary} + + return nil +} + +// checkSystem validates platform-specific requirements +func checkSystem() error { + if os.Geteuid() != 0 { + return fmt.Errorf("The Docker daemon needs to be run as root") + } + return checkKernel() +} + +// configureMaxThreads sets the Go runtime max threads threshold +// which is 90% of the kernel setting from /proc/sys/kernel/threads-max +func configureMaxThreads(config *config.Config) error { + mt, err := ioutil.ReadFile("/proc/sys/kernel/threads-max") + if err != nil { + return err + } + mtint, err := strconv.Atoi(strings.TrimSpace(string(mt))) + if err != nil { + return err + } + maxThreads := (mtint / 100) * 90 + debug.SetMaxThreads(maxThreads) + logrus.Debugf("Golang's threads limit set to %d", maxThreads) + return nil +} + +func overlaySupportsSelinux() (bool, error) { + f, err := os.Open("/proc/kallsyms") + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + defer f.Close() + + var symAddr, symType, symName, text string + + s := bufio.NewScanner(f) + for s.Scan() { + if err := s.Err(); err != nil { + return false, err + } + + text = s.Text() + if _, err := fmt.Sscanf(text, "%s %s %s", &symAddr, &symType, &symName); err != nil { + return false, fmt.Errorf("Scanning '%s' failed: %s", text, err) + } + + // Check for presence of symbol security_inode_copy_up. + if symName == "security_inode_copy_up" { + return true, nil + } + } + return false, nil +} + +// configureKernelSecuritySupport configures and validates security support for the kernel +func configureKernelSecuritySupport(config *config.Config, driverNames []string) error { + if config.EnableSelinuxSupport { + if !selinuxEnabled() { + logrus.Warn("Docker could not enable SELinux on the host system") + return nil + } + + overlayFound := false + for _, d := range driverNames { + if d == "overlay" || d == "overlay2" { + overlayFound = true + break + } + } + + if overlayFound { + // If driver is overlay or overlay2, make sure kernel + // supports selinux with overlay. + supported, err := overlaySupportsSelinux() + if err != nil { + return err + } + + if !supported { + logrus.Warnf("SELinux is not supported with the %v graph driver on this kernel", driverNames) + } + } + } else { + selinuxSetDisabled() + } + return nil +} + +func (daemon *Daemon) initNetworkController(config *config.Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { + netOptions, err := daemon.networkOptions(config, daemon.PluginStore, activeSandboxes) + if err != nil { + return nil, err + } + + controller, err := libnetwork.New(netOptions...) 
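+	// With the controller created, the code below ensures the default "none"
+	// and "host" networks exist, clears any stale default bridge network, and
+	// recreates the bridge from the current configuration unless bridge
+	// networking is disabled.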
+	if err != nil {
+		return nil, fmt.Errorf("error obtaining controller instance: %v", err)
+	}
+
+	if len(activeSandboxes) > 0 {
+		logrus.Info("There are old running containers, the network config will not take effect")
+		return controller, nil
+	}
+
+	// Initialize default network on "null"
+	if n, _ := controller.NetworkByName("none"); n == nil {
+		if _, err := controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(true)); err != nil {
+			return nil, fmt.Errorf("Error creating default \"null\" network: %v", err)
+		}
+	}
+
+	// Initialize default network on "host"
+	if n, _ := controller.NetworkByName("host"); n == nil {
+		if _, err := controller.NewNetwork("host", "host", "", libnetwork.NetworkOptionPersist(true)); err != nil {
+			return nil, fmt.Errorf("Error creating default \"host\" network: %v", err)
+		}
+	}
+
+	// Clear stale bridge network
+	if n, err := controller.NetworkByName("bridge"); err == nil {
+		if err = n.Delete(); err != nil {
+			return nil, fmt.Errorf("could not delete the default bridge network: %v", err)
+		}
+	}
+
+	if !config.DisableBridge {
+		// Initialize default driver "bridge"
+		if err := initBridgeDriver(controller, config); err != nil {
+			return nil, err
+		}
+	} else {
+		removeDefaultBridgeInterface()
+	}
+
+	return controller, nil
+}
+
+func driverOptions(config *config.Config) []nwconfig.Option {
+	bridgeConfig := options.Generic{
+		"EnableIPForwarding":  config.BridgeConfig.EnableIPForward,
+		"EnableIPTables":      config.BridgeConfig.EnableIPTables,
+		"EnableUserlandProxy": config.BridgeConfig.EnableUserlandProxy,
+		"UserlandProxyPath":   config.BridgeConfig.UserlandProxyPath}
+	bridgeOption := options.Generic{netlabel.GenericData: bridgeConfig}
+
+	dOptions := []nwconfig.Option{}
+	dOptions = append(dOptions, nwconfig.OptionDriverConfig("bridge", bridgeOption))
+	return dOptions
+}
+
+func initBridgeDriver(controller libnetwork.NetworkController, config *config.Config) error {
+	bridgeName := bridge.DefaultBridgeName
+	if config.BridgeConfig.Iface != "" {
+		bridgeName = config.BridgeConfig.Iface
+	}
+	netOption := map[string]string{
+		bridge.BridgeName:         bridgeName,
+		bridge.DefaultBridge:      strconv.FormatBool(true),
+		netlabel.DriverMTU:        strconv.Itoa(config.Mtu),
+		bridge.EnableIPMasquerade: strconv.FormatBool(config.BridgeConfig.EnableIPMasq),
+		bridge.EnableICC:          strconv.FormatBool(config.BridgeConfig.InterContainerCommunication),
+	}
+
+	// --ip processing
+	if config.BridgeConfig.DefaultIP != nil {
+		netOption[bridge.DefaultBindingIP] = config.BridgeConfig.DefaultIP.String()
+	}
+
+	var (
+		ipamV4Conf *libnetwork.IpamConf
+		ipamV6Conf *libnetwork.IpamConf
+	)
+
+	ipamV4Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)}
+
+	nwList, nw6List, err := netutils.ElectInterfaceAddresses(bridgeName)
+	if err != nil {
+		return errors.Wrap(err, "list bridge addresses failed")
+	}
+
+	nw := nwList[0]
+	if len(nwList) > 1 && config.BridgeConfig.FixedCIDR != "" {
+		_, fCIDR, err := net.ParseCIDR(config.BridgeConfig.FixedCIDR)
+		if err != nil {
+			return errors.Wrap(err, "parse CIDR failed")
+		}
+		// Iterate through in case there are multiple addresses for the bridge
+		for _, entry := range nwList {
+			if fCIDR.Contains(entry.IP) {
+				nw = entry
+				break
+			}
+		}
+	}
+
+	ipamV4Conf.PreferredPool = lntypes.GetIPNetCanonical(nw).String()
+	hip, _ := lntypes.GetHostPartIP(nw.IP, nw.Mask)
+	if hip.IsGlobalUnicast() {
+		ipamV4Conf.Gateway = nw.IP.String()
+	}
+
+	if config.BridgeConfig.IP != "" {
+		ipamV4Conf.PreferredPool = config.BridgeConfig.IP
+		ip, _, 
err := net.ParseCIDR(config.BridgeConfig.IP) + if err != nil { + return err + } + ipamV4Conf.Gateway = ip.String() + } else if bridgeName == bridge.DefaultBridgeName && ipamV4Conf.PreferredPool != "" { + logrus.Infof("Default bridge (%s) is assigned with an IP address %s. Daemon option --bip can be used to set a preferred IP address", bridgeName, ipamV4Conf.PreferredPool) + } + + if config.BridgeConfig.FixedCIDR != "" { + _, fCIDR, err := net.ParseCIDR(config.BridgeConfig.FixedCIDR) + if err != nil { + return err + } + + ipamV4Conf.SubPool = fCIDR.String() + } + + if config.BridgeConfig.DefaultGatewayIPv4 != nil { + ipamV4Conf.AuxAddresses["DefaultGatewayIPv4"] = config.BridgeConfig.DefaultGatewayIPv4.String() + } + + var deferIPv6Alloc bool + if config.BridgeConfig.FixedCIDRv6 != "" { + _, fCIDRv6, err := net.ParseCIDR(config.BridgeConfig.FixedCIDRv6) + if err != nil { + return err + } + + // In case user has specified the daemon flag --fixed-cidr-v6 and the passed network has + // at least 48 host bits, we need to guarantee the current behavior where the containers' + // IPv6 addresses will be constructed based on the containers' interface MAC address. + // We do so by telling libnetwork to defer the IPv6 address allocation for the endpoints + // on this network until after the driver has created the endpoint and returned the + // constructed address. Libnetwork will then reserve this address with the ipam driver. + ones, _ := fCIDRv6.Mask.Size() + deferIPv6Alloc = ones <= 80 + + if ipamV6Conf == nil { + ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} + } + ipamV6Conf.PreferredPool = fCIDRv6.String() + + // In case the --fixed-cidr-v6 is specified and the current docker0 bridge IPv6 + // address belongs to the same network, we need to inform libnetwork about it, so + // that it can be reserved with IPAM and it will not be given away to somebody else + for _, nw6 := range nw6List { + if fCIDRv6.Contains(nw6.IP) { + ipamV6Conf.Gateway = nw6.IP.String() + break + } + } + } + + if config.BridgeConfig.DefaultGatewayIPv6 != nil { + if ipamV6Conf == nil { + ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} + } + ipamV6Conf.AuxAddresses["DefaultGatewayIPv6"] = config.BridgeConfig.DefaultGatewayIPv6.String() + } + + v4Conf := []*libnetwork.IpamConf{ipamV4Conf} + v6Conf := []*libnetwork.IpamConf{} + if ipamV6Conf != nil { + v6Conf = append(v6Conf, ipamV6Conf) + } + // Initialize default network on "bridge" with the same name + _, err = controller.NewNetwork("bridge", "bridge", "", + libnetwork.NetworkOptionEnableIPv6(config.BridgeConfig.EnableIPv6), + libnetwork.NetworkOptionDriverOpts(netOption), + libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), + libnetwork.NetworkOptionDeferIPv6Alloc(deferIPv6Alloc)) + if err != nil { + return fmt.Errorf("Error creating default \"bridge\" network: %v", err) + } + return nil +} + +// Remove default bridge interface if present (--bridge=none use case) +func removeDefaultBridgeInterface() { + if lnk, err := netlink.LinkByName(bridge.DefaultBridgeName); err == nil { + if err := netlink.LinkDel(lnk); err != nil { + logrus.Warnf("Failed to remove bridge interface (%s): %v", bridge.DefaultBridgeName, err) + } + } +} + +func (daemon *Daemon) getLayerInit() func(string) error { + return daemon.setupInitLayer +} + +// Parse the remapped root (user namespace) option, which can be one of: +// username - valid username from /etc/passwd +// username:groupname - valid username; valid groupname from /etc/group 
+// uid - 32-bit unsigned int valid Linux UID value +// uid:gid - uid value; 32-bit unsigned int Linux GID value +// +// If no groupname is specified, and a username is specified, an attempt +// will be made to lookup a gid for that username as a groupname +// +// If names are used, they are verified to exist in passwd/group +func parseRemappedRoot(usergrp string) (string, string, error) { + + var ( + userID, groupID int + username, groupname string + ) + + idparts := strings.Split(usergrp, ":") + if len(idparts) > 2 { + return "", "", fmt.Errorf("Invalid user/group specification in --userns-remap: %q", usergrp) + } + + if uid, err := strconv.ParseInt(idparts[0], 10, 32); err == nil { + // must be a uid; take it as valid + userID = int(uid) + luser, err := idtools.LookupUID(userID) + if err != nil { + return "", "", fmt.Errorf("Uid %d has no entry in /etc/passwd: %v", userID, err) + } + username = luser.Name + if len(idparts) == 1 { + // if the uid was numeric and no gid was specified, take the uid as the gid + groupID = userID + lgrp, err := idtools.LookupGID(groupID) + if err != nil { + return "", "", fmt.Errorf("Gid %d has no entry in /etc/group: %v", groupID, err) + } + groupname = lgrp.Name + } + } else { + lookupName := idparts[0] + // special case: if the user specified "default", they want Docker to create or + // use (after creation) the "dockremap" user/group for root remapping + if lookupName == defaultIDSpecifier { + lookupName = defaultRemappedID + } + luser, err := idtools.LookupUser(lookupName) + if err != nil && idparts[0] != defaultIDSpecifier { + // error if the name requested isn't the special "dockremap" ID + return "", "", fmt.Errorf("Error during uid lookup for %q: %v", lookupName, err) + } else if err != nil { + // special case-- if the username == "default", then we have been asked + // to create a new entry pair in /etc/{passwd,group} for which the /etc/sub{uid,gid} + // ranges will be used for the user and group mappings in user namespaced containers + _, _, err := idtools.AddNamespaceRangesUser(defaultRemappedID) + if err == nil { + return defaultRemappedID, defaultRemappedID, nil + } + return "", "", fmt.Errorf("Error during %q user creation: %v", defaultRemappedID, err) + } + username = luser.Name + if len(idparts) == 1 { + // we only have a string username, and no group specified; look up gid from username as group + group, err := idtools.LookupGroup(lookupName) + if err != nil { + return "", "", fmt.Errorf("Error during gid lookup for %q: %v", lookupName, err) + } + groupname = group.Name + } + } + + if len(idparts) == 2 { + // groupname or gid is separately specified and must be resolved + // to an unsigned 32-bit gid + if gid, err := strconv.ParseInt(idparts[1], 10, 32); err == nil { + // must be a gid, take it as valid + groupID = int(gid) + lgrp, err := idtools.LookupGID(groupID) + if err != nil { + return "", "", fmt.Errorf("Gid %d has no entry in /etc/passwd: %v", groupID, err) + } + groupname = lgrp.Name + } else { + // not a number; attempt a lookup + if _, err := idtools.LookupGroup(idparts[1]); err != nil { + return "", "", fmt.Errorf("Error during groupname lookup for %q: %v", idparts[1], err) + } + groupname = idparts[1] + } + } + return username, groupname, nil +} + +func setupRemappedRoot(config *config.Config) (*idtools.IDMappings, error) { + if runtime.GOOS != "linux" && config.RemappedRoot != "" { + return nil, fmt.Errorf("User namespaces are only supported on Linux") + } + + // if the daemon was started with remapped root option, parse + // 
the config option to the int uid,gid values
+	if config.RemappedRoot != "" {
+		username, groupname, err := parseRemappedRoot(config.RemappedRoot)
+		if err != nil {
+			return nil, err
+		}
+		if username == "root" {
+			// Cannot set up user namespaces with a 1-to-1 mapping; "--root=0:0"
+			// is effectively a no-op
+			logrus.Warn("User namespaces: root cannot be remapped with itself; user namespaces are OFF")
+			return &idtools.IDMappings{}, nil
+		}
+		logrus.Infof("User namespaces: ID ranges will be mapped to subuid/subgid ranges of: %s:%s", username, groupname)
+		// update remapped root setting now that we have resolved them to actual names
+		config.RemappedRoot = fmt.Sprintf("%s:%s", username, groupname)
+
+		mappings, err := idtools.NewIDMappings(username, groupname)
+		if err != nil {
+			return nil, errors.Wrap(err, "Can't create ID mappings")
+		}
+		return mappings, nil
+	}
+	return &idtools.IDMappings{}, nil
+}
+
+func setupDaemonRoot(config *config.Config, rootDir string, rootIDs idtools.IDPair) error {
+	config.Root = rootDir
+	// the docker root metadata directory needs to have execute permissions for all users (g+x,o+x)
+	// so that syscalls executing as non-root, operating on subdirectories of the graph root
+	// (e.g. mounted layers of a container) can traverse this path.
+	// The user namespace support will create subdirectories for the remapped root host uid:gid
+	// pair owned by that same uid:gid pair for proper write access to those needed metadata and
+	// layer content subtrees.
+	if _, err := os.Stat(rootDir); err == nil {
+		// root currently exists; verify the access bits are correct by setting them
+		if err = os.Chmod(rootDir, 0711); err != nil {
+			return err
+		}
+	} else if os.IsNotExist(err) {
+		// no root exists yet, create it 0711 with root:root ownership
+		if err := os.MkdirAll(rootDir, 0711); err != nil {
+			return err
+		}
+	}
+
+	// if user namespaces are enabled we will create a subtree underneath the specified root
+	// with any/all specified remapped root uid/gid options on the daemon creating
+	// a new subdirectory with ownership set to the remapped uid/gid (so as to allow
+	// `chdir()` to work for containers namespaced to that uid/gid)
+	if config.RemappedRoot != "" {
+		config.Root = filepath.Join(rootDir, fmt.Sprintf("%d.%d", rootIDs.UID, rootIDs.GID))
+		logrus.Debugf("Creating user namespaced daemon root: %s", config.Root)
+		// Create the root directory if it doesn't exist
+		if err := idtools.MkdirAllAndChown(config.Root, 0700, rootIDs); err != nil {
+			return fmt.Errorf("Cannot create daemon root: %s: %v", config.Root, err)
+		}
+		// we also need to verify that any pre-existing directories in the path to
+		// the graphroot won't block access to remapped root--if any pre-existing directory
+		// has strict permissions that don't allow "x", container start will fail, so
+		// better to warn and fail now
+		dirPath := config.Root
+		for {
+			dirPath = filepath.Dir(dirPath)
+			if dirPath == "/" {
+				break
+			}
+			if !idtools.CanAccess(dirPath, rootIDs) {
+				return fmt.Errorf("A subdirectory in your graphroot path (%s) restricts access to the remapped root uid/gid; please fix by allowing 'o+x' permissions on existing directories.", config.Root)
+			}
+		}
+	}
+	return nil
+}
+
+// registerLinks writes the links to a file.
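+// For illustration: a legacy link such as --link db:database is parsed by
+// opts.ParseLink into name "db" and alias "database"; each named child
+// container is then resolved (following container-mode network chains) and
+// registered under its alias before the host config is written back out.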
+func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { + if hostConfig == nil || hostConfig.NetworkMode.IsUserDefined() { + return nil + } + + for _, l := range hostConfig.Links { + name, alias, err := opts.ParseLink(l) + if err != nil { + return err + } + child, err := daemon.GetContainer(name) + if err != nil { + return fmt.Errorf("Could not get container for %s", name) + } + for child.HostConfig.NetworkMode.IsContainer() { + parts := strings.SplitN(string(child.HostConfig.NetworkMode), ":", 2) + child, err = daemon.GetContainer(parts[1]) + if err != nil { + return fmt.Errorf("Could not get container for %s", parts[1]) + } + } + if child.HostConfig.NetworkMode.IsHost() { + return runconfig.ErrConflictHostNetworkAndLinks + } + if err := daemon.registerLink(container, child, alias); err != nil { + return err + } + } + + // After we load all the links into the daemon + // set them to nil on the hostconfig + _, err := container.WriteHostConfig() + return err +} + +// conditionalMountOnStart is a platform specific helper function during the +// container start to call mount. +func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { + return daemon.Mount(container) +} + +// conditionalUnmountOnCleanup is a platform specific helper function called +// during the cleanup of a container to unmount. +func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { + return daemon.Unmount(container) +} + +func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { + if !c.IsRunning() { + return nil, errNotRunning{c.ID} + } + stats, err := daemon.containerd.Stats(c.ID) + if err != nil { + if strings.Contains(err.Error(), "container not found") { + return nil, errNotFound{c.ID} + } + return nil, err + } + s := &types.StatsJSON{} + cgs := stats.CgroupStats + if cgs != nil { + s.BlkioStats = types.BlkioStats{ + IoServiceBytesRecursive: copyBlkioEntry(cgs.BlkioStats.IoServiceBytesRecursive), + IoServicedRecursive: copyBlkioEntry(cgs.BlkioStats.IoServicedRecursive), + IoQueuedRecursive: copyBlkioEntry(cgs.BlkioStats.IoQueuedRecursive), + IoServiceTimeRecursive: copyBlkioEntry(cgs.BlkioStats.IoServiceTimeRecursive), + IoWaitTimeRecursive: copyBlkioEntry(cgs.BlkioStats.IoWaitTimeRecursive), + IoMergedRecursive: copyBlkioEntry(cgs.BlkioStats.IoMergedRecursive), + IoTimeRecursive: copyBlkioEntry(cgs.BlkioStats.IoTimeRecursive), + SectorsRecursive: copyBlkioEntry(cgs.BlkioStats.SectorsRecursive), + } + cpu := cgs.CpuStats + s.CPUStats = types.CPUStats{ + CPUUsage: types.CPUUsage{ + TotalUsage: cpu.CpuUsage.TotalUsage, + PercpuUsage: cpu.CpuUsage.PercpuUsage, + UsageInKernelmode: cpu.CpuUsage.UsageInKernelmode, + UsageInUsermode: cpu.CpuUsage.UsageInUsermode, + }, + ThrottlingData: types.ThrottlingData{ + Periods: cpu.ThrottlingData.Periods, + ThrottledPeriods: cpu.ThrottlingData.ThrottledPeriods, + ThrottledTime: cpu.ThrottlingData.ThrottledTime, + }, + } + mem := cgs.MemoryStats.Usage + s.MemoryStats = types.MemoryStats{ + Usage: mem.Usage, + MaxUsage: mem.MaxUsage, + Stats: cgs.MemoryStats.Stats, + Failcnt: mem.Failcnt, + Limit: mem.Limit, + } + // if the container does not set memory limit, use the machineMemory + if mem.Limit > daemon.machineMemory && daemon.machineMemory > 0 { + s.MemoryStats.Limit = daemon.machineMemory + } + if cgs.PidsStats != nil { + s.PidsStats = types.PidsStats{ + Current: cgs.PidsStats.Current, + } + } + } + s.Read, err = 
ptypes.Timestamp(stats.Timestamp)
+	if err != nil {
+		return nil, err
+	}
+	return s, nil
+}
+
+// setDefaultIsolation determines the default isolation mode for the
+// daemon to run in. This is only applicable on Windows.
+func (daemon *Daemon) setDefaultIsolation() error {
+	return nil
+}
+
+func rootFSToAPIType(rootfs *image.RootFS) types.RootFS {
+	var layers []string
+	for _, l := range rootfs.DiffIDs {
+		layers = append(layers, l.String())
+	}
+	return types.RootFS{
+		Type:   rootfs.Type,
+		Layers: layers,
+	}
+}
+
+// setupDaemonProcess sets various settings for the daemon's process
+func setupDaemonProcess(config *config.Config) error {
+	// set up the daemon's oom_score_adj
+	return setupOOMScoreAdj(config.OOMScoreAdjust)
+}
+
+func setupOOMScoreAdj(score int) error {
+	f, err := os.OpenFile("/proc/self/oom_score_adj", os.O_WRONLY, 0)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	stringScore := strconv.Itoa(score)
+	_, err = f.WriteString(stringScore)
+	if os.IsPermission(err) {
+		// Setting oom_score_adj does not work in an
+		// unprivileged container. Ignore the error, but log
+		// it if we appear not to be in that situation.
+		if !rsystem.RunningInUserNS() {
+			logrus.Debugf("Permission denied writing %q to /proc/self/oom_score_adj", stringScore)
+		}
+		return nil
+	}
+
+	return err
+}
+
+func (daemon *Daemon) initCgroupsPath(path string) error {
+	if path == "/" || path == "." {
+		return nil
+	}
+
+	if daemon.configStore.CPURealtimePeriod == 0 && daemon.configStore.CPURealtimeRuntime == 0 {
+		return nil
+	}
+
+	// Recursively create cgroup to ensure that the system and all parent cgroups have values set
+	// for the period and runtime as this limits what the children can be set to.
+	daemon.initCgroupsPath(filepath.Dir(path))
+
+	mnt, root, err := cgroups.FindCgroupMountpointAndRoot("cpu")
+	if err != nil {
+		return err
+	}
+	// When docker is run inside docker, the root is based on the host cgroup.
+	// Should this be handled in runc/libcontainer/cgroups ?
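+	// e.g. a root of "/docker/<id>" reported from inside a nested daemon is
+	// collapsed to "/" so that the joined path below resolves against the
+	// cgroup mountpoint itself rather than the nested hierarchy.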
+	if strings.HasPrefix(root, "/docker/") {
+		root = "/"
+	}
+
+	path = filepath.Join(mnt, root, path)
+	sysinfo := sysinfo.New(true)
+	if err := maybeCreateCPURealTimeFile(sysinfo.CPURealtimePeriod, daemon.configStore.CPURealtimePeriod, "cpu.rt_period_us", path); err != nil {
+		return err
+	}
+	if err := maybeCreateCPURealTimeFile(sysinfo.CPURealtimeRuntime, daemon.configStore.CPURealtimeRuntime, "cpu.rt_runtime_us", path); err != nil {
+		return err
+	}
+	return nil
+}
+
+func maybeCreateCPURealTimeFile(sysinfoPresent bool, configValue int64, file string, path string) error {
+	if sysinfoPresent && configValue != 0 {
+		if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) {
+			return err
+		}
+		if err := ioutil.WriteFile(filepath.Join(path, file), []byte(strconv.FormatInt(configValue, 10)), 0700); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (daemon *Daemon) setupSeccompProfile() error {
+	if daemon.configStore.SeccompProfile != "" {
+		daemon.seccompProfilePath = daemon.configStore.SeccompProfile
+		b, err := ioutil.ReadFile(daemon.configStore.SeccompProfile)
+		if err != nil {
+			return fmt.Errorf("opening seccomp profile (%s) failed: %v", daemon.configStore.SeccompProfile, err)
+		}
+		daemon.seccompProfile = b
+	}
+	return nil
+}
diff --git a/vendor/github.com/moby/moby/daemon/daemon_unix_test.go b/vendor/github.com/moby/moby/daemon/daemon_unix_test.go
new file mode 100644
index 000000000..c3aa443e4
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/daemon_unix_test.go
@@ -0,0 +1,318 @@
+// +build !windows,!solaris
+
+package daemon
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+
+	containertypes "github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/daemon/config"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/docker/docker/volume"
+	"github.com/docker/docker/volume/drivers"
+	"github.com/docker/docker/volume/local"
+	"github.com/docker/docker/volume/store"
+)
+
+// Unix test, as it uses settings which are not available on Windows
+func TestAdjustCPUShares(t *testing.T) {
+	tmp, err := ioutil.TempDir("", "docker-daemon-unix-test-")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmp)
+	daemon := &Daemon{
+		repository: tmp,
+		root:       tmp,
+	}
+
+	hostConfig := &containertypes.HostConfig{
+		Resources: containertypes.Resources{CPUShares: linuxMinCPUShares - 1},
+	}
+	daemon.adaptContainerSettings(hostConfig, true)
+	if hostConfig.CPUShares != linuxMinCPUShares {
+		t.Errorf("Expected CPUShares to be %d", linuxMinCPUShares)
+	}
+
+	hostConfig.CPUShares = linuxMaxCPUShares + 1
+	daemon.adaptContainerSettings(hostConfig, true)
+	if hostConfig.CPUShares != linuxMaxCPUShares {
+		t.Errorf("Expected CPUShares to be %d", linuxMaxCPUShares)
+	}
+
+	hostConfig.CPUShares = 0
+	daemon.adaptContainerSettings(hostConfig, true)
+	if hostConfig.CPUShares != 0 {
+		t.Error("Expected CPUShares to be unchanged")
+	}
+
+	hostConfig.CPUShares = 1024
+	daemon.adaptContainerSettings(hostConfig, true)
+	if hostConfig.CPUShares != 1024 {
+		t.Error("Expected CPUShares to be unchanged")
+	}
+}
+
+// Unix test, as it uses settings which are not available on Windows
+func TestAdjustCPUSharesNoAdjustment(t *testing.T) {
+	tmp, err := ioutil.TempDir("", "docker-daemon-unix-test-")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmp)
+	daemon := &Daemon{
+		repository: tmp,
+		root:       tmp,
+	}
+
+	hostConfig := &containertypes.HostConfig{
+		Resources: containertypes.Resources{CPUShares: linuxMinCPUShares - 1},
+	}
+	daemon.adaptContainerSettings(hostConfig, false)
+	if hostConfig.CPUShares != linuxMinCPUShares-1 {
+		t.Errorf("Expected CPUShares to be %d", linuxMinCPUShares-1)
+	}
+
+	hostConfig.CPUShares = linuxMaxCPUShares + 1
+	daemon.adaptContainerSettings(hostConfig, false)
+	if hostConfig.CPUShares != linuxMaxCPUShares+1 {
+		t.Errorf("Expected CPUShares to be %d", linuxMaxCPUShares+1)
+	}
+
+	hostConfig.CPUShares = 0
+	daemon.adaptContainerSettings(hostConfig, false)
+	if hostConfig.CPUShares != 0 {
+		t.Error("Expected CPUShares to be unchanged")
+	}
+
+	hostConfig.CPUShares = 1024
+	daemon.adaptContainerSettings(hostConfig, false)
+	if hostConfig.CPUShares != 1024 {
+		t.Error("Expected CPUShares to be unchanged")
+	}
+}
+
+// Unix test, as it uses settings which are not available on Windows
+func TestParseSecurityOptWithDeprecatedColon(t *testing.T) {
+	container := &container.Container{}
+	config := &containertypes.HostConfig{}
+
+	// test apparmor
+	config.SecurityOpt = []string{"apparmor=test_profile"}
+	if err := parseSecurityOpt(container, config); err != nil {
+		t.Fatalf("Unexpected parseSecurityOpt error: %v", err)
+	}
+	if container.AppArmorProfile != "test_profile" {
+		t.Fatalf("Unexpected AppArmorProfile, expected: \"test_profile\", got %q", container.AppArmorProfile)
+	}
+
+	// test seccomp
+	sp := "/path/to/seccomp_test.json"
+	config.SecurityOpt = []string{"seccomp=" + sp}
+	if err := parseSecurityOpt(container, config); err != nil {
+		t.Fatalf("Unexpected parseSecurityOpt error: %v", err)
+	}
+	if container.SeccompProfile != sp {
+		t.Fatalf("Unexpected SeccompProfile, expected: %q, got %q", sp, container.SeccompProfile)
+	}
+
+	// test valid label
+	config.SecurityOpt = []string{"label=user:USER"}
+	if err := parseSecurityOpt(container, config); err != nil {
+		t.Fatalf("Unexpected parseSecurityOpt error: %v", err)
+	}
+
+	// test invalid label
+	config.SecurityOpt = []string{"label"}
+	if err := parseSecurityOpt(container, config); err == nil {
+		t.Fatal("Expected parseSecurityOpt error, got nil")
+	}
+
+	// test invalid opt
+	config.SecurityOpt = []string{"test"}
+	if err := parseSecurityOpt(container, config); err == nil {
+		t.Fatal("Expected parseSecurityOpt error, got nil")
+	}
+}
+
+func TestParseSecurityOpt(t *testing.T) {
+	container := &container.Container{}
+	config := &containertypes.HostConfig{}
+
+	// test apparmor
+	config.SecurityOpt = []string{"apparmor=test_profile"}
+	if err := parseSecurityOpt(container, config); err != nil {
+		t.Fatalf("Unexpected parseSecurityOpt error: %v", err)
+	}
+	if container.AppArmorProfile != "test_profile" {
+		t.Fatalf("Unexpected AppArmorProfile, expected: \"test_profile\", got %q", container.AppArmorProfile)
+	}
+
+	// test seccomp
+	sp := "/path/to/seccomp_test.json"
+	config.SecurityOpt = []string{"seccomp=" + sp}
+	if err := parseSecurityOpt(container, config); err != nil {
+		t.Fatalf("Unexpected parseSecurityOpt error: %v", err)
+	}
+	if container.SeccompProfile != sp {
+		t.Fatalf("Unexpected SeccompProfile, expected: %q, got %q", sp, container.SeccompProfile)
+	}
+
+	// test valid label
+	config.SecurityOpt = []string{"label=user:USER"}
+	if err := parseSecurityOpt(container, config); err != nil {
+		t.Fatalf("Unexpected parseSecurityOpt error: %v", err)
+	}
+
+	// test invalid label
+	config.SecurityOpt = []string{"label"}
+	if err := parseSecurityOpt(container, config); err == nil {
+		t.Fatal("Expected parseSecurityOpt error, got nil")
+	}
+
+	// test invalid opt
+	config.SecurityOpt = []string{"test"}
+	if err := 
parseSecurityOpt(container, config); err == nil {
+		t.Fatal("Expected parseSecurityOpt error, got nil")
+	}
+}
+
+func TestParseNNPSecurityOptions(t *testing.T) {
+	daemon := &Daemon{
+		configStore: &config.Config{NoNewPrivileges: true},
+	}
+	container := &container.Container{}
+	config := &containertypes.HostConfig{}
+
+	// test NNP when "daemon:true" and "no-new-privileges=false"
+	config.SecurityOpt = []string{"no-new-privileges=false"}
+
+	if err := daemon.parseSecurityOpt(container, config); err != nil {
+		t.Fatalf("Unexpected daemon.parseSecurityOpt error: %v", err)
+	}
+	if container.NoNewPrivileges {
+		t.Fatalf("container.NoNewPrivileges should be FALSE: %v", container.NoNewPrivileges)
+	}
+
+	// test NNP when "daemon:false" and "no-new-privileges=true"
+	daemon.configStore.NoNewPrivileges = false
+	config.SecurityOpt = []string{"no-new-privileges=true"}
+
+	if err := daemon.parseSecurityOpt(container, config); err != nil {
+		t.Fatalf("Unexpected daemon.parseSecurityOpt error: %v", err)
+	}
+	if !container.NoNewPrivileges {
+		t.Fatalf("container.NoNewPrivileges should be TRUE: %v", container.NoNewPrivileges)
+	}
+}
+
+func TestNetworkOptions(t *testing.T) {
+	daemon := &Daemon{}
+	dconfigCorrect := &config.Config{
+		CommonConfig: config.CommonConfig{
+			ClusterStore:     "consul://localhost:8500",
+			ClusterAdvertise: "192.168.0.1:8000",
+		},
+	}
+
+	if _, err := daemon.networkOptions(dconfigCorrect, nil, nil); err != nil {
+		t.Fatalf("Expect networkOptions success, got error: %v", err)
+	}
+
+	dconfigWrong := &config.Config{
+		CommonConfig: config.CommonConfig{
+			ClusterStore: "consul://localhost:8500://test://bbb",
+		},
+	}
+
+	if _, err := daemon.networkOptions(dconfigWrong, nil, nil); err == nil {
+		t.Fatal("Expected networkOptions error, got nil")
+	}
+}
+
+func TestMigratePre17Volumes(t *testing.T) {
+	rootDir, err := ioutil.TempDir("", "test-daemon-volumes")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(rootDir)
+
+	volumeRoot := filepath.Join(rootDir, "volumes")
+	err = os.MkdirAll(volumeRoot, 0755)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	containerRoot := filepath.Join(rootDir, "containers")
+	cid := "1234"
+	err = os.MkdirAll(filepath.Join(containerRoot, cid), 0755)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	vid := "5678"
+	vfsPath := filepath.Join(rootDir, "vfs", "dir", vid)
+	err = os.MkdirAll(vfsPath, 0755)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	config := []byte(`
+		{
+			"ID": "` + cid + `",
+			"Volumes": {
+				"/foo": "` + vfsPath + `",
+				"/bar": "/foo",
+				"/quux": "/quux"
+			},
+			"VolumesRW": {
+				"/foo": true,
+				"/bar": true,
+				"/quux": false
+			}
+		}
+	`)
+
+	volStore, err := store.New(volumeRoot)
+	if err != nil {
+		t.Fatal(err)
+	}
+	drv, err := local.New(volumeRoot, idtools.IDPair{UID: 0, GID: 0})
+	if err != nil {
+		t.Fatal(err)
+	}
+	volumedrivers.Register(drv, volume.DefaultDriverName)
+
+	daemon := &Daemon{
+		root:       rootDir,
+		repository: containerRoot,
+		volumes:    volStore,
+	}
+	err = ioutil.WriteFile(filepath.Join(containerRoot, cid, "config.v2.json"), config, 0600)
+	if err != nil {
+		t.Fatal(err)
+	}
+	c, err := daemon.load(cid)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := daemon.verifyVolumesInfo(c); err != nil {
+		t.Fatal(err)
+	}
+
+	expected := map[string]volume.MountPoint{
+		"/foo":  {Destination: "/foo", RW: true, Name: vid},
+		"/bar":  {Source: "/foo", Destination: "/bar", RW: true},
+		"/quux": {Source: "/quux", Destination: "/quux", RW: false},
+	}
+	for id, mp := range c.MountPoints {
+		x, exists := expected[id]
+		if !exists {
+			t.Fatal("volume not migrated")
+		}
+		if mp.Source
!= x.Source || mp.Destination != x.Destination || mp.RW != x.RW || mp.Name != x.Name { + t.Fatalf("got unexpected mountpoint, expected: %+v, got: %+v", x, mp) + } + } +} diff --git a/vendor/github.com/moby/moby/daemon/daemon_unsupported.go b/vendor/github.com/moby/moby/daemon/daemon_unsupported.go new file mode 100644 index 000000000..cb1acf63d --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/daemon_unsupported.go @@ -0,0 +1,5 @@ +// +build !linux,!freebsd,!windows,!solaris + +package daemon + +const platformSupported = false diff --git a/vendor/github.com/moby/moby/daemon/daemon_windows.go b/vendor/github.com/moby/moby/daemon/daemon_windows.go new file mode 100644 index 000000000..798ba3997 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/daemon_windows.go @@ -0,0 +1,657 @@ +package daemon + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/Microsoft/hcsshim" + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/platform" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/runconfig" + "github.com/docker/libnetwork" + nwconfig "github.com/docker/libnetwork/config" + "github.com/docker/libnetwork/datastore" + winlibnetwork "github.com/docker/libnetwork/drivers/windows" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/options" + blkiodev "github.com/opencontainers/runc/libcontainer/configs" + "golang.org/x/sys/windows" +) + +const ( + defaultNetworkSpace = "172.16.0.0/12" + platformSupported = true + windowsMinCPUShares = 1 + windowsMaxCPUShares = 10000 + windowsMinCPUPercent = 1 + windowsMaxCPUPercent = 100 + windowsMinCPUCount = 1 + + errInvalidState = syscall.Errno(0x139F) +) + +// Windows has no concept of an execution state directory. So use config.Root here. +func getPluginExecRoot(root string) string { + return filepath.Join(root, "plugins") +} + +func getBlkioWeightDevices(config *containertypes.HostConfig) ([]blkiodev.WeightDevice, error) { + return nil, nil +} + +func (daemon *Daemon) parseSecurityOpt(container *container.Container, hostConfig *containertypes.HostConfig) error { + return parseSecurityOpt(container, hostConfig) +} + +func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { + return nil +} + +func getBlkioReadIOpsDevices(config *containertypes.HostConfig) ([]blkiodev.ThrottleDevice, error) { + return nil, nil +} + +func getBlkioWriteIOpsDevices(config *containertypes.HostConfig) ([]blkiodev.ThrottleDevice, error) { + return nil, nil +} + +func getBlkioReadBpsDevices(config *containertypes.HostConfig) ([]blkiodev.ThrottleDevice, error) { + return nil, nil +} + +func getBlkioWriteBpsDevices(config *containertypes.HostConfig) ([]blkiodev.ThrottleDevice, error) { + return nil, nil +} + +func (daemon *Daemon) getLayerInit() func(string) error { + return nil +} + +func checkKernel() error { + return nil +} + +func (daemon *Daemon) getCgroupDriver() string { + return "" +} + +// adaptContainerSettings is called during container creation to modify any +// settings necessary in the HostConfig structure. 
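+// On Windows this is currently a no-op. For contrast, a minimal sketch of the
+// Linux clamping behaviour exercised by TestAdjustCPUShares above (assuming
+// the linuxMinCPUShares/linuxMaxCPUShares bounds used there); it only applies
+// when the caller asks for adjustment:
+//
+//	if adjustCPUShares && hostConfig.CPUShares > 0 {
+//		if hostConfig.CPUShares < linuxMinCPUShares {
+//			hostConfig.CPUShares = linuxMinCPUShares
+//		} else if hostConfig.CPUShares > linuxMaxCPUShares {
+//			hostConfig.CPUShares = linuxMaxCPUShares
+//		}
+//	}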
+func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { + if hostConfig == nil { + return nil + } + + return nil +} + +func verifyContainerResources(resources *containertypes.Resources, isHyperv bool) ([]string, error) { + warnings := []string{} + fixMemorySwappiness(resources) + if !isHyperv { + // The processor resource controls are mutually exclusive on + // Windows Server Containers, the order of precedence is + // CPUCount first, then CPUShares, and CPUPercent last. + if resources.CPUCount > 0 { + if resources.CPUShares > 0 { + warnings = append(warnings, "Conflicting options: CPU count takes priority over CPU shares on Windows Server Containers. CPU shares discarded") + logrus.Warn("Conflicting options: CPU count takes priority over CPU shares on Windows Server Containers. CPU shares discarded") + resources.CPUShares = 0 + } + if resources.CPUPercent > 0 { + warnings = append(warnings, "Conflicting options: CPU count takes priority over CPU percent on Windows Server Containers. CPU percent discarded") + logrus.Warn("Conflicting options: CPU count takes priority over CPU percent on Windows Server Containers. CPU percent discarded") + resources.CPUPercent = 0 + } + } else if resources.CPUShares > 0 { + if resources.CPUPercent > 0 { + warnings = append(warnings, "Conflicting options: CPU shares takes priority over CPU percent on Windows Server Containers. CPU percent discarded") + logrus.Warn("Conflicting options: CPU shares takes priority over CPU percent on Windows Server Containers. CPU percent discarded") + resources.CPUPercent = 0 + } + } + } + + if resources.CPUShares < 0 || resources.CPUShares > windowsMaxCPUShares { + return warnings, fmt.Errorf("range of CPUShares is from %d to %d", windowsMinCPUShares, windowsMaxCPUShares) + } + if resources.CPUPercent < 0 || resources.CPUPercent > windowsMaxCPUPercent { + return warnings, fmt.Errorf("range of CPUPercent is from %d to %d", windowsMinCPUPercent, windowsMaxCPUPercent) + } + if resources.CPUCount < 0 { + return warnings, fmt.Errorf("invalid CPUCount: CPUCount cannot be negative") + } + + if resources.NanoCPUs > 0 && resources.CPUPercent > 0 { + return warnings, fmt.Errorf("conflicting options: Nano CPUs and CPU Percent cannot both be set") + } + if resources.NanoCPUs > 0 && resources.CPUShares > 0 { + return warnings, fmt.Errorf("conflicting options: Nano CPUs and CPU Shares cannot both be set") + } + // The precision we could get is 0.01, because on Windows we have to convert to CPUPercent. + // We don't set the lower limit here and it is up to the underlying platform (e.g., Windows) to return an error. + if resources.NanoCPUs < 0 || resources.NanoCPUs > int64(sysinfo.NumCPU())*1e9 { + return warnings, fmt.Errorf("range of CPUs is from 0.01 to %d.00, as there are only %d CPUs available", sysinfo.NumCPU(), sysinfo.NumCPU()) + } + + osv := system.GetOSVersion() + if resources.NanoCPUs > 0 && isHyperv && osv.Build < 16175 { + leftoverNanoCPUs := resources.NanoCPUs % 1e9 + if leftoverNanoCPUs != 0 && resources.NanoCPUs > 1e9 { + resources.NanoCPUs = ((resources.NanoCPUs + 1e9/2) / 1e9) * 1e9 + warningString := fmt.Sprintf("Your current OS version does not support Hyper-V containers with NanoCPUs greater than 1000000000 but not divisible by 1000000000. 
NanoCPUs rounded to %d", resources.NanoCPUs) + warnings = append(warnings, warningString) + logrus.Warn(warningString) + } + } + + if len(resources.BlkioDeviceReadBps) > 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceReadBps") + } + if len(resources.BlkioDeviceReadIOps) > 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceReadIOps") + } + if len(resources.BlkioDeviceWriteBps) > 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceWriteBps") + } + if len(resources.BlkioDeviceWriteIOps) > 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceWriteIOps") + } + if resources.BlkioWeight > 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support BlkioWeight") + } + if len(resources.BlkioWeightDevice) > 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support BlkioWeightDevice") + } + if resources.CgroupParent != "" { + return warnings, fmt.Errorf("invalid option: Windows does not support CgroupParent") + } + if resources.CPUPeriod != 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support CPUPeriod") + } + if resources.CpusetCpus != "" { + return warnings, fmt.Errorf("invalid option: Windows does not support CpusetCpus") + } + if resources.CpusetMems != "" { + return warnings, fmt.Errorf("invalid option: Windows does not support CpusetMems") + } + if resources.KernelMemory != 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support KernelMemory") + } + if resources.MemoryReservation != 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support MemoryReservation") + } + if resources.MemorySwap != 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support MemorySwap") + } + if resources.MemorySwappiness != nil { + return warnings, fmt.Errorf("invalid option: Windows does not support MemorySwappiness") + } + if resources.OomKillDisable != nil && *resources.OomKillDisable { + return warnings, fmt.Errorf("invalid option: Windows does not support OomKillDisable") + } + if resources.PidsLimit != 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support PidsLimit") + } + if len(resources.Ulimits) != 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support Ulimits") + } + return warnings, nil +} + +// verifyPlatformContainerSettings performs platform-specific validation of the +// hostconfig and config structures. +func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { + warnings := []string{} + + hyperv := daemon.runAsHyperVContainer(hostConfig) + if !hyperv && system.IsWindowsClient() && !system.IsIoTCore() { + // @engine maintainers. This block should not be removed. It partially enforces licensing + // restrictions on Windows. Ping @jhowardmsft if there are concerns or PRs to change this. + return warnings, fmt.Errorf("Windows client operating systems only support Hyper-V containers") + } + + w, err := verifyContainerResources(&hostConfig.Resources, hyperv) + warnings = append(warnings, w...) 
+ return warnings, err +} + +// reloadPlatform updates configuration with platform specific options +// and updates the passed attributes +func (daemon *Daemon) reloadPlatform(config *config.Config, attributes map[string]string) { +} + +// verifyDaemonSettings performs validation of daemon config struct +func verifyDaemonSettings(config *config.Config) error { + return nil +} + +// checkSystem validates platform-specific requirements +func checkSystem() error { + // Validate the OS version. Note that docker.exe must be manifested for this + // call to return the correct version. + osv := system.GetOSVersion() + if osv.MajorVersion < 10 { + return fmt.Errorf("This version of Windows does not support the docker daemon") + } + if osv.Build < 14393 { + return fmt.Errorf("The docker daemon requires build 14393 or later of Windows Server 2016 or Windows 10") + } + + vmcompute := windows.NewLazySystemDLL("vmcompute.dll") + if vmcompute.Load() != nil { + return fmt.Errorf("Failed to load vmcompute.dll. Ensure that the Containers role is installed.") + } + + return nil +} + +// configureKernelSecuritySupport configures and validate security support for the kernel +func configureKernelSecuritySupport(config *config.Config, driverNames []string) error { + return nil +} + +// configureMaxThreads sets the Go runtime max threads threshold +func configureMaxThreads(config *config.Config) error { + return nil +} + +func (daemon *Daemon) initNetworkController(config *config.Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { + netOptions, err := daemon.networkOptions(config, nil, nil) + if err != nil { + return nil, err + } + controller, err := libnetwork.New(netOptions...) + if err != nil { + return nil, fmt.Errorf("error obtaining controller instance: %v", err) + } + + hnsresponse, err := hcsshim.HNSListNetworkRequest("GET", "", "") + if err != nil { + return nil, err + } + + // Remove networks not present in HNS + for _, v := range controller.Networks() { + options := v.Info().DriverOptions() + hnsid := options[winlibnetwork.HNSID] + found := false + + for _, v := range hnsresponse { + if v.Id == hnsid { + found = true + break + } + } + + if !found { + // global networks should not be deleted by local HNS + if v.Info().Scope() != datastore.GlobalScope { + err = v.Delete() + if err != nil { + logrus.Errorf("Error occurred when removing network %v", err) + } + } + } + } + + _, err = controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(false)) + if err != nil { + return nil, err + } + + defaultNetworkExists := false + + if network, err := controller.NetworkByName(runconfig.DefaultDaemonNetworkMode().NetworkName()); err == nil { + options := network.Info().DriverOptions() + for _, v := range hnsresponse { + if options[winlibnetwork.HNSID] == v.Id { + defaultNetworkExists = true + break + } + } + } + + // discover and add HNS networks to windows + // network that exist are removed and added again + for _, v := range hnsresponse { + if strings.ToLower(v.Type) == "private" { + continue // workaround for HNS reporting unsupported networks + } + var n libnetwork.Network + s := func(current libnetwork.Network) bool { + options := current.Info().DriverOptions() + if options[winlibnetwork.HNSID] == v.Id { + n = current + return true + } + return false + } + + controller.WalkNetworks(s) + if n != nil { + // global networks should not be deleted by local HNS + if n.Info().Scope() == datastore.GlobalScope { + continue + } + v.Name = n.Name() + // This will not 
cause network delete from HNS as the network + // is not yet populated in the libnetwork windows driver + n.Delete() + } + + netOption := map[string]string{ + winlibnetwork.NetworkName: v.Name, + winlibnetwork.HNSID: v.Id, + } + + v4Conf := []*libnetwork.IpamConf{} + for _, subnet := range v.Subnets { + ipamV4Conf := libnetwork.IpamConf{} + ipamV4Conf.PreferredPool = subnet.AddressPrefix + ipamV4Conf.Gateway = subnet.GatewayAddress + v4Conf = append(v4Conf, &ipamV4Conf) + } + + name := v.Name + + // If there is no nat network create one from the first NAT network + // encountered if it doesn't already exist + if !defaultNetworkExists && + runconfig.DefaultDaemonNetworkMode() == containertypes.NetworkMode(strings.ToLower(v.Type)) && + n == nil { + name = runconfig.DefaultDaemonNetworkMode().NetworkName() + defaultNetworkExists = true + } + + v6Conf := []*libnetwork.IpamConf{} + _, err := controller.NewNetwork(strings.ToLower(v.Type), name, "", + libnetwork.NetworkOptionGeneric(options.Generic{ + netlabel.GenericData: netOption, + }), + libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), + ) + + if err != nil { + logrus.Errorf("Error occurred when creating network %v", err) + } + } + + if !config.DisableBridge { + // Initialize default driver "bridge" + if err := initBridgeDriver(controller, config); err != nil { + return nil, err + } + } + + return controller, nil +} + +func initBridgeDriver(controller libnetwork.NetworkController, config *config.Config) error { + if _, err := controller.NetworkByName(runconfig.DefaultDaemonNetworkMode().NetworkName()); err == nil { + return nil + } + + netOption := map[string]string{ + winlibnetwork.NetworkName: runconfig.DefaultDaemonNetworkMode().NetworkName(), + } + + var ipamOption libnetwork.NetworkOption + var subnetPrefix string + + if config.BridgeConfig.FixedCIDR != "" { + subnetPrefix = config.BridgeConfig.FixedCIDR + } else { + // TP5 doesn't support properly detecting subnet + osv := system.GetOSVersion() + if osv.Build < 14360 { + subnetPrefix = defaultNetworkSpace + } + } + + if subnetPrefix != "" { + ipamV4Conf := libnetwork.IpamConf{} + ipamV4Conf.PreferredPool = subnetPrefix + v4Conf := []*libnetwork.IpamConf{&ipamV4Conf} + v6Conf := []*libnetwork.IpamConf{} + ipamOption = libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil) + } + + _, err := controller.NewNetwork(string(runconfig.DefaultDaemonNetworkMode()), runconfig.DefaultDaemonNetworkMode().NetworkName(), "", + libnetwork.NetworkOptionGeneric(options.Generic{ + netlabel.GenericData: netOption, + }), + ipamOption, + ) + + if err != nil { + return fmt.Errorf("Error creating default network: %v", err) + } + + return nil +} + +// registerLinks sets up links between containers and writes the +// configuration out for persistence. As of Windows TP4, links are not supported. 
+func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error {
+	return nil
+}
+
+func (daemon *Daemon) cleanupMountsByID(in string) error {
+	return nil
+}
+
+func (daemon *Daemon) cleanupMounts() error {
+	return nil
+}
+
+func setupRemappedRoot(config *config.Config) (*idtools.IDMappings, error) {
+	return &idtools.IDMappings{}, nil
+}
+
+func setupDaemonRoot(config *config.Config, rootDir string, rootIDs idtools.IDPair) error {
+	config.Root = rootDir
+	// Create the root directory if it doesn't exist
+	if err := system.MkdirAllWithACL(config.Root, 0, system.SddlAdministratorsLocalSystem); err != nil && !os.IsExist(err) {
+		return err
+	}
+	return nil
+}
+
+// runAsHyperVContainer returns true if we are going to run as a Hyper-V container
+func (daemon *Daemon) runAsHyperVContainer(hostConfig *containertypes.HostConfig) bool {
+	if hostConfig.Isolation.IsDefault() {
+		// Container is set to use the default, so take the default from the daemon configuration
+		return daemon.defaultIsolation.IsHyperV()
+	}
+
+	// Container is requesting an isolation mode. Honour it.
+	return hostConfig.Isolation.IsHyperV()
+}
+
+// conditionalMountOnStart is a platform specific helper function during the
+// container start to call mount.
+func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error {
+	// Bail out now for Linux containers
+	if system.LCOWSupported() && container.Platform != "windows" {
+		return nil
+	}
+
+	// We do not mount if a Hyper-V container
+	if !daemon.runAsHyperVContainer(container.HostConfig) {
+		return daemon.Mount(container)
+	}
+	return nil
+}
+
+// conditionalUnmountOnCleanup is a platform specific helper function called
+// during the cleanup of a container to unmount.
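+// The decision mirrors conditionalMountOnStart (sketch of the cases handled
+// below):
+//
+//	LCOW Linux container     -> no daemon-side unmount
+//	process-isolated Windows -> daemon.Unmount(container)
+//	Hyper-V container        -> no-op; the utility VM owns the mount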
+func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error {
+	// Bail out now for Linux containers
+	if system.LCOWSupported() && container.Platform != "windows" {
+		return nil
+	}
+
+	// We do not unmount if a Hyper-V container
+	if !daemon.runAsHyperVContainer(container.HostConfig) {
+		return daemon.Unmount(container)
+	}
+	return nil
+}
+
+func driverOptions(config *config.Config) []nwconfig.Option {
+	return []nwconfig.Option{}
+}
+
+func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) {
+	if !c.IsRunning() {
+		return nil, errNotRunning{c.ID}
+	}
+
+	// Obtain the stats from HCS via libcontainerd
+	stats, err := daemon.containerd.Stats(c.ID)
+	if err != nil {
+		if strings.Contains(err.Error(), "container not found") {
+			return nil, errNotFound{c.ID}
+		}
+		return nil, err
+	}
+
+	// Start with an empty structure
+	s := &types.StatsJSON{}
+
+	// Populate the CPU/processor statistics
+	s.CPUStats = types.CPUStats{
+		CPUUsage: types.CPUUsage{
+			TotalUsage:        stats.Processor.TotalRuntime100ns,
+			UsageInKernelmode: stats.Processor.RuntimeKernel100ns,
+			UsageInUsermode:   stats.Processor.RuntimeUser100ns,
+		},
+	}
+
+	// Populate the memory statistics
+	s.MemoryStats = types.MemoryStats{
+		Commit:            stats.Memory.UsageCommitBytes,
+		CommitPeak:        stats.Memory.UsageCommitPeakBytes,
+		PrivateWorkingSet: stats.Memory.UsagePrivateWorkingSetBytes,
+	}
+
+	// Populate the storage statistics
+	s.StorageStats = types.StorageStats{
+		ReadCountNormalized:  stats.Storage.ReadCountNormalized,
+		ReadSizeBytes:        stats.Storage.ReadSizeBytes,
+		WriteCountNormalized: stats.Storage.WriteCountNormalized,
+		WriteSizeBytes:       stats.Storage.WriteSizeBytes,
+	}
+
+	// Populate the network statistics
+	s.Networks = make(map[string]types.NetworkStats)
+
+	for _, nstats := range stats.Network {
+		s.Networks[nstats.EndpointId] = types.NetworkStats{
+			RxBytes:   nstats.BytesReceived,
+			RxPackets: nstats.PacketsReceived,
+			RxDropped: nstats.DroppedPacketsIncoming,
+			TxBytes:   nstats.BytesSent,
+			TxPackets: nstats.PacketsSent,
+			TxDropped: nstats.DroppedPacketsOutgoing,
+		}
+	}
+
+	// Set the timestamp
+	s.Stats.Read = stats.Timestamp
+	s.Stats.NumProcs = platform.NumProcs()
+
+	return s, nil
+}
+
+// setDefaultIsolation determines the default isolation mode for the
+// daemon to run in. This is only applicable on Windows
+func (daemon *Daemon) setDefaultIsolation() error {
+	daemon.defaultIsolation = containertypes.Isolation("process")
+	// On client SKUs, default to Hyper-V. Note that IoT reports as a client SKU
+	// but it should not be treated as such.
+	if system.IsWindowsClient() && !system.IsIoTCore() {
+		daemon.defaultIsolation = containertypes.Isolation("hyperv")
+	}
+	for _, option := range daemon.configStore.ExecOptions {
+		key, val, err := parsers.ParseKeyValueOpt(option)
+		if err != nil {
+			return err
+		}
+		key = strings.ToLower(key)
+		switch key {
+		case "isolation":
+			if !containertypes.Isolation(val).IsValid() {
+				return fmt.Errorf("Invalid exec-opt value for 'isolation':'%s'", val)
+			}
+			if containertypes.Isolation(val).IsHyperV() {
+				daemon.defaultIsolation = containertypes.Isolation("hyperv")
+			}
+			if containertypes.Isolation(val).IsProcess() {
+				if system.IsWindowsClient() && !system.IsIoTCore() {
+					// @engine maintainers. This block should not be removed. It partially enforces licensing
+					// restrictions on Windows. Ping @jhowardmsft if there are concerns or PRs to change this.
+ return fmt.Errorf("Windows client operating systems only support Hyper-V containers") + } + daemon.defaultIsolation = containertypes.Isolation("process") + } + default: + return fmt.Errorf("Unrecognised exec-opt '%s'\n", key) + } + } + + logrus.Infof("Windows default isolation mode: %s", daemon.defaultIsolation) + return nil +} + +func rootFSToAPIType(rootfs *image.RootFS) types.RootFS { + var layers []string + for _, l := range rootfs.DiffIDs { + layers = append(layers, l.String()) + } + return types.RootFS{ + Type: rootfs.Type, + Layers: layers, + } +} + +func setupDaemonProcess(config *config.Config) error { + return nil +} + +// verifyVolumesInfo is a no-op on windows. +// This is called during daemon initialization to migrate volumes from pre-1.7. +// volumes were not supported on windows pre-1.7 +func (daemon *Daemon) verifyVolumesInfo(container *container.Container) error { + return nil +} + +func (daemon *Daemon) setupSeccompProfile() error { + return nil +} + +func getRealPath(path string) (string, error) { + if system.IsIoTCore() { + // Due to https://github.com/golang/go/issues/20506, path expansion + // does not work correctly on the default IoT Core configuration. + // TODO @darrenstahlmsft remove this once golang/go/20506 is fixed + return path, nil + } + return fileutils.ReadSymlinkedDirectory(path) +} diff --git a/vendor/github.com/moby/moby/daemon/debugtrap_unix.go b/vendor/github.com/moby/moby/daemon/debugtrap_unix.go new file mode 100644 index 000000000..39298dfd6 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/debugtrap_unix.go @@ -0,0 +1,27 @@ +// +build !windows + +package daemon + +import ( + "os" + "os/signal" + + "github.com/Sirupsen/logrus" + stackdump "github.com/docker/docker/pkg/signal" + "golang.org/x/sys/unix" +) + +func (d *Daemon) setupDumpStackTrap(root string) { + c := make(chan os.Signal, 1) + signal.Notify(c, unix.SIGUSR1) + go func() { + for range c { + path, err := stackdump.DumpStacks(root) + if err != nil { + logrus.WithError(err).Error("failed to write goroutines dump") + } else { + logrus.Infof("goroutine stacks written to %s", path) + } + } + }() +} diff --git a/vendor/github.com/moby/moby/daemon/debugtrap_unsupported.go b/vendor/github.com/moby/moby/daemon/debugtrap_unsupported.go new file mode 100644 index 000000000..f5b917090 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/debugtrap_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux,!darwin,!freebsd,!windows,!solaris + +package daemon + +func (d *Daemon) setupDumpStackTrap(_ string) { + return +} diff --git a/vendor/github.com/moby/moby/daemon/debugtrap_windows.go b/vendor/github.com/moby/moby/daemon/debugtrap_windows.go new file mode 100644 index 000000000..dfaf95324 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/debugtrap_windows.go @@ -0,0 +1,46 @@ +package daemon + +import ( + "fmt" + "os" + "unsafe" + + winio "github.com/Microsoft/go-winio" + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/system" + "golang.org/x/sys/windows" +) + +func (d *Daemon) setupDumpStackTrap(root string) { + // Windows does not support signals like *nix systems. So instead of + // trapping on SIGUSR1 to dump stacks, we wait on a Win32 event to be + // signaled. 
ACL'd to builtin administrators and local system + ev := "Global\\docker-daemon-" + fmt.Sprint(os.Getpid()) + sd, err := winio.SddlToSecurityDescriptor("D:P(A;;GA;;;BA)(A;;GA;;;SY)") + if err != nil { + logrus.Errorf("failed to get security descriptor for debug stackdump event %s: %s", ev, err.Error()) + return + } + var sa windows.SecurityAttributes + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0])) + h, err := system.CreateEvent(&sa, false, false, ev) + if h == 0 || err != nil { + logrus.Errorf("failed to create debug stackdump event %s: %s", ev, err.Error()) + return + } + go func() { + logrus.Debugf("Stackdump - waiting signal at %s", ev) + for { + windows.WaitForSingleObject(h, windows.INFINITE) + path, err := signal.DumpStacks(root) + if err != nil { + logrus.WithError(err).Error("failed to write goroutines dump") + } else { + logrus.Infof("goroutine stacks written to %s", path) + } + } + }() +} diff --git a/vendor/github.com/moby/moby/daemon/delete.go b/vendor/github.com/moby/moby/daemon/delete.go new file mode 100644 index 000000000..61ee6eb68 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/delete.go @@ -0,0 +1,171 @@ +package daemon + +import ( + "fmt" + "os" + "path" + "strings" + "time" + + "github.com/Sirupsen/logrus" + apierrors "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + "github.com/docker/docker/container" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/system" + volumestore "github.com/docker/docker/volume/store" + "github.com/pkg/errors" +) + +// ContainerRm removes the container id from the filesystem. An error +// is returned if the container is not found, or if the remove +// fails. If the remove succeeds, the container name is released, and +// network links are removed. +func (daemon *Daemon) ContainerRm(name string, config *types.ContainerRmConfig) error { + start := time.Now() + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + // Container state RemovalInProgress should be used to avoid races. 
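+	// For example, two concurrent `docker rm -f` calls for the same container:
+	// only the first SetRemovalInProgress succeeds, the second sees
+	// inProgress == true and is rejected with a 400 below instead of racing
+	// the cleanup.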
+ if inProgress := container.SetRemovalInProgress(); inProgress { + err := fmt.Errorf("removal of container %s is already in progress", name) + return apierrors.NewBadRequestError(err) + } + defer container.ResetRemovalInProgress() + + // check if container wasn't deregistered by previous rm since Get + if c := daemon.containers.Get(container.ID); c == nil { + return nil + } + + if config.RemoveLink { + return daemon.rmLink(container, name) + } + + err = daemon.cleanupContainer(container, config.ForceRemove, config.RemoveVolume) + containerActions.WithValues("delete").UpdateSince(start) + + return err +} + +func (daemon *Daemon) rmLink(container *container.Container, name string) error { + if name[0] != '/' { + name = "/" + name + } + parent, n := path.Split(name) + if parent == "/" { + return fmt.Errorf("Conflict, cannot remove the default name of the container") + } + + parent = strings.TrimSuffix(parent, "/") + pe, err := daemon.containersReplica.Snapshot().GetID(parent) + if err != nil { + return fmt.Errorf("Cannot get parent %s for name %s", parent, name) + } + + daemon.releaseName(name) + parentContainer, _ := daemon.GetContainer(pe) + if parentContainer != nil { + daemon.linkIndex.unlink(name, container, parentContainer) + if err := daemon.updateNetwork(parentContainer); err != nil { + logrus.Debugf("Could not update network to remove link %s: %v", n, err) + } + } + return nil +} + +// cleanupContainer unregisters a container from the daemon, stops stats +// collection and cleanly removes contents and metadata from the filesystem. +func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemove, removeVolume bool) (err error) { + if container.IsRunning() { + if !forceRemove { + state := container.StateString() + procedure := "Stop the container before attempting removal or force remove" + if state == "paused" { + procedure = "Unpause and then " + strings.ToLower(procedure) + } + err := fmt.Errorf("You cannot remove a %s container %s. %s", state, container.ID, procedure) + return apierrors.NewRequestConflictError(err) + } + if err := daemon.Kill(container); err != nil { + return fmt.Errorf("Could not kill running container %s, cannot remove - %v", container.ID, err) + } + } + + // stop collection of stats for the container regardless + // if stats are currently getting collected. + daemon.statsCollector.StopCollection(container) + + if err = daemon.containerStop(container, 3); err != nil { + return err + } + + // Mark container dead. We don't want anybody to be restarting it. + container.Lock() + container.Dead = true + + // Save container state to disk. So that if error happens before + // container meta file got removed from disk, then a restart of + // docker should not make a dead container alive. 
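+	// i.e. if the daemon restarts before the container directory is removed,
+	// it reloads a container already marked Dead and will not resurrect it.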
+	if err := container.CheckpointTo(daemon.containersReplica); err != nil && !os.IsNotExist(err) {
+		logrus.Errorf("Error saving dying container to disk: %v", err)
+	}
+	container.Unlock()
+
+	// When container creation fails and `RWLayer` has not been created yet, we
+	// do not call `ReleaseRWLayer`
+	if container.RWLayer != nil {
+		metadata, err := daemon.stores[container.Platform].layerStore.ReleaseRWLayer(container.RWLayer)
+		layer.LogReleaseMetadata(metadata)
+		if err != nil && err != layer.ErrMountDoesNotExist && !os.IsNotExist(errors.Cause(err)) {
+			return errors.Wrapf(err, "driver %q failed to remove root filesystem for %s", daemon.GraphDriverName(container.Platform), container.ID)
+		}
+	}
+
+	if err := system.EnsureRemoveAll(container.Root); err != nil {
+		return errors.Wrapf(err, "unable to remove filesystem for %s", container.ID)
+	}
+
+	daemon.linkIndex.delete(container)
+	selinuxFreeLxcContexts(container.ProcessLabel)
+	daemon.idIndex.Delete(container.ID)
+	daemon.containers.Delete(container.ID)
+	daemon.containersReplica.Delete(container)
+	if e := daemon.removeMountPoints(container, removeVolume); e != nil {
+		logrus.Error(e)
+	}
+	container.SetRemoved()
+	stateCtr.del(container.ID)
+	daemon.LogContainerEvent(container, "destroy")
+	return nil
+}
+
+// VolumeRm removes the volume with the given name.
+// If the volume is referenced by a container it is not removed
+// This is called directly from the Engine API
+func (daemon *Daemon) VolumeRm(name string, force bool) error {
+	err := daemon.volumeRm(name)
+	if err != nil && volumestore.IsInUse(err) {
+		return apierrors.NewRequestConflictError(err)
+	}
+	if err == nil || force {
+		daemon.volumes.Purge(name)
+		return nil
+	}
+	return err
+}
+
+func (daemon *Daemon) volumeRm(name string) error {
+	v, err := daemon.volumes.Get(name)
+	if err != nil {
+		return err
+	}
+
+	if err := daemon.volumes.Remove(v); err != nil {
+		return errors.Wrap(err, "unable to remove volume")
+	}
+	daemon.LogVolumeEvent(v.Name(), "destroy", map[string]string{"driver": v.DriverName()})
+	return nil
+}
diff --git a/vendor/github.com/moby/moby/daemon/delete_test.go b/vendor/github.com/moby/moby/daemon/delete_test.go
new file mode 100644
index 000000000..f1a979003
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/delete_test.go
@@ -0,0 +1,96 @@
+package daemon
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"testing"
+
+	"github.com/docker/docker/api/types"
+	containertypes "github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/pkg/testutil"
+	"github.com/stretchr/testify/require"
+)
+
+func newDaemonWithTmpRoot(t *testing.T) (*Daemon, func()) {
+	tmp, err := ioutil.TempDir("", "docker-daemon-unix-test-")
+	require.NoError(t, err)
+	d := &Daemon{
+		repository: tmp,
+		root:       tmp,
+	}
+	d.containers = container.NewMemoryStore()
+	return d, func() { os.RemoveAll(tmp) }
+}
+
+func newContainerWithState(state *container.State) *container.Container {
+	return &container.Container{
+		ID:     "test",
+		State:  state,
+		Config: &containertypes.Config{},
+	}
+}
+
+// TestContainerDelete tests that a useful error message and instructions are
+// given when attempting to remove a container (#30842)
+func TestContainerDelete(t *testing.T) {
+	tt := []struct {
+		errMsg        string
+		fixMsg        string
+		initContainer func() *container.Container
+	}{
+		// a paused container
+		{
+			errMsg: "cannot remove a paused container",
+			fixMsg: "Unpause and then stop the container before attempting removal or force remove",
initContainer: func() *container.Container { + return newContainerWithState(&container.State{Paused: true, Running: true}) + }}, + // a restarting container + { + errMsg: "cannot remove a restarting container", + fixMsg: "Stop the container before attempting removal or force remove", + initContainer: func() *container.Container { + c := newContainerWithState(container.NewState()) + c.SetRunning(0, true) + c.SetRestarting(&container.ExitStatus{}) + return c + }}, + // a running container + { + errMsg: "cannot remove a running container", + fixMsg: "Stop the container before attempting removal or force remove", + initContainer: func() *container.Container { + return newContainerWithState(&container.State{Running: true}) + }}, + } + + for _, te := range tt { + c := te.initContainer() + d, cleanup := newDaemonWithTmpRoot(t) + defer cleanup() + d.containers.Add(c.ID, c) + + err := d.ContainerRm(c.ID, &types.ContainerRmConfig{ForceRemove: false}) + testutil.ErrorContains(t, err, te.errMsg) + testutil.ErrorContains(t, err, te.fixMsg) + } +} + +func TestContainerDoubleDelete(t *testing.T) { + c := newContainerWithState(container.NewState()) + + // Mark the container as having a delete in progress + c.SetRemovalInProgress() + + d, cleanup := newDaemonWithTmpRoot(t) + defer cleanup() + d.containers.Add(c.ID, c) + + // Try to remove the container when its state is removalInProgress. + // It should return an error indicating it is under removal progress. + err := d.ContainerRm(c.ID, &types.ContainerRmConfig{ForceRemove: true}) + testutil.ErrorContains(t, err, fmt.Sprintf("removal of container %s is already in progress", c.ID)) +} diff --git a/vendor/github.com/moby/moby/daemon/dependency.go b/vendor/github.com/moby/moby/daemon/dependency.go new file mode 100644 index 000000000..83144e686 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/dependency.go @@ -0,0 +1,17 @@ +package daemon + +import ( + "github.com/docker/swarmkit/agent/exec" +) + +// SetContainerDependencyStore sets the dependency store backend for the container +func (daemon *Daemon) SetContainerDependencyStore(name string, store exec.DependencyGetter) error { + c, err := daemon.GetContainer(name) + if err != nil { + return err + } + + c.DependencyStore = store + + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/discovery/discovery.go b/vendor/github.com/moby/moby/daemon/discovery/discovery.go new file mode 100644 index 000000000..509155cbb --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/discovery/discovery.go @@ -0,0 +1,202 @@ +package discovery + +import ( + "errors" + "fmt" + "strconv" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/discovery" + + // Register the libkv backends for discovery. + _ "github.com/docker/docker/pkg/discovery/kv" +) + +const ( + // defaultDiscoveryHeartbeat is the default value for discovery heartbeat interval. 
+ defaultDiscoveryHeartbeat = 20 * time.Second + // defaultDiscoveryTTLFactor is the default TTL factor for discovery + defaultDiscoveryTTLFactor = 3 +) + +// ErrDiscoveryDisabled is an error returned if the discovery is disabled +var ErrDiscoveryDisabled = errors.New("discovery is disabled") + +// Reloader is the discovery reloader of the daemon +type Reloader interface { + discovery.Watcher + Stop() + Reload(backend, address string, clusterOpts map[string]string) error + ReadyCh() <-chan struct{} +} + +type daemonDiscoveryReloader struct { + backend discovery.Backend + ticker *time.Ticker + term chan bool + readyCh chan struct{} +} + +func (d *daemonDiscoveryReloader) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { + return d.backend.Watch(stopCh) +} + +func (d *daemonDiscoveryReloader) ReadyCh() <-chan struct{} { + return d.readyCh +} + +func discoveryOpts(clusterOpts map[string]string) (time.Duration, time.Duration, error) { + var ( + heartbeat = defaultDiscoveryHeartbeat + ttl = defaultDiscoveryTTLFactor * defaultDiscoveryHeartbeat + ) + + if hb, ok := clusterOpts["discovery.heartbeat"]; ok { + h, err := strconv.Atoi(hb) + if err != nil { + return time.Duration(0), time.Duration(0), err + } + + if h <= 0 { + return time.Duration(0), time.Duration(0), + fmt.Errorf("discovery.heartbeat must be positive") + } + + heartbeat = time.Duration(h) * time.Second + ttl = defaultDiscoveryTTLFactor * heartbeat + } + + if tstr, ok := clusterOpts["discovery.ttl"]; ok { + t, err := strconv.Atoi(tstr) + if err != nil { + return time.Duration(0), time.Duration(0), err + } + + if t <= 0 { + return time.Duration(0), time.Duration(0), + fmt.Errorf("discovery.ttl must be positive") + } + + ttl = time.Duration(t) * time.Second + + if _, ok := clusterOpts["discovery.heartbeat"]; !ok { + heartbeat = time.Duration(t) * time.Second / time.Duration(defaultDiscoveryTTLFactor) + } + + if ttl <= heartbeat { + return time.Duration(0), time.Duration(0), + fmt.Errorf("discovery.ttl timer must be greater than discovery.heartbeat") + } + } + + return heartbeat, ttl, nil +} + +// Init initializes the nodes discovery subsystem by connecting to the specified backend +// and starts a registration loop to advertise the current node under the specified address. +func Init(backendAddress, advertiseAddress string, clusterOpts map[string]string) (Reloader, error) { + heartbeat, backend, err := parseDiscoveryOptions(backendAddress, clusterOpts) + if err != nil { + return nil, err + } + + reloader := &daemonDiscoveryReloader{ + backend: backend, + ticker: time.NewTicker(heartbeat), + term: make(chan bool), + readyCh: make(chan struct{}), + } + // We call Register() on the discovery backend in a loop for the whole lifetime of the daemon, + // but we never actually Watch() for nodes appearing and disappearing for the moment. + go reloader.advertiseHeartbeat(advertiseAddress) + return reloader, nil +} + +// advertiseHeartbeat registers the current node against the discovery backend using the specified +// address. The function never returns, as registration against the backend comes with a TTL and +// requires regular heartbeats. 
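+// With the defaults above a registration is refreshed every 20s and carries a
+// 60s TTL (heartbeat * defaultDiscoveryTTLFactor), so roughly two consecutive
+// heartbeats can be missed before the node expires from the backend:
+//
+//	heartbeat, ttl, _ := discoveryOpts(nil) // 20s, 60s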
+func (d *daemonDiscoveryReloader) advertiseHeartbeat(address string) { + var ready bool + if err := d.initHeartbeat(address); err == nil { + ready = true + close(d.readyCh) + } else { + logrus.WithError(err).Debug("First discovery heartbeat failed") + } + + for { + select { + case <-d.ticker.C: + if err := d.backend.Register(address); err != nil { + logrus.Warnf("Registering as %q in discovery failed: %v", address, err) + } else { + if !ready { + close(d.readyCh) + ready = true + } + } + case <-d.term: + return + } + } +} + +// initHeartbeat is used to do the first heartbeat. It uses a tight loop until +// either the timeout period is reached or the heartbeat is successful and returns. +func (d *daemonDiscoveryReloader) initHeartbeat(address string) error { + // Setup a short ticker until the first heartbeat has succeeded + t := time.NewTicker(500 * time.Millisecond) + defer t.Stop() + // timeout makes sure that after a period of time we stop being so aggressive trying to reach the discovery service + timeout := time.After(60 * time.Second) + + for { + select { + case <-timeout: + return errors.New("timeout waiting for initial discovery") + case <-d.term: + return errors.New("terminated") + case <-t.C: + if err := d.backend.Register(address); err == nil { + return nil + } + } + } +} + +// Reload makes the watcher to stop advertising and reconfigures it to advertise in a new address. +func (d *daemonDiscoveryReloader) Reload(backendAddress, advertiseAddress string, clusterOpts map[string]string) error { + d.Stop() + + heartbeat, backend, err := parseDiscoveryOptions(backendAddress, clusterOpts) + if err != nil { + return err + } + + d.backend = backend + d.ticker = time.NewTicker(heartbeat) + d.readyCh = make(chan struct{}) + + go d.advertiseHeartbeat(advertiseAddress) + return nil +} + +// Stop terminates the discovery advertising. 
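+// A typical lifecycle, as a rough sketch (backend and addresses are
+// illustrative, assuming a reachable consul agent):
+//
+//	r, _ := Init("consul://localhost:8500", "192.168.0.1:2376", nil)
+//	<-r.ReadyCh() // first heartbeat registered
+//	_ = r.Reload("consul://localhost:8500", "192.168.0.1:2377", nil)
+//	r.Stop()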
+func (d *daemonDiscoveryReloader) Stop() { + d.ticker.Stop() + d.term <- true +} + +func parseDiscoveryOptions(backendAddress string, clusterOpts map[string]string) (time.Duration, discovery.Backend, error) { + heartbeat, ttl, err := discoveryOpts(clusterOpts) + if err != nil { + return 0, nil, err + } + + backend, err := discovery.New(backendAddress, heartbeat, ttl, clusterOpts) + if err != nil { + return 0, nil, err + } + return heartbeat, backend, nil +} diff --git a/vendor/github.com/moby/moby/daemon/discovery/discovery_test.go b/vendor/github.com/moby/moby/daemon/discovery/discovery_test.go new file mode 100644 index 000000000..f084a649a --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/discovery/discovery_test.go @@ -0,0 +1,111 @@ +package discovery + +import ( + "fmt" + "testing" + "time" +) + +func TestDiscoveryOpts(t *testing.T) { + clusterOpts := map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "5"} + heartbeat, ttl, err := discoveryOpts(clusterOpts) + if err == nil { + t.Fatal("discovery.ttl < discovery.heartbeat must fail") + } + + clusterOpts = map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "10"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err == nil { + t.Fatal("discovery.ttl == discovery.heartbeat must fail") + } + + clusterOpts = map[string]string{"discovery.heartbeat": "-10", "discovery.ttl": "10"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err == nil { + t.Fatal("negative discovery.heartbeat must fail") + } + + clusterOpts = map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "-10"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err == nil { + t.Fatal("negative discovery.ttl must fail") + } + + clusterOpts = map[string]string{"discovery.heartbeat": "invalid"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err == nil { + t.Fatal("invalid discovery.heartbeat must fail") + } + + clusterOpts = map[string]string{"discovery.ttl": "invalid"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err == nil { + t.Fatal("invalid discovery.ttl must fail") + } + + clusterOpts = map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "20"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err != nil { + t.Fatal(err) + } + + if heartbeat != 10*time.Second { + t.Fatalf("Heartbeat - Expected : %v, Actual : %v", 10*time.Second, heartbeat) + } + + if ttl != 20*time.Second { + t.Fatalf("TTL - Expected : %v, Actual : %v", 20*time.Second, ttl) + } + + clusterOpts = map[string]string{"discovery.heartbeat": "10"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err != nil { + t.Fatal(err) + } + + if heartbeat != 10*time.Second { + t.Fatalf("Heartbeat - Expected : %v, Actual : %v", 10*time.Second, heartbeat) + } + + expected := 10 * defaultDiscoveryTTLFactor * time.Second + if ttl != expected { + t.Fatalf("TTL - Expected : %v, Actual : %v", expected, ttl) + } + + clusterOpts = map[string]string{"discovery.ttl": "30"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err != nil { + t.Fatal(err) + } + + if ttl != 30*time.Second { + t.Fatalf("TTL - Expected : %v, Actual : %v", 30*time.Second, ttl) + } + + expected = 30 * time.Second / defaultDiscoveryTTLFactor + if heartbeat != expected { + t.Fatalf("Heartbeat - Expected : %v, Actual : %v", expected, heartbeat) + } + + discoveryTTL := fmt.Sprintf("%d", defaultDiscoveryTTLFactor-1) + clusterOpts = map[string]string{"discovery.ttl": discoveryTTL} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err == nil && heartbeat == 0 
{ + t.Fatal("discovery.heartbeat must be positive") + } + + clusterOpts = map[string]string{} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err != nil { + t.Fatal(err) + } + + if heartbeat != defaultDiscoveryHeartbeat { + t.Fatalf("Heartbeat - Expected : %v, Actual : %v", defaultDiscoveryHeartbeat, heartbeat) + } + + expected = defaultDiscoveryHeartbeat * defaultDiscoveryTTLFactor + if ttl != expected { + t.Fatalf("TTL - Expected : %v, Actual : %v", expected, ttl) + } +} diff --git a/vendor/github.com/moby/moby/daemon/disk_usage.go b/vendor/github.com/moby/moby/daemon/disk_usage.go new file mode 100644 index 000000000..c64a24330 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/disk_usage.go @@ -0,0 +1,128 @@ +package daemon + +import ( + "fmt" + "sync/atomic" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/directory" + "github.com/docker/docker/volume" + "github.com/opencontainers/go-digest" +) + +func (daemon *Daemon) getLayerRefs(platform string) map[layer.ChainID]int { + tmpImages := daemon.stores[platform].imageStore.Map() + layerRefs := map[layer.ChainID]int{} + for id, img := range tmpImages { + dgst := digest.Digest(id) + if len(daemon.stores[platform].referenceStore.References(dgst)) == 0 && len(daemon.stores[platform].imageStore.Children(id)) != 0 { + continue + } + + rootFS := *img.RootFS + rootFS.DiffIDs = nil + for _, id := range img.RootFS.DiffIDs { + rootFS.Append(id) + chid := rootFS.ChainID() + layerRefs[chid]++ + } + } + + return layerRefs +} + +// SystemDiskUsage returns information about the daemon data disk usage +func (daemon *Daemon) SystemDiskUsage(ctx context.Context) (*types.DiskUsage, error) { + if !atomic.CompareAndSwapInt32(&daemon.diskUsageRunning, 0, 1) { + return nil, fmt.Errorf("a disk usage operation is already running") + } + defer atomic.StoreInt32(&daemon.diskUsageRunning, 0) + + // Retrieve container list + allContainers, err := daemon.Containers(&types.ContainerListOptions{ + Size: true, + All: true, + }) + if err != nil { + return nil, fmt.Errorf("failed to retrieve container list: %v", err) + } + + // Get all top images with extra attributes + // TODO @jhowardmsft LCOW. This may need revisiting + allImages, err := daemon.Images(filters.NewArgs(), false, true) + if err != nil { + return nil, fmt.Errorf("failed to retrieve image list: %v", err) + } + + // Get all local volumes + allVolumes := []*types.Volume{} + getLocalVols := func(v volume.Volume) error { + select { + case <-ctx.Done(): + return ctx.Err() + default: + if d, ok := v.(volume.DetailedVolume); ok { + // skip local volumes with mount options since these could have external + // mounted filesystems that will be slow to enumerate. 
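+			// e.g. a volume created with `docker volume create --opt type=nfs ...`
+			// on the local driver is skipped here, while plain local volumes are
+			// sized below via directory.Size.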
+				if len(d.Options()) > 0 {
+					return nil
+				}
+			}
+			name := v.Name()
+			refs := daemon.volumes.Refs(v)
+
+			tv := volumeToAPIType(v)
+			sz, err := directory.Size(v.Path())
+			if err != nil {
+				logrus.Warnf("failed to determine size of volume %v", name)
+				sz = -1
+			}
+			tv.UsageData = &types.VolumeUsageData{Size: sz, RefCount: int64(len(refs))}
+			allVolumes = append(allVolumes, tv)
+		}
+
+		return nil
+	}
+
+	err = daemon.traverseLocalVolumes(getLocalVols)
+	if err != nil {
+		return nil, err
+	}
+
+	// Get total layers size on disk
+	var allLayersSize int64
+	for platform := range daemon.stores {
+		layerRefs := daemon.getLayerRefs(platform)
+		allLayers := daemon.stores[platform].layerStore.Map()
+		for _, l := range allLayers {
+			select {
+			case <-ctx.Done():
+				return nil, ctx.Err()
+			default:
+				size, err := l.DiffSize()
+				if err == nil {
+					if _, ok := layerRefs[l.ChainID()]; ok {
+						allLayersSize += size
+					} else {
+						logrus.Warnf("found leaked image layer %v platform %s", l.ChainID(), platform)
+					}
+				} else {
+					logrus.Warnf("failed to get diff size for layer %v %s", l.ChainID(), platform)
+				}
+			}
+		}
+	}
+
+	return &types.DiskUsage{
+		LayersSize: allLayersSize,
+		Containers: allContainers,
+		Volumes:    allVolumes,
+		Images:     allImages,
+	}, nil
+}
diff --git a/vendor/github.com/moby/moby/daemon/errors.go b/vendor/github.com/moby/moby/daemon/errors.go
new file mode 100644
index 000000000..5050f87e4
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/errors.go
@@ -0,0 +1,53 @@
+package daemon
+
+import (
+	"fmt"
+
+	"github.com/docker/docker/api/errors"
+)
+
+func (d *Daemon) imageNotExistToErrcode(err error) error {
+	if dne, isDNE := err.(ErrImageDoesNotExist); isDNE {
+		return errors.NewRequestNotFoundError(dne)
+	}
+	return err
+}
+
+type errNotRunning struct {
+	containerID string
+}
+
+func (e errNotRunning) Error() string {
+	return fmt.Sprintf("Container %s is not running", e.containerID)
+}
+
+func (e errNotRunning) ContainerIsRunning() bool {
+	return false
+}
+
+func errContainerIsRestarting(containerID string) error {
+	err := fmt.Errorf("Container %s is restarting, wait until the container is running", containerID)
+	return errors.NewRequestConflictError(err)
+}
+
+func errExecNotFound(id string) error {
+	err := fmt.Errorf("No such exec instance '%s' found in daemon", id)
+	return errors.NewRequestNotFoundError(err)
+}
+
+func errExecPaused(id string) error {
+	err := fmt.Errorf("Container %s is paused, unpause the container before exec", id)
+	return errors.NewRequestConflictError(err)
+}
+
+type errNotFound struct {
+	containerID string
+}
+
+func (e errNotFound) Error() string {
+	return fmt.Sprintf("Container %s is not found", e.containerID)
+}
+
+func (e errNotFound) ContainerNotFound() bool {
+	return true
+}
diff --git a/vendor/github.com/moby/moby/daemon/events.go b/vendor/github.com/moby/moby/daemon/events.go
new file mode 100644
index 000000000..7ae851802
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/events.go
@@ -0,0 +1,332 @@
+package daemon
+
+import (
+	"context"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api/types/events"
+	"github.com/docker/docker/api/types/filters"
+	"github.com/docker/docker/container"
+	daemonevents "github.com/docker/docker/daemon/events"
+	"github.com/docker/libnetwork"
+	swarmapi "github.com/docker/swarmkit/api"
+	gogotypes "github.com/gogo/protobuf/types"
+)
+
+var (
+	clusterEventAction = map[swarmapi.WatchActionKind]string{
+		swarmapi.WatchActionKindCreate:
"create", + swarmapi.WatchActionKindUpdate: "update", + swarmapi.WatchActionKindRemove: "remove", + } +) + +// LogContainerEvent generates an event related to a container with only the default attributes. +func (daemon *Daemon) LogContainerEvent(container *container.Container, action string) { + daemon.LogContainerEventWithAttributes(container, action, map[string]string{}) +} + +// LogContainerEventWithAttributes generates an event related to a container with specific given attributes. +func (daemon *Daemon) LogContainerEventWithAttributes(container *container.Container, action string, attributes map[string]string) { + copyAttributes(attributes, container.Config.Labels) + if container.Config.Image != "" { + attributes["image"] = container.Config.Image + } + attributes["name"] = strings.TrimLeft(container.Name, "/") + + actor := events.Actor{ + ID: container.ID, + Attributes: attributes, + } + daemon.EventsService.Log(action, events.ContainerEventType, actor) +} + +// LogImageEvent generates an event related to an image with only the default attributes. +func (daemon *Daemon) LogImageEvent(imageID, refName, action string) { + daemon.LogImageEventWithAttributes(imageID, refName, action, map[string]string{}) +} + +// LogImageEventWithAttributes generates an event related to an image with specific given attributes. +func (daemon *Daemon) LogImageEventWithAttributes(imageID, refName, action string, attributes map[string]string) { + img, err := daemon.GetImage(imageID) + if err == nil && img.Config != nil { + // image has not been removed yet. + // it could be missing if the event is `delete`. + copyAttributes(attributes, img.Config.Labels) + } + if refName != "" { + attributes["name"] = refName + } + actor := events.Actor{ + ID: imageID, + Attributes: attributes, + } + + daemon.EventsService.Log(action, events.ImageEventType, actor) +} + +// LogPluginEvent generates an event related to a plugin with only the default attributes. +func (daemon *Daemon) LogPluginEvent(pluginID, refName, action string) { + daemon.LogPluginEventWithAttributes(pluginID, refName, action, map[string]string{}) +} + +// LogPluginEventWithAttributes generates an event related to a plugin with specific given attributes. +func (daemon *Daemon) LogPluginEventWithAttributes(pluginID, refName, action string, attributes map[string]string) { + attributes["name"] = refName + actor := events.Actor{ + ID: pluginID, + Attributes: attributes, + } + daemon.EventsService.Log(action, events.PluginEventType, actor) +} + +// LogVolumeEvent generates an event related to a volume. +func (daemon *Daemon) LogVolumeEvent(volumeID, action string, attributes map[string]string) { + actor := events.Actor{ + ID: volumeID, + Attributes: attributes, + } + daemon.EventsService.Log(action, events.VolumeEventType, actor) +} + +// LogNetworkEvent generates an event related to a network with only the default attributes. +func (daemon *Daemon) LogNetworkEvent(nw libnetwork.Network, action string) { + daemon.LogNetworkEventWithAttributes(nw, action, map[string]string{}) +} + +// LogNetworkEventWithAttributes generates an event related to a network with specific given attributes. 
+func (daemon *Daemon) LogNetworkEventWithAttributes(nw libnetwork.Network, action string, attributes map[string]string) { + attributes["name"] = nw.Name() + attributes["type"] = nw.Type() + actor := events.Actor{ + ID: nw.ID(), + Attributes: attributes, + } + daemon.EventsService.Log(action, events.NetworkEventType, actor) +} + +// LogDaemonEventWithAttributes generates an event related to the daemon itself with specific given attributes. +func (daemon *Daemon) LogDaemonEventWithAttributes(action string, attributes map[string]string) { + if daemon.EventsService != nil { + if info, err := daemon.SystemInfo(); err == nil && info.Name != "" { + attributes["name"] = info.Name + } + actor := events.Actor{ + ID: daemon.ID, + Attributes: attributes, + } + daemon.EventsService.Log(action, events.DaemonEventType, actor) + } +} + +// SubscribeToEvents returns the current record of buffered events and a channel to stream new events from. +func (daemon *Daemon) SubscribeToEvents(since, until time.Time, filter filters.Args) ([]events.Message, chan interface{}) { + ef := daemonevents.NewFilter(filter) + return daemon.EventsService.SubscribeTopic(since, until, ef) +} + +// UnsubscribeFromEvents stops the event subscription for a client by closing the +// channel where the daemon sends events to. +func (daemon *Daemon) UnsubscribeFromEvents(listener chan interface{}) { + daemon.EventsService.Evict(listener) +} + +// copyAttributes guarantees that labels are not mutated by event triggers. +func copyAttributes(attributes, labels map[string]string) { + if labels == nil { + return + } + for k, v := range labels { + attributes[k] = v + } +} + +// ProcessClusterNotifications gets changes from the store and adds them to the event list +func (daemon *Daemon) ProcessClusterNotifications(ctx context.Context, watchStream chan *swarmapi.WatchMessage) { + for { + select { + case <-ctx.Done(): + return + case message, ok := <-watchStream: + if !ok { + logrus.Debug("cluster event channel has stopped") + return + } + daemon.generateClusterEvent(message) + } + } +} + +func (daemon *Daemon) generateClusterEvent(msg *swarmapi.WatchMessage) { + for _, event := range msg.Events { + if event.Object == nil { + logrus.Errorf("event without object: %v", event) + continue + } + switch v := event.Object.GetObject().(type) { + case *swarmapi.Object_Node: + daemon.logNodeEvent(event.Action, v.Node, event.OldObject.GetNode()) + case *swarmapi.Object_Service: + daemon.logServiceEvent(event.Action, v.Service, event.OldObject.GetService()) + case *swarmapi.Object_Network: + daemon.logNetworkEvent(event.Action, v.Network, event.OldObject.GetNetwork()) + case *swarmapi.Object_Secret: + daemon.logSecretEvent(event.Action, v.Secret, event.OldObject.GetSecret()) + case *swarmapi.Object_Config: + daemon.logConfigEvent(event.Action, v.Config, event.OldObject.GetConfig()) + default: + logrus.Warnf("unrecognized event: %v", event) + } + } +} + +func (daemon *Daemon) logNetworkEvent(action swarmapi.WatchActionKind, net *swarmapi.Network, oldNet *swarmapi.Network) { + attributes := map[string]string{ + "name": net.Spec.Annotations.Name, + } + eventTime := eventTimestamp(net.Meta, action) + daemon.logClusterEvent(action, net.ID, "network", attributes, eventTime) +} + +func (daemon *Daemon) logSecretEvent(action swarmapi.WatchActionKind, secret *swarmapi.Secret, oldSecret *swarmapi.Secret) { + attributes := map[string]string{ + "name": secret.Spec.Annotations.Name, + } + eventTime := eventTimestamp(secret.Meta, action) +
daemon.logClusterEvent(action, secret.ID, "secret", attributes, eventTime) +} + +func (daemon *Daemon) logConfigEvent(action swarmapi.WatchActionKind, config *swarmapi.Config, oldConfig *swarmapi.Config) { + attributes := map[string]string{ + "name": config.Spec.Annotations.Name, + } + eventTime := eventTimestamp(config.Meta, action) + daemon.logClusterEvent(action, config.ID, "config", attributes, eventTime) +} + +func (daemon *Daemon) logNodeEvent(action swarmapi.WatchActionKind, node *swarmapi.Node, oldNode *swarmapi.Node) { + name := node.Spec.Annotations.Name + if name == "" && node.Description != nil { + name = node.Description.Hostname + } + attributes := map[string]string{ + "name": name, + } + eventTime := eventTimestamp(node.Meta, action) + // In an update event, display the changes in attributes + if action == swarmapi.WatchActionKindUpdate && oldNode != nil { + if node.Spec.Availability != oldNode.Spec.Availability { + attributes["availability.old"] = strings.ToLower(oldNode.Spec.Availability.String()) + attributes["availability.new"] = strings.ToLower(node.Spec.Availability.String()) + } + if node.Role != oldNode.Role { + attributes["role.old"] = strings.ToLower(oldNode.Role.String()) + attributes["role.new"] = strings.ToLower(node.Role.String()) + } + if node.Status.State != oldNode.Status.State { + attributes["state.old"] = strings.ToLower(oldNode.Status.State.String()) + attributes["state.new"] = strings.ToLower(node.Status.State.String()) + } + // This handles change within manager role + if node.ManagerStatus != nil && oldNode.ManagerStatus != nil { + // leader change + if node.ManagerStatus.Leader != oldNode.ManagerStatus.Leader { + if node.ManagerStatus.Leader { + attributes["leader.old"] = "false" + attributes["leader.new"] = "true" + } else { + attributes["leader.old"] = "true" + attributes["leader.new"] = "false" + } + } + if node.ManagerStatus.Reachability != oldNode.ManagerStatus.Reachability { + attributes["reachability.old"] = strings.ToLower(oldNode.ManagerStatus.Reachability.String()) + attributes["reachability.new"] = strings.ToLower(node.ManagerStatus.Reachability.String()) + } + } + } + + daemon.logClusterEvent(action, node.ID, "node", attributes, eventTime) +} + +func (daemon *Daemon) logServiceEvent(action swarmapi.WatchActionKind, service *swarmapi.Service, oldService *swarmapi.Service) { + attributes := map[string]string{ + "name": service.Spec.Annotations.Name, + } + eventTime := eventTimestamp(service.Meta, action) + + if action == swarmapi.WatchActionKindUpdate && oldService != nil { + // check image + if x, ok := service.Spec.Task.GetRuntime().(*swarmapi.TaskSpec_Container); ok { + containerSpec := x.Container + if y, ok := oldService.Spec.Task.GetRuntime().(*swarmapi.TaskSpec_Container); ok { + oldContainerSpec := y.Container + if containerSpec.Image != oldContainerSpec.Image { + attributes["image.old"] = oldContainerSpec.Image + attributes["image.new"] = containerSpec.Image + } + } else { + // This should not happen. 
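+ // Log both concrete runtime types so the unexpected transition can be diagnosed from the daemon log.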
+ logrus.Errorf("service %s runtime changed from %T to %T", service.Spec.Annotations.Name, oldService.Spec.Task.GetRuntime(), service.Spec.Task.GetRuntime()) + } + } + // check replicated count change + if x, ok := service.Spec.GetMode().(*swarmapi.ServiceSpec_Replicated); ok { + replicas := x.Replicated.Replicas + if y, ok := oldService.Spec.GetMode().(*swarmapi.ServiceSpec_Replicated); ok { + oldReplicas := y.Replicated.Replicas + if replicas != oldReplicas { + attributes["replicas.old"] = strconv.FormatUint(oldReplicas, 10) + attributes["replicas.new"] = strconv.FormatUint(replicas, 10) + } + } else { + // This should not happen. + logrus.Errorf("service %s mode changed from %T to %T", service.Spec.Annotations.Name, oldService.Spec.GetMode(), service.Spec.GetMode()) + } + } + if service.UpdateStatus != nil { + if oldService.UpdateStatus == nil { + attributes["updatestate.new"] = strings.ToLower(service.UpdateStatus.State.String()) + } else if service.UpdateStatus.State != oldService.UpdateStatus.State { + attributes["updatestate.old"] = strings.ToLower(oldService.UpdateStatus.State.String()) + attributes["updatestate.new"] = strings.ToLower(service.UpdateStatus.State.String()) + } + } + } + daemon.logClusterEvent(action, service.ID, "service", attributes, eventTime) +} + +func (daemon *Daemon) logClusterEvent(action swarmapi.WatchActionKind, id, eventType string, attributes map[string]string, eventTime time.Time) { + actor := events.Actor{ + ID: id, + Attributes: attributes, + } + + jm := events.Message{ + Action: clusterEventAction[action], + Type: eventType, + Actor: actor, + Scope: "swarm", + Time: eventTime.UTC().Unix(), + TimeNano: eventTime.UTC().UnixNano(), + } + daemon.EventsService.PublishMessage(jm) +} + +func eventTimestamp(meta swarmapi.Meta, action swarmapi.WatchActionKind) time.Time { + var eventTime time.Time + switch action { + case swarmapi.WatchActionKindCreate: + eventTime, _ = gogotypes.TimestampFromProto(meta.CreatedAt) + case swarmapi.WatchActionKindUpdate: + eventTime, _ = gogotypes.TimestampFromProto(meta.UpdatedAt) + case swarmapi.WatchActionKindRemove: + // There is no timestamp from store message for remove operations. + // Use current time. + eventTime = time.Now() + } + return eventTime +} diff --git a/vendor/github.com/moby/moby/daemon/events/events.go b/vendor/github.com/moby/moby/daemon/events/events.go new file mode 100644 index 000000000..d1529e1ce --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/events/events.go @@ -0,0 +1,165 @@ +package events + +import ( + "sync" + "time" + + eventtypes "github.com/docker/docker/api/types/events" + "github.com/docker/docker/pkg/pubsub" +) + +const ( + eventsLimit = 256 + bufferSize = 1024 +) + +// Events is pubsub channel for events generated by the engine. +type Events struct { + mu sync.Mutex + events []eventtypes.Message + pub *pubsub.Publisher +} + +// New returns new *Events instance +func New() *Events { + return &Events{ + events: make([]eventtypes.Message, 0, eventsLimit), + pub: pubsub.NewPublisher(100*time.Millisecond, bufferSize), + } +} + +// Subscribe adds new listener to events, returns slice of 64 stored +// last events, a channel in which you can expect new events (in form +// of interface{}, so you need type assertion), and a function to call +// to stop the stream of events. 
+func (e *Events) Subscribe() ([]eventtypes.Message, chan interface{}, func()) { + eventSubscribers.Inc() + e.mu.Lock() + current := make([]eventtypes.Message, len(e.events)) + copy(current, e.events) + l := e.pub.Subscribe() + e.mu.Unlock() + + cancel := func() { + e.Evict(l) + } + return current, l, cancel +} + +// SubscribeTopic adds a new listener to events, returns a slice of the stored +// events (up to eventsLimit of the most recent ones), and a channel in which you can expect new events (in form +// of interface{}, so you need type assertion). +func (e *Events) SubscribeTopic(since, until time.Time, ef *Filter) ([]eventtypes.Message, chan interface{}) { + eventSubscribers.Inc() + e.mu.Lock() + + var topic func(m interface{}) bool + if ef != nil && ef.filter.Len() > 0 { + topic = func(m interface{}) bool { return ef.Include(m.(eventtypes.Message)) } + } + + buffered := e.loadBufferedEvents(since, until, topic) + + var ch chan interface{} + if topic != nil { + ch = e.pub.SubscribeTopic(topic) + } else { + // Subscribe to all events if there are no filters + ch = e.pub.Subscribe() + } + + e.mu.Unlock() + return buffered, ch +} + +// Evict evicts listener from pubsub +func (e *Events) Evict(l chan interface{}) { + eventSubscribers.Dec() + e.pub.Evict(l) +} + +// Log creates a local scope message and publishes it +func (e *Events) Log(action, eventType string, actor eventtypes.Actor) { + now := time.Now().UTC() + jm := eventtypes.Message{ + Action: action, + Type: eventType, + Actor: actor, + Scope: "local", + Time: now.Unix(), + TimeNano: now.UnixNano(), + } + + // fill deprecated fields for container and images + switch eventType { + case eventtypes.ContainerEventType: + jm.ID = actor.ID + jm.Status = action + jm.From = actor.Attributes["image"] + case eventtypes.ImageEventType: + jm.ID = actor.ID + jm.Status = action + } + + e.PublishMessage(jm) +} + +// PublishMessage broadcasts event to listeners. Each listener has 100 milliseconds to +// receive the event or it will be skipped. +func (e *Events) PublishMessage(jm eventtypes.Message) { + eventsCounter.Inc() + + e.mu.Lock() + if len(e.events) == cap(e.events) { + // discard oldest event + copy(e.events, e.events[1:]) + e.events[len(e.events)-1] = jm + } else { + e.events = append(e.events, jm) + } + e.mu.Unlock() + e.pub.Publish(jm) +} + +// SubscribersCount returns number of event listeners +func (e *Events) SubscribersCount() int { + return e.pub.Len() +} + +// loadBufferedEvents iterates over the cached events in the buffer +// and returns those that were emitted between two specific dates. +// It uses `time.Unix(seconds, nanoseconds)` to generate valid dates with those arguments. +// It filters those buffered messages with a topic function if it's not nil, otherwise it adds all messages. +func (e *Events) loadBufferedEvents(since, until time.Time, topic func(interface{}) bool) []eventtypes.Message { + var buffered []eventtypes.Message + if since.IsZero() && until.IsZero() { + return buffered + } + + var sinceNanoUnix int64 + if !since.IsZero() { + sinceNanoUnix = since.UnixNano() + } + + var untilNanoUnix int64 + if !until.IsZero() { + untilNanoUnix = until.UnixNano() + } + + for i := len(e.events) - 1; i >= 0; i-- { + ev := e.events[i] + + if ev.TimeNano < sinceNanoUnix { + break + } + + if untilNanoUnix > 0 && ev.TimeNano > untilNanoUnix { + continue + } + + if topic == nil || topic(ev) { + buffered = append([]eventtypes.Message{ev}, buffered...)
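+ // Prepending keeps the result in chronological order while the loop walks the buffer from newest to oldest.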
+ } + } + return buffered +} diff --git a/vendor/github.com/moby/moby/daemon/events/events_test.go b/vendor/github.com/moby/moby/daemon/events/events_test.go new file mode 100644 index 000000000..ebb222cfb --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/events/events_test.go @@ -0,0 +1,275 @@ +package events + +import ( + "fmt" + "testing" + "time" + + "github.com/docker/docker/api/types/events" + timetypes "github.com/docker/docker/api/types/time" + eventstestutils "github.com/docker/docker/daemon/events/testutils" +) + +func TestEventsLog(t *testing.T) { + e := New() + _, l1, _ := e.Subscribe() + _, l2, _ := e.Subscribe() + defer e.Evict(l1) + defer e.Evict(l2) + count := e.SubscribersCount() + if count != 2 { + t.Fatalf("Must be 2 subscribers, got %d", count) + } + actor := events.Actor{ + ID: "cont", + Attributes: map[string]string{"image": "image"}, + } + e.Log("test", events.ContainerEventType, actor) + select { + case msg := <-l1: + jmsg, ok := msg.(events.Message) + if !ok { + t.Fatalf("Unexpected type %T", msg) + } + if len(e.events) != 1 { + t.Fatalf("Must be only one event, got %d", len(e.events)) + } + if jmsg.Status != "test" { + t.Fatalf("Status should be test, got %s", jmsg.Status) + } + if jmsg.ID != "cont" { + t.Fatalf("ID should be cont, got %s", jmsg.ID) + } + if jmsg.From != "image" { + t.Fatalf("From should be image, got %s", jmsg.From) + } + case <-time.After(1 * time.Second): + t.Fatal("Timeout waiting for broadcasted message") + } + select { + case msg := <-l2: + jmsg, ok := msg.(events.Message) + if !ok { + t.Fatalf("Unexpected type %T", msg) + } + if len(e.events) != 1 { + t.Fatalf("Must be only one event, got %d", len(e.events)) + } + if jmsg.Status != "test" { + t.Fatalf("Status should be test, got %s", jmsg.Status) + } + if jmsg.ID != "cont" { + t.Fatalf("ID should be cont, got %s", jmsg.ID) + } + if jmsg.From != "image" { + t.Fatalf("From should be image, got %s", jmsg.From) + } + case <-time.After(1 * time.Second): + t.Fatal("Timeout waiting for broadcasted message") + } +} + +func TestEventsLogTimeout(t *testing.T) { + e := New() + _, l, _ := e.Subscribe() + defer e.Evict(l) + + c := make(chan struct{}) + go func() { + actor := events.Actor{ + ID: "image", + } + e.Log("test", events.ImageEventType, actor) + close(c) + }() + + select { + case <-c: + case <-time.After(time.Second): + t.Fatal("Timeout publishing message") + } +} + +func TestLogEvents(t *testing.T) { + e := New() + + for i := 0; i < eventsLimit+16; i++ { + action := fmt.Sprintf("action_%d", i) + id := fmt.Sprintf("cont_%d", i) + from := fmt.Sprintf("image_%d", i) + + actor := events.Actor{ + ID: id, + Attributes: map[string]string{"image": from}, + } + e.Log(action, events.ContainerEventType, actor) + } + time.Sleep(50 * time.Millisecond) + current, l, _ := e.Subscribe() + for i := 0; i < 10; i++ { + num := i + eventsLimit + 16 + action := fmt.Sprintf("action_%d", num) + id := fmt.Sprintf("cont_%d", num) + from := fmt.Sprintf("image_%d", num) + + actor := events.Actor{ + ID: id, + Attributes: map[string]string{"image": from}, + } + e.Log(action, events.ContainerEventType, actor) + } + if len(e.events) != eventsLimit { + t.Fatalf("Must be %d events, got %d", eventsLimit, len(e.events)) + } + + var msgs []events.Message + for len(msgs) < 10 { + m := <-l + jm, ok := (m).(events.Message) + if !ok { + t.Fatalf("Unexpected type %T", m) + } + msgs = append(msgs, jm) + } + if len(current) != eventsLimit { + t.Fatalf("Must be %d events, got %d", eventsLimit, len(current)) + } + first := current[0] 
+ if first.Status != "action_16" { + t.Fatalf("First action is %s, must be action_16", first.Status) + } + last := current[len(current)-1] + if last.Status != "action_271" { + t.Fatalf("Last action is %s, must be action_271", last.Status) + } + + firstC := msgs[0] + if firstC.Status != "action_272" { + t.Fatalf("First action is %s, must be action_272", firstC.Status) + } + lastC := msgs[len(msgs)-1] + if lastC.Status != "action_281" { + t.Fatalf("Last action is %s, must be action_281", lastC.Status) + } +} + +// https://github.com/docker/docker/issues/20999 +// Fixtures: +// +//2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover) +//2016-03-07T17:28:03.091719377+02:00 network disconnect 19c5ed41acb798f26b751e0035cd7821741ab79e2bbd59a66b5fd8abf954eaa0 (type=bridge, container=0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079, name=bridge) +//2016-03-07T17:28:03.129014751+02:00 container destroy 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover) +func TestLoadBufferedEvents(t *testing.T) { + now := time.Now() + f, err := timetypes.GetTimestamp("2016-03-07T17:28:03.100000000+02:00", now) + if err != nil { + t.Fatal(err) + } + s, sNano, err := timetypes.ParseTimestamps(f, -1) + if err != nil { + t.Fatal(err) + } + + m1, err := eventstestutils.Scan("2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") + if err != nil { + t.Fatal(err) + } + m2, err := eventstestutils.Scan("2016-03-07T17:28:03.091719377+02:00 network disconnect 19c5ed41acb798f26b751e0035cd7821741ab79e2bbd59a66b5fd8abf954eaa0 (type=bridge, container=0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079, name=bridge)") + if err != nil { + t.Fatal(err) + } + m3, err := eventstestutils.Scan("2016-03-07T17:28:03.129014751+02:00 container destroy 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") + if err != nil { + t.Fatal(err) + } + + events := &Events{ + events: []events.Message{*m1, *m2, *m3}, + } + + since := time.Unix(s, sNano) + until := time.Time{} + + out := events.loadBufferedEvents(since, until, nil) + if len(out) != 1 { + t.Fatalf("expected 1 message, got %d: %v", len(out), out) + } +} + +func TestLoadBufferedEventsOnlyFromPast(t *testing.T) { + now := time.Now() + f, err := timetypes.GetTimestamp("2016-03-07T17:28:03.090000000+02:00", now) + if err != nil { + t.Fatal(err) + } + s, sNano, err := timetypes.ParseTimestamps(f, 0) + if err != nil { + t.Fatal(err) + } + + f, err = timetypes.GetTimestamp("2016-03-07T17:28:03.100000000+02:00", now) + if err != nil { + t.Fatal(err) + } + u, uNano, err := timetypes.ParseTimestamps(f, 0) + if err != nil { + t.Fatal(err) + } + + m1, err := eventstestutils.Scan("2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") + if err != nil { + t.Fatal(err) + } + m2, err := eventstestutils.Scan("2016-03-07T17:28:03.091719377+02:00 network disconnect 19c5ed41acb798f26b751e0035cd7821741ab79e2bbd59a66b5fd8abf954eaa0 (type=bridge, container=0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079, name=bridge)") + if err != nil { + t.Fatal(err) + } + m3, err := eventstestutils.Scan("2016-03-07T17:28:03.129014751+02:00 container destroy 
0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") + if err != nil { + t.Fatal(err) + } + + events := &Events{ + events: []events.Message{*m1, *m2, *m3}, + } + + since := time.Unix(s, sNano) + until := time.Unix(u, uNano) + + out := events.loadBufferedEvents(since, until, nil) + if len(out) != 1 { + t.Fatalf("expected 1 message, got %d: %v", len(out), out) + } + + if out[0].Type != "network" { + t.Fatalf("expected network event, got %s", out[0].Type) + } +} + +// #13753 +func TestIgnoreBufferedWhenNoTimes(t *testing.T) { + m1, err := eventstestutils.Scan("2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") + if err != nil { + t.Fatal(err) + } + m2, err := eventstestutils.Scan("2016-03-07T17:28:03.091719377+02:00 network disconnect 19c5ed41acb798f26b751e0035cd7821741ab79e2bbd59a66b5fd8abf954eaa0 (type=bridge, container=0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079, name=bridge)") + if err != nil { + t.Fatal(err) + } + m3, err := eventstestutils.Scan("2016-03-07T17:28:03.129014751+02:00 container destroy 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") + if err != nil { + t.Fatal(err) + } + + events := &Events{ + events: []events.Message{*m1, *m2, *m3}, + } + + since := time.Time{} + until := time.Time{} + + out := events.loadBufferedEvents(since, until, nil) + if len(out) != 0 { + t.Fatalf("expected 0 buffered events, got %q", out) + } +} diff --git a/vendor/github.com/moby/moby/daemon/events/filter.go b/vendor/github.com/moby/moby/daemon/events/filter.go new file mode 100644 index 000000000..645f1ca91 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/events/filter.go @@ -0,0 +1,134 @@ +package events + +import ( + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" +) + +// Filter can filter out docker events from a stream +type Filter struct { + filter filters.Args +} + +// NewFilter creates a new Filter +func NewFilter(filter filters.Args) *Filter { + return &Filter{filter: filter} +} + +// Include returns true when the event ev is included by the filters +func (ef *Filter) Include(ev events.Message) bool { + return ef.matchEvent(ev) && + ef.filter.ExactMatch("type", ev.Type) && + ef.matchScope(ev.Scope) && + ef.matchDaemon(ev) && + ef.matchContainer(ev) && + ef.matchPlugin(ev) && + ef.matchVolume(ev) && + ef.matchNetwork(ev) && + ef.matchImage(ev) && + ef.matchLabels(ev.Actor.Attributes) +} + +func (ef *Filter) matchEvent(ev events.Message) bool { + // #25798: if an event filter contains either health_status, exec_create or exec_start without a colon, + // do a FuzzyMatch instead of an ExactMatch.
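+ // e.g. --filter event=health_status then matches the action "health_status: healthy", while --filter event=die still requires an exact match.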
+ if ef.filterContains("event", map[string]struct{}{"health_status": {}, "exec_create": {}, "exec_start": {}}) { + return ef.filter.FuzzyMatch("event", ev.Action) + } + return ef.filter.ExactMatch("event", ev.Action) +} + +func (ef *Filter) filterContains(field string, values map[string]struct{}) bool { + for _, v := range ef.filter.Get(field) { + if _, ok := values[v]; ok { + return true + } + } + return false +} + +func (ef *Filter) matchScope(scope string) bool { + if !ef.filter.Include("scope") { + return true + } + return ef.filter.ExactMatch("scope", scope) +} + +func (ef *Filter) matchLabels(attributes map[string]string) bool { + if !ef.filter.Include("label") { + return true + } + return ef.filter.MatchKVList("label", attributes) +} + +func (ef *Filter) matchDaemon(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.DaemonEventType) +} + +func (ef *Filter) matchContainer(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.ContainerEventType) +} + +func (ef *Filter) matchPlugin(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.PluginEventType) +} + +func (ef *Filter) matchVolume(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.VolumeEventType) +} + +func (ef *Filter) matchNetwork(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.NetworkEventType) +} + +func (ef *Filter) matchService(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.ServiceEventType) +} + +func (ef *Filter) matchNode(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.NodeEventType) +} + +func (ef *Filter) matchSecret(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.SecretEventType) +} + +func (ef *Filter) matchConfig(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.ConfigEventType) +} + +func (ef *Filter) fuzzyMatchName(ev events.Message, eventType string) bool { + return ef.filter.FuzzyMatch(eventType, ev.Actor.ID) || + ef.filter.FuzzyMatch(eventType, ev.Actor.Attributes["name"]) +} + +// matchImage matches against both event.Actor.ID (for image events) +// and event.Actor.Attributes["image"] (for container events), so that any container that was created +// from an image will be included in the image events. Also compare both +// against the stripped repo name without any tags. 
+func (ef *Filter) matchImage(ev events.Message) bool { + id := ev.Actor.ID + nameAttr := "image" + var imageName string + + if ev.Type == events.ImageEventType { + nameAttr = "name" + } + + if n, ok := ev.Actor.Attributes[nameAttr]; ok { + imageName = n + } + return ef.filter.ExactMatch("image", id) || + ef.filter.ExactMatch("image", imageName) || + ef.filter.ExactMatch("image", stripTag(id)) || + ef.filter.ExactMatch("image", stripTag(imageName)) +} + +func stripTag(image string) string { + ref, err := reference.ParseNormalizedNamed(image) + if err != nil { + return image + } + return reference.FamiliarName(ref) +} diff --git a/vendor/github.com/moby/moby/daemon/events/metrics.go b/vendor/github.com/moby/moby/daemon/events/metrics.go new file mode 100644 index 000000000..c9a89ec0e --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/events/metrics.go @@ -0,0 +1,15 @@ +package events + +import "github.com/docker/go-metrics" + +var ( + eventsCounter metrics.Counter + eventSubscribers metrics.Gauge +) + +func init() { + ns := metrics.NewNamespace("engine", "daemon", nil) + eventsCounter = ns.NewCounter("events", "The number of events logged") + eventSubscribers = ns.NewGauge("events_subscribers", "The number of current subscribers to events", metrics.Total) + metrics.Register(ns) +} diff --git a/vendor/github.com/moby/moby/daemon/events/testutils/testutils.go b/vendor/github.com/moby/moby/daemon/events/testutils/testutils.go new file mode 100644 index 000000000..3544446e1 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/events/testutils/testutils.go @@ -0,0 +1,76 @@ +package testutils + +import ( + "fmt" + "regexp" + "strings" + "time" + + "github.com/docker/docker/api/types/events" + timetypes "github.com/docker/docker/api/types/time" +) + +var ( + reTimestamp = `(?P<timestamp>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{9}(:?(:?(:?-|\+)\d{2}:\d{2})|Z))` + reEventType = `(?P<eventType>\w+)` + reAction = `(?P<action>\w+)` + reID = `(?P<id>[^\s]+)` + reAttributes = `(\s\((?P<attributes>[^\)]+)\))?` + reString = fmt.Sprintf(`\A%s\s%s\s%s\s%s%s\z`, reTimestamp, reEventType, reAction, reID, reAttributes) + + // eventCliRegexp is a regular expression that matches all possible event outputs in the cli + eventCliRegexp = regexp.MustCompile(reString) +) + +// ScanMap parses an event string formatted like the default cli output +// and turns it into a map. +func ScanMap(text string) map[string]string { + matches := eventCliRegexp.FindAllStringSubmatch(text, -1) + md := map[string]string{} + if len(matches) == 0 { + return md + } + + names := eventCliRegexp.SubexpNames() + for i, n := range matches[0] { + md[names[i]] = n + } + return md +} + +// Scan parses an event string formatted like the default cli output +// and turns it into an event message.
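+// An accepted input looks like: +// "2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c1 (image=ubuntu, name=small_hoover)"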
+func Scan(text string) (*events.Message, error) { + md := ScanMap(text) + if len(md) == 0 { + return nil, fmt.Errorf("text is not an event: %s", text) + } + + f, err := timetypes.GetTimestamp(md["timestamp"], time.Now()) + if err != nil { + return nil, err + } + + t, tn, err := timetypes.ParseTimestamps(f, -1) + if err != nil { + return nil, err + } + + attrs := make(map[string]string) + for _, a := range strings.SplitN(md["attributes"], ", ", -1) { + kv := strings.SplitN(a, "=", 2) + attrs[kv[0]] = kv[1] + } + + tu := time.Unix(t, tn) + return &events.Message{ + Time: t, + TimeNano: tu.UnixNano(), + Type: md["eventType"], + Action: md["action"], + Actor: events.Actor{ + ID: md["id"], + Attributes: attrs, + }, + }, nil +} diff --git a/vendor/github.com/moby/moby/daemon/events_test.go b/vendor/github.com/moby/moby/daemon/events_test.go new file mode 100644 index 000000000..7048de292 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/events_test.go @@ -0,0 +1,90 @@ +package daemon + +import ( + "testing" + "time" + + containertypes "github.com/docker/docker/api/types/container" + eventtypes "github.com/docker/docker/api/types/events" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/events" +) + +func TestLogContainerEventCopyLabels(t *testing.T) { + e := events.New() + _, l, _ := e.Subscribe() + defer e.Evict(l) + + container := &container.Container{ + ID: "container_id", + Name: "container_name", + Config: &containertypes.Config{ + Image: "image_name", + Labels: map[string]string{ + "node": "1", + "os": "alpine", + }, + }, + } + daemon := &Daemon{ + EventsService: e, + } + daemon.LogContainerEvent(container, "create") + + if _, mutated := container.Config.Labels["image"]; mutated { + t.Fatalf("Expected to not mutate the container labels, got %q", container.Config.Labels) + } + + validateTestAttributes(t, l, map[string]string{ + "node": "1", + "os": "alpine", + }) +} + +func TestLogContainerEventWithAttributes(t *testing.T) { + e := events.New() + _, l, _ := e.Subscribe() + defer e.Evict(l) + + container := &container.Container{ + ID: "container_id", + Name: "container_name", + Config: &containertypes.Config{ + Labels: map[string]string{ + "node": "1", + "os": "alpine", + }, + }, + } + daemon := &Daemon{ + EventsService: e, + } + attributes := map[string]string{ + "node": "2", + "foo": "bar", + } + daemon.LogContainerEventWithAttributes(container, "create", attributes) + + validateTestAttributes(t, l, map[string]string{ + "node": "1", + "foo": "bar", + }) +} + +func validateTestAttributes(t *testing.T, l chan interface{}, expectedAttributesToTest map[string]string) { + select { + case ev := <-l: + event, ok := ev.(eventtypes.Message) + if !ok { + t.Fatalf("Unexpected event message: %q", ev) + } + for key, expected := range expectedAttributesToTest { + actual, ok := event.Actor.Attributes[key] + if !ok || actual != expected { + t.Fatalf("Expected value for key %s to be %s, but was %s (event:%v)", key, expected, actual, event) + } + } + case <-time.After(10 * time.Second): + t.Fatal("LogEvent test timed out") + } +} diff --git a/vendor/github.com/moby/moby/daemon/exec.go b/vendor/github.com/moby/moby/daemon/exec.go new file mode 100644 index 000000000..72d01c8c2 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/exec.go @@ -0,0 +1,299 @@ +package daemon + +import ( + "fmt" + "io" + "strings" + "time" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + 
"github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/container" + "github.com/docker/docker/container/stream" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/term" +) + +// Seconds to wait after sending TERM before trying KILL +const termProcessTimeout = 10 + +func (d *Daemon) registerExecCommand(container *container.Container, config *exec.Config) { + // Storing execs in container in order to kill them gracefully whenever the container is stopped or removed. + container.ExecCommands.Add(config.ID, config) + // Storing execs in daemon for easy access via Engine API. + d.execCommands.Add(config.ID, config) +} + +// ExecExists looks up the exec instance and returns a bool if it exists or not. +// It will also return the error produced by `getConfig` +func (d *Daemon) ExecExists(name string) (bool, error) { + if _, err := d.getExecConfig(name); err != nil { + return false, err + } + return true, nil +} + +// getExecConfig looks up the exec instance by name. If the container associated +// with the exec instance is stopped or paused, it will return an error. +func (d *Daemon) getExecConfig(name string) (*exec.Config, error) { + ec := d.execCommands.Get(name) + + // If the exec is found but its container is not in the daemon's list of + // containers then it must have been deleted, in which case instead of + // saying the container isn't running, we should return a 404 so that + // the user sees the same error now that they will after the + // 5 minute clean-up loop is run which erases old/dead execs. + + if ec != nil { + if container := d.containers.Get(ec.ContainerID); container != nil { + if !container.IsRunning() { + return nil, fmt.Errorf("Container %s is not running: %s", container.ID, container.State.String()) + } + if container.IsPaused() { + return nil, errExecPaused(container.ID) + } + if container.IsRestarting() { + return nil, errContainerIsRestarting(container.ID) + } + return ec, nil + } + } + + return nil, errExecNotFound(name) +} + +func (d *Daemon) unregisterExecCommand(container *container.Container, execConfig *exec.Config) { + container.ExecCommands.Delete(execConfig.ID) + d.execCommands.Delete(execConfig.ID) +} + +func (d *Daemon) getActiveContainer(name string) (*container.Container, error) { + container, err := d.GetContainer(name) + if err != nil { + return nil, err + } + + if !container.IsRunning() { + return nil, errNotRunning{container.ID} + } + if container.IsPaused() { + return nil, errExecPaused(name) + } + if container.IsRestarting() { + return nil, errContainerIsRestarting(container.ID) + } + return container, nil +} + +// ContainerExecCreate sets up an exec in a running container. 
+func (d *Daemon) ContainerExecCreate(name string, config *types.ExecConfig) (string, error) { + cntr, err := d.getActiveContainer(name) + if err != nil { + return "", err + } + + cmd := strslice.StrSlice(config.Cmd) + entrypoint, args := d.getEntrypointAndArgs(strslice.StrSlice{}, cmd) + + keys := []byte{} + if config.DetachKeys != "" { + keys, err = term.ToBytes(config.DetachKeys) + if err != nil { + err = fmt.Errorf("Invalid escape keys (%s) provided", config.DetachKeys) + return "", err + } + } + + execConfig := exec.NewConfig() + execConfig.OpenStdin = config.AttachStdin + execConfig.OpenStdout = config.AttachStdout + execConfig.OpenStderr = config.AttachStderr + execConfig.ContainerID = cntr.ID + execConfig.DetachKeys = keys + execConfig.Entrypoint = entrypoint + execConfig.Args = args + execConfig.Tty = config.Tty + execConfig.Privileged = config.Privileged + execConfig.User = config.User + + linkedEnv, err := d.setupLinkedContainers(cntr) + if err != nil { + return "", err + } + execConfig.Env = container.ReplaceOrAppendEnvValues(cntr.CreateDaemonEnvironment(config.Tty, linkedEnv), config.Env) + if len(execConfig.User) == 0 { + execConfig.User = cntr.Config.User + } + + d.registerExecCommand(cntr, execConfig) + + d.LogContainerEvent(cntr, "exec_create: "+execConfig.Entrypoint+" "+strings.Join(execConfig.Args, " ")) + + return execConfig.ID, nil +} + +// ContainerExecStart starts a previously set up exec instance. The +// std streams are set up. +// If ctx is cancelled, the process is terminated. +func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) (err error) { + var ( + cStdin io.ReadCloser + cStdout, cStderr io.Writer + ) + + ec, err := d.getExecConfig(name) + if err != nil { + return errExecNotFound(name) + } + + ec.Lock() + if ec.ExitCode != nil { + ec.Unlock() + err := fmt.Errorf("Error: Exec command %s has already run", ec.ID) + return errors.NewRequestConflictError(err) + } + + if ec.Running { + ec.Unlock() + return fmt.Errorf("Error: Exec command %s is already running", ec.ID) + } + ec.Running = true + ec.Unlock() + + c := d.containers.Get(ec.ContainerID) + logrus.Debugf("starting exec command %s in container %s", ec.ID, c.ID) + d.LogContainerEvent(c, "exec_start: "+ec.Entrypoint+" "+strings.Join(ec.Args, " ")) + + defer func() { + if err != nil { + ec.Lock() + ec.Running = false + exitCode := 126 + ec.ExitCode = &exitCode + if err := ec.CloseStreams(); err != nil { + logrus.Errorf("failed to cleanup exec %s streams: %s", c.ID, err) + } + ec.Unlock() + c.ExecCommands.Delete(ec.ID) + } + }() + + if ec.OpenStdin && stdin != nil { + r, w := io.Pipe() + go func() { + defer w.Close() + defer logrus.Debug("Closing buffered stdin pipe") + pools.Copy(w, stdin) + }() + cStdin = r + } + if ec.OpenStdout { + cStdout = stdout + } + if ec.OpenStderr { + cStderr = stderr + } + + if ec.OpenStdin { + ec.StreamConfig.NewInputPipes() + } else { + ec.StreamConfig.NewNopInputPipe() + } + + p := libcontainerd.Process{ + Args: append([]string{ec.Entrypoint}, ec.Args...), + Env: ec.Env, + Terminal: ec.Tty, + } + + if err := execSetPlatformOpt(c, ec, &p); err != nil { + return err + } + + attachConfig := stream.AttachConfig{ + TTY: ec.Tty, + UseStdin: cStdin != nil, + UseStdout: cStdout != nil, + UseStderr: cStderr != nil, + Stdin: cStdin, + Stdout: cStdout, + Stderr: cStderr, + DetachKeys: ec.DetachKeys, + CloseStdin: true, + } + ec.StreamConfig.AttachStreams(&attachConfig) + attachErr := ec.StreamConfig.CopyStreams(ctx, 
&attachConfig) + + systemPid, err := d.containerd.AddProcess(ctx, c.ID, name, p, ec.InitializeStdio) + if err != nil { + return err + } + ec.Lock() + ec.Pid = systemPid + ec.Unlock() + + select { + case <-ctx.Done(): + logrus.Debugf("Sending TERM signal to process %v in container %v", name, c.ID) + d.containerd.SignalProcess(c.ID, name, int(signal.SignalMap["TERM"])) + select { + case <-time.After(termProcessTimeout * time.Second): + logrus.Infof("Container %v, process %v failed to exit within %d seconds of signal TERM - using the force", c.ID, name, termProcessTimeout) + d.containerd.SignalProcess(c.ID, name, int(signal.SignalMap["KILL"])) + case <-attachErr: + // TERM signal worked + } + return fmt.Errorf("context cancelled") + case err := <-attachErr: + if err != nil { + if _, ok := err.(term.EscapeError); !ok { + return fmt.Errorf("exec attach failed with error: %v", err) + } + d.LogContainerEvent(c, "exec_detach") + } + } + return nil +} + +// execCommandGC runs a ticker to clean up the daemon references +// of exec configs that are no longer part of the container. +func (d *Daemon) execCommandGC() { + for range time.Tick(5 * time.Minute) { + var ( + cleaned int + liveExecCommands = d.containerExecIds() + ) + for id, config := range d.execCommands.Commands() { + if config.CanRemove { + cleaned++ + d.execCommands.Delete(id) + } else { + if _, exists := liveExecCommands[id]; !exists { + config.CanRemove = true + } + } + } + if cleaned > 0 { + logrus.Debugf("clean %d unused exec commands", cleaned) + } + } +} + +// containerExecIds returns a list of all the current exec ids that are in use +// and running inside a container. +func (d *Daemon) containerExecIds() map[string]struct{} { + ids := map[string]struct{}{} + for _, c := range d.containers.List() { + for _, id := range c.ExecCommands.List() { + ids[id] = struct{}{} + } + } + return ids +} diff --git a/vendor/github.com/moby/moby/daemon/exec/exec.go b/vendor/github.com/moby/moby/daemon/exec/exec.go new file mode 100644 index 000000000..933136f96 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/exec/exec.go @@ -0,0 +1,118 @@ +package exec + +import ( + "runtime" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container/stream" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/stringid" +) + +// Config holds the configurations for execs. The Daemon keeps +// track of both running and finished execs so that they can be +// examined both during and after completion. +type Config struct { + sync.Mutex + StreamConfig *stream.Config + ID string + Running bool + ExitCode *int + OpenStdin bool + OpenStderr bool + OpenStdout bool + CanRemove bool + ContainerID string + DetachKeys []byte + Entrypoint string + Args []string + Tty bool + Privileged bool + User string + Env []string + Pid int +} + +// NewConfig initializes the a new exec configuration +func NewConfig() *Config { + return &Config{ + ID: stringid.GenerateNonCryptoID(), + StreamConfig: stream.NewConfig(), + } +} + +// InitializeStdio is called by libcontainerd to connect the stdio. 
+func (c *Config) InitializeStdio(iop libcontainerd.IOPipe) error { + c.StreamConfig.CopyToPipe(iop) + + if c.StreamConfig.Stdin() == nil && !c.Tty && runtime.GOOS == "windows" { + if iop.Stdin != nil { + if err := iop.Stdin.Close(); err != nil { + logrus.Errorf("error closing exec stdin: %+v", err) + } + } + } + + return nil +} + +// CloseStreams closes the stdio streams for the exec +func (c *Config) CloseStreams() error { + return c.StreamConfig.CloseStreams() +} + +// Store keeps track of the exec configurations. +type Store struct { + commands map[string]*Config + sync.RWMutex +} + +// NewStore initializes a new exec store. +func NewStore() *Store { + return &Store{commands: make(map[string]*Config, 0)} +} + +// Commands returns the exec configurations in the store. +func (e *Store) Commands() map[string]*Config { + e.RLock() + commands := make(map[string]*Config, len(e.commands)) + for id, config := range e.commands { + commands[id] = config + } + e.RUnlock() + return commands +} + +// Add adds a new exec configuration to the store. +func (e *Store) Add(id string, Config *Config) { + e.Lock() + e.commands[id] = Config + e.Unlock() +} + +// Get returns an exec configuration by its id. +func (e *Store) Get(id string) *Config { + e.RLock() + res := e.commands[id] + e.RUnlock() + return res +} + +// Delete removes an exec configuration from the store. +func (e *Store) Delete(id string) { + e.Lock() + delete(e.commands, id) + e.Unlock() +} + +// List returns the list of exec ids in the store. +func (e *Store) List() []string { + var IDs []string + e.RLock() + for id := range e.commands { + IDs = append(IDs, id) + } + e.RUnlock() + return IDs +} diff --git a/vendor/github.com/moby/moby/daemon/exec_linux.go b/vendor/github.com/moby/moby/daemon/exec_linux.go new file mode 100644 index 000000000..bb11c11e4 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/exec_linux.go @@ -0,0 +1,50 @@ +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/caps" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/libcontainerd" + "github.com/opencontainers/runc/libcontainer/apparmor" + "github.com/opencontainers/runtime-spec/specs-go" +) + +func execSetPlatformOpt(c *container.Container, ec *exec.Config, p *libcontainerd.Process) error { + if len(ec.User) > 0 { + uid, gid, additionalGids, err := getUser(c, ec.User) + if err != nil { + return err + } + p.User = &specs.User{ + UID: uid, + GID: gid, + AdditionalGids: additionalGids, + } + } + if ec.Privileged { + p.Capabilities = caps.GetAllCapabilities() + } + if apparmor.IsEnabled() { + var appArmorProfile string + if c.AppArmorProfile != "" { + appArmorProfile = c.AppArmorProfile + } else if c.HostConfig.Privileged { + appArmorProfile = "unconfined" + } else { + appArmorProfile = "docker-default" + } + + if appArmorProfile == "docker-default" { + // Unattended upgrades and other fun services can unload AppArmor + // profiles inadvertently. Since we cannot store our profile in + // /etc/apparmor.d, nor can we practically add other ways of + // telling the system to keep our profile loaded, in order to make + // sure that we keep the default profile enabled we dynamically + // reload it if necessary. 
+ if err := ensureDefaultAppArmorProfile(); err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/exec_solaris.go b/vendor/github.com/moby/moby/daemon/exec_solaris.go new file mode 100644 index 000000000..7003355d9 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/exec_solaris.go @@ -0,0 +1,11 @@ +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/libcontainerd" +) + +func execSetPlatformOpt(c *container.Container, ec *exec.Config, p *libcontainerd.Process) error { + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/exec_windows.go b/vendor/github.com/moby/moby/daemon/exec_windows.go new file mode 100644 index 000000000..b7b45149c --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/exec_windows.go @@ -0,0 +1,16 @@ +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/libcontainerd" +) + +func execSetPlatformOpt(c *container.Container, ec *exec.Config, p *libcontainerd.Process) error { + // Process arguments need to be escaped before sending to OCI. + if c.Platform == "windows" { + p.Args = escapeArgs(p.Args) + p.User.Username = ec.User + } + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/export.go b/vendor/github.com/moby/moby/daemon/export.go new file mode 100644 index 000000000..402e67583 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/export.go @@ -0,0 +1,59 @@ +package daemon + +import ( + "fmt" + "io" + "runtime" + + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/ioutils" +) + +// ContainerExport writes the contents of the container to the given +// writer. An error is returned if the container cannot be found. 
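+// The contents are streamed as an uncompressed tar of the container's base filesystem; daemons running on Windows reject the operation.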
+func (daemon *Daemon) ContainerExport(name string, out io.Writer) error { + if runtime.GOOS == "windows" { + return fmt.Errorf("the daemon on this platform does not support export of a container") + } + + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + data, err := daemon.containerExport(container) + if err != nil { + return fmt.Errorf("Error exporting container %s: %v", name, err) + } + defer data.Close() + + // Stream the entire contents of the container (basically a volatile snapshot) + if _, err := io.Copy(out, data); err != nil { + return fmt.Errorf("Error exporting container %s: %v", name, err) + } + return nil +} + +func (daemon *Daemon) containerExport(container *container.Container) (io.ReadCloser, error) { + if err := daemon.Mount(container); err != nil { + return nil, err + } + + archive, err := archive.TarWithOptions(container.BaseFS, &archive.TarOptions{ + Compression: archive.Uncompressed, + UIDMaps: daemon.idMappings.UIDs(), + GIDMaps: daemon.idMappings.GIDs(), + }) + if err != nil { + daemon.Unmount(container) + return nil, err + } + arch := ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + daemon.Unmount(container) + return err + }) + daemon.LogContainerEvent(container, "export") + return arch, err +} diff --git a/vendor/github.com/moby/moby/daemon/getsize_unix.go b/vendor/github.com/moby/moby/daemon/getsize_unix.go new file mode 100644 index 000000000..434fa4388 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/getsize_unix.go @@ -0,0 +1,43 @@ +// +build linux freebsd solaris + +package daemon + +import ( + "runtime" + + "github.com/Sirupsen/logrus" +) + +// getSize returns the real size & virtual size of the container. +func (daemon *Daemon) getSize(containerID string) (int64, int64) { + var ( + sizeRw, sizeRootfs int64 + err error + ) + + rwlayer, err := daemon.stores[runtime.GOOS].layerStore.GetRWLayer(containerID) + if err != nil { + logrus.Errorf("Failed to compute size of container rootfs %v: %v", containerID, err) + return sizeRw, sizeRootfs + } + defer daemon.stores[runtime.GOOS].layerStore.ReleaseRWLayer(rwlayer) + + sizeRw, err = rwlayer.Size() + if err != nil { + logrus.Errorf("Driver %s couldn't return diff size of container %s: %s", + daemon.GraphDriverName(runtime.GOOS), containerID, err) + // FIXME: GetSize should return an error. Not changing it now in case + // there is a side-effect. + sizeRw = -1 + } + + if parent := rwlayer.Parent(); parent != nil { + sizeRootfs, err = parent.Size() + if err != nil { + sizeRootfs = -1 + } else if sizeRw != -1 { + sizeRootfs += sizeRw + } + } + return sizeRw, sizeRootfs +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/aufs/aufs.go b/vendor/github.com/moby/moby/daemon/graphdriver/aufs/aufs.go new file mode 100644 index 000000000..c099b88ae --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/aufs/aufs.go @@ -0,0 +1,649 @@ +// +build linux + +/* + +aufs driver directory structure + + . 
+ ├── layers // Metadata of layers + │ ├── 1 + │ ├── 2 + │ └── 3 + ├── diff // Content of the layer + │ ├── 1 // Contains layers that need to be mounted for the id + │ ├── 2 + │ └── 3 + └── mnt // Mount points for the rw layers to be mounted + ├── 1 + ├── 2 + └── 3 + +*/ + +package aufs + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/directory" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/locker" + mountpk "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/system" + rsystem "github.com/opencontainers/runc/libcontainer/system" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" + "github.com/vbatts/tar-split/tar/storage" + "golang.org/x/sys/unix" +) + +var ( + // ErrAufsNotSupported is returned if aufs is not supported by the host. + ErrAufsNotSupported = fmt.Errorf("AUFS was not found in /proc/filesystems") + // ErrAufsNested means aufs cannot be used because we are in a user namespace + ErrAufsNested = fmt.Errorf("AUFS cannot be used in non-init user namespace") + backingFs = "" + + enableDirpermLock sync.Once + enableDirperm bool +) + +func init() { + graphdriver.Register("aufs", Init) +} + +// Driver contains information about the filesystem mounted. +type Driver struct { + sync.Mutex + root string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter + pathCacheLock sync.Mutex + pathCache map[string]string + naiveDiff graphdriver.DiffDriver + locker *locker.Locker +} + +// Init returns a new AUFS driver. +// An error is returned if AUFS is not supported.
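+// Init also refuses to run when the backing filesystem is itself aufs, btrfs or ecryptfs, since aufs is not supported on top of these filesystems.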
+func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + + // Try to load the aufs kernel module + if err := supportsAufs(); err != nil { + return nil, graphdriver.ErrNotSupported + } + + fsMagic, err := graphdriver.GetFSMagic(root) + if err != nil { + return nil, err + } + if fsName, ok := graphdriver.FsNames[fsMagic]; ok { + backingFs = fsName + } + + switch fsMagic { + case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicEcryptfs: + logrus.Errorf("AUFS is not supported over %s", backingFs) + return nil, graphdriver.ErrIncompatibleFS + } + + paths := []string{ + "mnt", + "diff", + "layers", + } + + a := &Driver{ + root: root, + uidMaps: uidMaps, + gidMaps: gidMaps, + pathCache: make(map[string]string), + ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicAufs)), + locker: locker.New(), + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + // Create the root aufs driver dir and return + // if it already exists. + // If not, populate the dir structure. + if err := idtools.MkdirAllAs(root, 0700, rootUID, rootGID); err != nil { + if os.IsExist(err) { + return a, nil + } + return nil, err + } + + if err := mountpk.MakePrivate(root); err != nil { + return nil, err + } + + // Populate the dir structure + for _, p := range paths { + if err := idtools.MkdirAllAs(path.Join(root, p), 0700, rootUID, rootGID); err != nil { + return nil, err + } + } + + a.naiveDiff = graphdriver.NewNaiveDiffDriver(a, uidMaps, gidMaps) + return a, nil +} + +// supportsAufs returns a nil error if the kernel supports aufs. +// Note that modprobe may legitimately fail (for example inside dind), +// so /proc/filesystems is consulted as well. +func supportsAufs() error { + // Try to modprobe aufs first before looking at + // /proc/filesystems to see whether aufs is supported + exec.Command("modprobe", "aufs").Run() + + if rsystem.RunningInUserNS() { + return ErrAufsNested + } + + f, err := os.Open("/proc/filesystems") + if err != nil { + return err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if strings.Contains(s.Text(), "aufs") { + return nil + } + } + return ErrAufsNotSupported +} + +func (a *Driver) rootPath() string { + return a.root +} + +func (*Driver) String() string { + return "aufs" +} + +// Status returns current information about the filesystem such as root directory, number of directories mounted, etc. +func (a *Driver) Status() [][2]string { + ids, _ := loadIds(path.Join(a.rootPath(), "layers")) + return [][2]string{ + {"Root Dir", a.rootPath()}, + {"Backing Filesystem", backingFs}, + {"Dirs", fmt.Sprintf("%d", len(ids))}, + {"Dirperm1 Supported", fmt.Sprintf("%v", useDirperm())}, + } +} + +// GetMetadata not implemented +func (a *Driver) GetMetadata(id string) (map[string]string, error) { + return nil, nil +} + +// Exists returns true if the given id is registered with +// this driver +func (a *Driver) Exists(id string) bool { + if _, err := os.Lstat(path.Join(a.rootPath(), "layers", id)); err != nil { + return false + } + return true +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system.
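+// For aufs there is no difference between a read-write and a read-only layer at creation time, so this simply delegates to Create.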
+func (a *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return a.Create(id, parent, opts) +} + +// Create three folders for each id +// mnt, layers, and diff +func (a *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + + if opts != nil && len(opts.StorageOpt) != 0 { + return fmt.Errorf("--storage-opt is not supported for aufs") + } + + if err := a.createDirsFor(id); err != nil { + return err + } + // Write the layers metadata + f, err := os.Create(path.Join(a.rootPath(), "layers", id)) + if err != nil { + return err + } + defer f.Close() + + if parent != "" { + ids, err := getParentIDs(a.rootPath(), parent) + if err != nil { + return err + } + + if _, err := fmt.Fprintln(f, parent); err != nil { + return err + } + for _, i := range ids { + if _, err := fmt.Fprintln(f, i); err != nil { + return err + } + } + } + + return nil +} + +// createDirsFor creates two directories for the given id. +// mnt and diff +func (a *Driver) createDirsFor(id string) error { + paths := []string{ + "mnt", + "diff", + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(a.uidMaps, a.gidMaps) + if err != nil { + return err + } + // Directory permission is 0755. + // The path of directories are <aufs_root_path>/mnt/<id> + // and <aufs_root_path>/diff/<id> + for _, p := range paths { + if err := idtools.MkdirAllAs(path.Join(a.rootPath(), p, id), 0755, rootUID, rootGID); err != nil { + return err + } + } + return nil +} + +// Remove will unmount and remove the given id. +func (a *Driver) Remove(id string) error { + a.locker.Lock(id) + defer a.locker.Unlock(id) + a.pathCacheLock.Lock() + mountpoint, exists := a.pathCache[id] + a.pathCacheLock.Unlock() + if !exists { + mountpoint = a.getMountpoint(id) + } + + logger := logrus.WithFields(logrus.Fields{ + "module": "graphdriver", + "driver": "aufs", + "layer": id, + }) + + var retries int + for { + mounted, err := a.mounted(mountpoint) + if err != nil { + if os.IsNotExist(err) { + break + } + return err + } + if !mounted { + break + } + + err = a.unmount(mountpoint) + if err == nil { + break + } + + if err != unix.EBUSY { + return errors.Wrapf(err, "aufs: unmount error: %s", mountpoint) + } + if retries >= 5 { + return errors.Wrapf(err, "aufs: unmount error after retries: %s", mountpoint) + } + // If unmount returns EBUSY, it could be a transient error. Sleep and retry. + retries++ + logger.Warnf("unmount failed due to EBUSY: retry count: %d", retries) + time.Sleep(100 * time.Millisecond) + continue + } + + // Atomically remove each directory in turn by first moving it out of the + // way (so that docker doesn't find it anymore) before doing removal of + // the whole tree.
+ tmpMntPath := path.Join(a.mntPath(), fmt.Sprintf("%s-removing", id)) + if err := os.Rename(mountpoint, tmpMntPath); err != nil && !os.IsNotExist(err) { + if err == unix.EBUSY { + logger.WithField("dir", mountpoint).WithError(err).Warn("os.Rename err due to EBUSY") + } + return errors.Wrapf(err, "error preparing atomic delete of aufs mountpoint for id: %s", id) + } + if err := system.EnsureRemoveAll(tmpMntPath); err != nil { + return errors.Wrapf(err, "error removing aufs layer %s", id) + } + + tmpDiffpath := path.Join(a.diffPath(), fmt.Sprintf("%s-removing", id)) + if err := os.Rename(a.getDiffPath(id), tmpDiffpath); err != nil && !os.IsNotExist(err) { + return errors.Wrapf(err, "error preparing atomic delete of aufs diff dir for id: %s", id) + } + + // Remove the layers file for the id + if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) { + return errors.Wrapf(err, "error removing layers dir for %s", id) + } + + a.pathCacheLock.Lock() + delete(a.pathCache, id) + a.pathCacheLock.Unlock() + return nil +} + +// Get returns the rootfs path for the id. +// This will mount the dir at its given path +func (a *Driver) Get(id, mountLabel string) (string, error) { + a.locker.Lock(id) + defer a.locker.Unlock(id) + parents, err := a.getParentLayerPaths(id) + if err != nil && !os.IsNotExist(err) { + return "", err + } + + a.pathCacheLock.Lock() + m, exists := a.pathCache[id] + a.pathCacheLock.Unlock() + + if !exists { + m = a.getDiffPath(id) + if len(parents) > 0 { + m = a.getMountpoint(id) + } + } + if count := a.ctr.Increment(m); count > 1 { + return m, nil + } + + // If a dir does not have a parent ( no layers )do not try to mount + // just return the diff path to the data + if len(parents) > 0 { + if err := a.mount(id, m, mountLabel, parents); err != nil { + return "", err + } + } + + a.pathCacheLock.Lock() + a.pathCache[id] = m + a.pathCacheLock.Unlock() + return m, nil +} + +// Put unmounts and updates list of active mounts. +func (a *Driver) Put(id string) error { + a.locker.Lock(id) + defer a.locker.Unlock(id) + a.pathCacheLock.Lock() + m, exists := a.pathCache[id] + if !exists { + m = a.getMountpoint(id) + a.pathCache[id] = m + } + a.pathCacheLock.Unlock() + if count := a.ctr.Decrement(m); count > 0 { + return nil + } + + err := a.unmount(m) + if err != nil { + logrus.Debugf("Failed to unmount %s aufs: %v", id, err) + } + return err +} + +// isParent returns if the passed in parent is the direct parent of the passed in layer +func (a *Driver) isParent(id, parent string) bool { + parents, _ := getParentIDs(a.rootPath(), id) + if parent == "" && len(parents) > 0 { + return false + } + return !(len(parents) > 0 && parent != parents[0]) +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (a *Driver) Diff(id, parent string) (io.ReadCloser, error) { + if !a.isParent(id, parent) { + return a.naiveDiff.Diff(id, parent) + } + + // AUFS doesn't need the parent layer to produce a diff. + return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ + Compression: archive.Uncompressed, + ExcludePatterns: []string{archive.WhiteoutMetaPrefix + "*", "!" 
+ archive.WhiteoutOpaqueDir}, + UIDMaps: a.uidMaps, + GIDMaps: a.gidMaps, + }) +} + +type fileGetNilCloser struct { + storage.FileGetter +} + +func (f fileGetNilCloser) Close() error { + return nil +} + +// DiffGetter returns a FileGetCloser that can read files from the directory that +// contains files for the layer differences. Used for direct access for tar-split. +func (a *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { + p := path.Join(a.rootPath(), "diff", id) + return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil +} + +func (a *Driver) applyDiff(id string, diff io.Reader) error { + return chrootarchive.UntarUncompressed(diff, path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ + UIDMaps: a.uidMaps, + GIDMaps: a.gidMaps, + }) +} + +// DiffSize calculates the changes between the specified id +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (a *Driver) DiffSize(id, parent string) (size int64, err error) { + if !a.isParent(id, parent) { + return a.naiveDiff.DiffSize(id, parent) + } + // AUFS doesn't need the parent layer to calculate the diff size. + return directory.Size(path.Join(a.rootPath(), "diff", id)) +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +func (a *Driver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) { + if !a.isParent(id, parent) { + return a.naiveDiff.ApplyDiff(id, parent, diff) + } + + // AUFS doesn't need the parent id to apply the diff if it is the direct parent. + if err = a.applyDiff(id, diff); err != nil { + return + } + + return a.DiffSize(id, parent) +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (a *Driver) Changes(id, parent string) ([]archive.Change, error) { + if !a.isParent(id, parent) { + return a.naiveDiff.Changes(id, parent) + } + + // AUFS doesn't have snapshots, so we need to get changes from all parent + // layers. 
+ layers, err := a.getParentLayerPaths(id) + if err != nil { + return nil, err + } + return archive.Changes(layers, path.Join(a.rootPath(), "diff", id)) +} + +func (a *Driver) getParentLayerPaths(id string) ([]string, error) { + parentIds, err := getParentIDs(a.rootPath(), id) + if err != nil { + return nil, err + } + layers := make([]string, len(parentIds)) + + // Get the diff paths for all the parent ids + for i, p := range parentIds { + layers[i] = path.Join(a.rootPath(), "diff", p) + } + return layers, nil +} + +func (a *Driver) mount(id string, target string, mountLabel string, layers []string) error { + a.Lock() + defer a.Unlock() + + // If the id is mounted or we get an error return + if mounted, err := a.mounted(target); err != nil || mounted { + return err + } + + rw := a.getDiffPath(id) + + if err := a.aufsMount(layers, rw, target, mountLabel); err != nil { + return fmt.Errorf("error creating aufs mount to %s: %v", target, err) + } + return nil +} + +func (a *Driver) unmount(mountPath string) error { + a.Lock() + defer a.Unlock() + + if mounted, err := a.mounted(mountPath); err != nil || !mounted { + return err + } + if err := Unmount(mountPath); err != nil { + return err + } + return nil +} + +func (a *Driver) mounted(mountpoint string) (bool, error) { + return graphdriver.Mounted(graphdriver.FsMagicAufs, mountpoint) +} + +// Cleanup aufs and unmount all mountpoints +func (a *Driver) Cleanup() error { + var dirs []string + if err := filepath.Walk(a.mntPath(), func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() { + return nil + } + dirs = append(dirs, path) + return nil + }); err != nil { + return err + } + + for _, m := range dirs { + if err := a.unmount(m); err != nil { + logrus.Debugf("aufs error unmounting %s: %s", m, err) + } + } + return mountpk.Unmount(a.root) +} + +func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err error) { + defer func() { + if err != nil { + Unmount(target) + } + }() + + // Mount options are clipped to page size(4096 bytes). If there are more + // layers then these are remounted individually using append. + + offset := 54 + if useDirperm() { + offset += len(",dirperm1") + } + b := make([]byte, unix.Getpagesize()-len(mountLabel)-offset) // room for xino & mountLabel + bp := copy(b, fmt.Sprintf("br:%s=rw", rw)) + + index := 0 + for ; index < len(ro); index++ { + layer := fmt.Sprintf(":%s=ro+wh", ro[index]) + if bp+len(layer) > len(b) { + break + } + bp += copy(b[bp:], layer) + } + + opts := "dio,xino=/dev/shm/aufs.xino" + if useDirperm() { + opts += ",dirperm1" + } + data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), mountLabel) + if err = mount("none", target, "aufs", 0, data); err != nil { + return + } + + for ; index < len(ro); index++ { + layer := fmt.Sprintf(":%s=ro+wh", ro[index]) + data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel) + if err = mount("none", target, "aufs", unix.MS_REMOUNT, data); err != nil { + return + } + } + + return +} + +// useDirperm checks dirperm1 mount option can be used with the current +// version of aufs. 
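[Editor's note] The subtle part of aufsMount above is option packing: the kernel accepts at most one page of mount data, so the first mount takes as many branches as fit and the remainder are applied one at a time as MS_REMOUNT "append" options. The following standalone sketch mirrors that packing logic; it is illustrative only, with hypothetical paths and a fixed headroom constant in place of the driver's exact offset arithmetic.

    // Illustrative sketch of aufs branch-option packing; not vendored code.
    package main

    import (
    	"fmt"
    	"os"
    )

    // packBranches returns the first-mount option string plus the overflow
    // branches that would be applied later via MS_REMOUNT append options.
    func packBranches(rw string, ro []string, pageSize int) (first string, rest []string) {
    	b := make([]byte, pageSize-64) // crude headroom for xino/dirperm1 and the label
    	bp := copy(b, fmt.Sprintf("br:%s=rw", rw))
    	i := 0
    	for ; i < len(ro); i++ {
    		layer := fmt.Sprintf(":%s=ro+wh", ro[i])
    		if bp+len(layer) > len(b) {
    			break // out of room: remaining layers become remount appends
    		}
    		bp += copy(b[bp:], layer)
    	}
    	for ; i < len(ro); i++ {
    		rest = append(rest, fmt.Sprintf("append:%s=ro+wh", ro[i]))
    	}
    	return string(b[:bp]), rest
    }

    func main() {
    	first, rest := packBranches("/aufs/diff/top",
    		[]string{"/aufs/diff/p1", "/aufs/diff/p2"}, os.Getpagesize())
    	fmt.Println(first, "| deferred:", len(rest))
    }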
+func useDirperm() bool { + enableDirpermLock.Do(func() { + base, err := ioutil.TempDir("", "docker-aufs-base") + if err != nil { + logrus.Errorf("error checking dirperm1: %v", err) + return + } + defer os.RemoveAll(base) + + union, err := ioutil.TempDir("", "docker-aufs-union") + if err != nil { + logrus.Errorf("error checking dirperm1: %v", err) + return + } + defer os.RemoveAll(union) + + opts := fmt.Sprintf("br:%s,dirperm1,xino=/dev/shm/aufs.xino", base) + if err := mount("none", union, "aufs", 0, opts); err != nil { + return + } + enableDirperm = true + if err := Unmount(union); err != nil { + logrus.Errorf("error checking dirperm1: failed to unmount %v", err) + } + }) + return enableDirperm +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/aufs/aufs_test.go b/vendor/github.com/moby/moby/daemon/graphdriver/aufs/aufs_test.go new file mode 100644 index 000000000..baf0fd89f --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/aufs/aufs_test.go @@ -0,0 +1,802 @@ +// +build linux + +package aufs + +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "io/ioutil" + "os" + "path" + "sync" + "testing" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/docker/pkg/stringid" +) + +var ( + tmpOuter = path.Join(os.TempDir(), "aufs-tests") + tmp = path.Join(tmpOuter, "aufs") +) + +func init() { + reexec.Init() +} + +func testInit(dir string, t testing.TB) graphdriver.Driver { + d, err := Init(dir, nil, nil, nil) + if err != nil { + if err == graphdriver.ErrNotSupported { + t.Skip(err) + } else { + t.Fatal(err) + } + } + return d +} + +func newDriver(t testing.TB) *Driver { + if err := os.MkdirAll(tmp, 0755); err != nil { + t.Fatal(err) + } + + d := testInit(tmp, t) + return d.(*Driver) +} + +func TestNewDriver(t *testing.T) { + if err := os.MkdirAll(tmp, 0755); err != nil { + t.Fatal(err) + } + + d := testInit(tmp, t) + defer os.RemoveAll(tmp) + if d == nil { + t.Fatal("Driver should not be nil") + } +} + +func TestAufsString(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if d.String() != "aufs" { + t.Fatalf("Expected aufs got %s", d.String()) + } +} + +func TestCreateDirStructure(t *testing.T) { + newDriver(t) + defer os.RemoveAll(tmp) + + paths := []string{ + "mnt", + "layers", + "diff", + } + + for _, p := range paths { + if _, err := os.Stat(path.Join(tmp, p)); err != nil { + t.Fatal(err) + } + } +} + +// We should be able to create two drivers with the same dir structure +func TestNewDriverFromExistingDir(t *testing.T) { + if err := os.MkdirAll(tmp, 0755); err != nil { + t.Fatal(err) + } + + testInit(tmp, t) + testInit(tmp, t) + os.RemoveAll(tmp) +} + +func TestCreateNewDir(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } +} + +func TestCreateNewDirStructure(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + + paths := []string{ + "mnt", + "diff", + "layers", + } + + for _, p := range paths { + if _, err := os.Stat(path.Join(tmp, p, "1")); err != nil { + t.Fatal(err) + } + } +} + +func TestRemoveImage(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + + if err := d.Remove("1"); err != nil { + t.Fatal(err) + } + + paths := []string{ + "mnt", + "diff", + "layers", + } + + for _, p := range paths { + if _, 
err := os.Stat(path.Join(tmp, p, "1")); err == nil {
+			t.Fatalf("Error should not be nil because dirs with id 1 should be deleted: %s", p)
+		}
+	}
+}
+
+func TestGetWithoutParent(t *testing.T) {
+	d := newDriver(t)
+	defer os.RemoveAll(tmp)
+
+	if err := d.Create("1", "", nil); err != nil {
+		t.Fatal(err)
+	}
+
+	diffPath, err := d.Get("1", "")
+	if err != nil {
+		t.Fatal(err)
+	}
+	expected := path.Join(tmp, "diff", "1")
+	if diffPath != expected {
+		t.Fatalf("Expected path %s got %s", expected, diffPath)
+	}
+}
+
+func TestCleanupWithNoDirs(t *testing.T) {
+	d := newDriver(t)
+	defer os.RemoveAll(tmp)
+
+	if err := d.Cleanup(); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestCleanupWithDir(t *testing.T) {
+	d := newDriver(t)
+	defer os.RemoveAll(tmp)
+
+	if err := d.Create("1", "", nil); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := d.Cleanup(); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestMountedFalseResponse(t *testing.T) {
+	d := newDriver(t)
+	defer os.RemoveAll(tmp)
+
+	if err := d.Create("1", "", nil); err != nil {
+		t.Fatal(err)
+	}
+
+	response, err := d.mounted(d.getDiffPath("1"))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if response != false {
+		t.Fatal("Mounted response for dir id 1 should be false")
+	}
+}
+
+func TestMountedTrueResponse(t *testing.T) {
+	d := newDriver(t)
+	defer os.RemoveAll(tmp)
+	defer d.Cleanup()
+
+	if err := d.Create("1", "", nil); err != nil {
+		t.Fatal(err)
+	}
+	if err := d.Create("2", "1", nil); err != nil {
+		t.Fatal(err)
+	}
+
+	_, err := d.Get("2", "")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	response, err := d.mounted(d.pathCache["2"])
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if response != true {
+		t.Fatal("Mounted response for dir id 2 should be true")
+	}
+}
+
+func TestMountWithParent(t *testing.T) {
+	d := newDriver(t)
+	defer os.RemoveAll(tmp)
+
+	if err := d.Create("1", "", nil); err != nil {
+		t.Fatal(err)
+	}
+	if err := d.Create("2", "1", nil); err != nil {
+		t.Fatal(err)
+	}
+
+	defer func() {
+		if err := d.Cleanup(); err != nil {
+			t.Fatal(err)
+		}
+	}()
+
+	mntPath, err := d.Get("2", "")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if mntPath == "" {
+		t.Fatal("mntPath should not be empty string")
+	}
+
+	expected := path.Join(tmp, "mnt", "2")
+	if mntPath != expected {
+		t.Fatalf("Expected %s got %s", expected, mntPath)
+	}
+}
+
+func TestRemoveMountedDir(t *testing.T) {
+	d := newDriver(t)
+	defer os.RemoveAll(tmp)
+
+	if err := d.Create("1", "", nil); err != nil {
+		t.Fatal(err)
+	}
+	if err := d.Create("2", "1", nil); err != nil {
+		t.Fatal(err)
+	}
+
+	defer func() {
+		if err := d.Cleanup(); err != nil {
+			t.Fatal(err)
+		}
+	}()
+
+	mntPath, err := d.Get("2", "")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if mntPath == "" {
+		t.Fatal("mntPath should not be empty string")
+	}
+
+	mounted, err := d.mounted(d.pathCache["2"])
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !mounted {
+		t.Fatal("Dir id 2 should be mounted")
+	}
+
+	if err := d.Remove("2"); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestCreateWithInvalidParent(t *testing.T) {
+	d := newDriver(t)
+	defer os.RemoveAll(tmp)
+
+	if err := d.Create("1", "docker", nil); err == nil {
+		t.Fatal("Error should not be nil when parent does not exist")
+	}
+}
+
+func TestGetDiff(t *testing.T) {
+	d := newDriver(t)
+	defer os.RemoveAll(tmp)
+
+	if err := d.CreateReadWrite("1", "", nil); err != nil {
+		t.Fatal(err)
+	}
+
+	diffPath, err := d.Get("1", "")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Add a file to the diff path with a fixed size
+	size := int64(1024)
+
+	f, err :=
os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + f.Close() + + a, err := d.Diff("1", "") + if err != nil { + t.Fatal(err) + } + if a == nil { + t.Fatal("Archive should not be nil") + } +} + +func TestChanges(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + + if err := d.CreateReadWrite("2", "1", nil); err != nil { + t.Fatal(err) + } + + defer func() { + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } + }() + + mntPoint, err := d.Get("2", "") + if err != nil { + t.Fatal(err) + } + + // Create a file to save in the mountpoint + f, err := os.Create(path.Join(mntPoint, "test.txt")) + if err != nil { + t.Fatal(err) + } + + if _, err := f.WriteString("testline"); err != nil { + t.Fatal(err) + } + if err := f.Close(); err != nil { + t.Fatal(err) + } + + changes, err := d.Changes("2", "") + if err != nil { + t.Fatal(err) + } + if len(changes) != 1 { + t.Fatalf("Dir 2 should have one change from parent got %d", len(changes)) + } + change := changes[0] + + expectedPath := "/test.txt" + if change.Path != expectedPath { + t.Fatalf("Expected path %s got %s", expectedPath, change.Path) + } + + if change.Kind != archive.ChangeAdd { + t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind) + } + + if err := d.CreateReadWrite("3", "2", nil); err != nil { + t.Fatal(err) + } + mntPoint, err = d.Get("3", "") + if err != nil { + t.Fatal(err) + } + + // Create a file to save in the mountpoint + f, err = os.Create(path.Join(mntPoint, "test2.txt")) + if err != nil { + t.Fatal(err) + } + + if _, err := f.WriteString("testline"); err != nil { + t.Fatal(err) + } + if err := f.Close(); err != nil { + t.Fatal(err) + } + + changes, err = d.Changes("3", "2") + if err != nil { + t.Fatal(err) + } + + if len(changes) != 1 { + t.Fatalf("Dir 2 should have one change from parent got %d", len(changes)) + } + change = changes[0] + + expectedPath = "/test2.txt" + if change.Path != expectedPath { + t.Fatalf("Expected path %s got %s", expectedPath, change.Path) + } + + if change.Kind != archive.ChangeAdd { + t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind) + } +} + +func TestDiffSize(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.CreateReadWrite("1", "", nil); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + s, err := f.Stat() + if err != nil { + t.Fatal(err) + } + size = s.Size() + if err := f.Close(); err != nil { + t.Fatal(err) + } + + diffSize, err := d.DiffSize("1", "") + if err != nil { + t.Fatal(err) + } + if diffSize != size { + t.Fatalf("Expected size to be %d got %d", size, diffSize) + } +} + +func TestChildDiffSize(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.CreateReadWrite("1", "", nil); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + s, err := f.Stat() + if 
err != nil { + t.Fatal(err) + } + size = s.Size() + if err := f.Close(); err != nil { + t.Fatal(err) + } + + diffSize, err := d.DiffSize("1", "") + if err != nil { + t.Fatal(err) + } + if diffSize != size { + t.Fatalf("Expected size to be %d got %d", size, diffSize) + } + + if err := d.Create("2", "1", nil); err != nil { + t.Fatal(err) + } + + diffSize, err = d.DiffSize("2", "1") + if err != nil { + t.Fatal(err) + } + // The diff size for the child should be zero + if diffSize != 0 { + t.Fatalf("Expected size to be %d got %d", 0, diffSize) + } +} + +func TestExists(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + + if d.Exists("none") { + t.Fatal("id none should not exist in the driver") + } + + if !d.Exists("1") { + t.Fatal("id 1 should exist in the driver") + } +} + +func TestStatus(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + + status := d.Status() + if status == nil || len(status) == 0 { + t.Fatal("Status should not be nil or empty") + } + rootDir := status[0] + dirs := status[2] + if rootDir[0] != "Root Dir" { + t.Fatalf("Expected Root Dir got %s", rootDir[0]) + } + if rootDir[1] != d.rootPath() { + t.Fatalf("Expected %s got %s", d.rootPath(), rootDir[1]) + } + if dirs[0] != "Dirs" { + t.Fatalf("Expected Dirs got %s", dirs[0]) + } + if dirs[1] != "1" { + t.Fatalf("Expected 1 got %s", dirs[1]) + } +} + +func TestApplyDiff(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.CreateReadWrite("1", "", nil); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + f.Close() + + diff, err := d.Diff("1", "") + if err != nil { + t.Fatal(err) + } + + if err := d.Create("2", "", nil); err != nil { + t.Fatal(err) + } + if err := d.Create("3", "2", nil); err != nil { + t.Fatal(err) + } + + if err := d.applyDiff("3", diff); err != nil { + t.Fatal(err) + } + + // Ensure that the file is in the mount point for id 3 + + mountPoint, err := d.Get("3", "") + if err != nil { + t.Fatal(err) + } + if _, err := os.Stat(path.Join(mountPoint, "test_file")); err != nil { + t.Fatal(err) + } +} + +func hash(c string) string { + h := sha256.New() + fmt.Fprint(h, c) + return hex.EncodeToString(h.Sum(nil)) +} + +func testMountMoreThan42Layers(t *testing.T, mountPath string) { + if err := os.MkdirAll(mountPath, 0755); err != nil { + t.Fatal(err) + } + + defer os.RemoveAll(mountPath) + d := testInit(mountPath, t).(*Driver) + defer d.Cleanup() + var last string + var expected int + + for i := 1; i < 127; i++ { + expected++ + var ( + parent = fmt.Sprintf("%d", i-1) + current = fmt.Sprintf("%d", i) + ) + + if parent == "0" { + parent = "" + } else { + parent = hash(parent) + } + current = hash(current) + + if err := d.CreateReadWrite(current, parent, nil); err != nil { + t.Logf("Current layer %d", i) + t.Error(err) + } + point, err := d.Get(current, "") + if err != nil { + t.Logf("Current layer %d", i) + t.Error(err) + } + f, err := os.Create(path.Join(point, current)) + if err != nil { + t.Logf("Current layer %d", i) + t.Error(err) + } + f.Close() + + if i%10 == 0 { + if err := 
os.Remove(path.Join(point, parent)); err != nil { + t.Logf("Current layer %d", i) + t.Error(err) + } + expected-- + } + last = current + } + + // Perform the actual mount for the top most image + point, err := d.Get(last, "") + if err != nil { + t.Error(err) + } + files, err := ioutil.ReadDir(point) + if err != nil { + t.Error(err) + } + if len(files) != expected { + t.Errorf("Expected %d got %d", expected, len(files)) + } +} + +func TestMountMoreThan42Layers(t *testing.T) { + os.RemoveAll(tmpOuter) + testMountMoreThan42Layers(t, tmp) +} + +func TestMountMoreThan42LayersMatchingPathLength(t *testing.T) { + defer os.RemoveAll(tmpOuter) + zeroes := "0" + for { + // This finds a mount path so that when combined into aufs mount options + // 4096 byte boundary would be in between the paths or in permission + // section. For '/tmp' it will use '/tmp/aufs-tests/00000000/aufs' + mountPath := path.Join(tmpOuter, zeroes, "aufs") + pathLength := 77 + len(mountPath) + + if mod := 4095 % pathLength; mod == 0 || mod > pathLength-2 { + t.Logf("Using path: %s", mountPath) + testMountMoreThan42Layers(t, mountPath) + return + } + zeroes += "0" + } +} + +func BenchmarkConcurrentAccess(b *testing.B) { + b.StopTimer() + b.ResetTimer() + + d := newDriver(b) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + numConcurrent := 256 + // create a bunch of ids + var ids []string + for i := 0; i < numConcurrent; i++ { + ids = append(ids, stringid.GenerateNonCryptoID()) + } + + if err := d.Create(ids[0], "", nil); err != nil { + b.Fatal(err) + } + + if err := d.Create(ids[1], ids[0], nil); err != nil { + b.Fatal(err) + } + + parent := ids[1] + ids = append(ids[2:]) + + chErr := make(chan error, numConcurrent) + var outerGroup sync.WaitGroup + outerGroup.Add(len(ids)) + b.StartTimer() + + // here's the actual bench + for _, id := range ids { + go func(id string) { + defer outerGroup.Done() + if err := d.Create(id, parent, nil); err != nil { + b.Logf("Create %s failed", id) + chErr <- err + return + } + var innerGroup sync.WaitGroup + for i := 0; i < b.N; i++ { + innerGroup.Add(1) + go func() { + d.Get(id, "") + d.Put(id) + innerGroup.Done() + }() + } + innerGroup.Wait() + d.Remove(id) + }(id) + } + + outerGroup.Wait() + b.StopTimer() + close(chErr) + for err := range chErr { + if err != nil { + b.Log(err) + b.Fail() + } + } +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/aufs/dirs.go b/vendor/github.com/moby/moby/daemon/graphdriver/aufs/dirs.go new file mode 100644 index 000000000..d2325fc46 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/aufs/dirs.go @@ -0,0 +1,64 @@ +// +build linux + +package aufs + +import ( + "bufio" + "io/ioutil" + "os" + "path" +) + +// Return all the directories +func loadIds(root string) ([]string, error) { + dirs, err := ioutil.ReadDir(root) + if err != nil { + return nil, err + } + out := []string{} + for _, d := range dirs { + if !d.IsDir() { + out = append(out, d.Name()) + } + } + return out, nil +} + +// Read the layers file for the current id and return all the +// layers represented by new lines in the file +// +// If there are no lines in the file then the id has no parent +// and an empty slice is returned. 
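[Editor's note] The layers file that getParentIDs reads is the same one Create writes in aufs.go: the direct parent first, then that parent's own ancestors, one ID per line. A toy round-trip under a temporary root, illustrative only:

    // Illustrative round-trip of the "layers" metadata format; not vendored code.
    package main

    import (
    	"bufio"
    	"fmt"
    	"io/ioutil"
    	"os"
    	"path"
    )

    func main() {
    	root, _ := ioutil.TempDir("", "aufs-layers")
    	defer os.RemoveAll(root)
    	os.MkdirAll(path.Join(root, "layers"), 0700)

    	// Layer "c" has direct parent "b", which in turn has parent "a".
    	f, _ := os.Create(path.Join(root, "layers", "c"))
    	fmt.Fprintln(f, "b")
    	fmt.Fprintln(f, "a")
    	f.Close()

    	// Read it back the way getParentIDs does, skipping blank lines.
    	f, _ = os.Open(path.Join(root, "layers", "c"))
    	defer f.Close()
    	var ids []string
    	s := bufio.NewScanner(f)
    	for s.Scan() {
    		if t := s.Text(); t != "" {
    			ids = append(ids, t)
    		}
    	}
    	fmt.Println(ids) // [b a]: nearest ancestor first
    }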
+func getParentIDs(root, id string) ([]string, error) { + f, err := os.Open(path.Join(root, "layers", id)) + if err != nil { + return nil, err + } + defer f.Close() + + out := []string{} + s := bufio.NewScanner(f) + + for s.Scan() { + if t := s.Text(); t != "" { + out = append(out, s.Text()) + } + } + return out, s.Err() +} + +func (a *Driver) getMountpoint(id string) string { + return path.Join(a.mntPath(), id) +} + +func (a *Driver) mntPath() string { + return path.Join(a.rootPath(), "mnt") +} + +func (a *Driver) getDiffPath(id string) string { + return path.Join(a.diffPath(), id) +} + +func (a *Driver) diffPath() string { + return path.Join(a.rootPath(), "diff") +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/aufs/mount.go b/vendor/github.com/moby/moby/daemon/graphdriver/aufs/mount.go new file mode 100644 index 000000000..890213b80 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/aufs/mount.go @@ -0,0 +1,21 @@ +// +build linux + +package aufs + +import ( + "os/exec" + + "github.com/Sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// Unmount the target specified. +func Unmount(target string) error { + if err := exec.Command("auplink", target, "flush").Run(); err != nil { + logrus.Warnf("Couldn't run auplink before unmount %s: %s", target, err) + } + if err := unix.Unmount(target, 0); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/aufs/mount_linux.go b/vendor/github.com/moby/moby/daemon/graphdriver/aufs/mount_linux.go new file mode 100644 index 000000000..937104ba3 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/aufs/mount_linux.go @@ -0,0 +1,7 @@ +package aufs + +import "golang.org/x/sys/unix" + +func mount(source string, target string, fstype string, flags uintptr, data string) error { + return unix.Mount(source, target, fstype, flags, data) +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/aufs/mount_unsupported.go b/vendor/github.com/moby/moby/daemon/graphdriver/aufs/mount_unsupported.go new file mode 100644 index 000000000..d030b0663 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/aufs/mount_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux + +package aufs + +import "errors" + +// MsRemount declared to specify a non-linux system mount. 
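[Editor's note] Unmount in mount.go flushes aufs pseudo-links through the auplink helper before calling unix.Unmount; a flush failure is only logged as a warning. A hedged sketch of sweeping an assumed mnt directory the way Cleanup does, illustrative only:

    // Illustrative only; mirrors the Cleanup walk-and-unmount pattern.
    package main

    import (
    	"io/ioutil"
    	"log"
    	"path"

    	"github.com/docker/docker/daemon/graphdriver/aufs"
    )

    func main() {
    	mntRoot := "/var/lib/docker-test/aufs/mnt" // hypothetical mnt dir
    	entries, err := ioutil.ReadDir(mntRoot)
    	if err != nil {
    		log.Fatal(err)
    	}
    	for _, e := range entries {
    		if !e.IsDir() {
    			continue
    		}
    		m := path.Join(mntRoot, e.Name())
    		// Unmount runs `auplink <dir> flush` first, then unix.Unmount.
    		if err := aufs.Unmount(m); err != nil {
    			log.Printf("unmount %s: %v", m, err)
    		}
    	}
    }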
+const MsRemount = 0
+
+func mount(source string, target string, fstype string, flags uintptr, data string) (err error) {
+	return errors.New("mount is not implemented on this platform")
+}
diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/btrfs/btrfs.go b/vendor/github.com/moby/moby/daemon/graphdriver/btrfs/btrfs.go
new file mode 100644
index 000000000..25998422c
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/graphdriver/btrfs/btrfs.go
@@ -0,0 +1,671 @@
+// +build linux
+
+package btrfs
+
+/*
+#include <stdlib.h>
+#include <dirent.h>
+#include <btrfs/ioctl.h>
+#include <btrfs/ctree.h>
+
+static void set_name_btrfs_ioctl_vol_args_v2(struct btrfs_ioctl_vol_args_v2* btrfs_struct, const char* value) {
+	snprintf(btrfs_struct->name, BTRFS_SUBVOL_NAME_MAX, "%s", value);
+}
+*/
+import "C"
+
+import (
+	"fmt"
+	"io/ioutil"
+	"math"
+	"os"
+	"path"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+	"unsafe"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/docker/docker/pkg/mount"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/system"
+	"github.com/docker/go-units"
+	"github.com/opencontainers/selinux/go-selinux/label"
+	"golang.org/x/sys/unix"
+)
+
+func init() {
+	graphdriver.Register("btrfs", Init)
+}
+
+type btrfsOptions struct {
+	minSpace uint64
+	size     uint64
+}
+
+// Init returns a new BTRFS driver.
+// An error is returned if BTRFS is not supported.
+func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
+
+	fsMagic, err := graphdriver.GetFSMagic(home)
+	if err != nil {
+		return nil, err
+	}
+
+	if fsMagic != graphdriver.FsMagicBtrfs {
+		return nil, graphdriver.ErrPrerequisites
+	}
+
+	rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
+	if err != nil {
+		return nil, err
+	}
+	if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil {
+		return nil, err
+	}
+
+	if err := mount.MakePrivate(home); err != nil {
+		return nil, err
+	}
+
+	opt, userDiskQuota, err := parseOptions(options)
+	if err != nil {
+		return nil, err
+	}
+
+	driver := &Driver{
+		home:    home,
+		uidMaps: uidMaps,
+		gidMaps: gidMaps,
+		options: opt,
+	}
+
+	if userDiskQuota {
+		if err := driver.subvolEnableQuota(); err != nil {
+			return nil, err
+		}
+	}
+
+	return graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps), nil
+}
+
+func parseOptions(opt []string) (btrfsOptions, bool, error) {
+	var options btrfsOptions
+	userDiskQuota := false
+	for _, option := range opt {
+		key, val, err := parsers.ParseKeyValueOpt(option)
+		if err != nil {
+			return options, userDiskQuota, err
+		}
+		key = strings.ToLower(key)
+		switch key {
+		case "btrfs.min_space":
+			minSpace, err := units.RAMInBytes(val)
+			if err != nil {
+				return options, userDiskQuota, err
+			}
+			userDiskQuota = true
+			options.minSpace = uint64(minSpace)
+		default:
+			return options, userDiskQuota, fmt.Errorf("Unknown option %s", key)
+		}
+	}
+	return options, userDiskQuota, nil
+}
+
+// Driver contains information about the filesystem mounted.
+type Driver struct {
+	//root of the file system
+	home         string
+	uidMaps      []idtools.IDMap
+	gidMaps      []idtools.IDMap
+	options      btrfsOptions
+	quotaEnabled bool
+	once         sync.Once
+}
+
+// String prints the name of the driver (btrfs).
+func (d *Driver) String() string {
+	return "btrfs"
+}
+
+// Status returns current driver information in a two dimensional string array.
+// Output contains "Build Version" and "Library Version" of the btrfs libraries used.
+// Version information can be used to check compatibility with your kernel. +func (d *Driver) Status() [][2]string { + status := [][2]string{} + if bv := btrfsBuildVersion(); bv != "-" { + status = append(status, [2]string{"Build Version", bv}) + } + if lv := btrfsLibVersion(); lv != -1 { + status = append(status, [2]string{"Library Version", fmt.Sprintf("%d", lv)}) + } + return status +} + +// GetMetadata returns empty metadata for this driver. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + return nil, nil +} + +// Cleanup unmounts the home directory. +func (d *Driver) Cleanup() error { + if err := d.subvolDisableQuota(); err != nil { + return err + } + + return mount.Unmount(d.home) +} + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func openDir(path string) (*C.DIR, error) { + Cpath := C.CString(path) + defer free(Cpath) + + dir := C.opendir(Cpath) + if dir == nil { + return nil, fmt.Errorf("Can't open dir") + } + return dir, nil +} + +func closeDir(dir *C.DIR) { + if dir != nil { + C.closedir(dir) + } +} + +func getDirFd(dir *C.DIR) uintptr { + return uintptr(C.dirfd(dir)) +} + +func subvolCreate(path, name string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_vol_args + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error()) + } + return nil +} + +func subvolSnapshot(src, dest, name string) error { + srcDir, err := openDir(src) + if err != nil { + return err + } + defer closeDir(srcDir) + + destDir, err := openDir(dest) + if err != nil { + return err + } + defer closeDir(destDir) + + var args C.struct_btrfs_ioctl_vol_args_v2 + args.fd = C.__s64(getDirFd(srcDir)) + + var cs = C.CString(name) + C.set_name_btrfs_ioctl_vol_args_v2(&args, cs) + C.free(unsafe.Pointer(cs)) + + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error()) + } + return nil +} + +func isSubvolume(p string) (bool, error) { + var bufStat unix.Stat_t + if err := unix.Lstat(p, &bufStat); err != nil { + return false, err + } + + // return true if it is a btrfs subvolume + return bufStat.Ino == C.BTRFS_FIRST_FREE_OBJECTID, nil +} + +func subvolDelete(dirpath, name string, quotaEnabled bool) error { + dir, err := openDir(dirpath) + if err != nil { + return err + } + defer closeDir(dir) + fullPath := path.Join(dirpath, name) + + var args C.struct_btrfs_ioctl_vol_args + + // walk the btrfs subvolumes + walkSubvolumes := func(p string, f os.FileInfo, err error) error { + if err != nil { + if os.IsNotExist(err) && p != fullPath { + // missing most likely because the path was a subvolume that got removed in the previous iteration + // since it's gone anyway, we don't care + return nil + } + return fmt.Errorf("error walking subvolumes: %v", err) + } + // we want to check children only so skip itself + // it will be removed after the filepath walk anyways + if f.IsDir() && p != fullPath { + sv, err := isSubvolume(p) + if err != nil { + return fmt.Errorf("Failed to test if %s is a btrfs subvolume: %v", p, err) + } + if sv { + if err := subvolDelete(path.Dir(p), f.Name(), quotaEnabled); err != nil { + return fmt.Errorf("Failed to destroy 
btrfs child subvolume (%s) of parent (%s): %v", p, dirpath, err) + } + } + } + return nil + } + if err := filepath.Walk(path.Join(dirpath, name), walkSubvolumes); err != nil { + return fmt.Errorf("Recursively walking subvolumes for %s failed: %v", dirpath, err) + } + + if quotaEnabled { + if qgroupid, err := subvolLookupQgroup(fullPath); err == nil { + var args C.struct_btrfs_ioctl_qgroup_create_args + args.qgroupid = C.__u64(qgroupid) + + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_CREATE, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + logrus.Errorf("Failed to delete btrfs qgroup %v for %s: %v", qgroupid, fullPath, errno.Error()) + } + } else { + logrus.Errorf("Failed to lookup btrfs qgroup for %s: %v", fullPath, err.Error()) + } + } + + // all subvolumes have been removed + // now remove the one originally passed in + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to destroy btrfs snapshot %s for %s: %v", dirpath, name, errno.Error()) + } + return nil +} + +func (d *Driver) updateQuotaStatus() { + d.once.Do(func() { + if !d.quotaEnabled { + // In case quotaEnabled is not set, check qgroup and update quotaEnabled as needed + if err := subvolQgroupStatus(d.home); err != nil { + // quota is still not enabled + return + } + d.quotaEnabled = true + } + }) +} + +func (d *Driver) subvolEnableQuota() error { + d.updateQuotaStatus() + + if d.quotaEnabled { + return nil + } + + dir, err := openDir(d.home) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_quota_ctl_args + args.cmd = C.BTRFS_QUOTA_CTL_ENABLE + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to enable btrfs quota for %s: %v", dir, errno.Error()) + } + + d.quotaEnabled = true + + return nil +} + +func (d *Driver) subvolDisableQuota() error { + d.updateQuotaStatus() + + if !d.quotaEnabled { + return nil + } + + dir, err := openDir(d.home) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_quota_ctl_args + args.cmd = C.BTRFS_QUOTA_CTL_DISABLE + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to disable btrfs quota for %s: %v", dir, errno.Error()) + } + + d.quotaEnabled = false + + return nil +} + +func (d *Driver) subvolRescanQuota() error { + d.updateQuotaStatus() + + if !d.quotaEnabled { + return nil + } + + dir, err := openDir(d.home) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_quota_rescan_args + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_RESCAN_WAIT, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to rescan btrfs quota for %s: %v", dir, errno.Error()) + } + + return nil +} + +func subvolLimitQgroup(path string, size uint64) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_qgroup_limit_args + args.lim.max_referenced = C.__u64(size) + args.lim.flags = C.BTRFS_QGROUP_LIMIT_MAX_RFER + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_LIMIT, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + 
return fmt.Errorf("Failed to limit qgroup for %s: %v", dir, errno.Error()) + } + + return nil +} + +// subvolQgroupStatus performs a BTRFS_IOC_TREE_SEARCH on the root path +// with search key of BTRFS_QGROUP_STATUS_KEY. +// In case qgroup is enabled, the retuned key type will match BTRFS_QGROUP_STATUS_KEY. +// For more details please see https://github.com/kdave/btrfs-progs/blob/v4.9/qgroup.c#L1035 +func subvolQgroupStatus(path string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_search_args + args.key.tree_id = C.BTRFS_QUOTA_TREE_OBJECTID + args.key.min_type = C.BTRFS_QGROUP_STATUS_KEY + args.key.max_type = C.BTRFS_QGROUP_STATUS_KEY + args.key.max_objectid = C.__u64(math.MaxUint64) + args.key.max_offset = C.__u64(math.MaxUint64) + args.key.max_transid = C.__u64(math.MaxUint64) + args.key.nr_items = 4096 + + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_TREE_SEARCH, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to search qgroup for %s: %v", path, errno.Error()) + } + sh := (*C.struct_btrfs_ioctl_search_header)(unsafe.Pointer(&args.buf)) + if sh._type != C.BTRFS_QGROUP_STATUS_KEY { + return fmt.Errorf("Invalid qgroup search header type for %s: %v", path, sh._type) + } + return nil +} + +func subvolLookupQgroup(path string) (uint64, error) { + dir, err := openDir(path) + if err != nil { + return 0, err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_ino_lookup_args + args.objectid = C.BTRFS_FIRST_FREE_OBJECTID + + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_INO_LOOKUP, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return 0, fmt.Errorf("Failed to lookup qgroup for %s: %v", dir, errno.Error()) + } + if args.treeid == 0 { + return 0, fmt.Errorf("Invalid qgroup id for %s: 0", dir) + } + + return uint64(args.treeid), nil +} + +func (d *Driver) subvolumesDir() string { + return path.Join(d.home, "subvolumes") +} + +func (d *Driver) subvolumesDirID(id string) string { + return path.Join(d.subvolumesDir(), id) +} + +func (d *Driver) quotasDir() string { + return path.Join(d.home, "quotas") +} + +func (d *Driver) quotasDirID(id string) string { + return path.Join(d.quotasDir(), id) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) +} + +// Create the filesystem with given id. 
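[Editor's note] Per-layer disk quotas enter through CreateOpts.StorageOpt and are parsed with units.RAMInBytes in parseStorageOpt below (only "size" is recognized per layer; btrfs.min_space is a daemon-level option). A minimal caller-side sketch, illustrative only, with a hypothetical home directory:

    // Illustrative only; assumes home is on a btrfs filesystem.
    package main

    import (
    	"log"

    	"github.com/docker/docker/daemon/graphdriver"
    	"github.com/docker/docker/daemon/graphdriver/btrfs"
    )

    func main() {
    	d, err := btrfs.Init("/var/lib/docker-test/btrfs", nil, nil, nil)
    	if err != nil {
    		log.Fatal(err) // e.g. graphdriver.ErrPrerequisites when home is not btrfs
    	}
    	// "size" is parsed by units.RAMInBytes and enforced via qgroup limits.
    	opts := &graphdriver.CreateOpts{StorageOpt: map[string]string{"size": "10G"}}
    	if err := d.CreateReadWrite("layer-id", "", opts); err != nil {
    		log.Fatal(err)
    	}
    }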
+func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + quotas := path.Join(d.home, "quotas") + subvolumes := path.Join(d.home, "subvolumes") + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(subvolumes, 0700, rootUID, rootGID); err != nil { + return err + } + if parent == "" { + if err := subvolCreate(subvolumes, id); err != nil { + return err + } + } else { + parentDir := d.subvolumesDirID(parent) + st, err := os.Stat(parentDir) + if err != nil { + return err + } + if !st.IsDir() { + return fmt.Errorf("%s: not a directory", parentDir) + } + if err := subvolSnapshot(parentDir, subvolumes, id); err != nil { + return err + } + } + + var storageOpt map[string]string + if opts != nil { + storageOpt = opts.StorageOpt + } + + if _, ok := storageOpt["size"]; ok { + driver := &Driver{} + if err := d.parseStorageOpt(storageOpt, driver); err != nil { + return err + } + + if err := d.setStorageSize(path.Join(subvolumes, id), driver); err != nil { + return err + } + if err := idtools.MkdirAllAs(quotas, 0700, rootUID, rootGID); err != nil { + return err + } + if err := ioutil.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0644); err != nil { + return err + } + } + + // if we have a remapped root (user namespaces enabled), change the created snapshot + // dir ownership to match + if rootUID != 0 || rootGID != 0 { + if err := os.Chown(path.Join(subvolumes, id), rootUID, rootGID); err != nil { + return err + } + } + + mountLabel := "" + if opts != nil { + mountLabel = opts.MountLabel + } + + return label.Relabel(path.Join(subvolumes, id), mountLabel, false) +} + +// Parse btrfs storage options +func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { + // Read size to change the subvolume disk quota per container + for key, val := range storageOpt { + key := strings.ToLower(key) + switch key { + case "size": + size, err := units.RAMInBytes(val) + if err != nil { + return err + } + driver.options.size = uint64(size) + default: + return fmt.Errorf("Unknown option %s", key) + } + } + + return nil +} + +// Set btrfs storage size +func (d *Driver) setStorageSize(dir string, driver *Driver) error { + if driver.options.size <= 0 { + return fmt.Errorf("btrfs: invalid storage size: %s", units.HumanSize(float64(driver.options.size))) + } + if d.options.minSpace > 0 && driver.options.size < d.options.minSpace { + return fmt.Errorf("btrfs: storage size cannot be less than %s", units.HumanSize(float64(d.options.minSpace))) + } + + if err := d.subvolEnableQuota(); err != nil { + return err + } + + if err := subvolLimitQgroup(dir, driver.options.size); err != nil { + return err + } + + return nil +} + +// Remove the filesystem with given id. +func (d *Driver) Remove(id string) error { + dir := d.subvolumesDirID(id) + if _, err := os.Stat(dir); err != nil { + return err + } + quotasDir := d.quotasDirID(id) + if _, err := os.Stat(quotasDir); err == nil { + if err := os.Remove(quotasDir); err != nil { + return err + } + } else if !os.IsNotExist(err) { + return err + } + + // Call updateQuotaStatus() to invoke status update + d.updateQuotaStatus() + + if err := subvolDelete(d.subvolumesDir(), id, d.quotaEnabled); err != nil { + return err + } + if err := system.EnsureRemoveAll(dir); err != nil { + return err + } + if err := d.subvolRescanQuota(); err != nil { + return err + } + return nil +} + +// Get the requested filesystem id. 
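[Editor's note] Create persists a requested quota under quotas/<id> so that Get, below, can re-apply the qgroup limit after a daemon restart. A toy sketch of that read-back step, mirroring the ReadFile plus ParseUint logic in Get; the path is hypothetical and this is not vendored code:

    // Illustrative only; mirrors Get's quota read-back.
    package main

    import (
    	"fmt"
    	"io/ioutil"
    	"strconv"
    )

    func main() {
    	quotaFile := "/var/lib/docker-test/btrfs/quotas/layer-id"
    	b, err := ioutil.ReadFile(quotaFile)
    	if err != nil {
    		fmt.Println("no persisted quota; nothing to re-apply")
    		return
    	}
    	size, err := strconv.ParseUint(string(b), 10, 64)
    	if err != nil {
    		fmt.Println("corrupt quota file:", err)
    		return
    	}
    	fmt.Printf("would re-apply a qgroup limit of %d bytes\n", size)
    }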
+func (d *Driver) Get(id, mountLabel string) (string, error) { + dir := d.subvolumesDirID(id) + st, err := os.Stat(dir) + if err != nil { + return "", err + } + + if !st.IsDir() { + return "", fmt.Errorf("%s: not a directory", dir) + } + + if quota, err := ioutil.ReadFile(d.quotasDirID(id)); err == nil { + if size, err := strconv.ParseUint(string(quota), 10, 64); err == nil && size >= d.options.minSpace { + if err := d.subvolEnableQuota(); err != nil { + return "", err + } + if err := subvolLimitQgroup(dir, size); err != nil { + return "", err + } + } + } + + return dir, nil +} + +// Put is not implemented for BTRFS as there is no cleanup required for the id. +func (d *Driver) Put(id string) error { + // Get() creates no runtime resources (like e.g. mounts) + // so this doesn't need to do anything. + return nil +} + +// Exists checks if the id exists in the filesystem. +func (d *Driver) Exists(id string) bool { + dir := d.subvolumesDirID(id) + _, err := os.Stat(dir) + return err == nil +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/btrfs/btrfs_test.go b/vendor/github.com/moby/moby/daemon/graphdriver/btrfs/btrfs_test.go new file mode 100644 index 000000000..0038dbcdc --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/btrfs/btrfs_test.go @@ -0,0 +1,63 @@ +// +build linux + +package btrfs + +import ( + "os" + "path" + "testing" + + "github.com/docker/docker/daemon/graphdriver/graphtest" +) + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestBtrfsSetup and TestBtrfsTeardown +func TestBtrfsSetup(t *testing.T) { + graphtest.GetDriver(t, "btrfs") +} + +func TestBtrfsCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "btrfs") +} + +func TestBtrfsCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "btrfs") +} + +func TestBtrfsCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "btrfs") +} + +func TestBtrfsSubvolDelete(t *testing.T) { + d := graphtest.GetDriver(t, "btrfs") + if err := d.CreateReadWrite("test", "", nil); err != nil { + t.Fatal(err) + } + defer graphtest.PutDriver(t) + + dir, err := d.Get("test", "") + if err != nil { + t.Fatal(err) + } + defer d.Put("test") + + if err := subvolCreate(dir, "subvoltest"); err != nil { + t.Fatal(err) + } + + if _, err := os.Stat(path.Join(dir, "subvoltest")); err != nil { + t.Fatal(err) + } + + if err := d.Remove("test"); err != nil { + t.Fatal(err) + } + + if _, err := os.Stat(path.Join(dir, "subvoltest")); !os.IsNotExist(err) { + t.Fatalf("expected not exist error on nested subvol, got: %v", err) + } +} + +func TestBtrfsTeardown(t *testing.T) { + graphtest.PutDriver(t) +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/btrfs/dummy_unsupported.go b/vendor/github.com/moby/moby/daemon/graphdriver/btrfs/dummy_unsupported.go new file mode 100644 index 000000000..f07088887 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/btrfs/dummy_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux !cgo + +package btrfs diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/btrfs/version.go b/vendor/github.com/moby/moby/daemon/graphdriver/btrfs/version.go new file mode 100644 index 000000000..73d90cdd7 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/btrfs/version.go @@ -0,0 +1,26 @@ +// +build linux,!btrfs_noversion + +package btrfs + +/* +#include + +// around version 3.16, they did not define lib version yet +#ifndef BTRFS_LIB_VERSION +#define BTRFS_LIB_VERSION -1 +#endif + +// upstream 
had removed it, but now it will be coming back +#ifndef BTRFS_BUILD_VERSION +#define BTRFS_BUILD_VERSION "-" +#endif +*/ +import "C" + +func btrfsBuildVersion() string { + return string(C.BTRFS_BUILD_VERSION) +} + +func btrfsLibVersion() int { + return int(C.BTRFS_LIB_VERSION) +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/btrfs/version_none.go b/vendor/github.com/moby/moby/daemon/graphdriver/btrfs/version_none.go new file mode 100644 index 000000000..f802fbc62 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/btrfs/version_none.go @@ -0,0 +1,14 @@ +// +build linux,btrfs_noversion + +package btrfs + +// TODO(vbatts) remove this work-around once supported linux distros are on +// btrfs utilities of >= 3.16.1 + +func btrfsBuildVersion() string { + return "-" +} + +func btrfsLibVersion() int { + return -1 +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/btrfs/version_test.go b/vendor/github.com/moby/moby/daemon/graphdriver/btrfs/version_test.go new file mode 100644 index 000000000..d78d57717 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/btrfs/version_test.go @@ -0,0 +1,13 @@ +// +build linux,!btrfs_noversion + +package btrfs + +import ( + "testing" +) + +func TestLibVersion(t *testing.T) { + if btrfsLibVersion() <= 0 { + t.Error("expected output from btrfs lib version > 0") + } +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/counter.go b/vendor/github.com/moby/moby/daemon/graphdriver/counter.go new file mode 100644 index 000000000..72551a38d --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/counter.go @@ -0,0 +1,59 @@ +package graphdriver + +import "sync" + +type minfo struct { + check bool + count int +} + +// RefCounter is a generic counter for use by graphdriver Get/Put calls +type RefCounter struct { + counts map[string]*minfo + mu sync.Mutex + checker Checker +} + +// NewRefCounter returns a new RefCounter +func NewRefCounter(c Checker) *RefCounter { + return &RefCounter{ + checker: c, + counts: make(map[string]*minfo), + } +} + +// Increment increases the ref count for the given id and returns the current count +func (c *RefCounter) Increment(path string) int { + return c.incdec(path, func(minfo *minfo) { + minfo.count++ + }) +} + +// Decrement decreases the ref count for the given id and returns the current count +func (c *RefCounter) Decrement(path string) int { + return c.incdec(path, func(minfo *minfo) { + minfo.count-- + }) +} + +func (c *RefCounter) incdec(path string, infoOp func(minfo *minfo)) int { + c.mu.Lock() + m := c.counts[path] + if m == nil { + m = &minfo{} + c.counts[path] = m + } + // if we are checking this path for the first time check to make sure + // if it was already mounted on the system and make sure we have a correct ref + // count if it is mounted as it is in use. + if !m.check { + m.check = true + if c.checker.IsMounted(path) { + m.count++ + } + } + infoOp(m) + count := m.count + c.mu.Unlock() + return count +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/devmapper/README.md b/vendor/github.com/moby/moby/daemon/graphdriver/devmapper/README.md new file mode 100644 index 000000000..6594fa65f --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/devmapper/README.md @@ -0,0 +1,98 @@ +# devicemapper - a storage backend based on Device Mapper + +## Theory of operation + +The device mapper graphdriver uses the device mapper thin provisioning +module (dm-thinp) to implement CoW snapshots. 
The preferred model is
+to have a thin pool reserved outside of Docker and passed to the
+daemon via the `--storage-opt dm.thinpooldev` option. Alternatively,
+the device mapper graphdriver can set up a block device to handle this
+for you via the `--storage-opt dm.directlvm_device` option.
+
+As a fallback if no thin pool is provided, loopback files will be
+created. Loopback is very slow, but can be used without any
+pre-configuration of storage. It is strongly recommended that you do
+not use loopback in production. Ensure your Docker daemon has a
+`--storage-opt dm.thinpooldev` argument provided.
+
+In loopback, a thin pool is created at `/var/lib/docker/devicemapper`
+(devicemapper graph location) based on two block devices, one for
+data and one for metadata. By default these block devices are created
+automatically by using loopback mounts of automatically created sparse
+files.
+
+The default loopback files used are
+`/var/lib/docker/devicemapper/devicemapper/data` and
+`/var/lib/docker/devicemapper/devicemapper/metadata`. Additional metadata
+required to map from docker entities to the corresponding devicemapper
+volumes is stored in the `/var/lib/docker/devicemapper/devicemapper/json`
+file (encoded as JSON).
+
+In order to support multiple devicemapper graphs on a system, the thin
+pool will be named something like: `docker-0:33-19478248-pool`, where
+the `0:33` part is the minor/major device number and `19478248` is the
+inode number of the `/var/lib/docker/devicemapper` directory.
+
+On the thin pool, docker automatically creates a base thin device,
+called something like `docker-0:33-19478248-base`, of a fixed
+size. This is automatically formatted with an empty filesystem on
+creation. This device is the base of all docker images and
+containers. All base images are snapshots of this device and those
+images are then in turn used as snapshots for other images and
+eventually containers.
+
+## Information on `docker info`
+
+As of docker-1.4.1, `docker info` when using the `devicemapper` storage driver
+will display something like:
+
+    $ sudo docker info
+    [...]
+    Storage Driver: devicemapper
+     Pool Name: docker-253:1-17538953-pool
+     Pool Blocksize: 65.54 kB
+     Base Device Size: 107.4 GB
+     Data file: /dev/loop4
+     Metadata file: /dev/loop4
+     Data Space Used: 2.536 GB
+     Data Space Total: 107.4 GB
+     Data Space Available: 104.8 GB
+     Metadata Space Used: 7.93 MB
+     Metadata Space Total: 2.147 GB
+     Metadata Space Available: 2.14 GB
+     Udev Sync Supported: true
+     Data loop file: /home/docker/devicemapper/devicemapper/data
+     Metadata loop file: /home/docker/devicemapper/devicemapper/metadata
+     Library Version: 1.02.82-git (2013-10-04)
+    [...]
+
+### status items
+
+Each item in the indented section under `Storage Driver: devicemapper` is
+status information about the driver.
+ * `Pool Name` name of the devicemapper pool for this driver.
+ * `Pool Blocksize` tells the blocksize the thin pool was initialized with. This only changes on creation.
+ * `Base Device Size` tells the maximum size of a container or image
+ * `Data file` blockdevice file used for the devicemapper data
+ * `Metadata file` blockdevice file used for the devicemapper metadata
+ * `Data Space Used` tells how much of `Data file` is currently used
+ * `Data Space Total` tells the max size of the `Data file`
+ * `Data Space Available` tells how much free space there is in the `Data file`. If you are using a loop device this will report the actual space available to the loop device on the underlying filesystem.
+ * `Metadata Space Used` tells how much of `Metadata file` is currently used + * `Metadata Space Total` tells max size the `Metadata file` + * `Metadata Space Available` tells how much free space there is in the `Metadata file`. If you are using a loop device this will report the actual space available to the loop device on the underlying filesystem. + * `Udev Sync Supported` tells whether devicemapper is able to sync with Udev. Should be `true`. + * `Data loop file` file attached to `Data file`, if loopback device is used + * `Metadata loop file` file attached to `Metadata file`, if loopback device is used + * `Library Version` from the libdevmapper used + +## About the devicemapper options + +The devicemapper backend supports some options that you can specify +when starting the docker daemon using the `--storage-opt` flags. +This uses the `dm` prefix and would be used something like `dockerd --storage-opt dm.foo=bar`. + +These options are currently documented both in [the man +page](../../../man/docker.1.md) and in [the online +documentation](https://docs.docker.com/engine/reference/commandline/dockerd/#/storage-driver-options). +If you add an options, update both the `man` page and the documentation. diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/devmapper/device_setup.go b/vendor/github.com/moby/moby/daemon/graphdriver/devmapper/device_setup.go new file mode 100644 index 000000000..ef6cffbf2 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/devmapper/device_setup.go @@ -0,0 +1,247 @@ +package devmapper + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "reflect" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/pkg/errors" +) + +type directLVMConfig struct { + Device string + ThinpPercent uint64 + ThinpMetaPercent uint64 + AutoExtendPercent uint64 + AutoExtendThreshold uint64 +} + +var ( + errThinpPercentMissing = errors.New("must set both `dm.thinp_percent` and `dm.thinp_metapercent` if either is specified") + errThinpPercentTooBig = errors.New("combined `dm.thinp_percent` and `dm.thinp_metapercent` must not be greater than 100") + errMissingSetupDevice = errors.New("must provide device path in `dm.setup_device` in order to configure direct-lvm") +) + +func validateLVMConfig(cfg directLVMConfig) error { + if reflect.DeepEqual(cfg, directLVMConfig{}) { + return nil + } + if cfg.Device == "" { + return errMissingSetupDevice + } + if (cfg.ThinpPercent > 0 && cfg.ThinpMetaPercent == 0) || cfg.ThinpMetaPercent > 0 && cfg.ThinpPercent == 0 { + return errThinpPercentMissing + } + + if cfg.ThinpPercent+cfg.ThinpMetaPercent > 100 { + return errThinpPercentTooBig + } + return nil +} + +func checkDevAvailable(dev string) error { + lvmScan, err := exec.LookPath("lvmdiskscan") + if err != nil { + logrus.Debug("could not find lvmdiskscan") + return nil + } + + out, err := exec.Command(lvmScan).CombinedOutput() + if err != nil { + logrus.WithError(err).Error(string(out)) + return nil + } + + if !bytes.Contains(out, []byte(dev)) { + return errors.Errorf("%s is not available for use with devicemapper", dev) + } + return nil +} + +func checkDevInVG(dev string) error { + pvDisplay, err := exec.LookPath("pvdisplay") + if err != nil { + logrus.Debug("could not find pvdisplay") + return nil + } + + out, err := exec.Command(pvDisplay, dev).CombinedOutput() + if err != nil { + logrus.WithError(err).Error(string(out)) + return nil + } + + scanner := bufio.NewScanner(bytes.NewReader(bytes.TrimSpace(out))) + for 
scanner.Scan() { + fields := strings.SplitAfter(strings.TrimSpace(scanner.Text()), "VG Name") + if len(fields) > 1 { + // got "VG Name" line" + vg := strings.TrimSpace(fields[1]) + if len(vg) > 0 { + return errors.Errorf("%s is already part of a volume group %q: must remove this device from any volume group or provide a different device", dev, vg) + } + logrus.Error(fields) + break + } + } + return nil +} + +func checkDevHasFS(dev string) error { + blkid, err := exec.LookPath("blkid") + if err != nil { + logrus.Debug("could not find blkid") + return nil + } + + out, err := exec.Command(blkid, dev).CombinedOutput() + if err != nil { + logrus.WithError(err).Error(string(out)) + return nil + } + + fields := bytes.Fields(out) + for _, f := range fields { + kv := bytes.Split(f, []byte{'='}) + if bytes.Equal(kv[0], []byte("TYPE")) { + v := bytes.Trim(kv[1], "\"") + if len(v) > 0 { + return errors.Errorf("%s has a filesystem already, use dm.directlvm_device_force=true if you want to wipe the device", dev) + } + return nil + } + } + return nil +} + +func verifyBlockDevice(dev string, force bool) error { + if err := checkDevAvailable(dev); err != nil { + return err + } + if err := checkDevInVG(dev); err != nil { + return err + } + + if force { + return nil + } + + if err := checkDevHasFS(dev); err != nil { + return err + } + return nil +} + +func readLVMConfig(root string) (directLVMConfig, error) { + var cfg directLVMConfig + + p := filepath.Join(root, "setup-config.json") + b, err := ioutil.ReadFile(p) + if err != nil { + if os.IsNotExist(err) { + return cfg, nil + } + return cfg, errors.Wrap(err, "error reading existing setup config") + } + + // check if this is just an empty file, no need to produce a json error later if so + if len(b) == 0 { + return cfg, nil + } + + err = json.Unmarshal(b, &cfg) + return cfg, errors.Wrap(err, "error unmarshaling previous device setup config") +} + +func writeLVMConfig(root string, cfg directLVMConfig) error { + p := filepath.Join(root, "setup-config.json") + b, err := json.Marshal(cfg) + if err != nil { + return errors.Wrap(err, "error marshalling direct lvm config") + } + err = ioutil.WriteFile(p, b, 0600) + return errors.Wrap(err, "error writing direct lvm config to file") +} + +func setupDirectLVM(cfg directLVMConfig) error { + pvCreate, err := exec.LookPath("pvcreate") + if err != nil { + return errors.Wrap(err, "error looking up command `pvcreate` while setting up direct lvm") + } + + vgCreate, err := exec.LookPath("vgcreate") + if err != nil { + return errors.Wrap(err, "error looking up command `vgcreate` while setting up direct lvm") + } + + lvCreate, err := exec.LookPath("lvcreate") + if err != nil { + return errors.Wrap(err, "error looking up command `lvcreate` while setting up direct lvm") + } + + lvConvert, err := exec.LookPath("lvconvert") + if err != nil { + return errors.Wrap(err, "error looking up command `lvconvert` while setting up direct lvm") + } + + lvChange, err := exec.LookPath("lvchange") + if err != nil { + return errors.Wrap(err, "error looking up command `lvchange` while setting up direct lvm") + } + + if cfg.AutoExtendPercent == 0 { + cfg.AutoExtendPercent = 20 + } + + if cfg.AutoExtendThreshold == 0 { + cfg.AutoExtendThreshold = 80 + } + + if cfg.ThinpPercent == 0 { + cfg.ThinpPercent = 95 + } + if cfg.ThinpMetaPercent == 0 { + cfg.ThinpMetaPercent = 1 + } + + out, err := exec.Command(pvCreate, "-f", cfg.Device).CombinedOutput() + if err != nil { + return errors.Wrap(err, string(out)) + } + + out, err = exec.Command(vgCreate, 
"docker", cfg.Device).CombinedOutput() + if err != nil { + return errors.Wrap(err, string(out)) + } + + out, err = exec.Command(lvCreate, "--wipesignatures", "y", "-n", "thinpool", "docker", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpPercent)).CombinedOutput() + if err != nil { + return errors.Wrap(err, string(out)) + } + out, err = exec.Command(lvCreate, "--wipesignatures", "y", "-n", "thinpoolmeta", "docker", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpMetaPercent)).CombinedOutput() + if err != nil { + return errors.Wrap(err, string(out)) + } + + out, err = exec.Command(lvConvert, "-y", "--zero", "n", "-c", "512K", "--thinpool", "docker/thinpool", "--poolmetadata", "docker/thinpoolmeta").CombinedOutput() + if err != nil { + return errors.Wrap(err, string(out)) + } + + profile := fmt.Sprintf("activation{\nthin_pool_autoextend_threshold=%d\nthin_pool_autoextend_percent=%d\n}", cfg.AutoExtendThreshold, cfg.AutoExtendPercent) + err = ioutil.WriteFile("/etc/lvm/profile/docker-thinpool.profile", []byte(profile), 0600) + if err != nil { + return errors.Wrap(err, "error writing docker thinp autoextend profile") + } + + out, err = exec.Command(lvChange, "--metadataprofile", "docker-thinpool", "docker/thinpool").CombinedOutput() + return errors.Wrap(err, string(out)) +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/devmapper/deviceset.go b/vendor/github.com/moby/moby/daemon/graphdriver/devmapper/deviceset.go new file mode 100644 index 000000000..a7e06e1f9 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/devmapper/deviceset.go @@ -0,0 +1,2813 @@ +// +build linux + +package devmapper + +import ( + "bufio" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "reflect" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/devicemapper" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/loopback" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/parsers" + units "github.com/docker/go-units" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +var ( + defaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024 + defaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024 + defaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024 + defaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors + defaultUdevSyncOverride = false + maxDeviceID = 0xffffff // 24 bit, pool limit + deviceIDMapSz = (maxDeviceID + 1) / 8 + driverDeferredRemovalSupport = false + enableDeferredRemoval = false + enableDeferredDeletion = false + userBaseSize = false + defaultMinFreeSpacePercent uint32 = 10 + lvmSetupConfigForce bool +) + +const deviceSetMetaFile string = "deviceset-metadata" +const transactionMetaFile string = "transaction-metadata" + +type transaction struct { + OpenTransactionID uint64 `json:"open_transaction_id"` + DeviceIDHash string `json:"device_hash"` + DeviceID int `json:"device_id"` +} + +type devInfo struct { + Hash string `json:"-"` + DeviceID int `json:"device_id"` + Size uint64 `json:"size"` + TransactionID uint64 `json:"transaction_id"` + Initialized bool `json:"initialized"` + Deleted bool `json:"deleted"` + devices *DeviceSet + + // The global DeviceSet lock guarantees that we serialize all + // the calls to libdevmapper (which is not threadsafe), but we + // sometimes release that 
lock while sleeping. In that case
+    // this per-device lock is still held, protecting against
+    // other accesses to the device that we're doing the wait on.
+    //
+    // WARNING: In order to avoid AB-BA deadlocks when releasing
+    // the global lock while holding the per-device locks, all
+    // per-device locks must be acquired *before* the global lock,
+    // and multiple device locks should be acquired parent before child.
+    lock sync.Mutex
+}
+
+type metaData struct {
+    Devices map[string]*devInfo `json:"Devices"`
+}
+
+// DeviceSet holds information about the list of devices
+type DeviceSet struct {
+    metaData      `json:"-"`
+    sync.Mutex    `json:"-"` // Protects all fields of DeviceSet and serializes calls into libdevmapper
+    root          string
+    devicePrefix  string
+    TransactionID uint64 `json:"-"`
+    NextDeviceID  int    `json:"next_device_id"`
+    deviceIDMap   []byte
+
+    // Options
+    dataLoopbackSize      int64
+    metaDataLoopbackSize  int64
+    baseFsSize            uint64
+    filesystem            string
+    mountOptions          string
+    mkfsArgs              []string
+    dataDevice            string // block or loop dev
+    dataLoopFile          string // loopback file, if used
+    metadataDevice        string // block or loop dev
+    metadataLoopFile      string // loopback file, if used
+    doBlkDiscard          bool
+    thinpBlockSize        uint32
+    thinPoolDevice        string
+    transaction           `json:"-"`
+    overrideUdevSyncCheck bool
+    deferredRemove        bool   // use deferred removal
+    deferredDelete        bool   // use deferred deletion
+    BaseDeviceUUID        string // save UUID of base device
+    BaseDeviceFilesystem  string // save filesystem of base device
+    nrDeletedDevices      uint   // number of deleted devices
+    deletionWorkerTicker  *time.Ticker
+    uidMaps               []idtools.IDMap
+    gidMaps               []idtools.IDMap
+    minFreeSpacePercent   uint32 // min free space percentage in thinpool
+    xfsNospaceRetries     string // max retries when xfs receives ENOSPC
+    lvmSetupConfig        directLVMConfig
+}
+
+// DiskUsage contains information about disk usage and is used when reporting Status of a device.
+type DiskUsage struct {
+    // Used bytes on the disk.
+    Used uint64
+    // Total bytes on the disk.
+    Total uint64
+    // Available bytes on the disk.
+    Available uint64
+}
+
+// Status contains information about the device set.
+type Status struct {
+    // PoolName is the name of the data pool.
+    PoolName string
+    // DataFile is the actual block device for data.
+    DataFile string
+    // DataLoopback is the loopback file, if used.
+    DataLoopback string
+    // MetadataFile is the actual block device for metadata.
+    MetadataFile string
+    // MetadataLoopback is the loopback file, if used.
+    MetadataLoopback string
+    // Data is the disk used for data.
+    Data DiskUsage
+    // Metadata is the disk used for metadata.
+    Metadata DiskUsage
+    // BaseDeviceSize is the base size of containers and images.
+    BaseDeviceSize uint64
+    // BaseDeviceFS is the backing filesystem.
+    BaseDeviceFS string
+    // SectorSize is the size of a sector.
+    SectorSize uint64
+    // UdevSyncSupported is true if udev sync is supported.
+    UdevSyncSupported bool
+    // DeferredRemoveEnabled is true if deferred removal is enabled; devices
+    // are then deactivated only once they are no longer in use.
+    DeferredRemoveEnabled bool
+    // True if deferred deletion is enabled. This is different from
+    // deferred removal. "removal" means that device mapper device is
+    // deactivated. Thin device is still in thin pool and can be activated
+    // again. But "deletion" means that thin device will be deleted from
+    // thin pool and it can't be activated again.
+    DeferredDeleteEnabled bool
+    // DeferredDeletedDeviceCount is the number of devices pending deferred deletion.
+    DeferredDeletedDeviceCount uint
+    // MinFreeSpace is the minimum amount of free space required in the pool.
+    MinFreeSpace uint64
+}
+
+// deviceMetadata is the structure used to export image/container metadata in docker inspect.
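+// It carries the device ID, the size of the device in bytes, and the name
+// under which the device was activated (see devInfo.Name).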
+type deviceMetadata struct {
+    deviceID   int
+    deviceSize uint64 // size in bytes
+    deviceName string // Device name as used during activation
+}
+
+// DevStatus holds information about a mounted device: its ID, size, and sector usage.
+type DevStatus struct {
+    // DeviceID is the id of the device.
+    DeviceID int
+    // Size is the size of the filesystem.
+    Size uint64
+    // TransactionID is a unique integer per device set used to identify an
+    // operation on the file system; this number is incremental.
+    TransactionID uint64
+    // SizeInSectors indicates the size of the sectors allocated.
+    SizeInSectors uint64
+    // MappedSectors indicates number of mapped sectors.
+    MappedSectors uint64
+    // HighestMappedSector is the pointer to the highest mapped sector.
+    HighestMappedSector uint64
+}
+
+func getDevName(name string) string {
+    return "/dev/mapper/" + name
+}
+
+func (info *devInfo) Name() string {
+    hash := info.Hash
+    if hash == "" {
+        hash = "base"
+    }
+    return fmt.Sprintf("%s-%s", info.devices.devicePrefix, hash)
+}
+
+func (info *devInfo) DevName() string {
+    return getDevName(info.Name())
+}
+
+func (devices *DeviceSet) loopbackDir() string {
+    return path.Join(devices.root, "devicemapper")
+}
+
+func (devices *DeviceSet) metadataDir() string {
+    return path.Join(devices.root, "metadata")
+}
+
+func (devices *DeviceSet) metadataFile(info *devInfo) string {
+    file := info.Hash
+    if file == "" {
+        file = "base"
+    }
+    return path.Join(devices.metadataDir(), file)
+}
+
+func (devices *DeviceSet) transactionMetaFile() string {
+    return path.Join(devices.metadataDir(), transactionMetaFile)
+}
+
+func (devices *DeviceSet) deviceSetMetaFile() string {
+    return path.Join(devices.metadataDir(), deviceSetMetaFile)
+}
+
+func (devices *DeviceSet) oldMetadataFile() string {
+    return path.Join(devices.loopbackDir(), "json")
+}
+
+func (devices *DeviceSet) getPoolName() string {
+    if devices.thinPoolDevice == "" {
+        return devices.devicePrefix + "-pool"
+    }
+    return devices.thinPoolDevice
+}
+
+func (devices *DeviceSet) getPoolDevName() string {
+    return getDevName(devices.getPoolName())
+}
+
+func (devices *DeviceSet) hasImage(name string) bool {
+    dirname := devices.loopbackDir()
+    filename := path.Join(dirname, name)
+
+    _, err := os.Stat(filename)
+    return err == nil
+}
+
+// ensureImage creates a sparse file of <size> bytes at the path
+// <root>/devicemapper/<name>.
+// If the file already exists and the new size is larger than its current size, it grows to the new size.
+// Either way it returns the full path.
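+// For example, ensureImage("data", 100<<30) creates (or grows) a sparse
+// 100 GB file at <root>/devicemapper/data.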
+func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { + dirname := devices.loopbackDir() + filename := path.Join(dirname, name) + + uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps) + if err != nil { + return "", err + } + if err := idtools.MkdirAllAs(dirname, 0700, uid, gid); err != nil && !os.IsExist(err) { + return "", err + } + + if fi, err := os.Stat(filename); err != nil { + if !os.IsNotExist(err) { + return "", err + } + logrus.Debugf("devmapper: Creating loopback file %s for device-manage use", filename) + file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + return "", err + } + defer file.Close() + + if err := file.Truncate(size); err != nil { + return "", err + } + } else { + if fi.Size() < size { + file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + return "", err + } + defer file.Close() + if err := file.Truncate(size); err != nil { + return "", fmt.Errorf("devmapper: Unable to grow loopback file %s: %v", filename, err) + } + } else if fi.Size() > size { + logrus.Warnf("devmapper: Can't shrink loopback file %s", filename) + } + } + return filename, nil +} + +func (devices *DeviceSet) allocateTransactionID() uint64 { + devices.OpenTransactionID = devices.TransactionID + 1 + return devices.OpenTransactionID +} + +func (devices *DeviceSet) updatePoolTransactionID() error { + if err := devicemapper.SetTransactionID(devices.getPoolDevName(), devices.TransactionID, devices.OpenTransactionID); err != nil { + return fmt.Errorf("devmapper: Error setting devmapper transaction ID: %s", err) + } + devices.TransactionID = devices.OpenTransactionID + return nil +} + +func (devices *DeviceSet) removeMetadata(info *devInfo) error { + if err := os.RemoveAll(devices.metadataFile(info)); err != nil { + return fmt.Errorf("devmapper: Error removing metadata file %s: %s", devices.metadataFile(info), err) + } + return nil +} + +// Given json data and file path, write it to disk +func (devices *DeviceSet) writeMetaFile(jsonData []byte, filePath string) error { + tmpFile, err := ioutil.TempFile(devices.metadataDir(), ".tmp") + if err != nil { + return fmt.Errorf("devmapper: Error creating metadata file: %s", err) + } + + n, err := tmpFile.Write(jsonData) + if err != nil { + return fmt.Errorf("devmapper: Error writing metadata to %s: %s", tmpFile.Name(), err) + } + if n < len(jsonData) { + return io.ErrShortWrite + } + if err := tmpFile.Sync(); err != nil { + return fmt.Errorf("devmapper: Error syncing metadata file %s: %s", tmpFile.Name(), err) + } + if err := tmpFile.Close(); err != nil { + return fmt.Errorf("devmapper: Error closing metadata file %s: %s", tmpFile.Name(), err) + } + if err := os.Rename(tmpFile.Name(), filePath); err != nil { + return fmt.Errorf("devmapper: Error committing metadata file %s: %s", tmpFile.Name(), err) + } + + return nil +} + +func (devices *DeviceSet) saveMetadata(info *devInfo) error { + jsonData, err := json.Marshal(info) + if err != nil { + return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) + } + if err := devices.writeMetaFile(jsonData, devices.metadataFile(info)); err != nil { + return err + } + return nil +} + +func (devices *DeviceSet) markDeviceIDUsed(deviceID int) { + var mask byte + i := deviceID % 8 + mask = 1 << uint(i) + devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] | mask +} + +func (devices *DeviceSet) markDeviceIDFree(deviceID int) { + var mask byte + i := deviceID % 8 + mask = ^(1 << uint(i)) + 
devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] & mask +} + +func (devices *DeviceSet) isDeviceIDFree(deviceID int) bool { + var mask byte + i := deviceID % 8 + mask = (1 << uint(i)) + return (devices.deviceIDMap[deviceID/8] & mask) == 0 +} + +// Should be called with devices.Lock() held. +func (devices *DeviceSet) lookupDevice(hash string) (*devInfo, error) { + info := devices.Devices[hash] + if info == nil { + info = devices.loadMetadata(hash) + if info == nil { + return nil, fmt.Errorf("devmapper: Unknown device %s", hash) + } + + devices.Devices[hash] = info + } + return info, nil +} + +func (devices *DeviceSet) lookupDeviceWithLock(hash string) (*devInfo, error) { + devices.Lock() + defer devices.Unlock() + info, err := devices.lookupDevice(hash) + return info, err +} + +// This function relies on that device hash map has been loaded in advance. +// Should be called with devices.Lock() held. +func (devices *DeviceSet) constructDeviceIDMap() { + logrus.Debug("devmapper: constructDeviceIDMap()") + defer logrus.Debug("devmapper: constructDeviceIDMap() END") + + for _, info := range devices.Devices { + devices.markDeviceIDUsed(info.DeviceID) + logrus.Debugf("devmapper: Added deviceId=%d to DeviceIdMap", info.DeviceID) + } +} + +func (devices *DeviceSet) deviceFileWalkFunction(path string, finfo os.FileInfo) error { + + // Skip some of the meta files which are not device files. + if strings.HasSuffix(finfo.Name(), ".migrated") { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + if strings.HasPrefix(finfo.Name(), ".") { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + if finfo.Name() == deviceSetMetaFile { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + if finfo.Name() == transactionMetaFile { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + logrus.Debugf("devmapper: Loading data for file %s", path) + + hash := finfo.Name() + if hash == "base" { + hash = "" + } + + // Include deleted devices also as cleanup delete device logic + // will go through it and see if there are any deleted devices. + if _, err := devices.lookupDevice(hash); err != nil { + return fmt.Errorf("devmapper: Error looking up device %s:%v", hash, err) + } + + return nil +} + +func (devices *DeviceSet) loadDeviceFilesOnStart() error { + logrus.Debug("devmapper: loadDeviceFilesOnStart()") + defer logrus.Debug("devmapper: loadDeviceFilesOnStart() END") + + var scan = func(path string, info os.FileInfo, err error) error { + if err != nil { + logrus.Debugf("devmapper: Can't walk the file %s", path) + return nil + } + + // Skip any directories + if info.IsDir() { + return nil + } + + return devices.deviceFileWalkFunction(path, info) + } + + return filepath.Walk(devices.metadataDir(), scan) +} + +// Should be called with devices.Lock() held. +func (devices *DeviceSet) unregisterDevice(hash string) error { + logrus.Debugf("devmapper: unregisterDevice(%v)", hash) + info := &devInfo{ + Hash: hash, + } + + delete(devices.Devices, hash) + + if err := devices.removeMetadata(info); err != nil { + logrus.Debugf("devmapper: Error removing metadata: %s", err) + return err + } + + return nil +} + +// Should be called with devices.Lock() held. 
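+// registerDevice creates the in-memory devInfo for the new device and
+// persists it to <root>/metadata/<hash>; if the save fails, the entry is
+// dropped from the hash table again so in-memory and on-disk state agree.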
+func (devices *DeviceSet) registerDevice(id int, hash string, size uint64, transactionID uint64) (*devInfo, error) { + logrus.Debugf("devmapper: registerDevice(%v, %v)", id, hash) + info := &devInfo{ + Hash: hash, + DeviceID: id, + Size: size, + TransactionID: transactionID, + Initialized: false, + devices: devices, + } + + devices.Devices[hash] = info + + if err := devices.saveMetadata(info); err != nil { + // Try to remove unused device + delete(devices.Devices, hash) + return nil, err + } + + return info, nil +} + +func (devices *DeviceSet) activateDeviceIfNeeded(info *devInfo, ignoreDeleted bool) error { + logrus.Debugf("devmapper: activateDeviceIfNeeded(%v)", info.Hash) + + if info.Deleted && !ignoreDeleted { + return fmt.Errorf("devmapper: Can't activate device %v as it is marked for deletion", info.Hash) + } + + // Make sure deferred removal on device is canceled, if one was + // scheduled. + if err := devices.cancelDeferredRemovalIfNeeded(info); err != nil { + return fmt.Errorf("devmapper: Device Deferred Removal Cancellation Failed: %s", err) + } + + if devinfo, _ := devicemapper.GetInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 { + return nil + } + + return devicemapper.ActivateDevice(devices.getPoolDevName(), info.Name(), info.DeviceID, info.Size) +} + +// Return true only if kernel supports xfs and mkfs.xfs is available +func xfsSupported() bool { + // Make sure mkfs.xfs is available + if _, err := exec.LookPath("mkfs.xfs"); err != nil { + return false + } + + // Check if kernel supports xfs filesystem or not. + exec.Command("modprobe", "xfs").Run() + + f, err := os.Open("/proc/filesystems") + if err != nil { + logrus.Warnf("devmapper: Could not check if xfs is supported: %v", err) + return false + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if strings.HasSuffix(s.Text(), "\txfs") { + return true + } + } + + if err := s.Err(); err != nil { + logrus.Warnf("devmapper: Could not check if xfs is supported: %v", err) + } + return false +} + +func determineDefaultFS() string { + if xfsSupported() { + return "xfs" + } + + logrus.Warn("devmapper: XFS is not supported in your system. Either the kernel doesn't support it or mkfs.xfs is not in your PATH. Defaulting to ext4 filesystem") + return "ext4" +} + +func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) { + devname := info.DevName() + + args := []string{} + args = append(args, devices.mkfsArgs...) 
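+    // Any user-supplied dm.mkfsarg options come first; the device name is
+    // appended last, since mkfs tools expect the target device as the final
+    // argument.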
+ + args = append(args, devname) + + if devices.filesystem == "" { + devices.filesystem = determineDefaultFS() + } + if err := devices.saveBaseDeviceFilesystem(devices.filesystem); err != nil { + return err + } + + logrus.Infof("devmapper: Creating filesystem %s on device %s", devices.filesystem, info.Name()) + defer func() { + if err != nil { + logrus.Infof("devmapper: Error while creating filesystem %s on device %s: %v", devices.filesystem, info.Name(), err) + } else { + logrus.Infof("devmapper: Successfully created filesystem %s on device %s", devices.filesystem, info.Name()) + } + }() + + switch devices.filesystem { + case "xfs": + err = exec.Command("mkfs.xfs", args...).Run() + case "ext4": + err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0,lazy_journal_init=0"}, args...)...).Run() + if err != nil { + err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0"}, args...)...).Run() + } + if err != nil { + return err + } + err = exec.Command("tune2fs", append([]string{"-c", "-1", "-i", "0"}, devname)...).Run() + default: + err = fmt.Errorf("devmapper: Unsupported filesystem type %s", devices.filesystem) + } + return +} + +func (devices *DeviceSet) migrateOldMetaData() error { + // Migrate old metadata file + jsonData, err := ioutil.ReadFile(devices.oldMetadataFile()) + if err != nil && !os.IsNotExist(err) { + return err + } + + if jsonData != nil { + m := metaData{Devices: make(map[string]*devInfo)} + + if err := json.Unmarshal(jsonData, &m); err != nil { + return err + } + + for hash, info := range m.Devices { + info.Hash = hash + devices.saveMetadata(info) + } + if err := os.Rename(devices.oldMetadataFile(), devices.oldMetadataFile()+".migrated"); err != nil { + return err + } + + } + + return nil +} + +// Cleanup deleted devices. It assumes that all the devices have been +// loaded in the hash table. +func (devices *DeviceSet) cleanupDeletedDevices() error { + devices.Lock() + + // If there are no deleted devices, there is nothing to do. + if devices.nrDeletedDevices == 0 { + devices.Unlock() + return nil + } + + var deletedDevices []*devInfo + + for _, info := range devices.Devices { + if !info.Deleted { + continue + } + logrus.Debugf("devmapper: Found deleted device %s.", info.Hash) + deletedDevices = append(deletedDevices, info) + } + + // Delete the deleted devices. DeleteDevice() first takes the info lock + // and then devices.Lock(). So drop it to avoid deadlock. + devices.Unlock() + + for _, info := range deletedDevices { + // This will again try deferred deletion. + if err := devices.DeleteDevice(info.Hash, false); err != nil { + logrus.Warnf("devmapper: Deletion of device %s, device_id=%v failed:%v", info.Hash, info.DeviceID, err) + } + } + + return nil +} + +func (devices *DeviceSet) countDeletedDevices() { + for _, info := range devices.Devices { + if !info.Deleted { + continue + } + devices.nrDeletedDevices++ + } +} + +func (devices *DeviceSet) startDeviceDeletionWorker() { + // Deferred deletion is not enabled. Don't do anything. 
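+    // Otherwise wake up on every tick of deletionWorkerTicker and retry any
+    // devices that are still marked as deleted.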
+ if !devices.deferredDelete { + return + } + + logrus.Debug("devmapper: Worker to cleanup deleted devices started") + for range devices.deletionWorkerTicker.C { + devices.cleanupDeletedDevices() + } +} + +func (devices *DeviceSet) initMetaData() error { + devices.Lock() + defer devices.Unlock() + + if err := devices.migrateOldMetaData(); err != nil { + return err + } + + _, transactionID, _, _, _, _, err := devices.poolStatus() + if err != nil { + return err + } + + devices.TransactionID = transactionID + + if err := devices.loadDeviceFilesOnStart(); err != nil { + return fmt.Errorf("devmapper: Failed to load device files:%v", err) + } + + devices.constructDeviceIDMap() + devices.countDeletedDevices() + + if err := devices.processPendingTransaction(); err != nil { + return err + } + + // Start a goroutine to cleanup Deleted Devices + go devices.startDeviceDeletionWorker() + return nil +} + +func (devices *DeviceSet) incNextDeviceID() { + // IDs are 24bit, so wrap around + devices.NextDeviceID = (devices.NextDeviceID + 1) & maxDeviceID +} + +func (devices *DeviceSet) getNextFreeDeviceID() (int, error) { + devices.incNextDeviceID() + for i := 0; i <= maxDeviceID; i++ { + if devices.isDeviceIDFree(devices.NextDeviceID) { + devices.markDeviceIDUsed(devices.NextDeviceID) + return devices.NextDeviceID, nil + } + devices.incNextDeviceID() + } + + return 0, fmt.Errorf("devmapper: Unable to find a free device ID") +} + +func (devices *DeviceSet) poolHasFreeSpace() error { + if devices.minFreeSpacePercent == 0 { + return nil + } + + _, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus() + if err != nil { + return err + } + + minFreeData := (dataTotal * uint64(devices.minFreeSpacePercent)) / 100 + if minFreeData < 1 { + minFreeData = 1 + } + dataFree := dataTotal - dataUsed + if dataFree < minFreeData { + return fmt.Errorf("devmapper: Thin Pool has %v free data blocks which is less than minimum required %v free data blocks. Create more free space in thin pool or use dm.min_free_space option to change behavior", (dataTotal - dataUsed), minFreeData) + } + + minFreeMetadata := (metadataTotal * uint64(devices.minFreeSpacePercent)) / 100 + if minFreeMetadata < 1 { + minFreeMetadata = 1 + } + + metadataFree := metadataTotal - metadataUsed + if metadataFree < minFreeMetadata { + return fmt.Errorf("devmapper: Thin Pool has %v free metadata blocks which is less than minimum required %v free metadata blocks. Create more free metadata space in thin pool or use dm.min_free_space option to change behavior", (metadataTotal - metadataUsed), minFreeMetadata) + } + + return nil +} + +func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) { + devices.Lock() + defer devices.Unlock() + + deviceID, err := devices.getNextFreeDeviceID() + if err != nil { + return nil, err + } + + if err := devices.openTransaction(hash, deviceID); err != nil { + logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID) + devices.markDeviceIDFree(deviceID) + return nil, err + } + + for { + if err := devicemapper.CreateDevice(devices.getPoolDevName(), deviceID); err != nil { + if devicemapper.DeviceIDExists(err) { + // Device ID already exists. This should not + // happen. Now we have a mechanism to find + // a free device ID. So something is not right. + // Give a warning and continue. 
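+                // The ID was free in our bitmap but the pool disagrees: pick a
+                // fresh ID, record it in the open transaction, and retry.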
+ logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID) + deviceID, err = devices.getNextFreeDeviceID() + if err != nil { + return nil, err + } + // Save new device id into transaction + devices.refreshTransaction(deviceID) + continue + } + logrus.Debugf("devmapper: Error creating device: %s", err) + devices.markDeviceIDFree(deviceID) + return nil, err + } + break + } + + logrus.Debugf("devmapper: Registering device (id %v) with FS size %v", deviceID, devices.baseFsSize) + info, err := devices.registerDevice(deviceID, hash, devices.baseFsSize, devices.OpenTransactionID) + if err != nil { + _ = devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + return nil, err + } + + if err := devices.closeTransaction(); err != nil { + devices.unregisterDevice(hash) + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + return nil, err + } + return info, nil +} + +func (devices *DeviceSet) takeSnapshot(hash string, baseInfo *devInfo, size uint64) error { + var ( + devinfo *devicemapper.Info + err error + ) + + if err = devices.poolHasFreeSpace(); err != nil { + return err + } + + if devices.deferredRemove { + devinfo, err = devicemapper.GetInfoWithDeferred(baseInfo.Name()) + if err != nil { + return err + } + if devinfo != nil && devinfo.DeferredRemove != 0 { + err = devices.cancelDeferredRemoval(baseInfo) + if err != nil { + // If Error is ErrEnxio. Device is probably already gone. Continue. + if err != devicemapper.ErrEnxio { + return err + } + devinfo = nil + } else { + defer devices.deactivateDevice(baseInfo) + } + } + } else { + devinfo, err = devicemapper.GetInfo(baseInfo.Name()) + if err != nil { + return err + } + } + + doSuspend := devinfo != nil && devinfo.Exists != 0 + + if doSuspend { + if err = devicemapper.SuspendDevice(baseInfo.Name()); err != nil { + return err + } + defer devicemapper.ResumeDevice(baseInfo.Name()) + } + + if err = devices.createRegisterSnapDevice(hash, baseInfo, size); err != nil { + return err + } + + return nil +} + +func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInfo, size uint64) error { + deviceID, err := devices.getNextFreeDeviceID() + if err != nil { + return err + } + + if err := devices.openTransaction(hash, deviceID); err != nil { + logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID) + devices.markDeviceIDFree(deviceID) + return err + } + + for { + if err := devicemapper.CreateSnapDeviceRaw(devices.getPoolDevName(), deviceID, baseInfo.DeviceID); err != nil { + if devicemapper.DeviceIDExists(err) { + // Device ID already exists. This should not + // happen. Now we have a mechanism to find + // a free device ID. So something is not right. + // Give a warning and continue. 
+ logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID) + deviceID, err = devices.getNextFreeDeviceID() + if err != nil { + return err + } + // Save new device id into transaction + devices.refreshTransaction(deviceID) + continue + } + logrus.Debugf("devmapper: Error creating snap device: %s", err) + devices.markDeviceIDFree(deviceID) + return err + } + break + } + + if _, err := devices.registerDevice(deviceID, hash, size, devices.OpenTransactionID); err != nil { + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + logrus.Debugf("devmapper: Error registering device: %s", err) + return err + } + + if err := devices.closeTransaction(); err != nil { + devices.unregisterDevice(hash) + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + return err + } + return nil +} + +func (devices *DeviceSet) loadMetadata(hash string) *devInfo { + info := &devInfo{Hash: hash, devices: devices} + + jsonData, err := ioutil.ReadFile(devices.metadataFile(info)) + if err != nil { + logrus.Debugf("devmapper: Failed to read %s with err: %v", devices.metadataFile(info), err) + return nil + } + + if err := json.Unmarshal(jsonData, &info); err != nil { + logrus.Debugf("devmapper: Failed to unmarshal devInfo from %s with err: %v", devices.metadataFile(info), err) + return nil + } + + if info.DeviceID > maxDeviceID { + logrus.Errorf("devmapper: Ignoring Invalid DeviceId=%d", info.DeviceID) + return nil + } + + return info +} + +func getDeviceUUID(device string) (string, error) { + out, err := exec.Command("blkid", "-s", "UUID", "-o", "value", device).Output() + if err != nil { + return "", fmt.Errorf("devmapper: Failed to find uuid for device %s:%v", device, err) + } + + uuid := strings.TrimSuffix(string(out), "\n") + uuid = strings.TrimSpace(uuid) + logrus.Debugf("devmapper: UUID for device: %s is:%s", device, uuid) + return uuid, nil +} + +func (devices *DeviceSet) getBaseDeviceSize() uint64 { + info, _ := devices.lookupDevice("") + if info == nil { + return 0 + } + return info.Size +} + +func (devices *DeviceSet) getBaseDeviceFS() string { + return devices.BaseDeviceFilesystem +} + +func (devices *DeviceSet) verifyBaseDeviceUUIDFS(baseInfo *devInfo) error { + devices.Lock() + defer devices.Unlock() + + if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil { + return err + } + defer devices.deactivateDevice(baseInfo) + + uuid, err := getDeviceUUID(baseInfo.DevName()) + if err != nil { + return err + } + + if devices.BaseDeviceUUID != uuid { + return fmt.Errorf("devmapper: Current Base Device UUID:%s does not match with stored UUID:%s. Possibly using a different thin pool than last invocation", uuid, devices.BaseDeviceUUID) + } + + if devices.BaseDeviceFilesystem == "" { + fsType, err := ProbeFsType(baseInfo.DevName()) + if err != nil { + return err + } + if err := devices.saveBaseDeviceFilesystem(fsType); err != nil { + return err + } + } + + // If user specified a filesystem using dm.fs option and current + // file system of base image is not same, warn user that dm.fs + // will be ignored. + if devices.BaseDeviceFilesystem != devices.filesystem { + logrus.Warnf("devmapper: Base device already exists and has filesystem %s on it. 
User specified filesystem %s will be ignored.", devices.BaseDeviceFilesystem, devices.filesystem)
+        devices.filesystem = devices.BaseDeviceFilesystem
+    }
+    return nil
+}
+
+func (devices *DeviceSet) saveBaseDeviceFilesystem(fs string) error {
+    devices.BaseDeviceFilesystem = fs
+    return devices.saveDeviceSetMetaData()
+}
+
+func (devices *DeviceSet) saveBaseDeviceUUID(baseInfo *devInfo) error {
+    devices.Lock()
+    defer devices.Unlock()
+
+    if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil {
+        return err
+    }
+    defer devices.deactivateDevice(baseInfo)
+
+    uuid, err := getDeviceUUID(baseInfo.DevName())
+    if err != nil {
+        return err
+    }
+
+    devices.BaseDeviceUUID = uuid
+    return devices.saveDeviceSetMetaData()
+}
+
+func (devices *DeviceSet) createBaseImage() error {
+    logrus.Debug("devmapper: Initializing base device-mapper thin volume")
+
+    // Create initial device
+    info, err := devices.createRegisterDevice("")
+    if err != nil {
+        return err
+    }
+
+    logrus.Debug("devmapper: Creating filesystem on base device-mapper thin volume")
+
+    if err := devices.activateDeviceIfNeeded(info, false); err != nil {
+        return err
+    }
+
+    if err := devices.createFilesystem(info); err != nil {
+        return err
+    }
+
+    info.Initialized = true
+    if err := devices.saveMetadata(info); err != nil {
+        info.Initialized = false
+        return err
+    }
+
+    if err := devices.saveBaseDeviceUUID(info); err != nil {
+        return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err)
+    }
+
+    return nil
+}
+
+// Returns whether the thin pool device exists. If the device exists, it also
+// makes sure it is a thin pool device and not some other type of device.
+func (devices *DeviceSet) thinPoolExists(thinPoolDevice string) (bool, error) {
+    logrus.Debugf("devmapper: Checking for existence of the pool %s", thinPoolDevice)
+
+    info, err := devicemapper.GetInfo(thinPoolDevice)
+    if err != nil {
+        return false, fmt.Errorf("devmapper: GetInfo() on device %s failed: %v", thinPoolDevice, err)
+    }
+
+    // Device does not exist.
+    if info.Exists == 0 {
+        return false, nil
+    }
+
+    _, _, deviceType, _, err := devicemapper.GetStatus(thinPoolDevice)
+    if err != nil {
+        return false, fmt.Errorf("devmapper: GetStatus() on device %s failed: %v", thinPoolDevice, err)
+    }
+
+    if deviceType != "thin-pool" {
+        return false, fmt.Errorf("devmapper: Device %s is not a thin pool", thinPoolDevice)
+    }
+
+    return true, nil
+}
+
+func (devices *DeviceSet) checkThinPool() error {
+    _, transactionID, dataUsed, _, _, _, err := devices.poolStatus()
+    if err != nil {
+        return err
+    }
+    if dataUsed != 0 {
+        return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) that already has used data blocks",
+            devices.thinPoolDevice)
+    }
+    if transactionID != 0 {
+        return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) with non-zero transaction ID",
+            devices.thinPoolDevice)
+    }
+    return nil
+}
+
+// Base image is initialized properly. Either save the UUID for the first time
+// (the upgrade case) or verify it.
+func (devices *DeviceSet) setupVerifyBaseImageUUIDFS(baseInfo *devInfo) error {
+    // If BaseDeviceUUID is empty (the upgrade case), save it and return success.
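+    // (Metadata written by an older daemon predates UUID tracking, so an
+    // empty UUID is expected on the first start after an upgrade.)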
+ if devices.BaseDeviceUUID == "" { + if err := devices.saveBaseDeviceUUID(baseInfo); err != nil { + return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err) + } + return nil + } + + if err := devices.verifyBaseDeviceUUIDFS(baseInfo); err != nil { + return fmt.Errorf("devmapper: Base Device UUID and Filesystem verification failed: %v", err) + } + + return nil +} + +func (devices *DeviceSet) checkGrowBaseDeviceFS(info *devInfo) error { + + if !userBaseSize { + return nil + } + + if devices.baseFsSize < devices.getBaseDeviceSize() { + return fmt.Errorf("devmapper: Base device size cannot be smaller than %s", units.HumanSize(float64(devices.getBaseDeviceSize()))) + } + + if devices.baseFsSize == devices.getBaseDeviceSize() { + return nil + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + info.Size = devices.baseFsSize + + if err := devices.saveMetadata(info); err != nil { + // Try to remove unused device + delete(devices.Devices, info.Hash) + return err + } + + return devices.growFS(info) +} + +func (devices *DeviceSet) growFS(info *devInfo) error { + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return fmt.Errorf("Error activating devmapper device: %s", err) + } + + defer devices.deactivateDevice(info) + + fsMountPoint := "/run/docker/mnt" + if _, err := os.Stat(fsMountPoint); os.IsNotExist(err) { + if err := os.MkdirAll(fsMountPoint, 0700); err != nil { + return err + } + defer os.RemoveAll(fsMountPoint) + } + + options := "" + if devices.BaseDeviceFilesystem == "xfs" { + // XFS needs nouuid or it can't mount filesystems with the same fs + options = joinMountOptions(options, "nouuid") + } + options = joinMountOptions(options, devices.mountOptions) + + if err := mount.Mount(info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options); err != nil { + return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), fsMountPoint, err) + } + + defer unix.Unmount(fsMountPoint, unix.MNT_DETACH) + + switch devices.BaseDeviceFilesystem { + case "ext4": + if out, err := exec.Command("resize2fs", info.DevName()).CombinedOutput(); err != nil { + return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out)) + } + case "xfs": + if out, err := exec.Command("xfs_growfs", info.DevName()).CombinedOutput(); err != nil { + return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out)) + } + default: + return fmt.Errorf("Unsupported filesystem type %s", devices.BaseDeviceFilesystem) + } + return nil +} + +func (devices *DeviceSet) setupBaseImage() error { + oldInfo, _ := devices.lookupDeviceWithLock("") + + // base image already exists. If it is initialized properly, do UUID + // verification and return. Otherwise remove image and set it up + // fresh. + + if oldInfo != nil { + if oldInfo.Initialized && !oldInfo.Deleted { + if err := devices.setupVerifyBaseImageUUIDFS(oldInfo); err != nil { + return err + } + + if err := devices.checkGrowBaseDeviceFS(oldInfo); err != nil { + return err + } + + return nil + } + + logrus.Debug("devmapper: Removing uninitialized base image") + // If previous base device is in deferred delete state, + // that needs to be cleaned up first. So don't try + // deferred deletion. + if err := devices.DeleteDevice("", true); err != nil { + return err + } + } + + // If we are setting up base image for the first time, make sure + // thin pool is empty. 
+ if devices.thinPoolDevice != "" && oldInfo == nil { + if err := devices.checkThinPool(); err != nil { + return err + } + } + + // Create new base image device + if err := devices.createBaseImage(); err != nil { + return err + } + + return nil +} + +func setCloseOnExec(name string) { + if fileInfos, _ := ioutil.ReadDir("/proc/self/fd"); fileInfos != nil { + for _, i := range fileInfos { + link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name())) + if link == name { + fd, err := strconv.Atoi(i.Name()) + if err == nil { + unix.CloseOnExec(fd) + } + } + } + } +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} + +// ResizePool increases the size of the pool. +func (devices *DeviceSet) ResizePool(size int64) error { + dirname := devices.loopbackDir() + datafilename := path.Join(dirname, "data") + if len(devices.dataDevice) > 0 { + datafilename = devices.dataDevice + } + metadatafilename := path.Join(dirname, "metadata") + if len(devices.metadataDevice) > 0 { + metadatafilename = devices.metadataDevice + } + + datafile, err := os.OpenFile(datafilename, os.O_RDWR, 0) + if datafile == nil { + return err + } + defer datafile.Close() + + fi, err := datafile.Stat() + if fi == nil { + return err + } + + if fi.Size() > size { + return fmt.Errorf("devmapper: Can't shrink file") + } + + dataloopback := loopback.FindLoopDeviceFor(datafile) + if dataloopback == nil { + return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", datafilename) + } + defer dataloopback.Close() + + metadatafile, err := os.OpenFile(metadatafilename, os.O_RDWR, 0) + if metadatafile == nil { + return err + } + defer metadatafile.Close() + + metadataloopback := loopback.FindLoopDeviceFor(metadatafile) + if metadataloopback == nil { + return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", metadatafilename) + } + defer metadataloopback.Close() + + // Grow loopback file + if err := datafile.Truncate(size); err != nil { + return fmt.Errorf("devmapper: Unable to grow loopback file: %s", err) + } + + // Reload size for loopback device + if err := loopback.SetCapacity(dataloopback); err != nil { + return fmt.Errorf("Unable to update loopback capacity: %s", err) + } + + // Suspend the pool + if err := devicemapper.SuspendDevice(devices.getPoolName()); err != nil { + return fmt.Errorf("devmapper: Unable to suspend pool: %s", err) + } + + // Reload with the new block sizes + if err := devicemapper.ReloadPool(devices.getPoolName(), dataloopback, metadataloopback, devices.thinpBlockSize); err != nil { + return fmt.Errorf("devmapper: Unable to reload pool: %s", err) + } + + // Resume the pool + if err := devicemapper.ResumeDevice(devices.getPoolName()); err != nil { + return fmt.Errorf("devmapper: Unable to resume pool: %s", err) + } + + return nil +} + +func (devices *DeviceSet) loadTransactionMetaData() error { + jsonData, err := ioutil.ReadFile(devices.transactionMetaFile()) + if err != nil { + // There is no active transaction. This will be the case + // during upgrade. 
+ if os.IsNotExist(err) { + devices.OpenTransactionID = devices.TransactionID + return nil + } + return err + } + + json.Unmarshal(jsonData, &devices.transaction) + return nil +} + +func (devices *DeviceSet) saveTransactionMetaData() error { + jsonData, err := json.Marshal(&devices.transaction) + if err != nil { + return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) + } + + return devices.writeMetaFile(jsonData, devices.transactionMetaFile()) +} + +func (devices *DeviceSet) removeTransactionMetaData() error { + return os.RemoveAll(devices.transactionMetaFile()) +} + +func (devices *DeviceSet) rollbackTransaction() error { + logrus.Debugf("devmapper: Rolling back open transaction: TransactionID=%d hash=%s device_id=%d", devices.OpenTransactionID, devices.DeviceIDHash, devices.DeviceID) + + // A device id might have already been deleted before transaction + // closed. In that case this call will fail. Just leave a message + // in case of failure. + if err := devicemapper.DeleteDevice(devices.getPoolDevName(), devices.DeviceID); err != nil { + logrus.Errorf("devmapper: Unable to delete device: %s", err) + } + + dinfo := &devInfo{Hash: devices.DeviceIDHash} + if err := devices.removeMetadata(dinfo); err != nil { + logrus.Errorf("devmapper: Unable to remove metadata: %s", err) + } else { + devices.markDeviceIDFree(devices.DeviceID) + } + + if err := devices.removeTransactionMetaData(); err != nil { + logrus.Errorf("devmapper: Unable to remove transaction meta file %s: %s", devices.transactionMetaFile(), err) + } + + return nil +} + +func (devices *DeviceSet) processPendingTransaction() error { + if err := devices.loadTransactionMetaData(); err != nil { + return err + } + + // If there was open transaction but pool transaction ID is same + // as open transaction ID, nothing to roll back. + if devices.TransactionID == devices.OpenTransactionID { + return nil + } + + // If open transaction ID is less than pool transaction ID, something + // is wrong. Bail out. + if devices.OpenTransactionID < devices.TransactionID { + logrus.Errorf("devmapper: Open Transaction id %d is less than pool transaction id %d", devices.OpenTransactionID, devices.TransactionID) + return nil + } + + // Pool transaction ID is not same as open transaction. There is + // a transaction which was not completed. + if err := devices.rollbackTransaction(); err != nil { + return fmt.Errorf("devmapper: Rolling back open transaction failed: %s", err) + } + + devices.OpenTransactionID = devices.TransactionID + return nil +} + +func (devices *DeviceSet) loadDeviceSetMetaData() error { + jsonData, err := ioutil.ReadFile(devices.deviceSetMetaFile()) + if err != nil { + // For backward compatibility return success if file does + // not exist. 
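+        // The deviceset-metadata file (NextDeviceID, base device UUID and
+        // filesystem) is only written by newer daemons.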
+ if os.IsNotExist(err) { + return nil + } + return err + } + + return json.Unmarshal(jsonData, devices) +} + +func (devices *DeviceSet) saveDeviceSetMetaData() error { + jsonData, err := json.Marshal(devices) + if err != nil { + return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) + } + + return devices.writeMetaFile(jsonData, devices.deviceSetMetaFile()) +} + +func (devices *DeviceSet) openTransaction(hash string, DeviceID int) error { + devices.allocateTransactionID() + devices.DeviceIDHash = hash + devices.DeviceID = DeviceID + if err := devices.saveTransactionMetaData(); err != nil { + return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err) + } + return nil +} + +func (devices *DeviceSet) refreshTransaction(DeviceID int) error { + devices.DeviceID = DeviceID + if err := devices.saveTransactionMetaData(); err != nil { + return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err) + } + return nil +} + +func (devices *DeviceSet) closeTransaction() error { + if err := devices.updatePoolTransactionID(); err != nil { + logrus.Debug("devmapper: Failed to close Transaction") + return err + } + return nil +} + +func determineDriverCapabilities(version string) error { + /* + * Driver version 4.27.0 and greater support deferred activation + * feature. + */ + + logrus.Debugf("devicemapper: driver version is %s", version) + + versionSplit := strings.Split(version, ".") + major, err := strconv.Atoi(versionSplit[0]) + if err != nil { + return graphdriver.ErrNotSupported + } + + if major > 4 { + driverDeferredRemovalSupport = true + return nil + } + + if major < 4 { + return nil + } + + minor, err := strconv.Atoi(versionSplit[1]) + if err != nil { + return graphdriver.ErrNotSupported + } + + /* + * If major is 4 and minor is 27, then there is no need to + * check for patch level as it can not be less than 0. + */ + if minor >= 27 { + driverDeferredRemovalSupport = true + return nil + } + + return nil +} + +// Determine the major and minor number of loopback device +func getDeviceMajorMinor(file *os.File) (uint64, uint64, error) { + stat, err := file.Stat() + if err != nil { + return 0, 0, err + } + + dev := stat.Sys().(*syscall.Stat_t).Rdev + majorNum := major(dev) + minorNum := minor(dev) + + logrus.Debugf("devmapper: Major:Minor for device: %s is:%v:%v", file.Name(), majorNum, minorNum) + return majorNum, minorNum, nil +} + +// Given a file which is backing file of a loop back device, find the +// loopback device name and its major/minor number. 
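+// For example, a data file attached to /dev/loop0 yields
+// ("/dev/loop0", 7, 0, nil), 7:0 being the loop device's major:minor pair.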
+func getLoopFileDeviceMajMin(filename string) (string, uint64, uint64, error) { + file, err := os.Open(filename) + if err != nil { + logrus.Debugf("devmapper: Failed to open file %s", filename) + return "", 0, 0, err + } + + defer file.Close() + loopbackDevice := loopback.FindLoopDeviceFor(file) + if loopbackDevice == nil { + return "", 0, 0, fmt.Errorf("devmapper: Unable to find loopback mount for: %s", filename) + } + defer loopbackDevice.Close() + + Major, Minor, err := getDeviceMajorMinor(loopbackDevice) + if err != nil { + return "", 0, 0, err + } + return loopbackDevice.Name(), Major, Minor, nil +} + +// Get the major/minor numbers of thin pool data and metadata devices +func (devices *DeviceSet) getThinPoolDataMetaMajMin() (uint64, uint64, uint64, uint64, error) { + var params, poolDataMajMin, poolMetadataMajMin string + + _, _, _, params, err := devicemapper.GetTable(devices.getPoolName()) + if err != nil { + return 0, 0, 0, 0, err + } + + if _, err = fmt.Sscanf(params, "%s %s", &poolMetadataMajMin, &poolDataMajMin); err != nil { + return 0, 0, 0, 0, err + } + + logrus.Debugf("devmapper: poolDataMajMin=%s poolMetaMajMin=%s\n", poolDataMajMin, poolMetadataMajMin) + + poolDataMajMinorSplit := strings.Split(poolDataMajMin, ":") + poolDataMajor, err := strconv.ParseUint(poolDataMajMinorSplit[0], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + poolDataMinor, err := strconv.ParseUint(poolDataMajMinorSplit[1], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + poolMetadataMajMinorSplit := strings.Split(poolMetadataMajMin, ":") + poolMetadataMajor, err := strconv.ParseUint(poolMetadataMajMinorSplit[0], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + poolMetadataMinor, err := strconv.ParseUint(poolMetadataMajMinorSplit[1], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + return poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, nil +} + +func (devices *DeviceSet) loadThinPoolLoopBackInfo() error { + poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, err := devices.getThinPoolDataMetaMajMin() + if err != nil { + return err + } + + dirname := devices.loopbackDir() + + // data device has not been passed in. So there should be a data file + // which is being mounted as loop device. + if devices.dataDevice == "" { + datafilename := path.Join(dirname, "data") + dataLoopDevice, dataMajor, dataMinor, err := getLoopFileDeviceMajMin(datafilename) + if err != nil { + return err + } + + // Compare the two + if poolDataMajor == dataMajor && poolDataMinor == dataMinor { + devices.dataDevice = dataLoopDevice + devices.dataLoopFile = datafilename + } + + } + + // metadata device has not been passed in. So there should be a + // metadata file which is being mounted as loop device. + if devices.metadataDevice == "" { + metadatafilename := path.Join(dirname, "metadata") + metadataLoopDevice, metadataMajor, metadataMinor, err := getLoopFileDeviceMajMin(metadatafilename) + if err != nil { + return err + } + if poolMetadataMajor == metadataMajor && poolMetadataMinor == metadataMinor { + devices.metadataDevice = metadataLoopDevice + devices.metadataLoopFile = metadatafilename + } + } + + return nil +} + +func (devices *DeviceSet) enableDeferredRemovalDeletion() error { + + // If user asked for deferred removal then check both libdm library + // and kernel driver support deferred removal otherwise error out. 
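+    // driverDeferredRemovalSupport is derived from the kernel driver version
+    // (4.27.0 or newer), while LibraryDeferredRemovalSupport reflects whether
+    // the libdm build itself supports deferred removal.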
+ if enableDeferredRemoval { + if !driverDeferredRemovalSupport { + return fmt.Errorf("devmapper: Deferred removal can not be enabled as kernel does not support it") + } + if !devicemapper.LibraryDeferredRemovalSupport { + return fmt.Errorf("devmapper: Deferred removal can not be enabled as libdm does not support it") + } + logrus.Debug("devmapper: Deferred removal support enabled.") + devices.deferredRemove = true + } + + if enableDeferredDeletion { + if !devices.deferredRemove { + return fmt.Errorf("devmapper: Deferred deletion can not be enabled as deferred removal is not enabled. Enable deferred removal using --storage-opt dm.use_deferred_removal=true parameter") + } + logrus.Debug("devmapper: Deferred deletion support enabled.") + devices.deferredDelete = true + } + return nil +} + +func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) { + if err := devices.enableDeferredRemovalDeletion(); err != nil { + return err + } + + // https://github.com/docker/docker/issues/4036 + if supported := devicemapper.UdevSetSyncSupport(true); !supported { + if dockerversion.IAmStatic == "true" { + logrus.Error("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a dynamic binary to use devicemapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/dockerd/#storage-driver-options") + } else { + logrus.Error("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a more recent version of libdevmapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/dockerd/#storage-driver-options") + } + + if !devices.overrideUdevSyncCheck { + return graphdriver.ErrNotSupported + } + } + + //create the root dir of the devmapper driver ownership to match this + //daemon's remapped root uid/gid so containers can start properly + uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAs(devices.root, 0700, uid, gid); err != nil && !os.IsExist(err) { + return err + } + if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil && !os.IsExist(err) { + return err + } + + prevSetupConfig, err := readLVMConfig(devices.root) + if err != nil { + return err + } + + if !reflect.DeepEqual(devices.lvmSetupConfig, directLVMConfig{}) { + if devices.thinPoolDevice != "" { + return errors.New("cannot setup direct-lvm when `dm.thinpooldev` is also specified") + } + + if !reflect.DeepEqual(prevSetupConfig, devices.lvmSetupConfig) { + if !reflect.DeepEqual(prevSetupConfig, directLVMConfig{}) { + return errors.New("changing direct-lvm config is not supported") + } + logrus.WithField("storage-driver", "devicemapper").WithField("direct-lvm-config", devices.lvmSetupConfig).Debugf("Setting up direct lvm mode") + if err := verifyBlockDevice(devices.lvmSetupConfig.Device, lvmSetupConfigForce); err != nil { + return err + } + if err := setupDirectLVM(devices.lvmSetupConfig); err != nil { + return err + } + if err := writeLVMConfig(devices.root, devices.lvmSetupConfig); err != nil { + return err + } + } + devices.thinPoolDevice = "docker-thinpool" + logrus.WithField("storage-driver", "devicemapper").Debugf("Setting dm.thinpooldev to %q", devices.thinPoolDevice) + } + + // Set the device prefix from the device id and inode of the docker root dir + st, err := os.Stat(devices.root) + if err != nil { + return fmt.Errorf("devmapper: 
+ }
+ sysSt := st.Sys().(*syscall.Stat_t)
+ // "reg-" stands for "regular file".
+ // In the future we might use "dev-" for "device file", etc.
+ // docker-maj,min[-inode] stands for:
+ // - Managed by docker
+ // - The target of this device is at major <maj> and minor <min>
+ // - If <inode> is defined, use that file inside the device as a loopback image. Otherwise use the device itself.
+ devices.devicePrefix = fmt.Sprintf("docker-%d:%d-%d", major(sysSt.Dev), minor(sysSt.Dev), sysSt.Ino)
+ logrus.Debugf("devmapper: Generated prefix: %s", devices.devicePrefix)
+
+ // Check for the existence of the thin-pool device
+ poolExists, err := devices.thinPoolExists(devices.getPoolName())
+ if err != nil {
+ return err
+ }
+
+ // It seems libdevmapper opens this without O_CLOEXEC, and go exec will not close files
+ // that are not close-on-exec,
+ // so we add this bad hack to make sure it closes itself
+ setCloseOnExec("/dev/mapper/control")
+
+ // Make sure the sparse images exist in <root>/devicemapper/data and
+ // <root>/devicemapper/metadata
+
+ createdLoopback := false
+
+ // If the pool doesn't exist, create it
+ if !poolExists && devices.thinPoolDevice == "" {
+ logrus.Debug("devmapper: Pool doesn't exist. Creating it.")
+
+ var (
+ dataFile *os.File
+ metadataFile *os.File
+ )
+
+ if devices.dataDevice == "" {
+ // Make sure the sparse images exist in <root>/devicemapper/data
+
+ hasData := devices.hasImage("data")
+
+ if !doInit && !hasData {
+ return errors.New("loopback data file not found")
+ }
+
+ if !hasData {
+ createdLoopback = true
+ }
+
+ data, err := devices.ensureImage("data", devices.dataLoopbackSize)
+ if err != nil {
+ logrus.Debugf("devmapper: Error device ensureImage (data): %s", err)
+ return err
+ }
+
+ dataFile, err = loopback.AttachLoopDevice(data)
+ if err != nil {
+ return err
+ }
+ devices.dataLoopFile = data
+ devices.dataDevice = dataFile.Name()
+ } else {
+ dataFile, err = os.OpenFile(devices.dataDevice, os.O_RDWR, 0600)
+ if err != nil {
+ return err
+ }
+ }
+ defer dataFile.Close()
+
+ if devices.metadataDevice == "" {
+ // Make sure the sparse images exist in <root>/devicemapper/metadata
+
+ hasMetadata := devices.hasImage("metadata")
+
+ if !doInit && !hasMetadata {
+ return errors.New("loopback metadata file not found")
+ }
+
+ if !hasMetadata {
+ createdLoopback = true
+ }
+
+ metadata, err := devices.ensureImage("metadata", devices.metaDataLoopbackSize)
+ if err != nil {
+ logrus.Debugf("devmapper: Error device ensureImage (metadata): %s", err)
+ return err
+ }
+
+ metadataFile, err = loopback.AttachLoopDevice(metadata)
+ if err != nil {
+ return err
+ }
+ devices.metadataLoopFile = metadata
+ devices.metadataDevice = metadataFile.Name()
+ } else {
+ metadataFile, err = os.OpenFile(devices.metadataDevice, os.O_RDWR, 0600)
+ if err != nil {
+ return err
+ }
+ }
+ defer metadataFile.Close()
+
+ if err := devicemapper.CreatePool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil {
+ return err
+ }
+ defer func() {
+ if retErr != nil {
+ err = devices.deactivatePool()
+ if err != nil {
+ logrus.Warnf("devmapper: Failed to deactivatePool: %v", err)
+ }
+ }
+ }()
+ }
+
+ // The pool already exists and the caller did not pass us a pool. That
+ // means we probably created the pool earlier and could not remove it as
+ // some containers were still using it. Detect some of the properties of
+ // the pool, like whether it is using loop devices.
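+ //
+ // Illustrative sketch of what loadThinPoolLoopBackInfo() compares (the
+ // thin-pool table format "<metadata_maj:min> <data_maj:min> ..." and the
+ // device numbers here are assumptions for the example):
+ //
+ //   dmsetup table docker-thinpool
+ //   => "0 209715200 thin-pool 7:1 7:0 128 32768 1 skip_block_zeroing"
+ //
+ // The 7:1 and 7:0 pairs would be matched against the loop devices backing
+ // the "data" and "metadata" files under devices.loopbackDir().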
+ if poolExists && devices.thinPoolDevice == "" { + if err := devices.loadThinPoolLoopBackInfo(); err != nil { + logrus.Debugf("devmapper: Failed to load thin pool loopback device information:%v", err) + return err + } + } + + // If we didn't just create the data or metadata image, we need to + // load the transaction id and migrate old metadata + if !createdLoopback { + if err := devices.initMetaData(); err != nil { + return err + } + } + + if devices.thinPoolDevice == "" { + if devices.metadataLoopFile != "" || devices.dataLoopFile != "" { + logrus.Warn("devmapper: Usage of loopback devices is strongly discouraged for production use. Please use `--storage-opt dm.thinpooldev` or use `man docker` to refer to dm.thinpooldev section.") + } + } + + // Right now this loads only NextDeviceID. If there is more metadata + // down the line, we might have to move it earlier. + if err := devices.loadDeviceSetMetaData(); err != nil { + return err + } + + // Setup the base image + if doInit { + if err := devices.setupBaseImage(); err != nil { + logrus.Debugf("devmapper: Error device setupBaseImage: %s", err) + return err + } + } + + return nil +} + +// AddDevice adds a device and registers in the hash. +func (devices *DeviceSet) AddDevice(hash, baseHash string, storageOpt map[string]string) error { + logrus.Debugf("devmapper: AddDevice START(hash=%s basehash=%s)", hash, baseHash) + defer logrus.Debugf("devmapper: AddDevice END(hash=%s basehash=%s)", hash, baseHash) + + // If a deleted device exists, return error. + baseInfo, err := devices.lookupDeviceWithLock(baseHash) + if err != nil { + return err + } + + if baseInfo.Deleted { + return fmt.Errorf("devmapper: Base device %v has been marked for deferred deletion", baseInfo.Hash) + } + + baseInfo.lock.Lock() + defer baseInfo.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + // Also include deleted devices in case hash of new device is + // same as one of the deleted devices. + if info, _ := devices.lookupDevice(hash); info != nil { + return fmt.Errorf("devmapper: device %s already exists. Deleted=%v", hash, info.Deleted) + } + + size, err := devices.parseStorageOpt(storageOpt) + if err != nil { + return err + } + + if size == 0 { + size = baseInfo.Size + } + + if size < baseInfo.Size { + return fmt.Errorf("devmapper: Container size cannot be smaller than %s", units.HumanSize(float64(baseInfo.Size))) + } + + if err := devices.takeSnapshot(hash, baseInfo, size); err != nil { + return err + } + + // Grow the container rootfs. + if size > baseInfo.Size { + info, err := devices.lookupDevice(hash) + if err != nil { + return err + } + + if err := devices.growFS(info); err != nil { + return err + } + } + + return nil +} + +func (devices *DeviceSet) parseStorageOpt(storageOpt map[string]string) (uint64, error) { + + // Read size to change the block device size per container. + for key, val := range storageOpt { + key := strings.ToLower(key) + switch key { + case "size": + size, err := units.RAMInBytes(val) + if err != nil { + return 0, err + } + return uint64(size), nil + default: + return 0, fmt.Errorf("Unknown option %s", key) + } + } + + return 0, nil +} + +func (devices *DeviceSet) markForDeferredDeletion(info *devInfo) error { + // If device is already in deleted state, there is nothing to be done. + if info.Deleted { + return nil + } + + logrus.Debugf("devmapper: Marking device %s for deferred deletion.", info.Hash) + + info.Deleted = true + + // save device metadata to reflect deleted state. 
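+ // (Illustrative note, not part of the upstream change: persisting the
+ // Deleted flag means the periodic deletion worker can retry the delete
+ // later, even across a daemon restart.)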
+ if err := devices.saveMetadata(info); err != nil {
+ info.Deleted = false
+ return err
+ }
+
+ devices.nrDeletedDevices++
+ return nil
+}
+
+// Should be called with devices.Lock() held.
+func (devices *DeviceSet) deleteTransaction(info *devInfo, syncDelete bool) error {
+ if err := devices.openTransaction(info.Hash, info.DeviceID); err != nil {
+ logrus.Debugf("devmapper: Error opening transaction hash = %s deviceId = %d", info.Hash, info.DeviceID)
+ return err
+ }
+
+ defer devices.closeTransaction()
+
+ err := devicemapper.DeleteDevice(devices.getPoolDevName(), info.DeviceID)
+ if err != nil {
+ // If syncDelete is true, we want to return the error. If deferred
+ // deletion is not enabled, we return an error. If the error is
+ // something other than EBUSY, return an error.
+ if syncDelete || !devices.deferredDelete || err != devicemapper.ErrBusy {
+ logrus.Debugf("devmapper: Error deleting device: %s", err)
+ return err
+ }
+ }
+
+ if err == nil {
+ if err := devices.unregisterDevice(info.Hash); err != nil {
+ return err
+ }
+ // If the device was already in deferred delete state, that means
+ // deletion was being tried again later. Reduce the deleted
+ // device count.
+ if info.Deleted {
+ devices.nrDeletedDevices--
+ }
+ devices.markDeviceIDFree(info.DeviceID)
+ } else {
+ if err := devices.markForDeferredDeletion(info); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// Issue discard only if device open count is zero.
+func (devices *DeviceSet) issueDiscard(info *devInfo) error {
+ logrus.Debugf("devmapper: issueDiscard START(device: %s).", info.Hash)
+ defer logrus.Debugf("devmapper: issueDiscard END(device: %s).", info.Hash)
+ // This is a workaround for the kernel not discarding blocks on the
+ // thin pool when we remove a thinp device, so we do it manually.
+ // Even if the device is deferred deleted, activate it and issue
+ // discards.
+ if err := devices.activateDeviceIfNeeded(info, true); err != nil {
+ return err
+ }
+
+ devinfo, err := devicemapper.GetInfo(info.Name())
+ if err != nil {
+ return err
+ }
+
+ if devinfo.OpenCount != 0 {
+ logrus.Debugf("devmapper: Device: %s is in use. OpenCount=%d. Not issuing discards.", info.Hash, devinfo.OpenCount)
+ return nil
+ }
+
+ if err := devicemapper.BlockDeviceDiscard(info.DevName()); err != nil {
+ logrus.Debugf("devmapper: Error discarding block on device: %s (ignoring)", err)
+ }
+ return nil
+}
+
+// Should be called with devices.Lock() held.
+func (devices *DeviceSet) deleteDevice(info *devInfo, syncDelete bool) error {
+ if devices.doBlkDiscard {
+ devices.issueDiscard(info)
+ }
+
+ // Try to deactivate the device in case it is active.
+ // If deferred removal is enabled and deferred deletion is disabled
+ // then make sure the device is removed synchronously. There have been
+ // some cases of the device being busy for a short duration and we would
+ // rather busy wait for device removal to take care of these cases.
+ deferredRemove := devices.deferredRemove
+ if !devices.deferredDelete {
+ deferredRemove = false
+ }
+
+ if err := devices.deactivateDeviceMode(info, deferredRemove); err != nil {
+ logrus.Debugf("devmapper: Error deactivating device: %s", err)
+ return err
+ }
+
+ if err := devices.deleteTransaction(info, syncDelete); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// DeleteDevice will return success if the device has been marked for deferred
+// removal. If one wants to override that and wants DeleteDevice() to fail if
+// the device was busy and could not be deleted, set syncDelete=true.
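+//
+// Illustrative call (hypothetical hash value):
+//
+//   // fail instead of deferring the delete if the device is busy:
+//   err := devices.DeleteDevice("0123abcdef...", true)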
+func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error {
+ logrus.Debugf("devmapper: DeleteDevice START(hash=%v syncDelete=%v)", hash, syncDelete)
+ defer logrus.Debugf("devmapper: DeleteDevice END(hash=%v syncDelete=%v)", hash, syncDelete)
+ info, err := devices.lookupDeviceWithLock(hash)
+ if err != nil {
+ return err
+ }
+
+ info.lock.Lock()
+ defer info.lock.Unlock()
+
+ devices.Lock()
+ defer devices.Unlock()
+
+ return devices.deleteDevice(info, syncDelete)
+}
+
+func (devices *DeviceSet) deactivatePool() error {
+ logrus.Debug("devmapper: deactivatePool() START")
+ defer logrus.Debug("devmapper: deactivatePool() END")
+ devname := devices.getPoolDevName()
+
+ devinfo, err := devicemapper.GetInfo(devname)
+ if err != nil {
+ return err
+ }
+
+ if devinfo.Exists == 0 {
+ return nil
+ }
+ if err := devicemapper.RemoveDevice(devname); err != nil {
+ return err
+ }
+
+ if d, err := devicemapper.GetDeps(devname); err == nil {
+ logrus.Warnf("devmapper: device %s still has %d active dependents", devname, d.Count)
+ }
+
+ return nil
+}
+
+func (devices *DeviceSet) deactivateDevice(info *devInfo) error {
+ return devices.deactivateDeviceMode(info, devices.deferredRemove)
+}
+
+func (devices *DeviceSet) deactivateDeviceMode(info *devInfo, deferredRemove bool) error {
+ var err error
+ logrus.Debugf("devmapper: deactivateDevice START(%s)", info.Hash)
+ defer logrus.Debugf("devmapper: deactivateDevice END(%s)", info.Hash)
+
+ devinfo, err := devicemapper.GetInfo(info.Name())
+ if err != nil {
+ return err
+ }
+
+ if devinfo.Exists == 0 {
+ return nil
+ }
+
+ if deferredRemove {
+ err = devicemapper.RemoveDeviceDeferred(info.Name())
+ } else {
+ err = devices.removeDevice(info.Name())
+ }
+
+ // This function's semantics are such that it does not return an
+ // error if the device does not exist. So if the device went away by
+ // the time we actually tried to remove it, do not return an error.
+ if err != devicemapper.ErrEnxio {
+ return err
+ }
+ return nil
+}
+
+// Issues the underlying dm remove operation.
+func (devices *DeviceSet) removeDevice(devname string) error {
+ var err error
+
+ logrus.Debugf("devmapper: removeDevice START(%s)", devname)
+ defer logrus.Debugf("devmapper: removeDevice END(%s)", devname)
+
+ for i := 0; i < 200; i++ {
+ err = devicemapper.RemoveDevice(devname)
+ if err == nil {
+ break
+ }
+ if err != devicemapper.ErrBusy {
+ return err
+ }
+
+ // If we see EBUSY it may be a transient error,
+ // so sleep a bit and retry a few times.
+ devices.Unlock()
+ time.Sleep(100 * time.Millisecond)
+ devices.Lock()
+ }
+
+ return err
+}
+
+func (devices *DeviceSet) cancelDeferredRemovalIfNeeded(info *devInfo) error {
+ if !devices.deferredRemove {
+ return nil
+ }
+
+ logrus.Debugf("devmapper: cancelDeferredRemovalIfNeeded START(%s)", info.Name())
+ defer logrus.Debugf("devmapper: cancelDeferredRemovalIfNeeded END(%s)", info.Name())
+
+ devinfo, err := devicemapper.GetInfoWithDeferred(info.Name())
+ if err != nil {
+ return err
+ }
+
+ if devinfo != nil && devinfo.DeferredRemove == 0 {
+ return nil
+ }
+
+ // Cancel deferred remove
+ if err := devices.cancelDeferredRemoval(info); err != nil {
+ // If the error is ErrEnxio, the device is probably already gone. Continue.
+ if err != devicemapper.ErrEnxio {
+ return err
+ }
+ }
+ return nil
+}
+
+func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error {
+ logrus.Debugf("devmapper: cancelDeferredRemoval START(%s)", info.Name())
+ defer logrus.Debugf("devmapper: cancelDeferredRemoval END(%s)", info.Name())
+
+ var err error
+
+ // Cancel deferred remove
+ for i := 0; i < 100; i++ {
+ err = devicemapper.CancelDeferredRemove(info.Name())
+ if err != nil {
+ if err == devicemapper.ErrBusy {
+ // If we see EBUSY it may be a transient error,
+ // so sleep a bit and retry a few times.
+ devices.Unlock()
+ time.Sleep(100 * time.Millisecond)
+ devices.Lock()
+ continue
+ }
+ }
+ break
+ }
+ return err
+}
+
+// Shutdown shuts down the device by unmounting the root.
+func (devices *DeviceSet) Shutdown(home string) error {
+ logrus.Debugf("devmapper: [deviceset %s] Shutdown()", devices.devicePrefix)
+ logrus.Debugf("devmapper: Shutting down DeviceSet: %s", devices.root)
+ defer logrus.Debugf("devmapper: [deviceset %s] Shutdown() END", devices.devicePrefix)
+
+ // Stop the deletion worker. This should stop delivering new events to
+ // the ticker channel. That means no new instance of cleanupDeletedDevice()
+ // will run after this call. If one instance is already running at
+ // the time of the call, it must be holding devices.Lock() and
+ // we will block on this lock till the cleanup function exits.
+ devices.deletionWorkerTicker.Stop()
+
+ devices.Lock()
+ // Save DeviceSet metadata first. Docker kills all threads if they
+ // don't finish in a certain time. It is possible that the Shutdown()
+ // routine does not finish in time as we loop trying to deactivate
+ // some devices while these are busy. In that case the shutdown() routine
+ // will be killed and we will not get a chance to save the deviceset
+ // metadata. Hence save this early before trying to deactivate devices.
+ devices.saveDeviceSetMetaData()
+
+ // ignore the error since it's just a best effort to not try to unmount something that's mounted
+ mounts, _ := mount.GetMounts()
+ mounted := make(map[string]bool, len(mounts))
+ for _, mnt := range mounts {
+ mounted[mnt.Mountpoint] = true
+ }
+
+ if err := filepath.Walk(path.Join(home, "mnt"), func(p string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ if !info.IsDir() {
+ return nil
+ }
+
+ if mounted[p] {
+ // We use MNT_DETACH here in case it is still busy in some running
+ // container. This means it'll go away from the global scope directly,
+ // and the device will be released when that container dies.
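+ // (MNT_DETACH performs a lazy unmount: the mount point is detached
+ // immediately, and the kernel releases the underlying filesystem once
+ // it is no longer in use.)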
+ if err := unix.Unmount(p, unix.MNT_DETACH); err != nil {
+ logrus.Debugf("devmapper: Shutdown unmounting %s, error: %s", p, err)
+ }
+ }
+
+ if devInfo, err := devices.lookupDevice(path.Base(p)); err != nil {
+ logrus.Debugf("devmapper: Shutdown lookup device %s, error: %s", path.Base(p), err)
+ } else {
+ if err := devices.deactivateDevice(devInfo); err != nil {
+ logrus.Debugf("devmapper: Shutdown deactivate %s , error: %s", devInfo.Hash, err)
+ }
+ }
+
+ return nil
+ }); err != nil && !os.IsNotExist(err) {
+ devices.Unlock()
+ return err
+ }
+
+ devices.Unlock()
+
+ info, _ := devices.lookupDeviceWithLock("")
+ if info != nil {
+ info.lock.Lock()
+ devices.Lock()
+ if err := devices.deactivateDevice(info); err != nil {
+ logrus.Debugf("devmapper: Shutdown deactivate base , error: %s", err)
+ }
+ devices.Unlock()
+ info.lock.Unlock()
+ }
+
+ devices.Lock()
+ if devices.thinPoolDevice == "" {
+ if err := devices.deactivatePool(); err != nil {
+ logrus.Debugf("devmapper: Shutdown deactivate pool , error: %s", err)
+ }
+ }
+ devices.Unlock()
+
+ return nil
+}
+
+// Recent XFS changes allow changing the behavior of the filesystem in case of
+// errors. When the thin pool gets full and XFS gets an ENOSPC error, it
+// currently retries the IO infinitely, which can sometimes block the
+// container process so that it can't be killed. With a value of 0, XFS will
+// not retry upon error and instead will shut down the filesystem.
+
+func (devices *DeviceSet) xfsSetNospaceRetries(info *devInfo) error {
+ dmDevicePath, err := os.Readlink(info.DevName())
+ if err != nil {
+ return fmt.Errorf("devmapper: readlink failed for device %v:%v", info.DevName(), err)
+ }
+
+ dmDeviceName := path.Base(dmDevicePath)
+ filePath := "/sys/fs/xfs/" + dmDeviceName + "/error/metadata/ENOSPC/max_retries"
+ maxRetriesFile, err := os.OpenFile(filePath, os.O_WRONLY, 0)
+ if err != nil {
+ return fmt.Errorf("devmapper: user specified daemon option dm.xfs_nospace_max_retries but it does not seem to be supported on this system :%v", err)
+ }
+ defer maxRetriesFile.Close()
+
+ // Set max retries to the user-specified value
+ _, err = maxRetriesFile.WriteString(devices.xfsNospaceRetries)
+ if err != nil {
+ return fmt.Errorf("devmapper: Failed to write string %v to file %v:%v", devices.xfsNospaceRetries, filePath, err)
+ }
+ return nil
+}
+
+// MountDevice mounts the device if not already mounted.
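+//
+// Illustrative call (hypothetical values):
+//
+//   err := devices.MountDevice(hash, "/var/lib/docker/devicemapper/mnt/<id>", "")
+//
+// For XFS-backed devices, xfsSetNospaceRetries (above) is applied right
+// after a successful mount when dm.xfs_nospace_max_retries is set.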
+func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error { + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return err + } + + if info.Deleted { + return fmt.Errorf("devmapper: Can't mount device %v as it has been marked for deferred deletion", info.Hash) + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err) + } + + fstype, err := ProbeFsType(info.DevName()) + if err != nil { + return err + } + + options := "" + + if fstype == "xfs" { + // XFS needs nouuid or it can't mount filesystems with the same fs + options = joinMountOptions(options, "nouuid") + } + + options = joinMountOptions(options, devices.mountOptions) + options = joinMountOptions(options, label.FormatMountLabel("", mountLabel)) + + if err := mount.Mount(info.DevName(), path, fstype, options); err != nil { + return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s", info.DevName(), path, err) + } + + if fstype == "xfs" && devices.xfsNospaceRetries != "" { + if err := devices.xfsSetNospaceRetries(info); err != nil { + unix.Unmount(path, unix.MNT_DETACH) + devices.deactivateDevice(info) + return err + } + } + + return nil +} + +// UnmountDevice unmounts the device and removes it from hash. +func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error { + logrus.Debugf("devmapper: UnmountDevice START(hash=%s)", hash) + defer logrus.Debugf("devmapper: UnmountDevice END(hash=%s)", hash) + + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + logrus.Debugf("devmapper: Unmount(%s)", mountPath) + if err := unix.Unmount(mountPath, unix.MNT_DETACH); err != nil { + return err + } + logrus.Debug("devmapper: Unmount done") + + return devices.deactivateDevice(info) +} + +// HasDevice returns true if the device metadata exists. +func (devices *DeviceSet) HasDevice(hash string) bool { + info, _ := devices.lookupDeviceWithLock(hash) + return info != nil +} + +// List returns a list of device ids. 
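+// (The ids are the hashes registered via AddDevice; the base device is
+// tracked under the empty hash "".)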
+func (devices *DeviceSet) List() []string { + devices.Lock() + defer devices.Unlock() + + ids := make([]string, len(devices.Devices)) + i := 0 + for k := range devices.Devices { + ids[i] = k + i++ + } + return ids +} + +func (devices *DeviceSet) deviceStatus(devName string) (sizeInSectors, mappedSectors, highestMappedSector uint64, err error) { + var params string + _, sizeInSectors, _, params, err = devicemapper.GetStatus(devName) + if err != nil { + return + } + if _, err = fmt.Sscanf(params, "%d %d", &mappedSectors, &highestMappedSector); err == nil { + return + } + return +} + +// GetDeviceStatus provides size, mapped sectors +func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) { + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return nil, err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + status := &DevStatus{ + DeviceID: info.DeviceID, + Size: info.Size, + TransactionID: info.TransactionID, + } + + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return nil, fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err) + } + + sizeInSectors, mappedSectors, highestMappedSector, err := devices.deviceStatus(info.DevName()) + + if err != nil { + return nil, err + } + + status.SizeInSectors = sizeInSectors + status.MappedSectors = mappedSectors + status.HighestMappedSector = highestMappedSector + + return status, nil +} + +func (devices *DeviceSet) poolStatus() (totalSizeInSectors, transactionID, dataUsed, dataTotal, metadataUsed, metadataTotal uint64, err error) { + var params string + if _, totalSizeInSectors, _, params, err = devicemapper.GetStatus(devices.getPoolName()); err == nil { + _, err = fmt.Sscanf(params, "%d %d/%d %d/%d", &transactionID, &metadataUsed, &metadataTotal, &dataUsed, &dataTotal) + } + return +} + +// DataDevicePath returns the path to the data storage for this deviceset, +// regardless of loopback or block device +func (devices *DeviceSet) DataDevicePath() string { + return devices.dataDevice +} + +// MetadataDevicePath returns the path to the metadata storage for this deviceset, +// regardless of loopback or block device +func (devices *DeviceSet) MetadataDevicePath() string { + return devices.metadataDevice +} + +func (devices *DeviceSet) getUnderlyingAvailableSpace(loopFile string) (uint64, error) { + buf := new(unix.Statfs_t) + if err := unix.Statfs(loopFile, buf); err != nil { + logrus.Warnf("devmapper: Couldn't stat loopfile filesystem %v: %v", loopFile, err) + return 0, err + } + return buf.Bfree * uint64(buf.Bsize), nil +} + +func (devices *DeviceSet) isRealFile(loopFile string) (bool, error) { + if loopFile != "" { + fi, err := os.Stat(loopFile) + if err != nil { + logrus.Warnf("devmapper: Couldn't stat loopfile %v: %v", loopFile, err) + return false, err + } + return fi.Mode().IsRegular(), nil + } + return false, nil +} + +// Status returns the current status of this deviceset +func (devices *DeviceSet) Status() *Status { + devices.Lock() + defer devices.Unlock() + + status := &Status{} + + status.PoolName = devices.getPoolName() + status.DataFile = devices.DataDevicePath() + status.DataLoopback = devices.dataLoopFile + status.MetadataFile = devices.MetadataDevicePath() + status.MetadataLoopback = devices.metadataLoopFile + status.UdevSyncSupported = devicemapper.UdevSyncSupported() + status.DeferredRemoveEnabled = devices.deferredRemove + status.DeferredDeleteEnabled = devices.deferredDelete + 
status.DeferredDeletedDeviceCount = devices.nrDeletedDevices
+ status.BaseDeviceSize = devices.getBaseDeviceSize()
+ status.BaseDeviceFS = devices.getBaseDeviceFS()
+
+ totalSizeInSectors, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus()
+ if err == nil {
+ // Convert from blocks to bytes
+ blockSizeInSectors := totalSizeInSectors / dataTotal
+
+ status.Data.Used = dataUsed * blockSizeInSectors * 512
+ status.Data.Total = dataTotal * blockSizeInSectors * 512
+ status.Data.Available = status.Data.Total - status.Data.Used
+
+ // metadata blocks are always 4k
+ status.Metadata.Used = metadataUsed * 4096
+ status.Metadata.Total = metadataTotal * 4096
+ status.Metadata.Available = status.Metadata.Total - status.Metadata.Used
+
+ status.SectorSize = blockSizeInSectors * 512
+
+ if check, _ := devices.isRealFile(devices.dataLoopFile); check {
+ actualSpace, err := devices.getUnderlyingAvailableSpace(devices.dataLoopFile)
+ if err == nil && actualSpace < status.Data.Available {
+ status.Data.Available = actualSpace
+ }
+ }
+
+ if check, _ := devices.isRealFile(devices.metadataLoopFile); check {
+ actualSpace, err := devices.getUnderlyingAvailableSpace(devices.metadataLoopFile)
+ if err == nil && actualSpace < status.Metadata.Available {
+ status.Metadata.Available = actualSpace
+ }
+ }
+
+ minFreeData := (dataTotal * uint64(devices.minFreeSpacePercent)) / 100
+ status.MinFreeSpace = minFreeData * blockSizeInSectors * 512
+ }
+
+ return status
+}
+
+// exportDeviceMetadata returns the metadata (device id, size, and name) of
+// the device with the given hash.
+func (devices *DeviceSet) exportDeviceMetadata(hash string) (*deviceMetadata, error) {
+ info, err := devices.lookupDeviceWithLock(hash)
+ if err != nil {
+ return nil, err
+ }
+
+ info.lock.Lock()
+ defer info.lock.Unlock()
+
+ metadata := &deviceMetadata{info.DeviceID, info.Size, info.Name()}
+ return metadata, nil
+}
+
+// NewDeviceSet creates the device set based on the options provided.
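+//
+// Illustrative invocation (hypothetical values, mirroring the daemon's
+// --storage-opt flags):
+//
+//   ds, err := NewDeviceSet("/var/lib/docker/devicemapper", true, []string{
+//       "dm.thinpooldev=/dev/mapper/docker-thinpool",
+//       "dm.use_deferred_removal=true",
+//   }, nil, nil)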
+func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps []idtools.IDMap) (*DeviceSet, error) { + devicemapper.SetDevDir("/dev") + + devices := &DeviceSet{ + root: root, + metaData: metaData{Devices: make(map[string]*devInfo)}, + dataLoopbackSize: defaultDataLoopbackSize, + metaDataLoopbackSize: defaultMetaDataLoopbackSize, + baseFsSize: defaultBaseFsSize, + overrideUdevSyncCheck: defaultUdevSyncOverride, + doBlkDiscard: true, + thinpBlockSize: defaultThinpBlockSize, + deviceIDMap: make([]byte, deviceIDMapSz), + deletionWorkerTicker: time.NewTicker(time.Second * 30), + uidMaps: uidMaps, + gidMaps: gidMaps, + minFreeSpacePercent: defaultMinFreeSpacePercent, + } + + version, err := devicemapper.GetDriverVersion() + if err != nil { + // Can't even get driver version, assume not supported + return nil, graphdriver.ErrNotSupported + } + + if err := determineDriverCapabilities(version); err != nil { + return nil, graphdriver.ErrNotSupported + } + + if driverDeferredRemovalSupport && devicemapper.LibraryDeferredRemovalSupport { + // enable deferred stuff by default + enableDeferredDeletion = true + enableDeferredRemoval = true + } + + foundBlkDiscard := false + var lvmSetupConfig directLVMConfig + for _, option := range options { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return nil, err + } + key = strings.ToLower(key) + switch key { + case "dm.basesize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + userBaseSize = true + devices.baseFsSize = uint64(size) + case "dm.loopdatasize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + devices.dataLoopbackSize = size + case "dm.loopmetadatasize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + devices.metaDataLoopbackSize = size + case "dm.fs": + if val != "ext4" && val != "xfs" { + return nil, fmt.Errorf("devmapper: Unsupported filesystem %s\n", val) + } + devices.filesystem = val + case "dm.mkfsarg": + devices.mkfsArgs = append(devices.mkfsArgs, val) + case "dm.mountopt": + devices.mountOptions = joinMountOptions(devices.mountOptions, val) + case "dm.metadatadev": + devices.metadataDevice = val + case "dm.datadev": + devices.dataDevice = val + case "dm.thinpooldev": + devices.thinPoolDevice = strings.TrimPrefix(val, "/dev/mapper/") + case "dm.blkdiscard": + foundBlkDiscard = true + devices.doBlkDiscard, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + case "dm.blocksize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + // convert to 512b sectors + devices.thinpBlockSize = uint32(size) >> 9 + case "dm.override_udev_sync_check": + devices.overrideUdevSyncCheck, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + + case "dm.use_deferred_removal": + enableDeferredRemoval, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + + case "dm.use_deferred_deletion": + enableDeferredDeletion, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + + case "dm.min_free_space": + if !strings.HasSuffix(val, "%") { + return nil, fmt.Errorf("devmapper: Option dm.min_free_space requires %% suffix") + } + + valstring := strings.TrimSuffix(val, "%") + minFreeSpacePercent, err := strconv.ParseUint(valstring, 10, 32) + if err != nil { + return nil, err + } + + if minFreeSpacePercent >= 100 { + return nil, fmt.Errorf("devmapper: Invalid value %v for option dm.min_free_space", val) + } + + devices.minFreeSpacePercent = 
uint32(minFreeSpacePercent) + case "dm.xfs_nospace_max_retries": + _, err := strconv.ParseUint(val, 10, 64) + if err != nil { + return nil, err + } + devices.xfsNospaceRetries = val + case "dm.directlvm_device": + lvmSetupConfig.Device = val + case "dm.directlvm_device_force": + lvmSetupConfigForce, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + case "dm.thinp_percent": + per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "could not parse `dm.thinp_percent=%s`", val) + } + if per >= 100 { + return nil, errors.New("dm.thinp_percent must be greater than 0 and less than 100") + } + lvmSetupConfig.ThinpPercent = per + case "dm.thinp_metapercent": + per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "could not parse `dm.thinp_metapercent=%s`", val) + } + if per >= 100 { + return nil, errors.New("dm.thinp_metapercent must be greater than 0 and less than 100") + } + lvmSetupConfig.ThinpMetaPercent = per + case "dm.thinp_autoextend_percent": + per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "could not parse `dm.thinp_autoextend_percent=%s`", val) + } + if per > 100 { + return nil, errors.New("dm.thinp_autoextend_percent must be greater than 0 and less than 100") + } + lvmSetupConfig.AutoExtendPercent = per + case "dm.thinp_autoextend_threshold": + per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "could not parse `dm.thinp_autoextend_threshold=%s`", val) + } + if per > 100 { + return nil, errors.New("dm.thinp_autoextend_threshold must be greater than 0 and less than 100") + } + lvmSetupConfig.AutoExtendThreshold = per + case "dm.libdm_log_level": + level, err := strconv.ParseInt(val, 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "could not parse `dm.libdm_log_level=%s`", val) + } + if level < devicemapper.LogLevelFatal || level > devicemapper.LogLevelDebug { + return nil, errors.Errorf("dm.libdm_log_level must be in range [%d,%d]", devicemapper.LogLevelFatal, devicemapper.LogLevelDebug) + } + // Register a new logging callback with the specified level. + devicemapper.LogInit(devicemapper.DefaultLogger{ + Level: int(level), + }) + default: + return nil, fmt.Errorf("devmapper: Unknown option %s\n", key) + } + } + + if err := validateLVMConfig(lvmSetupConfig); err != nil { + return nil, err + } + + devices.lvmSetupConfig = lvmSetupConfig + + // By default, don't do blk discard hack on raw devices, its rarely useful and is expensive + if !foundBlkDiscard && (devices.dataDevice != "" || devices.thinPoolDevice != "") { + devices.doBlkDiscard = false + } + + if err := devices.initDevmapper(doInit); err != nil { + return nil, err + } + + return devices, nil +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/devmapper/devmapper_doc.go b/vendor/github.com/moby/moby/daemon/graphdriver/devmapper/devmapper_doc.go new file mode 100644 index 000000000..9ab3e4f86 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/devmapper/devmapper_doc.go @@ -0,0 +1,106 @@ +package devmapper + +// Definition of struct dm_task and sub structures (from lvm2) +// +// struct dm_ioctl { +// /* +// * The version number is made up of three parts: +// * major - no backward or forward compatibility, +// * minor - only backwards compatible, +// * patch - both backwards and forwards compatible. 
+// * +// * All clients of the ioctl interface should fill in the +// * version number of the interface that they were +// * compiled with. +// * +// * All recognized ioctl commands (ie. those that don't +// * return -ENOTTY) fill out this field, even if the +// * command failed. +// */ +// uint32_t version[3]; /* in/out */ +// uint32_t data_size; /* total size of data passed in +// * including this struct */ + +// uint32_t data_start; /* offset to start of data +// * relative to start of this struct */ + +// uint32_t target_count; /* in/out */ +// int32_t open_count; /* out */ +// uint32_t flags; /* in/out */ + +// /* +// * event_nr holds either the event number (input and output) or the +// * udev cookie value (input only). +// * The DM_DEV_WAIT ioctl takes an event number as input. +// * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls +// * use the field as a cookie to return in the DM_COOKIE +// * variable with the uevents they issue. +// * For output, the ioctls return the event number, not the cookie. +// */ +// uint32_t event_nr; /* in/out */ +// uint32_t padding; + +// uint64_t dev; /* in/out */ + +// char name[DM_NAME_LEN]; /* device name */ +// char uuid[DM_UUID_LEN]; /* unique identifier for +// * the block device */ +// char data[7]; /* padding or data */ +// }; + +// struct target { +// uint64_t start; +// uint64_t length; +// char *type; +// char *params; + +// struct target *next; +// }; + +// typedef enum { +// DM_ADD_NODE_ON_RESUME, /* add /dev/mapper node with dmsetup resume */ +// DM_ADD_NODE_ON_CREATE /* add /dev/mapper node with dmsetup create */ +// } dm_add_node_t; + +// struct dm_task { +// int type; +// char *dev_name; +// char *mangled_dev_name; + +// struct target *head, *tail; + +// int read_only; +// uint32_t event_nr; +// int major; +// int minor; +// int allow_default_major_fallback; +// uid_t uid; +// gid_t gid; +// mode_t mode; +// uint32_t read_ahead; +// uint32_t read_ahead_flags; +// union { +// struct dm_ioctl *v4; +// } dmi; +// char *newname; +// char *message; +// char *geometry; +// uint64_t sector; +// int no_flush; +// int no_open_count; +// int skip_lockfs; +// int query_inactive_table; +// int suppress_identical_reload; +// dm_add_node_t add_node; +// uint64_t existing_table_size; +// int cookie_set; +// int new_uuid; +// int secure_data; +// int retry_remove; +// int enable_checks; +// int expected_errno; + +// char *uuid; +// char *mangled_uuid; +// }; +// diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/devmapper/devmapper_test.go b/vendor/github.com/moby/moby/daemon/graphdriver/devmapper/devmapper_test.go new file mode 100644 index 000000000..7501397fd --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/devmapper/devmapper_test.go @@ -0,0 +1,152 @@ +// +build linux + +package devmapper + +import ( + "fmt" + "os" + "syscall" + "testing" + "time" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/graphtest" +) + +func init() { + // Reduce the size of the base fs and loopback for the tests + defaultDataLoopbackSize = 300 * 1024 * 1024 + defaultMetaDataLoopbackSize = 200 * 1024 * 1024 + defaultBaseFsSize = 300 * 1024 * 1024 + defaultUdevSyncOverride = true + if err := initLoopbacks(); err != nil { + panic(err) + } +} + +// initLoopbacks ensures that the loopback devices are properly created within +// the system running the device mapper tests. 
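+// (Illustrative note, not part of the upstream change: loop devices use
+// block major number 7, and the mknod expression below packs the minor in
+// the glibc makedev layout (low 8 bits, plus the higher bits shifted up to
+// bit 20), so /dev/loop3 is roughly mknod(path, S_IFBLK|0660, (7<<8)|3).)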
+func initLoopbacks() error {
+ statT, err := getBaseLoopStats()
+ if err != nil {
+ return err
+ }
+ // create at least 8 loopback files, which is plenty for these tests
+ for i := 0; i < 8; i++ {
+ loopPath := fmt.Sprintf("/dev/loop%d", i)
+ // only create new loopback files if they don't exist
+ if _, err := os.Stat(loopPath); err != nil {
+ if mkerr := syscall.Mknod(loopPath,
+ uint32(statT.Mode|syscall.S_IFBLK), int((7<<8)|(i&0xff)|((i&0xfff00)<<12))); mkerr != nil {
+ return mkerr
+ }
+ os.Chown(loopPath, int(statT.Uid), int(statT.Gid))
+ }
+ }
+ return nil
+}
+
+// getBaseLoopStats inspects /dev/loop0 to collect the uid, gid, and mode of
+// the loop0 device on the system. If it does not exist we assume 0, 0, 0660
+// for the stat data.
+func getBaseLoopStats() (*syscall.Stat_t, error) {
+ loop0, err := os.Stat("/dev/loop0")
+ if err != nil {
+ if os.IsNotExist(err) {
+ return &syscall.Stat_t{
+ Uid: 0,
+ Gid: 0,
+ Mode: 0660,
+ }, nil
+ }
+ return nil, err
+ }
+ return loop0.Sys().(*syscall.Stat_t), nil
+}
+
+// This avoids creating a new driver for each test if all tests are run.
+// Make sure to put new tests between TestDevmapperSetup and TestDevmapperTeardown.
+func TestDevmapperSetup(t *testing.T) {
+ graphtest.GetDriver(t, "devicemapper")
+}
+
+func TestDevmapperCreateEmpty(t *testing.T) {
+ graphtest.DriverTestCreateEmpty(t, "devicemapper")
+}
+
+func TestDevmapperCreateBase(t *testing.T) {
+ graphtest.DriverTestCreateBase(t, "devicemapper")
+}
+
+func TestDevmapperCreateSnap(t *testing.T) {
+ graphtest.DriverTestCreateSnap(t, "devicemapper")
+}
+
+func TestDevmapperTeardown(t *testing.T) {
+ graphtest.PutDriver(t)
+}
+
+func TestDevmapperReduceLoopBackSize(t *testing.T) {
+ tenMB := int64(10 * 1024 * 1024)
+ testChangeLoopBackSize(t, -tenMB, defaultDataLoopbackSize, defaultMetaDataLoopbackSize)
+}
+
+func TestDevmapperIncreaseLoopBackSize(t *testing.T) {
+ tenMB := int64(10 * 1024 * 1024)
+ testChangeLoopBackSize(t, tenMB, defaultDataLoopbackSize+tenMB, defaultMetaDataLoopbackSize+tenMB)
+}
+
+func testChangeLoopBackSize(t *testing.T, delta, expectDataSize, expectMetaDataSize int64) {
+ driver := graphtest.GetDriver(t, "devicemapper").(*graphtest.Driver).Driver.(*graphdriver.NaiveDiffDriver).ProtoDriver.(*Driver)
+ defer graphtest.PutDriver(t)
+ // make sure the data and metadata loopback sizes are the default size
+ if s := driver.DeviceSet.Status(); s.Data.Total != uint64(defaultDataLoopbackSize) || s.Metadata.Total != uint64(defaultMetaDataLoopbackSize) {
+ t.Fatal("data or metadata loop back size is incorrect")
+ }
+ if err := driver.Cleanup(); err != nil {
+ t.Fatal(err)
+ }
+ // Reload
+ d, err := Init(driver.home, []string{
+ fmt.Sprintf("dm.loopdatasize=%d", defaultDataLoopbackSize+delta),
+ fmt.Sprintf("dm.loopmetadatasize=%d", defaultMetaDataLoopbackSize+delta),
+ }, nil, nil)
+ if err != nil {
+ t.Fatalf("error creating devicemapper driver: %v", err)
+ }
+ driver = d.(*graphdriver.NaiveDiffDriver).ProtoDriver.(*Driver)
+ if s := driver.DeviceSet.Status(); s.Data.Total != uint64(expectDataSize) || s.Metadata.Total != uint64(expectMetaDataSize) {
+ t.Fatal("data or metadata loop back size is incorrect")
+ }
+ if err := driver.Cleanup(); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// Make sure devices.Lock() has been released upon return from the cleanupDeletedDevices() function.
+func TestDevmapperLockReleasedDeviceDeletion(t *testing.T) {
+ driver := graphtest.GetDriver(t, "devicemapper").(*graphtest.Driver).Driver.(*graphdriver.NaiveDiffDriver).ProtoDriver.(*Driver)
+ defer graphtest.PutDriver(t)
+ + // Call cleanupDeletedDevices() and after the call take and release + // DeviceSet Lock. If lock has not been released, this will hang. + driver.DeviceSet.cleanupDeletedDevices() + + doneChan := make(chan bool) + + go func() { + driver.DeviceSet.Lock() + defer driver.DeviceSet.Unlock() + doneChan <- true + }() + + select { + case <-time.After(time.Second * 5): + // Timer expired. That means lock was not released upon + // function return and we are deadlocked. Release lock + // here so that cleanup could succeed and fail the test. + driver.DeviceSet.Unlock() + t.Fatal("Could not acquire devices lock after call to cleanupDeletedDevices()") + case <-doneChan: + } +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/devmapper/driver.go b/vendor/github.com/moby/moby/daemon/graphdriver/devmapper/driver.go new file mode 100644 index 000000000..243d88a8b --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/devmapper/driver.go @@ -0,0 +1,241 @@ +// +build linux + +package devmapper + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "strconv" + + "github.com/Sirupsen/logrus" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/devicemapper" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/locker" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/system" + units "github.com/docker/go-units" +) + +func init() { + graphdriver.Register("devicemapper", Init) +} + +// Driver contains the device set mounted and the home directory +type Driver struct { + *DeviceSet + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter + locker *locker.Locker +} + +// Init creates a driver with the given home and the set of options. +func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + deviceSet, err := NewDeviceSet(home, true, options, uidMaps, gidMaps) + if err != nil { + return nil, err + } + + if err := mount.MakePrivate(home); err != nil { + return nil, err + } + + d := &Driver{ + DeviceSet: deviceSet, + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()), + locker: locker.New(), + } + + return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil +} + +func (d *Driver) String() string { + return "devicemapper" +} + +// Status returns the status about the driver in a printable format. +// Information returned contains Pool Name, Data File, Metadata file, disk usage by +// the data and metadata, etc. 
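+//
+// Illustrative sample of the rendered pairs (hypothetical values), as
+// surfaced under the "Storage Driver" section of `docker info`:
+//
+//   Pool Name: docker-thinpool
+//   Pool Blocksize: 524.3 kB
+//   Data Space Used: 19.92 MB
+//   Deferred Removal Enabled: true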
+func (d *Driver) Status() [][2]string { + s := d.DeviceSet.Status() + + status := [][2]string{ + {"Pool Name", s.PoolName}, + {"Pool Blocksize", fmt.Sprintf("%s", units.HumanSize(float64(s.SectorSize)))}, + {"Base Device Size", fmt.Sprintf("%s", units.HumanSize(float64(s.BaseDeviceSize)))}, + {"Backing Filesystem", s.BaseDeviceFS}, + {"Data file", s.DataFile}, + {"Metadata file", s.MetadataFile}, + {"Data Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Used)))}, + {"Data Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Total)))}, + {"Data Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Available)))}, + {"Metadata Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Used)))}, + {"Metadata Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Total)))}, + {"Metadata Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Available)))}, + {"Thin Pool Minimum Free Space", fmt.Sprintf("%s", units.HumanSize(float64(s.MinFreeSpace)))}, + {"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)}, + {"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)}, + {"Deferred Deletion Enabled", fmt.Sprintf("%v", s.DeferredDeleteEnabled)}, + {"Deferred Deleted Device Count", fmt.Sprintf("%v", s.DeferredDeletedDeviceCount)}, + } + if len(s.DataLoopback) > 0 { + status = append(status, [2]string{"Data loop file", s.DataLoopback}) + } + if len(s.MetadataLoopback) > 0 { + status = append(status, [2]string{"Metadata loop file", s.MetadataLoopback}) + } + if vStr, err := devicemapper.GetLibraryVersion(); err == nil { + status = append(status, [2]string{"Library Version", vStr}) + } + return status +} + +// GetMetadata returns a map of information about the device. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + m, err := d.DeviceSet.exportDeviceMetadata(id) + + if err != nil { + return nil, err + } + + metadata := make(map[string]string) + metadata["DeviceId"] = strconv.Itoa(m.deviceID) + metadata["DeviceSize"] = strconv.FormatUint(m.deviceSize, 10) + metadata["DeviceName"] = m.deviceName + return metadata, nil +} + +// Cleanup unmounts a device. +func (d *Driver) Cleanup() error { + err := d.DeviceSet.Shutdown(d.home) + + if err2 := mount.Unmount(d.home); err == nil { + err = err2 + } + + return err +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) +} + +// Create adds a device with a given id and the parent. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + var storageOpt map[string]string + if opts != nil { + storageOpt = opts.StorageOpt + } + + if err := d.DeviceSet.AddDevice(id, parent, storageOpt); err != nil { + return err + } + + return nil +} + +// Remove removes a device with a given id, unmounts the filesystem. 
+func (d *Driver) Remove(id string) error { + d.locker.Lock(id) + defer d.locker.Unlock(id) + if !d.DeviceSet.HasDevice(id) { + // Consider removing a non-existing device a no-op + // This is useful to be able to progress on container removal + // if the underlying device has gone away due to earlier errors + return nil + } + + // This assumes the device has been properly Get/Put:ed and thus is unmounted + if err := d.DeviceSet.DeleteDevice(id, false); err != nil { + return fmt.Errorf("failed to remove device %s: %v", id, err) + } + + mp := path.Join(d.home, "mnt", id) + if err := system.EnsureRemoveAll(mp); err != nil { + return err + } + + return nil +} + +// Get mounts a device with given id into the root filesystem +func (d *Driver) Get(id, mountLabel string) (string, error) { + d.locker.Lock(id) + defer d.locker.Unlock(id) + mp := path.Join(d.home, "mnt", id) + rootFs := path.Join(mp, "rootfs") + if count := d.ctr.Increment(mp); count > 1 { + return rootFs, nil + } + + uid, gid, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + d.ctr.Decrement(mp) + return "", err + } + + // Create the target directories if they don't exist + if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0755, uid, gid); err != nil && !os.IsExist(err) { + d.ctr.Decrement(mp) + return "", err + } + if err := idtools.MkdirAs(mp, 0755, uid, gid); err != nil && !os.IsExist(err) { + d.ctr.Decrement(mp) + return "", err + } + + // Mount the device + if err := d.DeviceSet.MountDevice(id, mp, mountLabel); err != nil { + d.ctr.Decrement(mp) + return "", err + } + + if err := idtools.MkdirAllAs(rootFs, 0755, uid, gid); err != nil && !os.IsExist(err) { + d.ctr.Decrement(mp) + d.DeviceSet.UnmountDevice(id, mp) + return "", err + } + + idFile := path.Join(mp, "id") + if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) { + // Create an "id" file with the container/image id in it to help reconstruct this in case + // of later problems + if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil { + d.ctr.Decrement(mp) + d.DeviceSet.UnmountDevice(id, mp) + return "", err + } + } + + return rootFs, nil +} + +// Put unmounts a device and removes it. +func (d *Driver) Put(id string) error { + d.locker.Lock(id) + defer d.locker.Unlock(id) + mp := path.Join(d.home, "mnt", id) + if count := d.ctr.Decrement(mp); count > 0 { + return nil + } + err := d.DeviceSet.UnmountDevice(id, mp) + if err != nil { + logrus.Errorf("devmapper: Error unmounting device %s: %s", id, err) + } + return err +} + +// Exists checks to see if the device exists. +func (d *Driver) Exists(id string) bool { + return d.DeviceSet.HasDevice(id) +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/devmapper/mount.go b/vendor/github.com/moby/moby/daemon/graphdriver/devmapper/mount.go new file mode 100644 index 000000000..cca1fe1b3 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/devmapper/mount.go @@ -0,0 +1,89 @@ +// +build linux + +package devmapper + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "syscall" +) + +// FIXME: this is copy-pasted from the aufs driver. +// It should be moved into the core. + +// Mounted returns true if a mount point exists. 
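+// It does so by comparing the st_dev of the path with that of its parent
+// directory: if they differ, a filesystem is mounted at the path. For
+// example (illustrative):
+//
+//   mounted, err := Mounted("/var/lib/docker/devicemapper/mnt/<id>")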
+func Mounted(mountpoint string) (bool, error) { + mntpoint, err := os.Stat(mountpoint) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + parent, err := os.Stat(filepath.Join(mountpoint, "..")) + if err != nil { + return false, err + } + mntpointSt := mntpoint.Sys().(*syscall.Stat_t) + parentSt := parent.Sys().(*syscall.Stat_t) + return mntpointSt.Dev != parentSt.Dev, nil +} + +type probeData struct { + fsName string + magic string + offset uint64 +} + +// ProbeFsType returns the filesystem name for the given device id. +func ProbeFsType(device string) (string, error) { + probes := []probeData{ + {"btrfs", "_BHRfS_M", 0x10040}, + {"ext4", "\123\357", 0x438}, + {"xfs", "XFSB", 0}, + } + + maxLen := uint64(0) + for _, p := range probes { + l := p.offset + uint64(len(p.magic)) + if l > maxLen { + maxLen = l + } + } + + file, err := os.Open(device) + if err != nil { + return "", err + } + defer file.Close() + + buffer := make([]byte, maxLen) + l, err := file.Read(buffer) + if err != nil { + return "", err + } + + if uint64(l) != maxLen { + return "", fmt.Errorf("devmapper: unable to detect filesystem type of %s, short read", device) + } + + for _, p := range probes { + if bytes.Equal([]byte(p.magic), buffer[p.offset:p.offset+uint64(len(p.magic))]) { + return p.fsName, nil + } + } + + return "", fmt.Errorf("devmapper: Unknown filesystem type on %s", device) +} + +func joinMountOptions(a, b string) string { + if a == "" { + return b + } + if b == "" { + return a + } + return a + "," + b +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/driver.go b/vendor/github.com/moby/moby/daemon/graphdriver/driver.go new file mode 100644 index 000000000..88f190d9e --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/driver.go @@ -0,0 +1,287 @@ +package graphdriver + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/vbatts/tar-split/tar/storage" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/plugingetter" +) + +// FsMagic unsigned id of the filesystem in use. +type FsMagic uint32 + +const ( + // FsMagicUnsupported is a predefined constant value other than a valid filesystem id. + FsMagicUnsupported = FsMagic(0x00000000) +) + +var ( + // All registered drivers + drivers map[string]InitFunc + + // ErrNotSupported returned when driver is not supported. + ErrNotSupported = errors.New("driver not supported") + // ErrPrerequisites returned when driver does not meet prerequisites. + ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)") + // ErrIncompatibleFS returned when file system is not supported. + ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver") +) + +//CreateOpts contains optional arguments for Create() and CreateReadWrite() +// methods. +type CreateOpts struct { + MountLabel string + StorageOpt map[string]string +} + +// InitFunc initializes the storage driver. +type InitFunc func(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) + +// ProtoDriver defines the basic capabilities of a driver. +// This interface exists solely to be a minimum set of methods +// for client code which choose not to implement the entire Driver +// interface and use the NaiveDiffDriver wrapper constructor. +// +// Use of ProtoDriver directly by client code is not recommended. 
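+//
+// (In this patch, devicemapper's Driver implements ProtoDriver and is
+// wrapped by NewNaiveDiffDriver, which supplies the DiffDriver methods
+// on top of it.)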
+type ProtoDriver interface { + // String returns a string representation of this driver. + String() string + // CreateReadWrite creates a new, empty filesystem layer that is ready + // to be used as the storage for a container. Additional options can + // be passed in opts. parent may be "" and opts may be nil. + CreateReadWrite(id, parent string, opts *CreateOpts) error + // Create creates a new, empty, filesystem layer with the + // specified id and parent and options passed in opts. Parent + // may be "" and opts may be nil. + Create(id, parent string, opts *CreateOpts) error + // Remove attempts to remove the filesystem layer with this id. + Remove(id string) error + // Get returns the mountpoint for the layered filesystem referred + // to by this id. You can optionally specify a mountLabel or "". + // Returns the absolute path to the mounted layered filesystem. + Get(id, mountLabel string) (dir string, err error) + // Put releases the system resources for the specified id, + // e.g, unmounting layered filesystem. + Put(id string) error + // Exists returns whether a filesystem layer with the specified + // ID exists on this driver. + Exists(id string) bool + // Status returns a set of key-value pairs which give low + // level diagnostic status about this driver. + Status() [][2]string + // Returns a set of key-value pairs which give low level information + // about the image/container driver is managing. + GetMetadata(id string) (map[string]string, error) + // Cleanup performs necessary tasks to release resources + // held by the driver, e.g., unmounting all layered filesystems + // known to this driver. + Cleanup() error +} + +// DiffDriver is the interface to use to implement graph diffs +type DiffDriver interface { + // Diff produces an archive of the changes between the specified + // layer and its parent layer which may be "". + Diff(id, parent string) (io.ReadCloser, error) + // Changes produces a list of changes between the specified layer + // and its parent layer. If parent is "", then all changes will be ADD changes. + Changes(id, parent string) ([]archive.Change, error) + // ApplyDiff extracts the changeset from the given diff into the + // layer with the specified id and parent, returning the size of the + // new layer in bytes. + // The archive.Reader must be an uncompressed stream. + ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) + // DiffSize calculates the changes between the specified id + // and its parent and returns the size in bytes of the changes + // relative to its base filesystem directory. + DiffSize(id, parent string) (size int64, err error) +} + +// Driver is the interface for layered/snapshot file system drivers. +type Driver interface { + ProtoDriver + DiffDriver +} + +// Capabilities defines a list of capabilities a driver may implement. +// These capabilities are not required; however, they do determine how a +// graphdriver can be used. +type Capabilities struct { + // Flags that this driver is capable of reproducing exactly equivalent + // diffs for read-only layers. If set, clients can rely on the driver + // for consistent tar streams, and avoid extra processing to account + // for potential differences (eg: the layer store's use of tar-split). + ReproducesExactDiffs bool +} + +// CapabilityDriver is the interface for layered file system drivers that +// can report on their Capabilities. 
+type CapabilityDriver interface { + Capabilities() Capabilities +} + +// DiffGetterDriver is the interface for layered file system drivers that +// provide a specialized function for getting file contents for tar-split. +type DiffGetterDriver interface { + Driver + // DiffGetter returns an interface to efficiently retrieve the contents + // of files in a layer. + DiffGetter(id string) (FileGetCloser, error) +} + +// FileGetCloser extends the storage.FileGetter interface with a Close method +// for cleaning up. +type FileGetCloser interface { + storage.FileGetter + // Close cleans up any resources associated with the FileGetCloser. + Close() error +} + +// Checker makes checks on specified filesystems. +type Checker interface { + // IsMounted returns true if the provided path is mounted for the specific checker + IsMounted(path string) bool +} + +func init() { + drivers = make(map[string]InitFunc) +} + +// Register registers an InitFunc for the driver. +func Register(name string, initFunc InitFunc) error { + if _, exists := drivers[name]; exists { + return fmt.Errorf("Name already registered %s", name) + } + drivers[name] = initFunc + + return nil +} + +// GetDriver initializes and returns the registered driver +func GetDriver(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) { + if initFunc, exists := drivers[name]; exists { + return initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps) + } + + pluginDriver, err := lookupPlugin(name, pg, config) + if err == nil { + return pluginDriver, nil + } + logrus.WithError(err).WithField("driver", name).WithField("home-dir", config.Root).Error("Failed to GetDriver graph") + return nil, ErrNotSupported +} + +// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins +func getBuiltinDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) { + if initFunc, exists := drivers[name]; exists { + return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps) + } + logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home) + return nil, ErrNotSupported +} + +// Options is used to initialize a graphdriver +type Options struct { + Root string + DriverOptions []string + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + ExperimentalEnabled bool +} + +// New creates the driver and initializes it at the specified root. +func New(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) { + if name != "" { + logrus.Debugf("[graphdriver] trying provided driver: %s", name) // so the logs show specified driver + return GetDriver(name, pg, config) + } + + // Guess for prior driver + driversMap := scanPriorDrivers(config.Root) + for _, name := range priority { + if name == "vfs" { + // don't use vfs even if there is state present. + continue + } + if _, prior := driversMap[name]; prior { + // of the state found from prior drivers, check in order of our priority + // which we would prefer + driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps) + if err != nil { + // unlike below, we will return error here, because there is prior + // state, and now it is no longer supported/prereq/compatible, so + // something changed and needs attention. Otherwise the daemon's + // images would just "disappear". 
+				logrus.Errorf("[graphdriver] prior storage driver %s failed: %s", name, err)
+				return nil, err
+			}
+
+			// abort starting when there are other prior configured drivers
+			// to ensure the user explicitly selects the driver to load
+			if len(driversMap)-1 > 0 {
+				var driversSlice []string
+				for name := range driversMap {
+					driversSlice = append(driversSlice, name)
+				}
+
+				return nil, fmt.Errorf("%s contains several valid graphdrivers: %s; Please cleanup or explicitly choose storage driver (-s <DRIVER>)", config.Root, strings.Join(driversSlice, ", "))
+			}
+
+			logrus.Infof("[graphdriver] using prior storage driver: %s", name)
+			return driver, nil
+		}
+	}
+
+	// Check for priority drivers first
+	for _, name := range priority {
+		driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps)
+		if err != nil {
+			if isDriverNotSupported(err) {
+				continue
+			}
+			return nil, err
+		}
+		return driver, nil
+	}
+
+	// Check all registered drivers if no priority driver is found
+	for name, initFunc := range drivers {
+		driver, err := initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps)
+		if err != nil {
+			if isDriverNotSupported(err) {
+				continue
+			}
+			return nil, err
+		}
+		return driver, nil
+	}
+	return nil, fmt.Errorf("No supported storage backend found")
+}
+
+// isDriverNotSupported returns true if the error initializing
+// the graph driver is a non-supported error.
+func isDriverNotSupported(err error) bool {
+	return err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS
+}
+
+// scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers
+func scanPriorDrivers(root string) map[string]bool {
+	driversMap := make(map[string]bool)
+
+	for driver := range drivers {
+		p := filepath.Join(root, driver)
+		if _, err := os.Stat(p); err == nil && driver != "vfs" {
+			driversMap[driver] = true
+		}
+	}
+	return driversMap
+}
diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/driver_freebsd.go b/vendor/github.com/moby/moby/daemon/graphdriver/driver_freebsd.go
new file mode 100644
index 000000000..fb13ac3d5
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/graphdriver/driver_freebsd.go
@@ -0,0 +1,19 @@
+package graphdriver
+
+import "golang.org/x/sys/unix"
+
+var (
+	// Slice of drivers that should be used in order
+	priority = []string{
+		"zfs",
+	}
+)
+
+// Mounted checks if the given path is mounted as the fs type
+func Mounted(fsType FsMagic, mountPath string) (bool, error) {
+	var buf unix.Statfs_t
+	if err := unix.Statfs(mountPath, &buf); err != nil {
+		return false, err
+	}
+	return FsMagic(buf.Type) == fsType, nil
+}
diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/driver_linux.go b/vendor/github.com/moby/moby/daemon/graphdriver/driver_linux.go
new file mode 100644
index 000000000..a92993d45
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/graphdriver/driver_linux.go
@@ -0,0 +1,135 @@
+// +build linux
+
+package graphdriver
+
+import (
+	"path/filepath"
+
+	"github.com/docker/docker/pkg/mount"
+	"golang.org/x/sys/unix"
+)
+
+const (
+	// FsMagicAufs filesystem id for Aufs
+	FsMagicAufs = FsMagic(0x61756673)
+	// FsMagicBtrfs filesystem id for Btrfs
+	FsMagicBtrfs = FsMagic(0x9123683E)
+	// FsMagicCramfs filesystem id for Cramfs
+	FsMagicCramfs = FsMagic(0x28cd3d45)
+	// FsMagicEcryptfs filesystem id for eCryptfs
+	FsMagicEcryptfs = FsMagic(0xf15f)
+	// FsMagicExtfs filesystem id for Extfs
+	FsMagicExtfs = FsMagic(0x0000EF53)
+	// FsMagicF2fs filesystem id for F2fs
+	FsMagicF2fs = FsMagic(0xF2F52010)
+	// FsMagicGPFS filesystem id for GPFS
+	FsMagicGPFS = FsMagic(0x47504653)
+	// FsMagicJffs2Fs filesystem id for Jffs2Fs
+	FsMagicJffs2Fs = FsMagic(0x000072b6)
+	// FsMagicJfs filesystem id for Jfs
+	FsMagicJfs = FsMagic(0x3153464a)
+	// FsMagicNfsFs filesystem id for NfsFs
+	FsMagicNfsFs = FsMagic(0x00006969)
+	// FsMagicRAMFs filesystem id for RamFs
+	FsMagicRAMFs = FsMagic(0x858458f6)
+	// FsMagicReiserFs filesystem id for ReiserFs
+	FsMagicReiserFs = FsMagic(0x52654973)
+	// FsMagicSmbFs filesystem id for SmbFs
+	FsMagicSmbFs = FsMagic(0x0000517B)
+	// FsMagicSquashFs filesystem id for SquashFs
+	FsMagicSquashFs = FsMagic(0x73717368)
+	// FsMagicTmpFs filesystem id for TmpFs
+	FsMagicTmpFs = FsMagic(0x01021994)
+	// FsMagicVxFS filesystem id for VxFs
+	FsMagicVxFS = FsMagic(0xa501fcf5)
+	// FsMagicXfs filesystem id for Xfs
+	FsMagicXfs = FsMagic(0x58465342)
+	// FsMagicZfs filesystem id for Zfs
+	FsMagicZfs = FsMagic(0x2fc12fc1)
+	// FsMagicOverlay filesystem id for overlay
+	FsMagicOverlay = FsMagic(0x794C7630)
+)
+
+var (
+	// Slice of drivers that should be used in order
+	priority = []string{
+		"aufs",
+		"btrfs",
+		"zfs",
+		"overlay2",
+		"overlay",
+		"devicemapper",
+		"vfs",
+	}
+
+	// FsNames maps filesystem id to name of the filesystem.
+	FsNames = map[FsMagic]string{
+		FsMagicAufs:        "aufs",
+		FsMagicBtrfs:       "btrfs",
+		FsMagicCramfs:      "cramfs",
+		FsMagicExtfs:       "extfs",
+		FsMagicF2fs:        "f2fs",
+		FsMagicGPFS:        "gpfs",
+		FsMagicJffs2Fs:     "jffs2",
+		FsMagicJfs:         "jfs",
+		FsMagicNfsFs:       "nfs",
+		FsMagicOverlay:     "overlayfs",
+		FsMagicRAMFs:       "ramfs",
+		FsMagicReiserFs:    "reiserfs",
+		FsMagicSmbFs:       "smb",
+		FsMagicSquashFs:    "squashfs",
+		FsMagicTmpFs:       "tmpfs",
+		FsMagicUnsupported: "unsupported",
+		FsMagicVxFS:        "vxfs",
+		FsMagicXfs:         "xfs",
+		FsMagicZfs:         "zfs",
+	}
+)
+
+// GetFSMagic returns the filesystem id given the path.
+func GetFSMagic(rootpath string) (FsMagic, error) {
+	var buf unix.Statfs_t
+	if err := unix.Statfs(filepath.Dir(rootpath), &buf); err != nil {
+		return 0, err
+	}
+	return FsMagic(buf.Type), nil
+}
+
+// NewFsChecker returns a checker configured for the provided FsMagic
+func NewFsChecker(t FsMagic) Checker {
+	return &fsChecker{
+		t: t,
+	}
+}
+
+type fsChecker struct {
+	t FsMagic
+}
+
+func (c *fsChecker) IsMounted(path string) bool {
+	m, _ := Mounted(c.t, path)
+	return m
+}
+
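A hedged sketch of how these Linux helpers compose, not part of the patch: the function below is invented for illustration, assumes logrus is imported, and assumes the driver's home should live on btrfs.

func ensureBtrfsHome(home string) error {
	magic, err := GetFSMagic(home)
	if err != nil {
		return err
	}
	if name, ok := FsNames[magic]; ok {
		logrus.Debugf("[graphdriver] backing filesystem for %s: %s", home, name)
	}
	// ErrPrerequisites is the conventional error for an unsuitable
	// backing filesystem.
	if !NewFsChecker(FsMagicBtrfs).IsMounted(home) {
		return ErrPrerequisites
	}
	return nil
}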
+// NewDefaultChecker returns a checker that parses /proc/mountinfo to check
+// if the specified path is mounted.
+func NewDefaultChecker() Checker {
+	return &defaultChecker{}
+}
+
+type defaultChecker struct {
+}
+
+func (c *defaultChecker) IsMounted(path string) bool {
+	m, _ := mount.Mounted(path)
+	return m
+}
+
+// Mounted checks if the given path is mounted as the fs type
+func Mounted(fsType FsMagic, mountPath string) (bool, error) {
+	var buf unix.Statfs_t
+	if err := unix.Statfs(mountPath, &buf); err != nil {
+		return false, err
+	}
+	return FsMagic(buf.Type) == fsType, nil
+}
diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/driver_solaris.go b/vendor/github.com/moby/moby/daemon/graphdriver/driver_solaris.go
new file mode 100644
index 000000000..06dc360cf
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/graphdriver/driver_solaris.go
@@ -0,0 +1,97 @@
+// +build solaris,cgo
+
+package graphdriver
+
+/*
+#include <sys/statvfs.h>
+#include <stdlib.h>
+
+static inline struct statvfs *getstatfs(char *s) {
+	struct statvfs *buf;
+	int err;
+	buf = (struct statvfs *)malloc(sizeof(struct statvfs));
+	err = statvfs(s, buf);
+	return buf;
+}
+*/
+import "C"
+import (
+	"path/filepath"
+	"unsafe"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/mount"
+)
+
+const (
+	// FsMagicZfs filesystem id for Zfs
+	FsMagicZfs = FsMagic(0x2fc12fc1)
+)
+
+var (
+	// Slice of drivers that should be used in order
+	priority = []string{
+		"zfs",
+	}
+
+	// FsNames maps filesystem id to name of the filesystem.
+	FsNames = map[FsMagic]string{
+		FsMagicZfs: "zfs",
+	}
+)
+
+// GetFSMagic returns the filesystem id given the path.
+func GetFSMagic(rootpath string) (FsMagic, error) {
+	return 0, nil
+}
+
+type fsChecker struct {
+	t FsMagic
+}
+
+func (c *fsChecker) IsMounted(path string) bool {
+	m, _ := Mounted(c.t, path)
+	return m
+}
+
+// NewFsChecker returns a checker configured for the provided FsMagic
+func NewFsChecker(t FsMagic) Checker {
+	return &fsChecker{
+		t: t,
+	}
+}
+
+// NewDefaultChecker returns a checker that parses /proc/mountinfo to check
+// if the specified path is mounted.
+// No-op on Solaris.
+func NewDefaultChecker() Checker {
+	return &defaultChecker{}
+}
+
+type defaultChecker struct {
+}
+
+func (c *defaultChecker) IsMounted(path string) bool {
+	m, _ := mount.Mounted(path)
+	return m
+}
+
+// Mounted checks if the given path is mounted as the fs type
+// Solaris supports only ZFS for now
+func Mounted(fsType FsMagic, mountPath string) (bool, error) {
+
+	cs := C.CString(filepath.Dir(mountPath))
+	buf := C.getstatfs(cs)
+
+	// on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ]
+	if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) ||
+		(buf.f_basetype[3] != 0) {
+		logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath)
+		C.free(unsafe.Pointer(buf))
+		C.free(unsafe.Pointer(cs)) // also release the C string on the error path
+		return false, ErrPrerequisites
+	}
+
+	C.free(unsafe.Pointer(buf))
+	C.free(unsafe.Pointer(cs))
+	return true, nil
+}
diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/driver_unsupported.go b/vendor/github.com/moby/moby/daemon/graphdriver/driver_unsupported.go
new file mode 100644
index 000000000..4a875608b
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/graphdriver/driver_unsupported.go
@@ -0,0 +1,15 @@
+// +build !linux,!windows,!freebsd,!solaris
+
+package graphdriver
+
+var (
+	// Slice of drivers that should be used in order
+	priority = []string{
+		"unsupported",
+	}
+)
+
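Each per-platform file in this hunk supplies the same two hooks: the priority slice consumed by New, and GetFSMagic. As a hedged sketch only, a port to another Unix would minimally declare something like the following; the netbsd target and its contents are hypothetical, not part of moby.

// +build netbsd

package graphdriver

var (
	// Slice of drivers that should be used in order
	priority = []string{
		"vfs",
	}
)

// GetFSMagic returns the filesystem id given the path.
// This hypothetical port performs no detection.
func GetFSMagic(rootpath string) (FsMagic, error) {
	return FsMagicUnsupported, nil
}

+// GetFSMagic returns the filesystem id given the path.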
+func GetFSMagic(rootpath string) (FsMagic, error) {
+	return FsMagicUnsupported, nil
+}
diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/driver_windows.go b/vendor/github.com/moby/moby/daemon/graphdriver/driver_windows.go
new file mode 100644
index 000000000..ffd30c295
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/graphdriver/driver_windows.go
@@ -0,0 +1,14 @@
+package graphdriver
+
+var (
+	// Slice of drivers that should be used in order
+	priority = []string{
+		"windowsfilter",
+	}
+)
+
+// GetFSMagic returns the filesystem id given the path.
+func GetFSMagic(rootpath string) (FsMagic, error) {
+	// Note it is OK to return FsMagicUnsupported on Windows.
+	return FsMagicUnsupported, nil
+}
diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/fsdiff.go b/vendor/github.com/moby/moby/daemon/graphdriver/fsdiff.go
new file mode 100644
index 000000000..20826cd7d
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/graphdriver/fsdiff.go
@@ -0,0 +1,169 @@
+package graphdriver
+
+import (
+	"io"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/chrootarchive"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/docker/docker/pkg/ioutils"
+)
+
+var (
+	// ApplyUncompressedLayer defines the unpack method used by the graph
+	// driver.
+	ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer
+)
+
+// NaiveDiffDriver takes a ProtoDriver and adds the
+// capability of the Diffing methods which it may or may not
+// support on its own. See the comment on the exported
+// NewNaiveDiffDriver function below.
+// Notably, the AUFS driver doesn't need to be wrapped like this.
+type NaiveDiffDriver struct {
+	ProtoDriver
+	uidMaps []idtools.IDMap
+	gidMaps []idtools.IDMap
+}
+
+// NewNaiveDiffDriver returns a fully functional driver that wraps the
+// given ProtoDriver and adds the capability of the following methods which
+// it may or may not support on its own:
+//     Diff(id, parent string) (io.ReadCloser, error)
+//     Changes(id, parent string) ([]archive.Change, error)
+//     ApplyDiff(id, parent string, diff io.Reader) (size int64, err error)
+//     DiffSize(id, parent string) (size int64, err error)
+func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Driver {
+	return &NaiveDiffDriver{ProtoDriver: driver,
+		uidMaps: uidMaps,
+		gidMaps: gidMaps}
+}
+
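Most concrete drivers implement only ProtoDriver and gain the four diff methods through this wrapper. A hedged usage sketch follows; the diffExample function, the proto argument, and the layer IDs are all illustrative, not part of the patch.

func diffExample(proto ProtoDriver) error {
	drv := NewNaiveDiffDriver(proto, nil, nil) // nil maps: no uid/gid remapping
	rc, err := drv.Diff("layer-id", "parent-id") // IDs illustrative
	if err != nil {
		return err
	}
	defer rc.Close()
	// rc streams an uncompressed tar of the changes between the two layers.
	return nil
}

+// Diff produces an archive of the changes between the specified
+// layer and its parent layer which may be "".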
+func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err error) { + startTime := time.Now() + driver := gdw.ProtoDriver + + layerFs, err := driver.Get(id, "") + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + driver.Put(id) + } + }() + + if parent == "" { + archive, err := archive.Tar(layerFs, archive.Uncompressed) + if err != nil { + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(id) + return err + }), nil + } + + parentFs, err := driver.Get(parent, "") + if err != nil { + return nil, err + } + defer driver.Put(parent) + + changes, err := archive.ChangesDirs(layerFs, parentFs) + if err != nil { + return nil, err + } + + archive, err := archive.ExportChanges(layerFs, changes, gdw.uidMaps, gdw.gidMaps) + if err != nil { + return nil, err + } + + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(id) + + // NaiveDiffDriver compares file metadata with parent layers. Parent layers + // are extracted from tar's with full second precision on modified time. + // We need this hack here to make sure calls within same second receive + // correct result. + time.Sleep(startTime.Truncate(time.Second).Add(time.Second).Sub(time.Now())) + return err + }), nil +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) { + driver := gdw.ProtoDriver + + layerFs, err := driver.Get(id, "") + if err != nil { + return nil, err + } + defer driver.Put(id) + + parentFs := "" + + if parent != "" { + parentFs, err = driver.Get(parent, "") + if err != nil { + return nil, err + } + defer driver.Put(parent) + } + + return archive.ChangesDirs(layerFs, parentFs) +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) { + driver := gdw.ProtoDriver + + // Mount the root filesystem so we can apply the diff/layer. + layerFs, err := driver.Get(id, "") + if err != nil { + return + } + defer driver.Put(id) + + options := &archive.TarOptions{UIDMaps: gdw.uidMaps, + GIDMaps: gdw.gidMaps} + start := time.Now().UTC() + logrus.Debug("Start untar layer") + if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil { + return + } + logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) + + return +} + +// DiffSize calculates the changes between the specified layer +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. 
+func (gdw *NaiveDiffDriver) DiffSize(id, parent string) (size int64, err error) { + driver := gdw.ProtoDriver + + changes, err := gdw.Changes(id, parent) + if err != nil { + return + } + + layerFs, err := driver.Get(id, "") + if err != nil { + return + } + defer driver.Put(id) + + return archive.ChangesSize(layerFs, changes), nil +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/graphtest/graphbench_unix.go b/vendor/github.com/moby/moby/daemon/graphdriver/graphtest/graphbench_unix.go new file mode 100644 index 000000000..def822b9a --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/graphtest/graphbench_unix.go @@ -0,0 +1,259 @@ +// +build linux freebsd + +package graphtest + +import ( + "bytes" + "io" + "io/ioutil" + "path/filepath" + "testing" + + "github.com/docker/docker/pkg/stringid" +) + +// DriverBenchExists benchmarks calls to exist +func DriverBenchExists(b *testing.B, drivername string, driveroptions ...string) { + driver := GetDriver(b, drivername, driveroptions...) + defer PutDriver(b) + + base := stringid.GenerateRandomID() + + if err := driver.Create(base, "", nil); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + if !driver.Exists(base) { + b.Fatal("Newly created image doesn't exist") + } + } +} + +// DriverBenchGetEmpty benchmarks calls to get on an empty layer +func DriverBenchGetEmpty(b *testing.B, drivername string, driveroptions ...string) { + driver := GetDriver(b, drivername, driveroptions...) + defer PutDriver(b) + + base := stringid.GenerateRandomID() + + if err := driver.Create(base, "", nil); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := driver.Get(base, "") + b.StopTimer() + if err != nil { + b.Fatalf("Error getting mount: %s", err) + } + if err := driver.Put(base); err != nil { + b.Fatalf("Error putting mount: %s", err) + } + b.StartTimer() + } +} + +// DriverBenchDiffBase benchmarks calls to diff on a root layer +func DriverBenchDiffBase(b *testing.B, drivername string, driveroptions ...string) { + driver := GetDriver(b, drivername, driveroptions...) + defer PutDriver(b) + + base := stringid.GenerateRandomID() + if err := driver.Create(base, "", nil); err != nil { + b.Fatal(err) + } + + if err := addFiles(driver, base, 3); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + arch, err := driver.Diff(base, "") + if err != nil { + b.Fatal(err) + } + _, err = io.Copy(ioutil.Discard, arch) + if err != nil { + b.Fatalf("Error copying archive: %s", err) + } + arch.Close() + } +} + +// DriverBenchDiffN benchmarks calls to diff on two layers with +// a provided number of files on the lower and upper layers. +func DriverBenchDiffN(b *testing.B, bottom, top int, drivername string, driveroptions ...string) { + driver := GetDriver(b, drivername, driveroptions...) 
+ defer PutDriver(b) + base := stringid.GenerateRandomID() + upper := stringid.GenerateRandomID() + if err := driver.Create(base, "", nil); err != nil { + b.Fatal(err) + } + + if err := addManyFiles(driver, base, bottom, 3); err != nil { + b.Fatal(err) + } + + if err := driver.Create(upper, base, nil); err != nil { + b.Fatal(err) + } + + if err := addManyFiles(driver, upper, top, 6); err != nil { + b.Fatal(err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + arch, err := driver.Diff(upper, "") + if err != nil { + b.Fatal(err) + } + _, err = io.Copy(ioutil.Discard, arch) + if err != nil { + b.Fatalf("Error copying archive: %s", err) + } + arch.Close() + } +} + +// DriverBenchDiffApplyN benchmarks calls to diff and apply together +func DriverBenchDiffApplyN(b *testing.B, fileCount int, drivername string, driveroptions ...string) { + driver := GetDriver(b, drivername, driveroptions...) + defer PutDriver(b) + base := stringid.GenerateRandomID() + upper := stringid.GenerateRandomID() + if err := driver.Create(base, "", nil); err != nil { + b.Fatal(err) + } + + if err := addManyFiles(driver, base, fileCount, 3); err != nil { + b.Fatal(err) + } + + if err := driver.Create(upper, base, nil); err != nil { + b.Fatal(err) + } + + if err := addManyFiles(driver, upper, fileCount, 6); err != nil { + b.Fatal(err) + } + diffSize, err := driver.DiffSize(upper, "") + if err != nil { + b.Fatal(err) + } + b.ResetTimer() + b.StopTimer() + for i := 0; i < b.N; i++ { + diff := stringid.GenerateRandomID() + if err := driver.Create(diff, base, nil); err != nil { + b.Fatal(err) + } + + if err := checkManyFiles(driver, diff, fileCount, 3); err != nil { + b.Fatal(err) + } + + b.StartTimer() + + arch, err := driver.Diff(upper, "") + if err != nil { + b.Fatal(err) + } + + applyDiffSize, err := driver.ApplyDiff(diff, "", arch) + if err != nil { + b.Fatal(err) + } + + b.StopTimer() + arch.Close() + + if applyDiffSize != diffSize { + // TODO: enforce this + //b.Fatalf("Apply diff size different, got %d, expected %s", applyDiffSize, diffSize) + } + if err := checkManyFiles(driver, diff, fileCount, 6); err != nil { + b.Fatal(err) + } + } +} + +// DriverBenchDeepLayerDiff benchmarks calls to diff on top of a given number of layers. +func DriverBenchDeepLayerDiff(b *testing.B, layerCount int, drivername string, driveroptions ...string) { + driver := GetDriver(b, drivername, driveroptions...) + defer PutDriver(b) + + base := stringid.GenerateRandomID() + if err := driver.Create(base, "", nil); err != nil { + b.Fatal(err) + } + + if err := addFiles(driver, base, 50); err != nil { + b.Fatal(err) + } + + topLayer, err := addManyLayers(driver, base, layerCount) + if err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + arch, err := driver.Diff(topLayer, "") + if err != nil { + b.Fatal(err) + } + _, err = io.Copy(ioutil.Discard, arch) + if err != nil { + b.Fatalf("Error copying archive: %s", err) + } + arch.Close() + } +} + +// DriverBenchDeepLayerRead benchmarks calls to read a file under a given number of layers. +func DriverBenchDeepLayerRead(b *testing.B, layerCount int, drivername string, driveroptions ...string) { + driver := GetDriver(b, drivername, driveroptions...) 
+	defer PutDriver(b)
+
+	base := stringid.GenerateRandomID()
+	if err := driver.Create(base, "", nil); err != nil {
+		b.Fatal(err)
+	}
+
+	content := []byte("test content")
+	if err := addFile(driver, base, "testfile.txt", content); err != nil {
+		b.Fatal(err)
+	}
+
+	topLayer, err := addManyLayers(driver, base, layerCount)
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	root, err := driver.Get(topLayer, "")
+	if err != nil {
+		b.Fatal(err)
+	}
+	defer driver.Put(topLayer)
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+
+		// Read content
+		c, err := ioutil.ReadFile(filepath.Join(root, "testfile.txt"))
+		if err != nil {
+			b.Fatal(err)
+		}
+
+		b.StopTimer()
+		if bytes.Compare(c, content) != 0 {
+			b.Fatalf("Wrong content in file %v, expected %v", c, content)
+		}
+		b.StartTimer()
+	}
+}
diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/graphtest/graphtest_unix.go b/vendor/github.com/moby/moby/daemon/graphdriver/graphtest/graphtest_unix.go
new file mode 100644
index 000000000..2f8ae5477
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/graphdriver/graphtest/graphtest_unix.go
@@ -0,0 +1,336 @@
+// +build linux freebsd solaris
+
+package graphtest
+
+import (
+	"bytes"
+	"io/ioutil"
+	"math/rand"
+	"os"
+	"path"
+	"reflect"
+	"testing"
+	"unsafe"
+
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/pkg/stringid"
+	"github.com/docker/go-units"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"golang.org/x/sys/unix"
+)
+
+var (
+	drv *Driver
+)
+
+// Driver conforms to the graphdriver.Driver interface and
+// contains information such as root and reference count of the number of clients using it.
+// This helps in testing drivers added into the framework.
+type Driver struct {
+	graphdriver.Driver
+	root     string
+	refCount int
+}
+
+func newDriver(t testing.TB, name string, options []string) *Driver {
+	root, err := ioutil.TempDir("", "docker-graphtest-")
+	require.NoError(t, err)
+
+	require.NoError(t, os.MkdirAll(root, 0755))
+	d, err := graphdriver.GetDriver(name, nil, graphdriver.Options{DriverOptions: options, Root: root})
+	if err != nil {
+		t.Logf("graphdriver: %v\n", err)
+		if err == graphdriver.ErrNotSupported || err == graphdriver.ErrPrerequisites || err == graphdriver.ErrIncompatibleFS {
+			t.Skipf("Driver %s not supported", name)
+		}
+		t.Fatal(err)
+	}
+	return &Driver{d, root, 1}
+}
+
+func cleanup(t testing.TB, d *Driver) {
+	if err := d.Cleanup(); err != nil {
+		t.Fatal(err)
+	}
+	os.RemoveAll(d.root)
+}
+
+// GetDriver creates a new driver with the given name, or returns the existing
+// driver with that name after incrementing its reference count.
+func GetDriver(t testing.TB, name string, options ...string) graphdriver.Driver {
+	if drv == nil {
+		drv = newDriver(t, name, options)
+	} else {
+		drv.refCount++
+	}
+	return drv
+}
+
+// PutDriver removes the driver if it is no longer used and updates the reference count.
+func PutDriver(t testing.TB) {
+	if drv == nil {
+		t.Skip("No driver to put!")
+	}
+	drv.refCount--
+	if drv.refCount == 0 {
+		cleanup(t, drv)
+		drv = nil
+	}
+}
+
+// DriverTestCreateEmpty creates a new image and verifies it is empty and has the right metadata
+func DriverTestCreateEmpty(t testing.TB, drivername string, driverOptions ...string) {
+	driver := GetDriver(t, drivername, driverOptions...)
+	defer PutDriver(t)
+
+	err := driver.Create("empty", "", nil)
+	require.NoError(t, err)
+
+	defer func() {
+		require.NoError(t, driver.Remove("empty"))
+	}()
+
+	if !driver.Exists("empty") {
+		t.Fatal("Newly created image doesn't exist")
+	}
+
+	dir, err := driver.Get("empty", "")
+	require.NoError(t, err)
+
+	verifyFile(t, dir, 0755|os.ModeDir, 0, 0)
+
+	// Verify that the directory is empty
+	fis, err := readDir(dir)
+	require.NoError(t, err)
+	assert.Len(t, fis, 0)
+
+	driver.Put("empty")
+}
+
+// DriverTestCreateBase creates a base driver and verifies it.
+func DriverTestCreateBase(t testing.TB, drivername string, driverOptions ...string) {
+	driver := GetDriver(t, drivername, driverOptions...)
+	defer PutDriver(t)
+
+	createBase(t, driver, "Base")
+	defer func() {
+		require.NoError(t, driver.Remove("Base"))
+	}()
+	verifyBase(t, driver, "Base")
+}
+
+// DriverTestCreateSnap creates a driver and a snapshot and verifies them.
+func DriverTestCreateSnap(t testing.TB, drivername string, driverOptions ...string) {
+	driver := GetDriver(t, drivername, driverOptions...)
+	defer PutDriver(t)
+
+	createBase(t, driver, "Base")
+	defer func() {
+		require.NoError(t, driver.Remove("Base"))
+	}()
+
+	err := driver.Create("Snap", "Base", nil)
+	require.NoError(t, err)
+	defer func() {
+		require.NoError(t, driver.Remove("Snap"))
+	}()
+
+	verifyBase(t, driver, "Snap")
+}
+
+// DriverTestDeepLayerRead reads a file from a lower layer under a given number of layers
+func DriverTestDeepLayerRead(t testing.TB, layerCount int, drivername string, driverOptions ...string) {
+	driver := GetDriver(t, drivername, driverOptions...)
+	defer PutDriver(t)
+
+	base := stringid.GenerateRandomID()
+	if err := driver.Create(base, "", nil); err != nil {
+		t.Fatal(err)
+	}
+
+	content := []byte("test content")
+	if err := addFile(driver, base, "testfile.txt", content); err != nil {
+		t.Fatal(err)
+	}
+
+	topLayer, err := addManyLayers(driver, base, layerCount)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = checkManyLayers(driver, topLayer, layerCount)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := checkFile(driver, topLayer, "testfile.txt", content); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// DriverTestDiffApply tests that diffing and applying produces the same layer
+func DriverTestDiffApply(t testing.TB, fileCount int, drivername string, driverOptions ...string) {
+	driver := GetDriver(t, drivername, driverOptions...)
+	defer PutDriver(t)
+	base := stringid.GenerateRandomID()
+	upper := stringid.GenerateRandomID()
+	deleteFile := "file-remove.txt"
+	deleteFileContent := []byte("This file should get removed in upper!")
+	deleteDir := "var/lib"
+
+	if err := driver.Create(base, "", nil); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := addManyFiles(driver, base, fileCount, 3); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := addFile(driver, base, deleteFile, deleteFileContent); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := addDirectory(driver, base, deleteDir); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := driver.Create(upper, base, nil); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := addManyFiles(driver, upper, fileCount, 6); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := removeAll(driver, upper, deleteFile, deleteDir); err != nil {
+		t.Fatal(err)
+	}
+
+	diffSize, err := driver.DiffSize(upper, "")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	diff := stringid.GenerateRandomID()
+	if err := driver.Create(diff, base, nil); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := checkManyFiles(driver, diff, fileCount, 3); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := checkFile(driver, diff, deleteFile, deleteFileContent); err != nil {
+		t.Fatal(err)
+	}
+
+	arch, err := driver.Diff(upper, base)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	buf := bytes.NewBuffer(nil)
+	if _, err := buf.ReadFrom(arch); err != nil {
+		t.Fatal(err)
+	}
+	if err := arch.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	applyDiffSize, err := driver.ApplyDiff(diff, base, bytes.NewReader(buf.Bytes()))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if applyDiffSize != diffSize {
+		t.Fatalf("Apply diff size different, got %d, expected %d", applyDiffSize, diffSize)
+	}
+
+	if err := checkManyFiles(driver, diff, fileCount, 6); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := checkFileRemoved(driver, diff, deleteFile); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := checkFileRemoved(driver, diff, deleteDir); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// DriverTestChanges tests that the changes computed for a layer match the changes actually made
+func DriverTestChanges(t testing.TB, drivername string, driverOptions ...string) {
+	driver := GetDriver(t, drivername, driverOptions...)
+	defer PutDriver(t)
+	base := stringid.GenerateRandomID()
+	upper := stringid.GenerateRandomID()
+	if err := driver.Create(base, "", nil); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := addManyFiles(driver, base, 20, 3); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := driver.Create(upper, base, nil); err != nil {
+		t.Fatal(err)
+	}
+
+	expectedChanges, err := changeManyFiles(driver, upper, 20, 6)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	changes, err := driver.Changes(upper, base)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err = checkChanges(expectedChanges, changes); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func writeRandomFile(path string, size uint64) error {
+	buf := make([]int64, size/8)
+
+	r := rand.NewSource(0)
+	for i := range buf {
+		buf[i] = r.Int63()
+	}
+
+	// Cast to []byte
+	header := *(*reflect.SliceHeader)(unsafe.Pointer(&buf))
+	header.Len *= 8
+	header.Cap *= 8
+	data := *(*[]byte)(unsafe.Pointer(&header))
+
+	return ioutil.WriteFile(path, data, 0700)
+}
+
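writeRandomFile above reinterprets the []int64 buffer as []byte through reflect.SliceHeader to avoid an extra copy. As a hedged alternative only, the same file can be produced without package unsafe; writeRandomFileSafe is invented for this sketch, assumes encoding/binary is imported, and leaves a zero tail when size is not a multiple of 8.

func writeRandomFileSafe(path string, size uint64) error {
	data := make([]byte, size)
	r := rand.NewSource(0)
	// Fill eight bytes at a time from the deterministic source.
	for i := 0; i+8 <= len(data); i += 8 {
		binary.LittleEndian.PutUint64(data[i:], uint64(r.Int63()))
	}
	return ioutil.WriteFile(path, data, 0700)
}

+// DriverTestSetQuota creates a driver and tests setting a quota.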
+func DriverTestSetQuota(t *testing.T, drivername string) { + driver := GetDriver(t, drivername) + defer PutDriver(t) + + createBase(t, driver, "Base") + createOpts := &graphdriver.CreateOpts{} + createOpts.StorageOpt = make(map[string]string, 1) + createOpts.StorageOpt["size"] = "50M" + if err := driver.Create("zfsTest", "Base", createOpts); err != nil { + t.Fatal(err) + } + + mountPath, err := driver.Get("zfsTest", "") + if err != nil { + t.Fatal(err) + } + + quota := uint64(50 * units.MiB) + err = writeRandomFile(path.Join(mountPath, "file"), quota*2) + if pathError, ok := err.(*os.PathError); ok && pathError.Err != unix.EDQUOT { + t.Fatalf("expect write() to fail with %v, got %v", unix.EDQUOT, err) + } + +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/graphtest/graphtest_windows.go b/vendor/github.com/moby/moby/daemon/graphdriver/graphtest/graphtest_windows.go new file mode 100644 index 000000000..a50c5211e --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/graphtest/graphtest_windows.go @@ -0,0 +1 @@ +package graphtest diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/graphtest/testutil.go b/vendor/github.com/moby/moby/daemon/graphdriver/graphtest/testutil.go new file mode 100644 index 000000000..35bf6d17b --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/graphtest/testutil.go @@ -0,0 +1,342 @@ +package graphtest + +import ( + "bytes" + "fmt" + "io/ioutil" + "math/rand" + "os" + "path" + "sort" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/stringid" +) + +func randomContent(size int, seed int64) []byte { + s := rand.NewSource(seed) + content := make([]byte, size) + + for i := 0; i < len(content); i += 7 { + val := s.Int63() + for j := 0; i+j < len(content) && j < 7; j++ { + content[i+j] = byte(val) + val >>= 8 + } + } + + return content +} + +func addFiles(drv graphdriver.Driver, layer string, seed int64) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + if err := ioutil.WriteFile(path.Join(root, "file-a"), randomContent(64, seed), 0755); err != nil { + return err + } + if err := os.MkdirAll(path.Join(root, "dir-b"), 0755); err != nil { + return err + } + if err := ioutil.WriteFile(path.Join(root, "dir-b", "file-b"), randomContent(128, seed+1), 0755); err != nil { + return err + } + + return ioutil.WriteFile(path.Join(root, "file-c"), randomContent(128*128, seed+2), 0755) +} + +func checkFile(drv graphdriver.Driver, layer, filename string, content []byte) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + fileContent, err := ioutil.ReadFile(path.Join(root, filename)) + if err != nil { + return err + } + + if bytes.Compare(fileContent, content) != 0 { + return fmt.Errorf("mismatched file content %v, expecting %v", fileContent, content) + } + + return nil +} + +func addFile(drv graphdriver.Driver, layer, filename string, content []byte) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + return ioutil.WriteFile(path.Join(root, filename), content, 0755) +} + +func addDirectory(drv graphdriver.Driver, layer, dir string) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + return os.MkdirAll(path.Join(root, dir), 0755) +} + +func removeAll(drv graphdriver.Driver, layer string, names ...string) error { + root, err := drv.Get(layer, "") + if err != nil 
{ + return err + } + defer drv.Put(layer) + + for _, filename := range names { + if err := os.RemoveAll(path.Join(root, filename)); err != nil { + return err + } + } + return nil +} + +func checkFileRemoved(drv graphdriver.Driver, layer, filename string) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + if _, err := os.Stat(path.Join(root, filename)); err == nil { + return fmt.Errorf("file still exists: %s", path.Join(root, filename)) + } else if !os.IsNotExist(err) { + return err + } + + return nil +} + +func addManyFiles(drv graphdriver.Driver, layer string, count int, seed int64) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + for i := 0; i < count; i += 100 { + dir := path.Join(root, fmt.Sprintf("directory-%d", i)) + if err := os.MkdirAll(dir, 0755); err != nil { + return err + } + for j := 0; i+j < count && j < 100; j++ { + file := path.Join(dir, fmt.Sprintf("file-%d", i+j)) + if err := ioutil.WriteFile(file, randomContent(64, seed+int64(i+j)), 0755); err != nil { + return err + } + } + } + + return nil +} + +func changeManyFiles(drv graphdriver.Driver, layer string, count int, seed int64) ([]archive.Change, error) { + root, err := drv.Get(layer, "") + if err != nil { + return nil, err + } + defer drv.Put(layer) + + changes := []archive.Change{} + for i := 0; i < count; i += 100 { + archiveRoot := fmt.Sprintf("/directory-%d", i) + if err := os.MkdirAll(path.Join(root, archiveRoot), 0755); err != nil { + return nil, err + } + for j := 0; i+j < count && j < 100; j++ { + if j == 0 { + changes = append(changes, archive.Change{ + Path: archiveRoot, + Kind: archive.ChangeModify, + }) + } + var change archive.Change + switch j % 3 { + // Update file + case 0: + change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d", i+j)) + change.Kind = archive.ChangeModify + if err := ioutil.WriteFile(path.Join(root, change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil { + return nil, err + } + // Add file + case 1: + change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d-%d", seed, i+j)) + change.Kind = archive.ChangeAdd + if err := ioutil.WriteFile(path.Join(root, change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil { + return nil, err + } + // Remove file + case 2: + change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d", i+j)) + change.Kind = archive.ChangeDelete + if err := os.Remove(path.Join(root, change.Path)); err != nil { + return nil, err + } + } + changes = append(changes, change) + } + } + + return changes, nil +} + +func checkManyFiles(drv graphdriver.Driver, layer string, count int, seed int64) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + for i := 0; i < count; i += 100 { + dir := path.Join(root, fmt.Sprintf("directory-%d", i)) + for j := 0; i+j < count && j < 100; j++ { + file := path.Join(dir, fmt.Sprintf("file-%d", i+j)) + fileContent, err := ioutil.ReadFile(file) + if err != nil { + return err + } + + content := randomContent(64, seed+int64(i+j)) + + if bytes.Compare(fileContent, content) != 0 { + return fmt.Errorf("mismatched file content %v, expecting %v", fileContent, content) + } + } + } + + return nil +} + +type changeList []archive.Change + +func (c changeList) Less(i, j int) bool { + if c[i].Path == c[j].Path { + return c[i].Kind < c[j].Kind + } + return c[i].Path < c[j].Path +} +func (c changeList) Len() int { return len(c) } +func (c changeList) Swap(i, j int) { c[j], c[i] 
= c[i], c[j] }
+
+func checkChanges(expected, actual []archive.Change) error {
+	if len(expected) != len(actual) {
+		return fmt.Errorf("unexpected number of changes, expected %d, got %d", len(expected), len(actual))
+	}
+	sort.Sort(changeList(expected))
+	sort.Sort(changeList(actual))
+
+	for i := range expected {
+		if expected[i] != actual[i] {
+			return fmt.Errorf("unexpected change, expecting %v, got %v", expected[i], actual[i])
+		}
+	}
+
+	return nil
+}
+
+func addLayerFiles(drv graphdriver.Driver, layer, parent string, i int) error {
+	root, err := drv.Get(layer, "")
+	if err != nil {
+		return err
+	}
+	defer drv.Put(layer)
+
+	if err := ioutil.WriteFile(path.Join(root, "top-id"), []byte(layer), 0755); err != nil {
+		return err
+	}
+	layerDir := path.Join(root, fmt.Sprintf("layer-%d", i))
+	if err := os.MkdirAll(layerDir, 0755); err != nil {
+		return err
+	}
+	if err := ioutil.WriteFile(path.Join(layerDir, "layer-id"), []byte(layer), 0755); err != nil {
+		return err
+	}
+	if err := ioutil.WriteFile(path.Join(layerDir, "parent-id"), []byte(parent), 0755); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func addManyLayers(drv graphdriver.Driver, baseLayer string, count int) (string, error) {
+	lastLayer := baseLayer
+	for i := 1; i <= count; i++ {
+		nextLayer := stringid.GenerateRandomID()
+		if err := drv.Create(nextLayer, lastLayer, nil); err != nil {
+			return "", err
+		}
+		if err := addLayerFiles(drv, nextLayer, lastLayer, i); err != nil {
+			return "", err
+		}
+
+		lastLayer = nextLayer
+
+	}
+	return lastLayer, nil
+}
+
+func checkManyLayers(drv graphdriver.Driver, layer string, count int) error {
+	root, err := drv.Get(layer, "")
+	if err != nil {
+		return err
+	}
+	defer drv.Put(layer)
+
+	layerIDBytes, err := ioutil.ReadFile(path.Join(root, "top-id"))
+	if err != nil {
+		return err
+	}
+
+	if bytes.Compare(layerIDBytes, []byte(layer)) != 0 {
+		return fmt.Errorf("mismatched file content %v, expecting %v", layerIDBytes, []byte(layer))
+	}
+
+	for i := count; i > 0; i-- {
+		layerDir := path.Join(root, fmt.Sprintf("layer-%d", i))
+
+		thisLayerIDBytes, err := ioutil.ReadFile(path.Join(layerDir, "layer-id"))
+		if err != nil {
+			return err
+		}
+		if bytes.Compare(thisLayerIDBytes, layerIDBytes) != 0 {
+			return fmt.Errorf("mismatched file content %v, expecting %v", thisLayerIDBytes, layerIDBytes)
+		}
+		layerIDBytes, err = ioutil.ReadFile(path.Join(layerDir, "parent-id"))
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// readDir reads a directory just like ioutil.ReadDir()
+// then hides specific files (currently "lost+found")
+// so the tests don't "see" it
+func readDir(dir string) ([]os.FileInfo, error) {
+	a, err := ioutil.ReadDir(dir)
+	if err != nil {
+		return nil, err
+	}
+
+	b := a[:0]
+	for _, x := range a {
+		if x.Name() != "lost+found" { // ext4 always has this dir
+			b = append(b, x)
+		}
+	}
+
+	return b, nil
+}
diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/graphtest/testutil_unix.go b/vendor/github.com/moby/moby/daemon/graphdriver/graphtest/testutil_unix.go
new file mode 100644
index 000000000..96474487a
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/graphdriver/graphtest/testutil_unix.go
@@ -0,0 +1,70 @@
+// +build linux freebsd
+
+package graphtest
+
+import (
+	"io/ioutil"
+	"os"
+	"path"
+	"syscall"
+	"testing"
+
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"golang.org/x/sys/unix"
+)
+
+func verifyFile(t testing.TB, path string, mode os.FileMode, uid, gid uint32) {
+	fi, err := os.Stat(path)
+	require.NoError(t, err)
+
+	actual := fi.Mode()
+	assert.Equal(t, mode&os.ModeType, actual&os.ModeType, path)
+	assert.Equal(t, mode&os.ModePerm, actual&os.ModePerm, path)
+	assert.Equal(t, mode&os.ModeSticky, actual&os.ModeSticky, path)
+	assert.Equal(t, mode&os.ModeSetuid, actual&os.ModeSetuid, path)
+	assert.Equal(t, mode&os.ModeSetgid, actual&os.ModeSetgid, path)
+
+	if stat, ok := fi.Sys().(*syscall.Stat_t); ok {
+		assert.Equal(t, uid, stat.Uid, path)
+		assert.Equal(t, gid, stat.Gid, path)
+	}
+}
+
+func createBase(t testing.TB, driver graphdriver.Driver, name string) {
+	// We need to be able to set any perms
+	oldmask := unix.Umask(0)
+	defer unix.Umask(oldmask)
+
+	err := driver.CreateReadWrite(name, "", nil)
+	require.NoError(t, err)
+
+	dir, err := driver.Get(name, "")
+	require.NoError(t, err)
+	defer driver.Put(name)
+
+	subdir := path.Join(dir, "a subdir")
+	require.NoError(t, os.Mkdir(subdir, 0705|os.ModeSticky))
+	require.NoError(t, os.Chown(subdir, 1, 2))
+
+	file := path.Join(dir, "a file")
+	err = ioutil.WriteFile(file, []byte("Some data"), 0222|os.ModeSetuid)
+	require.NoError(t, err)
+}
+
+func verifyBase(t testing.TB, driver graphdriver.Driver, name string) {
+	dir, err := driver.Get(name, "")
+	require.NoError(t, err)
+	defer driver.Put(name)
+
+	subdir := path.Join(dir, "a subdir")
+	verifyFile(t, subdir, 0705|os.ModeDir|os.ModeSticky, 1, 2)
+
+	file := path.Join(dir, "a file")
+	verifyFile(t, file, 0222|os.ModeSetuid, 0, 0)
+
+	files, err := readDir(dir)
+	require.NoError(t, err)
+	assert.Len(t, files, 2)
+}
diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/lcow/lcow.go b/vendor/github.com/moby/moby/daemon/graphdriver/lcow/lcow.go
new file mode 100644
index 000000000..75c775bb5
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/graphdriver/lcow/lcow.go
@@ -0,0 +1,929 @@
+// +build windows
+
+// Maintainer: jhowardmsft
+// Locale: en-gb
+// About: Graph-driver for Linux Containers On Windows (LCOW)
+//
+// This graphdriver runs in two modes. It is yet to be determined which one
+// will be the shipping mode. The global mode is where a single utility VM
+// is used for all service VM tool operations. This isn't safe security-wise
+// as it's attaching a sandbox of multiple containers to it, containing
+// untrusted data. This may be fine for client devops scenarios. In
+// safe mode, a unique utility VM is instantiated for all service VM tool
+// operations. The downside of safe-mode is that operations are slower as
+// a new service utility VM has to be started and torn-down when needed.
+//
+// To enable global mode, run with --storage-opt lcow.globalmode=true
+
+// TODO: Grab logs from SVM at terminate or errors
+
+package lcow
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/Microsoft/hcsshim"
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/docker/docker/pkg/ioutils"
+	"github.com/docker/docker/pkg/system"
+	"github.com/jhowardmsft/opengcs/gogcs/client"
+)
+
+// init registers this driver with the graphdriver registry. The driver is
+// initialised by the function passed in the second parameter, implemented in this file.
+func init() {
+	graphdriver.Register("lcow", InitDriver)
+}
+
+const (
+	// sandboxFilename is the name of the file containing a layer's sandbox (read-write layer).
+	sandboxFilename = "sandbox.vhdx"
+
+	// scratchFilename is the name of the scratch-space used by an SVM to avoid running out of memory.
+	scratchFilename = "scratch.vhdx"
+
+	// layerFilename is the name of the file containing a layer's read-only contents.
+	// Note this really is VHD format, not VHDX.
+	layerFilename = "layer.vhd"
+
+	// toolsScratchPath is a location in a service utility VM that the tools can use as a
+	// scratch space to avoid running out of memory.
+	// TODO @jhowardmsft. I really dislike this path! But needs a platform change or passing parameters to the tools.
+	toolsScratchPath = "/mnt/gcs/LinuxServiceVM/scratch"
+
+	// svmGlobalID is the ID used in the serviceVMs map for the global service VM when running in "global" mode.
+	svmGlobalID = "_lcow_global_svm_"
+
+	// cacheDirectory is the sub-folder under the driver's data-root used to cache blank sandbox and scratch VHDs.
+	cacheDirectory = "cache"
+
+	// scratchDirectory is the sub-folder under the driver's data-root used for scratch VHDs in service VMs
+	scratchDirectory = "scratch"
+)
+
+// cacheItem is our internal structure representing an item in our local cache
+// of things that have been mounted.
+type cacheItem struct {
+	sync.Mutex        // Protects operations performed on this item
+	uvmPath    string // Path in utility VM
+	hostPath   string // Path on host
+	refCount   int    // How many times it's been mounted
+	isSandbox  bool   // True if a sandbox
+	isMounted  bool   // True when mounted in a service VM
+}
+
+// serviceVMItem is our internal structure representing an item in our
+// map of service VMs we are maintaining.
+type serviceVMItem struct {
+	sync.Mutex                     // Serialises operations being performed in this service VM.
+	scratchAttached bool           // Has a scratch been attached?
+	config          *client.Config // Represents the service VM item.
+}
+
+// Driver represents an LCOW graph driver.
+type Driver struct {
+	dataRoot           string                    // Root path on the host where we are storing everything.
+	cachedSandboxFile  string                    // Location of the local default-sized cached sandbox.
+	cachedSandboxMutex sync.Mutex                // Protects race conditions from multiple threads creating the cached sandbox.
+	cachedScratchFile  string                    // Location of the local cached empty scratch space.
+	cachedScratchMutex sync.Mutex                // Protects race conditions from multiple threads creating the cached scratch.
+	options            []string                  // Graphdriver options we are initialised with.
+	serviceVmsMutex    sync.Mutex                // Protects add/updates/delete to the serviceVMs map.
+	serviceVms         map[string]*serviceVMItem // Map of the configs representing the service VM(s) we are running.
+	globalMode         bool                      // Indicates if running in an unsafe/global service VM mode.
+
+	// NOTE: It is OK to use a cache here because Windows does not support
+	// restoring containers when the daemon dies.
+
+	cacheMutex sync.Mutex            // Protects add/update/deletes to cache.
+	cache      map[string]*cacheItem // Map holding a cache of all the IDs we've mounted/unmounted.
+}
+
+// deletefiles is a helper function for initialisation where we delete any
+// left-over scratch files in case we were previously forcibly terminated.
+func deletefiles(path string, f os.FileInfo, err error) error {
+	if strings.HasSuffix(f.Name(), ".vhdx") {
+		logrus.Warnf("lcowdriver: init: deleting stale scratch file %s", path)
+		return os.Remove(path)
+	}
+	return nil
+}
+
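Tying this back to the graphdriver registry earlier in the patch: init() above registers "lcow", so the daemon reaches InitDriver through graphdriver.GetDriver. A hedged initialisation sketch follows; the openLCOW name and the root path are illustrative only.

func openLCOW() (graphdriver.Driver, error) {
	// nil means no plugin getter; the root path is illustrative.
	return graphdriver.GetDriver("lcow", nil, graphdriver.Options{
		Root:          `C:\ProgramData\docker\image`,
		DriverOptions: []string{"lcow.globalmode=true"},
	})
}

+// InitDriver returns a new LCOW storage driver.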
+func InitDriver(dataRoot string, options []string, _, _ []idtools.IDMap) (graphdriver.Driver, error) {
+	title := "lcowdriver: init:"
+
+	cd := filepath.Join(dataRoot, cacheDirectory)
+	sd := filepath.Join(dataRoot, scratchDirectory)
+
+	d := &Driver{
+		dataRoot:          dataRoot,
+		options:           options,
+		cachedSandboxFile: filepath.Join(cd, sandboxFilename),
+		cachedScratchFile: filepath.Join(cd, scratchFilename),
+		cache:             make(map[string]*cacheItem),
+		serviceVms:        make(map[string]*serviceVMItem),
+		globalMode:        false,
+	}
+
+	// Look for relevant options
+	for _, v := range options {
+		opt := strings.SplitN(v, "=", 2)
+		if len(opt) == 2 {
+			switch strings.ToLower(opt[0]) {
+			case "lcow.globalmode":
+				var err error
+				d.globalMode, err = strconv.ParseBool(opt[1])
+				if err != nil {
+					return nil, fmt.Errorf("%s failed to parse value for 'lcow.globalmode' - must be 'true' or 'false'", title)
+				}
+				break
+			}
+		}
+	}
+
+	// Make sure the dataRoot directory is created
+	if err := idtools.MkdirAllAs(dataRoot, 0700, 0, 0); err != nil {
+		return nil, fmt.Errorf("%s failed to create '%s': %v", title, dataRoot, err)
+	}
+
+	// Make sure the cache directory is created under dataRoot
+	if err := idtools.MkdirAllAs(cd, 0700, 0, 0); err != nil {
+		return nil, fmt.Errorf("%s failed to create '%s': %v", title, cd, err)
+	}
+
+	// Make sure the scratch directory is created under dataRoot
+	if err := idtools.MkdirAllAs(sd, 0700, 0, 0); err != nil {
+		return nil, fmt.Errorf("%s failed to create '%s': %v", title, sd, err)
+	}
+
+	// Delete any items in the scratch directory
+	filepath.Walk(sd, deletefiles)
+
+	logrus.Infof("%s dataRoot: %s globalMode: %t", title, dataRoot, d.globalMode)
+
+	return d, nil
+}
+
+// startServiceVMIfNotRunning starts a service utility VM if it is not currently running.
+// It can optionally be started with a mapped virtual disk. Returns an opengcs config structure
+// representing the VM.
+func (d *Driver) startServiceVMIfNotRunning(id string, mvdToAdd *hcsshim.MappedVirtualDisk, context string) (*serviceVMItem, error) {
+	// Use the global ID if in global mode
+	if d.globalMode {
+		id = svmGlobalID
+	}
+
+	title := fmt.Sprintf("lcowdriver: startservicevmifnotrunning %s:", id)
+
+	// Make sure we are thread-safe when interrogating the map
+	logrus.Debugf("%s taking serviceVmsMutex", title)
+	d.serviceVmsMutex.Lock()
+
+	// Nothing to do if it's already running except add the mapped drive if supplied.
+	if svm, ok := d.serviceVms[id]; ok {
+		logrus.Debugf("%s exists, releasing serviceVmsMutex", title)
+		d.serviceVmsMutex.Unlock()
+
+		if mvdToAdd != nil {
+			logrus.Debugf("hot-adding %s to %s", mvdToAdd.HostPath, mvdToAdd.ContainerPath)
+
+			// Ensure the item is locked while doing this
+			logrus.Debugf("%s locking serviceVmItem %s", title, svm.config.Name)
+			svm.Lock()
+
+			if err := svm.config.HotAddVhd(mvdToAdd.HostPath, mvdToAdd.ContainerPath); err != nil {
+				logrus.Debugf("%s releasing serviceVmItem %s on hot-add failure %s", title, svm.config.Name, err)
+				svm.Unlock()
+				return nil, fmt.Errorf("%s hot add %s to %s failed: %s", title, mvdToAdd.HostPath, mvdToAdd.ContainerPath, err)
+			}
+
+			logrus.Debugf("%s releasing serviceVmItem %s", title, svm.config.Name)
+			svm.Unlock()
+		}
+		return svm, nil
+	}
+
+	// Release the lock early
+	logrus.Debugf("%s releasing serviceVmsMutex", title)
+	d.serviceVmsMutex.Unlock()
+
+	// So we are starting one. First we need an empty structure.
+	svm := &serviceVMItem{
+		config: &client.Config{},
+	}
+
+	// Generate a default configuration
+	if err := svm.config.GenerateDefault(d.options); err != nil {
+		return nil, fmt.Errorf("%s failed to generate default gogcs configuration for global svm (%s): %s", title, context, err)
+	}
+
+	// For the name, we deliberately add a suffix in safe mode to ensure that it doesn't
+	// clash with another utility VM which may be running for the container itself.
+	// This also makes it easier to correlate through Get-ComputeProcess.
+	if id == svmGlobalID {
+		svm.config.Name = svmGlobalID
+	} else {
+		svm.config.Name = fmt.Sprintf("%s_svm", id)
+	}
+
+	// Ensure we take the cached scratch mutex around the check to ensure the file is complete
+	// and not in the process of being created by another thread.
+	scratchTargetFile := filepath.Join(d.dataRoot, scratchDirectory, fmt.Sprintf("%s.vhdx", id))
+
+	logrus.Debugf("%s locking cachedScratchMutex", title)
+	d.cachedScratchMutex.Lock()
+	if _, err := os.Stat(d.cachedScratchFile); err == nil {
+		// Make a copy of cached scratch to the scratch directory
+		logrus.Debugf("lcowdriver: startServiceVmIfNotRunning: (%s) cloning cached scratch for mvd", context)
+		if err := client.CopyFile(d.cachedScratchFile, scratchTargetFile, true); err != nil {
+			logrus.Debugf("%s releasing cachedScratchMutex on err: %s", title, err)
+			d.cachedScratchMutex.Unlock()
+			return nil, err
+		}
+
+		// Add the cached clone as a mapped virtual disk
+		logrus.Debugf("lcowdriver: startServiceVmIfNotRunning: (%s) adding cloned scratch as mvd", context)
+		mvd := hcsshim.MappedVirtualDisk{
+			HostPath:          scratchTargetFile,
+			ContainerPath:     toolsScratchPath,
+			CreateInUtilityVM: true,
+		}
+		svm.config.MappedVirtualDisks = append(svm.config.MappedVirtualDisks, mvd)
+		svm.scratchAttached = true
+	}
+	logrus.Debugf("%s releasing cachedScratchMutex", title)
+	d.cachedScratchMutex.Unlock()
+
+	// If requested to start it with a mapped virtual disk, add it now.
+	if mvdToAdd != nil {
+		svm.config.MappedVirtualDisks = append(svm.config.MappedVirtualDisks, *mvdToAdd)
+	}
+
+	// Start it.
+	logrus.Debugf("lcowdriver: startServiceVmIfNotRunning: (%s) starting %s", context, svm.config.Name)
+	if err := svm.config.Create(); err != nil {
+		return nil, fmt.Errorf("failed to start service utility VM (%s): %s", context, err)
+	}
+
+	// As it's now running, add it to the map, checking for a race where another
+	// thread has simultaneously tried to start it.
+	logrus.Debugf("%s locking serviceVmsMutex for insertion", title)
+	d.serviceVmsMutex.Lock()
+	if svm, ok := d.serviceVms[id]; ok {
+		logrus.Debugf("%s releasing serviceVmsMutex after insertion but exists", title)
+		d.serviceVmsMutex.Unlock()
+		return svm, nil
+	}
+	d.serviceVms[id] = svm
+	logrus.Debugf("%s releasing serviceVmsMutex after insertion", title)
+	d.serviceVmsMutex.Unlock()
+
+	// Now we have a running service VM, we can create the cached scratch file if it doesn't exist.
+	logrus.Debugf("%s locking cachedScratchMutex", title)
+	d.cachedScratchMutex.Lock()
+	if _, err := os.Stat(d.cachedScratchFile); err != nil {
+		// TODO: Not a typo, but needs fixing when the platform sandbox stuff has been sorted out.
+		logrus.Debugf("%s (%s): creating an SVM scratch - locking serviceVM", title, context)
+		svm.Lock()
+		if err := svm.config.CreateSandbox(d.cachedScratchFile, client.DefaultSandboxSizeMB, d.cachedSandboxFile); err != nil {
+			logrus.Debugf("%s (%s): releasing serviceVM on error path", title, context)
+			svm.Unlock()
+			logrus.Debugf("%s (%s): releasing cachedScratchMutex on error path", title, context)
+			d.cachedScratchMutex.Unlock()
+			// TODO: NEED TO REMOVE FROM MAP HERE AND STOP IT
+			return nil, fmt.Errorf("failed to create SVM scratch VHDX (%s): %s", context, err)
+		}
+		logrus.Debugf("%s (%s): releasing serviceVM", title, context)
+		svm.Unlock()
+	}
+	logrus.Debugf("%s (%s): releasing cachedScratchMutex", title, context)
+	d.cachedScratchMutex.Unlock()
+
+	// Hot-add the scratch-space if not already attached
+	if !svm.scratchAttached {
+		// Make a copy of it to the layer directory
+		logrus.Debugf("lcowdriver: startServiceVmIfNotRunning: (%s) cloning cached scratch for hot-add", context)
+		if err := client.CopyFile(d.cachedScratchFile, scratchTargetFile, true); err != nil {
+			// TODO: NEED TO REMOVE FROM MAP HERE AND STOP IT
+			return nil, err
+		}
+
+		logrus.Debugf("lcowdriver: startServiceVmIfNotRunning: (%s) hot-adding scratch %s - locking serviceVM", context, scratchTargetFile)
+		svm.Lock()
+		if err := svm.config.HotAddVhd(scratchTargetFile, toolsScratchPath); err != nil {
+			logrus.Debugf("%s (%s): releasing serviceVM on error path", title, context)
+			svm.Unlock()
+			// TODO: NEED TO REMOVE FROM MAP HERE AND STOP IT
+			return nil, fmt.Errorf("failed to hot-add %s: %s", scratchTargetFile, err)
+		}
+		logrus.Debugf("%s (%s): releasing serviceVM", title, context)
+		svm.Unlock()
+	}
+
+	logrus.Debugf("lcowdriver: startServiceVmIfNotRunning: (%s) success", context)
+	return svm, nil
+}
+
+// getServiceVM returns the appropriate service utility VM instance, optionally
+// deleting it from the map (but not the global one)
+func (d *Driver) getServiceVM(id string, deleteFromMap bool) (*serviceVMItem, error) {
+	logrus.Debugf("lcowdriver: getservicevm:locking serviceVmsMutex")
+	d.serviceVmsMutex.Lock()
+	defer func() {
+		logrus.Debugf("lcowdriver: getservicevm:releasing serviceVmsMutex")
+		d.serviceVmsMutex.Unlock()
+	}()
+	if d.globalMode {
+		id = svmGlobalID
+	}
+	if _, ok := d.serviceVms[id]; !ok {
+		return nil, fmt.Errorf("getservicevm for %s failed as not found", id)
+	}
+	svm := d.serviceVms[id]
+	if deleteFromMap && id != svmGlobalID {
+		logrus.Debugf("lcowdriver: getservicevm: removing %s from map", id)
+		delete(d.serviceVms, id)
+	}
+	return svm, nil
+}
+
+// terminateServiceVM terminates a service utility VM if it's running, but does nothing
+// when in global mode as its lifetime is limited to that of the daemon.
+func (d *Driver) terminateServiceVM(id, context string, force bool) error {
+
+	// We don't do anything in safe mode unless the force flag has been passed, which
+	// is only the case for cleanup at driver termination.
+	if d.globalMode {
+		if !force {
+			logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - doing nothing as in global mode", id, context)
+			return nil
+		}
+		id = svmGlobalID
+	}
+
+	// Get the service VM and delete it from the map
+	svm, err := d.getServiceVM(id, true)
+	if err != nil {
+		return err
+	}
+
+	// We run the deletion of the scratch as a deferred function to at least attempt
+	// clean-up in case of errors.
+ defer func() { + if svm.scratchAttached { + scratchTargetFile := filepath.Join(d.dataRoot, scratchDirectory, fmt.Sprintf("%s.vhdx", id)) + logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - deleting scratch %s", id, context, scratchTargetFile) + if err := os.Remove(scratchTargetFile); err != nil { + logrus.Warnf("failed to remove scratch file %s (%s): %s", scratchTargetFile, context, err) + } + } + }() + + // Nothing to do if it's not running + if svm.config.Uvm != nil { + logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - calling terminate", id, context) + if err := svm.config.Uvm.Terminate(); err != nil { + return fmt.Errorf("failed to terminate utility VM (%s): %s", context, err) + } + + logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - waiting for utility VM to terminate", id, context) + if err := svm.config.Uvm.WaitTimeout(time.Duration(svm.config.UvmTimeoutSeconds) * time.Second); err != nil { + return fmt.Errorf("failed waiting for utility VM to terminate (%s): %s", context, err) + } + } + + logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - success", id, context) + return nil +} + +// String returns the string representation of a driver. This should match +// the name the graph driver has been registered with. +func (d *Driver) String() string { + return "lcow" +} + +// Status returns the status of the driver. +func (d *Driver) Status() [][2]string { + return [][2]string{ + {"LCOW", ""}, + // TODO: Add some more info here - mode, home, .... + } +} + +// Exists returns true if the given id is registered with this driver. +func (d *Driver) Exists(id string) bool { + _, err := os.Lstat(d.dir(id)) + logrus.Debugf("lcowdriver: exists: id %s %t", id, err == nil) + return err == nil +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. That equates to creating a sandbox. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + title := fmt.Sprintf("lcowdriver: createreadwrite: id %s", id) + logrus.Debugf(title) + + // First we need to create the folder + if err := d.Create(id, parent, opts); err != nil { + return err + } + + // Massive perf optimisation here. If we know that the RW layer is the default size, + // and that the cached sandbox already exists, and we are running in safe mode, we + // can just do a simple copy into the layers sandbox file without needing to start a + // unique service VM. For a global service VM, it doesn't really matter. + // + // TODO: @jhowardmsft Where are we going to get the required size from? + // We need to look at the CreateOpts for that, I think.... + + // Make sure we have the sandbox mutex taken while we are examining it. + logrus.Debugf("%s: locking cachedSandboxMutex", title) + d.cachedSandboxMutex.Lock() + _, err := os.Stat(d.cachedSandboxFile) + logrus.Debugf("%s: releasing cachedSandboxMutex", title) + d.cachedSandboxMutex.Unlock() + if err == nil { + logrus.Debugf("%s: using cached sandbox to populate", title) + if err := client.CopyFile(d.cachedSandboxFile, filepath.Join(d.dir(id), sandboxFilename), true); err != nil { + return err + } + return nil + } + + logrus.Debugf("%s: creating SVM to create sandbox", title) + svm, err := d.startServiceVMIfNotRunning(id, nil, "createreadwrite") + if err != nil { + return err + } + defer d.terminateServiceVM(id, "createreadwrite", false) + + // So the cached sandbox needs creating. Ensure we are the only thread creating it. 
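+ // (Holding cachedSandboxMutex across the CreateSandbox call below serialises
+ // creation of the cached sandbox with any concurrent CreateReadWrite caller.)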
+ logrus.Debugf("%s: locking cachedSandboxMutex for creation", title) + d.cachedSandboxMutex.Lock() + defer func() { + logrus.Debugf("%s: releasing cachedSandboxMutex for creation", title) + d.cachedSandboxMutex.Unlock() + }() + + // Synchronise the operation in the service VM. + logrus.Debugf("%s: locking svm for sandbox creation", title) + svm.Lock() + defer func() { + logrus.Debugf("%s: releasing svm for sandbox creation", title) + svm.Unlock() + }() + if err := svm.config.CreateSandbox(filepath.Join(d.dir(id), sandboxFilename), client.DefaultSandboxSizeMB, d.cachedSandboxFile); err != nil { + return err + } + + return nil +} + +// Create creates the folder for the layer with the given id, and +// adds it to the layer chain. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + logrus.Debugf("lcowdriver: create: id %s parent: %s", id, parent) + + parentChain, err := d.getLayerChain(parent) + if err != nil { + return err + } + + var layerChain []string + if parent != "" { + if !d.Exists(parent) { + return fmt.Errorf("lcowdriver: cannot create layer folder with missing parent %s", parent) + } + layerChain = []string{d.dir(parent)} + } + layerChain = append(layerChain, parentChain...) + + // Make sure layers are created with the correct ACL so that VMs can access them. + layerPath := d.dir(id) + logrus.Debugf("lcowdriver: create: id %s: creating %s", id, layerPath) + if err := system.MkdirAllWithACL(layerPath, 755, system.SddlNtvmAdministratorsLocalSystem); err != nil { + return err + } + + if err := d.setLayerChain(id, layerChain); err != nil { + if err2 := os.RemoveAll(layerPath); err2 != nil { + logrus.Warnf("failed to remove layer %s: %s", layerPath, err2) + } + return err + } + logrus.Debugf("lcowdriver: create: id %s: success", id) + + return nil +} + +// Remove unmounts and removes the dir information. +func (d *Driver) Remove(id string) error { + logrus.Debugf("lcowdriver: remove: id %s", id) + tmpID := fmt.Sprintf("%s-removing", id) + tmpLayerPath := d.dir(tmpID) + layerPath := d.dir(id) + + logrus.Debugf("lcowdriver: remove: id %s: layerPath %s", id, layerPath) + if err := os.Rename(layerPath, tmpLayerPath); err != nil && !os.IsNotExist(err) { + return err + } + + if err := os.RemoveAll(tmpLayerPath); err != nil { + return err + } + + logrus.Debugf("lcowdriver: remove: id %s: layerPath %s succeeded", id, layerPath) + return nil +} + +// Get returns the rootfs path for the id. It is reference counted and +// effectively can be thought of as a "mount the layer into the utility +// vm if it isn't already". The contract from the caller of this is that +// all Gets and Puts are matched. It -should- be the case that on cleanup, +// nothing is mounted. +// +// For optimisation, we don't actually mount the filesystem (which in our +// case means [hot-]adding it to a service VM. But we track that and defer +// the actual adding to the point we need to access it. 
+func (d *Driver) Get(id, mountLabel string) (string, error) {
+ title := fmt.Sprintf("lcowdriver: get: %s", id)
+ logrus.Debugf(title)
+
+ // Work out what we are working on
+ vhdFilename, vhdSize, isSandbox, err := getLayerDetails(d.dir(id))
+ if err != nil {
+ logrus.Debugf("%s failed to get layer details from %s: %s", title, d.dir(id), err)
+ return "", fmt.Errorf("%s failed to open layer or sandbox VHD in %s: %s", title, d.dir(id), err)
+ }
+ logrus.Debugf("%s %s, size %d, isSandbox %t", title, vhdFilename, vhdSize, isSandbox)
+
+ // Add item to cache, or update existing item, but ensure we have the
+ // lock while updating items.
+ logrus.Debugf("%s: locking cacheMutex", title)
+ d.cacheMutex.Lock()
+ var cacheEntry *cacheItem
+ if entry, ok := d.cache[id]; !ok {
+ // The item is not currently in the cache.
+ cacheEntry = &cacheItem{
+ refCount: 1,
+ isSandbox: isSandbox,
+ hostPath: vhdFilename,
+ uvmPath: fmt.Sprintf("/mnt/%s", id),
+ isMounted: false, // we defer this as an optimisation
+ }
+ d.cache[id] = cacheEntry
+ logrus.Debugf("%s: added cache entry %+v", title, cacheEntry)
+ } else {
+ // Increment the reference counter in the cache.
+ cacheEntry = entry
+ logrus.Debugf("%s: locking cache item for increment", title)
+ cacheEntry.Lock()
+ cacheEntry.refCount++
+ logrus.Debugf("%s: releasing cache item for increment", title)
+ cacheEntry.Unlock()
+ logrus.Debugf("%s: incremented refcount on cache entry %+v", title, cacheEntry)
+ }
+ logrus.Debugf("%s: releasing cacheMutex", title)
+ d.cacheMutex.Unlock()
+
+ logrus.Debugf("%s %s success. %s: %+v: size %d", title, id, d.dir(id), cacheEntry, vhdSize)
+ return d.dir(id), nil
+}
+
+// Put does the reverse of get. If there are no more references to
+// the layer, it unmounts it from the utility VM.
+func (d *Driver) Put(id string) error {
+ title := fmt.Sprintf("lcowdriver: put: %s", id)
+
+ logrus.Debugf("%s: locking cacheMutex", title)
+ d.cacheMutex.Lock()
+ entry, ok := d.cache[id]
+ if !ok {
+ logrus.Debugf("%s: releasing cacheMutex on error path", title)
+ d.cacheMutex.Unlock()
+ return fmt.Errorf("%s possible ref-count error, or invalid id was passed to the graphdriver. Cannot handle id %s as it's not in the cache", title, id)
+ }
+
+ // Are we just decrementing the reference count?
+ logrus.Debugf("%s: locking cache item for possible decrement", title)
+ entry.Lock()
+ if entry.refCount > 1 {
+ entry.refCount--
+ logrus.Debugf("%s: releasing cache item for decrement and early get-out as refCount is now %d", title, entry.refCount)
+ entry.Unlock()
+ logrus.Debugf("%s: refCount decremented to %d. Releasing cacheMutex", title, entry.refCount)
+ d.cacheMutex.Unlock()
+ return nil
+ }
+ logrus.Debugf("%s: releasing cache item", title)
+ entry.Unlock()
+ logrus.Debugf("%s: releasing cacheMutex. Ref count has dropped to zero", title)
+ d.cacheMutex.Unlock()
+
+ // To reach this point, the reference count has dropped to zero. If we have
+ // done a mount and we are in global mode, then remove it. We don't
+ // need to remove it in safe mode as the service VM is going to be torn down
+ // anyway.
+
+ if d.globalMode {
+ logrus.Debugf("%s: locking cache item at zero ref-count", title)
+ entry.Lock()
+ defer func() {
+ logrus.Debugf("%s: releasing cache item at zero ref-count", title)
+ entry.Unlock()
+ }()
+ if entry.isMounted {
+ svm, err := d.getServiceVM(id, false)
+ if err != nil {
+ return err
+ }
+
+ logrus.Debugf("%s: Hot-Removing %s. Locking svm", title, entry.hostPath)
Locking svm", title, entry.hostPath) + svm.Lock() + if err := svm.config.HotRemoveVhd(entry.hostPath); err != nil { + logrus.Debugf("%s: releasing svm on error path", title) + svm.Unlock() + return fmt.Errorf("%s failed to hot-remove %s from global service utility VM: %s", title, entry.hostPath, err) + } + logrus.Debugf("%s: releasing svm", title) + svm.Unlock() + } + } + + // Remove from the cache map. + logrus.Debugf("%s: Locking cacheMutex to delete item from cache", title) + d.cacheMutex.Lock() + delete(d.cache, id) + logrus.Debugf("%s: releasing cacheMutex after item deleted from cache", title) + d.cacheMutex.Unlock() + + logrus.Debugf("%s %s: refCount 0. %s (%s) completed successfully", title, id, entry.hostPath, entry.uvmPath) + return nil +} + +// Cleanup ensures the information the driver stores is properly removed. +// We use this opportunity to cleanup any -removing folders which may be +// still left if the daemon was killed while it was removing a layer. +func (d *Driver) Cleanup() error { + title := "lcowdriver: cleanup" + + d.cacheMutex.Lock() + for k, v := range d.cache { + logrus.Debugf("%s cache entry: %s: %+v", title, k, v) + if v.refCount > 0 { + logrus.Warnf("%s leaked %s: %+v", title, k, v) + } + } + d.cacheMutex.Unlock() + + items, err := ioutil.ReadDir(d.dataRoot) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + // Note we don't return an error below - it's possible the files + // are locked. However, next time around after the daemon exits, + // we likely will be able to to cleanup successfully. Instead we log + // warnings if there are errors. + for _, item := range items { + if item.IsDir() && strings.HasSuffix(item.Name(), "-removing") { + if err := os.RemoveAll(filepath.Join(d.dataRoot, item.Name())); err != nil { + logrus.Warnf("%s failed to cleanup %s: %s", title, item.Name(), err) + } else { + logrus.Infof("%s cleaned up %s", title, item.Name()) + } + } + } + + // Cleanup any service VMs we have running, along with their scratch spaces. + // We don't take the lock for this as it's taken in terminateServiceVm. + for k, v := range d.serviceVms { + logrus.Debugf("%s svm entry: %s: %+v", title, k, v) + d.terminateServiceVM(k, "cleanup", true) + } + + return nil +} + +// Diff takes a layer (and it's parent layer which may be null, but +// is ignored by this implementation below) and returns a reader for +// a tarstream representing the layers contents. The id could be +// a read-only "layer.vhd" or a read-write "sandbox.vhdx". The semantics +// of this function dictate that the layer is already mounted. +// However, as we do lazy mounting as a performance optimisation, +// this will likely not be the case. 
+func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) {
+ title := fmt.Sprintf("lcowdriver: diff: %s", id)
+
+ logrus.Debugf("%s: locking cacheMutex", title)
+ d.cacheMutex.Lock()
+ if _, ok := d.cache[id]; !ok {
+ logrus.Debugf("%s: releasing cacheMutex on error path", title)
+ d.cacheMutex.Unlock()
+ return nil, fmt.Errorf("%s failed as %s is not in the cache", title, id)
+ }
+ cacheEntry := d.cache[id]
+ logrus.Debugf("%s: releasing cacheMutex", title)
+ d.cacheMutex.Unlock()
+
+ // Stat to get size
+ logrus.Debugf("%s: locking cacheEntry", title)
+ cacheEntry.Lock()
+ fileInfo, err := os.Stat(cacheEntry.hostPath)
+ if err != nil {
+ logrus.Debugf("%s: releasing cacheEntry on error path", title)
+ cacheEntry.Unlock()
+ return nil, fmt.Errorf("%s failed to stat %s: %s", title, cacheEntry.hostPath, err)
+ }
+ logrus.Debugf("%s: releasing cacheEntry", title)
+ cacheEntry.Unlock()
+
+ // Start the SVM with a mapped virtual disk. Note that if the SVM is
+ // already running and we are in global mode, this will be
+ // hot-added.
+ mvd := &hcsshim.MappedVirtualDisk{
+ HostPath: cacheEntry.hostPath,
+ ContainerPath: cacheEntry.uvmPath,
+ CreateInUtilityVM: true,
+ ReadOnly: true,
+ }
+
+ logrus.Debugf("%s: starting service VM", title)
+ svm, err := d.startServiceVMIfNotRunning(id, mvd, fmt.Sprintf("diff %s", id))
+ if err != nil {
+ return nil, err
+ }
+
+ // Set `isMounted` for the cache entry. Note that we re-scan the cache
+ // at this point as it's possible the cacheEntry changed during the
+ // long-running operation above when we weren't holding the cacheMutex lock.
+ logrus.Debugf("%s: locking cacheMutex for updating isMounted", title)
+ d.cacheMutex.Lock()
+ if _, ok := d.cache[id]; !ok {
+ logrus.Debugf("%s: releasing cacheMutex on error path of isMounted", title)
+ d.cacheMutex.Unlock()
+ d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false)
+ return nil, fmt.Errorf("%s failed as %s is not in the cache when updating isMounted", title, id)
+ }
+ cacheEntry = d.cache[id]
+ logrus.Debugf("%s: locking cacheEntry for updating isMounted", title)
+ cacheEntry.Lock()
+ cacheEntry.isMounted = true
+ logrus.Debugf("%s: releasing cacheEntry for updating isMounted", title)
+ cacheEntry.Unlock()
+ logrus.Debugf("%s: releasing cacheMutex for updating isMounted", title)
+ d.cacheMutex.Unlock()
+
+ // Obtain the tar stream for it
+ logrus.Debugf("%s %s, size %d, isSandbox %t", title, cacheEntry.hostPath, fileInfo.Size(), cacheEntry.isSandbox)
+ tarReadCloser, err := svm.config.VhdToTar(cacheEntry.hostPath, cacheEntry.uvmPath, cacheEntry.isSandbox, fileInfo.Size())
+ if err != nil {
+ d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false)
+ return nil, fmt.Errorf("%s failed to export layer to tar stream for id: %s, parent: %s : %s", title, id, parent, err)
+ }
+
+ logrus.Debugf("%s id %s parent %s completed successfully", title, id, parent)
+
+ // In safe/non-global mode, we can't tear down the service VM until things have been read.
+ if !d.globalMode {
+ return ioutils.NewReadCloserWrapper(tarReadCloser, func() error {
+ tarReadCloser.Close()
+ d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false)
+ return nil
+ }), nil
+ }
+ return tarReadCloser, nil
+}
+
+// ApplyDiff extracts the changeset from the given diff into the
+// layer with the specified id and parent, returning the size of the
+// new layer in bytes. The layer should not be mounted when calling
+// this function.
Another way of describing this is that ApplyDiff writes
+// to a new layer (a VHD in LCOW) the contents of a tarstream it's given.
+func (d *Driver) ApplyDiff(id, parent string, diff io.Reader) (int64, error) {
+ logrus.Debugf("lcowdriver: applydiff: id %s", id)
+
+ svm, err := d.startServiceVMIfNotRunning(id, nil, fmt.Sprintf("applydiff %s", id))
+ if err != nil {
+ return 0, err
+ }
+ defer d.terminateServiceVM(id, fmt.Sprintf("applydiff %s", id), false)
+
+ // TODO @jhowardmsft - the retries are temporary to overcome platform reliability issues.
+ // Obviously this will be removed as platform bugs are fixed.
+ retries := 0
+ for {
+ retries++
+ size, err := svm.config.TarToVhd(filepath.Join(d.dataRoot, id, layerFilename), diff)
+ if err != nil {
+ if retries <= 10 {
+ continue
+ }
+ return 0, err
+ }
+ return size, err
+ }
+}
+
+// Changes produces a list of changes between the specified layer
+// and its parent layer. If parent is "", then all changes will be ADD changes.
+// The layer should not be mounted when calling this function.
+func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
+ logrus.Debugf("lcowdriver: changes: id %s parent %s", id, parent)
+ // TODO @gupta-ak. Needs implementation with assistance from service VM
+ return nil, nil
+}
+
+// DiffSize calculates the changes between the specified layer
+// and its parent and returns the size in bytes of the changes
+// relative to its base filesystem directory.
+func (d *Driver) DiffSize(id, parent string) (size int64, err error) {
+ logrus.Debugf("lcowdriver: diffsize: id %s", id)
+ // TODO @gupta-ak. Needs implementation with assistance from service VM
+ return 0, nil
+}
+
+// GetMetadata returns custom driver information.
+func (d *Driver) GetMetadata(id string) (map[string]string, error) {
+ logrus.Debugf("lcowdriver: getmetadata: id %s", id)
+ m := make(map[string]string)
+ m["dir"] = d.dir(id)
+ return m, nil
+}
+
+// dir returns the absolute path to the layer.
+func (d *Driver) dir(id string) string {
+ return filepath.Join(d.dataRoot, filepath.Base(id))
+}
+
+// getLayerChain returns the layer chain information.
+func (d *Driver) getLayerChain(id string) ([]string, error) {
+ jPath := filepath.Join(d.dir(id), "layerchain.json")
+ logrus.Debugf("lcowdriver: getlayerchain: id %s json %s", id, jPath)
+ content, err := ioutil.ReadFile(jPath)
+ if os.IsNotExist(err) {
+ return nil, nil
+ } else if err != nil {
+ return nil, fmt.Errorf("lcowdriver: getlayerchain: %s unable to read layerchain file %s: %s", id, jPath, err)
+ }
+
+ var layerChain []string
+ err = json.Unmarshal(content, &layerChain)
+ if err != nil {
+ return nil, fmt.Errorf("lcowdriver: getlayerchain: %s failed to unmarshal layerchain file %s: %s", id, jPath, err)
+ }
+ return layerChain, nil
+}
+
+// setLayerChain stores the layer chain information on disk.
+func (d *Driver) setLayerChain(id string, chain []string) error {
+ content, err := json.Marshal(&chain)
+ if err != nil {
+ return fmt.Errorf("lcowdriver: setlayerchain: %s failed to marshal layerchain json: %s", id, err)
+ }
+
+ jPath := filepath.Join(d.dir(id), "layerchain.json")
+ logrus.Debugf("lcowdriver: setlayerchain: id %s json %s", id, jPath)
+ err = ioutil.WriteFile(jPath, content, 0600)
+ if err != nil {
+ return fmt.Errorf("lcowdriver: setlayerchain: %s failed to write layerchain file: %s", id, err)
+ }
+ return nil
+}
+
+// getLayerDetails is a utility for getting a file name, size and an indication of
+// sandbox for a VHD(x) in a folder.
A read-only layer will be layer.vhd. A
+// read-write layer will be sandbox.vhdx.
+func getLayerDetails(folder string) (string, int64, bool, error) {
+ var fileInfo os.FileInfo
+ isSandbox := false
+ filename := filepath.Join(folder, layerFilename)
+ var err error
+
+ if fileInfo, err = os.Stat(filename); err != nil {
+ filename = filepath.Join(folder, sandboxFilename)
+ if fileInfo, err = os.Stat(filename); err != nil {
+ if os.IsNotExist(err) {
+ return "", 0, isSandbox, fmt.Errorf("could not find layer or sandbox in %s", folder)
+ }
+ return "", 0, isSandbox, fmt.Errorf("error locating layer or sandbox in %s: %s", folder, err)
+ }
+ isSandbox = true
+ }
+ return filename, fileInfo.Size(), isSandbox, nil
+}
diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/overlay/copy.go b/vendor/github.com/moby/moby/daemon/graphdriver/overlay/copy.go
new file mode 100644
index 000000000..53ea5bff1
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/graphdriver/overlay/copy.go
@@ -0,0 +1,175 @@
+// +build linux
+
+package overlay
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "syscall"
+ "time"
+
+ "github.com/docker/docker/pkg/pools"
+ "github.com/docker/docker/pkg/system"
+ rsystem "github.com/opencontainers/runc/libcontainer/system"
+ "golang.org/x/sys/unix"
+)
+
+type copyFlags int
+
+const (
+ copyHardlink copyFlags = 1 << iota
+)
+
+func copyRegular(srcPath, dstPath string, mode os.FileMode) error {
+ srcFile, err := os.Open(srcPath)
+ if err != nil {
+ return err
+ }
+ defer srcFile.Close()
+
+ dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE, mode)
+ if err != nil {
+ return err
+ }
+ defer dstFile.Close()
+
+ _, err = pools.Copy(dstFile, srcFile)
+
+ return err
+}
+
+func copyXattr(srcPath, dstPath, attr string) error {
+ data, err := system.Lgetxattr(srcPath, attr)
+ if err != nil {
+ return err
+ }
+ if data != nil {
+ if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func copyDir(srcDir, dstDir string, flags copyFlags) error {
+ err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Rebase path
+ relPath, err := filepath.Rel(srcDir, srcPath)
+ if err != nil {
+ return err
+ }
+
+ dstPath := filepath.Join(dstDir, relPath)
+
+ stat, ok := f.Sys().(*syscall.Stat_t)
+ if !ok {
+ return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath)
+ }
+
+ isHardlink := false
+
+ switch f.Mode() & os.ModeType {
+ case 0: // Regular file
+ if flags&copyHardlink != 0 {
+ isHardlink = true
+ if err := os.Link(srcPath, dstPath); err != nil {
+ return err
+ }
+ } else {
+ if err := copyRegular(srcPath, dstPath, f.Mode()); err != nil {
+ return err
+ }
+ }
+
+ case os.ModeDir:
+ if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) {
+ return err
+ }
+
+ case os.ModeSymlink:
+ link, err := os.Readlink(srcPath)
+ if err != nil {
+ return err
+ }
+
+ if err := os.Symlink(link, dstPath); err != nil {
+ return err
+ }
+
+ case os.ModeNamedPipe:
+ fallthrough
+ case os.ModeSocket:
+ if err := unix.Mkfifo(dstPath, stat.Mode); err != nil {
+ return err
+ }
+
+ case os.ModeDevice:
+ if rsystem.RunningInUserNS() {
+ // cannot create a device if running in user namespace
+ return nil
+ }
+ if err := unix.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil {
+ return err
+ }
+
+ default:
+ return fmt.Errorf("Unknown file type for %s", srcPath)
+ }
+
+ // Everything below is copying metadata from src to dst. 
All this metadata + // already shares an inode for hardlinks. + if isHardlink { + return nil + } + + if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil { + return err + } + + if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil { + return err + } + + // We need to copy this attribute if it appears in an overlay upper layer, as + // this function is used to copy those. It is set by overlay if a directory + // is removed and then re-created and should not inherit anything from the + // same dir in the lower dir. + if err := copyXattr(srcPath, dstPath, "trusted.overlay.opaque"); err != nil { + return err + } + + isSymlink := f.Mode()&os.ModeSymlink != 0 + + // There is no LChmod, so ignore mode for symlink. Also, this + // must happen after chown, as that can modify the file mode + if !isSymlink { + if err := os.Chmod(dstPath, f.Mode()); err != nil { + return err + } + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + if !isSymlink { + aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + mTime := time.Unix(int64(stat.Mtim.Sec), int64(stat.Mtim.Nsec)) + if err := system.Chtimes(dstPath, aTime, mTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{stat.Atim, stat.Mtim} + if err := system.LUtimesNano(dstPath, ts); err != nil { + return err + } + } + return nil + }) + return err +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/overlay/overlay.go b/vendor/github.com/moby/moby/daemon/graphdriver/overlay/overlay.go new file mode 100644 index 000000000..9db2e9405 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/overlay/overlay.go @@ -0,0 +1,469 @@ +// +build linux + +package overlay + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/overlayutils" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/fsutils" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/locker" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/system" + "github.com/opencontainers/selinux/go-selinux/label" + "golang.org/x/sys/unix" +) + +// This is a small wrapper over the NaiveDiffWriter that lets us have a custom +// implementation of ApplyDiff() + +var ( + // ErrApplyDiffFallback is returned to indicate that a normal ApplyDiff is applied as a fallback from Naive diff writer. + ErrApplyDiffFallback = fmt.Errorf("Fall back to normal ApplyDiff") + backingFs = "" +) + +// ApplyDiffProtoDriver wraps the ProtoDriver by extending the interface with ApplyDiff method. +type ApplyDiffProtoDriver interface { + graphdriver.ProtoDriver + // ApplyDiff writes the diff to the archive for the given id and parent id. + // It returns the size in bytes written if successful, an error ErrApplyDiffFallback is returned otherwise. + ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) +} + +type naiveDiffDriverWithApply struct { + graphdriver.Driver + applyDiff ApplyDiffProtoDriver +} + +// NaiveDiffDriverWithApply returns a NaiveDiff driver with custom ApplyDiff. 
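+// (Illustrative: Init below constructs the returned driver as
+//
+//	NaiveDiffDriverWithApply(d, uidMaps, gidMaps)
+//
+// so ApplyDiff can try the hardlink-copy fast path first and fall back to the
+// naive implementation when ErrApplyDiffFallback is returned.)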
+func NaiveDiffDriverWithApply(driver ApplyDiffProtoDriver, uidMaps, gidMaps []idtools.IDMap) graphdriver.Driver {
+ return &naiveDiffDriverWithApply{
+ Driver: graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps),
+ applyDiff: driver,
+ }
+}
+
+// ApplyDiff creates a diff layer with either the NaiveDiffDriver or with a fallback.
+func (d *naiveDiffDriverWithApply) ApplyDiff(id, parent string, diff io.Reader) (int64, error) {
+ b, err := d.applyDiff.ApplyDiff(id, parent, diff)
+ if err == ErrApplyDiffFallback {
+ return d.Driver.ApplyDiff(id, parent, diff)
+ }
+ return b, err
+}
+
+// This backend uses the overlay union filesystem for containers
+// plus hard link file sharing for images.
+
+// Each container/image can have a "root" subdirectory which is a plain
+// filesystem hierarchy, or it can use overlay.
+
+// If overlay is used there is an "upper" directory and a "lower-id"
+// file, as well as "merged" and "work" directories. The "upper"
+// directory has the upper layer of the overlay, and "lower-id" contains
+// the id of the parent whose "root" directory shall be used as the lower
+// layer in the overlay. The overlay itself is mounted in the "merged"
+// directory, and the "work" dir is needed for overlay to work.
+
+// When an overlay layer is created there are two cases: either the
+// parent has a "root" dir, in which case we start out with an empty "upper"
+// directory overlaid on the parent's root. This is typically the
+// case with the init layer of a container which is based on an image.
+// If there is no "root" in the parent, we inherit the lower-id from
+// the parent and start by making a copy in the parent's "upper" dir.
+// This is typically the case for a container layer which copies
+// its parent -init upper layer.

+// Additionally, we have a custom implementation of ApplyLayer
+// which makes a recursive copy of the parent "root" layer using
+// hardlinks to share file data, and then applies the layer on top
+// of that. This means all child images share file (but not directory)
+// data with the parent.
+
+// Driver contains information about the home directory and the list of active mounts that are created using this driver.
+type Driver struct {
+ home string
+ uidMaps []idtools.IDMap
+ gidMaps []idtools.IDMap
+ ctr *graphdriver.RefCounter
+ supportsDType bool
+ locker *locker.Locker
+}
+
+func init() {
+ graphdriver.Register("overlay", Init)
+}
+
+// Init returns the NaiveDiffDriver, a native diff driver for overlay filesystem.
+// If overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as error.
+// If an overlay filesystem is not supported over an existing filesystem then error graphdriver.ErrIncompatibleFS is returned.
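+// (Illustrative: the daemon selects this driver at startup with e.g.
+//
+//	dockerd --storage-driver=overlay
+//
+// assuming a compatible backing filesystem; see the fsMagic checks below.)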
+func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + + if err := supportsOverlay(); err != nil { + return nil, graphdriver.ErrNotSupported + } + + fsMagic, err := graphdriver.GetFSMagic(home) + if err != nil { + return nil, err + } + if fsName, ok := graphdriver.FsNames[fsMagic]; ok { + backingFs = fsName + } + + switch fsMagic { + case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicZfs, graphdriver.FsMagicEcryptfs: + logrus.Errorf("'overlay' is not supported over %s", backingFs) + return nil, graphdriver.ErrIncompatibleFS + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + // Create the driver home dir + if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { + return nil, err + } + + if err := mount.MakePrivate(home); err != nil { + return nil, err + } + + supportsDType, err := fsutils.SupportsDType(home) + if err != nil { + return nil, err + } + if !supportsDType { + // not a fatal error until v17.12 (#27443) + logrus.Warn(overlayutils.ErrDTypeNotSupported("overlay", backingFs)) + } + + d := &Driver{ + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), + supportsDType: supportsDType, + locker: locker.New(), + } + + return NaiveDiffDriverWithApply(d, uidMaps, gidMaps), nil +} + +func supportsOverlay() error { + // We can try to modprobe overlay first before looking at + // proc/filesystems for when overlay is supported + exec.Command("modprobe", "overlay").Run() + + f, err := os.Open("/proc/filesystems") + if err != nil { + return err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if s.Text() == "nodev\toverlay" { + return nil + } + } + logrus.Error("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") + return graphdriver.ErrNotSupported +} + +func (d *Driver) String() string { + return "overlay" +} + +// Status returns current driver information in a two dimensional string array. +// Output contains "Backing Filesystem" used in this implementation. +func (d *Driver) Status() [][2]string { + return [][2]string{ + {"Backing Filesystem", backingFs}, + {"Supports d_type", strconv.FormatBool(d.supportsDType)}, + } +} + +// GetMetadata returns meta data about the overlay driver such as root, LowerDir, UpperDir, WorkDir and MergeDir used to store data. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return nil, err + } + + metadata := make(map[string]string) + + // If id has a root, it is an image + rootDir := path.Join(dir, "root") + if _, err := os.Stat(rootDir); err == nil { + metadata["RootDir"] = rootDir + return metadata, nil + } + + lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id")) + if err != nil { + return nil, err + } + + metadata["LowerDir"] = path.Join(d.dir(string(lowerID)), "root") + metadata["UpperDir"] = path.Join(dir, "upper") + metadata["WorkDir"] = path.Join(dir, "work") + metadata["MergedDir"] = path.Join(dir, "merged") + + return metadata, nil +} + +// Cleanup any state created by overlay which should be cleaned when daemon +// is being shutdown. For now, we just have to unmount the bind mounted +// we had created. 
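+// (The unmount below pairs with the mount.MakePrivate(home) call made in Init.)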
+func (d *Driver) Cleanup() error { + return mount.Unmount(d.home) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) +} + +// Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. +// The parent filesystem is used to configure these directories for the overlay. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { + + if opts != nil && len(opts.StorageOpt) != 0 { + return fmt.Errorf("--storage-opt is not supported for overlay") + } + + dir := d.dir(id) + + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil { + return err + } + + defer func() { + // Clean up on failure + if retErr != nil { + os.RemoveAll(dir) + } + }() + + // Toplevel images are just a "root" dir + if parent == "" { + if err := idtools.MkdirAs(path.Join(dir, "root"), 0755, rootUID, rootGID); err != nil { + return err + } + return nil + } + + parentDir := d.dir(parent) + + // Ensure parent exists + if _, err := os.Lstat(parentDir); err != nil { + return err + } + + // If parent has a root, just do an overlay to it + parentRoot := path.Join(parentDir, "root") + + if s, err := os.Lstat(parentRoot); err == nil { + if err := idtools.MkdirAs(path.Join(dir, "upper"), s.Mode(), rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := ioutil.WriteFile(path.Join(dir, "lower-id"), []byte(parent), 0666); err != nil { + return err + } + return nil + } + + // Otherwise, copy the upper and the lower-id from the parent + + lowerID, err := ioutil.ReadFile(path.Join(parentDir, "lower-id")) + if err != nil { + return err + } + + if err := ioutil.WriteFile(path.Join(dir, "lower-id"), lowerID, 0666); err != nil { + return err + } + + parentUpperDir := path.Join(parentDir, "upper") + s, err := os.Lstat(parentUpperDir) + if err != nil { + return err + } + + upperDir := path.Join(dir, "upper") + if err := idtools.MkdirAs(upperDir, s.Mode(), rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { + return err + } + + return copyDir(parentUpperDir, upperDir, 0) +} + +func (d *Driver) dir(id string) string { + return path.Join(d.home, id) +} + +// Remove cleans the directories that are created for this id. +func (d *Driver) Remove(id string) error { + d.locker.Lock(id) + defer d.locker.Unlock(id) + return system.EnsureRemoveAll(d.dir(id)) +} + +// Get creates and mounts the required file system for the given id and returns the mount path. 
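+//
+// The mount created below uses options of the form (hypothetical paths under
+// the driver home):
+//
+//	lowerdir=<home>/<lower-id>/root,upperdir=<home>/<id>/upper,workdir=<home>/<id>/work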
+func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
+ d.locker.Lock(id)
+ defer d.locker.Unlock(id)
+ dir := d.dir(id)
+ if _, err := os.Stat(dir); err != nil {
+ return "", err
+ }
+ // If id has a root, just return it
+ rootDir := path.Join(dir, "root")
+ if _, err := os.Stat(rootDir); err == nil {
+ return rootDir, nil
+ }
+ mergedDir := path.Join(dir, "merged")
+ if count := d.ctr.Increment(mergedDir); count > 1 {
+ return mergedDir, nil
+ }
+ defer func() {
+ if err != nil {
+ if c := d.ctr.Decrement(mergedDir); c <= 0 {
+ unix.Unmount(mergedDir, 0)
+ }
+ }
+ }()
+ lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id"))
+ if err != nil {
+ return "", err
+ }
+ var (
+ lowerDir = path.Join(d.dir(string(lowerID)), "root")
+ upperDir = path.Join(dir, "upper")
+ workDir = path.Join(dir, "work")
+ opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir)
+ )
+ if err := unix.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil {
+ return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err)
+ }
+ // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a
+ // user namespace requires this to move a directory from lower to upper.
+ rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
+ if err != nil {
+ return "", err
+ }
+ if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil {
+ return "", err
+ }
+ return mergedDir, nil
+}
+
+// Put unmounts the mount path created for the given id.
+func (d *Driver) Put(id string) error {
+ d.locker.Lock(id)
+ defer d.locker.Unlock(id)
+ // If id has a root, just return
+ if _, err := os.Stat(path.Join(d.dir(id), "root")); err == nil {
+ return nil
+ }
+ mountpoint := path.Join(d.dir(id), "merged")
+ if count := d.ctr.Decrement(mountpoint); count > 0 {
+ return nil
+ }
+ if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil {
+ logrus.Debugf("Failed to unmount %s overlay: %v", id, err)
+ }
+ return nil
+}
+
+// ApplyDiff applies the new layer on top of the root; if parent does not exist
+// it will return an ErrApplyDiffFallback error.
+func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) {
+ dir := d.dir(id)
+
+ if parent == "" {
+ return 0, ErrApplyDiffFallback
+ }
+
+ parentRootDir := path.Join(d.dir(parent), "root")
+ if _, err := os.Stat(parentRootDir); err != nil {
+ return 0, ErrApplyDiffFallback
+ }
+
+ // We now know there is a parent, and it has a "root" directory containing
+ // the full root filesystem. We can just hardlink it and apply the
+ // layer. 
This relies on two things:
+// 1) ApplyDiff is only run once on a clean (no writes to upper layer) container
+// 2) ApplyDiff doesn't do any in-place writes to files (would break hardlinks)
+// These are all currently true and are not expected to break
+
+ tmpRootDir, err := ioutil.TempDir(dir, "tmproot")
+ if err != nil {
+ return 0, err
+ }
+ defer func() {
+ if err != nil {
+ os.RemoveAll(tmpRootDir)
+ } else {
+ os.RemoveAll(path.Join(dir, "upper"))
+ os.RemoveAll(path.Join(dir, "work"))
+ os.RemoveAll(path.Join(dir, "merged"))
+ os.RemoveAll(path.Join(dir, "lower-id"))
+ }
+ }()
+
+ if err = copyDir(parentRootDir, tmpRootDir, copyHardlink); err != nil {
+ return 0, err
+ }
+
+ options := &archive.TarOptions{UIDMaps: d.uidMaps, GIDMaps: d.gidMaps}
+ if size, err = graphdriver.ApplyUncompressedLayer(tmpRootDir, diff, options); err != nil {
+ return 0, err
+ }
+
+ rootDir := path.Join(dir, "root")
+ if err := os.Rename(tmpRootDir, rootDir); err != nil {
+ return 0, err
+ }
+
+ return
+}
+
+// Exists checks to see if the id is already mounted.
+func (d *Driver) Exists(id string) bool {
+ _, err := os.Stat(d.dir(id))
+ return err == nil
+}
diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/overlay/overlay_test.go b/vendor/github.com/moby/moby/daemon/graphdriver/overlay/overlay_test.go
new file mode 100644
index 000000000..34b6d801f
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/graphdriver/overlay/overlay_test.go
@@ -0,0 +1,93 @@
+// +build linux
+
+package overlay
+
+import (
+ "testing"
+
+ "github.com/docker/docker/daemon/graphdriver"
+ "github.com/docker/docker/daemon/graphdriver/graphtest"
+ "github.com/docker/docker/pkg/archive"
+)
+
+func init() {
+ // Do not use chroot, to speed run time and allow archive
+ // errors or hangs to be debugged directly from the test process.
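+ // (That is, layers are applied without the usual chroot indirection for the
+ // duration of these tests.)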
+ graphdriver.ApplyUncompressedLayer = archive.ApplyUncompressedLayer +} + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestOverlaySetup and TestOverlayTeardown +func TestOverlaySetup(t *testing.T) { + graphtest.GetDriver(t, "overlay") +} + +func TestOverlayCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "overlay") +} + +func TestOverlayCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "overlay") +} + +func TestOverlayCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "overlay") +} + +func TestOverlay50LayerRead(t *testing.T) { + graphtest.DriverTestDeepLayerRead(t, 50, "overlay") +} + +// Fails due to bug in calculating changes after apply +// likely related to https://github.com/docker/docker/issues/21555 +func TestOverlayDiffApply10Files(t *testing.T) { + t.Skipf("Fails to compute changes after apply intermittently") + graphtest.DriverTestDiffApply(t, 10, "overlay") +} + +func TestOverlayChanges(t *testing.T) { + t.Skipf("Fails to compute changes intermittently") + graphtest.DriverTestChanges(t, "overlay") +} + +func TestOverlayTeardown(t *testing.T) { + graphtest.PutDriver(t) +} + +// Benchmarks should always setup new driver + +func BenchmarkExists(b *testing.B) { + graphtest.DriverBenchExists(b, "overlay") +} + +func BenchmarkGetEmpty(b *testing.B) { + graphtest.DriverBenchGetEmpty(b, "overlay") +} + +func BenchmarkDiffBase(b *testing.B) { + graphtest.DriverBenchDiffBase(b, "overlay") +} + +func BenchmarkDiffSmallUpper(b *testing.B) { + graphtest.DriverBenchDiffN(b, 10, 10, "overlay") +} + +func BenchmarkDiff10KFileUpper(b *testing.B) { + graphtest.DriverBenchDiffN(b, 10, 10000, "overlay") +} + +func BenchmarkDiff10KFilesBottom(b *testing.B) { + graphtest.DriverBenchDiffN(b, 10000, 10, "overlay") +} + +func BenchmarkDiffApply100(b *testing.B) { + graphtest.DriverBenchDiffApplyN(b, 100, "overlay") +} + +func BenchmarkDiff20Layers(b *testing.B) { + graphtest.DriverBenchDeepLayerDiff(b, 20, "overlay") +} + +func BenchmarkRead20Layers(b *testing.B) { + graphtest.DriverBenchDeepLayerRead(b, 20, "overlay") +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/overlay/overlay_unsupported.go b/vendor/github.com/moby/moby/daemon/graphdriver/overlay/overlay_unsupported.go new file mode 100644 index 000000000..3dbb4de44 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/overlay/overlay_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux + +package overlay diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/overlay2/check.go b/vendor/github.com/moby/moby/daemon/graphdriver/overlay2/check.go new file mode 100644 index 000000000..35e088aa3 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/overlay2/check.go @@ -0,0 +1,79 @@ +// +build linux + +package overlay2 + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +// hasOpaqueCopyUpBug checks whether the filesystem has a bug +// which copies up the opaque flag when copying up an opaque +// directory. When this bug exists naive diff should be used. 
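+// (Per the error message below, the copy-up bug is expected to be fixed in
+// kernel 4.8 and later.)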
+func hasOpaqueCopyUpBug(d string) error { + td, err := ioutil.TempDir(d, "opaque-bug-check") + if err != nil { + return err + } + defer func() { + if err := os.RemoveAll(td); err != nil { + logrus.Warnf("Failed to remove check directory %v: %v", td, err) + } + }() + + // Make directories l1/d, l2/d, l3, work, merged + if err := os.MkdirAll(filepath.Join(td, "l1", "d"), 0755); err != nil { + return err + } + if err := os.MkdirAll(filepath.Join(td, "l2", "d"), 0755); err != nil { + return err + } + if err := os.Mkdir(filepath.Join(td, "l3"), 0755); err != nil { + return err + } + if err := os.Mkdir(filepath.Join(td, "work"), 0755); err != nil { + return err + } + if err := os.Mkdir(filepath.Join(td, "merged"), 0755); err != nil { + return err + } + + // Mark l2/d as opaque + if err := system.Lsetxattr(filepath.Join(td, "l2", "d"), "trusted.overlay.opaque", []byte("y"), 0); err != nil { + return errors.Wrap(err, "failed to set opaque flag on middle layer") + } + + opts := fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", path.Join(td, "l2"), path.Join(td, "l1"), path.Join(td, "l3"), path.Join(td, "work")) + if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", 0, opts); err != nil { + return errors.Wrap(err, "failed to mount overlay") + } + defer func() { + if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil { + logrus.Warnf("Failed to unmount check directory %v: %v", filepath.Join(td, "merged"), err) + } + }() + + // Touch file in d to force copy up of opaque directory "d" from "l2" to "l3" + if err := ioutil.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0644); err != nil { + return errors.Wrap(err, "failed to write to merged directory") + } + + // Check l3/d does not have opaque flag + xattrOpaque, err := system.Lgetxattr(filepath.Join(td, "l3", "d"), "trusted.overlay.opaque") + if err != nil { + return errors.Wrap(err, "failed to read opaque flag on upper layer") + } + if string(xattrOpaque) == "y" { + return errors.New("opaque flag erroneously copied up, consider update to kernel 4.8 or later to fix") + } + + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/overlay2/mount.go b/vendor/github.com/moby/moby/daemon/graphdriver/overlay2/mount.go new file mode 100644 index 000000000..77bff0662 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/overlay2/mount.go @@ -0,0 +1,88 @@ +// +build linux + +package overlay2 + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "os" + "runtime" + + "github.com/docker/docker/pkg/reexec" + "golang.org/x/sys/unix" +) + +func init() { + reexec.Register("docker-mountfrom", mountFromMain) +} + +func fatal(err error) { + fmt.Fprint(os.Stderr, err) + os.Exit(1) +} + +type mountOptions struct { + Device string + Target string + Type string + Label string + Flag uint32 +} + +func mountFrom(dir, device, target, mType string, flags uintptr, label string) error { + options := &mountOptions{ + Device: device, + Target: target, + Type: mType, + Flag: uint32(flags), + Label: label, + } + + cmd := reexec.Command("docker-mountfrom", dir) + w, err := cmd.StdinPipe() + if err != nil { + return fmt.Errorf("mountfrom error on pipe creation: %v", err) + } + + output := bytes.NewBuffer(nil) + cmd.Stdout = output + cmd.Stderr = output + + if err := cmd.Start(); err != nil { + return fmt.Errorf("mountfrom error on re-exec cmd: %v", err) + } + //write the options to the pipe for the untar exec to read + if err := json.NewEncoder(w).Encode(options); err != nil { + return 
fmt.Errorf("mountfrom json encode to pipe failed: %v", err) + } + w.Close() + + if err := cmd.Wait(); err != nil { + return fmt.Errorf("mountfrom re-exec error: %v: output: %s", err, output) + } + return nil +} + +// mountfromMain is the entry-point for docker-mountfrom on re-exec. +func mountFromMain() { + runtime.LockOSThread() + flag.Parse() + + var options *mountOptions + + if err := json.NewDecoder(os.Stdin).Decode(&options); err != nil { + fatal(err) + } + + if err := os.Chdir(flag.Arg(0)); err != nil { + fatal(err) + } + + if err := unix.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label); err != nil { + fatal(err) + } + + os.Exit(0) +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/overlay2/overlay.go b/vendor/github.com/moby/moby/daemon/graphdriver/overlay2/overlay.go new file mode 100644 index 000000000..4ef2a8caa --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/overlay2/overlay.go @@ -0,0 +1,724 @@ +// +build linux + +package overlay2 + +import ( + "bufio" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strconv" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/overlayutils" + "github.com/docker/docker/daemon/graphdriver/quota" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/directory" + "github.com/docker/docker/pkg/fsutils" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/locker" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/system" + units "github.com/docker/go-units" + + "github.com/opencontainers/selinux/go-selinux/label" + "golang.org/x/sys/unix" +) + +var ( + // untar defines the untar method + untar = chrootarchive.UntarUncompressed +) + +// This backend uses the overlay union filesystem for containers +// with diff directories for each layer. + +// This version of the overlay driver requires at least kernel +// 4.0.0 in order to support mounting multiple diff directories. + +// Each container/image has at least a "diff" directory and "link" file. +// If there is also a "lower" file when there are diff layers +// below as well as "merged" and "work" directories. The "diff" directory +// has the upper layer of the overlay and is used to capture any +// changes to the layer. The "lower" file contains all the lower layer +// mounts separated by ":" and ordered from uppermost to lowermost +// layers. The overlay itself is mounted in the "merged" directory, +// and the "work" dir is needed for overlay to work. + +// The "link" file for each layer contains a unique string for the layer. +// Under the "l" directory at the root there will be a symbolic link +// with that unique string pointing the "diff" directory for the layer. +// The symbolic links are used to reference lower layers in the "lower" +// file and on mount. The links are used to shorten the total length +// of a layer reference without requiring changes to the layer identifier +// or root directory. Mounts are always done relative to root and +// referencing the symbolic links in order to ensure the number of +// lower directories can fit in a single page for making the mount +// syscall. A hard upper limit of 128 lower layers is enforced to ensure +// that mounts do not fail due to length. 
+
+const (
+ driverName = "overlay2"
+ linkDir = "l"
+ lowerFile = "lower"
+ maxDepth = 128
+
+ // idLength represents the number of random characters
+ // which can be used to create the unique link identifier
+ // for every layer. If this value is too long then the
+ // page size limit for the mount command may be exceeded.
+ // The idLength should be selected such that the following equation
+ // is true (512 is a buffer for label metadata).
+ // ((idLength + len(linkDir) + 1) * maxDepth) <= (pageSize - 512)
+ idLength = 26
+)
+
+type overlayOptions struct {
+ overrideKernelCheck bool
+ quota quota.Quota
+}
+
+// Driver contains information about the home directory and the list of active mounts that are created using this driver.
+type Driver struct {
+ home string
+ uidMaps []idtools.IDMap
+ gidMaps []idtools.IDMap
+ ctr *graphdriver.RefCounter
+ quotaCtl *quota.Control
+ options overlayOptions
+ naiveDiff graphdriver.DiffDriver
+ supportsDType bool
+ locker *locker.Locker
+}
+
+var (
+ backingFs = ""
+ projectQuotaSupported = false
+
+ useNaiveDiffLock sync.Once
+ useNaiveDiffOnly bool
+)
+
+func init() {
+ graphdriver.Register(driverName, Init)
+}
+
+// Init returns a native diff driver for the overlay filesystem.
+// If overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as error.
+// If an overlay filesystem is not supported over an existing filesystem then error graphdriver.ErrIncompatibleFS is returned.
+func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
+ opts, err := parseOptions(options)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := supportsOverlay(); err != nil {
+ return nil, graphdriver.ErrNotSupported
+ }
+
+ // require kernel 4.0.0 to ensure multiple lower dirs are supported
+ v, err := kernel.GetKernelVersion()
+ if err != nil {
+ return nil, err
+ }
+ if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 0, Minor: 0}) < 0 {
+ if !opts.overrideKernelCheck {
+ return nil, graphdriver.ErrNotSupported
+ }
+ logrus.Warn("Using pre-4.0.0 kernel for overlay2, mount failures may require kernel update")
+ }
+
+ fsMagic, err := graphdriver.GetFSMagic(home)
+ if err != nil {
+ return nil, err
+ }
+ if fsName, ok := graphdriver.FsNames[fsMagic]; ok {
+ backingFs = fsName
+ }
+
+ // check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs
+ switch fsMagic {
+ case graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs:
+ logrus.Errorf("'overlay2' is not supported over %s", backingFs)
+ return nil, graphdriver.ErrIncompatibleFS
+ case graphdriver.FsMagicBtrfs:
+ // Support for OverlayFS on BTRFS was added in kernel 4.7
+ // See https://btrfs.wiki.kernel.org/index.php/Changelog
+ if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 7, Minor: 0}) < 0 {
+ if !opts.overrideKernelCheck {
+ logrus.Errorf("'overlay2' requires kernel 4.7 to use on %s", backingFs)
+ return nil, graphdriver.ErrIncompatibleFS
+ }
+ logrus.Warn("Using pre-4.7.0 kernel for overlay2 on btrfs, may require kernel update")
+ }
+ }
+
+ rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
+ if err != nil {
+ return nil, err
+ }
+ // Create the driver home dir
+ if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
+ return nil, err
+ }
+
+ if err := mount.MakePrivate(home); err != nil {
+ return nil, err
+ }
+
+ supportsDType, err := 
fsutils.SupportsDType(home) + if err != nil { + return nil, err + } + if !supportsDType { + // not a fatal error until v17.12 (#27443) + logrus.Warn(overlayutils.ErrDTypeNotSupported("overlay2", backingFs)) + } + + d := &Driver{ + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), + supportsDType: supportsDType, + locker: locker.New(), + options: *opts, + } + + d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps) + + if backingFs == "xfs" { + // Try to enable project quota support over xfs. + if d.quotaCtl, err = quota.NewControl(home); err == nil { + projectQuotaSupported = true + } else if opts.quota.Size > 0 { + return nil, fmt.Errorf("Storage option overlay2.size not supported. Filesystem does not support Project Quota: %v", err) + } + } else if opts.quota.Size > 0 { + // if xfs is not the backing fs then error out if the storage-opt overlay2.size is used. + return nil, fmt.Errorf("Storage Option overlay2.size only supported for backingFS XFS. Found %v", backingFs) + } + + logrus.Debugf("backingFs=%s, projectQuotaSupported=%v", backingFs, projectQuotaSupported) + + return d, nil +} + +func parseOptions(options []string) (*overlayOptions, error) { + o := &overlayOptions{} + for _, option := range options { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return nil, err + } + key = strings.ToLower(key) + switch key { + case "overlay2.override_kernel_check": + o.overrideKernelCheck, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + case "overlay2.size": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + o.quota.Size = uint64(size) + default: + return nil, fmt.Errorf("overlay2: unknown option %s", key) + } + } + return o, nil +} + +func supportsOverlay() error { + // We can try to modprobe overlay first before looking at + // proc/filesystems for when overlay is supported + exec.Command("modprobe", "overlay").Run() + + f, err := os.Open("/proc/filesystems") + if err != nil { + return err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if s.Text() == "nodev\toverlay" { + return nil + } + } + logrus.Error("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") + return graphdriver.ErrNotSupported +} + +func useNaiveDiff(home string) bool { + useNaiveDiffLock.Do(func() { + if err := hasOpaqueCopyUpBug(home); err != nil { + logrus.Warnf("Not using native diff for overlay2: %v", err) + useNaiveDiffOnly = true + } + }) + return useNaiveDiffOnly +} + +func (d *Driver) String() string { + return driverName +} + +// Status returns current driver information in a two dimensional string array. +// Output contains "Backing Filesystem" used in this implementation. +func (d *Driver) Status() [][2]string { + return [][2]string{ + {"Backing Filesystem", backingFs}, + {"Supports d_type", strconv.FormatBool(d.supportsDType)}, + {"Native Overlay Diff", strconv.FormatBool(!useNaiveDiff(d.home))}, + } +} + +// GetMetadata returns meta data about the overlay driver such as +// LowerDir, UpperDir, WorkDir and MergeDir used to store data. 
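+//
+// Example shape of the result (hypothetical id and paths):
+//
+//	LowerDir:  <home>/<parent-id>/diff:<home>/<grandparent-id>/diff
+//	UpperDir:  <home>/<id>/diff
+//	WorkDir:   <home>/<id>/work
+//	MergedDir: <home>/<id>/merged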
+func (d *Driver) GetMetadata(id string) (map[string]string, error) { + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return nil, err + } + + metadata := map[string]string{ + "WorkDir": path.Join(dir, "work"), + "MergedDir": path.Join(dir, "merged"), + "UpperDir": path.Join(dir, "diff"), + } + + lowerDirs, err := d.getLowerDirs(id) + if err != nil { + return nil, err + } + if len(lowerDirs) > 0 { + metadata["LowerDir"] = strings.Join(lowerDirs, ":") + } + + return metadata, nil +} + +// Cleanup any state created by overlay which should be cleaned when daemon +// is being shutdown. For now, we just have to unmount the bind mounted +// we had created. +func (d *Driver) Cleanup() error { + return mount.Unmount(d.home) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + if opts != nil && len(opts.StorageOpt) != 0 && !projectQuotaSupported { + return fmt.Errorf("--storage-opt is supported only for overlay over xfs with 'pquota' mount option") + } + + if opts == nil { + opts = &graphdriver.CreateOpts{ + StorageOpt: map[string]string{}, + } + } + + if _, ok := opts.StorageOpt["size"]; !ok { + if opts.StorageOpt == nil { + opts.StorageOpt = map[string]string{} + } + opts.StorageOpt["size"] = strconv.FormatUint(d.options.quota.Size, 10) + } + + return d.create(id, parent, opts) +} + +// Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. +// The parent filesystem is used to configure these directories for the overlay. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { + if opts != nil && len(opts.StorageOpt) != 0 { + if _, ok := opts.StorageOpt["size"]; ok { + return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers") + } + } + return d.create(id, parent, opts) +} + +func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { + dir := d.dir(id) + + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil { + return err + } + + defer func() { + // Clean up on failure + if retErr != nil { + os.RemoveAll(dir) + } + }() + + if opts != nil && len(opts.StorageOpt) > 0 { + driver := &Driver{} + if err := d.parseStorageOpt(opts.StorageOpt, driver); err != nil { + return err + } + + if driver.options.quota.Size > 0 { + // Set container disk quota limit + if err := d.quotaCtl.SetQuota(dir, driver.options.quota); err != nil { + return err + } + } + } + + if err := idtools.MkdirAs(path.Join(dir, "diff"), 0755, rootUID, rootGID); err != nil { + return err + } + + lid := generateID(idLength) + if err := os.Symlink(path.Join("..", id, "diff"), path.Join(d.home, linkDir, lid)); err != nil { + return err + } + + // Write link id to link file + if err := ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil { + return err + } + + // if no parent directory, done + if parent == "" { + return nil + } + + if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { + return err + } + + lower, err := d.getLower(parent) + if err != nil { + return err + } + if lower 
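Assembled from create(), getLower() and GetMetadata() above, a populated layer directory looks like this (the /var/lib/docker home is the conventional default, shown only for illustration; base layers have no lower, work or merged entries):

/var/lib/docker/overlay2/
├── l/                      symlink farm: short names keep mount data small
│   └── <26-char id> -> ../<layer id>/diff
└── <layer id>/
    ├── diff/               this layer's contents (the overlay upperdir)
    ├── link                the 26-char short name pointing at diff/
    ├── lower               ':'-joined chain "l/<id>:l/<id>:…" of all parents
    ├── merged/             overlay mountpoint while the layer is in use
    └── work/               overlay workdir (kernel scratch space)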
!= "" { + if err := ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil { + return err + } + } + + return nil +} + +// Parse overlay storage options +func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { + // Read size to set the disk project quota per container + for key, val := range storageOpt { + key := strings.ToLower(key) + switch key { + case "size": + size, err := units.RAMInBytes(val) + if err != nil { + return err + } + driver.options.quota.Size = uint64(size) + default: + return fmt.Errorf("Unknown option %s", key) + } + } + + return nil +} + +func (d *Driver) getLower(parent string) (string, error) { + parentDir := d.dir(parent) + + // Ensure parent exists + if _, err := os.Lstat(parentDir); err != nil { + return "", err + } + + // Read Parent link fileA + parentLink, err := ioutil.ReadFile(path.Join(parentDir, "link")) + if err != nil { + return "", err + } + lowers := []string{path.Join(linkDir, string(parentLink))} + + parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile)) + if err == nil { + parentLowers := strings.Split(string(parentLower), ":") + lowers = append(lowers, parentLowers...) + } + if len(lowers) > maxDepth { + return "", errors.New("max depth exceeded") + } + return strings.Join(lowers, ":"), nil +} + +func (d *Driver) dir(id string) string { + return path.Join(d.home, id) +} + +func (d *Driver) getLowerDirs(id string) ([]string, error) { + var lowersArray []string + lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile)) + if err == nil { + for _, s := range strings.Split(string(lowers), ":") { + lp, err := os.Readlink(path.Join(d.home, s)) + if err != nil { + return nil, err + } + lowersArray = append(lowersArray, path.Clean(path.Join(d.home, linkDir, lp))) + } + } else if !os.IsNotExist(err) { + return nil, err + } + return lowersArray, nil +} + +// Remove cleans the directories that are created for this id. +func (d *Driver) Remove(id string) error { + d.locker.Lock(id) + defer d.locker.Unlock(id) + dir := d.dir(id) + lid, err := ioutil.ReadFile(path.Join(dir, "link")) + if err == nil { + if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil { + logrus.Debugf("Failed to remove link: %v", err) + } + } + + if err := system.EnsureRemoveAll(dir); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// Get creates and mounts the required file system for the given id and returns the mount path. 
+func (d *Driver) Get(id string, mountLabel string) (s string, err error) { + d.locker.Lock(id) + defer d.locker.Unlock(id) + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return "", err + } + + diffDir := path.Join(dir, "diff") + lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile)) + if err != nil { + // If no lower, just return diff directory + if os.IsNotExist(err) { + return diffDir, nil + } + return "", err + } + + mergedDir := path.Join(dir, "merged") + if count := d.ctr.Increment(mergedDir); count > 1 { + return mergedDir, nil + } + defer func() { + if err != nil { + if c := d.ctr.Decrement(mergedDir); c <= 0 { + unix.Unmount(mergedDir, 0) + } + } + }() + + workDir := path.Join(dir, "work") + splitLowers := strings.Split(string(lowers), ":") + absLowers := make([]string, len(splitLowers)) + for i, s := range splitLowers { + absLowers[i] = path.Join(d.home, s) + } + opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), path.Join(dir, "diff"), path.Join(dir, "work")) + mountData := label.FormatMountLabel(opts, mountLabel) + mount := unix.Mount + mountTarget := mergedDir + + pageSize := unix.Getpagesize() + + // Go can return a larger page size than supported by the system + // as of go 1.7. This will be fixed in 1.8 and this block can be + // removed when building with 1.8. + // See https://github.com/golang/go/commit/1b9499b06989d2831e5b156161d6c07642926ee1 + // See https://github.com/docker/docker/issues/27384 + if pageSize > 4096 { + pageSize = 4096 + } + + // Use relative paths and mountFrom when the mount data has exceeded + // the page size. The mount syscall fails if the mount data cannot + // fit within a page and relative links make the mount data much + // smaller at the expense of requiring a fork exec to chroot. + if len(mountData) > pageSize { + opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", string(lowers), path.Join(id, "diff"), path.Join(id, "work")) + mountData = label.FormatMountLabel(opts, mountLabel) + if len(mountData) > pageSize { + return "", fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData)) + } + + mount = func(source string, target string, mType string, flags uintptr, label string) error { + return mountFrom(d.home, source, target, mType, flags, label) + } + mountTarget = path.Join(id, "merged") + } + + if err := mount("overlay", mountTarget, "overlay", 0, mountData); err != nil { + return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) + } + + // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a + // user namespace requires this to move a directory from lower to upper. + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return "", err + } + + if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil { + return "", err + } + + return mergedDir, nil +} + +// Put unmounts the mount path created for the give id. 
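Get above assembles mount data in two shapes (paths hypothetical). The absolute form is tried first; when it exceeds a page, the relative fallback plus mountFrom() is used, which fork-execs a helper chrooted into d.home so the short paths resolve:

// absolute (first attempt):
//   lowerdir=/var/lib/docker/overlay2/l/AAA…:/var/lib/docker/overlay2/l/BBB…,upperdir=/var/lib/docker/overlay2/<id>/diff,workdir=/var/lib/docker/overlay2/<id>/work
// relative (fallback when len(mountData) > pageSize):
//   lowerdir=l/AAA…:l/BBB…,upperdir=<id>/diff,workdir=<id>/work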
+func (d *Driver) Put(id string) error { + d.locker.Lock(id) + defer d.locker.Unlock(id) + dir := d.dir(id) + _, err := ioutil.ReadFile(path.Join(dir, lowerFile)) + if err != nil { + // If no lower, no mount happened and just return directly + if os.IsNotExist(err) { + return nil + } + return err + } + + mountpoint := path.Join(dir, "merged") + if count := d.ctr.Decrement(mountpoint); count > 0 { + return nil + } + if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil { + logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) + } + return nil +} + +// Exists checks to see if the id is already mounted. +func (d *Driver) Exists(id string) bool { + _, err := os.Stat(d.dir(id)) + return err == nil +} + +// isParent returns if the passed in parent is the direct parent of the passed in layer +func (d *Driver) isParent(id, parent string) bool { + lowers, err := d.getLowerDirs(id) + if err != nil { + return false + } + if parent == "" && len(lowers) > 0 { + return false + } + + parentDir := d.dir(parent) + var ld string + if len(lowers) > 0 { + ld = filepath.Dir(lowers[0]) + } + if ld == "" && parent == "" { + return true + } + return ld == parentDir +} + +// ApplyDiff applies the new layer into a root +func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) { + if !d.isParent(id, parent) { + return d.naiveDiff.ApplyDiff(id, parent, diff) + } + + applyDir := d.getDiffPath(id) + + logrus.Debugf("Applying tar in %s", applyDir) + // Overlay doesn't need the parent id to apply the diff + if err := untar(diff, applyDir, &archive.TarOptions{ + UIDMaps: d.uidMaps, + GIDMaps: d.gidMaps, + WhiteoutFormat: archive.OverlayWhiteoutFormat, + }); err != nil { + return 0, err + } + + return directory.Size(applyDir) +} + +func (d *Driver) getDiffPath(id string) string { + dir := d.dir(id) + + return path.Join(dir, "diff") +} + +// DiffSize calculates the changes between the specified id +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (d *Driver) DiffSize(id, parent string) (size int64, err error) { + if useNaiveDiff(d.home) || !d.isParent(id, parent) { + return d.naiveDiff.DiffSize(id, parent) + } + return directory.Size(d.getDiffPath(id)) +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) { + if useNaiveDiff(d.home) || !d.isParent(id, parent) { + return d.naiveDiff.Diff(id, parent) + } + + diffPath := d.getDiffPath(id) + logrus.Debugf("Tar with options on %s", diffPath) + return archive.TarWithOptions(diffPath, &archive.TarOptions{ + Compression: archive.Uncompressed, + UIDMaps: d.uidMaps, + GIDMaps: d.gidMaps, + WhiteoutFormat: archive.OverlayWhiteoutFormat, + }) +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { + if useNaiveDiff(d.home) || !d.isParent(id, parent) { + return d.naiveDiff.Changes(id, parent) + } + // Overlay doesn't have snapshots, so we need to get changes from all parent + // layers. 
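The diff entry points above all funnel through one predicate before trusting native overlay diffing; a compact restatement (illustrative):

// native := !useNaiveDiff(d.home) && d.isParent(id, parent)
// DiffSize, Diff and Changes require both conditions; ApplyDiff checks only
// isParent, since extracting a tar into diff/ does not depend on the
// opaque-dir copy-up behavior that useNaiveDiff guards against.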
+ diffPath := d.getDiffPath(id) + layers, err := d.getLowerDirs(id) + if err != nil { + return nil, err + } + + return archive.OverlayChanges(layers, diffPath) +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/overlay2/overlay_test.go b/vendor/github.com/moby/moby/daemon/graphdriver/overlay2/overlay_test.go new file mode 100644 index 000000000..2ff0b339e --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/overlay2/overlay_test.go @@ -0,0 +1,121 @@ +// +build linux + +package overlay2 + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/graphtest" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" + "golang.org/x/sys/unix" +) + +func init() { + // Do not sure chroot to speed run time and allow archive + // errors or hangs to be debugged directly from the test process. + untar = archive.UntarUncompressed + graphdriver.ApplyUncompressedLayer = archive.ApplyUncompressedLayer + + reexec.Init() +} + +func cdMountFrom(dir, device, target, mType, label string) error { + wd, err := os.Getwd() + if err != nil { + return err + } + os.Chdir(dir) + defer os.Chdir(wd) + + return unix.Mount(device, target, mType, 0, label) +} + +func skipIfNaive(t *testing.T) { + td, err := ioutil.TempDir("", "naive-check-") + if err != nil { + t.Fatalf("Failed to create temp dir: %v", err) + } + defer os.RemoveAll(td) + + if useNaiveDiff(td) { + t.Skipf("Cannot run test with naive diff") + } +} + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestOverlaySetup and TestOverlayTeardown +func TestOverlaySetup(t *testing.T) { + graphtest.GetDriver(t, driverName) +} + +func TestOverlayCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, driverName) +} + +func TestOverlayCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, driverName) +} + +func TestOverlayCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, driverName) +} + +func TestOverlay128LayerRead(t *testing.T) { + graphtest.DriverTestDeepLayerRead(t, 128, driverName) +} + +func TestOverlayDiffApply10Files(t *testing.T) { + skipIfNaive(t) + graphtest.DriverTestDiffApply(t, 10, driverName) +} + +func TestOverlayChanges(t *testing.T) { + skipIfNaive(t) + graphtest.DriverTestChanges(t, driverName) +} + +func TestOverlayTeardown(t *testing.T) { + graphtest.PutDriver(t) +} + +// Benchmarks should always setup new driver + +func BenchmarkExists(b *testing.B) { + graphtest.DriverBenchExists(b, driverName) +} + +func BenchmarkGetEmpty(b *testing.B) { + graphtest.DriverBenchGetEmpty(b, driverName) +} + +func BenchmarkDiffBase(b *testing.B) { + graphtest.DriverBenchDiffBase(b, driverName) +} + +func BenchmarkDiffSmallUpper(b *testing.B) { + graphtest.DriverBenchDiffN(b, 10, 10, driverName) +} + +func BenchmarkDiff10KFileUpper(b *testing.B) { + graphtest.DriverBenchDiffN(b, 10, 10000, driverName) +} + +func BenchmarkDiff10KFilesBottom(b *testing.B) { + graphtest.DriverBenchDiffN(b, 10000, 10, driverName) +} + +func BenchmarkDiffApply100(b *testing.B) { + graphtest.DriverBenchDiffApplyN(b, 100, driverName) +} + +func BenchmarkDiff20Layers(b *testing.B) { + graphtest.DriverBenchDeepLayerDiff(b, 20, driverName) +} + +func BenchmarkRead20Layers(b *testing.B) { + graphtest.DriverBenchDeepLayerRead(b, 20, driverName) +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/overlay2/overlay_unsupported.go 
b/vendor/github.com/moby/moby/daemon/graphdriver/overlay2/overlay_unsupported.go new file mode 100644 index 000000000..e5ac4ca8c --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/overlay2/overlay_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux + +package overlay2 diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/overlay2/randomid.go b/vendor/github.com/moby/moby/daemon/graphdriver/overlay2/randomid.go new file mode 100644 index 000000000..04212c069 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/overlay2/randomid.go @@ -0,0 +1,81 @@ +// +build linux + +package overlay2 + +import ( + "crypto/rand" + "encoding/base32" + "fmt" + "io" + "os" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// generateID creates a new random string identifier with the given length +func generateID(l int) string { + const ( + // ensures we backoff for less than 450ms total. Use the following to + // select new value, in units of 10ms: + // n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2 + maxretries = 9 + backoff = time.Millisecond * 10 + ) + + var ( + totalBackoff time.Duration + count int + retries int + size = (l*5 + 7) / 8 + u = make([]byte, size) + ) + // TODO: Include time component, counter component, random component + + for { + // This should never block but the read may fail. Because of this, + // we just try to read the random number generator until we get + // something. This is a very rare condition but may happen. + b := time.Duration(retries) * backoff + time.Sleep(b) + totalBackoff += b + + n, err := io.ReadFull(rand.Reader, u[count:]) + if err != nil { + if retryOnError(err) && retries < maxretries { + count += n + retries++ + logrus.Errorf("error generating version 4 uuid, retrying: %v", err) + continue + } + + // Any other errors represent a system problem. What did someone + // do to /dev/urandom? + panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err)) + } + + break + } + + s := base32.StdEncoding.EncodeToString(u) + + return s[:l] +} + +// retryOnError tries to detect whether or not retrying would be fruitful. +func retryOnError(err error) bool { + switch err := err.(type) { + case *os.PathError: + return retryOnError(err.Err) // unpack the target error + case syscall.Errno: + if err == unix.EPERM { + // EPERM represents an entropy pool exhaustion, a condition under + // which we backoff and retry. + return true + } + } + + return false +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/overlayutils/overlayutils.go b/vendor/github.com/moby/moby/daemon/graphdriver/overlayutils/overlayutils.go new file mode 100644 index 000000000..7491c3457 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/overlayutils/overlayutils.go @@ -0,0 +1,18 @@ +// +build linux + +package overlayutils + +import ( + "errors" + "fmt" +) + +// ErrDTypeNotSupported denotes that the backing filesystem doesn't support d_type. +func ErrDTypeNotSupported(driver, backingFs string) error { + msg := fmt.Sprintf("%s: the backing %s filesystem is formatted without d_type support, which leads to incorrect behavior.", driver, backingFs) + if backingFs == "xfs" { + msg += " Reformat the filesystem with ftype=1 to enable d_type support." + } + msg += " Running without d_type support will no longer be supported in Docker 17.12." 
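Worked numbers for generateID's constants above (illustrative helper, hypothetical name, not part of the vendored file):

func generateIDBudget() (worstCaseSleep time.Duration, encodedLen int) {
	const l = idLength // 26
	size := (l*5 + 7) / 8 // 17 bytes of entropy: 26 base32 chars carry 130 bits
	encodedLen = base32.StdEncoding.EncodedLen(size) // 32 chars, trimmed to l by s[:l]
	// retries sleep 0,1,…,9 units of 10ms: 45 units = 450ms total, per the comment
	worstCaseSleep = time.Duration(9*(9+1)/2) * (10 * time.Millisecond)
	return
}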
+ return errors.New(msg) +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/plugin.go b/vendor/github.com/moby/moby/daemon/graphdriver/plugin.go new file mode 100644 index 000000000..f6852f075 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/plugin.go @@ -0,0 +1,43 @@ +package graphdriver + +import ( + "fmt" + "io" + "path/filepath" + + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/plugin/v2" +) + +type pluginClient interface { + // Call calls the specified method with the specified arguments for the plugin. + Call(string, interface{}, interface{}) error + // Stream calls the specified method with the specified arguments for the plugin and returns the response IO stream + Stream(string, interface{}) (io.ReadCloser, error) + // SendFile calls the specified method, and passes through the IO stream + SendFile(string, io.Reader, interface{}) error +} + +func lookupPlugin(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) { + if !config.ExperimentalEnabled { + return nil, fmt.Errorf("graphdriver plugins are only supported with experimental mode") + } + pl, err := pg.Get(name, "GraphDriver", plugingetter.Acquire) + if err != nil { + return nil, fmt.Errorf("Error looking up graphdriver plugin %s: %v", name, err) + } + return newPluginDriver(name, pl, config) +} + +func newPluginDriver(name string, pl plugingetter.CompatPlugin, config Options) (Driver, error) { + home := config.Root + if !pl.IsV1() { + if p, ok := pl.(*v2.Plugin); ok { + if p.PropagatedMount != "" { + home = p.PluginObj.Config.PropagatedMount + } + } + } + proxy := &graphDriverProxy{name, pl, Capabilities{}} + return proxy, proxy.Init(filepath.Join(home, name), config.DriverOptions, config.UIDMaps, config.GIDMaps) +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/proxy.go b/vendor/github.com/moby/moby/daemon/graphdriver/proxy.go new file mode 100644 index 000000000..120afad45 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/proxy.go @@ -0,0 +1,263 @@ +package graphdriver + +import ( + "errors" + "fmt" + "io" + "path/filepath" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/plugins" +) + +type graphDriverProxy struct { + name string + p plugingetter.CompatPlugin + caps Capabilities +} + +type graphDriverRequest struct { + ID string `json:",omitempty"` + Parent string `json:",omitempty"` + MountLabel string `json:",omitempty"` + StorageOpt map[string]string `json:",omitempty"` +} + +type graphDriverResponse struct { + Err string `json:",omitempty"` + Dir string `json:",omitempty"` + Exists bool `json:",omitempty"` + Status [][2]string `json:",omitempty"` + Changes []archive.Change `json:",omitempty"` + Size int64 `json:",omitempty"` + Metadata map[string]string `json:",omitempty"` + Capabilities Capabilities `json:",omitempty"` +} + +type graphDriverInitRequest struct { + Home string + Opts []string `json:"Opts"` + UIDMaps []idtools.IDMap `json:"UIDMaps"` + GIDMaps []idtools.IDMap `json:"GIDMaps"` +} + +func (d *graphDriverProxy) Init(home string, opts []string, uidMaps, gidMaps []idtools.IDMap) error { + if !d.p.IsV1() { + if cp, ok := d.p.(plugingetter.CountedPlugin); ok { + // always acquire here, it will be cleaned up on daemon shutdown + cp.Acquire() + } + } + args := &graphDriverInitRequest{ + Home: home, + Opts: opts, + UIDMaps: uidMaps, + GIDMaps: gidMaps, + } + var ret graphDriverResponse + if err := 
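The rough shape of the graphDriverInitRequest payload assembled above, as it crosses the plugin transport (values hypothetical; UIDMaps/GIDMaps stay null when no userns remap is configured):

req := graphDriverInitRequest{
	Home: "/var/lib/docker/my-graph-plugin",
	Opts: []string{"mydriver.loglevel=debug"},
}
b, _ := json.Marshal(req)
// string(b) == `{"Home":"/var/lib/docker/my-graph-plugin","Opts":["mydriver.loglevel=debug"],"UIDMaps":null,"GIDMaps":null}`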
d.p.Client().Call("GraphDriver.Init", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + caps, err := d.fetchCaps() + if err != nil { + return err + } + d.caps = caps + return nil +} + +func (d *graphDriverProxy) fetchCaps() (Capabilities, error) { + args := &graphDriverRequest{} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Capabilities", args, &ret); err != nil { + if !plugins.IsNotFound(err) { + return Capabilities{}, err + } + } + return ret.Capabilities, nil +} + +func (d *graphDriverProxy) String() string { + return d.name +} + +func (d *graphDriverProxy) Capabilities() Capabilities { + return d.caps +} + +func (d *graphDriverProxy) CreateReadWrite(id, parent string, opts *CreateOpts) error { + return d.create("GraphDriver.CreateReadWrite", id, parent, opts) +} + +func (d *graphDriverProxy) Create(id, parent string, opts *CreateOpts) error { + return d.create("GraphDriver.Create", id, parent, opts) +} + +func (d *graphDriverProxy) create(method, id, parent string, opts *CreateOpts) error { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + if opts != nil { + args.MountLabel = opts.MountLabel + args.StorageOpt = opts.StorageOpt + } + var ret graphDriverResponse + if err := d.p.Client().Call(method, args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Remove(id string) error { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Remove", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Get(id, mountLabel string) (string, error) { + args := &graphDriverRequest{ + ID: id, + MountLabel: mountLabel, + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Get", args, &ret); err != nil { + return "", err + } + var err error + if ret.Err != "" { + err = errors.New(ret.Err) + } + return filepath.Join(d.p.BasePath(), ret.Dir), err +} + +func (d *graphDriverProxy) Put(id string) error { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Put", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Exists(id string) bool { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Exists", args, &ret); err != nil { + return false + } + return ret.Exists +} + +func (d *graphDriverProxy) Status() [][2]string { + args := &graphDriverRequest{} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Status", args, &ret); err != nil { + return nil + } + return ret.Status +} + +func (d *graphDriverProxy) GetMetadata(id string) (map[string]string, error) { + args := &graphDriverRequest{ + ID: id, + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.GetMetadata", args, &ret); err != nil { + return nil, err + } + if ret.Err != "" { + return nil, errors.New(ret.Err) + } + return ret.Metadata, nil +} + +func (d *graphDriverProxy) Cleanup() error { + if !d.p.IsV1() { + if cp, ok := d.p.(plugingetter.CountedPlugin); ok { + // always release + defer cp.Release() + } + } + + args := &graphDriverRequest{} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Cleanup", args, &ret); err != nil { + return nil + } + 
if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Diff(id, parent string) (io.ReadCloser, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + body, err := d.p.Client().Stream("GraphDriver.Diff", args) + if err != nil { + return nil, err + } + return body, nil +} + +func (d *graphDriverProxy) Changes(id, parent string) ([]archive.Change, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Changes", args, &ret); err != nil { + return nil, err + } + if ret.Err != "" { + return nil, errors.New(ret.Err) + } + + return ret.Changes, nil +} + +func (d *graphDriverProxy) ApplyDiff(id, parent string, diff io.Reader) (int64, error) { + var ret graphDriverResponse + if err := d.p.Client().SendFile(fmt.Sprintf("GraphDriver.ApplyDiff?id=%s&parent=%s", id, parent), diff, &ret); err != nil { + return -1, err + } + if ret.Err != "" { + return -1, errors.New(ret.Err) + } + return ret.Size, nil +} + +func (d *graphDriverProxy) DiffSize(id, parent string) (int64, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.DiffSize", args, &ret); err != nil { + return -1, err + } + if ret.Err != "" { + return -1, errors.New(ret.Err) + } + return ret.Size, nil +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/quota/projectquota.go b/vendor/github.com/moby/moby/daemon/graphdriver/quota/projectquota.go new file mode 100644 index 000000000..194c30e9a --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/quota/projectquota.go @@ -0,0 +1,340 @@ +// +build linux + +// +// projectquota.go - implements XFS project quota controls +// for setting quota limits on a newly created directory. +// It currently supports the legacy XFS specific ioctls. +// +// TODO: use generic quota control ioctl FS_IOC_FS{GET,SET}XATTR +// for both xfs/ext4 for kernel version >= v4.5 +// + +package quota + +/* +#include +#include +#include +#include +#include + +#ifndef FS_XFLAG_PROJINHERIT +struct fsxattr { + __u32 fsx_xflags; + __u32 fsx_extsize; + __u32 fsx_nextents; + __u32 fsx_projid; + unsigned char fsx_pad[12]; +}; +#define FS_XFLAG_PROJINHERIT 0x00000200 +#endif +#ifndef FS_IOC_FSGETXATTR +#define FS_IOC_FSGETXATTR _IOR ('X', 31, struct fsxattr) +#endif +#ifndef FS_IOC_FSSETXATTR +#define FS_IOC_FSSETXATTR _IOW ('X', 32, struct fsxattr) +#endif + +#ifndef PRJQUOTA +#define PRJQUOTA 2 +#endif +#ifndef XFS_PROJ_QUOTA +#define XFS_PROJ_QUOTA 2 +#endif +#ifndef Q_XSETPQLIM +#define Q_XSETPQLIM QCMD(Q_XSETQLIM, PRJQUOTA) +#endif +#ifndef Q_XGETPQUOTA +#define Q_XGETPQUOTA QCMD(Q_XGETQUOTA, PRJQUOTA) +#endif +*/ +import "C" +import ( + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "syscall" + "unsafe" + + "github.com/Sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// Quota limit params - currently we only control blocks hard limit +type Quota struct { + Size uint64 +} + +// Control - Context to be used by storage driver (e.g. overlay) +// who wants to apply project quotas to container dirs +type Control struct { + backingFsBlockDev string + nextProjectID uint32 + quotas map[string]uint32 +} + +// NewControl - initialize project quota support. +// Test to make sure that quota can be set on a test dir and find +// the first project id to be used for the next container create. +// +// Returns nil (and error) if project quota is not supported. 
+// +// First get the project id of the home directory. +// This test will fail if the backing fs is not xfs. +// +// xfs_quota tool can be used to assign a project id to the driver home directory, e.g.: +// echo 999:/var/lib/docker/overlay2 >> /etc/projects +// echo docker:999 >> /etc/projid +// xfs_quota -x -c 'project -s docker' / +// +// In that case, the home directory project id will be used as a "start offset" +// and all containers will be assigned larger project ids (e.g. >= 1000). +// This is a way to prevent xfs_quota management from conflicting with docker. +// +// Then try to create a test directory with the next project id and set a quota +// on it. If that works, continue to scan existing containers to map allocated +// project ids. +// +func NewControl(basePath string) (*Control, error) { + // + // Get project id of parent dir as minimal id to be used by driver + // + minProjectID, err := getProjectID(basePath) + if err != nil { + return nil, err + } + minProjectID++ + + // + // create backing filesystem device node + // + backingFsBlockDev, err := makeBackingFsDev(basePath) + if err != nil { + return nil, err + } + + // + // Test if filesystem supports project quotas by trying to set + // a quota on the first available project id + // + quota := Quota{ + Size: 0, + } + if err := setProjectQuota(backingFsBlockDev, minProjectID, quota); err != nil { + return nil, err + } + + q := Control{ + backingFsBlockDev: backingFsBlockDev, + nextProjectID: minProjectID + 1, + quotas: make(map[string]uint32), + } + + // + // get first project id to be used for next container + // + err = q.findNextProjectID(basePath) + if err != nil { + return nil, err + } + + logrus.Debugf("NewControl(%s): nextProjectID = %d", basePath, q.nextProjectID) + return &q, nil +} + +// SetQuota - assign a unique project id to directory and set the quota limits +// for that project id +func (q *Control) SetQuota(targetPath string, quota Quota) error { + + projectID, ok := q.quotas[targetPath] + if !ok { + projectID = q.nextProjectID + + // + // assign project id to new container directory + // + err := setProjectID(targetPath, projectID) + if err != nil { + return err + } + + q.quotas[targetPath] = projectID + q.nextProjectID++ + } + + // + // set the quota limit for the container's project id + // + logrus.Debugf("SetQuota(%s, %d): projectID=%d", targetPath, quota.Size, projectID) + return setProjectQuota(q.backingFsBlockDev, projectID, quota) +} + +// setProjectQuota - set the quota for project id on xfs block device +func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) error { + var d C.fs_disk_quota_t + d.d_version = C.FS_DQUOT_VERSION + d.d_id = C.__u32(projectID) + d.d_flags = C.XFS_PROJ_QUOTA + + d.d_fieldmask = C.FS_DQ_BHARD | C.FS_DQ_BSOFT + d.d_blk_hardlimit = C.__u64(quota.Size / 512) + d.d_blk_softlimit = d.d_blk_hardlimit + + var cs = C.CString(backingFsBlockDev) + defer C.free(unsafe.Pointer(cs)) + + _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM, + uintptr(unsafe.Pointer(cs)), uintptr(d.d_id), + uintptr(unsafe.Pointer(&d)), 0, 0) + if errno != 0 { + return fmt.Errorf("Failed to set quota limit for projid %d on %s: %v", + projectID, backingFsBlockDev, errno.Error()) + } + + return nil +} + +// GetQuota - get the quota limits of a directory that was configured with SetQuota +func (q *Control) GetQuota(targetPath string, quota *Quota) error { + + projectID, ok := q.quotas[targetPath] + if !ok { + return fmt.Errorf("quota not found for path : %s", targetPath) + 
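A usage sketch from the driver's side, plus the block arithmetic behind setProjectQuota above (the layerDir variable and 20 GiB size are hypothetical; quota sizes are byte counts, converted to 512-byte basic blocks for the xfs quotactl):

ctl, err := quota.NewControl("/var/lib/docker/overlay2")
if err != nil {
	return err // backing fs is not xfs, or lacks the pquota mount option
}
if err := ctl.SetQuota(layerDir, quota.Quota{Size: 20 << 30}); err != nil {
	return err
}
// 20 GiB = 21474836480 bytes -> d_blk_hardlimit = 21474836480/512 = 41943040
// blocks; GetQuota reverses the conversion by multiplying by 512.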
} + + // + // get the quota limit for the container's project id + // + var d C.fs_disk_quota_t + + var cs = C.CString(q.backingFsBlockDev) + defer C.free(unsafe.Pointer(cs)) + + _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XGETPQUOTA, + uintptr(unsafe.Pointer(cs)), uintptr(C.__u32(projectID)), + uintptr(unsafe.Pointer(&d)), 0, 0) + if errno != 0 { + return fmt.Errorf("Failed to get quota limit for projid %d on %s: %v", + projectID, q.backingFsBlockDev, errno.Error()) + } + quota.Size = uint64(d.d_blk_hardlimit) * 512 + + return nil +} + +// getProjectID - get the project id of path on xfs +func getProjectID(targetPath string) (uint32, error) { + dir, err := openDir(targetPath) + if err != nil { + return 0, err + } + defer closeDir(dir) + + var fsx C.struct_fsxattr + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return 0, fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error()) + } + + return uint32(fsx.fsx_projid), nil +} + +// setProjectID - set the project id of path on xfs +func setProjectID(targetPath string, projectID uint32) error { + dir, err := openDir(targetPath) + if err != nil { + return err + } + defer closeDir(dir) + + var fsx C.struct_fsxattr + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error()) + } + fsx.fsx_projid = C.__u32(projectID) + fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT + _, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return fmt.Errorf("Failed to set projid for %s: %v", targetPath, errno.Error()) + } + + return nil +} + +// findNextProjectID - find the next project id to be used for containers +// by scanning driver home directory to find used project ids +func (q *Control) findNextProjectID(home string) error { + files, err := ioutil.ReadDir(home) + if err != nil { + return fmt.Errorf("read directory failed : %s", home) + } + for _, file := range files { + if !file.IsDir() { + continue + } + path := filepath.Join(home, file.Name()) + projid, err := getProjectID(path) + if err != nil { + return err + } + if projid > 0 { + q.quotas[path] = projid + } + if q.nextProjectID <= projid { + q.nextProjectID = projid + 1 + } + } + + return nil +} + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func openDir(path string) (*C.DIR, error) { + Cpath := C.CString(path) + defer free(Cpath) + + dir := C.opendir(Cpath) + if dir == nil { + return nil, fmt.Errorf("Can't open dir") + } + return dir, nil +} + +func closeDir(dir *C.DIR) { + if dir != nil { + C.closedir(dir) + } +} + +func getDirFd(dir *C.DIR) uintptr { + return uintptr(C.dirfd(dir)) +} + +// Get the backing block device of the driver home directory +// and create a block device node under the home directory +// to be used by quotactl commands +func makeBackingFsDev(home string) (string, error) { + fileinfo, err := os.Stat(home) + if err != nil { + return "", err + } + + backingFsBlockDev := path.Join(home, "backingFsBlockDev") + // Re-create just in case someone copied the home directory over to a new device + unix.Unlink(backingFsBlockDev) + stat := fileinfo.Sys().(*syscall.Stat_t) + if err := unix.Mknod(backingFsBlockDev, unix.S_IFBLK|0600, int(stat.Dev)); err != nil { + return "", fmt.Errorf("Failed to mknod %s: %v", backingFsBlockDev, err) + } + + return 
backingFsBlockDev, nil +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/register/register_aufs.go b/vendor/github.com/moby/moby/daemon/graphdriver/register/register_aufs.go new file mode 100644 index 000000000..262954d6e --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/register/register_aufs.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_aufs,linux + +package register + +import ( + // register the aufs graphdriver + _ "github.com/docker/docker/daemon/graphdriver/aufs" +) diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/register/register_btrfs.go b/vendor/github.com/moby/moby/daemon/graphdriver/register/register_btrfs.go new file mode 100644 index 000000000..f456cc5ce --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/register/register_btrfs.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_btrfs,linux + +package register + +import ( + // register the btrfs graphdriver + _ "github.com/docker/docker/daemon/graphdriver/btrfs" +) diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/register/register_devicemapper.go b/vendor/github.com/moby/moby/daemon/graphdriver/register/register_devicemapper.go new file mode 100644 index 000000000..bb2e9ef54 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/register/register_devicemapper.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_devicemapper,linux + +package register + +import ( + // register the devmapper graphdriver + _ "github.com/docker/docker/daemon/graphdriver/devmapper" +) diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/register/register_overlay.go b/vendor/github.com/moby/moby/daemon/graphdriver/register/register_overlay.go new file mode 100644 index 000000000..9ba849ced --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/register/register_overlay.go @@ -0,0 +1,9 @@ +// +build !exclude_graphdriver_overlay,linux + +package register + +import ( + // register the overlay graphdriver + _ "github.com/docker/docker/daemon/graphdriver/overlay" + _ "github.com/docker/docker/daemon/graphdriver/overlay2" +) diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/register/register_vfs.go b/vendor/github.com/moby/moby/daemon/graphdriver/register/register_vfs.go new file mode 100644 index 000000000..98fad23b2 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/register/register_vfs.go @@ -0,0 +1,6 @@ +package register + +import ( + // register vfs + _ "github.com/docker/docker/daemon/graphdriver/vfs" +) diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/register/register_windows.go b/vendor/github.com/moby/moby/daemon/graphdriver/register/register_windows.go new file mode 100644 index 000000000..5bb1fd62a --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/register/register_windows.go @@ -0,0 +1,7 @@ +package register + +import ( + // register the windows graph drivers + _ "github.com/docker/docker/daemon/graphdriver/lcow" + _ "github.com/docker/docker/daemon/graphdriver/windows" +) diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/register/register_zfs.go b/vendor/github.com/moby/moby/daemon/graphdriver/register/register_zfs.go new file mode 100644 index 000000000..8f34e3553 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/register/register_zfs.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd, solaris + +package register + +import ( + // register the zfs driver + _ "github.com/docker/docker/daemon/graphdriver/zfs" +) diff 
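The register_*.go stanzas above exist so individual graphdrivers can be compiled out: with the matching build tag set, the blank import disappears, the driver's init() never calls graphdriver.Register, and the daemon cannot select it. An illustrative build (tag names come from the files above; the exact command shape is an assumption, not project tooling):

// go build -tags 'exclude_graphdriver_btrfs exclude_graphdriver_devicemapper' ./cmd/dockerd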
--git a/vendor/github.com/moby/moby/daemon/graphdriver/vfs/driver.go b/vendor/github.com/moby/moby/daemon/graphdriver/vfs/driver.go new file mode 100644 index 000000000..15a4de360 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/vfs/driver.go @@ -0,0 +1,134 @@ +package vfs + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/system" + "github.com/opencontainers/selinux/go-selinux/label" +) + +var ( + // CopyWithTar defines the copy method to use. + CopyWithTar = chrootarchive.NewArchiver(nil).CopyWithTar +) + +func init() { + graphdriver.Register("vfs", Init) +} + +// Init returns a new VFS driver. +// This sets the home directory for the driver and returns NaiveDiffDriver. +func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + d := &Driver{ + home: home, + idMappings: idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), + } + rootIDs := d.idMappings.RootPair() + if err := idtools.MkdirAllAndChown(home, 0700, rootIDs); err != nil { + return nil, err + } + return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil +} + +// Driver holds information about the driver, home directory of the driver. +// Driver implements graphdriver.ProtoDriver. It uses only basic vfs operations. +// In order to support layering, files are copied from the parent layer into the new layer. There is no copy-on-write support. +// Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver +type Driver struct { + home string + idMappings *idtools.IDMappings +} + +func (d *Driver) String() string { + return "vfs" +} + +// Status is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any status information. +func (d *Driver) Status() [][2]string { + return nil +} + +// GetMetadata is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any meta data. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + return nil, nil +} + +// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver. +func (d *Driver) Cleanup() error { + return nil +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) +} + +// Create prepares the filesystem for the VFS driver and copies the directory for the given id under the parent. 
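Two properties of the vfs driver below worth noting. It has no copy-on-write: Create materializes each layer by fully copying its parent with CopyWithTar, so an n-layer chain stores n near-complete copies. And dir() maps ids through filepath.Base, so a hostile id cannot escape the driver home; a quick check of that property (illustrative):

// filepath.Base("../../evil") == "evil"  =>  d.dir("../../evil") resolves to
// filepath.Join(home, "dir", "evil"), safely inside home.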
+func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + if opts != nil && len(opts.StorageOpt) != 0 { + return fmt.Errorf("--storage-opt is not supported for vfs") + } + + dir := d.dir(id) + rootIDs := d.idMappings.RootPair() + if err := idtools.MkdirAllAndChown(filepath.Dir(dir), 0700, rootIDs); err != nil { + return err + } + if err := idtools.MkdirAndChown(dir, 0755, rootIDs); err != nil { + return err + } + labelOpts := []string{"level:s0"} + if _, mountLabel, err := label.InitLabels(labelOpts); err == nil { + label.SetFileLabel(dir, mountLabel) + } + if parent == "" { + return nil + } + parentDir, err := d.Get(parent, "") + if err != nil { + return fmt.Errorf("%s: %s", parent, err) + } + return CopyWithTar(parentDir, dir) +} + +func (d *Driver) dir(id string) string { + return filepath.Join(d.home, "dir", filepath.Base(id)) +} + +// Remove deletes the content from the directory for a given id. +func (d *Driver) Remove(id string) error { + if err := system.EnsureRemoveAll(d.dir(id)); err != nil { + return err + } + return nil +} + +// Get returns the directory for the given id. +func (d *Driver) Get(id, mountLabel string) (string, error) { + dir := d.dir(id) + if st, err := os.Stat(dir); err != nil { + return "", err + } else if !st.IsDir() { + return "", fmt.Errorf("%s: not a directory", dir) + } + return dir, nil +} + +// Put is a noop for vfs that return nil for the error, since this driver has no runtime resources to clean up. +func (d *Driver) Put(id string) error { + // The vfs driver has no runtime resources (e.g. mounts) + // to clean up, so we don't need anything here + return nil +} + +// Exists checks to see if the directory exists for the given id. +func (d *Driver) Exists(id string) bool { + _, err := os.Stat(d.dir(id)) + return err == nil +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/vfs/vfs_test.go b/vendor/github.com/moby/moby/daemon/graphdriver/vfs/vfs_test.go new file mode 100644 index 000000000..9ecf21dba --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/vfs/vfs_test.go @@ -0,0 +1,37 @@ +// +build linux + +package vfs + +import ( + "testing" + + "github.com/docker/docker/daemon/graphdriver/graphtest" + + "github.com/docker/docker/pkg/reexec" +) + +func init() { + reexec.Init() +} + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestVfsSetup and TestVfsTeardown +func TestVfsSetup(t *testing.T) { + graphtest.GetDriver(t, "vfs") +} + +func TestVfsCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "vfs") +} + +func TestVfsCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "vfs") +} + +func TestVfsCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "vfs") +} + +func TestVfsTeardown(t *testing.T) { + graphtest.PutDriver(t) +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/windows/windows.go b/vendor/github.com/moby/moby/daemon/graphdriver/windows/windows.go new file mode 100644 index 000000000..49c8d34a5 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/windows/windows.go @@ -0,0 +1,960 @@ +//+build windows + +package windows + +import ( + "bufio" + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "strconv" + "strings" + "sync" + "syscall" + "time" + "unsafe" + + "github.com/Microsoft/go-winio" + "github.com/Microsoft/go-winio/archive/tar" + "github.com/Microsoft/go-winio/backuptar" + "github.com/Microsoft/hcsshim" + 
"github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/longpath" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/docker/pkg/system" + units "github.com/docker/go-units" + "golang.org/x/sys/windows" +) + +// filterDriver is an HCSShim driver type for the Windows Filter driver. +const filterDriver = 1 + +var ( + // mutatedFiles is a list of files that are mutated by the import process + // and must be backed up and restored. + mutatedFiles = map[string]string{ + "UtilityVM/Files/EFI/Microsoft/Boot/BCD": "bcd.bak", + "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG": "bcd.log.bak", + "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG1": "bcd.log1.bak", + "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG2": "bcd.log2.bak", + } + noreexec = false +) + +// init registers the windows graph drivers to the register. +func init() { + graphdriver.Register("windowsfilter", InitFilter) + // DOCKER_WINDOWSFILTER_NOREEXEC allows for inline processing which makes + // debugging issues in the re-exec codepath significantly easier. + if os.Getenv("DOCKER_WINDOWSFILTER_NOREEXEC") != "" { + logrus.Warnf("WindowsGraphDriver is set to not re-exec. This is intended for debugging purposes only.") + noreexec = true + } else { + reexec.Register("docker-windows-write-layer", writeLayerReexec) + } +} + +type checker struct { +} + +func (c *checker) IsMounted(path string) bool { + return false +} + +// Driver represents a windows graph driver. +type Driver struct { + // info stores the shim driver information + info hcsshim.DriverInfo + ctr *graphdriver.RefCounter + // it is safe for windows to use a cache here because it does not support + // restoring containers when the daemon dies. + cacheMu sync.Mutex + cache map[string]string +} + +// InitFilter returns a new Windows storage filter driver. 
+func InitFilter(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + logrus.Debugf("WindowsGraphDriver InitFilter at %s", home) + + fsType, err := getFileSystemType(string(home[0])) + if err != nil { + return nil, err + } + if strings.ToLower(fsType) == "refs" { + return nil, fmt.Errorf("%s is on an ReFS volume - ReFS volumes are not supported", home) + } + + if err := idtools.MkdirAllAs(home, 0700, 0, 0); err != nil { + return nil, fmt.Errorf("windowsfilter failed to create '%s': %v", home, err) + } + + d := &Driver{ + info: hcsshim.DriverInfo{ + HomeDir: home, + Flavour: filterDriver, + }, + cache: make(map[string]string), + ctr: graphdriver.NewRefCounter(&checker{}), + } + return d, nil +} + +// win32FromHresult is a helper function to get the win32 error code from an HRESULT +func win32FromHresult(hr uintptr) uintptr { + if hr&0x1fff0000 == 0x00070000 { + return hr & 0xffff + } + return hr +} + +// getFileSystemType obtains the type of a file system through GetVolumeInformation +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa364993(v=vs.85).aspx +func getFileSystemType(drive string) (fsType string, hr error) { + var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + procGetVolumeInformation = modkernel32.NewProc("GetVolumeInformationW") + buf = make([]uint16, 255) + size = windows.MAX_PATH + 1 + ) + if len(drive) != 1 { + hr = errors.New("getFileSystemType must be called with a drive letter") + return + } + drive += `:\` + n := uintptr(unsafe.Pointer(nil)) + r0, _, _ := syscall.Syscall9(procGetVolumeInformation.Addr(), 8, uintptr(unsafe.Pointer(windows.StringToUTF16Ptr(drive))), n, n, n, n, n, uintptr(unsafe.Pointer(&buf[0])), uintptr(size), 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + fsType = windows.UTF16ToString(buf) + return +} + +// String returns the string representation of a driver. This should match +// the name the graph driver has been registered with. +func (d *Driver) String() string { + return "windowsfilter" +} + +// Status returns the status of the driver. +func (d *Driver) Status() [][2]string { + return [][2]string{ + {"Windows", ""}, + } +} + +// panicIfUsedByLcow does exactly what it says. +// TODO @jhowardmsft - this is a temporary measure for the bring-up of +// Linux containers on Windows. It is a failsafe to ensure that the right +// graphdriver is used. +func panicIfUsedByLcow() { + if system.LCOWSupported() { + panic("inconsistency - windowsfilter graphdriver should not be used when in LCOW mode") + } +} + +// Exists returns true if the given id is registered with this driver. +func (d *Driver) Exists(id string) bool { + panicIfUsedByLcow() + rID, err := d.resolveID(id) + if err != nil { + return false + } + result, err := hcsshim.LayerExists(d.info, rID) + if err != nil { + return false + } + return result +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + panicIfUsedByLcow() + if opts != nil { + return d.create(id, parent, opts.MountLabel, false, opts.StorageOpt) + } + return d.create(id, parent, "", false, nil) +} + +// Create creates a new read-only layer with the given id. 
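A worked example for win32FromHresult above, using standard Windows constants: HRESULT 0x80070005 (E_ACCESSDENIED) carries FACILITY_WIN32, so 0x80070005 & 0x1fff0000 == 0x00070000 and the function unwraps it:

// win32FromHresult(0x80070005) == 0x5 // ERROR_ACCESS_DENIED
// getFileSystemType uses this to turn a failing GetVolumeInformationW call's
// HRESULT back into a plain syscall.Errno.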
+func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + panicIfUsedByLcow() + if opts != nil { + return d.create(id, parent, opts.MountLabel, true, opts.StorageOpt) + } + return d.create(id, parent, "", true, nil) +} + +func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt map[string]string) error { + rPId, err := d.resolveID(parent) + if err != nil { + return err + } + + parentChain, err := d.getLayerChain(rPId) + if err != nil { + return err + } + + var layerChain []string + + if rPId != "" { + parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId) + if err != nil { + return err + } + if _, err := os.Stat(filepath.Join(parentPath, "Files")); err == nil { + // This is a legitimate parent layer (not the empty "-init" layer), + // so include it in the layer chain. + layerChain = []string{parentPath} + } + } + + layerChain = append(layerChain, parentChain...) + + if readOnly { + if err := hcsshim.CreateLayer(d.info, id, rPId); err != nil { + return err + } + } else { + var parentPath string + if len(layerChain) != 0 { + parentPath = layerChain[0] + } + + if err := hcsshim.CreateSandboxLayer(d.info, id, parentPath, layerChain); err != nil { + return err + } + + storageOptions, err := parseStorageOpt(storageOpt) + if err != nil { + return fmt.Errorf("Failed to parse storage options - %s", err) + } + + if storageOptions.size != 0 { + if err := hcsshim.ExpandSandboxSize(d.info, id, storageOptions.size); err != nil { + return err + } + } + } + + if _, err := os.Lstat(d.dir(parent)); err != nil { + if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil { + logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2) + } + return fmt.Errorf("Cannot create layer with missing parent %s: %s", parent, err) + } + + if err := d.setLayerChain(id, layerChain); err != nil { + if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil { + logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2) + } + return err + } + + return nil +} + +// dir returns the absolute path to the layer. +func (d *Driver) dir(id string) string { + return filepath.Join(d.info.HomeDir, filepath.Base(id)) +} + +// Remove unmounts and removes the dir information. +func (d *Driver) Remove(id string) error { + panicIfUsedByLcow() + rID, err := d.resolveID(id) + if err != nil { + return err + } + + // This retry loop is due to a bug in Windows (Internal bug #9432268) + // if GetContainers fails with ErrVmcomputeOperationInvalidState + // it is a transient error. Retry until it succeeds. + var computeSystems []hcsshim.ContainerProperties + retryCount := 0 + osv := system.GetOSVersion() + for { + // Get and terminate any template VMs that are currently using the layer. + // Note: It is unfortunate that we end up in the graphdrivers Remove() call + // for both containers and images, but the logic for template VMs is only + // needed for images - specifically we are looking to see if a base layer + // is in use by a template VM as a result of having started a Hyper-V + // container at some point. + // + // We have a retry loop for ErrVmcomputeOperationInvalidState and + // ErrVmcomputeOperationAccessIsDenied as there is a race condition + // in RS1 and RS2 building during enumeration when a silo is going away + // for example under it, in HCS. AccessIsDenied added to fix 30278. + // + // TODO @jhowardmsft - For RS3, we can remove the retries. Also consider + // using platform APIs (if available) to get this more succinctly. 
Also + // consider enhancing the Remove() interface to have context of why + // the remove is being called - that could improve efficiency by not + // enumerating compute systems during a remove of a container as it's + // not required. + computeSystems, err = hcsshim.GetContainers(hcsshim.ComputeSystemQuery{}) + if err != nil { + if (osv.Build < 15139) && + ((err == hcsshim.ErrVmcomputeOperationInvalidState) || (err == hcsshim.ErrVmcomputeOperationAccessIsDenied)) { + if retryCount >= 500 { + break + } + retryCount++ + time.Sleep(10 * time.Millisecond) + continue + } + return err + } + break + } + + for _, computeSystem := range computeSystems { + if strings.Contains(computeSystem.RuntimeImagePath, id) && computeSystem.IsRuntimeTemplate { + container, err := hcsshim.OpenContainer(computeSystem.ID) + if err != nil { + return err + } + defer container.Close() + err = container.Terminate() + if hcsshim.IsPending(err) { + err = container.Wait() + } else if hcsshim.IsAlreadyStopped(err) { + err = nil + } + + if err != nil { + return err + } + } + } + + layerPath := filepath.Join(d.info.HomeDir, rID) + tmpID := fmt.Sprintf("%s-removing", rID) + tmpLayerPath := filepath.Join(d.info.HomeDir, tmpID) + if err := os.Rename(layerPath, tmpLayerPath); err != nil && !os.IsNotExist(err) { + return err + } + if err := hcsshim.DestroyLayer(d.info, tmpID); err != nil { + logrus.Errorf("Failed to DestroyLayer %s: %s", id, err) + } + + return nil +} + +// Get returns the rootfs path for the id. This will mount the dir at its given path. +func (d *Driver) Get(id, mountLabel string) (string, error) { + panicIfUsedByLcow() + logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel) + var dir string + + rID, err := d.resolveID(id) + if err != nil { + return "", err + } + if count := d.ctr.Increment(rID); count > 1 { + return d.cache[rID], nil + } + + // Getting the layer paths must be done outside of the lock. + layerChain, err := d.getLayerChain(rID) + if err != nil { + d.ctr.Decrement(rID) + return "", err + } + + if err := hcsshim.ActivateLayer(d.info, rID); err != nil { + d.ctr.Decrement(rID) + return "", err + } + if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil { + d.ctr.Decrement(rID) + if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { + logrus.Warnf("Failed to Deactivate %s: %s", id, err) + } + return "", err + } + + mountPath, err := hcsshim.GetLayerMountPath(d.info, rID) + if err != nil { + d.ctr.Decrement(rID) + if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { + logrus.Warnf("Failed to Unprepare %s: %s", id, err) + } + if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { + logrus.Warnf("Failed to Deactivate %s: %s", id, err) + } + return "", err + } + d.cacheMu.Lock() + d.cache[rID] = mountPath + d.cacheMu.Unlock() + + // If the layer has a mount path, use that. Otherwise, use the + // folder path. + if mountPath != "" { + dir = mountPath + } else { + dir = d.dir(id) + } + + return dir, nil +} + +// Put adds a new layer to the driver. 
+func (d *Driver) Put(id string) error {
+ panicIfUsedByLcow()
+ logrus.Debugf("WindowsGraphDriver Put() id %s", id)
+
+ rID, err := d.resolveID(id)
+ if err != nil {
+ return err
+ }
+ if count := d.ctr.Decrement(rID); count > 0 {
+ return nil
+ }
+ d.cacheMu.Lock()
+ _, exists := d.cache[rID]
+ delete(d.cache, rID)
+ d.cacheMu.Unlock()
+
+ // If the cache was not populated, then the layer was left unprepared and deactivated
+ if !exists {
+ return nil
+ }
+
+ if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
+ return err
+ }
+ return hcsshim.DeactivateLayer(d.info, rID)
+}
+
+// Cleanup ensures the information the driver stores is properly removed.
+// We use this opportunity to clean up any -removing folders which may be
+// still left if the daemon was killed while it was removing a layer.
+func (d *Driver) Cleanup() error {
+ items, err := ioutil.ReadDir(d.info.HomeDir)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil
+ }
+ return err
+ }
+
+ // Note we don't return an error below - it's possible the files
+ // are locked. However, next time around after the daemon exits,
+ // we likely will be able to clean up successfully. Instead we log
+ // warnings if there are errors.
+ for _, item := range items {
+ if item.IsDir() && strings.HasSuffix(item.Name(), "-removing") {
+ if err := hcsshim.DestroyLayer(d.info, item.Name()); err != nil {
+ logrus.Warnf("Failed to cleanup %s: %s", item.Name(), err)
+ } else {
+ logrus.Infof("Cleaned up %s", item.Name())
+ }
+ }
+ }
+
+ return nil
+}
+
+// Diff produces an archive of the changes between the specified
+// layer and its parent layer which may be "".
+// The layer should be mounted when calling this function.
+func (d *Driver) Diff(id, parent string) (_ io.ReadCloser, err error) {
+ panicIfUsedByLcow()
+ rID, err := d.resolveID(id)
+ if err != nil {
+ return
+ }
+
+ layerChain, err := d.getLayerChain(rID)
+ if err != nil {
+ return
+ }
+
+ // The layer is assumed to be mounted (prepared); unprepare it so it can
+ // be exported, and re-prepare it once the export stream is closed.
+ if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
+ return nil, err
+ }
+ prepare := func() {
+ if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil {
+ logrus.Warnf("Failed to re-PrepareLayer %s: %s", rID, err)
+ }
+ }
+
+ arch, err := d.exportLayer(rID, layerChain)
+ if err != nil {
+ prepare()
+ return
+ }
+ return ioutils.NewReadCloserWrapper(arch, func() error {
+ err := arch.Close()
+ prepare()
+ return err
+ }), nil
+}
+
+// Changes produces a list of changes between the specified layer
+// and its parent layer. If parent is "", then all changes will be ADD changes.
+// The layer should not be mounted when calling this function.
+func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { + panicIfUsedByLcow() + rID, err := d.resolveID(id) + if err != nil { + return nil, err + } + parentChain, err := d.getLayerChain(rID) + if err != nil { + return nil, err + } + + if err := hcsshim.ActivateLayer(d.info, rID); err != nil { + return nil, err + } + defer func() { + if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { + logrus.Errorf("changes() failed to DeactivateLayer %s %s: %s", id, rID, err2) + } + }() + + var changes []archive.Change + err = winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error { + r, err := hcsshim.NewLayerReader(d.info, id, parentChain) + if err != nil { + return err + } + defer r.Close() + + for { + name, _, fileInfo, err := r.Next() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + name = filepath.ToSlash(name) + if fileInfo == nil { + changes = append(changes, archive.Change{Path: name, Kind: archive.ChangeDelete}) + } else { + // Currently there is no way to tell between an add and a modify. + changes = append(changes, archive.Change{Path: name, Kind: archive.ChangeModify}) + } + } + }) + if err != nil { + return nil, err + } + + return changes, nil +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +// The layer should not be mounted when calling this function +func (d *Driver) ApplyDiff(id, parent string, diff io.Reader) (int64, error) { + panicIfUsedByLcow() + var layerChain []string + if parent != "" { + rPId, err := d.resolveID(parent) + if err != nil { + return 0, err + } + parentChain, err := d.getLayerChain(rPId) + if err != nil { + return 0, err + } + parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId) + if err != nil { + return 0, err + } + layerChain = append(layerChain, parentPath) + layerChain = append(layerChain, parentChain...) + } + + size, err := d.importLayer(id, diff, layerChain) + if err != nil { + return 0, err + } + + if err = d.setLayerChain(id, layerChain); err != nil { + return 0, err + } + + return size, nil +} + +// DiffSize calculates the changes between the specified layer +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (d *Driver) DiffSize(id, parent string) (size int64, err error) { + panicIfUsedByLcow() + rPId, err := d.resolveID(parent) + if err != nil { + return + } + + changes, err := d.Changes(id, rPId) + if err != nil { + return + } + + layerFs, err := d.Get(id, "") + if err != nil { + return + } + defer d.Put(id) + + return archive.ChangesSize(layerFs, changes), nil +} + +// GetMetadata returns custom driver information. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + panicIfUsedByLcow() + m := make(map[string]string) + m["dir"] = d.dir(id) + return m, nil +} + +func writeTarFromLayer(r hcsshim.LayerReader, w io.Writer) error { + t := tar.NewWriter(w) + for { + name, size, fileInfo, err := r.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + if fileInfo == nil { + // Write a whiteout file. 
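+ // A deletion is encoded in the exported tar as an empty entry whose
+ // basename carries archive.WhiteoutPrefix (".wh."): removing dir/file.txt
+ // yields an entry named dir/.wh.file.txt. writeLayerFromTar below performs
+ // the reverse mapping and turns such entries back into removals on import.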
+ hdr := &tar.Header{ + Name: filepath.ToSlash(filepath.Join(filepath.Dir(name), archive.WhiteoutPrefix+filepath.Base(name))), + } + err := t.WriteHeader(hdr) + if err != nil { + return err + } + } else { + err = backuptar.WriteTarFileFromBackupStream(t, r, name, size, fileInfo) + if err != nil { + return err + } + } + } + return t.Close() +} + +// exportLayer generates an archive from a layer based on the given ID. +func (d *Driver) exportLayer(id string, parentLayerPaths []string) (io.ReadCloser, error) { + archive, w := io.Pipe() + go func() { + err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error { + r, err := hcsshim.NewLayerReader(d.info, id, parentLayerPaths) + if err != nil { + return err + } + + err = writeTarFromLayer(r, w) + cerr := r.Close() + if err == nil { + err = cerr + } + return err + }) + w.CloseWithError(err) + }() + + return archive, nil +} + +// writeBackupStreamFromTarAndSaveMutatedFiles reads data from a tar stream and +// writes it to a backup stream, and also saves any files that will be mutated +// by the import layer process to a backup location. +func writeBackupStreamFromTarAndSaveMutatedFiles(buf *bufio.Writer, w io.Writer, t *tar.Reader, hdr *tar.Header, root string) (nextHdr *tar.Header, err error) { + var bcdBackup *os.File + var bcdBackupWriter *winio.BackupFileWriter + if backupPath, ok := mutatedFiles[hdr.Name]; ok { + bcdBackup, err = os.Create(filepath.Join(root, backupPath)) + if err != nil { + return nil, err + } + defer func() { + cerr := bcdBackup.Close() + if err == nil { + err = cerr + } + }() + + bcdBackupWriter = winio.NewBackupFileWriter(bcdBackup, false) + defer func() { + cerr := bcdBackupWriter.Close() + if err == nil { + err = cerr + } + }() + + buf.Reset(io.MultiWriter(w, bcdBackupWriter)) + } else { + buf.Reset(w) + } + + defer func() { + ferr := buf.Flush() + if err == nil { + err = ferr + } + }() + + return backuptar.WriteBackupStreamFromTarFile(buf, t, hdr) +} + +func writeLayerFromTar(r io.Reader, w hcsshim.LayerWriter, root string) (int64, error) { + t := tar.NewReader(r) + hdr, err := t.Next() + totalSize := int64(0) + buf := bufio.NewWriter(nil) + for err == nil { + base := path.Base(hdr.Name) + if strings.HasPrefix(base, archive.WhiteoutPrefix) { + name := path.Join(path.Dir(hdr.Name), base[len(archive.WhiteoutPrefix):]) + err = w.Remove(filepath.FromSlash(name)) + if err != nil { + return 0, err + } + hdr, err = t.Next() + } else if hdr.Typeflag == tar.TypeLink { + err = w.AddLink(filepath.FromSlash(hdr.Name), filepath.FromSlash(hdr.Linkname)) + if err != nil { + return 0, err + } + hdr, err = t.Next() + } else { + var ( + name string + size int64 + fileInfo *winio.FileBasicInfo + ) + name, size, fileInfo, err = backuptar.FileInfoFromHeader(hdr) + if err != nil { + return 0, err + } + err = w.Add(filepath.FromSlash(name), fileInfo) + if err != nil { + return 0, err + } + hdr, err = writeBackupStreamFromTarAndSaveMutatedFiles(buf, w, t, hdr, root) + totalSize += size + } + } + if err != io.EOF { + return 0, err + } + return totalSize, nil +} + +// importLayer adds a new layer to the tag and graph store based on the given data. +func (d *Driver) importLayer(id string, layerData io.Reader, parentLayerPaths []string) (size int64, err error) { + if !noreexec { + cmd := reexec.Command(append([]string{"docker-windows-write-layer", d.info.HomeDir, id}, parentLayerPaths...)...) 
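+ // The layer tar is streamed through the child's stdin. The child re-invokes
+ // this binary, and reexec dispatches it to writeLayerReexec (registered via
+ // reexec.Register elsewhere in this driver), which writes the layer while
+ // holding the backup/restore privileges and prints the resulting size on
+ // stdout, which is parsed below.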
+ output := bytes.NewBuffer(nil)
+ cmd.Stdin = layerData
+ cmd.Stdout = output
+ cmd.Stderr = output
+
+ if err = cmd.Start(); err != nil {
+ return
+ }
+
+ if err = cmd.Wait(); err != nil {
+ return 0, fmt.Errorf("re-exec error: %v: output: %s", err, output)
+ }
+
+ return strconv.ParseInt(output.String(), 10, 64)
+ }
+ return writeLayer(layerData, d.info.HomeDir, id, parentLayerPaths...)
+}
+
+// writeLayerReexec is the re-exec entry point for writing a layer from a tar file
+func writeLayerReexec() {
+ size, err := writeLayer(os.Stdin, os.Args[1], os.Args[2], os.Args[3:]...)
+ if err != nil {
+ fmt.Fprint(os.Stderr, err)
+ os.Exit(1)
+ }
+ fmt.Fprint(os.Stdout, size)
+}
+
+// writeLayer writes a layer from a tar file.
+func writeLayer(layerData io.Reader, home string, id string, parentLayerPaths ...string) (int64, error) {
+ err := winio.EnableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege})
+ if err != nil {
+ return 0, err
+ }
+ if noreexec {
+ defer func() {
+ if err := winio.DisableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}); err != nil {
+ // This should never happen, but just in case when in debugging mode.
+ // See https://github.com/docker/docker/pull/28002#discussion_r86259241 for rationale.
+ panic("Failed to disable process privileges while in non re-exec mode")
+ }
+ }()
+ }
+
+ info := hcsshim.DriverInfo{
+ Flavour: filterDriver,
+ HomeDir: home,
+ }
+
+ w, err := hcsshim.NewLayerWriter(info, id, parentLayerPaths)
+ if err != nil {
+ return 0, err
+ }
+
+ size, err := writeLayerFromTar(layerData, w, filepath.Join(home, id))
+ if err != nil {
+ return 0, err
+ }
+
+ err = w.Close()
+ if err != nil {
+ return 0, err
+ }
+
+ return size, nil
+}
+
+// resolveID computes the layerID information based on the given id.
+func (d *Driver) resolveID(id string) (string, error) {
+ content, err := ioutil.ReadFile(filepath.Join(d.dir(id), "layerID"))
+ if os.IsNotExist(err) {
+ return id, nil
+ } else if err != nil {
+ return "", err
+ }
+ return string(content), nil
+}
+
+// setID stores the layerId on disk.
+func (d *Driver) setID(id, altID string) error {
+ return ioutil.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600)
+}
+
+// getLayerChain returns the layer chain information.
+func (d *Driver) getLayerChain(id string) ([]string, error) {
+ jPath := filepath.Join(d.dir(id), "layerchain.json")
+ content, err := ioutil.ReadFile(jPath)
+ if os.IsNotExist(err) {
+ return nil, nil
+ } else if err != nil {
+ return nil, fmt.Errorf("Unable to read layerchain file - %s", err)
+ }
+
+ var layerChain []string
+ err = json.Unmarshal(content, &layerChain)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to unmarshal layerchain json - %s", err)
+ }
+
+ return layerChain, nil
+}
+
+// setLayerChain stores the layer chain information on disk.
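getLayerChain above and the setLayerChain implementation that follows persist a layer's parent chain as a JSON string array in a layerchain.json file inside the layer directory. A self-contained sketch of that round-trip (the paths are made up for illustration):

package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
)

func main() {
	dir, err := ioutil.TempDir("", "layer")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	// Persist the chain the way setLayerChain does: a plain JSON string array.
	chain := []string{`C:\docker\windowsfilter\parent1`, `C:\docker\windowsfilter\parent2`}
	content, err := json.Marshal(chain)
	if err != nil {
		log.Fatal(err)
	}
	jPath := filepath.Join(dir, "layerchain.json")
	if err := ioutil.WriteFile(jPath, content, 0600); err != nil {
		log.Fatal(err)
	}

	// Read it back the way getLayerChain does.
	raw, err := ioutil.ReadFile(jPath)
	if err != nil {
		log.Fatal(err)
	}
	var restored []string
	if err := json.Unmarshal(raw, &restored); err != nil {
		log.Fatal(err)
	}
	fmt.Println(restored)
}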
+func (d *Driver) setLayerChain(id string, chain []string) error {
+ content, err := json.Marshal(&chain)
+ if err != nil {
+ return fmt.Errorf("Failed to marshal layerchain json - %s", err)
+ }
+
+ jPath := filepath.Join(d.dir(id), "layerchain.json")
+ err = ioutil.WriteFile(jPath, content, 0600)
+ if err != nil {
+ return fmt.Errorf("Unable to write layerchain file - %s", err)
+ }
+
+ return nil
+}
+
+type fileGetCloserWithBackupPrivileges struct {
+ path string
+}
+
+func (fg *fileGetCloserWithBackupPrivileges) Get(filename string) (io.ReadCloser, error) {
+ if backupPath, ok := mutatedFiles[filename]; ok {
+ return os.Open(filepath.Join(fg.path, backupPath))
+ }
+
+ var f *os.File
+ // Open the file while holding the Windows backup privilege. This ensures that the
+ // file can be opened even if the caller does not actually have access to it according
+ // to the security descriptor. Also use sequential file access to avoid depleting the
+ // standby list - Microsoft VSO Bug Tracker #9900466
+ err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
+ path := longpath.AddPrefix(filepath.Join(fg.path, filename))
+ p, err := windows.UTF16FromString(path)
+ if err != nil {
+ return err
+ }
+ const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN
+ h, err := windows.CreateFile(&p[0], windows.GENERIC_READ, windows.FILE_SHARE_READ, nil, windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS|fileFlagSequentialScan, 0)
+ if err != nil {
+ return &os.PathError{Op: "open", Path: path, Err: err}
+ }
+ f = os.NewFile(uintptr(h), path)
+ return nil
+ })
+ return f, err
+}
+
+func (fg *fileGetCloserWithBackupPrivileges) Close() error {
+ return nil
+}
+
+// DiffGetter returns a FileGetCloser that can read files from the directory that
+// contains files for the layer differences. Used for direct access for tar-split.
+func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
+ panicIfUsedByLcow()
+ id, err := d.resolveID(id)
+ if err != nil {
+ return nil, err
+ }
+
+ return &fileGetCloserWithBackupPrivileges{d.dir(id)}, nil
+}
+
+type storageOptions struct {
+ size uint64
+}
+
+func parseStorageOpt(storageOpt map[string]string) (*storageOptions, error) {
+ options := storageOptions{}
+
+ // Read size to change the block device size per container.
+ for key, val := range storageOpt {
+ key := strings.ToLower(key)
+ switch key {
+ case "size":
+ size, err := units.RAMInBytes(val)
+ if err != nil {
+ return nil, err
+ }
+ options.size = uint64(size)
+ default:
+ return nil, fmt.Errorf("Unknown storage option: %s", key)
+ }
+ }
+ return &options, nil
+}
diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/zfs/MAINTAINERS b/vendor/github.com/moby/moby/daemon/graphdriver/zfs/MAINTAINERS
new file mode 100644
index 000000000..9c270c541
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/graphdriver/zfs/MAINTAINERS
@@ -0,0 +1,2 @@
+Jörg Thalheim (@Mic92)
+Arthur Gautier (@baloose)
diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs.go b/vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs.go
new file mode 100644
index 000000000..a772cc9ee
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs.go
@@ -0,0 +1,420 @@
+// +build linux freebsd solaris
+
+package zfs
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "path"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/daemon/graphdriver"
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/mount"
+ "github.com/docker/docker/pkg/parsers"
+ zfs "github.com/mistifyio/go-zfs"
+ "github.com/opencontainers/selinux/go-selinux/label"
+ "golang.org/x/sys/unix"
+)
+
+type zfsOptions struct {
+ fsName string
+ mountPath string
+}
+
+func init() {
+ graphdriver.Register("zfs", Init)
+}
+
+// Logger returns a zfs logger implementation.
+type Logger struct{}
+
+// Log wraps log message from ZFS driver with a prefix '[zfs]'.
+func (*Logger) Log(cmd []string) {
+ logrus.Debugf("[zfs] %s", strings.Join(cmd, " "))
+}
+
+// Init returns a new ZFS driver.
+// It takes base mount path and an array of options which are represented as key value pairs.
+// Each option is in the form key=value. 'zfs.fsname' is expected to be a valid key in the options.
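Before the Init implementation that follows, a standalone sketch of the key=value option format it accepts. The real code delegates to parsers.ParseKeyValueOpt; this version only illustrates the splitting and the one recognized key:

package main

import (
	"fmt"
	"strings"
)

// parseKeyValue mimics the "key=value" splitting the driver relies on.
func parseKeyValue(opt string) (key, val string, err error) {
	parts := strings.SplitN(opt, "=", 2)
	if len(parts) != 2 {
		return "", "", fmt.Errorf("unable to parse key/value option: %s", opt)
	}
	return strings.ToLower(strings.TrimSpace(parts[0])), strings.TrimSpace(parts[1]), nil
}

func main() {
	for _, opt := range []string{"zfs.fsname=zroot/docker", "bogus"} {
		key, val, err := parseKeyValue(opt)
		if err != nil {
			fmt.Println(err)
			continue
		}
		fmt.Printf("%s -> %s\n", key, val) // zfs.fsname -> zroot/docker
	}
}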
+func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
+ var err error
+
+ if _, err := exec.LookPath("zfs"); err != nil {
+ logrus.Debugf("[zfs] zfs command is not available: %v", err)
+ return nil, graphdriver.ErrPrerequisites
+ }
+
+ file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 0600)
+ if err != nil {
+ logrus.Debugf("[zfs] cannot open /dev/zfs: %v", err)
+ return nil, graphdriver.ErrPrerequisites
+ }
+ defer file.Close()
+
+ options, err := parseOptions(opt)
+ if err != nil {
+ return nil, err
+ }
+ options.mountPath = base
+
+ rootdir := path.Dir(base)
+
+ if options.fsName == "" {
+ err = checkRootdirFs(rootdir)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if options.fsName == "" {
+ options.fsName, err = lookupZfsDataset(rootdir)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ zfs.SetLogger(new(Logger))
+
+ filesystems, err := zfs.Filesystems(options.fsName)
+ if err != nil {
+ return nil, fmt.Errorf("Cannot find root filesystem %s: %v", options.fsName, err)
+ }
+
+ filesystemsCache := make(map[string]bool, len(filesystems))
+ var rootDataset *zfs.Dataset
+ for _, fs := range filesystems {
+ if fs.Name == options.fsName {
+ rootDataset = fs
+ }
+ filesystemsCache[fs.Name] = true
+ }
+
+ if rootDataset == nil {
+ return nil, fmt.Errorf("BUG: zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName)
+ }
+
+ rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to get root uid/gid: %v", err)
+ }
+ if err := idtools.MkdirAllAs(base, 0700, rootUID, rootGID); err != nil {
+ return nil, fmt.Errorf("Failed to create '%s': %v", base, err)
+ }
+
+ if err := mount.MakePrivate(base); err != nil {
+ return nil, err
+ }
+ d := &Driver{
+ dataset: rootDataset,
+ options: options,
+ filesystemsCache: filesystemsCache,
+ uidMaps: uidMaps,
+ gidMaps: gidMaps,
+ ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()),
+ }
+ return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil
+}
+
+func parseOptions(opt []string) (zfsOptions, error) {
+ var options zfsOptions
+ options.fsName = ""
+ for _, option := range opt {
+ key, val, err := parsers.ParseKeyValueOpt(option)
+ if err != nil {
+ return options, err
+ }
+ key = strings.ToLower(key)
+ switch key {
+ case "zfs.fsname":
+ options.fsName = val
+ default:
+ return options, fmt.Errorf("Unknown option %s", key)
+ }
+ }
+ return options, nil
+}
+
+func lookupZfsDataset(rootdir string) (string, error) {
+ var stat unix.Stat_t
+ if err := unix.Stat(rootdir, &stat); err != nil {
+ return "", fmt.Errorf("Failed to access '%s': %s", rootdir, err)
+ }
+ wantedDev := stat.Dev
+
+ mounts, err := mount.GetMounts()
+ if err != nil {
+ return "", err
+ }
+ for _, m := range mounts {
+ if err := unix.Stat(m.Mountpoint, &stat); err != nil {
+ logrus.Debugf("[zfs] failed to stat '%s' while scanning for zfs mount: %v", m.Mountpoint, err)
+ continue // may fail on fuse file systems
+ }
+
+ if stat.Dev == wantedDev && m.Fstype == "zfs" {
+ return m.Source, nil
+ }
+ }
+
+ return "", fmt.Errorf("Failed to find zfs dataset mounted on '%s' in /proc/mounts", rootdir)
+}
+
+// Driver holds information about the driver, such as zfs dataset, options and cache.
+type Driver struct {
+ dataset *zfs.Dataset
+ options zfsOptions
+ sync.Mutex // protects filesystem cache against concurrent access
+ filesystemsCache map[string]bool
+ uidMaps []idtools.IDMap
+ gidMaps []idtools.IDMap
+ ctr *graphdriver.RefCounter
+}
+
+func (d *Driver) String() string {
+ return "zfs"
+}
+
+// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver.
+func (d *Driver) Cleanup() error {
+ return nil
+}
+
+// Status returns information about the ZFS filesystem. It returns a two dimensional array of information
+// such as pool name, dataset name, disk usage, parent quota and compression used.
+// Currently it returns 'Zpool', 'Zpool Health', 'Parent Dataset', 'Space Used By Parent',
+// 'Space Available', 'Parent Quota' and 'Compression'.
+func (d *Driver) Status() [][2]string {
+ parts := strings.Split(d.dataset.Name, "/")
+ pool, err := zfs.GetZpool(parts[0])
+
+ var poolName, poolHealth string
+ if err == nil {
+ poolName = pool.Name
+ poolHealth = pool.Health
+ } else {
+ poolName = fmt.Sprintf("error while getting pool information %v", err)
+ poolHealth = "not available"
+ }
+
+ quota := "no"
+ if d.dataset.Quota != 0 {
+ quota = strconv.FormatUint(d.dataset.Quota, 10)
+ }
+
+ return [][2]string{
+ {"Zpool", poolName},
+ {"Zpool Health", poolHealth},
+ {"Parent Dataset", d.dataset.Name},
+ {"Space Used By Parent", strconv.FormatUint(d.dataset.Used, 10)},
+ {"Space Available", strconv.FormatUint(d.dataset.Avail, 10)},
+ {"Parent Quota", quota},
+ {"Compression", d.dataset.Compression},
+ }
+}
+
+// GetMetadata returns image/container metadata related to graph driver
+func (d *Driver) GetMetadata(id string) (map[string]string, error) {
+ return map[string]string{
+ "Mountpoint": d.mountPath(id),
+ "Dataset": d.zfsPath(id),
+ }, nil
+}
+
+func (d *Driver) cloneFilesystem(name, parentName string) error {
+ snapshotName := fmt.Sprintf("%d", time.Now().Nanosecond())
+ parentDataset := zfs.Dataset{Name: parentName}
+ snapshot, err := parentDataset.Snapshot(snapshotName /*recursive */, false)
+ if err != nil {
+ return err
+ }
+
+ _, err = snapshot.Clone(name, map[string]string{"mountpoint": "legacy"})
+ if err == nil {
+ d.Lock()
+ d.filesystemsCache[name] = true
+ d.Unlock()
+ }
+
+ if err != nil {
+ snapshot.Destroy(zfs.DestroyDeferDeletion)
+ return err
+ }
+ return snapshot.Destroy(zfs.DestroyDeferDeletion)
+}
+
+func (d *Driver) zfsPath(id string) string {
+ return d.options.fsName + "/" + id
+}
+
+func (d *Driver) mountPath(id string) string {
+ return path.Join(d.options.mountPath, "graph", getMountpoint(id))
+}
+
+// CreateReadWrite creates a layer that is writable for use as a container
+// file system.
+func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
+ return d.Create(id, parent, opts)
+}
+
+// Create prepares the dataset and filesystem for the ZFS driver for the given id under the parent.
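cloneFilesystem above materializes a child dataset by snapshotting the parent and cloning the snapshot with mountpoint=legacy, then defer-destroying the snapshot. Before the Create implementation that follows, a sketch of the equivalent zfs(8) sequence, assuming made-up dataset names and a working zfs binary on PATH:

package main

import (
	"fmt"
	"os/exec"
)

func zfsCmd(args ...string) error {
	out, err := exec.Command("zfs", args...).CombinedOutput()
	if err != nil {
		return fmt.Errorf("zfs %v: %v: %s", args, err, out)
	}
	return nil
}

func main() {
	parent, child := "zroot/docker/parent", "zroot/docker/child"
	snap := parent + "@tmp"
	steps := [][]string{
		{"snapshot", snap},                                // freeze the parent's current state
		{"clone", "-o", "mountpoint=legacy", snap, child}, // writable copy-on-write child
		{"destroy", "-d", snap},                           // deferred destroy once unreferenced
	}
	for _, args := range steps {
		if err := zfsCmd(args...); err != nil {
			fmt.Println(err)
			return
		}
	}
}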
+func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + var storageOpt map[string]string + if opts != nil { + storageOpt = opts.StorageOpt + } + + err := d.create(id, parent, storageOpt) + if err == nil { + return nil + } + if zfsError, ok := err.(*zfs.Error); ok { + if !strings.HasSuffix(zfsError.Stderr, "dataset already exists\n") { + return err + } + // aborted build -> cleanup + } else { + return err + } + + dataset := zfs.Dataset{Name: d.zfsPath(id)} + if err := dataset.Destroy(zfs.DestroyRecursiveClones); err != nil { + return err + } + + // retry + return d.create(id, parent, storageOpt) +} + +func (d *Driver) create(id, parent string, storageOpt map[string]string) error { + name := d.zfsPath(id) + quota, err := parseStorageOpt(storageOpt) + if err != nil { + return err + } + if parent == "" { + mountoptions := map[string]string{"mountpoint": "legacy"} + fs, err := zfs.CreateFilesystem(name, mountoptions) + if err == nil { + err = setQuota(name, quota) + if err == nil { + d.Lock() + d.filesystemsCache[fs.Name] = true + d.Unlock() + } + } + return err + } + err = d.cloneFilesystem(name, d.zfsPath(parent)) + if err == nil { + err = setQuota(name, quota) + } + return err +} + +func parseStorageOpt(storageOpt map[string]string) (string, error) { + // Read size to change the disk quota per container + for k, v := range storageOpt { + key := strings.ToLower(k) + switch key { + case "size": + return v, nil + default: + return "0", fmt.Errorf("Unknown option %s", key) + } + } + return "0", nil +} + +func setQuota(name string, quota string) error { + if quota == "0" { + return nil + } + fs, err := zfs.GetDataset(name) + if err != nil { + return err + } + return fs.SetProperty("quota", quota) +} + +// Remove deletes the dataset, filesystem and the cache for the given id. +func (d *Driver) Remove(id string) error { + name := d.zfsPath(id) + dataset := zfs.Dataset{Name: name} + err := dataset.Destroy(zfs.DestroyRecursive) + if err == nil { + d.Lock() + delete(d.filesystemsCache, name) + d.Unlock() + } + return err +} + +// Get returns the mountpoint for the given id after creating the target directories if necessary. 
+func (d *Driver) Get(id, mountLabel string) (string, error) { + mountpoint := d.mountPath(id) + if count := d.ctr.Increment(mountpoint); count > 1 { + return mountpoint, nil + } + + filesystem := d.zfsPath(id) + options := label.FormatMountLabel("", mountLabel) + logrus.Debugf(`[zfs] mount("%s", "%s", "%s")`, filesystem, mountpoint, options) + + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + d.ctr.Decrement(mountpoint) + return "", err + } + // Create the target directories if they don't exist + if err := idtools.MkdirAllAs(mountpoint, 0755, rootUID, rootGID); err != nil { + d.ctr.Decrement(mountpoint) + return "", err + } + + if err := mount.Mount(filesystem, mountpoint, "zfs", options); err != nil { + d.ctr.Decrement(mountpoint) + return "", fmt.Errorf("error creating zfs mount of %s to %s: %v", filesystem, mountpoint, err) + } + + // this could be our first mount after creation of the filesystem, and the root dir may still have root + // permissions instead of the remapped root uid:gid (if user namespaces are enabled): + if err := os.Chown(mountpoint, rootUID, rootGID); err != nil { + mount.Unmount(mountpoint) + d.ctr.Decrement(mountpoint) + return "", fmt.Errorf("error modifying zfs mountpoint (%s) directory ownership: %v", mountpoint, err) + } + + return mountpoint, nil +} + +// Put removes the existing mountpoint for the given id if it exists. +func (d *Driver) Put(id string) error { + mountpoint := d.mountPath(id) + if count := d.ctr.Decrement(mountpoint); count > 0 { + return nil + } + mounted, err := graphdriver.Mounted(graphdriver.FsMagicZfs, mountpoint) + if err != nil || !mounted { + return err + } + + logrus.Debugf(`[zfs] unmount("%s")`, mountpoint) + + if err := mount.Unmount(mountpoint); err != nil { + return fmt.Errorf("error unmounting to %s: %v", mountpoint, err) + } + return nil +} + +// Exists checks to see if the cache entry exists for the given id. +func (d *Driver) Exists(id string) bool { + d.Lock() + defer d.Unlock() + return d.filesystemsCache[d.zfsPath(id)] == true +} diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs_freebsd.go b/vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs_freebsd.go new file mode 100644 index 000000000..e02012afe --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs_freebsd.go @@ -0,0 +1,38 @@ +package zfs + +import ( + "fmt" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" + "golang.org/x/sys/unix" +) + +func checkRootdirFs(rootdir string) error { + var buf unix.Statfs_t + if err := unix.Statfs(rootdir, &buf); err != nil { + return fmt.Errorf("Failed to access '%s': %s", rootdir, err) + } + + // on FreeBSD buf.Fstypename contains ['z', 'f', 's', 0 ... 
]
+ if (buf.Fstypename[0] != 122) || (buf.Fstypename[1] != 102) || (buf.Fstypename[2] != 115) || (buf.Fstypename[3] != 0) {
+ logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir)
+ return graphdriver.ErrPrerequisites
+ }
+
+ return nil
+}
+
+func getMountpoint(id string) string {
+ maxlen := 12
+
+ // we need to preserve filesystem suffix
+ suffix := strings.SplitN(id, "-", 2)
+
+ if len(suffix) > 1 {
+ return id[:maxlen] + "-" + suffix[1]
+ }
+
+ return id[:maxlen]
+}
diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs_linux.go b/vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs_linux.go
new file mode 100644
index 000000000..53aa4c8c6
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs_linux.go
@@ -0,0 +1,27 @@
+package zfs
+
+import (
+ "fmt"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/daemon/graphdriver"
+ "golang.org/x/sys/unix"
+)
+
+func checkRootdirFs(rootdir string) error {
+ var buf unix.Statfs_t
+ if err := unix.Statfs(rootdir, &buf); err != nil {
+ return fmt.Errorf("Failed to access '%s': %s", rootdir, err)
+ }
+
+ if graphdriver.FsMagic(buf.Type) != graphdriver.FsMagicZfs {
+ logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir)
+ return graphdriver.ErrPrerequisites
+ }
+
+ return nil
+}
+
+func getMountpoint(id string) string {
+ return id
+}
diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs_solaris.go b/vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs_solaris.go
new file mode 100644
index 000000000..bb4a85bd6
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs_solaris.go
@@ -0,0 +1,59 @@
+// +build solaris,cgo
+
+package zfs
+
+/*
+#include <sys/statvfs.h>
+#include <stdlib.h>
+
+static inline struct statvfs *getstatfs(char *s) {
+ struct statvfs *buf;
+ int err;
+ buf = (struct statvfs *)malloc(sizeof(struct statvfs));
+ err = statvfs(s, buf);
+ return buf;
+}
+*/
+import "C"
+import (
+ "path/filepath"
+ "strings"
+ "unsafe"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/daemon/graphdriver"
+)
+
+func checkRootdirFs(rootdir string) error {
+
+ cs := C.CString(filepath.Dir(rootdir))
+ buf := C.getstatfs(cs)
+
+ // on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ...
]
+ if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) ||
+ (buf.f_basetype[3] != 0) {
+ logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir)
+ C.free(unsafe.Pointer(buf))
+ C.free(unsafe.Pointer(cs))
+ return graphdriver.ErrPrerequisites
+ }
+
+ C.free(unsafe.Pointer(buf))
+ C.free(unsafe.Pointer(cs))
+ return nil
+}
+
+/* rootfs is introduced to comply with the OCI spec
+which states that the root filesystem must be mounted at /rootfs/ instead of /
+*/
+func getMountpoint(id string) string {
+ maxlen := 12
+
+ // we need to preserve filesystem suffix
+ suffix := strings.SplitN(id, "-", 2)
+
+ if len(suffix) > 1 {
+ return filepath.Join(id[:maxlen]+"-"+suffix[1], "rootfs", "root")
+ }
+
+ return filepath.Join(id[:maxlen], "rootfs", "root")
+}
diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs_test.go b/vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs_test.go
new file mode 100644
index 000000000..3e2292843
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs_test.go
@@ -0,0 +1,35 @@
+// +build linux
+
+package zfs
+
+import (
+ "testing"
+
+ "github.com/docker/docker/daemon/graphdriver/graphtest"
+)
+
+// This avoids creating a new driver for each test if all tests are run.
+// Make sure to put new tests between TestZfsSetup and TestZfsTeardown.
+func TestZfsSetup(t *testing.T) {
+ graphtest.GetDriver(t, "zfs")
+}
+
+func TestZfsCreateEmpty(t *testing.T) {
+ graphtest.DriverTestCreateEmpty(t, "zfs")
+}
+
+func TestZfsCreateBase(t *testing.T) {
+ graphtest.DriverTestCreateBase(t, "zfs")
+}
+
+func TestZfsCreateSnap(t *testing.T) {
+ graphtest.DriverTestCreateSnap(t, "zfs")
+}
+
+func TestZfsSetQuota(t *testing.T) {
+ graphtest.DriverTestSetQuota(t, "zfs")
+}
+
+func TestZfsTeardown(t *testing.T) {
+ graphtest.PutDriver(t)
+}
diff --git a/vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs_unsupported.go b/vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs_unsupported.go
new file mode 100644
index 000000000..ce8daadaf
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/graphdriver/zfs/zfs_unsupported.go
@@ -0,0 +1,11 @@
+// +build !linux,!freebsd,!solaris
+
+package zfs
+
+func checkRootdirFs(rootdir string) error {
+ return nil
+}
+
+func getMountpoint(id string) string {
+ return id
+}
diff --git a/vendor/github.com/moby/moby/daemon/health.go b/vendor/github.com/moby/moby/daemon/health.go
new file mode 100644
index 000000000..61b531484
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/health.go
@@ -0,0 +1,376 @@
+package daemon
+
+import (
+ "bytes"
+ "fmt"
+ "runtime"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/context"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/api/types"
+ containertypes "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/strslice"
+ "github.com/docker/docker/container"
+ "github.com/docker/docker/daemon/exec"
+)
+
+const (
+ // Longest healthcheck probe output message to store. Longer messages will be truncated.
+ maxOutputLen = 4096
+
+ // Default interval between probe runs (from the end of the first to the start of the second).
+ // Also the time before the first probe.
+ defaultProbeInterval = 30 * time.Second
+
+ // The maximum length of time a single probe run should take. If the probe takes longer
+ // than this, the check is considered to have failed.
+ defaultProbeTimeout = 30 * time.Second
+
+ // The time given for the container to start before the health check starts considering
+ // the container unstable.
Defaults to none. + defaultStartPeriod = 0 * time.Second + + // Default number of consecutive failures of the health check + // for the container to be considered unhealthy. + defaultProbeRetries = 3 + + // Maximum number of entries to record + maxLogEntries = 5 +) + +const ( + // Exit status codes that can be returned by the probe command. + + exitStatusHealthy = 0 // Container is healthy + exitStatusUnhealthy = 1 // Container is unhealthy +) + +// probe implementations know how to run a particular type of probe. +type probe interface { + // Perform one run of the check. Returns the exit code and an optional + // short diagnostic string. + run(context.Context, *Daemon, *container.Container) (*types.HealthcheckResult, error) +} + +// cmdProbe implements the "CMD" probe type. +type cmdProbe struct { + // Run the command with the system's default shell instead of execing it directly. + shell bool +} + +// exec the healthcheck command in the container. +// Returns the exit code and probe output (if any) +func (p *cmdProbe) run(ctx context.Context, d *Daemon, cntr *container.Container) (*types.HealthcheckResult, error) { + cmdSlice := strslice.StrSlice(cntr.Config.Healthcheck.Test)[1:] + if p.shell { + cmdSlice = append(getShell(cntr.Config), cmdSlice...) + } + entrypoint, args := d.getEntrypointAndArgs(strslice.StrSlice{}, cmdSlice) + execConfig := exec.NewConfig() + execConfig.OpenStdin = false + execConfig.OpenStdout = true + execConfig.OpenStderr = true + execConfig.ContainerID = cntr.ID + execConfig.DetachKeys = []byte{} + execConfig.Entrypoint = entrypoint + execConfig.Args = args + execConfig.Tty = false + execConfig.Privileged = false + execConfig.User = cntr.Config.User + + linkedEnv, err := d.setupLinkedContainers(cntr) + if err != nil { + return nil, err + } + execConfig.Env = container.ReplaceOrAppendEnvValues(cntr.CreateDaemonEnvironment(execConfig.Tty, linkedEnv), execConfig.Env) + + d.registerExecCommand(cntr, execConfig) + d.LogContainerEvent(cntr, "exec_create: "+execConfig.Entrypoint+" "+strings.Join(execConfig.Args, " ")) + + output := &limitedBuffer{} + err = d.ContainerExecStart(ctx, execConfig.ID, nil, output, output) + if err != nil { + return nil, err + } + info, err := d.getExecConfig(execConfig.ID) + if err != nil { + return nil, err + } + if info.ExitCode == nil { + return nil, fmt.Errorf("Healthcheck for container %s has no exit code!", cntr.ID) + } + // Note: Go's json package will handle invalid UTF-8 for us + out := output.String() + return &types.HealthcheckResult{ + End: time.Now(), + ExitCode: *info.ExitCode, + Output: out, + }, nil +} + +// Update the container's Status.Health struct based on the latest probe's result. +func handleProbeResult(d *Daemon, c *container.Container, result *types.HealthcheckResult, done chan struct{}) { + c.Lock() + defer c.Unlock() + + // probe may have been cancelled while waiting on lock. Ignore result then + select { + case <-done: + return + default: + } + + retries := c.Config.Healthcheck.Retries + if retries <= 0 { + retries = defaultProbeRetries + } + + h := c.State.Health + oldStatus := h.Status + + if len(h.Log) >= maxLogEntries { + h.Log = append(h.Log[len(h.Log)+1-maxLogEntries:], result) + } else { + h.Log = append(h.Log, result) + } + + if result.ExitCode == exitStatusHealthy { + h.FailingStreak = 0 + h.Status = types.Healthy + } else { // Failure (including invalid exit code) + shouldIncrementStreak := true + + // If the container is starting (i.e. 
we never had a successful health check) + // then we check if we are within the start period of the container in which + // case we do not increment the failure streak. + if h.Status == types.Starting { + startPeriod := timeoutWithDefault(c.Config.Healthcheck.StartPeriod, defaultStartPeriod) + timeSinceStart := result.Start.Sub(c.State.StartedAt) + + // If still within the start period, then don't increment failing streak. + if timeSinceStart < startPeriod { + shouldIncrementStreak = false + } + } + + if shouldIncrementStreak { + h.FailingStreak++ + + if h.FailingStreak >= retries { + h.Status = types.Unhealthy + } + } + // Else we're starting or healthy. Stay in that state. + } + + // replicate Health status changes + if err := c.CheckpointTo(d.containersReplica); err != nil { + // queries will be inconsistent until the next probe runs or other state mutations + // checkpoint the container + logrus.Errorf("Error replicating health state for container %s: %v", c.ID, err) + } + + if oldStatus != h.Status { + d.LogContainerEvent(c, "health_status: "+h.Status) + } +} + +// Run the container's monitoring thread until notified via "stop". +// There is never more than one monitor thread running per container at a time. +func monitor(d *Daemon, c *container.Container, stop chan struct{}, probe probe) { + probeTimeout := timeoutWithDefault(c.Config.Healthcheck.Timeout, defaultProbeTimeout) + probeInterval := timeoutWithDefault(c.Config.Healthcheck.Interval, defaultProbeInterval) + for { + select { + case <-stop: + logrus.Debugf("Stop healthcheck monitoring for container %s (received while idle)", c.ID) + return + case <-time.After(probeInterval): + logrus.Debugf("Running health check for container %s ...", c.ID) + startTime := time.Now() + ctx, cancelProbe := context.WithTimeout(context.Background(), probeTimeout) + results := make(chan *types.HealthcheckResult, 1) + go func() { + healthChecksCounter.Inc() + result, err := probe.run(ctx, d, c) + if err != nil { + healthChecksFailedCounter.Inc() + logrus.Warnf("Health check for container %s error: %v", c.ID, err) + results <- &types.HealthcheckResult{ + ExitCode: -1, + Output: err.Error(), + Start: startTime, + End: time.Now(), + } + } else { + result.Start = startTime + logrus.Debugf("Health check for container %s done (exitCode=%d)", c.ID, result.ExitCode) + results <- result + } + close(results) + }() + select { + case <-stop: + logrus.Debugf("Stop healthcheck monitoring for container %s (received while probing)", c.ID) + cancelProbe() + // Wait for probe to exit (it might take a while to respond to the TERM + // signal and we don't want dying probes to pile up). + <-results + return + case result := <-results: + handleProbeResult(d, c, result, stop) + // Stop timeout + cancelProbe() + case <-ctx.Done(): + logrus.Debugf("Health check for container %s taking too long", c.ID) + handleProbeResult(d, c, &types.HealthcheckResult{ + ExitCode: -1, + Output: fmt.Sprintf("Health check exceeded timeout (%v)", probeTimeout), + Start: startTime, + End: time.Now(), + }, stop) + cancelProbe() + // Wait for probe to exit (it might take a while to respond to the TERM + // signal and we don't want dying probes to pile up). + <-results + } + } + } +} + +// Get a suitable probe implementation for the container's healthcheck configuration. +// Nil will be returned if no healthcheck was configured or NONE was set. 
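The getProbe implementation that follows dispatches on the first element of Healthcheck.Test. A small self-contained sketch of the shapes it recognizes (the example commands are illustrative):

package main

import (
	"fmt"
	"strings"
)

func probeKind(test []string) string {
	if len(test) == 0 {
		return "no healthcheck configured"
	}
	switch test[0] {
	case "NONE":
		return "healthcheck disabled"
	case "CMD":
		return "exec argv directly: " + strings.Join(test[1:], " ")
	case "CMD-SHELL":
		return "run through the container's shell: " + strings.Join(test[1:], " ")
	default:
		return "unknown probe type"
	}
}

func main() {
	fmt.Println(probeKind([]string{"CMD", "curl", "-f", "http://localhost/"}))
	fmt.Println(probeKind([]string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"}))
	fmt.Println(probeKind([]string{"NONE"}))
}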
+func getProbe(c *container.Container) probe { + config := c.Config.Healthcheck + if config == nil || len(config.Test) == 0 { + return nil + } + switch config.Test[0] { + case "CMD": + return &cmdProbe{shell: false} + case "CMD-SHELL": + return &cmdProbe{shell: true} + default: + logrus.Warnf("Unknown healthcheck type '%s' (expected 'CMD') in container %s", config.Test[0], c.ID) + return nil + } +} + +// Ensure the health-check monitor is running or not, depending on the current +// state of the container. +// Called from monitor.go, with c locked. +func (d *Daemon) updateHealthMonitor(c *container.Container) { + h := c.State.Health + if h == nil { + return // No healthcheck configured + } + + probe := getProbe(c) + wantRunning := c.Running && !c.Paused && probe != nil + if wantRunning { + if stop := h.OpenMonitorChannel(); stop != nil { + go monitor(d, c, stop, probe) + } + } else { + h.CloseMonitorChannel() + } +} + +// Reset the health state for a newly-started, restarted or restored container. +// initHealthMonitor is called from monitor.go and we should never be running +// two instances at once. +// Called with c locked. +func (d *Daemon) initHealthMonitor(c *container.Container) { + // If no healthcheck is setup then don't init the monitor + if getProbe(c) == nil { + return + } + + // This is needed in case we're auto-restarting + d.stopHealthchecks(c) + + if h := c.State.Health; h != nil { + h.Status = types.Starting + h.FailingStreak = 0 + } else { + h := &container.Health{} + h.Status = types.Starting + c.State.Health = h + } + + d.updateHealthMonitor(c) +} + +// Called when the container is being stopped (whether because the health check is +// failing or for any other reason). +func (d *Daemon) stopHealthchecks(c *container.Container) { + h := c.State.Health + if h != nil { + h.CloseMonitorChannel() + } +} + +// Buffer up to maxOutputLen bytes. Further data is discarded. +type limitedBuffer struct { + buf bytes.Buffer + mu sync.Mutex + truncated bool // indicates that data has been lost +} + +// Append to limitedBuffer while there is room. +func (b *limitedBuffer) Write(data []byte) (int, error) { + b.mu.Lock() + defer b.mu.Unlock() + + bufLen := b.buf.Len() + dataLen := len(data) + keep := min(maxOutputLen-bufLen, dataLen) + if keep > 0 { + b.buf.Write(data[:keep]) + } + if keep < dataLen { + b.truncated = true + } + return dataLen, nil +} + +// The contents of the buffer, with "..." appended if it overflowed. +func (b *limitedBuffer) String() string { + b.mu.Lock() + defer b.mu.Unlock() + + out := b.buf.String() + if b.truncated { + out = out + "..." + } + return out +} + +// If configuredValue is zero, use defaultValue instead. 
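limitedBuffer above caps stored probe output at maxOutputLen bytes, reports the full length to the writer so callers never see a short write, and appends "..." when data was dropped. A self-contained sketch of the same pattern with a tiny cap, before the timeoutWithDefault helper that follows (the type name and limit are made up, and the mutex of the real type is omitted):

package main

import (
	"bytes"
	"fmt"
)

type cappedBuffer struct {
	buf       bytes.Buffer
	limit     int
	truncated bool
}

func (b *cappedBuffer) Write(p []byte) (int, error) {
	keep := b.limit - b.buf.Len()
	if keep > len(p) {
		keep = len(p)
	}
	if keep > 0 {
		b.buf.Write(p[:keep])
	}
	if keep < len(p) {
		b.truncated = true // data beyond the cap is silently dropped
	}
	return len(p), nil // report full length so writers don't error out
}

func (b *cappedBuffer) String() string {
	if b.truncated {
		return b.buf.String() + "..."
	}
	return b.buf.String()
}

func main() {
	b := &cappedBuffer{limit: 8}
	fmt.Fprint(b, "hello, healthcheck")
	fmt.Println(b.String()) // hello, h...
}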
+func timeoutWithDefault(configuredValue time.Duration, defaultValue time.Duration) time.Duration { + if configuredValue == 0 { + return defaultValue + } + return configuredValue +} + +func min(x, y int) int { + if x < y { + return x + } + return y +} + +func getShell(config *containertypes.Config) []string { + if len(config.Shell) != 0 { + return config.Shell + } + if runtime.GOOS != "windows" { + return []string{"/bin/sh", "-c"} + } + return []string{"cmd", "/S", "/C"} +} diff --git a/vendor/github.com/moby/moby/daemon/health_test.go b/vendor/github.com/moby/moby/daemon/health_test.go new file mode 100644 index 000000000..4fd89140d --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/health_test.go @@ -0,0 +1,154 @@ +package daemon + +import ( + "testing" + "time" + + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + eventtypes "github.com/docker/docker/api/types/events" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/events" +) + +func reset(c *container.Container) { + c.State = &container.State{} + c.State.Health = &container.Health{} + c.State.Health.Status = types.Starting +} + +func TestNoneHealthcheck(t *testing.T) { + c := &container.Container{ + ID: "container_id", + Name: "container_name", + Config: &containertypes.Config{ + Image: "image_name", + Healthcheck: &containertypes.HealthConfig{ + Test: []string{"NONE"}, + }, + }, + State: &container.State{}, + } + store, err := container.NewViewDB() + if err != nil { + t.Fatal(err) + } + daemon := &Daemon{ + containersReplica: store, + } + + daemon.initHealthMonitor(c) + if c.State.Health != nil { + t.Error("Expecting Health to be nil, but was not") + } +} + +// FIXME(vdemeester) This takes around 3s… This is *way* too long +func TestHealthStates(t *testing.T) { + e := events.New() + _, l, _ := e.Subscribe() + defer e.Evict(l) + + expect := func(expected string) { + select { + case event := <-l: + ev := event.(eventtypes.Message) + if ev.Status != expected { + t.Errorf("Expecting event %#v, but got %#v\n", expected, ev.Status) + } + case <-time.After(1 * time.Second): + t.Errorf("Expecting event %#v, but got nothing\n", expected) + } + } + + c := &container.Container{ + ID: "container_id", + Name: "container_name", + Config: &containertypes.Config{ + Image: "image_name", + }, + } + + store, err := container.NewViewDB() + if err != nil { + t.Fatal(err) + } + + daemon := &Daemon{ + EventsService: e, + containersReplica: store, + } + + c.Config.Healthcheck = &containertypes.HealthConfig{ + Retries: 1, + } + + reset(c) + + handleResult := func(startTime time.Time, exitCode int) { + handleProbeResult(daemon, c, &types.HealthcheckResult{ + Start: startTime, + End: startTime, + ExitCode: exitCode, + }, nil) + } + + // starting -> failed -> success -> failed + + handleResult(c.State.StartedAt.Add(1*time.Second), 1) + expect("health_status: unhealthy") + + handleResult(c.State.StartedAt.Add(2*time.Second), 0) + expect("health_status: healthy") + + handleResult(c.State.StartedAt.Add(3*time.Second), 1) + expect("health_status: unhealthy") + + // Test retries + + reset(c) + c.Config.Healthcheck.Retries = 3 + + handleResult(c.State.StartedAt.Add(20*time.Second), 1) + handleResult(c.State.StartedAt.Add(40*time.Second), 1) + if c.State.Health.Status != types.Starting { + t.Errorf("Expecting starting, but got %#v\n", c.State.Health.Status) + } + if c.State.Health.FailingStreak != 2 { + t.Errorf("Expecting FailingStreak=2, but got %d\n", c.State.Health.FailingStreak) + } 
+ handleResult(c.State.StartedAt.Add(60*time.Second), 1) + expect("health_status: unhealthy") + + handleResult(c.State.StartedAt.Add(80*time.Second), 0) + expect("health_status: healthy") + if c.State.Health.FailingStreak != 0 { + t.Errorf("Expecting FailingStreak=0, but got %d\n", c.State.Health.FailingStreak) + } + + // Test start period + + reset(c) + c.Config.Healthcheck.Retries = 2 + c.Config.Healthcheck.StartPeriod = 30 * time.Second + + handleResult(c.State.StartedAt.Add(20*time.Second), 1) + if c.State.Health.Status != types.Starting { + t.Errorf("Expecting starting, but got %#v\n", c.State.Health.Status) + } + if c.State.Health.FailingStreak != 0 { + t.Errorf("Expecting FailingStreak=0, but got %d\n", c.State.Health.FailingStreak) + } + handleResult(c.State.StartedAt.Add(50*time.Second), 1) + if c.State.Health.Status != types.Starting { + t.Errorf("Expecting starting, but got %#v\n", c.State.Health.Status) + } + if c.State.Health.FailingStreak != 1 { + t.Errorf("Expecting FailingStreak=1, but got %d\n", c.State.Health.FailingStreak) + } + handleResult(c.State.StartedAt.Add(80*time.Second), 0) + expect("health_status: healthy") + if c.State.Health.FailingStreak != 0 { + t.Errorf("Expecting FailingStreak=0, but got %d\n", c.State.Health.FailingStreak) + } +} diff --git a/vendor/github.com/moby/moby/daemon/image.go b/vendor/github.com/moby/moby/daemon/image.go new file mode 100644 index 000000000..a51049dbb --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/image.go @@ -0,0 +1,84 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/stringid" +) + +// ErrImageDoesNotExist is error returned when no image can be found for a reference. +type ErrImageDoesNotExist struct { + ref reference.Reference +} + +func (e ErrImageDoesNotExist) Error() string { + ref := e.ref + if named, ok := ref.(reference.Named); ok { + ref = reference.TagNameOnly(named) + } + return fmt.Sprintf("No such image: %s", reference.FamiliarString(ref)) +} + +// GetImageIDAndPlatform returns an image ID and platform corresponding to the image referred to by +// refOrID. 
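A rough, string-inspection-only sketch of the lookup order the GetImageIDAndPlatform implementation below follows: a bare digest is tried against each platform's image store by ID, a parseable name goes through the reference store, the deprecated repo:shortid form falls back to a search, and finally the raw string is tried as an ID prefix. The real code uses the github.com/docker/distribution/reference parsers; these heuristics only approximate them:

package main

import (
	"fmt"
	"strings"
)

func lookupPlan(refOrID string) string {
	switch {
	case strings.HasPrefix(refOrID, "sha256:"):
		return "bare digest: probe each platform's image store by ID"
	case strings.Contains(refOrID, "@sha256:"):
		return "canonical reference: resolve through the reference store"
	case strings.ContainsAny(refOrID, ":/"):
		return "name[:tag]: reference store, then the deprecated repo:shortid fallback"
	default:
		return "no separators: try imageStore.Search as an ID prefix"
	}
}

func main() {
	for _, s := range []string{
		"sha256:4a415e366388",
		"docker.io/library/alpine@sha256:4a415e366388",
		"alpine:3.6",
		"4a415e366388",
	} {
		fmt.Printf("%-45s -> %s\n", s, lookupPlan(s))
	}
}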
+func (daemon *Daemon) GetImageIDAndPlatform(refOrID string) (image.ID, string, error) { + ref, err := reference.ParseAnyReference(refOrID) + if err != nil { + return "", "", err + } + namedRef, ok := ref.(reference.Named) + if !ok { + digested, ok := ref.(reference.Digested) + if !ok { + return "", "", ErrImageDoesNotExist{ref} + } + id := image.IDFromDigest(digested.Digest()) + for platform := range daemon.stores { + if _, err = daemon.stores[platform].imageStore.Get(id); err == nil { + return id, platform, nil + } + } + return "", "", ErrImageDoesNotExist{ref} + } + + for platform := range daemon.stores { + if id, err := daemon.stores[platform].referenceStore.Get(namedRef); err == nil { + return image.IDFromDigest(id), platform, nil + } + } + + // deprecated: repo:shortid https://github.com/docker/docker/pull/799 + if tagged, ok := namedRef.(reference.Tagged); ok { + if tag := tagged.Tag(); stringid.IsShortID(stringid.TruncateID(tag)) { + for platform := range daemon.stores { + if id, err := daemon.stores[platform].imageStore.Search(tag); err == nil { + for _, storeRef := range daemon.stores[platform].referenceStore.References(id.Digest()) { + if storeRef.Name() == namedRef.Name() { + return id, platform, nil + } + } + } + } + } + } + + // Search based on ID + for platform := range daemon.stores { + if id, err := daemon.stores[platform].imageStore.Search(refOrID); err == nil { + return id, platform, nil + } + } + + return "", "", ErrImageDoesNotExist{ref} +} + +// GetImage returns an image corresponding to the image referred to by refOrID. +func (daemon *Daemon) GetImage(refOrID string) (*image.Image, error) { + imgID, platform, err := daemon.GetImageIDAndPlatform(refOrID) + if err != nil { + return nil, err + } + return daemon.stores[platform].imageStore.Get(imgID) +} diff --git a/vendor/github.com/moby/moby/daemon/image_delete.go b/vendor/github.com/moby/moby/daemon/image_delete.go new file mode 100644 index 000000000..4e228594b --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/image_delete.go @@ -0,0 +1,413 @@ +package daemon + +import ( + "fmt" + "strings" + "time" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/stringid" +) + +type conflictType int + +const ( + conflictDependentChild conflictType = (1 << iota) + conflictRunningContainer + conflictActiveReference + conflictStoppedContainer + conflictHard = conflictDependentChild | conflictRunningContainer + conflictSoft = conflictActiveReference | conflictStoppedContainer +) + +// ImageDelete deletes the image referenced by the given imageRef from this +// daemon. The given imageRef can be an image ID, ID prefix, or a repository +// reference (with an optional tag or digest, defaulting to the tag name +// "latest"). There is differing behavior depending on whether the given +// imageRef is a repository reference or not. +// +// If the given imageRef is a repository reference then that repository +// reference will be removed. However, if there exists any containers which +// were created using the same image reference then the repository reference +// cannot be removed unless either there are other repository references to the +// same image or force is true. 
Following removal of the repository reference, +// the referenced image itself will attempt to be deleted as described below +// but quietly, meaning any image delete conflicts will cause the image to not +// be deleted and the conflict will not be reported. +// +// There may be conflicts preventing deletion of an image and these conflicts +// are divided into two categories grouped by their severity: +// +// Hard Conflict: +// - a pull or build using the image. +// - any descendant image. +// - any running container using the image. +// +// Soft Conflict: +// - any stopped container using the image. +// - any repository tag or digest references to the image. +// +// The image cannot be removed if there are any hard conflicts and can be +// removed if there are soft conflicts only if force is true. +// +// If prune is true, ancestor images will each attempt to be deleted quietly, +// meaning any delete conflicts will cause the image to not be deleted and the +// conflict will not be reported. +// +// FIXME: remove ImageDelete's dependency on Daemon, then move to the graph +// package. This would require that we no longer need the daemon to determine +// whether images are being used by a stopped or running container. +func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.ImageDeleteResponseItem, error) { + start := time.Now() + records := []types.ImageDeleteResponseItem{} + + imgID, platform, err := daemon.GetImageIDAndPlatform(imageRef) + if err != nil { + return nil, daemon.imageNotExistToErrcode(err) + } + + repoRefs := daemon.stores[platform].referenceStore.References(imgID.Digest()) + + var removedRepositoryRef bool + if !isImageIDPrefix(imgID.String(), imageRef) { + // A repository reference was given and should be removed + // first. We can only remove this reference if either force is + // true, there are multiple repository references to this + // image, or there are no containers using the given reference. + if !force && isSingleReference(repoRefs) { + if container := daemon.getContainerUsingImage(imgID); container != nil { + // If we removed the repository reference then + // this image would remain "dangling" and since + // we really want to avoid that the client must + // explicitly force its removal. + err := fmt.Errorf("conflict: unable to remove repository reference %q (must force) - container %s is using its referenced image %s", imageRef, stringid.TruncateID(container.ID), stringid.TruncateID(imgID.String())) + return nil, errors.NewRequestConflictError(err) + } + } + + parsedRef, err := reference.ParseNormalizedNamed(imageRef) + if err != nil { + return nil, err + } + + parsedRef, err = daemon.removeImageRef(platform, parsedRef) + if err != nil { + return nil, err + } + + untaggedRecord := types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(parsedRef)} + + daemon.LogImageEvent(imgID.String(), imgID.String(), "untag") + records = append(records, untaggedRecord) + + repoRefs = daemon.stores[platform].referenceStore.References(imgID.Digest()) + + // If a tag reference was removed and the only remaining + // references to the same repository are digest references, + // then clean up those digest references. 
+ if _, isCanonical := parsedRef.(reference.Canonical); !isCanonical { + foundRepoTagRef := false + for _, repoRef := range repoRefs { + if _, repoRefIsCanonical := repoRef.(reference.Canonical); !repoRefIsCanonical && parsedRef.Name() == repoRef.Name() { + foundRepoTagRef = true + break + } + } + if !foundRepoTagRef { + // Remove canonical references from same repository + remainingRefs := []reference.Named{} + for _, repoRef := range repoRefs { + if _, repoRefIsCanonical := repoRef.(reference.Canonical); repoRefIsCanonical && parsedRef.Name() == repoRef.Name() { + if _, err := daemon.removeImageRef(platform, repoRef); err != nil { + return records, err + } + + untaggedRecord := types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(repoRef)} + records = append(records, untaggedRecord) + } else { + remainingRefs = append(remainingRefs, repoRef) + + } + } + repoRefs = remainingRefs + } + } + + // If it has remaining references then the untag finished the remove + if len(repoRefs) > 0 { + return records, nil + } + + removedRepositoryRef = true + } else { + // If an ID reference was given AND there is at most one tag + // reference to the image AND all references are within one + // repository, then remove all references. + if isSingleReference(repoRefs) { + c := conflictHard + if !force { + c |= conflictSoft &^ conflictActiveReference + } + if conflict := daemon.checkImageDeleteConflict(imgID, platform, c); conflict != nil { + return nil, conflict + } + + for _, repoRef := range repoRefs { + parsedRef, err := daemon.removeImageRef(platform, repoRef) + if err != nil { + return nil, err + } + + untaggedRecord := types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(parsedRef)} + + daemon.LogImageEvent(imgID.String(), imgID.String(), "untag") + records = append(records, untaggedRecord) + } + } + } + + if err := daemon.imageDeleteHelper(imgID, platform, &records, force, prune, removedRepositoryRef); err != nil { + return nil, err + } + + imageActions.WithValues("delete").UpdateSince(start) + + return records, nil +} + +// isSingleReference returns true when all references are from one repository +// and there is at most one tag. Returns false for empty input. +func isSingleReference(repoRefs []reference.Named) bool { + if len(repoRefs) <= 1 { + return len(repoRefs) == 1 + } + var singleRef reference.Named + canonicalRefs := map[string]struct{}{} + for _, repoRef := range repoRefs { + if _, isCanonical := repoRef.(reference.Canonical); isCanonical { + canonicalRefs[repoRef.Name()] = struct{}{} + } else if singleRef == nil { + singleRef = repoRef + } else { + return false + } + } + if singleRef == nil { + // Just use first canonical ref + singleRef = repoRefs[0] + } + _, ok := canonicalRefs[singleRef.Name()] + return len(canonicalRefs) == 1 && ok +} + +// isImageIDPrefix returns whether the given possiblePrefix is a prefix of the +// given imageID. +func isImageIDPrefix(imageID, possiblePrefix string) bool { + if strings.HasPrefix(imageID, possiblePrefix) { + return true + } + + if i := strings.IndexRune(imageID, ':'); i >= 0 { + return strings.HasPrefix(imageID[i+1:], possiblePrefix) + } + + return false +} + +// getContainerUsingImage returns a container that was created using the given +// imageID. Returns nil if there is no such container. 
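ImageDelete above composes a conflictType bitmask (hard conflicts always; soft conflicts unless force; sometimes with active references masked out) before calling checkImageDeleteConflict further below. A self-contained sketch of that bitmask logic, ahead of the getContainerUsingImage helper that follows; the constant names mirror the ones in this file:

package main

import "fmt"

type conflictType int

const (
	conflictDependentChild conflictType = 1 << iota
	conflictRunningContainer
	conflictActiveReference
	conflictStoppedContainer
	conflictHard = conflictDependentChild | conflictRunningContainer
	conflictSoft = conflictActiveReference | conflictStoppedContainer
)

func main() {
	force := false
	mask := conflictHard
	if !force {
		// The ID-reference branch above also masks out active references,
		// since the caller is about to remove them anyway.
		mask |= conflictSoft &^ conflictActiveReference
	}
	fmt.Println(mask&conflictRunningContainer != 0) // true: running containers always block
	fmt.Println(mask&conflictStoppedContainer != 0) // true: stopped containers block unless forced
	fmt.Println(mask&conflictActiveReference != 0)  // false: remaining tags alone do not block here
}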
+func (daemon *Daemon) getContainerUsingImage(imageID image.ID) *container.Container {
+	return daemon.containers.First(func(c *container.Container) bool {
+		return c.ImageID == imageID
+	})
+}
+
+// removeImageRef attempts to parse and remove the given image reference from
+// this daemon's store of repository tag/digest references. The given ref
+// must not be an image ID but a repository name followed by an optional tag
+// or digest reference. If the tag or digest is omitted, the default tag is
+// used. Returns the resolved image reference and an error.
+func (daemon *Daemon) removeImageRef(platform string, ref reference.Named) (reference.Named, error) {
+	ref = reference.TagNameOnly(ref)
+
+	// Ignore the boolean value returned; as far as we're concerned, this
+	// is an idempotent operation, and it's okay if the reference didn't
+	// exist in the first place.
+	_, err := daemon.stores[platform].referenceStore.Delete(ref)
+
+	return ref, err
+}
+
+// removeAllReferencesToImageID attempts to remove every reference to the given
+// imgID from this daemon's store of repository tag/digest references. Returns
+// on the first encountered error. Removed references are logged to this
+// daemon's event service. An "Untagged" types.ImageDeleteResponseItem is
+// appended to the given list of records for each removed reference.
+func (daemon *Daemon) removeAllReferencesToImageID(imgID image.ID, platform string, records *[]types.ImageDeleteResponseItem) error {
+	imageRefs := daemon.stores[platform].referenceStore.References(imgID.Digest())
+
+	for _, imageRef := range imageRefs {
+		parsedRef, err := daemon.removeImageRef(platform, imageRef)
+		if err != nil {
+			return err
+		}
+
+		untaggedRecord := types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(parsedRef)}
+
+		daemon.LogImageEvent(imgID.String(), imgID.String(), "untag")
+		*records = append(*records, untaggedRecord)
+	}
+
+	return nil
+}
+
+// imageDeleteConflict holds a soft or hard conflict and an associated error.
+// Implements the error interface.
+type imageDeleteConflict struct {
+	hard    bool
+	used    bool
+	imgID   image.ID
+	message string
+}
+
+func (idc *imageDeleteConflict) Error() string {
+	var forceMsg string
+	if idc.hard {
+		forceMsg = "cannot be forced"
+	} else {
+		forceMsg = "must be forced"
+	}
+
+	return fmt.Sprintf("conflict: unable to delete %s (%s) - %s", stringid.TruncateID(idc.imgID.String()), forceMsg, idc.message)
+}
+
+// imageDeleteHelper attempts to delete the given image from this daemon. If
+// the image has any hard delete conflicts (child images or running containers
+// using the image) then it cannot be deleted. If the image has any soft delete
+// conflicts (any tags/digests referencing the image or any stopped container
+// using the image) then it can only be deleted if force is true. If the delete
+// succeeds and prune is true, the parent images are also deleted if they do
+// not have any soft or hard delete conflicts themselves. Any deleted images
+// and untagged references are appended to the given records. If any error or
+// conflict is encountered, it will be returned immediately without deleting
+// the image. If quiet is true, any encountered conflicts will be ignored and
+// the function will return nil immediately without deleting the image.
+func (daemon *Daemon) imageDeleteHelper(imgID image.ID, platform string, records *[]types.ImageDeleteResponseItem, force, prune, quiet bool) error {
+	// First, determine if this image has any conflicts. Ignore soft conflicts
+	// if force is true.
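+	// Illustrative mask values (conflictType is a bitmask):
+	//   force=false: conflictHard | conflictSoft -> any conflict blocks the delete
+	//   force=true:  conflictHard only           -> soft conflicts are overridden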
+	c := conflictHard
+	if !force {
+		c |= conflictSoft
+	}
+	if conflict := daemon.checkImageDeleteConflict(imgID, platform, c); conflict != nil {
+		if quiet && (!daemon.imageIsDangling(imgID, platform) || conflict.used) {
+			// Quietly ignore the conflict unless the image is dangling and
+			// not in use, in which case the user should know about it.
+			return nil
+		}
+
+		// There was a conflict, and it's either a hard conflict OR we are not
+		// forcing deletion on soft conflicts.
+		return conflict
+	}
+
+	parent, err := daemon.stores[platform].imageStore.GetParent(imgID)
+	if err != nil {
+		// There may be no parent
+		parent = ""
+	}
+
+	// Delete all repository tag/digest references to this image.
+	if err := daemon.removeAllReferencesToImageID(imgID, platform, records); err != nil {
+		return err
+	}
+
+	removedLayers, err := daemon.stores[platform].imageStore.Delete(imgID)
+	if err != nil {
+		return err
+	}
+
+	daemon.LogImageEvent(imgID.String(), imgID.String(), "delete")
+	*records = append(*records, types.ImageDeleteResponseItem{Deleted: imgID.String()})
+	for _, removedLayer := range removedLayers {
+		*records = append(*records, types.ImageDeleteResponseItem{Deleted: removedLayer.ChainID.String()})
+	}
+
+	if !prune || parent == "" {
+		return nil
+	}
+
+	// We need to prune the parent image. This means deleting it if there are
+	// no tags/digests referencing it and no containers using it (either
+	// running or stopped). Do not force the prune, but do it quietly,
+	// stopping at (and silently ignoring) any conflicts encountered.
+	return daemon.imageDeleteHelper(parent, platform, records, false, true, true)
+}
+
+// checkImageDeleteConflict determines whether there are any conflicts
+// preventing deletion of the given image from this daemon. A hard conflict is
+// any image which has the given image as a parent or any running container
+// using the image. A soft conflict is any tag/digest referencing the given
+// image or any stopped container using the image. Only the conflict
+// categories selected by the given mask are checked.
+func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, platform string, mask conflictType) *imageDeleteConflict {
+	// Check if the image has any descendant images.
+	if mask&conflictDependentChild != 0 && len(daemon.stores[platform].imageStore.Children(imgID)) > 0 {
+		return &imageDeleteConflict{
+			hard:    true,
+			imgID:   imgID,
+			message: "image has dependent child images",
+		}
+	}
+
+	if mask&conflictRunningContainer != 0 {
+		// Check if any running container is using the image.
+		running := func(c *container.Container) bool {
+			return c.IsRunning() && c.ImageID == imgID
+		}
+		if container := daemon.containers.First(running); container != nil {
+			return &imageDeleteConflict{
+				imgID:   imgID,
+				hard:    true,
+				used:    true,
+				message: fmt.Sprintf("image is being used by running container %s", stringid.TruncateID(container.ID)),
+			}
+		}
+	}
+
+	// Check if any repository tags/digest reference this image.
+	if mask&conflictActiveReference != 0 && len(daemon.stores[platform].referenceStore.References(imgID.Digest())) > 0 {
+		return &imageDeleteConflict{
+			imgID:   imgID,
+			message: "image is referenced in multiple repositories",
+		}
+	}
+
+	if mask&conflictStoppedContainer != 0 {
+		// Check if any stopped containers reference this image.
+ stopped := func(c *container.Container) bool { + return !c.IsRunning() && c.ImageID == imgID + } + if container := daemon.containers.First(stopped); container != nil { + return &imageDeleteConflict{ + imgID: imgID, + used: true, + message: fmt.Sprintf("image is being used by stopped container %s", stringid.TruncateID(container.ID)), + } + } + } + + return nil +} + +// imageIsDangling returns whether the given image is "dangling" which means +// that there are no repository references to the given image and it has no +// child images. +func (daemon *Daemon) imageIsDangling(imgID image.ID, platform string) bool { + return !(len(daemon.stores[platform].referenceStore.References(imgID.Digest())) > 0 || len(daemon.stores[platform].imageStore.Children(imgID)) > 0) +} diff --git a/vendor/github.com/moby/moby/daemon/image_exporter.go b/vendor/github.com/moby/moby/daemon/image_exporter.go new file mode 100644 index 000000000..a7b0be64c --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/image_exporter.go @@ -0,0 +1,37 @@ +package daemon + +import ( + "io" + "runtime" + + "github.com/docker/docker/image/tarexport" + "github.com/docker/docker/pkg/system" +) + +// ExportImage exports a list of images to the given output stream. The +// exported images are archived into a tar when written to the output +// stream. All images with the given tag and all versions containing +// the same tag are exported. names is the set of tags to export, and +// outStream is the writer which the images are written to. +func (daemon *Daemon) ExportImage(names []string, outStream io.Writer) error { + // TODO @jhowardmsft LCOW. This will need revisiting later. + platform := runtime.GOOS + if system.LCOWSupported() { + platform = "linux" + } + imageExporter := tarexport.NewTarExporter(daemon.stores[platform].imageStore, daemon.stores[platform].layerStore, daemon.stores[platform].referenceStore, daemon) + return imageExporter.Save(names, outStream) +} + +// LoadImage uploads a set of images into the repository. This is the +// complement of ImageExport. The input stream is an uncompressed tar +// ball containing images and metadata. +func (daemon *Daemon) LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error { + // TODO @jhowardmsft LCOW. This will need revisiting later. + platform := runtime.GOOS + if system.LCOWSupported() { + platform = "linux" + } + imageExporter := tarexport.NewTarExporter(daemon.stores[platform].imageStore, daemon.stores[platform].layerStore, daemon.stores[platform].referenceStore, daemon) + return imageExporter.Load(inTar, outStream, quiet) +} diff --git a/vendor/github.com/moby/moby/daemon/image_history.go b/vendor/github.com/moby/moby/daemon/image_history.go new file mode 100644 index 000000000..c9e81554e --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/image_history.go @@ -0,0 +1,91 @@ +package daemon + +import ( + "fmt" + "runtime" + "time" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types/image" + "github.com/docker/docker/layer" +) + +// ImageHistory returns a slice of ImageHistory structures for the specified image +// name by walking the image lineage. 
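+// Entries are returned newest-first. A hedged usage sketch (hypothetical
+// reference):
+//
+//	items, err := daemon.ImageHistory("ubuntu:16.04")
+//	// items[0] describes the most recent layer; ancestor entries whose
+//	// image ID cannot be resolved keep the placeholder ID "<missing>".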
+func (daemon *Daemon) ImageHistory(name string) ([]*image.HistoryResponseItem, error) {
+	start := time.Now()
+	img, err := daemon.GetImage(name)
+	if err != nil {
+		return nil, err
+	}
+
+	// If the image OS isn't set, assume it's the host OS
+	platform := img.OS
+	if platform == "" {
+		platform = runtime.GOOS
+	}
+
+	history := []*image.HistoryResponseItem{}
+
+	layerCounter := 0
+	rootFS := *img.RootFS
+	rootFS.DiffIDs = nil
+
+	for _, h := range img.History {
+		var layerSize int64
+
+		if !h.EmptyLayer {
+			if len(img.RootFS.DiffIDs) <= layerCounter {
+				return nil, fmt.Errorf("too many non-empty layers in History section")
+			}
+
+			rootFS.Append(img.RootFS.DiffIDs[layerCounter])
+			l, err := daemon.stores[platform].layerStore.Get(rootFS.ChainID())
+			if err != nil {
+				return nil, err
+			}
+			layerSize, err = l.DiffSize()
+			layer.ReleaseAndLog(daemon.stores[platform].layerStore, l)
+			if err != nil {
+				return nil, err
+			}
+
+			layerCounter++
+		}
+
+		history = append([]*image.HistoryResponseItem{{
+			ID:        "<missing>",
+			Created:   h.Created.Unix(),
+			CreatedBy: h.CreatedBy,
+			Comment:   h.Comment,
+			Size:      layerSize,
+		}}, history...)
+	}
+
+	// Fill in image IDs and tags
+	histImg := img
+	id := img.ID()
+	for _, h := range history {
+		h.ID = id.String()
+
+		var tags []string
+		for _, r := range daemon.stores[platform].referenceStore.References(id.Digest()) {
+			if _, ok := r.(reference.NamedTagged); ok {
+				tags = append(tags, reference.FamiliarString(r))
+			}
+		}
+
+		h.Tags = tags
+
+		id = histImg.Parent
+		if id == "" {
+			break
+		}
+		histImg, err = daemon.GetImage(id.String())
+		if err != nil {
+			break
+		}
+	}
+	imageActions.WithValues("history").UpdateSince(start)
+	return history, nil
+}
diff --git a/vendor/github.com/moby/moby/daemon/image_inspect.go b/vendor/github.com/moby/moby/daemon/image_inspect.go
new file mode 100644
index 000000000..3baf265da
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/image_inspect.go
@@ -0,0 +1,96 @@
+package daemon
+
+import (
+	"runtime"
+	"time"
+
+	"github.com/docker/distribution/reference"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/layer"
+	"github.com/pkg/errors"
+)
+
+// LookupImage looks up an image by name and returns it as an ImageInspect
+// structure.
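+// A hedged usage sketch (hypothetical reference); name may be an image ID,
+// ID prefix, or repository reference:
+//
+//	info, err := daemon.LookupImage("alpine:3.6")
+//	// info.RepoTags and info.RepoDigests list the tag/digest references;
+//	// info.Size and info.VirtualSize both carry the flattened layer size.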
+func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) { + img, err := daemon.GetImage(name) + if err != nil { + return nil, errors.Wrapf(err, "no such image: %s", name) + } + + // If the image OS isn't set, assume it's the host OS + platform := img.OS + if platform == "" { + platform = runtime.GOOS + } + + refs := daemon.stores[platform].referenceStore.References(img.ID().Digest()) + repoTags := []string{} + repoDigests := []string{} + for _, ref := range refs { + switch ref.(type) { + case reference.NamedTagged: + repoTags = append(repoTags, reference.FamiliarString(ref)) + case reference.Canonical: + repoDigests = append(repoDigests, reference.FamiliarString(ref)) + } + } + + var size int64 + var layerMetadata map[string]string + layerID := img.RootFS.ChainID() + if layerID != "" { + l, err := daemon.stores[platform].layerStore.Get(layerID) + if err != nil { + return nil, err + } + defer layer.ReleaseAndLog(daemon.stores[platform].layerStore, l) + size, err = l.Size() + if err != nil { + return nil, err + } + + layerMetadata, err = l.Metadata() + if err != nil { + return nil, err + } + } + + comment := img.Comment + if len(comment) == 0 && len(img.History) > 0 { + comment = img.History[len(img.History)-1].Comment + } + + lastUpdated, err := daemon.stores[platform].imageStore.GetLastUpdated(img.ID()) + if err != nil { + return nil, err + } + + imageInspect := &types.ImageInspect{ + ID: img.ID().String(), + RepoTags: repoTags, + RepoDigests: repoDigests, + Parent: img.Parent.String(), + Comment: comment, + Created: img.Created.Format(time.RFC3339Nano), + Container: img.Container, + ContainerConfig: &img.ContainerConfig, + DockerVersion: img.DockerVersion, + Author: img.Author, + Config: img.Config, + Architecture: img.Architecture, + Os: platform, + OsVersion: img.OSVersion, + Size: size, + VirtualSize: size, // TODO: field unused, deprecate + RootFS: rootFSToAPIType(img.RootFS), + Metadata: types.ImageMetadata{ + LastTagTime: lastUpdated, + }, + } + + imageInspect.GraphDriver.Name = daemon.GraphDriverName(platform) + imageInspect.GraphDriver.Data = layerMetadata + + return imageInspect, nil +} diff --git a/vendor/github.com/moby/moby/daemon/image_pull.go b/vendor/github.com/moby/moby/daemon/image_pull.go new file mode 100644 index 000000000..abc81ec67 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/image_pull.go @@ -0,0 +1,126 @@ +package daemon + +import ( + "io" + "runtime" + "strings" + + dist "github.com/docker/distribution" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/distribution" + progressutils "github.com/docker/docker/distribution/utils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/registry" + "github.com/opencontainers/go-digest" + "golang.org/x/net/context" +) + +// PullImage initiates a pull operation. image is the repository name to pull, and +// tag may be either empty, or indicate a specific tag to pull. +func (daemon *Daemon) PullImage(ctx context.Context, image, tag, platform string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { + // Special case: "pull -a" may send an image name with a + // trailing :. This is ugly, but let's not break API + // compatibility. + image = strings.TrimSuffix(image, ":") + + ref, err := reference.ParseNormalizedNamed(image) + if err != nil { + return err + } + + if tag != "" { + // The "tag" could actually be a digest. 
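+		// e.g. "latest" becomes a tag reference, while a value of the
+		// form "sha256:<64 hex digits>" parses as a digest reference.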
+ var dgst digest.Digest + dgst, err = digest.Parse(tag) + if err == nil { + ref, err = reference.WithDigest(reference.TrimNamed(ref), dgst) + } else { + ref, err = reference.WithTag(ref, tag) + } + if err != nil { + return err + } + } + + return daemon.pullImageWithReference(ctx, ref, platform, metaHeaders, authConfig, outStream) +} + +func (daemon *Daemon) pullImageWithReference(ctx context.Context, ref reference.Named, platform string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { + // Include a buffer so that slow client connections don't affect + // transfer performance. + progressChan := make(chan progress.Progress, 100) + + writesDone := make(chan struct{}) + + ctx, cancelFunc := context.WithCancel(ctx) + + go func() { + progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan) + close(writesDone) + }() + + // Default to the host OS platform in case it hasn't been populated with an explicit value. + if platform == "" { + platform = runtime.GOOS + } + + imagePullConfig := &distribution.ImagePullConfig{ + Config: distribution.Config{ + MetaHeaders: metaHeaders, + AuthConfig: authConfig, + ProgressOutput: progress.ChanOutput(progressChan), + RegistryService: daemon.RegistryService, + ImageEventLogger: daemon.LogImageEvent, + MetadataStore: daemon.stores[platform].distributionMetadataStore, + ImageStore: distribution.NewImageConfigStoreFromStore(daemon.stores[platform].imageStore), + ReferenceStore: daemon.stores[platform].referenceStore, + }, + DownloadManager: daemon.downloadManager, + Schema2Types: distribution.ImageTypes, + Platform: platform, + } + + err := distribution.Pull(ctx, ref, imagePullConfig) + close(progressChan) + <-writesDone + return err +} + +// GetRepository returns a repository from the registry. 
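+// Endpoints from LookupPullEndpoints are tried in order; v1 endpoints are
+// skipped, and the first endpoint yielding a confirmed v2 repository wins.
+// The returned bool reports whether v2 was confirmed.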
+func (daemon *Daemon) GetRepository(ctx context.Context, ref reference.Named, authConfig *types.AuthConfig) (dist.Repository, bool, error) { + // get repository info + repoInfo, err := daemon.RegistryService.ResolveRepository(ref) + if err != nil { + return nil, false, err + } + // makes sure name is not empty or `scratch` + if err := distribution.ValidateRepoName(repoInfo.Name); err != nil { + return nil, false, err + } + + // get endpoints + endpoints, err := daemon.RegistryService.LookupPullEndpoints(reference.Domain(repoInfo.Name)) + if err != nil { + return nil, false, err + } + + // retrieve repository + var ( + confirmedV2 bool + repository dist.Repository + lastError error + ) + + for _, endpoint := range endpoints { + if endpoint.Version == registry.APIVersion1 { + continue + } + + repository, confirmedV2, lastError = distribution.NewV2Repository(ctx, repoInfo, endpoint, nil, authConfig, "pull") + if lastError == nil && confirmedV2 { + break + } + } + return repository, confirmedV2, lastError +} diff --git a/vendor/github.com/moby/moby/daemon/image_push.go b/vendor/github.com/moby/moby/daemon/image_push.go new file mode 100644 index 000000000..c2e5967b1 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/image_push.go @@ -0,0 +1,71 @@ +package daemon + +import ( + "io" + "runtime" + + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/distribution" + progressutils "github.com/docker/docker/distribution/utils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/system" + "golang.org/x/net/context" +) + +// PushImage initiates a push operation on the repository named localName. +func (daemon *Daemon) PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { + ref, err := reference.ParseNormalizedNamed(image) + if err != nil { + return err + } + if tag != "" { + // Push by digest is not supported, so only tags are supported. + ref, err = reference.WithTag(ref, tag) + if err != nil { + return err + } + } + + // Include a buffer so that slow client connections don't affect + // transfer performance. + progressChan := make(chan progress.Progress, 100) + + writesDone := make(chan struct{}) + + ctx, cancelFunc := context.WithCancel(ctx) + + go func() { + progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan) + close(writesDone) + }() + + // TODO @jhowardmsft LCOW Support. This will require revisiting. For now, hard-code. 
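+	// On an LCOW-capable daemon every pushed image is currently assumed to
+	// be a Linux image, so the "linux" store is selected unconditionally.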
+ platform := runtime.GOOS + if system.LCOWSupported() { + platform = "linux" + } + + imagePushConfig := &distribution.ImagePushConfig{ + Config: distribution.Config{ + MetaHeaders: metaHeaders, + AuthConfig: authConfig, + ProgressOutput: progress.ChanOutput(progressChan), + RegistryService: daemon.RegistryService, + ImageEventLogger: daemon.LogImageEvent, + MetadataStore: daemon.stores[platform].distributionMetadataStore, + ImageStore: distribution.NewImageConfigStoreFromStore(daemon.stores[platform].imageStore), + ReferenceStore: daemon.stores[platform].referenceStore, + }, + ConfigMediaType: schema2.MediaTypeImageConfig, + LayerStore: distribution.NewLayerProviderFromStore(daemon.stores[platform].layerStore), + TrustKey: daemon.trustKey, + UploadManager: daemon.uploadManager, + } + + err = distribution.Push(ctx, ref, imagePushConfig) + close(progressChan) + <-writesDone + return err +} diff --git a/vendor/github.com/moby/moby/daemon/image_tag.go b/vendor/github.com/moby/moby/daemon/image_tag.go new file mode 100644 index 000000000..5f28daed0 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/image_tag.go @@ -0,0 +1,40 @@ +package daemon + +import ( + "github.com/docker/distribution/reference" + "github.com/docker/docker/image" +) + +// TagImage creates the tag specified by newTag, pointing to the image named +// imageName (alternatively, imageName can also be an image ID). +func (daemon *Daemon) TagImage(imageName, repository, tag string) error { + imageID, platform, err := daemon.GetImageIDAndPlatform(imageName) + if err != nil { + return err + } + + newTag, err := reference.ParseNormalizedNamed(repository) + if err != nil { + return err + } + if tag != "" { + if newTag, err = reference.WithTag(reference.TrimNamed(newTag), tag); err != nil { + return err + } + } + + return daemon.TagImageWithReference(imageID, platform, newTag) +} + +// TagImageWithReference adds the given reference to the image ID provided. +func (daemon *Daemon) TagImageWithReference(imageID image.ID, platform string, newTag reference.Named) error { + if err := daemon.stores[platform].referenceStore.AddTag(newTag, imageID.Digest(), true); err != nil { + return err + } + + if err := daemon.stores[platform].imageStore.SetLastUpdated(imageID); err != nil { + return err + } + daemon.LogImageEvent(imageID.String(), reference.FamiliarString(newTag), "tag") + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/images.go b/vendor/github.com/moby/moby/daemon/images.go new file mode 100644 index 000000000..4baf70371 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/images.go @@ -0,0 +1,359 @@ +package daemon + +import ( + "encoding/json" + "fmt" + "runtime" + "sort" + "time" + + "github.com/pkg/errors" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/system" +) + +var acceptedImageFilterTags = map[string]bool{ + "dangling": true, + "label": true, + "before": true, + "since": true, + "reference": true, +} + +// byCreated is a temporary type used to sort a list of images by creation +// time. 
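+// Images below sorts with sort.Sort(sort.Reverse(byCreated(images))) to
+// produce a newest-first listing.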
+type byCreated []*types.ImageSummary

+func (r byCreated) Len() int           { return len(r) }
+func (r byCreated) Swap(i, j int)      { r[i], r[j] = r[j], r[i] }
+func (r byCreated) Less(i, j int) bool { return r[i].Created < r[j].Created }
+
+// Map returns a map of all images in the ImageStore
+func (daemon *Daemon) Map() map[image.ID]*image.Image {
+	// TODO @jhowardmsft LCOW. This will need work to enumerate the stores for all platforms.
+	platform := runtime.GOOS
+	if system.LCOWSupported() {
+		platform = "linux"
+	}
+	return daemon.stores[platform].imageStore.Map()
+}
+
+// Images returns a filtered list of images. imageFilters is a set of filter
+// arguments as interpreted by api/types/filters. The all argument controls
+// whether intermediate (untagged, non-head) images are included, and
+// withExtraAttrs additionally computes container counts and shared sizes.
+func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) {
+
+	// TODO @jhowardmsft LCOW. This will need work to enumerate the stores for all platforms.
+	platform := runtime.GOOS
+	if system.LCOWSupported() {
+		platform = "linux"
+	}
+
+	var (
+		allImages    map[image.ID]*image.Image
+		err          error
+		danglingOnly = false
+	)
+
+	if err := imageFilters.Validate(acceptedImageFilterTags); err != nil {
+		return nil, err
+	}
+
+	if imageFilters.Include("dangling") {
+		if imageFilters.ExactMatch("dangling", "true") {
+			danglingOnly = true
+		} else if !imageFilters.ExactMatch("dangling", "false") {
+			return nil, fmt.Errorf("Invalid filter 'dangling=%s'", imageFilters.Get("dangling"))
+		}
+	}
+	if danglingOnly {
+		allImages = daemon.stores[platform].imageStore.Heads()
+	} else {
+		allImages = daemon.stores[platform].imageStore.Map()
+	}
+
+	var beforeFilter, sinceFilter *image.Image
+	err = imageFilters.WalkValues("before", func(value string) error {
+		beforeFilter, err = daemon.GetImage(value)
+		return err
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	err = imageFilters.WalkValues("since", func(value string) error {
+		sinceFilter, err = daemon.GetImage(value)
+		return err
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	images := []*types.ImageSummary{}
+	var imagesMap map[*image.Image]*types.ImageSummary
+	var layerRefs map[layer.ChainID]int
+	var allLayers map[layer.ChainID]layer.Layer
+	var allContainers []*container.Container
+
+	for id, img := range allImages {
+		if beforeFilter != nil {
+			if img.Created.Equal(beforeFilter.Created) || img.Created.After(beforeFilter.Created) {
+				continue
+			}
+		}
+
+		if sinceFilter != nil {
+			if img.Created.Equal(sinceFilter.Created) || img.Created.Before(sinceFilter.Created) {
+				continue
+			}
+		}
+
+		if imageFilters.Include("label") {
+			// Very old images that do not have image.Config (or even labels)
+			if img.Config == nil {
+				continue
+			}
+			// We are now sure image.Config is not nil
+			if !imageFilters.MatchKVList("label", img.Config.Labels) {
+				continue
+			}
+		}
+
+		layerID := img.RootFS.ChainID()
+		var size int64
+		if layerID != "" {
+			l, err := daemon.stores[platform].layerStore.Get(layerID)
+			if err != nil {
+				// The layer may have been deleted between the call to `Map()` or
+				// `Heads()` and the call to `Get()`, so we just ignore this error
+				if err == layer.ErrLayerDoesNotExist {
+					continue
+				}
+				return nil, err
+			}
+
+			size, err = l.Size()
+			layer.ReleaseAndLog(daemon.stores[platform].layerStore, l)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		newImage := newImage(img, size)
+
+		for _, ref := range daemon.stores[platform].referenceStore.References(id.Digest()) {
+			if imageFilters.Include("reference") {
+				var found bool
+				var matchErr error
+				for _, pattern := range imageFilters.Get("reference") {
+					found, matchErr = reference.FamiliarMatch(pattern, ref)
+					if matchErr != nil {
+						return nil, matchErr
+					}
+				}
+				if !found {
+					continue
+				}
+			}
+			if _, ok := ref.(reference.Canonical); ok {
+				newImage.RepoDigests = append(newImage.RepoDigests, reference.FamiliarString(ref))
+			}
+			if _, ok := ref.(reference.NamedTagged); ok {
+				newImage.RepoTags = append(newImage.RepoTags, reference.FamiliarString(ref))
+			}
+		}
+		if newImage.RepoDigests == nil && newImage.RepoTags == nil {
+			if all || len(daemon.stores[platform].imageStore.Children(id)) == 0 {
+
+				if imageFilters.Include("dangling") && !danglingOnly {
+					// dangling=false case, so dangling images are not wanted
+					continue
+				}
+				if imageFilters.Include("reference") { // skip images with no references if filtering by reference
+					continue
+				}
+				newImage.RepoDigests = []string{"<none>@<none>"}
+				newImage.RepoTags = []string{"<none>:<none>"}
+			} else {
+				continue
+			}
+		} else if danglingOnly && len(newImage.RepoTags) > 0 {
+			continue
+		}
+
+		if withExtraAttrs {
+			// lazily init variables
+			if imagesMap == nil {
+				allContainers = daemon.List()
+				allLayers = daemon.stores[platform].layerStore.Map()
+				imagesMap = make(map[*image.Image]*types.ImageSummary)
+				layerRefs = make(map[layer.ChainID]int)
+			}
+
+			// Get container count
+			newImage.Containers = 0
+			for _, c := range allContainers {
+				if c.ImageID == id {
+					newImage.Containers++
+				}
+			}
+
+			// count layer references
+			rootFS := *img.RootFS
+			rootFS.DiffIDs = nil
+			for _, id := range img.RootFS.DiffIDs {
+				rootFS.Append(id)
+				chid := rootFS.ChainID()
+				layerRefs[chid]++
+				if _, ok := allLayers[chid]; !ok {
+					return nil, fmt.Errorf("layer %v was not found (corruption?)", chid)
+				}
+			}
+			imagesMap[img] = newImage
+		}
+
+		images = append(images, newImage)
+	}
+
+	if withExtraAttrs {
+		// Get Shared sizes
+		for img, newImage := range imagesMap {
+			rootFS := *img.RootFS
+			rootFS.DiffIDs = nil
+
+			newImage.SharedSize = 0
+			for _, id := range img.RootFS.DiffIDs {
+				rootFS.Append(id)
+				chid := rootFS.ChainID()
+
+				diffSize, err := allLayers[chid].DiffSize()
+				if err != nil {
+					return nil, err
+				}
+
+				if layerRefs[chid] > 1 {
+					newImage.SharedSize += diffSize
+				}
+			}
+		}
+	}
+
+	sort.Sort(sort.Reverse(byCreated(images)))
+
+	return images, nil
+}
+
+// SquashImage creates a new image with the diff of the specified image and the specified parent.
+// The new image contains only the layers from its parent plus one extra layer, which holds the diff of all the layers in between.
+// The existing image(s) are not destroyed.
+// If no parent is specified, a new image is created from the diff of all the specified image's layers merged into a single new layer with no parent.
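+//
+// A hedged usage sketch (hypothetical IDs):
+//
+//	newID, err := daemon.SquashImage("sha256:<image-id>", "sha256:<parent-id>")
+//	// newID names the squashed image; both input images are left intact.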
+func (daemon *Daemon) SquashImage(id, parent string) (string, error) { + + var ( + img *image.Image + err error + ) + for _, ds := range daemon.stores { + if img, err = ds.imageStore.Get(image.ID(id)); err == nil { + break + } + } + if err != nil { + return "", err + } + + var parentImg *image.Image + var parentChainID layer.ChainID + if len(parent) != 0 { + parentImg, err = daemon.stores[img.Platform()].imageStore.Get(image.ID(parent)) + if err != nil { + return "", errors.Wrap(err, "error getting specified parent layer") + } + parentChainID = parentImg.RootFS.ChainID() + } else { + rootFS := image.NewRootFS() + parentImg = &image.Image{RootFS: rootFS} + } + + l, err := daemon.stores[img.Platform()].layerStore.Get(img.RootFS.ChainID()) + if err != nil { + return "", errors.Wrap(err, "error getting image layer") + } + defer daemon.stores[img.Platform()].layerStore.Release(l) + + ts, err := l.TarStreamFrom(parentChainID) + if err != nil { + return "", errors.Wrapf(err, "error getting tar stream to parent") + } + defer ts.Close() + + newL, err := daemon.stores[img.Platform()].layerStore.Register(ts, parentChainID, layer.Platform(img.Platform())) + if err != nil { + return "", errors.Wrap(err, "error registering layer") + } + defer daemon.stores[img.Platform()].layerStore.Release(newL) + + var newImage image.Image + newImage = *img + newImage.RootFS = nil + + var rootFS image.RootFS + rootFS = *parentImg.RootFS + rootFS.DiffIDs = append(rootFS.DiffIDs, newL.DiffID()) + newImage.RootFS = &rootFS + + for i, hi := range newImage.History { + if i >= len(parentImg.History) { + hi.EmptyLayer = true + } + newImage.History[i] = hi + } + + now := time.Now() + var historyComment string + if len(parent) > 0 { + historyComment = fmt.Sprintf("merge %s to %s", id, parent) + } else { + historyComment = fmt.Sprintf("create new from %s", id) + } + + newImage.History = append(newImage.History, image.History{ + Created: now, + Comment: historyComment, + }) + newImage.Created = now + + b, err := json.Marshal(&newImage) + if err != nil { + return "", errors.Wrap(err, "error marshalling image config") + } + + newImgID, err := daemon.stores[img.Platform()].imageStore.Create(b) + if err != nil { + return "", errors.Wrap(err, "error creating new image after squash") + } + return string(newImgID), nil +} + +func newImage(image *image.Image, size int64) *types.ImageSummary { + newImage := new(types.ImageSummary) + newImage.ParentID = image.Parent.String() + newImage.ID = image.ID().String() + newImage.Created = image.Created.Unix() + newImage.Size = size + newImage.VirtualSize = size + newImage.SharedSize = -1 + newImage.Containers = -1 + if image.Config != nil { + newImage.Labels = image.Config.Labels + } + return newImage +} diff --git a/vendor/github.com/moby/moby/daemon/import.go b/vendor/github.com/moby/moby/daemon/import.go new file mode 100644 index 000000000..0409cd6bd --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/import.go @@ -0,0 +1,137 @@ +package daemon + +import ( + "encoding/json" + "io" + "net/http" + "net/url" + "runtime" + "strings" + "time" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder/dockerfile" + "github.com/docker/docker/builder/remotecontext" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + 
"github.com/pkg/errors" +) + +// ImportImage imports an image, getting the archived layer data either from +// inConfig (if src is "-"), or from a URI specified in src. Progress output is +// written to outStream. Repository and tag names can optionally be given in +// the repo and tag arguments, respectively. +func (daemon *Daemon) ImportImage(src string, repository, platform string, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error { + var ( + rc io.ReadCloser + resp *http.Response + newRef reference.Named + ) + + // Default the platform if not supplied. + if platform == "" { + platform = runtime.GOOS + } + + if repository != "" { + var err error + newRef, err = reference.ParseNormalizedNamed(repository) + if err != nil { + return err + } + if _, isCanonical := newRef.(reference.Canonical); isCanonical { + return errors.New("cannot import digest reference") + } + + if tag != "" { + newRef, err = reference.WithTag(newRef, tag) + if err != nil { + return err + } + } + } + + config, err := dockerfile.BuildFromConfig(&container.Config{}, changes) + if err != nil { + return err + } + if src == "-" { + rc = inConfig + } else { + inConfig.Close() + if len(strings.Split(src, "://")) == 1 { + src = "http://" + src + } + u, err := url.Parse(src) + if err != nil { + return err + } + + resp, err = remotecontext.GetWithStatusError(u.String()) + if err != nil { + return err + } + outStream.Write(streamformatter.FormatStatus("", "Downloading from %s", u)) + progressOutput := streamformatter.NewJSONProgressOutput(outStream, true) + rc = progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Importing") + } + + defer rc.Close() + if len(msg) == 0 { + msg = "Imported from " + src + } + + inflatedLayerData, err := archive.DecompressStream(rc) + if err != nil { + return err + } + l, err := daemon.stores[platform].layerStore.Register(inflatedLayerData, "", layer.Platform(platform)) + if err != nil { + return err + } + defer layer.ReleaseAndLog(daemon.stores[platform].layerStore, l) + + created := time.Now().UTC() + imgConfig, err := json.Marshal(&image.Image{ + V1Image: image.V1Image{ + DockerVersion: dockerversion.Version, + Config: config, + Architecture: runtime.GOARCH, + OS: platform, + Created: created, + Comment: msg, + }, + RootFS: &image.RootFS{ + Type: "layers", + DiffIDs: []layer.DiffID{l.DiffID()}, + }, + History: []image.History{{ + Created: created, + Comment: msg, + }}, + }) + if err != nil { + return err + } + + id, err := daemon.stores[platform].imageStore.Create(imgConfig) + if err != nil { + return err + } + + // FIXME: connect with commit code and call refstore directly + if newRef != nil { + if err := daemon.TagImageWithReference(id, platform, newRef); err != nil { + return err + } + } + + daemon.LogImageEvent(id.String(), id.String(), "import") + outStream.Write(streamformatter.FormatStatus("", id.String())) + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/info.go b/vendor/github.com/moby/moby/daemon/info.go new file mode 100644 index 000000000..1de899f79 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/info.go @@ -0,0 +1,190 @@ +package daemon + +import ( + "fmt" + "os" + "runtime" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli/debug" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/fileutils" + 
"github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/parsers/operatingsystem" + "github.com/docker/docker/pkg/platform" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/registry" + "github.com/docker/docker/volume/drivers" + "github.com/docker/go-connections/sockets" +) + +// SystemInfo returns information about the host server the daemon is running on. +func (daemon *Daemon) SystemInfo() (*types.Info, error) { + kernelVersion := "" + if kv, err := kernel.GetKernelVersion(); err != nil { + logrus.Warnf("Could not get kernel version: %v", err) + } else { + kernelVersion = kv.String() + } + + operatingSystem := "" + if s, err := operatingsystem.GetOperatingSystem(); err != nil { + logrus.Warnf("Could not get operating system name: %v", err) + } else { + operatingSystem = s + } + + // Don't do containerized check on Windows + if runtime.GOOS != "windows" { + if inContainer, err := operatingsystem.IsContainerized(); err != nil { + logrus.Errorf("Could not determine if daemon is containerized: %v", err) + operatingSystem += " (error determining if containerized)" + } else if inContainer { + operatingSystem += " (containerized)" + } + } + + meminfo, err := system.ReadMemInfo() + if err != nil { + logrus.Errorf("Could not read system memory info: %v", err) + meminfo = &system.MemInfo{} + } + + sysInfo := sysinfo.New(true) + cRunning, cPaused, cStopped := stateCtr.get() + + securityOptions := []string{} + if sysInfo.AppArmor { + securityOptions = append(securityOptions, "name=apparmor") + } + if sysInfo.Seccomp && supportsSeccomp { + profile := daemon.seccompProfilePath + if profile == "" { + profile = "default" + } + securityOptions = append(securityOptions, fmt.Sprintf("name=seccomp,profile=%s", profile)) + } + if selinuxEnabled() { + securityOptions = append(securityOptions, "name=selinux") + } + rootIDs := daemon.idMappings.RootPair() + if rootIDs.UID != 0 || rootIDs.GID != 0 { + securityOptions = append(securityOptions, "name=userns") + } + + imageCount := 0 + drivers := "" + for p, ds := range daemon.stores { + imageCount += len(ds.imageStore.Map()) + drivers += daemon.GraphDriverName(p) + if len(daemon.stores) > 1 { + drivers += fmt.Sprintf(" (%s) ", p) + } + } + + // TODO @jhowardmsft LCOW support. 
For now, hard-code the platform shown for the driver status + p := runtime.GOOS + if system.LCOWSupported() { + p = "linux" + } + + drivers = strings.TrimSpace(drivers) + v := &types.Info{ + ID: daemon.ID, + Containers: int(cRunning + cPaused + cStopped), + ContainersRunning: int(cRunning), + ContainersPaused: int(cPaused), + ContainersStopped: int(cStopped), + Images: imageCount, + Driver: drivers, + DriverStatus: daemon.stores[p].layerStore.DriverStatus(), + Plugins: daemon.showPluginsInfo(), + IPv4Forwarding: !sysInfo.IPv4ForwardingDisabled, + BridgeNfIptables: !sysInfo.BridgeNFCallIPTablesDisabled, + BridgeNfIP6tables: !sysInfo.BridgeNFCallIP6TablesDisabled, + Debug: debug.IsEnabled(), + NFd: fileutils.GetTotalUsedFds(), + NGoroutines: runtime.NumGoroutine(), + SystemTime: time.Now().Format(time.RFC3339Nano), + LoggingDriver: daemon.defaultLogConfig.Type, + CgroupDriver: daemon.getCgroupDriver(), + NEventsListener: daemon.EventsService.SubscribersCount(), + KernelVersion: kernelVersion, + OperatingSystem: operatingSystem, + IndexServerAddress: registry.IndexServer, + OSType: platform.OSType, + Architecture: platform.Architecture, + RegistryConfig: daemon.RegistryService.ServiceConfig(), + NCPU: sysinfo.NumCPU(), + MemTotal: meminfo.MemTotal, + GenericResources: daemon.genericResources, + DockerRootDir: daemon.configStore.Root, + Labels: daemon.configStore.Labels, + ExperimentalBuild: daemon.configStore.Experimental, + ServerVersion: dockerversion.Version, + ClusterStore: daemon.configStore.ClusterStore, + ClusterAdvertise: daemon.configStore.ClusterAdvertise, + HTTPProxy: sockets.GetProxyEnv("http_proxy"), + HTTPSProxy: sockets.GetProxyEnv("https_proxy"), + NoProxy: sockets.GetProxyEnv("no_proxy"), + LiveRestoreEnabled: daemon.configStore.LiveRestoreEnabled, + SecurityOptions: securityOptions, + Isolation: daemon.defaultIsolation, + } + + // Retrieve platform specific info + daemon.FillPlatformInfo(v, sysInfo) + + hostname := "" + if hn, err := os.Hostname(); err != nil { + logrus.Warnf("Could not get hostname: %v", err) + } else { + hostname = hn + } + v.Name = hostname + + return v, nil +} + +// SystemVersion returns version information about the daemon. +func (daemon *Daemon) SystemVersion() types.Version { + v := types.Version{ + Version: dockerversion.Version, + GitCommit: dockerversion.GitCommit, + MinAPIVersion: api.MinVersion, + GoVersion: runtime.Version(), + Os: runtime.GOOS, + Arch: runtime.GOARCH, + BuildTime: dockerversion.BuildTime, + Experimental: daemon.configStore.Experimental, + } + + kernelVersion := "" + if kv, err := kernel.GetKernelVersion(); err != nil { + logrus.Warnf("Could not get kernel version: %v", err) + } else { + kernelVersion = kv.String() + } + v.KernelVersion = kernelVersion + + return v +} + +func (daemon *Daemon) showPluginsInfo() types.PluginsInfo { + var pluginsInfo types.PluginsInfo + + pluginsInfo.Volume = volumedrivers.GetDriverList() + pluginsInfo.Network = daemon.GetNetworkDriverList() + // The authorization plugins are returned in the order they are + // used as they constitute a request/response modification chain. 
+ pluginsInfo.Authorization = daemon.configStore.AuthorizationPlugins + pluginsInfo.Log = logger.ListDrivers() + + return pluginsInfo +} diff --git a/vendor/github.com/moby/moby/daemon/info_unix.go b/vendor/github.com/moby/moby/daemon/info_unix.go new file mode 100644 index 000000000..e816f8dff --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/info_unix.go @@ -0,0 +1,93 @@ +// +build !windows + +package daemon + +import ( + "context" + "os/exec" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/sysinfo" + "github.com/pkg/errors" +) + +// FillPlatformInfo fills the platform related info. +func (daemon *Daemon) FillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo) { + v.MemoryLimit = sysInfo.MemoryLimit + v.SwapLimit = sysInfo.SwapLimit + v.KernelMemory = sysInfo.KernelMemory + v.OomKillDisable = sysInfo.OomKillDisable + v.CPUCfsPeriod = sysInfo.CPUCfsPeriod + v.CPUCfsQuota = sysInfo.CPUCfsQuota + v.CPUShares = sysInfo.CPUShares + v.CPUSet = sysInfo.Cpuset + v.Runtimes = daemon.configStore.GetAllRuntimes() + v.DefaultRuntime = daemon.configStore.GetDefaultRuntimeName() + v.InitBinary = daemon.configStore.GetInitPath() + + v.ContainerdCommit.Expected = dockerversion.ContainerdCommitID + if sv, err := daemon.containerd.GetServerVersion(context.Background()); err == nil { + v.ContainerdCommit.ID = sv.Revision + } else { + logrus.Warnf("failed to retrieve containerd version: %v", err) + v.ContainerdCommit.ID = "N/A" + } + + v.RuncCommit.Expected = dockerversion.RuncCommitID + defaultRuntimeBinary := daemon.configStore.GetRuntime(daemon.configStore.GetDefaultRuntimeName()).Path + if rv, err := exec.Command(defaultRuntimeBinary, "--version").Output(); err == nil { + parts := strings.Split(strings.TrimSpace(string(rv)), "\n") + if len(parts) == 3 { + parts = strings.Split(parts[1], ": ") + if len(parts) == 2 { + v.RuncCommit.ID = strings.TrimSpace(parts[1]) + } + } + + if v.RuncCommit.ID == "" { + logrus.Warnf("failed to retrieve %s version: unknown output format: %s", defaultRuntimeBinary, string(rv)) + v.RuncCommit.ID = "N/A" + } + } else { + logrus.Warnf("failed to retrieve %s version: %v", defaultRuntimeBinary, err) + v.RuncCommit.ID = "N/A" + } + + defaultInitBinary := daemon.configStore.GetInitPath() + if rv, err := exec.Command(defaultInitBinary, "--version").Output(); err == nil { + ver, err := parseInitVersion(string(rv)) + + if err != nil { + logrus.Warnf("failed to retrieve %s version: %s", defaultInitBinary, err) + } + v.InitCommit = ver + } else { + logrus.Warnf("failed to retrieve %s version: %s", defaultInitBinary, err) + v.InitCommit.ID = "N/A" + } +} + +// parseInitVersion parses a Tini version string, and extracts the version. 
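+// Expected input shapes (mirroring the tests below):
+//
+//	"tini version 0.13.0 - git.949e6fa" -> Commit{ID: "949e6fa"}
+//	"tini version 0.13.2"               -> Commit{ID: "v0.13.2"}
+//	anything else                       -> Commit{ID: "N/A"} and an error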
+func parseInitVersion(v string) (types.Commit, error) { + version := types.Commit{ID: "", Expected: dockerversion.InitCommitID} + parts := strings.Split(strings.TrimSpace(v), " - ") + + if len(parts) >= 2 { + gitParts := strings.Split(parts[1], ".") + if len(gitParts) == 2 && gitParts[0] == "git" { + version.ID = gitParts[1] + version.Expected = dockerversion.InitCommitID[0:len(version.ID)] + } + } + if version.ID == "" && strings.HasPrefix(parts[0], "tini version ") { + version.ID = "v" + strings.TrimPrefix(parts[0], "tini version ") + } + if version.ID == "" { + version.ID = "N/A" + return version, errors.Errorf("unknown output format: %s", v) + } + return version, nil +} diff --git a/vendor/github.com/moby/moby/daemon/info_unix_test.go b/vendor/github.com/moby/moby/daemon/info_unix_test.go new file mode 100644 index 000000000..ef36c40e3 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/info_unix_test.go @@ -0,0 +1,52 @@ +// +build !windows + +package daemon + +import ( + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/dockerversion" + "github.com/stretchr/testify/assert" +) + +func TestParseInitVersion(t *testing.T) { + tests := []struct { + version string + result types.Commit + invalid bool + }{ + { + version: "tini version 0.13.0 - git.949e6fa", + result: types.Commit{ID: "949e6fa", Expected: dockerversion.InitCommitID[0:7]}, + }, { + version: "tini version 0.13.0\n", + result: types.Commit{ID: "v0.13.0", Expected: dockerversion.InitCommitID}, + }, { + version: "tini version 0.13.2", + result: types.Commit{ID: "v0.13.2", Expected: dockerversion.InitCommitID}, + }, { + version: "tini version0.13.2", + result: types.Commit{ID: "N/A", Expected: dockerversion.InitCommitID}, + invalid: true, + }, { + version: "", + result: types.Commit{ID: "N/A", Expected: dockerversion.InitCommitID}, + invalid: true, + }, { + version: "hello world", + result: types.Commit{ID: "N/A", Expected: dockerversion.InitCommitID}, + invalid: true, + }, + } + + for _, test := range tests { + ver, err := parseInitVersion(string(test.version)) + if test.invalid { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + assert.Equal(t, test.result, ver) + } +} diff --git a/vendor/github.com/moby/moby/daemon/info_windows.go b/vendor/github.com/moby/moby/daemon/info_windows.go new file mode 100644 index 000000000..c700911eb --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/info_windows.go @@ -0,0 +1,10 @@ +package daemon + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/sysinfo" +) + +// FillPlatformInfo fills the platform related info. +func (daemon *Daemon) FillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo) { +} diff --git a/vendor/github.com/moby/moby/daemon/initlayer/setup_solaris.go b/vendor/github.com/moby/moby/daemon/initlayer/setup_solaris.go new file mode 100644 index 000000000..66d53f0ee --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/initlayer/setup_solaris.go @@ -0,0 +1,13 @@ +// +build solaris,cgo + +package initlayer + +// Setup populates a directory with mountpoints suitable +// for bind-mounting dockerinit into the container. The mountpoint is simply an +// empty file at /.dockerinit +// +// This extra layer is used by all containers as the top-most ro layer. It protects +// the container from unwanted side-effects on the rw layer. 
+func Setup(initLayer string, rootUID, rootGID int) error { + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/initlayer/setup_unix.go b/vendor/github.com/moby/moby/daemon/initlayer/setup_unix.go new file mode 100644 index 000000000..e26d3a05f --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/initlayer/setup_unix.go @@ -0,0 +1,69 @@ +// +build linux freebsd + +package initlayer + +import ( + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/idtools" + "golang.org/x/sys/unix" +) + +// Setup populates a directory with mountpoints suitable +// for bind-mounting things into the container. +// +// This extra layer is used by all containers as the top-most ro layer. It protects +// the container from unwanted side-effects on the rw layer. +func Setup(initLayer string, rootIDs idtools.IDPair) error { + for pth, typ := range map[string]string{ + "/dev/pts": "dir", + "/dev/shm": "dir", + "/proc": "dir", + "/sys": "dir", + "/.dockerenv": "file", + "/etc/resolv.conf": "file", + "/etc/hosts": "file", + "/etc/hostname": "file", + "/dev/console": "file", + "/etc/mtab": "/proc/mounts", + } { + parts := strings.Split(pth, "/") + prev := "/" + for _, p := range parts[1:] { + prev = filepath.Join(prev, p) + unix.Unlink(filepath.Join(initLayer, prev)) + } + + if _, err := os.Stat(filepath.Join(initLayer, pth)); err != nil { + if os.IsNotExist(err) { + if err := idtools.MkdirAllAndChownNew(filepath.Join(initLayer, filepath.Dir(pth)), 0755, rootIDs); err != nil { + return err + } + switch typ { + case "dir": + if err := idtools.MkdirAllAndChownNew(filepath.Join(initLayer, pth), 0755, rootIDs); err != nil { + return err + } + case "file": + f, err := os.OpenFile(filepath.Join(initLayer, pth), os.O_CREATE, 0755) + if err != nil { + return err + } + f.Chown(rootIDs.UID, rootIDs.GID) + f.Close() + default: + if err := os.Symlink(typ, filepath.Join(initLayer, pth)); err != nil { + return err + } + } + } else { + return err + } + } + } + + // Layer is ready to use, if it wasn't before. + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/initlayer/setup_windows.go b/vendor/github.com/moby/moby/daemon/initlayer/setup_windows.go new file mode 100644 index 000000000..2b22f58b5 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/initlayer/setup_windows.go @@ -0,0 +1,17 @@ +// +build windows + +package initlayer + +import ( + "github.com/docker/docker/pkg/idtools" +) + +// Setup populates a directory with mountpoints suitable +// for bind-mounting dockerinit into the container. The mountpoint is simply an +// empty file at /.dockerinit +// +// This extra layer is used by all containers as the top-most ro layer. It protects +// the container from unwanted side-effects on the rw layer. 
+func Setup(initLayer string, rootIDs idtools.IDPair) error { + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/inspect.go b/vendor/github.com/moby/moby/daemon/inspect.go new file mode 100644 index 000000000..fcdeb81ab --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/inspect.go @@ -0,0 +1,274 @@ +package daemon + +import ( + "fmt" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/api/types/versions/v1p20" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/network" + "github.com/docker/go-connections/nat" +) + +// ContainerInspect returns low-level information about a +// container. Returns an error if the container cannot be found, or if +// there is an error getting the data. +func (daemon *Daemon) ContainerInspect(name string, size bool, version string) (interface{}, error) { + switch { + case versions.LessThan(version, "1.20"): + return daemon.containerInspectPre120(name) + case versions.Equal(version, "1.20"): + return daemon.containerInspect120(name) + } + return daemon.ContainerInspectCurrent(name, size) +} + +// ContainerInspectCurrent returns low-level information about a +// container in a most recent api version. +func (daemon *Daemon) ContainerInspectCurrent(name string, size bool) (*types.ContainerJSON, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + container.Lock() + + base, err := daemon.getInspectData(container) + if err != nil { + container.Unlock() + return nil, err + } + + apiNetworks := make(map[string]*networktypes.EndpointSettings) + for name, epConf := range container.NetworkSettings.Networks { + if epConf.EndpointSettings != nil { + // We must make a copy of this pointer object otherwise it can race with other operations + apiNetworks[name] = epConf.EndpointSettings.Copy() + } + } + + mountPoints := container.GetMountPoints() + networkSettings := &types.NetworkSettings{ + NetworkSettingsBase: types.NetworkSettingsBase{ + Bridge: container.NetworkSettings.Bridge, + SandboxID: container.NetworkSettings.SandboxID, + HairpinMode: container.NetworkSettings.HairpinMode, + LinkLocalIPv6Address: container.NetworkSettings.LinkLocalIPv6Address, + LinkLocalIPv6PrefixLen: container.NetworkSettings.LinkLocalIPv6PrefixLen, + SandboxKey: container.NetworkSettings.SandboxKey, + SecondaryIPAddresses: container.NetworkSettings.SecondaryIPAddresses, + SecondaryIPv6Addresses: container.NetworkSettings.SecondaryIPv6Addresses, + }, + DefaultNetworkSettings: daemon.getDefaultNetworkSettings(container.NetworkSettings.Networks), + Networks: apiNetworks, + } + + ports := make(nat.PortMap, len(container.NetworkSettings.Ports)) + for k, pm := range container.NetworkSettings.Ports { + ports[k] = pm + } + networkSettings.NetworkSettingsBase.Ports = ports + + container.Unlock() + + if size { + sizeRw, sizeRootFs := daemon.getSize(base.ID) + base.SizeRw = &sizeRw + base.SizeRootFs = &sizeRootFs + } + + return &types.ContainerJSON{ + ContainerJSONBase: base, + Mounts: mountPoints, + Config: container.Config, + NetworkSettings: networkSettings, + }, nil +} + +// containerInspect120 serializes the master version of a container into a json type. 
+func (daemon *Daemon) containerInspect120(name string) (*v1p20.ContainerJSON, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + container.Lock() + defer container.Unlock() + + base, err := daemon.getInspectData(container) + if err != nil { + return nil, err + } + + mountPoints := container.GetMountPoints() + config := &v1p20.ContainerConfig{ + Config: container.Config, + MacAddress: container.Config.MacAddress, + NetworkDisabled: container.Config.NetworkDisabled, + ExposedPorts: container.Config.ExposedPorts, + VolumeDriver: container.HostConfig.VolumeDriver, + } + networkSettings := daemon.getBackwardsCompatibleNetworkSettings(container.NetworkSettings) + + return &v1p20.ContainerJSON{ + ContainerJSONBase: base, + Mounts: mountPoints, + Config: config, + NetworkSettings: networkSettings, + }, nil +} + +func (daemon *Daemon) getInspectData(container *container.Container) (*types.ContainerJSONBase, error) { + // make a copy to play with + hostConfig := *container.HostConfig + + children := daemon.children(container) + hostConfig.Links = nil // do not expose the internal structure + for linkAlias, child := range children { + hostConfig.Links = append(hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias)) + } + + // We merge the Ulimits from hostConfig with daemon default + daemon.mergeUlimits(&hostConfig) + + var containerHealth *types.Health + if container.State.Health != nil { + containerHealth = &types.Health{ + Status: container.State.Health.Status, + FailingStreak: container.State.Health.FailingStreak, + Log: append([]*types.HealthcheckResult{}, container.State.Health.Log...), + } + } + + containerState := &types.ContainerState{ + Status: container.State.StateString(), + Running: container.State.Running, + Paused: container.State.Paused, + Restarting: container.State.Restarting, + OOMKilled: container.State.OOMKilled, + Dead: container.State.Dead, + Pid: container.State.Pid, + ExitCode: container.State.ExitCode(), + Error: container.State.ErrorMsg, + StartedAt: container.State.StartedAt.Format(time.RFC3339Nano), + FinishedAt: container.State.FinishedAt.Format(time.RFC3339Nano), + Health: containerHealth, + } + + contJSONBase := &types.ContainerJSONBase{ + ID: container.ID, + Created: container.Created.Format(time.RFC3339Nano), + Path: container.Path, + Args: container.Args, + State: containerState, + Image: container.ImageID.String(), + LogPath: container.LogPath, + Name: container.Name, + RestartCount: container.RestartCount, + Driver: container.Driver, + Platform: container.Platform, + MountLabel: container.MountLabel, + ProcessLabel: container.ProcessLabel, + ExecIDs: container.GetExecIDs(), + HostConfig: &hostConfig, + } + + // Now set any platform-specific fields + contJSONBase = setPlatformSpecificContainerFields(container, contJSONBase) + + contJSONBase.GraphDriver.Name = container.Driver + + graphDriverData, err := container.RWLayer.Metadata() + // If container is marked as Dead, the container's graphdriver metadata + // could have been removed, it will cause error if we try to get the metadata, + // we can ignore the error if the container is dead. + if err != nil && !container.Dead { + return nil, err + } + contJSONBase.GraphDriver.Data = graphDriverData + + return contJSONBase, nil +} + +// ContainerExecInspect returns low-level information about the exec +// command. An error is returned if the exec cannot be found. 
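+// The exec config is looked up in the daemon's exec store and cross-checked
+// against the container store; a missing container also yields "not found".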
+func (daemon *Daemon) ContainerExecInspect(id string) (*backend.ExecInspect, error) { + e := daemon.execCommands.Get(id) + if e == nil { + return nil, errExecNotFound(id) + } + + if container := daemon.containers.Get(e.ContainerID); container == nil { + return nil, errExecNotFound(id) + } + + pc := inspectExecProcessConfig(e) + + return &backend.ExecInspect{ + ID: e.ID, + Running: e.Running, + ExitCode: e.ExitCode, + ProcessConfig: pc, + OpenStdin: e.OpenStdin, + OpenStdout: e.OpenStdout, + OpenStderr: e.OpenStderr, + CanRemove: e.CanRemove, + ContainerID: e.ContainerID, + DetachKeys: e.DetachKeys, + Pid: e.Pid, + }, nil +} + +// VolumeInspect looks up a volume by name. An error is returned if +// the volume cannot be found. +func (daemon *Daemon) VolumeInspect(name string) (*types.Volume, error) { + v, err := daemon.volumes.Get(name) + if err != nil { + return nil, err + } + apiV := volumeToAPIType(v) + apiV.Mountpoint = v.Path() + apiV.Status = v.Status() + return apiV, nil +} + +func (daemon *Daemon) getBackwardsCompatibleNetworkSettings(settings *network.Settings) *v1p20.NetworkSettings { + result := &v1p20.NetworkSettings{ + NetworkSettingsBase: types.NetworkSettingsBase{ + Bridge: settings.Bridge, + SandboxID: settings.SandboxID, + HairpinMode: settings.HairpinMode, + LinkLocalIPv6Address: settings.LinkLocalIPv6Address, + LinkLocalIPv6PrefixLen: settings.LinkLocalIPv6PrefixLen, + Ports: settings.Ports, + SandboxKey: settings.SandboxKey, + SecondaryIPAddresses: settings.SecondaryIPAddresses, + SecondaryIPv6Addresses: settings.SecondaryIPv6Addresses, + }, + DefaultNetworkSettings: daemon.getDefaultNetworkSettings(settings.Networks), + } + + return result +} + +// getDefaultNetworkSettings creates the deprecated structure that holds the information +// about the bridge network for a container. +func (daemon *Daemon) getDefaultNetworkSettings(networks map[string]*network.EndpointSettings) types.DefaultNetworkSettings { + var settings types.DefaultNetworkSettings + + if defaultNetwork, ok := networks["bridge"]; ok && defaultNetwork.EndpointSettings != nil { + settings.EndpointID = defaultNetwork.EndpointID + settings.Gateway = defaultNetwork.Gateway + settings.GlobalIPv6Address = defaultNetwork.GlobalIPv6Address + settings.GlobalIPv6PrefixLen = defaultNetwork.GlobalIPv6PrefixLen + settings.IPAddress = defaultNetwork.IPAddress + settings.IPPrefixLen = defaultNetwork.IPPrefixLen + settings.IPv6Gateway = defaultNetwork.IPv6Gateway + settings.MacAddress = defaultNetwork.MacAddress + } + return settings +} diff --git a/vendor/github.com/moby/moby/daemon/inspect_solaris.go b/vendor/github.com/moby/moby/daemon/inspect_solaris.go new file mode 100644 index 000000000..0b275c141 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/inspect_solaris.go @@ -0,0 +1,27 @@ +package daemon + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/versions/v1p19" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" +) + +// This sets platform-specific fields +func setPlatformSpecificContainerFields(container *container.Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase { + return contJSONBase +} + +// containerInspectPre120 get containers for pre 1.20 APIs. 
+func (daemon *Daemon) containerInspectPre120(name string) (*v1p19.ContainerJSON, error) { + return &v1p19.ContainerJSON{}, nil +} + +func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig { + return &backend.ExecProcessConfig{ + Tty: e.Tty, + Entrypoint: e.Entrypoint, + Arguments: e.Args, + } +} diff --git a/vendor/github.com/moby/moby/daemon/inspect_unix.go b/vendor/github.com/moby/moby/daemon/inspect_unix.go new file mode 100644 index 000000000..bd28481e6 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/inspect_unix.go @@ -0,0 +1,75 @@ +// +build !windows,!solaris + +package daemon + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/versions/v1p19" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" +) + +// This sets platform-specific fields +func setPlatformSpecificContainerFields(container *container.Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase { + contJSONBase.AppArmorProfile = container.AppArmorProfile + contJSONBase.ResolvConfPath = container.ResolvConfPath + contJSONBase.HostnamePath = container.HostnamePath + contJSONBase.HostsPath = container.HostsPath + + return contJSONBase +} + +// containerInspectPre120 gets containers for pre 1.20 APIs. +func (daemon *Daemon) containerInspectPre120(name string) (*v1p19.ContainerJSON, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + container.Lock() + defer container.Unlock() + + base, err := daemon.getInspectData(container) + if err != nil { + return nil, err + } + + volumes := make(map[string]string) + volumesRW := make(map[string]bool) + for _, m := range container.MountPoints { + volumes[m.Destination] = m.Path() + volumesRW[m.Destination] = m.RW + } + + config := &v1p19.ContainerConfig{ + Config: container.Config, + MacAddress: container.Config.MacAddress, + NetworkDisabled: container.Config.NetworkDisabled, + ExposedPorts: container.Config.ExposedPorts, + VolumeDriver: container.HostConfig.VolumeDriver, + Memory: container.HostConfig.Memory, + MemorySwap: container.HostConfig.MemorySwap, + CPUShares: container.HostConfig.CPUShares, + CPUSet: container.HostConfig.CpusetCpus, + } + networkSettings := daemon.getBackwardsCompatibleNetworkSettings(container.NetworkSettings) + + return &v1p19.ContainerJSON{ + ContainerJSONBase: base, + Volumes: volumes, + VolumesRW: volumesRW, + Config: config, + NetworkSettings: networkSettings, + }, nil +} + +func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig { + return &backend.ExecProcessConfig{ + Tty: e.Tty, + Entrypoint: e.Entrypoint, + Arguments: e.Args, + Privileged: &e.Privileged, + User: e.User, + } +} diff --git a/vendor/github.com/moby/moby/daemon/inspect_windows.go b/vendor/github.com/moby/moby/daemon/inspect_windows.go new file mode 100644 index 000000000..5b12902db --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/inspect_windows.go @@ -0,0 +1,26 @@ +package daemon + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" +) + +// This sets platform-specific fields +func setPlatformSpecificContainerFields(container *container.Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase { + return contJSONBase +} + +// containerInspectPre120 get containers for pre 1.20 APIs. 
+func (daemon *Daemon) containerInspectPre120(name string) (*types.ContainerJSON, error) { + return daemon.ContainerInspectCurrent(name, false) +} + +func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig { + return &backend.ExecProcessConfig{ + Tty: e.Tty, + Entrypoint: e.Entrypoint, + Arguments: e.Args, + } +} diff --git a/vendor/github.com/moby/moby/daemon/keys.go b/vendor/github.com/moby/moby/daemon/keys.go new file mode 100644 index 000000000..055d488a5 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/keys.go @@ -0,0 +1,59 @@ +// +build linux + +package daemon + +import ( + "fmt" + "io/ioutil" + "os" + "strconv" + "strings" +) + +const ( + rootKeyFile = "/proc/sys/kernel/keys/root_maxkeys" + rootBytesFile = "/proc/sys/kernel/keys/root_maxbytes" + rootKeyLimit = 1000000 + // it is standard configuration to allocate 25 bytes per key + rootKeyByteMultiplier = 25 +) + +// ModifyRootKeyLimit checks to see if the root key limit is set to +// at least 1000000 and changes it to that limit along with the maxbytes +// allocated to the keys at a 25 to 1 multiplier. +func ModifyRootKeyLimit() error { + value, err := readRootKeyLimit(rootKeyFile) + if err != nil { + return err + } + if value < rootKeyLimit { + return setRootKeyLimit(rootKeyLimit) + } + return nil +} + +func setRootKeyLimit(limit int) error { + keys, err := os.OpenFile(rootKeyFile, os.O_WRONLY, 0) + if err != nil { + return err + } + defer keys.Close() + if _, err := fmt.Fprintf(keys, "%d", limit); err != nil { + return err + } + bytes, err := os.OpenFile(rootBytesFile, os.O_WRONLY, 0) + if err != nil { + return err + } + defer bytes.Close() + _, err = fmt.Fprintf(bytes, "%d", limit*rootKeyByteMultiplier) + return err +} + +func readRootKeyLimit(path string) (int, error) { + data, err := ioutil.ReadFile(path) + if err != nil { + return -1, err + } + return strconv.Atoi(strings.Trim(string(data), "\n")) +} diff --git a/vendor/github.com/moby/moby/daemon/keys_unsupported.go b/vendor/github.com/moby/moby/daemon/keys_unsupported.go new file mode 100644 index 000000000..e49baf945 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/keys_unsupported.go @@ -0,0 +1,8 @@ +// +build !linux + +package daemon + +// ModifyRootKeyLimit is a noop on unsupported platforms. +func ModifyRootKeyLimit() error { + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/kill.go b/vendor/github.com/moby/moby/daemon/kill.go new file mode 100644 index 000000000..b118160f8 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/kill.go @@ -0,0 +1,177 @@ +package daemon + +import ( + "context" + "fmt" + "runtime" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + containerpkg "github.com/docker/docker/container" + "github.com/docker/docker/pkg/signal" +) + +type errNoSuchProcess struct { + pid int + signal int +} + +func (e errNoSuchProcess) Error() string { + return fmt.Sprintf("Cannot kill process (pid=%d) with signal %d: no such process.", e.pid, e.signal) +} + +// isErrNoSuchProcess returns true if the error +// is an instance of errNoSuchProcess. +func isErrNoSuchProcess(err error) bool { + _, ok := err.(errNoSuchProcess) + return ok +} + +// ContainerKill sends signal to the container +// If no signal is given (sig 0), then Kill with SIGKILL and wait +// for the container to exit. +// If a signal is given, then just send it to the container and return. 
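+
+// Editor's note: a hedged usage sketch (not part of the vendored source).
+// Signal 0 and SIGKILL take the blocking Kill path below; any other signal
+// is forwarded via killWithSignal and the call returns without waiting. The
+// container name "web" is hypothetical.
+//
+//	err := daemon.ContainerKill("web", uint64(syscall.SIGTERM)) // send and return
+//	err = daemon.ContainerKill("web", 0)                        // SIGKILL and wait
+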
+func (daemon *Daemon) ContainerKill(name string, sig uint64) error {
+	container, err := daemon.GetContainer(name)
+	if err != nil {
+		return err
+	}
+
+	if sig != 0 && !signal.ValidSignalForPlatform(syscall.Signal(sig)) {
+		return fmt.Errorf("The %s daemon does not support signal %d", runtime.GOOS, sig)
+	}
+
+	// If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait())
+	if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL {
+		return daemon.Kill(container)
+	}
+	return daemon.killWithSignal(container, int(sig))
+}
+
+// killWithSignal sends the container the given signal. This wrapper for the
+// host specific kill command prepares the container before attempting
+// to send the signal. An error is returned if the container is paused
+// or not running, or if there is a problem returned from the
+// underlying kill command.
+func (daemon *Daemon) killWithSignal(container *containerpkg.Container, sig int) error {
+	logrus.Debugf("Sending kill signal %d to container %s", sig, container.ID)
+	container.Lock()
+	defer container.Unlock()
+
+	if !container.Running {
+		return errNotRunning{container.ID}
+	}
+
+	var unpause bool
+	if container.Config.StopSignal != "" && syscall.Signal(sig) != syscall.SIGKILL {
+		containerStopSignal, err := signal.ParseSignal(container.Config.StopSignal)
+		if err != nil {
+			return err
+		}
+		if containerStopSignal == syscall.Signal(sig) {
+			container.ExitOnNext()
+			unpause = container.Paused
+		}
+	} else {
+		container.ExitOnNext()
+		unpause = container.Paused
+	}
+
+	if !daemon.IsShuttingDown() {
+		container.HasBeenManuallyStopped = true
+	}
+
+	// if the container is currently restarting we do not need to send the signal
+	// to the process. Telling the monitor that it should exit on its next event
+	// loop is enough
+	if container.Restarting {
+		return nil
+	}
+
+	if err := daemon.kill(container, sig); err != nil {
+		err = fmt.Errorf("Cannot kill container %s: %s", container.ID, err)
+		// if the container or process no longer exists, ignore the error
+		if strings.Contains(err.Error(), "container not found") ||
+			strings.Contains(err.Error(), "no such process") {
+			logrus.Warnf("container kill failed because of 'container not found' or 'no such process': %s", err.Error())
+			unpause = false
+		} else {
+			return err
+		}
+	}
+
+	if unpause {
+		// above kill signal will be sent once resume is finished
+		if err := daemon.containerd.Resume(container.ID); err != nil {
+			logrus.Warnf("Cannot unpause container %s: %s", container.ID, err)
+		}
+	}
+
+	attributes := map[string]string{
+		"signal": fmt.Sprintf("%d", sig),
+	}
+	daemon.LogContainerEventWithAttributes(container, "kill", attributes)
+	return nil
+}
+
+// Kill forcefully terminates a container.
+func (daemon *Daemon) Kill(container *containerpkg.Container) error {
+	if !container.IsRunning() {
+		return errNotRunning{container.ID}
+	}
+
+	// 1. Send SIGKILL
+	if err := daemon.killPossiblyDeadProcess(container, int(syscall.SIGKILL)); err != nil {
+		// While normally we might "return err" here we're not going to
+		// because if we can't stop the container by this point then
+		// it's probably because it's already stopped. Meaning, between
+		// the time of the IsRunning() call above and now it stopped.
+		// Also, since the err return will be environment specific we can't
+		// look for any particular (common) error that would indicate
+		// that the process is already dead vs something else going wrong.
+		// So, instead we'll give it up to 2 more seconds to complete and if
+		// by that time the container is still running, then the error
+		// we got is probably valid and so we return it to the caller.
+		if isErrNoSuchProcess(err) {
+			return nil
+		}
+
+		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+		defer cancel()
+
+		if status := <-container.Wait(ctx, containerpkg.WaitConditionNotRunning); status.Err() != nil {
+			return err
+		}
+	}
+
+	// 2. Wait for the process to die; as a last resort, try to kill the process directly
+	if err := killProcessDirectly(container); err != nil {
+		if isErrNoSuchProcess(err) {
+			return nil
+		}
+		return err
+	}
+
+	// Wait for exit with no timeout.
+	// Ignore returned status.
+	<-container.Wait(context.Background(), containerpkg.WaitConditionNotRunning)
+
+	return nil
+}
+
+// killPossiblyDeadProcess is a wrapper around killWithSignal() suppressing "no such process" errors.
+func (daemon *Daemon) killPossiblyDeadProcess(container *containerpkg.Container, sig int) error {
+	err := daemon.killWithSignal(container, sig)
+	if err == syscall.ESRCH {
+		e := errNoSuchProcess{container.GetPID(), sig}
+		logrus.Debug(e)
+		return e
+	}
+	return err
+}
+
+func (daemon *Daemon) kill(c *containerpkg.Container, sig int) error {
+	return daemon.containerd.Signal(c.ID, sig)
+}
diff --git a/vendor/github.com/moby/moby/daemon/links.go b/vendor/github.com/moby/moby/daemon/links.go
new file mode 100644
index 000000000..7f691d4f1
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/links.go
@@ -0,0 +1,87 @@
+package daemon
+
+import (
+	"sync"
+
+	"github.com/docker/docker/container"
+)
+
+// linkIndex stores link relationships between containers, including their specified alias
+// The alias is the name the parent uses to reference the child
+type linkIndex struct {
+	// idx maps a parent->alias->child relationship
+	idx map[*container.Container]map[string]*container.Container
+	// childIdx maps child->parent->aliases
+	childIdx map[*container.Container]map[*container.Container]map[string]struct{}
+	mu       sync.Mutex
+}
+
+func newLinkIndex() *linkIndex {
+	return &linkIndex{
+		idx:      make(map[*container.Container]map[string]*container.Container),
+		childIdx: make(map[*container.Container]map[*container.Container]map[string]struct{}),
+	}
+}
+
+// link adds indexes for the passed in parent/child/alias relationships
+func (l *linkIndex) link(parent, child *container.Container, alias string) {
+	l.mu.Lock()
+
+	if l.idx[parent] == nil {
+		l.idx[parent] = make(map[string]*container.Container)
+	}
+	l.idx[parent][alias] = child
+	if l.childIdx[child] == nil {
+		l.childIdx[child] = make(map[*container.Container]map[string]struct{})
+	}
+	if l.childIdx[child][parent] == nil {
+		l.childIdx[child][parent] = make(map[string]struct{})
+	}
+	l.childIdx[child][parent][alias] = struct{}{}
+
+	l.mu.Unlock()
+}
+
+// unlink removes the requested alias for the given parent/child
+func (l *linkIndex) unlink(alias string, child, parent *container.Container) {
+	l.mu.Lock()
+	delete(l.idx[parent], alias)
+	delete(l.childIdx[child], parent)
+	l.mu.Unlock()
+}
+
+// children maps all the aliases->children for the passed in parent
+// aliases here are the aliases the parent uses to refer to the child
+func (l *linkIndex) children(parent *container.Container) map[string]*container.Container {
+	l.mu.Lock()
+	children := l.idx[parent]
+	l.mu.Unlock()
+	return children
+}
+
+// parents maps all the aliases->parent for the passed in child
+// aliases here are the aliases the parents use to refer to the child
+func (l *linkIndex) parents(child *container.Container) map[string]*container.Container {
+	l.mu.Lock()
+
+	parents := make(map[string]*container.Container)
+	for parent, aliases := range l.childIdx[child] {
+		for alias := range aliases {
+			parents[alias] = parent
+		}
+	}
+
+	l.mu.Unlock()
+	return parents
+}
+
+// delete deletes all link relationships referencing this container
+func (l *linkIndex) delete(container *container.Container) {
+	l.mu.Lock()
+	for _, child := range l.idx[container] {
+		delete(l.childIdx[child], container)
+	}
+	delete(l.idx, container)
+	delete(l.childIdx, container)
+	l.mu.Unlock()
}
diff --git a/vendor/github.com/moby/moby/daemon/links/links.go b/vendor/github.com/moby/moby/daemon/links/links.go
new file mode 100644
index 000000000..af15de046
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/links/links.go
@@ -0,0 +1,141 @@
+package links
+
+import (
+	"fmt"
+	"path"
+	"strings"
+
+	"github.com/docker/go-connections/nat"
+)
+
+// Link struct holds information about a parent/child linked container
+type Link struct {
+	// Parent container IP address
+	ParentIP string
+	// Child container IP address
+	ChildIP string
+	// Link name
+	Name string
+	// Child environment variables
+	ChildEnvironment []string
+	// Child exposed ports
+	Ports []nat.Port
+}
+
+// NewLink initializes a new Link struct with the provided options.
+func NewLink(parentIP, childIP, name string, env []string, exposedPorts map[nat.Port]struct{}) *Link {
+	var (
+		i     int
+		ports = make([]nat.Port, len(exposedPorts))
+	)
+
+	for p := range exposedPorts {
+		ports[i] = p
+		i++
+	}
+
+	return &Link{
+		Name:             name,
+		ChildIP:          childIP,
+		ParentIP:         parentIP,
+		ChildEnvironment: env,
+		Ports:            ports,
+	}
+}
+
+// ToEnv creates a slice of strings containing child container information in
+// the form of environment variables which will later be exported on container
+// startup.
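+
+// Editor's note: an illustrative sketch (not part of the vendored source) of
+// what ToEnv below produces for a child at 172.17.0.2 (hypothetical address)
+// exposing 6379/tcp under the link name "/db/redis"; the alias comes from the
+// last path segment, uppercased with "-" mapped to "_":
+//
+//	REDIS_NAME=/db/redis
+//	REDIS_PORT=tcp://172.17.0.2:6379
+//	REDIS_PORT_6379_TCP=tcp://172.17.0.2:6379
+//	REDIS_PORT_6379_TCP_ADDR=172.17.0.2
+//	REDIS_PORT_6379_TCP_PORT=6379
+//	REDIS_PORT_6379_TCP_PROTO=tcp
+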
+func (l *Link) ToEnv() []string { + env := []string{} + + _, n := path.Split(l.Name) + alias := strings.Replace(strings.ToUpper(n), "-", "_", -1) + + if p := l.getDefaultPort(); p != nil { + env = append(env, fmt.Sprintf("%s_PORT=%s://%s:%s", alias, p.Proto(), l.ChildIP, p.Port())) + } + + //sort the ports so that we can bulk the continuous ports together + nat.Sort(l.Ports, func(ip, jp nat.Port) bool { + // If the two ports have the same number, tcp takes priority + // Sort in desc order + return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && strings.ToLower(ip.Proto()) == "tcp") + }) + + for i := 0; i < len(l.Ports); { + p := l.Ports[i] + j := nextContiguous(l.Ports, p.Int(), i) + if j > i+1 { + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_START=%s://%s:%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto(), l.ChildIP, p.Port())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_ADDR=%s", alias, p.Port(), strings.ToUpper(p.Proto()), l.ChildIP)) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PROTO=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT_START=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Port())) + + q := l.Ports[j] + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_END=%s://%s:%s", alias, p.Port(), strings.ToUpper(q.Proto()), q.Proto(), l.ChildIP, q.Port())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT_END=%s", alias, p.Port(), strings.ToUpper(q.Proto()), q.Port())) + + i = j + 1 + continue + } else { + i++ + } + } + for _, p := range l.Ports { + env = append(env, fmt.Sprintf("%s_PORT_%s_%s=%s://%s:%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto(), l.ChildIP, p.Port())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_ADDR=%s", alias, p.Port(), strings.ToUpper(p.Proto()), l.ChildIP)) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Port())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PROTO=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto())) + } + + // Load the linked container's name into the environment + env = append(env, fmt.Sprintf("%s_NAME=%s", alias, l.Name)) + + if l.ChildEnvironment != nil { + for _, v := range l.ChildEnvironment { + parts := strings.SplitN(v, "=", 2) + if len(parts) < 2 { + continue + } + // Ignore a few variables that are added during docker build (and not really relevant to linked containers) + if parts[0] == "HOME" || parts[0] == "PATH" { + continue + } + env = append(env, fmt.Sprintf("%s_ENV_%s=%s", alias, parts[0], parts[1])) + } + } + return env +} + +func nextContiguous(ports []nat.Port, value int, index int) int { + if index+1 == len(ports) { + return index + } + for i := index + 1; i < len(ports); i++ { + if ports[i].Int() > value+1 { + return i - 1 + } + + value++ + } + return len(ports) - 1 +} + +// Default port rules +func (l *Link) getDefaultPort() *nat.Port { + var p nat.Port + i := len(l.Ports) + + if i == 0 { + return nil + } else if i > 1 { + nat.Sort(l.Ports, func(ip, jp nat.Port) bool { + // If the two ports have the same number, tcp takes priority + // Sort in desc order + return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && strings.ToLower(ip.Proto()) == "tcp") + }) + } + p = l.Ports[0] + return &p +} diff --git a/vendor/github.com/moby/moby/daemon/links/links_test.go b/vendor/github.com/moby/moby/daemon/links/links_test.go new file mode 100644 index 000000000..b852c4443 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/links/links_test.go @@ -0,0 +1,213 @@ +package links + 
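+
+// Editor's note (not part of the vendored source): when exposed ports form a
+// contiguous run, ToEnv collapses the run into range variables in addition to
+// the per-port entries, e.g. for 6379-6381/tcp on a link aliased REDIS
+// (hypothetical values):
+//
+//	REDIS_PORT_6379_TCP_START=tcp://172.17.0.2:6379
+//	REDIS_PORT_6379_TCP_END=tcp://172.17.0.2:6381
+//	REDIS_PORT_6379_TCP_PORT_START=6379
+//	REDIS_PORT_6379_TCP_PORT_END=6381
+//
+// The tests below pin down exactly this naming scheme.
+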
+import ( + "fmt" + "strings" + "testing" + + "github.com/docker/go-connections/nat" +) + +// Just to make life easier +func newPortNoError(proto, port string) nat.Port { + p, _ := nat.NewPort(proto, port) + return p +} + +func TestLinkNaming(t *testing.T) { + ports := make(nat.PortSet) + ports[newPortNoError("tcp", "6379")] = struct{}{} + + link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker-1", nil, ports) + + rawEnv := link.ToEnv() + env := make(map[string]string, len(rawEnv)) + for _, e := range rawEnv { + parts := strings.Split(e, "=") + if len(parts) != 2 { + t.FailNow() + } + env[parts[0]] = parts[1] + } + + value, ok := env["DOCKER_1_PORT"] + + if !ok { + t.Fatal("DOCKER_1_PORT not found in env") + } + + if value != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_1_PORT"]) + } +} + +func TestLinkNew(t *testing.T) { + ports := make(nat.PortSet) + ports[newPortNoError("tcp", "6379")] = struct{}{} + + link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", nil, ports) + + if link.Name != "/db/docker" { + t.Fail() + } + if link.ParentIP != "172.0.17.3" { + t.Fail() + } + if link.ChildIP != "172.0.17.2" { + t.Fail() + } + for _, p := range link.Ports { + if p != newPortNoError("tcp", "6379") { + t.Fail() + } + } +} + +func TestLinkEnv(t *testing.T) { + ports := make(nat.PortSet) + ports[newPortNoError("tcp", "6379")] = struct{}{} + + link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports) + + rawEnv := link.ToEnv() + env := make(map[string]string, len(rawEnv)) + for _, e := range rawEnv { + parts := strings.Split(e, "=") + if len(parts) != 2 { + t.FailNow() + } + env[parts[0]] = parts[1] + } + if env["DOCKER_PORT"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_PORT"]) + } + if env["DOCKER_PORT_6379_TCP"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected tcp://172.0.17.2:6379, got %s", env["DOCKER_PORT_6379_TCP"]) + } + if env["DOCKER_PORT_6379_TCP_PROTO"] != "tcp" { + t.Fatalf("Expected tcp, got %s", env["DOCKER_PORT_6379_TCP_PROTO"]) + } + if env["DOCKER_PORT_6379_TCP_ADDR"] != "172.0.17.2" { + t.Fatalf("Expected 172.0.17.2, got %s", env["DOCKER_PORT_6379_TCP_ADDR"]) + } + if env["DOCKER_PORT_6379_TCP_PORT"] != "6379" { + t.Fatalf("Expected 6379, got %s", env["DOCKER_PORT_6379_TCP_PORT"]) + } + if env["DOCKER_NAME"] != "/db/docker" { + t.Fatalf("Expected /db/docker, got %s", env["DOCKER_NAME"]) + } + if env["DOCKER_ENV_PASSWORD"] != "gordon" { + t.Fatalf("Expected gordon, got %s", env["DOCKER_ENV_PASSWORD"]) + } +} + +func TestLinkMultipleEnv(t *testing.T) { + ports := make(nat.PortSet) + ports[newPortNoError("tcp", "6379")] = struct{}{} + ports[newPortNoError("tcp", "6380")] = struct{}{} + ports[newPortNoError("tcp", "6381")] = struct{}{} + + link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports) + + rawEnv := link.ToEnv() + env := make(map[string]string, len(rawEnv)) + for _, e := range rawEnv { + parts := strings.Split(e, "=") + if len(parts) != 2 { + t.FailNow() + } + env[parts[0]] = parts[1] + } + if env["DOCKER_PORT"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_PORT"]) + } + if env["DOCKER_PORT_6379_TCP_START"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected tcp://172.0.17.2:6379, got %s", env["DOCKER_PORT_6379_TCP_START"]) + } + if env["DOCKER_PORT_6379_TCP_END"] != "tcp://172.0.17.2:6381" { + t.Fatalf("Expected tcp://172.0.17.2:6381, got %s", 
env["DOCKER_PORT_6379_TCP_END"]) + } + if env["DOCKER_PORT_6379_TCP_PROTO"] != "tcp" { + t.Fatalf("Expected tcp, got %s", env["DOCKER_PORT_6379_TCP_PROTO"]) + } + if env["DOCKER_PORT_6379_TCP_ADDR"] != "172.0.17.2" { + t.Fatalf("Expected 172.0.17.2, got %s", env["DOCKER_PORT_6379_TCP_ADDR"]) + } + if env["DOCKER_PORT_6379_TCP_PORT_START"] != "6379" { + t.Fatalf("Expected 6379, got %s", env["DOCKER_PORT_6379_TCP_PORT_START"]) + } + if env["DOCKER_PORT_6379_TCP_PORT_END"] != "6381" { + t.Fatalf("Expected 6381, got %s", env["DOCKER_PORT_6379_TCP_PORT_END"]) + } + if env["DOCKER_NAME"] != "/db/docker" { + t.Fatalf("Expected /db/docker, got %s", env["DOCKER_NAME"]) + } + if env["DOCKER_ENV_PASSWORD"] != "gordon" { + t.Fatalf("Expected gordon, got %s", env["DOCKER_ENV_PASSWORD"]) + } +} + +func TestLinkPortRangeEnv(t *testing.T) { + ports := make(nat.PortSet) + ports[newPortNoError("tcp", "6379")] = struct{}{} + ports[newPortNoError("tcp", "6380")] = struct{}{} + ports[newPortNoError("tcp", "6381")] = struct{}{} + + link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports) + + rawEnv := link.ToEnv() + env := make(map[string]string, len(rawEnv)) + for _, e := range rawEnv { + parts := strings.Split(e, "=") + if len(parts) != 2 { + t.FailNow() + } + env[parts[0]] = parts[1] + } + + if env["DOCKER_PORT"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_PORT"]) + } + if env["DOCKER_PORT_6379_TCP_START"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected tcp://172.0.17.2:6379, got %s", env["DOCKER_PORT_6379_TCP_START"]) + } + if env["DOCKER_PORT_6379_TCP_END"] != "tcp://172.0.17.2:6381" { + t.Fatalf("Expected tcp://172.0.17.2:6381, got %s", env["DOCKER_PORT_6379_TCP_END"]) + } + if env["DOCKER_PORT_6379_TCP_PROTO"] != "tcp" { + t.Fatalf("Expected tcp, got %s", env["DOCKER_PORT_6379_TCP_PROTO"]) + } + if env["DOCKER_PORT_6379_TCP_ADDR"] != "172.0.17.2" { + t.Fatalf("Expected 172.0.17.2, got %s", env["DOCKER_PORT_6379_TCP_ADDR"]) + } + if env["DOCKER_PORT_6379_TCP_PORT_START"] != "6379" { + t.Fatalf("Expected 6379, got %s", env["DOCKER_PORT_6379_TCP_PORT_START"]) + } + if env["DOCKER_PORT_6379_TCP_PORT_END"] != "6381" { + t.Fatalf("Expected 6381, got %s", env["DOCKER_PORT_6379_TCP_PORT_END"]) + } + if env["DOCKER_NAME"] != "/db/docker" { + t.Fatalf("Expected /db/docker, got %s", env["DOCKER_NAME"]) + } + if env["DOCKER_ENV_PASSWORD"] != "gordon" { + t.Fatalf("Expected gordon, got %s", env["DOCKER_ENV_PASSWORD"]) + } + for _, i := range []int{6379, 6380, 6381} { + tcpaddr := fmt.Sprintf("DOCKER_PORT_%d_TCP_ADDR", i) + tcpport := fmt.Sprintf("DOCKER_PORT_%d_TCP_PORT", i) + tcpproto := fmt.Sprintf("DOCKER_PORT_%d_TCP_PROTO", i) + tcp := fmt.Sprintf("DOCKER_PORT_%d_TCP", i) + if env[tcpaddr] != "172.0.17.2" { + t.Fatalf("Expected env %s = 172.0.17.2, got %s", tcpaddr, env[tcpaddr]) + } + if env[tcpport] != fmt.Sprintf("%d", i) { + t.Fatalf("Expected env %s = %d, got %s", tcpport, i, env[tcpport]) + } + if env[tcpproto] != "tcp" { + t.Fatalf("Expected env %s = tcp, got %s", tcpproto, env[tcpproto]) + } + if env[tcp] != fmt.Sprintf("tcp://172.0.17.2:%d", i) { + t.Fatalf("Expected env %s = tcp://172.0.17.2:%d, got %s", tcp, i, env[tcp]) + } + } +} diff --git a/vendor/github.com/moby/moby/daemon/list.go b/vendor/github.com/moby/moby/daemon/list.go new file mode 100644 index 000000000..6889c5588 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/list.go @@ -0,0 +1,670 @@ +package daemon + +import ( + "errors" + "fmt" + "sort" + 
"strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/volume" + "github.com/docker/go-connections/nat" +) + +var acceptedVolumeFilterTags = map[string]bool{ + "dangling": true, + "name": true, + "driver": true, + "label": true, +} + +var acceptedPsFilterTags = map[string]bool{ + "ancestor": true, + "before": true, + "exited": true, + "id": true, + "isolation": true, + "label": true, + "name": true, + "status": true, + "health": true, + "since": true, + "volume": true, + "network": true, + "is-task": true, + "publish": true, + "expose": true, +} + +// iterationAction represents possible outcomes happening during the container iteration. +type iterationAction int + +// containerReducer represents a reducer for a container. +// Returns the object to serialize by the api. +type containerReducer func(*container.Snapshot, *listContext) (*types.Container, error) + +const ( + // includeContainer is the action to include a container in the reducer. + includeContainer iterationAction = iota + // excludeContainer is the action to exclude a container in the reducer. + excludeContainer + // stopIteration is the action to stop iterating over the list of containers. + stopIteration +) + +// errStopIteration makes the iterator to stop without returning an error. +var errStopIteration = errors.New("container list iteration stopped") + +// List returns an array of all containers registered in the daemon. +func (daemon *Daemon) List() []*container.Container { + return daemon.containers.List() +} + +// listContext is the daemon generated filtering to iterate over containers. +// This is created based on the user specification from types.ContainerListOptions. +type listContext struct { + // idx is the container iteration index for this context + idx int + // ancestorFilter tells whether it should check ancestors or not + ancestorFilter bool + // names is a list of container names to filter with + names map[string][]string + // images is a list of images to filter with + images map[image.ID]bool + // filters is a collection of arguments to filter with, specified by the user + filters filters.Args + // exitAllowed is a list of exit codes allowed to filter with + exitAllowed []int + + // beforeFilter is a filter to ignore containers that appear before the one given + beforeFilter *container.Snapshot + // sinceFilter is a filter to stop the filtering when the iterator arrive to the given container + sinceFilter *container.Snapshot + + // taskFilter tells if we should filter based on wether a container is part of a task + taskFilter bool + // isTask tells us if the we should filter container that are a task (true) or not (false) + isTask bool + + // publish is a list of published ports to filter with + publish map[nat.Port]bool + // expose is a list of exposed ports to filter with + expose map[nat.Port]bool + + // ContainerListOptions is the filters set by the user + *types.ContainerListOptions +} + +// byCreatedDescending is a temporary type used to sort a list of containers by creation time. 
+type byCreatedDescending []container.Snapshot + +func (r byCreatedDescending) Len() int { return len(r) } +func (r byCreatedDescending) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byCreatedDescending) Less(i, j int) bool { + return r[j].CreatedAt.UnixNano() < r[i].CreatedAt.UnixNano() +} + +// Containers returns the list of containers to show given the user's filtering. +func (daemon *Daemon) Containers(config *types.ContainerListOptions) ([]*types.Container, error) { + return daemon.reduceContainers(config, daemon.refreshImage) +} + +func (daemon *Daemon) filterByNameIDMatches(view container.View, ctx *listContext) ([]container.Snapshot, error) { + idSearch := false + names := ctx.filters.Get("name") + ids := ctx.filters.Get("id") + if len(names)+len(ids) == 0 { + // if name or ID filters are not in use, return to + // standard behavior of walking the entire container + // list from the daemon's in-memory store + all, err := view.All() + sort.Sort(byCreatedDescending(all)) + return all, err + } + + // idSearch will determine if we limit name matching to the IDs + // matched from any IDs which were specified as filters + if len(ids) > 0 { + idSearch = true + } + + matches := make(map[string]bool) + // find ID matches; errors represent "not found" and can be ignored + for _, id := range ids { + if fullID, err := daemon.idIndex.Get(id); err == nil { + matches[fullID] = true + } + } + + // look for name matches; if ID filtering was used, then limit the + // search space to the matches map only; errors represent "not found" + // and can be ignored + if len(names) > 0 { + for id, idNames := range ctx.names { + // if ID filters were used and no matches on that ID were + // found, continue to next ID in the list + if idSearch && !matches[id] { + continue + } + for _, eachName := range idNames { + if ctx.filters.Match("name", eachName) { + matches[id] = true + } + } + } + } + + cntrs := make([]container.Snapshot, 0, len(matches)) + for id := range matches { + c, err := view.Get(id) + switch err.(type) { + case nil: + cntrs = append(cntrs, *c) + case container.NoSuchContainerError: + // ignore error + default: + return nil, err + } + } + + // Restore sort-order after filtering + // Created gives us nanosec resolution for sorting + sort.Sort(byCreatedDescending(cntrs)) + + return cntrs, nil +} + +// reduceContainers parses the user's filtering options and generates the list of containers to return based on a reducer. +func (daemon *Daemon) reduceContainers(config *types.ContainerListOptions, reducer containerReducer) ([]*types.Container, error) { + var ( + view = daemon.containersReplica.Snapshot() + containers = []*types.Container{} + ) + + ctx, err := daemon.foldFilter(view, config) + if err != nil { + return nil, err + } + + // fastpath to only look at a subset of containers if specific name + // or ID matches were provided by the user--otherwise we potentially + // end up querying many more containers than intended + containerList, err := daemon.filterByNameIDMatches(view, ctx) + if err != nil { + return nil, err + } + + for i := range containerList { + t, err := daemon.reducePsContainer(&containerList[i], ctx, reducer) + if err != nil { + if err != errStopIteration { + return nil, err + } + break + } + if t != nil { + containers = append(containers, t) + ctx.idx++ + } + } + + return containers, nil +} + +// reducePsContainer is the basic representation for a container as expected by the ps command. 
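+
+// Editor's note: a hedged sketch (not part of the vendored source). Any
+// containerReducer can be plugged into reduceContainers; refreshImage later
+// in this file is the production reducer. A trivial, hypothetical stand-in:
+//
+//	func idOnlyReducer(s *container.Snapshot, ctx *listContext) (*types.Container, error) {
+//		return &types.Container{ID: s.ID}, nil // keep only the container ID
+//	}
+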
+func (daemon *Daemon) reducePsContainer(container *container.Snapshot, ctx *listContext, reducer containerReducer) (*types.Container, error) { + // filter containers to return + switch includeContainerInList(container, ctx) { + case excludeContainer: + return nil, nil + case stopIteration: + return nil, errStopIteration + } + + // transform internal container struct into api structs + newC, err := reducer(container, ctx) + if err != nil { + return nil, err + } + + // release lock because size calculation is slow + if ctx.Size { + sizeRw, sizeRootFs := daemon.getSize(newC.ID) + newC.SizeRw = sizeRw + newC.SizeRootFs = sizeRootFs + } + return newC, nil +} + +// foldFilter generates the container filter based on the user's filtering options. +func (daemon *Daemon) foldFilter(view container.View, config *types.ContainerListOptions) (*listContext, error) { + psFilters := config.Filters + + if err := psFilters.Validate(acceptedPsFilterTags); err != nil { + return nil, err + } + + var filtExited []int + + err := psFilters.WalkValues("exited", func(value string) error { + code, err := strconv.Atoi(value) + if err != nil { + return err + } + filtExited = append(filtExited, code) + return nil + }) + if err != nil { + return nil, err + } + + err = psFilters.WalkValues("status", func(value string) error { + if !container.IsValidStateString(value) { + return fmt.Errorf("Unrecognised filter value for status: %s", value) + } + + config.All = true + return nil + }) + if err != nil { + return nil, err + } + + var taskFilter, isTask bool + if psFilters.Include("is-task") { + if psFilters.ExactMatch("is-task", "true") { + taskFilter = true + isTask = true + } else if psFilters.ExactMatch("is-task", "false") { + taskFilter = true + isTask = false + } else { + return nil, fmt.Errorf("Invalid filter 'is-task=%s'", psFilters.Get("is-task")) + } + } + + err = psFilters.WalkValues("health", func(value string) error { + if !container.IsValidHealthString(value) { + return fmt.Errorf("Unrecognised filter value for health: %s", value) + } + + return nil + }) + if err != nil { + return nil, err + } + + var beforeContFilter, sinceContFilter *container.Snapshot + + err = psFilters.WalkValues("before", func(value string) error { + beforeContFilter, err = view.Get(value) + return err + }) + if err != nil { + return nil, err + } + + err = psFilters.WalkValues("since", func(value string) error { + sinceContFilter, err = view.Get(value) + return err + }) + if err != nil { + return nil, err + } + + imagesFilter := map[image.ID]bool{} + var ancestorFilter bool + if psFilters.Include("ancestor") { + ancestorFilter = true + psFilters.WalkValues("ancestor", func(ancestor string) error { + id, platform, err := daemon.GetImageIDAndPlatform(ancestor) + if err != nil { + logrus.Warnf("Error while looking up for image %v", ancestor) + return nil + } + if imagesFilter[id] { + // Already seen this ancestor, skip it + return nil + } + // Then walk down the graph and put the imageIds in imagesFilter + populateImageFilterByParents(imagesFilter, id, daemon.stores[platform].imageStore.Children) + return nil + }) + } + + publishFilter := map[nat.Port]bool{} + err = psFilters.WalkValues("publish", portOp("publish", publishFilter)) + if err != nil { + return nil, err + } + + exposeFilter := map[nat.Port]bool{} + err = psFilters.WalkValues("expose", portOp("expose", exposeFilter)) + if err != nil { + return nil, err + } + + return &listContext{ + filters: psFilters, + ancestorFilter: ancestorFilter, + images: imagesFilter, + exitAllowed: 
filtExited,
+		beforeFilter:         beforeContFilter,
+		sinceFilter:          sinceContFilter,
+		taskFilter:           taskFilter,
+		isTask:               isTask,
+		publish:              publishFilter,
+		expose:               exposeFilter,
+		ContainerListOptions: config,
+		names:                view.GetAllNames(),
+	}, nil
+}
+
+func portOp(key string, filter map[nat.Port]bool) func(value string) error {
+	return func(value string) error {
+		if strings.Contains(value, ":") {
+			return fmt.Errorf("filter for '%s' should not contain ':': %s", key, value)
+		}
+		// support two formats: <portnum>/[<proto>] or <startport-endport>/[<proto>]
+		proto, port := nat.SplitProtoPort(value)
+		start, end, err := nat.ParsePortRange(port)
+		if err != nil {
+			return fmt.Errorf("error while looking up for %s %s: %s", key, value, err)
+		}
+		for i := start; i <= end; i++ {
+			p, err := nat.NewPort(proto, strconv.FormatUint(i, 10))
+			if err != nil {
+				return fmt.Errorf("error while looking up for %s %s: %s", key, value, err)
+			}
+			filter[p] = true
+		}
+		return nil
+	}
+}
+
+// includeContainerInList decides whether a container should be included in the output or not based on the filter.
+// It also decides if the iteration should be stopped or not.
+func includeContainerInList(container *container.Snapshot, ctx *listContext) iterationAction {
+	// Do not include container if it's in the list before the filter container.
+	// Set the filter container to nil to include the rest of containers after this one.
+	if ctx.beforeFilter != nil {
+		if container.ID == ctx.beforeFilter.ID {
+			ctx.beforeFilter = nil
+		}
+		return excludeContainer
+	}
+
+	// Stop iteration when the container arrives at the filter container
+	if ctx.sinceFilter != nil {
+		if container.ID == ctx.sinceFilter.ID {
+			return stopIteration
+		}
+	}
+
+	// Do not include container if it's stopped, unless all containers were requested or a limit is set
+	if !container.Running && !ctx.All && ctx.Limit <= 0 {
+		return excludeContainer
+	}
+
+	// Do not include container if the name doesn't match
+	if !ctx.filters.Match("name", container.Name) {
+		return excludeContainer
+	}
+
+	// Do not include container if the id doesn't match
+	if !ctx.filters.Match("id", container.ID) {
+		return excludeContainer
+	}
+
+	if ctx.taskFilter {
+		if ctx.isTask != container.Managed {
+			return excludeContainer
+		}
+	}
+
+	// Do not include container if any of the labels don't match
+	if !ctx.filters.MatchKVList("label", container.Labels) {
+		return excludeContainer
+	}
+
+	// Do not include container if isolation doesn't match
+	if excludeContainer == excludeByIsolation(container, ctx) {
+		return excludeContainer
+	}
+
+	// Stop iteration when the index is over the limit
+	if ctx.Limit > 0 && ctx.idx == ctx.Limit {
+		return stopIteration
+	}
+
+	// Do not include container if its exit code is not in the filter
+	if len(ctx.exitAllowed) > 0 {
+		shouldSkip := true
+		for _, code := range ctx.exitAllowed {
+			if code == container.ExitCode && !container.Running && !container.StartedAt.IsZero() {
+				shouldSkip = false
+				break
+			}
+		}
+		if shouldSkip {
+			return excludeContainer
+		}
+	}
+
+	// Do not include container if its status doesn't match the filter
+	if !ctx.filters.Match("status", container.State) {
+		return excludeContainer
+	}
+
+	// Do not include container if its health doesn't match the filter
+	if !ctx.filters.ExactMatch("health", container.Health) {
+		return excludeContainer
+	}
+
+	if ctx.filters.Include("volume") {
+		volumesByName := make(map[string]types.MountPoint)
+		for _, m := range container.Mounts {
+			if m.Name != "" {
+				volumesByName[m.Name] = m
+			} else {
+				volumesByName[m.Source] = m
+			}
+		}
+
volumesByDestination := make(map[string]types.MountPoint) + for _, m := range container.Mounts { + if m.Destination != "" { + volumesByDestination[m.Destination] = m + } + } + + volumeExist := fmt.Errorf("volume mounted in container") + err := ctx.filters.WalkValues("volume", func(value string) error { + if _, exist := volumesByDestination[value]; exist { + return volumeExist + } + if _, exist := volumesByName[value]; exist { + return volumeExist + } + return nil + }) + if err != volumeExist { + return excludeContainer + } + } + + if ctx.ancestorFilter { + if len(ctx.images) == 0 { + return excludeContainer + } + if !ctx.images[image.ID(container.ImageID)] { + return excludeContainer + } + } + + var ( + networkExist = errors.New("container part of network") + noNetworks = errors.New("container is not part of any networks") + ) + if ctx.filters.Include("network") { + err := ctx.filters.WalkValues("network", func(value string) error { + if container.NetworkSettings == nil { + return noNetworks + } + if _, ok := container.NetworkSettings.Networks[value]; ok { + return networkExist + } + for _, nw := range container.NetworkSettings.Networks { + if nw == nil { + continue + } + if strings.HasPrefix(nw.NetworkID, value) { + return networkExist + } + } + return nil + }) + if err != networkExist { + return excludeContainer + } + } + + if len(ctx.publish) > 0 { + shouldSkip := true + for port := range ctx.publish { + if _, ok := container.PortBindings[port]; ok { + shouldSkip = false + break + } + } + if shouldSkip { + return excludeContainer + } + } + + if len(ctx.expose) > 0 { + shouldSkip := true + for port := range ctx.expose { + if _, ok := container.ExposedPorts[port]; ok { + shouldSkip = false + break + } + } + if shouldSkip { + return excludeContainer + } + } + + return includeContainer +} + +// refreshImage checks if the Image ref still points to the correct ID, and updates the ref to the actual ID when it doesn't +func (daemon *Daemon) refreshImage(s *container.Snapshot, ctx *listContext) (*types.Container, error) { + c := s.Container + image := s.Image // keep the original ref if still valid (hasn't changed) + if image != s.ImageID { + id, _, err := daemon.GetImageIDAndPlatform(image) + if _, isDNE := err.(ErrImageDoesNotExist); err != nil && !isDNE { + return nil, err + } + if err != nil || id.String() != s.ImageID { + // ref changed, we need to use original ID + image = s.ImageID + } + } + c.Image = image + return &c, nil +} + +// Volumes lists known volumes, using the filter to restrict the range +// of volumes returned. 
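+
+// Editor's note: a hedged usage sketch (not part of the vendored source). The
+// filter argument is the JSON encoding of filters.Args, so listing only
+// dangling volumes looks like:
+//
+//	vols, warnings, err := daemon.Volumes(`{"dangling":{"true":true}}`)
+//
+// An empty filter string selects every volume; unknown keys are rejected by
+// Validate against acceptedVolumeFilterTags above.
+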
+func (daemon *Daemon) Volumes(filter string) ([]*types.Volume, []string, error) { + var ( + volumesOut []*types.Volume + ) + volFilters, err := filters.FromParam(filter) + if err != nil { + return nil, nil, err + } + + if err := volFilters.Validate(acceptedVolumeFilterTags); err != nil { + return nil, nil, err + } + + volumes, warnings, err := daemon.volumes.List() + if err != nil { + return nil, nil, err + } + + filterVolumes, err := daemon.filterVolumes(volumes, volFilters) + if err != nil { + return nil, nil, err + } + for _, v := range filterVolumes { + apiV := volumeToAPIType(v) + if vv, ok := v.(interface { + CachedPath() string + }); ok { + apiV.Mountpoint = vv.CachedPath() + } else { + apiV.Mountpoint = v.Path() + } + volumesOut = append(volumesOut, apiV) + } + return volumesOut, warnings, nil +} + +// filterVolumes filters volume list according to user specified filter +// and returns user chosen volumes +func (daemon *Daemon) filterVolumes(vols []volume.Volume, filter filters.Args) ([]volume.Volume, error) { + // if filter is empty, return original volume list + if filter.Len() == 0 { + return vols, nil + } + + var retVols []volume.Volume + for _, vol := range vols { + if filter.Include("name") { + if !filter.Match("name", vol.Name()) { + continue + } + } + if filter.Include("driver") { + if !filter.ExactMatch("driver", vol.DriverName()) { + continue + } + } + if filter.Include("label") { + v, ok := vol.(volume.DetailedVolume) + if !ok { + continue + } + if !filter.MatchKVList("label", v.Labels()) { + continue + } + } + retVols = append(retVols, vol) + } + danglingOnly := false + if filter.Include("dangling") { + if filter.ExactMatch("dangling", "true") || filter.ExactMatch("dangling", "1") { + danglingOnly = true + } else if !filter.ExactMatch("dangling", "false") && !filter.ExactMatch("dangling", "0") { + return nil, fmt.Errorf("Invalid filter 'dangling=%s'", filter.Get("dangling")) + } + retVols = daemon.volumes.FilterByUsed(retVols, !danglingOnly) + } + return retVols, nil +} + +func populateImageFilterByParents(ancestorMap map[image.ID]bool, imageID image.ID, getChildren func(image.ID) []image.ID) { + if !ancestorMap[imageID] { + for _, id := range getChildren(imageID) { + populateImageFilterByParents(ancestorMap, id, getChildren) + } + ancestorMap[imageID] = true + } +} diff --git a/vendor/github.com/moby/moby/daemon/list_unix.go b/vendor/github.com/moby/moby/daemon/list_unix.go new file mode 100644 index 000000000..ebaae4560 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/list_unix.go @@ -0,0 +1,11 @@ +// +build linux freebsd solaris + +package daemon + +import "github.com/docker/docker/container" + +// excludeByIsolation is a platform specific helper function to support PS +// filtering by Isolation. This is a Windows-only concept, so is a no-op on Unix. +func excludeByIsolation(container *container.Snapshot, ctx *listContext) iterationAction { + return includeContainer +} diff --git a/vendor/github.com/moby/moby/daemon/list_windows.go b/vendor/github.com/moby/moby/daemon/list_windows.go new file mode 100644 index 000000000..ab563c535 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/list_windows.go @@ -0,0 +1,20 @@ +package daemon + +import ( + "strings" + + "github.com/docker/docker/container" +) + +// excludeByIsolation is a platform specific helper function to support PS +// filtering by Isolation. This is a Windows-only concept, so is a no-op on Unix. 
+func excludeByIsolation(container *container.Snapshot, ctx *listContext) iterationAction { + i := strings.ToLower(string(container.HostConfig.Isolation)) + if i == "" { + i = "default" + } + if !ctx.filters.Match("isolation", i) { + return excludeContainer + } + return includeContainer +} diff --git a/vendor/github.com/moby/moby/daemon/logdrivers_linux.go b/vendor/github.com/moby/moby/daemon/logdrivers_linux.go new file mode 100644 index 000000000..ad343c1e8 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logdrivers_linux.go @@ -0,0 +1,15 @@ +package daemon + +import ( + // Importing packages here only to make sure their init gets called and + // therefore they register themselves to the logdriver factory. + _ "github.com/docker/docker/daemon/logger/awslogs" + _ "github.com/docker/docker/daemon/logger/fluentd" + _ "github.com/docker/docker/daemon/logger/gcplogs" + _ "github.com/docker/docker/daemon/logger/gelf" + _ "github.com/docker/docker/daemon/logger/journald" + _ "github.com/docker/docker/daemon/logger/jsonfilelog" + _ "github.com/docker/docker/daemon/logger/logentries" + _ "github.com/docker/docker/daemon/logger/splunk" + _ "github.com/docker/docker/daemon/logger/syslog" +) diff --git a/vendor/github.com/moby/moby/daemon/logdrivers_windows.go b/vendor/github.com/moby/moby/daemon/logdrivers_windows.go new file mode 100644 index 000000000..f3002b97e --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logdrivers_windows.go @@ -0,0 +1,13 @@ +package daemon + +import ( + // Importing packages here only to make sure their init gets called and + // therefore they register themselves to the logdriver factory. + _ "github.com/docker/docker/daemon/logger/awslogs" + _ "github.com/docker/docker/daemon/logger/etwlogs" + _ "github.com/docker/docker/daemon/logger/fluentd" + _ "github.com/docker/docker/daemon/logger/jsonfilelog" + _ "github.com/docker/docker/daemon/logger/logentries" + _ "github.com/docker/docker/daemon/logger/splunk" + _ "github.com/docker/docker/daemon/logger/syslog" +) diff --git a/vendor/github.com/moby/moby/daemon/logger/adapter.go b/vendor/github.com/moby/moby/daemon/logger/adapter.go new file mode 100644 index 000000000..a187b30fd --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/adapter.go @@ -0,0 +1,137 @@ +package logger + +import ( + "io" + "os" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types/plugins/logdriver" + "github.com/docker/docker/pkg/plugingetter" + "github.com/pkg/errors" +) + +// pluginAdapter takes a plugin and implements the Logger interface for logger +// instances +type pluginAdapter struct { + driverName string + id string + plugin logPlugin + basePath string + fifoPath string + capabilities Capability + logInfo Info + + // synchronize access to the log stream and shared buffer + mu sync.Mutex + enc logdriver.LogEntryEncoder + stream io.WriteCloser + // buf is shared for each `Log()` call to reduce allocations. 
+ // buf must be protected by mutex + buf logdriver.LogEntry +} + +func (a *pluginAdapter) Log(msg *Message) error { + a.mu.Lock() + + a.buf.Line = msg.Line + a.buf.TimeNano = msg.Timestamp.UnixNano() + a.buf.Partial = msg.Partial + a.buf.Source = msg.Source + + err := a.enc.Encode(&a.buf) + a.buf.Reset() + + a.mu.Unlock() + + PutMessage(msg) + return err +} + +func (a *pluginAdapter) Name() string { + return a.driverName +} + +func (a *pluginAdapter) Close() error { + a.mu.Lock() + defer a.mu.Unlock() + + if err := a.plugin.StopLogging(strings.TrimPrefix(a.fifoPath, a.basePath)); err != nil { + return err + } + + if err := a.stream.Close(); err != nil { + logrus.WithError(err).Error("error closing plugin fifo") + } + if err := os.Remove(a.fifoPath); err != nil && !os.IsNotExist(err) { + logrus.WithError(err).Error("error cleaning up plugin fifo") + } + + // may be nil, especially for unit tests + if pluginGetter != nil { + pluginGetter.Get(a.Name(), extName, plugingetter.Release) + } + return nil +} + +type pluginAdapterWithRead struct { + *pluginAdapter +} + +func (a *pluginAdapterWithRead) ReadLogs(config ReadConfig) *LogWatcher { + watcher := NewLogWatcher() + + go func() { + defer close(watcher.Msg) + stream, err := a.plugin.ReadLogs(a.logInfo, config) + if err != nil { + watcher.Err <- errors.Wrap(err, "error getting log reader") + return + } + defer stream.Close() + + dec := logdriver.NewLogEntryDecoder(stream) + for { + select { + case <-watcher.WatchClose(): + return + default: + } + + var buf logdriver.LogEntry + if err := dec.Decode(&buf); err != nil { + if err == io.EOF { + return + } + select { + case watcher.Err <- errors.Wrap(err, "error decoding log message"): + case <-watcher.WatchClose(): + } + return + } + + msg := &Message{ + Timestamp: time.Unix(0, buf.TimeNano), + Line: buf.Line, + Source: buf.Source, + } + + // plugin should handle this, but check just in case + if !config.Since.IsZero() && msg.Timestamp.Before(config.Since) { + continue + } + + select { + case watcher.Msg <- msg: + case <-watcher.WatchClose(): + // make sure the message we consumed is sent + watcher.Msg <- msg + return + } + } + }() + + return watcher +} diff --git a/vendor/github.com/moby/moby/daemon/logger/adapter_test.go b/vendor/github.com/moby/moby/daemon/logger/adapter_test.go new file mode 100644 index 000000000..b8c069ffb --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/adapter_test.go @@ -0,0 +1,180 @@ +package logger + +import ( + "encoding/binary" + "io" + "io/ioutil" + "os" + "testing" + "time" + + "github.com/docker/docker/api/types/plugins/logdriver" + protoio "github.com/gogo/protobuf/io" + "github.com/stretchr/testify/assert" +) + +// mockLoggingPlugin implements the loggingPlugin interface for testing purposes +// it only supports a single log stream +type mockLoggingPlugin struct { + inStream io.ReadCloser + f *os.File + closed chan struct{} + t *testing.T +} + +func (l *mockLoggingPlugin) StartLogging(file string, info Info) error { + go func() { + io.Copy(l.f, l.inStream) + close(l.closed) + }() + return nil +} + +func (l *mockLoggingPlugin) StopLogging(file string) error { + l.inStream.Close() + l.f.Close() + os.Remove(l.f.Name()) + return nil +} + +func (l *mockLoggingPlugin) Capabilities() (cap Capability, err error) { + return Capability{ReadLogs: true}, nil +} + +func (l *mockLoggingPlugin) ReadLogs(info Info, config ReadConfig) (io.ReadCloser, error) { + r, w := io.Pipe() + f, err := os.Open(l.f.Name()) + if err != nil { + return nil, err + } + go func() { + 
defer f.Close() + dec := protoio.NewUint32DelimitedReader(f, binary.BigEndian, 1e6) + enc := logdriver.NewLogEntryEncoder(w) + + for { + select { + case <-l.closed: + w.Close() + return + default: + } + + var msg logdriver.LogEntry + if err := dec.ReadMsg(&msg); err != nil { + if err == io.EOF { + if !config.Follow { + w.Close() + return + } + dec = protoio.NewUint32DelimitedReader(f, binary.BigEndian, 1e6) + continue + } + + l.t.Fatal(err) + continue + } + + if err := enc.Encode(&msg); err != nil { + w.CloseWithError(err) + return + } + } + }() + + return r, nil +} + +func newMockPluginAdapter(t *testing.T) Logger { + r, w := io.Pipe() + f, err := ioutil.TempFile("", "mock-plugin-adapter") + assert.NoError(t, err) + + enc := logdriver.NewLogEntryEncoder(w) + a := &pluginAdapterWithRead{ + &pluginAdapter{ + plugin: &mockLoggingPlugin{ + inStream: r, + f: f, + closed: make(chan struct{}), + t: t, + }, + stream: w, + enc: enc, + }, + } + a.plugin.StartLogging("", Info{}) + return a +} + +func TestAdapterReadLogs(t *testing.T) { + l := newMockPluginAdapter(t) + + testMsg := []Message{ + {Line: []byte("Are you the keymaker?"), Timestamp: time.Now()}, + {Line: []byte("Follow the white rabbit"), Timestamp: time.Now()}, + } + for _, msg := range testMsg { + m := msg.copy() + assert.NoError(t, l.Log(m)) + } + + lr, ok := l.(LogReader) + assert.NotNil(t, ok) + + lw := lr.ReadLogs(ReadConfig{}) + + for _, x := range testMsg { + select { + case msg := <-lw.Msg: + testMessageEqual(t, &x, msg) + case <-time.After(10 * time.Second): + t.Fatal("timeout reading logs") + } + } + + select { + case _, ok := <-lw.Msg: + assert.False(t, ok, "expected message channel to be closed") + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for message channel to close") + + } + lw.Close() + + lw = lr.ReadLogs(ReadConfig{Follow: true}) + for _, x := range testMsg { + select { + case msg := <-lw.Msg: + testMessageEqual(t, &x, msg) + case <-time.After(10 * time.Second): + t.Fatal("timeout reading logs") + } + } + + x := Message{Line: []byte("Too infinity and beyond!"), Timestamp: time.Now()} + assert.NoError(t, l.Log(x.copy())) + + select { + case msg, ok := <-lw.Msg: + assert.NotNil(t, ok, "message channel unexpectedly closed") + testMessageEqual(t, &x, msg) + case <-time.After(10 * time.Second): + t.Fatal("timeout reading logs") + } + + l.Close() + select { + case msg, ok := <-lw.Msg: + assert.False(t, ok, "expected message channel to be closed") + assert.Nil(t, msg) + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for logger to close") + } +} + +func testMessageEqual(t *testing.T, a, b *Message) { + assert.Equal(t, a.Line, b.Line) + assert.Equal(t, a.Timestamp.UnixNano(), b.Timestamp.UnixNano()) + assert.Equal(t, a.Source, b.Source) +} diff --git a/vendor/github.com/moby/moby/daemon/logger/awslogs/cloudwatchlogs.go b/vendor/github.com/moby/moby/daemon/logger/awslogs/cloudwatchlogs.go new file mode 100644 index 000000000..4d98468a7 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/awslogs/cloudwatchlogs.go @@ -0,0 +1,598 @@ +// Package awslogs provides the logdriver for forwarding container logs to Amazon CloudWatch Logs +package awslogs + +import ( + "bytes" + "fmt" + "os" + "regexp" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/request" + 
"github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/templates" + "github.com/pkg/errors" +) + +const ( + name = "awslogs" + regionKey = "awslogs-region" + regionEnvKey = "AWS_REGION" + logGroupKey = "awslogs-group" + logStreamKey = "awslogs-stream" + logCreateGroupKey = "awslogs-create-group" + tagKey = "tag" + datetimeFormatKey = "awslogs-datetime-format" + multilinePatternKey = "awslogs-multiline-pattern" + batchPublishFrequency = 5 * time.Second + + // See: http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html + perEventBytes = 26 + maximumBytesPerPut = 1048576 + maximumLogEventsPerPut = 10000 + + // See: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html + maximumBytesPerEvent = 262144 - perEventBytes + + resourceAlreadyExistsCode = "ResourceAlreadyExistsException" + dataAlreadyAcceptedCode = "DataAlreadyAcceptedException" + invalidSequenceTokenCode = "InvalidSequenceTokenException" + resourceNotFoundCode = "ResourceNotFoundException" + + userAgentHeader = "User-Agent" +) + +type logStream struct { + logStreamName string + logGroupName string + logCreateGroup bool + multilinePattern *regexp.Regexp + client api + messages chan *logger.Message + lock sync.RWMutex + closed bool + sequenceToken *string +} + +type api interface { + CreateLogGroup(*cloudwatchlogs.CreateLogGroupInput) (*cloudwatchlogs.CreateLogGroupOutput, error) + CreateLogStream(*cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) + PutLogEvents(*cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) +} + +type regionFinder interface { + Region() (string, error) +} + +type wrappedEvent struct { + inputLogEvent *cloudwatchlogs.InputLogEvent + insertOrder int +} +type byTimestamp []wrappedEvent + +// init registers the awslogs driver +func init() { + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// New creates an awslogs logger using the configuration passed in on the +// context. Supported context configuration variables are awslogs-region, +// awslogs-group, awslogs-stream, awslogs-create-group, awslogs-multiline-pattern +// and awslogs-datetime-format. When available, configuration is +// also taken from environment variables AWS_REGION, AWS_ACCESS_KEY_ID, +// AWS_SECRET_ACCESS_KEY, the shared credentials file (~/.aws/credentials), and +// the EC2 Instance Metadata Service. 
+func New(info logger.Info) (logger.Logger, error) {
+	logGroupName := info.Config[logGroupKey]
+	logStreamName, err := loggerutils.ParseLogTag(info, "{{.FullID}}")
+	if err != nil {
+		return nil, err
+	}
+	logCreateGroup := false
+	if info.Config[logCreateGroupKey] != "" {
+		logCreateGroup, err = strconv.ParseBool(info.Config[logCreateGroupKey])
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if info.Config[logStreamKey] != "" {
+		logStreamName = info.Config[logStreamKey]
+	}
+
+	multilinePattern, err := parseMultilineOptions(info)
+	if err != nil {
+		return nil, err
+	}
+
+	client, err := newAWSLogsClient(info)
+	if err != nil {
+		return nil, err
+	}
+	containerStream := &logStream{
+		logStreamName:    logStreamName,
+		logGroupName:     logGroupName,
+		logCreateGroup:   logCreateGroup,
+		multilinePattern: multilinePattern,
+		client:           client,
+		messages:         make(chan *logger.Message, 4096),
+	}
+	err = containerStream.create()
+	if err != nil {
+		return nil, err
+	}
+	go containerStream.collectBatch()
+
+	return containerStream, nil
+}
+
+// parseMultilineOptions parses the awslogs-multiline-pattern and
+// awslogs-datetime-format options.
+// If awslogs-datetime-format is present, the strftime format is converted
+// to a regular expression and returned.
+// If awslogs-multiline-pattern is present, the pattern is compiled and returned.
+func parseMultilineOptions(info logger.Info) (*regexp.Regexp, error) {
+	dateTimeFormat := info.Config[datetimeFormatKey]
+	multilinePatternValue := info.Config[multilinePatternKey]
+	// strftime input is parsed into a regular expression
+	if dateTimeFormat != "" {
+		// %. matches each strftime format sequence and ReplaceAllStringFunc
+		// looks up each format sequence in the conversion table strftimeToRegex
+		// to replace with a defined regular expression
+		r := regexp.MustCompile("%.")
+		multilinePatternValue = r.ReplaceAllStringFunc(dateTimeFormat, func(s string) string {
+			return strftimeToRegex[s]
+		})
+	}
+	if multilinePatternValue != "" {
+		multilinePattern, err := regexp.Compile(multilinePatternValue)
+		if err != nil {
+			return nil, errors.Wrapf(err, "awslogs could not parse multiline pattern key %q", multilinePatternValue)
+		}
+		return multilinePattern, nil
+	}
+	return nil, nil
+}
+
+// strftimeToRegex maps strftime format strings to regular expressions
+var strftimeToRegex = map[string]string{
+	/*weekdayShort          */ `%a`: `(?:Mon|Tue|Wed|Thu|Fri|Sat|Sun)`,
+	/*weekdayFull           */ `%A`: `(?:Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)`,
+	/*weekdayZeroIndex      */ `%w`: `[0-6]`,
+	/*dayZeroPadded         */ `%d`: `(?:0[1-9]|[12][0-9]|3[01])`,
+	/*monthShort            */ `%b`: `(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)`,
+	/*monthFull             */ `%B`: `(?:January|February|March|April|May|June|July|August|September|October|November|December)`,
+	/*monthZeroPadded       */ `%m`: `(?:0[1-9]|1[0-2])`,
+	/*yearCentury           */ `%Y`: `\d{4}`,
+	/*yearZeroPadded        */ `%y`: `\d{2}`,
+	/*hour24ZeroPadded      */ `%H`: `(?:[01][0-9]|2[0-3])`,
+	/*hour12ZeroPadded      */ `%I`: `(?:0[0-9]|1[0-2])`,
+	/*AM or PM              */ `%p`: `[AP]M`,
+	/*minuteZeroPadded      */ `%M`: `[0-5][0-9]`,
+	/*secondZeroPadded      */ `%S`: `[0-5][0-9]`,
+	/*microsecondZeroPadded */ `%f`: `\d{6}`,
+	/*utcOffset             */ `%z`: `[+-]\d{4}`,
+	/*tzName                */ `%Z`: `[A-Z]{1,4}T`,
+	/*dayOfYearZeroPadded   */ `%j`: `(?:0[0-9][1-9]|[12][0-9][0-9]|3[0-5][0-9]|36[0-6])`,
+	/*milliseconds          */ `%L`: `\.\d{3}`,
+}
+
+func parseLogGroup(info logger.Info, groupTemplate string) (string, error) {
+	tmpl, err := templates.NewParse("log-group", groupTemplate)
+	if err != nil {
+		return "", err
+	}
+	buf := new(bytes.Buffer)
+	if err := tmpl.Execute(buf, &info); err
!= nil { + return "", err + } + + return buf.String(), nil +} + +// newRegionFinder is a variable such that the implementation +// can be swapped out for unit tests. +var newRegionFinder = func() regionFinder { + return ec2metadata.New(session.New()) +} + +// newAWSLogsClient creates the service client for Amazon CloudWatch Logs. +// Customizations to the default client from the SDK include a Docker-specific +// User-Agent string and automatic region detection using the EC2 Instance +// Metadata Service when region is otherwise unspecified. +func newAWSLogsClient(info logger.Info) (api, error) { + var region *string + if os.Getenv(regionEnvKey) != "" { + region = aws.String(os.Getenv(regionEnvKey)) + } + if info.Config[regionKey] != "" { + region = aws.String(info.Config[regionKey]) + } + if region == nil || *region == "" { + logrus.Info("Trying to get region from EC2 Metadata") + ec2MetadataClient := newRegionFinder() + r, err := ec2MetadataClient.Region() + if err != nil { + logrus.WithFields(logrus.Fields{ + "error": err, + }).Error("Could not get region from EC2 metadata, environment, or log option") + return nil, errors.New("Cannot determine region for awslogs driver") + } + region = &r + } + logrus.WithFields(logrus.Fields{ + "region": *region, + }).Debug("Created awslogs client") + + client := cloudwatchlogs.New(session.New(), aws.NewConfig().WithRegion(*region)) + + client.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "DockerUserAgentHandler", + Fn: func(r *request.Request) { + currentAgent := r.HTTPRequest.Header.Get(userAgentHeader) + r.HTTPRequest.Header.Set(userAgentHeader, + fmt.Sprintf("Docker %s (%s) %s", + dockerversion.Version, runtime.GOOS, currentAgent)) + }, + }) + return client, nil +} + +// Name returns the name of the awslogs logging driver +func (l *logStream) Name() string { + return name +} + +// Log submits messages for logging by an instance of the awslogs logging driver +func (l *logStream) Log(msg *logger.Message) error { + l.lock.RLock() + defer l.lock.RUnlock() + if !l.closed { + l.messages <- msg + } + return nil +} + +// Close closes the instance of the awslogs logging driver +func (l *logStream) Close() error { + l.lock.Lock() + defer l.lock.Unlock() + if !l.closed { + close(l.messages) + } + l.closed = true + return nil +} + +// create creates log group and log stream for the instance of the awslogs logging driver +func (l *logStream) create() error { + if err := l.createLogStream(); err != nil { + if l.logCreateGroup { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == resourceNotFoundCode { + if err := l.createLogGroup(); err != nil { + return err + } + return l.createLogStream() + } + } + return err + } + + return nil +} + +// createLogGroup creates a log group for the instance of the awslogs logging driver +func (l *logStream) createLogGroup() error { + if _, err := l.client.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{ + LogGroupName: aws.String(l.logGroupName), + }); err != nil { + if awsErr, ok := err.(awserr.Error); ok { + fields := logrus.Fields{ + "errorCode": awsErr.Code(), + "message": awsErr.Message(), + "origError": awsErr.OrigErr(), + "logGroupName": l.logGroupName, + "logCreateGroup": l.logCreateGroup, + } + if awsErr.Code() == resourceAlreadyExistsCode { + // Allow creation to succeed + logrus.WithFields(fields).Info("Log group already exists") + return nil + } + logrus.WithFields(fields).Error("Failed to create log group") + } + return err + } + return nil +} + +// createLogStream creates a log stream for 
the instance of the awslogs logging driver
+func (l *logStream) createLogStream() error {
+	input := &cloudwatchlogs.CreateLogStreamInput{
+		LogGroupName:  aws.String(l.logGroupName),
+		LogStreamName: aws.String(l.logStreamName),
+	}
+
+	_, err := l.client.CreateLogStream(input)
+
+	if err != nil {
+		if awsErr, ok := err.(awserr.Error); ok {
+			fields := logrus.Fields{
+				"errorCode":     awsErr.Code(),
+				"message":       awsErr.Message(),
+				"origError":     awsErr.OrigErr(),
+				"logGroupName":  l.logGroupName,
+				"logStreamName": l.logStreamName,
+			}
+			if awsErr.Code() == resourceAlreadyExistsCode {
+				// Allow creation to succeed
+				logrus.WithFields(fields).Info("Log stream already exists")
+				return nil
+			}
+			logrus.WithFields(fields).Error("Failed to create log stream")
+		}
+	}
+	return err
+}
+
+// newTicker is used for time-based batching.  newTicker is a variable such
+// that the implementation can be swapped out for unit tests.
+var newTicker = func(freq time.Duration) *time.Ticker {
+	return time.NewTicker(freq)
+}
+
+// collectBatch executes as a goroutine to perform batching of log events for
+// submission to the log stream.  If the awslogs-multiline-pattern or
+// awslogs-datetime-format options have been configured, multiline processing
+// is enabled, where log messages are stored in an event buffer until a multiline
+// pattern match is found, at which point the messages in the event buffer are
+// pushed to CloudWatch Logs as a single log event.  Multiline messages are
+// processed according to the maximumBytesPerPut constraint, and the
+// implementation only allows for messages to be buffered for a maximum of
+// 2*batchPublishFrequency.  When events are ready to be processed for
+// submission to CloudWatch Logs, the processEvent method is called.  If a
+// multiline pattern is not configured, log events are submitted to the
+// processEvent method immediately.
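+//
+// As an illustrative example (not taken from the tests), with
+// awslogs-multiline-pattern set to ^ERROR, the lines "ERROR one",
+// "  detail", "ERROR two" would be submitted as the two events
+// "ERROR one\n  detail\n" and "ERROR two\n".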
+func (l *logStream) collectBatch() {
+	ticker := newTicker(batchPublishFrequency)
+	var events []wrappedEvent
+	var eventBuffer []byte
+	var eventBufferTimestamp int64
+	for {
+		select {
+		case t := <-ticker.C:
+			// If the event buffer is older than the batch publish frequency, flush the event buffer
+			if eventBufferTimestamp > 0 && len(eventBuffer) > 0 {
+				eventBufferAge := t.UnixNano()/int64(time.Millisecond) - eventBufferTimestamp
+				eventBufferExpired := eventBufferAge > int64(batchPublishFrequency)/int64(time.Millisecond)
+				eventBufferNegative := eventBufferAge < 0
+				if eventBufferExpired || eventBufferNegative {
+					events = l.processEvent(events, eventBuffer, eventBufferTimestamp)
+					// Reset the event buffer so the same lines are not flushed again
+					eventBuffer = eventBuffer[:0]
+				}
+			}
+			l.publishBatch(events)
+			events = events[:0]
+		case msg, more := <-l.messages:
+			if !more {
+				// Flush event buffer
+				events = l.processEvent(events, eventBuffer, eventBufferTimestamp)
+				l.publishBatch(events)
+				return
+			}
+			if eventBufferTimestamp == 0 {
+				eventBufferTimestamp = msg.Timestamp.UnixNano() / int64(time.Millisecond)
+			}
+			unprocessedLine := msg.Line
+			if l.multilinePattern != nil {
+				if l.multilinePattern.Match(unprocessedLine) {
+					// This is a new log event so flush the current eventBuffer to events
+					events = l.processEvent(events, eventBuffer, eventBufferTimestamp)
+					eventBufferTimestamp = msg.Timestamp.UnixNano() / int64(time.Millisecond)
+					eventBuffer = eventBuffer[:0]
+				}
+				// If we will exceed max bytes per event, flush the current event buffer before appending
+				if len(eventBuffer)+len(unprocessedLine) > maximumBytesPerEvent {
+					events = l.processEvent(events, eventBuffer, eventBufferTimestamp)
+					eventBuffer = eventBuffer[:0]
+				}
+				// Append new line
+				processedLine := append(unprocessedLine, "\n"...)
+				eventBuffer = append(eventBuffer, processedLine...)
+				logger.PutMessage(msg)
+			} else {
+				events = l.processEvent(events, unprocessedLine, msg.Timestamp.UnixNano()/int64(time.Millisecond))
+				logger.PutMessage(msg)
+			}
+		}
+	}
+}
+
+// processEvent processes log events that are ready for submission to CloudWatch
+// Logs.  Batching is performed by time and by size.  Time-based batching
+// occurs at a 5 second interval (defined in the batchPublishFrequency const).
+// Size-based batching is performed on the maximum number of events per batch
+// (defined in maximumLogEventsPerPut) and the maximum number of total bytes in a
+// batch (defined in maximumBytesPerPut).  Log messages are split by the maximum
+// bytes per event (defined in maximumBytesPerEvent).  There is a fixed per-event
+// byte overhead (defined in perEventBytes) which is accounted for in split- and
+// batch-calculations.
+func (l *logStream) processEvent(events []wrappedEvent, unprocessedLine []byte, timestamp int64) []wrappedEvent {
+	bytes := 0
+	for len(unprocessedLine) > 0 {
+		// Split the line so that its length does not exceed the maximum
+		lineBytes := len(unprocessedLine)
+		if lineBytes > maximumBytesPerEvent {
+			lineBytes = maximumBytesPerEvent
+		}
+		line := unprocessedLine[:lineBytes]
+		unprocessedLine = unprocessedLine[lineBytes:]
+		if (len(events) >= maximumLogEventsPerPut) || (bytes+lineBytes+perEventBytes > maximumBytesPerPut) {
+			// Publish an existing batch if it's already over the maximum number of events or if adding this
+			// event would push it over the maximum number of total bytes.
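+			// For example, a 100-byte line costs 100+perEventBytes = 126 bytes
+			// of the 1,048,576-byte (maximumBytesPerPut) budget.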
+ l.publishBatch(events) + events = events[:0] + bytes = 0 + } + events = append(events, wrappedEvent{ + inputLogEvent: &cloudwatchlogs.InputLogEvent{ + Message: aws.String(string(line)), + Timestamp: aws.Int64(timestamp), + }, + insertOrder: len(events), + }) + bytes += (lineBytes + perEventBytes) + } + return events +} + +// publishBatch calls PutLogEvents for a given set of InputLogEvents, +// accounting for sequencing requirements (each request must reference the +// sequence token returned by the previous request). +func (l *logStream) publishBatch(events []wrappedEvent) { + if len(events) == 0 { + return + } + + // events in a batch must be sorted by timestamp + // see http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html + sort.Sort(byTimestamp(events)) + cwEvents := unwrapEvents(events) + + nextSequenceToken, err := l.putLogEvents(cwEvents, l.sequenceToken) + + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == dataAlreadyAcceptedCode { + // already submitted, just grab the correct sequence token + parts := strings.Split(awsErr.Message(), " ") + nextSequenceToken = &parts[len(parts)-1] + logrus.WithFields(logrus.Fields{ + "errorCode": awsErr.Code(), + "message": awsErr.Message(), + "logGroupName": l.logGroupName, + "logStreamName": l.logStreamName, + }).Info("Data already accepted, ignoring error") + err = nil + } else if awsErr.Code() == invalidSequenceTokenCode { + // sequence code is bad, grab the correct one and retry + parts := strings.Split(awsErr.Message(), " ") + token := parts[len(parts)-1] + nextSequenceToken, err = l.putLogEvents(cwEvents, &token) + } + } + } + if err != nil { + logrus.Error(err) + } else { + l.sequenceToken = nextSequenceToken + } +} + +// putLogEvents wraps the PutLogEvents API +func (l *logStream) putLogEvents(events []*cloudwatchlogs.InputLogEvent, sequenceToken *string) (*string, error) { + input := &cloudwatchlogs.PutLogEventsInput{ + LogEvents: events, + SequenceToken: sequenceToken, + LogGroupName: aws.String(l.logGroupName), + LogStreamName: aws.String(l.logStreamName), + } + resp, err := l.client.PutLogEvents(input) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + logrus.WithFields(logrus.Fields{ + "errorCode": awsErr.Code(), + "message": awsErr.Message(), + "origError": awsErr.OrigErr(), + "logGroupName": l.logGroupName, + "logStreamName": l.logStreamName, + }).Error("Failed to put log events") + } + return nil, err + } + return resp.NextSequenceToken, nil +} + +// ValidateLogOpt looks for awslogs-specific log options awslogs-region, +// awslogs-group, awslogs-stream, awslogs-create-group, awslogs-datetime-format, +// awslogs-multiline-pattern +func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case logGroupKey: + case logStreamKey: + case logCreateGroupKey: + case regionKey: + case tagKey: + case datetimeFormatKey: + case multilinePatternKey: + default: + return fmt.Errorf("unknown log opt '%s' for %s log driver", key, name) + } + } + if cfg[logGroupKey] == "" { + return fmt.Errorf("must specify a value for log opt '%s'", logGroupKey) + } + if cfg[logCreateGroupKey] != "" { + if _, err := strconv.ParseBool(cfg[logCreateGroupKey]); err != nil { + return fmt.Errorf("must specify valid value for log opt '%s': %v", logCreateGroupKey, err) + } + } + _, datetimeFormatKeyExists := cfg[datetimeFormatKey] + _, multilinePatternKeyExists := cfg[multilinePatternKey] + if datetimeFormatKeyExists && multilinePatternKeyExists { + 
return fmt.Errorf("you cannot configure log opt '%s' and '%s' at the same time", datetimeFormatKey, multilinePatternKey) + } + return nil +} + +// Len returns the length of a byTimestamp slice. Len is required by the +// sort.Interface interface. +func (slice byTimestamp) Len() int { + return len(slice) +} + +// Less compares two values in a byTimestamp slice by Timestamp. Less is +// required by the sort.Interface interface. +func (slice byTimestamp) Less(i, j int) bool { + iTimestamp, jTimestamp := int64(0), int64(0) + if slice != nil && slice[i].inputLogEvent.Timestamp != nil { + iTimestamp = *slice[i].inputLogEvent.Timestamp + } + if slice != nil && slice[j].inputLogEvent.Timestamp != nil { + jTimestamp = *slice[j].inputLogEvent.Timestamp + } + if iTimestamp == jTimestamp { + return slice[i].insertOrder < slice[j].insertOrder + } + return iTimestamp < jTimestamp +} + +// Swap swaps two values in a byTimestamp slice with each other. Swap is +// required by the sort.Interface interface. +func (slice byTimestamp) Swap(i, j int) { + slice[i], slice[j] = slice[j], slice[i] +} + +func unwrapEvents(events []wrappedEvent) []*cloudwatchlogs.InputLogEvent { + cwEvents := make([]*cloudwatchlogs.InputLogEvent, len(events)) + for i, input := range events { + cwEvents[i] = input.inputLogEvent + } + return cwEvents +} diff --git a/vendor/github.com/moby/moby/daemon/logger/awslogs/cloudwatchlogs_test.go b/vendor/github.com/moby/moby/daemon/logger/awslogs/cloudwatchlogs_test.go new file mode 100644 index 000000000..e3862ffeb --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/awslogs/cloudwatchlogs_test.go @@ -0,0 +1,1053 @@ +package awslogs + +import ( + "errors" + "fmt" + "net/http" + "reflect" + "regexp" + "runtime" + "strings" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/dockerversion" + "github.com/stretchr/testify/assert" +) + +const ( + groupName = "groupName" + streamName = "streamName" + sequenceToken = "sequenceToken" + nextSequenceToken = "nextSequenceToken" + logline = "this is a log line\r" + multilineLogline = "2017-01-01 01:01:44 This is a multiline log entry\r" +) + +// Generates i multi-line events each with j lines +func (l *logStream) logGenerator(lineCount int, multilineCount int) { + for i := 0; i < multilineCount; i++ { + l.Log(&logger.Message{ + Line: []byte(multilineLogline), + Timestamp: time.Time{}, + }) + for j := 0; j < lineCount; j++ { + l.Log(&logger.Message{ + Line: []byte(logline), + Timestamp: time.Time{}, + }) + } + } +} + +func TestNewAWSLogsClientUserAgentHandler(t *testing.T) { + info := logger.Info{ + Config: map[string]string{ + regionKey: "us-east-1", + }, + } + + client, err := newAWSLogsClient(info) + if err != nil { + t.Fatal(err) + } + realClient, ok := client.(*cloudwatchlogs.CloudWatchLogs) + if !ok { + t.Fatal("Could not cast client to cloudwatchlogs.CloudWatchLogs") + } + buildHandlerList := realClient.Handlers.Build + request := &request.Request{ + HTTPRequest: &http.Request{ + Header: http.Header{}, + }, + } + buildHandlerList.Run(request) + expectedUserAgentString := fmt.Sprintf("Docker %s (%s) %s/%s (%s; %s; %s)", + dockerversion.Version, runtime.GOOS, aws.SDKName, aws.SDKVersion, runtime.Version(), runtime.GOOS, runtime.GOARCH) + userAgent := 
request.HTTPRequest.Header.Get("User-Agent") + if userAgent != expectedUserAgentString { + t.Errorf("Wrong User-Agent string, expected \"%s\" but was \"%s\"", + expectedUserAgentString, userAgent) + } +} + +func TestNewAWSLogsClientRegionDetect(t *testing.T) { + info := logger.Info{ + Config: map[string]string{}, + } + + mockMetadata := newMockMetadataClient() + newRegionFinder = func() regionFinder { + return mockMetadata + } + mockMetadata.regionResult <- ®ionResult{ + successResult: "us-east-1", + } + + _, err := newAWSLogsClient(info) + if err != nil { + t.Fatal(err) + } +} + +func TestCreateSuccess(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + } + mockClient.createLogStreamResult <- &createLogStreamResult{} + + err := stream.create() + + if err != nil { + t.Errorf("Received unexpected err: %v\n", err) + } + argument := <-mockClient.createLogStreamArgument + if argument.LogGroupName == nil { + t.Fatal("Expected non-nil LogGroupName") + } + if *argument.LogGroupName != groupName { + t.Errorf("Expected LogGroupName to be %s", groupName) + } + if argument.LogStreamName == nil { + t.Fatal("Expected non-nil LogStreamName") + } + if *argument.LogStreamName != streamName { + t.Errorf("Expected LogStreamName to be %s", streamName) + } +} + +func TestCreateLogGroupSuccess(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + logCreateGroup: true, + } + mockClient.createLogGroupResult <- &createLogGroupResult{} + mockClient.createLogStreamResult <- &createLogStreamResult{} + + err := stream.create() + + if err != nil { + t.Errorf("Received unexpected err: %v\n", err) + } + argument := <-mockClient.createLogStreamArgument + if argument.LogGroupName == nil { + t.Fatal("Expected non-nil LogGroupName") + } + if *argument.LogGroupName != groupName { + t.Errorf("Expected LogGroupName to be %s", groupName) + } + if argument.LogStreamName == nil { + t.Fatal("Expected non-nil LogStreamName") + } + if *argument.LogStreamName != streamName { + t.Errorf("Expected LogStreamName to be %s", streamName) + } +} + +func TestCreateError(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + } + mockClient.createLogStreamResult <- &createLogStreamResult{ + errorResult: errors.New("Error!"), + } + + err := stream.create() + + if err == nil { + t.Fatal("Expected non-nil err") + } +} + +func TestCreateAlreadyExists(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + } + mockClient.createLogStreamResult <- &createLogStreamResult{ + errorResult: awserr.New(resourceAlreadyExistsCode, "", nil), + } + + err := stream.create() + + if err != nil { + t.Fatal("Expected nil err") + } +} + +func TestPublishBatchSuccess(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + events := []wrappedEvent{ + { + inputLogEvent: &cloudwatchlogs.InputLogEvent{ + Message: aws.String(logline), + }, + }, + } + + stream.publishBatch(events) + if stream.sequenceToken == nil { + t.Fatal("Expected non-nil sequenceToken") + } + if *stream.sequenceToken != 
nextSequenceToken { + t.Errorf("Expected sequenceToken to be %s, but was %s", nextSequenceToken, *stream.sequenceToken) + } + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if argument.SequenceToken == nil { + t.Fatal("Expected non-nil PutLogEventsInput.SequenceToken") + } + if *argument.SequenceToken != sequenceToken { + t.Errorf("Expected PutLogEventsInput.SequenceToken to be %s, but was %s", sequenceToken, *argument.SequenceToken) + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) + } + if argument.LogEvents[0] != events[0].inputLogEvent { + t.Error("Expected event to equal input") + } +} + +func TestPublishBatchError(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + errorResult: errors.New("Error!"), + } + + events := []wrappedEvent{ + { + inputLogEvent: &cloudwatchlogs.InputLogEvent{ + Message: aws.String(logline), + }, + }, + } + + stream.publishBatch(events) + if stream.sequenceToken == nil { + t.Fatal("Expected non-nil sequenceToken") + } + if *stream.sequenceToken != sequenceToken { + t.Errorf("Expected sequenceToken to be %s, but was %s", sequenceToken, *stream.sequenceToken) + } +} + +func TestPublishBatchInvalidSeqSuccess(t *testing.T) { + mockClient := newMockClientBuffered(2) + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + errorResult: awserr.New(invalidSequenceTokenCode, "use token token", nil), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + + events := []wrappedEvent{ + { + inputLogEvent: &cloudwatchlogs.InputLogEvent{ + Message: aws.String(logline), + }, + }, + } + + stream.publishBatch(events) + if stream.sequenceToken == nil { + t.Fatal("Expected non-nil sequenceToken") + } + if *stream.sequenceToken != nextSequenceToken { + t.Errorf("Expected sequenceToken to be %s, but was %s", nextSequenceToken, *stream.sequenceToken) + } + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if argument.SequenceToken == nil { + t.Fatal("Expected non-nil PutLogEventsInput.SequenceToken") + } + if *argument.SequenceToken != sequenceToken { + t.Errorf("Expected PutLogEventsInput.SequenceToken to be %s, but was %s", sequenceToken, *argument.SequenceToken) + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) + } + if argument.LogEvents[0] != events[0].inputLogEvent { + t.Error("Expected event to equal input") + } + + argument = <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if argument.SequenceToken == nil { + t.Fatal("Expected non-nil PutLogEventsInput.SequenceToken") + } + if *argument.SequenceToken != "token" { + t.Errorf("Expected PutLogEventsInput.SequenceToken to be %s, but was %s", "token", *argument.SequenceToken) + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 element, but contains %d", 
len(argument.LogEvents)) + } + if argument.LogEvents[0] != events[0].inputLogEvent { + t.Error("Expected event to equal input") + } +} + +func TestPublishBatchAlreadyAccepted(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + errorResult: awserr.New(dataAlreadyAcceptedCode, "use token token", nil), + } + + events := []wrappedEvent{ + { + inputLogEvent: &cloudwatchlogs.InputLogEvent{ + Message: aws.String(logline), + }, + }, + } + + stream.publishBatch(events) + if stream.sequenceToken == nil { + t.Fatal("Expected non-nil sequenceToken") + } + if *stream.sequenceToken != "token" { + t.Errorf("Expected sequenceToken to be %s, but was %s", "token", *stream.sequenceToken) + } + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if argument.SequenceToken == nil { + t.Fatal("Expected non-nil PutLogEventsInput.SequenceToken") + } + if *argument.SequenceToken != sequenceToken { + t.Errorf("Expected PutLogEventsInput.SequenceToken to be %s, but was %s", sequenceToken, *argument.SequenceToken) + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) + } + if argument.LogEvents[0] != events[0].inputLogEvent { + t.Error("Expected event to equal input") + } +} + +func TestCollectBatchSimple(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + ticks := make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + stream.Log(&logger.Message{ + Line: []byte(logline), + Timestamp: time.Time{}, + }) + + ticks <- time.Time{} + stream.Close() + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) + } + if *argument.LogEvents[0].Message != logline { + t.Errorf("Expected message to be %s but was %s", logline, *argument.LogEvents[0].Message) + } +} + +func TestCollectBatchTicker(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + ticks := make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + stream.Log(&logger.Message{ + Line: []byte(logline + " 1"), + Timestamp: time.Time{}, + }) + stream.Log(&logger.Message{ + Line: []byte(logline + " 2"), + Timestamp: time.Time{}, + }) + + ticks <- time.Time{} + + // Verify first batch + argument := <-mockClient.putLogEventsArgument + if argument == 
nil {
+		t.Fatal("Expected non-nil PutLogEventsInput")
+	}
+	if len(argument.LogEvents) != 2 {
+		t.Errorf("Expected LogEvents to contain 2 elements, but contains %d", len(argument.LogEvents))
+	}
+	if *argument.LogEvents[0].Message != logline+" 1" {
+		t.Errorf("Expected message to be %s but was %s", logline+" 1", *argument.LogEvents[0].Message)
+	}
+	if *argument.LogEvents[1].Message != logline+" 2" {
+		t.Errorf("Expected message to be %s but was %s", logline+" 2", *argument.LogEvents[1].Message)
+	}
+
+	stream.Log(&logger.Message{
+		Line:      []byte(logline + " 3"),
+		Timestamp: time.Time{},
+	})
+
+	ticks <- time.Time{}
+	argument = <-mockClient.putLogEventsArgument
+	if argument == nil {
+		t.Fatal("Expected non-nil PutLogEventsInput")
+	}
+	if len(argument.LogEvents) != 1 {
+		t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents))
+	}
+	if *argument.LogEvents[0].Message != logline+" 3" {
+		t.Errorf("Expected message to be %s but was %s", logline+" 3", *argument.LogEvents[0].Message)
+	}
+
+	stream.Close()
+}
+
+func TestCollectBatchMultilinePattern(t *testing.T) {
+	mockClient := newMockClient()
+	multilinePattern := regexp.MustCompile("xxxx")
+	stream := &logStream{
+		client:           mockClient,
+		logGroupName:     groupName,
+		logStreamName:    streamName,
+		multilinePattern: multilinePattern,
+		sequenceToken:    aws.String(sequenceToken),
+		messages:         make(chan *logger.Message),
+	}
+	mockClient.putLogEventsResult <- &putLogEventsResult{
+		successResult: &cloudwatchlogs.PutLogEventsOutput{
+			NextSequenceToken: aws.String(nextSequenceToken),
+		},
+	}
+	ticks := make(chan time.Time)
+	newTicker = func(_ time.Duration) *time.Ticker {
+		return &time.Ticker{
+			C: ticks,
+		}
+	}
+
+	go stream.collectBatch()
+
+	stream.Log(&logger.Message{
+		Line:      []byte(logline),
+		Timestamp: time.Now(),
+	})
+	stream.Log(&logger.Message{
+		Line:      []byte(logline),
+		Timestamp: time.Now(),
+	})
+	stream.Log(&logger.Message{
+		Line:      []byte("xxxx " + logline),
+		Timestamp: time.Now(),
+	})
+
+	ticks <- time.Now()
+
+	// Verify single multiline event
+	argument := <-mockClient.putLogEventsArgument
+	assert.NotNil(t, argument, "Expected non-nil PutLogEventsInput")
+	assert.Equal(t, 1, len(argument.LogEvents), "Expected single multiline event")
+	assert.Equal(t, logline+"\n"+logline+"\n", *argument.LogEvents[0].Message, "Received incorrect multiline message")
+
+	stream.Close()
+
+	// Verify single event
+	argument = <-mockClient.putLogEventsArgument
+	assert.NotNil(t, argument, "Expected non-nil PutLogEventsInput")
+	assert.Equal(t, 1, len(argument.LogEvents), "Expected single multiline event")
+	assert.Equal(t, "xxxx "+logline+"\n", *argument.LogEvents[0].Message, "Received incorrect multiline message")
+}
+
+func BenchmarkCollectBatch(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		mockClient := newMockClient()
+		stream := &logStream{
+			client:        mockClient,
+			logGroupName:  groupName,
+			logStreamName: streamName,
+			sequenceToken: aws.String(sequenceToken),
+			messages:      make(chan *logger.Message),
+		}
+		mockClient.putLogEventsResult <- &putLogEventsResult{
+			successResult: &cloudwatchlogs.PutLogEventsOutput{
+				NextSequenceToken: aws.String(nextSequenceToken),
+			},
+		}
+		ticks := make(chan time.Time)
+		newTicker = func(_ time.Duration) *time.Ticker {
+			return &time.Ticker{
+				C: ticks,
+			}
+		}
+
+		go stream.collectBatch()
+		stream.logGenerator(10, 100)
+		ticks <- time.Time{}
+		stream.Close()
+	}
+}
+
+func BenchmarkCollectBatchMultilinePattern(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		mockClient 
:= newMockClient()
+		multilinePattern := regexp.MustCompile(`\d{4}-(?:0[1-9]|1[0-2])-(?:0[1-9]|[12][0-9]|3[01]) (?:[01][0-9]|2[0-3]):[0-5][0-9]:[0-5][0-9]`)
+		stream := &logStream{
+			client:           mockClient,
+			logGroupName:     groupName,
+			logStreamName:    streamName,
+			multilinePattern: multilinePattern,
+			sequenceToken:    aws.String(sequenceToken),
+			messages:         make(chan *logger.Message),
+		}
+		mockClient.putLogEventsResult <- &putLogEventsResult{
+			successResult: &cloudwatchlogs.PutLogEventsOutput{
+				NextSequenceToken: aws.String(nextSequenceToken),
+			},
+		}
+		ticks := make(chan time.Time)
+		newTicker = func(_ time.Duration) *time.Ticker {
+			return &time.Ticker{
+				C: ticks,
+			}
+		}
+		go stream.collectBatch()
+		stream.logGenerator(10, 100)
+		ticks <- time.Time{}
+		stream.Close()
+	}
+}
+
+func TestCollectBatchMultilinePatternMaxEventAge(t *testing.T) {
+	mockClient := newMockClient()
+	multilinePattern := regexp.MustCompile("xxxx")
+	stream := &logStream{
+		client:           mockClient,
+		logGroupName:     groupName,
+		logStreamName:    streamName,
+		multilinePattern: multilinePattern,
+		sequenceToken:    aws.String(sequenceToken),
+		messages:         make(chan *logger.Message),
+	}
+	mockClient.putLogEventsResult <- &putLogEventsResult{
+		successResult: &cloudwatchlogs.PutLogEventsOutput{
+			NextSequenceToken: aws.String(nextSequenceToken),
+		},
+	}
+	ticks := make(chan time.Time)
+	newTicker = func(_ time.Duration) *time.Ticker {
+		return &time.Ticker{
+			C: ticks,
+		}
+	}
+
+	go stream.collectBatch()
+
+	stream.Log(&logger.Message{
+		Line:      []byte(logline),
+		Timestamp: time.Now(),
+	})
+
+	// Log an event 1 second later
+	stream.Log(&logger.Message{
+		Line:      []byte(logline),
+		Timestamp: time.Now().Add(time.Second),
+	})
+
+	// Fire the ticker just after batchPublishFrequency has elapsed
+	ticks <- time.Now().Add(batchPublishFrequency + time.Second)
+
+	// Verify single multiline event is flushed after maximum event buffer age (batchPublishFrequency)
+	argument := <-mockClient.putLogEventsArgument
+	assert.NotNil(t, argument, "Expected non-nil PutLogEventsInput")
+	assert.Equal(t, 1, len(argument.LogEvents), "Expected single multiline event")
+	assert.Equal(t, logline+"\n"+logline+"\n", *argument.LogEvents[0].Message, "Received incorrect multiline message")
+
+	stream.Close()
+}
+
+func TestCollectBatchMultilinePatternNegativeEventAge(t *testing.T) {
+	mockClient := newMockClient()
+	multilinePattern := regexp.MustCompile("xxxx")
+	stream := &logStream{
+		client:           mockClient,
+		logGroupName:     groupName,
+		logStreamName:    streamName,
+		multilinePattern: multilinePattern,
+		sequenceToken:    aws.String(sequenceToken),
+		messages:         make(chan *logger.Message),
+	}
+	mockClient.putLogEventsResult <- &putLogEventsResult{
+		successResult: &cloudwatchlogs.PutLogEventsOutput{
+			NextSequenceToken: aws.String(nextSequenceToken),
+		},
+	}
+	ticks := make(chan time.Time)
+	newTicker = func(_ time.Duration) *time.Ticker {
+		return &time.Ticker{
+			C: ticks,
+		}
+	}
+
+	go stream.collectBatch()
+
+	stream.Log(&logger.Message{
+		Line:      []byte(logline),
+		Timestamp: time.Now(),
+	})
+
+	// Log an event 1 second later
+	stream.Log(&logger.Message{
+		Line:      []byte(logline),
+		Timestamp: time.Now().Add(time.Second),
+	})
+
+	// Fire the ticker in the past to simulate a negative event buffer age
+	ticks <- time.Now().Add(-time.Second)
+
+	// Verify single multiline event is flushed with a negative event buffer age
+	argument := <-mockClient.putLogEventsArgument
+	assert.NotNil(t, argument, "Expected non-nil PutLogEventsInput")
+	assert.Equal(t, 1, len(argument.LogEvents), 
"Expected single multiline event") + assert.Equal(t, logline+"\n"+logline+"\n", *argument.LogEvents[0].Message, "Received incorrect multiline message") + + stream.Close() +} + +func TestCollectBatchClose(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + var ticks = make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + stream.Log(&logger.Message{ + Line: []byte(logline), + Timestamp: time.Time{}, + }) + + // no ticks + stream.Close() + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) + } + if *argument.LogEvents[0].Message != logline { + t.Errorf("Expected message to be %s but was %s", logline, *argument.LogEvents[0].Message) + } +} + +func TestCollectBatchLineSplit(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + var ticks = make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + longline := strings.Repeat("A", maximumBytesPerEvent) + stream.Log(&logger.Message{ + Line: []byte(longline + "B"), + Timestamp: time.Time{}, + }) + + // no ticks + stream.Close() + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if len(argument.LogEvents) != 2 { + t.Errorf("Expected LogEvents to contain 2 elements, but contains %d", len(argument.LogEvents)) + } + if *argument.LogEvents[0].Message != longline { + t.Errorf("Expected message to be %s but was %s", longline, *argument.LogEvents[0].Message) + } + if *argument.LogEvents[1].Message != "B" { + t.Errorf("Expected message to be %s but was %s", "B", *argument.LogEvents[1].Message) + } +} + +func TestCollectBatchMaxEvents(t *testing.T) { + mockClient := newMockClientBuffered(1) + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + var ticks = make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + line := "A" + for i := 0; i <= maximumLogEventsPerPut; i++ { + stream.Log(&logger.Message{ + Line: []byte(line), + Timestamp: time.Time{}, + }) + } + + // no ticks + stream.Close() + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + 
if len(argument.LogEvents) != maximumLogEventsPerPut {
+		t.Errorf("Expected LogEvents to contain %d elements, but contains %d", maximumLogEventsPerPut, len(argument.LogEvents))
+	}
+
+	argument = <-mockClient.putLogEventsArgument
+	if argument == nil {
+		t.Fatal("Expected non-nil PutLogEventsInput")
+	}
+	if len(argument.LogEvents) != 1 {
+		t.Errorf("Expected LogEvents to contain %d elements, but contains %d", 1, len(argument.LogEvents))
+	}
+}
+
+func TestCollectBatchMaxTotalBytes(t *testing.T) {
+	mockClient := newMockClientBuffered(1)
+	stream := &logStream{
+		client:        mockClient,
+		logGroupName:  groupName,
+		logStreamName: streamName,
+		sequenceToken: aws.String(sequenceToken),
+		messages:      make(chan *logger.Message),
+	}
+	mockClient.putLogEventsResult <- &putLogEventsResult{
+		successResult: &cloudwatchlogs.PutLogEventsOutput{
+			NextSequenceToken: aws.String(nextSequenceToken),
+		},
+	}
+	var ticks = make(chan time.Time)
+	newTicker = func(_ time.Duration) *time.Ticker {
+		return &time.Ticker{
+			C: ticks,
+		}
+	}
+
+	go stream.collectBatch()
+
+	longline := strings.Repeat("A", maximumBytesPerPut)
+	stream.Log(&logger.Message{
+		Line:      []byte(longline + "B"),
+		Timestamp: time.Time{},
+	})
+
+	// no ticks
+	stream.Close()
+
+	argument := <-mockClient.putLogEventsArgument
+	if argument == nil {
+		t.Fatal("Expected non-nil PutLogEventsInput")
+	}
+	bytes := 0
+	for _, event := range argument.LogEvents {
+		bytes += len(*event.Message)
+	}
+	if bytes > maximumBytesPerPut {
+		t.Errorf("Expected <= %d bytes but was %d", maximumBytesPerPut, bytes)
+	}
+
+	argument = <-mockClient.putLogEventsArgument
+	if len(argument.LogEvents) != 1 {
+		t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents))
+	}
+	message := *argument.LogEvents[0].Message
+	if message[len(message)-1:] != "B" {
+		t.Errorf("Expected message to be %s but was %s", "B", message[len(message)-1:])
+	}
+}
+
+func TestCollectBatchWithDuplicateTimestamps(t *testing.T) {
+	mockClient := newMockClient()
+	stream := &logStream{
+		client:        mockClient,
+		logGroupName:  groupName,
+		logStreamName: streamName,
+		sequenceToken: aws.String(sequenceToken),
+		messages:      make(chan *logger.Message),
+	}
+	mockClient.putLogEventsResult <- &putLogEventsResult{
+		successResult: &cloudwatchlogs.PutLogEventsOutput{
+			NextSequenceToken: aws.String(nextSequenceToken),
+		},
+	}
+	ticks := make(chan time.Time)
+	newTicker = func(_ time.Duration) *time.Ticker {
+		return &time.Ticker{
+			C: ticks,
+		}
+	}
+
+	go stream.collectBatch()
+
+	times := maximumLogEventsPerPut
+	expectedEvents := []*cloudwatchlogs.InputLogEvent{}
+	timestamp := time.Now()
+	for i := 0; i < times; i++ {
+		line := fmt.Sprintf("%d", i)
+		if i%2 == 0 {
+			// time.Time is immutable, so the result of Add must be kept
+			timestamp = timestamp.Add(1 * time.Nanosecond)
+		}
+		stream.Log(&logger.Message{
+			Line:      []byte(line),
+			Timestamp: timestamp,
+		})
+		expectedEvents = append(expectedEvents, &cloudwatchlogs.InputLogEvent{
+			Message:   aws.String(line),
+			Timestamp: aws.Int64(timestamp.UnixNano() / int64(time.Millisecond)),
+		})
+	}
+
+	ticks <- time.Time{}
+	stream.Close()
+
+	argument := <-mockClient.putLogEventsArgument
+	if argument == nil {
+		t.Fatal("Expected non-nil PutLogEventsInput")
+	}
+	if len(argument.LogEvents) != times {
+		t.Errorf("Expected LogEvents to contain %d elements, but contains %d", times, len(argument.LogEvents))
+	}
+	for i := 0; i < times; i++ {
+		if !reflect.DeepEqual(*argument.LogEvents[i], *expectedEvents[i]) {
+			t.Errorf("Expected event to be %v but was %v", *expectedEvents[i], *argument.LogEvents[i])
+		}
+ } +} + +func TestParseLogOptionsMultilinePattern(t *testing.T) { + info := logger.Info{ + Config: map[string]string{ + multilinePatternKey: "^xxxx", + }, + } + + multilinePattern, err := parseMultilineOptions(info) + assert.Nil(t, err, "Received unexpected error") + assert.True(t, multilinePattern.MatchString("xxxx"), "No multiline pattern match found") +} + +func TestParseLogOptionsDatetimeFormat(t *testing.T) { + datetimeFormatTests := []struct { + format string + match string + }{ + {"%d/%m/%y %a %H:%M:%S%L %Z", "31/12/10 Mon 08:42:44.345 NZDT"}, + {"%Y-%m-%d %A %I:%M:%S.%f%p%z", "2007-12-04 Monday 08:42:44.123456AM+1200"}, + {"%b|%b|%b|%b|%b|%b|%b|%b|%b|%b|%b|%b", "Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec"}, + {"%B|%B|%B|%B|%B|%B|%B|%B|%B|%B|%B|%B", "January|February|March|April|May|June|July|August|September|October|November|December"}, + {"%A|%A|%A|%A|%A|%A|%A", "Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday"}, + {"%a|%a|%a|%a|%a|%a|%a", "Mon|Tue|Wed|Thu|Fri|Sat|Sun"}, + {"Day of the week: %w, Day of the year: %j", "Day of the week: 4, Day of the year: 091"}, + } + for _, dt := range datetimeFormatTests { + t.Run(dt.match, func(t *testing.T) { + info := logger.Info{ + Config: map[string]string{ + datetimeFormatKey: dt.format, + }, + } + multilinePattern, err := parseMultilineOptions(info) + assert.Nil(t, err, "Received unexpected error") + assert.True(t, multilinePattern.MatchString(dt.match), "No multiline pattern match found") + }) + } +} + +func TestValidateLogOptionsDatetimeFormatAndMultilinePattern(t *testing.T) { + cfg := map[string]string{ + multilinePatternKey: "^xxxx", + datetimeFormatKey: "%Y-%m-%d", + logGroupKey: groupName, + } + conflictingLogOptionsError := "you cannot configure log opt 'awslogs-datetime-format' and 'awslogs-multiline-pattern' at the same time" + + err := ValidateLogOpt(cfg) + assert.NotNil(t, err, "Expected an error") + assert.Equal(t, err.Error(), conflictingLogOptionsError, "Received invalid error") +} + +func TestCreateTagSuccess(t *testing.T) { + mockClient := newMockClient() + info := logger.Info{ + ContainerName: "/test-container", + ContainerID: "container-abcdefghijklmnopqrstuvwxyz01234567890", + Config: map[string]string{"tag": "{{.Name}}/{{.FullID}}"}, + } + logStreamName, e := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate) + if e != nil { + t.Errorf("Error generating tag: %q", e) + } + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: logStreamName, + } + mockClient.createLogStreamResult <- &createLogStreamResult{} + + err := stream.create() + + if err != nil { + t.Errorf("Received unexpected err: %v\n", err) + } + argument := <-mockClient.createLogStreamArgument + + if *argument.LogStreamName != "test-container/container-abcdefghijklmnopqrstuvwxyz01234567890" { + t.Errorf("Expected LogStreamName to be %s", "test-container/container-abcdefghijklmnopqrstuvwxyz01234567890") + } +} + +func BenchmarkUnwrapEvents(b *testing.B) { + events := make([]wrappedEvent, maximumLogEventsPerPut) + for i := 0; i < maximumLogEventsPerPut; i++ { + mes := strings.Repeat("0", maximumBytesPerEvent) + events[i].inputLogEvent = &cloudwatchlogs.InputLogEvent{ + Message: &mes, + } + } + + as := assert.New(b) + b.ResetTimer() + for i := 0; i < b.N; i++ { + res := unwrapEvents(events) + as.Len(res, maximumLogEventsPerPut) + } +} diff --git a/vendor/github.com/moby/moby/daemon/logger/awslogs/cwlogsiface_mock_test.go b/vendor/github.com/moby/moby/daemon/logger/awslogs/cwlogsiface_mock_test.go new file 
mode 100644 index 000000000..82bb34b0a --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/awslogs/cwlogsiface_mock_test.go @@ -0,0 +1,92 @@ +package awslogs + +import "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + +type mockcwlogsclient struct { + createLogGroupArgument chan *cloudwatchlogs.CreateLogGroupInput + createLogGroupResult chan *createLogGroupResult + createLogStreamArgument chan *cloudwatchlogs.CreateLogStreamInput + createLogStreamResult chan *createLogStreamResult + putLogEventsArgument chan *cloudwatchlogs.PutLogEventsInput + putLogEventsResult chan *putLogEventsResult +} + +type createLogGroupResult struct { + successResult *cloudwatchlogs.CreateLogGroupOutput + errorResult error +} + +type createLogStreamResult struct { + successResult *cloudwatchlogs.CreateLogStreamOutput + errorResult error +} + +type putLogEventsResult struct { + successResult *cloudwatchlogs.PutLogEventsOutput + errorResult error +} + +func newMockClient() *mockcwlogsclient { + return &mockcwlogsclient{ + createLogGroupArgument: make(chan *cloudwatchlogs.CreateLogGroupInput, 1), + createLogGroupResult: make(chan *createLogGroupResult, 1), + createLogStreamArgument: make(chan *cloudwatchlogs.CreateLogStreamInput, 1), + createLogStreamResult: make(chan *createLogStreamResult, 1), + putLogEventsArgument: make(chan *cloudwatchlogs.PutLogEventsInput, 1), + putLogEventsResult: make(chan *putLogEventsResult, 1), + } +} + +func newMockClientBuffered(buflen int) *mockcwlogsclient { + return &mockcwlogsclient{ + createLogStreamArgument: make(chan *cloudwatchlogs.CreateLogStreamInput, buflen), + createLogStreamResult: make(chan *createLogStreamResult, buflen), + putLogEventsArgument: make(chan *cloudwatchlogs.PutLogEventsInput, buflen), + putLogEventsResult: make(chan *putLogEventsResult, buflen), + } +} + +func (m *mockcwlogsclient) CreateLogGroup(input *cloudwatchlogs.CreateLogGroupInput) (*cloudwatchlogs.CreateLogGroupOutput, error) { + m.createLogGroupArgument <- input + output := <-m.createLogGroupResult + return output.successResult, output.errorResult +} + +func (m *mockcwlogsclient) CreateLogStream(input *cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) { + m.createLogStreamArgument <- input + output := <-m.createLogStreamResult + return output.successResult, output.errorResult +} + +func (m *mockcwlogsclient) PutLogEvents(input *cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) { + events := make([]*cloudwatchlogs.InputLogEvent, len(input.LogEvents)) + copy(events, input.LogEvents) + m.putLogEventsArgument <- &cloudwatchlogs.PutLogEventsInput{ + LogEvents: events, + SequenceToken: input.SequenceToken, + LogGroupName: input.LogGroupName, + LogStreamName: input.LogStreamName, + } + output := <-m.putLogEventsResult + return output.successResult, output.errorResult +} + +type mockmetadataclient struct { + regionResult chan *regionResult +} + +type regionResult struct { + successResult string + errorResult error +} + +func newMockMetadataClient() *mockmetadataclient { + return &mockmetadataclient{ + regionResult: make(chan *regionResult, 1), + } +} + +func (m *mockmetadataclient) Region() (string, error) { + output := <-m.regionResult + return output.successResult, output.errorResult +} diff --git a/vendor/github.com/moby/moby/daemon/logger/copier.go b/vendor/github.com/moby/moby/daemon/logger/copier.go new file mode 100644 index 000000000..65d8fb148 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/copier.go @@ -0,0 
+1,135 @@
+package logger
+
+import (
+	"bytes"
+	"io"
+	"sync"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+)
+
+const (
+	bufSize  = 16 * 1024
+	readSize = 2 * 1024
+)
+
+// Copier can copy logs from specified sources to Logger and attach Timestamp.
+// Writes are concurrent, so you need to implement some synchronization in
+// your logger.
+type Copier struct {
+	// srcs is a map of name -> reader pairs, for example "stdout", "stderr"
+	srcs      map[string]io.Reader
+	dst       Logger
+	copyJobs  sync.WaitGroup
+	closeOnce sync.Once
+	closed    chan struct{}
+}
+
+// NewCopier creates a new Copier
+func NewCopier(srcs map[string]io.Reader, dst Logger) *Copier {
+	return &Copier{
+		srcs:   srcs,
+		dst:    dst,
+		closed: make(chan struct{}),
+	}
+}
+
+// Run starts copying logs
+func (c *Copier) Run() {
+	for src, w := range c.srcs {
+		c.copyJobs.Add(1)
+		go c.copySrc(src, w)
+	}
+}
+
+func (c *Copier) copySrc(name string, src io.Reader) {
+	defer c.copyJobs.Done()
+	buf := make([]byte, bufSize)
+	n := 0
+	eof := false
+
+	for {
+		select {
+		case <-c.closed:
+			return
+		default:
+			// Work out how much more data we are okay with reading this time.
+			upto := n + readSize
+			if upto > cap(buf) {
+				upto = cap(buf)
+			}
+			// Try to read that data.
+			if upto > n {
+				read, err := src.Read(buf[n:upto])
+				if err != nil {
+					if err != io.EOF {
+						logrus.Errorf("Error scanning log stream: %s", err)
+						return
+					}
+					eof = true
+				}
+				n += read
+			}
+			// If we have no data to log, and there's no more coming, we're done.
+			if n == 0 && eof {
+				return
+			}
+			// Break up the data that we've buffered up into lines, and log each in turn.
+			p := 0
+			for q := bytes.IndexByte(buf[p:n], '\n'); q >= 0; q = bytes.IndexByte(buf[p:n], '\n') {
+				select {
+				case <-c.closed:
+					return
+				default:
+					msg := NewMessage()
+					msg.Source = name
+					msg.Timestamp = time.Now().UTC()
+					msg.Line = append(msg.Line, buf[p:p+q]...)
+
+					if logErr := c.dst.Log(msg); logErr != nil {
+						logrus.Errorf("Failed to log msg %q for logger %s: %s", msg.Line, c.dst.Name(), logErr)
+					}
+				}
+				p += q + 1
+			}
+			// If there's no more coming, or the buffer is full but
+			// has no newlines, log whatever we haven't logged yet,
+			// noting that it's a partial log line.
+			if eof || (p == 0 && n == len(buf)) {
+				if p < n {
+					msg := NewMessage()
+					msg.Source = name
+					msg.Timestamp = time.Now().UTC()
+					msg.Line = append(msg.Line, buf[p:n]...)
+					msg.Partial = true
+
+					if logErr := c.dst.Log(msg); logErr != nil {
+						logrus.Errorf("Failed to log msg %q for logger %s: %s", msg.Line, c.dst.Name(), logErr)
+					}
+					p = 0
+					n = 0
+				}
+				if eof {
+					return
+				}
+			}
+			// Move any unlogged data to the front of the buffer in preparation for another read.
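+			// buf[p:n] holds the bytes that have been read but not yet
+			// logged; shift them to the start so the next read appends
+			// after them.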
+ if p > 0 { + copy(buf[0:], buf[p:n]) + n -= p + } + } + } +} + +// Wait waits until all copying is done +func (c *Copier) Wait() { + c.copyJobs.Wait() +} + +// Close closes the copier +func (c *Copier) Close() { + c.closeOnce.Do(func() { + close(c.closed) + }) +} diff --git a/vendor/github.com/moby/moby/daemon/logger/copier_test.go b/vendor/github.com/moby/moby/daemon/logger/copier_test.go new file mode 100644 index 000000000..4210022dc --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/copier_test.go @@ -0,0 +1,296 @@ +package logger + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "strings" + "sync" + "testing" + "time" +) + +type TestLoggerJSON struct { + *json.Encoder + mu sync.Mutex + delay time.Duration +} + +func (l *TestLoggerJSON) Log(m *Message) error { + if l.delay > 0 { + time.Sleep(l.delay) + } + l.mu.Lock() + defer l.mu.Unlock() + return l.Encode(m) +} + +func (l *TestLoggerJSON) Close() error { return nil } + +func (l *TestLoggerJSON) Name() string { return "json" } + +func TestCopier(t *testing.T) { + stdoutLine := "Line that thinks that it is log line from docker stdout" + stderrLine := "Line that thinks that it is log line from docker stderr" + stdoutTrailingLine := "stdout trailing line" + stderrTrailingLine := "stderr trailing line" + + var stdout bytes.Buffer + var stderr bytes.Buffer + for i := 0; i < 30; i++ { + if _, err := stdout.WriteString(stdoutLine + "\n"); err != nil { + t.Fatal(err) + } + if _, err := stderr.WriteString(stderrLine + "\n"); err != nil { + t.Fatal(err) + } + } + + // Test remaining lines without line-endings + if _, err := stdout.WriteString(stdoutTrailingLine); err != nil { + t.Fatal(err) + } + if _, err := stderr.WriteString(stderrTrailingLine); err != nil { + t.Fatal(err) + } + + var jsonBuf bytes.Buffer + + jsonLog := &TestLoggerJSON{Encoder: json.NewEncoder(&jsonBuf)} + + c := NewCopier( + map[string]io.Reader{ + "stdout": &stdout, + "stderr": &stderr, + }, + jsonLog) + c.Run() + wait := make(chan struct{}) + go func() { + c.Wait() + close(wait) + }() + select { + case <-time.After(1 * time.Second): + t.Fatal("Copier failed to do its work in 1 second") + case <-wait: + } + dec := json.NewDecoder(&jsonBuf) + for { + var msg Message + if err := dec.Decode(&msg); err != nil { + if err == io.EOF { + break + } + t.Fatal(err) + } + if msg.Source != "stdout" && msg.Source != "stderr" { + t.Fatalf("Wrong Source: %q, should be %q or %q", msg.Source, "stdout", "stderr") + } + if msg.Source == "stdout" { + if string(msg.Line) != stdoutLine && string(msg.Line) != stdoutTrailingLine { + t.Fatalf("Wrong Line: %q, expected %q or %q", msg.Line, stdoutLine, stdoutTrailingLine) + } + } + if msg.Source == "stderr" { + if string(msg.Line) != stderrLine && string(msg.Line) != stderrTrailingLine { + t.Fatalf("Wrong Line: %q, expected %q or %q", msg.Line, stderrLine, stderrTrailingLine) + } + } + } +} + +// TestCopierLongLines tests long lines without line breaks +func TestCopierLongLines(t *testing.T) { + // Long lines (should be split at "bufSize") + const bufSize = 16 * 1024 + stdoutLongLine := strings.Repeat("a", bufSize) + stderrLongLine := strings.Repeat("b", bufSize) + stdoutTrailingLine := "stdout trailing line" + stderrTrailingLine := "stderr trailing line" + + var stdout bytes.Buffer + var stderr bytes.Buffer + + for i := 0; i < 3; i++ { + if _, err := stdout.WriteString(stdoutLongLine); err != nil { + t.Fatal(err) + } + if _, err := stderr.WriteString(stderrLongLine); err != nil { + t.Fatal(err) + } + } + + if _, err := 
stdout.WriteString(stdoutTrailingLine); err != nil { + t.Fatal(err) + } + if _, err := stderr.WriteString(stderrTrailingLine); err != nil { + t.Fatal(err) + } + + var jsonBuf bytes.Buffer + + jsonLog := &TestLoggerJSON{Encoder: json.NewEncoder(&jsonBuf)} + + c := NewCopier( + map[string]io.Reader{ + "stdout": &stdout, + "stderr": &stderr, + }, + jsonLog) + c.Run() + wait := make(chan struct{}) + go func() { + c.Wait() + close(wait) + }() + select { + case <-time.After(1 * time.Second): + t.Fatal("Copier failed to do its work in 1 second") + case <-wait: + } + dec := json.NewDecoder(&jsonBuf) + for { + var msg Message + if err := dec.Decode(&msg); err != nil { + if err == io.EOF { + break + } + t.Fatal(err) + } + if msg.Source != "stdout" && msg.Source != "stderr" { + t.Fatalf("Wrong Source: %q, should be %q or %q", msg.Source, "stdout", "stderr") + } + if msg.Source == "stdout" { + if string(msg.Line) != stdoutLongLine && string(msg.Line) != stdoutTrailingLine { + t.Fatalf("Wrong Line: %q, expected 'stdoutLongLine' or 'stdoutTrailingLine'", msg.Line) + } + } + if msg.Source == "stderr" { + if string(msg.Line) != stderrLongLine && string(msg.Line) != stderrTrailingLine { + t.Fatalf("Wrong Line: %q, expected 'stderrLongLine' or 'stderrTrailingLine'", msg.Line) + } + } + } +} + +func TestCopierSlow(t *testing.T) { + stdoutLine := "Line that thinks that it is log line from docker stdout" + var stdout bytes.Buffer + for i := 0; i < 30; i++ { + if _, err := stdout.WriteString(stdoutLine + "\n"); err != nil { + t.Fatal(err) + } + } + + var jsonBuf bytes.Buffer + //encoder := &encodeCloser{Encoder: json.NewEncoder(&jsonBuf)} + jsonLog := &TestLoggerJSON{Encoder: json.NewEncoder(&jsonBuf), delay: 100 * time.Millisecond} + + c := NewCopier(map[string]io.Reader{"stdout": &stdout}, jsonLog) + c.Run() + wait := make(chan struct{}) + go func() { + c.Wait() + close(wait) + }() + <-time.After(150 * time.Millisecond) + c.Close() + select { + case <-time.After(200 * time.Millisecond): + t.Fatal("failed to exit in time after the copier is closed") + case <-wait: + } +} + +type BenchmarkLoggerDummy struct { +} + +func (l *BenchmarkLoggerDummy) Log(m *Message) error { PutMessage(m); return nil } + +func (l *BenchmarkLoggerDummy) Close() error { return nil } + +func (l *BenchmarkLoggerDummy) Name() string { return "dummy" } + +func BenchmarkCopier64(b *testing.B) { + benchmarkCopier(b, 1<<6) +} +func BenchmarkCopier128(b *testing.B) { + benchmarkCopier(b, 1<<7) +} +func BenchmarkCopier256(b *testing.B) { + benchmarkCopier(b, 1<<8) +} +func BenchmarkCopier512(b *testing.B) { + benchmarkCopier(b, 1<<9) +} +func BenchmarkCopier1K(b *testing.B) { + benchmarkCopier(b, 1<<10) +} +func BenchmarkCopier2K(b *testing.B) { + benchmarkCopier(b, 1<<11) +} +func BenchmarkCopier4K(b *testing.B) { + benchmarkCopier(b, 1<<12) +} +func BenchmarkCopier8K(b *testing.B) { + benchmarkCopier(b, 1<<13) +} +func BenchmarkCopier16K(b *testing.B) { + benchmarkCopier(b, 1<<14) +} +func BenchmarkCopier32K(b *testing.B) { + benchmarkCopier(b, 1<<15) +} +func BenchmarkCopier64K(b *testing.B) { + benchmarkCopier(b, 1<<16) +} +func BenchmarkCopier128K(b *testing.B) { + benchmarkCopier(b, 1<<17) +} +func BenchmarkCopier256K(b *testing.B) { + benchmarkCopier(b, 1<<18) +} + +func piped(b *testing.B, iterations int, delay time.Duration, buf []byte) io.Reader { + r, w, err := os.Pipe() + if err != nil { + b.Fatal(err) + return nil + } + go func() { + for i := 0; i < iterations; i++ { + time.Sleep(delay) + if n, err := w.Write(buf); err != nil || n 
!= len(buf) {
+				if err != nil {
+					b.Fatal(err)
+				}
+				b.Fatal(fmt.Errorf("short write"))
+			}
+		}
+		w.Close()
+	}()
+	return r
+}
+
+func benchmarkCopier(b *testing.B, length int) {
+	b.StopTimer()
+	buf := []byte{'A'}
+	for len(buf) < length {
+		buf = append(buf, buf...)
+	}
+	buf = append(buf[:length-1], []byte{'\n'}...)
+	b.StartTimer()
+	for i := 0; i < b.N; i++ {
+		c := NewCopier(
+			map[string]io.Reader{
+				"buffer": piped(b, 10, time.Nanosecond, buf),
+			},
+			&BenchmarkLoggerDummy{})
+		c.Run()
+		c.Wait()
+		c.Close()
+	}
+}
diff --git a/vendor/github.com/moby/moby/daemon/logger/etwlogs/etwlogs_windows.go b/vendor/github.com/moby/moby/daemon/logger/etwlogs/etwlogs_windows.go
new file mode 100644
index 000000000..8608f15ca
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/logger/etwlogs/etwlogs_windows.go
@@ -0,0 +1,168 @@
+// Package etwlogs provides a log driver for forwarding container logs
+// as ETW events (ETW stands for Event Tracing for Windows).
+// A client can then create an ETW listener to listen for events that are sent
+// by the ETW provider that we register, using the provider's GUID "a3693192-9ed6-46d2-a981-f8226c8363bd".
+// Here is an example of how to do this using the logman utility:
+// 1. logman start -ets DockerContainerLogs -p {a3693192-9ed6-46d2-a981-f8226c8363bd} 0 0 -o trace.etl
+// 2. Run container(s) and generate log messages
+// 3. logman stop -ets DockerContainerLogs
+// 4. You can then convert the etl log file to XML using: tracerpt -y trace.etl
+//
+// Each container log message generates an ETW event that also contains:
+// the container name and ID, the timestamp, and the stream type.
+package etwlogs
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+	"unsafe"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/daemon/logger"
+	"golang.org/x/sys/windows"
+)
+
+type etwLogs struct {
+	containerName string
+	imageName     string
+	containerID   string
+	imageID       string
+}
+
+const (
+	name             = "etwlogs"
+	win32CallSuccess = 0
+)
+
+var (
+	modAdvapi32          = windows.NewLazySystemDLL("Advapi32.dll")
+	procEventRegister    = modAdvapi32.NewProc("EventRegister")
+	procEventWriteString = modAdvapi32.NewProc("EventWriteString")
+	procEventUnregister  = modAdvapi32.NewProc("EventUnregister")
+)
+var providerHandle windows.Handle
+var refCount int
+var mu sync.Mutex
+
+func init() {
+	providerHandle = windows.InvalidHandle
+	if err := logger.RegisterLogDriver(name, New); err != nil {
+		logrus.Fatal(err)
+	}
+}
+
+// New creates a new etwLogs logger for the given container and registers the ETW provider.
+func New(info logger.Info) (logger.Logger, error) {
+	if err := registerETWProvider(); err != nil {
+		return nil, err
+	}
+	logrus.Debugf("logging driver etwLogs configured for container: %s.", info.ContainerID)
+
+	return &etwLogs{
+		containerName: info.Name(),
+		imageName:     info.ContainerImageName,
+		containerID:   info.ContainerID,
+		imageID:       info.ContainerImageID,
+	}, nil
+}
+
+// Log logs the message to the ETW stream.
+func (etwLogger *etwLogs) Log(msg *logger.Message) error {
+	if providerHandle == windows.InvalidHandle {
+		// This should never be hit; if it is, it indicates a programming error.
+		errorMessage := "ETWLogs cannot log the message, because the event provider has not been registered."
+		logrus.Error(errorMessage)
+		return errors.New(errorMessage)
+	}
+	m := createLogMessage(etwLogger, msg)
+	logger.PutMessage(msg)
+	return callEventWriteString(m)
+}
+
+// Close closes the logger by unregistering the ETW provider.
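+// The provider is reference-counted, so it is only actually unregistered once
+// the last etwLogs logger on this daemon has been closed.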
+func (etwLogger *etwLogs) Close() error { + unregisterETWProvider() + return nil +} + +func (etwLogger *etwLogs) Name() string { + return name +} + +func createLogMessage(etwLogger *etwLogs, msg *logger.Message) string { + return fmt.Sprintf("container_name: %s, image_name: %s, container_id: %s, image_id: %s, source: %s, log: %s", + etwLogger.containerName, + etwLogger.imageName, + etwLogger.containerID, + etwLogger.imageID, + msg.Source, + msg.Line) +} + +func registerETWProvider() error { + mu.Lock() + defer mu.Unlock() + if refCount == 0 { + var err error + if err = callEventRegister(); err != nil { + return err + } + } + + refCount++ + return nil +} + +func unregisterETWProvider() { + mu.Lock() + defer mu.Unlock() + if refCount == 1 { + if callEventUnregister() { + refCount-- + providerHandle = windows.InvalidHandle + } + // Not returning an error if EventUnregister fails, because etwLogs will continue to work + } else { + refCount-- + } +} + +func callEventRegister() error { + // The provider's GUID is {a3693192-9ed6-46d2-a981-f8226c8363bd} + guid := windows.GUID{ + Data1: 0xa3693192, + Data2: 0x9ed6, + Data3: 0x46d2, + Data4: [8]byte{0xa9, 0x81, 0xf8, 0x22, 0x6c, 0x83, 0x63, 0xbd}, + } + + ret, _, _ := procEventRegister.Call(uintptr(unsafe.Pointer(&guid)), 0, 0, uintptr(unsafe.Pointer(&providerHandle))) + if ret != win32CallSuccess { + errorMessage := fmt.Sprintf("Failed to register ETW provider. Error: %d", ret) + logrus.Error(errorMessage) + return errors.New(errorMessage) + } + return nil +} + +func callEventWriteString(message string) error { + utf16message, err := windows.UTF16FromString(message) + + if err != nil { + return err + } + + ret, _, _ := procEventWriteString.Call(uintptr(providerHandle), 0, 0, uintptr(unsafe.Pointer(&utf16message[0]))) + if ret != win32CallSuccess { + errorMessage := fmt.Sprintf("ETWLogs provider failed to log message. Error: %d", ret) + logrus.Error(errorMessage) + return errors.New(errorMessage) + } + return nil +} + +func callEventUnregister() bool { + ret, _, _ := procEventUnregister.Call(uintptr(providerHandle)) + return ret == win32CallSuccess +} diff --git a/vendor/github.com/moby/moby/daemon/logger/factory.go b/vendor/github.com/moby/moby/daemon/logger/factory.go new file mode 100644 index 000000000..32001590d --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/factory.go @@ -0,0 +1,162 @@ +package logger + +import ( + "fmt" + "sort" + "sync" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/plugingetter" + units "github.com/docker/go-units" + "github.com/pkg/errors" +) + +// Creator builds a logging driver instance with given context. +type Creator func(Info) (Logger, error) + +// LogOptValidator checks the options specific to the underlying +// logging implementation. 
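+// A validator usually just whitelists the keys it understands; a minimal
+// sketch (the driver name and option key below are hypothetical):
+//
+//	logger.RegisterLogOptValidator("mydriver", func(cfg map[string]string) error {
+//		for key := range cfg {
+//			if key != "mydriver-address" {
+//				return fmt.Errorf("unknown log opt '%s' for mydriver log driver", key)
+//			}
+//		}
+//		return nil
+//	})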
+type LogOptValidator func(cfg map[string]string) error + +type logdriverFactory struct { + registry map[string]Creator + optValidator map[string]LogOptValidator + m sync.Mutex +} + +func (lf *logdriverFactory) list() []string { + ls := make([]string, 0, len(lf.registry)) + lf.m.Lock() + for name := range lf.registry { + ls = append(ls, name) + } + lf.m.Unlock() + sort.Strings(ls) + return ls +} + +// ListDrivers gets the list of registered log driver names +func ListDrivers() []string { + return factory.list() +} + +func (lf *logdriverFactory) register(name string, c Creator) error { + if lf.driverRegistered(name) { + return fmt.Errorf("logger: log driver named '%s' is already registered", name) + } + + lf.m.Lock() + lf.registry[name] = c + lf.m.Unlock() + return nil +} + +func (lf *logdriverFactory) driverRegistered(name string) bool { + lf.m.Lock() + _, ok := lf.registry[name] + lf.m.Unlock() + if !ok { + if pluginGetter != nil { // this can be nil when the init functions are running + if l, _ := getPlugin(name, plugingetter.Lookup); l != nil { + return true + } + } + } + return ok +} + +func (lf *logdriverFactory) registerLogOptValidator(name string, l LogOptValidator) error { + lf.m.Lock() + defer lf.m.Unlock() + + if _, ok := lf.optValidator[name]; ok { + return fmt.Errorf("logger: log validator named '%s' is already registered", name) + } + lf.optValidator[name] = l + return nil +} + +func (lf *logdriverFactory) get(name string) (Creator, error) { + lf.m.Lock() + defer lf.m.Unlock() + + c, ok := lf.registry[name] + if ok { + return c, nil + } + + c, err := getPlugin(name, plugingetter.Acquire) + return c, errors.Wrapf(err, "logger: no log driver named '%s' is registered", name) +} + +func (lf *logdriverFactory) getLogOptValidator(name string) LogOptValidator { + lf.m.Lock() + defer lf.m.Unlock() + + c, _ := lf.optValidator[name] + return c +} + +var factory = &logdriverFactory{registry: make(map[string]Creator), optValidator: make(map[string]LogOptValidator)} // global factory instance + +// RegisterLogDriver registers the given logging driver builder with given logging +// driver name. +func RegisterLogDriver(name string, c Creator) error { + return factory.register(name, c) +} + +// RegisterLogOptValidator registers the logging option validator with +// the given logging driver name. +func RegisterLogOptValidator(name string, l LogOptValidator) error { + return factory.registerLogOptValidator(name, l) +} + +// GetLogDriver provides the logging driver builder for a logging driver name. +func GetLogDriver(name string) (Creator, error) { + return factory.get(name) +} + +var builtInLogOpts = map[string]bool{ + "mode": true, + "max-buffer-size": true, +} + +// ValidateLogOpts checks the options for the given log driver. The +// options supported are specific to the LogDriver implementation. 
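+// The built-in options ("mode" and "max-buffer-size") are validated and
+// stripped here, so driver-specific validators never see them.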
+func ValidateLogOpts(name string, cfg map[string]string) error {
+	if name == "none" {
+		return nil
+	}
+
+	switch containertypes.LogMode(cfg["mode"]) {
+	case containertypes.LogModeBlocking, containertypes.LogModeNonBlock, containertypes.LogModeUnset:
+	default:
+		return fmt.Errorf("logger: logging mode not supported: %s", cfg["mode"])
+	}
+
+	if s, ok := cfg["max-buffer-size"]; ok {
+		if containertypes.LogMode(cfg["mode"]) != containertypes.LogModeNonBlock {
+			return fmt.Errorf("logger: max-buffer-size option is only supported with 'mode=%s'", containertypes.LogModeNonBlock)
+		}
+		if _, err := units.RAMInBytes(s); err != nil {
+			return errors.Wrap(err, "error parsing option max-buffer-size")
+		}
+	}
+
+	if !factory.driverRegistered(name) {
+		return fmt.Errorf("logger: no log driver named '%s' is registered", name)
+	}
+
+	filteredOpts := make(map[string]string, len(builtInLogOpts))
+	for k, v := range cfg {
+		if !builtInLogOpts[k] {
+			filteredOpts[k] = v
+		}
+	}
+
+	validator := factory.getLogOptValidator(name)
+	if validator != nil {
+		return validator(filteredOpts)
+	}
+	return nil
+}
diff --git a/vendor/github.com/moby/moby/daemon/logger/fluentd/fluentd.go b/vendor/github.com/moby/moby/daemon/logger/fluentd/fluentd.go
new file mode 100644
index 000000000..c8977ec0d
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/logger/fluentd/fluentd.go
@@ -0,0 +1,250 @@
+// Package fluentd provides the log driver for forwarding server logs
+// to fluentd endpoints.
+package fluentd
+
+import (
+	"fmt"
+	"math"
+	"net"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/daemon/logger"
+	"github.com/docker/docker/daemon/logger/loggerutils"
+	"github.com/docker/docker/pkg/urlutil"
+	"github.com/docker/go-units"
+	"github.com/fluent/fluent-logger-golang/fluent"
+	"github.com/pkg/errors"
+)
+
+type fluentd struct {
+	tag           string
+	containerID   string
+	containerName string
+	writer        *fluent.Fluent
+	extra         map[string]string
+}
+
+type location struct {
+	protocol string
+	host     string
+	port     int
+	path     string
+}
+
+const (
+	name = "fluentd"
+
+	defaultProtocol    = "tcp"
+	defaultHost        = "127.0.0.1"
+	defaultPort        = 24224
+	defaultBufferLimit = 1024 * 1024
+
+	// The logger tries to reconnect 2**32 - 1 times before it fails (and panics);
+	// with the exponential backoff that amounts to roughly 204 years
+	// [1.5 ** (2**32 - 1) - 1 seconds], i.e. it effectively retries forever.
+	defaultRetryWait  = 1000
+	defaultMaxRetries = math.MaxInt32
+
+	addressKey      = "fluentd-address"
+	bufferLimitKey  = "fluentd-buffer-limit"
+	retryWaitKey    = "fluentd-retry-wait"
+	maxRetriesKey   = "fluentd-max-retries"
+	asyncConnectKey = "fluentd-async-connect"
+)
+
+func init() {
+	if err := logger.RegisterLogDriver(name, New); err != nil {
+		logrus.Fatal(err)
+	}
+	if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil {
+		logrus.Fatal(err)
+	}
+}
+
+// New creates a fluentd logger using the configuration passed in on
+// the context. The supported context configuration variable is
+// fluentd-address.
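+// A minimal sketch of how a container would select this driver (the address
+// and option values shown are hypothetical):
+//
+//	docker run --log-driver=fluentd \
+//	    --log-opt fluentd-address=tcp://192.168.0.42:24224 \
+//	    --log-opt fluentd-async-connect=true your-image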
+func New(info logger.Info) (logger.Logger, error) { + loc, err := parseAddress(info.Config[addressKey]) + if err != nil { + return nil, err + } + + tag, err := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate) + if err != nil { + return nil, err + } + + extra, err := info.ExtraAttributes(nil) + if err != nil { + return nil, err + } + + bufferLimit := defaultBufferLimit + if info.Config[bufferLimitKey] != "" { + bl64, err := units.RAMInBytes(info.Config[bufferLimitKey]) + if err != nil { + return nil, err + } + bufferLimit = int(bl64) + } + + retryWait := defaultRetryWait + if info.Config[retryWaitKey] != "" { + rwd, err := time.ParseDuration(info.Config[retryWaitKey]) + if err != nil { + return nil, err + } + retryWait = int(rwd.Seconds() * 1000) + } + + maxRetries := defaultMaxRetries + if info.Config[maxRetriesKey] != "" { + mr64, err := strconv.ParseUint(info.Config[maxRetriesKey], 10, strconv.IntSize) + if err != nil { + return nil, err + } + maxRetries = int(mr64) + } + + asyncConnect := false + if info.Config[asyncConnectKey] != "" { + if asyncConnect, err = strconv.ParseBool(info.Config[asyncConnectKey]); err != nil { + return nil, err + } + } + + fluentConfig := fluent.Config{ + FluentPort: loc.port, + FluentHost: loc.host, + FluentNetwork: loc.protocol, + FluentSocketPath: loc.path, + BufferLimit: bufferLimit, + RetryWait: retryWait, + MaxRetry: maxRetries, + AsyncConnect: asyncConnect, + } + + logrus.WithField("container", info.ContainerID).WithField("config", fluentConfig). + Debug("logging driver fluentd configured") + + log, err := fluent.New(fluentConfig) + if err != nil { + return nil, err + } + return &fluentd{ + tag: tag, + containerID: info.ContainerID, + containerName: info.ContainerName, + writer: log, + extra: extra, + }, nil +} + +func (f *fluentd) Log(msg *logger.Message) error { + data := map[string]string{ + "container_id": f.containerID, + "container_name": f.containerName, + "source": msg.Source, + "log": string(msg.Line), + } + for k, v := range f.extra { + data[k] = v + } + + ts := msg.Timestamp + logger.PutMessage(msg) + // fluent-logger-golang buffers logs from failures and disconnections, + // and these are transferred again automatically. + return f.writer.PostWithTime(f.tag, ts, data) +} + +func (f *fluentd) Close() error { + return f.writer.Close() +} + +func (f *fluentd) Name() string { + return name +} + +// ValidateLogOpt looks for fluentd specific log option fluentd-address. 
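+// Besides the generic opts (env, env-regex, labels, tag) it accepts the
+// fluentd-* keys declared in the const block above; anything else is rejected.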
+func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case "env": + case "env-regex": + case "labels": + case "tag": + case addressKey: + case bufferLimitKey: + case retryWaitKey: + case maxRetriesKey: + case asyncConnectKey: + // Accepted + default: + return fmt.Errorf("unknown log opt '%s' for fluentd log driver", key) + } + } + + _, err := parseAddress(cfg[addressKey]) + return err +} + +func parseAddress(address string) (*location, error) { + if address == "" { + return &location{ + protocol: defaultProtocol, + host: defaultHost, + port: defaultPort, + path: "", + }, nil + } + + protocol := defaultProtocol + givenAddress := address + if urlutil.IsTransportURL(address) { + url, err := url.Parse(address) + if err != nil { + return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) + } + // unix and unixgram socket + if url.Scheme == "unix" || url.Scheme == "unixgram" { + return &location{ + protocol: url.Scheme, + host: "", + port: 0, + path: url.Path, + }, nil + } + // tcp|udp + protocol = url.Scheme + address = url.Host + } + + host, port, err := net.SplitHostPort(address) + if err != nil { + if !strings.Contains(err.Error(), "missing port in address") { + return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) + } + return &location{ + protocol: protocol, + host: host, + port: defaultPort, + path: "", + }, nil + } + + portnum, err := strconv.Atoi(port) + if err != nil { + return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) + } + return &location{ + protocol: protocol, + host: host, + port: portnum, + path: "", + }, nil +} diff --git a/vendor/github.com/moby/moby/daemon/logger/gcplogs/gcplogging.go b/vendor/github.com/moby/moby/daemon/logger/gcplogs/gcplogging.go new file mode 100644 index 000000000..a33566ae1 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/gcplogs/gcplogging.go @@ -0,0 +1,244 @@ +package gcplogs + +import ( + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/docker/docker/daemon/logger" + + "cloud.google.com/go/compute/metadata" + "cloud.google.com/go/logging" + "github.com/Sirupsen/logrus" + "golang.org/x/net/context" + mrpb "google.golang.org/genproto/googleapis/api/monitoredres" +) + +const ( + name = "gcplogs" + + projectOptKey = "gcp-project" + logLabelsKey = "labels" + logEnvKey = "env" + logEnvRegexKey = "env-regex" + logCmdKey = "gcp-log-cmd" + logZoneKey = "gcp-meta-zone" + logNameKey = "gcp-meta-name" + logIDKey = "gcp-meta-id" +) + +var ( + // The number of logs the gcplogs driver has dropped. 
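+	// It is incremented atomically from the OnError callback set up in New
+	// and surfaced in the daemon log once per thousand dropped entries.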
+	droppedLogs uint64
+
+	onGCE bool
+
+	// instance metadata populated from the metadata server if available
+	projectID    string
+	zone         string
+	instanceName string
+	instanceID   string
+)
+
+func init() {
+
+	if err := logger.RegisterLogDriver(name, New); err != nil {
+		logrus.Fatal(err)
+	}
+
+	if err := logger.RegisterLogOptValidator(name, ValidateLogOpts); err != nil {
+		logrus.Fatal(err)
+	}
+}
+
+type gcplogs struct {
+	logger    *logging.Logger
+	instance  *instanceInfo
+	container *containerInfo
+}
+
+type dockerLogEntry struct {
+	Instance  *instanceInfo  `json:"instance,omitempty"`
+	Container *containerInfo `json:"container,omitempty"`
+	Message   string         `json:"message,omitempty"`
+}
+
+type instanceInfo struct {
+	Zone string `json:"zone,omitempty"`
+	Name string `json:"name,omitempty"`
+	ID   string `json:"id,omitempty"`
+}
+
+type containerInfo struct {
+	Name      string            `json:"name,omitempty"`
+	ID        string            `json:"id,omitempty"`
+	ImageName string            `json:"imageName,omitempty"`
+	ImageID   string            `json:"imageId,omitempty"`
+	Created   time.Time         `json:"created,omitempty"`
+	Command   string            `json:"command,omitempty"`
+	Metadata  map[string]string `json:"metadata,omitempty"`
+}
+
+var initGCPOnce sync.Once
+
+func initGCP() {
+	initGCPOnce.Do(func() {
+		onGCE = metadata.OnGCE()
+		if onGCE {
+			// These will fail on instances if the metadata service is
+			// down or the client is compiled with an API version that
+			// has been removed. Since these are not vital, let's ignore
+			// them and rely on the ,omitempty tags of the corresponding
+			// dockerLogEntry fields.
+			projectID, _ = metadata.ProjectID()
+			zone, _ = metadata.Zone()
+			instanceName, _ = metadata.InstanceName()
+			instanceID, _ = metadata.InstanceID()
+		}
+	})
+}
+
+// New creates a new logger that logs to Google Cloud Logging using the application
+// default credentials.
+//
+// See https://developers.google.com/identity/protocols/application-default-credentials
+func New(info logger.Info) (logger.Logger, error) {
+	initGCP()
+
+	var project string
+	if projectID != "" {
+		project = projectID
+	}
+	if projectID, found := info.Config[projectOptKey]; found {
+		project = projectID
+	}
+	if project == "" {
+		return nil, fmt.Errorf("No project was specified and couldn't read project from the metadata server. Please specify a project")
+	}
+
+	// Issue #29344: gcplogs segfaults (static binary)
+	// If HOME is not set, logging.NewClient() will call os/user.Current() via oauth2/google.
+	// However, in a static binary, os/user.Current() leads to a segfault due to a glibc issue
+	// that won't be fixed in the short term.
+	// (golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341)
+	// So we forcibly set HOME so as to avoid a call to os/user.Current().
+	if err := ensureHomeIfIAmStatic(); err != nil {
+		return nil, err
+	}
+
+	c, err := logging.NewClient(context.Background(), project)
+	if err != nil {
+		return nil, err
+	}
+	var instanceResource *instanceInfo
+	if onGCE {
+		instanceResource = &instanceInfo{
+			Zone: zone,
+			Name: instanceName,
+			ID:   instanceID,
+		}
+	} else if info.Config[logZoneKey] != "" || info.Config[logNameKey] != "" || info.Config[logIDKey] != "" {
+		instanceResource = &instanceInfo{
+			Zone: info.Config[logZoneKey],
+			Name: info.Config[logNameKey],
+			ID:   info.Config[logIDKey],
+		}
+	}
+
+	options := []logging.LoggerOption{}
+	if instanceResource != nil {
+		vmMrpb := logging.CommonResource(
+			&mrpb.MonitoredResource{
+				Type: "gce_instance",
+				Labels: map[string]string{
+					"instance_id": instanceResource.ID,
+					"zone":        instanceResource.Zone,
+				},
+			},
+		)
+		options = []logging.LoggerOption{vmMrpb}
+	}
+	lg := c.Logger("gcplogs-docker-driver", options...)
+
+	if err := c.Ping(context.Background()); err != nil {
+		return nil, fmt.Errorf("unable to connect or authenticate with Google Cloud Logging: %v", err)
+	}
+
+	extraAttributes, err := info.ExtraAttributes(nil)
+	if err != nil {
+		return nil, err
+	}
+
+	l := &gcplogs{
+		logger: lg,
+		container: &containerInfo{
+			Name:      info.ContainerName,
+			ID:        info.ContainerID,
+			ImageName: info.ContainerImageName,
+			ImageID:   info.ContainerImageID,
+			Created:   info.ContainerCreated,
+			Metadata:  extraAttributes,
+		},
+	}
+
+	if info.Config[logCmdKey] == "true" {
+		l.container.Command = info.Command()
+	}
+
+	if instanceResource != nil {
+		l.instance = instanceResource
+	}
+
+	// The logger starts dropping ("overflowing") at around 10,000 logs per
+	// second, at which point this OnError func is called. We want to surface
+	// the error to the user without overly spamming /var/log/docker.log, so we
+	// log the first time we overflow and every 1000th time after.
+	c.OnError = func(err error) {
+		if err == logging.ErrOverflow {
+			if i := atomic.AddUint64(&droppedLogs, 1); i%1000 == 1 {
+				logrus.Errorf("gcplogs driver has dropped %v logs", i)
+			}
+		} else {
+			logrus.Error(err)
+		}
+	}
+
+	return l, nil
+}
+
+// ValidateLogOpts validates the opts passed to the gcplogs driver. Only the
+// option keys listed below are accepted; the driver takes no other arguments.
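+// A minimal sketch of selecting this driver (the project name and option
+// values shown are hypothetical):
+//
+//	docker run --log-driver=gcplogs \
+//	    --log-opt gcp-project=my-project \
+//	    --log-opt gcp-log-cmd=true your-image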
+func ValidateLogOpts(cfg map[string]string) error {
+	for k := range cfg {
+		switch k {
+		case projectOptKey, logLabelsKey, logEnvKey, logEnvRegexKey, logCmdKey, logZoneKey, logNameKey, logIDKey:
+		default:
+			return fmt.Errorf("%q is not a valid option for the gcplogs driver", k)
+		}
+	}
+	return nil
+}
+
+func (l *gcplogs) Log(m *logger.Message) error {
+	message := string(m.Line)
+	ts := m.Timestamp
+	logger.PutMessage(m)
+
+	l.logger.Log(logging.Entry{
+		Timestamp: ts,
+		Payload: &dockerLogEntry{
+			Instance:  l.instance,
+			Container: l.container,
+			Message:   message,
+		},
+	})
+	return nil
+}
+
+func (l *gcplogs) Close() error {
+	l.logger.Flush()
+	return nil
+}
+
+func (l *gcplogs) Name() string {
+	return name
+}
diff --git a/vendor/github.com/moby/moby/daemon/logger/gcplogs/gcplogging_linux.go b/vendor/github.com/moby/moby/daemon/logger/gcplogs/gcplogging_linux.go
new file mode 100644
index 000000000..9af8b3c17
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/logger/gcplogs/gcplogging_linux.go
@@ -0,0 +1,31 @@
+// +build linux
+
+package gcplogs
+
+import (
+	"os"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/dockerversion"
+	"github.com/docker/docker/pkg/homedir"
+)
+
+// ensureHomeIfIAmStatic ensures that $HOME is set if dockerversion.IAmStatic is "true".
+// See issue #29344: gcplogs segfaults (static binary)
+// If HOME is not set, logging.NewClient() will call os/user.Current() via oauth2/google.
+// However, in a static binary, os/user.Current() leads to a segfault due to a glibc issue
+// that won't be fixed in the short term. (golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341)
+// So we forcibly set HOME so as to avoid a call to os/user.Current().
+func ensureHomeIfIAmStatic() error {
+	// Note: dockerversion.IAmStatic and homedir.GetStatic() are only available for linux.
+	// So we need to use them in this gcplogging_linux.go rather than in gcplogging.go
+	if dockerversion.IAmStatic == "true" && os.Getenv("HOME") == "" {
+		home, err := homedir.GetStatic()
+		if err != nil {
+			return err
+		}
+		logrus.Warnf("gcplogs requires HOME to be set for static daemon binary. Forcibly setting HOME to %s.", home)
+		os.Setenv("HOME", home)
+	}
+	return nil
+}
diff --git a/vendor/github.com/moby/moby/daemon/logger/gcplogs/gcplogging_others.go b/vendor/github.com/moby/moby/daemon/logger/gcplogs/gcplogging_others.go
new file mode 100644
index 000000000..45e3b8d6d
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/logger/gcplogs/gcplogging_others.go
@@ -0,0 +1,7 @@
+// +build !linux
+
+package gcplogs
+
+func ensureHomeIfIAmStatic() error {
+	return nil
+}
diff --git a/vendor/github.com/moby/moby/daemon/logger/gelf/gelf.go b/vendor/github.com/moby/moby/daemon/logger/gelf/gelf.go
new file mode 100644
index 000000000..4b0130dfb
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/logger/gelf/gelf.go
@@ -0,0 +1,209 @@
+// +build linux
+
+// Package gelf provides the log driver for forwarding server logs to
+// endpoints that support the Graylog Extended Log Format.
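+// Messages are shipped over UDP, the only transport parseAddress accepts;
+// stdout lines map to GELF level INFO and stderr lines to level ERR.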
+package gelf + +import ( + "compress/flate" + "encoding/json" + "fmt" + "net" + "net/url" + "strconv" + "time" + + "github.com/Graylog2/go-gelf/gelf" + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/pkg/urlutil" +) + +const name = "gelf" + +type gelfLogger struct { + writer *gelf.Writer + info logger.Info + hostname string + rawExtra json.RawMessage +} + +func init() { + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// New creates a gelf logger using the configuration passed in on the +// context. The supported context configuration variable is gelf-address. +func New(info logger.Info) (logger.Logger, error) { + // parse gelf address + address, err := parseAddress(info.Config["gelf-address"]) + if err != nil { + return nil, err + } + + // collect extra data for GELF message + hostname, err := info.Hostname() + if err != nil { + return nil, fmt.Errorf("gelf: cannot access hostname to set source field") + } + + // parse log tag + tag, err := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate) + if err != nil { + return nil, err + } + + extra := map[string]interface{}{ + "_container_id": info.ContainerID, + "_container_name": info.Name(), + "_image_id": info.ContainerImageID, + "_image_name": info.ContainerImageName, + "_command": info.Command(), + "_tag": tag, + "_created": info.ContainerCreated, + } + + extraAttrs, err := info.ExtraAttributes(func(key string) string { + if key[0] == '_' { + return key + } + return "_" + key + }) + + if err != nil { + return nil, err + } + + for k, v := range extraAttrs { + extra[k] = v + } + + rawExtra, err := json.Marshal(extra) + if err != nil { + return nil, err + } + + // create new gelfWriter + gelfWriter, err := gelf.NewWriter(address) + if err != nil { + return nil, fmt.Errorf("gelf: cannot connect to GELF endpoint: %s %v", address, err) + } + + if v, ok := info.Config["gelf-compression-type"]; ok { + switch v { + case "gzip": + gelfWriter.CompressionType = gelf.CompressGzip + case "zlib": + gelfWriter.CompressionType = gelf.CompressZlib + case "none": + gelfWriter.CompressionType = gelf.CompressNone + default: + return nil, fmt.Errorf("gelf: invalid compression type %q", v) + } + } + + if v, ok := info.Config["gelf-compression-level"]; ok { + val, err := strconv.Atoi(v) + if err != nil { + return nil, fmt.Errorf("gelf: invalid compression level %s, err %v", v, err) + } + gelfWriter.CompressionLevel = val + } + + return &gelfLogger{ + writer: gelfWriter, + info: info, + hostname: hostname, + rawExtra: rawExtra, + }, nil +} + +func (s *gelfLogger) Log(msg *logger.Message) error { + level := gelf.LOG_INFO + if msg.Source == "stderr" { + level = gelf.LOG_ERR + } + + m := gelf.Message{ + Version: "1.1", + Host: s.hostname, + Short: string(msg.Line), + TimeUnix: float64(msg.Timestamp.UnixNano()/int64(time.Millisecond)) / 1000.0, + Level: level, + RawExtra: s.rawExtra, + } + logger.PutMessage(msg) + + if err := s.writer.WriteMessage(&m); err != nil { + return fmt.Errorf("gelf: cannot send GELF message: %v", err) + } + return nil +} + +func (s *gelfLogger) Close() error { + return s.writer.Close() +} + +func (s *gelfLogger) Name() string { + return name +} + +// ValidateLogOpt looks for gelf specific log option gelf-address. 
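+// It also range-checks gelf-compression-level against the compress/flate
+// constants (-1 through 9) and gelf-compression-type against gzip, zlib and none.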
+func ValidateLogOpt(cfg map[string]string) error {
+	for key, val := range cfg {
+		switch key {
+		case "gelf-address":
+		case "tag":
+		case "labels":
+		case "env":
+		case "env-regex":
+		case "gelf-compression-level":
+			i, err := strconv.Atoi(val)
+			if err != nil || i < flate.DefaultCompression || i > flate.BestCompression {
+				return fmt.Errorf("unknown value %q for log opt %q for gelf log driver", val, key)
+			}
+		case "gelf-compression-type":
+			switch val {
+			case "gzip", "zlib", "none":
+			default:
+				return fmt.Errorf("unknown value %q for log opt %q for gelf log driver", val, key)
+			}
+		default:
+			return fmt.Errorf("unknown log opt %q for gelf log driver", key)
+		}
+	}
+
+	_, err := parseAddress(cfg["gelf-address"])
+	return err
+}
+
+func parseAddress(address string) (string, error) {
+	if address == "" {
+		return "", nil
+	}
+	if !urlutil.IsTransportURL(address) {
+		return "", fmt.Errorf("gelf-address should be in form proto://address, got %v", address)
+	}
+	url, err := url.Parse(address)
+	if err != nil {
+		return "", err
+	}
+
+	// we support only udp
+	if url.Scheme != "udp" {
+		return "", fmt.Errorf("gelf: endpoint needs to be UDP")
+	}
+
+	// get host and port
+	if _, _, err = net.SplitHostPort(url.Host); err != nil {
+		return "", fmt.Errorf("gelf: please provide gelf-address as udp://host:port")
+	}
+
+	return url.Host, nil
+}
diff --git a/vendor/github.com/moby/moby/daemon/logger/gelf/gelf_unsupported.go b/vendor/github.com/moby/moby/daemon/logger/gelf/gelf_unsupported.go
new file mode 100644
index 000000000..266f73b18
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/logger/gelf/gelf_unsupported.go
@@ -0,0 +1,3 @@
+// +build !linux
+
+package gelf
diff --git a/vendor/github.com/moby/moby/daemon/logger/journald/journald.go b/vendor/github.com/moby/moby/daemon/logger/journald/journald.go
new file mode 100644
index 000000000..86d7378b5
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/logger/journald/journald.go
@@ -0,0 +1,126 @@
+// +build linux
+
+// Package journald provides the log driver for forwarding server logs
+// to endpoints that receive the systemd format.
+package journald
+
+import (
+	"fmt"
+	"sync"
+	"unicode"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/coreos/go-systemd/journal"
+	"github.com/docker/docker/daemon/logger"
+	"github.com/docker/docker/daemon/logger/loggerutils"
+)
+
+const name = "journald"
+
+type journald struct {
+	mu      sync.Mutex
+	vars    map[string]string // additional variables and values to send to the journal along with the log message
+	readers readerList
+	closed  bool
+}
+
+type readerList struct {
+	readers map[*logger.LogWatcher]*logger.LogWatcher
+}
+
+func init() {
+	if err := logger.RegisterLogDriver(name, New); err != nil {
+		logrus.Fatal(err)
+	}
+	if err := logger.RegisterLogOptValidator(name, validateLogOpt); err != nil {
+		logrus.Fatal(err)
+	}
+}
+
+// sanitizeKeyMod returns the string sanitized so that it can be used as a
+// journald field name. Journal fields have special requirements: they must be
+// composed of uppercase letters, numbers, and underscores, and must not start
+// with an underscore.
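+// For example, "com.example/label-1" becomes "COM_EXAMPLE_LABEL_1".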
+func sanitizeKeyMod(s string) string { + n := "" + for _, v := range s { + if 'a' <= v && v <= 'z' { + v = unicode.ToUpper(v) + } else if ('Z' < v || v < 'A') && ('9' < v || v < '0') { + v = '_' + } + // If (n == "" && v == '_'), then we will skip as this is the beginning with '_' + if !(n == "" && v == '_') { + n += string(v) + } + } + return n +} + +// New creates a journald logger using the configuration passed in on +// the context. +func New(info logger.Info) (logger.Logger, error) { + if !journal.Enabled() { + return nil, fmt.Errorf("journald is not enabled on this host") + } + + // parse log tag + tag, err := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate) + if err != nil { + return nil, err + } + + vars := map[string]string{ + "CONTAINER_ID": info.ContainerID[:12], + "CONTAINER_ID_FULL": info.ContainerID, + "CONTAINER_NAME": info.Name(), + "CONTAINER_TAG": tag, + } + extraAttrs, err := info.ExtraAttributes(sanitizeKeyMod) + if err != nil { + return nil, err + } + for k, v := range extraAttrs { + vars[k] = v + } + return &journald{vars: vars, readers: readerList{readers: make(map[*logger.LogWatcher]*logger.LogWatcher)}}, nil +} + +// We don't actually accept any options, but we have to supply a callback for +// the factory to pass the (probably empty) configuration map to. +func validateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case "labels": + case "env": + case "env-regex": + case "tag": + default: + return fmt.Errorf("unknown log opt '%s' for journald log driver", key) + } + } + return nil +} + +func (s *journald) Log(msg *logger.Message) error { + vars := map[string]string{} + for k, v := range s.vars { + vars[k] = v + } + if msg.Partial { + vars["CONTAINER_PARTIAL_MESSAGE"] = "true" + } + + line := string(msg.Line) + source := msg.Source + logger.PutMessage(msg) + + if source == "stderr" { + return journal.Send(line, journal.PriErr, vars) + } + return journal.Send(line, journal.PriInfo, vars) +} + +func (s *journald) Name() string { + return name +} diff --git a/vendor/github.com/moby/moby/daemon/logger/journald/journald_test.go b/vendor/github.com/moby/moby/daemon/logger/journald/journald_test.go new file mode 100644 index 000000000..224423fd0 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/journald/journald_test.go @@ -0,0 +1,23 @@ +// +build linux + +package journald + +import ( + "testing" +) + +func TestSanitizeKeyMod(t *testing.T) { + entries := map[string]string{ + "io.kubernetes.pod.name": "IO_KUBERNETES_POD_NAME", + "io?.kubernetes.pod.name": "IO__KUBERNETES_POD_NAME", + "?io.kubernetes.pod.name": "IO_KUBERNETES_POD_NAME", + "io123.kubernetes.pod.name": "IO123_KUBERNETES_POD_NAME", + "_io123.kubernetes.pod.name": "IO123_KUBERNETES_POD_NAME", + "__io123_kubernetes.pod.name": "IO123_KUBERNETES_POD_NAME", + } + for k, v := range entries { + if sanitizeKeyMod(k) != v { + t.Fatalf("Failed to sanitize %s, got %s, expected %s", k, sanitizeKeyMod(k), v) + } + } +} diff --git a/vendor/github.com/moby/moby/daemon/logger/journald/journald_unsupported.go b/vendor/github.com/moby/moby/daemon/logger/journald/journald_unsupported.go new file mode 100644 index 000000000..d52ca92e4 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/journald/journald_unsupported.go @@ -0,0 +1,6 @@ +// +build !linux + +package journald + +type journald struct { +} diff --git a/vendor/github.com/moby/moby/daemon/logger/journald/read.go b/vendor/github.com/moby/moby/daemon/logger/journald/read.go new file mode 100644 index 
000000000..4c6301a20
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/logger/journald/read.go
@@ -0,0 +1,423 @@
+// +build linux,cgo,!static_build,journald
+
+package journald
+
+// #include <sys/types.h>
+// #include <sys/poll.h>
+// #include <systemd/sd-journal.h>
+// #include <errno.h>
+// #include <stdio.h>
+// #include <stdlib.h>
+// #include <string.h>
+// #include <time.h>
+// #include <unistd.h>
+//
+//static int get_message(sd_journal *j, const char **msg, size_t *length, int *partial)
+//{
+//	int rc;
+//	size_t plength;
+//	*msg = NULL;
+//	*length = 0;
+//	plength = strlen("CONTAINER_PARTIAL_MESSAGE=true");
+//	rc = sd_journal_get_data(j, "CONTAINER_PARTIAL_MESSAGE", (const void **) msg, length);
+//	*partial = ((rc == 0) && (*length == plength) && (memcmp(*msg, "CONTAINER_PARTIAL_MESSAGE=true", plength) == 0));
+//	rc = sd_journal_get_data(j, "MESSAGE", (const void **) msg, length);
+//	if (rc == 0) {
+//		if (*length > 8) {
+//			(*msg) += 8;
+//			*length -= 8;
+//		} else {
+//			*msg = NULL;
+//			*length = 0;
+//			rc = -ENOENT;
+//		}
+//	}
+//	return rc;
+//}
+//static int get_priority(sd_journal *j, int *priority)
+//{
+//	const void *data;
+//	size_t i, length;
+//	int rc;
+//	*priority = -1;
+//	rc = sd_journal_get_data(j, "PRIORITY", &data, &length);
+//	if (rc == 0) {
+//		if ((length > 9) && (strncmp(data, "PRIORITY=", 9) == 0)) {
+//			*priority = 0;
+//			for (i = 9; i < length; i++) {
+//				*priority = *priority * 10 + ((const char *)data)[i] - '0';
+//			}
+//			if (length > 9) {
+//				rc = 0;
+//			}
+//		}
+//	}
+//	return rc;
+//}
+//static int is_attribute_field(const char *msg, size_t length)
+//{
+//	static const struct known_field {
+//		const char *name;
+//		size_t length;
+//	} fields[] = {
+//		{"MESSAGE", sizeof("MESSAGE") - 1},
+//		{"MESSAGE_ID", sizeof("MESSAGE_ID") - 1},
+//		{"PRIORITY", sizeof("PRIORITY") - 1},
+//		{"CODE_FILE", sizeof("CODE_FILE") - 1},
+//		{"CODE_LINE", sizeof("CODE_LINE") - 1},
+//		{"CODE_FUNC", sizeof("CODE_FUNC") - 1},
+//		{"ERRNO", sizeof("ERRNO") - 1},
+//		{"SYSLOG_FACILITY", sizeof("SYSLOG_FACILITY") - 1},
+//		{"SYSLOG_IDENTIFIER", sizeof("SYSLOG_IDENTIFIER") - 1},
+//		{"SYSLOG_PID", sizeof("SYSLOG_PID") - 1},
+//		{"CONTAINER_NAME", sizeof("CONTAINER_NAME") - 1},
+//		{"CONTAINER_ID", sizeof("CONTAINER_ID") - 1},
+//		{"CONTAINER_ID_FULL", sizeof("CONTAINER_ID_FULL") - 1},
+//		{"CONTAINER_TAG", sizeof("CONTAINER_TAG") - 1},
+//	};
+//	unsigned int i;
+//	void *p;
+//	if ((length < 1) || (msg[0] == '_') || ((p = memchr(msg, '=', length)) == NULL)) {
+//		return -1;
+//	}
+//	length = ((const char *) p) - msg;
+//	for (i = 0; i < sizeof(fields) / sizeof(fields[0]); i++) {
+//		if ((fields[i].length == length) && (memcmp(fields[i].name, msg, length) == 0)) {
+//			return -1;
+//		}
+//	}
+//	return 0;
+//}
+//static int get_attribute_field(sd_journal *j, const char **msg, size_t *length)
+//{
+//	int rc;
+//	*msg = NULL;
+//	*length = 0;
+//	while ((rc = sd_journal_enumerate_data(j, (const void **) msg, length)) > 0) {
+//		if (is_attribute_field(*msg, *length) == 0) {
+//			break;
+//		}
+//		rc = -ENOENT;
+//	}
+//	return rc;
+//}
+//static int wait_for_data_cancelable(sd_journal *j, int pipefd)
+//{
+//	struct pollfd fds[2];
+//	uint64_t when = 0;
+//	int timeout, jevents, i;
+//	struct timespec ts;
+//	uint64_t now;
+//
+//	memset(&fds, 0, sizeof(fds));
+//	fds[0].fd = pipefd;
+//	fds[0].events = POLLHUP;
+//	fds[1].fd = sd_journal_get_fd(j);
+//	if (fds[1].fd < 0) {
+//		return fds[1].fd;
+//	}
+//
+//	do {
+//		jevents = sd_journal_get_events(j);
+//		if (jevents < 0) {
+//			return jevents;
+//		}
+//		fds[1].events = jevents;
+//		sd_journal_get_timeout(j, &when);
+//		
if (when == -1) { +// timeout = -1; +// } else { +// clock_gettime(CLOCK_MONOTONIC, &ts); +// now = (uint64_t) ts.tv_sec * 1000000 + ts.tv_nsec / 1000; +// timeout = when > now ? (int) ((when - now + 999) / 1000) : 0; +// } +// i = poll(fds, 2, timeout); +// if ((i == -1) && (errno != EINTR)) { +// /* An unexpected error. */ +// return (errno != 0) ? -errno : -EINTR; +// } +// if (fds[0].revents & POLLHUP) { +// /* The close notification pipe was closed. */ +// return 0; +// } +// if (sd_journal_process(j) == SD_JOURNAL_APPEND) { +// /* Data, which we might care about, was appended. */ +// return 1; +// } +// } while ((fds[0].revents & POLLHUP) == 0); +// return 0; +//} +import "C" + +import ( + "fmt" + "strings" + "time" + "unsafe" + + "github.com/Sirupsen/logrus" + "github.com/coreos/go-systemd/journal" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/daemon/logger" +) + +func (s *journald) Close() error { + s.mu.Lock() + s.closed = true + for reader := range s.readers.readers { + reader.Close() + } + s.mu.Unlock() + return nil +} + +func (s *journald) drainJournal(logWatcher *logger.LogWatcher, config logger.ReadConfig, j *C.sd_journal, oldCursor *C.char) *C.char { + var msg, data, cursor *C.char + var length C.size_t + var stamp C.uint64_t + var priority, partial C.int + + // Walk the journal from here forward until we run out of new entries. +drain: + for { + // Try not to send a given entry twice. + if oldCursor != nil { + for C.sd_journal_test_cursor(j, oldCursor) > 0 { + if C.sd_journal_next(j) <= 0 { + break drain + } + } + } + // Read and send the logged message, if there is one to read. + i := C.get_message(j, &msg, &length, &partial) + if i != -C.ENOENT && i != -C.EADDRNOTAVAIL { + // Read the entry's timestamp. + if C.sd_journal_get_realtime_usec(j, &stamp) != 0 { + break + } + // Set up the time and text of the entry. + timestamp := time.Unix(int64(stamp)/1000000, (int64(stamp)%1000000)*1000) + line := C.GoBytes(unsafe.Pointer(msg), C.int(length)) + if partial == 0 { + line = append(line, "\n"...) + } + // Recover the stream name by mapping + // from the journal priority back to + // the stream that we would have + // assigned that value. + source := "" + if C.get_priority(j, &priority) != 0 { + source = "" + } else if priority == C.int(journal.PriErr) { + source = "stderr" + } else if priority == C.int(journal.PriInfo) { + source = "stdout" + } + // Retrieve the values of any variables we're adding to the journal. + var attrs []backend.LogAttr + C.sd_journal_restart_data(j) + for C.get_attribute_field(j, &data, &length) > C.int(0) { + kv := strings.SplitN(C.GoStringN(data, C.int(length)), "=", 2) + attrs = append(attrs, backend.LogAttr{Key: kv[0], Value: kv[1]}) + } + // Send the log message. + logWatcher.Msg <- &logger.Message{ + Line: line, + Source: source, + Timestamp: timestamp.In(time.UTC), + Attrs: attrs, + } + } + // If we're at the end of the journal, we're done (for now). + if C.sd_journal_next(j) <= 0 { + break + } + } + + // free(NULL) is safe + C.free(unsafe.Pointer(oldCursor)) + if C.sd_journal_get_cursor(j, &cursor) != 0 { + // ensure that we won't be freeing an address that's invalid + cursor = nil + } + return cursor +} + +func (s *journald) followJournal(logWatcher *logger.LogWatcher, config logger.ReadConfig, j *C.sd_journal, pfd [2]C.int, cursor *C.char) *C.char { + s.mu.Lock() + s.readers.readers[logWatcher] = logWatcher + if s.closed { + // the journald Logger is closed, presumably because the container has been + // reset. 
So we shouldn't follow, because we'll never be woken up. But we + // should make one more drainJournal call to be sure we've got all the logs. + // Close pfd[1] so that one drainJournal happens, then cleanup, then return. + C.close(pfd[1]) + } + s.mu.Unlock() + + newCursor := make(chan *C.char) + + go func() { + for { + // Keep copying journal data out until we're notified to stop + // or we hit an error. + status := C.wait_for_data_cancelable(j, pfd[0]) + if status < 0 { + cerrstr := C.strerror(C.int(-status)) + errstr := C.GoString(cerrstr) + fmtstr := "error %q while attempting to follow journal for container %q" + logrus.Errorf(fmtstr, errstr, s.vars["CONTAINER_ID_FULL"]) + break + } + + cursor = s.drainJournal(logWatcher, config, j, cursor) + + if status != 1 { + // We were notified to stop + break + } + } + + // Clean up. + C.close(pfd[0]) + s.mu.Lock() + delete(s.readers.readers, logWatcher) + s.mu.Unlock() + close(logWatcher.Msg) + newCursor <- cursor + }() + + // Wait until we're told to stop. + select { + case cursor = <-newCursor: + case <-logWatcher.WatchClose(): + // Notify the other goroutine that its work is done. + C.close(pfd[1]) + cursor = <-newCursor + } + + return cursor +} + +func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) { + var j *C.sd_journal + var cmatch, cursor *C.char + var stamp C.uint64_t + var sinceUnixMicro uint64 + var pipes [2]C.int + + // Get a handle to the journal. + rc := C.sd_journal_open(&j, C.int(0)) + if rc != 0 { + logWatcher.Err <- fmt.Errorf("error opening journal") + close(logWatcher.Msg) + return + } + // If we end up following the log, we can set the journal context + // pointer and the channel pointer to nil so that we won't close them + // here, potentially while the goroutine that uses them is still + // running. Otherwise, close them when we return from this function. + following := false + defer func(pfollowing *bool) { + if !*pfollowing { + close(logWatcher.Msg) + } + C.sd_journal_close(j) + }(&following) + // Remove limits on the size of data items that we'll retrieve. + rc = C.sd_journal_set_data_threshold(j, C.size_t(0)) + if rc != 0 { + logWatcher.Err <- fmt.Errorf("error setting journal data threshold") + return + } + // Add a match to have the library do the searching for us. + cmatch = C.CString("CONTAINER_ID_FULL=" + s.vars["CONTAINER_ID_FULL"]) + defer C.free(unsafe.Pointer(cmatch)) + rc = C.sd_journal_add_match(j, unsafe.Pointer(cmatch), C.strlen(cmatch)) + if rc != 0 { + logWatcher.Err <- fmt.Errorf("error setting journal match") + return + } + // If we have a cutoff time, convert it to Unix time once. + if !config.Since.IsZero() { + nano := config.Since.UnixNano() + sinceUnixMicro = uint64(nano / 1000) + } + if config.Tail > 0 { + lines := config.Tail + // Start at the end of the journal. + if C.sd_journal_seek_tail(j) < 0 { + logWatcher.Err <- fmt.Errorf("error seeking to end of journal") + return + } + if C.sd_journal_previous(j) < 0 { + logWatcher.Err <- fmt.Errorf("error backtracking to previous journal entry") + return + } + // Walk backward. + for lines > 0 { + // Stop if the entry time is before our cutoff. + // We'll need the entry time if it isn't, so go + // ahead and parse it now. + if C.sd_journal_get_realtime_usec(j, &stamp) != 0 { + break + } else { + // Compare the timestamp on the entry + // to our threshold value. 
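+				// Both sinceUnixMicro and stamp are microseconds since
+				// the epoch, so a plain integer comparison suffices.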
+ if sinceUnixMicro != 0 && sinceUnixMicro > uint64(stamp) { + break + } + } + lines-- + // If we're at the start of the journal, or + // don't need to back up past any more entries, + // stop. + if lines == 0 || C.sd_journal_previous(j) <= 0 { + break + } + } + } else { + // Start at the beginning of the journal. + if C.sd_journal_seek_head(j) < 0 { + logWatcher.Err <- fmt.Errorf("error seeking to start of journal") + return + } + // If we have a cutoff date, fast-forward to it. + if sinceUnixMicro != 0 && C.sd_journal_seek_realtime_usec(j, C.uint64_t(sinceUnixMicro)) != 0 { + logWatcher.Err <- fmt.Errorf("error seeking to start time in journal") + return + } + if C.sd_journal_next(j) < 0 { + logWatcher.Err <- fmt.Errorf("error skipping to next journal entry") + return + } + } + cursor = s.drainJournal(logWatcher, config, j, nil) + if config.Follow { + // Allocate a descriptor for following the journal, if we'll + // need one. Do it here so that we can report if it fails. + if fd := C.sd_journal_get_fd(j); fd < C.int(0) { + logWatcher.Err <- fmt.Errorf("error opening journald follow descriptor: %q", C.GoString(C.strerror(-fd))) + } else { + // Create a pipe that we can poll at the same time as + // the journald descriptor. + if C.pipe(&pipes[0]) == C.int(-1) { + logWatcher.Err <- fmt.Errorf("error opening journald close notification pipe") + } else { + cursor = s.followJournal(logWatcher, config, j, pipes, cursor) + // Let followJournal handle freeing the journal context + // object and closing the channel. + following = true + } + } + } + + C.free(unsafe.Pointer(cursor)) + return +} + +func (s *journald) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { + logWatcher := logger.NewLogWatcher() + go s.readLogs(logWatcher, config) + return logWatcher +} diff --git a/vendor/github.com/moby/moby/daemon/logger/journald/read_native.go b/vendor/github.com/moby/moby/daemon/logger/journald/read_native.go new file mode 100644 index 000000000..bba6de55b --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/journald/read_native.go @@ -0,0 +1,6 @@ +// +build linux,cgo,!static_build,journald,!journald_compat + +package journald + +// #cgo pkg-config: libsystemd +import "C" diff --git a/vendor/github.com/moby/moby/daemon/logger/journald/read_native_compat.go b/vendor/github.com/moby/moby/daemon/logger/journald/read_native_compat.go new file mode 100644 index 000000000..3f7a43c59 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/journald/read_native_compat.go @@ -0,0 +1,6 @@ +// +build linux,cgo,!static_build,journald,journald_compat + +package journald + +// #cgo pkg-config: libsystemd-journal +import "C" diff --git a/vendor/github.com/moby/moby/daemon/logger/journald/read_unsupported.go b/vendor/github.com/moby/moby/daemon/logger/journald/read_unsupported.go new file mode 100644 index 000000000..b43abdcaf --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/journald/read_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux !cgo static_build !journald + +package journald + +func (s *journald) Close() error { + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/logger/jsonfilelog/jsonfilelog.go b/vendor/github.com/moby/moby/daemon/logger/jsonfilelog/jsonfilelog.go new file mode 100644 index 000000000..e8df0ecbd --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/jsonfilelog/jsonfilelog.go @@ -0,0 +1,174 @@ +// Package jsonfilelog provides the default Logger implementation for +// Docker logging. 
This logger logs to files on the host server in the
+// JSON format.
+package jsonfilelog
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"strconv"
+	"sync"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/daemon/logger"
+	"github.com/docker/docker/daemon/logger/loggerutils"
+	"github.com/docker/docker/pkg/jsonlog"
+	"github.com/docker/go-units"
+	"github.com/pkg/errors"
+)
+
+// Name is the name of the json-file logging driver.
+const Name = "json-file"
+
+// JSONFileLogger is the Logger implementation for default Docker logging.
+type JSONFileLogger struct {
+	extra []byte // json-encoded extra attributes
+
+	mu      sync.RWMutex
+	buf     *bytes.Buffer // avoids allocating a new buffer on each call to `Log()`
+	closed  bool
+	writer  *loggerutils.RotateFileWriter
+	readers map[*logger.LogWatcher]struct{} // stores the active log followers
+}
+
+func init() {
+	if err := logger.RegisterLogDriver(Name, New); err != nil {
+		logrus.Fatal(err)
+	}
+	if err := logger.RegisterLogOptValidator(Name, ValidateLogOpt); err != nil {
+		logrus.Fatal(err)
+	}
+}
+
+// New creates a new JSONFileLogger which writes to the filename passed in
+// on the given context.
+func New(info logger.Info) (logger.Logger, error) {
+	var capval int64 = -1
+	if capacity, ok := info.Config["max-size"]; ok {
+		var err error
+		capval, err = units.FromHumanSize(capacity)
+		if err != nil {
+			return nil, err
+		}
+	}
+	var maxFiles = 1
+	if maxFileString, ok := info.Config["max-file"]; ok {
+		var err error
+		maxFiles, err = strconv.Atoi(maxFileString)
+		if err != nil {
+			return nil, err
+		}
+		if maxFiles < 1 {
+			return nil, fmt.Errorf("max-file cannot be less than 1")
+		}
+	}
+
+	writer, err := loggerutils.NewRotateFileWriter(info.LogPath, capval, maxFiles)
+	if err != nil {
+		return nil, err
+	}
+
+	var extra []byte
+	attrs, err := info.ExtraAttributes(nil)
+	if err != nil {
+		return nil, err
+	}
+	if len(attrs) > 0 {
+		var err error
+		extra, err = json.Marshal(attrs)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return &JSONFileLogger{
+		buf:     bytes.NewBuffer(nil),
+		writer:  writer,
+		readers: make(map[*logger.LogWatcher]struct{}),
+		extra:   extra,
+	}, nil
+}
+
+// Log converts logger.Message to jsonlog.JSONLog and serializes it to file.
+func (l *JSONFileLogger) Log(msg *logger.Message) error {
+	l.mu.Lock()
+	err := writeMessageBuf(l.writer, msg, l.extra, l.buf)
+	l.buf.Reset()
+	l.mu.Unlock()
+	return err
+}
+
+func writeMessageBuf(w io.Writer, m *logger.Message, extra json.RawMessage, buf *bytes.Buffer) error {
+	if err := marshalMessage(m, extra, buf); err != nil {
+		logger.PutMessage(m)
+		return err
+	}
+	logger.PutMessage(m)
+	if _, err := w.Write(buf.Bytes()); err != nil {
+		return errors.Wrap(err, "error writing log entry")
+	}
+	return nil
+}
+
+func marshalMessage(msg *logger.Message, extra json.RawMessage, buf *bytes.Buffer) error {
+	timestamp, err := jsonlog.FastTimeMarshalJSON(msg.Timestamp)
+	if err != nil {
+		return err
+	}
+	logLine := msg.Line
+	if !msg.Partial {
+		logLine = append(msg.Line, '\n')
+	}
+	err = (&jsonlog.JSONLogs{
+		Log:      logLine,
+		Stream:   msg.Source,
+		Created:  timestamp,
+		RawAttrs: extra,
+	}).MarshalJSONBuf(buf)
+	if err != nil {
+		return errors.Wrap(err, "error writing log message to buffer")
+	}
+	err = buf.WriteByte('\n')
+	return errors.Wrap(err, "error finalizing log buffer")
+}
+
+// ValidateLogOpt looks for json specific log options max-file & max-size.
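+// max-size takes the usual units syntax; a typical invocation (the values
+// shown are hypothetical) would be:
+//
+//	docker run --log-opt max-size=10m --log-opt max-file=3 your-image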
+func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case "max-file": + case "max-size": + case "labels": + case "env": + case "env-regex": + default: + return fmt.Errorf("unknown log opt '%s' for json-file log driver", key) + } + } + return nil +} + +// LogPath returns the location the given json logger logs to. +func (l *JSONFileLogger) LogPath() string { + return l.writer.LogPath() +} + +// Close closes underlying file and signals all readers to stop. +func (l *JSONFileLogger) Close() error { + l.mu.Lock() + l.closed = true + err := l.writer.Close() + for r := range l.readers { + r.Close() + delete(l.readers, r) + } + l.mu.Unlock() + return err +} + +// Name returns name of this logger. +func (l *JSONFileLogger) Name() string { + return Name +} diff --git a/vendor/github.com/moby/moby/daemon/logger/jsonfilelog/jsonfilelog_test.go b/vendor/github.com/moby/moby/daemon/logger/jsonfilelog/jsonfilelog_test.go new file mode 100644 index 000000000..d2d36e943 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/jsonfilelog/jsonfilelog_test.go @@ -0,0 +1,249 @@ +package jsonfilelog + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "strconv" + "testing" + "time" + + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/pkg/jsonlog" +) + +func TestJSONFileLogger(t *testing.T) { + cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" + tmp, err := ioutil.TempDir("", "docker-logger-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + filename := filepath.Join(tmp, "container.log") + l, err := New(logger.Info{ + ContainerID: cid, + LogPath: filename, + }) + if err != nil { + t.Fatal(err) + } + defer l.Close() + + if err := l.Log(&logger.Message{Line: []byte("line1"), Source: "src1"}); err != nil { + t.Fatal(err) + } + if err := l.Log(&logger.Message{Line: []byte("line2"), Source: "src2"}); err != nil { + t.Fatal(err) + } + if err := l.Log(&logger.Message{Line: []byte("line3"), Source: "src3"}); err != nil { + t.Fatal(err) + } + res, err := ioutil.ReadFile(filename) + if err != nil { + t.Fatal(err) + } + expected := `{"log":"line1\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line2\n","stream":"src2","time":"0001-01-01T00:00:00Z"} +{"log":"line3\n","stream":"src3","time":"0001-01-01T00:00:00Z"} +` + + if string(res) != expected { + t.Fatalf("Wrong log content: %q, expected %q", res, expected) + } +} + +func BenchmarkJSONFileLogger(b *testing.B) { + cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" + tmp, err := ioutil.TempDir("", "docker-logger-") + if err != nil { + b.Fatal(err) + } + defer os.RemoveAll(tmp) + filename := filepath.Join(tmp, "container.log") + l, err := New(logger.Info{ + ContainerID: cid, + LogPath: filename, + }) + if err != nil { + b.Fatal(err) + } + defer l.Close() + + testLine := "Line that thinks that it is log line from docker\n" + msg := &logger.Message{Line: []byte(testLine), Source: "stderr", Timestamp: time.Now().UTC()} + jsonlog, err := (&jsonlog.JSONLog{Log: string(msg.Line) + "\n", Stream: msg.Source, Created: msg.Timestamp}).MarshalJSON() + if err != nil { + b.Fatal(err) + } + b.SetBytes(int64(len(jsonlog)+1) * 30) + b.ResetTimer() + for i := 0; i < b.N; i++ { + for j := 0; j < 30; j++ { + if err := l.Log(msg); err != nil { + b.Fatal(err) + } + } + } +} + +func TestJSONFileLoggerWithOpts(t *testing.T) { + cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" + tmp, err := 
ioutil.TempDir("", "docker-logger-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + filename := filepath.Join(tmp, "container.log") + config := map[string]string{"max-file": "2", "max-size": "1k"} + l, err := New(logger.Info{ + ContainerID: cid, + LogPath: filename, + Config: config, + }) + if err != nil { + t.Fatal(err) + } + defer l.Close() + for i := 0; i < 20; i++ { + if err := l.Log(&logger.Message{Line: []byte("line" + strconv.Itoa(i)), Source: "src1"}); err != nil { + t.Fatal(err) + } + } + res, err := ioutil.ReadFile(filename) + if err != nil { + t.Fatal(err) + } + penUlt, err := ioutil.ReadFile(filename + ".1") + if err != nil { + t.Fatal(err) + } + + expectedPenultimate := `{"log":"line0\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line1\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line2\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line3\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line4\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line5\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line6\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line7\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line8\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line9\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line10\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line11\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line12\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line13\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line14\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line15\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +` + expected := `{"log":"line16\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line17\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line18\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line19\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +` + + if string(res) != expected { + t.Fatalf("Wrong log content: %q, expected %q", res, expected) + } + if string(penUlt) != expectedPenultimate { + t.Fatalf("Wrong log content: %q, expected %q", penUlt, expectedPenultimate) + } + +} + +func TestJSONFileLoggerWithLabelsEnv(t *testing.T) { + cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" + tmp, err := ioutil.TempDir("", "docker-logger-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + filename := filepath.Join(tmp, "container.log") + config := map[string]string{"labels": "rack,dc", "env": "environ,debug,ssl", "env-regex": "^dc"} + l, err := New(logger.Info{ + ContainerID: cid, + LogPath: filename, + Config: config, + ContainerLabels: map[string]string{"rack": "101", "dc": "lhr"}, + ContainerEnv: []string{"environ=production", "debug=false", "port=10001", "ssl=true", "dc_region=west"}, + }) + if err != nil { + t.Fatal(err) + } + defer l.Close() + if err := l.Log(&logger.Message{Line: []byte("line"), Source: "src1"}); err != nil { + t.Fatal(err) + } + res, err := ioutil.ReadFile(filename) + if err != nil { + t.Fatal(err) + } + + var jsonLog jsonlog.JSONLogs + if err := json.Unmarshal(res, &jsonLog); err != nil { + t.Fatal(err) + } + extra := make(map[string]string) + if err := json.Unmarshal(jsonLog.RawAttrs, &extra); err != nil { + t.Fatal(err) + } + expected := map[string]string{ + "rack": "101", + "dc": "lhr", + "environ": "production", + "debug": "false", + "ssl": "true", + "dc_region": "west", + 
} + if !reflect.DeepEqual(extra, expected) { + t.Fatalf("Wrong log attrs: %q, expected %q", extra, expected) + } +} + +func BenchmarkJSONFileLoggerWithReader(b *testing.B) { + b.StopTimer() + b.ResetTimer() + cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" + dir, err := ioutil.TempDir("", "json-logger-bench") + if err != nil { + b.Fatal(err) + } + defer os.RemoveAll(dir) + + l, err := New(logger.Info{ + ContainerID: cid, + LogPath: filepath.Join(dir, "container.log"), + }) + if err != nil { + b.Fatal(err) + } + defer l.Close() + msg := &logger.Message{Line: []byte("line"), Source: "src1"} + jsonlog, err := (&jsonlog.JSONLog{Log: string(msg.Line) + "\n", Stream: msg.Source, Created: msg.Timestamp}).MarshalJSON() + if err != nil { + b.Fatal(err) + } + b.SetBytes(int64(len(jsonlog)+1) * 30) + + b.StartTimer() + + go func() { + for i := 0; i < b.N; i++ { + for j := 0; j < 30; j++ { + l.Log(msg) + } + } + l.Close() + }() + + lw := l.(logger.LogReader).ReadLogs(logger.ReadConfig{Follow: true}) + watchClose := lw.WatchClose() + for { + select { + case <-lw.Msg: + case <-watchClose: + return + } + } +} diff --git a/vendor/github.com/moby/moby/daemon/logger/jsonfilelog/multireader/multireader.go b/vendor/github.com/moby/moby/daemon/logger/jsonfilelog/multireader/multireader.go new file mode 100644 index 000000000..1993f1d76 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/jsonfilelog/multireader/multireader.go @@ -0,0 +1,228 @@ +package multireader + +import ( + "bytes" + "fmt" + "io" + "os" +) + +type pos struct { + idx int + offset int64 +} + +type multiReadSeeker struct { + readers []io.ReadSeeker + pos *pos + posIdx map[io.ReadSeeker]int +} + +func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) { + var tmpOffset int64 + switch whence { + case os.SEEK_SET: + for i, rdr := range r.readers { + // get size of the current reader + s, err := rdr.Seek(0, os.SEEK_END) + if err != nil { + return -1, err + } + + if offset > tmpOffset+s { + if i == len(r.readers)-1 { + rdrOffset := s + (offset - tmpOffset) + if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil { + return -1, err + } + r.pos = &pos{i, rdrOffset} + return offset, nil + } + + tmpOffset += s + continue + } + + rdrOffset := offset - tmpOffset + idx := i + + if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil { + return -1, err + } + // make sure all following readers are at 0 + for _, rdr := range r.readers[i+1:] { + rdr.Seek(0, os.SEEK_SET) + } + + if rdrOffset == s && i != len(r.readers)-1 { + idx++ + rdrOffset = 0 + } + r.pos = &pos{idx, rdrOffset} + return offset, nil + } + case os.SEEK_END: + for _, rdr := range r.readers { + s, err := rdr.Seek(0, os.SEEK_END) + if err != nil { + return -1, err + } + tmpOffset += s + } + if _, err := r.Seek(tmpOffset+offset, os.SEEK_SET); err != nil { + return -1, err + } + return tmpOffset + offset, nil + case os.SEEK_CUR: + if r.pos == nil { + return r.Seek(offset, os.SEEK_SET) + } + // Just return the current offset + if offset == 0 { + return r.getCurOffset() + } + + curOffset, err := r.getCurOffset() + if err != nil { + return -1, err + } + rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset) + if err != nil { + return -1, err + } + + r.pos = &pos{r.posIdx[rdr], rdrOffset} + return curOffset + offset, nil + default: + return -1, fmt.Errorf("Invalid whence: %d", whence) + } + + return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset) +} + +func (r *multiReadSeeker) getReaderForOffset(offset int64) 
(io.ReadSeeker, int64, error) { + + var offsetTo int64 + + for _, rdr := range r.readers { + size, err := getReadSeekerSize(rdr) + if err != nil { + return nil, -1, err + } + if offsetTo+size > offset { + return rdr, offset - offsetTo, nil + } + if rdr == r.readers[len(r.readers)-1] { + return rdr, offsetTo + offset, nil + } + offsetTo += size + } + + return nil, 0, nil +} + +func (r *multiReadSeeker) getCurOffset() (int64, error) { + var totalSize int64 + for _, rdr := range r.readers[:r.pos.idx+1] { + if r.posIdx[rdr] == r.pos.idx { + totalSize += r.pos.offset + break + } + + size, err := getReadSeekerSize(rdr) + if err != nil { + return -1, fmt.Errorf("error getting seeker size: %v", err) + } + totalSize += size + } + return totalSize, nil +} + +func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) { + var offset int64 + for _, r := range r.readers { + if r == rdr { + break + } + + size, err := getReadSeekerSize(rdr) + if err != nil { + return -1, err + } + offset += size + } + return offset, nil +} + +func (r *multiReadSeeker) Read(b []byte) (int, error) { + if r.pos == nil { + // make sure all readers are at 0 + r.Seek(0, os.SEEK_SET) + } + + bLen := int64(len(b)) + buf := bytes.NewBuffer(nil) + var rdr io.ReadSeeker + + for _, rdr = range r.readers[r.pos.idx:] { + readBytes, err := io.CopyN(buf, rdr, bLen) + if err != nil && err != io.EOF { + return -1, err + } + bLen -= readBytes + + if bLen == 0 { + break + } + } + + rdrPos, err := rdr.Seek(0, os.SEEK_CUR) + if err != nil { + return -1, err + } + r.pos = &pos{r.posIdx[rdr], rdrPos} + return buf.Read(b) +} + +func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) { + // save the current position + pos, err := rdr.Seek(0, os.SEEK_CUR) + if err != nil { + return -1, err + } + + // get the size + size, err := rdr.Seek(0, os.SEEK_END) + if err != nil { + return -1, err + } + + // reset the position + if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil { + return -1, err + } + return size, nil +} + +// MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided +// input readseekers. After calling this method the initial position is set to the +// beginning of the first ReadSeeker. At the end of a ReadSeeker, Read always advances +// to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker. +// Seek can be used over the sum of lengths of all readseekers. +// +// When a MultiReadSeeker is used, no Read and Seek operations should be made on +// its ReadSeeker components. Also, users should make no assumption on the state +// of individual readseekers while the MultiReadSeeker is used. 
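+//
+// Illustrative sketch (editor's addition, not part of the vendored source):
+//
+//	s1 := strings.NewReader("hello ")
+//	s2 := strings.NewReader("world")
+//	mr := MultiReadSeeker(s1, s2)
+//	b, _ := ioutil.ReadAll(mr) // b == []byte("hello world")
+//	mr.Seek(0, os.SEEK_SET)    // rewinds all underlying readers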
+func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker { + if len(readers) == 1 { + return readers[0] + } + idx := make(map[io.ReadSeeker]int) + for i, rdr := range readers { + idx[rdr] = i + } + return &multiReadSeeker{ + readers: readers, + posIdx: idx, + } +} diff --git a/vendor/github.com/moby/moby/daemon/logger/jsonfilelog/multireader/multireader_test.go b/vendor/github.com/moby/moby/daemon/logger/jsonfilelog/multireader/multireader_test.go new file mode 100644 index 000000000..bd59b78ca --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/jsonfilelog/multireader/multireader_test.go @@ -0,0 +1,225 @@ +package multireader + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "testing" +) + +func TestMultiReadSeekerReadAll(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + expectedSize := int64(s1.Len() + s2.Len() + s3.Len()) + + b, err := ioutil.ReadAll(mr) + if err != nil { + t.Fatal(err) + } + + expected := "hello world 1hello world 2hello world 3" + if string(b) != expected { + t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected) + } + + size, err := mr.Seek(0, os.SEEK_END) + if err != nil { + t.Fatal(err) + } + if size != expectedSize { + t.Fatalf("reader size does not match, got %d, expected %d", size, expectedSize) + } + + // Reset the position and read again + pos, err := mr.Seek(0, os.SEEK_SET) + if err != nil { + t.Fatal(err) + } + if pos != 0 { + t.Fatalf("expected position to be set to 0, got %d", pos) + } + + b, err = ioutil.ReadAll(mr) + if err != nil { + t.Fatal(err) + } + + if string(b) != expected { + t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected) + } + + // The positions of some readers are not 0 + s1.Seek(0, os.SEEK_SET) + s2.Seek(0, os.SEEK_END) + s3.Seek(0, os.SEEK_SET) + mr = MultiReadSeeker(s1, s2, s3) + b, err = ioutil.ReadAll(mr) + if err != nil { + t.Fatal(err) + } + + if string(b) != expected { + t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected) + } +} + +func TestMultiReadSeekerReadEach(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + var totalBytes int64 + for i, s := range []*strings.Reader{s1, s2, s3} { + sLen := int64(s.Len()) + buf := make([]byte, s.Len()) + expected := []byte(fmt.Sprintf("%s %d", str, i+1)) + + if _, err := mr.Read(buf); err != nil && err != io.EOF { + t.Fatal(err) + } + + if !bytes.Equal(buf, expected) { + t.Fatalf("expected %q to be %q", string(buf), string(expected)) + } + + pos, err := mr.Seek(0, os.SEEK_CUR) + if err != nil { + t.Fatalf("iteration: %d, error: %v", i+1, err) + } + + // check that the total bytes read is the current position of the seeker + totalBytes += sLen + if pos != totalBytes { + t.Fatalf("expected current position to be: %d, got: %d, iteration: %d", totalBytes, pos, i+1) + } + + // This tests not only that SEEK_SET and SEEK_CUR give the same values, but that the next iteration is in the expected position as well + newPos, err := mr.Seek(pos, os.SEEK_SET) + if err != nil { + t.Fatal(err) + } + if newPos != pos { + t.Fatalf("expected to get same position when calling SEEK_SET with value from SEEK_CUR, cur: %d, set: %d", pos, newPos) + } + } +} + +func TestMultiReadSeekerReadSpanningChunks(t *testing.T) { + str := "hello 
world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + buf := make([]byte, s1.Len()+3) + _, err := mr.Read(buf) + if err != nil { + t.Fatal(err) + } + + // expected is the contents of s1 + 3 bytes from s2, ie, the `hel` at the end of this string + expected := "hello world 1hel" + if string(buf) != expected { + t.Fatalf("expected %s to be %s", string(buf), expected) + } +} + +func TestMultiReadSeekerNegativeSeek(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + s1Len := s1.Len() + s2Len := s2.Len() + s3Len := s3.Len() + + s, err := mr.Seek(int64(-1*s3.Len()), os.SEEK_END) + if err != nil { + t.Fatal(err) + } + if s != int64(s1Len+s2Len) { + t.Fatalf("expected %d to be %d", s, s1.Len()+s2.Len()) + } + + buf := make([]byte, s3Len) + if _, err := mr.Read(buf); err != nil && err != io.EOF { + t.Fatal(err) + } + expected := fmt.Sprintf("%s %d", str, 3) + if string(buf) != fmt.Sprintf("%s %d", str, 3) { + t.Fatalf("expected %q to be %q", string(buf), expected) + } +} + +func TestMultiReadSeekerCurAfterSet(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + mid := int64(s1.Len() + s2.Len()/2) + + size, err := mr.Seek(mid, os.SEEK_SET) + if err != nil { + t.Fatal(err) + } + if size != mid { + t.Fatalf("reader size does not match, got %d, expected %d", size, mid) + } + + size, err = mr.Seek(3, os.SEEK_CUR) + if err != nil { + t.Fatal(err) + } + if size != mid+3 { + t.Fatalf("reader size does not match, got %d, expected %d", size, mid+3) + } + size, err = mr.Seek(5, os.SEEK_CUR) + if err != nil { + t.Fatal(err) + } + if size != mid+8 { + t.Fatalf("reader size does not match, got %d, expected %d", size, mid+8) + } + + size, err = mr.Seek(10, os.SEEK_CUR) + if err != nil { + t.Fatal(err) + } + if size != mid+18 { + t.Fatalf("reader size does not match, got %d, expected %d", size, mid+18) + } +} + +func TestMultiReadSeekerSmallReads(t *testing.T) { + readers := []io.ReadSeeker{} + for i := 0; i < 10; i++ { + integer := make([]byte, 4) + binary.BigEndian.PutUint32(integer, uint32(i)) + readers = append(readers, bytes.NewReader(integer)) + } + + reader := MultiReadSeeker(readers...) 
+ for i := 0; i < 10; i++ { + var integer uint32 + if err := binary.Read(reader, binary.BigEndian, &integer); err != nil { + t.Fatalf("Read from NewMultiReadSeeker failed: %v", err) + } + if uint32(i) != integer { + t.Fatalf("Read wrong value from NewMultiReadSeeker: %d != %d", i, integer) + } + } +} diff --git a/vendor/github.com/moby/moby/daemon/logger/jsonfilelog/read.go b/vendor/github.com/moby/moby/daemon/logger/jsonfilelog/read.go new file mode 100644 index 000000000..db53fd53e --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/jsonfilelog/read.go @@ -0,0 +1,349 @@ +package jsonfilelog + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "time" + + "github.com/fsnotify/fsnotify" + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/jsonfilelog/multireader" + "github.com/docker/docker/pkg/filenotify" + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/pkg/tailfile" + "github.com/pkg/errors" +) + +const maxJSONDecodeRetry = 20000 + +func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) { + l.Reset() + if err := dec.Decode(l); err != nil { + return nil, err + } + var attrs []backend.LogAttr + if len(l.Attrs) != 0 { + attrs = make([]backend.LogAttr, 0, len(l.Attrs)) + for k, v := range l.Attrs { + attrs = append(attrs, backend.LogAttr{Key: k, Value: v}) + } + } + msg := &logger.Message{ + Source: l.Stream, + Timestamp: l.Created, + Line: []byte(l.Log), + Attrs: attrs, + } + return msg, nil +} + +// ReadLogs implements the logger's LogReader interface for the logs +// created by this driver. +func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { + logWatcher := logger.NewLogWatcher() + + go l.readLogs(logWatcher, config) + return logWatcher +} + +func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) { + defer close(logWatcher.Msg) + + // lock so the read stream doesn't get corrupted due to rotations or other log data written while we open these files + // This will block writes!!! + l.mu.RLock() + + // TODO it would be nice to move a lot of this reader implementation to the rotate logger object + pth := l.writer.LogPath() + var files []io.ReadSeeker + for i := l.writer.MaxFiles(); i > 1; i-- { + f, err := os.Open(fmt.Sprintf("%s.%d", pth, i-1)) + if err != nil { + if !os.IsNotExist(err) { + logWatcher.Err <- err + l.mu.RUnlock() + return + } + continue + } + defer f.Close() + files = append(files, f) + } + + latestFile, err := os.Open(pth) + if err != nil { + logWatcher.Err <- errors.Wrap(err, "error opening latest log file") + l.mu.RUnlock() + return + } + defer latestFile.Close() + + latestChunk, err := newSectionReader(latestFile) + + // Now we have the reader sectioned, all fd's opened, we can unlock. + // New writes/rotates will not affect seeking through these files + l.mu.RUnlock() + + if err != nil { + logWatcher.Err <- err + return + } + + if config.Tail != 0 { + tailer := multireader.MultiReadSeeker(append(files, latestChunk)...) 
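+		// NOTE (editorial): the rotated files and the SectionReader over
+		// the live file are stitched into one seekable stream, so tailFile
+		// can count lines backwards across rotation boundaries.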
+ tailFile(tailer, logWatcher, config.Tail, config.Since) + } + + // close all the rotated files + for _, f := range files { + if err := f.(io.Closer).Close(); err != nil { + logrus.WithField("logger", "json-file").Warnf("error closing tailed log file: %v", err) + } + } + + if !config.Follow || l.closed { + return + } + + notifyRotate := l.writer.NotifyRotate() + defer l.writer.NotifyRotateEvict(notifyRotate) + + l.mu.Lock() + l.readers[logWatcher] = struct{}{} + l.mu.Unlock() + + followLogs(latestFile, logWatcher, notifyRotate, config.Since) + + l.mu.Lock() + delete(l.readers, logWatcher) + l.mu.Unlock() +} + +func newSectionReader(f *os.File) (*io.SectionReader, error) { + // seek to the end to get the size + // we'll leave this at the end of the file since section reader does not advance the reader + size, err := f.Seek(0, os.SEEK_END) + if err != nil { + return nil, errors.Wrap(err, "error getting current file size") + } + return io.NewSectionReader(f, 0, size), nil +} + +func tailFile(f io.ReadSeeker, logWatcher *logger.LogWatcher, tail int, since time.Time) { + var rdr io.Reader + rdr = f + if tail > 0 { + ls, err := tailfile.TailFile(f, tail) + if err != nil { + logWatcher.Err <- err + return + } + rdr = bytes.NewBuffer(bytes.Join(ls, []byte("\n"))) + } + dec := json.NewDecoder(rdr) + l := &jsonlog.JSONLog{} + for { + msg, err := decodeLogLine(dec, l) + if err != nil { + if err != io.EOF { + logWatcher.Err <- err + } + return + } + if !since.IsZero() && msg.Timestamp.Before(since) { + continue + } + select { + case <-logWatcher.WatchClose(): + return + case logWatcher.Msg <- msg: + } + } +} + +func watchFile(name string) (filenotify.FileWatcher, error) { + fileWatcher, err := filenotify.New() + if err != nil { + return nil, err + } + + if err := fileWatcher.Add(name); err != nil { + logrus.WithField("logger", "json-file").Warnf("falling back to file poller due to error: %v", err) + fileWatcher.Close() + fileWatcher = filenotify.NewPollingWatcher() + + if err := fileWatcher.Add(name); err != nil { + fileWatcher.Close() + logrus.Debugf("error watching log file for modifications: %v", err) + return nil, err + } + } + return fileWatcher, nil +} + +func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, since time.Time) { + dec := json.NewDecoder(f) + l := &jsonlog.JSONLog{} + + name := f.Name() + fileWatcher, err := watchFile(name) + if err != nil { + logWatcher.Err <- err + return + } + defer func() { + f.Close() + fileWatcher.Remove(name) + fileWatcher.Close() + }() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + select { + case <-logWatcher.WatchClose(): + fileWatcher.Remove(name) + cancel() + case <-ctx.Done(): + return + } + }() + + var retries int + handleRotate := func() error { + f.Close() + fileWatcher.Remove(name) + + // retry when the file doesn't exist + for retries := 0; retries <= 5; retries++ { + f, err = os.Open(name) + if err == nil || !os.IsNotExist(err) { + break + } + } + if err != nil { + return err + } + if err := fileWatcher.Add(name); err != nil { + return err + } + dec = json.NewDecoder(f) + return nil + } + + errRetry := errors.New("retry") + errDone := errors.New("done") + waitRead := func() error { + select { + case e := <-fileWatcher.Events(): + switch e.Op { + case fsnotify.Write: + dec = json.NewDecoder(f) + return nil + case fsnotify.Rename, fsnotify.Remove: + select { + case <-notifyRotate: + case <-ctx.Done(): + return errDone + } + if err := handleRotate(); err != nil { + 
return err + } + return nil + } + return errRetry + case err := <-fileWatcher.Errors(): + logrus.Debug("logger got error watching file: %v", err) + // Something happened, let's try and stay alive and create a new watcher + if retries <= 5 { + fileWatcher.Close() + fileWatcher, err = watchFile(name) + if err != nil { + return err + } + retries++ + return errRetry + } + return err + case <-ctx.Done(): + return errDone + } + } + + handleDecodeErr := func(err error) error { + if err == io.EOF { + for { + err := waitRead() + if err == nil { + break + } + if err == errRetry { + continue + } + return err + } + return nil + } + // try again because this shouldn't happen + if _, ok := err.(*json.SyntaxError); ok && retries <= maxJSONDecodeRetry { + dec = json.NewDecoder(f) + retries++ + return nil + } + // io.ErrUnexpectedEOF is returned from json.Decoder when there is + // remaining data in the parser's buffer while an io.EOF occurs. + // If the json logger writes a partial json log entry to the disk + // while at the same time the decoder tries to decode it, the race condition happens. + if err == io.ErrUnexpectedEOF && retries <= maxJSONDecodeRetry { + reader := io.MultiReader(dec.Buffered(), f) + dec = json.NewDecoder(reader) + retries++ + return nil + } + return err + } + + // main loop + for { + msg, err := decodeLogLine(dec, l) + if err != nil { + if err := handleDecodeErr(err); err != nil { + if err == errDone { + return + } + // we got an unrecoverable error, so return + logWatcher.Err <- err + return + } + // ready to try again + continue + } + + retries = 0 // reset retries since we've succeeded + if !since.IsZero() && msg.Timestamp.Before(since) { + continue + } + select { + case logWatcher.Msg <- msg: + case <-ctx.Done(): + logWatcher.Msg <- msg + for { + msg, err := decodeLogLine(dec, l) + if err != nil { + return + } + if !since.IsZero() && msg.Timestamp.Before(since) { + continue + } + logWatcher.Msg <- msg + } + } + } +} diff --git a/vendor/github.com/moby/moby/daemon/logger/logentries/logentries.go b/vendor/github.com/moby/moby/daemon/logger/logentries/logentries.go new file mode 100644 index 000000000..a353d9d49 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/logentries/logentries.go @@ -0,0 +1,97 @@ +// Package logentries provides the log driver for forwarding server logs +// to logentries endpoints. +package logentries + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/bsphere/le_go" + "github.com/docker/docker/daemon/logger" +) + +type logentries struct { + tag string + containerID string + containerName string + writer *le_go.Logger + extra map[string]string +} + +const ( + name = "logentries" + token = "logentries-token" +) + +func init() { + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// New creates a logentries logger using the configuration passed in on +// the context. The supported context configuration variable is +// logentries-token. +func New(info logger.Info) (logger.Logger, error) { + logrus.WithField("container", info.ContainerID). + WithField("token", info.Config[token]). 
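+		// NOTE (editorial): this logs the raw logentries token at debug
+		// level, which can leak the credential into the daemon log.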
+ Debug("logging driver logentries configured") + + log, err := le_go.Connect(info.Config[token]) + if err != nil { + return nil, err + } + return &logentries{ + containerID: info.ContainerID, + containerName: info.ContainerName, + writer: log, + }, nil +} + +func (f *logentries) Log(msg *logger.Message) error { + data := map[string]string{ + "container_id": f.containerID, + "container_name": f.containerName, + "source": msg.Source, + "log": string(msg.Line), + } + for k, v := range f.extra { + data[k] = v + } + ts := msg.Timestamp + logger.PutMessage(msg) + f.writer.Println(f.tag, ts, data) + return nil +} + +func (f *logentries) Close() error { + return f.writer.Close() +} + +func (f *logentries) Name() string { + return name +} + +// ValidateLogOpt looks for logentries specific log option logentries-address. +func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case "env": + case "env-regex": + case "labels": + case "tag": + case key: + default: + return fmt.Errorf("unknown log opt '%s' for logentries log driver", key) + } + } + + if cfg[token] == "" { + return fmt.Errorf("Missing logentries token") + } + + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/logger/logger.go b/vendor/github.com/moby/moby/daemon/logger/logger.go new file mode 100644 index 000000000..258a5dc5f --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/logger.go @@ -0,0 +1,131 @@ +// Package logger defines interfaces that logger drivers implement to +// log messages. +// +// The other half of a logger driver is the implementation of the +// factory, which holds the contextual instance information that +// allows multiple loggers of the same type to perform different +// actions, such as logging to different locations. +package logger + +import ( + "errors" + "sync" + "time" + + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/pkg/jsonlog" +) + +// ErrReadLogsNotSupported is returned when the logger does not support reading logs. +var ErrReadLogsNotSupported = errors.New("configured logging driver does not support reading") + +const ( + // TimeFormat is the time format used for timestamps sent to log readers. + TimeFormat = jsonlog.RFC3339NanoFixed + logWatcherBufferSize = 4096 +) + +var messagePool = &sync.Pool{New: func() interface{} { return &Message{Line: make([]byte, 0, 256)} }} + +// NewMessage returns a new message from the message sync.Pool +func NewMessage() *Message { + return messagePool.Get().(*Message) +} + +// PutMessage puts the specified message back n the message pool. +// The message fields are reset before putting into the pool. +func PutMessage(msg *Message) { + msg.reset() + messagePool.Put(msg) +} + +// Message is datastructure that represents piece of output produced by some +// container. The Line member is a slice of an array whose contents can be +// changed after a log driver's Log() method returns. +// +// Message is subtyped from backend.LogMessage because there is a lot of +// internal complexity around the Message type that should not be exposed +// to any package not explicitly importing the logger type. +// +// Any changes made to this struct must also be updated in the `reset` function +type Message backend.LogMessage + +// reset sets the message back to default values +// This is used when putting a message back into the message pool. +// Any changes to the `Message` struct should be reflected here. 
+func (m *Message) reset() { + m.Line = m.Line[:0] + m.Source = "" + m.Attrs = nil + m.Partial = false + + m.Err = nil +} + +// AsLogMessage returns a pointer to the message as a pointer to +// backend.LogMessage, which is an identical type with a different purpose +func (m *Message) AsLogMessage() *backend.LogMessage { + return (*backend.LogMessage)(m) +} + +// Logger is the interface for docker logging drivers. +type Logger interface { + Log(*Message) error + Name() string + Close() error +} + +// ReadConfig is the configuration passed into ReadLogs. +type ReadConfig struct { + Since time.Time + Tail int + Follow bool +} + +// LogReader is the interface for reading log messages for loggers that support reading. +type LogReader interface { + // Read logs from underlying logging backend + ReadLogs(ReadConfig) *LogWatcher +} + +// LogWatcher is used when consuming logs read from the LogReader interface. +type LogWatcher struct { + // For sending log messages to a reader. + Msg chan *Message + // For sending error messages that occur while while reading logs. + Err chan error + closeOnce sync.Once + closeNotifier chan struct{} +} + +// NewLogWatcher returns a new LogWatcher. +func NewLogWatcher() *LogWatcher { + return &LogWatcher{ + Msg: make(chan *Message, logWatcherBufferSize), + Err: make(chan error, 1), + closeNotifier: make(chan struct{}), + } +} + +// Close notifies the underlying log reader to stop. +func (w *LogWatcher) Close() { + // only close if not already closed + w.closeOnce.Do(func() { + close(w.closeNotifier) + }) +} + +// WatchClose returns a channel receiver that receives notification +// when the watcher has been closed. This should only be called from +// one goroutine. +func (w *LogWatcher) WatchClose() <-chan struct{} { + return w.closeNotifier +} + +// Capability defines the list of capabilties that a driver can implement +// These capabilities are not required to be a logging driver, however do +// determine how a logging driver can be used +type Capability struct { + // Determines if a log driver can read back logs + ReadLogs bool +} diff --git a/vendor/github.com/moby/moby/daemon/logger/logger_test.go b/vendor/github.com/moby/moby/daemon/logger/logger_test.go new file mode 100644 index 000000000..15f9b8145 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/logger_test.go @@ -0,0 +1,21 @@ +package logger + +import ( + "github.com/docker/docker/api/types/backend" +) + +func (m *Message) copy() *Message { + msg := &Message{ + Source: m.Source, + Partial: m.Partial, + Timestamp: m.Timestamp, + } + + if m.Attrs != nil { + msg.Attrs = make([]backend.LogAttr, len(m.Attrs)) + copy(msg.Attrs, m.Attrs) + } + + msg.Line = append(make([]byte, 0, len(m.Line)), m.Line...) + return msg +} diff --git a/vendor/github.com/moby/moby/daemon/logger/loggerutils/log_tag.go b/vendor/github.com/moby/moby/daemon/logger/loggerutils/log_tag.go new file mode 100644 index 000000000..f801047c4 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/loggerutils/log_tag.go @@ -0,0 +1,31 @@ +package loggerutils + +import ( + "bytes" + + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/pkg/templates" +) + +// DefaultTemplate defines the defaults template logger should use. +const DefaultTemplate = "{{.ID}}" + +// ParseLogTag generates a context aware tag for consistency across different +// log drivers based on the context of the running container. 
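+//
+// Illustrative sketch (editor's addition, not part of the vendored source;
+// mirrors the tests below):
+//
+//	info.Config["tag"] = "{{.ImageName}}/{{.Name}}/{{.ID}}"
+//	tag, err := ParseLogTag(info, DefaultTemplate)
+//	// for the test fixture this yields "test-image/test-container/container-ab"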
+func ParseLogTag(info logger.Info, defaultTemplate string) (string, error) { + tagTemplate := info.Config["tag"] + if tagTemplate == "" { + tagTemplate = defaultTemplate + } + + tmpl, err := templates.NewParse("log-tag", tagTemplate) + if err != nil { + return "", err + } + buf := new(bytes.Buffer) + if err := tmpl.Execute(buf, &info); err != nil { + return "", err + } + + return buf.String(), nil +} diff --git a/vendor/github.com/moby/moby/daemon/logger/loggerutils/log_tag_test.go b/vendor/github.com/moby/moby/daemon/logger/loggerutils/log_tag_test.go new file mode 100644 index 000000000..1a6d9f151 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/loggerutils/log_tag_test.go @@ -0,0 +1,47 @@ +package loggerutils + +import ( + "testing" + + "github.com/docker/docker/daemon/logger" +) + +func TestParseLogTagDefaultTag(t *testing.T) { + info := buildContext(map[string]string{}) + tag, e := ParseLogTag(info, "{{.ID}}") + assertTag(t, e, tag, info.ID()) +} + +func TestParseLogTag(t *testing.T) { + info := buildContext(map[string]string{"tag": "{{.ImageName}}/{{.Name}}/{{.ID}}"}) + tag, e := ParseLogTag(info, "{{.ID}}") + assertTag(t, e, tag, "test-image/test-container/container-ab") +} + +func TestParseLogTagEmptyTag(t *testing.T) { + info := buildContext(map[string]string{}) + tag, e := ParseLogTag(info, "{{.DaemonName}}/{{.ID}}") + assertTag(t, e, tag, "test-dockerd/container-ab") +} + +// Helpers + +func buildContext(cfg map[string]string) logger.Info { + return logger.Info{ + ContainerID: "container-abcdefghijklmnopqrstuvwxyz01234567890", + ContainerName: "/test-container", + ContainerImageID: "image-abcdefghijklmnopqrstuvwxyz01234567890", + ContainerImageName: "test-image", + Config: cfg, + DaemonName: "test-dockerd", + } +} + +func assertTag(t *testing.T, e error, tag string, expected string) { + if e != nil { + t.Fatalf("Error generating tag: %q", e) + } + if tag != expected { + t.Fatalf("Wrong tag: %q, should be %q", tag, expected) + } +} diff --git a/vendor/github.com/moby/moby/daemon/logger/loggerutils/rotatefilewriter.go b/vendor/github.com/moby/moby/daemon/logger/loggerutils/rotatefilewriter.go new file mode 100644 index 000000000..457a39b5a --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/loggerutils/rotatefilewriter.go @@ -0,0 +1,141 @@ +package loggerutils + +import ( + "errors" + "os" + "strconv" + "sync" + + "github.com/docker/docker/pkg/pubsub" +) + +// RotateFileWriter is Logger implementation for default Docker logging. 
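+//
+// Illustrative sketch (editor's addition, not part of the vendored source;
+// the path and sizes are assumed):
+//
+//	w, err := NewRotateFileWriter("/var/log/app.log", 1024*1024, 3)
+//	// once a Write brings currentSize up to capacity, app.log.1 is renamed
+//	// to app.log.2, app.log to app.log.1, a fresh app.log is opened, and
+//	// every subscriber obtained via NotifyRotate() is notified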
+type RotateFileWriter struct { + f *os.File // store for closing + closed bool + mu sync.Mutex + capacity int64 //maximum size of each file + currentSize int64 // current size of the latest file + maxFiles int //maximum number of files + notifyRotate *pubsub.Publisher +} + +//NewRotateFileWriter creates new RotateFileWriter +func NewRotateFileWriter(logPath string, capacity int64, maxFiles int) (*RotateFileWriter, error) { + log, err := os.OpenFile(logPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0640) + if err != nil { + return nil, err + } + + size, err := log.Seek(0, os.SEEK_END) + if err != nil { + return nil, err + } + + return &RotateFileWriter{ + f: log, + capacity: capacity, + currentSize: size, + maxFiles: maxFiles, + notifyRotate: pubsub.NewPublisher(0, 1), + }, nil +} + +//WriteLog write log message to File +func (w *RotateFileWriter) Write(message []byte) (int, error) { + w.mu.Lock() + if w.closed { + w.mu.Unlock() + return -1, errors.New("cannot write because the output file was closed") + } + if err := w.checkCapacityAndRotate(); err != nil { + w.mu.Unlock() + return -1, err + } + + n, err := w.f.Write(message) + if err == nil { + w.currentSize += int64(n) + } + w.mu.Unlock() + return n, err +} + +func (w *RotateFileWriter) checkCapacityAndRotate() error { + if w.capacity == -1 { + return nil + } + + if w.currentSize >= w.capacity { + name := w.f.Name() + if err := w.f.Close(); err != nil { + return err + } + if err := rotate(name, w.maxFiles); err != nil { + return err + } + file, err := os.OpenFile(name, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0640) + if err != nil { + return err + } + w.f = file + w.currentSize = 0 + w.notifyRotate.Publish(struct{}{}) + } + + return nil +} + +func rotate(name string, maxFiles int) error { + if maxFiles < 2 { + return nil + } + for i := maxFiles - 1; i > 1; i-- { + toPath := name + "." + strconv.Itoa(i) + fromPath := name + "." + strconv.Itoa(i-1) + if err := os.Rename(fromPath, toPath); err != nil && !os.IsNotExist(err) { + return err + } + } + + if err := os.Rename(name, name+".1"); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// LogPath returns the location the given writer logs to. +func (w *RotateFileWriter) LogPath() string { + w.mu.Lock() + defer w.mu.Unlock() + return w.f.Name() +} + +// MaxFiles return maximum number of files +func (w *RotateFileWriter) MaxFiles() int { + return w.maxFiles +} + +//NotifyRotate returns the new subscriber +func (w *RotateFileWriter) NotifyRotate() chan interface{} { + return w.notifyRotate.Subscribe() +} + +//NotifyRotateEvict removes the specified subscriber from receiving any more messages. +func (w *RotateFileWriter) NotifyRotateEvict(sub chan interface{}) { + w.notifyRotate.Evict(sub) +} + +// Close closes underlying file and signals all readers to stop. +func (w *RotateFileWriter) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + if w.closed { + return nil + } + if err := w.f.Close(); err != nil { + return err + } + w.closed = true + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/logger/loginfo.go b/vendor/github.com/moby/moby/daemon/logger/loginfo.go new file mode 100644 index 000000000..4c930b905 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/loginfo.go @@ -0,0 +1,129 @@ +package logger + +import ( + "fmt" + "os" + "regexp" + "strings" + "time" +) + +// Info provides enough information for a logging driver to do its function. 
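+//
+// Illustrative sketch (editor's addition, not part of the vendored source;
+// the label and env values are assumed):
+//
+//	// with Config{"labels": "rack", "env": "environ"}, a container labelled
+//	// rack=101 and started with environ=production yields
+//	// map[string]string{"rack": "101", "environ": "production"}
+//	attrs, err := info.ExtraAttributes(nil)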
+type Info struct { + Config map[string]string + ContainerID string + ContainerName string + ContainerEntrypoint string + ContainerArgs []string + ContainerImageID string + ContainerImageName string + ContainerCreated time.Time + ContainerEnv []string + ContainerLabels map[string]string + LogPath string + DaemonName string +} + +// ExtraAttributes returns the user-defined extra attributes (labels, +// environment variables) in key-value format. This can be used by log drivers +// that support metadata to add more context to a log. +func (info *Info) ExtraAttributes(keyMod func(string) string) (map[string]string, error) { + extra := make(map[string]string) + labels, ok := info.Config["labels"] + if ok && len(labels) > 0 { + for _, l := range strings.Split(labels, ",") { + if v, ok := info.ContainerLabels[l]; ok { + if keyMod != nil { + l = keyMod(l) + } + extra[l] = v + } + } + } + + envMapping := make(map[string]string) + for _, e := range info.ContainerEnv { + if kv := strings.SplitN(e, "=", 2); len(kv) == 2 { + envMapping[kv[0]] = kv[1] + } + } + + env, ok := info.Config["env"] + if ok && len(env) > 0 { + for _, l := range strings.Split(env, ",") { + if v, ok := envMapping[l]; ok { + if keyMod != nil { + l = keyMod(l) + } + extra[l] = v + } + } + } + + envRegex, ok := info.Config["env-regex"] + if ok && len(envRegex) > 0 { + re, err := regexp.Compile(envRegex) + if err != nil { + return nil, err + } + for k, v := range envMapping { + if re.MatchString(k) { + if keyMod != nil { + k = keyMod(k) + } + extra[k] = v + } + } + } + + return extra, nil +} + +// Hostname returns the hostname from the underlying OS. +func (info *Info) Hostname() (string, error) { + hostname, err := os.Hostname() + if err != nil { + return "", fmt.Errorf("logger: can not resolve hostname: %v", err) + } + return hostname, nil +} + +// Command returns the command that the container being logged was +// started with. The Entrypoint is prepended to the container +// arguments. +func (info *Info) Command() string { + terms := []string{info.ContainerEntrypoint} + terms = append(terms, info.ContainerArgs...) + command := strings.Join(terms, " ") + return command +} + +// ID Returns the Container ID shortened to 12 characters. +func (info *Info) ID() string { + return info.ContainerID[:12] +} + +// FullID is an alias of ContainerID. +func (info *Info) FullID() string { + return info.ContainerID +} + +// Name returns the ContainerName without a preceding '/'. +func (info *Info) Name() string { + return strings.TrimPrefix(info.ContainerName, "/") +} + +// ImageID returns the ContainerImageID shortened to 12 characters. +func (info *Info) ImageID() string { + return info.ContainerImageID[:12] +} + +// ImageFullID is an alias of ContainerImageID. 
+func (info *Info) ImageFullID() string { + return info.ContainerImageID +} + +// ImageName is an alias of ContainerImageName +func (info *Info) ImageName() string { + return info.ContainerImageName +} diff --git a/vendor/github.com/moby/moby/daemon/logger/plugin.go b/vendor/github.com/moby/moby/daemon/logger/plugin.go new file mode 100644 index 000000000..bdccea5b2 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/plugin.go @@ -0,0 +1,90 @@ +package logger + +import ( + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/api/types/plugins/logdriver" + getter "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/stringid" + "github.com/pkg/errors" +) + +var pluginGetter getter.PluginGetter + +const extName = "LogDriver" + +// logPlugin defines the available functions that logging plugins must implement. +type logPlugin interface { + StartLogging(streamPath string, info Info) (err error) + StopLogging(streamPath string) (err error) + Capabilities() (cap Capability, err error) + ReadLogs(info Info, config ReadConfig) (stream io.ReadCloser, err error) +} + +// RegisterPluginGetter sets the plugingetter +func RegisterPluginGetter(plugingetter getter.PluginGetter) { + pluginGetter = plugingetter +} + +// GetDriver returns a logging driver by its name. +// If the driver is empty, it looks for the local driver. +func getPlugin(name string, mode int) (Creator, error) { + p, err := pluginGetter.Get(name, extName, mode) + if err != nil { + return nil, fmt.Errorf("error looking up logging plugin %s: %v", name, err) + } + + d := &logPluginProxy{p.Client()} + return makePluginCreator(name, d, p.BasePath()), nil +} + +func makePluginCreator(name string, l *logPluginProxy, basePath string) Creator { + return func(logCtx Info) (logger Logger, err error) { + defer func() { + if err != nil { + pluginGetter.Get(name, extName, getter.Release) + } + }() + root := filepath.Join(basePath, "run", "docker", "logging") + if err := os.MkdirAll(root, 0700); err != nil { + return nil, err + } + + id := stringid.GenerateNonCryptoID() + a := &pluginAdapter{ + driverName: name, + id: id, + plugin: l, + basePath: basePath, + fifoPath: filepath.Join(root, id), + logInfo: logCtx, + } + + cap, err := a.plugin.Capabilities() + if err == nil { + a.capabilities = cap + } + + stream, err := openPluginStream(a) + if err != nil { + return nil, err + } + + a.stream = stream + a.enc = logdriver.NewLogEntryEncoder(a.stream) + + if err := l.StartLogging(strings.TrimPrefix(a.fifoPath, basePath), logCtx); err != nil { + return nil, errors.Wrapf(err, "error creating logger") + } + + if cap.ReadLogs { + return &pluginAdapterWithRead{a}, nil + } + + return a, nil + } +} diff --git a/vendor/github.com/moby/moby/daemon/logger/plugin_unix.go b/vendor/github.com/moby/moby/daemon/logger/plugin_unix.go new file mode 100644 index 000000000..f254c9c57 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/plugin_unix.go @@ -0,0 +1,20 @@ +// +build linux solaris freebsd + +package logger + +import ( + "context" + "io" + + "github.com/pkg/errors" + "github.com/tonistiigi/fifo" + "golang.org/x/sys/unix" +) + +func openPluginStream(a *pluginAdapter) (io.WriteCloser, error) { + f, err := fifo.OpenFifo(context.Background(), a.fifoPath, unix.O_WRONLY|unix.O_CREAT|unix.O_NONBLOCK, 0700) + if err != nil { + return nil, errors.Wrapf(err, "error creating i/o pipe for log plugin: %s", a.Name()) + } + return f, nil +} diff --git 
a/vendor/github.com/moby/moby/daemon/logger/plugin_unsupported.go b/vendor/github.com/moby/moby/daemon/logger/plugin_unsupported.go new file mode 100644 index 000000000..0a2036c83 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/plugin_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux,!solaris,!freebsd + +package logger + +import ( + "errors" + "io" +) + +func openPluginStream(a *pluginAdapter) (io.WriteCloser, error) { + return nil, errors.New("log plugin not supported") +} diff --git a/vendor/github.com/moby/moby/daemon/logger/proxy.go b/vendor/github.com/moby/moby/daemon/logger/proxy.go new file mode 100644 index 000000000..53860eba6 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/proxy.go @@ -0,0 +1,107 @@ +package logger + +import ( + "errors" + "io" +) + +type client interface { + Call(string, interface{}, interface{}) error + Stream(string, interface{}) (io.ReadCloser, error) +} + +type logPluginProxy struct { + client +} + +type logPluginProxyStartLoggingRequest struct { + File string + Info Info +} + +type logPluginProxyStartLoggingResponse struct { + Err string +} + +func (pp *logPluginProxy) StartLogging(file string, info Info) (err error) { + var ( + req logPluginProxyStartLoggingRequest + ret logPluginProxyStartLoggingResponse + ) + + req.File = file + req.Info = info + if err = pp.Call("LogDriver.StartLogging", req, &ret); err != nil { + return + } + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type logPluginProxyStopLoggingRequest struct { + File string +} + +type logPluginProxyStopLoggingResponse struct { + Err string +} + +func (pp *logPluginProxy) StopLogging(file string) (err error) { + var ( + req logPluginProxyStopLoggingRequest + ret logPluginProxyStopLoggingResponse + ) + + req.File = file + if err = pp.Call("LogDriver.StopLogging", req, &ret); err != nil { + return + } + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type logPluginProxyCapabilitiesResponse struct { + Cap Capability + Err string +} + +func (pp *logPluginProxy) Capabilities() (cap Capability, err error) { + var ( + ret logPluginProxyCapabilitiesResponse + ) + + if err = pp.Call("LogDriver.Capabilities", nil, &ret); err != nil { + return + } + + cap = ret.Cap + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type logPluginProxyReadLogsRequest struct { + Info Info + Config ReadConfig +} + +func (pp *logPluginProxy) ReadLogs(info Info, config ReadConfig) (stream io.ReadCloser, err error) { + var ( + req logPluginProxyReadLogsRequest + ) + + req.Info = info + req.Config = config + return pp.Stream("LogDriver.ReadLogs", req) +} diff --git a/vendor/github.com/moby/moby/daemon/logger/ring.go b/vendor/github.com/moby/moby/daemon/logger/ring.go new file mode 100644 index 000000000..5c5595547 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/ring.go @@ -0,0 +1,218 @@ +package logger + +import ( + "errors" + "sync" + "sync/atomic" + + "github.com/Sirupsen/logrus" +) + +const ( + defaultRingMaxSize = 1e6 // 1MB +) + +// RingLogger is a ring buffer that implements the Logger interface. +// This is used when lossy logging is OK. 
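+//
+// Illustrative sketch (editor's addition, not part of the vendored source;
+// driver, info and msg are assumed to exist):
+//
+//	rl := NewRingLogger(driver, info, -1) // -1 selects defaultRingMaxSize (1MB)
+//	_ = rl.Log(msg) // never blocks; once full, new messages are dropped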
+type RingLogger struct { + buffer *messageRing + l Logger + logInfo Info + closeFlag int32 +} + +type ringWithReader struct { + *RingLogger +} + +func (r *ringWithReader) ReadLogs(cfg ReadConfig) *LogWatcher { + reader, ok := r.l.(LogReader) + if !ok { + // something is wrong if we get here + panic("expected log reader") + } + return reader.ReadLogs(cfg) +} + +func newRingLogger(driver Logger, logInfo Info, maxSize int64) *RingLogger { + l := &RingLogger{ + buffer: newRing(maxSize), + l: driver, + logInfo: logInfo, + } + go l.run() + return l +} + +// NewRingLogger creates a new Logger that is implemented as a RingBuffer wrapping +// the passed in logger. +func NewRingLogger(driver Logger, logInfo Info, maxSize int64) Logger { + if maxSize < 0 { + maxSize = defaultRingMaxSize + } + l := newRingLogger(driver, logInfo, maxSize) + if _, ok := driver.(LogReader); ok { + return &ringWithReader{l} + } + return l +} + +// Log queues messages into the ring buffer +func (r *RingLogger) Log(msg *Message) error { + if r.closed() { + return errClosed + } + return r.buffer.Enqueue(msg) +} + +// Name returns the name of the underlying logger +func (r *RingLogger) Name() string { + return r.l.Name() +} + +func (r *RingLogger) closed() bool { + return atomic.LoadInt32(&r.closeFlag) == 1 +} + +func (r *RingLogger) setClosed() { + atomic.StoreInt32(&r.closeFlag, 1) +} + +// Close closes the logger +func (r *RingLogger) Close() error { + r.setClosed() + r.buffer.Close() + // empty out the queue + var logErr bool + for _, msg := range r.buffer.Drain() { + if logErr { + // some error logging a previous message, so re-insert to message pool + // and assume log driver is hosed + PutMessage(msg) + continue + } + + if err := r.l.Log(msg); err != nil { + logrus.WithField("driver", r.l.Name()).WithField("container", r.logInfo.ContainerID).Errorf("Error writing log message: %v", r.l) + logErr = true + } + } + return r.l.Close() +} + +// run consumes messages from the ring buffer and forwards them to the underling +// logger. +// This is run in a goroutine when the RingLogger is created +func (r *RingLogger) run() { + for { + if r.closed() { + return + } + msg, err := r.buffer.Dequeue() + if err != nil { + // buffer is closed + return + } + if err := r.l.Log(msg); err != nil { + logrus.WithField("driver", r.l.Name()).WithField("container", r.logInfo.ContainerID).Errorf("Error writing log message: %v", r.l) + } + } +} + +type messageRing struct { + mu sync.Mutex + // signals callers of `Dequeue` to wake up either on `Close` or when a new `Message` is added + wait *sync.Cond + + sizeBytes int64 // current buffer size + maxBytes int64 // max buffer size size + queue []*Message + closed bool +} + +func newRing(maxBytes int64) *messageRing { + queueSize := 1000 + if maxBytes == 0 || maxBytes == 1 { + // With 0 or 1 max byte size, the maximum size of the queue would only ever be 1 + // message long. + queueSize = 1 + } + + r := &messageRing{queue: make([]*Message, 0, queueSize), maxBytes: maxBytes} + r.wait = sync.NewCond(&r.mu) + return r +} + +// Enqueue adds a message to the buffer queue +// If the message is too big for the buffer it drops the oldest messages to make room +// If there are no messages in the queue and the message is still too big, it adds the message anyway. 
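+//
+// NOTE (editorial): as implemented below, when the queue is non-empty and a
+// new message would push sizeBytes past maxBytes, it is the incoming message
+// that is silently discarded (see TestRingCap), not the oldest queued one.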
+func (r *messageRing) Enqueue(m *Message) error { + mSize := int64(len(m.Line)) + + r.mu.Lock() + if r.closed { + r.mu.Unlock() + return errClosed + } + if mSize+r.sizeBytes > r.maxBytes && len(r.queue) > 0 { + r.wait.Signal() + r.mu.Unlock() + return nil + } + + r.queue = append(r.queue, m) + r.sizeBytes += mSize + r.wait.Signal() + r.mu.Unlock() + return nil +} + +// Dequeue pulls a message off the queue +// If there are no messages, it waits for one. +// If the buffer is closed, it will return immediately. +func (r *messageRing) Dequeue() (*Message, error) { + r.mu.Lock() + for len(r.queue) == 0 && !r.closed { + r.wait.Wait() + } + + if r.closed { + r.mu.Unlock() + return nil, errClosed + } + + msg := r.queue[0] + r.queue = r.queue[1:] + r.sizeBytes -= int64(len(msg.Line)) + r.mu.Unlock() + return msg, nil +} + +var errClosed = errors.New("closed") + +// Close closes the buffer ensuring no new messages can be added. +// Any callers waiting to dequeue a message will be woken up. +func (r *messageRing) Close() { + r.mu.Lock() + if r.closed { + r.mu.Unlock() + return + } + + r.closed = true + r.wait.Broadcast() + r.mu.Unlock() + return +} + +// Drain drains all messages from the queue. +// This can be used after `Close()` to get any remaining messages that were in queue. +func (r *messageRing) Drain() []*Message { + r.mu.Lock() + ls := make([]*Message, 0, len(r.queue)) + ls = append(ls, r.queue...) + r.sizeBytes = 0 + r.queue = r.queue[:0] + r.mu.Unlock() + return ls +} diff --git a/vendor/github.com/moby/moby/daemon/logger/ring_test.go b/vendor/github.com/moby/moby/daemon/logger/ring_test.go new file mode 100644 index 000000000..9afbb44d2 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/ring_test.go @@ -0,0 +1,299 @@ +package logger + +import ( + "context" + "strconv" + "testing" + "time" +) + +type mockLogger struct{ c chan *Message } + +func (l *mockLogger) Log(msg *Message) error { + l.c <- msg + return nil +} + +func (l *mockLogger) Name() string { + return "mock" +} + +func (l *mockLogger) Close() error { + return nil +} + +func TestRingLogger(t *testing.T) { + mockLog := &mockLogger{make(chan *Message)} // no buffer on this channel + ring := newRingLogger(mockLog, Info{}, 1) + defer ring.setClosed() + + // this should never block + ring.Log(&Message{Line: []byte("1")}) + ring.Log(&Message{Line: []byte("2")}) + ring.Log(&Message{Line: []byte("3")}) + + select { + case msg := <-mockLog.c: + if string(msg.Line) != "1" { + t.Fatalf("got unexpected msg: %q", string(msg.Line)) + } + case <-time.After(100 * time.Millisecond): + t.Fatal("timeout reading log message") + } + + select { + case msg := <-mockLog.c: + t.Fatalf("expected no more messages in the queue, got: %q", string(msg.Line)) + default: + } +} + +func TestRingCap(t *testing.T) { + r := newRing(5) + for i := 0; i < 10; i++ { + // queue messages with "0" to "10" + // the "5" to "10" messages should be dropped since we only allow 5 bytes in the buffer + if err := r.Enqueue(&Message{Line: []byte(strconv.Itoa(i))}); err != nil { + t.Fatal(err) + } + } + + // should have messages in the queue for "5" to "10" + for i := 0; i < 5; i++ { + m, err := r.Dequeue() + if err != nil { + t.Fatal(err) + } + if string(m.Line) != strconv.Itoa(i) { + t.Fatalf("got unexpected message for iter %d: %s", i, string(m.Line)) + } + } + + // queue a message that's bigger than the buffer cap + if err := r.Enqueue(&Message{Line: []byte("hello world")}); err != nil { + t.Fatal(err) + } + + // queue another message that's bigger than the buffer 
cap + if err := r.Enqueue(&Message{Line: []byte("eat a banana")}); err != nil { + t.Fatal(err) + } + + m, err := r.Dequeue() + if err != nil { + t.Fatal(err) + } + if string(m.Line) != "hello world" { + t.Fatalf("got unexpected message: %s", string(m.Line)) + } + if len(r.queue) != 0 { + t.Fatalf("expected queue to be empty, got: %d", len(r.queue)) + } +} + +func TestRingClose(t *testing.T) { + r := newRing(1) + if err := r.Enqueue(&Message{Line: []byte("hello")}); err != nil { + t.Fatal(err) + } + r.Close() + if err := r.Enqueue(&Message{}); err != errClosed { + t.Fatalf("expected errClosed, got: %v", err) + } + if len(r.queue) != 1 { + t.Fatal("expected empty queue") + } + if m, err := r.Dequeue(); err == nil || m != nil { + t.Fatal("expected err on Dequeue after close") + } + + ls := r.Drain() + if len(ls) != 1 { + t.Fatalf("expected one message: %v", ls) + } + if string(ls[0].Line) != "hello" { + t.Fatalf("got unexpected message: %s", string(ls[0].Line)) + } +} + +func TestRingDrain(t *testing.T) { + r := newRing(5) + for i := 0; i < 5; i++ { + if err := r.Enqueue(&Message{Line: []byte(strconv.Itoa(i))}); err != nil { + t.Fatal(err) + } + } + + ls := r.Drain() + if len(ls) != 5 { + t.Fatal("got unexpected length after drain") + } + + for i := 0; i < 5; i++ { + if string(ls[i].Line) != strconv.Itoa(i) { + t.Fatalf("got unexpected message at position %d: %s", i, string(ls[i].Line)) + } + } + if r.sizeBytes != 0 { + t.Fatalf("expected buffer size to be 0 after drain, got: %d", r.sizeBytes) + } + + ls = r.Drain() + if len(ls) != 0 { + t.Fatalf("expected 0 messages on 2nd drain: %v", ls) + } + +} + +type nopLogger struct{} + +func (nopLogger) Name() string { return "nopLogger" } +func (nopLogger) Close() error { return nil } +func (nopLogger) Log(*Message) error { return nil } + +func BenchmarkRingLoggerThroughputNoReceiver(b *testing.B) { + mockLog := &mockLogger{make(chan *Message)} + defer mockLog.Close() + l := NewRingLogger(mockLog, Info{}, -1) + msg := &Message{Line: []byte("hello humans and everyone else!")} + b.SetBytes(int64(len(msg.Line))) + + for i := 0; i < b.N; i++ { + if err := l.Log(msg); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkRingLoggerThroughputWithReceiverDelay0(b *testing.B) { + l := NewRingLogger(nopLogger{}, Info{}, -1) + msg := &Message{Line: []byte("hello humans and everyone else!")} + b.SetBytes(int64(len(msg.Line))) + + for i := 0; i < b.N; i++ { + if err := l.Log(msg); err != nil { + b.Fatal(err) + } + } +} + +func consumeWithDelay(delay time.Duration, c <-chan *Message) (cancel func()) { + started := make(chan struct{}) + ctx, cancel := context.WithCancel(context.Background()) + go func() { + close(started) + ticker := time.NewTicker(delay) + for range ticker.C { + select { + case <-ctx.Done(): + ticker.Stop() + return + case <-c: + } + } + }() + <-started + return cancel +} + +func BenchmarkRingLoggerThroughputConsumeDelay1(b *testing.B) { + mockLog := &mockLogger{make(chan *Message)} + defer mockLog.Close() + l := NewRingLogger(mockLog, Info{}, -1) + msg := &Message{Line: []byte("hello humans and everyone else!")} + b.SetBytes(int64(len(msg.Line))) + + cancel := consumeWithDelay(1*time.Millisecond, mockLog.c) + defer cancel() + + for i := 0; i < b.N; i++ { + if err := l.Log(msg); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkRingLoggerThroughputConsumeDelay10(b *testing.B) { + mockLog := &mockLogger{make(chan *Message)} + defer mockLog.Close() + l := NewRingLogger(mockLog, Info{}, -1) + msg := &Message{Line: []byte("hello humans and 
everyone else!")} + b.SetBytes(int64(len(msg.Line))) + + cancel := consumeWithDelay(10*time.Millisecond, mockLog.c) + defer cancel() + + for i := 0; i < b.N; i++ { + if err := l.Log(msg); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkRingLoggerThroughputConsumeDelay50(b *testing.B) { + mockLog := &mockLogger{make(chan *Message)} + defer mockLog.Close() + l := NewRingLogger(mockLog, Info{}, -1) + msg := &Message{Line: []byte("hello humans and everyone else!")} + b.SetBytes(int64(len(msg.Line))) + + cancel := consumeWithDelay(50*time.Millisecond, mockLog.c) + defer cancel() + + for i := 0; i < b.N; i++ { + if err := l.Log(msg); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkRingLoggerThroughputConsumeDelay100(b *testing.B) { + mockLog := &mockLogger{make(chan *Message)} + defer mockLog.Close() + l := NewRingLogger(mockLog, Info{}, -1) + msg := &Message{Line: []byte("hello humans and everyone else!")} + b.SetBytes(int64(len(msg.Line))) + + cancel := consumeWithDelay(100*time.Millisecond, mockLog.c) + defer cancel() + + for i := 0; i < b.N; i++ { + if err := l.Log(msg); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkRingLoggerThroughputConsumeDelay300(b *testing.B) { + mockLog := &mockLogger{make(chan *Message)} + defer mockLog.Close() + l := NewRingLogger(mockLog, Info{}, -1) + msg := &Message{Line: []byte("hello humans and everyone else!")} + b.SetBytes(int64(len(msg.Line))) + + cancel := consumeWithDelay(300*time.Millisecond, mockLog.c) + defer cancel() + + for i := 0; i < b.N; i++ { + if err := l.Log(msg); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkRingLoggerThroughputConsumeDelay500(b *testing.B) { + mockLog := &mockLogger{make(chan *Message)} + defer mockLog.Close() + l := NewRingLogger(mockLog, Info{}, -1) + msg := &Message{Line: []byte("hello humans and everyone else!")} + b.SetBytes(int64(len(msg.Line))) + + cancel := consumeWithDelay(500*time.Millisecond, mockLog.c) + defer cancel() + + for i := 0; i < b.N; i++ { + if err := l.Log(msg); err != nil { + b.Fatal(err) + } + } +} diff --git a/vendor/github.com/moby/moby/daemon/logger/splunk/splunk.go b/vendor/github.com/moby/moby/daemon/logger/splunk/splunk.go new file mode 100644 index 000000000..233a2db96 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/splunk/splunk.go @@ -0,0 +1,626 @@ +// Package splunk provides the log driver for forwarding server logs to +// Splunk HTTP Event Collector endpoint. 
+package splunk + +import ( + "bytes" + "compress/gzip" + "crypto/tls" + "crypto/x509" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "strconv" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/pkg/urlutil" +) + +const ( + driverName = "splunk" + splunkURLKey = "splunk-url" + splunkTokenKey = "splunk-token" + splunkSourceKey = "splunk-source" + splunkSourceTypeKey = "splunk-sourcetype" + splunkIndexKey = "splunk-index" + splunkCAPathKey = "splunk-capath" + splunkCANameKey = "splunk-caname" + splunkInsecureSkipVerifyKey = "splunk-insecureskipverify" + splunkFormatKey = "splunk-format" + splunkVerifyConnectionKey = "splunk-verify-connection" + splunkGzipCompressionKey = "splunk-gzip" + splunkGzipCompressionLevelKey = "splunk-gzip-level" + envKey = "env" + envRegexKey = "env-regex" + labelsKey = "labels" + tagKey = "tag" +) + +const ( + // How often do we send messages (if we are not reaching batch size) + defaultPostMessagesFrequency = 5 * time.Second + // How big can be batch of messages + defaultPostMessagesBatchSize = 1000 + // Maximum number of messages we can store in buffer + defaultBufferMaximum = 10 * defaultPostMessagesBatchSize + // Number of messages allowed to be queued in the channel + defaultStreamChannelSize = 4 * defaultPostMessagesBatchSize +) + +const ( + envVarPostMessagesFrequency = "SPLUNK_LOGGING_DRIVER_POST_MESSAGES_FREQUENCY" + envVarPostMessagesBatchSize = "SPLUNK_LOGGING_DRIVER_POST_MESSAGES_BATCH_SIZE" + envVarBufferMaximum = "SPLUNK_LOGGING_DRIVER_BUFFER_MAX" + envVarStreamChannelSize = "SPLUNK_LOGGING_DRIVER_CHANNEL_SIZE" +) + +type splunkLoggerInterface interface { + logger.Logger + worker() +} + +type splunkLogger struct { + client *http.Client + transport *http.Transport + + url string + auth string + nullMessage *splunkMessage + + // http compression + gzipCompression bool + gzipCompressionLevel int + + // Advanced options + postMessagesFrequency time.Duration + postMessagesBatchSize int + bufferMaximum int + + // For synchronization between background worker and logger. + // We use channel to send messages to worker go routine. 
+ // All other variables for blocking Close call before we flush all messages to HEC + stream chan *splunkMessage + lock sync.RWMutex + closed bool + closedCond *sync.Cond +} + +type splunkLoggerInline struct { + *splunkLogger + + nullEvent *splunkMessageEvent +} + +type splunkLoggerJSON struct { + *splunkLoggerInline +} + +type splunkLoggerRaw struct { + *splunkLogger + + prefix []byte +} + +type splunkMessage struct { + Event interface{} `json:"event"` + Time string `json:"time"` + Host string `json:"host"` + Source string `json:"source,omitempty"` + SourceType string `json:"sourcetype,omitempty"` + Index string `json:"index,omitempty"` +} + +type splunkMessageEvent struct { + Line interface{} `json:"line"` + Source string `json:"source"` + Tag string `json:"tag,omitempty"` + Attrs map[string]string `json:"attrs,omitempty"` +} + +const ( + splunkFormatRaw = "raw" + splunkFormatJSON = "json" + splunkFormatInline = "inline" +) + +func init() { + if err := logger.RegisterLogDriver(driverName, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(driverName, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// New creates splunk logger driver using configuration passed in context +func New(info logger.Info) (logger.Logger, error) { + hostname, err := info.Hostname() + if err != nil { + return nil, fmt.Errorf("%s: cannot access hostname to set source field", driverName) + } + + // Parse and validate Splunk URL + splunkURL, err := parseURL(info) + if err != nil { + return nil, err + } + + // Splunk Token is required parameter + splunkToken, ok := info.Config[splunkTokenKey] + if !ok { + return nil, fmt.Errorf("%s: %s is expected", driverName, splunkTokenKey) + } + + tlsConfig := &tls.Config{} + + // Splunk is using autogenerated certificates by default, + // allow users to trust them with skipping verification + if insecureSkipVerifyStr, ok := info.Config[splunkInsecureSkipVerifyKey]; ok { + insecureSkipVerify, err := strconv.ParseBool(insecureSkipVerifyStr) + if err != nil { + return nil, err + } + tlsConfig.InsecureSkipVerify = insecureSkipVerify + } + + // If path to the root certificate is provided - load it + if caPath, ok := info.Config[splunkCAPathKey]; ok { + caCert, err := ioutil.ReadFile(caPath) + if err != nil { + return nil, err + } + caPool := x509.NewCertPool() + caPool.AppendCertsFromPEM(caCert) + tlsConfig.RootCAs = caPool + } + + if caName, ok := info.Config[splunkCANameKey]; ok { + tlsConfig.ServerName = caName + } + + gzipCompression := false + if gzipCompressionStr, ok := info.Config[splunkGzipCompressionKey]; ok { + gzipCompression, err = strconv.ParseBool(gzipCompressionStr) + if err != nil { + return nil, err + } + } + + gzipCompressionLevel := gzip.DefaultCompression + if gzipCompressionLevelStr, ok := info.Config[splunkGzipCompressionLevelKey]; ok { + var err error + gzipCompressionLevel64, err := strconv.ParseInt(gzipCompressionLevelStr, 10, 32) + if err != nil { + return nil, err + } + gzipCompressionLevel = int(gzipCompressionLevel64) + if gzipCompressionLevel < gzip.DefaultCompression || gzipCompressionLevel > gzip.BestCompression { + err := fmt.Errorf("Not supported level '%s' for %s (supported values between %d and %d).", + gzipCompressionLevelStr, splunkGzipCompressionLevelKey, gzip.DefaultCompression, gzip.BestCompression) + return nil, err + } + } + + transport := &http.Transport{ + TLSClientConfig: tlsConfig, + } + client := &http.Client{ + Transport: transport, + } + + source := info.Config[splunkSourceKey] + 
sourceType := info.Config[splunkSourceTypeKey]
+	index := info.Config[splunkIndexKey]
+
+	var nullMessage = &splunkMessage{
+		Host:       hostname,
+		Source:     source,
+		SourceType: sourceType,
+		Index:      index,
+	}
+
+	// Allow the user to remove the tag from messages by setting the tag option to the empty string
+	tag := ""
+	if tagTemplate, ok := info.Config[tagKey]; !ok || tagTemplate != "" {
+		tag, err = loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	attrs, err := info.ExtraAttributes(nil)
+	if err != nil {
+		return nil, err
+	}
+
+	var (
+		postMessagesFrequency = getAdvancedOptionDuration(envVarPostMessagesFrequency, defaultPostMessagesFrequency)
+		postMessagesBatchSize = getAdvancedOptionInt(envVarPostMessagesBatchSize, defaultPostMessagesBatchSize)
+		bufferMaximum         = getAdvancedOptionInt(envVarBufferMaximum, defaultBufferMaximum)
+		streamChannelSize     = getAdvancedOptionInt(envVarStreamChannelSize, defaultStreamChannelSize)
+	)
+
+	logger := &splunkLogger{
+		client:                client,
+		transport:             transport,
+		url:                   splunkURL.String(),
+		auth:                  "Splunk " + splunkToken,
+		nullMessage:           nullMessage,
+		gzipCompression:       gzipCompression,
+		gzipCompressionLevel:  gzipCompressionLevel,
+		stream:                make(chan *splunkMessage, streamChannelSize),
+		postMessagesFrequency: postMessagesFrequency,
+		postMessagesBatchSize: postMessagesBatchSize,
+		bufferMaximum:         bufferMaximum,
+	}
+
+	// By default we verify the connection, but we allow the user to skip that
+	verifyConnection := true
+	if verifyConnectionStr, ok := info.Config[splunkVerifyConnectionKey]; ok {
+		var err error
+		verifyConnection, err = strconv.ParseBool(verifyConnectionStr)
+		if err != nil {
+			return nil, err
+		}
+	}
+	if verifyConnection {
+		err = verifySplunkConnection(logger)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	var splunkFormat string
+	if splunkFormatParsed, ok := info.Config[splunkFormatKey]; ok {
+		switch splunkFormatParsed {
+		case splunkFormatInline:
+		case splunkFormatJSON:
+		case splunkFormatRaw:
+		default:
+			return nil, fmt.Errorf("Unknown format specified %s, supported formats are inline, json and raw", splunkFormatParsed)
+		}
+		splunkFormat = splunkFormatParsed
+	} else {
+		splunkFormat = splunkFormatInline
+	}
+
+	var loggerWrapper splunkLoggerInterface
+
+	switch splunkFormat {
+	case splunkFormatInline:
+		nullEvent := &splunkMessageEvent{
+			Tag:   tag,
+			Attrs: attrs,
+		}
+
+		loggerWrapper = &splunkLoggerInline{logger, nullEvent}
+	case splunkFormatJSON:
+		nullEvent := &splunkMessageEvent{
+			Tag:   tag,
+			Attrs: attrs,
+		}
+
+		loggerWrapper = &splunkLoggerJSON{&splunkLoggerInline{logger, nullEvent}}
+	case splunkFormatRaw:
+		var prefix bytes.Buffer
+		if tag != "" {
+			prefix.WriteString(tag)
+			prefix.WriteString(" ")
+		}
+		for key, value := range attrs {
+			prefix.WriteString(key)
+			prefix.WriteString("=")
+			prefix.WriteString(value)
+			prefix.WriteString(" ")
+		}
+
+		loggerWrapper = &splunkLoggerRaw{logger, prefix.Bytes()}
+	default:
+		return nil, fmt.Errorf("Unexpected format %s", splunkFormat)
+	}
+
+	go loggerWrapper.worker()
+
+	return loggerWrapper, nil
+}
+
+func (l *splunkLoggerInline) Log(msg *logger.Message) error {
+	message := l.createSplunkMessage(msg)
+
+	event := *l.nullEvent
+	event.Line = string(msg.Line)
+	event.Source = msg.Source
+
+	message.Event = &event
+	logger.PutMessage(msg)
+	return l.queueMessageAsync(message)
+}
+
+func (l *splunkLoggerJSON) Log(msg *logger.Message) error {
+	message := l.createSplunkMessage(msg)
+	event := *l.nullEvent
+
+	var rawJSONMessage json.RawMessage
+	if err := 
json.Unmarshal(msg.Line, &rawJSONMessage); err == nil { + event.Line = &rawJSONMessage + } else { + event.Line = string(msg.Line) + } + + event.Source = msg.Source + + message.Event = &event + logger.PutMessage(msg) + return l.queueMessageAsync(message) +} + +func (l *splunkLoggerRaw) Log(msg *logger.Message) error { + message := l.createSplunkMessage(msg) + + message.Event = string(append(l.prefix, msg.Line...)) + logger.PutMessage(msg) + return l.queueMessageAsync(message) +} + +func (l *splunkLogger) queueMessageAsync(message *splunkMessage) error { + l.lock.RLock() + defer l.lock.RUnlock() + if l.closedCond != nil { + return fmt.Errorf("%s: driver is closed", driverName) + } + l.stream <- message + return nil +} + +func (l *splunkLogger) worker() { + timer := time.NewTicker(l.postMessagesFrequency) + var messages []*splunkMessage + for { + select { + case message, open := <-l.stream: + if !open { + l.postMessages(messages, true) + l.lock.Lock() + defer l.lock.Unlock() + l.transport.CloseIdleConnections() + l.closed = true + l.closedCond.Signal() + return + } + messages = append(messages, message) + // Only sending when we get exactly to the batch size, + // This also helps not to fire postMessages on every new message, + // when previous try failed. + if len(messages)%l.postMessagesBatchSize == 0 { + messages = l.postMessages(messages, false) + } + case <-timer.C: + messages = l.postMessages(messages, false) + } + } +} + +func (l *splunkLogger) postMessages(messages []*splunkMessage, lastChance bool) []*splunkMessage { + messagesLen := len(messages) + for i := 0; i < messagesLen; i += l.postMessagesBatchSize { + upperBound := i + l.postMessagesBatchSize + if upperBound > messagesLen { + upperBound = messagesLen + } + if err := l.tryPostMessages(messages[i:upperBound]); err != nil { + logrus.Error(err) + if messagesLen-i >= l.bufferMaximum || lastChance { + // If this is last chance - print them all to the daemon log + if lastChance { + upperBound = messagesLen + } + // Not all sent, but buffer has got to its maximum, let's log all messages + // we could not send and return buffer minus one batch size + for j := i; j < upperBound; j++ { + if jsonEvent, err := json.Marshal(messages[j]); err != nil { + logrus.Error(err) + } else { + logrus.Error(fmt.Errorf("Failed to send a message '%s'", string(jsonEvent))) + } + } + return messages[upperBound:messagesLen] + } + // Not all sent, returning buffer from where we have not sent messages + return messages[i:messagesLen] + } + } + // All sent, return empty buffer + return messages[:0] +} + +func (l *splunkLogger) tryPostMessages(messages []*splunkMessage) error { + if len(messages) == 0 { + return nil + } + var buffer bytes.Buffer + var writer io.Writer + var gzipWriter *gzip.Writer + var err error + // If gzip compression is enabled - create gzip writer with specified compression + // level. 
If gzip compression is disabled, use standard buffer as a writer + if l.gzipCompression { + gzipWriter, err = gzip.NewWriterLevel(&buffer, l.gzipCompressionLevel) + if err != nil { + return err + } + writer = gzipWriter + } else { + writer = &buffer + } + for _, message := range messages { + jsonEvent, err := json.Marshal(message) + if err != nil { + return err + } + if _, err := writer.Write(jsonEvent); err != nil { + return err + } + } + // If gzip compression is enabled, tell it, that we are done + if l.gzipCompression { + err = gzipWriter.Close() + if err != nil { + return err + } + } + req, err := http.NewRequest("POST", l.url, bytes.NewBuffer(buffer.Bytes())) + if err != nil { + return err + } + req.Header.Set("Authorization", l.auth) + // Tell if we are sending gzip compressed body + if l.gzipCompression { + req.Header.Set("Content-Encoding", "gzip") + } + res, err := l.client.Do(req) + if err != nil { + return err + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + var body []byte + body, err = ioutil.ReadAll(res.Body) + if err != nil { + return err + } + return fmt.Errorf("%s: failed to send event - %s - %s", driverName, res.Status, body) + } + io.Copy(ioutil.Discard, res.Body) + return nil +} + +func (l *splunkLogger) Close() error { + l.lock.Lock() + defer l.lock.Unlock() + if l.closedCond == nil { + l.closedCond = sync.NewCond(&l.lock) + close(l.stream) + for !l.closed { + l.closedCond.Wait() + } + } + return nil +} + +func (l *splunkLogger) Name() string { + return driverName +} + +func (l *splunkLogger) createSplunkMessage(msg *logger.Message) *splunkMessage { + message := *l.nullMessage + message.Time = fmt.Sprintf("%f", float64(msg.Timestamp.UnixNano())/float64(time.Second)) + return &message +} + +// ValidateLogOpt looks for all supported by splunk driver options +func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case splunkURLKey: + case splunkTokenKey: + case splunkSourceKey: + case splunkSourceTypeKey: + case splunkIndexKey: + case splunkCAPathKey: + case splunkCANameKey: + case splunkInsecureSkipVerifyKey: + case splunkFormatKey: + case splunkVerifyConnectionKey: + case splunkGzipCompressionKey: + case splunkGzipCompressionLevelKey: + case envKey: + case envRegexKey: + case labelsKey: + case tagKey: + default: + return fmt.Errorf("unknown log opt '%s' for %s log driver", key, driverName) + } + } + return nil +} + +func parseURL(info logger.Info) (*url.URL, error) { + splunkURLStr, ok := info.Config[splunkURLKey] + if !ok { + return nil, fmt.Errorf("%s: %s is expected", driverName, splunkURLKey) + } + + splunkURL, err := url.Parse(splunkURLStr) + if err != nil { + return nil, fmt.Errorf("%s: failed to parse %s as url value in %s", driverName, splunkURLStr, splunkURLKey) + } + + if !urlutil.IsURL(splunkURLStr) || + !splunkURL.IsAbs() || + (splunkURL.Path != "" && splunkURL.Path != "/") || + splunkURL.RawQuery != "" || + splunkURL.Fragment != "" { + return nil, fmt.Errorf("%s: expected format scheme://dns_name_or_ip:port for %s", driverName, splunkURLKey) + } + + splunkURL.Path = "/services/collector/event/1.0" + + return splunkURL, nil +} + +func verifySplunkConnection(l *splunkLogger) error { + req, err := http.NewRequest(http.MethodOptions, l.url, nil) + if err != nil { + return err + } + res, err := l.client.Do(req) + if err != nil { + return err + } + if res.Body != nil { + defer res.Body.Close() + } + if res.StatusCode != http.StatusOK { + var body []byte + body, err = ioutil.ReadAll(res.Body) + if err != 
nil {
+			return err
+		}
+		return fmt.Errorf("%s: failed to verify connection - %s - %s", driverName, res.Status, body)
+	}
+	return nil
+}
+
+func getAdvancedOptionDuration(envName string, defaultValue time.Duration) time.Duration {
+	valueStr := os.Getenv(envName)
+	if valueStr == "" {
+		return defaultValue
+	}
+	parsedValue, err := time.ParseDuration(valueStr)
+	if err != nil {
+		logrus.Errorf("Failed to parse value of %s as duration. Using default %v. %v", envName, defaultValue, err)
+		return defaultValue
+	}
+	return parsedValue
+}
+
+func getAdvancedOptionInt(envName string, defaultValue int) int {
+	valueStr := os.Getenv(envName)
+	if valueStr == "" {
+		return defaultValue
+	}
+	parsedValue, err := strconv.ParseInt(valueStr, 10, 32)
+	if err != nil {
+		logrus.Errorf("Failed to parse value of %s as integer. Using default %d. %v", envName, defaultValue, err)
+		return defaultValue
+	}
+	return int(parsedValue)
+}
diff --git a/vendor/github.com/moby/moby/daemon/logger/splunk/splunk_test.go b/vendor/github.com/moby/moby/daemon/logger/splunk/splunk_test.go
new file mode 100644
index 000000000..cbe9a55bf
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/logger/splunk/splunk_test.go
@@ -0,0 +1,1306 @@
+package splunk
+
+import (
+	"compress/gzip"
+	"fmt"
+	"os"
+	"testing"
+	"time"
+
+	"github.com/docker/docker/daemon/logger"
+)
+
+// Validate options
+func TestValidateLogOpt(t *testing.T) {
+	err := ValidateLogOpt(map[string]string{
+		splunkURLKey:                  "http://127.0.0.1",
+		splunkTokenKey:                "2160C7EF-2CE9-4307-A180-F852B99CF417",
+		splunkSourceKey:               "mysource",
+		splunkSourceTypeKey:           "mysourcetype",
+		splunkIndexKey:                "myindex",
+		splunkCAPathKey:               "/usr/cert.pem",
+		splunkCANameKey:               "ca_name",
+		splunkInsecureSkipVerifyKey:   "true",
+		splunkFormatKey:               "json",
+		splunkVerifyConnectionKey:     "true",
+		splunkGzipCompressionKey:      "true",
+		splunkGzipCompressionLevelKey: "1",
+		envKey:                        "a",
+		envRegexKey:                   "^foo",
+		labelsKey:                     "b",
+		tagKey:                        "c",
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = ValidateLogOpt(map[string]string{
+		"not-supported-option": "a",
+	})
+	if err == nil {
+		t.Fatal("Expecting error on unsupported options")
+	}
+}
+
+// The driver requires the user to specify the required options
+func TestNewMissedConfig(t *testing.T) {
+	info := logger.Info{
+		Config: map[string]string{},
+	}
+	_, err := New(info)
+	if err == nil {
+		t.Fatal("Logger driver should fail when no required parameters specified")
+	}
+}
+
+// The driver requires the user to specify splunk-url
+func TestNewMissedUrl(t *testing.T) {
+	info := logger.Info{
+		Config: map[string]string{
+			splunkTokenKey: "4642492F-D8BD-47F1-A005-0C08AE4657DF",
+		},
+	}
+	_, err := New(info)
+	if err.Error() != "splunk: splunk-url is expected" {
+		t.Fatal("Logger driver should fail when no required parameters specified")
+	}
+}
+
+// The driver requires the user to specify splunk-token
+func TestNewMissedToken(t *testing.T) {
+	info := logger.Info{
+		Config: map[string]string{
+			splunkURLKey: "http://127.0.0.1:8088",
+		},
+	}
+	_, err := New(info)
+	if err.Error() != "splunk: splunk-token is expected" {
+		t.Fatal("Logger driver should fail when no required parameters specified")
+	}
+}
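+
+// Illustrative sketch (not part of the upstream file): with the default inline
+// format, each HTTP POST the driver sends to the HEC carries one or more
+// concatenated JSON events of roughly this shape, where "time" is the message
+// timestamp rendered as fractional seconds via fmt.Sprintf("%f", ...):
+//
+//	{"event":{"line":"{\"a\":\"b\"}","source":"stdout","tag":"containeriid"},"time":"1501102133.000000","host":"host1"}
+//
+// The tests below assert exactly these fields for the default configuration.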
"container_image_name", + } + + hostname, err := info.Hostname() + if err != nil { + t.Fatal(err) + } + + loggerDriver, err := New(info) + if err != nil { + t.Fatal(err) + } + + if loggerDriver.Name() != driverName { + t.Fatal("Unexpected logger driver name") + } + + if !hec.connectionVerified { + t.Fatal("By default connection should be verified") + } + + splunkLoggerDriver, ok := loggerDriver.(*splunkLoggerInline) + if !ok { + t.Fatal("Unexpected Splunk Logging Driver type") + } + + if splunkLoggerDriver.url != hec.URL()+"/services/collector/event/1.0" || + splunkLoggerDriver.auth != "Splunk "+hec.token || + splunkLoggerDriver.nullMessage.Host != hostname || + splunkLoggerDriver.nullMessage.Source != "" || + splunkLoggerDriver.nullMessage.SourceType != "" || + splunkLoggerDriver.nullMessage.Index != "" || + splunkLoggerDriver.gzipCompression != false || + splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || + splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || + splunkLoggerDriver.bufferMaximum != defaultBufferMaximum || + cap(splunkLoggerDriver.stream) != defaultStreamChannelSize { + t.Fatal("Found not default values setup in Splunk Logging Driver.") + } + + message1Time := time.Now() + if err := loggerDriver.Log(&logger.Message{Line: []byte("{\"a\":\"b\"}"), Source: "stdout", Timestamp: message1Time}); err != nil { + t.Fatal(err) + } + message2Time := time.Now() + if err := loggerDriver.Log(&logger.Message{Line: []byte("notajson"), Source: "stdout", Timestamp: message2Time}); err != nil { + t.Fatal(err) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 2 { + t.Fatal("Expected two messages") + } + + if *hec.gzipEnabled { + t.Fatal("Gzip should not be used") + } + + message1 := hec.messages[0] + if message1.Time != fmt.Sprintf("%f", float64(message1Time.UnixNano())/float64(time.Second)) || + message1.Host != hostname || + message1.Source != "" || + message1.SourceType != "" || + message1.Index != "" { + t.Fatalf("Unexpected values of message 1 %v", message1) + } + + if event, err := message1.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != "{\"a\":\"b\"}" || + event["source"] != "stdout" || + event["tag"] != "containeriid" || + len(event) != 3 { + t.Fatalf("Unexpected event in message %v", event) + } + } + + message2 := hec.messages[1] + if message2.Time != fmt.Sprintf("%f", float64(message2Time.UnixNano())/float64(time.Second)) || + message2.Host != hostname || + message2.Source != "" || + message2.SourceType != "" || + message2.Index != "" { + t.Fatalf("Unexpected values of message 1 %v", message2) + } + + if event, err := message2.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != "notajson" || + event["source"] != "stdout" || + event["tag"] != "containeriid" || + len(event) != 3 { + t.Fatalf("Unexpected event in message %v", event) + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} + +// Verify inline format with a not default settings for most of options +func TestInlineFormatWithNonDefaultOptions(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + info := logger.Info{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + splunkSourceKey: "mysource", + splunkSourceTypeKey: "mysourcetype", + splunkIndexKey: "myindex", + splunkFormatKey: splunkFormatInline, + splunkGzipCompressionKey: "true", + tagKey: "{{.ImageName}}/{{.Name}}", + labelsKey: "a", + envRegexKey: 
"^foo", + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + ContainerLabels: map[string]string{ + "a": "b", + }, + ContainerEnv: []string{"foo_finder=bar"}, + } + + hostname, err := info.Hostname() + if err != nil { + t.Fatal(err) + } + + loggerDriver, err := New(info) + if err != nil { + t.Fatal(err) + } + + if !hec.connectionVerified { + t.Fatal("By default connection should be verified") + } + + splunkLoggerDriver, ok := loggerDriver.(*splunkLoggerInline) + if !ok { + t.Fatal("Unexpected Splunk Logging Driver type") + } + + if splunkLoggerDriver.url != hec.URL()+"/services/collector/event/1.0" || + splunkLoggerDriver.auth != "Splunk "+hec.token || + splunkLoggerDriver.nullMessage.Host != hostname || + splunkLoggerDriver.nullMessage.Source != "mysource" || + splunkLoggerDriver.nullMessage.SourceType != "mysourcetype" || + splunkLoggerDriver.nullMessage.Index != "myindex" || + splunkLoggerDriver.gzipCompression != true || + splunkLoggerDriver.gzipCompressionLevel != gzip.DefaultCompression || + splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || + splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || + splunkLoggerDriver.bufferMaximum != defaultBufferMaximum || + cap(splunkLoggerDriver.stream) != defaultStreamChannelSize { + t.Fatal("Values do not match configuration.") + } + + messageTime := time.Now() + if err := loggerDriver.Log(&logger.Message{Line: []byte("1"), Source: "stdout", Timestamp: messageTime}); err != nil { + t.Fatal(err) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 1 { + t.Fatal("Expected one message") + } + + if !*hec.gzipEnabled { + t.Fatal("Gzip should be used") + } + + message := hec.messages[0] + if message.Time != fmt.Sprintf("%f", float64(messageTime.UnixNano())/float64(time.Second)) || + message.Host != hostname || + message.Source != "mysource" || + message.SourceType != "mysourcetype" || + message.Index != "myindex" { + t.Fatalf("Unexpected values of message %v", message) + } + + if event, err := message.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != "1" || + event["source"] != "stdout" || + event["tag"] != "container_image_name/container_name" || + event["attrs"].(map[string]interface{})["a"] != "b" || + event["attrs"].(map[string]interface{})["foo_finder"] != "bar" || + len(event) != 4 { + t.Fatalf("Unexpected event in message %v", event) + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} + +// Verify JSON format +func TestJsonFormat(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + info := logger.Info{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + splunkFormatKey: splunkFormatJSON, + splunkGzipCompressionKey: "true", + splunkGzipCompressionLevelKey: "1", + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + hostname, err := info.Hostname() + if err != nil { + t.Fatal(err) + } + + loggerDriver, err := New(info) + if err != nil { + t.Fatal(err) + } + + if !hec.connectionVerified { + t.Fatal("By default connection should be verified") + } + + splunkLoggerDriver, ok := loggerDriver.(*splunkLoggerJSON) + if !ok { + t.Fatal("Unexpected Splunk Logging Driver type") + } + + if splunkLoggerDriver.url != hec.URL()+"/services/collector/event/1.0" || + 
splunkLoggerDriver.auth != "Splunk "+hec.token || + splunkLoggerDriver.nullMessage.Host != hostname || + splunkLoggerDriver.nullMessage.Source != "" || + splunkLoggerDriver.nullMessage.SourceType != "" || + splunkLoggerDriver.nullMessage.Index != "" || + splunkLoggerDriver.gzipCompression != true || + splunkLoggerDriver.gzipCompressionLevel != gzip.BestSpeed || + splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || + splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || + splunkLoggerDriver.bufferMaximum != defaultBufferMaximum || + cap(splunkLoggerDriver.stream) != defaultStreamChannelSize { + t.Fatal("Values do not match configuration.") + } + + message1Time := time.Now() + if err := loggerDriver.Log(&logger.Message{Line: []byte("{\"a\":\"b\"}"), Source: "stdout", Timestamp: message1Time}); err != nil { + t.Fatal(err) + } + message2Time := time.Now() + if err := loggerDriver.Log(&logger.Message{Line: []byte("notjson"), Source: "stdout", Timestamp: message2Time}); err != nil { + t.Fatal(err) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 2 { + t.Fatal("Expected two messages") + } + + message1 := hec.messages[0] + if message1.Time != fmt.Sprintf("%f", float64(message1Time.UnixNano())/float64(time.Second)) || + message1.Host != hostname || + message1.Source != "" || + message1.SourceType != "" || + message1.Index != "" { + t.Fatalf("Unexpected values of message 1 %v", message1) + } + + if event, err := message1.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"].(map[string]interface{})["a"] != "b" || + event["source"] != "stdout" || + event["tag"] != "containeriid" || + len(event) != 3 { + t.Fatalf("Unexpected event in message 1 %v", event) + } + } + + message2 := hec.messages[1] + if message2.Time != fmt.Sprintf("%f", float64(message2Time.UnixNano())/float64(time.Second)) || + message2.Host != hostname || + message2.Source != "" || + message2.SourceType != "" || + message2.Index != "" { + t.Fatalf("Unexpected values of message 2 %v", message2) + } + + // If message cannot be parsed as JSON - it should be sent as a line + if event, err := message2.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != "notjson" || + event["source"] != "stdout" || + event["tag"] != "containeriid" || + len(event) != 3 { + t.Fatalf("Unexpected event in message 2 %v", event) + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} + +// Verify raw format +func TestRawFormat(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + info := logger.Info{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + splunkFormatKey: splunkFormatRaw, + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + hostname, err := info.Hostname() + if err != nil { + t.Fatal(err) + } + + loggerDriver, err := New(info) + if err != nil { + t.Fatal(err) + } + + if !hec.connectionVerified { + t.Fatal("By default connection should be verified") + } + + splunkLoggerDriver, ok := loggerDriver.(*splunkLoggerRaw) + if !ok { + t.Fatal("Unexpected Splunk Logging Driver type") + } + + if splunkLoggerDriver.url != hec.URL()+"/services/collector/event/1.0" || + splunkLoggerDriver.auth != "Splunk "+hec.token || + splunkLoggerDriver.nullMessage.Host != hostname || + splunkLoggerDriver.nullMessage.Source != "" || + 
splunkLoggerDriver.nullMessage.SourceType != "" || + splunkLoggerDriver.nullMessage.Index != "" || + splunkLoggerDriver.gzipCompression != false || + splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || + splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || + splunkLoggerDriver.bufferMaximum != defaultBufferMaximum || + cap(splunkLoggerDriver.stream) != defaultStreamChannelSize || + string(splunkLoggerDriver.prefix) != "containeriid " { + t.Fatal("Values do not match configuration.") + } + + message1Time := time.Now() + if err := loggerDriver.Log(&logger.Message{Line: []byte("{\"a\":\"b\"}"), Source: "stdout", Timestamp: message1Time}); err != nil { + t.Fatal(err) + } + message2Time := time.Now() + if err := loggerDriver.Log(&logger.Message{Line: []byte("notjson"), Source: "stdout", Timestamp: message2Time}); err != nil { + t.Fatal(err) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 2 { + t.Fatal("Expected two messages") + } + + message1 := hec.messages[0] + if message1.Time != fmt.Sprintf("%f", float64(message1Time.UnixNano())/float64(time.Second)) || + message1.Host != hostname || + message1.Source != "" || + message1.SourceType != "" || + message1.Index != "" { + t.Fatalf("Unexpected values of message 1 %v", message1) + } + + if event, err := message1.EventAsString(); err != nil { + t.Fatal(err) + } else { + if event != "containeriid {\"a\":\"b\"}" { + t.Fatalf("Unexpected event in message 1 %v", event) + } + } + + message2 := hec.messages[1] + if message2.Time != fmt.Sprintf("%f", float64(message2Time.UnixNano())/float64(time.Second)) || + message2.Host != hostname || + message2.Source != "" || + message2.SourceType != "" || + message2.Index != "" { + t.Fatalf("Unexpected values of message 2 %v", message2) + } + + if event, err := message2.EventAsString(); err != nil { + t.Fatal(err) + } else { + if event != "containeriid notjson" { + t.Fatalf("Unexpected event in message 1 %v", event) + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} + +// Verify raw format with labels +func TestRawFormatWithLabels(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + info := logger.Info{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + splunkFormatKey: splunkFormatRaw, + labelsKey: "a", + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + ContainerLabels: map[string]string{ + "a": "b", + }, + } + + hostname, err := info.Hostname() + if err != nil { + t.Fatal(err) + } + + loggerDriver, err := New(info) + if err != nil { + t.Fatal(err) + } + + if !hec.connectionVerified { + t.Fatal("By default connection should be verified") + } + + splunkLoggerDriver, ok := loggerDriver.(*splunkLoggerRaw) + if !ok { + t.Fatal("Unexpected Splunk Logging Driver type") + } + + if splunkLoggerDriver.url != hec.URL()+"/services/collector/event/1.0" || + splunkLoggerDriver.auth != "Splunk "+hec.token || + splunkLoggerDriver.nullMessage.Host != hostname || + splunkLoggerDriver.nullMessage.Source != "" || + splunkLoggerDriver.nullMessage.SourceType != "" || + splunkLoggerDriver.nullMessage.Index != "" || + splunkLoggerDriver.gzipCompression != false || + splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || + splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || + splunkLoggerDriver.bufferMaximum != 
defaultBufferMaximum || + cap(splunkLoggerDriver.stream) != defaultStreamChannelSize || + string(splunkLoggerDriver.prefix) != "containeriid a=b " { + t.Fatal("Values do not match configuration.") + } + + message1Time := time.Now() + if err := loggerDriver.Log(&logger.Message{Line: []byte("{\"a\":\"b\"}"), Source: "stdout", Timestamp: message1Time}); err != nil { + t.Fatal(err) + } + message2Time := time.Now() + if err := loggerDriver.Log(&logger.Message{Line: []byte("notjson"), Source: "stdout", Timestamp: message2Time}); err != nil { + t.Fatal(err) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 2 { + t.Fatal("Expected two messages") + } + + message1 := hec.messages[0] + if message1.Time != fmt.Sprintf("%f", float64(message1Time.UnixNano())/float64(time.Second)) || + message1.Host != hostname || + message1.Source != "" || + message1.SourceType != "" || + message1.Index != "" { + t.Fatalf("Unexpected values of message 1 %v", message1) + } + + if event, err := message1.EventAsString(); err != nil { + t.Fatal(err) + } else { + if event != "containeriid a=b {\"a\":\"b\"}" { + t.Fatalf("Unexpected event in message 1 %v", event) + } + } + + message2 := hec.messages[1] + if message2.Time != fmt.Sprintf("%f", float64(message2Time.UnixNano())/float64(time.Second)) || + message2.Host != hostname || + message2.Source != "" || + message2.SourceType != "" || + message2.Index != "" { + t.Fatalf("Unexpected values of message 2 %v", message2) + } + + if event, err := message2.EventAsString(); err != nil { + t.Fatal(err) + } else { + if event != "containeriid a=b notjson" { + t.Fatalf("Unexpected event in message 2 %v", event) + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} + +// Verify that Splunk Logging Driver can accept tag="" which will allow to send raw messages +// in the same way we get them in stdout/stderr +func TestRawFormatWithoutTag(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + info := logger.Info{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + splunkFormatKey: splunkFormatRaw, + tagKey: "", + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + hostname, err := info.Hostname() + if err != nil { + t.Fatal(err) + } + + loggerDriver, err := New(info) + if err != nil { + t.Fatal(err) + } + + if !hec.connectionVerified { + t.Fatal("By default connection should be verified") + } + + splunkLoggerDriver, ok := loggerDriver.(*splunkLoggerRaw) + if !ok { + t.Fatal("Unexpected Splunk Logging Driver type") + } + + if splunkLoggerDriver.url != hec.URL()+"/services/collector/event/1.0" || + splunkLoggerDriver.auth != "Splunk "+hec.token || + splunkLoggerDriver.nullMessage.Host != hostname || + splunkLoggerDriver.nullMessage.Source != "" || + splunkLoggerDriver.nullMessage.SourceType != "" || + splunkLoggerDriver.nullMessage.Index != "" || + splunkLoggerDriver.gzipCompression != false || + splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || + splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || + splunkLoggerDriver.bufferMaximum != defaultBufferMaximum || + cap(splunkLoggerDriver.stream) != defaultStreamChannelSize || + string(splunkLoggerDriver.prefix) != "" { + t.Log(string(splunkLoggerDriver.prefix) + "a") + t.Fatal("Values do not match configuration.") + } + + message1Time := time.Now() + if err := 
loggerDriver.Log(&logger.Message{Line: []byte("{\"a\":\"b\"}"), Source: "stdout", Timestamp: message1Time}); err != nil { + t.Fatal(err) + } + message2Time := time.Now() + if err := loggerDriver.Log(&logger.Message{Line: []byte("notjson"), Source: "stdout", Timestamp: message2Time}); err != nil { + t.Fatal(err) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 2 { + t.Fatal("Expected two messages") + } + + message1 := hec.messages[0] + if message1.Time != fmt.Sprintf("%f", float64(message1Time.UnixNano())/float64(time.Second)) || + message1.Host != hostname || + message1.Source != "" || + message1.SourceType != "" || + message1.Index != "" { + t.Fatalf("Unexpected values of message 1 %v", message1) + } + + if event, err := message1.EventAsString(); err != nil { + t.Fatal(err) + } else { + if event != "{\"a\":\"b\"}" { + t.Fatalf("Unexpected event in message 1 %v", event) + } + } + + message2 := hec.messages[1] + if message2.Time != fmt.Sprintf("%f", float64(message2Time.UnixNano())/float64(time.Second)) || + message2.Host != hostname || + message2.Source != "" || + message2.SourceType != "" || + message2.Index != "" { + t.Fatalf("Unexpected values of message 2 %v", message2) + } + + if event, err := message2.EventAsString(); err != nil { + t.Fatal(err) + } else { + if event != "notjson" { + t.Fatalf("Unexpected event in message 2 %v", event) + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} + +// Verify that we will send messages in batches with default batching parameters, +// but change frequency to be sure that numOfRequests will match expected 17 requests +func TestBatching(t *testing.T) { + if err := os.Setenv(envVarPostMessagesFrequency, "10h"); err != nil { + t.Fatal(err) + } + + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + info := logger.Info{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + loggerDriver, err := New(info) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < defaultStreamChannelSize*4; i++ { + if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil { + t.Fatal(err) + } + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != defaultStreamChannelSize*4 { + t.Fatal("Not all messages delivered") + } + + for i, message := range hec.messages { + if event, err := message.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != fmt.Sprintf("%d", i) { + t.Fatalf("Unexpected event in message %v", event) + } + } + } + + // 1 to verify connection and 16 batches + if hec.numOfRequests != 17 { + t.Fatalf("Unexpected number of requests %d", hec.numOfRequests) + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarPostMessagesFrequency, ""); err != nil { + t.Fatal(err) + } +} + +// Verify that test is using time to fire events not rare than specified frequency +func TestFrequency(t *testing.T) { + if err := os.Setenv(envVarPostMessagesFrequency, "5ms"); err != nil { + t.Fatal(err) + } + + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + info := logger.Info{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + 
ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + loggerDriver, err := New(info) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 10; i++ { + if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil { + t.Fatal(err) + } + time.Sleep(15 * time.Millisecond) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 10 { + t.Fatal("Not all messages delivered") + } + + for i, message := range hec.messages { + if event, err := message.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != fmt.Sprintf("%d", i) { + t.Fatalf("Unexpected event in message %v", event) + } + } + } + + // 1 to verify connection and 10 to verify that we have sent messages with required frequency, + // but because frequency is too small (to keep test quick), instead of 11, use 9 if context switches will be slow + if hec.numOfRequests < 9 { + t.Fatalf("Unexpected number of requests %d", hec.numOfRequests) + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarPostMessagesFrequency, ""); err != nil { + t.Fatal(err) + } +} + +// Simulate behavior similar to first version of Splunk Logging Driver, when we were sending one message +// per request +func TestOneMessagePerRequest(t *testing.T) { + if err := os.Setenv(envVarPostMessagesFrequency, "10h"); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarPostMessagesBatchSize, "1"); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarBufferMaximum, "1"); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarStreamChannelSize, "0"); err != nil { + t.Fatal(err) + } + + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + info := logger.Info{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + loggerDriver, err := New(info) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 10; i++ { + if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil { + t.Fatal(err) + } + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 10 { + t.Fatal("Not all messages delivered") + } + + for i, message := range hec.messages { + if event, err := message.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != fmt.Sprintf("%d", i) { + t.Fatalf("Unexpected event in message %v", event) + } + } + } + + // 1 to verify connection and 10 messages + if hec.numOfRequests != 11 { + t.Fatalf("Unexpected number of requests %d", hec.numOfRequests) + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarPostMessagesFrequency, ""); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarPostMessagesBatchSize, ""); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarBufferMaximum, ""); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarStreamChannelSize, ""); err != nil { + t.Fatal(err) + } +} + +// Driver should not be created when HEC is unresponsive +func TestVerify(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + hec.simulateServerError = true + go hec.Serve() + + info := logger.Info{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: 
hec.token,
+		},
+		ContainerID:        "containeriid",
+		ContainerName:      "/container_name",
+		ContainerImageID:   "contaimageid",
+		ContainerImageName: "container_image_name",
+	}
+
+	_, err := New(info)
+	if err == nil {
+		t.Fatal("Expecting driver to fail, when server is unresponsive")
+	}
+
+	err = hec.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+// Verify that the user can skip verification that the Splunk HEC is working.
+// This test also exercises the retry logic.
+func TestSkipVerify(t *testing.T) {
+	hec := NewHTTPEventCollectorMock(t)
+	hec.simulateServerError = true
+	go hec.Serve()
+
+	info := logger.Info{
+		Config: map[string]string{
+			splunkURLKey:              hec.URL(),
+			splunkTokenKey:            hec.token,
+			splunkVerifyConnectionKey: "false",
+		},
+		ContainerID:        "containeriid",
+		ContainerName:      "/container_name",
+		ContainerImageID:   "contaimageid",
+		ContainerImageName: "container_image_name",
+	}
+
+	loggerDriver, err := New(info)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if hec.connectionVerified {
+		t.Fatal("Connection should not be verified")
+	}
+
+	for i := 0; i < defaultStreamChannelSize*2; i++ {
+		if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	if len(hec.messages) != 0 {
+		t.Fatal("No messages should be accepted at this point")
+	}
+
+	hec.simulateServerError = false
+
+	for i := defaultStreamChannelSize * 2; i < defaultStreamChannelSize*4; i++ {
+		if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	err = loggerDriver.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(hec.messages) != defaultStreamChannelSize*4 {
+		t.Fatal("Not all messages delivered")
+	}
+
+	for i, message := range hec.messages {
+		if event, err := message.EventAsMap(); err != nil {
+			t.Fatal(err)
+		} else {
+			if event["line"] != fmt.Sprintf("%d", i) {
+				t.Fatalf("Unexpected event in message %v", event)
+			}
+		}
+	}
+
+	err = hec.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+// Verify the behavior when the whole buffer fills up
+func TestBufferMaximum(t *testing.T) {
+	if err := os.Setenv(envVarPostMessagesBatchSize, "2"); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := os.Setenv(envVarBufferMaximum, "10"); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := os.Setenv(envVarStreamChannelSize, "0"); err != nil {
+		t.Fatal(err)
+	}
+
+	hec := NewHTTPEventCollectorMock(t)
+	hec.simulateServerError = true
+	go hec.Serve()
+
+	info := logger.Info{
+		Config: map[string]string{
+			splunkURLKey:              hec.URL(),
+			splunkTokenKey:            hec.token,
+			splunkVerifyConnectionKey: "false",
+		},
+		ContainerID:        "containeriid",
+		ContainerName:      "/container_name",
+		ContainerImageID:   "contaimageid",
+		ContainerImageName: "container_image_name",
+	}
+
+	loggerDriver, err := New(info)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if hec.connectionVerified {
+		t.Fatal("Connection should not be verified")
+	}
+
+	for i := 0; i < 11; i++ {
+		if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	if len(hec.messages) != 0 {
+		t.Fatal("No messages should be accepted at this point")
+	}
+
+	hec.simulateServerError = false
+
+	err = loggerDriver.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(hec.messages) != 9 {
+		t.Fatalf("Expected # of messages %d, got %d", 9, len(hec.messages))
+	}
+
+	// The two oldest messages were written to the daemon 
log when buffer was full + for i, message := range hec.messages { + if event, err := message.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != fmt.Sprintf("%d", i+2) { + t.Fatalf("Unexpected event in message %v", event) + } + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarPostMessagesBatchSize, ""); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarBufferMaximum, ""); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarStreamChannelSize, ""); err != nil { + t.Fatal(err) + } +} + +// Verify that we are not blocking close when HEC is down for the whole time +func TestServerAlwaysDown(t *testing.T) { + if err := os.Setenv(envVarPostMessagesBatchSize, "2"); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarBufferMaximum, "4"); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarStreamChannelSize, "0"); err != nil { + t.Fatal(err) + } + + hec := NewHTTPEventCollectorMock(t) + hec.simulateServerError = true + go hec.Serve() + + info := logger.Info{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + splunkVerifyConnectionKey: "false", + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + loggerDriver, err := New(info) + if err != nil { + t.Fatal(err) + } + + if hec.connectionVerified { + t.Fatal("Connection should not be verified") + } + + for i := 0; i < 5; i++ { + if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil { + t.Fatal(err) + } + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 0 { + t.Fatal("No messages should be sent") + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarPostMessagesBatchSize, ""); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarBufferMaximum, ""); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarStreamChannelSize, ""); err != nil { + t.Fatal(err) + } +} + +// Cannot send messages after we close driver +func TestCannotSendAfterClose(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + go hec.Serve() + + info := logger.Info{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + loggerDriver, err := New(info) + if err != nil { + t.Fatal(err) + } + + if err := loggerDriver.Log(&logger.Message{Line: []byte("message1"), Source: "stdout", Timestamp: time.Now()}); err != nil { + t.Fatal(err) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if err := loggerDriver.Log(&logger.Message{Line: []byte("message2"), Source: "stdout", Timestamp: time.Now()}); err == nil { + t.Fatal("Driver should not allow to send messages after close") + } + + if len(hec.messages) != 1 { + t.Fatal("Only one message should be sent") + } + + message := hec.messages[0] + if event, err := message.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != "message1" { + t.Fatalf("Unexpected event in message %v", event) + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/daemon/logger/splunk/splunkhecmock_test.go 
b/vendor/github.com/moby/moby/daemon/logger/splunk/splunkhecmock_test.go new file mode 100644 index 000000000..e50894828 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/splunk/splunkhecmock_test.go @@ -0,0 +1,157 @@ +package splunk + +import ( + "compress/gzip" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "testing" +) + +func (message *splunkMessage) EventAsString() (string, error) { + if val, ok := message.Event.(string); ok { + return val, nil + } + return "", fmt.Errorf("Cannot cast Event %v to string", message.Event) +} + +func (message *splunkMessage) EventAsMap() (map[string]interface{}, error) { + if val, ok := message.Event.(map[string]interface{}); ok { + return val, nil + } + return nil, fmt.Errorf("Cannot cast Event %v to map", message.Event) +} + +type HTTPEventCollectorMock struct { + tcpAddr *net.TCPAddr + tcpListener *net.TCPListener + + token string + simulateServerError bool + + test *testing.T + + connectionVerified bool + gzipEnabled *bool + messages []*splunkMessage + numOfRequests int +} + +func NewHTTPEventCollectorMock(t *testing.T) *HTTPEventCollectorMock { + tcpAddr := &net.TCPAddr{IP: []byte{127, 0, 0, 1}, Port: 0, Zone: ""} + tcpListener, err := net.ListenTCP("tcp", tcpAddr) + if err != nil { + t.Fatal(err) + } + return &HTTPEventCollectorMock{ + tcpAddr: tcpAddr, + tcpListener: tcpListener, + token: "4642492F-D8BD-47F1-A005-0C08AE4657DF", + simulateServerError: false, + test: t, + connectionVerified: false} +} + +func (hec *HTTPEventCollectorMock) URL() string { + return "http://" + hec.tcpListener.Addr().String() +} + +func (hec *HTTPEventCollectorMock) Serve() error { + return http.Serve(hec.tcpListener, hec) +} + +func (hec *HTTPEventCollectorMock) Close() error { + return hec.tcpListener.Close() +} + +func (hec *HTTPEventCollectorMock) ServeHTTP(writer http.ResponseWriter, request *http.Request) { + var err error + + hec.numOfRequests++ + + if hec.simulateServerError { + if request.Body != nil { + defer request.Body.Close() + } + writer.WriteHeader(http.StatusInternalServerError) + return + } + + switch request.Method { + case http.MethodOptions: + // Verify that options method is getting called only once + if hec.connectionVerified { + hec.test.Errorf("Connection should not be verified more than once. 
Got second request with %s method.", request.Method)
+		}
+		hec.connectionVerified = true
+		writer.WriteHeader(http.StatusOK)
+	case http.MethodPost:
+		// Always verify that the driver is using the correct path to the HEC
+		if request.URL.String() != "/services/collector/event/1.0" {
+			hec.test.Errorf("Unexpected path %v", request.URL)
+		}
+		defer request.Body.Close()
+
+		if authorization, ok := request.Header["Authorization"]; !ok || authorization[0] != ("Splunk "+hec.token) {
+			hec.test.Error("Authorization header is invalid.")
+		}
+
+		gzipEnabled := false
+		if contentEncoding, ok := request.Header["Content-Encoding"]; ok && contentEncoding[0] == "gzip" {
+			gzipEnabled = true
+		}
+
+		if hec.gzipEnabled == nil {
+			hec.gzipEnabled = &gzipEnabled
+		} else if gzipEnabled != *hec.gzipEnabled {
+			// Nothing wrong with that per se, but we know the Splunk Logging Driver never changes the encoding mid-stream
+			hec.test.Error("Driver should not change Content Encoding.")
+		}
+
+		var gzipReader *gzip.Reader
+		var reader io.Reader
+		if gzipEnabled {
+			gzipReader, err = gzip.NewReader(request.Body)
+			if err != nil {
+				hec.test.Fatal(err)
+			}
+			reader = gzipReader
+		} else {
+			reader = request.Body
+		}
+
+		// Read body
+		var body []byte
+		body, err = ioutil.ReadAll(reader)
+		if err != nil {
+			hec.test.Fatal(err)
+		}
+
+		// Parse messages: the body is a stream of concatenated JSON objects, split on "}{" boundaries
+		messageStart := 0
+		for i := 0; i < len(body); i++ {
+			if i == len(body)-1 || (body[i] == '}' && body[i+1] == '{') {
+				var message splunkMessage
+				err = json.Unmarshal(body[messageStart:i+1], &message)
+				if err != nil {
+					hec.test.Log(string(body[messageStart : i+1]))
+					hec.test.Fatal(err)
+				}
+				hec.messages = append(hec.messages, &message)
+				messageStart = i + 1
+			}
+		}
+
+		if gzipEnabled {
+			gzipReader.Close()
+		}
+
+		writer.WriteHeader(http.StatusOK)
+	default:
+		hec.test.Errorf("Unexpected HTTP method %s", request.Method)
+		writer.WriteHeader(http.StatusBadRequest)
+	}
+}
diff --git a/vendor/github.com/moby/moby/daemon/logger/syslog/syslog.go b/vendor/github.com/moby/moby/daemon/logger/syslog/syslog.go
new file mode 100644
index 000000000..42855e117
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/logger/syslog/syslog.go
@@ -0,0 +1,266 @@
+// Package syslog provides the log driver for forwarding server logs to syslog endpoints. 
+package syslog + +import ( + "crypto/tls" + "errors" + "fmt" + "net" + "net/url" + "os" + "strconv" + "strings" + "time" + + syslog "github.com/RackSec/srslog" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/pkg/urlutil" + "github.com/docker/go-connections/tlsconfig" +) + +const ( + name = "syslog" + secureProto = "tcp+tls" +) + +var facilities = map[string]syslog.Priority{ + "kern": syslog.LOG_KERN, + "user": syslog.LOG_USER, + "mail": syslog.LOG_MAIL, + "daemon": syslog.LOG_DAEMON, + "auth": syslog.LOG_AUTH, + "syslog": syslog.LOG_SYSLOG, + "lpr": syslog.LOG_LPR, + "news": syslog.LOG_NEWS, + "uucp": syslog.LOG_UUCP, + "cron": syslog.LOG_CRON, + "authpriv": syslog.LOG_AUTHPRIV, + "ftp": syslog.LOG_FTP, + "local0": syslog.LOG_LOCAL0, + "local1": syslog.LOG_LOCAL1, + "local2": syslog.LOG_LOCAL2, + "local3": syslog.LOG_LOCAL3, + "local4": syslog.LOG_LOCAL4, + "local5": syslog.LOG_LOCAL5, + "local6": syslog.LOG_LOCAL6, + "local7": syslog.LOG_LOCAL7, +} + +type syslogger struct { + writer *syslog.Writer +} + +func init() { + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// rsyslog uses appname part of syslog message to fill in an %syslogtag% template +// attribute in rsyslog.conf. In order to be backward compatible to rfc3164 +// tag will be also used as an appname +func rfc5424formatterWithAppNameAsTag(p syslog.Priority, hostname, tag, content string) string { + timestamp := time.Now().Format(time.RFC3339) + pid := os.Getpid() + msg := fmt.Sprintf("<%d>%d %s %s %s %d %s - %s", + p, 1, timestamp, hostname, tag, pid, tag, content) + return msg +} + +// The timestamp field in rfc5424 is derived from rfc3339. Whereas rfc3339 makes allowances +// for multiple syntaxes, there are further restrictions in rfc5424, i.e., the maximum +// resolution is limited to "TIME-SECFRAC" which is 6 (microsecond resolution) +func rfc5424microformatterWithAppNameAsTag(p syslog.Priority, hostname, tag, content string) string { + timestamp := time.Now().Format("2006-01-02T15:04:05.999999Z07:00") + pid := os.Getpid() + msg := fmt.Sprintf("<%d>%d %s %s %s %d %s - %s", + p, 1, timestamp, hostname, tag, pid, tag, content) + return msg +} + +// New creates a syslog logger using the configuration passed in on +// the context. Supported context configuration variables are +// syslog-address, syslog-facility, syslog-format. 
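+//
+// A minimal sketch of constructing the driver directly (values are
+// illustrative placeholders, not defaults):
+//
+//	info := logger.Info{
+//		ContainerID: "0123456789ab",
+//		Config: map[string]string{
+//			"syslog-address":  "udp://127.0.0.1:514",
+//			"syslog-facility": "local0",
+//			"syslog-format":   "rfc5424",
+//		},
+//	}
+//	l, err := New(info)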
+func New(info logger.Info) (logger.Logger, error) { + tag, err := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate) + if err != nil { + return nil, err + } + + proto, address, err := parseAddress(info.Config["syslog-address"]) + if err != nil { + return nil, err + } + + facility, err := parseFacility(info.Config["syslog-facility"]) + if err != nil { + return nil, err + } + + syslogFormatter, syslogFramer, err := parseLogFormat(info.Config["syslog-format"], proto) + if err != nil { + return nil, err + } + + var log *syslog.Writer + if proto == secureProto { + tlsConfig, tlsErr := parseTLSConfig(info.Config) + if tlsErr != nil { + return nil, tlsErr + } + log, err = syslog.DialWithTLSConfig(proto, address, facility, tag, tlsConfig) + } else { + log, err = syslog.Dial(proto, address, facility, tag) + } + + if err != nil { + return nil, err + } + + log.SetFormatter(syslogFormatter) + log.SetFramer(syslogFramer) + + return &syslogger{ + writer: log, + }, nil +} + +func (s *syslogger) Log(msg *logger.Message) error { + line := string(msg.Line) + source := msg.Source + logger.PutMessage(msg) + if source == "stderr" { + return s.writer.Err(line) + } + return s.writer.Info(line) +} + +func (s *syslogger) Close() error { + return s.writer.Close() +} + +func (s *syslogger) Name() string { + return name +} + +func parseAddress(address string) (string, string, error) { + if address == "" { + return "", "", nil + } + if !urlutil.IsTransportURL(address) { + return "", "", fmt.Errorf("syslog-address should be in form proto://address, got %v", address) + } + url, err := url.Parse(address) + if err != nil { + return "", "", err + } + + // unix and unixgram socket validation + if url.Scheme == "unix" || url.Scheme == "unixgram" { + if _, err := os.Stat(url.Path); err != nil { + return "", "", err + } + return url.Scheme, url.Path, nil + } + + // here we process tcp|udp + host := url.Host + if _, _, err := net.SplitHostPort(host); err != nil { + if !strings.Contains(err.Error(), "missing port in address") { + return "", "", err + } + host = host + ":514" + } + + return url.Scheme, host, nil +} + +// ValidateLogOpt looks for syslog specific log options +// syslog-address, syslog-facility. 
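+//
+// For reference, hedged examples of how parseAddress above resolves its
+// input (the unix case additionally requires the socket path to exist):
+//
+//	parseAddress("")                  -> "", ""                (local syslog)
+//	parseAddress("tcp://1.2.3.4")     -> "tcp", "1.2.3.4:514"  (default port added)
+//	parseAddress("udp://1.2.3.4:601") -> "udp", "1.2.3.4:601"
+//	parseAddress("unix:///dev/log")   -> "unix", "/dev/log"
+//	parseAddress("1.2.3.4:514")       -> error: not in form proto://address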
+func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case "env": + case "env-regex": + case "labels": + case "syslog-address": + case "syslog-facility": + case "syslog-tls-ca-cert": + case "syslog-tls-cert": + case "syslog-tls-key": + case "syslog-tls-skip-verify": + case "tag": + case "syslog-format": + default: + return fmt.Errorf("unknown log opt '%s' for syslog log driver", key) + } + } + if _, _, err := parseAddress(cfg["syslog-address"]); err != nil { + return err + } + if _, err := parseFacility(cfg["syslog-facility"]); err != nil { + return err + } + if _, _, err := parseLogFormat(cfg["syslog-format"], ""); err != nil { + return err + } + return nil +} + +func parseFacility(facility string) (syslog.Priority, error) { + if facility == "" { + return syslog.LOG_DAEMON, nil + } + + if syslogFacility, valid := facilities[facility]; valid { + return syslogFacility, nil + } + + fInt, err := strconv.Atoi(facility) + if err == nil && 0 <= fInt && fInt <= 23 { + return syslog.Priority(fInt << 3), nil + } + + return syslog.Priority(0), errors.New("invalid syslog facility") +} + +func parseTLSConfig(cfg map[string]string) (*tls.Config, error) { + _, skipVerify := cfg["syslog-tls-skip-verify"] + + opts := tlsconfig.Options{ + CAFile: cfg["syslog-tls-ca-cert"], + CertFile: cfg["syslog-tls-cert"], + KeyFile: cfg["syslog-tls-key"], + InsecureSkipVerify: skipVerify, + } + + return tlsconfig.Client(opts) +} + +func parseLogFormat(logFormat, proto string) (syslog.Formatter, syslog.Framer, error) { + switch logFormat { + case "": + return syslog.UnixFormatter, syslog.DefaultFramer, nil + case "rfc3164": + return syslog.RFC3164Formatter, syslog.DefaultFramer, nil + case "rfc5424": + if proto == secureProto { + return rfc5424formatterWithAppNameAsTag, syslog.RFC5425MessageLengthFramer, nil + } + return rfc5424formatterWithAppNameAsTag, syslog.DefaultFramer, nil + case "rfc5424micro": + if proto == secureProto { + return rfc5424microformatterWithAppNameAsTag, syslog.RFC5425MessageLengthFramer, nil + } + return rfc5424microformatterWithAppNameAsTag, syslog.DefaultFramer, nil + default: + return nil, nil, errors.New("Invalid syslog format") + } + +} diff --git a/vendor/github.com/moby/moby/daemon/logger/syslog/syslog_test.go b/vendor/github.com/moby/moby/daemon/logger/syslog/syslog_test.go new file mode 100644 index 000000000..501561064 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logger/syslog/syslog_test.go @@ -0,0 +1,62 @@ +package syslog + +import ( + "reflect" + "testing" + + syslog "github.com/RackSec/srslog" +) + +func functionMatches(expectedFun interface{}, actualFun interface{}) bool { + return reflect.ValueOf(expectedFun).Pointer() == reflect.ValueOf(actualFun).Pointer() +} + +func TestParseLogFormat(t *testing.T) { + formatter, framer, err := parseLogFormat("rfc5424", "udp") + if err != nil || !functionMatches(rfc5424formatterWithAppNameAsTag, formatter) || + !functionMatches(syslog.DefaultFramer, framer) { + t.Fatal("Failed to parse rfc5424 format", err, formatter, framer) + } + + formatter, framer, err = parseLogFormat("rfc5424", "tcp+tls") + if err != nil || !functionMatches(rfc5424formatterWithAppNameAsTag, formatter) || + !functionMatches(syslog.RFC5425MessageLengthFramer, framer) { + t.Fatal("Failed to parse rfc5424 format", err, formatter, framer) + } + + formatter, framer, err = parseLogFormat("rfc5424micro", "udp") + if err != nil || !functionMatches(rfc5424microformatterWithAppNameAsTag, formatter) || + 
!functionMatches(syslog.DefaultFramer, framer) {
+		t.Fatal("Failed to parse rfc5424 (microsecond) format", err, formatter, framer)
+	}
+
+	formatter, framer, err = parseLogFormat("rfc5424micro", "tcp+tls")
+	if err != nil || !functionMatches(rfc5424microformatterWithAppNameAsTag, formatter) ||
+		!functionMatches(syslog.RFC5425MessageLengthFramer, framer) {
+		t.Fatal("Failed to parse rfc5424 (microsecond) format", err, formatter, framer)
+	}
+
+	formatter, framer, err = parseLogFormat("rfc3164", "")
+	if err != nil || !functionMatches(syslog.RFC3164Formatter, formatter) ||
+		!functionMatches(syslog.DefaultFramer, framer) {
+		t.Fatal("Failed to parse rfc3164 format", err, formatter, framer)
+	}
+
+	formatter, framer, err = parseLogFormat("", "")
+	if err != nil || !functionMatches(syslog.UnixFormatter, formatter) ||
+		!functionMatches(syslog.DefaultFramer, framer) {
+		t.Fatal("Failed to parse empty format", err, formatter, framer)
+	}
+
+	formatter, framer, err = parseLogFormat("invalid", "")
+	if err == nil {
+		t.Fatal("Expected an error for the invalid format", err, formatter, framer)
+	}
+}
+
+func TestValidateLogOptEmpty(t *testing.T) {
+	emptyConfig := make(map[string]string)
+	if err := ValidateLogOpt(emptyConfig); err != nil {
+		t.Fatal("Failed to parse empty config", err)
+	}
+}
diff --git a/vendor/github.com/moby/moby/daemon/logs.go b/vendor/github.com/moby/moby/daemon/logs.go
new file mode 100644
index 000000000..96e1b8a49
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/logs.go
@@ -0,0 +1,175 @@
+package daemon
+
+import (
+	"errors"
+	"strconv"
+	"time"
+
+	"golang.org/x/net/context"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/backend"
+	containertypes "github.com/docker/docker/api/types/container"
+	timetypes "github.com/docker/docker/api/types/time"
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/daemon/logger"
+)
+
+// ContainerLogs copies the container's log channel to the channel provided in
+// the config. If ContainerLogs returns an error, no messages have been copied,
+// and the channel will be closed without data.
+//
+// If it returns nil, the config channel will be active and return log
+// messages until it runs out or the context is canceled.
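+//
+// A hedged consumption sketch (placeholder names; fields are those of
+// types.ContainerLogsOptions and backend.LogMessage):
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+//	defer cancel()
+//	msgs, err := daemon.ContainerLogs(ctx, "web", &types.ContainerLogsOptions{
+//		ShowStdout: true,
+//		Tail:       "100",
+//		Follow:     true,
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	for m := range msgs {
+//		fmt.Printf("%s %s: %s\n", m.Timestamp, m.Source, m.Line)
+//	}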
+func (daemon *Daemon) ContainerLogs(ctx context.Context, containerName string, config *types.ContainerLogsOptions) (<-chan *backend.LogMessage, error) {
+	lg := logrus.WithFields(logrus.Fields{
+		"module":    "daemon",
+		"method":    "(*Daemon).ContainerLogs",
+		"container": containerName,
+	})
+
+	if !(config.ShowStdout || config.ShowStderr) {
+		return nil, errors.New("You must choose at least one stream")
+	}
+	container, err := daemon.GetContainer(containerName)
+	if err != nil {
+		return nil, err
+	}
+
+	if container.RemovalInProgress || container.Dead {
+		return nil, errors.New("cannot get logs from container which is dead or marked for removal")
+	}
+
+	if container.HostConfig.LogConfig.Type == "none" {
+		return nil, logger.ErrReadLogsNotSupported
+	}
+
+	cLog, cLogCreated, err := daemon.getLogger(container)
+	if err != nil {
+		return nil, err
+	}
+	if cLogCreated {
+		defer func() {
+			if err = cLog.Close(); err != nil {
+				logrus.Errorf("Error closing logger: %v", err)
+			}
+		}()
+	}
+
+	logReader, ok := cLog.(logger.LogReader)
+	if !ok {
+		return nil, logger.ErrReadLogsNotSupported
+	}
+
+	follow := config.Follow && !cLogCreated
+	tailLines, err := strconv.Atoi(config.Tail)
+	if err != nil {
+		tailLines = -1
+	}
+
+	var since time.Time
+	if config.Since != "" {
+		s, n, err := timetypes.ParseTimestamps(config.Since, 0)
+		if err != nil {
+			return nil, err
+		}
+		since = time.Unix(s, n)
+	}
+
+	readConfig := logger.ReadConfig{
+		Since:  since,
+		Tail:   tailLines,
+		Follow: follow,
+	}
+
+	logs := logReader.ReadLogs(readConfig)
+
+	// past this point, we can't possibly return any errors, so we can just
+	// start a goroutine and return to tell the caller not to expect errors
+	// (if the caller wants to give up on logs, they have to cancel the context)
+	// this goroutine functions as a shim between the logger and the caller.
+	messageChan := make(chan *backend.LogMessage, 1)
+	go func() {
+		// set up some defers
+		defer logs.Close()
+
+		// close the messages channel. closing is the only way to signal above
+		// that we're done with logs (other than context cancellation).
+		defer close(messageChan)
+
+		lg.Debug("begin logs")
+		for {
+			select {
+			// As the system is currently designed, no error should be
+			// possible here, but we should be prepared to handle one anyway.
+			// If we do get an error, copy only the error field to a new object
+			// so we don't end up with partial data in the other fields.
+			case err := <-logs.Err:
+				lg.Errorf("Error streaming logs: %v", err)
+				select {
+				case <-ctx.Done():
+				case messageChan <- &backend.LogMessage{Err: err}:
+				}
+				return
+			case <-ctx.Done():
+				lg.Debugf("logs: end stream, ctx is done: %v", ctx.Err())
+				return
+			case msg, ok := <-logs.Msg:
+				// there is some kind of pool or ring buffer in the logger that
+				// produces these messages, and a possible future optimization
+				// might be to use that pool and reuse message objects
+				if !ok {
+					lg.Debug("end logs")
+					return
+				}
+				m := msg.AsLogMessage() // just a pointer conversion, does not copy data
+
+				// there could be a case where the reader stops accepting
+				// messages and the context is canceled. We need to check that
+				// here, or otherwise we risk blocking forever on the message
+				// send.
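+				// (note, hedged: pairing the send with ctx.Done() in a single
+				// select is what keeps this goroutine from leaking; a bare
+				// send on messageChan could block forever once the consumer
+				// stops reading.)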
+ select { + case <-ctx.Done(): + return + case messageChan <- m: + } + } + } + }() + return messageChan, nil +} + +func (daemon *Daemon) getLogger(container *container.Container) (l logger.Logger, created bool, err error) { + container.Lock() + if container.State.Running { + l = container.LogDriver + } + container.Unlock() + if l == nil { + created = true + l, err = container.StartLogger() + } + return +} + +// mergeLogConfig merges the daemon log config to the container's log config if the container's log driver is not specified. +func (daemon *Daemon) mergeAndVerifyLogConfig(cfg *containertypes.LogConfig) error { + if cfg.Type == "" { + cfg.Type = daemon.defaultLogConfig.Type + } + + if cfg.Config == nil { + cfg.Config = make(map[string]string) + } + + if cfg.Type == daemon.defaultLogConfig.Type { + for k, v := range daemon.defaultLogConfig.Config { + if _, ok := cfg.Config[k]; !ok { + cfg.Config[k] = v + } + } + } + + return logger.ValidateLogOpts(cfg.Type, cfg.Config) +} diff --git a/vendor/github.com/moby/moby/daemon/logs_test.go b/vendor/github.com/moby/moby/daemon/logs_test.go new file mode 100644 index 000000000..0c36299e0 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/logs_test.go @@ -0,0 +1,15 @@ +package daemon + +import ( + "testing" + + containertypes "github.com/docker/docker/api/types/container" +) + +func TestMergeAndVerifyLogConfigNilConfig(t *testing.T) { + d := &Daemon{defaultLogConfig: containertypes.LogConfig{Type: "json-file", Config: map[string]string{"max-file": "1"}}} + cfg := containertypes.LogConfig{Type: d.defaultLogConfig.Type} + if err := d.mergeAndVerifyLogConfig(&cfg); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/daemon/metrics.go b/vendor/github.com/moby/moby/daemon/metrics.go new file mode 100644 index 000000000..bf9e49d04 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/metrics.go @@ -0,0 +1,174 @@ +package daemon + +import ( + "path/filepath" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/go-metrics" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" +) + +const metricsPluginType = "MetricsCollector" + +var ( + containerActions metrics.LabeledTimer + containerStates metrics.LabeledGauge + imageActions metrics.LabeledTimer + networkActions metrics.LabeledTimer + engineInfo metrics.LabeledGauge + engineCpus metrics.Gauge + engineMemory metrics.Gauge + healthChecksCounter metrics.Counter + healthChecksFailedCounter metrics.Counter + + stateCtr *stateCounter +) + +func init() { + ns := metrics.NewNamespace("engine", "daemon", nil) + containerActions = ns.NewLabeledTimer("container_actions", "The number of seconds it takes to process each container action", "action") + for _, a := range []string{ + "start", + "changes", + "commit", + "create", + "delete", + } { + containerActions.WithValues(a).Update(0) + } + + networkActions = ns.NewLabeledTimer("network_actions", "The number of seconds it takes to process each network action", "action") + engineInfo = ns.NewLabeledGauge("engine", "The information related to the engine and the OS it is running on", metrics.Unit("info"), + "version", + "commit", + "architecture", + "graphdriver", + "kernel", "os", + "os_type", + "daemon_id", // ID is a randomly generated unique identifier (e.g. 
UUID4) + ) + engineCpus = ns.NewGauge("engine_cpus", "The number of cpus that the host system of the engine has", metrics.Unit("cpus")) + engineMemory = ns.NewGauge("engine_memory", "The number of bytes of memory that the host system of the engine has", metrics.Bytes) + healthChecksCounter = ns.NewCounter("health_checks", "The total number of health checks") + healthChecksFailedCounter = ns.NewCounter("health_checks_failed", "The total number of failed health checks") + imageActions = ns.NewLabeledTimer("image_actions", "The number of seconds it takes to process each image action", "action") + + stateCtr = newStateCounter(ns.NewDesc("container_states", "The count of containers in various states", metrics.Unit("containers"), "state")) + ns.Add(stateCtr) + + metrics.Register(ns) +} + +type stateCounter struct { + mu sync.Mutex + states map[string]string + desc *prometheus.Desc +} + +func newStateCounter(desc *prometheus.Desc) *stateCounter { + return &stateCounter{ + states: make(map[string]string), + desc: desc, + } +} + +func (ctr *stateCounter) get() (running int, paused int, stopped int) { + ctr.mu.Lock() + defer ctr.mu.Unlock() + + states := map[string]int{ + "running": 0, + "paused": 0, + "stopped": 0, + } + for _, state := range ctr.states { + states[state]++ + } + return states["running"], states["paused"], states["stopped"] +} + +func (ctr *stateCounter) set(id, label string) { + ctr.mu.Lock() + ctr.states[id] = label + ctr.mu.Unlock() +} + +func (ctr *stateCounter) del(id string) { + ctr.mu.Lock() + delete(ctr.states, id) + ctr.mu.Unlock() +} + +func (ctr *stateCounter) Describe(ch chan<- *prometheus.Desc) { + ch <- ctr.desc +} + +func (ctr *stateCounter) Collect(ch chan<- prometheus.Metric) { + running, paused, stopped := ctr.get() + ch <- prometheus.MustNewConstMetric(ctr.desc, prometheus.GaugeValue, float64(running), "running") + ch <- prometheus.MustNewConstMetric(ctr.desc, prometheus.GaugeValue, float64(paused), "paused") + ch <- prometheus.MustNewConstMetric(ctr.desc, prometheus.GaugeValue, float64(stopped), "stopped") +} + +func (d *Daemon) cleanupMetricsPlugins() { + ls := d.PluginStore.GetAllManagedPluginsByCap(metricsPluginType) + var wg sync.WaitGroup + wg.Add(len(ls)) + + for _, p := range ls { + go func() { + defer wg.Done() + pluginStopMetricsCollection(p) + }() + } + wg.Wait() + + if d.metricsPluginListener != nil { + d.metricsPluginListener.Close() + } +} + +type metricsPlugin struct { + plugingetter.CompatPlugin +} + +func (p metricsPlugin) sock() string { + return "metrics.sock" +} + +func (p metricsPlugin) sockBase() string { + return filepath.Join(p.BasePath(), "run", "docker") +} + +func pluginStartMetricsCollection(p plugingetter.CompatPlugin) error { + type metricsPluginResponse struct { + Err string + } + var res metricsPluginResponse + if err := p.Client().Call(metricsPluginType+".StartMetrics", nil, &res); err != nil { + return errors.Wrap(err, "could not start metrics plugin") + } + if res.Err != "" { + return errors.New(res.Err) + } + return nil +} + +func pluginStopMetricsCollection(p plugingetter.CompatPlugin) { + if err := p.Client().Call(metricsPluginType+".StopMetrics", nil, nil); err != nil { + logrus.WithError(err).WithField("name", p.Name()).Error("error stopping metrics collector") + } + + mp := metricsPlugin{p} + sockPath := filepath.Join(mp.sockBase(), mp.sock()) + if err := mount.Unmount(sockPath); err != nil { + if mounted, _ := mount.Mounted(sockPath); mounted { + logrus.WithError(err).WithField("name", p.Name()).WithField("socket", 
sockPath).Error("error unmounting metrics socket for plugin") + } + } + return +} diff --git a/vendor/github.com/moby/moby/daemon/metrics_unix.go b/vendor/github.com/moby/moby/daemon/metrics_unix.go new file mode 100644 index 000000000..84166d1a8 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/metrics_unix.go @@ -0,0 +1,86 @@ +// +build !windows + +package daemon + +import ( + "net" + "net/http" + "os" + "path/filepath" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/plugins" + metrics "github.com/docker/go-metrics" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +func (daemon *Daemon) listenMetricsSock() (string, error) { + path := filepath.Join(daemon.configStore.ExecRoot, "metrics.sock") + unix.Unlink(path) + l, err := net.Listen("unix", path) + if err != nil { + return "", errors.Wrap(err, "error setting up metrics plugin listener") + } + + mux := http.NewServeMux() + mux.Handle("/metrics", metrics.Handler()) + go func() { + http.Serve(l, mux) + }() + daemon.metricsPluginListener = l + return path, nil +} + +func registerMetricsPluginCallback(getter plugingetter.PluginGetter, sockPath string) { + getter.Handle(metricsPluginType, func(name string, client *plugins.Client) { + // Use lookup since nothing in the system can really reference it, no need + // to protect against removal + p, err := getter.Get(name, metricsPluginType, plugingetter.Lookup) + if err != nil { + return + } + + mp := metricsPlugin{p} + sockBase := mp.sockBase() + if err := os.MkdirAll(sockBase, 0755); err != nil { + logrus.WithError(err).WithField("name", name).WithField("path", sockBase).Error("error creating metrics plugin base path") + return + } + + defer func() { + if err != nil { + os.RemoveAll(sockBase) + } + }() + + pluginSockPath := filepath.Join(sockBase, mp.sock()) + _, err = os.Stat(pluginSockPath) + if err == nil { + mount.Unmount(pluginSockPath) + } else { + logrus.WithField("path", pluginSockPath).Debugf("creating plugin socket") + f, err := os.OpenFile(pluginSockPath, os.O_CREATE, 0600) + if err != nil { + return + } + f.Close() + } + + if err := mount.Mount(sockPath, pluginSockPath, "none", "bind,ro"); err != nil { + logrus.WithError(err).WithField("name", name).Error("could not mount metrics socket to plugin") + return + } + + if err := pluginStartMetricsCollection(p); err != nil { + if err := mount.Unmount(pluginSockPath); err != nil { + if mounted, _ := mount.Mounted(pluginSockPath); mounted { + logrus.WithError(err).WithField("sock_path", pluginSockPath).Error("error unmounting metrics socket from plugin during cleanup") + } + } + logrus.WithError(err).WithField("name", name).Error("error while initializing metrics plugin") + } + }) +} diff --git a/vendor/github.com/moby/moby/daemon/metrics_unsupported.go b/vendor/github.com/moby/moby/daemon/metrics_unsupported.go new file mode 100644 index 000000000..64dc1817a --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/metrics_unsupported.go @@ -0,0 +1,12 @@ +// +build windows + +package daemon + +import "github.com/docker/docker/pkg/plugingetter" + +func registerMetricsPluginCallback(getter plugingetter.PluginGetter, sockPath string) { +} + +func (daemon *Daemon) listenMetricsSock() (string, error) { + return "", nil +} diff --git a/vendor/github.com/moby/moby/daemon/monitor.go b/vendor/github.com/moby/moby/daemon/monitor.go new file mode 100644 index 000000000..5156d9a8e --- /dev/null +++ 
b/vendor/github.com/moby/moby/daemon/monitor.go
@@ -0,0 +1,173 @@
+package daemon
+
+import (
+	"errors"
+	"fmt"
+	"runtime"
+	"strconv"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/libcontainerd"
+	"github.com/docker/docker/restartmanager"
+)
+
+func (daemon *Daemon) setStateCounter(c *container.Container) {
+	switch c.StateString() {
+	case "paused":
+		stateCtr.set(c.ID, "paused")
+	case "running":
+		stateCtr.set(c.ID, "running")
+	default:
+		stateCtr.set(c.ID, "stopped")
+	}
+}
+
+// StateChanged updates daemon state changes from containerd
+func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error {
+	c := daemon.containers.Get(id)
+	if c == nil {
+		return fmt.Errorf("no such container: %s", id)
+	}
+
+	switch e.State {
+	case libcontainerd.StateOOM:
+		// StateOOM is Linux specific and should never be hit on Windows
+		if runtime.GOOS == "windows" {
+			return errors.New("Received StateOOM from libcontainerd on Windows. This should never happen.")
+		}
+		daemon.updateHealthMonitor(c)
+		if err := c.CheckpointTo(daemon.containersReplica); err != nil {
+			return err
+		}
+		daemon.LogContainerEvent(c, "oom")
+	case libcontainerd.StateExit:
+
+		c.Lock()
+		c.StreamConfig.Wait()
+		c.Reset(false)
+
+		// If the daemon is being shut down, don't let the container restart
+		restart, wait, err := c.RestartManager().ShouldRestart(e.ExitCode, daemon.IsShuttingDown() || c.HasBeenManuallyStopped, time.Since(c.StartedAt))
+		if err == nil && restart {
+			c.RestartCount++
+			c.SetRestarting(platformConstructExitStatus(e))
+		} else {
+			c.SetStopped(platformConstructExitStatus(e))
+			defer daemon.autoRemove(c)
+		}
+
+		// cancel healthchecks here; they will be automatically
+		// restarted if/when the container is started again
+		daemon.stopHealthchecks(c)
+		attributes := map[string]string{
+			"exitCode": strconv.Itoa(int(e.ExitCode)),
+		}
+		daemon.LogContainerEventWithAttributes(c, "die", attributes)
+		daemon.Cleanup(c)
+
+		if err == nil && restart {
+			go func() {
+				err := <-wait
+				if err == nil {
+					// daemon.netController is initialized when the daemon is
+					// restoring containers, but containerStart will use
+					// daemon.netController. To avoid a panic during the startup
+					// process, we must wait here until the daemon restore is done.
+					daemon.waitForStartupDone()
+					if err = daemon.containerStart(c, "", "", false); err != nil {
+						logrus.Debugf("failed to restart container: %+v", err)
+					}
+				}
+				if err != nil {
+					c.SetStopped(platformConstructExitStatus(e))
+					defer daemon.autoRemove(c)
+					if err != restartmanager.ErrRestartCanceled {
+						logrus.Errorf("restartmanager wait error: %+v", err)
+					}
+				}
+			}()
+		}
+
+		daemon.setStateCounter(c)
+
+		defer c.Unlock()
+		if err := c.CheckpointTo(daemon.containersReplica); err != nil {
+			return err
+		}
+		return daemon.postRunProcessing(c, e)
+	case libcontainerd.StateExitProcess:
+		if execConfig := c.ExecCommands.Get(e.ProcessID); execConfig != nil {
+			ec := int(e.ExitCode)
+			execConfig.Lock()
+			defer execConfig.Unlock()
+			execConfig.ExitCode = &ec
+			execConfig.Running = false
+			execConfig.StreamConfig.Wait()
+			if err := execConfig.CloseStreams(); err != nil {
+				logrus.Errorf("failed to cleanup exec %s streams: %s", c.ID, err)
+			}
+
+			// remove the exec command from the container's store only and not the
+			// daemon's store so that the exec command can be inspected.
+			c.ExecCommands.Delete(execConfig.ID)
+		} else {
+			logrus.Warnf("Ignoring StateExitProcess for %v but no exec command found", e)
+		}
+	case libcontainerd.StateStart, libcontainerd.StateRestore:
+		// Container is already locked in this case
+		c.SetRunning(int(e.Pid), e.State == libcontainerd.StateStart)
+		c.HasBeenManuallyStopped = false
+		c.HasBeenStartedBefore = true
+		daemon.setStateCounter(c)
+
+		daemon.initHealthMonitor(c)
+		if err := c.CheckpointTo(daemon.containersReplica); err != nil {
+			c.Reset(false)
+			return err
+		}
+
+		daemon.LogContainerEvent(c, "start")
+	case libcontainerd.StatePause:
+		// Container is already locked in this case
+		c.Paused = true
+		daemon.setStateCounter(c)
+		daemon.updateHealthMonitor(c)
+		if err := c.CheckpointTo(daemon.containersReplica); err != nil {
+			return err
+		}
+		daemon.LogContainerEvent(c, "pause")
+	case libcontainerd.StateResume:
+		// Container is already locked in this case
+		c.Paused = false
+		daemon.setStateCounter(c)
+		daemon.updateHealthMonitor(c)
+		if err := c.CheckpointTo(daemon.containersReplica); err != nil {
+			return err
+		}
+		daemon.LogContainerEvent(c, "unpause")
+	}
+	return nil
+}
+
+func (daemon *Daemon) autoRemove(c *container.Container) {
+	c.Lock()
+	ar := c.HostConfig.AutoRemove
+	c.Unlock()
+	if !ar {
+		return
+	}
+
+	var err error
+	if err = daemon.ContainerRm(c.ID, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err == nil {
+		return
+	}
+	if c := daemon.containers.Get(c.ID); c == nil {
+		return
+	}
+
+	if err != nil {
+		logrus.WithError(err).WithField("container", c.ID).Error("error removing container")
+	}
+}
diff --git a/vendor/github.com/moby/moby/daemon/monitor_linux.go b/vendor/github.com/moby/moby/daemon/monitor_linux.go
new file mode 100644
index 000000000..09f5af50c
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/monitor_linux.go
@@ -0,0 +1,19 @@
+package daemon
+
+import (
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/libcontainerd"
+)
+
+// platformConstructExitStatus returns a platform specific exit status structure
+func platformConstructExitStatus(e libcontainerd.StateInfo) *container.ExitStatus {
+	return &container.ExitStatus{
+		ExitCode:  int(e.ExitCode),
+		OOMKilled: e.OOMKilled,
+	}
+}
+
+// postRunProcessing performs any processing needed on the container after it has stopped.
+func (daemon *Daemon) postRunProcessing(container *container.Container, e libcontainerd.StateInfo) error {
+	return nil
+}
diff --git a/vendor/github.com/moby/moby/daemon/monitor_solaris.go b/vendor/github.com/moby/moby/daemon/monitor_solaris.go
new file mode 100644
index 000000000..5ccfada76
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/monitor_solaris.go
@@ -0,0 +1,18 @@
+package daemon
+
+import (
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/libcontainerd"
+)
+
+// platformConstructExitStatus returns a platform specific exit status structure
+func platformConstructExitStatus(e libcontainerd.StateInfo) *container.ExitStatus {
+	return &container.ExitStatus{
+		ExitCode: int(e.ExitCode),
+	}
+}
+
+// postRunProcessing performs any processing needed on the container after it has stopped.
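+// (note, hedged: of the platformConstructExitStatus variants, only the Linux
+// one populates OOMKilled; as the StateOOM handling in monitor.go notes, OOM
+// events are Linux-specific, so the Solaris and Windows variants omit that field.)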
+func (daemon *Daemon) postRunProcessing(container *container.Container, e libcontainerd.StateInfo) error {
+	return nil
+}
diff --git a/vendor/github.com/moby/moby/daemon/monitor_windows.go b/vendor/github.com/moby/moby/daemon/monitor_windows.go
new file mode 100644
index 000000000..9648b1b41
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/monitor_windows.go
@@ -0,0 +1,46 @@
+package daemon
+
+import (
+	"fmt"
+
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/libcontainerd"
+)
+
+// platformConstructExitStatus returns a platform specific exit status structure
+func platformConstructExitStatus(e libcontainerd.StateInfo) *container.ExitStatus {
+	return &container.ExitStatus{
+		ExitCode: int(e.ExitCode),
+	}
+}
+
+// postRunProcessing performs any processing needed on the container after it has stopped.
+func (daemon *Daemon) postRunProcessing(container *container.Container, e libcontainerd.StateInfo) error {
+	if e.ExitCode == 0 && e.UpdatePending {
+		spec, err := daemon.createSpec(container)
+		if err != nil {
+			return err
+		}
+
+		newOpts := []libcontainerd.CreateOption{&libcontainerd.ServicingOption{
+			IsServicing: true,
+		}}
+
+		copts, err := daemon.getLibcontainerdCreateOptions(container)
+		if err != nil {
+			return err
+		}
+
+		if copts != nil {
+			newOpts = append(newOpts, copts...)
+		}
+
+		// Create a new servicing container, which will start, complete the update, and merge back the
+		// results if it succeeded, all as part of the below function call.
+		if err := daemon.containerd.Create((container.ID + "_servicing"), "", "", *spec, container.InitializeStdio, newOpts...); err != nil {
+			container.SetExitCode(-1)
+			return fmt.Errorf("Post-run update servicing failed: %s", err)
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/moby/moby/daemon/mounts.go b/vendor/github.com/moby/moby/daemon/mounts.go
new file mode 100644
index 000000000..35c6ed59a
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/mounts.go
@@ -0,0 +1,53 @@
+package daemon
+
+import (
+	"fmt"
+	"strings"
+
+	mounttypes "github.com/docker/docker/api/types/mount"
+	"github.com/docker/docker/container"
+	volumestore "github.com/docker/docker/volume/store"
+)
+
+func (daemon *Daemon) prepareMountPoints(container *container.Container) error {
+	for _, config := range container.MountPoints {
+		if err := daemon.lazyInitializeVolume(container.ID, config); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (daemon *Daemon) removeMountPoints(container *container.Container, rm bool) error {
+	var rmErrors []string
+	for _, m := range container.MountPoints {
+		if m.Type != mounttypes.TypeVolume || m.Volume == nil {
+			continue
+		}
+		daemon.volumes.Dereference(m.Volume, container.ID)
+		if !rm {
+			continue
+		}
+
+		// Do not remove named mountpoints
+		// these are mountpoints specified like `docker run -v name:/foo`
+		if m.Spec.Source != "" {
+			continue
+		}
+
+		err := daemon.volumes.Remove(m.Volume)
+		// Ignore volume-in-use errors: the volume still being referenced
+		// by another container is not an error, but an implementation detail.
+		// This prevents docker from logging "ERROR: Volume in use"
+		// when another container is using the volume.
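+		// (illustration, hedged: a volume created by `docker run -v /data`
+		// and shared into a second container via --volumes-from is still
+		// "in use" when the first container is removed; the error is
+		// swallowed here and the volume is freed once its last reference
+		// is dereferenced.)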
+ if err != nil && !volumestore.IsInUse(err) { + rmErrors = append(rmErrors, err.Error()) + } + } + + if len(rmErrors) > 0 { + return fmt.Errorf("Error removing volumes:\n%v", strings.Join(rmErrors, "\n")) + } + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/names.go b/vendor/github.com/moby/moby/daemon/names.go new file mode 100644 index 000000000..7cdabeba9 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/names.go @@ -0,0 +1,111 @@ +package daemon + +import ( + "fmt" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/namesgenerator" + "github.com/docker/docker/pkg/stringid" +) + +var ( + validContainerNameChars = api.RestrictedNameChars + validContainerNamePattern = api.RestrictedNamePattern +) + +func (daemon *Daemon) registerName(container *container.Container) error { + if daemon.Exists(container.ID) { + return fmt.Errorf("Container is already loaded") + } + if err := validateID(container.ID); err != nil { + return err + } + if container.Name == "" { + name, err := daemon.generateNewName(container.ID) + if err != nil { + return err + } + container.Name = name + } + return daemon.containersReplica.ReserveName(container.Name, container.ID) +} + +func (daemon *Daemon) generateIDAndName(name string) (string, string, error) { + var ( + err error + id = stringid.GenerateNonCryptoID() + ) + + if name == "" { + if name, err = daemon.generateNewName(id); err != nil { + return "", "", err + } + return id, name, nil + } + + if name, err = daemon.reserveName(id, name); err != nil { + return "", "", err + } + + return id, name, nil +} + +func (daemon *Daemon) reserveName(id, name string) (string, error) { + if !validContainerNamePattern.MatchString(strings.TrimPrefix(name, "/")) { + return "", fmt.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars) + } + if name[0] != '/' { + name = "/" + name + } + + if err := daemon.containersReplica.ReserveName(name, id); err != nil { + if err == container.ErrNameReserved { + id, err := daemon.containersReplica.Snapshot().GetID(name) + if err != nil { + logrus.Errorf("got unexpected error while looking up reserved name: %v", err) + return "", err + } + return "", fmt.Errorf("Conflict. The container name %q is already in use by container %q. 
You have to remove (or rename) that container to be able to reuse that name.", name, id) + } + return "", fmt.Errorf("error reserving name: %q, error: %v", name, err) + } + return name, nil +} + +func (daemon *Daemon) releaseName(name string) { + daemon.containersReplica.ReleaseName(name) +} + +func (daemon *Daemon) generateNewName(id string) (string, error) { + var name string + for i := 0; i < 6; i++ { + name = namesgenerator.GetRandomName(i) + if name[0] != '/' { + name = "/" + name + } + + if err := daemon.containersReplica.ReserveName(name, id); err != nil { + if err == container.ErrNameReserved { + continue + } + return "", err + } + return name, nil + } + + name = "/" + stringid.TruncateID(id) + if err := daemon.containersReplica.ReserveName(name, id); err != nil { + return "", err + } + return name, nil +} + +func validateID(id string) error { + if id == "" { + return fmt.Errorf("Invalid empty id") + } + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/network.go b/vendor/github.com/moby/moby/daemon/network.go new file mode 100644 index 000000000..366c2a59e --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/network.go @@ -0,0 +1,567 @@ +package daemon + +import ( + "fmt" + "net" + "runtime" + "sort" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + apierrors "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + clustertypes "github.com/docker/docker/daemon/cluster/provider" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/runconfig" + "github.com/docker/libnetwork" + lncluster "github.com/docker/libnetwork/cluster" + "github.com/docker/libnetwork/driverapi" + "github.com/docker/libnetwork/ipamapi" + networktypes "github.com/docker/libnetwork/types" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// NetworkControllerEnabled checks if the networking stack is enabled. +// This feature depends on OS primitives and it's disabled in systems like Windows. +func (daemon *Daemon) NetworkControllerEnabled() bool { + return daemon.netController != nil +} + +// FindNetwork function finds a network for a given string that can represent network name or id +func (daemon *Daemon) FindNetwork(idName string) (libnetwork.Network, error) { + // Find by Name + n, err := daemon.GetNetworkByName(idName) + if err != nil && !isNoSuchNetworkError(err) { + return nil, err + } + + if n != nil { + return n, nil + } + + // Find by id + return daemon.GetNetworkByID(idName) +} + +func isNoSuchNetworkError(err error) bool { + _, ok := err.(libnetwork.ErrNoSuchNetwork) + return ok +} + +// GetNetworkByID function returns a network whose ID begins with the given prefix. +// It fails with an error if no matching, or more than one matching, networks are found. +func (daemon *Daemon) GetNetworkByID(partialID string) (libnetwork.Network, error) { + list := daemon.GetNetworksByID(partialID) + + if len(list) == 0 { + return nil, libnetwork.ErrNoSuchNetwork(partialID) + } + if len(list) > 1 { + return nil, libnetwork.ErrInvalidID(partialID) + } + return list[0], nil +} + +// GetNetworkByName function returns a network for a given network name. +// If no network name is given, the default network is returned. 
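+//
+// For illustration (hedged; IDs are placeholders): with networks 3fc18a...
+// and 3fd92b... present, GetNetworkByID("3fc") above resolves uniquely,
+// GetNetworkByID("3f") fails with ErrInvalidID (ambiguous prefix), and
+// FindNetwork tries name lookup first, then falls back to ID lookup.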
+func (daemon *Daemon) GetNetworkByName(name string) (libnetwork.Network, error) {
+	c := daemon.netController
+	if c == nil {
+		return nil, libnetwork.ErrNoSuchNetwork(name)
+	}
+	if name == "" {
+		name = c.Config().Daemon.DefaultNetwork
+	}
+	return c.NetworkByName(name)
+}
+
+// GetNetworksByID returns the list of networks whose IDs begin with the given prefix
+func (daemon *Daemon) GetNetworksByID(partialID string) []libnetwork.Network {
+	c := daemon.netController
+	if c == nil {
+		return nil
+	}
+	list := []libnetwork.Network{}
+	l := func(nw libnetwork.Network) bool {
+		if strings.HasPrefix(nw.ID(), partialID) {
+			list = append(list, nw)
+		}
+		return false
+	}
+	c.WalkNetworks(l)
+
+	return list
+}
+
+// getAllNetworks returns a list containing all networks
+func (daemon *Daemon) getAllNetworks() []libnetwork.Network {
+	return daemon.netController.Networks()
+}
+
+type ingressJob struct {
+	create  *clustertypes.NetworkCreateRequest
+	ip      net.IP
+	jobDone chan struct{}
+}
+
+var (
+	ingressWorkerOnce  sync.Once
+	ingressJobsChannel chan *ingressJob
+	ingressID          string
+)
+
+func (daemon *Daemon) startIngressWorker() {
+	ingressJobsChannel = make(chan *ingressJob, 100)
+	go func() {
+		for {
+			select {
+			case r := <-ingressJobsChannel:
+				if r.create != nil {
+					daemon.setupIngress(r.create, r.ip, ingressID)
+					ingressID = r.create.ID
+				} else {
+					daemon.releaseIngress(ingressID)
+					ingressID = ""
+				}
+				close(r.jobDone)
+			}
+		}
+	}()
+}
+
+// enqueueIngressJob adds an ingress add/rm request to the worker queue.
+// It guarantees the worker is started.
+func (daemon *Daemon) enqueueIngressJob(job *ingressJob) {
+	ingressWorkerOnce.Do(daemon.startIngressWorker)
+	ingressJobsChannel <- job
+}
+
+// SetupIngress sets up ingress networking.
+// The function returns a channel which will signal the caller when the programming is completed.
+func (daemon *Daemon) SetupIngress(create clustertypes.NetworkCreateRequest, nodeIP string) (<-chan struct{}, error) {
+	ip, _, err := net.ParseCIDR(nodeIP)
+	if err != nil {
+		return nil, err
+	}
+	done := make(chan struct{})
+	daemon.enqueueIngressJob(&ingressJob{&create, ip, done})
+	return done, nil
+}
+
+// ReleaseIngress releases the ingress networking.
+// The function returns a channel which will signal the caller when the programming is completed.
+func (daemon *Daemon) ReleaseIngress() (<-chan struct{}, error) {
+	done := make(chan struct{})
+	daemon.enqueueIngressJob(&ingressJob{nil, nil, done})
+	return done, nil
+}
+
+func (daemon *Daemon) setupIngress(create *clustertypes.NetworkCreateRequest, ip net.IP, staleID string) {
+	controller := daemon.netController
+	controller.AgentInitWait()
+
+	if staleID != "" && staleID != create.ID {
+		daemon.releaseIngress(staleID)
+	}
+
+	if _, err := daemon.createNetwork(create.NetworkCreateRequest, create.ID, true); err != nil {
+		// If it is any error other than an already-exists
+		// error, log the error and return.
+		if _, ok := err.(libnetwork.NetworkNameError); !ok {
+			logrus.Errorf("Failed creating ingress network: %v", err)
+			return
+		}
+		// Otherwise continue down the call to create or recreate sandbox.
+ } + + n, err := daemon.GetNetworkByID(create.ID) + if err != nil { + logrus.Errorf("Failed getting ingress network by id after creating: %v", err) + } + + sb, err := controller.NewSandbox("ingress-sbox", libnetwork.OptionIngress()) + if err != nil { + if _, ok := err.(networktypes.ForbiddenError); !ok { + logrus.Errorf("Failed creating ingress sandbox: %v", err) + } + return + } + + ep, err := n.CreateEndpoint("ingress-endpoint", libnetwork.CreateOptionIpam(ip, nil, nil, nil)) + if err != nil { + logrus.Errorf("Failed creating ingress endpoint: %v", err) + return + } + + if err := ep.Join(sb, nil); err != nil { + logrus.Errorf("Failed joining ingress sandbox to ingress endpoint: %v", err) + return + } + + if err := sb.EnableService(); err != nil { + logrus.Errorf("Failed enabling service for ingress sandbox") + } +} + +func (daemon *Daemon) releaseIngress(id string) { + controller := daemon.netController + if err := controller.SandboxDestroy("ingress-sbox"); err != nil { + logrus.Errorf("Failed to delete ingress sandbox: %v", err) + } + + if id == "" { + return + } + + n, err := controller.NetworkByID(id) + if err != nil { + logrus.Errorf("failed to retrieve ingress network %s: %v", id, err) + return + } + + for _, ep := range n.Endpoints() { + if err := ep.Delete(true); err != nil { + logrus.Errorf("Failed to delete endpoint %s (%s): %v", ep.Name(), ep.ID(), err) + return + } + } + + if err := n.Delete(); err != nil { + logrus.Errorf("Failed to delete ingress network %s: %v", n.ID(), err) + return + } + return +} + +// SetNetworkBootstrapKeys sets the bootstrap keys. +func (daemon *Daemon) SetNetworkBootstrapKeys(keys []*networktypes.EncryptionKey) error { + err := daemon.netController.SetKeys(keys) + if err == nil { + // Upon successful key setting dispatch the keys available event + daemon.cluster.SendClusterEvent(lncluster.EventNetworkKeysAvailable) + } + return err +} + +// UpdateAttachment notifies the attacher about the attachment config. +func (daemon *Daemon) UpdateAttachment(networkName, networkID, containerID string, config *network.NetworkingConfig) error { + if daemon.clusterProvider == nil { + return fmt.Errorf("cluster provider is not initialized") + } + + if err := daemon.clusterProvider.UpdateAttachment(networkName, containerID, config); err != nil { + return daemon.clusterProvider.UpdateAttachment(networkID, containerID, config) + } + + return nil +} + +// WaitForDetachment makes the cluster manager wait for detachment of +// the container from the network. +func (daemon *Daemon) WaitForDetachment(ctx context.Context, networkName, networkID, taskID, containerID string) error { + if daemon.clusterProvider == nil { + return fmt.Errorf("cluster provider is not initialized") + } + + return daemon.clusterProvider.WaitForDetachment(ctx, networkName, networkID, taskID, containerID) +} + +// CreateManagedNetwork creates an agent network. 
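+//
+// A hedged sketch of the user-facing path below (placeholder values;
+// NetworkCreateRequest embeds types.NetworkCreate):
+//
+//	resp, err := daemon.CreateNetwork(types.NetworkCreateRequest{
+//		Name: "app-net",
+//		NetworkCreate: types.NetworkCreate{
+//			Driver:         "bridge",
+//			CheckDuplicate: true,
+//			Labels:         map[string]string{"env": "dev"},
+//		},
+//	})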
+func (daemon *Daemon) CreateManagedNetwork(create clustertypes.NetworkCreateRequest) error { + _, err := daemon.createNetwork(create.NetworkCreateRequest, create.ID, true) + return err +} + +// CreateNetwork creates a network with the given name, driver and other optional parameters +func (daemon *Daemon) CreateNetwork(create types.NetworkCreateRequest) (*types.NetworkCreateResponse, error) { + resp, err := daemon.createNetwork(create, "", false) + if err != nil { + return nil, err + } + return resp, err +} + +func (daemon *Daemon) createNetwork(create types.NetworkCreateRequest, id string, agent bool) (*types.NetworkCreateResponse, error) { + if runconfig.IsPreDefinedNetwork(create.Name) && !agent { + err := fmt.Errorf("%s is a pre-defined network and cannot be created", create.Name) + return nil, apierrors.NewRequestForbiddenError(err) + } + + var warning string + nw, err := daemon.GetNetworkByName(create.Name) + if err != nil { + if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok { + return nil, err + } + } + if nw != nil { + // check if user defined CheckDuplicate, if set true, return err + // otherwise prepare a warning message + if create.CheckDuplicate { + return nil, libnetwork.NetworkNameError(create.Name) + } + warning = fmt.Sprintf("Network with name %s (id : %s) already exists", nw.Name(), nw.ID()) + } + + c := daemon.netController + driver := create.Driver + if driver == "" { + driver = c.Config().Daemon.DefaultDriver + } + + nwOptions := []libnetwork.NetworkOption{ + libnetwork.NetworkOptionEnableIPv6(create.EnableIPv6), + libnetwork.NetworkOptionDriverOpts(create.Options), + libnetwork.NetworkOptionLabels(create.Labels), + libnetwork.NetworkOptionAttachable(create.Attachable), + libnetwork.NetworkOptionIngress(create.Ingress), + libnetwork.NetworkOptionScope(create.Scope), + } + + if create.ConfigOnly { + nwOptions = append(nwOptions, libnetwork.NetworkOptionConfigOnly()) + } + + if create.IPAM != nil { + ipam := create.IPAM + v4Conf, v6Conf, err := getIpamConfig(ipam.Config) + if err != nil { + return nil, err + } + nwOptions = append(nwOptions, libnetwork.NetworkOptionIpam(ipam.Driver, "", v4Conf, v6Conf, ipam.Options)) + } + + if create.Internal { + nwOptions = append(nwOptions, libnetwork.NetworkOptionInternalNetwork()) + } + if agent { + nwOptions = append(nwOptions, libnetwork.NetworkOptionDynamic()) + nwOptions = append(nwOptions, libnetwork.NetworkOptionPersist(false)) + } + + if create.ConfigFrom != nil { + nwOptions = append(nwOptions, libnetwork.NetworkOptionConfigFrom(create.ConfigFrom.Network)) + } + + n, err := c.NewNetwork(driver, create.Name, id, nwOptions...) + if err != nil { + if _, ok := err.(libnetwork.ErrDataStoreNotInitialized); ok { + return nil, errors.New("This node is not a swarm manager. 
Use \"docker swarm init\" or \"docker swarm join\" to connect this node to swarm and try again.") + } + return nil, err + } + + daemon.pluginRefCount(driver, driverapi.NetworkPluginEndpointType, plugingetter.Acquire) + if create.IPAM != nil { + daemon.pluginRefCount(create.IPAM.Driver, ipamapi.PluginEndpointType, plugingetter.Acquire) + } + daemon.LogNetworkEvent(n, "create") + + return &types.NetworkCreateResponse{ + ID: n.ID(), + Warning: warning, + }, nil +} + +func (daemon *Daemon) pluginRefCount(driver, capability string, mode int) { + var builtinDrivers []string + + if capability == driverapi.NetworkPluginEndpointType { + builtinDrivers = daemon.netController.BuiltinDrivers() + } else if capability == ipamapi.PluginEndpointType { + builtinDrivers = daemon.netController.BuiltinIPAMDrivers() + } + + for _, d := range builtinDrivers { + if d == driver { + return + } + } + + if daemon.PluginStore != nil { + _, err := daemon.PluginStore.Get(driver, capability, mode) + if err != nil { + logrus.WithError(err).WithFields(logrus.Fields{"mode": mode, "driver": driver}).Error("Error handling plugin refcount operation") + } + } +} + +func getIpamConfig(data []network.IPAMConfig) ([]*libnetwork.IpamConf, []*libnetwork.IpamConf, error) { + ipamV4Cfg := []*libnetwork.IpamConf{} + ipamV6Cfg := []*libnetwork.IpamConf{} + for _, d := range data { + iCfg := libnetwork.IpamConf{} + iCfg.PreferredPool = d.Subnet + iCfg.SubPool = d.IPRange + iCfg.Gateway = d.Gateway + iCfg.AuxAddresses = d.AuxAddress + ip, _, err := net.ParseCIDR(d.Subnet) + if err != nil { + return nil, nil, fmt.Errorf("Invalid subnet %s : %v", d.Subnet, err) + } + if ip.To4() != nil { + ipamV4Cfg = append(ipamV4Cfg, &iCfg) + } else { + ipamV6Cfg = append(ipamV6Cfg, &iCfg) + } + } + return ipamV4Cfg, ipamV6Cfg, nil +} + +// UpdateContainerServiceConfig updates a service configuration. +func (daemon *Daemon) UpdateContainerServiceConfig(containerName string, serviceConfig *clustertypes.ServiceConfig) error { + container, err := daemon.GetContainer(containerName) + if err != nil { + return err + } + + container.NetworkSettings.Service = serviceConfig + return nil +} + +// ConnectContainerToNetwork connects the given container to the given +// network. If either cannot be found, an err is returned. If the +// network cannot be set up, an err is returned. +func (daemon *Daemon) ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error { + if runtime.GOOS == "solaris" { + return errors.New("docker network connect is unsupported on Solaris platform") + } + container, err := daemon.GetContainer(containerName) + if err != nil { + return err + } + return daemon.ConnectToNetwork(container, networkName, endpointConfig) +} + +// DisconnectContainerFromNetwork disconnects the given container from +// the given network. If either cannot be found, an err is returned. +func (daemon *Daemon) DisconnectContainerFromNetwork(containerName string, networkName string, force bool) error { + if runtime.GOOS == "solaris" { + return errors.New("docker network disconnect is unsupported on Solaris platform") + } + container, err := daemon.GetContainer(containerName) + if err != nil { + if force { + return daemon.ForceEndpointDelete(containerName, networkName) + } + return err + } + return daemon.DisconnectFromNetwork(container, networkName, force) +} + +// GetNetworkDriverList returns the list of plugins drivers +// registered for network. 
+func (daemon *Daemon) GetNetworkDriverList() []string { + if !daemon.NetworkControllerEnabled() { + return nil + } + + pluginList := daemon.netController.BuiltinDrivers() + + managedPlugins := daemon.PluginStore.GetAllManagedPluginsByCap(driverapi.NetworkPluginEndpointType) + + for _, plugin := range managedPlugins { + pluginList = append(pluginList, plugin.Name()) + } + + pluginMap := make(map[string]bool) + for _, plugin := range pluginList { + pluginMap[plugin] = true + } + + networks := daemon.netController.Networks() + + for _, network := range networks { + if !pluginMap[network.Type()] { + pluginList = append(pluginList, network.Type()) + pluginMap[network.Type()] = true + } + } + + sort.Strings(pluginList) + + return pluginList +} + +// DeleteManagedNetwork deletes an agent network. +func (daemon *Daemon) DeleteManagedNetwork(networkID string) error { + return daemon.deleteNetwork(networkID, true) +} + +// DeleteNetwork destroys a network unless it's one of docker's predefined networks. +func (daemon *Daemon) DeleteNetwork(networkID string) error { + return daemon.deleteNetwork(networkID, false) +} + +func (daemon *Daemon) deleteNetwork(networkID string, dynamic bool) error { + nw, err := daemon.FindNetwork(networkID) + if err != nil { + return err + } + + if runconfig.IsPreDefinedNetwork(nw.Name()) && !dynamic { + err := fmt.Errorf("%s is a pre-defined network and cannot be removed", nw.Name()) + return apierrors.NewRequestForbiddenError(err) + } + + if dynamic && !nw.Info().Dynamic() { + if runconfig.IsPreDefinedNetwork(nw.Name()) { + // Predefined networks now support swarm services. Make this + // a no-op when cluster requests to remove the predefined network. + return nil + } + err := fmt.Errorf("%s is not a dynamic network", nw.Name()) + return apierrors.NewRequestForbiddenError(err) + } + + if err := nw.Delete(); err != nil { + return err + } + + // If this is not a configuration only network, we need to + // update the corresponding remote drivers' reference counts + if !nw.Info().ConfigOnly() { + daemon.pluginRefCount(nw.Type(), driverapi.NetworkPluginEndpointType, plugingetter.Release) + ipamType, _, _, _ := nw.Info().IpamConfig() + daemon.pluginRefCount(ipamType, ipamapi.PluginEndpointType, plugingetter.Release) + daemon.LogNetworkEvent(nw, "destroy") + } + + return nil +} + +// GetNetworks returns a list of all networks +func (daemon *Daemon) GetNetworks() []libnetwork.Network { + return daemon.getAllNetworks() +} + +// clearAttachableNetworks removes the attachable networks +// after disconnecting any connected container +func (daemon *Daemon) clearAttachableNetworks() { + for _, n := range daemon.GetNetworks() { + if !n.Info().Attachable() { + continue + } + for _, ep := range n.Endpoints() { + epInfo := ep.Info() + if epInfo == nil { + continue + } + sb := epInfo.Sandbox() + if sb == nil { + continue + } + containerID := sb.ContainerID() + if err := daemon.DisconnectContainerFromNetwork(containerID, n.ID(), true); err != nil { + logrus.Warnf("Failed to disconnect container %s from swarm network %s on cluster leave: %v", + containerID, n.Name(), err) + } + } + if err := daemon.DeleteManagedNetwork(n.ID()); err != nil { + logrus.Warnf("Failed to remove swarm network %s on cluster leave: %v", n.Name(), err) + } + } +} diff --git a/vendor/github.com/moby/moby/daemon/network/settings.go b/vendor/github.com/moby/moby/daemon/network/settings.go new file mode 100644 index 000000000..8f6b7dd59 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/network/settings.go @@ -0,0 
+1,33 @@ +package network + +import ( + networktypes "github.com/docker/docker/api/types/network" + clustertypes "github.com/docker/docker/daemon/cluster/provider" + "github.com/docker/go-connections/nat" +) + +// Settings stores configuration details about the daemon network config +// TODO Windows. Many of these fields can be factored out., +type Settings struct { + Bridge string + SandboxID string + HairpinMode bool + LinkLocalIPv6Address string + LinkLocalIPv6PrefixLen int + Networks map[string]*EndpointSettings + Service *clustertypes.ServiceConfig + Ports nat.PortMap + SandboxKey string + SecondaryIPAddresses []networktypes.Address + SecondaryIPv6Addresses []networktypes.Address + IsAnonymousEndpoint bool + HasSwarmEndpoint bool +} + +// EndpointSettings is a package local wrapper for +// networktypes.EndpointSettings which stores Endpoint state that +// needs to be persisted to disk but not exposed in the api. +type EndpointSettings struct { + *networktypes.EndpointSettings + IPAMOperational bool +} diff --git a/vendor/github.com/moby/moby/daemon/oci_linux.go b/vendor/github.com/moby/moby/daemon/oci_linux.go new file mode 100644 index 000000000..6d74301a0 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/oci_linux.go @@ -0,0 +1,838 @@ +package daemon + +import ( + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/caps" + daemonconfig "github.com/docker/docker/daemon/config" + "github.com/docker/docker/oci" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/volume" + "github.com/opencontainers/runc/libcontainer/apparmor" + "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/devices" + "github.com/opencontainers/runc/libcontainer/user" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +var ( + deviceCgroupRuleRegex = regexp.MustCompile("^([acb]) ([0-9]+|\\*):([0-9]+|\\*) ([rwm]{1,3})$") +) + +func setResources(s *specs.Spec, r containertypes.Resources) error { + weightDevices, err := getBlkioWeightDevices(r) + if err != nil { + return err + } + readBpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceReadBps) + if err != nil { + return err + } + writeBpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceWriteBps) + if err != nil { + return err + } + readIOpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceReadIOps) + if err != nil { + return err + } + writeIOpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceWriteIOps) + if err != nil { + return err + } + + memoryRes := getMemoryResources(r) + cpuRes, err := getCPUResources(r) + if err != nil { + return err + } + blkioWeight := r.BlkioWeight + + specResources := &specs.LinuxResources{ + Memory: memoryRes, + CPU: cpuRes, + BlockIO: &specs.LinuxBlockIO{ + Weight: &blkioWeight, + WeightDevice: weightDevices, + ThrottleReadBpsDevice: readBpsDevice, + ThrottleWriteBpsDevice: writeBpsDevice, + ThrottleReadIOPSDevice: readIOpsDevice, + ThrottleWriteIOPSDevice: writeIOpsDevice, + }, + DisableOOMKiller: r.OomKillDisable, + Pids: &specs.LinuxPids{ + Limit: r.PidsLimit, + }, + } + + if s.Linux.Resources != nil && len(s.Linux.Resources.Devices) > 0 { + specResources.Devices = s.Linux.Resources.Devices + } + + 
+
+func setResources(s *specs.Spec, r containertypes.Resources) error {
+	weightDevices, err := getBlkioWeightDevices(r)
+	if err != nil {
+		return err
+	}
+	readBpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceReadBps)
+	if err != nil {
+		return err
+	}
+	writeBpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceWriteBps)
+	if err != nil {
+		return err
+	}
+	readIOpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceReadIOps)
+	if err != nil {
+		return err
+	}
+	writeIOpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceWriteIOps)
+	if err != nil {
+		return err
+	}
+
+	memoryRes := getMemoryResources(r)
+	cpuRes, err := getCPUResources(r)
+	if err != nil {
+		return err
+	}
+	blkioWeight := r.BlkioWeight
+
+	specResources := &specs.LinuxResources{
+		Memory: memoryRes,
+		CPU:    cpuRes,
+		BlockIO: &specs.LinuxBlockIO{
+			Weight:                  &blkioWeight,
+			WeightDevice:            weightDevices,
+			ThrottleReadBpsDevice:   readBpsDevice,
+			ThrottleWriteBpsDevice:  writeBpsDevice,
+			ThrottleReadIOPSDevice:  readIOpsDevice,
+			ThrottleWriteIOPSDevice: writeIOpsDevice,
+		},
+		DisableOOMKiller: r.OomKillDisable,
+		Pids: &specs.LinuxPids{
+			Limit: r.PidsLimit,
+		},
+	}
+
+	if s.Linux.Resources != nil && len(s.Linux.Resources.Devices) > 0 {
+		specResources.Devices = s.Linux.Resources.Devices
+	}
+
+	s.Linux.Resources = specResources
+	return nil
+}
+
+func setDevices(s *specs.Spec, c *container.Container) error {
+	// Build lists of devices allowed and created within the container.
+	var devs []specs.LinuxDevice
+	devPermissions := s.Linux.Resources.Devices
+	if c.HostConfig.Privileged {
+		hostDevices, err := devices.HostDevices()
+		if err != nil {
+			return err
+		}
+		for _, d := range hostDevices {
+			devs = append(devs, oci.Device(d))
+		}
+		devPermissions = []specs.LinuxDeviceCgroup{
+			{
+				Allow:  true,
+				Access: "rwm",
+			},
+		}
+	} else {
+		for _, deviceMapping := range c.HostConfig.Devices {
+			d, dPermissions, err := oci.DevicesFromPath(deviceMapping.PathOnHost, deviceMapping.PathInContainer, deviceMapping.CgroupPermissions)
+			if err != nil {
+				return err
+			}
+			devs = append(devs, d...)
+			devPermissions = append(devPermissions, dPermissions...)
+		}
+
+		for _, deviceCgroupRule := range c.HostConfig.DeviceCgroupRules {
+			ss := deviceCgroupRuleRegex.FindAllStringSubmatch(deviceCgroupRule, -1)
+			if len(ss) == 0 || len(ss[0]) != 5 {
+				return fmt.Errorf("invalid device cgroup rule format: '%s'", deviceCgroupRule)
+			}
+			matches := ss[0]
+
+			dPermissions := specs.LinuxDeviceCgroup{
+				Allow:  true,
+				Type:   matches[1],
+				Access: matches[4],
+			}
+			if matches[2] == "*" {
+				major := int64(-1)
+				dPermissions.Major = &major
+			} else {
+				major, err := strconv.ParseInt(matches[2], 10, 64)
+				if err != nil {
+					return fmt.Errorf("invalid major value in device cgroup rule format: '%s'", deviceCgroupRule)
+				}
+				dPermissions.Major = &major
+			}
+			if matches[3] == "*" {
+				minor := int64(-1)
+				dPermissions.Minor = &minor
+			} else {
+				minor, err := strconv.ParseInt(matches[3], 10, 64)
+				if err != nil {
+					return fmt.Errorf("invalid minor value in device cgroup rule format: '%s'", deviceCgroupRule)
+				}
+				dPermissions.Minor = &minor
+			}
+			devPermissions = append(devPermissions, dPermissions)
+		}
+	}
+
+	s.Linux.Devices = append(s.Linux.Devices, devs...)
+	s.Linux.Resources.Devices = devPermissions
+	return nil
+}
+
+func setRlimits(daemon *Daemon, s *specs.Spec, c *container.Container) error {
+	var rlimits []specs.LinuxRlimit
+
+	// We want to leave the original HostConfig alone so make a copy here
+	hostConfig := *c.HostConfig
+	// Merge with the daemon defaults
+	daemon.mergeUlimits(&hostConfig)
+	for _, ul := range hostConfig.Ulimits {
+		rlimits = append(rlimits, specs.LinuxRlimit{
+			Type: "RLIMIT_" + strings.ToUpper(ul.Name),
+			Soft: uint64(ul.Soft),
+			Hard: uint64(ul.Hard),
+		})
+	}
+
+	s.Process.Rlimits = rlimits
+	return nil
+}
+
+func setUser(s *specs.Spec, c *container.Container) error {
+	uid, gid, additionalGids, err := getUser(c, c.Config.User)
+	if err != nil {
+		return err
+	}
+	s.Process.User.UID = uid
+	s.Process.User.GID = gid
+	s.Process.User.AdditionalGids = additionalGids
+	return nil
+}
+
+func readUserFile(c *container.Container, p string) (io.ReadCloser, error) {
+	fp, err := symlink.FollowSymlinkInScope(filepath.Join(c.BaseFS, p), c.BaseFS)
+	if err != nil {
+		return nil, err
+	}
+	return os.Open(fp)
+}
+
+func getUser(c *container.Container, username string) (uint32, uint32, []uint32, error) {
+	passwdPath, err := user.GetPasswdPath()
+	if err != nil {
+		return 0, 0, nil, err
+	}
+	groupPath, err := user.GetGroupPath()
+	if err != nil {
+		return 0, 0, nil, err
+	}
+	passwdFile, err := readUserFile(c, passwdPath)
+	if err == nil {
+		defer passwdFile.Close()
+	}
+	groupFile, err := readUserFile(c, groupPath)
+	if err == nil {
+		defer groupFile.Close()
+	}
+
+	execUser, err := user.GetExecUser(username, nil, passwdFile, groupFile)
+	if err != nil {
+		return 0, 0, nil, err
+	}
+
+	// todo: fix this double read by a change to libcontainer/user pkg
+	groupFile, err = readUserFile(c, groupPath)
+	if err == nil {
+		defer groupFile.Close()
+	}
+	var addGroups []int
+	if len(c.HostConfig.GroupAdd) > 0 {
+		addGroups, err = user.GetAdditionalGroups(c.HostConfig.GroupAdd, groupFile)
+		if err != nil {
+			return 0, 0, nil, err
+		}
+	}
+	uid := uint32(execUser.Uid)
+	gid := uint32(execUser.Gid)
+	sgids := append(execUser.Sgids, addGroups...)
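+	// Illustrative example: for `docker run --user 1000:1000 --group-add staff`,
+	// execUser resolves to uid/gid 1000 and the gid of "staff" (looked up in
+	// the container's group file) arrives via addGroups, so it is carried
+	// into sgids below.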
+	var additionalGids []uint32
+	for _, g := range sgids {
+		additionalGids = append(additionalGids, uint32(g))
+	}
+	return uid, gid, additionalGids, nil
+}
+
+func setNamespace(s *specs.Spec, ns specs.LinuxNamespace) {
+	for i, n := range s.Linux.Namespaces {
+		if n.Type == ns.Type {
+			s.Linux.Namespaces[i] = ns
+			return
+		}
+	}
+	s.Linux.Namespaces = append(s.Linux.Namespaces, ns)
+}
+
+func setCapabilities(s *specs.Spec, c *container.Container) error {
+	var caplist []string
+	var err error
+	if c.HostConfig.Privileged {
+		caplist = caps.GetAllCapabilities()
+	} else {
+		caplist, err = caps.TweakCapabilities(s.Process.Capabilities.Effective, c.HostConfig.CapAdd, c.HostConfig.CapDrop)
+		if err != nil {
+			return err
+		}
+	}
+	s.Process.Capabilities.Effective = caplist
+	s.Process.Capabilities.Bounding = caplist
+	s.Process.Capabilities.Permitted = caplist
+	s.Process.Capabilities.Inheritable = caplist
+	return nil
+}
+
+func setNamespaces(daemon *Daemon, s *specs.Spec, c *container.Container) error {
+	userNS := false
+	// user
+	if c.HostConfig.UsernsMode.IsPrivate() {
+		uidMap := daemon.idMappings.UIDs()
+		if uidMap != nil {
+			userNS = true
+			ns := specs.LinuxNamespace{Type: "user"}
+			setNamespace(s, ns)
+			s.Linux.UIDMappings = specMapping(uidMap)
+			s.Linux.GIDMappings = specMapping(daemon.idMappings.GIDs())
+		}
+	}
+	// network
+	if !c.Config.NetworkDisabled {
+		ns := specs.LinuxNamespace{Type: "network"}
+		parts := strings.SplitN(string(c.HostConfig.NetworkMode), ":", 2)
+		if parts[0] == "container" {
+			nc, err := daemon.getNetworkedContainer(c.ID, c.HostConfig.NetworkMode.ConnectedContainer())
+			if err != nil {
+				return err
+			}
+			ns.Path = fmt.Sprintf("/proc/%d/ns/net", nc.State.GetPID())
+			if userNS {
+				// to share a net namespace, they must also share a user namespace
+				nsUser := specs.LinuxNamespace{Type: "user"}
+				nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", nc.State.GetPID())
+				setNamespace(s, nsUser)
+			}
+		} else if c.HostConfig.NetworkMode.IsHost() {
+			ns.Path = c.NetworkSettings.SandboxKey
+		}
+		setNamespace(s, ns)
+	}
+	// ipc
+	if c.HostConfig.IpcMode.IsContainer() {
+		ns := specs.LinuxNamespace{Type: "ipc"}
+		ic, err := daemon.getIpcContainer(c)
+		if err != nil {
+			return err
+		}
+		ns.Path = fmt.Sprintf("/proc/%d/ns/ipc", ic.State.GetPID())
+		setNamespace(s, ns)
+		if userNS {
+			// to share an IPC namespace, they must also share a user namespace
+			nsUser := specs.LinuxNamespace{Type: "user"}
+			nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", ic.State.GetPID())
+			setNamespace(s, nsUser)
+		}
+	} else if c.HostConfig.IpcMode.IsHost() {
+		oci.RemoveNamespace(s, specs.LinuxNamespaceType("ipc"))
+	} else {
+		ns := specs.LinuxNamespace{Type: "ipc"}
+		setNamespace(s, ns)
+	}
+	// pid
+	if c.HostConfig.PidMode.IsContainer() {
+		ns := specs.LinuxNamespace{Type: "pid"}
+		pc, err := daemon.getPidContainer(c)
+		if err != nil {
+			return err
+		}
+		ns.Path = fmt.Sprintf("/proc/%d/ns/pid", pc.State.GetPID())
+		setNamespace(s, ns)
+		if userNS {
+			// to share a PID namespace, they must also share a user namespace
+			nsUser := specs.LinuxNamespace{Type: "user"}
+			nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", pc.State.GetPID())
+			setNamespace(s, nsUser)
+		}
+	} else if c.HostConfig.PidMode.IsHost() {
+		oci.RemoveNamespace(s, specs.LinuxNamespaceType("pid"))
+	} else {
+		ns := specs.LinuxNamespace{Type: "pid"}
+		setNamespace(s, ns)
+	}
+	// uts
+	if c.HostConfig.UTSMode.IsHost() {
+		oci.RemoveNamespace(s, specs.LinuxNamespaceType("uts"))
+		s.Hostname = ""
+	}
+
+	return nil
+}
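+
+// Worked example, assuming a userns-remap range of 65536 subordinate IDs
+// starting at host ID 100000 (the exact values come from /etc/subuid and
+// /etc/subgid): specMapping below returns a single LinuxIDMapping with
+// ContainerID 0, HostID 100000 and Size 65536, i.e. container root maps to
+// an unprivileged host ID.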
+func specMapping(s []idtools.IDMap) []specs.LinuxIDMapping {
+	var ids []specs.LinuxIDMapping
+	for _, item := range s {
+		ids = append(ids, specs.LinuxIDMapping{
+			HostID:      uint32(item.HostID),
+			ContainerID: uint32(item.ContainerID),
+			Size:        uint32(item.Size),
+		})
+	}
+	return ids
+}
+
+func getMountInfo(mountinfo []*mount.Info, dir string) *mount.Info {
+	for _, m := range mountinfo {
+		if m.Mountpoint == dir {
+			return m
+		}
+	}
+	return nil
+}
+
+// Get the source mount point of directory passed in as argument. Also return
+// optional fields.
+func getSourceMount(source string) (string, string, error) {
+	// Ensure any symlinks are resolved.
+	sourcePath, err := filepath.EvalSymlinks(source)
+	if err != nil {
+		return "", "", err
+	}
+
+	mountinfos, err := mount.GetMounts()
+	if err != nil {
+		return "", "", err
+	}
+
+	mountinfo := getMountInfo(mountinfos, sourcePath)
+	if mountinfo != nil {
+		return sourcePath, mountinfo.Optional, nil
+	}
+
+	path := sourcePath
+	for {
+		path = filepath.Dir(path)
+
+		mountinfo = getMountInfo(mountinfos, path)
+		if mountinfo != nil {
+			return path, mountinfo.Optional, nil
+		}
+
+		if path == "/" {
+			break
+		}
+	}
+
+	// If we are here, we did not find parent mount. Something is wrong.
+	return "", "", fmt.Errorf("Could not find source mount of %s", source)
+}
+
+// Ensure mount point on which path is mounted, is shared.
+func ensureShared(path string) error {
+	sharedMount := false
+
+	sourceMount, optionalOpts, err := getSourceMount(path)
+	if err != nil {
+		return err
+	}
+	// Make sure source mount point is shared.
+	optsSplit := strings.Split(optionalOpts, " ")
+	for _, opt := range optsSplit {
+		if strings.HasPrefix(opt, "shared:") {
+			sharedMount = true
+			break
+		}
+	}
+
+	if !sharedMount {
+		return fmt.Errorf("Path %s is mounted on %s but it is not a shared mount.", path, sourceMount)
+	}
+	return nil
+}
+
+// Ensure mount point on which path is mounted, is either shared or slave.
+func ensureSharedOrSlave(path string) error {
+	sharedMount := false
+	slaveMount := false
+
+	sourceMount, optionalOpts, err := getSourceMount(path)
+	if err != nil {
+		return err
+	}
+	// Make sure source mount point is shared or slave.
+	optsSplit := strings.Split(optionalOpts, " ")
+	for _, opt := range optsSplit {
+		if strings.HasPrefix(opt, "shared:") {
+			sharedMount = true
+			break
+		} else if strings.HasPrefix(opt, "master:") {
+			slaveMount = true
+			break
+		}
+	}
+
+	if !sharedMount && !slaveMount {
+		return fmt.Errorf("Path %s is mounted on %s but it is not a shared or slave mount.", path, sourceMount)
+	}
+	return nil
+}
+
+var (
+	mountPropagationMap = map[string]int{
+		"private":  mount.PRIVATE,
+		"rprivate": mount.RPRIVATE,
+		"shared":   mount.SHARED,
+		"rshared":  mount.RSHARED,
+		"slave":    mount.SLAVE,
+		"rslave":   mount.RSLAVE,
+	}
+
+	mountPropagationReverseMap = map[int]string{
+		mount.PRIVATE:  "private",
+		mount.RPRIVATE: "rprivate",
+		mount.SHARED:   "shared",
+		mount.RSHARED:  "rshared",
+		mount.SLAVE:    "slave",
+		mount.RSLAVE:   "rslave",
+	}
+)
+
+func setMounts(daemon *Daemon, s *specs.Spec, c *container.Container, mounts []container.Mount) error {
+	userMounts := make(map[string]struct{})
+	for _, m := range mounts {
+		userMounts[m.Destination] = struct{}{}
+	}
+
+	// Filter out mounts that are overridden by user supplied mounts
+	var defaultMounts []specs.Mount
+	_, mountDev := userMounts["/dev"]
+	for _, m := range s.Mounts {
+		if _, ok := userMounts[m.Destination]; !ok {
+			if mountDev && strings.HasPrefix(m.Destination, "/dev/") {
+				continue
+			}
+			defaultMounts = append(defaultMounts, m)
+		}
+	}
+
+	s.Mounts = defaultMounts
+	for _, m := range mounts {
+		for _, cm := range s.Mounts {
+			if cm.Destination == m.Destination {
+				return fmt.Errorf("Duplicate mount point '%s'", m.Destination)
+			}
+		}
+
+		if m.Source == "tmpfs" {
+			data := m.Data
+			options := []string{"noexec", "nosuid", "nodev", string(volume.DefaultPropagationMode)}
+			if data != "" {
+				options = append(options, strings.Split(data, ",")...)
+			}
+
+			merged, err := mount.MergeTmpfsOptions(options)
+			if err != nil {
+				return err
+			}
+
+			s.Mounts = append(s.Mounts, specs.Mount{Destination: m.Destination, Source: m.Source, Type: "tmpfs", Options: merged})
+			continue
+		}
+
+		mt := specs.Mount{Destination: m.Destination, Source: m.Source, Type: "bind"}
+
+		// Determine property of RootPropagation based on volume
+		// properties. If a volume is shared, then keep root propagation
+		// shared. This should work for slave and private volumes too.
+		//
+		// For slave volumes, it can be either [r]shared/[r]slave.
+		//
+		// For private volumes any root propagation value should work.
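+		//
+		// For example, `docker run -v /src:/dst:rshared` arrives here with
+		// m.Propagation == "rshared"; the checks below then require /src to
+		// sit on a shared mount and may upgrade the spec's RootfsPropagation.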
+		pFlag := mountPropagationMap[m.Propagation]
+		if pFlag == mount.SHARED || pFlag == mount.RSHARED {
+			if err := ensureShared(m.Source); err != nil {
+				return err
+			}
+			rootpg := mountPropagationMap[s.Linux.RootfsPropagation]
+			if rootpg != mount.SHARED && rootpg != mount.RSHARED {
+				s.Linux.RootfsPropagation = mountPropagationReverseMap[mount.SHARED]
+			}
+		} else if pFlag == mount.SLAVE || pFlag == mount.RSLAVE {
+			if err := ensureSharedOrSlave(m.Source); err != nil {
+				return err
+			}
+			rootpg := mountPropagationMap[s.Linux.RootfsPropagation]
+			if rootpg != mount.SHARED && rootpg != mount.RSHARED && rootpg != mount.SLAVE && rootpg != mount.RSLAVE {
+				s.Linux.RootfsPropagation = mountPropagationReverseMap[mount.RSLAVE]
+			}
+		}
+
+		opts := []string{"rbind"}
+		if !m.Writable {
+			opts = append(opts, "ro")
+		}
+		if pFlag != 0 {
+			opts = append(opts, mountPropagationReverseMap[pFlag])
+		}
+
+		mt.Options = opts
+		s.Mounts = append(s.Mounts, mt)
+	}
+
+	if s.Root.Readonly {
+		for i, m := range s.Mounts {
+			switch m.Destination {
+			case "/proc", "/dev/pts", "/dev/mqueue": // /dev is remounted by runc
+				continue
+			}
+			if _, ok := userMounts[m.Destination]; !ok {
+				if !stringutils.InSlice(m.Options, "ro") {
+					s.Mounts[i].Options = append(s.Mounts[i].Options, "ro")
+				}
+			}
+		}
+	}
+
+	if c.HostConfig.Privileged {
+		if !s.Root.Readonly {
+			// clear readonly for /sys
+			for i := range s.Mounts {
+				if s.Mounts[i].Destination == "/sys" {
+					clearReadOnly(&s.Mounts[i])
+				}
+			}
+		}
+		s.Linux.ReadonlyPaths = nil
+		s.Linux.MaskedPaths = nil
+	}
+
+	// TODO: until a kernel/mount solution exists for handling remount in a user namespace,
+	// we must clear the readonly flag for the cgroups mount (@mrunalp concurs)
+	if uidMap := daemon.idMappings.UIDs(); uidMap != nil || c.HostConfig.Privileged {
+		for i, m := range s.Mounts {
+			if m.Type == "cgroup" {
+				clearReadOnly(&s.Mounts[i])
+			}
+		}
+	}
+
+	return nil
+}
+
+func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container) error {
+	linkedEnv, err := daemon.setupLinkedContainers(c)
+	if err != nil {
+		return err
+	}
+	s.Root = specs.Root{
+		Path:     c.BaseFS,
+		Readonly: c.HostConfig.ReadonlyRootfs,
+	}
+	if err := c.SetupWorkingDirectory(daemon.idMappings.RootPair()); err != nil {
+		return err
+	}
+	cwd := c.Config.WorkingDir
+	if len(cwd) == 0 {
+		cwd = "/"
+	}
+	s.Process.Args = append([]string{c.Path}, c.Args...)
+
+	// only add the custom init if it is specified and the container is running in its
+	// own private pid namespace. It does not make sense to add if it is running in the
+	// host namespace or another container's pid namespace where we already have an init
+	if c.HostConfig.PidMode.IsPrivate() {
+		if (c.HostConfig.Init != nil && *c.HostConfig.Init) ||
+			(c.HostConfig.Init == nil && daemon.configStore.Init) {
+			s.Process.Args = append([]string{"/dev/init", "--", c.Path}, c.Args...)
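+			// With this, the container entrypoint effectively becomes
+			// ["/dev/init", "--", <original command>]; the init binary
+			// (docker-init by default) is bind-mounted read-only below.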
+			var path string
+			if daemon.configStore.InitPath == "" {
+				path, err = exec.LookPath(daemonconfig.DefaultInitBinary)
+				if err != nil {
+					return err
+				}
+			}
+			if daemon.configStore.InitPath != "" {
+				path = daemon.configStore.InitPath
+			}
+			s.Mounts = append(s.Mounts, specs.Mount{
+				Destination: "/dev/init",
+				Type:        "bind",
+				Source:      path,
+				Options:     []string{"bind", "ro"},
+			})
+		}
+	}
+	s.Process.Cwd = cwd
+	s.Process.Env = c.CreateDaemonEnvironment(c.Config.Tty, linkedEnv)
+	s.Process.Terminal = c.Config.Tty
+	s.Hostname = c.FullHostname()
+
+	return nil
+}
+
+func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) {
+	s := oci.DefaultSpec()
+	if err := daemon.populateCommonSpec(&s, c); err != nil {
+		return nil, err
+	}
+
+	var cgroupsPath string
+	scopePrefix := "docker"
+	parent := "/docker"
+	useSystemd := UsingSystemd(daemon.configStore)
+	if useSystemd {
+		parent = "system.slice"
+	}
+
+	if c.HostConfig.CgroupParent != "" {
+		parent = c.HostConfig.CgroupParent
+	} else if daemon.configStore.CgroupParent != "" {
+		parent = daemon.configStore.CgroupParent
+	}
+
+	if useSystemd {
+		cgroupsPath = parent + ":" + scopePrefix + ":" + c.ID
+		logrus.Debugf("createSpec: cgroupsPath: %s", cgroupsPath)
+	} else {
+		cgroupsPath = filepath.Join(parent, c.ID)
+	}
+	s.Linux.CgroupsPath = cgroupsPath
+
+	if err := setResources(&s, c.HostConfig.Resources); err != nil {
+		return nil, fmt.Errorf("linux runtime spec resources: %v", err)
+	}
+	s.Linux.Resources.OOMScoreAdj = &c.HostConfig.OomScoreAdj
+	s.Linux.Sysctl = c.HostConfig.Sysctls
+
+	p := s.Linux.CgroupsPath
+	if useSystemd {
+		initPath, err := cgroups.GetInitCgroup("cpu")
+		if err != nil {
+			return nil, err
+		}
+		p, err = cgroups.GetOwnCgroup("cpu")
+		if err != nil {
+			return nil, err
+		}
+		p = filepath.Join(initPath, p)
+	}
+
+	// Clean path to guard against things like ../../../BAD
+	parentPath := filepath.Dir(p)
+	if !filepath.IsAbs(parentPath) {
+		parentPath = filepath.Clean("/" + parentPath)
+	}
+
+	if err := daemon.initCgroupsPath(parentPath); err != nil {
+		return nil, fmt.Errorf("linux init cgroups path: %v", err)
+	}
+	if err := setDevices(&s, c); err != nil {
+		return nil, fmt.Errorf("linux runtime spec devices: %v", err)
+	}
+	if err := setRlimits(daemon, &s, c); err != nil {
+		return nil, fmt.Errorf("linux runtime spec rlimits: %v", err)
+	}
+	if err := setUser(&s, c); err != nil {
+		return nil, fmt.Errorf("linux spec user: %v", err)
+	}
+	if err := setNamespaces(daemon, &s, c); err != nil {
+		return nil, fmt.Errorf("linux spec namespaces: %v", err)
+	}
+	if err := setCapabilities(&s, c); err != nil {
+		return nil, fmt.Errorf("linux spec capabilities: %v", err)
+	}
+	if err := setSeccomp(daemon, &s, c); err != nil {
+		return nil, fmt.Errorf("linux seccomp: %v", err)
+	}
+
+	if err := daemon.setupIpcDirs(c); err != nil {
+		return nil, err
+	}
+
+	if err := daemon.setupSecretDir(c); err != nil {
+		return nil, err
+	}
+
+	if err := daemon.setupConfigDir(c); err != nil {
+		return nil, err
+	}
+
+	ms, err := daemon.setupMounts(c)
+	if err != nil {
+		return nil, err
+	}
+
+	ms = append(ms, c.IpcMounts()...)
+
+	tmpfsMounts, err := c.TmpfsMounts()
+	if err != nil {
+		return nil, err
+	}
+	ms = append(ms, tmpfsMounts...)
+
+	if m := c.SecretMounts(); m != nil {
+		ms = append(ms, m...)
+	}
+
+	ms = append(ms, c.ConfigMounts()...)
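+
+	// Sort so that parent mount points are set up before any mounts nested
+	// under them; setMounts also rejects duplicate destinations.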
+	sort.Sort(mounts(ms))
+	if err := setMounts(daemon, &s, c, ms); err != nil {
+		return nil, fmt.Errorf("linux mounts: %v", err)
+	}
+
+	for _, ns := range s.Linux.Namespaces {
+		if ns.Type == "network" && ns.Path == "" && !c.Config.NetworkDisabled {
+			target, err := os.Readlink(filepath.Join("/proc", strconv.Itoa(os.Getpid()), "exe"))
+			if err != nil {
+				return nil, err
+			}
+
+			s.Hooks = &specs.Hooks{
+				Prestart: []specs.Hook{{
+					Path: target, // FIXME: cross-platform
+					Args: []string{"libnetwork-setkey", c.ID, daemon.netController.ID()},
+				}},
+			}
+		}
+	}
+
+	if apparmor.IsEnabled() {
+		var appArmorProfile string
+		if c.AppArmorProfile != "" {
+			appArmorProfile = c.AppArmorProfile
+		} else if c.HostConfig.Privileged {
+			appArmorProfile = "unconfined"
+		} else {
+			appArmorProfile = "docker-default"
+		}
+
+		if appArmorProfile == "docker-default" {
+			// Unattended upgrades and other fun services can unload AppArmor
+			// profiles inadvertently. Since we cannot store our profile in
+			// /etc/apparmor.d, nor can we practically add other ways of
+			// telling the system to keep our profile loaded, in order to make
+			// sure that we keep the default profile enabled we dynamically
+			// reload it if necessary.
+			if err := ensureDefaultAppArmorProfile(); err != nil {
+				return nil, err
+			}
+		}
+
+		s.Process.ApparmorProfile = appArmorProfile
+	}
+	s.Process.SelinuxLabel = c.GetProcessLabel()
+	s.Process.NoNewPrivileges = c.NoNewPrivileges
+	s.Linux.MountLabel = c.MountLabel
+
+	return (*specs.Spec)(&s), nil
+}
+
+func clearReadOnly(m *specs.Mount) {
+	var opt []string
+	for _, o := range m.Options {
+		if o != "ro" {
+			opt = append(opt, o)
+		}
+	}
+	m.Options = opt
+}
+
+// mergeUlimits merge the Ulimits from HostConfig with daemon defaults, and update HostConfig
+func (daemon *Daemon) mergeUlimits(c *containertypes.HostConfig) {
+	ulimits := c.Ulimits
+	// Merge ulimits with daemon defaults
+	ulIdx := make(map[string]struct{})
+	for _, ul := range ulimits {
+		ulIdx[ul.Name] = struct{}{}
+	}
+	for name, ul := range daemon.configStore.Ulimits {
+		if _, exists := ulIdx[name]; !exists {
+			ulimits = append(ulimits, ul)
+		}
+	}
+	c.Ulimits = ulimits
+}
diff --git a/vendor/github.com/moby/moby/daemon/oci_solaris.go b/vendor/github.com/moby/moby/daemon/oci_solaris.go
new file mode 100644
index 000000000..610efe10a
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/oci_solaris.go
@@ -0,0 +1,187 @@
+package daemon
+
+import (
+	"fmt"
+	"path/filepath"
+	"sort"
+	"strconv"
+
+	containertypes "github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/oci"
+	"github.com/docker/libnetwork"
+	"github.com/opencontainers/runtime-spec/specs-go"
+)
+
+func setResources(s *specs.Spec, r containertypes.Resources) error {
+	mem := getMemoryResources(r)
+	s.Solaris.CappedMemory = &mem
+
+	capCPU := getCPUResources(r)
+	s.Solaris.CappedCPU = &capCPU
+
+	return nil
+}
+
+func setUser(s *specs.Spec, c *container.Container) error {
+	uid, gid, additionalGids, err := getUser(c, c.Config.User)
+	if err != nil {
+		return err
+	}
+	s.Process.User.UID = uid
+	s.Process.User.GID = gid
+	s.Process.User.AdditionalGids = additionalGids
+	return nil
+}
+
+func getUser(c *container.Container, username string) (uint32, uint32, []uint32, error) {
+	return 0, 0, nil, nil
+}
+
+func (daemon *Daemon) getRunzAnet(ep libnetwork.Endpoint) (specs.Anet, error) {
+	var (
+		linkName  string
+		lowerLink string
+		defRouter string
+	)
+
+	epInfo := ep.Info()
+	if epInfo == nil {
+		return specs.Anet{}, fmt.Errorf("invalid endpoint")
+	}
+
+	nw, err := daemon.GetNetworkByName(ep.Network())
+	if err != nil {
+		return specs.Anet{}, fmt.Errorf("Failed to get network %s: %v", ep.Network(), err)
+	}
+
+	// Evaluate default router, linkname and lowerlink for interface endpoint
+	switch nw.Type() {
+	case "bridge":
+		defRouter = epInfo.Gateway().String()
+		linkName = "net0" // Should always be net0 for a container
+
+		// TODO We construct lowerlink here exactly as done for solaris bridge
+		// initialization. Need modular code to reuse.
+		options := nw.Info().DriverOptions()
+		nwName := options["com.docker.network.bridge.name"]
+		lastChar := nwName[len(nwName)-1:]
+		if _, err = strconv.Atoi(lastChar); err != nil {
+			lowerLink = nwName + "_0"
+		} else {
+			lowerLink = nwName
+		}
+
+	case "overlay":
+		defRouter = ""
+		linkName = "net1"
+
+		// TODO Follows generateVxlanName() in solaris overlay.
+		id := nw.ID()
+		if len(nw.ID()) > 12 {
+			id = nw.ID()[:12]
+		}
+		lowerLink = "vx_" + id + "_0"
+	}
+
+	runzanet := specs.Anet{
+		Linkname:          linkName,
+		Lowerlink:         lowerLink,
+		Allowedaddr:       epInfo.Iface().Address().String(),
+		Configallowedaddr: "true",
+		Defrouter:         defRouter,
+		Linkprotection:    "mac-nospoof, ip-nospoof",
+		Macaddress:        epInfo.Iface().MacAddress().String(),
+	}
+
+	return runzanet, nil
+}
+
+func (daemon *Daemon) setNetworkInterface(s *specs.Spec, c *container.Container) error {
+	var anets []specs.Anet
+
+	sb, err := daemon.netController.SandboxByID(c.NetworkSettings.SandboxID)
+	if err != nil {
+		return fmt.Errorf("Could not obtain sandbox for container")
+	}
+
+	// Populate interfaces required for each endpoint
+	for _, ep := range sb.Endpoints() {
+		runzanet, err := daemon.getRunzAnet(ep)
+		if err != nil {
+			return fmt.Errorf("Failed to get interface information for endpoint %s: %v", ep.ID(), err)
+		}
+		anets = append(anets, runzanet)
+	}
+
+	s.Solaris.Anet = anets
+	if anets != nil {
+		s.Solaris.Milestone = "svc:/milestone/container:default"
+	}
+	return nil
+}
+
+func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container) error {
+	linkedEnv, err := daemon.setupLinkedContainers(c)
+	if err != nil {
+		return err
+	}
+	s.Root = specs.Root{
+		Path:     filepath.Dir(c.BaseFS),
+		Readonly: c.HostConfig.ReadonlyRootfs,
+	}
+	if err := c.SetupWorkingDirectory(daemon.idMappings.RootPair()); err != nil {
+		return err
+	}
+	cwd := c.Config.WorkingDir
+	s.Process.Args = append([]string{c.Path}, c.Args...)
+	s.Process.Cwd = cwd
+	s.Process.Env = c.CreateDaemonEnvironment(c.Config.Tty, linkedEnv)
+	s.Process.Terminal = c.Config.Tty
+	s.Hostname = c.FullHostname()
+
+	return nil
+}
+
+func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) {
+	s := oci.DefaultSpec()
+	if err := daemon.populateCommonSpec(&s, c); err != nil {
+		return nil, err
+	}
+
+	if err := setResources(&s, c.HostConfig.Resources); err != nil {
+		return nil, fmt.Errorf("runtime spec resources: %v", err)
+	}
+
+	if err := setUser(&s, c); err != nil {
+		return nil, fmt.Errorf("spec user: %v", err)
+	}
+
+	if err := daemon.setNetworkInterface(&s, c); err != nil {
+		return nil, err
+	}
+
+	if err := daemon.setupIpcDirs(c); err != nil {
+		return nil, err
+	}
+
+	ms, err := daemon.setupMounts(c)
+	if err != nil {
+		return nil, err
+	}
+	ms = append(ms, c.IpcMounts()...)
+	tmpfsMounts, err := c.TmpfsMounts()
+	if err != nil {
+		return nil, err
+	}
+	ms = append(ms, tmpfsMounts...)
+	sort.Sort(mounts(ms))
+
+	return (*specs.Spec)(&s), nil
+}
+
+// mergeUlimits merge the Ulimits from HostConfig with daemon defaults, and update HostConfig
+// It will do nothing on non-Linux platform
+func (daemon *Daemon) mergeUlimits(c *containertypes.HostConfig) {
+	return
+}
diff --git a/vendor/github.com/moby/moby/daemon/oci_windows.go b/vendor/github.com/moby/moby/daemon/oci_windows.go
new file mode 100644
index 000000000..555a466fe
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/oci_windows.go
@@ -0,0 +1,207 @@
+package daemon
+
+import (
+	containertypes "github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/oci"
+	"github.com/docker/docker/pkg/sysinfo"
+	"github.com/docker/docker/pkg/system"
+	"github.com/opencontainers/runtime-spec/specs-go"
+	"golang.org/x/sys/windows"
+)
+
+func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) {
+	img, err := daemon.GetImage(string(c.ImageID))
+	if err != nil {
+		return nil, err
+	}
+
+	s := oci.DefaultOSSpec(img.OS)
+
+	linkedEnv, err := daemon.setupLinkedContainers(c)
+	if err != nil {
+		return nil, err
+	}
+
+	// Note, unlike Unix, we do NOT call into SetupWorkingDirectory as
+	// this is done in VMCompute. Further, we couldn't do it for Hyper-V
+	// containers anyway.
+
+	// In base spec
+	s.Hostname = c.FullHostname()
+
+	if err := daemon.setupSecretDir(c); err != nil {
+		return nil, err
+	}
+
+	if err := daemon.setupConfigDir(c); err != nil {
+		return nil, err
+	}
+
+	// In s.Mounts
+	mounts, err := daemon.setupMounts(c)
+	if err != nil {
+		return nil, err
+	}
+
+	var isHyperV bool
+	if c.HostConfig.Isolation.IsDefault() {
+		// Container using default isolation, so take the default from the daemon configuration
+		isHyperV = daemon.defaultIsolation.IsHyperV()
+	} else {
+		// Container may be requesting an explicit isolation mode.
+		isHyperV = c.HostConfig.Isolation.IsHyperV()
+	}
+
+	// If the container has not been started, and has configs or secrets,
+	// create symlinks to each config and secret. If it has been
+	// started before, the symlinks should have already been created. Also, it
+	// is important to not mount a Hyper-V container that has been started
+	// before, to protect the host from the container; for example, from
+	// malicious mutation of NTFS data structures.
+	if !c.HasBeenStartedBefore && (len(c.SecretReferences) > 0 || len(c.ConfigReferences) > 0) {
+		// The container file system is mounted before this function is called,
+		// except for Hyper-V containers, so mount it here in that case.
+		if isHyperV {
+			if err := daemon.Mount(c); err != nil {
+				return nil, err
+			}
+			defer daemon.Unmount(c)
+		}
+		if err := c.CreateSecretSymlinks(); err != nil {
+			return nil, err
+		}
+		if err := c.CreateConfigSymlinks(); err != nil {
+			return nil, err
+		}
+	}
+
+	if m := c.SecretMounts(); m != nil {
+		mounts = append(mounts, m...)
+	}
+
+	if m := c.ConfigMounts(); m != nil {
+		mounts = append(mounts, m...)
+	}
+
+	for _, mount := range mounts {
+		m := specs.Mount{
+			Source:      mount.Source,
+			Destination: mount.Destination,
+		}
+		if !mount.Writable {
+			m.Options = append(m.Options, "ro")
+		}
+		s.Mounts = append(s.Mounts, m)
+	}
+
+	// In s.Process
+	s.Process.Args = append([]string{c.Path}, c.Args...)
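+	// Unless the builder marked the args as already escaped, Windows args
+	// are re-escaped below so the command line survives argv re-joining
+	// (see escapeArgs at the bottom of this file).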
+	if !c.Config.ArgsEscaped && img.OS == "windows" {
+		s.Process.Args = escapeArgs(s.Process.Args)
+	}
+
+	s.Process.Cwd = c.Config.WorkingDir
+	s.Process.Env = c.CreateDaemonEnvironment(c.Config.Tty, linkedEnv)
+	if c.Config.Tty {
+		s.Process.Terminal = c.Config.Tty
+		s.Process.ConsoleSize.Height = c.HostConfig.ConsoleSize[0]
+		s.Process.ConsoleSize.Width = c.HostConfig.ConsoleSize[1]
+	}
+	s.Process.User.Username = c.Config.User
+
+	if img.OS == "windows" {
+		daemon.createSpecWindowsFields(c, &s, isHyperV)
+	} else {
+		// TODO @jhowardmsft LCOW Support. Modify this check when running in dual-mode
+		if system.LCOWSupported() && img.OS == "linux" {
+			daemon.createSpecLinuxFields(c, &s)
+		}
+	}
+
+	return (*specs.Spec)(&s), nil
+}
+
+// Sets the Windows-specific fields of the OCI spec
+func (daemon *Daemon) createSpecWindowsFields(c *container.Container, s *specs.Spec, isHyperV bool) {
+	if len(s.Process.Cwd) == 0 {
+		// We default to C:\ to workaround the oddity of the case that the
+		// default directory for cmd running as LocalSystem (or
+		// ContainerAdministrator) is c:\windows\system32. Hence docker run
+		// cmd will by default end in c:\windows\system32, rather
+		// than 'root' (/) on Linux. The oddity is that if you have a dockerfile
+		// which has no WORKDIR and has a COPY file ., . will be interpreted
+		// as c:\. Hence, setting it to default of c:\ makes for consistency.
+		s.Process.Cwd = `C:\`
+	}
+
+	s.Root.Readonly = false // Windows does not support a read-only root filesystem
+	if !isHyperV {
+		s.Root.Path = c.BaseFS // This is not set for Hyper-V containers
+	}
+
+	// In s.Windows.Resources
+	cpuShares := uint16(c.HostConfig.CPUShares)
+	cpuMaximum := uint16(c.HostConfig.CPUPercent) * 100
+	cpuCount := uint64(c.HostConfig.CPUCount)
+	if c.HostConfig.NanoCPUs > 0 {
+		if isHyperV {
+			cpuCount = uint64(c.HostConfig.NanoCPUs / 1e9)
+			leftoverNanoCPUs := c.HostConfig.NanoCPUs % 1e9
+			if leftoverNanoCPUs != 0 {
+				cpuCount++
+				cpuMaximum = uint16(c.HostConfig.NanoCPUs / int64(cpuCount) / (1e9 / 10000))
+				if cpuMaximum < 1 {
+					// The requested NanoCPUs is so small that we rounded to 0, use 1 instead
+					cpuMaximum = 1
+				}
+			}
+		} else {
+			cpuMaximum = uint16(c.HostConfig.NanoCPUs / int64(sysinfo.NumCPU()) / (1e9 / 10000))
+			if cpuMaximum < 1 {
+				// The requested NanoCPUs is so small that we rounded to 0, use 1 instead
+				cpuMaximum = 1
+			}
+		}
+	}
+	memoryLimit := uint64(c.HostConfig.Memory)
+	s.Windows.Resources = &specs.WindowsResources{
+		CPU: &specs.WindowsCPUResources{
+			Maximum: &cpuMaximum,
+			Shares:  &cpuShares,
+			Count:   &cpuCount,
+		},
+		Memory: &specs.WindowsMemoryResources{
+			Limit: &memoryLimit,
+		},
+		Storage: &specs.WindowsStorageResources{
+			Bps:  &c.HostConfig.IOMaximumBandwidth,
+			Iops: &c.HostConfig.IOMaximumIOps,
+		},
+	}
+}
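+
+// Worked example for the NanoCPUs conversion above (illustrative numbers):
+// `docker run --cpus 1.5` on a 4-CPU host with process isolation gives
+// NanoCPUs = 1500000000, so cpuMaximum = 1500000000 / 4 / 100000 = 3750,
+// i.e. 37.50% of total host CPU (the field is in 1/100ths of a percent).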
+
+// Sets the Linux-specific fields of the OCI spec
+// TODO: @jhowardmsft LCOW Support. We need to do a lot more pulling in what can
+// be pulled in from oci_linux.go.
+func (daemon *Daemon) createSpecLinuxFields(c *container.Container, s *specs.Spec) {
+	if len(s.Process.Cwd) == 0 {
+		s.Process.Cwd = `/`
+	}
+	s.Root.Path = "rootfs"
+	s.Root.Readonly = c.HostConfig.ReadonlyRootfs
+}
+
+func escapeArgs(args []string) []string {
+	escapedArgs := make([]string, len(args))
+	for i, a := range args {
+		escapedArgs[i] = windows.EscapeArg(a)
+	}
+	return escapedArgs
+}
+
+// mergeUlimits merge the Ulimits from HostConfig with daemon defaults, and update HostConfig
+// It will do nothing on non-Linux platform
+func (daemon *Daemon) mergeUlimits(c *containertypes.HostConfig) {
+	return
+}
diff --git a/vendor/github.com/moby/moby/daemon/pause.go b/vendor/github.com/moby/moby/daemon/pause.go
new file mode 100644
index 000000000..dbfafbc5f
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/pause.go
@@ -0,0 +1,49 @@
+package daemon
+
+import (
+	"fmt"
+
+	"github.com/docker/docker/container"
+)
+
+// ContainerPause pauses a container
+func (daemon *Daemon) ContainerPause(name string) error {
+	container, err := daemon.GetContainer(name)
+	if err != nil {
+		return err
+	}
+
+	if err := daemon.containerPause(container); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// containerPause pauses the container execution without stopping the process.
+// The execution can be resumed by calling containerUnpause.
+func (daemon *Daemon) containerPause(container *container.Container) error {
+	container.Lock()
+	defer container.Unlock()
+
+	// We cannot Pause the container which is not running
+	if !container.Running {
+		return errNotRunning{container.ID}
+	}
+
+	// We cannot Pause the container which is already paused
+	if container.Paused {
+		return fmt.Errorf("Container %s is already paused", container.ID)
+	}
+
+	// We cannot Pause the container which is restarting
+	if container.Restarting {
+		return errContainerIsRestarting(container.ID)
+	}
+
+	if err := daemon.containerd.Pause(container.ID); err != nil {
+		return fmt.Errorf("Cannot pause container %s: %s", container.ID, err)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/moby/moby/daemon/prune.go b/vendor/github.com/moby/moby/daemon/prune.go
new file mode 100644
index 000000000..9f8a545e7
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/prune.go
@@ -0,0 +1,474 @@
+package daemon
+
+import (
+	"fmt"
+	"regexp"
+	"runtime"
+	"sync/atomic"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/reference"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/filters"
+	timetypes "github.com/docker/docker/api/types/time"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/layer"
+	"github.com/docker/docker/pkg/directory"
+	"github.com/docker/docker/pkg/system"
+	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/volume"
+	"github.com/docker/libnetwork"
+	digest "github.com/opencontainers/go-digest"
+	"golang.org/x/net/context"
+)
+
+var (
+	// errPruneRunning is returned when a prune request is received while
+	// one is in progress
+	errPruneRunning = fmt.Errorf("a prune operation is already running")
+
+	containersAcceptedFilters = map[string]bool{
+		"label":  true,
+		"label!": true,
+		"until":  true,
+	}
+	volumesAcceptedFilters = map[string]bool{
+		"label":  true,
+		"label!": true,
+	}
+	imagesAcceptedFilters = map[string]bool{
+		"dangling": true,
+		"label":    true,
+		"label!":   true,
+		"until":    true,
+	}
+	networksAcceptedFilters = map[string]bool{
+		"label":  true,
+		"label!": true,
+		"until":  true,
+	}
+)
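+
+// Illustration: `docker container prune --filter until=24h --filter
+// label!=keep` reaches ContainersPrune with "until" and "label!" entries in
+// pruneFilters; anything outside the accepted-filter maps above fails
+// Validate before any deletion starts.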
+
+// ContainersPrune removes unused containers
+func (daemon *Daemon) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (*types.ContainersPruneReport, error) {
+	if !atomic.CompareAndSwapInt32(&daemon.pruneRunning, 0, 1) {
+		return nil, errPruneRunning
+	}
+	defer atomic.StoreInt32(&daemon.pruneRunning, 0)
+
+	rep := &types.ContainersPruneReport{}
+
+	// make sure that only accepted filters have been received
+	err := pruneFilters.Validate(containersAcceptedFilters)
+	if err != nil {
+		return nil, err
+	}
+
+	until, err := getUntilFromPruneFilters(pruneFilters)
+	if err != nil {
+		return nil, err
+	}
+
+	allContainers := daemon.List()
+	for _, c := range allContainers {
+		select {
+		case <-ctx.Done():
+			logrus.Debugf("ContainersPrune operation cancelled: %#v", *rep)
+			return rep, nil
+		default:
+		}
+
+		if !c.IsRunning() {
+			if !until.IsZero() && c.Created.After(until) {
+				continue
+			}
+			if !matchLabels(pruneFilters, c.Config.Labels) {
+				continue
+			}
+			cSize, _ := daemon.getSize(c.ID)
+			// TODO: sets RmLink to true?
+			err := daemon.ContainerRm(c.ID, &types.ContainerRmConfig{})
+			if err != nil {
+				logrus.Warnf("failed to prune container %s: %v", c.ID, err)
+				continue
+			}
+			if cSize > 0 {
+				rep.SpaceReclaimed += uint64(cSize)
+			}
+			rep.ContainersDeleted = append(rep.ContainersDeleted, c.ID)
+		}
+	}
+
+	return rep, nil
+}
+
+// VolumesPrune removes unused local volumes
+func (daemon *Daemon) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (*types.VolumesPruneReport, error) {
+	if !atomic.CompareAndSwapInt32(&daemon.pruneRunning, 0, 1) {
+		return nil, errPruneRunning
+	}
+	defer atomic.StoreInt32(&daemon.pruneRunning, 0)
+
+	// make sure that only accepted filters have been received
+	err := pruneFilters.Validate(volumesAcceptedFilters)
+	if err != nil {
+		return nil, err
+	}
+
+	rep := &types.VolumesPruneReport{}
+
+	pruneVols := func(v volume.Volume) error {
+		select {
+		case <-ctx.Done():
+			logrus.Debugf("VolumesPrune operation cancelled: %#v", *rep)
+			return ctx.Err()
+		default:
+		}
+
+		name := v.Name()
+		refs := daemon.volumes.Refs(v)
+
+		if len(refs) == 0 {
+			detailedVolume, ok := v.(volume.DetailedVolume)
+			if ok {
+				if !matchLabels(pruneFilters, detailedVolume.Labels()) {
+					return nil
+				}
+			}
+			vSize, err := directory.Size(v.Path())
+			if err != nil {
+				logrus.Warnf("could not determine size of volume %s: %v", name, err)
+			}
+			err = daemon.volumes.Remove(v)
+			if err != nil {
+				logrus.Warnf("could not remove volume %s: %v", name, err)
+				return nil
+			}
+			rep.SpaceReclaimed += uint64(vSize)
+			rep.VolumesDeleted = append(rep.VolumesDeleted, name)
+		}
+
+		return nil
+	}
+
+	err = daemon.traverseLocalVolumes(pruneVols)
+	if err == context.Canceled {
+		return rep, nil
+	}
+
+	return rep, err
+}
+
+// ImagesPrune removes unused images
+func (daemon *Daemon) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error) {
+	// TODO @jhowardmsft LCOW Support: This will need revisiting later.
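+	// With LCOW enabled the daemon keeps one image store per platform; the
+	// logic below currently prunes only the Linux store.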
+	platform := runtime.GOOS
+	if system.LCOWSupported() {
+		platform = "linux"
+	}
+
+	if !atomic.CompareAndSwapInt32(&daemon.pruneRunning, 0, 1) {
+		return nil, errPruneRunning
+	}
+	defer atomic.StoreInt32(&daemon.pruneRunning, 0)
+
+	// make sure that only accepted filters have been received
+	err := pruneFilters.Validate(imagesAcceptedFilters)
+	if err != nil {
+		return nil, err
+	}
+
+	rep := &types.ImagesPruneReport{}
+
+	danglingOnly := true
+	if pruneFilters.Include("dangling") {
+		if pruneFilters.ExactMatch("dangling", "false") || pruneFilters.ExactMatch("dangling", "0") {
+			danglingOnly = false
+		} else if !pruneFilters.ExactMatch("dangling", "true") && !pruneFilters.ExactMatch("dangling", "1") {
+			return nil, fmt.Errorf("Invalid filter 'dangling=%s'", pruneFilters.Get("dangling"))
+		}
+	}
+
+	until, err := getUntilFromPruneFilters(pruneFilters)
+	if err != nil {
+		return nil, err
+	}
+
+	var allImages map[image.ID]*image.Image
+	if danglingOnly {
+		allImages = daemon.stores[platform].imageStore.Heads()
+	} else {
+		allImages = daemon.stores[platform].imageStore.Map()
+	}
+	allContainers := daemon.List()
+	imageRefs := map[string]bool{}
+	for _, c := range allContainers {
+		select {
+		case <-ctx.Done():
+			return nil, ctx.Err()
+		default:
+			imageRefs[c.ID] = true
+		}
+	}
+
+	// Filter intermediary images and get their unique size
+	allLayers := daemon.stores[platform].layerStore.Map()
+	topImages := map[image.ID]*image.Image{}
+	for id, img := range allImages {
+		select {
+		case <-ctx.Done():
+			return nil, ctx.Err()
+		default:
+			dgst := digest.Digest(id)
+			if len(daemon.stores[platform].referenceStore.References(dgst)) == 0 && len(daemon.stores[platform].imageStore.Children(id)) != 0 {
+				continue
+			}
+			if !until.IsZero() && img.Created.After(until) {
+				continue
+			}
+			if img.Config != nil && !matchLabels(pruneFilters, img.Config.Labels) {
+				continue
+			}
+			topImages[id] = img
+		}
+	}
+
+	canceled := false
+deleteImagesLoop:
+	for id := range topImages {
+		select {
+		case <-ctx.Done():
+			// we still want to calculate freed size and return the data
+			canceled = true
+			break deleteImagesLoop
+		default:
+		}
+
+		dgst := digest.Digest(id)
+		hex := dgst.Hex()
+		if _, ok := imageRefs[hex]; ok {
+			continue
+		}
+
+		deletedImages := []types.ImageDeleteResponseItem{}
+		refs := daemon.stores[platform].referenceStore.References(dgst)
+		if len(refs) > 0 {
+			shouldDelete := !danglingOnly
+			if !shouldDelete {
+				hasTag := false
+				for _, ref := range refs {
+					if _, ok := ref.(reference.NamedTagged); ok {
+						hasTag = true
+						break
+					}
+				}
+
+				// Only delete if it's untagged (i.e. repo:<none>)
+				shouldDelete = !hasTag
+			}
+
+			if shouldDelete {
+				for _, ref := range refs {
+					imgDel, err := daemon.ImageDelete(ref.String(), false, true)
+					if err != nil {
+						logrus.Warnf("could not delete reference %s: %v", ref.String(), err)
+						continue
+					}
+					deletedImages = append(deletedImages, imgDel...)
+				}
+			}
+		} else {
+			imgDel, err := daemon.ImageDelete(hex, false, true)
+			if err != nil {
+				logrus.Warnf("could not delete image %s: %v", hex, err)
+				continue
+			}
+			deletedImages = append(deletedImages, imgDel...)
+		}
+
+		rep.ImagesDeleted = append(rep.ImagesDeleted, deletedImages...)
+	}
+
+	// Compute how much space was freed
+	for _, d := range rep.ImagesDeleted {
+		if d.Deleted != "" {
+			chid := layer.ChainID(d.Deleted)
+			if l, ok := allLayers[chid]; ok {
+				diffSize, err := l.DiffSize()
+				if err != nil {
+					logrus.Warnf("failed to get layer %s size: %v", chid, err)
+					continue
+				}
+				rep.SpaceReclaimed += uint64(diffSize)
+			}
+		}
+	}
+
+	if canceled {
+		logrus.Debugf("ImagesPrune operation cancelled: %#v", *rep)
+	}
+
+	return rep, nil
+}
+
+// localNetworksPrune removes unused local networks
+func (daemon *Daemon) localNetworksPrune(ctx context.Context, pruneFilters filters.Args) *types.NetworksPruneReport {
+	rep := &types.NetworksPruneReport{}
+
+	until, _ := getUntilFromPruneFilters(pruneFilters)
+
+	// When the function returns true, the walk will stop.
+	l := func(nw libnetwork.Network) bool {
+		select {
+		case <-ctx.Done():
+			// context cancelled
+			return true
+		default:
+		}
+		if nw.Info().ConfigOnly() {
+			return false
+		}
+		if !until.IsZero() && nw.Info().Created().After(until) {
+			return false
+		}
+		if !matchLabels(pruneFilters, nw.Info().Labels()) {
+			return false
+		}
+		nwName := nw.Name()
+		if runconfig.IsPreDefinedNetwork(nwName) {
+			return false
+		}
+		if len(nw.Endpoints()) > 0 {
+			return false
+		}
+		if err := daemon.DeleteNetwork(nw.ID()); err != nil {
+			logrus.Warnf("could not remove local network %s: %v", nwName, err)
+			return false
+		}
+		rep.NetworksDeleted = append(rep.NetworksDeleted, nwName)
+		return false
+	}
+	daemon.netController.WalkNetworks(l)
+	return rep
+}
+
+// clusterNetworksPrune removes unused cluster networks
+func (daemon *Daemon) clusterNetworksPrune(ctx context.Context, pruneFilters filters.Args) (*types.NetworksPruneReport, error) {
+	rep := &types.NetworksPruneReport{}
+
+	until, _ := getUntilFromPruneFilters(pruneFilters)
+
+	cluster := daemon.GetCluster()
+
+	if !cluster.IsManager() {
+		return rep, nil
+	}
+
+	networks, err := cluster.GetNetworks()
+	if err != nil {
+		return rep, err
+	}
+	networkIsInUse := regexp.MustCompile(`network ([[:alnum:]]+) is in use`)
+	for _, nw := range networks {
+		select {
+		case <-ctx.Done():
+			return rep, nil
+		default:
+			if nw.Ingress {
+				// Routing-mesh network removal has to be explicitly invoked by user
+				continue
+			}
+			if !until.IsZero() && nw.Created.After(until) {
+				continue
+			}
+			if !matchLabels(pruneFilters, nw.Labels) {
+				continue
+			}
+			// https://github.com/docker/docker/issues/24186
+			// `docker network inspect` unfortunately displays ONLY those containers that are local to that node.
+			// So we try to remove it anyway and check the error
+			err = cluster.RemoveNetwork(nw.ID)
+			if err != nil {
+				// we can safely ignore the "network .. is in use" error
+				match := networkIsInUse.FindStringSubmatch(err.Error())
+				if len(match) != 2 || match[1] != nw.ID {
+					logrus.Warnf("could not remove cluster network %s: %v", nw.Name, err)
+				}
+				continue
+			}
+			rep.NetworksDeleted = append(rep.NetworksDeleted, nw.Name)
+		}
+	}
+	return rep, nil
+}
+
+// NetworksPrune removes unused networks
+func (daemon *Daemon) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (*types.NetworksPruneReport, error) {
+	if !atomic.CompareAndSwapInt32(&daemon.pruneRunning, 0, 1) {
+		return nil, errPruneRunning
+	}
+	defer atomic.StoreInt32(&daemon.pruneRunning, 0)
+
+	// make sure that only accepted filters have been received
+	err := pruneFilters.Validate(networksAcceptedFilters)
+	if err != nil {
+		return nil, err
+	}
+
+	if _, err := getUntilFromPruneFilters(pruneFilters); err != nil {
+		return nil, err
+	}
+
+	rep := &types.NetworksPruneReport{}
+	if clusterRep, err := daemon.clusterNetworksPrune(ctx, pruneFilters); err == nil {
+		rep.NetworksDeleted = append(rep.NetworksDeleted, clusterRep.NetworksDeleted...)
+	}
+
+	localRep := daemon.localNetworksPrune(ctx, pruneFilters)
+	rep.NetworksDeleted = append(rep.NetworksDeleted, localRep.NetworksDeleted...)
+
+	select {
+	case <-ctx.Done():
+		logrus.Debugf("NetworksPrune operation cancelled: %#v", *rep)
+		return rep, nil
+	default:
+	}
+
+	return rep, nil
+}
+
+func getUntilFromPruneFilters(pruneFilters filters.Args) (time.Time, error) {
+	until := time.Time{}
+	if !pruneFilters.Include("until") {
+		return until, nil
+	}
+	untilFilters := pruneFilters.Get("until")
+	if len(untilFilters) > 1 {
+		return until, fmt.Errorf("more than one until filter specified")
+	}
+	ts, err := timetypes.GetTimestamp(untilFilters[0], time.Now())
+	if err != nil {
+		return until, err
+	}
+	seconds, nanoseconds, err := timetypes.ParseTimestamps(ts, 0)
+	if err != nil {
+		return until, err
+	}
+	until = time.Unix(seconds, nanoseconds)
+	return until, nil
+}
+
+func matchLabels(pruneFilters filters.Args, labels map[string]string) bool {
+	if !pruneFilters.MatchKVList("label", labels) {
+		return false
+	}
+	// By default MatchKVList will return true if field (like 'label!') does not exist
+	// So we have to add additional Include("label!") check
+	if pruneFilters.Include("label!") {
+		if pruneFilters.MatchKVList("label!", labels) {
+			return false
+		}
+	}
+	return true
+}
diff --git a/vendor/github.com/moby/moby/daemon/reload.go b/vendor/github.com/moby/moby/daemon/reload.go
new file mode 100644
index 000000000..0200bcf06
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/reload.go
@@ -0,0 +1,312 @@
+package daemon
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/daemon/config"
+	"github.com/docker/docker/daemon/discovery"
+	"github.com/docker/docker/libcontainerd"
+)
+
+// Reload reads configuration changes and modifies the
+// daemon according to those changes.
+// These are the settings that Reload changes:
+// - Platform runtime
+// - Daemon debug log level
+// - Daemon max concurrent downloads
+// - Daemon max concurrent uploads
+// - Daemon shutdown timeout (in seconds)
+// - Cluster discovery (reconfigure and restart)
+// - Daemon labels
+// - Insecure registries
+// - Registry mirrors
+// - Daemon live restore
+func (daemon *Daemon) Reload(conf *config.Config) (err error) {
+	daemon.configStore.Lock()
+	attributes := map[string]string{}
+
+	defer func() {
+		// we're unlocking here, because
+		// LogDaemonEventWithAttributes() -> SystemInfo() -> GetAllRuntimes()
+		// holds that lock too.
+		daemon.configStore.Unlock()
+		if err == nil {
+			daemon.LogDaemonEventWithAttributes("reload", attributes)
+		}
+	}()
+
+	daemon.reloadPlatform(conf, attributes)
+	daemon.reloadDebug(conf, attributes)
+	daemon.reloadMaxConcurrentDownloadsAndUploads(conf, attributes)
+	daemon.reloadShutdownTimeout(conf, attributes)
+
+	if err := daemon.reloadClusterDiscovery(conf, attributes); err != nil {
+		return err
+	}
+	if err := daemon.reloadLabels(conf, attributes); err != nil {
+		return err
+	}
+	if err := daemon.reloadAllowNondistributableArtifacts(conf, attributes); err != nil {
+		return err
+	}
+	if err := daemon.reloadInsecureRegistries(conf, attributes); err != nil {
+		return err
+	}
+	if err := daemon.reloadRegistryMirrors(conf, attributes); err != nil {
+		return err
+	}
+	if err := daemon.reloadLiveRestore(conf, attributes); err != nil {
+		return err
+	}
+	return nil
+}
+
+// reloadDebug updates configuration with Debug option
+// and updates the passed attributes
+func (daemon *Daemon) reloadDebug(conf *config.Config, attributes map[string]string) {
+	// update corresponding configuration
+	if conf.IsValueSet("debug") {
+		daemon.configStore.Debug = conf.Debug
+	}
+	// prepare reload event attributes with updatable configurations
+	attributes["debug"] = fmt.Sprintf("%t", daemon.configStore.Debug)
+}
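+
+// A reload is typically triggered by editing /etc/docker/daemon.json and
+// sending SIGHUP to the daemon (e.g. `kill -HUP $(pidof dockerd)`); a change
+// such as {"debug": true} then flows through reloadDebug above.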
+
+// reloadMaxConcurrentDownloadsAndUploads updates configuration with max concurrent
+// download and upload options and updates the passed attributes
+func (daemon *Daemon) reloadMaxConcurrentDownloadsAndUploads(conf *config.Config, attributes map[string]string) {
+	// If no value is set for max-concurrent-downloads we assume it is the default value
+	// We always "reset" as the cost is lightweight and easy to maintain.
+	if conf.IsValueSet("max-concurrent-downloads") && conf.MaxConcurrentDownloads != nil {
+		*daemon.configStore.MaxConcurrentDownloads = *conf.MaxConcurrentDownloads
+	} else {
+		maxConcurrentDownloads := config.DefaultMaxConcurrentDownloads
+		daemon.configStore.MaxConcurrentDownloads = &maxConcurrentDownloads
+	}
+	logrus.Debugf("Reset Max Concurrent Downloads: %d", *daemon.configStore.MaxConcurrentDownloads)
+	if daemon.downloadManager != nil {
+		daemon.downloadManager.SetConcurrency(*daemon.configStore.MaxConcurrentDownloads)
+	}
+
+	// prepare reload event attributes with updatable configurations
+	attributes["max-concurrent-downloads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentDownloads)
+
+	// If no value is set for max-concurrent-uploads we assume it is the default value
+	// We always "reset" as the cost is lightweight and easy to maintain.
+	if conf.IsValueSet("max-concurrent-uploads") && conf.MaxConcurrentUploads != nil {
+		*daemon.configStore.MaxConcurrentUploads = *conf.MaxConcurrentUploads
+	} else {
+		maxConcurrentUploads := config.DefaultMaxConcurrentUploads
+		daemon.configStore.MaxConcurrentUploads = &maxConcurrentUploads
+	}
+	logrus.Debugf("Reset Max Concurrent Uploads: %d", *daemon.configStore.MaxConcurrentUploads)
+	if daemon.uploadManager != nil {
+		daemon.uploadManager.SetConcurrency(*daemon.configStore.MaxConcurrentUploads)
+	}
+
+	// prepare reload event attributes with updatable configurations
+	attributes["max-concurrent-uploads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentUploads)
+}
+
+// reloadShutdownTimeout updates configuration with daemon shutdown timeout option
+// and updates the passed attributes
+func (daemon *Daemon) reloadShutdownTimeout(conf *config.Config, attributes map[string]string) {
+	// update corresponding configuration
+	if conf.IsValueSet("shutdown-timeout") {
+		daemon.configStore.ShutdownTimeout = conf.ShutdownTimeout
+		logrus.Debugf("Reset Shutdown Timeout: %d", daemon.configStore.ShutdownTimeout)
+	}
+
+	// prepare reload event attributes with updatable configurations
+	attributes["shutdown-timeout"] = fmt.Sprintf("%d", daemon.configStore.ShutdownTimeout)
+}
+
+// reloadClusterDiscovery updates configuration with cluster discovery options
+// and updates the passed attributes
+func (daemon *Daemon) reloadClusterDiscovery(conf *config.Config, attributes map[string]string) (err error) {
+	defer func() {
+		// prepare reload event attributes with updatable configurations
+		attributes["cluster-store"] = conf.ClusterStore
+		attributes["cluster-advertise"] = conf.ClusterAdvertise
+
+		attributes["cluster-store-opts"] = "{}"
+		if daemon.configStore.ClusterOpts != nil {
+			opts, err2 := json.Marshal(conf.ClusterOpts)
+			if err == nil {
+				err = err2
+			}
+			attributes["cluster-store-opts"] = string(opts)
+		}
+	}()
+
+	newAdvertise := conf.ClusterAdvertise
+	newClusterStore := daemon.configStore.ClusterStore
+	if conf.IsValueSet("cluster-advertise") {
+		if conf.IsValueSet("cluster-store") {
+			newClusterStore = conf.ClusterStore
+		}
+		newAdvertise, err = config.ParseClusterAdvertiseSettings(newClusterStore, conf.ClusterAdvertise)
+		if err != nil && err != discovery.ErrDiscoveryDisabled {
+			return err
+		}
+	}
+
+	if daemon.clusterProvider != nil {
+		if err := conf.IsSwarmCompatible(); err != nil {
+			return err
+		}
+	}
+
+	// check discovery modifications
+	if !config.ModifiedDiscoverySettings(daemon.configStore, newClusterStore, newAdvertise, conf.ClusterOpts) {
+		return nil
+	}
+
+	// enable discovery for the first time if it was not previously enabled
+	if daemon.discoveryWatcher == nil {
+		discoveryWatcher, err := discovery.Init(newClusterStore, newAdvertise, conf.ClusterOpts)
+		if err != nil {
+			return fmt.Errorf("failed to initialize discovery: %v", err)
+		}
+		daemon.discoveryWatcher = discoveryWatcher
+	} else if err == discovery.ErrDiscoveryDisabled {
+		// disable discovery if it was previously enabled and it's disabled now
+		daemon.discoveryWatcher.Stop()
+	} else if err = daemon.discoveryWatcher.Reload(conf.ClusterStore, newAdvertise, conf.ClusterOpts); err != nil {
+		// reload discovery
+		return err
+	}
+
+	daemon.configStore.ClusterStore = newClusterStore
+	daemon.configStore.ClusterOpts = conf.ClusterOpts
+	daemon.configStore.ClusterAdvertise = newAdvertise
+
+	if daemon.netController == nil {
+		return nil
+	}
+	netOptions, err := daemon.networkOptions(daemon.configStore, daemon.PluginStore, nil)
+	if err != nil {
+		logrus.WithError(err).Warnf("failed to get options with network controller")
+		return nil
+	}
+	err = daemon.netController.ReloadConfiguration(netOptions...)
+	if err != nil {
+		logrus.Warnf("Failed to reload configuration with network controller: %v", err)
+	}
+	return nil
+}
+
+// reloadLabels updates configuration with engine labels
+// and updates the passed attributes
+func (daemon *Daemon) reloadLabels(conf *config.Config, attributes map[string]string) error {
+	// update corresponding configuration
+	if conf.IsValueSet("labels") {
+		daemon.configStore.Labels = conf.Labels
+	}
+
+	// prepare reload event attributes with updatable configurations
+	if daemon.configStore.Labels != nil {
+		labels, err := json.Marshal(daemon.configStore.Labels)
+		if err != nil {
+			return err
+		}
+		attributes["labels"] = string(labels)
+	} else {
+		attributes["labels"] = "[]"
+	}
+
+	return nil
+}
+
+// reloadAllowNondistributableArtifacts updates the configuration with allow-nondistributable-artifacts options
+// and updates the passed attributes.
+func (daemon *Daemon) reloadAllowNondistributableArtifacts(conf *config.Config, attributes map[string]string) error {
+	// Update corresponding configuration.
+	if conf.IsValueSet("allow-nondistributable-artifacts") {
+		daemon.configStore.AllowNondistributableArtifacts = conf.AllowNondistributableArtifacts
+		if err := daemon.RegistryService.LoadAllowNondistributableArtifacts(conf.AllowNondistributableArtifacts); err != nil {
+			return err
+		}
+	}
+
+	// Prepare reload event attributes with updatable configurations.
+	if daemon.configStore.AllowNondistributableArtifacts != nil {
+		v, err := json.Marshal(daemon.configStore.AllowNondistributableArtifacts)
+		if err != nil {
+			return err
+		}
+		attributes["allow-nondistributable-artifacts"] = string(v)
+	} else {
+		attributes["allow-nondistributable-artifacts"] = "[]"
+	}
+
+	return nil
+}
+
+// reloadInsecureRegistries updates configuration with insecure registry option
+// and updates the passed attributes
+func (daemon *Daemon) reloadInsecureRegistries(conf *config.Config, attributes map[string]string) error {
+	// update corresponding configuration
+	if conf.IsValueSet("insecure-registries") {
+		daemon.configStore.InsecureRegistries = conf.InsecureRegistries
+		if err := daemon.RegistryService.LoadInsecureRegistries(conf.InsecureRegistries); err != nil {
+			return err
+		}
+	}
+
+	// prepare reload event attributes with updatable configurations
+	if daemon.configStore.InsecureRegistries != nil {
+		insecureRegistries, err := json.Marshal(daemon.configStore.InsecureRegistries)
+		if err != nil {
+			return err
+		}
+		attributes["insecure-registries"] = string(insecureRegistries)
+	} else {
+		attributes["insecure-registries"] = "[]"
+	}
+
+	return nil
+}
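+
+// For reference, a daemon.json fragment exercising the reloaders above and
+// below might look like (illustrative values):
+//
+//	{
+//	  "insecure-registries": ["10.10.1.11:5000"],
+//	  "registry-mirrors": ["https://mirror.example.com"]
+//	}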
attributes["registry-mirrors"] = "[]" + } + + return nil +} + +// reloadLiveRestore updates configuration with live retore option +// and updates the passed attributes +func (daemon *Daemon) reloadLiveRestore(conf *config.Config, attributes map[string]string) error { + // update corresponding configuration + if conf.IsValueSet("live-restore") { + daemon.configStore.LiveRestoreEnabled = conf.LiveRestoreEnabled + if err := daemon.containerdRemote.UpdateOptions(libcontainerd.WithLiveRestore(conf.LiveRestoreEnabled)); err != nil { + return err + } + } + + // prepare reload event attributes with updatable configurations + attributes["live-restore"] = fmt.Sprintf("%t", daemon.configStore.LiveRestoreEnabled) + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/reload_test.go b/vendor/github.com/moby/moby/daemon/reload_test.go new file mode 100644 index 000000000..bf11b6bd5 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/reload_test.go @@ -0,0 +1,474 @@ +// +build !solaris + +package daemon + +import ( + "reflect" + "sort" + "testing" + "time" + + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/pkg/discovery" + _ "github.com/docker/docker/pkg/discovery/memory" + "github.com/docker/docker/registry" +) + +func TestDaemonReloadLabels(t *testing.T) { + daemon := &Daemon{} + daemon.configStore = &config.Config{ + CommonConfig: config.CommonConfig{ + Labels: []string{"foo:bar"}, + }, + } + + valuesSets := make(map[string]interface{}) + valuesSets["labels"] = "foo:baz" + newConfig := &config.Config{ + CommonConfig: config.CommonConfig{ + Labels: []string{"foo:baz"}, + ValuesSet: valuesSets, + }, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + label := daemon.configStore.Labels[0] + if label != "foo:baz" { + t.Fatalf("Expected daemon label `foo:baz`, got %s", label) + } +} + +func TestDaemonReloadAllowNondistributableArtifacts(t *testing.T) { + daemon := &Daemon{ + configStore: &config.Config{}, + } + + // Initialize daemon with some registries. + daemon.RegistryService = registry.NewService(registry.ServiceOptions{ + AllowNondistributableArtifacts: []string{ + "127.0.0.0/8", + "10.10.1.11:5000", + "10.10.1.22:5000", // This will be removed during reload. + "docker1.com", + "docker2.com", // This will be removed during reload. + }, + }) + + registries := []string{ + "127.0.0.0/8", + "10.10.1.11:5000", + "10.10.1.33:5000", // This will be added during reload. + "docker1.com", + "docker3.com", // This will be added during reload. 
+ } + + newConfig := &config.Config{ + CommonConfig: config.CommonConfig{ + ServiceOptions: registry.ServiceOptions{ + AllowNondistributableArtifacts: registries, + }, + ValuesSet: map[string]interface{}{ + "allow-nondistributable-artifacts": registries, + }, + }, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + actual := []string{} + serviceConfig := daemon.RegistryService.ServiceConfig() + for _, value := range serviceConfig.AllowNondistributableArtifactsCIDRs { + actual = append(actual, value.String()) + } + for _, value := range serviceConfig.AllowNondistributableArtifactsHostnames { + actual = append(actual, value) + } + + sort.Strings(registries) + sort.Strings(actual) + if !reflect.DeepEqual(registries, actual) { + t.Fatalf("expected %v, got %v\n", registries, actual) + } +} + +func TestDaemonReloadMirrors(t *testing.T) { + daemon := &Daemon{} + daemon.RegistryService = registry.NewService(registry.ServiceOptions{ + InsecureRegistries: []string{}, + Mirrors: []string{ + "https://mirror.test1.com", + "https://mirror.test2.com", // this will be removed when reloading + "https://mirror.test3.com", // this will be removed when reloading + }, + }) + + daemon.configStore = &config.Config{} + + type pair struct { + valid bool + mirrors []string + after []string + } + + loadMirrors := []pair{ + { + valid: false, + mirrors: []string{"10.10.1.11:5000"}, // this mirror is invalid + after: []string{}, + }, + { + valid: false, + mirrors: []string{"mirror.test1.com"}, // this mirror is invalid + after: []string{}, + }, + { + valid: false, + mirrors: []string{"10.10.1.11:5000", "mirror.test1.com"}, // mirrors are invalid + after: []string{}, + }, + { + valid: true, + mirrors: []string{"https://mirror.test1.com", "https://mirror.test4.com"}, + after: []string{"https://mirror.test1.com/", "https://mirror.test4.com/"}, + }, + } + + for _, value := range loadMirrors { + valuesSets := make(map[string]interface{}) + valuesSets["registry-mirrors"] = value.mirrors + + newConfig := &config.Config{ + CommonConfig: config.CommonConfig{ + ServiceOptions: registry.ServiceOptions{ + Mirrors: value.mirrors, + }, + ValuesSet: valuesSets, + }, + } + + err := daemon.Reload(newConfig) + if !value.valid && err == nil { + // mirrors should be invalid, should be a non-nil error + t.Fatalf("Expected daemon reload error with invalid mirrors: %s, while get nil", value.mirrors) + } + + if value.valid { + if err != nil { + // mirrors should be valid, should be no error + t.Fatal(err) + } + registryService := daemon.RegistryService.ServiceConfig() + + if len(registryService.Mirrors) != len(value.after) { + t.Fatalf("Expected %d daemon mirrors %s while get %d with %s", + len(value.after), + value.after, + len(registryService.Mirrors), + registryService.Mirrors) + } + + dataMap := map[string]struct{}{} + + for _, mirror := range registryService.Mirrors { + if _, exist := dataMap[mirror]; !exist { + dataMap[mirror] = struct{}{} + } + } + + for _, address := range value.after { + if _, exist := dataMap[address]; !exist { + t.Fatalf("Expected %s in daemon mirrors, while get none", address) + } + } + } + } +} + +func TestDaemonReloadInsecureRegistries(t *testing.T) { + daemon := &Daemon{} + // initialize daemon with existing insecure registries: "127.0.0.0/8", "10.10.1.11:5000", "10.10.1.22:5000" + daemon.RegistryService = registry.NewService(registry.ServiceOptions{ + InsecureRegistries: []string{ + "127.0.0.0/8", + "10.10.1.11:5000", + "10.10.1.22:5000", // this will be removed when reloading + 
"docker1.com", + "docker2.com", // this will be removed when reloading + }, + }) + + daemon.configStore = &config.Config{} + + insecureRegistries := []string{ + "127.0.0.0/8", // this will be kept + "10.10.1.11:5000", // this will be kept + "10.10.1.33:5000", // this will be newly added + "docker1.com", // this will be kept + "docker3.com", // this will be newly added + } + + valuesSets := make(map[string]interface{}) + valuesSets["insecure-registries"] = insecureRegistries + + newConfig := &config.Config{ + CommonConfig: config.CommonConfig{ + ServiceOptions: registry.ServiceOptions{ + InsecureRegistries: insecureRegistries, + }, + ValuesSet: valuesSets, + }, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + // After Reload, daemon.RegistryService will be changed which is useful + // for registry communication in daemon. + registries := daemon.RegistryService.ServiceConfig() + + // After Reload(), newConfig has come to registries.InsecureRegistryCIDRs and registries.IndexConfigs in daemon. + // Then collect registries.InsecureRegistryCIDRs in dataMap. + // When collecting, we need to convert CIDRS into string as a key, + // while the times of key appears as value. + dataMap := map[string]int{} + for _, value := range registries.InsecureRegistryCIDRs { + if _, ok := dataMap[value.String()]; !ok { + dataMap[value.String()] = 1 + } else { + dataMap[value.String()]++ + } + } + + for _, value := range registries.IndexConfigs { + if _, ok := dataMap[value.Name]; !ok { + dataMap[value.Name] = 1 + } else { + dataMap[value.Name]++ + } + } + + // Finally compare dataMap with the original insecureRegistries. + // Each value in insecureRegistries should appear in daemon's insecure registries, + // and each can only appear exactly ONCE. 
+ for _, r := range insecureRegistries { + if value, ok := dataMap[r]; !ok { + t.Fatalf("Expected daemon insecure registry %s, got none", r) + } else if value != 1 { + t.Fatalf("Expected only 1 daemon insecure registry %s, got %d", r, value) + } + } + + // assert if "10.10.1.22:5000" is removed when reloading + if value, ok := dataMap["10.10.1.22:5000"]; ok { + t.Fatalf("Expected no insecure registry of 10.10.1.22:5000, got %d", value) + } + + // assert if "docker2.com" is removed when reloading + if value, ok := dataMap["docker2.com"]; ok { + t.Fatalf("Expected no insecure registry of docker2.com, got %d", value) + } +} + +func TestDaemonReloadNotAffectOthers(t *testing.T) { + daemon := &Daemon{} + daemon.configStore = &config.Config{ + CommonConfig: config.CommonConfig{ + Labels: []string{"foo:bar"}, + Debug: true, + }, + } + + valuesSets := make(map[string]interface{}) + valuesSets["labels"] = "foo:baz" + newConfig := &config.Config{ + CommonConfig: config.CommonConfig{ + Labels: []string{"foo:baz"}, + ValuesSet: valuesSets, + }, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + label := daemon.configStore.Labels[0] + if label != "foo:baz" { + t.Fatalf("Expected daemon label `foo:baz`, got %s", label) + } + debug := daemon.configStore.Debug + if !debug { + t.Fatal("Expected debug 'enabled', got 'disabled'") + } +} + +func TestDaemonDiscoveryReload(t *testing.T) { + daemon := &Daemon{} + daemon.configStore = &config.Config{ + CommonConfig: config.CommonConfig{ + ClusterStore: "memory://127.0.0.1", + ClusterAdvertise: "127.0.0.1:3333", + }, + } + + if err := daemon.initDiscovery(daemon.configStore); err != nil { + t.Fatal(err) + } + + expected := discovery.Entries{ + &discovery.Entry{Host: "127.0.0.1", Port: "3333"}, + } + + select { + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for discovery") + case <-daemon.discoveryWatcher.ReadyCh(): + } + + stopCh := make(chan struct{}) + defer close(stopCh) + ch, errCh := daemon.discoveryWatcher.Watch(stopCh) + + select { + case <-time.After(1 * time.Second): + t.Fatal("failed to get discovery advertisements in time") + case e := <-ch: + if !reflect.DeepEqual(e, expected) { + t.Fatalf("expected %v, got %v\n", expected, e) + } + case e := <-errCh: + t.Fatal(e) + } + + valuesSets := make(map[string]interface{}) + valuesSets["cluster-store"] = "memory://127.0.0.1:2222" + valuesSets["cluster-advertise"] = "127.0.0.1:5555" + newConfig := &config.Config{ + CommonConfig: config.CommonConfig{ + ClusterStore: "memory://127.0.0.1:2222", + ClusterAdvertise: "127.0.0.1:5555", + ValuesSet: valuesSets, + }, + } + + expected = discovery.Entries{ + &discovery.Entry{Host: "127.0.0.1", Port: "5555"}, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + select { + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for discovery") + case <-daemon.discoveryWatcher.ReadyCh(): + } + + ch, errCh = daemon.discoveryWatcher.Watch(stopCh) + + select { + case <-time.After(1 * time.Second): + t.Fatal("failed to get discovery advertisements in time") + case e := <-ch: + if !reflect.DeepEqual(e, expected) { + t.Fatalf("expected %v, got %v\n", expected, e) + } + case e := <-errCh: + t.Fatal(e) + } +} + +func TestDaemonDiscoveryReloadFromEmptyDiscovery(t *testing.T) { + daemon := &Daemon{} + daemon.configStore = &config.Config{} + + valuesSet := make(map[string]interface{}) + valuesSet["cluster-store"] = "memory://127.0.0.1:2222" + valuesSet["cluster-advertise"] = "127.0.0.1:5555" + newConfig 
:= &config.Config{ + CommonConfig: config.CommonConfig{ + ClusterStore: "memory://127.0.0.1:2222", + ClusterAdvertise: "127.0.0.1:5555", + ValuesSet: valuesSet, + }, + } + + expected := discovery.Entries{ + &discovery.Entry{Host: "127.0.0.1", Port: "5555"}, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + select { + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for discovery") + case <-daemon.discoveryWatcher.ReadyCh(): + } + + stopCh := make(chan struct{}) + defer close(stopCh) + ch, errCh := daemon.discoveryWatcher.Watch(stopCh) + + select { + case <-time.After(1 * time.Second): + t.Fatal("failed to get discovery advertisements in time") + case e := <-ch: + if !reflect.DeepEqual(e, expected) { + t.Fatalf("expected %v, got %v\n", expected, e) + } + case e := <-errCh: + t.Fatal(e) + } +} + +func TestDaemonDiscoveryReloadOnlyClusterAdvertise(t *testing.T) { + daemon := &Daemon{} + daemon.configStore = &config.Config{ + CommonConfig: config.CommonConfig{ + ClusterStore: "memory://127.0.0.1", + }, + } + valuesSets := make(map[string]interface{}) + valuesSets["cluster-advertise"] = "127.0.0.1:5555" + newConfig := &config.Config{ + CommonConfig: config.CommonConfig{ + ClusterAdvertise: "127.0.0.1:5555", + ValuesSet: valuesSets, + }, + } + expected := discovery.Entries{ + &discovery.Entry{Host: "127.0.0.1", Port: "5555"}, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + select { + case <-daemon.discoveryWatcher.ReadyCh(): + case <-time.After(10 * time.Second): + t.Fatal("Timeout waiting for discovery") + } + stopCh := make(chan struct{}) + defer close(stopCh) + ch, errCh := daemon.discoveryWatcher.Watch(stopCh) + + select { + case <-time.After(1 * time.Second): + t.Fatal("failed to get discovery advertisements in time") + case e := <-ch: + if !reflect.DeepEqual(e, expected) { + t.Fatalf("expected %v, got %v\n", expected, e) + } + case e := <-errCh: + t.Fatal(e) + } +} diff --git a/vendor/github.com/moby/moby/daemon/rename.go b/vendor/github.com/moby/moby/daemon/rename.go new file mode 100644 index 000000000..686fbd3b9 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/rename.go @@ -0,0 +1,123 @@ +package daemon + +import ( + "errors" + "fmt" + "strings" + + "github.com/Sirupsen/logrus" + dockercontainer "github.com/docker/docker/container" + "github.com/docker/libnetwork" +) + +// ContainerRename changes the name of a container, using the oldName +// to find the container. An error is returned if newName is already +// reserved. 
+func (daemon *Daemon) ContainerRename(oldName, newName string) error { + var ( + sid string + sb libnetwork.Sandbox + ) + + if oldName == "" || newName == "" { + return errors.New("Neither old nor new names may be empty") + } + + if newName[0] != '/' { + newName = "/" + newName + } + + container, err := daemon.GetContainer(oldName) + if err != nil { + return err + } + + container.Lock() + defer container.Unlock() + + oldName = container.Name + oldIsAnonymousEndpoint := container.NetworkSettings.IsAnonymousEndpoint + + if oldName == newName { + return errors.New("Renaming a container with the same name as its current name") + } + + links := map[string]*dockercontainer.Container{} + for k, v := range daemon.linkIndex.children(container) { + if !strings.HasPrefix(k, oldName) { + return fmt.Errorf("Linked container %s does not match parent %s", k, oldName) + } + links[strings.TrimPrefix(k, oldName)] = v + } + + if newName, err = daemon.reserveName(container.ID, newName); err != nil { + return fmt.Errorf("Error when allocating new name: %v", err) + } + + for k, v := range links { + daemon.containersReplica.ReserveName(newName+k, v.ID) + daemon.linkIndex.link(container, v, newName+k) + } + + container.Name = newName + container.NetworkSettings.IsAnonymousEndpoint = false + + defer func() { + if err != nil { + container.Name = oldName + container.NetworkSettings.IsAnonymousEndpoint = oldIsAnonymousEndpoint + daemon.reserveName(container.ID, oldName) + for k, v := range links { + daemon.containersReplica.ReserveName(oldName+k, v.ID) + daemon.linkIndex.link(container, v, oldName+k) + daemon.linkIndex.unlink(newName+k, v, container) + daemon.containersReplica.ReleaseName(newName + k) + } + daemon.releaseName(newName) + } + }() + + for k, v := range links { + daemon.linkIndex.unlink(oldName+k, v, container) + daemon.containersReplica.ReleaseName(oldName + k) + } + daemon.releaseName(oldName) + if err = container.CheckpointTo(daemon.containersReplica); err != nil { + return err + } + + attributes := map[string]string{ + "oldName": oldName, + } + + if !container.Running { + daemon.LogContainerEventWithAttributes(container, "rename", attributes) + return nil + } + + defer func() { + if err != nil { + container.Name = oldName + container.NetworkSettings.IsAnonymousEndpoint = oldIsAnonymousEndpoint + if e := container.CheckpointTo(daemon.containersReplica); e != nil { + logrus.Errorf("%s: Failed in writing to Disk on rename failure: %v", container.ID, e) + } + } + }() + + sid = container.NetworkSettings.SandboxID + if sid != "" && daemon.netController != nil { + sb, err = daemon.netController.SandboxByID(sid) + if err != nil { + return err + } + + err = sb.Rename(strings.TrimPrefix(container.Name, "/")) + if err != nil { + return err + } + } + + daemon.LogContainerEventWithAttributes(container, "rename", attributes) + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/resize.go b/vendor/github.com/moby/moby/daemon/resize.go new file mode 100644 index 000000000..747353852 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/resize.go @@ -0,0 +1,40 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/docker/libcontainerd" +) + +// ContainerResize changes the size of the TTY of the process running +// in the container with the given name to the given height and width. 
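+// A minimal client-side sketch (assuming the docker/docker "client" and
+// "api/types" packages; shown for illustration only, not part of this file):
+//
+//	cli, _ := client.NewEnvClient()
+//	err := cli.ContainerResize(context.Background(), "mycontainer",
+//		types.ResizeOptions{Height: 40, Width: 120})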
+func (daemon *Daemon) ContainerResize(name string, height, width int) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + if !container.IsRunning() { + return errNotRunning{container.ID} + } + + if err = daemon.containerd.Resize(container.ID, libcontainerd.InitFriendlyName, width, height); err == nil { + attributes := map[string]string{ + "height": fmt.Sprintf("%d", height), + "width": fmt.Sprintf("%d", width), + } + daemon.LogContainerEventWithAttributes(container, "resize", attributes) + } + return err +} + +// ContainerExecResize changes the size of the TTY of the process +// running in the exec with the given name to the given height and +// width. +func (daemon *Daemon) ContainerExecResize(name string, height, width int) error { + ec, err := daemon.getExecConfig(name) + if err != nil { + return err + } + return daemon.containerd.Resize(ec.ContainerID, ec.ID, width, height) +} diff --git a/vendor/github.com/moby/moby/daemon/restart.go b/vendor/github.com/moby/moby/daemon/restart.go new file mode 100644 index 000000000..9f2ef569a --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/restart.go @@ -0,0 +1,70 @@ +package daemon + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" +) + +// ContainerRestart stops and starts a container. It attempts to +// gracefully stop the container within the given timeout, forcefully +// stopping it if the timeout is exceeded. If given a negative +// timeout, ContainerRestart will wait forever until a graceful +// stop. Returns an error if the container cannot be found, or if +// there is an underlying error at any stage of the restart. +func (daemon *Daemon) ContainerRestart(name string, seconds *int) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + if seconds == nil { + stopTimeout := container.StopTimeout() + seconds = &stopTimeout + } + if err := daemon.containerRestart(container, *seconds); err != nil { + return fmt.Errorf("Cannot restart container %s: %v", name, err) + } + return nil + +} + +// containerRestart attempts to gracefully stop and then start the +// container. When stopping, wait for the given duration in seconds to +// gracefully stop, before forcefully terminating the container. If +// given a negative duration, wait forever for a graceful stop. 
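+// For example, a caller passing seconds == 10 gives the container ten seconds
+// to exit after the stop signal before it is killed, after which a normal
+// start follows; the timeout handling itself lives in containerStop.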
+func (daemon *Daemon) containerRestart(container *container.Container, seconds int) error { + // Avoid unnecessarily unmounting and then directly mounting + // the container when the container stops and then starts + // again + if err := daemon.Mount(container); err == nil { + defer daemon.Unmount(container) + } + + if container.IsRunning() { + // set AutoRemove flag to false before stop so the container won't be + // removed during restart process + autoRemove := container.HostConfig.AutoRemove + + container.HostConfig.AutoRemove = false + err := daemon.containerStop(container, seconds) + // restore AutoRemove irrespective of whether the stop worked or not + container.HostConfig.AutoRemove = autoRemove + // containerStop will write HostConfig to disk, we shall restore AutoRemove + // in disk too + if toDiskErr := daemon.checkpointAndSave(container); toDiskErr != nil { + logrus.Errorf("Write container to disk error: %v", toDiskErr) + } + + if err != nil { + return err + } + } + + if err := daemon.containerStart(container, "", "", true); err != nil { + return err + } + + daemon.LogContainerEvent(container, "restart") + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/search.go b/vendor/github.com/moby/moby/daemon/search.go new file mode 100644 index 000000000..5d2ac5d22 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/search.go @@ -0,0 +1,94 @@ +package daemon + +import ( + "fmt" + "strconv" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/dockerversion" +) + +var acceptedSearchFilterTags = map[string]bool{ + "is-automated": true, + "is-official": true, + "stars": true, +} + +// SearchRegistryForImages queries the registry for images matching +// term. authConfig is used to login. 
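+// filtersArgs is the JSON-encoded filter set from the API, for example:
+//
+//	{"is-official":{"true":true},"stars":{"3":true}}
+//
+// which keeps only official images with at least three stars; the accepted
+// keys are those in acceptedSearchFilterTags above.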
+func (daemon *Daemon) SearchRegistryForImages(ctx context.Context, filtersArgs string, term string, limit int, + authConfig *types.AuthConfig, + headers map[string][]string) (*registrytypes.SearchResults, error) { + + searchFilters, err := filters.FromParam(filtersArgs) + if err != nil { + return nil, err + } + if err := searchFilters.Validate(acceptedSearchFilterTags); err != nil { + return nil, err + } + + var isAutomated, isOfficial bool + var hasStarFilter = 0 + if searchFilters.Include("is-automated") { + if searchFilters.UniqueExactMatch("is-automated", "true") { + isAutomated = true + } else if !searchFilters.UniqueExactMatch("is-automated", "false") { + return nil, fmt.Errorf("Invalid filter 'is-automated=%s'", searchFilters.Get("is-automated")) + } + } + if searchFilters.Include("is-official") { + if searchFilters.UniqueExactMatch("is-official", "true") { + isOfficial = true + } else if !searchFilters.UniqueExactMatch("is-official", "false") { + return nil, fmt.Errorf("Invalid filter 'is-official=%s'", searchFilters.Get("is-official")) + } + } + if searchFilters.Include("stars") { + hasStars := searchFilters.Get("stars") + for _, hasStar := range hasStars { + iHasStar, err := strconv.Atoi(hasStar) + if err != nil { + return nil, fmt.Errorf("Invalid filter 'stars=%s'", hasStar) + } + if iHasStar > hasStarFilter { + hasStarFilter = iHasStar + } + } + } + + unfilteredResult, err := daemon.RegistryService.Search(ctx, term, limit, authConfig, dockerversion.DockerUserAgent(ctx), headers) + if err != nil { + return nil, err + } + + filteredResults := []registrytypes.SearchResult{} + for _, result := range unfilteredResult.Results { + if searchFilters.Include("is-automated") { + if isAutomated != result.IsAutomated { + continue + } + } + if searchFilters.Include("is-official") { + if isOfficial != result.IsOfficial { + continue + } + } + if searchFilters.Include("stars") { + if result.StarCount < hasStarFilter { + continue + } + } + filteredResults = append(filteredResults, result) + } + + return ®istrytypes.SearchResults{ + Query: unfilteredResult.Query, + NumResults: len(filteredResults), + Results: filteredResults, + }, nil +} diff --git a/vendor/github.com/moby/moby/daemon/search_test.go b/vendor/github.com/moby/moby/daemon/search_test.go new file mode 100644 index 000000000..85237192e --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/search_test.go @@ -0,0 +1,358 @@ +package daemon + +import ( + "errors" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/registry" +) + +type FakeService struct { + registry.DefaultService + + shouldReturnError bool + + term string + results []registrytypes.SearchResult +} + +func (s *FakeService) Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) { + if s.shouldReturnError { + return nil, errors.New("Search unknown error") + } + return ®istrytypes.SearchResults{ + Query: s.term, + NumResults: len(s.results), + Results: s.results, + }, nil +} + +func TestSearchRegistryForImagesErrors(t *testing.T) { + errorCases := []struct { + filtersArgs string + shouldReturnError bool + expectedError string + }{ + { + expectedError: "Search unknown error", + shouldReturnError: true, + }, + { + filtersArgs: "invalid json", + expectedError: "invalid character 'i' looking for beginning of value", + }, + { + 
filtersArgs: `{"type":{"custom":true}}`, + expectedError: "Invalid filter 'type'", + }, + { + filtersArgs: `{"is-automated":{"invalid":true}}`, + expectedError: "Invalid filter 'is-automated=[invalid]'", + }, + { + filtersArgs: `{"is-automated":{"true":true,"false":true}}`, + expectedError: "Invalid filter 'is-automated", + }, + { + filtersArgs: `{"is-official":{"invalid":true}}`, + expectedError: "Invalid filter 'is-official=[invalid]'", + }, + { + filtersArgs: `{"is-official":{"true":true,"false":true}}`, + expectedError: "Invalid filter 'is-official", + }, + { + filtersArgs: `{"stars":{"invalid":true}}`, + expectedError: "Invalid filter 'stars=invalid'", + }, + { + filtersArgs: `{"stars":{"1":true,"invalid":true}}`, + expectedError: "Invalid filter 'stars=invalid'", + }, + } + for index, e := range errorCases { + daemon := &Daemon{ + RegistryService: &FakeService{ + shouldReturnError: e.shouldReturnError, + }, + } + _, err := daemon.SearchRegistryForImages(context.Background(), e.filtersArgs, "term", 25, nil, map[string][]string{}) + if err == nil { + t.Errorf("%d: expected an error, got nothing", index) + } + if !strings.Contains(err.Error(), e.expectedError) { + t.Errorf("%d: expected error to contain %s, got %s", index, e.expectedError, err.Error()) + } + } +} + +func TestSearchRegistryForImages(t *testing.T) { + term := "term" + successCases := []struct { + filtersArgs string + registryResults []registrytypes.SearchResult + expectedResults []registrytypes.SearchResult + }{ + { + filtersArgs: "", + registryResults: []registrytypes.SearchResult{}, + expectedResults: []registrytypes.SearchResult{}, + }, + { + filtersArgs: "", + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + }, + }, + }, + { + filtersArgs: `{"is-automated":{"true":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + }, + }, + expectedResults: []registrytypes.SearchResult{}, + }, + { + filtersArgs: `{"is-automated":{"true":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsAutomated: true, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsAutomated: true, + }, + }, + }, + { + filtersArgs: `{"is-automated":{"false":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsAutomated: true, + }, + }, + expectedResults: []registrytypes.SearchResult{}, + }, + { + filtersArgs: `{"is-automated":{"false":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsAutomated: false, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsAutomated: false, + }, + }, + }, + { + filtersArgs: `{"is-official":{"true":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + }, + }, + expectedResults: []registrytypes.SearchResult{}, + }, + { + filtersArgs: `{"is-official":{"true":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsOfficial: true, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsOfficial: true, + }, + }, + }, + { + filtersArgs: 
`{"is-official":{"false":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsOfficial: true, + }, + }, + expectedResults: []registrytypes.SearchResult{}, + }, + { + filtersArgs: `{"is-official":{"false":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsOfficial: false, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsOfficial: false, + }, + }, + }, + { + filtersArgs: `{"stars":{"0":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + StarCount: 0, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + StarCount: 0, + }, + }, + }, + { + filtersArgs: `{"stars":{"1":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + StarCount: 0, + }, + }, + expectedResults: []registrytypes.SearchResult{}, + }, + { + filtersArgs: `{"stars":{"1":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name0", + Description: "description0", + StarCount: 0, + }, + { + Name: "name1", + Description: "description1", + StarCount: 1, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name1", + Description: "description1", + StarCount: 1, + }, + }, + }, + { + filtersArgs: `{"stars":{"1":true}, "is-official":{"true":true}, "is-automated":{"true":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name0", + Description: "description0", + StarCount: 0, + IsOfficial: true, + IsAutomated: true, + }, + { + Name: "name1", + Description: "description1", + StarCount: 1, + IsOfficial: false, + IsAutomated: true, + }, + { + Name: "name2", + Description: "description2", + StarCount: 1, + IsOfficial: true, + IsAutomated: false, + }, + { + Name: "name3", + Description: "description3", + StarCount: 2, + IsOfficial: true, + IsAutomated: true, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name3", + Description: "description3", + StarCount: 2, + IsOfficial: true, + IsAutomated: true, + }, + }, + }, + } + for index, s := range successCases { + daemon := &Daemon{ + RegistryService: &FakeService{ + term: term, + results: s.registryResults, + }, + } + results, err := daemon.SearchRegistryForImages(context.Background(), s.filtersArgs, term, 25, nil, map[string][]string{}) + if err != nil { + t.Errorf("%d: %v", index, err) + } + if results.Query != term { + t.Errorf("%d: expected Query to be %s, got %s", index, term, results.Query) + } + if results.NumResults != len(s.expectedResults) { + t.Errorf("%d: expected NumResults to be %d, got %d", index, len(s.expectedResults), results.NumResults) + } + for _, result := range results.Results { + found := false + for _, expectedResult := range s.expectedResults { + if expectedResult.Name == result.Name && + expectedResult.Description == result.Description && + expectedResult.IsAutomated == result.IsAutomated && + expectedResult.IsOfficial == result.IsOfficial && + expectedResult.StarCount == result.StarCount { + found = true + break + } + } + if !found { + t.Errorf("%d: expected results %v, got %v", index, s.expectedResults, results.Results) + } + } + } +} diff --git a/vendor/github.com/moby/moby/daemon/seccomp_disabled.go b/vendor/github.com/moby/moby/daemon/seccomp_disabled.go new file mode 100644 index 000000000..ff1127b6c --- /dev/null +++ 
b/vendor/github.com/moby/moby/daemon/seccomp_disabled.go @@ -0,0 +1,19 @@ +// +build linux,!seccomp + +package daemon + +import ( + "fmt" + + "github.com/docker/docker/container" + "github.com/opencontainers/runtime-spec/specs-go" +) + +var supportsSeccomp = false + +func setSeccomp(daemon *Daemon, rs *specs.Spec, c *container.Container) error { + if c.SeccompProfile != "" && c.SeccompProfile != "unconfined" { + return fmt.Errorf("seccomp profiles are not supported on this daemon, you cannot specify a custom seccomp profile") + } + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/seccomp_linux.go b/vendor/github.com/moby/moby/daemon/seccomp_linux.go new file mode 100644 index 000000000..472e3133c --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/seccomp_linux.go @@ -0,0 +1,55 @@ +// +build linux,seccomp + +package daemon + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/profiles/seccomp" + "github.com/opencontainers/runtime-spec/specs-go" +) + +var supportsSeccomp = true + +func setSeccomp(daemon *Daemon, rs *specs.Spec, c *container.Container) error { + var profile *specs.LinuxSeccomp + var err error + + if c.HostConfig.Privileged { + return nil + } + + if !daemon.seccompEnabled { + if c.SeccompProfile != "" && c.SeccompProfile != "unconfined" { + return fmt.Errorf("Seccomp is not enabled in your kernel, cannot run a custom seccomp profile.") + } + logrus.Warn("Seccomp is not enabled in your kernel, running container without default profile.") + c.SeccompProfile = "unconfined" + } + if c.SeccompProfile == "unconfined" { + return nil + } + if c.SeccompProfile != "" { + profile, err = seccomp.LoadProfile(c.SeccompProfile, rs) + if err != nil { + return err + } + } else { + if daemon.seccompProfile != nil { + profile, err = seccomp.LoadProfile(string(daemon.seccompProfile), rs) + if err != nil { + return err + } + } else { + profile, err = seccomp.GetDefaultProfile(rs) + if err != nil { + return err + } + } + } + + rs.Linux.Seccomp = profile + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/seccomp_unsupported.go b/vendor/github.com/moby/moby/daemon/seccomp_unsupported.go new file mode 100644 index 000000000..b3691e96a --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/seccomp_unsupported.go @@ -0,0 +1,5 @@ +// +build !linux + +package daemon + +var supportsSeccomp = false diff --git a/vendor/github.com/moby/moby/daemon/secrets.go b/vendor/github.com/moby/moby/daemon/secrets.go new file mode 100644 index 000000000..90fa99e98 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/secrets.go @@ -0,0 +1,23 @@ +package daemon + +import ( + "github.com/Sirupsen/logrus" + swarmtypes "github.com/docker/docker/api/types/swarm" +) + +// SetContainerSecretReferences sets the container secret references needed +func (daemon *Daemon) SetContainerSecretReferences(name string, refs []*swarmtypes.SecretReference) error { + if !secretsSupported() && len(refs) > 0 { + logrus.Warn("secrets are not supported on this platform") + return nil + } + + c, err := daemon.GetContainer(name) + if err != nil { + return err + } + + c.SecretReferences = refs + + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/secrets_linux.go b/vendor/github.com/moby/moby/daemon/secrets_linux.go new file mode 100644 index 000000000..fca4e1259 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/secrets_linux.go @@ -0,0 +1,7 @@ +// +build linux + +package daemon + +func secretsSupported() bool { + return true +} diff 
--git a/vendor/github.com/moby/moby/daemon/secrets_unsupported.go b/vendor/github.com/moby/moby/daemon/secrets_unsupported.go new file mode 100644 index 000000000..d55e8624d --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/secrets_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux,!windows + +package daemon + +func secretsSupported() bool { + return false +} diff --git a/vendor/github.com/moby/moby/daemon/secrets_windows.go b/vendor/github.com/moby/moby/daemon/secrets_windows.go new file mode 100644 index 000000000..9054354c8 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/secrets_windows.go @@ -0,0 +1,7 @@ +// +build windows + +package daemon + +func secretsSupported() bool { + return true +} diff --git a/vendor/github.com/moby/moby/daemon/selinux_linux.go b/vendor/github.com/moby/moby/daemon/selinux_linux.go new file mode 100644 index 000000000..fb2578bf4 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/selinux_linux.go @@ -0,0 +1,17 @@ +// +build linux + +package daemon + +import "github.com/opencontainers/selinux/go-selinux" + +func selinuxSetDisabled() { + selinux.SetDisabled() +} + +func selinuxFreeLxcContexts(label string) { + selinux.ReleaseLabel(label) +} + +func selinuxEnabled() bool { + return selinux.GetEnabled() +} diff --git a/vendor/github.com/moby/moby/daemon/selinux_unsupported.go b/vendor/github.com/moby/moby/daemon/selinux_unsupported.go new file mode 100644 index 000000000..25a56ad15 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/selinux_unsupported.go @@ -0,0 +1,13 @@ +// +build !linux + +package daemon + +func selinuxSetDisabled() { +} + +func selinuxFreeLxcContexts(label string) { +} + +func selinuxEnabled() bool { + return false +} diff --git a/vendor/github.com/moby/moby/daemon/start.go b/vendor/github.com/moby/moby/daemon/start.go new file mode 100644 index 000000000..8d938519c --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/start.go @@ -0,0 +1,225 @@ +package daemon + +import ( + "fmt" + "net/http" + "runtime" + "strings" + "syscall" + "time" + + "google.golang.org/grpc" + + "github.com/Sirupsen/logrus" + apierrors "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" +) + +// ContainerStart starts a container. +func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.HostConfig, checkpoint string, checkpointDir string) error { + if checkpoint != "" && !daemon.HasExperimental() { + return apierrors.NewBadRequestError(fmt.Errorf("checkpoint is only supported in experimental mode")) + } + + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + if container.IsPaused() { + return fmt.Errorf("Cannot start a paused container, try unpause instead.") + } + + if container.IsRunning() { + err := fmt.Errorf("Container already started") + return apierrors.NewErrorWithStatusCode(err, http.StatusNotModified) + } + + // Windows does not have the backwards compatibility issue here. + if runtime.GOOS != "windows" { + // This is kept for backward compatibility - hostconfig should be passed when + // creating a container, not during start. 
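+ // For example, a pre-1.12 client may still send a HostConfig body with
+ // POST /containers/{id}/start; for compatibility the block below applies
+ // it and persists the result instead of rejecting the request outright.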
+ if hostConfig != nil { + logrus.Warn("DEPRECATED: Setting host configuration options when the container starts is deprecated and has been removed in Docker 1.12") + oldNetworkMode := container.HostConfig.NetworkMode + if err := daemon.setSecurityOptions(container, hostConfig); err != nil { + return err + } + if err := daemon.mergeAndVerifyLogConfig(&hostConfig.LogConfig); err != nil { + return err + } + if err := daemon.setHostConfig(container, hostConfig); err != nil { + return err + } + newNetworkMode := container.HostConfig.NetworkMode + if string(oldNetworkMode) != string(newNetworkMode) { + // if the user has changed the network mode on start, clean up the + // old networks. This is a deprecated feature and was removed in Docker 1.12 + container.NetworkSettings.Networks = nil + if err := container.CheckpointTo(daemon.containersReplica); err != nil { + return err + } + } + container.InitDNSHostConfig() + } + } else { + if hostConfig != nil { + return fmt.Errorf("Supplying a hostconfig on start is not supported. It should be supplied on create") + } + } + + // check if hostConfig is in line with the current system settings. + // It may happen that cgroups are unmounted, or the like. + if _, err = daemon.verifyContainerSettings(container.HostConfig, nil, false); err != nil { + return err + } + // Adapt for old containers in case we have updates in this function and + // old containers never had a chance to call the new function at create time. + if hostConfig != nil { + if err := daemon.adaptContainerSettings(container.HostConfig, false); err != nil { + return err + } + } + + return daemon.containerStart(container, checkpoint, checkpointDir, true) +} + +// containerStart prepares the container to run by setting up everything the +// container needs, such as storage and networking, as well as links +// between containers. The container is left waiting for a signal to +// begin running.
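+// The sequence below is roughly: mount the root filesystem, initialize
+// networking, build the runtime spec, then ask containerd to create the
+// task; a failure at any point unwinds through the deferred cleanup at the
+// top of the function.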
+func (daemon *Daemon) containerStart(container *container.Container, checkpoint string, checkpointDir string, resetRestartManager bool) (err error) { + start := time.Now() + container.Lock() + defer container.Unlock() + + if resetRestartManager && container.Running { // skip this check if already in restarting step and resetRestartManager==false + return nil + } + + if container.RemovalInProgress || container.Dead { + return fmt.Errorf("Container is marked for removal and cannot be started.") + } + + // if we encounter an error during start we need to ensure that any other + // setup has been cleaned up properly + defer func() { + if err != nil { + container.SetError(err) + // if no one else has set it, make sure we don't leave it at zero + if container.ExitCode() == 0 { + container.SetExitCode(128) + } + if err := container.CheckpointTo(daemon.containersReplica); err != nil { + logrus.Errorf("%s: failed saving state on start failure: %v", container.ID, err) + } + container.Reset(false) + + daemon.Cleanup(container) + // if the container's AutoRemove flag is set, remove it after clean up + if container.HostConfig.AutoRemove { + container.Unlock() + if err := daemon.ContainerRm(container.ID, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil { + logrus.Errorf("can't remove container %s: %v", container.ID, err) + } + container.Lock() + } + } + }() + + if err := daemon.conditionalMountOnStart(container); err != nil { + return err + } + + if err := daemon.initializeNetworking(container); err != nil { + return err + } + + spec, err := daemon.createSpec(container) + if err != nil { + return err + } + + createOptions, err := daemon.getLibcontainerdCreateOptions(container) + if err != nil { + return err + } + + if resetRestartManager { + container.ResetRestartManager(true) + } + + if checkpointDir == "" { + checkpointDir = container.CheckpointDir() + } + + if err := daemon.saveApparmorConfig(container); err != nil { + return err + } + + if err := daemon.containerd.Create(container.ID, checkpoint, checkpointDir, *spec, container.InitializeStdio, createOptions...); err != nil { + errDesc := grpc.ErrorDesc(err) + contains := func(s1, s2 string) bool { + return strings.Contains(strings.ToLower(s1), s2) + } + logrus.Errorf("Create container failed with error: %s", errDesc) + // if we receive an internal error from the initial start of a container then let's + // return it instead of entering the restart loop + // set to 127 when the container cmd is not found / does not exist + if contains(errDesc, container.Path) && + (contains(errDesc, "executable file not found") || + contains(errDesc, "no such file or directory") || + contains(errDesc, "system cannot find the file specified")) { + container.SetExitCode(127) + } + // set to 126 for container cmd can't be invoked errors + if contains(errDesc, syscall.EACCES.Error()) { + container.SetExitCode(126) + } + + // attempted to mount a file onto a directory, or a directory onto a file, maybe from user specified bind mounts + if contains(errDesc, syscall.ENOTDIR.Error()) { + errDesc += ": Are you trying to mount a directory onto a file (or vice-versa)? Check if the specified host path exists and is the expected type" + container.SetExitCode(127) + } + + return fmt.Errorf("%s", errDesc) + } + + containerActions.WithValues("start").UpdateSince(start) + + return nil +} + +// Cleanup releases any network resources allocated to the container along with any rules +// around how containers are linked together. It also unmounts the container's root filesystem.
+func (daemon *Daemon) Cleanup(container *container.Container) { + daemon.releaseNetwork(container) + + container.UnmountIpcMounts(detachMounted) + + if err := daemon.conditionalUnmountOnCleanup(container); err != nil { + // FIXME: remove once reference counting for graphdrivers has been refactored + // Ensure that all the mounts are gone + if mountid, err := daemon.stores[container.Platform].layerStore.GetMountID(container.ID); err == nil { + daemon.cleanupMountsByID(mountid) + } + } + + if err := container.UnmountSecrets(); err != nil { + logrus.Warnf("%s cleanup: failed to unmount secrets: %s", container.ID, err) + } + + for _, eConfig := range container.ExecCommands.Commands() { + daemon.unregisterExecCommand(container, eConfig) + } + + if container.BaseFS != "" { + if err := container.UnmountVolumes(daemon.LogVolumeEvent); err != nil { + logrus.Warnf("%s cleanup: Failed to umount volumes: %v", container.ID, err) + } + } + container.CancelAttachContext() +} diff --git a/vendor/github.com/moby/moby/daemon/start_unix.go b/vendor/github.com/moby/moby/daemon/start_unix.go new file mode 100644 index 000000000..12ecdab2d --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/start_unix.go @@ -0,0 +1,32 @@ +// +build !windows + +package daemon + +import ( + "fmt" + + "github.com/docker/docker/container" + "github.com/docker/docker/libcontainerd" +) + +// getLibcontainerdCreateOptions callers must hold a lock on the container +func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Container) ([]libcontainerd.CreateOption, error) { + createOptions := []libcontainerd.CreateOption{} + + // Ensure a runtime has been assigned to this container + if container.HostConfig.Runtime == "" { + container.HostConfig.Runtime = daemon.configStore.GetDefaultRuntimeName() + container.CheckpointTo(daemon.containersReplica) + } + + rt := daemon.configStore.GetRuntime(container.HostConfig.Runtime) + if rt == nil { + return nil, fmt.Errorf("no such runtime '%s'", container.HostConfig.Runtime) + } + if UsingSystemd(daemon.configStore) { + rt.Args = append(rt.Args, "--systemd-cgroup=true") + } + createOptions = append(createOptions, libcontainerd.WithRuntime(rt.Path, rt.Args)) + + return createOptions, nil +} diff --git a/vendor/github.com/moby/moby/daemon/start_windows.go b/vendor/github.com/moby/moby/daemon/start_windows.go new file mode 100644 index 000000000..74129bd61 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/start_windows.go @@ -0,0 +1,214 @@ +package daemon + +import ( + "fmt" + "io/ioutil" + "path/filepath" + "strings" + + "github.com/docker/docker/container" + "github.com/docker/docker/layer" + "github.com/docker/docker/libcontainerd" + "golang.org/x/sys/windows/registry" +) + +const ( + credentialSpecRegistryLocation = `SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs` + credentialSpecFileLocation = "CredentialSpecs" +) + +func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Container) ([]libcontainerd.CreateOption, error) { + createOptions := []libcontainerd.CreateOption{} + + // Are we going to run as a Hyper-V container? + hvOpts := &libcontainerd.HyperVIsolationOption{} + if container.HostConfig.Isolation.IsDefault() { + // Container is set to use the default, so take the default from the daemon configuration + hvOpts.IsHyperV = daemon.defaultIsolation.IsHyperV() + } else { + // Container is requesting an isolation mode. Honour it. 
+ hvOpts.IsHyperV = container.HostConfig.Isolation.IsHyperV() + } + + dnsSearch := daemon.getDNSSearchSettings(container) + + // Generate the layer folder of the layer options + layerOpts := &libcontainerd.LayerOption{} + m, err := container.RWLayer.Metadata() + if err != nil { + return nil, fmt.Errorf("failed to get layer metadata - %s", err) + } + layerOpts.LayerFolderPath = m["dir"] + + // Generate the layer paths of the layer options + img, err := daemon.stores[container.Platform].imageStore.Get(container.ImageID) + if err != nil { + return nil, fmt.Errorf("failed to graph.Get on ImageID %s - %s", container.ImageID, err) + } + // Get the layer path for each layer. + max := len(img.RootFS.DiffIDs) + for i := 1; i <= max; i++ { + img.RootFS.DiffIDs = img.RootFS.DiffIDs[:i] + layerPath, err := layer.GetLayerPath(daemon.stores[container.Platform].layerStore, img.RootFS.ChainID()) + if err != nil { + return nil, fmt.Errorf("failed to get layer path from graphdriver %s for ImageID %s - %s", daemon.stores[container.Platform].layerStore, img.RootFS.ChainID(), err) + } + // Reverse order, expecting parent most first + layerOpts.LayerPaths = append([]string{layerPath}, layerOpts.LayerPaths...) + } + + // Get endpoints for the libnetwork allocated networks to the container + var epList []string + AllowUnqualifiedDNSQuery := false + gwHNSID := "" + if container.NetworkSettings != nil { + for n := range container.NetworkSettings.Networks { + sn, err := daemon.FindNetwork(n) + if err != nil { + continue + } + + ep, err := container.GetEndpointInNetwork(sn) + if err != nil { + continue + } + + data, err := ep.DriverInfo() + if err != nil { + continue + } + + if data["GW_INFO"] != nil { + gwInfo := data["GW_INFO"].(map[string]interface{}) + if gwInfo["hnsid"] != nil { + gwHNSID = gwInfo["hnsid"].(string) + } + } + + if data["hnsid"] != nil { + epList = append(epList, data["hnsid"].(string)) + } + + if data["AllowUnqualifiedDNSQuery"] != nil { + AllowUnqualifiedDNSQuery = true + } + } + } + + if gwHNSID != "" { + epList = append(epList, gwHNSID) + } + + // Read and add credentials from the security options if a credential spec has been provided. 
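+ // Two forms are accepted, mirroring what a user passes on the CLI, e.g.
+ // --security-opt "credentialspec=file://spec.json" (a file under the
+ // daemon root's CredentialSpecs directory) or
+ // --security-opt "credentialspec=registry://keyname" (a value stored in
+ // the Windows registry); both are parsed below.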
+ if container.HostConfig.SecurityOpt != nil { + for _, sOpt := range container.HostConfig.SecurityOpt { + sOpt = strings.ToLower(sOpt) + if !strings.Contains(sOpt, "=") { + return nil, fmt.Errorf("invalid security option: no equals sign in supplied value %s", sOpt) + } + var splitsOpt []string + splitsOpt = strings.SplitN(sOpt, "=", 2) + if len(splitsOpt) != 2 { + return nil, fmt.Errorf("invalid security option: %s", sOpt) + } + if splitsOpt[0] != "credentialspec" { + return nil, fmt.Errorf("security option not supported: %s", splitsOpt[0]) + } + + credentialsOpts := &libcontainerd.CredentialsOption{} + var ( + match bool + csValue string + err error + ) + if match, csValue = getCredentialSpec("file://", splitsOpt[1]); match { + if csValue == "" { + return nil, fmt.Errorf("no value supplied for file:// credential spec security option") + } + if credentialsOpts.Credentials, err = readCredentialSpecFile(container.ID, daemon.root, filepath.Clean(csValue)); err != nil { + return nil, err + } + } else if match, csValue = getCredentialSpec("registry://", splitsOpt[1]); match { + if csValue == "" { + return nil, fmt.Errorf("no value supplied for registry:// credential spec security option") + } + if credentialsOpts.Credentials, err = readCredentialSpecRegistry(container.ID, csValue); err != nil { + return nil, err + } + } else { + return nil, fmt.Errorf("invalid credential spec security option - value must be prefixed file:// or registry:// followed by a value") + } + createOptions = append(createOptions, credentialsOpts) + } + } + + // Now add the remaining options. + createOptions = append(createOptions, &libcontainerd.FlushOption{IgnoreFlushesDuringBoot: !container.HasBeenStartedBefore}) + createOptions = append(createOptions, hvOpts) + createOptions = append(createOptions, layerOpts) + + var networkSharedContainerID string + if container.HostConfig.NetworkMode.IsContainer() { + networkSharedContainerID = container.NetworkSharedContainerID + for _, ep := range container.SharedEndpointList { + epList = append(epList, ep) + } + } + + createOptions = append(createOptions, &libcontainerd.NetworkEndpointsOption{ + Endpoints: epList, + AllowUnqualifiedDNSQuery: AllowUnqualifiedDNSQuery, + DNSSearchList: dnsSearch, + NetworkSharedContainerID: networkSharedContainerID, + }) + return createOptions, nil +} + +// getCredentialSpec is a helper function to get the value of a credential spec supplied +// on the CLI, stripping the prefix +func getCredentialSpec(prefix, value string) (bool, string) { + if strings.HasPrefix(value, prefix) { + return true, strings.TrimPrefix(value, prefix) + } + return false, "" +} + +// readCredentialSpecRegistry is a helper function to read a credential spec from +// the registry. If not found, we return an empty string and warn in the log. +// This allows for staging on machines which do not have the necessary components. 
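+// The spec is expected as a string value, named after the requested spec,
+// under HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs
+// (credentialSpecRegistryLocation above).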
+func readCredentialSpecRegistry(id, name string) (string, error) { + var ( + k registry.Key + err error + val string + ) + if k, err = registry.OpenKey(registry.LOCAL_MACHINE, credentialSpecRegistryLocation, registry.QUERY_VALUE); err != nil { + return "", fmt.Errorf("failed handling spec %q for container %s - %s could not be opened", name, id, credentialSpecRegistryLocation) + } + if val, _, err = k.GetStringValue(name); err != nil { + if err == registry.ErrNotExist { + return "", fmt.Errorf("credential spec %q for container %s as it was not found", name, id) + } + return "", fmt.Errorf("error %v reading credential spec %q from registry for container %s", err, name, id) + } + return val, nil +} + +// readCredentialSpecFile is a helper function to read a credential spec from +// a file. If not found, we return an empty string and warn in the log. +// This allows for staging on machines which do not have the necessary components. +func readCredentialSpecFile(id, root, location string) (string, error) { + if filepath.IsAbs(location) { + return "", fmt.Errorf("invalid credential spec - file:// path cannot be absolute") + } + base := filepath.Join(root, credentialSpecFileLocation) + full := filepath.Join(base, location) + if !strings.HasPrefix(full, base) { + return "", fmt.Errorf("invalid credential spec - file:// path must be under %s", base) + } + bcontents, err := ioutil.ReadFile(full) + if err != nil { + return "", fmt.Errorf("credential spec '%s' for container %s as the file could not be read: %q", full, id, err) + } + return string(bcontents[:]), nil +} diff --git a/vendor/github.com/moby/moby/daemon/stats.go b/vendor/github.com/moby/moby/daemon/stats.go new file mode 100644 index 000000000..926f32efd --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/stats.go @@ -0,0 +1,160 @@ +package daemon + +import ( + "encoding/json" + "errors" + "fmt" + "runtime" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/api/types/versions/v1p20" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/ioutils" +) + +// ContainerStats writes information about the container to the stream +// given in the config object. +func (daemon *Daemon) ContainerStats(ctx context.Context, prefixOrName string, config *backend.ContainerStatsConfig) error { + if runtime.GOOS == "solaris" { + return fmt.Errorf("%+v does not support stats", runtime.GOOS) + } + // Engine API version (used for backwards compatibility) + apiVersion := config.Version + + container, err := daemon.GetContainer(prefixOrName) + if err != nil { + return err + } + + // If the container is either not running or restarting and requires no stream, return an empty stats. 
+ if (!container.IsRunning() || container.IsRestarting()) && !config.Stream { + return json.NewEncoder(config.OutStream).Encode(&types.StatsJSON{ + Name: container.Name, + ID: container.ID}) + } + + outStream := config.OutStream + if config.Stream { + wf := ioutils.NewWriteFlusher(outStream) + defer wf.Close() + wf.Flush() + outStream = wf + } + + var preCPUStats types.CPUStats + var preRead time.Time + getStatJSON := func(v interface{}) *types.StatsJSON { + ss := v.(types.StatsJSON) + ss.Name = container.Name + ss.ID = container.ID + ss.PreCPUStats = preCPUStats + ss.PreRead = preRead + preCPUStats = ss.CPUStats + preRead = ss.Read + return &ss + } + + enc := json.NewEncoder(outStream) + + updates := daemon.subscribeToContainerStats(container) + defer daemon.unsubscribeToContainerStats(container, updates) + + noStreamFirstFrame := true + for { + select { + case v, ok := <-updates: + if !ok { + return nil + } + + var statsJSON interface{} + statsJSONPost120 := getStatJSON(v) + if versions.LessThan(apiVersion, "1.21") { + if runtime.GOOS == "windows" { + return errors.New("API versions pre v1.21 do not support stats on Windows") + } + var ( + rxBytes uint64 + rxPackets uint64 + rxErrors uint64 + rxDropped uint64 + txBytes uint64 + txPackets uint64 + txErrors uint64 + txDropped uint64 + ) + for _, v := range statsJSONPost120.Networks { + rxBytes += v.RxBytes + rxPackets += v.RxPackets + rxErrors += v.RxErrors + rxDropped += v.RxDropped + txBytes += v.TxBytes + txPackets += v.TxPackets + txErrors += v.TxErrors + txDropped += v.TxDropped + } + statsJSON = &v1p20.StatsJSON{ + Stats: statsJSONPost120.Stats, + Network: types.NetworkStats{ + RxBytes: rxBytes, + RxPackets: rxPackets, + RxErrors: rxErrors, + RxDropped: rxDropped, + TxBytes: txBytes, + TxPackets: txPackets, + TxErrors: txErrors, + TxDropped: txDropped, + }, + } + } else { + statsJSON = statsJSONPost120 + } + + if !config.Stream && noStreamFirstFrame { + // prime the cpu stats so they aren't 0 in the final output + noStreamFirstFrame = false + continue + } + + if err := enc.Encode(statsJSON); err != nil { + return err + } + + if !config.Stream { + return nil + } + case <-ctx.Done(): + return nil + } + } +} + +func (daemon *Daemon) subscribeToContainerStats(c *container.Container) chan interface{} { + return daemon.statsCollector.Collect(c) +} + +func (daemon *Daemon) unsubscribeToContainerStats(c *container.Container, ch chan interface{}) { + daemon.statsCollector.Unsubscribe(c, ch) +} + +// GetContainerStats collects all the stats published by a container +func (daemon *Daemon) GetContainerStats(container *container.Container) (*types.StatsJSON, error) { + stats, err := daemon.stats(container) + if err != nil { + return nil, err + } + + // We already have the network stats on Windows directly from HCS. 
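+ // On Linux the per-interface numbers gathered here are what ContainerStats
+ // above collapses into a single Network field for API versions older than
+ // 1.21.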
+ if !container.Config.NetworkDisabled && runtime.GOOS != "windows" { + if stats.Networks, err = daemon.getNetworkStats(container); err != nil { + return nil, err + } + } + + return stats, nil +} diff --git a/vendor/github.com/moby/moby/daemon/stats/collector.go b/vendor/github.com/moby/moby/daemon/stats/collector.go new file mode 100644 index 000000000..0520efa23 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/stats/collector.go @@ -0,0 +1,122 @@ +// +build !solaris + +package stats + +import ( + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/pubsub" +) + +// Collect registers the container with the collector and adds it to +// the event loop for collection on the specified interval returning +// a channel for the subscriber to receive on. +func (s *Collector) Collect(c *container.Container) chan interface{} { + s.m.Lock() + defer s.m.Unlock() + publisher, exists := s.publishers[c] + if !exists { + publisher = pubsub.NewPublisher(100*time.Millisecond, 1024) + s.publishers[c] = publisher + } + return publisher.Subscribe() +} + +// StopCollection closes the channels for all subscribers and removes +// the container from metrics collection. +func (s *Collector) StopCollection(c *container.Container) { + s.m.Lock() + if publisher, exists := s.publishers[c]; exists { + publisher.Close() + delete(s.publishers, c) + } + s.m.Unlock() +} + +// Unsubscribe removes a specific subscriber from receiving updates for a container's stats. +func (s *Collector) Unsubscribe(c *container.Container, ch chan interface{}) { + s.m.Lock() + publisher := s.publishers[c] + if publisher != nil { + publisher.Evict(ch) + if publisher.Len() == 0 { + delete(s.publishers, c) + } + } + s.m.Unlock() +} + +// Run starts the collectors and will indefinitely collect stats from the supervisor +func (s *Collector) Run() { + type publishersPair struct { + container *container.Container + publisher *pubsub.Publisher + } + // we cannot determine the capacity here. 
+ // it will grow enough in first iteration
+ var pairs []publishersPair
+
+ for range time.Tick(s.interval) {
+ // it does not make sense in the first iteration,
+ // but saves allocations in further iterations
+ pairs = pairs[:0]
+
+ s.m.Lock()
+ for container, publisher := range s.publishers {
+ // copy pointers here to release the lock ASAP
+ pairs = append(pairs, publishersPair{container, publisher})
+ }
+ s.m.Unlock()
+ if len(pairs) == 0 {
+ continue
+ }
+
+ systemUsage, err := s.getSystemCPUUsage()
+ if err != nil {
+ logrus.Errorf("collecting system cpu usage: %v", err)
+ continue
+ }
+
+ onlineCPUs, err := s.getNumberOnlineCPUs()
+ if err != nil {
+ logrus.Errorf("collecting system online cpu count: %v", err)
+ continue
+ }
+
+ for _, pair := range pairs {
+ stats, err := s.supervisor.GetContainerStats(pair.container)
+
+ switch err.(type) {
+ case nil:
+ // FIXME: move to containerd on Linux (not Windows)
+ stats.CPUStats.SystemUsage = systemUsage
+ stats.CPUStats.OnlineCPUs = onlineCPUs
+
+ pair.publisher.Publish(*stats)
+
+ case notRunningErr, notFoundErr:
+ // publish empty stats containing only name and ID if not running or not found
+ pair.publisher.Publish(types.StatsJSON{
+ Name: pair.container.Name,
+ ID: pair.container.ID,
+ })
+
+ default:
+ logrus.Errorf("collecting stats for %s: %v", pair.container.ID, err)
+ }
+ }
+ }
+}
+
+type notRunningErr interface {
+ error
+ ContainerIsRunning() bool
+}
+
+type notFoundErr interface {
+ error
+ ContainerNotFound() bool
+}
diff --git a/vendor/github.com/moby/moby/daemon/stats/collector_solaris.go b/vendor/github.com/moby/moby/daemon/stats/collector_solaris.go
new file mode 100644
index 000000000..3699d08c1
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/stats/collector_solaris.go
@@ -0,0 +1,29 @@
+package stats
+
+import (
+ "github.com/docker/docker/container"
+)
+
+// platformNewStatsCollector performs platform specific initialisation of the
+// Collector structure. This is a no-op on Solaris.
+func platformNewStatsCollector(s *Collector) {
+}
+
+// Collect registers the container with the collector and adds it to
+// the event loop for collection on the specified interval returning
+// a channel for the subscriber to receive on.
+// Currently not supported on Solaris
+func (s *Collector) Collect(c *container.Container) chan interface{} {
+ return nil
+}
+
+// StopCollection closes the channels for all subscribers and removes
+// the container from metrics collection.
+// Currently not supported on Solaris
+func (s *Collector) StopCollection(c *container.Container) {
+}
+
+// Unsubscribe removes a specific subscriber from receiving updates for a container's stats.
+// Currently not supported on Solaris
+func (s *Collector) Unsubscribe(c *container.Container, ch chan interface{}) {
+}
diff --git a/vendor/github.com/moby/moby/daemon/stats/collector_unix.go b/vendor/github.com/moby/moby/daemon/stats/collector_unix.go
new file mode 100644
index 000000000..cd522e07c
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/stats/collector_unix.go
@@ -0,0 +1,83 @@
+// +build !windows,!solaris
+
+package stats
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/opencontainers/runc/libcontainer/system"
+)
+
+/*
+#include <unistd.h>
+*/
+import "C"
+
+// platformNewStatsCollector performs platform specific initialisation of the
+// Collector structure.
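The Run loop above dispatches on error behaviour rather than on concrete error types: any error whose type implements ContainerIsRunning or ContainerNotFound is treated as "publish empty stats", everything else is logged. A minimal sketch of that behavioural-interface pattern (errNotFound is a hypothetical error type introduced for illustration):

package main

import (
	"errors"
	"fmt"
)

// notFoundErr matches the behavioural interface used by the collector above.
type notFoundErr interface {
	error
	ContainerNotFound() bool
}

// errNotFound is a hypothetical error type satisfying notFoundErr.
type errNotFound struct{ id string }

func (e errNotFound) Error() string           { return "no such container: " + e.id }
func (e errNotFound) ContainerNotFound() bool { return true }

func classify(err error) string {
	switch err.(type) {
	case nil:
		return "ok"
	case notFoundErr:
		return "publish empty stats"
	default:
		return "log and continue"
	}
}

func main() {
	fmt.Println(classify(nil))                       // ok
	fmt.Println(classify(errNotFound{id: "abc"}))    // publish empty stats
	fmt.Println(classify(errors.New("i/o timeout"))) // log and continue
}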
+func platformNewStatsCollector(s *Collector) { + s.clockTicksPerSecond = uint64(system.GetClockTicks()) +} + +const nanoSecondsPerSecond = 1e9 + +// getSystemCPUUsage returns the host system's cpu usage in +// nanoseconds. An error is returned if the format of the underlying +// file does not match. +// +// Uses /proc/stat defined by POSIX. Looks for the cpu +// statistics line and then sums up the first seven fields +// provided. See `man 5 proc` for details on specific field +// information. +func (s *Collector) getSystemCPUUsage() (uint64, error) { + var line string + f, err := os.Open("/proc/stat") + if err != nil { + return 0, err + } + defer func() { + s.bufReader.Reset(nil) + f.Close() + }() + s.bufReader.Reset(f) + err = nil + for err == nil { + line, err = s.bufReader.ReadString('\n') + if err != nil { + break + } + parts := strings.Fields(line) + switch parts[0] { + case "cpu": + if len(parts) < 8 { + return 0, fmt.Errorf("invalid number of cpu fields") + } + var totalClockTicks uint64 + for _, i := range parts[1:8] { + v, err := strconv.ParseUint(i, 10, 64) + if err != nil { + return 0, fmt.Errorf("Unable to convert value %s to int: %s", i, err) + } + totalClockTicks += v + } + return (totalClockTicks * nanoSecondsPerSecond) / + s.clockTicksPerSecond, nil + } + } + return 0, fmt.Errorf("invalid stat format. Error trying to parse the '/proc/stat' file") +} + +func (s *Collector) getNumberOnlineCPUs() (uint32, error) { + i, err := C.sysconf(C._SC_NPROCESSORS_ONLN) + // According to POSIX - errno is undefined after successful + // sysconf, and can be non-zero in several cases, so look for + // error in returned value not in errno. + // (https://sourceware.org/bugzilla/show_bug.cgi?id=21536) + if i == -1 { + return 0, err + } + return uint32(i), nil +} diff --git a/vendor/github.com/moby/moby/daemon/stats/collector_windows.go b/vendor/github.com/moby/moby/daemon/stats/collector_windows.go new file mode 100644 index 000000000..5fb27ced6 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/stats/collector_windows.go @@ -0,0 +1,19 @@ +// +build windows + +package stats + +// platformNewStatsCollector performs platform specific initialisation of the +// Collector structure. This is a no-op on Windows. +func platformNewStatsCollector(s *Collector) { +} + +// getSystemCPUUsage returns the host system's cpu usage in +// nanoseconds. An error is returned if the format of the underlying +// file does not match. This is a no-op on Windows. 
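getSystemCPUUsage above sums the first seven fields of the "cpu" line in /proc/stat and converts clock ticks to nanoseconds. The same arithmetic in isolation, on a sample line (assuming the common value of 100 ticks per second; the daemon obtains the real value via sysconf):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// A sample aggregate cpu line as found in /proc/stat.
	line := "cpu  4705 356 584 3699 23 23 0 0 0 0"
	fields := strings.Fields(line)

	var ticks uint64
	for _, f := range fields[1:8] { // user, nice, system, idle, iowait, irq, softirq
		v, err := strconv.ParseUint(f, 10, 64)
		if err != nil {
			panic(err)
		}
		ticks += v
	}

	const clockTicksPerSecond = 100 // assumed USER_HZ
	const nanoSecondsPerSecond = 1e9
	fmt.Println(ticks * nanoSecondsPerSecond / clockTicksPerSecond) // 93900000000
}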
+func (s *Collector) getSystemCPUUsage() (uint64, error) {
+ return 0, nil
+}
+
+func (s *Collector) getNumberOnlineCPUs() (uint32, error) {
+ return 0, nil
+}
diff --git a/vendor/github.com/moby/moby/daemon/stats/types.go b/vendor/github.com/moby/moby/daemon/stats/types.go
new file mode 100644
index 000000000..e48783c9a
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/stats/types.go
@@ -0,0 +1,42 @@
+package stats
+
+import (
+ "bufio"
+ "sync"
+ "time"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/container"
+ "github.com/docker/docker/pkg/pubsub"
+)
+
+type supervisor interface {
+ // GetContainerStats collects all the stats related to a container
+ GetContainerStats(container *container.Container) (*types.StatsJSON, error)
+}
+
+// NewCollector creates a stats collector that will poll the supervisor with the specified interval.
+func NewCollector(supervisor supervisor, interval time.Duration) *Collector {
+ s := &Collector{
+ interval: interval,
+ supervisor: supervisor,
+ publishers: make(map[*container.Container]*pubsub.Publisher),
+ bufReader: bufio.NewReaderSize(nil, 128),
+ }
+
+ platformNewStatsCollector(s)
+
+ return s
+}
+
+// Collector manages and provides container resource stats
+type Collector struct {
+ m sync.Mutex
+ supervisor supervisor
+ interval time.Duration
+ publishers map[*container.Container]*pubsub.Publisher
+ bufReader *bufio.Reader
+
+ // The following fields are not set on Windows currently.
+ clockTicksPerSecond uint64
+}
diff --git a/vendor/github.com/moby/moby/daemon/stats_collector.go b/vendor/github.com/moby/moby/daemon/stats_collector.go
new file mode 100644
index 000000000..7daf26f9f
--- /dev/null
+++ b/vendor/github.com/moby/moby/daemon/stats_collector.go
@@ -0,0 +1,26 @@
+package daemon
+
+import (
+ "runtime"
+ "time"
+
+ "github.com/docker/docker/daemon/stats"
+ "github.com/docker/docker/pkg/system"
+)
+
+// newStatsCollector returns a new stats.Collector that collects
+// stats for a registered container at the specified interval.
+// The collector allows non-running containers to be added
+// and will start processing stats when they are started.
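Because the collector only depends on the small supervisor interface above, it can be exercised with a stub that fabricates stats. A hedged usage sketch against the vendored import paths shown in this patch (stubSupervisor is invented for illustration):

package main

import (
	"time"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/container"
	"github.com/docker/docker/daemon/stats"
)

// stubSupervisor satisfies the package's supervisor interface.
type stubSupervisor struct{}

func (stubSupervisor) GetContainerStats(c *container.Container) (*types.StatsJSON, error) {
	s := &types.StatsJSON{}
	s.Read = time.Now()
	return s, nil
}

func main() {
	c := stats.NewCollector(stubSupervisor{}, time.Second)
	go c.Run()
	// Collect/Unsubscribe would then be driven per *container.Container.
}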
+func (daemon *Daemon) newStatsCollector(interval time.Duration) *stats.Collector { + // FIXME(vdemeester) move this elsewhere + if runtime.GOOS == "linux" { + meminfo, err := system.ReadMemInfo() + if err == nil && meminfo.MemTotal > 0 { + daemon.machineMemory = uint64(meminfo.MemTotal) + } + } + s := stats.NewCollector(daemon, interval) + go s.Run() + return s +} diff --git a/vendor/github.com/moby/moby/daemon/stats_unix.go b/vendor/github.com/moby/moby/daemon/stats_unix.go new file mode 100644 index 000000000..d875607b3 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/stats_unix.go @@ -0,0 +1,58 @@ +// +build !windows + +package daemon + +import ( + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/container" +) + +// Resolve Network SandboxID in case the container reuse another container's network stack +func (daemon *Daemon) getNetworkSandboxID(c *container.Container) (string, error) { + curr := c + for curr.HostConfig.NetworkMode.IsContainer() { + containerID := curr.HostConfig.NetworkMode.ConnectedContainer() + connected, err := daemon.GetContainer(containerID) + if err != nil { + return "", fmt.Errorf("Could not get container for %s", containerID) + } + curr = connected + } + return curr.NetworkSettings.SandboxID, nil +} + +func (daemon *Daemon) getNetworkStats(c *container.Container) (map[string]types.NetworkStats, error) { + sandboxID, err := daemon.getNetworkSandboxID(c) + if err != nil { + return nil, err + } + + sb, err := daemon.netController.SandboxByID(sandboxID) + if err != nil { + return nil, err + } + + lnstats, err := sb.Statistics() + if err != nil { + return nil, err + } + + stats := make(map[string]types.NetworkStats) + // Convert libnetwork nw stats into api stats + for ifName, ifStats := range lnstats { + stats[ifName] = types.NetworkStats{ + RxBytes: ifStats.RxBytes, + RxPackets: ifStats.RxPackets, + RxErrors: ifStats.RxErrors, + RxDropped: ifStats.RxDropped, + TxBytes: ifStats.TxBytes, + TxPackets: ifStats.TxPackets, + TxErrors: ifStats.TxErrors, + TxDropped: ifStats.TxDropped, + } + } + + return stats, nil +} diff --git a/vendor/github.com/moby/moby/daemon/stats_windows.go b/vendor/github.com/moby/moby/daemon/stats_windows.go new file mode 100644 index 000000000..f8e6f6f84 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/stats_windows.go @@ -0,0 +1,11 @@ +package daemon + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/container" +) + +// Windows network stats are obtained directly through HCS, hence this is a no-op. +func (daemon *Daemon) getNetworkStats(c *container.Container) (map[string]types.NetworkStats, error) { + return make(map[string]types.NetworkStats), nil +} diff --git a/vendor/github.com/moby/moby/daemon/stop.go b/vendor/github.com/moby/moby/daemon/stop.go new file mode 100644 index 000000000..6a4776d15 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/stop.go @@ -0,0 +1,91 @@ +package daemon + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/errors" + containerpkg "github.com/docker/docker/container" +) + +// ContainerStop looks for the given container and terminates it, +// waiting the given number of seconds before forcefully killing the +// container. If a negative number of seconds is given, ContainerStop +// will wait for a graceful termination. An error is returned if the +// container is not found, is already stopped, or if there is a +// problem stopping the container. 
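ContainerStop and containerStop below implement a two-stage shutdown: send the configured stop signal, wait a bounded time for the process to exit, then escalate to SIGKILL and wait without a deadline. A stdlib-only sketch of that escalation shape (process management elided; waitStopped and the done channel stand in for container.Wait):

package main

import (
	"context"
	"fmt"
	"time"
)

// waitStopped is a stand-in for container.Wait: it returns nil once the
// (hypothetical) process exits, or the context error on timeout.
func waitStopped(ctx context.Context, done <-chan struct{}) error {
	select {
	case <-done:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func stop(done chan struct{}, timeout time.Duration) {
	// 1. The polite stop signal (e.g. SIGTERM) would be sent here.
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	if err := waitStopped(ctx, done); err != nil {
		// 2. Escalate: SIGKILL, then wait without a deadline.
		fmt.Println("timeout expired, force killing")
		close(done) // pretend the kill succeeded
		waitStopped(context.Background(), done)
	}
	fmt.Println("stopped")
}

func main() {
	stop(make(chan struct{}), 100*time.Millisecond)
}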
+func (daemon *Daemon) ContainerStop(name string, seconds *int) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + if !container.IsRunning() { + err := fmt.Errorf("Container %s is already stopped", name) + return errors.NewErrorWithStatusCode(err, http.StatusNotModified) + } + if seconds == nil { + stopTimeout := container.StopTimeout() + seconds = &stopTimeout + } + if err := daemon.containerStop(container, *seconds); err != nil { + return fmt.Errorf("Cannot stop container %s: %v", name, err) + } + return nil +} + +// containerStop halts a container by sending a stop signal, waiting for the given +// duration in seconds, and then calling SIGKILL and waiting for the +// process to exit. If a negative duration is given, Stop will wait +// for the initial signal forever. If the container is not running Stop returns +// immediately. +func (daemon *Daemon) containerStop(container *containerpkg.Container, seconds int) error { + if !container.IsRunning() { + return nil + } + + daemon.stopHealthchecks(container) + + stopSignal := container.StopSignal() + // 1. Send a stop signal + if err := daemon.killPossiblyDeadProcess(container, stopSignal); err != nil { + // While normally we might "return err" here we're not going to + // because if we can't stop the container by this point then + // it's probably because it's already stopped. Meaning, between + // the time of the IsRunning() call above and now it stopped. + // Also, since the err return will be environment specific we can't + // look for any particular (common) error that would indicate + // that the process is already dead vs something else going wrong. + // So, instead we'll give it up to 2 more seconds to complete and if + // by that time the container is still running, then the error + // we got is probably valid and so we force kill it. + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + if status := <-container.Wait(ctx, containerpkg.WaitConditionNotRunning); status.Err() != nil { + logrus.Infof("Container failed to stop after sending signal %d to the process, force killing", stopSignal) + if err := daemon.killPossiblyDeadProcess(container, 9); err != nil { + return err + } + } + } + + // 2. Wait for the process to exit on its own + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(seconds)*time.Second) + defer cancel() + + if status := <-container.Wait(ctx, containerpkg.WaitConditionNotRunning); status.Err() != nil { + logrus.Infof("Container %v failed to exit within %d seconds of signal %d - using the force", container.ID, seconds, stopSignal) + // 3. If it doesn't, then send SIGKILL + if err := daemon.Kill(container); err != nil { + // Wait without a timeout, ignore result. + _ = <-container.Wait(context.Background(), containerpkg.WaitConditionNotRunning) + logrus.Warn(err) // Don't return error because we only care that container is stopped, not what function stopped it + } + } + + daemon.LogContainerEvent(container, "stop") + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/top_unix.go b/vendor/github.com/moby/moby/daemon/top_unix.go new file mode 100644 index 000000000..4d94d4a62 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/top_unix.go @@ -0,0 +1,155 @@ +//+build !windows + +package daemon + +import ( + "fmt" + "os/exec" + "regexp" + "strconv" + "strings" + + "github.com/docker/docker/api/types/container" +) + +func validatePSArgs(psArgs string) error { + // NOTE: \\s does not detect unicode whitespaces. 
+ // So we use fieldsASCII instead of strings.Fields in parsePSOutput. + // See https://github.com/docker/docker/pull/24358 + re := regexp.MustCompile("\\s+([^\\s]*)=\\s*(PID[^\\s]*)") + for _, group := range re.FindAllStringSubmatch(psArgs, -1) { + if len(group) >= 3 { + k := group[1] + v := group[2] + if k != "pid" { + return fmt.Errorf("specifying \"%s=%s\" is not allowed", k, v) + } + } + } + return nil +} + +// fieldsASCII is similar to strings.Fields but only allows ASCII whitespaces +func fieldsASCII(s string) []string { + fn := func(r rune) bool { + switch r { + case '\t', '\n', '\f', '\r', ' ': + return true + } + return false + } + return strings.FieldsFunc(s, fn) +} + +func appendProcess2ProcList(procList *container.ContainerTopOKBody, fields []string) { + // Make sure number of fields equals number of header titles + // merging "overhanging" fields + process := fields[:len(procList.Titles)-1] + process = append(process, strings.Join(fields[len(procList.Titles)-1:], " ")) + procList.Processes = append(procList.Processes, process) +} + +func hasPid(pids []int, pid int) bool { + for _, i := range pids { + if i == pid { + return true + } + } + return false +} + +func parsePSOutput(output []byte, pids []int) (*container.ContainerTopOKBody, error) { + procList := &container.ContainerTopOKBody{} + + lines := strings.Split(string(output), "\n") + procList.Titles = fieldsASCII(lines[0]) + + pidIndex := -1 + for i, name := range procList.Titles { + if name == "PID" { + pidIndex = i + } + } + if pidIndex == -1 { + return nil, fmt.Errorf("Couldn't find PID field in ps output") + } + + // loop through the output and extract the PID from each line + // fixing #30580, be able to display thread line also when "m" option used + // in "docker top" client command + preContainedPidFlag := false + for _, line := range lines[1:] { + if len(line) == 0 { + continue + } + fields := fieldsASCII(line) + + var ( + p int + err error + ) + + if fields[pidIndex] == "-" { + if preContainedPidFlag { + appendProcess2ProcList(procList, fields) + } + continue + } + p, err = strconv.Atoi(fields[pidIndex]) + if err != nil { + return nil, fmt.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err) + } + + if hasPid(pids, p) { + preContainedPidFlag = true + appendProcess2ProcList(procList, fields) + continue + } + preContainedPidFlag = false + } + return procList, nil +} + +// ContainerTop lists the processes running inside of the given +// container by calling ps with the given args, or with the flags +// "-ef" if no args are given. An error is returned if the container +// is not found, or is not running, or if there are any problems +// running ps, or parsing the output. 
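The validation and parsing above hinge on ASCII-only field splitting: strings.Fields also splits on Unicode whitespace such as U+2003, which would let a crafted ps format string sneak a fake PID column past validatePSArgs. A small demonstration of the difference:

package main

import (
	"fmt"
	"strings"
)

// fieldsASCII splits only on ASCII whitespace, mirroring the helper above.
func fieldsASCII(s string) []string {
	return strings.FieldsFunc(s, func(r rune) bool {
		switch r {
		case '\t', '\n', '\f', '\r', ' ':
			return true
		}
		return false
	})
}

func main() {
	// "uid=" and "PID" separated by U+2003 (EM SPACE), not an ASCII space.
	s := "uid=\u2003PID"
	fmt.Printf("%q\n", strings.Fields(s)) // ["uid=" "PID"] — split on the Unicode space
	fmt.Printf("%q\n", fieldsASCII(s))    // ["uid=\u2003PID"] — kept as a single field
}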
+func (daemon *Daemon) ContainerTop(name string, psArgs string) (*container.ContainerTopOKBody, error) { + if psArgs == "" { + psArgs = "-ef" + } + + if err := validatePSArgs(psArgs); err != nil { + return nil, err + } + + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + if !container.IsRunning() { + return nil, errNotRunning{container.ID} + } + + if container.IsRestarting() { + return nil, errContainerIsRestarting(container.ID) + } + + pids, err := daemon.containerd.GetPidsForContainer(container.ID) + if err != nil { + return nil, err + } + + output, err := exec.Command("ps", strings.Split(psArgs, " ")...).Output() + if err != nil { + return nil, fmt.Errorf("Error running ps: %v", err) + } + procList, err := parsePSOutput(output, pids) + if err != nil { + return nil, err + } + daemon.LogContainerEvent(container, "top") + return procList, nil +} diff --git a/vendor/github.com/moby/moby/daemon/top_unix_test.go b/vendor/github.com/moby/moby/daemon/top_unix_test.go new file mode 100644 index 000000000..9a3749f71 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/top_unix_test.go @@ -0,0 +1,79 @@ +//+build !windows + +package daemon + +import ( + "testing" +) + +func TestContainerTopValidatePSArgs(t *testing.T) { + tests := map[string]bool{ + "ae -o uid=PID": true, + "ae -o \"uid= PID\"": true, // ascii space (0x20) + "ae -o \"uid= PID\"": false, // unicode space (U+2003, 0xe2 0x80 0x83) + "ae o uid=PID": true, + "aeo uid=PID": true, + "ae -O uid=PID": true, + "ae -o pid=PID2 -o uid=PID": true, + "ae -o pid=PID": false, + "ae -o pid=PID -o uid=PIDX": true, // FIXME: we do not need to prohibit this + "aeo pid=PID": false, + "ae": false, + "": false, + } + for psArgs, errExpected := range tests { + err := validatePSArgs(psArgs) + t.Logf("tested %q, got err=%v", psArgs, err) + if errExpected && err == nil { + t.Fatalf("expected error, got %v (%q)", err, psArgs) + } + if !errExpected && err != nil { + t.Fatalf("expected nil, got %v (%q)", err, psArgs) + } + } +} + +func TestContainerTopParsePSOutput(t *testing.T) { + tests := []struct { + output []byte + pids []int + errExpected bool + }{ + {[]byte(` PID COMMAND + 42 foo + 43 bar + - - + 100 baz +`), []int{42, 43}, false}, + {[]byte(` UID COMMAND + 42 foo + 43 bar + - - + 100 baz +`), []int{42, 43}, true}, + // unicode space (U+2003, 0xe2 0x80 0x83) + {[]byte(` PID COMMAND + 42 foo + 43 bar + - - + 100 baz +`), []int{42, 43}, true}, + // the first space is U+2003, the second one is ascii. + {[]byte(` PID COMMAND + 42 foo + 43 bar + 100 baz +`), []int{42, 43}, true}, + } + + for _, f := range tests { + _, err := parsePSOutput(f.output, f.pids) + t.Logf("tested %q, got err=%v", string(f.output), err) + if f.errExpected && err == nil { + t.Fatalf("expected error, got %v (%q)", err, string(f.output)) + } + if !f.errExpected && err != nil { + t.Fatalf("expected nil, got %v (%q)", err, string(f.output)) + } + } +} diff --git a/vendor/github.com/moby/moby/daemon/top_windows.go b/vendor/github.com/moby/moby/daemon/top_windows.go new file mode 100644 index 000000000..000720b00 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/top_windows.go @@ -0,0 +1,53 @@ +package daemon + +import ( + "errors" + "fmt" + "time" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/go-units" +) + +// ContainerTop handles `docker top` client requests. +// Future considerations: +// -- Windows users are far more familiar with CPU% total. 
+// Further, users on Windows rarely see user/kernel CPU stats split. +// The kernel returns everything in terms of 100ns. To obtain +// CPU%, we could do something like docker stats does which takes two +// samples, subtract the difference and do the maths. Unfortunately this +// would slow the stat call down and require two kernel calls. So instead, +// we do something similar to linux and display the CPU as combined HH:MM:SS.mmm. +// -- Perhaps we could add an argument to display "raw" stats +// -- "Memory" is an extremely overloaded term in Windows. Hence we do what +// task manager does and use the private working set as the memory counter. +// We could return more info for those who really understand how memory +// management works in Windows if we introduced a "raw" stats (above). +func (daemon *Daemon) ContainerTop(name string, psArgs string) (*containertypes.ContainerTopOKBody, error) { + // It's not at all an equivalent to linux 'ps' on Windows + if psArgs != "" { + return nil, errors.New("Windows does not support arguments to top") + } + + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + s, err := daemon.containerd.Summary(container.ID) + if err != nil { + return nil, err + } + procList := &containertypes.ContainerTopOKBody{} + procList.Titles = []string{"Name", "PID", "CPU", "Private Working Set"} + + for _, j := range s { + d := time.Duration((j.KernelTime100ns + j.UserTime100ns) * 100) // Combined time in nanoseconds + procList.Processes = append(procList.Processes, []string{ + j.ImageName, + fmt.Sprint(j.ProcessId), + fmt.Sprintf("%02d:%02d:%02d.%03d", int(d.Hours()), int(d.Minutes())%60, int(d.Seconds())%60, int(d.Nanoseconds()/1000000)%1000), + units.HumanSize(float64(j.MemoryWorkingSetPrivateBytes))}) + } + return procList, nil +} diff --git a/vendor/github.com/moby/moby/daemon/unpause.go b/vendor/github.com/moby/moby/daemon/unpause.go new file mode 100644 index 000000000..e66b3868d --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/unpause.go @@ -0,0 +1,38 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/docker/container" +) + +// ContainerUnpause unpauses a container +func (daemon *Daemon) ContainerUnpause(name string) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + if err := daemon.containerUnpause(container); err != nil { + return err + } + + return nil +} + +// containerUnpause resumes the container execution after the container is paused. 
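As the comment on the Windows ContainerTop above notes, HCS reports CPU time in 100ns units, and the handler renders the combined kernel+user time as HH:MM:SS.mmm. The conversion in isolation, with made-up counter values:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Hypothetical HCS counters, in 100ns units.
	var kernelTime100ns, userTime100ns uint64 = 18000000000, 27345678901

	d := time.Duration((kernelTime100ns + userTime100ns) * 100) // to nanoseconds
	fmt.Printf("%02d:%02d:%02d.%03d\n",
		int(d.Hours()),
		int(d.Minutes())%60,
		int(d.Seconds())%60,
		int(d.Nanoseconds()/1000000)%1000) // 01:15:34.567
}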
+func (daemon *Daemon) containerUnpause(container *container.Container) error { + container.Lock() + defer container.Unlock() + + // We cannot unpause the container which is not paused + if !container.Paused { + return fmt.Errorf("Container %s is not paused", container.ID) + } + + if err := daemon.containerd.Resume(container.ID); err != nil { + return fmt.Errorf("Cannot unpause container %s: %s", container.ID, err) + } + + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/update.go b/vendor/github.com/moby/moby/daemon/update.go new file mode 100644 index 000000000..a65cbd51b --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/update.go @@ -0,0 +1,86 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/docker/api/types/container" +) + +// ContainerUpdate updates configuration of the container +func (daemon *Daemon) ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error) { + var warnings []string + + warnings, err := daemon.verifyContainerSettings(hostConfig, nil, true) + if err != nil { + return container.ContainerUpdateOKBody{Warnings: warnings}, err + } + + if err := daemon.update(name, hostConfig); err != nil { + return container.ContainerUpdateOKBody{Warnings: warnings}, err + } + + return container.ContainerUpdateOKBody{Warnings: warnings}, nil +} + +func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) error { + if hostConfig == nil { + return nil + } + + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + restoreConfig := false + backupHostConfig := *container.HostConfig + defer func() { + if restoreConfig { + container.Lock() + container.HostConfig = &backupHostConfig + container.CheckpointTo(daemon.containersReplica) + container.Unlock() + } + }() + + if container.RemovalInProgress || container.Dead { + return errCannotUpdate(container.ID, fmt.Errorf("Container is marked for removal and cannot be \"update\".")) + } + + container.Lock() + if err := container.UpdateContainer(hostConfig); err != nil { + restoreConfig = true + container.Unlock() + return errCannotUpdate(container.ID, err) + } + if err := container.CheckpointTo(daemon.containersReplica); err != nil { + restoreConfig = true + container.Unlock() + return errCannotUpdate(container.ID, err) + } + container.Unlock() + + // if Restart Policy changed, we need to update container monitor + if hostConfig.RestartPolicy.Name != "" { + container.UpdateMonitor(hostConfig.RestartPolicy) + } + + // If container is not running, update hostConfig struct is enough, + // resources will be updated when the container is started again. + // If container is running (including paused), we need to update configs + // to the real world. 
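The live-update branch below hands the new resources to containerd via toContainerdResources. On Linux (see update_linux.go further down) an API-level NanoCPUs value is translated into a CFS period/quota pair: the period is pinned at 100ms expressed in microseconds, and the quota is scaled proportionally. A worked example of that arithmetic:

package main

import (
	"fmt"
	"time"
)

func main() {
	// 2.5 CPUs requested, i.e. NanoCPUs = 2.5e9.
	var nanoCPUs uint64 = 2500000000

	period := uint64(100 * time.Millisecond / time.Microsecond) // 100000 µs
	quota := nanoCPUs * period / 1e9                            // 250000 µs

	fmt.Println(period, quota) // 100000 250000
}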
+ if container.IsRunning() && !container.IsRestarting() { + if err := daemon.containerd.UpdateResources(container.ID, toContainerdResources(hostConfig.Resources)); err != nil { + restoreConfig = true + return errCannotUpdate(container.ID, err) + } + } + + daemon.LogContainerEvent(container, "update") + + return nil +} + +func errCannotUpdate(containerID string, err error) error { + return fmt.Errorf("Cannot update container %s: %v", containerID, err) +} diff --git a/vendor/github.com/moby/moby/daemon/update_linux.go b/vendor/github.com/moby/moby/daemon/update_linux.go new file mode 100644 index 000000000..c12896721 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/update_linux.go @@ -0,0 +1,32 @@ +// +build linux + +package daemon + +import ( + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/libcontainerd" +) + +func toContainerdResources(resources container.Resources) libcontainerd.Resources { + var r libcontainerd.Resources + r.BlkioWeight = uint64(resources.BlkioWeight) + r.CpuShares = uint64(resources.CPUShares) + if resources.NanoCPUs != 0 { + r.CpuPeriod = uint64(100 * time.Millisecond / time.Microsecond) + r.CpuQuota = uint64(resources.NanoCPUs) * r.CpuPeriod / 1e9 + } else { + r.CpuPeriod = uint64(resources.CPUPeriod) + r.CpuQuota = uint64(resources.CPUQuota) + } + r.CpusetCpus = resources.CpusetCpus + r.CpusetMems = resources.CpusetMems + r.MemoryLimit = uint64(resources.Memory) + if resources.MemorySwap > 0 { + r.MemorySwap = uint64(resources.MemorySwap) + } + r.MemoryReservation = uint64(resources.MemoryReservation) + r.KernelMemoryLimit = uint64(resources.KernelMemory) + return r +} diff --git a/vendor/github.com/moby/moby/daemon/update_solaris.go b/vendor/github.com/moby/moby/daemon/update_solaris.go new file mode 100644 index 000000000..f3b545c5f --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/update_solaris.go @@ -0,0 +1,11 @@ +package daemon + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/libcontainerd" +) + +func toContainerdResources(resources container.Resources) libcontainerd.Resources { + var r libcontainerd.Resources + return r +} diff --git a/vendor/github.com/moby/moby/daemon/update_windows.go b/vendor/github.com/moby/moby/daemon/update_windows.go new file mode 100644 index 000000000..01466260b --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/update_windows.go @@ -0,0 +1,13 @@ +// +build windows + +package daemon + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/libcontainerd" +) + +func toContainerdResources(resources container.Resources) libcontainerd.Resources { + var r libcontainerd.Resources + return r +} diff --git a/vendor/github.com/moby/moby/daemon/volumes.go b/vendor/github.com/moby/moby/daemon/volumes.go new file mode 100644 index 000000000..6f24f0591 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/volumes.go @@ -0,0 +1,395 @@ +package daemon + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "reflect" + "strings" + "time" + + "github.com/Sirupsen/logrus" + dockererrors "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/container" + "github.com/docker/docker/volume" + "github.com/docker/docker/volume/drivers" +) + +var ( + // ErrVolumeReadonly is used to signal an error when trying to copy data into + // a volume mount that is not 
writable. + ErrVolumeReadonly = errors.New("mounted volume is marked read-only") +) + +type mounts []container.Mount + +// volumeToAPIType converts a volume.Volume to the type used by the Engine API +func volumeToAPIType(v volume.Volume) *types.Volume { + createdAt, _ := v.CreatedAt() + tv := &types.Volume{ + Name: v.Name(), + Driver: v.DriverName(), + CreatedAt: createdAt.Format(time.RFC3339), + } + if v, ok := v.(volume.DetailedVolume); ok { + tv.Labels = v.Labels() + tv.Options = v.Options() + tv.Scope = v.Scope() + } + + return tv +} + +// Len returns the number of mounts. Used in sorting. +func (m mounts) Len() int { + return len(m) +} + +// Less returns true if the number of parts (a/b/c would be 3 parts) in the +// mount indexed by parameter 1 is less than that of the mount indexed by +// parameter 2. Used in sorting. +func (m mounts) Less(i, j int) bool { + return m.parts(i) < m.parts(j) +} + +// Swap swaps two items in an array of mounts. Used in sorting +func (m mounts) Swap(i, j int) { + m[i], m[j] = m[j], m[i] +} + +// parts returns the number of parts in the destination of a mount. Used in sorting. +func (m mounts) parts(i int) int { + return strings.Count(filepath.Clean(m[i].Destination), string(os.PathSeparator)) +} + +// registerMountPoints initializes the container mount points with the configured volumes and bind mounts. +// It follows the next sequence to decide what to mount in each final destination: +// +// 1. Select the previously configured mount points for the containers, if any. +// 2. Select the volumes mounted from another containers. Overrides previously configured mount point destination. +// 3. Select the bind mounts set by the client. Overrides previously configured mount point destinations. +// 4. Cleanup old volumes that are about to be reassigned. +func (daemon *Daemon) registerMountPoints(container *container.Container, hostConfig *containertypes.HostConfig) (retErr error) { + binds := map[string]bool{} + mountPoints := map[string]*volume.MountPoint{} + defer func() { + // clean up the container mountpoints once return with error + if retErr != nil { + for _, m := range mountPoints { + if m.Volume == nil { + continue + } + daemon.volumes.Dereference(m.Volume, container.ID) + } + } + }() + + dereferenceIfExists := func(destination string) { + if v, ok := mountPoints[destination]; ok { + logrus.Debugf("Duplicate mount point '%s'", destination) + if v.Volume != nil { + daemon.volumes.Dereference(v.Volume, container.ID) + } + } + } + + // 1. Read already configured mount points. + for destination, point := range container.MountPoints { + mountPoints[destination] = point + } + + // 2. Read volumes from other containers. + for _, v := range hostConfig.VolumesFrom { + containerID, mode, err := volume.ParseVolumesFrom(v) + if err != nil { + return err + } + + c, err := daemon.GetContainer(containerID) + if err != nil { + return err + } + + for _, m := range c.MountPoints { + cp := &volume.MountPoint{ + Type: m.Type, + Name: m.Name, + Source: m.Source, + RW: m.RW && volume.ReadWrite(mode), + Driver: m.Driver, + Destination: m.Destination, + Propagation: m.Propagation, + Spec: m.Spec, + CopyData: false, + } + + if len(cp.Source) == 0 { + v, err := daemon.volumes.GetWithRef(cp.Name, cp.Driver, container.ID) + if err != nil { + return err + } + cp.Volume = v + } + dereferenceIfExists(cp.Destination) + mountPoints[cp.Destination] = cp + } + } + + // 3. 
Read bind mounts + for _, b := range hostConfig.Binds { + bind, err := volume.ParseMountRaw(b, hostConfig.VolumeDriver) + if err != nil { + return err + } + + // #10618 + _, tmpfsExists := hostConfig.Tmpfs[bind.Destination] + if binds[bind.Destination] || tmpfsExists { + return fmt.Errorf("Duplicate mount point '%s'", bind.Destination) + } + + if bind.Type == mounttypes.TypeVolume { + // create the volume + v, err := daemon.volumes.CreateWithRef(bind.Name, bind.Driver, container.ID, nil, nil) + if err != nil { + return err + } + bind.Volume = v + bind.Source = v.Path() + // bind.Name is an already existing volume, we need to use that here + bind.Driver = v.DriverName() + if bind.Driver == volume.DefaultDriverName { + setBindModeIfNull(bind) + } + } + + binds[bind.Destination] = true + dereferenceIfExists(bind.Destination) + mountPoints[bind.Destination] = bind + } + + for _, cfg := range hostConfig.Mounts { + mp, err := volume.ParseMountSpec(cfg) + if err != nil { + return dockererrors.NewBadRequestError(err) + } + + if binds[mp.Destination] { + return fmt.Errorf("Duplicate mount point '%s'", cfg.Target) + } + + if mp.Type == mounttypes.TypeVolume { + var v volume.Volume + if cfg.VolumeOptions != nil { + var driverOpts map[string]string + if cfg.VolumeOptions.DriverConfig != nil { + driverOpts = cfg.VolumeOptions.DriverConfig.Options + } + v, err = daemon.volumes.CreateWithRef(mp.Name, mp.Driver, container.ID, driverOpts, cfg.VolumeOptions.Labels) + } else { + v, err = daemon.volumes.CreateWithRef(mp.Name, mp.Driver, container.ID, nil, nil) + } + if err != nil { + return err + } + + mp.Volume = v + mp.Name = v.Name() + mp.Driver = v.DriverName() + + // only use the cached path here since getting the path is not necessary right now and calling `Path()` may be slow + if cv, ok := v.(interface { + CachedPath() string + }); ok { + mp.Source = cv.CachedPath() + } + } + + binds[mp.Destination] = true + dereferenceIfExists(mp.Destination) + mountPoints[mp.Destination] = mp + } + + container.Lock() + + // 4. Cleanup old volumes that are about to be reassigned. + for _, m := range mountPoints { + if m.BackwardsCompatible() { + if mp, exists := container.MountPoints[m.Destination]; exists && mp.Volume != nil { + daemon.volumes.Dereference(mp.Volume, container.ID) + } + } + } + container.MountPoints = mountPoints + + container.Unlock() + + return nil +} + +// lazyInitializeVolume initializes a mountpoint's volume if needed. +// This happens after a daemon restart. +func (daemon *Daemon) lazyInitializeVolume(containerID string, m *volume.MountPoint) error { + if len(m.Driver) > 0 && m.Volume == nil { + v, err := daemon.volumes.GetWithRef(m.Name, m.Driver, containerID) + if err != nil { + return err + } + m.Volume = v + } + return nil +} + +// backportMountSpec resolves mount specs (introduced in 1.13) from pre-1.13 +// mount configurations +// The container lock should not be held when calling this function. 
+// Changes are only made in-memory and may make changes to containers referenced +// by `container.HostConfig.VolumesFrom` +func (daemon *Daemon) backportMountSpec(container *container.Container) { + container.Lock() + defer container.Unlock() + + maybeUpdate := make(map[string]bool) + for _, mp := range container.MountPoints { + if mp.Spec.Source != "" && mp.Type != "" { + continue + } + maybeUpdate[mp.Destination] = true + } + if len(maybeUpdate) == 0 { + return + } + + mountSpecs := make(map[string]bool, len(container.HostConfig.Mounts)) + for _, m := range container.HostConfig.Mounts { + mountSpecs[m.Target] = true + } + + binds := make(map[string]*volume.MountPoint, len(container.HostConfig.Binds)) + for _, rawSpec := range container.HostConfig.Binds { + mp, err := volume.ParseMountRaw(rawSpec, container.HostConfig.VolumeDriver) + if err != nil { + logrus.WithError(err).Error("Got unexpected error while re-parsing raw volume spec during spec backport") + continue + } + binds[mp.Destination] = mp + } + + volumesFrom := make(map[string]volume.MountPoint) + for _, fromSpec := range container.HostConfig.VolumesFrom { + from, _, err := volume.ParseVolumesFrom(fromSpec) + if err != nil { + logrus.WithError(err).WithField("id", container.ID).Error("Error reading volumes-from spec during mount spec backport") + continue + } + fromC, err := daemon.GetContainer(from) + if err != nil { + logrus.WithError(err).WithField("from-container", from).Error("Error looking up volumes-from container") + continue + } + + // make sure from container's specs have been backported + daemon.backportMountSpec(fromC) + + fromC.Lock() + for t, mp := range fromC.MountPoints { + volumesFrom[t] = *mp + } + fromC.Unlock() + } + + needsUpdate := func(containerMount, other *volume.MountPoint) bool { + if containerMount.Type != other.Type || !reflect.DeepEqual(containerMount.Spec, other.Spec) { + return true + } + return false + } + + // main + for _, cm := range container.MountPoints { + if !maybeUpdate[cm.Destination] { + continue + } + // nothing to backport if from hostconfig.Mounts + if mountSpecs[cm.Destination] { + continue + } + + if mp, exists := binds[cm.Destination]; exists { + if needsUpdate(cm, mp) { + cm.Spec = mp.Spec + cm.Type = mp.Type + } + continue + } + + if cm.Name != "" { + if mp, exists := volumesFrom[cm.Destination]; exists { + if needsUpdate(cm, &mp) { + cm.Spec = mp.Spec + cm.Type = mp.Type + } + continue + } + + if cm.Type != "" { + // probably specified via the hostconfig.Mounts + continue + } + + // anon volume + cm.Type = mounttypes.TypeVolume + cm.Spec.Type = mounttypes.TypeVolume + } else { + if cm.Type != "" { + // already updated + continue + } + + cm.Type = mounttypes.TypeBind + cm.Spec.Type = mounttypes.TypeBind + cm.Spec.Source = cm.Source + if cm.Propagation != "" { + cm.Spec.BindOptions = &mounttypes.BindOptions{ + Propagation: cm.Propagation, + } + } + } + + cm.Spec.Target = cm.Destination + cm.Spec.ReadOnly = !cm.RW + } +} + +func (daemon *Daemon) traverseLocalVolumes(fn func(volume.Volume) error) error { + localVolumeDriver, err := volumedrivers.GetDriver(volume.DefaultDriverName) + if err != nil { + return fmt.Errorf("can't retrieve local volume driver: %v", err) + } + vols, err := localVolumeDriver.List() + if err != nil { + return fmt.Errorf("can't retrieve local volumes: %v", err) + } + + for _, v := range vols { + name := v.Name() + vol, err := daemon.volumes.Get(name) + if err != nil { + logrus.Warnf("failed to retrieve volume %s from store: %v", name, err) + } else { + // 
daemon.volumes.Get will return DetailedVolume + v = vol + } + + err = fn(v) + if err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/volumes_unit_test.go b/vendor/github.com/moby/moby/daemon/volumes_unit_test.go new file mode 100644 index 000000000..450d17f97 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/volumes_unit_test.go @@ -0,0 +1,39 @@ +package daemon + +import ( + "testing" + + "github.com/docker/docker/volume" +) + +func TestParseVolumesFrom(t *testing.T) { + cases := []struct { + spec string + expID string + expMode string + fail bool + }{ + {"", "", "", true}, + {"foobar", "foobar", "rw", false}, + {"foobar:rw", "foobar", "rw", false}, + {"foobar:ro", "foobar", "ro", false}, + {"foobar:baz", "", "", true}, + } + + for _, c := range cases { + id, mode, err := volume.ParseVolumesFrom(c.spec) + if c.fail { + if err == nil { + t.Fatalf("Expected error, was nil, for spec %s\n", c.spec) + } + continue + } + + if id != c.expID { + t.Fatalf("Expected id %s, was %s, for spec %s\n", c.expID, id, c.spec) + } + if mode != c.expMode { + t.Fatalf("Expected mode %s, was %s for spec %s\n", c.expMode, mode, c.spec) + } + } +} diff --git a/vendor/github.com/moby/moby/daemon/volumes_unix.go b/vendor/github.com/moby/moby/daemon/volumes_unix.go new file mode 100644 index 000000000..0a4cbf849 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/volumes_unix.go @@ -0,0 +1,232 @@ +// +build !windows + +// TODO(amitkris): We need to split this file for solaris. + +package daemon + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/volume" + "github.com/docker/docker/volume/drivers" + "github.com/docker/docker/volume/local" + "github.com/pkg/errors" +) + +// setupMounts iterates through each of the mount points for a container and +// calls Setup() on each. It also looks to see if is a network mount such as +// /etc/resolv.conf, and if it is not, appends it to the array of mounts. +func (daemon *Daemon) setupMounts(c *container.Container) ([]container.Mount, error) { + var mounts []container.Mount + // TODO: tmpfs mounts should be part of Mountpoints + tmpfsMounts := make(map[string]bool) + tmpfsMountInfo, err := c.TmpfsMounts() + if err != nil { + return nil, err + } + for _, m := range tmpfsMountInfo { + tmpfsMounts[m.Destination] = true + } + for _, m := range c.MountPoints { + if tmpfsMounts[m.Destination] { + continue + } + if err := daemon.lazyInitializeVolume(c.ID, m); err != nil { + return nil, err + } + // If the daemon is being shutdown, we should not let a container start if it is trying to + // mount the socket the daemon is listening on. During daemon shutdown, the socket + // (/var/run/docker.sock by default) doesn't exist anymore causing the call to m.Setup to + // create at directory instead. This in turn will prevent the daemon to restart. 
+ checkfunc := func(m *volume.MountPoint) error { + if _, exist := daemon.hosts[m.Source]; exist && daemon.IsShuttingDown() { + return fmt.Errorf("Could not mount %q to container while the daemon is shutting down", m.Source) + } + return nil + } + + path, err := m.Setup(c.MountLabel, daemon.idMappings.RootPair(), checkfunc) + if err != nil { + return nil, err + } + if !c.TrySetNetworkMount(m.Destination, path) { + mnt := container.Mount{ + Source: path, + Destination: m.Destination, + Writable: m.RW, + Propagation: string(m.Propagation), + } + if m.Volume != nil { + attributes := map[string]string{ + "driver": m.Volume.DriverName(), + "container": c.ID, + "destination": m.Destination, + "read/write": strconv.FormatBool(m.RW), + "propagation": string(m.Propagation), + } + daemon.LogVolumeEvent(m.Volume.Name(), "mount", attributes) + } + mounts = append(mounts, mnt) + } + } + + mounts = sortMounts(mounts) + netMounts := c.NetworkMounts() + // if we are going to mount any of the network files from container + // metadata, the ownership must be set properly for potential container + // remapped root (user namespaces) + rootIDs := daemon.idMappings.RootPair() + for _, mount := range netMounts { + if err := os.Chown(mount.Source, rootIDs.UID, rootIDs.GID); err != nil { + return nil, err + } + } + return append(mounts, netMounts...), nil +} + +// sortMounts sorts an array of mounts in lexicographic order. This ensure that +// when mounting, the mounts don't shadow other mounts. For example, if mounting +// /etc and /etc/resolv.conf, /etc/resolv.conf must not be mounted first. +func sortMounts(m []container.Mount) []container.Mount { + sort.Sort(mounts(m)) + return m +} + +// setBindModeIfNull is platform specific processing to ensure the +// shared mode is set to 'z' if it is null. This is called in the case +// of processing a named volume and not a typical bind. +func setBindModeIfNull(bind *volume.MountPoint) { + if bind.Mode == "" { + bind.Mode = "z" + } +} + +// migrateVolume links the contents of a volume created pre Docker 1.7 +// into the location expected by the local driver. +// It creates a symlink from DOCKER_ROOT/vfs/dir/VOLUME_ID to DOCKER_ROOT/volumes/VOLUME_ID/_container_data. +// It preserves the volume json configuration generated pre Docker 1.7 to be able to +// downgrade from Docker 1.7 to Docker 1.6 without losing volume compatibility. +func migrateVolume(id, vfs string) error { + l, err := volumedrivers.GetDriver(volume.DefaultDriverName) + if err != nil { + return err + } + + newDataPath := l.(*local.Root).DataPath(id) + fi, err := os.Stat(newDataPath) + if err != nil && !os.IsNotExist(err) { + return err + } + + if fi != nil && fi.IsDir() { + return nil + } + + return os.Symlink(vfs, newDataPath) +} + +// verifyVolumesInfo ports volumes configured for the containers pre docker 1.7. +// It reads the container configuration and creates valid mount points for the old volumes. +func (daemon *Daemon) verifyVolumesInfo(container *container.Container) error { + container.Lock() + defer container.Unlock() + + // Inspect old structures only when we're upgrading from old versions + // to versions >= 1.7 and the MountPoints has not been populated with volumes data. 
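sortMounts above orders mounts by their number of path components so that a parent mount can never shadow a child mounted alongside it: /etc must be mounted before /etc/resolv.conf. The same ordering in miniature:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"sort"
	"strings"
)

func main() {
	dests := []string{"/etc/resolv.conf", "/etc", "/var/lib/app/data", "/var"}

	// Fewer separators (shallower paths) sort first, as in mounts.Less above.
	sort.SliceStable(dests, func(i, j int) bool {
		return strings.Count(filepath.Clean(dests[i]), string(os.PathSeparator)) <
			strings.Count(filepath.Clean(dests[j]), string(os.PathSeparator))
	})
	fmt.Println(dests) // [/etc /var /etc/resolv.conf /var/lib/app/data]
}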
+ type volumes struct { + Volumes map[string]string + VolumesRW map[string]bool + } + cfgPath, err := container.ConfigPath() + if err != nil { + return err + } + f, err := os.Open(cfgPath) + if err != nil { + return errors.Wrap(err, "could not open container config") + } + defer f.Close() + var cv volumes + if err := json.NewDecoder(f).Decode(&cv); err != nil { + return errors.Wrap(err, "could not decode container config") + } + + if len(container.MountPoints) == 0 && len(cv.Volumes) > 0 { + for destination, hostPath := range cv.Volumes { + vfsPath := filepath.Join(daemon.root, "vfs", "dir") + rw := cv.VolumesRW != nil && cv.VolumesRW[destination] + + if strings.HasPrefix(hostPath, vfsPath) { + id := filepath.Base(hostPath) + v, err := daemon.volumes.CreateWithRef(id, volume.DefaultDriverName, container.ID, nil, nil) + if err != nil { + return err + } + if err := migrateVolume(id, hostPath); err != nil { + return err + } + container.AddMountPointWithVolume(destination, v, true) + } else { // Bind mount + m := volume.MountPoint{Source: hostPath, Destination: destination, RW: rw} + container.MountPoints[destination] = &m + } + } + } + return nil +} + +func (daemon *Daemon) mountVolumes(container *container.Container) error { + mounts, err := daemon.setupMounts(container) + if err != nil { + return err + } + + for _, m := range mounts { + dest, err := container.GetResourcePath(m.Destination) + if err != nil { + return err + } + + var stat os.FileInfo + stat, err = os.Stat(m.Source) + if err != nil { + return err + } + if err = fileutils.CreateIfNotExists(dest, stat.IsDir()); err != nil { + return err + } + + opts := "rbind,ro" + if m.Writable { + opts = "rbind,rw" + } + + if err := mount.Mount(m.Source, dest, bindMountType, opts); err != nil { + return err + } + + // mountVolumes() seems to be called for temporary mounts + // outside the container. Soon these will be unmounted with + // lazy unmount option and given we have mounted the rbind, + // all the submounts will propagate if these are shared. If + // daemon is running in host namespace and has / as shared + // then these unmounts will propagate and unmount original + // mount as well. So make all these mounts rprivate. + // Do not use propagation property of volume as that should + // apply only when mounting happen inside the container. 
+ if err := mount.MakeRPrivate(dest); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/moby/moby/daemon/volumes_unix_test.go b/vendor/github.com/moby/moby/daemon/volumes_unix_test.go new file mode 100644 index 000000000..3a81eeeb7 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/volumes_unix_test.go @@ -0,0 +1,256 @@ +// +build !windows + +package daemon + +import ( + "encoding/json" + "fmt" + "reflect" + "testing" + + containertypes "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/container" + "github.com/docker/docker/volume" +) + +func TestBackportMountSpec(t *testing.T) { + d := Daemon{containers: container.NewMemoryStore()} + + c := &container.Container{ + State: &container.State{}, + MountPoints: map[string]*volume.MountPoint{ + "/apple": {Destination: "/apple", Source: "/var/lib/docker/volumes/12345678", Name: "12345678", RW: true, CopyData: true}, // anonymous volume + "/banana": {Destination: "/banana", Source: "/var/lib/docker/volumes/data", Name: "data", RW: true, CopyData: true}, // named volume + "/cherry": {Destination: "/cherry", Source: "/var/lib/docker/volumes/data", Name: "data", CopyData: true}, // RO named volume + "/dates": {Destination: "/dates", Source: "/var/lib/docker/volumes/data", Name: "data"}, // named volume nocopy + "/elderberry": {Destination: "/elderberry", Source: "/var/lib/docker/volumes/data", Name: "data"}, // masks anon vol + "/fig": {Destination: "/fig", Source: "/data", RW: true}, // RW bind + "/guava": {Destination: "/guava", Source: "/data", RW: false, Propagation: "shared"}, // RO bind + propagation + "/kumquat": {Destination: "/kumquat", Name: "data", RW: false, CopyData: true}, // volumes-from + + // partially configured mountpoint due to #32613 + // specifically, `mp.Spec.Source` is not set + "/honeydew": { + Type: mounttypes.TypeVolume, + Destination: "/honeydew", + Name: "data", + Source: "/var/lib/docker/volumes/data", + Spec: mounttypes.Mount{Type: mounttypes.TypeVolume, Target: "/honeydew", VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, + }, + + // from hostconfig.Mounts + "/jambolan": { + Type: mounttypes.TypeVolume, + Destination: "/jambolan", + Source: "/var/lib/docker/volumes/data", + RW: true, + Name: "data", + Spec: mounttypes.Mount{Type: mounttypes.TypeVolume, Target: "/jambolan", Source: "data"}, + }, + }, + HostConfig: &containertypes.HostConfig{ + Binds: []string{ + "data:/banana", + "data:/cherry:ro", + "data:/dates:ro,nocopy", + "data:/elderberry:ro,nocopy", + "/data:/fig", + "/data:/guava:ro,shared", + "data:/honeydew:nocopy", + }, + VolumesFrom: []string{"1:ro"}, + Mounts: []mounttypes.Mount{ + {Type: mounttypes.TypeVolume, Target: "/jambolan"}, + }, + }, + Config: &containertypes.Config{Volumes: map[string]struct{}{ + "/apple": {}, + "/elderberry": {}, + }}, + } + + d.containers.Add("1", &container.Container{ + State: &container.State{}, + ID: "1", + MountPoints: map[string]*volume.MountPoint{ + "/kumquat": {Destination: "/kumquat", Name: "data", RW: false, CopyData: true}, + }, + HostConfig: &containertypes.HostConfig{ + Binds: []string{ + "data:/kumquat:ro", + }, + }, + }) + + type expected struct { + mp *volume.MountPoint + comment string + } + + pretty := func(mp *volume.MountPoint) string { + b, err := json.MarshalIndent(mp, "\t", " ") + if err != nil { + return fmt.Sprintf("%#v", mp) + } + return string(b) + } + + for _, x := range []expected{ + { + mp: &volume.MountPoint{ + Type: 
mounttypes.TypeVolume, + Destination: "/apple", + RW: true, + Name: "12345678", + Source: "/var/lib/docker/volumes/12345678", + CopyData: true, + Spec: mounttypes.Mount{ + Type: mounttypes.TypeVolume, + Source: "", + Target: "/apple", + }, + }, + comment: "anonymous volume", + }, + { + mp: &volume.MountPoint{ + Type: mounttypes.TypeVolume, + Destination: "/banana", + RW: true, + Name: "data", + Source: "/var/lib/docker/volumes/data", + CopyData: true, + Spec: mounttypes.Mount{ + Type: mounttypes.TypeVolume, + Source: "data", + Target: "/banana", + }, + }, + comment: "named volume", + }, + { + mp: &volume.MountPoint{ + Type: mounttypes.TypeVolume, + Destination: "/cherry", + Name: "data", + Source: "/var/lib/docker/volumes/data", + CopyData: true, + Spec: mounttypes.Mount{ + Type: mounttypes.TypeVolume, + Source: "data", + Target: "/cherry", + ReadOnly: true, + }, + }, + comment: "read-only named volume", + }, + { + mp: &volume.MountPoint{ + Type: mounttypes.TypeVolume, + Destination: "/dates", + Name: "data", + Source: "/var/lib/docker/volumes/data", + Spec: mounttypes.Mount{ + Type: mounttypes.TypeVolume, + Source: "data", + Target: "/dates", + ReadOnly: true, + VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}, + }, + }, + comment: "named volume with nocopy", + }, + { + mp: &volume.MountPoint{ + Type: mounttypes.TypeVolume, + Destination: "/elderberry", + Name: "data", + Source: "/var/lib/docker/volumes/data", + Spec: mounttypes.Mount{ + Type: mounttypes.TypeVolume, + Source: "data", + Target: "/elderberry", + ReadOnly: true, + VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}, + }, + }, + comment: "masks an anonymous volume", + }, + { + mp: &volume.MountPoint{ + Type: mounttypes.TypeBind, + Destination: "/fig", + Source: "/data", + RW: true, + Spec: mounttypes.Mount{ + Type: mounttypes.TypeBind, + Source: "/data", + Target: "/fig", + }, + }, + comment: "bind mount with read/write", + }, + { + mp: &volume.MountPoint{ + Type: mounttypes.TypeBind, + Destination: "/guava", + Source: "/data", + RW: false, + Propagation: "shared", + Spec: mounttypes.Mount{ + Type: mounttypes.TypeBind, + Source: "/data", + Target: "/guava", + ReadOnly: true, + BindOptions: &mounttypes.BindOptions{Propagation: "shared"}, + }, + }, + comment: "bind mount with read/write + shared propagation", + }, + { + mp: &volume.MountPoint{ + Type: mounttypes.TypeVolume, + Destination: "/honeydew", + Source: "/var/lib/docker/volumes/data", + RW: true, + Propagation: "shared", + Spec: mounttypes.Mount{ + Type: mounttypes.TypeVolume, + Source: "data", + Target: "/honeydew", + VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}, + }, + }, + comment: "partially configured named volume caused by #32613", + }, + { + mp: &(*c.MountPoints["/jambolan"]), // copy the mountpoint, expect no changes + comment: "volume defined in mounts API", + }, + { + mp: &volume.MountPoint{ + Type: mounttypes.TypeVolume, + Destination: "/kumquat", + Source: "/var/lib/docker/volumes/data", + RW: false, + Name: "data", + Spec: mounttypes.Mount{ + Type: mounttypes.TypeVolume, + Source: "data", + Target: "/kumquat", + ReadOnly: true, + }, + }, + comment: "partially configured named volume caused by #32613", + }, + } { + + mp := c.MountPoints[x.mp.Destination] + d.backportMountSpec(c) + + if !reflect.DeepEqual(mp.Spec, x.mp.Spec) { + t.Fatalf("%s\nexpected:\n\t%s\n\ngot:\n\t%s", x.comment, pretty(x.mp), pretty(mp)) + } + } +} diff --git a/vendor/github.com/moby/moby/daemon/volumes_windows.go 
b/vendor/github.com/moby/moby/daemon/volumes_windows.go new file mode 100644 index 000000000..62c9e23ac --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/volumes_windows.go @@ -0,0 +1,48 @@ +// +build windows + +package daemon + +import ( + "sort" + + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/volume" +) + +// setupMounts configures the mount points for a container by appending each +// of the configured mounts on the container to the OCI mount structure +// which will ultimately be passed into the OCI runtime during container creation. +// It also ensures each of the mounts is lexicographically sorted. + +// BUGBUG TODO Windows containerd. This would be much better if it returned +// an array of runtime spec mounts, not container mounts. Then no need to +// do multiple transitions. + +func (daemon *Daemon) setupMounts(c *container.Container) ([]container.Mount, error) { + var mnts []container.Mount + for _, mount := range c.MountPoints { // type is volume.MountPoint + if err := daemon.lazyInitializeVolume(c.ID, mount); err != nil { + return nil, err + } + s, err := mount.Setup(c.MountLabel, idtools.IDPair{0, 0}, nil) + if err != nil { + return nil, err + } + + mnts = append(mnts, container.Mount{ + Source: s, + Destination: mount.Destination, + Writable: mount.RW, + }) + } + + sort.Sort(mounts(mnts)) + return mnts, nil +} + +// setBindModeIfNull is platform-specific processing which is a no-op on +// Windows. +func setBindModeIfNull(bind *volume.MountPoint) { + return +} diff --git a/vendor/github.com/moby/moby/daemon/wait.go b/vendor/github.com/moby/moby/daemon/wait.go new file mode 100644 index 000000000..76c16b9ef --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/wait.go @@ -0,0 +1,22 @@ +package daemon + +import ( + "github.com/docker/docker/container" + "golang.org/x/net/context" +) + +// ContainerWait waits until the given container is in a certain state +// indicated by the given condition. If the container is not found, a nil +// channel and non-nil error is returned immediately. If the container is +// found, a status result will be sent on the returned channel once the wait +// condition is met or if an error occurs waiting for the container (such as a +// context timeout or cancellation). On a successful wait, the exit code of the +// container is returned in the status with a nil Err() value. +func (daemon *Daemon) ContainerWait(ctx context.Context, name string, condition container.WaitCondition) (<-chan container.StateStatus, error) { + cntr, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + return cntr.Wait(ctx, condition), nil +} diff --git a/vendor/github.com/moby/moby/daemon/workdir.go b/vendor/github.com/moby/moby/daemon/workdir.go new file mode 100644 index 000000000..6360f2413 --- /dev/null +++ b/vendor/github.com/moby/moby/daemon/workdir.go @@ -0,0 +1,20 @@ +package daemon + +// ContainerCreateWorkdir creates the working directory. This solves the +// issue arising from https://github.com/docker/docker/issues/27545, +// which was initially fixed by https://github.com/docker/docker/pull/27884. But that fix +// was too expensive in terms of performance on Windows. Instead, +// https://github.com/docker/docker/pull/28514 introduces this new functionality +// where the builder calls into the backend here to create the working directory. 
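Before moving on from wait.go above: ContainerWait's contract is channel-based, and the caller side never appears in this diff. A minimal sketch of consuming such a wait channel under a context — stateStatus and fakeWait are hypothetical stand-ins, not the vendored container types:

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    // stateStatus is a hypothetical stand-in for container.StateStatus.
    type stateStatus struct {
        exitCode int
        err      error
    }

    // fakeWait mimics the documented contract: one result is sent once the
    // wait condition is met or the context is cancelled.
    func fakeWait(ctx context.Context) <-chan stateStatus {
        ch := make(chan stateStatus, 1)
        go func() {
            select {
            case <-time.After(50 * time.Millisecond): // pretend the container exited
                ch <- stateStatus{exitCode: 0}
            case <-ctx.Done():
                ch <- stateStatus{err: ctx.Err()}
            }
        }()
        return ch
    }

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), time.Second)
        defer cancel()

        status := <-fakeWait(ctx)
        fmt.Printf("exit code: %d, err: %v\n", status.exitCode, status.err)
    }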
+func (daemon *Daemon) ContainerCreateWorkdir(cID string) error { + container, err := daemon.GetContainer(cID) + if err != nil { + return err + } + err = daemon.Mount(container) + if err != nil { + return err + } + defer daemon.Unmount(container) + return container.SetupWorkingDirectory(daemon.idMappings.RootPair()) +} diff --git a/vendor/github.com/moby/moby/distribution/config.go b/vendor/github.com/moby/moby/distribution/config.go new file mode 100644 index 000000000..1c10533f6 --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/config.go @@ -0,0 +1,252 @@ +package distribution + +import ( + "encoding/json" + "fmt" + "io" + "runtime" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/docker/api/types" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/system" + refstore "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/libtrust" + "github.com/opencontainers/go-digest" + "golang.org/x/net/context" +) + +// Config stores configuration for communicating +// with a registry. +type Config struct { + // MetaHeaders stores HTTP headers with metadata about the image + MetaHeaders map[string][]string + // AuthConfig holds authentication credentials for authenticating with + // the registry. + AuthConfig *types.AuthConfig + // ProgressOutput is the interface for showing the status of the pull + // operation. + ProgressOutput progress.Output + // RegistryService is the registry service to use for TLS configuration + // and endpoint lookup. + RegistryService registry.Service + // ImageEventLogger notifies events for a given image + ImageEventLogger func(id, name, action string) + // MetadataStore is the storage backend for distribution-specific + // metadata. + MetadataStore metadata.Store + // ImageStore manages images. + ImageStore ImageConfigStore + // ReferenceStore manages tags. This value is optional, when excluded + // content will not be tagged. + ReferenceStore refstore.Store + // RequireSchema2 ensures that only schema2 manifests are used. + RequireSchema2 bool +} + +// ImagePullConfig stores pull configuration. +type ImagePullConfig struct { + Config + + // DownloadManager manages concurrent pulls. + DownloadManager RootFSDownloadManager + // Schema2Types is the valid schema2 configuration types allowed + // by the pull operation. + Schema2Types []string + // Platform is the requested platform of the image being pulled to ensure it can be validated + // when the host platform supports multiple image operating systems. + Platform string +} + +// ImagePushConfig stores push configuration. +type ImagePushConfig struct { + Config + + // ConfigMediaType is the configuration media type for + // schema2 manifests. + ConfigMediaType string + // LayerStore manages layers. + LayerStore PushLayerProvider + // TrustKey is the private key for legacy signatures. This is typically + // an ephemeral key, since these signatures are no longer verified. + TrustKey libtrust.PrivateKey + // UploadManager dispatches uploads. + UploadManager *xfer.LayerUploadManager +} + +// ImageConfigStore handles storing and getting image configurations +// by digest. Allows getting an image configurations rootfs from the +// configuration. 
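ImageConfigStore, declared just below, is essentially a content-addressed map from raw config bytes to a digest. As a rough illustration of that contract, here is a hypothetical in-memory analogue; it uses plain "sha256:<hex>" strings where the real store uses go-digest values backed by an image.Store:

    package main

    import (
        "crypto/sha256"
        "fmt"
    )

    // memConfigStore is a hypothetical in-memory analogue of ImageConfigStore.
    type memConfigStore struct {
        m map[string][]byte
    }

    // Put stores the raw config under its content address and returns it.
    func (s *memConfigStore) Put(c []byte) (string, error) {
        d := fmt.Sprintf("sha256:%x", sha256.Sum256(c))
        s.m[d] = c
        return d, nil
    }

    // Get returns the config previously stored under the digest.
    func (s *memConfigStore) Get(d string) ([]byte, error) {
        c, ok := s.m[d]
        if !ok {
            return nil, fmt.Errorf("config %s not found", d)
        }
        return c, nil
    }

    func main() {
        s := &memConfigStore{m: map[string][]byte{}}
        d, _ := s.Put([]byte(`{"os":"linux"}`))
        c, _ := s.Get(d)
        fmt.Println(d, string(c))
    }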
+type ImageConfigStore interface { + Put([]byte) (digest.Digest, error) + Get(digest.Digest) ([]byte, error) + RootFSAndPlatformFromConfig([]byte) (*image.RootFS, layer.Platform, error) +} + +// PushLayerProvider provides layers to be pushed by ChainID. +type PushLayerProvider interface { + Get(layer.ChainID) (PushLayer, error) +} + +// PushLayer is a pushable layer with metadata about the layer +// and access to the content of the layer. +type PushLayer interface { + ChainID() layer.ChainID + DiffID() layer.DiffID + Parent() PushLayer + Open() (io.ReadCloser, error) + Size() (int64, error) + MediaType() string + Release() +} + +// RootFSDownloadManager handles downloading of the rootfs +type RootFSDownloadManager interface { + // Download downloads the layers into the given initial rootfs and + // returns the final rootfs. + // Given progress output to track download progress + // Returns function to release download resources + Download(ctx context.Context, initialRootFS image.RootFS, platform layer.Platform, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) +} + +type imageConfigStore struct { + image.Store +} + +// NewImageConfigStoreFromStore returns an ImageConfigStore backed +// by an image.Store for container images. +func NewImageConfigStoreFromStore(is image.Store) ImageConfigStore { + return &imageConfigStore{ + Store: is, + } +} + +func (s *imageConfigStore) Put(c []byte) (digest.Digest, error) { + id, err := s.Store.Create(c) + return digest.Digest(id), err +} + +func (s *imageConfigStore) Get(d digest.Digest) ([]byte, error) { + img, err := s.Store.Get(image.IDFromDigest(d)) + if err != nil { + return nil, err + } + return img.RawJSON(), nil +} + +func (s *imageConfigStore) RootFSAndPlatformFromConfig(c []byte) (*image.RootFS, layer.Platform, error) { + var unmarshalledConfig image.Image + if err := json.Unmarshal(c, &unmarshalledConfig); err != nil { + return nil, "", err + } + + // fail immediately on Windows when downloading a non-Windows image + // and vice versa. Exception on Windows if Linux Containers are enabled. + if runtime.GOOS == "windows" && unmarshalledConfig.OS == "linux" && !system.LCOWSupported() { + return nil, "", fmt.Errorf("image operating system %q cannot be used on this platform", unmarshalledConfig.OS) + } else if runtime.GOOS != "windows" && unmarshalledConfig.OS == "windows" { + return nil, "", fmt.Errorf("image operating system %q cannot be used on this platform", unmarshalledConfig.OS) + } + + platform := "" + if runtime.GOOS == "windows" { + platform = unmarshalledConfig.OS + } + return unmarshalledConfig.RootFS, layer.Platform(platform), nil +} + +type storeLayerProvider struct { + ls layer.Store +} + +// NewLayerProviderFromStore returns a layer provider backed by +// an instance of LayerStore. Only getting layers as gzipped +// tars is supported. 
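The PushLayer interface above links layers through Parent(); a push walks that chain from the top layer down until Parent() returns nil, as storeLayer.Parent below does. A toy illustration of that walk, using a hypothetical in-memory layer type rather than the vendored one:

    package main

    import "fmt"

    // toyLayer mimics the Parent() linkage of PushLayer; purely illustrative.
    type toyLayer struct {
        diffID string
        parent *toyLayer
    }

    func (l *toyLayer) Parent() *toyLayer { return l.parent }

    func main() {
        base := &toyLayer{diffID: "sha256:base"}
        mid := &toyLayer{diffID: "sha256:mid", parent: base}
        top := &toyLayer{diffID: "sha256:top", parent: mid}

        // Walk top-down, as a push does when probing for already-present layers.
        for l := top; l != nil; l = l.Parent() {
            fmt.Println(l.diffID)
        }
    }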
+func NewLayerProviderFromStore(ls layer.Store) PushLayerProvider { + return &storeLayerProvider{ + ls: ls, + } +} + +func (p *storeLayerProvider) Get(lid layer.ChainID) (PushLayer, error) { + if lid == "" { + return &storeLayer{ + Layer: layer.EmptyLayer, + }, nil + } + l, err := p.ls.Get(lid) + if err != nil { + return nil, err + } + + sl := storeLayer{ + Layer: l, + ls: p.ls, + } + if d, ok := l.(distribution.Describable); ok { + return &describableStoreLayer{ + storeLayer: sl, + describable: d, + }, nil + } + + return &sl, nil +} + +type storeLayer struct { + layer.Layer + ls layer.Store +} + +func (l *storeLayer) Parent() PushLayer { + p := l.Layer.Parent() + if p == nil { + return nil + } + sl := storeLayer{ + Layer: p, + ls: l.ls, + } + if d, ok := p.(distribution.Describable); ok { + return &describableStoreLayer{ + storeLayer: sl, + describable: d, + } + } + + return &sl +} + +func (l *storeLayer) Open() (io.ReadCloser, error) { + return l.Layer.TarStream() +} + +func (l *storeLayer) Size() (int64, error) { + return l.Layer.DiffSize() +} + +func (l *storeLayer) MediaType() string { + // layer store always returns uncompressed tars + return schema2.MediaTypeUncompressedLayer +} + +func (l *storeLayer) Release() { + if l.ls != nil { + layer.ReleaseAndLog(l.ls, l.Layer) + } +} + +type describableStoreLayer struct { + storeLayer + describable distribution.Describable +} + +func (l *describableStoreLayer) Descriptor() distribution.Descriptor { + return l.describable.Descriptor() +} diff --git a/vendor/github.com/moby/moby/distribution/errors.go b/vendor/github.com/moby/moby/distribution/errors.go new file mode 100644 index 000000000..f453c01cc --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/errors.go @@ -0,0 +1,159 @@ +package distribution + +import ( + "net/url" + "strings" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/docker/distribution/xfer" + "github.com/pkg/errors" +) + +// ErrNoSupport is an error type used for errors indicating that an operation +// is not supported. It encapsulates a more specific error. +type ErrNoSupport struct{ Err error } + +func (e ErrNoSupport) Error() string { + if e.Err == nil { + return "not supported" + } + return e.Err.Error() +} + +// fallbackError wraps an error that can possibly allow fallback to a different +// endpoint. +type fallbackError struct { + // err is the error being wrapped. + err error + // confirmedV2 is set to true if it was confirmed that the registry + // supports the v2 protocol. This is used to limit fallbacks to the v1 + // protocol. + confirmedV2 bool + // transportOK is set to true if we managed to speak HTTP with the + // registry. This confirms that we're using appropriate TLS settings + // (or lack of TLS). + transportOK bool +} + +// Error renders the FallbackError as a string. +func (f fallbackError) Error() string { + return f.Cause().Error() +} + +func (f fallbackError) Cause() error { + return f.err +} + +// shouldV2Fallback returns true if this error is a reason to fall back to v1. 
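The fallbackError wrapper defined above is what lets the pull loop distinguish "try the next endpoint" from "abort": the wrapped error travels with the confirmedV2/transportOK flags. A compressed, hypothetical sketch of how a caller switches on such a wrapper (fbError and tryEndpoint are local stand-ins, not the vendored names):

    package main

    import (
        "errors"
        "fmt"
    )

    // fbError mirrors the shape of fallbackError: it wraps an error and
    // carries flags the pull loop uses to decide how to continue.
    type fbError struct {
        err         error
        confirmedV2 bool
        transportOK bool
    }

    func (f fbError) Error() string { return f.err.Error() }

    func tryEndpoint(v2 bool) error {
        if v2 {
            // v2 worked at the transport level but the manifest was missing,
            // so signal that falling back to another endpoint is allowed.
            return fbError{err: errors.New("manifest unknown"), confirmedV2: true, transportOK: true}
        }
        return errors.New("hard failure: do not fall back")
    }

    func main() {
        for _, isV2 := range []bool{true, false} {
            err := tryEndpoint(isV2)
            if fe, ok := err.(fbError); ok {
                fmt.Printf("falling back after: %v (confirmedV2=%v)\n", fe.err, fe.confirmedV2)
                continue
            }
            fmt.Println("aborting:", err)
        }
    }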
+func shouldV2Fallback(err errcode.Error) bool { + switch err.Code { + case errcode.ErrorCodeUnauthorized, v2.ErrorCodeManifestUnknown, v2.ErrorCodeNameUnknown: + return true + } + return false +} + +// TranslatePullError is used to convert an error from a registry pull +// operation to an error representing the entire pull operation. Any error +// information which is not used by the returned error gets output to +// log at info level. +func TranslatePullError(err error, ref reference.Named) error { + switch v := err.(type) { + case errcode.Errors: + if len(v) != 0 { + for _, extra := range v[1:] { + logrus.Infof("Ignoring extra error returned from registry: %v", extra) + } + return TranslatePullError(v[0], ref) + } + case errcode.Error: + var newErr error + switch v.Code { + case errcode.ErrorCodeDenied: + // ErrorCodeDenied is used when access to the repository was denied + newErr = errors.Errorf("pull access denied for %s, repository does not exist or may require 'docker login'", reference.FamiliarName(ref)) + case v2.ErrorCodeManifestUnknown: + newErr = errors.Errorf("manifest for %s not found", reference.FamiliarString(ref)) + case v2.ErrorCodeNameUnknown: + newErr = errors.Errorf("repository %s not found", reference.FamiliarName(ref)) + } + if newErr != nil { + logrus.Infof("Translating %q to %q", err, newErr) + return newErr + } + case xfer.DoNotRetry: + return TranslatePullError(v.Err, ref) + } + + return err +} + +// continueOnError returns true if we should fallback to the next endpoint +// as a result of this error. +func continueOnError(err error) bool { + switch v := err.(type) { + case errcode.Errors: + if len(v) == 0 { + return true + } + return continueOnError(v[0]) + case ErrNoSupport: + return continueOnError(v.Err) + case errcode.Error: + return shouldV2Fallback(v) + case *client.UnexpectedHTTPResponseError: + return true + case ImageConfigPullError: + return false + case error: + return !strings.Contains(err.Error(), strings.ToLower(syscall.ESRCH.Error())) + } + // let's be nice and fallback if the error is a completely + // unexpected one. + // If new errors have to be handled in some way, please + // add them to the switch above. + return true +} + +// retryOnError wraps the error in xfer.DoNotRetry if we should not retry the +// operation after this error. +func retryOnError(err error) error { + switch v := err.(type) { + case errcode.Errors: + if len(v) != 0 { + return retryOnError(v[0]) + } + case errcode.Error: + switch v.Code { + case errcode.ErrorCodeUnauthorized, errcode.ErrorCodeUnsupported, errcode.ErrorCodeDenied, errcode.ErrorCodeTooManyRequests, v2.ErrorCodeNameUnknown: + return xfer.DoNotRetry{Err: err} + } + case *url.Error: + switch v.Err { + case auth.ErrNoBasicAuthCredentials, auth.ErrNoToken: + return xfer.DoNotRetry{Err: v.Err} + } + return retryOnError(v.Err) + case *client.UnexpectedHTTPResponseError: + return xfer.DoNotRetry{Err: err} + case error: + if err == distribution.ErrBlobUnknown { + return xfer.DoNotRetry{Err: err} + } + if strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error())) { + return xfer.DoNotRetry{Err: err} + } + } + // let's be nice and fallback if the error is a completely + // unexpected one. + // If new errors have to be handled in some way, please + // add them to the switch above. 
+ return err +} diff --git a/vendor/github.com/moby/moby/distribution/fixtures/validate_manifest/bad_manifest b/vendor/github.com/moby/moby/distribution/fixtures/validate_manifest/bad_manifest new file mode 100644 index 000000000..a1f02a62a --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/fixtures/validate_manifest/bad_manifest @@ -0,0 +1,38 @@ +{ + "schemaVersion": 2, + "name": "library/hello-world", + "tag": "latest", + "architecture": "amd64", + "fsLayers": [ + { + "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" + }, + { + "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" + } + ], + "history": [ + { + "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" + }, + { + "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" + } + ], + "signatures": [ + { + "header": { + "jwk": { + "crv": 
"P-256", + "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", + "kty": "EC", + "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", + "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" + }, + "alg": "ES256" + }, + "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", + "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" + } + ] +} diff --git a/vendor/github.com/moby/moby/distribution/fixtures/validate_manifest/extra_data_manifest b/vendor/github.com/moby/moby/distribution/fixtures/validate_manifest/extra_data_manifest new file mode 100644 index 000000000..beec19a80 --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/fixtures/validate_manifest/extra_data_manifest @@ -0,0 +1,46 @@ +{ + "schemaVersion": 1, + "name": "library/hello-world", + "tag": "latest", + "architecture": "amd64", + "fsLayers": [ + { + "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" + }, + { + "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" + } + ], + "history": [ + { + "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" + }, + { + "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in 
/\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" + } + ], + "fsLayers": [ + { + "blobSum": "sha256:ffff95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" + }, + { + "blobSum": "sha256:ffff658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" + } + ], + "signatures": [ + { + "header": { + "jwk": { + "crv": "P-256", + "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", + "kty": "EC", + "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", + "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" + }, + "alg": "ES256" + }, + "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", + "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" + } + ] +} diff --git a/vendor/github.com/moby/moby/distribution/fixtures/validate_manifest/good_manifest b/vendor/github.com/moby/moby/distribution/fixtures/validate_manifest/good_manifest new file mode 100644 index 000000000..b107de322 --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/fixtures/validate_manifest/good_manifest @@ -0,0 +1,38 @@ +{ + "schemaVersion": 1, + "name": "library/hello-world", + "tag": "latest", + "architecture": "amd64", + "fsLayers": [ + { + "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" + }, + { + "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" + } + ], + "history": [ + { + "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD 
[\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" + }, + { + "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" + } + ], + "signatures": [ + { + "header": { + "jwk": { + "crv": "P-256", + "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", + "kty": "EC", + "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", + "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" + }, + "alg": "ES256" + }, + "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", + "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" + } + ] +} \ No newline at end of file diff --git a/vendor/github.com/moby/moby/distribution/metadata/metadata.go b/vendor/github.com/moby/moby/distribution/metadata/metadata.go new file mode 100644 index 000000000..3dae79555 --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/metadata/metadata.go @@ -0,0 +1,77 @@ +package metadata + +import ( + "io/ioutil" + "os" + "path/filepath" + "sync" + + "github.com/docker/docker/pkg/ioutils" +) + +// Store implements a K/V store for mapping distribution-related IDs +// to on-disk layer IDs and image IDs. The namespace identifies the type of +// mapping (i.e. "v1ids" or "artifacts"). MetadataStore is goroutine-safe. +type Store interface { + // Get retrieves data by namespace and key. + Get(namespace string, key string) ([]byte, error) + // Set writes data indexed by namespace and key. 
+ Set(namespace, key string, value []byte) error + // Delete removes data indexed by namespace and key. + Delete(namespace, key string) error +} + +// FSMetadataStore uses the filesystem to associate metadata with layer and +// image IDs. +type FSMetadataStore struct { + sync.RWMutex + basePath string + platform string +} + +// NewFSMetadataStore creates a new filesystem-based metadata store. +func NewFSMetadataStore(basePath, platform string) (*FSMetadataStore, error) { + if err := os.MkdirAll(basePath, 0700); err != nil { + return nil, err + } + return &FSMetadataStore{ + basePath: basePath, + platform: platform, + }, nil +} + +func (store *FSMetadataStore) path(namespace, key string) string { + return filepath.Join(store.basePath, namespace, key) +} + +// Get retrieves data by namespace and key. The data is read from a file named +// after the key, stored in the namespace's directory. +func (store *FSMetadataStore) Get(namespace string, key string) ([]byte, error) { + store.RLock() + defer store.RUnlock() + + return ioutil.ReadFile(store.path(namespace, key)) +} + +// Set writes data indexed by namespace and key. The data is written to a file +// named after the key, stored in the namespace's directory. +func (store *FSMetadataStore) Set(namespace, key string, value []byte) error { + store.Lock() + defer store.Unlock() + + path := store.path(namespace, key) + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + return ioutils.AtomicWriteFile(path, value, 0644) +} + +// Delete removes data indexed by namespace and key. The data file named after +// the key, stored in the namespace's directory is deleted. +func (store *FSMetadataStore) Delete(namespace, key string) error { + store.Lock() + defer store.Unlock() + + path := store.path(namespace, key) + return os.Remove(path) +} diff --git a/vendor/github.com/moby/moby/distribution/metadata/v1_id_service.go b/vendor/github.com/moby/moby/distribution/metadata/v1_id_service.go new file mode 100644 index 000000000..f262d4dc3 --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/metadata/v1_id_service.go @@ -0,0 +1,51 @@ +package metadata + +import ( + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/pkg/errors" +) + +// V1IDService maps v1 IDs to layers on disk. +type V1IDService struct { + store Store +} + +// NewV1IDService creates a new V1 ID mapping service. +func NewV1IDService(store Store) *V1IDService { + return &V1IDService{ + store: store, + } +} + +// namespace returns the namespace used by this service. +func (idserv *V1IDService) namespace() string { + return "v1id" +} + +// Get finds a layer by its V1 ID. +func (idserv *V1IDService) Get(v1ID, registry string) (layer.DiffID, error) { + if idserv.store == nil { + return "", errors.New("no v1IDService storage") + } + if err := v1.ValidateID(v1ID); err != nil { + return layer.DiffID(""), err + } + + idBytes, err := idserv.store.Get(idserv.namespace(), registry+","+v1ID) + if err != nil { + return layer.DiffID(""), err + } + return layer.DiffID(idBytes), nil +} + +// Set associates an image with a V1 ID. 
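The FSMetadataStore above maps a (namespace, key) pair straight onto the filesystem. A self-contained sketch of that layout, with a throwaway temp directory and plain WriteFile standing in for the atomic-write helper used by the vendored code:

    package main

    import (
        "fmt"
        "io/ioutil"
        "os"
        "path/filepath"
    )

    func main() {
        // Throwaway base directory; the daemon would use its state directory.
        basePath, err := ioutil.TempDir("", "metadata-store")
        if err != nil {
            panic(err)
        }
        defer os.RemoveAll(basePath)

        // Keys are laid out as <base>/<namespace>/<key>; a key such as
        // "sha256/abc123" (algorithm/hex) adds one more directory level.
        ns, key := "v2metadata-by-diffid", "sha256/abc123"
        path := filepath.Join(basePath, ns, key)

        if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
            panic(err)
        }
        if err := ioutil.WriteFile(path, []byte(`[{"Digest":"sha256:..."}]`), 0644); err != nil {
            panic(err)
        }

        data, err := ioutil.ReadFile(path)
        if err != nil {
            panic(err)
        }
        fmt.Println(path, "->", string(data))
    }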
+func (idserv *V1IDService) Set(v1ID, registry string, id layer.DiffID) error { + if idserv.store == nil { + return nil + } + if err := v1.ValidateID(v1ID); err != nil { + return err + } + return idserv.store.Set(idserv.namespace(), registry+","+v1ID, []byte(id)) +} diff --git a/vendor/github.com/moby/moby/distribution/metadata/v1_id_service_test.go b/vendor/github.com/moby/moby/distribution/metadata/v1_id_service_test.go new file mode 100644 index 000000000..385901ec4 --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/metadata/v1_id_service_test.go @@ -0,0 +1,84 @@ +package metadata + +import ( + "io/ioutil" + "os" + "runtime" + "testing" + + "github.com/docker/docker/layer" +) + +func TestV1IDService(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "v1-id-service-test") + if err != nil { + t.Fatalf("could not create temp dir: %v", err) + } + defer os.RemoveAll(tmpDir) + + metadataStore, err := NewFSMetadataStore(tmpDir, runtime.GOOS) + if err != nil { + t.Fatalf("could not create metadata store: %v", err) + } + v1IDService := NewV1IDService(metadataStore) + + testVectors := []struct { + registry string + v1ID string + layerID layer.DiffID + }{ + { + registry: "registry1", + v1ID: "f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937", + layerID: layer.DiffID("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"), + }, + { + registry: "registry2", + v1ID: "9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e", + layerID: layer.DiffID("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"), + }, + { + registry: "registry1", + v1ID: "9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e", + layerID: layer.DiffID("sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"), + }, + } + + // Set some associations + for _, vec := range testVectors { + err := v1IDService.Set(vec.v1ID, vec.registry, vec.layerID) + if err != nil { + t.Fatalf("error calling Set: %v", err) + } + } + + // Check the correct values are read back + for _, vec := range testVectors { + layerID, err := v1IDService.Get(vec.v1ID, vec.registry) + if err != nil { + t.Fatalf("error calling Get: %v", err) + } + if layerID != vec.layerID { + t.Fatal("Get returned incorrect layer ID") + } + } + + // Test Get on a nonexistent entry + _, err = v1IDService.Get("82379823067823853223359023576437723560923756b03560378f4497753917", "registry1") + if err == nil { + t.Fatal("expected error looking up nonexistent entry") + } + + // Overwrite one of the entries and read it back + err = v1IDService.Set(testVectors[0].v1ID, testVectors[0].registry, testVectors[1].layerID) + if err != nil { + t.Fatalf("error calling Set: %v", err) + } + layerID, err := v1IDService.Get(testVectors[0].v1ID, testVectors[0].registry) + if err != nil { + t.Fatalf("error calling Get: %v", err) + } + if layerID != testVectors[1].layerID { + t.Fatal("Get returned incorrect layer ID") + } +} diff --git a/vendor/github.com/moby/moby/distribution/metadata/v2_metadata_service.go b/vendor/github.com/moby/moby/distribution/metadata/v2_metadata_service.go new file mode 100644 index 000000000..7524f63ce --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/metadata/v2_metadata_service.go @@ -0,0 +1,241 @@ +package metadata + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "errors" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/layer" + "github.com/opencontainers/go-digest" +) + +// V2MetadataService maps layer IDs to 
a set of known metadata for +// the layer. +type V2MetadataService interface { + GetMetadata(diffID layer.DiffID) ([]V2Metadata, error) + GetDiffID(dgst digest.Digest) (layer.DiffID, error) + Add(diffID layer.DiffID, metadata V2Metadata) error + TagAndAdd(diffID layer.DiffID, hmacKey []byte, metadata V2Metadata) error + Remove(metadata V2Metadata) error +} + +// v2MetadataService implements V2MetadataService +type v2MetadataService struct { + store Store +} + +var _ V2MetadataService = &v2MetadataService{} + +// V2Metadata contains the digest and source repository information for a layer. +type V2Metadata struct { + Digest digest.Digest + SourceRepository string + // HMAC hashes above attributes with recent authconfig digest used as a key in order to determine matching + // metadata entries accompanied by the same credentials without actually exposing them. + HMAC string +} + +// CheckV2MetadataHMAC returns true if the given "meta" is tagged with a hmac hashed by the given "key". +func CheckV2MetadataHMAC(meta *V2Metadata, key []byte) bool { + if len(meta.HMAC) == 0 || len(key) == 0 { + return len(meta.HMAC) == 0 && len(key) == 0 + } + mac := hmac.New(sha256.New, key) + mac.Write([]byte(meta.Digest)) + mac.Write([]byte(meta.SourceRepository)) + expectedMac := mac.Sum(nil) + + storedMac, err := hex.DecodeString(meta.HMAC) + if err != nil { + return false + } + + return hmac.Equal(storedMac, expectedMac) +} + +// ComputeV2MetadataHMAC returns a hmac for the given "meta" hash by the given key. +func ComputeV2MetadataHMAC(key []byte, meta *V2Metadata) string { + if len(key) == 0 || meta == nil { + return "" + } + mac := hmac.New(sha256.New, key) + mac.Write([]byte(meta.Digest)) + mac.Write([]byte(meta.SourceRepository)) + return hex.EncodeToString(mac.Sum(nil)) +} + +// ComputeV2MetadataHMACKey returns a key for the given "authConfig" that can be used to hash v2 metadata +// entries. +func ComputeV2MetadataHMACKey(authConfig *types.AuthConfig) ([]byte, error) { + if authConfig == nil { + return nil, nil + } + key := authConfigKeyInput{ + Username: authConfig.Username, + Password: authConfig.Password, + Auth: authConfig.Auth, + IdentityToken: authConfig.IdentityToken, + RegistryToken: authConfig.RegistryToken, + } + buf, err := json.Marshal(&key) + if err != nil { + return nil, err + } + return []byte(digest.FromBytes([]byte(buf))), nil +} + +// authConfigKeyInput is a reduced AuthConfig structure holding just relevant credential data eligible for +// hmac key creation. +type authConfigKeyInput struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + IdentityToken string `json:"identitytoken,omitempty"` + RegistryToken string `json:"registrytoken,omitempty"` +} + +// maxMetadata is the number of metadata entries to keep per layer DiffID. +const maxMetadata = 50 + +// NewV2MetadataService creates a new diff ID to v2 metadata mapping service. 
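CheckV2MetadataHMAC and ComputeV2MetadataHMAC above boil down to a keyed hash over the digest plus the source repository, compared in constant time. The same computation in isolation (the key, digest, and repository strings are toy values):

    package main

    import (
        "crypto/hmac"
        "crypto/sha256"
        "encoding/hex"
        "fmt"
    )

    // tag computes an HMAC-SHA256 over the two metadata fields, mirroring
    // ComputeV2MetadataHMAC above.
    func tag(key []byte, dgst, repo string) string {
        mac := hmac.New(sha256.New, key)
        mac.Write([]byte(dgst))
        mac.Write([]byte(repo))
        return hex.EncodeToString(mac.Sum(nil))
    }

    func main() {
        key := []byte("derived-from-authconfig")
        dgst := "sha256:f0cd5ca1..."
        repo := "docker.io/library/hello-world"

        h := tag(key, dgst, repo)

        // Verification decodes the stored hex and compares in constant time,
        // as CheckV2MetadataHMAC does.
        stored, _ := hex.DecodeString(h)
        mac := hmac.New(sha256.New, key)
        mac.Write([]byte(dgst))
        mac.Write([]byte(repo))
        fmt.Println("match:", hmac.Equal(stored, mac.Sum(nil)))
    }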
+func NewV2MetadataService(store Store) V2MetadataService { + return &v2MetadataService{ + store: store, + } +} + +func (serv *v2MetadataService) diffIDNamespace() string { + return "v2metadata-by-diffid" +} + +func (serv *v2MetadataService) digestNamespace() string { + return "diffid-by-digest" +} + +func (serv *v2MetadataService) diffIDKey(diffID layer.DiffID) string { + return string(digest.Digest(diffID).Algorithm()) + "/" + digest.Digest(diffID).Hex() +} + +func (serv *v2MetadataService) digestKey(dgst digest.Digest) string { + return string(dgst.Algorithm()) + "/" + dgst.Hex() +} + +// GetMetadata finds the metadata associated with a layer DiffID. +func (serv *v2MetadataService) GetMetadata(diffID layer.DiffID) ([]V2Metadata, error) { + if serv.store == nil { + return nil, errors.New("no metadata storage") + } + jsonBytes, err := serv.store.Get(serv.diffIDNamespace(), serv.diffIDKey(diffID)) + if err != nil { + return nil, err + } + + var metadata []V2Metadata + if err := json.Unmarshal(jsonBytes, &metadata); err != nil { + return nil, err + } + + return metadata, nil +} + +// GetDiffID finds a layer DiffID from a digest. +func (serv *v2MetadataService) GetDiffID(dgst digest.Digest) (layer.DiffID, error) { + if serv.store == nil { + return layer.DiffID(""), errors.New("no metadata storage") + } + diffIDBytes, err := serv.store.Get(serv.digestNamespace(), serv.digestKey(dgst)) + if err != nil { + return layer.DiffID(""), err + } + + return layer.DiffID(diffIDBytes), nil +} + +// Add associates metadata with a layer DiffID. If too many metadata entries are +// present, the oldest one is dropped. +func (serv *v2MetadataService) Add(diffID layer.DiffID, metadata V2Metadata) error { + if serv.store == nil { + // Support a service which has no backend storage, in this case + // an add becomes a no-op. + // TODO: implement in memory storage + return nil + } + oldMetadata, err := serv.GetMetadata(diffID) + if err != nil { + oldMetadata = nil + } + newMetadata := make([]V2Metadata, 0, len(oldMetadata)+1) + + // Copy all other metadata to new slice + for _, oldMeta := range oldMetadata { + if oldMeta != metadata { + newMetadata = append(newMetadata, oldMeta) + } + } + + newMetadata = append(newMetadata, metadata) + + if len(newMetadata) > maxMetadata { + newMetadata = newMetadata[len(newMetadata)-maxMetadata:] + } + + jsonBytes, err := json.Marshal(newMetadata) + if err != nil { + return err + } + + err = serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes) + if err != nil { + return err + } + + return serv.store.Set(serv.digestNamespace(), serv.digestKey(metadata.Digest), []byte(diffID)) +} + +// TagAndAdd amends the given "meta" for hmac hashed by the given "hmacKey" and associates it with a layer +// DiffID. If too many metadata entries are present, the oldest one is dropped. +func (serv *v2MetadataService) TagAndAdd(diffID layer.DiffID, hmacKey []byte, meta V2Metadata) error { + meta.HMAC = ComputeV2MetadataHMAC(hmacKey, &meta) + return serv.Add(diffID, meta) +} + +// Remove disassociates a metadata entry from a layer DiffID. +func (serv *v2MetadataService) Remove(metadata V2Metadata) error { + if serv.store == nil { + // Support a service which has no backend storage, in this case + // an remove becomes a no-op. 
+ // TODO: implement in memory storage + return nil + } + diffID, err := serv.GetDiffID(metadata.Digest) + if err != nil { + return err + } + oldMetadata, err := serv.GetMetadata(diffID) + if err != nil { + oldMetadata = nil + } + newMetadata := make([]V2Metadata, 0, len(oldMetadata)) + + // Copy all other metadata to new slice + for _, oldMeta := range oldMetadata { + if oldMeta != metadata { + newMetadata = append(newMetadata, oldMeta) + } + } + + if len(newMetadata) == 0 { + return serv.store.Delete(serv.diffIDNamespace(), serv.diffIDKey(diffID)) + } + + jsonBytes, err := json.Marshal(newMetadata) + if err != nil { + return err + } + + return serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes) +} diff --git a/vendor/github.com/moby/moby/distribution/metadata/v2_metadata_service_test.go b/vendor/github.com/moby/moby/distribution/metadata/v2_metadata_service_test.go new file mode 100644 index 000000000..b5d59b229 --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/metadata/v2_metadata_service_test.go @@ -0,0 +1,116 @@ +package metadata + +import ( + "encoding/hex" + "io/ioutil" + "math/rand" + "os" + "reflect" + "runtime" + "testing" + + "github.com/docker/docker/layer" + "github.com/opencontainers/go-digest" +) + +func TestV2MetadataService(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "blobsum-storage-service-test") + if err != nil { + t.Fatalf("could not create temp dir: %v", err) + } + defer os.RemoveAll(tmpDir) + + metadataStore, err := NewFSMetadataStore(tmpDir, runtime.GOOS) + if err != nil { + t.Fatalf("could not create metadata store: %v", err) + } + V2MetadataService := NewV2MetadataService(metadataStore) + + tooManyBlobSums := make([]V2Metadata, 100) + for i := range tooManyBlobSums { + randDigest := randomDigest() + tooManyBlobSums[i] = V2Metadata{Digest: randDigest} + } + + testVectors := []struct { + diffID layer.DiffID + metadata []V2Metadata + }{ + { + diffID: layer.DiffID("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"), + metadata: []V2Metadata{ + {Digest: digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937")}, + }, + }, + { + diffID: layer.DiffID("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"), + metadata: []V2Metadata{ + {Digest: digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937")}, + {Digest: digest.Digest("sha256:9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e")}, + }, + }, + { + diffID: layer.DiffID("sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"), + metadata: tooManyBlobSums, + }, + } + + // Set some associations + for _, vec := range testVectors { + for _, blobsum := range vec.metadata { + err := V2MetadataService.Add(vec.diffID, blobsum) + if err != nil { + t.Fatalf("error calling Set: %v", err) + } + } + } + + // Check the correct values are read back + for _, vec := range testVectors { + metadata, err := V2MetadataService.GetMetadata(vec.diffID) + if err != nil { + t.Fatalf("error calling Get: %v", err) + } + expectedMetadataEntries := len(vec.metadata) + if expectedMetadataEntries > 50 { + expectedMetadataEntries = 50 + } + if !reflect.DeepEqual(metadata, vec.metadata[len(vec.metadata)-expectedMetadataEntries:len(vec.metadata)]) { + t.Fatal("Get returned incorrect layer ID") + } + } + + // Test GetMetadata on a nonexistent entry + _, err = V2MetadataService.GetMetadata(layer.DiffID("sha256:82379823067823853223359023576437723560923756b03560378f4497753917")) + 
if err == nil { + t.Fatal("expected error looking up nonexistent entry") + } + + // Test GetDiffID on a nonexistent entry + _, err = V2MetadataService.GetDiffID(digest.Digest("sha256:82379823067823853223359023576437723560923756b03560378f4497753917")) + if err == nil { + t.Fatal("expected error looking up nonexistent entry") + } + + // Overwrite one of the entries and read it back + err = V2MetadataService.Add(testVectors[1].diffID, testVectors[0].metadata[0]) + if err != nil { + t.Fatalf("error calling Add: %v", err) + } + diffID, err := V2MetadataService.GetDiffID(testVectors[0].metadata[0].Digest) + if err != nil { + t.Fatalf("error calling GetDiffID: %v", err) + } + if diffID != testVectors[1].diffID { + t.Fatal("GetDiffID returned incorrect diffID") + } +} + +func randomDigest() digest.Digest { + b := [32]byte{} + for i := 0; i < len(b); i++ { + b[i] = byte(rand.Intn(256)) + } + d := hex.EncodeToString(b[:]) + return digest.Digest("sha256:" + d) +} diff --git a/vendor/github.com/moby/moby/distribution/pull.go b/vendor/github.com/moby/moby/distribution/pull.go new file mode 100644 index 000000000..c5bdbd6d3 --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/pull.go @@ -0,0 +1,198 @@ +package distribution + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/pkg/progress" + refstore "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/opencontainers/go-digest" + "golang.org/x/net/context" +) + +// Puller is an interface that abstracts pulling for different API versions. +type Puller interface { + // Pull tries to pull the image referenced by `tag` + // Pull returns an error if any, as well as a boolean that determines whether to retry Pull on the next configured endpoint. + // + Pull(ctx context.Context, ref reference.Named) error +} + +// newPuller returns a Puller interface that will pull from either a v1 or v2 +// registry. The endpoint argument contains a Version field that determines +// whether a v1 or v2 puller will be created. The other parameters are passed +// through to the underlying puller implementation for use during the actual +// pull operation. +func newPuller(endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePullConfig *ImagePullConfig) (Puller, error) { + switch endpoint.Version { + case registry.APIVersion2: + return &v2Puller{ + V2MetadataService: metadata.NewV2MetadataService(imagePullConfig.MetadataStore), + endpoint: endpoint, + config: imagePullConfig, + repoInfo: repoInfo, + }, nil + case registry.APIVersion1: + return &v1Puller{ + v1IDService: metadata.NewV1IDService(imagePullConfig.MetadataStore), + endpoint: endpoint, + config: imagePullConfig, + repoInfo: repoInfo, + }, nil + } + return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL) +} + +// Pull initiates a pull operation. image is the repository name to pull, and +// tag may be either empty, or indicate a specific tag to pull. 
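Pull, defined below, tries each endpoint in order and uses two pieces of bookkeeping to prune later attempts: once a v2 registry is confirmed it never falls back to v1, and once a host is known to speak TLS it never retries that host in plaintext. A compressed, hypothetical sketch of just that skip logic (the endpoint type is a stand-in for registry.APIEndpoint):

    package main

    import "fmt"

    // endpoint is a stand-in for registry.APIEndpoint: a host, a URL scheme,
    // and an API version (1 or 2). Hypothetical, for illustration only.
    type endpoint struct {
        host    string
        scheme  string
        version int
    }

    func main() {
        endpoints := []endpoint{
            {"registry.example.com", "https", 2},
            {"registry.example.com", "http", 2},  // plaintext retry candidate
            {"registry.example.com", "https", 1}, // v1 fallback candidate
        }

        confirmedV2 := false
        confirmedTLS := map[string]bool{}

        for _, ep := range endpoints {
            if confirmedV2 && ep.version == 1 {
                fmt.Println("skipping v1 endpoint, v2 already confirmed:", ep)
                continue
            }
            if ep.scheme != "https" && confirmedTLS[ep.host] {
                fmt.Println("skipping non-TLS endpoint for TLS-confirmed host:", ep)
                continue
            }
            // Pretend the v2 HTTPS attempt spoke TLS fine but had to fall back.
            if ep.version == 2 && ep.scheme == "https" {
                confirmedV2 = true
                confirmedTLS[ep.host] = true
                fmt.Println("fallback after error on:", ep)
                continue
            }
            fmt.Println("pulling from:", ep)
        }
    }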
+func Pull(ctx context.Context, ref reference.Named, imagePullConfig *ImagePullConfig) error { + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := imagePullConfig.RegistryService.ResolveRepository(ref) + if err != nil { + return err + } + + // makes sure name is not `scratch` + if err := ValidateRepoName(repoInfo.Name); err != nil { + return err + } + + endpoints, err := imagePullConfig.RegistryService.LookupPullEndpoints(reference.Domain(repoInfo.Name)) + if err != nil { + return err + } + + var ( + lastErr error + + // discardNoSupportErrors is used to track whether an endpoint encountered an error of type registry.ErrNoSupport + // By default it is false, which means that if an ErrNoSupport error is encountered, it will be saved in lastErr. + // As soon as another kind of error is encountered, discardNoSupportErrors is set to true, avoiding the saving of + // any subsequent ErrNoSupport errors in lastErr. + // It's needed for pull-by-digest on v1 endpoints: if there are only v1 endpoints configured, the error should be + // returned and displayed, but if there was a v2 endpoint which supports pull-by-digest, then the last relevant + // error is the ones from v2 endpoints not v1. + discardNoSupportErrors bool + + // confirmedV2 is set to true if a pull attempt managed to + // confirm that it was talking to a v2 registry. This will + // prevent fallback to the v1 protocol. + confirmedV2 bool + + // confirmedTLSRegistries is a map indicating which registries + // are known to be using TLS. There should never be a plaintext + // retry for any of these. + confirmedTLSRegistries = make(map[string]struct{}) + ) + for _, endpoint := range endpoints { + if imagePullConfig.RequireSchema2 && endpoint.Version == registry.APIVersion1 { + continue + } + + if confirmedV2 && endpoint.Version == registry.APIVersion1 { + logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL) + continue + } + + if endpoint.URL.Scheme != "https" { + if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS { + logrus.Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL) + continue + } + } + + logrus.Debugf("Trying to pull %s from %s %s", reference.FamiliarName(repoInfo.Name), endpoint.URL, endpoint.Version) + + puller, err := newPuller(endpoint, repoInfo, imagePullConfig) + if err != nil { + lastErr = err + continue + } + if err := puller.Pull(ctx, ref); err != nil { + // Was this pull cancelled? If so, don't try to fall + // back. + fallback := false + select { + case <-ctx.Done(): + default: + if fallbackErr, ok := err.(fallbackError); ok { + fallback = true + confirmedV2 = confirmedV2 || fallbackErr.confirmedV2 + if fallbackErr.transportOK && endpoint.URL.Scheme == "https" { + confirmedTLSRegistries[endpoint.URL.Host] = struct{}{} + } + err = fallbackErr.err + } + } + if fallback { + if _, ok := err.(ErrNoSupport); !ok { + // Because we found an error that's not ErrNoSupport, discard all subsequent ErrNoSupport errors. + discardNoSupportErrors = true + // append subsequent errors + lastErr = err + } else if !discardNoSupportErrors { + // Save the ErrNoSupport error, because it's either the first error or all encountered errors + // were also ErrNoSupport errors. 
+ // append subsequent errors + lastErr = err + } + logrus.Infof("Attempting next endpoint for pull after error: %v", err) + continue + } + logrus.Errorf("Not continuing with pull after error: %v", err) + return TranslatePullError(err, ref) + } + + imagePullConfig.ImageEventLogger(reference.FamiliarString(ref), reference.FamiliarName(repoInfo.Name), "pull") + return nil + } + + if lastErr == nil { + lastErr = fmt.Errorf("no endpoints found for %s", reference.FamiliarString(ref)) + } + + return TranslatePullError(lastErr, ref) +} + +// writeStatus writes a status message to out. If layersDownloaded is true, the +// status message indicates that a newer image was downloaded. Otherwise, it +// indicates that the image is up to date. requestedTag is the tag the message +// will refer to. +func writeStatus(requestedTag string, out progress.Output, layersDownloaded bool) { + if layersDownloaded { + progress.Message(out, "", "Status: Downloaded newer image for "+requestedTag) + } else { + progress.Message(out, "", "Status: Image is up to date for "+requestedTag) + } +} + +// ValidateRepoName validates the name of a repository. +func ValidateRepoName(name reference.Named) error { + if reference.FamiliarName(name) == api.NoBaseImageSpecifier { + return fmt.Errorf("'%s' is a reserved name", api.NoBaseImageSpecifier) + } + return nil +} + +func addDigestReference(store refstore.Store, ref reference.Named, dgst digest.Digest, id digest.Digest) error { + dgstRef, err := reference.WithDigest(reference.TrimNamed(ref), dgst) + if err != nil { + return err + } + + if oldTagID, err := store.Get(dgstRef); err == nil { + if oldTagID != id { + // Updating digests not supported by reference store + logrus.Errorf("Image ID for digest %s changed from %s to %s, cannot update", dgst.String(), oldTagID, id) + } + return nil + } else if err != refstore.ErrDoesNotExist { + return err + } + + return store.AddDigest(dgstRef, id, true) +} diff --git a/vendor/github.com/moby/moby/distribution/pull_v1.go b/vendor/github.com/moby/moby/distribution/pull_v1.go new file mode 100644 index 000000000..7151a7584 --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/pull_v1.go @@ -0,0 +1,368 @@ +package distribution + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/url" + "os" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +type v1Puller struct { + v1IDService *metadata.V1IDService + endpoint registry.APIEndpoint + config *ImagePullConfig + repoInfo *registry.RepositoryInfo + session *registry.Session +} + +func (p *v1Puller) Pull(ctx context.Context, ref reference.Named) error { + if _, isCanonical := ref.(reference.Canonical); isCanonical { + // Allowing fallback, because HTTPS v1 is before HTTP v2 + return fallbackError{err: ErrNoSupport{Err: errors.New("Cannot pull by digest with v1 registry")}} + } + + tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name) + if err != nil { + return err + } + // Adds Docker-specific headers as well as 
user-specified headers (metaHeaders) + tr := transport.NewTransport( + // TODO(tiborvass): was ReceiveTimeout + registry.NewTransport(tlsConfig), + registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)..., + ) + client := registry.HTTPClient(tr) + v1Endpoint, err := p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders) + if err != nil { + logrus.Debugf("Could not get v1 endpoint: %v", err) + return fallbackError{err: err} + } + p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint) + if err != nil { + // TODO(dmcgowan): Check if should fallback + logrus.Debugf("Fallback from error: %s", err) + return fallbackError{err: err} + } + if err := p.pullRepository(ctx, ref); err != nil { + // TODO(dmcgowan): Check if should fallback + return err + } + progress.Message(p.config.ProgressOutput, "", p.repoInfo.Name.Name()+": this image was pulled from a legacy registry. Important: This registry version will not be supported in future versions of docker.") + + return nil +} + +func (p *v1Puller) pullRepository(ctx context.Context, ref reference.Named) error { + progress.Message(p.config.ProgressOutput, "", "Pulling repository "+p.repoInfo.Name.Name()) + + tagged, isTagged := ref.(reference.NamedTagged) + + repoData, err := p.session.GetRepositoryData(p.repoInfo.Name) + if err != nil { + if strings.Contains(err.Error(), "HTTP code: 404") { + if isTagged { + return fmt.Errorf("Error: image %s:%s not found", reference.Path(p.repoInfo.Name), tagged.Tag()) + } + return fmt.Errorf("Error: image %s not found", reference.Path(p.repoInfo.Name)) + } + // Unexpected HTTP error + return err + } + + logrus.Debug("Retrieving the tag list") + var tagsList map[string]string + if !isTagged { + tagsList, err = p.session.GetRemoteTags(repoData.Endpoints, p.repoInfo.Name) + } else { + var tagID string + tagsList = make(map[string]string) + tagID, err = p.session.GetRemoteTag(repoData.Endpoints, p.repoInfo.Name, tagged.Tag()) + if err == registry.ErrRepoNotFound { + return fmt.Errorf("Tag %s not found in repository %s", tagged.Tag(), p.repoInfo.Name.Name()) + } + tagsList[tagged.Tag()] = tagID + } + if err != nil { + logrus.Errorf("unable to get remote tags: %s", err) + return err + } + + for tag, id := range tagsList { + repoData.ImgList[id] = ®istry.ImgData{ + ID: id, + Tag: tag, + Checksum: "", + } + } + + layersDownloaded := false + for _, imgData := range repoData.ImgList { + if isTagged && imgData.Tag != tagged.Tag() { + continue + } + + err := p.downloadImage(ctx, repoData, imgData, &layersDownloaded) + if err != nil { + return err + } + } + + writeStatus(reference.FamiliarString(ref), p.config.ProgressOutput, layersDownloaded) + return nil +} + +func (p *v1Puller) downloadImage(ctx context.Context, repoData *registry.RepositoryData, img *registry.ImgData, layersDownloaded *bool) error { + if img.Tag == "" { + logrus.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID) + return nil + } + + localNameRef, err := reference.WithTag(p.repoInfo.Name, img.Tag) + if err != nil { + retErr := fmt.Errorf("Image (id: %s) has invalid tag: %s", img.ID, img.Tag) + logrus.Debug(retErr.Error()) + return retErr + } + + if err := v1.ValidateID(img.ID); err != nil { + return err + } + + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s", img.Tag, p.repoInfo.Name.Name()) + success := false + var lastErr error + for _, ep := range p.repoInfo.Index.Mirrors { + ep += "v1/" + 
progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s, mirror: %s", img.Tag, p.repoInfo.Name.Name(), ep)
+		if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil {
+			// Don't report errors when pulling from mirrors.
+			logrus.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, p.repoInfo.Name.Name(), ep, err)
+			continue
+		}
+		success = true
+		break
+	}
+	if !success {
+		for _, ep := range repoData.Endpoints {
+			progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s, endpoint: %s", img.Tag, p.repoInfo.Name.Name(), ep)
+			if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil {
+				// It's not ideal that only the last error is returned; it would be better to concatenate the errors.
+				// As the error is also written to the output stream, the user will still see it.
+				lastErr = err
+				progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, p.repoInfo.Name.Name(), ep, err)
+				continue
+			}
+			success = true
+			break
+		}
+	}
+	if !success {
+		err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, p.repoInfo.Name.Name(), lastErr)
+		progress.Update(p.config.ProgressOutput, stringid.TruncateID(img.ID), err.Error())
+		return err
+	}
+	return nil
+}
+
+func (p *v1Puller) pullImage(ctx context.Context, v1ID, endpoint string, localNameRef reference.Named, layersDownloaded *bool) (err error) {
+	var history []string
+	history, err = p.session.GetRemoteHistory(v1ID, endpoint)
+	if err != nil {
+		return err
+	}
+	if len(history) < 1 {
+		return fmt.Errorf("empty history for image %s", v1ID)
+	}
+	progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1ID), "Pulling dependent layers")
+
+	var (
+		descriptors []xfer.DownloadDescriptor
+		newHistory  []image.History
+		imgJSON     []byte
+		imgSize     int64
+	)
+
+	// Iterate over layers, in order from bottom-most to top-most. Download
+	// config for all layers and create descriptors.
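+	// For example (an illustrative note): history is ordered top-most layer
+	// first here, so with history = [top, mid, base] the loop below walks
+	// indexes 2, 1, 0 and descriptors ends up ordered [base, mid, top],
+	// the bottom-up order the download manager expects.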
+ for i := len(history) - 1; i >= 0; i-- { + v1LayerID := history[i] + imgJSON, imgSize, err = p.downloadLayerConfig(v1LayerID, endpoint) + if err != nil { + return err + } + + // Create a new-style config from the legacy configs + h, err := v1.HistoryFromConfig(imgJSON, false) + if err != nil { + return err + } + newHistory = append(newHistory, h) + + layerDescriptor := &v1LayerDescriptor{ + v1LayerID: v1LayerID, + indexName: p.repoInfo.Index.Name, + endpoint: endpoint, + v1IDService: p.v1IDService, + layersDownloaded: layersDownloaded, + layerSize: imgSize, + session: p.session, + } + + descriptors = append(descriptors, layerDescriptor) + } + + rootFS := image.NewRootFS() + resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, "", descriptors, p.config.ProgressOutput) + if err != nil { + return err + } + defer release() + + config, err := v1.MakeConfigFromV1Config(imgJSON, &resultRootFS, newHistory) + if err != nil { + return err + } + + imageID, err := p.config.ImageStore.Put(config) + if err != nil { + return err + } + + if p.config.ReferenceStore != nil { + if err := p.config.ReferenceStore.AddTag(localNameRef, imageID, true); err != nil { + return err + } + } + + return nil +} + +func (p *v1Puller) downloadLayerConfig(v1LayerID, endpoint string) (imgJSON []byte, imgSize int64, err error) { + progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1LayerID), "Pulling metadata") + + retries := 5 + for j := 1; j <= retries; j++ { + imgJSON, imgSize, err := p.session.GetRemoteImageJSON(v1LayerID, endpoint) + if err != nil && j == retries { + progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1LayerID), "Error pulling layer metadata") + return nil, 0, err + } else if err != nil { + time.Sleep(time.Duration(j) * 500 * time.Millisecond) + continue + } + + return imgJSON, imgSize, nil + } + + // not reached + return nil, 0, nil +} + +type v1LayerDescriptor struct { + v1LayerID string + indexName string + endpoint string + v1IDService *metadata.V1IDService + layersDownloaded *bool + layerSize int64 + session *registry.Session + tmpFile *os.File +} + +func (ld *v1LayerDescriptor) Key() string { + return "v1:" + ld.v1LayerID +} + +func (ld *v1LayerDescriptor) ID() string { + return stringid.TruncateID(ld.v1LayerID) +} + +func (ld *v1LayerDescriptor) DiffID() (layer.DiffID, error) { + return ld.v1IDService.Get(ld.v1LayerID, ld.indexName) +} + +func (ld *v1LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { + progress.Update(progressOutput, ld.ID(), "Pulling fs layer") + layerReader, err := ld.session.GetRemoteImageLayer(ld.v1LayerID, ld.endpoint, ld.layerSize) + if err != nil { + progress.Update(progressOutput, ld.ID(), "Error pulling dependent layers") + if uerr, ok := err.(*url.Error); ok { + err = uerr.Err + } + if terr, ok := err.(net.Error); ok && terr.Timeout() { + return nil, 0, err + } + return nil, 0, xfer.DoNotRetry{Err: err} + } + *ld.layersDownloaded = true + + ld.tmpFile, err = ioutil.TempFile("", "GetImageBlob") + if err != nil { + layerReader.Close() + return nil, 0, err + } + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerReader), progressOutput, ld.layerSize, ld.ID(), "Downloading") + defer reader.Close() + + _, err = io.Copy(ld.tmpFile, reader) + if err != nil { + ld.Close() + return nil, 0, err + } + + progress.Update(progressOutput, ld.ID(), "Download complete") + + logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), ld.tmpFile.Name()) + + 
ld.tmpFile.Seek(0, 0) + + // hand off the temporary file to the download manager, so it will only + // be closed once + tmpFile := ld.tmpFile + ld.tmpFile = nil + + return ioutils.NewReadCloserWrapper(tmpFile, func() error { + tmpFile.Close() + err := os.RemoveAll(tmpFile.Name()) + if err != nil { + logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) + } + return err + }), ld.layerSize, nil +} + +func (ld *v1LayerDescriptor) Close() { + if ld.tmpFile != nil { + ld.tmpFile.Close() + if err := os.RemoveAll(ld.tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) + } + ld.tmpFile = nil + } +} + +func (ld *v1LayerDescriptor) Registered(diffID layer.DiffID) { + // Cache mapping from this layer's DiffID to the blobsum + ld.v1IDService.Set(ld.v1LayerID, ld.indexName, diffID) +} diff --git a/vendor/github.com/moby/moby/distribution/pull_v2.go b/vendor/github.com/moby/moby/distribution/pull_v2.go new file mode 100644 index 000000000..50257f5cb --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/pull_v2.go @@ -0,0 +1,920 @@ +package distribution + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/url" + "os" + "runtime" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/system" + refstore "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/opencontainers/go-digest" + "golang.org/x/net/context" +) + +var ( + errRootFSMismatch = errors.New("layers from manifest don't match image configuration") + errRootFSInvalid = errors.New("invalid rootfs in image configuration") +) + +// ImageConfigPullError is an error pulling the image config blob +// (only applies to schema2). +type ImageConfigPullError struct { + Err error +} + +// Error returns the error string for ImageConfigPullError. +func (e ImageConfigPullError) Error() string { + return "error pulling image configuration: " + e.Err.Error() +} + +type v2Puller struct { + V2MetadataService metadata.V2MetadataService + endpoint registry.APIEndpoint + config *ImagePullConfig + repoInfo *registry.RepositoryInfo + repo distribution.Repository + // confirmedV2 is set to true if we confirm we're talking to a v2 + // registry. This is used to limit fallbacks to the v1 protocol. 
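+	// For example, once a tag listing or manifest fetch has succeeded over
+	// v2, a later failure should surface as an error rather than silently
+	// retrying the same pull against the v1 protocol.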
+ confirmedV2 bool +} + +func (p *v2Puller) Pull(ctx context.Context, ref reference.Named) (err error) { + // TODO(tiborvass): was ReceiveTimeout + p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull") + if err != nil { + logrus.Warnf("Error getting v2 registry: %v", err) + return err + } + + if err = p.pullV2Repository(ctx, ref); err != nil { + if _, ok := err.(fallbackError); ok { + return err + } + if continueOnError(err) { + return fallbackError{ + err: err, + confirmedV2: p.confirmedV2, + transportOK: true, + } + } + } + return err +} + +func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (err error) { + var layersDownloaded bool + if !reference.IsNameOnly(ref) { + layersDownloaded, err = p.pullV2Tag(ctx, ref) + if err != nil { + return err + } + } else { + tags, err := p.repo.Tags(ctx).All(ctx) + if err != nil { + // If this repository doesn't exist on V2, we should + // permit a fallback to V1. + return allowV1Fallback(err) + } + + // The v2 registry knows about this repository, so we will not + // allow fallback to the v1 protocol even if we encounter an + // error later on. + p.confirmedV2 = true + + for _, tag := range tags { + tagRef, err := reference.WithTag(ref, tag) + if err != nil { + return err + } + pulledNew, err := p.pullV2Tag(ctx, tagRef) + if err != nil { + // Since this is the pull-all-tags case, don't + // allow an error pulling a particular tag to + // make the whole pull fall back to v1. + if fallbackErr, ok := err.(fallbackError); ok { + return fallbackErr.err + } + return err + } + // pulledNew is true if either new layers were downloaded OR if existing images were newly tagged + // TODO(tiborvass): should we change the name of `layersDownload`? What about message in WriteStatus? 
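+			// The OR accumulates across tags: if any single tag pulled new
+			// content, the whole pull-all-tags operation reports a download.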
+ layersDownloaded = layersDownloaded || pulledNew + } + } + + writeStatus(reference.FamiliarString(ref), p.config.ProgressOutput, layersDownloaded) + + return nil +} + +type v2LayerDescriptor struct { + digest digest.Digest + diffID layer.DiffID + repoInfo *registry.RepositoryInfo + repo distribution.Repository + V2MetadataService metadata.V2MetadataService + tmpFile *os.File + verifier digest.Verifier + src distribution.Descriptor +} + +func (ld *v2LayerDescriptor) Key() string { + return "v2:" + ld.digest.String() +} + +func (ld *v2LayerDescriptor) ID() string { + return stringid.TruncateID(ld.digest.String()) +} + +func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) { + if ld.diffID != "" { + return ld.diffID, nil + } + return ld.V2MetadataService.GetDiffID(ld.digest) +} + +func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { + logrus.Debugf("pulling blob %q", ld.digest) + + var ( + err error + offset int64 + ) + + if ld.tmpFile == nil { + ld.tmpFile, err = createDownloadFile() + if err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + } else { + offset, err = ld.tmpFile.Seek(0, os.SEEK_END) + if err != nil { + logrus.Debugf("error seeking to end of download file: %v", err) + offset = 0 + + ld.tmpFile.Close() + if err := os.Remove(ld.tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) + } + ld.tmpFile, err = createDownloadFile() + if err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + } else if offset != 0 { + logrus.Debugf("attempting to resume download of %q from %d bytes", ld.digest, offset) + } + } + + tmpFile := ld.tmpFile + + layerDownload, err := ld.open(ctx) + if err != nil { + logrus.Errorf("Error initiating layer download: %v", err) + return nil, 0, retryOnError(err) + } + + if offset != 0 { + _, err := layerDownload.Seek(offset, os.SEEK_SET) + if err != nil { + if err := ld.truncateDownloadFile(); err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + return nil, 0, err + } + } + size, err := layerDownload.Seek(0, os.SEEK_END) + if err != nil { + // Seek failed, perhaps because there was no Content-Length + // header. This shouldn't fail the download, because we can + // still continue without a progress bar. + size = 0 + } else { + if size != 0 && offset > size { + logrus.Debug("Partial download is larger than full blob. Starting over") + offset = 0 + if err := ld.truncateDownloadFile(); err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + } + + // Restore the seek offset either at the beginning of the + // stream, or just after the last byte we have from previous + // attempts. 
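+	// For example, with a 100-byte blob and 40 bytes already in the temp
+	// file, this seeks to offset 40 and the progress reader below reports
+	// the remaining size-offset = 60 bytes.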
+ _, err = layerDownload.Seek(offset, os.SEEK_SET) + if err != nil { + return nil, 0, err + } + } + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size-offset, ld.ID(), "Downloading") + defer reader.Close() + + if ld.verifier == nil { + ld.verifier = ld.digest.Verifier() + } + + _, err = io.Copy(tmpFile, io.TeeReader(reader, ld.verifier)) + if err != nil { + if err == transport.ErrWrongCodeForByteRange { + if err := ld.truncateDownloadFile(); err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + return nil, 0, err + } + return nil, 0, retryOnError(err) + } + + progress.Update(progressOutput, ld.ID(), "Verifying Checksum") + + if !ld.verifier.Verified() { + err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest) + logrus.Error(err) + + // Allow a retry if this digest verification error happened + // after a resumed download. + if offset != 0 { + if err := ld.truncateDownloadFile(); err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + + return nil, 0, err + } + return nil, 0, xfer.DoNotRetry{Err: err} + } + + progress.Update(progressOutput, ld.ID(), "Download complete") + + logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name()) + + _, err = tmpFile.Seek(0, os.SEEK_SET) + if err != nil { + tmpFile.Close() + if err := os.Remove(tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) + } + ld.tmpFile = nil + ld.verifier = nil + return nil, 0, xfer.DoNotRetry{Err: err} + } + + // hand off the temporary file to the download manager, so it will only + // be closed once + ld.tmpFile = nil + + return ioutils.NewReadCloserWrapper(tmpFile, func() error { + tmpFile.Close() + err := os.RemoveAll(tmpFile.Name()) + if err != nil { + logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) + } + return err + }), size, nil +} + +func (ld *v2LayerDescriptor) Close() { + if ld.tmpFile != nil { + ld.tmpFile.Close() + if err := os.RemoveAll(ld.tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) + } + } +} + +func (ld *v2LayerDescriptor) truncateDownloadFile() error { + // Need a new hash context since we will be redoing the download + ld.verifier = nil + + if _, err := ld.tmpFile.Seek(0, os.SEEK_SET); err != nil { + logrus.Errorf("error seeking to beginning of download file: %v", err) + return err + } + + if err := ld.tmpFile.Truncate(0); err != nil { + logrus.Errorf("error truncating download file: %v", err) + return err + } + + return nil +} + +func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) { + // Cache mapping from this layer's DiffID to the blobsum + ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.Name.Name()}) +} + +func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated bool, err error) { + manSvc, err := p.repo.Manifests(ctx) + if err != nil { + return false, err + } + + var ( + manifest distribution.Manifest + tagOrDigest string // Used for logging/progress only + ) + if digested, isDigested := ref.(reference.Canonical); isDigested { + manifest, err = manSvc.Get(ctx, digested.Digest()) + if err != nil { + return false, err + } + tagOrDigest = digested.Digest().String() + } else if tagged, isTagged := ref.(reference.NamedTagged); isTagged { + manifest, err = manSvc.Get(ctx, "", distribution.WithTag(tagged.Tag())) + if err != nil { + return false, allowV1Fallback(err) + } + tagOrDigest = tagged.Tag() + } 
else { + return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", reference.FamiliarString(ref)) + } + + if manifest == nil { + return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest) + } + + if m, ok := manifest.(*schema2.DeserializedManifest); ok { + var allowedMediatype bool + for _, t := range p.config.Schema2Types { + if m.Manifest.Config.MediaType == t { + allowedMediatype = true + break + } + } + if !allowedMediatype { + configClass := mediaTypeClasses[m.Manifest.Config.MediaType] + if configClass == "" { + configClass = "unknown" + } + return false, fmt.Errorf("Encountered remote %q(%s) when fetching", m.Manifest.Config.MediaType, configClass) + } + } + + // If manSvc.Get succeeded, we can be confident that the registry on + // the other side speaks the v2 protocol. + p.confirmedV2 = true + + logrus.Debugf("Pulling ref from V2 registry: %s", reference.FamiliarString(ref)) + progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+reference.FamiliarName(p.repo.Named())) + + var ( + id digest.Digest + manifestDigest digest.Digest + ) + + switch v := manifest.(type) { + case *schema1.SignedManifest: + if p.config.RequireSchema2 { + return false, fmt.Errorf("invalid manifest: not schema2") + } + id, manifestDigest, err = p.pullSchema1(ctx, ref, v) + if err != nil { + return false, err + } + case *schema2.DeserializedManifest: + id, manifestDigest, err = p.pullSchema2(ctx, ref, v) + if err != nil { + return false, err + } + case *manifestlist.DeserializedManifestList: + id, manifestDigest, err = p.pullManifestList(ctx, ref, v) + if err != nil { + return false, err + } + default: + return false, errors.New("unsupported manifest format") + } + + progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String()) + + if p.config.ReferenceStore != nil { + oldTagID, err := p.config.ReferenceStore.Get(ref) + if err == nil { + if oldTagID == id { + return false, addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id) + } + } else if err != refstore.ErrDoesNotExist { + return false, err + } + + if canonical, ok := ref.(reference.Canonical); ok { + if err = p.config.ReferenceStore.AddDigest(canonical, id, true); err != nil { + return false, err + } + } else { + if err = addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil { + return false, err + } + if err = p.config.ReferenceStore.AddTag(ref, id, true); err != nil { + return false, err + } + } + } + return true, nil +} + +func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Named, unverifiedManifest *schema1.SignedManifest) (id digest.Digest, manifestDigest digest.Digest, err error) { + var verifiedManifest *schema1.Manifest + verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref) + if err != nil { + return "", "", err + } + + rootFS := image.NewRootFS() + + // remove duplicate layers and check parent chain validity + err = fixManifestLayers(verifiedManifest) + if err != nil { + return "", "", err + } + + var descriptors []xfer.DownloadDescriptor + + // Image history converted to the new format + var history []image.History + + // Note that the order of this loop is in the direction of bottom-most + // to top-most, so that the downloads slice gets ordered correctly. 
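+	// Illustration: FSLayers and History are listed top-most first, so
+	// index len-1 is the base layer. Entries whose V1Compatibility sets
+	// "throwaway": true contribute history but no layer descriptor.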
+ for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- { + blobSum := verifiedManifest.FSLayers[i].BlobSum + + var throwAway struct { + ThrowAway bool `json:"throwaway,omitempty"` + } + if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil { + return "", "", err + } + + h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway) + if err != nil { + return "", "", err + } + history = append(history, h) + + if throwAway.ThrowAway { + continue + } + + layerDescriptor := &v2LayerDescriptor{ + digest: blobSum, + repoInfo: p.repoInfo, + repo: p.repo, + V2MetadataService: p.V2MetadataService, + } + + descriptors = append(descriptors, layerDescriptor) + } + + // The v1 manifest itself doesn't directly contain a platform. However, + // the history does, but unfortunately that's a string, so search through + // all the history until hopefully we find one which indicates the os. + platform := runtime.GOOS + if system.LCOWSupported() { + type config struct { + Os string `json:"os,omitempty"` + } + for _, v := range verifiedManifest.History { + var c config + if err := json.Unmarshal([]byte(v.V1Compatibility), &c); err == nil { + if c.Os != "" { + platform = c.Os + break + } + } + } + } + + resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, layer.Platform(platform), descriptors, p.config.ProgressOutput) + if err != nil { + return "", "", err + } + defer release() + + config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history) + if err != nil { + return "", "", err + } + + imageID, err := p.config.ImageStore.Put(config) + if err != nil { + return "", "", err + } + + manifestDigest = digest.FromBytes(unverifiedManifest.Canonical) + + return imageID, manifestDigest, nil +} + +func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest) (id digest.Digest, manifestDigest digest.Digest, err error) { + manifestDigest, err = schema2ManifestDigest(ref, mfst) + if err != nil { + return "", "", err + } + + target := mfst.Target() + if _, err := p.config.ImageStore.Get(target.Digest); err == nil { + // If the image already exists locally, no need to pull + // anything. + return target.Digest, manifestDigest, nil + } + + var descriptors []xfer.DownloadDescriptor + + // Note that the order of this loop is in the direction of bottom-most + // to top-most, so that the downloads slice gets ordered correctly. 
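+	// Unlike schema1, a schema2 manifest already lists its layers starting
+	// from the base, so a simple forward iteration yields the correct
+	// download order.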
+ for _, d := range mfst.Layers { + layerDescriptor := &v2LayerDescriptor{ + digest: d.Digest, + repo: p.repo, + repoInfo: p.repoInfo, + V2MetadataService: p.V2MetadataService, + src: d, + } + + descriptors = append(descriptors, layerDescriptor) + } + + configChan := make(chan []byte, 1) + configErrChan := make(chan error, 1) + layerErrChan := make(chan error, 1) + downloadsDone := make(chan struct{}) + var cancel func() + ctx, cancel = context.WithCancel(ctx) + defer cancel() + + // Pull the image config + go func() { + configJSON, err := p.pullSchema2Config(ctx, target.Digest) + if err != nil { + configErrChan <- ImageConfigPullError{Err: err} + cancel() + return + } + configChan <- configJSON + }() + + var ( + configJSON []byte // raw serialized image config + downloadedRootFS *image.RootFS // rootFS from registered layers + configRootFS *image.RootFS // rootFS from configuration + release func() // release resources from rootFS download + platform layer.Platform // for LCOW when registering downloaded layers + ) + + // https://github.com/docker/docker/issues/24766 - Err on the side of caution, + // explicitly blocking images intended for linux from the Windows daemon. On + // Windows, we do this before the attempt to download, effectively serialising + // the download slightly slowing it down. We have to do it this way, as + // chances are the download of layers itself would fail due to file names + // which aren't suitable for NTFS. At some point in the future, if a similar + // check to block Windows images being pulled on Linux is implemented, it + // may be necessary to perform the same type of serialisation. + if runtime.GOOS == "windows" { + configJSON, configRootFS, platform, err = receiveConfig(p.config.ImageStore, configChan, configErrChan) + if err != nil { + return "", "", err + } + + if configRootFS == nil { + return "", "", errRootFSInvalid + } + + if len(descriptors) != len(configRootFS.DiffIDs) { + return "", "", errRootFSMismatch + } + + // Populate diff ids in descriptors to avoid downloading foreign layers + // which have been side loaded + for i := range descriptors { + descriptors[i].(*v2LayerDescriptor).diffID = configRootFS.DiffIDs[i] + } + } + + if p.config.DownloadManager != nil { + go func() { + var ( + err error + rootFS image.RootFS + ) + downloadRootFS := *image.NewRootFS() + rootFS, release, err = p.config.DownloadManager.Download(ctx, downloadRootFS, platform, descriptors, p.config.ProgressOutput) + if err != nil { + // Intentionally do not cancel the config download here + // as the error from config download (if there is one) + // is more interesting than the layer download error + layerErrChan <- err + return + } + + downloadedRootFS = &rootFS + close(downloadsDone) + }() + } else { + // We have nothing to download + close(downloadsDone) + } + + if configJSON == nil { + configJSON, configRootFS, _, err = receiveConfig(p.config.ImageStore, configChan, configErrChan) + if err == nil && configRootFS == nil { + err = errRootFSInvalid + } + if err != nil { + cancel() + select { + case <-downloadsDone: + case <-layerErrChan: + } + return "", "", err + } + } + + select { + case <-downloadsDone: + case err = <-layerErrChan: + return "", "", err + } + + if release != nil { + defer release() + } + + if downloadedRootFS != nil { + // The DiffIDs returned in rootFS MUST match those in the config. + // Otherwise the image config could be referencing layers that aren't + // included in the manifest. 
+ if len(downloadedRootFS.DiffIDs) != len(configRootFS.DiffIDs) { + return "", "", errRootFSMismatch + } + + for i := range downloadedRootFS.DiffIDs { + if downloadedRootFS.DiffIDs[i] != configRootFS.DiffIDs[i] { + return "", "", errRootFSMismatch + } + } + } + + imageID, err := p.config.ImageStore.Put(configJSON) + if err != nil { + return "", "", err + } + + return imageID, manifestDigest, nil +} + +func receiveConfig(s ImageConfigStore, configChan <-chan []byte, errChan <-chan error) ([]byte, *image.RootFS, layer.Platform, error) { + select { + case configJSON := <-configChan: + rootfs, platform, err := s.RootFSAndPlatformFromConfig(configJSON) + if err != nil { + return nil, nil, "", err + } + return configJSON, rootfs, platform, nil + case err := <-errChan: + return nil, nil, "", err + // Don't need a case for ctx.Done in the select because cancellation + // will trigger an error in p.pullSchema2ImageConfig. + } +} + +// pullManifestList handles "manifest lists" which point to various +// platform-specific manifests. +func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList) (id digest.Digest, manifestListDigest digest.Digest, err error) { + manifestListDigest, err = schema2ManifestDigest(ref, mfstList) + if err != nil { + return "", "", err + } + + logrus.Debugf("%s resolved to a manifestList object with %d entries; looking for a os/arch match", ref, len(mfstList.Manifests)) + var manifestDigest digest.Digest + for _, manifestDescriptor := range mfstList.Manifests { + // TODO(aaronl): The manifest list spec supports optional + // "features" and "variant" fields. These are not yet used. + // Once they are, their values should be interpreted here. + if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == runtime.GOOS { + manifestDigest = manifestDescriptor.Digest + logrus.Debugf("found match for %s/%s with media type %s, digest %s", runtime.GOOS, runtime.GOARCH, manifestDescriptor.MediaType, manifestDigest.String()) + break + } + } + + if manifestDigest == "" { + errMsg := fmt.Sprintf("no matching manifest for %s/%s in the manifest list entries", runtime.GOOS, runtime.GOARCH) + logrus.Debugf(errMsg) + return "", "", errors.New(errMsg) + } + + manSvc, err := p.repo.Manifests(ctx) + if err != nil { + return "", "", err + } + + manifest, err := manSvc.Get(ctx, manifestDigest) + if err != nil { + return "", "", err + } + + manifestRef, err := reference.WithDigest(reference.TrimNamed(ref), manifestDigest) + if err != nil { + return "", "", err + } + + switch v := manifest.(type) { + case *schema1.SignedManifest: + id, _, err = p.pullSchema1(ctx, manifestRef, v) + if err != nil { + return "", "", err + } + case *schema2.DeserializedManifest: + id, _, err = p.pullSchema2(ctx, manifestRef, v) + if err != nil { + return "", "", err + } + default: + return "", "", errors.New("unsupported manifest format") + } + + return id, manifestListDigest, err +} + +func (p *v2Puller) pullSchema2Config(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) { + blobs := p.repo.Blobs(ctx) + configJSON, err = blobs.Get(ctx, dgst) + if err != nil { + return nil, err + } + + // Verify image config digest + verifier := dgst.Verifier() + if _, err := verifier.Write(configJSON); err != nil { + return nil, err + } + if !verifier.Verified() { + err := fmt.Errorf("image config verification failed for digest %s", dgst) + logrus.Error(err) + return nil, err + } + + return configJSON, nil +} + 
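+// verifyBlobAgainstDigest shows, as a minimal standalone sketch, the
+// digest-verification pattern pullSchema2Config uses above: hash the payload
+// with the digest's verifier and reject a mismatch. The helper name is
+// illustrative only and not part of the upstream file or API.
+func verifyBlobAgainstDigest(dgst digest.Digest, payload []byte) error {
+	verifier := dgst.Verifier()
+	if _, err := verifier.Write(payload); err != nil {
+		return err
+	}
+	if !verifier.Verified() {
+		return fmt.Errorf("content verification failed for digest %s", dgst)
+	}
+	return nil
+}
+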
+// schema2ManifestDigest computes the manifest digest, and, if pulling by +// digest, ensures that it matches the requested digest. +func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) { + _, canonical, err := mfst.Payload() + if err != nil { + return "", err + } + + // If pull by digest, then verify the manifest digest. + if digested, isDigested := ref.(reference.Canonical); isDigested { + verifier := digested.Digest().Verifier() + if _, err := verifier.Write(canonical); err != nil { + return "", err + } + if !verifier.Verified() { + err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest()) + logrus.Error(err) + return "", err + } + return digested.Digest(), nil + } + + return digest.FromBytes(canonical), nil +} + +// allowV1Fallback checks if the error is a possible reason to fallback to v1 +// (even if confirmedV2 has been set already), and if so, wraps the error in +// a fallbackError with confirmedV2 set to false. Otherwise, it returns the +// error unmodified. +func allowV1Fallback(err error) error { + switch v := err.(type) { + case errcode.Errors: + if len(v) != 0 { + if v0, ok := v[0].(errcode.Error); ok && shouldV2Fallback(v0) { + return fallbackError{ + err: err, + confirmedV2: false, + transportOK: true, + } + } + } + case errcode.Error: + if shouldV2Fallback(v) { + return fallbackError{ + err: err, + confirmedV2: false, + transportOK: true, + } + } + case *url.Error: + if v.Err == auth.ErrNoBasicAuthCredentials { + return fallbackError{err: err, confirmedV2: false} + } + } + + return err +} + +func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Named) (m *schema1.Manifest, err error) { + // If pull by digest, then verify the manifest digest. NOTE: It is + // important to do this first, before any other content validation. If the + // digest cannot be verified, don't even bother with those other things. + if digested, isCanonical := ref.(reference.Canonical); isCanonical { + verifier := digested.Digest().Verifier() + if _, err := verifier.Write(signedManifest.Canonical); err != nil { + return nil, err + } + if !verifier.Verified() { + err := fmt.Errorf("image verification failed for digest %s", digested.Digest()) + logrus.Error(err) + return nil, err + } + } + m = &signedManifest.Manifest + + if m.SchemaVersion != 1 { + return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, reference.FamiliarString(ref)) + } + if len(m.FSLayers) != len(m.History) { + return nil, fmt.Errorf("length of history not equal to number of layers for %q", reference.FamiliarString(ref)) + } + if len(m.FSLayers) == 0 { + return nil, fmt.Errorf("no FSLayers in manifest for %q", reference.FamiliarString(ref)) + } + return m, nil +} + +// fixManifestLayers removes repeated layers from the manifest and checks the +// correctness of the parent chain. +func fixManifestLayers(m *schema1.Manifest) error { + imgs := make([]*image.V1Image, len(m.FSLayers)) + for i := range m.FSLayers { + img := &image.V1Image{} + + if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil { + return err + } + + imgs[i] = img + if err := v1.ValidateID(img.ID); err != nil { + return err + } + } + + if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" { + // Windows base layer can point to a base layer parent that is not in manifest. 
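+		// On non-Windows platforms a base layer that names a parent means
+		// the chain in the manifest is incomplete, so reject it here.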
+ return errors.New("invalid parent ID in the base layer of the image") + } + + // check general duplicates to error instead of a deadlock + idmap := make(map[string]struct{}) + + var lastID string + for _, img := range imgs { + // skip IDs that appear after each other, we handle those later + if _, exists := idmap[img.ID]; img.ID != lastID && exists { + return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID) + } + lastID = img.ID + idmap[lastID] = struct{}{} + } + + // backwards loop so that we keep the remaining indexes after removing items + for i := len(imgs) - 2; i >= 0; i-- { + if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue + m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) + m.History = append(m.History[:i], m.History[i+1:]...) + } else if imgs[i].Parent != imgs[i+1].ID { + return fmt.Errorf("Invalid parent ID. Expected %v, got %v.", imgs[i+1].ID, imgs[i].Parent) + } + } + + return nil +} + +func createDownloadFile() (*os.File, error) { + return ioutil.TempFile("", "GetImageBlob") +} diff --git a/vendor/github.com/moby/moby/distribution/pull_v2_test.go b/vendor/github.com/moby/moby/distribution/pull_v2_test.go new file mode 100644 index 000000000..df93c1ef8 --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/pull_v2_test.go @@ -0,0 +1,183 @@ +package distribution + +import ( + "encoding/json" + "io/ioutil" + "reflect" + "runtime" + "strings" + "testing" + + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" + "github.com/opencontainers/go-digest" +) + +// TestFixManifestLayers checks that fixManifestLayers removes a duplicate +// layer, and that it makes no changes to the manifest when called a second +// time, after the duplicate is removed. +func TestFixManifestLayers(t *testing.T) { + duplicateLayerManifest := schema1.Manifest{ + FSLayers: []schema1.FSLayer{ + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + }, + History: []schema1.History{ + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: 
"{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, + }, + } + + duplicateLayerManifestExpectedOutput := schema1.Manifest{ + FSLayers: []schema1.FSLayer{ + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + }, + History: []schema1.History{ + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, + }, + } + + if err := fixManifestLayers(&duplicateLayerManifest); err != nil { + t.Fatalf("unexpected error from fixManifestLayers: %v", err) + } + + if !reflect.DeepEqual(duplicateLayerManifest, duplicateLayerManifestExpectedOutput) { + t.Fatal("incorrect output from fixManifestLayers on duplicate layer manifest") + } + + // Run fixManifestLayers again and confirm that it doesn't change the + // manifest (which no longer has duplicate layers). 
+ if err := fixManifestLayers(&duplicateLayerManifest); err != nil { + t.Fatalf("unexpected error from fixManifestLayers: %v", err) + } + + if !reflect.DeepEqual(duplicateLayerManifest, duplicateLayerManifestExpectedOutput) { + t.Fatal("incorrect output from fixManifestLayers on duplicate layer manifest (second pass)") + } +} + +// TestFixManifestLayersBaseLayerParent makes sure that fixManifestLayers fails +// if the base layer configuration specifies a parent. +func TestFixManifestLayersBaseLayerParent(t *testing.T) { + // TODO Windows: Fix this unit text + if runtime.GOOS == "windows" { + t.Skip("Needs fixing on Windows") + } + duplicateLayerManifest := schema1.Manifest{ + FSLayers: []schema1.FSLayer{ + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + }, + History: []schema1.History{ + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: 
"{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"parent\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" 
./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, + }, + } + + if err := fixManifestLayers(&duplicateLayerManifest); err == nil || !strings.Contains(err.Error(), "invalid parent ID in the base layer of the image") { + t.Fatalf("expected an invalid parent ID error from fixManifestLayers") + } +} + +// TestFixManifestLayersBadParent makes sure that fixManifestLayers fails +// if an image configuration specifies a parent that doesn't directly follow +// that (deduplicated) image in the image history. +func TestFixManifestLayersBadParent(t *testing.T) { + duplicateLayerManifest := schema1.Manifest{ + FSLayers: []schema1.FSLayer{ + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + }, + History: []schema1.History{ + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ac3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ac3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: 
"{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, + }, + } + + if err := fixManifestLayers(&duplicateLayerManifest); err == nil || !strings.Contains(err.Error(), "Invalid parent ID.") { + t.Fatalf("expected an invalid parent ID error from fixManifestLayers") + } +} + +// TestValidateManifest verifies the validateManifest function +func TestValidateManifest(t *testing.T) { + // TODO Windows: Fix this unit text + if runtime.GOOS == "windows" { + t.Skip("Needs fixing on Windows") + } + expectedDigest, err := reference.ParseNormalizedNamed("repo@sha256:02fee8c3220ba806531f606525eceb83f4feb654f62b207191b1c9209188dedd") + if err != nil { + t.Fatal("could not parse reference") + } + expectedFSLayer0 := digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") + + // Good manifest + + goodManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/good_manifest") + if err != nil { + t.Fatal("error reading fixture:", err) + } + + var goodSignedManifest schema1.SignedManifest + err = json.Unmarshal(goodManifestBytes, &goodSignedManifest) + if err != nil { + t.Fatal("error unmarshaling manifest:", err) + } + + verifiedManifest, err := verifySchema1Manifest(&goodSignedManifest, expectedDigest) + if err != nil { + t.Fatal("validateManifest failed:", err) + } + + if verifiedManifest.FSLayers[0].BlobSum != expectedFSLayer0 { + t.Fatal("unexpected FSLayer in good manifest") + } + + // "Extra data" manifest + + extraDataManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/extra_data_manifest") + if err != nil { + 
t.Fatal("error reading fixture:", err) + } + + var extraDataSignedManifest schema1.SignedManifest + err = json.Unmarshal(extraDataManifestBytes, &extraDataSignedManifest) + if err != nil { + t.Fatal("error unmarshaling manifest:", err) + } + + verifiedManifest, err = verifySchema1Manifest(&extraDataSignedManifest, expectedDigest) + if err != nil { + t.Fatal("validateManifest failed:", err) + } + + if verifiedManifest.FSLayers[0].BlobSum != expectedFSLayer0 { + t.Fatal("unexpected FSLayer in extra data manifest") + } + + // Bad manifest + + badManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/bad_manifest") + if err != nil { + t.Fatal("error reading fixture:", err) + } + + var badSignedManifest schema1.SignedManifest + err = json.Unmarshal(badManifestBytes, &badSignedManifest) + if err != nil { + t.Fatal("error unmarshaling manifest:", err) + } + + verifiedManifest, err = verifySchema1Manifest(&badSignedManifest, expectedDigest) + if err == nil || !strings.HasPrefix(err.Error(), "image verification failed for digest") { + t.Fatal("expected validateManifest to fail with digest error") + } +} diff --git a/vendor/github.com/moby/moby/distribution/pull_v2_unix.go b/vendor/github.com/moby/moby/distribution/pull_v2_unix.go new file mode 100644 index 000000000..45a7a0c15 --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/pull_v2_unix.go @@ -0,0 +1,13 @@ +// +build !windows + +package distribution + +import ( + "github.com/docker/distribution" + "github.com/docker/distribution/context" +) + +func (ld *v2LayerDescriptor) open(ctx context.Context) (distribution.ReadSeekCloser, error) { + blobs := ld.repo.Blobs(ctx) + return blobs.Open(ctx, ld.digest) +} diff --git a/vendor/github.com/moby/moby/distribution/pull_v2_windows.go b/vendor/github.com/moby/moby/distribution/pull_v2_windows.go new file mode 100644 index 000000000..543ecc10e --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/pull_v2_windows.go @@ -0,0 +1,57 @@ +// +build windows + +package distribution + +import ( + "net/http" + "os" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/registry/client/transport" +) + +var _ distribution.Describable = &v2LayerDescriptor{} + +func (ld *v2LayerDescriptor) Descriptor() distribution.Descriptor { + if ld.src.MediaType == schema2.MediaTypeForeignLayer && len(ld.src.URLs) > 0 { + return ld.src + } + return distribution.Descriptor{} +} + +func (ld *v2LayerDescriptor) open(ctx context.Context) (distribution.ReadSeekCloser, error) { + blobs := ld.repo.Blobs(ctx) + rsc, err := blobs.Open(ctx, ld.digest) + + if len(ld.src.URLs) == 0 { + return rsc, err + } + + // We're done if the registry has this blob. + if err == nil { + // Seek does an HTTP GET. If it succeeds, the blob really is accessible. + if _, err = rsc.Seek(0, os.SEEK_SET); err == nil { + return rsc, nil + } + rsc.Close() + } + + // Find the first URL that results in a 200 result code. + for _, url := range ld.src.URLs { + logrus.Debugf("Pulling %v from foreign URL %v", ld.digest, url) + rsc = transport.NewHTTPReadSeeker(http.DefaultClient, url, nil) + + // Seek does an HTTP GET. If it succeeds, the blob really is accessible. 
+ _, err = rsc.Seek(0, os.SEEK_SET) + if err == nil { + break + } + logrus.Debugf("Download for %v failed: %v", ld.digest, err) + rsc.Close() + rsc = nil + } + return rsc, err +} diff --git a/vendor/github.com/moby/moby/distribution/push.go b/vendor/github.com/moby/moby/distribution/push.go new file mode 100644 index 000000000..395e4d151 --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/push.go @@ -0,0 +1,186 @@ +package distribution + +import ( + "bufio" + "compress/gzip" + "fmt" + "io" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +// Pusher is an interface that abstracts pushing for different API versions. +type Pusher interface { + // Push tries to push the image configured at the creation of Pusher. + // Push returns an error if any; a fallbackError return value indicates that the push may be retried on the next configured endpoint. + // + // TODO(tiborvass): have Push() take a reference to repository + tag, so that the pusher itself is repository-agnostic. + Push(ctx context.Context) error +} + +const compressionBufSize = 32768 + +// NewPusher creates a new Pusher interface that will push to either a v1 or v2 +// registry. The endpoint argument contains a Version field that determines +// whether a v1 or v2 pusher will be created. The other parameters are passed +// through to the underlying pusher implementation for use during the actual +// push operation. +func NewPusher(ref reference.Named, endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePushConfig *ImagePushConfig) (Pusher, error) { + switch endpoint.Version { + case registry.APIVersion2: + return &v2Pusher{ + v2MetadataService: metadata.NewV2MetadataService(imagePushConfig.MetadataStore), + ref: ref, + endpoint: endpoint, + repoInfo: repoInfo, + config: imagePushConfig, + }, nil + case registry.APIVersion1: + return &v1Pusher{ + v1IDService: metadata.NewV1IDService(imagePushConfig.MetadataStore), + ref: ref, + endpoint: endpoint, + repoInfo: repoInfo, + config: imagePushConfig, + }, nil + } + return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL) +} + +// Push initiates a push operation on ref. +// ref is the specific variant of the image to be pushed. +// If no tag is provided, all tags will be pushed. +func Push(ctx context.Context, ref reference.Named, imagePushConfig *ImagePushConfig) error { + // FIXME: Allow the current push to be interrupted when a new push of the same image is started. + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := imagePushConfig.RegistryService.ResolveRepository(ref) + if err != nil { + return err + } + + endpoints, err := imagePushConfig.RegistryService.LookupPushEndpoints(reference.Domain(repoInfo.Name)) + if err != nil { + return err + } + + progress.Messagef(imagePushConfig.ProgressOutput, "", "The push refers to a repository [%s]", repoInfo.Name.Name()) + + associations := imagePushConfig.ReferenceStore.ReferencesByName(repoInfo.Name) + if len(associations) == 0 { + return fmt.Errorf("An image does not exist locally with the tag: %s", reference.FamiliarName(repoInfo.Name)) + } + + var ( + lastErr error + + // confirmedV2 is set to true if a push attempt managed to + // confirm that it was talking to a v2 registry. This will + // prevent fallback to the v1 protocol.
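+ // Each pusher reports this back to the loop below through the confirmedV2 field of fallbackError.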
+ confirmedV2 bool + + // confirmedTLSRegistries is a map indicating which registries + // are known to be using TLS. There should never be a plaintext + // retry for any of these. + confirmedTLSRegistries = make(map[string]struct{}) + ) + + for _, endpoint := range endpoints { + if imagePushConfig.RequireSchema2 && endpoint.Version == registry.APIVersion1 { + continue + } + if confirmedV2 && endpoint.Version == registry.APIVersion1 { + logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL) + continue + } + + if endpoint.URL.Scheme != "https" { + if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS { + logrus.Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL) + continue + } + } + + logrus.Debugf("Trying to push %s to %s %s", repoInfo.Name.Name(), endpoint.URL, endpoint.Version) + + pusher, err := NewPusher(ref, endpoint, repoInfo, imagePushConfig) + if err != nil { + lastErr = err + continue + } + if err := pusher.Push(ctx); err != nil { + // Was this push cancelled? If so, don't try to fall + // back. + select { + case <-ctx.Done(): + default: + if fallbackErr, ok := err.(fallbackError); ok { + confirmedV2 = confirmedV2 || fallbackErr.confirmedV2 + if fallbackErr.transportOK && endpoint.URL.Scheme == "https" { + confirmedTLSRegistries[endpoint.URL.Host] = struct{}{} + } + err = fallbackErr.err + lastErr = err + logrus.Infof("Attempting next endpoint for push after error: %v", err) + continue + } + } + + logrus.Errorf("Not continuing with push after error: %v", err) + return err + } + + imagePushConfig.ImageEventLogger(reference.FamiliarString(ref), reference.FamiliarName(repoInfo.Name), "push") + return nil + } + + if lastErr == nil { + lastErr = fmt.Errorf("no endpoints found for %s", repoInfo.Name.Name()) + } + return lastErr +} + +// compress returns an io.ReadCloser which will supply a compressed version of +// the provided Reader. The caller must close the ReadCloser after reading the +// compressed data. +// +// Note that this function returns a reader instead of taking a writer as an +// argument so that it can be used with httpBlobWriter's ReadFrom method. +// Using httpBlobWriter's Write method would send a PATCH request for every +// Write call. +// +// The second return value is a channel that gets closed when the goroutine +// is finished. This allows the caller to make sure the goroutine finishes +// before it releases any resources connected with the reader that was +// passed in. +func compress(in io.Reader) (io.ReadCloser, chan struct{}) { + compressionDone := make(chan struct{}) + + pipeReader, pipeWriter := io.Pipe() + // Use a bufio.Writer to avoid excessive chunking in HTTP request. 
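+ // Data flows in -> gzip -> bufWriter -> pipeWriter; the caller reads the compressed stream from the returned pipeReader.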
+ bufWriter := bufio.NewWriterSize(pipeWriter, compressionBufSize) + compressor := gzip.NewWriter(bufWriter) + + go func() { + _, err := io.Copy(compressor, in) + if err == nil { + err = compressor.Close() + } + if err == nil { + err = bufWriter.Flush() + } + if err != nil { + pipeWriter.CloseWithError(err) + } else { + pipeWriter.Close() + } + close(compressionDone) + }() + + return pipeReader, compressionDone +} diff --git a/vendor/github.com/moby/moby/distribution/push_v1.go b/vendor/github.com/moby/moby/distribution/push_v1.go new file mode 100644 index 000000000..431faaf28 --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/push_v1.go @@ -0,0 +1,457 @@ +package distribution + +import ( + "fmt" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/registry" + "github.com/opencontainers/go-digest" + "golang.org/x/net/context" +) + +type v1Pusher struct { + v1IDService *metadata.V1IDService + endpoint registry.APIEndpoint + ref reference.Named + repoInfo *registry.RepositoryInfo + config *ImagePushConfig + session *registry.Session +} + +func (p *v1Pusher) Push(ctx context.Context) error { + tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name) + if err != nil { + return err + } + // Adds Docker-specific headers as well as user-specified headers (metaHeaders) + tr := transport.NewTransport( + // TODO(tiborvass): was NoTimeout + registry.NewTransport(tlsConfig), + registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)..., + ) + client := registry.HTTPClient(tr) + v1Endpoint, err := p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders) + if err != nil { + logrus.Debugf("Could not get v1 endpoint: %v", err) + return fallbackError{err: err} + } + p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint) + if err != nil { + // TODO(dmcgowan): Check if should fallback + return fallbackError{err: err} + } + if err := p.pushRepository(ctx); err != nil { + // TODO(dmcgowan): Check if should fallback + return err + } + return nil +} + +// v1Image exposes the configuration, filesystem layer ID, and a v1 ID for an +// image being pushed to a v1 registry. +type v1Image interface { + Config() []byte + Layer() layer.Layer + V1ID() string +} + +type v1ImageCommon struct { + layer layer.Layer + config []byte + v1ID string +} + +func (common *v1ImageCommon) Config() []byte { + return common.config +} + +func (common *v1ImageCommon) V1ID() string { + return common.v1ID +} + +func (common *v1ImageCommon) Layer() layer.Layer { + return common.layer +} + +// v1TopImage defines a runnable (top layer) image being pushed to a v1 +// registry. 
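+// Its v1 ID is derived from the image ID's digest, and its parent, when present, is a v1DependencyImage.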
+type v1TopImage struct { + v1ImageCommon + imageID image.ID +} + +func newV1TopImage(imageID image.ID, img *image.Image, l layer.Layer, parent *v1DependencyImage) (*v1TopImage, error) { + v1ID := imageID.Digest().Hex() + parentV1ID := "" + if parent != nil { + parentV1ID = parent.V1ID() + } + + config, err := v1.MakeV1ConfigFromConfig(img, v1ID, parentV1ID, false) + if err != nil { + return nil, err + } + + return &v1TopImage{ + v1ImageCommon: v1ImageCommon{ + v1ID: v1ID, + config: config, + layer: l, + }, + imageID: imageID, + }, nil +} + +// v1DependencyImage defines a dependency layer being pushed to a v1 registry. +type v1DependencyImage struct { + v1ImageCommon +} + +func newV1DependencyImage(l layer.Layer, parent *v1DependencyImage) *v1DependencyImage { + v1ID := digest.Digest(l.ChainID()).Hex() + + var config string + if parent != nil { + config = fmt.Sprintf(`{"id":"%s","parent":"%s"}`, v1ID, parent.V1ID()) + } else { + config = fmt.Sprintf(`{"id":"%s"}`, v1ID) + } + return &v1DependencyImage{ + v1ImageCommon: v1ImageCommon{ + v1ID: v1ID, + config: []byte(config), + layer: l, + }, + } +} + +// Retrieve all the images to be uploaded in the correct order +func (p *v1Pusher) getImageList() (imageList []v1Image, tagsByImage map[image.ID][]string, referencedLayers []PushLayer, err error) { + tagsByImage = make(map[image.ID][]string) + + // Ignore digest references + if _, isCanonical := p.ref.(reference.Canonical); isCanonical { + return + } + + tagged, isTagged := p.ref.(reference.NamedTagged) + if isTagged { + // Push a specific tag + var imgID image.ID + var dgst digest.Digest + dgst, err = p.config.ReferenceStore.Get(p.ref) + if err != nil { + return + } + imgID = image.IDFromDigest(dgst) + + imageList, err = p.imageListForTag(imgID, nil, &referencedLayers) + if err != nil { + return + } + + tagsByImage[imgID] = []string{tagged.Tag()} + + return + } + + imagesSeen := make(map[digest.Digest]struct{}) + dependenciesSeen := make(map[layer.ChainID]*v1DependencyImage) + + associations := p.config.ReferenceStore.ReferencesByName(p.ref) + for _, association := range associations { + if tagged, isTagged = association.Ref.(reference.NamedTagged); !isTagged { + // Ignore digest references. + continue + } + + imgID := image.IDFromDigest(association.ID) + tagsByImage[imgID] = append(tagsByImage[imgID], tagged.Tag()) + + if _, present := imagesSeen[association.ID]; present { + // Skip generating image list for already-seen image + continue + } + imagesSeen[association.ID] = struct{}{} + + imageListForThisTag, err := p.imageListForTag(imgID, dependenciesSeen, &referencedLayers) + if err != nil { + return nil, nil, nil, err + } + + // append to main image list + imageList = append(imageList, imageListForThisTag...)
+ } + if len(imageList) == 0 { + return nil, nil, nil, fmt.Errorf("No images found for the requested repository / tag") + } + logrus.Debugf("Image list: %v", imageList) + logrus.Debugf("Tags by image: %v", tagsByImage) + + return +} + +func (p *v1Pusher) imageListForTag(imgID image.ID, dependenciesSeen map[layer.ChainID]*v1DependencyImage, referencedLayers *[]PushLayer) (imageListForThisTag []v1Image, err error) { + ics, ok := p.config.ImageStore.(*imageConfigStore) + if !ok { + return nil, fmt.Errorf("only image store images supported for v1 push") + } + img, err := ics.Store.Get(imgID) + if err != nil { + return nil, err + } + + topLayerID := img.RootFS.ChainID() + + pl, err := p.config.LayerStore.Get(topLayerID) + *referencedLayers = append(*referencedLayers, pl) + if err != nil { + return nil, fmt.Errorf("failed to get top layer from image: %v", err) + } + + // V1 push is deprecated, only support existing layerstore layers + lsl, ok := pl.(*storeLayer) + if !ok { + return nil, fmt.Errorf("only layer store layers supported for v1 push") + } + l := lsl.Layer + + dependencyImages, parent := generateDependencyImages(l.Parent(), dependenciesSeen) + + topImage, err := newV1TopImage(imgID, img, l, parent) + if err != nil { + return nil, err + } + + imageListForThisTag = append(dependencyImages, topImage) + + return +} + +func generateDependencyImages(l layer.Layer, dependenciesSeen map[layer.ChainID]*v1DependencyImage) (imageListForThisTag []v1Image, parent *v1DependencyImage) { + if l == nil { + return nil, nil + } + + imageListForThisTag, parent = generateDependencyImages(l.Parent(), dependenciesSeen) + + if dependenciesSeen != nil { + if dependencyImage, present := dependenciesSeen[l.ChainID()]; present { + // This layer is already on the list, we can ignore it + // and all its parents. + return imageListForThisTag, dependencyImage + } + } + + dependencyImage := newV1DependencyImage(l, parent) + imageListForThisTag = append(imageListForThisTag, dependencyImage) + + if dependenciesSeen != nil { + dependenciesSeen[l.ChainID()] = dependencyImage + } + + return imageListForThisTag, dependencyImage +} + +// createImageIndex returns an index of an image's layer IDs and tags. +func createImageIndex(images []v1Image, tags map[image.ID][]string) []*registry.ImgData { + var imageIndex []*registry.ImgData + for _, img := range images { + v1ID := img.V1ID() + + if topImage, isTopImage := img.(*v1TopImage); isTopImage { + if tags, hasTags := tags[topImage.imageID]; hasTags { + // If an image has tags you must add an entry in the image index + // for each tag + for _, tag := range tags { + imageIndex = append(imageIndex, ®istry.ImgData{ + ID: v1ID, + Tag: tag, + }) + } + continue + } + } + + // If the image does not have a tag it still needs to be sent to the + // registry with an empty tag so that it is associated with the repository + imageIndex = append(imageIndex, ®istry.ImgData{ + ID: v1ID, + Tag: "", + }) + } + return imageIndex +} + +// lookupImageOnEndpoint checks the specified endpoint to see if an image exists +// and if it is absent then it sends the image id to the channel to be pushed. 
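+// It is run as a worker goroutine; pushImageToEndpoint starts up to five of these per endpoint.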
+func (p *v1Pusher) lookupImageOnEndpoint(wg *sync.WaitGroup, endpoint string, images chan v1Image, imagesToPush chan string) { + defer wg.Done() + for image := range images { + v1ID := image.V1ID() + truncID := stringid.TruncateID(image.Layer().DiffID().String()) + if err := p.session.LookupRemoteImage(v1ID, endpoint); err != nil { + logrus.Errorf("Error in LookupRemoteImage: %s", err) + imagesToPush <- v1ID + progress.Update(p.config.ProgressOutput, truncID, "Waiting") + } else { + progress.Update(p.config.ProgressOutput, truncID, "Already exists") + } + } +} + +func (p *v1Pusher) pushImageToEndpoint(ctx context.Context, endpoint string, imageList []v1Image, tags map[image.ID][]string, repo *registry.RepositoryData) error { + workerCount := len(imageList) + // start a maximum of 5 workers to check if images exist on the specified endpoint. + if workerCount > 5 { + workerCount = 5 + } + var ( + wg = &sync.WaitGroup{} + imageData = make(chan v1Image, workerCount*2) + imagesToPush = make(chan string, workerCount*2) + pushes = make(chan map[string]struct{}, 1) + ) + for i := 0; i < workerCount; i++ { + wg.Add(1) + go p.lookupImageOnEndpoint(wg, endpoint, imageData, imagesToPush) + } + // start a goroutine that consumes the images to push + go func() { + shouldPush := make(map[string]struct{}) + for id := range imagesToPush { + shouldPush[id] = struct{}{} + } + pushes <- shouldPush + }() + for _, v1Image := range imageList { + imageData <- v1Image + } + // close the channel to notify the workers that there will be no more images to check. + close(imageData) + wg.Wait() + close(imagesToPush) + // wait for all the images that require pushes to be collected into a consumable map. + shouldPush := <-pushes + // finish by pushing any images and tags to the endpoint. The order in which the images are + // pushed matters, which is why we still iterate over the ordered list of imageIDs. + for _, img := range imageList { + v1ID := img.V1ID() + if _, push := shouldPush[v1ID]; push { + if _, err := p.pushImage(ctx, img, endpoint); err != nil { + // FIXME: Continue on error? + return err + } + } + if topImage, isTopImage := img.(*v1TopImage); isTopImage { + for _, tag := range tags[topImage.imageID] { + progress.Messagef(p.config.ProgressOutput, "", "Pushing tag for rev [%s] on {%s}", stringid.TruncateID(v1ID), endpoint+"repositories/"+reference.Path(p.repoInfo.Name)+"/tags/"+tag) + if err := p.session.PushRegistryTag(p.repoInfo.Name, v1ID, tag, endpoint); err != nil { + return err + } + } + } + } + return nil +} + +// pushRepository pushes layers that do not already exist on the registry. +func (p *v1Pusher) pushRepository(ctx context.Context) error { + imgList, tags, referencedLayers, err := p.getImageList() + defer func() { + for _, l := range referencedLayers { + l.Release() + } + }() + if err != nil { + return err + } + + imageIndex := createImageIndex(imgList, tags) + for _, data := range imageIndex { + logrus.Debugf("Pushing ID: %s with Tag: %s", data.ID, data.Tag) + } + + // Register all the images in a repository with the registry + // If an image is not in this list it will not be associated with the repository + repoData, err := p.session.PushImageJSONIndex(p.repoInfo.Name, imageIndex, false, nil) + if err != nil { + return err + } + // push the repository to each of the endpoints only if it does not exist.
+ for _, endpoint := range repoData.Endpoints { + if err := p.pushImageToEndpoint(ctx, endpoint, imgList, tags, repoData); err != nil { + return err + } + } + _, err = p.session.PushImageJSONIndex(p.repoInfo.Name, imageIndex, true, repoData.Endpoints) + return err +} + +func (p *v1Pusher) pushImage(ctx context.Context, v1Image v1Image, ep string) (checksum string, err error) { + l := v1Image.Layer() + v1ID := v1Image.V1ID() + truncID := stringid.TruncateID(l.DiffID().String()) + + jsonRaw := v1Image.Config() + progress.Update(p.config.ProgressOutput, truncID, "Pushing") + + // General rule is to use ID for graph accesses and compatibilityID for + // calls to session.registry() + imgData := ®istry.ImgData{ + ID: v1ID, + } + + // Send the json + if err := p.session.PushImageJSONRegistry(imgData, jsonRaw, ep); err != nil { + if err == registry.ErrAlreadyExists { + progress.Update(p.config.ProgressOutput, truncID, "Image already pushed, skipping") + return "", nil + } + return "", err + } + + arch, err := l.TarStream() + if err != nil { + return "", err + } + defer arch.Close() + + // don't care if this fails; best effort + size, _ := l.DiffSize() + + // Send the layer + logrus.Debugf("rendered layer for %s of [%d] size", v1ID, size) + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, arch), p.config.ProgressOutput, size, truncID, "Pushing") + defer reader.Close() + + checksum, checksumPayload, err := p.session.PushImageLayerRegistry(v1ID, reader, ep, jsonRaw) + if err != nil { + return "", err + } + imgData.Checksum = checksum + imgData.ChecksumPayload = checksumPayload + // Send the checksum + if err := p.session.PushImageChecksumRegistry(imgData, ep); err != nil { + return "", err + } + + if err := p.v1IDService.Set(v1ID, p.repoInfo.Index.Name, l.DiffID()); err != nil { + logrus.Warnf("Could not set v1 ID mapping: %v", err) + } + + progress.Update(p.config.ProgressOutput, truncID, "Image successfully pushed") + return imgData.Checksum, nil +} diff --git a/vendor/github.com/moby/moby/distribution/push_v2.go b/vendor/github.com/moby/moby/distribution/push_v2.go new file mode 100644 index 000000000..ffc7d6810 --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/push_v2.go @@ -0,0 +1,691 @@ +package distribution + +import ( + "errors" + "fmt" + "io" + "runtime" + "sort" + "strings" + "sync" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client" + apitypes "github.com/docker/docker/api/types" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/registry" + "github.com/opencontainers/go-digest" +) + +const ( + smallLayerMaximumSize = 100 * (1 << 10) // 100KB + middleLayerMaximumSize = 10 * (1 << 20) // 10MB +) + +type v2Pusher struct { + v2MetadataService metadata.V2MetadataService + ref reference.Named + endpoint registry.APIEndpoint + repoInfo *registry.RepositoryInfo + config *ImagePushConfig + repo distribution.Repository + + // pushState is state built by the Upload functions. 
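+ // It is shared, via a pointer, by every per-layer v2PushDescriptor created for this push.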
+ pushState pushState +} + +type pushState struct { + sync.Mutex + // remoteLayers is the set of layers known to exist on the remote side. + // This avoids redundant queries when pushing multiple tags that + // involve the same layers. It is also used to fill in digest and size + // information when building the manifest. + remoteLayers map[layer.DiffID]distribution.Descriptor + // confirmedV2 is set to true if we confirm we're talking to a v2 + // registry. This is used to limit fallbacks to the v1 protocol. + confirmedV2 bool +} + +func (p *v2Pusher) Push(ctx context.Context) (err error) { + p.pushState.remoteLayers = make(map[layer.DiffID]distribution.Descriptor) + + p.repo, p.pushState.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull") + if err != nil { + logrus.Debugf("Error getting v2 registry: %v", err) + return err + } + + if err = p.pushV2Repository(ctx); err != nil { + if continueOnError(err) { + return fallbackError{ + err: err, + confirmedV2: p.pushState.confirmedV2, + transportOK: true, + } + } + } + return err +} + +func (p *v2Pusher) pushV2Repository(ctx context.Context) (err error) { + if namedTagged, isNamedTagged := p.ref.(reference.NamedTagged); isNamedTagged { + imageID, err := p.config.ReferenceStore.Get(p.ref) + if err != nil { + return fmt.Errorf("tag does not exist: %s", reference.FamiliarString(p.ref)) + } + + return p.pushV2Tag(ctx, namedTagged, imageID) + } + + if !reference.IsNameOnly(p.ref) { + return errors.New("cannot push a digest reference") + } + + // Push all tags + pushed := 0 + for _, association := range p.config.ReferenceStore.ReferencesByName(p.ref) { + if namedTagged, isNamedTagged := association.Ref.(reference.NamedTagged); isNamedTagged { + pushed++ + if err := p.pushV2Tag(ctx, namedTagged, association.ID); err != nil { + return err + } + } + } + + if pushed == 0 { + return fmt.Errorf("no tags to push for %s", reference.FamiliarName(p.repoInfo.Name)) + } + + return nil +} + +func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, id digest.Digest) error { + logrus.Debugf("Pushing repository: %s", reference.FamiliarString(ref)) + + imgConfig, err := p.config.ImageStore.Get(id) + if err != nil { + return fmt.Errorf("could not find image from tag %s: %v", reference.FamiliarString(ref), err) + } + + rootfs, _, err := p.config.ImageStore.RootFSAndPlatformFromConfig(imgConfig) + if err != nil { + return fmt.Errorf("unable to get rootfs for image %s: %s", reference.FamiliarString(ref), err) + } + + l, err := p.config.LayerStore.Get(rootfs.ChainID()) + if err != nil { + return fmt.Errorf("failed to get top layer from image: %v", err) + } + defer l.Release() + + hmacKey, err := metadata.ComputeV2MetadataHMACKey(p.config.AuthConfig) + if err != nil { + return fmt.Errorf("failed to compute hmac key of auth config: %v", err) + } + + var descriptors []xfer.UploadDescriptor + + descriptorTemplate := v2PushDescriptor{ + v2MetadataService: p.v2MetadataService, + hmacKey: hmacKey, + repoInfo: p.repoInfo.Name, + ref: p.ref, + endpoint: p.endpoint, + repo: p.repo, + pushState: &p.pushState, + } + + // Loop bounds condition is to avoid pushing the base layer on Windows.
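+ // Each iteration copies the template, binds it to the current layer, and steps down the chain via l.Parent().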
+ for range rootfs.DiffIDs { + descriptor := descriptorTemplate + descriptor.layer = l + descriptor.checkedDigests = make(map[digest.Digest]struct{}) + descriptors = append(descriptors, &descriptor) + + l = l.Parent() + } + + if err := p.config.UploadManager.Upload(ctx, descriptors, p.config.ProgressOutput); err != nil { + return err + } + + // Try schema2 first + builder := schema2.NewManifestBuilder(p.repo.Blobs(ctx), p.config.ConfigMediaType, imgConfig) + manifest, err := manifestFromBuilder(ctx, builder, descriptors) + if err != nil { + return err + } + + manSvc, err := p.repo.Manifests(ctx) + if err != nil { + return err + } + + putOptions := []distribution.ManifestServiceOption{distribution.WithTag(ref.Tag())} + if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil { + if runtime.GOOS == "windows" || p.config.TrustKey == nil || p.config.RequireSchema2 { + logrus.Warnf("failed to upload schema2 manifest: %v", err) + return err + } + + logrus.Warnf("failed to upload schema2 manifest: %v - falling back to schema1", err) + + manifestRef, err := reference.WithTag(p.repo.Named(), ref.Tag()) + if err != nil { + return err + } + builder = schema1.NewConfigManifestBuilder(p.repo.Blobs(ctx), p.config.TrustKey, manifestRef, imgConfig) + manifest, err = manifestFromBuilder(ctx, builder, descriptors) + if err != nil { + return err + } + + if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil { + return err + } + } + + var canonicalManifest []byte + + switch v := manifest.(type) { + case *schema1.SignedManifest: + canonicalManifest = v.Canonical + case *schema2.DeserializedManifest: + _, canonicalManifest, err = v.Payload() + if err != nil { + return err + } + } + + manifestDigest := digest.FromBytes(canonicalManifest) + progress.Messagef(p.config.ProgressOutput, "", "%s: digest: %s size: %d", ref.Tag(), manifestDigest, len(canonicalManifest)) + + if err := addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil { + return err + } + + // Signal digest to the trust client so it can sign the + // push, if appropriate. + progress.Aux(p.config.ProgressOutput, apitypes.PushResult{Tag: ref.Tag(), Digest: manifestDigest.String(), Size: len(canonicalManifest)}) + + return nil +} + +func manifestFromBuilder(ctx context.Context, builder distribution.ManifestBuilder, descriptors []xfer.UploadDescriptor) (distribution.Manifest, error) { + // descriptors is in reverse order; iterate backwards to get references + // appended in the right order. 
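+ // (The upload phase built the slice starting from the top layer, so the base layer is appended first here.)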
+ for i := len(descriptors) - 1; i >= 0; i-- { + if err := builder.AppendReference(descriptors[i].(*v2PushDescriptor)); err != nil { + return nil, err + } + } + + return builder.Build(ctx) +} + +type v2PushDescriptor struct { + layer PushLayer + v2MetadataService metadata.V2MetadataService + hmacKey []byte + repoInfo reference.Named + ref reference.Named + endpoint registry.APIEndpoint + repo distribution.Repository + pushState *pushState + remoteDescriptor distribution.Descriptor + // a set of digests whose presence has been checked in a target repository + checkedDigests map[digest.Digest]struct{} +} + +func (pd *v2PushDescriptor) Key() string { + return "v2push:" + pd.ref.Name() + " " + pd.layer.DiffID().String() +} + +func (pd *v2PushDescriptor) ID() string { + return stringid.TruncateID(pd.layer.DiffID().String()) +} + +func (pd *v2PushDescriptor) DiffID() layer.DiffID { + return pd.layer.DiffID() +} + +func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) { + // Skip foreign layers unless this registry allows nondistributable artifacts. + if !pd.endpoint.AllowNondistributableArtifacts { + if fs, ok := pd.layer.(distribution.Describable); ok { + if d := fs.Descriptor(); len(d.URLs) > 0 { + progress.Update(progressOutput, pd.ID(), "Skipped foreign layer") + return d, nil + } + } + } + + diffID := pd.DiffID() + + pd.pushState.Lock() + if descriptor, ok := pd.pushState.remoteLayers[diffID]; ok { + // it is already known that the push is not needed and + // therefore doing a stat is unnecessary + pd.pushState.Unlock() + progress.Update(progressOutput, pd.ID(), "Layer already exists") + return descriptor, nil + } + pd.pushState.Unlock() + + maxMountAttempts, maxExistenceChecks, checkOtherRepositories := getMaxMountAndExistenceCheckAttempts(pd.layer) + + // Do we have any metadata associated with this layer's DiffID? + v2Metadata, err := pd.v2MetadataService.GetMetadata(diffID) + if err == nil { + // check for blob existence in the target repository + descriptor, exists, err := pd.layerAlreadyExists(ctx, progressOutput, diffID, true, 1, v2Metadata) + if exists || err != nil { + return descriptor, err + } + } + + // if digest was empty or not saved, or if blob does not exist on the remote repository, + // then push the blob. 
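+ // A cross-repository mount is attempted first; the blob is uploaded only if no mount candidate succeeds.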
+ bs := pd.repo.Blobs(ctx) + + var layerUpload distribution.BlobWriter + + // Attempt to find another repository in the same registry to mount the layer from to avoid an unnecessary upload + candidates := getRepositoryMountCandidates(pd.repoInfo, pd.hmacKey, maxMountAttempts, v2Metadata) + for _, mountCandidate := range candidates { + logrus.Debugf("attempting to mount layer %s (%s) from %s", diffID, mountCandidate.Digest, mountCandidate.SourceRepository) + createOpts := []distribution.BlobCreateOption{} + + if len(mountCandidate.SourceRepository) > 0 { + namedRef, err := reference.ParseNormalizedNamed(mountCandidate.SourceRepository) + if err != nil { + logrus.Errorf("failed to parse source repository reference %v: %v", reference.FamiliarString(namedRef), err) + pd.v2MetadataService.Remove(mountCandidate) + continue + } + + // Candidates are always under the same domain; create a remote reference + // containing only the path, to use as the mount source + remoteRef, err := reference.WithName(reference.Path(namedRef)) + if err != nil { + logrus.Errorf("failed to make remote reference out of %q: %v", reference.Path(namedRef), err) + continue + } + + canonicalRef, err := reference.WithDigest(reference.TrimNamed(remoteRef), mountCandidate.Digest) + if err != nil { + logrus.Errorf("failed to make canonical reference: %v", err) + continue + } + + createOpts = append(createOpts, client.WithMountFrom(canonicalRef)) + } + + // send the layer + lu, err := bs.Create(ctx, createOpts...) + switch err := err.(type) { + case nil: + // noop + case distribution.ErrBlobMounted: + progress.Updatef(progressOutput, pd.ID(), "Mounted from %s", err.From.Name()) + + err.Descriptor.MediaType = schema2.MediaTypeLayer + + pd.pushState.Lock() + pd.pushState.confirmedV2 = true + pd.pushState.remoteLayers[diffID] = err.Descriptor + pd.pushState.Unlock() + + // Cache mapping from this layer's DiffID to the blobsum + if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{ + Digest: err.Descriptor.Digest, + SourceRepository: pd.repoInfo.Name(), + }); err != nil { + return distribution.Descriptor{}, xfer.DoNotRetry{Err: err} + } + return err.Descriptor, nil + default: + logrus.Infof("failed to mount layer %s (%s) from %s: %v", diffID, mountCandidate.Digest, mountCandidate.SourceRepository, err) + } + + if len(mountCandidate.SourceRepository) > 0 && + (metadata.CheckV2MetadataHMAC(&mountCandidate, pd.hmacKey) || + len(mountCandidate.HMAC) == 0) { + cause := "blob mount failure" + if err != nil { + cause = fmt.Sprintf("an error: %v", err.Error()) + } + logrus.Debugf("removing association between layer %s and %s due to %s", mountCandidate.Digest, mountCandidate.SourceRepository, cause) + pd.v2MetadataService.Remove(mountCandidate) + } + + if lu != nil { + // cancel previous upload + cancelLayerUpload(ctx, mountCandidate.Digest, layerUpload) + layerUpload = lu + } + } + + if maxExistenceChecks-len(pd.checkedDigests) > 0 { + // do additional layer existence checks with other known digests if any + descriptor, exists, err := pd.layerAlreadyExists(ctx, progressOutput, diffID, checkOtherRepositories, maxExistenceChecks-len(pd.checkedDigests), v2Metadata) + if exists || err != nil { + return descriptor, err + } + } + + logrus.Debugf("Pushing layer: %s", diffID) + if layerUpload == nil { + layerUpload, err = bs.Create(ctx) + if err != nil { + return distribution.Descriptor{}, retryOnError(err) + } + } + defer layerUpload.Close() + + // upload the blob + desc, err := pd.uploadUsingSession(ctx, progressOutput, diffID,
layerUpload) + if err != nil { + return desc, err + } + + return desc, nil +} + +func (pd *v2PushDescriptor) SetRemoteDescriptor(descriptor distribution.Descriptor) { + pd.remoteDescriptor = descriptor +} + +func (pd *v2PushDescriptor) Descriptor() distribution.Descriptor { + return pd.remoteDescriptor +} + +func (pd *v2PushDescriptor) uploadUsingSession( + ctx context.Context, + progressOutput progress.Output, + diffID layer.DiffID, + layerUpload distribution.BlobWriter, +) (distribution.Descriptor, error) { + var reader io.ReadCloser + + contentReader, err := pd.layer.Open() + if err != nil { + return distribution.Descriptor{}, retryOnError(err) + } + + size, _ := pd.layer.Size() + + reader = progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, contentReader), progressOutput, size, pd.ID(), "Pushing") + + switch m := pd.layer.MediaType(); m { + case schema2.MediaTypeUncompressedLayer: + compressedReader, compressionDone := compress(reader) + defer func(closer io.Closer) { + closer.Close() + <-compressionDone + }(reader) + reader = compressedReader + case schema2.MediaTypeLayer: + default: + reader.Close() + return distribution.Descriptor{}, fmt.Errorf("unsupported layer media type %s", m) + } + + digester := digest.Canonical.Digester() + tee := io.TeeReader(reader, digester.Hash()) + + nn, err := layerUpload.ReadFrom(tee) + reader.Close() + if err != nil { + return distribution.Descriptor{}, retryOnError(err) + } + + pushDigest := digester.Digest() + if _, err := layerUpload.Commit(ctx, distribution.Descriptor{Digest: pushDigest}); err != nil { + return distribution.Descriptor{}, retryOnError(err) + } + + logrus.Debugf("uploaded layer %s (%s), %d bytes", diffID, pushDigest, nn) + progress.Update(progressOutput, pd.ID(), "Pushed") + + // Cache mapping from this layer's DiffID to the blobsum + if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{ + Digest: pushDigest, + SourceRepository: pd.repoInfo.Name(), + }); err != nil { + return distribution.Descriptor{}, xfer.DoNotRetry{Err: err} + } + + desc := distribution.Descriptor{ + Digest: pushDigest, + MediaType: schema2.MediaTypeLayer, + Size: nn, + } + + pd.pushState.Lock() + // If Commit succeeded, that's an indication that the remote registry speaks the v2 protocol. + pd.pushState.confirmedV2 = true + pd.pushState.remoteLayers[diffID] = desc + pd.pushState.Unlock() + + return desc, nil +} + +// layerAlreadyExists checks if the registry already knows about any of the metadata passed in the "metadata" +// slice. If it finds one that the registry knows about, it returns the known digest and "true". If +// "checkOtherRepositories" is true, stat will be performed also with digests mapped to any other repository +// (not just the target one). 
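+// Every digest that is statted is recorded in pd.checkedDigests so that it is never statted twice.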
+func (pd *v2PushDescriptor) layerAlreadyExists( + ctx context.Context, + progressOutput progress.Output, + diffID layer.DiffID, + checkOtherRepositories bool, + maxExistenceCheckAttempts int, + v2Metadata []metadata.V2Metadata, +) (desc distribution.Descriptor, exists bool, err error) { + // filter the metadata + candidates := []metadata.V2Metadata{} + for _, meta := range v2Metadata { + if len(meta.SourceRepository) > 0 && !checkOtherRepositories && meta.SourceRepository != pd.repoInfo.Name() { + continue + } + candidates = append(candidates, meta) + } + // sort the candidates by similarity + sortV2MetadataByLikenessAndAge(pd.repoInfo, pd.hmacKey, candidates) + + digestToMetadata := make(map[digest.Digest]*metadata.V2Metadata) + // an array of unique blob digests ordered from the best mount candidates to worst + layerDigests := []digest.Digest{} + for i := 0; i < len(candidates); i++ { + if len(layerDigests) >= maxExistenceCheckAttempts { + break + } + meta := &candidates[i] + if _, exists := digestToMetadata[meta.Digest]; exists { + // keep reference just to the first mapping (the best mount candidate) + continue + } + if _, exists := pd.checkedDigests[meta.Digest]; exists { + // existence of this digest has already been tested + continue + } + digestToMetadata[meta.Digest] = meta + layerDigests = append(layerDigests, meta.Digest) + } + +attempts: + for _, dgst := range layerDigests { + meta := digestToMetadata[dgst] + logrus.Debugf("Checking for presence of layer %s (%s) in %s", diffID, dgst, pd.repoInfo.Name()) + desc, err = pd.repo.Blobs(ctx).Stat(ctx, dgst) + pd.checkedDigests[meta.Digest] = struct{}{} + switch err { + case nil: + if m, ok := digestToMetadata[desc.Digest]; !ok || m.SourceRepository != pd.repoInfo.Name() || !metadata.CheckV2MetadataHMAC(m, pd.hmacKey) { + // cache mapping from this layer's DiffID to the blobsum + if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{ + Digest: desc.Digest, + SourceRepository: pd.repoInfo.Name(), + }); err != nil { + return distribution.Descriptor{}, false, xfer.DoNotRetry{Err: err} + } + } + desc.MediaType = schema2.MediaTypeLayer + exists = true + break attempts + case distribution.ErrBlobUnknown: + if meta.SourceRepository == pd.repoInfo.Name() { + // remove the mapping to the target repository + pd.v2MetadataService.Remove(*meta) + } + default: + logrus.WithError(err).Debugf("Failed to check for presence of layer %s (%s) in %s", diffID, dgst, pd.repoInfo.Name()) + } + } + + if exists { + progress.Update(progressOutput, pd.ID(), "Layer already exists") + pd.pushState.Lock() + pd.pushState.remoteLayers[diffID] = desc + pd.pushState.Unlock() + } + + return desc, exists, nil +} + +// getMaxMountAndExistenceCheckAttempts returns the maximum number of cross-repository mount attempts from +// source repositories of the target registry, the maximum number of layer existence checks performed on the +// target repository, and whether the checks may also use digests mapped to other repositories. The decision +// is based on layer size: the smaller the layer, the fewer attempts are made, because the cost of simply +// uploading it does not outweigh the latency of the extra checks.
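+// For example, a blob larger than 10MB gets up to 4 mount attempts and 3 existence checks, including digests seen in other repositories.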
+func getMaxMountAndExistenceCheckAttempts(layer PushLayer) (maxMountAttempts, maxExistenceCheckAttempts int, checkOtherRepositories bool) { + size, err := layer.Size() + switch { + // big blob + case size > middleLayerMaximumSize: + // 1st attempt to mount the blob few times + // 2nd few existence checks with digests associated to any repository + // then fallback to upload + return 4, 3, true + + // middle sized blobs; if we could not get the size, assume we deal with middle sized blob + case size > smallLayerMaximumSize, err != nil: + // 1st attempt to mount blobs of average size few times + // 2nd try at most 1 existence check if there's an existing mapping to the target repository + // then fallback to upload + return 3, 1, false + + // small blobs, do a minimum number of checks + default: + return 1, 1, false + } +} + +// getRepositoryMountCandidates returns an array of v2 metadata items belonging to the given registry. The +// array is sorted from youngest to oldest. If requireRegistryMatch is true, the resulting array will contain +// only metadata entries having registry part of SourceRepository matching the part of repoInfo. +func getRepositoryMountCandidates( + repoInfo reference.Named, + hmacKey []byte, + max int, + v2Metadata []metadata.V2Metadata, +) []metadata.V2Metadata { + candidates := []metadata.V2Metadata{} + for _, meta := range v2Metadata { + sourceRepo, err := reference.ParseNamed(meta.SourceRepository) + if err != nil || reference.Domain(repoInfo) != reference.Domain(sourceRepo) { + continue + } + // target repository is not a viable candidate + if meta.SourceRepository == repoInfo.Name() { + continue + } + candidates = append(candidates, meta) + } + + sortV2MetadataByLikenessAndAge(repoInfo, hmacKey, candidates) + if max >= 0 && len(candidates) > max { + // select the youngest metadata + candidates = candidates[:max] + } + + return candidates +} + +// byLikeness is a sorting container for v2 metadata candidates for cross repository mount. The +// candidate "a" is preferred over "b": +// +// 1. if it was hashed using the same AuthConfig as the one used to authenticate to target repository and the +// "b" was not +// 2. 
if a number of its repository path components exactly matching path components of target repository is higher +type byLikeness struct { + arr []metadata.V2Metadata + hmacKey []byte + pathComponents []string +} + +func (bla byLikeness) Less(i, j int) bool { + aMacMatch := metadata.CheckV2MetadataHMAC(&bla.arr[i], bla.hmacKey) + bMacMatch := metadata.CheckV2MetadataHMAC(&bla.arr[j], bla.hmacKey) + if aMacMatch != bMacMatch { + return aMacMatch + } + aMatch := numOfMatchingPathComponents(bla.arr[i].SourceRepository, bla.pathComponents) + bMatch := numOfMatchingPathComponents(bla.arr[j].SourceRepository, bla.pathComponents) + return aMatch > bMatch +} +func (bla byLikeness) Swap(i, j int) { + bla.arr[i], bla.arr[j] = bla.arr[j], bla.arr[i] +} +func (bla byLikeness) Len() int { return len(bla.arr) } + +func sortV2MetadataByLikenessAndAge(repoInfo reference.Named, hmacKey []byte, marr []metadata.V2Metadata) { + // reverse the metadata array to shift the newest entries to the beginning + for i := 0; i < len(marr)/2; i++ { + marr[i], marr[len(marr)-i-1] = marr[len(marr)-i-1], marr[i] + } + // keep equal entries ordered from the youngest to the oldest + sort.Stable(byLikeness{ + arr: marr, + hmacKey: hmacKey, + pathComponents: getPathComponents(repoInfo.Name()), + }) +} + +// numOfMatchingPathComponents returns a number of path components in "pth" that exactly match "matchComponents". +func numOfMatchingPathComponents(pth string, matchComponents []string) int { + pthComponents := getPathComponents(pth) + i := 0 + for ; i < len(pthComponents) && i < len(matchComponents); i++ { + if matchComponents[i] != pthComponents[i] { + return i + } + } + return i +} + +func getPathComponents(path string) []string { + return strings.Split(path, "/") +} + +func cancelLayerUpload(ctx context.Context, dgst digest.Digest, layerUpload distribution.BlobWriter) { + if layerUpload != nil { + logrus.Debugf("cancelling upload of blob %s", dgst) + err := layerUpload.Cancel(ctx) + if err != nil { + logrus.Warnf("failed to cancel upload: %v", err) + } + } +} diff --git a/vendor/github.com/moby/moby/distribution/push_v2_test.go b/vendor/github.com/moby/moby/distribution/push_v2_test.go new file mode 100644 index 000000000..99f5acb0c --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/push_v2_test.go @@ -0,0 +1,583 @@ +package distribution + +import ( + "net/http" + "reflect" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/progress" + "github.com/opencontainers/go-digest" +) + +func TestGetRepositoryMountCandidates(t *testing.T) { + for _, tc := range []struct { + name string + hmacKey string + targetRepo string + maxCandidates int + metadata []metadata.V2Metadata + candidates []metadata.V2Metadata + }{ + { + name: "empty metadata", + targetRepo: "busybox", + maxCandidates: -1, + metadata: []metadata.V2Metadata{}, + candidates: []metadata.V2Metadata{}, + }, + { + name: "one item not matching", + targetRepo: "busybox", + maxCandidates: -1, + metadata: []metadata.V2Metadata{taggedMetadata("key", "dgst", "127.0.0.1/repo")}, + candidates: []metadata.V2Metadata{}, + }, + { + name: "one item matching", + targetRepo: "busybox", + maxCandidates: -1, + metadata: []metadata.V2Metadata{taggedMetadata("hash", "1", "docker.io/library/hello-world")}, + candidates: 
[]metadata.V2Metadata{taggedMetadata("hash", "1", "docker.io/library/hello-world")}, + }, + { + name: "allow missing SourceRepository", + targetRepo: "busybox", + maxCandidates: -1, + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("1")}, + {Digest: digest.Digest("3")}, + {Digest: digest.Digest("2")}, + }, + candidates: []metadata.V2Metadata{}, + }, + { + name: "handle docker.io", + targetRepo: "user/app", + maxCandidates: -1, + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("1"), SourceRepository: "docker.io/user/foo"}, + {Digest: digest.Digest("3"), SourceRepository: "docker.io/user/bar"}, + {Digest: digest.Digest("2"), SourceRepository: "docker.io/library/app"}, + }, + candidates: []metadata.V2Metadata{ + {Digest: digest.Digest("3"), SourceRepository: "docker.io/user/bar"}, + {Digest: digest.Digest("1"), SourceRepository: "docker.io/user/foo"}, + {Digest: digest.Digest("2"), SourceRepository: "docker.io/library/app"}, + }, + }, + { + name: "sort more items", + hmacKey: "abcd", + targetRepo: "127.0.0.1/foo/bar", + maxCandidates: -1, + metadata: []metadata.V2Metadata{ + taggedMetadata("hash", "1", "docker.io/library/hello-world"), + taggedMetadata("efgh", "2", "127.0.0.1/hello-world"), + taggedMetadata("abcd", "3", "docker.io/library/busybox"), + taggedMetadata("hash", "4", "docker.io/library/busybox"), + taggedMetadata("hash", "5", "127.0.0.1/foo"), + taggedMetadata("hash", "6", "127.0.0.1/bar"), + taggedMetadata("efgh", "7", "127.0.0.1/foo/bar"), + taggedMetadata("abcd", "8", "127.0.0.1/xyz"), + taggedMetadata("hash", "9", "127.0.0.1/foo/app"), + }, + candidates: []metadata.V2Metadata{ + // first by matching hash + taggedMetadata("abcd", "8", "127.0.0.1/xyz"), + // then by longest matching prefix + taggedMetadata("hash", "9", "127.0.0.1/foo/app"), + taggedMetadata("hash", "5", "127.0.0.1/foo"), + // sort the rest of the matching items in reversed order + taggedMetadata("hash", "6", "127.0.0.1/bar"), + taggedMetadata("efgh", "2", "127.0.0.1/hello-world"), + }, + }, + { + name: "limit max candidates", + hmacKey: "abcd", + targetRepo: "user/app", + maxCandidates: 3, + metadata: []metadata.V2Metadata{ + taggedMetadata("abcd", "1", "docker.io/user/app1"), + taggedMetadata("abcd", "2", "docker.io/user/app/base"), + taggedMetadata("hash", "3", "docker.io/user/app"), + taggedMetadata("abcd", "4", "127.0.0.1/user/app"), + taggedMetadata("hash", "5", "docker.io/user/foo"), + taggedMetadata("hash", "6", "docker.io/app/bar"), + }, + candidates: []metadata.V2Metadata{ + // first by matching hash + taggedMetadata("abcd", "2", "docker.io/user/app/base"), + taggedMetadata("abcd", "1", "docker.io/user/app1"), + // then by longest matching prefix + // "docker.io/usr/app" is excluded since candidates must + // be from a different repository + taggedMetadata("hash", "5", "docker.io/user/foo"), + }, + }, + } { + repoInfo, err := reference.ParseNormalizedNamed(tc.targetRepo) + if err != nil { + t.Fatalf("[%s] failed to parse reference name: %v", tc.name, err) + } + candidates := getRepositoryMountCandidates(repoInfo, []byte(tc.hmacKey), tc.maxCandidates, tc.metadata) + if len(candidates) != len(tc.candidates) { + t.Errorf("[%s] got unexpected number of candidates: %d != %d", tc.name, len(candidates), len(tc.candidates)) + } + for i := 0; i < len(candidates) && i < len(tc.candidates); i++ { + if !reflect.DeepEqual(candidates[i], tc.candidates[i]) { + t.Errorf("[%s] candidate %d does not match expected: %#+v != %#+v", tc.name, i, candidates[i], tc.candidates[i]) + } + } + for i := 
len(candidates); i < len(tc.candidates); i++ { + t.Errorf("[%s] missing expected candidate at position %d (%#+v)", tc.name, i, tc.candidates[i]) + } + for i := len(tc.candidates); i < len(candidates); i++ { + t.Errorf("[%s] got unexpected candidate at position %d (%#+v)", tc.name, i, candidates[i]) + } + } +} + +func TestLayerAlreadyExists(t *testing.T) { + for _, tc := range []struct { + name string + metadata []metadata.V2Metadata + targetRepo string + hmacKey string + maxExistenceChecks int + checkOtherRepositories bool + remoteBlobs map[digest.Digest]distribution.Descriptor + remoteErrors map[digest.Digest]error + expectedDescriptor distribution.Descriptor + expectedExists bool + expectedError error + expectedRequests []string + expectedAdditions []metadata.V2Metadata + expectedRemovals []metadata.V2Metadata + }{ + { + name: "empty metadata", + targetRepo: "busybox", + maxExistenceChecks: 3, + checkOtherRepositories: true, + }, + { + name: "single not existent metadata", + targetRepo: "busybox", + metadata: []metadata.V2Metadata{{Digest: digest.Digest("pear"), SourceRepository: "docker.io/library/busybox"}}, + maxExistenceChecks: 3, + expectedRequests: []string{"pear"}, + expectedRemovals: []metadata.V2Metadata{{Digest: digest.Digest("pear"), SourceRepository: "docker.io/library/busybox"}}, + }, + { + name: "access denied", + targetRepo: "busybox", + maxExistenceChecks: 1, + metadata: []metadata.V2Metadata{{Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/busybox"}}, + remoteErrors: map[digest.Digest]error{digest.Digest("apple"): distribution.ErrAccessDenied}, + expectedError: nil, + expectedRequests: []string{"apple"}, + }, + { + name: "not matching repositories", + targetRepo: "busybox", + maxExistenceChecks: 3, + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/hello-world"}, + {Digest: digest.Digest("orange"), SourceRepository: "docker.io/library/busybox/subapp"}, + {Digest: digest.Digest("pear"), SourceRepository: "docker.io/busybox"}, + {Digest: digest.Digest("plum"), SourceRepository: "busybox"}, + {Digest: digest.Digest("banana"), SourceRepository: "127.0.0.1/busybox"}, + }, + }, + { + name: "check other repositories", + targetRepo: "busybox", + maxExistenceChecks: 10, + checkOtherRepositories: true, + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/hello-world"}, + {Digest: digest.Digest("orange"), SourceRepository: "docker.io/busybox/subapp"}, + {Digest: digest.Digest("pear"), SourceRepository: "docker.io/busybox"}, + {Digest: digest.Digest("plum"), SourceRepository: "docker.io/library/busybox"}, + {Digest: digest.Digest("banana"), SourceRepository: "127.0.0.1/busybox"}, + }, + expectedRequests: []string{"plum", "apple", "pear", "orange", "banana"}, + expectedRemovals: []metadata.V2Metadata{ + {Digest: digest.Digest("plum"), SourceRepository: "docker.io/library/busybox"}, + }, + }, + { + name: "find existing blob", + targetRepo: "busybox", + metadata: []metadata.V2Metadata{{Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/busybox"}}, + maxExistenceChecks: 3, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("apple"): {Digest: digest.Digest("apple")}}, + expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("apple"), MediaType: schema2.MediaTypeLayer}, + expectedExists: true, + expectedRequests: []string{"apple"}, + }, + { + name: "find existing blob with different hmac", + targetRepo: 
"busybox", + metadata: []metadata.V2Metadata{{SourceRepository: "docker.io/library/busybox", Digest: digest.Digest("apple"), HMAC: "dummyhmac"}}, + maxExistenceChecks: 3, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("apple"): {Digest: digest.Digest("apple")}}, + expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("apple"), MediaType: schema2.MediaTypeLayer}, + expectedExists: true, + expectedRequests: []string{"apple"}, + expectedAdditions: []metadata.V2Metadata{{Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/busybox"}}, + }, + { + name: "overwrite media types", + targetRepo: "busybox", + metadata: []metadata.V2Metadata{{Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/busybox"}}, + hmacKey: "key", + maxExistenceChecks: 3, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("apple"): {Digest: digest.Digest("apple"), MediaType: "custom-media-type"}}, + expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("apple"), MediaType: schema2.MediaTypeLayer}, + expectedExists: true, + expectedRequests: []string{"apple"}, + expectedAdditions: []metadata.V2Metadata{taggedMetadata("key", "apple", "docker.io/library/busybox")}, + }, + { + name: "find existing blob among many", + targetRepo: "127.0.0.1/myapp", + hmacKey: "key", + metadata: []metadata.V2Metadata{ + taggedMetadata("someotherkey", "pear", "127.0.0.1/myapp"), + taggedMetadata("key", "apple", "127.0.0.1/myapp"), + taggedMetadata("", "plum", "127.0.0.1/myapp"), + }, + maxExistenceChecks: 3, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("pear"): {Digest: digest.Digest("pear")}}, + expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("pear"), MediaType: schema2.MediaTypeLayer}, + expectedExists: true, + expectedRequests: []string{"apple", "plum", "pear"}, + expectedAdditions: []metadata.V2Metadata{taggedMetadata("key", "pear", "127.0.0.1/myapp")}, + expectedRemovals: []metadata.V2Metadata{ + taggedMetadata("key", "apple", "127.0.0.1/myapp"), + {Digest: digest.Digest("plum"), SourceRepository: "127.0.0.1/myapp"}, + }, + }, + { + name: "reach maximum existence checks", + targetRepo: "user/app", + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("pear"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("apple"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("plum"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("banana"), SourceRepository: "docker.io/user/app"}, + }, + maxExistenceChecks: 3, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("pear"): {Digest: digest.Digest("pear")}}, + expectedExists: false, + expectedRequests: []string{"banana", "plum", "apple"}, + expectedRemovals: []metadata.V2Metadata{ + {Digest: digest.Digest("banana"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("plum"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("apple"), SourceRepository: "docker.io/user/app"}, + }, + }, + { + name: "zero allowed existence checks", + targetRepo: "user/app", + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("pear"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("apple"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("plum"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("banana"), SourceRepository: "docker.io/user/app"}, + }, + maxExistenceChecks: 0, + remoteBlobs: 
map[digest.Digest]distribution.Descriptor{digest.Digest("pear"): {Digest: digest.Digest("pear")}}, + }, + { + name: "stat single digest just once", + targetRepo: "busybox", + metadata: []metadata.V2Metadata{ + taggedMetadata("key1", "pear", "docker.io/library/busybox"), + taggedMetadata("key2", "apple", "docker.io/library/busybox"), + taggedMetadata("key3", "apple", "docker.io/library/busybox"), + }, + maxExistenceChecks: 3, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("pear"): {Digest: digest.Digest("pear")}}, + expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("pear"), MediaType: schema2.MediaTypeLayer}, + expectedExists: true, + expectedRequests: []string{"apple", "pear"}, + expectedAdditions: []metadata.V2Metadata{{Digest: digest.Digest("pear"), SourceRepository: "docker.io/library/busybox"}}, + expectedRemovals: []metadata.V2Metadata{taggedMetadata("key3", "apple", "docker.io/library/busybox")}, + }, + { + name: "don't stop on first error", + targetRepo: "user/app", + hmacKey: "key", + metadata: []metadata.V2Metadata{ + taggedMetadata("key", "banana", "docker.io/user/app"), + taggedMetadata("key", "orange", "docker.io/user/app"), + taggedMetadata("key", "plum", "docker.io/user/app"), + }, + maxExistenceChecks: 3, + remoteErrors: map[digest.Digest]error{"orange": distribution.ErrAccessDenied}, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("apple"): {}}, + expectedError: nil, + expectedRequests: []string{"plum", "orange", "banana"}, + expectedRemovals: []metadata.V2Metadata{ + taggedMetadata("key", "plum", "docker.io/user/app"), + taggedMetadata("key", "banana", "docker.io/user/app"), + }, + }, + { + name: "remove outdated metadata", + targetRepo: "docker.io/user/app", + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("plum"), SourceRepository: "docker.io/library/busybox"}, + {Digest: digest.Digest("orange"), SourceRepository: "docker.io/user/app"}, + }, + maxExistenceChecks: 3, + remoteErrors: map[digest.Digest]error{"orange": distribution.ErrBlobUnknown}, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("plum"): {}}, + expectedExists: false, + expectedRequests: []string{"orange"}, + expectedRemovals: []metadata.V2Metadata{{Digest: digest.Digest("orange"), SourceRepository: "docker.io/user/app"}}, + }, + { + name: "missing SourceRepository", + targetRepo: "busybox", + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("1")}, + {Digest: digest.Digest("3")}, + {Digest: digest.Digest("2")}, + }, + maxExistenceChecks: 3, + expectedExists: false, + expectedRequests: []string{"2", "3", "1"}, + }, + + { + name: "with and without SourceRepository", + targetRepo: "busybox", + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("1")}, + {Digest: digest.Digest("2"), SourceRepository: "docker.io/library/busybox"}, + {Digest: digest.Digest("3")}, + }, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("1"): {Digest: digest.Digest("1")}}, + maxExistenceChecks: 3, + expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("1"), MediaType: schema2.MediaTypeLayer}, + expectedExists: true, + expectedRequests: []string{"2", "3", "1"}, + expectedAdditions: []metadata.V2Metadata{{Digest: digest.Digest("1"), SourceRepository: "docker.io/library/busybox"}}, + expectedRemovals: []metadata.V2Metadata{ + {Digest: digest.Digest("2"), SourceRepository: "docker.io/library/busybox"}, + }, + }, + } { + repoInfo, err := reference.ParseNormalizedNamed(tc.targetRepo) + if err != 
nil { + t.Fatalf("[%s] failed to parse reference name: %v", tc.name, err) + } + repo := &mockRepo{ + t: t, + errors: tc.remoteErrors, + blobs: tc.remoteBlobs, + requests: []string{}, + } + ctx := context.Background() + ms := &mockV2MetadataService{} + pd := &v2PushDescriptor{ + hmacKey: []byte(tc.hmacKey), + repoInfo: repoInfo, + layer: &storeLayer{ + Layer: layer.EmptyLayer, + }, + repo: repo, + v2MetadataService: ms, + pushState: &pushState{remoteLayers: make(map[layer.DiffID]distribution.Descriptor)}, + checkedDigests: make(map[digest.Digest]struct{}), + } + + desc, exists, err := pd.layerAlreadyExists(ctx, &progressSink{t}, layer.EmptyLayer.DiffID(), tc.checkOtherRepositories, tc.maxExistenceChecks, tc.metadata) + + if !reflect.DeepEqual(desc, tc.expectedDescriptor) { + t.Errorf("[%s] got unexpected descriptor: %#+v != %#+v", tc.name, desc, tc.expectedDescriptor) + } + if exists != tc.expectedExists { + t.Errorf("[%s] got unexpected exists: %t != %t", tc.name, exists, tc.expectedExists) + } + if !reflect.DeepEqual(err, tc.expectedError) { + t.Errorf("[%s] got unexpected error: %#+v != %#+v", tc.name, err, tc.expectedError) + } + + if len(repo.requests) != len(tc.expectedRequests) { + t.Errorf("[%s] got unexpected number of requests: %d != %d", tc.name, len(repo.requests), len(tc.expectedRequests)) + } + for i := 0; i < len(repo.requests) && i < len(tc.expectedRequests); i++ { + if repo.requests[i] != tc.expectedRequests[i] { + t.Errorf("[%s] request %d does not match expected: %q != %q", tc.name, i, repo.requests[i], tc.expectedRequests[i]) + } + } + for i := len(repo.requests); i < len(tc.expectedRequests); i++ { + t.Errorf("[%s] missing expected request at position %d (%q)", tc.name, i, tc.expectedRequests[i]) + } + for i := len(tc.expectedRequests); i < len(repo.requests); i++ { + t.Errorf("[%s] got unexpected request at position %d (%q)", tc.name, i, repo.requests[i]) + } + + if len(ms.added) != len(tc.expectedAdditions) { + t.Errorf("[%s] got unexpected number of additions: %d != %d", tc.name, len(ms.added), len(tc.expectedAdditions)) + } + for i := 0; i < len(ms.added) && i < len(tc.expectedAdditions); i++ { + if ms.added[i] != tc.expectedAdditions[i] { + t.Errorf("[%s] added metadata at %d does not match expected: %q != %q", tc.name, i, ms.added[i], tc.expectedAdditions[i]) + } + } + for i := len(ms.added); i < len(tc.expectedAdditions); i++ { + t.Errorf("[%s] missing expected addition at position %d (%q)", tc.name, i, tc.expectedAdditions[i]) + } + for i := len(tc.expectedAdditions); i < len(ms.added); i++ { + t.Errorf("[%s] unexpected metadata addition at position %d (%q)", tc.name, i, ms.added[i]) + } + + if len(ms.removed) != len(tc.expectedRemovals) { + t.Errorf("[%s] got unexpected number of removals: %d != %d", tc.name, len(ms.removed), len(tc.expectedRemovals)) + } + for i := 0; i < len(ms.removed) && i < len(tc.expectedRemovals); i++ { + if ms.removed[i] != tc.expectedRemovals[i] { + t.Errorf("[%s] removed metadata at %d does not match expected: %q != %q", tc.name, i, ms.removed[i], tc.expectedRemovals[i]) + } + } + for i := len(ms.removed); i < len(tc.expectedRemovals); i++ { + t.Errorf("[%s] missing expected removal at position %d (%q)", tc.name, i, tc.expectedRemovals[i]) + } + for i := len(tc.expectedRemovals); i < len(ms.removed); i++ { + t.Errorf("[%s] removed unexpected metadata at position %d (%q)", tc.name, i, ms.removed[i]) + } + } +} + +func taggedMetadata(key string, dgst string, sourceRepo string) metadata.V2Metadata { + meta := metadata.V2Metadata{ + 
Digest: digest.Digest(dgst), + SourceRepository: sourceRepo, + } + + meta.HMAC = metadata.ComputeV2MetadataHMAC([]byte(key), &meta) + return meta +} + +type mockRepo struct { + t *testing.T + errors map[digest.Digest]error + blobs map[digest.Digest]distribution.Descriptor + requests []string +} + +var _ distribution.Repository = &mockRepo{} + +func (m *mockRepo) Named() reference.Named { + m.t.Fatalf("Named() not implemented") + return nil +} +func (m *mockRepo) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + m.t.Fatalf("Manifests() not implemented") + return nil, nil +} +func (m *mockRepo) Tags(ctx context.Context) distribution.TagService { + m.t.Fatalf("Tags() not implemented") + return nil +} +func (m *mockRepo) Blobs(ctx context.Context) distribution.BlobStore { + return &mockBlobStore{ + repo: m, + } +} + +type mockBlobStore struct { + repo *mockRepo +} + +var _ distribution.BlobStore = &mockBlobStore{} + +func (m *mockBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + m.repo.requests = append(m.repo.requests, dgst.String()) + if err, exists := m.repo.errors[dgst]; exists { + return distribution.Descriptor{}, err + } + if desc, exists := m.repo.blobs[dgst]; exists { + return desc, nil + } + return distribution.Descriptor{}, distribution.ErrBlobUnknown +} +func (m *mockBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + m.repo.t.Fatal("Get() not implemented") + return nil, nil +} + +func (m *mockBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + m.repo.t.Fatal("Open() not implemented") + return nil, nil +} + +func (m *mockBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + m.repo.t.Fatal("Put() not implemented") + return distribution.Descriptor{}, nil +} + +func (m *mockBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + m.repo.t.Fatal("Create() not implemented") + return nil, nil +} +func (m *mockBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + m.repo.t.Fatal("Resume() not implemented") + return nil, nil +} +func (m *mockBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { + m.repo.t.Fatal("Delete() not implemented") + return nil +} +func (m *mockBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + m.repo.t.Fatalf("ServeBlob() not implemented") + return nil +} + +type mockV2MetadataService struct { + added []metadata.V2Metadata + removed []metadata.V2Metadata +} + +var _ metadata.V2MetadataService = &mockV2MetadataService{} + +func (*mockV2MetadataService) GetMetadata(diffID layer.DiffID) ([]metadata.V2Metadata, error) { + return nil, nil +} +func (*mockV2MetadataService) GetDiffID(dgst digest.Digest) (layer.DiffID, error) { + return "", nil +} +func (m *mockV2MetadataService) Add(diffID layer.DiffID, metadata metadata.V2Metadata) error { + m.added = append(m.added, metadata) + return nil +} +func (m *mockV2MetadataService) TagAndAdd(diffID layer.DiffID, hmacKey []byte, meta metadata.V2Metadata) error { + meta.HMAC = metadata.ComputeV2MetadataHMAC(hmacKey, &meta) + m.Add(diffID, meta) + return nil +} +func (m *mockV2MetadataService) Remove(metadata metadata.V2Metadata) error { + m.removed = append(m.removed, metadata) + return nil +} + +type progressSink struct { + t *testing.T +}
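+
+// Editor's note: an illustrative sketch, not part of the upstream file. The
+// taggedMetadata helper above is the write side of HMAC tagging; the read
+// side is assumed to be the metadata package's CheckV2MetadataHMAC
+// counterpart, roughly:
+//
+// meta := taggedMetadata("key", "sha256:abc", "docker.io/library/busybox")
+// ok := metadata.CheckV2MetadataHMAC(&meta, []byte("key")) // true only for the same key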
+ +func (s *progressSink) WriteProgress(p progress.Progress) error { + s.t.Logf("progress update: %#+v", p) + return nil +} diff --git a/vendor/github.com/moby/moby/distribution/registry.go b/vendor/github.com/moby/moby/distribution/registry.go new file mode 100644 index 000000000..bce270a5e --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/registry.go @@ -0,0 +1,156 @@ +package distribution + +import ( + "fmt" + "net" + "net/http" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/api/types" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/registry" + "github.com/docker/go-connections/sockets" + "golang.org/x/net/context" +) + +// ImageTypes represents the schema2 config types for images +var ImageTypes = []string{ + schema2.MediaTypeImageConfig, + // Handle unexpected values from https://github.com/docker/distribution/issues/1621 + // (see also https://github.com/docker/docker/issues/22378, + // https://github.com/docker/docker/issues/30083) + "application/octet-stream", + "application/json", + "text/html", + // Treat defaulted values as images, newer types cannot be implied + "", +} + +// PluginTypes represents the schema2 config types for plugins +var PluginTypes = []string{ + schema2.MediaTypePluginConfig, +} + +var mediaTypeClasses map[string]string + +func init() { + // initialize media type classes with all known types for + // images and plugins + mediaTypeClasses = map[string]string{} + for _, t := range ImageTypes { + mediaTypeClasses[t] = "image" + } + for _, t := range PluginTypes { + mediaTypeClasses[t] = "plugin" + } +} + +// NewV2Repository returns a repository (v2 only). It creates an HTTP transport +// providing timeout settings and authentication support, and also verifies the +// remote API version. +func NewV2Repository(ctx context.Context, repoInfo *registry.RepositoryInfo, endpoint registry.APIEndpoint, metaHeaders http.Header, authConfig *types.AuthConfig, actions ...string) (repo distribution.Repository, foundVersion bool, err error) { + repoName := repoInfo.Name.Name() + // If endpoint does not support CanonicalName, use the RemoteName instead + if endpoint.TrimHostname { + repoName = reference.Path(repoInfo.Name) + } + + direct := &net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + } + + // TODO(dmcgowan): Call close idle connections when complete, use keep alive + base := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: direct.Dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: endpoint.TLSConfig, + // TODO(dmcgowan): Call close idle connections when complete and use keep alive + DisableKeepAlives: true, + } + + proxyDialer, err := sockets.DialerFromEnvironment(direct) + if err == nil { + base.Dial = proxyDialer.Dial + } + + modifiers := registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), metaHeaders) + authTransport := transport.NewTransport(base, modifiers...)
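+ // (Editor's note: the header-only authTransport built here is used just
+ // for the version ping below; token/basic auth handlers are layered on
+ // afterwards, once the registry's challenge is known.)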
+ + challengeManager, foundVersion, err := registry.PingV2Registry(endpoint.URL, authTransport) + if err != nil { + transportOK := false + if responseErr, ok := err.(registry.PingResponseError); ok { + transportOK = true + err = responseErr.Err + } + return nil, foundVersion, fallbackError{ + err: err, + confirmedV2: foundVersion, + transportOK: transportOK, + } + } + + if authConfig.RegistryToken != "" { + passThruTokenHandler := &existingTokenHandler{token: authConfig.RegistryToken} + modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, passThruTokenHandler)) + } else { + scope := auth.RepositoryScope{ + Repository: repoName, + Actions: actions, + Class: repoInfo.Class, + } + + creds := registry.NewStaticCredentialStore(authConfig) + tokenHandlerOptions := auth.TokenHandlerOptions{ + Transport: authTransport, + Credentials: creds, + Scopes: []auth.Scope{scope}, + ClientID: registry.AuthClientID, + } + tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions) + basicHandler := auth.NewBasicHandler(creds) + modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)) + } + tr := transport.NewTransport(base, modifiers...) + + repoNameRef, err := reference.WithName(repoName) + if err != nil { + return nil, foundVersion, fallbackError{ + err: err, + confirmedV2: foundVersion, + transportOK: true, + } + } + + repo, err = client.NewRepository(ctx, repoNameRef, endpoint.URL.String(), tr) + if err != nil { + err = fallbackError{ + err: err, + confirmedV2: foundVersion, + transportOK: true, + } + } + return +} + +type existingTokenHandler struct { + token string +} + +func (th *existingTokenHandler) Scheme() string { + return "bearer" +} + +func (th *existingTokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", th.token)) + return nil +} diff --git a/vendor/github.com/moby/moby/distribution/registry_unit_test.go b/vendor/github.com/moby/moby/distribution/registry_unit_test.go new file mode 100644 index 000000000..910061f45 --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/registry_unit_test.go @@ -0,0 +1,172 @@ +package distribution + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "runtime" + "strings" + "testing" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +const secretRegistryToken = "mysecrettoken" + +type tokenPassThruHandler struct { + reached bool + gotToken bool + shouldSend401 func(url string) bool +} + +func (h *tokenPassThruHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + h.reached = true + if strings.Contains(r.Header.Get("Authorization"), secretRegistryToken) { + logrus.Debug("Detected registry token in auth header") + h.gotToken = true + } + if h.shouldSend401 == nil || h.shouldSend401(r.RequestURI) { + w.Header().Set("WWW-Authenticate", `Bearer realm="foorealm"`) + w.WriteHeader(401) + } +} + +func testTokenPassThru(t *testing.T, ts *httptest.Server) { + tmp, err := testDirectory("") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + uri, err := url.Parse(ts.URL) + if err != nil { + t.Fatalf("could not parse url from test server: %v", err) + } + + endpoint := 
registry.APIEndpoint{ + Mirror: false, + URL: uri, + Version: 2, + Official: false, + TrimHostname: false, + TLSConfig: nil, + } + n, _ := reference.ParseNormalizedNamed("testremotename") + repoInfo := &registry.RepositoryInfo{ + Name: n, + Index: &registrytypes.IndexInfo{ + Name: "testrepo", + Mirrors: nil, + Secure: false, + Official: false, + }, + Official: false, + } + imagePullConfig := &ImagePullConfig{ + Config: Config{ + MetaHeaders: http.Header{}, + AuthConfig: &types.AuthConfig{ + RegistryToken: secretRegistryToken, + }, + }, + Schema2Types: ImageTypes, + } + puller, err := newPuller(endpoint, repoInfo, imagePullConfig) + if err != nil { + t.Fatal(err) + } + p := puller.(*v2Puller) + ctx := context.Background() + p.repo, _, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull") + if err != nil { + t.Fatal(err) + } + + logrus.Debug("About to pull") + // We expect it to fail, since we haven't mocked the full registry exchange in our handler above + tag, _ := reference.WithTag(n, "tag_goes_here") + _ = p.pullV2Repository(ctx, tag) +} + +func TestTokenPassThru(t *testing.T) { + handler := &tokenPassThruHandler{shouldSend401: func(url string) bool { return url == "/v2/" }} + ts := httptest.NewServer(handler) + defer ts.Close() + + testTokenPassThru(t, ts) + + if !handler.reached { + t.Fatal("Handler not reached") + } + if !handler.gotToken { + t.Fatal("Failed to receive registry token") + } +} + +func TestTokenPassThruDifferentHost(t *testing.T) { + handler := new(tokenPassThruHandler) + ts := httptest.NewServer(handler) + defer ts.Close() + + tsredirect := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.RequestURI == "/v2/" { + w.Header().Set("WWW-Authenticate", `Bearer realm="foorealm"`) + w.WriteHeader(401) + return + } + http.Redirect(w, r, ts.URL+r.URL.Path, http.StatusMovedPermanently) + })) + defer tsredirect.Close() + + testTokenPassThru(t, tsredirect) + + if !handler.reached { + t.Fatal("Handler not reached") + } + if handler.gotToken { + t.Fatal("Redirect should not forward Authorization header to another host") + } +} + +// testDirectory creates a new temporary directory and returns its path. +// The contents of directory at path `templateDir` are copied into the +// new directory. +func testDirectory(templateDir string) (dir string, err error) { + testID := stringid.GenerateNonCryptoID()[:4] + prefix := fmt.Sprintf("docker-test%s-%s-", testID, getCallerName(2)) + if prefix == "" { + prefix = "docker-test-" + } + dir, err = ioutil.TempDir("", prefix) + if err = os.Remove(dir); err != nil { + return + } + if templateDir != "" { + if err = archive.NewDefaultArchiver().CopyWithTar(templateDir, dir); err != nil { + return + } + } + return +} + +// getCallerName introspects the call stack and returns the name of the +// function `depth` levels down in the stack. +func getCallerName(depth int) string { + // Use the caller function name as a prefix. + // This helps trace temp directories back to their test.
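+ // (depth+1 accounts for getCallerName's own stack frame, so depth counts
+ // frames relative to the caller.)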
+ pc, _, _, _ := runtime.Caller(depth + 1) + callerLongName := runtime.FuncForPC(pc).Name() + parts := strings.Split(callerLongName, ".") + callerShortName := parts[len(parts)-1] + return callerShortName +} diff --git a/vendor/github.com/moby/moby/distribution/utils/progress.go b/vendor/github.com/moby/moby/distribution/utils/progress.go new file mode 100644 index 000000000..cc3632a53 --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/utils/progress.go @@ -0,0 +1,44 @@ +package utils + +import ( + "io" + "net" + "os" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" +) + +// WriteDistributionProgress is a helper for writing progress from chan to JSON +// stream with an optional cancel function. +func WriteDistributionProgress(cancelFunc func(), outStream io.Writer, progressChan <-chan progress.Progress) { + progressOutput := streamformatter.NewJSONProgressOutput(outStream, false) + operationCancelled := false + + for prog := range progressChan { + if err := progressOutput.WriteProgress(prog); err != nil && !operationCancelled { + // don't log broken pipe errors as this is the normal case when a client aborts + if isBrokenPipe(err) { + logrus.Info("Pull session cancelled") + } else { + logrus.Errorf("error writing progress to client: %v", err) + } + cancelFunc() + operationCancelled = true + // Don't return, because we need to continue draining + // progressChan until it's closed to avoid a deadlock. + } + } +} + +func isBrokenPipe(e error) bool { + if netErr, ok := e.(*net.OpError); ok { + e = netErr.Err + if sysErr, ok := netErr.Err.(*os.SyscallError); ok { + e = sysErr.Err + } + } + return e == syscall.EPIPE +} diff --git a/vendor/github.com/moby/moby/distribution/xfer/download.go b/vendor/github.com/moby/moby/distribution/xfer/download.go new file mode 100644 index 000000000..6769ee1cd --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/xfer/download.go @@ -0,0 +1,469 @@ +package xfer + +import ( + "errors" + "fmt" + "io" + "runtime" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "golang.org/x/net/context" +) + +const maxDownloadAttempts = 5 + +// LayerDownloadManager figures out which layers need to be downloaded, then +// registers and downloads those, taking into account dependencies between +// layers. +type LayerDownloadManager struct { + layerStores map[string]layer.Store + tm TransferManager + waitDuration time.Duration +} + +// SetConcurrency sets the max concurrent downloads for each pull +func (ldm *LayerDownloadManager) SetConcurrency(concurrency int) { + ldm.tm.SetConcurrency(concurrency) +} + +// NewLayerDownloadManager returns a new LayerDownloadManager. +func NewLayerDownloadManager(layerStores map[string]layer.Store, concurrencyLimit int, options ...func(*LayerDownloadManager)) *LayerDownloadManager { + manager := LayerDownloadManager{ + layerStores: layerStores, + tm: NewTransferManager(concurrencyLimit), + waitDuration: time.Second, + } + for _, option := range options { + option(&manager) + } + return &manager +} + +type downloadTransfer struct { + Transfer + + layerStore layer.Store + layer layer.Layer + err error +} + +// result returns the layer resulting from the download, if the download +// and registration were successful. 
+func (d *downloadTransfer) result() (layer.Layer, error) { + return d.layer, d.err +} + +// A DownloadDescriptor references a layer that may need to be downloaded. +type DownloadDescriptor interface { + // Key returns the key used to deduplicate downloads. + Key() string + // ID returns the ID for display purposes. + ID() string + // DiffID should return the DiffID for this layer, or an error + // if it is unknown (for example, if it has not been downloaded + // before). + DiffID() (layer.DiffID, error) + // Download is called to perform the download. + Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) + // Close is called when the download manager is finished with this + // descriptor and will not call Download again or read from the reader + // that Download returned. + Close() +} + +// DownloadDescriptorWithRegistered is a DownloadDescriptor that has an +// additional Registered method which gets called after a downloaded layer is +// registered. This allows the user of the download manager to know the DiffID +// of each registered layer. This method is called if a cast to +// DownloadDescriptorWithRegistered is successful. +type DownloadDescriptorWithRegistered interface { + DownloadDescriptor + Registered(diffID layer.DiffID) +} + +// Download is a blocking function which ensures the requested layers are +// present in the layer store. It uses the string returned by the Key method to +// deduplicate downloads. If a given layer is not already known to be present in +// the layer store, and the key is not used by an in-progress download, the +// Download method is called to get the layer tar data. Layers are then +// registered in the appropriate order. The caller must call the returned +// release function once it is done with the returned RootFS object. +func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS image.RootFS, platform layer.Platform, layers []DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) { + var ( + topLayer layer.Layer + topDownload *downloadTransfer + watcher *Watcher + missingLayer bool + transferKey = "" + downloadsByKey = make(map[string]*downloadTransfer) + ) + + // Assume that the platform is the host OS if blank + if platform == "" { + platform = layer.Platform(runtime.GOOS) + } + + rootFS := initialRootFS + for _, descriptor := range layers { + key := descriptor.Key() + transferKey += key + + if !missingLayer { + missingLayer = true + diffID, err := descriptor.DiffID() + if err == nil { + getRootFS := rootFS + getRootFS.Append(diffID) + l, err := ldm.layerStores[string(platform)].Get(getRootFS.ChainID()) + if err == nil { + // Layer already exists. + logrus.Debugf("Layer already exists: %s", descriptor.ID()) + progress.Update(progressOutput, descriptor.ID(), "Already exists") + if topLayer != nil { + layer.ReleaseAndLog(ldm.layerStores[string(platform)], topLayer) + } + topLayer = l + missingLayer = false + rootFS.Append(diffID) + // Register this repository as a source of this layer. + withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered) + if hasRegistered { + withRegistered.Registered(diffID) + } + continue + } + } + } + + // Does this layer have the same data as a previous layer in + // the stack? If so, avoid downloading it more than once.
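+ // (Editor's note: for example, two descriptors in one pull that return
+ // the same Key() share a single download; only the registration on top
+ // of the current parent is repeated, so chain IDs still come out in order.)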
+ var topDownloadUncasted Transfer + if existingDownload, ok := downloadsByKey[key]; ok { + xferFunc := ldm.makeDownloadFuncFromDownload(descriptor, existingDownload, topDownload, platform) + defer topDownload.Transfer.Release(watcher) + topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput) + topDownload = topDownloadUncasted.(*downloadTransfer) + continue + } + + // Layer is not known to exist - download and register it. + progress.Update(progressOutput, descriptor.ID(), "Pulling fs layer") + + var xferFunc DoFunc + if topDownload != nil { + xferFunc = ldm.makeDownloadFunc(descriptor, "", topDownload, platform) + defer topDownload.Transfer.Release(watcher) + } else { + xferFunc = ldm.makeDownloadFunc(descriptor, rootFS.ChainID(), nil, platform) + } + topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput) + topDownload = topDownloadUncasted.(*downloadTransfer) + downloadsByKey[key] = topDownload + } + + if topDownload == nil { + return rootFS, func() { + if topLayer != nil { + layer.ReleaseAndLog(ldm.layerStores[string(platform)], topLayer) + } + }, nil + } + + // Won't be using the list built up so far - will generate it + // from downloaded layers instead. + rootFS.DiffIDs = []layer.DiffID{} + + defer func() { + if topLayer != nil { + layer.ReleaseAndLog(ldm.layerStores[string(platform)], topLayer) + } + }() + + select { + case <-ctx.Done(): + topDownload.Transfer.Release(watcher) + return rootFS, func() {}, ctx.Err() + case <-topDownload.Done(): + break + } + + l, err := topDownload.result() + if err != nil { + topDownload.Transfer.Release(watcher) + return rootFS, func() {}, err + } + + // Must do this exactly len(layers) times, so we don't include the + // base layer on Windows. + for range layers { + if l == nil { + topDownload.Transfer.Release(watcher) + return rootFS, func() {}, errors.New("internal error: too few parent layers") + } + rootFS.DiffIDs = append([]layer.DiffID{l.DiffID()}, rootFS.DiffIDs...) + l = l.Parent() + } + return rootFS, func() { topDownload.Transfer.Release(watcher) }, err +} + +// makeDownloadFunc returns a function that performs the layer download and +// registration. If parentDownload is non-nil, it waits for that download to +// complete before the registration step, and registers the downloaded data +// on top of parentDownload's resulting layer. Otherwise, it registers the +// layer on top of the ChainID given by parentLayer. +func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor, parentLayer layer.ChainID, parentDownload *downloadTransfer, platform layer.Platform) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + d := &downloadTransfer{ + Transfer: NewTransfer(), + layerStore: ldm.layerStores[string(platform)], + } + + go func() { + defer func() { + close(progressChan) + }() + + progressOutput := progress.ChanOutput(progressChan) + + select { + case <-start: + default: + progress.Update(progressOutput, descriptor.ID(), "Waiting") + <-start + } + + if parentDownload != nil { + // Did the parent download already fail or get + // cancelled? 
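+ // (The non-blocking select below answers that without waiting: the
+ // receive from Done() succeeds only once the parent has finished.)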
+ select { + case <-parentDownload.Done(): + _, err := parentDownload.result() + if err != nil { + d.err = err + return + } + default: + } + } + + var ( + downloadReader io.ReadCloser + size int64 + err error + retries int + ) + + defer descriptor.Close() + + for { + downloadReader, size, err = descriptor.Download(d.Transfer.Context(), progressOutput) + if err == nil { + break + } + + // If an error was returned because the context + // was cancelled, we shouldn't retry. + select { + case <-d.Transfer.Context().Done(): + d.err = err + return + default: + } + + retries++ + if _, isDNR := err.(DoNotRetry); isDNR || retries == maxDownloadAttempts { + logrus.Errorf("Download failed: %v", err) + d.err = err + return + } + + logrus.Errorf("Download failed, retrying: %v", err) + delay := retries * 5 + ticker := time.NewTicker(ldm.waitDuration) + + selectLoop: + for { + progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d second%s", delay, (map[bool]string{true: "s"})[delay != 1]) + select { + case <-ticker.C: + delay-- + if delay == 0 { + ticker.Stop() + break selectLoop + } + case <-d.Transfer.Context().Done(): + ticker.Stop() + d.err = errors.New("download cancelled during retry delay") + return + } + + } + } + + close(inactive) + + if parentDownload != nil { + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + downloadReader.Close() + return + case <-parentDownload.Done(): + } + + l, err := parentDownload.result() + if err != nil { + d.err = err + downloadReader.Close() + return + } + parentLayer = l.ChainID() + } + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(d.Transfer.Context(), downloadReader), progressOutput, size, descriptor.ID(), "Extracting") + defer reader.Close() + + inflatedLayerData, err := archive.DecompressStream(reader) + if err != nil { + d.err = fmt.Errorf("could not get decompression stream: %v", err) + return + } + + var src distribution.Descriptor + if fs, ok := descriptor.(distribution.Describable); ok { + src = fs.Descriptor() + } + if ds, ok := d.layerStore.(layer.DescribableStore); ok { + d.layer, err = ds.RegisterWithDescriptor(inflatedLayerData, parentLayer, platform, src) + } else { + d.layer, err = d.layerStore.Register(inflatedLayerData, parentLayer, platform) + } + if err != nil { + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + default: + d.err = fmt.Errorf("failed to register layer: %v", err) + } + return + } + + progress.Update(progressOutput, descriptor.ID(), "Pull complete") + withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered) + if hasRegistered { + withRegistered.Registered(d.layer.DiffID()) + } + + // Doesn't actually need to be its own goroutine, but + // done like this so we can defer close(c). + go func() { + <-d.Transfer.Released() + if d.layer != nil { + layer.ReleaseAndLog(d.layerStore, d.layer) + } + }() + }() + + return d + } +} + +// makeDownloadFuncFromDownload returns a function that performs the layer +// registration when the layer data is coming from an existing download. It +// waits for sourceDownload and parentDownload to complete, and then +// reregisters the data from sourceDownload's top layer on top of +// parentDownload. This function does not log progress output because it would +// interfere with the progress reporting for sourceDownload, which has the same +// Key. 
+func (ldm *LayerDownloadManager) makeDownloadFuncFromDownload(descriptor DownloadDescriptor, sourceDownload *downloadTransfer, parentDownload *downloadTransfer, platform layer.Platform) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + d := &downloadTransfer{ + Transfer: NewTransfer(), + layerStore: ldm.layerStores[string(platform)], + } + + go func() { + defer func() { + close(progressChan) + }() + + <-start + + close(inactive) + + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + return + case <-parentDownload.Done(): + } + + l, err := parentDownload.result() + if err != nil { + d.err = err + return + } + parentLayer := l.ChainID() + + // sourceDownload should have already finished if + // parentDownload finished, but wait for it explicitly + // to be sure. + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + return + case <-sourceDownload.Done(): + } + + l, err = sourceDownload.result() + if err != nil { + d.err = err + return + } + + layerReader, err := l.TarStream() + if err != nil { + d.err = err + return + } + defer layerReader.Close() + + var src distribution.Descriptor + if fs, ok := l.(distribution.Describable); ok { + src = fs.Descriptor() + } + if ds, ok := d.layerStore.(layer.DescribableStore); ok { + d.layer, err = ds.RegisterWithDescriptor(layerReader, parentLayer, platform, src) + } else { + d.layer, err = d.layerStore.Register(layerReader, parentLayer, platform) + } + if err != nil { + d.err = fmt.Errorf("failed to register layer: %v", err) + return + } + + withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered) + if hasRegistered { + withRegistered.Registered(d.layer.DiffID()) + } + + // Doesn't actually need to be its own goroutine, but + // done like this so we can defer close(c). 
+ go func() { + <-d.Transfer.Released() + if d.layer != nil { + layer.ReleaseAndLog(d.layerStore, d.layer) + } + }() + }() + + return d + } +} diff --git a/vendor/github.com/moby/moby/distribution/xfer/download_test.go b/vendor/github.com/moby/moby/distribution/xfer/download_test.go new file mode 100644 index 000000000..e5aba02e3 --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/xfer/download_test.go @@ -0,0 +1,367 @@ +package xfer + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "runtime" + "sync/atomic" + "testing" + "time" + + "github.com/docker/distribution" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/progress" + "github.com/opencontainers/go-digest" + "golang.org/x/net/context" +) + +const maxDownloadConcurrency = 3 + +type mockLayer struct { + layerData bytes.Buffer + diffID layer.DiffID + chainID layer.ChainID + parent layer.Layer + platform layer.Platform +} + +func (ml *mockLayer) TarStream() (io.ReadCloser, error) { + return ioutil.NopCloser(bytes.NewBuffer(ml.layerData.Bytes())), nil +} + +func (ml *mockLayer) TarStreamFrom(layer.ChainID) (io.ReadCloser, error) { + return nil, fmt.Errorf("not implemented") +} + +func (ml *mockLayer) ChainID() layer.ChainID { + return ml.chainID +} + +func (ml *mockLayer) DiffID() layer.DiffID { + return ml.diffID +} + +func (ml *mockLayer) Parent() layer.Layer { + return ml.parent +} + +func (ml *mockLayer) Size() (size int64, err error) { + return 0, nil +} + +func (ml *mockLayer) DiffSize() (size int64, err error) { + return 0, nil +} + +func (ml *mockLayer) Platform() layer.Platform { + return ml.platform +} + +func (ml *mockLayer) Metadata() (map[string]string, error) { + return make(map[string]string), nil +} + +type mockLayerStore struct { + layers map[layer.ChainID]*mockLayer +} + +func createChainIDFromParent(parent layer.ChainID, dgsts ...layer.DiffID) layer.ChainID { + if len(dgsts) == 0 { + return parent + } + if parent == "" { + return createChainIDFromParent(layer.ChainID(dgsts[0]), dgsts[1:]...) + } + // H = "H(n-1) SHA256(n)" + dgst := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0]))) + return createChainIDFromParent(layer.ChainID(dgst), dgsts[1:]...) 
+} + +func (ls *mockLayerStore) Map() map[layer.ChainID]layer.Layer { + layers := map[layer.ChainID]layer.Layer{} + + for k, v := range ls.layers { + layers[k] = v + } + + return layers +} + +func (ls *mockLayerStore) Register(reader io.Reader, parentID layer.ChainID, platform layer.Platform) (layer.Layer, error) { + return ls.RegisterWithDescriptor(reader, parentID, distribution.Descriptor{}) +} + +func (ls *mockLayerStore) RegisterWithDescriptor(reader io.Reader, parentID layer.ChainID, _ distribution.Descriptor) (layer.Layer, error) { + var ( + parent layer.Layer + err error + ) + + if parentID != "" { + parent, err = ls.Get(parentID) + if err != nil { + return nil, err + } + } + + l := &mockLayer{parent: parent} + _, err = l.layerData.ReadFrom(reader) + if err != nil { + return nil, err + } + l.diffID = layer.DiffID(digest.FromBytes(l.layerData.Bytes())) + l.chainID = createChainIDFromParent(parentID, l.diffID) + + ls.layers[l.chainID] = l + return l, nil +} + +func (ls *mockLayerStore) Get(chainID layer.ChainID) (layer.Layer, error) { + l, ok := ls.layers[chainID] + if !ok { + return nil, layer.ErrLayerDoesNotExist + } + return l, nil +} + +func (ls *mockLayerStore) Release(l layer.Layer) ([]layer.Metadata, error) { + return []layer.Metadata{}, nil +} +func (ls *mockLayerStore) CreateRWLayer(string, layer.ChainID, *layer.CreateRWLayerOpts) (layer.RWLayer, error) { + return nil, errors.New("not implemented") +} + +func (ls *mockLayerStore) GetRWLayer(string) (layer.RWLayer, error) { + return nil, errors.New("not implemented") +} + +func (ls *mockLayerStore) ReleaseRWLayer(layer.RWLayer) ([]layer.Metadata, error) { + return nil, errors.New("not implemented") +} +func (ls *mockLayerStore) GetMountID(string) (string, error) { + return "", errors.New("not implemented") +} + +func (ls *mockLayerStore) Cleanup() error { + return nil +} + +func (ls *mockLayerStore) DriverStatus() [][2]string { + return [][2]string{} +} + +func (ls *mockLayerStore) DriverName() string { + return "mock" +} + +type mockDownloadDescriptor struct { + currentDownloads *int32 + id string + diffID layer.DiffID + registeredDiffID layer.DiffID + expectedDiffID layer.DiffID + simulateRetries int +} + +// Key returns the key used to deduplicate downloads. +func (d *mockDownloadDescriptor) Key() string { + return d.id +} + +// ID returns the ID for display purposes. +func (d *mockDownloadDescriptor) ID() string { + return d.id +} + +// DiffID should return the DiffID for this layer, or an error +// if it is unknown (for example, if it has not been downloaded +// before). +func (d *mockDownloadDescriptor) DiffID() (layer.DiffID, error) { + if d.diffID != "" { + return d.diffID, nil + } + return "", errors.New("no diffID available") +} + +func (d *mockDownloadDescriptor) Registered(diffID layer.DiffID) { + d.registeredDiffID = diffID +} + +func (d *mockDownloadDescriptor) mockTarStream() io.ReadCloser { + // The mock implementation returns the ID repeated 5 times as a tar + // stream instead of actual tar data. The data is ignored except for + // computing IDs. + return ioutil.NopCloser(bytes.NewBuffer([]byte(d.id + d.id + d.id + d.id + d.id))) +} + +// Download is called to perform the download. 
+func (d *mockDownloadDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { + if d.currentDownloads != nil { + defer atomic.AddInt32(d.currentDownloads, -1) + + if atomic.AddInt32(d.currentDownloads, 1) > maxDownloadConcurrency { + return nil, 0, errors.New("concurrency limit exceeded") + } + } + + // Sleep a bit to simulate a time-consuming download. + for i := int64(0); i <= 10; i++ { + select { + case <-ctx.Done(): + return nil, 0, ctx.Err() + case <-time.After(10 * time.Millisecond): + progressOutput.WriteProgress(progress.Progress{ID: d.ID(), Action: "Downloading", Current: i, Total: 10}) + } + } + + if d.simulateRetries != 0 { + d.simulateRetries-- + return nil, 0, errors.New("simulating retry") + } + + return d.mockTarStream(), 0, nil +} + +func (d *mockDownloadDescriptor) Close() { +} + +func downloadDescriptors(currentDownloads *int32) []DownloadDescriptor { + return []DownloadDescriptor{ + &mockDownloadDescriptor{ + currentDownloads: currentDownloads, + id: "id1", + expectedDiffID: layer.DiffID("sha256:68e2c75dc5c78ea9240689c60d7599766c213ae210434c53af18470ae8c53ec1"), + }, + &mockDownloadDescriptor{ + currentDownloads: currentDownloads, + id: "id2", + expectedDiffID: layer.DiffID("sha256:64a636223116aa837973a5d9c2bdd17d9b204e4f95ac423e20e65dfbb3655473"), + }, + &mockDownloadDescriptor{ + currentDownloads: currentDownloads, + id: "id3", + expectedDiffID: layer.DiffID("sha256:58745a8bbd669c25213e9de578c4da5c8ee1c836b3581432c2b50e38a6753300"), + }, + &mockDownloadDescriptor{ + currentDownloads: currentDownloads, + id: "id2", + expectedDiffID: layer.DiffID("sha256:64a636223116aa837973a5d9c2bdd17d9b204e4f95ac423e20e65dfbb3655473"), + }, + &mockDownloadDescriptor{ + currentDownloads: currentDownloads, + id: "id4", + expectedDiffID: layer.DiffID("sha256:0dfb5b9577716cc173e95af7c10289322c29a6453a1718addc00c0c5b1330936"), + simulateRetries: 1, + }, + &mockDownloadDescriptor{ + currentDownloads: currentDownloads, + id: "id5", + expectedDiffID: layer.DiffID("sha256:0a5f25fa1acbc647f6112a6276735d0fa01e4ee2aa7ec33015e337350e1ea23d"), + }, + } +} + +func TestSuccessfulDownload(t *testing.T) { + // TODO Windows: Fix this unit test + if runtime.GOOS == "windows" { + t.Skip("Needs fixing on Windows") + } + + layerStore := &mockLayerStore{make(map[layer.ChainID]*mockLayer)} + lsMap := make(map[string]layer.Store) + lsMap[runtime.GOOS] = layerStore + ldm := NewLayerDownloadManager(lsMap, maxDownloadConcurrency, func(m *LayerDownloadManager) { m.waitDuration = time.Millisecond }) + + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + receivedProgress := make(map[string]progress.Progress) + + go func() { + for p := range progressChan { + receivedProgress[p.ID] = p + } + close(progressDone) + }() + + var currentDownloads int32 + descriptors := downloadDescriptors(&currentDownloads) + + firstDescriptor := descriptors[0].(*mockDownloadDescriptor) + + // Pre-register the first layer to simulate an already-existing layer + l, err := layerStore.Register(firstDescriptor.mockTarStream(), "", layer.Platform(runtime.GOOS)) + if err != nil { + t.Fatal(err) + } + firstDescriptor.diffID = l.DiffID() + + rootFS, releaseFunc, err := ldm.Download(context.Background(), *image.NewRootFS(), layer.Platform(runtime.GOOS), descriptors, progress.ChanOutput(progressChan)) + if err != nil { + t.Fatalf("download error: %v", err) + } + + releaseFunc() + + close(progressChan) + <-progressDone + + if len(rootFS.DiffIDs) != len(descriptors) { +
t.Fatal("got wrong number of diffIDs in rootfs") + } + + for i, d := range descriptors { + descriptor := d.(*mockDownloadDescriptor) + + if descriptor.diffID != "" { + if receivedProgress[d.ID()].Action != "Already exists" { + t.Fatalf("did not get 'Already exists' message for %v", d.ID()) + } + } else if receivedProgress[d.ID()].Action != "Pull complete" { + t.Fatalf("did not get 'Pull complete' message for %v", d.ID()) + } + + if rootFS.DiffIDs[i] != descriptor.expectedDiffID { + t.Fatalf("rootFS item %d has the wrong diffID (expected: %v got: %v)", i, descriptor.expectedDiffID, rootFS.DiffIDs[i]) + } + + if descriptor.diffID == "" && descriptor.registeredDiffID != rootFS.DiffIDs[i] { + t.Fatal("diffID mismatch between rootFS and Registered callback") + } + } +} + +func TestCancelledDownload(t *testing.T) { + layerStore := &mockLayerStore{make(map[layer.ChainID]*mockLayer)} + lsMap := make(map[string]layer.Store) + lsMap[runtime.GOOS] = layerStore + ldm := NewLayerDownloadManager(lsMap, maxDownloadConcurrency, func(m *LayerDownloadManager) { m.waitDuration = time.Millisecond }) + + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + + go func() { + for range progressChan { + } + close(progressDone) + }() + + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + <-time.After(time.Millisecond) + cancel() + }() + + descriptors := downloadDescriptors(nil) + _, _, err := ldm.Download(ctx, *image.NewRootFS(), layer.Platform(runtime.GOOS), descriptors, progress.ChanOutput(progressChan)) + if err != context.Canceled { + t.Fatal("expected download to be cancelled") + } + + close(progressChan) + <-progressDone +} diff --git a/vendor/github.com/moby/moby/distribution/xfer/transfer.go b/vendor/github.com/moby/moby/distribution/xfer/transfer.go new file mode 100644 index 000000000..b86c503a0 --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/xfer/transfer.go @@ -0,0 +1,401 @@ +package xfer + +import ( + "runtime" + "sync" + + "github.com/docker/docker/pkg/progress" + "golang.org/x/net/context" +) + +// DoNotRetry is an error wrapper indicating that the error cannot be resolved +// with a retry. +type DoNotRetry struct { + Err error +} + +// Error returns the stringified representation of the encapsulated error. +func (e DoNotRetry) Error() string { + return e.Err.Error() +} + +// Watcher is returned by Watch and can be passed to Release to stop watching. +type Watcher struct { + // signalChan is used to signal to the watcher goroutine that + // new progress information is available, or that the transfer + // has finished. + signalChan chan struct{} + // releaseChan signals to the watcher goroutine that the watcher + // should be detached. + releaseChan chan struct{} + // running remains open as long as the watcher is watching the + // transfer. It gets closed if the transfer finishes or the + // watcher is detached. + running chan struct{} +} + +// Transfer represents an in-progress transfer. +type Transfer interface { + Watch(progressOutput progress.Output) *Watcher + Release(*Watcher) + Context() context.Context + Close() + Done() <-chan struct{} + Released() <-chan struct{} + Broadcast(masterProgressChan <-chan progress.Progress) +} + +type transfer struct { + mu sync.Mutex + + ctx context.Context + cancel context.CancelFunc + + // watchers keeps track of the goroutines monitoring progress output, + // indexed by the channels that release them. 
watchers map[chan struct{}]*Watcher + + // lastProgress is the most recently received progress event. + lastProgress progress.Progress + // hasLastProgress is true when lastProgress has been set. + hasLastProgress bool + + // running remains open as long as the transfer is in progress. + running chan struct{} + // released stays open until all watchers release the transfer and + // the transfer is no longer tracked by the transfer manager. + released chan struct{} + + // broadcastDone is true if the master progress channel has closed. + broadcastDone bool + // closed is true if Close has been called. + closed bool + // broadcastSyncChan allows watchers to "ping" the broadcasting + // goroutine to wait for it to deplete its input channel. This ensures + // a detaching watcher won't miss an event that was sent before it + // started detaching. + broadcastSyncChan chan struct{} +} + +// NewTransfer creates a new transfer. +func NewTransfer() Transfer { + t := &transfer{ + watchers: make(map[chan struct{}]*Watcher), + running: make(chan struct{}), + released: make(chan struct{}), + broadcastSyncChan: make(chan struct{}), + } + + // This uses context.Background instead of a caller-supplied context + // so that a transfer won't be cancelled automatically if the client + // which requested it is ^C'd (there could be other viewers). + t.ctx, t.cancel = context.WithCancel(context.Background()) + + return t +} + +// Broadcast copies the progress and error output to all viewers. +func (t *transfer) Broadcast(masterProgressChan <-chan progress.Progress) { + for { + var ( + p progress.Progress + ok bool + ) + select { + case p, ok = <-masterProgressChan: + default: + // We've depleted the channel, so now we can handle + // reads on broadcastSyncChan to let detaching watchers + // know we're caught up. + select { + case <-t.broadcastSyncChan: + continue + case p, ok = <-masterProgressChan: + } + } + + t.mu.Lock() + if ok { + t.lastProgress = p + t.hasLastProgress = true + for _, w := range t.watchers { + select { + case w.signalChan <- struct{}{}: + default: + } + } + } else { + t.broadcastDone = true + } + t.mu.Unlock() + if !ok { + close(t.running) + return + } + } +} + +// Watch adds a watcher to the transfer. The supplied channel gets progress +// updates and is closed when the transfer finishes. +func (t *transfer) Watch(progressOutput progress.Output) *Watcher { + t.mu.Lock() + defer t.mu.Unlock() + + w := &Watcher{ + releaseChan: make(chan struct{}), + signalChan: make(chan struct{}), + running: make(chan struct{}), + } + + t.watchers[w.releaseChan] = w + + if t.broadcastDone { + close(w.running) + return w + } + + go func() { + defer func() { + close(w.running) + }() + var ( + done bool + lastWritten progress.Progress + hasLastWritten bool + ) + for { + t.mu.Lock() + hasLastProgress := t.hasLastProgress + lastProgress := t.lastProgress + t.mu.Unlock() + + // Make sure we don't write the last progress item + // twice. + if hasLastProgress && (!done || !hasLastWritten || lastProgress != lastWritten) { + progressOutput.WriteProgress(lastProgress) + lastWritten = lastProgress + hasLastWritten = true + } + + if done { + return + } + + select { + case <-w.signalChan: + case <-w.releaseChan: + done = true + // Since the watcher is going to detach, make + // sure the broadcaster is caught up so we + // don't miss anything.
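+ // (The send on broadcastSyncChan succeeds only once Broadcast has
+ // drained masterProgressChan; racing it against t.running avoids
+ // blocking forever when the transfer has already finished.)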
+ select { + case t.broadcastSyncChan <- struct{}{}: + case <-t.running: + } + case <-t.running: + done = true + } + } + }() + + return w +} + +// Release is the inverse of Watch, indicating that the watcher no longer wants +// to be notified about the progress of the transfer. All calls to Watch must +// be paired with later calls to Release so that the lifecycle of the transfer +// is properly managed. +func (t *transfer) Release(watcher *Watcher) { + t.mu.Lock() + delete(t.watchers, watcher.releaseChan) + + if len(t.watchers) == 0 { + if t.closed { + // released may have been closed already if all + // watchers were released, then another one was added + // while waiting for a previous watcher goroutine to + // finish. + select { + case <-t.released: + default: + close(t.released) + } + } else { + t.cancel() + } + } + t.mu.Unlock() + + close(watcher.releaseChan) + // Block until the watcher goroutine completes + <-watcher.running +} + +// Done returns a channel which is closed if the transfer completes or is +// cancelled. Note that having 0 watchers causes a transfer to be cancelled. +func (t *transfer) Done() <-chan struct{} { + // Note that this doesn't return t.ctx.Done() because that channel will + // be closed the moment Cancel is called, and we need to return a + // channel that blocks until a cancellation is actually acknowledged by + // the transfer function. + return t.running +} + +// Released returns a channel which is closed once all watchers release the +// transfer AND the transfer is no longer tracked by the transfer manager. +func (t *transfer) Released() <-chan struct{} { + return t.released +} + +// Context returns the context associated with the transfer. +func (t *transfer) Context() context.Context { + return t.ctx +} + +// Close is called by the transfer manager when the transfer is no longer +// being tracked. +func (t *transfer) Close() { + t.mu.Lock() + t.closed = true + if len(t.watchers) == 0 { + close(t.released) + } + t.mu.Unlock() +} + +// DoFunc is a function called by the transfer manager to actually perform +// a transfer. It should be non-blocking. It should wait until the start channel +// is closed before transferring any data. If the function closes inactive, that +// signals to the transfer manager that the job is no longer actively moving +// data - for example, it may be waiting for a dependent transfer to finish. +// This prevents it from taking up a slot. +type DoFunc func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer + +// TransferManager is used by LayerDownloadManager and LayerUploadManager to +// schedule and deduplicate transfers. It is up to the TransferManager +// implementation to make the scheduling and concurrency decisions. +type TransferManager interface { + // Transfer checks if a transfer with the given key is in progress. If + // so, it returns progress and error output from that transfer. + // Otherwise, it will call xferFunc to initiate the transfer. + Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher) + // SetConcurrency sets the concurrency limit so that it is adjustable on daemon reload + SetConcurrency(concurrency int) +} + +type transferManager struct { + mu sync.Mutex + + concurrencyLimit int + activeTransfers int + transfers map[string]Transfer + waitingTransfers []chan struct{} +} + +// NewTransferManager returns a new TransferManager.
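+// A concurrencyLimit of 0 means transfers are never queued (no limit).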
+func NewTransferManager(concurrencyLimit int) TransferManager {
+	return &transferManager{
+		concurrencyLimit: concurrencyLimit,
+		transfers:        make(map[string]Transfer),
+	}
+}
+
+// SetConcurrency sets the concurrency limit.
+func (tm *transferManager) SetConcurrency(concurrency int) {
+	tm.mu.Lock()
+	tm.concurrencyLimit = concurrency
+	tm.mu.Unlock()
+}
+
+// Transfer checks if a transfer matching the given key is in progress. If not,
+// it starts one by calling xferFunc. The caller supplies a progress.Output
+// which receives progress output from the transfer.
+func (tm *transferManager) Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher) {
+	tm.mu.Lock()
+	defer tm.mu.Unlock()
+
+	for {
+		xfer, present := tm.transfers[key]
+		if !present {
+			break
+		}
+		// Transfer is already in progress.
+		watcher := xfer.Watch(progressOutput)
+
+		select {
+		case <-xfer.Context().Done():
+			// We don't want to watch a transfer that has been cancelled.
+			// Wait for it to be removed from the map and try again.
+			xfer.Release(watcher)
+			tm.mu.Unlock()
+			// The goroutine that removes this transfer from the
+			// map is also waiting for xfer.Done(), so yield to it.
+			// This could be avoided by adding a Closed method
+			// to Transfer to allow explicitly waiting for it to be
+			// removed from the map, but forcing a scheduling round in
+			// this very rare case seems better than bloating the
+			// interface definition.
+			runtime.Gosched()
+			<-xfer.Done()
+			tm.mu.Lock()
+		default:
+			return xfer, watcher
+		}
+	}
+
+	start := make(chan struct{})
+	inactive := make(chan struct{})
+
+	if tm.concurrencyLimit == 0 || tm.activeTransfers < tm.concurrencyLimit {
+		close(start)
+		tm.activeTransfers++
+	} else {
+		tm.waitingTransfers = append(tm.waitingTransfers, start)
+	}
+
+	masterProgressChan := make(chan progress.Progress)
+	xfer := xferFunc(masterProgressChan, start, inactive)
+	watcher := xfer.Watch(progressOutput)
+	go xfer.Broadcast(masterProgressChan)
+	tm.transfers[key] = xfer
+
+	// When the transfer is finished, remove it from the map.
+	go func() {
+		for {
+			select {
+			case <-inactive:
+				tm.mu.Lock()
+				tm.inactivate(start)
+				tm.mu.Unlock()
+				inactive = nil
+			case <-xfer.Done():
+				tm.mu.Lock()
+				if inactive != nil {
+					tm.inactivate(start)
+				}
+				delete(tm.transfers, key)
+				tm.mu.Unlock()
+				xfer.Close()
+				return
+			}
+		}
+	}()
+
+	return xfer, watcher
+}
+
+func (tm *transferManager) inactivate(start chan struct{}) {
+	// If the transfer was started, remove it from the activeTransfers
+	// count.
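+	// A start channel that is still open means this transfer never
+	// claimed a slot, so there is no count to decrement and no waiting
+	// transfer to promote.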
+ select { + case <-start: + // Start next transfer if any are waiting + if len(tm.waitingTransfers) != 0 { + close(tm.waitingTransfers[0]) + tm.waitingTransfers = tm.waitingTransfers[1:] + } else { + tm.activeTransfers-- + } + default: + } +} diff --git a/vendor/github.com/moby/moby/distribution/xfer/transfer_test.go b/vendor/github.com/moby/moby/distribution/xfer/transfer_test.go new file mode 100644 index 000000000..6c50ce352 --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/xfer/transfer_test.go @@ -0,0 +1,410 @@ +package xfer + +import ( + "sync/atomic" + "testing" + "time" + + "github.com/docker/docker/pkg/progress" +) + +func TestTransfer(t *testing.T) { + makeXferFunc := func(id string) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + select { + case <-start: + default: + t.Fatalf("transfer function not started even though concurrency limit not reached") + } + + xfer := NewTransfer() + go func() { + for i := 0; i <= 10; i++ { + progressChan <- progress.Progress{ID: id, Action: "testing", Current: int64(i), Total: 10} + time.Sleep(10 * time.Millisecond) + } + close(progressChan) + }() + return xfer + } + } + + tm := NewTransferManager(5) + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + receivedProgress := make(map[string]int64) + + go func() { + for p := range progressChan { + val, present := receivedProgress[p.ID] + if present && p.Current <= val { + t.Fatalf("got unexpected progress value: %d (expected %d)", p.Current, val+1) + } + receivedProgress[p.ID] = p.Current + } + close(progressDone) + }() + + // Start a few transfers + ids := []string{"id1", "id2", "id3"} + xfers := make([]Transfer, len(ids)) + watchers := make([]*Watcher, len(ids)) + for i, id := range ids { + xfers[i], watchers[i] = tm.Transfer(id, makeXferFunc(id), progress.ChanOutput(progressChan)) + } + + for i, xfer := range xfers { + <-xfer.Done() + xfer.Release(watchers[i]) + } + close(progressChan) + <-progressDone + + for _, id := range ids { + if receivedProgress[id] != 10 { + t.Fatalf("final progress value %d instead of 10", receivedProgress[id]) + } + } +} + +func TestConcurrencyLimit(t *testing.T) { + concurrencyLimit := 3 + var runningJobs int32 + + makeXferFunc := func(id string) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + xfer := NewTransfer() + go func() { + <-start + totalJobs := atomic.AddInt32(&runningJobs, 1) + if int(totalJobs) > concurrencyLimit { + t.Fatalf("too many jobs running") + } + for i := 0; i <= 10; i++ { + progressChan <- progress.Progress{ID: id, Action: "testing", Current: int64(i), Total: 10} + time.Sleep(10 * time.Millisecond) + } + atomic.AddInt32(&runningJobs, -1) + close(progressChan) + }() + return xfer + } + } + + tm := NewTransferManager(concurrencyLimit) + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + receivedProgress := make(map[string]int64) + + go func() { + for p := range progressChan { + receivedProgress[p.ID] = p.Current + } + close(progressDone) + }() + + // Start more transfers than the concurrency limit + ids := []string{"id1", "id2", "id3", "id4", "id5", "id6", "id7", "id8"} + xfers := make([]Transfer, len(ids)) + watchers := make([]*Watcher, len(ids)) + for i, id := range ids { + xfers[i], watchers[i] = tm.Transfer(id, makeXferFunc(id), progress.ChanOutput(progressChan)) + } + + for i, xfer := range xfers { + <-xfer.Done() + 
xfer.Release(watchers[i]) + } + close(progressChan) + <-progressDone + + for _, id := range ids { + if receivedProgress[id] != 10 { + t.Fatalf("final progress value %d instead of 10", receivedProgress[id]) + } + } +} + +func TestInactiveJobs(t *testing.T) { + concurrencyLimit := 3 + var runningJobs int32 + testDone := make(chan struct{}) + + makeXferFunc := func(id string) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + xfer := NewTransfer() + go func() { + <-start + totalJobs := atomic.AddInt32(&runningJobs, 1) + if int(totalJobs) > concurrencyLimit { + t.Fatalf("too many jobs running") + } + for i := 0; i <= 10; i++ { + progressChan <- progress.Progress{ID: id, Action: "testing", Current: int64(i), Total: 10} + time.Sleep(10 * time.Millisecond) + } + atomic.AddInt32(&runningJobs, -1) + close(inactive) + <-testDone + close(progressChan) + }() + return xfer + } + } + + tm := NewTransferManager(concurrencyLimit) + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + receivedProgress := make(map[string]int64) + + go func() { + for p := range progressChan { + receivedProgress[p.ID] = p.Current + } + close(progressDone) + }() + + // Start more transfers than the concurrency limit + ids := []string{"id1", "id2", "id3", "id4", "id5", "id6", "id7", "id8"} + xfers := make([]Transfer, len(ids)) + watchers := make([]*Watcher, len(ids)) + for i, id := range ids { + xfers[i], watchers[i] = tm.Transfer(id, makeXferFunc(id), progress.ChanOutput(progressChan)) + } + + close(testDone) + for i, xfer := range xfers { + <-xfer.Done() + xfer.Release(watchers[i]) + } + close(progressChan) + <-progressDone + + for _, id := range ids { + if receivedProgress[id] != 10 { + t.Fatalf("final progress value %d instead of 10", receivedProgress[id]) + } + } +} + +func TestWatchRelease(t *testing.T) { + ready := make(chan struct{}) + + makeXferFunc := func(id string) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + xfer := NewTransfer() + go func() { + defer func() { + close(progressChan) + }() + <-ready + for i := int64(0); ; i++ { + select { + case <-time.After(10 * time.Millisecond): + case <-xfer.Context().Done(): + return + } + progressChan <- progress.Progress{ID: id, Action: "testing", Current: i, Total: 10} + } + }() + return xfer + } + } + + tm := NewTransferManager(5) + + type watcherInfo struct { + watcher *Watcher + progressChan chan progress.Progress + progressDone chan struct{} + receivedFirstProgress chan struct{} + } + + progressConsumer := func(w watcherInfo) { + first := true + for range w.progressChan { + if first { + close(w.receivedFirstProgress) + } + first = false + } + close(w.progressDone) + } + + // Start a transfer + watchers := make([]watcherInfo, 5) + var xfer Transfer + watchers[0].progressChan = make(chan progress.Progress) + watchers[0].progressDone = make(chan struct{}) + watchers[0].receivedFirstProgress = make(chan struct{}) + xfer, watchers[0].watcher = tm.Transfer("id1", makeXferFunc("id1"), progress.ChanOutput(watchers[0].progressChan)) + go progressConsumer(watchers[0]) + + // Give it multiple watchers + for i := 1; i != len(watchers); i++ { + watchers[i].progressChan = make(chan progress.Progress) + watchers[i].progressDone = make(chan struct{}) + watchers[i].receivedFirstProgress = make(chan struct{}) + watchers[i].watcher = xfer.Watch(progress.ChanOutput(watchers[i].progressChan)) + go 
progressConsumer(watchers[i]) + } + + // Now that the watchers are set up, allow the transfer goroutine to + // proceed. + close(ready) + + // Confirm that each watcher gets progress output. + for _, w := range watchers { + <-w.receivedFirstProgress + } + + // Release one watcher every 5ms + for _, w := range watchers { + xfer.Release(w.watcher) + <-time.After(5 * time.Millisecond) + } + + // Now that all watchers have been released, Released() should + // return a closed channel. + <-xfer.Released() + + // Done() should return a closed channel because the xfer func returned + // due to cancellation. + <-xfer.Done() + + for _, w := range watchers { + close(w.progressChan) + <-w.progressDone + } +} + +func TestWatchFinishedTransfer(t *testing.T) { + makeXferFunc := func(id string) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + xfer := NewTransfer() + go func() { + // Finish immediately + close(progressChan) + }() + return xfer + } + } + + tm := NewTransferManager(5) + + // Start a transfer + watchers := make([]*Watcher, 3) + var xfer Transfer + xfer, watchers[0] = tm.Transfer("id1", makeXferFunc("id1"), progress.ChanOutput(make(chan progress.Progress))) + + // Give it a watcher immediately + watchers[1] = xfer.Watch(progress.ChanOutput(make(chan progress.Progress))) + + // Wait for the transfer to complete + <-xfer.Done() + + // Set up another watcher + watchers[2] = xfer.Watch(progress.ChanOutput(make(chan progress.Progress))) + + // Release the watchers + for _, w := range watchers { + xfer.Release(w) + } + + // Now that all watchers have been released, Released() should + // return a closed channel. + <-xfer.Released() +} + +func TestDuplicateTransfer(t *testing.T) { + ready := make(chan struct{}) + + var xferFuncCalls int32 + + makeXferFunc := func(id string) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + atomic.AddInt32(&xferFuncCalls, 1) + xfer := NewTransfer() + go func() { + defer func() { + close(progressChan) + }() + <-ready + for i := int64(0); ; i++ { + select { + case <-time.After(10 * time.Millisecond): + case <-xfer.Context().Done(): + return + } + progressChan <- progress.Progress{ID: id, Action: "testing", Current: i, Total: 10} + } + }() + return xfer + } + } + + tm := NewTransferManager(5) + + type transferInfo struct { + xfer Transfer + watcher *Watcher + progressChan chan progress.Progress + progressDone chan struct{} + receivedFirstProgress chan struct{} + } + + progressConsumer := func(t transferInfo) { + first := true + for range t.progressChan { + if first { + close(t.receivedFirstProgress) + } + first = false + } + close(t.progressDone) + } + + // Try to start multiple transfers with the same ID + transfers := make([]transferInfo, 5) + for i := range transfers { + t := &transfers[i] + t.progressChan = make(chan progress.Progress) + t.progressDone = make(chan struct{}) + t.receivedFirstProgress = make(chan struct{}) + t.xfer, t.watcher = tm.Transfer("id1", makeXferFunc("id1"), progress.ChanOutput(t.progressChan)) + go progressConsumer(*t) + } + + // Allow the transfer goroutine to proceed. + close(ready) + + // Confirm that each watcher gets progress output. + for _, t := range transfers { + <-t.receivedFirstProgress + } + + // Confirm that the transfer function was called exactly once. 
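+	// tm.Transfer invokes the transfer function synchronously, so every
+	// increment of xferFuncCalls has already happened on this goroutine
+	// and the plain read below is not a data race.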
+ if xferFuncCalls != 1 { + t.Fatal("transfer function wasn't called exactly once") + } + + // Release one watcher every 5ms + for _, t := range transfers { + t.xfer.Release(t.watcher) + <-time.After(5 * time.Millisecond) + } + + for _, t := range transfers { + // Now that all watchers have been released, Released() should + // return a closed channel. + <-t.xfer.Released() + // Done() should return a closed channel because the xfer func returned + // due to cancellation. + <-t.xfer.Done() + } + + for _, t := range transfers { + close(t.progressChan) + <-t.progressDone + } +} diff --git a/vendor/github.com/moby/moby/distribution/xfer/upload.go b/vendor/github.com/moby/moby/distribution/xfer/upload.go new file mode 100644 index 000000000..58422e57a --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/xfer/upload.go @@ -0,0 +1,174 @@ +package xfer + +import ( + "errors" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/progress" + "golang.org/x/net/context" +) + +const maxUploadAttempts = 5 + +// LayerUploadManager provides task management and progress reporting for +// uploads. +type LayerUploadManager struct { + tm TransferManager + waitDuration time.Duration +} + +// SetConcurrency sets the max concurrent uploads for each push +func (lum *LayerUploadManager) SetConcurrency(concurrency int) { + lum.tm.SetConcurrency(concurrency) +} + +// NewLayerUploadManager returns a new LayerUploadManager. +func NewLayerUploadManager(concurrencyLimit int, options ...func(*LayerUploadManager)) *LayerUploadManager { + manager := LayerUploadManager{ + tm: NewTransferManager(concurrencyLimit), + waitDuration: time.Second, + } + for _, option := range options { + option(&manager) + } + return &manager +} + +type uploadTransfer struct { + Transfer + + remoteDescriptor distribution.Descriptor + err error +} + +// An UploadDescriptor references a layer that may need to be uploaded. +type UploadDescriptor interface { + // Key returns the key used to deduplicate uploads. + Key() string + // ID returns the ID for display purposes. + ID() string + // DiffID should return the DiffID for this layer. + DiffID() layer.DiffID + // Upload is called to perform the Upload. + Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) + // SetRemoteDescriptor provides the distribution.Descriptor that was + // returned by Upload. This descriptor is not to be confused with + // the UploadDescriptor interface, which is used for internally + // identifying layers that are being uploaded. + SetRemoteDescriptor(descriptor distribution.Descriptor) +} + +// Upload is a blocking function which ensures the listed layers are present on +// the remote registry. It uses the string returned by the Key method to +// deduplicate uploads. 
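+//
+// A hedged sketch of a call site (the variable names are illustrative):
+//
+//	if err := uploadManager.Upload(ctx, descriptors, progressOutput); err != nil {
+//		// a layer failed permanently or ctx was cancelled
+//	}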
+func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescriptor, progressOutput progress.Output) error { + var ( + uploads []*uploadTransfer + dedupDescriptors = make(map[string]*uploadTransfer) + ) + + for _, descriptor := range layers { + progress.Update(progressOutput, descriptor.ID(), "Preparing") + + key := descriptor.Key() + if _, present := dedupDescriptors[key]; present { + continue + } + + xferFunc := lum.makeUploadFunc(descriptor) + upload, watcher := lum.tm.Transfer(descriptor.Key(), xferFunc, progressOutput) + defer upload.Release(watcher) + uploads = append(uploads, upload.(*uploadTransfer)) + dedupDescriptors[key] = upload.(*uploadTransfer) + } + + for _, upload := range uploads { + select { + case <-ctx.Done(): + return ctx.Err() + case <-upload.Transfer.Done(): + if upload.err != nil { + return upload.err + } + } + } + for _, l := range layers { + l.SetRemoteDescriptor(dedupDescriptors[l.Key()].remoteDescriptor) + } + + return nil +} + +func (lum *LayerUploadManager) makeUploadFunc(descriptor UploadDescriptor) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + u := &uploadTransfer{ + Transfer: NewTransfer(), + } + + go func() { + defer func() { + close(progressChan) + }() + + progressOutput := progress.ChanOutput(progressChan) + + select { + case <-start: + default: + progress.Update(progressOutput, descriptor.ID(), "Waiting") + <-start + } + + retries := 0 + for { + remoteDescriptor, err := descriptor.Upload(u.Transfer.Context(), progressOutput) + if err == nil { + u.remoteDescriptor = remoteDescriptor + break + } + + // If an error was returned because the context + // was cancelled, we shouldn't retry. + select { + case <-u.Transfer.Context().Done(): + u.err = err + return + default: + } + + retries++ + if _, isDNR := err.(DoNotRetry); isDNR || retries == maxUploadAttempts { + logrus.Errorf("Upload failed: %v", err) + u.err = err + return + } + + logrus.Errorf("Upload failed, retrying: %v", err) + delay := retries * 5 + ticker := time.NewTicker(lum.waitDuration) + + selectLoop: + for { + progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d second%s", delay, (map[bool]string{true: "s"})[delay != 1]) + select { + case <-ticker.C: + delay-- + if delay == 0 { + ticker.Stop() + break selectLoop + } + case <-u.Transfer.Context().Done(): + ticker.Stop() + u.err = errors.New("upload cancelled during retry delay") + return + } + } + } + }() + + return u + } +} diff --git a/vendor/github.com/moby/moby/distribution/xfer/upload_test.go b/vendor/github.com/moby/moby/distribution/xfer/upload_test.go new file mode 100644 index 000000000..066019f26 --- /dev/null +++ b/vendor/github.com/moby/moby/distribution/xfer/upload_test.go @@ -0,0 +1,134 @@ +package xfer + +import ( + "errors" + "sync/atomic" + "testing" + "time" + + "github.com/docker/distribution" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/progress" + "golang.org/x/net/context" +) + +const maxUploadConcurrency = 3 + +type mockUploadDescriptor struct { + currentUploads *int32 + diffID layer.DiffID + simulateRetries int +} + +// Key returns the key used to deduplicate downloads. +func (u *mockUploadDescriptor) Key() string { + return u.diffID.String() +} + +// ID returns the ID for display purposes. +func (u *mockUploadDescriptor) ID() string { + return u.diffID.String() +} + +// DiffID should return the DiffID for this layer. 
+func (u *mockUploadDescriptor) DiffID() layer.DiffID { + return u.diffID +} + +// SetRemoteDescriptor is not used in the mock. +func (u *mockUploadDescriptor) SetRemoteDescriptor(remoteDescriptor distribution.Descriptor) { +} + +// Upload is called to perform the upload. +func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) { + if u.currentUploads != nil { + defer atomic.AddInt32(u.currentUploads, -1) + + if atomic.AddInt32(u.currentUploads, 1) > maxUploadConcurrency { + return distribution.Descriptor{}, errors.New("concurrency limit exceeded") + } + } + + // Sleep a bit to simulate a time-consuming upload. + for i := int64(0); i <= 10; i++ { + select { + case <-ctx.Done(): + return distribution.Descriptor{}, ctx.Err() + case <-time.After(10 * time.Millisecond): + progressOutput.WriteProgress(progress.Progress{ID: u.ID(), Current: i, Total: 10}) + } + } + + if u.simulateRetries != 0 { + u.simulateRetries-- + return distribution.Descriptor{}, errors.New("simulating retry") + } + + return distribution.Descriptor{}, nil +} + +func uploadDescriptors(currentUploads *int32) []UploadDescriptor { + return []UploadDescriptor{ + &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"), 0}, + &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:1515325234325236634634608943609283523908626098235490238423902343"), 0}, + &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:6929356290463485374960346430698374523437683470934634534953453453"), 0}, + &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"), 0}, + &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:8159352387436803946235346346368745389534789534897538734598734987"), 1}, + &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:4637863963478346897346987346987346789346789364879364897364987346"), 0}, + } +} + +func TestSuccessfulUpload(t *testing.T) { + lum := NewLayerUploadManager(maxUploadConcurrency, func(m *LayerUploadManager) { m.waitDuration = time.Millisecond }) + + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + receivedProgress := make(map[string]int64) + + go func() { + for p := range progressChan { + receivedProgress[p.ID] = p.Current + } + close(progressDone) + }() + + var currentUploads int32 + descriptors := uploadDescriptors(¤tUploads) + + err := lum.Upload(context.Background(), descriptors, progress.ChanOutput(progressChan)) + if err != nil { + t.Fatalf("upload error: %v", err) + } + + close(progressChan) + <-progressDone +} + +func TestCancelledUpload(t *testing.T) { + lum := NewLayerUploadManager(maxUploadConcurrency, func(m *LayerUploadManager) { m.waitDuration = time.Millisecond }) + + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + + go func() { + for range progressChan { + } + close(progressDone) + }() + + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + <-time.After(time.Millisecond) + cancel() + }() + + descriptors := uploadDescriptors(nil) + err := lum.Upload(ctx, descriptors, progress.ChanOutput(progressChan)) + if err != context.Canceled { + t.Fatal("expected upload to be cancelled") + } + + close(progressChan) + <-progressDone +} diff --git a/vendor/github.com/moby/moby/dockerversion/useragent.go b/vendor/github.com/moby/moby/dockerversion/useragent.go new file mode 100644 index 
000000000..c02d0fda1
--- /dev/null
+++ b/vendor/github.com/moby/moby/dockerversion/useragent.go
@@ -0,0 +1,76 @@
+package dockerversion
+
+import (
+	"fmt"
+	"runtime"
+
+	"github.com/docker/docker/pkg/parsers/kernel"
+	"github.com/docker/docker/pkg/useragent"
+	"golang.org/x/net/context"
+)
+
+// UAStringKey is used as the key for the upstream user-agent string stored in
+// a net/context Context.
+const UAStringKey = "upstream-user-agent"
+
+// DockerUserAgent is the User-Agent the Docker client uses to identify itself.
+// In accordance with RFC 7231 (5.5.3), it is of the form:
+//	[docker client's UA] UpstreamClient([upstream client's UA])
+func DockerUserAgent(ctx context.Context) string {
+	httpVersion := make([]useragent.VersionInfo, 0, 6)
+	httpVersion = append(httpVersion, useragent.VersionInfo{Name: "docker", Version: Version})
+	httpVersion = append(httpVersion, useragent.VersionInfo{Name: "go", Version: runtime.Version()})
+	httpVersion = append(httpVersion, useragent.VersionInfo{Name: "git-commit", Version: GitCommit})
+	if kernelVersion, err := kernel.GetKernelVersion(); err == nil {
+		httpVersion = append(httpVersion, useragent.VersionInfo{Name: "kernel", Version: kernelVersion.String()})
+	}
+	httpVersion = append(httpVersion, useragent.VersionInfo{Name: "os", Version: runtime.GOOS})
+	httpVersion = append(httpVersion, useragent.VersionInfo{Name: "arch", Version: runtime.GOARCH})
+
+	dockerUA := useragent.AppendVersions("", httpVersion...)
+	upstreamUA := getUserAgentFromContext(ctx)
+	if len(upstreamUA) > 0 {
+		ret := insertUpstreamUserAgent(upstreamUA, dockerUA)
+		return ret
+	}
+	return dockerUA
+}
+
+// getUserAgentFromContext returns the previously saved user-agent string
+// stored in ctx, if one exists.
+func getUserAgentFromContext(ctx context.Context) string {
+	var upstreamUA string
+	if ctx != nil {
+		var ki interface{} = ctx.Value(UAStringKey)
+		if ki != nil {
+			upstreamUA = ctx.Value(UAStringKey).(string)
+		}
+	}
+	return upstreamUA
+}
+
+// escapeStr returns s with every rune in charsToEscape escaped by a backslash.
+func escapeStr(s string, charsToEscape string) string {
+	var ret string
+	for _, currRune := range s {
+		appended := false
+		for _, escapableRune := range charsToEscape {
+			if currRune == escapableRune {
+				ret += `\` + string(currRune)
+				appended = true
+				break
+			}
+		}
+		if !appended {
+			ret += string(currRune)
+		}
+	}
+	return ret
+}
+
+// insertUpstreamUserAgent adds the upstream client user agent to create a
+// user-agent string of the form:
+//	$dockerUA UpstreamClient($upstreamUA)
+func insertUpstreamUserAgent(upstreamUA string, dockerUA string) string {
+	charsToEscape := `();\`
+	upstreamUAEscaped := escapeStr(upstreamUA, charsToEscape)
+	return fmt.Sprintf("%s UpstreamClient(%s)", dockerUA, upstreamUAEscaped)
+}
diff --git a/vendor/github.com/moby/moby/dockerversion/version_lib.go b/vendor/github.com/moby/moby/dockerversion/version_lib.go
new file mode 100644
index 000000000..33f77d3ce
--- /dev/null
+++ b/vendor/github.com/moby/moby/dockerversion/version_lib.go
@@ -0,0 +1,16 @@
+// +build !autogen
+
+// Package dockerversion is auto-generated at build-time
+package dockerversion
+
+// Default build-time variables for library-import.
+// This file is overridden on build with build-time information.
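+// These values are normally injected by the build; a hedged illustration of
+// how (the version string here is hypothetical):
+//
+//	go build -ldflags "-X github.com/docker/docker/dockerversion.Version=17.06.0"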
+const ( + GitCommit string = "library-import" + Version string = "library-import" + BuildTime string = "library-import" + IAmStatic string = "library-import" + ContainerdCommitID string = "library-import" + RuncCommitID string = "library-import" + InitCommitID string = "library-import" +) diff --git a/vendor/github.com/moby/moby/docs/api/v1.18.md b/vendor/github.com/moby/moby/docs/api/v1.18.md new file mode 100644 index 000000000..3c82371b4 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/api/v1.18.md @@ -0,0 +1,2159 @@ +--- +title: "Engine API v1.18" +description: "API Documentation for Docker" +keywords: "API, Docker, rcli, REST, documentation" +redirect_from: +- /engine/reference/api/docker_remote_api_v1.18/ +- /reference/api/docker_remote_api_v1.18/ +--- + + + +## 1. Brief introduction + + - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket). + - The API tends to be REST, but for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `stdout`, + `stdin` and `stderr`. + +## 2. Endpoints + +### 2.1 Containers + +#### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /v1.18/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "Labels": {}, + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + } + ] + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. 
Available filters:
+  - `exited=<int>` -- containers with exit code of `<int>`
+  - `status=`(`restarting`|`running`|`paused`|`exited`)
+  - `label=key` or `label="key=value"` of a container label
+
+**Status codes**:
+
+- **200** – no error
+- **400** – bad parameter
+- **500** – server error
+
+#### Create a container
+
+`POST /containers/create`
+
+Create a container
+
+**Example request**:
+
+    POST /v1.18/containers/create HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "Hostname": "",
+      "Domainname": "",
+      "User": "",
+      "AttachStdin": false,
+      "AttachStdout": true,
+      "AttachStderr": true,
+      "Tty": false,
+      "OpenStdin": false,
+      "StdinOnce": false,
+      "Env": [
+        "FOO=bar",
+        "BAZ=quux"
+      ],
+      "Cmd": [
+        "date"
+      ],
+      "Entrypoint": null,
+      "Image": "ubuntu",
+      "Labels": {
+        "com.example.vendor": "Acme",
+        "com.example.license": "GPL",
+        "com.example.version": "1.0"
+      },
+      "Volumes": {
+        "/volumes/data": {}
+      },
+      "WorkingDir": "",
+      "NetworkDisabled": false,
+      "MacAddress": "12:34:56:78:9a:bc",
+      "ExposedPorts": {
+        "22/tcp": {}
+      },
+      "HostConfig": {
+        "Binds": ["/tmp:/tmp"],
+        "Links": ["redis3:redis"],
+        "LxcConf": {"lxc.utsname":"docker"},
+        "Memory": 0,
+        "MemorySwap": 0,
+        "CpuShares": 512,
+        "CpusetCpus": "0,1",
+        "PidMode": "",
+        "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] },
+        "PublishAllPorts": false,
+        "Privileged": false,
+        "ReadonlyRootfs": false,
+        "Dns": ["8.8.8.8"],
+        "DnsSearch": [""],
+        "ExtraHosts": null,
+        "VolumesFrom": ["parent", "other:ro"],
+        "CapAdd": ["NET_ADMIN"],
+        "CapDrop": ["MKNOD"],
+        "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 },
+        "NetworkMode": "bridge",
+        "Devices": [],
+        "Ulimits": [{}],
+        "LogConfig": { "Type": "json-file", "Config": {} },
+        "SecurityOpt": [],
+        "CgroupParent": ""
+      }
+    }
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: application/json
+
+    {
+      "Id":"e90e34656806",
+      "Warnings":[]
+    }
+
+**JSON parameters**:
+
+- **Hostname** - A string value containing the hostname to use for the
+  container.
+- **Domainname** - A string value containing the domain name to use
+  for the container.
+- **User** - A string value specifying the user inside the container.
+- **AttachStdin** - Boolean value, attaches to `stdin`.
+- **AttachStdout** - Boolean value, attaches to `stdout`.
+- **AttachStderr** - Boolean value, attaches to `stderr`.
+- **Tty** - Boolean value, attach standard streams to a `tty`, including `stdin` if it is not closed.
+- **OpenStdin** - Boolean value, opens `stdin`.
+- **StdinOnce** - Boolean value, close `stdin` after the first attached client disconnects.
+- **Env** - A list of environment variables in the form of `["VAR=value", ...]`
+- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }`
+- **Cmd** - Command to run specified as a string or an array of strings.
+- **Entrypoint** - Set the entry point for the container as a string or an array
+  of strings.
+- **Image** - A string specifying the image name to use for the container.
+- **Volumes** - An object mapping mount point paths (strings) inside the
+  container to empty objects.
+- **WorkingDir** - A string specifying the working directory for commands to
+  run in.
+- **NetworkDisabled** - Boolean value, when true disables networking for the
+  container.
+- **ExposedPorts** - An object mapping ports to an empty object in the form of:
+  `"ExposedPorts": { "<port>/<tcp|udp>: {}" }`
+- **HostConfig**
+  - **Binds** – A list of bind-mounts for this container.
Each item is a string in one of these forms:
+    + `host-src:container-dest` to bind-mount a host path into the
+      container. Both `host-src` and `container-dest` must be an
+      _absolute_ path.
+    + `host-src:container-dest:ro` to make the bind-mount read-only
+      inside the container. Both `host-src` and `container-dest` must be
+      an _absolute_ path.
+  - **Links** - A list of links for the container. Each link entry should be
+    in the form of `container_name:alias`.
+  - **LxcConf** - LXC specific configurations. These configurations only
+    work when using the `lxc` execution driver.
+  - **Memory** - Memory limit in bytes.
+  - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap.
+    You must use this with `memory` and make the swap value larger than `memory`.
+  - **CpuShares** - An integer value containing the container's CPU shares
+    (i.e., the relative weight vs. other containers).
+  - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use.
+  - **PidMode** - Set the PID (Process) Namespace mode for the container;
+    `"container:<name|id>"`: joins another container's PID namespace;
+    `"host"`: use the host's PID namespace inside the container.
+  - **PortBindings** - A map of exposed container ports and the host port they
+    should map to. A JSON object in the form
+    `{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`.
+    Take note that `port` is specified as a string and not an integer value.
+  - **PublishAllPorts** - Allocates a random host port for all of a container's
+    exposed ports. Specified as a boolean value.
+  - **Privileged** - Gives the container full access to the host. Specified as
+    a boolean value.
+  - **ReadonlyRootfs** - Mount the container's root filesystem as read only.
+    Specified as a boolean value.
+  - **Dns** - A list of DNS servers for the container to use.
+  - **DnsSearch** - A list of DNS search domains.
+  - **ExtraHosts** - A list of hostnames/IP mappings to add to the
+    container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`.
+  - **VolumesFrom** - A list of volumes to inherit from another container.
+    Specified in the form `<container name>[:<ro|rw>]`.
+  - **CapAdd** - A list of kernel capabilities to add to the container.
+  - **CapDrop** - A list of kernel capabilities to drop from the container.
+  - **RestartPolicy** – The behavior to apply when the container exits. The
+    value is an object with a `Name` property of either `"always"` to
+    always restart or `"on-failure"` to restart only when the container
+    exit code is non-zero. If `on-failure` is used, `MaximumRetryCount`
+    controls the number of times to retry before giving up.
+    The default is not to restart. (optional)
+    An ever-increasing delay (double the previous delay, starting at 100 ms)
+    is added before each restart to prevent flooding the server.
+  - **NetworkMode** - Sets the networking mode for the container. Supported
+    values are: `bridge`, `host`, `none`, and `container:<name|id>`
+  - **Devices** - A list of devices to add to the container specified as a JSON object in the
+    form
+    `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}`
+  - **Ulimits** - A list of ulimits to set in the container, specified as
+    `{ "Name": <name>, "Soft": <soft limit>, "Hard": <hard limit> }`, for example:
+    `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }`
+  - **SecurityOpt**: A list of string values to customize labels for MLS
+    systems, such as SELinux.
+  - **LogConfig** - Log configuration for the container, specified as a JSON object in the form
+    `{ "Type": "<driver_name>", "Config": {"key1": "val1"}}`.
+ Available types: `json-file`, `syslog`, `journald`, `none`. + `json-file` logging driver. + - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist. + +**Query parameters**: + +- **name** – Assign the specified name to the container. Must + match `/?[a-zA-Z0-9_-]+`. + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such container +- **406** – impossible to attach (container not running) +- **409** – conflict +- **500** – server error + +#### Inspect a container + +`GET /containers/(id or name)/json` + +Return low-level information on the container `id` + +**Example request**: + + GET /v1.18/containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "AppArmorProfile": "", + "Args": [ + "-c", + "exit 9" + ], + "Config": { + "AttachStderr": true, + "AttachStdin": false, + "AttachStdout": true, + "Cmd": [ + "/bin/sh", + "-c", + "exit 9" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": null, + "Hostname": "ba033ac44011", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "MacAddress": "", + "NetworkDisabled": false, + "OnBuild": null, + "OpenStdin": false, + "PortSpecs": null, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "" + }, + "Created": "2015-01-06T15:47:31.485331387Z", + "Driver": "devicemapper", + "ExecDriver": "native-0.2", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "CapAdd": null, + "CapDrop": null, + "ContainerIDFile": "", + "CpusetCpus": "", + "CpuShares": 0, + "Devices": [], + "Dns": null, + "DnsSearch": null, + "ExtraHosts": null, + "IpcMode": "", + "Links": null, + "LxcConf": [], + "Memory": 0, + "MemorySwap": 0, + "NetworkMode": "bridge", + "PidMode": "", + "PortBindings": {}, + "Privileged": false, + "ReadonlyRootfs": false, + "PublishAllPorts": false, + "RestartPolicy": { + "MaximumRetryCount": 2, + "Name": "on-failure" + }, + "LogConfig": { + "Config": null, + "Type": "json-file" + }, + "SecurityOpt": null, + "VolumesFrom": null, + "Ulimits": [{}] + }, + "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "Gateway": "", + "IPAddress": "", + "IPPrefixLen": 0, + "MacAddress": "", + "PortMapping": null, + "Ports": null + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Paused": false, + "Pid": 
0,
+        "Restarting": false,
+        "Running": true,
+        "StartedAt": "2015-01-06T15:47:32.072697474Z"
+      },
+      "Volumes": {},
+      "VolumesRW": {}
+    }
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### List processes running inside a container
+
+`GET /containers/(id or name)/top`
+
+List processes running inside the container `id`. On Unix systems this
+is done by running the `ps` command. This endpoint is not
+supported on Windows.
+
+**Example request**:
+
+    GET /v1.18/containers/4fa6e0f0c678/top HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+      "Titles" : [
+        "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD"
+      ],
+      "Processes" : [
+        [
+          "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash"
+        ],
+        [
+          "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10"
+        ]
+      ]
+    }
+
+**Example request**:
+
+    GET /v1.18/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+      "Titles" : [
+        "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND"
+      ],
+      "Processes" : [
+        [
+          "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash"
+        ],
+        [
+          "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10"
+        ]
+      ]
+    }
+
+**Query parameters**:
+
+- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef`
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Get container logs
+
+`GET /containers/(id or name)/logs`
+
+Get `stdout` and `stderr` logs from the container `id`
+
+> **Note**:
+> This endpoint works only for containers with the `json-file` or `journald` logging drivers.
+
+**Example request**:
+
+    GET /v1.18/containers/4fa6e0f0c678/logs?stderr=1&stdout=1&timestamps=1&follow=1&tail=10 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 101 UPGRADED
+    Content-Type: application/vnd.docker.raw-stream
+    Connection: Upgrade
+    Upgrade: tcp
+
+    {% raw %}
+    {{ STREAM }}
+    {% endraw %}
+
+**Query parameters**:
+
+- **follow** – 1/True/true or 0/False/false, return stream. Default `false`.
+- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`.
+- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`.
+- **timestamps** – 1/True/true or 0/False/false, print timestamps for
+  every log line. Default `false`.
+- **tail** – Output specified number of lines at the end of logs: `all` or `<number>`. Default `all`.
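+
+When the container was created without a TTY, this stream is multiplexed using
+the framing described under [Stream details](#attach-to-a-container) below. A
+minimal Go sketch of a reader for it (the `demuxStream` helper is illustrative,
+not part of any client library; it assumes `conn` is the hijacked connection
+and the standard `encoding/binary`, `io`, and `os` imports):
+
+    // demuxStream copies each multiplexed frame to stdout or stderr.
+    func demuxStream(conn io.Reader) error {
+        var header [8]byte
+        for {
+            if _, err := io.ReadFull(conn, header[:]); err != nil {
+                if err == io.EOF {
+                    return nil // stream ended cleanly
+                }
+                return err
+            }
+            // Bytes 4-7 hold the frame size as a big-endian uint32.
+            size := int64(binary.BigEndian.Uint32(header[4:8]))
+            dst := os.Stdout
+            if header[0] == 2 { // STREAM_TYPE 2 is stderr
+                dst = os.Stderr
+            }
+            if _, err := io.CopyN(dst, conn, size); err != nil {
+                return err
+            }
+        }
+    }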
+ +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **404** – no such container +- **500** – server error + +#### Inspect changes on a container's filesystem + +`GET /containers/(id or name)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /v1.18/containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Values for `Kind`: + +- `0`: Modify +- `1`: Add +- `2`: Delete + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Export a container + +`GET /containers/(id or name)/export` + +Export the contents of container `id` + +**Example request**: + + GET /v1.18/containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container stats based on resource usage + +`GET /containers/(id or name)/stats` + +This endpoint returns a live stream of a container's resource usage statistics. + +**Example request**: + + GET /v1.18/containers/redis1/stats HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "read" : "2015-01-08T22:57:31.547920715Z", + "network" : { + "rx_dropped" : 0, + "rx_bytes" : 648, + "rx_errors" : 0, + "tx_packets" : 8, + "tx_dropped" : 0, + "rx_packets" : 8, + "tx_errors" : 0, + "tx_bytes" : 648 + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit" : 67108864, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats" : {}, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 16970827, + 1839451, + 7107380, + 10571290 + ], + "usage_in_usermode" : 10000000, + "total_usage" : 36488948, + "usage_in_kernelmode" : 20000000 + }, + "system_cpu_usage" : 20091722000000000, + "throttling_data" : {} + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Resize a container TTY + +`POST /containers/(id or name)/resize?h=&w=` + +Resize the TTY for container with `id`. You must restart the container for the resize to take effect. 
+ +**Example request**: + + POST /v1.18/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **200** – no error +- **404** – No such container +- **500** – Cannot resize container + +#### Start a container + +`POST /containers/(id or name)/start` + +Start the container `id` + +> **Note**: +> For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body. +> See [create a container](#create-a-container) for details. + +**Example request**: + + POST /v1.18/containers/e90e34656806/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +#### Stop a container + +`POST /containers/(id or name)/stop` + +Stop the container `id` + +**Example request**: + + POST /v1.18/containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +#### Restart a container + +`POST /containers/(id or name)/restart` + +Restart the container `id` + +**Example request**: + + POST /v1.18/containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Kill a container + +`POST /containers/(id or name)/kill` + +Kill the container `id` + +**Example request**: + + POST /v1.18/containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **signal** - Signal to send to the container: integer or string like `SIGINT`. + When not set, `SIGKILL` is assumed and the call waits for the container to exit. 
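+
+For example, to send `SIGINT` instead of the default `SIGKILL`:
+
+    POST /v1.18/containers/e90e34656806/kill?signal=SIGINT HTTP/1.1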
+ +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Rename a container + +`POST /containers/(id or name)/rename` + +Rename the container `id` to a `new_name` + +**Example request**: + + POST /v1.18/containers/e90e34656806/rename?name=new_name HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **name** – new name for the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **409** - conflict name already assigned +- **500** – server error + +#### Pause a container + +`POST /containers/(id or name)/pause` + +Pause the container `id` + +**Example request**: + + POST /v1.18/containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Unpause a container + +`POST /containers/(id or name)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /v1.18/containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Attach to a container + +`POST /containers/(id or name)/attach` + +Attach to the container `id` + +**Example request**: + + POST /v1.18/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +**Stream details**: + +When using the TTY setting is enabled in +[`POST /containers/create` +](#create-a-container), +the stream is the raw data from the process PTY and client's `stdin`. +When the TTY is disabled, then the stream is multiplexed to separate +`stdout` and `stderr`. + +The format is a **Header** and a **Payload** (frame). + +**HEADER** + +The header contains the information which the stream writes (`stdout` or +`stderr`). It also contains the size of the associated frame encoded in the +last four bytes (`uint32`). + +It is encoded on the first eight bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + +`STREAM_TYPE` can be: + +- 0: `stdin` (is written on `stdout`) +- 1: `stdout` +- 2: `stderr` + +`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of +the `uint32` size encoded as big endian. + +**PAYLOAD** + +The payload is the raw stream. + +**IMPLEMENTATION** + +The simplest way to implement the Attach protocol is the following: + + 1. Read eight bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. 
Read the extracted size and output it on the correct output. + 5. Goto 1. + +#### Attach to a container (websocket) + +`GET /containers/(id or name)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /v1.18/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Wait a container + +`POST /containers/(id or name)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /v1.18/containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Remove a container + +`DELETE /containers/(id or name)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /v1.18/containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default `false`. +- **force** - 1/True/true or 0/False/false, Kill then remove the container. + Default `false`. +- **link** - 1/True/true or 0/False/false, Remove the specified + link associated to the container. Default `false`. 
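+
+For example, to force-remove a running container together with its volumes:
+
+    DELETE /v1.18/containers/16253994b7c4?v=1&force=1 HTTP/1.1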
+ +**Status codes**: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **409** – conflict +- **500** – server error + +#### Copy files or folders from a container + +`POST /containers/(id or name)/copy` + +Copy files or folders of container `id` + +**Example request**: + + POST /v1.18/containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### 2.2 Images + +#### List Images + +`GET /images/json` + +**Example request**: + + GET /v1.18/images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275 + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135 + } + ] + +**Example request, with digest information**: + + GET /v1.18/images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728 + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. 
Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label +- **filter** - only return images with the specified name + +#### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /v1.18/build HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1/5..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. To do this, its value must be +the path to the alternate build instructions file to use. + +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. After that, +each instruction is run one-by-one until the ID of the new image is output. + +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the Dockerfile. This is + ignored if `remote` is specified and points to an individual filename. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. +- **remote** – A Git repository URI or HTTP/HTTPS context URI. If the + URI points to a single text file, the file's contents are placed into + a file called `Dockerfile` and the image is built from that file. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). + +**Request Headers**: + +- **Content-type** – Set to `"application/x-tar"`. +- **X-Registry-Config** – base64-encoded ConfigFile object + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.18/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. 
+
+- **fromSrc** – Source to import. The value may be a URL from which the image
+  can be retrieved or `-` to read the image from the request body.
+- **repo** – Repository name.
+- **tag** – Tag. If empty when pulling an image, this causes all tags
+  for the given image to be pulled.
+
+**Request Headers**:
+
+- **X-Registry-Auth** – base64-encoded AuthConfig object
+
+**Status codes**:
+
+- **200** – no error
+- **404** - repository does not exist or no read access
+- **500** – server error
+
+
+
+#### Inspect an image
+
+`GET /images/(name)/json`
+
+Return low-level information on the image `name`
+
+**Example request**:
+
+    GET /v1.18/images/ubuntu/json HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "Created": "2013-03-23T22:24:18.818426-07:00",
+        "Container": "3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0",
+        "ContainerConfig": {
+            "Hostname": "",
+            "User": "",
+            "AttachStdin": false,
+            "AttachStdout": false,
+            "AttachStderr": false,
+            "Tty": true,
+            "OpenStdin": true,
+            "StdinOnce": false,
+            "Env": null,
+            "Cmd": ["/bin/bash"],
+            "Dns": null,
+            "Image": "ubuntu",
+            "Labels": {
+                "com.example.vendor": "Acme",
+                "com.example.license": "GPL",
+                "com.example.version": "1.0"
+            },
+            "Volumes": null,
+            "VolumesFrom": "",
+            "WorkingDir": ""
+        },
+        "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
+        "Parent": "27cf784147099545",
+        "Size": 6824592
+    }
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such image
+- **500** – server error
+
+#### Get the history of an image
+
+`GET /images/(name)/history`
+
+Return the history of the image `name`
+
+**Example request**:
+
+    GET /v1.18/images/ubuntu/history HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+        {
+            "Id": "b750fe79269d",
+            "Created": 1364102658,
+            "CreatedBy": "/bin/bash"
+        },
+        {
+            "Id": "27cf78414709",
+            "Created": 1364068391,
+            "CreatedBy": ""
+        }
+    ]
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such image
+- **500** – server error
+
+#### Push an image on the registry
+
+`POST /images/(name)/push`
+
+Push the image `name` on the registry
+
+**Example request**:
+
+    POST /v1.18/images/test/push HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {"status": "Pushing..."}
+    {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}
+    {"error": "Invalid..."}
+    ...
+
+If you wish to push an image on to a private registry, that image must already
+be tagged into a repository which references that registry's `hostname` and
+`port`. This repository name should then be used in the URL. This mirrors the
+command line's flow.
+
+**Example request**:
+
+    POST /v1.18/images/registry.acme.com:5000/test/push HTTP/1.1
+
+
+**Query parameters**:
+
+- **tag** – The tag to associate with the image on the registry. This is optional.
+
+**Request Headers**:
+
+- **X-Registry-Auth** – base64-encoded AuthConfig object.
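+
+The header value is the base64 encoding of a JSON AuthConfig object. As a
+minimal sketch of how a client might construct it (the Go struct below is an
+assumption modeled on the `POST /auth` payload shown later in this document;
+note that some daemon versions expect URL-safe base64 instead):
+
+```go
+package main
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+)
+
+// authConfig is a hypothetical mirror of the AuthConfig JSON object; the
+// field names follow the POST /auth example in this document.
+type authConfig struct {
+	Username      string `json:"username"`
+	Password      string `json:"password"`
+	Email         string `json:"email"`
+	ServerAddress string `json:"serveraddress"`
+}
+
+func main() {
+	buf, err := json.Marshal(authConfig{
+		Username:      "hannibal",
+		Password:      "xxxx",
+		Email:         "hannibal@a-team.com",
+		ServerAddress: "https://index.docker.io/v1/",
+	})
+	if err != nil {
+		panic(err)
+	}
+	// Base64-encode the JSON and send the result as X-Registry-Auth.
+	fmt.Println("X-Registry-Auth:", base64.StdEncoding.EncodeToString(buf))
+}
+```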
+ +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /v1.18/images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 Created + +**Query parameters**: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /v1.18/images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +**Query parameters**: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. + +**Example request**: + + GET /v1.18/images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "star_count": 12, + "is_official": false, + "name": "wma55/u1210sshd", + "is_automated": false, + "description": "" + }, + { + "star_count": 10, + "is_official": false, + "name": "jdswinbank/sshd", + "is_automated": false, + "description": "" + }, + { + "star_count": 18, + "is_official": false, + "name": "vgauthier/sshd", + "is_automated": false, + "description": "" + } + ... 
+ ] + +**Query parameters**: + +- **term** – term to search + +**Status codes**: + +- **200** – no error +- **500** – server error + +### 2.3 Misc + +#### Check auth configuration + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /v1.18/auth HTTP/1.1 + Content-Type: application/json + + { + "username": "hannibal", + "password": "xxxx", + "email": "hannibal@a-team.com", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **204** – no error +- **500** – server error + +#### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /v1.18/info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Containers": 11, + "Debug": 0, + "DockerRootDir": "/var/lib/docker", + "Driver": "btrfs", + "DriverStatus": [[""]], + "ExecutionDriver": "native-0.1", + "HttpProxy": "http://test:test@localhost:8080", + "HttpsProxy": "https://test:test@localhost:8080", + "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "IPv4Forwarding": 1, + "Images": 16, + "IndexServerAddress": "https://index.docker.io/v1/", + "InitPath": "/usr/bin/docker", + "InitSha1": "", + "KernelVersion": "3.12.0-1-amd64", + "Labels": [ + "storage=ssd" + ], + "MemTotal": 2099236864, + "MemoryLimit": 1, + "NCPU": 1, + "NEventsListener": 0, + "NFd": 11, + "NGoroutines": 21, + "Name": "prod-server-42", + "NoProxy": "9.81.1.160", + "OperatingSystem": "Boot2Docker", + "RegistryConfig": { + "IndexConfigs": { + "docker.io": { + "Mirrors": null, + "Name": "docker.io", + "Official": true, + "Secure": true + } + }, + "InsecureRegistryCIDRs": [ + "127.0.0.0/8" + ] + }, + "SwapLimit": 0, + "SystemTime": "2015-03-10T11:11:23.730591467-07:00" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /v1.18/version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.5.0", + "Os": "linux", + "KernelVersion": "3.18.5-tinycore64", + "GoVersion": "go1.4.1", + "GitCommit": "a8a31ef", + "Arch": "amd64", + "ApiVersion": "1.18" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /v1.18/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.18/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Volumes": { + "/tmp": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** 
– source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **500** – server error + +#### Monitor Docker's events + +`GET /events` + +Get container events from docker, in real time via streaming. + +Docker containers report the following events: + + create, destroy, die, exec_create, exec_start, export, kill, oom, pause, restart, start, stop, unpause + +Docker images report the following events: + + untag, delete + +**Example request**: + + GET /v1.18/events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} + {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970} + +**Query parameters**: + +- **since** – Timestamp. Show all events created since timestamp and then stream +- **until** – Timestamp. Show events created until given timestamp and stop streaming +- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: + - `container=`; -- container to filter + - `event=`; -- event to filter + - `image=`; -- image to filter + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.18/images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.18/images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. +See the [image tarball format](#image-tarball-format) for more details. 
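+
+As a client-side sketch, streaming a saved image archive to this endpoint over
+the daemon's default Unix socket might look like the following (the archive
+file name is a placeholder, and error handling is abbreviated):
+
+```go
+package main
+
+import (
+	"context"
+	"net"
+	"net/http"
+	"os"
+)
+
+func main() {
+	// Dial the daemon's Unix socket instead of TCP; the URL host below is
+	// ignored once the connection goes through this dialer.
+	tr := &http.Transport{
+		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
+		},
+	}
+	client := &http.Client{Transport: tr}
+
+	f, err := os.Open("images.tar") // hypothetical archive from `docker save`
+	if err != nil {
+		panic(err)
+	}
+	defer f.Close()
+
+	req, err := http.NewRequest("POST", "http://docker/v1.18/images/load", f)
+	if err != nil {
+		panic(err)
+	}
+	req.Header.Set("Content-Type", "application/x-tar")
+	resp, err := client.Do(req)
+	if err != nil {
+		panic(err)
+	}
+	resp.Body.Close()
+}
+```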
+ +**Example request** + + POST /v1.18/images/load + Content-Type: application/x-tar + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing these files: + +- `VERSION`: currently `1.0` - the file format version +- `json`: detailed layer information, similar to `docker inspect layer_id` +- `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, the tarball should also include a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +#### Exec Create + +`POST /containers/(id or name)/exec` + +Sets up an exec instance in a running container `id` + +**Example request**: + + POST /v1.18/containers/e90e34656806/exec HTTP/1.1 + Content-Type: application/json + + { + "AttachStdin": true, + "AttachStdout": true, + "AttachStderr": true, + "Cmd": ["sh"], + "Tty": true + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id": "f90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. +- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. +- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. +- **Cmd** - Command to run specified as a string or an array of strings. + + +**Status codes**: + +- **201** – no error +- **404** – no such container + +#### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up `exec` instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. + +**Example request**: + + POST /v1.18/exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach": false, + "Tty": false + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {% raw %} + {{ STREAM }} + {% endraw %} + +**JSON parameters**: + +- **Detach** - Detach from the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance + +**Stream details**: + +Similar to the stream behavior of `POST /containers/(id or name)/attach` API + +#### Exec Resize + +`POST /exec/(id)/resize` + +Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters. +This API is valid only if `tty` was specified as part of creating and starting the `exec` command. + +**Example request**: + + POST /v1.18/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1 + Content-Type: text/plain + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: text/plain + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **201** – no error +- **404** – no such exec instance + +#### Exec Inspect + +`GET /exec/(id)/json` + +Return low-level information about the `exec` command `id`. 
+
+**Example request**:
+
+    GET /v1.18/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: text/plain
+
+    {
+        "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39",
+        "Running" : false,
+        "ExitCode" : 2,
+        "ProcessConfig" : {
+            "privileged" : false,
+            "user" : "",
+            "tty" : false,
+            "entrypoint" : "sh",
+            "arguments" : [
+                "-c",
+                "exit 2"
+            ]
+        },
+        "OpenStdin" : false,
+        "OpenStderr" : false,
+        "OpenStdout" : false,
+        "Container" : {
+            "State" : {
+                "Running" : true,
+                "Paused" : false,
+                "Restarting" : false,
+                "OOMKilled" : false,
+                "Pid" : 3650,
+                "ExitCode" : 0,
+                "Error" : "",
+                "StartedAt" : "2014-11-17T22:26:03.717657531Z",
+                "FinishedAt" : "0001-01-01T00:00:00Z"
+            },
+            "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c",
+            "Created" : "2014-11-17T22:26:03.626304998Z",
+            "Path" : "date",
+            "Args" : [],
+            "Config" : {
+                "Hostname" : "8f177a186b97",
+                "Domainname" : "",
+                "User" : "",
+                "AttachStdin" : false,
+                "AttachStdout" : false,
+                "AttachStderr" : false,
+                "PortSpecs": null,
+                "ExposedPorts" : null,
+                "Tty" : false,
+                "OpenStdin" : false,
+                "StdinOnce" : false,
+                "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ],
+                "Cmd" : [
+                    "date"
+                ],
+                "Image" : "ubuntu",
+                "Volumes" : null,
+                "WorkingDir" : "",
+                "Entrypoint" : null,
+                "NetworkDisabled" : false,
+                "MacAddress" : "",
+                "OnBuild" : null,
+                "SecurityOpt" : null
+            },
+            "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5",
+            "NetworkSettings" : {
+                "IPAddress" : "172.17.0.2",
+                "IPPrefixLen" : 16,
+                "MacAddress" : "02:42:ac:11:00:02",
+                "Gateway" : "172.17.42.1",
+                "Bridge" : "docker0",
+                "PortMapping" : null,
+                "Ports" : {}
+            },
+            "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf",
+            "HostnamePath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname",
+            "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts",
+            "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log",
+            "Name" : "/test",
+            "Driver" : "aufs",
+            "ExecDriver" : "native-0.2",
+            "MountLabel" : "",
+            "ProcessLabel" : "",
+            "AppArmorProfile" : "",
+            "RestartCount" : 0,
+            "Volumes" : {},
+            "VolumesRW" : {}
+        }
+    }
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such exec instance
+- **500** - server error
+
+## 3. Going further
+
+### 3.1 Inside `docker run`
+
+As an example, the `docker run` command line makes the following API calls:
+
+- Create the container
+
+- If the status code is 404, it means the image doesn't exist:
+    - Try to pull it.
+    - Then, retry to create the container.
+
+- Start the container.
+
+- If you are not in detached mode:
+    - Attach to the container, using `logs=1` (to have `stdout` and
+      `stderr` from the container's start) and `stream=1`
+
+- If in detached mode or only `stdin` is attached, display the container's id.
+
+### 3.2 Hijacking
+
+In this version of the API, `/attach` uses hijacking to transport `stdin`,
+`stdout`, and `stderr` on the same socket.
+
+To hint potential proxies about connection hijacking, the Docker client sends
+connection upgrade headers, similarly to websockets.
+
+    Upgrade: tcp
+    Connection: Upgrade
+
+When the Docker daemon detects the `Upgrade` header, it switches its status code
+from **200 OK** to **101 UPGRADED** and resends the same headers.
+
+This might change in the future.
+
+### 3.3 CORS Requests
+
+To enable cross-origin requests to the Engine API, pass values to the
+`--api-cors-header` flag when running Docker in daemon mode. Setting it to
+`*` (asterisk) allows all origins; leaving it unset or blank leaves CORS
+disabled.
+
+    $ docker -d -H="192.168.1.9:2375" --api-cors-header="http://foo.bar"
diff --git a/vendor/github.com/moby/moby/docs/api/v1.19.md b/vendor/github.com/moby/moby/docs/api/v1.19.md
new file mode 100644
index 000000000..bd5f09f67
--- /dev/null
+++ b/vendor/github.com/moby/moby/docs/api/v1.19.md
@@ -0,0 +1,2241 @@
+---
+title: "Engine API v1.19"
+description: "API Documentation for Docker"
+keywords: "API, Docker, rcli, REST, documentation"
+redirect_from:
+- /engine/reference/api/docker_remote_api_v1.19/
+- /reference/api/docker_remote_api_v1.19/
+---
+
+
+
+## 1. Brief introduction
+
+ - The daemon listens on `unix:///var/run/docker.sock` but you can
+   [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket).
+ - The API is mostly RESTful. However, for some complex commands, like `attach`
+   or `pull`, the HTTP connection is hijacked to transport `stdout`,
+   `stdin` and `stderr`.
+ - When the client API version is newer than the daemon's, these calls return an HTTP
+   `400 Bad Request` error message.
+
+## 2. Endpoints
+
+### 2.1 Containers
+
+#### List containers
+
+`GET /containers/json`
+
+List containers
+
+**Example request**:
+
+    GET /v1.19/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+        {
+            "Id": "8dfafdbc3a40",
+            "Names":["/boring_feynman"],
+            "Image": "ubuntu:latest",
+            "Command": "echo 1",
+            "Created": 1367854155,
+            "Status": "Exit 0",
+            "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}],
+            "Labels": {
+                "com.example.vendor": "Acme",
+                "com.example.license": "GPL",
+                "com.example.version": "1.0"
+            },
+            "SizeRw": 12288,
+            "SizeRootFs": 0
+        },
+        {
+            "Id": "9cd87474be90",
+            "Names":["/coolName"],
+            "Image": "ubuntu:latest",
+            "Command": "echo 222222",
+            "Created": 1367854155,
+            "Status": "Exit 0",
+            "Ports": [],
+            "Labels": {},
+            "SizeRw": 12288,
+            "SizeRootFs": 0
+        },
+        {
+            "Id": "3176a2479c92",
+            "Names":["/sleepy_dog"],
+            "Image": "ubuntu:latest",
+            "Command": "echo 3333333333333333",
+            "Created": 1367854154,
+            "Status": "Exit 0",
+            "Ports":[],
+            "Labels": {},
+            "SizeRw":12288,
+            "SizeRootFs":0
+        },
+        {
+            "Id": "4cb07b47f9fb",
+            "Names":["/running_cat"],
+            "Image": "ubuntu:latest",
+            "Command": "echo 444444444444444444444444444444444",
+            "Created": 1367854152,
+            "Status": "Exit 0",
+            "Ports": [],
+            "Labels": {},
+            "SizeRw": 12288,
+            "SizeRootFs": 0
+        }
+    ]
+
+**Query parameters**:
+
+- **all** – 1/True/true or 0/False/false, Show all containers.
+  Only running containers are shown by default (i.e., this defaults to false)
+- **limit** – Show `limit` last created containers, including non-running ones.
+- **since** – Show only containers created since Id, including non-running ones.
+- **before** – Show only containers created before Id, including non-running ones.
+- **size** – 1/True/true or 0/False/false, Show the containers' sizes
+- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list.
  Available filters:
+  - `exited=<int>` – containers with exit code of `<int>`
+  - `status=`(`restarting`|`running`|`paused`|`exited`)
+  - `label=key` or `label="key=value"` of a container label
+
+**Status codes**:
+
+- **200** – no error
+- **400** – bad parameter
+- **500** – server error
+
+#### Create a container
+
+`POST /containers/create`
+
+Create a container
+
+**Example request**:
+
+    POST /v1.19/containers/create HTTP/1.1
+    Content-Type: application/json
+
+    {
+        "Hostname": "",
+        "Domainname": "",
+        "User": "",
+        "AttachStdin": false,
+        "AttachStdout": true,
+        "AttachStderr": true,
+        "Tty": false,
+        "OpenStdin": false,
+        "StdinOnce": false,
+        "Env": [
+            "FOO=bar",
+            "BAZ=quux"
+        ],
+        "Cmd": [
+            "date"
+        ],
+        "Entrypoint": null,
+        "Image": "ubuntu",
+        "Labels": {
+            "com.example.vendor": "Acme",
+            "com.example.license": "GPL",
+            "com.example.version": "1.0"
+        },
+        "Volumes": {
+            "/volumes/data": {}
+        },
+        "WorkingDir": "",
+        "NetworkDisabled": false,
+        "MacAddress": "12:34:56:78:9a:bc",
+        "ExposedPorts": {
+            "22/tcp": {}
+        },
+        "HostConfig": {
+            "Binds": ["/tmp:/tmp"],
+            "Links": ["redis3:redis"],
+            "LxcConf": {"lxc.utsname":"docker"},
+            "Memory": 0,
+            "MemorySwap": 0,
+            "CpuShares": 512,
+            "CpuPeriod": 100000,
+            "CpuQuota": 50000,
+            "CpusetCpus": "0,1",
+            "CpusetMems": "0,1",
+            "BlkioWeight": 300,
+            "OomKillDisable": false,
+            "PidMode": "",
+            "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] },
+            "PublishAllPorts": false,
+            "Privileged": false,
+            "ReadonlyRootfs": false,
+            "Dns": ["8.8.8.8"],
+            "DnsSearch": [""],
+            "ExtraHosts": null,
+            "VolumesFrom": ["parent", "other:ro"],
+            "CapAdd": ["NET_ADMIN"],
+            "CapDrop": ["MKNOD"],
+            "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 },
+            "NetworkMode": "bridge",
+            "Devices": [],
+            "Ulimits": [{}],
+            "LogConfig": { "Type": "json-file", "Config": {} },
+            "SecurityOpt": [],
+            "CgroupParent": ""
+        }
+    }
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: application/json
+
+    {
+        "Id":"e90e34656806",
+        "Warnings":[]
+    }
+
+**JSON parameters**:
+
+- **Hostname** - A string value containing the hostname to use for the
+  container.
+- **Domainname** - A string value containing the domain name to use
+  for the container.
+- **User** - A string value specifying the user inside the container.
+- **AttachStdin** - Boolean value, attaches to `stdin`.
+- **AttachStdout** - Boolean value, attaches to `stdout`.
+- **AttachStderr** - Boolean value, attaches to `stderr`.
+- **Tty** - Boolean value, attach standard streams to a `tty`, including `stdin` if it is not closed.
+- **OpenStdin** - Boolean value, opens `stdin`.
+- **StdinOnce** - Boolean value, close `stdin` after the first attached client disconnects.
+- **Env** - A list of environment variables in the form of `["VAR=value", ...]`
+- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }`
+- **Cmd** - Command to run specified as a string or an array of strings.
+- **Entrypoint** - Set the entry point for the container as a string or an array
+  of strings.
+- **Image** - A string specifying the image name to use for the container.
+- **Volumes** - An object mapping mount point paths (strings) inside the
+  container to empty objects.
+- **WorkingDir** - A string specifying the working directory for commands to
+  run in.
+
+- **NetworkDisabled** - Boolean value, when true disables networking for the
+  container
+- **ExposedPorts** - An object mapping ports to an empty object in the form of:
+  `"ExposedPorts": { "<port>/<tcp|udp>": {} }`
+- **HostConfig**
+    - **Binds** – A list of bind-mounts for this container. Each item is a string in one of these forms:
+        + `host-src:container-dest` to bind-mount a host path into the
+          container. Both `host-src`, and `container-dest` must be an
+          _absolute_ path.
+        + `host-src:container-dest:ro` to make the bind-mount read-only
+          inside the container. Both `host-src`, and `container-dest` must be
+          an _absolute_ path.
+    - **Links** - A list of links for the container. Each link entry should be
+      in the form of `container_name:alias`.
+    - **LxcConf** - LXC specific configurations. These configurations only
+      work when using the `lxc` execution driver.
+    - **Memory** - Memory limit in bytes.
+    - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap.
+      You must use this with `memory` and make the swap value larger than `memory`.
+    - **CpuShares** - An integer value containing the container's CPU Shares
+      (i.e., the relative weight vs. other containers).
+    - **CpuPeriod** - The length of a CPU period in microseconds.
+    - **CpuQuota** - Microseconds of CPU time that the container can get in a CPU period.
+    - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use.
+    - **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
+    - **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000.
+    - **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not.
+    - **PidMode** - Set the PID (Process) Namespace mode for the container;
+      `"container:<name|id>"`: joins another container's PID namespace
+      `"host"`: use the host's PID namespace inside the container
+    - **PortBindings** - A map of exposed container ports and the host port they
+      should map to. A JSON object in the form
+      `{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`
+      Take note that `port` is specified as a string and not an integer value.
+    - **PublishAllPorts** - Allocates a random host port for all of a container's
+      exposed ports. Specified as a boolean value.
+    - **Privileged** - Gives the container full access to the host. Specified as
+      a boolean value.
+    - **ReadonlyRootfs** - Mount the container's root filesystem as read only.
+      Specified as a boolean value.
+    - **Dns** - A list of DNS servers for the container to use.
+    - **DnsSearch** - A list of DNS search domains
+    - **ExtraHosts** - A list of hostnames/IP mappings to add to the
+      container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`.
+    - **VolumesFrom** - A list of volumes to inherit from another container.
+      Specified in the form `<container name>[:<ro|rw>]`
+    - **CapAdd** - A list of kernel capabilities to add to the container.
+    - **CapDrop** - A list of kernel capabilities to drop from the container.
+    - **RestartPolicy** – The behavior to apply when the container exits. The
+      value is an object with a `Name` property of either `"always"` to
+      always restart or `"on-failure"` to restart only when the container
+      exit code is non-zero. If `on-failure` is used, `MaximumRetryCount`
+      controls the number of times to retry before giving up.
+      The default is not to restart. (optional)
+      An ever increasing delay (double the previous delay, starting at 100 ms)
+      is added before each restart to prevent flooding the server.
+    - **NetworkMode** - Sets the networking mode for the container. Supported
+      values are: `bridge`, `host`, `none`, and `container:<name|id>`
+    - **Devices** - A list of devices to add to the container specified as a JSON object in the
+      form
+      `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}`
+    - **Ulimits** - A list of ulimits to set in the container, specified as
+      `{ "Name": <name>, "Soft": <soft limit>, "Hard": <hard limit> }`, for example:
+      `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }`
+    - **SecurityOpt**: A list of string values to customize labels for MLS
+      systems, such as SELinux.
+    - **LogConfig** - Log configuration for the container, specified as a JSON object in the form
+      `{ "Type": "<driver_name>", "Config": {"key1": "val1"} }`.
+      Available types: `json-file`, `syslog`, `journald`, `none`.
+      `syslog` available options are: `address`.
+    - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist.
+
+**Query parameters**:
+
+- **name** – Assign the specified name to the container. Must
+  match `/?[a-zA-Z0-9_-]+`.
+
+**Status codes**:
+
+- **201** – no error
+- **400** – bad parameter
+- **404** – no such container
+- **406** – impossible to attach (container not running)
+- **409** – conflict
+- **500** – server error
+
+#### Inspect a container
+
+`GET /containers/(id or name)/json`
+
+Return low-level information on the container `id`
+
+**Example request**:
+
+    GET /v1.19/containers/4fa6e0f0c678/json HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "AppArmorProfile": "",
+        "Args": [
+            "-c",
+            "exit 9"
+        ],
+        "Config": {
+            "AttachStderr": true,
+            "AttachStdin": false,
+            "AttachStdout": true,
+            "Cmd": [
+                "/bin/sh",
+                "-c",
+                "exit 9"
+            ],
+            "Domainname": "",
+            "Entrypoint": null,
+            "Env": [
+                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+            ],
+            "ExposedPorts": null,
+            "Hostname": "ba033ac44011",
+            "Image": "ubuntu",
+            "Labels": {
+                "com.example.vendor": "Acme",
+                "com.example.license": "GPL",
+                "com.example.version": "1.0"
+            },
+            "MacAddress": "",
+            "NetworkDisabled": false,
+            "OnBuild": null,
+            "OpenStdin": false,
+            "PortSpecs": null,
+            "StdinOnce": false,
+            "Tty": false,
+            "User": "",
+            "Volumes": null,
+            "WorkingDir": ""
+        },
+        "Created": "2015-01-06T15:47:31.485331387Z",
+        "Driver": "devicemapper",
+        "ExecDriver": "native-0.2",
+        "ExecIDs": null,
+        "HostConfig": {
+            "Binds": null,
+            "BlkioWeight": 0,
+            "CapAdd": null,
+            "CapDrop": null,
+            "ContainerIDFile": "",
+            "CpusetCpus": "",
+            "CpusetMems": "",
+            "CpuShares": 0,
+            "CpuPeriod": 100000,
+            "Devices": [],
+            "Dns": null,
+            "DnsSearch": null,
+            "ExtraHosts": null,
+            "IpcMode": "",
+            "Links": null,
+            "LxcConf": [],
+            "Memory": 0,
+            "MemorySwap": 0,
+            "OomKillDisable": false,
+            "NetworkMode": "bridge",
+            "PidMode": "",
+            "PortBindings": {},
+            "Privileged": false,
+            "ReadonlyRootfs": false,
+            "PublishAllPorts": false,
+            "RestartPolicy": {
+                "MaximumRetryCount": 2,
+                "Name": "on-failure"
+            },
+            "LogConfig": {
+                "Config": null,
+                "Type": "json-file"
+            },
+            "SecurityOpt": null,
+            "VolumesFrom": null,
+            "Ulimits": [{}]
+        },
+        "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname",
+        "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts",
+        "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log",
+        "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39",
+        "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2",
+        "MountLabel": "",
+        "Name": "/boring_euclid",
+        "NetworkSettings": {
+            "Bridge": "",
+            "Gateway": "",
+            "IPAddress": "",
+            "IPPrefixLen": 0,
+            "MacAddress": "",
+            "PortMapping": null,
+            "Ports": null
+        },
+        "Path": "/bin/sh",
+        "ProcessLabel": "",
+        "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf",
+        "RestartCount": 1,
+        "State": {
+            "Error": "",
+            "ExitCode": 9,
+            "FinishedAt": "2015-01-06T15:47:32.080254511Z",
+            "OOMKilled": false,
+            "Paused": false,
+            "Pid": 0,
+            "Restarting": false,
+            "Running": true,
+            "StartedAt": "2015-01-06T15:47:32.072697474Z"
+        },
+        "Volumes": {},
+        "VolumesRW": {}
+    }
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### List processes running inside a container
+
+`GET /containers/(id or name)/top`
+
+List processes running inside the container `id`. On Unix systems this
+is done by running the `ps` command. This endpoint is not
+supported on Windows.
+
+**Example request**:
+
+    GET /v1.19/containers/4fa6e0f0c678/top HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "Titles" : [
+            "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD"
+        ],
+        "Processes" : [
+            [
+                "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash"
+            ],
+            [
+                "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10"
+            ]
+        ]
+    }
+
+**Example request**:
+
+    GET /v1.19/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "Titles" : [
+            "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND"
+        ],
+        "Processes" : [
+            [
+                "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash"
+            ],
+            [
+                "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10"
+            ]
+        ]
+    }
+
+**Query parameters**:
+
+- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef`
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Get container logs
+
+`GET /containers/(id or name)/logs`
+
+Get `stdout` and `stderr` logs from the container `id`
+
+> **Note**:
+> This endpoint works only for containers with the `json-file` or `journald` logging drivers.
+
+**Example request**:
+
+    GET /v1.19/containers/4fa6e0f0c678/logs?stderr=1&stdout=1&timestamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 101 UPGRADED
+    Content-Type: application/vnd.docker.raw-stream
+    Connection: Upgrade
+    Upgrade: tcp
+
+    {% raw %}
+    {{ STREAM }}
+    {% endraw %}
+
+**Query parameters**:
+
+- **follow** – 1/True/true or 0/False/false, return stream. Default `false`.
+- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`.
+- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`.
+- **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp
+  will only output log-entries since that timestamp. Default: 0 (unfiltered)
+- **timestamps** – 1/True/true or 0/False/false, print timestamps for
+  every log line.
Default `false`. +- **tail** – Output specified number of lines at the end of logs: `all` or ``. Default all. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **404** – no such container +- **500** – server error + +#### Inspect changes on a container's filesystem + +`GET /containers/(id or name)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /v1.19/containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Values for `Kind`: + +- `0`: Modify +- `1`: Add +- `2`: Delete + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Export a container + +`GET /containers/(id or name)/export` + +Export the contents of container `id` + +**Example request**: + + GET /v1.19/containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container stats based on resource usage + +`GET /containers/(id or name)/stats` + +This endpoint returns a live stream of a container's resource usage statistics. + +**Example request**: + + GET /v1.19/containers/redis1/stats HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "read" : "2015-01-08T22:57:31.547920715Z", + "network" : { + "rx_dropped" : 0, + "rx_bytes" : 648, + "rx_errors" : 0, + "tx_packets" : 8, + "tx_dropped" : 0, + "rx_packets" : 8, + "tx_errors" : 0, + "tx_bytes" : 648 + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit" : 67108864, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats" : {}, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24472255, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100215355, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 739306590000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + }, + "precpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24350896, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100093996, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 9492140000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + } + } + +The `precpu_stats` is the cpu statistic of last read, which is used for calculating the cpu usage percent. It is not the exact copy of the `cpu_stats` field. 
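+
+As a sketch, the usual way to turn these two samples into a CPU percentage is
+to divide the container's total-usage delta by the system-usage delta and
+scale by the number of CPUs in `percpu_usage` (the helper below is
+hypothetical; its inputs correspond to `cpu_usage.total_usage` and
+`system_cpu_usage` from `precpu_stats` and `cpu_stats`):
+
+```go
+package main
+
+import "fmt"
+
+// cpuPercent compares the container's CPU usage delta against the system
+// CPU usage delta between two reads, scaled by the number of CPUs.
+func cpuPercent(prevTotal, prevSystem, curTotal, curSystem uint64, ncpus int) float64 {
+	cpuDelta := float64(curTotal) - float64(prevTotal)
+	systemDelta := float64(curSystem) - float64(prevSystem)
+	if systemDelta > 0 && cpuDelta > 0 {
+		return (cpuDelta / systemDelta) * float64(ncpus) * 100.0
+	}
+	return 0.0
+}
+
+func main() {
+	// Values taken from precpu_stats and cpu_stats in the example above;
+	// len(percpu_usage) is 4.
+	fmt.Printf("%f%%\n", cpuPercent(100093996, 9492140000000, 100215355, 739306590000000, 4))
+}
+```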
+
+**Query parameters**:
+
+- **stream** – 1/True/true or 0/False/false. When `true` (the default), stats are
+  streamed continuously; when `false`, stats are pulled once and the connection
+  is closed.
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Resize a container TTY
+
+`POST /containers/(id or name)/resize?h=<height>&w=<width>`
+
+Resize the TTY for container with `id`. You must restart the container for the resize to take effect.
+
+**Example request**:
+
+    POST /v1.19/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Length: 0
+    Content-Type: text/plain; charset=utf-8
+
+**Query parameters**:
+
+- **h** – height of `tty` session
+- **w** – width
+
+**Status codes**:
+
+- **200** – no error
+- **404** – No such container
+- **500** – Cannot resize container
+
+#### Start a container
+
+`POST /containers/(id or name)/start`
+
+Start the container `id`
+
+> **Note**:
+> For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body.
+> See [create a container](#create-a-container) for details.
+
+**Example request**:
+
+    POST /v1.19/containers/e90e34656806/start HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Status codes**:
+
+- **204** – no error
+- **304** – container already started
+- **404** – no such container
+- **500** – server error
+
+#### Stop a container
+
+`POST /containers/(id or name)/stop`
+
+Stop the container `id`
+
+**Example request**:
+
+    POST /v1.19/containers/e90e34656806/stop?t=5 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Query parameters**:
+
+- **t** – number of seconds to wait before killing the container
+
+**Status codes**:
+
+- **204** – no error
+- **304** – container already stopped
+- **404** – no such container
+- **500** – server error
+
+#### Restart a container
+
+`POST /containers/(id or name)/restart`
+
+Restart the container `id`
+
+**Example request**:
+
+    POST /v1.19/containers/e90e34656806/restart?t=5 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Query parameters**:
+
+- **t** – number of seconds to wait before killing the container
+
+**Status codes**:
+
+- **204** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Kill a container
+
+`POST /containers/(id or name)/kill`
+
+Kill the container `id`
+
+**Example request**:
+
+    POST /v1.19/containers/e90e34656806/kill HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Query parameters**:
+
+- **signal** - Signal to send to the container: integer or string like `SIGINT`.
+  When not set, `SIGKILL` is assumed and the call waits for the container to exit.
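+
+For example, to send `SIGTERM` (signal 15) instead of the default, either of
+these requests (reusing the container ID from the example above) should be
+equivalent:
+
+    POST /v1.19/containers/e90e34656806/kill?signal=SIGTERM HTTP/1.1
+    POST /v1.19/containers/e90e34656806/kill?signal=15 HTTP/1.1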
+
+**Status codes**:
+
+- **204** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Rename a container
+
+`POST /containers/(id or name)/rename`
+
+Rename the container `id` to a `new_name`
+
+**Example request**:
+
+    POST /v1.19/containers/e90e34656806/rename?name=new_name HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Query parameters**:
+
+- **name** – new name for the container
+
+**Status codes**:
+
+- **204** – no error
+- **404** – no such container
+- **409** - conflict name already assigned
+- **500** – server error
+
+#### Pause a container
+
+`POST /containers/(id or name)/pause`
+
+Pause the container `id`
+
+**Example request**:
+
+    POST /v1.19/containers/e90e34656806/pause HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Status codes**:
+
+- **204** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Unpause a container
+
+`POST /containers/(id or name)/unpause`
+
+Unpause the container `id`
+
+**Example request**:
+
+    POST /v1.19/containers/e90e34656806/unpause HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Status codes**:
+
+- **204** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Attach to a container
+
+`POST /containers/(id or name)/attach`
+
+Attach to the container `id`
+
+**Example request**:
+
+    POST /v1.19/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 101 UPGRADED
+    Content-Type: application/vnd.docker.raw-stream
+    Connection: Upgrade
+    Upgrade: tcp
+
+    {% raw %}
+    {{ STREAM }}
+    {% endraw %}
+
+**Query parameters**:
+
+- **logs** – 1/True/true or 0/False/false, return logs. Default `false`.
+- **stream** – 1/True/true or 0/False/false, return stream.
+  Default `false`.
+- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach
+  to `stdin`. Default `false`.
+- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return
+  `stdout` log, if `stream=true`, attach to `stdout`. Default `false`.
+- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return
+  `stderr` log, if `stream=true`, attach to `stderr`. Default `false`.
+
+**Status codes**:
+
+- **101** – no error, hints proxy about hijacking
+- **200** – no error, no upgrade header found
+- **400** – bad parameter
+- **404** – no such container
+- **500** – server error
+
+**Stream details**:
+
+When the TTY setting is enabled in
+[`POST /containers/create`](#create-a-container),
+the stream is the raw data from the process PTY and client's `stdin`.
+When the TTY is disabled, the stream is multiplexed to separate
+`stdout` and `stderr`.
+
+The format is a **Header** and a **Payload** (frame).
+
+**HEADER**
+
+The header identifies the stream the payload belongs to (`stdout` or
+`stderr`). It also contains the size of the associated frame encoded in the
+last four bytes (`uint32`).
+
+It is encoded on the first eight bytes like this:
+
+    header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}
+
+`STREAM_TYPE` can be:
+
+- 0: `stdin` (is written on `stdout`)
+- 1: `stdout`
+- 2: `stderr`
+
+`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of
+the `uint32` size encoded as big endian.
+
+**PAYLOAD**
+
+The payload is the raw stream.
+
+**IMPLEMENTATION**
+
+The simplest way to implement the Attach protocol is the following:
+
+    1. Read eight bytes.
+    2. Choose `stdout` or `stderr` depending on the first byte.
+    3. Extract the frame size from the last four bytes.
+    4. 
Read the extracted size and output it on the correct output. + 5. Goto 1. + +#### Attach to a container (websocket) + +`GET /containers/(id or name)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /v1.19/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Wait a container + +`POST /containers/(id or name)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /v1.19/containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Remove a container + +`DELETE /containers/(id or name)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /v1.19/containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default `false`. +- **force** - 1/True/true or 0/False/false, Kill then remove the container. + Default `false`. +- **link** - 1/True/true or 0/False/false, Remove the specified + link associated to the container. Default `false`. 
+ +**Status codes**: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **409** – conflict +- **500** – server error + +#### Copy files or folders from a container + +`POST /containers/(id or name)/copy` + +Copy files or folders of container `id` + +**Example request**: + + POST /v1.19/containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### 2.2 Images + +#### List Images + +`GET /images/json` + +**Example request**: + + GET /v1.19/images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275, + "Labels": {} + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135, + "Labels": { + "com.example.version": "v1" + } + } + ] + +**Example request, with digest information**: + + GET /v1.19/images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728, + "Labels": {} + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. 
Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label +- **filter** - only return images with the specified name + +#### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /v1.19/build HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1/5..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. To do this, its value must be +the path to the alternate build instructions file to use. + +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. After that, +each instruction is run one-by-one until the ID of the new image is output. + +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the Dockerfile. This is + ignored if `remote` is specified and points to an individual filename. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. +- **remote** – A Git repository URI or HTTP/HTTPS URI build source. If the + URI specifies a filename, the file's contents are placed into a file + called `Dockerfile`. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). +- **cpuperiod** - The length of a CPU period in microseconds. +- **cpuquota** - Microseconds of CPU time that the container can get in a CPU period. + +**Request Headers**: + +- **Content-type** – Set to `"application/x-tar"`. +- **X-Registry-Config** – base64-encoded ConfigFile object + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.19/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... 
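+
+The body is a stream of JSON messages rather than a single document, so a
+client should decode it incrementally. A minimal sketch (the message struct
+only names fields shown in the example above, and the string literal stands in
+for a real response body):
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+)
+
+// pullMessage names only the fields used in the example above; the daemon
+// may emit additional fields.
+type pullMessage struct {
+	Status   string `json:"status"`
+	Progress string `json:"progress"`
+	Error    string `json:"error"`
+}
+
+// decodePullStream prints progress messages until the stream ends or an
+// in-band error is reported.
+func decodePullStream(body io.Reader) error {
+	dec := json.NewDecoder(body)
+	for {
+		var m pullMessage
+		if err := dec.Decode(&m); err == io.EOF {
+			return nil
+		} else if err != nil {
+			return err
+		}
+		if m.Error != "" {
+			return errors.New(m.Error)
+		}
+		fmt.Println(m.Status, m.Progress)
+	}
+}
+
+func main() {
+	stream := `{"status": "Pulling..."}` + "\n" +
+		`{"status": "Pulling", "progress": "1 B/ 100 B"}`
+	if err := decodePullStream(strings.NewReader(stream)); err != nil {
+		fmt.Println("pull failed:", err)
+	}
+}
+```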
+ +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. +- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. +- **repo** – Repository name. +- **tag** – Tag. If empty when pulling an image, this causes all tags + for the given image to be pulled. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object + +**Status codes**: + +- **200** – no error +- **404** - repository does not exist or no read access +- **500** – server error + + + +#### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /v1.19/images/ubuntu/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Created": "2013-03-23T22:24:18.818426-07:00", + "Container": "3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "ContainerConfig": { + "Hostname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "Tty": true, + "OpenStdin": true, + "StdinOnce": false, + "Env": null, + "Cmd": ["/bin/bash"], + "Dns": null, + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": null, + "VolumesFrom": "", + "WorkingDir": "" + }, + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Parent": "27cf784147099545", + "Size": 6824592 + } + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /v1.19/images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710", + "Created": 1398108230, + "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /", + "Tags": [ + "ubuntu:lucid", + "ubuntu:10.04" + ], + "Size": 182964289, + "Comment": "" + }, + { + "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8", + "Created": 1398108222, + "CreatedBy": "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/", + "Tags": null, + "Size": 0, + "Comment": "" + }, + { + "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158", + "Created": 1371157430, + "CreatedBy": "", + "Tags": [ + "scratch12:latest", + "scratch:latest" + ], + "Size": 0, + "Comment": "Imported from -" + } + ] + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /v1.19/images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pushing..."} + {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} + {"error": "Invalid..."} + ... 
+
+If you wish to push an image on to a private registry, that image must
+already be tagged into a repository which references that registry `hostname`
+and `port`. This repository name should then be used in the URL. This
+duplicates the command line's flow.
+
+**Example request**:
+
+    POST /v1.19/images/registry.acme.com:5000/test/push HTTP/1.1
+
+
+**Query parameters**:
+
+- **tag** – The tag to associate with the image on the registry. This is optional.
+
+**Request Headers**:
+
+- **X-Registry-Auth** – base64-encoded AuthConfig object.
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such image
+- **500** – server error
+
+#### Tag an image into a repository
+
+`POST /images/(name)/tag`
+
+Tag the image `name` into a repository
+
+**Example request**:
+
+    POST /v1.19/images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+
+**Query parameters**:
+
+- **repo** – The repository to tag in
+- **force** – 1/True/true or 0/False/false, default false
+- **tag** - The new tag name
+
+**Status codes**:
+
+- **201** – no error
+- **400** – bad parameter
+- **404** – no such image
+- **409** – conflict
+- **500** – server error
+
+#### Remove an image
+
+`DELETE /images/(name)`
+
+Remove the image `name` from the filesystem
+
+**Example request**:
+
+    DELETE /v1.19/images/test HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-type: application/json
+
+    [
+     {"Untagged": "3e2f21a89f"},
+     {"Deleted": "3e2f21a89f"},
+     {"Deleted": "53b4f83ac9"}
+    ]
+
+**Query parameters**:
+
+- **force** – 1/True/true or 0/False/false, default false
+- **noprune** – 1/True/true or 0/False/false, default false
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such image
+- **409** – conflict
+- **500** – server error
+
+#### Search images
+
+`GET /images/search`
+
+Search for an image on [Docker Hub](https://hub.docker.com). This API
+returns both `is_trusted` and `is_automated` images. Currently, they
+are considered identical. In the future, the `is_trusted` property will
+be deprecated and replaced by the `is_automated` property.
+
+> **Note**:
+> The response keys have changed from API v1.6 to reflect the JSON
+> sent by the registry server to the docker daemon's request.
+
+**Example request**:
+
+    GET /v1.19/images/search?term=sshd HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+            {
+                "star_count": 12,
+                "is_official": false,
+                "name": "wma55/u1210sshd",
+                "is_trusted": false,
+                "is_automated": false,
+                "description": ""
+            },
+            {
+                "star_count": 10,
+                "is_official": false,
+                "name": "jdswinbank/sshd",
+                "is_trusted": false,
+                "is_automated": false,
+                "description": ""
+            },
+            {
+                "star_count": 18,
+                "is_official": false,
+                "name": "vgauthier/sshd",
+                "is_trusted": false,
+                "is_automated": false,
+                "description": ""
+            }
+    ...
+ ] + +**Query parameters**: + +- **term** – term to search + +**Status codes**: + +- **200** – no error +- **500** – server error + +### 2.3 Misc + +#### Check auth configuration + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /v1.19/auth HTTP/1.1 + Content-Type: application/json + + { + "username": "hannibal", + "password": "xxxx", + "email": "hannibal@a-team.com", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **204** – no error +- **500** – server error + +#### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /v1.19/info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Containers": 11, + "CpuCfsPeriod": true, + "CpuCfsQuota": true, + "Debug": false, + "DockerRootDir": "/var/lib/docker", + "Driver": "btrfs", + "DriverStatus": [[""]], + "ExecutionDriver": "native-0.1", + "ExperimentalBuild": false, + "HttpProxy": "http://test:test@localhost:8080", + "HttpsProxy": "https://test:test@localhost:8080", + "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "IPv4Forwarding": true, + "Images": 16, + "IndexServerAddress": "https://index.docker.io/v1/", + "InitPath": "/usr/bin/docker", + "InitSha1": "", + "KernelVersion": "3.12.0-1-amd64", + "Labels": [ + "storage=ssd" + ], + "MemTotal": 2099236864, + "MemoryLimit": true, + "NCPU": 1, + "NEventsListener": 0, + "NFd": 11, + "NGoroutines": 21, + "Name": "prod-server-42", + "NoProxy": "9.81.1.160", + "OomKillDisable": true, + "OperatingSystem": "Boot2Docker", + "RegistryConfig": { + "IndexConfigs": { + "docker.io": { + "Mirrors": null, + "Name": "docker.io", + "Official": true, + "Secure": true + } + }, + "InsecureRegistryCIDRs": [ + "127.0.0.0/8" + ] + }, + "SwapLimit": false, + "SystemTime": "2015-03-10T11:11:23.730591467-07:00" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /v1.19/version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.5.0", + "Os": "linux", + "KernelVersion": "3.18.5-tinycore64", + "GoVersion": "go1.4.1", + "GitCommit": "a8a31ef", + "Arch": "amd64", + "ApiVersion": "1.19" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /v1.19/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.19/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Volumes": { + "/tmp": {} + }, + "Labels": { + "key1": "value1", + "key2": "value2" + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created 
+    Content-Type: application/json
+
+    {"Id": "596069db4bf5"}
+
+**JSON parameters**:
+
+- **config** - the container's configuration
+
+**Query parameters**:
+
+- **container** – source container
+- **repo** – repository
+- **tag** – tag
+- **comment** – commit message
+- **author** – author (e.g., "John Hannibal Smith
+  <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>")
+
+**Status codes**:
+
+- **201** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Monitor Docker's events
+
+`GET /events`
+
+Get container events from docker, in real time via streaming.
+
+Docker containers report the following events:
+
+    attach, commit, copy, create, destroy, die, exec_create, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause
+
+Docker images report the following events:
+
+    untag, delete
+
+**Example request**:
+
+    GET /v1.19/events?since=1374067924
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924}
+    {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924}
+    {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966}
+    {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970}
+
+**Query parameters**:
+
+- **since** – Timestamp. Show all events created since timestamp and then stream
+- **until** – Timestamp. Show events created until given timestamp and stop streaming
+- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters:
+  - `container=<string>`; -- container to filter
+  - `event=<string>`; -- event to filter
+  - `image=<string>`; -- image to filter
+
+**Status codes**:
+
+- **200** – no error
+- **500** – server error
+
+#### Get a tarball containing all images in a repository
+
+`GET /images/(name)/get`
+
+Get a tarball containing all images and metadata for the repository specified
+by `name`.
+
+If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image
+(and its parents) are returned. If `name` is an image ID, similarly only that
+image (and its parents) are returned, but with the exclusion of the
+'repositories' file in the tarball, as there were no image names referenced.
+
+See the [image tarball format](#image-tarball-format) for more details.
+
+**Example request**
+
+    GET /v1.19/images/ubuntu/get
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/x-tar
+
+    Binary data stream
+
+**Status codes**:
+
+- **200** – no error
+- **500** – server error
+
+#### Get a tarball containing all images
+
+`GET /images/get`
+
+Get a tarball containing all images and metadata for one or more repositories.
+
+For each value of the `names` parameter: if it is a specific name and tag (e.g.
+`ubuntu:latest`), then only that image (and its parents) are returned; if it is
+an image ID, similarly only that image (and its parents) are returned and there
+would be no names referenced in the 'repositories' file for this image ID.
+
+See the [image tarball format](#image-tarball-format) for more details.
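+
+The following self-contained Go program is a sketch of calling this endpoint
+over the daemon's default Unix socket (the `docker` host in the URL is a
+placeholder that the custom dialer ignores, and the output filename is an
+assumption):
+
+```go
+package main
+
+import (
+	"context"
+	"io"
+	"log"
+	"net"
+	"net/http"
+	"os"
+)
+
+func main() {
+	// Dial the daemon's default Unix socket instead of TCP.
+	client := &http.Client{Transport: &http.Transport{
+		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
+		},
+	}}
+
+	resp, err := client.Get("http://docker/v1.19/images/get?names=busybox&names=ubuntu:latest")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer resp.Body.Close()
+
+	out, err := os.Create("images.tar")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer out.Close()
+
+	// Stream the tarball to disk.
+	if _, err := io.Copy(out, resp.Body); err != nil {
+		log.Fatal(err)
+	}
+}
+```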
+ +**Example request** + + GET /v1.19/images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /v1.19/images/load + Content-Type: application/x-tar + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing these files: + +- `VERSION`: currently `1.0` - the file format version +- `json`: detailed layer information, similar to `docker inspect layer_id` +- `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, the tarball should also include a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +#### Exec Create + +`POST /containers/(id or name)/exec` + +Sets up an exec instance in a running container `id` + +**Example request**: + + POST /v1.19/containers/e90e34656806/exec HTTP/1.1 + Content-Type: application/json + + { + "AttachStdin": true, + "AttachStdout": true, + "AttachStderr": true, + "Cmd": ["sh"], + "Tty": true, + "User": "123:456" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id": "f90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. +- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. +- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. +- **Cmd** - Command to run specified as a string or an array of strings. +- **User** - A string value specifying the user, and optionally, group to run + the exec process inside the container. Format is one of: `"user"`, + `"user:group"`, `"uid"`, or `"uid:gid"`. + +**Status codes**: + +- **201** – no error +- **404** – no such container + +#### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up `exec` instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. + +**Example request**: + + POST /v1.19/exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach": false, + "Tty": false + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {% raw %} + {{ STREAM }} + {% endraw %} + +**JSON parameters**: + +- **Detach** - Detach from the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. 
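+
+Combining this endpoint with [Exec Create](#exec-create) above, a complete
+round trip might look like the following Go sketch. It reuses the Unix-socket
+`client` from the earlier tarball example; the container ID and command are
+placeholders, and `encoding/json`, `io`, `log`, `os`, and `strings` are
+assumed to be imported.
+
+```go
+// Create an exec instance in a running container, then start it attached.
+create := strings.NewReader(`{"AttachStdout": true, "AttachStderr": true, "Cmd": ["date"]}`)
+resp, err := client.Post("http://docker/v1.19/containers/e90e34656806/exec",
+	"application/json", create)
+if err != nil {
+	log.Fatal(err)
+}
+var exec struct{ Id string }
+if err := json.NewDecoder(resp.Body).Decode(&exec); err != nil {
+	log.Fatal(err)
+}
+resp.Body.Close()
+
+// Detach=false keeps the connection open; the body then carries the
+// command's output, multiplexed as described under "Stream details" below.
+start := strings.NewReader(`{"Detach": false, "Tty": false}`)
+resp, err = client.Post("http://docker/v1.19/exec/"+exec.Id+"/start",
+	"application/json", start)
+if err != nil {
+	log.Fatal(err)
+}
+defer resp.Body.Close()
+io.Copy(os.Stdout, resp.Body)
+```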
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such exec instance
+
+**Stream details**:
+
+Similar to the stream behavior of `POST /containers/(id or name)/attach` API
+
+#### Exec Resize
+
+`POST /exec/(id)/resize`
+
+Resizes the `tty` session used by the `exec` command `id`. The unit is the number of characters.
+This API is valid only if `tty` was specified as part of creating and starting the `exec` command.
+
+**Example request**:
+
+    POST /v1.19/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1
+    Content-Type: text/plain
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: text/plain
+
+**Query parameters**:
+
+- **h** – height of `tty` session
+- **w** – width
+
+**Status codes**:
+
+- **201** – no error
+- **404** – no such exec instance
+
+#### Exec Inspect
+
+`GET /exec/(id)/json`
+
+Return low-level information about the `exec` command `id`.
+
+**Example request**:
+
+    GET /v1.19/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: text/plain
+
+    {
+        "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39",
+        "Running" : false,
+        "ExitCode" : 2,
+        "ProcessConfig" : {
+            "privileged" : false,
+            "user" : "",
+            "tty" : false,
+            "entrypoint" : "sh",
+            "arguments" : [
+                "-c",
+                "exit 2"
+            ]
+        },
+        "OpenStdin" : false,
+        "OpenStderr" : false,
+        "OpenStdout" : false,
+        "Container" : {
+            "State" : {
+                "Running" : true,
+                "Paused" : false,
+                "Restarting" : false,
+                "OOMKilled" : false,
+                "Pid" : 3650,
+                "ExitCode" : 0,
+                "Error" : "",
+                "StartedAt" : "2014-11-17T22:26:03.717657531Z",
+                "FinishedAt" : "0001-01-01T00:00:00Z"
+            },
+            "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c",
+            "Created" : "2014-11-17T22:26:03.626304998Z",
+            "Path" : "date",
+            "Args" : [],
+            "Config" : {
+                "Hostname" : "8f177a186b97",
+                "Domainname" : "",
+                "User" : "",
+                "AttachStdin" : false,
+                "AttachStdout" : false,
+                "AttachStderr" : false,
+                "PortSpecs": null,
+                "ExposedPorts" : null,
+                "Tty" : false,
+                "OpenStdin" : false,
+                "StdinOnce" : false,
+                "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ],
+                "Cmd" : [
+                    "date"
+                ],
+                "Image" : "ubuntu",
+                "Volumes" : null,
+                "WorkingDir" : "",
+                "Entrypoint" : null,
+                "NetworkDisabled" : false,
+                "MacAddress" : "",
+                "OnBuild" : null,
+                "SecurityOpt" : null
+            },
+            "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5",
+            "NetworkSettings" : {
+                "IPAddress" : "172.17.0.2",
+                "IPPrefixLen" : 16,
+                "MacAddress" : "02:42:ac:11:00:02",
+                "Gateway" : "172.17.42.1",
+                "Bridge" : "docker0",
+                "PortMapping" : null,
+                "Ports" : {}
+            },
+            "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf",
+            "HostnamePath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname",
+            "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts",
+            "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log",
+            "Name" : "/test",
+            "Driver" : "aufs",
+            "ExecDriver" : "native-0.2",
+            "MountLabel" : "",
+            "ProcessLabel" : "",
+            "AppArmorProfile" : "",
+            "RestartCount" : 0,
+            "Volumes" : {},
+            "VolumesRW" : {}
+        }
+    }
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such exec instance
+- **500** - server
error
+
+## 3. Going further
+
+### 3.1 Inside `docker run`
+
+As an example, the `docker run` command line makes the following API calls:
+
+- Create the container
+
+- If the status code is 404, it means the image doesn't exist:
+    - Try to pull it.
+    - Then, retry to create the container.
+
+- Start the container.
+
+- If you are not in detached mode:
+    - Attach to the container, using `logs=1` (to have `stdout` and
+      `stderr` from the container's start) and `stream=1`
+
+- If in detached mode or only `stdin` is attached, display the container's id.
+
+### 3.2 Hijacking
+
+In this version of the API, `/attach` uses hijacking to transport `stdin`,
+`stdout`, and `stderr` on the same socket.
+
+To hint potential proxies about connection hijacking, the Docker client sends
+connection upgrade headers, similar to websocket:
+
+    Upgrade: tcp
+    Connection: Upgrade
+
+When the Docker daemon detects the `Upgrade` header, it switches its status code
+from **200 OK** to **101 UPGRADED** and resends the same headers.
+
+
+### 3.3 CORS Requests
+
+To enable cross-origin requests to the Engine API, give values to
+`--api-cors-header` when running Docker in daemon mode. Setting `*` (asterisk)
+allows all origins; the default (blank) leaves CORS disabled.
+
+    $ docker -d -H="192.168.1.9:2375" --api-cors-header="http://foo.bar"
diff --git a/vendor/github.com/moby/moby/docs/api/v1.20.md b/vendor/github.com/moby/moby/docs/api/v1.20.md
new file mode 100644
index 000000000..a7fc999ae
--- /dev/null
+++ b/vendor/github.com/moby/moby/docs/api/v1.20.md
@@ -0,0 +1,2394 @@
+---
+title: "Engine API v1.20"
+description: "API Documentation for Docker"
+keywords: "API, Docker, rcli, REST, documentation"
+redirect_from:
+- /engine/reference/api/docker_remote_api_v1.20/
+- /reference/api/docker_remote_api_v1.20/
+---
+
+
+
+## 1. Brief introduction
+
+ - The daemon listens on `unix:///var/run/docker.sock` but you can
+   [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket).
+ - The API tends to be REST. However, for some complex commands, like `attach`
+   or `pull`, the HTTP connection is hijacked to transport `stdout`,
+   `stdin` and `stderr`.
+
+## 2.
Endpoints + +### 2.1 Containers + +#### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /v1.20/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "Labels": {}, + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + } + ] + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. 
Available filters:
+  - `exited=<int>`; -- containers with exit code of `<int>` ;
+  - `status=`(`created`|`restarting`|`running`|`paused`|`exited`)
+  - `label=key` or `label="key=value"` of a container label
+
+**Status codes**:
+
+- **200** – no error
+- **400** – bad parameter
+- **500** – server error
+
+#### Create a container
+
+`POST /containers/create`
+
+Create a container
+
+**Example request**:
+
+    POST /v1.20/containers/create HTTP/1.1
+    Content-Type: application/json
+
+    {
+       "Hostname": "",
+       "Domainname": "",
+       "User": "",
+       "AttachStdin": false,
+       "AttachStdout": true,
+       "AttachStderr": true,
+       "Tty": false,
+       "OpenStdin": false,
+       "StdinOnce": false,
+       "Env": [
+               "FOO=bar",
+               "BAZ=quux"
+       ],
+       "Cmd": [
+               "date"
+       ],
+       "Entrypoint": null,
+       "Image": "ubuntu",
+       "Labels": {
+               "com.example.vendor": "Acme",
+               "com.example.license": "GPL",
+               "com.example.version": "1.0"
+       },
+       "Volumes": {
+         "/volumes/data": {}
+       },
+       "WorkingDir": "",
+       "NetworkDisabled": false,
+       "MacAddress": "12:34:56:78:9a:bc",
+       "ExposedPorts": {
+               "22/tcp": {}
+       },
+       "HostConfig": {
+         "Binds": ["/tmp:/tmp"],
+         "Links": ["redis3:redis"],
+         "LxcConf": {"lxc.utsname":"docker"},
+         "Memory": 0,
+         "MemorySwap": 0,
+         "CpuShares": 512,
+         "CpuPeriod": 100000,
+         "CpuQuota": 50000,
+         "CpusetCpus": "0,1",
+         "CpusetMems": "0,1",
+         "BlkioWeight": 300,
+         "MemorySwappiness": 60,
+         "OomKillDisable": false,
+         "PidMode": "",
+         "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] },
+         "PublishAllPorts": false,
+         "Privileged": false,
+         "ReadonlyRootfs": false,
+         "Dns": ["8.8.8.8"],
+         "DnsSearch": [""],
+         "ExtraHosts": null,
+         "VolumesFrom": ["parent", "other:ro"],
+         "CapAdd": ["NET_ADMIN"],
+         "CapDrop": ["MKNOD"],
+         "GroupAdd": ["newgroup"],
+         "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 },
+         "NetworkMode": "bridge",
+         "Devices": [],
+         "Ulimits": [{}],
+         "LogConfig": { "Type": "json-file", "Config": {} },
+         "SecurityOpt": [],
+         "CgroupParent": ""
+      }
+    }
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: application/json
+
+    {
+         "Id":"e90e34656806",
+         "Warnings":[]
+    }
+
+**JSON parameters**:
+
+- **Hostname** - A string value containing the hostname to use for the
+  container.
+- **Domainname** - A string value containing the domain name to use
+  for the container.
+- **User** - A string value specifying the user inside the container.
+- **AttachStdin** - Boolean value, attaches to `stdin`.
+- **AttachStdout** - Boolean value, attaches to `stdout`.
+- **AttachStderr** - Boolean value, attaches to `stderr`.
+- **Tty** - Boolean value, Attach standard streams to a `tty`, including `stdin` if it is not closed.
+- **OpenStdin** - Boolean value, opens `stdin`.
+- **StdinOnce** - Boolean value, close `stdin` after the 1 attached client disconnects.
+- **Env** - A list of environment variables in the form of `["VAR=value", ...]`
+- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }`
+- **Cmd** - Command to run specified as a string or an array of strings.
+- **Entrypoint** - Set the entry point for the container as a string or an array
+  of strings.
+- **Image** - A string specifying the image name to use for the container.
+- **Volumes** - An object mapping mount point paths (strings) inside the
+  container to empty objects.
+- **WorkingDir** - A string specifying the working directory for commands to
+  run in.
+
+- **NetworkDisabled** - Boolean value, when true disables networking for the
+  container
+- **ExposedPorts** - An object mapping ports to an empty object in the form of:
+  `"ExposedPorts": { "<port>/<tcp|udp>: {}" }`
+- **HostConfig**
+    - **Binds** – A list of bind-mounts for this container. Each item is a string in one of these forms:
+       + `host-src:container-dest` to bind-mount a host path into the
+         container. Both `host-src`, and `container-dest` must be an
+         _absolute_ path.
+       + `host-src:container-dest:ro` to make the bind-mount read-only
+         inside the container. Both `host-src`, and `container-dest` must be
+         an _absolute_ path.
+    - **Links** - A list of links for the container. Each link entry should be
+      in the form of `container_name:alias`.
+    - **LxcConf** - LXC specific configurations. These configurations only
+      work when using the `lxc` execution driver.
+    - **Memory** - Memory limit in bytes.
+    - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap.
+      You must use this with `memory` and make the swap value larger than `memory`.
+    - **CpuShares** - An integer value containing the container's CPU Shares
+      (ie. the relative weight vs other containers).
+    - **CpuPeriod** - The length of a CPU period in microseconds.
+    - **CpuQuota** - Microseconds of CPU time that the container can get in a CPU period.
+    - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use.
+    - **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
+    - **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000.
+    - **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
+    - **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not.
+    - **PidMode** - Set the PID (Process) Namespace mode for the container;
+      `"container:<name|id>"`: joins another container's PID namespace
+      `"host"`: use the host's PID namespace inside the container
+    - **PortBindings** - A map of exposed container ports and the host port they
+      should map to. A JSON object in the form
+      `{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`
+      Take note that `port` is specified as a string and not an integer value.
+    - **PublishAllPorts** - Allocates a random host port for all of a container's
+      exposed ports. Specified as a boolean value.
+    - **Privileged** - Gives the container full access to the host. Specified as
+      a boolean value.
+    - **ReadonlyRootfs** - Mount the container's root filesystem as read only.
+      Specified as a boolean value.
+    - **Dns** - A list of DNS servers for the container to use.
+    - **DnsSearch** - A list of DNS search domains
+    - **ExtraHosts** - A list of hostnames/IP mappings to add to the
+      container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`.
+    - **VolumesFrom** - A list of volumes to inherit from another container.
+      Specified in the form `<container name>[:<ro|rw>]`
+    - **CapAdd** - A list of kernel capabilities to add to the container.
+    - **CapDrop** - A list of kernel capabilities to drop from the container.
+    - **GroupAdd** - A list of additional groups that the container process will run as
+    - **RestartPolicy** – The behavior to apply when the container exits. The
+      value is an object with a `Name` property of either `"always"` to
+      always restart or `"on-failure"` to restart only when the container
+      exit code is non-zero. If `on-failure` is used, `MaximumRetryCount`
+      controls the number of times to retry before giving up.
+      The default is not to restart. (optional)
+      An ever-increasing delay (double the previous delay, starting at 100 ms)
+      is added before each restart to prevent flooding the server.
+    - **NetworkMode** - Sets the networking mode for the container. Supported
+      values are: `bridge`, `host`, `none`, and `container:<name|id>`
+    - **Devices** - A list of devices to add to the container specified as a JSON object in the
+      form
+      `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}`
+    - **Ulimits** - A list of ulimits to set in the container, specified as
+      `{ "Name": <name>, "Soft": <soft limit>, "Hard": <hard limit> }`, for example:
+      `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }`
+    - **SecurityOpt** - A list of string values to customize labels for MLS
+      systems, such as SELinux.
+    - **LogConfig** - Log configuration for the container, specified as a JSON object in the form
+      `{ "Type": "<driver_name>", "Config": {"key1": "val1"}}`.
+      Available types: `json-file`, `syslog`, `journald`, `gelf`, `none`.
+    - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist.
+
+**Query parameters**:
+
+- **name** – Assign the specified name to the container. Must
+  match `/?[a-zA-Z0-9_-]+`.
+
+**Status codes**:
+
+- **201** – no error
+- **400** – bad parameter
+- **404** – no such container
+- **406** – impossible to attach (container not running)
+- **409** – conflict
+- **500** – server error
+
+#### Inspect a container
+
+`GET /containers/(id or name)/json`
+
+Return low-level information on the container `id`
+
+**Example request**:
+
+    GET /v1.20/containers/4fa6e0f0c678/json HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "AppArmorProfile": "",
+        "Args": [
+            "-c",
+            "exit 9"
+        ],
+        "Config": {
+            "AttachStderr": true,
+            "AttachStdin": false,
+            "AttachStdout": true,
+            "Cmd": [
+                "/bin/sh",
+                "-c",
+                "exit 9"
+            ],
+            "Domainname": "",
+            "Entrypoint": null,
+            "Env": [
+                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+            ],
+            "ExposedPorts": null,
+            "Hostname": "ba033ac44011",
+            "Image": "ubuntu",
+            "Labels": {
+                "com.example.vendor": "Acme",
+                "com.example.license": "GPL",
+                "com.example.version": "1.0"
+            },
+            "MacAddress": "",
+            "NetworkDisabled": false,
+            "OnBuild": null,
+            "OpenStdin": false,
+            "StdinOnce": false,
+            "Tty": false,
+            "User": "",
+            "Volumes": null,
+            "WorkingDir": ""
+        },
+        "Created": "2015-01-06T15:47:31.485331387Z",
+        "Driver": "devicemapper",
+        "ExecDriver": "native-0.2",
+        "ExecIDs": null,
+        "HostConfig": {
+            "Binds": null,
+            "BlkioWeight": 0,
+            "CapAdd": null,
+            "CapDrop": null,
+            "ContainerIDFile": "",
+            "CpusetCpus": "",
+            "CpusetMems": "",
+            "CpuShares": 0,
+            "CpuPeriod": 100000,
+            "Devices": [],
+            "Dns": null,
+            "DnsSearch": null,
+            "ExtraHosts": null,
+            "IpcMode": "",
+            "Links": null,
+            "LxcConf": [],
+            "Memory": 0,
+            "MemorySwap": 0,
+            "OomKillDisable": false,
+            "NetworkMode": "bridge",
+            "PidMode": "",
+            "PortBindings": {},
+            "Privileged": false,
+            "ReadonlyRootfs": false,
+            "PublishAllPorts": false,
+            "RestartPolicy": {
+                "MaximumRetryCount": 2,
+                "Name": "on-failure"
+            },
+            "LogConfig": {
+                "Config": null,
+                "Type": "json-file"
+            },
+            "SecurityOpt": null,
+            "VolumesFrom": null,
+            "Ulimits": [{}]
+        },
+        "HostnamePath":
"/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "Gateway": "", + "IPAddress": "", + "IPPrefixLen": 0, + "MacAddress": "", + "PortMapping": null, + "Ports": null + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": true, + "StartedAt": "2015-01-06T15:47:32.072697474Z" + }, + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ] + } + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### List processes running inside a container + +`GET /containers/(id or name)/top` + +List processes running inside the container `id`. On Unix systems this +is done by running the `ps` command. This endpoint is not +supported on Windows. + +**Example request**: + + GET /v1.20/containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD" + ], + "Processes" : [ + [ + "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash" + ], + [ + "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10" + ] + ] + } + +**Example request**: + + GET /v1.20/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND" + ] + "Processes" : [ + [ + "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash" + ], + [ + "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10" + ] + ], + } + +**Query parameters**: + +- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef` + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container logs + +`GET /containers/(id or name)/logs` + +Get `stdout` and `stderr` logs from the container ``id`` + +> **Note**: +> This endpoint works only for containers with the `json-file` or `journald` logging drivers. + +**Example request**: + + GET /v1.20/containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **follow** – 1/True/true or 0/False/false, return stream. Default `false`. +- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`. 
+- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`.
+- **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp
+  will only output log-entries since that timestamp. Default: 0 (unfiltered)
+- **timestamps** – 1/True/true or 0/False/false, print timestamps for
+  every log line. Default `false`.
+- **tail** – Output specified number of lines at the end of logs: `all` or `<number>`. Default all.
+
+**Status codes**:
+
+- **101** – no error, hints proxy about hijacking
+- **200** – no error, no upgrade header found
+- **404** – no such container
+- **500** – server error
+
+#### Inspect changes on a container's filesystem
+
+`GET /containers/(id or name)/changes`
+
+Inspect changes on container `id`'s filesystem
+
+**Example request**:
+
+    GET /v1.20/containers/4fa6e0f0c678/changes HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+        {
+            "Path": "/dev",
+            "Kind": 0
+        },
+        {
+            "Path": "/dev/kmsg",
+            "Kind": 1
+        },
+        {
+            "Path": "/test",
+            "Kind": 1
+        }
+    ]
+
+Values for `Kind`:
+
+- `0`: Modify
+- `1`: Add
+- `2`: Delete
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Export a container
+
+`GET /containers/(id or name)/export`
+
+Export the contents of container `id`
+
+**Example request**:
+
+    GET /v1.20/containers/4fa6e0f0c678/export HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/octet-stream
+
+    {% raw %}
+    {{ TAR STREAM }}
+    {% endraw %}
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Get container stats based on resource usage
+
+`GET /containers/(id or name)/stats`
+
+This endpoint returns a live stream of a container's resource usage statistics.
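+
+The response shape appears in the example below. One common use of a sample,
+shown here only as a Go sketch, is deriving a CPU-usage percentage by
+comparing the current reading with the previous one carried in `precpu_stats`;
+the struct and field names are hypothetical and simply mirror the JSON keys:
+
+```go
+// s is one decoded stats sample; CPUStats and PreCPUStats mirror the
+// cpu_stats and precpu_stats objects shown in the example response below.
+cpuDelta := float64(s.CPUStats.CPUUsage.TotalUsage - s.PreCPUStats.CPUUsage.TotalUsage)
+sysDelta := float64(s.CPUStats.SystemCPUUsage - s.PreCPUStats.SystemCPUUsage)
+percent := 0.0
+if cpuDelta > 0 && sysDelta > 0 {
+	// Scale by the number of CPUs so a fully busy 4-core container reads 400%.
+	percent = (cpuDelta / sysDelta) * float64(len(s.CPUStats.CPUUsage.PercpuUsage)) * 100.0
+}
+fmt.Printf("cpu: %.2f%%\n", percent)
+```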
+
+**Example request**:
+
+    GET /v1.20/containers/redis1/stats HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+       "read" : "2015-01-08T22:57:31.547920715Z",
+       "network" : {
+          "rx_dropped" : 0,
+          "rx_bytes" : 648,
+          "rx_errors" : 0,
+          "tx_packets" : 8,
+          "tx_dropped" : 0,
+          "rx_packets" : 8,
+          "tx_errors" : 0,
+          "tx_bytes" : 648
+       },
+       "memory_stats" : {
+          "stats" : {
+             "total_pgmajfault" : 0,
+             "cache" : 0,
+             "mapped_file" : 0,
+             "total_inactive_file" : 0,
+             "pgpgout" : 414,
+             "rss" : 6537216,
+             "total_mapped_file" : 0,
+             "writeback" : 0,
+             "unevictable" : 0,
+             "pgpgin" : 477,
+             "total_unevictable" : 0,
+             "pgmajfault" : 0,
+             "total_rss" : 6537216,
+             "total_rss_huge" : 6291456,
+             "total_writeback" : 0,
+             "total_inactive_anon" : 0,
+             "rss_huge" : 6291456,
+             "hierarchical_memory_limit" : 67108864,
+             "total_pgfault" : 964,
+             "total_active_file" : 0,
+             "active_anon" : 6537216,
+             "total_active_anon" : 6537216,
+             "total_pgpgout" : 414,
+             "total_cache" : 0,
+             "inactive_anon" : 0,
+             "active_file" : 0,
+             "pgfault" : 964,
+             "inactive_file" : 0,
+             "total_pgpgin" : 477
+          },
+          "max_usage" : 6651904,
+          "usage" : 6537216,
+          "failcnt" : 0,
+          "limit" : 67108864
+       },
+       "blkio_stats" : {},
+       "cpu_stats" : {
+          "cpu_usage" : {
+             "percpu_usage" : [
+                8646879,
+                24472255,
+                36438778,
+                30657443
+             ],
+             "usage_in_usermode" : 50000000,
+             "total_usage" : 100215355,
+             "usage_in_kernelmode" : 30000000
+          },
+          "system_cpu_usage" : 739306590000000,
+          "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0}
+       },
+       "precpu_stats" : {
+          "cpu_usage" : {
+             "percpu_usage" : [
+                8646879,
+                24350896,
+                36438778,
+                30657443
+             ],
+             "usage_in_usermode" : 50000000,
+             "total_usage" : 100093996,
+             "usage_in_kernelmode" : 30000000
+          },
+          "system_cpu_usage" : 9492140000000,
+          "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0}
+       }
+    }
+
+The `precpu_stats` field contains the CPU statistics of the previous read,
+which are used to calculate the CPU usage percentage. It is not an exact copy
+of the `cpu_stats` field.
+
+**Query parameters**:
+
+- **stream** – 1/True/true or 0/False/false, pull stats once then disconnect. Default `true`.
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Resize a container TTY
+
+`POST /containers/(id or name)/resize?h=<height>&w=<width>`
+
+Resize the TTY for container with `id`. You must restart the container for the resize to take effect.
+
+**Example request**:
+
+    POST /v1.20/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Length: 0
+    Content-Type: text/plain; charset=utf-8
+
+**Query parameters**:
+
+- **h** – height of `tty` session
+- **w** – width
+
+**Status codes**:
+
+- **200** – no error
+- **404** – No such container
+- **500** – Cannot resize container
+
+#### Start a container
+
+`POST /containers/(id or name)/start`
+
+Start the container `id`
+
+> **Note**:
+> For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body.
+> See [create a container](#create-a-container) for details.
+ +**Example request**: + + POST /v1.20/containers/e90e34656806/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +#### Stop a container + +`POST /containers/(id or name)/stop` + +Stop the container `id` + +**Example request**: + + POST /v1.20/containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +#### Restart a container + +`POST /containers/(id or name)/restart` + +Restart the container `id` + +**Example request**: + + POST /v1.20/containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Kill a container + +`POST /containers/(id or name)/kill` + +Kill the container `id` + +**Example request**: + + POST /v1.20/containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **signal** - Signal to send to the container: integer or string like `SIGINT`. + When not set, `SIGKILL` is assumed and the call waits for the container to exit. + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Rename a container + +`POST /containers/(id or name)/rename` + +Rename the container `id` to a `new_name` + +**Example request**: + + POST /v1.20/containers/e90e34656806/rename?name=new_name HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **name** – new name for the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **409** - conflict name already assigned +- **500** – server error + +#### Pause a container + +`POST /containers/(id or name)/pause` + +Pause the container `id` + +**Example request**: + + POST /v1.20/containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Unpause a container + +`POST /containers/(id or name)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /v1.20/containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Attach to a container + +`POST /containers/(id or name)/attach` + +Attach to the container `id` + +**Example request**: + + POST /v1.20/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. 
+- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return
+  `stdout` log, if `stream=true`, attach to `stdout`. Default `false`.
+- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return
+  `stderr` log, if `stream=true`, attach to `stderr`. Default `false`.
+
+**Status codes**:
+
+- **101** – no error, hints proxy about hijacking
+- **200** – no error, no upgrade header found
+- **400** – bad parameter
+- **404** – no such container
+- **500** – server error
+
+**Stream details**:
+
+When the TTY setting is enabled in
+[`POST /containers/create`
+](#create-a-container),
+the stream is the raw data from the process PTY and client's `stdin`.
+When the TTY is disabled, the stream is multiplexed to separate
+`stdout` and `stderr`.
+
+The format is a **Header** and a **Payload** (frame).
+
+**HEADER**
+
+The header identifies which stream the frame belongs to (`stdout` or
+`stderr`). It also contains the size of the associated frame encoded in the
+last four bytes (`uint32`).
+
+It is encoded on the first eight bytes like this:
+
+    header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}
+
+`STREAM_TYPE` can be:
+
+- 0: `stdin` (is written on `stdout`)
+- 1: `stdout`
+- 2: `stderr`
+
+`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of
+the `uint32` size encoded as big endian.
+
+**PAYLOAD**
+
+The payload is the raw stream.
+
+**IMPLEMENTATION**
+
+The simplest way to implement the Attach protocol is the following:
+
+    1. Read eight bytes.
+    2. Choose `stdout` or `stderr` depending on the first byte.
+    3. Extract the frame size from the last four bytes.
+    4. Read the extracted size and output it on the correct output.
+    5. Goto 1.
+
+#### Attach to a container (websocket)
+
+`GET /containers/(id or name)/attach/ws`
+
+Attach to the container `id` via websocket
+
+Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455)
+
+**Example request**
+
+    GET /v1.20/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1
+
+**Example response**
+
+    {% raw %}
+    {{ STREAM }}
+    {% endraw %}
+
+**Query parameters**:
+
+- **logs** – 1/True/true or 0/False/false, return logs. Default `false`.
+- **stream** – 1/True/true or 0/False/false, return stream.
+  Default `false`.
+- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach
+  to `stdin`. Default `false`.
+- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return
+  `stdout` log, if `stream=true`, attach to `stdout`. Default `false`.
+- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return
+  `stderr` log, if `stream=true`, attach to `stderr`. Default `false`.
+ +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Wait a container + +`POST /containers/(id or name)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /v1.20/containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Remove a container + +`DELETE /containers/(id or name)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /v1.20/containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default `false`. +- **force** - 1/True/true or 0/False/false, Kill then remove the container. + Default `false`. +- **link** - 1/True/true or 0/False/false, Remove the specified + link associated to the container. Default `false`. + +**Status codes**: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **409** – conflict +- **500** – server error + +#### Copy files or folders from a container + +`POST /containers/(id or name)/copy` + +Copy files or folders of container `id` + +**Deprecated** in favor of the `archive` endpoint below. + +**Example request**: + + POST /v1.20/containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Retrieving information about files and folders in a container + +`HEAD /containers/(id or name)/archive` + +See the description of the `X-Docker-Container-Path-Stat` header in the +following section. + +#### Get an archive of a filesystem resource in a container + +`GET /containers/(id or name)/archive` + +Get a tar archive of a resource in the filesystem of container `id`. + +**Query parameters**: + +- **path** - resource in the container's filesystem to archive. Required. + + If not an absolute path, it is relative to the container's root directory. + The resource specified by **path** must exist. To assert that the resource + is expected to be a directory, **path** should end in `/` or `/.` + (assuming a path separator of `/`). If **path** ends in `/.` then this + indicates that only the contents of the **path** directory should be + copied. A symlink is always resolved to its target. + + > **Note**: It is not possible to copy certain system files such as resources + > under `/proc`, `/sys`, `/dev`, and mounts created by the user in the + > container. + +**Example request**: + + GET /v1.20/containers/8cce319429b2/archive?path=/root HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + X-Docker-Container-Path-Stat: eyJuYW1lIjoicm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oiLCJsaW5rVGFyZ2V0IjoiIn0= + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +On success, a response header `X-Docker-Container-Path-Stat` will be set to a +base64-encoded JSON object containing some filesystem header information about +the archived resource. 
The above example value would decode to the following
+JSON object (whitespace added for readability):
+
+```json
+{
+    "name": "root",
+    "size": 4096,
+    "mode": 2147484096,
+    "mtime": "2014-02-27T20:51:23Z",
+    "linkTarget": ""
+}
+```
+
+A `HEAD` request can also be made to this endpoint if only this information is
+desired.
+
+**Status codes**:
+
+- **200** - success, returns archive of copied resource
+- **400** - client error, bad parameter, details in JSON response body, one of:
+    - must specify path parameter (**path** cannot be empty)
+    - not a directory (**path** was asserted to be a directory but exists as a
+      file)
+- **404** - client error, resource not found, one of:
+    - no such container (container `id` does not exist)
+    - no such file or directory (**path** does not exist)
+- **500** - server error
+
+#### Extract an archive of files or folders to a directory in a container
+
+`PUT /containers/(id or name)/archive`
+
+Upload a tar archive to be extracted to a path in the filesystem of container
+`id`.
+
+**Query parameters**:
+
+- **path** - path to a directory in the container
+  to extract the archive's contents into. Required.
+
+  If not an absolute path, it is relative to the container's root directory.
+  The **path** resource must exist.
+- **noOverwriteDirNonDir** - If "1", "true", or "True" then it will be an error
+  if unpacking the given content would cause an existing directory to be
+  replaced with a non-directory and vice versa.
+
+**Example request**:
+
+    PUT /v1.20/containers/8cce319429b2/archive?path=/vol1 HTTP/1.1
+    Content-Type: application/x-tar
+
+    {% raw %}
+    {{ TAR STREAM }}
+    {% endraw %}
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+
+**Status codes**:
+
+- **200** – the content was extracted successfully
+- **400** - client error, bad parameter, details in JSON response body, one of:
+    - must specify path parameter (**path** cannot be empty)
+    - not a directory (**path** should be a directory but exists as a file)
+    - unable to overwrite existing directory with non-directory
+      (if **noOverwriteDirNonDir**)
+    - unable to overwrite existing non-directory with directory
+      (if **noOverwriteDirNonDir**)
+- **403** - client error, permission denied, the volume
+  or container rootfs is marked as read-only.
+- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** resource does not exist) +- **500** – server error + +### 2.2 Images + +#### List Images + +`GET /images/json` + +**Example request**: + + GET /v1.20/images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275, + "Labels": {} + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135, + "Labels": { + "com.example.version": "v1" + } + } + ] + +**Example request, with digest information**: + + GET /v1.20/images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728, + "Labels": {} + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label +- **filter** - only return images with the specified name + +#### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /v1.20/build HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1/5..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. To do this, its value must be +the path to the alternate build instructions file to use. 
+ +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. After that, +each instruction is run one-by-one until the ID of the new image is output. + +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the `Dockerfile`. This is + ignored if `remote` is specified and points to an external `Dockerfile`. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. +- **remote** – A Git repository URI or HTTP/HTTPS context URI. If the + URI points to a single text file, the file's contents are placed into + a file called `Dockerfile` and the image is built from that file. If + the URI points to a tarball, the file is downloaded by the daemon and + the contents therein used as the context for the build. If the URI + points to a tarball and the `dockerfile` parameter is also specified, + there must be a file with the corresponding path inside the tarball. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). +- **cpuperiod** - The length of a CPU period in microseconds. +- **cpuquota** - Microseconds of CPU time that the container can get in a CPU period. + +**Request Headers**: + +- **Content-type** – Set to `"application/x-tar"`. +- **X-Registry-Config** – A base64-url-safe-encoded Registry Auth Config JSON + object with the following structure: + + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + + This object maps the hostname of a registry to an object containing the + "username" and "password" for that registry. Multiple registries may + be specified as the build may be based on an image requiring + authentication to pull from any arbitrary registry. Only the registry + domain name (and port if not the default "443") are required. However + (for legacy reasons) the "official" Docker, Inc. hosted registry must + be specified with both a "https://" prefix and a "/v1/" suffix even + though Docker will prefer to use the v2 registry API. + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.20/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... 
+ +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. +- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. +- **repo** – Repository name. +- **tag** – Tag. If empty when pulling an image, this causes all tags + for the given image to be pulled. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object + +**Status codes**: + +- **200** – no error +- **404** - repository does not exist or no read access +- **500** – server error + + + +#### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /v1.20/images/ubuntu/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Created": "2013-03-23T22:24:18.818426-07:00", + "Container": "3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "ContainerConfig": { + "Hostname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "Tty": true, + "OpenStdin": true, + "StdinOnce": false, + "Env": null, + "Cmd": ["/bin/bash"], + "Dns": null, + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": null, + "VolumesFrom": "", + "WorkingDir": "" + }, + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Parent": "27cf784147099545", + "Size": 6824592 + } + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /v1.20/images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710", + "Created": 1398108230, + "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /", + "Tags": [ + "ubuntu:lucid", + "ubuntu:10.04" + ], + "Size": 182964289, + "Comment": "" + }, + { + "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8", + "Created": 1398108222, + "CreatedBy": "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/", + "Tags": null, + "Size": 0, + "Comment": "" + }, + { + "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158", + "Created": 1371157430, + "CreatedBy": "", + "Tags": [ + "scratch12:latest", + "scratch:latest" + ], + "Size": 0, + "Comment": "Imported from -" + } + ] + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /v1.20/images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pushing..."} + {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} + {"error": "Invalid..."} + ... 
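+
+A Go sketch of the same push with credentials attached: the `AuthConfig`
+object is JSON-encoded, then base64-encoded (URL-safe base64, as the official
+client sends), and passed in the `X-Registry-Auth` header listed below. All
+credential values here are placeholders:
+
+```go
+package main
+
+import (
+	"context"
+	"encoding/base64"
+	"encoding/json"
+	"io"
+	"net"
+	"net/http"
+	"os"
+)
+
+func main() {
+	auth, _ := json.Marshal(map[string]string{
+		"username":      "janedoe",
+		"password":      "hunter2",
+		"email":         "janedoe@example.com",
+		"serveraddress": "registry.acme.com:5000",
+	})
+	client := &http.Client{Transport: &http.Transport{
+		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
+		},
+	}}
+	req, err := http.NewRequest("POST",
+		"http://docker/v1.20/images/registry.acme.com:5000/test/push?tag=latest", nil)
+	if err != nil {
+		panic(err)
+	}
+	req.Header.Set("X-Registry-Auth", base64.URLEncoding.EncodeToString(auth))
+	resp, err := client.Do(req)
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+	io.Copy(os.Stdout, resp.Body) // push progress / error messages
+}
+```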
+ +If you wish to push an image on to a private registry, that image must already have a tag +into a repository which references that registry `hostname` and `port`. This repository name should +then be used in the URL. This duplicates the command line's flow. + +**Example request**: + + POST /v1.20/images/registry.acme.com:5000/test/push HTTP/1.1 + + +**Query parameters**: + +- **tag** – The tag to associate with the image on the registry. This is optional. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object. + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /v1.20/images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 Created + +**Query parameters**: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /v1.20/images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +**Query parameters**: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. + +**Example request**: + + GET /v1.20/images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... 
+ ] + +**Query parameters**: + +- **term** – term to search + +**Status codes**: + +- **200** – no error +- **500** – server error + +### 2.3 Misc + +#### Check auth configuration + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /v1.20/auth HTTP/1.1 + Content-Type: application/json + + { + "username": "hannibal", + "password": "xxxx", + "email": "hannibal@a-team.com", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **204** – no error +- **500** – server error + +#### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /v1.20/info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Containers": 11, + "CpuCfsPeriod": true, + "CpuCfsQuota": true, + "Debug": false, + "DockerRootDir": "/var/lib/docker", + "Driver": "btrfs", + "DriverStatus": [[""]], + "ExecutionDriver": "native-0.1", + "ExperimentalBuild": false, + "HttpProxy": "http://test:test@localhost:8080", + "HttpsProxy": "https://test:test@localhost:8080", + "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "IPv4Forwarding": true, + "Images": 16, + "IndexServerAddress": "https://index.docker.io/v1/", + "InitPath": "/usr/bin/docker", + "InitSha1": "", + "KernelVersion": "3.12.0-1-amd64", + "Labels": [ + "storage=ssd" + ], + "MemTotal": 2099236864, + "MemoryLimit": true, + "NCPU": 1, + "NEventsListener": 0, + "NFd": 11, + "NGoroutines": 21, + "Name": "prod-server-42", + "NoProxy": "9.81.1.160", + "OomKillDisable": true, + "OperatingSystem": "Boot2Docker", + "RegistryConfig": { + "IndexConfigs": { + "docker.io": { + "Mirrors": null, + "Name": "docker.io", + "Official": true, + "Secure": true + } + }, + "InsecureRegistryCIDRs": [ + "127.0.0.0/8" + ] + }, + "SwapLimit": false, + "SystemTime": "2015-03-10T11:11:23.730591467-07:00" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /v1.20/version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.5.0", + "Os": "linux", + "KernelVersion": "3.18.5-tinycore64", + "GoVersion": "go1.4.1", + "GitCommit": "a8a31ef", + "Arch": "amd64", + "ApiVersion": "1.20", + "Experimental": false + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /v1.20/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.20/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ], + "Labels": { + "key1": "value1", + "key2": "value2" + }, + "WorkingDir": "", + "NetworkDisabled": false, + 
"ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") +- **pause** – 1/True/true or 0/False/false, whether to pause the container before committing +- **changes** – Dockerfile instructions to apply while committing + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **500** – server error + +#### Monitor Docker's events + +`GET /events` + +Get container events from docker, in real time via streaming. + +Docker containers report the following events: + + attach, commit, copy, create, destroy, die, exec_create, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause + +Docker images report the following events: + + delete, import, pull, push, tag, untag + +**Example request**: + + GET /v1.20/events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} + {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970} + +**Query parameters**: + +- **since** – Timestamp. Show all events created since timestamp and then stream +- **until** – Timestamp. Show events created until given timestamp and stop streaming +- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: + - `container=`; -- container to filter + - `event=`; -- event to filter + - `image=`; -- image to filter + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.20/images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. 
+ +**Example request** + + GET /v1.20/images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /v1.20/images/load + Content-Type: application/x-tar + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing these files: + +- `VERSION`: currently `1.0` - the file format version +- `json`: detailed layer information, similar to `docker inspect layer_id` +- `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, the tarball should also include a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +#### Exec Create + +`POST /containers/(id or name)/exec` + +Sets up an exec instance in a running container `id` + +**Example request**: + + POST /v1.20/containers/e90e34656806/exec HTTP/1.1 + Content-Type: application/json + + { + "AttachStdin": true, + "AttachStdout": true, + "AttachStderr": true, + "Cmd": ["sh"], + "Tty": true, + "User": "123:456" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id": "f90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. +- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. +- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. +- **Cmd** - Command to run specified as a string or an array of strings. +- **User** - A string value specifying the user, and optionally, group to run + the exec process inside the container. Format is one of: `"user"`, + `"user:group"`, `"uid"`, or `"uid:gid"`. + +**Status codes**: + +- **201** – no error +- **404** – no such container + +#### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up `exec` instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. + +**Example request**: + + POST /v1.20/exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach": false, + "Tty": false + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {% raw %} + {{ STREAM }} + {% endraw %} + +**JSON parameters**: + +- **Detach** - Detach from the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. 
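+
+When `Detach` is false and no TTY was allocated, the response body carries the
+multiplexed `application/vnd.docker.raw-stream` framing used by
+`POST /containers/(id or name)/attach` (see the stream details below). A
+minimal Go sketch of the consumer side, which reads the 8-byte frame headers
+and splits payloads between `stdout` and `stderr`; reading a saved stream from
+standard input is just for demonstration:
+
+```go
+package main
+
+import (
+	"encoding/binary"
+	"io"
+	"os"
+)
+
+// demuxRawStream splits a multiplexed vnd.docker.raw-stream body into
+// stdout and stderr using the frame header layout described for the
+// attach endpoint: [STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4].
+// When the exec was created with Tty=true the body is unframed raw
+// data instead, and this loop does not apply.
+func demuxRawStream(body io.Reader, stdout, stderr io.Writer) error {
+	var header [8]byte
+	for {
+		if _, err := io.ReadFull(body, header[:]); err != nil {
+			if err == io.EOF {
+				return nil // clean end of stream
+			}
+			return err
+		}
+		size := int64(binary.BigEndian.Uint32(header[4:])) // frame length
+		dst := stdout
+		if header[0] == 2 { // STREAM_TYPE 2 is stderr
+			dst = stderr
+		}
+		if _, err := io.CopyN(dst, body, size); err != nil {
+			return err
+		}
+	}
+}
+
+func main() {
+	// Demultiplex a previously captured stream, e.g. the body of an
+	// exec-start or attach response saved to a file and piped in here.
+	if err := demuxRawStream(os.Stdin, os.Stdout, os.Stderr); err != nil {
+		panic(err)
+	}
+}
+```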
+ +**Status codes**: + +- **200** – no error +- **404** – no such exec instance + +**Stream details**: + +Similar to the stream behavior of `POST /containers/(id or name)/attach` API + +#### Exec Resize + +`POST /exec/(id)/resize` + +Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters. +This API is valid only if `tty` was specified as part of creating and starting the `exec` command. + +**Example request**: + + POST /v1.20/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1 + Content-Type: text/plain + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: text/plain + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **201** – no error +- **404** – no such exec instance + +#### Exec Inspect + +`GET /exec/(id)/json` + +Return low-level information about the `exec` command `id`. + +**Example request**: + + GET /v1.20/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: plain/text + + { + "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39", + "Running" : false, + "ExitCode" : 2, + "ProcessConfig" : { + "privileged" : false, + "user" : "", + "tty" : false, + "entrypoint" : "sh", + "arguments" : [ + "-c", + "exit 2" + ] + }, + "OpenStdin" : false, + "OpenStderr" : false, + "OpenStdout" : false, + "Container" : { + "State" : { + "Running" : true, + "Paused" : false, + "Restarting" : false, + "OOMKilled" : false, + "Pid" : 3650, + "ExitCode" : 0, + "Error" : "", + "StartedAt" : "2014-11-17T22:26:03.717657531Z", + "FinishedAt" : "0001-01-01T00:00:00Z" + }, + "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c", + "Created" : "2014-11-17T22:26:03.626304998Z", + "Path" : "date", + "Args" : [], + "Config" : { + "Hostname" : "8f177a186b97", + "Domainname" : "", + "User" : "", + "AttachStdin" : false, + "AttachStdout" : false, + "AttachStderr" : false, + "ExposedPorts" : null, + "Tty" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], + "Cmd" : [ + "date" + ], + "Image" : "ubuntu", + "Volumes" : null, + "WorkingDir" : "", + "Entrypoint" : null, + "NetworkDisabled" : false, + "MacAddress" : "", + "OnBuild" : null, + "SecurityOpt" : null + }, + "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5", + "NetworkSettings" : { + "IPAddress" : "172.17.0.2", + "IPPrefixLen" : 16, + "MacAddress" : "02:42:ac:11:00:02", + "Gateway" : "172.17.42.1", + "Bridge" : "docker0", + "PortMapping" : null, + "Ports" : {} + }, + "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf", + "HostnamePath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname", + "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Name" : "/test", + "Driver" : "aufs", + "ExecDriver" : "native-0.2", + "MountLabel" : "", + "ProcessLabel" : "", + "AppArmorProfile" : "", + "RestartCount" : 0, + "Mounts" : [] + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **500** - server error + +## 3. 
Going further
+
+### 3.1 Inside `docker run`
+
+As an example, the `docker run` command line makes the following API calls:
+
+- Create the container.
+
+- If the status code is 404, it means the image doesn't exist:
+    - Try to pull it.
+    - Then retry creating the container.
+
+- Start the container.
+
+- If you are not in detached mode:
+    - Attach to the container, using `logs=1` (to have `stdout` and
+      `stderr` from the container's start) and `stream=1`.
+
+- If in detached mode or only `stdin` is attached, display the container's ID.
+
+### 3.2 Hijacking
+
+In this version of the API, `/attach` uses hijacking to transport `stdin`,
+`stdout`, and `stderr` on the same socket.
+
+To hint potential proxies about connection hijacking, the Docker client sends
+connection upgrade headers, as in a websocket handshake:
+
+    Upgrade: tcp
+    Connection: Upgrade
+
+When the Docker daemon detects the `Upgrade` header, it switches its status code
+from **200 OK** to **101 UPGRADED** and resends the same headers.
+
+
+### 3.3 CORS Requests
+
+To allow cross-origin requests to the Engine API, pass a value for the
+`--api-cors-header` flag when running Docker in daemon mode. Setting it to `*`
+(asterisk) allows all origins; leaving it unset or blank disables CORS.
+
+    $ dockerd -H="192.168.1.9:2375" --api-cors-header="http://foo.bar"
diff --git a/vendor/github.com/moby/moby/docs/api/v1.21.md b/vendor/github.com/moby/moby/docs/api/v1.21.md
new file mode 100644
index 000000000..1d42fd0ec
--- /dev/null
+++ b/vendor/github.com/moby/moby/docs/api/v1.21.md
@@ -0,0 +1,2981 @@
+---
+title: "Engine API v1.21"
+description: "API Documentation for Docker"
+keywords: "API, Docker, rcli, REST, documentation"
+redirect_from:
+- /engine/reference/api/docker_remote_api_v1.21/
+- /reference/api/docker_remote_api_v1.21/
+---
+
+
+
+## 1. Brief introduction
+
+ - The daemon listens on `unix:///var/run/docker.sock` but you can
+   [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket).
+ - The API tends to be REST. However, for some complex commands, like `attach`
+   or `pull`, the HTTP connection is hijacked to transport `stdout`,
+   `stdin` and `stderr`.
+ - When the client API version is newer than the daemon's, these calls return an HTTP
+   `400 Bad Request` error message.
+
+## 2. 
Endpoints + +### 2.1 Containers + +#### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /v1.21/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "Labels": {}, + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + } + ] + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. 
Available filters:
+  - `exited=<int>` -- containers with exit code of `<int>`
+  - `status=`(`created`|`restarting`|`running`|`paused`|`exited`)
+  - `label=key` or `label="key=value"` of a container label
+
+**Status codes**:
+
+- **200** – no error
+- **400** – bad parameter
+- **500** – server error
+
+#### Create a container
+
+`POST /containers/create`
+
+Create a container
+
+**Example request**:
+
+    POST /v1.21/containers/create HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "Hostname": "",
+      "Domainname": "",
+      "User": "",
+      "AttachStdin": false,
+      "AttachStdout": true,
+      "AttachStderr": true,
+      "Tty": false,
+      "OpenStdin": false,
+      "StdinOnce": false,
+      "Env": [
+        "FOO=bar",
+        "BAZ=quux"
+      ],
+      "Cmd": [
+        "date"
+      ],
+      "Entrypoint": null,
+      "Image": "ubuntu",
+      "Labels": {
+        "com.example.vendor": "Acme",
+        "com.example.license": "GPL",
+        "com.example.version": "1.0"
+      },
+      "Volumes": {
+        "/volumes/data": {}
+      },
+      "WorkingDir": "",
+      "NetworkDisabled": false,
+      "MacAddress": "12:34:56:78:9a:bc",
+      "ExposedPorts": {
+        "22/tcp": {}
+      },
+      "StopSignal": "SIGTERM",
+      "HostConfig": {
+        "Binds": ["/tmp:/tmp"],
+        "Links": ["redis3:redis"],
+        "LxcConf": {"lxc.utsname":"docker"},
+        "Memory": 0,
+        "MemorySwap": 0,
+        "MemoryReservation": 0,
+        "KernelMemory": 0,
+        "CpuShares": 512,
+        "CpuPeriod": 100000,
+        "CpuQuota": 50000,
+        "CpusetCpus": "0,1",
+        "CpusetMems": "0,1",
+        "BlkioWeight": 300,
+        "MemorySwappiness": 60,
+        "OomKillDisable": false,
+        "PidMode": "",
+        "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] },
+        "PublishAllPorts": false,
+        "Privileged": false,
+        "ReadonlyRootfs": false,
+        "Dns": ["8.8.8.8"],
+        "DnsOptions": [""],
+        "DnsSearch": [""],
+        "ExtraHosts": null,
+        "VolumesFrom": ["parent", "other:ro"],
+        "CapAdd": ["NET_ADMIN"],
+        "CapDrop": ["MKNOD"],
+        "GroupAdd": ["newgroup"],
+        "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 },
+        "NetworkMode": "bridge",
+        "Devices": [],
+        "Ulimits": [{}],
+        "LogConfig": { "Type": "json-file", "Config": {} },
+        "SecurityOpt": [],
+        "CgroupParent": "",
+        "VolumeDriver": ""
+      }
+    }
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: application/json
+
+    {
+      "Id": "e90e34656806",
+      "Warnings": []
+    }
+
+**JSON parameters**:
+
+- **Hostname** - A string value containing the hostname to use for the
+  container.
+- **Domainname** - A string value containing the domain name to use
+  for the container.
+- **User** - A string value specifying the user inside the container.
+- **AttachStdin** - Boolean value, attaches to `stdin`.
+- **AttachStdout** - Boolean value, attaches to `stdout`.
+- **AttachStderr** - Boolean value, attaches to `stderr`.
+- **Tty** - Boolean value, attach standard streams to a `tty`, including `stdin` if it is not closed.
+- **OpenStdin** - Boolean value, opens `stdin`.
+- **StdinOnce** - Boolean value, close `stdin` after the first attached client disconnects.
+- **Env** - A list of environment variables in the form of `["VAR=value", ...]`
+- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }`
+- **Cmd** - Command to run specified as a string or an array of strings.
+- **Entrypoint** - Set the entry point for the container as a string or an array
+  of strings.
+- **Image** - A string specifying the image name to use for the container.
+- **Volumes** - An object mapping mount point paths (strings) inside the
+  container to empty objects.
+- **WorkingDir** - A string specifying the working directory for commands to
+  run in.
+- **NetworkDisabled** - Boolean value, when true disables networking for the
+  container.
+- **ExposedPorts** - An object mapping ports to an empty object, in the form:
+  `"ExposedPorts": { "<port>/<tcp|udp>": {} }`
+- **StopSignal** - Signal to stop a container as a string or unsigned integer. `SIGTERM` by default.
+- **HostConfig**
+  - **Binds** – A list of volume bindings for this container. Each volume binding is a string in one of these forms:
+    + `host-src:container-dest` to bind-mount a host path into the
+      container. Both `host-src` and `container-dest` must be
+      _absolute_ paths.
+    + `host-src:container-dest:ro` to make the bind-mount read-only
+      inside the container. Both `host-src` and `container-dest` must be
+      _absolute_ paths.
+    + `volume-name:container-dest` to bind-mount a volume managed by a
+      volume driver into the container. `container-dest` must be an
+      _absolute_ path.
+    + `volume-name:container-dest:ro` to mount the volume read-only
+      inside the container. `container-dest` must be an _absolute_ path.
+  - **Links** - A list of links for the container. Each link entry should be
+    in the form of `container_name:alias`.
+  - **LxcConf** - LXC specific configurations. These configurations only
+    work when using the `lxc` execution driver.
+  - **Memory** - Memory limit in bytes.
+  - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap.
+    You must use this with `memory` and make the swap value larger than `memory`.
+  - **MemoryReservation** - Memory soft limit in bytes.
+  - **KernelMemory** - Kernel memory limit in bytes.
+  - **CpuShares** - An integer value containing the container's CPU shares
+    (i.e., the relative weight vs. other containers).
+  - **CpuPeriod** - The length of a CPU period in microseconds.
+  - **CpuQuota** - Microseconds of CPU time that the container can get in a CPU period.
+  - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use.
+  - **CpusetMems** - Memory nodes (MEMs) in which to allow execution (e.g., `0-3`, `0,1`). Only effective on NUMA systems.
+  - **BlkioWeight** - Block IO weight (relative weight); accepts a value between 10 and 1000.
+  - **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
+  - **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not.
+  - **PidMode** - Set the PID (Process) Namespace mode for the container;
+    `"container:<name|id>"`: joins another container's PID namespace;
+    `"host"`: use the host's PID namespace inside the container.
+  - **PortBindings** - A map of exposed container ports and the host port they
+    should map to. A JSON object in the form
+    `{ "<port>/<protocol>": [{ "HostPort": "<port>" }] }`.
+    Take note that `port` is specified as a string and not an integer value.
+  - **PublishAllPorts** - Allocates a random host port for all of a container's
+    exposed ports. Specified as a boolean value.
+  - **Privileged** - Gives the container full access to the host. Specified as
+    a boolean value.
+  - **ReadonlyRootfs** - Mount the container's root filesystem as read only.
+    Specified as a boolean value.
+  - **Dns** - A list of DNS servers for the container to use.
+  - **DnsOptions** - A list of DNS options.
+  - **DnsSearch** - A list of DNS search domains.
+  - **ExtraHosts** - A list of hostname/IP mappings to add to the
+    container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`.
+  - **VolumesFrom** - A list of volumes to inherit from another container.
+    Specified in the form `<container name>[:<ro|rw>]`.
+  - **CapAdd** - A list of kernel capabilities to add to the container.
+  - **CapDrop** - A list of kernel capabilities to drop from the container.
+  - **GroupAdd** - A list of additional groups that the container process will run as.
+  - **RestartPolicy** – The behavior to apply when the container exits. The
+    value is an object with a `Name` property of either `"always"` to
+    always restart, `"unless-stopped"` to restart always except when
+    the user has manually stopped the container, or `"on-failure"` to restart only when the container
+    exit code is non-zero. If `on-failure` is used, `MaximumRetryCount`
+    controls the number of times to retry before giving up.
+    The default is not to restart. (optional)
+    An ever-increasing delay (double the previous delay, starting at 100 ms)
+    is added before each restart to prevent flooding the server.
+  - **NetworkMode** - Sets the networking mode for the container. Supported
+    standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. Any other value is taken
+    as a custom network's name to which this container should connect.
+  - **Devices** - A list of devices to add to the container, specified as JSON objects in the
+    form
+    `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw" }`.
+  - **Ulimits** - A list of ulimits to set in the container, specified as
+    `{ "Name": <name>, "Soft": <soft limit>, "Hard": <hard limit> }`, for example:
+    `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }`
+  - **SecurityOpt**: A list of string values to customize labels for MLS
+    systems, such as SELinux.
+  - **LogConfig** - Log configuration for the container, specified as a JSON object in the form
+    `{ "Type": "<driver>", "Config": {"key1": "val1"} }`.
+    Available types: `json-file`, `syslog`, `journald`, `gelf`, `awslogs`, `none`;
+    the default is the `json-file` logging driver.
+  - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist.
+  - **VolumeDriver** - Driver that this container uses to mount volumes.
+
+**Query parameters**:
+
+- **name** – Assign the specified name to the container. Must
+  match `/?[a-zA-Z0-9_-]+`.
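+
+Stripped of the optional fields, the whole request above reduces to a small
+JSON document; only `Image` is strictly required. A minimal Go sketch, where
+the Unix-socket wiring and the chosen names are assumptions of the example:
+
+```go
+package main
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"net"
+	"net/http"
+)
+
+func main() {
+	// A deliberately small config; every other field shown in the example
+	// request falls back to a daemon-side default.
+	body, _ := json.Marshal(map[string]interface{}{
+		"Image": "ubuntu",
+		"Cmd":   []string{"date"},
+		"HostConfig": map[string]interface{}{
+			"RestartPolicy": map[string]interface{}{
+				"Name":              "on-failure",
+				"MaximumRetryCount": 2,
+			},
+		},
+	})
+	client := &http.Client{Transport: &http.Transport{
+		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
+		},
+	}}
+	resp, err := client.Post("http://docker/v1.21/containers/create?name=my_container",
+		"application/json", bytes.NewReader(body))
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+	var out struct {
+		Id       string
+		Warnings []string
+	}
+	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
+		panic(err)
+	}
+	fmt.Println("created:", out.Id, "warnings:", out.Warnings)
+}
+```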
+ +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such container +- **406** – impossible to attach (container not running) +- **409** – conflict +- **500** – server error + +#### Inspect a container + +`GET /containers/(id or name)/json` + +Return low-level information on the container `id` + +**Example request**: + + GET /v1.21/containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "AppArmorProfile": "", + "Args": [ + "-c", + "exit 9" + ], + "Config": { + "AttachStderr": true, + "AttachStdin": false, + "AttachStdout": true, + "Cmd": [ + "/bin/sh", + "-c", + "exit 9" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": null, + "Hostname": "ba033ac44011", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "MacAddress": "", + "NetworkDisabled": false, + "OnBuild": null, + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "", + "StopSignal": "SIGTERM" + }, + "Created": "2015-01-06T15:47:31.485331387Z", + "Driver": "devicemapper", + "ExecDriver": "native-0.2", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "BlkioWeight": 0, + "CapAdd": null, + "CapDrop": null, + "ContainerIDFile": "", + "CpusetCpus": "", + "CpusetMems": "", + "CpuShares": 0, + "CpuPeriod": 100000, + "Devices": [], + "Dns": null, + "DnsOptions": null, + "DnsSearch": null, + "ExtraHosts": null, + "IpcMode": "", + "Links": null, + "LxcConf": [], + "Memory": 0, + "MemorySwap": 0, + "MemoryReservation": 0, + "KernelMemory": 0, + "OomKillDisable": false, + "NetworkMode": "bridge", + "PidMode": "", + "PortBindings": {}, + "Privileged": false, + "ReadonlyRootfs": false, + "PublishAllPorts": false, + "RestartPolicy": { + "MaximumRetryCount": 2, + "Name": "on-failure" + }, + "LogConfig": { + "Config": null, + "Type": "json-file" + }, + "SecurityOpt": null, + "VolumesFrom": null, + "Ulimits": [{}], + "VolumeDriver": "" + }, + "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "SandboxID": "", + "HairpinMode": false, + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "Ports": null, + "SandboxKey": "", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null, + "EndpointID": "", + "Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "MacAddress": "", + "Networks": { + "bridge": { + "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:12:00:02" + } + } + }, + "Path": "/bin/sh", + 
"ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": true, + "StartedAt": "2015-01-06T15:47:32.072697474Z", + "Status": "running" + }, + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ] + } + +**Example request, with size information**: + + GET /v1.21/containers/4fa6e0f0c678/json?size=1 HTTP/1.1 + +**Example response, with size information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + .... + "SizeRw": 0, + "SizeRootFs": 972, + .... + } + +**Query parameters**: + +- **size** – 1/True/true or 0/False/false, return container size information. Default is `false`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### List processes running inside a container + +`GET /containers/(id or name)/top` + +List processes running inside the container `id`. On Unix systems this +is done by running the `ps` command. This endpoint is not +supported on Windows. + +**Example request**: + + GET /v1.21/containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD" + ], + "Processes" : [ + [ + "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash" + ], + [ + "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10" + ] + ] + } + +**Example request**: + + GET /v1.21/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND" + ] + "Processes" : [ + [ + "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash" + ], + [ + "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10" + ] + ], + } + +**Query parameters**: + +- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef` + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container logs + +`GET /containers/(id or name)/logs` + +Get `stdout` and `stderr` logs from the container ``id`` + +> **Note**: +> This endpoint works only for containers with the `json-file` or `journald` logging drivers. + +**Example request**: + + GET /v1.21/containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **follow** – 1/True/true or 0/False/false, return stream. Default `false`. +- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`. +- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`. +- **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp + will only output log-entries since that timestamp. Default: 0 (unfiltered) +- **timestamps** – 1/True/true or 0/False/false, print timestamps for + every log line. Default `false`. 
+- **tail** – Output specified number of lines at the end of logs: `all` or ``. Default all. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **404** – no such container +- **500** – server error + +#### Inspect changes on a container's filesystem + +`GET /containers/(id or name)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /v1.21/containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Values for `Kind`: + +- `0`: Modify +- `1`: Add +- `2`: Delete + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Export a container + +`GET /containers/(id or name)/export` + +Export the contents of container `id` + +**Example request**: + + GET /v1.21/containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container stats based on resource usage + +`GET /containers/(id or name)/stats` + +This endpoint returns a live stream of a container's resource usage statistics. + +**Example request**: + + GET /v1.21/containers/redis1/stats HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "read" : "2015-01-08T22:57:31.547920715Z", + "networks": { + "eth0": { + "rx_bytes": 5338, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 36, + "tx_bytes": 648, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 8 + }, + "eth5": { + "rx_bytes": 4641, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 26, + "tx_bytes": 690, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 9 + } + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit" : 67108864, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats" : {}, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24472255, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100215355, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 739306590000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + }, + "precpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24350896, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100093996, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 9492140000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + } + } + 
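+
+The payload carries only cumulative counters; a CPU usage percentage has to be
+derived client-side by comparing `cpu_stats` against `precpu_stats` (see the
+note that follows). A Go sketch of the usual calculation, roughly what the
+`docker stats` CLI does, fed with the numbers from the example response above:
+
+```go
+package main
+
+import "fmt"
+
+// cpuPercent derives a CPU usage percentage from two successive samples:
+// the container's usage delta divided by the host's, scaled by the number
+// of CPUs. This mirrors the calculation commonly used by clients; it is
+// not part of the API response itself.
+func cpuPercent(total, preTotal, system, preSystem float64, ncpus int) float64 {
+	cpuDelta := total - preTotal
+	sysDelta := system - preSystem
+	if cpuDelta <= 0 || sysDelta <= 0 {
+		return 0
+	}
+	return cpuDelta / sysDelta * float64(ncpus) * 100
+}
+
+func main() {
+	// total_usage / system_cpu_usage pairs from cpu_stats and precpu_stats
+	// in the example response; len(percpu_usage) gives the CPU count.
+	fmt.Printf("%.6f%%\n",
+		cpuPercent(100215355, 100093996, 739306590000000, 9492140000000, 4))
+}
+```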
+The `precpu_stats` is the cpu statistic of last read, which is used for calculating the cpu usage percent. It is not the exact copy of the `cpu_stats` field. + +**Query parameters**: + +- **stream** – 1/True/true or 0/False/false, pull stats once then disconnect. Default `true`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Resize a container TTY + +`POST /containers/(id or name)/resize` + +Resize the TTY for container with `id`. The unit is number of characters. You must restart the container for the resize to take effect. + +**Example request**: + + POST /v1.21/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **200** – no error +- **404** – No such container +- **500** – Cannot resize container + +#### Start a container + +`POST /containers/(id or name)/start` + +Start the container `id` + +> **Note**: +> For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body. +> See [create a container](#create-a-container) for details. + +**Example request**: + + POST /v1.21/containers/e90e34656806/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +#### Stop a container + +`POST /containers/(id or name)/stop` + +Stop the container `id` + +**Example request**: + + POST /v1.21/containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +#### Restart a container + +`POST /containers/(id or name)/restart` + +Restart the container `id` + +**Example request**: + + POST /v1.21/containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Kill a container + +`POST /containers/(id or name)/kill` + +Kill the container `id` + +**Example request**: + + POST /v1.21/containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **signal** - Signal to send to the container: integer or string like `SIGINT`. + When not set, `SIGKILL` is assumed and the call waits for the container to exit. 
+ +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Rename a container + +`POST /containers/(id or name)/rename` + +Rename the container `id` to a `new_name` + +**Example request**: + + POST /v1.21/containers/e90e34656806/rename?name=new_name HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **name** – new name for the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **409** - conflict name already assigned +- **500** – server error + +#### Pause a container + +`POST /containers/(id or name)/pause` + +Pause the container `id` + +**Example request**: + + POST /v1.21/containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Unpause a container + +`POST /containers/(id or name)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /v1.21/containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Attach to a container + +`POST /containers/(id or name)/attach` + +Attach to the container `id` + +**Example request**: + + POST /v1.21/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +**Stream details**: + +When using the TTY setting is enabled in +[`POST /containers/create` +](#create-a-container), +the stream is the raw data from the process PTY and client's `stdin`. +When the TTY is disabled, then the stream is multiplexed to separate +`stdout` and `stderr`. + +The format is a **Header** and a **Payload** (frame). + +**HEADER** + +The header contains the information which the stream writes (`stdout` or +`stderr`). It also contains the size of the associated frame encoded in the +last four bytes (`uint32`). + +It is encoded on the first eight bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + +`STREAM_TYPE` can be: + +- 0: `stdin` (is written on `stdout`) +- 1: `stdout` +- 2: `stderr` + +`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of +the `uint32` size encoded as big endian. + +**PAYLOAD** + +The payload is the raw stream. + +**IMPLEMENTATION** + +The simplest way to implement the Attach protocol is the following: + + 1. Read eight bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. 
Read the extracted size and output it on the correct output. + 5. Goto 1. + +#### Attach to a container (websocket) + +`GET /containers/(id or name)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /v1.21/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Wait a container + +`POST /containers/(id or name)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /v1.21/containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Remove a container + +`DELETE /containers/(id or name)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /v1.21/containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default `false`. +- **force** - 1/True/true or 0/False/false, Kill then remove the container. + Default `false`. +- **link** - 1/True/true or 0/False/false, Remove the specified + link associated to the container. Default `false`. + +**Status codes**: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **409** – conflict +- **500** – server error + +#### Copy files or folders from a container + +`POST /containers/(id or name)/copy` + +Copy files or folders of container `id` + +**Deprecated** in favor of the `archive` endpoint below. + +**Example request**: + + POST /v1.21/containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Retrieving information about files and folders in a container + +`HEAD /containers/(id or name)/archive` + +See the description of the `X-Docker-Container-Path-Stat` header in the +following section. + +#### Get an archive of a filesystem resource in a container + +`GET /containers/(id or name)/archive` + +Get a tar archive of a resource in the filesystem of container `id`. + +**Query parameters**: + +- **path** - resource in the container's filesystem to archive. Required. + + If not an absolute path, it is relative to the container's root directory. + The resource specified by **path** must exist. 
To assert that the resource + is expected to be a directory, **path** should end in `/` or `/.` + (assuming a path separator of `/`). If **path** ends in `/.` then this + indicates that only the contents of the **path** directory should be + copied. A symlink is always resolved to its target. + + > **Note**: It is not possible to copy certain system files such as resources + > under `/proc`, `/sys`, `/dev`, and mounts created by the user in the + > container. + +**Example request**: + + GET /v1.21/containers/8cce319429b2/archive?path=/root HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + X-Docker-Container-Path-Stat: eyJuYW1lIjoicm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oiLCJsaW5rVGFyZ2V0IjoiIn0= + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +On success, a response header `X-Docker-Container-Path-Stat` will be set to a +base64-encoded JSON object containing some filesystem header information about +the archived resource. The above example value would decode to the following +JSON object (whitespace added for readability): + +```json +{ + "name": "root", + "size": 4096, + "mode": 2147484096, + "mtime": "2014-02-27T20:51:23Z", + "linkTarget": "" +} +``` + +A `HEAD` request can also be made to this endpoint if only this information is +desired. + +**Status codes**: + +- **200** - success, returns archive of copied resource +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** was asserted to be a directory but exists as a + file) +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** does not exist) +- **500** - server error + +#### Extract an archive of files or folders to a directory in a container + +`PUT /containers/(id or name)/archive` + +Upload a tar archive to be extracted to a path in the filesystem of container +`id`. + +**Query parameters**: + +- **path** - path to a directory in the container + to extract the archive's contents into. Required. + + If not an absolute path, it is relative to the container's root directory. + The **path** resource must exist. +- **noOverwriteDirNonDir** - If "1", "true", or "True" then it will be an error + if unpacking the given content would cause an existing directory to be + replaced with a non-directory and vice versa. + +**Example request**: + + PUT /v1.21/containers/8cce319429b2/archive?path=/vol1 HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – the content was extracted successfully +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** should be a directory but exists as a file) + - unable to overwrite existing directory with non-directory + (if **noOverwriteDirNonDir**) + - unable to overwrite existing non-directory with directory + (if **noOverwriteDirNonDir**) +- **403** - client error, permission denied, the volume + or container rootfs is marked as read-only. 
+- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** resource does not exist) +- **500** – server error + +### 2.2 Images + +#### List Images + +`GET /images/json` + +**Example request**: + + GET /v1.21/images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275, + "Labels": {} + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135, + "Labels": { + "com.example.version": "v1" + } + } + ] + +**Example request, with digest information**: + + GET /v1.21/images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728, + "Labels": {} + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label +- **filter** - only return images with the specified name + +#### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /v1.21/build HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1/5..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. To do this, its value must be +the path to the alternate build instructions file to use. 
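+
+As an aside, here is a minimal client sketch (not part of the API itself) that
+assembles such a build context in memory and streams it to `/build` over the
+daemon's default Unix socket; the `example:latest` tag and the one-line
+Dockerfile are placeholders:
+
+```go
+package main
+
+import (
+    "archive/tar"
+    "bytes"
+    "context"
+    "fmt"
+    "io"
+    "net"
+    "net/http"
+    "os"
+)
+
+func main() {
+    // Build a minimal in-memory context containing only a Dockerfile.
+    dockerfile := []byte("FROM busybox\nRUN echo hello\n")
+    var buf bytes.Buffer
+    tw := tar.NewWriter(&buf)
+    tw.WriteHeader(&tar.Header{Name: "Dockerfile", Mode: 0644, Size: int64(len(dockerfile))})
+    tw.Write(dockerfile)
+    tw.Close()
+
+    // Talk HTTP over the daemon's default Unix socket; the host name in the
+    // URL is a dummy because the dialer below overrides it.
+    client := &http.Client{Transport: &http.Transport{
+        DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+            return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
+        },
+    }}
+
+    resp, err := client.Post("http://docker/v1.21/build?t=example:latest",
+        "application/x-tar", &buf)
+    if err != nil {
+        fmt.Fprintln(os.Stderr, err)
+        return
+    }
+    defer resp.Body.Close()
+    io.Copy(os.Stdout, resp.Body) // build progress arrives as a JSON stream
+}
+```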
+ +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. After that, +each instruction is run one-by-one until the ID of the new image is output. + +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the `Dockerfile`. This is + ignored if `remote` is specified and points to an external `Dockerfile`. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. + You can provide one or more `t` parameters. +- **remote** – A Git repository URI or HTTP/HTTPS context URI. If the + URI points to a single text file, the file's contents are placed into + a file called `Dockerfile` and the image is built from that file. If + the URI points to a tarball, the file is downloaded by the daemon and + the contents therein used as the context for the build. If the URI + points to a tarball and the `dockerfile` parameter is also specified, + there must be a file with the corresponding path inside the tarball. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). +- **cpuperiod** - The length of a CPU period in microseconds. +- **cpuquota** - Microseconds of CPU time that the container can get in a CPU period. +- **buildargs** – JSON map of string pairs for build-time variables. Users pass + these values at build-time. Docker uses the `buildargs` as the environment + context for command(s) run via the Dockerfile's `RUN` instruction or for + variable expansion in other Dockerfile instructions. This is not meant for + passing secret values. [Read more about the buildargs instruction](../reference/builder.md#arg) + +**Request Headers**: + +- **Content-type** – Set to `"application/x-tar"`. +- **X-Registry-Config** – A base64-url-safe-encoded Registry Auth Config JSON + object with the following structure: + + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + + This object maps the hostname of a registry to an object containing the + "username" and "password" for that registry. Multiple registries may + be specified as the build may be based on an image requiring + authentication to pull from any arbitrary registry. Only the registry + domain name (and port if not the default "443") are required. However + (for legacy reasons) the "official" Docker, Inc. hosted registry must + be specified with both a "https://" prefix and a "/v1/" suffix even + though Docker will prefer to use the v2 registry API. 
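+
+For illustration, a client could produce this header value from the JSON
+object above like so (the credentials are the same placeholders):
+
+```go
+package main
+
+import (
+    "encoding/base64"
+    "encoding/json"
+    "fmt"
+)
+
+func main() {
+    // The registry-auth map shown above; all credentials are placeholders.
+    cfg := map[string]map[string]string{
+        "docker.example.com":          {"username": "janedoe", "password": "hunter2"},
+        "https://index.docker.io/v1/": {"username": "mobydock", "password": "conta1n3rize14"},
+    }
+    raw, err := json.Marshal(cfg)
+    if err != nil {
+        panic(err)
+    }
+    // The header value is the base64-url-safe encoding of the JSON object.
+    fmt.Println("X-Registry-Config: " + base64.URLEncoding.EncodeToString(raw))
+}
+```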
+ +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.21/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. The name may include a tag or + digest. This parameter may only be used when pulling an image. +- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. + This parameter may only be used when importing an image. +- **repo** – Repository name given to an image when it is imported. + The repo may include a tag. This parameter may only be used when importing + an image. +- **tag** – Tag or digest. If empty when pulling an image, this causes all tags + for the given image to be pulled. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object + +**Status codes**: + +- **200** – no error +- **404** - repository does not exist or no read access +- **500** – server error + + + +#### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /v1.21/images/example/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id" : "85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c", + "Container" : "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a", + "Comment" : "", + "Os" : "linux", + "Architecture" : "amd64", + "Parent" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "ContainerConfig" : { + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Domainname" : "", + "AttachStdout" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "NetworkDisabled" : false, + "OnBuild" : [], + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "User" : "", + "WorkingDir" : "", + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "Labels" : { + "com.example.license" : "GPL", + "com.example.version" : "1.0", + "com.example.vendor" : "Acme" + }, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts" : null, + "Cmd" : [ + "/bin/sh", + "-c", + "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" + ] + }, + "DockerVersion" : "1.9.0-dev", + "VirtualSize" : 188359297, + "Size" : 0, + "Author" : "", + "Created" : "2015-09-10T08:30:53.26995814Z", + "GraphDriver" : { + "Name" : "aufs", + "Data" : null + }, + "RepoDigests" : [ + "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags" : [ + "example:1.0", + "example:latest", + "example:stable" + ], + "Config" : { + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "NetworkDisabled" : false, + "OnBuild" : [], + "StdinOnce" : false, + "PublishService" : "", + 
"AttachStdin" : false, + "OpenStdin" : false, + "Domainname" : "", + "AttachStdout" : false, + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Cmd" : [ + "/bin/bash" + ], + "ExposedPorts" : null, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Labels" : { + "com.example.vendor" : "Acme", + "com.example.version" : "1.0", + "com.example.license" : "GPL" + }, + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "WorkingDir" : "", + "User" : "" + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /v1.21/images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710", + "Created": 1398108230, + "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /", + "Tags": [ + "ubuntu:lucid", + "ubuntu:10.04" + ], + "Size": 182964289, + "Comment": "" + }, + { + "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8", + "Created": 1398108222, + "CreatedBy": "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/", + "Tags": null, + "Size": 0, + "Comment": "" + }, + { + "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158", + "Created": 1371157430, + "CreatedBy": "", + "Tags": [ + "scratch12:latest", + "scratch:latest" + ], + "Size": 0, + "Comment": "Imported from -" + } + ] + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /v1.21/images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pushing..."} + {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} + {"error": "Invalid..."} + ... + +If you wish to push an image on to a private registry, that image must already have a tag +into a repository which references that registry `hostname` and `port`. This repository name should +then be used in the URL. This duplicates the command line's flow. + +**Example request**: + + POST /v1.21/images/registry.acme.com:5000/test/push HTTP/1.1 + + +**Query parameters**: + +- **tag** – The tag to associate with the image on the registry. This is optional. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object. 
+ +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /v1.21/images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 Created + +**Query parameters**: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /v1.21/images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +**Query parameters**: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. + +**Example request**: + + GET /v1.21/images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... 
+ ] + +**Query parameters**: + +- **term** – term to search + +**Status codes**: + +- **200** – no error +- **500** – server error + +### 2.3 Misc + +#### Check auth configuration + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /v1.21/auth HTTP/1.1 + Content-Type: application/json + + { + "username": "hannibal", + "password": "xxxx", + "email": "hannibal@a-team.com", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **204** – no error +- **500** – server error + +#### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /v1.21/info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "ClusterStore": "etcd://localhost:2379", + "Containers": 11, + "CpuCfsPeriod": true, + "CpuCfsQuota": true, + "Debug": false, + "DockerRootDir": "/var/lib/docker", + "Driver": "btrfs", + "DriverStatus": [[""]], + "ExecutionDriver": "native-0.1", + "ExperimentalBuild": false, + "HttpProxy": "http://test:test@localhost:8080", + "HttpsProxy": "https://test:test@localhost:8080", + "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "IPv4Forwarding": true, + "Images": 16, + "IndexServerAddress": "https://index.docker.io/v1/", + "InitPath": "/usr/bin/docker", + "InitSha1": "", + "KernelVersion": "3.12.0-1-amd64", + "Labels": [ + "storage=ssd" + ], + "MemTotal": 2099236864, + "MemoryLimit": true, + "NCPU": 1, + "NEventsListener": 0, + "NFd": 11, + "NGoroutines": 21, + "Name": "prod-server-42", + "NoProxy": "9.81.1.160", + "OomKillDisable": true, + "OperatingSystem": "Boot2Docker", + "RegistryConfig": { + "IndexConfigs": { + "docker.io": { + "Mirrors": null, + "Name": "docker.io", + "Official": true, + "Secure": true + } + }, + "InsecureRegistryCIDRs": [ + "127.0.0.0/8" + ] + }, + "ServerVersion": "1.9.0", + "SwapLimit": false, + "SystemTime": "2015-03-10T11:11:23.730591467-07:00" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /v1.21/version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.5.0", + "Os": "linux", + "KernelVersion": "3.18.5-tinycore64", + "GoVersion": "go1.4.1", + "GitCommit": "a8a31ef", + "Arch": "amd64", + "ApiVersion": "1.20", + "Experimental": false + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /v1.21/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.21/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ], + "Labels": { + "key1": "value1", + "key2": 
"value2" + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") +- **pause** – 1/True/true or 0/False/false, whether to pause the container before committing +- **changes** – Dockerfile instructions to apply while committing + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **500** – server error + +#### Monitor Docker's events + +`GET /events` + +Get container events from docker, in real time via streaming. + +Docker containers report the following events: + + attach, commit, copy, create, destroy, die, exec_create, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause + +Docker images report the following events: + + delete, import, pull, push, tag, untag + +**Example request**: + + GET /v1.21/events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"pull","id":"busybox:latest","time":1442421700,"timeNano":1442421700598988358} + {"status":"create","id":"5745704abe9caa5","from":"busybox","time":1442421716,"timeNano":1442421716853979870} + {"status":"attach","id":"5745704abe9caa5","from":"busybox","time":1442421716,"timeNano":1442421716894759198} + {"status":"start","id":"5745704abe9caa5","from":"busybox","time":1442421716,"timeNano":1442421716983607193} + +**Query parameters**: + +- **since** – Timestamp. Show all events created since timestamp and then stream +- **until** – Timestamp. Show events created until given timestamp and stop streaming +- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: + - `container=`; -- container to filter + - `event=`; -- event to filter + - `image=`; -- image to filter + - `label=`; -- image and container label to filter + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.21/images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. 
+`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.21/images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /v1.21/images/load + Content-Type: application/x-tar + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing these files: + +- `VERSION`: currently `1.0` - the file format version +- `json`: detailed layer information, similar to `docker inspect layer_id` +- `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, the tarball should also include a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +#### Exec Create + +`POST /containers/(id or name)/exec` + +Sets up an exec instance in a running container `id` + +**Example request**: + + POST /v1.21/containers/e90e34656806/exec HTTP/1.1 + Content-Type: application/json + + { + "AttachStdin": true, + "AttachStdout": true, + "AttachStderr": true, + "Cmd": ["sh"], + "Privileged": true, + "Tty": true, + "User": "123:456" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id": "f90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. +- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. +- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. +- **Cmd** - Command to run specified as a string or an array of strings. +- **Privileged** - Boolean value, runs the exec process with extended privileges. +- **User** - A string value specifying the user, and optionally, group to run + the exec process inside the container. Format is one of: `"user"`, + `"user:group"`, `"uid"`, or `"uid:gid"`. + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **409** - container is paused +- **500** - server error + +#### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up `exec` instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. 
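+
+Combined with *Exec Create* above, the round trip looks roughly like this from
+a client's point of view (a sketch over the default Unix socket; the container
+ID is a placeholder and error handling is minimal):
+
+```go
+package main
+
+import (
+    "bytes"
+    "context"
+    "encoding/json"
+    "fmt"
+    "io"
+    "net"
+    "net/http"
+    "os"
+)
+
+func main() {
+    client := &http.Client{Transport: &http.Transport{
+        DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+            return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
+        },
+    }}
+
+    // 1. Create the exec instance in a running container (placeholder ID).
+    create, err := client.Post("http://docker/v1.21/containers/e90e34656806/exec",
+        "application/json",
+        bytes.NewBufferString(`{"AttachStdout": true, "Cmd": ["date"]}`))
+    if err != nil {
+        panic(err)
+    }
+    var out struct{ Id string }
+    json.NewDecoder(create.Body).Decode(&out)
+    create.Body.Close()
+
+    // 2. Start it; with Detach=false the response body carries the output stream.
+    start, err := client.Post("http://docker/v1.21/exec/"+out.Id+"/start",
+        "application/json",
+        bytes.NewBufferString(`{"Detach": false, "Tty": false}`))
+    if err != nil {
+        panic(err)
+    }
+    defer start.Body.Close()
+    io.Copy(os.Stdout, start.Body)
+    fmt.Println()
+}
+```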
+ +**Example request**: + + POST /v1.21/exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach": false, + "Tty": false + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {% raw %} + {{ STREAM }} + {% endraw %} + +**JSON parameters**: + +- **Detach** - Detach from the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **409** - container is paused + +**Stream details**: + +Similar to the stream behavior of `POST /containers/(id or name)/attach` API + +#### Exec Resize + +`POST /exec/(id)/resize` + +Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters. +This API is valid only if `tty` was specified as part of creating and starting the `exec` command. + +**Example request**: + + POST /v1.21/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1 + Content-Type: text/plain + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: text/plain + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **201** – no error +- **404** – no such exec instance + +#### Exec Inspect + +`GET /exec/(id)/json` + +Return low-level information about the `exec` command `id`. + +**Example request**: + + GET /v1.21/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: plain/text + + { + "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39", + "Running" : false, + "ExitCode" : 2, + "ProcessConfig" : { + "privileged" : false, + "user" : "", + "tty" : false, + "entrypoint" : "sh", + "arguments" : [ + "-c", + "exit 2" + ] + }, + "OpenStdin" : false, + "OpenStderr" : false, + "OpenStdout" : false, + "Container" : { + "State" : { + "Status" : "running", + "Running" : true, + "Paused" : false, + "Restarting" : false, + "OOMKilled" : false, + "Pid" : 3650, + "ExitCode" : 0, + "Error" : "", + "StartedAt" : "2014-11-17T22:26:03.717657531Z", + "FinishedAt" : "0001-01-01T00:00:00Z" + }, + "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c", + "Created" : "2014-11-17T22:26:03.626304998Z", + "Path" : "date", + "Args" : [], + "Config" : { + "Hostname" : "8f177a186b97", + "Domainname" : "", + "User" : "", + "AttachStdin" : false, + "AttachStdout" : false, + "AttachStderr" : false, + "ExposedPorts" : null, + "Tty" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], + "Cmd" : [ + "date" + ], + "Image" : "ubuntu", + "Volumes" : null, + "WorkingDir" : "", + "Entrypoint" : null, + "NetworkDisabled" : false, + "MacAddress" : "", + "OnBuild" : null, + "SecurityOpt" : null + }, + "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5", + "NetworkSettings" : { + "Bridge": "", + "SandboxID": "", + "HairpinMode": false, + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "Ports": null, + "SandboxKey": "", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null, + "EndpointID": "", + "Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "MacAddress": "", + "Networks": { + "bridge": { + "EndpointID": "", + "Gateway": "", + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "" 
+ } + } + }, + "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf", + "HostnamePath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname", + "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Name" : "/test", + "Driver" : "aufs", + "ExecDriver" : "native-0.2", + "MountLabel" : "", + "ProcessLabel" : "", + "AppArmorProfile" : "", + "RestartCount" : 0, + "Mounts" : [] + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **500** - server error + +### 2.4 Volumes + +#### List volumes + +`GET /volumes` + +**Example request**: + + GET /v1.21/volumes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Volumes": [ + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + ] + } + +**Query parameters**: + +- **filters** - JSON encoded value of the filters (a `map[string][]string`) to process on the volumes list. There is one available filter: `dangling=true` + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a volume + +`POST /volumes/create` + +Create a volume + +**Example request**: + + POST /v1.21/volumes/create HTTP/1.1 + Content-Type: application/json + + { + "Name": "tardis" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + +**Status codes**: + +- **201** - no error +- **500** - server error + +**JSON parameters**: + +- **Name** - The new volume's name. If not specified, Docker generates a name. +- **Driver** - Name of the volume driver to use. Defaults to `local` for the name. +- **DriverOpts** - A mapping of driver options and values. These options are + passed directly to the driver and are driver specific. + +#### Inspect a volume + +`GET /volumes/(name)` + +Return low-level information on the volume `name` + +**Example request**: + + GET /volumes/tardis + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + +**Status codes**: + +- **200** - no error +- **404** - no such volume +- **500** - server error + +#### Remove a volume + +`DELETE /volumes/(name)` + +Instruct the driver to remove the volume (`name`). 
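+
+A client-side sketch of this call (default Unix socket, the `tardis` volume
+from the examples above), mapping the status codes listed below:
+
+```go
+package main
+
+import (
+    "context"
+    "fmt"
+    "net"
+    "net/http"
+)
+
+func main() {
+    client := &http.Client{Transport: &http.Transport{
+        DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+            return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
+        },
+    }}
+    req, _ := http.NewRequest(http.MethodDelete, "http://docker/v1.21/volumes/tardis", nil)
+    resp, err := client.Do(req)
+    if err != nil {
+        panic(err)
+    }
+    defer resp.Body.Close()
+
+    switch resp.StatusCode {
+    case 204:
+        fmt.Println("volume removed")
+    case 404:
+        fmt.Println("no such volume or volume driver")
+    case 409:
+        fmt.Println("volume is in use and cannot be removed")
+    default:
+        fmt.Println("server error:", resp.Status)
+    }
+}
+```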
+ +**Example request**: + + DELETE /v1.21/volumes/tardis HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** - no error +- **404** - no such volume or volume driver +- **409** - volume is in use and cannot be removed +- **500** - server error + +### 2.5 Networks + +#### List networks + +`GET /networks` + +**Example request**: + + GET /v1.21/networks HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +[ + { + "Name": "bridge", + "Id": "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566", + "Scope": "local", + "Driver": "bridge", + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.17.0.0/16" + } + ] + }, + "Containers": { + "39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867": { + "EndpointID": "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda", + "MacAddress": "02:42:ac:11:00:02", + "IPv4Address": "172.17.0.2/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + } + }, + { + "Name": "none", + "Id": "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794", + "Scope": "local", + "Driver": "null", + "IPAM": { + "Driver": "default", + "Config": [] + }, + "Containers": {}, + "Options": {} + }, + { + "Name": "host", + "Id": "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e", + "Scope": "local", + "Driver": "host", + "IPAM": { + "Driver": "default", + "Config": [] + }, + "Containers": {}, + "Options": {} + } +] +``` + +**Query parameters**: + +- **filters** - JSON encoded value of the filters (a `map[string][]string`) to process on the networks list. 
Available filters: `name=[network-names]` , `id=[network-ids]` + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Inspect network + +`GET /networks/(id or name)` + +Return low-level information on the network `id` + +**Example request**: + + GET /v1.21/networks/f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566 HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +{ + "Name": "bridge", + "Id": "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566", + "Scope": "local", + "Driver": "bridge", + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.17.0.0/16" + } + ] + }, + "Containers": { + "39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867": { + "EndpointID": "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda", + "MacAddress": "02:42:ac:11:00:02", + "IPv4Address": "172.17.0.2/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + } +} +``` + +**Status codes**: + +- **200** - no error +- **404** - network not found +- **500** - server error + +#### Create a network + +`POST /networks/create` + +Create a network + +**Example request**: + +``` +POST /v1.21/networks/create HTTP/1.1 +Content-Type: application/json + +{ + "Name":"isolated_nw", + "CheckDuplicate":true, + "Driver":"bridge", + "IPAM":{ + "Driver": "default", + "Config":[ + { + "Subnet":"172.20.0.0/16", + "IPRange":"172.20.10.0/24", + "Gateway":"172.20.10.11" + } + ] + } +} +``` + +**Example response**: + +``` +HTTP/1.1 201 Created +Content-Type: application/json + +{ + "Id": "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30", + "Warning": "" +} +``` + +**Status codes**: + +- **201** - no error +- **404** - plugin not found +- **500** - server error + +**JSON parameters**: + +- **Name** - The new network's name. this is a mandatory field +- **CheckDuplicate** - Requests daemon to check for networks with same name. Defaults to `false`. + Since Network is primarily keyed based on a random ID and not on the name, + and network name is strictly a user-friendly alias to the network which is uniquely identified using ID, + there is no guaranteed way to check for duplicates across a cluster of docker hosts. + This parameter CheckDuplicate is there to provide a best effort checking of any networks + which has the same name but it is not guaranteed to catch all name collisions. +- **Driver** - Name of the network driver plugin to use. Defaults to `bridge` driver +- **IPAM** - Optional custom IP scheme for the network + - **Driver** - Name of the IPAM driver to use. 
Defaults to the `default` driver
+  - **Config** - List of IPAM configuration options, specified as a map:
+    `{"Subnet": <CIDR>, "IPRange": <CIDR>, "Gateway": <IP address>, "AuxAddress": <device_name:IP address>}`
+- **Options** - Network specific options to be used by the drivers
+
+#### Connect a container to a network
+
+`POST /networks/(id or name)/connect`
+
+Connect a container to a network
+
+**Example request**:
+
+```
+POST /v1.21/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/connect HTTP/1.1
+Content-Type: application/json
+
+{
+  "Container":"3613f73ba0e4"
+}
+```
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+
+**Status codes**:
+
+- **200** - no error
+- **404** - network or container is not found
+- **500** - Internal Server Error
+
+**JSON parameters**:
+
+- **Container** - container-id/name to be connected to the network
+
+#### Disconnect a container from a network
+
+`POST /networks/(id or name)/disconnect`
+
+Disconnect a container from a network
+
+**Example request**:
+
+```
+POST /v1.21/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/disconnect HTTP/1.1
+Content-Type: application/json
+
+{
+  "Container":"3613f73ba0e4"
+}
+```
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+
+**Status codes**:
+
+- **200** - no error
+- **404** - network or container not found
+- **500** - Internal Server Error
+
+**JSON parameters**:
+
+- **Container** - container-id/name to be disconnected from a network
+
+#### Remove a network
+
+`DELETE /networks/(id or name)`
+
+Instruct the driver to remove the network (`id`).
+
+**Example request**:
+
+    DELETE /v1.21/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+
+**Status codes**:
+
+- **200** - no error
+- **403** - operation not supported for pre-defined networks
+- **404** - no such network
+- **500** - server error
+
+## 3. Going further
+
+### 3.1 Inside `docker run`
+
+As an example, the `docker run` command line makes the following API calls:
+
+- Create the container
+
+- If the status code is 404, it means the image doesn't exist:
+    - Try to pull it.
+    - Then, retry to create the container.
+
+- Start the container.
+
+- If you are not in detached mode:
+    - Attach to the container, using `logs=1` (to have `stdout` and
+      `stderr` from the container's start) and `stream=1`
+
+- If in detached mode or only `stdin` is attached, display the container's ID.
+
+### 3.2 Hijacking
+
+In this version of the API, `/attach` uses hijacking to transport `stdin`,
+`stdout`, and `stderr` on the same socket.
+
+To hint potential proxies about connection hijacking, the Docker client sends
+connection upgrade headers, similar to a WebSocket handshake.
+
+    Upgrade: tcp
+    Connection: Upgrade
+
+When the Docker daemon detects the `Upgrade` header, it switches its status code
+from **200 OK** to **101 UPGRADED** and resends the same headers.
+
+
+### 3.3 CORS Requests
+
+To allow cross-origin requests to the Engine API, give values to
+`--api-cors-header` when running Docker in daemon mode.
Set * (asterisk) allows all, +default or blank means CORS disabled + + $ dockerd -H="192.168.1.9:2375" --api-cors-header="http://foo.bar" diff --git a/vendor/github.com/moby/moby/docs/api/v1.22.md b/vendor/github.com/moby/moby/docs/api/v1.22.md new file mode 100644 index 000000000..9bf64b7e9 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/api/v1.22.md @@ -0,0 +1,3319 @@ +--- +title: "Engine API v1.22" +description: "API Documentation for Docker" +keywords: "API, Docker, rcli, REST, documentation" +redirect_from: +- /engine/reference/api/docker_remote_api_v1.22/ +- /reference/api/docker_remote_api_v1.22/ +--- + + + +## 1. Brief introduction + + - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket). + - The API tends to be REST. However, for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `stdout`, + `stdin` and `stderr`. + +## 2. Endpoints + +### 2.1 Containers + +#### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /v1.22/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:02" + } + } + } + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.8", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:08" + } + } + } + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "Labels": {}, + "SizeRw":12288, + "SizeRootFs":0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + 
"Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.6", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:06" + } + } + } + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.5", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:05" + } + } + } + } + ] + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. 
Available filters: + - `exited=`; -- containers with exit code of `` ; + - `status=`(`created`|`restarting`|`running`|`paused`|`exited`|`dead`) + - `label=key` or `label="key=value"` of a container label + - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +#### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /v1.22/containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "FOO=bar", + "BAZ=quux" + ], + "Cmd": [ + "date" + ], + "Entrypoint": null, + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "/volumes/data": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "StopSignal": "SIGTERM", + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Tmpfs": { "/run": "rw,noexec,nosuid,size=65536k" }, + "Links": ["redis3:redis"], + "Memory": 0, + "MemorySwap": 0, + "MemoryReservation": 0, + "KernelMemory": 0, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpuQuota": 50000, + "CpusetCpus": "0,1", + "CpusetMems": "0,1", + "BlkioWeight": 300, + "BlkioWeightDevice": [{}], + "BlkioDeviceReadBps": [{}], + "BlkioDeviceReadIOps": [{}], + "BlkioDeviceWriteBps": [{}], + "BlkioDeviceWriteIOps": [{}], + "MemorySwappiness": 60, + "OomKillDisable": false, + "OomScoreAdj": 500, + "PidMode": "", + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsOptions": [""], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "GroupAdd": ["newgroup"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [], + "CgroupParent": "", + "VolumeDriver": "", + "ShmSize": 67108864 + }, + "NetworkingConfig": { + "EndpointsConfig": { + "isolated_nw" : { + "IPAMConfig": { + "IPv4Address":"172.20.30.33", + "IPv6Address":"2001:db8:abcd::3033" + }, + "Links":["container_1", "container_2"], + "Aliases":["server_x", "server_y"] + } + } + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **Hostname** - A string value containing the hostname to use for the + container. +- **Domainname** - A string value containing the domain name to use + for the container. +- **User** - A string value specifying the user inside the container. +- **AttachStdin** - Boolean value, attaches to `stdin`. +- **AttachStdout** - Boolean value, attaches to `stdout`. +- **AttachStderr** - Boolean value, attaches to `stderr`. +- **Tty** - Boolean value, Attach standard streams to a `tty`, including `stdin` if it is not closed. +- **OpenStdin** - Boolean value, opens `stdin`, +- **StdinOnce** - Boolean value, close `stdin` after the 1 attached client disconnects. 
+- **Env** - A list of environment variables in the form of `["VAR=value", ...]`
+- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }`
+- **Cmd** - Command to run specified as a string or an array of strings.
+- **Entrypoint** - Set the entry point for the container as a string or an array
+  of strings.
+- **Image** - A string specifying the image name to use for the container.
+- **Volumes** - An object mapping mount point paths (strings) inside the
+  container to empty objects.
+- **WorkingDir** - A string specifying the working directory for commands to
+  run in.
+- **NetworkDisabled** - Boolean value, when true disables networking for the
+  container.
+- **ExposedPorts** - An object mapping ports to an empty object in the form of:
+  `"ExposedPorts": { "<port>/<protocol>": {} }`
+- **StopSignal** - Signal to stop a container as a string or unsigned integer. `SIGTERM` by default.
+- **HostConfig**
+    - **Binds** – A list of volume bindings for this container. Each volume binding is a string in one of these forms:
+       + `host-src:container-dest` to bind-mount a host path into the
+         container. Both `host-src` and `container-dest` must be an
+         _absolute_ path.
+       + `host-src:container-dest:ro` to make the bind-mount read-only
+         inside the container. Both `host-src` and `container-dest` must be
+         an _absolute_ path.
+       + `volume-name:container-dest` to bind-mount a volume managed by a
+         volume driver into the container. `container-dest` must be an
+         _absolute_ path.
+       + `volume-name:container-dest:ro` to mount the volume read-only
+         inside the container. `container-dest` must be an _absolute_ path.
+    - **Tmpfs** – A map of container directories which should be replaced by tmpfs mounts, and their corresponding
+      mount options. A JSON object in the form `{ "/run": "rw,noexec,nosuid,size=65536k" }`.
+    - **Links** - A list of links for the container. Each link entry should be
+      in the form of `container_name:alias`.
+    - **Memory** - Memory limit in bytes.
+    - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap.
+      You must use this with `memory` and make the swap value larger than `memory`.
+    - **MemoryReservation** - Memory soft limit in bytes.
+    - **KernelMemory** - Kernel memory limit in bytes.
+    - **CpuShares** - An integer value containing the container's CPU Shares
+      (i.e., the relative weight vs. other containers).
+    - **CpuPeriod** - The length of a CPU period in microseconds.
+    - **CpuQuota** - Microseconds of CPU time that the container can get in a CPU period.
+    - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use.
+    - **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
+    - **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000.
+    - **BlkioWeightDevice** - Block IO weight (relative device weight) in the form of: `"BlkioWeightDevice": [{"Path": "device_path", "Weight": weight}]`
+    - **BlkioDeviceReadBps** - Limit read rate (bytes per second) from a device in the form of: `"BlkioDeviceReadBps": [{"Path": "device_path", "Rate": rate}]`, for example:
+      `"BlkioDeviceReadBps": [{"Path": "/dev/sda", "Rate": "1024"}]`
+    - **BlkioDeviceWriteBps** - Limit write rate (bytes per second) to a device in the form of: `"BlkioDeviceWriteBps": [{"Path": "device_path", "Rate": rate}]`, for example:
+      `"BlkioDeviceWriteBps": [{"Path": "/dev/sda", "Rate": "1024"}]`
+    - **BlkioDeviceReadIOps** - Limit read rate (IO per second) from a device in the form of: `"BlkioDeviceReadIOps": [{"Path": "device_path", "Rate": rate}]`, for example:
+      `"BlkioDeviceReadIOps": [{"Path": "/dev/sda", "Rate": "1000"}]`
+    - **BlkioDeviceWriteIOps** - Limit write rate (IO per second) to a device in the form of: `"BlkioDeviceWriteIOps": [{"Path": "device_path", "Rate": rate}]`, for example:
+      `"BlkioDeviceWriteIOps": [{"Path": "/dev/sda", "Rate": "1000"}]`
+    - **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
+    - **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not.
+    - **OomScoreAdj** - An integer value containing the score given to the container in order to tune OOM killer preferences.
+    - **PidMode** - Set the PID (Process) Namespace mode for the container;
+      `"container:<name|id>"`: joins another container's PID namespace
+      `"host"`: use the host's PID namespace inside the container
+    - **PortBindings** - A map of exposed container ports and the host port they
+      should map to. A JSON object in the form
+      `{ "<port>/<protocol>": [{ "HostPort": "<port>" }] }`
+      Take note that `port` is specified as a string and not an integer value.
+    - **PublishAllPorts** - Allocates a random host port for all of a container's
+      exposed ports. Specified as a boolean value.
+    - **Privileged** - Gives the container full access to the host. Specified as
+      a boolean value.
+    - **ReadonlyRootfs** - Mount the container's root filesystem as read only.
+      Specified as a boolean value.
+    - **Dns** - A list of DNS servers for the container to use.
+    - **DnsOptions** - A list of DNS options.
+    - **DnsSearch** - A list of DNS search domains.
+    - **ExtraHosts** - A list of hostnames/IP mappings to add to the
+      container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`.
+    - **VolumesFrom** - A list of volumes to inherit from another container.
+      Specified in the form `<container name>[:<ro|rw>]`
+    - **CapAdd** - A list of kernel capabilities to add to the container.
+    - **CapDrop** - A list of kernel capabilities to drop from the container.
+    - **GroupAdd** - A list of additional groups that the container process will run as.
+    - **RestartPolicy** – The behavior to apply when the container exits. The
+      value is an object with a `Name` property of either `"always"` to
+      always restart, `"unless-stopped"` to always restart except when the
+      user has manually stopped the container, or `"on-failure"` to restart only when the container
+      exit code is non-zero. If `on-failure` is used, `MaximumRetryCount`
+      controls the number of times to retry before giving up.
+      The default is not to restart. (optional)
+      An ever-increasing delay (double the previous delay, starting at 100 ms)
+      is added before each restart to prevent flooding the server.
+    - **NetworkMode** - Sets the networking mode for the container. Supported
+      standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. Any other value is taken
+      as a custom network's name to which this container should connect.
+    - **Devices** - A list of devices to add to the container specified as a JSON object in the
+      form
+      `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}`
+    - **Ulimits** - A list of ulimits to set in the container, specified as
+      `{ "Name": <name>, "Soft": <soft limit>, "Hard": <hard limit> }`, for example:
+      `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }`
+    - **SecurityOpt** - A list of string values to customize labels for MLS
+      systems, such as SELinux.
+    - **LogConfig** - Log configuration for the container, specified as a JSON object in the form
+      `{ "Type": "<driver_name>", "Config": {"key1": "val1"}}`.
+      Available types: `json-file`, `syslog`, `journald`, `gelf`, `awslogs`, `splunk`, `none`.
+      `json-file` is the default logging driver.
+    - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist.
+    - **VolumeDriver** - Driver that this container uses to mount volumes.
+    - **ShmSize** - Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted, the system uses 64MB.
+
+**Query parameters**:
+
+- **name** – Assign the specified name to the container. Must
+  match `/?[a-zA-Z0-9_-]+`.
+
+**Status codes**:
+
+- **201** – no error
+- **400** – bad parameter
+- **404** – no such container
+- **406** – impossible to attach (container not running)
+- **409** – conflict
+- **500** – server error
+
+#### Inspect a container
+
+`GET /containers/(id or name)/json`
+
+Return low-level information on the container `id`
+
+**Example request**:
+
+    GET /v1.22/containers/4fa6e0f0c678/json HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "AppArmorProfile": "",
+        "Args": [
+            "-c",
+            "exit 9"
+        ],
+        "Config": {
+            "AttachStderr": true,
+            "AttachStdin": false,
+            "AttachStdout": true,
+            "Cmd": [
+                "/bin/sh",
+                "-c",
+                "exit 9"
+            ],
+            "Domainname": "",
+            "Entrypoint": null,
+            "Env": [
+                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+            ],
+            "ExposedPorts": null,
+            "Hostname": "ba033ac44011",
+            "Image": "ubuntu",
+            "Labels": {
+                "com.example.vendor": "Acme",
+                "com.example.license": "GPL",
+                "com.example.version": "1.0"
+            },
+            "MacAddress": "",
+            "NetworkDisabled": false,
+            "OnBuild": null,
+            "OpenStdin": false,
+            "StdinOnce": false,
+            "Tty": false,
+            "User": "",
+            "Volumes": {
+                "/volumes/data": {}
+            },
+            "WorkingDir": "",
+            "StopSignal": "SIGTERM"
+        },
+        "Created": "2015-01-06T15:47:31.485331387Z",
+        "Driver": "devicemapper",
+        "ExecIDs": null,
+        "HostConfig": {
+            "Binds": null,
+            "BlkioWeight": 0,
+            "BlkioWeightDevice": [{}],
+            "BlkioDeviceReadBps": [{}],
+            "BlkioDeviceWriteBps": [{}],
+            "BlkioDeviceReadIOps": [{}],
+            "BlkioDeviceWriteIOps": [{}],
+            "CapAdd": null,
+            "CapDrop": null,
+            "ContainerIDFile": "",
+            "CpusetCpus": "",
+            "CpusetMems": "",
+            "CpuShares": 0,
+            "CpuPeriod": 100000,
+            "Devices": [],
+            "Dns": null,
+            "DnsOptions": null,
+            "DnsSearch": null,
+            "ExtraHosts": null,
+            "IpcMode": "",
+            "Links": null,
+            "LxcConf": [],
+            "Memory": 0,
+            "MemorySwap": 0,
+            "MemoryReservation": 0,
+            "KernelMemory": 0,
+            "OomKillDisable": false,
+            "OomScoreAdj": 500,
+            "NetworkMode": "bridge",
+            "PidMode": "",
+            "PortBindings": {},
+            "Privileged": false,
+            "ReadonlyRootfs": 
false, + "PublishAllPorts": false, + "RestartPolicy": { + "MaximumRetryCount": 2, + "Name": "on-failure" + }, + "LogConfig": { + "Config": null, + "Type": "json-file" + }, + "SecurityOpt": null, + "VolumesFrom": null, + "Ulimits": [{}], + "VolumeDriver": "", + "ShmSize": 67108864 + }, + "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "SandboxID": "", + "HairpinMode": false, + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "Ports": null, + "SandboxKey": "", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null, + "EndpointID": "", + "Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "MacAddress": "", + "Networks": { + "bridge": { + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:12:00:02" + } + } + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Dead": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": true, + "StartedAt": "2015-01-06T15:47:32.072697474Z", + "Status": "running" + }, + "Mounts": [ + { + "Name": "fac362...80535", + "Source": "/data", + "Destination": "/data", + "Driver": "local", + "Mode": "ro,Z", + "RW": false, + "Propagation": "" + } + ] + } + +**Example request, with size information**: + + GET /v1.22/containers/4fa6e0f0c678/json?size=1 HTTP/1.1 + +**Example response, with size information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + .... + "SizeRw": 0, + "SizeRootFs": 972, + .... + } + +**Query parameters**: + +- **size** – 1/True/true or 0/False/false, return container size information. Default is `false`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### List processes running inside a container + +`GET /containers/(id or name)/top` + +List processes running inside the container `id`. On Unix systems this +is done by running the `ps` command. This endpoint is not +supported on Windows. 
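+
+As a concrete illustration, here is a minimal Go sketch that calls this
+endpoint and prints the process table; the daemon address (plain TCP on
+`localhost:2375`) and the container ID are placeholders for your own setup.
+The raw HTTP exchange it performs is shown below.
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+)
+
+// topResponse mirrors the "Titles"/"Processes" fields of the example
+// responses below.
+type topResponse struct {
+	Titles    []string
+	Processes [][]string
+}
+
+func main() {
+	resp, err := http.Get("http://localhost:2375/v1.22/containers/4fa6e0f0c678/top")
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	var top topResponse
+	if err := json.NewDecoder(resp.Body).Decode(&top); err != nil {
+		panic(err)
+	}
+	fmt.Println(top.Titles)
+	for _, proc := range top.Processes {
+		fmt.Println(proc)
+	}
+}
+```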
+
+**Example request**:
+
+    GET /v1.22/containers/4fa6e0f0c678/top HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "Titles" : [
+            "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD"
+        ],
+        "Processes" : [
+            [
+                "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash"
+            ],
+            [
+                "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10"
+            ]
+        ]
+    }
+
+**Example request**:
+
+    GET /v1.22/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "Titles" : [
+            "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND"
+        ],
+        "Processes" : [
+            [
+                "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash"
+            ],
+            [
+                "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10"
+            ]
+        ]
+    }
+
+**Query parameters**:
+
+- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef`
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Get container logs
+
+`GET /containers/(id or name)/logs`
+
+Get `stdout` and `stderr` logs from the container `id`
+
+> **Note**:
+> This endpoint works only for containers with the `json-file` or `journald` logging drivers.
+
+**Example request**:
+
+    GET /v1.22/containers/4fa6e0f0c678/logs?stderr=1&stdout=1&timestamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 101 UPGRADED
+    Content-Type: application/vnd.docker.raw-stream
+    Connection: Upgrade
+    Upgrade: tcp
+
+    {% raw %}
+    {{ STREAM }}
+    {% endraw %}
+
+**Query parameters**:
+
+- **follow** – 1/True/true or 0/False/false, return stream. Default `false`.
+- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`.
+- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`.
+- **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp
+  will only output log-entries since that timestamp. Default: 0 (unfiltered)
+- **timestamps** – 1/True/true or 0/False/false, print timestamps for
+  every log line. Default `false`.
+- **tail** – Output specified number of lines at the end of logs: `all` or `<number>`. Default all.
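+
+Putting these parameters together, the following is a minimal Go sketch (not
+the canonical client; the daemon address and container ID are assumptions)
+that tails the last 10 lines and follows the stream. For non-TTY containers
+the body arrives in the multiplexed format described under "Attach to a
+container"; this sketch just copies the raw bytes.
+
+```go
+package main
+
+import (
+	"io"
+	"net/http"
+	"os"
+)
+
+func main() {
+	url := "http://localhost:2375/v1.22/containers/4fa6e0f0c678/logs" +
+		"?stdout=1&stderr=1&timestamps=1&tail=10&follow=1"
+	resp, err := http.Get(url)
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	// With follow=1 the daemon keeps the connection open, so this copy
+	// blocks and prints new log lines as they are produced.
+	if _, err := io.Copy(os.Stdout, resp.Body); err != nil {
+		panic(err)
+	}
+}
+```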
+ +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **404** – no such container +- **500** – server error + +#### Inspect changes on a container's filesystem + +`GET /containers/(id or name)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /v1.22/containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Values for `Kind`: + +- `0`: Modify +- `1`: Add +- `2`: Delete + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Export a container + +`GET /containers/(id or name)/export` + +Export the contents of container `id` + +**Example request**: + + GET /v1.22/containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container stats based on resource usage + +`GET /containers/(id or name)/stats` + +This endpoint returns a live stream of a container's resource usage statistics. + +**Example request**: + + GET /v1.22/containers/redis1/stats HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "read" : "2015-01-08T22:57:31.547920715Z", + "networks": { + "eth0": { + "rx_bytes": 5338, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 36, + "tx_bytes": 648, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 8 + }, + "eth5": { + "rx_bytes": 4641, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 26, + "tx_bytes": 690, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 9 + } + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit" : 67108864, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats" : {}, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24472255, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100215355, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 739306590000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + }, + "precpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24350896, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100093996, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 9492140000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + } + } + +The `precpu_stats` is the cpu statistic of last read, which is used for calculating the cpu 
usage percentage. It is not an exact copy of the `cpu_stats` field.
+
+**Query parameters**:
+
+- **stream** – 1/True/true or 0/False/false, pull stats once then disconnect. Default `true`.
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Resize a container TTY
+
+`POST /containers/(id or name)/resize`
+
+Resize the TTY for container with `id`. The unit is number of characters. You must restart the container for the resize to take effect.
+
+**Example request**:
+
+    POST /v1.22/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Length: 0
+    Content-Type: text/plain; charset=utf-8
+
+**Query parameters**:
+
+- **h** – height of `tty` session
+- **w** – width
+
+**Status codes**:
+
+- **200** – no error
+- **404** – No such container
+- **500** – Cannot resize container
+
+#### Start a container
+
+`POST /containers/(id or name)/start`
+
+Start the container `id`
+
+> **Note**:
+> For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body.
+> See [create a container](#create-a-container) for details.
+
+**Example request**:
+
+    POST /v1.22/containers/e90e34656806/start HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Query parameters**:
+
+- **detachKeys** – Override the key sequence for detaching a
+  container. Format is a single character `[a-Z]` or `ctrl-<value>`
+  where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`.
+
+**Status codes**:
+
+- **204** – no error
+- **304** – container already started
+- **404** – no such container
+- **500** – server error
+
+#### Stop a container
+
+`POST /containers/(id or name)/stop`
+
+Stop the container `id`
+
+**Example request**:
+
+    POST /v1.22/containers/e90e34656806/stop?t=5 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Query parameters**:
+
+- **t** – number of seconds to wait before killing the container
+
+**Status codes**:
+
+- **204** – no error
+- **304** – container already stopped
+- **404** – no such container
+- **500** – server error
+
+#### Restart a container
+
+`POST /containers/(id or name)/restart`
+
+Restart the container `id`
+
+**Example request**:
+
+    POST /v1.22/containers/e90e34656806/restart?t=5 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Query parameters**:
+
+- **t** – number of seconds to wait before killing the container
+
+**Status codes**:
+
+- **204** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Kill a container
+
+`POST /containers/(id or name)/kill`
+
+Kill the container `id`
+
+**Example request**:
+
+    POST /v1.22/containers/e90e34656806/kill HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Query parameters**:
+
+- **signal** - Signal to send to the container: integer or string like `SIGINT`.
+  When not set, `SIGKILL` is assumed and the call waits for the container to exit.
+
+**Status codes**:
+
+- **204** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Update a container
+
+`POST /containers/(id or name)/update`
+
+Update the resource configuration of one or more containers.
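+
+As a sketch of how a client might drive this endpoint (the resource values
+mirror the example request below; the daemon address and container ID are
+assumptions):
+
+```go
+package main
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"net/http"
+)
+
+func main() {
+	// Only the fields being changed need to be present in the body.
+	body, err := json.Marshal(map[string]interface{}{
+		"CpuShares": 512,
+		"Memory":    314572800,
+	})
+	if err != nil {
+		panic(err)
+	}
+	resp, err := http.Post(
+		"http://localhost:2375/v1.22/containers/e90e34656806/update",
+		"application/json", bytes.NewReader(body))
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	// The response carries a "Warnings" array, as in the example below.
+	var out struct{ Warnings []string }
+	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
+		panic(err)
+	}
+	fmt.Println(resp.Status, out.Warnings)
+}
+```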
+
+**Example request**:
+
+    POST /v1.22/containers/e90e34656806/update HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "BlkioWeight": 300,
+      "CpuShares": 512,
+      "CpuPeriod": 100000,
+      "CpuQuota": 50000,
+      "CpusetCpus": "0,1",
+      "CpusetMems": "0",
+      "Memory": 314572800,
+      "MemorySwap": 514288000,
+      "MemoryReservation": 209715200,
+      "KernelMemory": 52428800
+    }
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+      "Warnings": []
+    }
+
+**Status codes**:
+
+- **200** – no error
+- **400** – bad parameter
+- **404** – no such container
+- **500** – server error
+
+#### Rename a container
+
+`POST /containers/(id or name)/rename`
+
+Rename the container `id` to a `new_name`
+
+**Example request**:
+
+    POST /v1.22/containers/e90e34656806/rename?name=new_name HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Query parameters**:
+
+- **name** – new name for the container
+
+**Status codes**:
+
+- **204** – no error
+- **404** – no such container
+- **409** – conflict, name already assigned
+- **500** – server error
+
+#### Pause a container
+
+`POST /containers/(id or name)/pause`
+
+Pause the container `id`
+
+**Example request**:
+
+    POST /v1.22/containers/e90e34656806/pause HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Status codes**:
+
+- **204** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Unpause a container
+
+`POST /containers/(id or name)/unpause`
+
+Unpause the container `id`
+
+**Example request**:
+
+    POST /v1.22/containers/e90e34656806/unpause HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Status codes**:
+
+- **204** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Attach to a container
+
+`POST /containers/(id or name)/attach`
+
+Attach to the container `id`
+
+**Example request**:
+
+    POST /v1.22/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 101 UPGRADED
+    Content-Type: application/vnd.docker.raw-stream
+    Connection: Upgrade
+    Upgrade: tcp
+
+    {% raw %}
+    {{ STREAM }}
+    {% endraw %}
+
+**Query parameters**:
+
+- **detachKeys** – Override the key sequence for detaching a
+  container. Format is a single character `[a-Z]` or `ctrl-<value>`
+  where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`.
+- **logs** – 1/True/true or 0/False/false, return logs. Default `false`.
+- **stream** – 1/True/true or 0/False/false, return stream.
+  Default `false`.
+- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach
+  to `stdin`. Default `false`.
+- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return
+  `stdout` log, if `stream=true`, attach to `stdout`. Default `false`.
+- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return
+  `stderr` log, if `stream=true`, attach to `stderr`. Default `false`.
+
+**Status codes**:
+
+- **101** – no error, hints proxy about hijacking
+- **200** – no error, no upgrade header found
+- **400** – bad parameter
+- **404** – no such container
+- **409** – container is paused
+- **500** – server error
+
+**Stream details**:
+
+When the TTY setting is enabled in
+[`POST /containers/create`
+](#create-a-container),
+the stream is the raw data from the process PTY and client's `stdin`.
+When the TTY is disabled, the stream is multiplexed to separate
+`stdout` and `stderr`.
+
+The format is a **Header** and a **Payload** (frame).
+
+**HEADER**
+
+The header identifies the stream the payload belongs to (`stdout` or
+`stderr`).
It also contains the size of the associated frame encoded in the +last four bytes (`uint32`). + +It is encoded on the first eight bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + +`STREAM_TYPE` can be: + +- 0: `stdin` (is written on `stdout`) +- 1: `stdout` +- 2: `stderr` + +`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of +the `uint32` size encoded as big endian. + +**PAYLOAD** + +The payload is the raw stream. + +**IMPLEMENTATION** + +The simplest way to implement the Attach protocol is the following: + + 1. Read eight bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + +#### Attach to a container (websocket) + +`GET /containers/(id or name)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /v1.22/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **detachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Wait a container + +`POST /containers/(id or name)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /v1.22/containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Remove a container + +`DELETE /containers/(id or name)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /v1.22/containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default `false`. +- **force** - 1/True/true or 0/False/false, Kill then remove the container. + Default `false`. +- **link** - 1/True/true or 0/False/false, Remove the specified + link associated to the container. Default `false`. + +**Status codes**: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **409** – conflict +- **500** – server error + +#### Copy files or folders from a container + +`POST /containers/(id or name)/copy` + +Copy files or folders of container `id` + +**Deprecated** in favor of the `archive` endpoint below. 
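+
+Before moving on: the five-step read loop described above under "Attach to a
+container" translates almost directly into code. The following is a hedged Go
+sketch of such a demultiplexer; for demonstration it reads from stdin, where
+in practice `r` would be the raw stream obtained from the attach call.
+
+```go
+package main
+
+import (
+	"encoding/binary"
+	"io"
+	"os"
+)
+
+// demux runs the read loop from the attach section: read the 8-byte
+// header, pick stdout or stderr from STREAM_TYPE, then copy SIZE bytes.
+func demux(r io.Reader, stdout, stderr io.Writer) error {
+	var header [8]byte
+	for {
+		if _, err := io.ReadFull(r, header[:]); err != nil {
+			if err == io.EOF {
+				return nil // stream ended cleanly between frames
+			}
+			return err
+		}
+		// SIZE1..SIZE4 are the last four bytes, big endian.
+		size := binary.BigEndian.Uint32(header[4:8])
+		dst := stdout
+		if header[0] == 2 { // STREAM_TYPE 2 is stderr
+			dst = stderr
+		}
+		if _, err := io.CopyN(dst, r, int64(size)); err != nil {
+			return err
+		}
+	}
+}
+
+func main() {
+	if err := demux(os.Stdin, os.Stdout, os.Stderr); err != nil {
+		panic(err)
+	}
+}
+```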
+ +**Example request**: + + POST /v1.22/containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Retrieving information about files and folders in a container + +`HEAD /containers/(id or name)/archive` + +See the description of the `X-Docker-Container-Path-Stat` header in the +following section. + +#### Get an archive of a filesystem resource in a container + +`GET /containers/(id or name)/archive` + +Get a tar archive of a resource in the filesystem of container `id`. + +**Query parameters**: + +- **path** - resource in the container's filesystem to archive. Required. + + If not an absolute path, it is relative to the container's root directory. + The resource specified by **path** must exist. To assert that the resource + is expected to be a directory, **path** should end in `/` or `/.` + (assuming a path separator of `/`). If **path** ends in `/.` then this + indicates that only the contents of the **path** directory should be + copied. A symlink is always resolved to its target. + + > **Note**: It is not possible to copy certain system files such as resources + > under `/proc`, `/sys`, `/dev`, and mounts created by the user in the + > container. + +**Example request**: + + GET /v1.22/containers/8cce319429b2/archive?path=/root HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + X-Docker-Container-Path-Stat: eyJuYW1lIjoicm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oiLCJsaW5rVGFyZ2V0IjoiIn0= + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +On success, a response header `X-Docker-Container-Path-Stat` will be set to a +base64-encoded JSON object containing some filesystem header information about +the archived resource. The above example value would decode to the following +JSON object (whitespace added for readability): + +```json +{ + "name": "root", + "size": 4096, + "mode": 2147484096, + "mtime": "2014-02-27T20:51:23Z", + "linkTarget": "" +} +``` + +A `HEAD` request can also be made to this endpoint if only this information is +desired. + +**Status codes**: + +- **200** - success, returns archive of copied resource +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** was asserted to be a directory but exists as a + file) +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** does not exist) +- **500** - server error + +#### Extract an archive of files or folders to a directory in a container + +`PUT /containers/(id or name)/archive` + +Upload a tar archive to be extracted to a path in the filesystem of container +`id`. + +**Query parameters**: + +- **path** - path to a directory in the container + to extract the archive's contents into. Required. + + If not an absolute path, it is relative to the container's root directory. + The **path** resource must exist. +- **noOverwriteDirNonDir** - If "1", "true", or "True" then it will be an error + if unpacking the given content would cause an existing directory to be + replaced with a non-directory and vice versa. 
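+
+For illustration, a minimal Go sketch that builds a one-file tar archive in
+memory and extracts it into `/vol1` inside the container (the container ID
+and daemon address are placeholders; the raw HTTP exchange follows below):
+
+```go
+package main
+
+import (
+	"archive/tar"
+	"bytes"
+	"fmt"
+	"net/http"
+)
+
+func main() {
+	// Build a tiny tar archive in memory.
+	var buf bytes.Buffer
+	tw := tar.NewWriter(&buf)
+	data := []byte("hello\n")
+	hdr := &tar.Header{Name: "hello.txt", Mode: 0644, Size: int64(len(data))}
+	if err := tw.WriteHeader(hdr); err != nil {
+		panic(err)
+	}
+	if _, err := tw.Write(data); err != nil {
+		panic(err)
+	}
+	if err := tw.Close(); err != nil {
+		panic(err)
+	}
+
+	// PUT it to the archive endpoint for extraction.
+	req, err := http.NewRequest("PUT",
+		"http://localhost:2375/v1.22/containers/8cce319429b2/archive?path=/vol1", &buf)
+	if err != nil {
+		panic(err)
+	}
+	req.Header.Set("Content-Type", "application/x-tar")
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+	fmt.Println(resp.Status) // expect "200 OK" on success
+}
+```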
+ +**Example request**: + + PUT /v1.22/containers/8cce319429b2/archive?path=/vol1 HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – the content was extracted successfully +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** should be a directory but exists as a file) + - unable to overwrite existing directory with non-directory + (if **noOverwriteDirNonDir**) + - unable to overwrite existing non-directory with directory + (if **noOverwriteDirNonDir**) +- **403** - client error, permission denied, the volume + or container rootfs is marked as read-only. +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** resource does not exist) +- **500** – server error + +### 2.2 Images + +#### List Images + +`GET /images/json` + +**Example request**: + + GET /v1.22/images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275, + "Labels": {} + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135, + "Labels": { + "com.example.version": "v1" + } + } + ] + +**Example request, with digest information**: + + GET /v1.22/images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728, + "Labels": {} + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. 
Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label +- **filter** - only return images with the specified name + +#### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /v1.22/build HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1/5..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. To do this, its value must be +the path to the alternate build instructions file to use. + +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. After that, +each instruction is run one-by-one until the ID of the new image is output. + +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the `Dockerfile`. This is + ignored if `remote` is specified and points to an external `Dockerfile`. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. + You can provide one or more `t` parameters. +- **remote** – A Git repository URI or HTTP/HTTPS context URI. If the + URI points to a single text file, the file's contents are placed into + a file called `Dockerfile` and the image is built from that file. If + the URI points to a tarball, the file is downloaded by the daemon and + the contents therein used as the context for the build. If the URI + points to a tarball and the `dockerfile` parameter is also specified, + there must be a file with the corresponding path inside the tarball. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). +- **cpuperiod** - The length of a CPU period in microseconds. +- **cpuquota** - Microseconds of CPU time that the container can get in a CPU period. +- **buildargs** – JSON map of string pairs for build-time variables. Users pass + these values at build-time. Docker uses the `buildargs` as the environment + context for command(s) run via the Dockerfile's `RUN` instruction or for + variable expansion in other Dockerfile instructions. This is not meant for + passing secret values. 
[Read more about the buildargs instruction](../reference/builder.md#arg) +- **shmsize** - Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB. + +**Request Headers**: + +- **Content-type** – Set to `"application/x-tar"`. +- **X-Registry-Config** – A base64-url-safe-encoded Registry Auth Config JSON + object with the following structure: + + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + + This object maps the hostname of a registry to an object containing the + "username" and "password" for that registry. Multiple registries may + be specified as the build may be based on an image requiring + authentication to pull from any arbitrary registry. Only the registry + domain name (and port if not the default "443") are required. However + (for legacy reasons) the "official" Docker, Inc. hosted registry must + be specified with both a "https://" prefix and a "/v1/" suffix even + though Docker will prefer to use the v2 registry API. + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.22/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. The name may include a tag or + digest. This parameter may only be used when pulling an image. + The pull is cancelled if the HTTP connection is closed. +- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. + This parameter may only be used when importing an image. +- **repo** – Repository name given to an image when it is imported. + The repo may include a tag. This parameter may only be used when importing + an image. +- **tag** – Tag or digest. If empty when pulling an image, this causes all tags + for the given image to be pulled. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either login information, or a token + - Credential based login: + + ``` + { + "username": "jdoe", + "password": "secret", + "email": "jdoe@acme.com" + } + ``` + + - Token based login: + + ``` + { + "registrytoken": "9cbaf023786cd7..." 
+ } + ``` + +**Status codes**: + +- **200** – no error +- **404** - repository does not exist or no read access +- **500** – server error + + + +#### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /v1.22/images/example/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id" : "85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c", + "Container" : "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a", + "Comment" : "", + "Os" : "linux", + "Architecture" : "amd64", + "Parent" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "ContainerConfig" : { + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Domainname" : "", + "AttachStdout" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "NetworkDisabled" : false, + "OnBuild" : [], + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "User" : "", + "WorkingDir" : "", + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "Labels" : { + "com.example.license" : "GPL", + "com.example.version" : "1.0", + "com.example.vendor" : "Acme" + }, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts" : null, + "Cmd" : [ + "/bin/sh", + "-c", + "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" + ] + }, + "DockerVersion" : "1.9.0-dev", + "VirtualSize" : 188359297, + "Size" : 0, + "Author" : "", + "Created" : "2015-09-10T08:30:53.26995814Z", + "GraphDriver" : { + "Name" : "aufs", + "Data" : null + }, + "RepoDigests" : [ + "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags" : [ + "example:1.0", + "example:latest", + "example:stable" + ], + "Config" : { + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "NetworkDisabled" : false, + "OnBuild" : [], + "StdinOnce" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "Domainname" : "", + "AttachStdout" : false, + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Cmd" : [ + "/bin/bash" + ], + "ExposedPorts" : null, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Labels" : { + "com.example.vendor" : "Acme", + "com.example.version" : "1.0", + "com.example.license" : "GPL" + }, + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "WorkingDir" : "", + "User" : "" + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /v1.22/images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710", + "Created": 1398108230, + "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /", + "Tags": [ + "ubuntu:lucid", + "ubuntu:10.04" + ], + "Size": 182964289, + "Comment": "" + }, + { + "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8", + "Created": 1398108222, + "CreatedBy": "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i 
iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/",
+      "Tags": null,
+      "Size": 0,
+      "Comment": ""
+    },
+    {
+      "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158",
+      "Created": 1371157430,
+      "CreatedBy": "",
+      "Tags": [
+        "scratch12:latest",
+        "scratch:latest"
+      ],
+      "Size": 0,
+      "Comment": "Imported from -"
+    }
+    ]
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such image
+- **500** – server error
+
+#### Push an image on the registry
+
+`POST /images/(name)/push`
+
+Push the image `name` to the registry
+
+**Example request**:
+
+    POST /v1.22/images/test/push HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {"status": "Pushing..."}
+    {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}
+    {"error": "Invalid..."}
+    ...
+
+If you wish to push an image to a private registry, that image must already be tagged
+into a repository which references that registry `hostname` and `port`. This repository name should
+then be used in the URL. This mirrors the command line's flow.
+
+The push is cancelled if the HTTP connection is closed.
+
+**Example request**:
+
+    POST /v1.22/images/registry.acme.com:5000/test/push HTTP/1.1
+
+**Query parameters**:
+
+- **tag** – The tag to associate with the image on the registry. This is optional.
+
+**Request Headers**:
+
+- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either login information, or a token
+    - Credential based login:
+
+        ```
+        {
+              "username": "jdoe",
+              "password": "secret",
+              "email": "jdoe@acme.com"
+        }
+        ```
+
+    - Token based login:
+
+        ```
+        {
+              "registrytoken": "9cbaf023786cd7..."
+        }
+        ```
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such image
+- **500** – server error
+
+#### Tag an image into a repository
+
+`POST /images/(name)/tag`
+
+Tag the image `name` into a repository
+
+**Example request**:
+
+    POST /v1.22/images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+
+**Query parameters**:
+
+- **repo** – The repository to tag in
+- **force** – 1/True/true or 0/False/false, default false
+- **tag** - The new tag name
+
+**Status codes**:
+
+- **201** – no error
+- **400** – bad parameter
+- **404** – no such image
+- **409** – conflict
+- **500** – server error
+
+#### Remove an image
+
+`DELETE /images/(name)`
+
+Remove the image `name` from the filesystem
+
+**Example request**:
+
+    DELETE /v1.22/images/test HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-type: application/json
+
+    [
+     {"Untagged": "3e2f21a89f"},
+     {"Deleted": "3e2f21a89f"},
+     {"Deleted": "53b4f83ac9"}
+    ]
+
+**Query parameters**:
+
+- **force** – 1/True/true or 0/False/false, default false
+- **noprune** – 1/True/true or 0/False/false, default false
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such image
+- **409** – conflict
+- **500** – server error
+
+#### Search images
+
+`GET /images/search`
+
+Search for an image on [Docker Hub](https://hub.docker.com).
+
+> **Note**:
+> The response keys have changed from API v1.6 to reflect the JSON
+> sent by the registry server to the docker daemon's request.
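+
+As a sketch, the results can be consumed from Go like this; the field names
+follow the example response below, and the daemon address is an assumption:
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+)
+
+// searchResult mirrors the keys in the example response below.
+type searchResult struct {
+	Name        string `json:"name"`
+	Description string `json:"description"`
+	IsOfficial  bool   `json:"is_official"`
+	IsAutomated bool   `json:"is_automated"`
+	StarCount   int    `json:"star_count"`
+}
+
+func main() {
+	resp, err := http.Get("http://localhost:2375/v1.22/images/search?term=sshd")
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	var results []searchResult
+	if err := json.NewDecoder(resp.Body).Decode(&results); err != nil {
+		panic(err)
+	}
+	for _, r := range results {
+		fmt.Printf("%s (%d stars, official=%v)\n", r.Name, r.StarCount, r.IsOfficial)
+	}
+}
+```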
+ +**Example request**: + + GET /v1.22/images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... + ] + +**Query parameters**: + +- **term** – term to search + +**Status codes**: + +- **200** – no error +- **500** – server error + +### 2.3 Misc + +#### Check auth configuration + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /v1.22/auth HTTP/1.1 + Content-Type: application/json + + { + "username": "hannibal", + "password": "xxxx", + "email": "hannibal@a-team.com", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **204** – no error +- **500** – server error + +#### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /v1.22/info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Architecture": "x86_64", + "ClusterStore": "etcd://localhost:2379", + "Containers": 11, + "ContainersRunning": 7, + "ContainersStopped": 3, + "ContainersPaused": 1, + "CpuCfsPeriod": true, + "CpuCfsQuota": true, + "Debug": false, + "DockerRootDir": "/var/lib/docker", + "Driver": "btrfs", + "DriverStatus": [[""]], + "ExecutionDriver": "native-0.1", + "ExperimentalBuild": false, + "HttpProxy": "http://test:test@localhost:8080", + "HttpsProxy": "https://test:test@localhost:8080", + "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "IPv4Forwarding": true, + "Images": 16, + "IndexServerAddress": "https://index.docker.io/v1/", + "InitPath": "/usr/bin/docker", + "InitSha1": "", + "KernelVersion": "3.12.0-1-amd64", + "Labels": [ + "storage=ssd" + ], + "MemTotal": 2099236864, + "MemoryLimit": true, + "NCPU": 1, + "NEventsListener": 0, + "NFd": 11, + "NGoroutines": 21, + "Name": "prod-server-42", + "NoProxy": "9.81.1.160", + "OomKillDisable": true, + "OSType": "linux", + "OperatingSystem": "Boot2Docker", + "Plugins": { + "Volume": [ + "local" + ], + "Network": [ + "null", + "host", + "bridge" + ] + }, + "RegistryConfig": { + "IndexConfigs": { + "docker.io": { + "Mirrors": null, + "Name": "docker.io", + "Official": true, + "Secure": true + } + }, + "InsecureRegistryCIDRs": [ + "127.0.0.0/8" + ] + }, + "ServerVersion": "1.9.0", + "SwapLimit": false, + "SystemStatus": [["State", "Healthy"]], + "SystemTime": "2015-03-10T11:11:23.730591467-07:00" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /v1.22/version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.10.0", + "Os": "linux", + "KernelVersion": "3.19.0-23-generic", + "GoVersion": "go1.4.2", + "GitCommit": "e75da4b", + "Arch": "amd64", + "ApiVersion": "1.22", + "BuildTime": "2015-12-01T07:09:13.444803460+00:00", + "Experimental": true + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker 
server + +**Example request**: + + GET /v1.22/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.22/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ], + "Labels": { + "key1": "value1", + "key2": "value2" + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") +- **pause** – 1/True/true or 0/False/false, whether to pause the container before committing +- **changes** – Dockerfile instructions to apply while committing + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **500** – server error + +#### Monitor Docker's events + +`GET /events` + +Get container events from docker, in real time via streaming. + +Docker containers report the following events: + + attach, commit, copy, create, destroy, die, exec_create, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update + +Docker images report the following events: + + delete, import, pull, push, tag, untag + +Docker volumes report the following events: + + create, mount, unmount, destroy + +Docker networks report the following events: + + create, connect, disconnect, destroy + +**Example request**: + + GET /v1.22/events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + Server: Docker/1.10.0 (linux) + Date: Fri, 29 Apr 2016 15:18:06 GMT + Transfer-Encoding: chunked + + { + "status": "pull", + "id": "alpine:latest", + "Type": "image", + "Action": "pull", + "Actor": { + "ID": "alpine:latest", + "Attributes": { + "name": "alpine" + } + }, + "time": 1461943101, + "timeNano": 1461943101301854122 + } + { + "status": "create", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "create", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 1461943101381709551 + } + { + "status": "attach", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "attach", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 
1461943101383858412 + } + { + "Type": "network", + "Action": "connect", + "Actor": { + "ID": "7dc8ac97d5d29ef6c31b6052f3938c1e8f2749abbd17d1bd1febf2608db1b474", + "Attributes": { + "container": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "name": "bridge", + "type": "bridge" + } + }, + "time": 1461943101, + "timeNano": 1461943101394865557 + } + { + "status": "start", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "start", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 1461943101607533796 + } + { + "status": "resize", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "resize", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "height": "46", + "image": "alpine", + "name": "my-container", + "width": "204" + } + }, + "time": 1461943101, + "timeNano": 1461943101610269268 + } + { + "status": "die", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "die", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "exitCode": "0", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943105, + "timeNano": 1461943105079144137 + } + { + "Type": "network", + "Action": "disconnect", + "Actor": { + "ID": "7dc8ac97d5d29ef6c31b6052f3938c1e8f2749abbd17d1bd1febf2608db1b474", + "Attributes": { + "container": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "name": "bridge", + "type": "bridge" + } + }, + "time": 1461943105, + "timeNano": 1461943105230860245 + } + { + "status": "destroy", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "destroy", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943105, + "timeNano": 1461943105338056026 + } + +**Query parameters**: + +- **since** – Timestamp. Show all events created since timestamp and then stream +- **until** – Timestamp. Show events created until given timestamp and stop streaming +- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: + - `container=`; -- container to filter + - `event=`; -- event to filter + - `image=`; -- image to filter + - `label=`; -- image and container label to filter + - `type=`; -- either `container` or `image` or `volume` or `network` + - `volume=`; -- volume to filter + - `network=`; -- network to filter + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. 
If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.22/images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.22/images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /v1.22/images/load + Content-Type: application/x-tar + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing these files: + +- `VERSION`: currently `1.0` - the file format version +- `json`: detailed layer information, similar to `docker inspect layer_id` +- `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, the tarball should also include a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +#### Exec Create + +`POST /containers/(id or name)/exec` + +Sets up an exec instance in a running container `id` + +**Example request**: + + POST /v1.22/containers/e90e34656806/exec HTTP/1.1 + Content-Type: application/json + + { + "AttachStdin": true, + "AttachStdout": true, + "AttachStderr": true, + "Cmd": ["sh"], + "DetachKeys": "ctrl-p,ctrl-q", + "Privileged": true, + "Tty": true, + "User": "123:456" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id": "f90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. +- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. +- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. +- **DetachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. 
+- **Tty** - Boolean value to allocate a pseudo-TTY. +- **Cmd** - Command to run specified as a string or an array of strings. +- **Privileged** - Boolean value, runs the exec process with extended privileges. +- **User** - A string value specifying the user, and optionally, group to run + the exec process inside the container. Format is one of: `"user"`, + `"user:group"`, `"uid"`, or `"uid:gid"`. + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **409** - container is paused +- **500** - server error + +#### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up `exec` instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. + +**Example request**: + + POST /v1.22/exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach": false, + "Tty": false + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {% raw %} + {{ STREAM }} + {% endraw %} + +**JSON parameters**: + +- **Detach** - Detach from the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **409** - container is paused + +**Stream details**: + +Similar to the stream behavior of `POST /containers/(id or name)/attach` API + +#### Exec Resize + +`POST /exec/(id)/resize` + +Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters. +This API is valid only if `tty` was specified as part of creating and starting the `exec` command. + +**Example request**: + + POST /v1.22/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1 + Content-Type: text/plain + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: text/plain + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **201** – no error +- **404** – no such exec instance + +#### Exec Inspect + +`GET /exec/(id)/json` + +Return low-level information about the `exec` command `id`. + +**Example request**: + + GET /v1.22/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "CanRemove": false, + "ContainerID": "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126", + "DetachKeys": "", + "ExitCode": 2, + "ID": "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b", + "OpenStderr": true, + "OpenStdin": true, + "OpenStdout": true, + "ProcessConfig": { + "arguments": [ + "-c", + "exit 2" + ], + "entrypoint": "sh", + "privileged": false, + "tty": true, + "user": "1000" + }, + "Running": false + } + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **500** - server error + +### 2.4 Volumes + +#### List volumes + +`GET /volumes` + +**Example request**: + + GET /v1.22/volumes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Volumes": [ + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + ], + "Warnings": [] + } + +**Query parameters**: + +- **filters** - JSON encoded value of the filters (a `map[string][]string`) to process on the volumes list. 
There is one available filter: `dangling=true` + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a volume + +`POST /volumes/create` + +Create a volume + +**Example request**: + + POST /v1.22/volumes/create HTTP/1.1 + Content-Type: application/json + + { + "Name": "tardis" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + +**Status codes**: + +- **201** - no error +- **500** - server error + +**JSON parameters**: + +- **Name** - The new volume's name. If not specified, Docker generates a name. +- **Driver** - Name of the volume driver to use. Defaults to `local` for the name. +- **DriverOpts** - A mapping of driver options and values. These options are + passed directly to the driver and are driver specific. + +#### Inspect a volume + +`GET /volumes/(name)` + +Return low-level information on the volume `name` + +**Example request**: + + GET /volumes/tardis + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + +**Status codes**: + +- **200** - no error +- **404** - no such volume +- **500** - server error + +#### Remove a volume + +`DELETE /volumes/(name)` + +Instruct the driver to remove the volume (`name`). + +**Example request**: + + DELETE /v1.22/volumes/tardis HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** - no error +- **404** - no such volume or volume driver +- **409** - volume is in use and cannot be removed +- **500** - server error + +### 2.5 Networks + +#### List networks + +`GET /networks` + +**Example request**: + + GET /v1.22/networks?filters={"type":{"custom":true}} HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +[ + { + "Name": "bridge", + "Id": "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566", + "Scope": "local", + "Driver": "bridge", + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.17.0.0/16" + } + ] + }, + "Containers": { + "39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867": { + "EndpointID": "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda", + "MacAddress": "02:42:ac:11:00:02", + "IPv4Address": "172.17.0.2/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + } + }, + { + "Name": "none", + "Id": "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794", + "Scope": "local", + "Driver": "null", + "IPAM": { + "Driver": "default", + "Config": [] + }, + "Containers": {}, + "Options": {} + }, + { + "Name": "host", + "Id": "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e", + "Scope": "local", + "Driver": "host", + "IPAM": { + "Driver": "default", + "Config": [] + }, + "Containers": {}, + "Options": {} + } +] +``` + +**Query parameters**: + +- **filters** - JSON encoded network list filter. The filter value is one of: + - `id=` Matches all or part of a network id. + - `name=` Matches all or part of a network name. + - `type=["custom"|"builtin"]` Filters networks by type. 
The `custom` keyword returns all user-defined networks. + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Inspect network + +`GET /networks/(id or name)` + +Return low-level information on the network `id` + +**Example request**: + + GET /v1.22/networks/7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99 HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +{ + "Name": "net01", + "Id": "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99", + "Scope": "local", + "Driver": "bridge", + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.19.0.0/16", + "Gateway": "172.19.0.1/16" + } + ], + "Options": { + "foo": "bar" + } + }, + "Containers": { + "19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c": { + "Name": "test", + "EndpointID": "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a", + "MacAddress": "02:42:ac:13:00:02", + "IPv4Address": "172.19.0.2/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + } +} +``` + +**Status codes**: + +- **200** - no error +- **404** - network not found +- **500** - server error + +#### Create a network + +`POST /networks/create` + +Create a network + +**Example request**: + +``` +POST /v1.22/networks/create HTTP/1.1 +Content-Type: application/json + +{ + "Name":"isolated_nw", + "CheckDuplicate":true, + "Driver":"bridge", + "IPAM":{ + "Driver": "default", + "Config":[ + { + "Subnet":"172.20.0.0/16", + "IPRange":"172.20.10.0/24", + "Gateway":"172.20.10.11" + }, + { + "Subnet":"2001:db8:abcd::/64", + "Gateway":"2001:db8:abcd::1011" + } + ], + "Options": { + "foo": "bar" + } + }, + "Internal":true +} +``` + +**Example response**: + +``` +HTTP/1.1 201 Created +Content-Type: application/json + +{ + "Id": "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30", + "Warning": "" +} +``` + +**Status codes**: + +- **201** - no error +- **404** - plugin not found +- **500** - server error + +**JSON parameters**: + +- **Name** - The new network's name. this is a mandatory field +- **CheckDuplicate** - Requests daemon to check for networks with same name. Defaults to `false`. + Since Network is primarily keyed based on a random ID and not on the name, + and network name is strictly a user-friendly alias to the network + which is uniquely identified using ID, there is no guaranteed way to check for duplicates. + This parameter CheckDuplicate is there to provide a best effort checking of any networks + which has the same name but it is not guaranteed to catch all name collisions. +- **Driver** - Name of the network driver plugin to use. Defaults to `bridge` driver +- **IPAM** - Optional custom IP scheme for the network + - **Driver** - Name of the IPAM driver to use. 
Defaults to `default` driver
+  - **Config** - List of IPAM configuration options, specified as a map:
+      `{"Subnet": <CIDR>, "IPRange": <CIDR>, "Gateway": <IP address>, "AuxAddress": <device_name:IP address>}`
+  - **Options** - Driver-specific options, specified as a map: `{"option":"value" [,"option2":"value2"]}`
+- **Options** - Network-specific options to be used by the drivers
+
+#### Connect a container to a network
+
+`POST /networks/(id or name)/connect`
+
+Connect a container to a network
+
+**Example request**:
+
+```
+POST /v1.22/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/connect HTTP/1.1
+Content-Type: application/json
+
+{
+  "Container":"3613f73ba0e4",
+  "EndpointConfig": {
+    "IPAMConfig": {
+      "IPv4Address":"172.24.56.89",
+      "IPv6Address":"2001:db8::5689"
+    }
+  }
+}
+```
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+
+**Status codes**:
+
+- **200** - no error
+- **404** - network or container not found
+- **500** - server error
+
+**JSON parameters**:
+
+- **Container** - container id/name to be connected to the network
+
+#### Disconnect a container from a network
+
+`POST /networks/(id or name)/disconnect`
+
+Disconnect a container from a network
+
+**Example request**:
+
+```
+POST /v1.22/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/disconnect HTTP/1.1
+Content-Type: application/json
+
+{
+  "Container":"3613f73ba0e4",
+  "Force":false
+}
+```
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+
+**Status codes**:
+
+- **200** - no error
+- **404** - network or container not found
+- **500** - server error
+
+**JSON parameters**:
+
+- **Container** - container id/name to be disconnected from a network
+- **Force** - Force the container to disconnect from a network
+
+#### Remove a network
+
+`DELETE /networks/(id or name)`
+
+Instruct the driver to remove the network (`id`).
+
+**Example request**:
+
+    DELETE /v1.22/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+
+**Status codes**:
+
+- **200** - no error
+- **403** - operation not supported for pre-defined networks
+- **404** - no such network
+- **500** - server error
+
+## 3. Going further
+
+### 3.1 Inside `docker run`
+
+As an example, the `docker run` command line makes the following API calls:
+
+- Create the container
+
+- If the status code is 404, it means the image doesn't exist:
+    - Try to pull it.
+    - Then, retry to create the container.
+
+- Start the container.
+
+- If you are not in detached mode:
+    - Attach to the container, using `logs=1` (to have `stdout` and
+      `stderr` from the container's start) and `stream=1`
+
+- If in detached mode or only `stdin` is attached, display the container's id.
+
+### 3.2 Hijacking
+
+In this version of the API, `/attach` uses hijacking to transport `stdin`,
+`stdout`, and `stderr` on the same socket.
+
+To hint potential proxies about connection hijacking, the Docker client sends
+connection upgrade headers, similar to websockets.
+
+    Upgrade: tcp
+    Connection: Upgrade
+
+When the Docker daemon detects the `Upgrade` header, it switches its status code
+from **200 OK** to **101 UPGRADED** and resends the same headers.
+
+
+### 3.3 CORS Requests
+
+To set cross origin requests to the Engine API, please give values to
+`--api-cors-header` when running Docker in daemon mode.
Set * (asterisk) allows all, +default or blank means CORS disabled + + $ dockerd -H="192.168.1.9:2375" --api-cors-header="http://foo.bar" diff --git a/vendor/github.com/moby/moby/docs/api/v1.23.md b/vendor/github.com/moby/moby/docs/api/v1.23.md new file mode 100644 index 000000000..508a721c7 --- /dev/null +++ b/vendor/github.com/moby/moby/docs/api/v1.23.md @@ -0,0 +1,3436 @@ +--- +title: "Engine API v1.23" +description: "API Documentation for Docker" +keywords: "API, Docker, rcli, REST, documentation" +redirect_from: +- /engine/reference/api/docker_remote_api_v1.23/ +- /reference/api/docker_remote_api_v1.23/ +--- + + + +## 1. Brief introduction + + - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket). + - The API tends to be REST. However, for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `stdout`, + `stdin` and `stderr`. + - When the client API version is newer than the daemon's, these calls return an HTTP + `400 Bad Request` error message. + +## 2. Endpoints + +### 2.1 Containers + +#### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /v1.23/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 1", + "Created": 1367854155, + "State": "exited", + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:02" + } + } + }, + "Mounts": [ + { + "Name": "fac362...80535", + "Source": "/data", + "Destination": "/data", + "Driver": "local", + "Mode": "ro,Z", + "RW": false, + "Propagation": "" + } + ] + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 222222", + "Created": 1367854155, + "State": "exited", + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.8", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:08" + } + } + }, + "Mounts": [] + }, + { + "Id": "3176a2479c92", + 
"Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "State": "exited", + "Status": "Exit 0", + "Ports":[], + "Labels": {}, + "SizeRw":12288, + "SizeRootFs":0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.6", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:06" + } + } + }, + "Mounts": [] + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "State": "exited", + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.5", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:05" + } + } + }, + "Mounts": [] + } + ] + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. 
Available filters:
+  - `exited=<int>` -- containers with exit code of `<int>`
+  - `status=`(`created`|`restarting`|`running`|`paused`|`exited`|`dead`)
+  - `label=key` or `label="key=value"` of a container label
+  - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only)
+  - `ancestor`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`)
+  - `before`=(`<container id>` or `<container name>`)
+  - `since`=(`<container id>` or `<container name>`)
+  - `volume`=(`<volume name>` or `<mount point destination>`)
+
+**Status codes**:
+
+- **200** – no error
+- **400** – bad parameter
+- **500** – server error
+
+#### Create a container
+
+`POST /containers/create`
+
+Create a container
+
+**Example request**:
+
+    POST /v1.23/containers/create HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "Hostname": "",
+      "Domainname": "",
+      "User": "",
+      "AttachStdin": false,
+      "AttachStdout": true,
+      "AttachStderr": true,
+      "Tty": false,
+      "OpenStdin": false,
+      "StdinOnce": false,
+      "Env": [
+        "FOO=bar",
+        "BAZ=quux"
+      ],
+      "Cmd": [
+        "date"
+      ],
+      "Entrypoint": "",
+      "Image": "ubuntu",
+      "Labels": {
+        "com.example.vendor": "Acme",
+        "com.example.license": "GPL",
+        "com.example.version": "1.0"
+      },
+      "Volumes": {
+        "/volumes/data": {}
+      },
+      "WorkingDir": "",
+      "NetworkDisabled": false,
+      "MacAddress": "12:34:56:78:9a:bc",
+      "ExposedPorts": {
+        "22/tcp": {}
+      },
+      "StopSignal": "SIGTERM",
+      "HostConfig": {
+        "Binds": ["/tmp:/tmp"],
+        "Tmpfs": { "/run": "rw,noexec,nosuid,size=65536k" },
+        "Links": ["redis3:redis"],
+        "Memory": 0,
+        "MemorySwap": 0,
+        "MemoryReservation": 0,
+        "KernelMemory": 0,
+        "CpuShares": 512,
+        "CpuPeriod": 100000,
+        "CpuQuota": 50000,
+        "CpusetCpus": "0,1",
+        "CpusetMems": "0,1",
+        "BlkioWeight": 300,
+        "BlkioWeightDevice": [{}],
+        "BlkioDeviceReadBps": [{}],
+        "BlkioDeviceReadIOps": [{}],
+        "BlkioDeviceWriteBps": [{}],
+        "BlkioDeviceWriteIOps": [{}],
+        "MemorySwappiness": 60,
+        "OomKillDisable": false,
+        "OomScoreAdj": 500,
+        "PidMode": "",
+        "PidsLimit": -1,
+        "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] },
+        "PublishAllPorts": false,
+        "Privileged": false,
+        "ReadonlyRootfs": false,
+        "Dns": ["8.8.8.8"],
+        "DnsOptions": [""],
+        "DnsSearch": [""],
+        "ExtraHosts": null,
+        "VolumesFrom": ["parent", "other:ro"],
+        "CapAdd": ["NET_ADMIN"],
+        "CapDrop": ["MKNOD"],
+        "GroupAdd": ["newgroup"],
+        "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 },
+        "NetworkMode": "bridge",
+        "Devices": [],
+        "Ulimits": [{}],
+        "LogConfig": { "Type": "json-file", "Config": {} },
+        "SecurityOpt": [],
+        "CgroupParent": "",
+        "VolumeDriver": "",
+        "ShmSize": 67108864
+      },
+      "NetworkingConfig": {
+        "EndpointsConfig": {
+          "isolated_nw" : {
+            "IPAMConfig": {
+              "IPv4Address":"172.20.30.33",
+              "IPv6Address":"2001:db8:abcd::3033"
+            },
+            "Links":["container_1", "container_2"],
+            "Aliases":["server_x", "server_y"]
+          }
+        }
+      }
+    }
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: application/json
+
+    {
+      "Id":"e90e34656806",
+      "Warnings":[]
+    }
+
+**JSON parameters**:
+
+- **Hostname** - A string value containing the hostname to use for the
+  container.
+- **Domainname** - A string value containing the domain name to use
+  for the container.
+- **User** - A string value specifying the user inside the container.
+- **AttachStdin** - Boolean value, attaches to `stdin`.
+- **AttachStdout** - Boolean value, attaches to `stdout`.
+- **AttachStderr** - Boolean value, attaches to `stderr`.
+- **Tty** - Boolean value, attach standard streams to a `tty`, including `stdin` if it is not closed.
+- **OpenStdin** - Boolean value, opens `stdin`.
+- **StdinOnce** - Boolean value, close `stdin` after the first attached client disconnects.
+- **Env** - A list of environment variables in the form of `["VAR=value", ...]`
+- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }`
+- **Cmd** - Command to run specified as a string or an array of strings.
+- **Entrypoint** - Set the entry point for the container as a string or an array
+  of strings.
+- **Image** - A string specifying the image name to use for the container.
+- **Volumes** - An object mapping mount point paths (strings) inside the
+  container to empty objects.
+- **WorkingDir** - A string specifying the working directory for commands to
+  run in.
+- **NetworkDisabled** - Boolean value, when true disables networking for the
+  container.
+- **ExposedPorts** - An object mapping ports to an empty object in the form of:
+  `"ExposedPorts": { "<port>/<protocol>": {} }`
+- **StopSignal** - Signal to stop a container as a string or unsigned integer. `SIGTERM` by default.
+- **HostConfig**
+    - **Binds** – A list of volume bindings for this container. Each volume binding is a string in one of these forms:
+       + `host-src:container-dest` to bind-mount a host path into the
+         container. Both `host-src` and `container-dest` must be an
+         _absolute_ path.
+       + `host-src:container-dest:ro` to make the bind-mount read-only
+         inside the container. Both `host-src` and `container-dest` must be
+         an _absolute_ path.
+       + `volume-name:container-dest` to bind-mount a volume managed by a
+         volume driver into the container. `container-dest` must be an
+         _absolute_ path.
+       + `volume-name:container-dest:ro` to mount the volume read-only
+         inside the container. `container-dest` must be an _absolute_ path.
+    - **Tmpfs** – A map of container directories which should be replaced by tmpfs mounts, and their corresponding
+      mount options. A JSON object in the form `{ "/run": "rw,noexec,nosuid,size=65536k" }`.
+    - **Links** - A list of links for the container. Each link entry should be
+      in the form of `container_name:alias`.
+    - **Memory** - Memory limit in bytes.
+    - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap.
+      You must use this with `memory` and make the swap value larger than `memory`.
+    - **MemoryReservation** - Memory soft limit in bytes.
+    - **KernelMemory** - Kernel memory limit in bytes.
+    - **CpuShares** - An integer value containing the container's CPU shares
+      (i.e., the relative weight vs. other containers).
+    - **CpuPeriod** - The length of a CPU period in microseconds.
+    - **CpuQuota** - Microseconds of CPU time that the container can get in a CPU period.
+    - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use.
+    - **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
+    - **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000.
+    - **BlkioWeightDevice** - Block IO weight (relative device weight) in the form of: `"BlkioWeightDevice": [{"Path": "device_path", "Weight": weight}]`
+    - **BlkioDeviceReadBps** - Limit read rate (bytes per second) from a device in the form of: `"BlkioDeviceReadBps": [{"Path": "device_path", "Rate": rate}]`, for example:
+      `"BlkioDeviceReadBps": [{"Path": "/dev/sda", "Rate": "1024"}]`
+    - **BlkioDeviceWriteBps** - Limit write rate (bytes per second) to a device in the form of: `"BlkioDeviceWriteBps": [{"Path": "device_path", "Rate": rate}]`, for example:
+      `"BlkioDeviceWriteBps": [{"Path": "/dev/sda", "Rate": "1024"}]`
+    - **BlkioDeviceReadIOps** - Limit read rate (IO per second) from a device in the form of: `"BlkioDeviceReadIOps": [{"Path": "device_path", "Rate": rate}]`, for example:
+      `"BlkioDeviceReadIOps": [{"Path": "/dev/sda", "Rate": "1000"}]`
+    - **BlkioDeviceWriteIOps** - Limit write rate (IO per second) to a device in the form of: `"BlkioDeviceWriteIOps": [{"Path": "device_path", "Rate": rate}]`, for example:
+      `"BlkioDeviceWriteIOps": [{"Path": "/dev/sda", "Rate": "1000"}]`
+    - **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
+    - **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not.
+    - **OomScoreAdj** - An integer value containing the score given to the container in order to tune OOM killer preferences.
+    - **PidMode** - Set the PID (Process) Namespace mode for the container;
+      `"container:<name|id>"`: joins another container's PID namespace,
+      `"host"`: use the host's PID namespace inside the container.
+    - **PidsLimit** - Tune a container's pids limit. Set -1 for unlimited.
+    - **PortBindings** - A map of exposed container ports and the host port they
+      should map to. A JSON object in the form
+      `{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`.
+      Take note that `port` is specified as a string and not an integer value.
+    - **PublishAllPorts** - Allocates a random host port for all of a container's
+      exposed ports. Specified as a boolean value.
+    - **Privileged** - Gives the container full access to the host. Specified as
+      a boolean value.
+    - **ReadonlyRootfs** - Mount the container's root filesystem as read only.
+      Specified as a boolean value.
+    - **Dns** - A list of DNS servers for the container to use.
+    - **DnsOptions** - A list of DNS options.
+    - **DnsSearch** - A list of DNS search domains.
+    - **ExtraHosts** - A list of hostnames/IP mappings to add to the
+      container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`.
+    - **VolumesFrom** - A list of volumes to inherit from another container.
+      Specified in the form `<container name>[:<ro|rw>]`.
+    - **CapAdd** - A list of kernel capabilities to add to the container.
+    - **CapDrop** - A list of kernel capabilities to drop from the container.
+    - **GroupAdd** - A list of additional groups that the container process will run as.
+    - **RestartPolicy** – The behavior to apply when the container exits. The
+      value is an object with a `Name` property of either `"always"` to
+      always restart, `"unless-stopped"` to restart always except when
+      the user has manually stopped the container, or `"on-failure"` to restart only when the container
+      exit code is non-zero. If `on-failure` is used, `MaximumRetryCount`
+      controls the number of times to retry before giving up.
+      The default is not to restart. (optional)
+      An ever-increasing delay (double the previous delay, starting at 100 ms)
+      is added before each restart to prevent flooding the server.
+    - **UsernsMode** - Sets the user namespace mode for the container when user namespace remapping is enabled.
+      Supported values are: `host`.
+    - **NetworkMode** - Sets the networking mode for the container. Supported
+      standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. Any other value is taken
+      as a custom network's name to which this container should connect.
+    - **Devices** - A list of devices to add to the container specified as a JSON object in the
+      form
+      `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}`
+    - **Ulimits** - A list of ulimits to set in the container, specified as
+      `{ "Name": <name>, "Soft": <soft limit>, "Hard": <hard limit> }`, for example:
+      `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }`
+    - **SecurityOpt**: A list of string values to customize labels for MLS
+      systems, such as SELinux.
+    - **LogConfig** - Log configuration for the container, specified as a JSON object in the form
+      `{ "Type": "<driver_name>", "Config": {"key1": "val1"}}`.
+      Available types: `json-file`, `syslog`, `journald`, `gelf`, `fluentd`, `awslogs`, `splunk`, `etwlogs`, `none`.
+    - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist.
+    - **VolumeDriver** - Driver that this container uses to mount volumes.
+    - **ShmSize** - Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB.
+
+**Query parameters**:
+
+- **name** – Assign the specified name to the container. Must
+  match `/?[a-zA-Z0-9_-]+`.
+
+**Status codes**:
+
+- **201** – no error
+- **400** – bad parameter
+- **404** – no such container
+- **406** – impossible to attach (container not running)
+- **409** – conflict
+- **500** – server error
+
+#### Inspect a container
+
+`GET /containers/(id or name)/json`
+
+Return low-level information on the container `id`
+
+**Example request**:
+
+    GET /v1.23/containers/4fa6e0f0c678/json HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "AppArmorProfile": "",
+        "Args": [
+            "-c",
+            "exit 9"
+        ],
+        "Config": {
+            "AttachStderr": true,
+            "AttachStdin": false,
+            "AttachStdout": true,
+            "Cmd": [
+                "/bin/sh",
+                "-c",
+                "exit 9"
+            ],
+            "Domainname": "",
+            "Entrypoint": null,
+            "Env": [
+                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+            ],
+            "ExposedPorts": null,
+            "Hostname": "ba033ac44011",
+            "Image": "ubuntu",
+            "Labels": {
+                "com.example.vendor": "Acme",
+                "com.example.license": "GPL",
+                "com.example.version": "1.0"
+            },
+            "MacAddress": "",
+            "NetworkDisabled": false,
+            "OnBuild": null,
+            "OpenStdin": false,
+            "StdinOnce": false,
+            "Tty": false,
+            "User": "",
+            "Volumes": {
+                "/volumes/data": {}
+            },
+            "WorkingDir": "",
+            "StopSignal": "SIGTERM"
+        },
+        "Created": "2015-01-06T15:47:31.485331387Z",
+        "Driver": "devicemapper",
+        "ExecIDs": null,
+        "HostConfig": {
+            "Binds": null,
+            "BlkioWeight": 0,
+            "BlkioWeightDevice": [{}],
+            "BlkioDeviceReadBps": [{}],
+            "BlkioDeviceWriteBps": [{}],
+            "BlkioDeviceReadIOps": [{}],
+            "BlkioDeviceWriteIOps": [{}],
+            "CapAdd": null,
+            "CapDrop": null,
+            "ContainerIDFile": "",
+            "CpusetCpus": "",
+            "CpusetMems": "",
+            "CpuShares": 0,
+            "CpuPeriod": 100000,
+            "Devices": [],
+            "Dns": null,
+            "DnsOptions": null,
+            "DnsSearch": null,
+            "ExtraHosts": null,
+            "IpcMode": "",
+            "Links": null,
+            "LxcConf": [],
+            
"Memory": 0, + "MemorySwap": 0, + "MemoryReservation": 0, + "KernelMemory": 0, + "OomKillDisable": false, + "OomScoreAdj": 500, + "NetworkMode": "bridge", + "PidMode": "", + "PortBindings": {}, + "Privileged": false, + "ReadonlyRootfs": false, + "PublishAllPorts": false, + "RestartPolicy": { + "MaximumRetryCount": 2, + "Name": "on-failure" + }, + "LogConfig": { + "Config": null, + "Type": "json-file" + }, + "SecurityOpt": null, + "VolumesFrom": null, + "Ulimits": [{}], + "VolumeDriver": "", + "ShmSize": 67108864 + }, + "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "SandboxID": "", + "HairpinMode": false, + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "Ports": null, + "SandboxKey": "", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null, + "EndpointID": "", + "Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "MacAddress": "", + "Networks": { + "bridge": { + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:12:00:02" + } + } + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Dead": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": true, + "StartedAt": "2015-01-06T15:47:32.072697474Z", + "Status": "running" + }, + "Mounts": [ + { + "Name": "fac362...80535", + "Source": "/data", + "Destination": "/data", + "Driver": "local", + "Mode": "ro,Z", + "RW": false, + "Propagation": "" + } + ] + } + +**Example request, with size information**: + + GET /v1.23/containers/4fa6e0f0c678/json?size=1 HTTP/1.1 + +**Example response, with size information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + .... + "SizeRw": 0, + "SizeRootFs": 972, + .... + } + +**Query parameters**: + +- **size** – 1/True/true or 0/False/false, return container size information. Default is `false`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### List processes running inside a container + +`GET /containers/(id or name)/top` + +List processes running inside the container `id`. On Unix systems this +is done by running the `ps` command. This endpoint is not +supported on Windows. 
+
+**Example request**:
+
+    GET /v1.23/containers/4fa6e0f0c678/top HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+       "Titles" : [
+         "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD"
+       ],
+       "Processes" : [
+         [
+           "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash"
+         ],
+         [
+           "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10"
+         ]
+       ]
+    }
+
+**Example request**:
+
+    GET /v1.23/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+      "Titles" : [
+        "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND"
+      ],
+      "Processes" : [
+        [
+          "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash"
+        ],
+        [
+          "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10"
+        ]
+      ]
+    }
+
+**Query parameters**:
+
+- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef`
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Get container logs
+
+`GET /containers/(id or name)/logs`
+
+Get `stdout` and `stderr` logs from the container `id`
+
+> **Note**:
+> This endpoint works only for containers with the `json-file` or `journald` logging drivers.
+
+**Example request**:
+
+    GET /v1.23/containers/4fa6e0f0c678/logs?stderr=1&stdout=1&timestamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 101 UPGRADED
+    Content-Type: application/vnd.docker.raw-stream
+    Connection: Upgrade
+    Upgrade: tcp
+
+    {% raw %}
+    {{ STREAM }}
+    {% endraw %}
+
+**Query parameters**:
+
+- **follow** – 1/True/true or 0/False/false, return stream. Default `false`.
+- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`.
+- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`.
+- **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp
+  will only output log-entries since that timestamp. Default: 0 (unfiltered)
+- **timestamps** – 1/True/true or 0/False/false, print timestamps for
+  every log line. Default `false`.
+- **tail** – Output specified number of lines at the end of logs: `all` or `<number>`. Default all.
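+
+For containers created without a TTY, the body of this reply uses the same multiplexed stream format described under **Stream details** in the attach section later in this document. A minimal Go sketch (an illustration, not part of the original reference; it assumes the default Unix socket) that follows a container's logs and demultiplexes the frames:
+
+```go
+package main
+
+import (
+	"context"
+	"encoding/binary"
+	"io"
+	"net"
+	"net/http"
+	"os"
+)
+
+func main() {
+	client := &http.Client{Transport: &http.Transport{
+		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+			return net.Dial("unix", "/var/run/docker.sock")
+		},
+	}}
+
+	resp, err := client.Get("http://docker/v1.23/containers/4fa6e0f0c678/logs?follow=1&stdout=1&stderr=1")
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	// Each frame starts with an 8-byte header: byte 0 is the stream
+	// type (1 = stdout, 2 = stderr) and bytes 4-7 hold the payload
+	// size as a big-endian uint32.
+	header := make([]byte, 8)
+	for {
+		if _, err := io.ReadFull(resp.Body, header); err != nil {
+			return // EOF: the container stopped or the stream closed
+		}
+		size := int64(binary.BigEndian.Uint32(header[4:8]))
+		out := io.Writer(os.Stdout)
+		if header[0] == 2 {
+			out = os.Stderr
+		}
+		if _, err := io.CopyN(out, resp.Body, size); err != nil {
+			return
+		}
+	}
+}
+```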
+ +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **404** – no such container +- **500** – server error + +#### Inspect changes on a container's filesystem + +`GET /containers/(id or name)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /v1.23/containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Values for `Kind`: + +- `0`: Modify +- `1`: Add +- `2`: Delete + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Export a container + +`GET /containers/(id or name)/export` + +Export the contents of container `id` + +**Example request**: + + GET /v1.23/containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container stats based on resource usage + +`GET /containers/(id or name)/stats` + +This endpoint returns a live stream of a container's resource usage statistics. + +**Example request**: + + GET /v1.23/containers/redis1/stats HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "read" : "2015-01-08T22:57:31.547920715Z", + "pids_stats": { + "current": 3 + }, + "networks": { + "eth0": { + "rx_bytes": 5338, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 36, + "tx_bytes": 648, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 8 + }, + "eth5": { + "rx_bytes": 4641, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 26, + "tx_bytes": 690, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 9 + } + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit" : 67108864, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats" : {}, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24472255, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100215355, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 739306590000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + }, + "precpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24350896, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100093996, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 9492140000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + } + } + +The `precpu_stats` is the cpu statistic of last read, 
which is used for calculating the cpu usage percent. It is not the exact copy of the `cpu_stats` field. + +**Query parameters**: + +- **stream** – 1/True/true or 0/False/false, pull stats once then disconnect. Default `true`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Resize a container TTY + +`POST /containers/(id or name)/resize` + +Resize the TTY for container with `id`. The unit is number of characters. You must restart the container for the resize to take effect. + +**Example request**: + + POST /v1.23/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **200** – no error +- **404** – No such container +- **500** – Cannot resize container + +#### Start a container + +`POST /containers/(id or name)/start` + +Start the container `id` + +> **Note**: +> For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body. +> See [create a container](#create-a-container) for details. + +**Example request**: + + POST /v1.23/containers/e90e34656806/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **detachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. + +**Status codes**: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +#### Stop a container + +`POST /containers/(id or name)/stop` + +Stop the container `id` + +**Example request**: + + POST /v1.23/containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +#### Restart a container + +`POST /containers/(id or name)/restart` + +Restart the container `id` + +**Example request**: + + POST /v1.23/containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Kill a container + +`POST /containers/(id or name)/kill` + +Kill the container `id` + +**Example request**: + + POST /v1.23/containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **signal** - Signal to send to the container: integer or string like `SIGINT`. + When not set, `SIGKILL` is assumed and the call waits for the container to exit. + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Update a container + +`POST /containers/(id or name)/update` + +Update configuration of one or more containers. 
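+
+A sketch of driving this endpoint from Go (illustrative only, assuming the default Unix socket; the field names follow the example request below):
+
+```go
+package main
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+)
+
+func main() {
+	client := &http.Client{Transport: &http.Transport{
+		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+			return net.Dial("unix", "/var/run/docker.sock")
+		},
+	}}
+
+	// Raise the memory limit and tighten the restart policy in one call.
+	body, err := json.Marshal(map[string]interface{}{
+		"Memory": 314572800,
+		"RestartPolicy": map[string]interface{}{
+			"Name":              "on-failure",
+			"MaximumRetryCount": 4,
+		},
+	})
+	if err != nil {
+		panic(err)
+	}
+
+	resp, err := client.Post(
+		"http://docker/v1.23/containers/e90e34656806/update",
+		"application/json", bytes.NewReader(body))
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	reply, _ := io.ReadAll(resp.Body)
+	fmt.Println(resp.Status, string(reply)) // expect 200 OK and a Warnings array
+}
+```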
+
+**Example request**:
+
+    POST /v1.23/containers/e90e34656806/update HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "BlkioWeight": 300,
+      "CpuShares": 512,
+      "CpuPeriod": 100000,
+      "CpuQuota": 50000,
+      "CpusetCpus": "0,1",
+      "CpusetMems": "0",
+      "Memory": 314572800,
+      "MemorySwap": 514288000,
+      "MemoryReservation": 209715200,
+      "KernelMemory": 52428800,
+      "RestartPolicy": {
+        "MaximumRetryCount": 4,
+        "Name": "on-failure"
+      }
+    }
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "Warnings": []
+    }
+
+**Status codes**:
+
+- **200** – no error
+- **400** – bad parameter
+- **404** – no such container
+- **500** – server error
+
+#### Rename a container
+
+`POST /containers/(id or name)/rename`
+
+Rename the container `id` to a `new_name`
+
+**Example request**:
+
+    POST /v1.23/containers/e90e34656806/rename?name=new_name HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Query parameters**:
+
+- **name** – new name for the container
+
+**Status codes**:
+
+- **204** – no error
+- **404** – no such container
+- **409** - conflict: name already assigned
+- **500** – server error
+
+#### Pause a container
+
+`POST /containers/(id or name)/pause`
+
+Pause the container `id`
+
+**Example request**:
+
+    POST /v1.23/containers/e90e34656806/pause HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Status codes**:
+
+- **204** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Unpause a container
+
+`POST /containers/(id or name)/unpause`
+
+Unpause the container `id`
+
+**Example request**:
+
+    POST /v1.23/containers/e90e34656806/unpause HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Status codes**:
+
+- **204** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Attach to a container
+
+`POST /containers/(id or name)/attach`
+
+Attach to the container `id`
+
+**Example request**:
+
+    POST /v1.23/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 101 UPGRADED
+    Content-Type: application/vnd.docker.raw-stream
+    Connection: Upgrade
+    Upgrade: tcp
+
+    {% raw %}
+    {{ STREAM }}
+    {% endraw %}
+
+**Query parameters**:
+
+- **detachKeys** – Override the key sequence for detaching a
+  container. Format is a single character `[a-Z]` or `ctrl-<value>`
+  where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`.
+- **logs** – 1/True/true or 0/False/false, return logs. Default `false`.
+- **stream** – 1/True/true or 0/False/false, return stream.
+  Default `false`.
+- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach
+  to `stdin`. Default `false`.
+- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return
+  `stdout` log, if `stream=true`, attach to `stdout`. Default `false`.
+- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return
+  `stderr` log, if `stream=true`, attach to `stderr`. Default `false`.
+
+**Status codes**:
+
+- **101** – no error, hints proxy about hijacking
+- **200** – no error, no upgrade header found
+- **400** – bad parameter
+- **404** – no such container
+- **409** - container is paused
+- **500** – server error
+
+**Stream details**:
+
+When the TTY setting is enabled in
+[`POST /containers/create`](#create-a-container),
+the stream is the raw data from the process PTY and client's `stdin`.
+When the TTY is disabled, the stream is multiplexed to separate
+`stdout` and `stderr`.
+
+The format is a **Header** and a **Payload** (frame).
+ +**HEADER** + +The header contains the information which the stream writes (`stdout` or +`stderr`). It also contains the size of the associated frame encoded in the +last four bytes (`uint32`). + +It is encoded on the first eight bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + +`STREAM_TYPE` can be: + +- 0: `stdin` (is written on `stdout`) +- 1: `stdout` +- 2: `stderr` + +`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of +the `uint32` size encoded as big endian. + +**PAYLOAD** + +The payload is the raw stream. + +**IMPLEMENTATION** + +The simplest way to implement the Attach protocol is the following: + + 1. Read eight bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + +#### Attach to a container (websocket) + +`GET /containers/(id or name)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /v1.23/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **detachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Wait a container + +`POST /containers/(id or name)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /v1.23/containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Remove a container + +`DELETE /containers/(id or name)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /v1.23/containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default `false`. +- **force** - 1/True/true or 0/False/false, Kill then remove the container. + Default `false`. +- **link** - 1/True/true or 0/False/false, Remove the specified + link associated to the container. Default `false`. + +**Status codes**: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **409** – conflict +- **500** – server error + +#### Copy files or folders from a container + +`POST /containers/(id or name)/copy` + +Copy files or folders of container `id` + +**Deprecated** in favor of the `archive` endpoint below. 
+ +**Example request**: + + POST /v1.23/containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Retrieving information about files and folders in a container + +`HEAD /containers/(id or name)/archive` + +See the description of the `X-Docker-Container-Path-Stat` header in the +following section. + +#### Get an archive of a filesystem resource in a container + +`GET /containers/(id or name)/archive` + +Get a tar archive of a resource in the filesystem of container `id`. + +**Query parameters**: + +- **path** - resource in the container's filesystem to archive. Required. + + If not an absolute path, it is relative to the container's root directory. + The resource specified by **path** must exist. To assert that the resource + is expected to be a directory, **path** should end in `/` or `/.` + (assuming a path separator of `/`). If **path** ends in `/.` then this + indicates that only the contents of the **path** directory should be + copied. A symlink is always resolved to its target. + + > **Note**: It is not possible to copy certain system files such as resources + > under `/proc`, `/sys`, `/dev`, and mounts created by the user in the + > container. + +**Example request**: + + GET /v1.23/containers/8cce319429b2/archive?path=/root HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + X-Docker-Container-Path-Stat: eyJuYW1lIjoicm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oiLCJsaW5rVGFyZ2V0IjoiIn0= + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +On success, a response header `X-Docker-Container-Path-Stat` will be set to a +base64-encoded JSON object containing some filesystem header information about +the archived resource. The above example value would decode to the following +JSON object (whitespace added for readability): + +```json +{ + "name": "root", + "size": 4096, + "mode": 2147484096, + "mtime": "2014-02-27T20:51:23Z", + "linkTarget": "" +} +``` + +A `HEAD` request can also be made to this endpoint if only this information is +desired. + +**Status codes**: + +- **200** - success, returns archive of copied resource +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** was asserted to be a directory but exists as a + file) +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** does not exist) +- **500** - server error + +#### Extract an archive of files or folders to a directory in a container + +`PUT /containers/(id or name)/archive` + +Upload a tar archive to be extracted to a path in the filesystem of container +`id`. + +**Query parameters**: + +- **path** - path to a directory in the container + to extract the archive's contents into. Required. + + If not an absolute path, it is relative to the container's root directory. + The **path** resource must exist. +- **noOverwriteDirNonDir** - If "1", "true", or "True" then it will be an error + if unpacking the given content would cause an existing directory to be + replaced with a non-directory and vice versa. 
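+
+For instance, a small Go program (a sketch, not from the original reference; it assumes the default Unix socket and the placeholder container ID used below) can build a tar archive in memory and upload it for extraction:
+
+```go
+package main
+
+import (
+	"archive/tar"
+	"bytes"
+	"context"
+	"fmt"
+	"net"
+	"net/http"
+)
+
+func main() {
+	// Build a single-file tar archive in memory.
+	var buf bytes.Buffer
+	tw := tar.NewWriter(&buf)
+	data := []byte("hello from the host\n")
+	if err := tw.WriteHeader(&tar.Header{
+		Name: "hello.txt", Mode: 0644, Size: int64(len(data)),
+	}); err != nil {
+		panic(err)
+	}
+	tw.Write(data)
+	tw.Close()
+
+	client := &http.Client{Transport: &http.Transport{
+		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+			return net.Dial("unix", "/var/run/docker.sock")
+		},
+	}}
+
+	// Extract the archive into /vol1 inside the container.
+	req, err := http.NewRequest(http.MethodPut,
+		"http://docker/v1.23/containers/8cce319429b2/archive?path=/vol1", &buf)
+	if err != nil {
+		panic(err)
+	}
+	req.Header.Set("Content-Type", "application/x-tar")
+
+	resp, err := client.Do(req)
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+	fmt.Println(resp.Status) // 200 OK when the content was extracted
+}
+```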
+ +**Example request**: + + PUT /v1.23/containers/8cce319429b2/archive?path=/vol1 HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – the content was extracted successfully +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** should be a directory but exists as a file) + - unable to overwrite existing directory with non-directory + (if **noOverwriteDirNonDir**) + - unable to overwrite existing non-directory with directory + (if **noOverwriteDirNonDir**) +- **403** - client error, permission denied, the volume + or container rootfs is marked as read-only. +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** resource does not exist) +- **500** – server error + +### 2.2 Images + +#### List Images + +`GET /images/json` + +**Example request**: + + GET /v1.23/images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275, + "Labels": {} + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135, + "Labels": { + "com.example.version": "v1" + } + } + ] + +**Example request, with digest information**: + + GET /v1.23/images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728, + "Labels": {} + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. 
Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label +- **filter** - only return images with the specified name + +#### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /v1.23/build HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1/5..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. To do this, its value must be +the path to the alternate build instructions file to use. + +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. After that, +each instruction is run one-by-one until the ID of the new image is output. + +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the `Dockerfile`. This is + ignored if `remote` is specified and points to an external `Dockerfile`. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. + You can provide one or more `t` parameters. +- **remote** – A Git repository URI or HTTP/HTTPS context URI. If the + URI points to a single text file, the file's contents are placed into + a file called `Dockerfile` and the image is built from that file. If + the URI points to a tarball, the file is downloaded by the daemon and + the contents therein used as the context for the build. If the URI + points to a tarball and the `dockerfile` parameter is also specified, + there must be a file with the corresponding path inside the tarball. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). +- **cpuperiod** - The length of a CPU period in microseconds. +- **cpuquota** - Microseconds of CPU time that the container can get in a CPU period. +- **buildargs** – JSON map of string pairs for build-time variables. Users pass + these values at build-time. Docker uses the `buildargs` as the environment + context for command(s) run via the Dockerfile's `RUN` instruction or for + variable expansion in other Dockerfile instructions. This is not meant for + passing secret values. 
[Read more about the buildargs instruction](../reference/builder.md#arg) +- **shmsize** - Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB. +- **labels** – JSON map of string pairs for labels to set on the image. + +**Request Headers**: + +- **Content-type** – Set to `"application/x-tar"`. +- **X-Registry-Config** – A base64-url-safe-encoded Registry Auth Config JSON + object with the following structure: + + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + + This object maps the hostname of a registry to an object containing the + "username" and "password" for that registry. Multiple registries may + be specified as the build may be based on an image requiring + authentication to pull from any arbitrary registry. Only the registry + domain name (and port if not the default "443") are required. However + (for legacy reasons) the "official" Docker, Inc. hosted registry must + be specified with both a "https://" prefix and a "/v1/" suffix even + though Docker will prefer to use the v2 registry API. + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.23/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. The name may include a tag or + digest. This parameter may only be used when pulling an image. + The pull is cancelled if the HTTP connection is closed. +- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. + This parameter may only be used when importing an image. +- **repo** – Repository name given to an image when it is imported. + The repo may include a tag. This parameter may only be used when importing + an image. +- **tag** – Tag or digest. If empty when pulling an image, this causes all tags + for the given image to be pulled. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either login information, or a token + - Credential based login: + + ``` + { + "username": "jdoe", + "password": "secret", + "email": "jdoe@acme.com" + } + ``` + + - Token based login: + + ``` + { + "identitytoken": "9cbaf023786cd7..." 
+ } + ``` + +**Status codes**: + +- **200** – no error +- **404** - repository does not exist or no read access +- **500** – server error + + + +#### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /v1.23/images/example/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id" : "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c", + "Container" : "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a", + "Comment" : "", + "Os" : "linux", + "Architecture" : "amd64", + "Parent" : "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "ContainerConfig" : { + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Domainname" : "", + "AttachStdout" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "NetworkDisabled" : false, + "OnBuild" : [], + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "User" : "", + "WorkingDir" : "", + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "Labels" : { + "com.example.license" : "GPL", + "com.example.version" : "1.0", + "com.example.vendor" : "Acme" + }, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts" : null, + "Cmd" : [ + "/bin/sh", + "-c", + "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" + ] + }, + "DockerVersion" : "1.9.0-dev", + "VirtualSize" : 188359297, + "Size" : 0, + "Author" : "", + "Created" : "2015-09-10T08:30:53.26995814Z", + "GraphDriver" : { + "Name" : "aufs", + "Data" : null + }, + "RepoDigests" : [ + "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags" : [ + "example:1.0", + "example:latest", + "example:stable" + ], + "Config" : { + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "NetworkDisabled" : false, + "OnBuild" : [], + "StdinOnce" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "Domainname" : "", + "AttachStdout" : false, + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Cmd" : [ + "/bin/bash" + ], + "ExposedPorts" : null, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Labels" : { + "com.example.vendor" : "Acme", + "com.example.version" : "1.0", + "com.example.license" : "GPL" + }, + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "WorkingDir" : "", + "User" : "" + }, + "RootFS": { + "Type": "layers", + "Layers": [ + "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6", + "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + ] + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /v1.23/images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710", + "Created": 1398108230, + "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /", + "Tags": [ + "ubuntu:lucid", + "ubuntu:10.04" + ], + "Size": 
182964289,
+            "Comment": ""
+        },
+        {
+            "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8",
+            "Created": 1398108222,
+            "CreatedBy": "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/",
+            "Tags": null,
+            "Size": 0,
+            "Comment": ""
+        },
+        {
+            "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158",
+            "Created": 1371157430,
+            "CreatedBy": "",
+            "Tags": [
+                "scratch12:latest",
+                "scratch:latest"
+            ],
+            "Size": 0,
+            "Comment": "Imported from -"
+        }
+    ]
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such image
+- **500** – server error
+
+#### Push an image on the registry
+
+`POST /images/(name)/push`
+
+Push the image `name` on the registry
+
+**Example request**:
+
+    POST /v1.23/images/test/push HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {"status": "Pushing..."}
+    {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}
+    {"error": "Invalid..."}
+    ...
+
+To push an image to a private registry, that image must already be tagged
+into a repository which references that registry `hostname` and `port`. This
+repository name should then be used in the URL. This mirrors the command line's flow.
+
+The push is cancelled if the HTTP connection is closed.
+
+**Example request**:
+
+    POST /v1.23/images/registry.acme.com:5000/test/push HTTP/1.1
+
+
+**Query parameters**:
+
+- **tag** – The tag to associate with the image on the registry. This is optional.
+
+**Request Headers**:
+
+- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either login information, or a token
+    - Credential based login:
+
+        ```
+        {
+            "username": "jdoe",
+            "password": "secret",
+            "email": "jdoe@acme.com"
+        }
+        ```
+
+    - Identity token based login:
+
+        ```
+        {
+            "identitytoken": "9cbaf023786cd7..."
+        }
+        ```
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such image
+- **500** – server error
+
+#### Tag an image into a repository
+
+`POST /images/(name)/tag`
+
+Tag the image `name` into a repository
+
+**Example request**:
+
+    POST /v1.23/images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+
+**Query parameters**:
+
+- **repo** – The repository to tag in
+- **force** – 1/True/true or 0/False/false, default false
+- **tag** - The new tag name
+
+**Status codes**:
+
+- **201** – no error
+- **400** – bad parameter
+- **404** – no such image
+- **409** – conflict
+- **500** – server error
+
+#### Remove an image
+
+`DELETE /images/(name)`
+
+Remove the image `name` from the filesystem
+
+**Example request**:
+
+    DELETE /v1.23/images/test HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-type: application/json
+
+    [
+        {"Untagged": "3e2f21a89f"},
+        {"Deleted": "3e2f21a89f"},
+        {"Deleted": "53b4f83ac9"}
+    ]
+
+**Query parameters**:
+
+- **force** – 1/True/true or 0/False/false, default false
+- **noprune** – 1/True/true or 0/False/false, default false
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such image
+- **409** – conflict
+- **500** – server error
+
+#### Search images
+
+`GET /images/search`
+
+Search for an image on [Docker Hub](https://hub.docker.com).
+
+> **Note**:
+> The response keys have changed from API v1.6 to reflect the JSON
+> sent by the registry server to the docker daemon's request.
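+
+For illustration, a minimal Go client might call this endpoint over the
+daemon's default Unix socket as sketched below. This is not part of the API
+itself; the socket path, the dummy `http://docker` host, and the search term
+are assumptions for the example.
+
+```
+package main
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net"
+	"net/http"
+	"net/url"
+)
+
+func main() {
+	// Route every request through the daemon's Unix socket (an assumption;
+	// the daemon may instead be bound to a TCP host/port).
+	client := &http.Client{Transport: &http.Transport{
+		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
+		},
+	}}
+
+	resp, err := client.Get("http://docker/v1.23/images/search?term=" + url.QueryEscape("sshd"))
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	// The body is a JSON array of results, matching the example below.
+	var results []struct {
+		Name        string `json:"name"`
+		IsOfficial  bool   `json:"is_official"`
+		IsAutomated bool   `json:"is_automated"`
+		StarCount   int    `json:"star_count"`
+	}
+	if err := json.NewDecoder(resp.Body).Decode(&results); err != nil {
+		panic(err)
+	}
+	for _, r := range results {
+		fmt.Printf("%s (stars: %d)\n", r.Name, r.StarCount)
+	}
+}
+```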
+ +**Example request**: + + GET /v1.23/images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... + ] + +**Query parameters**: + +- **term** – term to search + +**Status codes**: + +- **200** – no error +- **500** – server error + +### 2.3 Misc + +#### Check auth configuration + +`POST /auth` + +Validate credentials for a registry and get identity token, +if available, for accessing the registry without password. + +**Example request**: + + POST /v1.23/auth HTTP/1.1 + Content-Type: application/json + + { + "username": "hannibal", + "password": "xxxx", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + + { + "Status": "Login Succeeded", + "IdentityToken": "9cbaf023786cd7..." + } + +**Status codes**: + +- **200** – no error +- **204** – no error +- **500** – server error + +#### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /v1.23/info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Architecture": "x86_64", + "ClusterStore": "etcd://localhost:2379", + "CgroupDriver": "cgroupfs", + "Containers": 11, + "ContainersRunning": 7, + "ContainersStopped": 3, + "ContainersPaused": 1, + "CpuCfsPeriod": true, + "CpuCfsQuota": true, + "Debug": false, + "DockerRootDir": "/var/lib/docker", + "Driver": "btrfs", + "DriverStatus": [[""]], + "ExecutionDriver": "native-0.1", + "ExperimentalBuild": false, + "HttpProxy": "http://test:test@localhost:8080", + "HttpsProxy": "https://test:test@localhost:8080", + "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "IPv4Forwarding": true, + "Images": 16, + "IndexServerAddress": "https://index.docker.io/v1/", + "InitPath": "/usr/bin/docker", + "InitSha1": "", + "KernelMemory": true, + "KernelVersion": "3.12.0-1-amd64", + "Labels": [ + "storage=ssd" + ], + "MemTotal": 2099236864, + "MemoryLimit": true, + "NCPU": 1, + "NEventsListener": 0, + "NFd": 11, + "NGoroutines": 21, + "Name": "prod-server-42", + "NoProxy": "9.81.1.160", + "OomKillDisable": true, + "OSType": "linux", + "OperatingSystem": "Boot2Docker", + "Plugins": { + "Volume": [ + "local" + ], + "Network": [ + "null", + "host", + "bridge" + ] + }, + "RegistryConfig": { + "IndexConfigs": { + "docker.io": { + "Mirrors": null, + "Name": "docker.io", + "Official": true, + "Secure": true + } + }, + "InsecureRegistryCIDRs": [ + "127.0.0.0/8" + ] + }, + "ServerVersion": "1.9.0", + "SwapLimit": false, + "SystemStatus": [["State", "Healthy"]], + "SystemTime": "2015-03-10T11:11:23.730591467-07:00" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /v1.23/version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.11.0", + "Os": "linux", + "KernelVersion": "3.19.0-23-generic", + "GoVersion": "go1.4.2", + "GitCommit": "e75da4b", + "Arch": "amd64", + "ApiVersion": "1.23", + "BuildTime": 
"2015-12-01T07:09:13.444803460+00:00", + "Experimental": true + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /v1.23/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.23/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ], + "Labels": { + "key1": "value1", + "key2": "value2" + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") +- **pause** – 1/True/true or 0/False/false, whether to pause the container before committing +- **changes** – Dockerfile instructions to apply while committing + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **500** – server error + +#### Monitor Docker's events + +`GET /events` + +Get container events from docker, in real time via streaming. 
+ +Docker containers report the following events: + + attach, commit, copy, create, destroy, die, exec_create, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update + +Docker images report the following events: + + delete, import, pull, push, tag, untag + +Docker volumes report the following events: + + create, mount, unmount, destroy + +Docker networks report the following events: + + create, connect, disconnect, destroy + +**Example request**: + + GET /v1.23/events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + Server: Docker/1.11.0 (linux) + Date: Fri, 29 Apr 2016 15:18:06 GMT + Transfer-Encoding: chunked + + { + "status": "pull", + "id": "alpine:latest", + "Type": "image", + "Action": "pull", + "Actor": { + "ID": "alpine:latest", + "Attributes": { + "name": "alpine" + } + }, + "time": 1461943101, + "timeNano": 1461943101301854122 + } + { + "status": "create", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "create", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 1461943101381709551 + } + { + "status": "attach", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "attach", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 1461943101383858412 + } + { + "Type": "network", + "Action": "connect", + "Actor": { + "ID": "7dc8ac97d5d29ef6c31b6052f3938c1e8f2749abbd17d1bd1febf2608db1b474", + "Attributes": { + "container": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "name": "bridge", + "type": "bridge" + } + }, + "time": 1461943101, + "timeNano": 1461943101394865557 + } + { + "status": "start", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "start", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 1461943101607533796 + } + { + "status": "resize", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "resize", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "height": "46", + "image": "alpine", + "name": "my-container", + "width": "204" + } + }, + "time": 1461943101, + "timeNano": 1461943101610269268 + } + { + "status": "die", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "die", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "exitCode": "0", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943105, + "timeNano": 1461943105079144137 + } + { + "Type": "network", + 
"Action": "disconnect", + "Actor": { + "ID": "7dc8ac97d5d29ef6c31b6052f3938c1e8f2749abbd17d1bd1febf2608db1b474", + "Attributes": { + "container": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "name": "bridge", + "type": "bridge" + } + }, + "time": 1461943105, + "timeNano": 1461943105230860245 + } + { + "status": "destroy", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "destroy", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943105, + "timeNano": 1461943105338056026 + } + +**Query parameters**: + +- **since** – Timestamp. Show all events created since timestamp and then stream +- **until** – Timestamp. Show events created until given timestamp and stop streaming +- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: + - `container=`; -- container to filter + - `event=`; -- event to filter + - `image=`; -- image to filter + - `label=`; -- image and container label to filter + - `type=`; -- either `container` or `image` or `volume` or `network` + - `volume=`; -- volume to filter + - `network=`; -- network to filter + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.23/images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.23/images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. +See the [image tarball format](#image-tarball-format) for more details. 
+
+**Example request**
+
+    POST /v1.23/images/load
+    Content-Type: application/x-tar
+
+    Tarball in body
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+    Transfer-Encoding: chunked
+
+    {"status":"Loading layer","progressDetail":{"current":32768,"total":1292800},"progress":"[= ] 32.77 kB/1.293 MB","id":"8ac8bfaff55a"}
+    {"status":"Loading layer","progressDetail":{"current":65536,"total":1292800},"progress":"[== ] 65.54 kB/1.293 MB","id":"8ac8bfaff55a"}
+    {"status":"Loading layer","progressDetail":{"current":98304,"total":1292800},"progress":"[=== ] 98.3 kB/1.293 MB","id":"8ac8bfaff55a"}
+    {"status":"Loading layer","progressDetail":{"current":131072,"total":1292800},"progress":"[===== ] 131.1 kB/1.293 MB","id":"8ac8bfaff55a"}
+    ...
+    {"stream":"Loaded image: busybox:latest\n"}
+
+**Example response**:
+
+If the "quiet" query parameter is set to `true` / `1` (`?quiet=1`), progress
+details are suppressed, and only a confirmation message is returned once the
+action completes.
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+    Transfer-Encoding: chunked
+
+    {"stream":"Loaded image: busybox:latest\n"}
+
+**Query parameters**:
+
+- **quiet** – Boolean value, suppress progress details during load. Defaults
+  to `0` / `false` if omitted.
+
+**Status codes**:
+
+- **200** – no error
+- **500** – server error
+
+#### Image tarball format
+
+An image tarball contains one directory per image layer (named using its long ID),
+each containing these files:
+
+- `VERSION`: currently `1.0` - the file format version
+- `json`: detailed layer information, similar to `docker inspect layer_id`
+- `layer.tar`: A tarfile containing the filesystem changes in this layer
+
+The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories
+for storing attribute changes and deletions.
+
+If the tarball defines a repository, the tarball should also include a `repositories` file at
+the root that contains a list of repository and tag names mapped to layer IDs.
+
+```
+{"hello-world":
+    {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"}
+}
+```
+
+#### Exec Create
+
+`POST /containers/(id or name)/exec`
+
+Sets up an exec instance in a running container `id`
+
+**Example request**:
+
+    POST /v1.23/containers/e90e34656806/exec HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "AttachStdin": true,
+      "AttachStdout": true,
+      "AttachStderr": true,
+      "Cmd": ["sh"],
+      "DetachKeys": "ctrl-p,ctrl-q",
+      "Privileged": true,
+      "Tty": true,
+      "User": "123:456"
+    }
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: application/json
+
+    {
+        "Id": "f90e34656806",
+        "Warnings":[]
+    }
+
+**JSON parameters**:
+
+- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command.
+- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command.
+- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command.
+- **DetachKeys** – Override the key sequence for detaching a
+  container. Format is a single character `[a-Z]` or `ctrl-<value>`
+  where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`.
+- **Tty** - Boolean value to allocate a pseudo-TTY.
+- **Cmd** - Command to run specified as a string or an array of strings.
+- **Privileged** - Boolean value, runs the exec process with extended privileges.
+- **User** - A string value specifying the user, and optionally, group to run
+  the exec process inside the container. Format is one of: `"user"`,
+  `"user:group"`, `"uid"`, or `"uid:gid"`.
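+
+A minimal Go sketch of setting up an exec instance with a subset of these
+parameters. The container ID and command are placeholders, and the Unix-socket
+transport is an assumption, as in the earlier sketches.
+
+```
+package main
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"net"
+	"net/http"
+)
+
+func main() {
+	client := &http.Client{Transport: &http.Transport{
+		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
+		},
+	}}
+
+	// Only a few of the documented JSON parameters are set here.
+	body, _ := json.Marshal(map[string]interface{}{
+		"AttachStdout": true,
+		"AttachStderr": true,
+		"Cmd":          []string{"date"},
+	})
+	resp, err := client.Post("http://docker/v1.23/containers/e90e34656806/exec",
+		"application/json", bytes.NewReader(body))
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	var out struct {
+		Id       string
+		Warnings []string
+	}
+	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
+		panic(err)
+	}
+	// The returned ID is then passed to POST /exec/(id)/start.
+	fmt.Println("exec instance:", out.Id)
+}
+```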
+ +**Status codes**: + +- **201** – no error +- **404** – no such container +- **409** - container is paused +- **500** - server error + +#### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up `exec` instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. + +**Example request**: + + POST /v1.23/exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach": false, + "Tty": false + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {% raw %} + {{ STREAM }} + {% endraw %} + +**JSON parameters**: + +- **Detach** - Detach from the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **409** - container is paused + +**Stream details**: + +Similar to the stream behavior of `POST /containers/(id or name)/attach` API + +#### Exec Resize + +`POST /exec/(id)/resize` + +Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters. +This API is valid only if `tty` was specified as part of creating and starting the `exec` command. + +**Example request**: + + POST /v1.23/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1 + Content-Type: text/plain + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: text/plain + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **201** – no error +- **404** – no such exec instance + +#### Exec Inspect + +`GET /exec/(id)/json` + +Return low-level information about the `exec` command `id`. + +**Example request**: + + GET /v1.23/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "CanRemove": false, + "ContainerID": "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126", + "DetachKeys": "", + "ExitCode": 2, + "ID": "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b", + "OpenStderr": true, + "OpenStdin": true, + "OpenStdout": true, + "ProcessConfig": { + "arguments": [ + "-c", + "exit 2" + ], + "entrypoint": "sh", + "privileged": false, + "tty": true, + "user": "1000" + }, + "Running": false + } + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **500** - server error + +### 2.4 Volumes + +#### List volumes + +`GET /volumes` + +**Example request**: + + GET /v1.23/volumes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Volumes": [ + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + ], + "Warnings": [] + } + +**Query parameters**: + +- **filters** - JSON encoded value of the filters (a `map[string][]string`) to process on the volumes list. 
There is one available filter: `dangling=true`
+
+**Status codes**:
+
+- **200** - no error
+- **500** - server error
+
+#### Create a volume
+
+`POST /volumes/create`
+
+Create a volume
+
+**Example request**:
+
+    POST /v1.23/volumes/create HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "Name": "tardis",
+      "Labels": {
+        "com.example.some-label": "some-value",
+        "com.example.some-other-label": "some-other-value"
+      }
+    }
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: application/json
+
+    {
+      "Name": "tardis",
+      "Driver": "local",
+      "Mountpoint": "/var/lib/docker/volumes/tardis",
+      "Labels": {
+        "com.example.some-label": "some-value",
+        "com.example.some-other-label": "some-other-value"
+      }
+    }
+
+**Status codes**:
+
+- **201** - no error
+- **500** - server error
+
+**JSON parameters**:
+
+- **Name** - The new volume's name. If not specified, Docker generates a name.
+- **Driver** - Name of the volume driver to use. Defaults to `local`.
+- **DriverOpts** - A mapping of driver options and values. These options are
+  passed directly to the driver and are driver specific.
+- **Labels** - Labels to set on the volume, specified as a map: `{"key":"value","key2":"value2"}`
+
+#### Inspect a volume
+
+`GET /volumes/(name)`
+
+Return low-level information on the volume `name`
+
+**Example request**:
+
+    GET /v1.23/volumes/tardis
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+      "Name": "tardis",
+      "Driver": "local",
+      "Mountpoint": "/var/lib/docker/volumes/tardis/_data",
+      "Labels": {
+        "com.example.some-label": "some-value",
+        "com.example.some-other-label": "some-other-value"
+      }
+    }
+
+**Status codes**:
+
+- **200** - no error
+- **404** - no such volume
+- **500** - server error
+
+#### Remove a volume
+
+`DELETE /volumes/(name)`
+
+Instruct the driver to remove the volume (`name`).
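+
+A small Go sketch of issuing this DELETE and mapping the documented status
+codes; the volume name, socket path, and dummy `http://docker` host are
+placeholders.
+
+```
+package main
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"net/http"
+)
+
+func main() {
+	client := &http.Client{Transport: &http.Transport{
+		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
+		},
+	}}
+
+	req, err := http.NewRequest(http.MethodDelete, "http://docker/v1.23/volumes/tardis", nil)
+	if err != nil {
+		panic(err)
+	}
+	resp, err := client.Do(req)
+	if err != nil {
+		panic(err)
+	}
+	resp.Body.Close()
+
+	// Map the documented status codes to human-readable outcomes.
+	switch resp.StatusCode {
+	case 204:
+		fmt.Println("volume removed")
+	case 404:
+		fmt.Println("no such volume or volume driver")
+	case 409:
+		fmt.Println("volume is in use and cannot be removed")
+	default:
+		fmt.Println("server error:", resp.Status)
+	}
+}
+```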
+
+**Example request**:
+
+    DELETE /v1.23/volumes/tardis HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Status codes**:
+
+- **204** - no error
+- **404** - no such volume or volume driver
+- **409** - volume is in use and cannot be removed
+- **500** - server error
+
+### 2.5 Networks
+
+#### List networks
+
+`GET /networks`
+
+**Example request**:
+
+    GET /v1.23/networks?filters={"type":{"custom":true}} HTTP/1.1
+
+**Example response**:
+
+```
+HTTP/1.1 200 OK
+Content-Type: application/json
+
+[
+  {
+    "Name": "bridge",
+    "Id": "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566",
+    "Scope": "local",
+    "Driver": "bridge",
+    "EnableIPv6": false,
+    "Internal": false,
+    "IPAM": {
+      "Driver": "default",
+      "Config": [
+        {
+          "Subnet": "172.17.0.0/16"
+        }
+      ]
+    },
+    "Containers": {
+      "39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867": {
+        "EndpointID": "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda",
+        "MacAddress": "02:42:ac:11:00:02",
+        "IPv4Address": "172.17.0.2/16",
+        "IPv6Address": ""
+      }
+    },
+    "Options": {
+      "com.docker.network.bridge.default_bridge": "true",
+      "com.docker.network.bridge.enable_icc": "true",
+      "com.docker.network.bridge.enable_ip_masquerade": "true",
+      "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
+      "com.docker.network.bridge.name": "docker0",
+      "com.docker.network.driver.mtu": "1500"
+    }
+  },
+  {
+    "Name": "none",
+    "Id": "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794",
+    "Scope": "local",
+    "Driver": "null",
+    "EnableIPv6": false,
+    "Internal": false,
+    "IPAM": {
+      "Driver": "default",
+      "Config": []
+    },
+    "Containers": {},
+    "Options": {}
+  },
+  {
+    "Name": "host",
+    "Id": "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e",
+    "Scope": "local",
+    "Driver": "host",
+    "EnableIPv6": false,
+    "Internal": false,
+    "IPAM": {
+      "Driver": "default",
+      "Config": []
+    },
+    "Containers": {},
+    "Options": {}
+  }
+]
+```
+
+**Query parameters**:
+
+- **filters** - JSON encoded network list filter (see the sketch below). The filter value is one of:
+  - `id=<network-id>` Matches all or part of a network id.
+  - `name=<network-name>` Matches all or part of a network name.
+  - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks.
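+
+Because the filter value is itself a JSON document, a client typically
+marshals it and then URL-encodes the result. A short Go sketch, with the same
+transport assumptions as the earlier sketches:
+
+```
+package main
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"net/url"
+)
+
+func main() {
+	client := &http.Client{Transport: &http.Transport{
+		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
+		},
+	}}
+
+	// Build {"type":{"custom":true}}, then escape it for the query string.
+	filters, _ := json.Marshal(map[string]map[string]bool{"type": {"custom": true}})
+	u := "http://docker/v1.23/networks?filters=" + url.QueryEscape(string(filters))
+
+	resp, err := client.Get(u)
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+	body, _ := io.ReadAll(resp.Body)
+	fmt.Println(string(body))
+}
+```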
+
+**Status codes**:
+
+- **200** - no error
+- **500** - server error
+
+#### Inspect network
+
+`GET /networks/(id or name)`
+
+Return low-level information on the network `id`
+
+**Example request**:
+
+    GET /v1.23/networks/7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99 HTTP/1.1
+
+**Example response**:
+
+```
+HTTP/1.1 200 OK
+Content-Type: application/json
+
+{
+  "Name": "net01",
+  "Id": "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99",
+  "Scope": "local",
+  "Driver": "bridge",
+  "EnableIPv6": false,
+  "IPAM": {
+    "Driver": "default",
+    "Config": [
+      {
+        "Subnet": "172.19.0.0/16",
+        "Gateway": "172.19.0.1/16"
+      }
+    ],
+    "Options": {
+      "foo": "bar"
+    }
+  },
+  "Internal": false,
+  "Containers": {
+    "19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c": {
+      "Name": "test",
+      "EndpointID": "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a",
+      "MacAddress": "02:42:ac:13:00:02",
+      "IPv4Address": "172.19.0.2/16",
+      "IPv6Address": ""
+    }
+  },
+  "Options": {
+    "com.docker.network.bridge.default_bridge": "true",
+    "com.docker.network.bridge.enable_icc": "true",
+    "com.docker.network.bridge.enable_ip_masquerade": "true",
+    "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
+    "com.docker.network.bridge.name": "docker0",
+    "com.docker.network.driver.mtu": "1500"
+  },
+  "Labels": {
+    "com.example.some-label": "some-value",
+    "com.example.some-other-label": "some-other-value"
+  }
+}
+```
+
+**Status codes**:
+
+- **200** - no error
+- **404** - network not found
+- **500** - server error
+
+#### Create a network
+
+`POST /networks/create`
+
+Create a network
+
+**Example request**:
+
+```
+POST /v1.23/networks/create HTTP/1.1
+Content-Type: application/json
+
+{
+  "Name":"isolated_nw",
+  "CheckDuplicate":true,
+  "Driver":"bridge",
+  "EnableIPv6": true,
+  "IPAM":{
+    "Driver": "default",
+    "Config":[
+      {
+        "Subnet":"172.20.0.0/16",
+        "IPRange":"172.20.10.0/24",
+        "Gateway":"172.20.10.11"
+      },
+      {
+        "Subnet":"2001:db8:abcd::/64",
+        "Gateway":"2001:db8:abcd::1011"
+      }
+    ],
+    "Options": {
+      "foo": "bar"
+    }
+  },
+  "Internal":true,
+  "Options": {
+    "com.docker.network.bridge.default_bridge": "true",
+    "com.docker.network.bridge.enable_icc": "true",
+    "com.docker.network.bridge.enable_ip_masquerade": "true",
+    "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
+    "com.docker.network.bridge.name": "docker0",
+    "com.docker.network.driver.mtu": "1500"
+  },
+  "Labels": {
+    "com.example.some-label": "some-value",
+    "com.example.some-other-label": "some-other-value"
+  }
+}
+```
+
+**Example response**:
+
+```
+HTTP/1.1 201 Created
+Content-Type: application/json
+
+{
+  "Id": "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30",
+  "Warning": ""
+}
+```
+
+**Status codes**:
+
+- **201** - no error
+- **404** - plugin not found
+- **500** - server error
+
+**JSON parameters**:
+
+- **Name** - The new network's name. This is a mandatory field.
+- **CheckDuplicate** - Requests the daemon to check for networks with the same name. Defaults to `false`.
+  Because a network is primarily keyed on a random ID rather than on its name,
+  and a network name is strictly a user-friendly alias for that ID, there is
+  no guaranteed way to check for duplicates. `CheckDuplicate` provides
+  best-effort detection of networks with the same name, but it is not
+  guaranteed to catch all name collisions.
+- **Driver** - Name of the network driver plugin to use.
Defaults to the `bridge` driver.
+- **Internal** - Restrict external access to the network
+- **IPAM** - Optional custom IP scheme for the network
+  - **Driver** - Name of the IPAM driver to use. Defaults to the `default` driver.
+  - **Config** - List of IPAM configuration options, specified as a map:
+    `{"Subnet": <CIDR>, "IPRange": <CIDR>, "Gateway": <IP address>, "AuxAddress": <device_name:IP address>}`
+  - **Options** - Driver-specific options, specified as a map: `{"option":"value" [,"option2":"value2"]}`
+- **EnableIPv6** - Enable IPv6 on the network
+- **Options** - Network specific options to be used by the drivers
+- **Labels** - Labels to set on the network, specified as a map: `{"key":"value" [,"key2":"value2"]}`
+
+#### Connect a container to a network
+
+`POST /networks/(id or name)/connect`
+
+Connect a container to a network
+
+**Example request**:
+
+```
+POST /v1.23/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/connect HTTP/1.1
+Content-Type: application/json
+
+{
+  "Container":"3613f73ba0e4",
+  "EndpointConfig": {
+    "IPAMConfig": {
+      "IPv4Address":"172.24.56.89",
+      "IPv6Address":"2001:db8::5689"
+    }
+  }
+}
+```
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+
+**Status codes**:
+
+- **200** - no error
+- **404** - network or container is not found
+- **500** - Internal Server Error
+
+**JSON parameters**:
+
+- **container** - container-id/name to be connected to the network
+
+#### Disconnect a container from a network
+
+`POST /networks/(id or name)/disconnect`
+
+Disconnect a container from a network
+
+**Example request**:
+
+```
+POST /v1.23/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/disconnect HTTP/1.1
+Content-Type: application/json
+
+{
+  "Container":"3613f73ba0e4",
+  "Force":false
+}
+```
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+
+**Status codes**:
+
+- **200** - no error
+- **404** - network or container not found
+- **500** - Internal Server Error
+
+**JSON parameters**:
+
+- **Container** - container-id/name to be disconnected from a network
+- **Force** - Force the container to disconnect from a network
+
+#### Remove a network
+
+`DELETE /networks/(id or name)`
+
+Instruct the driver to remove the network (`id`).
+
+**Example request**:
+
+    DELETE /v1.23/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Status codes**:
+
+- **204** - no error
+- **403** - operation not supported for pre-defined networks
+- **404** - no such network
+- **500** - server error
+
+## 3. Going further
+
+### 3.1 Inside `docker run`
+
+As an example, the `docker run` command line makes the following API calls:
+
+- Create the container
+
+- If the status code is 404, it means the image doesn't exist:
+    - Try to pull it.
+    - Then, retry to create the container.
+
+- Start the container.
+
+- If you are not in detached mode:
+    - Attach to the container, using `logs=1` (to have `stdout` and
+      `stderr` from the container's start) and `stream=1`
+
+- If in detached mode or only `stdin` is attached, display the container's id.
+
+### 3.2 Hijacking
+
+In this version of the API, `/attach` uses hijacking to transport `stdin`,
+`stdout`, and `stderr` on the same socket.
+
+To hint potential proxies about connection hijacking, the Docker client sends
+connection upgrade headers, similar to WebSocket:
+
+    Upgrade: tcp
+    Connection: Upgrade
+
+When the Docker daemon detects the `Upgrade` header, it switches its status code
+from **200 OK** to **101 UPGRADED** and resends the same headers.
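+
+A bare-bones Go sketch of this upgrade flow against `/attach`. The container
+ID and query parameters are placeholders, error handling is minimal, and the
+socket path is the default assumed throughout these sketches.
+
+```
+package main
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"os"
+)
+
+func main() {
+	// Dial the daemon directly; after the 101 response the same socket
+	// carries the container's raw streams in both directions.
+	conn, err := net.Dial("unix", "/var/run/docker.sock")
+	if err != nil {
+		panic(err)
+	}
+	defer conn.Close()
+
+	req, err := http.NewRequest("POST",
+		"http://docker/v1.23/containers/e90e34656806/attach?stream=1&stdin=1&stdout=1&stderr=1", nil)
+	if err != nil {
+		panic(err)
+	}
+	req.Header.Set("Upgrade", "tcp")
+	req.Header.Set("Connection", "Upgrade")
+	if err := req.Write(conn); err != nil {
+		panic(err)
+	}
+
+	// Read the HTTP response; 101 UPGRADED means the hijack succeeded.
+	br := bufio.NewReader(conn)
+	resp, err := http.ReadResponse(br, req)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Fprintln(os.Stderr, "daemon replied:", resp.Status)
+
+	// From here on, the connection is a plain bidirectional byte stream.
+	go io.Copy(conn, os.Stdin)
+	io.Copy(os.Stdout, br)
+}
+```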
+
+
+### 3.3 CORS Requests
+
+To allow cross-origin requests to the Engine API, set values for
+`--api-cors-header` when running Docker in daemon mode. Setting `*` (asterisk)
+allows all origins; a default or blank value means CORS is disabled.
+
+    $ dockerd -H="192.168.1.9:2375" --api-cors-header="http://foo.bar"
diff --git a/vendor/github.com/moby/moby/docs/api/v1.24.md b/vendor/github.com/moby/moby/docs/api/v1.24.md
new file mode 100644
index 000000000..d07ea84b3
--- /dev/null
+++ b/vendor/github.com/moby/moby/docs/api/v1.24.md
@@ -0,0 +1,5348 @@
+---
+title: "Engine API v1.24"
+description: "API Documentation for Docker"
+keywords: "API, Docker, rcli, REST, documentation"
+redirect_from:
+- /engine/reference/api/docker_remote_api_v1.24/
+- /reference/api/docker_remote_api_v1.24/
+---
+
+
+
+## 1. Brief introduction
+
+ - The daemon listens on `unix:///var/run/docker.sock` but you can
+   [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket).
+ - The API tends to be REST. However, for some complex commands, like `attach`
+   or `pull`, the HTTP connection is hijacked to transport `stdout`,
+   `stdin` and `stderr`.
+
+## 2. Errors
+
+The Engine API uses standard HTTP status codes to indicate the success or failure of the API call. The body of the response will be JSON in the following format:
+
+    {
+        "message": "page not found"
+    }
+
+The status codes that are returned for each endpoint are specified in the endpoint documentation below.
+
+## 3. Endpoints
+
+### 3.1 Containers
+
+#### List containers
+
+`GET /containers/json`
+
+List containers
+
+**Example request**:
+
+    GET /v1.24/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+        {
+            "Id": "8dfafdbc3a40",
+            "Names":["/boring_feynman"],
+            "Image": "ubuntu:latest",
+            "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82",
+            "Command": "echo 1",
+            "Created": 1367854155,
+            "State": "exited",
+            "Status": "Exit 0",
+            "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}],
+            "Labels": {
+                "com.example.vendor": "Acme",
+                "com.example.license": "GPL",
+                "com.example.version": "1.0"
+            },
+            "SizeRw": 12288,
+            "SizeRootFs": 0,
+            "HostConfig": {
+                "NetworkMode": "default"
+            },
+            "NetworkSettings": {
+                "Networks": {
+                    "bridge": {
+                        "IPAMConfig": null,
+                        "Links": null,
+                        "Aliases": null,
+                        "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812",
+                        "EndpointID": "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f",
+                        "Gateway": "172.17.0.1",
+                        "IPAddress": "172.17.0.2",
+                        "IPPrefixLen": 16,
+                        "IPv6Gateway": "",
+                        "GlobalIPv6Address": "",
+                        "GlobalIPv6PrefixLen": 0,
+                        "MacAddress": "02:42:ac:11:00:02"
+                    }
+                }
+            },
+            "Mounts": [
+                {
+                    "Name": "fac362...80535",
+                    "Source": "/data",
+                    "Destination": "/data",
+                    "Driver": "local",
+                    "Mode": "ro,Z",
+                    "RW": false,
+                    "Propagation": ""
+                }
+            ]
+        },
+        {
+            "Id": "9cd87474be90",
+            "Names":["/coolName"],
+            "Image": "ubuntu:latest",
+            "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82",
+            "Command": "echo 222222",
+            "Created": 1367854155,
+            "State": "exited",
+            "Status": "Exit 0",
+            "Ports": [],
+            "Labels": {},
+            "SizeRw": 12288,
+            "SizeRootFs": 0,
+            "HostConfig": {
+                "NetworkMode": "default"
+            },
+            "NetworkSettings": {
+                "Networks": {
+                    "bridge": {
+                        "IPAMConfig": null,
+                        "Links": null,
+                        "Aliases": null,
+                        "NetworkID":
"7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.8", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:08" + } + } + }, + "Mounts": [] + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "State": "exited", + "Status": "Exit 0", + "Ports":[], + "Labels": {}, + "SizeRw":12288, + "SizeRootFs":0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.6", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:06" + } + } + }, + "Mounts": [] + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "State": "exited", + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.5", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:05" + } + } + }, + "Mounts": [] + } + ] + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. 
Available filters:
+  - `exited=<int>`; -- containers with exit code of `<int>`;
+  - `status=`(`created`|`restarting`|`running`|`paused`|`exited`|`dead`)
+  - `label=key` or `label="key=value"` of a container label
+  - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only)
+  - `ancestor`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`)
+  - `before`=(`<container id>` or `<container name>`)
+  - `since`=(`<container id>` or `<container name>`)
+  - `volume`=(`<volume name>` or `<mount point destination>`)
+  - `network`=(`<network id>` or `<network name>`)
+
+**Status codes**:
+
+- **200** – no error
+- **400** – bad parameter
+- **500** – server error
+
+#### Create a container
+
+`POST /containers/create`
+
+Create a container
+
+**Example request**:
+
+    POST /v1.24/containers/create HTTP/1.1
+    Content-Type: application/json
+
+    {
+        "Hostname": "",
+        "Domainname": "",
+        "User": "",
+        "AttachStdin": false,
+        "AttachStdout": true,
+        "AttachStderr": true,
+        "Tty": false,
+        "OpenStdin": false,
+        "StdinOnce": false,
+        "Env": [
+            "FOO=bar",
+            "BAZ=quux"
+        ],
+        "Cmd": [
+            "date"
+        ],
+        "Entrypoint": "",
+        "Image": "ubuntu",
+        "Labels": {
+            "com.example.vendor": "Acme",
+            "com.example.license": "GPL",
+            "com.example.version": "1.0"
+        },
+        "Volumes": {
+            "/volumes/data": {}
+        },
+        "Healthcheck":{
+            "Test": ["CMD-SHELL", "curl localhost:3000"],
+            "Interval": 1000000000,
+            "Timeout": 10000000000,
+            "Retries": 10,
+            "StartPeriod": 60000000000
+        },
+        "WorkingDir": "",
+        "NetworkDisabled": false,
+        "MacAddress": "12:34:56:78:9a:bc",
+        "ExposedPorts": {
+            "22/tcp": {}
+        },
+        "StopSignal": "SIGTERM",
+        "HostConfig": {
+            "Binds": ["/tmp:/tmp"],
+            "Tmpfs": { "/run": "rw,noexec,nosuid,size=65536k" },
+            "Links": ["redis3:redis"],
+            "Memory": 0,
+            "MemorySwap": 0,
+            "MemoryReservation": 0,
+            "KernelMemory": 0,
+            "CpuPercent": 80,
+            "CpuShares": 512,
+            "CpuPeriod": 100000,
+            "CpuQuota": 50000,
+            "CpusetCpus": "0,1",
+            "CpusetMems": "0,1",
+            "IOMaximumBandwidth": 0,
+            "IOMaximumIOps": 0,
+            "BlkioWeight": 300,
+            "BlkioWeightDevice": [{}],
+            "BlkioDeviceReadBps": [{}],
+            "BlkioDeviceReadIOps": [{}],
+            "BlkioDeviceWriteBps": [{}],
+            "BlkioDeviceWriteIOps": [{}],
+            "MemorySwappiness": 60,
+            "OomKillDisable": false,
+            "OomScoreAdj": 500,
+            "PidMode": "",
+            "PidsLimit": -1,
+            "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] },
+            "PublishAllPorts": false,
+            "Privileged": false,
+            "ReadonlyRootfs": false,
+            "Dns": ["8.8.8.8"],
+            "DnsOptions": [""],
+            "DnsSearch": [""],
+            "ExtraHosts": null,
+            "VolumesFrom": ["parent", "other:ro"],
+            "CapAdd": ["NET_ADMIN"],
+            "CapDrop": ["MKNOD"],
+            "GroupAdd": ["newgroup"],
+            "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 },
+            "NetworkMode": "bridge",
+            "Devices": [],
+            "Sysctls": { "net.ipv4.ip_forward": "1" },
+            "Ulimits": [{}],
+            "LogConfig": { "Type": "json-file", "Config": {} },
+            "SecurityOpt": [],
+            "StorageOpt": {},
+            "CgroupParent": "",
+            "VolumeDriver": "",
+            "ShmSize": 67108864
+        },
+        "NetworkingConfig": {
+            "EndpointsConfig": {
+                "isolated_nw" : {
+                    "IPAMConfig": {
+                        "IPv4Address":"172.20.30.33",
+                        "IPv6Address":"2001:db8:abcd::3033",
+                        "LinkLocalIPs":["169.254.34.68", "fe80::3468"]
+                    },
+                    "Links":["container_1", "container_2"],
+                    "Aliases":["server_x", "server_y"]
+                }
+            }
+        }
+    }
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: application/json
+
+    {
+        "Id":"e90e34656806",
+        "Warnings":[]
+    }
+
+**JSON parameters**:
+
+- **Hostname** - A string value containing the hostname to use for the
+  container. This must be a valid RFC 1123 hostname.
+- **Domainname** - A string value containing the domain name to use
+  for the container.
+- **User** - A string value specifying the user inside the container.
+- **AttachStdin** - Boolean value, attaches to `stdin`.
+- **AttachStdout** - Boolean value, attaches to `stdout`.
+- **AttachStderr** - Boolean value, attaches to `stderr`.
+- **Tty** - Boolean value, Attach standard streams to a `tty`, including `stdin` if it is not closed.
+- **OpenStdin** - Boolean value, opens `stdin`.
+- **StdinOnce** - Boolean value, close `stdin` after the first attached client disconnects.
+- **Env** - A list of environment variables in the form of `["VAR=value", ...]`
+- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }`
+- **Cmd** - Command to run specified as a string or an array of strings.
+- **Entrypoint** - Set the entry point for the container as a string or an array
+  of strings.
+- **Image** - A string specifying the image name to use for the container.
+- **Volumes** - An object mapping mount point paths (strings) inside the
+  container to empty objects.
+- **Healthcheck** - A test to perform to check that the container is healthy.
+    - **Test** - The test to perform. Possible values are:
+        + `{}` inherit healthcheck from image or parent image
+        + `{"NONE"}` disable healthcheck
+        + `{"CMD", args...}` exec arguments directly
+        + `{"CMD-SHELL", command}` run command with system's default shell
+    - **Interval** - The time to wait between checks in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit.
+    - **Timeout** - The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit.
+    - **Retries** - The number of consecutive failures needed to consider a container as unhealthy. 0 means inherit.
+    - **StartPeriod** - The time to wait for container initialization before starting health-retries countdown in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit.
+- **WorkingDir** - A string specifying the working directory for commands to
+  run in.
+- **NetworkDisabled** - Boolean value, when true disables networking for the
+  container
+- **ExposedPorts** - An object mapping ports to an empty object in the form of:
+  `"ExposedPorts": { "<port>/<tcp|udp>: {}" }`
+- **StopSignal** - Signal to stop a container as a string or unsigned integer. `SIGTERM` by default.
+- **HostConfig**
+    - **Binds** – A list of volume bindings for this container. Each volume binding is a string in one of these forms:
+        + `host-src:container-dest` to bind-mount a host path into the
+          container. Both `host-src`, and `container-dest` must be an
+          _absolute_ path.
+        + `host-src:container-dest:ro` to make the bind-mount read-only
+          inside the container. Both `host-src`, and `container-dest` must be
+          an _absolute_ path.
+        + `volume-name:container-dest` to bind-mount a volume managed by a
+          volume driver into the container. `container-dest` must be an
+          _absolute_ path.
+        + `volume-name:container-dest:ro` to mount the volume read-only
+          inside the container. `container-dest` must be an _absolute_ path.
+    - **Tmpfs** – A map of container directories which should be replaced by tmpfs mounts, and their corresponding
+      mount options. A JSON object in the form `{ "/run": "rw,noexec,nosuid,size=65536k" }`.
+    - **Links** - A list of links for the container. Each link entry should be
+      in the form of `container_name:alias`.
+    - **Memory** - Memory limit in bytes.
+    - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap.
+      You must use this with `memory` and make the swap value larger than `memory`.
+    - **MemoryReservation** - Memory soft limit in bytes.
+    - **KernelMemory** - Kernel memory limit in bytes.
+    - **CpuPercent** - An integer value containing the usable percentage of the available CPUs. (Windows daemon only)
+    - **CpuShares** - An integer value containing the container's CPU Shares
+      (i.e. the relative weight vs. other containers).
+    - **CpuPeriod** - The length of a CPU period in microseconds.
+    - **CpuQuota** - Microseconds of CPU time that the container can get in a CPU period.
+    - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use.
+    - **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
+    - **IOMaximumBandwidth** - Maximum IO absolute rate in terms of bytes per second.
+    - **IOMaximumIOps** - Maximum IO absolute rate in terms of IOps.
+    - **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000.
+    - **BlkioWeightDevice** - Block IO weight (relative device weight) in the form of: `"BlkioWeightDevice": [{"Path": "device_path", "Weight": weight}]`
+    - **BlkioDeviceReadBps** - Limit read rate (bytes per second) from a device in the form of: `"BlkioDeviceReadBps": [{"Path": "device_path", "Rate": rate}]`, for example:
+      `"BlkioDeviceReadBps": [{"Path": "/dev/sda", "Rate": "1024"}]`
+    - **BlkioDeviceWriteBps** - Limit write rate (bytes per second) to a device in the form of: `"BlkioDeviceWriteBps": [{"Path": "device_path", "Rate": rate}]`, for example:
+      `"BlkioDeviceWriteBps": [{"Path": "/dev/sda", "Rate": "1024"}]`
+    - **BlkioDeviceReadIOps** - Limit read rate (IO per second) from a device in the form of: `"BlkioDeviceReadIOps": [{"Path": "device_path", "Rate": rate}]`, for example:
+      `"BlkioDeviceReadIOps": [{"Path": "/dev/sda", "Rate": "1000"}]`
+    - **BlkioDeviceWriteIOps** - Limit write rate (IO per second) to a device in the form of: `"BlkioDeviceWriteIOps": [{"Path": "device_path", "Rate": rate}]`, for example:
+      `"BlkioDeviceWriteIOps": [{"Path": "/dev/sda", "Rate": "1000"}]`
+    - **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
+    - **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not.
+    - **OomScoreAdj** - An integer value containing the score given to the container in order to tune OOM killer preferences.
+    - **PidMode** - Set the PID (Process) Namespace mode for the container;
+      `"container:<name|id>"`: joins another container's PID namespace
+      `"host"`: use the host's PID namespace inside the container
+    - **PidsLimit** - Tune a container's pids limit. Set -1 for unlimited.
+    - **PortBindings** - A map of exposed container ports and the host port they
+      should map to. A JSON object in the form
+      `{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`
+      Take note that `port` is specified as a string and not an integer value.
+    - **PublishAllPorts** - Allocates a random host port for all of a container's
+      exposed ports. Specified as a boolean value.
+    - **Privileged** - Gives the container full access to the host. Specified as
+      a boolean value.
+    - **ReadonlyRootfs** - Mount the container's root filesystem as read only.
+      Specified as a boolean value.
+    - **Dns** - A list of DNS servers for the container to use.
+    - **DnsOptions** - A list of DNS options
+    - **DnsSearch** - A list of DNS search domains
+    - **ExtraHosts** - A list of hostnames/IP mappings to add to the
+      container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`.
+    - **VolumesFrom** - A list of volumes to inherit from another container.
+      Specified in the form `<container name>[:<ro|rw>]`
+    - **CapAdd** - A list of kernel capabilities to add to the container.
+    - **CapDrop** - A list of kernel capabilities to drop from the container.
+    - **GroupAdd** - A list of additional groups that the container process will run as
+    - **RestartPolicy** – The behavior to apply when the container exits. The
+      value is an object with a `Name` property of either `"always"` to
+      always restart, `"unless-stopped"` to always restart except when the
+      user has manually stopped the container, or `"on-failure"` to restart only when the container
+      exit code is non-zero. If `on-failure` is used, `MaximumRetryCount`
+      controls the number of times to retry before giving up.
+      The default is not to restart. (optional)
+      An ever-increasing delay (double the previous delay, starting at 100 ms)
+      is added before each restart to prevent flooding the server.
+    - **UsernsMode** - Sets the user namespace mode for the container when user namespace remapping is enabled.
+      Supported values are: `host`.
+    - **NetworkMode** - Sets the networking mode for the container. Supported
+      standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. Any other value is taken
+      as a custom network's name to which this container should connect.
+    - **Devices** - A list of devices to add to the container specified as a JSON object in the
+      form
+      `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}`
+    - **Ulimits** - A list of ulimits to set in the container, specified as
+      `{ "Name": <name>, "Soft": <soft limit>, "Hard": <hard limit> }`, for example:
+      `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }`
+    - **Sysctls** - A list of kernel parameters (sysctls) to set in the container, specified as
+      `{ <name>: <Value> }`, for example:
+      `{ "net.ipv4.ip_forward": "1" }`
+    - **SecurityOpt**: A list of string values to customize labels for MLS
+      systems, such as SELinux.
+    - **StorageOpt**: Storage driver options per container. Options can be passed in the form
+      `{"size":"120G"}`
+    - **LogConfig** - Log configuration for the container, specified as a JSON object in the form
+      `{ "Type": "<driver_name>", "Config": {"key1": "val1"}}`.
+      Available types: `json-file`, `syslog`, `journald`, `gelf`, `fluentd`, `awslogs`, `splunk`, `etwlogs`, `none`.
+    - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist.
+    - **VolumeDriver** - Driver that this container uses to mount volumes.
+    - **ShmSize** - Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB.
+
+**Query parameters**:
+
+- **name** – Assign the specified name to the container. Must
+  match `/?[a-zA-Z0-9_-]+`.
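+
+A minimal Go sketch of creating a container with a small subset of the
+parameters above. The image, command, container name, and Unix-socket
+transport are placeholders and assumptions, not requirements of the API.
+
+```
+package main
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"net"
+	"net/http"
+)
+
+func main() {
+	client := &http.Client{Transport: &http.Transport{
+		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
+		},
+	}}
+
+	// A deliberately small config; most fields documented above are optional.
+	cfg, _ := json.Marshal(map[string]interface{}{
+		"Image": "ubuntu",
+		"Cmd":   []string{"date"},
+		"HostConfig": map[string]interface{}{
+			"RestartPolicy": map[string]interface{}{"Name": "on-failure", "MaximumRetryCount": 2},
+		},
+	})
+	resp, err := client.Post("http://docker/v1.24/containers/create?name=example",
+		"application/json", bytes.NewReader(cfg))
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	var out struct {
+		Id       string
+		Warnings []string
+	}
+	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
+		panic(err)
+	}
+	fmt.Println("created container:", out.Id)
+}
+```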
+ +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such container +- **406** – impossible to attach (container not running) +- **409** – conflict +- **500** – server error + +#### Inspect a container + +`GET /containers/(id or name)/json` + +Return low-level information on the container `id` + +**Example request**: + + GET /v1.24/containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "AppArmorProfile": "", + "Args": [ + "-c", + "exit 9" + ], + "Config": { + "AttachStderr": true, + "AttachStdin": false, + "AttachStdout": true, + "Cmd": [ + "/bin/sh", + "-c", + "exit 9" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": null, + "Hostname": "ba033ac44011", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "MacAddress": "", + "NetworkDisabled": false, + "OnBuild": null, + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": { + "/volumes/data": {} + }, + "WorkingDir": "", + "StopSignal": "SIGTERM" + }, + "Created": "2015-01-06T15:47:31.485331387Z", + "Driver": "devicemapper", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "IOMaximumBandwidth": 0, + "IOMaximumIOps": 0, + "BlkioWeight": 0, + "BlkioWeightDevice": [{}], + "BlkioDeviceReadBps": [{}], + "BlkioDeviceWriteBps": [{}], + "BlkioDeviceReadIOps": [{}], + "BlkioDeviceWriteIOps": [{}], + "CapAdd": null, + "CapDrop": null, + "ContainerIDFile": "", + "CpusetCpus": "", + "CpusetMems": "", + "CpuPercent": 80, + "CpuShares": 0, + "CpuPeriod": 100000, + "Devices": [], + "Dns": null, + "DnsOptions": null, + "DnsSearch": null, + "ExtraHosts": null, + "IpcMode": "", + "Links": null, + "LxcConf": [], + "Memory": 0, + "MemorySwap": 0, + "MemoryReservation": 0, + "KernelMemory": 0, + "OomKillDisable": false, + "OomScoreAdj": 500, + "NetworkMode": "bridge", + "PidMode": "", + "PortBindings": {}, + "Privileged": false, + "ReadonlyRootfs": false, + "PublishAllPorts": false, + "RestartPolicy": { + "MaximumRetryCount": 2, + "Name": "on-failure" + }, + "LogConfig": { + "Config": null, + "Type": "json-file" + }, + "SecurityOpt": null, + "Sysctls": { + "net.ipv4.ip_forward": "1" + }, + "StorageOpt": null, + "VolumesFrom": null, + "Ulimits": [{}], + "VolumeDriver": "", + "ShmSize": 67108864 + }, + "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "SandboxID": "", + "HairpinMode": false, + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "Ports": null, + "SandboxKey": "", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null, + "EndpointID": "", + "Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "MacAddress": "", + "Networks": { + 
"bridge": { + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:12:00:02" + } + } + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Dead": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": true, + "StartedAt": "2015-01-06T15:47:32.072697474Z", + "Status": "running" + }, + "Mounts": [ + { + "Name": "fac362...80535", + "Source": "/data", + "Destination": "/data", + "Driver": "local", + "Mode": "ro,Z", + "RW": false, + "Propagation": "" + } + ] + } + +**Example request, with size information**: + + GET /v1.24/containers/4fa6e0f0c678/json?size=1 HTTP/1.1 + +**Example response, with size information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + .... + "SizeRw": 0, + "SizeRootFs": 972, + .... + } + +**Query parameters**: + +- **size** – 1/True/true or 0/False/false, return container size information. Default is `false`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### List processes running inside a container + +`GET /containers/(id or name)/top` + +List processes running inside the container `id`. On Unix systems this +is done by running the `ps` command. This endpoint is not +supported on Windows. + +**Example request**: + + GET /v1.24/containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD" + ], + "Processes" : [ + [ + "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash" + ], + [ + "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10" + ] + ] + } + +**Example request**: + + GET /v1.24/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND" + ] + "Processes" : [ + [ + "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash" + ], + [ + "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10" + ] + ], + } + +**Query parameters**: + +- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef` + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container logs + +`GET /containers/(id or name)/logs` + +Get `stdout` and `stderr` logs from the container ``id`` + +> **Note**: +> This endpoint works only for containers with the `json-file` or `journald` logging drivers. + +**Example request**: + + GET /v1.24/containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **details** - 1/True/true or 0/False/false, Show extra details provided to logs. 
+- **follow** – 1/True/true or 0/False/false, return stream. Default `false`.
+- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`.
+- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`.
+- **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp
+  will only output log entries since that timestamp. Default: 0 (unfiltered)
+- **timestamps** – 1/True/true or 0/False/false, print timestamps for
+  every log line. Default `false`.
+- **tail** – Output specified number of lines at the end of logs: `all` or `<number>`. Default all.
+
+**Status codes**:
+
+- **101** – no error, hints proxy about hijacking
+- **200** – no error, no upgrade header found
+- **404** – no such container
+- **500** – server error
+
+#### Inspect changes on a container's filesystem
+
+`GET /containers/(id or name)/changes`
+
+Inspect changes on container `id`'s filesystem
+
+**Example request**:
+
+    GET /v1.24/containers/4fa6e0f0c678/changes HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+      {
+        "Path": "/dev",
+        "Kind": 0
+      },
+      {
+        "Path": "/dev/kmsg",
+        "Kind": 1
+      },
+      {
+        "Path": "/test",
+        "Kind": 1
+      }
+    ]
+
+Values for `Kind`:
+
+- `0`: Modify
+- `1`: Add
+- `2`: Delete
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Export a container
+
+`GET /containers/(id or name)/export`
+
+Export the contents of container `id`
+
+**Example request**:
+
+    GET /v1.24/containers/4fa6e0f0c678/export HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/octet-stream
+
+    {% raw %}
+    {{ TAR STREAM }}
+    {% endraw %}
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Get container stats based on resource usage
+
+`GET /containers/(id or name)/stats`
+
+This endpoint returns a live stream of a container's resource usage statistics.
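+As a client-side illustration, here is a hedged Go sketch that requests a single
+snapshot (`stream=0`) and derives a CPU percentage from the `cpu_stats` and
+`precpu_stats` fields shown in the example response below. The Unix socket path
+and the `redis1` container name are assumptions taken from common setups and the
+example; the delta formula mirrors the calculation commonly attributed to the
+docker CLI, not an API guarantee.
+
+```go
+package main
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net"
+	"net/http"
+)
+
+type cpuUsage struct {
+	TotalUsage  uint64   `json:"total_usage"`
+	PercpuUsage []uint64 `json:"percpu_usage"`
+}
+
+type cpuStats struct {
+	CPUUsage       cpuUsage `json:"cpu_usage"`
+	SystemCPUUsage uint64   `json:"system_cpu_usage"`
+}
+
+type statsResponse struct {
+	CPUStats    cpuStats `json:"cpu_stats"`
+	PreCPUStats cpuStats `json:"precpu_stats"`
+}
+
+func main() {
+	// Assumption: the daemon listens on the default Unix socket.
+	client := &http.Client{Transport: &http.Transport{
+		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
+		},
+	}}
+	// stream=0 asks for one snapshot instead of a live stream.
+	resp, err := client.Get("http://unix/v1.24/containers/redis1/stats?stream=0")
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	var s statsResponse
+	if err := json.NewDecoder(resp.Body).Decode(&s); err != nil {
+		panic(err)
+	}
+	// Counters are assumed monotonic between the previous and current read.
+	cpuDelta := float64(s.CPUStats.CPUUsage.TotalUsage - s.PreCPUStats.CPUUsage.TotalUsage)
+	sysDelta := float64(s.CPUStats.SystemCPUUsage - s.PreCPUStats.SystemCPUUsage)
+	if sysDelta > 0 && cpuDelta > 0 {
+		pct := (cpuDelta / sysDelta) * float64(len(s.CPUStats.CPUUsage.PercpuUsage)) * 100.0
+		fmt.Printf("CPU: %.2f%%\n", pct)
+	}
+}
+```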
+
+**Example request**:
+
+    GET /v1.24/containers/redis1/stats HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+      "read" : "2015-01-08T22:57:31.547920715Z",
+      "pids_stats": {
+        "current": 3
+      },
+      "networks": {
+        "eth0": {
+          "rx_bytes": 5338,
+          "rx_dropped": 0,
+          "rx_errors": 0,
+          "rx_packets": 36,
+          "tx_bytes": 648,
+          "tx_dropped": 0,
+          "tx_errors": 0,
+          "tx_packets": 8
+        },
+        "eth5": {
+          "rx_bytes": 4641,
+          "rx_dropped": 0,
+          "rx_errors": 0,
+          "rx_packets": 26,
+          "tx_bytes": 690,
+          "tx_dropped": 0,
+          "tx_errors": 0,
+          "tx_packets": 9
+        }
+      },
+      "memory_stats" : {
+        "stats" : {
+          "total_pgmajfault" : 0,
+          "cache" : 0,
+          "mapped_file" : 0,
+          "total_inactive_file" : 0,
+          "pgpgout" : 414,
+          "rss" : 6537216,
+          "total_mapped_file" : 0,
+          "writeback" : 0,
+          "unevictable" : 0,
+          "pgpgin" : 477,
+          "total_unevictable" : 0,
+          "pgmajfault" : 0,
+          "total_rss" : 6537216,
+          "total_rss_huge" : 6291456,
+          "total_writeback" : 0,
+          "total_inactive_anon" : 0,
+          "rss_huge" : 6291456,
+          "hierarchical_memory_limit" : 67108864,
+          "total_pgfault" : 964,
+          "total_active_file" : 0,
+          "active_anon" : 6537216,
+          "total_active_anon" : 6537216,
+          "total_pgpgout" : 414,
+          "total_cache" : 0,
+          "inactive_anon" : 0,
+          "active_file" : 0,
+          "pgfault" : 964,
+          "inactive_file" : 0,
+          "total_pgpgin" : 477
+        },
+        "max_usage" : 6651904,
+        "usage" : 6537216,
+        "failcnt" : 0,
+        "limit" : 67108864
+      },
+      "blkio_stats" : {},
+      "cpu_stats" : {
+        "cpu_usage" : {
+          "percpu_usage" : [
+            8646879,
+            24472255,
+            36438778,
+            30657443
+          ],
+          "usage_in_usermode" : 50000000,
+          "total_usage" : 100215355,
+          "usage_in_kernelmode" : 30000000
+        },
+        "system_cpu_usage" : 739306590000000,
+        "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0}
+      },
+      "precpu_stats" : {
+        "cpu_usage" : {
+          "percpu_usage" : [
+            8646879,
+            24350896,
+            36438778,
+            30657443
+          ],
+          "usage_in_usermode" : 50000000,
+          "total_usage" : 100093996,
+          "usage_in_kernelmode" : 30000000
+        },
+        "system_cpu_usage" : 9492140000000,
+        "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0}
+      }
+    }
+
+The `precpu_stats` field contains the CPU statistics from the previous read,
+which are used to calculate the CPU usage percentage. It is not an exact copy
+of the `cpu_stats` field.
+
+**Query parameters**:
+
+- **stream** – 1/True/true or 0/False/false, pull stats once then disconnect. Default `true`.
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Resize a container TTY
+
+`POST /containers/(id or name)/resize`
+
+Resize the TTY for container with `id`. The unit is number of characters. You must restart the container for the resize to take effect.
+
+**Example request**:
+
+    POST /v1.24/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Length: 0
+    Content-Type: text/plain; charset=utf-8
+
+**Query parameters**:
+
+- **h** – height of `tty` session
+- **w** – width
+
+**Status codes**:
+
+- **200** – no error
+- **404** – No such container
+- **500** – Cannot resize container
+
+#### Start a container
+
+`POST /containers/(id or name)/start`
+
+Start the container `id`
+
+**Example request**:
+
+    POST /v1.24/containers/e90e34656806/start HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Query parameters**:
+
+- **detachKeys** – Override the key sequence for detaching a
+  container. Format is a single character `[a-Z]` or `ctrl-<value>`
+  where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`.
+
+**Status codes**:
+
+- **204** – no error
+- **304** – container already started
+- **404** – no such container
+- **500** – server error
+
+#### Stop a container
+
+`POST /containers/(id or name)/stop`
+
+Stop the container `id`
+
+**Example request**:
+
+    POST /v1.24/containers/e90e34656806/stop?t=5 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Query parameters**:
+
+- **t** – number of seconds to wait before killing the container
+
+**Status codes**:
+
+- **204** – no error
+- **304** – container already stopped
+- **404** – no such container
+- **500** – server error
+
+#### Restart a container
+
+`POST /containers/(id or name)/restart`
+
+Restart the container `id`
+
+**Example request**:
+
+    POST /v1.24/containers/e90e34656806/restart?t=5 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Query parameters**:
+
+- **t** – number of seconds to wait before killing the container
+
+**Status codes**:
+
+- **204** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Kill a container
+
+`POST /containers/(id or name)/kill`
+
+Kill the container `id`
+
+**Example request**:
+
+    POST /v1.24/containers/e90e34656806/kill HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Query parameters**:
+
+- **signal** - Signal to send to the container: integer or string like `SIGINT`.
+  When not set, `SIGKILL` is assumed and the call waits for the container to exit.
+
+**Status codes**:
+
+- **204** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Update a container
+
+`POST /containers/(id or name)/update`
+
+Update resource configuration of the container `id`.
+
+**Example request**:
+
+    POST /v1.24/containers/e90e34656806/update HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "BlkioWeight": 300,
+      "CpuShares": 512,
+      "CpuPeriod": 100000,
+      "CpuQuota": 50000,
+      "CpusetCpus": "0,1",
+      "CpusetMems": "0",
+      "Memory": 314572800,
+      "MemorySwap": 514288000,
+      "MemoryReservation": 209715200,
+      "KernelMemory": 52428800,
+      "RestartPolicy": {
+        "MaximumRetryCount": 4,
+        "Name": "on-failure"
+      }
+    }
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+      "Warnings": []
+    }
+
+**Status codes**:
+
+- **200** – no error
+- **400** – bad parameter
+- **404** – no such container
+- **500** – server error
+
+#### Rename a container
+
+`POST /containers/(id or name)/rename`
+
+Rename the container `id` to a `new_name`
+
+**Example request**:
+
+    POST /v1.24/containers/e90e34656806/rename?name=new_name HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Query parameters**:
+
+- **name** – new name for the container
+
+**Status codes**:
+
+- **204** – no error
+- **404** – no such container
+- **409** - conflict: name already assigned
+- **500** – server error
+
+#### Pause a container
+
+`POST /containers/(id or name)/pause`
+
+Pause the container `id`
+
+**Example request**:
+
+    POST /v1.24/containers/e90e34656806/pause HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Status codes**:
+
+- **204** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Unpause a container
+
+`POST /containers/(id or name)/unpause`
+
+Unpause the container `id`
+
+**Example request**:
+
+    POST /v1.24/containers/e90e34656806/unpause HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Status codes**:
+
+- **204** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Attach to a container
+
+`POST /containers/(id or name)/attach`
+
+Attach to the container `id`
+
+**Example request**:
+
+    POST /v1.24/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 101 UPGRADED
+    Content-Type: application/vnd.docker.raw-stream
+    Connection: Upgrade
+    Upgrade: tcp
+
+    {% raw %}
+    {{ STREAM }}
+    {% endraw %}
+
+**Query parameters**:
+
+- **detachKeys** – Override the key sequence for detaching a
+  container. Format is a single character `[a-Z]` or `ctrl-<value>`
+  where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`.
+- **logs** – 1/True/true or 0/False/false, return logs. Default `false`.
+- **stream** – 1/True/true or 0/False/false, return stream.
+  Default `false`.
+- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach
+  to `stdin`. Default `false`.
+- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return
+  `stdout` log, if `stream=true`, attach to `stdout`. Default `false`.
+- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return
+  `stderr` log, if `stream=true`, attach to `stderr`. Default `false`.
+
+**Status codes**:
+
+- **101** – no error, hints proxy about hijacking
+- **200** – no error, no upgrade header found
+- **400** – bad parameter
+- **404** – no such container
+- **409** - container is paused
+- **500** – server error
+
+**Stream details**:
+
+When the TTY setting is enabled in
+[`POST /containers/create`
+](#create-a-container),
+the stream is the raw data from the process PTY and the client's `stdin`.
+When the TTY is disabled, the stream is multiplexed to separate
+`stdout` and `stderr`.
+
+The format is a **Header** and a **Payload** (frame).
+
+**HEADER**
+
+The header identifies which stream the payload belongs to (`stdout` or
+`stderr`). It also contains the size of the associated frame, encoded in the
+last four bytes (`uint32`).
+
+It is encoded in the first eight bytes like this:
+
+    header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}
+
+`STREAM_TYPE` can be:
+
+- 0: `stdin` (is written on `stdout`)
+- 1: `stdout`
+- 2: `stderr`
+
+`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of
+the `uint32` size encoded as big endian.
+
+**PAYLOAD**
+
+The payload is the raw stream.
+
+**IMPLEMENTATION**
+
+The simplest way to implement the Attach protocol is the following:
+
+    1. Read eight bytes.
+    2. Choose `stdout` or `stderr` depending on the first byte.
+    3. Extract the frame size from the last four bytes.
+    4. Read the extracted size and output it on the correct output.
+    5. Goto 1.
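+For illustration, here is a minimal Go sketch of those five steps. It is a
+hedged example rather than the canonical implementation: `demux` accepts any
+`io.Reader`, so the `main` below feeds it a hand-built `stdout` frame instead
+of a real hijacked connection.
+
+```go
+package main
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"os"
+)
+
+// demux reads frames until EOF. Header layout, as documented above:
+// byte 0 is the stream type (0=stdin echo, 1=stdout, 2=stderr) and
+// bytes 4..7 hold the big-endian uint32 payload size.
+func demux(conn io.Reader) error {
+	header := make([]byte, 8)
+	for {
+		if _, err := io.ReadFull(conn, header); err != nil {
+			if err == io.EOF {
+				return nil
+			}
+			return err
+		}
+		size := binary.BigEndian.Uint32(header[4:8])
+		var dst io.Writer
+		switch header[0] {
+		case 0, 1:
+			dst = os.Stdout
+		case 2:
+			dst = os.Stderr
+		default:
+			return fmt.Errorf("unknown stream type %d", header[0])
+		}
+		// Copy exactly one frame to the chosen output, then loop.
+		if _, err := io.CopyN(dst, conn, int64(size)); err != nil {
+			return err
+		}
+	}
+}
+
+func main() {
+	// A synthetic frame: type 1 (stdout), size 5, payload "hello".
+	frame := append([]byte{1, 0, 0, 0, 0, 0, 0, 5}, []byte("hello")...)
+	if err := demux(bytes.NewReader(frame)); err != nil {
+		panic(err)
+	}
+}
+```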
+
+#### Attach to a container (websocket)
+
+`GET /containers/(id or name)/attach/ws`
+
+Attach to the container `id` via websocket
+
+Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455)
+
+**Example request**
+
+    GET /v1.24/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1
+
+**Example response**
+
+    {% raw %}
+    {{ STREAM }}
+    {% endraw %}
+
+**Query parameters**:
+
+- **detachKeys** – Override the key sequence for detaching a
+  container. Format is a single character `[a-Z]` or `ctrl-<value>`
+  where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`.
+- **logs** – 1/True/true or 0/False/false, return logs. Default `false`.
+- **stream** – 1/True/true or 0/False/false, return stream.
+  Default `false`.
+- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach
+  to `stdin`. Default `false`.
+- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return
+  `stdout` log, if `stream=true`, attach to `stdout`. Default `false`.
+- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return
+  `stderr` log, if `stream=true`, attach to `stderr`. Default `false`.
+
+**Status codes**:
+
+- **200** – no error
+- **400** – bad parameter
+- **404** – no such container
+- **500** – server error
+
+#### Wait a container
+
+`POST /containers/(id or name)/wait`
+
+Block until container `id` stops, then return the exit code
+
+**Example request**:
+
+    POST /v1.24/containers/16253994b7c4/wait HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {"StatusCode": 0}
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Remove a container
+
+`DELETE /containers/(id or name)`
+
+Remove the container `id` from the filesystem
+
+**Example request**:
+
+    DELETE /v1.24/containers/16253994b7c4?v=1 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Query parameters**:
+
+- **v** – 1/True/true or 0/False/false, Remove the volumes
+  associated to the container. Default `false`.
+- **force** - 1/True/true or 0/False/false, Kill then remove the container.
+  Default `false`.
+- **link** - 1/True/true or 0/False/false, Remove the specified
+  link associated to the container. Default `false`.
+
+**Status codes**:
+
+- **204** – no error
+- **400** – bad parameter
+- **404** – no such container
+- **409** – conflict
+- **500** – server error
+
+#### Retrieving information about files and folders in a container
+
+`HEAD /containers/(id or name)/archive`
+
+See the description of the `X-Docker-Container-Path-Stat` header in the
+following section.
+
+#### Get an archive of a filesystem resource in a container
+
+`GET /containers/(id or name)/archive`
+
+Get a tar archive of a resource in the filesystem of container `id`.
+
+**Query parameters**:
+
+- **path** - resource in the container's filesystem to archive. Required.
+
+  If not an absolute path, it is relative to the container's root directory.
+  The resource specified by **path** must exist. To assert that the resource
+  is expected to be a directory, **path** should end in `/` or `/.`
+  (assuming a path separator of `/`). If **path** ends in `/.` then this
+  indicates that only the contents of the **path** directory should be
+  copied. A symlink is always resolved to its target.
+
+  > **Note**: It is not possible to copy certain system files such as resources
+  > under `/proc`, `/sys`, `/dev`, and mounts created by the user in the
+  > container.
+
+**Example request**:
+
+    GET /v1.24/containers/8cce319429b2/archive?path=/root HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/x-tar
+    X-Docker-Container-Path-Stat: eyJuYW1lIjoicm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oiLCJsaW5rVGFyZ2V0IjoiIn0=
+
+    {% raw %}
+    {{ TAR STREAM }}
+    {% endraw %}
+
+On success, a response header `X-Docker-Container-Path-Stat` will be set to a
+base64-encoded JSON object containing some filesystem header information about
+the archived resource. The above example value would decode to the following
+JSON object (whitespace added for readability):
+
+```json
+{
+    "name": "root",
+    "size": 4096,
+    "mode": 2147484096,
+    "mtime": "2014-02-27T20:51:23Z",
+    "linkTarget": ""
+}
+```
+
+A `HEAD` request can also be made to this endpoint if only this information is
+desired.
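+As a hedged sketch, the Go program below decodes the sample header value from
+the example above into a typed struct. The struct and field names are local to
+the sketch; the sample value decodes identically under standard and URL-safe
+base64 because it contains no URL-unsafe characters, so a tolerant client may
+want to try both.
+
+```go
+package main
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"time"
+)
+
+// pathStat mirrors the decoded JSON object shown above.
+type pathStat struct {
+	Name       string    `json:"name"`
+	Size       int64     `json:"size"`
+	Mode       uint32    `json:"mode"`
+	Mtime      time.Time `json:"mtime"`
+	LinkTarget string    `json:"linkTarget"`
+}
+
+func main() {
+	// The sample X-Docker-Container-Path-Stat value from the example response.
+	const header = "eyJuYW1lIjoicm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oiLCJsaW5rVGFyZ2V0IjoiIn0="
+	raw, err := base64.StdEncoding.DecodeString(header)
+	if err != nil {
+		panic(err)
+	}
+	var st pathStat
+	if err := json.Unmarshal(raw, &st); err != nil {
+		panic(err)
+	}
+	fmt.Printf("%s: %d bytes, mode %o, modified %s\n", st.Name, st.Size, st.Mode, st.Mtime)
+}
+```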
+
+**Status codes**:
+
+- **200** - success, returns archive of copied resource
+- **400** - client error, bad parameter, details in JSON response body, one of:
+    - must specify path parameter (**path** cannot be empty)
+    - not a directory (**path** was asserted to be a directory but exists as a
+      file)
+- **404** - client error, resource not found, one of:
+    - no such container (container `id` does not exist)
+    - no such file or directory (**path** does not exist)
+- **500** - server error
+
+#### Extract an archive of files or folders to a directory in a container
+
+`PUT /containers/(id or name)/archive`
+
+Upload a tar archive to be extracted to a path in the filesystem of container
+`id`.
+
+**Query parameters**:
+
+- **path** - path to a directory in the container
+  to extract the archive's contents into. Required.
+
+  If not an absolute path, it is relative to the container's root directory.
+  The **path** resource must exist.
+- **noOverwriteDirNonDir** - If "1", "true", or "True" then it will be an error
+  if unpacking the given content would cause an existing directory to be
+  replaced with a non-directory and vice versa.
+
+**Example request**:
+
+    PUT /v1.24/containers/8cce319429b2/archive?path=/vol1 HTTP/1.1
+    Content-Type: application/x-tar
+
+    {% raw %}
+    {{ TAR STREAM }}
+    {% endraw %}
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+
+**Status codes**:
+
+- **200** – the content was extracted successfully
+- **400** - client error, bad parameter, details in JSON response body, one of:
+    - must specify path parameter (**path** cannot be empty)
+    - not a directory (**path** should be a directory but exists as a file)
+    - unable to overwrite existing directory with non-directory
+      (if **noOverwriteDirNonDir**)
+    - unable to overwrite existing non-directory with directory
+      (if **noOverwriteDirNonDir**)
+- **403** - client error, permission denied, the volume
+  or container rootfs is marked as read-only.
+- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** resource does not exist) +- **500** – server error + +### 3.2 Images + +#### List Images + +`GET /images/json` + +**Example request**: + + GET /v1.24/images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275, + "Labels": {} + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135, + "Labels": { + "com.example.version": "v1" + } + } + ] + +**Example request, with digest information**: + + GET /v1.24/images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728, + "Labels": {} + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label + - `before`=(`[:]`, `` or ``) + - `since`=(`[:]`, `` or ``) +- **filter** - only return images with the specified name + +#### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /v1.24/build HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1/5..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. 
+
+The archive may include any number of other files,
+which are accessible in the build context (see the [*ADD build
+command*](../reference/builder.md#add)).
+
+The Docker daemon performs a preliminary validation of the `Dockerfile` before
+starting the build, and returns an error if the syntax is incorrect. After that,
+each instruction is run one-by-one until the ID of the new image is output.
+
+The build is canceled if the client drops the connection by quitting
+or being killed.
+
+**Query parameters**:
+
+- **dockerfile** - Path within the build context to the `Dockerfile`. This is
+  ignored if `remote` is specified and points to an external `Dockerfile`.
+- **t** – A name and optional tag to apply to the image in the `name:tag` format.
+  If you omit the `tag` the default `latest` value is assumed.
+  You can provide one or more `t` parameters.
+- **remote** – A Git repository URI or HTTP/HTTPS context URI. If the
+  URI points to a single text file, the file's contents are placed into
+  a file called `Dockerfile` and the image is built from that file. If
+  the URI points to a tarball, the file is downloaded by the daemon and
+  the contents therein used as the context for the build. If the URI
+  points to a tarball and the `dockerfile` parameter is also specified,
+  there must be a file with the corresponding path inside the tarball.
+- **q** – Suppress verbose build output.
+- **nocache** – Do not use the cache when building the image.
+- **pull** - Attempt to pull the image even if an older image exists locally.
+- **rm** - Remove intermediate containers after a successful build (default behavior).
+- **forcerm** - Always remove intermediate containers (includes `rm`).
+- **memory** - Set memory limit for build.
+- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap.
+- **cpushares** - CPU shares (relative weight).
+- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`).
+- **cpuperiod** - The length of a CPU period in microseconds.
+- **cpuquota** - Microseconds of CPU time that the container can get in a CPU period.
+- **buildargs** – JSON map of string pairs for build-time variables. Users pass
+  these values at build-time. Docker uses the `buildargs` as the environment
+  context for command(s) run via the Dockerfile's `RUN` instruction or for
+  variable expansion in other Dockerfile instructions. This is not meant for
+  passing secret values. [Read more about the buildargs instruction](../reference/builder.md#arg)
+- **shmsize** - Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB.
+- **labels** – JSON map of string pairs for labels to set on the image.
+
+**Request Headers**:
+
+- **Content-type** – Set to `"application/x-tar"`.
+- **X-Registry-Config** – A base64-url-safe-encoded Registry Auth Config JSON
+  object with the following structure:
+
+        {
+            "docker.example.com": {
+                "username": "janedoe",
+                "password": "hunter2"
+            },
+            "https://index.docker.io/v1/": {
+                "username": "mobydock",
+                "password": "conta1n3rize14"
+            }
+        }
+
+  This object maps the hostname of a registry to an object containing the
+  "username" and "password" for that registry. Multiple registries may
+  be specified as the build may be based on an image requiring
+  authentication to pull from any arbitrary registry. Only the registry
+  domain name (and port if not the default "443") is required.
However + (for legacy reasons) the "official" Docker, Inc. hosted registry must + be specified with both a "https://" prefix and a "/v1/" suffix even + though Docker will prefer to use the v2 registry API. + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.24/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. The name may include a tag or + digest. This parameter may only be used when pulling an image. + The pull is cancelled if the HTTP connection is closed. +- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. + This parameter may only be used when importing an image. +- **repo** – Repository name given to an image when it is imported. + The repo may include a tag. This parameter may only be used when importing + an image. +- **tag** – Tag or digest. If empty when pulling an image, this causes all tags + for the given image to be pulled. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either login information, or a token + - Credential based login: + + ``` + { + "username": "jdoe", + "password": "secret", + "email": "jdoe@acme.com" + } + ``` + + - Token based login: + + ``` + { + "identitytoken": "9cbaf023786cd7..." 
+ } + ``` + +**Status codes**: + +- **200** – no error +- **404** - repository does not exist or no read access +- **500** – server error + + + +#### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /v1.24/images/example/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id" : "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c", + "Container" : "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a", + "Comment" : "", + "Os" : "linux", + "Architecture" : "amd64", + "Parent" : "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "ContainerConfig" : { + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Domainname" : "", + "AttachStdout" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "NetworkDisabled" : false, + "OnBuild" : [], + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "User" : "", + "WorkingDir" : "", + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "Labels" : { + "com.example.license" : "GPL", + "com.example.version" : "1.0", + "com.example.vendor" : "Acme" + }, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts" : null, + "Cmd" : [ + "/bin/sh", + "-c", + "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" + ] + }, + "DockerVersion" : "1.9.0-dev", + "VirtualSize" : 188359297, + "Size" : 0, + "Author" : "", + "Created" : "2015-09-10T08:30:53.26995814Z", + "GraphDriver" : { + "Name" : "aufs", + "Data" : null + }, + "RepoDigests" : [ + "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags" : [ + "example:1.0", + "example:latest", + "example:stable" + ], + "Config" : { + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "NetworkDisabled" : false, + "OnBuild" : [], + "StdinOnce" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "Domainname" : "", + "AttachStdout" : false, + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Cmd" : [ + "/bin/bash" + ], + "ExposedPorts" : null, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Labels" : { + "com.example.vendor" : "Acme", + "com.example.version" : "1.0", + "com.example.license" : "GPL" + }, + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "WorkingDir" : "", + "User" : "" + }, + "RootFS": { + "Type": "layers", + "Layers": [ + "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6", + "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + ] + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /v1.24/images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710", + "Created": 1398108230, + "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /", + "Tags": [ + "ubuntu:lucid", + "ubuntu:10.04" + ], + "Size": 
182964289,
+        "Comment": ""
+      },
+      {
+        "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8",
+        "Created": 1398108222,
+        "CreatedBy": "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/",
+        "Tags": null,
+        "Size": 0,
+        "Comment": ""
+      },
+      {
+        "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158",
+        "Created": 1371157430,
+        "CreatedBy": "",
+        "Tags": [
+          "scratch12:latest",
+          "scratch:latest"
+        ],
+        "Size": 0,
+        "Comment": "Imported from -"
+      }
+    ]
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such image
+- **500** – server error
+
+#### Push an image on the registry
+
+`POST /images/(name)/push`
+
+Push the image `name` on the registry
+
+**Example request**:
+
+    POST /v1.24/images/test/push HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {"status": "Pushing..."}
+    {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}
+    {"error": "Invalid..."}
+    ...
+
+To push an image to a private registry, that image must already be tagged
+into a repository which references that registry `hostname` and `port`. This repository name should
+then be used in the URL. This duplicates the command line's flow.
+
+The push is cancelled if the HTTP connection is closed.
+
+**Example request**:
+
+    POST /v1.24/images/registry.acme.com:5000/test/push HTTP/1.1
+
+**Query parameters**:
+
+- **tag** – The tag to associate with the image on the registry. This is optional.
+
+**Request Headers**:
+
+- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either login information, or a token
+    - Credential based login:
+
+        ```
+        {
+            "username": "jdoe",
+            "password": "secret",
+            "email": "jdoe@acme.com"
+        }
+        ```
+
+    - Identity token based login:
+
+        ```
+        {
+            "identitytoken": "9cbaf023786cd7..."
+        }
+        ```
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such image
+- **500** – server error
+
+#### Tag an image into a repository
+
+`POST /images/(name)/tag`
+
+Tag the image `name` into a repository
+
+**Example request**:
+
+    POST /v1.24/images/test/tag?repo=myrepo&tag=v42 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+
+**Query parameters**:
+
+- **repo** – The repository to tag in
+- **tag** - The new tag name
+
+**Status codes**:
+
+- **201** – no error
+- **400** – bad parameter
+- **404** – no such image
+- **409** – conflict
+- **500** – server error
+
+#### Remove an image
+
+`DELETE /images/(name)`
+
+Remove the image `name` from the filesystem
+
+**Example request**:
+
+    DELETE /v1.24/images/test HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-type: application/json
+
+    [
+      {"Untagged": "3e2f21a89f"},
+      {"Deleted": "3e2f21a89f"},
+      {"Deleted": "53b4f83ac9"}
+    ]
+
+**Query parameters**:
+
+- **force** – 1/True/true or 0/False/false, default false
+- **noprune** – 1/True/true or 0/False/false, default false
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such image
+- **409** – conflict
+- **500** – server error
+
+#### Search images
+
+`GET /images/search`
+
+Search for an image on [Docker Hub](https://hub.docker.com).
+
+> **Note**:
+> The response keys have changed from API v1.6 to reflect the JSON
+> sent by the registry server to the docker daemon's request.
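+As a hedged consumption sketch, the Go program below issues the search request
+shown in the example that follows and decodes the result array into a struct
+whose fields mirror the response keys. The `localhost:2375` TCP address is an
+assumption for illustration.
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+)
+
+// searchResult mirrors the JSON objects in the example response below.
+type searchResult struct {
+	Description string `json:"description"`
+	IsOfficial  bool   `json:"is_official"`
+	IsAutomated bool   `json:"is_automated"`
+	Name        string `json:"name"`
+	StarCount   int    `json:"star_count"`
+}
+
+func main() {
+	u := "http://localhost:2375/v1.24/images/search?term=" + url.QueryEscape("sshd")
+	resp, err := http.Get(u)
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	var results []searchResult
+	if err := json.NewDecoder(resp.Body).Decode(&results); err != nil {
+		panic(err)
+	}
+	for _, r := range results {
+		fmt.Printf("%-30s stars=%d official=%v\n", r.Name, r.StarCount, r.IsOfficial)
+	}
+}
+```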
+
+**Example request**:
+
+    GET /v1.24/images/search?term=sshd HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+      {
+        "description": "",
+        "is_official": false,
+        "is_automated": false,
+        "name": "wma55/u1210sshd",
+        "star_count": 0
+      },
+      {
+        "description": "",
+        "is_official": false,
+        "is_automated": false,
+        "name": "jdswinbank/sshd",
+        "star_count": 0
+      },
+      {
+        "description": "",
+        "is_official": false,
+        "is_automated": false,
+        "name": "vgauthier/sshd",
+        "star_count": 0
+      }
+      ...
+    ]
+
+**Query parameters**:
+
+- **term** – term to search
+- **limit** – maximum returned search results
+- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters:
+  - `stars=<number>`
+  - `is-automated=(true|false)`
+  - `is-official=(true|false)`
+
+**Status codes**:
+
+- **200** – no error
+- **500** – server error
+
+### 3.3 Misc
+
+#### Check auth configuration
+
+`POST /auth`
+
+Validate credentials for a registry and, if available, get an identity token
+for accessing the registry without a password.
+
+**Example request**:
+
+    POST /v1.24/auth HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "username": "hannibal",
+      "password": "xxxx",
+      "serveraddress": "https://index.docker.io/v1/"
+    }
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+
+    {
+      "Status": "Login Succeeded",
+      "IdentityToken": "9cbaf023786cd7..."
+    }
+
+**Status codes**:
+
+- **200** – no error
+- **204** – no error
+- **500** – server error
+
+#### Display system-wide information
+
+`GET /info`
+
+Display system-wide information
+
+**Example request**:
+
+    GET /v1.24/info HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+      "Architecture": "x86_64",
+      "ClusterStore": "etcd://localhost:2379",
+      "CgroupDriver": "cgroupfs",
+      "Containers": 11,
+      "ContainersRunning": 7,
+      "ContainersStopped": 3,
+      "ContainersPaused": 1,
+      "CpuCfsPeriod": true,
+      "CpuCfsQuota": true,
+      "Debug": false,
+      "DockerRootDir": "/var/lib/docker",
+      "Driver": "btrfs",
+      "DriverStatus": [[""]],
+      "ExperimentalBuild": false,
+      "HttpProxy": "http://test:test@localhost:8080",
+      "HttpsProxy": "https://test:test@localhost:8080",
+      "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS",
+      "IPv4Forwarding": true,
+      "Images": 16,
+      "IndexServerAddress": "https://index.docker.io/v1/",
+      "InitPath": "/usr/bin/docker",
+      "InitSha1": "",
+      "KernelMemory": true,
+      "KernelVersion": "3.12.0-1-amd64",
+      "Labels": [
+        "storage=ssd"
+      ],
+      "MemTotal": 2099236864,
+      "MemoryLimit": true,
+      "NCPU": 1,
+      "NEventsListener": 0,
+      "NFd": 11,
+      "NGoroutines": 21,
+      "Name": "prod-server-42",
+      "NoProxy": "9.81.1.160",
+      "OomKillDisable": true,
+      "OSType": "linux",
+      "OperatingSystem": "Boot2Docker",
+      "Plugins": {
+        "Volume": [
+          "local"
+        ],
+        "Network": [
+          "null",
+          "host",
+          "bridge"
+        ]
+      },
+      "RegistryConfig": {
+        "IndexConfigs": {
+          "docker.io": {
+            "Mirrors": null,
+            "Name": "docker.io",
+            "Official": true,
+            "Secure": true
+          }
+        },
+        "InsecureRegistryCIDRs": [
+          "127.0.0.0/8"
+        ]
+      },
+      "SecurityOptions": [
+        "apparmor",
+        "seccomp",
+        "selinux"
+      ],
+      "ServerVersion": "1.9.0",
+      "SwapLimit": false,
+      "SystemStatus": [["State", "Healthy"]],
+      "SystemTime": "2015-03-10T11:11:23.730591467-07:00"
+    }
+
+**Status codes**:
+
+- **200** – no error
+- **500** – server error
+
+#### Show the docker version information
+
+`GET /version`
+
+Show the docker version information
+
+**Example request**:
+
+    GET /v1.24/version HTTP/1.1
+**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.12.0", + "Os": "linux", + "KernelVersion": "3.19.0-23-generic", + "GoVersion": "go1.6.3", + "GitCommit": "deadbee", + "Arch": "amd64", + "ApiVersion": "1.24", + "BuildTime": "2016-06-14T07:09:13.444803460+00:00", + "Experimental": true + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /v1.24/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.24/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ], + "Labels": { + "key1": "value1", + "key2": "value2" + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") +- **pause** – 1/True/true or 0/False/false, whether to pause the container before committing +- **changes** – Dockerfile instructions to apply while committing + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **500** – server error + +#### Monitor Docker's events + +`GET /events` + +Get container events from docker, in real time via streaming. 
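+The stream is a sequence of JSON objects, one per event, which a streaming JSON
+decoder can consume directly. The hedged Go sketch below follows the stream and
+prints one line per event; the `localhost:2375` address and the `since` value
+(taken from the example request further down) are assumptions for illustration.
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+)
+
+// event captures the fields common to the sample events shown below.
+type event struct {
+	Type   string `json:"Type"`
+	Action string `json:"Action"`
+	Actor  struct {
+		ID         string            `json:"ID"`
+		Attributes map[string]string `json:"Attributes"`
+	} `json:"Actor"`
+	Time int64 `json:"time"`
+}
+
+func main() {
+	resp, err := http.Get("http://localhost:2375/v1.24/events?since=1374067924")
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	// json.Decoder handles the back-to-back JSON objects in the stream.
+	dec := json.NewDecoder(resp.Body)
+	for {
+		var e event
+		if err := dec.Decode(&e); err == io.EOF {
+			return
+		} else if err != nil {
+			panic(err)
+		}
+		fmt.Printf("%d %s %s %s\n", e.Time, e.Type, e.Action, e.Actor.Attributes["name"])
+	}
+}
+```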
+
+Docker containers report the following events:
+
+    attach, commit, copy, create, destroy, detach, die, exec_create, exec_detach, exec_start, export, health_status, kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update
+
+Docker images report the following events:
+
+    delete, import, load, pull, push, save, tag, untag
+
+Docker volumes report the following events:
+
+    create, mount, unmount, destroy
+
+Docker networks report the following events:
+
+    create, connect, disconnect, destroy
+
+The Docker daemon reports the following event:
+
+    reload
+
+**Example request**:
+
+    GET /v1.24/events?since=1374067924
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+    Server: Docker/1.12.0 (linux)
+    Date: Fri, 29 Apr 2016 15:18:06 GMT
+    Transfer-Encoding: chunked
+
+    {
+      "status": "pull",
+      "id": "alpine:latest",
+      "Type": "image",
+      "Action": "pull",
+      "Actor": {
+        "ID": "alpine:latest",
+        "Attributes": {
+          "name": "alpine"
+        }
+      },
+      "time": 1461943101,
+      "timeNano": 1461943101301854122
+    }
+    {
+      "status": "create",
+      "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743",
+      "from": "alpine",
+      "Type": "container",
+      "Action": "create",
+      "Actor": {
+        "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743",
+        "Attributes": {
+          "com.example.some-label": "some-label-value",
+          "image": "alpine",
+          "name": "my-container"
+        }
+      },
+      "time": 1461943101,
+      "timeNano": 1461943101381709551
+    }
+    {
+      "status": "attach",
+      "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743",
+      "from": "alpine",
+      "Type": "container",
+      "Action": "attach",
+      "Actor": {
+        "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743",
+        "Attributes": {
+          "com.example.some-label": "some-label-value",
+          "image": "alpine",
+          "name": "my-container"
+        }
+      },
+      "time": 1461943101,
+      "timeNano": 1461943101383858412
+    }
+    {
+      "Type": "network",
+      "Action": "connect",
+      "Actor": {
+        "ID": "7dc8ac97d5d29ef6c31b6052f3938c1e8f2749abbd17d1bd1febf2608db1b474",
+        "Attributes": {
+          "container": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743",
+          "name": "bridge",
+          "type": "bridge"
+        }
+      },
+      "time": 1461943101,
+      "timeNano": 1461943101394865557
+    }
+    {
+      "status": "start",
+      "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743",
+      "from": "alpine",
+      "Type": "container",
+      "Action": "start",
+      "Actor": {
+        "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743",
+        "Attributes": {
+          "com.example.some-label": "some-label-value",
+          "image": "alpine",
+          "name": "my-container"
+        }
+      },
+      "time": 1461943101,
+      "timeNano": 1461943101607533796
+    }
+    {
+      "status": "resize",
+      "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743",
+      "from": "alpine",
+      "Type": "container",
+      "Action": "resize",
+      "Actor": {
+        "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743",
+        "Attributes": {
+          "com.example.some-label": "some-label-value",
+          "height": "46",
+          "image": "alpine",
+          "name": "my-container",
+          "width": "204"
+        }
+      },
+      "time": 1461943101,
+      "timeNano": 1461943101610269268
+    }
+    {
+      "status": "die",
+      "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743",
+      "from": "alpine",
+      "Type": "container",
+      "Action": "die",
+      "Actor": {
+        "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743",
+        "Attributes": {
+          "com.example.some-label": "some-label-value",
+          "exitCode": "0",
+          "image": "alpine",
+          "name": "my-container"
"my-container" + } + }, + "time": 1461943105, + "timeNano": 1461943105079144137 + } + { + "Type": "network", + "Action": "disconnect", + "Actor": { + "ID": "7dc8ac97d5d29ef6c31b6052f3938c1e8f2749abbd17d1bd1febf2608db1b474", + "Attributes": { + "container": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "name": "bridge", + "type": "bridge" + } + }, + "time": 1461943105, + "timeNano": 1461943105230860245 + } + { + "status": "destroy", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "destroy", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943105, + "timeNano": 1461943105338056026 + } + +**Query parameters**: + +- **since** – Timestamp. Show all events created since timestamp and then stream +- **until** – Timestamp. Show events created until given timestamp and stop streaming +- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: + - `container=`; -- container to filter + - `event=`; -- event to filter + - `image=`; -- image to filter + - `label=`; -- image and container label to filter + - `type=`; -- either `container` or `image` or `volume` or `network` or `daemon` + - `volume=`; -- volume to filter + - `network=`; -- network to filter + - `daemon=`; -- daemon name or id to filter + +**Status codes**: + +- **200** – no error +- **400** - bad parameter +- **500** – server error + +#### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.24/images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.24/images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. +See the [image tarball format](#image-tarball-format) for more details. 
+
+**Example request**
+
+    POST /v1.24/images/load
+    Content-Type: application/x-tar
+
+    Tarball in body
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+    Transfer-Encoding: chunked
+
+    {"status":"Loading layer","progressDetail":{"current":32768,"total":1292800},"progress":"[= ] 32.77 kB/1.293 MB","id":"8ac8bfaff55a"}
+    {"status":"Loading layer","progressDetail":{"current":65536,"total":1292800},"progress":"[== ] 65.54 kB/1.293 MB","id":"8ac8bfaff55a"}
+    {"status":"Loading layer","progressDetail":{"current":98304,"total":1292800},"progress":"[=== ] 98.3 kB/1.293 MB","id":"8ac8bfaff55a"}
+    {"status":"Loading layer","progressDetail":{"current":131072,"total":1292800},"progress":"[===== ] 131.1 kB/1.293 MB","id":"8ac8bfaff55a"}
+    ...
+    {"stream":"Loaded image: busybox:latest\n"}
+
+**Example response**:
+
+If the "quiet" query parameter is set to `true` / `1` (`?quiet=1`), progress
+details are suppressed, and only a confirmation message is returned once the
+action completes.
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+    Transfer-Encoding: chunked
+
+    {"stream":"Loaded image: busybox:latest\n"}
+
+**Query parameters**:
+
+- **quiet** – Boolean value, suppress progress details during load. Defaults
+  to `0` / `false` if omitted.
+
+**Status codes**:
+
+- **200** – no error
+- **500** – server error
+
+#### Image tarball format
+
+An image tarball contains one directory per image layer (named using its long ID),
+each containing these files:
+
+- `VERSION`: currently `1.0` - the file format version
+- `json`: detailed layer information, similar to `docker inspect layer_id`
+- `layer.tar`: A tarfile containing the filesystem changes in this layer
+
+The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories
+for storing attribute changes and deletions.
+
+If the tarball defines a repository, the tarball should also include a `repositories` file at
+the root that contains a list of repository and tag names mapped to layer IDs.
+
+```
+{"hello-world":
+    {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"}
+}
+```
+
+#### Exec Create
+
+`POST /containers/(id or name)/exec`
+
+Sets up an exec instance in a running container `id`. A request sketch follows
+the parameter list below.
+
+**Example request**:
+
+    POST /v1.24/containers/e90e34656806/exec HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "AttachStdin": true,
+      "AttachStdout": true,
+      "AttachStderr": true,
+      "Cmd": ["sh"],
+      "DetachKeys": "ctrl-p,ctrl-q",
+      "Privileged": true,
+      "Tty": true,
+      "User": "123:456"
+    }
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: application/json
+
+    {
+      "Id": "f90e34656806",
+      "Warnings":[]
+    }
+
+**JSON parameters**:
+
+- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command.
+- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command.
+- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command.
+- **DetachKeys** – Override the key sequence for detaching a
+  container. Format is a single character `[a-Z]` or `ctrl-<value>`
+  where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`.
+- **Tty** - Boolean value to allocate a pseudo-TTY.
+- **Cmd** - Command to run specified as a string or an array of strings.
+- **Privileged** - Boolean value, runs the exec process with extended privileges.
+- **User** - A string value specifying the user, and optionally, group to run
+  the exec process inside the container. Format is one of: `"user"`,
+  `"user:group"`, `"uid"`, or `"uid:gid"`.
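+The hedged Go sketch below shows the two-step flow: create the exec instance,
+read back its `Id`, then start it with `Detach: true` so a plain HTTP round
+trip suffices (no stream hijacking). The container id `e90e34656806` comes from
+the example above; the `localhost:2375` address is an assumption.
+
+```go
+package main
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"net/http"
+)
+
+func main() {
+	// Step 1: create the exec instance inside the running container.
+	createBody := []byte(`{"AttachStdout": true, "AttachStderr": true, "Cmd": ["date"]}`)
+	resp, err := http.Post("http://localhost:2375/v1.24/containers/e90e34656806/exec",
+		"application/json", bytes.NewReader(createBody))
+	if err != nil {
+		panic(err)
+	}
+	var created struct {
+		Id       string
+		Warnings []string
+	}
+	if err := json.NewDecoder(resp.Body).Decode(&created); err != nil {
+		panic(err)
+	}
+	resp.Body.Close()
+
+	// Step 2: start it. Detach:true makes the daemon run the command without
+	// hijacking the connection, so no stream handling is needed here.
+	startBody := []byte(`{"Detach": true, "Tty": false}`)
+	resp, err = http.Post("http://localhost:2375/v1.24/exec/"+created.Id+"/start",
+		"application/json", bytes.NewReader(startBody))
+	if err != nil {
+		panic(err)
+	}
+	resp.Body.Close()
+	fmt.Println("started exec", created.Id)
+}
+```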
+
+**Status codes**:
+
+- **201** – no error
+- **404** – no such container
+- **409** - container is paused
+- **500** - server error
+
+#### Exec Start
+
+`POST /exec/(id)/start`
+
+Starts a previously set up `exec` instance `id`. If `detach` is true, this API
+returns after starting the `exec` command. Otherwise, this API sets up an
+interactive session with the `exec` command.
+
+**Example request**:
+
+    POST /v1.24/exec/e90e34656806/start HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "Detach": false,
+      "Tty": false
+    }
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/vnd.docker.raw-stream
+
+    {% raw %}
+    {{ STREAM }}
+    {% endraw %}
+
+**JSON parameters**:
+
+- **Detach** - Detach from the `exec` command.
+- **Tty** - Boolean value to allocate a pseudo-TTY.
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such exec instance
+- **409** - container is paused
+
+**Stream details**:
+
+Similar to the stream behavior of the `POST /containers/(id or name)/attach` API
+
+#### Exec Resize
+
+`POST /exec/(id)/resize`
+
+Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters.
+This API is valid only if `tty` was specified as part of creating and starting the `exec` command.
+
+**Example request**:
+
+    POST /v1.24/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1
+    Content-Type: text/plain
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: text/plain
+
+**Query parameters**:
+
+- **h** – height of `tty` session
+- **w** – width
+
+**Status codes**:
+
+- **201** – no error
+- **404** – no such exec instance
+
+#### Exec Inspect
+
+`GET /exec/(id)/json`
+
+Return low-level information about the `exec` command `id`.
+
+**Example request**:
+
+    GET /v1.24/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+      "CanRemove": false,
+      "ContainerID": "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126",
+      "DetachKeys": "",
+      "ExitCode": 2,
+      "ID": "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b",
+      "OpenStderr": true,
+      "OpenStdin": true,
+      "OpenStdout": true,
+      "ProcessConfig": {
+        "arguments": [
+          "-c",
+          "exit 2"
+        ],
+        "entrypoint": "sh",
+        "privileged": false,
+        "tty": true,
+        "user": "1000"
+      },
+      "Running": false
+    }
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such exec instance
+- **500** - server error
+
+### 3.4 Volumes
+
+#### List volumes
+
+`GET /volumes`
+
+**Example request**:
+
+    GET /v1.24/volumes HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+      "Volumes": [
+        {
+          "Name": "tardis",
+          "Driver": "local",
+          "Mountpoint": "/var/lib/docker/volumes/tardis",
+          "Labels": null,
+          "Scope": "local"
+        }
+      ],
+      "Warnings": []
+    }
+
+**Query parameters**:
+
+- **filters** - JSON encoded value of the filters (a `map[string][]string`) to process on the volumes list. Available filters:
+  - `name=<volume-name>` Matches all or part of a volume name.
+  - `dangling=<boolean>` When set to `true` (or `1`), returns all volumes that are "dangling" (not in use by a container). When set to `false` (or `0`), only volumes that are in use by one or more containers are returned.
+  - `driver=<volume-driver-name>` Matches all or part of a volume driver name.
+
+**Status codes**:
+
+- **200** - no error
+- **500** - server error
+
+#### Create a volume
+
+`POST /volumes/create`
+
+Create a volume
+
+**Example request**:
+
+    POST /v1.24/volumes/create HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "Name": "tardis",
+      "Labels": {
+        "com.example.some-label": "some-value",
+        "com.example.some-other-label": "some-other-value"
+      },
+      "Driver": "custom"
+    }
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: application/json
+
+    {
+      "Name": "tardis",
+      "Driver": "custom",
+      "Mountpoint": "/var/lib/docker/volumes/tardis",
+      "Status": {
+        "hello": "world"
+      },
+      "Labels": {
+        "com.example.some-label": "some-value",
+        "com.example.some-other-label": "some-other-value"
+      },
+      "Scope": "local"
+    }
+
+**Status codes**:
+
+- **201** - no error
+- **500** - server error
+
+**JSON parameters**:
+
+- **Name** - The new volume's name. If not specified, Docker generates a name.
+- **Driver** - Name of the volume driver to use. Defaults to `local`.
+- **DriverOpts** - A mapping of driver options and values. These options are
+  passed directly to the driver and are driver specific.
+- **Labels** - Labels to set on the volume, specified as a map: `{"key":"value","key2":"value2"}`
+
+**JSON fields in response**:
+
+Refer to the [inspect a volume](#inspect-a-volume) section for details about the
+JSON fields returned in the response.
+
+#### Inspect a volume
+
+`GET /volumes/(name)`
+
+Return low-level information on the volume `name`
+
+**Example request**:
+
+    GET /v1.24/volumes/tardis
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+      "Name": "tardis",
+      "Driver": "custom",
+      "Mountpoint": "/var/lib/docker/volumes/tardis/_data",
+      "Status": {
+        "hello": "world"
+      },
+      "Labels": {
+        "com.example.some-label": "some-value",
+        "com.example.some-other-label": "some-other-value"
+      },
+      "Scope": "local"
+    }
+
+**Status codes**:
+
+- **200** - no error
+- **404** - no such volume
+- **500** - server error
+
+**JSON fields in response**:
+
+The following fields can be returned in the API response. Empty fields, or
+fields that are not supported by the volume's driver, may be omitted in the
+response.
+
+- **Name** - Name of the volume.
+- **Driver** - Name of the volume driver used by the volume.
+- **Mountpoint** - Mount path of the volume on the host.
+- **Status** - Low-level details about the volume, provided by the volume driver.
+  Details are returned as a map with key/value pairs: `{"key":"value","key2":"value2"}`.
+  The `Status` field is optional, and is omitted if the volume driver does not
+  support this feature.
+- **Labels** - Labels set on the volume, specified as a map: `{"key":"value","key2":"value2"}`.
+- **Scope** - Scope describes the level at which the volume exists; it can be one of
+  `global` for cluster-wide, or `local` for machine level. The default is `local`.
+
+#### Remove a volume
+
+`DELETE /volumes/(name)`
+
+Instruct the driver to remove the volume (`name`).
+ +**Example request**: + + DELETE /v1.24/volumes/tardis HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** - no error +- **404** - no such volume or volume driver +- **409** - volume is in use and cannot be removed +- **500** - server error + +### 3.5 Networks + +#### List networks + +`GET /networks` + +**Example request**: + + GET /v1.24/networks?filters={"type":{"custom":true}} HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +[ + { + "Name": "bridge", + "Id": "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566", + "Scope": "local", + "Driver": "bridge", + "EnableIPv6": false, + "Internal": false, + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.17.0.0/16" + } + ] + }, + "Containers": { + "39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867": { + "EndpointID": "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda", + "MacAddress": "02:42:ac:11:00:02", + "IPv4Address": "172.17.0.2/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + } + }, + { + "Name": "none", + "Id": "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794", + "Scope": "local", + "Driver": "null", + "EnableIPv6": false, + "Internal": false, + "IPAM": { + "Driver": "default", + "Config": [] + }, + "Containers": {}, + "Options": {} + }, + { + "Name": "host", + "Id": "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e", + "Scope": "local", + "Driver": "host", + "EnableIPv6": false, + "Internal": false, + "IPAM": { + "Driver": "default", + "Config": [] + }, + "Containers": {}, + "Options": {} + } +] +``` + +**Query parameters**: + +- **filters** - JSON encoded network list filter. The filter value is one of: + - `driver=` Matches a network's driver. + - `id=` Matches all or part of a network id. + - `label=` or `label==` of a network label. + - `name=` Matches all or part of a network name. + - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. 
+
+**Status codes**:
+
+- **200** - no error
+- **500** - server error
+
+#### Inspect network
+
+`GET /networks/(id or name)`
+
+Return low-level information on the network `id`
+
+**Example request**:
+
+    GET /v1.24/networks/7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99 HTTP/1.1
+
+**Example response**:
+
+```
+HTTP/1.1 200 OK
+Content-Type: application/json
+
+{
+  "Name": "net01",
+  "Id": "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99",
+  "Scope": "local",
+  "Driver": "bridge",
+  "EnableIPv6": false,
+  "IPAM": {
+    "Driver": "default",
+    "Config": [
+      {
+        "Subnet": "172.19.0.0/16",
+        "Gateway": "172.19.0.1"
+      }
+    ],
+    "Options": {
+      "foo": "bar"
+    }
+  },
+  "Internal": false,
+  "Containers": {
+    "19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c": {
+      "Name": "test",
+      "EndpointID": "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a",
+      "MacAddress": "02:42:ac:13:00:02",
+      "IPv4Address": "172.19.0.2/16",
+      "IPv6Address": ""
+    }
+  },
+  "Options": {
+    "com.docker.network.bridge.default_bridge": "true",
+    "com.docker.network.bridge.enable_icc": "true",
+    "com.docker.network.bridge.enable_ip_masquerade": "true",
+    "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
+    "com.docker.network.bridge.name": "docker0",
+    "com.docker.network.driver.mtu": "1500"
+  },
+  "Labels": {
+    "com.example.some-label": "some-value",
+    "com.example.some-other-label": "some-other-value"
+  }
+}
+```
+
+**Status codes**:
+
+- **200** - no error
+- **404** - network not found
+- **500** - server error
+
+#### Create a network
+
+`POST /networks/create`
+
+Create a network
+
+**Example request**:
+
+```
+POST /v1.24/networks/create HTTP/1.1
+Content-Type: application/json
+
+{
+  "Name":"isolated_nw",
+  "CheckDuplicate":true,
+  "Driver":"bridge",
+  "EnableIPv6": true,
+  "IPAM":{
+    "Driver": "default",
+    "Config":[
+      {
+        "Subnet":"172.20.0.0/16",
+        "IPRange":"172.20.10.0/24",
+        "Gateway":"172.20.10.11"
+      },
+      {
+        "Subnet":"2001:db8:abcd::/64",
+        "Gateway":"2001:db8:abcd::1011"
+      }
+    ],
+    "Options": {
+      "foo": "bar"
+    }
+  },
+  "Internal":true,
+  "Options": {
+    "com.docker.network.bridge.default_bridge": "true",
+    "com.docker.network.bridge.enable_icc": "true",
+    "com.docker.network.bridge.enable_ip_masquerade": "true",
+    "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
+    "com.docker.network.bridge.name": "docker0",
+    "com.docker.network.driver.mtu": "1500"
+  },
+  "Labels": {
+    "com.example.some-label": "some-value",
+    "com.example.some-other-label": "some-other-value"
+  }
+}
+```
+
+**Example response**:
+
+```
+HTTP/1.1 201 Created
+Content-Type: application/json
+
+{
+  "Id": "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30",
+  "Warning": ""
+}
+```
+
+**Status codes**:
+
+- **201** - no error
+- **403** - operation not supported for pre-defined networks
+- **404** - plugin not found
+- **500** - server error
+
+**JSON parameters**:
+
+- **Name** - The new network's name. This is a mandatory field.
+- **CheckDuplicate** - Requests the daemon to check for networks with the same name. Defaults to `false`.
+  Because networks are primarily keyed by a random ID rather than by name, and a network name is
+  strictly a user-friendly alias for a network that is uniquely identified by its ID, there is
+  no guaranteed way to check for duplicates. `CheckDuplicate` provides best-effort checking for
+  networks with the same name, but it is not guaranteed to catch all name collisions.
+- **Driver** - Name of the network driver plugin to use. Defaults to `bridge` driver +- **Internal** - Restrict external access to the network +- **IPAM** - Optional custom IP scheme for the network + - **Driver** - Name of the IPAM driver to use. Defaults to `default` driver + - **Config** - List of IPAM configuration options, specified as a map: + `{"Subnet": , "IPRange": , "Gateway": , "AuxAddress": }` + - **Options** - Driver-specific options, specified as a map: `{"option":"value" [,"option2":"value2"]}` +- **EnableIPv6** - Enable IPv6 on the network +- **Options** - Network specific options to be used by the drivers +- **Labels** - Labels to set on the network, specified as a map: `{"key":"value" [,"key2":"value2"]}` + +#### Connect a container to a network + +`POST /networks/(id or name)/connect` + +Connect a container to a network + +**Example request**: + +``` +POST /v1.24/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/connect HTTP/1.1 +Content-Type: application/json + +{ + "Container":"3613f73ba0e4", + "EndpointConfig": { + "IPAMConfig": { + "IPv4Address":"172.24.56.89", + "IPv6Address":"2001:db8::5689" + } + } +} +``` + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** - no error +- **403** - operation not supported for swarm scoped networks +- **404** - network or container is not found +- **500** - Internal Server Error + +**JSON parameters**: + +- **container** - container-id/name to be connected to the network + +#### Disconnect a container from a network + +`POST /networks/(id or name)/disconnect` + +Disconnect a container from a network + +**Example request**: + +``` +POST /v1.24/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/disconnect HTTP/1.1 +Content-Type: application/json + +{ + "Container":"3613f73ba0e4", + "Force":false +} +``` + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** - no error +- **403** - operation not supported for swarm scoped networks +- **404** - network or container not found +- **500** - Internal Server Error + +**JSON parameters**: + +- **Container** - container-id/name to be disconnected from a network +- **Force** - Force the container to disconnect from a network + +#### Remove a network + +`DELETE /networks/(id or name)` + +Instruct the driver to remove the network (`id`). + +**Example request**: + + DELETE /v1.24/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** - no error +- **403** - operation not supported for pre-defined networks +- **404** - no such network +- **500** - server error + +### 3.6 Plugins (experimental) + +#### List plugins + +`GET /plugins` + +Returns information about installed plugins. 
+ +**Example request**: + + GET /v1.24/plugins HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +[ + { + "Id": "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078", + "Name": "tiborvass/no-remove", + "Tag": "latest", + "Active": true, + "Config": { + "Mounts": [ + { + "Name": "", + "Description": "", + "Settable": null, + "Source": "/data", + "Destination": "/data", + "Type": "bind", + "Options": [ + "shared", + "rbind" + ] + }, + { + "Name": "", + "Description": "", + "Settable": null, + "Source": null, + "Destination": "/foobar", + "Type": "tmpfs", + "Options": null + } + ], + "Env": [ + "DEBUG=1" + ], + "Args": null, + "Devices": null + }, + "Manifest": { + "ManifestVersion": "v0", + "Description": "A test plugin for Docker", + "Documentation": "https://docs.docker.com/engine/extend/plugins/", + "Interface": { + "Types": [ + "docker.volumedriver/1.0" + ], + "Socket": "plugins.sock" + }, + "Entrypoint": [ + "plugin-no-remove", + "/data" + ], + "Workdir": "", + "User": { + }, + "Network": { + "Type": "host" + }, + "Capabilities": null, + "Mounts": [ + { + "Name": "", + "Description": "", + "Settable": null, + "Source": "/data", + "Destination": "/data", + "Type": "bind", + "Options": [ + "shared", + "rbind" + ] + }, + { + "Name": "", + "Description": "", + "Settable": null, + "Source": null, + "Destination": "/foobar", + "Type": "tmpfs", + "Options": null + } + ], + "Devices": [ + { + "Name": "device", + "Description": "a host device to mount", + "Settable": null, + "Path": "/dev/cpu_dma_latency" + } + ], + "Env": [ + { + "Name": "DEBUG", + "Description": "If set, prints debug messages", + "Settable": null, + "Value": "1" + } + ], + "Args": { + "Name": "args", + "Description": "command line arguments", + "Settable": null, + "Value": [ + + ] + } + } + } +] +``` + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Install a plugin + +`POST /plugins/pull?name=` + +Pulls and installs a plugin. After the plugin is installed, it can be enabled +using the [`POST /plugins/(plugin name)/enable` endpoint](#enable-a-plugin). + +**Example request**: + +``` +POST /v1.24/plugins/pull?name=tiborvass/no-remove:latest HTTP/1.1 +``` + +The `:latest` tag is optional, and is used as default if omitted. When using +this endpoint to pull a plugin from the registry, the `X-Registry-Auth` header +can be used to include a base64-encoded AuthConfig object. Refer to the [create +an image](#create-an-image) section for more details. + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 175 + +[ + { + "Name": "network", + "Description": "", + "Value": [ + "host" + ] + }, + { + "Name": "mount", + "Description": "", + "Value": [ + "/data" + ] + }, + { + "Name": "device", + "Description": "", + "Value": [ + "/dev/cpu_dma_latency" + ] + } +] +``` + +**Query parameters**: + +- **name** - Name of the plugin to pull. The name may include a tag or digest. + This parameter is required. + +**Status codes**: + +- **200** - no error +- **500** - error parsing reference / not a valid repository/tag: repository + name must have at least one component +- **500** - plugin already exists + +#### Inspect a plugin + +`GET /plugins/(plugin name)` + +Returns detailed information about an installed plugin. + +**Example request**: + +``` +GET /v1.24/plugins/tiborvass/no-remove:latest HTTP/1.1 +``` + +The `:latest` tag is optional, and is used as default if omitted. 
+ + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +{ + "Id": "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078", + "Name": "tiborvass/no-remove", + "Tag": "latest", + "Active": false, + "Config": { + "Mounts": [ + { + "Name": "", + "Description": "", + "Settable": null, + "Source": "/data", + "Destination": "/data", + "Type": "bind", + "Options": [ + "shared", + "rbind" + ] + }, + { + "Name": "", + "Description": "", + "Settable": null, + "Source": null, + "Destination": "/foobar", + "Type": "tmpfs", + "Options": null + } + ], + "Env": [ + "DEBUG=1" + ], + "Args": null, + "Devices": null + }, + "Manifest": { + "ManifestVersion": "v0", + "Description": "A test plugin for Docker", + "Documentation": "https://docs.docker.com/engine/extend/plugins/", + "Interface": { + "Types": [ + "docker.volumedriver/1.0" + ], + "Socket": "plugins.sock" + }, + "Entrypoint": [ + "plugin-no-remove", + "/data" + ], + "Workdir": "", + "User": { + }, + "Network": { + "Type": "host" + }, + "Capabilities": null, + "Mounts": [ + { + "Name": "", + "Description": "", + "Settable": null, + "Source": "/data", + "Destination": "/data", + "Type": "bind", + "Options": [ + "shared", + "rbind" + ] + }, + { + "Name": "", + "Description": "", + "Settable": null, + "Source": null, + "Destination": "/foobar", + "Type": "tmpfs", + "Options": null + } + ], + "Devices": [ + { + "Name": "device", + "Description": "a host device to mount", + "Settable": null, + "Path": "/dev/cpu_dma_latency" + } + ], + "Env": [ + { + "Name": "DEBUG", + "Description": "If set, prints debug messages", + "Settable": null, + "Value": "1" + } + ], + "Args": { + "Name": "args", + "Description": "command line arguments", + "Settable": null, + "Value": [ + + ] + } + } +} +``` + +**Status codes**: + +- **200** - no error +- **404** - plugin not installed + +#### Enable a plugin + +`POST /plugins/(plugin name)/enable` + +Enables a plugin + +**Example request**: + +``` +POST /v1.24/plugins/tiborvass/no-remove:latest/enable HTTP/1.1 +``` + +The `:latest` tag is optional, and is used as default if omitted. + + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Length: 0 +Content-Type: text/plain; charset=utf-8 +``` + +**Status codes**: + +- **200** - no error +- **404** - plugin not installed +- **500** - plugin is already enabled + +#### Disable a plugin + +`POST /plugins/(plugin name)/disable` + +Disables a plugin + +**Example request**: + +``` +POST /v1.24/plugins/tiborvass/no-remove:latest/disable HTTP/1.1 +``` + +The `:latest` tag is optional, and is used as default if omitted. + + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Length: 0 +Content-Type: text/plain; charset=utf-8 +``` + +**Status codes**: + +- **200** - no error +- **404** - plugin not installed +- **500** - plugin is already disabled + +#### Remove a plugin + +`DELETE /plugins/(plugin name)` + +Removes a plugin + +**Example request**: + +``` +DELETE /v1.24/plugins/tiborvass/no-remove:latest HTTP/1.1 +``` + +The `:latest` tag is optional, and is used as default if omitted. + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Length: 0 +Content-Type: text/plain; charset=utf-8 +``` + +**Status codes**: + +- **200** - no error +- **404** - plugin not installed +- **500** - plugin is active + + + +### 3.7 Nodes + +**Note**: Node operations require the engine to be part of a swarm. 
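+
+The node, swarm, service, and task endpoints in the sections that follow are
+ordinary versioned HTTP endpoints, so they can be exercised from any HTTP
+client. As a minimal sketch (not part of the API itself), the Go program below
+issues `GET /v1.24/nodes` over the daemon's Unix socket. The socket path
+`/var/run/docker.sock` is the common default and the `docker` host in the URL
+is a placeholder; both are assumptions, and the host is ignored once the
+dialer is overridden.
+
+```
+package main
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/http"
+)
+
+func main() {
+	// Route every request through the daemon's Unix socket instead of TCP.
+	// The path below is the usual default; adjust it if your daemon
+	// listens elsewhere.
+	client := &http.Client{
+		Transport: &http.Transport{
+			DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
+				return net.Dial("unix", "/var/run/docker.sock")
+			},
+		},
+	}
+
+	// "docker" is a dummy host; the custom dialer above ignores it and
+	// only the versioned path matters.
+	resp, err := client.Get("http://docker/v1.24/nodes")
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("HTTP %d\n%s\n", resp.StatusCode, body)
+}
+```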
+
+#### List nodes
+
+
+`GET /nodes`
+
+List nodes
+
+**Example request**:
+
+    GET /v1.24/nodes HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+      {
+        "ID": "24ifsmvkjbyhk",
+        "Version": {
+          "Index": 8
+        },
+        "CreatedAt": "2016-06-07T20:31:11.853781916Z",
+        "UpdatedAt": "2016-06-07T20:31:11.999868824Z",
+        "Spec": {
+          "Name": "my-node",
+          "Role": "manager",
+          "Availability": "active",
+          "Labels": {
+            "foo": "bar"
+          }
+        },
+        "Description": {
+          "Hostname": "bf3067039e47",
+          "Platform": {
+            "Architecture": "x86_64",
+            "OS": "linux"
+          },
+          "Resources": {
+            "NanoCPUs": 4000000000,
+            "MemoryBytes": 8272408576
+          },
+          "Engine": {
+            "EngineVersion": "1.12.0",
+            "Labels": {
+              "foo": "bar"
+            },
+            "Plugins": [
+              {
+                "Type": "Volume",
+                "Name": "local"
+              },
+              {
+                "Type": "Network",
+                "Name": "bridge"
+              },
+              {
+                "Type": "Network",
+                "Name": "null"
+              },
+              {
+                "Type": "Network",
+                "Name": "overlay"
+              }
+            ]
+          }
+        },
+        "Status": {
+          "State": "ready"
+        },
+        "ManagerStatus": {
+          "Leader": true,
+          "Reachability": "reachable",
+          "Addr": "172.17.0.2:2377"
+        }
+      }
+    ]
+
+**Query parameters**:
+
+- **filters** – a JSON encoded value of the filters (a `map[string][]string`) to process on the
+  nodes list. Available filters:
+  - `id=`
+  - `label=`
+  - `membership=(accepted|pending)`
+  - `name=`
+  - `role=(manager|worker)`
+
+**Status codes**:
+
+- **200** – no error
+- **406** - node is not part of a swarm
+- **500** – server error
+
+#### Inspect a node
+
+
+`GET /nodes/(id or name)`
+
+Return low-level information on the node `id`
+
+**Example request**:
+
+    GET /v1.24/nodes/24ifsmvkjbyhk HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+      "ID": "24ifsmvkjbyhk",
+      "Version": {
+        "Index": 8
+      },
+      "CreatedAt": "2016-06-07T20:31:11.853781916Z",
+      "UpdatedAt": "2016-06-07T20:31:11.999868824Z",
+      "Spec": {
+        "Name": "my-node",
+        "Role": "manager",
+        "Availability": "active",
+        "Labels": {
+          "foo": "bar"
+        }
+      },
+      "Description": {
+        "Hostname": "bf3067039e47",
+        "Platform": {
+          "Architecture": "x86_64",
+          "OS": "linux"
+        },
+        "Resources": {
+          "NanoCPUs": 4000000000,
+          "MemoryBytes": 8272408576
+        },
+        "Engine": {
+          "EngineVersion": "1.12.0",
+          "Labels": {
+            "foo": "bar"
+          },
+          "Plugins": [
+            {
+              "Type": "Volume",
+              "Name": "local"
+            },
+            {
+              "Type": "Network",
+              "Name": "bridge"
+            },
+            {
+              "Type": "Network",
+              "Name": "null"
+            },
+            {
+              "Type": "Network",
+              "Name": "overlay"
+            }
+          ]
+        }
+      },
+      "Status": {
+        "State": "ready"
+      },
+      "ManagerStatus": {
+        "Leader": true,
+        "Reachability": "reachable",
+        "Addr": "172.17.0.2:2377"
+      }
+    }
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such node
+- **406** – node is not part of a swarm
+- **500** – server error
+
+#### Remove a node
+
+
+`DELETE /nodes/(id or name)`
+
+Remove a node from the swarm.
+
+**Example request**:
+
+    DELETE /v1.24/nodes/24ifsmvkjbyhk HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Length: 0
+    Content-Type: text/plain; charset=utf-8
+
+**Query parameters**:
+
+- **force** - 1/True/true or 0/False/false, Force remove a node from the swarm.
+  Default `false`.
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such node
+- **406** – node is not part of a swarm
+- **500** – server error
+
+#### Update a node
+
+
+`POST /nodes/(id)/update`
+
+Update a node.
+
+The payload of the `POST` request is the new `NodeSpec` and
+overrides the current `NodeSpec` for the specified node.
+
+If `Availability` or `Role` are omitted, this returns an
+error. Any other field omitted resets the current value to either
+an empty value or the default cluster-wide value.
+
+**Example request**:
+
+    POST /v1.24/nodes/24ifsmvkjbyhk/update?version=8 HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "Availability": "active",
+      "Name": "node-name",
+      "Role": "manager",
+      "Labels": {
+        "foo": "bar"
+      }
+    }
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Length: 0
+    Content-Type: text/plain; charset=utf-8
+
+**Query parameters**:
+
+- **version** – The version number of the node object being updated. This is
+  required to avoid conflicting writes.
+
+JSON Parameters:
+
+- **Annotations** – Optional metadata to associate with the node.
+  - **Name** – User-defined name for the node.
+  - **Labels** – A map of labels to associate with the node (e.g.,
+    `{"key":"value", "key2":"value2"}`).
+- **Role** - Role of the node (worker|manager).
+- **Availability** - Availability of the node (active|pause|drain).
+
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such node
+- **406** – node is not part of a swarm
+- **500** – server error
+
+### 3.8 Swarm
+
+#### Inspect swarm
+
+
+`GET /swarm`
+
+Inspect swarm
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+      "CreatedAt" : "2016-08-15T16:00:20.349727406Z",
+      "Spec" : {
+        "Dispatcher" : {
+          "HeartbeatPeriod" : 5000000000
+        },
+        "Orchestration" : {
+          "TaskHistoryRetentionLimit" : 10
+        },
+        "CAConfig" : {
+          "NodeCertExpiry" : 7776000000000000
+        },
+        "Raft" : {
+          "LogEntriesForSlowFollowers" : 500,
+          "HeartbeatTick" : 1,
+          "SnapshotInterval" : 10000,
+          "ElectionTick" : 3
+        },
+        "TaskDefaults" : {},
+        "Name" : "default"
+      },
+      "JoinTokens" : {
+        "Worker" : "SWMTKN-1-1h8aps2yszaiqmz2l3oc5392pgk8e49qhx2aj3nyv0ui0hez2a-6qmn92w6bu3jdvnglku58u11a",
+        "Manager" : "SWMTKN-1-1h8aps2yszaiqmz2l3oc5392pgk8e49qhx2aj3nyv0ui0hez2a-8llk83c4wm9lwioey2s316r9l"
+      },
+      "ID" : "70ilmkj2f6sp2137c753w2nmt",
+      "UpdatedAt" : "2016-08-15T16:32:09.623207604Z",
+      "Version" : {
+        "Index" : 51
+      }
+    }
+
+**Status codes**:
+
+- **200** - no error
+- **406** – node is not part of a swarm
+- **500** - server error
+
+#### Initialize a new swarm
+
+
+`POST /swarm/init`
+
+Initialize a new swarm. The body of the HTTP response includes the node ID.
+
+**Example request**:
+
+    POST /v1.24/swarm/init HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "ListenAddr": "0.0.0.0:2377",
+      "AdvertiseAddr": "192.168.1.1:2377",
+      "ForceNewCluster": false,
+      "Spec": {
+        "Orchestration": {},
+        "Raft": {},
+        "Dispatcher": {},
+        "CAConfig": {}
+      }
+    }
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Length: 28
+    Content-Type: application/json
+    Date: Thu, 01 Sep 2016 21:49:13 GMT
+    Server: Docker/1.12.0 (linux)
+
+    "7v2t30z9blmxuhnyo6s4cpenp"
+
+**Status codes**:
+
+- **200** – no error
+- **400** – bad parameter
+- **406** – node is already part of a swarm
+- **500** - server error
+
+JSON Parameters:
+
+- **ListenAddr** – Listen address used for inter-manager communication, as well as determining
+  the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an
+  address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port
+  number, like `eth0:4567`. If the port number is omitted, the default swarm listening port is
+  used.
+- **AdvertiseAddr** – Externally reachable address advertised to other nodes.
This can either be
+  an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port
+  number, like `eth0:4567`. If the port number is omitted, the port number from the listen
+  address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when
+  possible.
+- **ForceNewCluster** – Force creation of a new swarm.
+- **Spec** – Configuration settings for the new swarm.
+  - **Orchestration** – Configuration settings for the orchestration aspects of the swarm.
+    - **TaskHistoryRetentionLimit** – Maximum number of task history entries stored.
+  - **Raft** – Raft related configuration.
+    - **SnapshotInterval** – Number of log entries between snapshots.
+    - **KeepOldSnapshots** – Number of snapshots to keep beyond the current snapshot.
+    - **LogEntriesForSlowFollowers** – Number of log entries to keep around to sync up slow
+      followers after a snapshot is created.
+    - **HeartbeatTick** – Amount of ticks (in seconds) between each heartbeat.
+    - **ElectionTick** – Amount of ticks (in seconds) needed without a leader to trigger a new
+      election.
+  - **Dispatcher** – Configuration settings for the task dispatcher.
+    - **HeartbeatPeriod** – The delay for an agent to send a heartbeat to the dispatcher.
+  - **CAConfig** – Certificate authority configuration.
+    - **NodeCertExpiry** – Automatic expiry for nodes certificates.
+    - **ExternalCA** - Configuration for forwarding signing requests to an external
+      certificate authority.
+      - **Protocol** - Protocol for communication with the external CA
+        (currently only "cfssl" is supported).
+      - **URL** - URL where certificate signing requests should be sent.
+      - **Options** - An object with key/value pairs that are interpreted
+        as protocol-specific options for the external CA driver.
+
+#### Join an existing swarm
+
+`POST /swarm/join`
+
+Join an existing swarm
+
+**Example request**:
+
+    POST /v1.24/swarm/join HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "ListenAddr": "0.0.0.0:2377",
+      "AdvertiseAddr": "192.168.1.1:2377",
+      "RemoteAddrs": ["node1:2377"],
+      "JoinToken": "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2"
+    }
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Length: 0
+    Content-Type: text/plain; charset=utf-8
+
+**Status codes**:
+
+- **200** – no error
+- **400** – bad parameter
+- **406** – node is already part of a swarm
+- **500** - server error
+
+JSON Parameters:
+
+- **ListenAddr** – Listen address used for inter-manager communication if the node gets promoted to
+  manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP).
+- **AdvertiseAddr** – Externally reachable address advertised to other nodes. This can either be
+  an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port
+  number, like `eth0:4567`. If the port number is omitted, the port number from the listen
+  address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when
+  possible.
+- **RemoteAddrs** – Addresses of one or more manager nodes already participating in the swarm.
+- **JoinToken** – Secret token for joining this swarm.
+
+#### Leave a swarm
+
+
+`POST /swarm/leave`
+
+Leave a swarm
+
+**Example request**:
+
+    POST /v1.24/swarm/leave HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Length: 0
+    Content-Type: text/plain; charset=utf-8
+
+**Query parameters**:
+
+- **force** - Boolean (0/1, false/true).
Force leave the swarm, even if this is the last manager or if leaving will break the cluster.
+
+**Status codes**:
+
+- **200** – no error
+- **406** – node is not part of a swarm
+- **500** - server error
+
+#### Update a swarm
+
+
+`POST /swarm/update`
+
+Update a swarm
+
+**Example request**:
+
+    POST /v1.24/swarm/update HTTP/1.1
+
+    {
+      "Name": "default",
+      "Orchestration": {
+        "TaskHistoryRetentionLimit": 10
+      },
+      "Raft": {
+        "SnapshotInterval": 10000,
+        "LogEntriesForSlowFollowers": 500,
+        "HeartbeatTick": 1,
+        "ElectionTick": 3
+      },
+      "Dispatcher": {
+        "HeartbeatPeriod": 5000000000
+      },
+      "CAConfig": {
+        "NodeCertExpiry": 7776000000000000
+      },
+      "JoinTokens": {
+        "Worker": "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx",
+        "Manager": "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2"
+      }
+    }
+
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Length: 0
+    Content-Type: text/plain; charset=utf-8
+
+**Query parameters**:
+
+- **version** – The version number of the swarm object being updated. This is
+  required to avoid conflicting writes.
+- **rotateWorkerToken** - Set to `true` (or `1`) to rotate the worker join token.
+- **rotateManagerToken** - Set to `true` (or `1`) to rotate the manager join token.
+
+**Status codes**:
+
+- **200** – no error
+- **400** – bad parameter
+- **406** – node is not part of a swarm
+- **500** - server error
+
+JSON Parameters:
+
+- **Orchestration** – Configuration settings for the orchestration aspects of the swarm.
+  - **TaskHistoryRetentionLimit** – Maximum number of task history entries stored.
+- **Raft** – Raft related configuration.
+  - **SnapshotInterval** – Number of log entries between snapshots.
+  - **KeepOldSnapshots** – Number of snapshots to keep beyond the current snapshot.
+  - **LogEntriesForSlowFollowers** – Number of log entries to keep around to sync up slow
+    followers after a snapshot is created.
+  - **HeartbeatTick** – Amount of ticks (in seconds) between each heartbeat.
+  - **ElectionTick** – Amount of ticks (in seconds) needed without a leader to trigger a new
+    election.
+- **Dispatcher** – Configuration settings for the task dispatcher.
+  - **HeartbeatPeriod** – The delay for an agent to send a heartbeat to the dispatcher.
+- **CAConfig** – CA configuration.
+  - **NodeCertExpiry** – Automatic expiry for nodes certificates.
+  - **ExternalCA** - Configuration for forwarding signing requests to an external
+    certificate authority.
+    - **Protocol** - Protocol for communication with the external CA
+      (currently only "cfssl" is supported).
+    - **URL** - URL where certificate signing requests should be sent.
+    - **Options** - An object with key/value pairs that are interpreted
+      as protocol-specific options for the external CA driver.
+- **JoinTokens** - Tokens that can be used by other nodes to join the swarm.
+  - **Worker** - Token to use for joining as a worker.
+  - **Manager** - Token to use for joining as a manager.
+
+### 3.9 Services
+
+**Note**: Service operations require the engine to be part of a swarm.
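+
+The update endpoints in this section (`POST /nodes/(id)/update`, `POST
+/swarm/update`, and `POST /services/(id)/update` below) all take a `version`
+query parameter to avoid conflicting writes. A minimal Go sketch of the
+resulting read-modify-write loop follows; the TCP address `localhost:2375`,
+the service name `web`, and the label being set are hypothetical examples,
+and only the fields the sketch needs are decoded.
+
+```
+package main
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"net/http"
+)
+
+// service is a minimal projection of a service object: just the version
+// index plus the spec that gets posted back.
+type service struct {
+	Version struct {
+		Index uint64 `json:"Index"`
+	} `json:"Version"`
+	Spec map[string]interface{} `json:"Spec"`
+}
+
+func main() {
+	const host = "http://localhost:2375" // hypothetical TCP daemon address
+
+	// 1. Read the current object to learn its version index.
+	resp, err := http.Get(host + "/v1.24/services/web")
+	if err != nil {
+		panic(err)
+	}
+	var svc service
+	if err := json.NewDecoder(resp.Body).Decode(&svc); err != nil {
+		panic(err)
+	}
+	resp.Body.Close()
+
+	// 2. Modify the spec locally.
+	svc.Spec["Labels"] = map[string]string{"tier": "frontend"}
+
+	// 3. Post the full spec back with the version that was read; the
+	// daemon rejects the update if the object changed in the meantime.
+	payload, _ := json.Marshal(svc.Spec)
+	url := fmt.Sprintf("%s/v1.24/services/web/update?version=%d", host, svc.Version.Index)
+	resp, err = http.Post(url, "application/json", bytes.NewReader(payload))
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+	fmt.Println("update status:", resp.Status)
+}
+```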
+ +#### List services + + +`GET /services` + +List services + +**Example request**: + + GET /v1.24/services HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "ID": "9mnpnzenvg8p8tdbtq4wvbkcz", + "Version": { + "Index": 19 + }, + "CreatedAt": "2016-06-07T21:05:51.880065305Z", + "UpdatedAt": "2016-06-07T21:07:29.962229872Z", + "Spec": { + "Name": "hopeful_cori", + "TaskTemplate": { + "ContainerSpec": { + "Image": "redis" + }, + "Resources": { + "Limits": {}, + "Reservations": {} + }, + "RestartPolicy": { + "Condition": "any", + "MaxAttempts": 0 + }, + "Placement": { + "Constraints": [ + "node.role == worker" + ] + } + }, + "Mode": { + "Replicated": { + "Replicas": 1 + } + }, + "UpdateConfig": { + "Parallelism": 1, + "FailureAction": "pause" + }, + "EndpointSpec": { + "Mode": "vip", + "Ports": [ + { + "Protocol": "tcp", + "TargetPort": 6379, + "PublishedPort": 30001 + } + ] + } + }, + "Endpoint": { + "Spec": { + "Mode": "vip", + "Ports": [ + { + "Protocol": "tcp", + "TargetPort": 6379, + "PublishedPort": 30001 + } + ] + }, + "Ports": [ + { + "Protocol": "tcp", + "TargetPort": 6379, + "PublishedPort": 30001 + } + ], + "VirtualIPs": [ + { + "NetworkID": "4qvuz4ko70xaltuqbt8956gd1", + "Addr": "10.255.0.2/16" + }, + { + "NetworkID": "4qvuz4ko70xaltuqbt8956gd1", + "Addr": "10.255.0.3/16" + } + ] + } + } + ] + +**Query parameters**: + +- **filters** – a JSON encoded value of the filters (a `map[string][]string`) to process on the + services list. Available filters: + - `id=` + - `label=` + - `name=` + +**Status codes**: + +- **200** – no error +- **406** – node is not part of a swarm +- **500** – server error + +#### Create a service + +`POST /services/create` + +Create a service. When using this endpoint to create a service using a private +repository from the registry, the `X-Registry-Auth` header must be used to +include a base64-encoded AuthConfig object. Refer to the [create an +image](#create-an-image) section for more details. 
+ +**Example request**: + + POST /v1.24/services/create HTTP/1.1 + Content-Type: application/json + + { + "Name": "web", + "TaskTemplate": { + "ContainerSpec": { + "Image": "nginx:alpine", + "Mounts": [ + { + "ReadOnly": true, + "Source": "web-data", + "Target": "/usr/share/nginx/html", + "Type": "volume", + "VolumeOptions": { + "DriverConfig": { + }, + "Labels": { + "com.example.something": "something-value" + } + } + } + ], + "User": "33" + }, + "Networks": [ + { + "Target": "overlay1" + } + ], + "LogDriver": { + "Name": "json-file", + "Options": { + "max-file": "3", + "max-size": "10M" + } + }, + "Placement": { + "Constraints": [ + "node.role == worker" + ] + }, + "Resources": { + "Limits": { + "MemoryBytes": 104857600 + }, + "Reservations": { + } + }, + "RestartPolicy": { + "Condition": "on-failure", + "Delay": 10000000000, + "MaxAttempts": 10 + } + }, + "Mode": { + "Replicated": { + "Replicas": 4 + } + }, + "UpdateConfig": { + "Delay": 30000000000, + "Parallelism": 2, + "FailureAction": "pause" + }, + "EndpointSpec": { + "Ports": [ + { + "Protocol": "tcp", + "PublishedPort": 8080, + "TargetPort": 80 + } + ] + }, + "Labels": { + "foo": "bar" + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "ID":"ak7w3gjqoa3kuz8xcpnyy0pvl" + } + +**Status codes**: + +- **201** – no error +- **403** - network is not eligible for services +- **406** – node is not part of a swarm +- **409** – name conflicts with an existing object +- **500** - server error + +**JSON Parameters**: + +- **Name** – User-defined name for the service. +- **Labels** – A map of labels to associate with the service (e.g., `{"key":"value", "key2":"value2"}`). +- **TaskTemplate** – Specification of the tasks to start as part of the new service. + - **ContainerSpec** - Container settings for containers started as part of this task. + - **Image** – A string specifying the image name to use for the container. + - **Command** – The command to be run in the image. + - **Args** – Arguments to the command. + - **Env** – A list of environment variables in the form of `["VAR=value"[,"VAR2=value2"]]`. + - **Dir** – A string specifying the working directory for commands to run in. + - **User** – A string value specifying the user inside the container. + - **Labels** – A map of labels to associate with the service (e.g., + `{"key":"value", "key2":"value2"}`). + - **Mounts** – Specification for mounts to be added to containers + created as part of the service. + - **Target** – Container path. + - **Source** – Mount source (e.g. a volume name, a host path). + - **Type** – The mount type (`bind`, or `volume`). + - **ReadOnly** – A boolean indicating whether the mount should be read-only. + - **BindOptions** - Optional configuration for the `bind` type. + - **Propagation** – A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`. + - **VolumeOptions** – Optional configuration for the `volume` type. + - **NoCopy** – A boolean indicating if volume should be + populated with the data from the target. (Default false) + - **Labels** – User-defined name and labels for the volume. + - **DriverConfig** – Map of driver-specific options. + - **Name** - Name of the driver to use to create the volume. + - **Options** - key/value map of driver specific options. + - **StopGracePeriod** – Amount of time to wait for the container to terminate before + forcefully killing it. + - **LogDriver** - Log configuration for containers created as part of the + service. 
+    - **Name** - Name of the logging driver to use (`json-file`, `syslog`,
+      `journald`, `gelf`, `fluentd`, `awslogs`, `splunk`, `etwlogs`, `none`).
+    - **Options** - Driver-specific options.
+  - **Resources** – Resource requirements which apply to each individual container created as part
+    of the service.
+    - **Limits** – Define resources limits.
+      - **NanoCPUs** – CPU limit in units of 10^-9 CPU shares.
+      - **MemoryBytes** – Memory limit in Bytes.
+    - **Reservation** – Define resources reservation.
+      - **NanoCPUs** – CPU reservation in units of 10^-9 CPU shares.
+      - **MemoryBytes** – Memory reservation in Bytes.
+  - **RestartPolicy** – Specification for the restart policy which applies to containers created
+    as part of this service.
+    - **Condition** – Condition for restart (`none`, `on-failure`, or `any`).
+    - **Delay** – Delay between restart attempts.
+    - **MaxAttempts** – Maximum attempts to restart a given container before giving up (default value
+      is 0, which is ignored).
+    - **Window** – The time window used to evaluate the restart policy (default value is
+      0, which is unbounded).
+  - **Placement** – Restrictions on where a service can run.
+    - **Constraints** – An array of constraints, e.g. `[ "node.role == manager" ]`.
+- **Mode** – Scheduling mode for the service (`replicated` or `global`, defaults to `replicated`).
+- **UpdateConfig** – Specification for the update strategy of the service.
+  - **Parallelism** – Maximum number of tasks to be updated in one iteration (0 means unlimited
+    parallelism).
+  - **Delay** – Amount of time between updates.
+  - **FailureAction** - Action to take if an updated task fails to run, or stops running during the
+    update. Values are `continue` and `pause`.
+- **Networks** – Array of network names or IDs to attach the service to.
+- **EndpointSpec** – Properties that can be configured to access and load balance a service.
+  - **Mode** – The mode of resolution to use for internal load balancing
+    between tasks (`vip` or `dnsrr`). Defaults to `vip` if not provided.
+  - **Ports** – List of exposed ports that this service is accessible on from
+    the outside, in the form of:
+    `{"Protocol": <"tcp"|"udp">, "PublishedPort": , "TargetPort": }`.
+    Ports can only be provided if `vip` resolution mode is used.
+
+**Request Headers**:
+
+- **Content-type** – Set to `"application/json"`.
+- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either
+  login information, or a token. Refer to the [create an image](#create-an-image)
+  section for more details.
+
+
+#### Remove a service
+
+
+`DELETE /services/(id or name)`
+
+Stop and remove the service `id`
+
+**Example request**:
+
+    DELETE /v1.24/services/16253994b7c4 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Length: 0
+    Content-Type: text/plain; charset=utf-8
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such service
+- **406** - node is not part of a swarm
+- **500** – server error
+
+#### Inspect one or more services
+
+
+`GET /services/(id or name)`
+
+Return information on the service `id`.
+
+**Example request**:
+
+    GET /v1.24/services/1cb4dnqcyx6m66g2t538x3rxha HTTP/1.1
+
+**Example response**:
+
+    {
+      "ID": "ak7w3gjqoa3kuz8xcpnyy0pvl",
+      "Version": {
+        "Index": 95
+      },
+      "CreatedAt": "2016-06-07T21:10:20.269723157Z",
+      "UpdatedAt": "2016-06-07T21:10:20.276301259Z",
+      "Spec": {
+        "Name": "redis",
+        "TaskTemplate": {
+          "ContainerSpec": {
+            "Image": "redis"
+          },
+          "Resources": {
+            "Limits": {},
+            "Reservations": {}
+          },
+          "RestartPolicy": {
+            "Condition": "any",
+            "MaxAttempts": 0
+          },
+          "Placement": {}
+        },
+        "Mode": {
+          "Replicated": {
+            "Replicas": 1
+          }
+        },
+        "UpdateConfig": {
+          "Parallelism": 1,
+          "FailureAction": "pause"
+        },
+        "EndpointSpec": {
+          "Mode": "vip",
+          "Ports": [
+            {
+              "Protocol": "tcp",
+              "TargetPort": 6379,
+              "PublishedPort": 30001
+            }
+          ]
+        }
+      },
+      "Endpoint": {
+        "Spec": {
+          "Mode": "vip",
+          "Ports": [
+            {
+              "Protocol": "tcp",
+              "TargetPort": 6379,
+              "PublishedPort": 30001
+            }
+          ]
+        },
+        "Ports": [
+          {
+            "Protocol": "tcp",
+            "TargetPort": 6379,
+            "PublishedPort": 30001
+          }
+        ],
+        "VirtualIPs": [
+          {
+            "NetworkID": "4qvuz4ko70xaltuqbt8956gd1",
+            "Addr": "10.255.0.4/16"
+          }
+        ]
+      }
+    }
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such service
+- **406** - node is not part of a swarm
+- **500** – server error
+
+#### Update a service
+
+`POST /services/(id)/update`
+
+Update a service. When using this endpoint with a service that uses a
+private repository from the registry, the `X-Registry-Auth` header can be used
+to update the authentication information that is stored for the service.
+The header contains a base64-encoded AuthConfig object. Refer to the [create an
+image](#create-an-image) section for more details.
+
+**Example request**:
+
+    POST /v1.24/services/1cb4dnqcyx6m66g2t538x3rxha/update?version=23 HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "Name": "top",
+      "TaskTemplate": {
+        "ContainerSpec": {
+          "Image": "busybox",
+          "Args": [
+            "top"
+          ]
+        },
+        "Resources": {
+          "Limits": {},
+          "Reservations": {}
+        },
+        "RestartPolicy": {
+          "Condition": "any",
+          "MaxAttempts": 0
+        },
+        "Placement": {}
+      },
+      "Mode": {
+        "Replicated": {
+          "Replicas": 1
+        }
+      },
+      "UpdateConfig": {
+        "Parallelism": 1
+      },
+      "EndpointSpec": {
+        "Mode": "vip"
+      }
+    }
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Length: 0
+    Content-Type: text/plain; charset=utf-8
+
+**JSON Parameters**:
+
+- **Name** – User-defined name for the service. Note that renaming services is not supported.
+- **Labels** – A map of labels to associate with the service (e.g., `{"key":"value", "key2":"value2"}`).
+- **TaskTemplate** – Specification of the tasks to start as part of the new service.
+  - **ContainerSpec** - Container settings for containers started as part of this task.
+    - **Image** – A string specifying the image name to use for the container.
+    - **Command** – The command to be run in the image.
+    - **Args** – Arguments to the command.
+    - **Env** – A list of environment variables in the form of `["VAR=value"[,"VAR2=value2"]]`.
+    - **Dir** – A string specifying the working directory for commands to run in.
+    - **User** – A string value specifying the user inside the container.
+    - **Labels** – A map of labels to associate with the service (e.g.,
+      `{"key":"value", "key2":"value2"}`).
+    - **Mounts** – Specification for mounts to be added to containers created as part of the new
+      service.
+      - **Target** – Container path.
+      - **Source** – Mount source (e.g. a volume name, a host path).
+      - **Type** – The mount type (`bind`, or `volume`).
+      - **ReadOnly** – A boolean indicating whether the mount should be read-only.
+      - **BindOptions** - Optional configuration for the `bind` type.
+        - **Propagation** – A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`.
+      - **VolumeOptions** – Optional configuration for the `volume` type.
+        - **NoCopy** – A boolean indicating if volume should be
+          populated with the data from the target. (Default false)
+        - **Labels** – User-defined name and labels for the volume.
+        - **DriverConfig** – Map of driver-specific options.
+          - **Name** - Name of the driver to use to create the volume.
+          - **Options** - key/value map of driver specific options.
+    - **StopGracePeriod** – Amount of time to wait for the container to terminate before
+      forcefully killing it.
+  - **Resources** – Resource requirements which apply to each individual container created as part
+    of the service.
+    - **Limits** – Define resources limits.
+      - **CPU** – CPU limit.
+      - **Memory** – Memory limit.
+    - **Reservation** – Define resources reservation.
+      - **CPU** – CPU reservation.
+      - **Memory** – Memory reservation.
+  - **RestartPolicy** – Specification for the restart policy which applies to containers created
+    as part of this service.
+    - **Condition** – Condition for restart (`none`, `on-failure`, or `any`).
+    - **Delay** – Delay between restart attempts.
+    - **MaxAttempts** – Maximum attempts to restart a given container before giving up (default value
+      is 0, which is ignored).
+    - **Window** – The time window used to evaluate the restart policy (default value is
+      0, which is unbounded).
+  - **Placement** – Restrictions on where a service can run.
+    - **Constraints** – An array of constraints, e.g. `[ "node.role == manager" ]`.
+- **Mode** – Scheduling mode for the service (`replicated` or `global`, defaults to `replicated`).
+- **UpdateConfig** – Specification for the update strategy of the service.
+  - **Parallelism** – Maximum number of tasks to be updated in one iteration (0 means unlimited
+    parallelism).
+  - **Delay** – Amount of time between updates.
+- **Networks** – Array of network names or IDs to attach the service to.
+- **EndpointSpec** – Properties that can be configured to access and load balance a service.
+  - **Mode** – The mode of resolution to use for internal load balancing
+    between tasks (`vip` or `dnsrr`). Defaults to `vip` if not provided.
+  - **Ports** – List of exposed ports that this service is accessible on from
+    the outside, in the form of:
+    `{"Protocol": <"tcp"|"udp">, "PublishedPort": , "TargetPort": }`.
+    Ports can only be provided if `vip` resolution mode is used.
+
+**Query parameters**:
+
+- **version** – The version number of the service object being updated. This is
+  required to avoid conflicting writes.
+
+**Request Headers**:
+
+- **Content-type** – Set to `"application/json"`.
+- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either
+  login information, or a token. Refer to the [create an image](#create-an-image)
+  section for more details.
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such service
+- **406** - node is not part of a swarm
+- **500** – server error
+
+### 3.10 Tasks
+
+**Note**: Task operations require the engine to be part of a swarm.
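+
+As with the other list endpoints in this document, the `filters` query
+parameter on `GET /tasks` below is a JSON encoded `map[string][]string` that
+must be URL-escaped. A small Go sketch of building such a query string
+(the filter values are examples only):
+
+```
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+)
+
+func main() {
+	// A map[string][]string, JSON-encoded and then URL-escaped into the
+	// query string. The values below are illustrative.
+	filters := map[string][]string{
+		"service":       {"web"},
+		"desired-state": {"running"},
+	}
+	raw, err := json.Marshal(filters)
+	if err != nil {
+		panic(err)
+	}
+
+	q := url.Values{}
+	q.Set("filters", string(raw))
+
+	// Prints a path such as:
+	// /v1.24/tasks?filters=%7B%22desired-state%22%3A%5B%22running%22%5D%2C%22service%22%3A%5B%22web%22%5D%7D
+	fmt.Println("/v1.24/tasks?" + q.Encode())
+}
+```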
+
+#### List tasks
+
+
+`GET /tasks`
+
+List tasks
+
+**Example request**:
+
+    GET /v1.24/tasks HTTP/1.1
+
+**Example response**:
+
+    [
+      {
+        "ID": "0kzzo1i0y4jz6027t0k7aezc7",
+        "Version": {
+          "Index": 71
+        },
+        "CreatedAt": "2016-06-07T21:07:31.171892745Z",
+        "UpdatedAt": "2016-06-07T21:07:31.376370513Z",
+        "Spec": {
+          "ContainerSpec": {
+            "Image": "redis"
+          },
+          "Resources": {
+            "Limits": {},
+            "Reservations": {}
+          },
+          "RestartPolicy": {
+            "Condition": "any",
+            "MaxAttempts": 0
+          },
+          "Placement": {}
+        },
+        "ServiceID": "9mnpnzenvg8p8tdbtq4wvbkcz",
+        "Slot": 1,
+        "NodeID": "60gvrl6tm78dmak4yl7srz94v",
+        "Status": {
+          "Timestamp": "2016-06-07T21:07:31.290032978Z",
+          "State": "running",
+          "Message": "started",
+          "ContainerStatus": {
+            "ContainerID": "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035",
+            "PID": 677
+          }
+        },
+        "DesiredState": "running",
+        "NetworksAttachments": [
+          {
+            "Network": {
+              "ID": "4qvuz4ko70xaltuqbt8956gd1",
+              "Version": {
+                "Index": 18
+              },
+              "CreatedAt": "2016-06-07T20:31:11.912919752Z",
+              "UpdatedAt": "2016-06-07T21:07:29.955277358Z",
+              "Spec": {
+                "Name": "ingress",
+                "Labels": {
+                  "com.docker.swarm.internal": "true"
+                },
+                "DriverConfiguration": {},
+                "IPAMOptions": {
+                  "Driver": {},
+                  "Configs": [
+                    {
+                      "Subnet": "10.255.0.0/16",
+                      "Gateway": "10.255.0.1"
+                    }
+                  ]
+                }
+              },
+              "DriverState": {
+                "Name": "overlay",
+                "Options": {
+                  "com.docker.network.driver.overlay.vxlanid_list": "256"
+                }
+              },
+              "IPAMOptions": {
+                "Driver": {
+                  "Name": "default"
+                },
+                "Configs": [
+                  {
+                    "Subnet": "10.255.0.0/16",
+                    "Gateway": "10.255.0.1"
+                  }
+                ]
+              }
+            },
+            "Addresses": [
+              "10.255.0.10/16"
+            ]
+          }
+        ]
+      },
+      {
+        "ID": "1yljwbmlr8er2waf8orvqpwms",
+        "Version": {
+          "Index": 30
+        },
+        "CreatedAt": "2016-06-07T21:07:30.019104782Z",
+        "UpdatedAt": "2016-06-07T21:07:30.231958098Z",
+        "Name": "hopeful_cori",
+        "Spec": {
+          "ContainerSpec": {
+            "Image": "redis"
+          },
+          "Resources": {
+            "Limits": {},
+            "Reservations": {}
+          },
+          "RestartPolicy": {
+            "Condition": "any",
+            "MaxAttempts": 0
+          },
+          "Placement": {}
+        },
+        "ServiceID": "9mnpnzenvg8p8tdbtq4wvbkcz",
+        "Slot": 1,
+        "NodeID": "60gvrl6tm78dmak4yl7srz94v",
+        "Status": {
+          "Timestamp": "2016-06-07T21:07:30.202183143Z",
+          "State": "shutdown",
+          "Message": "shutdown",
+          "ContainerStatus": {
+            "ContainerID": "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213"
+          }
+        },
+        "DesiredState": "shutdown",
+        "NetworksAttachments": [
+          {
+            "Network": {
+              "ID": "4qvuz4ko70xaltuqbt8956gd1",
+              "Version": {
+                "Index": 18
+              },
+              "CreatedAt": "2016-06-07T20:31:11.912919752Z",
+              "UpdatedAt": "2016-06-07T21:07:29.955277358Z",
+              "Spec": {
+                "Name": "ingress",
+                "Labels": {
+                  "com.docker.swarm.internal": "true"
+                },
+                "DriverConfiguration": {},
+                "IPAMOptions": {
+                  "Driver": {},
+                  "Configs": [
+                    {
+                      "Subnet": "10.255.0.0/16",
+                      "Gateway": "10.255.0.1"
+                    }
+                  ]
+                }
+              },
+              "DriverState": {
+                "Name": "overlay",
+                "Options": {
+                  "com.docker.network.driver.overlay.vxlanid_list": "256"
+                }
+              },
+              "IPAMOptions": {
+                "Driver": {
+                  "Name": "default"
+                },
+                "Configs": [
+                  {
+                    "Subnet": "10.255.0.0/16",
+                    "Gateway": "10.255.0.1"
+                  }
+                ]
+              }
+            },
+            "Addresses": [
+              "10.255.0.5/16"
+            ]
+          }
+        ]
+      }
+    ]
+
+**Query parameters**:
+
+- **filters** – a JSON encoded value of the filters (a `map[string][]string`) to process on the
+  tasks list.
Available filters:
+  - `id=`
+  - `name=`
+  - `service=`
+  - `node=`
+  - `label=key` or `label="key=value"`
+  - `desired-state=(running | shutdown | accepted)`
+
+**Status codes**:
+
+- **200** – no error
+- **406** - node is not part of a swarm
+- **500** – server error
+
+#### Inspect a task
+
+
+`GET /tasks/(id)`
+
+Get details on the task `id`
+
+**Example request**:
+
+    GET /v1.24/tasks/0kzzo1i0y4jz6027t0k7aezc7 HTTP/1.1
+
+**Example response**:
+
+    {
+      "ID": "0kzzo1i0y4jz6027t0k7aezc7",
+      "Version": {
+        "Index": 71
+      },
+      "CreatedAt": "2016-06-07T21:07:31.171892745Z",
+      "UpdatedAt": "2016-06-07T21:07:31.376370513Z",
+      "Spec": {
+        "ContainerSpec": {
+          "Image": "redis"
+        },
+        "Resources": {
+          "Limits": {},
+          "Reservations": {}
+        },
+        "RestartPolicy": {
+          "Condition": "any",
+          "MaxAttempts": 0
+        },
+        "Placement": {}
+      },
+      "ServiceID": "9mnpnzenvg8p8tdbtq4wvbkcz",
+      "Slot": 1,
+      "NodeID": "60gvrl6tm78dmak4yl7srz94v",
+      "Status": {
+        "Timestamp": "2016-06-07T21:07:31.290032978Z",
+        "State": "running",
+        "Message": "started",
+        "ContainerStatus": {
+          "ContainerID": "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035",
+          "PID": 677
+        }
+      },
+      "DesiredState": "running",
+      "NetworksAttachments": [
+        {
+          "Network": {
+            "ID": "4qvuz4ko70xaltuqbt8956gd1",
+            "Version": {
+              "Index": 18
+            },
+            "CreatedAt": "2016-06-07T20:31:11.912919752Z",
+            "UpdatedAt": "2016-06-07T21:07:29.955277358Z",
+            "Spec": {
+              "Name": "ingress",
+              "Labels": {
+                "com.docker.swarm.internal": "true"
+              },
+              "DriverConfiguration": {},
+              "IPAMOptions": {
+                "Driver": {},
+                "Configs": [
+                  {
+                    "Subnet": "10.255.0.0/16",
+                    "Gateway": "10.255.0.1"
+                  }
+                ]
+              }
+            },
+            "DriverState": {
+              "Name": "overlay",
+              "Options": {
+                "com.docker.network.driver.overlay.vxlanid_list": "256"
+              }
+            },
+            "IPAMOptions": {
+              "Driver": {
+                "Name": "default"
+              },
+              "Configs": [
+                {
+                  "Subnet": "10.255.0.0/16",
+                  "Gateway": "10.255.0.1"
+                }
+              ]
+            }
+          },
+          "Addresses": [
+            "10.255.0.10/16"
+          ]
+        }
+      ]
+    }
+
+**Status codes**:
+
+- **200** – no error
+- **404** – unknown task
+- **406** - node is not part of a swarm
+- **500** – server error
+
+## 4. Going further
+
+### 4.1 Inside `docker run`
+
+As an example, the `docker run` command line makes the following API calls:
+
+- Create the container
+
+- If the status code is 404, it means the image doesn't exist:
+  - Try to pull it.
+  - Then, retry to create the container.
+
+- Start the container.
+
+- If you are not in detached mode:
+  - Attach to the container, using `logs=1` (to have `stdout` and
+    `stderr` from the container's start) and `stream=1`
+
+- If in detached mode or only `stdin` is attached, display the container's id.
+
+### 4.2 Hijacking
+
+In this version of the API, `/attach` uses hijacking to transport `stdin`,
+`stdout`, and `stderr` on the same socket.
+
+To hint potential proxies about connection hijacking, the Docker client sends
+connection upgrade headers, similar to websocket.
+
+    Upgrade: tcp
+    Connection: Upgrade
+
+When the Docker daemon detects the `Upgrade` header, it switches its status code
+from **200 OK** to **101 UPGRADED** and resends the same headers.
+
+
+### 4.3 CORS Requests
+
+To allow cross origin requests to the Engine API, pass the desired values to
+`--api-cors-header` when running Docker in daemon mode.
Setting `*` (asterisk) allows all origins;
+the default (blank) means CORS is disabled.
+
+    $ dockerd -H="192.168.1.9:2375" --api-cors-header="http://foo.bar"
diff --git a/vendor/github.com/moby/moby/docs/api/version-history.md b/vendor/github.com/moby/moby/docs/api/version-history.md
new file mode 100644
index 000000000..e5d9d8d4c
--- /dev/null
+++ b/vendor/github.com/moby/moby/docs/api/version-history.md
@@ -0,0 +1,334 @@
+---
+title: "Engine API version history"
+description: "Documentation of changes that have been made to Engine API."
+keywords: "API, Docker, rcli, REST, documentation"
+---
+
+
+## v1.31 API changes
+
+[Docker Engine API v1.31](https://docs.docker.com/engine/api/v1.31/) documentation
+
+* `DELETE /secrets/(name)` now returns status code 404 instead of 500 when the secret does not exist.
+* `POST /secrets/create` now returns status code 409 instead of 500 when creating an already existing secret.
+* `POST /secrets/(name)/update` now returns status code 400 instead of 500 when updating anything other than a secret's labels (only labels can be updated).
+* `POST /nodes/(name)/update` now returns status code 400 instead of 500 when demoting the last node fails.
+* `GET /networks/(id or name)` now takes an optional query parameter `scope` that will filter the network based on the scope (`local`, `swarm`, or `global`).
+* `POST /session` is a new endpoint that can be used for running interactive long-running protocols between the client and
+  the daemon. This endpoint is experimental and only available if the daemon is started with experimental features
+  enabled.
+* `GET /images/(name)/get` now includes an `ImageMetadata` field which contains image metadata that is local to the engine and not part of the image config.
+* `POST /services/create` now accepts a `PluginSpec` when `TaskTemplate.Runtime` is set to `plugin`.
+* `GET /events` now supports config events `create`, `update` and `remove` that are emitted when users create, update or remove a config.
+* `GET /volumes/` and `GET /volumes/{name}` now return a `CreatedAt` field,
+  containing the date/time the volume was created. This field is omitted if the
+  creation date/time for the volume is unknown. For volumes with scope "global",
+  this field represents the creation date/time of the local _instance_ of the
+  volume, which may differ from instances of the same volume on different nodes.
+* `GET /system/df` now returns a `CreatedAt` field for `Volumes`. Refer to the
+  `/volumes/` endpoint for a description of this field.
+
+## v1.30 API changes
+
+[Docker Engine API v1.30](https://docs.docker.com/engine/api/v1.30/) documentation
+
+* `GET /info` now returns the list of supported logging drivers, including plugins.
+* `GET /info` and `GET /swarm` now return the cluster-wide swarm CA info if the node is in a swarm: the cluster root CA certificate, and the cluster TLS
+  leaf certificate issuer's subject and public key. It also displays the desired CA signing certificate, if any was provided as part of the spec.
+* `POST /build/` now (when not silent) produces an `Aux` message in the JSON output stream with payload `types.BuildResult` for each image produced. The final such message will reference the image resulting from the build.
+* `GET /nodes` and `GET /nodes/{id}` now return additional information about swarm TLS info if the node is part of a swarm: the trusted root CA, and the
+  issuer's subject and public key.
+* `GET /distribution/(name)/json` is a new endpoint that returns a JSON output stream with payload `types.DistributionInspect` for an image name.
+## v1.30 API changes
+
+[Docker Engine API v1.30](https://docs.docker.com/engine/api/v1.30/) documentation
+
+* `GET /info` now returns the list of supported logging drivers, including plugins.
+* `GET /info` and `GET /swarm` now return the cluster-wide swarm CA info if the node is in a swarm: the cluster root CA certificate, and the cluster TLS
+  leaf certificate issuer's subject and public key. It also displays the desired CA signing certificate, if any was provided as part of the spec.
+* `POST /build/` now (when not silent) produces an `Aux` message in the JSON output stream with payload `types.BuildResult` for each image produced. The final such message will reference the image resulting from the build.
+* `GET /nodes` and `GET /nodes/{id}` now return additional information about swarm TLS info if the node is part of a swarm: the trusted root CA, and the
+  issuer's subject and public key.
+* `GET /distribution/(name)/json` is a new endpoint that returns a JSON output stream with payload `types.DistributionInspect` for an image name. It includes a descriptor with the digest, and supported platforms retrieved by directly contacting the registry.
+* `POST /swarm/update` now accepts 3 additional parameters as part of the swarm spec's CA configuration: the desired CA certificate for
+  the swarm, the desired CA key for the swarm (if not using an external certificate), and an optional parameter to force swarm to
+  generate and rotate to a new CA certificate/key pair.
+* `POST /service/create` and `POST /services/(id or name)/update` now take the field `Platforms` as part of the service `Placement`, allowing you to specify platforms supported by the service.
+* `POST /containers/(name)/wait` now accepts a `condition` query parameter to indicate which state change condition to wait for. Also, response headers are now returned immediately to acknowledge that the server has registered a wait callback for the client.
+* `POST /swarm/init` now accepts a `DataPathAddr` property to set the IP address or network interface to use for data traffic.
+* `POST /swarm/join` now accepts a `DataPathAddr` property to set the IP address or network interface to use for data traffic.
+* `GET /events` now supports service, node, and secret events, which are emitted when users create, update, or remove services, nodes, or secrets.
+* `GET /events` now supports a network remove event, which is emitted when users remove a swarm-scoped network.
+* `GET /events` now supports a filter type `scope`, whose supported values are `swarm` and `local`.
+
+## v1.29 API changes
+
+[Docker Engine API v1.29](https://docs.docker.com/engine/api/v1.29/) documentation
+
+* `DELETE /networks/(name)` now allows removing the ingress network, the one used to provide the routing mesh.
+* `POST /networks/create` now supports creating the ingress network, by specifying an `Ingress` boolean field. As of now this is supported only when using the overlay network driver.
+* `GET /networks/(name)` now returns an `Ingress` field showing whether the network is the ingress one.
+* `GET /networks/` now supports a `scope` filter to filter networks based on the network mode (`swarm`, `global`, or `local`).
+* `POST /containers/create`, `POST /service/create`, and `POST /services/(id or name)/update` now take the field `StartPeriod` as a part of the `HealthConfig`, allowing specification of a period during which the container should not be considered unhealthy even if health checks do not pass.
+* `GET /services/(id)` now accepts an `insertDefaults` query parameter to merge default values into the service inspect output.
+* `POST /containers/prune`, `POST /images/prune`, `POST /volumes/prune`, and `POST /networks/prune` now support a `label` filter to filter containers, images, volumes, or networks based on the label. The format of the label filter is `label=<key>`/`label=<key>=<value>` to remove those with the specified labels, or `label!=<key>`/`label!=<key>=<value>` to remove those without the specified labels (see the sketch after this list).
+* `POST /services/create` now accepts `Privileges` as part of `ContainerSpec`. Privileges currently include
+  `CredentialSpec` and `SELinuxContext`.
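+As a sketch of the `label` filter on the prune endpoints (illustrative only;
+the daemon address and label are assumptions), the filter is passed as a
+JSON-encoded map in the `filters` query parameter:
+
+    package main
+
+    import (
+    	"encoding/json"
+    	"fmt"
+    	"net/http"
+    	"net/url"
+    )
+
+    func main() {
+    	// Prune only stopped containers carrying the label env=dev.
+    	filters, _ := json.Marshal(map[string][]string{
+    		"label": {"env=dev"},
+    	})
+    	u := "http://127.0.0.1:2375/v1.29/containers/prune?filters=" +
+    		url.QueryEscape(string(filters))
+
+    	resp, err := http.Post(u, "application/json", nil)
+    	if err != nil {
+    		panic(err)
+    	}
+    	defer resp.Body.Close()
+    	fmt.Println(resp.Status)
+    }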
+## v1.28 API changes
+
+[Docker Engine API v1.28](https://docs.docker.com/engine/api/v1.28/) documentation
+
+* `POST /containers/create` now includes a `Consistency` field to specify the consistency level for each `Mount`, with possible values `default`, `consistent`, `cached`, or `delegated`.
+* `POST /containers/create` now takes a `DeviceCgroupRules` field in `HostConfig`, allowing custom device cgroup rules to be set for the created container.
+* The optional query parameter `verbose` for `GET /networks/(id or name)` will now list all services with all the tasks, including the non-local tasks on the given network.
+* `GET /containers/(id or name)/attach/ws` now returns WebSocket in binary frame format for API version >= v1.28, and in text frame format for API version < v1.28, for backward compatibility.
+* `GET /networks` is optimized to only return the list of all networks and network-specific information. The list of all containers attached to a specific network is removed from this API and is only available using the network-specific `GET /networks/{network-id}`.
+* `GET /containers/json` now supports `publish` and `expose` filters to filter containers that expose or publish certain ports.
+* `POST /services/create` and `POST /services/(id or name)/update` now accept the `ReadOnly` parameter, which mounts the container's root filesystem as read only.
+* `POST /build` now accepts an `extrahosts` parameter to specify a host-to-IP mapping to use during the build.
+* `POST /services/create` and `POST /services/(id or name)/update` now accept a `rollback` value for `FailureAction`.
+* `POST /services/create` and `POST /services/(id or name)/update` now accept an optional `RollbackConfig` object which specifies rollback options.
+* `GET /services` now supports a `mode` filter to filter services based on the service mode (either `global` or `replicated`).
+* `POST /containers/(name)/update` now supports updating `NanoCPUs`, which represents CPU quota in units of 10^-9 CPUs.
+
+## v1.27 API changes
+
+[Docker Engine API v1.27](https://docs.docker.com/engine/api/v1.27/) documentation
+
+* `GET /containers/(id or name)/stats` now includes an `online_cpus` field in both `precpu_stats` and `cpu_stats`. If this field is `nil`, then for compatibility with older daemons the length of the corresponding `cpu_usage.percpu_usage` array should be used.
+
+## v1.26 API changes
+
+[Docker Engine API v1.26](https://docs.docker.com/engine/api/v1.26/) documentation
+
+* `POST /plugins/(plugin name)/upgrade` upgrades a plugin.
+
+## v1.25 API changes
+
+[Docker Engine API v1.25](https://docs.docker.com/engine/api/v1.25/) documentation
+
+* The API version is now required in all API calls. Instead of just requesting, for example, the URL `/containers/json`, you must now request `/v1.25/containers/json`.
+* `GET /version` now returns `MinAPIVersion`.
+* `POST /build` accepts a `networkmode` parameter to specify the network used during the build.
+* `GET /images/(name)/json` now returns `OsVersion` if populated.
+* `GET /info` now returns `Isolation`.
+* `POST /containers/create` now takes `AutoRemove` in `HostConfig` to enable auto-removal of the container on the daemon side when the container's process exits.
+* `GET /containers/json` and `GET /containers/(id or name)/json` now return `"removing"` as a value for the `State.Status` field if the container is being removed. Previously, "exited" was returned as status.
+* `GET /containers/json` now accepts `removing` as a valid value for the `status` filter.
+* `GET /containers/json` now supports filtering containers by `health` status.
+* `DELETE /volumes/(name)` now accepts a `force` query parameter to force removal of volumes that were already removed out of band by the volume driver plugin.
+* `POST /containers/create/` and `POST /containers/(name)/update` now validate restart policies.
+* `POST /containers/create` now validates `IPAMConfig` in `NetworkingConfig`, and returns an error for invalid IPv4 and IPv6 addresses (`--ip` and `--ip6` in `docker create/run`).
+* `POST /containers/create` now takes a `Mounts` field in `HostConfig` which replaces `Binds`, `Volumes`, and `Tmpfs`. *note*: `Binds`, `Volumes`, and `Tmpfs` are still available and can be combined with `Mounts`.
+* `POST /build` now performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. Note that this change is _unversioned_ and applied to all API versions.
+* `POST /build` accepts a `cachefrom` parameter to specify images used for the build cache.
+* The `GET /networks/` endpoint now correctly returns a list of *all* networks,
+  instead of the default network, if a trailing slash is provided but no `name`
+  or `id`.
+* The `DELETE /containers/(name)` endpoint now returns an error of `removal of container name is already in progress` with status code 400 when the container name is in a state of removal in progress.
+* `GET /containers/json` now supports an `is-task` filter to filter
+  containers that are tasks (part of a service in swarm mode).
+* `POST /containers/create` now takes a `StopTimeout` field.
+* `POST /services/create` and `POST /services/(id or name)/update` now accept `Monitor` and `MaxFailureRatio` parameters, which control the response to failures during service updates.
+* `POST /services/(id or name)/update` now accepts a `ForceUpdate` parameter inside the `TaskTemplate`, which causes the service to be updated even if there are no changes which would ordinarily trigger an update.
+* `POST /services/create` and `POST /services/(id or name)/update` now return a `Warnings` array.
+* `GET /networks/(name)` now returns the field `Created` in the response, showing the network creation time.
+* `POST /containers/(id or name)/exec` now accepts an `Env` field, which holds a list of environment variables to be set in the context of the command execution.
+* `GET /volumes`, `GET /volumes/(name)`, and `POST /volumes/create` now return the `Options` field, which holds the driver-specific options to use when creating the volume.
+* `GET /exec/(id)/json` now returns `Pid`, which is the system pid for the exec'd process.
+* `POST /containers/prune` prunes stopped containers.
+* `POST /images/prune` prunes unused images.
+* `POST /volumes/prune` prunes unused volumes.
+* `POST /networks/prune` prunes unused networks.
+* Every API response now includes a `Docker-Experimental` header specifying whether experimental features are enabled (value can be `true` or `false`).
+* Every API response now includes an `API-Version` header specifying the default API version of the server.
+* The `hostConfig` option now accepts the fields `CpuRealtimePeriod` and `CpuRtRuntime` to allocate CPU runtime to RT tasks when `CONFIG_RT_GROUP_SCHED` is enabled in the kernel.
+* The `SecurityOptions` field within the `GET /info` response now includes `userns` if user namespaces are enabled in the daemon.
+* `GET /nodes` and `GET /node/(id or name)` now return `Addr` as part of a node's `Status`, which is the address that the node connects to the manager from.
+* The `HostConfig` field now includes `NanoCPUs`, which represents CPU quota in units of 10^-9 CPUs.
+* `GET /info` now returns more structured information about security options.
+* The `HostConfig` field now includes `CpuCount`, which represents the number of CPUs available for execution by the container. Windows daemon only.
+* `POST /services/create` and `POST /services/(id or name)/update` now accept the `TTY` parameter, which allocates a pseudo-TTY in the container.
+* `POST /services/create` and `POST /services/(id or name)/update` now accept the `DNSConfig` parameter, which specifies DNS-related configurations in the resolver configuration file (resolv.conf) through `Nameservers`, `Search`, and `Options`.
+* `GET /networks/(id or name)` now includes the IP and name of all peer nodes for swarm-mode overlay networks.
+* `GET /plugins` lists plugins.
+* `POST /plugins/pull?name=<plugin name>` pulls a plugin.
+* `GET /plugins/(plugin name)` inspects a plugin.
+* `POST /plugins/(plugin name)/set` configures a plugin.
+* `POST /plugins/(plugin name)/enable` enables a plugin.
+* `POST /plugins/(plugin name)/disable` disables a plugin.
+* `POST /plugins/(plugin name)/push` pushes a plugin.
+* `POST /plugins/create?name=(plugin name)` creates a plugin.
+* `DELETE /plugins/(plugin name)` deletes a plugin.
+* `POST /node/(id or name)/update` now accepts both `id` and `name` to identify the node to update.
+* `GET /images/json` now supports a `reference` filter.
+* `GET /secrets` returns information on the secrets.
+* `POST /secrets/create` creates a secret.
+* `DELETE /secrets/{id}` removes the secret `id`.
+* `GET /secrets/{id}` returns information on the secret `id`.
+* `POST /secrets/{id}/update` updates the secret `id`.
+* `POST /services/(id or name)/update` now accepts a service name or a prefix of the service id as a parameter.
+* `POST /containers/create` added 2 built-in log-opts that work on all logging drivers:
+  `mode` (`blocking`|`non-blocking`), and `max-buffer-size` (e.g. `2m`), which enables a non-blocking log buffer.
+
+## v1.24 API changes
+
+[Docker Engine API v1.24](v1.24.md) documentation
+
+* `POST /containers/create` now takes a `StorageOpt` field.
+* `GET /info` now returns a `SecurityOptions` field, showing if `apparmor`, `seccomp`, or `selinux` is supported.
+* `GET /info` no longer returns the `ExecutionDriver` property. This property was no longer used after integration
+  with ContainerD in Docker 1.11.
+* `GET /networks` now supports filtering by `label` and `driver`.
+* `GET /containers/json` now supports filtering containers by `network` name or id.
+* `POST /containers/create` now takes `IOMaximumBandwidth` and `IOMaximumIOps` fields. Windows daemon only.
+* `POST /containers/create` now returns an HTTP 400 "bad parameter" message
+  if no command is specified (instead of an HTTP 500 "server error").
+* `GET /images/search` now takes a `filters` query parameter.
+* `GET /events` now supports a `reload` event that is emitted when the daemon configuration is reloaded.
+* `GET /events` now supports filtering by daemon name or ID.
+* `GET /events` now supports a `detach` event that is emitted on detaching from the container process.
+* `GET /events` now supports an `exec_detach` event that is emitted on detaching from the exec process.
+* `GET /images/json` now supports the filters `since` and `before`.
+* `POST /containers/(id or name)/start` no longer accepts a `HostConfig`.
+* `POST /images/(name)/tag` no longer has a `force` query parameter.
+* `GET /images/search` now supports a `limit` on the maximum number of returned search results.
+* `POST /containers/{name:.*}/copy` is now removed and errors out starting from this API version.
+* API errors are now returned as JSON instead of plain text.
+* `POST /containers/create` and `POST /containers/(id)/start` allow you to configure kernel parameters (sysctls) for use in the container.
+* `POST /containers/<name>/exec` and `POST /exec/<name>/start`
+  no longer expect a "Container" field to be present. This property was not used
+  and is no longer sent by the docker client.
+* `POST /containers/create/` now validates the hostname (it should be a valid RFC 1123 hostname).
+* The `POST /containers/create/` `HostConfig.PidMode` field now accepts `container:<name|id>`,
+  to have the container join the PID namespace of an existing container.
+
+## v1.23 API changes
+
+[Docker Engine API v1.23](v1.23.md) documentation
+
+* `GET /containers/json` returns the state of the container, one of `created`, `restarting`, `running`, `paused`, `exited`, or `dead`.
+* `GET /containers/json` returns the mount points for the container.
+* `GET /networks/(name)` now returns an `Internal` field showing whether the network is internal or not.
+* `GET /networks/(name)` now returns an `EnableIPv6` field showing whether the network has IPv6 enabled or not.
+* `POST /containers/(name)/update` now supports updating the container's restart policy.
+* `POST /networks/create` now supports enabling IPv6 on the network by setting the `EnableIPv6` field (doing this with a label will no longer work).
+* `GET /info` now returns a `CgroupDriver` field showing what cgroup driver the daemon is using: `cgroupfs` or `systemd`.
+* `GET /info` now returns a `KernelMemory` field, showing if "kernel memory limit" is supported.
+* `POST /containers/create` now takes a `PidsLimit` field, if the kernel is >= 4.3 and the pids cgroup is supported.
+* `GET /containers/(id or name)/stats` now returns `pids_stats`, if the kernel is >= 4.3 and the pids cgroup is supported (see the sketch after this list).
+* `POST /containers/create` now allows you to override user namespace remapping and use privileged options for the container.
+* `POST /containers/create` now allows specifying `nocopy` for named volumes, which disables automatic copying from the container path to the volume.
+* `POST /auth` now returns an `IdentityToken` when supported by a registry.
+* `POST /containers/create` with both `Hostname` and `Domainname` fields specified will result in the container's hostname being set to `Hostname`, rather than `Hostname.Domainname`.
+* `GET /volumes` now supports more filters; the newly added filters are `name` and `driver`.
+* `GET /containers/(id or name)/logs` now accepts a `details` query parameter to stream the extra attributes that were provided to the container's `LogOpts`, such as environment variables and labels, with the logs.
+* `POST /images/load` now returns progress information as a JSON stream, and has a `quiet` query parameter to suppress progress details.
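+The sketch below (illustrative, not from the original changelog) reads the
+`pids_stats` section mentioned above from the stats stream; the daemon address
+and container ID are placeholders, and only the fields used here are decoded:
+
+    package main
+
+    import (
+    	"encoding/json"
+    	"fmt"
+    	"net/http"
+    )
+
+    func main() {
+    	resp, err := http.Get("http://127.0.0.1:2375/v1.23/containers/4fa6e0f0c678/stats")
+    	if err != nil {
+    		panic(err)
+    	}
+    	defer resp.Body.Close()
+
+    	// The endpoint streams one JSON document per second; decode a few.
+    	dec := json.NewDecoder(resp.Body)
+    	for i := 0; i < 3; i++ {
+    		var sample struct {
+    			PidsStats struct {
+    				Current uint64 `json:"current"`
+    			} `json:"pids_stats"`
+    		}
+    		if err := dec.Decode(&sample); err != nil {
+    			break
+    		}
+    		fmt.Println("pids:", sample.PidsStats.Current)
+    	}
+    }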
+## v1.22 API changes
+
+[Docker Engine API v1.22](v1.22.md) documentation
+
+* `POST /container/(name)/update` updates the resources of a container.
+* `GET /containers/json` supports the `isolation` filter on Windows.
+* `GET /containers/json` now returns the list of networks of containers.
+* `GET /info` now returns `Architecture` and `OSType` fields, providing information
+  about the host architecture and operating system type that the daemon runs on.
+* `GET /networks/(name)` now returns a `Name` field for each container attached to the network.
+* `GET /version` now returns the `BuildTime` field in RFC3339Nano format to make it
+  consistent with other date/time values returned by the API.
+* `AuthConfig` now supports a `registrytoken` for token-based authentication.
+* `POST /containers/create` now has a 4M minimum value limit for `HostConfig.KernelMemory`.
+* Pushes initiated with `POST /images/(name)/push` and pulls initiated with `POST /images/create`
+  will be cancelled if the HTTP connection making the API request is closed before
+  the push or pull completes.
+* `POST /containers/create` now allows you to set a read/write rate limit for a
+  device (in bytes per second or IO per second).
+* `GET /networks` now supports filtering by `name`, `id`, and `type`.
+* `POST /containers/create` now allows you to set the static IPv4 and/or IPv6 address for the container.
+* `POST /networks/(id)/connect` now allows you to set the static IPv4 and/or IPv6 address for the container.
+* `GET /info` now includes the number of containers running, stopped, and paused.
+* `POST /networks/create` now supports restricting external access to the network by setting the `Internal` field.
+* `POST /networks/(id)/disconnect` now includes a `Force` option to forcefully disconnect a container from a network.
+* `GET /containers/(id)/json` now returns the `NetworkID` of containers.
+* `POST /networks/create` now supports an options field in the IPAM config that provides options
+  for custom IPAM plugins.
+* `GET /networks/{network-id}` now returns IPAM config options for custom IPAM plugins, if any
+  are available.
+* `GET /networks/` now returns subnets info for user-defined networks.
+* `GET /info` can now return a `SystemStatus` field, useful for returning additional information about applications
+  that are built on top of engine.
+
+## v1.21 API changes
+
+[Docker Engine API v1.21](v1.21.md) documentation
+
+* `GET /volumes` lists volumes from all volume drivers.
+* `POST /volumes/create` creates a volume.
+* `GET /volumes/(name)` gets low-level information about a volume.
+* `DELETE /volumes/(name)` removes a volume with the specified name.
+* `VolumeDriver` was moved from `config` to `HostConfig` to make the configuration portable.
+* `GET /images/(name)/json` now returns information about an image's `RepoTags` and `RepoDigests`.
+* The `config` option now accepts the field `StopSignal`, which specifies the signal to use to kill a container.
+* `GET /containers/(id)/stats` will return networking information for each interface.
+* The `HostConfig` option now includes the `DnsOptions` field to configure the container's DNS options.
+* `POST /build` now optionally takes a serialized map of build-time variables.
+* `GET /events` now includes a `timenano` field, in addition to the existing `time` field.
+* `GET /events` now supports filtering by image and container labels.
+* `GET /info` now lists engine version information and returns the information of `CPUShares` and `Cpuset`.
+* `GET /containers/json` will return the `ImageID` of the image used by the container.
+* `POST /exec/(name)/start` will now return an HTTP 409 when the container is either stopped or paused.
+* `POST /containers/create` now takes `KernelMemory` in `HostConfig` to specify the kernel memory limit.
+* `GET /containers/(name)/json` now accepts a `size` parameter. Setting this parameter to '1' returns container size information in the `SizeRw` and `SizeRootFs` fields.
+* `GET /containers/(name)/json` now returns a `NetworkSettings.Networks` field,
+  detailing network settings per network.
This field deprecates the
+  `NetworkSettings.Gateway`, `NetworkSettings.IPAddress`,
+  `NetworkSettings.IPPrefixLen`, and `NetworkSettings.MacAddress` fields, which
+  are still returned for backward compatibility but will be removed in a future version.
+* `GET /exec/(id)/json` now returns a `NetworkSettings.Networks` field,
+  detailing network settings per network. This field deprecates the
+  `NetworkSettings.Gateway`, `NetworkSettings.IPAddress`,
+  `NetworkSettings.IPPrefixLen`, and `NetworkSettings.MacAddress` fields, which
+  are still returned for backward compatibility but will be removed in a future version.
+* The `HostConfig` option now includes the `OomScoreAdj` field for adjusting the
+  badness heuristic. This heuristic selects which processes the OOM killer kills
+  under out-of-memory conditions.
+
+## v1.20 API changes
+
+[Docker Engine API v1.20](v1.20.md) documentation
+
+* `GET /containers/(id)/archive` gets an archive of filesystem content from a container.
+* `PUT /containers/(id)/archive` uploads an archive of content to be extracted into
+  an existing directory inside a container's filesystem.
+* `POST /containers/(id)/copy` is deprecated in favor of the above `archive`
+  endpoint, which can be used to download files and directories from a container.
+* The `hostConfig` option now accepts the field `GroupAdd`, which specifies a
+  list of additional groups that the container process will run as.
+
+## v1.19 API changes
+
+[Docker Engine API v1.19](v1.19.md) documentation
+
+* When the daemon detects a version mismatch with the client, usually when
+  the client is newer than the daemon, an HTTP 400 is now returned instead
+  of a 404.
+* `GET /containers/(id)/stats` now accepts a `stream` bool to get only one set of stats and disconnect.
+* `GET /containers/(id)/logs` now accepts a `since` timestamp parameter.
+* `GET /info` The fields `Debug`, `IPv4Forwarding`, `MemoryLimit`, and
+  `SwapLimit` are now returned as booleans instead of as ints. In addition, the
+  endpoint now returns the new boolean fields `CpuCfsPeriod`, `CpuCfsQuota`, and
+  `OomKillDisable`.
+* The `hostConfig` option now accepts the fields `CpuPeriod` and `CpuQuota`.
+* `POST /build` accepts `cpuperiod` and `cpuquota` options.
+
+## v1.18 API changes
+
+[Docker Engine API v1.18](v1.18.md) documentation
+
+* `GET /version` now returns `Os`, `Arch`, and `KernelVersion`.
+* `POST /containers/create` and `POST /containers/(id)/start` allow you to set ulimit settings for use in the container (see the sketch below).
+* `GET /info` now returns `SystemTime`, `HttpProxy`, `HttpsProxy`, and `NoProxy`.
+* `GET /images/json` added a `RepoDigests` field to include image digest information.
+* `POST /build` can now set resource constraints for all containers created for the build.
+* `CgroupParent` can be passed in the host config to set up container cgroups under a specific cgroup.
+* Closing the HTTP request to `POST /build` now cancels the build.
+* `POST /containers/(id)/exec` now includes a `Warnings` field in the response.
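+As a closing illustration (not part of the original changelog), the sketch
+below sets ulimits at container create as described in the v1.18 notes; the
+daemon address, image, and limit values are placeholder assumptions:
+
+    package main
+
+    import (
+    	"bytes"
+    	"encoding/json"
+    	"fmt"
+    	"net/http"
+    )
+
+    func main() {
+    	payload, _ := json.Marshal(map[string]interface{}{
+    		"Image": "busybox",
+    		"HostConfig": map[string]interface{}{
+    			"Ulimits": []map[string]interface{}{
+    				// Cap open file descriptors inside the container.
+    				{"Name": "nofile", "Soft": 1024, "Hard": 2048},
+    			},
+    		},
+    	})
+
+    	resp, err := http.Post(
+    		"http://127.0.0.1:2375/v1.18/containers/create",
+    		"application/json",
+    		bytes.NewReader(payload),
+    	)
+    	if err != nil {
+    		panic(err)
+    	}
+    	defer resp.Body.Close()
+    	fmt.Println(resp.Status)
+    }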
diff --git a/vendor/github.com/moby/moby/docs/static_files/contributors.png b/vendor/github.com/moby/moby/docs/static_files/contributors.png
new file mode 100644
index 0000000000000000000000000000000000000000..63c0a0c09b58bce2e1ade867760a937612934202
GIT binary patch
literal 23100
(binary image data omitted)

literal 0
HcmV?d00001

diff --git a/vendor/github.com/moby/moby/docs/static_files/docker-logo-compressed.png b/vendor/github.com/moby/moby/docs/static_files/docker-logo-compressed.png
new file mode 100644
index 0000000000000000000000000000000000000000..717d09d773cc46ff8297d06f66d9aa855453f35a
GIT binary patch
literal 4972
(binary image data omitted)

literal 0
HcmV?d00001

diff --git a/vendor/github.com/moby/moby/docs/static_files/moby-project-logo.png b/vendor/github.com/moby/moby/docs/static_files/moby-project-logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..2914186efdd0d3a6223efed318a9e6f09d79359e
GIT binary patch
literal 20458
(binary image data omitted)
z%MA{ObQTYxz!!A2oj0rKpbfS^A=Tmu1iQ)VM&=f*=U7n$%jwzAPQK!7jC)B|G8u%g zitgIG_{L52;2kps8>JBcHCYvk!`Ir2NBN$vE}t#G)w>L}!GN3t98qs}}@gjfebVY3F+SKs6s^+LrsnmTs+h{HB zR-iShZe{E_VAptc-kuRQTXn*mV6=+ee8!A=hSKCFG>#hy=1o*ON!oL`I>d(=T1<@v zi!h7r$X9xrimdLn2KV!iPyp)#+|;JbW;z^Fi~QL(DIlDM%@#A{u^`jNuNFK&njzv~ zM^5Dp$3620J1}?~A^;@CtI7RyWjc0|DN%xO*8wQHJljN2)HNl@E$Og z1jLBWfh^bbgM2JeNnuUDP$lVRVc~u0)dA48cOZuJl6q90T|+2m?ebvMdpQklv2fW% z*#usb-I-7DDei+IwKX>srl3kPlM>GiVvlzjh>b3@Te<`MKz!^=j%usvf&B9 zWRe-UI;kI%iTiTcuCJwDV@(%2rRaOvvGEez1(P5AMn%V}d?tD4E{VoP$w|kE7i~!2 zy;K^@p63O4iv3G}1k^BxB0YGXt)ziPCdXO8TQuXznNJfYZQ*`}z=7AmIx%nOb0sF{ zod5Q6EpH?rZ7j6Yq+KU@iGsr#RJTYFCU1CeltiUhHwRyx{axeQeKq|1#rQgCZjUy& z2`>(3L9&JRqe^vOXgAgv*7?j3KBxu_A>6iizDx!nS;aa2Tu=O zDa`2iuvl5fjcW?kydc92zTEQWlh>|+E{zt=a)z+`vYxM#**~TP742^1+KguYEO-^P zptGYu1H>+3zqG%seS=$H_{!c?XP-9KH@fz#6nT8Lw+%6H@*YCLnZA0ajmP?4Eof8; zJ;ye^YrQRR`>7FikLK`=FLXF|;%zIujmJ6eoMb-iR6isGxR+C_-XUF>BQihL1A7%ygE{TwZpOdtcuB(iaK54kw8OuB>WyIXLILxV0{Ak$F#09_ zubm{w=yN4kW;on&8LI&?e~Y`jcJo@e0S6*(_BrA$(hzy{;giG0MhEg*LnnAll}nU% z)R7|WZC@Kn_shP&JpKMf0LQxzTAGsfF8DOb{%>~MBG65zW3tXkOfe5=`>VlI!O!}S z{9Pd51BEgazLcITt_@JW7aX_2bFnkXOK)n7#O-4yIOY2OLxXKn>-OMLl3dM zhXMW5BFon1q#Wtz*9jAuF8cy*GWw2SjDrKMf(OdKKgUkzW#w8O99J@uPwFPEUB#=aU68Wg&bp( zaL5H_c_58^QCZmJq53z$eElK{-N0P4R?0#SEznthaFayi%`WCS zk{p_rjPL*~1MVnI08c|GHSJLc^dTYWY)qIHDHnCW)ga`Y`IB<%<`)k88vmrB_|Yo* zHjRKIVfu10;ms-YnV20-mY$RcsZqu?ug^+H<<~I@R%WO3bpH$T#Uay0HLdTDR&X|L&a>X2t5^RsV7Sr&0p9YHZ*J%~%joua3^w)i~&YfkH=K>zV%+4uPTZ z_>AX&vnJqLaO6b>R6~1D&fEKaiuku4%7%8nIEQzkohi#UWg%VY3E%sYh?Ja6c!;oe z|ALFOJNf@@S0G=d{VI&y^aOBA^9WvJLm`XWj0+V&aJNcoU92*XXq5F;LMlsEcJy@d z&8QzsoSrr`UP(Y6KE@@b3qzNgYt{~QyK-Dh#)n!9x(k_jAcE?7N*mLs2+P%^fj@j* z_P0EeFZqd(bFy$!*sc4>Rh9r_LN{jWZ>r{)Dz~WzK;y8%(@!^(0hvpD@0r6pAb3& /dev/null +! taskkill -F -IM go.exe -T >& /dev/null +! taskkill -F -IM docker.exe -T >& /dev/null + +# Remove everything +! cd /c/jenkins/gopath/src/github.com/docker/docker +! rm -rfd * >& /dev/null +! rm -rfd .* >& /dev/null + +echo INFO: Cleanup complete +exit 0 \ No newline at end of file diff --git a/vendor/github.com/moby/moby/hack/Jenkins/W2L/setup.sh b/vendor/github.com/moby/moby/hack/Jenkins/W2L/setup.sh new file mode 100644 index 000000000..a3d86b857 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/Jenkins/W2L/setup.sh @@ -0,0 +1,309 @@ +# Jenkins CI script for Windows to Linux CI. +# Heavily modified by John Howard (@jhowardmsft) December 2015 to try to make it more reliable. +set +xe +SCRIPT_VER="Wed Apr 20 18:30:19 UTC 2016" + +# TODO to make (even) more resilient: +# - Wait for daemon to be running before executing docker commands +# - Check if jq is installed +# - Make sure bash is v4.3 or later. Can't do until all Azure nodes on the latest version +# - Make sure we are not running as local system. Can't do until all Azure nodes are updated. +# - Error if docker versions are not equal. Can't do until all Azure nodes are updated +# - Error if go versions are not equal. Can't do until all Azure nodes are updated. +# - Error if running 32-bit posix tools. Probably can take from bash --version and check contains "x86_64" +# - Warn if the CI directory cannot be deleted afterwards. Otherwise turdlets are left behind +# - Use %systemdrive% ($SYSTEMDRIVE) rather than hard code to c: for TEMP +# - Consider cross building the Windows binary and copy across. That's a bit of a heavy lift. Only reason +# for doing that is that it mirrors the actual release process for docker.exe which is cross-built. 
+# However, should absolutely not be a problem if built natively, so nit-picking.
+# - Tidy up of images and containers. Either here, or in the teardown script.
+
+ec=0
+uniques=1
+echo INFO: Started at `date`. Script version $SCRIPT_VER
+
+
+# !README!
+# There are two daemons running on the remote Linux host:
+# - outer: specified by DOCKER_HOST, this is the daemon that will build and run the inner docker daemon
+#   from the sources matching the PR.
+# - inner: runs on the host network, on a port number similar to that of DOCKER_HOST but the last two digits are inverted
+#   (2357 if DOCKER_HOST had port 2375; and 2367 if DOCKER_HOST had port 2376).
+# The windows integration tests are run against this inner daemon.
+
+# get the ip, inner and outer ports.
+ip="${DOCKER_HOST#*://}"
+port_outer="${ip#*:}"
+# inner port is like outer port with last two digits inverted.
+port_inner=$(echo "$port_outer" | sed -E 's/(.)(.)$/\2\1/')
+ip="${ip%%:*}"
+
+echo "INFO: IP=$ip PORT_OUTER=$port_outer PORT_INNER=$port_inner"
+
+# If TLS is enabled
+if [ -n "$DOCKER_TLS_VERIFY" ]; then
+	protocol=https
+	if [ -z "$DOCKER_MACHINE_NAME" ]; then
+		ec=1
+		echo "ERROR: DOCKER_MACHINE_NAME is undefined"
+	fi
+	certs=$(echo ~/.docker/machine/machines/$DOCKER_MACHINE_NAME)
+	curlopts="--cacert $certs/ca.pem --cert $certs/cert.pem --key $certs/key.pem"
+	run_extra_args="-v tlscerts:/etc/docker"
+	daemon_extra_args="--tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem"
+else
+	protocol=http
+fi
+
+# Save for use by make.sh and scripts it invokes
+export MAIN_DOCKER_HOST="tcp://$ip:$port_inner"
+
+# Verify we can get the remote node to respond to _ping
+if [ $ec -eq 0 ]; then
+	reply=`curl -s $curlopts $protocol://$ip:$port_outer/_ping`
+	if [ "$reply" != "OK" ]; then
+		ec=1
+		echo "ERROR: Failed to get an 'OK' response from the docker daemon on the Linux node"
+		echo "       at $ip:$port_outer when called with an http request for '_ping'. This implies that"
+		echo "       either the daemon has crashed/is not running, or the Linux node is unavailable."
+		echo
+		echo "       A regular ping to the remote Linux node is below. It should reply. If not, the"
+		echo "       machine cannot be reached at all and may have crashed. If it does reply, it is"
+		echo "       likely a case of the Linux daemon not running or having crashed, which requires"
+		echo "       further investigation."
+		echo
+		echo "       Try re-running this CI job, or ask on #docker-dev or #docker-maintainers"
+		echo "       for someone to perform further diagnostics, or take this node out of rotation."
+		echo
+		ping $ip
+	else
+		echo "INFO: The Linux node's outer daemon replied to a ping. Good!"
+	fi
+fi
+
+# Get the version from the remote node. Note this may fail if jq is not installed.
+# That's probably worth checking to make sure, just in case.
+if [ $ec -eq 0 ]; then
+	remoteVersion=`curl -s $curlopts $protocol://$ip:$port_outer/version | jq -c '.Version'`
+	echo "INFO: Remote daemon is running docker version $remoteVersion"
+fi
+
+# Compare versions. We should really fail if the result is not 1. Output at end of script.
+if [ $ec -eq 0 ]; then
+	uniques=`docker version | grep Version | /usr/bin/sort -u | wc -l`
+fi
+
+# Make sure we are in repo
+if [ $ec -eq 0 ]; then
+	if [ ! -d hack ]; then
+		echo "ERROR: Are you sure this is being launched from the root of a docker repository?"
+		echo "       If this is a Windows CI machine, it should be c:\jenkins\gopath\src\github.com\docker\docker."
+ echo " Current directory is `pwd`" + ec=1 + fi +fi + +# Are we in split binary mode? +if [ `grep DOCKER_CLIENTONLY Makefile | wc -l` -gt 0 ]; then + splitBinary=0 + echo "INFO: Running in single binary mode" +else + splitBinary=1 + echo "INFO: Running in split binary mode" +fi + + +# Get the commit has and verify we have something +if [ $ec -eq 0 ]; then + export COMMITHASH=$(git rev-parse --short HEAD) + echo INFO: Commit hash is $COMMITHASH + if [ -z $COMMITHASH ]; then + echo "ERROR: Failed to get commit hash. Are you sure this is a docker repository?" + ec=1 + fi +fi + +# Redirect to a temporary location. Check is here for local runs from Jenkins machines just in case not +# in the right directory where the repo is cloned. We also redirect TEMP to not use the environment +# TEMP as when running as a standard user (not local system), it otherwise exposes a bug in posix tar which +# will cause CI to fail from Windows to Linux. Obviously it's not best practice to ever run as local system... +if [ $ec -eq 0 ]; then + export TEMP=/c/CI/CI-$COMMITHASH + export TMP=$TEMP + /usr/bin/mkdir -p $TEMP # Make sure Linux mkdir for -p +fi + +# Tidy up time +if [ $ec -eq 0 ]; then + echo INFO: Deleting pre-existing containers and images... + + # Force remove all containers based on a previously built image with this commit + ! docker rm -f $(docker ps -aq --filter "ancestor=docker:$COMMITHASH") &>/dev/null + + # Force remove any container with this commithash as a name + ! docker rm -f $(docker ps -aq --filter "name=docker-$COMMITHASH") &>/dev/null + + # This SHOULD never happen, but just in case, also blow away any containers + # that might be around. + ! if [ ! $(docker ps -aq | wc -l) -eq 0 ]; then + echo WARN: There were some leftover containers. Cleaning them up. + ! docker rm -f $(docker ps -aq) + fi + + # Force remove the image if it exists + ! docker rmi -f "docker-$COMMITHASH" &>/dev/null +fi + +# Provide the docker version for debugging purposes. If these fail, game over. +# as the Linux box isn't responding for some reason. +if [ $ec -eq 0 ]; then + echo INFO: Docker version and info of the outer daemon on the Linux node + echo + docker version + ec=$? + if [ 0 -ne $ec ]; then + echo "ERROR: The main linux daemon does not appear to be running. Has the Linux node crashed?" + fi + echo +fi + +# Same as above, but docker info +if [ $ec -eq 0 ]; then + echo + docker info + ec=$? + if [ 0 -ne $ec ]; then + echo "ERROR: The main linux daemon does not appear to be running. Has the Linux node crashed?" + fi + echo +fi + +# build the daemon image +if [ $ec -eq 0 ]; then + echo "INFO: Running docker build on Linux host at $DOCKER_HOST" + if [ $splitBinary -eq 0 ]; then + set -x + docker build --rm --force-rm --build-arg APT_MIRROR=cdn-fastly.deb.debian.org -t "docker:$COMMITHASH" . + cat < +# See the blog post: https://blog.docker.com/2013/09/docker-can-now-run-within-docker/ +# +# This script should be executed inside a docker container in privileged mode +# ('docker run --privileged', introduced in docker 0.6). + +# Usage: dind CMD [ARG...] + +# apparmor sucks and Docker needs to know that it's in a container (c) @tianon +export container=docker + +if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then + mount -t securityfs none /sys/kernel/security || { + echo >&2 'Could not mount /sys/kernel/security.' + echo >&2 'AppArmor detection and --privileged mode might break.' + } +fi + +# Mount /tmp (conditionally) +if ! 
mountpoint -q /tmp; then + mount -t tmpfs none /tmp +fi + +if [ $# -gt 0 ]; then + exec "$@" +fi + +echo >&2 'ERROR: No command specified.' +echo >&2 'You probably want to run hack/make.sh, or maybe a shell?' diff --git a/vendor/github.com/moby/moby/hack/dockerfile/binaries-commits b/vendor/github.com/moby/moby/hack/dockerfile/binaries-commits new file mode 100644 index 000000000..84c5c0faa --- /dev/null +++ b/vendor/github.com/moby/moby/hack/dockerfile/binaries-commits @@ -0,0 +1,14 @@ +#!/bin/sh + +TOMLV_COMMIT=9baf8a8a9f2ed20a8e54160840c492f937eeaf9a + +# When updating RUNC_COMMIT, also update runc in vendor.conf accordingly +RUNC_COMMIT=2d41c047c83e09a6d61d464906feb2a2f3c52aa4 +CONTAINERD_COMMIT=3addd840653146c90a254301d6c3a663c7fd6429 +TINI_COMMIT=949e6facb77383876aeff8a6944dde66b3089574 +LIBNETWORK_COMMIT=7b2b1feb1de4817d522cc372af149ff48d25028e +VNDR_COMMIT=9909bb2b8a0b7ea464527b376dc50389c90df587 + +# CLI +DOCKERCLI_REPO=https://github.com/docker/cli +DOCKERCLI_COMMIT=3dfb8343b139d6342acfd9975d7f1068b5b1c3d3 diff --git a/vendor/github.com/moby/moby/hack/dockerfile/install-binaries.sh b/vendor/github.com/moby/moby/hack/dockerfile/install-binaries.sh new file mode 100755 index 000000000..370ec7ce4 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/dockerfile/install-binaries.sh @@ -0,0 +1,125 @@ +#!/bin/sh +set -e +set -x + +. $(dirname "$0")/binaries-commits + +RM_GOPATH=0 + +TMP_GOPATH=${TMP_GOPATH:-""} + +if [ -z "$TMP_GOPATH" ]; then + export GOPATH="$(mktemp -d)" + RM_GOPATH=1 +else + export GOPATH="$TMP_GOPATH" +fi + +# Do not build with ambient capabilities support +RUNC_BUILDTAGS="${RUNC_BUILDTAGS:-"seccomp apparmor selinux"}" + +install_runc() { + echo "Install runc version $RUNC_COMMIT" + git clone https://github.com/docker/runc.git "$GOPATH/src/github.com/opencontainers/runc" + cd "$GOPATH/src/github.com/opencontainers/runc" + git checkout -q "$RUNC_COMMIT" + make BUILDTAGS="$RUNC_BUILDTAGS" $1 + cp runc /usr/local/bin/docker-runc +} + +install_containerd() { + echo "Install containerd version $CONTAINERD_COMMIT" + git clone https://github.com/containerd/containerd.git "$GOPATH/src/github.com/containerd/containerd" + cd "$GOPATH/src/github.com/containerd/containerd" + git checkout -q "$CONTAINERD_COMMIT" + make $1 + cp bin/containerd /usr/local/bin/docker-containerd + cp bin/containerd-shim /usr/local/bin/docker-containerd-shim + cp bin/ctr /usr/local/bin/docker-containerd-ctr +} + +install_proxy() { + echo "Install docker-proxy version $LIBNETWORK_COMMIT" + git clone https://github.com/docker/libnetwork.git "$GOPATH/src/github.com/docker/libnetwork" + cd "$GOPATH/src/github.com/docker/libnetwork" + git checkout -q "$LIBNETWORK_COMMIT" + go build -ldflags="$PROXY_LDFLAGS" -o /usr/local/bin/docker-proxy github.com/docker/libnetwork/cmd/proxy +} + +install_dockercli() { + echo "Install docker/cli version $DOCKERCLI_COMMIT" + git clone "$DOCKERCLI_REPO" "$GOPATH/src/github.com/docker/cli" + cd "$GOPATH/src/github.com/docker/cli" + git checkout -q "$DOCKERCLI_COMMIT" + go build -o /usr/local/bin/docker github.com/docker/cli/cmd/docker +} + +for prog in "$@" +do + case $prog in + tomlv) + echo "Install tomlv version $TOMLV_COMMIT" + git clone https://github.com/BurntSushi/toml.git "$GOPATH/src/github.com/BurntSushi/toml" + cd "$GOPATH/src/github.com/BurntSushi/toml" && git checkout -q "$TOMLV_COMMIT" + go build -v -o /usr/local/bin/tomlv github.com/BurntSushi/toml/cmd/tomlv + ;; + + runc) + install_runc static + ;; + + runc-dynamic) + install_runc + ;; + + containerd) + 
install_containerd static
+		;;
+
+	containerd-dynamic)
+		install_containerd
+		;;
+
+	tini)
+		echo "Install tini version $TINI_COMMIT"
+		git clone https://github.com/krallin/tini.git "$GOPATH/tini"
+		cd "$GOPATH/tini"
+		git checkout -q "$TINI_COMMIT"
+		cmake .
+		make tini-static
+		cp tini-static /usr/local/bin/docker-init
+		;;
+
+	proxy)
+		(
+			export CGO_ENABLED=0
+			install_proxy
+		)
+		;;
+
+	proxy-dynamic)
+		PROXY_LDFLAGS="-linkmode=external" install_proxy
+		;;
+
+	vndr)
+		echo "Install vndr version $VNDR_COMMIT"
+		git clone https://github.com/LK4D4/vndr.git "$GOPATH/src/github.com/LK4D4/vndr"
+		cd "$GOPATH/src/github.com/LK4D4/vndr"
+		git checkout -q "$VNDR_COMMIT"
+		go build -v -o /usr/local/bin/vndr .
+		;;
+
+	dockercli)
+		install_dockercli
+		;;
+
+	*)
+		echo "Usage: $0 [tomlv|runc|runc-dynamic|containerd|containerd-dynamic|tini|proxy|proxy-dynamic|vndr|dockercli]"
+		exit 1
+
+	esac
+done
+
+if [ $RM_GOPATH -eq 1 ]; then
+	rm -rf "$GOPATH"
+fi
diff --git a/vendor/github.com/moby/moby/hack/generate-authors.sh b/vendor/github.com/moby/moby/hack/generate-authors.sh
new file mode 100755
index 000000000..680bdb7b3
--- /dev/null
+++ b/vendor/github.com/moby/moby/hack/generate-authors.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+set -e
+
+cd "$(dirname "$(readlink -f "$BASH_SOURCE")")/.."
+
+# see also ".mailmap" for how email addresses and names are deduplicated
+
+{
+	cat <<-'EOH'
+	# This file lists all individuals having contributed content to the repository.
+	# For how it is generated, see `hack/generate-authors.sh`.
+	EOH
+	echo
+	git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf
+} > AUTHORS
diff --git a/vendor/github.com/moby/moby/hack/generate-swagger-api.sh b/vendor/github.com/moby/moby/hack/generate-swagger-api.sh
new file mode 100755
index 000000000..9bbd8de5d
--- /dev/null
+++ b/vendor/github.com/moby/moby/hack/generate-swagger-api.sh
@@ -0,0 +1,27 @@
+#!/bin/sh
+set -eu
+
+swagger generate model -f api/swagger.yaml \
+    -t api -m types --skip-validator -C api/swagger-gen.yaml \
+    -n ErrorResponse \
+    -n GraphDriverData \
+    -n IdResponse \
+    -n ImageDeleteResponseItem \
+    -n ImageSummary \
+    -n Plugin -n PluginDevice -n PluginMount -n PluginEnv -n PluginInterfaceType \
+    -n Port \
+    -n ServiceUpdateResponse \
+    -n Volume
+
+swagger generate operation -f api/swagger.yaml \
+    -t api -a types -m types -C api/swagger-gen.yaml \
+    -T api/templates --skip-responses --skip-parameters --skip-validator \
+    -n Authenticate \
+    -n ContainerChanges \
+    -n ContainerCreate \
+    -n ContainerTop \
+    -n ContainerUpdate \
+    -n ContainerWait \
+    -n ImageHistory \
+    -n VolumesCreate \
+    -n VolumesList
diff --git a/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/README.md b/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/README.md
new file mode 100644
index 000000000..1cea52526
--- /dev/null
+++ b/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/README.md
@@ -0,0 +1,69 @@
+# Integration Testing on Swarm
+
+IT on Swarm allows you to execute integration tests in parallel across a Docker Swarm cluster.
+
+## Architecture
+
+### Master service
+
+  - Works as a funker caller
+  - Calls a worker funker (`-worker-service`) with a chunk of `-check.f` filter strings (passed as a file via the `-input` flag, typically `/mnt/input`)
+
+### Worker service
+
+  - Works as a funker callee
+  - Executes an equivalent of `TESTFLAGS=-check.f TestFoo|TestBar|TestBaz ...
make test-integration-cli` using the bind-mounted API socket (`docker.sock`)
+
+### Client
+
+  - Controls master and workers via `docker stack`
+  - No need to have a local daemon
+
+Typically, the master and workers run in a cloud environment,
+while the client runs on a laptop, e.g. Docker for Mac/Windows.
+
+## Requirements
+
+  - Docker daemon 1.13 or later
+  - Private registry for distributed execution with multiple nodes
+
+## Usage
+
+### Step 1: Prepare images
+
+    $ make build-integration-cli-on-swarm
+
+The following environment variables are known to work in this step:
+
+  - `BUILDFLAGS`
+  - `DOCKER_INCREMENTAL_BINARY`
+
+Note: during the transition into the Moby Project, you might need to create a symbolic link `$GOPATH/src/github.com/docker/docker` to `$GOPATH/src/github.com/moby/moby`.
+
+### Step 2: Execute tests
+
+    $ ./hack/integration-cli-on-swarm/integration-cli-on-swarm -replicas 40 -push-worker-image YOUR_REGISTRY.EXAMPLE.COM/integration-cli-worker:latest
+
+The following environment variables are known to work in this step:
+
+  - `DOCKER_GRAPHDRIVER`
+  - `DOCKER_EXPERIMENTAL`
+
+#### Flags
+
+Basic flags:
+
+  - `-replicas N`: the number of worker service replicas, i.e. the degree of parallelism.
+  - `-chunks N`: the number of chunks. By default, `chunks` == `replicas`.
+  - `-push-worker-image REGISTRY/IMAGE:TAG`: push the worker image to the registry. Note that if you have only a single node, and hence do not need a private registry, you do not need to specify `-push-worker-image`.
+
+Experimental flags for mitigating makespan nonuniformity:
+
+  - `-shuffle`: shuffle the test filter strings
+
+Flags for debugging IT on Swarm itself:
+
+  - `-rand-seed N`: the random seed. This flag is useful for deterministic replaying. By default (0), the timestamp is used.
+  - `-filters-file FILE`: a file containing `-check.f` filter strings. By default, the file is automatically generated.
+  - `-dry-run`: skip the actual workload
+  - `-keep-executor`: do not auto-remove executor containers, which is used for running privileged programs on Swarm
diff --git a/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/agent/Dockerfile b/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/agent/Dockerfile
new file mode 100644
index 000000000..c2bc2f195
--- /dev/null
+++ b/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/agent/Dockerfile
@@ -0,0 +1,6 @@
+# This Dockerfile is solely used for the master image.
+# Please refer to the top-level Makefile for the worker image.
+FROM golang:1.7
+ADD . /go/src/github.com/docker/docker/hack/integration-cli-on-swarm/agent
+RUN go build -o /master github.com/docker/docker/hack/integration-cli-on-swarm/agent/master
+ENTRYPOINT ["/master"]
diff --git a/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/agent/master/call.go b/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/agent/master/call.go
new file mode 100644
index 000000000..858c2c072
--- /dev/null
+++ b/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/agent/master/call.go
@@ -0,0 +1,132 @@
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/bfirsh/funker-go"
+	"github.com/docker/docker/hack/integration-cli-on-swarm/agent/types"
+)
+
+const (
+	// funkerRetryTimeout is for the issue https://github.com/bfirsh/funker/issues/3
+	// When all the funker replicas are busy in their own jobs, we cannot connect to funker.
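+	// With the values below, a failed call is retried about once per
+	// funkerRetryDuration until funkerRetryTimeout elapses, i.e. on the
+	// order of 3600 attempts in the worst case.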
+ funkerRetryTimeout = 1 * time.Hour + funkerRetryDuration = 1 * time.Second +) + +// ticker is needed for some CI (e.g., on Travis, job is aborted when no output emitted for 10 minutes) +func ticker(d time.Duration) chan struct{} { + t := time.NewTicker(d) + stop := make(chan struct{}) + go func() { + for { + select { + case <-t.C: + log.Printf("tick (just for keeping CI job active) per %s", d.String()) + case <-stop: + t.Stop() + } + } + }() + return stop +} + +func executeTests(funkerName string, testChunks [][]string) error { + tickerStopper := ticker(9*time.Minute + 55*time.Second) + defer func() { + close(tickerStopper) + }() + begin := time.Now() + log.Printf("Executing %d chunks in parallel, against %q", len(testChunks), funkerName) + var wg sync.WaitGroup + var passed, failed uint32 + for chunkID, tests := range testChunks { + log.Printf("Executing chunk %d (contains %d test filters)", chunkID, len(tests)) + wg.Add(1) + go func(chunkID int, tests []string) { + defer wg.Done() + chunkBegin := time.Now() + result, err := executeTestChunkWithRetry(funkerName, types.Args{ + ChunkID: chunkID, + Tests: tests, + }) + if result.RawLog != "" { + for _, s := range strings.Split(result.RawLog, "\n") { + log.Printf("Log (chunk %d): %s", chunkID, s) + } + } + if err != nil { + log.Printf("Error while executing chunk %d: %v", + chunkID, err) + atomic.AddUint32(&failed, 1) + } else { + if result.Code == 0 { + atomic.AddUint32(&passed, 1) + } else { + atomic.AddUint32(&failed, 1) + } + log.Printf("Finished chunk %d [%d/%d] with %d test filters in %s, code=%d.", + chunkID, passed+failed, len(testChunks), len(tests), + time.Now().Sub(chunkBegin), result.Code) + } + }(chunkID, tests) + } + wg.Wait() + // TODO: print actual tests rather than chunks + log.Printf("Executed %d chunks in %s. 
PASS: %d, FAIL: %d.", + len(testChunks), time.Now().Sub(begin), passed, failed) + if failed > 0 { + return fmt.Errorf("%d chunks failed", failed) + } + return nil +} + +func executeTestChunk(funkerName string, args types.Args) (types.Result, error) { + ret, err := funker.Call(funkerName, args) + if err != nil { + return types.Result{}, err + } + tmp, err := json.Marshal(ret) + if err != nil { + return types.Result{}, err + } + var result types.Result + err = json.Unmarshal(tmp, &result) + return result, err +} + +func executeTestChunkWithRetry(funkerName string, args types.Args) (types.Result, error) { + begin := time.Now() + for i := 0; time.Now().Sub(begin) < funkerRetryTimeout; i++ { + result, err := executeTestChunk(funkerName, args) + if err == nil { + log.Printf("executeTestChunk(%q, %d) returned code %d in trial %d", funkerName, args.ChunkID, result.Code, i) + return result, nil + } + if errorSeemsInteresting(err) { + log.Printf("Error while calling executeTestChunk(%q, %d), will retry (trial %d): %v", + funkerName, args.ChunkID, i, err) + } + // TODO: non-constant sleep + time.Sleep(funkerRetryDuration) + } + return types.Result{}, fmt.Errorf("could not call executeTestChunk(%q, %d) in %v", funkerName, args.ChunkID, funkerRetryTimeout) +} + +// errorSeemsInteresting returns true if err does not seem about https://github.com/bfirsh/funker/issues/3 +func errorSeemsInteresting(err error) bool { + boringSubstrs := []string{"connection refused", "connection reset by peer", "no such host", "transport endpoint is not connected", "no route to host"} + errS := err.Error() + for _, boringS := range boringSubstrs { + if strings.Contains(errS, boringS) { + return false + } + } + return true +} diff --git a/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/agent/master/master.go b/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/agent/master/master.go new file mode 100644 index 000000000..a0d9a0d38 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/agent/master/master.go @@ -0,0 +1,65 @@ +package main + +import ( + "errors" + "flag" + "io/ioutil" + "log" + "strings" +) + +func main() { + if err := xmain(); err != nil { + log.Fatalf("fatal error: %v", err) + } +} + +func xmain() error { + workerService := flag.String("worker-service", "", "Name of worker service") + chunks := flag.Int("chunks", 0, "Number of chunks") + input := flag.String("input", "", "Path to input file") + randSeed := flag.Int64("rand-seed", int64(0), "Random seed") + shuffle := flag.Bool("shuffle", false, "Shuffle the input so as to mitigate makespan nonuniformity") + flag.Parse() + if *workerService == "" { + return errors.New("worker-service unset") + } + if *chunks == 0 { + return errors.New("chunks unset") + } + if *input == "" { + return errors.New("input unset") + } + + tests, err := loadTests(*input) + if err != nil { + return err + } + testChunks := chunkTests(tests, *chunks, *shuffle, *randSeed) + log.Printf("Loaded %d tests (%d chunks)", len(tests), len(testChunks)) + return executeTests(*workerService, testChunks) +} + +func chunkTests(tests []string, numChunks int, shuffle bool, randSeed int64) [][]string { + // shuffling (experimental) mitigates makespan nonuniformity + // Not sure this can cause some locality problem.. 
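+	// Note that shuffleStrings is seeded deterministically, so passing the
+	// same randSeed again (see the -rand-seed flag) replays the same chunking.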
+ if shuffle { + shuffleStrings(tests, randSeed) + } + return chunkStrings(tests, numChunks) +} + +func loadTests(filename string) ([]string, error) { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + var tests []string + for _, line := range strings.Split(string(b), "\n") { + s := strings.TrimSpace(line) + if s != "" { + tests = append(tests, s) + } + } + return tests, nil +} diff --git a/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/agent/master/set.go b/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/agent/master/set.go new file mode 100644 index 000000000..d28c41da7 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/agent/master/set.go @@ -0,0 +1,28 @@ +package main + +import ( + "math/rand" +) + +// chunkStrings chunks the string slice +func chunkStrings(x []string, numChunks int) [][]string { + var result [][]string + chunkSize := (len(x) + numChunks - 1) / numChunks + for i := 0; i < len(x); i += chunkSize { + ub := i + chunkSize + if ub > len(x) { + ub = len(x) + } + result = append(result, x[i:ub]) + } + return result +} + +// shuffleStrings shuffles strings +func shuffleStrings(x []string, seed int64) { + r := rand.New(rand.NewSource(seed)) + for i := range x { + j := r.Intn(i + 1) + x[i], x[j] = x[j], x[i] + } +} diff --git a/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/agent/master/set_test.go b/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/agent/master/set_test.go new file mode 100644 index 000000000..dfb7a0b4f --- /dev/null +++ b/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/agent/master/set_test.go @@ -0,0 +1,63 @@ +package main + +import ( + "fmt" + "reflect" + "testing" + "time" +) + +func generateInput(inputLen int) []string { + input := []string{} + for i := 0; i < inputLen; i++ { + input = append(input, fmt.Sprintf("s%d", i)) + } + + return input +} + +func testChunkStrings(t *testing.T, inputLen, numChunks int) { + t.Logf("inputLen=%d, numChunks=%d", inputLen, numChunks) + input := generateInput(inputLen) + result := chunkStrings(input, numChunks) + t.Logf("result has %d chunks", len(result)) + inputReconstructedFromResult := []string{} + for i, chunk := range result { + t.Logf("chunk %d has %d elements", i, len(chunk)) + inputReconstructedFromResult = append(inputReconstructedFromResult, chunk...) 
+	}
+	if !reflect.DeepEqual(input, inputReconstructedFromResult) {
+		t.Fatal("input != inputReconstructedFromResult")
+	}
+}
+
+func TestChunkStrings_4_4(t *testing.T) {
+	testChunkStrings(t, 4, 4)
+}
+
+func TestChunkStrings_4_1(t *testing.T) {
+	testChunkStrings(t, 4, 1)
+}
+
+func TestChunkStrings_1_4(t *testing.T) {
+	testChunkStrings(t, 1, 4)
+}
+
+func TestChunkStrings_1000_8(t *testing.T) {
+	testChunkStrings(t, 1000, 8)
+}
+
+func TestChunkStrings_1000_9(t *testing.T) {
+	testChunkStrings(t, 1000, 9)
+}
+
+func testShuffleStrings(t *testing.T, inputLen int, seed int64) {
+	t.Logf("inputLen=%d, seed=%d", inputLen, seed)
+	x := generateInput(inputLen)
+	shuffleStrings(x, seed)
+	t.Logf("shuffled: %v", x)
+}
+
+func TestShuffleStrings_100(t *testing.T) {
+	testShuffleStrings(t, 100, time.Now().UnixNano())
+}
diff --git a/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/agent/types/types.go b/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/agent/types/types.go
new file mode 100644
index 000000000..fc598f033
--- /dev/null
+++ b/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/agent/types/types.go
@@ -0,0 +1,18 @@
+package types
+
+// Args is the type for funker args
+type Args struct {
+	// ChunkID is a unique number of the chunk
+	ChunkID int `json:"chunk_id"`
+	// Tests is the set of strings that are passed as `-check.f` filters
+	Tests []string `json:"tests"`
+}
+
+// Result is the type for funker result
+type Result struct {
+	// ChunkID corresponds to Args.ChunkID
+	ChunkID int `json:"chunk_id"`
+	// Code is the exit code
+	Code int `json:"code"`
+	RawLog string `json:"raw_log"`
+}
diff --git a/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/agent/vendor.conf b/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/agent/vendor.conf
new file mode 100644
index 000000000..efd6d6d04
--- /dev/null
+++ b/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/agent/vendor.conf
@@ -0,0 +1,2 @@
+# dependencies specific to worker (i.e. github.com/docker/docker/...) are not vendored here
+github.com/bfirsh/funker-go eaa0a2e06f30e72c9a0b7f858951e581e26ef773
diff --git a/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/agent/worker/executor.go b/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/agent/worker/executor.go
new file mode 100644
index 000000000..3442b0940
--- /dev/null
+++ b/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/agent/worker/executor.go
@@ -0,0 +1,118 @@
+package main
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/api/types/mount"
+	"github.com/docker/docker/client"
+	"github.com/docker/docker/pkg/stdcopy"
+)
+
+// testChunkExecutor executes the integration-cli binary.
+// image needs to be the worker image itself; tests is an OR-ed set of regexps used for filtering tests.
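+// For example (illustrative test names, not taken from this repo), tests of
+//   []string{"DockerSuite.TestRunEchoStdout$", "DockerSuite.TestRunDetached$"}
+// end up in the executor container's environment as the single filter
+//   TESTFLAGS=-check.f DockerSuite.TestRunEchoStdout$|DockerSuite.TestRunDetached$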
+type testChunkExecutor func(image string, tests []string) (int64, string, error) + +func dryTestChunkExecutor() testChunkExecutor { + return func(image string, tests []string) (int64, string, error) { + return 0, fmt.Sprintf("DRY RUN (image=%q, tests=%v)", image, tests), nil + } +} + +// privilegedTestChunkExecutor invokes a privileged container from the worker +// service via bind-mounted API socket so as to execute the test chunk +func privilegedTestChunkExecutor(autoRemove bool) testChunkExecutor { + return func(image string, tests []string) (int64, string, error) { + cli, err := client.NewEnvClient() + if err != nil { + return 0, "", err + } + // propagate variables from the host (needs to be defined in the compose file) + experimental := os.Getenv("DOCKER_EXPERIMENTAL") + graphdriver := os.Getenv("DOCKER_GRAPHDRIVER") + if graphdriver == "" { + info, err := cli.Info(context.Background()) + if err != nil { + return 0, "", err + } + graphdriver = info.Driver + } + // `daemon_dest` is similar to `$DEST` (e.g. `bundles/VERSION/test-integration-cli`) + // but it exists outside of `bundles` so as to make `$DOCKER_GRAPHDRIVER` work. + // + // Without this hack, `$DOCKER_GRAPHDRIVER` fails because of (e.g.) `overlay2 is not supported over overlayfs` + // + // see integration-cli/daemon/daemon.go + daemonDest := "/daemon_dest" + config := container.Config{ + Image: image, + Env: []string{ + "TESTFLAGS=-check.f " + strings.Join(tests, "|"), + "KEEPBUNDLE=1", + "DOCKER_INTEGRATION_TESTS_VERIFIED=1", // for avoiding rebuilding integration-cli + "DOCKER_EXPERIMENTAL=" + experimental, + "DOCKER_GRAPHDRIVER=" + graphdriver, + "DOCKER_INTEGRATION_DAEMON_DEST=" + daemonDest, + }, + Labels: map[string]string{ + "org.dockerproject.integration-cli-on-swarm": "", + "org.dockerproject.integration-cli-on-swarm.comment": "this non-service container is created for running privileged programs on Swarm. 
you can remove this container manually if the corresponding service is already stopped.", + }, + Entrypoint: []string{"hack/dind"}, + Cmd: []string{"hack/make.sh", "test-integration-cli"}, + } + hostConfig := container.HostConfig{ + AutoRemove: autoRemove, + Privileged: true, + Mounts: []mount.Mount{ + { + Type: mount.TypeVolume, + Target: daemonDest, + }, + }, + } + id, stream, err := runContainer(context.Background(), cli, config, hostConfig) + if err != nil { + return 0, "", err + } + var b bytes.Buffer + teeContainerStream(&b, os.Stdout, os.Stderr, stream) + resultC, errC := cli.ContainerWait(context.Background(), id, "") + select { + case err := <-errC: + return 0, "", err + case result := <-resultC: + return result.StatusCode, b.String(), nil + } + } +} + +func runContainer(ctx context.Context, cli *client.Client, config container.Config, hostConfig container.HostConfig) (string, io.ReadCloser, error) { + created, err := cli.ContainerCreate(context.Background(), + &config, &hostConfig, nil, "") + if err != nil { + return "", nil, err + } + if err = cli.ContainerStart(ctx, created.ID, types.ContainerStartOptions{}); err != nil { + return "", nil, err + } + stream, err := cli.ContainerLogs(ctx, + created.ID, + types.ContainerLogsOptions{ + ShowStdout: true, + ShowStderr: true, + Follow: true, + }) + return created.ID, stream, err +} + +func teeContainerStream(w, stdout, stderr io.Writer, stream io.ReadCloser) { + stdcopy.StdCopy(io.MultiWriter(w, stdout), io.MultiWriter(w, stderr), stream) + stream.Close() +} diff --git a/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/agent/worker/worker.go b/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/agent/worker/worker.go new file mode 100644 index 000000000..36ab3684d --- /dev/null +++ b/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/agent/worker/worker.go @@ -0,0 +1,69 @@ +package main + +import ( + "flag" + "fmt" + "log" + "time" + + "github.com/bfirsh/funker-go" + "github.com/docker/distribution/reference" + "github.com/docker/docker/hack/integration-cli-on-swarm/agent/types" +) + +func main() { + if err := xmain(); err != nil { + log.Fatalf("fatal error: %v", err) + } +} + +func validImageDigest(s string) bool { + return reference.DigestRegexp.FindString(s) != "" +} + +func xmain() error { + workerImageDigest := flag.String("worker-image-digest", "", "Needs to be the digest of this worker image itself") + dryRun := flag.Bool("dry-run", false, "Dry run") + keepExecutor := flag.Bool("keep-executor", false, "Do not auto-remove executor containers, which is used for running privileged programs on Swarm") + flag.Parse() + if !validImageDigest(*workerImageDigest) { + // Because of issue #29582. + // `docker service create localregistry.example.com/blahblah:latest` pulls the image data to local, but not a tag. 
+ // So, `docker run localregistry.example.com/blahblah:latest` fails: `Unable to find image 'localregistry.example.com/blahblah:latest' locally` + return fmt.Errorf("worker-image-digest must be a digest, got %q", *workerImageDigest) + } + executor := privilegedTestChunkExecutor(!*keepExecutor) + if *dryRun { + executor = dryTestChunkExecutor() + } + return handle(*workerImageDigest, executor) +} + +func handle(workerImageDigest string, executor testChunkExecutor) error { + log.Printf("Waiting for a funker request") + return funker.Handle(func(args *types.Args) types.Result { + log.Printf("Executing chunk %d, contains %d test filters", + args.ChunkID, len(args.Tests)) + begin := time.Now() + code, rawLog, err := executor(workerImageDigest, args.Tests) + if err != nil { + log.Printf("Error while executing chunk %d: %v", args.ChunkID, err) + if code == 0 { + // Make sure this is a failure + code = 1 + } + return types.Result{ + ChunkID: args.ChunkID, + Code: int(code), + RawLog: rawLog, + } + } + elapsed := time.Now().Sub(begin) + log.Printf("Finished chunk %d, code=%d, elapsed=%v", args.ChunkID, code, elapsed) + return types.Result{ + ChunkID: args.ChunkID, + Code: int(code), + RawLog: rawLog, + } + }) +} diff --git a/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/host/compose.go b/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/host/compose.go new file mode 100644 index 000000000..a92282a1a --- /dev/null +++ b/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/host/compose.go @@ -0,0 +1,122 @@ +package main + +import ( + "context" + "io/ioutil" + "os" + "path/filepath" + "text/template" + + "github.com/docker/docker/client" +) + +const composeTemplate = `# generated by integration-cli-on-swarm +version: "3" + +services: + worker: + image: "{{.WorkerImage}}" + command: ["-worker-image-digest={{.WorkerImageDigest}}", "-dry-run={{.DryRun}}", "-keep-executor={{.KeepExecutor}}"] + networks: + - net + volumes: +# Bind-mount the API socket so that we can invoke "docker run --privileged" within the service containers + - /var/run/docker.sock:/var/run/docker.sock + environment: + - DOCKER_GRAPHDRIVER={{.EnvDockerGraphDriver}} + - DOCKER_EXPERIMENTAL={{.EnvDockerExperimental}} + deploy: + mode: replicated + replicas: {{.Replicas}} + restart_policy: +# The restart condition needs to be any for funker function + condition: any + + master: + image: "{{.MasterImage}}" + command: ["-worker-service=worker", "-input=/mnt/input", "-chunks={{.Chunks}}", "-shuffle={{.Shuffle}}", "-rand-seed={{.RandSeed}}"] + networks: + - net + volumes: + - {{.Volume}}:/mnt + deploy: + mode: replicated + replicas: 1 + restart_policy: + condition: none + placement: +# Make sure the master can access the volume + constraints: [node.id == {{.SelfNodeID}}] + +networks: + net: + +volumes: + {{.Volume}}: + external: true +` + +type composeOptions struct { + Replicas int + Chunks int + MasterImage string + WorkerImage string + Volume string + Shuffle bool + RandSeed int64 + DryRun bool + KeepExecutor bool +} + +type composeTemplateOptions struct { + composeOptions + WorkerImageDigest string + SelfNodeID string + EnvDockerGraphDriver string + EnvDockerExperimental string +} + +// createCompose creates "dir/docker-compose.yml". +// If dir is empty, TempDir() is used. 
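+// A minimal (hypothetical) invocation, assuming cli is a connected *client.Client
+// and using the default image and volume names from host.go, might look like:
+//   path, err := createCompose("", cli, composeOptions{
+//   	Replicas: 1, Chunks: 1,
+//   	MasterImage: "integration-cli-master", WorkerImage: "integration-cli-worker",
+//   	Volume: "integration-cli-on-swarm"})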
+func createCompose(dir string, cli *client.Client, opts composeOptions) (string, error) { + if dir == "" { + var err error + dir, err = ioutil.TempDir("", "integration-cli-on-swarm-") + if err != nil { + return "", err + } + } + resolved := composeTemplateOptions{} + resolved.composeOptions = opts + workerImageInspect, _, err := cli.ImageInspectWithRaw(context.Background(), defaultWorkerImageName) + if err != nil { + return "", err + } + if len(workerImageInspect.RepoDigests) > 0 { + resolved.WorkerImageDigest = workerImageInspect.RepoDigests[0] + } else { + // fall back for non-pushed image + resolved.WorkerImageDigest = workerImageInspect.ID + } + info, err := cli.Info(context.Background()) + if err != nil { + return "", err + } + resolved.SelfNodeID = info.Swarm.NodeID + resolved.EnvDockerGraphDriver = os.Getenv("DOCKER_GRAPHDRIVER") + resolved.EnvDockerExperimental = os.Getenv("DOCKER_EXPERIMENTAL") + composeFilePath := filepath.Join(dir, "docker-compose.yml") + tmpl, err := template.New("").Parse(composeTemplate) + if err != nil { + return "", err + } + f, err := os.Create(composeFilePath) + if err != nil { + return "", err + } + defer f.Close() + if err = tmpl.Execute(f, resolved); err != nil { + return "", err + } + return composeFilePath, nil +} diff --git a/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/host/dockercmd.go b/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/host/dockercmd.go new file mode 100644 index 000000000..10ea0ecc2 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/host/dockercmd.go @@ -0,0 +1,64 @@ +package main + +import ( + "fmt" + "os" + "os/exec" + "strings" + "time" + + "github.com/docker/docker/client" +) + +func system(commands [][]string) error { + for _, c := range commands { + cmd := exec.Command(c[0], c[1:]...) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Env = os.Environ() + if err := cmd.Run(); err != nil { + return err + } + } + return nil +} + +func pushImage(unusedCli *client.Client, remote, local string) error { + // FIXME: eliminate os/exec (but it is hard to pass auth without os/exec ...) + return system([][]string{ + {"docker", "image", "tag", local, remote}, + {"docker", "image", "push", remote}, + }) +} + +func deployStack(unusedCli *client.Client, stackName, composeFilePath string) error { + // FIXME: eliminate os/exec (but stack is implemented in CLI ...) + return system([][]string{ + {"docker", "stack", "deploy", + "--compose-file", composeFilePath, + "--with-registry-auth", + stackName}, + }) +} + +func hasStack(unusedCli *client.Client, stackName string) bool { + // FIXME: eliminate os/exec (but stack is implemented in CLI ...) + out, err := exec.Command("docker", "stack", "ls").Output() + if err != nil { + panic(fmt.Errorf("`docker stack ls` failed with: %s", string(out))) + } + // FIXME: not accurate + return strings.Contains(string(out), stackName) +} + +func removeStack(unusedCli *client.Client, stackName string) error { + // FIXME: eliminate os/exec (but stack is implemented in CLI ...) 
+	if err := system([][]string{
+		{"docker", "stack", "rm", stackName},
+	}); err != nil {
+		return err
+	}
+	// FIXME: sleeping is a crude way to wait for the stack removal to settle
+	time.Sleep(10 * time.Second)
+	return nil
+}
diff --git a/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/host/enumerate.go b/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/host/enumerate.go
new file mode 100644
index 000000000..56c03e38d
--- /dev/null
+++ b/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/host/enumerate.go
@@ -0,0 +1,55 @@
+package main
+
+import (
+	"fmt"
+	"io/ioutil"
+	"path/filepath"
+	"regexp"
+)
+
+var testFuncRegexp *regexp.Regexp
+
+func init() {
+	testFuncRegexp = regexp.MustCompile(`(?m)^\s*func\s+\(\w*\s*\*(\w+Suite)\)\s+(Test\w+)`)
+}
+
+func enumerateTestsForBytes(b []byte) ([]string, error) {
+	var tests []string
+	submatches := testFuncRegexp.FindAllSubmatch(b, -1)
+	for _, submatch := range submatches {
+		if len(submatch) == 3 {
+			tests = append(tests, fmt.Sprintf("%s.%s$", submatch[1], submatch[2]))
+		}
+	}
+	return tests, nil
+}
+
+// enumerateTests enumerates valid `-check.f` strings for all the test functions.
+// Note that we use regexp rather than parsing Go files for performance reasons.
+// (Try `TESTFLAGS=-check.list make test-integration-cli` to see the slowness of parsing)
+// The files need to be `gofmt`-ed.
+//
+// The result will be as follows, but unsorted ('$' is appended because they are regexps for `-check.f`):
+//  "DockerAuthzSuite.TestAuthZPluginAPIDenyResponse$"
+//  "DockerAuthzSuite.TestAuthZPluginAllowEventStream$"
+//  ...
+//  "DockerTrustedSwarmSuite.TestTrustedServiceUpdate$"
+func enumerateTests(wd string) ([]string, error) {
+	testGoFiles, err := filepath.Glob(filepath.Join(wd, "integration-cli", "*_test.go"))
+	if err != nil {
+		return nil, err
+	}
+	var allTests []string
+	for _, testGoFile := range testGoFiles {
+		b, err := ioutil.ReadFile(testGoFile)
+		if err != nil {
+			return nil, err
+		}
+		tests, err := enumerateTestsForBytes(b)
+		if err != nil {
+			return nil, err
+		}
+		allTests = append(allTests, tests...)
+ } + return allTests, nil +} diff --git a/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/host/enumerate_test.go b/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/host/enumerate_test.go new file mode 100644 index 000000000..d6049ae52 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/host/enumerate_test.go @@ -0,0 +1,84 @@ +package main + +import ( + "os" + "path/filepath" + "reflect" + "sort" + "strings" + "testing" +) + +func getRepoTopDir(t *testing.T) string { + wd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + wd = filepath.Clean(wd) + suffix := "hack/integration-cli-on-swarm/host" + if !strings.HasSuffix(wd, suffix) { + t.Skipf("cwd seems strange (needs to have suffix %s): %v", suffix, wd) + } + return filepath.Clean(filepath.Join(wd, "../../..")) +} + +func TestEnumerateTests(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + tests, err := enumerateTests(getRepoTopDir(t)) + if err != nil { + t.Fatal(err) + } + sort.Strings(tests) + t.Logf("enumerated %d test filter strings:", len(tests)) + for _, s := range tests { + t.Logf("- %q", s) + } +} + +func TestEnumerateTestsForBytes(t *testing.T) { + b := []byte(`package main +import ( + "github.com/go-check/check" +) + +func (s *FooSuite) TestA(c *check.C) { +} + +func (s *FooSuite) TestAAA(c *check.C) { +} + +func (s *BarSuite) TestBar(c *check.C) { +} + +func (x *FooSuite) TestC(c *check.C) { +} + +func (*FooSuite) TestD(c *check.C) { +} + +// should not be counted +func (s *FooSuite) testE(c *check.C) { +} + +// counted, although we don't support ungofmt file + func (s *FooSuite) TestF (c *check.C){} +`) + expected := []string{ + "FooSuite.TestA$", + "FooSuite.TestAAA$", + "BarSuite.TestBar$", + "FooSuite.TestC$", + "FooSuite.TestD$", + "FooSuite.TestF$", + } + + actual, err := enumerateTestsForBytes(b) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(expected, actual) { + t.Fatalf("expected %q, got %q", expected, actual) + } +} diff --git a/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/host/host.go b/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/host/host.go new file mode 100644 index 000000000..6823a7668 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/host/host.go @@ -0,0 +1,198 @@ +package main + +import ( + "context" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/stdcopy" +) + +const ( + defaultStackName = "integration-cli-on-swarm" + defaultVolumeName = "integration-cli-on-swarm" + defaultMasterImageName = "integration-cli-master" + defaultWorkerImageName = "integration-cli-worker" +) + +func main() { + rc, err := xmain() + if err != nil { + logrus.Fatalf("fatal error: %v", err) + } + os.Exit(rc) +} + +func xmain() (int, error) { + // Should we use cobra maybe? + replicas := flag.Int("replicas", 1, "Number of worker service replica") + chunks := flag.Int("chunks", 0, "Number of test chunks executed in batch (0 == replicas)") + pushWorkerImage := flag.String("push-worker-image", "", "Push the worker image to the registry. Required for distributed execution. 
(empty == not to push)")
+	shuffle := flag.Bool("shuffle", false, "Shuffle the input so as to mitigate makespan nonuniformity")
+	// flags below are rarely used
+	randSeed := flag.Int64("rand-seed", int64(0), "Random seed used for shuffling (0 == current time)")
+	filtersFile := flag.String("filters-file", "", "Path to optional file composed of `-check.f` filter strings")
+	dryRun := flag.Bool("dry-run", false, "Dry run")
+	keepExecutor := flag.Bool("keep-executor", false, "Do not auto-remove executor containers, which is used for running privileged programs on Swarm")
+	flag.Parse()
+	if *chunks == 0 {
+		*chunks = *replicas
+	}
+	if *randSeed == int64(0) {
+		*randSeed = time.Now().UnixNano()
+	}
+	cli, err := client.NewEnvClient()
+	if err != nil {
+		return 1, err
+	}
+	if hasStack(cli, defaultStackName) {
+		logrus.Infof("Removing stack %s", defaultStackName)
+		removeStack(cli, defaultStackName)
+	}
+	if hasVolume(cli, defaultVolumeName) {
+		logrus.Infof("Removing volume %s", defaultVolumeName)
+		removeVolume(cli, defaultVolumeName)
+	}
+	if err = ensureImages(cli, []string{defaultWorkerImageName, defaultMasterImageName}); err != nil {
+		return 1, err
+	}
+	workerImageForStack := defaultWorkerImageName
+	if *pushWorkerImage != "" {
+		logrus.Infof("Pushing %s to %s", defaultWorkerImageName, *pushWorkerImage)
+		if err = pushImage(cli, *pushWorkerImage, defaultWorkerImageName); err != nil {
+			return 1, err
+		}
+		workerImageForStack = *pushWorkerImage
+	}
+	compose, err := createCompose("", cli, composeOptions{
+		Replicas:     *replicas,
+		Chunks:       *chunks,
+		MasterImage:  defaultMasterImageName,
+		WorkerImage:  workerImageForStack,
+		Volume:       defaultVolumeName,
+		Shuffle:      *shuffle,
+		RandSeed:     *randSeed,
+		DryRun:       *dryRun,
+		KeepExecutor: *keepExecutor,
+	})
+	if err != nil {
+		return 1, err
+	}
+	filters, err := filtersBytes(*filtersFile)
+	if err != nil {
+		return 1, err
+	}
+	logrus.Infof("Creating volume %s with input data", defaultVolumeName)
+	if err = createVolumeWithData(cli,
+		defaultVolumeName,
+		map[string][]byte{"/input": filters},
+		defaultMasterImageName); err != nil {
+		return 1, err
+	}
+	logrus.Infof("Deploying stack %s from %s", defaultStackName, compose)
+	defer func() {
+		logrus.Infof("NOTE: You may want to inspect or clean up the following resources:")
+		logrus.Infof(" - Stack: %s", defaultStackName)
+		logrus.Infof(" - Volume: %s", defaultVolumeName)
+		logrus.Infof(" - Compose file: %s", compose)
+		logrus.Infof(" - Master image: %s", defaultMasterImageName)
+		logrus.Infof(" - Worker image: %s", workerImageForStack)
+	}()
+	if err = deployStack(cli, defaultStackName, compose); err != nil {
+		return 1, err
+	}
+	logrus.Infof("The log will be displayed here after some duration. "+
+		"You can watch the live status via `docker service logs %s_worker`",
+		defaultStackName)
+	masterContainerID, err := waitForMasterUp(cli, defaultStackName)
+	if err != nil {
+		return 1, err
+	}
+	rc, err := waitForContainerCompletion(cli, os.Stdout, os.Stderr, masterContainerID)
+	if err != nil {
+		return 1, err
+	}
+	logrus.Infof("Exit status: %d", rc)
+	return int(rc), nil
+}
+
+func ensureImages(cli *client.Client, images []string) error {
+	for _, image := range images {
+		_, _, err := cli.ImageInspectWithRaw(context.Background(), image)
+		if err != nil {
+			return fmt.Errorf("could not find image %s, please run `make build-integration-cli-on-swarm`: %v",
+				image, err)
+		}
+	}
+	return nil
+}
+
+func filtersBytes(optionalFiltersFile string) ([]byte, error) {
+	var b []byte
+	if optionalFiltersFile == "" {
+ tests, err := enumerateTests(".") + if err != nil { + return b, err + } + b = []byte(strings.Join(tests, "\n") + "\n") + } else { + var err error + b, err = ioutil.ReadFile(optionalFiltersFile) + if err != nil { + return b, err + } + } + return b, nil +} + +func waitForMasterUp(cli *client.Client, stackName string) (string, error) { + // FIXME(AkihiroSuda): it should retry until master is up, rather than pre-sleeping + time.Sleep(10 * time.Second) + + fil := filters.NewArgs() + fil.Add("label", "com.docker.stack.namespace="+stackName) + // FIXME(AkihiroSuda): we should not rely on internal service naming convention + fil.Add("label", "com.docker.swarm.service.name="+stackName+"_master") + masters, err := cli.ContainerList(context.Background(), types.ContainerListOptions{ + All: true, + Filters: fil, + }) + if err != nil { + return "", err + } + if len(masters) == 0 { + return "", fmt.Errorf("master not running in stack %s?", stackName) + } + return masters[0].ID, nil +} + +func waitForContainerCompletion(cli *client.Client, stdout, stderr io.Writer, containerID string) (int64, error) { + stream, err := cli.ContainerLogs(context.Background(), + containerID, + types.ContainerLogsOptions{ + ShowStdout: true, + ShowStderr: true, + Follow: true, + }) + if err != nil { + return 1, err + } + stdcopy.StdCopy(stdout, stderr, stream) + stream.Close() + resultC, errC := cli.ContainerWait(context.Background(), containerID, "") + select { + case err := <-errC: + return 1, err + case result := <-resultC: + return result.StatusCode, nil + } +} diff --git a/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/host/volume.go b/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/host/volume.go new file mode 100644 index 000000000..c2f96984a --- /dev/null +++ b/vendor/github.com/moby/moby/hack/integration-cli-on-swarm/host/volume.go @@ -0,0 +1,88 @@ +package main + +import ( + "archive/tar" + "bytes" + "context" + "io" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/volume" + "github.com/docker/docker/client" +) + +func createTar(data map[string][]byte) (io.Reader, error) { + var b bytes.Buffer + tw := tar.NewWriter(&b) + for path, datum := range data { + hdr := tar.Header{ + Name: path, + Mode: 0644, + Size: int64(len(datum)), + } + if err := tw.WriteHeader(&hdr); err != nil { + return nil, err + } + _, err := tw.Write(datum) + if err != nil { + return nil, err + } + } + if err := tw.Close(); err != nil { + return nil, err + } + return &b, nil +} + +// createVolumeWithData creates a volume with the given data (e.g. data["/foo"] = []byte("bar")) +// Internally, a container is created from the image so as to provision the data to the volume, +// which is attached to the container. 
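+// For instance, host.go provisions the master's input file roughly as:
+//   createVolumeWithData(cli, "integration-cli-on-swarm",
+//   	map[string][]byte{"/input": filters}, "integration-cli-master")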
+func createVolumeWithData(cli *client.Client, volumeName string, data map[string][]byte, image string) error {
+	_, err := cli.VolumeCreate(context.Background(),
+		volume.VolumesCreateBody{
+			Driver: "local",
+			Name:   volumeName,
+		})
+	if err != nil {
+		return err
+	}
+
+	mnt := "/mnt"
+	miniContainer, err := cli.ContainerCreate(context.Background(),
+		&container.Config{
+			Image: image,
+		},
+		&container.HostConfig{
+			Mounts: []mount.Mount{
+				{
+					Type:   mount.TypeVolume,
+					Source: volumeName,
+					Target: mnt,
+				},
+			},
+		}, nil, "")
+	if err != nil {
+		return err
+	}
+	tr, err := createTar(data)
+	if err != nil {
+		return err
+	}
+	if err = cli.CopyToContainer(context.Background(),
+		miniContainer.ID, mnt, tr, types.CopyToContainerOptions{}); err != nil {
+		return err
+	}
+	return cli.ContainerRemove(context.Background(),
+		miniContainer.ID,
+		types.ContainerRemoveOptions{})
+}
+
+func hasVolume(cli *client.Client, volumeName string) bool {
+	_, err := cli.VolumeInspect(context.Background(), volumeName)
+	return err == nil
+}
+
+func removeVolume(cli *client.Client, volumeName string) error {
+	return cli.VolumeRemove(context.Background(), volumeName, true)
+}
diff --git a/vendor/github.com/moby/moby/hack/make.ps1 b/vendor/github.com/moby/moby/hack/make.ps1
new file mode 100644
index 000000000..ac3e36904
--- /dev/null
+++ b/vendor/github.com/moby/moby/hack/make.ps1
@@ -0,0 +1,472 @@
+<#
+.NOTES
+    Author:  @jhowardmsft
+
+    Summary: Windows native build script. This is similar to functionality provided
+             by hack\make.sh, but uses native Windows PowerShell semantics. It does
+             not support the full set of options provided by the Linux counterpart.
+             For example:
+
+             - You can't cross-build Linux docker binaries on Windows
+             - Hashes aren't generated on binaries
+             - 'Releasing' isn't supported.
+             - Integration tests. This is because they currently cannot run inside a container,
+               and require significant external setup.
+
+             It does, however, provide the minimum necessary to support parts of local Windows
+             development and Windows to Windows CI.
+
+             Usage Examples (run from repo root):
+                "hack\make.ps1 -Client" to build docker.exe client 64-bit binary (remote repo)
+                "hack\make.ps1 -TestUnit" to run unit tests
+                "hack\make.ps1 -Daemon -TestUnit" to build the daemon and run unit tests
+                "hack\make.ps1 -All" to run everything this script knows about that can run in a container
+                "hack\make.ps1" to build the daemon binary (same as -Daemon)
+                "hack\make.ps1 -Binary" shortcut to -Client and -Daemon
+
+.PARAMETER Client
+    Builds the client binaries.
+
+.PARAMETER Daemon
+    Builds the daemon binary.
+
+.PARAMETER Binary
+    Builds the client and daemon binaries. A convenient shortcut to `make.ps1 -Client -Daemon`.
+
+.PARAMETER Race
+    Use -race in go build and go test.
+
+.PARAMETER Noisy
+    Use -v in go build.
+
+.PARAMETER ForceBuildAll
+    Use -a in go build.
+
+.PARAMETER NoOpt
+    Use -gcflags -N -l in go build to disable optimisation (can aid debugging).
+
+.PARAMETER CommitSuffix
+    Adds a custom string to be appended to the commit ID (spaces are stripped).
+
+.PARAMETER DCO
+    Runs the DCO (Developer Certificate Of Origin) test (must be run outside a container).
+
+.PARAMETER PkgImports
+    Runs the pkg\ directory imports test (must be run outside a container).
+
+.PARAMETER GoFormat
+    Runs the Go formatting test (must be run outside a container).
+
+.PARAMETER TestUnit
+    Runs unit tests.
+
+.PARAMETER All
+    Runs everything this script knows about that can run in a container.
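+
+.EXAMPLE
+    # A sketch, assuming the repo root as the working directory: build both
+    # binaries with the race detector enabled and verbose go build output.
+    .\hack\make.ps1 -Binary -Race -Noisy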
+
+
+TODO
+- Unify the head commit
+- Add golint and other checks (swagger maybe?)
+
+#>
+
+
+param(
+    [Parameter(Mandatory=$False)][switch]$Client,
+    [Parameter(Mandatory=$False)][switch]$Daemon,
+    [Parameter(Mandatory=$False)][switch]$Binary,
+    [Parameter(Mandatory=$False)][switch]$Race,
+    [Parameter(Mandatory=$False)][switch]$Noisy,
+    [Parameter(Mandatory=$False)][switch]$ForceBuildAll,
+    [Parameter(Mandatory=$False)][switch]$NoOpt,
+    [Parameter(Mandatory=$False)][string]$CommitSuffix="",
+    [Parameter(Mandatory=$False)][switch]$DCO,
+    [Parameter(Mandatory=$False)][switch]$PkgImports,
+    [Parameter(Mandatory=$False)][switch]$GoFormat,
+    [Parameter(Mandatory=$False)][switch]$TestUnit,
+    [Parameter(Mandatory=$False)][switch]$All
+)
+
+$ErrorActionPreference = "Stop"
+$pushed=$False  # To restore the directory if we have temporarily pushed to one.
+
+# Utility function to get the commit ID of the repository
+Function Get-GitCommit() {
+    if (-not (Test-Path ".\.git")) {
+        # If we don't have a .git directory, but we do have the environment
+        # variable DOCKER_GITCOMMIT set, that can override it.
+        if ($env:DOCKER_GITCOMMIT.Length -eq 0) {
+            Throw ".git directory missing and DOCKER_GITCOMMIT environment variable not specified."
+        }
+        Write-Host "INFO: Git commit ($env:DOCKER_GITCOMMIT) assumed from DOCKER_GITCOMMIT environment variable"
+        return $env:DOCKER_GITCOMMIT
+    }
+    $gitCommit=$(git rev-parse --short HEAD)
+    if ($(git status --porcelain --untracked-files=no).Length -ne 0) {
+        $gitCommit="$gitCommit-unsupported"
+        Write-Host ""
+        Write-Warning "This version is unsupported because there are uncommitted file(s)."
+        Write-Warning "Either commit these changes, or add them to .gitignore."
+        git status --porcelain --untracked-files=no | Write-Warning
+        Write-Host ""
+    }
+    return $gitCommit
+}
+
+# Utility function to get the current build version of docker
+Function Get-DockerVersion() {
+    if (-not (Test-Path ".\VERSION")) { Throw "VERSION file not found. Is this running from the root of a docker repository?" }
+    return $(Get-Content ".\VERSION" -raw).ToString().Replace("`n","").Trim()
+}
+
+# Utility function to determine if we are running in a container or not.
+# In Windows, we get this through an environment variable set in `Dockerfile.Windows`
+Function Check-InContainer() {
+    if ($env:FROM_DOCKERFILE.Length -eq 0) {
+        Write-Host ""
+        Write-Warning "Not running in a container. The result might be an incorrect build."
+        Write-Host ""
+        return $False
+    }
+    return $True
+}
+
+# Utility function to warn if the installed version of Go does not match the one
+# specified in the Dockerfile. Used for local builds outside of a container,
+# where the toolchain may be out of date with master.
+Function Verify-GoVersion() {
+    Try {
+        $goVersionDockerfile=(Get-Content ".\Dockerfile" | Select-String "ENV GO_VERSION").ToString().Split(" ")[2]
+        $goVersionInstalled=(go version).ToString().Split(" ")[2].SubString(2)
+    }
+    Catch [Exception] {
+        Throw "Failed to validate go version correctness: $_"
+    }
+    if (-not($goVersionInstalled -eq $goVersionDockerfile)) {
+        Write-Host ""
+        Write-Warning "Building with golang version $goVersionInstalled.
You should update to $goVersionDockerfile" + Write-Host "" + } +} + +# Utility function to get the commit for HEAD +Function Get-HeadCommit() { + $head = Invoke-Expression "git rev-parse --verify HEAD" + if ($LASTEXITCODE -ne 0) { Throw "Failed getting HEAD commit" } + + return $head +} + +# Utility function to get the commit for upstream +Function Get-UpstreamCommit() { + Invoke-Expression "git fetch -q https://github.com/docker/docker.git refs/heads/master" + if ($LASTEXITCODE -ne 0) { Throw "Failed fetching" } + + $upstream = Invoke-Expression "git rev-parse --verify FETCH_HEAD" + if ($LASTEXITCODE -ne 0) { Throw "Failed getting upstream commit" } + + return $upstream +} + +# Build a binary (client or daemon) +Function Execute-Build($type, $additionalBuildTags, $directory) { + # Generate the build flags + $buildTags = "autogen" + if ($Noisy) { $verboseParm=" -v" } + if ($Race) { Write-Warning "Using race detector"; $raceParm=" -race"} + if ($ForceBuildAll) { $allParm=" -a" } + if ($NoOpt) { $optParm=" -gcflags "+""""+"-N -l"+"""" } + if ($additionalBuildTags -ne "") { $buildTags += $(" " + $additionalBuildTags) } + + # Do the go build in the appropriate directory + # Note -linkmode=internal is required to be able to debug on Windows. + # https://github.com/golang/go/issues/14319#issuecomment-189576638 + Write-Host "INFO: Building $type..." + Push-Location $root\cmd\$directory; $global:pushed=$True + $buildCommand = "go build" + ` + $raceParm + ` + $verboseParm + ` + $allParm + ` + $optParm + ` + " -tags """ + $buildTags + """" + ` + " -ldflags """ + "-linkmode=internal" + """" + ` + " -o $root\bundles\"+$directory+".exe" + Invoke-Expression $buildCommand + if ($LASTEXITCODE -ne 0) { Throw "Failed to compile $type" } + Pop-Location; $global:pushed=$False +} + + +# Validates the DCO marker is present on each commit +Function Validate-DCO($headCommit, $upstreamCommit) { + Write-Host "INFO: Validating Developer Certificate of Origin..." + # Username may only contain alphanumeric characters or dashes and cannot begin with a dash + $usernameRegex='[a-zA-Z0-9][a-zA-Z0-9-]+' + + $dcoPrefix="Signed-off-by:" + $dcoRegex="^(Docker-DCO-1.1-)?$dcoPrefix ([^<]+) <([^<>@]+@[^<>]+)>( \(github: ($usernameRegex)\))?$" + + $counts = Invoke-Expression "git diff --numstat $upstreamCommit...$headCommit" + if ($LASTEXITCODE -ne 0) { Throw "Failed git diff --numstat" } + + # Counts of adds and deletes after removing multiple white spaces. AWK anyone? :( + $adds=0; $dels=0; $($counts -replace '\s+', ' ') | %{ + $a=$_.Split(" "); + if ($a[0] -ne "-") { $adds+=[int]$a[0] } + if ($a[1] -ne "-") { $dels+=[int]$a[1] } + } + if (($adds -eq 0) -and ($dels -eq 0)) { + Write-Warning "DCO validation - nothing to validate!" + return + } + + $commits = Invoke-Expression "git log $upstreamCommit..$headCommit --format=format:%H%n" + if ($LASTEXITCODE -ne 0) { Throw "Failed git log --format" } + $commits = $($commits -split '\s+' -match '\S') + $badCommits=@() + $commits | %{ + # Skip commits with no content such as merge commits etc + if ($(git log -1 --format=format: --name-status $_).Length -gt 0) { + # Ignore exit code on next call - always process regardless + $commitMessage = Invoke-Expression "git log -1 --format=format:%B --name-status $_" + if (($commitMessage -match $dcoRegex).Length -eq 0) { $badCommits+=$_ } + } + } + if ($badCommits.Length -eq 0) { + Write-Host "Congratulations! All commits are properly signed with the DCO!" 
+    } else {
+        $e = "`nThese commits do not have a proper '$dcoPrefix' marker:`n"
+        $badCommits | %{ $e+=" - $_`n"}
+        $e += "`nPlease amend each commit to include a properly formatted DCO marker.`n`n"
+        $e += "Visit the following URL for information about the Docker DCO:`n"
+        $e += "https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work`n"
+        Throw $e
+    }
+}
+
+# Validates that .\pkg\... is safely isolated from internal code
+Function Validate-PkgImports($headCommit, $upstreamCommit) {
+    Write-Host "INFO: Validating pkg import isolation..."
+
+    # Get a list of go source-code files which have changed under pkg\. Ignore exit code on next call - always process regardless
+    $files=@(); $files = Invoke-Expression "git diff $upstreamCommit...$headCommit --diff-filter=ACMR --name-only -- `'pkg\*.go`'"
+    $badFiles=@(); $files | %{
+        $file=$_
+        # For the current changed file, get its list of dependencies, sorted and uniqued.
+        $imports = Invoke-Expression "go list -e -f `'{{ .Deps }}`' $file"
+        if ($LASTEXITCODE -ne 0) { Throw "Failed go list for dependencies on $file" }
+        $imports = $imports -Replace "\[" -Replace "\]", "" -Split(" ") | Sort-Object | Get-Unique
+        # Filter out what we are looking for
+        $imports = $imports -NotMatch "^github.com/docker/docker/pkg/" `
+                            -NotMatch "^github.com/docker/docker/vendor" `
+                            -Match "^github.com/docker/docker" `
+                            -Replace "`n", ""
+        $imports | % { $badFiles+="$file imports $_`n" }
+    }
+    if ($badFiles.Length -eq 0) {
+        Write-Host 'Congratulations! ".\pkg\*.go" is safely isolated from internal code.'
+    } else {
+        $e = "`nThese files import internal code (either directly or indirectly):`n"
+        $badFiles | %{ $e+=" - $_"}
+        Throw $e
+    }
+}
+
+# Validates that changed files are correctly go-formatted
+Function Validate-GoFormat($headCommit, $upstreamCommit) {
+    Write-Host "INFO: Validating go formatting on changed files..."
+
+    # Verify gofmt is installed
+    if ($(Get-Command gofmt -ErrorAction SilentlyContinue) -eq $null) { Throw "gofmt does not appear to be installed" }
+
+    # Get a list of all go source-code files which have changed. Ignore exit code on next call - always process regardless
+    $files=@(); $files = Invoke-Expression "git diff $upstreamCommit...$headCommit --diff-filter=ACMR --name-only -- `'*.go`'"
+    $files = $files | Select-String -NotMatch "^vendor/"
+    $badFiles=@(); $files | %{
+        # Deliberately ignore error on next line - treat as failed
+        $content=Invoke-Expression "git show $headCommit`:$_"
+
+        # Next set of hoops are to ensure we have LF not CRLF semantics as otherwise gofmt on Windows will not succeed.
+        # Also note that gofmt on Windows does not appear to support stdin piping correctly. Hence go through a temporary file.
+        $content=$content -join "`n"
+        $content+="`n"
+        $outputFile=[System.IO.Path]::GetTempFileName()
+        if (Test-Path $outputFile) { Remove-Item $outputFile }
+        [System.IO.File]::WriteAllText($outputFile, $content, (New-Object System.Text.UTF8Encoding($False)))
+        $currentFile = $_ -Replace("/","\")
+        Write-Host Checking $currentFile
+        Invoke-Expression "gofmt -s -l $outputFile"
+        if ($LASTEXITCODE -ne 0) { $badFiles+=$currentFile }
+        if (Test-Path $outputFile) { Remove-Item $outputFile }
+    }
+    if ($badFiles.Length -eq 0) {
+        Write-Host 'Congratulations! All Go source files are properly formatted.'
+    } else {
+        $e = "`nThese files are not properly gofmt`'d:`n"
+        $badFiles | %{ $e+=" - $_`n"}
+        $e+= "`nPlease reformat the above files using `"gofmt -s -w`" and commit the result."
+        Throw $e
+    }
+}
+
+# Run the unit tests
+Function Run-UnitTests() {
+    Write-Host "INFO: Running unit tests..."
+    $testPath="./..."
+    $goListCommand = "go list -e -f '{{if ne .Name """ + '\"github.com/docker/docker\"' + """}}{{.ImportPath}}{{end}}' $testPath"
+    $pkgList = $(Invoke-Expression $goListCommand)
+    if ($LASTEXITCODE -ne 0) { Throw "go list for unit tests failed" }
+    $pkgList = $pkgList | Select-String -Pattern "github.com/docker/docker"
+    $pkgList = $pkgList | Select-String -NotMatch "github.com/docker/docker/vendor"
+    $pkgList = $pkgList | Select-String -NotMatch "github.com/docker/docker/man"
+    $pkgList = $pkgList | Select-String -NotMatch "github.com/docker/docker/integration-cli"
+    $pkgList = $pkgList -replace "`r`n", " "
+    $goTestCommand = "go test" + $raceParm + " -cover -ldflags -w -tags """ + "autogen daemon" + """ -a """ + "-test.timeout=10m" + """ $pkgList"
+    Invoke-Expression $goTestCommand
+    if ($LASTEXITCODE -ne 0) { Throw "Unit tests failed" }
+}
+
+# Start of main code.
+Try {
+    Write-Host -ForegroundColor Cyan "INFO: make.ps1 starting at $(Get-Date)"
+
+    # Get to the root of the repo
+    $root = $(Split-Path $MyInvocation.MyCommand.Definition -Parent | Split-Path -Parent)
+    Push-Location $root
+
+    # Handle the "-All" shortcut to turn on all things we can handle.
+    # Note we expressly only include the items which can run in a container - the validation tests cannot
+    # as they require the .git directory which is excluded from the image by .dockerignore
+    if ($All) { $Client=$True; $Daemon=$True; $TestUnit=$True }
+
+    # Handle the "-Binary" shortcut to build both client and daemon.
+    if ($Binary) { $Client = $True; $Daemon = $True }
+
+    # Default to building the daemon if not asked for anything explicitly.
+    if (-not($Client) -and -not($Daemon) -and -not($DCO) -and -not($PkgImports) -and -not($GoFormat) -and -not($TestUnit)) { $Daemon=$True }
+
+    # Verify git is installed
+    if ($(Get-Command git -ErrorAction SilentlyContinue) -eq $null) { Throw "Git does not appear to be installed" }
+
+    # Verify go is installed
+    if ($(Get-Command go -ErrorAction SilentlyContinue) -eq $null) { Throw "Go does not appear to be installed" }
+
+    # Get the git commit. This will also verify if we are in a repo or not. Then add a custom string if supplied.
+    $gitCommit=Get-GitCommit
+    if ($CommitSuffix -ne "") { $gitCommit += "-"+$CommitSuffix -Replace ' ', '' }
+
+    # Get the version of docker (e.g. 17.04.0-dev)
+    $dockerVersion=Get-DockerVersion
+
+    # Give a warning if we are not running in a container and are building binaries or running unit tests.
+    # Not relevant for validation tests as these are fine to run outside of a container.
+    if ($Client -or $Daemon -or $TestUnit) { $inContainer=Check-InContainer }
+
+    # If we are not in a container, validate the version of GO that is installed.
+    if (-not $inContainer) { Verify-GoVersion }
+
+    # Verify GOPATH is set
+    if ($env:GOPATH.Length -eq 0) { Throw "Missing GOPATH environment variable. See https://golang.org/doc/code.html#GOPATH" }
+
+    # Run autogen if building binaries or running unit tests.
+    if ($Client -or $Daemon -or $TestUnit) {
+        Write-Host "INFO: Invoking autogen..."
+        Try { .\hack\make\.go-autogen.ps1 -CommitString $gitCommit -DockerVersion $dockerVersion }
+        Catch [Exception] { Throw $_ }
+    }
+
+    # DCO, Package import and Go formatting tests.
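+    # All three validations compare the local HEAD commit against upstream
+    # docker/docker master (fetched on demand by Get-UpstreamCommit), so only
+    # commits that are new on this branch are inspected.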
+ if ($DCO -or $PkgImports -or $GoFormat) { + # We need the head and upstream commits for these + $headCommit=Get-HeadCommit + $upstreamCommit=Get-UpstreamCommit + + # Run DCO validation + if ($DCO) { Validate-DCO $headCommit $upstreamCommit } + + # Run `gofmt` validation + if ($GoFormat) { Validate-GoFormat $headCommit $upstreamCommit } + + # Run pkg isolation validation + if ($PkgImports) { Validate-PkgImports $headCommit $upstreamCommit } + } + + # Build the binaries + if ($Client -or $Daemon) { + # Create the bundles directory if it doesn't exist + if (-not (Test-Path ".\bundles")) { New-Item ".\bundles" -ItemType Directory | Out-Null } + + # Perform the actual build + if ($Daemon) { Execute-Build "daemon" "daemon" "dockerd" } + if ($Client) { + # Get the repo and commit of the client to build. + "hack\dockerfile\binaries-commits" | ForEach-Object { + $dockerCliRepo = ((Get-Content $_ | Select-String "DOCKERCLI_REPO") -split "=")[1] + $dockerCliCommit = ((Get-Content $_ | Select-String "DOCKERCLI_COMMIT") -split "=")[1] + } + + # Build from a temporary directory. + $tempLocation = "$env:TEMP\$(New-Guid)" + New-Item -ItemType Directory $tempLocation | Out-Null + + # Temporarily override GOPATH, then clone, checkout, and build. + $saveGOPATH = $env:GOPATH + Try { + $env:GOPATH = $tempLocation + $dockerCliRoot = "$env:GOPATH\src\github.com\docker\cli" + Write-Host "INFO: Cloning client repository..." + Invoke-Expression "git clone -q $dockerCliRepo $dockerCliRoot" + if ($LASTEXITCODE -ne 0) { Throw "Failed to clone client repository $dockerCliRepo" } + Invoke-Expression "git -C $dockerCliRoot checkout -q $dockerCliCommit" + if ($LASTEXITCODE -ne 0) { Throw "Failed to checkout client commit $dockerCliCommit" } + Write-Host "INFO: Building client..." + Push-Location "$dockerCliRoot\cmd\docker"; $global:pushed=$True + Invoke-Expression "go build -o $root\bundles\docker.exe" + if ($LASTEXITCODE -ne 0) { Throw "Failed to compile client" } + Pop-Location; $global:pushed=$False + } + Catch [Exception] { + Throw $_ + } + Finally { + # Always restore GOPATH and remove the temporary directory. + $env:GOPATH = $saveGOPATH + Remove-Item -Force -Recurse $tempLocation + } + } + } + + # Run unit tests + if ($TestUnit) { Run-UnitTests } + + # Gratuitous ASCII art. + if ($Daemon -or $Client) { + Write-Host + Write-Host -ForegroundColor Green " ________ ____ __." + Write-Host -ForegroundColor Green " \_____ \ `| `|/ _`|" + Write-Host -ForegroundColor Green " / `| \`| `<" + Write-Host -ForegroundColor Green " / `| \ `| \" + Write-Host -ForegroundColor Green " \_______ /____`|__ \" + Write-Host -ForegroundColor Green " \/ \/" + Write-Host + } +} +Catch [Exception] { + Write-Host -ForegroundColor Red ("`nERROR: make.ps1 failed:`n$_") + + # More gratuitous ASCII art. 
+ Write-Host + Write-Host -ForegroundColor Red "___________ .__.__ .___" + Write-Host -ForegroundColor Red "\_ _____/____ `|__`| `| ____ __`| _/" + Write-Host -ForegroundColor Red " `| __) \__ \ `| `| `| _/ __ \ / __ `| " + Write-Host -ForegroundColor Red " `| \ / __ \`| `| `|_\ ___// /_/ `| " + Write-Host -ForegroundColor Red " \___ / (____ /__`|____/\___ `>____ `| " + Write-Host -ForegroundColor Red " \/ \/ \/ \/ " + Write-Host + + Throw $_ +} +Finally { + Pop-Location # As we pushed to the root of the repo as the very first thing + if ($global:pushed) { Pop-Location } + Write-Host -ForegroundColor Cyan "INFO: make.ps1 ended at $(Get-Date)" +} diff --git a/vendor/github.com/moby/moby/hack/make.sh b/vendor/github.com/moby/moby/hack/make.sh new file mode 100755 index 000000000..b7d59ba94 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make.sh @@ -0,0 +1,294 @@ +#!/usr/bin/env bash +set -e + +# This script builds various binary artifacts from a checkout of the docker +# source code. +# +# Requirements: +# - The current directory should be a checkout of the docker source code +# (https://github.com/docker/docker). Whatever version is checked out +# will be built. +# - The VERSION file, at the root of the repository, should exist, and +# will be used as Docker binary version and package version. +# - The hash of the git commit will also be included in the Docker binary, +# with the suffix -unsupported if the repository isn't clean. +# - The script is intended to be run inside the docker container specified +# in the Dockerfile at the root of the source. In other words: +# DO NOT CALL THIS SCRIPT DIRECTLY. +# - The right way to call this script is to invoke "make" from +# your checkout of the Docker repository. +# the Makefile will do a "docker build -t docker ." and then +# "docker run hack/make.sh" in the resulting image. +# + +set -o pipefail + +export DOCKER_PKG='github.com/docker/docker' +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +export MAKEDIR="$SCRIPTDIR/make" +export PKG_CONFIG=${PKG_CONFIG:-pkg-config} + +# We're a nice, sexy, little shell script, and people might try to run us; +# but really, they shouldn't. We want to be in a container! +inContainer="AssumeSoInitially" +if [ "$(go env GOHOSTOS)" = 'windows' ]; then + if [ -z "$FROM_DOCKERFILE" ]; then + unset inContainer + fi +else + if [ "$PWD" != "/go/src/$DOCKER_PKG" ]; then + unset inContainer + fi +fi + +if [ -z "$inContainer" ]; then + { + echo "# WARNING! I don't seem to be running in a Docker container." + echo "# The result of this command might be an incorrect build, and will not be" + echo "# officially supported." + echo "#" + echo "# Try this instead: make all" + echo "#" + } >&2 +fi + +echo + +# List of bundles to create when no argument is passed +DEFAULT_BUNDLES=( + binary-daemon + dynbinary + + test-unit + test-integration-cli + test-docker-py + + cross + tgz +) + +VERSION=$(< ./VERSION) +! BUILDTIME=$(date --rfc-3339 ns 2> /dev/null | sed -e 's/ /T/') +if [ "$DOCKER_GITCOMMIT" ]; then + GITCOMMIT="$DOCKER_GITCOMMIT" +elif command -v git &> /dev/null && [ -d .git ] && git rev-parse &> /dev/null; then + GITCOMMIT=$(git rev-parse --short HEAD) + if [ -n "$(git status --porcelain --untracked-files=no)" ]; then + GITCOMMIT="$GITCOMMIT-unsupported" + echo "#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~" + echo "# GITCOMMIT = $GITCOMMIT" + echo "# The version you are building is listed as unsupported because" + echo "# there are some files in the git repository that are in an uncommitted state." 
+ echo "# Commit these changes, or add to .gitignore to remove the -unsupported from the version." + echo "# Here is the current list:" + git status --porcelain --untracked-files=no + echo "#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~" + fi +else + echo >&2 'error: .git directory missing and DOCKER_GITCOMMIT not specified' + echo >&2 ' Please either build with the .git directory accessible, or specify the' + echo >&2 ' exact (--short) commit hash you are building using DOCKER_GITCOMMIT for' + echo >&2 ' future accountability in diagnosing build issues. Thanks!' + exit 1 +fi + +if [ "$AUTO_GOPATH" ]; then + rm -rf .gopath + mkdir -p .gopath/src/"$(dirname "${DOCKER_PKG}")" + ln -sf ../../../.. .gopath/src/"${DOCKER_PKG}" + export GOPATH="${PWD}/.gopath" + + if [ "$(go env GOOS)" = 'solaris' ]; then + # sys/unix is installed outside the standard library on solaris + # TODO need to allow for version change, need to get version from go + export GO_VERSION=${GO_VERSION:-"1.8.1"} + export GOPATH="${GOPATH}:/usr/lib/gocode/${GO_VERSION}" + fi +fi + +if [ ! "$GOPATH" ]; then + echo >&2 'error: missing GOPATH; please see https://golang.org/doc/code.html#GOPATH' + echo >&2 ' alternatively, set AUTO_GOPATH=1' + exit 1 +fi + +if ${PKG_CONFIG} 'libsystemd >= 209' 2> /dev/null ; then + DOCKER_BUILDTAGS+=" journald" +elif ${PKG_CONFIG} 'libsystemd-journal' 2> /dev/null ; then + DOCKER_BUILDTAGS+=" journald journald_compat" +fi + +# test whether "btrfs/version.h" exists and apply btrfs_noversion appropriately +if \ + command -v gcc &> /dev/null \ + && ! gcc -E - -o /dev/null &> /dev/null <<<'#include ' \ +; then + DOCKER_BUILDTAGS+=' btrfs_noversion' +fi + +# test whether "libdevmapper.h" is new enough to support deferred remove +# functionality. +if \ + command -v gcc &> /dev/null \ + && ! ( echo -e '#include \nint main() { dm_task_deferred_remove(NULL); }'| gcc -xc - -o /dev/null -ldevmapper &> /dev/null ) \ +; then + DOCKER_BUILDTAGS+=' libdm_no_deferred_remove' +fi + +# Use these flags when compiling the tests and final binary + +IAMSTATIC='true' +if [ -z "$DOCKER_DEBUG" ]; then + LDFLAGS='-w' +fi + +LDFLAGS_STATIC='' +EXTLDFLAGS_STATIC='-static' +# ORIG_BUILDFLAGS is necessary for the cross target which cannot always build +# with options like -race. +ORIG_BUILDFLAGS=( -tags "autogen netgo static_build $DOCKER_BUILDTAGS" -installsuffix netgo ) +# see https://github.com/golang/go/issues/9369#issuecomment-69864440 for why -installsuffix is necessary here + +# When $DOCKER_INCREMENTAL_BINARY is set in the environment, enable incremental +# builds by installing dependent packages to the GOPATH. +REBUILD_FLAG="-a" +if [ "$DOCKER_INCREMENTAL_BINARY" == "1" ] || [ "$DOCKER_INCREMENTAL_BINARY" == "true" ]; then + REBUILD_FLAG="-i" +fi +ORIG_BUILDFLAGS+=( $REBUILD_FLAG ) + +BUILDFLAGS=( $BUILDFLAGS "${ORIG_BUILDFLAGS[@]}" ) +# Test timeout. 
+ +if [ "${DOCKER_ENGINE_GOARCH}" == "arm" ]; then + : ${TIMEOUT:=10m} +elif [ "${DOCKER_ENGINE_GOARCH}" == "windows" ]; then + : ${TIMEOUT:=8m} +else + : ${TIMEOUT:=5m} +fi + +LDFLAGS_STATIC_DOCKER=" + $LDFLAGS_STATIC + -extldflags \"$EXTLDFLAGS_STATIC\" +" + +if [ "$(uname -s)" = 'FreeBSD' ]; then + # Tell cgo the compiler is Clang, not GCC + # https://code.google.com/p/go/source/browse/src/cmd/cgo/gcc.go?spec=svne77e74371f2340ee08622ce602e9f7b15f29d8d3&r=e6794866ebeba2bf8818b9261b54e2eef1c9e588#752 + export CC=clang + + # "-extld clang" is a workaround for + # https://code.google.com/p/go/issues/detail?id=6845 + LDFLAGS="$LDFLAGS -extld clang" +fi + +HAVE_GO_TEST_COVER= +if \ + go help testflag | grep -- -cover > /dev/null \ + && go tool -n cover > /dev/null 2>&1 \ +; then + HAVE_GO_TEST_COVER=1 +fi + +# a helper to provide ".exe" when it's appropriate +binary_extension() { + if [ "$(go env GOOS)" = 'windows' ]; then + echo -n '.exe' + fi +} + +hash_files() { + while [ $# -gt 0 ]; do + f="$1" + shift + dir="$(dirname "$f")" + base="$(basename "$f")" + for hashAlgo in md5 sha256; do + if command -v "${hashAlgo}sum" &> /dev/null; then + ( + # subshell and cd so that we get output files like: + # $HASH docker-$VERSION + # instead of: + # $HASH /go/src/github.com/.../$VERSION/binary/docker-$VERSION + cd "$dir" + "${hashAlgo}sum" "$base" > "$base.$hashAlgo" + ) + fi + done + done +} + +bundle() { + local bundle="$1"; shift + echo "---> Making bundle: $(basename "$bundle") (in $DEST)" + source "$SCRIPTDIR/make/$bundle" "$@" +} + +copy_binaries() { + dir="$1" + # Add nested executables to bundle dir so we have complete set of + # them available, but only if the native OS/ARCH is the same as the + # OS/ARCH of the build target + if [ "$(go env GOOS)/$(go env GOARCH)" == "$(go env GOHOSTOS)/$(go env GOHOSTARCH)" ]; then + if [ -x /usr/local/bin/docker-runc ]; then + echo "Copying nested executables into $dir" + for file in containerd containerd-shim containerd-ctr runc init proxy; do + cp -f `which "docker-$file"` "$dir/" + if [ "$2" == "hash" ]; then + hash_files "$dir/docker-$file" + fi + done + fi + fi +} + +install_binary() { + file="$1" + target="${DOCKER_MAKE_INSTALL_PREFIX:=/usr/local}/bin/" + if [ "$(go env GOOS)" == "linux" ]; then + echo "Installing $(basename $file) to ${target}" + mkdir -p "$target" + cp -f -L "$file" "$target" + else + echo "Install is only supported on linux" + return 1 + fi +} + +main() { + # We want this to fail if the bundles already exist and cannot be removed. + # This is to avoid mixing bundles from different versions of the code. + mkdir -p bundles + if [ -e "bundles/$VERSION" ] && [ -z "$KEEPBUNDLE" ]; then + echo "bundles/$VERSION already exists. Removing." + rm -fr "bundles/$VERSION" && mkdir "bundles/$VERSION" || exit 1 + echo + fi + + if [ "$(go env GOHOSTOS)" != 'windows' ]; then + # Windows and symlinks don't get along well + + rm -f bundles/latest + ln -s "$VERSION" bundles/latest + fi + + if [ $# -lt 1 ]; then + bundles=(${DEFAULT_BUNDLES[@]}) + else + bundles=($@) + fi + for bundle in ${bundles[@]}; do + export DEST="bundles/$VERSION/$(basename "$bundle")" + # Cygdrive paths don't play well with go build -o. 
+ if [[ "$(uname -s)" == CYGWIN* ]]; then + export DEST="$(cygpath -mw "$DEST")" + fi + mkdir -p "$DEST" + ABS_DEST="$(cd "$DEST" && pwd -P)" + bundle "$bundle" + echo + done +} + +main "$@" diff --git a/vendor/github.com/moby/moby/hack/make/.binary b/vendor/github.com/moby/moby/hack/make/.binary new file mode 100644 index 000000000..8d4265cb6 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.binary @@ -0,0 +1,39 @@ +#!/usr/bin/env bash +set -e + +GO_PACKAGE='github.com/docker/docker/cmd/dockerd' +BINARY_SHORT_NAME='dockerd' +BINARY_NAME="$BINARY_SHORT_NAME-$VERSION" +BINARY_EXTENSION="$(binary_extension)" +BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION" + +source "${MAKEDIR}/.go-autogen" + +( +export GOGC=${DOCKER_BUILD_GOGC:-1000} + +if [ "$(go env GOOS)/$(go env GOARCH)" != "$(go env GOHOSTOS)/$(go env GOHOSTARCH)" ]; then + # must be cross-compiling! + case "$(go env GOOS)/$(go env GOARCH)" in + windows/amd64) + export CC=x86_64-w64-mingw32-gcc + export CGO_ENABLED=1 + ;; + esac +fi + +echo "Building: $DEST/$BINARY_FULLNAME" +go build \ + -o "$DEST/$BINARY_FULLNAME" \ + "${BUILDFLAGS[@]}" \ + -ldflags " + $LDFLAGS + $LDFLAGS_STATIC_DOCKER + " \ + $GO_PACKAGE +) + +echo "Created binary: $DEST/$BINARY_FULLNAME" +ln -sf "$BINARY_FULLNAME" "$DEST/$BINARY_SHORT_NAME$BINARY_EXTENSION" + +hash_files "$DEST/$BINARY_FULLNAME" diff --git a/vendor/github.com/moby/moby/hack/make/.binary-setup b/vendor/github.com/moby/moby/hack/make/.binary-setup new file mode 100644 index 000000000..15de89fe1 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.binary-setup @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +DOCKER_DAEMON_BINARY_NAME='dockerd' +DOCKER_RUNC_BINARY_NAME='docker-runc' +DOCKER_CONTAINERD_BINARY_NAME='docker-containerd' +DOCKER_CONTAINERD_CTR_BINARY_NAME='docker-containerd-ctr' +DOCKER_CONTAINERD_SHIM_BINARY_NAME='docker-containerd-shim' +DOCKER_PROXY_BINARY_NAME='docker-proxy' +DOCKER_INIT_BINARY_NAME='docker-init' diff --git a/vendor/github.com/moby/moby/hack/make/.build-deb/compat b/vendor/github.com/moby/moby/hack/make/.build-deb/compat new file mode 100644 index 000000000..ec635144f --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.build-deb/compat @@ -0,0 +1 @@ +9 diff --git a/vendor/github.com/moby/moby/hack/make/.build-deb/control b/vendor/github.com/moby/moby/hack/make/.build-deb/control new file mode 100644 index 000000000..0f5439947 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.build-deb/control @@ -0,0 +1,29 @@ +Source: docker-engine +Section: admin +Priority: optional +Maintainer: Docker +Standards-Version: 3.9.6 +Homepage: https://dockerproject.org +Vcs-Browser: https://github.com/docker/docker +Vcs-Git: git://github.com/docker/docker.git + +Package: docker-engine +Architecture: linux-any +Depends: iptables, ${misc:Depends}, ${perl:Depends}, ${shlibs:Depends} +Recommends: aufs-tools, + ca-certificates, + cgroupfs-mount | cgroup-lite, + git, + xz-utils, + ${apparmor:Recommends} +Conflicts: docker (<< 1.5~), docker.io, lxc-docker, lxc-docker-virtual-package, docker-engine-cs +Description: Docker: the open-source application container engine + Docker is an open source project to build, ship and run any application as a + lightweight container + . + Docker containers are both hardware-agnostic and platform-agnostic. This means + they can run anywhere, from your laptop to the largest EC2 compute instance and + everything in between - and they don't require you to use a particular + language, framework or packaging system. 
That makes them great building blocks + for deploying and scaling web apps, databases, and backend services without + depending on a particular stack or provider. diff --git a/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.bash-completion b/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.bash-completion new file mode 100644 index 000000000..6ea111930 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.bash-completion @@ -0,0 +1 @@ +contrib/completion/bash/docker diff --git a/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.docker.default b/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.docker.default new file mode 120000 index 000000000..4278533d6 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.docker.default @@ -0,0 +1 @@ +../../../contrib/init/sysvinit-debian/docker.default \ No newline at end of file diff --git a/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.docker.init b/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.docker.init new file mode 120000 index 000000000..8cb89d30d --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.docker.init @@ -0,0 +1 @@ +../../../contrib/init/sysvinit-debian/docker \ No newline at end of file diff --git a/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.docker.upstart b/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.docker.upstart new file mode 120000 index 000000000..7e1b64a3e --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.docker.upstart @@ -0,0 +1 @@ +../../../contrib/init/upstart/docker.conf \ No newline at end of file diff --git a/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.install b/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.install new file mode 100644 index 000000000..dc6b25f04 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.install @@ -0,0 +1,12 @@ +#contrib/syntax/vim/doc/* /usr/share/vim/vimfiles/doc/ +#contrib/syntax/vim/ftdetect/* /usr/share/vim/vimfiles/ftdetect/ +#contrib/syntax/vim/syntax/* /usr/share/vim/vimfiles/syntax/ +contrib/*-integration usr/share/docker-engine/contrib/ +contrib/check-config.sh usr/share/docker-engine/contrib/ +contrib/completion/fish/docker.fish usr/share/fish/vendor_completions.d/ +contrib/completion/zsh/_docker usr/share/zsh/vendor-completions/ +contrib/init/systemd/docker.service lib/systemd/system/ +contrib/init/systemd/docker.socket lib/systemd/system/ +contrib/mk* usr/share/docker-engine/contrib/ +contrib/nuke-graph-directory.sh usr/share/docker-engine/contrib/ +contrib/syntax/nano/Dockerfile.nanorc usr/share/nano/ diff --git a/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.manpages b/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.manpages new file mode 100644 index 000000000..1aa62186a --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.manpages @@ -0,0 +1 @@ +man/man*/* diff --git a/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.postinst b/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.postinst new file mode 100644 index 000000000..eeef6ca80 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.postinst @@ -0,0 +1,20 @@ +#!/bin/sh +set -e + +case "$1" in + configure) + if [ -z "$2" ]; then + if ! 
getent group docker > /dev/null; then + groupadd --system docker + fi + fi + ;; + abort-*) + # How'd we get here?? + exit 1 + ;; + *) + ;; +esac + +#DEBHELPER# diff --git a/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.udev b/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.udev new file mode 120000 index 000000000..914a36195 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.build-deb/docker-engine.udev @@ -0,0 +1 @@ +../../../contrib/udev/80-docker.rules \ No newline at end of file diff --git a/vendor/github.com/moby/moby/hack/make/.build-deb/docs b/vendor/github.com/moby/moby/hack/make/.build-deb/docs new file mode 100644 index 000000000..b43bf86b5 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.build-deb/docs @@ -0,0 +1 @@ +README.md diff --git a/vendor/github.com/moby/moby/hack/make/.build-deb/rules b/vendor/github.com/moby/moby/hack/make/.build-deb/rules new file mode 100755 index 000000000..19557ed50 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.build-deb/rules @@ -0,0 +1,53 @@ +#!/usr/bin/make -f + +VERSION = $(shell cat VERSION) +SYSTEMD_VERSION := $(shell dpkg-query -W -f='$${Version}\n' systemd | cut -d- -f1) +SYSTEMD_GT_227 := $(shell [ '$(SYSTEMD_VERSION)' ] && [ '$(SYSTEMD_VERSION)' -gt 227 ] && echo true ) + +override_dh_gencontrol: + # if we're on Ubuntu, we need to Recommends: apparmor + echo 'apparmor:Recommends=$(shell dpkg-vendor --is Ubuntu && echo apparmor)' >> debian/docker-engine.substvars + dh_gencontrol + +override_dh_auto_build: + ./hack/make.sh dynbinary + # ./man/md2man-all.sh runs outside the build container (if at all), since we don't have go-md2man here + +override_dh_auto_test: + ./bundles/$(VERSION)/dynbinary-daemon/dockerd -v + +override_dh_strip: + # Go has lots of problems with stripping, so just don't + +override_dh_auto_install: + mkdir -p debian/docker-engine/usr/bin + cp -aT "$$(readlink -f bundles/$(VERSION)/dynbinary-daemon/dockerd)" debian/docker-engine/usr/bin/dockerd + cp -aT /usr/local/bin/docker-proxy debian/docker-engine/usr/bin/docker-proxy + cp -aT /usr/local/bin/docker-containerd debian/docker-engine/usr/bin/docker-containerd + cp -aT /usr/local/bin/docker-containerd-shim debian/docker-engine/usr/bin/docker-containerd-shim + cp -aT /usr/local/bin/docker-containerd-ctr debian/docker-engine/usr/bin/docker-containerd-ctr + cp -aT /usr/local/bin/docker-runc debian/docker-engine/usr/bin/docker-runc + cp -aT /usr/local/bin/docker-init debian/docker-engine/usr/bin/docker-init + mkdir -p debian/docker-engine/usr/lib/docker + +override_dh_installinit: + # use "docker" as our service name, not "docker-engine" + dh_installinit --name=docker +ifeq (true, $(SYSTEMD_GT_227)) + $(warning "Setting TasksMax=infinity") + sed -i -- 's/#TasksMax=infinity/TasksMax=infinity/' debian/docker-engine/lib/systemd/system/docker.service +endif + +override_dh_installudev: + # match our existing priority + dh_installudev --priority=z80 + +override_dh_install: + dh_install + dh_apparmor --profile-name=docker-engine -pdocker-engine + +override_dh_shlibdeps: + dh_shlibdeps --dpkg-shlibdeps-params=--ignore-missing-info + +%: + dh $@ --with=bash-completion $(shell command -v dh_systemd_enable > /dev/null 2>&1 && echo --with=systemd) diff --git a/vendor/github.com/moby/moby/hack/make/.build-rpm/docker-engine-selinux.spec b/vendor/github.com/moby/moby/hack/make/.build-rpm/docker-engine-selinux.spec new file mode 100644 index 000000000..6a4b6c0c3 --- /dev/null +++ 
b/vendor/github.com/moby/moby/hack/make/.build-rpm/docker-engine-selinux.spec @@ -0,0 +1,99 @@ +# Some bits borrowed from the openstack-selinux package +Name: docker-engine-selinux +Version: %{_version} +Release: %{_release}%{?dist} +Summary: SELinux Policies for the open-source application container engine +BuildArch: noarch +Group: Tools/Docker + +License: GPLv2 +Source: %{name}.tar.gz + +URL: https://dockerproject.org +Vendor: Docker +Packager: Docker + +%global selinux_policyver 3.13.1-102 +%if 0%{?oraclelinux} >= 7 +%global selinux_policyver 3.13.1-102.0.3.el7_3.15 +%endif # oraclelinux 7 +%global selinuxtype targeted +%global moduletype services +%global modulenames docker + +Requires(post): selinux-policy-base >= %{selinux_policyver}, selinux-policy-targeted >= %{selinux_policyver}, policycoreutils, policycoreutils-python libselinux-utils +BuildRequires: selinux-policy selinux-policy-devel + +# conflicting packages +Conflicts: docker-selinux + +# Usage: _format var format +# Expand 'modulenames' into various formats as needed +# Format must contain '$x' somewhere to do anything useful +%global _format() export %1=""; for x in %{modulenames}; do %1+=%2; %1+=" "; done; + +# Relabel files +%global relabel_files() \ + /sbin/restorecon -R %{_bindir}/docker %{_localstatedir}/run/docker.sock %{_localstatedir}/run/docker.pid %{_sysconfdir}/docker %{_localstatedir}/log/docker %{_localstatedir}/log/lxc %{_localstatedir}/lock/lxc %{_usr}/lib/systemd/system/docker.service /root/.docker &> /dev/null || : \ + +%description +SELinux policy modules for use with Docker + +%prep +%if 0%{?centos} <= 6 +%setup -n %{name} +%else +%autosetup -n %{name} +%endif + +%build +make SHARE="%{_datadir}" TARGETS="%{modulenames}" + +%install + +# Install SELinux interfaces +%_format INTERFACES $x.if +install -d %{buildroot}%{_datadir}/selinux/devel/include/%{moduletype} +install -p -m 644 $INTERFACES %{buildroot}%{_datadir}/selinux/devel/include/%{moduletype} + +# Install policy modules +%_format MODULES $x.pp.bz2 +install -d %{buildroot}%{_datadir}/selinux/packages +install -m 0644 $MODULES %{buildroot}%{_datadir}/selinux/packages + +%post +# +# Install all modules in a single transaction +# +if [ $1 -eq 1 ]; then + %{_sbindir}/setsebool -P -N virt_use_nfs=1 virt_sandbox_use_all_caps=1 +fi +%_format MODULES %{_datadir}/selinux/packages/$x.pp.bz2 +%{_sbindir}/semodule -n -s %{selinuxtype} -i $MODULES +if %{_sbindir}/selinuxenabled ; then + %{_sbindir}/load_policy + %relabel_files + if [ $1 -eq 1 ]; then + restorecon -R %{_sharedstatedir}/docker + fi +fi + +%postun +if [ $1 -eq 0 ]; then + %{_sbindir}/semodule -n -r %{modulenames} &> /dev/null || : + if %{_sbindir}/selinuxenabled ; then + %{_sbindir}/load_policy + %relabel_files + fi +fi + +%files +%doc LICENSE +%defattr(-,root,root,0755) +%attr(0644,root,root) %{_datadir}/selinux/packages/*.pp.bz2 +%attr(0644,root,root) %{_datadir}/selinux/devel/include/%{moduletype}/*.if + +%changelog +* Tue Dec 1 2015 Jessica Frazelle 1.9.1-1 +- add licence to rpm +- add selinux-policy and docker-engine-selinux rpm diff --git a/vendor/github.com/moby/moby/hack/make/.build-rpm/docker-engine.spec b/vendor/github.com/moby/moby/hack/make/.build-rpm/docker-engine.spec new file mode 100644 index 000000000..6225bb74f --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.build-rpm/docker-engine.spec @@ -0,0 +1,249 @@ +Name: docker-engine +Version: %{_version} +Release: %{_release}%{?dist} +Summary: The open-source application container engine +Group: Tools/Docker + +License: ASL 
2.0 +Source: %{name}.tar.gz + +URL: https://dockerproject.org +Vendor: Docker +Packager: Docker + +# is_systemd conditional +%if 0%{?fedora} >= 21 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?suse_version} >= 1210 +%global is_systemd 1 +%endif + +# required packages for build +# most are already in the container (see contrib/builder/rpm/ARCH/generate.sh) +# only require systemd on those systems +%if 0%{?is_systemd} +%if 0%{?suse_version} >= 1210 +BuildRequires: systemd-rpm-macros +%{?systemd_requires} +%else +%if 0%{?fedora} >= 25 +# Systemd 230 and up no longer have libsystemd-journal (see https://bugzilla.redhat.com/show_bug.cgi?id=1350301) +BuildRequires: pkgconfig(systemd) +Requires: systemd-units +%else +BuildRequires: pkgconfig(systemd) +Requires: systemd-units +BuildRequires: pkgconfig(libsystemd-journal) +%endif +%endif +%else +Requires(post): chkconfig +Requires(preun): chkconfig +# This is for /sbin/service +Requires(preun): initscripts +%endif + +# required packages on install +Requires: /bin/sh +Requires: iptables +%if !0%{?suse_version} +Requires: libcgroup +%else +Requires: libcgroup1 +%endif +Requires: tar +Requires: xz +%if 0%{?fedora} >= 21 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7 || 0%{?amzn} >= 1 +# Resolves: rhbz#1165615 +Requires: device-mapper-libs >= 1.02.90-1 +%endif +%if 0%{?oraclelinux} >= 6 +# Require Oracle Unbreakable Enterprise Kernel R4 and newer device-mapper +Requires: kernel-uek >= 4.1 +Requires: device-mapper >= 1.02.90-2 +%endif + +# docker-selinux conditional +%if 0%{?fedora} >= 20 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7 +%global with_selinux 1 +%endif + +# DWZ problem with multiple golang binary, see bug +# https://bugzilla.redhat.com/show_bug.cgi?id=995136#c12 +%if 0%{?fedora} >= 20 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7 +%global _dwz_low_mem_die_limit 0 +%endif + +# start if with_selinux +%if 0%{?with_selinux} + +%if 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?fedora} >= 25 +Requires: container-selinux >= 2.9 +%endif# centos 7, rhel 7, fedora 25 + +%if 0%{?oraclelinux} >= 7 +%global selinux_policyver 3.13.1-102.0.3.el7_3.15 +%endif # oraclelinux 7 +%if 0%{?fedora} == 24 +%global selinux_policyver 3.13.1-191 +%endif # fedora 24 -- container-selinux on fedora24 does not properly set dockerd, for now just carry docker-engine-selinux for it +%if 0%{?oraclelinux} >= 7 || 0%{?fedora} == 24 +Requires: selinux-policy >= %{selinux_policyver} +Requires(pre): %{name}-selinux >= %{version}-%{release} +%endif # selinux-policy for oraclelinux-7, fedora-24 + +%endif # with_selinux + +# conflicting packages +Conflicts: docker +Conflicts: docker-io +Conflicts: docker-engine-cs + +%description +Docker is an open source project to build, ship and run any application as a +lightweight container. + +Docker containers are both hardware-agnostic and platform-agnostic. This means +they can run anywhere, from your laptop to the largest EC2 compute instance and +everything in between - and they don't require you to use a particular +language, framework or packaging system. That makes them great building blocks +for deploying and scaling web apps, databases, and backend services without +depending on a particular stack or provider. 
+ +%prep +%if 0%{?centos} <= 6 || 0%{?oraclelinux} <=6 +%setup -n %{name} +%else +%autosetup -n %{name} +%endif + +%build +export DOCKER_GITCOMMIT=%{_gitcommit} +./hack/make.sh dynbinary +# ./man/md2man-all.sh runs outside the build container (if at all), since we don't have go-md2man here + +%check +./bundles/%{_origversion}/dynbinary-daemon/dockerd -v + +%install +# install binary +install -d $RPM_BUILD_ROOT/%{_bindir} +install -p -m 755 bundles/%{_origversion}/dynbinary-daemon/dockerd-%{_origversion} $RPM_BUILD_ROOT/%{_bindir}/dockerd + +# install proxy +install -p -m 755 /usr/local/bin/docker-proxy $RPM_BUILD_ROOT/%{_bindir}/docker-proxy + +# install containerd +install -p -m 755 /usr/local/bin/docker-containerd $RPM_BUILD_ROOT/%{_bindir}/docker-containerd +install -p -m 755 /usr/local/bin/docker-containerd-shim $RPM_BUILD_ROOT/%{_bindir}/docker-containerd-shim +install -p -m 755 /usr/local/bin/docker-containerd-ctr $RPM_BUILD_ROOT/%{_bindir}/docker-containerd-ctr + +# install runc +install -p -m 755 /usr/local/bin/docker-runc $RPM_BUILD_ROOT/%{_bindir}/docker-runc + +# install tini +install -p -m 755 /usr/local/bin/docker-init $RPM_BUILD_ROOT/%{_bindir}/docker-init + +# install udev rules +install -d $RPM_BUILD_ROOT/%{_sysconfdir}/udev/rules.d +install -p -m 644 contrib/udev/80-docker.rules $RPM_BUILD_ROOT/%{_sysconfdir}/udev/rules.d/80-docker.rules + +# add init scripts +install -d $RPM_BUILD_ROOT/etc/sysconfig +install -d $RPM_BUILD_ROOT/%{_initddir} + + +%if 0%{?is_systemd} +install -d $RPM_BUILD_ROOT/%{_unitdir} +install -p -m 644 contrib/init/systemd/docker.service.rpm $RPM_BUILD_ROOT/%{_unitdir}/docker.service +%else +install -p -m 644 contrib/init/sysvinit-redhat/docker.sysconfig $RPM_BUILD_ROOT/etc/sysconfig/docker +install -p -m 755 contrib/init/sysvinit-redhat/docker $RPM_BUILD_ROOT/%{_initddir}/docker +%endif +# add bash, zsh, and fish completions +install -d $RPM_BUILD_ROOT/usr/share/bash-completion/completions +install -d $RPM_BUILD_ROOT/usr/share/zsh/vendor-completions +install -d $RPM_BUILD_ROOT/usr/share/fish/vendor_completions.d +install -p -m 644 contrib/completion/bash/docker $RPM_BUILD_ROOT/usr/share/bash-completion/completions/docker +install -p -m 644 contrib/completion/zsh/_docker $RPM_BUILD_ROOT/usr/share/zsh/vendor-completions/_docker +install -p -m 644 contrib/completion/fish/docker.fish $RPM_BUILD_ROOT/usr/share/fish/vendor_completions.d/docker.fish + +# install manpages +install -d %{buildroot}%{_mandir}/man1 +install -p -m 644 man/man1/*.1 $RPM_BUILD_ROOT/%{_mandir}/man1 +install -d %{buildroot}%{_mandir}/man5 +install -p -m 644 man/man5/*.5 $RPM_BUILD_ROOT/%{_mandir}/man5 +install -d %{buildroot}%{_mandir}/man8 +install -p -m 644 man/man8/*.8 $RPM_BUILD_ROOT/%{_mandir}/man8 + +# add vimfiles +install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/doc +install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/ftdetect +install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/syntax +install -p -m 644 contrib/syntax/vim/doc/dockerfile.txt $RPM_BUILD_ROOT/usr/share/vim/vimfiles/doc/dockerfile.txt +install -p -m 644 contrib/syntax/vim/ftdetect/dockerfile.vim $RPM_BUILD_ROOT/usr/share/vim/vimfiles/ftdetect/dockerfile.vim +install -p -m 644 contrib/syntax/vim/syntax/dockerfile.vim $RPM_BUILD_ROOT/usr/share/vim/vimfiles/syntax/dockerfile.vim + +# add nano +install -d $RPM_BUILD_ROOT/usr/share/nano +install -p -m 644 contrib/syntax/nano/Dockerfile.nanorc $RPM_BUILD_ROOT/usr/share/nano/Dockerfile.nanorc + +# list files owned by the package here +%files +%doc AUTHORS CHANGELOG.md 
CONTRIBUTING.md LICENSE MAINTAINERS NOTICE README.md +/%{_bindir}/docker +/%{_bindir}/dockerd +/%{_bindir}/docker-containerd +/%{_bindir}/docker-containerd-shim +/%{_bindir}/docker-containerd-ctr +/%{_bindir}/docker-proxy +/%{_bindir}/docker-runc +/%{_bindir}/docker-init +/%{_sysconfdir}/udev/rules.d/80-docker.rules +%if 0%{?is_systemd} +/%{_unitdir}/docker.service +%else +%config(noreplace,missingok) /etc/sysconfig/docker +/%{_initddir}/docker +%endif +/usr/share/bash-completion/completions/docker +/usr/share/zsh/vendor-completions/_docker +/usr/share/fish/vendor_completions.d/docker.fish +%doc +/%{_mandir}/man1/* +/%{_mandir}/man5/* +/%{_mandir}/man8/* +/usr/share/vim/vimfiles/doc/dockerfile.txt +/usr/share/vim/vimfiles/ftdetect/dockerfile.vim +/usr/share/vim/vimfiles/syntax/dockerfile.vim +/usr/share/nano/Dockerfile.nanorc + +%post +%if 0%{?is_systemd} +%systemd_post docker +%else +# This adds the proper /etc/rc*.d links for the script +/sbin/chkconfig --add docker +%endif +if ! getent group docker > /dev/null; then + groupadd --system docker +fi + +%preun +%if 0%{?is_systemd} +%systemd_preun docker +%else +if [ $1 -eq 0 ] ; then + /sbin/service docker stop >/dev/null 2>&1 + /sbin/chkconfig --del docker +fi +%endif + +%postun +%if 0%{?is_systemd} +%systemd_postun_with_restart docker +%else +if [ "$1" -ge "1" ] ; then + /sbin/service docker condrestart >/dev/null 2>&1 || : +fi +%endif + +%changelog diff --git a/vendor/github.com/moby/moby/hack/make/.detect-daemon-osarch b/vendor/github.com/moby/moby/hack/make/.detect-daemon-osarch new file mode 100644 index 000000000..ac16055fc --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.detect-daemon-osarch @@ -0,0 +1,73 @@ +#!/usr/bin/env bash +set -e + +docker-version-osarch() { + if ! type docker &>/dev/null; then + # docker is not installed + return + fi + local target="$1" # "Client" or "Server" + local fmtStr="{{.${target}.Os}}/{{.${target}.Arch}}" + if docker version -f "$fmtStr" 2>/dev/null; then + # if "docker version -f" works, let's just use that! + return + fi + docker version | awk ' + $1 ~ /^(Client|Server):$/ { section = 0 } + $1 == "'"$target"':" { section = 1; next } + section && $1 == "OS/Arch:" { print $2 } + + # old versions of Docker + $1 == "OS/Arch" && $2 == "('"${target,,}"'):" { print $3 } + ' +} + +# Retrieve OS/ARCH of docker daemon, e.g. 
linux/amd64 +export DOCKER_ENGINE_OSARCH="${DOCKER_ENGINE_OSARCH:=$(docker-version-osarch 'Server')}" +export DOCKER_ENGINE_GOOS="${DOCKER_ENGINE_OSARCH%/*}" +export DOCKER_ENGINE_GOARCH="${DOCKER_ENGINE_OSARCH##*/}" +DOCKER_ENGINE_GOARCH=${DOCKER_ENGINE_GOARCH:=amd64} + +# and the client, just in case +export DOCKER_CLIENT_OSARCH="$(docker-version-osarch 'Client')" +export DOCKER_CLIENT_GOOS="${DOCKER_CLIENT_OSARCH%/*}" +export DOCKER_CLIENT_GOARCH="${DOCKER_CLIENT_OSARCH##*/}" +DOCKER_CLIENT_GOARCH=${DOCKER_CLIENT_GOARCH:=amd64} + +# Retrieve the architecture used in contrib/builder/(deb|rpm)/$PACKAGE_ARCH/ +PACKAGE_ARCH='amd64' +case "${DOCKER_ENGINE_GOARCH:-$DOCKER_CLIENT_GOARCH}" in + arm) + PACKAGE_ARCH='armhf' + ;; + arm64) + PACKAGE_ARCH='aarch64' + ;; + amd64|ppc64le|s390x) + PACKAGE_ARCH="${DOCKER_ENGINE_GOARCH:-$DOCKER_CLIENT_GOARCH}" + ;; + *) + echo >&2 "warning: not sure how to convert '$DOCKER_ENGINE_GOARCH' to a 'Docker' arch, assuming '$PACKAGE_ARCH'" + ;; +esac +export PACKAGE_ARCH + +DOCKERFILE='Dockerfile' +TEST_IMAGE_NAMESPACE= +case "$PACKAGE_ARCH" in + amd64) + case "${DOCKER_ENGINE_GOOS:-$DOCKER_CLIENT_GOOS}" in + windows) + DOCKERFILE='Dockerfile.windows' + ;; + solaris) + DOCKERFILE='Dockerfile.solaris' + ;; + esac + ;; + *) + DOCKERFILE="Dockerfile.$PACKAGE_ARCH" + TEST_IMAGE_NAMESPACE="$PACKAGE_ARCH" + ;; +esac +export DOCKERFILE TEST_IMAGE_NAMESPACE diff --git a/vendor/github.com/moby/moby/hack/make/.ensure-emptyfs b/vendor/github.com/moby/moby/hack/make/.ensure-emptyfs new file mode 100644 index 000000000..7b00b9d45 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.ensure-emptyfs @@ -0,0 +1,23 @@ +#!/usr/bin/env bash +set -e + +if ! docker inspect -t image emptyfs &> /dev/null; then + # let's build a "docker save" tarball for "emptyfs" + # see https://github.com/docker/docker/pull/5262 + # and also https://github.com/docker/docker/issues/4242 + dir="$DEST/emptyfs" + mkdir -p "$dir" + ( + cd "$dir" + echo '{"emptyfs":{"latest":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158"}}' > repositories + mkdir -p 511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158 + ( + cd 511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158 + echo '{"id":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158","comment":"Imported from -","created":"2013-06-13T14:03:50.821769-07:00","container_config":{"Hostname":"","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":null},"docker_version":"0.4.0","architecture":"x86_64","Size":0}' > json + echo '1.0' > VERSION + tar -cf layer.tar --files-from /dev/null + ) + ) + ( set -x; tar -cC "$dir" . 
| docker load ) + rm -rf "$dir" +fi diff --git a/vendor/github.com/moby/moby/hack/make/.go-autogen b/vendor/github.com/moby/moby/hack/make/.go-autogen new file mode 100644 index 000000000..ec2018067 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.go-autogen @@ -0,0 +1,86 @@ +#!/usr/bin/env bash + +rm -rf autogen + +source hack/dockerfile/binaries-commits + +cat > dockerversion/version_autogen.go < dockerversion/version_autogen_unix.go < + +param( + [Parameter(Mandatory=$true)][string]$CommitString, + [Parameter(Mandatory=$true)][string]$DockerVersion +) + +$ErrorActionPreference = "Stop" + +# Utility function to get the build date/time in UTC +Function Get-BuildDateTime() { + return $(Get-Date).ToUniversalTime() +} + +try { + $buildDateTime=Get-BuildDateTime + + if (Test-Path ".\autogen") { + Remove-Item ".\autogen" -Recurse -Force | Out-Null + } + + $fileContents = ' +// +build autogen + +// Package dockerversion is auto-generated at build-time +package dockerversion + +// Default build-time variable for library-import. +// This file is overridden on build with build-time informations. +const ( + GitCommit string = "'+$CommitString+'" + Version string = "'+$DockerVersion+'" + BuildTime string = "'+$buildDateTime+'" +) + +// AUTOGENERATED FILE; see hack\make\.go-autogen.ps1 +' + + # Write the file without BOM + $outputFile="$(pwd)\dockerversion\version_autogen.go" + if (Test-Path $outputFile) { Remove-Item $outputFile } + [System.IO.File]::WriteAllText($outputFile, $fileContents, (New-Object System.Text.UTF8Encoding($False))) + + New-Item -ItemType Directory -Path "autogen\winresources\tmp" | Out-Null + New-Item -ItemType Directory -Path "autogen\winresources\docker" | Out-Null + New-Item -ItemType Directory -Path "autogen\winresources\dockerd" | Out-Null + Copy-Item "hack\make\.resources-windows\resources.go" "autogen\winresources\docker" + Copy-Item "hack\make\.resources-windows\resources.go" "autogen\winresources\dockerd" + + # Generate a version in the form major,minor,patch,build + $versionQuad=$DockerVersion -replace "[^0-9.]*" -replace "\.", "," + + # Compile the messages + windmc hack\make\.resources-windows\event_messages.mc -h autogen\winresources\tmp -r autogen\winresources\tmp + if ($LASTEXITCODE -ne 0) { Throw "Failed to compile event message resources" } + + # If you really want to understand this madness below, search the Internet for powershell variables after verbatim arguments... Needed to get double-quotes passed through to the compiler options. + # Generate the .syso files containing all the resources and manifest needed to compile the final docker binaries. Both 32 and 64-bit clients. 
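+    # (Everything after the --% stop-parsing token is passed to windres verbatim.
+    # PowerShell variables are not expanded past that point, but cmd-style
+    # %_ag_dockerVersion% / %_ag_gitCommit% references are, which is why the
+    # values are staged in environment variables below.)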
+    $env:_ag_dockerVersion=$DockerVersion
+    $env:_ag_gitCommit=$CommitString
+
+    windres -i hack/make/.resources-windows/docker.rc -o autogen/winresources/docker/rsrc_amd64.syso -F pe-x86-64 --use-temp-file -I autogen/winresources/tmp -D DOCKER_VERSION_QUAD=$versionQuad --% -D DOCKER_VERSION=\"%_ag_dockerVersion%\" -D DOCKER_COMMIT=\"%_ag_gitCommit%\"
+    if ($LASTEXITCODE -ne 0) { Throw "Failed to compile client 64-bit resources" }
+
+    windres -i hack/make/.resources-windows/docker.rc -o autogen/winresources/docker/rsrc_386.syso -F pe-i386 --use-temp-file -I autogen/winresources/tmp -D DOCKER_VERSION_QUAD=$versionQuad --% -D DOCKER_VERSION=\"%_ag_dockerVersion%\" -D DOCKER_COMMIT=\"%_ag_gitCommit%\"
+    if ($LASTEXITCODE -ne 0) { Throw "Failed to compile client 32-bit resources" }
+
+    windres -i hack/make/.resources-windows/dockerd.rc -o autogen/winresources/dockerd/rsrc_amd64.syso -F pe-x86-64 --use-temp-file -I autogen/winresources/tmp -D DOCKER_VERSION_QUAD=$versionQuad --% -D DOCKER_VERSION=\"%_ag_dockerVersion%\" -D DOCKER_COMMIT=\"%_ag_gitCommit%\"
+    if ($LASTEXITCODE -ne 0) { Throw "Failed to compile daemon resources" }
+}
+Catch [Exception] {
+    # Throw the error onto the caller to display errors. We don't expect this script to be called directly
+    Throw ".go-autogen.ps1 failed with error $_"
+}
+Finally {
+    Remove-Item .\autogen\winresources\tmp -Recurse -Force -ErrorAction SilentlyContinue | Out-Null
+    $env:_ag_dockerVersion=""
+    $env:_ag_gitCommit=""
+}
diff --git a/vendor/github.com/moby/moby/hack/make/.integration-daemon-setup b/vendor/github.com/moby/moby/hack/make/.integration-daemon-setup
new file mode 100644
index 000000000..5134e4c2d
--- /dev/null
+++ b/vendor/github.com/moby/moby/hack/make/.integration-daemon-setup
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+set -e
+
+bundle .detect-daemon-osarch
+if [ "$DOCKER_ENGINE_GOOS" != "windows" ]; then
+	bundle .ensure-emptyfs
+fi
diff --git a/vendor/github.com/moby/moby/hack/make/.integration-daemon-start b/vendor/github.com/moby/moby/hack/make/.integration-daemon-start
new file mode 100644
index 000000000..dafd0533d
--- /dev/null
+++ b/vendor/github.com/moby/moby/hack/make/.integration-daemon-start
@@ -0,0 +1,130 @@
+#!/usr/bin/env bash
+
+# see test-integration-cli for example usage of this script
+
+base="$ABS_DEST/.."
+export PATH="$base/binary-daemon:$base/dynbinary-daemon:$PATH"
+
+export TEST_CLIENT_BINARY=docker
+
+# Do not bump this version! Integration tests should no longer rely on the docker cli, they should be
+# API tests instead. For the existing tests the scripts will use a frozen version of the docker cli
+# with a DOCKER_API_VERSION frozen to 1.30, which should ensure that the CI remains green at all times.
+export DOCKER_API_VERSION=1.30
+if [ -n "$DOCKER_CLI_PATH" ]; then
+	export TEST_CLIENT_BINARY=/usr/local/cli/$(basename "$DOCKER_CLI_PATH")
+fi
+
+echo "Using test binary $TEST_CLIENT_BINARY"
+if ! command -v "$TEST_CLIENT_BINARY" &> /dev/null; then
+	echo >&2 "error: missing test client $TEST_CLIENT_BINARY"
+	false
+fi
+
+export DOCKER_CLI_VERSION=$(${TEST_CLIENT_BINARY} --version | awk '{ gsub(",", " "); print $3 }')
+
+# This is a temporary hack for split-binary mode. It can be removed once
+# https://github.com/docker/docker/pull/22134 is merged into docker master
+if [ "$(go env GOOS)" = 'windows' ]; then
+	return
+fi
+
+if [ -z "$DOCKER_TEST_HOST" ]; then
+	if docker version &> /dev/null; then
+		echo >&2 'skipping daemon start, since daemon appears to be already started'
+		return
+	fi
+fi
+
+if !
command -v dockerd &> /dev/null; then + echo >&2 'error: binary-daemon or dynbinary-daemon must be run before .integration-daemon-start' + false +fi + +# intentionally open a couple bogus file descriptors to help test that they get scrubbed in containers +exec 41>&1 42>&2 + +export DOCKER_GRAPHDRIVER=${DOCKER_GRAPHDRIVER:-vfs} +export DOCKER_USERLANDPROXY=${DOCKER_USERLANDPROXY:-true} + +# example usage: DOCKER_STORAGE_OPTS="dm.basesize=20G,dm.loopdatasize=200G" +storage_params="" +if [ -n "$DOCKER_STORAGE_OPTS" ]; then + IFS=',' + for i in ${DOCKER_STORAGE_OPTS}; do + storage_params="--storage-opt $i $storage_params" + done + unset IFS +fi + +# example usage: DOCKER_REMAP_ROOT=default +extra_params="" +if [ "$DOCKER_REMAP_ROOT" ]; then + extra_params="--userns-remap $DOCKER_REMAP_ROOT" +fi + +# example usage: DOCKER_EXPERIMENTAL=1 +if [ "$DOCKER_EXPERIMENTAL" ]; then + echo >&2 '# DOCKER_EXPERIMENTAL is set: starting daemon with experimental features enabled! ' + extra_params="$extra_params --experimental" +fi + +if [ -z "$DOCKER_TEST_HOST" ]; then + # Start apparmor if it is enabled + if [ -e "/sys/module/apparmor/parameters/enabled" ] && [ "$(cat /sys/module/apparmor/parameters/enabled)" == "Y" ]; then + # reset container variable so apparmor profile is applied to process + # see https://github.com/docker/libcontainer/blob/master/apparmor/apparmor.go#L16 + export container="" + ( + set -x + /etc/init.d/apparmor start + ) + fi + + export DOCKER_HOST="unix://$(cd "$DEST" && pwd)/docker.sock" # "pwd" tricks to make sure $DEST is an absolute path, not a relative one + ( set -x; exec \ + dockerd --debug \ + --host "$DOCKER_HOST" \ + --storage-driver "$DOCKER_GRAPHDRIVER" \ + --pidfile "$DEST/docker.pid" \ + --userland-proxy="$DOCKER_USERLANDPROXY" \ + $storage_params \ + $extra_params \ + &> "$DEST/docker.log" + ) & + # make sure that if the script exits unexpectedly, we stop this daemon we just started + trap 'bundle .integration-daemon-stop' EXIT +else + export DOCKER_HOST="$DOCKER_TEST_HOST" +fi + +# give it a little time to come up so it's "ready" +tries=60 +echo "INFO: Waiting for daemon to start..." +while ! $TEST_CLIENT_BINARY version &> /dev/null; do + (( tries-- )) + if [ $tries -le 0 ]; then + printf "\n" + if [ -z "$DOCKER_HOST" ]; then + echo >&2 "error: daemon failed to start" + echo >&2 " check $DEST/docker.log for details" + else + echo >&2 "error: daemon at $DOCKER_HOST fails to '$TEST_CLIENT_BINARY version':" + $TEST_CLIENT_BINARY version >&2 || true + # Additional Windows CI debugging as this is a common error as of + # January 2016 + if [ "$(go env GOOS)" = 'windows' ]; then + echo >&2 "Container log below:" + echo >&2 "---" + # Important - use the docker on the CI host, not the one built locally + # which is currently in our path. + ! /c/bin/docker -H=$MAIN_DOCKER_HOST logs docker-$COMMITHASH + echo >&2 "---" + fi + fi + false + fi + printf "." + sleep 2 +done +printf "\n" diff --git a/vendor/github.com/moby/moby/hack/make/.integration-daemon-stop b/vendor/github.com/moby/moby/hack/make/.integration-daemon-stop new file mode 100644 index 000000000..591a8d6be --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.integration-daemon-stop @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +if [ ! "$(go env GOOS)" = 'windows' ]; then + trap - EXIT # reset EXIT trap applied in .integration-daemon-start + + for pidFile in $(find "$DEST" -name docker.pid); do + pid=$(set -x; cat "$pidFile") + ( set -x; kill "$pid" ) + if ! 
wait "$pid"; then + echo >&2 "warning: PID $pid from $pidFile had a nonzero exit code" + fi + done + + if [ -z "$DOCKER_TEST_HOST" ]; then + # Stop apparmor if it is enabled + if [ -e "/sys/module/apparmor/parameters/enabled" ] && [ "$(cat /sys/module/apparmor/parameters/enabled)" == "Y" ]; then + ( + set -x + /etc/init.d/apparmor stop + ) + fi + fi +else + # Note this script is not actionable on Windows to Linux CI. Instead the + # DIND daemon under test is torn down by the Jenkins tear-down script + echo "INFO: Not stopping daemon on Windows CI" +fi diff --git a/vendor/github.com/moby/moby/hack/make/.integration-test-helpers b/vendor/github.com/moby/moby/hack/make/.integration-test-helpers new file mode 100644 index 000000000..4ff9677c7 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.integration-test-helpers @@ -0,0 +1,84 @@ +#!/usr/bin/env bash + +: ${TEST_REPEAT:=0} + +bundle_test_integration_cli() { + TESTFLAGS="$TESTFLAGS -check.v -check.timeout=${TIMEOUT} -test.timeout=360m" + go_test_dir integration-cli $DOCKER_INTEGRATION_TESTS_VERIFIED +} + +# If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'. +# You can use this to select certain tests to run, e.g. +# +# TESTFLAGS='-test.run ^TestBuild$' ./hack/make.sh test-unit +# +# For integration-cli test, we use [gocheck](https://labix.org/gocheck), if you want +# to run certain tests on your local host, you should run with command: +# +# TESTFLAGS='-check.f DockerSuite.TestBuild*' ./hack/make.sh binary test-integration-cli +# +go_test_dir() { + dir=$1 + precompiled=$2 + testbinary="$ABS_DEST/test.main" + testcover=() + testcoverprofile=() + ( + set -e + mkdir -p "$DEST/coverprofiles" + export DEST="$ABS_DEST" # in a subshell this is safe -- our integration-cli tests need DEST, and "cd" screws it up + if [ -z $precompiled ]; then + ensure_test_dir $1 $testbinary + fi + cd "$dir" + i=0 + while ((++i)); do + test_env "$testbinary" $TESTFLAGS + if [ $i -gt "$TEST_REPEAT" ]; then + break + fi + echo "Repeating test ($i)" + done + ) +} + +ensure_test_dir() { + ( + # make sure a test dir will compile + dir="$1" + out="$2" + echo Building test dir: "$dir" + set -xe + cd "$dir" + go test -c -o "$out" -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" + ) +} + +test_env() { + ( + set -xe + # use "env -i" to tightly control the environment variables that bleed into the tests + env -i \ + DEST="$DEST" \ + DOCKER_CLI_VERSION="$DOCKER_CLI_VERSION" \ + DOCKER_API_VERSION="$DOCKER_API_VERSION" \ + DOCKER_INTEGRATION_DAEMON_DEST="$DOCKER_INTEGRATION_DAEMON_DEST" \ + DOCKER_TLS_VERIFY="$DOCKER_TEST_TLS_VERIFY" \ + DOCKER_CERT_PATH="$DOCKER_TEST_CERT_PATH" \ + DOCKER_ENGINE_GOARCH="$DOCKER_ENGINE_GOARCH" \ + DOCKER_GRAPHDRIVER="$DOCKER_GRAPHDRIVER" \ + DOCKER_USERLANDPROXY="$DOCKER_USERLANDPROXY" \ + DOCKER_HOST="$DOCKER_HOST" \ + DOCKER_REMAP_ROOT="$DOCKER_REMAP_ROOT" \ + DOCKER_REMOTE_DAEMON="$DOCKER_REMOTE_DAEMON" \ + DOCKERFILE="$DOCKERFILE" \ + GOPATH="$GOPATH" \ + GOTRACEBACK=all \ + HOME="$ABS_DEST/fake-HOME" \ + PATH="$PATH" \ + TEMP="$TEMP" \ + TEST_IMAGE_NAMESPACE="$TEST_IMAGE_NAMESPACE" \ + TEST_CLIENT_BINARY="$TEST_CLIENT_BINARY" \ + "$@" + ) +} diff --git a/vendor/github.com/moby/moby/hack/make/.resources-windows/common.rc b/vendor/github.com/moby/moby/hack/make/.resources-windows/common.rc new file mode 100644 index 000000000..000fb3536 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.resources-windows/common.rc @@ -0,0 +1,38 @@ +// Application icon +1 ICON "docker.ico" + +// Windows executable 
manifest +1 24 /* RT_MANIFEST */ "docker.exe.manifest" + +// Version information +1 VERSIONINFO + +#ifdef DOCKER_VERSION_QUAD +FILEVERSION DOCKER_VERSION_QUAD +PRODUCTVERSION DOCKER_VERSION_QUAD +#endif + +BEGIN + BLOCK "StringFileInfo" + BEGIN + BLOCK "000004B0" + BEGIN + VALUE "ProductName", DOCKER_NAME + +#ifdef DOCKER_VERSION + VALUE "FileVersion", DOCKER_VERSION + VALUE "ProductVersion", DOCKER_VERSION +#endif + +#ifdef DOCKER_COMMIT + VALUE "OriginalFileName", DOCKER_COMMIT +#endif + + END + END + + BLOCK "VarFileInfo" + BEGIN + VALUE "Translation", 0x0000, 0x04B0 + END +END diff --git a/vendor/github.com/moby/moby/hack/make/.resources-windows/docker.exe.manifest b/vendor/github.com/moby/moby/hack/make/.resources-windows/docker.exe.manifest new file mode 100644 index 000000000..674bc9422 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/.resources-windows/docker.exe.manifest @@ -0,0 +1,18 @@ +Docker [the manifest's XML markup was stripped in extraction; only this element text survives] \ No newline at end of file diff --git a/vendor/github.com/moby/moby/hack/make/.resources-windows/docker.ico b/vendor/github.com/moby/moby/hack/make/.resources-windows/docker.ico new file mode 100644 index 0000000000000000000000000000000000000000..c6506ec8dbd8e295d98a084412e9d2c358a6ba39 GIT binary patch literal 370070 [370,070 bytes of base85-encoded icon data omitted]
literal 0 HcmV?d00001 diff --git a/vendor/github.com/moby/moby/hack/make/.resources-windows/docker.png b/vendor/github.com/moby/moby/hack/make/.resources-windows/docker.png new file mode 100644 index 0000000000000000000000000000000000000000..88df0b66dfcf0b298de8cd296b793b9220c4a914 GIT binary patch literal 658195 [658,195 bytes of base85-encoded PNG data omitted]
zCby5CdVXt0Ueq7Io$4>^-~H&&qj5Q1bJl0HUOj91jm)_@qhDOrWAwN?dliQjd74@Q z8~@w%@6l?9TlMSn@v*0Fbq?5D{kQzZcP9n=H}miRe_!mmSZi#D*Dv45@IIIExPQdS z*FJn>aSHBRxRX}b_R>Q2*gHL_E@r`RS`_H`+7=L$jsfia3chr7xt>xC1 z`(H@@bz<_mQ&-9k_;tX-pCVo!eRJZ?$ox-_^xV7Ui!BRRZ+c<;0}ANvpQ@-1>5An>%e! zv_JFS(bmuBpFV!~^wd`pCoM@ibh>kfddBnv`E$}f4s5Ubc)`biz3cz(tuI?dg@<2k zJwES7?how`#2%>gWySoj^Z#01u|mjOnMZH^xBt-mLrdo+&---y=>L6}cQf%;<)2RN z*}3QO-~O1~IP9g=XLIiCm~?tlwY%S(3+#FMe?e30X@_U6P3wE`hn!C&auIV@AeJ3>p$QY{ZyW zHSccOUNdM!Y^$0BnsyKEKDK?_@DW|6jF0O(rANP*DI;UTV{5jK@`;=rK@p6On;0E5 zdGx3;6Cx(Js;P@Nf{wX!NX;Ofmx&`=)ojZX3>w(IS5W)-@o_;-8#WD&32hV>)I7Xl zSktD>n>T6@)F?EpNl0i|NLb_Gu<(c`O(H@=gY>3mln)(6j*lH0(WgTveR6cxs^;*C z6URn`giM+=so|u?4dcfT3keGk4-W}#6w;_sF!>0c@cNjE(UXJ6OsG{5i6Nd2aT8+3 zj~F{~MEsZ_9&hxJ_*W;ks#%jK^uXxLHG1rWgvLzJvm=TjlcUFmgf$EeDJD?&?&gDy z9$jR}2@^ZMMmf=!tjGi>^m~15Tu7g|3GuIvkBRH}THKh4wTj9_Y|Mjr#=bg!l#Wkq zOi0|QxX~130%bm|$SjOCv_KvNFO-c3my0d^#1TUuR!*U?{7SLG;wFzMCK$g`5KMO~ z6x2{8-G347$Hzraj33`GK7LeG!Cm&S{Y1?tepdz6>mD65VhnFoJsBBd8t7q-Ka4M~ zL-fSBC<@y&IJ8-CXybliP3d-wXiCS;>A)zG0-q*vbf>#NHhN;TC9xFpYZ^~{?1-VS zTNKMfpM~S--aVr0mVQ}ie!KfElllvvZD zEt-T6iES1f8yhn;xJhX2kl^r`A+f>H;mu>CW1EFGj*X5sl%4`$ z(cPwd_VkS#SJ>5XVbloSGh}S^_z7|RPqS4`!{6qEht7j%3J;_E6OV|F;ZL8a7=8oC z#YTn{a($Ss2O;&rMm$=bmLVvJ-za$TX-pa(H^$uUUl5hfSMiZdh#xv}QuO$^w!`QS zvFvZNAl||OO%sR=;m_+)Bb-nb6QW>Vq0zHVrz+d{E=dra>Qu>_2A2#3*|9>oRKM zY!r&?afA+PUwUD@Kt!OQC-kC6w@TSp!|ERrK*HHiy}m;f<<3S8#-m|O)e6CmbK zfy+D}ldHgG0>u0&aGB?0auv8tfS5l8F7td$t^$_{5c8+NWuA}8Rp2rKV*V7k%=0n1 z3S1^Y%%1|6c|Imrfy)Gl`BUIB&&T8{aG3xxe+pdY`IuY3XxXkl0xe8n+K+K;4mw7%W zSAoj}i1}0CGSA24DsY(qF@FkN=J}Xh1uhdH=1+mkJRg&*z-0o&{3&pm=VNjexJ-bU zKQ)WX=izeDxG}T_bP_H0%;}f-4=of8is{m)J7ZIxpj}L+GnS>L-y4j*8p_zs!L-J7 zK4TT*KY980PR#rF?5-W!_M0qYTJj{*@_%7K00ck)1VF$_0&*+;!#fCo00@8p200hJ)YzPDbAOHd&00F^000JNY0w7=$0SJgq*boQ= zKmY_l00M%000ck)1VF$h0uT_Juptl#fB*=900ad000@8p2!Mc11Rx+bVM8Dg009sH z0SE~00T2KI5C8$22tYt=!iGQ~00JNY0uT_~10VnbAOHe35rBZ$gbjf}00ck)1Rx-| z2S5M>KmY`6A^-ug2^#`|00@8p2tYt^4}bs&fB*>CL;wO}6E*|_0T2KI5P*Q-9smIl z009uNi2ww|CTs`<0w4eaAOHcuJpckA00JOj69EW_P1q0!1V8`;KmY=QdjJGL00i8N zfGi<@s_RMw6>%MFqyz#W00M4B00QDx)rrD@00@A9>j*$VT*n$IfdB}AfLjrOfVfq4 zqA(x;0wCZz0uT__u|`TD00JQ3Rs_1jKc$krD`i00_7h0SJg& zRVNAq0w4eat|I^eaUE-<1Ogxc0&YbB0^(NHiNb&Y2!Md=2tYtw#~LYt00@A9TM>YO zxK(wcFdzT|AmBOz5D?d~MoJ(60wCa41Rx-8Rh=jd2!H?xxQ+k>#C5EZ5(t0*2)Gpi z2#8x%Ckg`sAOHfcBLD$$9c!cn0w4eaZbbkB;#Sp(!hiq>fPm`=KtNo_8YzJQ2!McF z5rBZWRdu2;AOHd&;5q^j5ZAFrO2rUJ&`vGp3YKtP-XARywb7vu^AKmY_pPXGcU`YJ*OK>!3mK%4|1 zAmXeSKtP-Xl-6WRCX-p?9ezPT^aQH!Od3L&3~+Hq zzq=4s=9Y`oL~H4(!v2uOs$Ta?f0F3xKrr3V3VF?1y7cmn9c zay(PS2m&Ag0vP%AOHd&U=sldh)viK2n0X?1V8`+f_nf2KmY_lz$O9^5Sy?e5D0((2!H?t1or?4 zfB*=9fK3D-AU0t`AP@in5C8!P2<`z8009sH0hKp+4DAOHdo5ZnVG00JNY z0yYtVfY^i$fj|HRKmY_FAh-uW00ck)1Z*Mz0kH`i0)YSsfB*v)C^@f`AkVRNt9&p7L3pY#Q0yWR+yAlI4FG7s+x{iL8d~Ewbrk{m7Ojn?d#_ zS#Ee7+*~1>OSTMIf3o+<-XrTpwjx<>!Y)LWIaqLre?dSv0@ghLi^?VcV}Sty5Ma!g zAaH9i{B6k~3I@0Gh6DYtcM*QL_!yf&I^h-u1l*1Q1jOyC)L{im&`xz&T%wO?CFQW3 zi*uRe;`ERl2sniR`mvmX6_J7f2!Me23E10@g^_ge*D-Pk0w4eaq9gzT5oNU?Qy>5W zARvAM5D@X#5poCuAOHfQB!H20QC5qSGqv(?BBjvU$*Gt|-<$PoxW?c}b7*_f@lS?x z7ya&_IkasGrM2?#c{(?pa#hHS6Hda21?KWP0W6|)EdjJZT+17&fdB}AfZGs23&d@z z(x!r3pioQ5@|A9r$Z|8S;D_-RLI~MSWcQH`BKtDg8)WB`!W+`uK>-rU8p87xd9%rKGn_16mA91ad9p3Yb|z~`lMdNb{tua1 zWHZSQCRo0$1?2cpW2tYur;|1R!00I&sK!fITAEolmqOvR_#{iXP zB#Fq}qOQZOWC9S7k{Q7V2!Mb@2~fAycomI%n9Dwi$OwptIYPx>gMbvz23J4;1SC#C zqfkt=qzF0@S$BDcC4S-OApr=;Ll!s!0T6H_0yOg~ipD++U-BxkWd3UP6dErXUZO{M zct8LG@&E-6KmY{XiGWfrpJGjU_`o_%^_@#od|O)+&XfH@Ks>qjQFah;V*+Z0!s-Qs ze5$XnQoV@=-wVy!a$`((6%7Jn7d`|A0T2))0cu6Gphbhm-zl~@n<}|nL+eKVvN(VT 
zdxU^^aOI=uAmFY96f$|7J!SJ&D63Xje`UEnad<*l2#6=tGRg=79!7vxn_JU;m7q0j zg=xykPo?fp&6!@N?l^x)04nm31&%-f1l)uGEdU9!wBu8>P>HQPJeZ<;*yJxb=PA_Y|Bz{>_fxOnl$Ft+^HV>)TMu#z zrDJZnsjc_(c)@9j|MsZ9Gl_>aq!~)*gX!E5-cNtdBPzrHA=8E2g6OvyKOs}w$WMU_ zW6zOWI-OK9IGFi0I)t2Qd9A$&V>jV%Sa~>+O5y4cAM|GZ8m=+WqX&C0I{wLU?xNov z7oy6vDU{aA!{_O|dLcLGpM(*MgU(OJGgGMk%Z(hROzyOoWB$avmzDW=QqO9aq)ynH z=YLUUMHg04RNw{zARt)+^iG`T%*^xGXxg!Hj5E`aO59FUX{iA4(0o2a<#u)5r*)qvSe||dp!^`< zW(2BkxORhPOI2`Yv9j~>QZ99?6yVG>ToE_Auv|eIX@CF-NQOYZR_pW@9u1`XO9qG& zGwSN5yAz9os6hY(K!7n4kSosSz$X;ZdJw}*J7*^++W1}F4=!2|F3H%+!+bjEaF?X@ zFp+yS<7f86Q<`#AFhw@V*v$pU%V-AF@dxRwJj|!H4t#L#nuGJiVu^85#9}O+moj#9 z!SQ@zas5F$D-XAz<1P=*U2|~IcvhTil5|Zf`AmD1Qi^hRYNR2}DUD;!PDjk~LqJ?x zJ6u4fx;9C6fzm4^Aap*MtgEN-9;7rM3J7CdKwQmY2c@}$tg+z&Qr+0W1;=YBjfVol z7#9#%vsg-L@_e|Rp%%yMLy;}{vpK1DY(;Mo(QJ6z|KW_U&tNgCzRobF7 zmY?)y%TGp<9YOYUvRlcXB%4X$lgVBn%j@NUk>&kk7s;lPzq@3Qko}2l0@=P~d6$OZ z2F(z`s{k1Q0T6IBft>vOQ!2TFFZ6Mm(K1H=7h#s4iHQ6w-_B?BO6NTFb zWP6bstiCSAl00w5q50U8JSpSGBBZhJFz zH!G8+=em5%+S+WNrT`194P=df2#7Vb;1>iyKr#fV8|y+wZZ4mxc~9M`Vw{{Bh8UfIxDWFBp#cc6}cf^2#A3M zen9{P+>XGV9@W-UE99ah3P$^+uxsgQe8)ydMCZ&n&Qs)Lotc0kZU~5h1b#sP1l*E9 zR(|dfM?CU*%Y(N-L~i)N%m8YA+;c@fAt0`xj5I(11Oy{Mt&nXrZt;gb8Mu>~A%#J6 zUc{j!9)H?XR%^mSK&+t!zaRhtk|aQ%5$r-!ZfzP+PR`C^_pX$MuXm(jDW>TCNF}U-cfZLOJEEF;`5D+2hBW(}>0YL~{f2K+}b!YujRN^->(phF+Zc&%S z;sRH_SJ3S|}J1o>3m`!r=&geHaZ#6MU3l$gYWb3zM* zQ-A>h5D+1O1g*nIhpFiyr>4jEq#y%iOjfASDb)1fD+Wbo_(#*{k^PX=qNauN+LAd)Z;5J^-MG6@18 zU=0Bhj*2R|d?^V>C%H^kly)%M?_IaxLo3V{yH}_@yUA`Pn@FO;zW^mUBeMoxG4&w1 zLJOARm4`zq?F0JsdDE0n81y2mA}T_%T4bKQDxQ?9MZol5BH*ff6|{D!W+`$>G5<8 z;rR<9?+@mNv*C;k{5U(Ft|3i2e87T-;bD0mcw9S(MH~8kV2H(eI;l?95WXn>hVZ;h z+@G4PH(6ePhAswE{`@-kXDEw-A33owVsVfk=*-H)W9j!}{h1-pw$!S#N?Hob(^|CP<01j#ag`&>8Cz=j-x6Q6ab9oF z(C;C#2gNd`Va@ZO^8JY6H#Lkf1o#8O*zku9x5mcsP%^mjhZ462!yk&KehlJc@MG?` zsPLxo@jn>jKN|jrWa`Ht5{9(6aGClsTr29=G&~mtZViTulxcjX{EAA`Ac}@~%>5P> z-ZV`vaK?uJ{h0bO|VlgZxW3)CHRr-WprUlw(5{^5C8#72$WrWDNG@k zk5bCyok%nuvqm%w@fUoPHq~13xJKcBHpGG7M+u+{>rp0f0Ra#YkN~wZno}#|1zPmi zhFTg=(4e^Sc$n2WGl)l)^_yHRrq&0aC;?Lp0SHJjVBib_AmCgABoq~CyqnLKicl~a zZ)MaW!6-xGVLC8vb4Co}VZ#oIeDIu)kHMHB;JE^_{A6X~(%cN2M=k}bH8p9bNBKuC zzySn6;86lHR+_d9QPZ5La%9WUGJrr5i?U<`6f(KDBZcD8#A&{{{=*(NhzE~uGqpa> z6%{Ys9un|e0b#5yVfgMLijt2s3MKPbna;E=IUIa|00_92fSuyOGyAb?b1co&9M)8n zVDZaJFwe*`2!KEd1RNk9$0&)-B_xB(M8J~;qyp{$5C8%9BH#e=;6P?m_Z1%lfhmE2 zCwF0)t{5%>6)uAS2#A@0jq5y2i*%bDzBP5kdC>$sSwJjWxmq+fxPkx(h>3u1dlFh< z&NoQ3)6ApR2j6oJ#sUJKEFi{vN?Cx4dqJz#LQwDq0xl!KH%8=pTk&5z4c~syP^PYl7`i9;m#;`c=x_l$eMm@!5K-}6{p#oH91VF%Y0;xIK zy5@)3KT6A^<34#4GUrW@qCOZS7(;p%@-M#qNS6PK)`&tt z_O4%bl?ZhwyNbZMsyV_x9cGoW@Ee#`#R<=a324q9WTn=9!erT=H=RFV}aHD5lOG=l2c5_1wkIklzdbyMAP{oUB4wm1m~1 z(rf;YrH6H9X-&E^t;*0yFXYXwPxt~4E*^#@=7HBvv08gpk$BKOlXRJ#iRjPLO7l-M z*lhUH^RqUGc2Gci&hb{p+hoU+HGQdnj@*7HD@r5yhtUaM4I*=tY;jFB-Um2|?Dr%j zyfy0>v;5>Nis&_KBdg{Y!05vu76;?qFus5P!o2@Yq^}ehJs{68%cS0~bf$Ri(aktiDhOxnCo^q+xua3K?Q^W@&WTivP~_Zz(s~n zwOc_j^DOXh-plfnQ^>;%^PV2PTFKj&`C9N{5apdK%=_?m=5_K9#vhi&8Quy>i5kfA z0?QZY#jV^G$+vYaFz=(gtRLf`2L=~h{zjhf6DUb8^7Q-6_n%GlLa~EhD6G4U^Syjo z=2NX$#*;1G`hH2HzZcz1D{Ap;Qt#@7qx6*%#nyK|eXQqpjZk)-1lEcc$(tk|f=u0| z=N@lm^d-xmL7~MK?BNC8trh8vo*2mr2tni@y za{3?Y=eNIKUrP^+%zEK0qhVW?Tc(O6=qQt}G4CUZI`NcI7mU3or*bWpRjV<}su{}i z{L9!IooK?}rGfG!nr+2}qso7~Sw;Q=W`)ouR|wLHNfN!7<#_oOSFY=Hfww-K(KTLN znv9-#{APWL>~ONxi}hIah27-bkDhrqioR@{t0V=4KyuOenyj@DjVbZ&tVFV0gnlNl z=jqpSv;1T?^8B;q0SbKb&k|OkR*23}pS|+dC8oY|lBqBLtNXQY4!?|LS8u>F>a}3m zK@BC_UBUbFxlpJtpCB$r8E=g_%j8t4%`)q@V41ZVF|Ec`Gr%n?4WEkab$UNdn%GAJ z;#Ry+SQh;QU%6$gvg~U0$<||BShT95UvM4-5a9v;W4@hwkEzZdVdc;4XQeM6Cl~H& 
zqoFUZ->+DM-K$!UCDU&jy=d?*FTEK`;<3nZ;>=%q*70W?f7bLN+m`HuOwh5l<}w9% zk)Cyl)&?xjzoZ0&U_MR-nMQVSaRsu&{S1XoB)f;~-(>$I(8a&%EkD_W{QO1MLc^QW z_gJa>H(15gJ2Xq^0zD*8(Ru~mMd^6M2Til9Jz4Nzp#J~7QWYGZhy!Be-E+!2mvutQ zCkUv>DsR($ZZaV5fTYYvL9Mv#s&!Zv2`}fCSGtmU|4zEfXH==_;ioxwm_*|wlR0L# zkmb3h2WM`%$5>AJ>MXZ>HL{Ph+_H~}_GN@m-Bw=yk7>^yW&y-YPHSPDVe;tjDY-%o zmKjj)TBYNAM#;4KhbW!1^o+1Wr3&Yio^MKWsZX{s*$A?&$krw6Xv1H=-=w@>v^Qc& z2nc~~Km{F5Hkz!fMgemvE#6(pciZXi8S2VY-nmLI8N`gHY74y9 zCzW!4mh2_H=`7t-VS!Zb45j4hUb8ySwRCw3a6J%?Xb=C8YzZ@?i z`xyzzVfwYnh*m&IG+T(z$;4k&I1}#1?-|{FLt{=_dePGI$s=<9lC3wy#d!KL=OO33_%FYI_=g9!B}!p6ltcQzd#MQDkmVDMteRCODghzUXh?;4T5ow1&hq)L@Ko9NKlTO zX33=j649d{UslfthFa@?c^@DQ2!H?xfPnZ3+@dV*Buk5sbfR)-&$^X%O(GC0`cr8pj^Ji*80}w@JU`UAOHep3D9ct{L~C##Dizd+h;Gt!)y)Nb=ww-=9x^T;&Z*NHrkv#n}o;egb)A(BuGG;MO{`Yv{Q)j2BNMmhmX%lHX`D#tCRJl z^gLTYcugynd?Ard2HjXTVM8FP6G+fbHD3fnzg4;bgC52?^Y#6k4G;gq*a^puCjb>u zD7V{+Rhx~$5L?+Hocj^r{Z@I&X-x39E0rpXX}mhRA7Sy82m+!|EU~p(bblx|45HYY z9>RfuSPAfHRt5c5)(TQ%2xIC#JUiw%;>`6p%?IVCO5CgwC+i$Y0EStYht_Y-WXv%2-4+aF>kpORXq|;c37-t^( z`0in&TH(79Hr#g@1F->GkwH}IUy5|M&@mrlAr_K?H>nZevki6AbHykg8ch;YtJ_MA zsk`JCU08;iRHb zy24DBqWHML4FY0-9M|k5y~$YhNiRcC-7o)RqisiFK)@XcXtQaC6(8#mW2=J$=IcFt z{bFgi#FOs8TdHM%fEd@DT5Z?9&wN^=DBskK%ZOtTFh_u|>)>1H@|_UHGQ+B<{QCMW zrL9Vq7Ig`3t|Nd!bYs?E{8f(KyL*#%fwOruT&7krxv!UTtR8pF;XPT}o7MK$Fn_gz zC1>Zc6ZbRONwP5FC7}Co2X94-SvWX8AKK??RI8@qB`KYp**l010Wq!XPj?<=8R_*+ zovk^S(-)^?9{9y6Idv=as$Pl>t5=>?^nP@8{)N_%*Q8)4GX_1h3rZpj}Q=N)Y9zj$t*j|-=3&FX^$)gwLBKJ3Sy1Q7KrA) zJ$u>4pEt1U*RJUhDwRq$>5VC@dQkNTNVi>0W)uIogqFwyBCb8)qM`fFtk}D&3|JJ3 zJQ_3)r*V(J3{Lp9pMbprfOiCUvM6V3+;o4OVx0PDO(87w`+V&(55(h}B}>_t zi@tau9z2RR?b;O*5AN2!N&xFxEs!6=xR8MEQ-SogC%!09^kW?cE^@h+`T4&9@h|`p z&o2a|sJc)nCe!yY?c3mrcMs4UdUrQ!T74f=r(gm9j_rwT*N&Z!6uE4%L&wm0<*Ij0bcldEF0SHJ@y!rQTy?uTv>XJ2X zy1_{|UmMc2ye|u+nO3|hykY&%MdIOITDkNgd4oB6l`-5>y$FI}P; z(07ZB>B9N*?7itT*oJlMb=_N^d_0@+0d{VlEahEf7&zKbz_9V5q{cdoi|yzC6@A!z zvau76#YI3VuIz{?H$$5zvAMf9u+&t$KTgBD4io`2z|5QDZf4483T~m~`^jcYcIo0p z#;w8JCjkkd*;%Ei^#bEq0=kwjpIs&87Z+dOpV=!dhd7oDVskVBw{L+YXs6oAOsN`d z_kak`f_{mwow*TZ@O!jUkQ-mWhjVx>$*8X{ZDC9%CC9$;CQH)t7(X;OTDO#3YJ(l7 z73Ax;m5pp=_uHFkv^aM>0k>BXyO8mwh_}yTyW-FX=C6dh=`W6mOhj>#647QmD*U!IjITSgb|RB}Oh5fw>#SN#DwJ$VMD|0zMzrbSJa-W~y#gphBTw9lLb4sAyfEd0OZ1PF61a_inmHVc@9< z0(^Q^dX|3YLkW(On-$Zw) zSxkxHEK&m45X;hvnUi>(Wo1^h)O$((_^yk5_eFOwK1w~{4Yv~xUUWWR7gMe*xCxx&1p?w6u0_ZC z`R=tWEwx9{7j1KOClTQ-k=VNB+4Hr^GL7QVdCO_k-SySU+wAL8w^=q?9wpx}d~lq% zIOq#cZntw7pV#i|_aTYLt0jYg4`&g8fH;e6QSr{-t6>=#Nh~MZvgt-e#ZiQt+*`wB zUg{#Qh`@=uN)$3hhFq!MDpx39q88X=IoTP1(xa`|Wq`gUBrPlYv)Ta~7C?(VcuRwS z5MTB}(`j9d^BPB;mn_xH3T3auHjrbmASirqGjs63Om|=ru&$P zAHCSSa@jg{CCO#-TxwlhlgpLcWD1>N+$*UxszrMpLy$K3RnL;tmQEjVw1WM!miNO>HVMURWfod?E? 
zs!WaMI#X-HNIb3=bs28bB!F31w$|H%ePJ}M_h))Ady%F2t6CM)<|}9(tDMg5ezKN0 zE~1;oN&ggdIdOHML}j@$nLJY_m*1n~n@lc0M8|6~UJfgIuUs3-?!EM2hup064f!&y zNC$0fD3l9>=r*h3Gpe!+^$NN$2LdGJ>l-MU1eHX zGPNR(()BYmE^);uj8hN2mmY}C&&f{6%ge2zmGdDI7^?_S>yLL+vRq|-0QpA8LE{$FSruPU_yz2qMrxz1K!DAA(Y&}-SPo$3T@2y-vOEb+A)e9tr9MMcLRX*8N!wEa&6&9uTzBk~)-UI8im zq!4*wkv-c9Y(IbXi5)jHMyX`7j+K0r)hl_cyvup3WZoneCG|;F^3|}&ioR_3jWpXc z;Lag+T~ss$m(Qk>@`pGaQAfVMTbN4Sg$Bz#R6QL5Q95y83`9D0^+-s?x{RQjtGGL2F}6Q)X%iASO@Uwvd}g zpK#HjZ<**s!=2Y{SX#;a6P~_)6R68-wxvPfPj~|Mwm={t4ywc7PMr(P%+vlxkLfVJ zO-DwaR!gg{^K)q%ql{c_9=Y-1(_Ff)n?FU{deH1yS-y-$(&^!@n@LNLBsrgL%d^C% zRe961pC3`HK~9UimEN?vPbFt^H3bONC}rjSePtT8>n1~u+k2jEzwW`U5S?i7mIhh= zp?6(PI_L(Hax@yR8_dhA4Ru)^mvS*YG4Bu%5!E`0h|i6Ty!{ou6?I*eE&d%2UoS7_ z3L&{k-=65S$-}y}4c$|Gl{i`&g>P%#s`d5zmiDuHvG4_)NriyD0)p>>IIyPMJy9c_ z#G}!rwA@n;jMi*ad3kv(IW3Ldp%0CxXJ%4+#(A&B6?`;oKn>Gb$7Zu9$%|TI%2$Tx z)2Ctc?xpI!Bnj~_sY;bPmH7vBf_Rv4@vu{LVL73~sU@O5m7H_ri2zj{Cq!>HO8%`4 zwMxajsB4S=I@+YuTX@vQlkW8$4@1`!AJgEi3@3`PqwB*sz69-5<5MRd^QMQ7?^gO$ zXLlMccgW{DotRrhT>=4n-#VcpB>=YdGAW7TU{coKp=C6ow#H#^IQ|h14Jirl<5DRd zH|uWG#UwWApKJDJPq^@0HE7C|ZrT)A4r+G5SU^CnPGR0Y18KlxqXoX5Sx)H+e=#Y9D;@q9$y z8gqkI7LJ* z98?`jWe)T4na+ksOm|R>h(?42ARr>Fl;W~9WYghZ9V>gUuIQ~4QCpyrvpHgg& z2LUBP00JV3YI>N-*iDD(gqGGEu3uWix8s$(IdLzOjoEdX(W*(^_Y7$fWWiUl%=Xuj zXYQYigT8x7cO&b?Lh?|gS^#gKGfb`K!f{ZlML@o-BLD%hj+ZEWPfa|m&SLTtovQi< zi6TRGW@7Exdu+}jfx5En%sYY;0gWb!c0e3S!m$iIJc|GXM8fqQv-$9%-qn0##X2rx z4chnrxz2vQh<)F!VJ5g=l`5Tt<8AtA$1K6~f(!^k00JTicE=?ev-wEJ@N(*nHT~6& zTd3i<>{%RBE{$1C{QVL;d^^M9Fz_f40hKC?gkuiPl6W0k=L%E@B1skkB9e-*CmSE_ zJ?eEmOM9eq72n!shYsvX#06oKv-8-f9T(Z<)NB`|h9s;epd{h&_FBjk%JEpwVKqIs z3;+RfqiTJ5)8WaTD|x?5AMKFLEG<*~H`8<2=$#kYtqiO;Gi4*#xk{bJyfmNDEVeh; z$W~Z(FIYv0FJTCX`0B*29L?HutV~uu+aFocySiNw2^jc7N;Vs}>ms|Cg=dw3)xZbG z@6sZ`59mW3?+Ka@BraM45D?K;jeQv#y7kDIh_afG9`{pOw?y2&WE~dzTyhqhu=^53 z#9>4&jEMJvdU>6uPXoS5!hu;;7Gm>MZx9eSuKBO_pHTmolDoNWMeo)ch4gy0AR;cj zIr(P+y}WkN0>Du;9ru_EQ$|t}CIA7EaOGK=^_lyQH_9z<8>A6%M6wHUS8Tvk8kR{xQG(`&_*M)xvs#YF`m$ ztHi9_&B|qy_grD8Xw^E50|@ZehQ@0jZL9nS8y24EfEW>lhZ2B*cwoi9z30DiK|acN zAM;guNu~H1x%q6y-`CilYj{bvsm}P=hQ{kMZK1q`wttyHp96#%+XNDUj0r$MjA?k# zF@3N~w*5xtq-x&sw`vEdy`@lezWvo#$8NLlPTbLLg(ZbN3!X=f<_-;rucj`lX*9NR zMezI}18zkC0^(NHX^&Y`PA&gG`i^dC+hTBSs9jf6*!%mhv$P!AZ5O660p8M3 ztM8I%Y@%(CXHXZ_zlA;E3*VFU92ri|n?=8~rju;^GLG|f> zU1i4|y%oG60V)0JL_>3px~G0-O64pX3_l~K>?5zzB_LG+NzhJ}E_6xcocg!|cHheU zxQ4ItxvDoK$&ocb0O=|UVntQu~_QC&z`lzrSrDxp%qA0CY zRYVlIbOk`+_1eS$SIa<*{3A^_hDt zPyeMU5oOQ%tjOj0G?;ON`lbFPyPWI_7SjYPVyq{L5Kbfj0dXQ+ca55h$*Gjwc9+s~ z{#VUg(X6(=+MD;qi@}_|pT%a=7pnffBeA{VRjN$tjyg|Mrhcc7|NcmOiEI%APvijv z%n^Wqn4^Ph77-vpDZBetR=iTC9az;{5%id^N+qz6v`~7RNSIg#c3v2kXXoeFc}HP(B|nb%?lC(0VffF zfH(;(qAg6||NcBt>&U(AI6sA~bvcc^R%LHRDVpJ>a>X}!lc*K)_3=Awl1ThP?tik_rdbY43lV{U2nj$yL|7@Fl%;Wr$Eu!7&FSZ( zkTvsD%IXBFWR**)rtnuOWHeyUln!XX+@vp6@ulp)6elFS)xZa@WwKoQ z%_1R4qu={%?W61<00JOj2LT9(9qI5JlQm;Ye9|S-E1Oy@g z0TBp1k_7<}00F5JfPhH729bXd009sXhyVmcAn-^Q1V8`;q)q?=BJ~!3mK51f)&?0wVPqME*el1VBI_ z0uT^^z#~}@009t?IspiX)N2s=2LTWO0f7iWKm-DhWI+G~KtSpQARtn&LF69DM7j00JNY0)h~LfCz#ei3(33K|59WOdtm!AbkQ*5$V?dd5|BF?0XxIq8}K){U& zKtSBMa#1u8009tiCIJYDGwC935C8!XaAN`x5I3$|6b%GG00f*#00QDnx`-PDKmY{X zm;eOCjVl*L0|5{K0cR3`fH;#b;syZ_00B2900D91%0!3m zz>Nt&K-{=;Q8W+$0T6H|0SJgQ=^}0r009tiV*(HmH?CY14Fo^{1e{3#0^&@%h#Lez z00i8a00hL1D;Grr0T2KIXA*#bIFl~o1_2NN0XHT90deEXMbSV21VF%<1Rx;Jq>H#g z00cn5jR`0T6Iw0uT^4u3Qui1V8`;oJjxz;!L`T z+j;^C+NstDLI@B50l^4BMFc~Sgh2oVKtS3AARyANKja<+KmY^;BLD#r3_TJC0T2KI zX%m2eNW1=!dk_Et5D<(21Vk|ONEie_00g8>00JWI`a|wP00cllFai(|!O$aN5C8!X zkTwAbh_veuxd#Cd00F@WKtKdTkAy)01VBLA1Rx;Nu0P}+1V8`;1S0?e5ez*N1_2NN 
[remaining PNG data, its closing hunk, and the diff header plus opening lines of vendor/github.com/moby/moby/hack/make/build-deb lost in extraction; the surviving text resumes inside build-deb's version-mangling comment] + # ie, 1.5.0 > 1.5.0~rc1 > 1.5.0~git20150128.112847.17e840a > 1.5.0~dev~git20150128.112847.17e840a + fi + + debSource="$(awk -F ': ' '$1 == "Source" { print $2; exit }' hack/make/.build-deb/control)" + debMaintainer="$(awk -F ': ' '$1 == "Maintainer" { print $2; exit }' hack/make/.build-deb/control)" + debDate="$(date --rfc-2822)" + + # if go-md2man is available, pre-generate the man pages + make manpages + + builderDir="contrib/builder/deb/${PACKAGE_ARCH}" + pkgs=( $(find "${builderDir}/"*/ -type d) ) + if [ ! -z "$DOCKER_BUILD_PKGS" ]; then + pkgs=() + for p in $DOCKER_BUILD_PKGS; do + pkgs+=( "$builderDir/$p" ) + done + fi + for dir in "${pkgs[@]}"; do + [ -d "$dir" ] || { echo >&2 "skipping nonexistent $dir"; continue; } + version="$(basename "$dir")" + suite="${version##*-}" + + image="dockercore/builder-deb:$version" + if ! docker inspect "$image" &> /dev/null; then + ( + # Add the APT_MIRROR args only if the consuming Dockerfile uses it + # Otherwise this will cause the build to fail + if [ "$(grep 'ARG APT_MIRROR=' $dir/Dockerfile)" ] && [ "$BUILD_APT_MIRROR" ]; then + DOCKER_BUILD_ARGS="$DOCKER_BUILD_ARGS $BUILD_APT_MIRROR" + fi + set -x && docker build ${DOCKER_BUILD_ARGS} -t "$image" "$dir" + ) + fi + + mkdir -p "$DEST/$version" + cat > "$DEST/$version/Dockerfile.build" <<-EOF + FROM $image + WORKDIR /usr/src/docker + COPY . /usr/src/docker + ENV DOCKER_GITCOMMIT $GITCOMMIT + RUN mkdir -p /go/src/github.com/docker && mkdir -p /go/src/github.com/opencontainers \ + && ln -snf /usr/src/docker /go/src/github.com/docker/docker + EOF + + cat >> "$DEST/$version/Dockerfile.build" <<-EOF + # Install runc, containerd, proxy and tini + RUN ./hack/dockerfile/install-binaries.sh runc-dynamic containerd-dynamic proxy-dynamic tini + EOF + cat >> "$DEST/$version/Dockerfile.build" <<-EOF + RUN cp -aL hack/make/.build-deb debian + RUN { echo '$debSource (${debVersion}-0~${version}) $suite; urgency=low'; echo; echo ' * Version: $VERSION'; echo; echo " -- $debMaintainer $debDate"; } > debian/changelog && cat >&2 debian/changelog + RUN dpkg-buildpackage -uc -us -I.git + EOF + tempImage="docker-temp/build-deb:$version" + ( set -x && docker build ${DOCKER_BUILD_ARGS} -t "$tempImage" -f "$DEST/$version/Dockerfile.build" . ) + docker run --rm "$tempImage" bash -c 'cd .. 
&& tar -c *_*' | tar -xvC "$DEST/$version" + docker rmi "$tempImage" + done + + bundle .integration-daemon-stop +) 2>&1 | tee -a "$DEST/test.log" diff --git a/vendor/github.com/moby/moby/hack/make/build-integration-test-binary b/vendor/github.com/moby/moby/hack/make/build-integration-test-binary new file mode 100644 index 000000000..a842e8cce --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/build-integration-test-binary @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +set -e + +rm -rf "$DEST" +DEST="$ABS_DEST/../test-integration-cli" + +source "$SCRIPTDIR/make/.go-autogen" + +if [ -z $DOCKER_INTEGRATION_TESTS_VERIFIED ]; then + source ${MAKEDIR}/.integration-test-helpers + ensure_test_dir integration-cli "$DEST/test.main" + export DOCKER_INTEGRATION_TESTS_VERIFIED=1 +fi diff --git a/vendor/github.com/moby/moby/hack/make/build-rpm b/vendor/github.com/moby/moby/hack/make/build-rpm new file mode 100644 index 000000000..1e89a78d5 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/build-rpm @@ -0,0 +1,148 @@ +#!/usr/bin/env bash +set -e + +# subshell so that we can export PATH and TZ without breaking other things +( + export TZ=UTC # make sure our "date" variables are UTC-based + + source "$(dirname "$BASH_SOURCE")/.integration-daemon-start" + source "$(dirname "$BASH_SOURCE")/.detect-daemon-osarch" + + # TODO consider using frozen images for the dockercore/builder-rpm tags + + rpmName=docker-engine + rpmVersion="$VERSION" + rpmRelease=1 + + # rpmRelease versioning is as follows + # Docker 1.7.0: version=1.7.0, release=1 + # Docker 1.7.0-rc1: version=1.7.0, release=0.1.rc1 + # Docker 1.7.0-cs1: version=1.7.0.cs1, release=1 + # Docker 1.7.0-cs1-rc1: version=1.7.0.cs1, release=0.1.rc1 + # Docker 1.7.0-dev nightly: version=1.7.0, release=0.0.YYYYMMDD.HHMMSS.gitHASH + + # if we have a "-rc*" suffix, set appropriate release + if [[ "$rpmVersion" =~ .*-rc[0-9]+$ ]] ; then + rcVersion=${rpmVersion#*-rc} + rpmVersion=${rpmVersion%-rc*} + rpmRelease="0.${rcVersion}.rc${rcVersion}" + fi + + DOCKER_GITCOMMIT=$(git rev-parse --short HEAD) + if [ -n "$(git status --porcelain --untracked-files=no)" ]; then + DOCKER_GITCOMMIT="$DOCKER_GITCOMMIT-unsupported" + fi + + # if we have a "-dev" suffix or have change in Git, let's make this package version more complex so it works better + if [[ "$rpmVersion" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then + gitUnix="$(git log -1 --pretty='%at')" + gitDate="$(date --date "@$gitUnix" +'%Y%m%d.%H%M%S')" + gitCommit="$(git log -1 --pretty='%h')" + gitVersion="${gitDate}.git${gitCommit}" + # gitVersion is now something like '20150128.112847.17e840a' + rpmVersion="${rpmVersion%-dev}" + rpmRelease="0.0.$gitVersion" + fi + + # Replace any other dashes with periods + rpmVersion="${rpmVersion/-/.}" + + rpmPackager="$(awk -F ': ' '$1 == "Packager" { print $2; exit }' hack/make/.build-rpm/${rpmName}.spec)" + rpmDate="$(date +'%a %b %d %Y')" + + # if go-md2man is available, pre-generate the man pages + make manpages + + # Convert the CHANGELOG.md file into RPM changelog format + rm -f contrib/builder/rpm/${PACKAGE_ARCH}/changelog + VERSION_REGEX="^\W\W (.*) \((.*)\)$" + ENTRY_REGEX="^[-+*] (.*)$" + while read -r line || [[ -n "$line" ]]; do + if [ -z "$line" ]; then continue; fi + if [[ "$line" =~ $VERSION_REGEX ]]; then + echo >> contrib/builder/rpm/${PACKAGE_ARCH}/changelog + echo "* `date -d ${BASH_REMATCH[2]} '+%a %b %d %Y'` ${rpmPackager} - ${BASH_REMATCH[1]}" >> contrib/builder/rpm/${PACKAGE_ARCH}/changelog + fi + if [[ "$line" =~ $ENTRY_REGEX ]]; then + echo 
"- ${BASH_REMATCH[1]//\`}" >> contrib/builder/rpm/${PACKAGE_ARCH}/changelog + fi + done < CHANGELOG.md + + builderDir="contrib/builder/rpm/${PACKAGE_ARCH}" + pkgs=( $(find "${builderDir}/"*/ -type d) ) + if [ ! -z "$DOCKER_BUILD_PKGS" ]; then + pkgs=() + for p in $DOCKER_BUILD_PKGS; do + pkgs+=( "$builderDir/$p" ) + done + fi + for dir in "${pkgs[@]}"; do + [ -d "$dir" ] || { echo >&2 "skipping nonexistent $dir"; continue; } + version="$(basename "$dir")" + suite="${version##*-}" + + image="dockercore/builder-rpm:$version" + if ! docker inspect "$image" &> /dev/null; then + ( set -x && docker build ${DOCKER_BUILD_ARGS} -t "$image" "$dir" ) + fi + + mkdir -p "$DEST/$version" + cat > "$DEST/$version/Dockerfile.build" <<-EOF + FROM $image + COPY . /usr/src/${rpmName} + WORKDIR /usr/src/${rpmName} + RUN mkdir -p /go/src/github.com/docker && mkdir -p /go/src/github.com/opencontainers + EOF + + cat >> "$DEST/$version/Dockerfile.build" <<-EOF + # Install runc, containerd, proxy and tini + RUN TMP_GOPATH="/go" ./hack/dockerfile/install-binaries.sh runc-dynamic containerd-dynamic proxy-dynamic tini + EOF + if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then + echo 'ENV DOCKER_EXPERIMENTAL 1' >> "$DEST/$version/Dockerfile.build" + fi + cat >> "$DEST/$version/Dockerfile.build" <<-EOF + RUN mkdir -p /root/rpmbuild/SOURCES \ + && echo '%_topdir /root/rpmbuild' > /root/.rpmmacros + WORKDIR /root/rpmbuild + RUN ln -sfv /usr/src/${rpmName}/hack/make/.build-rpm SPECS + WORKDIR /root/rpmbuild/SPECS + RUN tar --exclude .git -r -C /usr/src -f /root/rpmbuild/SOURCES/${rpmName}.tar ${rpmName} + RUN tar --exclude .git -r -C /go/src/github.com/docker -f /root/rpmbuild/SOURCES/${rpmName}.tar containerd + RUN tar --exclude .git -r -C /go/src/github.com/docker/libnetwork/cmd -f /root/rpmbuild/SOURCES/${rpmName}.tar proxy + RUN tar --exclude .git -r -C /go/src/github.com/opencontainers -f /root/rpmbuild/SOURCES/${rpmName}.tar runc + RUN tar --exclude .git -r -C /go/ -f /root/rpmbuild/SOURCES/${rpmName}.tar tini + RUN gzip /root/rpmbuild/SOURCES/${rpmName}.tar + RUN { cat /usr/src/${rpmName}/contrib/builder/rpm/${PACKAGE_ARCH}/changelog; } >> ${rpmName}.spec && tail >&2 ${rpmName}.spec + RUN rpmbuild -ba \ + --define '_gitcommit $DOCKER_GITCOMMIT' \ + --define '_release $rpmRelease' \ + --define '_version $rpmVersion' \ + --define '_origversion $VERSION' \ + --define '_experimental ${DOCKER_EXPERIMENTAL:-0}' \ + ${rpmName}.spec + EOF + # selinux policy referencing systemd things won't work on non-systemd versions + # of centos or rhel, which we don't support anyways + if [ "${suite%.*}" -gt 6 ] && [[ "$version" != opensuse* ]]; then + if [ -d "./contrib/selinux-$version" ]; then + selinuxDir="selinux-${version}" + cat >> "$DEST/$version/Dockerfile.build" <<-EOF + RUN tar -cz -C /usr/src/${rpmName}/contrib/${selinuxDir} -f /root/rpmbuild/SOURCES/${rpmName}-selinux.tar.gz ${rpmName}-selinux + RUN rpmbuild -ba \ + --define '_gitcommit $DOCKER_GITCOMMIT' \ + --define '_release $rpmRelease' \ + --define '_version $rpmVersion' \ + --define '_origversion $VERSION' \ + ${rpmName}-selinux.spec + EOF + fi + fi + tempImage="docker-temp/build-rpm:$version" + ( set -x && docker build ${DOCKER_BUILD_ARGS} -t "$tempImage" -f $DEST/$version/Dockerfile.build . 
) + docker run --rm "$tempImage" bash -c 'cd /root/rpmbuild && tar -c *RPMS' | tar -xvC "$DEST/$version" + docker rmi "$tempImage" + done + + source "$(dirname "$BASH_SOURCE")/.integration-daemon-stop" +) 2>&1 | tee -a $DEST/test.log diff --git a/vendor/github.com/moby/moby/hack/make/clean-apt-repo b/vendor/github.com/moby/moby/hack/make/clean-apt-repo new file mode 100755 index 000000000..e823cb537 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/clean-apt-repo @@ -0,0 +1,43 @@ +#!/usr/bin/env bash +set -e + +# This script cleans the experimental pool for the apt repo. +# This is useful when there are a lot of old experimental debs and you only want to keep the most recent. +# + +: ${DOCKER_RELEASE_DIR:=$DEST} +APTDIR=$DOCKER_RELEASE_DIR/apt/repo/pool/experimental +: ${DOCKER_ARCHIVE_DIR:=$DEST/archive} +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +latest_versions=$(dpkg-scanpackages "$APTDIR" /dev/null 2>/dev/null | awk -F ': ' '$1 == "Filename" { print $2 }') + +# get the latest version +latest_docker_engine_file=$(echo "$latest_versions" | grep docker-engine) +latest_docker_engine_version=$(basename ${latest_docker_engine_file%~*}) + +echo "latest docker-engine version: $latest_docker_engine_version" + +# remove all the files that are not that version in experimental +pool_dir=$(dirname "$latest_docker_engine_file") +old_pkgs=( $(ls "$pool_dir" | grep -v "^${latest_docker_engine_version}" | grep "${latest_docker_engine_version%%~git*}") ) + +echo "${old_pkgs[@]}" + +mkdir -p "$DOCKER_ARCHIVE_DIR" +for old_pkg in "${old_pkgs[@]}"; do + echo "moving ${pool_dir}/${old_pkg} to $DOCKER_ARCHIVE_DIR" + mv "${pool_dir}/${old_pkg}" "$DOCKER_ARCHIVE_DIR" +done + +echo +echo "$pool_dir now has contents:" +ls "$pool_dir" + +# now regenerate release files for experimental +export COMPONENT=experimental +source "${DIR}/update-apt-repo" + +echo "You will now want to: " +echo " - re-sign the repo with hack/make/sign-repo" +echo " - re-generate index files with hack/make/generate-index-listing" diff --git a/vendor/github.com/moby/moby/hack/make/clean-yum-repo b/vendor/github.com/moby/moby/hack/make/clean-yum-repo new file mode 100755 index 000000000..012689a96 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/clean-yum-repo @@ -0,0 +1,20 @@ +#!/usr/bin/env bash +set -e + +# This script cleans the experimental pool for the yum repo. +# This is useful when there are a lot of old experimental rpms and you only want to keep the most recent. 
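+# Pruning is delegated to repomanage(1) from yum-utils: "repomanage -k2 --old"
+# prints every package in a suite directory except the two newest builds of
+# each name, and those paths are removed. A typical invocation (assuming the
+# layout produced by release-rpm) would be:
+#
+#   DOCKER_RELEASE_DIR=/volumes/repos hack/make/clean-yum-repo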
+# + +: ${DOCKER_RELEASE_DIR:=$DEST} +YUMDIR=$DOCKER_RELEASE_DIR/yum/repo/experimental + +suites=( $(find "$YUMDIR" -mindepth 1 -maxdepth 1 -type d) ) + +for suite in "${suites[@]}"; do + echo "cleanup in: $suite" + ( set -x; repomanage -k2 --old "$suite" | xargs rm -f ) +done + +echo "You will now want to: " +echo " - re-sign the repo with hack/make/sign-repo" +echo " - re-generate index files with hack/make/generate-index-listing" diff --git a/vendor/github.com/moby/moby/hack/make/cover b/vendor/github.com/moby/moby/hack/make/cover new file mode 100644 index 000000000..4a37995f6 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/cover @@ -0,0 +1,15 @@ +#!/usr/bin/env bash +set -e + +bundle_cover() { + coverprofiles=( "$DEST/../"*"/coverprofiles/"* ) + for p in "${coverprofiles[@]}"; do + echo + ( + set -x + go tool cover -func="$p" + ) + done +} + +bundle_cover 2>&1 | tee "$DEST/report.log" diff --git a/vendor/github.com/moby/moby/hack/make/cross b/vendor/github.com/moby/moby/hack/make/cross new file mode 100644 index 000000000..85dd3c637 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/cross @@ -0,0 +1,29 @@ +#!/usr/bin/env bash +set -e + +# if we have our linux/amd64 version compiled, let's symlink it in +if [ -x "$DEST/../binary-daemon/dockerd-$VERSION" ]; then + arch=$(go env GOHOSTARCH) + mkdir -p "$DEST/linux/${arch}" + ( + cd "$DEST/linux/${arch}" + ln -sf ../../../binary-daemon/* ./ + ) + echo "Created symlinks:" "$DEST/linux/${arch}/"* +fi + +DOCKER_CROSSPLATFORMS=${DOCKER_CROSSPLATFORMS:-"linux/amd64 windows/amd64"} + +for platform in $DOCKER_CROSSPLATFORMS; do + ( + export KEEPDEST=1 + export DEST="$DEST/$platform" # bundles/VERSION/cross/GOOS/GOARCH/docker-VERSION + export GOOS=${platform%/*} + export GOARCH=${platform##*/} + + echo "Cross building: $DEST" + mkdir -p "$DEST" + ABS_DEST="$(cd "$DEST" && pwd -P)" + source "${MAKEDIR}/binary-daemon" + ) +done diff --git a/vendor/github.com/moby/moby/hack/make/dynbinary b/vendor/github.com/moby/moby/hack/make/dynbinary new file mode 100644 index 000000000..981e505e9 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/dynbinary @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +set -e + +# This script exists as backwards compatibility for CI +( + + DEST="${DEST}-daemon" + ABS_DEST="${ABS_DEST}-daemon" + . hack/make/dynbinary-daemon +) diff --git a/vendor/github.com/moby/moby/hack/make/dynbinary-daemon b/vendor/github.com/moby/moby/hack/make/dynbinary-daemon new file mode 100644 index 000000000..d1c0070e6 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/dynbinary-daemon @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +set -e + +( + export IAMSTATIC='false' + export LDFLAGS_STATIC_DOCKER='' + export BUILDFLAGS=( "${BUILDFLAGS[@]/netgo /}" ) # disable netgo, since we don't need it for a dynamic binary + export BUILDFLAGS=( "${BUILDFLAGS[@]/static_build /}" ) # we're not building a "static" binary here + source "${MAKEDIR}/.binary" +) diff --git a/vendor/github.com/moby/moby/hack/make/generate-index-listing b/vendor/github.com/moby/moby/hack/make/generate-index-listing new file mode 100755 index 000000000..9f1208403 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/generate-index-listing @@ -0,0 +1,74 @@ +#!/usr/bin/env bash +set -e + +# This script generates index files for the directory structure +# of the apt and yum repos + +: ${DOCKER_RELEASE_DIR:=$DEST} +APTDIR=$DOCKER_RELEASE_DIR/apt +YUMDIR=$DOCKER_RELEASE_DIR/yum + +if [ ! -d $APTDIR ] && [ ! 
-d $YUMDIR ]; then
+	echo >&2 'release-rpm or release-deb must be run before generate-index-listing'
+	exit 1
+fi
+
+create_index() {
+	local directory=$1
+	local original=$2
+	local cleaned=${directory#$original}
+
+	# the index file to create
+	local index_file="${directory}/index"
+
+	# cd into dir & touch the index file
+	cd $directory
+	touch $index_file
+
+	# print the html header
+	cat <<-EOF > "$index_file"
+	<!DOCTYPE html>
+	<html>
+	<head><title>Index of ${cleaned}/</title></head>
+	<body>
+	<h1>Index of ${cleaned}/</h1><hr>
+	<pre><a href="../">../</a>
+	EOF
+
+	# start of content output
+	(
+	# change IFS locally within subshell so the for loop saves each line correctly to the L var
+	IFS=$'\n';
+
+	# pretty sweet, will mimic the normal apache output. skipping "index" and hidden files
+	for L in $(find -L . -mount -depth -maxdepth 1 -type f ! -name 'index' ! -name '.*' -prune -printf "<a href=\"%f\">%f|@_@%Td-%Tb-%TY %Tk:%TM  @%f@\n"|sort|column -t -s '|' | sed 's,\([\ ]\+\)@_@,</a>\1,g');
+	do
+		# extract the file name from the @...@ marker
+		F=$(sed -e 's,^.*@\([^@]\+\)@.*$,\1,g'<<<"$L");
+
+		# replace the name with its human-readable file size
+		F=$(du -bh $F | cut -f1);
+
+		# output the line with the size in place of the marker
+		sed -e 's,\ @.*$, '"$F"',g'<<<"$L";
+	done;
+	) >> $index_file;
+
+	# now output a list of all directories in this dir (maxdepth 1) other than '.' outputting in a sorted manner exactly like apache
+	find -L . -mount -depth -maxdepth 1 -type d ! -name '.' -printf "<a href=\"%f\">%-43f@_@%Td-%Tb-%TY %Tk:%TM  -\n"|sort -d|sed 's,\([\ ]\+\)@_@,/</a>\1,g' >> $index_file
+
+	# print the footer html
+	echo "</pre>
+	</body></html>
    " >> $index_file + +} + +get_dirs() { + local directory=$1 + + for d in `find ${directory} -type d`; do + create_index $d $directory + done +} + +get_dirs $APTDIR +get_dirs $YUMDIR diff --git a/vendor/github.com/moby/moby/hack/make/install-binary b/vendor/github.com/moby/moby/hack/make/install-binary new file mode 100755 index 000000000..57aa1a28c --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/install-binary @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +set -e +rm -rf "$DEST" + +( + source "${MAKEDIR}/install-binary-daemon" +) diff --git a/vendor/github.com/moby/moby/hack/make/install-binary-daemon b/vendor/github.com/moby/moby/hack/make/install-binary-daemon new file mode 100644 index 000000000..12126ffac --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/install-binary-daemon @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +set -e +rm -rf "$DEST" + +( + DEST="$(dirname $DEST)/binary-daemon" + source "${MAKEDIR}/.binary-setup" + install_binary "${DEST}/${DOCKER_DAEMON_BINARY_NAME}" + install_binary "${DEST}/${DOCKER_RUNC_BINARY_NAME}" + install_binary "${DEST}/${DOCKER_CONTAINERD_BINARY_NAME}" + install_binary "${DEST}/${DOCKER_CONTAINERD_CTR_BINARY_NAME}" + install_binary "${DEST}/${DOCKER_CONTAINERD_SHIM_BINARY_NAME}" + install_binary "${DEST}/${DOCKER_PROXY_BINARY_NAME}" + install_binary "${DEST}/${DOCKER_INIT_BINARY_NAME}" +) diff --git a/vendor/github.com/moby/moby/hack/make/release-deb b/vendor/github.com/moby/moby/hack/make/release-deb new file mode 100755 index 000000000..acf4901d6 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/release-deb @@ -0,0 +1,163 @@ +#!/usr/bin/env bash +set -e + +# This script creates the apt repos for the .deb files generated by hack/make/build-deb +# +# The following can then be used as apt sources: +# deb http://apt.dockerproject.org/repo $distro-$release $version +# +# For example: +# deb http://apt.dockerproject.org/repo ubuntu-trusty main +# deb http://apt.dockerproject.org/repo ubuntu-trusty testing +# deb http://apt.dockerproject.org/repo debian-wheezy experimental +# deb http://apt.dockerproject.org/repo debian-jessie main +# +# ... and so on and so forth for the builds created by hack/make/build-deb + +: ${DOCKER_RELEASE_DIR:=$DEST} +: ${GPG_KEYID:=releasedocker} +APTDIR=$DOCKER_RELEASE_DIR/apt/repo + +# setup the apt repo (if it does not exist) +mkdir -p "$APTDIR/conf" "$APTDIR/db" "$APTDIR/dists" + +# supported arches/sections +arches=( amd64 i386 armhf ppc64le s390x ) + +# Preserve existing components but don't add any non-existing ones +for component in main testing experimental ; do + exists=$(find "$APTDIR/dists" -mindepth 2 -maxdepth 2 -type d -name "$component" -print -quit) + if [ -n "$exists" ] ; then + components+=( $component ) + fi +done + +# set the component for the version being released +component="main" + +if [[ "$VERSION" == *-rc* ]]; then + component="testing" +fi + +if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then + component="experimental" +fi + +# Make sure our component is in the list of components +if [[ ! "${components[*]}" =~ $component ]] ; then + components+=( $component ) +fi + +# create apt-ftparchive file on every run. This is essential to avoid +# using stale versions of the config file that could cause unnecessary +# refreshing of bits for EOL-ed releases. +cat <<-EOF > "$APTDIR/conf/apt-ftparchive.conf" +Dir { + ArchiveDir "${APTDIR}"; + CacheDir "${APTDIR}/db"; +}; + +Default { + Packages::Compress ". gzip bzip2"; + Sources::Compress ". 
gzip bzip2"; + Contents::Compress ". gzip bzip2"; +}; + +TreeDefault { + BinCacheDB "packages-\$(SECTION)-\$(ARCH).db"; + Directory "pool/\$(SECTION)"; + Packages "\$(DIST)/\$(SECTION)/binary-\$(ARCH)/Packages"; + SrcDirectory "pool/\$(SECTION)"; + Sources "\$(DIST)/\$(SECTION)/source/Sources"; + Contents "\$(DIST)/\$(SECTION)/Contents-\$(ARCH)"; + FileList "$APTDIR/\$(DIST)/\$(SECTION)/filelist"; +}; +EOF + +for dir in bundles/$VERSION/build-deb/*/; do + version="$(basename "$dir")" + suite="${version//debootstrap-}" + + cat <<-EOF + Tree "dists/${suite}" { + Sections "${components[*]}"; + Architectures "${arches[*]}"; + } + + EOF +done >> "$APTDIR/conf/apt-ftparchive.conf" + +cat <<-EOF > "$APTDIR/conf/docker-engine-release.conf" +APT::FTPArchive::Release::Origin "Docker"; +APT::FTPArchive::Release::Components "${components[*]}"; +APT::FTPArchive::Release::Label "Docker APT Repository"; +APT::FTPArchive::Release::Architectures "${arches[*]}"; +EOF + +# release the debs +for dir in bundles/$VERSION/build-deb/*/; do + version="$(basename "$dir")" + codename="${version//debootstrap-}" + + tempdir="$(mktemp -d /tmp/tmp-docker-release-deb.XXXXXXXX)" + DEBFILE=( "$dir/docker-engine"*.deb ) + + # add the deb for each component for the distro version into the + # pool (if it is not there already) + mkdir -p "$APTDIR/pool/$component/d/docker-engine/" + for deb in ${DEBFILE[@]}; do + d=$(basename "$deb") + # We do not want to generate a new deb if it has already been + # copied into the APTDIR + if [ ! -f "$APTDIR/pool/$component/d/docker-engine/$d" ]; then + cp "$deb" "$tempdir/" + # if we have a $GPG_PASSPHRASE we may as well + # dpkg-sign before copying the deb into the pool + if [ ! -z "$GPG_PASSPHRASE" ]; then + dpkg-sig -g "--no-tty --digest-algo 'sha512' --passphrase '$GPG_PASSPHRASE'" \ + -k "$GPG_KEYID" --sign builder "$tempdir/$d" + fi + mv "$tempdir/$d" "$APTDIR/pool/$component/d/docker-engine/" + fi + done + + rm -rf "$tempdir" + + # build the right directory structure, needed for apt-ftparchive + for arch in "${arches[@]}"; do + for c in "${components[@]}"; do + mkdir -p "$APTDIR/dists/$codename/$c/binary-$arch" + done + done + + # update the filelist for this codename/component + find "$APTDIR/pool/$component" \ + -name *~${codename}*.deb -o \ + -name *~${codename#*-}*.deb > "$APTDIR/dists/$codename/$component/filelist" +done + +# run the apt-ftparchive commands so we can have pinning +apt-ftparchive generate "$APTDIR/conf/apt-ftparchive.conf" + +for dir in bundles/$VERSION/build-deb/*/; do + version="$(basename "$dir")" + codename="${version//debootstrap-}" + + apt-ftparchive \ + -c "$APTDIR/conf/docker-engine-release.conf" \ + -o "APT::FTPArchive::Release::Codename=$codename" \ + -o "APT::FTPArchive::Release::Suite=$codename" \ + release \ + "$APTDIR/dists/$codename" > "$APTDIR/dists/$codename/Release" + + for arch in "${arches[@]}"; do + apt-ftparchive \ + -c "$APTDIR/conf/docker-engine-release.conf" \ + -o "APT::FTPArchive::Release::Codename=$codename" \ + -o "APT::FTPArchive::Release::Suite=$codename" \ + -o "APT::FTPArchive::Release::Components=$component" \ + -o "APT::FTPArchive::Release::Architecture=$arch" \ + release \ + "$APTDIR/dists/$codename/$component/binary-$arch" > "$APTDIR/dists/$codename/$component/binary-$arch/Release" + done +done diff --git a/vendor/github.com/moby/moby/hack/make/release-rpm b/vendor/github.com/moby/moby/hack/make/release-rpm new file mode 100755 index 000000000..477d15bee --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/release-rpm @@ 
-0,0 +1,71 @@ +#!/usr/bin/env bash +set -e + +# This script creates the yum repos for the .rpm files generated by hack/make/build-rpm +# +# The following can then be used as a yum repo: +# http://yum.dockerproject.org/repo/$release/$distro/$distro-version +# +# For example: +# http://yum.dockerproject.org/repo/main/fedora/23 +# http://yum.dockerproject.org/repo/testing/centos/7 +# http://yum.dockerproject.org/repo/experimental/fedora/23 +# http://yum.dockerproject.org/repo/main/centos/7 +# +# ... and so on and so forth for the builds created by hack/make/build-rpm + +: ${DOCKER_RELEASE_DIR:=$DEST} +YUMDIR=$DOCKER_RELEASE_DIR/yum/repo +: ${GPG_KEYID:=releasedocker} + +# get the release +release="main" + +if [[ "$VERSION" == *-rc* ]]; then + release="testing" +fi + +if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then + release="experimental" +fi + +# Setup the yum repo +for dir in bundles/$VERSION/build-rpm/*/; do + version="$(basename "$dir")" + suite="${version##*-}" + distro="${version%-*}" + + REPO=$YUMDIR/$release/$distro + + # if the directory does not exist, initialize the yum repo + if [[ ! -d $REPO/$suite/Packages ]]; then + mkdir -p "$REPO/$suite/Packages" + + createrepo --pretty "$REPO/$suite" + fi + + # path to rpms + RPMFILE=( "bundles/$VERSION/build-rpm/$version/RPMS/"*"/docker-engine"*.rpm "bundles/$VERSION/build-rpm/$version/SRPMS/docker-engine"*.rpm ) + + # if we have a $GPG_PASSPHRASE we may as well + # sign the rpms before adding to repo + if [ ! -z $GPG_PASSPHRASE ]; then + # export our key to rpm import + gpg --armor --export "$GPG_KEYID" > /tmp/gpg + rpm --import /tmp/gpg + + # sign the rpms + echo "yes" | setsid rpm \ + --define "_gpg_name $GPG_KEYID" \ + --define "_signature gpg" \ + --define "__gpg_check_password_cmd /bin/true" \ + --define "__gpg_sign_cmd %{__gpg} gpg --batch --no-armor --digest-algo 'sha512' --passphrase '$GPG_PASSPHRASE' --no-secmem-warning -u '%{_gpg_name}' --sign --detach-sign --output %{__signature_filename} %{__plaintext_filename}" \ + --resign "${RPMFILE[@]}" + fi + + # copy the rpms to the packages folder + cp "${RPMFILE[@]}" "$REPO/$suite/Packages" + + # update the repo + createrepo --pretty --update "$REPO/$suite" +done diff --git a/vendor/github.com/moby/moby/hack/make/run b/vendor/github.com/moby/moby/hack/make/run new file mode 100644 index 000000000..366fea6f4 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/run @@ -0,0 +1,44 @@ +#!/usr/bin/env bash + +set -e +rm -rf "$DEST" + +if ! 
command -v dockerd &> /dev/null; then + echo >&2 'error: binary-daemon or dynbinary-daemon must be run before run' + false +fi + +DOCKER_GRAPHDRIVER=${DOCKER_GRAPHDRIVER:-vfs} +DOCKER_USERLANDPROXY=${DOCKER_USERLANDPROXY:-true} + +# example usage: DOCKER_STORAGE_OPTS="dm.basesize=20G,dm.loopdatasize=200G" +storage_params="" +if [ -n "$DOCKER_STORAGE_OPTS" ]; then + IFS=',' + for i in ${DOCKER_STORAGE_OPTS}; do + storage_params="--storage-opt $i $storage_params" + done + unset IFS +fi + + +listen_port=2375 +if [ -n "$DOCKER_PORT" ]; then + IFS=':' read -r -a ports <<< "$DOCKER_PORT" + listen_port="${ports[-1]}" +fi + +extra_params="" +if [ "$DOCKER_REMAP_ROOT" ]; then + extra_params="--userns-remap $DOCKER_REMAP_ROOT" +fi + +args="--debug \ + --host tcp://0.0.0.0:${listen_port} --host unix:///var/run/docker.sock \ + --storage-driver "$DOCKER_GRAPHDRIVER" \ + --userland-proxy="$DOCKER_USERLANDPROXY" \ + $storage_params \ + $extra_params" + +echo dockerd $args +exec dockerd $args diff --git a/vendor/github.com/moby/moby/hack/make/sign-repos b/vendor/github.com/moby/moby/hack/make/sign-repos new file mode 100755 index 000000000..61dbd7acc --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/sign-repos @@ -0,0 +1,65 @@ +#!/usr/bin/env bash + +# This script signs the deliverables from release-deb and release-rpm +# with a designated GPG key. + +: ${DOCKER_RELEASE_DIR:=$DEST} +: ${GPG_KEYID:=releasedocker} +APTDIR=$DOCKER_RELEASE_DIR/apt/repo +YUMDIR=$DOCKER_RELEASE_DIR/yum/repo + +if [ -z "$GPG_PASSPHRASE" ]; then + echo >&2 'you need to set GPG_PASSPHRASE in order to sign artifacts' + exit 1 +fi + +if [ ! -d $APTDIR ] && [ ! -d $YUMDIR ]; then + echo >&2 'release-rpm or release-deb must be run before sign-repos' + exit 1 +fi + +sign_packages(){ + # sign apt repo metadata + if [ -d $APTDIR ]; then + # create file with public key + gpg --armor --export "$GPG_KEYID" > "$DOCKER_RELEASE_DIR/apt/gpg" + + # sign the repo metadata + for F in $(find $APTDIR -name Release); do + if test "$F" -nt "$F.gpg" ; then + gpg -u "$GPG_KEYID" --passphrase "$GPG_PASSPHRASE" \ + --digest-algo "sha512" \ + --armor --sign --detach-sign \ + --batch --yes \ + --output "$F.gpg" "$F" + fi + inRelease="$(dirname "$F")/InRelease" + if test "$F" -nt "$inRelease" ; then + gpg -u "$GPG_KEYID" --passphrase "$GPG_PASSPHRASE" \ + --digest-algo "sha512" \ + --clearsign \ + --batch --yes \ + --output "$inRelease" "$F" + fi + done + fi + + # sign yum repo metadata + if [ -d $YUMDIR ]; then + # create file with public key + gpg --armor --export "$GPG_KEYID" > "$DOCKER_RELEASE_DIR/yum/gpg" + + # sign the repo metadata + for F in $(find $YUMDIR -name repomd.xml); do + if test "$F" -nt "$F.asc" ; then + gpg -u "$GPG_KEYID" --passphrase "$GPG_PASSPHRASE" \ + --digest-algo "sha512" \ + --armor --sign --detach-sign \ + --batch --yes \ + --output "$F.asc" "$F" + fi + done + fi +} + +sign_packages diff --git a/vendor/github.com/moby/moby/hack/make/test-docker-py b/vendor/github.com/moby/moby/hack/make/test-docker-py new file mode 100644 index 000000000..b30879e3a --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/test-docker-py @@ -0,0 +1,20 @@ +#!/usr/bin/env bash +set -e + +source hack/make/.integration-test-helpers + +# subshell so that we can export PATH without breaking other things +( + bundle .integration-daemon-start + + dockerPy='/docker-py' + [ -d "$dockerPy" ] || { + dockerPy="$DEST/docker-py" + git clone https://github.com/docker/docker-py.git "$dockerPy" + } + + # exporting PYTHONPATH to import "docker" from our 
local docker-py + test_env PYTHONPATH="$dockerPy" py.test --junitxml="$DEST/results.xml" "$dockerPy/tests/integration" + + bundle .integration-daemon-stop +) 2>&1 | tee -a "$DEST/test.log" diff --git a/vendor/github.com/moby/moby/hack/make/test-integration-cli b/vendor/github.com/moby/moby/hack/make/test-integration-cli new file mode 100755 index 000000000..61e2f7a7f --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/test-integration-cli @@ -0,0 +1,29 @@ +#!/usr/bin/env bash +set -e + +source "${MAKEDIR}/.go-autogen" +source hack/make/.integration-test-helpers + +# subshell so that we can export PATH without breaking other things +( + bundle .integration-daemon-start + + bundle .integration-daemon-setup + + bundle_test_integration_cli + + bundle .integration-daemon-stop + + if [ "$(go env GOOS)" != 'windows' ] + then + leftovers=$(ps -ax -o pid,cmd | awk '$2 == "docker-containerd-shim" && $4 ~ /.*\/bundles\/.*\/test-integration-cli/ { print $1 }') + if [ -n "$leftovers" ] + then + ps aux + kill -9 $leftovers 2> /dev/null + echo "!!!! WARNING you have left over shim(s), Cleanup your test !!!!" + exit 1 + fi + fi + +) 2>&1 | tee -a "$DEST/test.log" diff --git a/vendor/github.com/moby/moby/hack/make/test-integration-shell b/vendor/github.com/moby/moby/hack/make/test-integration-shell new file mode 100644 index 000000000..2201f5eb3 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/test-integration-shell @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +bundle .integration-daemon-start +bundle .integration-daemon-setup + +export ABS_DEST +bash +e diff --git a/vendor/github.com/moby/moby/hack/make/test-old-apt-repo b/vendor/github.com/moby/moby/hack/make/test-old-apt-repo new file mode 100755 index 000000000..e92b20ef0 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/test-old-apt-repo @@ -0,0 +1,29 @@ +#!/usr/bin/env bash +set -e + +versions=( 1.3.3 1.4.1 1.5.0 1.6.2 ) + +install() { + local version=$1 + local tmpdir=$(mktemp -d /tmp/XXXXXXXXXX) + local dockerfile="${tmpdir}/Dockerfile" + cat <<-EOF > "$dockerfile" + FROM debian:jessie + ENV VERSION ${version} + RUN apt-get update && apt-get install -y \ + apt-transport-https \ + ca-certificates \ + --no-install-recommends + RUN echo "deb https://get.docker.com/ubuntu docker main" > /etc/apt/sources.list.d/docker.list + RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 \ + --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 + RUN apt-get update && apt-get install -y \ + lxc-docker-\${VERSION} + EOF + + docker build --rm --force-rm --no-cache -t docker-old-repo:${version} -f $dockerfile $tmpdir +} + +for v in "${versions[@]}"; do + install "$v" +done diff --git a/vendor/github.com/moby/moby/hack/make/test-unit b/vendor/github.com/moby/moby/hack/make/test-unit new file mode 100644 index 000000000..85eef5b5b --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/test-unit @@ -0,0 +1,58 @@ +#!/usr/bin/env bash +set -e + +# Run Docker's test suite, including sub-packages, and store their output as a bundle +# If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'. +# You can use this to select certain tests to run, e.g. +# +# TESTFLAGS='-test.run ^TestBuild$' ./hack/make.sh test-unit +# +bundle_test_unit() { + TESTFLAGS+=" -test.timeout=${TIMEOUT}" + INCBUILD="-i" + count=0 + for flag in "${BUILDFLAGS[@]}"; do + if [ "${flag}" == ${INCBUILD} ]; then + unset BUILDFLAGS[${count}] + break + fi + count=$[ ${count} + 1 ] + done + + date + if [ -z "$TESTDIRS" ]; then + TEST_PATH=./... 
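+		# TESTDIRS was empty, so every package is in scope; a narrower run
+		# scopes the package list via TESTDIRS, e.g.:
+		#   TESTDIRS='pkg/term' ./hack/make.sh test-unit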
+ else + TEST_PATH=./${TESTDIRS} + fi + + source "${MAKEDIR}/.go-autogen" + + if [ "$(go env GOHOSTOS)" = 'solaris' ]; then + pkg_list=$(go list -e \ + -f '{{if ne .Name "github.com/docker/docker"}} + {{.ImportPath}} + {{end}}' \ + "${BUILDFLAGS[@]}" $TEST_PATH \ + | grep github.com/docker/docker \ + | grep -v github.com/docker/docker/vendor \ + | grep -v github.com/docker/docker/daemon/graphdriver \ + | grep -v github.com/docker/docker/man \ + | grep -v github.com/docker/docker/integration-cli) + else + pkg_list=$(go list -e \ + -f '{{if ne .Name "github.com/docker/docker"}} + {{.ImportPath}} + {{end}}' \ + "${BUILDFLAGS[@]}" $TEST_PATH \ + | grep github.com/docker/docker \ + | grep -v github.com/docker/docker/vendor \ + | grep -v github.com/docker/docker/man \ + | grep -v github.com/docker/docker/integration-cli) + fi + + go test -cover -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" $TESTFLAGS $pkg_list + go test -cover -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" $TESTFLAGS github.com/docker/docker/pkg/term -test.root +} + +bundle_test_unit 2>&1 | tee -a "$DEST/test.log" diff --git a/vendor/github.com/moby/moby/hack/make/tgz b/vendor/github.com/moby/moby/hack/make/tgz new file mode 100644 index 000000000..1fd37b6b5 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/tgz @@ -0,0 +1,2 @@ +#!/usr/bin/env bash +echo "tgz is deprecated" diff --git a/vendor/github.com/moby/moby/hack/make/ubuntu b/vendor/github.com/moby/moby/hack/make/ubuntu new file mode 100644 index 000000000..ad3f1d78b --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/ubuntu @@ -0,0 +1,190 @@ +#!/usr/bin/env bash + +PKGVERSION="${VERSION//-/'~'}" +# if we have a "-dev" suffix or have change in Git, let's make this package version more complex so it works better +if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then + GIT_UNIX="$(git log -1 --pretty='%at')" + GIT_DATE="$(date --date "@$GIT_UNIX" +'%Y%m%d.%H%M%S')" + GIT_COMMIT="$(git log -1 --pretty='%h')" + GIT_VERSION="git${GIT_DATE}.0.${GIT_COMMIT}" + # GIT_VERSION is now something like 'git20150128.112847.0.17e840a' + PKGVERSION="$PKGVERSION~$GIT_VERSION" +fi + +# $ dpkg --compare-versions 1.5.0 gt 1.5.0~rc1 && echo true || echo false +# true +# $ dpkg --compare-versions 1.5.0~rc1 gt 1.5.0~git20150128.112847.17e840a && echo true || echo false +# true +# $ dpkg --compare-versions 1.5.0~git20150128.112847.17e840a gt 1.5.0~dev~git20150128.112847.17e840a && echo true || echo false +# true + +# ie, 1.5.0 > 1.5.0~rc1 > 1.5.0~git20150128.112847.17e840a > 1.5.0~dev~git20150128.112847.17e840a + +PACKAGE_ARCHITECTURE="$(dpkg-architecture -qDEB_HOST_ARCH)" +PACKAGE_URL="https://www.docker.com/" +PACKAGE_MAINTAINER="support@docker.com" +PACKAGE_DESCRIPTION="Linux container runtime +Docker complements LXC with a high-level API which operates at the process +level. It runs unix processes with strong guarantees of isolation and +repeatability across servers. +Docker is a great building block for automating distributed systems: +large-scale web deployments, database clusters, continuous deployment systems, +private PaaS, service-oriented architectures, etc." +PACKAGE_LICENSE="Apache-2.0" + +# Build docker as an ubuntu package using FPM and REPREPRO (sue me). +# bundle_binary must be called first. 
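+#
+# For example, VERSION=1.5.0-dev on a dirty tree yields a PKGVERSION like
+# 1.5.0~dev~git20150128.112847.0.17e840a, which dpkg sorts below the final
+# 1.5.0 release, as the --compare-versions table above shows.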
+bundle_ubuntu() { + DIR="$ABS_DEST/build" + + # Include our udev rules + mkdir -p "$DIR/etc/udev/rules.d" + cp contrib/udev/80-docker.rules "$DIR/etc/udev/rules.d/" + + # Include our init scripts + mkdir -p "$DIR/etc/init" + cp contrib/init/upstart/docker.conf "$DIR/etc/init/" + mkdir -p "$DIR/etc/init.d" + cp contrib/init/sysvinit-debian/docker "$DIR/etc/init.d/" + mkdir -p "$DIR/etc/default" + cp contrib/init/sysvinit-debian/docker.default "$DIR/etc/default/docker" + mkdir -p "$DIR/lib/systemd/system" + cp contrib/init/systemd/docker.{service,socket} "$DIR/lib/systemd/system/" + + # Include contributed completions + mkdir -p "$DIR/etc/bash_completion.d" + cp contrib/completion/bash/docker "$DIR/etc/bash_completion.d/" + mkdir -p "$DIR/usr/share/zsh/vendor-completions" + cp contrib/completion/zsh/_docker "$DIR/usr/share/zsh/vendor-completions/" + mkdir -p "$DIR/etc/fish/completions" + cp contrib/completion/fish/docker.fish "$DIR/etc/fish/completions/" + + # Include man pages + make manpages + manRoot="$DIR/usr/share/man" + mkdir -p "$manRoot" + for manDir in man/man?; do + manBase="$(basename "$manDir")" # "man1" + for manFile in "$manDir"/*; do + manName="$(basename "$manFile")" # "docker-build.1" + mkdir -p "$manRoot/$manBase" + gzip -c "$manFile" > "$manRoot/$manBase/$manName.gz" + done + done + + # Copy the binary + # This will fail if the binary bundle hasn't been built + mkdir -p "$DIR/usr/bin" + cp "$DEST/../binary/docker-$VERSION" "$DIR/usr/bin/docker" + + # Generate postinst/prerm/postrm scripts + cat > "$DEST/postinst" <<'EOF' +#!/bin/sh +set -e +set -u + +if [ "$1" = 'configure' ] && [ -z "$2" ]; then + if ! getent group docker > /dev/null; then + groupadd --system docker + fi +fi + +if ! { [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; }; then + # we only need to do this if upstart isn't in charge + update-rc.d docker defaults > /dev/null || true +fi +if [ -n "$2" ]; then + _dh_action=restart +else + _dh_action=start +fi +service docker $_dh_action 2>/dev/null || true + +#DEBHELPER# +EOF + cat > "$DEST/prerm" <<'EOF' +#!/bin/sh +set -e +set -u + +service docker stop 2>/dev/null || true + +#DEBHELPER# +EOF + cat > "$DEST/postrm" <<'EOF' +#!/bin/sh +set -e +set -u + +if [ "$1" = "purge" ] ; then + update-rc.d docker remove > /dev/null || true +fi + +# In case this system is running systemd, we make systemd reload the unit files +# to pick up changes. 
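+# (/run/systemd/system exists only when systemd is the running init)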
+if [ -d /run/systemd/system ] ; then + systemctl --system daemon-reload > /dev/null || true +fi + +#DEBHELPER# +EOF + # TODO swaths of these were borrowed from debhelper's auto-inserted stuff, because we're still using fpm - we need to use debhelper instead, and somehow reconcile Ubuntu that way + chmod +x "$DEST/postinst" "$DEST/prerm" "$DEST/postrm" + + ( + # switch directories so we create *.deb in the right folder + cd "$DEST" + + # create lxc-docker-VERSION package + fpm -s dir -C "$DIR" \ + --name "lxc-docker-$VERSION" --version "$PKGVERSION" \ + --after-install "$ABS_DEST/postinst" \ + --before-remove "$ABS_DEST/prerm" \ + --after-remove "$ABS_DEST/postrm" \ + --architecture "$PACKAGE_ARCHITECTURE" \ + --prefix / \ + --depends iptables \ + --deb-recommends aufs-tools \ + --deb-recommends ca-certificates \ + --deb-recommends git \ + --deb-recommends xz-utils \ + --deb-recommends 'cgroupfs-mount | cgroup-lite' \ + --deb-suggests apparmor \ + --description "$PACKAGE_DESCRIPTION" \ + --maintainer "$PACKAGE_MAINTAINER" \ + --conflicts docker \ + --conflicts docker.io \ + --conflicts lxc-docker-virtual-package \ + --provides lxc-docker \ + --provides lxc-docker-virtual-package \ + --replaces lxc-docker \ + --replaces lxc-docker-virtual-package \ + --url "$PACKAGE_URL" \ + --license "$PACKAGE_LICENSE" \ + --config-files /etc/udev/rules.d/80-docker.rules \ + --config-files /etc/init/docker.conf \ + --config-files /etc/init.d/docker \ + --config-files /etc/default/docker \ + --deb-compression gz \ + -t deb . + # TODO replace "Suggests: cgroup-lite" with "Recommends: cgroupfs-mount | cgroup-lite" once cgroupfs-mount is available + + # create empty lxc-docker wrapper package + fpm -s empty \ + --name lxc-docker --version "$PKGVERSION" \ + --architecture "$PACKAGE_ARCHITECTURE" \ + --depends lxc-docker-$VERSION \ + --description "$PACKAGE_DESCRIPTION" \ + --maintainer "$PACKAGE_MAINTAINER" \ + --url "$PACKAGE_URL" \ + --license "$PACKAGE_LICENSE" \ + --deb-compression gz \ + -t deb + ) + + # clean up after ourselves so we have a clean output directory + rm "$DEST/postinst" "$DEST/prerm" "$DEST/postrm" + rm -r "$DIR" +} + +bundle_ubuntu diff --git a/vendor/github.com/moby/moby/hack/make/update-apt-repo b/vendor/github.com/moby/moby/hack/make/update-apt-repo new file mode 100755 index 000000000..3a80c94ca --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/update-apt-repo @@ -0,0 +1,70 @@ +#!/usr/bin/env bash +set -e + +# This script updates the apt repo in $DOCKER_RELEASE_DIR/apt/repo. +# This script is a "fix all" for any sort of problems that might have occurred with +# the Release or Package files in the repo. +# It should only be used in the rare case of extreme emergencies to regenerate +# Release and Package files for the apt repo. +# +# NOTE: Always be sure to re-sign the repo with hack/make/sign-repos after running +# this script. 
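+#
+# A typical emergency regeneration (values illustrative) looks like:
+#   DOCKER_RELEASE_DIR=/volumes/repos COMPONENT=experimental hack/make/update-apt-repo
+#   DOCKER_RELEASE_DIR=/volumes/repos GPG_PASSPHRASE=... hack/make/sign-repos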
+ +: ${DOCKER_RELEASE_DIR:=$DEST} +APTDIR=$DOCKER_RELEASE_DIR/apt/repo + +# supported arches/sections +arches=( amd64 i386 ) + +# Preserve existing components but don't add any non-existing ones +for component in main testing experimental ; do + if ls "$APTDIR/dists/*/$component" >/dev/null 2>&1 ; then + components+=( $component ) + fi +done + +dists=( $(find "${APTDIR}/dists" -maxdepth 1 -mindepth 1 -type d) ) + +# override component if it is set +if [ "$COMPONENT" ]; then + components=( $COMPONENT ) +fi + +# release the debs +for version in "${dists[@]}"; do + for component in "${components[@]}"; do + codename="${version//debootstrap-}" + + # update the filelist for this codename/component + find "$APTDIR/pool/$component" \ + -name *~${codename#*-}*.deb > "$APTDIR/dists/$codename/$component/filelist" + done +done + +# run the apt-ftparchive commands so we can have pinning +apt-ftparchive generate "$APTDIR/conf/apt-ftparchive.conf" + +for dist in "${dists[@]}"; do + version=$(basename "$dist") + for component in "${components[@]}"; do + codename="${version//debootstrap-}" + + apt-ftparchive \ + -o "APT::FTPArchive::Release::Codename=$codename" \ + -o "APT::FTPArchive::Release::Suite=$codename" \ + -c "$APTDIR/conf/docker-engine-release.conf" \ + release \ + "$APTDIR/dists/$codename" > "$APTDIR/dists/$codename/Release" + + for arch in "${arches[@]}"; do + apt-ftparchive \ + -o "APT::FTPArchive::Release::Codename=$codename" \ + -o "APT::FTPArchive::Release::Suite=$codename" \ + -o "APT::FTPArchive::Release::Component=$component" \ + -o "APT::FTPArchive::Release::Architecture=$arch" \ + -c "$APTDIR/conf/docker-engine-release.conf" \ + release \ + "$APTDIR/dists/$codename/$component/binary-$arch" > "$APTDIR/dists/$codename/$component/binary-$arch/Release" + done + done +done diff --git a/vendor/github.com/moby/moby/hack/make/win b/vendor/github.com/moby/moby/hack/make/win new file mode 100644 index 000000000..bc6d5108b --- /dev/null +++ b/vendor/github.com/moby/moby/hack/make/win @@ -0,0 +1,20 @@ +#!/usr/bin/env bash +set -e + +# explicit list of os/arch combos that support being a daemon +declare -A daemonSupporting +daemonSupporting=( + [linux/amd64]=1 + [windows/amd64]=1 +) +platform="windows/amd64" +export DEST="$DEST/$platform" # bundles/VERSION/cross/GOOS/GOARCH/docker-VERSION +mkdir -p "$DEST" +ABS_DEST="$(cd "$DEST" && pwd -P)" +export GOOS=${platform%/*} +export GOARCH=${platform##*/} +if [ -z "${daemonSupporting[$platform]}" ]; then + export LDFLAGS_STATIC_DOCKER="" # we just need a simple client for these platforms + export BUILDFLAGS=( "${ORIG_BUILDFLAGS[@]/ daemon/}" ) # remove the "daemon" build tag from platforms that aren't supported +fi +source "${MAKEDIR}/binary" diff --git a/vendor/github.com/moby/moby/hack/release.sh b/vendor/github.com/moby/moby/hack/release.sh new file mode 100755 index 000000000..5d7363044 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/release.sh @@ -0,0 +1,317 @@ +#!/usr/bin/env bash +set -e + +# This script looks for bundles built by make.sh, and releases them on a +# public S3 bucket. +# +# Bundles should be available for the VERSION string passed as argument. +# +# The correct way to call this script is inside a container built by the +# official Dockerfile at the root of the Docker source code. The Dockerfile, +# make.sh and release.sh should all be from the same source code revision. + +set -o pipefail + +# Print a usage message and exit. 
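+#
+# (For orientation: setup_s3 prepares the bucket, build_all runs the
+# RELEASE_BUNDLES through hack/make.sh and refuses to continue on test
+# failure unless --release-regardless-of-test-failure is given, and
+# release_binaries/release_build push each cross-built tgz plus its
+# .md5/.sha256 companions under s3://$BUCKET_PATH/builds/<uname -s>/<uname -m>/.)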
+usage() { + cat >&2 <<'EOF' +To run, I need: +- to be in a container generated by the Dockerfile at the top of the Docker + repository; +- to be provided with the location of an S3 bucket and path, in + environment variables AWS_S3_BUCKET and AWS_S3_BUCKET_PATH (default: ''); +- to be provided with AWS credentials for this S3 bucket, in environment + variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY; +- a generous amount of good will and nice manners. +The canonical way to run me is to run the image produced by the Dockerfile: e.g.:" + +docker run -e AWS_S3_BUCKET=test.docker.com \ + -e AWS_ACCESS_KEY_ID \ + -e AWS_SECRET_ACCESS_KEY \ + -e AWS_DEFAULT_REGION \ + -it --privileged \ + docker ./hack/release.sh +EOF + exit 1 +} + +[ "$AWS_S3_BUCKET" ] || usage +[ "$AWS_ACCESS_KEY_ID" ] || usage +[ "$AWS_SECRET_ACCESS_KEY" ] || usage +[ -d /go/src/github.com/docker/docker ] || usage +cd /go/src/github.com/docker/docker +[ -x hack/make.sh ] || usage + +export AWS_DEFAULT_REGION +: ${AWS_DEFAULT_REGION:=us-west-1} + +AWS_CLI=${AWS_CLI:-'aws'} + +RELEASE_BUNDLES=( + binary + cross + tgz +) + +if [ "$1" != '--release-regardless-of-test-failure' ]; then + RELEASE_BUNDLES=( + test-unit + "${RELEASE_BUNDLES[@]}" + test-integration-cli + ) +fi + +VERSION=$(< VERSION) +BUCKET=$AWS_S3_BUCKET +BUCKET_PATH=$BUCKET +[[ -n "$AWS_S3_BUCKET_PATH" ]] && BUCKET_PATH+=/$AWS_S3_BUCKET_PATH + +if command -v git &> /dev/null && git rev-parse &> /dev/null; then + if [ -n "$(git status --porcelain --untracked-files=no)" ]; then + echo "You cannot run the release script on a repo with uncommitted changes" + usage + fi +fi + +# These are the 2 keys we've used to sign the deb's +# release (get.docker.com) +# GPG_KEY="36A1D7869245C8950F966E92D8576A8BA88D21E9" +# test (test.docker.com) +# GPG_KEY="740B314AE3941731B942C66ADF4FD13717AAD7D6" + +setup_s3() { + echo "Setting up S3" + # Try creating the bucket. Ignore errors (it might already exist). + $AWS_CLI s3 mb "s3://$BUCKET" 2>/dev/null || true + # Check access to the bucket. + $AWS_CLI s3 ls "s3://$BUCKET" >/dev/null + # Make the bucket accessible through website endpoints. + $AWS_CLI s3 website --index-document index --error-document error "s3://$BUCKET" +} + +# write_to_s3 uploads the contents of standard input to the specified S3 url. +write_to_s3() { + DEST=$1 + F=`mktemp` + cat > "$F" + $AWS_CLI s3 cp --acl public-read --content-type 'text/plain' "$F" "$DEST" + rm -f "$F" +} + +s3_url() { + case "$BUCKET" in + get.docker.com|test.docker.com|experimental.docker.com) + echo "https://$BUCKET_PATH" + ;; + *) + BASE_URL="http://${BUCKET}.s3-website-${AWS_DEFAULT_REGION}.amazonaws.com" + if [[ -n "$AWS_S3_BUCKET_PATH" ]] ; then + echo "$BASE_URL/$AWS_S3_BUCKET_PATH" + else + echo "$BASE_URL" + fi + ;; + esac +} + +build_all() { + echo "Building release" + if ! ./hack/make.sh "${RELEASE_BUNDLES[@]}"; then + echo >&2 + echo >&2 'The build or tests appear to have failed.' + echo >&2 + echo >&2 'You, as the release maintainer, now have a couple options:' + echo >&2 '- delay release and fix issues' + echo >&2 '- delay release and fix issues' + echo >&2 '- did we mention how important this is? 
issues need fixing :)' + echo >&2 + echo >&2 'As a final LAST RESORT, you (because only you, the release maintainer,' + echo >&2 ' really knows all the hairy problems at hand with the current release' + echo >&2 ' issues) may bypass this checking by running this script again with the' + echo >&2 ' single argument of "--release-regardless-of-test-failure", which will skip' + echo >&2 ' running the test suite, and will only build the binaries and packages. Please' + echo >&2 ' avoid using this if at all possible.' + echo >&2 + echo >&2 'Regardless, we cannot stress enough the scarcity with which this bypass' + echo >&2 ' should be used. If there are release issues, we should always err on the' + echo >&2 ' side of caution.' + echo >&2 + exit 1 + fi +} + +upload_release_build() { + src="$1" + dst="$2" + latest="$3" + + echo + echo "Uploading $src" + echo " to $dst" + echo + $AWS_CLI s3 cp --follow-symlinks --acl public-read "$src" "$dst" + if [ "$latest" ]; then + echo + echo "Copying to $latest" + echo + $AWS_CLI s3 cp --acl public-read "$dst" "$latest" + fi + + # get hash files too (see hash_files() in hack/make.sh) + for hashAlgo in md5 sha256; do + if [ -e "$src.$hashAlgo" ]; then + echo + echo "Uploading $src.$hashAlgo" + echo " to $dst.$hashAlgo" + echo + $AWS_CLI s3 cp --follow-symlinks --acl public-read --content-type='text/plain' "$src.$hashAlgo" "$dst.$hashAlgo" + if [ "$latest" ]; then + echo + echo "Copying to $latest.$hashAlgo" + echo + $AWS_CLI s3 cp --acl public-read "$dst.$hashAlgo" "$latest.$hashAlgo" + fi + fi + done +} + +release_build() { + echo "Releasing binaries" + GOOS=$1 + GOARCH=$2 + + binDir=bundles/$VERSION/cross/$GOOS/$GOARCH + tgzDir=bundles/$VERSION/tgz/$GOOS/$GOARCH + binary=docker-$VERSION + zipExt=".tgz" + binaryExt="" + tgz=$binary$zipExt + + latestBase= + if [ -z "$NOLATEST" ]; then + latestBase=docker-latest + fi + + # we need to map our GOOS and GOARCH to uname values + # see https://en.wikipedia.org/wiki/Uname + # ie, GOOS=linux -> "uname -s"=Linux + + s3Os=$GOOS + case "$s3Os" in + darwin) + s3Os=Darwin + ;; + freebsd) + s3Os=FreeBSD + ;; + linux) + s3Os=Linux + ;; + solaris) + echo skipping solaris release + return 0 + ;; + windows) + # this is windows use the .zip and .exe extensions for the files. + s3Os=Windows + zipExt=".zip" + binaryExt=".exe" + tgz=$binary$zipExt + binary+=$binaryExt + ;; + *) + echo >&2 "error: can't convert $s3Os to an appropriate value for 'uname -s'" + exit 1 + ;; + esac + + s3Arch=$GOARCH + case "$s3Arch" in + amd64) + s3Arch=x86_64 + ;; + 386) + s3Arch=i386 + ;; + arm) + s3Arch=armel + # someday, we might potentially support multiple GOARM values, in which case we might get armhf here too + ;; + *) + echo >&2 "error: can't convert $s3Arch to an appropriate value for 'uname -m'" + exit 1 + ;; + esac + + s3Dir="s3://$BUCKET_PATH/builds/$s3Os/$s3Arch" + # latest= + latestTgz= + if [ "$latestBase" ]; then + # commented out since we aren't uploading binaries right now. + # latest="$s3Dir/$latestBase$binaryExt" + # we don't include the $binaryExt because we don't want docker.exe.zip + latestTgz="$s3Dir/$latestBase$zipExt" + fi + + if [ ! -f "$tgzDir/$tgz" ]; then + echo >&2 "error: can't find $tgzDir/$tgz - was it packaged properly?" + exit 1 + fi + # disable binary uploads for now. 
Only providing tgz downloads
+	# upload_release_build "$binDir/$binary" "$s3Dir/$binary" "$latest"
+	upload_release_build "$tgzDir/$tgz" "$s3Dir/$tgz" "$latestTgz"
+}
+
+# Upload binaries and tgz files to S3
+release_binaries() {
+	[ "$(find bundles/$VERSION -path "bundles/$VERSION/cross/*/*/docker-$VERSION")" != "" ] || {
+		echo >&2 './hack/make.sh must be run before release_binaries'
+		exit 1
+	}
+
+	for d in bundles/$VERSION/cross/*/*; do
+		GOARCH="$(basename "$d")"
+		GOOS="$(basename "$(dirname "$d")")"
+		release_build "$GOOS" "$GOARCH"
+	done
+
+	# TODO create redirect from builds/*/i686 to builds/*/i386
+
+	cat <
diff --git a/vendor/github.com/moby/moby/hack/validate/changelog-date-descending b/vendor/github.com/moby/moby/hack/validate/changelog-date-descending
new file mode 100755
--- /dev/null
+++ b/vendor/github.com/moby/moby/hack/validate/changelog-date-descending
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+changelogFile=${1:-CHANGELOG.md}
+
+if [ ! -r "$changelogFile" ]; then
+	echo "Unable to read file $changelogFile" >&2
+	exit 1
+fi
+
+grep -e '^## ' "$changelogFile" | awk '{print$3}' | sort -c -r || exit 2
+
+echo "Congratulations! Changelog $changelogFile dates are in descending order."
diff --git a/vendor/github.com/moby/moby/hack/validate/changelog-well-formed b/vendor/github.com/moby/moby/hack/validate/changelog-well-formed
new file mode 100755
index 000000000..6c7ce1a1c
--- /dev/null
+++ b/vendor/github.com/moby/moby/hack/validate/changelog-well-formed
@@ -0,0 +1,25 @@
+#!/usr/bin/env bash
+
+changelogFile=${1:-CHANGELOG.md}
+
+if [ ! -r "$changelogFile" ]; then
+	echo "Unable to read file $changelogFile" >&2
+	exit 1
+fi
+
+changelogWellFormed=1
+
+# e.g. "## 1.12.3 (2016-10-26)"
+VER_LINE_REGEX='^## [0-9]+\.[0-9]+\.[0-9]+(-ce)? \([0-9]+-[0-9]+-[0-9]+\)$'
+while read -r line; do
+	if ! [[ "$line" =~ $VER_LINE_REGEX ]]; then
+		echo "Malformed changelog $changelogFile line \"$line\"" >&2
+		changelogWellFormed=0
+	fi
+done < <(grep '^## ' $changelogFile)
+
+if [[ "$changelogWellFormed" == "1" ]]; then
+	echo "Congratulations! Changelog $changelogFile is well-formed."
+else
+	exit 2
+fi
diff --git a/vendor/github.com/moby/moby/hack/validate/dco b/vendor/github.com/moby/moby/hack/validate/dco
new file mode 100755
index 000000000..f39100160
--- /dev/null
+++ b/vendor/github.com/moby/moby/hack/validate/dco
@@ -0,0 +1,55 @@
+#!/usr/bin/env bash
+
+export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "${SCRIPTDIR}/.validate"
+
+adds=$(validate_diff --numstat | awk '{ s += $1 } END { print s }')
+dels=$(validate_diff --numstat | awk '{ s += $2 } END { print s }')
+#notDocs="$(validate_diff --numstat | awk '$3 !~ /^docs\// { print $3 }')"
+
+: ${adds:=0}
+: ${dels:=0}
+
+# "Username may only contain alphanumeric characters or dashes and cannot begin with a dash"
+githubUsernameRegex='[a-zA-Z0-9][a-zA-Z0-9-]+'
+
+# https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work
+dcoPrefix='Signed-off-by:'
+dcoRegex="^(Docker-DCO-1.1-)?$dcoPrefix ([^<]+) <([^<>@]+@[^<>]+)>( \\(github: ($githubUsernameRegex)\\))?$"
+
+check_dco() {
+	grep -qE "$dcoRegex"
+}
+
+if [ $adds -eq 0 -a $dels -eq 0 ]; then
+	echo '0 adds, 0 deletions; nothing to validate! :)'
+else
+	commits=( $(validate_log --format='format:%H%n') )
+	badCommits=()
+	for commit in "${commits[@]}"; do
+		if [ -z "$(git log -1 --format='format:' --name-status "$commit")" ]; then
+			# no content (ie, Merge commit, etc)
+			continue
+		fi
+		if ! git log -1 --format='format:%B' "$commit" | check_dco; then
+			badCommits+=( "$commit" )
+		fi
+	done
+	if [ ${#badCommits[@]} -eq 0 ]; then
+		echo "Congratulations! All commits are properly signed with the DCO!"
+	else
+		{
+			echo "These commits do not have a proper '$dcoPrefix' marker:"
+			for commit in "${badCommits[@]}"; do
+				echo " - $commit"
+			done
+			echo
+			echo 'Please amend each commit to include a properly formatted DCO marker.'
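+			# a conforming trailer (per $dcoRegex above) looks like, e.g.:
+			#   Signed-off-by: Jane Doe <jane.doe@example.com> (github: janedoe)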
+ echo + echo 'Visit the following URL for information about the Docker DCO:' + echo ' https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work' + echo + } >&2 + false + fi +fi diff --git a/vendor/github.com/moby/moby/hack/validate/default b/vendor/github.com/moby/moby/hack/validate/default new file mode 100755 index 000000000..e243f4383 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/validate/default @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +# +# Run default validation, exclude vendor because it's slow + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +. $SCRIPTDIR/dco +. $SCRIPTDIR/default-seccomp +. $SCRIPTDIR/gofmt +. $SCRIPTDIR/lint +. $SCRIPTDIR/pkg-imports +. $SCRIPTDIR/swagger +. $SCRIPTDIR/swagger-gen +. $SCRIPTDIR/test-imports +. $SCRIPTDIR/toml +. $SCRIPTDIR/vet +. $SCRIPTDIR/changelog-well-formed +. $SCRIPTDIR/changelog-date-descending diff --git a/vendor/github.com/moby/moby/hack/validate/default-seccomp b/vendor/github.com/moby/moby/hack/validate/default-seccomp new file mode 100755 index 000000000..24cbf00d2 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/validate/default-seccomp @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- 'profiles/seccomp' || true) ) +unset IFS + +if [ ${#files[@]} -gt 0 ]; then + # We run 'go generate' and see if we have a diff afterwards + go generate ./profiles/seccomp/ >/dev/null + # Let see if the working directory is clean + diffs="$(git status --porcelain -- profiles/seccomp 2>/dev/null)" + if [ "$diffs" ]; then + { + echo 'The result of go generate ./profiles/seccomp/ differs' + echo + echo "$diffs" + echo + echo 'Please re-run go generate ./profiles/seccomp/' + echo + } >&2 + false + else + echo 'Congratulations! Seccomp profile generation is done correctly.' + fi +fi diff --git a/vendor/github.com/moby/moby/hack/validate/gofmt b/vendor/github.com/moby/moby/hack/validate/gofmt new file mode 100755 index 000000000..38027a9f7 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/validate/gofmt @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | + grep -v '^vendor/' | + grep -v '\.pb\.go$' || true) ) +unset IFS + +badFiles=() +for f in "${files[@]}"; do + # we use "git show" here to validate that what's committed is formatted + if [ "$(git show "$VALIDATE_HEAD:$f" | gofmt -s -l)" ]; then + badFiles+=( "$f" ) + fi +done + +if [ ${#badFiles[@]} -eq 0 ]; then + echo 'Congratulations! All Go source files are properly formatted.' +else + { + echo "These files are not properly gofmt'd:" + for f in "${badFiles[@]}"; do + echo " - $f" + done + echo + echo 'Please reformat the above files using "gofmt -s -w" and commit the result.' 
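+			# e.g., for an illustrative path: gofmt -s -w pkg/foo/bar.go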
+ echo + } >&2 + false +fi diff --git a/vendor/github.com/moby/moby/hack/validate/lint b/vendor/github.com/moby/moby/hack/validate/lint new file mode 100755 index 000000000..341490a04 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/validate/lint @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' | grep -v '^api/types/container/' | grep -v '\.pb\.go$' || true) ) +unset IFS + +errors=() +for f in "${files[@]}"; do + failedLint=$(golint "$f") + if [ "$failedLint" ]; then + errors+=( "$failedLint" ) + fi +done + +if [ ${#errors[@]} -eq 0 ]; then + echo 'Congratulations! All Go source files have been linted.' +else + { + echo "Errors from golint:" + for err in "${errors[@]}"; do + echo "$err" + done + echo + echo 'Please fix the above errors. You can test via "golint" and commit the result.' + echo + } >&2 + false +fi diff --git a/vendor/github.com/moby/moby/hack/validate/pkg-imports b/vendor/github.com/moby/moby/hack/validate/pkg-imports new file mode 100755 index 000000000..a9aab6456 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/validate/pkg-imports @@ -0,0 +1,33 @@ +#!/usr/bin/env bash +set -e + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- 'pkg/*.go' || true) ) +unset IFS + +badFiles=() +for f in "${files[@]}"; do + IFS=$'\n' + badImports=( $(go list -e -f '{{ join .Deps "\n" }}' "$f" | sort -u | grep -vE '^github.com/docker/docker/pkg/' | grep -vE '^github.com/docker/docker/vendor' | grep -E '^github.com/docker/docker' || true) ) + unset IFS + + for import in "${badImports[@]}"; do + badFiles+=( "$f imports $import" ) + done +done + +if [ ${#badFiles[@]} -eq 0 ]; then + echo 'Congratulations! "./pkg/..." is safely isolated from internal code.' 
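+	# i.e. nothing under pkg/ (directly or transitively) depends on docker
+	# internals outside pkg/ itself and the vendored tree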
+else + { + echo 'These files import internal code: (either directly or indirectly)' + for f in "${badFiles[@]}"; do + echo " - $f" + done + echo + } >&2 + false +fi diff --git a/vendor/github.com/moby/moby/hack/validate/swagger b/vendor/github.com/moby/moby/hack/validate/swagger new file mode 100755 index 000000000..0b3c2719d --- /dev/null +++ b/vendor/github.com/moby/moby/hack/validate/swagger @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +set -e +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- 'api/swagger.yaml' || true) ) +unset IFS + +if [ ${#files[@]} -gt 0 ]; then + yamllint -c ${SCRIPTDIR}/.swagger-yamllint api/swagger.yaml + swagger validate api/swagger.yaml +fi diff --git a/vendor/github.com/moby/moby/hack/validate/swagger-gen b/vendor/github.com/moby/moby/hack/validate/swagger-gen new file mode 100755 index 000000000..451ea3f71 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/validate/swagger-gen @@ -0,0 +1,29 @@ +#!/usr/bin/env bash + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- 'api/types/' 'api/swagger.yaml' || true) ) +unset IFS + +if [ ${#files[@]} -gt 0 ]; then + ${SCRIPTDIR}/../generate-swagger-api.sh 2> /dev/null + # Let see if the working directory is clean + diffs="$(git status --porcelain -- api/types/ 2>/dev/null)" + if [ "$diffs" ]; then + { + echo 'The result of hack/generate-swagger-api.sh differs' + echo + echo "$diffs" + echo + echo 'Please update api/swagger.yaml with any api changes, then ' + echo 'run `hack/generate-swagger-api.sh`.' + } >&2 + false + else + echo 'Congratulations! All api changes are done the right way.' + fi +else + echo 'No api/types/ or api/swagger.yaml changes in diff.' +fi diff --git a/vendor/github.com/moby/moby/hack/validate/test-imports b/vendor/github.com/moby/moby/hack/validate/test-imports new file mode 100755 index 000000000..0e836a31c --- /dev/null +++ b/vendor/github.com/moby/moby/hack/validate/test-imports @@ -0,0 +1,38 @@ +#!/usr/bin/env bash +# Make sure we're not using gos' Testing package any more in integration-cli + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- 'integration-cli/*.go' || true) ) +unset IFS + +badFiles=() +for f in "${files[@]}"; do + # skip check_test.go since it *does* use the testing package + if [ "$f" = "integration-cli/check_test.go" ]; then + continue + fi + + # we use "git show" here to validate that what's committed doesn't contain golang built-in testing + if git show "$VALIDATE_HEAD:$f" | grep -q testing.T; then + if [ "$(echo $f | grep '_test')" ]; then + # allow testing.T for non- _test files + badFiles+=( "$f" ) + fi + fi +done + +if [ ${#badFiles[@]} -eq 0 ]; then + echo 'Congratulations! No testing.T found.' 
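+	# (integration-cli relies on go-check rather than the stdlib testing
+	# package, so testing.T should only appear in the check_test.go harness)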
+else + { + echo "These files use the wrong testing infrastructure:" + for f in "${badFiles[@]}"; do + echo " - $f" + done + echo + } >&2 + false +fi diff --git a/vendor/github.com/moby/moby/hack/validate/toml b/vendor/github.com/moby/moby/hack/validate/toml new file mode 100755 index 000000000..d5b2ce1c2 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/validate/toml @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- 'MAINTAINERS' || true) ) +unset IFS + +badFiles=() +for f in "${files[@]}"; do + # we use "git show" here to validate that what's committed has valid toml syntax + if ! git show "$VALIDATE_HEAD:$f" | tomlv /proc/self/fd/0 ; then + badFiles+=( "$f" ) + fi +done + +if [ ${#badFiles[@]} -eq 0 ]; then + echo 'Congratulations! All toml source files changed here have valid syntax.' +else + { + echo "These files are not valid toml:" + for f in "${badFiles[@]}"; do + echo " - $f" + done + echo + echo 'Please reformat the above files as valid toml' + echo + } >&2 + false +fi diff --git a/vendor/github.com/moby/moby/hack/validate/vendor b/vendor/github.com/moby/moby/hack/validate/vendor new file mode 100755 index 000000000..69160a9fa --- /dev/null +++ b/vendor/github.com/moby/moby/hack/validate/vendor @@ -0,0 +1,51 @@ +#!/usr/bin/env bash + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +validate_vendor_diff(){ + IFS=$'\n' + files=( $(validate_diff --diff-filter=ACMR --name-only -- 'vendor.conf' 'vendor/' || true) ) + unset IFS + + if [ ${#files[@]} -gt 0 ]; then + # We run vndr to and see if we have a diff afterwards + vndr + # Let see if the working directory is clean + diffs="$(git status --porcelain -- vendor 2>/dev/null)" + if [ "$diffs" ]; then + { + echo 'The result of vndr differs' + echo + echo "$diffs" + echo + echo 'Please vendor your package with github.com/LK4D4/vndr.' + echo + } >&2 + false + else + echo 'Congratulations! All vendoring changes are done the right way.' + fi + else + echo 'No vendor changes in diff.' + fi +} + +# 1. make sure all the vendored packages are used +# 2. make sure all the packages contain license information (just warning, because it can cause false-positive) +validate_vendor_used() { + pkgs=$(mawk '/^[a-zA-Z0-9]/ { print $1 }' < vendor.conf) + for f in $pkgs; do + if ls -d vendor/$f > /dev/null 2>&1; then + found=$(find vendor/$f -iregex '.*LICENSE.*' -or -iregex '.*COPYRIGHT.*' -or -iregex '.*COPYING.*' | wc -l) + if [ $found -eq 0 ]; then + echo "WARNING: could not find copyright information for $f" + fi + else + echo "WARNING: $f is vendored but unused" + fi + done +} + +validate_vendor_diff +validate_vendor_used diff --git a/vendor/github.com/moby/moby/hack/validate/vet b/vendor/github.com/moby/moby/hack/validate/vet new file mode 100755 index 000000000..95dc7a718 --- /dev/null +++ b/vendor/github.com/moby/moby/hack/validate/vet @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' | grep -v '^api/types/container/' || true) ) +unset IFS + +errors=() +for f in "${files[@]}"; do + failedVet=$(go vet "$f") + if [ "$failedVet" ]; then + errors+=( "$failedVet" ) + fi +done + + +if [ ${#errors[@]} -eq 0 ]; then + echo 'Congratulations! 
All Go source files have been vetted.'
+else
+	{
+		echo "Errors from go vet:"
+		for err in "${errors[@]}"; do
+			echo " - $err"
+		done
+		echo
+		echo 'Please fix the above errors. You can test via "go vet" and commit the result.'
+		echo
+	} >&2
+	false
+fi
diff --git a/vendor/github.com/moby/moby/hack/vendor.sh b/vendor/github.com/moby/moby/hack/vendor.sh
new file mode 100755
index 000000000..a7a571e7b
--- /dev/null
+++ b/vendor/github.com/moby/moby/hack/vendor.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+
+# This file is just a wrapper around the vndr tool (github.com/LK4D4/vndr).
+# To update dependencies, change the `vendor.conf` file in the root of the
+# project. Please refer to https://github.com/LK4D4/vndr/blob/master/README.md for
+# vndr usage.
+
+set -e
+
+if ! hash vndr; then
+	echo "Please install vndr with \"go get github.com/LK4D4/vndr\" and put it in your \$GOPATH"
+	exit 1
+fi
+
+vndr "$@"
diff --git a/vendor/github.com/moby/moby/image/cache/cache.go b/vendor/github.com/moby/moby/image/cache/cache.go
new file mode 100644
index 000000000..e074bebcc
--- /dev/null
+++ b/vendor/github.com/moby/moby/image/cache/cache.go
@@ -0,0 +1,253 @@
+package cache
+
+import (
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strings"
+
+	containertypes "github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/dockerversion"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/layer"
+	"github.com/pkg/errors"
+)
+
+// NewLocal returns a local image cache, based on the parent chain
+func NewLocal(store image.Store) *LocalImageCache {
+	return &LocalImageCache{
+		store: store,
+	}
+}
+
+// LocalImageCache is a cache based on the parent chain.
+type LocalImageCache struct {
+	store image.Store
+}
+
+// GetCache returns the image id found in the cache
+func (lic *LocalImageCache) GetCache(imgID string, config *containertypes.Config) (string, error) {
+	return getImageIDAndError(getLocalCachedImage(lic.store, image.ID(imgID), config))
+}
+
+// New returns an image cache, based on history objects
+func New(store image.Store) *ImageCache {
+	return &ImageCache{
+		store:           store,
+		localImageCache: NewLocal(store),
+	}
+}
+
+// ImageCache is a cache based on history objects. It requires an initial set of images.
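+// Use Populate to register candidate source images before querying GetCache.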
+type ImageCache struct { + sources []*image.Image + store image.Store + localImageCache *LocalImageCache +} + +// Populate adds an image to the cache (to be queried later) +func (ic *ImageCache) Populate(image *image.Image) { + ic.sources = append(ic.sources, image) +} + +// GetCache returns the image id found in the cache +func (ic *ImageCache) GetCache(parentID string, cfg *containertypes.Config) (string, error) { + imgID, err := ic.localImageCache.GetCache(parentID, cfg) + if err != nil { + return "", err + } + if imgID != "" { + for _, s := range ic.sources { + if ic.isParent(s.ID(), image.ID(imgID)) { + return imgID, nil + } + } + } + + var parent *image.Image + lenHistory := 0 + if parentID != "" { + parent, err = ic.store.Get(image.ID(parentID)) + if err != nil { + return "", errors.Wrapf(err, "unable to find image %v", parentID) + } + lenHistory = len(parent.History) + } + + for _, target := range ic.sources { + if !isValidParent(target, parent) || !isValidConfig(cfg, target.History[lenHistory]) { + continue + } + + if len(target.History)-1 == lenHistory { // last + if parent != nil { + if err := ic.store.SetParent(target.ID(), parent.ID()); err != nil { + return "", errors.Wrapf(err, "failed to set parent for %v to %v", target.ID(), parent.ID()) + } + } + return target.ID().String(), nil + } + + imgID, err := ic.restoreCachedImage(parent, target, cfg) + if err != nil { + return "", errors.Wrapf(err, "failed to restore cached image from %q to %v", parentID, target.ID()) + } + + ic.sources = []*image.Image{target} // avoid jumping to different target, tuned for safety atm + return imgID.String(), nil + } + + return "", nil +} + +func (ic *ImageCache) restoreCachedImage(parent, target *image.Image, cfg *containertypes.Config) (image.ID, error) { + var history []image.History + rootFS := image.NewRootFS() + lenHistory := 0 + if parent != nil { + history = parent.History + rootFS = parent.RootFS + lenHistory = len(parent.History) + } + history = append(history, target.History[lenHistory]) + if layer := getLayerForHistoryIndex(target, lenHistory); layer != "" { + rootFS.Append(layer) + } + + config, err := json.Marshal(&image.Image{ + V1Image: image.V1Image{ + DockerVersion: dockerversion.Version, + Config: cfg, + Architecture: target.Architecture, + OS: target.OS, + Author: target.Author, + Created: history[len(history)-1].Created, + }, + RootFS: rootFS, + History: history, + OSFeatures: target.OSFeatures, + OSVersion: target.OSVersion, + }) + if err != nil { + return "", errors.Wrap(err, "failed to marshal image config") + } + + imgID, err := ic.store.Create(config) + if err != nil { + return "", errors.Wrap(err, "failed to create cache image") + } + + if parent != nil { + if err := ic.store.SetParent(imgID, parent.ID()); err != nil { + return "", errors.Wrapf(err, "failed to set parent for %v to %v", target.ID(), parent.ID()) + } + } + return imgID, nil +} + +func (ic *ImageCache) isParent(imgID, parentID image.ID) bool { + nextParent, err := ic.store.GetParent(imgID) + if err != nil { + return false + } + if nextParent == parentID { + return true + } + return ic.isParent(nextParent, parentID) +} + +func getLayerForHistoryIndex(image *image.Image, index int) layer.DiffID { + layerIndex := 0 + for i, h := range image.History { + if i == index { + if h.EmptyLayer { + return "" + } + break + } + if !h.EmptyLayer { + layerIndex++ + } + } + return image.RootFS.DiffIDs[layerIndex] // validate? 
+}
+
+func isValidConfig(cfg *containertypes.Config, h image.History) bool {
+	// TODO: make this format better than a join that loses data
+	return strings.Join(cfg.Cmd, " ") == h.CreatedBy
+}
+
+func isValidParent(img, parent *image.Image) bool {
+	if len(img.History) == 0 {
+		return false
+	}
+	if parent == nil || len(parent.History) == 0 && len(parent.RootFS.DiffIDs) == 0 {
+		return true
+	}
+	if len(parent.History) >= len(img.History) {
+		return false
+	}
+	if len(parent.RootFS.DiffIDs) > len(img.RootFS.DiffIDs) {
+		return false
+	}
+
+	for i, h := range parent.History {
+		if !reflect.DeepEqual(h, img.History[i]) {
+			return false
+		}
+	}
+	for i, d := range parent.RootFS.DiffIDs {
+		if d != img.RootFS.DiffIDs[i] {
+			return false
+		}
+	}
+	return true
+}
+
+func getImageIDAndError(img *image.Image, err error) (string, error) {
+	if img == nil || err != nil {
+		return "", err
+	}
+	return img.ID().String(), nil
+}
+
+// getLocalCachedImage returns the most recently created image that is a child
+// of the image with imgID, that had the same config when it was
+// created. nil is returned if a child cannot be found. An error is
+// returned if the parent image cannot be found.
+func getLocalCachedImage(imageStore image.Store, imgID image.ID, config *containertypes.Config) (*image.Image, error) {
+	// Loop on the children of the given image and check the config
+	getMatch := func(siblings []image.ID) (*image.Image, error) {
+		var match *image.Image
+		for _, id := range siblings {
+			img, err := imageStore.Get(id)
+			if err != nil {
+				return nil, fmt.Errorf("unable to find image %q", id)
+			}
+
+			if compare(&img.ContainerConfig, config) {
+				// check for the most up to date match
+				if match == nil || match.Created.Before(img.Created) {
+					match = img
+				}
+			}
+		}
+		return match, nil
+	}
+
+	// In this case, this is `FROM scratch`, which isn't an actual image.
+	if imgID == "" {
+		images := imageStore.Map()
+		var siblings []image.ID
+		for id, img := range images {
+			if img.Parent == imgID {
+				siblings = append(siblings, id)
+			}
+		}
+		return getMatch(siblings)
+	}
+
+	// find match from child images
+	siblings := imageStore.Children(imgID)
+	return getMatch(siblings)
+}
diff --git a/vendor/github.com/moby/moby/image/cache/compare.go b/vendor/github.com/moby/moby/image/cache/compare.go
new file mode 100644
index 000000000..923793246
--- /dev/null
+++ b/vendor/github.com/moby/moby/image/cache/compare.go
@@ -0,0 +1,63 @@
+package cache
+
+import (
+	"github.com/docker/docker/api/types/container"
+)
+
+// compare compares two Config structs.
Do not compare the "Image" nor "Hostname" fields +// If OpenStdin is set, then it differs +func compare(a, b *container.Config) bool { + if a == nil || b == nil || + a.OpenStdin || b.OpenStdin { + return false + } + if a.AttachStdout != b.AttachStdout || + a.AttachStderr != b.AttachStderr || + a.User != b.User || + a.OpenStdin != b.OpenStdin || + a.Tty != b.Tty { + return false + } + + if len(a.Cmd) != len(b.Cmd) || + len(a.Env) != len(b.Env) || + len(a.Labels) != len(b.Labels) || + len(a.ExposedPorts) != len(b.ExposedPorts) || + len(a.Entrypoint) != len(b.Entrypoint) || + len(a.Volumes) != len(b.Volumes) { + return false + } + + for i := 0; i < len(a.Cmd); i++ { + if a.Cmd[i] != b.Cmd[i] { + return false + } + } + for i := 0; i < len(a.Env); i++ { + if a.Env[i] != b.Env[i] { + return false + } + } + for k, v := range a.Labels { + if v != b.Labels[k] { + return false + } + } + for k := range a.ExposedPorts { + if _, exists := b.ExposedPorts[k]; !exists { + return false + } + } + + for i := 0; i < len(a.Entrypoint); i++ { + if a.Entrypoint[i] != b.Entrypoint[i] { + return false + } + } + for key := range a.Volumes { + if _, exists := b.Volumes[key]; !exists { + return false + } + } + return true +} diff --git a/vendor/github.com/moby/moby/image/cache/compare_test.go b/vendor/github.com/moby/moby/image/cache/compare_test.go new file mode 100644 index 000000000..10e464b43 --- /dev/null +++ b/vendor/github.com/moby/moby/image/cache/compare_test.go @@ -0,0 +1,126 @@ +package cache + +import ( + "testing" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/go-connections/nat" +) + +// Just to make life easier +func newPortNoError(proto, port string) nat.Port { + p, _ := nat.NewPort(proto, port) + return p +} + +func TestCompare(t *testing.T) { + ports1 := make(nat.PortSet) + ports1[newPortNoError("tcp", "1111")] = struct{}{} + ports1[newPortNoError("tcp", "2222")] = struct{}{} + ports2 := make(nat.PortSet) + ports2[newPortNoError("tcp", "3333")] = struct{}{} + ports2[newPortNoError("tcp", "4444")] = struct{}{} + ports3 := make(nat.PortSet) + ports3[newPortNoError("tcp", "1111")] = struct{}{} + ports3[newPortNoError("tcp", "2222")] = struct{}{} + ports3[newPortNoError("tcp", "5555")] = struct{}{} + volumes1 := make(map[string]struct{}) + volumes1["/test1"] = struct{}{} + volumes2 := make(map[string]struct{}) + volumes2["/test2"] = struct{}{} + volumes3 := make(map[string]struct{}) + volumes3["/test1"] = struct{}{} + volumes3["/test3"] = struct{}{} + envs1 := []string{"ENV1=value1", "ENV2=value2"} + envs2 := []string{"ENV1=value1", "ENV3=value3"} + entrypoint1 := strslice.StrSlice{"/bin/sh", "-c"} + entrypoint2 := strslice.StrSlice{"/bin/sh", "-d"} + entrypoint3 := strslice.StrSlice{"/bin/sh", "-c", "echo"} + cmd1 := strslice.StrSlice{"/bin/sh", "-c"} + cmd2 := strslice.StrSlice{"/bin/sh", "-d"} + cmd3 := strslice.StrSlice{"/bin/sh", "-c", "echo"} + labels1 := map[string]string{"LABEL1": "value1", "LABEL2": "value2"} + labels2 := map[string]string{"LABEL1": "value1", "LABEL2": "value3"} + labels3 := map[string]string{"LABEL1": "value1", "LABEL2": "value2", "LABEL3": "value3"} + + sameConfigs := map[*container.Config]*container.Config{ + // Empty config + {}: {}, + // Does not compare hostname, domainname & image + { + Hostname: "host1", + Domainname: "domain1", + Image: "image1", + User: "user", + }: { + Hostname: "host2", + Domainname: "domain2", + Image: "image2", + User: "user", + }, + // only OpenStdin + {OpenStdin: false}: 
{OpenStdin: false}, + // only env + {Env: envs1}: {Env: envs1}, + // only cmd + {Cmd: cmd1}: {Cmd: cmd1}, + // only labels + {Labels: labels1}: {Labels: labels1}, + // only exposedPorts + {ExposedPorts: ports1}: {ExposedPorts: ports1}, + // only entrypoints + {Entrypoint: entrypoint1}: {Entrypoint: entrypoint1}, + // only volumes + {Volumes: volumes1}: {Volumes: volumes1}, + } + differentConfigs := map[*container.Config]*container.Config{ + nil: nil, + { + Hostname: "host1", + Domainname: "domain1", + Image: "image1", + User: "user1", + }: { + Hostname: "host1", + Domainname: "domain1", + Image: "image1", + User: "user2", + }, + // only OpenStdin + {OpenStdin: false}: {OpenStdin: true}, + {OpenStdin: true}: {OpenStdin: false}, + // only env + {Env: envs1}: {Env: envs2}, + // only cmd + {Cmd: cmd1}: {Cmd: cmd2}, + // not the same number of parts + {Cmd: cmd1}: {Cmd: cmd3}, + // only labels + {Labels: labels1}: {Labels: labels2}, + // not the same number of labels + {Labels: labels1}: {Labels: labels3}, + // only exposedPorts + {ExposedPorts: ports1}: {ExposedPorts: ports2}, + // not the same number of ports + {ExposedPorts: ports1}: {ExposedPorts: ports3}, + // only entrypoints + {Entrypoint: entrypoint1}: {Entrypoint: entrypoint2}, + // not the same number of parts + {Entrypoint: entrypoint1}: {Entrypoint: entrypoint3}, + // only volumes + {Volumes: volumes1}: {Volumes: volumes2}, + // not the same number of labels + {Volumes: volumes1}: {Volumes: volumes3}, + } + for config1, config2 := range sameConfigs { + if !compare(config1, config2) { + t.Fatalf("Compare should be true for [%v] and [%v]", config1, config2) + } + } + for config1, config2 := range differentConfigs { + if compare(config1, config2) { + t.Fatalf("Compare should be false for [%v] and [%v]", config1, config2) + } + } +} diff --git a/vendor/github.com/moby/moby/image/fs.go b/vendor/github.com/moby/moby/image/fs.go new file mode 100644 index 000000000..10f6dab5f --- /dev/null +++ b/vendor/github.com/moby/moby/image/fs.go @@ -0,0 +1,178 @@ +package image + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/ioutils" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// DigestWalkFunc is function called by StoreBackend.Walk +type DigestWalkFunc func(id digest.Digest) error + +// StoreBackend provides interface for image.Store persistence +type StoreBackend interface { + Walk(f DigestWalkFunc) error + Get(id digest.Digest) ([]byte, error) + Set(data []byte) (digest.Digest, error) + Delete(id digest.Digest) error + SetMetadata(id digest.Digest, key string, data []byte) error + GetMetadata(id digest.Digest, key string) ([]byte, error) + DeleteMetadata(id digest.Digest, key string) error +} + +// fs implements StoreBackend using the filesystem. 
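+// Content is stored under <root>/content/<algorithm>/<digest-hex>, and
+// metadata for a digest under <root>/metadata/<algorithm>/<digest-hex>/<key>.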
+type fs struct { + sync.RWMutex + root string +} + +const ( + contentDirName = "content" + metadataDirName = "metadata" +) + +// NewFSStoreBackend returns new filesystem based backend for image.Store +func NewFSStoreBackend(root string) (StoreBackend, error) { + return newFSStore(root) +} + +func newFSStore(root string) (*fs, error) { + s := &fs{ + root: root, + } + if err := os.MkdirAll(filepath.Join(root, contentDirName, string(digest.Canonical)), 0700); err != nil { + return nil, errors.Wrap(err, "failed to create storage backend") + } + if err := os.MkdirAll(filepath.Join(root, metadataDirName, string(digest.Canonical)), 0700); err != nil { + return nil, errors.Wrap(err, "failed to create storage backend") + } + return s, nil +} + +func (s *fs) contentFile(dgst digest.Digest) string { + return filepath.Join(s.root, contentDirName, string(dgst.Algorithm()), dgst.Hex()) +} + +func (s *fs) metadataDir(dgst digest.Digest) string { + return filepath.Join(s.root, metadataDirName, string(dgst.Algorithm()), dgst.Hex()) +} + +// Walk calls the supplied callback for each image ID in the storage backend. +func (s *fs) Walk(f DigestWalkFunc) error { + // Only Canonical digest (sha256) is currently supported + s.RLock() + dir, err := ioutil.ReadDir(filepath.Join(s.root, contentDirName, string(digest.Canonical))) + s.RUnlock() + if err != nil { + return err + } + for _, v := range dir { + dgst := digest.NewDigestFromHex(string(digest.Canonical), v.Name()) + if err := dgst.Validate(); err != nil { + logrus.Debugf("skipping invalid digest %s: %s", dgst, err) + continue + } + if err := f(dgst); err != nil { + return err + } + } + return nil +} + +// Get returns the content stored under a given digest. +func (s *fs) Get(dgst digest.Digest) ([]byte, error) { + s.RLock() + defer s.RUnlock() + + return s.get(dgst) +} + +func (s *fs) get(dgst digest.Digest) ([]byte, error) { + content, err := ioutil.ReadFile(s.contentFile(dgst)) + if err != nil { + return nil, errors.Wrapf(err, "failed to get digest %s", dgst) + } + + // todo: maybe optional + if digest.FromBytes(content) != dgst { + return nil, fmt.Errorf("failed to verify: %v", dgst) + } + + return content, nil +} + +// Set stores content by checksum. +func (s *fs) Set(data []byte) (digest.Digest, error) { + s.Lock() + defer s.Unlock() + + if len(data) == 0 { + return "", fmt.Errorf("invalid empty data") + } + + dgst := digest.FromBytes(data) + if err := ioutils.AtomicWriteFile(s.contentFile(dgst), data, 0600); err != nil { + return "", errors.Wrap(err, "failed to write digest data") + } + + return dgst, nil +} + +// Delete removes content and metadata files associated with the digest. +func (s *fs) Delete(dgst digest.Digest) error { + s.Lock() + defer s.Unlock() + + if err := os.RemoveAll(s.metadataDir(dgst)); err != nil { + return err + } + if err := os.Remove(s.contentFile(dgst)); err != nil { + return err + } + return nil +} + +// SetMetadata sets metadata for a given ID. It fails if there's no base file. +func (s *fs) SetMetadata(dgst digest.Digest, key string, data []byte) error { + s.Lock() + defer s.Unlock() + if _, err := s.get(dgst); err != nil { + return err + } + + baseDir := filepath.Join(s.metadataDir(dgst)) + if err := os.MkdirAll(baseDir, 0700); err != nil { + return err + } + return ioutils.AtomicWriteFile(filepath.Join(s.metadataDir(dgst), key), data, 0600) +} + +// GetMetadata returns metadata for a given digest. 
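+// It fails if content for the digest does not exist in the store.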
+func (s *fs) GetMetadata(dgst digest.Digest, key string) ([]byte, error) { + s.RLock() + defer s.RUnlock() + + if _, err := s.get(dgst); err != nil { + return nil, err + } + bytes, err := ioutil.ReadFile(filepath.Join(s.metadataDir(dgst), key)) + if err != nil { + return nil, errors.Wrap(err, "failed to read metadata") + } + return bytes, nil +} + +// DeleteMetadata removes the metadata associated with a digest. +func (s *fs) DeleteMetadata(dgst digest.Digest, key string) error { + s.Lock() + defer s.Unlock() + + return os.RemoveAll(filepath.Join(s.metadataDir(dgst), key)) +} diff --git a/vendor/github.com/moby/moby/image/fs_test.go b/vendor/github.com/moby/moby/image/fs_test.go new file mode 100644 index 000000000..5f2437cad --- /dev/null +++ b/vendor/github.com/moby/moby/image/fs_test.go @@ -0,0 +1,275 @@ +package image + +import ( + "bytes" + "crypto/rand" + "crypto/sha256" + "encoding/hex" + "errors" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/docker/docker/pkg/testutil" + "github.com/opencontainers/go-digest" + "github.com/stretchr/testify/assert" +) + +func defaultFSStoreBackend(t *testing.T) (StoreBackend, func()) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + assert.NoError(t, err) + + fsBackend, err := NewFSStoreBackend(tmpdir) + assert.NoError(t, err) + + return fsBackend, func() { os.RemoveAll(tmpdir) } +} + +func TestFSGetInvalidData(t *testing.T) { + store, cleanup := defaultFSStoreBackend(t) + defer cleanup() + + id, err := store.Set([]byte("foobar")) + assert.NoError(t, err) + + dgst := digest.Digest(id) + + err = ioutil.WriteFile(filepath.Join(store.(*fs).root, contentDirName, string(dgst.Algorithm()), dgst.Hex()), []byte("foobar2"), 0600) + assert.NoError(t, err) + + _, err = store.Get(id) + testutil.ErrorContains(t, err, "failed to verify") +} + +func TestFSInvalidSet(t *testing.T) { + store, cleanup := defaultFSStoreBackend(t) + defer cleanup() + + id := digest.FromBytes([]byte("foobar")) + err := os.Mkdir(filepath.Join(store.(*fs).root, contentDirName, string(id.Algorithm()), id.Hex()), 0700) + assert.NoError(t, err) + + _, err = store.Set([]byte("foobar")) + testutil.ErrorContains(t, err, "failed to write digest data") +} + +func TestFSInvalidRoot(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + assert.NoError(t, err) + defer os.RemoveAll(tmpdir) + + tcases := []struct { + root, invalidFile string + }{ + {"root", "root"}, + {"root", "root/content"}, + {"root", "root/metadata"}, + } + + for _, tc := range tcases { + root := filepath.Join(tmpdir, tc.root) + filePath := filepath.Join(tmpdir, tc.invalidFile) + err := os.MkdirAll(filepath.Dir(filePath), 0700) + assert.NoError(t, err) + + f, err := os.Create(filePath) + assert.NoError(t, err) + f.Close() + + _, err = NewFSStoreBackend(root) + testutil.ErrorContains(t, err, "failed to create storage backend") + + os.RemoveAll(root) + } + +} + +func TestFSMetadataGetSet(t *testing.T) { + store, cleanup := defaultFSStoreBackend(t) + defer cleanup() + + id, err := store.Set([]byte("foo")) + assert.NoError(t, err) + + id2, err := store.Set([]byte("bar")) + assert.NoError(t, err) + + tcases := []struct { + id digest.Digest + key string + value []byte + }{ + {id, "tkey", []byte("tval1")}, + {id, "tkey2", []byte("tval2")}, + {id2, "tkey", []byte("tval3")}, + } + + for _, tc := range tcases { + err = store.SetMetadata(tc.id, tc.key, tc.value) + assert.NoError(t, err) + + actual, err := store.GetMetadata(tc.id, tc.key) + assert.NoError(t, err) + + if bytes.Compare(actual, 
tc.value) != 0 { + t.Fatalf("Metadata expected %q, got %q", tc.value, actual) + } + } + + _, err = store.GetMetadata(id2, "tkey2") + testutil.ErrorContains(t, err, "failed to read metadata") + + id3 := digest.FromBytes([]byte("baz")) + err = store.SetMetadata(id3, "tkey", []byte("tval")) + testutil.ErrorContains(t, err, "failed to get digest") + + _, err = store.GetMetadata(id3, "tkey") + testutil.ErrorContains(t, err, "failed to get digest") +} + +func TestFSInvalidWalker(t *testing.T) { + store, cleanup := defaultFSStoreBackend(t) + defer cleanup() + + fooID, err := store.Set([]byte("foo")) + assert.NoError(t, err) + + err = ioutil.WriteFile(filepath.Join(store.(*fs).root, contentDirName, "sha256/foobar"), []byte("foobar"), 0600) + assert.NoError(t, err) + + n := 0 + err = store.Walk(func(id digest.Digest) error { + assert.Equal(t, fooID, id) + n++ + return nil + }) + assert.NoError(t, err) + assert.Equal(t, 1, n) +} + +func TestFSGetSet(t *testing.T) { + store, cleanup := defaultFSStoreBackend(t) + defer cleanup() + + type tcase struct { + input []byte + expected digest.Digest + } + tcases := []tcase{ + {[]byte("foobar"), digest.Digest("sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2")}, + } + + randomInput := make([]byte, 8*1024) + _, err := rand.Read(randomInput) + assert.NoError(t, err) + + // skipping use of digest pkg because it is used by the implementation + h := sha256.New() + _, err = h.Write(randomInput) + assert.NoError(t, err) + + tcases = append(tcases, tcase{ + input: randomInput, + expected: digest.Digest("sha256:" + hex.EncodeToString(h.Sum(nil))), + }) + + for _, tc := range tcases { + id, err := store.Set([]byte(tc.input)) + assert.NoError(t, err) + assert.Equal(t, tc.expected, id) + } + + for _, tc := range tcases { + data, err := store.Get(tc.expected) + assert.NoError(t, err) + if bytes.Compare(data, tc.input) != 0 { + t.Fatalf("expected data %q, got %q", tc.input, data) + } + } +} + +func TestFSGetUnsetKey(t *testing.T) { + store, cleanup := defaultFSStoreBackend(t) + defer cleanup() + + for _, key := range []digest.Digest{"foobar:abc", "sha256:abc", "sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2a"} { + _, err := store.Get(key) + testutil.ErrorContains(t, err, "failed to get digest") + } +} + +func TestFSGetEmptyData(t *testing.T) { + store, cleanup := defaultFSStoreBackend(t) + defer cleanup() + + for _, emptyData := range [][]byte{nil, {}} { + _, err := store.Set(emptyData) + testutil.ErrorContains(t, err, "invalid empty data") + } +} + +func TestFSDelete(t *testing.T) { + store, cleanup := defaultFSStoreBackend(t) + defer cleanup() + + id, err := store.Set([]byte("foo")) + assert.NoError(t, err) + + id2, err := store.Set([]byte("bar")) + assert.NoError(t, err) + + err = store.Delete(id) + assert.NoError(t, err) + + _, err = store.Get(id) + testutil.ErrorContains(t, err, "failed to get digest") + + _, err = store.Get(id2) + assert.NoError(t, err) + + err = store.Delete(id2) + assert.NoError(t, err) + + _, err = store.Get(id2) + testutil.ErrorContains(t, err, "failed to get digest") +} + +func TestFSWalker(t *testing.T) { + store, cleanup := defaultFSStoreBackend(t) + defer cleanup() + + id, err := store.Set([]byte("foo")) + assert.NoError(t, err) + + id2, err := store.Set([]byte("bar")) + assert.NoError(t, err) + + tcases := make(map[digest.Digest]struct{}) + tcases[id] = struct{}{} + tcases[id2] = struct{}{} + n := 0 + err = store.Walk(func(id digest.Digest) error { + delete(tcases, id) + n++ + return nil + }) + 
assert.NoError(t, err) + assert.Equal(t, 2, n) + assert.Len(t, tcases, 0) +} + +func TestFSWalkerStopOnError(t *testing.T) { + store, cleanup := defaultFSStoreBackend(t) + defer cleanup() + + id, err := store.Set([]byte("foo")) + assert.NoError(t, err) + + tcases := make(map[digest.Digest]struct{}) + tcases[id] = struct{}{} + err = store.Walk(func(id digest.Digest) error { + return errors.New("what") + }) + testutil.ErrorContains(t, err, "what") +} diff --git a/vendor/github.com/moby/moby/image/image.go b/vendor/github.com/moby/moby/image/image.go new file mode 100644 index 000000000..c63aec523 --- /dev/null +++ b/vendor/github.com/moby/moby/image/image.go @@ -0,0 +1,223 @@ +package image + +import ( + "encoding/json" + "errors" + "io" + "runtime" + "strings" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/layer" + "github.com/opencontainers/go-digest" +) + +// ID is the content-addressable ID of an image. +type ID digest.Digest + +func (id ID) String() string { + return id.Digest().String() +} + +// Digest converts ID into a digest +func (id ID) Digest() digest.Digest { + return digest.Digest(id) +} + +// IDFromDigest creates an ID from a digest +func IDFromDigest(digest digest.Digest) ID { + return ID(digest) +} + +// V1Image stores the V1 image configuration. +type V1Image struct { + // ID is a unique 64 character identifier of the image + ID string `json:"id,omitempty"` + // Parent is the ID of the parent image + Parent string `json:"parent,omitempty"` + // Comment is the commit message that was set when committing the image + Comment string `json:"comment,omitempty"` + // Created is the timestamp at which the image was created + Created time.Time `json:"created"` + // Container is the id of the container used to commit + Container string `json:"container,omitempty"` + // ContainerConfig is the configuration of the container that is committed into the image + ContainerConfig container.Config `json:"container_config,omitempty"` + // DockerVersion specifies the version of Docker that was used to build the image + DockerVersion string `json:"docker_version,omitempty"` + // Author is the name of the author that was specified when committing the image + Author string `json:"author,omitempty"` + // Config is the configuration of the container received from the client + Config *container.Config `json:"config,omitempty"` + // Architecture is the hardware that the image is built and runs on + Architecture string `json:"architecture,omitempty"` + // OS is the operating system used to build and run the image + OS string `json:"os,omitempty"` + // Size is the total size of the image including all layers it is composed of + Size int64 `json:",omitempty"` +} + +// Image stores the image configuration +type Image struct { + V1Image + Parent ID `json:"parent,omitempty"` + RootFS *RootFS `json:"rootfs,omitempty"` + History []History `json:"history,omitempty"` + OSVersion string `json:"os.version,omitempty"` + OSFeatures []string `json:"os.features,omitempty"` + + // rawJSON caches the immutable JSON associated with this image. + rawJSON []byte + + // computedID is the ID computed from the hash of the image config. + // Not to be confused with the legacy V1 ID in V1Image. + computedID ID +} + +// RawJSON returns the immutable JSON associated with the image. +func (img *Image) RawJSON() []byte { + return img.rawJSON +} + +// ID returns the image's content-addressable ID. 
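+// The ID is the digest computed from the image's configuration JSON.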
+func (img *Image) ID() ID { + return img.computedID +} + +// ImageID stringifies ID. +func (img *Image) ImageID() string { + return img.ID().String() +} + +// RunConfig returns the image's container config. +func (img *Image) RunConfig() *container.Config { + return img.Config +} + +// Platform returns the image's operating system. If not populated, defaults to the host runtime OS. +func (img *Image) Platform() string { + os := img.OS + if os == "" { + os = runtime.GOOS + } + return os +} + +// MarshalJSON serializes the image to JSON. It sorts the top-level keys so +// that JSON that's been manipulated by a push/pull cycle with a legacy +// registry won't end up with a different key order. +func (img *Image) MarshalJSON() ([]byte, error) { + type MarshalImage Image + + pass1, err := json.Marshal(MarshalImage(*img)) + if err != nil { + return nil, err + } + + var c map[string]*json.RawMessage + if err := json.Unmarshal(pass1, &c); err != nil { + return nil, err + } + return json.Marshal(c) +} + +// ChildConfig is the configuration to apply to an Image to create a new +// Child image. Other properties of the image are copied from the parent. +type ChildConfig struct { + ContainerID string + Author string + Comment string + DiffID layer.DiffID + ContainerConfig *container.Config + Config *container.Config +} + +// NewChildImage creates a new Image as a child of this image. +func NewChildImage(img *Image, child ChildConfig, platform string) *Image { + isEmptyLayer := layer.IsEmpty(child.DiffID) + var rootFS *RootFS + if img.RootFS != nil { + rootFS = img.RootFS.Clone() + } else { + rootFS = NewRootFS() + } + + if !isEmptyLayer { + rootFS.Append(child.DiffID) + } + imgHistory := NewHistory( + child.Author, + child.Comment, + strings.Join(child.ContainerConfig.Cmd, " "), + isEmptyLayer) + + return &Image{ + V1Image: V1Image{ + DockerVersion: dockerversion.Version, + Config: child.Config, + Architecture: runtime.GOARCH, + OS: platform, + Container: child.ContainerID, + ContainerConfig: *child.ContainerConfig, + Author: child.Author, + Created: imgHistory.Created, + }, + RootFS: rootFS, + History: append(img.History, imgHistory), + OSFeatures: img.OSFeatures, + OSVersion: img.OSVersion, + } +} + +// History stores build commands that were used to create an image +type History struct { + // Created is the timestamp at which the image was created + Created time.Time `json:"created"` + // Author is the name of the author that was specified when committing the image + Author string `json:"author,omitempty"` + // CreatedBy keeps the Dockerfile command used while building the image + CreatedBy string `json:"created_by,omitempty"` + // Comment is the commit message that was set when committing the image + Comment string `json:"comment,omitempty"` + // EmptyLayer is set to true if this history item did not generate a + // layer. Otherwise, the history item is associated with the next + // layer in the RootFS section. 
+ EmptyLayer bool `json:"empty_layer,omitempty"` +} + +// NewHistory creates a new history struct from arguments, and sets the created +// time to the current time in UTC +func NewHistory(author, comment, createdBy string, isEmptyLayer bool) History { + return History{ + Author: author, + Created: time.Now().UTC(), + CreatedBy: createdBy, + Comment: comment, + EmptyLayer: isEmptyLayer, + } +} + +// Exporter provides interface for loading and saving images +type Exporter interface { + Load(io.ReadCloser, io.Writer, bool) error + // TODO: Load(net.Context, io.ReadCloser, <- chan StatusMessage) error + Save([]string, io.Writer) error +} + +// NewFromJSON creates an Image configuration from json. +func NewFromJSON(src []byte) (*Image, error) { + img := &Image{} + + if err := json.Unmarshal(src, img); err != nil { + return nil, err + } + if img.RootFS == nil { + return nil, errors.New("invalid image JSON, no RootFS key") + } + + img.rawJSON = src + + return img, nil +} diff --git a/vendor/github.com/moby/moby/image/image_test.go b/vendor/github.com/moby/moby/image/image_test.go new file mode 100644 index 000000000..e04587eda --- /dev/null +++ b/vendor/github.com/moby/moby/image/image_test.go @@ -0,0 +1,90 @@ +package image + +import ( + "encoding/json" + "sort" + "strings" + "testing" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/layer" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const sampleImageJSON = `{ + "architecture": "amd64", + "os": "linux", + "config": {}, + "rootfs": { + "type": "layers", + "diff_ids": [] + } +}` + +func TestNewFromJSON(t *testing.T) { + img, err := NewFromJSON([]byte(sampleImageJSON)) + require.NoError(t, err) + assert.Equal(t, sampleImageJSON, string(img.RawJSON())) +} + +func TestNewFromJSONWithInvalidJSON(t *testing.T) { + _, err := NewFromJSON([]byte("{}")) + assert.EqualError(t, err, "invalid image JSON, no RootFS key") +} + +func TestMarshalKeyOrder(t *testing.T) { + b, err := json.Marshal(&Image{ + V1Image: V1Image{ + Comment: "a", + Author: "b", + Architecture: "c", + }, + }) + assert.NoError(t, err) + + expectedOrder := []string{"architecture", "author", "comment"} + var indexes []int + for _, k := range expectedOrder { + indexes = append(indexes, strings.Index(string(b), k)) + } + + if !sort.IntsAreSorted(indexes) { + t.Fatal("invalid key order in JSON: ", string(b)) + } +} + +func TestNewChildImageFromImageWithRootFS(t *testing.T) { + rootFS := NewRootFS() + rootFS.Append(layer.DiffID("ba5e")) + parent := &Image{ + RootFS: rootFS, + History: []History{ + NewHistory("a", "c", "r", false), + }, + } + childConfig := ChildConfig{ + DiffID: layer.DiffID("abcdef"), + Author: "author", + Comment: "comment", + ContainerConfig: &container.Config{ + Cmd: []string{"echo", "foo"}, + }, + Config: &container.Config{}, + } + + newImage := NewChildImage(parent, childConfig, "platform") + expectedDiffIDs := []layer.DiffID{layer.DiffID("ba5e"), layer.DiffID("abcdef")} + assert.Equal(t, expectedDiffIDs, newImage.RootFS.DiffIDs) + assert.Equal(t, childConfig.Author, newImage.Author) + assert.Equal(t, childConfig.Config, newImage.Config) + assert.Equal(t, *childConfig.ContainerConfig, newImage.ContainerConfig) + assert.Equal(t, "platform", newImage.OS) + assert.Equal(t, childConfig.Config, newImage.Config) + + assert.Len(t, newImage.History, 2) + assert.Equal(t, childConfig.Comment, newImage.History[1].Comment) + + // RootFS should be copied not mutated + assert.NotEqual(t, parent.RootFS.DiffIDs, 
newImage.RootFS.DiffIDs)
+}
diff --git a/vendor/github.com/moby/moby/image/rootfs.go b/vendor/github.com/moby/moby/image/rootfs.go
new file mode 100644
index 000000000..5a9020f0e
--- /dev/null
+++ b/vendor/github.com/moby/moby/image/rootfs.go
@@ -0,0 +1,54 @@
+package image
+
+import (
+	"runtime"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/layer"
+)
+
+// TypeLayers is used for RootFS.Type for filesystems organized into layers.
+const TypeLayers = "layers"
+
+// typeLayersWithBase is an older format used by Windows up to v1.12. We
+// explicitly handle this as an error case to ensure that a daemon which still
+// has an older image like this on disk can still start, even though the
+// image itself is not usable. See https://github.com/docker/docker/pull/25806.
+const typeLayersWithBase = "layers+base"
+
+// RootFS describes an image's root filesystem.
+// This is currently a placeholder that only supports layers. In the future
+// this can be made into an interface that supports different implementations.
+type RootFS struct {
+	Type    string         `json:"type"`
+	DiffIDs []layer.DiffID `json:"diff_ids,omitempty"`
+}
+
+// NewRootFS returns an empty RootFS struct.
+func NewRootFS() *RootFS {
+	return &RootFS{Type: TypeLayers}
+}
+
+// Append appends a new diffID to rootfs.
+func (r *RootFS) Append(id layer.DiffID) {
+	r.DiffIDs = append(r.DiffIDs, id)
+}
+
+// Clone returns a copy of the RootFS. The DiffIDs slice is copied so that
+// appending to the clone does not alias the original's backing array.
+func (r *RootFS) Clone() *RootFS {
+	newRoot := NewRootFS()
+	newRoot.Type = r.Type
+	newRoot.DiffIDs = make([]layer.DiffID, len(r.DiffIDs))
+	copy(newRoot.DiffIDs, r.DiffIDs)
+	return newRoot
+}
+
+// ChainID returns the ChainID for the top layer in RootFS.
+func (r *RootFS) ChainID() layer.ChainID {
+	if runtime.GOOS == "windows" && r.Type == typeLayersWithBase {
+		logrus.Warnf("Layer type is unsupported on this platform. DiffIDs: '%v'", r.DiffIDs)
+		return ""
+	}
+	return layer.CreateChainID(r.DiffIDs)
+}
diff --git a/vendor/github.com/moby/moby/image/spec/v1.1.md b/vendor/github.com/moby/moby/image/spec/v1.1.md
new file mode 100644
index 000000000..ce761f112
--- /dev/null
+++ b/vendor/github.com/moby/moby/image/spec/v1.1.md
@@ -0,0 +1,637 @@
+# Docker Image Specification v1.1.0
+
+An *Image* is an ordered collection of root filesystem changes and the
+corresponding execution parameters for use within a container runtime. This
+specification outlines the format of these filesystem changes and corresponding
+parameters and describes how to create and use them with a container
+runtime and execution tool.
+
+This version of the image specification was adopted starting in Docker 1.10.
+
+## Terminology
+
+This specification uses the following terms:
+
+<dl>
+    <dt>
+        Layer
+    </dt>
+    <dd>
+        Images are composed of layers. Each layer is a set of filesystem
+        changes. Layers do not have configuration metadata such as environment
+        variables or default arguments - these are properties of the image as a
+        whole rather than any particular layer.
+    </dd>
+    <dt>
+        Image JSON
+    </dt>
+    <dd>
+        Each image has an associated JSON structure which describes some
+        basic information about the image such as date created, author, and the
+        ID of its parent image as well as execution/runtime configuration like
+        its entry point, default arguments, CPU/memory shares, networking, and
+        volumes. The JSON structure also references a cryptographic hash of
+        each layer used by the image, and provides history information for
+        those layers. This JSON is considered to be immutable, because changing
+        it would change the computed ImageID. Changing it means creating a new
+        derived image, instead of changing the existing image.
+    </dd>
+    <dt>
+        Image Filesystem Changeset
+    </dt>
+    <dd>
+        Each layer has an archive of the files which have been added, changed,
+        or deleted relative to its parent layer. Using a layer-based or union
+        filesystem such as AUFS, or by computing the diff from filesystem
+        snapshots, the filesystem changeset can be used to present a series of
+        image layers as if they were one cohesive filesystem.
+    </dd>
+    <dt>
+        Layer DiffID
+    </dt>
+    <dd>
+        Layers are referenced by cryptographic hashes of their serialized
+        representation. This is a SHA256 digest over the tar archive used to
+        transport the layer, represented as a hexadecimal encoding of 256 bits, e.g.,
+        <code>sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9</code>.
+        Layers must be packed and unpacked reproducibly to avoid changing the
+        layer ID, for example by using tar-split to save the tar headers. Note
+        that the digest used as the layer ID is taken over an uncompressed
+        version of the tar.
+    </dd>
+    <dt>
+        Layer ChainID
+    </dt>
+    <dd>
+        For convenience, it is sometimes useful to refer to a stack of layers
+        with a single identifier. This is called a <code>ChainID</code>. For a
+        single layer (or the layer at the bottom of a stack), the
+        <code>ChainID</code> is equal to the layer's <code>DiffID</code>.
+        Otherwise the <code>ChainID</code> is given by the formula:
+        <code>ChainID(layerN) = SHA256hex(ChainID(layerN-1) + " " + DiffID(layerN))</code>.
+        A short sketch of this computation follows this list.
+    </dd>
+    <dt>
+        ImageID
+    </dt>
+    <dd>
+        Each image's ID is given by the SHA256 hash of its configuration JSON. It is
+        represented as a hexadecimal encoding of 256 bits, e.g.,
+        <code>sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9</code>.
+        Since the configuration JSON that gets hashed references hashes of each
+        layer in the image, this formulation of the ImageID makes images
+        content-addressable.
+    </dd>
+    <dt>
+        Tag
+    </dt>
+    <dd>
+        A tag serves to map a descriptive, user-given name to any single image
+        ID. Tag values are limited to the set of characters
+        <code>[a-zA-Z0-9_.-]</code>, except they may not start with a <code>.</code>
+        or <code>-</code> character. Tags are limited to 128 characters.
+    </dd>
+    <dt>
+        Repository
+    </dt>
+    <dd>
+        A collection of tags grouped under a common prefix (the name component
+        before <code>:</code>). For example, in an image tagged with the name
+        <code>my-app:3.1.4</code>, <code>my-app</code> is the Repository
+        component of the name. A repository name is made up of slash-separated
+        name components, optionally prefixed by a DNS hostname. The hostname
+        must comply with standard DNS rules, but may not contain
+        <code>_</code> characters. If a hostname is present, it may optionally
+        be followed by a port number in the format <code>:8080</code>.
+        Name components may contain lowercase characters, digits, and
+        separators. A separator is defined as a period, one or two underscores,
+        or one or more dashes. A name component may not start or end with
+        a separator.
+    </dd>
+</dl>
    + +## Image JSON Description + +Here is an example image JSON file: + +``` +{ + "created": "2015-10-31T22:22:56.015925234Z", + "author": "Alyssa P. Hacker <alyspdev@example.com>", + "architecture": "amd64", + "os": "linux", + "config": { + "User": "alice", + "Memory": 2048, + "MemorySwap": 4096, + "CpuShares": 8, + "ExposedPorts": { + "8080/tcp": {} + }, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "FOO=docker_is_a_really", + "BAR=great_tool_you_know" + ], + "Entrypoint": [ + "/bin/my-app-binary" + ], + "Cmd": [ + "--foreground", + "--config", + "/etc/my-app.d/default.cfg" + ], + "Volumes": { + "/var/job-result-data": {}, + "/var/log/my-app-logs": {}, + }, + "WorkingDir": "/home/alice", + }, + "rootfs": { + "diff_ids": [ + "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1", + "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + ], + "type": "layers" + }, + "history": [ + { + "created": "2015-10-31T22:22:54.690851953Z", + "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /" + }, + { + "created": "2015-10-31T22:22:55.613815829Z", + "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]", + "empty_layer": true + } + ] +} +``` + +Note that image JSON files produced by Docker don't contain formatting +whitespace. It has been added to this example for clarity. + +### Image JSON Field Descriptions + +
+<dl>
+    <dt>
+        created <code>string</code>
+    </dt>
+    <dd>
+        ISO-8601 formatted combined date and time at which the image was
+        created.
+    </dd>
+    <dt>
+        author <code>string</code>
+    </dt>
+    <dd>
+        Gives the name and/or email address of the person or entity which
+        created and is responsible for maintaining the image.
+    </dd>
+    <dt>
+        architecture <code>string</code>
+    </dt>
+    <dd>
+        The CPU architecture which the binaries in this image are built to run
+        on. Possible values include:
+        <ul>
+            <li>386</li>
+            <li>amd64</li>
+            <li>arm</li>
+        </ul>
+        More values may be supported in the future and any of these may or may
+        not be supported by a given container runtime implementation.
+    </dd>
+    <dt>
+        os <code>string</code>
+    </dt>
+    <dd>
+        The name of the operating system which the image is built to run on.
+        Possible values include:
+        <ul>
+            <li>darwin</li>
+            <li>freebsd</li>
+            <li>linux</li>
+        </ul>
+        More values may be supported in the future and any of these may or may
+        not be supported by a given container runtime implementation.
+    </dd>
+    <dt>
+        config <code>struct</code>
+    </dt>
+    <dd>
+        The execution parameters which should be used as a base when running a
+        container using the image. This field can be <code>null</code>, in
+        which case any execution parameters should be specified at creation of
+        the container.
+
+        <h4>Container RunConfig Field Descriptions</h4>
+
+        <dl>
+            <dt>
+                User <code>string</code>
+            </dt>
+            <dd>
+                <p>The username or UID which the process in the container should
+                run as. This acts as a default value to use when the value is
+                not specified when creating a container.</p>
+
+                <p>All of the following are valid:</p>
+
+                <ul>
+                    <li><code>user</code></li>
+                    <li><code>uid</code></li>
+                    <li><code>user:group</code></li>
+                    <li><code>uid:gid</code></li>
+                    <li><code>uid:group</code></li>
+                    <li><code>user:gid</code></li>
+                </ul>
+
+                <p>If <code>group</code>/<code>gid</code> is not specified, the
+                default group and supplementary groups of the given
+                <code>user</code>/<code>uid</code> in <code>/etc/passwd</code>
+                from the container are applied.</p>
+            </dd>
+            <dt>
+                Memory <code>integer</code>
+            </dt>
+            <dd>
+                Memory limit (in bytes). This acts as a default value to use
+                when the value is not specified when creating a container.
+            </dd>
+            <dt>
+                MemorySwap <code>integer</code>
+            </dt>
+            <dd>
+                Total memory usage (memory + swap); set to <code>-1</code> to
+                disable swap. This acts as a default value to use when the
+                value is not specified when creating a container.
+            </dd>
+            <dt>
+                CpuShares <code>integer</code>
+            </dt>
+            <dd>
+                CPU shares (relative weight vs. other containers). This acts as
+                a default value to use when the value is not specified when
+                creating a container.
+            </dd>
+            <dt>
+                ExposedPorts <code>struct</code>
+            </dt>
+            <dd>
+                A set of ports to expose from a container running this image.
+                This JSON structure value is unusual because it is a direct
+                JSON serialization of the Go type
+                <code>map[string]struct{}</code> and is represented in JSON as
+                an object mapping its keys to an empty object. Here is an
+                example:
+
+<pre>{
+    "8080": {},
+    "53/udp": {},
+    "2356/tcp": {}
+}</pre>
+
+                Its keys can be in the format of:
+                <ul>
+                    <li>
+                        <code>"port/tcp"</code>
+                    </li>
+                    <li>
+                        <code>"port/udp"</code>
+                    </li>
+                    <li>
+                        <code>"port"</code>
+                    </li>
+                </ul>
+                with the default protocol being <code>"tcp"</code> if not
+                specified.
+
+                These values act as defaults and are merged with any specified
+                when creating a container.
+            </dd>
+            <dt>
+                Env <code>array of strings</code>
+            </dt>
+            <dd>
+                Entries are in the format of <code>VARNAME="var value"</code>.
+                These values act as defaults and are merged with any specified
+                when creating a container.
+            </dd>
+            <dt>
+                Entrypoint <code>array of strings</code>
+            </dt>
+            <dd>
+                A list of arguments to use as the command to execute when the
+                container starts. This value acts as a default and is replaced
+                by an entrypoint specified when creating a container.
+            </dd>
+            <dt>
+                Cmd <code>array of strings</code>
+            </dt>
+            <dd>
+                Default arguments to the entry point of the container. These
+                values act as defaults and are replaced with any specified when
+                creating a container. If an <code>Entrypoint</code> value is
+                not specified, then the first entry of the <code>Cmd</code>
+                array should be interpreted as the executable to run.
+            </dd>
+            <dt>
+                Volumes <code>struct</code>
+            </dt>
+            <dd>
+                A set of directories which should be created as data volumes in
+                a container running this image. This JSON structure value is
+                unusual because it is a direct JSON serialization of the Go
+                type <code>map[string]struct{}</code> and is represented in
+                JSON as an object mapping its keys to an empty object. Here is
+                an example:
+
+<pre>{
+    "/var/my-app-data/": {},
+    "/etc/some-config.d/": {},
+}</pre>
+            </dd>
+            <dt>
+                WorkingDir <code>string</code>
+            </dt>
+            <dd>
+                Sets the current working directory of the entry point process
+                in the container. This value acts as a default and is replaced
+                by a working directory specified when creating a container.
+            </dd>
+        </dl>
+    </dd>
+    <dt>
+        rootfs <code>struct</code>
+    </dt>
+    <dd>
+        The rootfs key references the layer content addresses used by the
+        image. This makes the image config hash depend on the filesystem hash.
+        rootfs has two subkeys:
+
+        <ul>
+            <li>
+                <code>type</code> is usually set to <code>layers</code>.
+            </li>
+            <li>
+                <code>diff_ids</code> is an array of layer content hashes
+                (<code>DiffIDs</code>), in order from bottom-most to top-most.
+            </li>
+        </ul>
+
+        Here is an example rootfs section:
+
+<pre>"rootfs": {
+  "diff_ids": [
+    "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1",
+    "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef",
+    "sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49"
+  ],
+  "type": "layers"
+}</pre>
+    </dd>
+    <dt>
+        history <code>struct</code>
+    </dt>
+    <dd>
+        history is an array of objects describing the history of each layer.
+        The array is ordered from bottom-most layer to top-most layer. The
+        object has the following fields.
+
+        <ul>
+            <li>
+                <code>created</code>: Creation time, expressed as a ISO-8601 formatted
+                combined date and time
+            </li>
+            <li>
+                <code>author</code>: The author of the build point
+            </li>
+            <li>
+                <code>created_by</code>: The command which created the layer
+            </li>
+            <li>
+                <code>comment</code>: A custom message set when creating the layer
+            </li>
+            <li>
+                <code>empty_layer</code>: This field is used to mark if the history
+                item created a filesystem diff. It is set to true if this history
+                item doesn't correspond to an actual layer in the rootfs section
+                (for example, a command like ENV which results in no change to the
+                filesystem).
+            </li>
+        </ul>
+
+        Here is an example history section:
+
+<pre>"history": [
+  {
+    "created": "2015-10-31T22:22:54.690851953Z",
+    "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"
+  },
+  {
+    "created": "2015-10-31T22:22:55.613815829Z",
+    "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]",
+    "empty_layer": true
+  }
+]</pre>
+    </dd>
+</dl>
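+
+As a non-normative illustration of the `map[string]struct{}` convention used
+by `ExposedPorts` and `Volumes` above, the following Go sketch (the type name
+`imageConfig` is ours) shows the resulting JSON shape:
+
+```
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// imageConfig holds just the two set-valued fields; Go serializes a
+// map[string]struct{} as a JSON object whose keys map to empty objects.
+type imageConfig struct {
+	ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"`
+	Volumes      map[string]struct{} `json:"Volumes,omitempty"`
+}
+
+func main() {
+	cfg := imageConfig{
+		ExposedPorts: map[string]struct{}{"8080/tcp": {}, "53/udp": {}},
+		Volumes:      map[string]struct{}{"/var/my-app-data/": {}},
+	}
+	out, err := json.Marshal(cfg)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(string(out))
+	// {"ExposedPorts":{"53/udp":{},"8080/tcp":{}},"Volumes":{"/var/my-app-data/":{}}}
+}
+```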
+
+
+Any extra fields in the Image JSON struct are considered implementation
+specific and should be ignored by any implementations which are unable to
+interpret them.
+
+## Creating an Image Filesystem Changeset
+
+An example of creating an Image Filesystem Changeset follows.
+
+An image root filesystem is first created as an empty directory. Here is the
+initial empty directory structure for a changeset using the
+randomly-generated directory name `c3167915dc9d` ([actual layer DiffIDs are
+generated based on the content](#id_desc)).
+
+```
+c3167915dc9d/
+```
+
+Files and directories are then created:
+
+```
+c3167915dc9d/
+    etc/
+        my-app-config
+    bin/
+        my-app-binary
+        my-app-tools
+```
+
+The `c3167915dc9d` directory is then committed as a plain Tar archive with
+entries for the following files:
+
+```
+etc/my-app-config
+bin/my-app-binary
+bin/my-app-tools
+```
+
+To make changes to the filesystem of this container image, create a new
+directory, such as `f60c56784b83`, and initialize it with a snapshot of the
+parent image's root filesystem, so that the directory is identical to that
+of `c3167915dc9d`. NOTE: a copy-on-write or union filesystem can make this very
+efficient:
+
+```
+f60c56784b83/
+    etc/
+        my-app-config
+    bin/
+        my-app-binary
+        my-app-tools
+```
+
+This example change is going to add a configuration directory at `/etc/my-app.d`
+which contains a default config file. There's also a change to the
+`my-app-tools` binary to handle the config layout change. The `f60c56784b83`
+directory then looks like this:
+
+```
+f60c56784b83/
+    etc/
+        my-app.d/
+            default.cfg
+    bin/
+        my-app-binary
+        my-app-tools
+```
+
+This reflects the removal of `/etc/my-app-config` and creation of a file and
+directory at `/etc/my-app.d/default.cfg`. `/bin/my-app-tools` has also been
+replaced with an updated version. Before committing this directory to a
+changeset, because it has a parent image, it is first compared with the
+directory tree of the parent snapshot, `c3167915dc9d`, looking for files and
+directories that have been added, modified, or removed. The following changeset
+is found:
+
+```
+Added:      /etc/my-app.d/default.cfg
+Modified:   /bin/my-app-tools
+Deleted:    /etc/my-app-config
+```
+
+A Tar Archive is then created which contains *only* this changeset: The added
+and modified files and directories in their entirety, and for each deleted item
+an entry for an empty file at the same location but with the basename of the
+deleted file or directory prefixed with `.wh.`. The filenames prefixed with
+`.wh.` are known as "whiteout" files. NOTE: For this reason, it is not possible
+to create an image root filesystem which contains a file or directory with a
+name beginning with `.wh.`. The resulting Tar archive for `f60c56784b83` has
+the following entries:
+
+```
+/etc/my-app.d/default.cfg
+/bin/my-app-tools
+/etc/.wh.my-app-config
+```
+
+Any given image is likely to be composed of several of these Image Filesystem
+Changeset tar archives.
+
+## Combined Image JSON + Filesystem Changeset Format
+
+There is also a format for a single archive which contains complete information
+about an image, including:
+
+ - repository names/tags
+ - image configuration JSON file
+ - the tar archives of each layer's filesystem changeset
+
+For example, here's what the full archive of `library/busybox` is (displayed in
+`tree` format):
+
+```
+.
+├── 47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb.json +├── 5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a +│   ├── VERSION +│   ├── json +│   └── layer.tar +├── a65da33792c5187473faa80fa3e1b975acba06712852d1dea860692ccddf3198 +│   ├── VERSION +│   ├── json +│   └── layer.tar +├── manifest.json +└── repositories +``` + +There is a directory for each layer in the image. Each directory is named with +a 64 character hex name that is deterministically generated from the layer +information. These names are not necessarily layer DiffIDs or ChainIDs. Each of +these directories contains 3 files: + + * `VERSION` - The schema version of the `json` file + * `json` - The legacy JSON metadata for an image layer. In this version of + the image specification, layers don't have JSON metadata, but in + [version 1](v1.md), they did. A file is created for each layer in the + v1 format for backward compatibility. + * `layer.tar` - The Tar archive of the filesystem changeset for an image + layer. + +Note that this directory layout is only important for backward compatibility. +Current implementations use the paths specified in `manifest.json`. + +The content of the `VERSION` files is simply the semantic version of the JSON +metadata schema: + +``` +1.0 +``` + +The `repositories` file is another JSON file which describes names/tags: + +``` +{ + "busybox":{ + "latest":"5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a" + } +} +``` + +Every key in this object is the name of a repository, and maps to a collection +of tag suffixes. Each tag maps to the ID of the image represented by that tag. +This file is only used for backwards compatibility. Current implementations use +the `manifest.json` file instead. + +The `manifest.json` file provides the image JSON for the top-level image, and +optionally for parent images that this image was derived from. It consists of +an array of metadata entries: + +``` +[ + { + "Config": "47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb.json", + "RepoTags": ["busybox:latest"], + "Layers": [ + "a65da33792c5187473faa80fa3e1b975acba06712852d1dea860692ccddf3198/layer.tar", + "5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a/layer.tar" + ] + } +] +``` + +There is an entry in the array for each image. + +The `Config` field references another file in the tar which includes the image +JSON for this image. + +The `RepoTags` field lists references pointing to this image. + +The `Layers` field points to the filesystem changeset tars. + +An optional `Parent` field references the imageID of the parent image. This +parent must be part of the same `manifest.json` file. + +This file shouldn't be confused with the distribution manifest, used to push +and pull images. + +Generally, implementations that support this version of the spec will use +the `manifest.json` file if available, and older implementations will use the +legacy `*/json` files and `repositories`. diff --git a/vendor/github.com/moby/moby/image/spec/v1.2.md b/vendor/github.com/moby/moby/image/spec/v1.2.md new file mode 100644 index 000000000..789680c7a --- /dev/null +++ b/vendor/github.com/moby/moby/image/spec/v1.2.md @@ -0,0 +1,696 @@ +# Docker Image Specification v1.2.0 + +An *Image* is an ordered collection of root filesystem changes and the +corresponding execution parameters for use within a container runtime. 
This
+specification outlines the format of these filesystem changes and corresponding
+parameters and describes how to create and use them with a container
+runtime and execution tool.
+
+This version of the image specification was adopted starting in Docker 1.12.
+
+## Terminology
+
+This specification uses the following terms:
+
+ * **Layer** - Images are composed of layers. Each layer is a set of
+   filesystem changes. Layers do not have configuration metadata such as
+   environment variables or default arguments - these are properties of the
+   image as a whole rather than of any particular layer.
+
+ * **Image JSON** - Each image has an associated JSON structure which
+   describes some basic information about the image such as date created,
+   author, and the ID of its parent image, as well as execution/runtime
+   configuration like its entry point, default arguments, CPU/memory shares,
+   networking, and volumes. The JSON structure also references a
+   cryptographic hash of each layer used by the image, and provides history
+   information for those layers. This JSON is considered to be immutable,
+   because changing it would change the computed ImageID. Changing it means
+   creating a new derived image, instead of changing the existing image.
+
+ * **Image Filesystem Changeset** - Each layer has an archive of the files
+   which have been added, changed, or deleted relative to its parent layer.
+   Using a layer-based or union filesystem such as AUFS, or by computing the
+   diff from filesystem snapshots, the filesystem changeset can be used to
+   present a series of image layers as if they were one cohesive filesystem.
+
+ * **Layer DiffID** - Layers are referenced by cryptographic hashes of their
+   serialized representation. This is a SHA256 digest over the tar archive
+   used to transport the layer, represented as a hexadecimal encoding of 256
+   bits, e.g.,
+   `sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9`.
+   Layers must be packed and unpacked reproducibly to avoid changing the
+   layer ID, for example by using tar-split to save the tar headers. Note
+   that the digest used as the layer ID is taken over an uncompressed
+   version of the tar.
+
+ * **Layer ChainID** - For convenience, it is sometimes useful to refer to a
+   stack of layers with a single identifier. This is called a `ChainID`. For
+   a single layer (or the layer at the bottom of a stack), the `ChainID` is
+   equal to the layer's `DiffID`. Otherwise the `ChainID` is given by the
+   formula:
+   `ChainID(layerN) = SHA256hex(ChainID(layerN-1) + " " + DiffID(layerN))`
+   (see the sketch after this list).
+
+ * **ImageID** - Each image's ID is given by the SHA256 hash of its
+   configuration JSON. It is represented as a hexadecimal encoding of 256
+   bits, e.g.,
+   `sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9`.
+   Since the configuration JSON that gets hashed references hashes of each
+   layer in the image, this formulation of the ImageID makes images
+   content-addressable.
+
+ * **Tag** - A tag serves to map a descriptive, user-given name to any
+   single image ID. Tag values are limited to the set of characters
+   `[a-zA-Z0-9_.-]`, except they may not start with a `.` or `-` character.
+   Tags are limited to 128 characters.
+
+ * **Repository** - A collection of tags grouped under a common prefix (the
+   name component before `:`). For example, in an image tagged with the name
+   `my-app:3.1.4`, `my-app` is the Repository component of the name. A
+   repository name is made up of slash-separated name components, optionally
+   prefixed by a DNS hostname. The hostname must comply with standard DNS
+   rules, but may not contain `_` characters. If a hostname is present, it
+   may optionally be followed by a port number in the format `:8080`. Name
+   components may contain lowercase characters, digits, and separators. A
+   separator is defined as a period, one or two underscores, or one or more
+   dashes. A name component may not start or end with a separator.
+
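+Here is a minimal Go sketch of the `ChainID` recurrence and `ImageID`
+computation defined above (the helper names are illustrative, not part of
+the specification):
+
+```
+package main
+
+import (
+	"crypto/sha256"
+	"fmt"
+)
+
+// chainID folds a stack of DiffIDs (bottom-most first) into one identifier:
+// ChainID(L0) = DiffID(L0), and for each higher layer
+// ChainID(Ln) = SHA256hex(ChainID(Ln-1) + " " + DiffID(Ln)).
+func chainID(diffIDs []string) string {
+	if len(diffIDs) == 0 {
+		return ""
+	}
+	id := diffIDs[0]
+	for _, diff := range diffIDs[1:] {
+		sum := sha256.Sum256([]byte(id + " " + diff))
+		id = fmt.Sprintf("sha256:%x", sum)
+	}
+	return id
+}
+
+// imageID is simply the SHA256 digest of the raw configuration JSON bytes.
+func imageID(configJSON []byte) string {
+	return fmt.Sprintf("sha256:%x", sha256.Sum256(configJSON))
+}
+
+func main() {
+	fmt.Println(chainID([]string{
+		"sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1",
+		"sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef",
+	}))
+	fmt.Println(imageID([]byte(`{"architecture":"amd64"}`)))
+}
+```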
+
+## Image JSON Description
+
+Here is an example image JSON file:
+
+```
+{
+    "created": "2015-10-31T22:22:56.015925234Z",
+    "author": "Alyssa P. Hacker <alyspdev@example.com>",
+    "architecture": "amd64",
+    "os": "linux",
+    "config": {
+        "User": "alice",
+        "Memory": 2048,
+        "MemorySwap": 4096,
+        "CpuShares": 8,
+        "ExposedPorts": {
+            "8080/tcp": {}
+        },
+        "Env": [
+            "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+            "FOO=docker_is_a_really",
+            "BAR=great_tool_you_know"
+        ],
+        "Entrypoint": [
+            "/bin/my-app-binary"
+        ],
+        "Cmd": [
+            "--foreground",
+            "--config",
+            "/etc/my-app.d/default.cfg"
+        ],
+        "Volumes": {
+            "/var/job-result-data": {},
+            "/var/log/my-app-logs": {}
+        },
+        "WorkingDir": "/home/alice"
+    },
+    "rootfs": {
+        "diff_ids": [
+            "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1",
+            "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
+        ],
+        "type": "layers"
+    },
+    "history": [
+        {
+            "created": "2015-10-31T22:22:54.690851953Z",
+            "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"
+        },
+        {
+            "created": "2015-10-31T22:22:55.613815829Z",
+            "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]",
+            "empty_layer": true
+        }
+    ]
+}
+```
+
+Note that image JSON files produced by Docker don't contain formatting
+whitespace. It has been added to this example for clarity.
+
+### Image JSON Field Descriptions
+
+ * **created** (string) - ISO-8601 formatted combined date and time at which
+   the image was created.
+
+ * **author** (string) - Gives the name and/or email address of the person
+   or entity which created and is responsible for maintaining the image.
+
+ * **architecture** (string) - The CPU architecture which the binaries in
+   this image are built to run on. Possible values include `386`, `amd64`,
+   and `arm`. More values may be supported in the future, and any of these
+   may or may not be supported by a given container runtime implementation.
+
+ * **os** (string) - The name of the operating system which the image is
+   built to run on. Possible values include `darwin`, `freebsd`, and
+   `linux`. More values may be supported in the future, and any of these may
+   or may not be supported by a given container runtime implementation.
+
+ * **config** (struct) - The execution parameters which should be used as a
+   base when running a container using the image. This field can be `null`,
+   in which case any execution parameters should be specified at creation of
+   the container. The container RunConfig fields are:
+
+    * **User** (string) - The username or UID which the process in the
+      container should run as. This acts as a default value to use when the
+      value is not specified when creating a container. All of the following
+      are valid: `user`, `uid`, `user:group`, `uid:gid`, `uid:group`,
+      `user:gid`. If `group`/`gid` is not specified, the default group and
+      supplementary groups of the given `user`/`uid` in `/etc/passwd` from
+      the container are applied.
+
+    * **Memory** (integer) - Memory limit (in bytes). This acts as a default
+      value to use when the value is not specified when creating a
+      container.
+
+    * **MemorySwap** (integer) - Total memory usage (memory + swap); set to
+      `-1` to disable swap. This acts as a default value to use when the
+      value is not specified when creating a container.
+
+    * **CpuShares** (integer) - CPU shares (relative weight vs. other
+      containers). This acts as a default value to use when the value is not
+      specified when creating a container.
+
+    * **ExposedPorts** (struct) - A set of ports to expose from a container
+      running this image. This JSON structure value is unusual because it is
+      a direct JSON serialization of the Go type `map[string]struct{}` and
+      is represented in JSON as an object mapping its keys to an empty
+      object (see the sketch after this list). Here is an example:
+
+      ```
+      {
+          "8080": {},
+          "53/udp": {},
+          "2356/tcp": {}
+      }
+      ```
+
+      Its keys can be in the format of `port/tcp`, `port/udp`, or `port`,
+      with the default protocol being `tcp` if not specified. These values
+      act as defaults and are merged with any specified when creating a
+      container.
+
+    * **Env** (array of strings) - Entries are in the format of
+      `VARNAME="var value"`. These values act as defaults and are merged
+      with any specified when creating a container.
+
+    * **Entrypoint** (array of strings) - A list of arguments to use as the
+      command to execute when the container starts. This value acts as a
+      default and is replaced by an entrypoint specified when creating a
+      container.
+
+    * **Cmd** (array of strings) - Default arguments to the entry point of
+      the container. These values act as defaults and are replaced with any
+      specified when creating a container. If an `Entrypoint` value is not
+      specified, then the first entry of the `Cmd` array should be
+      interpreted as the executable to run.
+
+    * **Healthcheck** (struct) - A test to perform to determine whether the
+      container is healthy. Here is an example:
+
+      ```
+      {
+          "Test": [
+              "CMD-SHELL",
+              "/usr/bin/check-health localhost"
+          ],
+          "Interval": 30000000000,
+          "Timeout": 10000000000,
+          "Retries": 3
+      }
+      ```
+
+      The object has the following fields:
+
+       * **Test** (array of strings) - The test to perform to check that the
+         container is healthy. The options are: `[]` (inherit healthcheck
+         from base image), `["NONE"]` (disable healthcheck),
+         `["CMD", arg1, arg2, ...]` (exec arguments directly), or
+         `["CMD-SHELL", command]` (run command with system's default shell).
+         The test command should exit with a status of 0 if the container is
+         healthy, or with 1 if it is unhealthy.
+
+       * **Interval** (integer) - Number of nanoseconds to wait between
+         probe attempts.
+
+       * **Timeout** (integer) - Number of nanoseconds to wait before
+         considering the check to have hung.
+
+       * **Retries** (integer) - The number of consecutive failures needed
+         to consider a container as unhealthy.
+
+      In each case, the field can be omitted to indicate that the value
+      should be inherited from the base layer. These values act as defaults
+      and are merged with any specified when creating a container.
+
+    * **Volumes** (struct) - A set of directories which should be created as
+      data volumes in a container running this image. Like `ExposedPorts`,
+      this is a direct JSON serialization of the Go type
+      `map[string]struct{}`, represented in JSON as an object mapping its
+      keys to an empty object. Here is an example:
+
+      ```
+      {
+          "/var/my-app-data/": {},
+          "/etc/some-config.d/": {}
+      }
+      ```
+
+    * **WorkingDir** (string) - Sets the current working directory of the
+      entry point process in the container. This value acts as a default and
+      is replaced by a working directory specified when creating a
+      container.
+
+ * **rootfs** (struct) - The rootfs key references the layer content
+   addresses used by the image. This makes the image config hash depend on
+   the filesystem hash. `rootfs` has two subkeys:
+
+    * `type` is usually set to `layers`.
+    * `diff_ids` is an array of layer content hashes (`DiffIDs`), in order
+      from bottom-most to top-most.
+
+   Here is an example rootfs section:
+
+   ```
+   "rootfs": {
+       "diff_ids": [
+           "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1",
+           "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef",
+           "sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49"
+       ],
+       "type": "layers"
+   }
+   ```
+
+ * **history** (array of structs) - `history` is an array of objects
+   describing the history of each layer. The array is ordered from
+   bottom-most layer to top-most layer. Each object has the following
+   fields:
+
+    * `created`: Creation time, expressed as an ISO-8601 formatted combined
+      date and time
+    * `author`: The author of the build point
+    * `created_by`: The command which created the layer
+    * `comment`: A custom message set when creating the layer
+    * `empty_layer`: This field is used to mark whether the history item
+      created a filesystem diff. It is set to true if this history item
+      doesn't correspond to an actual layer in the rootfs section (for
+      example, a command like ENV which results in no change to the
+      filesystem).
+
+   Here is an example history section:
+
+   ```
+   "history": [
+       {
+           "created": "2015-10-31T22:22:54.690851953Z",
+           "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"
+       },
+       {
+           "created": "2015-10-31T22:22:55.613815829Z",
+           "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]",
+           "empty_layer": true
+       }
+   ]
+   ```
+
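+Because `ExposedPorts` and `Volumes` are direct serializations of Go's
+`map[string]struct{}`, the round trip is trivial in Go. A minimal sketch,
+not part of the specification:
+
+```
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+func main() {
+	// Keys map to empty JSON objects; the set semantics live in the keys.
+	ports := map[string]struct{}{
+		"8080/tcp": {},
+		"53/udp":   {},
+	}
+	out, err := json.Marshal(ports)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(string(out)) // {"53/udp":{},"8080/tcp":{}}
+}
+```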
+
+Any extra fields in the Image JSON struct are considered implementation
+specific and should be ignored by any implementations which are unable to
+interpret them.
+
+## Creating an Image Filesystem Changeset
+
+An example of creating an Image Filesystem Changeset follows.
+
+An image root filesystem is first created as an empty directory. Here is the
+initial empty directory structure for a changeset using the
+randomly-generated directory name `c3167915dc9d` ([actual layer DiffIDs are
+generated based on the content](#id_desc)).
+
+```
+c3167915dc9d/
+```
+
+Files and directories are then created:
+
+```
+c3167915dc9d/
+    etc/
+        my-app-config
+    bin/
+        my-app-binary
+        my-app-tools
+```
+
+The `c3167915dc9d` directory is then committed as a plain Tar archive with
+entries for the following files:
+
+```
+etc/my-app-config
+bin/my-app-binary
+bin/my-app-tools
+```
+
+To make changes to the filesystem of this container image, create a new
+directory, such as `f60c56784b83`, and initialize it with a snapshot of the
+parent image's root filesystem, so that the directory is identical to that
+of `c3167915dc9d`. NOTE: a copy-on-write or union filesystem can make this
+very efficient:
+
+```
+f60c56784b83/
+    etc/
+        my-app-config
+    bin/
+        my-app-binary
+        my-app-tools
+```
+
+This example change is going to add a configuration directory at
+`/etc/my-app.d` which contains a default config file. There's also a change
+to the `my-app-tools` binary to handle the config layout change. The
+`f60c56784b83` directory then looks like this:
+
+```
+f60c56784b83/
+    etc/
+        my-app.d/
+            default.cfg
+    bin/
+        my-app-binary
+        my-app-tools
+```
+
+This reflects the removal of `/etc/my-app-config` and creation of a file and
+directory at `/etc/my-app.d/default.cfg`. `/bin/my-app-tools` has also been
+replaced with an updated version. Because this image has a parent, before
+committing the `f60c56784b83` directory to a changeset it is first compared
+with the directory tree of the parent snapshot, `c3167915dc9d`, looking for
+files and directories that have been added, modified, or removed. The
+following changeset is found:
+
+```
+Added:      /etc/my-app.d/default.cfg
+Modified:   /bin/my-app-tools
+Deleted:    /etc/my-app-config
+```
+
+A Tar Archive is then created which contains *only* this changeset: the
+added and modified files and directories in their entirety, and for each
+deleted item an entry for an empty file at the same location but with the
+basename of the deleted file or directory prefixed with `.wh.`. The
+filenames prefixed with `.wh.` are known as "whiteout" files. NOTE: For this
+reason, it is not possible to create an image root filesystem which contains
+a file or directory with a name beginning with `.wh.`. The resulting Tar
+archive for `f60c56784b83` has the following entries:
+
+```
+/etc/my-app.d/default.cfg
+/bin/my-app-tools
+/etc/.wh.my-app-config
+```
+
+Any given image is likely to be composed of several of these Image
+Filesystem Changeset tar archives.
+
+## Combined Image JSON + Filesystem Changeset Format
+
+There is also a format for a single archive which contains complete
+information about an image, including:
+
+ - repository names/tags
+ - image configuration JSON file
+ - all tar archives of each layer's filesystem changeset
+
+For example, here's what the full archive of `library/busybox` is (displayed
+in `tree` format):
+
+```
+.
+├── 47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb.json +├── 5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a +│   ├── VERSION +│   ├── json +│   └── layer.tar +├── a65da33792c5187473faa80fa3e1b975acba06712852d1dea860692ccddf3198 +│   ├── VERSION +│   ├── json +│   └── layer.tar +├── manifest.json +└── repositories +``` + +There is a directory for each layer in the image. Each directory is named with +a 64 character hex name that is deterministically generated from the layer +information. These names are not necessarily layer DiffIDs or ChainIDs. Each of +these directories contains 3 files: + + * `VERSION` - The schema version of the `json` file + * `json` - The legacy JSON metadata for an image layer. In this version of + the image specification, layers don't have JSON metadata, but in + [version 1](v1.md), they did. A file is created for each layer in the + v1 format for backward compatibility. + * `layer.tar` - The Tar archive of the filesystem changeset for an image + layer. + +Note that this directory layout is only important for backward compatibility. +Current implementations use the paths specified in `manifest.json`. + +The content of the `VERSION` files is simply the semantic version of the JSON +metadata schema: + +``` +1.0 +``` + +The `repositories` file is another JSON file which describes names/tags: + +``` +{ + "busybox":{ + "latest":"5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a" + } +} +``` + +Every key in this object is the name of a repository, and maps to a collection +of tag suffixes. Each tag maps to the ID of the image represented by that tag. +This file is only used for backwards compatibility. Current implementations use +the `manifest.json` file instead. + +The `manifest.json` file provides the image JSON for the top-level image, and +optionally for parent images that this image was derived from. It consists of +an array of metadata entries: + +``` +[ + { + "Config": "47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb.json", + "RepoTags": ["busybox:latest"], + "Layers": [ + "a65da33792c5187473faa80fa3e1b975acba06712852d1dea860692ccddf3198/layer.tar", + "5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a/layer.tar" + ] + } +] +``` + +There is an entry in the array for each image. + +The `Config` field references another file in the tar which includes the image +JSON for this image. + +The `RepoTags` field lists references pointing to this image. + +The `Layers` field points to the filesystem changeset tars. + +An optional `Parent` field references the imageID of the parent image. This +parent must be part of the same `manifest.json` file. + +This file shouldn't be confused with the distribution manifest, used to push +and pull images. + +Generally, implementations that support this version of the spec will use +the `manifest.json` file if available, and older implementations will use the +legacy `*/json` files and `repositories`. diff --git a/vendor/github.com/moby/moby/image/spec/v1.md b/vendor/github.com/moby/moby/image/spec/v1.md new file mode 100644 index 000000000..fce3a06e3 --- /dev/null +++ b/vendor/github.com/moby/moby/image/spec/v1.md @@ -0,0 +1,573 @@ +# Docker Image Specification v1.0.0 + +An *Image* is an ordered collection of root filesystem changes and the +corresponding execution parameters for use within a container runtime. 
This
+specification outlines the format of these filesystem changes and
+corresponding parameters, and describes how to create and use them with a
+container runtime and execution tool.
+
+## Terminology
+
+This specification uses the following terms:
+
+ * **Layer** - Images are composed of layers. *Image layer* is a general
+   term which may be used to refer to one or both of the following:
+
+    1. The metadata for the layer, described in the JSON format.
+    2. The filesystem changes described by a layer.
+
+   To refer to the former you may use the term *Layer JSON* or *Layer
+   Metadata*. To refer to the latter you may use the term *Image Filesystem
+   Changeset* or *Image Diff*.
+
+ * **Image JSON** - Each layer has an associated JSON structure which
+   describes some basic information about the image such as date created,
+   author, and the ID of its parent image, as well as execution/runtime
+   configuration like its entry point, default arguments, CPU/memory shares,
+   networking, and volumes.
+
+ * **Image Filesystem Changeset** - Each layer has an archive of the files
+   which have been added, changed, or deleted relative to its parent layer.
+   Using a layer-based or union filesystem such as AUFS, or by computing the
+   diff from filesystem snapshots, the filesystem changeset can be used to
+   present a series of image layers as if they were one cohesive filesystem.
+
+ * **Image ID** - Each layer is given an ID upon its creation. It is
+   represented as a hexadecimal encoding of 256 bits, e.g.,
+   `a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9`. Image
+   IDs should be sufficiently random so as to be globally unique. 32 bytes
+   read from `/dev/urandom` is sufficient for all practical purposes (see
+   the sketch after this list). Alternatively, an image ID may be derived as
+   a cryptographic hash of image contents, as the result is considered
+   indistinguishable from random. The choice is left up to implementors.
+
+ * **Image Parent** - Most layer metadata structs contain a `parent` field
+   which refers to the Image from which another directly descends. An image
+   contains a separate JSON metadata file and set of changes relative to the
+   filesystem of its parent image. *Image Ancestor* and *Image Descendant*
+   are also common terms.
+
+ * **Image Checksum** - Layer metadata structs contain a cryptographic hash
+   of the contents of the layer's filesystem changeset. Though the set of
+   changes exists as a simple Tar archive, two archives with identical
+   filenames and content will have different SHA digests if the last-access
+   or last-modified times of any entries differ. For this reason, image
+   checksums are generated using the TarSum algorithm, which produces a
+   cryptographic hash of file contents and selected headers only. Details of
+   this algorithm are described in the separate TarSum specification.
+
+ * **Tag** - A tag serves to map a descriptive, user-given name to any
+   single image ID. An image name suffix (the name component after `:`) is
+   often referred to as a tag as well, though it strictly refers to the full
+   name of an image. Acceptable values for a tag suffix are implementation
+   specific, but they SHOULD be limited to the set of alphanumeric
+   characters `[a-zA-Z0-9]` and punctuation characters `[._-]`, and MUST NOT
+   contain a `:` character.
+
+ * **Repository** - A collection of tags grouped under a common prefix (the
+   name component before `:`). For example, in an image tagged with the name
+   `my-app:3.1.4`, `my-app` is the Repository component of the name.
+   Acceptable values for a repository name are implementation specific, but
+   they SHOULD be limited to the set of alphanumeric characters
+   `[a-zA-Z0-9]` and punctuation characters `[._-]`; however, it MAY contain
+   additional `/` and `:` characters for organizational purposes, with the
+   last `:` character being interpreted as dividing the repository component
+   of the name from the tag suffix component.
+
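+A minimal Go sketch of the random ID generation described above (the helper
+name is illustrative, not part of the specification):
+
+```
+package main
+
+import (
+	"crypto/rand"
+	"encoding/hex"
+	"fmt"
+)
+
+// newImageID returns a random 256-bit ID, hex encoded, as suggested above.
+func newImageID() (string, error) {
+	b := make([]byte, 32) // 256 bits from the system's CSPRNG
+	if _, err := rand.Read(b); err != nil {
+		return "", err
+	}
+	return hex.EncodeToString(b), nil
+}
+
+func main() {
+	id, err := newImageID()
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(id) // e.g. a9561eb1b190625c...
+}
+```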
+
+## Image JSON Description
+
+Here is an example image JSON file:
+
+```
+{
+    "id": "a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9",
+    "parent": "c6e3cedcda2e3982a1a6760e178355e8e65f7b80e4e5248743fa3549d284e024",
+    "checksum": "tarsum.v1+sha256:e58fcf7418d2390dec8e8fb69d88c06ec07039d651fedc3aa72af9972e7d046b",
+    "created": "2014-10-13T21:19:18.674353812Z",
+    "author": "Alyssa P. Hacker <alyspdev@example.com>",
+    "architecture": "amd64",
+    "os": "linux",
+    "Size": 271828,
+    "config": {
+        "User": "alice",
+        "Memory": 2048,
+        "MemorySwap": 4096,
+        "CpuShares": 8,
+        "ExposedPorts": {
+            "8080/tcp": {}
+        },
+        "Env": [
+            "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+            "FOO=docker_is_a_really",
+            "BAR=great_tool_you_know"
+        ],
+        "Entrypoint": [
+            "/bin/my-app-binary"
+        ],
+        "Cmd": [
+            "--foreground",
+            "--config",
+            "/etc/my-app.d/default.cfg"
+        ],
+        "Volumes": {
+            "/var/job-result-data": {},
+            "/var/log/my-app-logs": {}
+        },
+        "WorkingDir": "/home/alice"
+    }
+}
+```
+
+### Image JSON Field Descriptions
+
+ * **id** (string) - Randomly generated, 256-bit, hexadecimal encoded.
+   Uniquely identifies the image.
+
+ * **parent** (string) - ID of the parent image. If there is no parent
+   image, then this field should be omitted. A collection of images may
+   share many of the same ancestor layers. This organizational structure is
+   strictly a tree, with any one layer having either no parent or a single
+   parent and zero or more descendant layers. Cycles are not allowed, and
+   implementations should be careful to avoid creating them or iterating
+   through a cycle indefinitely.
+
+ * **created** (string) - ISO-8601 formatted combined date and time at which
+   the image was created.
+
+ * **author** (string) - Gives the name and/or email address of the person
+   or entity which created and is responsible for maintaining the image.
+
+ * **architecture** (string) - The CPU architecture which the binaries in
+   this image are built to run on. Possible values include `386`, `amd64`,
+   and `arm`. More values may be supported in the future, and any of these
+   may or may not be supported by a given container runtime implementation.
+
+ * **os** (string) - The name of the operating system which the image is
+   built to run on. Possible values include `darwin`, `freebsd`, and
+   `linux`. More values may be supported in the future, and any of these may
+   or may not be supported by a given container runtime implementation.
+
+ * **checksum** (string) - Image Checksum of the filesystem changeset
+   associated with the image layer.
+
+ * **Size** (integer) - The size in bytes of the filesystem changeset
+   associated with the image layer.
+
+ * **config** (struct) - The execution parameters which should be used as a
+   base when running a container using the image. This field can be `null`,
+   in which case any execution parameters should be specified at creation of
+   the container. The container RunConfig fields are:
+
+    * **User** (string) - The username or UID which the process in the
+      container should run as. This acts as a default value to use when the
+      value is not specified when creating a container. All of the following
+      are valid: `user`, `uid`, `user:group`, `uid:gid`, `uid:group`,
+      `user:gid`. If `group`/`gid` is not specified, the default group and
+      supplementary groups of the given `user`/`uid` in `/etc/passwd` from
+      the container are applied.
+
+    * **Memory** (integer) - Memory limit (in bytes). This acts as a default
+      value to use when the value is not specified when creating a
+      container.
+
+    * **MemorySwap** (integer) - Total memory usage (memory + swap); set to
+      `-1` to disable swap. This acts as a default value to use when the
+      value is not specified when creating a container.
+
+    * **CpuShares** (integer) - CPU shares (relative weight vs. other
+      containers). This acts as a default value to use when the value is not
+      specified when creating a container.
+
+    * **ExposedPorts** (struct) - A set of ports to expose from a container
+      running this image. This JSON structure value is unusual because it is
+      a direct JSON serialization of the Go type `map[string]struct{}` and
+      is represented in JSON as an object mapping its keys to an empty
+      object. Here is an example:
+
+      ```
+      {
+          "8080": {},
+          "53/udp": {},
+          "2356/tcp": {}
+      }
+      ```
+
+      Its keys can be in the format of `port/tcp`, `port/udp`, or `port`,
+      with the default protocol being `tcp` if not specified. These values
+      act as defaults and are merged with any specified when creating a
+      container.
+
+    * **Env** (array of strings) - Entries are in the format of
+      `VARNAME="var value"`. These values act as defaults and are merged
+      with any specified when creating a container.
+
+    * **Entrypoint** (array of strings) - A list of arguments to use as the
+      command to execute when the container starts. This value acts as a
+      default and is replaced by an entrypoint specified when creating a
+      container.
+
+    * **Cmd** (array of strings) - Default arguments to the entry point of
+      the container. These values act as defaults and are replaced with any
+      specified when creating a container. If an `Entrypoint` value is not
+      specified, then the first entry of the `Cmd` array should be
+      interpreted as the executable to run (see the sketch after this list).
+
+    * **Volumes** (struct) - A set of directories which should be created as
+      data volumes in a container running this image. Like `ExposedPorts`,
+      this is a direct JSON serialization of the Go type
+      `map[string]struct{}`, represented in JSON as an object mapping its
+      keys to an empty object. Here is an example:
+
+      ```
+      {
+          "/var/my-app-data/": {},
+          "/etc/some-config.d/": {}
+      }
+      ```
+
+    * **WorkingDir** (string) - Sets the current working directory of the
+      entry point process in the container. This value acts as a default and
+      is replaced by a working directory specified when creating a
+      container.
+
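+A minimal Go sketch of how `Entrypoint` and `Cmd` combine into the argv of
+the container process, per the rules above (the helper name is illustrative,
+not part of the specification):
+
+```
+package main
+
+import "fmt"
+
+// effectiveArgv applies the defaulting rules above: Entrypoint names the
+// executable and Cmd supplies default arguments; with no Entrypoint,
+// Cmd[0] is interpreted as the executable to run.
+func effectiveArgv(entrypoint, cmd []string) []string {
+	if len(entrypoint) == 0 {
+		return cmd
+	}
+	return append(append([]string{}, entrypoint...), cmd...)
+}
+
+func main() {
+	argv := effectiveArgv(
+		[]string{"/bin/my-app-binary"},
+		[]string{"--foreground", "--config", "/etc/my-app.d/default.cfg"},
+	)
+	fmt.Println(argv) // [/bin/my-app-binary --foreground --config /etc/my-app.d/default.cfg]
+}
+```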
+
+Any extra fields in the Image JSON struct are considered implementation
+specific and should be ignored by any implementations which are unable to
+interpret them.
+
+## Creating an Image Filesystem Changeset
+
+An example of creating an Image Filesystem Changeset follows.
+
+An image root filesystem is first created as an empty directory named with
+the ID of the image being created. Here is the initial empty directory
+structure for the changeset for an image with ID `c3167915dc9d` ([real IDs
+are much longer](#id_desc), but this example uses a truncated one for
+brevity. Implementations need not name the rootfs directory in this way,
+but it may be convenient for keeping a record of a large number of image
+layers.):
+
+```
+c3167915dc9d/
+```
+
+Files and directories are then created:
+
+```
+c3167915dc9d/
+    etc/
+        my-app-config
+    bin/
+        my-app-binary
+        my-app-tools
+```
+
+The `c3167915dc9d` directory is then committed as a plain Tar archive with
+entries for the following files:
+
+```
+etc/my-app-config
+bin/my-app-binary
+bin/my-app-tools
+```
+
+The TarSum checksum for the archive file is then computed and placed in the
+JSON metadata along with the execution parameters.
+
+To make changes to the filesystem of this container image, create a new
+directory named with a new ID, such as `f60c56784b83`, and initialize it
+with a snapshot of the parent image's root filesystem, so that the directory
+is identical to that of `c3167915dc9d`. NOTE: a copy-on-write or union
+filesystem can make this very efficient:
+
+```
+f60c56784b83/
+    etc/
+        my-app-config
+    bin/
+        my-app-binary
+        my-app-tools
+```
+
+This example change is going to add a configuration directory at
+`/etc/my-app.d` which contains a default config file. There's also a change
+to the `my-app-tools` binary to handle the config layout change. The
+`f60c56784b83` directory then looks like this:
+
+```
+f60c56784b83/
+    etc/
+        my-app.d/
+            default.cfg
+    bin/
+        my-app-binary
+        my-app-tools
+```
+
+This reflects the removal of `/etc/my-app-config` and creation of a file and
+directory at `/etc/my-app.d/default.cfg`. `/bin/my-app-tools` has also been
+replaced with an updated version. Because this image has a parent, before
+committing the `f60c56784b83` directory to a changeset it is first compared
+with the directory tree of the parent snapshot, `c3167915dc9d`, looking for
+files and directories that have been added, modified, or removed. The
+following changeset is found:
+
+```
+Added:      /etc/my-app.d/default.cfg
+Modified:   /bin/my-app-tools
+Deleted:    /etc/my-app-config
+```
+
+A Tar Archive is then created which contains *only* this changeset: the
+added and modified files and directories in their entirety, and for each
+deleted item an entry for an empty file at the same location but with the
+basename of the deleted file or directory prefixed with `.wh.`. The
+filenames prefixed with `.wh.` are known as "whiteout" files. NOTE: For this
+reason, it is not possible to create an image root filesystem which contains
+a file or directory with a name beginning with `.wh.`. The resulting Tar
+archive for `f60c56784b83` has the following entries:
+
+```
+/etc/my-app.d/default.cfg
+/bin/my-app-tools
+/etc/.wh.my-app-config
+```
+
+Any given image is likely to be composed of several of these Image
+Filesystem Changeset tar archives.
+
+## Combined Image JSON + Filesystem Changeset Format
+
+There is also a format for a single archive which contains complete
+information about an image, including:
+
+ - repository names/tags
+ - all image layer JSON files
+ - all tar archives of each layer's filesystem changeset
+
+For example, here's what the full archive of `library/busybox` is (displayed
+in `tree` format):
+
+```
+.
+├── 5785b62b697b99a5af6cd5d0aabc804d5748abbb6d3d07da5d1d3795f2dcc83e
+│   ├── VERSION
+│   ├── json
+│   └── layer.tar
+├── a7b8b41220991bfc754d7ad445ad27b7f272ab8b4a2c175b9512b97471d02a8a
+│   ├── VERSION
+│   ├── json
+│   └── layer.tar
+├── a936027c5ca8bf8f517923169a233e391cbb38469a75de8383b5228dc2d26ceb
+│   ├── VERSION
+│   ├── json
+│   └── layer.tar
+├── f60c56784b832dd990022afc120b8136ab3da9528094752ae13fe63a2d28dc8c
+│   ├── VERSION
+│   ├── json
+│   └── layer.tar
+└── repositories
+```
+
+There is one directory, named with the layer's ID, for each layer in a full
+image. Each of these directories contains 3 files:
+
+ * `VERSION` - The schema version of the `json` file
+ * `json` - The JSON metadata for an image layer
+ * `layer.tar` - The Tar archive of the filesystem changeset for an image
+   layer.
+
+The content of the `VERSION` files is simply the semantic version of the
+JSON metadata schema:
+
+```
+1.0
+```
+
+And the `repositories` file is another JSON file which describes names/tags:
+
+```
+{
+    "busybox":{
+        "latest":"5785b62b697b99a5af6cd5d0aabc804d5748abbb6d3d07da5d1d3795f2dcc83e"
+    }
+}
+```
+
+Every key in this object is the name of a repository, and maps to a
+collection of tag suffixes. Each tag maps to the ID of the image represented
+by that tag.
+
+## Loading an Image Filesystem Changeset
+
+Unpacking a bundle of image layer JSON files and their corresponding
+filesystem changesets can be done using a series of steps:
+
+1. Follow the parent IDs of image layers to find the root ancestor (an image
+   with no parent ID specified).
+2. For every image layer, in order from the root ancestor down, extract the
+   contents of that layer's filesystem changeset archive into a directory
+   which will be used as the root of a container filesystem.
+
+    - Extract all contents of each archive.
+    - Walk the directory tree once more, removing any files with the prefix
+      `.wh.` and the corresponding file or directory named without this
+      prefix.
+
+A sketch of this whiteout pass appears at the end of this specification.
+
+## Implementations
+
+This specification is an admittedly imperfect description of an
+imperfectly-understood problem. The Docker project is, in turn, an attempt
+to implement this specification. Our goal and our execution toward it will
+evolve over time, but our primary concern in this specification and in our
+implementation is compatibility and interoperability.
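+
+As referenced in the loading steps above, here is a minimal Go sketch of the
+whiteout-removal pass, operating on an already-extracted directory tree (the
+function name is illustrative, not part of the specification):
+
+```
+package main
+
+import (
+	"os"
+	"path/filepath"
+	"strings"
+)
+
+// applyWhiteouts finds every file named ".wh.<name>" under root, then
+// removes both the whiteout marker and the sibling file or directory
+// <name>. Deletions happen after the walk so the tree is not mutated
+// while it is being traversed.
+func applyWhiteouts(root string) error {
+	var whiteouts []string
+	err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		if strings.HasPrefix(filepath.Base(path), ".wh.") {
+			whiteouts = append(whiteouts, path)
+		}
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+	for _, wh := range whiteouts {
+		dir, base := filepath.Dir(wh), filepath.Base(wh)
+		hidden := filepath.Join(dir, strings.TrimPrefix(base, ".wh."))
+		if err := os.RemoveAll(hidden); err != nil {
+			return err
+		}
+		if err := os.Remove(wh); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func main() {
+	if err := applyWhiteouts("rootfs"); err != nil {
+		panic(err)
+	}
+}
+```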
diff --git a/vendor/github.com/moby/moby/image/store.go b/vendor/github.com/moby/moby/image/store.go new file mode 100644 index 000000000..c85f8d683 --- /dev/null +++ b/vendor/github.com/moby/moby/image/store.go @@ -0,0 +1,324 @@ +package image + +import ( + "encoding/json" + "fmt" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digestset" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/system" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// Store is an interface for creating and accessing images +type Store interface { + Create(config []byte) (ID, error) + Get(id ID) (*Image, error) + Delete(id ID) ([]layer.Metadata, error) + Search(partialID string) (ID, error) + SetParent(id ID, parent ID) error + GetParent(id ID) (ID, error) + SetLastUpdated(id ID) error + GetLastUpdated(id ID) (time.Time, error) + Children(id ID) []ID + Map() map[ID]*Image + Heads() map[ID]*Image +} + +// LayerGetReleaser is a minimal interface for getting and releasing images. +type LayerGetReleaser interface { + Get(layer.ChainID) (layer.Layer, error) + Release(layer.Layer) ([]layer.Metadata, error) +} + +type imageMeta struct { + layer layer.Layer + children map[ID]struct{} +} + +type store struct { + sync.RWMutex + ls LayerGetReleaser + images map[ID]*imageMeta + fs StoreBackend + digestSet *digestset.Set + platform string +} + +// NewImageStore returns new store object for given layer store +func NewImageStore(fs StoreBackend, platform string, ls LayerGetReleaser) (Store, error) { + is := &store{ + ls: ls, + images: make(map[ID]*imageMeta), + fs: fs, + digestSet: digestset.NewSet(), + platform: platform, + } + + // load all current images and retain layers + if err := is.restore(); err != nil { + return nil, err + } + + return is, nil +} + +func (is *store) restore() error { + err := is.fs.Walk(func(dgst digest.Digest) error { + img, err := is.Get(IDFromDigest(dgst)) + if err != nil { + logrus.Errorf("invalid image %v, %v", dgst, err) + return nil + } + var l layer.Layer + if chainID := img.RootFS.ChainID(); chainID != "" { + l, err = is.ls.Get(chainID) + if err != nil { + return err + } + } + if err := is.digestSet.Add(dgst); err != nil { + return err + } + + imageMeta := &imageMeta{ + layer: l, + children: make(map[ID]struct{}), + } + + is.images[IDFromDigest(dgst)] = imageMeta + + return nil + }) + if err != nil { + return err + } + + // Second pass to fill in children maps + for id := range is.images { + if parent, err := is.GetParent(id); err == nil { + if parentMeta := is.images[parent]; parentMeta != nil { + parentMeta.children[id] = struct{}{} + } + } + } + + return nil +} + +func (is *store) Create(config []byte) (ID, error) { + var img Image + err := json.Unmarshal(config, &img) + if err != nil { + return "", err + } + + // TODO @jhowardmsft - LCOW Support. This will need revisiting. + // Integrity check - ensure we are creating something for the correct platform + if system.LCOWSupported() { + if strings.ToLower(img.Platform()) != strings.ToLower(is.platform) { + return "", fmt.Errorf("cannot create entry for platform %q in image store for platform %q", img.Platform(), is.platform) + } + } + + // Must reject any config that references diffIDs from the history + // which aren't among the rootfs layers. 
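+	// (This check is structural: every history entry that is not an
+	// empty layer must be backed by a DiffID in RootFS.DiffIDs, so the
+	// count of non-empty history entries may not exceed the DiffID count.)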
+ rootFSLayers := make(map[layer.DiffID]struct{}) + for _, diffID := range img.RootFS.DiffIDs { + rootFSLayers[diffID] = struct{}{} + } + + layerCounter := 0 + for _, h := range img.History { + if !h.EmptyLayer { + layerCounter++ + } + } + if layerCounter > len(img.RootFS.DiffIDs) { + return "", errors.New("too many non-empty layers in History section") + } + + dgst, err := is.fs.Set(config) + if err != nil { + return "", err + } + imageID := IDFromDigest(dgst) + + is.Lock() + defer is.Unlock() + + if _, exists := is.images[imageID]; exists { + return imageID, nil + } + + layerID := img.RootFS.ChainID() + + var l layer.Layer + if layerID != "" { + l, err = is.ls.Get(layerID) + if err != nil { + return "", errors.Wrapf(err, "failed to get layer %s", layerID) + } + } + + imageMeta := &imageMeta{ + layer: l, + children: make(map[ID]struct{}), + } + + is.images[imageID] = imageMeta + if err := is.digestSet.Add(imageID.Digest()); err != nil { + delete(is.images, imageID) + return "", err + } + + return imageID, nil +} + +func (is *store) Search(term string) (ID, error) { + dgst, err := is.digestSet.Lookup(term) + if err != nil { + if err == digestset.ErrDigestNotFound { + err = fmt.Errorf("No such image: %s", term) + } + return "", err + } + return IDFromDigest(dgst), nil +} + +func (is *store) Get(id ID) (*Image, error) { + // todo: Check if image is in images + // todo: Detect manual insertions and start using them + config, err := is.fs.Get(id.Digest()) + if err != nil { + return nil, err + } + + img, err := NewFromJSON(config) + if err != nil { + return nil, err + } + img.computedID = id + + img.Parent, err = is.GetParent(id) + if err != nil { + img.Parent = "" + } + + return img, nil +} + +func (is *store) Delete(id ID) ([]layer.Metadata, error) { + is.Lock() + defer is.Unlock() + + imageMeta := is.images[id] + if imageMeta == nil { + return nil, fmt.Errorf("unrecognized image ID %s", id.String()) + } + for id := range imageMeta.children { + is.fs.DeleteMetadata(id.Digest(), "parent") + } + if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil { + delete(is.images[parent].children, id) + } + + if err := is.digestSet.Remove(id.Digest()); err != nil { + logrus.Errorf("error removing %s from digest set: %q", id, err) + } + delete(is.images, id) + is.fs.Delete(id.Digest()) + + if imageMeta.layer != nil { + return is.ls.Release(imageMeta.layer) + } + return nil, nil +} + +func (is *store) SetParent(id, parent ID) error { + is.Lock() + defer is.Unlock() + parentMeta := is.images[parent] + if parentMeta == nil { + return fmt.Errorf("unknown parent image ID %s", parent.String()) + } + if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil { + delete(is.images[parent].children, id) + } + parentMeta.children[id] = struct{}{} + return is.fs.SetMetadata(id.Digest(), "parent", []byte(parent)) +} + +func (is *store) GetParent(id ID) (ID, error) { + d, err := is.fs.GetMetadata(id.Digest(), "parent") + if err != nil { + return "", err + } + return ID(d), nil // todo: validate? 
+} + +// SetLastUpdated time for the image ID to the current time +func (is *store) SetLastUpdated(id ID) error { + lastUpdated := []byte(time.Now().Format(time.RFC3339Nano)) + return is.fs.SetMetadata(id.Digest(), "lastUpdated", lastUpdated) +} + +// GetLastUpdated time for the image ID +func (is *store) GetLastUpdated(id ID) (time.Time, error) { + bytes, err := is.fs.GetMetadata(id.Digest(), "lastUpdated") + if err != nil || len(bytes) == 0 { + // No lastUpdated time + return time.Time{}, nil + } + return time.Parse(time.RFC3339Nano, string(bytes)) +} + +func (is *store) Children(id ID) []ID { + is.RLock() + defer is.RUnlock() + + return is.children(id) +} + +func (is *store) children(id ID) []ID { + var ids []ID + if is.images[id] != nil { + for id := range is.images[id].children { + ids = append(ids, id) + } + } + return ids +} + +func (is *store) Heads() map[ID]*Image { + return is.imagesMap(false) +} + +func (is *store) Map() map[ID]*Image { + return is.imagesMap(true) +} + +func (is *store) imagesMap(all bool) map[ID]*Image { + is.RLock() + defer is.RUnlock() + + images := make(map[ID]*Image) + + for id := range is.images { + if !all && len(is.children(id)) > 0 { + continue + } + img, err := is.Get(id) + if err != nil { + logrus.Errorf("invalid image access: %q, error: %q", id, err) + continue + } + images[id] = img + } + return images +} diff --git a/vendor/github.com/moby/moby/image/store_test.go b/vendor/github.com/moby/moby/image/store_test.go new file mode 100644 index 000000000..fc6d461d9 --- /dev/null +++ b/vendor/github.com/moby/moby/image/store_test.go @@ -0,0 +1,178 @@ +package image + +import ( + "runtime" + "testing" + + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/testutil" + "github.com/opencontainers/go-digest" + "github.com/stretchr/testify/assert" +) + +func TestRestore(t *testing.T) { + fs, cleanup := defaultFSStoreBackend(t) + defer cleanup() + + id1, err := fs.Set([]byte(`{"comment": "abc", "rootfs": {"type": "layers"}}`)) + assert.NoError(t, err) + + _, err = fs.Set([]byte(`invalid`)) + assert.NoError(t, err) + + id2, err := fs.Set([]byte(`{"comment": "def", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`)) + assert.NoError(t, err) + + err = fs.SetMetadata(id2, "parent", []byte(id1)) + assert.NoError(t, err) + + is, err := NewImageStore(fs, runtime.GOOS, &mockLayerGetReleaser{}) + assert.NoError(t, err) + + assert.Len(t, is.Map(), 2) + + img1, err := is.Get(ID(id1)) + assert.NoError(t, err) + assert.Equal(t, ID(id1), img1.computedID) + assert.Equal(t, string(id1), img1.computedID.String()) + + img2, err := is.Get(ID(id2)) + assert.NoError(t, err) + assert.Equal(t, "abc", img1.Comment) + assert.Equal(t, "def", img2.Comment) + + p, err := is.GetParent(ID(id1)) + testutil.ErrorContains(t, err, "failed to read metadata") + + p, err = is.GetParent(ID(id2)) + assert.NoError(t, err) + assert.Equal(t, ID(id1), p) + + children := is.Children(ID(id1)) + assert.Len(t, children, 1) + assert.Equal(t, ID(id2), children[0]) + assert.Len(t, is.Heads(), 1) + + sid1, err := is.Search(string(id1)[:10]) + assert.NoError(t, err) + assert.Equal(t, ID(id1), sid1) + + sid1, err = is.Search(digest.Digest(id1).Hex()[:6]) + assert.NoError(t, err) + assert.Equal(t, ID(id1), sid1) + + invalidPattern := digest.Digest(id1).Hex()[1:6] + _, err = is.Search(invalidPattern) + testutil.ErrorContains(t, err, "No such image") +} + +func TestAddDelete(t *testing.T) { + is, cleanup := defaultImageStore(t) + defer 
cleanup() + + id1, err := is.Create([]byte(`{"comment": "abc", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`)) + assert.NoError(t, err) + assert.Equal(t, ID("sha256:8d25a9c45df515f9d0fe8e4a6b1c64dd3b965a84790ddbcc7954bb9bc89eb993"), id1) + + img, err := is.Get(id1) + assert.NoError(t, err) + assert.Equal(t, "abc", img.Comment) + + id2, err := is.Create([]byte(`{"comment": "def", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`)) + assert.NoError(t, err) + + err = is.SetParent(id2, id1) + assert.NoError(t, err) + + pid1, err := is.GetParent(id2) + assert.NoError(t, err) + assert.Equal(t, pid1, id1) + + _, err = is.Delete(id1) + assert.NoError(t, err) + + _, err = is.Get(id1) + testutil.ErrorContains(t, err, "failed to get digest") + + _, err = is.Get(id2) + assert.NoError(t, err) + + _, err = is.GetParent(id2) + testutil.ErrorContains(t, err, "failed to read metadata") +} + +func TestSearchAfterDelete(t *testing.T) { + is, cleanup := defaultImageStore(t) + defer cleanup() + + id, err := is.Create([]byte(`{"comment": "abc", "rootfs": {"type": "layers"}}`)) + assert.NoError(t, err) + + id1, err := is.Search(string(id)[:15]) + assert.NoError(t, err) + assert.Equal(t, id1, id) + + _, err = is.Delete(id) + assert.NoError(t, err) + + _, err = is.Search(string(id)[:15]) + testutil.ErrorContains(t, err, "No such image") +} + +func TestParentReset(t *testing.T) { + is, cleanup := defaultImageStore(t) + defer cleanup() + + id, err := is.Create([]byte(`{"comment": "abc1", "rootfs": {"type": "layers"}}`)) + assert.NoError(t, err) + + id2, err := is.Create([]byte(`{"comment": "abc2", "rootfs": {"type": "layers"}}`)) + assert.NoError(t, err) + + id3, err := is.Create([]byte(`{"comment": "abc3", "rootfs": {"type": "layers"}}`)) + assert.NoError(t, err) + + assert.NoError(t, is.SetParent(id, id2)) + assert.Len(t, is.Children(id2), 1) + + assert.NoError(t, is.SetParent(id, id3)) + assert.Len(t, is.Children(id2), 0) + assert.Len(t, is.Children(id3), 1) +} + +func defaultImageStore(t *testing.T) (Store, func()) { + fsBackend, cleanup := defaultFSStoreBackend(t) + + store, err := NewImageStore(fsBackend, runtime.GOOS, &mockLayerGetReleaser{}) + assert.NoError(t, err) + + return store, cleanup +} + +func TestGetAndSetLastUpdated(t *testing.T) { + store, cleanup := defaultImageStore(t) + defer cleanup() + + id, err := store.Create([]byte(`{"comment": "abc1", "rootfs": {"type": "layers"}}`)) + assert.NoError(t, err) + + updated, err := store.GetLastUpdated(id) + assert.NoError(t, err) + assert.Equal(t, updated.IsZero(), true) + + assert.NoError(t, store.SetLastUpdated(id)) + + updated, err = store.GetLastUpdated(id) + assert.NoError(t, err) + assert.Equal(t, updated.IsZero(), false) +} + +type mockLayerGetReleaser struct{} + +func (ls *mockLayerGetReleaser) Get(layer.ChainID) (layer.Layer, error) { + return nil, nil +} + +func (ls *mockLayerGetReleaser) Release(layer.Layer) ([]layer.Metadata, error) { + return nil, nil +} diff --git a/vendor/github.com/moby/moby/image/tarexport/load.go b/vendor/github.com/moby/moby/image/tarexport/load.go new file mode 100644 index 000000000..af8cefc6a --- /dev/null +++ b/vendor/github.com/moby/moby/image/tarexport/load.go @@ -0,0 +1,431 @@ +package tarexport + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "runtime" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + 
"github.com/docker/distribution/reference" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/system" + "github.com/opencontainers/go-digest" +) + +func (l *tarexporter) Load(inTar io.ReadCloser, outStream io.Writer, quiet bool) error { + var progressOutput progress.Output + if !quiet { + progressOutput = streamformatter.NewJSONProgressOutput(outStream, false) + } + outStream = streamformatter.NewStdoutWriter(outStream) + + tmpDir, err := ioutil.TempDir("", "docker-import-") + if err != nil { + return err + } + defer os.RemoveAll(tmpDir) + + if err := chrootarchive.Untar(inTar, tmpDir, nil); err != nil { + return err + } + // read manifest, if no file then load in legacy mode + manifestPath, err := safePath(tmpDir, manifestFileName) + if err != nil { + return err + } + manifestFile, err := os.Open(manifestPath) + if err != nil { + if os.IsNotExist(err) { + return l.legacyLoad(tmpDir, outStream, progressOutput) + } + return err + } + defer manifestFile.Close() + + var manifest []manifestItem + if err := json.NewDecoder(manifestFile).Decode(&manifest); err != nil { + return err + } + + var parentLinks []parentLink + var imageIDsStr string + var imageRefCount int + + for _, m := range manifest { + configPath, err := safePath(tmpDir, m.Config) + if err != nil { + return err + } + config, err := ioutil.ReadFile(configPath) + if err != nil { + return err + } + img, err := image.NewFromJSON(config) + if err != nil { + return err + } + if err := checkCompatibleOS(img.OS); err != nil { + return err + } + var rootFS image.RootFS + rootFS = *img.RootFS + rootFS.DiffIDs = nil + + if expected, actual := len(m.Layers), len(img.RootFS.DiffIDs); expected != actual { + return fmt.Errorf("invalid manifest, layers length mismatch: expected %d, got %d", expected, actual) + } + + // On Windows, validate the platform, defaulting to windows if not present. 
+ platform := layer.Platform(img.OS) + if runtime.GOOS == "windows" { + if platform == "" { + platform = "windows" + } + if (platform != "windows") && (platform != "linux") { + return fmt.Errorf("configuration for this image has an unsupported platform: %s", platform) + } + } + + for i, diffID := range img.RootFS.DiffIDs { + layerPath, err := safePath(tmpDir, m.Layers[i]) + if err != nil { + return err + } + r := rootFS + r.Append(diffID) + newLayer, err := l.ls.Get(r.ChainID()) + if err != nil { + newLayer, err = l.loadLayer(layerPath, rootFS, diffID.String(), platform, m.LayerSources[diffID], progressOutput) + if err != nil { + return err + } + } + defer layer.ReleaseAndLog(l.ls, newLayer) + if expected, actual := diffID, newLayer.DiffID(); expected != actual { + return fmt.Errorf("invalid diffID for layer %d: expected %q, got %q", i, expected, actual) + } + rootFS.Append(diffID) + } + + imgID, err := l.is.Create(config) + if err != nil { + return err + } + imageIDsStr += fmt.Sprintf("Loaded image ID: %s\n", imgID) + + imageRefCount = 0 + for _, repoTag := range m.RepoTags { + named, err := reference.ParseNormalizedNamed(repoTag) + if err != nil { + return err + } + ref, ok := named.(reference.NamedTagged) + if !ok { + return fmt.Errorf("invalid tag %q", repoTag) + } + l.setLoadedTag(ref, imgID.Digest(), outStream) + outStream.Write([]byte(fmt.Sprintf("Loaded image: %s\n", reference.FamiliarString(ref)))) + imageRefCount++ + } + + parentLinks = append(parentLinks, parentLink{imgID, m.Parent}) + l.loggerImgEvent.LogImageEvent(imgID.String(), imgID.String(), "load") + } + + for _, p := range validatedParentLinks(parentLinks) { + if p.parentID != "" { + if err := l.setParentID(p.id, p.parentID); err != nil { + return err + } + } + } + + if imageRefCount == 0 { + outStream.Write([]byte(imageIDsStr)) + } + + return nil +} + +func (l *tarexporter) setParentID(id, parentID image.ID) error { + img, err := l.is.Get(id) + if err != nil { + return err + } + parent, err := l.is.Get(parentID) + if err != nil { + return err + } + if !checkValidParent(img, parent) { + return fmt.Errorf("image %v is not a valid parent for %v", parent.ID(), img.ID()) + } + return l.is.SetParent(id, parentID) +} + +func (l *tarexporter) loadLayer(filename string, rootFS image.RootFS, id string, platform layer.Platform, foreignSrc distribution.Descriptor, progressOutput progress.Output) (layer.Layer, error) { + // We use system.OpenSequential to use sequential file access on Windows, avoiding + // depleting the standby list. On Linux, this equates to a regular os.Open. 
+ rawTar, err := system.OpenSequential(filename) + if err != nil { + logrus.Debugf("Error reading embedded tar: %v", err) + return nil, err + } + defer rawTar.Close() + + var r io.Reader + if progressOutput != nil { + fileInfo, err := rawTar.Stat() + if err != nil { + logrus.Debugf("Error statting file: %v", err) + return nil, err + } + + r = progress.NewProgressReader(rawTar, progressOutput, fileInfo.Size(), stringid.TruncateID(id), "Loading layer") + } else { + r = rawTar + } + + inflatedLayerData, err := archive.DecompressStream(r) + if err != nil { + return nil, err + } + defer inflatedLayerData.Close() + + if ds, ok := l.ls.(layer.DescribableStore); ok { + return ds.RegisterWithDescriptor(inflatedLayerData, rootFS.ChainID(), platform, foreignSrc) + } + return l.ls.Register(inflatedLayerData, rootFS.ChainID(), platform) +} + +func (l *tarexporter) setLoadedTag(ref reference.NamedTagged, imgID digest.Digest, outStream io.Writer) error { + if prevID, err := l.rs.Get(ref); err == nil && prevID != imgID { + fmt.Fprintf(outStream, "The image %s already exists, renaming the old one with ID %s to empty string\n", reference.FamiliarString(ref), string(prevID)) // todo: this message is wrong in case of multiple tags + } + + if err := l.rs.AddTag(ref, imgID, true); err != nil { + return err + } + return nil +} + +func (l *tarexporter) legacyLoad(tmpDir string, outStream io.Writer, progressOutput progress.Output) error { + if runtime.GOOS == "windows" { + return errors.New("Windows does not support legacy loading of images") + } + + legacyLoadedMap := make(map[string]image.ID) + + dirs, err := ioutil.ReadDir(tmpDir) + if err != nil { + return err + } + + // every dir represents an image + for _, d := range dirs { + if d.IsDir() { + if err := l.legacyLoadImage(d.Name(), tmpDir, legacyLoadedMap, progressOutput); err != nil { + return err + } + } + } + + // load tags from repositories file + repositoriesPath, err := safePath(tmpDir, legacyRepositoriesFileName) + if err != nil { + return err + } + repositoriesFile, err := os.Open(repositoriesPath) + if err != nil { + return err + } + defer repositoriesFile.Close() + + repositories := make(map[string]map[string]string) + if err := json.NewDecoder(repositoriesFile).Decode(&repositories); err != nil { + return err + } + + for name, tagMap := range repositories { + for tag, oldID := range tagMap { + imgID, ok := legacyLoadedMap[oldID] + if !ok { + return fmt.Errorf("invalid target ID: %v", oldID) + } + named, err := reference.ParseNormalizedNamed(name) + if err != nil { + return err + } + ref, err := reference.WithTag(named, tag) + if err != nil { + return err + } + l.setLoadedTag(ref, imgID.Digest(), outStream) + } + } + + return nil +} + +func (l *tarexporter) legacyLoadImage(oldID, sourceDir string, loadedMap map[string]image.ID, progressOutput progress.Output) error { + if _, loaded := loadedMap[oldID]; loaded { + return nil + } + configPath, err := safePath(sourceDir, filepath.Join(oldID, legacyConfigFileName)) + if err != nil { + return err + } + imageJSON, err := ioutil.ReadFile(configPath) + if err != nil { + logrus.Debugf("Error reading json: %v", err) + return err + } + + var img struct { + OS string + Parent string + } + if err := json.Unmarshal(imageJSON, &img); err != nil { + return err + } + + if err := checkCompatibleOS(img.OS); err != nil { + return err + } + + var parentID image.ID + if img.Parent != "" { + for { + var loaded bool + if parentID, loaded = loadedMap[img.Parent]; !loaded { + if err := l.legacyLoadImage(img.Parent, 
sourceDir, loadedMap, progressOutput); err != nil { + return err + } + } else { + break + } + } + } + + // todo: try to connect with migrate code + rootFS := image.NewRootFS() + var history []image.History + + if parentID != "" { + parentImg, err := l.is.Get(parentID) + if err != nil { + return err + } + + rootFS = parentImg.RootFS + history = parentImg.History + } + + layerPath, err := safePath(sourceDir, filepath.Join(oldID, legacyLayerFileName)) + if err != nil { + return err + } + newLayer, err := l.loadLayer(layerPath, *rootFS, oldID, "", distribution.Descriptor{}, progressOutput) + if err != nil { + return err + } + rootFS.Append(newLayer.DiffID()) + + h, err := v1.HistoryFromConfig(imageJSON, false) + if err != nil { + return err + } + history = append(history, h) + + config, err := v1.MakeConfigFromV1Config(imageJSON, rootFS, history) + if err != nil { + return err + } + imgID, err := l.is.Create(config) + if err != nil { + return err + } + + metadata, err := l.ls.Release(newLayer) + layer.LogReleaseMetadata(metadata) + if err != nil { + return err + } + + if parentID != "" { + if err := l.is.SetParent(imgID, parentID); err != nil { + return err + } + } + + loadedMap[oldID] = imgID + return nil +} + +func safePath(base, path string) (string, error) { + return symlink.FollowSymlinkInScope(filepath.Join(base, path), base) +} + +type parentLink struct { + id, parentID image.ID +} + +func validatedParentLinks(pl []parentLink) (ret []parentLink) { +mainloop: + for i, p := range pl { + ret = append(ret, p) + for _, p2 := range pl { + if p2.id == p.parentID && p2.id != p.id { + continue mainloop + } + } + ret[i].parentID = "" + } + return +} + +func checkValidParent(img, parent *image.Image) bool { + if len(img.History) == 0 && len(parent.History) == 0 { + return true // having history is not mandatory + } + if len(img.History)-len(parent.History) != 1 { + return false + } + for i, h := range parent.History { + if !reflect.DeepEqual(h, img.History[i]) { + return false + } + } + return true +} + +func checkCompatibleOS(os string) error { + // TODO @jhowardmsft LCOW - revisit for simultaneous platforms + platform := runtime.GOOS + if system.LCOWSupported() { + platform = "linux" + } + // always compatible if the OS matches; also match an empty OS + if os == platform || os == "" { + return nil + } + // for compatibility, only fail if the image or runtime OS is Windows + if os == "windows" || platform == "windows" { + return fmt.Errorf("cannot load %s image on %s", os, platform) + } + return nil +} diff --git a/vendor/github.com/moby/moby/image/tarexport/save.go b/vendor/github.com/moby/moby/image/tarexport/save.go new file mode 100644 index 000000000..d304a54c3 --- /dev/null +++ b/vendor/github.com/moby/moby/image/tarexport/save.go @@ -0,0 +1,409 @@ +package tarexport + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/reference" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/system" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +type imageDescriptor struct { + refs []reference.NamedTagged + layers []string + image *image.Image + layerRef layer.Layer +} + +type saveSession struct { + *tarexporter + outDir string + images map[image.ID]*imageDescriptor + savedLayers map[string]struct{} + diffIDPaths map[layer.DiffID]string // 
cache every diffID blob to avoid duplicates +} + +func (l *tarexporter) Save(names []string, outStream io.Writer) error { + images, err := l.parseNames(names) + if err != nil { + return err + } + + // Release all the image top layer references + defer l.releaseLayerReferences(images) + return (&saveSession{tarexporter: l, images: images}).save(outStream) +} + +// parseNames will parse the image names to a map which contains image.ID to *imageDescriptor. +// Each imageDescriptor holds an image top layer reference named 'layerRef'. It is taken here, should be released later. +func (l *tarexporter) parseNames(names []string) (desc map[image.ID]*imageDescriptor, rErr error) { + imgDescr := make(map[image.ID]*imageDescriptor) + defer func() { + if rErr != nil { + l.releaseLayerReferences(imgDescr) + } + }() + + addAssoc := func(id image.ID, ref reference.Named) error { + if _, ok := imgDescr[id]; !ok { + descr := &imageDescriptor{} + if err := l.takeLayerReference(id, descr); err != nil { + return err + } + imgDescr[id] = descr + } + + if ref != nil { + if _, ok := ref.(reference.Canonical); ok { + return nil + } + tagged, ok := reference.TagNameOnly(ref).(reference.NamedTagged) + if !ok { + return nil + } + + for _, t := range imgDescr[id].refs { + if tagged.String() == t.String() { + return nil + } + } + imgDescr[id].refs = append(imgDescr[id].refs, tagged) + } + return nil + } + + for _, name := range names { + ref, err := reference.ParseAnyReference(name) + if err != nil { + return nil, err + } + namedRef, ok := ref.(reference.Named) + if !ok { + // Check if digest ID reference + if digested, ok := ref.(reference.Digested); ok { + id := image.IDFromDigest(digested.Digest()) + if err := addAssoc(id, nil); err != nil { + return nil, err + } + continue + } + return nil, errors.Errorf("invalid reference: %v", name) + } + + if reference.FamiliarName(namedRef) == string(digest.Canonical) { + imgID, err := l.is.Search(name) + if err != nil { + return nil, err + } + if err := addAssoc(imgID, nil); err != nil { + return nil, err + } + continue + } + if reference.IsNameOnly(namedRef) { + assocs := l.rs.ReferencesByName(namedRef) + for _, assoc := range assocs { + if err := addAssoc(image.IDFromDigest(assoc.ID), assoc.Ref); err != nil { + return nil, err + } + } + if len(assocs) == 0 { + imgID, err := l.is.Search(name) + if err != nil { + return nil, err + } + if err := addAssoc(imgID, nil); err != nil { + return nil, err + } + } + continue + } + id, err := l.rs.Get(namedRef) + if err != nil { + return nil, err + } + if err := addAssoc(image.IDFromDigest(id), namedRef); err != nil { + return nil, err + } + + } + return imgDescr, nil +} + +// takeLayerReference will take/Get the image top layer reference +func (l *tarexporter) takeLayerReference(id image.ID, imgDescr *imageDescriptor) error { + img, err := l.is.Get(id) + if err != nil { + return err + } + imgDescr.image = img + topLayerID := img.RootFS.ChainID() + if topLayerID == "" { + return nil + } + layer, err := l.ls.Get(topLayerID) + if err != nil { + return err + } + imgDescr.layerRef = layer + return nil +} + +// releaseLayerReferences will release all the image top layer references +func (l *tarexporter) releaseLayerReferences(imgDescr map[image.ID]*imageDescriptor) error { + for _, descr := range imgDescr { + if descr.layerRef != nil { + l.ls.Release(descr.layerRef) + } + } + return nil +} + +func (s *saveSession) save(outStream io.Writer) error { + s.savedLayers = make(map[string]struct{}) + s.diffIDPaths = make(map[layer.DiffID]string) + + 
// get image json + tempDir, err := ioutil.TempDir("", "docker-export-") + if err != nil { + return err + } + defer os.RemoveAll(tempDir) + + s.outDir = tempDir + reposLegacy := make(map[string]map[string]string) + + var manifest []manifestItem + var parentLinks []parentLink + + for id, imageDescr := range s.images { + foreignSrcs, err := s.saveImage(id) + if err != nil { + return err + } + + var repoTags []string + var layers []string + + for _, ref := range imageDescr.refs { + familiarName := reference.FamiliarName(ref) + if _, ok := reposLegacy[familiarName]; !ok { + reposLegacy[familiarName] = make(map[string]string) + } + reposLegacy[familiarName][ref.Tag()] = imageDescr.layers[len(imageDescr.layers)-1] + repoTags = append(repoTags, reference.FamiliarString(ref)) + } + + for _, l := range imageDescr.layers { + layers = append(layers, filepath.Join(l, legacyLayerFileName)) + } + + manifest = append(manifest, manifestItem{ + Config: id.Digest().Hex() + ".json", + RepoTags: repoTags, + Layers: layers, + LayerSources: foreignSrcs, + }) + + parentID, _ := s.is.GetParent(id) + parentLinks = append(parentLinks, parentLink{id, parentID}) + s.tarexporter.loggerImgEvent.LogImageEvent(id.String(), id.String(), "save") + } + + for i, p := range validatedParentLinks(parentLinks) { + if p.parentID != "" { + manifest[i].Parent = p.parentID + } + } + + if len(reposLegacy) > 0 { + reposFile := filepath.Join(tempDir, legacyRepositoriesFileName) + rf, err := os.OpenFile(reposFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return err + } + + if err := json.NewEncoder(rf).Encode(reposLegacy); err != nil { + rf.Close() + return err + } + + rf.Close() + + if err := system.Chtimes(reposFile, time.Unix(0, 0), time.Unix(0, 0)); err != nil { + return err + } + } + + manifestFileName := filepath.Join(tempDir, manifestFileName) + f, err := os.OpenFile(manifestFileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return err + } + + if err := json.NewEncoder(f).Encode(manifest); err != nil { + f.Close() + return err + } + + f.Close() + + if err := system.Chtimes(manifestFileName, time.Unix(0, 0), time.Unix(0, 0)); err != nil { + return err + } + + fs, err := archive.Tar(tempDir, archive.Uncompressed) + if err != nil { + return err + } + defer fs.Close() + + _, err = io.Copy(outStream, fs) + return err +} + +func (s *saveSession) saveImage(id image.ID) (map[layer.DiffID]distribution.Descriptor, error) { + img := s.images[id].image + if len(img.RootFS.DiffIDs) == 0 { + return nil, fmt.Errorf("empty export - not implemented") + } + + var parent digest.Digest + var layers []string + var foreignSrcs map[layer.DiffID]distribution.Descriptor + for i := range img.RootFS.DiffIDs { + v1Img := image.V1Image{ + // This is for backward compatibility used for + // pre v1.9 docker. 
+ Created: time.Unix(0, 0), + } + if i == len(img.RootFS.DiffIDs)-1 { + v1Img = img.V1Image + } + rootFS := *img.RootFS + rootFS.DiffIDs = rootFS.DiffIDs[:i+1] + v1ID, err := v1.CreateID(v1Img, rootFS.ChainID(), parent) + if err != nil { + return nil, err + } + + v1Img.ID = v1ID.Hex() + if parent != "" { + v1Img.Parent = parent.Hex() + } + + src, err := s.saveLayer(rootFS.ChainID(), v1Img, img.Created) + if err != nil { + return nil, err + } + layers = append(layers, v1Img.ID) + parent = v1ID + if src.Digest != "" { + if foreignSrcs == nil { + foreignSrcs = make(map[layer.DiffID]distribution.Descriptor) + } + foreignSrcs[img.RootFS.DiffIDs[i]] = src + } + } + + configFile := filepath.Join(s.outDir, id.Digest().Hex()+".json") + if err := ioutil.WriteFile(configFile, img.RawJSON(), 0644); err != nil { + return nil, err + } + if err := system.Chtimes(configFile, img.Created, img.Created); err != nil { + return nil, err + } + + s.images[id].layers = layers + return foreignSrcs, nil +} + +func (s *saveSession) saveLayer(id layer.ChainID, legacyImg image.V1Image, createdTime time.Time) (distribution.Descriptor, error) { + if _, exists := s.savedLayers[legacyImg.ID]; exists { + return distribution.Descriptor{}, nil + } + + outDir := filepath.Join(s.outDir, legacyImg.ID) + if err := os.Mkdir(outDir, 0755); err != nil { + return distribution.Descriptor{}, err + } + + // todo: why is this version file here? + if err := ioutil.WriteFile(filepath.Join(outDir, legacyVersionFileName), []byte("1.0"), 0644); err != nil { + return distribution.Descriptor{}, err + } + + imageConfig, err := json.Marshal(legacyImg) + if err != nil { + return distribution.Descriptor{}, err + } + + if err := ioutil.WriteFile(filepath.Join(outDir, legacyConfigFileName), imageConfig, 0644); err != nil { + return distribution.Descriptor{}, err + } + + // serialize filesystem + layerPath := filepath.Join(outDir, legacyLayerFileName) + l, err := s.ls.Get(id) + if err != nil { + return distribution.Descriptor{}, err + } + defer layer.ReleaseAndLog(s.ls, l) + + if oldPath, exists := s.diffIDPaths[l.DiffID()]; exists { + relPath, err := filepath.Rel(outDir, oldPath) + if err != nil { + return distribution.Descriptor{}, err + } + if err := os.Symlink(relPath, layerPath); err != nil { + return distribution.Descriptor{}, errors.Wrap(err, "error creating symlink while saving layer") + } + } else { + // Use system.CreateSequential rather than os.Create. This ensures sequential + // file access on Windows to avoid eating into MM standby list. + // On Linux, this equates to a regular os.Create. + tarFile, err := system.CreateSequential(layerPath) + if err != nil { + return distribution.Descriptor{}, err + } + defer tarFile.Close() + + arch, err := l.TarStream() + if err != nil { + return distribution.Descriptor{}, err + } + defer arch.Close() + + if _, err := io.Copy(tarFile, arch); err != nil { + return distribution.Descriptor{}, err + } + + for _, fname := range []string{"", legacyVersionFileName, legacyConfigFileName, legacyLayerFileName} { + // todo: maybe save layer created timestamp? 
+ if err := system.Chtimes(filepath.Join(outDir, fname), createdTime, createdTime); err != nil { + return distribution.Descriptor{}, err + } + } + + s.diffIDPaths[l.DiffID()] = layerPath + } + s.savedLayers[legacyImg.ID] = struct{}{} + + var src distribution.Descriptor + if fs, ok := l.(distribution.Describable); ok { + src = fs.Descriptor() + } + return src, nil +} diff --git a/vendor/github.com/moby/moby/image/tarexport/tarexport.go b/vendor/github.com/moby/moby/image/tarexport/tarexport.go new file mode 100644 index 000000000..f7fab74f5 --- /dev/null +++ b/vendor/github.com/moby/moby/image/tarexport/tarexport.go @@ -0,0 +1,47 @@ +package tarexport + +import ( + "github.com/docker/distribution" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + refstore "github.com/docker/docker/reference" +) + +const ( + manifestFileName = "manifest.json" + legacyLayerFileName = "layer.tar" + legacyConfigFileName = "json" + legacyVersionFileName = "VERSION" + legacyRepositoriesFileName = "repositories" +) + +type manifestItem struct { + Config string + RepoTags []string + Layers []string + Parent image.ID `json:",omitempty"` + LayerSources map[layer.DiffID]distribution.Descriptor `json:",omitempty"` +} + +type tarexporter struct { + is image.Store + ls layer.Store + rs refstore.Store + loggerImgEvent LogImageEvent +} + +// LogImageEvent defines interface for event generation related to image tar(load and save) operations +type LogImageEvent interface { + //LogImageEvent generates an event related to an image operation + LogImageEvent(imageID, refName, action string) +} + +// NewTarExporter returns new Exporter for tar packages +func NewTarExporter(is image.Store, ls layer.Store, rs refstore.Store, loggerImgEvent LogImageEvent) image.Exporter { + return &tarexporter{ + is: is, + ls: ls, + rs: rs, + loggerImgEvent: loggerImgEvent, + } +} diff --git a/vendor/github.com/moby/moby/image/v1/imagev1.go b/vendor/github.com/moby/moby/image/v1/imagev1.go new file mode 100644 index 000000000..0e8a23cb5 --- /dev/null +++ b/vendor/github.com/moby/moby/image/v1/imagev1.go @@ -0,0 +1,150 @@ +package v1 + +import ( + "encoding/json" + "reflect" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/stringid" + "github.com/opencontainers/go-digest" +) + +// noFallbackMinVersion is the minimum version for which v1compatibility +// information will not be marshaled through the Image struct to remove +// blank fields. +var noFallbackMinVersion = "1.8.3" + +// HistoryFromConfig creates a History struct from v1 configuration JSON +func HistoryFromConfig(imageJSON []byte, emptyLayer bool) (image.History, error) { + h := image.History{} + var v1Image image.V1Image + if err := json.Unmarshal(imageJSON, &v1Image); err != nil { + return h, err + } + + return image.History{ + Author: v1Image.Author, + Created: v1Image.Created, + CreatedBy: strings.Join(v1Image.ContainerConfig.Cmd, " "), + Comment: v1Image.Comment, + EmptyLayer: emptyLayer, + }, nil +} + +// CreateID creates an ID from v1 image, layerID and parent ID. +// Used for backwards compatibility with old clients. 
+func CreateID(v1Image image.V1Image, layerID layer.ChainID, parent digest.Digest) (digest.Digest, error) { + v1Image.ID = "" + v1JSON, err := json.Marshal(v1Image) + if err != nil { + return "", err + } + + var config map[string]*json.RawMessage + if err := json.Unmarshal(v1JSON, &config); err != nil { + return "", err + } + + // FIXME: note that this is slightly incompatible with RootFS logic + config["layer_id"] = rawJSON(layerID) + if parent != "" { + config["parent"] = rawJSON(parent) + } + + configJSON, err := json.Marshal(config) + if err != nil { + return "", err + } + logrus.Debugf("CreateV1ID %s", configJSON) + + return digest.FromBytes(configJSON), nil +} + +// MakeConfigFromV1Config creates an image config from the legacy V1 config format. +func MakeConfigFromV1Config(imageJSON []byte, rootfs *image.RootFS, history []image.History) ([]byte, error) { + var dver struct { + DockerVersion string `json:"docker_version"` + } + + if err := json.Unmarshal(imageJSON, &dver); err != nil { + return nil, err + } + + useFallback := versions.LessThan(dver.DockerVersion, noFallbackMinVersion) + + if useFallback { + var v1Image image.V1Image + err := json.Unmarshal(imageJSON, &v1Image) + if err != nil { + return nil, err + } + imageJSON, err = json.Marshal(v1Image) + if err != nil { + return nil, err + } + } + + var c map[string]*json.RawMessage + if err := json.Unmarshal(imageJSON, &c); err != nil { + return nil, err + } + + delete(c, "id") + delete(c, "parent") + delete(c, "Size") // Size is calculated from data on disk and is inconsistent + delete(c, "parent_id") + delete(c, "layer_id") + delete(c, "throwaway") + + c["rootfs"] = rawJSON(rootfs) + c["history"] = rawJSON(history) + + return json.Marshal(c) +} + +// MakeV1ConfigFromConfig creates a legacy V1 image config from an Image struct +func MakeV1ConfigFromConfig(img *image.Image, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { + // Top-level v1compatibility string should be a modified version of the + // image config. + var configAsMap map[string]*json.RawMessage + if err := json.Unmarshal(img.RawJSON(), &configAsMap); err != nil { + return nil, err + } + + // Delete fields that didn't exist in old manifest + imageType := reflect.TypeOf(img).Elem() + for i := 0; i < imageType.NumField(); i++ { + f := imageType.Field(i) + jsonName := strings.Split(f.Tag.Get("json"), ",")[0] + // Parent is handled specially below. + if jsonName != "" && jsonName != "parent" { + delete(configAsMap, jsonName) + } + } + configAsMap["id"] = rawJSON(v1ID) + if parentV1ID != "" { + configAsMap["parent"] = rawJSON(parentV1ID) + } + if throwaway { + configAsMap["throwaway"] = rawJSON(true) + } + + return json.Marshal(configAsMap) +} + +func rawJSON(value interface{}) *json.RawMessage { + jsonval, err := json.Marshal(value) + if err != nil { + return nil + } + return (*json.RawMessage)(&jsonval) +} + +// ValidateID checks whether an ID string is a valid image ID. 
+func ValidateID(id string) error { + return stringid.ValidateID(id) +} diff --git a/vendor/github.com/moby/moby/image/v1/imagev1_test.go b/vendor/github.com/moby/moby/image/v1/imagev1_test.go new file mode 100644 index 000000000..936c55e4c --- /dev/null +++ b/vendor/github.com/moby/moby/image/v1/imagev1_test.go @@ -0,0 +1,55 @@ +package v1 + +import ( + "encoding/json" + "testing" + + "github.com/docker/docker/image" +) + +func TestMakeV1ConfigFromConfig(t *testing.T) { + img := &image.Image{ + V1Image: image.V1Image{ + ID: "v2id", + Parent: "v2parent", + OS: "os", + }, + OSVersion: "osversion", + RootFS: &image.RootFS{ + Type: "layers", + }, + } + v2js, err := json.Marshal(img) + if err != nil { + t.Fatal(err) + } + + // Convert the image back in order to get RawJSON() support. + img, err = image.NewFromJSON(v2js) + if err != nil { + t.Fatal(err) + } + + js, err := MakeV1ConfigFromConfig(img, "v1id", "v1parent", false) + if err != nil { + t.Fatal(err) + } + + newimg := &image.Image{} + err = json.Unmarshal(js, newimg) + if err != nil { + t.Fatal(err) + } + + if newimg.V1Image.ID != "v1id" || newimg.Parent != "v1parent" { + t.Error("ids should have changed", newimg.V1Image.ID, newimg.V1Image.Parent) + } + + if newimg.RootFS != nil { + t.Error("rootfs should have been removed") + } + + if newimg.V1Image.OS != "os" { + t.Error("os should have been preserved") + } +} diff --git a/vendor/github.com/moby/moby/integration-cli/benchmark_test.go b/vendor/github.com/moby/moby/integration-cli/benchmark_test.go new file mode 100644 index 000000000..ae0f67f6b --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/benchmark_test.go @@ -0,0 +1,95 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + "runtime" + "strings" + "sync" + + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) BenchmarkConcurrentContainerActions(c *check.C) { + maxConcurrency := runtime.GOMAXPROCS(0) + numIterations := c.N + outerGroup := &sync.WaitGroup{} + outerGroup.Add(maxConcurrency) + chErr := make(chan error, numIterations*2*maxConcurrency) + + for i := 0; i < maxConcurrency; i++ { + go func() { + defer outerGroup.Done() + innerGroup := &sync.WaitGroup{} + innerGroup.Add(2) + + go func() { + defer innerGroup.Done() + for i := 0; i < numIterations; i++ { + args := []string{"run", "-d", defaultSleepImage} + args = append(args, sleepCommandForDaemonPlatform()...) + out, _, err := dockerCmdWithError(args...) 
+ if err != nil { + chErr <- fmt.Errorf(out) + return + } + + id := strings.TrimSpace(out) + tmpDir, err := ioutil.TempDir("", "docker-concurrent-test-"+id) + if err != nil { + chErr <- err + return + } + defer os.RemoveAll(tmpDir) + out, _, err = dockerCmdWithError("cp", id+":/tmp", tmpDir) + if err != nil { + chErr <- fmt.Errorf(out) + return + } + + out, _, err = dockerCmdWithError("kill", id) + if err != nil { + chErr <- fmt.Errorf(out) + } + + out, _, err = dockerCmdWithError("start", id) + if err != nil { + chErr <- fmt.Errorf(out) + } + + out, _, err = dockerCmdWithError("kill", id) + if err != nil { + chErr <- fmt.Errorf(out) + } + + // don't do an rm -f here since it can potentially ignore errors from the graphdriver + out, _, err = dockerCmdWithError("rm", id) + if err != nil { + chErr <- fmt.Errorf(out) + } + } + }() + + go func() { + defer innerGroup.Done() + for i := 0; i < numIterations; i++ { + out, _, err := dockerCmdWithError("ps") + if err != nil { + chErr <- fmt.Errorf(out) + } + } + }() + + innerGroup.Wait() + }() + } + + outerGroup.Wait() + close(chErr) + + for err := range chErr { + c.Assert(err, checker.IsNil) + } +} diff --git a/vendor/github.com/moby/moby/integration-cli/check_test.go b/vendor/github.com/moby/moby/integration-cli/check_test.go new file mode 100644 index 000000000..f05b6504e --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/check_test.go @@ -0,0 +1,496 @@ +package main + +import ( + "fmt" + "net/http/httptest" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + "sync" + "syscall" + "testing" + "time" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli/config" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build/fakestorage" + "github.com/docker/docker/integration-cli/daemon" + "github.com/docker/docker/integration-cli/environment" + "github.com/docker/docker/integration-cli/fixtures/plugin" + "github.com/docker/docker/integration-cli/registry" + "github.com/docker/docker/pkg/reexec" + "github.com/go-check/check" + "golang.org/x/net/context" +) + +const ( + // the private registry to use for tests + privateRegistryURL = "127.0.0.1:5000" + + // path to containerd's ctr binary + ctrBinary = "docker-containerd-ctr" + + // the docker daemon binary to use + dockerdBinary = "dockerd" +) + +var ( + testEnv *environment.Execution + + // the docker client binary to use + dockerBinary = "" +) + +func init() { + var err error + + reexec.Init() // This is required for external graphdriver tests + + testEnv, err = environment.New() + if err != nil { + fmt.Println(err) + os.Exit(1) + } +} + +func TestMain(m *testing.M) { + dockerBinary = testEnv.DockerBinary() + + if testEnv.LocalDaemon() { + fmt.Println("INFO: Testing against a local daemon") + } else { + fmt.Println("INFO: Testing against a remote daemon") + } + exitCode := m.Run() + os.Exit(exitCode) +} + +func Test(t *testing.T) { + cli.EnsureTestEnvIsLoaded(t) + fakestorage.EnsureTestEnvIsLoaded(t) + cmd := exec.Command(dockerBinary, "images", "-f", "dangling=false", "--format", "{{.Repository}}:{{.Tag}}") + cmd.Env = appendBaseEnv(true) + out, err := cmd.CombinedOutput() + if err != nil { + panic(fmt.Errorf("err=%v\nout=%s\n", err, out)) + } + images := strings.Split(strings.TrimSpace(string(out)), "\n") + testEnv.ProtectImage(t, images...) 
+ if testEnv.DaemonPlatform() == "linux" { + ensureFrozenImagesLinux(t) + } + check.TestingT(t) +} + +func init() { + check.Suite(&DockerSuite{}) +} + +type DockerSuite struct { +} + +func (s *DockerSuite) OnTimeout(c *check.C) { + if testEnv.DaemonPID() > 0 && testEnv.LocalDaemon() { + daemon.SignalDaemonDump(testEnv.DaemonPID()) + } +} + +func (s *DockerSuite) TearDownTest(c *check.C) { + testEnv.Clean(c, dockerBinary) +} + +func init() { + check.Suite(&DockerRegistrySuite{ + ds: &DockerSuite{}, + }) +} + +type DockerRegistrySuite struct { + ds *DockerSuite + reg *registry.V2 + d *daemon.Daemon +} + +func (s *DockerRegistrySuite) OnTimeout(c *check.C) { + s.d.DumpStackAndQuit() +} + +func (s *DockerRegistrySuite) SetUpTest(c *check.C) { + testRequires(c, DaemonIsLinux, registry.Hosting) + s.reg = setupRegistry(c, false, "", "") + s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: testEnv.ExperimentalDaemon(), + }) +} + +func (s *DockerRegistrySuite) TearDownTest(c *check.C) { + if s.reg != nil { + s.reg.Close() + } + if s.d != nil { + s.d.Stop(c) + } + s.ds.TearDownTest(c) +} + +func init() { + check.Suite(&DockerSchema1RegistrySuite{ + ds: &DockerSuite{}, + }) +} + +type DockerSchema1RegistrySuite struct { + ds *DockerSuite + reg *registry.V2 + d *daemon.Daemon +} + +func (s *DockerSchema1RegistrySuite) OnTimeout(c *check.C) { + s.d.DumpStackAndQuit() +} + +func (s *DockerSchema1RegistrySuite) SetUpTest(c *check.C) { + testRequires(c, DaemonIsLinux, registry.Hosting, NotArm64) + s.reg = setupRegistry(c, true, "", "") + s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: testEnv.ExperimentalDaemon(), + }) +} + +func (s *DockerSchema1RegistrySuite) TearDownTest(c *check.C) { + if s.reg != nil { + s.reg.Close() + } + if s.d != nil { + s.d.Stop(c) + } + s.ds.TearDownTest(c) +} + +func init() { + check.Suite(&DockerRegistryAuthHtpasswdSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerRegistryAuthHtpasswdSuite struct { + ds *DockerSuite + reg *registry.V2 + d *daemon.Daemon +} + +func (s *DockerRegistryAuthHtpasswdSuite) OnTimeout(c *check.C) { + s.d.DumpStackAndQuit() +} + +func (s *DockerRegistryAuthHtpasswdSuite) SetUpTest(c *check.C) { + testRequires(c, DaemonIsLinux, registry.Hosting) + s.reg = setupRegistry(c, false, "htpasswd", "") + s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: testEnv.ExperimentalDaemon(), + }) +} + +func (s *DockerRegistryAuthHtpasswdSuite) TearDownTest(c *check.C) { + if s.reg != nil { + out, err := s.d.Cmd("logout", privateRegistryURL) + c.Assert(err, check.IsNil, check.Commentf(out)) + s.reg.Close() + } + if s.d != nil { + s.d.Stop(c) + } + s.ds.TearDownTest(c) +} + +func init() { + check.Suite(&DockerRegistryAuthTokenSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerRegistryAuthTokenSuite struct { + ds *DockerSuite + reg *registry.V2 + d *daemon.Daemon +} + +func (s *DockerRegistryAuthTokenSuite) OnTimeout(c *check.C) { + s.d.DumpStackAndQuit() +} + +func (s *DockerRegistryAuthTokenSuite) SetUpTest(c *check.C) { + testRequires(c, DaemonIsLinux, registry.Hosting) + s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: testEnv.ExperimentalDaemon(), + }) +} + +func (s *DockerRegistryAuthTokenSuite) TearDownTest(c *check.C) { + if s.reg != nil { + out, err := s.d.Cmd("logout", privateRegistryURL) + c.Assert(err, check.IsNil, check.Commentf(out)) + s.reg.Close() + } + if s.d != nil { + s.d.Stop(c) + } + s.ds.TearDownTest(c) +} + +func (s 
*DockerRegistryAuthTokenSuite) setupRegistryWithTokenService(c *check.C, tokenURL string) { + if s == nil { + c.Fatal("registry suite isn't initialized") + } + s.reg = setupRegistry(c, false, "token", tokenURL) +} + +func init() { + check.Suite(&DockerDaemonSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerDaemonSuite struct { + ds *DockerSuite + d *daemon.Daemon +} + +func (s *DockerDaemonSuite) OnTimeout(c *check.C) { + s.d.DumpStackAndQuit() +} + +func (s *DockerDaemonSuite) SetUpTest(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: testEnv.ExperimentalDaemon(), + }) +} + +func (s *DockerDaemonSuite) TearDownTest(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + if s.d != nil { + s.d.Stop(c) + } + s.ds.TearDownTest(c) +} + +func (s *DockerDaemonSuite) TearDownSuite(c *check.C) { + filepath.Walk(daemon.SockRoot, func(path string, fi os.FileInfo, err error) error { + if err != nil { + // ignore errors here + // not cleaning up sockets is not really an error + return nil + } + if fi.Mode() == os.ModeSocket { + syscall.Unlink(path) + } + return nil + }) + os.RemoveAll(daemon.SockRoot) +} + +const defaultSwarmPort = 2477 + +func init() { + check.Suite(&DockerSwarmSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerSwarmSuite struct { + server *httptest.Server + ds *DockerSuite + daemons []*daemon.Swarm + daemonsLock sync.Mutex // protect access to daemons + portIndex int +} + +func (s *DockerSwarmSuite) OnTimeout(c *check.C) { + s.daemonsLock.Lock() + defer s.daemonsLock.Unlock() + for _, d := range s.daemons { + d.DumpStackAndQuit() + } +} + +func (s *DockerSwarmSuite) SetUpTest(c *check.C) { + testRequires(c, DaemonIsLinux) +} + +func (s *DockerSwarmSuite) AddDaemon(c *check.C, joinSwarm, manager bool) *daemon.Swarm { + d := &daemon.Swarm{ + Daemon: daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: testEnv.ExperimentalDaemon(), + }), + Port: defaultSwarmPort + s.portIndex, + } + d.ListenAddr = fmt.Sprintf("0.0.0.0:%d", d.Port) + args := []string{"--iptables=false", "--swarm-default-advertise-addr=lo"} // avoid networking conflicts + d.StartWithBusybox(c, args...) + + if joinSwarm == true { + if len(s.daemons) > 0 { + tokens := s.daemons[0].JoinTokens(c) + token := tokens.Worker + if manager { + token = tokens.Manager + } + c.Assert(d.Join(swarm.JoinRequest{ + RemoteAddrs: []string{s.daemons[0].ListenAddr}, + JoinToken: token, + }), check.IsNil) + } else { + c.Assert(d.Init(swarm.InitRequest{}), check.IsNil) + } + } + + s.portIndex++ + s.daemonsLock.Lock() + s.daemons = append(s.daemons, d) + s.daemonsLock.Unlock() + + return d +} + +func (s *DockerSwarmSuite) TearDownTest(c *check.C) { + testRequires(c, DaemonIsLinux) + s.daemonsLock.Lock() + for _, d := range s.daemons { + if d != nil { + d.Stop(c) + // FIXME(vdemeester) should be handled by SwarmDaemon ? 
+ // raft state file is quite big (64MB) so remove it after every test + walDir := filepath.Join(d.Root, "swarm/raft/wal") + if err := os.RemoveAll(walDir); err != nil { + c.Logf("error removing %v: %v", walDir, err) + } + + d.CleanupExecRoot(c) + } + } + s.daemons = nil + s.daemonsLock.Unlock() + + s.portIndex = 0 + s.ds.TearDownTest(c) +} + +func init() { + check.Suite(&DockerTrustSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerTrustSuite struct { + ds *DockerSuite + reg *registry.V2 + not *testNotary +} + +func (s *DockerTrustSuite) OnTimeout(c *check.C) { + s.ds.OnTimeout(c) +} + +func (s *DockerTrustSuite) SetUpTest(c *check.C) { + testRequires(c, registry.Hosting, NotaryServerHosting) + s.reg = setupRegistry(c, false, "", "") + s.not = setupNotary(c) +} + +func (s *DockerTrustSuite) TearDownTest(c *check.C) { + if s.reg != nil { + s.reg.Close() + } + if s.not != nil { + s.not.Close() + } + + // Remove trusted keys and metadata after test + os.RemoveAll(filepath.Join(config.Dir(), "trust")) + s.ds.TearDownTest(c) +} + +func init() { + ds := &DockerSuite{} + check.Suite(&DockerTrustedSwarmSuite{ + trustSuite: DockerTrustSuite{ + ds: ds, + }, + swarmSuite: DockerSwarmSuite{ + ds: ds, + }, + }) +} + +type DockerTrustedSwarmSuite struct { + swarmSuite DockerSwarmSuite + trustSuite DockerTrustSuite + reg *registry.V2 + not *testNotary +} + +func (s *DockerTrustedSwarmSuite) SetUpTest(c *check.C) { + s.swarmSuite.SetUpTest(c) + s.trustSuite.SetUpTest(c) +} + +func (s *DockerTrustedSwarmSuite) TearDownTest(c *check.C) { + s.trustSuite.TearDownTest(c) + s.swarmSuite.TearDownTest(c) +} + +func (s *DockerTrustedSwarmSuite) OnTimeout(c *check.C) { + s.swarmSuite.OnTimeout(c) +} + +func init() { + check.Suite(&DockerPluginSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerPluginSuite struct { + ds *DockerSuite + registry *registry.V2 +} + +func (ps *DockerPluginSuite) registryHost() string { + return privateRegistryURL +} + +func (ps *DockerPluginSuite) getPluginRepo() string { + return path.Join(ps.registryHost(), "plugin", "basic") +} +func (ps *DockerPluginSuite) getPluginRepoWithTag() string { + return ps.getPluginRepo() + ":" + "latest" +} + +func (ps *DockerPluginSuite) SetUpSuite(c *check.C) { + testRequires(c, DaemonIsLinux) + ps.registry = setupRegistry(c, false, "", "") + + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + err := plugin.CreateInRegistry(ctx, ps.getPluginRepo(), nil) + c.Assert(err, checker.IsNil, check.Commentf("failed to create plugin")) +} + +func (ps *DockerPluginSuite) TearDownSuite(c *check.C) { + if ps.registry != nil { + ps.registry.Close() + } +} + +func (ps *DockerPluginSuite) TearDownTest(c *check.C) { + ps.ds.TearDownTest(c) +} + +func (ps *DockerPluginSuite) OnTimeout(c *check.C) { + ps.ds.OnTimeout(c) +} diff --git a/vendor/github.com/moby/moby/integration-cli/checker/checker.go b/vendor/github.com/moby/moby/integration-cli/checker/checker.go new file mode 100644 index 000000000..d1b703a59 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/checker/checker.go @@ -0,0 +1,46 @@ +// Package checker provides Docker specific implementations of the go-check.Checker interface. +package checker + +import ( + "github.com/go-check/check" + "github.com/vdemeester/shakers" +) + +// As a commodity, we bring all check.Checker variables into the current namespace to avoid having +// to think about check.X versus checker.X. 
+var ( + DeepEquals = check.DeepEquals + ErrorMatches = check.ErrorMatches + FitsTypeOf = check.FitsTypeOf + HasLen = check.HasLen + Implements = check.Implements + IsNil = check.IsNil + Matches = check.Matches + Not = check.Not + NotNil = check.NotNil + PanicMatches = check.PanicMatches + Panics = check.Panics + + Contains = shakers.Contains + ContainsAny = shakers.ContainsAny + Count = shakers.Count + Equals = shakers.Equals + EqualFold = shakers.EqualFold + False = shakers.False + GreaterOrEqualThan = shakers.GreaterOrEqualThan + GreaterThan = shakers.GreaterThan + HasPrefix = shakers.HasPrefix + HasSuffix = shakers.HasSuffix + Index = shakers.Index + IndexAny = shakers.IndexAny + IsAfter = shakers.IsAfter + IsBefore = shakers.IsBefore + IsBetween = shakers.IsBetween + IsLower = shakers.IsLower + IsUpper = shakers.IsUpper + LessOrEqualThan = shakers.LessOrEqualThan + LessThan = shakers.LessThan + TimeEquals = shakers.TimeEquals + True = shakers.True + TimeIgnore = shakers.TimeIgnore +) diff --git a/vendor/github.com/moby/moby/integration-cli/cli/build/build.go b/vendor/github.com/moby/moby/integration-cli/cli/build/build.go new file mode 100644 index 000000000..8ffaa35b4 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/cli/build/build.go @@ -0,0 +1,82 @@ +package build + +import ( + "io" + "strings" + + "github.com/docker/docker/integration-cli/cli/build/fakecontext" + icmd "github.com/docker/docker/pkg/testutil/cmd" +) + +type testingT interface { + Fatal(args ...interface{}) + Fatalf(string, ...interface{}) +} + +// WithStdinContext sets the build context from the standard input with the specified reader +func WithStdinContext(closer io.ReadCloser) func(*icmd.Cmd) func() { + return func(cmd *icmd.Cmd) func() { + cmd.Command = append(cmd.Command, "-") + cmd.Stdin = closer + return func() { + // FIXME(vdemeester) we should not ignore the error here… + closer.Close() + } + } +} + +// WithDockerfile creates / returns a CmdOperator to set the Dockerfile for a build operation +func WithDockerfile(dockerfile string) func(*icmd.Cmd) func() { + return func(cmd *icmd.Cmd) func() { + cmd.Command = append(cmd.Command, "-") + cmd.Stdin = strings.NewReader(dockerfile) + return nil + } +} + +// WithoutCache makes the build ignore cache +func WithoutCache(cmd *icmd.Cmd) func() { + cmd.Command = append(cmd.Command, "--no-cache") + return nil +} + +// WithContextPath sets the build context path +func WithContextPath(path string) func(*icmd.Cmd) func() { + return func(cmd *icmd.Cmd) func() { + cmd.Command = append(cmd.Command, path) + return nil + } +} + +// WithExternalBuildContext use the specified context as build context +func WithExternalBuildContext(ctx *fakecontext.Fake) func(*icmd.Cmd) func() { + return func(cmd *icmd.Cmd) func() { + cmd.Dir = ctx.Dir + cmd.Command = append(cmd.Command, ".") + return nil + } +} + +// WithBuildContext sets up the build context +func WithBuildContext(t testingT, contextOperators ...func(*fakecontext.Fake) error) func(*icmd.Cmd) func() { + // FIXME(vdemeester) de-duplicate that + ctx := fakecontext.New(t, "", contextOperators...) 
+ return func(cmd *icmd.Cmd) func() { + cmd.Dir = ctx.Dir + cmd.Command = append(cmd.Command, ".") + return closeBuildContext(t, ctx) + } +} + +// WithFile adds the specified file (with content) in the build context +func WithFile(name, content string) func(*fakecontext.Fake) error { + return fakecontext.WithFile(name, content) +} + +func closeBuildContext(t testingT, ctx *fakecontext.Fake) func() { + return func() { + if err := ctx.Close(); err != nil { + t.Fatal(err) + } + } +} diff --git a/vendor/github.com/moby/moby/integration-cli/cli/build/fakecontext/context.go b/vendor/github.com/moby/moby/integration-cli/cli/build/fakecontext/context.go new file mode 100644 index 000000000..8ecf4e3c6 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/cli/build/fakecontext/context.go @@ -0,0 +1,124 @@ +package fakecontext + +import ( + "bytes" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/archive" +) + +type testingT interface { + Fatal(args ...interface{}) + Fatalf(string, ...interface{}) +} + +// New creates a fake build context +func New(t testingT, dir string, modifiers ...func(*Fake) error) *Fake { + fakeContext := &Fake{Dir: dir} + if dir == "" { + if err := newDir(fakeContext); err != nil { + t.Fatal(err) + } + } + + for _, modifier := range modifiers { + if err := modifier(fakeContext); err != nil { + t.Fatal(err) + } + } + + return fakeContext +} + +func newDir(fake *Fake) error { + tmp, err := ioutil.TempDir("", "fake-context") + if err != nil { + return err + } + if err := os.Chmod(tmp, 0755); err != nil { + return err + } + fake.Dir = tmp + return nil +} + +// WithFile adds the specified file (with content) in the build context +func WithFile(name, content string) func(*Fake) error { + return func(ctx *Fake) error { + return ctx.Add(name, content) + } +} + +// WithDockerfile adds the specified content as Dockerfile in the build context +func WithDockerfile(content string) func(*Fake) error { + return WithFile("Dockerfile", content) +} + +// WithFiles adds the specified files in the build context, content is a string +func WithFiles(files map[string]string) func(*Fake) error { + return func(fakeContext *Fake) error { + for file, content := range files { + if err := fakeContext.Add(file, content); err != nil { + return err + } + } + return nil + } +} + +// WithBinaryFiles adds the specified files in the build context, content is binary +func WithBinaryFiles(files map[string]*bytes.Buffer) func(*Fake) error { + return func(fakeContext *Fake) error { + for file, content := range files { + if err := fakeContext.Add(file, string(content.Bytes())); err != nil { + return err + } + } + return nil + } +} + +// Fake creates directories that can be used as a build context +type Fake struct { + Dir string +} + +// Add a file at a path, creating directories where necessary +func (f *Fake) Add(file, content string) error { + return f.addFile(file, []byte(content)) +} + +func (f *Fake) addFile(file string, content []byte) error { + fp := filepath.Join(f.Dir, filepath.FromSlash(file)) + dirpath := filepath.Dir(fp) + if dirpath != "." 
{ + if err := os.MkdirAll(dirpath, 0755); err != nil { + return err + } + } + return ioutil.WriteFile(fp, content, 0644) + +} + +// Delete a file at a path +func (f *Fake) Delete(file string) error { + fp := filepath.Join(f.Dir, filepath.FromSlash(file)) + return os.RemoveAll(fp) +} + +// Close deletes the context +func (f *Fake) Close() error { + return os.RemoveAll(f.Dir) +} + +// AsTarReader returns a ReadCloser with the contents of Dir as a tar archive. +func (f *Fake) AsTarReader(t testingT) io.ReadCloser { + reader, err := archive.TarWithOptions(f.Dir, &archive.TarOptions{}) + if err != nil { + t.Fatalf("Failed to create tar from %s: %s", f.Dir, err) + } + return reader +} diff --git a/vendor/github.com/moby/moby/integration-cli/cli/build/fakegit/fakegit.go b/vendor/github.com/moby/moby/integration-cli/cli/build/fakegit/fakegit.go new file mode 100644 index 000000000..74faffd92 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/cli/build/fakegit/fakegit.go @@ -0,0 +1,125 @@ +package fakegit + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "os/exec" + "path/filepath" + + "github.com/docker/docker/integration-cli/cli/build/fakecontext" + "github.com/docker/docker/integration-cli/cli/build/fakestorage" +) + +type testingT interface { + logT + Fatal(args ...interface{}) + Fatalf(string, ...interface{}) +} + +type logT interface { + Logf(string, ...interface{}) +} + +type gitServer interface { + URL() string + Close() error +} + +type localGitServer struct { + *httptest.Server +} + +func (r *localGitServer) Close() error { + r.Server.Close() + return nil +} + +func (r *localGitServer) URL() string { + return r.Server.URL +} + +// FakeGit is a fake git server +type FakeGit struct { + root string + server gitServer + RepoURL string +} + +// Close closes the server, implements Closer interface +func (g *FakeGit) Close() { + g.server.Close() + os.RemoveAll(g.root) +} + +// New create a fake git server that can be used for git related tests +func New(c testingT, name string, files map[string]string, enforceLocalServer bool) *FakeGit { + ctx := fakecontext.New(c, "", fakecontext.WithFiles(files)) + defer ctx.Close() + curdir, err := os.Getwd() + if err != nil { + c.Fatal(err) + } + defer os.Chdir(curdir) + + if output, err := exec.Command("git", "init", ctx.Dir).CombinedOutput(); err != nil { + c.Fatalf("error trying to init repo: %s (%s)", err, output) + } + err = os.Chdir(ctx.Dir) + if err != nil { + c.Fatal(err) + } + if output, err := exec.Command("git", "config", "user.name", "Fake User").CombinedOutput(); err != nil { + c.Fatalf("error trying to set 'user.name': %s (%s)", err, output) + } + if output, err := exec.Command("git", "config", "user.email", "fake.user@example.com").CombinedOutput(); err != nil { + c.Fatalf("error trying to set 'user.email': %s (%s)", err, output) + } + if output, err := exec.Command("git", "add", "*").CombinedOutput(); err != nil { + c.Fatalf("error trying to add files to repo: %s (%s)", err, output) + } + if output, err := exec.Command("git", "commit", "-a", "-m", "Initial commit").CombinedOutput(); err != nil { + c.Fatalf("error trying to commit to repo: %s (%s)", err, output) + } + + root, err := ioutil.TempDir("", "docker-test-git-repo") + if err != nil { + c.Fatal(err) + } + repoPath := filepath.Join(root, name+".git") + if output, err := exec.Command("git", "clone", "--bare", ctx.Dir, repoPath).CombinedOutput(); err != nil { + os.RemoveAll(root) + c.Fatalf("error trying to clone --bare: %s (%s)", err, output) + } 
+ err = os.Chdir(repoPath) + if err != nil { + os.RemoveAll(root) + c.Fatal(err) + } + if output, err := exec.Command("git", "update-server-info").CombinedOutput(); err != nil { + os.RemoveAll(root) + c.Fatalf("error trying to git update-server-info: %s (%s)", err, output) + } + err = os.Chdir(curdir) + if err != nil { + os.RemoveAll(root) + c.Fatal(err) + } + + var server gitServer + if !enforceLocalServer { + // use fakeStorage server, which might be local or remote (at test daemon) + server = fakestorage.New(c, root) + } else { + // always start a local http server on CLI test machine + httpServer := httptest.NewServer(http.FileServer(http.Dir(root))) + server = &localGitServer{httpServer} + } + return &FakeGit{ + root: root, + server: server, + RepoURL: fmt.Sprintf("%s/%s.git", server.URL(), name), + } +} diff --git a/vendor/github.com/moby/moby/integration-cli/cli/build/fakestorage/fixtures.go b/vendor/github.com/moby/moby/integration-cli/cli/build/fakestorage/fixtures.go new file mode 100644 index 000000000..f6a63dcf0 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/cli/build/fakestorage/fixtures.go @@ -0,0 +1,67 @@ +package fakestorage + +import ( + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "sync" + + "github.com/docker/docker/integration-cli/cli" +) + +var ensureHTTPServerOnce sync.Once + +func ensureHTTPServerImage(t testingT) { + var doIt bool + ensureHTTPServerOnce.Do(func() { + doIt = true + }) + + if !doIt { + return + } + + defer testEnv.ProtectImage(t, "httpserver:latest") + + tmp, err := ioutil.TempDir("", "docker-http-server-test") + if err != nil { + t.Fatalf("could not build http server: %v", err) + } + defer os.RemoveAll(tmp) + + goos := testEnv.DaemonPlatform() + if goos == "" { + goos = "linux" + } + goarch := os.Getenv("DOCKER_ENGINE_GOARCH") + if goarch == "" { + goarch = "amd64" + } + + goCmd, lookErr := exec.LookPath("go") + if lookErr != nil { + t.Fatalf("could not build http server: %v", lookErr) + } + + cmd := exec.Command(goCmd, "build", "-o", filepath.Join(tmp, "httpserver"), "github.com/docker/docker/contrib/httpserver") + cmd.Env = append(os.Environ(), []string{ + "CGO_ENABLED=0", + "GOOS=" + goos, + "GOARCH=" + goarch, + }...) 
+ var out []byte + if out, err = cmd.CombinedOutput(); err != nil { + t.Fatalf("could not build http server: %s", string(out)) + } + + cpCmd, lookErr := exec.LookPath("cp") + if lookErr != nil { + t.Fatalf("could not build http server: %v", lookErr) + } + if out, err = exec.Command(cpCmd, "../contrib/httpserver/Dockerfile", filepath.Join(tmp, "Dockerfile")).CombinedOutput(); err != nil { + t.Fatalf("could not build http server: %v", string(out)) + } + + cli.DockerCmd(t, "build", "-q", "-t", "httpserver", tmp) +} diff --git a/vendor/github.com/moby/moby/integration-cli/cli/build/fakestorage/storage.go b/vendor/github.com/moby/moby/integration-cli/cli/build/fakestorage/storage.go new file mode 100644 index 000000000..49f47e436 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/cli/build/fakestorage/storage.go @@ -0,0 +1,176 @@ +package fakestorage + +import ( + "fmt" + "net" + "net/http" + "net/http/httptest" + "net/url" + "os" + "strings" + "sync" + + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/integration-cli/cli/build/fakecontext" + "github.com/docker/docker/integration-cli/environment" + "github.com/docker/docker/integration-cli/request" + "github.com/docker/docker/pkg/stringutils" +) + +var ( + testEnv *environment.Execution + onlyOnce sync.Once +) + +// EnsureTestEnvIsLoaded make sure the test environment is loaded for this package +func EnsureTestEnvIsLoaded(t testingT) { + var doIt bool + var err error + onlyOnce.Do(func() { + doIt = true + }) + + if !doIt { + return + } + testEnv, err = environment.New() + if err != nil { + t.Fatalf("error loading testenv : %v", err) + } +} + +type testingT interface { + logT + Fatal(args ...interface{}) + Fatalf(string, ...interface{}) +} + +type logT interface { + Logf(string, ...interface{}) +} + +// Fake is a static file server. It might be running locally or remotely +// on test host. +type Fake interface { + Close() error + URL() string + CtxDir() string +} + +// New returns a static file server that will be use as build context. +func New(t testingT, dir string, modifiers ...func(*fakecontext.Fake) error) Fake { + ctx := fakecontext.New(t, dir, modifiers...) + if testEnv.LocalDaemon() { + return newLocalFakeStorage(t, ctx) + } + return newRemoteFileServer(t, ctx) +} + +// localFileStorage is a file storage on the running machine +type localFileStorage struct { + *fakecontext.Fake + *httptest.Server +} + +func (s *localFileStorage) URL() string { + return s.Server.URL +} + +func (s *localFileStorage) CtxDir() string { + return s.Fake.Dir +} + +func (s *localFileStorage) Close() error { + defer s.Server.Close() + return s.Fake.Close() +} + +func newLocalFakeStorage(t testingT, ctx *fakecontext.Fake) *localFileStorage { + handler := http.FileServer(http.Dir(ctx.Dir)) + server := httptest.NewServer(handler) + return &localFileStorage{ + Fake: ctx, + Server: server, + } +} + +// remoteFileServer is a containerized static file server started on the remote +// testing machine to be used in URL-accepting docker build functionality. +type remoteFileServer struct { + host string // hostname/port web server is listening to on docker host e.g. 
0.0.0.0:43712 + container string + image string + ctx *fakecontext.Fake +} + +func (f *remoteFileServer) URL() string { + u := url.URL{ + Scheme: "http", + Host: f.host} + return u.String() +} + +func (f *remoteFileServer) CtxDir() string { + return f.ctx.Dir +} + +func (f *remoteFileServer) Close() error { + defer func() { + if f.ctx != nil { + f.ctx.Close() + } + if f.image != "" { + if err := cli.Docker(cli.Args("rmi", "-f", f.image)).Error; err != nil { + fmt.Fprintf(os.Stderr, "Error closing remote file server : %v\n", err) + } + } + }() + if f.container == "" { + return nil + } + return cli.Docker(cli.Args("rm", "-fv", f.container)).Error +} + +func newRemoteFileServer(t testingT, ctx *fakecontext.Fake) *remoteFileServer { + var ( + image = fmt.Sprintf("fileserver-img-%s", strings.ToLower(stringutils.GenerateRandomAlphaOnlyString(10))) + container = fmt.Sprintf("fileserver-cnt-%s", strings.ToLower(stringutils.GenerateRandomAlphaOnlyString(10))) + ) + + ensureHTTPServerImage(t) + + // Build the image + if err := ctx.Add("Dockerfile", `FROM httpserver +COPY . /static`); err != nil { + t.Fatal(err) + } + cli.BuildCmd(t, image, build.WithoutCache, build.WithExternalBuildContext(ctx)) + + // Start the container + cli.DockerCmd(t, "run", "-d", "-P", "--name", container, image) + + // Find out the system assigned port + out := cli.DockerCmd(t, "port", container, "80/tcp").Combined() + fileserverHostPort := strings.Trim(out, "\n") + _, port, err := net.SplitHostPort(fileserverHostPort) + if err != nil { + t.Fatalf("unable to parse file server host:port: %v", err) + } + + dockerHostURL, err := url.Parse(request.DaemonHost()) + if err != nil { + t.Fatalf("unable to parse daemon host URL: %v", err) + } + + host, _, err := net.SplitHostPort(dockerHostURL.Host) + if err != nil { + t.Fatalf("unable to parse docker daemon host:port: %v", err) + } + + return &remoteFileServer{ + container: container, + image: image, + host: fmt.Sprintf("%s:%s", host, port), + ctx: ctx} +} diff --git a/vendor/github.com/moby/moby/integration-cli/cli/cli.go b/vendor/github.com/moby/moby/integration-cli/cli/cli.go new file mode 100644 index 000000000..d8355217e --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/cli/cli.go @@ -0,0 +1,231 @@ +package cli + +import ( + "fmt" + "io" + "strings" + "sync" + "time" + + "github.com/docker/docker/integration-cli/daemon" + "github.com/docker/docker/integration-cli/environment" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/pkg/errors" +) + +var ( + testEnv *environment.Execution + onlyOnce sync.Once +) + +// EnsureTestEnvIsLoaded make sure the test environment is loaded for this package +func EnsureTestEnvIsLoaded(t testingT) { + var doIt bool + var err error + onlyOnce.Do(func() { + doIt = true + }) + + if !doIt { + return + } + testEnv, err = environment.New() + if err != nil { + t.Fatalf("error loading testenv : %v", err) + } +} + +// CmdOperator defines functions that can modify a command +type CmdOperator func(*icmd.Cmd) func() + +type testingT interface { + Fatal(args ...interface{}) + Fatalf(string, ...interface{}) +} + +// DockerCmd executes the specified docker command and expect a success +func DockerCmd(t testingT, args ...string) *icmd.Result { + return Docker(Args(args...)).Assert(t, icmd.Success) +} + +// BuildCmd executes the specified docker build command and expect a success +func BuildCmd(t testingT, name string, cmdOperators ...CmdOperator) *icmd.Result { + return Docker(Build(name), cmdOperators...).Assert(t, icmd.Success) +} + 
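+// Commands are composed from an icmd.Cmd plus any number of CmdOperator
+// modifiers; for example (illustrative, where d is a test daemon):
+//
+//	res := Docker(Args("ps", "-a"), Daemon(d))
+//	res.Assert(t, icmd.Success)
+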
+// InspectCmd executes the specified docker inspect command and expect a success +func InspectCmd(t testingT, name string, cmdOperators ...CmdOperator) *icmd.Result { + return Docker(Inspect(name), cmdOperators...).Assert(t, icmd.Success) +} + +// WaitRun will wait for the specified container to be running, maximum 5 seconds. +func WaitRun(t testingT, name string, cmdOperators ...CmdOperator) { + WaitForInspectResult(t, name, "{{.State.Running}}", "true", 5*time.Second, cmdOperators...) +} + +// WaitExited will wait for the specified container to state exit, subject +// to a maximum time limit in seconds supplied by the caller +func WaitExited(t testingT, name string, timeout time.Duration, cmdOperators ...CmdOperator) { + WaitForInspectResult(t, name, "{{.State.Status}}", "exited", timeout, cmdOperators...) +} + +// WaitRestart will wait for the specified container to restart once +func WaitRestart(t testingT, name string, timeout time.Duration, cmdOperators ...CmdOperator) { + WaitForInspectResult(t, name, "{{.RestartCount}}", "1", timeout, cmdOperators...) +} + +// WaitForInspectResult waits for the specified expression to be equals to the specified expected string in the given time. +func WaitForInspectResult(t testingT, name, expr, expected string, timeout time.Duration, cmdOperators ...CmdOperator) { + after := time.After(timeout) + + args := []string{"inspect", "-f", expr, name} + for { + result := Docker(Args(args...), cmdOperators...) + if result.Error != nil { + if !strings.Contains(strings.ToLower(result.Stderr()), "no such") { + t.Fatalf("error executing docker inspect: %v\n%s", + result.Stderr(), result.Stdout()) + } + select { + case <-after: + t.Fatal(result.Error) + default: + time.Sleep(10 * time.Millisecond) + continue + } + } + + out := strings.TrimSpace(result.Stdout()) + if out == expected { + break + } + + select { + case <-after: + t.Fatalf("condition \"%q == %q\" not true in time (%v)", out, expected, timeout) + default: + } + + time.Sleep(100 * time.Millisecond) + } +} + +// Docker executes the specified docker command +func Docker(cmd icmd.Cmd, cmdOperators ...CmdOperator) *icmd.Result { + for _, op := range cmdOperators { + deferFn := op(&cmd) + if deferFn != nil { + defer deferFn() + } + } + appendDocker(&cmd) + if err := validateArgs(cmd.Command...); err != nil { + return &icmd.Result{ + Error: err, + } + } + return icmd.RunCmd(cmd) +} + +// validateArgs is a checker to ensure tests are not running commands which are +// not supported on platforms. Specifically on Windows this is 'busybox top'. +func validateArgs(args ...string) error { + if testEnv.DaemonPlatform() != "windows" { + return nil + } + foundBusybox := -1 + for key, value := range args { + if strings.ToLower(value) == "busybox" { + foundBusybox = key + } + if (foundBusybox != -1) && (key == foundBusybox+1) && (strings.ToLower(value) == "top") { + return errors.New("cannot use 'busybox top' in tests on Windows. 
Use runSleepingContainer()") + } + } + return nil +} + +// Build executes the specified docker build command +func Build(name string) icmd.Cmd { + return icmd.Command("build", "-t", name) +} + +// Inspect executes the specified docker inspect command +func Inspect(name string) icmd.Cmd { + return icmd.Command("inspect", name) +} + +// Format sets the specified format with --format flag +func Format(format string) func(*icmd.Cmd) func() { + return func(cmd *icmd.Cmd) func() { + cmd.Command = append( + []string{cmd.Command[0]}, + append([]string{"--format", fmt.Sprintf("{{%s}}", format)}, cmd.Command[1:]...)..., + ) + return nil + } +} + +func appendDocker(cmd *icmd.Cmd) { + cmd.Command = append([]string{testEnv.DockerBinary()}, cmd.Command...) +} + +// Args build an icmd.Cmd struct from the specified arguments +func Args(args ...string) icmd.Cmd { + switch len(args) { + case 0: + return icmd.Cmd{} + case 1: + return icmd.Command(args[0]) + default: + return icmd.Command(args[0], args[1:]...) + } +} + +// Daemon points to the specified daemon +func Daemon(d *daemon.Daemon) func(*icmd.Cmd) func() { + return func(cmd *icmd.Cmd) func() { + cmd.Command = append([]string{"--host", d.Sock()}, cmd.Command...) + return nil + } +} + +// WithTimeout sets the timeout for the command to run +func WithTimeout(timeout time.Duration) func(cmd *icmd.Cmd) func() { + return func(cmd *icmd.Cmd) func() { + cmd.Timeout = timeout + return nil + } +} + +// WithEnvironmentVariables sets the specified environment variables for the command to run +func WithEnvironmentVariables(envs ...string) func(cmd *icmd.Cmd) func() { + return func(cmd *icmd.Cmd) func() { + cmd.Env = envs + return nil + } +} + +// WithFlags sets the specified flags for the command to run +func WithFlags(flags ...string) func(*icmd.Cmd) func() { + return func(cmd *icmd.Cmd) func() { + cmd.Command = append(cmd.Command, flags...) 
+ return nil + } +} + +// InDir sets the folder in which the command should be executed +func InDir(path string) func(*icmd.Cmd) func() { + return func(cmd *icmd.Cmd) func() { + cmd.Dir = path + return nil + } +} + +// WithStdout sets the standard output writer of the command +func WithStdout(writer io.Writer) func(*icmd.Cmd) func() { + return func(cmd *icmd.Cmd) func() { + cmd.Stdout = writer + return nil + } +} diff --git a/vendor/github.com/moby/moby/integration-cli/daemon/daemon.go b/vendor/github.com/moby/moby/integration-cli/daemon/daemon.go new file mode 100644 index 000000000..8b086c942 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/daemon/daemon.go @@ -0,0 +1,815 @@ +package daemon + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/testutil" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" + "github.com/go-check/check" + "github.com/pkg/errors" +) + +type testingT interface { + logT + Fatalf(string, ...interface{}) +} + +type logT interface { + Logf(string, ...interface{}) +} + +// SockRoot holds the path of the default docker integration daemon socket +var SockRoot = filepath.Join(os.TempDir(), "docker-integration") + +var errDaemonNotStarted = errors.New("daemon not started") + +// Daemon represents a Docker daemon for the testing framework. +type Daemon struct { + GlobalFlags []string + Root string + Folder string + Wait chan error + UseDefaultHost bool + UseDefaultTLSHost bool + + id string + logFile *os.File + stdin io.WriteCloser + stdout, stderr io.ReadCloser + cmd *exec.Cmd + storageDriver string + userlandProxy bool + execRoot string + experimental bool + dockerBinary string + dockerdBinary string + log logT +} + +// Config holds docker daemon integration configuration +type Config struct { + Experimental bool +} + +type clientConfig struct { + transport *http.Transport + scheme string + addr string +} + +// New returns a Daemon instance to be used for testing. +// This will create a directory such as d123456789 in the folder specified by $DOCKER_INTEGRATION_DAEMON_DEST or $DEST. +// The daemon will not automatically start. 
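+// A minimal usage sketch (binary names here are assumptions; adapt them to
+// the harness in use):
+//
+//	d := daemon.New(t, "docker", "dockerd", daemon.Config{})
+//	d.Start(t)
+//	defer d.Stop(t)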
+func New(t testingT, dockerBinary string, dockerdBinary string, config Config) *Daemon {
+ dest := os.Getenv("DOCKER_INTEGRATION_DAEMON_DEST")
+ if dest == "" {
+ dest = os.Getenv("DEST")
+ }
+ if dest == "" {
+ t.Fatalf("Please set the DOCKER_INTEGRATION_DAEMON_DEST or the DEST environment variable")
+ }
+
+ if err := os.MkdirAll(SockRoot, 0700); err != nil {
+ t.Fatalf("could not create daemon socket root")
+ }
+
+ id := fmt.Sprintf("d%s", stringid.TruncateID(stringid.GenerateRandomID()))
+ dir := filepath.Join(dest, id)
+ daemonFolder, err := filepath.Abs(dir)
+ if err != nil {
+ t.Fatalf("Could not make %q an absolute path", dir)
+ }
+ daemonRoot := filepath.Join(daemonFolder, "root")
+
+ if err := os.MkdirAll(daemonRoot, 0755); err != nil {
+ t.Fatalf("Could not create daemon root %q", dir)
+ }
+
+ userlandProxy := true
+ if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" {
+ // Only honor the override when it parses as a boolean.
+ if val, err := strconv.ParseBool(env); err == nil {
+ userlandProxy = val
+ }
+ }
+
+ return &Daemon{
+ id: id,
+ Folder: daemonFolder,
+ Root: daemonRoot,
+ storageDriver: os.Getenv("DOCKER_GRAPHDRIVER"),
+ userlandProxy: userlandProxy,
+ execRoot: filepath.Join(os.TempDir(), "docker-execroot", id),
+ dockerBinary: dockerBinary,
+ dockerdBinary: dockerdBinary,
+ experimental: config.Experimental,
+ log: t,
+ }
+}
+
+// RootDir returns the root directory of the daemon.
+func (d *Daemon) RootDir() string {
+ return d.Root
+}
+
+// ID returns the generated id of the daemon
+func (d *Daemon) ID() string {
+ return d.id
+}
+
+// StorageDriver returns the configured storage driver of the daemon
+func (d *Daemon) StorageDriver() string {
+ return d.storageDriver
+}
+
+// CleanupExecRoot cleans the daemon exec root (network namespaces, ...)
+func (d *Daemon) CleanupExecRoot(c *check.C) {
+ cleanupExecRoot(c, d.execRoot)
+}
+
+func (d *Daemon) getClientConfig() (*clientConfig, error) {
+ var (
+ transport *http.Transport
+ scheme string
+ addr string
+ proto string
+ )
+ if d.UseDefaultTLSHost {
+ option := &tlsconfig.Options{
+ CAFile: "fixtures/https/ca.pem",
+ CertFile: "fixtures/https/client-cert.pem",
+ KeyFile: "fixtures/https/client-key.pem",
+ }
+ tlsConfig, err := tlsconfig.Client(*option)
+ if err != nil {
+ return nil, err
+ }
+ transport = &http.Transport{
+ TLSClientConfig: tlsConfig,
+ }
+ addr = fmt.Sprintf("%s:%d", opts.DefaultHTTPHost, opts.DefaultTLSHTTPPort)
+ scheme = "https"
+ proto = "tcp"
+ } else if d.UseDefaultHost {
+ addr = opts.DefaultUnixSocket
+ proto = "unix"
+ scheme = "http"
+ transport = &http.Transport{}
+ } else {
+ addr = d.sockPath()
+ proto = "unix"
+ scheme = "http"
+ transport = &http.Transport{}
+ }
+
+ if err := sockets.ConfigureTransport(transport, proto, addr); err != nil {
+ return nil, err
+ }
+ transport.DisableKeepAlives = true
+
+ return &clientConfig{
+ transport: transport,
+ scheme: scheme,
+ addr: addr,
+ }, nil
+}
+
+// Start starts the daemon and returns once it is ready to receive requests.
+func (d *Daemon) Start(t testingT, args ...string) {
+ if err := d.StartWithError(args...); err != nil {
+ t.Fatalf("Error starting daemon with arguments: %v", args)
+ }
+}
+
+// StartWithError starts the daemon and returns once it is ready to receive requests.
+// It returns an error in case it couldn't start.
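+// Callers that want a failed start to fail the test directly should use
+// Start instead.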
+func (d *Daemon) StartWithError(args ...string) error { + logFile, err := os.OpenFile(filepath.Join(d.Folder, "docker.log"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600) + if err != nil { + return errors.Wrapf(err, "[%s] Could not create %s/docker.log", d.id, d.Folder) + } + + return d.StartWithLogFile(logFile, args...) +} + +// StartWithLogFile will start the daemon and attach its streams to a given file. +func (d *Daemon) StartWithLogFile(out *os.File, providedArgs ...string) error { + dockerdBinary, err := exec.LookPath(d.dockerdBinary) + if err != nil { + return errors.Wrapf(err, "[%s] could not find docker binary in $PATH", d.id) + } + args := append(d.GlobalFlags, + "--containerd", "/var/run/docker/libcontainerd/docker-containerd.sock", + "--data-root", d.Root, + "--exec-root", d.execRoot, + "--pidfile", fmt.Sprintf("%s/docker.pid", d.Folder), + fmt.Sprintf("--userland-proxy=%t", d.userlandProxy), + ) + if d.experimental { + args = append(args, "--experimental", "--init") + } + if !(d.UseDefaultHost || d.UseDefaultTLSHost) { + args = append(args, []string{"--host", d.Sock()}...) + } + if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { + args = append(args, []string{"--userns-remap", root}...) + } + + // If we don't explicitly set the log-level or debug flag(-D) then + // turn on debug mode + foundLog := false + foundSd := false + for _, a := range providedArgs { + if strings.Contains(a, "--log-level") || strings.Contains(a, "-D") || strings.Contains(a, "--debug") { + foundLog = true + } + if strings.Contains(a, "--storage-driver") { + foundSd = true + } + } + if !foundLog { + args = append(args, "--debug") + } + if d.storageDriver != "" && !foundSd { + args = append(args, "--storage-driver", d.storageDriver) + } + + args = append(args, providedArgs...) + d.cmd = exec.Command(dockerdBinary, args...) 
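+ // Inherit the test environment, then route the daemon's stdout and stderr
+ // to the provided log file so failures can be diagnosed from one stream.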
+ d.cmd.Env = append(os.Environ(), "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE=1") + d.cmd.Stdout = out + d.cmd.Stderr = out + d.logFile = out + + if err := d.cmd.Start(); err != nil { + return errors.Errorf("[%s] could not start daemon container: %v", d.id, err) + } + + wait := make(chan error) + + go func() { + wait <- d.cmd.Wait() + d.log.Logf("[%s] exiting daemon", d.id) + close(wait) + }() + + d.Wait = wait + + tick := time.Tick(500 * time.Millisecond) + // make sure daemon is ready to receive requests + startTime := time.Now().Unix() + for { + d.log.Logf("[%s] waiting for daemon to start", d.id) + if time.Now().Unix()-startTime > 5 { + // After 5 seconds, give up + return errors.Errorf("[%s] Daemon exited and never started", d.id) + } + select { + case <-time.After(2 * time.Second): + return errors.Errorf("[%s] timeout: daemon does not respond", d.id) + case <-tick: + clientConfig, err := d.getClientConfig() + if err != nil { + return err + } + + client := &http.Client{ + Transport: clientConfig.transport, + } + + req, err := http.NewRequest("GET", "/_ping", nil) + if err != nil { + return errors.Wrapf(err, "[%s] could not create new request", d.id) + } + req.URL.Host = clientConfig.addr + req.URL.Scheme = clientConfig.scheme + resp, err := client.Do(req) + if err != nil { + continue + } + resp.Body.Close() + if resp.StatusCode != http.StatusOK { + d.log.Logf("[%s] received status != 200 OK: %s\n", d.id, resp.Status) + } + d.log.Logf("[%s] daemon started\n", d.id) + d.Root, err = d.queryRootDir() + if err != nil { + return errors.Errorf("[%s] error querying daemon for root directory: %v", d.id, err) + } + return nil + case <-d.Wait: + return errors.Errorf("[%s] Daemon exited during startup", d.id) + } + } +} + +// StartWithBusybox will first start the daemon with Daemon.Start() +// then save the busybox image from the main daemon and load it into this Daemon instance. +func (d *Daemon) StartWithBusybox(t testingT, arg ...string) { + d.Start(t, arg...) + if err := d.LoadBusybox(); err != nil { + t.Fatalf("Error loading busybox image to current daemon: %s\n%v", d.id, err) + } +} + +// Kill will send a SIGKILL to the daemon +func (d *Daemon) Kill() error { + if d.cmd == nil || d.Wait == nil { + return errDaemonNotStarted + } + + defer func() { + d.logFile.Close() + d.cmd = nil + }() + + if err := d.cmd.Process.Kill(); err != nil { + return err + } + + if err := os.Remove(fmt.Sprintf("%s/docker.pid", d.Folder)); err != nil { + return err + } + + return nil +} + +// Pid returns the pid of the daemon +func (d *Daemon) Pid() int { + return d.cmd.Process.Pid +} + +// Interrupt stops the daemon by sending it an Interrupt signal +func (d *Daemon) Interrupt() error { + return d.Signal(os.Interrupt) +} + +// Signal sends the specified signal to the daemon if running +func (d *Daemon) Signal(signal os.Signal) error { + if d.cmd == nil || d.Wait == nil { + return errDaemonNotStarted + } + return d.cmd.Process.Signal(signal) +} + +// DumpStackAndQuit sends SIGQUIT to the daemon, which triggers it to dump its +// stack to its log file and exit +// This is used primarily for gathering debug information on test timeout +func (d *Daemon) DumpStackAndQuit() { + if d.cmd == nil || d.cmd.Process == nil { + return + } + SignalDaemonDump(d.cmd.Process.Pid) +} + +// Stop will send a SIGINT every second and wait for the daemon to stop. +// If it times out, a SIGKILL is sent. +// Stop will not delete the daemon directory. If a purged daemon is needed, +// instantiate a new one with NewDaemon. 
+// If an error occurs while stopping the daemon, the test will fail.
+func (d *Daemon) Stop(t testingT) {
+ err := d.StopWithError()
+ if err != nil {
+ if err != errDaemonNotStarted {
+ t.Fatalf("Error while stopping the daemon %s : %v", d.id, err)
+ } else {
+ t.Logf("Daemon %s is not started", d.id)
+ }
+ }
+}
+
+// StopWithError will send a SIGINT every second and wait for the daemon to stop.
+// If it times out, a SIGKILL is sent.
+// Stop will not delete the daemon directory. If a purged daemon is needed,
+// instantiate a new one with NewDaemon.
+func (d *Daemon) StopWithError() error {
+ if d.cmd == nil || d.Wait == nil {
+ return errDaemonNotStarted
+ }
+
+ defer func() {
+ d.logFile.Close()
+ d.cmd = nil
+ }()
+
+ i := 1
+ tick := time.Tick(time.Second)
+
+ if err := d.cmd.Process.Signal(os.Interrupt); err != nil {
+ if strings.Contains(err.Error(), "os: process already finished") {
+ return errDaemonNotStarted
+ }
+ return errors.Errorf("could not send signal: %v", err)
+ }
+out1:
+ for {
+ select {
+ case err := <-d.Wait:
+ return err
+ case <-time.After(20 * time.Second):
+ // time for stopping jobs and run onShutdown hooks
+ d.log.Logf("[%s] daemon stop timed out after 20 seconds", d.id)
+ break out1
+ }
+ }
+
+out2:
+ for {
+ select {
+ case err := <-d.Wait:
+ return err
+ case <-tick:
+ i++
+ if i > 5 {
+ d.log.Logf("tried to interrupt daemon %d times, now trying to kill it", i)
+ break out2
+ }
+ d.log.Logf("Attempt #%d: daemon is still running with pid %d", i, d.cmd.Process.Pid)
+ if err := d.cmd.Process.Signal(os.Interrupt); err != nil {
+ return errors.Errorf("could not send signal: %v", err)
+ }
+ }
+ }
+
+ if err := d.cmd.Process.Kill(); err != nil {
+ d.log.Logf("Could not kill daemon: %v", err)
+ return err
+ }
+
+ if err := os.Remove(fmt.Sprintf("%s/docker.pid", d.Folder)); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// Restart will restart the daemon by first stopping it and then starting it.
+// If an error occurs while starting the daemon, the test will fail.
+func (d *Daemon) Restart(t testingT, args ...string) {
+ d.Stop(t)
+ d.handleUserns()
+ d.Start(t, args...)
+}
+
+// RestartWithError will restart the daemon by first stopping it and then starting it.
+func (d *Daemon) RestartWithError(arg ...string) error {
+ if err := d.StopWithError(); err != nil {
+ return err
+ }
+ d.handleUserns()
+ return d.StartWithError(arg...)
+} + +func (d *Daemon) handleUserns() { + // in the case of tests running a user namespace-enabled daemon, we have resolved + // d.Root to be the actual final path of the graph dir after the "uid.gid" of + // remapped root is added--we need to subtract it from the path before calling + // start or else we will continue making subdirectories rather than truly restarting + // with the same location/root: + if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { + d.Root = filepath.Dir(d.Root) + } +} + +// LoadBusybox will load the stored busybox into a newly started daemon +func (d *Daemon) LoadBusybox() error { + bb := filepath.Join(d.Folder, "busybox.tar") + if _, err := os.Stat(bb); err != nil { + if !os.IsNotExist(err) { + return errors.Errorf("unexpected error on busybox.tar stat: %v", err) + } + // saving busybox image from main daemon + if out, err := exec.Command(d.dockerBinary, "save", "--output", bb, "busybox:latest").CombinedOutput(); err != nil { + imagesOut, _ := exec.Command(d.dockerBinary, "images", "--format", "{{ .Repository }}:{{ .Tag }}").CombinedOutput() + return errors.Errorf("could not save busybox image: %s\n%s", string(out), strings.TrimSpace(string(imagesOut))) + } + } + // loading busybox image to this daemon + if out, err := d.Cmd("load", "--input", bb); err != nil { + return errors.Errorf("could not load busybox image: %s", out) + } + if err := os.Remove(bb); err != nil { + return err + } + return nil +} + +func (d *Daemon) queryRootDir() (string, error) { + // update daemon root by asking /info endpoint (to support user + // namespaced daemon with root remapped uid.gid directory) + clientConfig, err := d.getClientConfig() + if err != nil { + return "", err + } + + client := &http.Client{ + Transport: clientConfig.transport, + } + + req, err := http.NewRequest("GET", "/info", nil) + if err != nil { + return "", err + } + req.Header.Set("Content-Type", "application/json") + req.URL.Host = clientConfig.addr + req.URL.Scheme = clientConfig.scheme + + resp, err := client.Do(req) + if err != nil { + return "", err + } + body := ioutils.NewReadCloserWrapper(resp.Body, func() error { + return resp.Body.Close() + }) + + type Info struct { + DockerRootDir string + } + var b []byte + var i Info + b, err = testutil.ReadBody(body) + if err == nil && resp.StatusCode == http.StatusOK { + // read the docker root dir + if err = json.Unmarshal(b, &i); err == nil { + return i.DockerRootDir, nil + } + } + return "", err +} + +// Sock returns the socket path of the daemon +func (d *Daemon) Sock() string { + return fmt.Sprintf("unix://" + d.sockPath()) +} + +func (d *Daemon) sockPath() string { + return filepath.Join(SockRoot, d.id+".sock") +} + +// WaitRun waits for a container to be running for 10s +func (d *Daemon) WaitRun(contID string) error { + args := []string{"--host", d.Sock()} + return WaitInspectWithArgs(d.dockerBinary, contID, "{{.State.Running}}", "true", 10*time.Second, args...) 
+} + +// GetBaseDeviceSize returns the base device size of the daemon +func (d *Daemon) GetBaseDeviceSize(c *check.C) int64 { + infoCmdOutput, _, err := testutil.RunCommandPipelineWithOutput( + exec.Command(d.dockerBinary, "-H", d.Sock(), "info"), + exec.Command("grep", "Base Device Size"), + ) + c.Assert(err, checker.IsNil) + basesizeSlice := strings.Split(infoCmdOutput, ":") + basesize := strings.Trim(basesizeSlice[1], " ") + basesize = strings.Trim(basesize, "\n")[:len(basesize)-3] + basesizeFloat, err := strconv.ParseFloat(strings.Trim(basesize, " "), 64) + c.Assert(err, checker.IsNil) + basesizeBytes := int64(basesizeFloat) * (1024 * 1024 * 1024) + return basesizeBytes +} + +// Cmd executes a docker CLI command against this daemon. +// Example: d.Cmd("version") will run docker -H unix://path/to/unix.sock version +func (d *Daemon) Cmd(args ...string) (string, error) { + result := icmd.RunCmd(d.Command(args...)) + return result.Combined(), result.Error +} + +// Command creates a docker CLI command against this daemon, to be executed later. +// Example: d.Command("version") creates a command to run "docker -H unix://path/to/unix.sock version" +func (d *Daemon) Command(args ...string) icmd.Cmd { + return icmd.Command(d.dockerBinary, d.PrependHostArg(args)...) +} + +// PrependHostArg prepend the specified arguments by the daemon host flags +func (d *Daemon) PrependHostArg(args []string) []string { + for _, arg := range args { + if arg == "--host" || arg == "-H" { + return args + } + } + return append([]string{"--host", d.Sock()}, args...) +} + +// SockRequest executes a socket request on a daemon and returns statuscode and output. +func (d *Daemon) SockRequest(method, endpoint string, data interface{}) (int, []byte, error) { + jsonData := bytes.NewBuffer(nil) + if err := json.NewEncoder(jsonData).Encode(data); err != nil { + return -1, nil, err + } + + res, body, err := d.SockRequestRaw(method, endpoint, jsonData, "application/json") + if err != nil { + return -1, nil, err + } + b, err := testutil.ReadBody(body) + return res.StatusCode, b, err +} + +// SockRequestRaw executes a socket request on a daemon and returns an http +// response and a reader for the output data. 
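+// The returned reader is the raw response body; the caller is expected to
+// close it.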
+// Deprecated: use request package instead +func (d *Daemon) SockRequestRaw(method, endpoint string, data io.Reader, ct string) (*http.Response, io.ReadCloser, error) { + return request.SockRequestRaw(method, endpoint, data, ct, d.Sock()) +} + +// LogFileName returns the path the daemon's log file +func (d *Daemon) LogFileName() string { + return d.logFile.Name() +} + +// GetIDByName returns the ID of an object (container, volume, …) given its name +func (d *Daemon) GetIDByName(name string) (string, error) { + return d.inspectFieldWithError(name, "Id") +} + +// ActiveContainers returns the list of ids of the currently running containers +func (d *Daemon) ActiveContainers() (ids []string) { + // FIXME(vdemeester) shouldn't ignore the error + out, _ := d.Cmd("ps", "-q") + for _, id := range strings.Split(out, "\n") { + if id = strings.TrimSpace(id); id != "" { + ids = append(ids, id) + } + } + return +} + +// ReadLogFile returns the content of the daemon log file +func (d *Daemon) ReadLogFile() ([]byte, error) { + return ioutil.ReadFile(d.logFile.Name()) +} + +// InspectField returns the field filter by 'filter' +func (d *Daemon) InspectField(name, filter string) (string, error) { + return d.inspectFilter(name, filter) +} + +func (d *Daemon) inspectFilter(name, filter string) (string, error) { + format := fmt.Sprintf("{{%s}}", filter) + out, err := d.Cmd("inspect", "-f", format, name) + if err != nil { + return "", errors.Errorf("failed to inspect %s: %s", name, out) + } + return strings.TrimSpace(out), nil +} + +func (d *Daemon) inspectFieldWithError(name, field string) (string, error) { + return d.inspectFilter(name, fmt.Sprintf(".%s", field)) +} + +// FindContainerIP returns the ip of the specified container +func (d *Daemon) FindContainerIP(id string) (string, error) { + out, err := d.Cmd("inspect", "--format='{{ .NetworkSettings.Networks.bridge.IPAddress }}'", id) + if err != nil { + return "", err + } + return strings.Trim(out, " \r\n'"), nil +} + +// BuildImageWithOut builds an image with the specified dockerfile and options and returns the output +func (d *Daemon) BuildImageWithOut(name, dockerfile string, useCache bool, buildFlags ...string) (string, int, error) { + buildCmd := BuildImageCmdWithHost(d.dockerBinary, name, dockerfile, d.Sock(), useCache, buildFlags...) 
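+ // Re-wrap the exec.Cmd as an icmd.Cmd so that combined output, exit code
+ // and error are collected in a single result value.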
+ result := icmd.RunCmd(icmd.Cmd{ + Command: buildCmd.Args, + Env: buildCmd.Env, + Dir: buildCmd.Dir, + Stdin: buildCmd.Stdin, + Stdout: buildCmd.Stdout, + }) + return result.Combined(), result.ExitCode, result.Error +} + +// CheckActiveContainerCount returns the number of active containers +// FIXME(vdemeester) should re-use ActivateContainers in some way +func (d *Daemon) CheckActiveContainerCount(c *check.C) (interface{}, check.CommentInterface) { + out, err := d.Cmd("ps", "-q") + c.Assert(err, checker.IsNil) + if len(strings.TrimSpace(out)) == 0 { + return 0, nil + } + return len(strings.Split(strings.TrimSpace(out), "\n")), check.Commentf("output: %q", string(out)) +} + +// ReloadConfig asks the daemon to reload its configuration +func (d *Daemon) ReloadConfig() error { + if d.cmd == nil || d.cmd.Process == nil { + return errors.New("daemon is not running") + } + + errCh := make(chan error) + started := make(chan struct{}) + go func() { + _, body, err := request.DoOnHost(d.Sock(), "/events", request.Method(http.MethodGet)) + close(started) + if err != nil { + errCh <- err + } + defer body.Close() + dec := json.NewDecoder(body) + for { + var e events.Message + if err := dec.Decode(&e); err != nil { + errCh <- err + return + } + if e.Type != events.DaemonEventType { + continue + } + if e.Action != "reload" { + continue + } + close(errCh) // notify that we are done + return + } + }() + + <-started + if err := signalDaemonReload(d.cmd.Process.Pid); err != nil { + return errors.Errorf("error signaling daemon reload: %v", err) + } + select { + case err := <-errCh: + if err != nil { + return errors.Errorf("error waiting for daemon reload event: %v", err) + } + case <-time.After(30 * time.Second): + return errors.New("timeout waiting for daemon reload event") + } + return nil +} + +// WaitInspectWithArgs waits for the specified expression to be equals to the specified expected string in the given time. +// Deprecated: use cli.WaitCmd instead +func WaitInspectWithArgs(dockerBinary, name, expr, expected string, timeout time.Duration, arg ...string) error { + after := time.After(timeout) + + args := append(arg, "inspect", "-f", expr, name) + for { + result := icmd.RunCommand(dockerBinary, args...) + if result.Error != nil { + if !strings.Contains(strings.ToLower(result.Stderr()), "no such") { + return errors.Errorf("error executing docker inspect: %v\n%s", + result.Stderr(), result.Stdout()) + } + select { + case <-after: + return result.Error + default: + time.Sleep(10 * time.Millisecond) + continue + } + } + + out := strings.TrimSpace(result.Stdout()) + if out == expected { + break + } + + select { + case <-after: + return errors.Errorf("condition \"%q == %q\" not true in time (%v)", out, expected, timeout) + default: + } + + time.Sleep(100 * time.Millisecond) + } + return nil +} + +// BuildImageCmdWithHost create a build command with the specified arguments. +// Deprecated +// FIXME(vdemeester) move this away +func BuildImageCmdWithHost(dockerBinary, name, dockerfile, host string, useCache bool, buildFlags ...string) *exec.Cmd { + args := []string{} + if host != "" { + args = append(args, "--host", host) + } + args = append(args, "build", "-t", name) + if !useCache { + args = append(args, "--no-cache") + } + args = append(args, buildFlags...) + args = append(args, "-") + buildCmd := exec.Command(dockerBinary, args...) 
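+ // The trailing "-" argument above makes the CLI read the build context
+ // from stdin, which is wired to the Dockerfile below.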
+ buildCmd.Stdin = strings.NewReader(dockerfile) + return buildCmd +} diff --git a/vendor/github.com/moby/moby/integration-cli/daemon/daemon_swarm.go b/vendor/github.com/moby/moby/integration-cli/daemon/daemon_swarm.go new file mode 100644 index 000000000..ba414066c --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/daemon/daemon_swarm.go @@ -0,0 +1,608 @@ +package daemon + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" + "github.com/pkg/errors" +) + +// Swarm is a test daemon with helpers for participating in a swarm. +type Swarm struct { + *Daemon + swarm.Info + Port int + ListenAddr string +} + +// Init initializes a new swarm cluster. +func (d *Swarm) Init(req swarm.InitRequest) error { + if req.ListenAddr == "" { + req.ListenAddr = d.ListenAddr + } + status, out, err := d.SockRequest("POST", "/swarm/init", req) + if status != http.StatusOK { + return fmt.Errorf("initializing swarm: invalid statuscode %v, %q", status, out) + } + if err != nil { + return fmt.Errorf("initializing swarm: %v", err) + } + info, err := d.SwarmInfo() + if err != nil { + return err + } + d.Info = info + return nil +} + +// Join joins a daemon to an existing cluster. +func (d *Swarm) Join(req swarm.JoinRequest) error { + if req.ListenAddr == "" { + req.ListenAddr = d.ListenAddr + } + status, out, err := d.SockRequest("POST", "/swarm/join", req) + if status != http.StatusOK { + return fmt.Errorf("joining swarm: invalid statuscode %v, %q", status, out) + } + if err != nil { + return fmt.Errorf("joining swarm: %v", err) + } + info, err := d.SwarmInfo() + if err != nil { + return err + } + d.Info = info + return nil +} + +// Leave forces daemon to leave current cluster. 
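+// With force set, a manager leaves the swarm even if that would break quorum.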
+func (d *Swarm) Leave(force bool) error { + url := "/swarm/leave" + if force { + url += "?force=1" + } + status, out, err := d.SockRequest("POST", url, nil) + if status != http.StatusOK { + return fmt.Errorf("leaving swarm: invalid statuscode %v, %q", status, out) + } + if err != nil { + err = fmt.Errorf("leaving swarm: %v", err) + } + return err +} + +// SwarmInfo returns the swarm information of the daemon +func (d *Swarm) SwarmInfo() (swarm.Info, error) { + var info struct { + Swarm swarm.Info + } + status, dt, err := d.SockRequest("GET", "/info", nil) + if status != http.StatusOK { + return info.Swarm, fmt.Errorf("get swarm info: invalid statuscode %v", status) + } + if err != nil { + return info.Swarm, fmt.Errorf("get swarm info: %v", err) + } + if err := json.Unmarshal(dt, &info); err != nil { + return info.Swarm, err + } + return info.Swarm, nil +} + +// Unlock tries to unlock a locked swarm +func (d *Swarm) Unlock(req swarm.UnlockRequest) error { + status, out, err := d.SockRequest("POST", "/swarm/unlock", req) + if status != http.StatusOK { + return fmt.Errorf("unlocking swarm: invalid statuscode %v, %q", status, out) + } + if err != nil { + err = errors.Wrap(err, "unlocking swarm") + } + return err +} + +// ServiceConstructor defines a swarm service constructor function +type ServiceConstructor func(*swarm.Service) + +// NodeConstructor defines a swarm node constructor +type NodeConstructor func(*swarm.Node) + +// SecretConstructor defines a swarm secret constructor +type SecretConstructor func(*swarm.Secret) + +// ConfigConstructor defines a swarm config constructor +type ConfigConstructor func(*swarm.Config) + +// SpecConstructor defines a swarm spec constructor +type SpecConstructor func(*swarm.Spec) + +// CreateServiceWithOptions creates a swarm service given the specified service constructors +// and auth config +func (d *Swarm) CreateServiceWithOptions(c *check.C, opts types.ServiceCreateOptions, f ...ServiceConstructor) string { + cl, err := client.NewClient(d.Sock(), "", nil, nil) + c.Assert(err, checker.IsNil, check.Commentf("failed to create client")) + defer cl.Close() + + var service swarm.Service + for _, fn := range f { + fn(&service) + } + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + res, err := cl.ServiceCreate(ctx, service.Spec, opts) + c.Assert(err, checker.IsNil) + return res.ID +} + +// CreateService creates a swarm service given the specified service constructor +func (d *Swarm) CreateService(c *check.C, f ...ServiceConstructor) string { + return d.CreateServiceWithOptions(c, types.ServiceCreateOptions{}, f...) 
+} + +// GetService returns the swarm service corresponding to the specified id +func (d *Swarm) GetService(c *check.C, id string) *swarm.Service { + var service swarm.Service + status, out, err := d.SockRequest("GET", "/services/"+id, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &service), checker.IsNil) + return &service +} + +// GetServiceTasks returns the swarm tasks for the specified service +func (d *Swarm) GetServiceTasks(c *check.C, service string) []swarm.Task { + var tasks []swarm.Task + + filterArgs := filters.NewArgs() + filterArgs.Add("desired-state", "running") + filterArgs.Add("service", service) + filters, err := filters.ToParam(filterArgs) + c.Assert(err, checker.IsNil) + + status, out, err := d.SockRequest("GET", "/tasks?filters="+filters, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &tasks), checker.IsNil) + return tasks +} + +// CheckServiceTasksInState returns the number of tasks with a matching state, +// and optional message substring. +func (d *Swarm) CheckServiceTasksInState(service string, state swarm.TaskState, message string) func(*check.C) (interface{}, check.CommentInterface) { + return func(c *check.C) (interface{}, check.CommentInterface) { + tasks := d.GetServiceTasks(c, service) + var count int + for _, task := range tasks { + if task.Status.State == state { + if message == "" || strings.Contains(task.Status.Message, message) { + count++ + } + } + } + return count, nil + } +} + +// CheckServiceRunningTasks returns the number of running tasks for the specified service +func (d *Swarm) CheckServiceRunningTasks(service string) func(*check.C) (interface{}, check.CommentInterface) { + return d.CheckServiceTasksInState(service, swarm.TaskStateRunning, "") +} + +// CheckServiceUpdateState returns the current update state for the specified service +func (d *Swarm) CheckServiceUpdateState(service string) func(*check.C) (interface{}, check.CommentInterface) { + return func(c *check.C) (interface{}, check.CommentInterface) { + service := d.GetService(c, service) + if service.UpdateStatus == nil { + return "", nil + } + return service.UpdateStatus.State, nil + } +} + +// CheckPluginRunning returns the runtime state of the plugin +func (d *Swarm) CheckPluginRunning(plugin string) func(c *check.C) (interface{}, check.CommentInterface) { + return func(c *check.C) (interface{}, check.CommentInterface) { + status, out, err := d.SockRequest("GET", "/plugins/"+plugin+"/json", nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + if status != http.StatusOK { + return false, nil + } + + var p types.Plugin + c.Assert(json.Unmarshal(out, &p), checker.IsNil, check.Commentf(string(out))) + + return p.Enabled, check.Commentf("%+v", p) + } +} + +// CheckPluginImage returns the runtime state of the plugin +func (d *Swarm) CheckPluginImage(plugin string) func(c *check.C) (interface{}, check.CommentInterface) { + return func(c *check.C) (interface{}, check.CommentInterface) { + status, out, err := d.SockRequest("GET", "/plugins/"+plugin+"/json", nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + if status != http.StatusOK { + return false, nil + } + + var p types.Plugin + c.Assert(json.Unmarshal(out, &p), checker.IsNil, check.Commentf(string(out))) + return 
p.PluginReference, check.Commentf("%+v", p) + } +} + +// CheckServiceTasks returns the number of tasks for the specified service +func (d *Swarm) CheckServiceTasks(service string) func(*check.C) (interface{}, check.CommentInterface) { + return func(c *check.C) (interface{}, check.CommentInterface) { + tasks := d.GetServiceTasks(c, service) + return len(tasks), nil + } +} + +// CheckRunningTaskNetworks returns the number of times each network is referenced from a task. +func (d *Swarm) CheckRunningTaskNetworks(c *check.C) (interface{}, check.CommentInterface) { + var tasks []swarm.Task + + filterArgs := filters.NewArgs() + filterArgs.Add("desired-state", "running") + filters, err := filters.ToParam(filterArgs) + c.Assert(err, checker.IsNil) + + status, out, err := d.SockRequest("GET", "/tasks?filters="+filters, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &tasks), checker.IsNil) + + result := make(map[string]int) + for _, task := range tasks { + for _, network := range task.Spec.Networks { + result[network.Target]++ + } + } + return result, nil +} + +// CheckRunningTaskImages returns the times each image is running as a task. +func (d *Swarm) CheckRunningTaskImages(c *check.C) (interface{}, check.CommentInterface) { + var tasks []swarm.Task + + filterArgs := filters.NewArgs() + filterArgs.Add("desired-state", "running") + filters, err := filters.ToParam(filterArgs) + c.Assert(err, checker.IsNil) + + status, out, err := d.SockRequest("GET", "/tasks?filters="+filters, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &tasks), checker.IsNil) + + result := make(map[string]int) + for _, task := range tasks { + if task.Status.State == swarm.TaskStateRunning && task.Spec.ContainerSpec != nil { + result[task.Spec.ContainerSpec.Image]++ + } + } + return result, nil +} + +// CheckNodeReadyCount returns the number of ready node on the swarm +func (d *Swarm) CheckNodeReadyCount(c *check.C) (interface{}, check.CommentInterface) { + nodes := d.ListNodes(c) + var readyCount int + for _, node := range nodes { + if node.Status.State == swarm.NodeStateReady { + readyCount++ + } + } + return readyCount, nil +} + +// GetTask returns the swarm task identified by the specified id +func (d *Swarm) GetTask(c *check.C, id string) swarm.Task { + var task swarm.Task + + status, out, err := d.SockRequest("GET", "/tasks/"+id, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &task), checker.IsNil) + return task +} + +// UpdateService updates a swarm service with the specified service constructor +func (d *Swarm) UpdateService(c *check.C, service *swarm.Service, f ...ServiceConstructor) { + for _, fn := range f { + fn(service) + } + url := fmt.Sprintf("/services/%s/update?version=%d", service.ID, service.Version.Index) + status, out, err := d.SockRequest("POST", url, service.Spec) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) +} + +// RemoveService removes the specified service +func (d *Swarm) RemoveService(c *check.C, id string) { + status, out, err := d.SockRequest("DELETE", "/services/"+id, nil) + 
c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) +} + +// GetNode returns a swarm node identified by the specified id +func (d *Swarm) GetNode(c *check.C, id string) *swarm.Node { + var node swarm.Node + status, out, err := d.SockRequest("GET", "/nodes/"+id, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &node), checker.IsNil) + c.Assert(node.ID, checker.Equals, id) + return &node +} + +// RemoveNode removes the specified node +func (d *Swarm) RemoveNode(c *check.C, id string, force bool) { + url := "/nodes/" + id + if force { + url += "?force=1" + } + + status, out, err := d.SockRequest("DELETE", url, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) +} + +// UpdateNode updates a swarm node with the specified node constructor +func (d *Swarm) UpdateNode(c *check.C, id string, f ...NodeConstructor) { + for i := 0; ; i++ { + node := d.GetNode(c, id) + for _, fn := range f { + fn(node) + } + url := fmt.Sprintf("/nodes/%s/update?version=%d", node.ID, node.Version.Index) + status, out, err := d.SockRequest("POST", url, node.Spec) + if i < 10 && strings.Contains(string(out), "update out of sequence") { + time.Sleep(100 * time.Millisecond) + continue + } + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + return + } +} + +// ListNodes returns the list of the current swarm nodes +func (d *Swarm) ListNodes(c *check.C) []swarm.Node { + status, out, err := d.SockRequest("GET", "/nodes", nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + + nodes := []swarm.Node{} + c.Assert(json.Unmarshal(out, &nodes), checker.IsNil) + return nodes +} + +// ListServices returns the list of the current swarm services +func (d *Swarm) ListServices(c *check.C) []swarm.Service { + status, out, err := d.SockRequest("GET", "/services", nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + + services := []swarm.Service{} + c.Assert(json.Unmarshal(out, &services), checker.IsNil) + return services +} + +// CreateSecret creates a secret given the specified spec +func (d *Swarm) CreateSecret(c *check.C, secretSpec swarm.SecretSpec) string { + status, out, err := d.SockRequest("POST", "/secrets/create", secretSpec) + + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf("output: %q", string(out))) + + var scr types.SecretCreateResponse + c.Assert(json.Unmarshal(out, &scr), checker.IsNil) + return scr.ID +} + +// ListSecrets returns the list of the current swarm secrets +func (d *Swarm) ListSecrets(c *check.C) []swarm.Secret { + status, out, err := d.SockRequest("GET", "/secrets", nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + + secrets := []swarm.Secret{} + c.Assert(json.Unmarshal(out, &secrets), checker.IsNil) + return secrets +} + +// GetSecret returns a swarm secret identified by the 
specified id +func (d *Swarm) GetSecret(c *check.C, id string) *swarm.Secret { + var secret swarm.Secret + status, out, err := d.SockRequest("GET", "/secrets/"+id, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &secret), checker.IsNil) + return &secret +} + +// DeleteSecret removes the swarm secret identified by the specified id +func (d *Swarm) DeleteSecret(c *check.C, id string) { + status, out, err := d.SockRequest("DELETE", "/secrets/"+id, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusNoContent, check.Commentf("output: %q", string(out))) +} + +// UpdateSecret updates the swarm secret identified by the specified id +// Currently, only label update is supported. +func (d *Swarm) UpdateSecret(c *check.C, id string, f ...SecretConstructor) { + secret := d.GetSecret(c, id) + for _, fn := range f { + fn(secret) + } + url := fmt.Sprintf("/secrets/%s/update?version=%d", secret.ID, secret.Version.Index) + status, out, err := d.SockRequest("POST", url, secret.Spec) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) +} + +// CreateConfig creates a config given the specified spec +func (d *Swarm) CreateConfig(c *check.C, configSpec swarm.ConfigSpec) string { + status, out, err := d.SockRequest("POST", "/configs/create", configSpec) + + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf("output: %q", string(out))) + + var scr types.ConfigCreateResponse + c.Assert(json.Unmarshal(out, &scr), checker.IsNil) + return scr.ID +} + +// ListConfigs returns the list of the current swarm configs +func (d *Swarm) ListConfigs(c *check.C) []swarm.Config { + status, out, err := d.SockRequest("GET", "/configs", nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + + configs := []swarm.Config{} + c.Assert(json.Unmarshal(out, &configs), checker.IsNil) + return configs +} + +// GetConfig returns a swarm config identified by the specified id +func (d *Swarm) GetConfig(c *check.C, id string) *swarm.Config { + var config swarm.Config + status, out, err := d.SockRequest("GET", "/configs/"+id, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &config), checker.IsNil) + return &config +} + +// DeleteConfig removes the swarm config identified by the specified id +func (d *Swarm) DeleteConfig(c *check.C, id string) { + status, out, err := d.SockRequest("DELETE", "/configs/"+id, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusNoContent, check.Commentf("output: %q", string(out))) +} + +// UpdateConfig updates the swarm config identified by the specified id +// Currently, only label update is supported. 
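+// The update is submitted with the version index read back from GetConfig,
+// so a concurrent writer can still cause an "update out of sequence" error.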
+func (d *Swarm) UpdateConfig(c *check.C, id string, f ...ConfigConstructor) { + config := d.GetConfig(c, id) + for _, fn := range f { + fn(config) + } + url := fmt.Sprintf("/configs/%s/update?version=%d", config.ID, config.Version.Index) + status, out, err := d.SockRequest("POST", url, config.Spec) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) +} + +// GetSwarm returns the current swarm object +func (d *Swarm) GetSwarm(c *check.C) swarm.Swarm { + var sw swarm.Swarm + status, out, err := d.SockRequest("GET", "/swarm", nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &sw), checker.IsNil) + return sw +} + +// UpdateSwarm updates the current swarm object with the specified spec constructors +func (d *Swarm) UpdateSwarm(c *check.C, f ...SpecConstructor) { + sw := d.GetSwarm(c) + for _, fn := range f { + fn(&sw.Spec) + } + url := fmt.Sprintf("/swarm/update?version=%d", sw.Version.Index) + status, out, err := d.SockRequest("POST", url, sw.Spec) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) +} + +// RotateTokens update the swarm to rotate tokens +func (d *Swarm) RotateTokens(c *check.C) { + var sw swarm.Swarm + status, out, err := d.SockRequest("GET", "/swarm", nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &sw), checker.IsNil) + + url := fmt.Sprintf("/swarm/update?version=%d&rotateWorkerToken=true&rotateManagerToken=true", sw.Version.Index) + status, out, err = d.SockRequest("POST", url, sw.Spec) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) +} + +// JoinTokens returns the current swarm join tokens +func (d *Swarm) JoinTokens(c *check.C) swarm.JoinTokens { + var sw swarm.Swarm + status, out, err := d.SockRequest("GET", "/swarm", nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &sw), checker.IsNil) + return sw.JoinTokens +} + +// CheckLocalNodeState returns the current swarm node state +func (d *Swarm) CheckLocalNodeState(c *check.C) (interface{}, check.CommentInterface) { + info, err := d.SwarmInfo() + c.Assert(err, checker.IsNil) + return info.LocalNodeState, nil +} + +// CheckControlAvailable returns the current swarm control available +func (d *Swarm) CheckControlAvailable(c *check.C) (interface{}, check.CommentInterface) { + info, err := d.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + return info.ControlAvailable, nil +} + +// CheckLeader returns whether there is a leader on the swarm or not +func (d *Swarm) CheckLeader(c *check.C) (interface{}, check.CommentInterface) { + errList := check.Commentf("could not get node list") + status, out, err := d.SockRequest("GET", "/nodes", nil) + if err != nil { + return err, errList + } + if status != http.StatusOK { + return fmt.Errorf("expected http status OK, got: %d", status), errList + } + + var ls []swarm.Node + if err := json.Unmarshal(out, 
&ls); err != nil { + return err, errList + } + + for _, node := range ls { + if node.ManagerStatus != nil && node.ManagerStatus.Leader { + return nil, nil + } + } + return fmt.Errorf("no leader"), check.Commentf("could not find leader") +} + +// CmdRetryOutOfSequence tries the specified command against the current daemon for 10 times +func (d *Swarm) CmdRetryOutOfSequence(args ...string) (string, error) { + for i := 0; ; i++ { + out, err := d.Cmd(args...) + if err != nil { + if strings.Contains(out, "update out of sequence") { + if i < 10 { + continue + } + } + } + return out, err + } +} diff --git a/vendor/github.com/moby/moby/integration-cli/daemon/daemon_unix.go b/vendor/github.com/moby/moby/integration-cli/daemon/daemon_unix.go new file mode 100644 index 000000000..77eda2de2 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/daemon/daemon_unix.go @@ -0,0 +1,36 @@ +// +build !windows + +package daemon + +import ( + "os" + "path/filepath" + + "github.com/go-check/check" + "golang.org/x/sys/unix" +) + +func cleanupExecRoot(c *check.C, execRoot string) { + // Cleanup network namespaces in the exec root of this + // daemon because this exec root is specific to this + // daemon instance and has no chance of getting + // cleaned up when a new daemon is instantiated with a + // new exec root. + netnsPath := filepath.Join(execRoot, "netns") + filepath.Walk(netnsPath, func(path string, info os.FileInfo, err error) error { + if err := unix.Unmount(path, unix.MNT_FORCE); err != nil { + c.Logf("unmount of %s failed: %v", path, err) + } + os.Remove(path) + return nil + }) +} + +// SignalDaemonDump sends a signal to the daemon to write a dump file +func SignalDaemonDump(pid int) { + unix.Kill(pid, unix.SIGQUIT) +} + +func signalDaemonReload(pid int) error { + return unix.Kill(pid, unix.SIGHUP) +} diff --git a/vendor/github.com/moby/moby/integration-cli/daemon/daemon_windows.go b/vendor/github.com/moby/moby/integration-cli/daemon/daemon_windows.go new file mode 100644 index 000000000..f8df27cf5 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/daemon/daemon_windows.go @@ -0,0 +1,54 @@ +package daemon + +import ( + "fmt" + "strconv" + "syscall" + "unsafe" + + "github.com/go-check/check" + "golang.org/x/sys/windows" +) + +func openEvent(desiredAccess uint32, inheritHandle bool, name string, proc *windows.LazyProc) (handle windows.Handle, err error) { + namep, _ := windows.UTF16PtrFromString(name) + var _p2 uint32 + if inheritHandle { + _p2 = 1 + } + r0, _, e1 := proc.Call(uintptr(desiredAccess), uintptr(_p2), uintptr(unsafe.Pointer(namep))) + handle = windows.Handle(r0) + if handle == windows.InvalidHandle { + err = e1 + } + return +} + +func pulseEvent(handle windows.Handle, proc *windows.LazyProc) (err error) { + r0, _, _ := proc.Call(uintptr(handle)) + if r0 != 0 { + err = syscall.Errno(r0) + } + return +} + +// SignalDaemonDump sends a signal to the daemon to write a dump file +func SignalDaemonDump(pid int) { + modkernel32 := windows.NewLazySystemDLL("kernel32.dll") + procOpenEvent := modkernel32.NewProc("OpenEventW") + procPulseEvent := modkernel32.NewProc("PulseEvent") + + ev := "Global\\docker-daemon-" + strconv.Itoa(pid) + h2, _ := openEvent(0x0002, false, ev, procOpenEvent) + if h2 == 0 { + return + } + pulseEvent(h2, procPulseEvent) +} + +func signalDaemonReload(pid int) error { + return fmt.Errorf("daemon reload not supported") +} + +func cleanupExecRoot(c *check.C, execRoot string) { +} diff --git 
a/vendor/github.com/moby/moby/integration-cli/daemon_swarm_hack_test.go b/vendor/github.com/moby/moby/integration-cli/daemon_swarm_hack_test.go new file mode 100644 index 000000000..e1fb333f8 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/daemon_swarm_hack_test.go @@ -0,0 +1,23 @@ +package main + +import ( + "github.com/docker/docker/integration-cli/daemon" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) getDaemon(c *check.C, nodeID string) *daemon.Swarm { + s.daemonsLock.Lock() + defer s.daemonsLock.Unlock() + for _, d := range s.daemons { + if d.NodeID == nodeID { + return d + } + } + c.Fatalf("could not find node with id: %s", nodeID) + return nil +} + +// nodeCmd executes a command on a given node via the normal docker socket +func (s *DockerSwarmSuite) nodeCmd(c *check.C, id string, args ...string) (string, error) { + return s.getDaemon(c, id).Cmd(args...) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_attach_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_attach_test.go new file mode 100644 index 000000000..11f7340c1 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_attach_test.go @@ -0,0 +1,210 @@ +package main + +import ( + "bufio" + "bytes" + "context" + "io" + "net" + "net/http" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/docker/pkg/testutil" + "github.com/go-check/check" + "golang.org/x/net/websocket" +) + +func (s *DockerSuite) TestGetContainersAttachWebsocket(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-dit", "busybox", "cat") + + rwc, err := request.SockConn(time.Duration(10*time.Second), daemonHost()) + c.Assert(err, checker.IsNil) + + cleanedContainerID := strings.TrimSpace(out) + config, err := websocket.NewConfig( + "/containers/"+cleanedContainerID+"/attach/ws?stream=1&stdin=1&stdout=1&stderr=1", + "http://localhost", + ) + c.Assert(err, checker.IsNil) + + ws, err := websocket.NewClient(config, rwc) + c.Assert(err, checker.IsNil) + defer ws.Close() + + expected := []byte("hello") + actual := make([]byte, len(expected)) + + outChan := make(chan error) + go func() { + _, err := io.ReadFull(ws, actual) + outChan <- err + close(outChan) + }() + + inChan := make(chan error) + go func() { + _, err := ws.Write(expected) + inChan <- err + close(inChan) + }() + + select { + case err := <-inChan: + c.Assert(err, checker.IsNil) + case <-time.After(5 * time.Second): + c.Fatal("Timeout writing to ws") + } + + select { + case err := <-outChan: + c.Assert(err, checker.IsNil) + case <-time.After(5 * time.Second): + c.Fatal("Timeout reading from ws") + } + + c.Assert(actual, checker.DeepEquals, expected, check.Commentf("Websocket didn't return the expected data")) +} + +// regression gh14320 +func (s *DockerSuite) TestPostContainersAttachContainerNotFound(c *check.C) { + client, err := request.NewHTTPClient(daemonHost()) + c.Assert(err, checker.IsNil) + req, err := request.New(daemonHost(), "/containers/doesnotexist/attach", request.Method(http.MethodPost)) + resp, err := client.Do(req) + // connection will shutdown, err should be "persistent connection closed" + c.Assert(resp.StatusCode, checker.Equals, http.StatusNotFound) + content, err := testutil.ReadBody(resp.Body) + c.Assert(err, checker.IsNil) + expected := "No such container: 
doesnotexist\r\n" + c.Assert(string(content), checker.Equals, expected) +} + +func (s *DockerSuite) TestGetContainersWsAttachContainerNotFound(c *check.C) { + status, body, err := request.SockRequest("GET", "/containers/doesnotexist/attach/ws", nil, daemonHost()) + c.Assert(status, checker.Equals, http.StatusNotFound) + c.Assert(err, checker.IsNil) + expected := "No such container: doesnotexist" + c.Assert(getErrorMessage(c, body), checker.Contains, expected) +} + +func (s *DockerSuite) TestPostContainersAttach(c *check.C) { + testRequires(c, DaemonIsLinux) + + expectSuccess := func(conn net.Conn, br *bufio.Reader, stream string, tty bool) { + defer conn.Close() + expected := []byte("success") + _, err := conn.Write(expected) + c.Assert(err, checker.IsNil) + + conn.SetReadDeadline(time.Now().Add(time.Second)) + lenHeader := 0 + if !tty { + lenHeader = 8 + } + actual := make([]byte, len(expected)+lenHeader) + _, err = io.ReadFull(br, actual) + c.Assert(err, checker.IsNil) + if !tty { + fdMap := map[string]byte{ + "stdin": 0, + "stdout": 1, + "stderr": 2, + } + c.Assert(actual[0], checker.Equals, fdMap[stream]) + } + c.Assert(actual[lenHeader:], checker.DeepEquals, expected, check.Commentf("Attach didn't return the expected data from %s", stream)) + } + + expectTimeout := func(conn net.Conn, br *bufio.Reader, stream string) { + defer conn.Close() + _, err := conn.Write([]byte{'t'}) + c.Assert(err, checker.IsNil) + + conn.SetReadDeadline(time.Now().Add(time.Second)) + actual := make([]byte, 1) + _, err = io.ReadFull(br, actual) + opErr, ok := err.(*net.OpError) + c.Assert(ok, checker.Equals, true, check.Commentf("Error is expected to be *net.OpError, got %v", err)) + c.Assert(opErr.Timeout(), checker.Equals, true, check.Commentf("Read from %s is expected to timeout", stream)) + } + + // Create a container that only emits stdout. + cid, _ := dockerCmd(c, "run", "-di", "busybox", "cat") + cid = strings.TrimSpace(cid) + // Attach to the container's stdout stream. + conn, br, err := request.SockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stdout=1", nil, "text/plain", daemonHost()) + c.Assert(err, checker.IsNil) + // Check if the data from stdout can be received. + expectSuccess(conn, br, "stdout", false) + // Attach to the container's stderr stream. + conn, br, err = request.SockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stderr=1", nil, "text/plain", daemonHost()) + c.Assert(err, checker.IsNil) + // Since the container only emits stdout, attaching to stderr should return nothing. + expectTimeout(conn, br, "stdout") + + // Test the similar functions of the stderr stream. + cid, _ = dockerCmd(c, "run", "-di", "busybox", "/bin/sh", "-c", "cat >&2") + cid = strings.TrimSpace(cid) + conn, br, err = request.SockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stderr=1", nil, "text/plain", daemonHost()) + c.Assert(err, checker.IsNil) + expectSuccess(conn, br, "stderr", false) + conn, br, err = request.SockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stdout=1", nil, "text/plain", daemonHost()) + c.Assert(err, checker.IsNil) + expectTimeout(conn, br, "stderr") + + // Test with tty. + cid, _ = dockerCmd(c, "run", "-dit", "busybox", "/bin/sh", "-c", "cat >&2") + cid = strings.TrimSpace(cid) + // Attach to stdout only. 
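+ // With a tty allocated, stdout and stderr are multiplexed onto one stream,
+ // so expectSuccess is called with tty=true and no stream-header bytes.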
+ conn, br, err = request.SockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stdout=1", nil, "text/plain", daemonHost()) + c.Assert(err, checker.IsNil) + expectSuccess(conn, br, "stdout", true) + + // Attach without stdout stream. + conn, br, err = request.SockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stderr=1", nil, "text/plain", daemonHost()) + c.Assert(err, checker.IsNil) + // Nothing should be received because both the stdout and stderr of the container will be + // sent to the client as stdout when tty is enabled. + expectTimeout(conn, br, "stdout") + + // Test the client API + // Make sure we don't see "hello" if Logs is false + client, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + + cid, _ = dockerCmd(c, "run", "-di", "busybox", "/bin/sh", "-c", "echo hello; cat") + cid = strings.TrimSpace(cid) + + attachOpts := types.ContainerAttachOptions{ + Stream: true, + Stdin: true, + Stdout: true, + } + + resp, err := client.ContainerAttach(context.Background(), cid, attachOpts) + c.Assert(err, checker.IsNil) + expectSuccess(resp.Conn, resp.Reader, "stdout", false) + + // Make sure we do see "hello" if Logs is true + attachOpts.Logs = true + resp, err = client.ContainerAttach(context.Background(), cid, attachOpts) + c.Assert(err, checker.IsNil) + + defer resp.Conn.Close() + resp.Conn.SetReadDeadline(time.Now().Add(time.Second)) + + _, err = resp.Conn.Write([]byte("success")) + c.Assert(err, checker.IsNil) + + actualStdout := new(bytes.Buffer) + actualStderr := new(bytes.Buffer) + stdcopy.StdCopy(actualStdout, actualStderr, resp.Reader) + c.Assert(actualStdout.Bytes(), checker.DeepEquals, []byte("hello\nsuccess"), check.Commentf("Attach didn't return the expected data from stdout")) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_auth_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_auth_test.go new file mode 100644 index 000000000..cc903c01f --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_auth_test.go @@ -0,0 +1,26 @@ +package main + +import ( + "net/http" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/go-check/check" +) + +// Test case for #22244 +func (s *DockerSuite) TestAuthAPI(c *check.C) { + testRequires(c, Network) + config := types.AuthConfig{ + Username: "no-user", + Password: "no-password", + } + + expected := "Get https://registry-1.docker.io/v2/: unauthorized: incorrect username or password" + status, body, err := request.SockRequest("POST", "/auth", config, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusUnauthorized) + msg := getErrorMessage(c, body) + c.Assert(msg, checker.Contains, expected, check.Commentf("Expected: %v, got: %v", expected, msg)) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_build_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_build_test.go new file mode 100644 index 000000000..c1ab7661e --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_build_test.go @@ -0,0 +1,535 @@ +package main + +import ( + "archive/tar" + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "regexp" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client/session" + "github.com/docker/docker/client/session/filesync" + "github.com/docker/docker/integration-cli/checker" + 
"github.com/docker/docker/integration-cli/cli/build/fakecontext" + "github.com/docker/docker/integration-cli/cli/build/fakegit" + "github.com/docker/docker/integration-cli/cli/build/fakestorage" + "github.com/docker/docker/integration-cli/request" + "github.com/docker/docker/pkg/testutil" + "github.com/go-check/check" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/net/context" + "golang.org/x/sync/errgroup" +) + +func (s *DockerSuite) TestBuildAPIDockerFileRemote(c *check.C) { + testRequires(c, NotUserNamespace) + var testD string + if testEnv.DaemonPlatform() == "windows" { + testD = `FROM busybox +RUN find / -name ba* +RUN find /tmp/` + } else { + // -xdev is required because sysfs can cause EPERM + testD = `FROM busybox +RUN find / -xdev -name ba* +RUN find /tmp/` + } + server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{"testD": testD})) + defer server.Close() + + res, body, err := request.Post("/build?dockerfile=baz&remote="+server.URL()+"/testD", request.JSON) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + buf, err := testutil.ReadBody(body) + c.Assert(err, checker.IsNil) + + // Make sure Dockerfile exists. + // Make sure 'baz' doesn't exist ANYWHERE despite being mentioned in the URL + out := string(buf) + c.Assert(out, checker.Contains, "RUN find /tmp") + c.Assert(out, checker.Not(checker.Contains), "baz") +} + +func (s *DockerSuite) TestBuildAPIRemoteTarballContext(c *check.C) { + buffer := new(bytes.Buffer) + tw := tar.NewWriter(buffer) + defer tw.Close() + + dockerfile := []byte("FROM busybox") + err := tw.WriteHeader(&tar.Header{ + Name: "Dockerfile", + Size: int64(len(dockerfile)), + }) + // failed to write tar file header + c.Assert(err, checker.IsNil) + + _, err = tw.Write(dockerfile) + // failed to write tar file content + c.Assert(err, checker.IsNil) + + // failed to close tar archive + c.Assert(tw.Close(), checker.IsNil) + + server := fakestorage.New(c, "", fakecontext.WithBinaryFiles(map[string]*bytes.Buffer{ + "testT.tar": buffer, + })) + defer server.Close() + + res, b, err := request.Post("/build?remote="+server.URL()+"/testT.tar", request.ContentType("application/tar")) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + b.Close() +} + +func (s *DockerSuite) TestBuildAPIRemoteTarballContextWithCustomDockerfile(c *check.C) { + buffer := new(bytes.Buffer) + tw := tar.NewWriter(buffer) + defer tw.Close() + + dockerfile := []byte(`FROM busybox +RUN echo 'wrong'`) + err := tw.WriteHeader(&tar.Header{ + Name: "Dockerfile", + Size: int64(len(dockerfile)), + }) + // failed to write tar file header + c.Assert(err, checker.IsNil) + + _, err = tw.Write(dockerfile) + // failed to write tar file content + c.Assert(err, checker.IsNil) + + custom := []byte(`FROM busybox +RUN echo 'right' +`) + err = tw.WriteHeader(&tar.Header{ + Name: "custom", + Size: int64(len(custom)), + }) + + // failed to write tar file header + c.Assert(err, checker.IsNil) + + _, err = tw.Write(custom) + // failed to write tar file content + c.Assert(err, checker.IsNil) + + // failed to close tar archive + c.Assert(tw.Close(), checker.IsNil) + + server := fakestorage.New(c, "", fakecontext.WithBinaryFiles(map[string]*bytes.Buffer{ + "testT.tar": buffer, + })) + defer server.Close() + + url := "/build?dockerfile=custom&remote=" + server.URL() + "/testT.tar" + res, body, err := request.Post(url, request.ContentType("application/tar")) + c.Assert(err, 
checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + defer body.Close() + content, err := testutil.ReadBody(body) + c.Assert(err, checker.IsNil) + + // Build used the wrong dockerfile. + c.Assert(string(content), checker.Not(checker.Contains), "wrong") +} + +func (s *DockerSuite) TestBuildAPILowerDockerfile(c *check.C) { + git := fakegit.New(c, "repo", map[string]string{ + "dockerfile": `FROM busybox +RUN echo from dockerfile`, + }, false) + defer git.Close() + + res, body, err := request.Post("/build?remote="+git.RepoURL, request.JSON) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + buf, err := testutil.ReadBody(body) + c.Assert(err, checker.IsNil) + + out := string(buf) + c.Assert(out, checker.Contains, "from dockerfile") +} + +func (s *DockerSuite) TestBuildAPIBuildGitWithF(c *check.C) { + git := fakegit.New(c, "repo", map[string]string{ + "baz": `FROM busybox +RUN echo from baz`, + "Dockerfile": `FROM busybox +RUN echo from Dockerfile`, + }, false) + defer git.Close() + + // Make sure it tries to 'dockerfile' query param value + res, body, err := request.Post("/build?dockerfile=baz&remote="+git.RepoURL, request.JSON) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + buf, err := testutil.ReadBody(body) + c.Assert(err, checker.IsNil) + + out := string(buf) + c.Assert(out, checker.Contains, "from baz") +} + +func (s *DockerSuite) TestBuildAPIDoubleDockerfile(c *check.C) { + testRequires(c, UnixCli) // dockerfile overwrites Dockerfile on Windows + git := fakegit.New(c, "repo", map[string]string{ + "Dockerfile": `FROM busybox +RUN echo from Dockerfile`, + "dockerfile": `FROM busybox +RUN echo from dockerfile`, + }, false) + defer git.Close() + + // Make sure it tries to 'dockerfile' query param value + res, body, err := request.Post("/build?remote="+git.RepoURL, request.JSON) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + buf, err := testutil.ReadBody(body) + c.Assert(err, checker.IsNil) + + out := string(buf) + c.Assert(out, checker.Contains, "from Dockerfile") +} + +func (s *DockerSuite) TestBuildAPIUnnormalizedTarPaths(c *check.C) { + // Make sure that build context tars with entries of the form + // x/./y don't cause caching false positives. 
+ + buildFromTarContext := func(fileContents []byte) string { + buffer := new(bytes.Buffer) + tw := tar.NewWriter(buffer) + defer tw.Close() + + dockerfile := []byte(`FROM busybox + COPY dir /dir/`) + err := tw.WriteHeader(&tar.Header{ + Name: "Dockerfile", + Size: int64(len(dockerfile)), + }) + //failed to write tar file header + c.Assert(err, checker.IsNil) + + _, err = tw.Write(dockerfile) + // failed to write Dockerfile in tar file content + c.Assert(err, checker.IsNil) + + err = tw.WriteHeader(&tar.Header{ + Name: "dir/./file", + Size: int64(len(fileContents)), + }) + //failed to write tar file header + c.Assert(err, checker.IsNil) + + _, err = tw.Write(fileContents) + // failed to write file contents in tar file content + c.Assert(err, checker.IsNil) + + // failed to close tar archive + c.Assert(tw.Close(), checker.IsNil) + + res, body, err := request.Post("/build", request.RawContent(ioutil.NopCloser(buffer)), request.ContentType("application/x-tar")) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + out, err := testutil.ReadBody(body) + c.Assert(err, checker.IsNil) + lines := strings.Split(string(out), "\n") + c.Assert(len(lines), checker.GreaterThan, 1) + c.Assert(lines[len(lines)-2], checker.Matches, ".*Successfully built [0-9a-f]{12}.*") + + re := regexp.MustCompile("Successfully built ([0-9a-f]{12})") + matches := re.FindStringSubmatch(lines[len(lines)-2]) + return matches[1] + } + + imageA := buildFromTarContext([]byte("abc")) + imageB := buildFromTarContext([]byte("def")) + + c.Assert(imageA, checker.Not(checker.Equals), imageB) +} + +func (s *DockerSuite) TestBuildOnBuildWithCopy(c *check.C) { + dockerfile := ` + FROM ` + minimalBaseImage() + ` as onbuildbase + ONBUILD COPY file /file + + FROM onbuildbase + ` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFile("file", "some content"), + ) + defer ctx.Close() + + res, body, err := request.Post( + "/build", + request.RawContent(ctx.AsTarReader(c)), + request.ContentType("application/x-tar")) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + out, err := testutil.ReadBody(body) + c.Assert(err, checker.IsNil) + c.Assert(string(out), checker.Contains, "Successfully built") +} + +func (s *DockerSuite) TestBuildOnBuildCache(c *check.C) { + build := func(dockerfile string) []byte { + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + ) + defer ctx.Close() + + res, body, err := request.Post( + "/build", + request.RawContent(ctx.AsTarReader(c)), + request.ContentType("application/x-tar")) + require.NoError(c, err) + assert.Equal(c, http.StatusOK, res.StatusCode) + + out, err := testutil.ReadBody(body) + require.NoError(c, err) + assert.Contains(c, string(out), "Successfully built") + return out + } + + dockerfile := ` + FROM ` + minimalBaseImage() + ` as onbuildbase + ENV something=bar + ONBUILD ENV foo=bar + ` + build(dockerfile) + + dockerfile += "FROM onbuildbase" + out := build(dockerfile) + + imageIDs := getImageIDsFromBuild(c, out) + assert.Len(c, imageIDs, 2) + parentID, childID := imageIDs[0], imageIDs[1] + + client, err := request.NewClient() + require.NoError(c, err) + + // check parentID is correct + image, _, err := client.ImageInspectWithRaw(context.Background(), childID) + require.NoError(c, err) + assert.Equal(c, parentID, image.Parent) +} + +func (s *DockerRegistrySuite) TestBuildCopyFromForcePull(c *check.C) { + client, err := request.NewClient() + require.NoError(c, 
err) + + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + // tag the image to upload it to the private registry + err = client.ImageTag(context.TODO(), "busybox", repoName) + assert.Nil(c, err) + // push the image to the registry + rc, err := client.ImagePush(context.TODO(), repoName, types.ImagePushOptions{RegistryAuth: "{}"}) + assert.Nil(c, err) + _, err = io.Copy(ioutil.Discard, rc) + assert.Nil(c, err) + + dockerfile := fmt.Sprintf(` + FROM %s AS foo + RUN touch abc + FROM %s + COPY --from=foo /abc / + `, repoName, repoName) + + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + ) + defer ctx.Close() + + res, body, err := request.Post( + "/build?pull=1", + request.RawContent(ctx.AsTarReader(c)), + request.ContentType("application/x-tar")) + require.NoError(c, err) + assert.Equal(c, http.StatusOK, res.StatusCode) + + out, err := testutil.ReadBody(body) + require.NoError(c, err) + assert.Contains(c, string(out), "Successfully built") +} + +func (s *DockerSuite) TestBuildAddRemoteNoDecompress(c *check.C) { + buffer := new(bytes.Buffer) + tw := tar.NewWriter(buffer) + dt := []byte("contents") + err := tw.WriteHeader(&tar.Header{ + Name: "foo", + Size: int64(len(dt)), + Mode: 0600, + Typeflag: tar.TypeReg, + }) + require.NoError(c, err) + _, err = tw.Write(dt) + require.NoError(c, err) + err = tw.Close() + require.NoError(c, err) + + server := fakestorage.New(c, "", fakecontext.WithBinaryFiles(map[string]*bytes.Buffer{ + "test.tar": buffer, + })) + defer server.Close() + + dockerfile := fmt.Sprintf(` + FROM busybox + ADD %s/test.tar / + RUN [ -f test.tar ] + `, server.URL()) + + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + ) + defer ctx.Close() + + res, body, err := request.Post( + "/build", + request.RawContent(ctx.AsTarReader(c)), + request.ContentType("application/x-tar")) + require.NoError(c, err) + assert.Equal(c, http.StatusOK, res.StatusCode) + + out, err := testutil.ReadBody(body) + require.NoError(c, err) + assert.Contains(c, string(out), "Successfully built") +} + +func (s *DockerSuite) TestBuildWithSession(c *check.C) { + testRequires(c, ExperimentalDaemon) + + dockerfile := ` + FROM busybox + COPY file / + RUN cat /file + ` + + fctx := fakecontext.New(c, "", + fakecontext.WithFile("file", "some content"), + ) + defer fctx.Close() + + out := testBuildWithSession(c, fctx.Dir, dockerfile) + assert.Contains(c, out, "some content") + + fctx.Add("second", "contentcontent") + + dockerfile += ` + COPY second / + RUN cat /second + ` + + out = testBuildWithSession(c, fctx.Dir, dockerfile) + assert.Equal(c, strings.Count(out, "Using cache"), 2) + assert.Contains(c, out, "contentcontent") + + client, err := request.NewClient() + require.NoError(c, err) + + du, err := client.DiskUsage(context.TODO()) + assert.Nil(c, err) + assert.True(c, du.BuilderSize > 10) + + out = testBuildWithSession(c, fctx.Dir, dockerfile) + assert.Equal(c, strings.Count(out, "Using cache"), 4) + + du2, err := client.DiskUsage(context.TODO()) + assert.Nil(c, err) + assert.Equal(c, du.BuilderSize, du2.BuilderSize) + + // rebuild with regular tar, confirm cache still applies + fctx.Add("Dockerfile", dockerfile) + res, body, err := request.Post( + "/build", + request.RawContent(fctx.AsTarReader(c)), + request.ContentType("application/x-tar")) + require.NoError(c, err) + assert.Equal(c, http.StatusOK, res.StatusCode) + + outBytes, err := testutil.ReadBody(body) + require.NoError(c, err) + assert.Contains(c, string(outBytes), "Successfully built") + 
assert.Equal(c, strings.Count(string(outBytes), "Using cache"), 4) + + _, err = client.BuildCachePrune(context.TODO()) + assert.Nil(c, err) + + du, err = client.DiskUsage(context.TODO()) + assert.Nil(c, err) + assert.Equal(c, du.BuilderSize, int64(0)) +} + +func testBuildWithSession(c *check.C, dir, dockerfile string) (outStr string) { + client, err := request.NewClient() + require.NoError(c, err) + + sess, err := session.NewSession("foo1", "foo") + assert.Nil(c, err) + + fsProvider := filesync.NewFSSyncProvider(dir, nil) + sess.Allow(fsProvider) + + g, ctx := errgroup.WithContext(context.Background()) + + g.Go(func() error { + return sess.Run(ctx, client.DialSession) + }) + + g.Go(func() error { + res, body, err := request.Post("/build?remote=client-session&session="+sess.UUID(), func(req *http.Request) error { + req.Body = ioutil.NopCloser(strings.NewReader(dockerfile)) + return nil + }) + if err != nil { + return err + } + assert.Equal(c, res.StatusCode, http.StatusOK) + out, err := testutil.ReadBody(body) + require.NoError(c, err) + assert.Contains(c, string(out), "Successfully built") + sess.Close() + outStr = string(out) + return nil + }) + + err = g.Wait() + assert.Nil(c, err) + return +} + +type buildLine struct { + Stream string + Aux struct { + ID string + } +} + +func getImageIDsFromBuild(c *check.C, output []byte) []string { + ids := []string{} + for _, line := range bytes.Split(output, []byte("\n")) { + if len(line) == 0 { + continue + } + entry := buildLine{} + require.NoError(c, json.Unmarshal(line, &entry)) + if entry.Aux.ID != "" { + ids = append(ids, entry.Aux.ID) + } + } + return ids +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_containers_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_containers_test.go new file mode 100644 index 000000000..25c724425 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_containers_test.go @@ -0,0 +1,1950 @@ +package main + +import ( + "archive/tar" + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/integration-cli/request" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/testutil" + "github.com/docker/docker/volume" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestContainerAPIGetAll(c *check.C) { + startCount := getContainerCount(c) + name := "getall" + dockerCmd(c, "run", "--name", name, "busybox", "true") + + status, body, err := request.SockRequest("GET", "/containers/json?all=1", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var inspectJSON []struct { + Names []string + } + err = json.Unmarshal(body, &inspectJSON) + c.Assert(err, checker.IsNil, check.Commentf("unable to unmarshal response body")) + + c.Assert(inspectJSON, checker.HasLen, startCount+1) + + actual := inspectJSON[0].Names[0] + c.Assert(actual, checker.Equals, "/"+name) +} + +// regression test for empty json field being omitted #13691 
+func (s *DockerSuite) TestContainerAPIGetJSONNoFieldsOmitted(c *check.C) { + dockerCmd(c, "run", "busybox", "true") + + status, body, err := request.SockRequest("GET", "/containers/json?all=1", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + // empty Labels field triggered this bug, make sense to check for everything + // cause even Ports for instance can trigger this bug + // better safe than sorry.. + fields := []string{ + "Id", + "Names", + "Image", + "Command", + "Created", + "Ports", + "Labels", + "Status", + "NetworkSettings", + } + + // decoding into types.Container do not work since it eventually unmarshal + // and empty field to an empty go map, so we just check for a string + for _, f := range fields { + if !strings.Contains(string(body), f) { + c.Fatalf("Field %s is missing and it shouldn't", f) + } + } +} + +type containerPs struct { + Names []string + Ports []map[string]interface{} +} + +// regression test for non-empty fields from #13901 +func (s *DockerSuite) TestContainerAPIPsOmitFields(c *check.C) { + // Problematic for Windows porting due to networking not yet being passed back + testRequires(c, DaemonIsLinux) + name := "pstest" + port := 80 + runSleepingContainer(c, "--name", name, "--expose", strconv.Itoa(port)) + + status, body, err := request.SockRequest("GET", "/containers/json?all=1", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var resp []containerPs + err = json.Unmarshal(body, &resp) + c.Assert(err, checker.IsNil) + + var foundContainer *containerPs + for _, container := range resp { + for _, testName := range container.Names { + if "/"+name == testName { + foundContainer = &container + break + } + } + } + + c.Assert(foundContainer.Ports, checker.HasLen, 1) + c.Assert(foundContainer.Ports[0]["PrivatePort"], checker.Equals, float64(port)) + _, ok := foundContainer.Ports[0]["PublicPort"] + c.Assert(ok, checker.Not(checker.Equals), true) + _, ok = foundContainer.Ports[0]["IP"] + c.Assert(ok, checker.Not(checker.Equals), true) +} + +func (s *DockerSuite) TestContainerAPIGetExport(c *check.C) { + // Not supported on Windows as Windows does not support docker export + testRequires(c, DaemonIsLinux) + name := "exportcontainer" + dockerCmd(c, "run", "--name", name, "busybox", "touch", "/test") + + status, body, err := request.SockRequest("GET", "/containers/"+name+"/export", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + found := false + for tarReader := tar.NewReader(bytes.NewReader(body)); ; { + h, err := tarReader.Next() + if err != nil && err == io.EOF { + break + } + if h.Name == "test" { + found = true + break + } + } + c.Assert(found, checker.True, check.Commentf("The created test file has not been found in the exported image")) +} + +func (s *DockerSuite) TestContainerAPIGetChanges(c *check.C) { + // Not supported on Windows as Windows does not support docker diff (/containers/name/changes) + testRequires(c, DaemonIsLinux) + name := "changescontainer" + dockerCmd(c, "run", "--name", name, "busybox", "rm", "/etc/passwd") + + status, body, err := request.SockRequest("GET", "/containers/"+name+"/changes", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + changes := []struct { + Kind int + Path string + }{} + c.Assert(json.Unmarshal(body, &changes), checker.IsNil, check.Commentf("unable to unmarshal response body")) + + // Check the changelog 
for removal of /etc/passwd + success := false + for _, elem := range changes { + if elem.Path == "/etc/passwd" && elem.Kind == 2 { + success = true + } + } + c.Assert(success, checker.True, check.Commentf("/etc/passwd has been removed but is not present in the diff")) +} + +func (s *DockerSuite) TestGetContainerStats(c *check.C) { + var ( + name = "statscontainer" + ) + runSleepingContainer(c, "--name", name) + + type b struct { + status int + body []byte + err error + } + bc := make(chan b, 1) + go func() { + status, body, err := request.SockRequest("GET", "/containers/"+name+"/stats", nil, daemonHost()) + bc <- b{status, body, err} + }() + + // allow some time to stream the stats from the container + time.Sleep(4 * time.Second) + dockerCmd(c, "rm", "-f", name) + + // collect the results from the stats stream or timeout and fail + // if the stream was not disconnected. + select { + case <-time.After(2 * time.Second): + c.Fatal("stream was not closed after container was removed") + case sr := <-bc: + c.Assert(sr.err, checker.IsNil) + c.Assert(sr.status, checker.Equals, http.StatusOK) + + dec := json.NewDecoder(bytes.NewBuffer(sr.body)) + var s *types.Stats + // decode only one object from the stream + c.Assert(dec.Decode(&s), checker.IsNil) + } +} + +func (s *DockerSuite) TestGetContainerStatsRmRunning(c *check.C) { + out := runSleepingContainer(c) + id := strings.TrimSpace(out) + + buf := &testutil.ChannelBuffer{C: make(chan []byte, 1)} + defer buf.Close() + + _, body, err := request.Get("/containers/"+id+"/stats?stream=1", request.JSON) + c.Assert(err, checker.IsNil) + defer body.Close() + + chErr := make(chan error, 1) + go func() { + _, err = io.Copy(buf, body) + chErr <- err + }() + + b := make([]byte, 32) + // make sure we've got some stats + _, err = buf.ReadTimeout(b, 2*time.Second) + c.Assert(err, checker.IsNil) + + // Now remove without `-f` and make sure we are still pulling stats + _, _, err = dockerCmdWithError("rm", id) + c.Assert(err, checker.Not(checker.IsNil), check.Commentf("rm should have failed but didn't")) + _, err = buf.ReadTimeout(b, 2*time.Second) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "rm", "-f", id) + c.Assert(<-chErr, checker.IsNil) +} + +// regression test for gh13421 +// previous test was just checking one stat entry so it didn't fail (stats with +// stream false always return one stat) +func (s *DockerSuite) TestGetContainerStatsStream(c *check.C) { + name := "statscontainer" + runSleepingContainer(c, "--name", name) + + type b struct { + status int + body io.ReadCloser + err error + } + bc := make(chan b, 1) + go func() { + status, body, err := request.Get("/containers/" + name + "/stats") + bc <- b{status.StatusCode, body, err} + }() + + // allow some time to stream the stats from the container + time.Sleep(4 * time.Second) + dockerCmd(c, "rm", "-f", name) + + // collect the results from the stats stream or timeout and fail + // if the stream was not disconnected. 
+ select { + case <-time.After(2 * time.Second): + c.Fatal("stream was not closed after container was removed") + case sr := <-bc: + c.Assert(sr.err, checker.IsNil) + c.Assert(sr.status, checker.Equals, http.StatusOK) + + b, err := ioutil.ReadAll(sr.body) + c.Assert(err, checker.IsNil) + s := string(b) + // count occurrences of "read" of types.Stats + if l := strings.Count(s, "read"); l < 2 { + c.Fatalf("Expected more than one stat streamed, got %d", l) + } + } +} + +func (s *DockerSuite) TestGetContainerStatsNoStream(c *check.C) { + name := "statscontainer" + runSleepingContainer(c, "--name", name) + + type b struct { + status int + body []byte + err error + } + bc := make(chan b, 1) + go func() { + status, body, err := request.SockRequest("GET", "/containers/"+name+"/stats?stream=0", nil, daemonHost()) + bc <- b{status, body, err} + }() + + // allow some time to stream the stats from the container + time.Sleep(4 * time.Second) + dockerCmd(c, "rm", "-f", name) + + // collect the results from the stats stream or timeout and fail + // if the stream was not disconnected. + select { + case <-time.After(2 * time.Second): + c.Fatal("stream was not closed after container was removed") + case sr := <-bc: + c.Assert(sr.err, checker.IsNil) + c.Assert(sr.status, checker.Equals, http.StatusOK) + + s := string(sr.body) + // count occurrences of `"read"` of types.Stats + c.Assert(strings.Count(s, `"read"`), checker.Equals, 1, check.Commentf("Expected only one stat streamed, got %d", strings.Count(s, `"read"`))) + } +} + +func (s *DockerSuite) TestGetStoppedContainerStats(c *check.C) { + name := "statscontainer" + dockerCmd(c, "create", "--name", name, "busybox", "ps") + + type stats struct { + status int + err error + } + chResp := make(chan stats) + + // We expect an immediate response, but if it's not immediate, the test would hang, so put it in a goroutine + // below we'll check this on a timeout. 
+ go func() { + resp, body, err := request.Get("/containers/" + name + "/stats") + body.Close() + chResp <- stats{resp.StatusCode, err} + }() + + select { + case r := <-chResp: + c.Assert(r.err, checker.IsNil) + c.Assert(r.status, checker.Equals, http.StatusOK) + case <-time.After(10 * time.Second): + c.Fatal("timeout waiting for stats response for stopped container") + } +} + +func (s *DockerSuite) TestContainerAPIPause(c *check.C) { + // Problematic on Windows as Windows does not support pause + testRequires(c, DaemonIsLinux) + + getPaused := func(c *check.C) []string { + return strings.Fields(cli.DockerCmd(c, "ps", "-f", "status=paused", "-q", "-a").Combined()) + } + + out := cli.DockerCmd(c, "run", "-d", "busybox", "sleep", "30").Combined() + ContainerID := strings.TrimSpace(out) + + resp, _, err := request.Post("/containers/" + ContainerID + "/pause") + c.Assert(err, checker.IsNil) + c.Assert(resp.StatusCode, checker.Equals, http.StatusNoContent) + + pausedContainers := getPaused(c) + + if len(pausedContainers) != 1 || stringid.TruncateID(ContainerID) != pausedContainers[0] { + c.Fatalf("there should be one paused container and not %d", len(pausedContainers)) + } + + resp, _, err = request.Post("/containers/" + ContainerID + "/unpause") + c.Assert(err, checker.IsNil) + c.Assert(resp.StatusCode, checker.Equals, http.StatusNoContent) + + pausedContainers = getPaused(c) + c.Assert(pausedContainers, checker.HasLen, 0, check.Commentf("There should be no paused container.")) +} + +func (s *DockerSuite) TestContainerAPITop(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "top") + id := strings.TrimSpace(string(out)) + c.Assert(waitRun(id), checker.IsNil) + + type topResp struct { + Titles []string + Processes [][]string + } + var top topResp + status, b, err := request.SockRequest("GET", "/containers/"+id+"/top?ps_args=aux", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(json.Unmarshal(b, &top), checker.IsNil) + c.Assert(top.Titles, checker.HasLen, 11, check.Commentf("expected 11 titles, found %d: %v", len(top.Titles), top.Titles)) + + if top.Titles[0] != "USER" || top.Titles[10] != "COMMAND" { + c.Fatalf("expected `USER` at `Titles[0]` and `COMMAND` at Titles[10]: %v", top.Titles) + } + c.Assert(top.Processes, checker.HasLen, 2, check.Commentf("expected 2 processes, found %d: %v", len(top.Processes), top.Processes)) + c.Assert(top.Processes[0][10], checker.Equals, "/bin/sh -c top") + c.Assert(top.Processes[1][10], checker.Equals, "top") +} + +func (s *DockerSuite) TestContainerAPITopWindows(c *check.C) { + testRequires(c, DaemonIsWindows) + out := runSleepingContainer(c, "-d") + id := strings.TrimSpace(string(out)) + c.Assert(waitRun(id), checker.IsNil) + + type topResp struct { + Titles []string + Processes [][]string + } + var top topResp + status, b, err := request.SockRequest("GET", "/containers/"+id+"/top", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(json.Unmarshal(b, &top), checker.IsNil) + c.Assert(top.Titles, checker.HasLen, 4, check.Commentf("expected 4 titles, found %d: %v", len(top.Titles), top.Titles)) + + if top.Titles[0] != "Name" || top.Titles[3] != "Private Working Set" { + c.Fatalf("expected `Name` at `Titles[0]` and `Private Working Set` at Titles[3]: %v", top.Titles) + } + c.Assert(len(top.Processes), checker.GreaterOrEqualThan, 2, check.Commentf("expected at least 2 processes, 
found %d: %v", len(top.Processes), top.Processes)) + + foundProcess := false + expectedProcess := "busybox.exe" + for _, process := range top.Processes { + if process[0] == expectedProcess { + foundProcess = true + break + } + } + + c.Assert(foundProcess, checker.Equals, true, check.Commentf("expected to find %s: %v", expectedProcess, top.Processes)) +} + +func (s *DockerSuite) TestContainerAPICommit(c *check.C) { + cName := "testapicommit" + dockerCmd(c, "run", "--name="+cName, "busybox", "/bin/sh", "-c", "touch /test") + + name := "testcontainerapicommit" + status, b, err := request.SockRequest("POST", "/commit?repo="+name+"&testtag=tag&container="+cName, nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + type resp struct { + ID string + } + var img resp + c.Assert(json.Unmarshal(b, &img), checker.IsNil) + + cmd := inspectField(c, img.ID, "Config.Cmd") + c.Assert(cmd, checker.Equals, "[/bin/sh -c touch /test]", check.Commentf("got wrong Cmd from commit: %q", cmd)) + + // sanity check, make sure the image is what we think it is + dockerCmd(c, "run", img.ID, "ls", "/test") +} + +func (s *DockerSuite) TestContainerAPICommitWithLabelInConfig(c *check.C) { + cName := "testapicommitwithconfig" + dockerCmd(c, "run", "--name="+cName, "busybox", "/bin/sh", "-c", "touch /test") + + config := map[string]interface{}{ + "Labels": map[string]string{"key1": "value1", "key2": "value2"}, + } + + name := "testcontainerapicommitwithconfig" + status, b, err := request.SockRequest("POST", "/commit?repo="+name+"&container="+cName, config, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + type resp struct { + ID string + } + var img resp + c.Assert(json.Unmarshal(b, &img), checker.IsNil) + + label1 := inspectFieldMap(c, img.ID, "Config.Labels", "key1") + c.Assert(label1, checker.Equals, "value1") + + label2 := inspectFieldMap(c, img.ID, "Config.Labels", "key2") + c.Assert(label2, checker.Equals, "value2") + + cmd := inspectField(c, img.ID, "Config.Cmd") + c.Assert(cmd, checker.Equals, "[/bin/sh -c touch /test]", check.Commentf("got wrong Cmd from commit: %q", cmd)) + + // sanity check, make sure the image is what we think it is + dockerCmd(c, "run", img.ID, "ls", "/test") +} + +func (s *DockerSuite) TestContainerAPIBadPort(c *check.C) { + // TODO Windows to Windows CI - Port this test + testRequires(c, DaemonIsLinux) + config := map[string]interface{}{ + "Image": "busybox", + "Cmd": []string{"/bin/sh", "-c", "echo test"}, + "PortBindings": map[string]interface{}{ + "8080/tcp": []map[string]interface{}{ + { + "HostIP": "", + "HostPort": "aa80", + }, + }, + }, + } + + jsonData := bytes.NewBuffer(nil) + json.NewEncoder(jsonData).Encode(config) + + status, body, err := request.SockRequest("POST", "/containers/create", config, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) + c.Assert(getErrorMessage(c, body), checker.Equals, `invalid port specification: "aa80"`, check.Commentf("Incorrect error msg: %s", body)) +} + +func (s *DockerSuite) TestContainerAPICreate(c *check.C) { + config := map[string]interface{}{ + "Image": "busybox", + "Cmd": []string{"/bin/sh", "-c", "touch /test && ls /test"}, + } + + status, b, err := request.SockRequest("POST", "/containers/create", config, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + type createResp struct { + ID string + } + var container 
createResp + c.Assert(json.Unmarshal(b, &container), checker.IsNil) + + out, _ := dockerCmd(c, "start", "-a", container.ID) + c.Assert(strings.TrimSpace(out), checker.Equals, "/test") +} + +func (s *DockerSuite) TestContainerAPICreateEmptyConfig(c *check.C) { + config := map[string]interface{}{} + + status, body, err := request.SockRequest("POST", "/containers/create", config, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) + + expected := "Config cannot be empty in order to create a container" + c.Assert(getErrorMessage(c, body), checker.Equals, expected) +} + +func (s *DockerSuite) TestContainerAPICreateMultipleNetworksConfig(c *check.C) { + // Container creation must fail if client specified configurations for more than one network + config := map[string]interface{}{ + "Image": "busybox", + "NetworkingConfig": networktypes.NetworkingConfig{ + EndpointsConfig: map[string]*networktypes.EndpointSettings{ + "net1": {}, + "net2": {}, + "net3": {}, + }, + }, + } + + status, body, err := request.SockRequest("POST", "/containers/create", config, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusBadRequest) + msg := getErrorMessage(c, body) + // network name order in error message is not deterministic + c.Assert(msg, checker.Contains, "Container cannot be connected to network endpoints") + c.Assert(msg, checker.Contains, "net1") + c.Assert(msg, checker.Contains, "net2") + c.Assert(msg, checker.Contains, "net3") +} + +func (s *DockerSuite) TestContainerAPICreateWithHostName(c *check.C) { + domainName := "test-domain" + hostName := "test-hostname" + config := map[string]interface{}{ + "Image": "busybox", + "Hostname": hostName, + "Domainname": domainName, + } + + status, body, err := request.SockRequest("POST", "/containers/create", config, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + var container containertypes.ContainerCreateCreatedBody + c.Assert(json.Unmarshal(body, &container), checker.IsNil) + + status, body, err = request.SockRequest("GET", "/containers/"+container.ID+"/json", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var containerJSON types.ContainerJSON + c.Assert(json.Unmarshal(body, &containerJSON), checker.IsNil) + c.Assert(containerJSON.Config.Hostname, checker.Equals, hostName, check.Commentf("Mismatched Hostname")) + c.Assert(containerJSON.Config.Domainname, checker.Equals, domainName, check.Commentf("Mismatched Domainname")) +} + +func (s *DockerSuite) TestContainerAPICreateBridgeNetworkMode(c *check.C) { + // Windows does not support bridge + testRequires(c, DaemonIsLinux) + UtilCreateNetworkMode(c, "bridge") +} + +func (s *DockerSuite) TestContainerAPICreateOtherNetworkModes(c *check.C) { + // Windows does not support these network modes + testRequires(c, DaemonIsLinux, NotUserNamespace) + UtilCreateNetworkMode(c, "host") + UtilCreateNetworkMode(c, "container:web1") +} + +func UtilCreateNetworkMode(c *check.C, networkMode string) { + config := map[string]interface{}{ + "Image": "busybox", + "HostConfig": map[string]interface{}{"NetworkMode": networkMode}, + } + + status, body, err := request.SockRequest("POST", "/containers/create", config, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + var container containertypes.ContainerCreateCreatedBody + c.Assert(json.Unmarshal(body, &container), checker.IsNil) 
+ + status, body, err = request.SockRequest("GET", "/containers/"+container.ID+"/json", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var containerJSON types.ContainerJSON + c.Assert(json.Unmarshal(body, &containerJSON), checker.IsNil) + c.Assert(containerJSON.HostConfig.NetworkMode, checker.Equals, containertypes.NetworkMode(networkMode), check.Commentf("Mismatched NetworkMode")) +} + +func (s *DockerSuite) TestContainerAPICreateWithCpuSharesCpuset(c *check.C) { + // TODO Windows to Windows CI. The CpuShares part could be ported. + testRequires(c, DaemonIsLinux) + config := map[string]interface{}{ + "Image": "busybox", + "CpuShares": 512, + "CpusetCpus": "0", + } + + status, body, err := request.SockRequest("POST", "/containers/create", config, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + var container containertypes.ContainerCreateCreatedBody + c.Assert(json.Unmarshal(body, &container), checker.IsNil) + + status, body, err = request.SockRequest("GET", "/containers/"+container.ID+"/json", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var containerJSON types.ContainerJSON + + c.Assert(json.Unmarshal(body, &containerJSON), checker.IsNil) + + out := inspectField(c, containerJSON.ID, "HostConfig.CpuShares") + c.Assert(out, checker.Equals, "512") + + outCpuset := inspectField(c, containerJSON.ID, "HostConfig.CpusetCpus") + c.Assert(outCpuset, checker.Equals, "0") +} + +func (s *DockerSuite) TestContainerAPIVerifyHeader(c *check.C) { + config := map[string]interface{}{ + "Image": "busybox", + } + + create := func(ct string) (*http.Response, io.ReadCloser, error) { + jsonData := bytes.NewBuffer(nil) + c.Assert(json.NewEncoder(jsonData).Encode(config), checker.IsNil) + return request.Post("/containers/create", request.RawContent(ioutil.NopCloser(jsonData)), request.ContentType(ct)) + } + + // Try with no content-type + res, body, err := create("") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + body.Close() + + // Try with wrong content-type + res, body, err = create("application/xml") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + body.Close() + + // now application/json + res, body, err = create("application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusCreated) + body.Close() +} + +//Issue 14230. 
daemon should return 500 for invalid port syntax +func (s *DockerSuite) TestContainerAPIInvalidPortSyntax(c *check.C) { + config := `{ + "Image": "busybox", + "HostConfig": { + "NetworkMode": "default", + "PortBindings": { + "19039;1230": [ + {} + ] + } + } + }` + + res, body, err := request.Post("/containers/create", request.RawString(config), request.JSON) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + + b, err := testutil.ReadBody(body) + c.Assert(err, checker.IsNil) + c.Assert(string(b[:]), checker.Contains, "invalid port") +} + +func (s *DockerSuite) TestContainerAPIRestartPolicyInvalidPolicyName(c *check.C) { + config := `{ + "Image": "busybox", + "HostConfig": { + "RestartPolicy": { + "Name": "something", + "MaximumRetryCount": 0 + } + } + }` + + res, body, err := request.Post("/containers/create", request.RawString(config), request.JSON) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + + b, err := testutil.ReadBody(body) + c.Assert(err, checker.IsNil) + c.Assert(string(b[:]), checker.Contains, "invalid restart policy") +} + +func (s *DockerSuite) TestContainerAPIRestartPolicyRetryMismatch(c *check.C) { + config := `{ + "Image": "busybox", + "HostConfig": { + "RestartPolicy": { + "Name": "always", + "MaximumRetryCount": 2 + } + } + }` + + res, body, err := request.Post("/containers/create", request.RawString(config), request.JSON) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + + b, err := testutil.ReadBody(body) + c.Assert(err, checker.IsNil) + c.Assert(string(b[:]), checker.Contains, "maximum retry count cannot be used with restart policy") +} + +func (s *DockerSuite) TestContainerAPIRestartPolicyNegativeRetryCount(c *check.C) { + config := `{ + "Image": "busybox", + "HostConfig": { + "RestartPolicy": { + "Name": "on-failure", + "MaximumRetryCount": -2 + } + } + }` + + res, body, err := request.Post("/containers/create", request.RawString(config), request.JSON) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + + b, err := testutil.ReadBody(body) + c.Assert(err, checker.IsNil) + c.Assert(string(b[:]), checker.Contains, "maximum retry count cannot be negative") +} + +func (s *DockerSuite) TestContainerAPIRestartPolicyDefaultRetryCount(c *check.C) { + config := `{ + "Image": "busybox", + "HostConfig": { + "RestartPolicy": { + "Name": "on-failure", + "MaximumRetryCount": 0 + } + } + }` + + res, _, err := request.Post("/containers/create", request.RawString(config), request.JSON) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusCreated) +} + +// Issue 7941 - test to make sure a "null" in JSON is just ignored. 
+// W/o this fix a null in JSON would be parsed into a string var as "null" +func (s *DockerSuite) TestContainerAPIPostCreateNull(c *check.C) { + config := `{ + "Hostname":"", + "Domainname":"", + "Memory":0, + "MemorySwap":0, + "CpuShares":0, + "Cpuset":null, + "AttachStdin":true, + "AttachStdout":true, + "AttachStderr":true, + "ExposedPorts":{}, + "Tty":true, + "OpenStdin":true, + "StdinOnce":true, + "Env":[], + "Cmd":"ls", + "Image":"busybox", + "Volumes":{}, + "WorkingDir":"", + "Entrypoint":null, + "NetworkDisabled":false, + "OnBuild":null}` + + res, body, err := request.Post("/containers/create", request.RawString(config), request.JSON) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusCreated) + + b, err := testutil.ReadBody(body) + c.Assert(err, checker.IsNil) + type createResp struct { + ID string + } + var container createResp + c.Assert(json.Unmarshal(b, &container), checker.IsNil) + out := inspectField(c, container.ID, "HostConfig.CpusetCpus") + c.Assert(out, checker.Equals, "") + + outMemory := inspectField(c, container.ID, "HostConfig.Memory") + c.Assert(outMemory, checker.Equals, "0") + outMemorySwap := inspectField(c, container.ID, "HostConfig.MemorySwap") + c.Assert(outMemorySwap, checker.Equals, "0") +} + +func (s *DockerSuite) TestCreateWithTooLowMemoryLimit(c *check.C) { + // TODO Windows: Port once memory is supported + testRequires(c, DaemonIsLinux) + config := `{ + "Image": "busybox", + "Cmd": "ls", + "OpenStdin": true, + "CpuShares": 100, + "Memory": 524287 + }` + + res, body, err := request.Post("/containers/create", request.RawString(config), request.JSON) + c.Assert(err, checker.IsNil) + b, err2 := testutil.ReadBody(body) + c.Assert(err2, checker.IsNil) + + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + c.Assert(string(b), checker.Contains, "Minimum memory limit allowed is 4MB") +} + +func (s *DockerSuite) TestContainerAPIRename(c *check.C) { + out, _ := dockerCmd(c, "run", "--name", "TestContainerAPIRename", "-d", "busybox", "sh") + + containerID := strings.TrimSpace(out) + newName := "TestContainerAPIRenameNew" + statusCode, _, err := request.SockRequest("POST", "/containers/"+containerID+"/rename?name="+newName, nil, daemonHost()) + c.Assert(err, checker.IsNil) + // 204 No Content is expected, not 200 + c.Assert(statusCode, checker.Equals, http.StatusNoContent) + + name := inspectField(c, containerID, "Name") + c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container")) +} + +func (s *DockerSuite) TestContainerAPIKill(c *check.C) { + name := "test-api-kill" + runSleepingContainer(c, "-i", "--name", name) + + status, _, err := request.SockRequest("POST", "/containers/"+name+"/kill", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + + state := inspectField(c, name, "State.Running") + c.Assert(state, checker.Equals, "false", check.Commentf("got wrong State from container %s: %q", name, state)) +} + +func (s *DockerSuite) TestContainerAPIRestart(c *check.C) { + name := "test-api-restart" + runSleepingContainer(c, "-di", "--name", name) + + status, _, err := request.SockRequest("POST", "/containers/"+name+"/restart?t=1", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + c.Assert(waitInspect(name, "{{ .State.Restarting }} {{ .State.Running }}", "false true", 15*time.Second), checker.IsNil) +} + +func (s *DockerSuite) TestContainerAPIRestartNotimeoutParam(c 
*check.C) { + name := "test-api-restart-no-timeout-param" + out := runSleepingContainer(c, "-di", "--name", name) + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + status, _, err := request.SockRequest("POST", "/containers/"+name+"/restart", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + c.Assert(waitInspect(name, "{{ .State.Restarting }} {{ .State.Running }}", "false true", 15*time.Second), checker.IsNil) +} + +func (s *DockerSuite) TestContainerAPIStart(c *check.C) { + name := "testing-start" + config := map[string]interface{}{ + "Image": "busybox", + "Cmd": append([]string{"/bin/sh", "-c"}, sleepCommandForDaemonPlatform()...), + "OpenStdin": true, + } + + status, _, err := request.SockRequest("POST", "/containers/create?name="+name, config, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + status, _, err = request.SockRequest("POST", "/containers/"+name+"/start", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + + // second call to start should give 304 + status, _, err = request.SockRequest("POST", "/containers/"+name+"/start", nil, daemonHost()) + c.Assert(err, checker.IsNil) + + // TODO(tibor): figure out why this doesn't work on windows + if testEnv.LocalDaemon() { + c.Assert(status, checker.Equals, http.StatusNotModified) + } +} + +func (s *DockerSuite) TestContainerAPIStop(c *check.C) { + name := "test-api-stop" + runSleepingContainer(c, "-i", "--name", name) + + status, _, err := request.SockRequest("POST", "/containers/"+name+"/stop?t=30", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + c.Assert(waitInspect(name, "{{ .State.Running }}", "false", 60*time.Second), checker.IsNil) + + // second call to start should give 304 + status, _, err = request.SockRequest("POST", "/containers/"+name+"/stop?t=30", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotModified) +} + +func (s *DockerSuite) TestContainerAPIWait(c *check.C) { + name := "test-api-wait" + + sleepCmd := "/bin/sleep" + if testEnv.DaemonPlatform() == "windows" { + sleepCmd = "sleep" + } + dockerCmd(c, "run", "--name", name, "busybox", sleepCmd, "2") + + status, body, err := request.SockRequest("POST", "/containers/"+name+"/wait", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(waitInspect(name, "{{ .State.Running }}", "false", 60*time.Second), checker.IsNil) + + var waitres containertypes.ContainerWaitOKBody + c.Assert(json.Unmarshal(body, &waitres), checker.IsNil) + c.Assert(waitres.StatusCode, checker.Equals, int64(0)) +} + +func (s *DockerSuite) TestContainerAPICopyNotExistsAnyMore(c *check.C) { + name := "test-container-api-copy" + dockerCmd(c, "run", "--name", name, "busybox", "touch", "/test.txt") + + postData := types.CopyConfig{ + Resource: "/test.txt", + } + + status, _, err := request.SockRequest("POST", "/containers/"+name+"/copy", postData, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotFound) +} + +func (s *DockerSuite) TestContainerAPICopyPre124(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later + name := "test-container-api-copy" + dockerCmd(c, "run", "--name", name, "busybox", "touch", "/test.txt") + + postData := types.CopyConfig{ + Resource: "/test.txt", + } + 
+ status, body, err := request.SockRequest("POST", "/v1.23/containers/"+name+"/copy", postData, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + found := false + for tarReader := tar.NewReader(bytes.NewReader(body)); ; { + h, err := tarReader.Next() + if err != nil { + if err == io.EOF { + break + } + c.Fatal(err) + } + if h.Name == "test.txt" { + found = true + break + } + } + c.Assert(found, checker.True) +} + +func (s *DockerSuite) TestContainerAPICopyResourcePathEmptyPr124(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later + name := "test-container-api-copy-resource-empty" + dockerCmd(c, "run", "--name", name, "busybox", "touch", "/test.txt") + + postData := types.CopyConfig{ + Resource: "", + } + + status, body, err := request.SockRequest("POST", "/v1.23/containers/"+name+"/copy", postData, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) + c.Assert(string(body), checker.Matches, "Path cannot be empty\n") +} + +func (s *DockerSuite) TestContainerAPICopyResourcePathNotFoundPre124(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later + name := "test-container-api-copy-resource-not-found" + dockerCmd(c, "run", "--name", name, "busybox") + + postData := types.CopyConfig{ + Resource: "/notexist", + } + + status, body, err := request.SockRequest("POST", "/v1.23/containers/"+name+"/copy", postData, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) + c.Assert(string(body), checker.Matches, "Could not find the file /notexist in container "+name+"\n") +} + +func (s *DockerSuite) TestContainerAPICopyContainerNotFoundPr124(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later + postData := types.CopyConfig{ + Resource: "/something", + } + + status, _, err := request.SockRequest("POST", "/v1.23/containers/notexists/copy", postData, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotFound) +} + +func (s *DockerSuite) TestContainerAPIDelete(c *check.C) { + out := runSleepingContainer(c) + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + dockerCmd(c, "stop", id) + + status, _, err := request.SockRequest("DELETE", "/containers/"+id, nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) +} + +func (s *DockerSuite) TestContainerAPIDeleteNotExist(c *check.C) { + status, body, err := request.SockRequest("DELETE", "/containers/doesnotexist", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotFound) + c.Assert(getErrorMessage(c, body), checker.Matches, "No such container: doesnotexist") +} + +func (s *DockerSuite) TestContainerAPIDeleteForce(c *check.C) { + out := runSleepingContainer(c) + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + status, _, err := request.SockRequest("DELETE", "/containers/"+id+"?force=1", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) +} + +func (s *DockerSuite) TestContainerAPIDeleteRemoveLinks(c *check.C) { + // Windows does not support links + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "--name", "tlink1", "busybox", "top") + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + out, _ = dockerCmd(c, "run", "--link", 
"tlink1:tlink1", "--name", "tlink2", "-d", "busybox", "top") + + id2 := strings.TrimSpace(out) + c.Assert(waitRun(id2), checker.IsNil) + + links := inspectFieldJSON(c, id2, "HostConfig.Links") + c.Assert(links, checker.Equals, "[\"/tlink1:/tlink2/tlink1\"]", check.Commentf("expected to have links between containers")) + + status, b, err := request.SockRequest("DELETE", "/containers/tlink2/tlink1?link=1", nil, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusNoContent, check.Commentf(string(b))) + + linksPostRm := inspectFieldJSON(c, id2, "HostConfig.Links") + c.Assert(linksPostRm, checker.Equals, "null", check.Commentf("call to api deleteContainer links should have removed the specified links")) +} + +func (s *DockerSuite) TestContainerAPIDeleteConflict(c *check.C) { + out := runSleepingContainer(c) + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + status, _, err := request.SockRequest("DELETE", "/containers/"+id, nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusConflict) +} + +func (s *DockerSuite) TestContainerAPIDeleteRemoveVolume(c *check.C) { + testRequires(c, SameHostDaemon) + + vol := "/testvolume" + if testEnv.DaemonPlatform() == "windows" { + vol = `c:\testvolume` + } + + out := runSleepingContainer(c, "-v", vol) + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + source, err := inspectMountSourceField(id, vol) + _, err = os.Stat(source) + c.Assert(err, checker.IsNil) + + status, _, err := request.SockRequest("DELETE", "/containers/"+id+"?v=1&force=1", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + _, err = os.Stat(source) + c.Assert(os.IsNotExist(err), checker.True, check.Commentf("expected to get ErrNotExist error, got %v", err)) +} + +// Regression test for https://github.com/docker/docker/issues/6231 +func (s *DockerSuite) TestContainerAPIChunkedEncoding(c *check.C) { + + config := map[string]interface{}{ + "Image": "busybox", + "Cmd": append([]string{"/bin/sh", "-c"}, sleepCommandForDaemonPlatform()...), + "OpenStdin": true, + } + + resp, _, err := request.Post("/containers/create", request.JSONBody(config), func(req *http.Request) error { + // This is a cheat to make the http request do chunked encoding + // Otherwise (just setting the Content-Encoding to chunked) net/http will overwrite + // https://golang.org/src/pkg/net/http/request.go?s=11980:12172 + req.ContentLength = -1 + return nil + }) + c.Assert(err, checker.IsNil, check.Commentf("error creating container with chunked encoding")) + defer resp.Body.Close() + c.Assert(resp.StatusCode, checker.Equals, http.StatusCreated) +} + +func (s *DockerSuite) TestContainerAPIPostContainerStop(c *check.C) { + out := runSleepingContainer(c) + + containerID := strings.TrimSpace(out) + c.Assert(waitRun(containerID), checker.IsNil) + + statusCode, _, err := request.SockRequest("POST", "/containers/"+containerID+"/stop", nil, daemonHost()) + c.Assert(err, checker.IsNil) + // 204 No Content is expected, not 200 + c.Assert(statusCode, checker.Equals, http.StatusNoContent) + c.Assert(waitInspect(containerID, "{{ .State.Running }}", "false", 60*time.Second), checker.IsNil) +} + +// #14170 +func (s *DockerSuite) TestPostContainerAPICreateWithStringOrSliceEntrypoint(c *check.C) { + config := struct { + Image string + Entrypoint string + Cmd []string + }{"busybox", "echo", []string{"hello", "world"}} + _, _, err := request.SockRequest("POST", 
"/containers/create?name=echotest", config, daemonHost()) + c.Assert(err, checker.IsNil) + out, _ := dockerCmd(c, "start", "-a", "echotest") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello world") + + config2 := struct { + Image string + Entrypoint []string + Cmd []string + }{"busybox", []string{"echo"}, []string{"hello", "world"}} + _, _, err = request.SockRequest("POST", "/containers/create?name=echotest2", config2, daemonHost()) + c.Assert(err, checker.IsNil) + out, _ = dockerCmd(c, "start", "-a", "echotest2") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello world") +} + +// #14170 +func (s *DockerSuite) TestPostContainersCreateWithStringOrSliceCmd(c *check.C) { + config := struct { + Image string + Entrypoint string + Cmd string + }{"busybox", "echo", "hello world"} + _, _, err := request.SockRequest("POST", "/containers/create?name=echotest", config, daemonHost()) + c.Assert(err, checker.IsNil) + out, _ := dockerCmd(c, "start", "-a", "echotest") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello world") + + config2 := struct { + Image string + Cmd []string + }{"busybox", []string{"echo", "hello", "world"}} + _, _, err = request.SockRequest("POST", "/containers/create?name=echotest2", config2, daemonHost()) + c.Assert(err, checker.IsNil) + out, _ = dockerCmd(c, "start", "-a", "echotest2") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello world") +} + +// regression #14318 +func (s *DockerSuite) TestPostContainersCreateWithStringOrSliceCapAddDrop(c *check.C) { + // Windows doesn't support CapAdd/CapDrop + testRequires(c, DaemonIsLinux) + config := struct { + Image string + CapAdd string + CapDrop string + }{"busybox", "NET_ADMIN", "SYS_ADMIN"} + status, _, err := request.SockRequest("POST", "/containers/create?name=capaddtest0", config, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + config2 := struct { + Image string + CapAdd []string + CapDrop []string + }{"busybox", []string{"NET_ADMIN", "SYS_ADMIN"}, []string{"SETGID"}} + status, _, err = request.SockRequest("POST", "/containers/create?name=capaddtest1", config2, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) +} + +// #14915 +func (s *DockerSuite) TestContainerAPICreateNoHostConfig118(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows only support 1.25 or later + config := struct { + Image string + }{"busybox"} + status, _, err := request.SockRequest("POST", "/v1.18/containers/create", config, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) +} + +// Ensure an error occurs when you have a container read-only rootfs but you +// extract an archive to a symlink in a writable volume which points to a +// directory outside of the volume. +func (s *DockerSuite) TestPutContainerArchiveErrSymlinkInVolumeToReadOnlyRootfs(c *check.C) { + // Windows does not support read-only rootfs + // Requires local volume mount bind. + // --read-only + userns has remount issues + testRequires(c, SameHostDaemon, NotUserNamespace, DaemonIsLinux) + + testVol := getTestDir(c, "test-put-container-archive-err-symlink-in-volume-to-read-only-rootfs-") + defer os.RemoveAll(testVol) + + makeTestContentInDir(c, testVol) + + cID := makeTestContainer(c, testContainerOptions{ + readOnly: true, + volumes: defaultVolumes(testVol), // Our bind mount is at /vol2 + }) + + // Attempt to extract to a symlink in the volume which points to a + // directory outside the volume. 
+	// rootfs is read-only.
+	query := make(url.Values, 1)
+	query.Set("path", "/vol2/symlinkToAbsDir")
+	urlPath := fmt.Sprintf("/v1.20/containers/%s/archive?%s", cID, query.Encode())
+
+	statusCode, body, err := request.SockRequest("PUT", urlPath, nil, daemonHost())
+	c.Assert(err, checker.IsNil)
+
+	if !isCpCannotCopyReadOnly(fmt.Errorf("%s", string(body))) {
+		c.Fatalf("expected ErrContainerRootfsReadonly error, but got %d: %s", statusCode, string(body))
+	}
+}
+
+func (s *DockerSuite) TestContainerAPIGetContainersJSONEmpty(c *check.C) {
+	status, body, err := request.SockRequest("GET", "/containers/json?all=1", nil, daemonHost())
+	c.Assert(err, checker.IsNil)
+	c.Assert(status, checker.Equals, http.StatusOK)
+	c.Assert(string(body), checker.Equals, "[]\n")
+}
+
+func (s *DockerSuite) TestPostContainersCreateWithWrongCpusetValues(c *check.C) {
+	// Not supported on Windows
+	testRequires(c, DaemonIsLinux)
+
+	c1 := struct {
+		Image      string
+		CpusetCpus string
+	}{"busybox", "1-42,,"}
+	name := "wrong-cpuset-cpus"
+	status, body, err := request.SockRequest("POST", "/containers/create?name="+name, c1, daemonHost())
+	c.Assert(err, checker.IsNil)
+	c.Assert(status, checker.Equals, http.StatusInternalServerError)
+	expected := "Invalid value 1-42,, for cpuset cpus"
+	c.Assert(getErrorMessage(c, body), checker.Equals, expected)
+
+	c2 := struct {
+		Image      string
+		CpusetMems string
+	}{"busybox", "42-3,1--"}
+	name = "wrong-cpuset-mems"
+	status, body, err = request.SockRequest("POST", "/containers/create?name="+name, c2, daemonHost())
+	c.Assert(err, checker.IsNil)
+	c.Assert(status, checker.Equals, http.StatusInternalServerError)
+	expected = "Invalid value 42-3,1-- for cpuset mems"
+	c.Assert(getErrorMessage(c, body), checker.Equals, expected)
+}
+
+func (s *DockerSuite) TestPostContainersCreateShmSizeNegative(c *check.C) {
+	// ShmSize is not supported on Windows
+	testRequires(c, DaemonIsLinux)
+	config := map[string]interface{}{
+		"Image":      "busybox",
+		"HostConfig": map[string]interface{}{"ShmSize": -1},
+	}
+
+	status, body, err := request.SockRequest("POST", "/containers/create", config, daemonHost())
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusInternalServerError)
+	c.Assert(getErrorMessage(c, body), checker.Contains, "SHM size can not be less than 0")
+}
+
+func (s *DockerSuite) TestPostContainersCreateShmSizeHostConfigOmitted(c *check.C) {
+	// ShmSize is not supported on Windows
+	testRequires(c, DaemonIsLinux)
+	var defaultSHMSize int64 = 67108864
+	config := map[string]interface{}{
+		"Image": "busybox",
+		"Cmd":   "mount",
+	}
+
+	status, body, err := request.SockRequest("POST", "/containers/create", config, daemonHost())
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusCreated)
+
+	var container containertypes.ContainerCreateCreatedBody
+	c.Assert(json.Unmarshal(body, &container), check.IsNil)
+
+	status, body, err = request.SockRequest("GET", "/containers/"+container.ID+"/json", nil, daemonHost())
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusOK)
+
+	var containerJSON types.ContainerJSON
+	c.Assert(json.Unmarshal(body, &containerJSON), check.IsNil)
+
+	c.Assert(containerJSON.HostConfig.ShmSize, check.Equals, defaultSHMSize)
+
+	out, _ := dockerCmd(c, "start", "-i", containerJSON.ID)
+	shmRegexp := regexp.MustCompile(`shm on /dev/shm type tmpfs(.*)size=65536k`)
+	if !shmRegexp.MatchString(out) {
+		c.Fatalf("Expected shm of 64MB in mount command, got %v", out)
+	}
+}
+
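+// The ShmSize tests here all assert the same byte-to-mount-option conversion:
+// /dev/shm is a tmpfs whose size shows up in the mount table in kibibytes, so
+// the default ShmSize of 67108864 bytes (64MB) appears as size=65536k. A
+// minimal sketch of the arithmetic behind the regexps (illustrative only, not
+// part of the vendored file):
+//
+//	shmBytes := int64(67108864)                // default ShmSize in bytes
+//	_ = fmt.Sprintf("size=%dk", shmBytes/1024) // "size=65536k"
+//
+// The 1GB case below matches size=1048576k by the same arithmetic.
+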
+func (s *DockerSuite) TestPostContainersCreateShmSizeOmitted(c *check.C) {
+	// ShmSize is not supported on Windows
+	testRequires(c, DaemonIsLinux)
+	config := map[string]interface{}{
+		"Image":      "busybox",
+		"HostConfig": map[string]interface{}{},
+		"Cmd":        "mount",
+	}
+
+	status, body, err := request.SockRequest("POST", "/containers/create", config, daemonHost())
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusCreated)
+
+	var container containertypes.ContainerCreateCreatedBody
+	c.Assert(json.Unmarshal(body, &container), check.IsNil)
+
+	status, body, err = request.SockRequest("GET", "/containers/"+container.ID+"/json", nil, daemonHost())
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusOK)
+
+	var containerJSON types.ContainerJSON
+	c.Assert(json.Unmarshal(body, &containerJSON), check.IsNil)
+
+	c.Assert(containerJSON.HostConfig.ShmSize, check.Equals, int64(67108864))
+
+	out, _ := dockerCmd(c, "start", "-i", containerJSON.ID)
+	shmRegexp := regexp.MustCompile(`shm on /dev/shm type tmpfs(.*)size=65536k`)
+	if !shmRegexp.MatchString(out) {
+		c.Fatalf("Expected shm of 64MB in mount command, got %v", out)
+	}
+}
+
+func (s *DockerSuite) TestPostContainersCreateWithShmSize(c *check.C) {
+	// ShmSize is not supported on Windows
+	testRequires(c, DaemonIsLinux)
+	config := map[string]interface{}{
+		"Image":      "busybox",
+		"Cmd":        "mount",
+		"HostConfig": map[string]interface{}{"ShmSize": 1073741824},
+	}
+
+	status, body, err := request.SockRequest("POST", "/containers/create", config, daemonHost())
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusCreated)
+
+	var container containertypes.ContainerCreateCreatedBody
+	c.Assert(json.Unmarshal(body, &container), check.IsNil)
+
+	status, body, err = request.SockRequest("GET", "/containers/"+container.ID+"/json", nil, daemonHost())
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusOK)
+
+	var containerJSON types.ContainerJSON
+	c.Assert(json.Unmarshal(body, &containerJSON), check.IsNil)
+
+	c.Assert(containerJSON.HostConfig.ShmSize, check.Equals, int64(1073741824))
+
+	out, _ := dockerCmd(c, "start", "-i", containerJSON.ID)
+	shmRegex := regexp.MustCompile(`shm on /dev/shm type tmpfs(.*)size=1048576k`)
+	if !shmRegex.MatchString(out) {
+		c.Fatalf("Expected shm of 1GB in mount command, got %v", out)
+	}
+}
+
+func (s *DockerSuite) TestPostContainersCreateMemorySwappinessHostConfigOmitted(c *check.C) {
+	// Swappiness is not supported on Windows
+	testRequires(c, DaemonIsLinux)
+	config := map[string]interface{}{
+		"Image": "busybox",
+	}
+
+	status, body, err := request.SockRequest("POST", "/containers/create", config, daemonHost())
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusCreated)
+
+	var container containertypes.ContainerCreateCreatedBody
+	c.Assert(json.Unmarshal(body, &container), check.IsNil)
+
+	status, body, err = request.SockRequest("GET", "/containers/"+container.ID+"/json", nil, daemonHost())
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusOK)
+
+	var containerJSON types.ContainerJSON
+	c.Assert(json.Unmarshal(body, &containerJSON), check.IsNil)
+
+	c.Assert(containerJSON.HostConfig.MemorySwappiness, check.IsNil)
+}
+
+// check validation is done on the daemon side and not only in the cli
+func (s *DockerSuite) TestPostContainersCreateWithOomScoreAdjInvalidRange(c *check.C) {
+	// OomScoreAdj is not supported on Windows
+	testRequires(c, DaemonIsLinux)
+
+	config := struct {
+		Image       string
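+		// OomScoreAdj mirrors the kernel's /proc/<pid>/oom_score_adj, whose
+		// valid range is [-1000, 1000]; the two cases below sit one past each
+		// bound to exercise the daemon-side validation.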
+		OomScoreAdj int
+	}{"busybox", 1001}
+	name := "oomscoreadj-over"
+	status, b, err := request.SockRequest("POST", "/containers/create?name="+name, config, daemonHost())
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusInternalServerError)
+
+	expected := "Invalid value 1001, range for oom score adj is [-1000, 1000]"
+	msg := getErrorMessage(c, b)
+	if !strings.Contains(msg, expected) {
+		c.Fatalf("Expected output to contain %q, got %q", expected, msg)
+	}
+
+	config = struct {
+		Image       string
+		OomScoreAdj int
+	}{"busybox", -1001}
+	name = "oomscoreadj-low"
+	status, b, err = request.SockRequest("POST", "/containers/create?name="+name, config, daemonHost())
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusInternalServerError)
+	expected = "Invalid value -1001, range for oom score adj is [-1000, 1000]"
+	msg = getErrorMessage(c, b)
+	if !strings.Contains(msg, expected) {
+		c.Fatalf("Expected output to contain %q, got %q", expected, msg)
+	}
+}
+
+// test case for #22210 where an empty container name caused panic.
+func (s *DockerSuite) TestContainerAPIDeleteWithEmptyName(c *check.C) {
+	status, out, err := request.SockRequest("DELETE", "/containers/", nil, daemonHost())
+	c.Assert(err, checker.IsNil)
+	c.Assert(status, checker.Equals, http.StatusBadRequest)
+	c.Assert(string(out), checker.Contains, "No container name or ID supplied")
+}
+
+func (s *DockerSuite) TestContainerAPIStatsWithNetworkDisabled(c *check.C) {
+	// Problematic on Windows as Windows does not support stats
+	testRequires(c, DaemonIsLinux)
+
+	name := "testing-network-disabled"
+	config := map[string]interface{}{
+		"Image":           "busybox",
+		"Cmd":             []string{"top"},
+		"NetworkDisabled": true,
+	}
+
+	status, _, err := request.SockRequest("POST", "/containers/create?name="+name, config, daemonHost())
+	c.Assert(err, checker.IsNil)
+	c.Assert(status, checker.Equals, http.StatusCreated)
+
+	status, _, err = request.SockRequest("POST", "/containers/"+name+"/start", nil, daemonHost())
+	c.Assert(err, checker.IsNil)
+	c.Assert(status, checker.Equals, http.StatusNoContent)
+
+	c.Assert(waitRun(name), check.IsNil)
+
+	type b struct {
+		status int
+		body   []byte
+		err    error
+	}
+	bc := make(chan b, 1)
+	go func() {
+		status, body, err := request.SockRequest("GET", "/containers/"+name+"/stats", nil, daemonHost())
+		bc <- b{status, body, err}
+	}()
+
+	// allow some time to stream the stats from the container
+	time.Sleep(4 * time.Second)
+	dockerCmd(c, "rm", "-f", name)
+
+	// collect the results from the stats stream or timeout and fail
+	// if the stream was not disconnected.
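+	// Note: bc is buffered (capacity 1), so the stats goroutine above can
+	// always deliver its result and exit even if the select below times out
+	// first; an unbuffered channel would leak that goroutine on timeout.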
+ select { + case <-time.After(2 * time.Second): + c.Fatal("stream was not closed after container was removed") + case sr := <-bc: + c.Assert(sr.err, checker.IsNil) + c.Assert(sr.status, checker.Equals, http.StatusOK) + + // decode only one object from the stream + var s *types.Stats + dec := json.NewDecoder(bytes.NewBuffer(sr.body)) + c.Assert(dec.Decode(&s), checker.IsNil) + } +} + +func (s *DockerSuite) TestContainersAPICreateMountsValidation(c *check.C) { + type m mounttypes.Mount + type hc struct{ Mounts []m } + type cfg struct { + Image string + HostConfig hc + } + type testCase struct { + config cfg + status int + msg string + } + + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + destPath := prefix + slash + "foo" + notExistPath := prefix + slash + "notexist" + + cases := []testCase{ + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "notreal", + Target: destPath}}}}, + status: http.StatusBadRequest, + msg: "mount type unknown", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "bind"}}}}, + status: http.StatusBadRequest, + msg: "Target must not be empty", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "bind", + Target: destPath}}}}, + status: http.StatusBadRequest, + msg: "Source must not be empty", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "bind", + Source: notExistPath, + Target: destPath}}}}, + status: http.StatusBadRequest, + msg: "bind source path does not exist", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "volume"}}}}, + status: http.StatusBadRequest, + msg: "Target must not be empty", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "volume", + Source: "hello", + Target: destPath}}}}, + status: http.StatusCreated, + msg: "", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "volume", + Source: "hello2", + Target: destPath, + VolumeOptions: &mounttypes.VolumeOptions{ + DriverConfig: &mounttypes.Driver{ + Name: "local"}}}}}}, + status: http.StatusCreated, + msg: "", + }, + } + + if SameHostDaemon() { + tmpDir, err := ioutils.TempDir("", "test-mounts-api") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir) + cases = append(cases, []testCase{ + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "bind", + Source: tmpDir, + Target: destPath}}}}, + status: http.StatusCreated, + msg: "", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "bind", + Source: tmpDir, + Target: destPath, + VolumeOptions: &mounttypes.VolumeOptions{}}}}}, + status: http.StatusBadRequest, + msg: "VolumeOptions must not be specified", + }, + }...) 
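+		// On the wire each case serializes to a create body of roughly this
+		// shape (sketch; "<tmpDir>" stands in for the directory created above,
+		// and the target is /foo on Linux):
+		//
+		//	{
+		//	  "Image": "busybox",
+		//	  "HostConfig": {
+		//	    "Mounts": [{"Type": "bind", "Source": "<tmpDir>", "Target": "/foo"}]
+		//	  }
+		//	}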
+	}
+
+	if DaemonIsLinux() {
+		cases = append(cases, []testCase{
+			{
+				config: cfg{
+					Image: "busybox",
+					HostConfig: hc{
+						Mounts: []m{{
+							Type:   "volume",
+							Source: "hello3",
+							Target: destPath,
+							VolumeOptions: &mounttypes.VolumeOptions{
+								DriverConfig: &mounttypes.Driver{
+									Name:    "local",
+									Options: map[string]string{"o": "size=1"}}}}}}},
+				status: http.StatusCreated,
+				msg:    "",
+			},
+			{
+				config: cfg{
+					Image: "busybox",
+					HostConfig: hc{
+						Mounts: []m{{
+							Type:   "tmpfs",
+							Target: destPath}}}},
+				status: http.StatusCreated,
+				msg:    "",
+			},
+			{
+				config: cfg{
+					Image: "busybox",
+					HostConfig: hc{
+						Mounts: []m{{
+							Type:   "tmpfs",
+							Target: destPath,
+							TmpfsOptions: &mounttypes.TmpfsOptions{
+								SizeBytes: 4096 * 1024,
+								Mode:      0700,
+							}}}}},
+				status: http.StatusCreated,
+				msg:    "",
+			},
+
+			{
+				config: cfg{
+					Image: "busybox",
+					HostConfig: hc{
+						Mounts: []m{{
+							Type:   "tmpfs",
+							Source: "/shouldnotbespecified",
+							Target: destPath}}}},
+				status: http.StatusBadRequest,
+				msg:    "Source must not be specified",
+			},
+		}...)
+
+	}
+
+	for i, x := range cases {
+		c.Logf("case %d", i)
+		status, b, err := request.SockRequest("POST", "/containers/create", x.config, daemonHost())
+		c.Assert(err, checker.IsNil)
+		c.Assert(status, checker.Equals, x.status, check.Commentf("%s\n%v", string(b), cases[i].config))
+		if len(x.msg) > 0 {
+			c.Assert(string(b), checker.Contains, x.msg, check.Commentf("%v", cases[i].config))
+		}
+	}
+}
+
+func (s *DockerSuite) TestContainerAPICreateMountsBindRead(c *check.C) {
+	testRequires(c, NotUserNamespace, SameHostDaemon)
+	// also with data on the host side
+	prefix, slash := getPrefixAndSlashFromDaemonPlatform()
+	destPath := prefix + slash + "foo"
+	tmpDir, err := ioutil.TempDir("", "test-mounts-api-bind")
+	c.Assert(err, checker.IsNil)
+	defer os.RemoveAll(tmpDir)
+	err = ioutil.WriteFile(filepath.Join(tmpDir, "bar"), []byte("hello"), 0666)
+	c.Assert(err, checker.IsNil)
+
+	data := map[string]interface{}{
+		"Image":      "busybox",
+		"Cmd":        []string{"/bin/sh", "-c", "cat /foo/bar"},
+		"HostConfig": map[string]interface{}{"Mounts": []map[string]interface{}{{"Type": "bind", "Source": tmpDir, "Target": destPath}}},
+	}
+	status, resp, err := request.SockRequest("POST", "/containers/create?name=test", data, daemonHost())
+	c.Assert(err, checker.IsNil, check.Commentf(string(resp)))
+	c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(resp)))
+
+	out, _ := dockerCmd(c, "start", "-a", "test")
+	c.Assert(out, checker.Equals, "hello")
+}
+
+// Test Mounts comes out as expected for the MountPoint
+func (s *DockerSuite) TestContainersAPICreateMountsCreate(c *check.C) {
+	prefix, slash := getPrefixAndSlashFromDaemonPlatform()
+	destPath := prefix + slash + "foo"
+
+	var (
+		testImg string
+	)
+	if testEnv.DaemonPlatform() != "windows" {
+		testImg = "test-mount-config"
+		buildImageSuccessfully(c, testImg, build.WithDockerfile(`
+	FROM busybox
+	RUN mkdir `+destPath+` && touch `+destPath+slash+`bar
+	CMD cat `+destPath+slash+`bar
+	`))
+	} else {
+		testImg = "busybox"
+	}
+
+	type testCase struct {
+		cfg      mounttypes.Mount
+		expected types.MountPoint
+	}
+
+	cases := []testCase{
+		// use literal strings here for `Type` instead of the defined constants in the volume package to keep this honest
+		// Validation of the actual `Mount` struct is done in another test and is not needed here
+		{mounttypes.Mount{Type: "volume", Target: destPath}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath}},
+		{mounttypes.Mount{Type: "volume", 
Target: destPath + slash}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath}}, + {mounttypes.Mount{Type: "volume", Target: destPath, Source: "test1"}, types.MountPoint{Type: "volume", Name: "test1", RW: true, Destination: destPath}}, + {mounttypes.Mount{Type: "volume", Target: destPath, ReadOnly: true, Source: "test2"}, types.MountPoint{Type: "volume", Name: "test2", RW: false, Destination: destPath}}, + {mounttypes.Mount{Type: "volume", Target: destPath, Source: "test3", VolumeOptions: &mounttypes.VolumeOptions{DriverConfig: &mounttypes.Driver{Name: volume.DefaultDriverName}}}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", Name: "test3", RW: true, Destination: destPath}}, + } + + if SameHostDaemon() { + // setup temp dir for testing binds + tmpDir1, err := ioutil.TempDir("", "test-mounts-api-1") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir1) + cases = append(cases, []testCase{ + {mounttypes.Mount{Type: "bind", Source: tmpDir1, Target: destPath}, types.MountPoint{Type: "bind", RW: true, Destination: destPath, Source: tmpDir1}}, + {mounttypes.Mount{Type: "bind", Source: tmpDir1, Target: destPath, ReadOnly: true}, types.MountPoint{Type: "bind", RW: false, Destination: destPath, Source: tmpDir1}}, + }...) + + // for modes only supported on Linux + if DaemonIsLinux() { + tmpDir3, err := ioutils.TempDir("", "test-mounts-api-3") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir3) + + c.Assert(mount.Mount(tmpDir3, tmpDir3, "none", "bind,rw"), checker.IsNil) + c.Assert(mount.ForceMount("", tmpDir3, "none", "shared"), checker.IsNil) + + cases = append(cases, []testCase{ + {mounttypes.Mount{Type: "bind", Source: tmpDir3, Target: destPath}, types.MountPoint{Type: "bind", RW: true, Destination: destPath, Source: tmpDir3}}, + {mounttypes.Mount{Type: "bind", Source: tmpDir3, Target: destPath, ReadOnly: true}, types.MountPoint{Type: "bind", RW: false, Destination: destPath, Source: tmpDir3}}, + {mounttypes.Mount{Type: "bind", Source: tmpDir3, Target: destPath, ReadOnly: true, BindOptions: &mounttypes.BindOptions{Propagation: "shared"}}, types.MountPoint{Type: "bind", RW: false, Destination: destPath, Source: tmpDir3, Propagation: "shared"}}, + }...) + } + } + + if testEnv.DaemonPlatform() != "windows" { // Windows does not support volume populate + cases = append(cases, []testCase{ + {mounttypes.Mount{Type: "volume", Target: destPath, VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath}}, + {mounttypes.Mount{Type: "volume", Target: destPath + slash, VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath}}, + {mounttypes.Mount{Type: "volume", Target: destPath, Source: "test4", VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, types.MountPoint{Type: "volume", Name: "test4", RW: true, Destination: destPath}}, + {mounttypes.Mount{Type: "volume", Target: destPath, Source: "test5", ReadOnly: true, VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, types.MountPoint{Type: "volume", Name: "test5", RW: false, Destination: destPath}}, + }...) 
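+		// VolumeOptions.NoCopy turns off the default seeding of a fresh volume
+		// with the image's content at the target path, which is the "volume
+		// populate" behaviour the comment above notes Windows lacks.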
+ } + + type wrapper struct { + containertypes.Config + HostConfig containertypes.HostConfig + } + type createResp struct { + ID string `json:"Id"` + } + for i, x := range cases { + c.Logf("case %d - config: %v", i, x.cfg) + status, data, err := request.SockRequest("POST", "/containers/create", wrapper{containertypes.Config{Image: testImg}, containertypes.HostConfig{Mounts: []mounttypes.Mount{x.cfg}}}, daemonHost()) + c.Assert(err, checker.IsNil, check.Commentf(string(data))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(data))) + + var resp createResp + err = json.Unmarshal(data, &resp) + c.Assert(err, checker.IsNil, check.Commentf(string(data))) + id := resp.ID + + var mps []types.MountPoint + err = json.NewDecoder(strings.NewReader(inspectFieldJSON(c, id, "Mounts"))).Decode(&mps) + c.Assert(err, checker.IsNil) + c.Assert(mps, checker.HasLen, 1) + c.Assert(mps[0].Destination, checker.Equals, x.expected.Destination) + + if len(x.expected.Source) > 0 { + c.Assert(mps[0].Source, checker.Equals, x.expected.Source) + } + if len(x.expected.Name) > 0 { + c.Assert(mps[0].Name, checker.Equals, x.expected.Name) + } + if len(x.expected.Driver) > 0 { + c.Assert(mps[0].Driver, checker.Equals, x.expected.Driver) + } + c.Assert(mps[0].RW, checker.Equals, x.expected.RW) + c.Assert(mps[0].Type, checker.Equals, x.expected.Type) + c.Assert(mps[0].Mode, checker.Equals, x.expected.Mode) + if len(x.expected.Propagation) > 0 { + c.Assert(mps[0].Propagation, checker.Equals, x.expected.Propagation) + } + + out, _, err := dockerCmdWithError("start", "-a", id) + if (x.cfg.Type != "volume" || (x.cfg.VolumeOptions != nil && x.cfg.VolumeOptions.NoCopy)) && testEnv.DaemonPlatform() != "windows" { + c.Assert(err, checker.NotNil, check.Commentf("%s\n%v", out, mps[0])) + } else { + c.Assert(err, checker.IsNil, check.Commentf("%s\n%v", out, mps[0])) + } + + dockerCmd(c, "rm", "-fv", id) + if x.cfg.Type == "volume" && len(x.cfg.Source) > 0 { + // This should still exist even though we removed the container + dockerCmd(c, "volume", "inspect", mps[0].Name) + } else { + // This should be removed automatically when we removed the container + out, _, err := dockerCmdWithError("volume", "inspect", mps[0].Name) + c.Assert(err, checker.NotNil, check.Commentf(out)) + } + } +} + +func (s *DockerSuite) TestContainersAPICreateMountsTmpfs(c *check.C) { + testRequires(c, DaemonIsLinux) + type testCase struct { + cfg map[string]interface{} + expectedOptions []string + } + target := "/foo" + cases := []testCase{ + { + cfg: map[string]interface{}{ + "Type": "tmpfs", + "Target": target}, + expectedOptions: []string{"rw", "nosuid", "nodev", "noexec", "relatime"}, + }, + { + cfg: map[string]interface{}{ + "Type": "tmpfs", + "Target": target, + "TmpfsOptions": map[string]interface{}{ + "SizeBytes": 4096 * 1024, "Mode": 0700}}, + expectedOptions: []string{"rw", "nosuid", "nodev", "noexec", "relatime", "size=4096k", "mode=700"}, + }, + } + + for i, x := range cases { + cName := fmt.Sprintf("test-tmpfs-%d", i) + data := map[string]interface{}{ + "Image": "busybox", + "Cmd": []string{"/bin/sh", "-c", + fmt.Sprintf("mount | grep 'tmpfs on %s'", target)}, + "HostConfig": map[string]interface{}{"Mounts": []map[string]interface{}{x.cfg}}, + } + status, resp, err := request.SockRequest("POST", "/containers/create?name="+cName, data, daemonHost()) + c.Assert(err, checker.IsNil, check.Commentf(string(resp))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(resp))) + out, _ := dockerCmd(c, 
"start", "-a", cName) + for _, option := range x.expectedOptions { + c.Assert(out, checker.Contains, option) + } + } +} + +// Regression test for #33334 +// Makes sure that when a container which has a custom stop signal + restart=always +// gets killed (with SIGKILL) by the kill API, that the restart policy is cancelled. +func (s *DockerSuite) TestContainerKillCustomStopSignal(c *check.C) { + id := strings.TrimSpace(runSleepingContainer(c, "--stop-signal=SIGTERM", "--restart=always")) + res, _, err := request.Post("/containers/" + id + "/kill") + c.Assert(err, checker.IsNil) + defer res.Body.Close() + + b, err := ioutil.ReadAll(res.Body) + c.Assert(res.StatusCode, checker.Equals, http.StatusNoContent, check.Commentf(string(b))) + err = waitInspect(id, "{{.State.Running}} {{.State.Restarting}}", "false false", 30*time.Second) + c.Assert(err, checker.IsNil) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_create_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_create_test.go new file mode 100644 index 000000000..e404b6cf5 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_create_test.go @@ -0,0 +1,171 @@ +package main + +import ( + "fmt" + "net/http" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestAPICreateWithNotExistImage(c *check.C) { + name := "test" + config := map[string]interface{}{ + "Image": "test456:v1", + "Volumes": map[string]struct{}{"/tmp": {}}, + } + + status, body, err := request.SockRequest("POST", "/containers/create?name="+name, config, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusNotFound) + expected := "No such image: test456:v1" + c.Assert(getErrorMessage(c, body), checker.Contains, expected) + + config2 := map[string]interface{}{ + "Image": "test456", + "Volumes": map[string]struct{}{"/tmp": {}}, + } + + status, body, err = request.SockRequest("POST", "/containers/create?name="+name, config2, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusNotFound) + expected = "No such image: test456:latest" + c.Assert(getErrorMessage(c, body), checker.Equals, expected) + + config3 := map[string]interface{}{ + "Image": "sha256:0cb40641836c461bc97c793971d84d758371ed682042457523e4ae701efeaaaa", + } + + status, body, err = request.SockRequest("POST", "/containers/create?name="+name, config3, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusNotFound) + expected = "No such image: sha256:0cb40641836c461bc97c793971d84d758371ed682042457523e4ae701efeaaaa" + c.Assert(getErrorMessage(c, body), checker.Equals, expected) + +} + +// Test for #25099 +func (s *DockerSuite) TestAPICreateEmptyEnv(c *check.C) { + name := "test1" + config := map[string]interface{}{ + "Image": "busybox", + "Env": []string{"", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"}, + "Cmd": []string{"true"}, + } + + status, body, err := request.SockRequest("POST", "/containers/create?name="+name, config, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + expected := "invalid environment variable:" + c.Assert(getErrorMessage(c, body), checker.Contains, expected) + + name = "test2" + config = map[string]interface{}{ + "Image": "busybox", + "Env": []string{"=", 
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"}, + "Cmd": []string{"true"}, + } + status, body, err = request.SockRequest("POST", "/containers/create?name="+name, config, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + expected = "invalid environment variable: =" + c.Assert(getErrorMessage(c, body), checker.Contains, expected) + + name = "test3" + config = map[string]interface{}{ + "Image": "busybox", + "Env": []string{"=foo", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"}, + "Cmd": []string{"true"}, + } + status, body, err = request.SockRequest("POST", "/containers/create?name="+name, config, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + expected = "invalid environment variable: =foo" + c.Assert(getErrorMessage(c, body), checker.Contains, expected) +} + +func (s *DockerSuite) TestAPICreateWithInvalidHealthcheckParams(c *check.C) { + // test invalid Interval in Healthcheck: less than 0s + name := "test1" + config := map[string]interface{}{ + "Image": "busybox", + "Healthcheck": map[string]interface{}{ + "Interval": -10 * time.Millisecond, + "Timeout": time.Second, + "Retries": int(1000), + }, + } + + status, body, err := request.SockRequest("POST", "/containers/create?name="+name, config, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + expected := fmt.Sprintf("Interval in Healthcheck cannot be less than %s", container.MinimumDuration) + c.Assert(getErrorMessage(c, body), checker.Contains, expected) + + // test invalid Interval in Healthcheck: larger than 0s but less than 1ms + name = "test2" + config = map[string]interface{}{ + "Image": "busybox", + "Healthcheck": map[string]interface{}{ + "Interval": 500 * time.Microsecond, + "Timeout": time.Second, + "Retries": int(1000), + }, + } + status, body, err = request.SockRequest("POST", "/containers/create?name="+name, config, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + c.Assert(getErrorMessage(c, body), checker.Contains, expected) + + // test invalid Timeout in Healthcheck: less than 1ms + name = "test3" + config = map[string]interface{}{ + "Image": "busybox", + "Healthcheck": map[string]interface{}{ + "Interval": time.Second, + "Timeout": -100 * time.Millisecond, + "Retries": int(1000), + }, + } + status, body, err = request.SockRequest("POST", "/containers/create?name="+name, config, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + expected = fmt.Sprintf("Timeout in Healthcheck cannot be less than %s", container.MinimumDuration) + c.Assert(getErrorMessage(c, body), checker.Contains, expected) + + // test invalid Retries in Healthcheck: less than 0 + name = "test4" + config = map[string]interface{}{ + "Image": "busybox", + "Healthcheck": map[string]interface{}{ + "Interval": time.Second, + "Timeout": time.Second, + "Retries": int(-10), + }, + } + status, body, err = request.SockRequest("POST", "/containers/create?name="+name, config, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + expected = "Retries in Healthcheck cannot be negative" + c.Assert(getErrorMessage(c, body), checker.Contains, expected) + + // test invalid StartPeriod in Healthcheck: not 0 and less than 1ms + name = "test3" + config = map[string]interface{}{ + "Image": 
"busybox", + "Healthcheck": map[string]interface{}{ + "Interval": time.Second, + "Timeout": time.Second, + "Retries": int(1000), + "StartPeriod": 100 * time.Microsecond, + }, + } + status, body, err = request.SockRequest("POST", "/containers/create?name="+name, config, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + expected = fmt.Sprintf("StartPeriod in Healthcheck cannot be less than %s", container.MinimumDuration) + c.Assert(getErrorMessage(c, body), checker.Contains, expected) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_events_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_events_test.go new file mode 100644 index 000000000..a95422f58 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_events_test.go @@ -0,0 +1,74 @@ +package main + +import ( + "encoding/json" + "io" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestEventsAPIEmptyOutput(c *check.C) { + type apiResp struct { + resp *http.Response + err error + } + chResp := make(chan *apiResp) + go func() { + resp, body, err := request.Get("/events") + body.Close() + chResp <- &apiResp{resp, err} + }() + + select { + case r := <-chResp: + c.Assert(r.err, checker.IsNil) + c.Assert(r.resp.StatusCode, checker.Equals, http.StatusOK) + case <-time.After(3 * time.Second): + c.Fatal("timeout waiting for events api to respond, should have responded immediately") + } +} + +func (s *DockerSuite) TestEventsAPIBackwardsCompatible(c *check.C) { + since := daemonTime(c).Unix() + ts := strconv.FormatInt(since, 10) + + out := runSleepingContainer(c, "--name=foo", "-d") + containerID := strings.TrimSpace(out) + c.Assert(waitRun(containerID), checker.IsNil) + + q := url.Values{} + q.Set("since", ts) + + _, body, err := request.Get("/events?" 
+ q.Encode())
+	c.Assert(err, checker.IsNil)
+	defer body.Close()
+
+	dec := json.NewDecoder(body)
+	var containerCreateEvent *jsonmessage.JSONMessage
+	for {
+		var event jsonmessage.JSONMessage
+		if err := dec.Decode(&event); err != nil {
+			if err == io.EOF {
+				break
+			}
+			c.Fatal(err)
+		}
+		if event.Status == "create" && event.ID == containerID {
+			containerCreateEvent = &event
+			break
+		}
+	}
+
+	c.Assert(containerCreateEvent, checker.Not(checker.IsNil))
+	c.Assert(containerCreateEvent.Status, checker.Equals, "create")
+	c.Assert(containerCreateEvent.ID, checker.Equals, containerID)
+	c.Assert(containerCreateEvent.From, checker.Equals, "busybox")
+}
diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_exec_resize_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_exec_resize_test.go
new file mode 100644
index 000000000..f43bc2de0
--- /dev/null
+++ b/vendor/github.com/moby/moby/integration-cli/docker_api_exec_resize_test.go
@@ -0,0 +1,104 @@
+package main
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"strings"
+	"sync"
+
+	"github.com/docker/docker/integration-cli/checker"
+	"github.com/docker/docker/integration-cli/request"
+	"github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestExecResizeAPIHeightWidthNoInt(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
+	cleanedContainerID := strings.TrimSpace(out)
+
+	endpoint := "/exec/" + cleanedContainerID + "/resize?h=foo&w=bar"
+	status, _, err := request.SockRequest("POST", endpoint, nil, daemonHost())
+	c.Assert(err, checker.IsNil)
+	c.Assert(status, checker.Equals, http.StatusInternalServerError)
+}
+
+// Part of #14845
+func (s *DockerSuite) TestExecResizeImmediatelyAfterExecStart(c *check.C) {
+	name := "exec_resize_test"
+	dockerCmd(c, "run", "-d", "-i", "-t", "--name", name, "--restart", "always", "busybox", "/bin/sh")
+
+	testExecResize := func() error {
+		data := map[string]interface{}{
+			"AttachStdin": true,
+			"Cmd":         []string{"/bin/sh"},
+		}
+		uri := fmt.Sprintf("/containers/%s/exec", name)
+		status, body, err := request.SockRequest("POST", uri, data, daemonHost())
+		if err != nil {
+			return err
+		}
+		if status != http.StatusCreated {
+			return fmt.Errorf("POST %s is expected to return %d, got %d", uri, http.StatusCreated, status)
+		}
+
+		out := map[string]string{}
+		err = json.Unmarshal(body, &out)
+		if err != nil {
+			return fmt.Errorf("ExecCreate returned invalid json. Error: %q", err.Error())
+		}
+
+		execID := out["Id"]
+		if len(execID) < 1 {
+			return fmt.Errorf("ExecCreate got invalid execID")
+		}
+
+		payload := bytes.NewBufferString(`{"Tty":true}`)
+		conn, _, err := request.SockRequestHijack("POST", fmt.Sprintf("/exec/%s/start", execID), payload, "application/json", daemonHost())
+		if err != nil {
+			return fmt.Errorf("Failed to start the exec: %q", err.Error())
+		}
+		defer conn.Close()
+
+		_, rc, err := request.Post(fmt.Sprintf("/exec/%s/resize?h=24&w=80", execID), request.ContentType("text/plain"))
+		// If io.ErrUnexpectedEOF is returned, the daemon has probably panicked.
+		if err == io.ErrUnexpectedEOF {
+			return fmt.Errorf("The daemon might have crashed.")
+		}
+
+		if err == nil {
+			rc.Close()
+		}
+
+		// We are only interested in the io.ErrUnexpectedEOF error, so we return nil otherwise.
+		return nil
+	}
+
+	// The panic happens when daemon.ContainerExecStart is called but the
+	// container.Exec is not called.
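+	// (daemon.ContainerExecStart and container.Exec are daemon internals; the
+	// window between those two calls is the race the requests below try to hit.)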
+	// Because the panic is not 100% reproducible, we send the requests concurrently
+	// to increase the probability that the problem is triggered.
+	var (
+		n  = 10
+		ch = make(chan error, n)
+		wg sync.WaitGroup
+	)
+	for i := 0; i < n; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			if err := testExecResize(); err != nil {
+				ch <- err
+			}
+		}()
+	}
+
+	wg.Wait()
+	select {
+	case err := <-ch:
+		c.Fatal(err.Error())
+	default:
+	}
+}
diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_exec_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_exec_test.go
new file mode 100644
index 000000000..25399343a
--- /dev/null
+++ b/vendor/github.com/moby/moby/integration-cli/docker_api_exec_test.go
@@ -0,0 +1,249 @@
+// +build !test_no_exec
+
+package main
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"time"
+
+	"github.com/docker/docker/integration-cli/checker"
+	"github.com/docker/docker/integration-cli/request"
+	"github.com/docker/docker/pkg/testutil"
+	"github.com/go-check/check"
+)
+
+// Regression test for #9414
+func (s *DockerSuite) TestExecAPICreateNoCmd(c *check.C) {
+	name := "exec_test"
+	dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh")
+
+	status, body, err := request.SockRequest("POST", fmt.Sprintf("/containers/%s/exec", name), map[string]interface{}{"Cmd": nil}, daemonHost())
+	c.Assert(err, checker.IsNil)
+	c.Assert(status, checker.Equals, http.StatusInternalServerError)
+
+	comment := check.Commentf("Expected message when creating exec command with no Cmd specified")
+	c.Assert(getErrorMessage(c, body), checker.Contains, "No exec command specified", comment)
+}
+
+func (s *DockerSuite) TestExecAPICreateNoValidContentType(c *check.C) {
+	name := "exec_test"
+	dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh")
+
+	jsonData := bytes.NewBuffer(nil)
+	if err := json.NewEncoder(jsonData).Encode(map[string]interface{}{"Cmd": nil}); err != nil {
+		c.Fatalf("Can not encode data to json %s", err)
+	}
+
+	res, body, err := request.Post(fmt.Sprintf("/containers/%s/exec", name), request.RawContent(ioutil.NopCloser(jsonData)), request.ContentType("test/plain"))
+	c.Assert(err, checker.IsNil)
+	c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError)
+
+	b, err := testutil.ReadBody(body)
+	c.Assert(err, checker.IsNil)
+
+	comment := check.Commentf("Expected message when creating exec command with invalid Content-Type specified")
+	c.Assert(getErrorMessage(c, b), checker.Contains, "Content-Type specified", comment)
+}
+
+func (s *DockerSuite) TestExecAPICreateContainerPaused(c *check.C) {
+	// Not relevant on Windows as Windows containers cannot be paused
+	testRequires(c, DaemonIsLinux)
+	name := "exec_create_test"
+	dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh")
+
+	dockerCmd(c, "pause", name)
+	status, body, err := request.SockRequest("POST", fmt.Sprintf("/containers/%s/exec", name), map[string]interface{}{"Cmd": []string{"true"}}, daemonHost())
+	c.Assert(err, checker.IsNil)
+	c.Assert(status, checker.Equals, http.StatusConflict)
+
+	comment := check.Commentf("Expected message when creating exec command with Container %s is paused", name)
+	c.Assert(getErrorMessage(c, body), checker.Contains, "Container "+name+" is paused, unpause the container before exec", comment)
+}
+
+func (s *DockerSuite) TestExecAPIStart(c *check.C) {
+	testRequires(c, DaemonIsLinux) // Uses pause/unpause but bits may be salvageable for Windows CI
+	dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top")
"-d", "--name", "test", "busybox", "top") + + id := createExec(c, "test") + startExec(c, id, http.StatusOK) + + var execJSON struct{ PID int } + inspectExec(c, id, &execJSON) + c.Assert(execJSON.PID, checker.GreaterThan, 1) + + id = createExec(c, "test") + dockerCmd(c, "stop", "test") + + startExec(c, id, http.StatusNotFound) + + dockerCmd(c, "start", "test") + startExec(c, id, http.StatusNotFound) + + // make sure exec is created before pausing + id = createExec(c, "test") + dockerCmd(c, "pause", "test") + startExec(c, id, http.StatusConflict) + dockerCmd(c, "unpause", "test") + startExec(c, id, http.StatusOK) +} + +func (s *DockerSuite) TestExecAPIStartEnsureHeaders(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top") + + id := createExec(c, "test") + resp, _, err := request.Post(fmt.Sprintf("/exec/%s/start", id), request.RawString(`{"Detach": true}`), request.JSON) + c.Assert(err, checker.IsNil) + c.Assert(resp.Header.Get("Server"), checker.Not(checker.Equals), "") +} + +func (s *DockerSuite) TestExecAPIStartBackwardsCompatible(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later + runSleepingContainer(c, "-d", "--name", "test") + id := createExec(c, "test") + + resp, body, err := request.Post(fmt.Sprintf("/v1.20/exec/%s/start", id), request.RawString(`{"Detach": true}`), request.ContentType("text/plain")) + c.Assert(err, checker.IsNil) + + b, err := testutil.ReadBody(body) + comment := check.Commentf("response body: %s", b) + c.Assert(err, checker.IsNil, comment) + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK, comment) +} + +// #19362 +func (s *DockerSuite) TestExecAPIStartMultipleTimesError(c *check.C) { + runSleepingContainer(c, "-d", "--name", "test") + execID := createExec(c, "test") + startExec(c, execID, http.StatusOK) + waitForExec(c, execID) + + startExec(c, execID, http.StatusConflict) +} + +// #20638 +func (s *DockerSuite) TestExecAPIStartWithDetach(c *check.C) { + name := "foo" + runSleepingContainer(c, "-d", "-t", "--name", name) + data := map[string]interface{}{ + "cmd": []string{"true"}, + "AttachStdin": true, + } + _, b, err := request.SockRequest("POST", fmt.Sprintf("/containers/%s/exec", name), data, daemonHost()) + c.Assert(err, checker.IsNil, check.Commentf(string(b))) + + createResp := struct { + ID string `json:"Id"` + }{} + c.Assert(json.Unmarshal(b, &createResp), checker.IsNil, check.Commentf(string(b))) + + _, body, err := request.Post(fmt.Sprintf("/exec/%s/start", createResp.ID), request.RawString(`{"Detach": true}`), request.JSON) + c.Assert(err, checker.IsNil) + + b, err = testutil.ReadBody(body) + comment := check.Commentf("response body: %s", b) + c.Assert(err, checker.IsNil, comment) + + resp, _, err := request.Get("/_ping") + c.Assert(err, checker.IsNil) + if resp.StatusCode != http.StatusOK { + c.Fatal("daemon is down, it should alive") + } +} + +// #30311 +func (s *DockerSuite) TestExecAPIStartValidCommand(c *check.C) { + name := "exec_test" + dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh") + + id := createExecCmd(c, name, "true") + startExec(c, id, http.StatusOK) + + waitForExec(c, id) + + var inspectJSON struct{ ExecIDs []string } + inspectContainer(c, name, &inspectJSON) + + c.Assert(inspectJSON.ExecIDs, checker.IsNil) +} + +// #30311 +func (s *DockerSuite) TestExecAPIStartInvalidCommand(c *check.C) { + name := "exec_test" + dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh") + + id := createExecCmd(c, name, 
"invalid") + startExec(c, id, http.StatusNotFound) + waitForExec(c, id) + + var inspectJSON struct{ ExecIDs []string } + inspectContainer(c, name, &inspectJSON) + + c.Assert(inspectJSON.ExecIDs, checker.IsNil) +} + +func createExec(c *check.C, name string) string { + return createExecCmd(c, name, "true") +} + +func createExecCmd(c *check.C, name string, cmd string) string { + _, reader, err := request.Post(fmt.Sprintf("/containers/%s/exec", name), request.JSONBody(map[string]interface{}{"Cmd": []string{cmd}})) + c.Assert(err, checker.IsNil) + b, err := ioutil.ReadAll(reader) + c.Assert(err, checker.IsNil) + defer reader.Close() + createResp := struct { + ID string `json:"Id"` + }{} + c.Assert(json.Unmarshal(b, &createResp), checker.IsNil, check.Commentf(string(b))) + return createResp.ID +} + +func startExec(c *check.C, id string, code int) { + resp, body, err := request.Post(fmt.Sprintf("/exec/%s/start", id), request.RawString(`{"Detach": true}`), request.JSON) + c.Assert(err, checker.IsNil) + + b, err := testutil.ReadBody(body) + comment := check.Commentf("response body: %s", b) + c.Assert(err, checker.IsNil, comment) + c.Assert(resp.StatusCode, checker.Equals, code, comment) +} + +func inspectExec(c *check.C, id string, out interface{}) { + resp, body, err := request.Get(fmt.Sprintf("/exec/%s/json", id)) + c.Assert(err, checker.IsNil) + defer body.Close() + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) + err = json.NewDecoder(body).Decode(out) + c.Assert(err, checker.IsNil) +} + +func waitForExec(c *check.C, id string) { + timeout := time.After(60 * time.Second) + var execJSON struct{ Running bool } + for { + select { + case <-timeout: + c.Fatal("timeout waiting for exec to start") + default: + } + + inspectExec(c, id, &execJSON) + if !execJSON.Running { + break + } + } +} + +func inspectContainer(c *check.C, id string, out interface{}) { + resp, body, err := request.Get(fmt.Sprintf("/containers/%s/json", id)) + c.Assert(err, checker.IsNil) + defer body.Close() + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) + err = json.NewDecoder(body).Decode(out) + c.Assert(err, checker.IsNil) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_images_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_images_test.go new file mode 100644 index 000000000..d44b307fa --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_images_test.go @@ -0,0 +1,192 @@ +package main + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "net/url" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/image" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/integration-cli/request" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestAPIImagesFilter(c *check.C) { + name := "utest:tag1" + name2 := "utest/docker:tag2" + name3 := "utest:5000/docker:tag3" + for _, n := range []string{name, name2, name3} { + dockerCmd(c, "tag", "busybox", n) + } + type image types.ImageSummary + getImages := func(filter string) []image { + v := url.Values{} + v.Set("filter", filter) + status, b, err := request.SockRequest("GET", "/images/json?"+v.Encode(), nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var images []image + err = json.Unmarshal(b, &images) + c.Assert(err, checker.IsNil) + + return images + } + + //incorrect 
number of matches returned + images := getImages("utest*/*") + c.Assert(images[0].RepoTags, checker.HasLen, 2) + + images = getImages("utest") + c.Assert(images[0].RepoTags, checker.HasLen, 1) + + images = getImages("utest*") + c.Assert(images[0].RepoTags, checker.HasLen, 1) + + images = getImages("*5000*/*") + c.Assert(images[0].RepoTags, checker.HasLen, 1) +} + +func (s *DockerSuite) TestAPIImagesSaveAndLoad(c *check.C) { + // TODO Windows to Windows CI: Investigate further why this test fails. + testRequires(c, Network) + testRequires(c, DaemonIsLinux) + buildImageSuccessfully(c, "saveandload", build.WithDockerfile("FROM busybox\nENV FOO bar")) + id := getIDByName(c, "saveandload") + + res, body, err := request.Get("/images/" + id + "/get") + c.Assert(err, checker.IsNil) + defer body.Close() + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + dockerCmd(c, "rmi", id) + + res, loadBody, err := request.Post("/images/load", request.RawContent(body), request.ContentType("application/x-tar")) + c.Assert(err, checker.IsNil) + defer loadBody.Close() + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + inspectOut := cli.InspectCmd(c, id, cli.Format(".Id")).Combined() + c.Assert(strings.TrimSpace(string(inspectOut)), checker.Equals, id, check.Commentf("load did not work properly")) +} + +func (s *DockerSuite) TestAPIImagesDelete(c *check.C) { + if testEnv.DaemonPlatform() != "windows" { + testRequires(c, Network) + } + name := "test-api-images-delete" + buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nENV FOO bar")) + id := getIDByName(c, name) + + dockerCmd(c, "tag", name, "test:tag1") + + status, _, err := request.SockRequest("DELETE", "/images/"+id, nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusConflict) + + status, _, err = request.SockRequest("DELETE", "/images/test:noexist", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotFound) //Status Codes:404 – no such image + + status, _, err = request.SockRequest("DELETE", "/images/test:tag1", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) +} + +func (s *DockerSuite) TestAPIImagesHistory(c *check.C) { + if testEnv.DaemonPlatform() != "windows" { + testRequires(c, Network) + } + name := "test-api-images-history" + buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nENV FOO bar")) + id := getIDByName(c, name) + + status, body, err := request.SockRequest("GET", "/images/"+id+"/history", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var historydata []image.HistoryResponseItem + err = json.Unmarshal(body, &historydata) + c.Assert(err, checker.IsNil, check.Commentf("Error on unmarshal")) + + c.Assert(historydata, checker.Not(checker.HasLen), 0) + c.Assert(historydata[0].Tags[0], checker.Equals, "test-api-images-history:latest") +} + +func (s *DockerSuite) TestAPIImagesImportBadSrc(c *check.C) { + testRequires(c, Network) + + server := httptest.NewServer(http.NewServeMux()) + defer server.Close() + + tt := []struct { + statusExp int + fromSrc string + }{ + {http.StatusNotFound, server.URL + "/nofile.tar"}, + {http.StatusNotFound, strings.TrimPrefix(server.URL, "http://") + "/nofile.tar"}, + {http.StatusNotFound, strings.TrimPrefix(server.URL, "http://") + "%2Fdata%2Ffile.tar"}, + {http.StatusInternalServerError, "%2Fdata%2Ffile.tar"}, + } + + for _, te := range tt { + res, b, err := 
request.SockRequestRaw("POST", strings.Join([]string{"/images/create?fromSrc=", te.fromSrc}, ""), nil, "application/json", daemonHost()) + c.Assert(err, check.IsNil) + b.Close() + c.Assert(res.StatusCode, checker.Equals, te.statusExp) + c.Assert(res.Header.Get("Content-Type"), checker.Equals, "application/json") + } + +} + +// #14846 +func (s *DockerSuite) TestAPIImagesSearchJSONContentType(c *check.C) { + testRequires(c, Network) + + res, b, err := request.Get("/images/search?term=test", request.JSON) + c.Assert(err, check.IsNil) + b.Close() + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + c.Assert(res.Header.Get("Content-Type"), checker.Equals, "application/json") +} + +// Test case for 30027: image size reported as -1 in v1.12 client against v1.13 daemon. +// This test checks to make sure both v1.12 and v1.13 client against v1.13 daemon get correct `Size` after the fix. +func (s *DockerSuite) TestAPIImagesSizeCompatibility(c *check.C) { + status, b, err := request.SockRequest("GET", "/images/json", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + var images []types.ImageSummary + err = json.Unmarshal(b, &images) + c.Assert(err, checker.IsNil) + c.Assert(len(images), checker.Not(checker.Equals), 0) + for _, image := range images { + c.Assert(image.Size, checker.Not(checker.Equals), int64(-1)) + } + + type v124Image struct { + ID string `json:"Id"` + ParentID string `json:"ParentId"` + RepoTags []string + RepoDigests []string + Created int64 + Size int64 + VirtualSize int64 + Labels map[string]string + } + status, b, err = request.SockRequest("GET", "/v1.24/images/json", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + var v124Images []v124Image + err = json.Unmarshal(b, &v124Images) + c.Assert(err, checker.IsNil) + c.Assert(len(v124Images), checker.Not(checker.Equals), 0) + for _, image := range v124Images { + c.Assert(image.Size, checker.Not(checker.Equals), int64(-1)) + } +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_info_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_info_test.go new file mode 100644 index 000000000..9cb873d60 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_info_test.go @@ -0,0 +1,76 @@ +package main + +import ( + "net/http" + + "encoding/json" + "github.com/docker/docker/api/types" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/docker/docker/pkg/testutil" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestInfoAPI(c *check.C) { + endpoint := "/info" + + status, body, err := request.SockRequest("GET", endpoint, nil, daemonHost()) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) + + // always shown fields + stringsToCheck := []string{ + "ID", + "Containers", + "ContainersRunning", + "ContainersPaused", + "ContainersStopped", + "Images", + "LoggingDriver", + "OperatingSystem", + "NCPU", + "OSType", + "Architecture", + "MemTotal", + "KernelVersion", + "Driver", + "ServerVersion", + "SecurityOptions"} + + out := string(body) + for _, linePrefix := range stringsToCheck { + c.Assert(out, checker.Contains, linePrefix) + } +} + +// TestInfoAPIRuncCommit tests that dockerd is able to obtain RunC version +// information, and that the version matches the expected version +func (s *DockerSuite) TestInfoAPIRuncCommit(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not 
have RunC version information + + res, body, err := request.Get("/v1.30/info") + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) + + b, err := testutil.ReadBody(body) + c.Assert(err, checker.IsNil) + + var i types.Info + + c.Assert(json.Unmarshal(b, &i), checker.IsNil) + c.Assert(i.RuncCommit.ID, checker.Not(checker.Equals), "N/A") + c.Assert(i.RuncCommit.ID, checker.Equals, i.RuncCommit.Expected) +} + +func (s *DockerSuite) TestInfoAPIVersioned(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later + endpoint := "/v1.20/info" + + status, body, err := request.SockRequest("GET", endpoint, nil, daemonHost()) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) + + out := string(body) + c.Assert(out, checker.Contains, "ExecutionDriver") + c.Assert(out, checker.Contains, "not supported") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_inspect_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_inspect_test.go new file mode 100644 index 000000000..f2aa883fa --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_inspect_test.go @@ -0,0 +1,184 @@ +package main + +import ( + "encoding/json" + "net/http" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions/v1p20" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/docker/docker/pkg/stringutils" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestInspectAPIContainerResponse(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + + cleanedContainerID := strings.TrimSpace(out) + keysBase := []string{"Id", "State", "Created", "Path", "Args", "Config", "Image", "NetworkSettings", + "ResolvConfPath", "HostnamePath", "HostsPath", "LogPath", "Name", "Driver", "MountLabel", "ProcessLabel", "GraphDriver"} + + type acase struct { + version string + keys []string + } + + var cases []acase + + if testEnv.DaemonPlatform() == "windows" { + cases = []acase{ + {"v1.25", append(keysBase, "Mounts")}, + } + + } else { + cases = []acase{ + {"v1.20", append(keysBase, "Mounts")}, + {"v1.19", append(keysBase, "Volumes", "VolumesRW")}, + } + } + + for _, cs := range cases { + body := getInspectBody(c, cs.version, cleanedContainerID) + + var inspectJSON map[string]interface{} + err := json.Unmarshal(body, &inspectJSON) + c.Assert(err, checker.IsNil, check.Commentf("Unable to unmarshal body for version %s", cs.version)) + + for _, key := range cs.keys { + _, ok := inspectJSON[key] + c.Check(ok, checker.True, check.Commentf("%s does not exist in response for version %s", key, cs.version)) + } + + //Issue #6830: type not properly converted to JSON/back + _, ok := inspectJSON["Path"].(bool) + c.Assert(ok, checker.False, check.Commentf("Path of `true` should not be converted to boolean `true` via JSON marshalling")) + } +} + +func (s *DockerSuite) TestInspectAPIContainerVolumeDriverLegacy(c *check.C) { + // No legacy implications for Windows + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + + cleanedContainerID := strings.TrimSpace(out) + + cases := []string{"v1.19", "v1.20"} + for _, version := range cases { + body := getInspectBody(c, version, cleanedContainerID) + + var inspectJSON map[string]interface{} + err := json.Unmarshal(body, &inspectJSON) + c.Assert(err, checker.IsNil, check.Commentf("Unable to unmarshal body for version %s", version)) 
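+		// inspectJSON decodes into a generic map, so nested lookups go through
+		// type assertions. The unchecked equivalent of the checked lookup that
+		// follows would be (sketch; panics if "Config" is absent or not an object):
+		//
+		//	cfg := inspectJSON["Config"].(map[string]interface{})
+		//	_, hasDriver := cfg["VolumeDriver"]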
+ + config, ok := inspectJSON["Config"] + c.Assert(ok, checker.True, check.Commentf("Unable to find 'Config'")) + cfg := config.(map[string]interface{}) + _, ok = cfg["VolumeDriver"] + c.Assert(ok, checker.True, check.Commentf("API version %s expected to include VolumeDriver in 'Config'", version)) + } +} + +func (s *DockerSuite) TestInspectAPIContainerVolumeDriver(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "--volume-driver", "local", "busybox", "true") + + cleanedContainerID := strings.TrimSpace(out) + + body := getInspectBody(c, "v1.25", cleanedContainerID) + + var inspectJSON map[string]interface{} + err := json.Unmarshal(body, &inspectJSON) + c.Assert(err, checker.IsNil, check.Commentf("Unable to unmarshal body for version 1.25")) + + config, ok := inspectJSON["Config"] + c.Assert(ok, checker.True, check.Commentf("Unable to find 'Config'")) + cfg := config.(map[string]interface{}) + _, ok = cfg["VolumeDriver"] + c.Assert(ok, checker.False, check.Commentf("API version 1.25 expected to not include VolumeDriver in 'Config'")) + + config, ok = inspectJSON["HostConfig"] + c.Assert(ok, checker.True, check.Commentf("Unable to find 'HostConfig'")) + cfg = config.(map[string]interface{}) + _, ok = cfg["VolumeDriver"] + c.Assert(ok, checker.True, check.Commentf("API version 1.25 expected to include VolumeDriver in 'HostConfig'")) +} + +func (s *DockerSuite) TestInspectAPIImageResponse(c *check.C) { + dockerCmd(c, "tag", "busybox:latest", "busybox:mytag") + + endpoint := "/images/busybox/json" + status, body, err := request.SockRequest("GET", endpoint, nil, daemonHost()) + + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var imageJSON types.ImageInspect + err = json.Unmarshal(body, &imageJSON) + c.Assert(err, checker.IsNil, check.Commentf("Unable to unmarshal body for latest version")) + c.Assert(imageJSON.RepoTags, checker.HasLen, 2) + + c.Assert(stringutils.InSlice(imageJSON.RepoTags, "busybox:latest"), checker.Equals, true) + c.Assert(stringutils.InSlice(imageJSON.RepoTags, "busybox:mytag"), checker.Equals, true) +} + +// #17131, #17139, #17173 +func (s *DockerSuite) TestInspectAPIEmptyFieldsInConfigPre121(c *check.C) { + // Not relevant on Windows + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + + cleanedContainerID := strings.TrimSpace(out) + + cases := []string{"v1.19", "v1.20"} + for _, version := range cases { + body := getInspectBody(c, version, cleanedContainerID) + + var inspectJSON map[string]interface{} + err := json.Unmarshal(body, &inspectJSON) + c.Assert(err, checker.IsNil, check.Commentf("Unable to unmarshal body for version %s", version)) + config, ok := inspectJSON["Config"] + c.Assert(ok, checker.True, check.Commentf("Unable to find 'Config'")) + cfg := config.(map[string]interface{}) + for _, f := range []string{"MacAddress", "NetworkDisabled", "ExposedPorts"} { + _, ok := cfg[f] + c.Check(ok, checker.True, check.Commentf("API version %s expected to include %s in 'Config'", version, f)) + } + } +} + +func (s *DockerSuite) TestInspectAPIBridgeNetworkSettings120(c *check.C) { + // Not relevant on Windows, and besides it doesn't have any bridge network settings + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + containerID := strings.TrimSpace(out) + waitRun(containerID) + + body := getInspectBody(c, "v1.20", containerID) + + var inspectJSON v1p20.ContainerJSON + err := json.Unmarshal(body, &inspectJSON) + c.Assert(err, checker.IsNil) + + settings := 
inspectJSON.NetworkSettings + c.Assert(settings.IPAddress, checker.Not(checker.HasLen), 0) +} + +func (s *DockerSuite) TestInspectAPIBridgeNetworkSettings121(c *check.C) { + // Windows doesn't have any bridge network settings + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + containerID := strings.TrimSpace(out) + waitRun(containerID) + + body := getInspectBody(c, "v1.21", containerID) + + var inspectJSON types.ContainerJSON + err := json.Unmarshal(body, &inspectJSON) + c.Assert(err, checker.IsNil) + + settings := inspectJSON.NetworkSettings + c.Assert(settings.IPAddress, checker.Not(checker.HasLen), 0) + c.Assert(settings.Networks["bridge"], checker.Not(checker.IsNil)) + c.Assert(settings.IPAddress, checker.Equals, settings.Networks["bridge"].IPAddress) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_inspect_unix_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_inspect_unix_test.go new file mode 100644 index 000000000..f7731f3d9 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_inspect_unix_test.go @@ -0,0 +1,36 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "net/http" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/go-check/check" +) + +// #16665 +func (s *DockerSuite) TestInspectAPICpusetInConfigPre120(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, cgroupCpuset) + + name := "cpusetinconfig-pre120" + dockerCmd(c, "run", "--name", name, "--cpuset-cpus", "0", "busybox", "true") + + status, body, err := request.SockRequest("GET", fmt.Sprintf("/v1.19/containers/%s/json", name), nil, daemonHost()) + c.Assert(status, check.Equals, http.StatusOK) + c.Assert(err, check.IsNil) + + var inspectJSON map[string]interface{} + err = json.Unmarshal(body, &inspectJSON) + c.Assert(err, checker.IsNil, check.Commentf("unable to unmarshal body for version 1.19")) + + config, ok := inspectJSON["Config"] + c.Assert(ok, checker.True, check.Commentf("Unable to find 'Config'")) + cfg := config.(map[string]interface{}) + _, ok = cfg["Cpuset"] + c.Assert(ok, checker.True, check.Commentf("API version 1.19 expected to include Cpuset in 'Config'")) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_logs_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_logs_test.go new file mode 100644 index 000000000..5e953b79d --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_logs_test.go @@ -0,0 +1,84 @@ +package main + +import ( + "bufio" + "fmt" + "net/http" + "strings" + "time" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestLogsAPIWithStdout(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "-t", "busybox", "/bin/sh", "-c", "while true; do echo hello; sleep 1; done") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + type logOut struct { + out string + err error + } + + chLog := make(chan logOut) + res, body, err := request.Get(fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1&timestamps=1", id)) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + go func() { + defer body.Close() + out, err := bufio.NewReader(body).ReadString('\n') + if err != nil { + chLog <- logOut{"", err} + return + } + chLog <- logOut{strings.TrimSpace(out), err} + }() + + select { + case l 
:= <-chLog: + c.Assert(l.err, checker.IsNil) + if !strings.HasSuffix(l.out, "hello") { + c.Fatalf("expected log output to contain 'hello', but it does not") + } + case <-time.After(30 * time.Second): + c.Fatal("timeout waiting for logs to exit") + } +} + +func (s *DockerSuite) TestLogsAPINoStdoutNorStderr(c *check.C) { + name := "logs_test" + dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh") + + status, body, err := request.SockRequest("GET", fmt.Sprintf("/containers/%s/logs", name), nil, daemonHost()) + c.Assert(status, checker.Equals, http.StatusBadRequest) + c.Assert(err, checker.IsNil) + + expected := "Bad parameters: you must choose at least one stream" + c.Assert(getErrorMessage(c, body), checker.Contains, expected) +} + +// Regression test for #12704 +func (s *DockerSuite) TestLogsAPIFollowEmptyOutput(c *check.C) { + name := "logs_test" + t0 := time.Now() + dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "sleep", "10") + + _, body, err := request.Get(fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1&stderr=1&tail=all", name)) + t1 := time.Now() + c.Assert(err, checker.IsNil) + body.Close() + elapsed := t1.Sub(t0).Seconds() + if elapsed > 20.0 { + c.Fatalf("HTTP response was not immediate (elapsed %.1fs)", elapsed) + } +} + +func (s *DockerSuite) TestLogsAPIContainerNotFound(c *check.C) { + name := "nonExistentContainer" + resp, _, err := request.Get(fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1&stderr=1&tail=all", name)) + c.Assert(err, checker.IsNil) + c.Assert(resp.StatusCode, checker.Equals, http.StatusNotFound) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_network_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_network_test.go new file mode 100644 index 000000000..129ec7ea6 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_network_test.go @@ -0,0 +1,357 @@ +package main + +import ( + "encoding/json" + "fmt" + "net" + "net/http" + "net/url" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestAPINetworkGetDefaults(c *check.C) { + testRequires(c, DaemonIsLinux) + // By default docker daemon creates 3 networks. 
check if they are present + defaults := []string{"bridge", "host", "none"} + for _, nn := range defaults { + c.Assert(isNetworkAvailable(c, nn), checker.Equals, true) + } +} + +func (s *DockerSuite) TestAPINetworkCreateDelete(c *check.C) { + testRequires(c, DaemonIsLinux) + // Create a network + name := "testnetwork" + config := types.NetworkCreateRequest{ + Name: name, + NetworkCreate: types.NetworkCreate{ + CheckDuplicate: true, + }, + } + id := createNetwork(c, config, true) + c.Assert(isNetworkAvailable(c, name), checker.Equals, true) + + // delete the network and make sure it is deleted + deleteNetwork(c, id, true) + c.Assert(isNetworkAvailable(c, name), checker.Equals, false) +} + +func (s *DockerSuite) TestAPINetworkCreateCheckDuplicate(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testcheckduplicate" + configOnCheck := types.NetworkCreateRequest{ + Name: name, + NetworkCreate: types.NetworkCreate{ + CheckDuplicate: true, + }, + } + configNotCheck := types.NetworkCreateRequest{ + Name: name, + NetworkCreate: types.NetworkCreate{ + CheckDuplicate: false, + }, + } + + // Creating a new network first + createNetwork(c, configOnCheck, true) + c.Assert(isNetworkAvailable(c, name), checker.Equals, true) + + // Creating another network with same name and CheckDuplicate must fail + createNetwork(c, configOnCheck, false) + + // Creating another network with same name and not CheckDuplicate must succeed + createNetwork(c, configNotCheck, true) +} + +func (s *DockerSuite) TestAPINetworkFilter(c *check.C) { + testRequires(c, DaemonIsLinux) + nr := getNetworkResource(c, getNetworkIDByName(c, "bridge")) + c.Assert(nr.Name, checker.Equals, "bridge") +} + +func (s *DockerSuite) TestAPINetworkInspect(c *check.C) { + testRequires(c, DaemonIsLinux) + // Inspect default bridge network + nr := getNetworkResource(c, "bridge") + c.Assert(nr.Name, checker.Equals, "bridge") + + // run a container and attach it to the default bridge network + out, _ := dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top") + containerID := strings.TrimSpace(out) + containerIP := findContainerIP(c, "test", "bridge") + + // inspect default bridge network again and make sure the container is connected + nr = getNetworkResource(c, nr.ID) + c.Assert(nr.Driver, checker.Equals, "bridge") + c.Assert(nr.Scope, checker.Equals, "local") + c.Assert(nr.Internal, checker.Equals, false) + c.Assert(nr.EnableIPv6, checker.Equals, false) + c.Assert(nr.IPAM.Driver, checker.Equals, "default") + c.Assert(len(nr.Containers), checker.Equals, 1) + c.Assert(nr.Containers[containerID], checker.NotNil) + + ip, _, err := net.ParseCIDR(nr.Containers[containerID].IPv4Address) + c.Assert(err, checker.IsNil) + c.Assert(ip.String(), checker.Equals, containerIP) + + // IPAM configuration inspect + ipam := &network.IPAM{ + Driver: "default", + Config: []network.IPAMConfig{{Subnet: "172.28.0.0/16", IPRange: "172.28.5.0/24", Gateway: "172.28.5.254"}}, + } + config := types.NetworkCreateRequest{ + Name: "br0", + NetworkCreate: types.NetworkCreate{ + Driver: "bridge", + IPAM: ipam, + Options: map[string]string{"foo": "bar", "opts": "dopts"}, + }, + } + id0 := createNetwork(c, config, true) + c.Assert(isNetworkAvailable(c, "br0"), checker.Equals, true) + + nr = getNetworkResource(c, id0) + c.Assert(len(nr.IPAM.Config), checker.Equals, 1) + c.Assert(nr.IPAM.Config[0].Subnet, checker.Equals, "172.28.0.0/16") + c.Assert(nr.IPAM.Config[0].IPRange, checker.Equals, "172.28.5.0/24") + c.Assert(nr.IPAM.Config[0].Gateway, checker.Equals, "172.28.5.254") 
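+ // The three literals asserted above encode the usual IPAM invariant: the IPRange pool and the Gateway both sit inside the Subnet. A quick sketch of that containment check with the standard net package (an illustration, not the daemon's own validation path):
+ //
+ //	_, subnet, _ := net.ParseCIDR("172.28.0.0/16")
+ //	pool, _, _ := net.ParseCIDR("172.28.5.0/24")
+ //	subnet.Contains(pool)                        // true: the pool lies inside the subnet
+ //	subnet.Contains(net.ParseIP("172.28.5.254")) // true: the gateway lies inside the subnet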
+ c.Assert(nr.Options["foo"], checker.Equals, "bar") + c.Assert(nr.Options["opts"], checker.Equals, "dopts") + + // delete the network and make sure it is deleted + deleteNetwork(c, id0, true) + c.Assert(isNetworkAvailable(c, "br0"), checker.Equals, false) +} + +func (s *DockerSuite) TestAPINetworkConnectDisconnect(c *check.C) { + testRequires(c, DaemonIsLinux) + // Create test network + name := "testnetwork" + config := types.NetworkCreateRequest{ + Name: name, + } + id := createNetwork(c, config, true) + nr := getNetworkResource(c, id) + c.Assert(nr.Name, checker.Equals, name) + c.Assert(nr.ID, checker.Equals, id) + c.Assert(len(nr.Containers), checker.Equals, 0) + + // run a container + out, _ := dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top") + containerID := strings.TrimSpace(out) + + // connect the container to the test network + connectNetwork(c, nr.ID, containerID) + + // inspect the network to make sure container is connected + nr = getNetworkResource(c, nr.ID) + c.Assert(len(nr.Containers), checker.Equals, 1) + c.Assert(nr.Containers[containerID], checker.NotNil) + + // check if container IP matches network inspect + ip, _, err := net.ParseCIDR(nr.Containers[containerID].IPv4Address) + c.Assert(err, checker.IsNil) + containerIP := findContainerIP(c, "test", "testnetwork") + c.Assert(ip.String(), checker.Equals, containerIP) + + // disconnect container from the network + disconnectNetwork(c, nr.ID, containerID) + nr = getNetworkResource(c, nr.ID) + c.Assert(nr.Name, checker.Equals, name) + c.Assert(len(nr.Containers), checker.Equals, 0) + + // delete the network + deleteNetwork(c, nr.ID, true) +} + +func (s *DockerSuite) TestAPINetworkIPAMMultipleBridgeNetworks(c *check.C) { + testRequires(c, DaemonIsLinux) + // test0 bridge network + ipam0 := &network.IPAM{ + Driver: "default", + Config: []network.IPAMConfig{{Subnet: "192.178.0.0/16", IPRange: "192.178.128.0/17", Gateway: "192.178.138.100"}}, + } + config0 := types.NetworkCreateRequest{ + Name: "test0", + NetworkCreate: types.NetworkCreate{ + Driver: "bridge", + IPAM: ipam0, + }, + } + id0 := createNetwork(c, config0, true) + c.Assert(isNetworkAvailable(c, "test0"), checker.Equals, true) + + ipam1 := &network.IPAM{ + Driver: "default", + Config: []network.IPAMConfig{{Subnet: "192.178.128.0/17", Gateway: "192.178.128.1"}}, + } + // test1 bridge network overlaps with test0 + config1 := types.NetworkCreateRequest{ + Name: "test1", + NetworkCreate: types.NetworkCreate{ + Driver: "bridge", + IPAM: ipam1, + }, + } + createNetwork(c, config1, false) + c.Assert(isNetworkAvailable(c, "test1"), checker.Equals, false) + + ipam2 := &network.IPAM{ + Driver: "default", + Config: []network.IPAMConfig{{Subnet: "192.169.0.0/16", Gateway: "192.169.100.100"}}, + } + // test2 bridge network does not overlap + config2 := types.NetworkCreateRequest{ + Name: "test2", + NetworkCreate: types.NetworkCreate{ + Driver: "bridge", + IPAM: ipam2, + }, + } + createNetwork(c, config2, true) + c.Assert(isNetworkAvailable(c, "test2"), checker.Equals, true) + + // remove test0 and retry to create test1 + deleteNetwork(c, id0, true) + createNetwork(c, config1, true) + c.Assert(isNetworkAvailable(c, "test1"), checker.Equals, true) + + // for networks w/o ipam specified, docker will choose proper non-overlapping subnets + createNetwork(c, types.NetworkCreateRequest{Name: "test3"}, true) + c.Assert(isNetworkAvailable(c, "test3"), checker.Equals, true) + createNetwork(c, types.NetworkCreateRequest{Name: "test4"}, true) + c.Assert(isNetworkAvailable(c, 
"test4"), checker.Equals, true) + createNetwork(c, types.NetworkCreateRequest{Name: "test5"}, true) + c.Assert(isNetworkAvailable(c, "test5"), checker.Equals, true) + + for i := 1; i < 6; i++ { + deleteNetwork(c, fmt.Sprintf("test%d", i), true) + } +} + +func (s *DockerSuite) TestAPICreateDeletePredefinedNetworks(c *check.C) { + testRequires(c, DaemonIsLinux) + createDeletePredefinedNetwork(c, "bridge") + createDeletePredefinedNetwork(c, "none") + createDeletePredefinedNetwork(c, "host") +} + +func createDeletePredefinedNetwork(c *check.C, name string) { + // Create pre-defined network + config := types.NetworkCreateRequest{ + Name: name, + NetworkCreate: types.NetworkCreate{ + CheckDuplicate: true, + }, + } + shouldSucceed := false + createNetwork(c, config, shouldSucceed) + deleteNetwork(c, name, shouldSucceed) +} + +func isNetworkAvailable(c *check.C, name string) bool { + resp, body, err := request.Get("/networks") + c.Assert(err, checker.IsNil) + defer resp.Body.Close() + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) + + nJSON := []types.NetworkResource{} + err = json.NewDecoder(body).Decode(&nJSON) + c.Assert(err, checker.IsNil) + + for _, n := range nJSON { + if n.Name == name { + return true + } + } + return false +} + +func getNetworkIDByName(c *check.C, name string) string { + var ( + v = url.Values{} + filterArgs = filters.NewArgs() + ) + filterArgs.Add("name", name) + filterJSON, err := filters.ToParam(filterArgs) + c.Assert(err, checker.IsNil) + v.Set("filters", filterJSON) + + resp, body, err := request.Get("/networks?" + v.Encode()) + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) + + nJSON := []types.NetworkResource{} + err = json.NewDecoder(body).Decode(&nJSON) + c.Assert(err, checker.IsNil) + c.Assert(len(nJSON), checker.Equals, 1) + + return nJSON[0].ID +} + +func getNetworkResource(c *check.C, id string) *types.NetworkResource { + _, obj, err := request.Get("/networks/" + id) + c.Assert(err, checker.IsNil) + + nr := types.NetworkResource{} + err = json.NewDecoder(obj).Decode(&nr) + c.Assert(err, checker.IsNil) + + return &nr +} + +func createNetwork(c *check.C, config types.NetworkCreateRequest, shouldSucceed bool) string { + resp, body, err := request.Post("/networks/create", request.JSONBody(config)) + c.Assert(err, checker.IsNil) + defer resp.Body.Close() + if !shouldSucceed { + c.Assert(resp.StatusCode, checker.Not(checker.Equals), http.StatusCreated) + return "" + } + + c.Assert(resp.StatusCode, checker.Equals, http.StatusCreated) + + var nr types.NetworkCreateResponse + err = json.NewDecoder(body).Decode(&nr) + c.Assert(err, checker.IsNil) + + return nr.ID +} + +func connectNetwork(c *check.C, nid, cid string) { + config := types.NetworkConnect{ + Container: cid, + } + + resp, _, err := request.Post("/networks/"+nid+"/connect", request.JSONBody(config)) + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) +} + +func disconnectNetwork(c *check.C, nid, cid string) { + config := types.NetworkConnect{ + Container: cid, + } + + resp, _, err := request.Post("/networks/"+nid+"/disconnect", request.JSONBody(config)) + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) +} + +func deleteNetwork(c *check.C, id string, shouldSucceed bool) { + resp, _, err := request.Delete("/networks/" + id) + c.Assert(err, checker.IsNil) + defer resp.Body.Close() + if !shouldSucceed { + c.Assert(resp.StatusCode, checker.Not(checker.Equals), http.StatusOK) + return + } 
+ c.Assert(resp.StatusCode, checker.Equals, http.StatusNoContent) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_resize_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_resize_test.go new file mode 100644 index 000000000..4a07fc737 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_resize_test.go @@ -0,0 +1,45 @@ +package main + +import ( + "net/http" + "strings" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestResizeAPIResponse(c *check.C) { + out := runSleepingContainer(c, "-d") + cleanedContainerID := strings.TrimSpace(out) + + endpoint := "/containers/" + cleanedContainerID + "/resize?h=40&w=40" + status, _, err := request.SockRequest("POST", endpoint, nil, daemonHost()) + c.Assert(status, check.Equals, http.StatusOK) + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestResizeAPIHeightWidthNoInt(c *check.C) { + out := runSleepingContainer(c, "-d") + cleanedContainerID := strings.TrimSpace(out) + + endpoint := "/containers/" + cleanedContainerID + "/resize?h=foo&w=bar" + status, _, err := request.SockRequest("POST", endpoint, nil, daemonHost()) + c.Assert(status, check.Equals, http.StatusInternalServerError) + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestResizeAPIResponseWhenContainerNotStarted(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + cleanedContainerID := strings.TrimSpace(out) + + // make sure the exited container is not running + dockerCmd(c, "wait", cleanedContainerID) + + endpoint := "/containers/" + cleanedContainerID + "/resize?h=40&w=40" + status, body, err := request.SockRequest("POST", endpoint, nil, daemonHost()) + c.Assert(status, check.Equals, http.StatusInternalServerError) + c.Assert(err, check.IsNil) + + c.Assert(getErrorMessage(c, body), checker.Contains, "is not running", check.Commentf("resize should fail with message 'Container is not running'")) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_session_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_session_test.go new file mode 100644 index 000000000..e1ad880ea --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_session_test.go @@ -0,0 +1,49 @@ +package main + +import ( + "net/http" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/docker/docker/pkg/testutil" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestSessionCreate(c *check.C) { + testRequires(c, ExperimentalDaemon) + + res, body, err := request.Post("/session", func(r *http.Request) error { + r.Header.Set("X-Docker-Expose-Session-Uuid", "testsessioncreate") // so we don't block default name if something else is using it + r.Header.Set("Upgrade", "h2c") + return nil + }) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusSwitchingProtocols) + c.Assert(res.Header.Get("Upgrade"), checker.Equals, "h2c") + c.Assert(body.Close(), checker.IsNil) +} + +func (s *DockerSuite) TestSessionCreateWithBadUpgrade(c *check.C) { + testRequires(c, ExperimentalDaemon) + + res, body, err := request.Post("/session") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusBadRequest) + buf, err := testutil.ReadBody(body) + c.Assert(err, checker.IsNil) + + out := string(buf) + c.Assert(out, checker.Contains, "no upgrade") + + res, body, err = 
request.Post("/session", func(r *http.Request) error { + r.Header.Set("Upgrade", "foo") + return nil + }) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusBadRequest) + buf, err = testutil.ReadBody(body) + c.Assert(err, checker.IsNil) + + out = string(buf) + c.Assert(out, checker.Contains, "not supported") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_stats_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_stats_test.go new file mode 100644 index 000000000..f1cb5bb4a --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_stats_test.go @@ -0,0 +1,310 @@ +package main + +import ( + "encoding/json" + "fmt" + "net/http" + "os/exec" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/go-check/check" +) + +var expectedNetworkInterfaceStats = strings.Split("rx_bytes rx_dropped rx_errors rx_packets tx_bytes tx_dropped tx_errors tx_packets", " ") + +func (s *DockerSuite) TestAPIStatsNoStreamGetCpu(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "while true;usleep 100; do echo 'Hello'; done") + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + resp, body, err := request.Get(fmt.Sprintf("/containers/%s/stats?stream=false", id)) + c.Assert(err, checker.IsNil) + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) + c.Assert(resp.Header.Get("Content-Type"), checker.Equals, "application/json") + + var v *types.Stats + err = json.NewDecoder(body).Decode(&v) + c.Assert(err, checker.IsNil) + body.Close() + + var cpuPercent = 0.0 + + if testEnv.DaemonPlatform() != "windows" { + cpuDelta := float64(v.CPUStats.CPUUsage.TotalUsage - v.PreCPUStats.CPUUsage.TotalUsage) + systemDelta := float64(v.CPUStats.SystemUsage - v.PreCPUStats.SystemUsage) + cpuPercent = (cpuDelta / systemDelta) * float64(len(v.CPUStats.CPUUsage.PercpuUsage)) * 100.0 + } else { + // Max number of 100ns intervals between the previous time read and now + possIntervals := uint64(v.Read.Sub(v.PreRead).Nanoseconds()) // Start with number of ns intervals + possIntervals /= 100 // Convert to number of 100ns intervals + possIntervals *= uint64(v.NumProcs) // Multiple by the number of processors + + // Intervals used + intervalsUsed := v.CPUStats.CPUUsage.TotalUsage - v.PreCPUStats.CPUUsage.TotalUsage + + // Percentage avoiding divide-by-zero + if possIntervals > 0 { + cpuPercent = float64(intervalsUsed) / float64(possIntervals) * 100.0 + } + } + + c.Assert(cpuPercent, check.Not(checker.Equals), 0.0, check.Commentf("docker stats with no-stream get cpu usage failed: was %v", cpuPercent)) +} + +func (s *DockerSuite) TestAPIStatsStoppedContainerInGoroutines(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "echo 1") + id := strings.TrimSpace(out) + + getGoRoutines := func() int { + _, body, err := request.Get(fmt.Sprintf("/info")) + c.Assert(err, checker.IsNil) + info := types.Info{} + err = json.NewDecoder(body).Decode(&info) + c.Assert(err, checker.IsNil) + body.Close() + return info.NGoroutines + } + + // When the HTTP connection is closed, the number of goroutines should not increase. 
+ routines := getGoRoutines() + _, body, err := request.Get(fmt.Sprintf("/containers/%s/stats", id)) + c.Assert(err, checker.IsNil) + body.Close() + + t := time.After(30 * time.Second) + for { + select { + case <-t: + c.Assert(getGoRoutines(), checker.LessOrEqualThan, routines) + return + default: + if n := getGoRoutines(); n <= routines { + return + } + time.Sleep(200 * time.Millisecond) + } + } +} + +func (s *DockerSuite) TestAPIStatsNetworkStats(c *check.C) { + testRequires(c, SameHostDaemon) + + out := runSleepingContainer(c) + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + // Retrieve the container address + net := "bridge" + if testEnv.DaemonPlatform() == "windows" { + net = "nat" + } + contIP := findContainerIP(c, id, net) + numPings := 1 + + var preRxPackets uint64 + var preTxPackets uint64 + var postRxPackets uint64 + var postTxPackets uint64 + + // Get the container networking stats before and after pinging the container + nwStatsPre := getNetworkStats(c, id) + for _, v := range nwStatsPre { + preRxPackets += v.RxPackets + preTxPackets += v.TxPackets + } + + countParam := "-c" + if runtime.GOOS == "windows" { + countParam = "-n" // Ping count parameter is -n on Windows + } + pingout, err := exec.Command("ping", contIP, countParam, strconv.Itoa(numPings)).CombinedOutput() + if err != nil && runtime.GOOS == "linux" { + // If it fails then try a work-around, but just for linux. + // If this fails too then go back to the old error for reporting. + // + // The ping will sometimes fail due to an apparmor issue where it + // denies access to the libc.so.6 shared library - running it + // via /lib64/ld-linux-x86-64.so.2 seems to work around it. + pingout2, err2 := exec.Command("/lib64/ld-linux-x86-64.so.2", "/bin/ping", contIP, "-c", strconv.Itoa(numPings)).CombinedOutput() + if err2 == nil { + pingout = pingout2 + err = err2 + } + } + c.Assert(err, checker.IsNil) + pingouts := string(pingout[:]) + nwStatsPost := getNetworkStats(c, id) + for _, v := range nwStatsPost { + postRxPackets += v.RxPackets + postTxPackets += v.TxPackets + } + + // Verify the stats contain at least the expected number of packets + // On Linux, account for ARP. + expRxPkts := preRxPackets + uint64(numPings) + expTxPkts := preTxPackets + uint64(numPings) + if testEnv.DaemonPlatform() != "windows" { + expRxPkts++ + expTxPkts++ + } + c.Assert(postTxPackets, checker.GreaterOrEqualThan, expTxPkts, + check.Commentf("Reported less TxPackets than expected. Expected >= %d. Found %d. %s", expTxPkts, postTxPackets, pingouts)) + c.Assert(postRxPackets, checker.GreaterOrEqualThan, expRxPkts, + check.Commentf("Reported less RxPackets than expected. Expected >= %d. Found %d. %s", expRxPkts, postRxPackets, pingouts)) +} + +func (s *DockerSuite) TestAPIStatsNetworkStatsVersioning(c *check.C) { + // Windows doesn't support API versions less than 1.25, so no point testing 1.17 .. 
1.21 + testRequires(c, SameHostDaemon, DaemonIsLinux) + + out := runSleepingContainer(c) + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + wg := sync.WaitGroup{} + + for i := 17; i <= 21; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + apiVersion := fmt.Sprintf("v1.%d", i) + statsJSONBlob := getVersionedStats(c, id, apiVersion) + if versions.LessThan(apiVersion, "v1.21") { + c.Assert(jsonBlobHasLTv121NetworkStats(statsJSONBlob), checker.Equals, true, + check.Commentf("Stats JSON blob from API %s %#v does not look like a <v1.21 API stats structure", apiVersion, statsJSONBlob)) + } else { + c.Assert(jsonBlobHasGTE121NetworkStats(statsJSONBlob), checker.Equals, true, + check.Commentf("Stats JSON blob from API %s %#v does not look like a >=v1.21 API stats structure", apiVersion, statsJSONBlob)) + } + }(i) + } + wg.Wait() +} + +func getNetworkStats(c *check.C, id string) map[string]types.NetworkStats { + var st *types.StatsJSON + + _, body, err := request.Get(fmt.Sprintf("/containers/%s/stats?stream=false", id)) + c.Assert(err, checker.IsNil) + + err = json.NewDecoder(body).Decode(&st) + c.Assert(err, checker.IsNil) + body.Close() + + return st.Networks +} + +// getVersionedStats returns stats result for the +// container with id using an API call with version apiVersion. Since the +// stats result type differs between API versions, we simply return +// map[string]interface{}. +func getVersionedStats(c *check.C, id string, apiVersion string) map[string]interface{} { + stats := make(map[string]interface{}) + + _, body, err := request.Get(fmt.Sprintf("/%s/containers/%s/stats?stream=false", apiVersion, id)) + c.Assert(err, checker.IsNil) + defer body.Close() + + err = json.NewDecoder(body).Decode(&stats) + c.Assert(err, checker.IsNil, check.Commentf("failed to decode stat: %s", err)) + + return stats +} + +func jsonBlobHasLTv121NetworkStats(blob map[string]interface{}) bool { + networkStatsIntfc, ok := blob["network"] + if !ok { + return false + } + networkStats, ok := networkStatsIntfc.(map[string]interface{}) + if !ok { + return false + } + for _, expectedKey := range expectedNetworkInterfaceStats { + if _, ok := networkStats[expectedKey]; !ok { + return false + } + } + return true +} + +func jsonBlobHasGTE121NetworkStats(blob map[string]interface{}) bool { + networksStatsIntfc, ok := blob["networks"] + if !ok { + return false + } + networksStats, ok := networksStatsIntfc.(map[string]interface{}) + if !ok { + return false + } + for _, networkInterfaceStatsIntfc := range networksStats { + networkInterfaceStats, ok := networkInterfaceStatsIntfc.(map[string]interface{}) + if !ok { + return false + } + for _, expectedKey := range expectedNetworkInterfaceStats { + if _, ok := networkInterfaceStats[expectedKey]; !ok { + return false + } + } + } + return true +} + +func (s *DockerSuite) TestAPIStatsContainerNotFound(c *check.C) { + testRequires(c, DaemonIsLinux) + + status, _, err := request.SockRequest("GET", "/containers/nonexistent/stats", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotFound) + + status, _, err = request.SockRequest("GET", "/containers/nonexistent/stats?stream=0", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotFound) +} + +func (s *DockerSuite) TestAPIStatsNoStreamConnectedContainers(c *check.C) { + testRequires(c, DaemonIsLinux) + + out1 := runSleepingContainer(c) + id1 := strings.TrimSpace(out1) + c.Assert(waitRun(id1), checker.IsNil) + + out2 := runSleepingContainer(c, "--net", "container:"+id1) + id2 := strings.TrimSpace(out2) + c.Assert(waitRun(id2), checker.IsNil) + + ch := make(chan error) + go func() { + resp, body, err := 
request.Get(fmt.Sprintf("/containers/%s/stats?stream=false", id2)) + defer body.Close() + if err != nil { + ch <- err + } + if resp.StatusCode != http.StatusOK { + ch <- fmt.Errorf("Invalid StatusCode %v", resp.StatusCode) + } + if resp.Header.Get("Content-Type") != "application/json" { + ch <- fmt.Errorf("Invalid 'Content-Type' %v", resp.Header.Get("Content-Type")) + } + var v *types.Stats + if err := json.NewDecoder(body).Decode(&v); err != nil { + ch <- err + } + ch <- nil + }() + + select { + case err := <-ch: + c.Assert(err, checker.IsNil, check.Commentf("Error in stats Engine API: %v", err)) + case <-time.After(15 * time.Second): + c.Fatalf("Stats did not return after timeout") + } +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_stats_unix_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_stats_unix_test.go new file mode 100644 index 000000000..627d3359c --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_stats_unix_test.go @@ -0,0 +1,42 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "net/http" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestAPIStatsContainerGetMemoryLimit(c *check.C) { + testRequires(c, DaemonIsLinux, memoryLimitSupport) + + resp, body, err := request.Get("/info", request.JSON) + c.Assert(err, checker.IsNil) + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) + var info types.Info + err = json.NewDecoder(body).Decode(&info) + c.Assert(err, checker.IsNil) + body.Close() + + // don't set a memory limit, the memory limit should be system memory + conName := "foo" + dockerCmd(c, "run", "-d", "--name", conName, "busybox", "top") + c.Assert(waitRun(conName), checker.IsNil) + + resp, body, err = request.Get(fmt.Sprintf("/containers/%s/stats?stream=false", conName)) + c.Assert(err, checker.IsNil) + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) + c.Assert(resp.Header.Get("Content-Type"), checker.Equals, "application/json") + + var v *types.Stats + err = json.NewDecoder(body).Decode(&v) + c.Assert(err, checker.IsNil) + body.Close() + c.Assert(fmt.Sprintf("%d", v.MemoryStats.Limit), checker.Equals, fmt.Sprintf("%d", info.MemTotal)) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_swarm_config_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_swarm_config_test.go new file mode 100644 index 000000000..fab65ccbd --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_swarm_config_test.go @@ -0,0 +1,118 @@ +// +build !windows + +package main + +import ( + "fmt" + "net/http" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestAPISwarmConfigsEmptyList(c *check.C) { + d := s.AddDaemon(c, true, true) + + configs := d.ListConfigs(c) + c.Assert(configs, checker.NotNil) + c.Assert(len(configs), checker.Equals, 0, check.Commentf("configs: %#v", configs)) +} + +func (s *DockerSwarmSuite) TestAPISwarmConfigsCreate(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_config" + id := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + + configs := d.ListConfigs(c) + 
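+ // Wire-format note: swarm.ConfigSpec.Data is a []byte, and Go's encoding/json marshals []byte as base64, so the POST /configs/create payload for []byte("TESTINGDATA") carries
+ //
+ //	"Data": "VEVTVElOR0RBVEE="
+ //
+ // and the daemon decodes it back before storing the config.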
c.Assert(len(configs), checker.Equals, 1, check.Commentf("configs: %#v", configs)) + name := configs[0].Spec.Annotations.Name + c.Assert(name, checker.Equals, testName, check.Commentf("configs: %s", name)) +} + +func (s *DockerSwarmSuite) TestAPISwarmConfigsDelete(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_config" + id := d.CreateConfig(c, swarm.ConfigSpec{Annotations: swarm.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + + config := d.GetConfig(c, id) + c.Assert(config.ID, checker.Equals, id, check.Commentf("config: %v", config)) + + d.DeleteConfig(c, config.ID) + status, out, err := d.SockRequest("GET", "/configs/"+id, nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotFound, check.Commentf("config delete: %s", string(out))) +} + +func (s *DockerSwarmSuite) TestAPISwarmConfigsUpdate(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_config" + id := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: testName, + Labels: map[string]string{ + "test": "test1", + }, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + + config := d.GetConfig(c, id) + c.Assert(config.ID, checker.Equals, id, check.Commentf("config: %v", config)) + + // test UpdateConfig with full ID + d.UpdateConfig(c, id, func(s *swarm.Config) { + s.Spec.Labels = map[string]string{ + "test": "test1", + } + }) + + config = d.GetConfig(c, id) + c.Assert(config.Spec.Labels["test"], checker.Equals, "test1", check.Commentf("config: %v", config)) + + // test UpdateConfig with full name + d.UpdateConfig(c, config.Spec.Name, func(s *swarm.Config) { + s.Spec.Labels = map[string]string{ + "test": "test2", + } + }) + + config = d.GetConfig(c, id) + c.Assert(config.Spec.Labels["test"], checker.Equals, "test2", check.Commentf("config: %v", config)) + + // test UpdateConfig with prefix ID + d.UpdateConfig(c, id[:1], func(s *swarm.Config) { + s.Spec.Labels = map[string]string{ + "test": "test3", + } + }) + + config = d.GetConfig(c, id) + c.Assert(config.Spec.Labels["test"], checker.Equals, "test3", check.Commentf("config: %v", config)) + + // test UpdateConfig in updating Data which is not supported in daemon + // this test will produce an error in func UpdateConfig + config = d.GetConfig(c, id) + config.Spec.Data = []byte("TESTINGDATA2") + + url := fmt.Sprintf("/configs/%s/update?version=%d", config.ID, config.Version.Index) + status, out, err := d.SockRequest("POST", url, config.Spec) + + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusBadRequest, check.Commentf("output: %q", string(out))) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_swarm_node_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_swarm_node_test.go new file mode 100644 index 000000000..98f80552c --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_swarm_node_test.go @@ -0,0 +1,128 @@ +// +build !windows + +package main + +import ( + "time" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/daemon" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestAPISwarmListNodes(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, false) + d3 := 
s.AddDaemon(c, true, false) + + nodes := d1.ListNodes(c) + c.Assert(len(nodes), checker.Equals, 3, check.Commentf("nodes: %#v", nodes)) + +loop0: + for _, n := range nodes { + for _, d := range []*daemon.Swarm{d1, d2, d3} { + if n.ID == d.NodeID { + continue loop0 + } + } + c.Errorf("unknown nodeID %v", n.ID) + } +} + +func (s *DockerSwarmSuite) TestAPISwarmNodeUpdate(c *check.C) { + d := s.AddDaemon(c, true, true) + + nodes := d.ListNodes(c) + + d.UpdateNode(c, nodes[0].ID, func(n *swarm.Node) { + n.Spec.Availability = swarm.NodeAvailabilityPause + }) + + n := d.GetNode(c, nodes[0].ID) + c.Assert(n.Spec.Availability, checker.Equals, swarm.NodeAvailabilityPause) +} + +func (s *DockerSwarmSuite) TestAPISwarmNodeRemove(c *check.C) { + testRequires(c, Network) + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, false) + _ = s.AddDaemon(c, true, false) + + nodes := d1.ListNodes(c) + c.Assert(len(nodes), checker.Equals, 3, check.Commentf("nodes: %#v", nodes)) + + // Getting the info so we can take the NodeID + d2Info, err := d2.SwarmInfo() + c.Assert(err, checker.IsNil) + + // forceful removal of d2 should work + d1.RemoveNode(c, d2Info.NodeID, true) + + nodes = d1.ListNodes(c) + c.Assert(len(nodes), checker.Equals, 2, check.Commentf("nodes: %#v", nodes)) + + // Restart the node that was removed + d2.Restart(c) + + // Give some time for the node to rejoin + time.Sleep(1 * time.Second) + + // Make sure the node didn't rejoin + nodes = d1.ListNodes(c) + c.Assert(len(nodes), checker.Equals, 2, check.Commentf("nodes: %#v", nodes)) +} + +func (s *DockerSwarmSuite) TestAPISwarmNodeDrainPause(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, false) + + time.Sleep(1 * time.Second) // make sure all daemons are ready to accept tasks + + // start a service, expect balanced distribution + instances := 8 + id := d1.CreateService(c, simpleTestService, setInstances(instances)) + + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, instances) + + // drain d2, all containers should move to d1 + d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) { + n.Spec.Availability = swarm.NodeAvailabilityDrain + }) + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 0) + + // set d2 back to active + d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) { + n.Spec.Availability = swarm.NodeAvailabilityActive + }) + + instances = 1 + d1.UpdateService(c, d1.GetService(c, id), setInstances(instances)) + + waitAndAssert(c, defaultReconciliationTimeout*2, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, instances) + + instances = 8 + d1.UpdateService(c, d1.GetService(c, id), setInstances(instances)) + + // drained node first so we don't get any old containers + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout*2, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), 
checker.Equals, instances) + + d2ContainerCount := len(d2.ActiveContainers()) + + // set d2 to paused, scale service up, only d1 gets new tasks + d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) { + n.Spec.Availability = swarm.NodeAvailabilityPause + }) + + instances = 14 + d1.UpdateService(c, d1.GetService(c, id), setInstances(instances)) + + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances-d2ContainerCount) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, d2ContainerCount) + +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_swarm_secret_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_swarm_secret_test.go new file mode 100644 index 000000000..cb82af8e2 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_swarm_secret_test.go @@ -0,0 +1,132 @@ +// +build !windows + +package main + +import ( + "fmt" + "net/http" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestAPISwarmSecretsEmptyList(c *check.C) { + d := s.AddDaemon(c, true, true) + + secrets := d.ListSecrets(c) + c.Assert(secrets, checker.NotNil) + c.Assert(len(secrets), checker.Equals, 0, check.Commentf("secrets: %#v", secrets)) +} + +func (s *DockerSwarmSuite) TestAPISwarmSecretsCreate(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_secret" + secretSpec := swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA"), + } + + id := d.CreateSecret(c, secretSpec) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + secrets := d.ListSecrets(c) + c.Assert(len(secrets), checker.Equals, 1, check.Commentf("secrets: %#v", secrets)) + name := secrets[0].Spec.Annotations.Name + c.Assert(name, checker.Equals, testName, check.Commentf("secret: %s", name)) + + // create an already existing secret, daemon should return a status code of 409 + status, out, err := d.SockRequest("POST", "/secrets/create", secretSpec) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusConflict, check.Commentf("secret create: %s", string(out))) +} + +func (s *DockerSwarmSuite) TestAPISwarmSecretsDelete(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_secret" + id := d.CreateSecret(c, swarm.SecretSpec{Annotations: swarm.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + secret := d.GetSecret(c, id) + c.Assert(secret.ID, checker.Equals, id, check.Commentf("secret: %v", secret)) + + d.DeleteSecret(c, secret.ID) + status, out, err := d.SockRequest("GET", "/secrets/"+id, nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotFound, check.Commentf("secret delete: %s", string(out))) + + // delete non-existing secret, daemon should return a status code of 404 + id = "non-existing" + status, out, err = d.SockRequest("DELETE", "/secrets/"+id, nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotFound, check.Commentf("secret delete: %s", string(out))) + +} + +func (s *DockerSwarmSuite) TestAPISwarmSecretsUpdate(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_secret" + id := d.CreateSecret(c, swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: testName, + Labels: 
map[string]string{ + "test": "test1", + }, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + secret := d.GetSecret(c, id) + c.Assert(secret.ID, checker.Equals, id, check.Commentf("secret: %v", secret)) + + // test UpdateSecret with full ID + d.UpdateSecret(c, id, func(s *swarm.Secret) { + s.Spec.Labels = map[string]string{ + "test": "test1", + } + }) + + secret = d.GetSecret(c, id) + c.Assert(secret.Spec.Labels["test"], checker.Equals, "test1", check.Commentf("secret: %v", secret)) + + // test UpdateSecret with full name + d.UpdateSecret(c, secret.Spec.Name, func(s *swarm.Secret) { + s.Spec.Labels = map[string]string{ + "test": "test2", + } + }) + + secret = d.GetSecret(c, id) + c.Assert(secret.Spec.Labels["test"], checker.Equals, "test2", check.Commentf("secret: %v", secret)) + + // test UpdateSecret with prefix ID + d.UpdateSecret(c, id[:1], func(s *swarm.Secret) { + s.Spec.Labels = map[string]string{ + "test": "test3", + } + }) + + secret = d.GetSecret(c, id) + c.Assert(secret.Spec.Labels["test"], checker.Equals, "test3", check.Commentf("secret: %v", secret)) + + // test UpdateSecret in updating Data which is not supported in daemon + // this test will produce an error in func UpdateSecret + secret = d.GetSecret(c, id) + secret.Spec.Data = []byte("TESTINGDATA2") + + url := fmt.Sprintf("/secrets/%s/update?version=%d", secret.ID, secret.Version.Index) + status, out, err := d.SockRequest("POST", url, secret.Spec) + + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusBadRequest, check.Commentf("output: %q", string(out))) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_swarm_service_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_swarm_service_test.go new file mode 100644 index 000000000..2ec56ccd8 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_swarm_service_test.go @@ -0,0 +1,676 @@ +// +build !windows + +package main + +import ( + "fmt" + "path" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/swarm/runtime" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/daemon" + "github.com/docker/docker/integration-cli/fixtures/plugin" + "github.com/go-check/check" + "golang.org/x/net/context" + "golang.org/x/sys/unix" +) + +func setPortConfig(portConfig []swarm.PortConfig) daemon.ServiceConstructor { + return func(s *swarm.Service) { + if s.Spec.EndpointSpec == nil { + s.Spec.EndpointSpec = &swarm.EndpointSpec{} + } + s.Spec.EndpointSpec.Ports = portConfig + } +} + +func (s *DockerSwarmSuite) TestAPIServiceUpdatePort(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Create a service with a port mapping of 8080:8081. + portConfig := []swarm.PortConfig{{TargetPort: 8081, PublishedPort: 8080}} + serviceID := d.CreateService(c, simpleTestService, setInstances(1), setPortConfig(portConfig)) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + + // Update the service: changed the port mapping from 8080:8081 to 8082:8083. + updatedPortConfig := []swarm.PortConfig{{TargetPort: 8083, PublishedPort: 8082}} + remoteService := d.GetService(c, serviceID) + d.UpdateService(c, remoteService, setPortConfig(updatedPortConfig)) + + // Inspect the service and verify port mapping. 
+ updatedService := d.GetService(c, serviceID) + c.Assert(updatedService.Spec.EndpointSpec, check.NotNil) + c.Assert(len(updatedService.Spec.EndpointSpec.Ports), check.Equals, 1) + c.Assert(updatedService.Spec.EndpointSpec.Ports[0].TargetPort, check.Equals, uint32(8083)) + c.Assert(updatedService.Spec.EndpointSpec.Ports[0].PublishedPort, check.Equals, uint32(8082)) +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesEmptyList(c *check.C) { + d := s.AddDaemon(c, true, true) + + services := d.ListServices(c) + c.Assert(services, checker.NotNil) + c.Assert(len(services), checker.Equals, 0, check.Commentf("services: %#v", services)) +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesCreate(c *check.C) { + d := s.AddDaemon(c, true, true) + + instances := 2 + id := d.CreateService(c, simpleTestService, setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances) + + // insertDefaults inserts UpdateConfig when service is fetched by ID + _, out, err := d.SockRequest("GET", "/services/"+id+"?insertDefaults=true", nil) + c.Assert(err, checker.IsNil, check.Commentf("%s", out)) + c.Assert(string(out), checker.Contains, "UpdateConfig") + + // insertDefaults inserts UpdateConfig when service is fetched by name + _, out, err = d.SockRequest("GET", "/services/top?insertDefaults=true", nil) + c.Assert(err, checker.IsNil, check.Commentf("%s", out)) + c.Assert(string(out), checker.Contains, "UpdateConfig") + + service := d.GetService(c, id) + instances = 5 + d.UpdateService(c, service, setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances) + + d.RemoveService(c, service.ID) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 0) +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesMultipleAgents(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, false) + d3 := s.AddDaemon(c, true, false) + + time.Sleep(1 * time.Second) // make sure all daemons are ready to accept tasks + + instances := 9 + id := d1.CreateService(c, simpleTestService, setInstances(instances)) + + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout, d3.CheckActiveContainerCount, checker.GreaterThan, 0) + + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances) + + // reconciliation on d2 node down + d2.Stop(c) + + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances) + + // test downscaling + instances = 5 + d1.UpdateService(c, d1.GetService(c, id), setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances) + +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesCreateGlobal(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, false) + d3 := s.AddDaemon(c, true, false) + + d1.CreateService(c, simpleTestService, setGlobalMode) + + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, 
defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d3.CheckActiveContainerCount, checker.Equals, 1) + + d4 := s.AddDaemon(c, true, false) + d5 := s.AddDaemon(c, true, false) + + waitAndAssert(c, defaultReconciliationTimeout, d4.CheckActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d5.CheckActiveContainerCount, checker.Equals, 1) +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesUpdate(c *check.C) { + const nodeCount = 3 + var daemons [nodeCount]*daemon.Swarm + for i := 0; i < nodeCount; i++ { + daemons[i] = s.AddDaemon(c, true, i == 0) + } + // wait for nodes ready + waitAndAssert(c, 5*time.Second, daemons[0].CheckNodeReadyCount, checker.Equals, nodeCount) + + // service image at start + image1 := "busybox:latest" + // target image in update + image2 := "busybox:test" + + // create a different tag + for _, d := range daemons { + out, err := d.Cmd("tag", image1, image2) + c.Assert(err, checker.IsNil, check.Commentf(out)) + } + + // create service + instances := 5 + parallelism := 2 + rollbackParallelism := 3 + id := daemons[0].CreateService(c, serviceForUpdate, setInstances(instances)) + + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances}) + + // issue service update + service := daemons[0].GetService(c, id) + daemons[0].UpdateService(c, service, setImage(image2)) + + // first batch + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances - parallelism, image2: parallelism}) + + // 2nd batch + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances - 2*parallelism, image2: 2 * parallelism}) + + // 3rd batch + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image2: instances}) + + // Roll back to the previous version. This uses the CLI because + // rollback used to be a client-side operation. 
+ out, err := daemons[0].Cmd("service", "update", "--rollback", id) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // first batch + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image2: instances - rollbackParallelism, image1: rollbackParallelism}) + + // 2nd batch + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances}) +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateStartFirst(c *check.C) { + d := s.AddDaemon(c, true, true) + + // service image at start + image1 := "busybox:latest" + // target image in update + image2 := "testhealth" + + // service started from this image won't pass health check + _, _, err := d.BuildImageWithOut(image2, + `FROM busybox + HEALTHCHECK --interval=1s --timeout=30s --retries=1024 \ + CMD cat /status`, + true) + c.Check(err, check.IsNil) + + // create service + instances := 5 + parallelism := 2 + rollbackParallelism := 3 + id := d.CreateService(c, serviceForUpdate, setInstances(instances), setUpdateOrder(swarm.UpdateOrderStartFirst), setRollbackOrder(swarm.UpdateOrderStartFirst)) + + checkStartingTasks := func(expected int) []swarm.Task { + var startingTasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks := d.GetServiceTasks(c, id) + startingTasks = nil + for _, t := range tasks { + if t.Status.State == swarm.TaskStateStarting { + startingTasks = append(startingTasks, t) + } + } + return startingTasks, nil + }, checker.HasLen, expected) + + return startingTasks + } + + makeTasksHealthy := func(tasks []swarm.Task) { + for _, t := range tasks { + containerID := t.Status.ContainerStatus.ContainerID + d.Cmd("exec", containerID, "touch", "/status") + } + } + + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances}) + + // issue service update + service := d.GetService(c, id) + d.UpdateService(c, service, setImage(image2)) + + // first batch + + // The old tasks should be running, and the new ones should be starting. + startingTasks := checkStartingTasks(parallelism) + + waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances}) + + // make it healthy + makeTasksHealthy(startingTasks) + + waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances - parallelism, image2: parallelism}) + + // 2nd batch + + // The old tasks should be running, and the new ones should be starting. + startingTasks = checkStartingTasks(parallelism) + + waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances - parallelism, image2: parallelism}) + + // make it healthy + makeTasksHealthy(startingTasks) + + waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances - 2*parallelism, image2: 2 * parallelism}) + + // 3rd batch + + // The old tasks should be running, and the new ones should be starting. 
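+ // Only one task is left to update in this final batch (instances - 2*parallelism = 1).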
+ startingTasks = checkStartingTasks(1) + + waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances - 2*parallelism, image2: 2 * parallelism}) + + // make it healthy + makeTasksHealthy(startingTasks) + + waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image2: instances}) + + // Roll back to the previous version. This uses the CLI because + // rollback is a client-side operation. + out, err := d.Cmd("service", "update", "--rollback", id) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // first batch + waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image2: instances - rollbackParallelism, image1: rollbackParallelism}) + + // 2nd batch + waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances}) +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesFailedUpdate(c *check.C) { + const nodeCount = 3 + var daemons [nodeCount]*daemon.Swarm + for i := 0; i < nodeCount; i++ { + daemons[i] = s.AddDaemon(c, true, i == 0) + } + // wait for nodes ready + waitAndAssert(c, 5*time.Second, daemons[0].CheckNodeReadyCount, checker.Equals, nodeCount) + + // service image at start + image1 := "busybox:latest" + // target image in update + image2 := "busybox:badtag" + + // create service + instances := 5 + id := daemons[0].CreateService(c, serviceForUpdate, setInstances(instances)) + + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances}) + + // issue service update + service := daemons[0].GetService(c, id) + daemons[0].UpdateService(c, service, setImage(image2), setFailureAction(swarm.UpdateFailureActionPause), setMaxFailureRatio(0.25), setParallelism(1)) + + // should update 2 tasks and then pause + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceUpdateState(id), checker.Equals, swarm.UpdateStatePaused) + v, _ := daemons[0].CheckServiceRunningTasks(id)(c) + c.Assert(v, checker.Equals, instances-2) + + // Roll back to the previous version. This uses the CLI because + // rollback used to be a client-side operation. 
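+ // Rolling back should restore all tasks to image1, since only two of them had been updated to the bad tag before the update paused.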
+ out, err := daemons[0].Cmd("service", "update", "--rollback", id) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances}) +} + +func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintRole(c *check.C) { + const nodeCount = 3 + var daemons [nodeCount]*daemon.Swarm + for i := 0; i < nodeCount; i++ { + daemons[i] = s.AddDaemon(c, true, i == 0) + } + // wait for nodes ready + waitAndAssert(c, 5*time.Second, daemons[0].CheckNodeReadyCount, checker.Equals, nodeCount) + + // create service + constraints := []string{"node.role==worker"} + instances := 3 + id := daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceRunningTasks(id), checker.Equals, instances) + // validate tasks are running on worker nodes + tasks := daemons[0].GetServiceTasks(c, id) + for _, task := range tasks { + node := daemons[0].GetNode(c, task.NodeID) + c.Assert(node.Spec.Role, checker.Equals, swarm.NodeRoleWorker) + } + //remove service + daemons[0].RemoveService(c, id) + + // create service + constraints = []string{"node.role!=worker"} + id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceRunningTasks(id), checker.Equals, instances) + tasks = daemons[0].GetServiceTasks(c, id) + // validate tasks are running on manager nodes + for _, task := range tasks { + node := daemons[0].GetNode(c, task.NodeID) + c.Assert(node.Spec.Role, checker.Equals, swarm.NodeRoleManager) + } + //remove service + daemons[0].RemoveService(c, id) + + // create service + constraints = []string{"node.role==nosuchrole"} + id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + // wait for tasks created + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceTasks(id), checker.Equals, instances) + // let scheduler try + time.Sleep(250 * time.Millisecond) + // validate tasks are not assigned to any node + tasks = daemons[0].GetServiceTasks(c, id) + for _, task := range tasks { + c.Assert(task.NodeID, checker.Equals, "") + } +} + +func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintLabel(c *check.C) { + const nodeCount = 3 + var daemons [nodeCount]*daemon.Swarm + for i := 0; i < nodeCount; i++ { + daemons[i] = s.AddDaemon(c, true, i == 0) + } + // wait for nodes ready + waitAndAssert(c, 5*time.Second, daemons[0].CheckNodeReadyCount, checker.Equals, nodeCount) + nodes := daemons[0].ListNodes(c) + c.Assert(len(nodes), checker.Equals, nodeCount) + + // add labels to nodes + daemons[0].UpdateNode(c, nodes[0].ID, func(n *swarm.Node) { + n.Spec.Annotations.Labels = map[string]string{ + "security": "high", + } + }) + for i := 1; i < nodeCount; i++ { + daemons[0].UpdateNode(c, nodes[i].ID, func(n *swarm.Node) { + n.Spec.Annotations.Labels = map[string]string{ + "security": "low", + } + }) + } + + // create service + instances := 3 + constraints := []string{"node.labels.security==high"} + id := daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceRunningTasks(id), checker.Equals, instances) + tasks := 
daemons[0].GetServiceTasks(c, id) + // validate all tasks are running on nodes[0] + for _, task := range tasks { + c.Assert(task.NodeID, checker.Equals, nodes[0].ID) + } + //remove service + daemons[0].RemoveService(c, id) + + // create service + constraints = []string{"node.labels.security!=high"} + id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceRunningTasks(id), checker.Equals, instances) + tasks = daemons[0].GetServiceTasks(c, id) + // validate all tasks are NOT running on nodes[0] + for _, task := range tasks { + c.Assert(task.NodeID, checker.Not(checker.Equals), nodes[0].ID) + } + //remove service + daemons[0].RemoveService(c, id) + + constraints = []string{"node.labels.security==medium"} + id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + // wait for tasks created + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceTasks(id), checker.Equals, instances) + // let scheduler try + time.Sleep(250 * time.Millisecond) + tasks = daemons[0].GetServiceTasks(c, id) + // validate tasks are not assigned + for _, task := range tasks { + c.Assert(task.NodeID, checker.Equals, "") + } + //remove service + daemons[0].RemoveService(c, id) + + // multiple constraints + constraints = []string{ + "node.labels.security==high", + fmt.Sprintf("node.id==%s", nodes[1].ID), + } + id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + // wait for tasks created + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceTasks(id), checker.Equals, instances) + // let scheduler try + time.Sleep(250 * time.Millisecond) + tasks = daemons[0].GetServiceTasks(c, id) + // validate tasks are not assigned + for _, task := range tasks { + c.Assert(task.NodeID, checker.Equals, "") + } + // make nodes[1] fulfill the constraints + daemons[0].UpdateNode(c, nodes[1].ID, func(n *swarm.Node) { + n.Spec.Annotations.Labels = map[string]string{ + "security": "high", + } + }) + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceRunningTasks(id), checker.Equals, instances) + tasks = daemons[0].GetServiceTasks(c, id) + for _, task := range tasks { + c.Assert(task.NodeID, checker.Equals, nodes[1].ID) + } +} + +func (s *DockerSwarmSuite) TestAPISwarmServicePlacementPrefs(c *check.C) { + const nodeCount = 3 + var daemons [nodeCount]*daemon.Swarm + for i := 0; i < nodeCount; i++ { + daemons[i] = s.AddDaemon(c, true, i == 0) + } + // wait for nodes ready + waitAndAssert(c, 5*time.Second, daemons[0].CheckNodeReadyCount, checker.Equals, nodeCount) + nodes := daemons[0].ListNodes(c) + c.Assert(len(nodes), checker.Equals, nodeCount) + + // add labels to nodes + daemons[0].UpdateNode(c, nodes[0].ID, func(n *swarm.Node) { + n.Spec.Annotations.Labels = map[string]string{ + "rack": "a", + } + }) + for i := 1; i < nodeCount; i++ { + daemons[0].UpdateNode(c, nodes[i].ID, func(n *swarm.Node) { + n.Spec.Annotations.Labels = map[string]string{ + "rack": "b", + } + }) + } + + // create service + instances := 4 + prefs := []swarm.PlacementPreference{{Spread: &swarm.SpreadOver{SpreadDescriptor: "node.labels.rack"}}} + id := daemons[0].CreateService(c, simpleTestService, setPlacementPrefs(prefs), setInstances(instances)) + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, 
daemons[0].CheckServiceRunningTasks(id), checker.Equals, instances) + tasks := daemons[0].GetServiceTasks(c, id) + // validate tasks are spread across the racks: 2 on the "a" rack node, 1 on each "b" rack node + tasksOnNode := make(map[string]int) + for _, task := range tasks { + tasksOnNode[task.NodeID]++ + } + c.Assert(tasksOnNode[nodes[0].ID], checker.Equals, 2) + c.Assert(tasksOnNode[nodes[1].ID], checker.Equals, 1) + c.Assert(tasksOnNode[nodes[2].ID], checker.Equals, 1) +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesStateReporting(c *check.C) { + testRequires(c, SameHostDaemon) + testRequires(c, DaemonIsLinux) + + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, true) + d3 := s.AddDaemon(c, true, false) + + time.Sleep(1 * time.Second) // make sure all daemons are ready to accept tasks + + instances := 9 + d1.CreateService(c, simpleTestService, setInstances(instances)) + + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances) + + getContainers := func() map[string]*daemon.Swarm { + m := make(map[string]*daemon.Swarm) + for _, d := range []*daemon.Swarm{d1, d2, d3} { + for _, id := range d.ActiveContainers() { + m[id] = d + } + } + return m + } + + containers := getContainers() + c.Assert(containers, checker.HasLen, instances) + var toRemove string + for i := range containers { + toRemove = i + } + + _, err := containers[toRemove].Cmd("stop", toRemove) + c.Assert(err, checker.IsNil) + + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances) + + containers2 := getContainers() + c.Assert(containers2, checker.HasLen, instances) + for i := range containers { + if i == toRemove { + c.Assert(containers2[i], checker.IsNil) + } else { + c.Assert(containers2[i], checker.NotNil) + } + } + + containers = containers2 + for i := range containers { + toRemove = i + } + + // try with killing process outside of docker + pidStr, err := containers[toRemove].Cmd("inspect", "-f", "{{.State.Pid}}", toRemove) + c.Assert(err, checker.IsNil) + pid, err := strconv.Atoi(strings.TrimSpace(pidStr)) + c.Assert(err, checker.IsNil) + c.Assert(unix.Kill(pid, unix.SIGKILL), checker.IsNil) + + time.Sleep(time.Second) // give some time to handle the signal + + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances) + + containers2 = getContainers() + c.Assert(containers2, checker.HasLen, instances) + for i := range containers { + if i == toRemove { + c.Assert(containers2[i], checker.IsNil) + } else { + c.Assert(containers2[i], checker.NotNil) + } + } +} + +// Test plugins deployed via swarm services +func (s *DockerSwarmSuite) TestAPISwarmServicesPlugin(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64) + reg := setupRegistry(c, false, "", "") + defer reg.Close() + + repo := path.Join(privateRegistryURL, "swarm", "test:v1") + repo2 := path.Join(privateRegistryURL, "swarm", "test:v2") + name := "test" + + err := plugin.CreateInRegistry(context.Background(), repo, nil) + c.Assert(err, checker.IsNil, check.Commentf("failed to create plugin")) + err = plugin.CreateInRegistry(context.Background(), repo2, nil) + c.Assert(err, checker.IsNil, check.Commentf("failed to create plugin")) + + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, 
true) + d3 := s.AddDaemon(c, true, false) + + makePlugin := func(repo, name string, constraints []string) func(*swarm.Service) { + return func(s *swarm.Service) { + s.Spec.TaskTemplate.Runtime = "plugin" + s.Spec.TaskTemplate.PluginSpec = &runtime.PluginSpec{ + Name: name, + Remote: repo, + } + if constraints != nil { + s.Spec.TaskTemplate.Placement = &swarm.Placement{ + Constraints: constraints, + } + } + } + } + + id := d1.CreateService(c, makePlugin(repo, name, nil)) + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckPluginRunning(name), checker.True) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckPluginRunning(name), checker.True) + waitAndAssert(c, defaultReconciliationTimeout, d3.CheckPluginRunning(name), checker.True) + + service := d1.GetService(c, id) + d1.UpdateService(c, service, makePlugin(repo2, name, nil)) + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckPluginImage(name), checker.Equals, repo2) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckPluginImage(name), checker.Equals, repo2) + waitAndAssert(c, defaultReconciliationTimeout, d3.CheckPluginImage(name), checker.Equals, repo2) + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckPluginRunning(name), checker.True) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckPluginRunning(name), checker.True) + waitAndAssert(c, defaultReconciliationTimeout, d3.CheckPluginRunning(name), checker.True) + + d1.RemoveService(c, id) + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckPluginRunning(name), checker.False) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckPluginRunning(name), checker.False) + waitAndAssert(c, defaultReconciliationTimeout, d3.CheckPluginRunning(name), checker.False) + + // constrain to managers only + id = d1.CreateService(c, makePlugin(repo, name, []string{"node.role==manager"})) + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckPluginRunning(name), checker.True) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckPluginRunning(name), checker.True) + waitAndAssert(c, defaultReconciliationTimeout, d3.CheckPluginRunning(name), checker.False) // Not a manager, not running it + d1.RemoveService(c, id) + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckPluginRunning(name), checker.False) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckPluginRunning(name), checker.False) + waitAndAssert(c, defaultReconciliationTimeout, d3.CheckPluginRunning(name), checker.False) + + // with no name + id = d1.CreateService(c, makePlugin(repo, "", nil)) + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckPluginRunning(repo), checker.True) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckPluginRunning(repo), checker.True) + waitAndAssert(c, defaultReconciliationTimeout, d3.CheckPluginRunning(repo), checker.True) + d1.RemoveService(c, id) + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckPluginRunning(repo), checker.False) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckPluginRunning(repo), checker.False) + waitAndAssert(c, defaultReconciliationTimeout, d3.CheckPluginRunning(repo), checker.False) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_swarm_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_swarm_test.go new file mode 100644 index 000000000..9d24757b4 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_swarm_test.go @@ -0,0 +1,1045 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" 
+ "path/filepath" + "strings" + "sync" + "time" + + "github.com/cloudflare/cfssl/csr" + "github.com/cloudflare/cfssl/helpers" + "github.com/cloudflare/cfssl/initca" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/daemon" + "github.com/docker/swarmkit/ca" + "github.com/go-check/check" +) + +var defaultReconciliationTimeout = 30 * time.Second + +func (s *DockerSwarmSuite) TestAPISwarmInit(c *check.C) { + // todo: should find a better way to verify that components are running than /info + d1 := s.AddDaemon(c, true, true) + info, err := d1.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.True) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + c.Assert(info.Cluster.RootRotationInProgress, checker.False) + + d2 := s.AddDaemon(c, true, false) + info, err = d2.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.False) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + + // Leaving cluster + c.Assert(d2.Leave(false), checker.IsNil) + + info, err = d2.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.False) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) + + c.Assert(d2.Join(swarm.JoinRequest{JoinToken: d1.JoinTokens(c).Worker, RemoteAddrs: []string{d1.ListenAddr}}), checker.IsNil) + + info, err = d2.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.False) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + + // Current state restoring after restarts + d1.Stop(c) + d2.Stop(c) + + d1.Start(c) + d2.Start(c) + + info, err = d1.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.True) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + + info, err = d2.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.False) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) +} + +func (s *DockerSwarmSuite) TestAPISwarmJoinToken(c *check.C) { + d1 := s.AddDaemon(c, false, false) + c.Assert(d1.Init(swarm.InitRequest{}), checker.IsNil) + + // todo: error message differs depending if some components of token are valid + + d2 := s.AddDaemon(c, false, false) + err := d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.ListenAddr}}) + c.Assert(err, checker.NotNil) + c.Assert(err.Error(), checker.Contains, "join token is necessary") + info, err := d2.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) + + err = d2.Join(swarm.JoinRequest{JoinToken: "foobaz", RemoteAddrs: []string{d1.ListenAddr}}) + c.Assert(err, checker.NotNil) + c.Assert(err.Error(), checker.Contains, "invalid join token") + info, err = d2.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) + + workerToken := d1.JoinTokens(c).Worker + + c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.ListenAddr}}), checker.IsNil) + info, err = d2.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + c.Assert(d2.Leave(false), checker.IsNil) + info, err = d2.SwarmInfo() + c.Assert(err, checker.IsNil) + 
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) + + // change tokens + d1.RotateTokens(c) + + err = d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.ListenAddr}}) + c.Assert(err, checker.NotNil) + c.Assert(err.Error(), checker.Contains, "join token is necessary") + info, err = d2.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) + + workerToken = d1.JoinTokens(c).Worker + + c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.ListenAddr}}), checker.IsNil) + info, err = d2.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + c.Assert(d2.Leave(false), checker.IsNil) + info, err = d2.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) + + // change spec, don't change tokens + d1.UpdateSwarm(c, func(s *swarm.Spec) {}) + + err = d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.ListenAddr}}) + c.Assert(err, checker.NotNil) + c.Assert(err.Error(), checker.Contains, "join token is necessary") + info, err = d2.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) + + c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.ListenAddr}}), checker.IsNil) + info, err = d2.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + c.Assert(d2.Leave(false), checker.IsNil) + info, err = d2.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) +} + +func (s *DockerSwarmSuite) TestUpdateSwarmAddExternalCA(c *check.C) { + d1 := s.AddDaemon(c, false, false) + c.Assert(d1.Init(swarm.InitRequest{}), checker.IsNil) + d1.UpdateSwarm(c, func(s *swarm.Spec) { + s.CAConfig.ExternalCAs = []*swarm.ExternalCA{ + { + Protocol: swarm.ExternalCAProtocolCFSSL, + URL: "https://thishasnoca.org", + }, + { + Protocol: swarm.ExternalCAProtocolCFSSL, + URL: "https://thishasacacert.org", + CACert: "cacert", + }, + } + }) + info, err := d1.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.Cluster.Spec.CAConfig.ExternalCAs, checker.HasLen, 2) + c.Assert(info.Cluster.Spec.CAConfig.ExternalCAs[0].CACert, checker.Equals, "") + c.Assert(info.Cluster.Spec.CAConfig.ExternalCAs[1].CACert, checker.Equals, "cacert") +} + +func (s *DockerSwarmSuite) TestAPISwarmCAHash(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, false, false) + splitToken := strings.Split(d1.JoinTokens(c).Worker, "-") + splitToken[2] = "1kxftv4ofnc6mt30lmgipg6ngf9luhwqopfk1tz6bdmnkubg0e" + replacementToken := strings.Join(splitToken, "-") + err := d2.Join(swarm.JoinRequest{JoinToken: replacementToken, RemoteAddrs: []string{d1.ListenAddr}}) + c.Assert(err, checker.NotNil) + c.Assert(err.Error(), checker.Contains, "remote CA does not match fingerprint") +} + +func (s *DockerSwarmSuite) TestAPISwarmPromoteDemote(c *check.C) { + d1 := s.AddDaemon(c, false, false) + c.Assert(d1.Init(swarm.InitRequest{}), checker.IsNil) + d2 := s.AddDaemon(c, true, false) + + info, err := d2.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.False) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + + d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) { + n.Spec.Role = swarm.NodeRoleManager + }) + 
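+ // promotion is asynchronous; poll until d2 reports that the control plane is available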
+ waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.True) + + d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) { + n.Spec.Role = swarm.NodeRoleWorker + }) + + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.False) + + // Wait for the role to change to worker in the cert. This is partially + // done because it's something worth testing in its own right, and + // partially because changing the role from manager to worker and then + // back to manager quickly might cause the node to pause for a while + // while waiting for the role to change to worker, and the test can + // time out during this interval. + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + certBytes, err := ioutil.ReadFile(filepath.Join(d2.Folder, "root", "swarm", "certificates", "swarm-node.crt")) + if err != nil { + return "", check.Commentf("error: %v", err) + } + certs, err := helpers.ParseCertificatesPEM(certBytes) + if err == nil && len(certs) > 0 && len(certs[0].Subject.OrganizationalUnit) > 0 { + return certs[0].Subject.OrganizationalUnit[0], nil + } + return "", check.Commentf("could not get organizational unit from certificate") + }, checker.Equals, "swarm-worker") + + // Demoting last node should fail + node := d1.GetNode(c, d1.NodeID) + node.Spec.Role = swarm.NodeRoleWorker + url := fmt.Sprintf("/nodes/%s/update?version=%d", node.ID, node.Version.Index) + status, out, err := d1.SockRequest("POST", url, node.Spec) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusBadRequest, check.Commentf("output: %q", string(out))) + // The warning specific to demoting the last manager is best-effort and + // won't appear until the Role field of the demoted manager has been + // updated. + // Yes, I know this looks silly, but checker.Matches is broken, since + // it anchors the regexp contrary to the documentation, and this makes + // it impossible to match something that includes a line break. 
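+ // Accept either message; which one appears depends on whether the Role field had already been updated when the demotion was rejected.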
+ if !strings.Contains(string(out), "last manager of the swarm") { + c.Assert(string(out), checker.Contains, "this would result in a loss of quorum") + } + info, err = d1.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + c.Assert(info.ControlAvailable, checker.True) + + // Promote already demoted node + d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) { + n.Spec.Role = swarm.NodeRoleManager + }) + + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.True) +} + +func (s *DockerSwarmSuite) TestAPISwarmLeaderProxy(c *check.C) { + // add three managers, one of these is leader + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, true) + d3 := s.AddDaemon(c, true, true) + + // start a service by hitting each of the 3 managers + d1.CreateService(c, simpleTestService, func(s *swarm.Service) { + s.Spec.Name = "test1" + }) + d2.CreateService(c, simpleTestService, func(s *swarm.Service) { + s.Spec.Name = "test2" + }) + d3.CreateService(c, simpleTestService, func(s *swarm.Service) { + s.Spec.Name = "test3" + }) + + // 3 services should be started now, because the requests were proxied to leader + // query each node and make sure it returns 3 services + for _, d := range []*daemon.Swarm{d1, d2, d3} { + services := d.ListServices(c) + c.Assert(services, checker.HasLen, 3) + } +} + +func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *check.C) { + // Create 3 nodes + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, true) + d3 := s.AddDaemon(c, true, true) + + // assert that the first node we made is the leader, and the other two are followers + c.Assert(d1.GetNode(c, d1.NodeID).ManagerStatus.Leader, checker.True) + c.Assert(d1.GetNode(c, d2.NodeID).ManagerStatus.Leader, checker.False) + c.Assert(d1.GetNode(c, d3.NodeID).ManagerStatus.Leader, checker.False) + + d1.Stop(c) + + var ( + leader *daemon.Swarm // keep track of leader + followers []*daemon.Swarm // keep track of followers + ) + checkLeader := func(nodes ...*daemon.Swarm) checkF { + return func(c *check.C) (interface{}, check.CommentInterface) { + // clear these out before each run + leader = nil + followers = nil + for _, d := range nodes { + if d.GetNode(c, d.NodeID).ManagerStatus.Leader { + leader = d + } else { + followers = append(followers, d) + } + } + + if leader == nil { + return false, check.Commentf("no leader elected") + } + + return true, check.Commentf("elected %v", leader.ID()) + } + } + + // wait for an election to occur + waitAndAssert(c, defaultReconciliationTimeout, checkLeader(d2, d3), checker.True) + + // assert that we have a new leader + c.Assert(leader, checker.NotNil) + + // Keep track of the current leader, since we want that to be chosen. 
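+ // (checkLeader resets leader and followers on every poll, so capture the winner now.)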
+ stableleader := leader + + // add d1, the initial leader, back + d1.Start(c) + + // TODO(stevvooe): may need to wait for rejoin here + + // wait for possible election + waitAndAssert(c, defaultReconciliationTimeout, checkLeader(d1, d2, d3), checker.True) + // pick out the leader and the followers again + + // verify that we still only have 1 leader and 2 followers + c.Assert(leader, checker.NotNil) + c.Assert(followers, checker.HasLen, 2) + // and that after we added d1 back, the leader hasn't changed + c.Assert(leader.NodeID, checker.Equals, stableleader.NodeID) +} + +func (s *DockerSwarmSuite) TestAPISwarmRaftQuorum(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, true) + d3 := s.AddDaemon(c, true, true) + + d1.CreateService(c, simpleTestService) + + d2.Stop(c) + + // make sure there is a leader + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckLeader, checker.IsNil) + + d1.CreateService(c, simpleTestService, func(s *swarm.Service) { + s.Spec.Name = "top1" + }) + + d3.Stop(c) + + var service swarm.Service + simpleTestService(&service) + service.Spec.Name = "top2" + status, out, err := d1.SockRequest("POST", "/services/create", service.Spec) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError, check.Commentf("deadline exceeded: %s", string(out))) + + d2.Start(c) + + // make sure there is a leader + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckLeader, checker.IsNil) + + d1.CreateService(c, simpleTestService, func(s *swarm.Service) { + s.Spec.Name = "top3" + }) +} + +func (s *DockerSwarmSuite) TestAPISwarmLeaveRemovesContainer(c *check.C) { + d := s.AddDaemon(c, true, true) + + instances := 2 + d.CreateService(c, simpleTestService, setInstances(instances)) + + id, err := d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, checker.IsNil) + id = strings.TrimSpace(id) + + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances+1) + + c.Assert(d.Leave(false), checker.NotNil) + c.Assert(d.Leave(true), checker.IsNil) + + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + + id2, err := d.Cmd("ps", "-q") + c.Assert(err, checker.IsNil) + c.Assert(id, checker.HasPrefix, strings.TrimSpace(id2)) +} + +// #23629 +func (s *DockerSwarmSuite) TestAPISwarmLeaveOnPendingJoin(c *check.C) { + testRequires(c, Network) + s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, false, false) + + id, err := d2.Cmd("run", "-d", "busybox", "top") + c.Assert(err, checker.IsNil) + id = strings.TrimSpace(id) + + err = d2.Join(swarm.JoinRequest{ + RemoteAddrs: []string{"123.123.123.123:1234"}, + }) + c.Assert(err, check.NotNil) + c.Assert(err.Error(), checker.Contains, "Timeout was reached") + + info, err := d2.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStatePending) + + c.Assert(d2.Leave(true), checker.IsNil) + + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 1) + + id2, err := d2.Cmd("ps", "-q") + c.Assert(err, checker.IsNil) + c.Assert(id, checker.HasPrefix, strings.TrimSpace(id2)) +} + +// #23705 +func (s *DockerSwarmSuite) TestAPISwarmRestoreOnPendingJoin(c *check.C) { + testRequires(c, Network) + d := s.AddDaemon(c, false, false) + err := d.Join(swarm.JoinRequest{ + RemoteAddrs: []string{"123.123.123.123:1234"}, + }) + c.Assert(err, check.NotNil) + c.Assert(err.Error(), checker.Contains, "Timeout was reached") + + waitAndAssert(c, 
defaultReconciliationTimeout, d.CheckLocalNodeState, checker.Equals, swarm.LocalNodeStatePending) + + d.Stop(c) + d.Start(c) + + info, err := d.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) +} + +func (s *DockerSwarmSuite) TestAPISwarmManagerRestore(c *check.C) { + d1 := s.AddDaemon(c, true, true) + + instances := 2 + id := d1.CreateService(c, simpleTestService, setInstances(instances)) + + d1.GetService(c, id) + d1.Stop(c) + d1.Start(c) + d1.GetService(c, id) + + d2 := s.AddDaemon(c, true, true) + d2.GetService(c, id) + d2.Stop(c) + d2.Start(c) + d2.GetService(c, id) + + d3 := s.AddDaemon(c, true, true) + d3.GetService(c, id) + d3.Stop(c) + d3.Start(c) + d3.GetService(c, id) + + d3.Kill() + time.Sleep(1 * time.Second) // time to handle signal + d3.Start(c) + d3.GetService(c, id) +} + +func (s *DockerSwarmSuite) TestAPISwarmScaleNoRollingUpdate(c *check.C) { + d := s.AddDaemon(c, true, true) + + instances := 2 + id := d.CreateService(c, simpleTestService, setInstances(instances)) + + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances) + containers := d.ActiveContainers() + instances = 4 + d.UpdateService(c, d.GetService(c, id), setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances) + containers2 := d.ActiveContainers() + +loop0: + for _, c1 := range containers { + for _, c2 := range containers2 { + if c1 == c2 { + continue loop0 + } + } + c.Errorf("container %v not found in new set %#v", c1, containers2) + } +} + +func (s *DockerSwarmSuite) TestAPISwarmInvalidAddress(c *check.C) { + d := s.AddDaemon(c, false, false) + req := swarm.InitRequest{ + ListenAddr: "", + } + status, _, err := d.SockRequest("POST", "/swarm/init", req) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusBadRequest) + + req2 := swarm.JoinRequest{ + ListenAddr: "0.0.0.0:2377", + RemoteAddrs: []string{""}, + } + status, _, err = d.SockRequest("POST", "/swarm/join", req2) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusBadRequest) +} + +func (s *DockerSwarmSuite) TestAPISwarmForceNewCluster(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, true) + + instances := 2 + id := d1.CreateService(c, simpleTestService, setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, instances) + + // drain d2, all containers should move to d1 + d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) { + n.Spec.Availability = swarm.NodeAvailabilityDrain + }) + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 0) + + d2.Stop(c) + + c.Assert(d1.Init(swarm.InitRequest{ + ForceNewCluster: true, + Spec: swarm.Spec{}, + }), checker.IsNil) + + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances) + + d3 := s.AddDaemon(c, true, true) + info, err := d3.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.True) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + + instances = 4 + d3.UpdateService(c, d3.GetService(c, id), setInstances(instances)) + + waitAndAssert(c, defaultReconciliationTimeout, 
reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances) +} + +func simpleTestService(s *swarm.Service) { + ureplicas := uint64(1) + restartDelay := time.Duration(100 * time.Millisecond) + + s.Spec = swarm.ServiceSpec{ + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{ + Image: "busybox:latest", + Command: []string{"/bin/top"}, + }, + RestartPolicy: &swarm.RestartPolicy{ + Delay: &restartDelay, + }, + }, + Mode: swarm.ServiceMode{ + Replicated: &swarm.ReplicatedService{ + Replicas: &ureplicas, + }, + }, + } + s.Spec.Name = "top" +} + +func serviceForUpdate(s *swarm.Service) { + ureplicas := uint64(1) + restartDelay := time.Duration(100 * time.Millisecond) + + s.Spec = swarm.ServiceSpec{ + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{ + Image: "busybox:latest", + Command: []string{"/bin/top"}, + }, + RestartPolicy: &swarm.RestartPolicy{ + Delay: &restartDelay, + }, + }, + Mode: swarm.ServiceMode{ + Replicated: &swarm.ReplicatedService{ + Replicas: &ureplicas, + }, + }, + UpdateConfig: &swarm.UpdateConfig{ + Parallelism: 2, + Delay: 4 * time.Second, + FailureAction: swarm.UpdateFailureActionContinue, + }, + RollbackConfig: &swarm.UpdateConfig{ + Parallelism: 3, + Delay: 4 * time.Second, + FailureAction: swarm.UpdateFailureActionContinue, + }, + } + s.Spec.Name = "updatetest" +} + +func setInstances(replicas int) daemon.ServiceConstructor { + ureplicas := uint64(replicas) + return func(s *swarm.Service) { + s.Spec.Mode = swarm.ServiceMode{ + Replicated: &swarm.ReplicatedService{ + Replicas: &ureplicas, + }, + } + } +} + +func setUpdateOrder(order string) daemon.ServiceConstructor { + return func(s *swarm.Service) { + if s.Spec.UpdateConfig == nil { + s.Spec.UpdateConfig = &swarm.UpdateConfig{} + } + s.Spec.UpdateConfig.Order = order + } +} + +func setRollbackOrder(order string) daemon.ServiceConstructor { + return func(s *swarm.Service) { + if s.Spec.RollbackConfig == nil { + s.Spec.RollbackConfig = &swarm.UpdateConfig{} + } + s.Spec.RollbackConfig.Order = order + } +} + +func setImage(image string) daemon.ServiceConstructor { + return func(s *swarm.Service) { + if s.Spec.TaskTemplate.ContainerSpec == nil { + s.Spec.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{} + } + s.Spec.TaskTemplate.ContainerSpec.Image = image + } +} + +func setFailureAction(failureAction string) daemon.ServiceConstructor { + return func(s *swarm.Service) { + s.Spec.UpdateConfig.FailureAction = failureAction + } +} + +func setMaxFailureRatio(maxFailureRatio float32) daemon.ServiceConstructor { + return func(s *swarm.Service) { + s.Spec.UpdateConfig.MaxFailureRatio = maxFailureRatio + } +} + +func setParallelism(parallelism uint64) daemon.ServiceConstructor { + return func(s *swarm.Service) { + s.Spec.UpdateConfig.Parallelism = parallelism + } +} + +func setConstraints(constraints []string) daemon.ServiceConstructor { + return func(s *swarm.Service) { + if s.Spec.TaskTemplate.Placement == nil { + s.Spec.TaskTemplate.Placement = &swarm.Placement{} + } + s.Spec.TaskTemplate.Placement.Constraints = constraints + } +} + +func setPlacementPrefs(prefs []swarm.PlacementPreference) daemon.ServiceConstructor { + return func(s *swarm.Service) { + if s.Spec.TaskTemplate.Placement == nil { + s.Spec.TaskTemplate.Placement = &swarm.Placement{} + } + s.Spec.TaskTemplate.Placement.Preferences = prefs + } +} + +func setGlobalMode(s *swarm.Service) { + s.Spec.Mode = swarm.ServiceMode{ + Global: &swarm.GlobalService{}, + } +} + +func 
checkClusterHealth(c *check.C, cl []*daemon.Swarm, managerCount, workerCount int) { + var totalMCount, totalWCount int + + for _, d := range cl { + var ( + info swarm.Info + err error + ) + + // check info in a waitAndAssert, because if the cluster doesn't have a leader, `info` will return an error + checkInfo := func(c *check.C) (interface{}, check.CommentInterface) { + info, err = d.SwarmInfo() + return err, check.Commentf("cluster not ready in time") + } + waitAndAssert(c, defaultReconciliationTimeout, checkInfo, checker.IsNil) + if !info.ControlAvailable { + totalWCount++ + continue + } + + var leaderFound bool + totalMCount++ + var mCount, wCount int + + for _, n := range d.ListNodes(c) { + waitReady := func(c *check.C) (interface{}, check.CommentInterface) { + if n.Status.State == swarm.NodeStateReady { + return true, nil + } + nn := d.GetNode(c, n.ID) + n = *nn + return n.Status.State == swarm.NodeStateReady, check.Commentf("state of node %s, reported by %s", n.ID, d.Info.NodeID) + } + waitAndAssert(c, defaultReconciliationTimeout, waitReady, checker.True) + + waitActive := func(c *check.C) (interface{}, check.CommentInterface) { + if n.Spec.Availability == swarm.NodeAvailabilityActive { + return true, nil + } + nn := d.GetNode(c, n.ID) + n = *nn + return n.Spec.Availability == swarm.NodeAvailabilityActive, check.Commentf("availability of node %s, reported by %s", n.ID, d.Info.NodeID) + } + waitAndAssert(c, defaultReconciliationTimeout, waitActive, checker.True) + + if n.Spec.Role == swarm.NodeRoleManager { + c.Assert(n.ManagerStatus, checker.NotNil, check.Commentf("manager status of node %s (manager), reported by %s", n.ID, d.Info.NodeID)) + if n.ManagerStatus.Leader { + leaderFound = true + } + mCount++ + } else { + c.Assert(n.ManagerStatus, checker.IsNil, check.Commentf("manager status of node %s (worker), reported by %s", n.ID, d.Info.NodeID)) + wCount++ + } + } + c.Assert(leaderFound, checker.True, check.Commentf("lack of leader reported by node %s", info.NodeID)) + c.Assert(mCount, checker.Equals, managerCount, check.Commentf("managers count reported by node %s", info.NodeID)) + c.Assert(wCount, checker.Equals, workerCount, check.Commentf("workers count reported by node %s", info.NodeID)) + } + c.Assert(totalMCount, checker.Equals, managerCount) + c.Assert(totalWCount, checker.Equals, workerCount) +} + +func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *check.C) { + mCount, wCount := 5, 1 + + var nodes []*daemon.Swarm + for i := 0; i < mCount; i++ { + manager := s.AddDaemon(c, true, true) + info, err := manager.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.True) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + nodes = append(nodes, manager) + } + + for i := 0; i < wCount; i++ { + worker := s.AddDaemon(c, true, false) + info, err := worker.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.False) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + nodes = append(nodes, worker) + } + + // stop whole cluster + { + var wg sync.WaitGroup + wg.Add(len(nodes)) + errs := make(chan error, len(nodes)) + + for _, d := range nodes { + go func(daemon *daemon.Swarm) { + defer wg.Done() + if err := daemon.StopWithError(); err != nil { + errs <- err + } + // FIXME(vdemeester) This is duplicated… + if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { + daemon.Root = filepath.Dir(daemon.Root) + } + }(d) + } + wg.Wait() + close(errs) + for err := range errs { + 
c.Assert(err, check.IsNil) + } + } + + // start whole cluster + { + var wg sync.WaitGroup + wg.Add(len(nodes)) + errs := make(chan error, len(nodes)) + + for _, d := range nodes { + go func(daemon *daemon.Swarm) { + defer wg.Done() + if err := daemon.StartWithError("--iptables=false"); err != nil { + errs <- err + } + }(d) + } + wg.Wait() + close(errs) + for err := range errs { + c.Assert(err, check.IsNil) + } + } + + checkClusterHealth(c, nodes, mCount, wCount) +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateWithName(c *check.C) { + d := s.AddDaemon(c, true, true) + + instances := 2 + id := d.CreateService(c, simpleTestService, setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances) + + service := d.GetService(c, id) + instances = 5 + + setInstances(instances)(service) + url := fmt.Sprintf("/services/%s/update?version=%d", service.Spec.Name, service.Version.Index) + status, out, err := d.SockRequest("POST", url, service.Spec) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances) +} + +// Unlocking an unlocked swarm results in an error +func (s *DockerSwarmSuite) TestAPISwarmUnlockNotLocked(c *check.C) { + d := s.AddDaemon(c, true, true) + err := d.Unlock(swarm.UnlockRequest{UnlockKey: "wrong-key"}) + c.Assert(err, checker.NotNil) + c.Assert(err.Error(), checker.Contains, "swarm is not locked") +} + +// #29885 +func (s *DockerSwarmSuite) TestAPISwarmErrorHandling(c *check.C) { + ln, err := net.Listen("tcp", fmt.Sprintf(":%d", defaultSwarmPort)) + c.Assert(err, checker.IsNil) + defer ln.Close() + d := s.AddDaemon(c, false, false) + err = d.Init(swarm.InitRequest{}) + c.Assert(err, checker.NotNil) + c.Assert(err.Error(), checker.Contains, "address already in use") +} + +// Test case for 30242, where duplicate networks, with different drivers `bridge` and `overlay`, +// caused both scopes to be `swarm` for `docker network inspect` and `docker network ls`. +// This test makes sure the fixes correctly output scopes instead. 
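+ // The bridge network created below should keep scope `local`, while the overlay network should report scope `swarm`.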
+func (s *DockerSwarmSuite) TestAPIDuplicateNetworks(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "foo" + networkCreateRequest := types.NetworkCreateRequest{ + Name: name, + NetworkCreate: types.NetworkCreate{ + CheckDuplicate: false, + }, + } + + var n1 types.NetworkCreateResponse + networkCreateRequest.NetworkCreate.Driver = "bridge" + + status, out, err := d.SockRequest("POST", "/networks/create", networkCreateRequest) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(out))) + + c.Assert(json.Unmarshal(out, &n1), checker.IsNil) + + var n2 types.NetworkCreateResponse + networkCreateRequest.NetworkCreate.Driver = "overlay" + + status, out, err = d.SockRequest("POST", "/networks/create", networkCreateRequest) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(out))) + + c.Assert(json.Unmarshal(out, &n2), checker.IsNil) + + var r1 types.NetworkResource + + status, out, err = d.SockRequest("GET", "/networks/"+n1.ID, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf(string(out))) + + c.Assert(json.Unmarshal(out, &r1), checker.IsNil) + + c.Assert(r1.Scope, checker.Equals, "local") + + var r2 types.NetworkResource + + status, out, err = d.SockRequest("GET", "/networks/"+n2.ID, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf(string(out))) + + c.Assert(json.Unmarshal(out, &r2), checker.IsNil) + + c.Assert(r2.Scope, checker.Equals, "swarm") +} + +// Test case for 30178 +func (s *DockerSwarmSuite) TestAPISwarmHealthcheckNone(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("network", "create", "-d", "overlay", "lb") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + instances := 1 + d.CreateService(c, simpleTestService, setInstances(instances), func(s *swarm.Service) { + if s.Spec.TaskTemplate.ContainerSpec == nil { + s.Spec.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{} + } + s.Spec.TaskTemplate.ContainerSpec.Healthcheck = &container.HealthConfig{} + s.Spec.TaskTemplate.Networks = []swarm.NetworkAttachmentConfig{ + {Target: "lb"}, + } + }) + + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances) + + containers := d.ActiveContainers() + + out, err = d.Cmd("exec", containers[0], "ping", "-c1", "-W3", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) +} + +func (s *DockerSwarmSuite) TestSwarmRepeatedRootRotation(c *check.C) { + m := s.AddDaemon(c, true, true) + w := s.AddDaemon(c, true, false) + + info, err := m.SwarmInfo() + c.Assert(err, checker.IsNil) + + currentTrustRoot := info.Cluster.TLSInfo.TrustRoot + + // rotate multiple times + for i := 0; i < 4; i++ { + var cert, key []byte + if i%2 != 0 { + cert, _, key, err = initca.New(&csr.CertificateRequest{ + CN: "newRoot", + KeyRequest: csr.NewBasicKeyRequest(), + CA: &csr.CAConfig{Expiry: ca.RootCAExpiration}, + }) + c.Assert(err, checker.IsNil) + } + expectedCert := string(cert) + m.UpdateSwarm(c, func(s *swarm.Spec) { + s.CAConfig.SigningCACert = expectedCert + s.CAConfig.SigningCAKey = string(key) + s.CAConfig.ForceRotate++ + }) + + // poll to make sure update succeeds + var clusterTLSInfo swarm.TLSInfo + for j := 0; j < 18; j++ { + info, err := m.SwarmInfo() + c.Assert(err, checker.IsNil) + + // the desired CA 
cert and key are always redacted + c.Assert(info.Cluster.Spec.CAConfig.SigningCAKey, checker.Equals, "") + c.Assert(info.Cluster.Spec.CAConfig.SigningCACert, checker.Equals, "") + + clusterTLSInfo = info.Cluster.TLSInfo + + // if root rotation is done and the trust root has changed, we don't have to poll anymore + if !info.Cluster.RootRotationInProgress && clusterTLSInfo.TrustRoot != currentTrustRoot { + break + } + + // root rotation not done + time.Sleep(250 * time.Millisecond) + } + if cert != nil { + c.Assert(clusterTLSInfo.TrustRoot, checker.Equals, expectedCert) + } + // could take another second or two for the nodes to trust the new roots after they've all gotten + // new TLS certificates + for j := 0; j < 18; j++ { + mInfo := m.GetNode(c, m.NodeID).Description.TLSInfo + wInfo := m.GetNode(c, w.NodeID).Description.TLSInfo + + if mInfo.TrustRoot == clusterTLSInfo.TrustRoot && wInfo.TrustRoot == clusterTLSInfo.TrustRoot { + break + } + + // nodes don't trust root certs yet + time.Sleep(250 * time.Millisecond) + } + + c.Assert(m.GetNode(c, m.NodeID).Description.TLSInfo, checker.DeepEquals, clusterTLSInfo) + c.Assert(m.GetNode(c, w.NodeID).Description.TLSInfo, checker.DeepEquals, clusterTLSInfo) + currentTrustRoot = clusterTLSInfo.TrustRoot + } +} + +func (s *DockerSwarmSuite) TestAPINetworkInspectWithScope(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "foo" + networkCreateRequest := types.NetworkCreateRequest{ + Name: name, + } + + var n types.NetworkCreateResponse + networkCreateRequest.NetworkCreate.Driver = "overlay" + + status, out, err := d.SockRequest("POST", "/networks/create", networkCreateRequest) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(out))) + c.Assert(json.Unmarshal(out, &n), checker.IsNil) + + var r types.NetworkResource + + status, body, err := d.SockRequest("GET", "/networks/"+name, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf(string(out))) + c.Assert(json.Unmarshal(body, &r), checker.IsNil) + c.Assert(r.Scope, checker.Equals, "swarm") + c.Assert(r.ID, checker.Equals, n.ID) + + v := url.Values{} + v.Set("scope", "local") + + status, body, err = d.SockRequest("GET", "/networks/"+name+"?"+v.Encode(), nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusNotFound, check.Commentf(string(out))) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_test.go new file mode 100644 index 000000000..1af77ea51 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_test.go @@ -0,0 +1,122 @@ +package main + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "runtime" + "strconv" + "strings" + + "github.com/docker/docker/api" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/request" + "github.com/docker/docker/pkg/testutil" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestAPIOptionsRoute(c *check.C) { + resp, _, err := request.Do("/", request.Method(http.MethodOptions)) + c.Assert(err, checker.IsNil) + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) +} + +func (s *DockerSuite) TestAPIGetEnabledCORS(c *check.C) { + res, body, err := request.Get("/version") 
+ c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + body.Close() + // TODO: @runcom incomplete tests, why old integration tests had these headers + // and here none of the headers below are in the response? + //c.Log(res.Header) + //c.Assert(res.Header.Get("Access-Control-Allow-Origin"), check.Equals, "*") + //c.Assert(res.Header.Get("Access-Control-Allow-Headers"), check.Equals, "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth") +} + +func (s *DockerSuite) TestAPIClientVersionOldNotSupported(c *check.C) { + if testEnv.DaemonPlatform() != runtime.GOOS { + c.Skip("Daemon platform doesn't match test platform") + } + if api.MinVersion == api.DefaultVersion { + c.Skip("API MinVersion==DefaultVersion") + } + v := strings.Split(api.MinVersion, ".") + vMinInt, err := strconv.Atoi(v[1]) + c.Assert(err, checker.IsNil) + vMinInt-- + v[1] = strconv.Itoa(vMinInt) + version := strings.Join(v, ".") + + resp, body, err := request.Get("/v" + version + "/version") + c.Assert(err, checker.IsNil) + defer body.Close() + c.Assert(resp.StatusCode, checker.Equals, http.StatusBadRequest) + expected := fmt.Sprintf("client version %s is too old. Minimum supported API version is %s, please upgrade your client to a newer version", version, api.MinVersion) + content, err := ioutil.ReadAll(body) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(string(content)), checker.Contains, expected) +} + +func (s *DockerSuite) TestAPIDockerAPIVersion(c *check.C) { + var svrVersion string + + server := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("API-Version", api.DefaultVersion) + url := r.URL.Path + svrVersion = url + })) + defer server.Close() + + // Test using the env var first + result := cli.Docker(cli.Args("-H="+server.URL[7:], "version"), cli.WithEnvironmentVariables(appendBaseEnv(false, "DOCKER_API_VERSION=xxx")...)) + c.Assert(result, icmd.Matches, icmd.Expected{Out: "API version: xxx", ExitCode: 1}) + c.Assert(svrVersion, check.Equals, "/vxxx/version", check.Commentf("%s", result.Compare(icmd.Success))) +} + +func (s *DockerSuite) TestAPIErrorJSON(c *check.C) { + httpResp, body, err := request.Post("/containers/create", request.JSONBody(struct{}{})) + c.Assert(err, checker.IsNil) + c.Assert(httpResp.StatusCode, checker.Equals, http.StatusInternalServerError) + c.Assert(httpResp.Header.Get("Content-Type"), checker.Equals, "application/json") + b, err := testutil.ReadBody(body) + c.Assert(err, checker.IsNil) + c.Assert(getErrorMessage(c, b), checker.Equals, "Config cannot be empty in order to create a container") +} + +func (s *DockerSuite) TestAPIErrorPlainText(c *check.C) { + // Windows requires API 1.25 or later. This test is validating a behaviour which was present + // in v1.23, but changed in 1.24, hence not applicable on Windows. 
See apiVersionSupportsJSONErrors + testRequires(c, DaemonIsLinux) + httpResp, body, err := request.Post("/v1.23/containers/create", request.JSONBody(struct{}{})) + c.Assert(err, checker.IsNil) + c.Assert(httpResp.StatusCode, checker.Equals, http.StatusInternalServerError) + c.Assert(httpResp.Header.Get("Content-Type"), checker.Contains, "text/plain") + b, err := testutil.ReadBody(body) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(string(b)), checker.Equals, "Config cannot be empty in order to create a container") +} + +func (s *DockerSuite) TestAPIErrorNotFoundJSON(c *check.C) { + // 404 is a different code path to normal errors, so test separately + httpResp, body, err := request.Get("/notfound", request.JSON) + c.Assert(err, checker.IsNil) + c.Assert(httpResp.StatusCode, checker.Equals, http.StatusNotFound) + c.Assert(httpResp.Header.Get("Content-Type"), checker.Equals, "application/json") + b, err := testutil.ReadBody(body) + c.Assert(err, checker.IsNil) + c.Assert(getErrorMessage(c, b), checker.Equals, "page not found") +} + +func (s *DockerSuite) TestAPIErrorNotFoundPlainText(c *check.C) { + httpResp, body, err := request.Get("/v1.23/notfound", request.JSON) + c.Assert(err, checker.IsNil) + c.Assert(httpResp.StatusCode, checker.Equals, http.StatusNotFound) + c.Assert(httpResp.Header.Get("Content-Type"), checker.Contains, "text/plain") + b, err := testutil.ReadBody(body) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(string(b)), checker.Equals, "page not found") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_update_unix_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_update_unix_test.go new file mode 100644 index 000000000..1af6ed1e7 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_update_unix_test.go @@ -0,0 +1,36 @@ +// +build !windows + +package main + +import ( + "strings" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestAPIUpdateContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + testRequires(c, swapMemorySupport) + + name := "apiUpdateContainer" + hostConfig := map[string]interface{}{ + "Memory": 314572800, + "MemorySwap": 524288000, + } + dockerCmd(c, "run", "-d", "--name", name, "-m", "200M", "busybox", "top") + _, _, err := request.SockRequest("POST", "/containers/"+name+"/update", hostConfig, daemonHost()) + c.Assert(err, check.IsNil) + + c.Assert(inspectField(c, name, "HostConfig.Memory"), checker.Equals, "314572800") + file := "/sys/fs/cgroup/memory/memory.limit_in_bytes" + out, _ := dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "314572800") + + c.Assert(inspectField(c, name, "HostConfig.MemorySwap"), checker.Equals, "524288000") + file = "/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes" + out, _ = dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "524288000") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_version_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_version_test.go new file mode 100644 index 000000000..5f919deb7 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_version_test.go @@ -0,0 +1,24 @@ +package main + +import ( + "encoding/json" + "net/http" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/dockerversion" + 
"github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestGetVersion(c *check.C) { + status, body, err := request.SockRequest("GET", "/version", nil, daemonHost()) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) + + var v types.Version + + c.Assert(json.Unmarshal(body, &v), checker.IsNil) + + c.Assert(v.Version, checker.Equals, dockerversion.Version, check.Commentf("Version mismatch")) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_api_volumes_test.go b/vendor/github.com/moby/moby/integration-cli/docker_api_volumes_test.go new file mode 100644 index 000000000..f354856d3 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_api_volumes_test.go @@ -0,0 +1,103 @@ +package main + +import ( + "encoding/json" + "fmt" + "net/http" + "path/filepath" + "strings" + "time" + + "github.com/docker/docker/api/types" + volumetypes "github.com/docker/docker/api/types/volume" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestVolumesAPIList(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + dockerCmd(c, "run", "-v", prefix+"/foo", "busybox") + + status, b, err := request.SockRequest("GET", "/volumes", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var volumes volumetypes.VolumesListOKBody + c.Assert(json.Unmarshal(b, &volumes), checker.IsNil) + + c.Assert(len(volumes.Volumes), checker.Equals, 1, check.Commentf("\n%v", volumes.Volumes)) +} + +func (s *DockerSuite) TestVolumesAPICreate(c *check.C) { + config := volumetypes.VolumesCreateBody{ + Name: "test", + } + status, b, err := request.SockRequest("POST", "/volumes/create", config, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusCreated, check.Commentf(string(b))) + + var vol types.Volume + err = json.Unmarshal(b, &vol) + c.Assert(err, checker.IsNil) + + c.Assert(filepath.Base(filepath.Dir(vol.Mountpoint)), checker.Equals, config.Name) +} + +func (s *DockerSuite) TestVolumesAPIRemove(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + dockerCmd(c, "run", "-v", prefix+"/foo", "--name=test", "busybox") + + status, b, err := request.SockRequest("GET", "/volumes", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var volumes volumetypes.VolumesListOKBody + c.Assert(json.Unmarshal(b, &volumes), checker.IsNil) + c.Assert(len(volumes.Volumes), checker.Equals, 1, check.Commentf("\n%v", volumes.Volumes)) + + v := volumes.Volumes[0] + status, _, err = request.SockRequest("DELETE", "/volumes/"+v.Name, nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusConflict, check.Commentf("Should not be able to remove a volume that is in use")) + + dockerCmd(c, "rm", "-f", "test") + status, data, err := request.SockRequest("DELETE", "/volumes/"+v.Name, nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent, check.Commentf(string(data))) + +} + +func (s *DockerSuite) TestVolumesAPIInspect(c *check.C) { + config := volumetypes.VolumesCreateBody{ + Name: "test", + } + // sampling current time minus a minute so to now have false positive in case of delays + now := time.Now().Truncate(time.Minute) + status, b, err := 
request.SockRequest("POST", "/volumes/create", config, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusCreated, check.Commentf(string(b))) + + status, b, err = request.SockRequest("GET", "/volumes", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf(string(b))) + + var volumes volumetypes.VolumesListOKBody + c.Assert(json.Unmarshal(b, &volumes), checker.IsNil) + c.Assert(len(volumes.Volumes), checker.Equals, 1, check.Commentf("\n%v", volumes.Volumes)) + + var vol types.Volume + status, b, err = request.SockRequest("GET", "/volumes/"+config.Name, nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf(string(b))) + c.Assert(json.Unmarshal(b, &vol), checker.IsNil) + c.Assert(vol.Name, checker.Equals, config.Name) + + // comparing CreatedAt field time for the new volume to now. Removing a minute from both to avoid false positive + testCreatedAt, err := time.Parse(time.RFC3339, strings.TrimSpace(vol.CreatedAt)) + c.Assert(err, check.IsNil) + testCreatedAt = testCreatedAt.Truncate(time.Minute) + if !testCreatedAt.Equal(now) { + c.Assert(fmt.Errorf("Time Volume is CreatedAt not equal to current time"), check.NotNil) + } +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_attach_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_attach_test.go new file mode 100644 index 000000000..ff319c0d8 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_attach_test.go @@ -0,0 +1,176 @@ +package main + +import ( + "bufio" + "fmt" + "io" + "os/exec" + "runtime" + "strings" + "sync" + "time" + + "github.com/docker/docker/integration-cli/cli" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +const attachWait = 5 * time.Second + +func (s *DockerSuite) TestAttachMultipleAndRestart(c *check.C) { + endGroup := &sync.WaitGroup{} + startGroup := &sync.WaitGroup{} + endGroup.Add(3) + startGroup.Add(3) + + cli.DockerCmd(c, "run", "--name", "attacher", "-d", "busybox", "/bin/sh", "-c", "while true; do sleep 1; echo hello; done") + cli.WaitRun(c, "attacher") + + startDone := make(chan struct{}) + endDone := make(chan struct{}) + + go func() { + endGroup.Wait() + close(endDone) + }() + + go func() { + startGroup.Wait() + close(startDone) + }() + + for i := 0; i < 3; i++ { + go func() { + cmd := exec.Command(dockerBinary, "attach", "attacher") + + defer func() { + cmd.Wait() + endGroup.Done() + }() + + out, err := cmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + defer out.Close() + + if err := cmd.Start(); err != nil { + c.Fatal(err) + } + + buf := make([]byte, 1024) + + if _, err := out.Read(buf); err != nil && err != io.EOF { + c.Fatal(err) + } + + startGroup.Done() + + if !strings.Contains(string(buf), "hello") { + c.Fatalf("unexpected output %s expected hello\n", string(buf)) + } + }() + } + + select { + case <-startDone: + case <-time.After(attachWait): + c.Fatalf("Attaches did not initialize properly") + } + + cli.DockerCmd(c, "kill", "attacher") + + select { + case <-endDone: + case <-time.After(attachWait): + c.Fatalf("Attaches did not finish properly") + } +} + +func (s *DockerSuite) TestAttachTTYWithoutStdin(c *check.C) { + // TODO @jhowardmsft. Figure out how to get this running again reliable on Windows. + // It works by accident at the moment. Sometimes. I've gone back to v1.13.0 and see the same. 
+ // On Windows, docker run -d -ti busybox causes the container to exit immediately. + // Obviously a year back when I updated the test, that was not the case. However, + // with this, and the test racing with the tear-down which panic's, sometimes CI + // will just fail and `MISS` all the other tests. For now, disabling it. Will + // open an issue to track re-enabling this and root-causing the problem. + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "-ti", "busybox") + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + + done := make(chan error) + go func() { + defer close(done) + + cmd := exec.Command(dockerBinary, "attach", id) + if _, err := cmd.StdinPipe(); err != nil { + done <- err + return + } + + expected := "the input device is not a TTY" + if runtime.GOOS == "windows" { + expected += ". If you are using mintty, try prefixing the command with 'winpty'" + } + if out, _, err := runCommandWithOutput(cmd); err == nil { + done <- fmt.Errorf("attach should have failed") + return + } else if !strings.Contains(out, expected) { + done <- fmt.Errorf("attach failed with error %q: expected %q", out, expected) + return + } + }() + + select { + case err := <-done: + c.Assert(err, check.IsNil) + case <-time.After(attachWait): + c.Fatal("attach is running but should have failed") + } +} + +func (s *DockerSuite) TestAttachDisconnect(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-di", "busybox", "/bin/cat") + id := strings.TrimSpace(out) + + cmd := exec.Command(dockerBinary, "attach", id) + stdin, err := cmd.StdinPipe() + if err != nil { + c.Fatal(err) + } + defer stdin.Close() + stdout, err := cmd.StdoutPipe() + c.Assert(err, check.IsNil) + defer stdout.Close() + c.Assert(cmd.Start(), check.IsNil) + defer cmd.Process.Kill() + + _, err = stdin.Write([]byte("hello\n")) + c.Assert(err, check.IsNil) + out, err = bufio.NewReader(stdout).ReadString('\n') + c.Assert(err, check.IsNil) + c.Assert(strings.TrimSpace(out), check.Equals, "hello") + + c.Assert(stdin.Close(), check.IsNil) + + // Expect container to still be running after stdin is closed + running := inspectField(c, id, "State.Running") + c.Assert(running, check.Equals, "true") +} + +func (s *DockerSuite) TestAttachPausedContainer(c *check.C) { + testRequires(c, IsPausable) + runSleepingContainer(c, "-d", "--name=test") + dockerCmd(c, "pause", "test") + + result := dockerCmdWithResult("attach", "test") + c.Assert(result, icmd.Matches, icmd.Expected{ + Error: "exit status 1", + ExitCode: 1, + Err: "You cannot attach to a paused container, unpause it first", + }) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_attach_unix_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_attach_unix_test.go new file mode 100644 index 000000000..78f55e043 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_attach_unix_test.go @@ -0,0 +1,237 @@ +// +build !windows + +package main + +import ( + "bufio" + "io/ioutil" + "os/exec" + "strings" + "time" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/pkg/stringid" + "github.com/go-check/check" + "github.com/kr/pty" +) + +// #9860 Make sure attach ends when container ends (with no errors) +func (s *DockerSuite) TestAttachClosedOnContainerStop(c *check.C) { + testRequires(c, SameHostDaemon) + + out, _ := dockerCmd(c, "run", "-dti", "busybox", "/bin/sh", "-c", `trap 'exit 0' SIGTERM; while true; do sleep 1; done`) + + id := strings.TrimSpace(out) + 
c.Assert(waitRun(id), check.IsNil) + + pty, tty, err := pty.Open() + c.Assert(err, check.IsNil) + + attachCmd := exec.Command(dockerBinary, "attach", id) + attachCmd.Stdin = tty + attachCmd.Stdout = tty + attachCmd.Stderr = tty + err = attachCmd.Start() + c.Assert(err, check.IsNil) + + errChan := make(chan error) + go func() { + time.Sleep(300 * time.Millisecond) + defer close(errChan) + // Container is waiting for us to signal it to stop + dockerCmd(c, "stop", id) + // And wait for the attach command to end + errChan <- attachCmd.Wait() + }() + + // Wait for the docker to end (should be done by the + // stop command in the go routine) + dockerCmd(c, "wait", id) + + select { + case err := <-errChan: + tty.Close() + out, _ := ioutil.ReadAll(pty) + c.Assert(err, check.IsNil, check.Commentf("out: %v", string(out))) + case <-time.After(attachWait): + c.Fatal("timed out without attach returning") + } + +} + +func (s *DockerSuite) TestAttachAfterDetach(c *check.C) { + name := "detachtest" + + cpty, tty, err := pty.Open() + c.Assert(err, checker.IsNil, check.Commentf("Could not open pty: %v", err)) + cmd := exec.Command(dockerBinary, "run", "-ti", "--name", name, "busybox") + cmd.Stdin = tty + cmd.Stdout = tty + cmd.Stderr = tty + + errChan := make(chan error) + go func() { + errChan <- cmd.Run() + close(errChan) + }() + + c.Assert(waitRun(name), check.IsNil) + + cpty.Write([]byte{16}) + time.Sleep(100 * time.Millisecond) + cpty.Write([]byte{17}) + + select { + case err := <-errChan: + if err != nil { + buff := make([]byte, 200) + tty.Read(buff) + c.Fatalf("%s: %s", err, buff) + } + case <-time.After(5 * time.Second): + c.Fatal("timeout while detaching") + } + + cpty, tty, err = pty.Open() + c.Assert(err, checker.IsNil, check.Commentf("Could not open pty: %v", err)) + + cmd = exec.Command(dockerBinary, "attach", name) + cmd.Stdin = tty + cmd.Stdout = tty + cmd.Stderr = tty + + err = cmd.Start() + c.Assert(err, checker.IsNil) + + bytes := make([]byte, 10) + var nBytes int + readErr := make(chan error, 1) + + go func() { + time.Sleep(500 * time.Millisecond) + cpty.Write([]byte("\n")) + time.Sleep(500 * time.Millisecond) + + nBytes, err = cpty.Read(bytes) + cpty.Close() + readErr <- err + }() + + select { + case err := <-readErr: + c.Assert(err, check.IsNil) + case <-time.After(2 * time.Second): + c.Fatal("timeout waiting for attach read") + } + + err = cmd.Wait() + c.Assert(err, checker.IsNil) + + c.Assert(string(bytes[:nBytes]), checker.Contains, "/ #") + +} + +// TestAttachDetach checks that attach in tty mode can be detached using the long container ID +func (s *DockerSuite) TestAttachDetach(c *check.C) { + out, _ := dockerCmd(c, "run", "-itd", "busybox", "cat") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + + cpty, tty, err := pty.Open() + c.Assert(err, check.IsNil) + defer cpty.Close() + + cmd := exec.Command(dockerBinary, "attach", id) + cmd.Stdin = tty + stdout, err := cmd.StdoutPipe() + c.Assert(err, check.IsNil) + defer stdout.Close() + err = cmd.Start() + c.Assert(err, check.IsNil) + c.Assert(waitRun(id), check.IsNil) + + _, err = cpty.Write([]byte("hello\n")) + c.Assert(err, check.IsNil) + out, err = bufio.NewReader(stdout).ReadString('\n') + c.Assert(err, check.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "hello", check.Commentf("expected 'hello', got %q", out)) + + // escape sequence + _, err = cpty.Write([]byte{16}) + c.Assert(err, checker.IsNil) + time.Sleep(100 * time.Millisecond) + _, err = cpty.Write([]byte{17}) + c.Assert(err, checker.IsNil) + 
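+	// Bytes 16 and 17 are ctrl-p and ctrl-q, the client's default detach key sequence;
+	// detaching should end the attach command below while the container keeps running.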
+ ch := make(chan struct{}) + go func() { + cmd.Wait() + ch <- struct{}{} + }() + + running := inspectField(c, id, "State.Running") + c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) + + go func() { + dockerCmd(c, "kill", id) + }() + + select { + case <-ch: + case <-time.After(10 * time.Millisecond): + c.Fatal("timed out waiting for container to exit") + } + +} + +// TestAttachDetachTruncatedID checks that attach in tty mode can be detached +func (s *DockerSuite) TestAttachDetachTruncatedID(c *check.C) { + out, _ := dockerCmd(c, "run", "-itd", "busybox", "cat") + id := stringid.TruncateID(strings.TrimSpace(out)) + c.Assert(waitRun(id), check.IsNil) + + cpty, tty, err := pty.Open() + c.Assert(err, checker.IsNil) + defer cpty.Close() + + cmd := exec.Command(dockerBinary, "attach", id) + cmd.Stdin = tty + stdout, err := cmd.StdoutPipe() + c.Assert(err, checker.IsNil) + defer stdout.Close() + err = cmd.Start() + c.Assert(err, checker.IsNil) + + _, err = cpty.Write([]byte("hello\n")) + c.Assert(err, checker.IsNil) + out, err = bufio.NewReader(stdout).ReadString('\n') + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "hello", check.Commentf("expected 'hello', got %q", out)) + + // escape sequence + _, err = cpty.Write([]byte{16}) + c.Assert(err, checker.IsNil) + time.Sleep(100 * time.Millisecond) + _, err = cpty.Write([]byte{17}) + c.Assert(err, checker.IsNil) + + ch := make(chan struct{}) + go func() { + cmd.Wait() + ch <- struct{}{} + }() + + running := inspectField(c, id, "State.Running") + c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) + + go func() { + dockerCmd(c, "kill", id) + }() + + select { + case <-ch: + case <-time.After(10 * time.Millisecond): + c.Fatal("timed out waiting for container to exit") + } + +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_authz_plugin_v2_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_authz_plugin_v2_test.go new file mode 100644 index 000000000..0143f1c0f --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_authz_plugin_v2_test.go @@ -0,0 +1,168 @@ +// +build !windows + +package main + +import ( + "fmt" + "strings" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/daemon" + "github.com/go-check/check" +) + +var ( + authzPluginName = "riyaz/authz-no-volume-plugin" + authzPluginTag = "latest" + authzPluginNameWithTag = authzPluginName + ":" + authzPluginTag + authzPluginBadManifestName = "riyaz/authz-plugin-bad-manifest" + nonexistentAuthzPluginName = "riyaz/nonexistent-authz-plugin" +) + +func init() { + check.Suite(&DockerAuthzV2Suite{ + ds: &DockerSuite{}, + }) +} + +type DockerAuthzV2Suite struct { + ds *DockerSuite + d *daemon.Daemon +} + +func (s *DockerAuthzV2Suite) SetUpTest(c *check.C) { + testRequires(c, DaemonIsLinux, Network) + s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: testEnv.ExperimentalDaemon(), + }) + s.d.Start(c) +} + +func (s *DockerAuthzV2Suite) TearDownTest(c *check.C) { + if s.d != nil { + s.d.Stop(c) + s.ds.TearDownTest(c) + } +} + +func (s *DockerAuthzV2Suite) TestAuthZPluginAllowNonVolumeRequest(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + // Install authz plugin + _, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", authzPluginNameWithTag) + c.Assert(err, checker.IsNil) + // start the daemon with the plugin and load 
busybox, --net=none build fails otherwise + // because it needs to pull busybox + s.d.Restart(c, "--authorization-plugin="+authzPluginNameWithTag) + c.Assert(s.d.LoadBusybox(), check.IsNil) + + // defer disabling the plugin + defer func() { + s.d.Restart(c) + _, err = s.d.Cmd("plugin", "disable", authzPluginNameWithTag) + c.Assert(err, checker.IsNil) + _, err = s.d.Cmd("plugin", "rm", authzPluginNameWithTag) + c.Assert(err, checker.IsNil) + }() + + // Ensure docker run command and accompanying docker ps are successful + out, err := s.d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil) + + id := strings.TrimSpace(out) + + out, err = s.d.Cmd("ps") + c.Assert(err, check.IsNil) + c.Assert(assertContainerList(out, []string{id}), check.Equals, true) +} + +func (s *DockerAuthzV2Suite) TestAuthZPluginDisable(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + // Install authz plugin + _, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", authzPluginNameWithTag) + c.Assert(err, checker.IsNil) + // start the daemon with the plugin and load busybox, --net=none build fails otherwise + // because it needs to pull busybox + s.d.Restart(c, "--authorization-plugin="+authzPluginNameWithTag) + c.Assert(s.d.LoadBusybox(), check.IsNil) + + // defer removing the plugin + defer func() { + s.d.Restart(c) + _, err = s.d.Cmd("plugin", "rm", "-f", authzPluginNameWithTag) + c.Assert(err, checker.IsNil) + }() + + out, err := s.d.Cmd("volume", "create") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag)) + + // disable the plugin + _, err = s.d.Cmd("plugin", "disable", authzPluginNameWithTag) + c.Assert(err, checker.IsNil) + + // now test to see if the docker api works. 
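+	// with the plugin disabled, the volume create that was blocked above should now succeed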
+ _, err = s.d.Cmd("volume", "create") + c.Assert(err, checker.IsNil) +} + +func (s *DockerAuthzV2Suite) TestAuthZPluginRejectVolumeRequests(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + // Install authz plugin + _, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", authzPluginNameWithTag) + c.Assert(err, checker.IsNil) + + // restart the daemon with the plugin + s.d.Restart(c, "--authorization-plugin="+authzPluginNameWithTag) + + // defer disabling the plugin + defer func() { + s.d.Restart(c) + _, err = s.d.Cmd("plugin", "disable", authzPluginNameWithTag) + c.Assert(err, checker.IsNil) + _, err = s.d.Cmd("plugin", "rm", authzPluginNameWithTag) + c.Assert(err, checker.IsNil) + }() + + out, err := s.d.Cmd("volume", "create") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag)) + + out, err = s.d.Cmd("volume", "ls") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag)) + + // The plugin will block the command before it can determine the volume does not exist + out, err = s.d.Cmd("volume", "rm", "test") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag)) + + out, err = s.d.Cmd("volume", "inspect", "test") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag)) + + out, err = s.d.Cmd("volume", "prune", "-f") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag)) +} + +func (s *DockerAuthzV2Suite) TestAuthZPluginBadManifestFailsDaemonStart(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + // Install authz plugin with bad manifest + _, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", authzPluginBadManifestName) + c.Assert(err, checker.IsNil) + + // start the daemon with the plugin, it will error + c.Assert(s.d.RestartWithError("--authorization-plugin="+authzPluginBadManifestName), check.NotNil) + + // restarting the daemon without requiring the plugin will succeed + s.d.Restart(c) +} + +func (s *DockerAuthzV2Suite) TestNonexistentAuthZPluginFailsDaemonStart(c *check.C) { + testRequires(c, DaemonIsLinux, Network) + // start the daemon with a non-existent authz plugin, it will error + c.Assert(s.d.RestartWithError("--authorization-plugin="+nonexistentAuthzPluginName), check.NotNil) + + // restarting the daemon without requiring the plugin will succeed + s.d.Start(c) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_authz_unix_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_authz_unix_test.go new file mode 100644 index 000000000..959292f05 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_authz_unix_test.go @@ -0,0 +1,475 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "strings" + + "bufio" + "bytes" + "os/exec" + "strconv" + "time" + + "net" + "net/http/httputil" + "net/url" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/daemon" + "github.com/docker/docker/pkg/authorization" + 
"github.com/docker/docker/pkg/plugins" + "github.com/go-check/check" +) + +const ( + testAuthZPlugin = "authzplugin" + unauthorizedMessage = "User unauthorized authz plugin" + errorMessage = "something went wrong..." + containerListAPI = "/containers/json" +) + +var ( + alwaysAllowed = []string{"/_ping", "/info"} +) + +func init() { + check.Suite(&DockerAuthzSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerAuthzSuite struct { + server *httptest.Server + ds *DockerSuite + d *daemon.Daemon + ctrl *authorizationController +} + +type authorizationController struct { + reqRes authorization.Response // reqRes holds the plugin response to the initial client request + resRes authorization.Response // resRes holds the plugin response to the daemon response + psRequestCnt int // psRequestCnt counts the number of calls to list container request api + psResponseCnt int // psResponseCnt counts the number of calls to list containers response API + requestsURIs []string // requestsURIs stores all request URIs that are sent to the authorization controller + reqUser string + resUser string +} + +func (s *DockerAuthzSuite) SetUpTest(c *check.C) { + s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: testEnv.ExperimentalDaemon(), + }) + s.ctrl = &authorizationController{} +} + +func (s *DockerAuthzSuite) TearDownTest(c *check.C) { + if s.d != nil { + s.d.Stop(c) + s.ds.TearDownTest(c) + s.ctrl = nil + } +} + +func (s *DockerAuthzSuite) SetUpSuite(c *check.C) { + mux := http.NewServeMux() + s.server = httptest.NewServer(mux) + + mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { + b, err := json.Marshal(plugins.Manifest{Implements: []string{authorization.AuthZApiImplements}}) + c.Assert(err, check.IsNil) + w.Write(b) + }) + + mux.HandleFunc("/AuthZPlugin.AuthZReq", func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + body, err := ioutil.ReadAll(r.Body) + c.Assert(err, check.IsNil) + authReq := authorization.Request{} + err = json.Unmarshal(body, &authReq) + c.Assert(err, check.IsNil) + + assertBody(c, authReq.RequestURI, authReq.RequestHeaders, authReq.RequestBody) + assertAuthHeaders(c, authReq.RequestHeaders) + + // Count only container list api + if strings.HasSuffix(authReq.RequestURI, containerListAPI) { + s.ctrl.psRequestCnt++ + } + + s.ctrl.requestsURIs = append(s.ctrl.requestsURIs, authReq.RequestURI) + + reqRes := s.ctrl.reqRes + if isAllowed(authReq.RequestURI) { + reqRes = authorization.Response{Allow: true} + } + if reqRes.Err != "" { + w.WriteHeader(http.StatusInternalServerError) + } + b, err := json.Marshal(reqRes) + c.Assert(err, check.IsNil) + s.ctrl.reqUser = authReq.User + w.Write(b) + }) + + mux.HandleFunc("/AuthZPlugin.AuthZRes", func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + body, err := ioutil.ReadAll(r.Body) + c.Assert(err, check.IsNil) + authReq := authorization.Request{} + err = json.Unmarshal(body, &authReq) + c.Assert(err, check.IsNil) + + assertBody(c, authReq.RequestURI, authReq.ResponseHeaders, authReq.ResponseBody) + assertAuthHeaders(c, authReq.ResponseHeaders) + + // Count only container list api + if strings.HasSuffix(authReq.RequestURI, containerListAPI) { + s.ctrl.psResponseCnt++ + } + resRes := s.ctrl.resRes + if isAllowed(authReq.RequestURI) { + resRes = authorization.Response{Allow: true} + } + if resRes.Err != "" { + w.WriteHeader(http.StatusInternalServerError) + } + b, err := json.Marshal(resRes) + c.Assert(err, check.IsNil) + s.ctrl.resUser = authReq.User + 
w.Write(b) + }) + + err := os.MkdirAll("/etc/docker/plugins", 0755) + c.Assert(err, checker.IsNil) + + fileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", testAuthZPlugin) + err = ioutil.WriteFile(fileName, []byte(s.server.URL), 0644) + c.Assert(err, checker.IsNil) +} + +// check for always allowed endpoints to not inhibit test framework functions +func isAllowed(reqURI string) bool { + for _, endpoint := range alwaysAllowed { + if strings.HasSuffix(reqURI, endpoint) { + return true + } + } + return false +} + +// assertAuthHeaders validates authentication headers are removed +func assertAuthHeaders(c *check.C, headers map[string]string) error { + for k := range headers { + if strings.Contains(strings.ToLower(k), "auth") || strings.Contains(strings.ToLower(k), "x-registry") { + c.Errorf("Found authentication headers in request '%v'", headers) + } + } + return nil +} + +// assertBody asserts that body is removed for non text/json requests +func assertBody(c *check.C, requestURI string, headers map[string]string, body []byte) { + if strings.Contains(strings.ToLower(requestURI), "auth") && len(body) > 0 { + //return fmt.Errorf("Body included for authentication endpoint %s", string(body)) + c.Errorf("Body included for authentication endpoint %s", string(body)) + } + + for k, v := range headers { + if strings.EqualFold(k, "Content-Type") && strings.HasPrefix(v, "text/") || v == "application/json" { + return + } + } + if len(body) > 0 { + c.Errorf("Body included while it should not (Headers: '%v')", headers) + } +} + +func (s *DockerAuthzSuite) TearDownSuite(c *check.C) { + if s.server == nil { + return + } + + s.server.Close() + + err := os.RemoveAll("/etc/docker/plugins") + c.Assert(err, checker.IsNil) +} + +func (s *DockerAuthzSuite) TestAuthZPluginAllowRequest(c *check.C) { + // start the daemon and load busybox, --net=none build fails otherwise + // cause it needs to pull busybox + s.d.Start(c, "--authorization-plugin="+testAuthZPlugin) + s.ctrl.reqRes.Allow = true + s.ctrl.resRes.Allow = true + c.Assert(s.d.LoadBusybox(), check.IsNil) + + // Ensure command successful + out, err := s.d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil) + + id := strings.TrimSpace(out) + assertURIRecorded(c, s.ctrl.requestsURIs, "/containers/create") + assertURIRecorded(c, s.ctrl.requestsURIs, fmt.Sprintf("/containers/%s/start", id)) + + out, err = s.d.Cmd("ps") + c.Assert(err, check.IsNil) + c.Assert(assertContainerList(out, []string{id}), check.Equals, true) + c.Assert(s.ctrl.psRequestCnt, check.Equals, 1) + c.Assert(s.ctrl.psResponseCnt, check.Equals, 1) +} + +func (s *DockerAuthzSuite) TestAuthZPluginTls(c *check.C) { + + const testDaemonHTTPSAddr = "tcp://localhost:4271" + // start the daemon and load busybox, --net=none build fails otherwise + // cause it needs to pull busybox + s.d.Start(c, + "--authorization-plugin="+testAuthZPlugin, + "--tlsverify", + "--tlscacert", + "fixtures/https/ca.pem", + "--tlscert", + "fixtures/https/server-cert.pem", + "--tlskey", + "fixtures/https/server-key.pem", + "-H", testDaemonHTTPSAddr) + + s.ctrl.reqRes.Allow = true + s.ctrl.resRes.Allow = true + + out, _ := dockerCmd( + c, + "--tlsverify", + "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/client-cert.pem", + "--tlskey", "fixtures/https/client-key.pem", + "-H", + testDaemonHTTPSAddr, + "version", + ) + if !strings.Contains(out, "Server") { + c.Fatalf("docker version should return information of server side") + } + + c.Assert(s.ctrl.reqUser, check.Equals, "client") + 
c.Assert(s.ctrl.resUser, check.Equals, "client")
+}
+
+func (s *DockerAuthzSuite) TestAuthZPluginDenyRequest(c *check.C) {
+	s.d.Start(c, "--authorization-plugin="+testAuthZPlugin)
+	s.ctrl.reqRes.Allow = false
+	s.ctrl.reqRes.Msg = unauthorizedMessage
+
+	// Ensure command is blocked
+	res, err := s.d.Cmd("ps")
+	c.Assert(err, check.NotNil)
+	c.Assert(s.ctrl.psRequestCnt, check.Equals, 1)
+	c.Assert(s.ctrl.psResponseCnt, check.Equals, 0)
+
+	// Ensure unauthorized message appears in response
+	c.Assert(res, check.Equals, fmt.Sprintf("Error response from daemon: authorization denied by plugin %s: %s\n", testAuthZPlugin, unauthorizedMessage))
+}
+
+// TestAuthZPluginAPIDenyResponse validates that when the authorization plugin denies the request, the status code is Forbidden
+func (s *DockerAuthzSuite) TestAuthZPluginAPIDenyResponse(c *check.C) {
+	s.d.Start(c, "--authorization-plugin="+testAuthZPlugin)
+	s.ctrl.reqRes.Allow = false
+	s.ctrl.resRes.Msg = unauthorizedMessage
+
+	daemonURL, err := url.Parse(s.d.Sock())
+	c.Assert(err, check.IsNil)
+
+	conn, err := net.DialTimeout(daemonURL.Scheme, daemonURL.Path, time.Second*10)
+	c.Assert(err, check.IsNil)
+	client := httputil.NewClientConn(conn, nil)
+	req, err := http.NewRequest("GET", "/version", nil)
+	c.Assert(err, check.IsNil)
+	resp, err := client.Do(req)
+
+	c.Assert(err, check.IsNil)
+	c.Assert(resp.StatusCode, checker.Equals, http.StatusForbidden)
+}
+
+func (s *DockerAuthzSuite) TestAuthZPluginDenyResponse(c *check.C) {
+	s.d.Start(c, "--authorization-plugin="+testAuthZPlugin)
+	s.ctrl.reqRes.Allow = true
+	s.ctrl.resRes.Allow = false
+	s.ctrl.resRes.Msg = unauthorizedMessage
+
+	// Ensure command is blocked
+	res, err := s.d.Cmd("ps")
+	c.Assert(err, check.NotNil)
+	c.Assert(s.ctrl.psRequestCnt, check.Equals, 1)
+	c.Assert(s.ctrl.psResponseCnt, check.Equals, 1)
+
+	// Ensure unauthorized message appears in response
+	c.Assert(res, check.Equals, fmt.Sprintf("Error response from daemon: authorization denied by plugin %s: %s\n", testAuthZPlugin, unauthorizedMessage))
+}
+
+// TestAuthZPluginAllowEventStream verifies that the event stream propagates correctly after requests pass through the authorization plugin
+func (s *DockerAuthzSuite) TestAuthZPluginAllowEventStream(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+
+	// start the daemon and load busybox to avoid pulling busybox from Docker Hub
+	s.d.Start(c, "--authorization-plugin="+testAuthZPlugin)
+	s.ctrl.reqRes.Allow = true
+	s.ctrl.resRes.Allow = true
+	c.Assert(s.d.LoadBusybox(), check.IsNil)
+
+	startTime := strconv.FormatInt(daemonTime(c).Unix(), 10)
+	// Add another command to enable event pipelining
+	eventsCmd := exec.Command(dockerBinary, "--host", s.d.Sock(), "events", "--since", startTime)
+	stdout, err := eventsCmd.StdoutPipe()
+	c.Assert(err, check.IsNil)
+
+	observer := eventObserver{
+		buffer:    new(bytes.Buffer),
+		command:   eventsCmd,
+		scanner:   bufio.NewScanner(stdout),
+		startTime: startTime,
+	}
+
+	err = observer.Start()
+	c.Assert(err, checker.IsNil)
+	defer observer.Stop()
+
+	// Create a container and wait for the creation events
+	out, err := s.d.Cmd("run", "-d", "busybox", "top")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+	containerID := strings.TrimSpace(out)
+	c.Assert(s.d.WaitRun(containerID), checker.IsNil)
+
+	events := map[string]chan bool{
+		"create": make(chan bool, 1),
+		"start":  make(chan bool, 1),
+	}
+
+	matcher := matchEventLine(containerID, "container", events)
+	processor := processEventMatch(events)
+	go observer.Match(matcher, processor)
+
+	// Ensure all events are received
+	for event, eventChannel := range events {
+		select {
+		case <-time.After(30 * time.Second):
+			// Fail the test
+			observer.CheckEventError(c, containerID, event, matcher)
+			c.FailNow()
+		case <-eventChannel:
+			// Ignore, event received
+		}
+	}
+
+	// Ensure both events and container endpoints are passed to the authorization plugin
+	assertURIRecorded(c, s.ctrl.requestsURIs, "/events")
+	assertURIRecorded(c, s.ctrl.requestsURIs, "/containers/create")
+	assertURIRecorded(c, s.ctrl.requestsURIs, fmt.Sprintf("/containers/%s/start", containerID))
+}
+
+func (s *DockerAuthzSuite) TestAuthZPluginErrorResponse(c *check.C) {
+	s.d.Start(c, "--authorization-plugin="+testAuthZPlugin)
+	s.ctrl.reqRes.Allow = true
+	s.ctrl.resRes.Err = errorMessage
+
+	// Ensure command is blocked
+	res, err := s.d.Cmd("ps")
+	c.Assert(err, check.NotNil)
+
+	c.Assert(res, check.Equals, fmt.Sprintf("Error response from daemon: plugin %s failed with error: %s: %s\n", testAuthZPlugin, authorization.AuthZApiResponse, errorMessage))
+}
+
+func (s *DockerAuthzSuite) TestAuthZPluginErrorRequest(c *check.C) {
+	s.d.Start(c, "--authorization-plugin="+testAuthZPlugin)
+	s.ctrl.reqRes.Err = errorMessage
+
+	// Ensure command is blocked
+	res, err := s.d.Cmd("ps")
+	c.Assert(err, check.NotNil)
+
+	c.Assert(res, check.Equals, fmt.Sprintf("Error response from daemon: plugin %s failed with error: %s: %s\n", testAuthZPlugin, authorization.AuthZApiRequest, errorMessage))
+}
+
+func (s *DockerAuthzSuite) TestAuthZPluginEnsureNoDuplicatePluginRegistration(c *check.C) {
+	s.d.Start(c, "--authorization-plugin="+testAuthZPlugin, "--authorization-plugin="+testAuthZPlugin)
+
+	s.ctrl.reqRes.Allow = true
+	s.ctrl.resRes.Allow = true
+
+	out, err := s.d.Cmd("ps")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+
+	// assert the plugin is only called once
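+	// even though the plugin was passed twice on the daemon command line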
+ c.Assert(s.ctrl.psRequestCnt, check.Equals, 1) + c.Assert(s.ctrl.psResponseCnt, check.Equals, 1) +} + +func (s *DockerAuthzSuite) TestAuthZPluginEnsureLoadImportWorking(c *check.C) { + s.d.Start(c, "--authorization-plugin="+testAuthZPlugin, "--authorization-plugin="+testAuthZPlugin) + s.ctrl.reqRes.Allow = true + s.ctrl.resRes.Allow = true + c.Assert(s.d.LoadBusybox(), check.IsNil) + + tmp, err := ioutil.TempDir("", "test-authz-load-import") + c.Assert(err, check.IsNil) + defer os.RemoveAll(tmp) + + savedImagePath := filepath.Join(tmp, "save.tar") + + out, err := s.d.Cmd("save", "-o", savedImagePath, "busybox") + c.Assert(err, check.IsNil, check.Commentf(out)) + out, err = s.d.Cmd("load", "--input", savedImagePath) + c.Assert(err, check.IsNil, check.Commentf(out)) + + exportedImagePath := filepath.Join(tmp, "export.tar") + + out, err = s.d.Cmd("run", "-d", "--name", "testexport", "busybox") + c.Assert(err, check.IsNil, check.Commentf(out)) + out, err = s.d.Cmd("export", "-o", exportedImagePath, "testexport") + c.Assert(err, check.IsNil, check.Commentf(out)) + out, err = s.d.Cmd("import", exportedImagePath) + c.Assert(err, check.IsNil, check.Commentf(out)) +} + +func (s *DockerAuthzSuite) TestAuthZPluginHeader(c *check.C) { + s.d.Start(c, "--debug", "--authorization-plugin="+testAuthZPlugin) + s.ctrl.reqRes.Allow = true + s.ctrl.resRes.Allow = true + c.Assert(s.d.LoadBusybox(), check.IsNil) + + daemonURL, err := url.Parse(s.d.Sock()) + + conn, err := net.DialTimeout(daemonURL.Scheme, daemonURL.Path, time.Second*10) + c.Assert(err, check.IsNil) + client := httputil.NewClientConn(conn, nil) + req, err := http.NewRequest("GET", "/version", nil) + c.Assert(err, check.IsNil) + resp, err := client.Do(req) + + c.Assert(err, check.IsNil) + c.Assert(resp.Header["Content-Type"][0], checker.Equals, "application/json") +} + +// assertURIRecorded verifies that the given URI was sent and recorded in the authz plugin +func assertURIRecorded(c *check.C, uris []string, uri string) { + var found bool + for _, u := range uris { + if strings.Contains(u, uri) { + found = true + break + } + } + if !found { + c.Fatalf("Expected to find URI '%s', recorded uris '%s'", uri, strings.Join(uris, ",")) + } +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_build_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_build_test.go new file mode 100644 index 000000000..5a3d3efc6 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_build_test.go @@ -0,0 +1,6490 @@ +package main + +import ( + "archive/tar" + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "regexp" + "runtime" + "strconv" + "strings" + "text/template" + "time" + + "github.com/docker/docker/builder/dockerfile/command" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/integration-cli/cli/build/fakecontext" + "github.com/docker/docker/integration-cli/cli/build/fakegit" + "github.com/docker/docker/integration-cli/cli/build/fakestorage" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/docker/pkg/testutil" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" + "github.com/opencontainers/go-digest" +) + +func (s *DockerSuite) TestBuildJSONEmptyRun(c *check.C) { + cli.BuildCmd(c, "testbuildjsonemptyrun", build.WithDockerfile(` + FROM busybox + RUN [] + `)) 
+} + +func (s *DockerSuite) TestBuildShCmdJSONEntrypoint(c *check.C) { + name := "testbuildshcmdjsonentrypoint" + expected := "/bin/sh -c echo test" + if testEnv.DaemonPlatform() == "windows" { + expected = "cmd /S /C echo test" + } + + buildImageSuccessfully(c, name, build.WithDockerfile(` + FROM busybox + ENTRYPOINT ["echo"] + CMD echo test + `)) + out, _ := dockerCmd(c, "run", "--rm", name) + + if strings.TrimSpace(out) != expected { + c.Fatalf("CMD did not contain %q : %q", expected, out) + } +} + +func (s *DockerSuite) TestBuildEnvironmentReplacementUser(c *check.C) { + // Windows does not support FROM scratch or the USER command + testRequires(c, DaemonIsLinux) + name := "testbuildenvironmentreplacement" + + buildImageSuccessfully(c, name, build.WithDockerfile(` + FROM scratch + ENV user foo + USER ${user} + `)) + res := inspectFieldJSON(c, name, "Config.User") + + if res != `"foo"` { + c.Fatal("User foo from environment not in Config.User on image") + } +} + +func (s *DockerSuite) TestBuildEnvironmentReplacementVolume(c *check.C) { + name := "testbuildenvironmentreplacement" + + var volumePath string + + if testEnv.DaemonPlatform() == "windows" { + volumePath = "c:/quux" + } else { + volumePath = "/quux" + } + + buildImageSuccessfully(c, name, build.WithDockerfile(` + FROM `+minimalBaseImage()+` + ENV volume `+volumePath+` + VOLUME ${volume} + `)) + + var volumes map[string]interface{} + inspectFieldAndUnmarshall(c, name, "Config.Volumes", &volumes) + if _, ok := volumes[volumePath]; !ok { + c.Fatal("Volume " + volumePath + " from environment not in Config.Volumes on image") + } + +} + +func (s *DockerSuite) TestBuildEnvironmentReplacementExpose(c *check.C) { + // Windows does not support FROM scratch or the EXPOSE command + testRequires(c, DaemonIsLinux) + name := "testbuildenvironmentreplacement" + + buildImageSuccessfully(c, name, build.WithDockerfile(` + FROM scratch + ENV port 80 + EXPOSE ${port} + ENV ports " 99 100 " + EXPOSE ${ports} + `)) + + var exposedPorts map[string]interface{} + inspectFieldAndUnmarshall(c, name, "Config.ExposedPorts", &exposedPorts) + exp := []int{80, 99, 100} + for _, p := range exp { + tmp := fmt.Sprintf("%d/tcp", p) + if _, ok := exposedPorts[tmp]; !ok { + c.Fatalf("Exposed port %d from environment not in Config.ExposedPorts on image", p) + } + } + +} + +func (s *DockerSuite) TestBuildEnvironmentReplacementWorkdir(c *check.C) { + name := "testbuildenvironmentreplacement" + + buildImageSuccessfully(c, name, build.WithDockerfile(` + FROM busybox + ENV MYWORKDIR /work + RUN mkdir ${MYWORKDIR} + WORKDIR ${MYWORKDIR} + `)) + res := inspectFieldJSON(c, name, "Config.WorkingDir") + + expected := `"/work"` + if testEnv.DaemonPlatform() == "windows" { + expected = `"C:\\work"` + } + if res != expected { + c.Fatalf("Workdir /workdir from environment not in Config.WorkingDir on image: %s", res) + } +} + +func (s *DockerSuite) TestBuildEnvironmentReplacementAddCopy(c *check.C) { + name := "testbuildenvironmentreplacement" + + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", ` + FROM `+minimalBaseImage()+` + ENV baz foo + ENV quux bar + ENV dot . 
+ ENV fee fff + ENV gee ggg + + ADD ${baz} ${dot} + COPY ${quux} ${dot} + ADD ${zzz:-${fee}} ${dot} + COPY ${zzz:-${gee}} ${dot} + `), + build.WithFile("foo", "test1"), + build.WithFile("bar", "test2"), + build.WithFile("fff", "test3"), + build.WithFile("ggg", "test4"), + )) +} + +func (s *DockerSuite) TestBuildEnvironmentReplacementEnv(c *check.C) { + // ENV expansions work differently in Windows + testRequires(c, DaemonIsLinux) + name := "testbuildenvironmentreplacement" + + buildImageSuccessfully(c, name, build.WithDockerfile(` + FROM busybox + ENV foo zzz + ENV bar ${foo} + ENV abc1='$foo' + ENV env1=$foo env2=${foo} env3="$foo" env4="${foo}" + RUN [ "$abc1" = '$foo' ] && (echo "$abc1" | grep -q foo) + ENV abc2="\$foo" + RUN [ "$abc2" = '$foo' ] && (echo "$abc2" | grep -q foo) + ENV abc3 '$foo' + RUN [ "$abc3" = '$foo' ] && (echo "$abc3" | grep -q foo) + ENV abc4 "\$foo" + RUN [ "$abc4" = '$foo' ] && (echo "$abc4" | grep -q foo) + ENV foo2="abc\def" + RUN [ "$foo2" = 'abc\def' ] + ENV foo3="abc\\def" + RUN [ "$foo3" = 'abc\def' ] + ENV foo4='abc\\def' + RUN [ "$foo4" = 'abc\\def' ] + ENV foo5='abc\def' + RUN [ "$foo5" = 'abc\def' ] + `)) + + envResult := []string{} + inspectFieldAndUnmarshall(c, name, "Config.Env", &envResult) + found := false + envCount := 0 + + for _, env := range envResult { + parts := strings.SplitN(env, "=", 2) + if parts[0] == "bar" { + found = true + if parts[1] != "zzz" { + c.Fatalf("Could not find replaced var for env `bar`: got %q instead of `zzz`", parts[1]) + } + } else if strings.HasPrefix(parts[0], "env") { + envCount++ + if parts[1] != "zzz" { + c.Fatalf("%s should be 'zzz' but instead its %q", parts[0], parts[1]) + } + } else if strings.HasPrefix(parts[0], "env") { + envCount++ + if parts[1] != "foo" { + c.Fatalf("%s should be 'foo' but instead its %q", parts[0], parts[1]) + } + } + } + + if !found { + c.Fatal("Never found the `bar` env variable") + } + + if envCount != 4 { + c.Fatalf("Didn't find all env vars - only saw %d\n%s", envCount, envResult) + } + +} + +func (s *DockerSuite) TestBuildHandleEscapesInVolume(c *check.C) { + // The volume paths used in this test are invalid on Windows + testRequires(c, DaemonIsLinux) + name := "testbuildhandleescapes" + + testCases := []struct { + volumeValue string + expected string + }{ + { + volumeValue: "${FOO}", + expected: "bar", + }, + { + volumeValue: `\${FOO}`, + expected: "${FOO}", + }, + // this test in particular provides *7* backslashes and expects 6 to come back. + // Like above, the first escape is swallowed and the rest are treated as + // literals, this one is just less obvious because of all the character noise. 
+ { + volumeValue: `\\\\\\\${FOO}`, + expected: `\\\${FOO}`, + }, + } + + for _, tc := range testCases { + buildImageSuccessfully(c, name, build.WithDockerfile(fmt.Sprintf(` + FROM scratch + ENV FOO bar + VOLUME %s + `, tc.volumeValue))) + + var result map[string]map[string]struct{} + inspectFieldAndUnmarshall(c, name, "Config.Volumes", &result) + if _, ok := result[tc.expected]; !ok { + c.Fatalf("Could not find volume %s set from env foo in volumes table, got %q", tc.expected, result) + } + + // Remove the image for the next iteration + dockerCmd(c, "rmi", name) + } +} + +func (s *DockerSuite) TestBuildOnBuildLowercase(c *check.C) { + name := "testbuildonbuildlowercase" + name2 := "testbuildonbuildlowercase2" + + buildImageSuccessfully(c, name, build.WithDockerfile(` + FROM busybox + onbuild run echo quux + `)) + + result := buildImage(name2, build.WithDockerfile(fmt.Sprintf(` + FROM %s + `, name))) + result.Assert(c, icmd.Success) + + if !strings.Contains(result.Combined(), "quux") { + c.Fatalf("Did not receive the expected echo text, got %s", result.Combined()) + } + + if strings.Contains(result.Combined(), "ONBUILD ONBUILD") { + c.Fatalf("Got an ONBUILD ONBUILD error with no error: got %s", result.Combined()) + } + +} + +func (s *DockerSuite) TestBuildEnvEscapes(c *check.C) { + // ENV expansions work differently in Windows + testRequires(c, DaemonIsLinux) + name := "testbuildenvescapes" + buildImageSuccessfully(c, name, build.WithDockerfile(` + FROM busybox + ENV TEST foo + CMD echo \$ + `)) + + out, _ := dockerCmd(c, "run", "-t", name) + if strings.TrimSpace(out) != "$" { + c.Fatalf("Env TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out)) + } + +} + +func (s *DockerSuite) TestBuildEnvOverwrite(c *check.C) { + // ENV expansions work differently in Windows + testRequires(c, DaemonIsLinux) + name := "testbuildenvoverwrite" + buildImageSuccessfully(c, name, build.WithDockerfile(` + FROM busybox + ENV TEST foo + CMD echo ${TEST} + `)) + + out, _ := dockerCmd(c, "run", "-e", "TEST=bar", "-t", name) + if strings.TrimSpace(out) != "bar" { + c.Fatalf("Env TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out)) + } + +} + +// FIXME(vdemeester) why we disabled cache here ? +func (s *DockerSuite) TestBuildOnBuildCmdEntrypointJSON(c *check.C) { + name1 := "onbuildcmd" + name2 := "onbuildgenerated" + + cli.BuildCmd(c, name1, build.WithDockerfile(` +FROM busybox +ONBUILD CMD ["hello world"] +ONBUILD ENTRYPOINT ["echo"] +ONBUILD RUN ["true"]`)) + + cli.BuildCmd(c, name2, build.WithDockerfile(fmt.Sprintf(`FROM %s`, name1))) + + result := cli.DockerCmd(c, "run", name2) + result.Assert(c, icmd.Expected{Out: "hello world"}) +} + +// FIXME(vdemeester) why we disabled cache here ? 
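+// The ONBUILD ENTRYPOINT recorded in the parent image fires when the child image is
+// built FROM it, so running the child image prints "hello world".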
+func (s *DockerSuite) TestBuildOnBuildEntrypointJSON(c *check.C) { + name1 := "onbuildcmd" + name2 := "onbuildgenerated" + + buildImageSuccessfully(c, name1, build.WithDockerfile(` +FROM busybox +ONBUILD ENTRYPOINT ["echo"]`)) + + buildImageSuccessfully(c, name2, build.WithDockerfile(fmt.Sprintf("FROM %s\nCMD [\"hello world\"]\n", name1))) + + out, _ := dockerCmd(c, "run", name2) + if !regexp.MustCompile(`(?m)^hello world`).MatchString(out) { + c.Fatal("got malformed output from onbuild", out) + } + +} + +func (s *DockerSuite) TestBuildCacheAdd(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows doesn't have httpserver image yet + name := "testbuildtwoimageswithadd" + server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{ + "robots.txt": "hello", + "index.html": "world", + })) + defer server.Close() + + cli.BuildCmd(c, name, build.WithDockerfile(fmt.Sprintf(`FROM scratch + ADD %s/robots.txt /`, server.URL()))) + + result := cli.Docker(cli.Build(name), build.WithDockerfile(fmt.Sprintf(`FROM scratch + ADD %s/index.html /`, server.URL()))) + result.Assert(c, icmd.Success) + if strings.Contains(result.Combined(), "Using cache") { + c.Fatal("2nd build used cache on ADD, it shouldn't") + } +} + +func (s *DockerSuite) TestBuildLastModified(c *check.C) { + // Temporary fix for #30890. TODO @jhowardmsft figure out what + // has changed in the master busybox image. + testRequires(c, DaemonIsLinux) + + name := "testbuildlastmodified" + + server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{ + "file": "hello", + })) + defer server.Close() + + var out, out2 string + + dFmt := `FROM busybox +ADD %s/file /` + dockerfile := fmt.Sprintf(dFmt, server.URL()) + + cli.BuildCmd(c, name, build.WithoutCache, build.WithDockerfile(dockerfile)) + out = cli.DockerCmd(c, "run", name, "ls", "-le", "/file").Combined() + + // Build it again and make sure the mtime of the file didn't change. + // Wait a few seconds to make sure the time changed enough to notice + time.Sleep(2 * time.Second) + + cli.BuildCmd(c, name, build.WithoutCache, build.WithDockerfile(dockerfile)) + out2 = cli.DockerCmd(c, "run", name, "ls", "-le", "/file").Combined() + + if out != out2 { + c.Fatalf("MTime changed:\nOrigin:%s\nNew:%s", out, out2) + } + + // Now 'touch' the file and make sure the timestamp DID change this time + // Create a new fakeStorage instead of just using Add() to help windows + server = fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{ + "file": "hello", + })) + defer server.Close() + + dockerfile = fmt.Sprintf(dFmt, server.URL()) + cli.BuildCmd(c, name, build.WithoutCache, build.WithDockerfile(dockerfile)) + out2 = cli.DockerCmd(c, "run", name, "ls", "-le", "/file").Combined() + + if out == out2 { + c.Fatalf("MTime didn't change:\nOrigin:%s\nNew:%s", out, out2) + } + +} + +// Regression for https://github.com/docker/docker/pull/27805 +// Makes sure that we don't use the cache if the contents of +// a file in a subfolder of the context is modified and we re-build. 
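+// The test builds the same context twice, changing folder/file in between, and
+// expects the two image IDs to differ.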
+func (s *DockerSuite) TestBuildModifyFileInFolder(c *check.C) { + name := "testbuildmodifyfileinfolder" + + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(`FROM busybox +RUN ["mkdir", "/test"] +ADD folder/file /test/changetarget`)) + defer ctx.Close() + if err := ctx.Add("folder/file", "first"); err != nil { + c.Fatal(err) + } + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) + id1 := getIDByName(c, name) + if err := ctx.Add("folder/file", "second"); err != nil { + c.Fatal(err) + } + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) + id2 := getIDByName(c, name) + if id1 == id2 { + c.Fatal("cache was used even though file contents in folder was changed") + } +} + +func (s *DockerSuite) TestBuildAddSingleFileToRoot(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + buildImageSuccessfully(c, "testaddimg", build.WithBuildContext(c, + build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +ADD test_file / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_file | awk '{print $1}') = '%s' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod)), + build.WithFile("test_file", "test1"))) +} + +// Issue #3960: "ADD src ." hangs +func (s *DockerSuite) TestBuildAddSingleFileToWorkdir(c *check.C) { + name := "testaddsinglefiletoworkdir" + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile( + `FROM busybox + ADD test_file .`), + fakecontext.WithFiles(map[string]string{ + "test_file": "test1", + })) + defer ctx.Close() + + errChan := make(chan error) + go func() { + errChan <- buildImage(name, build.WithExternalBuildContext(ctx)).Error + close(errChan) + }() + select { + case <-time.After(15 * time.Second): + c.Fatal("Build with adding to workdir timed out") + case err := <-errChan: + c.Assert(err, check.IsNil) + } +} + +func (s *DockerSuite) TestBuildAddSingleFileToExistDir(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + cli.BuildCmd(c, "testaddsinglefiletoexistdir", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +ADD test_file /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`), + build.WithFile("test_file", "test1"))) +} + +func (s *DockerSuite) TestBuildCopyAddMultipleFiles(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{ + "robots.txt": "hello", + })) + defer server.Close() + + cli.BuildCmd(c, "testcopymultiplefilestofile", build.WithBuildContext(c, + build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +COPY test_file1 test_file2 /exists/ +ADD test_file3 test_file4 %s/robots.txt /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 
'dockerio:dockerio' ]
+RUN [ $(ls -l /exists/test_file1 | awk '{print $3":"$4}') = 'root:root' ]
+RUN [ $(ls -l /exists/test_file2 | awk '{print $3":"$4}') = 'root:root' ]
+RUN [ $(ls -l /exists/test_file3 | awk '{print $3":"$4}') = 'root:root' ]
+RUN [ $(ls -l /exists/test_file4 | awk '{print $3":"$4}') = 'root:root' ]
+RUN [ $(ls -l /exists/robots.txt | awk '{print $3":"$4}') = 'root:root' ]
+RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
+`, server.URL())),
+		build.WithFile("test_file1", "test1"),
+		build.WithFile("test_file2", "test2"),
+		build.WithFile("test_file3", "test3"),
+		build.WithFile("test_file4", "test4")))
+}
+
+// These tests are mainly for user namespaces to verify that new directories
+// are created as the remapped root uid/gid pair
+func (s *DockerSuite) TestBuildUsernamespaceValidateRemappedRoot(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	testCases := []string{
+		"ADD . /new_dir",
+		"COPY test_dir /new_dir",
+		"WORKDIR /new_dir",
+	}
+	name := "testbuildusernamespacevalidateremappedroot"
+	for _, tc := range testCases {
+		cli.BuildCmd(c, name, build.WithBuildContext(c,
+			build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox
+%s
+RUN [ $(ls -l / | grep new_dir | awk '{print $3":"$4}') = 'root:root' ]`, tc)),
+			build.WithFile("test_dir/test_file", "test file")))
+
+		cli.DockerCmd(c, "rmi", name)
+	}
+}
+
+func (s *DockerSuite) TestBuildAddAndCopyFileWithWhitespace(c *check.C) {
+	testRequires(c, DaemonIsLinux) // Not currently passing on Windows
+	name := "testaddfilewithwhitespace"
+
+	for _, command := range []string{"ADD", "COPY"} {
+		cli.BuildCmd(c, name, build.WithBuildContext(c,
+			build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox
+RUN mkdir "/test dir"
+RUN mkdir "/test_dir"
+%s [ "test file1", "/test_file1" ]
+%s [ "test_file2", "/test file2" ]
+%s [ "test file3", "/test file3" ]
+%s [ "test dir/test_file4", "/test_dir/test_file4" ]
+%s [ "test_dir/test_file5", "/test dir/test_file5" ]
+%s [ "test dir/test_file6", "/test dir/test_file6" ]
+RUN [ $(cat "/test_file1") = 'test1' ]
+RUN [ $(cat "/test file2") = 'test2' ]
+RUN [ $(cat "/test file3") = 'test3' ]
+RUN [ $(cat "/test_dir/test_file4") = 'test4' ]
+RUN [ $(cat "/test dir/test_file5") = 'test5' ]
+RUN [ $(cat "/test dir/test_file6") = 'test6' ]`, command, command, command, command, command, command)),
+			build.WithFile("test file1", "test1"),
+			build.WithFile("test_file2", "test2"),
+			build.WithFile("test file3", "test3"),
+			build.WithFile("test dir/test_file4", "test4"),
+			build.WithFile("test_dir/test_file5", "test5"),
+			build.WithFile("test dir/test_file6", "test6"),
+		))
+
+		cli.DockerCmd(c, "rmi", name)
+	}
+}
+
+func (s *DockerSuite) TestBuildCopyFileWithWhitespaceOnWindows(c *check.C) {
+	testRequires(c, DaemonIsWindows)
+	dockerfile := `FROM ` + testEnv.MinimalBaseImage() + `
+RUN mkdir "C:/test dir"
+RUN mkdir "C:/test_dir"
+COPY [ "test file1", "/test_file1" ]
+COPY [ "test_file2", "/test file2" ]
+COPY [ "test file3", "/test file3" ]
+COPY [ "test dir/test_file4", "/test_dir/test_file4" ]
+COPY [ "test_dir/test_file5", "/test dir/test_file5" ]
+COPY [ "test dir/test_file6", "/test dir/test_file6" ]
+RUN find "test1" "C:/test_file1"
+RUN find "test2" "C:/test file2"
+RUN find "test3" "C:/test file3"
+RUN find "test4" "C:/test_dir/test_file4"
+RUN find "test5" "C:/test dir/test_file5"
+RUN find "test6" "C:/test dir/test_file6"`
+
+	name := "testcopyfilewithwhitespace"
+	cli.BuildCmd(c, name,
build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile("test file1", "test1"), + build.WithFile("test_file2", "test2"), + build.WithFile("test file3", "test3"), + build.WithFile("test dir/test_file4", "test4"), + build.WithFile("test_dir/test_file5", "test5"), + build.WithFile("test dir/test_file6", "test6"), + )) +} + +func (s *DockerSuite) TestBuildCopyWildcard(c *check.C) { + name := "testcopywildcard" + server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{ + "robots.txt": "hello", + "index.html": "world", + })) + defer server.Close() + + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(fmt.Sprintf(`FROM busybox + COPY file*.txt /tmp/ + RUN ls /tmp/file1.txt /tmp/file2.txt + RUN [ "mkdir", "/tmp1" ] + COPY dir* /tmp1/ + RUN ls /tmp1/dirt /tmp1/nested_file /tmp1/nested_dir/nest_nest_file + RUN [ "mkdir", "/tmp2" ] + ADD dir/*dir %s/robots.txt /tmp2/ + RUN ls /tmp2/nest_nest_file /tmp2/robots.txt + `, server.URL())), + fakecontext.WithFiles(map[string]string{ + "file1.txt": "test1", + "file2.txt": "test2", + "dir/nested_file": "nested file", + "dir/nested_dir/nest_nest_file": "2 times nested", + "dirt": "dirty", + })) + defer ctx.Close() + + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + id1 := getIDByName(c, name) + + // Now make sure we use a cache the 2nd time + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + id2 := getIDByName(c, name) + + if id1 != id2 { + c.Fatal("didn't use the cache") + } + +} + +func (s *DockerSuite) TestBuildCopyWildcardInName(c *check.C) { + // Run this only on Linux + // Below is the original comment (that I don't agree with — vdemeester) + // Normally we would do c.Fatal(err) here but given that + // the odds of this failing are so rare, it must be because + // the OS we're running the client on doesn't support * in + // filenames (like windows). So, instead of failing the test + // just let it pass. Then we don't need to explicitly + // say which OSs this works on or not. + testRequires(c, DaemonIsLinux, UnixCli) + + buildImageSuccessfully(c, "testcopywildcardinname", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox + COPY *.txt /tmp/ + RUN [ "$(cat /tmp/\*.txt)" = 'hi there' ] + `), + build.WithFile("*.txt", "hi there"), + )) +} + +func (s *DockerSuite) TestBuildCopyWildcardCache(c *check.C) { + name := "testcopywildcardcache" + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(`FROM busybox + COPY file1.txt /tmp/`), + fakecontext.WithFiles(map[string]string{ + "file1.txt": "test1", + })) + defer ctx.Close() + + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) + id1 := getIDByName(c, name) + + // Now make sure we use a cache the 2nd time even with wild cards. 
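+	// For ADD/COPY the cache key comes from the checksum of the files the
+	// source resolves to, not just the instruction text, so switching from
+	// "file1.txt" to the wildcard below can still score a cache hit.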
+ // Use the same context so the file is the same and the checksum will match + ctx.Add("Dockerfile", `FROM busybox + COPY file*.txt /tmp/`) + + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) + id2 := getIDByName(c, name) + + if id1 != id2 { + c.Fatal("didn't use the cache") + } + +} + +func (s *DockerSuite) TestBuildAddSingleFileToNonExistingDir(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + buildImageSuccessfully(c, "testaddsinglefiletononexistingdir", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +ADD test_file /test_dir/ +RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`), + build.WithFile("test_file", "test1"))) +} + +func (s *DockerSuite) TestBuildAddDirContentToRoot(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + buildImageSuccessfully(c, "testadddircontenttoroot", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +ADD test_dir / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`), + build.WithFile("test_dir/test_file", "test1"))) +} + +func (s *DockerSuite) TestBuildAddDirContentToExistingDir(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + buildImageSuccessfully(c, "testadddircontenttoexistingdir", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +ADD test_dir/ /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`), + build.WithFile("test_dir/test_file", "test1"))) +} + +func (s *DockerSuite) TestBuildAddWholeDirToRoot(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + buildImageSuccessfully(c, "testaddwholedirtoroot", build.WithBuildContext(c, + build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +ADD test_dir /test_dir +RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '%s' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod)), + build.WithFile("test_dir/test_file", "test1"))) +} + +// Testing #5941 : Having an etc directory in context conflicts with the /etc/mtab +func (s *DockerSuite) TestBuildAddOrCopyEtcToRootShouldNotConflict(c *check.C) { + buildImageSuccessfully(c, "testaddetctoroot", 
build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM `+minimalBaseImage()+` +ADD . /`), + build.WithFile("etc/test_file", "test1"))) + buildImageSuccessfully(c, "testcopyetctoroot", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM `+minimalBaseImage()+` +COPY . /`), + build.WithFile("etc/test_file", "test1"))) +} + +// Testing #9401 : Losing setuid flag after a ADD +func (s *DockerSuite) TestBuildAddPreservesFilesSpecialBits(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + buildImageSuccessfully(c, "testaddetctoroot", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox +ADD suidbin /usr/bin/suidbin +RUN chmod 4755 /usr/bin/suidbin +RUN [ $(ls -l /usr/bin/suidbin | awk '{print $1}') = '-rwsr-xr-x' ] +ADD ./data/ / +RUN [ $(ls -l /usr/bin/suidbin | awk '{print $1}') = '-rwsr-xr-x' ]`), + build.WithFile("suidbin", "suidbin"), + build.WithFile("/data/usr/test_file", "test1"))) +} + +func (s *DockerSuite) TestBuildCopySingleFileToRoot(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + buildImageSuccessfully(c, "testcopysinglefiletoroot", build.WithBuildContext(c, + build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +COPY test_file / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_file | awk '{print $1}') = '%s' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod)), + build.WithFile("test_file", "test1"))) +} + +// Issue #3960: "ADD src ." hangs - adapted for COPY +func (s *DockerSuite) TestBuildCopySingleFileToWorkdir(c *check.C) { + name := "testcopysinglefiletoworkdir" + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(`FROM busybox +COPY test_file .`), + fakecontext.WithFiles(map[string]string{ + "test_file": "test1", + })) + defer ctx.Close() + + errChan := make(chan error) + go func() { + errChan <- buildImage(name, build.WithExternalBuildContext(ctx)).Error + close(errChan) + }() + select { + case <-time.After(15 * time.Second): + c.Fatal("Build with adding to workdir timed out") + case err := <-errChan: + c.Assert(err, check.IsNil) + } +} + +func (s *DockerSuite) TestBuildCopySingleFileToExistDir(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + buildImageSuccessfully(c, "testcopysinglefiletoexistdir", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +COPY test_file /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`), + build.WithFile("test_file", "test1"))) +} + +func (s *DockerSuite) TestBuildCopySingleFileToNonExistDir(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific + buildImageSuccessfully(c, "testcopysinglefiletononexistdir", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +COPY test_file /test_dir/ +RUN [ $(ls -l 
/ | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`), + build.WithFile("test_file", "test1"))) +} + +func (s *DockerSuite) TestBuildCopyDirContentToRoot(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + buildImageSuccessfully(c, "testcopydircontenttoroot", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +COPY test_dir / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`), + build.WithFile("test_dir/test_file", "test1"))) +} + +func (s *DockerSuite) TestBuildCopyDirContentToExistDir(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + buildImageSuccessfully(c, "testcopydircontenttoexistdir", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +COPY test_dir/ /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`), + build.WithFile("test_dir/test_file", "test1"))) +} + +func (s *DockerSuite) TestBuildCopyWholeDirToRoot(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + buildImageSuccessfully(c, "testcopywholedirtoroot", build.WithBuildContext(c, + build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +COPY test_dir /test_dir +RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '%s' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod)), + build.WithFile("test_dir/test_file", "test1"))) +} + +func (s *DockerSuite) TestBuildAddBadLinks(c *check.C) { + testRequires(c, DaemonIsLinux) // Not currently working on Windows + + dockerfile := ` + FROM scratch + ADD links.tar / + ADD foo.txt /symlink/ + ` + targetFile := "foo.txt" + var ( + name = "test-link-absolute" + ) + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile)) + defer ctx.Close() + + tempDir, err := ioutil.TempDir("", "test-link-absolute-temp-") + if err != nil { + c.Fatalf("failed to create temporary directory: %s", tempDir) + } + defer os.RemoveAll(tempDir) + + var symlinkTarget string + if runtime.GOOS == "windows" { + var driveLetter string + if abs, err := filepath.Abs(tempDir); err != nil { + c.Fatal(err) + } else { + driveLetter = abs[:1] + } + tempDirWithoutDrive := tempDir[2:] + symlinkTarget = fmt.Sprintf(`%s:\..\..\..\..\..\..\..\..\..\..\..\..%s`, driveLetter, tempDirWithoutDrive) + } else { + symlinkTarget = fmt.Sprintf("/../../../../../../../../../../../..%s", tempDir) + } + + tarPath := 
filepath.Join(ctx.Dir, "links.tar") + nonExistingFile := filepath.Join(tempDir, targetFile) + fooPath := filepath.Join(ctx.Dir, targetFile) + + tarOut, err := os.Create(tarPath) + if err != nil { + c.Fatal(err) + } + + tarWriter := tar.NewWriter(tarOut) + + header := &tar.Header{ + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: symlinkTarget, + Mode: 0755, + Uid: 0, + Gid: 0, + } + + err = tarWriter.WriteHeader(header) + if err != nil { + c.Fatal(err) + } + + tarWriter.Close() + tarOut.Close() + + foo, err := os.Create(fooPath) + if err != nil { + c.Fatal(err) + } + defer foo.Close() + + if _, err := foo.WriteString("test"); err != nil { + c.Fatal(err) + } + + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) + if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) { + c.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile) + } + +} + +func (s *DockerSuite) TestBuildAddBadLinksVolume(c *check.C) { + testRequires(c, DaemonIsLinux) // ln not implemented on Windows busybox + const ( + dockerfileTemplate = ` + FROM busybox + RUN ln -s /../../../../../../../../%s /x + VOLUME /x + ADD foo.txt /x/` + targetFile = "foo.txt" + ) + var ( + name = "test-link-absolute-volume" + dockerfile = "" + ) + + tempDir, err := ioutil.TempDir("", "test-link-absolute-volume-temp-") + if err != nil { + c.Fatalf("failed to create temporary directory: %s", tempDir) + } + defer os.RemoveAll(tempDir) + + dockerfile = fmt.Sprintf(dockerfileTemplate, tempDir) + nonExistingFile := filepath.Join(tempDir, targetFile) + + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile)) + defer ctx.Close() + fooPath := filepath.Join(ctx.Dir, targetFile) + + foo, err := os.Create(fooPath) + if err != nil { + c.Fatal(err) + } + defer foo.Close() + + if _, err := foo.WriteString("test"); err != nil { + c.Fatal(err) + } + + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) + if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) { + c.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile) + } + +} + +// Issue #5270 - ensure we throw a better error than "unexpected EOF" +// when we can't access files in the context. +func (s *DockerSuite) TestBuildWithInaccessibleFilesInContext(c *check.C) { + testRequires(c, DaemonIsLinux, UnixCli) // test uses chown/chmod: not available on windows + + { + name := "testbuildinaccessiblefiles" + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile("FROM scratch\nADD . 
/foo/"), + fakecontext.WithFiles(map[string]string{"fileWithoutReadAccess": "foo"}), + ) + defer ctx.Close() + // This is used to ensure we detect inaccessible files early during build in the cli client + pathToFileWithoutReadAccess := filepath.Join(ctx.Dir, "fileWithoutReadAccess") + + if err := os.Chown(pathToFileWithoutReadAccess, 0, 0); err != nil { + c.Fatalf("failed to chown file to root: %s", err) + } + if err := os.Chmod(pathToFileWithoutReadAccess, 0700); err != nil { + c.Fatalf("failed to chmod file to 700: %s", err) + } + result := icmd.RunCmd(icmd.Cmd{ + Command: []string{"su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)}, + Dir: ctx.Dir, + }) + if result.Error == nil { + c.Fatalf("build should have failed: %s %s", result.Error, result.Combined()) + } + + // check if we've detected the failure before we started building + if !strings.Contains(result.Combined(), "no permission to read from ") { + c.Fatalf("output should've contained the string: no permission to read from but contained: %s", result.Combined()) + } + + if !strings.Contains(result.Combined(), "error checking context") { + c.Fatalf("output should've contained the string: error checking context") + } + } + { + name := "testbuildinaccessibledirectory" + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile("FROM scratch\nADD . /foo/"), + fakecontext.WithFiles(map[string]string{"directoryWeCantStat/bar": "foo"}), + ) + defer ctx.Close() + // This is used to ensure we detect inaccessible directories early during build in the cli client + pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat") + pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar") + + if err := os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil { + c.Fatalf("failed to chown directory to root: %s", err) + } + if err := os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil { + c.Fatalf("failed to chmod directory to 444: %s", err) + } + if err := os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil { + c.Fatalf("failed to chmod file to 700: %s", err) + } + + result := icmd.RunCmd(icmd.Cmd{ + Command: []string{"su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)}, + Dir: ctx.Dir, + }) + if result.Error == nil { + c.Fatalf("build should have failed: %s %s", result.Error, result.Combined()) + } + + // check if we've detected the failure before we started building + if !strings.Contains(result.Combined(), "can't stat") { + c.Fatalf("output should've contained the string: can't access %s", result.Combined()) + } + + if !strings.Contains(result.Combined(), "error checking context") { + c.Fatalf("output should've contained the string: error checking context\ngot:%s", result.Combined()) + } + + } + { + name := "testlinksok" + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile("FROM scratch\nADD . 
/foo/")) + defer ctx.Close() + + target := "../../../../../../../../../../../../../../../../../../../azA" + if err := os.Symlink(filepath.Join(ctx.Dir, "g"), target); err != nil { + c.Fatal(err) + } + defer os.Remove(target) + // This is used to ensure we don't follow links when checking if everything in the context is accessible + // This test doesn't require that we run commands as an unprivileged user + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) + } + { + name := "testbuildignoredinaccessible" + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile("FROM scratch\nADD . /foo/"), + fakecontext.WithFiles(map[string]string{ + "directoryWeCantStat/bar": "foo", + ".dockerignore": "directoryWeCantStat", + }), + ) + defer ctx.Close() + // This is used to ensure we don't try to add inaccessible files when they are ignored by a .dockerignore pattern + pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat") + pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar") + if err := os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil { + c.Fatalf("failed to chown directory to root: %s", err) + } + if err := os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil { + c.Fatalf("failed to chmod directory to 444: %s", err) + } + if err := os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil { + c.Fatalf("failed to chmod file to 700: %s", err) + } + + result := icmd.RunCmd(icmd.Cmd{ + Dir: ctx.Dir, + Command: []string{"su", "unprivilegeduser", "-c", + fmt.Sprintf("%s build -t %s .", dockerBinary, name)}, + }) + result.Assert(c, icmd.Expected{}) + } +} + +func (s *DockerSuite) TestBuildForceRm(c *check.C) { + containerCountBefore := getContainerCount(c) + name := "testbuildforcerm" + + buildImage(name, cli.WithFlags("--force-rm"), build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM `+minimalBaseImage()+` + RUN true + RUN thiswillfail`))).Assert(c, icmd.Expected{ + ExitCode: 1, + }) + + containerCountAfter := getContainerCount(c) + if containerCountBefore != containerCountAfter { + c.Fatalf("--force-rm shouldn't have left containers behind") + } + +} + +func (s *DockerSuite) TestBuildRm(c *check.C) { + name := "testbuildrm" + + testCases := []struct { + buildflags []string + shouldLeftContainerBehind bool + }{ + // Default case (i.e. 
--rm=true) + { + buildflags: []string{}, + shouldLeftContainerBehind: false, + }, + { + buildflags: []string{"--rm"}, + shouldLeftContainerBehind: false, + }, + { + buildflags: []string{"--rm=false"}, + shouldLeftContainerBehind: true, + }, + } + + for _, tc := range testCases { + containerCountBefore := getContainerCount(c) + + buildImageSuccessfully(c, name, cli.WithFlags(tc.buildflags...), build.WithDockerfile(`FROM busybox + RUN echo hello world`)) + + containerCountAfter := getContainerCount(c) + if tc.shouldLeftContainerBehind { + if containerCountBefore == containerCountAfter { + c.Fatalf("flags %v should have left containers behind", tc.buildflags) + } + } else { + if containerCountBefore != containerCountAfter { + c.Fatalf("flags %v shouldn't have left containers behind", tc.buildflags) + } + } + + dockerCmd(c, "rmi", name) + } +} + +func (s *DockerSuite) TestBuildWithVolumes(c *check.C) { + testRequires(c, DaemonIsLinux) // Invalid volume paths on Windows + var ( + result map[string]map[string]struct{} + name = "testbuildvolumes" + emptyMap = make(map[string]struct{}) + expected = map[string]map[string]struct{}{ + "/test1": emptyMap, + "/test2": emptyMap, + "/test3": emptyMap, + "/test4": emptyMap, + "/test5": emptyMap, + "/test6": emptyMap, + "[/test7": emptyMap, + "/test8]": emptyMap, + } + ) + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM scratch + VOLUME /test1 + VOLUME /test2 + VOLUME /test3 /test4 + VOLUME ["/test5", "/test6"] + VOLUME [/test7 /test8] + `)) + + inspectFieldAndUnmarshall(c, name, "Config.Volumes", &result) + + equal := reflect.DeepEqual(&result, &expected) + if !equal { + c.Fatalf("Volumes %s, expected %s", result, expected) + } + +} + +func (s *DockerSuite) TestBuildMaintainer(c *check.C) { + name := "testbuildmaintainer" + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+` + MAINTAINER dockerio`)) + + expected := "dockerio" + res := inspectField(c, name, "Author") + if res != expected { + c.Fatalf("Maintainer %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildUser(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuilduser" + expected := "dockerio" + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd + USER dockerio + RUN [ $(whoami) = 'dockerio' ]`)) + res := inspectField(c, name, "Config.User") + if res != expected { + c.Fatalf("User %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildRelativeWorkdir(c *check.C) { + name := "testbuildrelativeworkdir" + + var ( + expected1 string + expected2 string + expected3 string + expected4 string + expectedFinal string + ) + + if testEnv.DaemonPlatform() == "windows" { + expected1 = `C:/` + expected2 = `C:/test1` + expected3 = `C:/test2` + expected4 = `C:/test2/test3` + expectedFinal = `C:\test2\test3` // Note inspect is going to return Windows paths, as it's not in busybox + } else { + expected1 = `/` + expected2 = `/test1` + expected3 = `/test2` + expected4 = `/test2/test3` + expectedFinal = `/test2/test3` + } + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + RUN sh -c "[ "$PWD" = "`+expected1+`" ]" + WORKDIR test1 + RUN sh -c "[ "$PWD" = "`+expected2+`" ]" + WORKDIR /test2 + RUN sh -c "[ "$PWD" = "`+expected3+`" ]" + WORKDIR test3 + RUN sh -c "[ "$PWD" = "`+expected4+`" ]"`)) + + res := inspectField(c, name, "Config.WorkingDir") + if res != expectedFinal { + c.Fatalf("Workdir %s, expected %s", res, 
expectedFinal) + } +} + +// #22181 Regression test. Single end-to-end test of using +// Windows semantics. Most path handling verifications are in unit tests +func (s *DockerSuite) TestBuildWindowsWorkdirProcessing(c *check.C) { + testRequires(c, DaemonIsWindows) + buildImageSuccessfully(c, "testbuildwindowsworkdirprocessing", build.WithDockerfile(`FROM busybox + WORKDIR C:\\foo + WORKDIR bar + RUN sh -c "[ "$PWD" = "C:/foo/bar" ]" + `)) +} + +// #22181 Regression test. Most paths handling verifications are in unit test. +// One functional test for end-to-end +func (s *DockerSuite) TestBuildWindowsAddCopyPathProcessing(c *check.C) { + testRequires(c, DaemonIsWindows) + // TODO Windows (@jhowardmsft). Needs a follow-up PR to 22181 to + // support backslash such as .\\ being equivalent to ./ and c:\\ being + // equivalent to c:/. This is not currently (nor ever has been) supported + // by docker on the Windows platform. + buildImageSuccessfully(c, "testbuildwindowsaddcopypathprocessing", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox + # No trailing slash on COPY/ADD + # Results in dir being changed to a file + WORKDIR /wc1 + COPY wc1 c:/wc1 + WORKDIR /wc2 + ADD wc2 c:/wc2 + WORKDIR c:/ + RUN sh -c "[ $(cat c:/wc1/wc1) = 'hellowc1' ]" + RUN sh -c "[ $(cat c:/wc2/wc2) = 'worldwc2' ]" + + # Trailing slash on COPY/ADD, Windows-style path. + WORKDIR /wd1 + COPY wd1 c:/wd1/ + WORKDIR /wd2 + ADD wd2 c:/wd2/ + RUN sh -c "[ $(cat c:/wd1/wd1) = 'hellowd1' ]" + RUN sh -c "[ $(cat c:/wd2/wd2) = 'worldwd2' ]" + `), + build.WithFile("wc1", "hellowc1"), + build.WithFile("wc2", "worldwc2"), + build.WithFile("wd1", "hellowd1"), + build.WithFile("wd2", "worldwd2"), + )) +} + +func (s *DockerSuite) TestBuildWorkdirWithEnvVariables(c *check.C) { + name := "testbuildworkdirwithenvvariables" + + var expected string + if testEnv.DaemonPlatform() == "windows" { + expected = `C:\test1\test2` + } else { + expected = `/test1/test2` + } + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + ENV DIRPATH /test1 + ENV SUBDIRNAME test2 + WORKDIR $DIRPATH + WORKDIR $SUBDIRNAME/$MISSING_VAR`)) + res := inspectField(c, name, "Config.WorkingDir") + if res != expected { + c.Fatalf("Workdir %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildRelativeCopy(c *check.C) { + // cat /test1/test2/foo gets permission denied for the user + testRequires(c, NotUserNamespace) + + var expected string + if testEnv.DaemonPlatform() == "windows" { + expected = `C:/test1/test2` + } else { + expected = `/test1/test2` + } + + buildImageSuccessfully(c, "testbuildrelativecopy", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox + WORKDIR /test1 + WORKDIR test2 + RUN sh -c "[ "$PWD" = '`+expected+`' ]" + COPY foo ./ + RUN sh -c "[ $(cat /test1/test2/foo) = 'hello' ]" + ADD foo ./bar/baz + RUN sh -c "[ $(cat /test1/test2/bar/baz) = 'hello' ]" + COPY foo ./bar/baz2 + RUN sh -c "[ $(cat /test1/test2/bar/baz2) = 'hello' ]" + WORKDIR .. + COPY foo ./ + RUN sh -c "[ $(cat /test1/foo) = 'hello' ]" + COPY foo /test3/ + RUN sh -c "[ $(cat /test3/foo) = 'hello' ]" + WORKDIR /test4 + COPY . . 
+ RUN sh -c "[ $(cat /test4/foo) = 'hello' ]" + WORKDIR /test5/test6 + COPY foo ../ + RUN sh -c "[ $(cat /test5/foo) = 'hello' ]" + `), + build.WithFile("foo", "hello"), + )) +} + +func (s *DockerSuite) TestBuildBlankName(c *check.C) { + name := "testbuildblankname" + testCases := []struct { + expression string + expectedStderr string + }{ + { + expression: "ENV =", + expectedStderr: "ENV names can not be blank", + }, + { + expression: "LABEL =", + expectedStderr: "LABEL names can not be blank", + }, + { + expression: "ARG =foo", + expectedStderr: "ARG names can not be blank", + }, + } + + for _, tc := range testCases { + buildImage(name, build.WithDockerfile(fmt.Sprintf(`FROM busybox + %s`, tc.expression))).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: tc.expectedStderr, + }) + } +} + +func (s *DockerSuite) TestBuildEnv(c *check.C) { + testRequires(c, DaemonIsLinux) // ENV expansion is different in Windows + name := "testbuildenv" + expected := "[PATH=/test:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin PORT=2375]" + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + ENV PATH /test:$PATH + ENV PORT 2375 + RUN [ $(env | grep PORT) = 'PORT=2375' ]`)) + res := inspectField(c, name, "Config.Env") + if res != expected { + c.Fatalf("Env %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildPATH(c *check.C) { + testRequires(c, DaemonIsLinux) // ENV expansion is different in Windows + + defPath := "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + + fn := func(dockerfile string, expected string) { + buildImageSuccessfully(c, "testbldpath", build.WithDockerfile(dockerfile)) + res := inspectField(c, "testbldpath", "Config.Env") + if res != expected { + c.Fatalf("Env %q, expected %q for dockerfile:%q", res, expected, dockerfile) + } + } + + tests := []struct{ dockerfile, exp string }{ + {"FROM scratch\nMAINTAINER me", "[PATH=" + defPath + "]"}, + {"FROM busybox\nMAINTAINER me", "[PATH=" + defPath + "]"}, + {"FROM scratch\nENV FOO=bar", "[PATH=" + defPath + " FOO=bar]"}, + {"FROM busybox\nENV FOO=bar", "[PATH=" + defPath + " FOO=bar]"}, + {"FROM scratch\nENV PATH=/test", "[PATH=/test]"}, + {"FROM busybox\nENV PATH=/test", "[PATH=/test]"}, + {"FROM scratch\nENV PATH=''", "[PATH=]"}, + {"FROM busybox\nENV PATH=''", "[PATH=]"}, + } + + for _, test := range tests { + fn(test.dockerfile, test.exp) + } +} + +func (s *DockerSuite) TestBuildContextCleanup(c *check.C) { + testRequires(c, SameHostDaemon) + + name := "testbuildcontextcleanup" + entries, err := ioutil.ReadDir(filepath.Join(testEnv.DockerBasePath(), "tmp")) + if err != nil { + c.Fatalf("failed to list contents of tmp dir: %s", err) + } + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+` + ENTRYPOINT ["/bin/echo"]`)) + + entriesFinal, err := ioutil.ReadDir(filepath.Join(testEnv.DockerBasePath(), "tmp")) + if err != nil { + c.Fatalf("failed to list contents of tmp dir: %s", err) + } + if err = testutil.CompareDirectoryEntries(entries, entriesFinal); err != nil { + c.Fatalf("context should have been deleted, but wasn't") + } + +} + +func (s *DockerSuite) TestBuildContextCleanupFailedBuild(c *check.C) { + testRequires(c, SameHostDaemon) + + name := "testbuildcontextcleanup" + entries, err := ioutil.ReadDir(filepath.Join(testEnv.DockerBasePath(), "tmp")) + if err != nil { + c.Fatalf("failed to list contents of tmp dir: %s", err) + } + + buildImage(name, build.WithDockerfile(`FROM `+minimalBaseImage()+` + RUN /non/existing/command`)).Assert(c, 
icmd.Expected{ + ExitCode: 1, + }) + + entriesFinal, err := ioutil.ReadDir(filepath.Join(testEnv.DockerBasePath(), "tmp")) + if err != nil { + c.Fatalf("failed to list contents of tmp dir: %s", err) + } + if err = testutil.CompareDirectoryEntries(entries, entriesFinal); err != nil { + c.Fatalf("context should have been deleted, but wasn't") + } + +} + +func (s *DockerSuite) TestBuildCmd(c *check.C) { + name := "testbuildcmd" + expected := "[/bin/echo Hello World]" + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+` + CMD ["/bin/echo", "Hello World"]`)) + + res := inspectField(c, name, "Config.Cmd") + if res != expected { + c.Fatalf("Cmd %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildExpose(c *check.C) { + testRequires(c, DaemonIsLinux) // Expose not implemented on Windows + name := "testbuildexpose" + expected := "map[2375/tcp:{}]" + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM scratch + EXPOSE 2375`)) + + res := inspectField(c, name, "Config.ExposedPorts") + if res != expected { + c.Fatalf("Exposed ports %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildExposeMorePorts(c *check.C) { + testRequires(c, DaemonIsLinux) // Expose not implemented on Windows + // start building docker file with a large number of ports + portList := make([]string, 50) + line := make([]string, 100) + expectedPorts := make([]int, len(portList)*len(line)) + for i := 0; i < len(portList); i++ { + for j := 0; j < len(line); j++ { + p := i*len(line) + j + 1 + line[j] = strconv.Itoa(p) + expectedPorts[p-1] = p + } + if i == len(portList)-1 { + portList[i] = strings.Join(line, " ") + } else { + portList[i] = strings.Join(line, " ") + ` \` + } + } + + dockerfile := `FROM scratch + EXPOSE {{range .}} {{.}} + {{end}}` + tmpl := template.Must(template.New("dockerfile").Parse(dockerfile)) + buf := bytes.NewBuffer(nil) + tmpl.Execute(buf, portList) + + name := "testbuildexpose" + buildImageSuccessfully(c, name, build.WithDockerfile(buf.String())) + + // check if all the ports are saved inside Config.ExposedPorts + res := inspectFieldJSON(c, name, "Config.ExposedPorts") + var exposedPorts map[string]interface{} + if err := json.Unmarshal([]byte(res), &exposedPorts); err != nil { + c.Fatal(err) + } + + for _, p := range expectedPorts { + ep := fmt.Sprintf("%d/tcp", p) + if _, ok := exposedPorts[ep]; !ok { + c.Errorf("Port(%s) is not exposed", ep) + } else { + delete(exposedPorts, ep) + } + } + if len(exposedPorts) != 0 { + c.Errorf("Unexpected extra exposed ports %v", exposedPorts) + } +} + +func (s *DockerSuite) TestBuildExposeOrder(c *check.C) { + testRequires(c, DaemonIsLinux) // Expose not implemented on Windows + buildID := func(name, exposed string) string { + buildImageSuccessfully(c, name, build.WithDockerfile(fmt.Sprintf(`FROM scratch + EXPOSE %s`, exposed))) + id := inspectField(c, name, "Id") + return id + } + + id1 := buildID("testbuildexpose1", "80 2375") + id2 := buildID("testbuildexpose2", "2375 80") + if id1 != id2 { + c.Errorf("EXPOSE should invalidate the cache only when ports actually changed") + } +} + +func (s *DockerSuite) TestBuildExposeUpperCaseProto(c *check.C) { + testRequires(c, DaemonIsLinux) // Expose not implemented on Windows + name := "testbuildexposeuppercaseproto" + expected := "map[5678/udp:{}]" + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM scratch + EXPOSE 5678/UDP`)) + res := inspectField(c, name, "Config.ExposedPorts") + if res != expected { + c.Fatalf("Exposed ports %s, 
expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildEmptyEntrypointInheritance(c *check.C) { + name := "testbuildentrypointinheritance" + name2 := "testbuildentrypointinheritance2" + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + ENTRYPOINT ["/bin/echo"]`)) + res := inspectField(c, name, "Config.Entrypoint") + + expected := "[/bin/echo]" + if res != expected { + c.Fatalf("Entrypoint %s, expected %s", res, expected) + } + + buildImageSuccessfully(c, name2, build.WithDockerfile(fmt.Sprintf(`FROM %s + ENTRYPOINT []`, name))) + res = inspectField(c, name2, "Config.Entrypoint") + + expected = "[]" + if res != expected { + c.Fatalf("Entrypoint %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildEmptyEntrypoint(c *check.C) { + name := "testbuildentrypoint" + expected := "[]" + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + ENTRYPOINT []`)) + + res := inspectField(c, name, "Config.Entrypoint") + if res != expected { + c.Fatalf("Entrypoint %s, expected %s", res, expected) + } + +} + +func (s *DockerSuite) TestBuildEntrypoint(c *check.C) { + name := "testbuildentrypoint" + + expected := "[/bin/echo]" + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+` + ENTRYPOINT ["/bin/echo"]`)) + + res := inspectField(c, name, "Config.Entrypoint") + if res != expected { + c.Fatalf("Entrypoint %s, expected %s", res, expected) + } + +} + +// #6445 ensure ONBUILD triggers aren't committed to grandchildren +func (s *DockerSuite) TestBuildOnBuildLimitedInheritance(c *check.C) { + buildImageSuccessfully(c, "testonbuildtrigger1", build.WithDockerfile(` + FROM busybox + RUN echo "GRANDPARENT" + ONBUILD RUN echo "ONBUILD PARENT" + `)) + // ONBUILD should be run in second build. + buildImage("testonbuildtrigger2", build.WithDockerfile("FROM testonbuildtrigger1")).Assert(c, icmd.Expected{ + Out: "ONBUILD PARENT", + }) + // ONBUILD should *not* be run in third build. 
+ result := buildImage("testonbuildtrigger3", build.WithDockerfile("FROM testonbuildtrigger2")) + result.Assert(c, icmd.Success) + if strings.Contains(result.Combined(), "ONBUILD PARENT") { + c.Fatalf("ONBUILD instruction ran in grandchild of ONBUILD parent") + } +} + +func (s *DockerSuite) TestBuildSameDockerfileWithAndWithoutCache(c *check.C) { + testRequires(c, DaemonIsLinux) // Expose not implemented on Windows + name := "testbuildwithcache" + dockerfile := `FROM scratch + MAINTAINER dockerio + EXPOSE 5432 + ENTRYPOINT ["/bin/echo"]` + buildImageSuccessfully(c, name, build.WithDockerfile(dockerfile)) + id1 := getIDByName(c, name) + buildImageSuccessfully(c, name, build.WithDockerfile(dockerfile)) + id2 := getIDByName(c, name) + buildImageSuccessfully(c, name, build.WithoutCache, build.WithDockerfile(dockerfile)) + id3 := getIDByName(c, name) + if id1 != id2 { + c.Fatal("The cache should have been used but hasn't.") + } + if id1 == id3 { + c.Fatal("The cache should have been invalided but hasn't.") + } +} + +// Make sure that ADD/COPY still populate the cache even if they don't use it +func (s *DockerSuite) TestBuildConditionalCache(c *check.C) { + name := "testbuildconditionalcache" + + dockerfile := ` + FROM busybox + ADD foo /tmp/` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "foo": "hello", + })) + defer ctx.Close() + + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + id1 := getIDByName(c, name) + + if err := ctx.Add("foo", "bye"); err != nil { + c.Fatalf("Error modifying foo: %s", err) + } + + // Updating a file should invalidate the cache + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + id2 := getIDByName(c, name) + if id2 == id1 { + c.Fatal("Should not have used the cache") + } + + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + id3 := getIDByName(c, name) + if id3 != id2 { + c.Fatal("Should have used the cache") + } +} + +func (s *DockerSuite) TestBuildAddMultipleLocalFileWithAndWithoutCache(c *check.C) { + name := "testbuildaddmultiplelocalfilewithcache" + baseName := name + "-base" + + cli.BuildCmd(c, baseName, build.WithDockerfile(` + FROM busybox + ENTRYPOINT ["/bin/sh"] + `)) + + dockerfile := ` + FROM testbuildaddmultiplelocalfilewithcache-base + MAINTAINER dockerio + ADD foo Dockerfile /usr/lib/bla/ + RUN sh -c "[ $(cat /usr/lib/bla/foo) = "hello" ]"` + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile), fakecontext.WithFiles(map[string]string{ + "foo": "hello", + })) + defer ctx.Close() + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + id1 := getIDByName(c, name) + result2 := cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + id2 := getIDByName(c, name) + result3 := cli.BuildCmd(c, name, build.WithoutCache, build.WithExternalBuildContext(ctx)) + id3 := getIDByName(c, name) + if id1 != id2 { + c.Fatalf("The cache should have been used but hasn't: %s", result2.Stdout()) + } + if id1 == id3 { + c.Fatalf("The cache should have been invalided but hasn't: %s", result3.Stdout()) + } +} + +func (s *DockerSuite) TestBuildCopyDirButNotFile(c *check.C) { + name := "testbuildcopydirbutnotfile" + name2 := "testbuildcopydirbutnotfile2" + + dockerfile := ` + FROM ` + minimalBaseImage() + ` + COPY dir /tmp/` + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile), fakecontext.WithFiles(map[string]string{ + "dir/foo": "hello", + })) + defer ctx.Close() + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + id1 := 
getIDByName(c, name)
+	// Check that adding a file with a similar name doesn't mess with the cache
+	if err := ctx.Add("dir_file", "hello2"); err != nil {
+		c.Fatal(err)
+	}
+	cli.BuildCmd(c, name2, build.WithExternalBuildContext(ctx))
+	id2 := getIDByName(c, name2)
+	if id1 != id2 {
+		c.Fatal("The cache should have been used but wasn't")
+	}
+}
+
+func (s *DockerSuite) TestBuildAddCurrentDirWithCache(c *check.C) {
+	name := "testbuildaddcurrentdirwithcache"
+	name2 := name + "2"
+	name3 := name + "3"
+	name4 := name + "4"
+	dockerfile := `
+		FROM ` + minimalBaseImage() + `
+		MAINTAINER dockerio
+		ADD . /usr/lib/bla`
+	ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile), fakecontext.WithFiles(map[string]string{
+		"foo": "hello",
+	}))
+	defer ctx.Close()
+	buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx))
+	id1 := getIDByName(c, name)
+	// Check that adding a file invalidates the cache of "ADD ."
+	if err := ctx.Add("bar", "hello2"); err != nil {
+		c.Fatal(err)
+	}
+	buildImageSuccessfully(c, name2, build.WithExternalBuildContext(ctx))
+	id2 := getIDByName(c, name2)
+	if id1 == id2 {
+		c.Fatal("The cache should have been invalidated but hasn't.")
+	}
+	// Check that changing a file invalidates the cache of "ADD ."
+	if err := ctx.Add("foo", "hello1"); err != nil {
+		c.Fatal(err)
+	}
+	buildImageSuccessfully(c, name3, build.WithExternalBuildContext(ctx))
+	id3 := getIDByName(c, name3)
+	if id2 == id3 {
+		c.Fatal("The cache should have been invalidated but hasn't.")
+	}
+	// Check that changing a file to the same content with a different mtime
+	// does not invalidate the cache of "ADD ."
+	time.Sleep(1 * time.Second) // wait a second because of mtime precision
+	if err := ctx.Add("foo", "hello1"); err != nil {
+		c.Fatal(err)
+	}
+	buildImageSuccessfully(c, name4, build.WithExternalBuildContext(ctx))
+	id4 := getIDByName(c, name4)
+	if id3 != id4 {
+		c.Fatal("The cache should have been used but hasn't.")
+	}
+}
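+
+// The cache tests above and below all repeat one pattern: build, rebuild,
+// compare image IDs. A minimal sketch of the cache-hit half of that pattern
+// as a helper; the name is illustrative (not part of the upstream suite) and
+// it assumes fakecontext.New returns the *fakecontext.Fake used throughout
+// this file.
+func assertRebuildUsesCache(c *check.C, name string, ctx *fakecontext.Fake) {
+	buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx))
+	id1 := getIDByName(c, name)
+	// An unchanged context must yield the same image ID on the second build.
+	buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx))
+	id2 := getIDByName(c, name)
+	if id1 != id2 {
+		c.Fatal("The cache should have been used but hasn't.")
+	}
+}
+
+// FIXME(vdemeester) this really seems to test the same thing as before (TestBuildAddMultipleLocalFileWithAndWithoutCache)
+func (s *DockerSuite) TestBuildAddCurrentDirWithoutCache(c *check.C) {
+	name := "testbuildaddcurrentdirwithoutcache"
+	dockerfile := `
+		FROM ` + minimalBaseImage() + `
+		MAINTAINER dockerio
+		ADD . 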
/usr/lib/bla`
+	ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile), fakecontext.WithFiles(map[string]string{
+		"foo": "hello",
+	}))
+	defer ctx.Close()
+	buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx))
+	id1 := getIDByName(c, name)
+	buildImageSuccessfully(c, name, build.WithoutCache, build.WithExternalBuildContext(ctx))
+	id2 := getIDByName(c, name)
+	if id1 == id2 {
+		c.Fatal("The cache should have been invalidated but hasn't.")
+	}
+}
+
+func (s *DockerSuite) TestBuildAddRemoteFileWithAndWithoutCache(c *check.C) {
+	name := "testbuildaddremotefilewithcache"
+	server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{
+		"baz": "hello",
+	}))
+	defer server.Close()
+
+	dockerfile := fmt.Sprintf(`FROM `+minimalBaseImage()+`
+		MAINTAINER dockerio
+		ADD %s/baz /usr/lib/baz/quux`, server.URL())
+	cli.BuildCmd(c, name, build.WithDockerfile(dockerfile))
+	id1 := getIDByName(c, name)
+	cli.BuildCmd(c, name, build.WithDockerfile(dockerfile))
+	id2 := getIDByName(c, name)
+	cli.BuildCmd(c, name, build.WithoutCache, build.WithDockerfile(dockerfile))
+	id3 := getIDByName(c, name)
+
+	if id1 != id2 {
+		c.Fatal("The cache should have been used but hasn't.")
+	}
+	if id1 == id3 {
+		c.Fatal("The cache should have been invalidated but hasn't.")
+	}
+}
+
+func (s *DockerSuite) TestBuildAddRemoteFileMTime(c *check.C) {
+	name := "testbuildaddremotefilemtime"
+	name2 := name + "2"
+	name3 := name + "3"
+
+	files := map[string]string{"baz": "hello"}
+	server := fakestorage.New(c, "", fakecontext.WithFiles(files))
+	defer server.Close()
+
+	ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(fmt.Sprintf(`FROM `+minimalBaseImage()+`
+		MAINTAINER dockerio
+		ADD %s/baz /usr/lib/baz/quux`, server.URL())))
+	defer ctx.Close()
+
+	cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
+	id1 := getIDByName(c, name)
+	cli.BuildCmd(c, name2, build.WithExternalBuildContext(ctx))
+	id2 := getIDByName(c, name2)
+	if id1 != id2 {
+		c.Fatal("The cache should have been used but wasn't - #1")
+	}
+
+	// Now create a different server with same contents (causes different mtime)
+	// The cache should still be used
+
+	// allow some time for clock to pass as mtime precision is only 1s
+	time.Sleep(2 * time.Second)
+
+	server2 := fakestorage.New(c, "", fakecontext.WithFiles(files))
+	defer server2.Close()
+
+	ctx2 := fakecontext.New(c, "", fakecontext.WithDockerfile(fmt.Sprintf(`FROM `+minimalBaseImage()+`
+		MAINTAINER dockerio
+		ADD %s/baz /usr/lib/baz/quux`, server2.URL())))
+	defer ctx2.Close()
+	cli.BuildCmd(c, name3, build.WithExternalBuildContext(ctx2))
+	id3 := getIDByName(c, name3)
+	if id1 != id3 {
+		c.Fatal("The cache should have been used but wasn't")
+	}
+}
+
+// FIXME(vdemeester) this really seems to test the same thing as before (combined)
+func (s *DockerSuite) TestBuildAddLocalAndRemoteFilesWithAndWithoutCache(c *check.C) {
+	name := "testbuildaddlocalandremotefilewithcache"
+	server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{
+		"baz": "hello",
+	}))
+	defer server.Close()
+
+	ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(fmt.Sprintf(`FROM `+minimalBaseImage()+`
+		MAINTAINER dockerio
+		ADD foo /usr/lib/bla/bar
+		ADD %s/baz /usr/lib/baz/quux`, server.URL())),
+		fakecontext.WithFiles(map[string]string{
+			"foo": "hello world",
+		}))
+	defer ctx.Close()
+	buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx))
+	id1 := getIDByName(c, name)
+	buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx))
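+	// With an unchanged context this second build should be fully cached;
+	// only the --no-cache rebuild below may produce a new image ID.
+	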
id2 := getIDByName(c, name) + buildImageSuccessfully(c, name, build.WithoutCache, build.WithExternalBuildContext(ctx)) + id3 := getIDByName(c, name) + if id1 != id2 { + c.Fatal("The cache should have been used but hasn't.") + } + if id1 == id3 { + c.Fatal("The cache should have been invalidated but hasn't.") + } +} + +func testContextTar(c *check.C, compression archive.Compression) { + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(`FROM busybox +ADD foo /foo +CMD ["cat", "/foo"]`), + fakecontext.WithFiles(map[string]string{ + "foo": "bar", + }), + ) + defer ctx.Close() + context, err := archive.Tar(ctx.Dir, compression) + if err != nil { + c.Fatalf("failed to build context tar: %v", err) + } + name := "contexttar" + + cli.BuildCmd(c, name, build.WithStdinContext(context)) +} + +func (s *DockerSuite) TestBuildContextTarGzip(c *check.C) { + testContextTar(c, archive.Gzip) +} + +func (s *DockerSuite) TestBuildContextTarNoCompression(c *check.C) { + testContextTar(c, archive.Uncompressed) +} + +func (s *DockerSuite) TestBuildNoContext(c *check.C) { + name := "nocontext" + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "build", "-t", name, "-"}, + Stdin: strings.NewReader( + `FROM busybox + CMD ["echo", "ok"]`), + }).Assert(c, icmd.Success) + + if out, _ := dockerCmd(c, "run", "--rm", "nocontext"); out != "ok\n" { + c.Fatalf("run produced invalid output: %q, expected %q", out, "ok") + } +} + +func (s *DockerSuite) TestBuildDockerfileStdin(c *check.C) { + name := "stdindockerfile" + tmpDir, err := ioutil.TempDir("", "fake-context") + c.Assert(err, check.IsNil) + err = ioutil.WriteFile(filepath.Join(tmpDir, "foo"), []byte("bar"), 0600) + c.Assert(err, check.IsNil) + + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "build", "-t", name, "-f", "-", tmpDir}, + Stdin: strings.NewReader( + `FROM busybox +ADD foo /foo +CMD ["cat", "/foo"]`), + }).Assert(c, icmd.Success) + + res := inspectField(c, name, "Config.Cmd") + c.Assert(strings.TrimSpace(string(res)), checker.Equals, `[cat /foo]`) +} + +func (s *DockerSuite) TestBuildDockerfileStdinConflict(c *check.C) { + name := "stdindockerfiletarcontext" + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "build", "-t", name, "-f", "-", "-"}, + }).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "use stdin for both build context and dockerfile", + }) +} + +func (s *DockerSuite) TestBuildDockerfileStdinNoExtraFiles(c *check.C) { + s.testBuildDockerfileStdinNoExtraFiles(c, false, false) +} + +func (s *DockerSuite) TestBuildDockerfileStdinDockerignore(c *check.C) { + s.testBuildDockerfileStdinNoExtraFiles(c, true, false) +} + +func (s *DockerSuite) TestBuildDockerfileStdinDockerignoreIgnored(c *check.C) { + s.testBuildDockerfileStdinNoExtraFiles(c, true, true) +} + +func (s *DockerSuite) testBuildDockerfileStdinNoExtraFiles(c *check.C, hasDockerignore, ignoreDockerignore bool) { + name := "stdindockerfilenoextra" + tmpDir, err := ioutil.TempDir("", "fake-context") + c.Assert(err, check.IsNil) + defer os.RemoveAll(tmpDir) + + writeFile := func(filename, content string) { + err = ioutil.WriteFile(filepath.Join(tmpDir, filename), []byte(content), 0600) + c.Assert(err, check.IsNil) + } + + writeFile("foo", "bar") + + if hasDockerignore { + // Add an empty Dockerfile to verify that it is not added to the image + writeFile("Dockerfile", "") + + ignores := "Dockerfile\n" + if ignoreDockerignore { + ignores += ".dockerignore\n" + } + writeFile(".dockerignore", ignores) + } + + result := icmd.RunCmd(icmd.Cmd{ + Command: 
[]string{dockerBinary, "build", "-t", name, "-f", "-", tmpDir}, + Stdin: strings.NewReader( + `FROM busybox +COPY . /baz`), + }) + result.Assert(c, icmd.Success) + + result = cli.DockerCmd(c, "run", "--rm", name, "ls", "-A", "/baz") + if hasDockerignore && !ignoreDockerignore { + c.Assert(result.Stdout(), checker.Equals, ".dockerignore\nfoo\n") + } else { + c.Assert(result.Stdout(), checker.Equals, "foo\n") + } +} + +func (s *DockerSuite) TestBuildWithVolumeOwnership(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildimg" + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox:latest + RUN mkdir /test && chown daemon:daemon /test && chmod 0600 /test + VOLUME /test`)) + + out, _ := dockerCmd(c, "run", "--rm", "testbuildimg", "ls", "-la", "/test") + if expected := "drw-------"; !strings.Contains(out, expected) { + c.Fatalf("expected %s received %s", expected, out) + } + if expected := "daemon daemon"; !strings.Contains(out, expected) { + c.Fatalf("expected %s received %s", expected, out) + } + +} + +// testing #1405 - config.Cmd does not get cleaned up if +// utilizing cache +func (s *DockerSuite) TestBuildEntrypointRunCleanup(c *check.C) { + name := "testbuildcmdcleanup" + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + RUN echo "hello"`)) + + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox + RUN echo "hello" + ADD foo /foo + ENTRYPOINT ["/bin/echo"]`), + build.WithFile("foo", "hello"))) + + res := inspectField(c, name, "Config.Cmd") + // Cmd must be cleaned up + if res != "[]" { + c.Fatalf("Cmd %s, expected nil", res) + } +} + +func (s *DockerSuite) TestBuildAddFileNotFound(c *check.C) { + name := "testbuildaddnotfound" + expected := "foo: no such file or directory" + + if testEnv.DaemonPlatform() == "windows" { + expected = "foo: The system cannot find the file specified" + } + + buildImage(name, build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM `+minimalBaseImage()+` + ADD foo /usr/local/bar`), + build.WithFile("bar", "hello"))).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: expected, + }) +} + +func (s *DockerSuite) TestBuildInheritance(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildinheritance" + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM scratch + EXPOSE 2375`)) + ports1 := inspectField(c, name, "Config.ExposedPorts") + + buildImageSuccessfully(c, name, build.WithDockerfile(fmt.Sprintf(`FROM %s + ENTRYPOINT ["/bin/echo"]`, name))) + + res := inspectField(c, name, "Config.Entrypoint") + if expected := "[/bin/echo]"; res != expected { + c.Fatalf("Entrypoint %s, expected %s", res, expected) + } + ports2 := inspectField(c, name, "Config.ExposedPorts") + if ports1 != ports2 { + c.Fatalf("Ports must be same: %s != %s", ports1, ports2) + } +} + +func (s *DockerSuite) TestBuildFails(c *check.C) { + name := "testbuildfails" + buildImage(name, build.WithDockerfile(`FROM busybox + RUN sh -c "exit 23"`)).Assert(c, icmd.Expected{ + ExitCode: 23, + Err: "returned a non-zero code: 23", + }) +} + +func (s *DockerSuite) TestBuildOnBuild(c *check.C) { + name := "testbuildonbuild" + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + ONBUILD RUN touch foobar`)) + buildImageSuccessfully(c, name, build.WithDockerfile(fmt.Sprintf(`FROM %s + RUN [ -f foobar ]`, name))) +} + +// gh #2446 +func (s *DockerSuite) TestBuildAddToSymlinkDest(c *check.C) { + makeLink := `ln -s /foo /bar` + if testEnv.DaemonPlatform() == "windows" { + makeLink 
= `mklink /D C:\bar C:\foo` + } + name := "testbuildaddtosymlinkdest" + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", ` + FROM busybox + RUN sh -c "mkdir /foo" + RUN `+makeLink+` + ADD foo /bar/ + RUN sh -c "[ -f /bar/foo ]" + RUN sh -c "[ -f /foo/foo ]"`), + build.WithFile("foo", "hello"), + )) +} + +func (s *DockerSuite) TestBuildEscapeWhitespace(c *check.C) { + name := "testbuildescapewhitespace" + + buildImageSuccessfully(c, name, build.WithDockerfile(` + # ESCAPE=\ + FROM busybox + MAINTAINER "Docker \ +IO " + `)) + + res := inspectField(c, name, "Author") + if res != "\"Docker IO \"" { + c.Fatalf("Parsed string did not match the escaped string. Got: %q", res) + } + +} + +func (s *DockerSuite) TestBuildVerifyIntString(c *check.C) { + // Verify that strings that look like ints are still passed as strings + name := "testbuildstringing" + + buildImageSuccessfully(c, name, build.WithDockerfile(` + FROM busybox + MAINTAINER 123`)) + + out, _ := dockerCmd(c, "inspect", name) + if !strings.Contains(out, "\"123\"") { + c.Fatalf("Output does not contain the int as a string:\n%s", out) + } + +} + +func (s *DockerSuite) TestBuildDockerignore(c *check.C) { + name := "testbuilddockerignore" + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", ` + FROM busybox + ADD . /bla + RUN sh -c "[[ -f /bla/src/x.go ]]" + RUN sh -c "[[ -f /bla/Makefile ]]" + RUN sh -c "[[ ! -e /bla/src/_vendor ]]" + RUN sh -c "[[ ! -e /bla/.gitignore ]]" + RUN sh -c "[[ ! -e /bla/README.md ]]" + RUN sh -c "[[ ! -e /bla/dir/foo ]]" + RUN sh -c "[[ ! -e /bla/foo ]]" + RUN sh -c "[[ ! -e /bla/.git ]]" + RUN sh -c "[[ ! -e v.cc ]]" + RUN sh -c "[[ ! -e src/v.cc ]]" + RUN sh -c "[[ ! -e src/_vendor/v.cc ]]"`), + build.WithFile("Makefile", "all:"), + build.WithFile(".git/HEAD", "ref: foo"), + build.WithFile("src/x.go", "package main"), + build.WithFile("src/_vendor/v.go", "package main"), + build.WithFile("src/_vendor/v.cc", "package main"), + build.WithFile("src/v.cc", "package main"), + build.WithFile("v.cc", "package main"), + build.WithFile("dir/foo", ""), + build.WithFile(".gitignore", ""), + build.WithFile("README.md", "readme"), + build.WithFile(".dockerignore", ` +.git +pkg +.gitignore +src/_vendor +*.md +**/*.cc +dir`), + )) +} + +func (s *DockerSuite) TestBuildDockerignoreCleanPaths(c *check.C) { + name := "testbuilddockerignorecleanpaths" + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", ` + FROM busybox + ADD . /tmp/ + RUN sh -c "(! ls /tmp/foo) && (! ls /tmp/foo2) && (! ls /tmp/dir1/foo)"`), + build.WithFile("foo", "foo"), + build.WithFile("foo2", "foo2"), + build.WithFile("dir1/foo", "foo in dir1"), + build.WithFile(".dockerignore", "./foo\ndir1//foo\n./dir1/../foo2"), + )) +} + +func (s *DockerSuite) TestBuildDockerignoreExceptions(c *check.C) { + name := "testbuilddockerignoreexceptions" + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", ` + FROM busybox + ADD . /bla + RUN sh -c "[[ -f /bla/src/x.go ]]" + RUN sh -c "[[ -f /bla/Makefile ]]" + RUN sh -c "[[ ! -e /bla/src/_vendor ]]" + RUN sh -c "[[ ! -e /bla/.gitignore ]]" + RUN sh -c "[[ ! -e /bla/README.md ]]" + RUN sh -c "[[ -e /bla/dir/dir/foo ]]" + RUN sh -c "[[ ! -e /bla/dir/foo1 ]]" + RUN sh -c "[[ -f /bla/dir/e ]]" + RUN sh -c "[[ -f /bla/dir/e-dir/foo ]]" + RUN sh -c "[[ ! -e /bla/foo ]]" + RUN sh -c "[[ ! 
-e /bla/.git ]]" + RUN sh -c "[[ -e /bla/dir/a.cc ]]"`), + build.WithFile("Makefile", "all:"), + build.WithFile(".git/HEAD", "ref: foo"), + build.WithFile("src/x.go", "package main"), + build.WithFile("src/_vendor/v.go", "package main"), + build.WithFile("dir/foo", ""), + build.WithFile("dir/foo1", ""), + build.WithFile("dir/dir/f1", ""), + build.WithFile("dir/dir/foo", ""), + build.WithFile("dir/e", ""), + build.WithFile("dir/e-dir/foo", ""), + build.WithFile(".gitignore", ""), + build.WithFile("README.md", "readme"), + build.WithFile("dir/a.cc", "hello"), + build.WithFile(".dockerignore", ` +.git +pkg +.gitignore +src/_vendor +*.md +dir +!dir/e* +!dir/dir/foo +**/*.cc +!**/*.cc`), + )) +} + +func (s *DockerSuite) TestBuildDockerignoringDockerfile(c *check.C) { + name := "testbuilddockerignoredockerfile" + dockerfile := ` + FROM busybox + ADD . /tmp/ + RUN sh -c "! ls /tmp/Dockerfile" + RUN ls /tmp/.dockerignore` + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile(".dockerignore", "Dockerfile\n"), + )) + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile(".dockerignore", "./Dockerfile\n"), + )) +} + +func (s *DockerSuite) TestBuildDockerignoringRenamedDockerfile(c *check.C) { + name := "testbuilddockerignoredockerfile" + dockerfile := ` + FROM busybox + ADD . /tmp/ + RUN ls /tmp/Dockerfile + RUN sh -c "! ls /tmp/MyDockerfile" + RUN ls /tmp/.dockerignore` + buildImageSuccessfully(c, name, cli.WithFlags("-f", "MyDockerfile"), build.WithBuildContext(c, + build.WithFile("Dockerfile", "Should not use me"), + build.WithFile("MyDockerfile", dockerfile), + build.WithFile(".dockerignore", "MyDockerfile\n"), + )) + buildImageSuccessfully(c, name, cli.WithFlags("-f", "MyDockerfile"), build.WithBuildContext(c, + build.WithFile("Dockerfile", "Should not use me"), + build.WithFile("MyDockerfile", dockerfile), + build.WithFile(".dockerignore", "./MyDockerfile\n"), + )) +} + +func (s *DockerSuite) TestBuildDockerignoringDockerignore(c *check.C) { + name := "testbuilddockerignoredockerignore" + dockerfile := ` + FROM busybox + ADD . /tmp/ + RUN sh -c "! ls /tmp/.dockerignore" + RUN ls /tmp/Dockerfile` + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile(".dockerignore", ".dockerignore\n"), + )) +} + +func (s *DockerSuite) TestBuildDockerignoreTouchDockerfile(c *check.C) { + name := "testbuilddockerignoretouchdockerfile" + dockerfile := ` + FROM busybox + ADD . 
/tmp/` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + ".dockerignore": "Dockerfile\n", + })) + defer ctx.Close() + + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + id1 := getIDByName(c, name) + + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + id2 := getIDByName(c, name) + if id1 != id2 { + c.Fatalf("Didn't use the cache - 1") + } + + // Now make sure touching Dockerfile doesn't invalidate the cache + if err := ctx.Add("Dockerfile", dockerfile+"\n# hi"); err != nil { + c.Fatalf("Didn't add Dockerfile: %s", err) + } + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + id2 = getIDByName(c, name) + if id1 != id2 { + c.Fatalf("Didn't use the cache - 2") + } + + // One more time but just 'touch' it instead of changing the content + if err := ctx.Add("Dockerfile", dockerfile+"\n# hi"); err != nil { + c.Fatalf("Didn't add Dockerfile: %s", err) + } + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + id2 = getIDByName(c, name) + if id1 != id2 { + c.Fatalf("Didn't use the cache - 3") + } +} + +func (s *DockerSuite) TestBuildDockerignoringWholeDir(c *check.C) { + name := "testbuilddockerignorewholedir" + + dockerfile := ` + FROM busybox + COPY . / + RUN sh -c "[[ ! -e /.gitignore ]]" + RUN sh -c "[[ ! -e /Makefile ]]"` + + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile(".dockerignore", "*\n"), + build.WithFile("Makefile", "all:"), + build.WithFile(".gitignore", ""), + )) +} + +func (s *DockerSuite) TestBuildDockerignoringOnlyDotfiles(c *check.C) { + name := "testbuilddockerignorewholedir" + + dockerfile := ` + FROM busybox + COPY . / + RUN sh -c "[[ ! -e /.gitignore ]]" + RUN sh -c "[[ -f /Makefile ]]"` + + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile(".dockerignore", ".*"), + build.WithFile("Makefile", "all:"), + build.WithFile(".gitignore", ""), + )) +} + +func (s *DockerSuite) TestBuildDockerignoringBadExclusion(c *check.C) { + name := "testbuilddockerignorebadexclusion" + buildImage(name, build.WithBuildContext(c, + build.WithFile("Dockerfile", ` + FROM busybox + COPY . / + RUN sh -c "[[ ! -e /.gitignore ]]" + RUN sh -c "[[ -f /Makefile ]]"`), + build.WithFile("Makefile", "all:"), + build.WithFile(".gitignore", ""), + build.WithFile(".dockerignore", "!\n"), + )).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "error checking context: 'illegal exclusion pattern: \"!\"", + }) +} + +func (s *DockerSuite) TestBuildDockerignoringWildTopDir(c *check.C) { + dockerfile := ` + FROM busybox + COPY . / + RUN sh -c "[[ ! -e /.dockerignore ]]" + RUN sh -c "[[ ! -e /Dockerfile ]]" + RUN sh -c "[[ ! -e /file1 ]]" + RUN sh -c "[[ ! -e /dir ]]"` + + // All of these should result in ignoring all files + for _, variant := range []string{"**", "**/", "**/**", "*"} { + buildImageSuccessfully(c, "noname", build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile("file1", ""), + build.WithFile("dir/file1", ""), + build.WithFile(".dockerignore", variant), + )) + + dockerCmd(c, "rmi", "noname") + } +} + +func (s *DockerSuite) TestBuildDockerignoringWildDirs(c *check.C) { + dockerfile := ` + FROM busybox + COPY . / + #RUN sh -c "[[ -e /.dockerignore ]]" + RUN sh -c "[[ -e /Dockerfile ]] && \ + [[ ! -e /file0 ]] && \ + [[ ! -e /dir1/file0 ]] && \ + [[ ! -e /dir2/file0 ]] && \ + [[ ! -e /file1 ]] && \ + [[ ! 
-e /dir1/file1 ]] && \ + [[ ! -e /dir1/dir2/file1 ]] && \ + [[ ! -e /dir1/file2 ]] && \ + [[ -e /dir1/dir2/file2 ]] && \ + [[ ! -e /dir1/dir2/file4 ]] && \ + [[ ! -e /dir1/dir2/file5 ]] && \ + [[ ! -e /dir1/dir2/file6 ]] && \ + [[ ! -e /dir1/dir3/file7 ]] && \ + [[ ! -e /dir1/dir3/file8 ]] && \ + [[ -e /dir1/dir3 ]] && \ + [[ -e /dir1/dir4 ]] && \ + [[ ! -e 'dir1/dir5/fileAA' ]] && \ + [[ -e 'dir1/dir5/fileAB' ]] && \ + [[ -e 'dir1/dir5/fileB' ]]" # "." in pattern means nothing + + RUN echo all done!` + + dockerignore := ` +**/file0 +**/*file1 +**/dir1/file2 +dir1/**/file4 +**/dir2/file5 +**/dir1/dir2/file6 +dir1/dir3/** +**/dir4/** +**/file?A +**/file\?B +**/dir5/file. +` + + buildImageSuccessfully(c, "noname", build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile(".dockerignore", dockerignore), + build.WithFile("dir1/file0", ""), + build.WithFile("dir1/dir2/file0", ""), + build.WithFile("file1", ""), + build.WithFile("dir1/file1", ""), + build.WithFile("dir1/dir2/file1", ""), + build.WithFile("dir1/file2", ""), + build.WithFile("dir1/dir2/file2", ""), // remains + build.WithFile("dir1/dir2/file4", ""), + build.WithFile("dir1/dir2/file5", ""), + build.WithFile("dir1/dir2/file6", ""), + build.WithFile("dir1/dir3/file7", ""), + build.WithFile("dir1/dir3/file8", ""), + build.WithFile("dir1/dir4/file9", ""), + build.WithFile("dir1/dir5/fileAA", ""), + build.WithFile("dir1/dir5/fileAB", ""), + build.WithFile("dir1/dir5/fileB", ""), + )) +} + +func (s *DockerSuite) TestBuildLineBreak(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildlinebreak" + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox +RUN sh -c 'echo root:testpass \ + > /tmp/passwd' +RUN mkdir -p /var/run/sshd +RUN sh -c "[ "$(cat /tmp/passwd)" = "root:testpass" ]" +RUN sh -c "[ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]"`)) +} + +func (s *DockerSuite) TestBuildEOLInLine(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildeolinline" + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox +RUN sh -c 'echo root:testpass > /tmp/passwd' +RUN echo "foo \n bar"; echo "baz" +RUN mkdir -p /var/run/sshd +RUN sh -c "[ "$(cat /tmp/passwd)" = "root:testpass" ]" +RUN sh -c "[ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]"`)) +} + +func (s *DockerSuite) TestBuildCommentsShebangs(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildcomments" + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox +# This is an ordinary comment. +RUN { echo '#!/bin/sh'; echo 'echo hello world'; } > /hello.sh +RUN [ ! 
-x /hello.sh ] +# comment with line break \ +RUN chmod +x /hello.sh +RUN [ -x /hello.sh ] +RUN [ "$(cat /hello.sh)" = $'#!/bin/sh\necho hello world' ] +RUN [ "$(/hello.sh)" = "hello world" ]`)) +} + +func (s *DockerSuite) TestBuildUsersAndGroups(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildusers" + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + +# Make sure our defaults work +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)" = '0:0/root:root' ] + +# TODO decide if "args.user = strconv.Itoa(syscall.Getuid())" is acceptable behavior for changeUser in sysvinit instead of "return nil" when "USER" isn't specified (so that we get the proper group list even if that is the empty list, even in the default case of not supplying an explicit USER to run as, which implies USER 0) +USER root +RUN [ "$(id -G):$(id -Gn)" = '0 10:root wheel' ] + +# Setup dockerio user and group +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd && \ + echo 'dockerio:x:1001:' >> /etc/group + +# Make sure we can switch to our user and all the information is exactly as we expect it to be +USER dockerio +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] + +# Switch back to root and double check that worked exactly as we might expect it to +USER root +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '0:0/root:root/0 10:root wheel' ] && \ + # Add a "supplementary" group for our dockerio user + echo 'supplementary:x:1002:dockerio' >> /etc/group + +# ... and then go verify that we get it like we expect +USER dockerio +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001 1002:dockerio supplementary' ] +USER 1001 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001 1002:dockerio supplementary' ] + +# super test the new "user:group" syntax +USER dockerio:dockerio +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] +USER 1001:dockerio +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] +USER dockerio:1001 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] +USER 1001:1001 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] +USER dockerio:supplementary +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] +USER dockerio:1002 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] +USER 1001:supplementary +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] +USER 1001:1002 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] + +# make sure unknown uid/gid still works properly +USER 1042:1043 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1042:1043/1042:1043/1043:1043' ]`)) +} + +// FIXME(vdemeester) rename this test (and probably "merge" it with the one below TestBuildEnvUsage2) +func (s *DockerSuite) TestBuildEnvUsage(c *check.C) { + // /docker/world/hello is not owned by the correct user + testRequires(c, NotUserNamespace) + testRequires(c, DaemonIsLinux) + name := 
"testbuildenvusage" + dockerfile := `FROM busybox +ENV HOME /root +ENV PATH $HOME/bin:$PATH +ENV PATH /tmp:$PATH +RUN [ "$PATH" = "/tmp:$HOME/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ] +ENV FOO /foo/baz +ENV BAR /bar +ENV BAZ $BAR +ENV FOOPATH $PATH:$FOO +RUN [ "$BAR" = "$BAZ" ] +RUN [ "$FOOPATH" = "$PATH:/foo/baz" ] +ENV FROM hello/docker/world +ENV TO /docker/world/hello +ADD $FROM $TO +RUN [ "$(cat $TO)" = "hello" ] +ENV abc=def +ENV ghi=$abc +RUN [ "$ghi" = "def" ] +` + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile("hello/docker/world", "hello"), + )) +} + +// FIXME(vdemeester) rename this test (and probably "merge" it with the one above TestBuildEnvUsage) +func (s *DockerSuite) TestBuildEnvUsage2(c *check.C) { + // /docker/world/hello is not owned by the correct user + testRequires(c, NotUserNamespace) + testRequires(c, DaemonIsLinux) + name := "testbuildenvusage2" + dockerfile := `FROM busybox +ENV abc=def def="hello world" +RUN [ "$abc,$def" = "def,hello world" ] +ENV def=hello\ world v1=abc v2="hi there" v3='boogie nights' v4="with'quotes too" +RUN [ "$def,$v1,$v2,$v3,$v4" = "hello world,abc,hi there,boogie nights,with'quotes too" ] +ENV abc=zzz FROM=hello/docker/world +ENV abc=zzz TO=/docker/world/hello +ADD $FROM $TO +RUN [ "$abc,$(cat $TO)" = "zzz,hello" ] +ENV abc 'yyy' +RUN [ $abc = 'yyy' ] +ENV abc= +RUN [ "$abc" = "" ] + +# use grep to make sure if the builder substitutes \$foo by mistake +# we don't get a false positive +ENV abc=\$foo +RUN [ "$abc" = "\$foo" ] && (echo "$abc" | grep foo) +ENV abc \$foo +RUN [ "$abc" = "\$foo" ] && (echo "$abc" | grep foo) + +ENV abc=\'foo\' abc2=\"foo\" +RUN [ "$abc,$abc2" = "'foo',\"foo\"" ] +ENV abc "foo" +RUN [ "$abc" = "foo" ] +ENV abc 'foo' +RUN [ "$abc" = 'foo' ] +ENV abc \'foo\' +RUN [ "$abc" = "'foo'" ] +ENV abc \"foo\" +RUN [ "$abc" = '"foo"' ] + +ENV abc=ABC +RUN [ "$abc" = "ABC" ] +ENV def1=${abc:-DEF} def2=${ccc:-DEF} +ENV def3=${ccc:-${def2}xx} def4=${abc:+ALT} def5=${def2:+${abc}:} def6=${ccc:-\$abc:} def7=${ccc:-\${abc}:} +RUN [ "$def1,$def2,$def3,$def4,$def5,$def6,$def7" = 'ABC,DEF,DEFxx,ALT,ABC:,$abc:,${abc:}' ] +ENV mypath=${mypath:+$mypath:}/home +ENV mypath=${mypath:+$mypath:}/away +RUN [ "$mypath" = '/home:/away' ] + +ENV e1=bar +ENV e2=$e1 e3=$e11 e4=\$e1 e5=\$e11 +RUN [ "$e0,$e1,$e2,$e3,$e4,$e5" = ',bar,bar,,$e1,$e11' ] + +ENV ee1 bar +ENV ee2 $ee1 +ENV ee3 $ee11 +ENV ee4 \$ee1 +ENV ee5 \$ee11 +RUN [ "$ee1,$ee2,$ee3,$ee4,$ee5" = 'bar,bar,,$ee1,$ee11' ] + +ENV eee1="foo" eee2='foo' +ENV eee3 "foo" +ENV eee4 'foo' +RUN [ "$eee1,$eee2,$eee3,$eee4" = 'foo,foo,foo,foo' ] + +` + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile("hello/docker/world", "hello"), + )) +} + +func (s *DockerSuite) TestBuildAddScript(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildaddscript" + dockerfile := ` +FROM busybox +ADD test /test +RUN ["chmod","+x","/test"] +RUN ["/test"] +RUN [ "$(cat /testfile)" = 'test!' ]` + + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile("test", "#!/bin/sh\necho 'test!' 
> /testfile"), + )) +} + +func (s *DockerSuite) TestBuildAddTar(c *check.C) { + // /test/foo is not owned by the correct user + testRequires(c, NotUserNamespace) + name := "testbuildaddtar" + + ctx := func() *fakecontext.Fake { + dockerfile := ` +FROM busybox +ADD test.tar / +RUN cat /test/foo | grep Hi +ADD test.tar /test.tar +RUN cat /test.tar/test/foo | grep Hi +ADD test.tar /unlikely-to-exist +RUN cat /unlikely-to-exist/test/foo | grep Hi +ADD test.tar /unlikely-to-exist-trailing-slash/ +RUN cat /unlikely-to-exist-trailing-slash/test/foo | grep Hi +RUN sh -c "mkdir /existing-directory" #sh -c is needed on Windows to use the correct mkdir +ADD test.tar /existing-directory +RUN cat /existing-directory/test/foo | grep Hi +ADD test.tar /existing-directory-trailing-slash/ +RUN cat /existing-directory-trailing-slash/test/foo | grep Hi` + tmpDir, err := ioutil.TempDir("", "fake-context") + c.Assert(err, check.IsNil) + testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) + if err != nil { + c.Fatalf("failed to create test.tar archive: %v", err) + } + defer testTar.Close() + + tw := tar.NewWriter(testTar) + + if err := tw.WriteHeader(&tar.Header{ + Name: "test/foo", + Size: 2, + }); err != nil { + c.Fatalf("failed to write tar file header: %v", err) + } + if _, err := tw.Write([]byte("Hi")); err != nil { + c.Fatalf("failed to write tar file content: %v", err) + } + if err := tw.Close(); err != nil { + c.Fatalf("failed to close tar archive: %v", err) + } + + if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { + c.Fatalf("failed to open destination dockerfile: %v", err) + } + return fakecontext.New(c, tmpDir) + }() + defer ctx.Close() + + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) +} + +func (s *DockerSuite) TestBuildAddBrokenTar(c *check.C) { + name := "testbuildaddbrokentar" + + ctx := func() *fakecontext.Fake { + dockerfile := ` +FROM busybox +ADD test.tar /` + tmpDir, err := ioutil.TempDir("", "fake-context") + c.Assert(err, check.IsNil) + testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) + if err != nil { + c.Fatalf("failed to create test.tar archive: %v", err) + } + defer testTar.Close() + + tw := tar.NewWriter(testTar) + + if err := tw.WriteHeader(&tar.Header{ + Name: "test/foo", + Size: 2, + }); err != nil { + c.Fatalf("failed to write tar file header: %v", err) + } + if _, err := tw.Write([]byte("Hi")); err != nil { + c.Fatalf("failed to write tar file content: %v", err) + } + if err := tw.Close(); err != nil { + c.Fatalf("failed to close tar archive: %v", err) + } + + // Corrupt the tar by removing one byte off the end + stat, err := testTar.Stat() + if err != nil { + c.Fatalf("failed to stat tar archive: %v", err) + } + if err := testTar.Truncate(stat.Size() - 1); err != nil { + c.Fatalf("failed to truncate tar archive: %v", err) + } + + if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { + c.Fatalf("failed to open destination dockerfile: %v", err) + } + return fakecontext.New(c, tmpDir) + }() + defer ctx.Close() + + buildImage(name, build.WithExternalBuildContext(ctx)).Assert(c, icmd.Expected{ + ExitCode: 1, + }) +} + +func (s *DockerSuite) TestBuildAddNonTar(c *check.C) { + name := "testbuildaddnontar" + + // Should not try to extract test.tar + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", ` + FROM busybox + ADD test.tar / + RUN test -f /test.tar`), + build.WithFile("test.tar", 
"not_a_tar_file"), + )) +} + +func (s *DockerSuite) TestBuildAddTarXz(c *check.C) { + // /test/foo is not owned by the correct user + testRequires(c, NotUserNamespace) + testRequires(c, DaemonIsLinux) + name := "testbuildaddtarxz" + + ctx := func() *fakecontext.Fake { + dockerfile := ` + FROM busybox + ADD test.tar.xz / + RUN cat /test/foo | grep Hi` + tmpDir, err := ioutil.TempDir("", "fake-context") + c.Assert(err, check.IsNil) + testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) + if err != nil { + c.Fatalf("failed to create test.tar archive: %v", err) + } + defer testTar.Close() + + tw := tar.NewWriter(testTar) + + if err := tw.WriteHeader(&tar.Header{ + Name: "test/foo", + Size: 2, + }); err != nil { + c.Fatalf("failed to write tar file header: %v", err) + } + if _, err := tw.Write([]byte("Hi")); err != nil { + c.Fatalf("failed to write tar file content: %v", err) + } + if err := tw.Close(); err != nil { + c.Fatalf("failed to close tar archive: %v", err) + } + + icmd.RunCmd(icmd.Cmd{ + Command: []string{"xz", "-k", "test.tar"}, + Dir: tmpDir, + }).Assert(c, icmd.Success) + if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { + c.Fatalf("failed to open destination dockerfile: %v", err) + } + return fakecontext.New(c, tmpDir) + }() + + defer ctx.Close() + + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) +} + +func (s *DockerSuite) TestBuildAddTarXzGz(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildaddtarxzgz" + + ctx := func() *fakecontext.Fake { + dockerfile := ` + FROM busybox + ADD test.tar.xz.gz / + RUN ls /test.tar.xz.gz` + tmpDir, err := ioutil.TempDir("", "fake-context") + c.Assert(err, check.IsNil) + testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) + if err != nil { + c.Fatalf("failed to create test.tar archive: %v", err) + } + defer testTar.Close() + + tw := tar.NewWriter(testTar) + + if err := tw.WriteHeader(&tar.Header{ + Name: "test/foo", + Size: 2, + }); err != nil { + c.Fatalf("failed to write tar file header: %v", err) + } + if _, err := tw.Write([]byte("Hi")); err != nil { + c.Fatalf("failed to write tar file content: %v", err) + } + if err := tw.Close(); err != nil { + c.Fatalf("failed to close tar archive: %v", err) + } + + icmd.RunCmd(icmd.Cmd{ + Command: []string{"xz", "-k", "test.tar"}, + Dir: tmpDir, + }).Assert(c, icmd.Success) + + icmd.RunCmd(icmd.Cmd{ + Command: []string{"gzip", "test.tar.xz"}, + Dir: tmpDir, + }) + if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { + c.Fatalf("failed to open destination dockerfile: %v", err) + } + return fakecontext.New(c, tmpDir) + }() + + defer ctx.Close() + + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) +} + +func (s *DockerSuite) TestBuildFromGit(c *check.C) { + name := "testbuildfromgit" + git := fakegit.New(c, "repo", map[string]string{ + "Dockerfile": `FROM busybox + ADD first /first + RUN [ -f /first ] + MAINTAINER docker`, + "first": "test git data", + }, true) + defer git.Close() + + buildImageSuccessfully(c, name, build.WithContextPath(git.RepoURL)) + + res := inspectField(c, name, "Author") + if res != "docker" { + c.Fatalf("Maintainer should be docker, got %s", res) + } +} + +func (s *DockerSuite) TestBuildFromGitWithContext(c *check.C) { + name := "testbuildfromgit" + git := fakegit.New(c, "repo", map[string]string{ + "docker/Dockerfile": `FROM busybox + ADD first /first + RUN [ -f /first ] + MAINTAINER docker`, + 
"docker/first": "test git data", + }, true) + defer git.Close() + + buildImageSuccessfully(c, name, build.WithContextPath(fmt.Sprintf("%s#master:docker", git.RepoURL))) + + res := inspectField(c, name, "Author") + if res != "docker" { + c.Fatalf("Maintainer should be docker, got %s", res) + } +} + +func (s *DockerSuite) TestBuildFromGitWithF(c *check.C) { + name := "testbuildfromgitwithf" + git := fakegit.New(c, "repo", map[string]string{ + "myApp/myDockerfile": `FROM busybox + RUN echo hi from Dockerfile`, + }, true) + defer git.Close() + + buildImage(name, cli.WithFlags("-f", "myApp/myDockerfile"), build.WithContextPath(git.RepoURL)).Assert(c, icmd.Expected{ + Out: "hi from Dockerfile", + }) +} + +func (s *DockerSuite) TestBuildFromRemoteTarball(c *check.C) { + name := "testbuildfromremotetarball" + + buffer := new(bytes.Buffer) + tw := tar.NewWriter(buffer) + defer tw.Close() + + dockerfile := []byte(`FROM busybox + MAINTAINER docker`) + if err := tw.WriteHeader(&tar.Header{ + Name: "Dockerfile", + Size: int64(len(dockerfile)), + }); err != nil { + c.Fatalf("failed to write tar file header: %v", err) + } + if _, err := tw.Write(dockerfile); err != nil { + c.Fatalf("failed to write tar file content: %v", err) + } + if err := tw.Close(); err != nil { + c.Fatalf("failed to close tar archive: %v", err) + } + + server := fakestorage.New(c, "", fakecontext.WithBinaryFiles(map[string]*bytes.Buffer{ + "testT.tar": buffer, + })) + defer server.Close() + + cli.BuildCmd(c, name, build.WithContextPath(server.URL()+"/testT.tar")) + + res := inspectField(c, name, "Author") + if res != "docker" { + c.Fatalf("Maintainer should be docker, got %s", res) + } +} + +func (s *DockerSuite) TestBuildCleanupCmdOnEntrypoint(c *check.C) { + name := "testbuildcmdcleanuponentrypoint" + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+` + CMD ["test"] + ENTRYPOINT ["echo"]`)) + buildImageSuccessfully(c, name, build.WithDockerfile(fmt.Sprintf(`FROM %s + ENTRYPOINT ["cat"]`, name))) + + res := inspectField(c, name, "Config.Cmd") + if res != "[]" { + c.Fatalf("Cmd %s, expected nil", res) + } + res = inspectField(c, name, "Config.Entrypoint") + if expected := "[cat]"; res != expected { + c.Fatalf("Entrypoint %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildClearCmd(c *check.C) { + name := "testbuildclearcmd" + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+` + ENTRYPOINT ["/bin/bash"] + CMD []`)) + + res := inspectFieldJSON(c, name, "Config.Cmd") + if res != "[]" { + c.Fatalf("Cmd %s, expected %s", res, "[]") + } +} + +func (s *DockerSuite) TestBuildEmptyCmd(c *check.C) { + // Skip on Windows. Base image on Windows has a CMD set in the image. 
+ testRequires(c, DaemonIsLinux) + + name := "testbuildemptycmd" + buildImageSuccessfully(c, name, build.WithDockerfile("FROM "+minimalBaseImage()+"\nMAINTAINER quux\n")) + + res := inspectFieldJSON(c, name, "Config.Cmd") + if res != "null" { + c.Fatalf("Cmd %s, expected %s", res, "null") + } +} + +func (s *DockerSuite) TestBuildOnBuildOutput(c *check.C) { + name := "testbuildonbuildparent" + buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nONBUILD RUN echo foo\n")) + + buildImage(name, build.WithDockerfile("FROM "+name+"\nMAINTAINER quux\n")).Assert(c, icmd.Expected{ + Out: "# Executing 1 build trigger", + }) +} + +// FIXME(vdemeester) should be a unit test +func (s *DockerSuite) TestBuildInvalidTag(c *check.C) { + name := "abcd:" + stringutils.GenerateRandomAlphaOnlyString(200) + buildImage(name, build.WithDockerfile("FROM "+minimalBaseImage()+"\nMAINTAINER quux\n")).Assert(c, icmd.Expected{ + ExitCode: 125, + Err: "invalid reference format", + }) +} + +func (s *DockerSuite) TestBuildCmdShDashC(c *check.C) { + name := "testbuildcmdshc" + buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nCMD echo cmd\n")) + + res := inspectFieldJSON(c, name, "Config.Cmd") + expected := `["/bin/sh","-c","echo cmd"]` + if testEnv.DaemonPlatform() == "windows" { + expected = `["cmd","/S","/C","echo cmd"]` + } + if res != expected { + c.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res) + } + +} + +func (s *DockerSuite) TestBuildCmdSpaces(c *check.C) { + // Test to make sure that when we strcat arrays we take into account + // the arg separator to make sure ["echo","hi"] and ["echo hi"] don't + // look the same + name := "testbuildcmdspaces" + + buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nCMD [\"echo hi\"]\n")) + id1 := getIDByName(c, name) + buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nCMD [\"echo\", \"hi\"]\n")) + id2 := getIDByName(c, name) + + if id1 == id2 { + c.Fatal("Should not have resulted in the same CMD") + } + + // Now do the same with ENTRYPOINT + buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nENTRYPOINT [\"echo hi\"]\n")) + id1 = getIDByName(c, name) + buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nENTRYPOINT [\"echo\", \"hi\"]\n")) + id2 = getIDByName(c, name) + + if id1 == id2 { + c.Fatal("Should not have resulted in the same ENTRYPOINT") + } +} + +func (s *DockerSuite) TestBuildCmdJSONNoShDashC(c *check.C) { + name := "testbuildcmdjson" + buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nCMD [\"echo\", \"cmd\"]")) + + res := inspectFieldJSON(c, name, "Config.Cmd") + expected := `["echo","cmd"]` + if res != expected { + c.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res) + } +} + +func (s *DockerSuite) TestBuildEntrypointCanBeOverriddenByChild(c *check.C) { + buildImageSuccessfully(c, "parent", build.WithDockerfile(` + FROM busybox + ENTRYPOINT exit 130 + `)) + + icmd.RunCommand(dockerBinary, "run", "parent").Assert(c, icmd.Expected{ + ExitCode: 130, + }) + + buildImageSuccessfully(c, "child", build.WithDockerfile(` + FROM parent + ENTRYPOINT exit 5 + `)) + + icmd.RunCommand(dockerBinary, "run", "child").Assert(c, icmd.Expected{ + ExitCode: 5, + }) +} + +func (s *DockerSuite) TestBuildEntrypointCanBeOverriddenByChildInspect(c *check.C) { + var ( + name = "testbuildepinherit" + name2 = "testbuildepinherit2" + expected = `["/bin/sh","-c","echo quux"]` + ) + + if testEnv.DaemonPlatform() == "windows" { + expected = 
`["cmd","/S","/C","echo quux"]` + } + + buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nENTRYPOINT /foo/bar")) + buildImageSuccessfully(c, name2, build.WithDockerfile(fmt.Sprintf("FROM %s\nENTRYPOINT echo quux", name))) + + res := inspectFieldJSON(c, name2, "Config.Entrypoint") + if res != expected { + c.Fatalf("Expected value %s not in Config.Entrypoint: %s", expected, res) + } + + icmd.RunCommand(dockerBinary, "run", name2).Assert(c, icmd.Expected{ + Out: "quux", + }) +} + +func (s *DockerSuite) TestBuildRunShEntrypoint(c *check.C) { + name := "testbuildentrypoint" + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + ENTRYPOINT echo`)) + dockerCmd(c, "run", "--rm", name) +} + +func (s *DockerSuite) TestBuildExoticShellInterpolation(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildexoticshellinterpolation" + + buildImageSuccessfully(c, name, build.WithDockerfile(` + FROM busybox + + ENV SOME_VAR a.b.c + + RUN [ "$SOME_VAR" = 'a.b.c' ] + RUN [ "${SOME_VAR}" = 'a.b.c' ] + RUN [ "${SOME_VAR%.*}" = 'a.b' ] + RUN [ "${SOME_VAR%%.*}" = 'a' ] + RUN [ "${SOME_VAR#*.}" = 'b.c' ] + RUN [ "${SOME_VAR##*.}" = 'c' ] + RUN [ "${SOME_VAR/c/d}" = 'a.b.d' ] + RUN [ "${#SOME_VAR}" = '5' ] + + RUN [ "${SOME_UNSET_VAR:-$SOME_VAR}" = 'a.b.c' ] + RUN [ "${SOME_VAR:+Version: ${SOME_VAR}}" = 'Version: a.b.c' ] + RUN [ "${SOME_UNSET_VAR:+${SOME_VAR}}" = '' ] + RUN [ "${SOME_UNSET_VAR:-${SOME_VAR:-d.e.f}}" = 'a.b.c' ] + `)) +} + +func (s *DockerSuite) TestBuildVerifySingleQuoteFails(c *check.C) { + // This testcase is supposed to generate an error because the + // JSON array we're passing in on the CMD uses single quotes instead + // of double quotes (per the JSON spec). This means we interpret it + // as a "string" instead of "JSON array" and pass it on to "sh -c" and + // it should barf on it. 
+ name := "testbuildsinglequotefails" + expectedExitCode := 2 + if testEnv.DaemonPlatform() == "windows" { + expectedExitCode = 127 + } + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + CMD [ '/bin/sh', '-c', 'echo hi' ]`)) + + icmd.RunCommand(dockerBinary, "run", "--rm", name).Assert(c, icmd.Expected{ + ExitCode: expectedExitCode, + }) +} + +func (s *DockerSuite) TestBuildVerboseOut(c *check.C) { + name := "testbuildverboseout" + expected := "\n123\n" + + if testEnv.DaemonPlatform() == "windows" { + expected = "\n123\r\n" + } + + buildImage(name, build.WithDockerfile(`FROM busybox +RUN echo 123`)).Assert(c, icmd.Expected{ + Out: expected, + }) +} + +func (s *DockerSuite) TestBuildWithTabs(c *check.C) { + name := "testbuildwithtabs" + buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nRUN echo\tone\t\ttwo")) + res := inspectFieldJSON(c, name, "ContainerConfig.Cmd") + expected1 := `["/bin/sh","-c","echo\tone\t\ttwo"]` + expected2 := `["/bin/sh","-c","echo\u0009one\u0009\u0009two"]` // syntactically equivalent, and what Go 1.3 generates + if testEnv.DaemonPlatform() == "windows" { + expected1 = `["cmd","/S","/C","echo\tone\t\ttwo"]` + expected2 = `["cmd","/S","/C","echo\u0009one\u0009\u0009two"]` // syntactically equivalent, and what Go 1.3 generates + } + if res != expected1 && res != expected2 { + c.Fatalf("Missing tabs.\nGot: %s\nExp: %s or %s", res, expected1, expected2) + } +} + +func (s *DockerSuite) TestBuildLabels(c *check.C) { + name := "testbuildlabel" + expected := `{"License":"GPL","Vendor":"Acme"}` + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + LABEL Vendor=Acme + LABEL License GPL`)) + res := inspectFieldJSON(c, name, "Config.Labels") + if res != expected { + c.Fatalf("Labels %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildLabelsCache(c *check.C) { + name := "testbuildlabelcache" + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + LABEL Vendor=Acme`)) + id1 := getIDByName(c, name) + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + LABEL Vendor=Acme`)) + id2 := getIDByName(c, name) + if id1 != id2 { + c.Fatalf("Build 2 should have worked & used cache(%s,%s)", id1, id2) + } + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + LABEL Vendor=Acme1`)) + id2 = getIDByName(c, name) + if id1 == id2 { + c.Fatalf("Build 3 should have worked & NOT used cache(%s,%s)", id1, id2) + } + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + LABEL Vendor Acme`)) + id2 = getIDByName(c, name) + if id1 != id2 { + c.Fatalf("Build 4 should have worked & used cache(%s,%s)", id1, id2) + } + + // Now make sure the cache isn't used by mistake + buildImageSuccessfully(c, name, build.WithoutCache, build.WithDockerfile(`FROM busybox + LABEL f1=b1 f2=b2`)) + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + LABEL f1=b1 f2=b2`)) + id2 = getIDByName(c, name) + if id1 == id2 { + c.Fatalf("Build 6 should have worked & NOT used the cache(%s,%s)", id1, id2) + } + +} + +func (s *DockerSuite) TestBuildNotVerboseSuccess(c *check.C) { + // This test makes sure that -q works correctly when build is successful: + // stdout has only the image ID (long image ID) and stderr is empty. 
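+	// Editor's note: the pattern below accepts a 64-character ID with or without a
+	// digest prefix, e.g. both of these (hypothetical IDs) should match:
+	//
+	//	0123abc...<64 hex chars>\n
+	//	sha256:0123abc...<64 hex chars>\n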
+ outRegexp := regexp.MustCompile("^(sha256:|)[a-z0-9]{64}\\n$") + buildFlags := cli.WithFlags("-q") + + tt := []struct { + Name string + BuildFunc func(string) *icmd.Result + }{ + { + Name: "quiet_build_stdin_success", + BuildFunc: func(name string) *icmd.Result { + return buildImage(name, buildFlags, build.WithDockerfile("FROM busybox")) + }, + }, + { + Name: "quiet_build_ctx_success", + BuildFunc: func(name string) *icmd.Result { + return buildImage(name, buildFlags, build.WithBuildContext(c, + build.WithFile("Dockerfile", "FROM busybox"), + build.WithFile("quiet_build_success_fctx", "test"), + )) + }, + }, + { + Name: "quiet_build_git_success", + BuildFunc: func(name string) *icmd.Result { + git := fakegit.New(c, "repo", map[string]string{ + "Dockerfile": "FROM busybox", + }, true) + return buildImage(name, buildFlags, build.WithContextPath(git.RepoURL)) + }, + }, + } + + for _, te := range tt { + result := te.BuildFunc(te.Name) + result.Assert(c, icmd.Success) + if outRegexp.Find([]byte(result.Stdout())) == nil { + c.Fatalf("Test %s expected stdout to match the [%v] regexp, but it is [%v]", te.Name, outRegexp, result.Stdout()) + } + + if result.Stderr() != "" { + c.Fatalf("Test %s expected stderr to be empty, but it is [%#v]", te.Name, result.Stderr()) + } + } + +} + +func (s *DockerSuite) TestBuildNotVerboseFailureWithNonExistImage(c *check.C) { + // This test makes sure that -q works correctly when build fails by + // comparing between the stderr output in quiet mode and in stdout + // and stderr output in verbose mode + testRequires(c, Network) + testName := "quiet_build_not_exists_image" + dockerfile := "FROM busybox11" + quietResult := buildImage(testName, cli.WithFlags("-q"), build.WithDockerfile(dockerfile)) + quietResult.Assert(c, icmd.Expected{ + ExitCode: 1, + }) + result := buildImage(testName, build.WithDockerfile(dockerfile)) + result.Assert(c, icmd.Expected{ + ExitCode: 1, + }) + if quietResult.Stderr() != result.Combined() { + c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", testName, quietResult.Stderr(), result.Combined())) + } +} + +func (s *DockerSuite) TestBuildNotVerboseFailure(c *check.C) { + // This test makes sure that -q works correctly when build fails by + // comparing between the stderr output in quiet mode and in stdout + // and stderr output in verbose mode + testCases := []struct { + testName string + dockerfile string + }{ + {"quiet_build_no_from_at_the_beginning", "RUN whoami"}, + {"quiet_build_unknown_instr", "FROMD busybox"}, + } + + for _, tc := range testCases { + quietResult := buildImage(tc.testName, cli.WithFlags("-q"), build.WithDockerfile(tc.dockerfile)) + quietResult.Assert(c, icmd.Expected{ + ExitCode: 1, + }) + result := buildImage(tc.testName, build.WithDockerfile(tc.dockerfile)) + result.Assert(c, icmd.Expected{ + ExitCode: 1, + }) + if quietResult.Stderr() != result.Combined() { + c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", tc.testName, quietResult.Stderr(), result.Combined())) + } + } +} + +func (s *DockerSuite) TestBuildNotVerboseFailureRemote(c *check.C) { + // This test ensures that when given a wrong URL, stderr in quiet mode and + // stderr in verbose mode are identical. 
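+	// Editor's note: ".invalid" is a TLD reserved by RFC 2606 and can never
+	// resolve, so the URL below fails regardless of network state.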
+ // TODO(vdemeester) with cobra, stdout has a carriage return too much so this test should not check stdout + URL := "http://something.invalid" + name := "quiet_build_wrong_remote" + quietResult := buildImage(name, cli.WithFlags("-q"), build.WithContextPath(URL)) + quietResult.Assert(c, icmd.Expected{ + ExitCode: 1, + }) + result := buildImage(name, build.WithContextPath(URL)) + result.Assert(c, icmd.Expected{ + ExitCode: 1, + }) + if strings.TrimSpace(quietResult.Stderr()) != strings.TrimSpace(result.Combined()) { + c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", name, quietResult.Stderr(), result.Combined())) + } +} + +func (s *DockerSuite) TestBuildStderr(c *check.C) { + // This test just makes sure that no non-error output goes + // to stderr + name := "testbuildstderr" + result := buildImage(name, build.WithDockerfile("FROM busybox\nRUN echo one")) + result.Assert(c, icmd.Success) + + // Windows to non-Windows should have a security warning + if runtime.GOOS == "windows" && testEnv.DaemonPlatform() != "windows" && !strings.Contains(result.Stdout(), "SECURITY WARNING:") { + c.Fatalf("Stdout contains unexpected output: %q", result.Stdout()) + } + + // Stderr should always be empty + if result.Stderr() != "" { + c.Fatalf("Stderr should have been empty, instead it's: %q", result.Stderr()) + } +} + +func (s *DockerSuite) TestBuildChownSingleFile(c *check.C) { + testRequires(c, UnixCli, DaemonIsLinux) // test uses chown: not available on windows + + name := "testbuildchownsinglefile" + + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(` +FROM busybox +COPY test / +RUN ls -l /test +RUN [ $(ls -l /test | awk '{print $3":"$4}') = 'root:root' ] +`), + fakecontext.WithFiles(map[string]string{ + "test": "test", + })) + defer ctx.Close() + + if err := os.Chown(filepath.Join(ctx.Dir, "test"), 4242, 4242); err != nil { + c.Fatal(err) + } + + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) +} + +func (s *DockerSuite) TestBuildSymlinkBreakout(c *check.C) { + name := "testbuildsymlinkbreakout" + tmpdir, err := ioutil.TempDir("", name) + c.Assert(err, check.IsNil) + defer os.RemoveAll(tmpdir) + ctx := filepath.Join(tmpdir, "context") + if err := os.MkdirAll(ctx, 0755); err != nil { + c.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte(` + from busybox + add symlink.tar / + add inject /symlink/ + `), 0644); err != nil { + c.Fatal(err) + } + inject := filepath.Join(ctx, "inject") + if err := ioutil.WriteFile(inject, nil, 0644); err != nil { + c.Fatal(err) + } + f, err := os.Create(filepath.Join(ctx, "symlink.tar")) + if err != nil { + c.Fatal(err) + } + w := tar.NewWriter(f) + w.WriteHeader(&tar.Header{ + Name: "symlink2", + Typeflag: tar.TypeSymlink, + Linkname: "/../../../../../../../../../../../../../../", + Uid: os.Getuid(), + Gid: os.Getgid(), + }) + w.WriteHeader(&tar.Header{ + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: filepath.Join("symlink2", tmpdir), + Uid: os.Getuid(), + Gid: os.Getgid(), + }) + w.Close() + f.Close() + + buildImageSuccessfully(c, name, build.WithoutCache, build.WithExternalBuildContext(fakecontext.New(c, ctx))) + if _, err := os.Lstat(filepath.Join(tmpdir, "inject")); err == nil { + c.Fatal("symlink breakout - inject") + } else if !os.IsNotExist(err) { + c.Fatalf("unexpected error: %v", err) + } +} + +func (s *DockerSuite) TestBuildXZHost(c *check.C) { + // /usr/local/sbin/xz gets permission denied for the user + testRequires(c, 
NotUserNamespace) + testRequires(c, DaemonIsLinux) + name := "testbuildxzhost" + + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", ` +FROM busybox +ADD xz /usr/local/sbin/ +RUN chmod 755 /usr/local/sbin/xz +ADD test.xz / +RUN [ ! -e /injected ]`), + build.WithFile("test.xz", "\xfd\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00"+"\x21\x01\x16\x00\x00\x00\x74\x2f\xe5\xa3\x01\x00\x3f\xfd"+"\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00\x21"), + build.WithFile("xz", "#!/bin/sh\ntouch /injected"), + )) +} + +func (s *DockerSuite) TestBuildVolumesRetainContents(c *check.C) { + // /foo/file gets permission denied for the user + testRequires(c, NotUserNamespace) + testRequires(c, DaemonIsLinux) // TODO Windows: Issue #20127 + var ( + name = "testbuildvolumescontent" + expected = "some text" + volName = "/foo" + ) + + if testEnv.DaemonPlatform() == "windows" { + volName = "C:/foo" + } + + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", ` +FROM busybox +COPY content /foo/file +VOLUME `+volName+` +CMD cat /foo/file`), + build.WithFile("content", expected), + )) + + out, _ := dockerCmd(c, "run", "--rm", name) + if out != expected { + c.Fatalf("expected file contents for /foo/file to be %q but received %q", expected, out) + } + +} + +// FIXME(vdemeester) part of this should be unit test, other part should be clearer +func (s *DockerSuite) TestBuildRenamedDockerfile(c *check.C) { + ctx := fakecontext.New(c, "", fakecontext.WithFiles(map[string]string{ + "Dockerfile": "FROM busybox\nRUN echo from Dockerfile", + "files/Dockerfile": "FROM busybox\nRUN echo from files/Dockerfile", + "files/dFile": "FROM busybox\nRUN echo from files/dFile", + "dFile": "FROM busybox\nRUN echo from dFile", + "files/dFile2": "FROM busybox\nRUN echo from files/dFile2", + })) + defer ctx.Close() + + cli.Docker(cli.Args("build", "-t", "test1", "."), cli.InDir(ctx.Dir)).Assert(c, icmd.Expected{ + Out: "from Dockerfile", + }) + + cli.Docker(cli.Args("build", "-f", filepath.Join("files", "Dockerfile"), "-t", "test2", "."), cli.InDir(ctx.Dir)).Assert(c, icmd.Expected{ + Out: "from files/Dockerfile", + }) + + cli.Docker(cli.Args("build", fmt.Sprintf("--file=%s", filepath.Join("files", "dFile")), "-t", "test3", "."), cli.InDir(ctx.Dir)).Assert(c, icmd.Expected{ + Out: "from files/dFile", + }) + + cli.Docker(cli.Args("build", "--file=dFile", "-t", "test4", "."), cli.InDir(ctx.Dir)).Assert(c, icmd.Expected{ + Out: "from dFile", + }) + + dirWithNoDockerfile, err := ioutil.TempDir(os.TempDir(), "test5") + c.Assert(err, check.IsNil) + nonDockerfileFile := filepath.Join(dirWithNoDockerfile, "notDockerfile") + if _, err = os.Create(nonDockerfileFile); err != nil { + c.Fatal(err) + } + cli.Docker(cli.Args("build", fmt.Sprintf("--file=%s", nonDockerfileFile), "-t", "test5", "."), cli.InDir(ctx.Dir)).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: fmt.Sprintf("unable to prepare context: the Dockerfile (%s) must be within the build context", nonDockerfileFile), + }) + + cli.Docker(cli.Args("build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test6", ".."), cli.InDir(filepath.Join(ctx.Dir, "files"))).Assert(c, icmd.Expected{ + Out: "from Dockerfile", + }) + + cli.Docker(cli.Args("build", "-f", filepath.Join(ctx.Dir, "files", "Dockerfile"), "-t", "test7", ".."), cli.InDir(filepath.Join(ctx.Dir, "files"))).Assert(c, icmd.Expected{ + Out: "from files/Dockerfile", + }) + + cli.Docker(cli.Args("build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test8", 
"."), cli.InDir(filepath.Join(ctx.Dir, "files"))).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "must be within the build context", + }) + + tmpDir := os.TempDir() + cli.Docker(cli.Args("build", "-t", "test9", ctx.Dir), cli.InDir(tmpDir)).Assert(c, icmd.Expected{ + Out: "from Dockerfile", + }) + + cli.Docker(cli.Args("build", "-f", "dFile2", "-t", "test10", "."), cli.InDir(filepath.Join(ctx.Dir, "files"))).Assert(c, icmd.Expected{ + Out: "from files/dFile2", + }) +} + +func (s *DockerSuite) TestBuildFromMixedcaseDockerfile(c *check.C) { + testRequires(c, UnixCli) // Dockerfile overwrites dockerfile on windows + testRequires(c, DaemonIsLinux) + + // If Dockerfile is not present, use dockerfile + buildImage("test1", build.WithBuildContext(c, + build.WithFile("dockerfile", `FROM busybox + RUN echo from dockerfile`), + )).Assert(c, icmd.Expected{ + Out: "from dockerfile", + }) + + // Prefer Dockerfile in place of dockerfile + buildImage("test1", build.WithBuildContext(c, + build.WithFile("dockerfile", `FROM busybox + RUN echo from dockerfile`), + build.WithFile("Dockerfile", `FROM busybox + RUN echo from Dockerfile`), + )).Assert(c, icmd.Expected{ + Out: "from Dockerfile", + }) +} + +func (s *DockerSuite) TestBuildFromURLWithF(c *check.C) { + server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{"baz": `FROM busybox +RUN echo from baz +COPY * /tmp/ +RUN find /tmp/`})) + defer server.Close() + + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(`FROM busybox + RUN echo from Dockerfile`)) + defer ctx.Close() + + // Make sure that -f is ignored and that we don't use the Dockerfile + // that's in the current dir + result := cli.BuildCmd(c, "test1", cli.WithFlags("-f", "baz", server.URL()+"/baz"), func(cmd *icmd.Cmd) func() { + cmd.Dir = ctx.Dir + return nil + }) + + if !strings.Contains(result.Combined(), "from baz") || + strings.Contains(result.Combined(), "/tmp/baz") || + !strings.Contains(result.Combined(), "/tmp/Dockerfile") { + c.Fatalf("Missing proper output: %s", result.Combined()) + } + +} + +func (s *DockerSuite) TestBuildFromStdinWithF(c *check.C) { + testRequires(c, DaemonIsLinux) // TODO Windows: This test is flaky; no idea why + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(`FROM busybox +RUN echo "from Dockerfile"`)) + defer ctx.Close() + + // Make sure that -f is ignored and that we don't use the Dockerfile + // that's in the current dir + result := cli.BuildCmd(c, "test1", cli.WithFlags("-f", "baz", "-"), func(cmd *icmd.Cmd) func() { + cmd.Dir = ctx.Dir + cmd.Stdin = strings.NewReader(`FROM busybox +RUN echo "from baz" +COPY * /tmp/ +RUN sh -c "find /tmp/" # sh -c is needed on Windows to use the correct find`) + return nil + }) + + if !strings.Contains(result.Combined(), "from baz") || + strings.Contains(result.Combined(), "/tmp/baz") || + !strings.Contains(result.Combined(), "/tmp/Dockerfile") { + c.Fatalf("Missing proper output: %s", result.Combined()) + } + +} + +func (s *DockerSuite) TestBuildFromOfficialNames(c *check.C) { + name := "testbuildfromofficial" + fromNames := []string{ + "busybox", + "docker.io/busybox", + "index.docker.io/busybox", + "library/busybox", + "docker.io/library/busybox", + "index.docker.io/library/busybox", + } + for idx, fromName := range fromNames { + imgName := fmt.Sprintf("%s%d", name, idx) + buildImageSuccessfully(c, imgName, build.WithDockerfile("FROM "+fromName)) + dockerCmd(c, "rmi", imgName) + } +} + +func (s *DockerSuite) TestBuildDockerfileOutsideContext(c *check.C) { + testRequires(c, UnixCli, 
DaemonIsLinux) // uses os.Symlink: not implemented in windows at the time of writing (go-1.4.2) + + name := "testbuilddockerfileoutsidecontext" + tmpdir, err := ioutil.TempDir("", name) + c.Assert(err, check.IsNil) + defer os.RemoveAll(tmpdir) + ctx := filepath.Join(tmpdir, "context") + if err := os.MkdirAll(ctx, 0755); err != nil { + c.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte("FROM scratch\nENV X Y"), 0644); err != nil { + c.Fatal(err) + } + wd, err := os.Getwd() + if err != nil { + c.Fatal(err) + } + defer os.Chdir(wd) + if err := os.Chdir(ctx); err != nil { + c.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(tmpdir, "outsideDockerfile"), []byte("FROM scratch\nENV x y"), 0644); err != nil { + c.Fatal(err) + } + if err := os.Symlink(filepath.Join("..", "outsideDockerfile"), filepath.Join(ctx, "dockerfile1")); err != nil { + c.Fatal(err) + } + if err := os.Symlink(filepath.Join(tmpdir, "outsideDockerfile"), filepath.Join(ctx, "dockerfile2")); err != nil { + c.Fatal(err) + } + + for _, dockerfilePath := range []string{ + filepath.Join("..", "outsideDockerfile"), + filepath.Join(ctx, "dockerfile1"), + filepath.Join(ctx, "dockerfile2"), + } { + result := dockerCmdWithResult("build", "-t", name, "--no-cache", "-f", dockerfilePath, ".") + c.Assert(result, icmd.Matches, icmd.Expected{ + Err: "must be within the build context", + ExitCode: 1, + }) + deleteImages(name) + } + + os.Chdir(tmpdir) + + // Path to Dockerfile should be resolved relative to working directory, not relative to context. + // There is a Dockerfile in the context, but since there is no Dockerfile in the current directory, the following should fail + out, _, err := dockerCmdWithError("build", "-t", name, "--no-cache", "-f", "Dockerfile", ctx) + if err == nil { + c.Fatalf("Expected error. 
Out: %s", out)
+	}
+}
+
+// FIXME(vdemeester) should be a unit test
+func (s *DockerSuite) TestBuildSpaces(c *check.C) {
+	// Test to make sure that leading/trailing spaces on a command
+	// don't change the error msg we get
+	name := "testspaces"
+	ctx := fakecontext.New(c, "", fakecontext.WithDockerfile("FROM busybox\nCOPY\n"))
+	defer ctx.Close()
+
+	result1 := cli.Docker(cli.Build(name), build.WithExternalBuildContext(ctx))
+	result1.Assert(c, icmd.Expected{
+		ExitCode: 1,
+	})
+
+	ctx.Add("Dockerfile", "FROM busybox\nCOPY ")
+	result2 := cli.Docker(cli.Build(name), build.WithExternalBuildContext(ctx))
+	result2.Assert(c, icmd.Expected{
+		ExitCode: 1,
+	})
+
+	removeLogTimestamps := func(s string) string {
+		return regexp.MustCompile(`time="(.*?)"`).ReplaceAllString(s, `time=[TIMESTAMP]`)
+	}
+
+	// Skip over the times
+	e1 := removeLogTimestamps(result1.Error.Error())
+	e2 := removeLogTimestamps(result2.Error.Error())
+
+	// Ignore whitespace since that's what we're verifying doesn't change stuff
+	if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) {
+		c.Fatalf("Build 2's error wasn't the same as build 1's\n1:%s\n2:%s", result1.Error, result2.Error)
+	}
+
+	ctx.Add("Dockerfile", "FROM busybox\n COPY")
+	result2 = cli.Docker(cli.Build(name), build.WithoutCache, build.WithExternalBuildContext(ctx))
+	result2.Assert(c, icmd.Expected{
+		ExitCode: 1,
+	})
+
+	// Skip over the times
+	e1 = removeLogTimestamps(result1.Error.Error())
+	e2 = removeLogTimestamps(result2.Error.Error())
+
+	// Ignore whitespace since that's what we're verifying doesn't change stuff
+	if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) {
+		c.Fatalf("Build 3's error wasn't the same as build 1's\n1:%s\n3:%s", result1.Error, result2.Error)
+	}
+
+	ctx.Add("Dockerfile", "FROM busybox\n COPY ")
+	result2 = cli.Docker(cli.Build(name), build.WithoutCache, build.WithExternalBuildContext(ctx))
+	result2.Assert(c, icmd.Expected{
+		ExitCode: 1,
+	})
+
+	// Skip over the times
+	e1 = removeLogTimestamps(result1.Error.Error())
+	e2 = removeLogTimestamps(result2.Error.Error())
+
+	// Ignore whitespace since that's what we're verifying doesn't change stuff
+	if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) {
+		c.Fatalf("Build 4's error wasn't the same as build 1's\n1:%s\n4:%s", result1.Error, result2.Error)
+	}
+
+}
+
+func (s *DockerSuite) TestBuildSpacesWithQuotes(c *check.C) {
+	// Test to make sure that spaces in quotes aren't lost
+	name := "testspacesquotes"
+
+	dockerfile := `FROM busybox
+RUN echo " \
+ foo "`
+
+	expected := "\n foo \n"
+	// Windows uses the builtin echo, which preserves quotes
+	if testEnv.DaemonPlatform() == "windows" {
+		expected = "\" foo \""
+	}
+
+	buildImage(name, build.WithDockerfile(dockerfile)).Assert(c, icmd.Expected{
+		Out: expected,
+	})
+}
+
+// #4393
+func (s *DockerSuite) TestBuildVolumeFileExistsinContainer(c *check.C) {
+	testRequires(c, DaemonIsLinux) // TODO Windows: This should error out
+	buildImage("docker-test-errcreatevolumewithfile", build.WithDockerfile(`
+	FROM busybox
+	RUN touch /foo
+	VOLUME /foo
+	`)).Assert(c, icmd.Expected{
+		ExitCode: 1,
+		Err: "file exists",
+	})
+}
+
+// FIXME(vdemeester) should be a unit test
+func (s *DockerSuite) TestBuildMissingArgs(c *check.C) {
+	// Test to make sure that all Dockerfile commands (except the ones listed
+	// in skipCmds) will generate an error if no args are provided.
+	// Note: INSERT is deprecated so we exclude it because of that.
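+	// Editor's note (sketch of the loop below): each registered builder command is
+	// exercised with a minimal Dockerfile, e.g. for ENV:
+	//
+	//	FROM busybox
+	//	ENV
+	//
+	// and the build must fail with "ENV requires". CMD/RUN/ENTRYPOINT are skipped,
+	// presumably because an empty argument form is legal for them.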
+    skipCmds := map[string]struct{}{
+        "CMD":        {},
+        "RUN":        {},
+        "ENTRYPOINT": {},
+        "INSERT":     {},
+    }
+
+    if testEnv.DaemonPlatform() == "windows" {
+        skipCmds = map[string]struct{}{
+            "CMD":        {},
+            "RUN":        {},
+            "ENTRYPOINT": {},
+            "INSERT":     {},
+            "STOPSIGNAL": {},
+            "ARG":        {},
+            "USER":       {},
+            "EXPOSE":     {},
+        }
+    }
+
+    for cmd := range command.Commands {
+        cmd = strings.ToUpper(cmd)
+        if _, ok := skipCmds[cmd]; ok {
+            continue
+        }
+        var dockerfile string
+        if cmd == "FROM" {
+            dockerfile = cmd
+        } else {
+            // Add FROM to make sure we don't complain about it missing
+            dockerfile = "FROM busybox\n" + cmd
+        }
+
+        buildImage("args", build.WithDockerfile(dockerfile)).Assert(c, icmd.Expected{
+            ExitCode: 1,
+            Err:      cmd + " requires",
+        })
+    }
+}
+
+func (s *DockerSuite) TestBuildEmptyScratch(c *check.C) {
+    testRequires(c, DaemonIsLinux)
+    buildImage("sc", build.WithDockerfile("FROM scratch")).Assert(c, icmd.Expected{
+        ExitCode: 1,
+        Err:      "No image was generated",
+    })
+}
+
+func (s *DockerSuite) TestBuildDotDotFile(c *check.C) {
+    buildImageSuccessfully(c, "sc", build.WithBuildContext(c,
+        build.WithFile("Dockerfile", "FROM busybox\n"),
+        build.WithFile("..gitme", ""),
+    ))
+}
+
+func (s *DockerSuite) TestBuildRUNoneJSON(c *check.C) {
+    testRequires(c, DaemonIsLinux) // No hello-world Windows image
+    name := "testbuildrunonejson"
+
+    buildImage(name, build.WithDockerfile(`FROM hello-world:frozen
+RUN [ "/hello" ]`)).Assert(c, icmd.Expected{
+        Out: "Hello from Docker",
+    })
+}
+
+func (s *DockerSuite) TestBuildEmptyStringVolume(c *check.C) {
+    name := "testbuildemptystringvolume"
+
+    buildImage(name, build.WithDockerfile(`
+    FROM busybox
+    ENV foo=""
+    VOLUME $foo
+    `)).Assert(c, icmd.Expected{
+        ExitCode: 1,
+    })
+}
+
+func (s *DockerSuite) TestBuildContainerWithCgroupParent(c *check.C) {
+    testRequires(c, SameHostDaemon, DaemonIsLinux)
+
+    cgroupParent := "test"
+    data, err := ioutil.ReadFile("/proc/self/cgroup")
+    if err != nil {
+        c.Fatalf("failed to read '/proc/self/cgroup' - %v", err)
+    }
+    selfCgroupPaths := testutil.ParseCgroupPaths(string(data))
+    _, found := selfCgroupPaths["memory"]
+    if !found {
+        c.Fatalf("unable to find self memory cgroup path. CgroupsPath: %v", selfCgroupPaths)
+    }
+    result := buildImage("buildcgroupparent",
+        cli.WithFlags("--cgroup-parent", cgroupParent),
+        build.WithDockerfile(`
+FROM busybox
+RUN cat /proc/self/cgroup
+`))
+    result.Assert(c, icmd.Success)
+    m, err := regexp.MatchString(fmt.Sprintf("memory:.*/%s/.*", cgroupParent), result.Combined())
+    c.Assert(err, check.IsNil)
+    if !m {
+        c.Fatalf("There is no expected memory cgroup with parent /%s/: %s", cgroupParent, result.Combined())
+    }
+}
+
+// FIXME(vdemeester) could be a unit test
+func (s *DockerSuite) TestBuildNoDupOutput(c *check.C) {
+    // Check to make sure our build output prints the Dockerfile cmd
+    // properly - there was a bug that caused it to be duplicated on the
+    // Step X line
+    name := "testbuildnodupoutput"
+    result := buildImage(name, build.WithDockerfile(`
+  FROM busybox
+  RUN env`))
+    result.Assert(c, icmd.Success)
+    exp := "\nStep 2/2 : RUN env\n"
+    if !strings.Contains(result.Combined(), exp) {
+        c.Fatalf("Bad output\nGot:%s\n\nExpected to contain:%s\n", result.Combined(), exp)
+    }
+}
+
+// GH15826
+// FIXME(vdemeester) could be a unit test
+func (s *DockerSuite) TestBuildStartsFromOne(c *check.C) {
+    // Explicit check to ensure that build starts from step 1 rather than 0
+    name := "testbuildstartsfromone"
+    result := buildImage(name, build.WithDockerfile(`FROM busybox`))
+    result.Assert(c, icmd.Success)
+    exp := "\nStep 1/1 : FROM busybox\n"
+    if !strings.Contains(result.Combined(), exp) {
+        c.Fatalf("Bad output\nGot:%s\n\nExpected to contain:%s\n", result.Combined(), exp)
+    }
+}
+
+func (s *DockerSuite) TestBuildRUNErrMsg(c *check.C) {
+    // Test to make sure the bad command is quoted with just "s and
+    // not as a Go []string
+    name := "testbuildbadrunerrmsg"
+    shell := "/bin/sh -c"
+    exitCode := 127
+    if testEnv.DaemonPlatform() == "windows" {
+        shell = "cmd /S /C"
+        // architectural - Windows has to start the container to determine the exe is bad, Linux does not
+        exitCode = 1
+    }
+    exp := fmt.Sprintf(`The command '%s badEXE a1 \& a2 a3' returned a non-zero code: %d`, shell, exitCode)
+
+    buildImage(name, build.WithDockerfile(`
+  FROM busybox
+  RUN badEXE a1 \& a2 a3`)).Assert(c, icmd.Expected{
+        ExitCode: exitCode,
+        Err:      exp,
+    })
+}
+
+func (s *DockerTrustSuite) TestTrustedBuild(c *check.C) {
+    repoName := s.setupTrustedImage(c, "trusted-build")
+    dockerFile := fmt.Sprintf(`
+  FROM %s
+  RUN []
+  `, repoName)
+
+    name := "testtrustedbuild"
+
+    buildImage(name, trustedBuild, build.WithDockerfile(dockerFile)).Assert(c, icmd.Expected{
+        Out: fmt.Sprintf("FROM %s@sha", repoName[:len(repoName)-7]),
+    })
+
+    // We should also have a tag reference for the image.
+    dockerCmd(c, "inspect", repoName)
+
+    // We should now be able to remove the tag reference.
+    dockerCmd(c, "rmi", repoName)
+}
+
+func (s *DockerTrustSuite) TestTrustedBuildUntrustedTag(c *check.C) {
+    repoName := fmt.Sprintf("%v/dockercli/build-untrusted-tag:latest", privateRegistryURL)
+    dockerFile := fmt.Sprintf(`
+  FROM %s
+  RUN []
+  `, repoName)
+
+    name := "testtrustedbuilduntrustedtag"
+
+    buildImage(name, trustedBuild, build.WithDockerfile(dockerFile)).Assert(c, icmd.Expected{
+        ExitCode: 1,
+        Err:      "does not have trust data for",
+    })
+}
+
+func (s *DockerTrustSuite) TestBuildContextDirIsSymlink(c *check.C) {
+    testRequires(c, DaemonIsLinux)
+    tempDir, err := ioutil.TempDir("", "test-build-dir-is-symlink-")
+    c.Assert(err, check.IsNil)
+    defer os.RemoveAll(tempDir)
+
+    // Make a real context directory in this temp directory with a simple
+    // Dockerfile.
+ realContextDirname := filepath.Join(tempDir, "context") + if err := os.Mkdir(realContextDirname, os.FileMode(0755)); err != nil { + c.Fatal(err) + } + + if err = ioutil.WriteFile( + filepath.Join(realContextDirname, "Dockerfile"), + []byte(` + FROM busybox + RUN echo hello world + `), + os.FileMode(0644), + ); err != nil { + c.Fatal(err) + } + + // Make a symlink to the real context directory. + contextSymlinkName := filepath.Join(tempDir, "context_link") + if err := os.Symlink(realContextDirname, contextSymlinkName); err != nil { + c.Fatal(err) + } + + // Executing the build with the symlink as the specified context should + // *not* fail. + dockerCmd(c, "build", contextSymlinkName) +} + +func (s *DockerTrustSuite) TestTrustedBuildTagFromReleasesRole(c *check.C) { + testRequires(c, NotaryHosting) + + latestTag := s.setupTrustedImage(c, "trusted-build-releases-role") + repoName := strings.TrimSuffix(latestTag, ":latest") + + // Now create the releases role + s.notaryCreateDelegation(c, repoName, "targets/releases", s.not.keys[0].Public) + s.notaryImportKey(c, repoName, "targets/releases", s.not.keys[0].Private) + s.notaryPublish(c, repoName) + + // push a different tag to the releases role + otherTag := fmt.Sprintf("%s:other", repoName) + cli.DockerCmd(c, "tag", "busybox", otherTag) + + cli.Docker(cli.Args("push", otherTag), trustedCmd).Assert(c, icmd.Success) + s.assertTargetInRoles(c, repoName, "other", "targets/releases") + s.assertTargetNotInRoles(c, repoName, "other", "targets") + + cli.DockerCmd(c, "rmi", otherTag) + + dockerFile := fmt.Sprintf(` + FROM %s + RUN [] + `, otherTag) + name := "testtrustedbuildreleasesrole" + cli.BuildCmd(c, name, trustedCmd, build.WithDockerfile(dockerFile)).Assert(c, icmd.Expected{ + Out: fmt.Sprintf("FROM %s@sha", repoName), + }) +} + +func (s *DockerTrustSuite) TestTrustedBuildTagIgnoresOtherDelegationRoles(c *check.C) { + testRequires(c, NotaryHosting) + + latestTag := s.setupTrustedImage(c, "trusted-build-releases-role") + repoName := strings.TrimSuffix(latestTag, ":latest") + + // Now create a non-releases delegation role + s.notaryCreateDelegation(c, repoName, "targets/other", s.not.keys[0].Public) + s.notaryImportKey(c, repoName, "targets/other", s.not.keys[0].Private) + s.notaryPublish(c, repoName) + + // push a different tag to the other role + otherTag := fmt.Sprintf("%s:other", repoName) + cli.DockerCmd(c, "tag", "busybox", otherTag) + + cli.Docker(cli.Args("push", otherTag), trustedCmd).Assert(c, icmd.Success) + s.assertTargetInRoles(c, repoName, "other", "targets/other") + s.assertTargetNotInRoles(c, repoName, "other", "targets") + + cli.DockerCmd(c, "rmi", otherTag) + + dockerFile := fmt.Sprintf(` + FROM %s + RUN [] + `, otherTag) + + name := "testtrustedbuildotherrole" + cli.Docker(cli.Build(name), trustedCmd, build.WithDockerfile(dockerFile)).Assert(c, icmd.Expected{ + ExitCode: 1, + }) +} + +// Issue #15634: COPY fails when path starts with "null" +func (s *DockerSuite) TestBuildNullStringInAddCopyVolume(c *check.C) { + name := "testbuildnullstringinaddcopyvolume" + volName := "nullvolume" + if testEnv.DaemonPlatform() == "windows" { + volName = `C:\\nullvolume` + } + + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", ` + FROM busybox + + ADD null / + COPY nullfile / + VOLUME `+volName+` + `), + build.WithFile("null", "test1"), + build.WithFile("nullfile", "test2"), + )) +} + +func (s *DockerSuite) TestBuildStopSignal(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support 
STOPSIGNAL yet + imgName := "test_build_stop_signal" + buildImageSuccessfully(c, imgName, build.WithDockerfile(`FROM busybox + STOPSIGNAL SIGKILL`)) + res := inspectFieldJSON(c, imgName, "Config.StopSignal") + if res != `"SIGKILL"` { + c.Fatalf("Signal %s, expected SIGKILL", res) + } + + containerName := "test-container-stop-signal" + dockerCmd(c, "run", "-d", "--name", containerName, imgName, "top") + res = inspectFieldJSON(c, containerName, "Config.StopSignal") + if res != `"SIGKILL"` { + c.Fatalf("Signal %s, expected SIGKILL", res) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArg(c *check.C) { + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + var dockerfile string + if testEnv.DaemonPlatform() == "windows" { + // Bugs in Windows busybox port - use the default base image and native cmd stuff + dockerfile = fmt.Sprintf(`FROM `+minimalBaseImage()+` + ARG %s + RUN echo %%%s%% + CMD setlocal enableextensions && if defined %s (echo %%%s%%)`, envKey, envKey, envKey, envKey) + } else { + dockerfile = fmt.Sprintf(`FROM busybox + ARG %s + RUN echo $%s + CMD echo $%s`, envKey, envKey, envKey) + + } + buildImage(imgName, + cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)), + build.WithDockerfile(dockerfile), + ).Assert(c, icmd.Expected{ + Out: envVal, + }) + + containerName := "bldargCont" + out, _ := dockerCmd(c, "run", "--name", containerName, imgName) + out = strings.Trim(out, " \r\n'") + if out != "" { + c.Fatalf("run produced invalid output: %q, expected empty string", out) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgHistory(c *check.C) { + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + envDef := "bar1" + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s=%s`, envKey, envDef) + buildImage(imgName, + cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)), + build.WithDockerfile(dockerfile), + ).Assert(c, icmd.Expected{ + Out: envVal, + }) + + out, _ := dockerCmd(c, "history", "--no-trunc", imgName) + outputTabs := strings.Split(out, "\n")[1] + if !strings.Contains(outputTabs, envDef) { + c.Fatalf("failed to find arg default in image history output: %q expected: %q", outputTabs, envDef) + } +} + +func (s *DockerSuite) TestBuildTimeArgHistoryExclusions(c *check.C) { + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + proxy := "HTTP_PROXY=http://user:password@proxy.example.com" + explicitProxyKey := "http_proxy" + explicitProxyVal := "http://user:password@someproxy.example.com" + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s + ARG %s + RUN echo "Testing Build Args!"`, envKey, explicitProxyKey) + + buildImage := func(imgName string) string { + cli.BuildCmd(c, imgName, + cli.WithFlags("--build-arg", "https_proxy=https://proxy.example.com", + "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), + "--build-arg", fmt.Sprintf("%s=%s", explicitProxyKey, explicitProxyVal), + "--build-arg", proxy), + build.WithDockerfile(dockerfile), + ) + return getIDByName(c, imgName) + } + + origID := buildImage(imgName) + result := cli.DockerCmd(c, "history", "--no-trunc", imgName) + out := result.Stdout() + + if strings.Contains(out, proxy) { + c.Fatalf("failed to exclude proxy settings from history!") + } + if strings.Contains(out, "https_proxy") { + c.Fatalf("failed to exclude proxy settings from history!") + } + result.Assert(c, icmd.Expected{Out: fmt.Sprintf("%s=%s", envKey, envVal)}) + result.Assert(c, icmd.Expected{Out: fmt.Sprintf("%s=%s", explicitProxyKey, explicitProxyVal)}) + + cacheID := buildImage(imgName + "-two") + 
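+    // A second build with identical build-args should be fully cached and
+    // therefore produce the same image ID.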
c.Assert(origID, checker.Equals, cacheID)
+}
+
+func (s *DockerSuite) TestBuildBuildTimeArgCacheHit(c *check.C) {
+    imgName := "bldargtest"
+    envKey := "foo"
+    envVal := "bar"
+    dockerfile := fmt.Sprintf(`FROM busybox
+        ARG %s
+        RUN echo $%s`, envKey, envKey)
+    buildImageSuccessfully(c, imgName,
+        cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
+        build.WithDockerfile(dockerfile),
+    )
+    origImgID := getIDByName(c, imgName)
+
+    imgNameCache := "bldargtestcachehit"
+    buildImageSuccessfully(c, imgNameCache,
+        cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
+        build.WithDockerfile(dockerfile),
+    )
+    newImgID := getIDByName(c, imgNameCache)
+    if newImgID != origImgID {
+        c.Fatalf("build didn't use cache! expected image id: %q built image id: %q", origImgID, newImgID)
+    }
+}
+
+func (s *DockerSuite) TestBuildBuildTimeArgCacheMissExtraArg(c *check.C) {
+    imgName := "bldargtest"
+    envKey := "foo"
+    envVal := "bar"
+    extraEnvKey := "foo1"
+    extraEnvVal := "bar1"
+    dockerfile := fmt.Sprintf(`FROM busybox
+        ARG %s
+        ARG %s
+        RUN echo $%s`, envKey, extraEnvKey, envKey)
+    buildImageSuccessfully(c, imgName,
+        cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
+        build.WithDockerfile(dockerfile),
+    )
+    origImgID := getIDByName(c, imgName)
+
+    imgNameCache := "bldargtestcachemiss"
+    buildImageSuccessfully(c, imgNameCache,
+        cli.WithFlags(
+            "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal),
+            "--build-arg", fmt.Sprintf("%s=%s", extraEnvKey, extraEnvVal),
+        ),
+        build.WithDockerfile(dockerfile),
+    )
+    newImgID := getIDByName(c, imgNameCache)
+
+    if newImgID == origImgID {
+        c.Fatalf("build used cache, expected a miss!")
+    }
+}
+
+func (s *DockerSuite) TestBuildBuildTimeArgCacheMissSameArgDiffVal(c *check.C) {
+    imgName := "bldargtest"
+    envKey := "foo"
+    envVal := "bar"
+    newEnvVal := "bar1"
+    dockerfile := fmt.Sprintf(`FROM busybox
+        ARG %s
+        RUN echo $%s`, envKey, envKey)
+    buildImageSuccessfully(c, imgName,
+        cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
+        build.WithDockerfile(dockerfile),
+    )
+    origImgID := getIDByName(c, imgName)
+
+    imgNameCache := "bldargtestcachemiss"
+    buildImageSuccessfully(c, imgNameCache,
+        cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, newEnvVal)),
+        build.WithDockerfile(dockerfile),
+    )
+    newImgID := getIDByName(c, imgNameCache)
+    if newImgID == origImgID {
+        c.Fatalf("build used cache, expected a miss!")
+    }
+}
+
+func (s *DockerSuite) TestBuildBuildTimeArgOverrideArgDefinedBeforeEnv(c *check.C) {
+    testRequires(c, DaemonIsLinux) // Windows does not support ARG
+    imgName := "bldargtest"
+    envKey := "foo"
+    envVal := "bar"
+    envValOverride := "barOverride"
+    dockerfile := fmt.Sprintf(`FROM busybox
+        ARG %s
+        ENV %s %s
+        RUN echo $%s
+        CMD echo $%s
+    `, envKey, envKey, envValOverride, envKey, envKey)
+
+    result := buildImage(imgName,
+        cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
+        build.WithDockerfile(dockerfile),
+    )
+    result.Assert(c, icmd.Success)
+    if strings.Count(result.Combined(), envValOverride) != 2 {
+        c.Fatalf("failed to access environment variable in output: %q expected: %q", result.Combined(), envValOverride)
+    }
+
+    containerName := "bldargCont"
+    if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOverride) {
+        c.Fatalf("run produced invalid output: %q, expected %q", out, envValOverride)
+    }
+}
+
+// FIXME(vdemeester) might be useful to merge with the one above?
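+// (Same scenario as the test above, but with ENV defined before ARG: the ENV
+// value must still win, e.g. `docker build --build-arg foo=bar .` still ends
+// up with foo=barOverride in both the RUN and CMD output.)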
+func (s *DockerSuite) TestBuildBuildTimeArgOverrideEnvDefinedBeforeArg(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support ARG + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + envValOverride := "barOverride" + dockerfile := fmt.Sprintf(`FROM busybox + ENV %s %s + ARG %s + RUN echo $%s + CMD echo $%s + `, envKey, envValOverride, envKey, envKey, envKey) + result := buildImage(imgName, + cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)), + build.WithDockerfile(dockerfile), + ) + result.Assert(c, icmd.Success) + if strings.Count(result.Combined(), envValOverride) != 2 { + c.Fatalf("failed to access environment variable in output: %q expected: %q", result.Combined(), envValOverride) + } + + containerName := "bldargCont" + if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOverride) { + c.Fatalf("run produced invalid output: %q, expected %q", out, envValOverride) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgExpansion(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support ARG + imgName := "bldvarstest" + + wdVar := "WDIR" + wdVal := "/tmp/" + addVar := "AFILE" + addVal := "addFile" + copyVar := "CFILE" + copyVal := "copyFile" + envVar := "foo" + envVal := "bar" + exposeVar := "EPORT" + exposeVal := "9999" + userVar := "USER" + userVal := "testUser" + volVar := "VOL" + volVal := "/testVol/" + + buildImageSuccessfully(c, imgName, + cli.WithFlags( + "--build-arg", fmt.Sprintf("%s=%s", wdVar, wdVal), + "--build-arg", fmt.Sprintf("%s=%s", addVar, addVal), + "--build-arg", fmt.Sprintf("%s=%s", copyVar, copyVal), + "--build-arg", fmt.Sprintf("%s=%s", envVar, envVal), + "--build-arg", fmt.Sprintf("%s=%s", exposeVar, exposeVal), + "--build-arg", fmt.Sprintf("%s=%s", userVar, userVal), + "--build-arg", fmt.Sprintf("%s=%s", volVar, volVal), + ), + build.WithBuildContext(c, + build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox + ARG %s + WORKDIR ${%s} + ARG %s + ADD ${%s} testDir/ + ARG %s + COPY $%s testDir/ + ARG %s + ENV %s=${%s} + ARG %s + EXPOSE $%s + ARG %s + USER $%s + ARG %s + VOLUME ${%s}`, + wdVar, wdVar, addVar, addVar, copyVar, copyVar, envVar, envVar, + envVar, exposeVar, exposeVar, userVar, userVar, volVar, volVar)), + build.WithFile(addVal, "some stuff"), + build.WithFile(copyVal, "some stuff"), + ), + ) + + res := inspectField(c, imgName, "Config.WorkingDir") + c.Check(res, check.Equals, filepath.ToSlash(wdVal)) + + var resArr []string + inspectFieldAndUnmarshall(c, imgName, "Config.Env", &resArr) + + found := false + for _, v := range resArr { + if fmt.Sprintf("%s=%s", envVar, envVal) == v { + found = true + break + } + } + if !found { + c.Fatalf("Config.Env value mismatch. Expected to exist: %s=%s, got: %v", + envVar, envVal, resArr) + } + + var resMap map[string]interface{} + inspectFieldAndUnmarshall(c, imgName, "Config.ExposedPorts", &resMap) + if _, ok := resMap[fmt.Sprintf("%s/tcp", exposeVal)]; !ok { + c.Fatalf("Config.ExposedPorts value mismatch. Expected exposed port: %s/tcp, got: %v", exposeVal, resMap) + } + + res = inspectField(c, imgName, "Config.User") + if res != userVal { + c.Fatalf("Config.User value mismatch. Expected: %s, got: %s", userVal, res) + } + + inspectFieldAndUnmarshall(c, imgName, "Config.Volumes", &resMap) + if _, ok := resMap[volVal]; !ok { + c.Fatalf("Config.Volumes value mismatch. 
Expected volume: %s, got: %v", volVal, resMap) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgExpansionOverride(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support ARG + imgName := "bldvarstest" + envKey := "foo" + envVal := "bar" + envKey1 := "foo1" + envValOverride := "barOverride" + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s + ENV %s %s + ENV %s ${%s} + RUN echo $%s + CMD echo $%s`, envKey, envKey, envValOverride, envKey1, envKey, envKey1, envKey1) + result := buildImage(imgName, + cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)), + build.WithDockerfile(dockerfile), + ) + result.Assert(c, icmd.Success) + if strings.Count(result.Combined(), envValOverride) != 2 { + c.Fatalf("failed to access environment variable in output: %q expected: %q", result.Combined(), envValOverride) + } + + containerName := "bldargCont" + if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOverride) { + c.Fatalf("run produced invalid output: %q, expected %q", out, envValOverride) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgUntrustedDefinedAfterUse(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support ARG + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + dockerfile := fmt.Sprintf(`FROM busybox + RUN echo $%s + ARG %s + CMD echo $%s`, envKey, envKey, envKey) + result := buildImage(imgName, + cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)), + build.WithDockerfile(dockerfile), + ) + result.Assert(c, icmd.Success) + if strings.Contains(result.Combined(), envVal) { + c.Fatalf("able to access environment variable in output: %q expected to be missing", result.Combined()) + } + + containerName := "bldargCont" + if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); out != "\n" { + c.Fatalf("run produced invalid output: %q, expected empty string", out) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgBuiltinArg(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support --build-arg + imgName := "bldargtest" + envKey := "HTTP_PROXY" + envVal := "bar" + dockerfile := fmt.Sprintf(`FROM busybox + RUN echo $%s + CMD echo $%s`, envKey, envKey) + + result := buildImage(imgName, + cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)), + build.WithDockerfile(dockerfile), + ) + result.Assert(c, icmd.Success) + if !strings.Contains(result.Combined(), envVal) { + c.Fatalf("failed to access environment variable in output: %q expected: %q", result.Combined(), envVal) + } + containerName := "bldargCont" + if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); out != "\n" { + c.Fatalf("run produced invalid output: %q, expected empty string", out) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgDefaultOverride(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support ARG + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + envValOverride := "barOverride" + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s=%s + ENV %s $%s + RUN echo $%s + CMD echo $%s`, envKey, envVal, envKey, envKey, envKey, envKey) + result := buildImage(imgName, + cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envValOverride)), + build.WithDockerfile(dockerfile), + ) + result.Assert(c, icmd.Success) + if strings.Count(result.Combined(), envValOverride) != 1 { + c.Fatalf("failed to access environment variable in output: %q expected: %q", result.Combined(), envValOverride) + } + + containerName := "bldargCont" + if 
out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOverride) { + c.Fatalf("run produced invalid output: %q, expected %q", out, envValOverride) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgUnconsumedArg(c *check.C) { + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + dockerfile := fmt.Sprintf(`FROM busybox + RUN echo $%s + CMD echo $%s`, envKey, envKey) + warnStr := "[Warning] One or more build-args" + buildImage(imgName, + cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)), + build.WithDockerfile(dockerfile), + ).Assert(c, icmd.Expected{ + Out: warnStr, + }) +} + +func (s *DockerSuite) TestBuildBuildTimeArgEnv(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support ARG + dockerfile := `FROM busybox + ARG FOO1=fromfile + ARG FOO2=fromfile + ARG FOO3=fromfile + ARG FOO4=fromfile + ARG FOO5 + ARG FOO6 + ARG FO10 + RUN env + RUN [ "$FOO1" == "fromcmd" ] + RUN [ "$FOO2" == "" ] + RUN [ "$FOO3" == "fromenv" ] + RUN [ "$FOO4" == "fromfile" ] + RUN [ "$FOO5" == "fromcmd" ] + # The following should not exist at all in the env + RUN [ "$(env | grep FOO6)" == "" ] + RUN [ "$(env | grep FOO7)" == "" ] + RUN [ "$(env | grep FOO8)" == "" ] + RUN [ "$(env | grep FOO9)" == "" ] + RUN [ "$FO10" == "" ] + ` + result := buildImage("testbuildtimeargenv", + cli.WithFlags( + "--build-arg", fmt.Sprintf("FOO1=fromcmd"), + "--build-arg", fmt.Sprintf("FOO2="), + "--build-arg", fmt.Sprintf("FOO3"), // set in env + "--build-arg", fmt.Sprintf("FOO4"), // not set in env + "--build-arg", fmt.Sprintf("FOO5=fromcmd"), + // FOO6 is not set at all + "--build-arg", fmt.Sprintf("FOO7=fromcmd"), // should produce a warning + "--build-arg", fmt.Sprintf("FOO8="), // should produce a warning + "--build-arg", fmt.Sprintf("FOO9"), // should produce a warning + "--build-arg", fmt.Sprintf("FO10"), // not set in env, empty value + ), + cli.WithEnvironmentVariables(append(os.Environ(), + "FOO1=fromenv", + "FOO2=fromenv", + "FOO3=fromenv")...), + build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + ), + ) + result.Assert(c, icmd.Success) + + // Now check to make sure we got a warning msg about unused build-args + i := strings.Index(result.Combined(), "[Warning]") + if i < 0 { + c.Fatalf("Missing the build-arg warning in %q", result.Combined()) + } + + out := result.Combined()[i:] // "out" should contain just the warning message now + + // These were specified on a --build-arg but no ARG was in the Dockerfile + c.Assert(out, checker.Contains, "FOO7") + c.Assert(out, checker.Contains, "FOO8") + c.Assert(out, checker.Contains, "FOO9") +} + +func (s *DockerSuite) TestBuildBuildTimeArgQuotedValVariants(c *check.C) { + imgName := "bldargtest" + envKey := "foo" + envKey1 := "foo1" + envKey2 := "foo2" + envKey3 := "foo3" + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s="" + ARG %s='' + ARG %s="''" + ARG %s='""' + RUN [ "$%s" != "$%s" ] + RUN [ "$%s" != "$%s" ] + RUN [ "$%s" != "$%s" ] + RUN [ "$%s" != "$%s" ] + RUN [ "$%s" != "$%s" ]`, envKey, envKey1, envKey2, envKey3, + envKey, envKey2, envKey, envKey3, envKey1, envKey2, envKey1, envKey3, + envKey2, envKey3) + buildImageSuccessfully(c, imgName, build.WithDockerfile(dockerfile)) +} + +func (s *DockerSuite) TestBuildBuildTimeArgEmptyValVariants(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support ARG + imgName := "bldargtest" + envKey := "foo" + envKey1 := "foo1" + envKey2 := "foo2" + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s= + ARG %s="" + ARG 
%s='' + RUN [ "$%s" == "$%s" ] + RUN [ "$%s" == "$%s" ] + RUN [ "$%s" == "$%s" ]`, envKey, envKey1, envKey2, envKey, envKey1, envKey1, envKey2, envKey, envKey2) + buildImageSuccessfully(c, imgName, build.WithDockerfile(dockerfile)) +} + +func (s *DockerSuite) TestBuildBuildTimeArgDefinitionWithNoEnvInjection(c *check.C) { + imgName := "bldargtest" + envKey := "foo" + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s + RUN env`, envKey) + + result := cli.BuildCmd(c, imgName, build.WithDockerfile(dockerfile)) + result.Assert(c, icmd.Success) + if strings.Count(result.Combined(), envKey) != 1 { + c.Fatalf("unexpected number of occurrences of the arg in output: %q expected: 1", result.Combined()) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgMultipleFrom(c *check.C) { + imgName := "multifrombldargtest" + dockerfile := `FROM busybox + ARG foo=abc + LABEL multifromtest=1 + RUN env > /out + FROM busybox + ARG bar=def + RUN env > /out` + + result := cli.BuildCmd(c, imgName, build.WithDockerfile(dockerfile)) + result.Assert(c, icmd.Success) + + result = cli.DockerCmd(c, "images", "-q", "-f", "label=multifromtest=1") + parentID := strings.TrimSpace(result.Stdout()) + + result = cli.DockerCmd(c, "run", "--rm", parentID, "cat", "/out") + c.Assert(result.Stdout(), checker.Contains, "foo=abc") + + result = cli.DockerCmd(c, "run", "--rm", imgName, "cat", "/out") + c.Assert(result.Stdout(), checker.Not(checker.Contains), "foo") + c.Assert(result.Stdout(), checker.Contains, "bar=def") +} + +func (s *DockerSuite) TestBuildBuildTimeFromArgMultipleFrom(c *check.C) { + imgName := "multifrombldargtest" + dockerfile := `ARG tag=nosuchtag + FROM busybox:${tag} + LABEL multifromtest=1 + RUN env > /out + FROM busybox:${tag} + ARG tag + RUN env > /out` + + result := cli.BuildCmd(c, imgName, + build.WithDockerfile(dockerfile), + cli.WithFlags("--build-arg", fmt.Sprintf("tag=latest"))) + result.Assert(c, icmd.Success) + + result = cli.DockerCmd(c, "images", "-q", "-f", "label=multifromtest=1") + parentID := strings.TrimSpace(result.Stdout()) + + result = cli.DockerCmd(c, "run", "--rm", parentID, "cat", "/out") + c.Assert(result.Stdout(), checker.Not(checker.Contains), "tag") + + result = cli.DockerCmd(c, "run", "--rm", imgName, "cat", "/out") + c.Assert(result.Stdout(), checker.Contains, "tag=latest") +} + +func (s *DockerSuite) TestBuildBuildTimeUnusedArgMultipleFrom(c *check.C) { + imgName := "multifromunusedarg" + dockerfile := `FROM busybox + ARG foo + FROM busybox + ARG bar + RUN env > /out` + + result := cli.BuildCmd(c, imgName, + build.WithDockerfile(dockerfile), + cli.WithFlags("--build-arg", fmt.Sprintf("baz=abc"))) + result.Assert(c, icmd.Success) + c.Assert(result.Combined(), checker.Contains, "[Warning]") + c.Assert(result.Combined(), checker.Contains, "[baz] were not consumed") + + result = cli.DockerCmd(c, "run", "--rm", imgName, "cat", "/out") + c.Assert(result.Stdout(), checker.Not(checker.Contains), "bar") + c.Assert(result.Stdout(), checker.Not(checker.Contains), "baz") +} + +func (s *DockerSuite) TestBuildNoNamedVolume(c *check.C) { + volName := "testname:/foo" + + if testEnv.DaemonPlatform() == "windows" { + volName = "testname:C:\\foo" + } + dockerCmd(c, "run", "-v", volName, "busybox", "sh", "-c", "touch /foo/oops") + + dockerFile := `FROM busybox + VOLUME ` + volName + ` + RUN ls /foo/oops + ` + buildImage("test", build.WithDockerfile(dockerFile)).Assert(c, icmd.Expected{ + ExitCode: 1, + }) +} + +func (s *DockerSuite) TestBuildTagEvent(c *check.C) { + since := daemonUnixTime(c) + + 
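+    // Build an image, then scan the daemon's event stream between the two
+    // timestamps for a "tag" action on test:latest.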
dockerFile := `FROM busybox + RUN echo events + ` + buildImageSuccessfully(c, "test", build.WithDockerfile(dockerFile)) + + until := daemonUnixTime(c) + out, _ := dockerCmd(c, "events", "--since", since, "--until", until, "--filter", "type=image") + events := strings.Split(strings.TrimSpace(out), "\n") + actions := eventActionsByIDAndType(c, events, "test:latest", "image") + var foundTag bool + for _, a := range actions { + if a == "tag" { + foundTag = true + break + } + } + + c.Assert(foundTag, checker.True, check.Commentf("No tag event found:\n%s", out)) +} + +// #15780 +func (s *DockerSuite) TestBuildMultipleTags(c *check.C) { + dockerfile := ` + FROM busybox + MAINTAINER test-15780 + ` + buildImageSuccessfully(c, "tag1", cli.WithFlags("-t", "tag2:v2", "-t", "tag1:latest", "-t", "tag1"), build.WithDockerfile(dockerfile)) + + id1 := getIDByName(c, "tag1") + id2 := getIDByName(c, "tag2:v2") + c.Assert(id1, check.Equals, id2) +} + +// #17290 +func (s *DockerSuite) TestBuildCacheBrokenSymlink(c *check.C) { + name := "testbuildbrokensymlink" + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(` + FROM busybox + COPY . ./`), + fakecontext.WithFiles(map[string]string{ + "foo": "bar", + })) + defer ctx.Close() + + err := os.Symlink(filepath.Join(ctx.Dir, "nosuchfile"), filepath.Join(ctx.Dir, "asymlink")) + c.Assert(err, checker.IsNil) + + // warm up cache + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + + // add new file to context, should invalidate cache + err = ioutil.WriteFile(filepath.Join(ctx.Dir, "newfile"), []byte("foo"), 0644) + c.Assert(err, checker.IsNil) + + result := cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + if strings.Contains(result.Combined(), "Using cache") { + c.Fatal("2nd build used cache on ADD, it shouldn't") + } +} + +func (s *DockerSuite) TestBuildFollowSymlinkToFile(c *check.C) { + name := "testbuildbrokensymlink" + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(` + FROM busybox + COPY asymlink target`), + fakecontext.WithFiles(map[string]string{ + "foo": "bar", + })) + defer ctx.Close() + + err := os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink")) + c.Assert(err, checker.IsNil) + + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + + out := cli.DockerCmd(c, "run", "--rm", name, "cat", "target").Combined() + c.Assert(out, checker.Matches, "bar") + + // change target file should invalidate cache + err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo"), []byte("baz"), 0644) + c.Assert(err, checker.IsNil) + + result := cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + c.Assert(result.Combined(), checker.Not(checker.Contains), "Using cache") + + out = cli.DockerCmd(c, "run", "--rm", name, "cat", "target").Combined() + c.Assert(out, checker.Matches, "baz") +} + +func (s *DockerSuite) TestBuildFollowSymlinkToDir(c *check.C) { + name := "testbuildbrokensymlink" + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(` + FROM busybox + COPY asymlink /`), + fakecontext.WithFiles(map[string]string{ + "foo/abc": "bar", + "foo/def": "baz", + })) + defer ctx.Close() + + err := os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink")) + c.Assert(err, checker.IsNil) + + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + + out := cli.DockerCmd(c, "run", "--rm", name, "cat", "abc", "def").Combined() + c.Assert(out, checker.Matches, "barbaz") + + // change target file should invalidate cache + err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo/def"), []byte("bax"), 0644) + c.Assert(err, checker.IsNil) + 
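+    // Rebuilding after editing a file inside the symlinked directory must
+    // invalidate the cache.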
+ result := cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + c.Assert(result.Combined(), checker.Not(checker.Contains), "Using cache") + + out = cli.DockerCmd(c, "run", "--rm", name, "cat", "abc", "def").Combined() + c.Assert(out, checker.Matches, "barbax") + +} + +// TestBuildSymlinkBasename tests that target file gets basename from symlink, +// not from the target file. +func (s *DockerSuite) TestBuildSymlinkBasename(c *check.C) { + name := "testbuildbrokensymlink" + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(` + FROM busybox + COPY asymlink /`), + fakecontext.WithFiles(map[string]string{ + "foo": "bar", + })) + defer ctx.Close() + + err := os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink")) + c.Assert(err, checker.IsNil) + + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + + out := cli.DockerCmd(c, "run", "--rm", name, "cat", "asymlink").Combined() + c.Assert(out, checker.Matches, "bar") +} + +// #17827 +func (s *DockerSuite) TestBuildCacheRootSource(c *check.C) { + name := "testbuildrootsource" + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(` + FROM busybox + COPY / /data`), + fakecontext.WithFiles(map[string]string{ + "foo": "bar", + })) + defer ctx.Close() + + // warm up cache + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + + // change file, should invalidate cache + err := ioutil.WriteFile(filepath.Join(ctx.Dir, "foo"), []byte("baz"), 0644) + c.Assert(err, checker.IsNil) + + result := cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + + c.Assert(result.Combined(), checker.Not(checker.Contains), "Using cache") +} + +// #19375 +func (s *DockerSuite) TestBuildFailsGitNotCallable(c *check.C) { + buildImage("gitnotcallable", cli.WithEnvironmentVariables("PATH="), + build.WithContextPath("github.com/docker/v1.10-migrator.git")).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "unable to prepare context: unable to find 'git': ", + }) + + buildImage("gitnotcallable", cli.WithEnvironmentVariables("PATH="), + build.WithContextPath("https://github.com/docker/v1.10-migrator.git")).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "unable to prepare context: unable to find 'git': ", + }) +} + +// TestBuildWorkdirWindowsPath tests that a Windows style path works as a workdir +func (s *DockerSuite) TestBuildWorkdirWindowsPath(c *check.C) { + testRequires(c, DaemonIsWindows) + name := "testbuildworkdirwindowspath" + buildImageSuccessfully(c, name, build.WithDockerfile(` + FROM `+testEnv.MinimalBaseImage()+` + RUN mkdir C:\\work + WORKDIR C:\\work + RUN if "%CD%" NEQ "C:\work" exit -1 + `)) +} + +func (s *DockerSuite) TestBuildLabel(c *check.C) { + name := "testbuildlabel" + testLabel := "foo" + + buildImageSuccessfully(c, name, cli.WithFlags("--label", testLabel), + build.WithDockerfile(` + FROM `+minimalBaseImage()+` + LABEL default foo +`)) + + var labels map[string]string + inspectFieldAndUnmarshall(c, name, "Config.Labels", &labels) + if _, ok := labels[testLabel]; !ok { + c.Fatal("label not found in image") + } +} + +func (s *DockerSuite) TestBuildLabelOneNode(c *check.C) { + name := "testbuildlabel" + buildImageSuccessfully(c, name, cli.WithFlags("--label", "foo=bar"), + build.WithDockerfile("FROM busybox")) + + var labels map[string]string + inspectFieldAndUnmarshall(c, name, "Config.Labels", &labels) + v, ok := labels["foo"] + if !ok { + c.Fatal("label `foo` not found in image") + } + c.Assert(v, checker.Equals, "bar") +} + +func (s *DockerSuite) TestBuildLabelCacheCommit(c *check.C) { + name := 
"testbuildlabelcachecommit" + testLabel := "foo" + + buildImageSuccessfully(c, name, build.WithDockerfile(` + FROM `+minimalBaseImage()+` + LABEL default foo + `)) + buildImageSuccessfully(c, name, cli.WithFlags("--label", testLabel), + build.WithDockerfile(` + FROM `+minimalBaseImage()+` + LABEL default foo + `)) + + var labels map[string]string + inspectFieldAndUnmarshall(c, name, "Config.Labels", &labels) + if _, ok := labels[testLabel]; !ok { + c.Fatal("label not found in image") + } +} + +func (s *DockerSuite) TestBuildLabelMultiple(c *check.C) { + name := "testbuildlabelmultiple" + testLabels := map[string]string{ + "foo": "bar", + "123": "456", + } + labelArgs := []string{} + for k, v := range testLabels { + labelArgs = append(labelArgs, "--label", k+"="+v) + } + + buildImageSuccessfully(c, name, cli.WithFlags(labelArgs...), + build.WithDockerfile(` + FROM `+minimalBaseImage()+` + LABEL default foo +`)) + + var labels map[string]string + inspectFieldAndUnmarshall(c, name, "Config.Labels", &labels) + for k, v := range testLabels { + if x, ok := labels[k]; !ok || x != v { + c.Fatalf("label %s=%s not found in image", k, v) + } + } +} + +func (s *DockerRegistryAuthHtpasswdSuite) TestBuildFromAuthenticatedRegistry(c *check.C) { + dockerCmd(c, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), privateRegistryURL) + baseImage := privateRegistryURL + "/baseimage" + + buildImageSuccessfully(c, baseImage, build.WithDockerfile(` + FROM busybox + ENV env1 val1 + `)) + + dockerCmd(c, "push", baseImage) + dockerCmd(c, "rmi", baseImage) + + buildImageSuccessfully(c, baseImage, build.WithDockerfile(fmt.Sprintf(` + FROM %s + ENV env2 val2 + `, baseImage))) +} + +func (s *DockerRegistryAuthHtpasswdSuite) TestBuildWithExternalAuth(c *check.C) { + osPath := os.Getenv("PATH") + defer os.Setenv("PATH", osPath) + + workingDir, err := os.Getwd() + c.Assert(err, checker.IsNil) + absolute, err := filepath.Abs(filepath.Join(workingDir, "fixtures", "auth")) + c.Assert(err, checker.IsNil) + testPath := fmt.Sprintf("%s%c%s", osPath, filepath.ListSeparator, absolute) + + os.Setenv("PATH", testPath) + + repoName := fmt.Sprintf("%v/dockercli/busybox:authtest", privateRegistryURL) + + tmp, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + + externalAuthConfig := `{ "credsStore": "shell-test" }` + + configPath := filepath.Join(tmp, "config.json") + err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "--config", tmp, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), privateRegistryURL) + + b, err := ioutil.ReadFile(configPath) + c.Assert(err, checker.IsNil) + c.Assert(string(b), checker.Not(checker.Contains), "\"auth\":") + + dockerCmd(c, "--config", tmp, "tag", "busybox", repoName) + dockerCmd(c, "--config", tmp, "push", repoName) + + // make sure the image is pulled when building + dockerCmd(c, "rmi", repoName) + + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "--config", tmp, "build", "-"}, + Stdin: strings.NewReader(fmt.Sprintf("FROM %s", repoName)), + }).Assert(c, icmd.Success) +} + +// Test cases in #22036 +func (s *DockerSuite) TestBuildLabelsOverride(c *check.C) { + // Command line option labels will always override + name := "scratchy" + expected := `{"bar":"from-flag","foo":"from-flag"}` + buildImageSuccessfully(c, name, cli.WithFlags("--label", "foo=from-flag", "--label", "bar=from-flag"), + build.WithDockerfile(`FROM `+minimalBaseImage()+` + LABEL foo=from-dockerfile`)) + res := 
inspectFieldJSON(c, name, "Config.Labels") + if res != expected { + c.Fatalf("Labels %s, expected %s", res, expected) + } + + name = "from" + expected = `{"foo":"from-dockerfile"}` + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+` + LABEL foo from-dockerfile`)) + res = inspectFieldJSON(c, name, "Config.Labels") + if res != expected { + c.Fatalf("Labels %s, expected %s", res, expected) + } + + // Command line option label will override even via `FROM` + name = "new" + expected = `{"bar":"from-dockerfile2","foo":"new"}` + buildImageSuccessfully(c, name, cli.WithFlags("--label", "foo=new"), + build.WithDockerfile(`FROM from + LABEL bar from-dockerfile2`)) + res = inspectFieldJSON(c, name, "Config.Labels") + if res != expected { + c.Fatalf("Labels %s, expected %s", res, expected) + } + + // Command line option without a value set (--label foo, --label bar=) + // will be treated as --label foo="", --label bar="" + name = "scratchy2" + expected = `{"bar":"","foo":""}` + buildImageSuccessfully(c, name, cli.WithFlags("--label", "foo", "--label", "bar="), + build.WithDockerfile(`FROM `+minimalBaseImage()+` + LABEL foo=from-dockerfile`)) + res = inspectFieldJSON(c, name, "Config.Labels") + if res != expected { + c.Fatalf("Labels %s, expected %s", res, expected) + } + + // Command line option without a value set (--label foo, --label bar=) + // will be treated as --label foo="", --label bar="" + // This time is for inherited images + name = "new2" + expected = `{"bar":"","foo":""}` + buildImageSuccessfully(c, name, cli.WithFlags("--label", "foo=", "--label", "bar"), + build.WithDockerfile(`FROM from + LABEL bar from-dockerfile2`)) + res = inspectFieldJSON(c, name, "Config.Labels") + if res != expected { + c.Fatalf("Labels %s, expected %s", res, expected) + } + + // Command line option labels with only `FROM` + name = "scratchy" + expected = `{"bar":"from-flag","foo":"from-flag"}` + buildImageSuccessfully(c, name, cli.WithFlags("--label", "foo=from-flag", "--label", "bar=from-flag"), + build.WithDockerfile(`FROM `+minimalBaseImage())) + res = inspectFieldJSON(c, name, "Config.Labels") + if res != expected { + c.Fatalf("Labels %s, expected %s", res, expected) + } + + // Command line option labels with env var + name = "scratchz" + expected = `{"bar":"$PATH"}` + buildImageSuccessfully(c, name, cli.WithFlags("--label", "bar=$PATH"), + build.WithDockerfile(`FROM `+minimalBaseImage())) + res = inspectFieldJSON(c, name, "Config.Labels") + if res != expected { + c.Fatalf("Labels %s, expected %s", res, expected) + } +} + +// Test case for #22855 +func (s *DockerSuite) TestBuildDeleteCommittedFile(c *check.C) { + name := "test-delete-committed-file" + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + RUN echo test > file + RUN test -e file + RUN rm file + RUN sh -c "! test -e file"`)) +} + +// #20083 +func (s *DockerSuite) TestBuildDockerignoreComment(c *check.C) { + // TODO Windows: Figure out why this test is flakey on TP5. If you add + // something like RUN sleep 5, or even RUN ls /tmp after the ADD line, + // it is more reliable, but that's not a good fix. + testRequires(c, DaemonIsLinux) + + name := "testbuilddockerignorecleanpaths" + dockerfile := ` + FROM busybox + ADD . /tmp/ + RUN sh -c "(ls -la /tmp/#1)" + RUN sh -c "(! ls -la /tmp/#2)" + RUN sh -c "(! ls /tmp/foo) && (! 
ls /tmp/foo2) && (ls /tmp/dir1/foo)"` + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile("foo", "foo"), + build.WithFile("foo2", "foo2"), + build.WithFile("dir1/foo", "foo in dir1"), + build.WithFile("#1", "# file 1"), + build.WithFile("#2", "# file 2"), + build.WithFile(".dockerignore", `# Visual C++ cache files +# because we have git ;-) +# The above comment is from #20083 +foo +#dir1/foo +foo2 +# The following is considered as comment as # is at the beginning +#1 +# The following is not considered as comment as # is not at the beginning + #2 +`))) +} + +// Test case for #23221 +func (s *DockerSuite) TestBuildWithUTF8BOM(c *check.C) { + name := "test-with-utf8-bom" + dockerfile := []byte(`FROM busybox`) + bomDockerfile := append([]byte{0xEF, 0xBB, 0xBF}, dockerfile...) + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", string(bomDockerfile)), + )) +} + +// Test case for UTF-8 BOM in .dockerignore, related to #23221 +func (s *DockerSuite) TestBuildWithUTF8BOMDockerignore(c *check.C) { + name := "test-with-utf8-bom-dockerignore" + dockerfile := ` + FROM busybox + ADD . /tmp/ + RUN ls -la /tmp + RUN sh -c "! ls /tmp/Dockerfile" + RUN ls /tmp/.dockerignore` + dockerignore := []byte("./Dockerfile\n") + bomDockerignore := append([]byte{0xEF, 0xBB, 0xBF}, dockerignore...) + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile(".dockerignore", string(bomDockerignore)), + )) +} + +// #22489 Shell test to confirm config gets updated correctly +func (s *DockerSuite) TestBuildShellUpdatesConfig(c *check.C) { + name := "testbuildshellupdatesconfig" + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+` + SHELL ["foo", "-bar"]`)) + expected := `["foo","-bar","#(nop) ","SHELL [foo -bar]"]` + res := inspectFieldJSON(c, name, "ContainerConfig.Cmd") + if res != expected { + c.Fatalf("%s, expected %s", res, expected) + } + res = inspectFieldJSON(c, name, "ContainerConfig.Shell") + if res != `["foo","-bar"]` { + c.Fatalf(`%s, expected ["foo","-bar"]`, res) + } +} + +// #22489 Changing the shell multiple times and CMD after. +func (s *DockerSuite) TestBuildShellMultiple(c *check.C) { + name := "testbuildshellmultiple" + + result := buildImage(name, build.WithDockerfile(`FROM busybox + RUN echo defaultshell + SHELL ["echo"] + RUN echoshell + SHELL ["ls"] + RUN -l + CMD -l`)) + result.Assert(c, icmd.Success) + + // Must contain 'defaultshell' twice + if len(strings.Split(result.Combined(), "defaultshell")) != 3 { + c.Fatalf("defaultshell should have appeared twice in %s", result.Combined()) + } + + // Must contain 'echoshell' twice + if len(strings.Split(result.Combined(), "echoshell")) != 3 { + c.Fatalf("echoshell should have appeared twice in %s", result.Combined()) + } + + // Must contain "total " (part of ls -l) + if !strings.Contains(result.Combined(), "total ") { + c.Fatalf("%s should have contained 'total '", result.Combined()) + } + + // A container started from the image uses the shell-form CMD. + // Last shell is ls. CMD is -l. So should contain 'total '. + outrun, _ := dockerCmd(c, "run", "--rm", name) + if !strings.Contains(outrun, "total ") { + c.Fatalf("Expected started container to run ls -l. %s", outrun) + } +} + +// #22489. 
Changed SHELL with ENTRYPOINT +func (s *DockerSuite) TestBuildShellEntrypoint(c *check.C) { + name := "testbuildshellentrypoint" + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + SHELL ["ls"] + ENTRYPOINT -l`)) + // A container started from the image uses the shell-form ENTRYPOINT. + // Shell is ls. ENTRYPOINT is -l. So should contain 'total '. + outrun, _ := dockerCmd(c, "run", "--rm", name) + if !strings.Contains(outrun, "total ") { + c.Fatalf("Expected started container to run ls -l. %s", outrun) + } +} + +// #22489 Shell test to confirm shell is inherited in a subsequent build +func (s *DockerSuite) TestBuildShellInherited(c *check.C) { + name1 := "testbuildshellinherited1" + buildImageSuccessfully(c, name1, build.WithDockerfile(`FROM busybox + SHELL ["ls"]`)) + name2 := "testbuildshellinherited2" + buildImage(name2, build.WithDockerfile(`FROM `+name1+` + RUN -l`)).Assert(c, icmd.Expected{ + // ls -l has "total " followed by some number in it, ls without -l does not. + Out: "total ", + }) +} + +// #22489 Shell test to confirm non-JSON doesn't work +func (s *DockerSuite) TestBuildShellNotJSON(c *check.C) { + name := "testbuildshellnotjson" + + buildImage(name, build.WithDockerfile(`FROM `+minimalBaseImage()+` + sHeLl exec -form`, // Casing explicit to ensure error is upper-cased. + )).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "SHELL requires the arguments to be in JSON form", + }) +} + +// #22489 Windows shell test to confirm native is powershell if executing a PS command +// This would error if the default shell were still cmd. +func (s *DockerSuite) TestBuildShellWindowsPowershell(c *check.C) { + testRequires(c, DaemonIsWindows) + name := "testbuildshellpowershell" + buildImage(name, build.WithDockerfile(`FROM `+minimalBaseImage()+` + SHELL ["powershell", "-command"] + RUN Write-Host John`)).Assert(c, icmd.Expected{ + Out: "\nJohn\n", + }) +} + +// Verify that escape is being correctly applied to words when escape directive is not \. +// Tests WORKDIR, ADD +func (s *DockerSuite) TestBuildEscapeNotBackslashWordTest(c *check.C) { + testRequires(c, DaemonIsWindows) + name := "testbuildescapenotbackslashwordtesta" + buildImage(name, build.WithDockerfile(`# escape= `+"`"+` + FROM `+minimalBaseImage()+` + WORKDIR c:\windows + RUN dir /w`)).Assert(c, icmd.Expected{ + Out: "[System32]", + }) + + name = "testbuildescapenotbackslashwordtestb" + buildImage(name, build.WithDockerfile(`# escape= `+"`"+` + FROM `+minimalBaseImage()+` + SHELL ["powershell.exe"] + WORKDIR c:\foo + ADD Dockerfile c:\foo\ + RUN dir Dockerfile`)).Assert(c, icmd.Expected{ + Out: "-a----", + }) +} + +// #22868. Make sure shell-form CMD is marked as escaped in the config of the image +func (s *DockerSuite) TestBuildCmdShellArgsEscaped(c *check.C) { + testRequires(c, DaemonIsWindows) + name := "testbuildcmdshellescaped" + buildImageSuccessfully(c, name, build.WithDockerfile(` + FROM `+minimalBaseImage()+` + CMD "ipconfig" + `)) + res := inspectFieldJSON(c, name, "Config.ArgsEscaped") + if res != "true" { + c.Fatalf("CMD did not update Config.ArgsEscaped on image: %v", res) + } + dockerCmd(c, "run", "--name", "inspectme", name) + dockerCmd(c, "wait", "inspectme") + res = inspectFieldJSON(c, name, "Config.Cmd") + + if res != `["cmd","/S","/C","\"ipconfig\""]` { + c.Fatalf("CMD was not escaped Config.Cmd: got %v", res) + } +} + +// Test case for #24912. 
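+// It checks that build progress is reported as "Step i/N" with a correct total.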
+func (s *DockerSuite) TestBuildStepsWithProgress(c *check.C) { + name := "testbuildstepswithprogress" + totalRun := 5 + result := buildImage(name, build.WithDockerfile("FROM busybox\n"+strings.Repeat("RUN echo foo\n", totalRun))) + result.Assert(c, icmd.Success) + c.Assert(result.Combined(), checker.Contains, fmt.Sprintf("Step 1/%d : FROM busybox", 1+totalRun)) + for i := 2; i <= 1+totalRun; i++ { + c.Assert(result.Combined(), checker.Contains, fmt.Sprintf("Step %d/%d : RUN echo foo", i, 1+totalRun)) + } +} + +func (s *DockerSuite) TestBuildWithFailure(c *check.C) { + name := "testbuildwithfailure" + + // First test case can only detect `nobody` in runtime so all steps will show up + dockerfile := "FROM busybox\nRUN nobody" + result := buildImage(name, build.WithDockerfile(dockerfile)) + c.Assert(result.Error, checker.NotNil) + c.Assert(result.Stdout(), checker.Contains, "Step 1/2 : FROM busybox") + c.Assert(result.Stdout(), checker.Contains, "Step 2/2 : RUN nobody") + + // Second test case `FFOM` should have been detected before build runs so no steps + dockerfile = "FFOM nobody\nRUN nobody" + result = buildImage(name, build.WithDockerfile(dockerfile)) + c.Assert(result.Error, checker.NotNil) + c.Assert(result.Stdout(), checker.Not(checker.Contains), "Step 1/2 : FROM busybox") + c.Assert(result.Stdout(), checker.Not(checker.Contains), "Step 2/2 : RUN nobody") +} + +func (s *DockerSuite) TestBuildCacheFromEqualDiffIDsLength(c *check.C) { + dockerfile := ` + FROM busybox + RUN echo "test" + ENTRYPOINT ["sh"]` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "Dockerfile": dockerfile, + })) + defer ctx.Close() + + cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx)) + id1 := getIDByName(c, "build1") + + // rebuild with cache-from + result := cli.BuildCmd(c, "build2", cli.WithFlags("--cache-from=build1"), build.WithExternalBuildContext(ctx)) + id2 := getIDByName(c, "build2") + c.Assert(id1, checker.Equals, id2) + c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 2) +} + +func (s *DockerSuite) TestBuildCacheFrom(c *check.C) { + testRequires(c, DaemonIsLinux) // All tests that do save are skipped in windows + dockerfile := ` + FROM busybox + ENV FOO=bar + ADD baz / + RUN touch bax` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "Dockerfile": dockerfile, + "baz": "baz", + })) + defer ctx.Close() + + cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx)) + id1 := getIDByName(c, "build1") + + // rebuild with cache-from + result := cli.BuildCmd(c, "build2", cli.WithFlags("--cache-from=build1"), build.WithExternalBuildContext(ctx)) + id2 := getIDByName(c, "build2") + c.Assert(id1, checker.Equals, id2) + c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 3) + cli.DockerCmd(c, "rmi", "build2") + + // no cache match with unknown source + result = cli.BuildCmd(c, "build2", cli.WithFlags("--cache-from=nosuchtag"), build.WithExternalBuildContext(ctx)) + id2 = getIDByName(c, "build2") + c.Assert(id1, checker.Not(checker.Equals), id2) + c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 0) + cli.DockerCmd(c, "rmi", "build2") + + // clear parent images + tempDir, err := ioutil.TempDir("", "test-build-cache-from-") + if err != nil { + c.Fatalf("failed to create temporary directory: %s", tempDir) + } + defer os.RemoveAll(tempDir) + tempFile := filepath.Join(tempDir, "img.tar") + 
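+    // save + rmi + load round-trips build1 so it comes back with no parent
+    // chain; --cache-from must still be able to match against it.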
cli.DockerCmd(c, "save", "-o", tempFile, "build1") + cli.DockerCmd(c, "rmi", "build1") + cli.DockerCmd(c, "load", "-i", tempFile) + parentID := cli.DockerCmd(c, "inspect", "-f", "{{.Parent}}", "build1").Combined() + c.Assert(strings.TrimSpace(parentID), checker.Equals, "") + + // cache still applies without parents + result = cli.BuildCmd(c, "build2", cli.WithFlags("--cache-from=build1"), build.WithExternalBuildContext(ctx)) + id2 = getIDByName(c, "build2") + c.Assert(id1, checker.Equals, id2) + c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 3) + history1 := cli.DockerCmd(c, "history", "-q", "build2").Combined() + + // Retry, no new intermediate images + result = cli.BuildCmd(c, "build3", cli.WithFlags("--cache-from=build1"), build.WithExternalBuildContext(ctx)) + id3 := getIDByName(c, "build3") + c.Assert(id1, checker.Equals, id3) + c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 3) + history2 := cli.DockerCmd(c, "history", "-q", "build3").Combined() + + c.Assert(history1, checker.Equals, history2) + cli.DockerCmd(c, "rmi", "build2") + cli.DockerCmd(c, "rmi", "build3") + cli.DockerCmd(c, "rmi", "build1") + cli.DockerCmd(c, "load", "-i", tempFile) + + // Modify file, everything up to last command and layers are reused + dockerfile = ` + FROM busybox + ENV FOO=bar + ADD baz / + RUN touch newfile` + err = ioutil.WriteFile(filepath.Join(ctx.Dir, "Dockerfile"), []byte(dockerfile), 0644) + c.Assert(err, checker.IsNil) + + result = cli.BuildCmd(c, "build2", cli.WithFlags("--cache-from=build1"), build.WithExternalBuildContext(ctx)) + id2 = getIDByName(c, "build2") + c.Assert(id1, checker.Not(checker.Equals), id2) + c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 2) + + layers1Str := cli.DockerCmd(c, "inspect", "-f", "{{json .RootFS.Layers}}", "build1").Combined() + layers2Str := cli.DockerCmd(c, "inspect", "-f", "{{json .RootFS.Layers}}", "build2").Combined() + + var layers1 []string + var layers2 []string + c.Assert(json.Unmarshal([]byte(layers1Str), &layers1), checker.IsNil) + c.Assert(json.Unmarshal([]byte(layers2Str), &layers2), checker.IsNil) + + c.Assert(len(layers1), checker.Equals, len(layers2)) + for i := 0; i < len(layers1)-1; i++ { + c.Assert(layers1[i], checker.Equals, layers2[i]) + } + c.Assert(layers1[len(layers1)-1], checker.Not(checker.Equals), layers2[len(layers1)-1]) +} + +func (s *DockerSuite) TestBuildCacheMultipleFrom(c *check.C) { + testRequires(c, DaemonIsLinux) // All tests that do save are skipped in windows + dockerfile := ` + FROM busybox + ADD baz / + FROM busybox + ADD baz /` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "Dockerfile": dockerfile, + "baz": "baz", + })) + defer ctx.Close() + + result := cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx)) + // second part of dockerfile was a repeat of first so should be cached + c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 1) + + result = cli.BuildCmd(c, "build2", cli.WithFlags("--cache-from=build1"), build.WithExternalBuildContext(ctx)) + // now both parts of dockerfile should be cached + c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 2) +} + +func (s *DockerSuite) TestBuildNetNone(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildnetnone" + buildImage(name, cli.WithFlags("--network=none"), build.WithDockerfile(` + FROM busybox + RUN ping -c 1 8.8.8.8 + `)).Assert(c, icmd.Expected{ + 
ExitCode: 1, + Out: "unreachable", + }) +} + +func (s *DockerSuite) TestBuildNetContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + + id, _ := dockerCmd(c, "run", "--hostname", "foobar", "-d", "busybox", "nc", "-ll", "-p", "1234", "-e", "hostname") + + name := "testbuildnetcontainer" + buildImageSuccessfully(c, name, cli.WithFlags("--network=container:"+strings.TrimSpace(id)), + build.WithDockerfile(` + FROM busybox + RUN nc localhost 1234 > /otherhost + `)) + + host, _ := dockerCmd(c, "run", "testbuildnetcontainer", "cat", "/otherhost") + c.Assert(strings.TrimSpace(host), check.Equals, "foobar") +} + +func (s *DockerSuite) TestBuildWithExtraHost(c *check.C) { + testRequires(c, DaemonIsLinux) + + name := "testbuildwithextrahost" + buildImageSuccessfully(c, name, + cli.WithFlags( + "--add-host", "foo:127.0.0.1", + "--add-host", "bar:127.0.0.1", + ), + build.WithDockerfile(` + FROM busybox + RUN ping -c 1 foo + RUN ping -c 1 bar + `)) +} + +func (s *DockerSuite) TestBuildWithExtraHostInvalidFormat(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerfile := ` + FROM busybox + RUN ping -c 1 foo` + + testCases := []struct { + testName string + dockerfile string + buildFlag string + }{ + {"extra_host_missing_ip", dockerfile, "--add-host=foo"}, + {"extra_host_missing_ip_with_delimiter", dockerfile, "--add-host=foo:"}, + {"extra_host_missing_hostname", dockerfile, "--add-host=:127.0.0.1"}, + {"extra_host_invalid_ipv4", dockerfile, "--add-host=foo:101.10.2"}, + {"extra_host_invalid_ipv6", dockerfile, "--add-host=foo:2001::1::3F"}, + } + + for _, tc := range testCases { + result := buildImage(tc.testName, cli.WithFlags(tc.buildFlag), build.WithDockerfile(tc.dockerfile)) + result.Assert(c, icmd.Expected{ + ExitCode: 125, + }) + } + +} + +func (s *DockerSuite) TestBuildSquashParent(c *check.C) { + testRequires(c, ExperimentalDaemon) + dockerFile := ` + FROM busybox + RUN echo hello > /hello + RUN echo world >> /hello + RUN echo hello > /remove_me + ENV HELLO world + RUN rm /remove_me + ` + // build and get the ID that we can use later for history comparison + name := "test" + buildImageSuccessfully(c, name, build.WithDockerfile(dockerFile)) + origID := getIDByName(c, name) + + // build with squash + buildImageSuccessfully(c, name, cli.WithFlags("--squash"), build.WithDockerfile(dockerFile)) + id := getIDByName(c, name) + + out, _ := dockerCmd(c, "run", "--rm", id, "/bin/sh", "-c", "cat /hello") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello\nworld") + + dockerCmd(c, "run", "--rm", id, "/bin/sh", "-c", "[ ! 
-f /remove_me ]") + dockerCmd(c, "run", "--rm", id, "/bin/sh", "-c", `[ "$(echo $HELLO)" == "world" ]`) + + // make sure the ID produced is the ID of the tag we specified + inspectID := inspectImage(c, "test", ".ID") + c.Assert(inspectID, checker.Equals, id) + + origHistory, _ := dockerCmd(c, "history", origID) + testHistory, _ := dockerCmd(c, "history", "test") + + splitOrigHistory := strings.Split(strings.TrimSpace(origHistory), "\n") + splitTestHistory := strings.Split(strings.TrimSpace(testHistory), "\n") + c.Assert(len(splitTestHistory), checker.Equals, len(splitOrigHistory)+1) + + out = inspectImage(c, id, "len .RootFS.Layers") + c.Assert(strings.TrimSpace(out), checker.Equals, "2") +} + +func (s *DockerSuite) TestBuildContChar(c *check.C) { + name := "testbuildcontchar" + + buildImage(name, build.WithDockerfile(`FROM busybox\`)).Assert(c, icmd.Expected{ + Out: "Step 1/1 : FROM busybox", + }) + + result := buildImage(name, build.WithDockerfile(`FROM busybox + RUN echo hi \`)) + result.Assert(c, icmd.Success) + c.Assert(result.Combined(), checker.Contains, "Step 1/2 : FROM busybox") + c.Assert(result.Combined(), checker.Contains, "Step 2/2 : RUN echo hi\n") + + result = buildImage(name, build.WithDockerfile(`FROM busybox + RUN echo hi \\`)) + result.Assert(c, icmd.Success) + c.Assert(result.Combined(), checker.Contains, "Step 1/2 : FROM busybox") + c.Assert(result.Combined(), checker.Contains, "Step 2/2 : RUN echo hi \\\n") + + result = buildImage(name, build.WithDockerfile(`FROM busybox + RUN echo hi \\\`)) + result.Assert(c, icmd.Success) + c.Assert(result.Combined(), checker.Contains, "Step 1/2 : FROM busybox") + c.Assert(result.Combined(), checker.Contains, "Step 2/2 : RUN echo hi \\\\\n") +} + +func (s *DockerSuite) TestBuildCopyFromPreviousRootFS(c *check.C) { + dockerfile := ` + FROM busybox AS first + COPY foo bar + + FROM busybox + %s + COPY baz baz + RUN echo mno > baz/cc + + FROM busybox + COPY bar / + COPY --from=1 baz sub/ + COPY --from=0 bar baz + COPY --from=first bar bay` + + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(fmt.Sprintf(dockerfile, "")), + fakecontext.WithFiles(map[string]string{ + "foo": "abc", + "bar": "def", + "baz/aa": "ghi", + "baz/bb": "jkl", + })) + defer ctx.Close() + + cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx)) + + cli.DockerCmd(c, "run", "build1", "cat", "bar").Assert(c, icmd.Expected{Out: "def"}) + cli.DockerCmd(c, "run", "build1", "cat", "sub/aa").Assert(c, icmd.Expected{Out: "ghi"}) + cli.DockerCmd(c, "run", "build1", "cat", "sub/cc").Assert(c, icmd.Expected{Out: "mno"}) + cli.DockerCmd(c, "run", "build1", "cat", "baz").Assert(c, icmd.Expected{Out: "abc"}) + cli.DockerCmd(c, "run", "build1", "cat", "bay").Assert(c, icmd.Expected{Out: "abc"}) + + result := cli.BuildCmd(c, "build2", build.WithExternalBuildContext(ctx)) + + // all commands should be cached + c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 7) + c.Assert(getIDByName(c, "build1"), checker.Equals, getIDByName(c, "build2")) + + err := ioutil.WriteFile(filepath.Join(ctx.Dir, "Dockerfile"), []byte(fmt.Sprintf(dockerfile, "COPY baz/aa foo")), 0644) + c.Assert(err, checker.IsNil) + + // changing file in parent block should not affect last block + result = cli.BuildCmd(c, "build3", build.WithExternalBuildContext(ctx)) + c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 5) + + err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo"), []byte("pqr"), 0644) + c.Assert(err, checker.IsNil) + + // changing file in 
parent block should affect both first and last block + result = cli.BuildCmd(c, "build4", build.WithExternalBuildContext(ctx)) + c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 5) + + cli.DockerCmd(c, "run", "build4", "cat", "bay").Assert(c, icmd.Expected{Out: "pqr"}) + cli.DockerCmd(c, "run", "build4", "cat", "baz").Assert(c, icmd.Expected{Out: "pqr"}) +} + +func (s *DockerSuite) TestBuildCopyFromPreviousRootFSErrors(c *check.C) { + testCases := []struct { + dockerfile string + expectedError string + }{ + { + dockerfile: ` + FROM busybox + COPY --from=foo foo bar`, + expectedError: "invalid from flag value foo", + }, + { + dockerfile: ` + FROM busybox + COPY --from=0 foo bar`, + expectedError: "invalid from flag value 0: refers to current build stage", + }, + { + dockerfile: ` + FROM busybox AS foo + COPY --from=bar foo bar`, + expectedError: "invalid from flag value bar", + }, + { + dockerfile: ` + FROM busybox AS 1 + COPY --from=1 foo bar`, + expectedError: "invalid name for build stage", + }, + } + + for _, tc := range testCases { + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(tc.dockerfile), + fakecontext.WithFiles(map[string]string{ + "foo": "abc", + })) + + cli.Docker(cli.Build("build1"), build.WithExternalBuildContext(ctx)).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: tc.expectedError, + }) + + ctx.Close() + } +} + +func (s *DockerSuite) TestBuildCopyFromPreviousFrom(c *check.C) { + dockerfile := ` + FROM busybox + COPY foo bar` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "foo": "abc", + })) + defer ctx.Close() + + cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx)) + + dockerfile = ` + FROM build1:latest AS foo + FROM busybox + COPY --from=foo bar / + COPY foo /` + ctx = fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "foo": "def", + })) + defer ctx.Close() + + cli.BuildCmd(c, "build2", build.WithExternalBuildContext(ctx)) + + out := cli.DockerCmd(c, "run", "build2", "cat", "bar").Combined() + c.Assert(strings.TrimSpace(out), check.Equals, "abc") + out = cli.DockerCmd(c, "run", "build2", "cat", "foo").Combined() + c.Assert(strings.TrimSpace(out), check.Equals, "def") +} + +func (s *DockerSuite) TestBuildCopyFromImplicitFrom(c *check.C) { + dockerfile := ` + FROM busybox + COPY --from=busybox /etc/passwd /mypasswd + RUN cmp /etc/passwd /mypasswd` + + if DaemonIsWindows() { + dockerfile = ` + FROM busybox + COPY --from=busybox License.txt foo` + } + + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + ) + defer ctx.Close() + + cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx)) + + if DaemonIsWindows() { + out := cli.DockerCmd(c, "run", "build1", "cat", "License.txt").Combined() + c.Assert(len(out), checker.GreaterThan, 10) + out2 := cli.DockerCmd(c, "run", "build1", "cat", "foo").Combined() + c.Assert(out, check.Equals, out2) + } +} + +func (s *DockerRegistrySuite) TestBuildCopyFromImplicitPullingFrom(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/testf", privateRegistryURL) + + dockerfile := ` + FROM busybox + COPY foo bar` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "foo": "abc", + })) + defer ctx.Close() + + cli.BuildCmd(c, repoName, build.WithExternalBuildContext(ctx)) + + cli.DockerCmd(c, "push", repoName) + cli.DockerCmd(c, "rmi", repoName) + + dockerfile = ` + FROM busybox + 
COPY --from=%s bar baz` + + ctx = fakecontext.New(c, "", fakecontext.WithDockerfile(fmt.Sprintf(dockerfile, repoName))) + defer ctx.Close() + + cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx)) + + cli.Docker(cli.Args("run", "build1", "cat", "baz")).Assert(c, icmd.Expected{Out: "abc"}) +} + +func (s *DockerSuite) TestBuildFromPreviousBlock(c *check.C) { + dockerfile := ` + FROM busybox as foo + COPY foo / + FROM foo as foo1 + RUN echo 1 >> foo + FROM foo as foO2 + RUN echo 2 >> foo + FROM foo + COPY --from=foo1 foo f1 + COPY --from=FOo2 foo f2 + ` // foo2 case also tests that names are case insensitive + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "foo": "bar", + })) + defer ctx.Close() + + cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx)) + cli.Docker(cli.Args("run", "build1", "cat", "foo")).Assert(c, icmd.Expected{Out: "bar"}) + cli.Docker(cli.Args("run", "build1", "cat", "f1")).Assert(c, icmd.Expected{Out: "bar1"}) + cli.Docker(cli.Args("run", "build1", "cat", "f2")).Assert(c, icmd.Expected{Out: "bar2"}) +} + +func (s *DockerTrustSuite) TestCopyFromTrustedBuild(c *check.C) { + img1 := s.setupTrustedImage(c, "trusted-build1") + img2 := s.setupTrustedImage(c, "trusted-build2") + dockerFile := fmt.Sprintf(` + FROM %s AS build-base + RUN echo ok > /foo + FROM %s + COPY --from=build-base foo bar`, img1, img2) + + name := "testcopyfromtrustedbuild" + + r := buildImage(name, trustedBuild, build.WithDockerfile(dockerFile)) + r.Assert(c, icmd.Expected{ + Out: fmt.Sprintf("FROM %s@sha", img1[:len(img1)-7]), + }) + r.Assert(c, icmd.Expected{ + Out: fmt.Sprintf("FROM %s@sha", img2[:len(img2)-7]), + }) + + dockerCmdWithResult("run", name, "cat", "bar").Assert(c, icmd.Expected{Out: "ok"}) +} + +func (s *DockerSuite) TestBuildCopyFromPreviousFromWindows(c *check.C) { + testRequires(c, DaemonIsWindows) + dockerfile := ` + FROM ` + testEnv.MinimalBaseImage() + ` + COPY foo c:\\bar` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "foo": "abc", + })) + defer ctx.Close() + + cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx)) + + dockerfile = ` + FROM build1:latest + FROM ` + testEnv.MinimalBaseImage() + ` + COPY --from=0 c:\\bar / + COPY foo /` + ctx = fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "foo": "def", + })) + defer ctx.Close() + + cli.BuildCmd(c, "build2", build.WithExternalBuildContext(ctx)) + + out := cli.DockerCmd(c, "run", "build2", "cmd.exe", "/s", "/c", "type", "c:\\bar").Combined() + c.Assert(strings.TrimSpace(out), check.Equals, "abc") + out = cli.DockerCmd(c, "run", "build2", "cmd.exe", "/s", "/c", "type", "c:\\foo").Combined() + c.Assert(strings.TrimSpace(out), check.Equals, "def") +} + +func (s *DockerSuite) TestBuildCopyFromForbidWindowsSystemPaths(c *check.C) { + testRequires(c, DaemonIsWindows) + dockerfile := ` + FROM ` + testEnv.MinimalBaseImage() + ` + FROM ` + testEnv.MinimalBaseImage() + ` + COPY --from=0 %s c:\\oscopy + ` + exp := icmd.Expected{ + ExitCode: 1, + Err: "copy from c:\\ or c:\\windows is not allowed on windows", + } + buildImage("testforbidsystempaths1", build.WithDockerfile(fmt.Sprintf(dockerfile, "c:\\\\"))).Assert(c, exp) + buildImage("testforbidsystempaths2", build.WithDockerfile(fmt.Sprintf(dockerfile, "C:\\\\"))).Assert(c, exp) + buildImage("testforbidsystempaths3", build.WithDockerfile(fmt.Sprintf(dockerfile, 
"c:\\\\windows"))).Assert(c, exp) + buildImage("testforbidsystempaths4", build.WithDockerfile(fmt.Sprintf(dockerfile, "c:\\\\wInDows"))).Assert(c, exp) +} + +func (s *DockerSuite) TestBuildCopyFromForbidWindowsRelativePaths(c *check.C) { + testRequires(c, DaemonIsWindows) + dockerfile := ` + FROM ` + testEnv.MinimalBaseImage() + ` + FROM ` + testEnv.MinimalBaseImage() + ` + COPY --from=0 %s c:\\oscopy + ` + exp := icmd.Expected{ + ExitCode: 1, + Err: "copy from c:\\ or c:\\windows is not allowed on windows", + } + buildImage("testforbidsystempaths1", build.WithDockerfile(fmt.Sprintf(dockerfile, "c:"))).Assert(c, exp) + buildImage("testforbidsystempaths2", build.WithDockerfile(fmt.Sprintf(dockerfile, "."))).Assert(c, exp) + buildImage("testforbidsystempaths3", build.WithDockerfile(fmt.Sprintf(dockerfile, "..\\\\"))).Assert(c, exp) + buildImage("testforbidsystempaths4", build.WithDockerfile(fmt.Sprintf(dockerfile, ".\\\\windows"))).Assert(c, exp) + buildImage("testforbidsystempaths5", build.WithDockerfile(fmt.Sprintf(dockerfile, "\\\\windows"))).Assert(c, exp) +} + +func (s *DockerSuite) TestBuildCopyFromWindowsIsCaseInsensitive(c *check.C) { + testRequires(c, DaemonIsWindows) + dockerfile := ` + FROM ` + testEnv.MinimalBaseImage() + ` + COPY foo / + FROM ` + testEnv.MinimalBaseImage() + ` + COPY --from=0 c:\\fOo c:\\copied + RUN type c:\\copied + ` + cli.Docker(cli.Build("copyfrom-windows-insensitive"), build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile("foo", "hello world"), + )).Assert(c, icmd.Expected{ + ExitCode: 0, + Out: "hello world", + }) +} + +// #33176 +func (s *DockerSuite) TestBuildCopyFromResetScratch(c *check.C) { + testRequires(c, DaemonIsLinux) + + dockerfile := ` + FROM busybox + WORKDIR /foo/bar + FROM scratch + ENV FOO=bar + ` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + ) + defer ctx.Close() + + cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx)) + + res := cli.InspectCmd(c, "build1", cli.Format(".Config.WorkingDir")).Combined() + c.Assert(strings.TrimSpace(res), checker.Equals, "") +} + +func (s *DockerSuite) TestBuildIntermediateTarget(c *check.C) { + dockerfile := ` + FROM busybox AS build-env + CMD ["/dev"] + FROM busybox + CMD ["/dist"] + ` + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile)) + defer ctx.Close() + + cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx), + cli.WithFlags("--target", "build-env")) + + //res := inspectFieldJSON(c, "build1", "Config.Cmd") + res := cli.InspectCmd(c, "build1", cli.Format("json .Config.Cmd")).Combined() + c.Assert(strings.TrimSpace(res), checker.Equals, `["/dev"]`) + + result := cli.Docker(cli.Build("build1"), build.WithExternalBuildContext(ctx), + cli.WithFlags("--target", "nosuchtarget")) + result.Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "failed to reach build target", + }) +} + +// TestBuildOpaqueDirectory tests that a build succeeds which +// creates opaque directories. 
+// See https://github.com/docker/docker/issues/25244 +func (s *DockerSuite) TestBuildOpaqueDirectory(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerFile := ` + FROM busybox + RUN mkdir /dir1 && touch /dir1/f1 + RUN rm -rf /dir1 && mkdir /dir1 && touch /dir1/f2 + RUN touch /dir1/f3 + RUN [ -f /dir1/f2 ] + ` + // Test that build succeeds, last command fails if opaque directory + // was not handled correctly + buildImageSuccessfully(c, "testopaquedirectory", build.WithDockerfile(dockerFile)) +} + +// Windows test for USER in dockerfile +func (s *DockerSuite) TestBuildWindowsUser(c *check.C) { + testRequires(c, DaemonIsWindows) + name := "testbuildwindowsuser" + buildImage(name, build.WithDockerfile(`FROM `+testEnv.MinimalBaseImage()+` + RUN net user user /add + USER user + RUN set username + `)).Assert(c, icmd.Expected{ + Out: "USERNAME=user", + }) +} + +// Verifies if COPY file . when WORKDIR is set to a non-existing directory, +// the directory is created and the file is copied into the directory, +// as opposed to the file being copied as a file with the name of the +// directory. Fix for 27545 (found on Windows, but regression good for Linux too). +// Note 27545 was reverted in 28505, but a new fix was added subsequently in 28514. +func (s *DockerSuite) TestBuildCopyFileDotWithWorkdir(c *check.C) { + name := "testbuildcopyfiledotwithworkdir" + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox +WORKDIR /foo +COPY file . +RUN ["cat", "/foo/file"] +`), + build.WithFile("file", "content"), + )) +} + +// Case-insensitive environment variables on Windows +func (s *DockerSuite) TestBuildWindowsEnvCaseInsensitive(c *check.C) { + testRequires(c, DaemonIsWindows) + name := "testbuildwindowsenvcaseinsensitive" + buildImageSuccessfully(c, name, build.WithDockerfile(` + FROM `+testEnv.MinimalBaseImage()+` + ENV FOO=bar foo=baz + `)) + res := inspectFieldJSON(c, name, "Config.Env") + if res != `["foo=baz"]` { // Should not have FOO=bar in it - takes the last one processed. And only one entry as deduped. + c.Fatalf("Case insensitive environment variables on Windows failed. 
Got %s", res) + } +} + +// Test case for 29667 +func (s *DockerSuite) TestBuildWorkdirImageCmd(c *check.C) { + image := "testworkdirimagecmd" + buildImageSuccessfully(c, image, build.WithDockerfile(` +FROM busybox +WORKDIR /foo/bar +`)) + out, _ := dockerCmd(c, "inspect", "--format", "{{ json .Config.Cmd }}", image) + + // The Windows busybox image has a blank `cmd` + lookingFor := `["sh"]` + if testEnv.DaemonPlatform() == "windows" { + lookingFor = "null" + } + c.Assert(strings.TrimSpace(out), checker.Equals, lookingFor) + + image = "testworkdirlabelimagecmd" + buildImageSuccessfully(c, image, build.WithDockerfile(` +FROM busybox +WORKDIR /foo/bar +LABEL a=b +`)) + + out, _ = dockerCmd(c, "inspect", "--format", "{{ json .Config.Cmd }}", image) + c.Assert(strings.TrimSpace(out), checker.Equals, lookingFor) +} + +// Test case for 28902/28909 +func (s *DockerSuite) TestBuildWorkdirCmd(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildworkdircmd" + dockerFile := ` + FROM busybox + WORKDIR / + ` + buildImageSuccessfully(c, name, build.WithDockerfile(dockerFile)) + result := buildImage(name, build.WithDockerfile(dockerFile)) + result.Assert(c, icmd.Success) + c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 1) +} + +// FIXME(vdemeester) should be a unit test +func (s *DockerSuite) TestBuildLineErrorOnBuild(c *check.C) { + name := "test_build_line_error_onbuild" + buildImage(name, build.WithDockerfile(`FROM busybox + ONBUILD + `)).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Dockerfile parse error line 2: ONBUILD requires at least one argument", + }) +} + +// FIXME(vdemeester) should be a unit test +func (s *DockerSuite) TestBuildLineErrorUnknownInstruction(c *check.C) { + name := "test_build_line_error_unknown_instruction" + cli.Docker(cli.Build(name), build.WithDockerfile(`FROM busybox + RUN echo hello world + NOINSTRUCTION echo ba + RUN echo hello + ERROR + `)).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Dockerfile parse error line 3: unknown instruction: NOINSTRUCTION", + }) +} + +// FIXME(vdemeester) should be a unit test +func (s *DockerSuite) TestBuildLineErrorWithEmptyLines(c *check.C) { + name := "test_build_line_error_with_empty_lines" + cli.Docker(cli.Build(name), build.WithDockerfile(` + FROM busybox + + RUN echo hello world + + NOINSTRUCTION echo ba + + CMD ["/bin/init"] + `)).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Dockerfile parse error line 6: unknown instruction: NOINSTRUCTION", + }) +} + +// FIXME(vdemeester) should be a unit test +func (s *DockerSuite) TestBuildLineErrorWithComments(c *check.C) { + name := "test_build_line_error_with_comments" + cli.Docker(cli.Build(name), build.WithDockerfile(`FROM busybox + # This will print hello world + # and then ba + RUN echo hello world + NOINSTRUCTION echo ba + `)).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Dockerfile parse error line 5: unknown instruction: NOINSTRUCTION", + }) +} + +// #31957 +func (s *DockerSuite) TestBuildSetCommandWithDefinedShell(c *check.C) { + buildImageSuccessfully(c, "build1", build.WithDockerfile(` +FROM busybox +SHELL ["/bin/sh", "-c"] +`)) + buildImageSuccessfully(c, "build2", build.WithDockerfile(` +FROM build1 +CMD echo foo +`)) + + out, _ := dockerCmd(c, "inspect", "--format", "{{ json .Config.Cmd }}", "build2") + c.Assert(strings.TrimSpace(out), checker.Equals, `["/bin/sh","-c","echo foo"]`) +} + +func (s *DockerSuite) TestBuildIidFile(c *check.C) { + tmpDir, err := ioutil.TempDir("", "TestBuildIidFile") + if err != nil { + c.Fatal(err) + } 
+ defer os.RemoveAll(tmpDir) + tmpIidFile := filepath.Join(tmpDir, "iid") + + name := "testbuildiidfile" + // Use a Dockerfile with multiple stages to ensure we get the last one + cli.BuildCmd(c, name, + build.WithDockerfile(`FROM `+minimalBaseImage()+` AS stage1 +ENV FOO FOO +FROM `+minimalBaseImage()+` +ENV BAR BAZ`), + cli.WithFlags("--iidfile", tmpIidFile)) + + id, err := ioutil.ReadFile(tmpIidFile) + c.Assert(err, check.IsNil) + d, err := digest.Parse(string(id)) + c.Assert(err, check.IsNil) + c.Assert(d.String(), checker.Equals, getIDByName(c, name)) +} + +func (s *DockerSuite) TestBuildIidFileCleanupOnFail(c *check.C) { + tmpDir, err := ioutil.TempDir("", "TestBuildIidFileCleanupOnFail") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir) + tmpIidFile := filepath.Join(tmpDir, "iid") + + err = ioutil.WriteFile(tmpIidFile, []byte("Dummy"), 0666) + c.Assert(err, check.IsNil) + + cli.Docker(cli.Build("testbuildiidfilecleanuponfail"), + build.WithDockerfile(`FROM `+minimalBaseImage()+` + RUN /non/existing/command`), + cli.WithFlags("--iidfile", tmpIidFile)).Assert(c, icmd.Expected{ + ExitCode: 1, + }) + _, err = os.Stat(tmpIidFile) + c.Assert(err, check.NotNil) + c.Assert(os.IsNotExist(err), check.Equals, true) +} + +func (s *DockerSuite) TestBuildIidFileSquash(c *check.C) { + testRequires(c, ExperimentalDaemon) + tmpDir, err := ioutil.TempDir("", "TestBuildIidFileSquash") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir) + tmpIidFile := filepath.Join(tmpDir, "iidsquash") + + name := "testbuildiidfilesquash" + // Use a Dockerfile with multiple stages to ensure we get the last one + cli.BuildCmd(c, name, + // This could be minimalBaseImage except + // https://github.com/moby/moby/issues/33823 requires + // `touch` to workaround. 
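+ // --squash runs the ordinary build first, then collapses the layers
+ // this Dockerfile produced into a single layer; the iidfile must then
+ // name the squashed image, not the pre-squash intermediate one.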
+ build.WithDockerfile(`FROM busybox +ENV FOO FOO +ENV BAR BAR +RUN touch /foop +`), + cli.WithFlags("--iidfile", tmpIidFile, "--squash")) + + id, err := ioutil.ReadFile(tmpIidFile) + c.Assert(err, check.IsNil) + d, err := digest.Parse(string(id)) + c.Assert(err, check.IsNil) + c.Assert(d.String(), checker.Equals, getIDByName(c, name)) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_build_unix_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_build_unix_test.go new file mode 100644 index 000000000..11c682325 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_build_unix_test.go @@ -0,0 +1,204 @@ +// +build !windows + +package main + +import ( + "bufio" + "bytes" + "encoding/json" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + "time" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/integration-cli/cli/build/fakecontext" + "github.com/docker/docker/pkg/testutil" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/docker/go-units" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestBuildResourceConstraintsAreUsed(c *check.C) { + testRequires(c, cpuCfsQuota) + name := "testbuildresourceconstraints" + + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(` + FROM hello-world:frozen + RUN ["/hello"] + `)) + cli.Docker( + cli.Args("build", "--no-cache", "--rm=false", "--memory=64m", "--memory-swap=-1", "--cpuset-cpus=0", "--cpuset-mems=0", "--cpu-shares=100", "--cpu-quota=8000", "--ulimit", "nofile=42", "-t", name, "."), + cli.InDir(ctx.Dir), + ).Assert(c, icmd.Success) + + out := cli.DockerCmd(c, "ps", "-lq").Combined() + cID := strings.TrimSpace(out) + + type hostConfig struct { + Memory int64 + MemorySwap int64 + CpusetCpus string + CpusetMems string + CPUShares int64 + CPUQuota int64 + Ulimits []*units.Ulimit + } + + cfg := inspectFieldJSON(c, cID, "HostConfig") + + var c1 hostConfig + err := json.Unmarshal([]byte(cfg), &c1) + c.Assert(err, checker.IsNil, check.Commentf(cfg)) + + c.Assert(c1.Memory, checker.Equals, int64(64*1024*1024), check.Commentf("resource constraints not set properly for Memory")) + c.Assert(c1.MemorySwap, checker.Equals, int64(-1), check.Commentf("resource constraints not set properly for MemorySwap")) + c.Assert(c1.CpusetCpus, checker.Equals, "0", check.Commentf("resource constraints not set properly for CpusetCpus")) + c.Assert(c1.CpusetMems, checker.Equals, "0", check.Commentf("resource constraints not set properly for CpusetMems")) + c.Assert(c1.CPUShares, checker.Equals, int64(100), check.Commentf("resource constraints not set properly for CPUShares")) + c.Assert(c1.CPUQuota, checker.Equals, int64(8000), check.Commentf("resource constraints not set properly for CPUQuota")) + c.Assert(c1.Ulimits[0].Name, checker.Equals, "nofile", check.Commentf("resource constraints not set properly for Ulimits")) + c.Assert(c1.Ulimits[0].Hard, checker.Equals, int64(42), check.Commentf("resource constraints not set properly for Ulimits")) + + // Make sure constraints aren't saved to image + cli.DockerCmd(c, "run", "--name=test", name) + + cfg = inspectFieldJSON(c, "test", "HostConfig") + + var c2 hostConfig + err = json.Unmarshal([]byte(cfg), &c2) + c.Assert(err, checker.IsNil, check.Commentf(cfg)) + + c.Assert(c2.Memory, check.Not(checker.Equals), int64(64*1024*1024), check.Commentf("resource leaked from build for Memory")) + 
c.Assert(c2.MemorySwap, check.Not(checker.Equals), int64(-1), check.Commentf("resource leaked from build for MemorySwap")) + c.Assert(c2.CpusetCpus, check.Not(checker.Equals), "0", check.Commentf("resource leaked from build for CpusetCpus")) + c.Assert(c2.CpusetMems, check.Not(checker.Equals), "0", check.Commentf("resource leaked from build for CpusetMems")) + c.Assert(c2.CPUShares, check.Not(checker.Equals), int64(100), check.Commentf("resource leaked from build for CPUShares")) + c.Assert(c2.CPUQuota, check.Not(checker.Equals), int64(8000), check.Commentf("resource leaked from build for CPUQuota")) + c.Assert(c2.Ulimits, checker.IsNil, check.Commentf("resource leaked from build for Ulimits")) +} + +func (s *DockerSuite) TestBuildAddChangeOwnership(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildaddown" + + ctx := func() *fakecontext.Fake { + dockerfile := ` + FROM busybox + ADD foo /bar/ + RUN [ $(stat -c %U:%G "/bar") = 'root:root' ] + RUN [ $(stat -c %U:%G "/bar/foo") = 'root:root' ] + ` + tmpDir, err := ioutil.TempDir("", "fake-context") + c.Assert(err, check.IsNil) + testFile, err := os.Create(filepath.Join(tmpDir, "foo")) + if err != nil { + c.Fatalf("failed to create foo file: %v", err) + } + defer testFile.Close() + + icmd.RunCmd(icmd.Cmd{ + Command: []string{"chown", "daemon:daemon", "foo"}, + Dir: tmpDir, + }).Assert(c, icmd.Success) + + if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { + c.Fatalf("failed to open destination dockerfile: %v", err) + } + return fakecontext.New(c, tmpDir) + }() + + defer ctx.Close() + + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) +} + +// Test that an infinite sleep during a build is killed if the client disconnects. +// This test is fairly hairy because there are lots of ways to race. +// Strategy: +// * Monitor the output of docker events starting from before +// * Run a 1-year-long sleep from a docker build. +// * When docker events sees container start, close the "docker build" command +// * Wait for docker events to emit a dying event. 
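+// The build container's ID is recovered from the "Running in <cid>" line the
+// builder prints for each RUN step; that ID is what the event matcher below
+// watches for.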
+func (s *DockerSuite) TestBuildCancellationKillsSleep(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildcancellation" + + observer, err := newEventObserver(c) + c.Assert(err, checker.IsNil) + err = observer.Start() + c.Assert(err, checker.IsNil) + defer observer.Stop() + + // (Note: one year, will never finish) + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile("FROM busybox\nRUN sleep 31536000")) + defer ctx.Close() + + buildCmd := exec.Command(dockerBinary, "build", "-t", name, ".") + buildCmd.Dir = ctx.Dir + + stdoutBuild, err := buildCmd.StdoutPipe() + c.Assert(err, checker.IsNil) + + if err := buildCmd.Start(); err != nil { + c.Fatalf("failed to run build: %s", err) + } + + matchCID := regexp.MustCompile("Running in (.+)") + scanner := bufio.NewScanner(stdoutBuild) + + outputBuffer := new(bytes.Buffer) + var buildID string + for scanner.Scan() { + line := scanner.Text() + outputBuffer.WriteString(line) + outputBuffer.WriteString("\n") + if matches := matchCID.FindStringSubmatch(line); len(matches) > 0 { + buildID = matches[1] + break + } + } + + if buildID == "" { + c.Fatalf("Unable to find build container id in build output:\n%s", outputBuffer.String()) + } + + testActions := map[string]chan bool{ + "start": make(chan bool, 1), + "die": make(chan bool, 1), + } + + matcher := matchEventLine(buildID, "container", testActions) + processor := processEventMatch(testActions) + go observer.Match(matcher, processor) + + select { + case <-time.After(10 * time.Second): + observer.CheckEventError(c, buildID, "start", matcher) + case <-testActions["start"]: + // ignore, done + } + + // Send a kill to the `docker build` command. + // Causes the underlying build to be cancelled due to socket close. + if err := buildCmd.Process.Kill(); err != nil { + c.Fatalf("error killing build command: %s", err) + } + + // Get the exit status of `docker build`, check it exited because killed. 
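+ // testutil.IsKilled separates "terminated by the Kill above" from a
+ // genuine build failure, so an unrelated error still fails the test.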
+ if err := buildCmd.Wait(); err != nil && !testutil.IsKilled(err) { + c.Fatalf("wait failed during build run: %T %s", err, err) + } + + select { + case <-time.After(10 * time.Second): + observer.CheckEventError(c, buildID, "die", matcher) + case <-testActions["die"]: + // ignore, done + } +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_by_digest_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_by_digest_test.go new file mode 100644 index 000000000..c7115c88c --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_by_digest_test.go @@ -0,0 +1,690 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "regexp" + "strings" + + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/docker/api/types" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/pkg/stringutils" + "github.com/go-check/check" + "github.com/opencontainers/go-digest" +) + +var ( + remoteRepoName = "dockercli/busybox-by-dgst" + repoName = fmt.Sprintf("%s/%s", privateRegistryURL, remoteRepoName) + pushDigestRegex = regexp.MustCompile("[\\S]+: digest: ([\\S]+) size: [0-9]+") + digestRegex = regexp.MustCompile("Digest: ([\\S]+)") +) + +func setupImage(c *check.C) (digest.Digest, error) { + return setupImageWithTag(c, "latest") +} + +func setupImageWithTag(c *check.C, tag string) (digest.Digest, error) { + containerName := "busyboxbydigest" + + // new file is committed because this layer is used for detecting malicious + // changes. if this was committed as empty layer it would be skipped on pull + // and malicious changes would never be detected. 
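+ // The helper therefore always commits a non-empty layer; the flow is
+ // roughly:
+ //
+ //   docker run --name busyboxbydigest busybox touch anewfile
+ //   docker commit busyboxbydigest <repo>:<tag>
+ //   docker push <repo>:<tag>   # prints "... digest: sha256:... size: N"
+ //
+ // and the sha256 digest parsed from the push output is returned.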
+ cli.DockerCmd(c, "run", "-e", "digest=1", "--name", containerName, "busybox", "touch", "anewfile") + + // tag the image to upload it to the private registry + repoAndTag := repoName + ":" + tag + cli.DockerCmd(c, "commit", containerName, repoAndTag) + + // delete the container as we don't need it any more + cli.DockerCmd(c, "rm", "-fv", containerName) + + // push the image + out := cli.DockerCmd(c, "push", repoAndTag).Combined() + + // delete our local repo that we previously tagged + cli.DockerCmd(c, "rmi", repoAndTag) + + matches := pushDigestRegex.FindStringSubmatch(out) + c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from push output: %s", out)) + pushDigest := matches[1] + + return digest.Digest(pushDigest), nil +} + +func testPullByTagDisplaysDigest(c *check.C) { + testRequires(c, DaemonIsLinux) + pushDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + // pull from the registry using the tag + out, _ := dockerCmd(c, "pull", repoName) + + // the pull output includes "Digest: ", so find that + matches := digestRegex.FindStringSubmatch(out) + c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", out)) + pullDigest := matches[1] + + // make sure the pushed and pull digests match + c.Assert(pushDigest.String(), checker.Equals, pullDigest) +} + +func (s *DockerRegistrySuite) TestPullByTagDisplaysDigest(c *check.C) { + testPullByTagDisplaysDigest(c) +} + +func (s *DockerSchema1RegistrySuite) TestPullByTagDisplaysDigest(c *check.C) { + testPullByTagDisplaysDigest(c) +} + +func testPullByDigest(c *check.C) { + testRequires(c, DaemonIsLinux) + pushDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + // pull from the registry using the @ reference + imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) + out, _ := dockerCmd(c, "pull", imageReference) + + // the pull output includes "Digest: ", so find that + matches := digestRegex.FindStringSubmatch(out) + c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", out)) + pullDigest := matches[1] + + // make sure the pushed and pull digests match + c.Assert(pushDigest.String(), checker.Equals, pullDigest) +} + +func (s *DockerRegistrySuite) TestPullByDigest(c *check.C) { + testPullByDigest(c) +} + +func (s *DockerSchema1RegistrySuite) TestPullByDigest(c *check.C) { + testPullByDigest(c) +} + +func testPullByDigestNoFallback(c *check.C) { + testRequires(c, DaemonIsLinux) + // pull from the registry using the @ reference + imageReference := fmt.Sprintf("%s@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", repoName) + out, _, err := dockerCmdWithError("pull", imageReference) + c.Assert(err, checker.NotNil, check.Commentf("expected non-zero exit status and correct error message when pulling non-existing image")) + c.Assert(out, checker.Contains, fmt.Sprintf("manifest for %s not found", imageReference), check.Commentf("expected non-zero exit status and correct error message when pulling non-existing image")) +} + +func (s *DockerRegistrySuite) TestPullByDigestNoFallback(c *check.C) { + testPullByDigestNoFallback(c) +} + +func (s *DockerSchema1RegistrySuite) TestPullByDigestNoFallback(c *check.C) { + testPullByDigestNoFallback(c) +} + +func (s *DockerRegistrySuite) TestCreateByDigest(c *check.C) { + pushDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up 
image")) + + imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) + + containerName := "createByDigest" + dockerCmd(c, "create", "--name", containerName, imageReference) + + res := inspectField(c, containerName, "Config.Image") + c.Assert(res, checker.Equals, imageReference) +} + +func (s *DockerRegistrySuite) TestRunByDigest(c *check.C) { + pushDigest, err := setupImage(c) + c.Assert(err, checker.IsNil) + + imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) + + containerName := "runByDigest" + out, _ := dockerCmd(c, "run", "--name", containerName, imageReference, "sh", "-c", "echo found=$digest") + + foundRegex := regexp.MustCompile("found=([^\n]+)") + matches := foundRegex.FindStringSubmatch(out) + c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", out)) + c.Assert(matches[1], checker.Equals, "1", check.Commentf("Expected %q, got %q", "1", matches[1])) + + res := inspectField(c, containerName, "Config.Image") + c.Assert(res, checker.Equals, imageReference) +} + +func (s *DockerRegistrySuite) TestRemoveImageByDigest(c *check.C) { + digest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + imageReference := fmt.Sprintf("%s@%s", repoName, digest) + + // pull from the registry using the @ reference + dockerCmd(c, "pull", imageReference) + + // make sure inspect runs ok + inspectField(c, imageReference, "Id") + + // do the delete + err = deleteImages(imageReference) + c.Assert(err, checker.IsNil, check.Commentf("unexpected error deleting image")) + + // try to inspect again - it should error this time + _, err = inspectFieldWithError(imageReference, "Id") + //unexpected nil err trying to inspect what should be a non-existent image + c.Assert(err, checker.NotNil) + c.Assert(err.Error(), checker.Contains, "No such object") +} + +func (s *DockerRegistrySuite) TestBuildByDigest(c *check.C) { + digest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + imageReference := fmt.Sprintf("%s@%s", repoName, digest) + + // pull from the registry using the @ reference + dockerCmd(c, "pull", imageReference) + + // get the image id + imageID := inspectField(c, imageReference, "Id") + + // do the build + name := "buildbydigest" + buildImageSuccessfully(c, name, build.WithDockerfile(fmt.Sprintf( + `FROM %s + CMD ["/bin/echo", "Hello World"]`, imageReference))) + c.Assert(err, checker.IsNil) + + // get the build's image id + res := inspectField(c, name, "Config.Image") + // make sure they match + c.Assert(res, checker.Equals, imageID) +} + +func (s *DockerRegistrySuite) TestTagByDigest(c *check.C) { + digest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + imageReference := fmt.Sprintf("%s@%s", repoName, digest) + + // pull from the registry using the @ reference + dockerCmd(c, "pull", imageReference) + + // tag it + tag := "tagbydigest" + dockerCmd(c, "tag", imageReference, tag) + + expectedID := inspectField(c, imageReference, "Id") + + tagID := inspectField(c, tag, "Id") + c.Assert(tagID, checker.Equals, expectedID) +} + +func (s *DockerRegistrySuite) TestListImagesWithoutDigests(c *check.C) { + digest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + imageReference := fmt.Sprintf("%s@%s", repoName, digest) + + // pull from the registry using the @ reference + dockerCmd(c, "pull", imageReference) + + out, _ := dockerCmd(c, "images") + c.Assert(out, 
checker.Not(checker.Contains), "DIGEST", check.Commentf("list output should not have contained DIGEST header")) +} + +func (s *DockerRegistrySuite) TestListImagesWithDigests(c *check.C) { + + // setup image1 + digest1, err := setupImageWithTag(c, "tag1") + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + imageReference1 := fmt.Sprintf("%s@%s", repoName, digest1) + c.Logf("imageReference1 = %s", imageReference1) + + // pull image1 by digest + dockerCmd(c, "pull", imageReference1) + + // list images + out, _ := dockerCmd(c, "images", "--digests") + + // make sure repo shown, tag=, digest = $digest1 + re1 := regexp.MustCompile(`\s*` + repoName + `\s*\s*` + digest1.String() + `\s`) + c.Assert(re1.MatchString(out), checker.True, check.Commentf("expected %q: %s", re1.String(), out)) + // setup image2 + digest2, err := setupImageWithTag(c, "tag2") + //error setting up image + c.Assert(err, checker.IsNil) + imageReference2 := fmt.Sprintf("%s@%s", repoName, digest2) + c.Logf("imageReference2 = %s", imageReference2) + + // pull image1 by digest + dockerCmd(c, "pull", imageReference1) + + // pull image2 by digest + dockerCmd(c, "pull", imageReference2) + + // list images + out, _ = dockerCmd(c, "images", "--digests") + + // make sure repo shown, tag=, digest = $digest1 + c.Assert(re1.MatchString(out), checker.True, check.Commentf("expected %q: %s", re1.String(), out)) + + // make sure repo shown, tag=, digest = $digest2 + re2 := regexp.MustCompile(`\s*` + repoName + `\s*\s*` + digest2.String() + `\s`) + c.Assert(re2.MatchString(out), checker.True, check.Commentf("expected %q: %s", re2.String(), out)) + + // pull tag1 + dockerCmd(c, "pull", repoName+":tag1") + + // list images + out, _ = dockerCmd(c, "images", "--digests") + + // make sure image 1 has repo, tag, AND repo, , digest + reWithDigest1 := regexp.MustCompile(`\s*` + repoName + `\s*tag1\s*` + digest1.String() + `\s`) + c.Assert(reWithDigest1.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest1.String(), out)) + // make sure image 2 has repo, , digest + c.Assert(re2.MatchString(out), checker.True, check.Commentf("expected %q: %s", re2.String(), out)) + + // pull tag 2 + dockerCmd(c, "pull", repoName+":tag2") + + // list images + out, _ = dockerCmd(c, "images", "--digests") + + // make sure image 1 has repo, tag, digest + c.Assert(reWithDigest1.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest1.String(), out)) + + // make sure image 2 has repo, tag, digest + reWithDigest2 := regexp.MustCompile(`\s*` + repoName + `\s*tag2\s*` + digest2.String() + `\s`) + c.Assert(reWithDigest2.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest2.String(), out)) + + // list images + out, _ = dockerCmd(c, "images", "--digests") + + // make sure image 1 has repo, tag, digest + c.Assert(reWithDigest1.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest1.String(), out)) + // make sure image 2 has repo, tag, digest + c.Assert(reWithDigest2.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest2.String(), out)) + // make sure busybox has tag, but not digest + busyboxRe := regexp.MustCompile(`\s*busybox\s*latest\s*\s`) + c.Assert(busyboxRe.MatchString(out), checker.True, check.Commentf("expected %q: %s", busyboxRe.String(), out)) +} + +func (s *DockerRegistrySuite) TestListDanglingImagesWithDigests(c *check.C) { + // setup image1 + digest1, err := setupImageWithTag(c, "dangle1") + c.Assert(err, checker.IsNil, 
check.Commentf("error setting up image")) + imageReference1 := fmt.Sprintf("%s@%s", repoName, digest1) + c.Logf("imageReference1 = %s", imageReference1) + + // pull image1 by digest + dockerCmd(c, "pull", imageReference1) + + // list images + out, _ := dockerCmd(c, "images", "--digests") + + // make sure repo shown, tag=, digest = $digest1 + re1 := regexp.MustCompile(`\s*` + repoName + `\s*\s*` + digest1.String() + `\s`) + c.Assert(re1.MatchString(out), checker.True, check.Commentf("expected %q: %s", re1.String(), out)) + // setup image2 + digest2, err := setupImageWithTag(c, "dangle2") + //error setting up image + c.Assert(err, checker.IsNil) + imageReference2 := fmt.Sprintf("%s@%s", repoName, digest2) + c.Logf("imageReference2 = %s", imageReference2) + + // pull image1 by digest + dockerCmd(c, "pull", imageReference1) + + // pull image2 by digest + dockerCmd(c, "pull", imageReference2) + + // list images + out, _ = dockerCmd(c, "images", "--digests", "--filter=dangling=true") + + // make sure repo shown, tag=, digest = $digest1 + c.Assert(re1.MatchString(out), checker.True, check.Commentf("expected %q: %s", re1.String(), out)) + + // make sure repo shown, tag=, digest = $digest2 + re2 := regexp.MustCompile(`\s*` + repoName + `\s*\s*` + digest2.String() + `\s`) + c.Assert(re2.MatchString(out), checker.True, check.Commentf("expected %q: %s", re2.String(), out)) + + // pull dangle1 tag + dockerCmd(c, "pull", repoName+":dangle1") + + // list images + out, _ = dockerCmd(c, "images", "--digests", "--filter=dangling=true") + + // make sure image 1 has repo, tag, AND repo, , digest + reWithDigest1 := regexp.MustCompile(`\s*` + repoName + `\s*dangle1\s*` + digest1.String() + `\s`) + c.Assert(reWithDigest1.MatchString(out), checker.False, check.Commentf("unexpected %q: %s", reWithDigest1.String(), out)) + // make sure image 2 has repo, , digest + c.Assert(re2.MatchString(out), checker.True, check.Commentf("expected %q: %s", re2.String(), out)) + + // pull dangle2 tag + dockerCmd(c, "pull", repoName+":dangle2") + + // list images, show tagged images + out, _ = dockerCmd(c, "images", "--digests") + + // make sure image 1 has repo, tag, digest + c.Assert(reWithDigest1.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest1.String(), out)) + + // make sure image 2 has repo, tag, digest + reWithDigest2 := regexp.MustCompile(`\s*` + repoName + `\s*dangle2\s*` + digest2.String() + `\s`) + c.Assert(reWithDigest2.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest2.String(), out)) + + // list images, no longer dangling, should not match + out, _ = dockerCmd(c, "images", "--digests", "--filter=dangling=true") + + // make sure image 1 has repo, tag, digest + c.Assert(reWithDigest1.MatchString(out), checker.False, check.Commentf("unexpected %q: %s", reWithDigest1.String(), out)) + // make sure image 2 has repo, tag, digest + c.Assert(reWithDigest2.MatchString(out), checker.False, check.Commentf("unexpected %q: %s", reWithDigest2.String(), out)) +} + +func (s *DockerRegistrySuite) TestInspectImageWithDigests(c *check.C) { + digest, err := setupImage(c) + c.Assert(err, check.IsNil, check.Commentf("error setting up image")) + + imageReference := fmt.Sprintf("%s@%s", repoName, digest) + + // pull from the registry using the @ reference + dockerCmd(c, "pull", imageReference) + + out, _ := dockerCmd(c, "inspect", imageReference) + + var imageJSON []types.ImageInspect + err = json.Unmarshal([]byte(out), &imageJSON) + c.Assert(err, checker.IsNil) + 
c.Assert(imageJSON, checker.HasLen, 1) + c.Assert(imageJSON[0].RepoDigests, checker.HasLen, 1) + c.Assert(stringutils.InSlice(imageJSON[0].RepoDigests, imageReference), checker.Equals, true) +} + +func (s *DockerRegistrySuite) TestPsListContainersFilterAncestorImageByDigest(c *check.C) { + digest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + imageReference := fmt.Sprintf("%s@%s", repoName, digest) + + // pull from the registry using the @ reference + dockerCmd(c, "pull", imageReference) + + // build an image from it + imageName1 := "images_ps_filter_test" + buildImageSuccessfully(c, imageName1, build.WithDockerfile(fmt.Sprintf( + `FROM %s + LABEL match me 1`, imageReference))) + + // run a container based on that + dockerCmd(c, "run", "--name=test1", imageReference, "echo", "hello") + expectedID := getIDByName(c, "test1") + + // run a container based on the a descendant of that too + dockerCmd(c, "run", "--name=test2", imageName1, "echo", "hello") + expectedID1 := getIDByName(c, "test2") + + expectedIDs := []string{expectedID, expectedID1} + + // Invalid imageReference + out, _ := dockerCmd(c, "ps", "-a", "-q", "--no-trunc", fmt.Sprintf("--filter=ancestor=busybox@%s", digest)) + // Filter container for ancestor filter should be empty + c.Assert(strings.TrimSpace(out), checker.Equals, "") + + // Valid imageReference + out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=ancestor="+imageReference) + checkPsAncestorFilterOutput(c, out, imageReference, expectedIDs) +} + +func (s *DockerRegistrySuite) TestDeleteImageByIDOnlyPulledByDigest(c *check.C) { + pushDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + // pull from the registry using the @ reference + imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) + dockerCmd(c, "pull", imageReference) + // just in case... 
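+ // ...the image also gets a tag, proving that removal by bare image ID
+ // unwinds the tag and the digest reference together.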
+ + dockerCmd(c, "tag", imageReference, repoName+":sometag") + + imageID := inspectField(c, imageReference, "Id") + + dockerCmd(c, "rmi", imageID) + + _, err = inspectFieldWithError(imageID, "Id") + c.Assert(err, checker.NotNil, check.Commentf("image should have been deleted")) +} + +func (s *DockerRegistrySuite) TestDeleteImageWithDigestAndTag(c *check.C) { + pushDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + // pull from the registry using the @ reference + imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) + dockerCmd(c, "pull", imageReference) + + imageID := inspectField(c, imageReference, "Id") + + repoTag := repoName + ":sometag" + repoTag2 := repoName + ":othertag" + dockerCmd(c, "tag", imageReference, repoTag) + dockerCmd(c, "tag", imageReference, repoTag2) + + dockerCmd(c, "rmi", repoTag2) + + // rmi should have deleted only repoTag2, because there's another tag + inspectField(c, repoTag, "Id") + + dockerCmd(c, "rmi", repoTag) + + // rmi should have deleted the tag, the digest reference, and the image itself + _, err = inspectFieldWithError(imageID, "Id") + c.Assert(err, checker.NotNil, check.Commentf("image should have been deleted")) +} + +func (s *DockerRegistrySuite) TestDeleteImageWithDigestAndMultiRepoTag(c *check.C) { + pushDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + repo2 := fmt.Sprintf("%s/%s", repoName, "repo2") + + // pull from the registry using the @ reference + imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) + dockerCmd(c, "pull", imageReference) + + imageID := inspectField(c, imageReference, "Id") + + repoTag := repoName + ":sometag" + repoTag2 := repo2 + ":othertag" + dockerCmd(c, "tag", imageReference, repoTag) + dockerCmd(c, "tag", imageReference, repoTag2) + + dockerCmd(c, "rmi", repoTag) + + // rmi should have deleted repoTag and image reference, but left repoTag2 + inspectField(c, repoTag2, "Id") + _, err = inspectFieldWithError(imageReference, "Id") + c.Assert(err, checker.NotNil, check.Commentf("image digest reference should have been removed")) + + _, err = inspectFieldWithError(repoTag, "Id") + c.Assert(err, checker.NotNil, check.Commentf("image tag reference should have been removed")) + + dockerCmd(c, "rmi", repoTag2) + + // rmi should have deleted the tag, the digest reference, and the image itself + _, err = inspectFieldWithError(imageID, "Id") + c.Assert(err, checker.NotNil, check.Commentf("image should have been deleted")) +} + +// TestPullFailsWithAlteredManifest tests that a `docker pull` fails when +// we have modified a manifest blob and its digest cannot be verified. +// This is the schema2 version of the test. +func (s *DockerRegistrySuite) TestPullFailsWithAlteredManifest(c *check.C) { + testRequires(c, DaemonIsLinux) + manifestDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + // Load the target manifest blob. + manifestBlob := s.reg.ReadBlobContents(c, manifestDigest) + + var imgManifest schema2.Manifest + err = json.Unmarshal(manifestBlob, &imgManifest) + c.Assert(err, checker.IsNil, check.Commentf("unable to decode image manifest from blob")) + + // Change a layer in the manifest. + imgManifest.Layers[0].Digest = digest.Digest("sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef") + + // Move the existing data file aside, so that we can replace it with a + // malicious blob of data. NOTE: we defer the returned undo func. 
+ undo := s.reg.TempMoveBlobData(c, manifestDigest) + defer undo() + + alteredManifestBlob, err := json.MarshalIndent(imgManifest, "", " ") + c.Assert(err, checker.IsNil, check.Commentf("unable to encode altered image manifest to JSON")) + + s.reg.WriteBlobContents(c, manifestDigest, alteredManifestBlob) + + // Now try pulling that image by digest. We should get an error about + // digest verification for the manifest digest. + + // Pull from the registry using the @ reference. + imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest) + out, exitStatus, _ := dockerCmdWithError("pull", imageReference) + c.Assert(exitStatus, checker.Not(check.Equals), 0) + + expectedErrorMsg := fmt.Sprintf("manifest verification failed for digest %s", manifestDigest) + c.Assert(out, checker.Contains, expectedErrorMsg) +} + +// TestPullFailsWithAlteredManifest tests that a `docker pull` fails when +// we have modified a manifest blob and its digest cannot be verified. +// This is the schema1 version of the test. +func (s *DockerSchema1RegistrySuite) TestPullFailsWithAlteredManifest(c *check.C) { + testRequires(c, DaemonIsLinux) + manifestDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + // Load the target manifest blob. + manifestBlob := s.reg.ReadBlobContents(c, manifestDigest) + + var imgManifest schema1.Manifest + err = json.Unmarshal(manifestBlob, &imgManifest) + c.Assert(err, checker.IsNil, check.Commentf("unable to decode image manifest from blob")) + + // Change a layer in the manifest. + imgManifest.FSLayers[0] = schema1.FSLayer{ + BlobSum: digest.Digest("sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"), + } + + // Move the existing data file aside, so that we can replace it with a + // malicious blob of data. NOTE: we defer the returned undo func. + undo := s.reg.TempMoveBlobData(c, manifestDigest) + defer undo() + + alteredManifestBlob, err := json.MarshalIndent(imgManifest, "", " ") + c.Assert(err, checker.IsNil, check.Commentf("unable to encode altered image manifest to JSON")) + + s.reg.WriteBlobContents(c, manifestDigest, alteredManifestBlob) + + // Now try pulling that image by digest. We should get an error about + // digest verification for the manifest digest. + + // Pull from the registry using the @ reference. + imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest) + out, exitStatus, _ := dockerCmdWithError("pull", imageReference) + c.Assert(exitStatus, checker.Not(check.Equals), 0) + + expectedErrorMsg := fmt.Sprintf("image verification failed for digest %s", manifestDigest) + c.Assert(out, checker.Contains, expectedErrorMsg) +} + +// TestPullFailsWithAlteredLayer tests that a `docker pull` fails when +// we have modified a layer blob and its digest cannot be verified. +// This is the schema2 version of the test. +func (s *DockerRegistrySuite) TestPullFailsWithAlteredLayer(c *check.C) { + testRequires(c, DaemonIsLinux) + manifestDigest, err := setupImage(c) + c.Assert(err, checker.IsNil) + + // Load the target manifest blob. + manifestBlob := s.reg.ReadBlobContents(c, manifestDigest) + + var imgManifest schema2.Manifest + err = json.Unmarshal(manifestBlob, &imgManifest) + c.Assert(err, checker.IsNil) + + // Next, get the digest of one of the layers from the manifest. + targetLayerDigest := imgManifest.Layers[0].Digest + + // Move the existing data file aside, so that we can replace it with a + // malicious blob of data. NOTE: we defer the returned undo func. 
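+ // Unlike the manifest case above, the manifest here stays intact; only
+ // the blob behind one layer digest changes, so verification must fail
+ // while fetching the layer rather than while reading the manifest.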
+ undo := s.reg.TempMoveBlobData(c, targetLayerDigest) + defer undo() + + // Now make a fake data blob in this directory. + s.reg.WriteBlobContents(c, targetLayerDigest, []byte("This is not the data you are looking for.")) + + // Now try pulling that image by digest. We should get an error about + // digest verification for the target layer digest. + + // Remove distribution cache to force a re-pull of the blobs + if err := os.RemoveAll(filepath.Join(testEnv.DockerBasePath(), "image", s.d.StorageDriver(), "distribution")); err != nil { + c.Fatalf("error clearing distribution cache: %v", err) + } + + // Pull from the registry using the @ reference. + imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest) + out, exitStatus, _ := dockerCmdWithError("pull", imageReference) + c.Assert(exitStatus, checker.Not(check.Equals), 0, check.Commentf("expected a non-zero exit status")) + + expectedErrorMsg := fmt.Sprintf("filesystem layer verification failed for digest %s", targetLayerDigest) + c.Assert(out, checker.Contains, expectedErrorMsg, check.Commentf("expected error message in output: %s", out)) +} + +// TestPullFailsWithAlteredLayer tests that a `docker pull` fails when +// we have modified a layer blob and its digest cannot be verified. +// This is the schema1 version of the test. +func (s *DockerSchema1RegistrySuite) TestPullFailsWithAlteredLayer(c *check.C) { + testRequires(c, DaemonIsLinux) + manifestDigest, err := setupImage(c) + c.Assert(err, checker.IsNil) + + // Load the target manifest blob. + manifestBlob := s.reg.ReadBlobContents(c, manifestDigest) + + var imgManifest schema1.Manifest + err = json.Unmarshal(manifestBlob, &imgManifest) + c.Assert(err, checker.IsNil) + + // Next, get the digest of one of the layers from the manifest. + targetLayerDigest := imgManifest.FSLayers[0].BlobSum + + // Move the existing data file aside, so that we can replace it with a + // malicious blob of data. NOTE: we defer the returned undo func. + undo := s.reg.TempMoveBlobData(c, targetLayerDigest) + defer undo() + + // Now make a fake data blob in this directory. + s.reg.WriteBlobContents(c, targetLayerDigest, []byte("This is not the data you are looking for.")) + + // Now try pulling that image by digest. We should get an error about + // digest verification for the target layer digest. + + // Remove distribution cache to force a re-pull of the blobs + if err := os.RemoveAll(filepath.Join(testEnv.DockerBasePath(), "image", s.d.StorageDriver(), "distribution")); err != nil { + c.Fatalf("error clearing distribution cache: %v", err) + } + + // Pull from the registry using the @ reference. 
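+ // (the schema1 path reports the tampered blobSum in the same
+ // "filesystem layer verification failed" message asserted below)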
+ imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest) + out, exitStatus, _ := dockerCmdWithError("pull", imageReference) + c.Assert(exitStatus, checker.Not(check.Equals), 0, check.Commentf("expected a non-zero exit status")) + + expectedErrorMsg := fmt.Sprintf("filesystem layer verification failed for digest %s", targetLayerDigest) + c.Assert(out, checker.Contains, expectedErrorMsg, check.Commentf("expected error message in output: %s", out)) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_commit_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_commit_test.go new file mode 100644 index 000000000..b054c79c3 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_commit_test.go @@ -0,0 +1,157 @@ +package main + +import ( + "strings" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestCommitAfterContainerIsDone(c *check.C) { + out := cli.DockerCmd(c, "run", "-i", "-a", "stdin", "busybox", "echo", "foo").Combined() + + cleanedContainerID := strings.TrimSpace(out) + + cli.DockerCmd(c, "wait", cleanedContainerID) + + out = cli.DockerCmd(c, "commit", cleanedContainerID).Combined() + + cleanedImageID := strings.TrimSpace(out) + + cli.DockerCmd(c, "inspect", cleanedImageID) +} + +func (s *DockerSuite) TestCommitWithoutPause(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-i", "-a", "stdin", "busybox", "echo", "foo") + + cleanedContainerID := strings.TrimSpace(out) + + dockerCmd(c, "wait", cleanedContainerID) + + out, _ = dockerCmd(c, "commit", "-p=false", cleanedContainerID) + + cleanedImageID := strings.TrimSpace(out) + + dockerCmd(c, "inspect", cleanedImageID) +} + +//test commit a paused container should not unpause it after commit +func (s *DockerSuite) TestCommitPausedContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-i", "-d", "busybox") + + cleanedContainerID := strings.TrimSpace(out) + + dockerCmd(c, "pause", cleanedContainerID) + + out, _ = dockerCmd(c, "commit", cleanedContainerID) + + out = inspectField(c, cleanedContainerID, "State.Paused") + // commit should not unpause a paused container + c.Assert(out, checker.Contains, "true") +} + +func (s *DockerSuite) TestCommitNewFile(c *check.C) { + dockerCmd(c, "run", "--name", "committer", "busybox", "/bin/sh", "-c", "echo koye > /foo") + + imageID, _ := dockerCmd(c, "commit", "committer") + imageID = strings.TrimSpace(imageID) + + out, _ := dockerCmd(c, "run", imageID, "cat", "/foo") + actual := strings.TrimSpace(out) + c.Assert(actual, checker.Equals, "koye") +} + +func (s *DockerSuite) TestCommitHardlink(c *check.C) { + testRequires(c, DaemonIsLinux) + firstOutput, _ := dockerCmd(c, "run", "-t", "--name", "hardlinks", "busybox", "sh", "-c", "touch file1 && ln file1 file2 && ls -di file1 file2") + + chunks := strings.Split(strings.TrimSpace(firstOutput), " ") + inode := chunks[0] + chunks = strings.SplitAfterN(strings.TrimSpace(firstOutput), " ", 2) + c.Assert(chunks[1], checker.Contains, chunks[0], check.Commentf("Failed to create hardlink in a container. 
Expected to find %q in %q", inode, chunks[1:])) + + imageID, _ := dockerCmd(c, "commit", "hardlinks", "hardlinks") + imageID = strings.TrimSpace(imageID) + + secondOutput, _ := dockerCmd(c, "run", "-t", imageID, "ls", "-di", "file1", "file2") + + chunks = strings.Split(strings.TrimSpace(secondOutput), " ") + inode = chunks[0] + chunks = strings.SplitAfterN(strings.TrimSpace(secondOutput), " ", 2) + c.Assert(chunks[1], checker.Contains, chunks[0], check.Commentf("Failed to create hardlink in a container. Expected to find %q in %q", inode, chunks[1:])) +} + +func (s *DockerSuite) TestCommitTTY(c *check.C) { + dockerCmd(c, "run", "-t", "--name", "tty", "busybox", "/bin/ls") + + imageID, _ := dockerCmd(c, "commit", "tty", "ttytest") + imageID = strings.TrimSpace(imageID) + + dockerCmd(c, "run", imageID, "/bin/ls") +} + +func (s *DockerSuite) TestCommitWithHostBindMount(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "--name", "bind-commit", "-v", "/dev/null:/winning", "busybox", "true") + + imageID, _ := dockerCmd(c, "commit", "bind-commit", "bindtest") + imageID = strings.TrimSpace(imageID) + + dockerCmd(c, "run", imageID, "true") +} + +func (s *DockerSuite) TestCommitChange(c *check.C) { + dockerCmd(c, "run", "--name", "test", "busybox", "true") + + imageID, _ := dockerCmd(c, "commit", + "--change", "EXPOSE 8080", + "--change", "ENV DEBUG true", + "--change", "ENV test 1", + "--change", "ENV PATH /foo", + "--change", "LABEL foo bar", + "--change", "CMD [\"/bin/sh\"]", + "--change", "WORKDIR /opt", + "--change", "ENTRYPOINT [\"/bin/sh\"]", + "--change", "USER testuser", + "--change", "VOLUME /var/lib/docker", + "--change", "ONBUILD /usr/local/bin/python-build --dir /app/src", + "test", "test-commit") + imageID = strings.TrimSpace(imageID) + + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + prefix = strings.ToUpper(prefix) // Force C: as that's how WORKDIR is normalised on Windows + expected := map[string]string{ + "Config.ExposedPorts": "map[8080/tcp:{}]", + "Config.Env": "[DEBUG=true test=1 PATH=/foo]", + "Config.Labels": "map[foo:bar]", + "Config.Cmd": "[/bin/sh]", + "Config.WorkingDir": prefix + slash + "opt", + "Config.Entrypoint": "[/bin/sh]", + "Config.User": "testuser", + "Config.Volumes": "map[/var/lib/docker:{}]", + "Config.OnBuild": "[/usr/local/bin/python-build --dir /app/src]", + } + + for conf, value := range expected { + res := inspectField(c, imageID, conf) + if res != value { + c.Errorf("%s('%s'), expected %s", conf, res, value) + } + } +} + +func (s *DockerSuite) TestCommitChangeLabels(c *check.C) { + dockerCmd(c, "run", "--name", "test", "--label", "some=label", "busybox", "true") + + imageID, _ := dockerCmd(c, "commit", + "--change", "LABEL some=label2", + "test", "test-commit") + imageID = strings.TrimSpace(imageID) + + c.Assert(inspectField(c, imageID, "Config.Labels"), checker.Equals, "map[some:label2]") + // check that container labels didn't change + c.Assert(inspectField(c, "test", "Config.Labels"), checker.Equals, "map[some:label]") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_config_create_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_config_create_test.go new file mode 100644 index 000000000..b82325487 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_config_create_test.go @@ -0,0 +1,131 @@ +// +build !windows + +package main + +import ( + "io/ioutil" + "os" + "strings" + + "github.com/docker/docker/api/types/swarm" + 
"github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestConfigCreate(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_config" + id := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + + config := d.GetConfig(c, id) + c.Assert(config.Spec.Name, checker.Equals, testName) +} + +func (s *DockerSwarmSuite) TestConfigCreateWithLabels(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_config" + id := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: testName, + Labels: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + + config := d.GetConfig(c, id) + c.Assert(config.Spec.Name, checker.Equals, testName) + c.Assert(len(config.Spec.Labels), checker.Equals, 2) + c.Assert(config.Spec.Labels["key1"], checker.Equals, "value1") + c.Assert(config.Spec.Labels["key2"], checker.Equals, "value2") +} + +// Test case for 28884 +func (s *DockerSwarmSuite) TestConfigCreateResolve(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "test_config" + id := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: name, + }, + Data: []byte("foo"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + + fake := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: id, + }, + Data: []byte("fake foo"), + }) + c.Assert(fake, checker.Not(checker.Equals), "", check.Commentf("configs: %s", fake)) + + out, err := d.Cmd("config", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + c.Assert(out, checker.Contains, fake) + + out, err = d.Cmd("config", "rm", id) + c.Assert(out, checker.Contains, id) + + // Fake one will remain + out, err = d.Cmd("config", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name) + c.Assert(out, checker.Contains, fake) + + // Remove based on name prefix of the fake one + // (which is the same as the ID of foo one) should not work + // as search is only done based on: + // - Full ID + // - Full Name + // - Partial ID (prefix) + out, err = d.Cmd("config", "rm", id[:5]) + c.Assert(out, checker.Not(checker.Contains), id) + out, err = d.Cmd("config", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name) + c.Assert(out, checker.Contains, fake) + + // Remove based on ID prefix of the fake one should succeed + out, err = d.Cmd("config", "rm", fake[:5]) + c.Assert(out, checker.Contains, fake[:5]) + out, err = d.Cmd("config", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name) + c.Assert(out, checker.Not(checker.Contains), id) + c.Assert(out, checker.Not(checker.Contains), fake) +} + +func (s *DockerSwarmSuite) TestConfigCreateWithFile(c *check.C) { + d := s.AddDaemon(c, true, true) + + testFile, err := ioutil.TempFile("", "configCreateTest") + c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary file")) + defer os.Remove(testFile.Name()) + + testData := "TESTINGDATA" + _, err = testFile.Write([]byte(testData)) + c.Assert(err, checker.IsNil, check.Commentf("failed to write to temporary file")) + + testName := "test_config" + out, err := 
d.Cmd("config", "create", testName, testFile.Name()) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "", check.Commentf(out)) + + id := strings.TrimSpace(out) + config := d.GetConfig(c, id) + c.Assert(config.Spec.Name, checker.Equals, testName) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_config_inspect_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_config_inspect_test.go new file mode 100644 index 000000000..ba4e80f07 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_config_inspect_test.go @@ -0,0 +1,68 @@ +// +build !windows + +package main + +import ( + "encoding/json" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestConfigInspect(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_config" + id := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + + config := d.GetConfig(c, id) + c.Assert(config.Spec.Name, checker.Equals, testName) + + out, err := d.Cmd("config", "inspect", testName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + var configs []swarm.Config + c.Assert(json.Unmarshal([]byte(out), &configs), checker.IsNil) + c.Assert(configs, checker.HasLen, 1) +} + +func (s *DockerSwarmSuite) TestConfigInspectMultiple(c *check.C) { + d := s.AddDaemon(c, true, true) + + testNames := []string{ + "test0", + "test1", + } + for _, n := range testNames { + id := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: n, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + + config := d.GetConfig(c, id) + c.Assert(config.Spec.Name, checker.Equals, n) + + } + + args := []string{ + "config", + "inspect", + } + args = append(args, testNames...) + out, err := d.Cmd(args...) 
+ c.Assert(err, checker.IsNil, check.Commentf(out)) + + var configs []swarm.Config + c.Assert(json.Unmarshal([]byte(out), &configs), checker.IsNil) + c.Assert(configs, checker.HasLen, 2) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_config_ls_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_config_ls_test.go new file mode 100644 index 000000000..5c0701261 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_config_ls_test.go @@ -0,0 +1,125 @@ +// +build !windows + +package main + +import ( + "strings" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestConfigList(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName0 := "test0" + testName1 := "test1" + + // create config test0 + id0 := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: testName0, + Labels: map[string]string{"type": "test"}, + }, + Data: []byte("TESTINGDATA0"), + }) + c.Assert(id0, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id0)) + + config := d.GetConfig(c, id0) + c.Assert(config.Spec.Name, checker.Equals, testName0) + + // create config test1 + id1 := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: testName1, + Labels: map[string]string{"type": "production"}, + }, + Data: []byte("TESTINGDATA1"), + }) + c.Assert(id1, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id1)) + + config = d.GetConfig(c, id1) + c.Assert(config.Spec.Name, checker.Equals, testName1) + + // test by command `docker config ls` + out, err := d.Cmd("config", "ls") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Contains, testName0) + c.Assert(strings.TrimSpace(out), checker.Contains, testName1) + + // test filter by name `docker config ls --filter name=xxx` + args := []string{ + "config", + "ls", + "--filter", + "name=test0", + } + out, err = d.Cmd(args...) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + c.Assert(strings.TrimSpace(out), checker.Contains, testName0) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), testName1) + + // test filter by id `docker config ls --filter id=xxx` + args = []string{ + "config", + "ls", + "--filter", + "id=" + id1, + } + out, err = d.Cmd(args...) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), testName0) + c.Assert(strings.TrimSpace(out), checker.Contains, testName1) + + // test filter by label `docker config ls --filter label=xxx` + args = []string{ + "config", + "ls", + "--filter", + "label=type", + } + out, err = d.Cmd(args...) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + c.Assert(strings.TrimSpace(out), checker.Contains, testName0) + c.Assert(strings.TrimSpace(out), checker.Contains, testName1) + + args = []string{ + "config", + "ls", + "--filter", + "label=type=test", + } + out, err = d.Cmd(args...) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + c.Assert(strings.TrimSpace(out), checker.Contains, testName0) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), testName1) + + args = []string{ + "config", + "ls", + "--filter", + "label=type=production", + } + out, err = d.Cmd(args...) 
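// ---- Illustrative sketch (editorial, not part of the vendored patch) --------
// The two label-filter forms exercised here behave differently: "label=type"
// matches any config that carries the label key at all, while "label=type=test"
// also requires an exact value. A toy matcher with the same semantics, taking
// the portion after "label=" (hypothetical helper, not the daemon's code):
//
//	// matchLabelFilter reports whether a config's labels satisfy one filter.
//	func matchLabelFilter(filter string, labels map[string]string) bool {
//		key, want, exact := filter, "", false
//		if i := strings.IndexByte(filter, '='); i >= 0 {
//			key, want, exact = filter[:i], filter[i+1:], true
//		}
//		got, ok := labels[key]
//		return ok && (!exact || got == want)
//	}
//
//	matchLabelFilter("type", map[string]string{"type": "test"})      // true
//	matchLabelFilter("type=test", map[string]string{"type": "prod"}) // false
// ------------------------------------------------------------------------------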
+ c.Assert(err, checker.IsNil, check.Commentf(out)) + + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), testName0) + c.Assert(strings.TrimSpace(out), checker.Contains, testName1) + + // test invalid filter `docker config ls --filter noexisttype=xxx` + args = []string{ + "config", + "ls", + "--filter", + "noexisttype=test0", + } + out, err = d.Cmd(args...) + c.Assert(err, checker.NotNil, check.Commentf(out)) + + c.Assert(strings.TrimSpace(out), checker.Contains, "Error response from daemon: Invalid filter 'noexisttype'") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_config_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_config_test.go new file mode 100644 index 000000000..46fe456bd --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_config_test.go @@ -0,0 +1,150 @@ +package main + +import ( + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "runtime" + + "github.com/docker/docker/api" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/pkg/homedir" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestConfigHTTPHeader(c *check.C) { + testRequires(c, UnixCli) // Can't set/unset HOME on windows right now + // We either need a level of Go that supports Unsetenv (for cases + // when HOME/USERPROFILE isn't set), or we need to be able to use + // os/user but user.Current() only works if we aren't statically compiling + + var headers map[string][]string + + server := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("API-Version", api.DefaultVersion) + headers = r.Header + })) + defer server.Close() + + homeKey := homedir.Key() + homeVal := homedir.Get() + tmpDir, err := ioutil.TempDir("", "fake-home") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir) + + dotDocker := filepath.Join(tmpDir, ".docker") + os.Mkdir(dotDocker, 0600) + tmpCfg := filepath.Join(dotDocker, "config.json") + + defer func() { os.Setenv(homeKey, homeVal) }() + os.Setenv(homeKey, tmpDir) + + data := `{ + "HttpHeaders": { "MyHeader": "MyValue" } + }` + + err = ioutil.WriteFile(tmpCfg, []byte(data), 0600) + c.Assert(err, checker.IsNil) + + result := icmd.RunCommand(dockerBinary, "-H="+server.URL[7:], "ps") + result.Assert(c, icmd.Expected{ + ExitCode: 1, + Error: "exit status 1", + }) + + c.Assert(headers["User-Agent"], checker.NotNil, check.Commentf("Missing User-Agent")) + + c.Assert(headers["User-Agent"][0], checker.Equals, "Docker-Client/"+os.Getenv("DOCKER_CLI_VERSION")+" ("+runtime.GOOS+")", check.Commentf("Badly formatted User-Agent,out:%v", result.Combined())) + + c.Assert(headers["Myheader"], checker.NotNil) + c.Assert(headers["Myheader"][0], checker.Equals, "MyValue", check.Commentf("Missing/bad header,out:%v", result.Combined())) + +} + +func (s *DockerSuite) TestConfigDir(c *check.C) { + cDir, err := ioutil.TempDir("", "fake-home") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(cDir) + + // First make sure pointing to empty dir doesn't generate an error + dockerCmd(c, "--config", cDir, "ps") + + // Test with env var too + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "ps"}, + Env: appendBaseEnv(true, "DOCKER_CONFIG="+cDir), + }).Assert(c, icmd.Success) + + // Start a server so we can check to see if the config file was + // loaded properly + var headers map[string][]string + + server := httptest.NewServer(http.HandlerFunc( + func(w 
http.ResponseWriter, r *http.Request) {
+		headers = r.Header
+	}))
+	defer server.Close()
+
+	// Create a dummy config file in our new config dir
+	data := `{
+		"HttpHeaders": { "MyHeader": "MyValue" }
+	}`
+
+	tmpCfg := filepath.Join(cDir, "config.json")
+	err = ioutil.WriteFile(tmpCfg, []byte(data), 0600)
+	c.Assert(err, checker.IsNil, check.Commentf("Err creating file"))
+
+	env := appendBaseEnv(false)
+
+	icmd.RunCmd(icmd.Cmd{
+		Command: []string{dockerBinary, "--config", cDir, "-H=" + server.URL[7:], "ps"},
+		Env:     env,
+	}).Assert(c, icmd.Expected{
+		ExitCode: 1,
+		Error:    "exit status 1",
+	})
+	c.Assert(headers["Myheader"], checker.NotNil)
+	c.Assert(headers["Myheader"][0], checker.Equals, "MyValue", check.Commentf("ps3 - Missing header"))
+
+	// Reset headers and try again using the env var this time
+	headers = map[string][]string{}
+	icmd.RunCmd(icmd.Cmd{
+		Command: []string{dockerBinary, "--config", cDir, "-H=" + server.URL[7:], "ps"},
+		Env:     append(env, "DOCKER_CONFIG="+cDir),
+	}).Assert(c, icmd.Expected{
+		ExitCode: 1,
+	})
+	c.Assert(headers["Myheader"], checker.NotNil)
+	c.Assert(headers["Myheader"][0], checker.Equals, "MyValue", check.Commentf("ps4 - Missing header"))
+
+	// FIXME(vdemeester) should be a unit test
+	// Reset headers and make sure the flag overrides the env var
+	headers = map[string][]string{}
+	icmd.RunCmd(icmd.Cmd{
+		Command: []string{dockerBinary, "--config", cDir, "-H=" + server.URL[7:], "ps"},
+		Env:     append(env, "DOCKER_CONFIG=MissingDir"),
+	}).Assert(c, icmd.Expected{
+		ExitCode: 1,
+	})
+	c.Assert(headers["Myheader"], checker.NotNil)
+	c.Assert(headers["Myheader"][0], checker.Equals, "MyValue", check.Commentf("ps5 - Missing header"))
+
+	// FIXME(vdemeester) should be a unit test
+	// Reset headers and make sure the flag overrides the env var.
+	// Almost the same as the previous case, but make sure that "MissingDir"
+	// isn't ignored - we don't want to fall back to the env var.
+	headers = map[string][]string{}
+	icmd.RunCmd(icmd.Cmd{
+		Command: []string{dockerBinary, "--config", "MissingDir", "-H=" + server.URL[7:], "ps"},
+		Env:     append(env, "DOCKER_CONFIG="+cDir),
+	}).Assert(c, icmd.Expected{
+		ExitCode: 1,
+		Error:    "exit status 1",
+	})
+
+	c.Assert(headers["Myheader"], checker.IsNil, check.Commentf("ps6 - Headers shouldn't be the expected value"))
+}
diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_cp_from_container_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_cp_from_container_test.go
new file mode 100644
index 000000000..116f24610
--- /dev/null
+++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_cp_from_container_test.go
@@ -0,0 +1,488 @@
+package main
+
+import (
+	"os"
+	"path/filepath"
+
+	"github.com/docker/docker/integration-cli/checker"
+	"github.com/go-check/check"
+)
+
+// docker cp CONTAINER:PATH LOCALPATH
+
+// Try all of the test cases from the archive package which implements the
+// internals of `docker cp` and ensure that the behavior matches when actually
+// copying to and from containers.
+
+// Basic assumptions about SRC and DST:
+// 1. SRC must exist.
+// 2. If SRC ends with a trailing separator, it must be a directory.
+// 3. DST parent directory must exist.
+// 4. If DST exists as a file, it must not end with a trailing separator.
+
+// First get these easy error cases out of the way.
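// ---- Illustrative sketch (editorial, not part of the vendored patch) --------
// The four assumptions above, rendered as the checks a client could run before
// any data moves. This is a hypothetical local-filesystem helper for
// illustration only (assumes os, fmt, strings, and path/filepath imports); the
// real CLI does the container-side half of this via stat calls to the daemon:
//
//	func precheck(src, dst string) error {
//		srcInfo, err := os.Stat(strings.TrimSuffix(src, "/"))
//		if err != nil {
//			return err // 1. SRC must exist.
//		}
//		if strings.HasSuffix(src, "/") && !srcInfo.IsDir() {
//			return fmt.Errorf("%q ends in a separator but is not a directory", src) // 2.
//		}
//		if _, err := os.Stat(filepath.Dir(strings.TrimSuffix(dst, "/"))); err != nil {
//			return err // 3. DST parent directory must exist.
//		}
//		if fi, err := os.Stat(strings.TrimSuffix(dst, "/")); err == nil &&
//			!fi.IsDir() && strings.HasSuffix(dst, "/") {
//			return fmt.Errorf("%q exists as a file but ends in a separator", dst) // 4.
//		}
//		return nil
//	}
//
// The error-case tests below each violate exactly one of these assumptions.
// ------------------------------------------------------------------------------
+
+// Test for error when SRC does not exist.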
+func (s *DockerSuite) TestCpFromErrSrcNotExists(c *check.C) {
+	containerID := makeTestContainer(c, testContainerOptions{})
+
+	tmpDir := getTestDir(c, "test-cp-from-err-src-not-exists")
+	defer os.RemoveAll(tmpDir)
+
+	err := runDockerCp(c, containerCpPath(containerID, "file1"), tmpDir, nil)
+	c.Assert(err, checker.NotNil)
+
+	c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err))
+}
+
+// Test for error when SRC ends in a trailing
+// path separator but it exists as a file.
+func (s *DockerSuite) TestCpFromErrSrcNotDir(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	containerID := makeTestContainer(c, testContainerOptions{addContent: true})
+
+	tmpDir := getTestDir(c, "test-cp-from-err-src-not-dir")
+	defer os.RemoveAll(tmpDir)
+
+	err := runDockerCp(c, containerCpPathTrailingSep(containerID, "file1"), tmpDir, nil)
+	c.Assert(err, checker.NotNil)
+
+	c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err))
+}
+
+// Test for error when SRC is a valid file or directory,
+// but the DST parent directory does not exist.
+func (s *DockerSuite) TestCpFromErrDstParentNotExists(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	containerID := makeTestContainer(c, testContainerOptions{addContent: true})
+
+	tmpDir := getTestDir(c, "test-cp-from-err-dst-parent-not-exists")
+	defer os.RemoveAll(tmpDir)
+
+	makeTestContentInDir(c, tmpDir)
+
+	// Try with a file source.
+	srcPath := containerCpPath(containerID, "/file1")
+	dstPath := cpPath(tmpDir, "notExists", "file1")
+
+	err := runDockerCp(c, srcPath, dstPath, nil)
+	c.Assert(err, checker.NotNil)
+
+	c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err))
+
+	// Try with a directory source.
+	srcPath = containerCpPath(containerID, "/dir1")
+
+	err = runDockerCp(c, srcPath, dstPath, nil)
+	c.Assert(err, checker.NotNil)
+
+	c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err))
+}
+
+// Test for error when DST ends in a trailing
+// path separator but exists as a file.
+func (s *DockerSuite) TestCpFromErrDstNotDir(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	containerID := makeTestContainer(c, testContainerOptions{addContent: true})
+
+	tmpDir := getTestDir(c, "test-cp-from-err-dst-not-dir")
+	defer os.RemoveAll(tmpDir)
+
+	makeTestContentInDir(c, tmpDir)
+
+	// Try with a file source.
+	srcPath := containerCpPath(containerID, "/file1")
+	dstPath := cpPathTrailingSep(tmpDir, "file1")
+
+	err := runDockerCp(c, srcPath, dstPath, nil)
+	c.Assert(err, checker.NotNil)
+
+	c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err))
+
+	// Try with a directory source.
+	srcPath = containerCpPath(containerID, "/dir1")
+
+	err = runDockerCp(c, srcPath, dstPath, nil)
+	c.Assert(err, checker.NotNil)
+
+	c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err))
+}
+
+// Check that copying from a container to a local symlink copies to the symlink
+// target and does not overwrite the local symlink itself.
+func (s *DockerSuite) TestCpFromSymlinkDestination(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-from-err-dst-not-dir") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + // First, copy a file from the container to a symlink to a file. This + // should overwrite the symlink target contents with the source contents. + srcPath := containerCpPath(containerID, "/file2") + dstPath := cpPath(tmpDir, "symlinkToFile1") + + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, dstPath, "file1"), checker.IsNil) + + // The file should have the contents of "file2" now. + c.Assert(fileContentEquals(c, cpPath(tmpDir, "file1"), "file2\n"), checker.IsNil) + + // Next, copy a file from the container to a symlink to a directory. This + // should copy the file into the symlink target directory. + dstPath = cpPath(tmpDir, "symlinkToDir1") + + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, dstPath, "dir1"), checker.IsNil) + + // The file should have the contents of "file2" now. + c.Assert(fileContentEquals(c, cpPath(tmpDir, "file2"), "file2\n"), checker.IsNil) + + // Next, copy a file from the container to a symlink to a file that does + // not exist (a broken symlink). This should create the target file with + // the contents of the source file. + dstPath = cpPath(tmpDir, "brokenSymlinkToFileX") + + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, dstPath, "fileX"), checker.IsNil) + + // The file should have the contents of "file2" now. + c.Assert(fileContentEquals(c, cpPath(tmpDir, "fileX"), "file2\n"), checker.IsNil) + + // Next, copy a directory from the container to a symlink to a local + // directory. This should copy the directory into the symlink target + // directory and not modify the symlink. + srcPath = containerCpPath(containerID, "/dir2") + dstPath = cpPath(tmpDir, "symlinkToDir1") + + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, dstPath, "dir1"), checker.IsNil) + + // The directory should now contain a copy of "dir2". + c.Assert(fileContentEquals(c, cpPath(tmpDir, "dir1/dir2/file2-1"), "file2-1\n"), checker.IsNil) + + // Next, copy a directory from the container to a symlink to a local + // directory that does not exist (a broken symlink). This should create + // the target as a directory with the contents of the source directory. It + // should not modify the symlink. + dstPath = cpPath(tmpDir, "brokenSymlinkToDirX") + + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, dstPath, "dirX"), checker.IsNil) + + // The "dirX" directory should now be a copy of "dir2". 
+ c.Assert(fileContentEquals(c, cpPath(tmpDir, "dirX/file2-1"), "file2-1\n"), checker.IsNil) +} + +// Possibilities are reduced to the remaining 10 cases: +// +// case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action +// =================================================================================================== +// A | no | - | no | - | no | create file +// B | no | - | no | - | yes | error +// C | no | - | yes | no | - | overwrite file +// D | no | - | yes | yes | - | create file in dst dir +// E | yes | no | no | - | - | create dir, copy contents +// F | yes | no | yes | no | - | error +// G | yes | no | yes | yes | - | copy dir and contents +// H | yes | yes | no | - | - | create dir, copy contents +// I | yes | yes | yes | no | - | error +// J | yes | yes | yes | yes | - | copy dir contents +// + +// A. SRC specifies a file and DST (no trailing path separator) doesn't +// exist. This should create a file with the name DST and copy the +// contents of the source file into it. +func (s *DockerSuite) TestCpFromCaseA(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + + tmpDir := getTestDir(c, "test-cp-from-case-a") + defer os.RemoveAll(tmpDir) + + srcPath := containerCpPath(containerID, "/root/file1") + dstPath := cpPath(tmpDir, "itWorks.txt") + + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1\n"), checker.IsNil) +} + +// B. SRC specifies a file and DST (with trailing path separator) doesn't +// exist. This should cause an error because the copy operation cannot +// create a directory when copying a single file. +func (s *DockerSuite) TestCpFromCaseB(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-from-case-b") + defer os.RemoveAll(tmpDir) + + srcPath := containerCpPath(containerID, "/file1") + dstDir := cpPathTrailingSep(tmpDir, "testDir") + + err := runDockerCp(c, srcPath, dstDir, nil) + c.Assert(err, checker.NotNil) + + c.Assert(isCpDirNotExist(err), checker.True, check.Commentf("expected DirNotExists error, but got %T: %s", err, err)) +} + +// C. SRC specifies a file and DST exists as a file. This should overwrite +// the file at DST with the contents of the source file. +func (s *DockerSuite) TestCpFromCaseC(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + + tmpDir := getTestDir(c, "test-cp-from-case-c") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := containerCpPath(containerID, "/root/file1") + dstPath := cpPath(tmpDir, "file2") + + // Ensure the local file starts with different content. + c.Assert(fileContentEquals(c, dstPath, "file2\n"), checker.IsNil) + + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1\n"), checker.IsNil) +} + +// D. SRC specifies a file and DST exists as a directory. This should place +// a copy of the source file inside it using the basename from SRC. Ensure +// this works whether DST has a trailing path separator or not. 
+func (s *DockerSuite) TestCpFromCaseD(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-from-case-d") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := containerCpPath(containerID, "/file1") + dstDir := cpPath(tmpDir, "dir1") + dstPath := filepath.Join(dstDir, "file1") + + // Ensure that dstPath doesn't exist. + _, err := os.Stat(dstPath) + c.Assert(os.IsNotExist(err), checker.True, check.Commentf("did not expect dstPath %q to exist", dstPath)) + + c.Assert(runDockerCp(c, srcPath, dstDir, nil), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // unable to remove dstDir + c.Assert(os.RemoveAll(dstDir), checker.IsNil) + + // unable to make dstDir + c.Assert(os.MkdirAll(dstDir, os.FileMode(0755)), checker.IsNil) + + dstDir = cpPathTrailingSep(tmpDir, "dir1") + + c.Assert(runDockerCp(c, srcPath, dstDir, nil), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1\n"), checker.IsNil) +} + +// E. SRC specifies a directory and DST does not exist. This should create a +// directory at DST and copy the contents of the SRC directory into the DST +// directory. Ensure this works whether DST has a trailing path separator or +// not. +func (s *DockerSuite) TestCpFromCaseE(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-from-case-e") + defer os.RemoveAll(tmpDir) + + srcDir := containerCpPath(containerID, "dir1") + dstDir := cpPath(tmpDir, "testDir") + dstPath := filepath.Join(dstDir, "file1-1") + + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // unable to remove dstDir + c.Assert(os.RemoveAll(dstDir), checker.IsNil) + + dstDir = cpPathTrailingSep(tmpDir, "testDir") + + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) +} + +// F. SRC specifies a directory and DST exists as a file. This should cause an +// error as it is not possible to overwrite a file with a directory. +func (s *DockerSuite) TestCpFromCaseF(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + + tmpDir := getTestDir(c, "test-cp-from-case-f") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := containerCpPath(containerID, "/root/dir1") + dstFile := cpPath(tmpDir, "file1") + + err := runDockerCp(c, srcDir, dstFile, nil) + c.Assert(err, checker.NotNil) + + c.Assert(isCpCannotCopyDir(err), checker.True, check.Commentf("expected ErrCannotCopyDir error, but got %T: %s", err, err)) +} + +// G. SRC specifies a directory and DST exists as a directory. This should copy +// the SRC directory and all its contents to the DST directory. Ensure this +// works whether DST has a trailing path separator or not. 
+func (s *DockerSuite) TestCpFromCaseG(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + + tmpDir := getTestDir(c, "test-cp-from-case-g") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := containerCpPath(containerID, "/root/dir1") + dstDir := cpPath(tmpDir, "dir2") + resultDir := filepath.Join(dstDir, "dir1") + dstPath := filepath.Join(resultDir, "file1-1") + + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // unable to remove dstDir + c.Assert(os.RemoveAll(dstDir), checker.IsNil) + + // unable to make dstDir + c.Assert(os.MkdirAll(dstDir, os.FileMode(0755)), checker.IsNil) + + dstDir = cpPathTrailingSep(tmpDir, "dir2") + + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) +} + +// H. SRC specifies a directory's contents only and DST does not exist. This +// should create a directory at DST and copy the contents of the SRC +// directory (but not the directory itself) into the DST directory. Ensure +// this works whether DST has a trailing path separator or not. +func (s *DockerSuite) TestCpFromCaseH(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-from-case-h") + defer os.RemoveAll(tmpDir) + + srcDir := containerCpPathTrailingSep(containerID, "dir1") + "." + dstDir := cpPath(tmpDir, "testDir") + dstPath := filepath.Join(dstDir, "file1-1") + + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // unable to remove resultDir + c.Assert(os.RemoveAll(dstDir), checker.IsNil) + + dstDir = cpPathTrailingSep(tmpDir, "testDir") + + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) +} + +// I. SRC specifies a directory's contents only and DST exists as a file. This +// should cause an error as it is not possible to overwrite a file with a +// directory. +func (s *DockerSuite) TestCpFromCaseI(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + + tmpDir := getTestDir(c, "test-cp-from-case-i") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := containerCpPathTrailingSep(containerID, "/root/dir1") + "." + dstFile := cpPath(tmpDir, "file1") + + err := runDockerCp(c, srcDir, dstFile, nil) + c.Assert(err, checker.NotNil) + + c.Assert(isCpCannotCopyDir(err), checker.True, check.Commentf("expected ErrCannotCopyDir error, but got %T: %s", err, err)) +} + +// J. SRC specifies a directory's contents only and DST exists as a directory. +// This should copy the contents of the SRC directory (but not the directory +// itself) into the DST directory. Ensure this works whether DST has a +// trailing path separator or not. 
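// ---- Illustrative sketch (editorial, not part of the vendored patch) --------
// The contents-only rows of the table (H, I, and J) are selected by suffixing
// the source with "/.", as in containerCpPathTrailingSep(...) + "." above: the
// directory's contents are copied without recreating the directory itself. A
// toy detector for that convention (hypothetical helper):
//
//	func copiesContentsOnly(src string) bool {
//		return src == "." || strings.HasSuffix(src, "/.")
//	}
//
// TestCpFromCaseJ, documented just above, follows.
// ------------------------------------------------------------------------------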
+func (s *DockerSuite) TestCpFromCaseJ(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + + tmpDir := getTestDir(c, "test-cp-from-case-j") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := containerCpPathTrailingSep(containerID, "/root/dir1") + "." + dstDir := cpPath(tmpDir, "dir2") + dstPath := filepath.Join(dstDir, "file1-1") + + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // unable to remove dstDir + c.Assert(os.RemoveAll(dstDir), checker.IsNil) + + // unable to make dstDir + c.Assert(os.MkdirAll(dstDir, os.FileMode(0755)), checker.IsNil) + + dstDir = cpPathTrailingSep(tmpDir, "dir2") + + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_cp_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_cp_test.go new file mode 100644 index 000000000..f7ed45919 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_cp_test.go @@ -0,0 +1,665 @@ +package main + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/pkg/testutil" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +const ( + cpTestPathParent = "/some" + cpTestPath = "/some/path" + cpTestName = "test" + cpFullPath = "/some/path/test" + + cpContainerContents = "holla, i am the container" + cpHostContents = "hello, i am the host" +) + +// Ensure that an all-local path case returns an error. 
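// ---- Illustrative sketch (editorial, not part of the vendored patch) --------
// `docker cp` decides which argument names a container by the first ':' it
// finds, so colons later in the string belong to the path (TestCpNameHasColon
// further below pins this down with "/te:s:t"). A toy splitter in that spirit;
// the CLI's real parser also has to special-case Windows drive letters such as
// "C:\", which this sketch ignores:
//
//	func splitCpArg(arg string) (container, path string) {
//		if i := strings.IndexByte(arg, ':'); i > 0 {
//			return arg[:i], arg[i+1:]
//		}
//		return "", arg // no container side: a purely local path
//	}
//
// When neither argument yields a container side, both are local, which is the
// error TestCpLocalOnly below asserts.
// ------------------------------------------------------------------------------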
+func (s *DockerSuite) TestCpLocalOnly(c *check.C) { + err := runDockerCp(c, "foo", "bar", nil) + c.Assert(err, checker.NotNil) + + c.Assert(err.Error(), checker.Contains, "must specify at least one container source") +} + +// Test for #5656 +// Check that garbage paths don't escape the container's rootfs +func (s *DockerSuite) TestCpGarbagePath(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + c.Assert(os.MkdirAll(cpTestPath, os.ModeDir), checker.IsNil) + + hostFile, err := os.Create(cpFullPath) + c.Assert(err, checker.IsNil) + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + c.Assert(err, checker.IsNil) + + tmpname := filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + path := path.Join("../../../../../../../../../../../../", cpFullPath) + + dockerCmd(c, "cp", containerID+":"+path, tmpdir) + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + c.Assert(err, checker.IsNil) + + // output matched host file -- garbage path can escape container rootfs + c.Assert(string(test), checker.Not(checker.Equals), cpHostContents) + + // output doesn't match the input for garbage path + c.Assert(string(test), checker.Equals, cpContainerContents) +} + +// Check that relative paths are relative to the container's rootfs +func (s *DockerSuite) TestCpRelativePath(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + c.Assert(os.MkdirAll(cpTestPath, os.ModeDir), checker.IsNil) + + hostFile, err := os.Create(cpFullPath) + c.Assert(err, checker.IsNil) + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + c.Assert(err, checker.IsNil) + + tmpname := filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + var relPath string + if path.IsAbs(cpFullPath) { + // normally this is `filepath.Rel("/", cpFullPath)` but we cannot + // get this unix-path manipulation on windows with filepath. 
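// ---- Editorial note (not part of the vendored patch) ------------------------
// Why the slice on the next line instead of filepath.Rel: the filepath package
// speaks the host OS's separators, while container paths are always
// forward-slash, so on Windows filepath would mis-handle them. The slash-only
// "path" package is the portable tool here:
//
//	path.IsAbs("/some/path")     // true on every platform
//	filepath.IsAbs("/some/path") // false on Windows (no drive letter)
//
// Hence cpFullPath[1:] simply strips the leading '/' to form the relative path.
// ------------------------------------------------------------------------------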
+ relPath = cpFullPath[1:] + } + c.Assert(path.IsAbs(cpFullPath), checker.True, check.Commentf("path %s was assumed to be an absolute path", cpFullPath)) + + dockerCmd(c, "cp", containerID+":"+relPath, tmpdir) + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + c.Assert(err, checker.IsNil) + + // output matched host file -- relative path can escape container rootfs + c.Assert(string(test), checker.Not(checker.Equals), cpHostContents) + + // output doesn't match the input for relative path + c.Assert(string(test), checker.Equals, cpContainerContents) +} + +// Check that absolute paths are relative to the container's rootfs +func (s *DockerSuite) TestCpAbsolutePath(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + c.Assert(os.MkdirAll(cpTestPath, os.ModeDir), checker.IsNil) + + hostFile, err := os.Create(cpFullPath) + c.Assert(err, checker.IsNil) + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + c.Assert(err, checker.IsNil) + + tmpname := filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + path := cpFullPath + + dockerCmd(c, "cp", containerID+":"+path, tmpdir) + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + c.Assert(err, checker.IsNil) + + // output matched host file -- absolute path can escape container rootfs + c.Assert(string(test), checker.Not(checker.Equals), cpHostContents) + + // output doesn't match the input for absolute path + c.Assert(string(test), checker.Equals, cpContainerContents) +} + +// Test for #5619 +// Check that absolute symlinks are still relative to the container's rootfs +func (s *DockerSuite) TestCpAbsoluteSymlink(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpFullPath+" container_path") + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + c.Assert(os.MkdirAll(cpTestPath, os.ModeDir), checker.IsNil) + + hostFile, err := os.Create(cpFullPath) + c.Assert(err, checker.IsNil) + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + c.Assert(err, checker.IsNil) + + tmpname := filepath.Join(tmpdir, "container_path") + defer os.RemoveAll(tmpdir) + + path := path.Join("/", "container_path") + + dockerCmd(c, "cp", containerID+":"+path, tmpdir) + + // We should have copied a symlink *NOT* the file itself! + linkTarget, err := os.Readlink(tmpname) + c.Assert(err, checker.IsNil) + + c.Assert(linkTarget, checker.Equals, filepath.FromSlash(cpFullPath)) +} + +// Check that symlinks to a directory behave as expected when copying one from +// a container. 
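// ---- Illustrative sketch (editorial, not part of the vendored patch) --------
// The symlink assertions in these tests lean on the Lstat/Readlink versus Stat
// distinction: Lstat and Readlink describe the link itself, while Stat follows
// it to the target. A self-contained standard-library demonstration (Linux
// assumed; paths are throwaway temp files):
//
//	package main
//
//	import (
//		"fmt"
//		"io/ioutil"
//		"os"
//		"path/filepath"
//	)
//
//	func main() {
//		dir, _ := ioutil.TempDir("", "symlink-demo")
//		defer os.RemoveAll(dir)
//
//		target := filepath.Join(dir, "target")
//		ioutil.WriteFile(target, []byte("data"), 0600)
//		link := filepath.Join(dir, "link")
//		os.Symlink(target, link)
//
//		li, _ := os.Lstat(link) // the link itself
//		si, _ := os.Stat(link)  // what the link points to
//		dest, _ := os.Readlink(link)
//		fmt.Println(li.Mode()&os.ModeSymlink != 0, si.Mode().IsRegular(), dest)
//	}
//
// TestCpFromSymlinkToDirectory, described just above, follows.
// ------------------------------------------------------------------------------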
+func (s *DockerSuite) TestCpFromSymlinkToDirectory(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpTestPathParent+" /dir_link") + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + testDir, err := ioutil.TempDir("", "test-cp-from-symlink-to-dir-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(testDir) + + // This copy command should copy the symlink, not the target, into the + // temporary directory. + dockerCmd(c, "cp", containerID+":"+"/dir_link", testDir) + + expectedPath := filepath.Join(testDir, "dir_link") + linkTarget, err := os.Readlink(expectedPath) + c.Assert(err, checker.IsNil) + + c.Assert(linkTarget, checker.Equals, filepath.FromSlash(cpTestPathParent)) + + os.Remove(expectedPath) + + // This copy command should resolve the symlink (note the trailing + // separator), copying the target into the temporary directory. + dockerCmd(c, "cp", containerID+":"+"/dir_link/", testDir) + + // It *should not* have copied the directory using the target's name, but + // used the given name instead. + unexpectedPath := filepath.Join(testDir, cpTestPathParent) + stat, err := os.Lstat(unexpectedPath) + if err == nil { + out = fmt.Sprintf("target name was copied: %q - %q", stat.Mode(), stat.Name()) + } + c.Assert(err, checker.NotNil, check.Commentf(out)) + + // It *should* have copied the directory using the asked name "dir_link". + stat, err = os.Lstat(expectedPath) + c.Assert(err, checker.IsNil, check.Commentf("unable to stat resource at %q", expectedPath)) + + c.Assert(stat.IsDir(), checker.True, check.Commentf("should have copied a directory but got %q instead", stat.Mode())) +} + +// Check that symlinks to a directory behave as expected when copying one to a +// container. +func (s *DockerSuite) TestCpToSymlinkToDirectory(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, SameHostDaemon) // Requires local volume mount bind. + + testVol, err := ioutil.TempDir("", "test-cp-to-symlink-to-dir-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(testVol) + + // Create a test container with a local volume. We will test by copying + // to the volume path in the container which we can then verify locally. + out, _ := dockerCmd(c, "create", "-v", testVol+":/testVol", "busybox") + + containerID := strings.TrimSpace(out) + + // Create a temp directory to hold a test file nested in a directory. + testDir, err := ioutil.TempDir("", "test-cp-to-symlink-to-dir-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(testDir) + + // This file will be at "/testDir/some/path/test" and will be copied into + // the test volume later. + hostTestFilename := filepath.Join(testDir, cpFullPath) + c.Assert(os.MkdirAll(filepath.Dir(hostTestFilename), os.FileMode(0700)), checker.IsNil) + c.Assert(ioutil.WriteFile(hostTestFilename, []byte(cpHostContents), os.FileMode(0600)), checker.IsNil) + + // Now create another temp directory to hold a symlink to the + // "/testDir/some" directory. + linkDir, err := ioutil.TempDir("", "test-cp-to-symlink-to-dir-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(linkDir) + + // Then symlink "/linkDir/dir_link" to "/testdir/some". 
+ linkTarget := filepath.Join(testDir, cpTestPathParent) + localLink := filepath.Join(linkDir, "dir_link") + c.Assert(os.Symlink(linkTarget, localLink), checker.IsNil) + + // Now copy that symlink into the test volume in the container. + dockerCmd(c, "cp", localLink, containerID+":/testVol") + + // This copy command should have copied the symlink *not* the target. + expectedPath := filepath.Join(testVol, "dir_link") + actualLinkTarget, err := os.Readlink(expectedPath) + c.Assert(err, checker.IsNil, check.Commentf("unable to read symlink at %q", expectedPath)) + + c.Assert(actualLinkTarget, checker.Equals, linkTarget) + + // Good, now remove that copied link for the next test. + os.Remove(expectedPath) + + // This copy command should resolve the symlink (note the trailing + // separator), copying the target into the test volume directory in the + // container. + dockerCmd(c, "cp", localLink+"/", containerID+":/testVol") + + // It *should not* have copied the directory using the target's name, but + // used the given name instead. + unexpectedPath := filepath.Join(testVol, cpTestPathParent) + stat, err := os.Lstat(unexpectedPath) + if err == nil { + out = fmt.Sprintf("target name was copied: %q - %q", stat.Mode(), stat.Name()) + } + c.Assert(err, checker.NotNil, check.Commentf(out)) + + // It *should* have copied the directory using the asked name "dir_link". + stat, err = os.Lstat(expectedPath) + c.Assert(err, checker.IsNil, check.Commentf("unable to stat resource at %q", expectedPath)) + + c.Assert(stat.IsDir(), checker.True, check.Commentf("should have copied a directory but got %q instead", stat.Mode())) + + // And this directory should contain the file copied from the host at the + // expected location: "/testVol/dir_link/path/test" + expectedFilepath := filepath.Join(testVol, "dir_link/path/test") + fileContents, err := ioutil.ReadFile(expectedFilepath) + c.Assert(err, checker.IsNil) + + c.Assert(string(fileContents), checker.Equals, cpHostContents) +} + +// Test for #5619 +// Check that symlinks which are part of the resource path are still relative to the container's rootfs +func (s *DockerSuite) TestCpSymlinkComponent(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpTestPath+" container_path") + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + c.Assert(os.MkdirAll(cpTestPath, os.ModeDir), checker.IsNil) + + hostFile, err := os.Create(cpFullPath) + c.Assert(err, checker.IsNil) + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + + c.Assert(err, checker.IsNil) + + tmpname := filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + path := path.Join("/", "container_path", cpTestName) + + dockerCmd(c, "cp", containerID+":"+path, tmpdir) + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + c.Assert(err, checker.IsNil) + + // output matched host file -- symlink path component can escape container rootfs + c.Assert(string(test), checker.Not(checker.Equals), cpHostContents) + + // output doesn't match the input for symlink path component + c.Assert(string(test), checker.Equals, cpContainerContents) +} + +// Check that cp with unprivileged 
user doesn't return any error
+func (s *DockerSuite) TestCpUnprivilegedUser(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	testRequires(c, UnixCli) // uses chmod/su: not available on windows
+
+	out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "touch "+cpTestName)
+
+	containerID := strings.TrimSpace(out)
+
+	out, _ = dockerCmd(c, "wait", containerID)
+	// failed to set up container
+	c.Assert(strings.TrimSpace(out), checker.Equals, "0")
+
+	tmpdir, err := ioutil.TempDir("", "docker-integration")
+	c.Assert(err, checker.IsNil)
+
+	defer os.RemoveAll(tmpdir)
+
+	c.Assert(os.Chmod(tmpdir, 0777), checker.IsNil)
+
+	result := icmd.RunCommand("su", "unprivilegeduser", "-c",
+		fmt.Sprintf("%s cp %s:%s %s", dockerBinary, containerID, cpTestName, tmpdir))
+	result.Assert(c, icmd.Expected{})
+}
+
+func (s *DockerSuite) TestCpSpecialFiles(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	testRequires(c, SameHostDaemon)
+
+	outDir, err := ioutil.TempDir("", "cp-test-special-files")
+	c.Assert(err, checker.IsNil)
+	defer os.RemoveAll(outDir)
+
+	out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "touch /foo")
+
+	containerID := strings.TrimSpace(out)
+
+	out, _ = dockerCmd(c, "wait", containerID)
+	// failed to set up container
+	c.Assert(strings.TrimSpace(out), checker.Equals, "0")
+
+	// Copy actual /etc/resolv.conf
+	dockerCmd(c, "cp", containerID+":/etc/resolv.conf", outDir)
+
+	expected := readContainerFile(c, containerID, "resolv.conf")
+	actual, err := ioutil.ReadFile(outDir + "/resolv.conf")
+	c.Assert(err, checker.IsNil)
+
+	// Expected copied file to be duplicate of the container resolv.conf
+	c.Assert(bytes.Equal(actual, expected), checker.True)
+
+	// Copy actual /etc/hosts
+	dockerCmd(c, "cp", containerID+":/etc/hosts", outDir)
+
+	expected = readContainerFile(c, containerID, "hosts")
+	actual, err = ioutil.ReadFile(outDir + "/hosts")
+	c.Assert(err, checker.IsNil)
+
+	// Expected copied file to be duplicate of the container hosts
+	c.Assert(bytes.Equal(actual, expected), checker.True)
+
+	// Copy actual /etc/hostname
+	dockerCmd(c, "cp", containerID+":/etc/hostname", outDir)
+
+	expected = readContainerFile(c, containerID, "hostname")
+	actual, err = ioutil.ReadFile(outDir + "/hostname")
+	c.Assert(err, checker.IsNil)
+
+	// Expected copied file to be duplicate of the container hostname
+	c.Assert(bytes.Equal(actual, expected), checker.True)
+}
+
+func (s *DockerSuite) TestCpVolumePath(c *check.C) {
+	// stat /tmp/cp-test-volumepath851508420/test gets permission denied for the user
+	testRequires(c, NotUserNamespace)
+	testRequires(c, DaemonIsLinux)
+	testRequires(c, SameHostDaemon)
+
+	tmpDir, err := ioutil.TempDir("", "cp-test-volumepath")
+	c.Assert(err, checker.IsNil)
+	defer os.RemoveAll(tmpDir)
+	outDir, err := ioutil.TempDir("", "cp-test-volumepath-out")
+	c.Assert(err, checker.IsNil)
+	defer os.RemoveAll(outDir)
+	_, err = os.Create(tmpDir + "/test")
+	c.Assert(err, checker.IsNil)
+
+	out, _ := dockerCmd(c, "run", "-d", "-v", "/foo", "-v", tmpDir+"/test:/test", "-v", tmpDir+":/baz", "busybox", "/bin/sh", "-c", "touch /foo/bar")
+
+	containerID := strings.TrimSpace(out)
+
+	out, _ = dockerCmd(c, "wait", containerID)
+	// failed to set up container
+	c.Assert(strings.TrimSpace(out), checker.Equals, "0")
+
+	// Copy actual volume path
+	dockerCmd(c, "cp", containerID+":/foo", outDir)
+
+	stat, err := os.Stat(outDir + "/foo")
+	c.Assert(err, checker.IsNil)
+	// expected copied content to be dir
+	c.Assert(stat.IsDir(), checker.True)
+	stat, err = os.Stat(outDir + "/foo/bar")
+	c.Assert(err, checker.IsNil)
+	// 
Expected file `bar` to be a file + c.Assert(stat.IsDir(), checker.False) + + // Copy file nested in volume + dockerCmd(c, "cp", containerID+":/foo/bar", outDir) + + stat, err = os.Stat(outDir + "/bar") + c.Assert(err, checker.IsNil) + // Expected file `bar` to be a file + c.Assert(stat.IsDir(), checker.False) + + // Copy Bind-mounted dir + dockerCmd(c, "cp", containerID+":/baz", outDir) + stat, err = os.Stat(outDir + "/baz") + c.Assert(err, checker.IsNil) + // Expected `baz` to be a dir + c.Assert(stat.IsDir(), checker.True) + + // Copy file nested in bind-mounted dir + dockerCmd(c, "cp", containerID+":/baz/test", outDir) + fb, err := ioutil.ReadFile(outDir + "/baz/test") + c.Assert(err, checker.IsNil) + fb2, err := ioutil.ReadFile(tmpDir + "/test") + c.Assert(err, checker.IsNil) + // Expected copied file to be duplicate of bind-mounted file + c.Assert(bytes.Equal(fb, fb2), checker.True) + + // Copy bind-mounted file + dockerCmd(c, "cp", containerID+":/test", outDir) + fb, err = ioutil.ReadFile(outDir + "/test") + c.Assert(err, checker.IsNil) + fb2, err = ioutil.ReadFile(tmpDir + "/test") + c.Assert(err, checker.IsNil) + // Expected copied file to be duplicate of bind-mounted file + c.Assert(bytes.Equal(fb, fb2), checker.True) +} + +func (s *DockerSuite) TestCpToDot(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /test") + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + tmpdir, err := ioutil.TempDir("", "docker-integration") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpdir) + cwd, err := os.Getwd() + c.Assert(err, checker.IsNil) + defer os.Chdir(cwd) + c.Assert(os.Chdir(tmpdir), checker.IsNil) + dockerCmd(c, "cp", containerID+":/test", ".") + content, err := ioutil.ReadFile("./test") + c.Assert(err, checker.IsNil) + c.Assert(string(content), checker.Equals, "lololol\n") +} + +func (s *DockerSuite) TestCpToStdout(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /test") + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + out, _, err := testutil.RunCommandPipelineWithOutput( + exec.Command(dockerBinary, "cp", containerID+":/test", "-"), + exec.Command("tar", "-vtf", "-")) + + c.Assert(err, checker.IsNil) + + c.Assert(out, checker.Contains, "test") + c.Assert(out, checker.Contains, "-rw") +} + +func (s *DockerSuite) TestCpNameHasColon(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /te:s:t") + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + tmpdir, err := ioutil.TempDir("", "docker-integration") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpdir) + dockerCmd(c, "cp", containerID+":/te:s:t", tmpdir) + content, err := ioutil.ReadFile(tmpdir + "/te:s:t") + c.Assert(err, checker.IsNil) + c.Assert(string(content), checker.Equals, "lololol\n") +} + +func (s *DockerSuite) TestCopyAndRestart(c *check.C) { + testRequires(c, DaemonIsLinux) + expectedMsg := "hello" + out, _ := dockerCmd(c, "run", "-d", "busybox", "echo", expectedMsg) + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", 
containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + tmpDir, err := ioutil.TempDir("", "test-docker-restart-after-copy-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir) + + dockerCmd(c, "cp", fmt.Sprintf("%s:/etc/group", containerID), tmpDir) + + out, _ = dockerCmd(c, "start", "-a", containerID) + + c.Assert(strings.TrimSpace(out), checker.Equals, expectedMsg) +} + +func (s *DockerSuite) TestCopyCreatedContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "create", "--name", "test_cp", "-v", "/test", "busybox") + + tmpDir, err := ioutil.TempDir("", "test") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir) + dockerCmd(c, "cp", "test_cp:/bin/sh", tmpDir) +} + +// Test copying with option `-L`: following symbolic links. +// Check that symlinks to a file behave as expected when copying one from +// a container to the host while following symbolic links. +func (s *DockerSuite) TestCpSymlinkFromConToHostFollowSymlink(c *check.C) { + testRequires(c, DaemonIsLinux) + out, exitCode := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpFullPath+" /dir_link") + if exitCode != 0 { + c.Fatal("failed to create a container", out) + } + + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", cleanedContainerID) + if strings.TrimSpace(out) != "0" { + c.Fatal("failed to set up container", out) + } + + testDir, err := ioutil.TempDir("", "test-cp-symlink-container-to-host-follow-symlink") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(testDir) + + // Because of the -L flag, this copy command should follow the symlink and + // copy its target, not the symlink itself, into the temporary directory. + dockerCmd(c, "cp", "-L", cleanedContainerID+":"+"/dir_link", testDir) + + expectedPath := filepath.Join(testDir, "dir_link") + + expected := []byte(cpContainerContents) + actual, err := ioutil.ReadFile(expectedPath) + c.Assert(err, checker.IsNil) + + if !bytes.Equal(actual, expected) { + c.Fatalf("Expected copied file to be duplicate of the container symbolic link target") + } + os.Remove(expectedPath) + + // Now test copying the symbolic link to a nonexistent file on the host. + expectedPath = filepath.Join(testDir, "somefile_host") + // expectedPath shouldn't exist; if it does, remove it + if _, err := os.Lstat(expectedPath); err == nil { + os.Remove(expectedPath) + } + + dockerCmd(c, "cp", "-L", cleanedContainerID+":"+"/dir_link", expectedPath) + + actual, err = ioutil.ReadFile(expectedPath) + c.Assert(err, checker.IsNil) + + if !bytes.Equal(actual, expected) { + c.Fatalf("Expected copied file to be duplicate of the container symbolic link target") + } + defer os.Remove(expectedPath) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_cp_to_container_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_cp_to_container_test.go new file mode 100644 index 000000000..97e9aa123 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_cp_to_container_test.go @@ -0,0 +1,600 @@ +package main + +import ( + "os" + + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +// docker cp LOCALPATH CONTAINER:PATH + +// Try all of the test cases from the archive package which implements the +// internals of `docker cp` and ensure that the behavior matches when actually +// copying to and from containers. + +// Basic assumptions about SRC and DST: +// 1. SRC must exist. +// 2. If SRC ends with a trailing separator, it must be a directory. +// 3.
DST parent directory must exist. +// 4. If DST exists as a file, it must not end with a trailing separator. + +// First get these easy error cases out of the way. + +// Test for error when SRC does not exist. +func (s *DockerSuite) TestCpToErrSrcNotExists(c *check.C) { + containerID := makeTestContainer(c, testContainerOptions{}) + + tmpDir := getTestDir(c, "test-cp-to-err-src-not-exists") + defer os.RemoveAll(tmpDir) + + srcPath := cpPath(tmpDir, "file1") + dstPath := containerCpPath(containerID, "file1") + + err := runDockerCp(c, srcPath, dstPath, nil) + c.Assert(err, checker.NotNil) + + c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) +} + +// Test for error when SRC ends in a trailing +// path separator but exists as a file. +func (s *DockerSuite) TestCpToErrSrcNotDir(c *check.C) { + containerID := makeTestContainer(c, testContainerOptions{}) + + tmpDir := getTestDir(c, "test-cp-to-err-src-not-dir") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := cpPathTrailingSep(tmpDir, "file1") + dstPath := containerCpPath(containerID, "testDir") + + err := runDockerCp(c, srcPath, dstPath, nil) + c.Assert(err, checker.NotNil) + + c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err)) +} + +// Test for error when SRC is a valid file or directory, +// but the DST parent directory does not exist. +func (s *DockerSuite) TestCpToErrDstParentNotExists(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-to-err-dst-parent-not-exists") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + // Try with a file source. + srcPath := cpPath(tmpDir, "file1") + dstPath := containerCpPath(containerID, "/notExists", "file1") + + err := runDockerCp(c, srcPath, dstPath, nil) + c.Assert(err, checker.NotNil) + + c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) + + // Try with a directory source. + srcPath = cpPath(tmpDir, "dir1") + + err = runDockerCp(c, srcPath, dstPath, nil) + c.Assert(err, checker.NotNil) + + c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) +} + +// Test for error when DST ends in a trailing path separator but exists as a +// file. Also test that we cannot overwrite an existing directory with a +// non-directory and cannot overwrite an existing non-directory with a +// directory. +func (s *DockerSuite) TestCpToErrDstNotDir(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-to-err-dst-not-dir") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + // Try with a file source. + srcPath := cpPath(tmpDir, "dir1/file1-1") + dstPath := containerCpPathTrailingSep(containerID, "file1") + + // The client should encounter an error trying to stat the destination + // and then be unable to copy since the destination is asserted to be a + // directory but does not exist. + err := runDockerCp(c, srcPath, dstPath, nil) + c.Assert(err, checker.NotNil) + + c.Assert(isCpDirNotExist(err), checker.True, check.Commentf("expected DirNotExist error, but got %T: %s", err, err)) + + // Try with a directory source.
+ srcPath = cpPath(tmpDir, "dir1") + + // The client should encounter an error trying to stat the destination and + // then decide to extract to the parent directory instead with a rebased + // name in the source archive, but this directory would overwrite the + // existing file with the same name. + err = runDockerCp(c, srcPath, dstPath, nil) + c.Assert(err, checker.NotNil) + + c.Assert(isCannotOverwriteNonDirWithDir(err), checker.True, check.Commentf("expected CannotOverwriteNonDirWithDir error, but got %T: %s", err, err)) +} + +// Check that copying from a local path to a symlink in a container copies to +// the symlink target and does not overwrite the container symlink itself. +func (s *DockerSuite) TestCpToSymlinkDestination(c *check.C) { + // stat /tmp/test-cp-to-symlink-destination-262430901/vol3 gets permission denied for the user + testRequires(c, NotUserNamespace) + testRequires(c, DaemonIsLinux) + testRequires(c, SameHostDaemon) // Requires local volume mount bind. + + testVol := getTestDir(c, "test-cp-to-symlink-destination-") + defer os.RemoveAll(testVol) + + makeTestContentInDir(c, testVol) + + containerID := makeTestContainer(c, testContainerOptions{ + volumes: defaultVolumes(testVol), // Our bind mount is at /vol2 + }) + + // First, copy a local file to a symlink to a file in the container. This + // should overwrite the symlink target contents with the source contents. + srcPath := cpPath(testVol, "file2") + dstPath := containerCpPath(containerID, "/vol2/symlinkToFile1") + + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, cpPath(testVol, "symlinkToFile1"), "file1"), checker.IsNil) + + // The file should have the contents of "file2" now. + c.Assert(fileContentEquals(c, cpPath(testVol, "file1"), "file2\n"), checker.IsNil) + + // Next, copy a local file to a symlink to a directory in the container. + // This should copy the file into the symlink target directory. + dstPath = containerCpPath(containerID, "/vol2/symlinkToDir1") + + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, cpPath(testVol, "symlinkToDir1"), "dir1"), checker.IsNil) + + // The file should have the contents of "file2" now. + c.Assert(fileContentEquals(c, cpPath(testVol, "file2"), "file2\n"), checker.IsNil) + + // Next, copy a file to a symlink to a file that does not exist (a broken + // symlink) in the container. This should create the target file with the + // contents of the source file. + dstPath = containerCpPath(containerID, "/vol2/brokenSymlinkToFileX") + + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, cpPath(testVol, "brokenSymlinkToFileX"), "fileX"), checker.IsNil) + + // The file should have the contents of "file2" now. + c.Assert(fileContentEquals(c, cpPath(testVol, "fileX"), "file2\n"), checker.IsNil) + + // Next, copy a local directory to a symlink to a directory in the + // container. This should copy the directory into the symlink target + // directory and not modify the symlink. + srcPath = cpPath(testVol, "/dir2") + dstPath = containerCpPath(containerID, "/vol2/symlinkToDir1") + + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) + + // The symlink should not have been modified. 
+ c.Assert(symlinkTargetEquals(c, cpPath(testVol, "symlinkToDir1"), "dir1"), checker.IsNil) + + // The directory should now contain a copy of "dir2". + c.Assert(fileContentEquals(c, cpPath(testVol, "dir1/dir2/file2-1"), "file2-1\n"), checker.IsNil) + + // Next, copy a local directory to a symlink to a local directory that does + // not exist (a broken symlink) in the container. This should create the + // target as a directory with the contents of the source directory. It + // should not modify the symlink. + dstPath = containerCpPath(containerID, "/vol2/brokenSymlinkToDirX") + + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, cpPath(testVol, "brokenSymlinkToDirX"), "dirX"), checker.IsNil) + + // The "dirX" directory should now be a copy of "dir2". + c.Assert(fileContentEquals(c, cpPath(testVol, "dirX/file2-1"), "file2-1\n"), checker.IsNil) +} + +// Possibilities are reduced to the remaining 10 cases: +// +// case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action +// =================================================================================================== +// A | no | - | no | - | no | create file +// B | no | - | no | - | yes | error +// C | no | - | yes | no | - | overwrite file +// D | no | - | yes | yes | - | create file in dst dir +// E | yes | no | no | - | - | create dir, copy contents +// F | yes | no | yes | no | - | error +// G | yes | no | yes | yes | - | copy dir and contents +// H | yes | yes | no | - | - | create dir, copy contents +// I | yes | yes | yes | no | - | error +// J | yes | yes | yes | yes | - | copy dir contents +// + +// A. SRC specifies a file and DST (no trailing path separator) doesn't +// exist. This should create a file with the name DST and copy the +// contents of the source file into it. +func (s *DockerSuite) TestCpToCaseA(c *check.C) { + containerID := makeTestContainer(c, testContainerOptions{ + workDir: "/root", command: makeCatFileCommand("itWorks.txt"), + }) + + tmpDir := getTestDir(c, "test-cp-to-case-a") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := cpPath(tmpDir, "file1") + dstPath := containerCpPath(containerID, "/root/itWorks.txt") + + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) + + c.Assert(containerStartOutputEquals(c, containerID, "file1\n"), checker.IsNil) +} + +// B. SRC specifies a file and DST (with trailing path separator) doesn't +// exist. This should cause an error because the copy operation cannot +// create a directory when copying a single file. +func (s *DockerSuite) TestCpToCaseB(c *check.C) { + containerID := makeTestContainer(c, testContainerOptions{ + command: makeCatFileCommand("testDir/file1"), + }) + + tmpDir := getTestDir(c, "test-cp-to-case-b") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := cpPath(tmpDir, "file1") + dstDir := containerCpPathTrailingSep(containerID, "testDir") + + err := runDockerCp(c, srcPath, dstDir, nil) + c.Assert(err, checker.NotNil) + + c.Assert(isCpDirNotExist(err), checker.True, check.Commentf("expected DirNotExists error, but got %T: %s", err, err)) +} + +// C. SRC specifies a file and DST exists as a file. This should overwrite +// the file at DST with the contents of the source file. 
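+// As a rough illustration of the case table above (a sketch only: `cID` and
+// the paths here are placeholders, not the fixtures these tests create):
+//
+//	docker cp file1  cID:/newFile   // case A: creates /newFile
+//	docker cp file1  cID:/newDir/   // case B: error (cannot create a dir from a file)
+//	docker cp file1  cID:/oldFile   // case C: overwrites /oldFile
+//	docker cp file1  cID:/oldDir    // case D: creates /oldDir/file1
+//	docker cp dir1   cID:/newDir    // case E: creates /newDir with dir1's contents
+//	docker cp dir1   cID:/oldFile   // case F: error (cannot copy a dir over a file)
+//	docker cp dir1   cID:/oldDir    // case G: creates /oldDir/dir1
+//	docker cp dir1/. cID:/newDir    // case H: creates /newDir, contents only
+//	docker cp dir1/. cID:/oldFile   // case I: error (cannot copy a dir over a file)
+//	docker cp dir1/. cID:/oldDir    // case J: copies dir1's contents into /oldDir
+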
+func (s *DockerSuite) TestCpToCaseC(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + command: makeCatFileCommand("file2"), + }) + + tmpDir := getTestDir(c, "test-cp-to-case-c") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := cpPath(tmpDir, "file1") + dstPath := containerCpPath(containerID, "/root/file2") + + // Ensure the container's file starts with the original content. + c.Assert(containerStartOutputEquals(c, containerID, "file2\n"), checker.IsNil) + + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) + + // Should now contain file1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1\n"), checker.IsNil) +} + +// D. SRC specifies a file and DST exists as a directory. This should place +// a copy of the source file inside it using the basename from SRC. Ensure +// this works whether DST has a trailing path separator or not. +func (s *DockerSuite) TestCpToCaseD(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, + command: makeCatFileCommand("/dir1/file1"), + }) + + tmpDir := getTestDir(c, "test-cp-to-case-d") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := cpPath(tmpDir, "file1") + dstDir := containerCpPath(containerID, "dir1") + + // Ensure that dstPath doesn't exist. + c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) + + c.Assert(runDockerCp(c, srcPath, dstDir, nil), checker.IsNil) + + // Should now contain file1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // Make new destination container. + containerID = makeTestContainer(c, testContainerOptions{ + addContent: true, + command: makeCatFileCommand("/dir1/file1"), + }) + + dstDir = containerCpPathTrailingSep(containerID, "dir1") + + // Ensure that dstPath doesn't exist. + c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) + + c.Assert(runDockerCp(c, srcPath, dstDir, nil), checker.IsNil) + + // Should now contain file1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1\n"), checker.IsNil) +} + +// E. SRC specifies a directory and DST does not exist. This should create a +// directory at DST and copy the contents of the SRC directory into the DST +// directory. Ensure this works whether DST has a trailing path separator or +// not. +func (s *DockerSuite) TestCpToCaseE(c *check.C) { + containerID := makeTestContainer(c, testContainerOptions{ + command: makeCatFileCommand("/testDir/file1-1"), + }) + + tmpDir := getTestDir(c, "test-cp-to-case-e") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := cpPath(tmpDir, "dir1") + dstDir := containerCpPath(containerID, "testDir") + + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) + + // Should now contain file1-1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // Make new destination container. + containerID = makeTestContainer(c, testContainerOptions{ + command: makeCatFileCommand("/testDir/file1-1"), + }) + + dstDir = containerCpPathTrailingSep(containerID, "testDir") + + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) + + // Should now contain file1-1's contents. 
+ c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) +} + +// F. SRC specifies a directory and DST exists as a file. This should cause an +// error as it is not possible to overwrite a file with a directory. +func (s *DockerSuite) TestCpToCaseF(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + + tmpDir := getTestDir(c, "test-cp-to-case-f") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := cpPath(tmpDir, "dir1") + dstFile := containerCpPath(containerID, "/root/file1") + + err := runDockerCp(c, srcDir, dstFile, nil) + c.Assert(err, checker.NotNil) + + c.Assert(isCpCannotCopyDir(err), checker.True, check.Commentf("expected ErrCannotCopyDir error, but got %T: %s", err, err)) +} + +// G. SRC specifies a directory and DST exists as a directory. This should copy +// the SRC directory and all its contents to the DST directory. Ensure this +// works whether DST has a trailing path separator or not. +func (s *DockerSuite) TestCpToCaseG(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + command: makeCatFileCommand("dir2/dir1/file1-1"), + }) + + tmpDir := getTestDir(c, "test-cp-to-case-g") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := cpPath(tmpDir, "dir1") + dstDir := containerCpPath(containerID, "/root/dir2") + + // Ensure that dstPath doesn't exist. + c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) + + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) + + // Should now contain file1-1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // Make new destination container. + containerID = makeTestContainer(c, testContainerOptions{ + addContent: true, + command: makeCatFileCommand("/dir2/dir1/file1-1"), + }) + + dstDir = containerCpPathTrailingSep(containerID, "/dir2") + + // Ensure that dstPath doesn't exist. + c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) + + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) + + // Should now contain file1-1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) +} + +// H. SRC specifies a directory's contents only and DST does not exist. This +// should create a directory at DST and copy the contents of the SRC +// directory (but not the directory itself) into the DST directory. Ensure +// this works whether DST has a trailing path separator or not. +func (s *DockerSuite) TestCpToCaseH(c *check.C) { + containerID := makeTestContainer(c, testContainerOptions{ + command: makeCatFileCommand("/testDir/file1-1"), + }) + + tmpDir := getTestDir(c, "test-cp-to-case-h") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := cpPathTrailingSep(tmpDir, "dir1") + "." + dstDir := containerCpPath(containerID, "testDir") + + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) + + // Should now contain file1-1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // Make new destination container. 
+ containerID = makeTestContainer(c, testContainerOptions{ + command: makeCatFileCommand("/testDir/file1-1"), + }) + + dstDir = containerCpPathTrailingSep(containerID, "testDir") + + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) + + // Should now contain file1-1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) +} + +// I. SRC specifies a directory's contents only and DST exists as a file. This +// should cause an error as it is not possible to overwrite a file with a +// directory. +func (s *DockerSuite) TestCpToCaseI(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + + tmpDir := getTestDir(c, "test-cp-to-case-i") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := cpPathTrailingSep(tmpDir, "dir1") + "." + dstFile := containerCpPath(containerID, "/root/file1") + + err := runDockerCp(c, srcDir, dstFile, nil) + c.Assert(err, checker.NotNil) + + c.Assert(isCpCannotCopyDir(err), checker.True, check.Commentf("expected ErrCannotCopyDir error, but got %T: %s", err, err)) +} + +// J. SRC specifies a directory's contents only and DST exists as a directory. +// This should copy the contents of the SRC directory (but not the directory +// itself) into the DST directory. Ensure this works whether DST has a +// trailing path separator or not. +func (s *DockerSuite) TestCpToCaseJ(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + command: makeCatFileCommand("/dir2/file1-1"), + }) + + tmpDir := getTestDir(c, "test-cp-to-case-j") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := cpPathTrailingSep(tmpDir, "dir1") + "." + dstDir := containerCpPath(containerID, "/dir2") + + // Ensure that dstPath doesn't exist. + c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) + + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) + + // Should now contain file1-1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // Make new destination container. + containerID = makeTestContainer(c, testContainerOptions{ + command: makeCatFileCommand("/dir2/file1-1"), + }) + + dstDir = containerCpPathTrailingSep(containerID, "/dir2") + + // Ensure that dstPath doesn't exist. + c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) + + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) + + // Should now contain file1-1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) +} + +// The `docker cp` command should also ensure that you cannot +// write to a container rootfs that is marked as read-only. 
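+// A minimal sketch of what the next two tests exercise (container names here
+// are hypothetical; the real tests build containers via makeTestContainer):
+//
+//	docker run -d --read-only --name ro busybox top
+//	docker cp file1 ro:/root/anything      // fails: container rootfs is read-only
+//
+//	docker run -d -v /host/dir:/vol_ro:ro --name rov busybox top
+//	docker cp file1 rov:/vol_ro/anything   // fails: volume is mounted read-only
+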
+func (s *DockerSuite) TestCpToErrReadOnlyRootfs(c *check.C) { + // --read-only + userns has remount issues + testRequires(c, DaemonIsLinux, NotUserNamespace) + tmpDir := getTestDir(c, "test-cp-to-err-read-only-rootfs") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + containerID := makeTestContainer(c, testContainerOptions{ + readOnly: true, workDir: "/root", + command: makeCatFileCommand("shouldNotExist"), + }) + + srcPath := cpPath(tmpDir, "file1") + dstPath := containerCpPath(containerID, "/root/shouldNotExist") + + err := runDockerCp(c, srcPath, dstPath, nil) + c.Assert(err, checker.NotNil) + + c.Assert(isCpCannotCopyReadOnly(err), checker.True, check.Commentf("expected ErrContainerRootfsReadonly error, but got %T: %s", err, err)) + + // Ensure that dstPath doesn't exist. + c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) +} + +// The `docker cp` command should also ensure that you +// cannot write to a volume that is mounted as read-only. +func (s *DockerSuite) TestCpToErrReadOnlyVolume(c *check.C) { + // --read-only + userns has remount issues + testRequires(c, DaemonIsLinux, NotUserNamespace) + tmpDir := getTestDir(c, "test-cp-to-err-read-only-volume") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + containerID := makeTestContainer(c, testContainerOptions{ + volumes: defaultVolumes(tmpDir), workDir: "/root", + command: makeCatFileCommand("/vol_ro/shouldNotExist"), + }) + + srcPath := cpPath(tmpDir, "file1") + dstPath := containerCpPath(containerID, "/vol_ro/shouldNotExist") + + err := runDockerCp(c, srcPath, dstPath, nil) + c.Assert(err, checker.NotNil) + + c.Assert(isCpCannotCopyReadOnly(err), checker.True, check.Commentf("expected ErrVolumeReadonly error, but got %T: %s", err, err)) + + // Ensure that dstPath doesn't exist. 
+ c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_cp_to_container_unix_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_cp_to_container_unix_test.go new file mode 100644 index 000000000..fa55b6ee2 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_cp_to_container_unix_test.go @@ -0,0 +1,81 @@ +// +build !windows + +package main + +import ( + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/pkg/system" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestCpToContainerWithPermissions(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + tmpDir := getTestDir(c, "test-cp-to-host-with-permissions") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + containerName := "permtest" + + _, exc := dockerCmd(c, "create", "--name", containerName, "debian:jessie", "/bin/bash", "-c", "stat -c '%u %g %a' /permdirtest /permdirtest/permtest") + c.Assert(exc, checker.Equals, 0) + defer dockerCmd(c, "rm", "-f", containerName) + + srcPath := cpPath(tmpDir, "permdirtest") + dstPath := containerCpPath(containerName, "/") + c.Assert(runDockerCp(c, srcPath, dstPath, []string{"-a"}), checker.IsNil) + + out, err := startContainerGetOutput(c, containerName) + c.Assert(err, checker.IsNil, check.Commentf("output: %v", out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "2 2 700\n65534 65534 400", check.Commentf("output: %v", out)) +} + +// Check ownership is root, both in non-userns and userns enabled modes +func (s *DockerSuite) TestCpCheckDestOwnership(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + tmpVolDir := getTestDir(c, "test-cp-tmpvol") + containerID := makeTestContainer(c, + testContainerOptions{volumes: []string{fmt.Sprintf("%s:/tmpvol", tmpVolDir)}}) + + tmpDir := getTestDir(c, "test-cp-to-check-ownership") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := cpPath(tmpDir, "file1") + dstPath := containerCpPath(containerID, "/tmpvol", "file1") + + err := runDockerCp(c, srcPath, dstPath, nil) + c.Assert(err, checker.IsNil) + + stat, err := system.Stat(filepath.Join(tmpVolDir, "file1")) + c.Assert(err, checker.IsNil) + uid, gid, err := getRootUIDGID() + c.Assert(err, checker.IsNil) + c.Assert(stat.UID(), checker.Equals, uint32(uid), check.Commentf("Copied file not owned by container root UID")) + c.Assert(stat.GID(), checker.Equals, uint32(gid), check.Commentf("Copied file not owned by container root GID")) +} + +func getRootUIDGID() (int, int, error) { + uidgid := strings.Split(filepath.Base(testEnv.DockerBasePath()), ".") + if len(uidgid) == 1 { + //user namespace remapping is not turned on; return 0 + return 0, 0, nil + } + uid, err := strconv.Atoi(uidgid[0]) + if err != nil { + return 0, 0, err + } + gid, err := strconv.Atoi(uidgid[1]) + if err != nil { + return 0, 0, err + } + return uid, gid, nil +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_cp_utils_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_cp_utils_test.go new file mode 100644 index 000000000..48aff9061 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_cp_utils_test.go @@ -0,0 +1,319 @@ +package main + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/integration-cli/checker" + 
"github.com/docker/docker/pkg/archive" + "github.com/go-check/check" +) + +type fileType uint32 + +const ( + ftRegular fileType = iota + ftDir + ftSymlink +) + +type fileData struct { + filetype fileType + path string + contents string + uid int + gid int + mode int +} + +func (fd fileData) creationCommand() string { + var command string + + switch fd.filetype { + case ftRegular: + // Don't overwrite the file if it already exists! + command = fmt.Sprintf("if [ ! -f %s ]; then echo %q > %s; fi", fd.path, fd.contents, fd.path) + case ftDir: + command = fmt.Sprintf("mkdir -p %s", fd.path) + case ftSymlink: + command = fmt.Sprintf("ln -fs %s %s", fd.contents, fd.path) + } + + return command +} + +func mkFilesCommand(fds []fileData) string { + commands := make([]string, len(fds)) + + for i, fd := range fds { + commands[i] = fd.creationCommand() + } + + return strings.Join(commands, " && ") +} + +var defaultFileData = []fileData{ + {ftRegular, "file1", "file1", 0, 0, 0666}, + {ftRegular, "file2", "file2", 0, 0, 0666}, + {ftRegular, "file3", "file3", 0, 0, 0666}, + {ftRegular, "file4", "file4", 0, 0, 0666}, + {ftRegular, "file5", "file5", 0, 0, 0666}, + {ftRegular, "file6", "file6", 0, 0, 0666}, + {ftRegular, "file7", "file7", 0, 0, 0666}, + {ftDir, "dir1", "", 0, 0, 0777}, + {ftRegular, "dir1/file1-1", "file1-1", 0, 0, 0666}, + {ftRegular, "dir1/file1-2", "file1-2", 0, 0, 0666}, + {ftDir, "dir2", "", 0, 0, 0666}, + {ftRegular, "dir2/file2-1", "file2-1", 0, 0, 0666}, + {ftRegular, "dir2/file2-2", "file2-2", 0, 0, 0666}, + {ftDir, "dir3", "", 0, 0, 0666}, + {ftRegular, "dir3/file3-1", "file3-1", 0, 0, 0666}, + {ftRegular, "dir3/file3-2", "file3-2", 0, 0, 0666}, + {ftDir, "dir4", "", 0, 0, 0666}, + {ftRegular, "dir4/file3-1", "file4-1", 0, 0, 0666}, + {ftRegular, "dir4/file3-2", "file4-2", 0, 0, 0666}, + {ftDir, "dir5", "", 0, 0, 0666}, + {ftSymlink, "symlinkToFile1", "file1", 0, 0, 0666}, + {ftSymlink, "symlinkToDir1", "dir1", 0, 0, 0666}, + {ftSymlink, "brokenSymlinkToFileX", "fileX", 0, 0, 0666}, + {ftSymlink, "brokenSymlinkToDirX", "dirX", 0, 0, 0666}, + {ftSymlink, "symlinkToAbsDir", "/root", 0, 0, 0666}, + {ftDir, "permdirtest", "", 2, 2, 0700}, + {ftRegular, "permdirtest/permtest", "perm_test", 65534, 65534, 0400}, +} + +func defaultMkContentCommand() string { + return mkFilesCommand(defaultFileData) +} + +func makeTestContentInDir(c *check.C, dir string) { + for _, fd := range defaultFileData { + path := filepath.Join(dir, filepath.FromSlash(fd.path)) + switch fd.filetype { + case ftRegular: + c.Assert(ioutil.WriteFile(path, []byte(fd.contents+"\n"), os.FileMode(fd.mode)), checker.IsNil) + case ftDir: + c.Assert(os.Mkdir(path, os.FileMode(fd.mode)), checker.IsNil) + case ftSymlink: + c.Assert(os.Symlink(fd.contents, path), checker.IsNil) + } + + if fd.filetype != ftSymlink && runtime.GOOS != "windows" { + c.Assert(os.Chown(path, fd.uid, fd.gid), checker.IsNil) + } + } +} + +type testContainerOptions struct { + addContent bool + readOnly bool + volumes []string + workDir string + command string +} + +func makeTestContainer(c *check.C, options testContainerOptions) (containerID string) { + if options.addContent { + mkContentCmd := defaultMkContentCommand() + if options.command == "" { + options.command = mkContentCmd + } else { + options.command = fmt.Sprintf("%s && %s", defaultMkContentCommand(), options.command) + } + } + + if options.command == "" { + options.command = "#(nop)" + } + + args := []string{"run", "-d"} + + for _, volume := range options.volumes { + args = append(args, "-v", 
volume) + } + + if options.workDir != "" { + args = append(args, "-w", options.workDir) + } + + if options.readOnly { + args = append(args, "--read-only") + } + + args = append(args, "busybox", "/bin/sh", "-c", options.command) + + out, _ := dockerCmd(c, args...) + + containerID = strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + + exitCode := strings.TrimSpace(out) + if exitCode != "0" { + out, _ = dockerCmd(c, "logs", containerID) + } + c.Assert(exitCode, checker.Equals, "0", check.Commentf("failed to make test container: %s", out)) + + return +} + +func makeCatFileCommand(path string) string { + return fmt.Sprintf("if [ -f %s ]; then cat %s; fi", path, path) +} + +func cpPath(pathElements ...string) string { + localizedPathElements := make([]string, len(pathElements)) + for i, path := range pathElements { + localizedPathElements[i] = filepath.FromSlash(path) + } + return strings.Join(localizedPathElements, string(filepath.Separator)) +} + +func cpPathTrailingSep(pathElements ...string) string { + return fmt.Sprintf("%s%c", cpPath(pathElements...), filepath.Separator) +} + +func containerCpPath(containerID string, pathElements ...string) string { + joined := strings.Join(pathElements, "/") + return fmt.Sprintf("%s:%s", containerID, joined) +} + +func containerCpPathTrailingSep(containerID string, pathElements ...string) string { + return fmt.Sprintf("%s/", containerCpPath(containerID, pathElements...)) +} + +func runDockerCp(c *check.C, src, dst string, params []string) (err error) { + c.Logf("running `docker cp %s %s %s`", strings.Join(params, " "), src, dst) + + args := []string{"cp"} + + for _, param := range params { + args = append(args, param) + } + + args = append(args, src, dst) + + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, args...)) + if err != nil { + err = fmt.Errorf("error executing `docker cp` command: %s: %s", err, out) + } + + return +} + +func startContainerGetOutput(c *check.C, containerID string) (out string, err error) { + c.Logf("running `docker start -a %s`", containerID) + + args := []string{"start", "-a", containerID} + + out, _, err = runCommandWithOutput(exec.Command(dockerBinary, args...)) + if err != nil { + err = fmt.Errorf("error executing `docker start` command: %s: %s", err, out) + } + + return +} + +func getTestDir(c *check.C, label string) (tmpDir string) { + var err error + + tmpDir, err = ioutil.TempDir("", label) + // unable to make temporary directory + c.Assert(err, checker.IsNil) + + return +} + +func isCpNotExist(err error) bool { + return strings.Contains(err.Error(), "no such file or directory") || strings.Contains(err.Error(), "cannot find the file specified") +} + +func isCpDirNotExist(err error) bool { + return strings.Contains(err.Error(), archive.ErrDirNotExists.Error()) +} + +func isCpNotDir(err error) bool { + return strings.Contains(err.Error(), archive.ErrNotDirectory.Error()) || strings.Contains(err.Error(), "filename, directory name, or volume label syntax is incorrect") +} + +func isCpCannotCopyDir(err error) bool { + return strings.Contains(err.Error(), archive.ErrCannotCopyDir.Error()) +} + +func isCpCannotCopyReadOnly(err error) bool { + return strings.Contains(err.Error(), "marked read-only") +} + +func isCannotOverwriteNonDirWithDir(err error) bool { + return strings.Contains(err.Error(), "cannot overwrite non-directory") +} + +func fileContentEquals(c *check.C, filename, contents string) (err error) { + c.Logf("checking that file %q contains %q\n", filename, contents) + + fileBytes, err 
:= ioutil.ReadFile(filename) + if err != nil { + return + } + + expectedBytes, err := ioutil.ReadAll(strings.NewReader(contents)) + if err != nil { + return + } + + if !bytes.Equal(fileBytes, expectedBytes) { + err = fmt.Errorf("file content not equal - expected %q, got %q", string(expectedBytes), string(fileBytes)) + } + + return +} + +func symlinkTargetEquals(c *check.C, symlink, expectedTarget string) (err error) { + c.Logf("checking that the symlink %q points to %q\n", symlink, expectedTarget) + + actualTarget, err := os.Readlink(symlink) + if err != nil { + return + } + + if actualTarget != expectedTarget { + err = fmt.Errorf("symlink target points to %q not %q", actualTarget, expectedTarget) + } + + return +} + +func containerStartOutputEquals(c *check.C, containerID, contents string) (err error) { + c.Logf("checking that container %q start output contains %q\n", containerID, contents) + + out, err := startContainerGetOutput(c, containerID) + if err != nil { + return + } + + if out != contents { + err = fmt.Errorf("output contents not equal - expected %q, got %q", contents, out) + } + + return +} + +func defaultVolumes(tmpDir string) []string { + if SameHostDaemon() { + return []string{ + "/vol1", + fmt.Sprintf("%s:/vol2", tmpDir), + fmt.Sprintf("%s:/vol3", filepath.Join(tmpDir, "vol3")), + fmt.Sprintf("%s:/vol_ro:ro", filepath.Join(tmpDir, "vol_ro")), + } + } + + // Can't bind-mount volumes with separate host daemon. + return []string{"/vol1", "/vol2", "/vol3", "/vol_ro:/vol_ro:ro"} +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_create_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_create_test.go new file mode 100644 index 000000000..d4eb985a3 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_create_test.go @@ -0,0 +1,443 @@ +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "reflect" + "strings" + "time" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/integration-cli/cli/build/fakecontext" + "github.com/docker/docker/pkg/stringid" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/docker/go-connections/nat" + "github.com/go-check/check" +) + +// Make sure we can create a simple container with some args +func (s *DockerSuite) TestCreateArgs(c *check.C) { + // Intentionally clear entrypoint, as the Windows busybox image needs an entrypoint, which breaks this test + out, _ := dockerCmd(c, "create", "--entrypoint=", "busybox", "command", "arg1", "arg2", "arg with space", "-c", "flags") + + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "inspect", cleanedContainerID) + + containers := []struct { + ID string + Created time.Time + Path string + Args []string + Image string + }{} + + err := json.Unmarshal([]byte(out), &containers) + c.Assert(err, check.IsNil, check.Commentf("Error inspecting the container: %s", err)) + c.Assert(containers, checker.HasLen, 1) + + cont := containers[0] + c.Assert(string(cont.Path), checker.Equals, "command", check.Commentf("Unexpected container path. Expected command, received: %s", cont.Path)) + + b := false + expected := []string{"arg1", "arg2", "arg with space", "-c", "flags"} + for i, arg := range expected { + if arg != cont.Args[i] { + b = true + break + } + } + if len(cont.Args) != len(expected) || b { + c.Fatalf("Unexpected args. 
Expected %v, received: %v", expected, cont.Args) + } + +} + +// Make sure we can grow the container's rootfs at creation time. +func (s *DockerSuite) TestCreateGrowRootfs(c *check.C) { + // Windows and Devicemapper support growing the rootfs + if testEnv.DaemonPlatform() != "windows" { + testRequires(c, Devicemapper) + } + out, _ := dockerCmd(c, "create", "--storage-opt", "size=120G", "busybox") + + cleanedContainerID := strings.TrimSpace(out) + + inspectOut := inspectField(c, cleanedContainerID, "HostConfig.StorageOpt") + c.Assert(inspectOut, checker.Equals, "map[size:120G]") +} + +// Make sure we cannot shrink the container's rootfs at creation time. +func (s *DockerSuite) TestCreateShrinkRootfs(c *check.C) { + testRequires(c, Devicemapper) + + // Ensure this fails because the default base FS size (defaultBaseFsSize) is 10G + out, _, err := dockerCmdWithError("create", "--storage-opt", "size=5G", "busybox") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Container size cannot be smaller than") +} + +// Make sure we can set hostconfig options too +func (s *DockerSuite) TestCreateHostConfig(c *check.C) { + out, _ := dockerCmd(c, "create", "-P", "busybox", "echo") + + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "inspect", cleanedContainerID) + + containers := []struct { + HostConfig *struct { + PublishAllPorts bool + } + }{} + + err := json.Unmarshal([]byte(out), &containers) + c.Assert(err, check.IsNil, check.Commentf("Error inspecting the container: %s", err)) + c.Assert(containers, checker.HasLen, 1) + + cont := containers[0] + c.Assert(cont.HostConfig, check.NotNil, check.Commentf("Expected HostConfig, got none")) + c.Assert(cont.HostConfig.PublishAllPorts, check.NotNil, check.Commentf("Expected PublishAllPorts, got false")) +} + +func (s *DockerSuite) TestCreateWithPortRange(c *check.C) { + out, _ := dockerCmd(c, "create", "-p", "3300-3303:3300-3303/tcp", "busybox", "echo") + + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "inspect", cleanedContainerID) + + containers := []struct { + HostConfig *struct { + PortBindings map[nat.Port][]nat.PortBinding + } + }{} + err := json.Unmarshal([]byte(out), &containers) + c.Assert(err, check.IsNil, check.Commentf("Error inspecting the container: %s", err)) + c.Assert(containers, checker.HasLen, 1) + + cont := containers[0] + + c.Assert(cont.HostConfig, check.NotNil, check.Commentf("Expected HostConfig, got none")) + c.Assert(cont.HostConfig.PortBindings, checker.HasLen, 4, check.Commentf("Expected 4 port bindings, got %d", len(cont.HostConfig.PortBindings))) + + for k, v := range cont.HostConfig.PortBindings { + c.Assert(v, checker.HasLen, 1, check.Commentf("Expected 1 port binding for port %s but found %s", k, v)) + c.Assert(k.Port(), checker.Equals, v[0].HostPort, check.Commentf("Expected host port %s to match published port %s", k.Port(), v[0].HostPort)) + + } + +} + +func (s *DockerSuite) TestCreateWithLargePortRange(c *check.C) { + out, _ := dockerCmd(c, "create", "-p", "1-65535:1-65535/tcp", "busybox", "echo") + + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "inspect", cleanedContainerID) + + containers := []struct { + HostConfig *struct { + PortBindings map[nat.Port][]nat.PortBinding + } + }{} + + err := json.Unmarshal([]byte(out), &containers) + c.Assert(err, check.IsNil, check.Commentf("Error inspecting the container: %s", err)) + c.Assert(containers, checker.HasLen, 1) + + cont := containers[0] + c.Assert(cont.HostConfig, check.NotNil,
check.Commentf("Expected HostConfig, got none")) + c.Assert(cont.HostConfig.PortBindings, checker.HasLen, 65535) + + for k, v := range cont.HostConfig.PortBindings { + c.Assert(v, checker.HasLen, 1) + c.Assert(k.Port(), checker.Equals, v[0].HostPort, check.Commentf("Expected host port %s to match published port %s", k.Port(), v[0].HostPort)) + } + +} + +// "test123" should be printed by docker create + start +func (s *DockerSuite) TestCreateEchoStdout(c *check.C) { + out, _ := dockerCmd(c, "create", "busybox", "echo", "test123") + + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "start", "-ai", cleanedContainerID) + c.Assert(out, checker.Equals, "test123\n", check.Commentf("container should've printed 'test123', got %q", out)) + +} + +func (s *DockerSuite) TestCreateVolumesCreated(c *check.C) { + testRequires(c, SameHostDaemon) + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + + name := "test_create_volume" + dockerCmd(c, "create", "--name", name, "-v", prefix+slash+"foo", "busybox") + + dir, err := inspectMountSourceField(name, prefix+slash+"foo") + c.Assert(err, check.IsNil, check.Commentf("Error getting volume host path: %q", err)) + + if _, err := os.Stat(dir); err != nil && os.IsNotExist(err) { + c.Fatalf("Volume was not created") + } + if err != nil { + c.Fatalf("Error statting volume host path: %q", err) + } + +} + +func (s *DockerSuite) TestCreateLabels(c *check.C) { + name := "test_create_labels" + expected := map[string]string{"k1": "v1", "k2": "v2"} + dockerCmd(c, "create", "--name", name, "-l", "k1=v1", "--label", "k2=v2", "busybox") + + actual := make(map[string]string) + inspectFieldAndUnmarshall(c, name, "Config.Labels", &actual) + + if !reflect.DeepEqual(expected, actual) { + c.Fatalf("Expected %s got %s", expected, actual) + } +} + +func (s *DockerSuite) TestCreateLabelFromImage(c *check.C) { + imageName := "testcreatebuildlabel" + buildImageSuccessfully(c, imageName, build.WithDockerfile(`FROM busybox + LABEL k1=v1 k2=v2`)) + + name := "test_create_labels_from_image" + expected := map[string]string{"k2": "x", "k3": "v3", "k1": "v1"} + dockerCmd(c, "create", "--name", name, "-l", "k2=x", "--label", "k3=v3", imageName) + + actual := make(map[string]string) + inspectFieldAndUnmarshall(c, name, "Config.Labels", &actual) + + if !reflect.DeepEqual(expected, actual) { + c.Fatalf("Expected %s got %s", expected, actual) + } +} + +func (s *DockerSuite) TestCreateHostnameWithNumber(c *check.C) { + image := "busybox" + // Busybox on Windows does not implement hostname command + if testEnv.DaemonPlatform() == "windows" { + image = testEnv.MinimalBaseImage() + } + out, _ := dockerCmd(c, "run", "-h", "web.0", image, "hostname") + c.Assert(strings.TrimSpace(out), checker.Equals, "web.0", check.Commentf("hostname not set, expected `web.0`, got: %s", out)) + +} + +func (s *DockerSuite) TestCreateRM(c *check.C) { + // Test to make sure we can 'rm' a new container that is in + // "Created" state, and has ever been run. Test "rm -f" too. 
+ + // create a container + out, _ := dockerCmd(c, "create", "busybox") + cID := strings.TrimSpace(out) + + dockerCmd(c, "rm", cID) + + // Now do it again so we can "rm -f" this time + out, _ = dockerCmd(c, "create", "busybox") + + cID = strings.TrimSpace(out) + dockerCmd(c, "rm", "-f", cID) +} + +func (s *DockerSuite) TestCreateModeIpcContainer(c *check.C) { + // Uses Linux specific functionality (--ipc) + testRequires(c, DaemonIsLinux, SameHostDaemon) + + out, _ := dockerCmd(c, "create", "busybox") + id := strings.TrimSpace(out) + + dockerCmd(c, "create", fmt.Sprintf("--ipc=container:%s", id), "busybox") +} + +func (s *DockerSuite) TestCreateByImageID(c *check.C) { + imageName := "testcreatebyimageid" + buildImageSuccessfully(c, imageName, build.WithDockerfile(`FROM busybox + MAINTAINER dockerio`)) + imageID := getIDByName(c, imageName) + truncatedImageID := stringid.TruncateID(imageID) + + dockerCmd(c, "create", imageID) + dockerCmd(c, "create", truncatedImageID) + dockerCmd(c, "create", fmt.Sprintf("%s:%s", imageName, truncatedImageID)) + + // Ensure this fails + out, exit, _ := dockerCmdWithError("create", fmt.Sprintf("%s:%s", imageName, imageID)) + if exit == 0 { + c.Fatalf("expected non-zero exit code; received %d", exit) + } + + if expected := "invalid reference format"; !strings.Contains(out, expected) { + c.Fatalf(`Expected %q in output; got: %s`, expected, out) + } + + out, exit, _ = dockerCmdWithError("create", fmt.Sprintf("%s:%s", "wrongimage", truncatedImageID)) + if exit == 0 { + c.Fatalf("expected non-zero exit code; received %d", exit) + } + + if expected := "Unable to find image"; !strings.Contains(out, expected) { + c.Fatalf(`Expected %q in output; got: %s`, expected, out) + } +} + +func (s *DockerTrustSuite) TestTrustedCreate(c *check.C) { + repoName := s.setupTrustedImage(c, "trusted-create") + + // Try create + cli.Docker(cli.Args("create", repoName), trustedCmd).Assert(c, SuccessTagging) + cli.DockerCmd(c, "rmi", repoName) + + // Try untrusted create to ensure we pushed the tag to the registry + cli.Docker(cli.Args("create", "--disable-content-trust=true", repoName)).Assert(c, SuccessDownloadedOnStderr) +} + +func (s *DockerTrustSuite) TestUntrustedCreate(c *check.C) { + repoName := fmt.Sprintf("%v/dockercliuntrusted/createtest", privateRegistryURL) + withTagName := fmt.Sprintf("%s:latest", repoName) + // tag the image and upload it to the private registry + cli.DockerCmd(c, "tag", "busybox", withTagName) + cli.DockerCmd(c, "push", withTagName) + cli.DockerCmd(c, "rmi", withTagName) + + // Try trusted create on untrusted tag + cli.Docker(cli.Args("create", withTagName), trustedCmd).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: fmt.Sprintf("does not have trust data for %s", repoName), + }) +} + +func (s *DockerTrustSuite) TestTrustedIsolatedCreate(c *check.C) { + repoName := s.setupTrustedImage(c, "trusted-isolated-create") + + // Try create + cli.Docker(cli.Args("--config", "/tmp/docker-isolated-create", "create", repoName), trustedCmd).Assert(c, SuccessTagging) + defer os.RemoveAll("/tmp/docker-isolated-create") + + cli.DockerCmd(c, "rmi", repoName) +} + +func (s *DockerTrustSuite) TestTrustedCreateFromBadTrustServer(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclievilcreate/trusted:latest", privateRegistryURL) + evilLocalConfigDir, err := ioutil.TempDir("", "evilcreate-local-config-dir") + c.Assert(err, check.IsNil) + + // tag the image and upload it to the private registry + cli.DockerCmd(c, "tag", "busybox", repoName) + cli.Docker(cli.Args("push", 
repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + cli.DockerCmd(c, "rmi", repoName) + + // Try create + cli.Docker(cli.Args("create", repoName), trustedCmd).Assert(c, SuccessTagging) + cli.DockerCmd(c, "rmi", repoName) + + // Kill the notary server, start a new "evil" one. + s.not.Close() + s.not, err = newTestNotary(c) + c.Assert(err, check.IsNil) + + // In order to make an evil server, let's re-init a client (with a different trust dir) and push new data. + // tag an image and upload it to the private registry + cli.DockerCmd(c, "--config", evilLocalConfigDir, "tag", "busybox", repoName) + + // Push up to the new server + cli.Docker(cli.Args("--config", evilLocalConfigDir, "push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + + // Now, try creating with the original client from this new trust server. This should fail because the new root is invalid. + cli.Docker(cli.Args("create", repoName), trustedCmd).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "could not rotate trust to a new trusted root", + }) +} + +func (s *DockerSuite) TestCreateStopSignal(c *check.C) { + name := "test_create_stop_signal" + dockerCmd(c, "create", "--name", name, "--stop-signal", "9", "busybox") + + res := inspectFieldJSON(c, name, "Config.StopSignal") + c.Assert(res, checker.Contains, "9") + +} + +func (s *DockerSuite) TestCreateWithWorkdir(c *check.C) { + name := "foo" + + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + dir := prefix + slash + "home" + slash + "foo" + slash + "bar" + + dockerCmd(c, "create", "--name", name, "-w", dir, "busybox") + // Windows does not create the workdir until the container is started + if testEnv.DaemonPlatform() == "windows" { + dockerCmd(c, "start", name) + } + dockerCmd(c, "cp", fmt.Sprintf("%s:%s", name, dir), prefix+slash+"tmp") +} + +func (s *DockerSuite) TestCreateWithInvalidLogOpts(c *check.C) { + name := "test-invalidate-log-opts" + out, _, err := dockerCmdWithError("create", "--name", name, "--log-opt", "invalid=true", "busybox") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "unknown log opt") + + out, _ = dockerCmd(c, "ps", "-a") + c.Assert(out, checker.Not(checker.Contains), name) +} + +// #20972 +func (s *DockerSuite) TestCreate64ByteHexID(c *check.C) { + out := inspectField(c, "busybox", "Id") + imageID := strings.TrimPrefix(strings.TrimSpace(string(out)), "sha256:") + + dockerCmd(c, "create", imageID) +} + +// Test case for #23498 +func (s *DockerSuite) TestCreateUnsetEntrypoint(c *check.C) { + name := "test-entrypoint" + dockerfile := `FROM busybox +ADD entrypoint.sh /entrypoint.sh +RUN chmod 755 /entrypoint.sh +ENTRYPOINT ["/entrypoint.sh"] +CMD echo foobar` + + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "entrypoint.sh": `#!/bin/sh +echo "I am an entrypoint" +exec "$@"`, + })) + defer ctx.Close() + + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + + out := cli.DockerCmd(c, "create", "--entrypoint=", name, "echo", "foo").Combined() + id := strings.TrimSpace(out) + c.Assert(id, check.Not(check.Equals), "") + out = cli.DockerCmd(c, "start", "-a", id).Combined() + c.Assert(strings.TrimSpace(out), check.Equals, "foo") +} + +// #22471 +func (s *DockerSuite) TestCreateStopTimeout(c *check.C) { + name1 := "test_create_stop_timeout_1" + dockerCmd(c, "create", "--name", name1, "--stop-timeout", "15", "busybox") + + res := inspectFieldJSON(c, name1, "Config.StopTimeout") + c.Assert(res, checker.Contains, "15") + + name2 :=
"test_create_stop_timeout_2" + dockerCmd(c, "create", "--name", name2, "busybox") + + res = inspectFieldJSON(c, name2, "Config.StopTimeout") + c.Assert(res, checker.Contains, "null") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_create_unix_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_create_unix_test.go new file mode 100644 index 000000000..1b0bb4a3d --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_create_unix_test.go @@ -0,0 +1,43 @@ +// +build !windows + +package main + +import ( + "strings" + + "github.com/go-check/check" +) + +// Test case for #30166 (target was not validated) +func (s *DockerSuite) TestCreateTmpfsMountsTarget(c *check.C) { + testRequires(c, DaemonIsLinux) + type testCase struct { + target string + expectedError string + } + cases := []testCase{ + { + target: ".", + expectedError: "mount path must be absolute", + }, + { + target: "foo", + expectedError: "mount path must be absolute", + }, + { + target: "/", + expectedError: "destination can't be '/'", + }, + { + target: "//", + expectedError: "destination can't be '/'", + }, + } + for _, x := range cases { + out, _, _ := dockerCmdWithError("create", "--tmpfs", x.target, "busybox", "sh") + if x.expectedError != "" && !strings.Contains(out, x.expectedError) { + c.Fatalf("mounting tmpfs over %q should fail with %q, but got %q", + x.target, x.expectedError, out) + } + } +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_daemon_plugins_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_daemon_plugins_test.go new file mode 100644 index 000000000..66c9f6ece --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_daemon_plugins_test.go @@ -0,0 +1,369 @@ +// +build linux + +package main + +import ( + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/pkg/mount" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" + "golang.org/x/sys/unix" +) + +// TestDaemonRestartWithPluginEnabled tests state restore for an enabled plugin +func (s *DockerDaemonSuite) TestDaemonRestartWithPluginEnabled(c *check.C) { + testRequires(c, IsAmd64, Network) + + s.d.Start(c) + + if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName); err != nil { + c.Fatalf("Could not install plugin: %v %s", err, out) + } + + defer func() { + if out, err := s.d.Cmd("plugin", "disable", pName); err != nil { + c.Fatalf("Could not disable plugin: %v %s", err, out) + } + if out, err := s.d.Cmd("plugin", "remove", pName); err != nil { + c.Fatalf("Could not remove plugin: %v %s", err, out) + } + }() + + s.d.Restart(c) + + out, err := s.d.Cmd("plugin", "ls") + if err != nil { + c.Fatalf("Could not list plugins: %v %s", err, out) + } + c.Assert(out, checker.Contains, pName) + c.Assert(out, checker.Contains, "true") +} + +// TestDaemonRestartWithPluginDisabled tests state restore for a disabled plugin +func (s *DockerDaemonSuite) TestDaemonRestartWithPluginDisabled(c *check.C) { + testRequires(c, IsAmd64, Network) + + s.d.Start(c) + + if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName, "--disable"); err != nil { + c.Fatalf("Could not install plugin: %v %s", err, out) + } + + defer func() { + if out, err := s.d.Cmd("plugin", "remove", pName); err != nil { + c.Fatalf("Could not remove plugin: %v %s", err, out) + } + }() + + s.d.Restart(c) + + out, err := s.d.Cmd("plugin", "ls") + if err != nil { + c.Fatalf("Could 
not list plugins: %v %s", err, out) + } + c.Assert(out, checker.Contains, pName) + c.Assert(out, checker.Contains, "false") +} + +// TestDaemonKillLiveRestoreWithPlugins SIGKILLs daemon started with --live-restore. +// Plugins should continue to run. +func (s *DockerDaemonSuite) TestDaemonKillLiveRestoreWithPlugins(c *check.C) { + testRequires(c, IsAmd64, Network) + + s.d.Start(c, "--live-restore") + if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName); err != nil { + c.Fatalf("Could not install plugin: %v %s", err, out) + } + defer func() { + s.d.Restart(c, "--live-restore") + if out, err := s.d.Cmd("plugin", "disable", pName); err != nil { + c.Fatalf("Could not disable plugin: %v %s", err, out) + } + if out, err := s.d.Cmd("plugin", "remove", pName); err != nil { + c.Fatalf("Could not remove plugin: %v %s", err, out) + } + }() + + if err := s.d.Kill(); err != nil { + c.Fatalf("Could not kill daemon: %v", err) + } + + icmd.RunCommand("pgrep", "-f", pluginProcessName).Assert(c, icmd.Success) +} + +// TestDaemonShutdownLiveRestoreWithPlugins SIGTERMs daemon started with --live-restore. +// Plugins should continue to run. +func (s *DockerDaemonSuite) TestDaemonShutdownLiveRestoreWithPlugins(c *check.C) { + testRequires(c, IsAmd64, Network) + + s.d.Start(c, "--live-restore") + if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName); err != nil { + c.Fatalf("Could not install plugin: %v %s", err, out) + } + defer func() { + s.d.Restart(c, "--live-restore") + if out, err := s.d.Cmd("plugin", "disable", pName); err != nil { + c.Fatalf("Could not disable plugin: %v %s", err, out) + } + if out, err := s.d.Cmd("plugin", "remove", pName); err != nil { + c.Fatalf("Could not remove plugin: %v %s", err, out) + } + }() + + if err := s.d.Interrupt(); err != nil { + c.Fatalf("Could not kill daemon: %v", err) + } + + icmd.RunCommand("pgrep", "-f", pluginProcessName).Assert(c, icmd.Success) +} + +// TestDaemonShutdownWithPlugins shuts down running plugins. +func (s *DockerDaemonSuite) TestDaemonShutdownWithPlugins(c *check.C) { + testRequires(c, IsAmd64, Network, SameHostDaemon) + + s.d.Start(c) + if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName); err != nil { + c.Fatalf("Could not install plugin: %v %s", err, out) + } + + defer func() { + s.d.Restart(c) + if out, err := s.d.Cmd("plugin", "disable", pName); err != nil { + c.Fatalf("Could not disable plugin: %v %s", err, out) + } + if out, err := s.d.Cmd("plugin", "remove", pName); err != nil { + c.Fatalf("Could not remove plugin: %v %s", err, out) + } + }() + + if err := s.d.Interrupt(); err != nil { + c.Fatalf("Could not kill daemon: %v", err) + } + + for { + if err := unix.Kill(s.d.Pid(), 0); err == unix.ESRCH { + break + } + } + + icmd.RunCommand("pgrep", "-f", pluginProcessName).Assert(c, icmd.Expected{ + ExitCode: 1, + Error: "exit status 1", + }) + + s.d.Start(c) + icmd.RunCommand("pgrep", "-f", pluginProcessName).Assert(c, icmd.Success) +} + +// TestDaemonKillWithPlugins leaves plugins running. 
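+// Taken together, the daemon/plugin lifecycle tests in this file cover
+// roughly this matrix (a summary of the expectations asserted around it, not
+// an upstream table):
+//
+//	daemon stop         --live-restore   plugin process afterwards
+//	SIGTERM (shutdown)  off              stopped along with the daemon
+//	SIGTERM (shutdown)  on               keeps running
+//	SIGKILL             off              keeps running (daemon cannot clean up)
+//	SIGKILL             on               keeps running
+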
+func (s *DockerDaemonSuite) TestDaemonKillWithPlugins(c *check.C) { + testRequires(c, IsAmd64, Network, SameHostDaemon) + + s.d.Start(c) + if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName); err != nil { + c.Fatalf("Could not install plugin: %v %s", err, out) + } + + defer func() { + s.d.Restart(c) + if out, err := s.d.Cmd("plugin", "disable", pName); err != nil { + c.Fatalf("Could not disable plugin: %v %s", err, out) + } + if out, err := s.d.Cmd("plugin", "remove", pName); err != nil { + c.Fatalf("Could not remove plugin: %v %s", err, out) + } + }() + + if err := s.d.Kill(); err != nil { + c.Fatalf("Could not kill daemon: %v", err) + } + + // assert that plugins are running. + icmd.RunCommand("pgrep", "-f", pluginProcessName).Assert(c, icmd.Success) +} + +// TestVolumePlugin tests volume creation using a plugin. +func (s *DockerDaemonSuite) TestVolumePlugin(c *check.C) { + testRequires(c, IsAmd64, Network) + + volName := "plugin-volume" + destDir := "/tmp/data/" + destFile := "foo" + + s.d.Start(c) + out, err := s.d.Cmd("plugin", "install", pName, "--grant-all-permissions") + if err != nil { + c.Fatalf("Could not install plugin: %v %s", err, out) + } + pluginID, err := s.d.Cmd("plugin", "inspect", "-f", "{{.Id}}", pName) + pluginID = strings.TrimSpace(pluginID) + if err != nil { + c.Fatalf("Could not retrieve plugin ID: %v %s", err, pluginID) + } + mountpointPrefix := filepath.Join(s.d.RootDir(), "plugins", pluginID, "rootfs") + defer func() { + if out, err := s.d.Cmd("plugin", "disable", pName); err != nil { + c.Fatalf("Could not disable plugin: %v %s", err, out) + } + + if out, err := s.d.Cmd("plugin", "remove", pName); err != nil { + c.Fatalf("Could not remove plugin: %v %s", err, out) + } + + exists, err := existsMountpointWithPrefix(mountpointPrefix) + c.Assert(err, checker.IsNil) + c.Assert(exists, checker.Equals, false) + + }() + + out, err = s.d.Cmd("volume", "create", "-d", pName, volName) + if err != nil { + c.Fatalf("Could not create volume: %v %s", err, out) + } + defer func() { + if out, err := s.d.Cmd("volume", "remove", volName); err != nil { + c.Fatalf("Could not remove volume: %v %s", err, out) + } + }() + + out, err = s.d.Cmd("volume", "ls") + if err != nil { + c.Fatalf("Could not list volume: %v %s", err, out) + } + c.Assert(out, checker.Contains, volName) + c.Assert(out, checker.Contains, pName) + + mountPoint, err := s.d.Cmd("volume", "inspect", volName, "--format", "{{.Mountpoint}}") + if err != nil { + c.Fatalf("Could not inspect volume: %v %s", err, mountPoint) + } + mountPoint = strings.TrimSpace(mountPoint) + + out, err = s.d.Cmd("run", "--rm", "-v", volName+":"+destDir, "busybox", "touch", destDir+destFile) + c.Assert(err, checker.IsNil, check.Commentf(out)) + path := filepath.Join(s.d.RootDir(), "plugins", pluginID, "rootfs", mountPoint, destFile) + _, err = os.Lstat(path) + c.Assert(err, checker.IsNil) + + exists, err := existsMountpointWithPrefix(mountpointPrefix) + c.Assert(err, checker.IsNil) + c.Assert(exists, checker.Equals, true) +} + +func (s *DockerDaemonSuite) TestGraphdriverPlugin(c *check.C) { + testRequires(c, Network, IsAmd64, DaemonIsLinux, overlay2Supported, ExperimentalDaemon) + + s.d.Start(c) + + // install the plugin + plugin := "cpuguy83/docker-overlay2-graphdriver-plugin" + out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", plugin) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // restart the daemon with the plugin set as the storage driver + s.d.Restart(c, "-s", plugin, 
"--storage-opt", "overlay2.override_kernel_check=1") + + // run a container + out, err = s.d.Cmd("run", "--rm", "busybox", "true") // this will pull busybox using the plugin + c.Assert(err, checker.IsNil, check.Commentf(out)) +} + +func (s *DockerDaemonSuite) TestPluginVolumeRemoveOnRestart(c *check.C) { + testRequires(c, DaemonIsLinux, Network, IsAmd64) + + s.d.Start(c, "--live-restore=true") + + out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Contains, pName) + + out, err = s.d.Cmd("volume", "create", "--driver", pName, "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + s.d.Restart(c, "--live-restore=true") + + out, err = s.d.Cmd("plugin", "disable", pName) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "in use") + + out, err = s.d.Cmd("volume", "rm", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("plugin", "disable", pName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("plugin", "rm", pName) + c.Assert(err, checker.IsNil, check.Commentf(out)) +} + +func existsMountpointWithPrefix(mountpointPrefix string) (bool, error) { + mounts, err := mount.GetMounts() + if err != nil { + return false, err + } + for _, mnt := range mounts { + if strings.HasPrefix(mnt.Mountpoint, mountpointPrefix) { + return true, nil + } + } + return false, nil +} + +func (s *DockerDaemonSuite) TestPluginListFilterEnabled(c *check.C) { + testRequires(c, IsAmd64, Network) + + s.d.Start(c) + + out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pNameWithTag, "--disable") + c.Assert(err, check.IsNil, check.Commentf(out)) + + defer func() { + if out, err := s.d.Cmd("plugin", "remove", pNameWithTag); err != nil { + c.Fatalf("Could not remove plugin: %v %s", err, out) + } + }() + + out, err = s.d.Cmd("plugin", "ls", "--filter", "enabled=true") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), pName) + + out, err = s.d.Cmd("plugin", "ls", "--filter", "enabled=false") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pName) + c.Assert(out, checker.Contains, "false") + + out, err = s.d.Cmd("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pName) +} + +func (s *DockerDaemonSuite) TestPluginListFilterCapability(c *check.C) { + testRequires(c, IsAmd64, Network) + + s.d.Start(c) + + out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pNameWithTag, "--disable") + c.Assert(err, check.IsNil, check.Commentf(out)) + + defer func() { + if out, err := s.d.Cmd("plugin", "remove", pNameWithTag); err != nil { + c.Fatalf("Could not remove plugin: %v %s", err, out) + } + }() + + out, err = s.d.Cmd("plugin", "ls", "--filter", "capability=volumedriver") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pName) + + out, err = s.d.Cmd("plugin", "ls", "--filter", "capability=authz") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), pName) + + out, err = s.d.Cmd("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pName) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_daemon_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_daemon_test.go new file mode 100644 index 000000000..6a98cabdf --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_daemon_test.go @@ -0,0 +1,3028 @@ +// +build 
linux + +package main + +import ( + "bufio" + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "os/exec" + "path" + "path/filepath" + "regexp" + "strconv" + "strings" + "sync" + "time" + + "crypto/tls" + "crypto/x509" + + "github.com/cloudflare/cfssl/helpers" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/daemon" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/testutil" + icmd "github.com/docker/docker/pkg/testutil/cmd" + units "github.com/docker/go-units" + "github.com/docker/libnetwork/iptables" + "github.com/docker/libtrust" + "github.com/go-check/check" + "github.com/kr/pty" + "golang.org/x/sys/unix" +) + +// TestLegacyDaemonCommand test starting docker daemon using "deprecated" docker daemon +// command. Remove this test when we remove this. +func (s *DockerDaemonSuite) TestLegacyDaemonCommand(c *check.C) { + cmd := exec.Command(dockerBinary, "daemon", "--storage-driver=vfs", "--debug") + err := cmd.Start() + c.Assert(err, checker.IsNil, check.Commentf("could not start daemon using 'docker daemon'")) + + c.Assert(cmd.Process.Kill(), checker.IsNil) +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithRunningContainersPorts(c *check.C) { + s.d.StartWithBusybox(c) + + cli.Docker( + cli.Args("run", "-d", "--name", "top1", "-p", "1234:80", "--restart", "always", "busybox:latest", "top"), + cli.Daemon(s.d), + ).Assert(c, icmd.Success) + + cli.Docker( + cli.Args("run", "-d", "--name", "top2", "-p", "80", "busybox:latest", "top"), + cli.Daemon(s.d), + ).Assert(c, icmd.Success) + + testRun := func(m map[string]bool, prefix string) { + var format string + for cont, shouldRun := range m { + out := cli.Docker(cli.Args("ps"), cli.Daemon(s.d)).Assert(c, icmd.Success).Combined() + if shouldRun { + format = "%scontainer %q is not running" + } else { + format = "%scontainer %q is running" + } + if shouldRun != strings.Contains(out, cont) { + c.Fatalf(format, prefix, cont) + } + } + } + + testRun(map[string]bool{"top1": true, "top2": true}, "") + + s.d.Restart(c) + testRun(map[string]bool{"top1": true, "top2": false}, "After daemon restart: ") +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithVolumesRefs(c *check.C) { + s.d.StartWithBusybox(c) + + if out, err := s.d.Cmd("run", "--name", "volrestarttest1", "-v", "/foo", "busybox"); err != nil { + c.Fatal(err, out) + } + + s.d.Restart(c) + + if out, err := s.d.Cmd("run", "-d", "--volumes-from", "volrestarttest1", "--name", "volrestarttest2", "busybox", "top"); err != nil { + c.Fatal(err, out) + } + + if out, err := s.d.Cmd("rm", "-fv", "volrestarttest2"); err != nil { + c.Fatal(err, out) + } + + out, err := s.d.Cmd("inspect", "-f", "{{json .Mounts}}", "volrestarttest1") + c.Assert(err, check.IsNil) + + if _, err := inspectMountPointJSON(out, "/foo"); err != nil { + c.Fatalf("Expected volume to exist: /foo, error: %v\n", err) + } +} + +// #11008 +func (s *DockerDaemonSuite) TestDaemonRestartUnlessStopped(c *check.C) { + s.d.StartWithBusybox(c) + + out, err := s.d.Cmd("run", "-d", "--name", "top1", "--restart", "always", "busybox:latest", "top") + c.Assert(err, check.IsNil, check.Commentf("run top1: %v", out)) + + out, err = s.d.Cmd("run", "-d", "--name", "top2", "--restart", "unless-stopped", "busybox:latest", 
"top") + c.Assert(err, check.IsNil, check.Commentf("run top2: %v", out)) + + testRun := func(m map[string]bool, prefix string) { + var format string + for name, shouldRun := range m { + out, err := s.d.Cmd("ps") + c.Assert(err, check.IsNil, check.Commentf("run ps: %v", out)) + if shouldRun { + format = "%scontainer %q is not running" + } else { + format = "%scontainer %q is running" + } + c.Assert(strings.Contains(out, name), check.Equals, shouldRun, check.Commentf(format, prefix, name)) + } + } + + // both running + testRun(map[string]bool{"top1": true, "top2": true}, "") + + out, err = s.d.Cmd("stop", "top1") + c.Assert(err, check.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("stop", "top2") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // both stopped + testRun(map[string]bool{"top1": false, "top2": false}, "") + + s.d.Restart(c) + + // restart=always running + testRun(map[string]bool{"top1": true, "top2": false}, "After daemon restart: ") + + out, err = s.d.Cmd("start", "top2") + c.Assert(err, check.IsNil, check.Commentf("start top2: %v", out)) + + s.d.Restart(c) + + // both running + testRun(map[string]bool{"top1": true, "top2": true}, "After second daemon restart: ") + +} + +func (s *DockerDaemonSuite) TestDaemonRestartOnFailure(c *check.C) { + s.d.StartWithBusybox(c) + + out, err := s.d.Cmd("run", "-d", "--name", "test1", "--restart", "on-failure:3", "busybox:latest", "false") + c.Assert(err, check.IsNil, check.Commentf("run top1: %v", out)) + + // wait test1 to stop + hostArgs := []string{"--host", s.d.Sock()} + err = waitInspectWithArgs("test1", "{{.State.Running}} {{.State.Restarting}}", "false false", 10*time.Second, hostArgs...) + c.Assert(err, checker.IsNil, check.Commentf("test1 should exit but not")) + + // record last start time + out, err = s.d.Cmd("inspect", "-f={{.State.StartedAt}}", "test1") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + lastStartTime := out + + s.d.Restart(c) + + // test1 shouldn't restart at all + err = waitInspectWithArgs("test1", "{{.State.Running}} {{.State.Restarting}}", "false false", 0, hostArgs...) + c.Assert(err, checker.IsNil, check.Commentf("test1 should exit but not")) + + // make sure test1 isn't restarted when daemon restart + // if "StartAt" time updates, means test1 was once restarted. + out, err = s.d.Cmd("inspect", "-f={{.State.StartedAt}}", "test1") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + c.Assert(out, checker.Equals, lastStartTime, check.Commentf("test1 shouldn't start after daemon restarts")) +} + +func (s *DockerDaemonSuite) TestDaemonStartIptablesFalse(c *check.C) { + s.d.Start(c, "--iptables=false") +} + +// Make sure we cannot shrink base device at daemon restart. +func (s *DockerDaemonSuite) TestDaemonRestartWithInvalidBasesize(c *check.C) { + testRequires(c, Devicemapper) + s.d.Start(c) + + oldBasesizeBytes := s.d.GetBaseDeviceSize(c) + var newBasesizeBytes int64 = 1073741824 //1GB in bytes + + if newBasesizeBytes < oldBasesizeBytes { + err := s.d.RestartWithError("--storage-opt", fmt.Sprintf("dm.basesize=%d", newBasesizeBytes)) + c.Assert(err, check.NotNil, check.Commentf("daemon should not have started as new base device size is less than existing base device size: %v", err)) + // 'err != nil' is expected behaviour, no new daemon started, + // so no need to stop daemon. + if err != nil { + return + } + } + s.d.Stop(c) +} + +// Make sure we can grow base device at daemon restart. 
+func (s *DockerDaemonSuite) TestDaemonRestartWithIncreasedBasesize(c *check.C) { + testRequires(c, Devicemapper) + s.d.Start(c) + + oldBasesizeBytes := s.d.GetBaseDeviceSize(c) + + var newBasesizeBytes int64 = 53687091200 //50GB in bytes + + if newBasesizeBytes < oldBasesizeBytes { + c.Skip(fmt.Sprintf("New base device size (%v) must be greater than (%s)", units.HumanSize(float64(newBasesizeBytes)), units.HumanSize(float64(oldBasesizeBytes)))) + } + + err := s.d.RestartWithError("--storage-opt", fmt.Sprintf("dm.basesize=%d", newBasesizeBytes)) + c.Assert(err, check.IsNil, check.Commentf("we should have been able to start the daemon with increased base device size: %v", err)) + + basesizeAfterRestart := s.d.GetBaseDeviceSize(c) + newBasesize, err := convertBasesize(newBasesizeBytes) + c.Assert(err, check.IsNil, check.Commentf("Error in converting base device size: %v", err)) + c.Assert(newBasesize, check.Equals, basesizeAfterRestart, check.Commentf("Basesize passed is not equal to Basesize set")) + s.d.Stop(c) +} + +func convertBasesize(basesizeBytes int64) (int64, error) { + basesize := units.HumanSize(float64(basesizeBytes)) + basesize = strings.Trim(basesize, " ")[:len(basesize)-3] + basesizeFloat, err := strconv.ParseFloat(strings.Trim(basesize, " "), 64) + if err != nil { + return 0, err + } + return int64(basesizeFloat) * 1024 * 1024 * 1024, nil +} + +// Issue #8444: If docker0 bridge is modified (intentionally or unintentionally) and +// no longer has an IP associated, we should gracefully handle that case and associate +// an IP with it rather than fail daemon start +func (s *DockerDaemonSuite) TestDaemonStartBridgeWithoutIPAssociation(c *check.C) { + // rather than depending on brctl commands to verify docker0 is created and up + // let's start the daemon and stop it, and then make a modification to run the + // actual test + s.d.Start(c) + s.d.Stop(c) + + // now we will remove the ip from docker0 and then try starting the daemon + icmd.RunCommand("ip", "addr", "flush", "dev", "docker0").Assert(c, icmd.Success) + + if err := s.d.StartWithError(); err != nil { + warning := "**WARNING: Docker bridge network in bad state--delete docker0 bridge interface to fix" + c.Fatalf("Could not start daemon when docker0 has no IP address: %v\n%s", err, warning) + } +} + +func (s *DockerDaemonSuite) TestDaemonIptablesClean(c *check.C) { + s.d.StartWithBusybox(c) + + if out, err := s.d.Cmd("run", "-d", "--name", "top", "-p", "80", "busybox:latest", "top"); err != nil { + c.Fatalf("Could not run top: %s, %v", out, err) + } + + ipTablesSearchString := "tcp dpt:80" + + // get output from iptables with container running + verifyIPTablesContains(c, ipTablesSearchString) + + s.d.Stop(c) + + // get output from iptables after restart + verifyIPTablesDoesNotContains(c, ipTablesSearchString) +} + +func (s *DockerDaemonSuite) TestDaemonIptablesCreate(c *check.C) { + s.d.StartWithBusybox(c) + + if out, err := s.d.Cmd("run", "-d", "--name", "top", "--restart=always", "-p", "80", "busybox:latest", "top"); err != nil { + c.Fatalf("Could not run top: %s, %v", out, err) + } + + // get output from iptables with container running + ipTablesSearchString := "tcp dpt:80" + verifyIPTablesContains(c, ipTablesSearchString) + + s.d.Restart(c) + + // make sure the container is not running + runningOut, err := s.d.Cmd("inspect", "--format={{.State.Running}}", "top") + if err != nil { + c.Fatalf("Could not inspect on container: %s, %v", runningOut, err) + } + if strings.TrimSpace(runningOut) != "true" { + c.Fatalf("Container 
should have been restarted after daemon restart. Status running should have been true but was: %q", strings.TrimSpace(runningOut)) + } + + // get output from iptables after restart + verifyIPTablesContains(c, ipTablesSearchString) +} + +func verifyIPTablesContains(c *check.C, ipTablesSearchString string) { + result := icmd.RunCommand("iptables", "-nvL") + result.Assert(c, icmd.Success) + if !strings.Contains(result.Combined(), ipTablesSearchString) { + c.Fatalf("iptables output should have contained %q, but was %q", ipTablesSearchString, result.Combined()) + } +} + +func verifyIPTablesDoesNotContains(c *check.C, ipTablesSearchString string) { + result := icmd.RunCommand("iptables", "-nvL") + result.Assert(c, icmd.Success) + if strings.Contains(result.Combined(), ipTablesSearchString) { + c.Fatalf("iptables output should not have contained %q, but was %q", ipTablesSearchString, result.Combined()) + } +} + +// TestDaemonIPv6Enabled checks that when the daemon is started with --ipv6=true that the docker0 bridge +// has the fe80::1 address and that a container is assigned a link-local address +func (s *DockerDaemonSuite) TestDaemonIPv6Enabled(c *check.C) { + testRequires(c, IPv6) + + setupV6(c) + defer teardownV6(c) + + s.d.StartWithBusybox(c, "--ipv6") + + iface, err := net.InterfaceByName("docker0") + if err != nil { + c.Fatalf("Error getting docker0 interface: %v", err) + } + + addrs, err := iface.Addrs() + if err != nil { + c.Fatalf("Error getting addresses for docker0 interface: %v", err) + } + + var found bool + expected := "fe80::1/64" + + for i := range addrs { + if addrs[i].String() == expected { + found = true + break + } + } + + if !found { + c.Fatalf("Bridge does not have an IPv6 Address") + } + + if out, err := s.d.Cmd("run", "-itd", "--name=ipv6test", "busybox:latest"); err != nil { + c.Fatalf("Could not run container: %s, %v", out, err) + } + + out, err := s.d.Cmd("inspect", "--format", "'{{.NetworkSettings.Networks.bridge.LinkLocalIPv6Address}}'", "ipv6test") + out = strings.Trim(out, " \r\n'") + + if err != nil { + c.Fatalf("Error inspecting container: %s, %v", out, err) + } + + if ip := net.ParseIP(out); ip == nil { + c.Fatalf("Container should have a link-local IPv6 address") + } + + out, err = s.d.Cmd("inspect", "--format", "'{{.NetworkSettings.Networks.bridge.GlobalIPv6Address}}'", "ipv6test") + out = strings.Trim(out, " \r\n'") + + if err != nil { + c.Fatalf("Error inspecting container: %s, %v", out, err) + } + + if ip := net.ParseIP(out); ip != nil { + c.Fatalf("Container should not have a global IPv6 address: %v", out) + } +} + +// TestDaemonIPv6FixedCIDR checks that when the daemon is started with --ipv6=true and a fixed CIDR +// that running containers are given a link-local and global IPv6 address +func (s *DockerDaemonSuite) TestDaemonIPv6FixedCIDR(c *check.C) { + // IPv6 setup is messing with local bridge address. + testRequires(c, SameHostDaemon) + // Delete the docker0 bridge if its left around from previous daemon. 
It has to be recreated with + // ipv6 enabled + deleteInterface(c, "docker0") + + s.d.StartWithBusybox(c, "--ipv6", "--fixed-cidr-v6=2001:db8:2::/64", "--default-gateway-v6=2001:db8:2::100") + + out, err := s.d.Cmd("run", "-itd", "--name=ipv6test", "busybox:latest") + c.Assert(err, checker.IsNil, check.Commentf("Could not run container: %s, %v", out, err)) + + out, err = s.d.Cmd("inspect", "--format", "{{.NetworkSettings.Networks.bridge.GlobalIPv6Address}}", "ipv6test") + out = strings.Trim(out, " \r\n'") + + c.Assert(err, checker.IsNil, check.Commentf(out)) + + ip := net.ParseIP(out) + c.Assert(ip, checker.NotNil, check.Commentf("Container should have a global IPv6 address")) + + out, err = s.d.Cmd("inspect", "--format", "{{.NetworkSettings.Networks.bridge.IPv6Gateway}}", "ipv6test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + c.Assert(strings.Trim(out, " \r\n'"), checker.Equals, "2001:db8:2::100", check.Commentf("Container should have a global IPv6 gateway")) +} + +// TestDaemonIPv6FixedCIDRAndMac checks that when the daemon is started with an IPv6 fixed CIDR, +// running containers are given an IPv6 address derived from the MAC address and the fixed CIDR +func (s *DockerDaemonSuite) TestDaemonIPv6FixedCIDRAndMac(c *check.C) { + // IPv6 setup is messing with local bridge address. + testRequires(c, SameHostDaemon) + // Delete the docker0 bridge if it's left around from a previous daemon. It has to be recreated with + // ipv6 enabled + deleteInterface(c, "docker0") + + s.d.StartWithBusybox(c, "--ipv6", "--fixed-cidr-v6=2001:db8:1::/64") + + out, err := s.d.Cmd("run", "-itd", "--name=ipv6test", "--mac-address", "AA:BB:CC:DD:EE:FF", "busybox") + c.Assert(err, checker.IsNil) + + out, err = s.d.Cmd("inspect", "--format", "{{.NetworkSettings.Networks.bridge.GlobalIPv6Address}}", "ipv6test") + c.Assert(err, checker.IsNil) + c.Assert(strings.Trim(out, " \r\n'"), checker.Equals, "2001:db8:1::aabb:ccdd:eeff") +} + +// TestDaemonIPv6HostMode checks that when running a container with +// network=host, the host's IPv6 addresses are not removed +func (s *DockerDaemonSuite) TestDaemonIPv6HostMode(c *check.C) { + testRequires(c, SameHostDaemon) + deleteInterface(c, "docker0") + + s.d.StartWithBusybox(c, "--ipv6", "--fixed-cidr-v6=2001:db8:2::/64") + out, err := s.d.Cmd("run", "-itd", "--name=hostcnt", "--network=host", "busybox:latest") + c.Assert(err, checker.IsNil, check.Commentf("Could not run container: %s, %v", out, err)) + + out, err = s.d.Cmd("exec", "hostcnt", "ip", "-6", "addr", "show", "docker0") + out = strings.Trim(out, " \r\n'") + + c.Assert(out, checker.Contains, "2001:db8:2::1") +} + +func (s *DockerDaemonSuite) TestDaemonLogLevelWrong(c *check.C) { + c.Assert(s.d.StartWithError("--log-level=bogus"), check.NotNil, check.Commentf("Daemon shouldn't start with a wrong log level")) +} + +func (s *DockerDaemonSuite) TestDaemonLogLevelDebug(c *check.C) { + s.d.Start(c, "--log-level=debug") + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) + if !strings.Contains(string(content), `level=debug`) { + c.Fatalf(`Missing level="debug" in log file:\n%s`, string(content)) + } +} + +func (s *DockerDaemonSuite) TestDaemonLogLevelFatal(c *check.C) { + // we start a new daemon here to get a fresh log file + s.d.Start(c, "--log-level=fatal") + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) + if strings.Contains(string(content), `level=debug`) { + c.Fatalf(`Should not have level="debug" in log file:\n%s`, string(content)) + } +} + +func (s *DockerDaemonSuite) 
TestDaemonFlagD(c *check.C) { + s.d.Start(c, "-D") + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) + if !strings.Contains(string(content), `level=debug`) { + c.Fatalf(`Should have level="debug" in log file using -D:\n%s`, string(content)) + } +} + +func (s *DockerDaemonSuite) TestDaemonFlagDebug(c *check.C) { + s.d.Start(c, "--debug") + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) + if !strings.Contains(string(content), `level=debug`) { + c.Fatalf(`Should have level="debug" in log file using --debug:\n%s`, string(content)) + } +} + +func (s *DockerDaemonSuite) TestDaemonFlagDebugLogLevelFatal(c *check.C) { + s.d.Start(c, "--debug", "--log-level=fatal") + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) + if !strings.Contains(string(content), `level=debug`) { + c.Fatalf(`Should have level="debug" in log file when using both --debug and --log-level=fatal:\n%s`, string(content)) + } +} + +func (s *DockerDaemonSuite) TestDaemonAllocatesListeningPort(c *check.C) { + listeningPorts := [][]string{ + {"0.0.0.0", "0.0.0.0", "5678"}, + {"127.0.0.1", "127.0.0.1", "1234"}, + {"localhost", "127.0.0.1", "1235"}, + } + + cmdArgs := make([]string, 0, len(listeningPorts)*2) + for _, hostDirective := range listeningPorts { + cmdArgs = append(cmdArgs, "--host", fmt.Sprintf("tcp://%s:%s", hostDirective[0], hostDirective[2])) + } + + s.d.StartWithBusybox(c, cmdArgs...) + + for _, hostDirective := range listeningPorts { + output, err := s.d.Cmd("run", "-p", fmt.Sprintf("%s:%s:80", hostDirective[1], hostDirective[2]), "busybox", "true") + if err == nil { + c.Fatalf("Container should not start, expected port already allocated error: %q", output) + } else if !strings.Contains(output, "port is already allocated") { + c.Fatalf("Expected port is already allocated error: %q", output) + } + } +} + +func (s *DockerDaemonSuite) TestDaemonKeyGeneration(c *check.C) { + // TODO: skip or update for Windows daemon + os.Remove("/etc/docker/key.json") + s.d.Start(c) + s.d.Stop(c) + + k, err := libtrust.LoadKeyFile("/etc/docker/key.json") + if err != nil { + c.Fatalf("Error opening key file") + } + kid := k.KeyID() + // Test Key ID is a valid fingerprint (e.g. 
QQXN:JY5W:TBXI:MK3X:GX6P:PD5D:F56N:NHCS:LVRZ:JA46:R24J:XEFF) + if len(kid) != 59 { + c.Fatalf("Bad key ID: %s", kid) + } +} + +// GH#11320 - verify that the daemon exits on failure properly +// Note that this explicitly tests the conflict of {-b,--bridge} and {--bip} options as the means +// to get a daemon init failure; no other tests for -b/--bip conflict are therefore required +func (s *DockerDaemonSuite) TestDaemonExitOnFailure(c *check.C) { + //attempt to start daemon with incorrect flags (we know -b and --bip conflict) + if err := s.d.StartWithError("--bridge", "nosuchbridge", "--bip", "1.1.1.1"); err != nil { + //verify we got the right error + if !strings.Contains(err.Error(), "Daemon exited") { + c.Fatalf("Expected daemon not to start, got %v", err) + } + // look in the log and make sure we got the message that daemon is shutting down + icmd.RunCommand("grep", "Error starting daemon", s.d.LogFileName()).Assert(c, icmd.Success) + } else { + //if we didn't get an error and the daemon is running, this is a failure + c.Fatal("Conflicting options should cause the daemon to error out with a failure") + } +} + +func (s *DockerDaemonSuite) TestDaemonBridgeExternal(c *check.C) { + d := s.d + err := d.StartWithError("--bridge", "nosuchbridge") + c.Assert(err, check.NotNil, check.Commentf("--bridge option with an invalid bridge should cause the daemon to fail")) + defer d.Restart(c) + + bridgeName := "external-bridge" + bridgeIP := "192.169.1.1/24" + _, bridgeIPNet, _ := net.ParseCIDR(bridgeIP) + + createInterface(c, "bridge", bridgeName, bridgeIP) + defer deleteInterface(c, bridgeName) + + d.StartWithBusybox(c, "--bridge", bridgeName) + + ipTablesSearchString := bridgeIPNet.String() + icmd.RunCommand("iptables", "-t", "nat", "-nvL").Assert(c, icmd.Expected{ + Out: ipTablesSearchString, + }) + + _, err = d.Cmd("run", "-d", "--name", "ExtContainer", "busybox", "top") + c.Assert(err, check.IsNil) + + containerIP, err := d.FindContainerIP("ExtContainer") + c.Assert(err, checker.IsNil) + ip := net.ParseIP(containerIP) + c.Assert(bridgeIPNet.Contains(ip), check.Equals, true, + check.Commentf("Container IP-Address must be in the same subnet range : %s", + containerIP)) +} + +func (s *DockerDaemonSuite) TestDaemonBridgeNone(c *check.C) { + // start with bridge none + d := s.d + d.StartWithBusybox(c, "--bridge", "none") + defer d.Restart(c) + + // verify docker0 iface is not there + icmd.RunCommand("ifconfig", "docker0").Assert(c, icmd.Expected{ + ExitCode: 1, + Error: "exit status 1", + Err: "Device not found", + }) + + // verify default "bridge" network is not there + out, err := d.Cmd("network", "inspect", "bridge") + c.Assert(err, check.NotNil, check.Commentf("\"bridge\" network should not be present if daemon started with --bridge=none")) + c.Assert(strings.Contains(out, "No such network"), check.Equals, true) +} + +func createInterface(c *check.C, ifType string, ifName string, ipNet string) { + icmd.RunCommand("ip", "link", "add", "name", ifName, "type", ifType).Assert(c, icmd.Success) + icmd.RunCommand("ifconfig", ifName, ipNet, "up").Assert(c, icmd.Success) +} + +func deleteInterface(c *check.C, ifName string) { + icmd.RunCommand("ip", "link", "delete", ifName).Assert(c, icmd.Success) + icmd.RunCommand("iptables", "-t", "nat", "--flush").Assert(c, icmd.Success) + icmd.RunCommand("iptables", "--flush").Assert(c, icmd.Success) +} + +func (s *DockerDaemonSuite) TestDaemonBridgeIP(c *check.C) { + // TestDaemonBridgeIP Steps + // 1. Delete the existing docker0 Bridge + // 2. 
Set --bip daemon configuration and start the new Docker Daemon + // 3. Check if the bip config has taken effect using ifconfig and iptables commands + // 4. Launch a Container and make sure the IP-Address is in the expected subnet + // 5. Delete the docker0 Bridge + // 6. Restart the Docker Daemon (via deferred action) + // This Restart takes care of bringing docker0 interface back to auto-assigned IP + + defaultNetworkBridge := "docker0" + deleteInterface(c, defaultNetworkBridge) + + d := s.d + + bridgeIP := "192.169.1.1/24" + ip, bridgeIPNet, _ := net.ParseCIDR(bridgeIP) + + d.StartWithBusybox(c, "--bip", bridgeIP) + defer d.Restart(c) + + ifconfigSearchString := ip.String() + icmd.RunCommand("ifconfig", defaultNetworkBridge).Assert(c, icmd.Expected{ + Out: ifconfigSearchString, + }) + + ipTablesSearchString := bridgeIPNet.String() + icmd.RunCommand("iptables", "-t", "nat", "-nvL").Assert(c, icmd.Expected{ + Out: ipTablesSearchString, + }) + + _, err := d.Cmd("run", "-d", "--name", "test", "busybox", "top") + c.Assert(err, check.IsNil) + + containerIP, err := d.FindContainerIP("test") + c.Assert(err, checker.IsNil) + ip = net.ParseIP(containerIP) + c.Assert(bridgeIPNet.Contains(ip), check.Equals, true, + check.Commentf("Container IP-Address must be in the same subnet range : %s", + containerIP)) + deleteInterface(c, defaultNetworkBridge) +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithBridgeIPChange(c *check.C) { + s.d.Start(c) + defer s.d.Restart(c) + s.d.Stop(c) + + // now we will change the docker0's IP and then try starting the daemon + bridgeIP := "192.169.100.1/24" + _, bridgeIPNet, _ := net.ParseCIDR(bridgeIP) + + icmd.RunCommand("ifconfig", "docker0", bridgeIP).Assert(c, icmd.Success) + + s.d.Start(c, "--bip", bridgeIP) + + //check if the iptables contains new bridgeIP MASQUERADE rule + ipTablesSearchString := bridgeIPNet.String() + icmd.RunCommand("iptables", "-t", "nat", "-nvL").Assert(c, icmd.Expected{ + Out: ipTablesSearchString, + }) +} + +func (s *DockerDaemonSuite) TestDaemonBridgeFixedCidr(c *check.C) { + d := s.d + + bridgeName := "external-bridge" + bridgeIP := "192.169.1.1/24" + + createInterface(c, "bridge", bridgeName, bridgeIP) + defer deleteInterface(c, bridgeName) + + args := []string{"--bridge", bridgeName, "--fixed-cidr", "192.169.1.0/30"} + d.StartWithBusybox(c, args...) 
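+	// The fixed-cidr sub-pool 192.169.1.0/30 covers only four addresses,
+	// and the bridge gateway (192.169.1.1) comes out of it too, so the
+	// pool exhausts before four containers start; the loop below accepts
+	// a failure only if it is "no available IPv4 addresses".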
+ defer d.Restart(c) + + for i := 0; i < 4; i++ { + cName := "Container" + strconv.Itoa(i) + out, err := d.Cmd("run", "-d", "--name", cName, "busybox", "top") + if err != nil { + c.Assert(strings.Contains(out, "no available IPv4 addresses"), check.Equals, true, + check.Commentf("Could not run a Container : %s %s", err.Error(), out)) + } + } +} + +func (s *DockerDaemonSuite) TestDaemonBridgeFixedCidr2(c *check.C) { + d := s.d + + bridgeName := "external-bridge" + bridgeIP := "10.2.2.1/16" + + createInterface(c, "bridge", bridgeName, bridgeIP) + defer deleteInterface(c, bridgeName) + + d.StartWithBusybox(c, "--bip", bridgeIP, "--fixed-cidr", "10.2.2.0/24") + defer s.d.Restart(c) + + out, err := d.Cmd("run", "-d", "--name", "bb", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + defer d.Cmd("stop", "bb") + + out, err = d.Cmd("exec", "bb", "/bin/sh", "-c", "ifconfig eth0 | awk '/inet addr/{print substr($2,6)}'") + c.Assert(out, checker.Equals, "10.2.2.0\n") + + out, err = d.Cmd("run", "--rm", "busybox", "/bin/sh", "-c", "ifconfig eth0 | awk '/inet addr/{print substr($2,6)}'") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Equals, "10.2.2.2\n") +} + +func (s *DockerDaemonSuite) TestDaemonBridgeFixedCIDREqualBridgeNetwork(c *check.C) { + d := s.d + + bridgeName := "external-bridge" + bridgeIP := "172.27.42.1/16" + + createInterface(c, "bridge", bridgeName, bridgeIP) + defer deleteInterface(c, bridgeName) + + d.StartWithBusybox(c, "--bridge", bridgeName, "--fixed-cidr", bridgeIP) + defer s.d.Restart(c) + + out, err := d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf(out)) + cid1 := strings.TrimSpace(out) + defer d.Cmd("stop", cid1) +} + +func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4Implicit(c *check.C) { + defaultNetworkBridge := "docker0" + deleteInterface(c, defaultNetworkBridge) + + d := s.d + + bridgeIP := "192.169.1.1" + bridgeIPNet := fmt.Sprintf("%s/24", bridgeIP) + + d.StartWithBusybox(c, "--bip", bridgeIPNet) + defer d.Restart(c) + + expectedMessage := fmt.Sprintf("default via %s dev", bridgeIP) + out, err := d.Cmd("run", "busybox", "ip", "-4", "route", "list", "0/0") + c.Assert(err, checker.IsNil) + c.Assert(strings.Contains(out, expectedMessage), check.Equals, true, + check.Commentf("Implicit default gateway should be bridge IP %s, but default route was '%s'", + bridgeIP, strings.TrimSpace(out))) + deleteInterface(c, defaultNetworkBridge) +} + +func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4Explicit(c *check.C) { + defaultNetworkBridge := "docker0" + deleteInterface(c, defaultNetworkBridge) + + d := s.d + + bridgeIP := "192.169.1.1" + bridgeIPNet := fmt.Sprintf("%s/24", bridgeIP) + gatewayIP := "192.169.1.254" + + d.StartWithBusybox(c, "--bip", bridgeIPNet, "--default-gateway", gatewayIP) + defer d.Restart(c) + + expectedMessage := fmt.Sprintf("default via %s dev", gatewayIP) + out, err := d.Cmd("run", "busybox", "ip", "-4", "route", "list", "0/0") + c.Assert(err, checker.IsNil) + c.Assert(strings.Contains(out, expectedMessage), check.Equals, true, + check.Commentf("Explicit default gateway should be %s, but default route was '%s'", + gatewayIP, strings.TrimSpace(out))) + deleteInterface(c, defaultNetworkBridge) +} + +func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4ExplicitOutsideContainerSubnet(c *check.C) { + defaultNetworkBridge := "docker0" + deleteInterface(c, defaultNetworkBridge) + + // Program a custom default gateway outside of the container subnet, daemon should 
accept it and start + s.d.StartWithBusybox(c, "--bip", "172.16.0.10/16", "--fixed-cidr", "172.16.1.0/24", "--default-gateway", "172.16.0.254") + + deleteInterface(c, defaultNetworkBridge) + s.d.Restart(c) +} + +func (s *DockerDaemonSuite) TestDaemonDefaultNetworkInvalidClusterConfig(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + + // Start daemon without docker0 bridge + defaultNetworkBridge := "docker0" + deleteInterface(c, defaultNetworkBridge) + + discoveryBackend := "consul://consuladdr:consulport/some/path" + s.d.Start(c, fmt.Sprintf("--cluster-store=%s", discoveryBackend)) + + // Start daemon with docker0 bridge + result := icmd.RunCommand("ifconfig", defaultNetworkBridge) + c.Assert(result, icmd.Matches, icmd.Success) + + s.d.Restart(c, fmt.Sprintf("--cluster-store=%s", discoveryBackend)) +} + +func (s *DockerDaemonSuite) TestDaemonIP(c *check.C) { + d := s.d + + ipStr := "192.170.1.1/24" + ip, _, _ := net.ParseCIDR(ipStr) + args := []string{"--ip", ip.String()} + d.StartWithBusybox(c, args...) + defer d.Restart(c) + + out, err := d.Cmd("run", "-d", "-p", "8000:8000", "busybox", "top") + c.Assert(err, check.NotNil, + check.Commentf("Running a container must fail with an invalid --ip option")) + c.Assert(strings.Contains(out, "Error starting userland proxy"), check.Equals, true) + + ifName := "dummy" + createInterface(c, "dummy", ifName, ipStr) + defer deleteInterface(c, ifName) + + _, err = d.Cmd("run", "-d", "-p", "8000:8000", "busybox", "top") + c.Assert(err, check.IsNil) + + result := icmd.RunCommand("iptables", "-t", "nat", "-nvL") + result.Assert(c, icmd.Success) + regex := fmt.Sprintf("DNAT.*%s.*dpt:8000", ip.String()) + matched, _ := regexp.MatchString(regex, result.Combined()) + c.Assert(matched, check.Equals, true, + check.Commentf("iptables output should have contained %q, but was %q", regex, result.Combined())) +} + +func (s *DockerDaemonSuite) TestDaemonICCPing(c *check.C) { + testRequires(c, bridgeNfIptables) + d := s.d + + bridgeName := "external-bridge" + bridgeIP := "192.169.1.1/24" + + createInterface(c, "bridge", bridgeName, bridgeIP) + defer deleteInterface(c, bridgeName) + + d.StartWithBusybox(c, "--bridge", bridgeName, "--icc=false") + defer d.Restart(c) + + result := icmd.RunCommand("iptables", "-nvL", "FORWARD") + result.Assert(c, icmd.Success) + regex := fmt.Sprintf("DROP.*all.*%s.*%s", bridgeName, bridgeName) + matched, _ := regexp.MatchString(regex, result.Combined()) + c.Assert(matched, check.Equals, true, + check.Commentf("iptables output should have contained %q, but was %q", regex, result.Combined())) + + // Pinging another container must fail with --icc=false + pingContainers(c, d, true) + + ipStr := "192.171.1.1/24" + ip, _, _ := net.ParseCIDR(ipStr) + ifName := "icc-dummy" + + createInterface(c, "dummy", ifName, ipStr) + + // But, Pinging external or a Host interface must succeed + pingCmd := fmt.Sprintf("ping -c 1 %s -W 1", ip.String()) + runArgs := []string{"run", "--rm", "busybox", "sh", "-c", pingCmd} + _, err := d.Cmd(runArgs...) 
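+	// The DROP rule asserted above only matches traffic that enters and
+	// leaves the same bridge (bridgeName -> bridgeName), so --icc=false
+	// blocks container-to-container pings while this ping to an address
+	// on a separate host interface is expected to succeed.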
+ c.Assert(err, check.IsNil) +} + +func (s *DockerDaemonSuite) TestDaemonICCLinkExpose(c *check.C) { + d := s.d + + bridgeName := "external-bridge" + bridgeIP := "192.169.1.1/24" + + createInterface(c, "bridge", bridgeName, bridgeIP) + defer deleteInterface(c, bridgeName) + + d.StartWithBusybox(c, "--bridge", bridgeName, "--icc=false") + defer d.Restart(c) + + result := icmd.RunCommand("iptables", "-nvL", "FORWARD") + result.Assert(c, icmd.Success) + regex := fmt.Sprintf("DROP.*all.*%s.*%s", bridgeName, bridgeName) + matched, _ := regexp.MatchString(regex, result.Combined()) + c.Assert(matched, check.Equals, true, + check.Commentf("iptables output should have contained %q, but was %q", regex, result.Combined())) + + out, err := d.Cmd("run", "-d", "--expose", "4567", "--name", "icc1", "busybox", "nc", "-l", "-p", "4567") + c.Assert(err, check.IsNil, check.Commentf(out)) + + out, err = d.Cmd("run", "--link", "icc1:icc1", "busybox", "nc", "icc1", "4567") + c.Assert(err, check.IsNil, check.Commentf(out)) +} + +func (s *DockerDaemonSuite) TestDaemonLinksIpTablesRulesWhenLinkAndUnlink(c *check.C) { + bridgeName := "external-bridge" + bridgeIP := "192.169.1.1/24" + + createInterface(c, "bridge", bridgeName, bridgeIP) + defer deleteInterface(c, bridgeName) + + s.d.StartWithBusybox(c, "--bridge", bridgeName, "--icc=false") + defer s.d.Restart(c) + + _, err := s.d.Cmd("run", "-d", "--name", "child", "--publish", "8080:80", "busybox", "top") + c.Assert(err, check.IsNil) + _, err = s.d.Cmd("run", "-d", "--name", "parent", "--link", "child:http", "busybox", "top") + c.Assert(err, check.IsNil) + + childIP, err := s.d.FindContainerIP("child") + c.Assert(err, checker.IsNil) + parentIP, err := s.d.FindContainerIP("parent") + c.Assert(err, checker.IsNil) + + sourceRule := []string{"-i", bridgeName, "-o", bridgeName, "-p", "tcp", "-s", childIP, "--sport", "80", "-d", parentIP, "-j", "ACCEPT"} + destinationRule := []string{"-i", bridgeName, "-o", bridgeName, "-p", "tcp", "-s", parentIP, "--dport", "80", "-d", childIP, "-j", "ACCEPT"} + if !iptables.Exists("filter", "DOCKER", sourceRule...) || !iptables.Exists("filter", "DOCKER", destinationRule...) { + c.Fatal("Iptables rules not found") + } + + s.d.Cmd("rm", "--link", "parent/http") + if iptables.Exists("filter", "DOCKER", sourceRule...) || iptables.Exists("filter", "DOCKER", destinationRule...) 
{ + c.Fatal("Iptables rules should be removed when unlink") + } + + s.d.Cmd("kill", "child") + s.d.Cmd("kill", "parent") +} + +func (s *DockerDaemonSuite) TestDaemonUlimitDefaults(c *check.C) { + testRequires(c, DaemonIsLinux) + + s.d.StartWithBusybox(c, "--default-ulimit", "nofile=42:42", "--default-ulimit", "nproc=1024:1024") + + out, err := s.d.Cmd("run", "--ulimit", "nproc=2048", "--name=test", "busybox", "/bin/sh", "-c", "echo $(ulimit -n); echo $(ulimit -p)") + if err != nil { + c.Fatal(out, err) + } + + outArr := strings.Split(out, "\n") + if len(outArr) < 2 { + c.Fatalf("got unexpected output: %s", out) + } + nofile := strings.TrimSpace(outArr[0]) + nproc := strings.TrimSpace(outArr[1]) + + if nofile != "42" { + c.Fatalf("expected `ulimit -n` to be `42`, got: %s", nofile) + } + if nproc != "2048" { + c.Fatalf("expected `ulimit -p` to be 2048, got: %s", nproc) + } + + // Now restart daemon with a new default + s.d.Restart(c, "--default-ulimit", "nofile=43") + + out, err = s.d.Cmd("start", "-a", "test") + if err != nil { + c.Fatal(err) + } + + outArr = strings.Split(out, "\n") + if len(outArr) < 2 { + c.Fatalf("got unexpected output: %s", out) + } + nofile = strings.TrimSpace(outArr[0]) + nproc = strings.TrimSpace(outArr[1]) + + if nofile != "43" { + c.Fatalf("expected `ulimit -n` to be `43`, got: %s", nofile) + } + if nproc != "2048" { + c.Fatalf("expected `ulimit -p` to be 2048, got: %s", nproc) + } +} + +// #11315 +func (s *DockerDaemonSuite) TestDaemonRestartRenameContainer(c *check.C) { + s.d.StartWithBusybox(c) + + if out, err := s.d.Cmd("run", "--name=test", "busybox"); err != nil { + c.Fatal(err, out) + } + + if out, err := s.d.Cmd("rename", "test", "test2"); err != nil { + c.Fatal(err, out) + } + + s.d.Restart(c) + + if out, err := s.d.Cmd("start", "test2"); err != nil { + c.Fatal(err, out) + } +} + +func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefault(c *check.C) { + s.d.StartWithBusybox(c) + + out, err := s.d.Cmd("run", "--name=test", "busybox", "echo", "testline") + c.Assert(err, check.IsNil, check.Commentf(out)) + id, err := s.d.GetIDByName("test") + c.Assert(err, check.IsNil) + + logPath := filepath.Join(s.d.Root, "containers", id, id+"-json.log") + + if _, err := os.Stat(logPath); err != nil { + c.Fatal(err) + } + f, err := os.Open(logPath) + if err != nil { + c.Fatal(err) + } + defer f.Close() + + var res struct { + Log string `json:"log"` + Stream string `json:"stream"` + Time time.Time `json:"time"` + } + if err := json.NewDecoder(f).Decode(&res); err != nil { + c.Fatal(err) + } + if res.Log != "testline\n" { + c.Fatalf("Unexpected log line: %q, expected: %q", res.Log, "testline\n") + } + if res.Stream != "stdout" { + c.Fatalf("Unexpected stream: %q, expected: %q", res.Stream, "stdout") + } + if !time.Now().After(res.Time) { + c.Fatalf("Log time %v in future", res.Time) + } +} + +func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefaultOverride(c *check.C) { + s.d.StartWithBusybox(c) + + out, err := s.d.Cmd("run", "--name=test", "--log-driver=none", "busybox", "echo", "testline") + if err != nil { + c.Fatal(out, err) + } + id, err := s.d.GetIDByName("test") + c.Assert(err, check.IsNil) + + logPath := filepath.Join(s.d.Root, "containers", id, id+"-json.log") + + if _, err := os.Stat(logPath); err == nil || !os.IsNotExist(err) { + c.Fatalf("%s shouldn't exits, error on Stat: %s", logPath, err) + } +} + +func (s *DockerDaemonSuite) TestDaemonLoggingDriverNone(c *check.C) { + s.d.StartWithBusybox(c, "--log-driver=none") + + out, err := s.d.Cmd("run", 
"--name=test", "busybox", "echo", "testline") + if err != nil { + c.Fatal(out, err) + } + id, err := s.d.GetIDByName("test") + c.Assert(err, check.IsNil) + + logPath := filepath.Join(s.d.Root, "containers", id, id+"-json.log") + + if _, err := os.Stat(logPath); err == nil || !os.IsNotExist(err) { + c.Fatalf("%s shouldn't exits, error on Stat: %s", logPath, err) + } +} + +func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneOverride(c *check.C) { + s.d.StartWithBusybox(c, "--log-driver=none") + + out, err := s.d.Cmd("run", "--name=test", "--log-driver=json-file", "busybox", "echo", "testline") + if err != nil { + c.Fatal(out, err) + } + id, err := s.d.GetIDByName("test") + c.Assert(err, check.IsNil) + + logPath := filepath.Join(s.d.Root, "containers", id, id+"-json.log") + + if _, err := os.Stat(logPath); err != nil { + c.Fatal(err) + } + f, err := os.Open(logPath) + if err != nil { + c.Fatal(err) + } + defer f.Close() + + var res struct { + Log string `json:"log"` + Stream string `json:"stream"` + Time time.Time `json:"time"` + } + if err := json.NewDecoder(f).Decode(&res); err != nil { + c.Fatal(err) + } + if res.Log != "testline\n" { + c.Fatalf("Unexpected log line: %q, expected: %q", res.Log, "testline\n") + } + if res.Stream != "stdout" { + c.Fatalf("Unexpected stream: %q, expected: %q", res.Stream, "stdout") + } + if !time.Now().After(res.Time) { + c.Fatalf("Log time %v in future", res.Time) + } +} + +func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneLogsError(c *check.C) { + s.d.StartWithBusybox(c, "--log-driver=none") + + out, err := s.d.Cmd("run", "--name=test", "busybox", "echo", "testline") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("logs", "test") + c.Assert(err, check.NotNil, check.Commentf("Logs should fail with 'none' driver")) + expected := `configured logging driver does not support reading` + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerDaemonSuite) TestDaemonLoggingDriverShouldBeIgnoredForBuild(c *check.C) { + s.d.StartWithBusybox(c, "--log-driver=splunk") + + out, err := s.d.Cmd("build") + out, code, err := s.d.BuildImageWithOut("busyboxs", ` + FROM busybox + RUN echo foo`, false) + comment := check.Commentf("Failed to build image. 
output %s, exitCode %d, err %v", out, code, err) + c.Assert(err, check.IsNil, comment) + c.Assert(code, check.Equals, 0, comment) + c.Assert(out, checker.Contains, "foo", comment) +} + +func (s *DockerDaemonSuite) TestDaemonUnixSockCleanedUp(c *check.C) { + dir, err := ioutil.TempDir("", "socket-cleanup-test") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(dir) + + sockPath := filepath.Join(dir, "docker.sock") + s.d.Start(c, "--host", "unix://"+sockPath) + + if _, err := os.Stat(sockPath); err != nil { + c.Fatal("socket does not exist") + } + + s.d.Stop(c) + + if _, err := os.Stat(sockPath); err == nil || !os.IsNotExist(err) { + c.Fatal("unix socket is not cleaned up") + } +} + +func (s *DockerDaemonSuite) TestDaemonWithWrongkey(c *check.C) { + type Config struct { + Crv string `json:"crv"` + D string `json:"d"` + Kid string `json:"kid"` + Kty string `json:"kty"` + X string `json:"x"` + Y string `json:"y"` + } + + os.Remove("/etc/docker/key.json") + s.d.Start(c) + s.d.Stop(c) + + config := &Config{} + bytes, err := ioutil.ReadFile("/etc/docker/key.json") + if err != nil { + c.Fatalf("Error reading key.json file: %s", err) + } + + // byte[] to Data-Struct + if err := json.Unmarshal(bytes, &config); err != nil { + c.Fatalf("Error Unmarshal: %s", err) + } + + //replace config.Kid with the fake value + config.Kid = "VSAJ:FUYR:X3H2:B2VZ:KZ6U:CJD5:K7BX:ZXHY:UZXT:P4FT:MJWG:HRJ4" + + // NEW Data-Struct to byte[] + newBytes, err := json.Marshal(&config) + if err != nil { + c.Fatalf("Error Marshal: %s", err) + } + + // write back + if err := ioutil.WriteFile("/etc/docker/key.json", newBytes, 0400); err != nil { + c.Fatalf("Error ioutil.WriteFile: %s", err) + } + + defer os.Remove("/etc/docker/key.json") + + if err := s.d.StartWithError(); err == nil { + c.Fatalf("It should not be successful to start daemon with wrong key: %v", err) + } + + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) + + if !strings.Contains(string(content), "Public Key ID does not match") { + c.Fatalf("Missing KeyID message from daemon logs: %s", string(content)) + } +} + +func (s *DockerDaemonSuite) TestDaemonRestartKillWait(c *check.C) { + s.d.StartWithBusybox(c) + + out, err := s.d.Cmd("run", "-id", "busybox", "/bin/cat") + if err != nil { + c.Fatalf("Could not run /bin/cat: err=%v\n%s", err, out) + } + containerID := strings.TrimSpace(out) + + if out, err := s.d.Cmd("kill", containerID); err != nil { + c.Fatalf("Could not kill %s: err=%v\n%s", containerID, err, out) + } + + s.d.Restart(c) + + errchan := make(chan error) + go func() { + if out, err := s.d.Cmd("wait", containerID); err != nil { + errchan <- fmt.Errorf("%v:\n%s", err, out) + } + close(errchan) + }() + + select { + case <-time.After(5 * time.Second): + c.Fatal("Waiting on a stopped (killed) container timed out") + case err := <-errchan: + if err != nil { + c.Fatal(err) + } + } +} + +// TestHTTPSInfo connects via two-way authenticated HTTPS to the info endpoint +func (s *DockerDaemonSuite) TestHTTPSInfo(c *check.C) { + const ( + testDaemonHTTPSAddr = "tcp://localhost:4271" + ) + + s.d.Start(c, + "--tlsverify", + "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/server-cert.pem", + "--tlskey", "fixtures/https/server-key.pem", + "-H", testDaemonHTTPSAddr) + + args := []string{ + "--host", testDaemonHTTPSAddr, + "--tlsverify", + "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/client-cert.pem", + "--tlskey", "fixtures/https/client-key.pem", + "info", + } + out, err := s.d.Cmd(args...) 
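+	// With --tlsverify on both sides this is mutual TLS: the daemon
+	// validates the client certificate against ca.pem and the client
+	// validates the server certificate, so "info" succeeds only when
+	// both chains verify.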
+ if err != nil { + c.Fatalf("Error Occurred: %s and output: %s", err, out) + } +} + +// TestHTTPSRun connects via two-way authenticated HTTPS to the create, attach, start, and wait endpoints. +// https://github.com/docker/docker/issues/19280 +func (s *DockerDaemonSuite) TestHTTPSRun(c *check.C) { + const ( + testDaemonHTTPSAddr = "tcp://localhost:4271" + ) + + s.d.StartWithBusybox(c, "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-cert.pem", + "--tlskey", "fixtures/https/server-key.pem", "-H", testDaemonHTTPSAddr) + + args := []string{ + "--host", testDaemonHTTPSAddr, + "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/client-cert.pem", + "--tlskey", "fixtures/https/client-key.pem", + "run", "busybox", "echo", "TLS response", + } + out, err := s.d.Cmd(args...) + if err != nil { + c.Fatalf("Error Occurred: %s and output: %s", err, out) + } + + if !strings.Contains(out, "TLS response") { + c.Fatalf("expected output to include `TLS response`, got %v", out) + } +} + +// TestTLSVerify verifies that --tlsverify=false turns on tls +func (s *DockerDaemonSuite) TestTLSVerify(c *check.C) { + out, err := exec.Command(dockerdBinary, "--tlsverify=false").CombinedOutput() + if err == nil || !strings.Contains(string(out), "Could not load X509 key pair") { + c.Fatalf("Daemon should not have started due to missing certs: %v\n%s", err, string(out)) + } +} + +// TestHTTPSInfoRogueCert connects via two-way authenticated HTTPS to the info endpoint +// by using a rogue client certificate and checks that it fails with the expected error. +func (s *DockerDaemonSuite) TestHTTPSInfoRogueCert(c *check.C) { + const ( + errBadCertificate = "bad certificate" + testDaemonHTTPSAddr = "tcp://localhost:4271" + ) + + s.d.Start(c, + "--tlsverify", + "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/server-cert.pem", + "--tlskey", "fixtures/https/server-key.pem", + "-H", testDaemonHTTPSAddr) + + args := []string{ + "--host", testDaemonHTTPSAddr, + "--tlsverify", + "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/client-rogue-cert.pem", + "--tlskey", "fixtures/https/client-rogue-key.pem", + "info", + } + out, err := s.d.Cmd(args...) + if err == nil || !strings.Contains(out, errBadCertificate) { + c.Fatalf("Expected err: %s, got instead: %s and output: %s", errBadCertificate, err, out) + } +} + +// TestHTTPSInfoRogueServerCert connects via two-way authenticated HTTPS to the info endpoint +// which provides a rogue server certificate and checks that it fails with the expected error +func (s *DockerDaemonSuite) TestHTTPSInfoRogueServerCert(c *check.C) { + const ( + errCaUnknown = "x509: certificate signed by unknown authority" + testDaemonRogueHTTPSAddr = "tcp://localhost:4272" + ) + s.d.Start(c, + "--tlsverify", + "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/server-rogue-cert.pem", + "--tlskey", "fixtures/https/server-rogue-key.pem", + "-H", testDaemonRogueHTTPSAddr) + + args := []string{ + "--host", testDaemonRogueHTTPSAddr, + "--tlsverify", + "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/client-rogue-cert.pem", + "--tlskey", "fixtures/https/client-rogue-key.pem", + "info", + } + out, err := s.d.Cmd(args...) 
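+	// Here the failure is on the client side: the server presents a
+	// certificate signed by a CA the client does not trust, so the
+	// connection should abort with x509's "certificate signed by
+	// unknown authority".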
+ if err == nil || !strings.Contains(out, errCaUnknown) { + c.Fatalf("Expected err: %s, got instead: %s and output: %s", errCaUnknown, err, out) + } +} + +func pingContainers(c *check.C, d *daemon.Daemon, expectFailure bool) { + var dargs []string + if d != nil { + dargs = []string{"--host", d.Sock()} + } + + args := append(dargs, "run", "-d", "--name", "container1", "busybox", "top") + dockerCmd(c, args...) + + args = append(dargs, "run", "--rm", "--link", "container1:alias1", "busybox", "sh", "-c") + pingCmd := "ping -c 1 %s -W 1" + args = append(args, fmt.Sprintf(pingCmd, "alias1")) + _, _, err := dockerCmdWithError(args...) + + if expectFailure { + c.Assert(err, check.NotNil) + } else { + c.Assert(err, check.IsNil) + } + + args = append(dargs, "rm", "-f", "container1") + dockerCmd(c, args...) +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithSocketAsVolume(c *check.C) { + s.d.StartWithBusybox(c) + + socket := filepath.Join(s.d.Folder, "docker.sock") + + out, err := s.d.Cmd("run", "--restart=always", "-v", socket+":/sock", "busybox") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + s.d.Restart(c) +} + +// os.Kill should kill daemon ungracefully, leaving behind container mounts. +// A subsequent daemon restart should clean up said mounts. +func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonAndContainerKill(c *check.C) { + d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: testEnv.ExperimentalDaemon(), + }) + d.StartWithBusybox(c) + + out, err := d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + id := strings.TrimSpace(out) + c.Assert(d.Signal(os.Kill), check.IsNil) + mountOut, err := ioutil.ReadFile("/proc/self/mountinfo") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) + + // container mounts should exist even after daemon has crashed. + comment := check.Commentf("%s should stay mounted from older daemon start:\nDaemon root repository %s\n%s", id, d.Root, mountOut) + c.Assert(strings.Contains(string(mountOut), id), check.Equals, true, comment) + + // kill the container + icmd.RunCommand(ctrBinary, "--address", "unix:///var/run/docker/libcontainerd/docker-containerd.sock", "containers", "kill", id).Assert(c, icmd.Success) + + // restart daemon. + d.Restart(c) + + // Now, container mounts should be gone. + mountOut, err = ioutil.ReadFile("/proc/self/mountinfo") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) + comment = check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, d.Root, mountOut) + c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, comment) + + d.Stop(c) +} + +// os.Interrupt should perform a graceful daemon shutdown and hence cleanup mounts. +func (s *DockerDaemonSuite) TestCleanupMountsAfterGracefulShutdown(c *check.C) { + d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: testEnv.ExperimentalDaemon(), + }) + d.StartWithBusybox(c) + + out, err := d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + id := strings.TrimSpace(out) + + // Send SIGINT and daemon should clean up + c.Assert(d.Signal(os.Interrupt), check.IsNil) + // Wait for the daemon to stop. 
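+	// d.Wait delivers the daemon process's exit result, so receiving
+	// from it blocks until shutdown has completed and the mount table
+	// read below reflects the post-shutdown state (assuming Wait is the
+	// test daemon's exit channel, as its use here suggests).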
+	c.Assert(<-d.Wait, checker.IsNil)
+
+	mountOut, err := ioutil.ReadFile("/proc/self/mountinfo")
+	c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut))
+
+	comment := check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, d.Root, mountOut)
+	c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, comment)
+}
+
+func (s *DockerDaemonSuite) TestRunContainerWithBridgeNone(c *check.C) {
+	testRequires(c, DaemonIsLinux, NotUserNamespace)
+	s.d.StartWithBusybox(c, "-b", "none")
+
+	out, err := s.d.Cmd("run", "--rm", "busybox", "ip", "l")
+	c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
+	c.Assert(strings.Contains(out, "eth0"), check.Equals, false,
+		check.Commentf("There should be no eth0 in the container in default (bridge) mode when the bridge network is disabled: %s", out))
+
+	out, err = s.d.Cmd("run", "--rm", "--net=bridge", "busybox", "ip", "l")
+	c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
+	c.Assert(strings.Contains(out, "eth0"), check.Equals, false,
+		check.Commentf("There should be no eth0 in the container in bridge mode when the bridge network is disabled: %s", out))
+	// the extra grep and awk clean up the output of `ip` to only list the number and name of
+	// interfaces, allowing for different versions of ip (e.g. inside and outside the container) to
+	// be used while still verifying that the interface list is the exact same
+	cmd := exec.Command("sh", "-c", "ip l | grep -E '^[0-9]+:' | awk -F: ' { print $1\":\"$2 } '")
+	stdout := bytes.NewBuffer(nil)
+	cmd.Stdout = stdout
+	if err := cmd.Run(); err != nil {
+		c.Fatal("Failed to get host network interfaces")
+	}
+	out, err = s.d.Cmd("run", "--rm", "--net=host", "busybox", "sh", "-c", "ip l | grep -E '^[0-9]+:' | awk -F: ' { print $1\":\"$2 } '")
+	c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
+	c.Assert(out, check.Equals, stdout.String(),
+		check.Commentf("The network interfaces in the container should be the same as the host's when --net=host is used and the bridge network is disabled: %s", out))
+}
+
+func (s *DockerDaemonSuite) TestDaemonRestartWithContainerRunning(t *check.C) {
+	s.d.StartWithBusybox(t)
+	if out, err := s.d.Cmd("run", "-d", "--name", "test", "busybox", "top"); err != nil {
+		t.Fatal(out, err)
+	}
+
+	s.d.Restart(t)
+	// Container 'test' should be removed without error
+	if out, err := s.d.Cmd("rm", "test"); err != nil {
+		t.Fatal(out, err)
+	}
+}
+
+func (s *DockerDaemonSuite) TestDaemonRestartCleanupNetns(c *check.C) {
+	s.d.StartWithBusybox(c)
+	out, err := s.d.Cmd("run", "--name", "netns", "-d", "busybox", "top")
+	if err != nil {
+		c.Fatal(out, err)
+	}
+
+	// Get sandbox key via inspect
+	out, err = s.d.Cmd("inspect", "--format", "'{{.NetworkSettings.SandboxKey}}'", "netns")
+	if err != nil {
+		c.Fatalf("Error inspecting container: %s, %v", out, err)
+	}
+	fileName := strings.Trim(out, " \r\n'")
+
+	if out, err := s.d.Cmd("stop", "netns"); err != nil {
+		c.Fatal(out, err)
+	}
+
+	// Test if the file still exists
+	icmd.RunCommand("stat", "-c", "%n", fileName).Assert(c, icmd.Expected{
+		Out: fileName,
+	})
+
+	// Remove the container and restart the daemon
+	if out, err := s.d.Cmd("rm", "netns"); err != nil {
+		c.Fatal(out, err)
+	}
+
+	s.d.Restart(c)
+
+	// Check again: the netns file should no longer exist
+	icmd.RunCommand("stat", "-c", "%n", fileName).Assert(c, icmd.Expected{
+		Err:      "No such file or directory",
+		ExitCode: 1,
+	})
+}
+
+// tests regression detailed in #13964 where the DOCKER_TLS_VERIFY env var is ignored
+func (s *DockerDaemonSuite) TestDaemonTLSVerifyIssue13964(c *check.C) {
+	host := "tcp://localhost:4271"
+	s.d.Start(c, "-H", host)
+	icmd.RunCmd(icmd.Cmd{
+		Command: []string{dockerBinary, "-H", host, "info"},
+		Env:     []string{"DOCKER_TLS_VERIFY=1", "DOCKER_CERT_PATH=fixtures/https"},
+	}).Assert(c, icmd.Expected{
+		ExitCode: 1,
+		Err:      "error during connect",
+	})
+}
+
+func setupV6(c *check.C) {
+	// Hack to get the right IPv6 address on docker0, which has already been created
+	result := icmd.RunCommand("ip", "addr", "add", "fe80::1/64", "dev", "docker0")
+	result.Assert(c, icmd.Success)
+}
+
+func teardownV6(c *check.C) {
+	result := icmd.RunCommand("ip", "addr", "del", "fe80::1/64", "dev", "docker0")
+	result.Assert(c, icmd.Success)
+}
+
+func (s *DockerDaemonSuite) TestDaemonRestartWithContainerWithRestartPolicyAlways(c *check.C) {
+	s.d.StartWithBusybox(c)
+
+	out, err := s.d.Cmd("run", "-d", "--restart", "always", "busybox", "top")
+	c.Assert(err, check.IsNil)
+	id := strings.TrimSpace(out)
+
+	_, err = s.d.Cmd("stop", id)
+	c.Assert(err, check.IsNil)
+	_, err = s.d.Cmd("wait", id)
+	c.Assert(err, check.IsNil)
+
+	out, err = s.d.Cmd("ps", "-q")
+	c.Assert(err, check.IsNil)
+	c.Assert(out, check.Equals, "")
+
+	s.d.Restart(c)
+
+	out, err = s.d.Cmd("ps", "-q")
+	c.Assert(err, check.IsNil)
+	c.Assert(strings.TrimSpace(out), check.Equals, id[:12])
+}
+
+func (s *DockerDaemonSuite) TestDaemonWideLogConfig(c *check.C) {
+	s.d.StartWithBusybox(c, "--log-opt=max-size=1k")
+	name := "logtest"
+	out, err := s.d.Cmd("run", "-d", "--log-opt=max-file=5", "--name", name, "busybox", "top")
+	c.Assert(err, check.IsNil, check.Commentf("Output: %s, err: %v", out, err))
+
+	out, err = s.d.Cmd("inspect", "-f", "{{ .HostConfig.LogConfig.Config }}", name)
+	c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
+	c.Assert(out, checker.Contains, "max-size:1k")
+	c.Assert(out, checker.Contains, "max-file:5")
+
+	out, err = s.d.Cmd("inspect", "-f", "{{ .HostConfig.LogConfig.Type }}", name)
+	c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
+	c.Assert(strings.TrimSpace(out), checker.Equals, "json-file")
+}
+
+func (s *DockerDaemonSuite) TestDaemonRestartWithPausedContainer(c *check.C) {
+	s.d.StartWithBusybox(c)
+	if out, err := s.d.Cmd("run", "-i", "-d", "--name", "test", "busybox", "top"); err != nil {
+		c.Fatal(err, out)
+	}
+	if out, err := s.d.Cmd("pause", "test"); err != nil {
+		c.Fatal(err, out)
+	}
+	s.d.Restart(c)
+
+	errchan := make(chan error)
+	go func() {
+		out, err := s.d.Cmd("start", "test")
+		if err != nil {
+			errchan <- fmt.Errorf("%v:\n%s", err, out)
+			return
+		}
+		name := strings.TrimSpace(out)
+		if name != "test" {
+			errchan <- fmt.Errorf("Failed to start paused container after daemon restart: expected 'test', but got '%s'", name)
+		}
+		close(errchan)
+	}()
+
+	select {
+	case <-time.After(5 * time.Second):
+		c.Fatal("Timed out waiting for the container to start")
+	case err := <-errchan:
+		if err != nil {
+			c.Fatal(err)
+		}
+	}
+}
+
+func (s *DockerDaemonSuite) TestDaemonRestartRmVolumeInUse(c *check.C) {
+	s.d.StartWithBusybox(c)
+
+	out, err := s.d.Cmd("create", "-v", "test:/foo", "busybox")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+
+	s.d.Restart(c)
+
+	out, err = s.d.Cmd("volume", "rm", "test")
+	c.Assert(err, check.NotNil, check.Commentf("should not be able to remove an in-use volume after daemon restart"))
+	c.Assert(out, checker.Contains, "in use")
+}
+
+func (s *DockerDaemonSuite) TestDaemonRestartLocalVolumes(c *check.C) {
+	s.d.Start(c)
+
+	_, err :=
s.d.Cmd("volume", "create", "test") + c.Assert(err, check.IsNil) + s.d.Restart(c) + + _, err = s.d.Cmd("volume", "inspect", "test") + c.Assert(err, check.IsNil) +} + +// FIXME(vdemeester) should be a unit test +func (s *DockerDaemonSuite) TestDaemonCorruptedLogDriverAddress(c *check.C) { + d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: testEnv.ExperimentalDaemon(), + }) + c.Assert(d.StartWithError("--log-driver=syslog", "--log-opt", "syslog-address=corrupted:42"), check.NotNil) + expected := "Failed to set log opts: syslog-address should be in form proto://address" + icmd.RunCommand("grep", expected, d.LogFileName()).Assert(c, icmd.Success) +} + +// FIXME(vdemeester) should be a unit test +func (s *DockerDaemonSuite) TestDaemonCorruptedFluentdAddress(c *check.C) { + d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: testEnv.ExperimentalDaemon(), + }) + c.Assert(d.StartWithError("--log-driver=fluentd", "--log-opt", "fluentd-address=corrupted:c"), check.NotNil) + expected := "Failed to set log opts: invalid fluentd-address corrupted:c: " + icmd.RunCommand("grep", expected, d.LogFileName()).Assert(c, icmd.Success) +} + +// FIXME(vdemeester) Use a new daemon instance instead of the Suite one +func (s *DockerDaemonSuite) TestDaemonStartWithoutHost(c *check.C) { + s.d.UseDefaultHost = true + defer func() { + s.d.UseDefaultHost = false + }() + s.d.Start(c) +} + +// FIXME(vdemeester) Use a new daemon instance instead of the Suite one +func (s *DockerDaemonSuite) TestDaemonStartWithDefaultTLSHost(c *check.C) { + s.d.UseDefaultTLSHost = true + defer func() { + s.d.UseDefaultTLSHost = false + }() + s.d.Start(c, + "--tlsverify", + "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/server-cert.pem", + "--tlskey", "fixtures/https/server-key.pem") + + // The client with --tlsverify should also use default host localhost:2376 + tmpHost := os.Getenv("DOCKER_HOST") + defer func() { + os.Setenv("DOCKER_HOST", tmpHost) + }() + + os.Setenv("DOCKER_HOST", "") + + out, _ := dockerCmd( + c, + "--tlsverify", + "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/client-cert.pem", + "--tlskey", "fixtures/https/client-key.pem", + "version", + ) + if !strings.Contains(out, "Server") { + c.Fatalf("docker version should return information of server side") + } + + // ensure when connecting to the server that only a single acceptable CA is requested + contents, err := ioutil.ReadFile("fixtures/https/ca.pem") + c.Assert(err, checker.IsNil) + rootCert, err := helpers.ParseCertificatePEM(contents) + c.Assert(err, checker.IsNil) + rootPool := x509.NewCertPool() + rootPool.AddCert(rootCert) + + var certRequestInfo *tls.CertificateRequestInfo + conn, err := tls.Dial("tcp", fmt.Sprintf("%s:%d", opts.DefaultHTTPHost, opts.DefaultTLSHTTPPort), &tls.Config{ + RootCAs: rootPool, + GetClientCertificate: func(cri *tls.CertificateRequestInfo) (*tls.Certificate, error) { + certRequestInfo = cri + cert, err := tls.LoadX509KeyPair("fixtures/https/client-cert.pem", "fixtures/https/client-key.pem") + if err != nil { + return nil, err + } + return &cert, nil + }, + }) + c.Assert(err, checker.IsNil) + conn.Close() + + c.Assert(certRequestInfo, checker.NotNil) + c.Assert(certRequestInfo.AcceptableCAs, checker.HasLen, 1) + c.Assert(certRequestInfo.AcceptableCAs[0], checker.DeepEquals, rootCert.RawSubject) +} + +func (s *DockerDaemonSuite) TestBridgeIPIsExcludedFromAllocatorPool(c *check.C) { + defaultNetworkBridge := "docker0" + deleteInterface(c, 
defaultNetworkBridge)
+
+	bridgeIP := "192.169.1.1"
+	bridgeRange := bridgeIP + "/30"
+
+	s.d.StartWithBusybox(c, "--bip", bridgeRange)
+	defer s.d.Restart(c)
+
+	var cont int
+	for {
+		contName := fmt.Sprintf("container%d", cont)
+		_, err := s.d.Cmd("run", "--name", contName, "-d", "busybox", "/bin/sleep", "2")
+		if err != nil {
+			// pool exhausted
+			break
+		}
+		ip, err := s.d.Cmd("inspect", "--format", "'{{.NetworkSettings.IPAddress}}'", contName)
+		c.Assert(err, check.IsNil)
+
+		c.Assert(ip, check.Not(check.Equals), bridgeIP)
+		cont++
+	}
+}
+
+// Test daemon for no space left on device error
+func (s *DockerDaemonSuite) TestDaemonNoSpaceLeftOnDeviceError(c *check.C) {
+	testRequires(c, SameHostDaemon, DaemonIsLinux, Network)
+
+	testDir, err := ioutil.TempDir("", "no-space-left-on-device-test")
+	c.Assert(err, checker.IsNil)
+	defer os.RemoveAll(testDir)
+	c.Assert(mount.MakeRShared(testDir), checker.IsNil)
+	defer mount.Unmount(testDir)
+
+	// create a 3MiB sparse image (dd with bs=1M seek=3 count=0) and mount it as the graph root.
+	// Why in a container? Because `mount` sometimes behaves weirdly and often fails outright on this
+	// test in debian:jessie (which is what the test suite runs under if run from the Makefile)
+	dockerCmd(c, "run", "--rm", "-v", testDir+":/test", "busybox", "sh", "-c", "dd of=/test/testfs.img bs=1M seek=3 count=0")
+	icmd.RunCommand("mkfs.ext4", "-F", filepath.Join(testDir, "testfs.img")).Assert(c, icmd.Success)
+
+	result := icmd.RunCommand("losetup", "-f", "--show", filepath.Join(testDir, "testfs.img"))
+	result.Assert(c, icmd.Success)
+	loopname := strings.TrimSpace(string(result.Combined()))
+	defer exec.Command("losetup", "-d", loopname).Run()
+
+	dockerCmd(c, "run", "--privileged", "--rm", "-v", testDir+":/test:shared", "busybox", "sh", "-c", fmt.Sprintf("mkdir -p /test/test-mount && mount -t ext4 -no loop,rw %v /test/test-mount", loopname))
+	defer mount.Unmount(filepath.Join(testDir, "test-mount"))
+
+	s.d.Start(c, "--data-root", filepath.Join(testDir, "test-mount"))
+	defer s.d.Stop(c)
+
+	// pull a repository large enough to fill the mount point
+	pullOut, err := s.d.Cmd("pull", "registry:2")
+	c.Assert(err, checker.NotNil, check.Commentf(pullOut))
+	c.Assert(pullOut, checker.Contains, "no space left on device")
+}
+
+// Test daemon restart with container links + auto restart
+func (s *DockerDaemonSuite) TestDaemonRestartContainerLinksRestart(c *check.C) {
+	s.d.StartWithBusybox(c)
+
+	parent1Args := []string{}
+	parent2Args := []string{}
+	wg := sync.WaitGroup{}
+	maxChildren := 10
+	chErr := make(chan error, maxChildren)
+
+	for i := 0; i < maxChildren; i++ {
+		wg.Add(1)
+		name := fmt.Sprintf("test%d", i)
+
+		if i < maxChildren/2 {
+			parent1Args = append(parent1Args, []string{"--link", name}...)
+		} else {
+			parent2Args = append(parent2Args, []string{"--link", name}...)
+		}
+
+		go func() {
+			_, err := s.d.Cmd("run", "-d", "--name", name, "--restart=always", "busybox", "top")
+			chErr <- err
+			wg.Done()
+		}()
+	}
+
+	wg.Wait()
+	close(chErr)
+	for err := range chErr {
+		c.Assert(err, check.IsNil)
+	}
+
+	parent1Args = append([]string{"run", "-d"}, parent1Args...)
+	parent1Args = append(parent1Args, []string{"--name=parent1", "--restart=always", "busybox", "top"}...)
+	parent2Args = append([]string{"run", "-d"}, parent2Args...)
+	parent2Args = append(parent2Args, []string{"--name=parent2", "--restart=always", "busybox", "top"}...)
+
+	_, err := s.d.Cmd(parent1Args...)
+	c.Assert(err, check.IsNil)
+	_, err = s.d.Cmd(parent2Args...)
+	c.Assert(err, check.IsNil)
+
+	s.d.Stop(c)
+	// clear the log file -- we don't need its earlier contents, but we may need it
+	// for the next part; the error can be ignored here, this is just cleanup
+	os.Truncate(s.d.LogFileName(), 0)
+	s.d.Start(c)
+
+	for _, num := range []string{"1", "2"} {
+		out, err := s.d.Cmd("inspect", "-f", "{{ .State.Running }}", "parent"+num)
+		c.Assert(err, check.IsNil)
+		if strings.TrimSpace(out) != "true" {
+			log, _ := ioutil.ReadFile(s.d.LogFileName())
+			c.Fatalf("parent container is not running\n%s", string(log))
+		}
+	}
+}
+
+func (s *DockerDaemonSuite) TestDaemonCgroupParent(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+
+	cgroupParent := "test"
+	name := "cgroup-test"
+
+	s.d.StartWithBusybox(c, "--cgroup-parent", cgroupParent)
+	defer s.d.Restart(c)
+
+	out, err := s.d.Cmd("run", "--name", name, "busybox", "cat", "/proc/self/cgroup")
+	c.Assert(err, checker.IsNil)
+	cgroupPaths := testutil.ParseCgroupPaths(string(out))
+	c.Assert(len(cgroupPaths), checker.Not(checker.Equals), 0, check.Commentf("unexpected output - %q", string(out)))
+	out, err = s.d.Cmd("inspect", "-f", "{{.Id}}", name)
+	c.Assert(err, checker.IsNil)
+	id := strings.TrimSpace(string(out))
+	expectedCgroup := path.Join(cgroupParent, id)
+	found := false
+	for _, path := range cgroupPaths {
+		if strings.HasSuffix(path, expectedCgroup) {
+			found = true
+			break
+		}
+	}
+	c.Assert(found, checker.True, check.Commentf("Cgroup path for container (%s) not found in cgroup file: %s", expectedCgroup, cgroupPaths))
+}
+
+func (s *DockerDaemonSuite) TestDaemonRestartWithLinks(c *check.C) {
+	testRequires(c, DaemonIsLinux) // Windows does not support links
+	s.d.StartWithBusybox(c)
+
+	out, err := s.d.Cmd("run", "-d", "--name=test", "busybox", "top")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+
+	out, err = s.d.Cmd("run", "--name=test2", "--link", "test:abc", "busybox", "sh", "-c", "ping -c 1 -w 1 abc")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+
+	s.d.Restart(c)
+
+	// should fail since test is not running yet
+	out, err = s.d.Cmd("start", "test2")
+	c.Assert(err, check.NotNil, check.Commentf(out))
+
+	out, err = s.d.Cmd("start", "test")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+	out, err = s.d.Cmd("start", "-a", "test2")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+	c.Assert(strings.Contains(out, "1 packets transmitted, 1 packets received"), check.Equals, true, check.Commentf(out))
+}
+
+func (s *DockerDaemonSuite) TestDaemonRestartWithNames(c *check.C) {
+	testRequires(c, DaemonIsLinux) // Windows does not support links
+	s.d.StartWithBusybox(c)
+
+	out, err := s.d.Cmd("create", "--name=test", "busybox")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+
+	out, err = s.d.Cmd("run", "-d", "--name=test2", "busybox", "top")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+	test2ID := strings.TrimSpace(out)
+
+	out, err = s.d.Cmd("run", "-d", "--name=test3", "--link", "test2:abc", "busybox", "top")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+	test3ID := strings.TrimSpace(out)
+
+	s.d.Restart(c)
+
+	out, err = s.d.Cmd("create", "--name=test", "busybox")
+	c.Assert(err, check.NotNil, check.Commentf("expected error trying to create container with duplicate name"))
+	// this one is no longer needed, removing simplifies the remainder of the test
+	out, err = s.d.Cmd("rm", "-f", "test")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+
+	out, err = s.d.Cmd("ps", "-a", "--no-trunc")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+
+	lines := strings.Split(strings.TrimSpace(out), "\n")[1:]
+
+	test2validated := false
test3validated := false
+	for _, line := range lines {
+		fields := strings.Fields(line)
+		names := fields[len(fields)-1]
+		switch fields[0] {
+		case test2ID:
+			c.Assert(names, check.Equals, "test2,test3/abc")
+			test2validated = true
+		case test3ID:
+			c.Assert(names, check.Equals, "test3")
+			test3validated = true
+		}
+	}
+
+	c.Assert(test2validated, check.Equals, true)
+	c.Assert(test3validated, check.Equals, true)
+}
+
+// TestDaemonRestartWithKilledRunningContainer requires live restore of running containers
+func (s *DockerDaemonSuite) TestDaemonRestartWithKilledRunningContainer(t *check.C) {
+	// TODO(mlaventure): Not sure what the exit code would be on Windows
+	testRequires(t, DaemonIsLinux)
+	s.d.StartWithBusybox(t)
+
+	cid, err := s.d.Cmd("run", "-d", "--name", "test", "busybox", "top")
+	defer s.d.Stop(t)
+	if err != nil {
+		t.Fatal(cid, err)
+	}
+	cid = strings.TrimSpace(cid)
+
+	pid, err := s.d.Cmd("inspect", "-f", "{{.State.Pid}}", cid)
+	t.Assert(err, check.IsNil)
+	pid = strings.TrimSpace(pid)
+
+	// Kill the daemon
+	if err := s.d.Kill(); err != nil {
+		t.Fatal(err)
+	}
+
+	// kill the container
+	icmd.RunCommand(ctrBinary, "--address", "unix:///var/run/docker/libcontainerd/docker-containerd.sock", "containers", "kill", cid).Assert(t, icmd.Success)
+
+	// Give containerd time to process the command; if we don't,
+	// the exit event might be received after we do the inspect
+	result := icmd.RunCommand("kill", "-0", pid)
+	for result.ExitCode == 0 {
+		time.Sleep(1 * time.Second)
+		// FIXME(vdemeester) should we check it doesn't error out ?
+		result = icmd.RunCommand("kill", "-0", pid)
+	}
+
+	// restart the daemon
+	s.d.Start(t)
+
+	// Check that we've got the correct exit code
+	out, err := s.d.Cmd("inspect", "-f", "{{.State.ExitCode}}", cid)
+	t.Assert(err, check.IsNil)
+
+	out = strings.TrimSpace(out)
+	if out != "143" {
+		t.Fatalf("Expected exit code '%s', got '%s' for container '%s'\n", "143", out, cid)
+	}
+}
+
+// os.Kill should kill daemon ungracefully, leaving behind live containers.
+// The live containers should be known to the restarted daemon. Stopping
+// them now should remove the mounts.
+func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonCrash(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	s.d.StartWithBusybox(c, "--live-restore")
+
+	out, err := s.d.Cmd("run", "-d", "busybox", "top")
+	c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
+	id := strings.TrimSpace(out)
+
+	c.Assert(s.d.Signal(os.Kill), check.IsNil)
+	mountOut, err := ioutil.ReadFile("/proc/self/mountinfo")
+	c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut))
+
+	// container mounts should exist even after daemon has crashed.
+	comment := check.Commentf("%s should stay mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.Root, mountOut)
+	c.Assert(strings.Contains(string(mountOut), id), check.Equals, true, comment)
+
+	// restart daemon.
+	s.d.Start(c, "--live-restore")
+
+	// container should be running.
+	out, err = s.d.Cmd("inspect", "--format={{.State.Running}}", id)
+	c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
+	out = strings.TrimSpace(out)
+	if out != "true" {
+		c.Fatalf("Container %s expected to stay alive after daemon restart", id)
+	}
+
+	// 'docker stop' should work.
+	out, err = s.d.Cmd("stop", id)
+	c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
+
+	// Now, container mounts should be gone.
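+	// Unlike the crash above, a clean 'docker stop' goes through the daemon,
+	// which unmounts the container rootfs before reporting success.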
+	mountOut, err = ioutil.ReadFile("/proc/self/mountinfo")
+	c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut))
+	comment = check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.Root, mountOut)
+	c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, comment)
+}
+
+// TestDaemonRestartWithUnpausedRunningContainer requires live restore of running containers.
+func (s *DockerDaemonSuite) TestDaemonRestartWithUnpausedRunningContainer(t *check.C) {
+	// TODO(mlaventure): Not sure what the exit code would be on Windows
+	testRequires(t, DaemonIsLinux)
+	s.d.StartWithBusybox(t, "--live-restore")
+
+	cid, err := s.d.Cmd("run", "-d", "--name", "test", "busybox", "top")
+	defer s.d.Stop(t)
+	if err != nil {
+		t.Fatal(cid, err)
+	}
+	cid = strings.TrimSpace(cid)
+
+	pid, err := s.d.Cmd("inspect", "-f", "{{.State.Pid}}", cid)
+	t.Assert(err, check.IsNil)
+
+	// pause the container
+	if _, err := s.d.Cmd("pause", cid); err != nil {
+		t.Fatal(cid, err)
+	}
+
+	// Kill the daemon
+	if err := s.d.Kill(); err != nil {
+		t.Fatal(err)
+	}
+
+	// resume the container
+	result := icmd.RunCommand(
+		ctrBinary,
+		"--address", "unix:///var/run/docker/libcontainerd/docker-containerd.sock",
+		"containers", "resume", cid)
+	t.Assert(result, icmd.Matches, icmd.Success)
+
+	// Give containerd time to process the command; if we don't,
+	// the resume event might be received after we do the inspect
+	waitAndAssert(t, defaultReconciliationTimeout, func(*check.C) (interface{}, check.CommentInterface) {
+		result := icmd.RunCommand("kill", "-0", strings.TrimSpace(pid))
+		return result.ExitCode, nil
+	}, checker.Equals, 0)
+
+	// restart the daemon
+	s.d.Start(t, "--live-restore")
+
+	// Check that we've got the correct status
+	out, err := s.d.Cmd("inspect", "-f", "{{.State.Status}}", cid)
+	t.Assert(err, check.IsNil)
+
+	out = strings.TrimSpace(out)
+	if out != "running" {
+		t.Fatalf("Expected status '%s', got '%s' for container '%s'\n", "running", out, cid)
+	}
+	if _, err := s.d.Cmd("kill", cid); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// TestRunLinksChanged checks that creating a new container with the same name does not update links;
+// this ensures that the old, pre gh#16032 functionality continues on
+func (s *DockerDaemonSuite) TestRunLinksChanged(c *check.C) {
+	testRequires(c, DaemonIsLinux) // Windows does not support links
+	s.d.StartWithBusybox(c)
+
+	out, err := s.d.Cmd("run", "-d", "--name=test", "busybox", "top")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+
+	out, err = s.d.Cmd("run", "--name=test2", "--link=test:abc", "busybox", "sh", "-c", "ping -c 1 abc")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+	c.Assert(out, checker.Contains, "1 packets transmitted, 1 packets received")
+
+	out, err = s.d.Cmd("rm", "-f", "test")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+
+	out, err = s.d.Cmd("run", "-d", "--name=test", "busybox", "top")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+	out, err = s.d.Cmd("start", "-a", "test2")
+	c.Assert(err, check.NotNil, check.Commentf(out))
+	c.Assert(out, check.Not(checker.Contains), "1 packets transmitted, 1 packets received")
+
+	s.d.Restart(c)
+	out, err = s.d.Cmd("start", "-a", "test2")
+	c.Assert(err, check.NotNil, check.Commentf(out))
+	c.Assert(out, check.Not(checker.Contains), "1 packets transmitted, 1 packets received")
+}
+
+func (s *DockerDaemonSuite) TestDaemonStartWithoutColors(c *check.C) {
+	testRequires(c, DaemonIsLinux, NotPpc64le)
+
+	infoLog :=
"\x1b[34mINFO\x1b" + + b := bytes.NewBuffer(nil) + done := make(chan bool) + + p, tty, err := pty.Open() + c.Assert(err, checker.IsNil) + defer func() { + tty.Close() + p.Close() + }() + + go func() { + io.Copy(b, p) + done <- true + }() + + // Enable coloring explicitly + s.d.StartWithLogFile(tty, "--raw-logs=false") + s.d.Stop(c) + // Wait for io.Copy() before checking output + <-done + c.Assert(b.String(), checker.Contains, infoLog) + + b.Reset() + + // "tty" is already closed in prev s.d.Stop(), + // we have to close the other side "p" and open another pair of + // pty for the next test. + p.Close() + p, tty, err = pty.Open() + c.Assert(err, checker.IsNil) + + go func() { + io.Copy(b, p) + done <- true + }() + + // Disable coloring explicitly + s.d.StartWithLogFile(tty, "--raw-logs=true") + s.d.Stop(c) + // Wait for io.Copy() before checking output + <-done + c.Assert(b.String(), check.Not(check.Equals), "") + c.Assert(b.String(), check.Not(checker.Contains), infoLog) +} + +func (s *DockerDaemonSuite) TestDaemonDebugLog(c *check.C) { + testRequires(c, DaemonIsLinux, NotPpc64le) + + debugLog := "\x1b[37mDEBU\x1b" + + p, tty, err := pty.Open() + c.Assert(err, checker.IsNil) + defer func() { + tty.Close() + p.Close() + }() + + b := bytes.NewBuffer(nil) + go io.Copy(b, p) + + s.d.StartWithLogFile(tty, "--debug") + s.d.Stop(c) + c.Assert(b.String(), checker.Contains, debugLog) +} + +func (s *DockerDaemonSuite) TestDaemonDiscoveryBackendConfigReload(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + // daemon config file + daemonConfig := `{ "debug" : false }` + configFile, err := ioutil.TempFile("", "test-daemon-discovery-backend-config-reload-config") + c.Assert(err, checker.IsNil, check.Commentf("could not create temp file for config reload")) + configFilePath := configFile.Name() + defer func() { + configFile.Close() + os.RemoveAll(configFile.Name()) + }() + + _, err = configFile.Write([]byte(daemonConfig)) + c.Assert(err, checker.IsNil) + + // --log-level needs to be set so that d.Start() doesn't add --debug causing + // a conflict with the config + s.d.Start(c, "--config-file", configFilePath, "--log-level=info") + + // daemon config file + daemonConfig = `{ + "cluster-store": "consul://consuladdr:consulport/some/path", + "cluster-advertise": "192.168.56.100:0", + "debug" : false + }` + + err = configFile.Truncate(0) + c.Assert(err, checker.IsNil) + _, err = configFile.Seek(0, os.SEEK_SET) + c.Assert(err, checker.IsNil) + + _, err = configFile.Write([]byte(daemonConfig)) + c.Assert(err, checker.IsNil) + + err = s.d.ReloadConfig() + c.Assert(err, checker.IsNil, check.Commentf("error reloading daemon config")) + + out, err := s.d.Cmd("info") + c.Assert(err, checker.IsNil) + + c.Assert(out, checker.Contains, fmt.Sprintf("Cluster Store: consul://consuladdr:consulport/some/path")) + c.Assert(out, checker.Contains, fmt.Sprintf("Cluster Advertise: 192.168.56.100:0")) +} + +// Test for #21956 +func (s *DockerDaemonSuite) TestDaemonLogOptions(c *check.C) { + s.d.StartWithBusybox(c, "--log-driver=syslog", "--log-opt=syslog-address=udp://127.0.0.1:514") + + out, err := s.d.Cmd("run", "-d", "--log-driver=json-file", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf(out)) + id := strings.TrimSpace(out) + + out, err = s.d.Cmd("inspect", "--format='{{.HostConfig.LogConfig}}'", id) + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "{json-file map[]}") +} + +// Test case for #20936, #22443 +func (s *DockerDaemonSuite) 
TestDaemonMaxConcurrency(c *check.C) { + s.d.Start(c, "--max-concurrent-uploads=6", "--max-concurrent-downloads=8") + + expectedMaxConcurrentUploads := `level=debug msg="Max Concurrent Uploads: 6"` + expectedMaxConcurrentDownloads := `level=debug msg="Max Concurrent Downloads: 8"` + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) +} + +// Test case for #20936, #22443 +func (s *DockerDaemonSuite) TestDaemonMaxConcurrencyWithConfigFile(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + // daemon config file + configFilePath := "test.json" + configFile, err := os.Create(configFilePath) + c.Assert(err, checker.IsNil) + defer os.Remove(configFilePath) + + daemonConfig := `{ "max-concurrent-downloads" : 8 }` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + s.d.Start(c, fmt.Sprintf("--config-file=%s", configFilePath)) + + expectedMaxConcurrentUploads := `level=debug msg="Max Concurrent Uploads: 5"` + expectedMaxConcurrentDownloads := `level=debug msg="Max Concurrent Downloads: 8"` + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) + + configFile, err = os.Create(configFilePath) + c.Assert(err, checker.IsNil) + daemonConfig = `{ "max-concurrent-uploads" : 7, "max-concurrent-downloads" : 9 }` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + + c.Assert(s.d.Signal(unix.SIGHUP), checker.IsNil) + // unix.Kill(s.d.cmd.Process.Pid, unix.SIGHUP) + + time.Sleep(3 * time.Second) + + expectedMaxConcurrentUploads = `level=debug msg="Reset Max Concurrent Uploads: 7"` + expectedMaxConcurrentDownloads = `level=debug msg="Reset Max Concurrent Downloads: 9"` + content, err = s.d.ReadLogFile() + c.Assert(err, checker.IsNil) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) +} + +// Test case for #20936, #22443 +func (s *DockerDaemonSuite) TestDaemonMaxConcurrencyWithConfigFileReload(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + // daemon config file + configFilePath := "test.json" + configFile, err := os.Create(configFilePath) + c.Assert(err, checker.IsNil) + defer os.Remove(configFilePath) + + daemonConfig := `{ "max-concurrent-uploads" : null }` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + s.d.Start(c, fmt.Sprintf("--config-file=%s", configFilePath)) + + expectedMaxConcurrentUploads := `level=debug msg="Max Concurrent Uploads: 5"` + expectedMaxConcurrentDownloads := `level=debug msg="Max Concurrent Downloads: 3"` + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) + + configFile, err = os.Create(configFilePath) + c.Assert(err, checker.IsNil) + daemonConfig = `{ "max-concurrent-uploads" : 1, "max-concurrent-downloads" : null }` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + + c.Assert(s.d.Signal(unix.SIGHUP), checker.IsNil) + // unix.Kill(s.d.cmd.Process.Pid, unix.SIGHUP) + + time.Sleep(3 * time.Second) + + expectedMaxConcurrentUploads = `level=debug msg="Reset Max Concurrent Uploads: 1"` + 
expectedMaxConcurrentDownloads = `level=debug msg="Reset Max Concurrent Downloads: 3"` + content, err = s.d.ReadLogFile() + c.Assert(err, checker.IsNil) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) + + configFile, err = os.Create(configFilePath) + c.Assert(err, checker.IsNil) + daemonConfig = `{ "labels":["foo=bar"] }` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + + c.Assert(s.d.Signal(unix.SIGHUP), checker.IsNil) + + time.Sleep(3 * time.Second) + + expectedMaxConcurrentUploads = `level=debug msg="Reset Max Concurrent Uploads: 5"` + expectedMaxConcurrentDownloads = `level=debug msg="Reset Max Concurrent Downloads: 3"` + content, err = s.d.ReadLogFile() + c.Assert(err, checker.IsNil) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) +} + +func (s *DockerDaemonSuite) TestBuildOnDisabledBridgeNetworkDaemon(c *check.C) { + s.d.StartWithBusybox(c, "-b=none", "--iptables=false") + out, code, err := s.d.BuildImageWithOut("busyboxs", + `FROM busybox + RUN cat /etc/hosts`, false) + comment := check.Commentf("Failed to build image. output %s, exitCode %d, err %v", out, code, err) + c.Assert(err, check.IsNil, comment) + c.Assert(code, check.Equals, 0, comment) +} + +// Test case for #21976 +func (s *DockerDaemonSuite) TestDaemonDNSFlagsInHostMode(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + s.d.StartWithBusybox(c, "--dns", "1.2.3.4", "--dns-search", "example.com", "--dns-opt", "timeout:3") + + expectedOutput := "nameserver 1.2.3.4" + out, _ := s.d.Cmd("run", "--net=host", "busybox", "cat", "/etc/resolv.conf") + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) + expectedOutput = "search example.com" + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) + expectedOutput = "options timeout:3" + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) +} + +func (s *DockerDaemonSuite) TestRunWithRuntimeFromConfigFile(c *check.C) { + conf, err := ioutil.TempFile("", "config-file-") + c.Assert(err, check.IsNil) + configName := conf.Name() + conf.Close() + defer os.Remove(configName) + + config := ` +{ + "runtimes": { + "oci": { + "path": "docker-runc" + }, + "vm": { + "path": "/usr/local/bin/vm-manager", + "runtimeArgs": [ + "--debug" + ] + } + } +} +` + ioutil.WriteFile(configName, []byte(config), 0644) + s.d.StartWithBusybox(c, "--config-file", configName) + + // Run with default runtime + out, err := s.d.Cmd("run", "--rm", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Run with default runtime explicitly + out, err = s.d.Cmd("run", "--rm", "--runtime=runc", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Run with oci (same path as default) but keep it around + out, err = s.d.Cmd("run", "--name", "oci-runtime-ls", "--runtime=oci", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Run with "vm" + out, err = s.d.Cmd("run", "--rm", "--runtime=vm", "busybox", "ls") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "/usr/local/bin/vm-manager: no such file or directory") + + // Reset config to only have the default + config = ` +{ + "runtimes": { + } +} +` + 
ioutil.WriteFile(configName, []byte(config), 0644) + c.Assert(s.d.Signal(unix.SIGHUP), checker.IsNil) + // Give daemon time to reload config + <-time.After(1 * time.Second) + + // Run with default runtime + out, err = s.d.Cmd("run", "--rm", "--runtime=runc", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Run with "oci" + out, err = s.d.Cmd("run", "--rm", "--runtime=oci", "busybox", "ls") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Unknown runtime specified oci") + + // Start previously created container with oci + out, err = s.d.Cmd("start", "oci-runtime-ls") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Unknown runtime specified oci") + + // Check that we can't override the default runtime + config = ` +{ + "runtimes": { + "runc": { + "path": "my-runc" + } + } +} +` + ioutil.WriteFile(configName, []byte(config), 0644) + c.Assert(s.d.Signal(unix.SIGHUP), checker.IsNil) + // Give daemon time to reload config + <-time.After(1 * time.Second) + + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) + c.Assert(string(content), checker.Contains, `file configuration validation failed (runtime name 'runc' is reserved)`) + + // Check that we can select a default runtime + config = ` +{ + "default-runtime": "vm", + "runtimes": { + "oci": { + "path": "docker-runc" + }, + "vm": { + "path": "/usr/local/bin/vm-manager", + "runtimeArgs": [ + "--debug" + ] + } + } +} +` + ioutil.WriteFile(configName, []byte(config), 0644) + c.Assert(s.d.Signal(unix.SIGHUP), checker.IsNil) + // Give daemon time to reload config + <-time.After(1 * time.Second) + + out, err = s.d.Cmd("run", "--rm", "busybox", "ls") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "/usr/local/bin/vm-manager: no such file or directory") + + // Run with default runtime explicitly + out, err = s.d.Cmd("run", "--rm", "--runtime=runc", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) +} + +func (s *DockerDaemonSuite) TestRunWithRuntimeFromCommandLine(c *check.C) { + s.d.StartWithBusybox(c, "--add-runtime", "oci=docker-runc", "--add-runtime", "vm=/usr/local/bin/vm-manager") + + // Run with default runtime + out, err := s.d.Cmd("run", "--rm", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Run with default runtime explicitly + out, err = s.d.Cmd("run", "--rm", "--runtime=runc", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Run with oci (same path as default) but keep it around + out, err = s.d.Cmd("run", "--name", "oci-runtime-ls", "--runtime=oci", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Run with "vm" + out, err = s.d.Cmd("run", "--rm", "--runtime=vm", "busybox", "ls") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "/usr/local/bin/vm-manager: no such file or directory") + + // Start a daemon without any extra runtimes + s.d.Stop(c) + s.d.StartWithBusybox(c) + + // Run with default runtime + out, err = s.d.Cmd("run", "--rm", "--runtime=runc", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Run with "oci" + out, err = s.d.Cmd("run", "--rm", "--runtime=oci", "busybox", "ls") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Unknown runtime specified oci") + + // Start previously created container with oci + out, err = s.d.Cmd("start", "oci-runtime-ls") + c.Assert(err, check.NotNil, 
check.Commentf(out)) + c.Assert(out, checker.Contains, "Unknown runtime specified oci") + + // Check that we can't override the default runtime + s.d.Stop(c) + c.Assert(s.d.StartWithError("--add-runtime", "runc=my-runc"), checker.NotNil) + + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) + c.Assert(string(content), checker.Contains, `runtime name 'runc' is reserved`) + + // Check that we can select a default runtime + s.d.Stop(c) + s.d.StartWithBusybox(c, "--default-runtime=vm", "--add-runtime", "oci=docker-runc", "--add-runtime", "vm=/usr/local/bin/vm-manager") + + out, err = s.d.Cmd("run", "--rm", "busybox", "ls") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "/usr/local/bin/vm-manager: no such file or directory") + + // Run with default runtime explicitly + out, err = s.d.Cmd("run", "--rm", "--runtime=runc", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithAutoRemoveContainer(c *check.C) { + s.d.StartWithBusybox(c) + + // top1 will exist after daemon restarts + out, err := s.d.Cmd("run", "-d", "--name", "top1", "busybox:latest", "top") + c.Assert(err, checker.IsNil, check.Commentf("run top1: %v", out)) + // top2 will be removed after daemon restarts + out, err = s.d.Cmd("run", "-d", "--rm", "--name", "top2", "busybox:latest", "top") + c.Assert(err, checker.IsNil, check.Commentf("run top2: %v", out)) + + out, err = s.d.Cmd("ps") + c.Assert(out, checker.Contains, "top1", check.Commentf("top1 should be running")) + c.Assert(out, checker.Contains, "top2", check.Commentf("top2 should be running")) + + // now restart daemon gracefully + s.d.Restart(c) + + out, err = s.d.Cmd("ps", "-a") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + c.Assert(out, checker.Contains, "top1", check.Commentf("top1 should exist after daemon restarts")) + c.Assert(out, checker.Not(checker.Contains), "top2", check.Commentf("top2 should be removed after daemon restarts")) +} + +func (s *DockerDaemonSuite) TestDaemonRestartSaveContainerExitCode(c *check.C) { + s.d.StartWithBusybox(c) + + containerName := "error-values" + // Make a container with both a non 0 exit code and an error message + // We explicitly disable `--init` for this test, because `--init` is enabled by default + // on "experimental". Enabling `--init` results in a different behavior; because the "init" + // process itself is PID1, the container does not fail on _startup_ (i.e., `docker-init` starting), + // but directly after. The exit code of the container is still 127, but the Error Message is not + // captured, so `.State.Error` is empty. + // See the discussion on https://github.com/docker/docker/pull/30227#issuecomment-274161426, + // and https://github.com/docker/docker/pull/26061#r78054578 for more information. 
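+	// "toto" is not an executable in the busybox image, so the start below fails
+	// with exit code 127 and an "executable file not found" error message, both
+	// of which must survive a daemon restart.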
+ out, err := s.d.Cmd("run", "--name", containerName, "--init=false", "busybox", "toto") + c.Assert(err, checker.NotNil) + + // Check that those values were saved on disk + out, err = s.d.Cmd("inspect", "-f", "{{.State.ExitCode}}", containerName) + out = strings.TrimSpace(out) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Equals, "127") + + errMsg1, err := s.d.Cmd("inspect", "-f", "{{.State.Error}}", containerName) + errMsg1 = strings.TrimSpace(errMsg1) + c.Assert(err, checker.IsNil) + c.Assert(errMsg1, checker.Contains, "executable file not found") + + // now restart daemon + s.d.Restart(c) + + // Check that those values are still around + out, err = s.d.Cmd("inspect", "-f", "{{.State.ExitCode}}", containerName) + out = strings.TrimSpace(out) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Equals, "127") + + out, err = s.d.Cmd("inspect", "-f", "{{.State.Error}}", containerName) + out = strings.TrimSpace(out) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Equals, errMsg1) +} + +func (s *DockerDaemonSuite) TestDaemonBackcompatPre17Volumes(c *check.C) { + testRequires(c, SameHostDaemon) + d := s.d + d.StartWithBusybox(c) + + // hack to be able to side-load a container config + out, err := d.Cmd("create", "busybox:latest") + c.Assert(err, checker.IsNil, check.Commentf(out)) + id := strings.TrimSpace(out) + + out, err = d.Cmd("inspect", "--type=image", "--format={{.ID}}", "busybox:latest") + c.Assert(err, checker.IsNil, check.Commentf(out)) + d.Stop(c) + <-d.Wait + + imageID := strings.TrimSpace(out) + volumeID := stringid.GenerateNonCryptoID() + vfsPath := filepath.Join(d.Root, "vfs", "dir", volumeID) + c.Assert(os.MkdirAll(vfsPath, 0755), checker.IsNil) + + config := []byte(` + { + "ID": "` + id + `", + "Name": "hello", + "Driver": "` + d.StorageDriver() + `", + "Image": "` + imageID + `", + "Config": {"Image": "busybox:latest"}, + "NetworkSettings": {}, + "Volumes": { + "/bar":"/foo", + "/foo": "` + vfsPath + `", + "/quux":"/quux" + }, + "VolumesRW": { + "/bar": true, + "/foo": true, + "/quux": false + } + } + `) + + configPath := filepath.Join(d.Root, "containers", id, "config.v2.json") + c.Assert(ioutil.WriteFile(configPath, config, 600), checker.IsNil) + d.Start(c) + + out, err = d.Cmd("inspect", "--type=container", "--format={{ json .Mounts }}", id) + c.Assert(err, checker.IsNil, check.Commentf(out)) + type mount struct { + Name string + Source string + Destination string + Driver string + RW bool + } + + ls := []mount{} + err = json.NewDecoder(strings.NewReader(out)).Decode(&ls) + c.Assert(err, checker.IsNil) + + expected := []mount{ + {Source: "/foo", Destination: "/bar", RW: true}, + {Name: volumeID, Destination: "/foo", RW: true}, + {Source: "/quux", Destination: "/quux", RW: false}, + } + c.Assert(ls, checker.HasLen, len(expected)) + + for _, m := range ls { + var matched bool + for _, x := range expected { + if m.Source == x.Source && m.Destination == x.Destination && m.RW == x.RW || m.Name != x.Name { + matched = true + break + } + } + c.Assert(matched, checker.True, check.Commentf("did find match for %+v", m)) + } +} + +func (s *DockerDaemonSuite) TestDaemonWithUserlandProxyPath(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + dockerProxyPath, err := exec.LookPath("docker-proxy") + c.Assert(err, checker.IsNil) + tmpDir, err := ioutil.TempDir("", "test-docker-proxy") + c.Assert(err, checker.IsNil) + + newProxyPath := filepath.Join(tmpDir, "docker-proxy") + cmd := exec.Command("cp", dockerProxyPath, newProxyPath) + c.Assert(cmd.Run(), 
checker.IsNil) + + // custom one + s.d.StartWithBusybox(c, "--userland-proxy-path", newProxyPath) + out, err := s.d.Cmd("run", "-p", "5000:5000", "busybox:latest", "true") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // try with the original one + s.d.Restart(c, "--userland-proxy-path", dockerProxyPath) + out, err = s.d.Cmd("run", "-p", "5000:5000", "busybox:latest", "true") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // not exist + s.d.Restart(c, "--userland-proxy-path", "/does/not/exist") + out, err = s.d.Cmd("run", "-p", "5000:5000", "busybox:latest", "true") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "driver failed programming external connectivity on endpoint") + c.Assert(out, checker.Contains, "/does/not/exist: no such file or directory") +} + +// Test case for #22471 +func (s *DockerDaemonSuite) TestDaemonShutdownTimeout(c *check.C) { + testRequires(c, SameHostDaemon) + s.d.StartWithBusybox(c, "--shutdown-timeout=3") + + _, err := s.d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil) + + c.Assert(s.d.Signal(unix.SIGINT), checker.IsNil) + + select { + case <-s.d.Wait: + case <-time.After(5 * time.Second): + } + + expectedMessage := `level=debug msg="start clean shutdown of all containers with a 3 seconds timeout..."` + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) + c.Assert(string(content), checker.Contains, expectedMessage) +} + +// Test case for #22471 +func (s *DockerDaemonSuite) TestDaemonShutdownTimeoutWithConfigFile(c *check.C) { + testRequires(c, SameHostDaemon) + + // daemon config file + configFilePath := "test.json" + configFile, err := os.Create(configFilePath) + c.Assert(err, checker.IsNil) + defer os.Remove(configFilePath) + + daemonConfig := `{ "shutdown-timeout" : 8 }` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + s.d.Start(c, fmt.Sprintf("--config-file=%s", configFilePath)) + + configFile, err = os.Create(configFilePath) + c.Assert(err, checker.IsNil) + daemonConfig = `{ "shutdown-timeout" : 5 }` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + + c.Assert(s.d.Signal(unix.SIGHUP), checker.IsNil) + + select { + case <-s.d.Wait: + case <-time.After(3 * time.Second): + } + + expectedMessage := `level=debug msg="Reset Shutdown Timeout: 5"` + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) + c.Assert(string(content), checker.Contains, expectedMessage) +} + +// Test case for 29342 +func (s *DockerDaemonSuite) TestExecWithUserAfterLiveRestore(c *check.C) { + testRequires(c, DaemonIsLinux) + s.d.StartWithBusybox(c, "--live-restore") + + out, err := s.d.Cmd("run", "-d", "--name=top", "busybox", "sh", "-c", "addgroup -S test && adduser -S -G test test -D -s /bin/sh && touch /adduser_end && top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + + s.d.WaitRun("top") + + // Wait for shell command to be completed + _, err = s.d.Cmd("exec", "top", "sh", "-c", `for i in $(seq 1 5); do if [ -e /adduser_end ]; then rm -f /adduser_end && break; else sleep 1 && false; fi; done`) + c.Assert(err, check.IsNil, check.Commentf("Timeout waiting for shell command to be completed")) + + out1, err := s.d.Cmd("exec", "-u", "test", "top", "id") + // uid=100(test) gid=101(test) groups=101(test) + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out1)) + + // restart daemon. 
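+	// With --live-restore the container keeps running across the restart, so the
+	// user database created by the shell command above must still be intact.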
+ s.d.Restart(c, "--live-restore") + + out2, err := s.d.Cmd("exec", "-u", "test", "top", "id") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out2)) + c.Assert(out2, check.Equals, out1, check.Commentf("Output: before restart '%s', after restart '%s'", out1, out2)) + + out, err = s.d.Cmd("stop", "top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) +} + +func (s *DockerDaemonSuite) TestRemoveContainerAfterLiveRestore(c *check.C) { + testRequires(c, DaemonIsLinux, overlayFSSupported, SameHostDaemon) + s.d.StartWithBusybox(c, "--live-restore", "--storage-driver", "overlay") + out, err := s.d.Cmd("run", "-d", "--name=top", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + + s.d.WaitRun("top") + + // restart daemon. + s.d.Restart(c, "--live-restore", "--storage-driver", "overlay") + + out, err = s.d.Cmd("stop", "top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + + // test if the rootfs mountpoint still exist + mountpoint, err := s.d.InspectField("top", ".GraphDriver.Data.MergedDir") + c.Assert(err, check.IsNil) + f, err := os.Open("/proc/self/mountinfo") + c.Assert(err, check.IsNil) + defer f.Close() + sc := bufio.NewScanner(f) + for sc.Scan() { + line := sc.Text() + if strings.Contains(line, mountpoint) { + c.Fatalf("mountinfo should not include the mountpoint of stop container") + } + } + + out, err = s.d.Cmd("rm", "top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) +} + +// #29598 +func (s *DockerDaemonSuite) TestRestartPolicyWithLiveRestore(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + s.d.StartWithBusybox(c, "--live-restore") + + out, err := s.d.Cmd("run", "-d", "--restart", "always", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf("output: %s", out)) + id := strings.TrimSpace(out) + + type state struct { + Running bool + StartedAt time.Time + } + out, err = s.d.Cmd("inspect", "-f", "{{json .State}}", id) + c.Assert(err, checker.IsNil, check.Commentf("output: %s", out)) + + var origState state + err = json.Unmarshal([]byte(strings.TrimSpace(out)), &origState) + c.Assert(err, checker.IsNil) + + s.d.Restart(c, "--live-restore") + + pid, err := s.d.Cmd("inspect", "-f", "{{.State.Pid}}", id) + c.Assert(err, check.IsNil) + pidint, err := strconv.Atoi(strings.TrimSpace(pid)) + c.Assert(err, check.IsNil) + c.Assert(pidint, checker.GreaterThan, 0) + c.Assert(unix.Kill(pidint, unix.SIGKILL), check.IsNil) + + ticker := time.NewTicker(50 * time.Millisecond) + timeout := time.After(10 * time.Second) + + for range ticker.C { + select { + case <-timeout: + c.Fatal("timeout waiting for container restart") + default: + } + + out, err := s.d.Cmd("inspect", "-f", "{{json .State}}", id) + c.Assert(err, checker.IsNil, check.Commentf("output: %s", out)) + + var newState state + err = json.Unmarshal([]byte(strings.TrimSpace(out)), &newState) + c.Assert(err, checker.IsNil) + + if !newState.Running { + continue + } + if newState.StartedAt.After(origState.StartedAt) { + break + } + } + + out, err = s.d.Cmd("stop", id) + c.Assert(err, check.IsNil, check.Commentf("output: %s", out)) +} + +func (s *DockerDaemonSuite) TestShmSize(c *check.C) { + testRequires(c, DaemonIsLinux) + + size := 67108864 * 2 + pattern := regexp.MustCompile(fmt.Sprintf("shm on /dev/shm type tmpfs(.*)size=%dk", size/1024)) + + s.d.StartWithBusybox(c, "--default-shm-size", fmt.Sprintf("%v", size)) + + name := "shm1" + out, err := s.d.Cmd("run", "--name", name, "busybox", "mount") + c.Assert(err, check.IsNil, 
check.Commentf("Output: %s", out)) + c.Assert(pattern.MatchString(out), checker.True) + out, err = s.d.Cmd("inspect", "--format", "{{.HostConfig.ShmSize}}", name) + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(strings.TrimSpace(out), check.Equals, fmt.Sprintf("%v", size)) +} + +func (s *DockerDaemonSuite) TestShmSizeReload(c *check.C) { + testRequires(c, DaemonIsLinux) + + configPath, err := ioutil.TempDir("", "test-daemon-shm-size-reload-config") + c.Assert(err, checker.IsNil, check.Commentf("could not create temp file for config reload")) + defer os.RemoveAll(configPath) // clean up + configFile := filepath.Join(configPath, "config.json") + + size := 67108864 * 2 + configData := []byte(fmt.Sprintf(`{"default-shm-size": "%dM"}`, size/1024/1024)) + c.Assert(ioutil.WriteFile(configFile, configData, 0666), checker.IsNil, check.Commentf("could not write temp file for config reload")) + pattern := regexp.MustCompile(fmt.Sprintf("shm on /dev/shm type tmpfs(.*)size=%dk", size/1024)) + + s.d.StartWithBusybox(c, "--config-file", configFile) + + name := "shm1" + out, err := s.d.Cmd("run", "--name", name, "busybox", "mount") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(pattern.MatchString(out), checker.True) + out, err = s.d.Cmd("inspect", "--format", "{{.HostConfig.ShmSize}}", name) + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(strings.TrimSpace(out), check.Equals, fmt.Sprintf("%v", size)) + + size = 67108864 * 3 + configData = []byte(fmt.Sprintf(`{"default-shm-size": "%dM"}`, size/1024/1024)) + c.Assert(ioutil.WriteFile(configFile, configData, 0666), checker.IsNil, check.Commentf("could not write temp file for config reload")) + pattern = regexp.MustCompile(fmt.Sprintf("shm on /dev/shm type tmpfs(.*)size=%dk", size/1024)) + + err = s.d.ReloadConfig() + c.Assert(err, checker.IsNil, check.Commentf("error reloading daemon config")) + + name = "shm2" + out, err = s.d.Cmd("run", "--name", name, "busybox", "mount") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(pattern.MatchString(out), checker.True) + out, err = s.d.Cmd("inspect", "--format", "{{.HostConfig.ShmSize}}", name) + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(strings.TrimSpace(out), check.Equals, fmt.Sprintf("%v", size)) +} + +// TestFailedPluginRemove makes sure that a failed plugin remove does not block +// the daemon from starting +func (s *DockerDaemonSuite) TestFailedPluginRemove(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, SameHostDaemon) + d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{}) + d.Start(c) + cli, err := client.NewClient(d.Sock(), api.DefaultVersion, nil, nil) + c.Assert(err, checker.IsNil) + + ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second) + defer cancel() + + name := "test-plugin-rm-fail" + out, err := cli.PluginInstall(ctx, name, types.PluginInstallOptions{ + Disabled: true, + AcceptAllPermissions: true, + RemoteRef: "cpuguy83/docker-logdriver-test", + }) + c.Assert(err, checker.IsNil) + defer out.Close() + io.Copy(ioutil.Discard, out) + + ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + p, _, err := cli.PluginInspectWithRaw(ctx, name) + c.Assert(err, checker.IsNil) + + // simulate a bad/partial removal by removing the plugin config. 
+ configPath := filepath.Join(d.Root, "plugins", p.ID, "config.json") + c.Assert(os.Remove(configPath), checker.IsNil) + + d.Restart(c) + ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + _, err = cli.Ping(ctx) + c.Assert(err, checker.IsNil) + + _, _, err = cli.PluginInspectWithRaw(ctx, name) + // plugin should be gone since the config.json is gone + c.Assert(err, checker.NotNil) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_diff_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_diff_test.go new file mode 100644 index 000000000..3e95a7378 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_diff_test.go @@ -0,0 +1,98 @@ +package main + +import ( + "strings" + "time" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/go-check/check" +) + +// ensure that an added file shows up in docker diff +func (s *DockerSuite) TestDiffFilenameShownInOutput(c *check.C) { + containerCmd := `mkdir /foo; echo xyzzy > /foo/bar` + out := cli.DockerCmd(c, "run", "-d", "busybox", "sh", "-c", containerCmd).Combined() + + // Wait for it to exit as cannot diff a running container on Windows, and + // it will take a few seconds to exit. Also there's no way in Windows to + // differentiate between an Add or a Modify, and all files are under + // a "Files/" prefix. + containerID := strings.TrimSpace(out) + lookingFor := "A /foo/bar" + if testEnv.DaemonPlatform() == "windows" { + cli.WaitExited(c, containerID, 60*time.Second) + lookingFor = "C Files/foo/bar" + } + + cleanCID := strings.TrimSpace(out) + out = cli.DockerCmd(c, "diff", cleanCID).Combined() + + found := false + for _, line := range strings.Split(out, "\n") { + if strings.Contains(line, lookingFor) { + found = true + break + } + } + c.Assert(found, checker.True) +} + +// test to ensure GH #3840 doesn't occur any more +func (s *DockerSuite) TestDiffEnsureInitLayerFilesAreIgnored(c *check.C) { + testRequires(c, DaemonIsLinux) + // this is a list of files which shouldn't show up in `docker diff` + initLayerFiles := []string{"/etc/resolv.conf", "/etc/hostname", "/etc/hosts", "/.dockerenv"} + containerCount := 5 + + // we might not run into this problem from the first run, so start a few containers + for i := 0; i < containerCount; i++ { + containerCmd := `echo foo > /root/bar` + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", containerCmd) + + cleanCID := strings.TrimSpace(out) + out, _ = dockerCmd(c, "diff", cleanCID) + + for _, filename := range initLayerFiles { + c.Assert(out, checker.Not(checker.Contains), filename) + } + } +} + +func (s *DockerSuite) TestDiffEnsureDefaultDevs(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "sleep", "0") + + cleanCID := strings.TrimSpace(out) + out, _ = dockerCmd(c, "diff", cleanCID) + + expected := map[string]bool{ + "C /dev": true, + "A /dev/full": true, // busybox + "C /dev/ptmx": true, // libcontainer + "A /dev/mqueue": true, + "A /dev/kmsg": true, + "A /dev/fd": true, + "A /dev/ptmx": true, + "A /dev/null": true, + "A /dev/random": true, + "A /dev/stdout": true, + "A /dev/stderr": true, + "A /dev/tty1": true, + "A /dev/stdin": true, + "A /dev/tty": true, + "A /dev/urandom": true, + "A /dev/zero": true, + } + + for _, line := range strings.Split(out, "\n") { + c.Assert(line == "" || expected[line], checker.True, check.Commentf(line)) + } +} + +// 
https://github.com/docker/docker/pull/14381#discussion_r33859347
+func (s *DockerSuite) TestDiffEmptyArgClientError(c *check.C) {
+	out, _, err := dockerCmdWithError("diff", "")
+	c.Assert(err, checker.NotNil)
+	c.Assert(strings.TrimSpace(out), checker.Contains, "Container name cannot be empty")
+}
diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_events_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_events_test.go
new file mode 100644
index 000000000..0bbc98684
--- /dev/null
+++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_events_test.go
@@ -0,0 +1,799 @@
+package main
+
+import (
+	"bufio"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"os/exec"
+	"strings"
+	"time"
+
+	eventtypes "github.com/docker/docker/api/types/events"
+	eventstestutils "github.com/docker/docker/daemon/events/testutils"
+	"github.com/docker/docker/integration-cli/checker"
+	"github.com/docker/docker/integration-cli/cli"
+	"github.com/docker/docker/integration-cli/cli/build"
+	"github.com/docker/docker/integration-cli/request"
+	"github.com/docker/docker/pkg/testutil"
+	icmd "github.com/docker/docker/pkg/testutil/cmd"
+	"github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestEventsTimestampFormats(c *check.C) {
+	name := "events-time-format-test"
+
+	// Start stopwatch, generate an event
+	start := daemonTime(c)
+	time.Sleep(1100 * time.Millisecond) // so that the first event occurs in a different second from --since (just in case)
+	dockerCmd(c, "run", "--rm", "--name", name, "busybox", "true")
+	time.Sleep(1100 * time.Millisecond) // so that until > since
+	end := daemonTime(c)
+
+	// List of available time formats to --since
+	unixTs := func(t time.Time) string { return fmt.Sprintf("%v", t.Unix()) }
+	rfc3339 := func(t time.Time) string { return t.Format(time.RFC3339) }
+	duration := func(t time.Time) string { return time.Now().Sub(t).String() }
+
+	// For every supported time format, the window between --since=$start and
+	// --until=$end must contain exactly this container's lifecycle events
+	for _, f := range []func(time.Time) string{unixTs, rfc3339, duration} {
+		since, until := f(start), f(end)
+		out, _ := dockerCmd(c, "events", "--since="+since, "--until="+until)
+		events := strings.Split(out, "\n")
+		events = events[:len(events)-1]
+
+		nEvents := len(events)
+		c.Assert(nEvents, checker.GreaterOrEqualThan, 5) //Missing expected event
+		containerEvents := eventActionsByIDAndType(c, events, name, "container")
+		c.Assert(containerEvents, checker.HasLen, 5, check.Commentf("events: %v", events))
+
+		c.Assert(containerEvents[0], checker.Equals, "create", check.Commentf(out))
+		c.Assert(containerEvents[1], checker.Equals, "attach", check.Commentf(out))
+		c.Assert(containerEvents[2], checker.Equals, "start", check.Commentf(out))
+		c.Assert(containerEvents[3], checker.Equals, "die", check.Commentf(out))
+		c.Assert(containerEvents[4], checker.Equals, "destroy", check.Commentf(out))
+	}
+}
+
+func (s *DockerSuite) TestEventsUntag(c *check.C) {
+	image := "busybox"
+	dockerCmd(c, "tag", image, "utest:tag1")
+	dockerCmd(c, "tag", image, "utest:tag2")
+	dockerCmd(c, "rmi", "utest:tag1")
+	dockerCmd(c, "rmi", "utest:tag2")
+
+	result := icmd.RunCmd(icmd.Cmd{
+		Command: []string{dockerBinary, "events", "--since=1"},
+		Timeout: time.Millisecond * 2500,
+	})
+	c.Assert(result, icmd.Matches, icmd.Expected{Timeout: true})
+
+	events := strings.Split(result.Stdout(), "\n")
+	nEvents := len(events)
+	// The last element after the split above will be an empty string, so we
+	// get the two elements before the last, which are the untags we're
+	// looking for.
+	for _, v := range events[nEvents-3 : nEvents-1] {
+		c.Assert(v, checker.Contains, "untag", check.Commentf("event should be untag"))
+	}
+}
+
+func (s *DockerSuite) TestEventsLimit(c *check.C) {
+	// Windows: Limit to 4 goroutines creating containers in order to prevent
+	// timeouts creating so many containers simultaneously. This is due to
+	// a bug in the Windows platform. It will be fixed in a Windows Update.
+	numContainers := 17
+	numConcurrentContainers := numContainers
+	if testEnv.DaemonPlatform() == "windows" {
+		numConcurrentContainers = 4
+	}
+	sem := make(chan bool, numConcurrentContainers)
+	errChan := make(chan error, numContainers)
+
+	args := []string{"run", "--rm", "busybox", "true"}
+	for i := 0; i < numContainers; i++ {
+		sem <- true
+		go func() {
+			defer func() { <-sem }()
+			out, err := exec.Command(dockerBinary, args...).CombinedOutput()
+			if err != nil {
+				err = fmt.Errorf("%v: %s", err, string(out))
+			}
+			errChan <- err
+		}()
+	}
+
+	// Wait for all goroutines to finish
+	for i := 0; i < cap(sem); i++ {
+		sem <- true
+	}
+	close(errChan)
+
+	for err := range errChan {
+		c.Assert(err, checker.IsNil, check.Commentf("%q failed with error", strings.Join(args, " ")))
+	}
+
+	out, _ := dockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c))
+	events := strings.Split(out, "\n")
+	nEvents := len(events) - 1
+	c.Assert(nEvents, checker.Equals, 256, check.Commentf("events should be limited to 256, but received %d", nEvents))
+}
+
+func (s *DockerSuite) TestEventsContainerEvents(c *check.C) {
+	dockerCmd(c, "run", "--rm", "--name", "container-events-test", "busybox", "true")
+
+	out, _ := dockerCmd(c, "events", "--until", daemonUnixTime(c))
+	events := strings.Split(out, "\n")
+	events = events[:len(events)-1]
+
+	nEvents := len(events)
+	c.Assert(nEvents, checker.GreaterOrEqualThan, 5) //Missing expected event
+	containerEvents := eventActionsByIDAndType(c, events, "container-events-test", "container")
+	c.Assert(containerEvents, checker.HasLen, 5, check.Commentf("events: %v", events))
+
+	c.Assert(containerEvents[0], checker.Equals, "create", check.Commentf(out))
+	c.Assert(containerEvents[1], checker.Equals, "attach", check.Commentf(out))
+	c.Assert(containerEvents[2], checker.Equals, "start", check.Commentf(out))
+	c.Assert(containerEvents[3], checker.Equals, "die", check.Commentf(out))
+	c.Assert(containerEvents[4], checker.Equals, "destroy", check.Commentf(out))
+}
+
+func (s *DockerSuite) TestEventsContainerEventsAttrSort(c *check.C) {
+	since := daemonUnixTime(c)
+	dockerCmd(c, "run", "--rm", "--name", "container-events-test", "busybox", "true")
+
+	out, _ := dockerCmd(c, "events", "--filter", "container=container-events-test", "--since", since, "--until", daemonUnixTime(c))
+	events := strings.Split(out, "\n")
+
+	nEvents := len(events)
+	c.Assert(nEvents, checker.GreaterOrEqualThan, 3) //Missing expected event
+	matchedEvents := 0
+	for _, event := range events {
+		matches := eventstestutils.ScanMap(event)
+		if matches["eventType"] == "container" && matches["action"] == "create" {
+			matchedEvents++
+			c.Assert(out, checker.Contains, "(image=busybox, name=container-events-test)", check.Commentf("Event attributes not sorted"))
+		} else if matches["eventType"] == "container" && matches["action"] == "start" {
+			matchedEvents++
+			c.Assert(out, checker.Contains, "(image=busybox, name=container-events-test)", check.Commentf("Event attributes not sorted"))
+		}
+	}
+	c.Assert(matchedEvents, checker.Equals, 2, check.Commentf("missing events for
container container-events-test:\n%s", out)) +} + +func (s *DockerSuite) TestEventsContainerEventsSinceUnixEpoch(c *check.C) { + dockerCmd(c, "run", "--rm", "--name", "since-epoch-test", "busybox", "true") + timeBeginning := time.Unix(0, 0).Format(time.RFC3339Nano) + timeBeginning = strings.Replace(timeBeginning, "Z", ".000000000Z", -1) + out, _ := dockerCmd(c, "events", "--since", timeBeginning, "--until", daemonUnixTime(c)) + events := strings.Split(out, "\n") + events = events[:len(events)-1] + + nEvents := len(events) + c.Assert(nEvents, checker.GreaterOrEqualThan, 5) //Missing expected event + containerEvents := eventActionsByIDAndType(c, events, "since-epoch-test", "container") + c.Assert(containerEvents, checker.HasLen, 5, check.Commentf("events: %v", events)) + + c.Assert(containerEvents[0], checker.Equals, "create", check.Commentf(out)) + c.Assert(containerEvents[1], checker.Equals, "attach", check.Commentf(out)) + c.Assert(containerEvents[2], checker.Equals, "start", check.Commentf(out)) + c.Assert(containerEvents[3], checker.Equals, "die", check.Commentf(out)) + c.Assert(containerEvents[4], checker.Equals, "destroy", check.Commentf(out)) +} + +func (s *DockerSuite) TestEventsImageTag(c *check.C) { + time.Sleep(1 * time.Second) // because API has seconds granularity + since := daemonUnixTime(c) + image := "testimageevents:tag" + dockerCmd(c, "tag", "busybox", image) + + out, _ := dockerCmd(c, "events", + "--since", since, "--until", daemonUnixTime(c)) + + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(events, checker.HasLen, 1, check.Commentf("was expecting 1 event. out=%s", out)) + event := strings.TrimSpace(events[0]) + + matches := eventstestutils.ScanMap(event) + c.Assert(matchEventID(matches, image), checker.True, check.Commentf("matches: %v\nout:\n%s", matches, out)) + c.Assert(matches["action"], checker.Equals, "tag") +} + +func (s *DockerSuite) TestEventsImagePull(c *check.C) { + // TODO Windows: Enable this test once pull and reliable image names are available + testRequires(c, DaemonIsLinux) + since := daemonUnixTime(c) + testRequires(c, Network) + + dockerCmd(c, "pull", "hello-world") + + out, _ := dockerCmd(c, "events", + "--since", since, "--until", daemonUnixTime(c)) + + events := strings.Split(strings.TrimSpace(out), "\n") + event := strings.TrimSpace(events[len(events)-1]) + matches := eventstestutils.ScanMap(event) + c.Assert(matches["id"], checker.Equals, "hello-world:latest") + c.Assert(matches["action"], checker.Equals, "pull") + +} + +func (s *DockerSuite) TestEventsImageImport(c *check.C) { + // TODO Windows CI. 
This should be portable once export/import are + // more reliable (@swernli) + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + cleanedContainerID := strings.TrimSpace(out) + + since := daemonUnixTime(c) + out, _, err := testutil.RunCommandPipelineWithOutput( + exec.Command(dockerBinary, "export", cleanedContainerID), + exec.Command(dockerBinary, "import", "-"), + ) + c.Assert(err, checker.IsNil, check.Commentf("import failed with output: %q", out)) + imageRef := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), "--filter", "event=import") + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(events, checker.HasLen, 1) + matches := eventstestutils.ScanMap(events[0]) + c.Assert(matches["id"], checker.Equals, imageRef, check.Commentf("matches: %v\nout:\n%s\n", matches, out)) + c.Assert(matches["action"], checker.Equals, "import", check.Commentf("matches: %v\nout:\n%s\n", matches, out)) +} + +func (s *DockerSuite) TestEventsImageLoad(c *check.C) { + testRequires(c, DaemonIsLinux) + myImageName := "footest:v1" + dockerCmd(c, "tag", "busybox", myImageName) + since := daemonUnixTime(c) + + out, _ := dockerCmd(c, "images", "-q", "--no-trunc", myImageName) + longImageID := strings.TrimSpace(out) + c.Assert(longImageID, checker.Not(check.Equals), "", check.Commentf("Id should not be empty")) + + dockerCmd(c, "save", "-o", "saveimg.tar", myImageName) + dockerCmd(c, "rmi", myImageName) + out, _ = dockerCmd(c, "images", "-q", myImageName) + noImageID := strings.TrimSpace(out) + c.Assert(noImageID, checker.Equals, "", check.Commentf("Should not have any image")) + dockerCmd(c, "load", "-i", "saveimg.tar") + + result := icmd.RunCommand("rm", "-rf", "saveimg.tar") + c.Assert(result, icmd.Matches, icmd.Success) + + out, _ = dockerCmd(c, "images", "-q", "--no-trunc", myImageName) + imageID := strings.TrimSpace(out) + c.Assert(imageID, checker.Equals, longImageID, check.Commentf("Should have same image id as before")) + + out, _ = dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), "--filter", "event=load") + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(events, checker.HasLen, 1) + matches := eventstestutils.ScanMap(events[0]) + c.Assert(matches["id"], checker.Equals, imageID, check.Commentf("matches: %v\nout:\n%s\n", matches, out)) + c.Assert(matches["action"], checker.Equals, "load", check.Commentf("matches: %v\nout:\n%s\n", matches, out)) + + out, _ = dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), "--filter", "event=save") + events = strings.Split(strings.TrimSpace(out), "\n") + c.Assert(events, checker.HasLen, 1) + matches = eventstestutils.ScanMap(events[0]) + c.Assert(matches["id"], checker.Equals, imageID, check.Commentf("matches: %v\nout:\n%s\n", matches, out)) + c.Assert(matches["action"], checker.Equals, "save", check.Commentf("matches: %v\nout:\n%s\n", matches, out)) +} + +func (s *DockerSuite) TestEventsPluginOps(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + + since := daemonUnixTime(c) + + dockerCmd(c, "plugin", "install", pNameWithTag, "--grant-all-permissions") + dockerCmd(c, "plugin", "disable", pNameWithTag) + dockerCmd(c, "plugin", "remove", pNameWithTag) + + out, _ := dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c)) + events := strings.Split(out, "\n") + events = events[:len(events)-1] + + nEvents := len(events) + c.Assert(nEvents, checker.GreaterOrEqualThan, 4) + + 
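// The eventActionsByIDAndType helper is used here as elsewhere in this suite
+	// to filter the raw event lines down to just this plugin's actions, so the
+	// exact pull/enable/disable/remove order can be asserted below.
+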
pluginEvents := eventActionsByIDAndType(c, events, pNameWithTag, "plugin") + c.Assert(pluginEvents, checker.HasLen, 4, check.Commentf("events: %v", events)) + + c.Assert(pluginEvents[0], checker.Equals, "pull", check.Commentf(out)) + c.Assert(pluginEvents[1], checker.Equals, "enable", check.Commentf(out)) + c.Assert(pluginEvents[2], checker.Equals, "disable", check.Commentf(out)) + c.Assert(pluginEvents[3], checker.Equals, "remove", check.Commentf(out)) +} + +func (s *DockerSuite) TestEventsFilters(c *check.C) { + since := daemonUnixTime(c) + dockerCmd(c, "run", "--rm", "busybox", "true") + dockerCmd(c, "run", "--rm", "busybox", "true") + out, _ := dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), "--filter", "event=die") + parseEvents(c, out, "die") + + out, _ = dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), "--filter", "event=die", "--filter", "event=start") + parseEvents(c, out, "die|start") + + // make sure we at least got 2 start events + count := strings.Count(out, "start") + c.Assert(strings.Count(out, "start"), checker.GreaterOrEqualThan, 2, check.Commentf("should have had 2 start events but had %d, out: %s", count, out)) + +} + +func (s *DockerSuite) TestEventsFilterImageName(c *check.C) { + since := daemonUnixTime(c) + + out, _ := dockerCmd(c, "run", "--name", "container_1", "-d", "busybox:latest", "true") + container1 := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "run", "--name", "container_2", "-d", "busybox", "true") + container2 := strings.TrimSpace(out) + + name := "busybox" + out, _ = dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), "--filter", fmt.Sprintf("image=%s", name)) + events := strings.Split(out, "\n") + events = events[:len(events)-1] + c.Assert(events, checker.Not(checker.HasLen), 0) //Expected events but found none for the image busybox:latest + count1 := 0 + count2 := 0 + + for _, e := range events { + if strings.Contains(e, container1) { + count1++ + } else if strings.Contains(e, container2) { + count2++ + } + } + c.Assert(count1, checker.Not(checker.Equals), 0, check.Commentf("Expected event from container but got %d from %s", count1, container1)) + c.Assert(count2, checker.Not(checker.Equals), 0, check.Commentf("Expected event from container but got %d from %s", count2, container2)) + +} + +func (s *DockerSuite) TestEventsFilterLabels(c *check.C) { + since := daemonUnixTime(c) + label := "io.docker.testing=foo" + + out, _ := dockerCmd(c, "run", "-d", "-l", label, "busybox:latest", "true") + container1 := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "run", "-d", "busybox", "true") + container2 := strings.TrimSpace(out) + + out, _ = dockerCmd( + c, + "events", + "--since", since, + "--until", daemonUnixTime(c), + "--filter", fmt.Sprintf("label=%s", label)) + + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(events), checker.Equals, 3) + + for _, e := range events { + c.Assert(e, checker.Contains, container1) + c.Assert(e, checker.Not(checker.Contains), container2) + } +} + +func (s *DockerSuite) TestEventsFilterImageLabels(c *check.C) { + since := daemonUnixTime(c) + name := "labelfiltertest" + label := "io.docker.testing=image" + + // Build a test image. 
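+	// Only events carrying the LABEL above should survive the label= filter:
+	// one from the build itself and one per tag of the labelled image, while
+	// tag3 (tagged from plain busybox) should be excluded.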
+ buildImageSuccessfully(c, name, build.WithDockerfile(fmt.Sprintf(` + FROM busybox:latest + LABEL %s`, label))) + dockerCmd(c, "tag", name, "labelfiltertest:tag1") + dockerCmd(c, "tag", name, "labelfiltertest:tag2") + dockerCmd(c, "tag", "busybox:latest", "labelfiltertest:tag3") + + out, _ := dockerCmd( + c, + "events", + "--since", since, + "--until", daemonUnixTime(c), + "--filter", fmt.Sprintf("label=%s", label), + "--filter", "type=image") + + events := strings.Split(strings.TrimSpace(out), "\n") + + // 2 events from the "docker tag" command, another one is from "docker build" + c.Assert(events, checker.HasLen, 3, check.Commentf("Events == %s", events)) + for _, e := range events { + c.Assert(e, checker.Contains, "labelfiltertest") + } +} + +func (s *DockerSuite) TestEventsFilterContainer(c *check.C) { + since := daemonUnixTime(c) + nameID := make(map[string]string) + + for _, name := range []string{"container_1", "container_2"} { + dockerCmd(c, "run", "--name", name, "busybox", "true") + id := inspectField(c, name, "Id") + nameID[name] = id + } + + until := daemonUnixTime(c) + + checkEvents := func(id string, events []string) error { + if len(events) != 4 { // create, attach, start, die + return fmt.Errorf("expected 4 events, got %v", events) + } + for _, event := range events { + matches := eventstestutils.ScanMap(event) + if !matchEventID(matches, id) { + return fmt.Errorf("expected event for container id %s: %s - parsed container id: %s", id, event, matches["id"]) + } + } + return nil + } + + for name, ID := range nameID { + // filter by names + out, _ := dockerCmd(c, "events", "--since", since, "--until", until, "--filter", "container="+name) + events := strings.Split(strings.TrimSuffix(out, "\n"), "\n") + c.Assert(checkEvents(ID, events), checker.IsNil) + + // filter by ID's + out, _ = dockerCmd(c, "events", "--since", since, "--until", until, "--filter", "container="+ID) + events = strings.Split(strings.TrimSuffix(out, "\n"), "\n") + c.Assert(checkEvents(ID, events), checker.IsNil) + } +} + +func (s *DockerSuite) TestEventsCommit(c *check.C) { + // Problematic on Windows as cannot commit a running container + testRequires(c, DaemonIsLinux) + + out := runSleepingContainer(c) + cID := strings.TrimSpace(out) + cli.WaitRun(c, cID) + + cli.DockerCmd(c, "commit", "-m", "test", cID) + cli.DockerCmd(c, "stop", cID) + cli.WaitExited(c, cID, 5*time.Second) + + until := daemonUnixTime(c) + out = cli.DockerCmd(c, "events", "-f", "container="+cID, "--until="+until).Combined() + c.Assert(out, checker.Contains, "commit", check.Commentf("Missing 'commit' log event")) +} + +func (s *DockerSuite) TestEventsCopy(c *check.C) { + // Build a test image. + buildImageSuccessfully(c, "cpimg", build.WithDockerfile(` + FROM busybox + RUN echo HI > /file`)) + id := getIDByName(c, "cpimg") + + // Create an empty test file. 
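+	// The file is only a host-side endpoint for the two copies below: the
+	// container-to-host copy should emit an "archive-path" event and the
+	// host-to-container copy an "extract-to-dir" event.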
+ tempFile, err := ioutil.TempFile("", "test-events-copy-") + c.Assert(err, checker.IsNil) + defer os.Remove(tempFile.Name()) + + c.Assert(tempFile.Close(), checker.IsNil) + + dockerCmd(c, "create", "--name=cptest", id) + + dockerCmd(c, "cp", "cptest:/file", tempFile.Name()) + + until := daemonUnixTime(c) + out, _ := dockerCmd(c, "events", "--since=0", "-f", "container=cptest", "--until="+until) + c.Assert(out, checker.Contains, "archive-path", check.Commentf("Missing 'archive-path' log event\n")) + + dockerCmd(c, "cp", tempFile.Name(), "cptest:/filecopy") + + until = daemonUnixTime(c) + out, _ = dockerCmd(c, "events", "-f", "container=cptest", "--until="+until) + c.Assert(out, checker.Contains, "extract-to-dir", check.Commentf("Missing 'extract-to-dir' log event")) +} + +func (s *DockerSuite) TestEventsResize(c *check.C) { + out := runSleepingContainer(c, "-d") + cID := strings.TrimSpace(out) + c.Assert(waitRun(cID), checker.IsNil) + + endpoint := "/containers/" + cID + "/resize?h=80&w=24" + status, _, err := request.SockRequest("POST", endpoint, nil, daemonHost()) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "stop", cID) + + until := daemonUnixTime(c) + out, _ = dockerCmd(c, "events", "-f", "container="+cID, "--until="+until) + c.Assert(out, checker.Contains, "resize", check.Commentf("Missing 'resize' log event")) +} + +func (s *DockerSuite) TestEventsAttach(c *check.C) { + // TODO Windows CI: Figure out why this test fails intermittently (TP5). + testRequires(c, DaemonIsLinux) + + out := cli.DockerCmd(c, "run", "-di", "busybox", "cat").Combined() + cID := strings.TrimSpace(out) + cli.WaitRun(c, cID) + + cmd := exec.Command(dockerBinary, "attach", cID) + stdin, err := cmd.StdinPipe() + c.Assert(err, checker.IsNil) + defer stdin.Close() + stdout, err := cmd.StdoutPipe() + c.Assert(err, checker.IsNil) + defer stdout.Close() + c.Assert(cmd.Start(), checker.IsNil) + defer cmd.Process.Kill() + + // Make sure we're done attaching by writing/reading some stuff + _, err = stdin.Write([]byte("hello\n")) + c.Assert(err, checker.IsNil) + out, err = bufio.NewReader(stdout).ReadString('\n') + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "hello", check.Commentf("expected 'hello'")) + + c.Assert(stdin.Close(), checker.IsNil) + + cli.DockerCmd(c, "kill", cID) + cli.WaitExited(c, cID, 5*time.Second) + + until := daemonUnixTime(c) + out = cli.DockerCmd(c, "events", "-f", "container="+cID, "--until="+until).Combined() + c.Assert(out, checker.Contains, "attach", check.Commentf("Missing 'attach' log event")) +} + +func (s *DockerSuite) TestEventsRename(c *check.C) { + out, _ := dockerCmd(c, "run", "--name", "oldName", "busybox", "true") + cID := strings.TrimSpace(out) + dockerCmd(c, "rename", "oldName", "newName") + + until := daemonUnixTime(c) + // filter by the container id because the name in the event will be the new name. 
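+	// (Filtering on "oldName" would find nothing: events always report the
+	// container's current name, so the id is the only stable key here.)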
+ out, _ = dockerCmd(c, "events", "-f", "container="+cID, "--until", until) + c.Assert(out, checker.Contains, "rename", check.Commentf("Missing 'rename' log event\n")) +} + +func (s *DockerSuite) TestEventsTop(c *check.C) { + // Problematic on Windows as Windows does not support top + testRequires(c, DaemonIsLinux) + + out := runSleepingContainer(c, "-d") + cID := strings.TrimSpace(out) + c.Assert(waitRun(cID), checker.IsNil) + + dockerCmd(c, "top", cID) + dockerCmd(c, "stop", cID) + + until := daemonUnixTime(c) + out, _ = dockerCmd(c, "events", "-f", "container="+cID, "--until="+until) + c.Assert(out, checker.Contains, " top", check.Commentf("Missing 'top' log event")) +} + +// #14316 +func (s *DockerRegistrySuite) TestEventsImageFilterPush(c *check.C) { + // Problematic to port for Windows CI during TP5 timeframe until + // supporting push + testRequires(c, DaemonIsLinux) + testRequires(c, Network) + repoName := fmt.Sprintf("%v/dockercli/testf", privateRegistryURL) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + cID := strings.TrimSpace(out) + c.Assert(waitRun(cID), checker.IsNil) + + dockerCmd(c, "commit", cID, repoName) + dockerCmd(c, "stop", cID) + dockerCmd(c, "push", repoName) + + until := daemonUnixTime(c) + out, _ = dockerCmd(c, "events", "-f", "image="+repoName, "-f", "event=push", "--until", until) + c.Assert(out, checker.Contains, repoName, check.Commentf("Missing 'push' log event for %s", repoName)) +} + +func (s *DockerSuite) TestEventsFilterType(c *check.C) { + since := daemonUnixTime(c) + name := "labelfiltertest" + label := "io.docker.testing=image" + + // Build a test image. + buildImageSuccessfully(c, name, build.WithDockerfile(fmt.Sprintf(` + FROM busybox:latest + LABEL %s`, label))) + dockerCmd(c, "tag", name, "labelfiltertest:tag1") + dockerCmd(c, "tag", name, "labelfiltertest:tag2") + dockerCmd(c, "tag", "busybox:latest", "labelfiltertest:tag3") + + out, _ := dockerCmd( + c, + "events", + "--since", since, + "--until", daemonUnixTime(c), + "--filter", fmt.Sprintf("label=%s", label), + "--filter", "type=image") + + events := strings.Split(strings.TrimSpace(out), "\n") + + // 2 events from the "docker tag" command, another one is from "docker build" + c.Assert(events, checker.HasLen, 3, check.Commentf("Events == %s", events)) + for _, e := range events { + c.Assert(e, checker.Contains, "labelfiltertest") + } + + out, _ = dockerCmd( + c, + "events", + "--since", since, + "--until", daemonUnixTime(c), + "--filter", fmt.Sprintf("label=%s", label), + "--filter", "type=container") + events = strings.Split(strings.TrimSpace(out), "\n") + + // Events generated by the container that builds the image + c.Assert(events, checker.HasLen, 3, check.Commentf("Events == %s", events)) + + out, _ = dockerCmd( + c, + "events", + "--since", since, + "--until", daemonUnixTime(c), + "--filter", "type=network") + events = strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(events), checker.GreaterOrEqualThan, 1, check.Commentf("Events == %s", events)) +} + +// #25798 +func (s *DockerSuite) TestEventsSpecialFiltersWithExecCreate(c *check.C) { + since := daemonUnixTime(c) + runSleepingContainer(c, "--name", "test-container", "-d") + waitRun("test-container") + + dockerCmd(c, "exec", "test-container", "echo", "hello-world") + + out, _ := dockerCmd( + c, + "events", + "--since", since, + "--until", daemonUnixTime(c), + "--filter", + "event='exec_create: echo hello-world'", + ) + + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(events), checker.Equals, 1, 
check.Commentf(out))
+
+	out, _ = dockerCmd(
+		c,
+		"events",
+		"--since", since,
+		"--until", daemonUnixTime(c),
+		"--filter",
+		"event=exec_create",
+	)
+	// re-parse the second query's output before asserting on it
+	events = strings.Split(strings.TrimSpace(out), "\n")
+	c.Assert(len(events), checker.Equals, 1, check.Commentf(out))
+}
+
+func (s *DockerSuite) TestEventsFilterImageInContainerAction(c *check.C) {
+	since := daemonUnixTime(c)
+	dockerCmd(c, "run", "--name", "test-container", "-d", "busybox", "true")
+	waitRun("test-container")
+
+	out, _ := dockerCmd(c, "events", "--filter", "image=busybox", "--since", since, "--until", daemonUnixTime(c))
+	events := strings.Split(strings.TrimSpace(out), "\n")
+	c.Assert(len(events), checker.GreaterThan, 1, check.Commentf(out))
+}
+
+func (s *DockerSuite) TestEventsContainerRestart(c *check.C) {
+	dockerCmd(c, "run", "-d", "--name=testEvent", "--restart=on-failure:3", "busybox", "false")
+
+	// wait until the restart policy gives up and testEvent stays stopped.
+	waitTime := 10 * time.Second
+	if testEnv.DaemonPlatform() == "windows" {
+		// Windows takes longer...
+		waitTime = 90 * time.Second
+	}
+
+	err := waitInspect("testEvent", "{{ .State.Restarting }} {{ .State.Running }}", "false false", waitTime)
+	c.Assert(err, checker.IsNil)
+
+	var (
+		createCount int
+		startCount  int
+		dieCount    int
+	)
+	out, _ := dockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c), "-f", "container=testEvent")
+	events := strings.Split(strings.TrimSpace(out), "\n")
+
+	nEvents := len(events)
+	c.Assert(nEvents, checker.GreaterOrEqualThan, 1) //Missing expected event
+	actions := eventActionsByIDAndType(c, events, "testEvent", "container")
+
+	for _, a := range actions {
+		switch a {
+		case "create":
+			createCount++
+		case "start":
+			startCount++
+		case "die":
+			dieCount++
+		}
+	}
+	c.Assert(createCount, checker.Equals, 1, check.Commentf("testEvent should be created 1 time: %v", actions))
+	c.Assert(startCount, checker.Equals, 4, check.Commentf("testEvent should start 4 times: %v", actions))
+	c.Assert(dieCount, checker.Equals, 4, check.Commentf("testEvent should die 4 times: %v", actions))
+}
+
+func (s *DockerSuite) TestEventsSinceInTheFuture(c *check.C) {
+	dockerCmd(c, "run", "--name", "test-container", "-d", "busybox", "true")
+	waitRun("test-container")
+
+	since := daemonTime(c)
+	until := since.Add(time.Duration(-24) * time.Hour)
+	out, _, err := dockerCmdWithError("events", "--filter", "image=busybox", "--since", parseEventTime(since), "--until", parseEventTime(until))
+
+	c.Assert(err, checker.NotNil)
+	c.Assert(out, checker.Contains, "cannot be after `until`")
+}
+
+func (s *DockerSuite) TestEventsUntilInThePast(c *check.C) {
+	since := daemonUnixTime(c)
+
+	dockerCmd(c, "run", "--name", "test-container", "-d", "busybox", "true")
+	waitRun("test-container")
+
+	until := daemonUnixTime(c)
+
+	dockerCmd(c, "run", "--name", "test-container2", "-d", "busybox", "true")
+	waitRun("test-container2")
+
+	out, _ := dockerCmd(c, "events", "--filter", "image=busybox", "--since", since, "--until", until)
+
+	c.Assert(out, checker.Not(checker.Contains), "test-container2")
+	c.Assert(out, checker.Contains, "test-container")
+}
+
+func (s *DockerSuite) TestEventsFormat(c *check.C) {
+	since := daemonUnixTime(c)
+	dockerCmd(c, "run", "--rm", "busybox", "true")
+	dockerCmd(c, "run", "--rm", "busybox", "true")
+	out, _ := dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), "--format", "{{json .}}")
+	dec := json.NewDecoder(strings.NewReader(out))
+	// make sure we got 2 start events
+	startCount := 0
+	for {
+		var err error
+		var ev eventtypes.Message
+		if err = dec.Decode(&ev); err
== io.EOF { + break + } + c.Assert(err, checker.IsNil) + if ev.Status == "start" { + startCount++ + } + } + + c.Assert(startCount, checker.Equals, 2, check.Commentf("should have had 2 start events but had %d, out: %s", startCount, out)) +} + +func (s *DockerSuite) TestEventsFormatBadFunc(c *check.C) { + // make sure it fails immediately, without receiving any event + result := dockerCmdWithResult("events", "--format", "{{badFuncString .}}") + c.Assert(result, icmd.Matches, icmd.Expected{ + Error: "exit status 64", + ExitCode: 64, + Err: "Error parsing format: template: :1: function \"badFuncString\" not defined", + }) +} + +func (s *DockerSuite) TestEventsFormatBadField(c *check.C) { + // make sure it fails immediately, without receiving any event + result := dockerCmdWithResult("events", "--format", "{{.badFieldString}}") + c.Assert(result, icmd.Matches, icmd.Expected{ + Error: "exit status 64", + ExitCode: 64, + Err: "Error parsing format: template: :1:2: executing \"\" at <.badFieldString>: can't evaluate field badFieldString in type *events.Message", + }) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_events_unix_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_events_unix_test.go new file mode 100644 index 000000000..1f87d5fe8 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_events_unix_test.go @@ -0,0 +1,485 @@ +// +build !windows + +package main + +import ( + "bufio" + "bytes" + "fmt" + "io/ioutil" + "os" + "os/exec" + "strings" + "time" + "unicode" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/go-check/check" + "github.com/kr/pty" + "golang.org/x/sys/unix" +) + +// #5979 +func (s *DockerSuite) TestEventsRedirectStdout(c *check.C) { + since := daemonUnixTime(c) + dockerCmd(c, "run", "busybox", "true") + + file, err := ioutil.TempFile("", "") + c.Assert(err, checker.IsNil, check.Commentf("could not create temp file")) + defer os.Remove(file.Name()) + + command := fmt.Sprintf("%s events --since=%s --until=%s > %s", dockerBinary, since, daemonUnixTime(c), file.Name()) + _, tty, err := pty.Open() + c.Assert(err, checker.IsNil, check.Commentf("Could not open pty")) + cmd := exec.Command("sh", "-c", command) + cmd.Stdin = tty + cmd.Stdout = tty + cmd.Stderr = tty + c.Assert(cmd.Run(), checker.IsNil, check.Commentf("run err for command %q", command)) + + scanner := bufio.NewScanner(file) + for scanner.Scan() { + for _, ch := range scanner.Text() { + c.Assert(unicode.IsControl(ch), checker.False, check.Commentf("found control character %v", []byte(string(ch)))) + } + } + c.Assert(scanner.Err(), checker.IsNil, check.Commentf("Scan err for command %q", command)) + +} + +func (s *DockerSuite) TestEventsOOMDisableFalse(c *check.C) { + testRequires(c, DaemonIsLinux, oomControl, memoryLimitSupport, swapMemorySupport) + + errChan := make(chan error) + go func() { + defer close(errChan) + out, exitCode, _ := dockerCmdWithError("run", "--name", "oomFalse", "-m", "10MB", "busybox", "sh", "-c", "x=a; while true; do x=$x$x$x$x; done") + if expected := 137; exitCode != expected { + errChan <- fmt.Errorf("wrong exit code for OOM container: expected %d, got %d (output: %q)", expected, exitCode, out) + } + }() + select { + case err := <-errChan: + c.Assert(err, checker.IsNil) + case <-time.After(30 * time.Second): + c.Fatal("Timeout waiting for container to die on OOM") + } + + out, _ := dockerCmd(c, "events", "--since=0", "-f", "container=oomFalse", "--until", 
daemonUnixTime(c)) + events := strings.Split(strings.TrimSuffix(out, "\n"), "\n") + nEvents := len(events) + + c.Assert(nEvents, checker.GreaterOrEqualThan, 5) //Missing expected event + c.Assert(parseEventAction(c, events[nEvents-5]), checker.Equals, "create") + c.Assert(parseEventAction(c, events[nEvents-4]), checker.Equals, "attach") + c.Assert(parseEventAction(c, events[nEvents-3]), checker.Equals, "start") + c.Assert(parseEventAction(c, events[nEvents-2]), checker.Equals, "oom") + c.Assert(parseEventAction(c, events[nEvents-1]), checker.Equals, "die") +} + +func (s *DockerSuite) TestEventsOOMDisableTrue(c *check.C) { + testRequires(c, DaemonIsLinux, oomControl, memoryLimitSupport, NotArm, swapMemorySupport) + + errChan := make(chan error) + observer, err := newEventObserver(c) + c.Assert(err, checker.IsNil) + err = observer.Start() + c.Assert(err, checker.IsNil) + defer observer.Stop() + + go func() { + defer close(errChan) + out, exitCode, _ := dockerCmdWithError("run", "--oom-kill-disable=true", "--name", "oomTrue", "-m", "10MB", "busybox", "sh", "-c", "x=a; while true; do x=$x$x$x$x; done") + if expected := 137; exitCode != expected { + errChan <- fmt.Errorf("wrong exit code for OOM container: expected %d, got %d (output: %q)", expected, exitCode, out) + } + }() + + c.Assert(waitRun("oomTrue"), checker.IsNil) + defer dockerCmd(c, "kill", "oomTrue") + containerID := inspectField(c, "oomTrue", "Id") + + testActions := map[string]chan bool{ + "oom": make(chan bool), + } + + matcher := matchEventLine(containerID, "container", testActions) + processor := processEventMatch(testActions) + go observer.Match(matcher, processor) + + select { + case <-time.After(20 * time.Second): + observer.CheckEventError(c, containerID, "oom", matcher) + case <-testActions["oom"]: + // ignore, done + case errRun := <-errChan: + if errRun != nil { + c.Fatalf("%v", errRun) + } else { + c.Fatalf("container should be still running but it's not") + } + } + + status := inspectField(c, "oomTrue", "State.Status") + c.Assert(strings.TrimSpace(status), checker.Equals, "running", check.Commentf("container should be still running")) +} + +// #18453 +func (s *DockerSuite) TestEventsContainerFilterByName(c *check.C) { + testRequires(c, DaemonIsLinux) + cOut, _ := dockerCmd(c, "run", "--name=foo", "-d", "busybox", "top") + c1 := strings.TrimSpace(cOut) + waitRun("foo") + cOut, _ = dockerCmd(c, "run", "--name=bar", "-d", "busybox", "top") + c2 := strings.TrimSpace(cOut) + waitRun("bar") + out, _ := dockerCmd(c, "events", "-f", "container=foo", "--since=0", "--until", daemonUnixTime(c)) + c.Assert(out, checker.Contains, c1, check.Commentf(out)) + c.Assert(out, checker.Not(checker.Contains), c2, check.Commentf(out)) +} + +// #18453 +func (s *DockerSuite) TestEventsContainerFilterBeforeCreate(c *check.C) { + testRequires(c, DaemonIsLinux) + buf := &bytes.Buffer{} + cmd := exec.Command(dockerBinary, "events", "-f", "container=foo", "--since=0") + cmd.Stdout = buf + c.Assert(cmd.Start(), check.IsNil) + defer cmd.Wait() + defer cmd.Process.Kill() + + // Sleep for a second to make sure we are testing the case where events are listened before container starts. 
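+	// The poll below then gives the already-running `docker events`
+	// subscription up to ~15s (30 retries, 500ms apart) to report the
+	// container that was created after the listener attached.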
+ time.Sleep(time.Second) + id, _ := dockerCmd(c, "run", "--name=foo", "-d", "busybox", "top") + cID := strings.TrimSpace(id) + for i := 0; ; i++ { + out := buf.String() + if strings.Contains(out, cID) { + break + } + if i > 30 { + c.Fatalf("Missing event of container (foo, %v), got %q", cID, out) + } + time.Sleep(500 * time.Millisecond) + } +} + +func (s *DockerSuite) TestVolumeEvents(c *check.C) { + testRequires(c, DaemonIsLinux) + + since := daemonUnixTime(c) + + // Observe create/mount volume actions + dockerCmd(c, "volume", "create", "test-event-volume-local") + dockerCmd(c, "run", "--name", "test-volume-container", "--volume", "test-event-volume-local:/foo", "-d", "busybox", "true") + waitRun("test-volume-container") + + // Observe unmount/destroy volume actions + dockerCmd(c, "rm", "-f", "test-volume-container") + dockerCmd(c, "volume", "rm", "test-event-volume-local") + + until := daemonUnixTime(c) + out, _ := dockerCmd(c, "events", "--since", since, "--until", until) + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(events), checker.GreaterThan, 4) + + volumeEvents := eventActionsByIDAndType(c, events, "test-event-volume-local", "volume") + c.Assert(volumeEvents, checker.HasLen, 4) + c.Assert(volumeEvents[0], checker.Equals, "create") + c.Assert(volumeEvents[1], checker.Equals, "mount") + c.Assert(volumeEvents[2], checker.Equals, "unmount") + c.Assert(volumeEvents[3], checker.Equals, "destroy") +} + +func (s *DockerSuite) TestNetworkEvents(c *check.C) { + testRequires(c, DaemonIsLinux) + + since := daemonUnixTime(c) + + // Observe create/connect network actions + dockerCmd(c, "network", "create", "test-event-network-local") + dockerCmd(c, "run", "--name", "test-network-container", "--net", "test-event-network-local", "-d", "busybox", "true") + waitRun("test-network-container") + + // Observe disconnect/destroy network actions + dockerCmd(c, "rm", "-f", "test-network-container") + dockerCmd(c, "network", "rm", "test-event-network-local") + + until := daemonUnixTime(c) + out, _ := dockerCmd(c, "events", "--since", since, "--until", until) + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(events), checker.GreaterThan, 4) + + netEvents := eventActionsByIDAndType(c, events, "test-event-network-local", "network") + c.Assert(netEvents, checker.HasLen, 4) + c.Assert(netEvents[0], checker.Equals, "create") + c.Assert(netEvents[1], checker.Equals, "connect") + c.Assert(netEvents[2], checker.Equals, "disconnect") + c.Assert(netEvents[3], checker.Equals, "destroy") +} + +func (s *DockerSuite) TestEventsContainerWithMultiNetwork(c *check.C) { + testRequires(c, DaemonIsLinux) + + // Observe create/connect network actions + dockerCmd(c, "network", "create", "test-event-network-local-1") + dockerCmd(c, "network", "create", "test-event-network-local-2") + dockerCmd(c, "run", "--name", "test-network-container", "--net", "test-event-network-local-1", "-td", "busybox", "sh") + waitRun("test-network-container") + dockerCmd(c, "network", "connect", "test-event-network-local-2", "test-network-container") + + since := daemonUnixTime(c) + + dockerCmd(c, "stop", "-t", "1", "test-network-container") + + until := daemonUnixTime(c) + out, _ := dockerCmd(c, "events", "--since", since, "--until", until, "-f", "type=network") + netEvents := strings.Split(strings.TrimSpace(out), "\n") + + // received two network disconnect events + c.Assert(len(netEvents), checker.Equals, 2) + c.Assert(netEvents[0], checker.Contains, "disconnect") + c.Assert(netEvents[1], 
checker.Contains, "disconnect") + + //both networks appeared in the network event output + c.Assert(out, checker.Contains, "test-event-network-local-1") + c.Assert(out, checker.Contains, "test-event-network-local-2") +} + +func (s *DockerSuite) TestEventsStreaming(c *check.C) { + testRequires(c, DaemonIsLinux) + + observer, err := newEventObserver(c) + c.Assert(err, checker.IsNil) + err = observer.Start() + c.Assert(err, checker.IsNil) + defer observer.Stop() + + out, _ := dockerCmd(c, "run", "-d", "busybox:latest", "true") + containerID := strings.TrimSpace(out) + + testActions := map[string]chan bool{ + "create": make(chan bool, 1), + "start": make(chan bool, 1), + "die": make(chan bool, 1), + "destroy": make(chan bool, 1), + } + + matcher := matchEventLine(containerID, "container", testActions) + processor := processEventMatch(testActions) + go observer.Match(matcher, processor) + + select { + case <-time.After(5 * time.Second): + observer.CheckEventError(c, containerID, "create", matcher) + case <-testActions["create"]: + // ignore, done + } + + select { + case <-time.After(5 * time.Second): + observer.CheckEventError(c, containerID, "start", matcher) + case <-testActions["start"]: + // ignore, done + } + + select { + case <-time.After(5 * time.Second): + observer.CheckEventError(c, containerID, "die", matcher) + case <-testActions["die"]: + // ignore, done + } + + dockerCmd(c, "rm", containerID) + + select { + case <-time.After(5 * time.Second): + observer.CheckEventError(c, containerID, "destroy", matcher) + case <-testActions["destroy"]: + // ignore, done + } +} + +func (s *DockerSuite) TestEventsImageUntagDelete(c *check.C) { + testRequires(c, DaemonIsLinux) + + observer, err := newEventObserver(c) + c.Assert(err, checker.IsNil) + err = observer.Start() + c.Assert(err, checker.IsNil) + defer observer.Stop() + + name := "testimageevents" + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM scratch + MAINTAINER "docker"`)) + imageID := getIDByName(c, name) + c.Assert(deleteImages(name), checker.IsNil) + + testActions := map[string]chan bool{ + "untag": make(chan bool, 1), + "delete": make(chan bool, 1), + } + + matcher := matchEventLine(imageID, "image", testActions) + processor := processEventMatch(testActions) + go observer.Match(matcher, processor) + + select { + case <-time.After(10 * time.Second): + observer.CheckEventError(c, imageID, "untag", matcher) + case <-testActions["untag"]: + // ignore, done + } + + select { + case <-time.After(10 * time.Second): + observer.CheckEventError(c, imageID, "delete", matcher) + case <-testActions["delete"]: + // ignore, done + } +} + +func (s *DockerSuite) TestEventsFilterVolumeAndNetworkType(c *check.C) { + testRequires(c, DaemonIsLinux) + + since := daemonUnixTime(c) + + dockerCmd(c, "network", "create", "test-event-network-type") + dockerCmd(c, "volume", "create", "test-event-volume-type") + + out, _ := dockerCmd(c, "events", "--filter", "type=volume", "--filter", "type=network", "--since", since, "--until", daemonUnixTime(c)) + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(events), checker.GreaterOrEqualThan, 2, check.Commentf(out)) + + networkActions := eventActionsByIDAndType(c, events, "test-event-network-type", "network") + volumeActions := eventActionsByIDAndType(c, events, "test-event-volume-type", "volume") + + c.Assert(volumeActions[0], checker.Equals, "create") + c.Assert(networkActions[0], checker.Equals, "create") +} + +func (s *DockerSuite) TestEventsFilterVolumeID(c *check.C) { + 
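// A volume= filter should narrow the stream to the single "create" event
+	// for the named volume, with its driver among the event attributes.
+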
testRequires(c, DaemonIsLinux) + + since := daemonUnixTime(c) + + dockerCmd(c, "volume", "create", "test-event-volume-id") + out, _ := dockerCmd(c, "events", "--filter", "volume=test-event-volume-id", "--since", since, "--until", daemonUnixTime(c)) + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(events, checker.HasLen, 1) + + c.Assert(events[0], checker.Contains, "test-event-volume-id") + c.Assert(events[0], checker.Contains, "driver=local") +} + +func (s *DockerSuite) TestEventsFilterNetworkID(c *check.C) { + testRequires(c, DaemonIsLinux) + + since := daemonUnixTime(c) + + dockerCmd(c, "network", "create", "test-event-network-local") + out, _ := dockerCmd(c, "events", "--filter", "network=test-event-network-local", "--since", since, "--until", daemonUnixTime(c)) + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(events, checker.HasLen, 1) + + c.Assert(events[0], checker.Contains, "test-event-network-local") + c.Assert(events[0], checker.Contains, "type=bridge") +} + +func (s *DockerDaemonSuite) TestDaemonEvents(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + // daemon config file + configFilePath := "test.json" + configFile, err := os.Create(configFilePath) + c.Assert(err, checker.IsNil) + defer os.Remove(configFilePath) + + daemonConfig := `{"labels":["foo=bar"]}` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + s.d.Start(c, fmt.Sprintf("--config-file=%s", configFilePath)) + + // Get daemon ID + out, err := s.d.Cmd("info") + c.Assert(err, checker.IsNil) + daemonID := "" + daemonName := "" + for _, line := range strings.Split(out, "\n") { + if strings.HasPrefix(line, "ID: ") { + daemonID = strings.TrimPrefix(line, "ID: ") + } else if strings.HasPrefix(line, "Name: ") { + daemonName = strings.TrimPrefix(line, "Name: ") + } + } + c.Assert(daemonID, checker.Not(checker.Equals), "") + + configFile, err = os.Create(configFilePath) + c.Assert(err, checker.IsNil) + daemonConfig = `{"max-concurrent-downloads":1,"labels":["bar=foo"], "shutdown-timeout": 10}` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + + c.Assert(s.d.Signal(unix.SIGHUP), checker.IsNil) + + time.Sleep(3 * time.Second) + + out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c)) + c.Assert(err, checker.IsNil) + + c.Assert(out, checker.Contains, fmt.Sprintf("daemon reload %s (allow-nondistributable-artifacts=[], cluster-advertise=, cluster-store=, cluster-store-opts={}, debug=true, default-runtime=runc, default-shm-size=67108864, insecure-registries=[], labels=[\"bar=foo\"], live-restore=false, max-concurrent-downloads=1, max-concurrent-uploads=5, name=%s, registry-mirrors=[], runtimes=runc:{docker-runc []}, shutdown-timeout=10)", daemonID, daemonName)) +} + +func (s *DockerDaemonSuite) TestDaemonEventsWithFilters(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + // daemon config file + configFilePath := "test.json" + configFile, err := os.Create(configFilePath) + c.Assert(err, checker.IsNil) + defer os.Remove(configFilePath) + + daemonConfig := `{"labels":["foo=bar"]}` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + s.d.Start(c, fmt.Sprintf("--config-file=%s", configFilePath)) + + // Get daemon ID + out, err := s.d.Cmd("info") + c.Assert(err, checker.IsNil) + daemonID := "" + daemonName := "" + for _, line := range strings.Split(out, "\n") { + if strings.HasPrefix(line, "ID: ") { + daemonID = strings.TrimPrefix(line, "ID: ") + } else if strings.HasPrefix(line, "Name: ") { + 
daemonName = strings.TrimPrefix(line, "Name: ") + } + } + c.Assert(daemonID, checker.Not(checker.Equals), "") + + c.Assert(s.d.Signal(unix.SIGHUP), checker.IsNil) + + time.Sleep(3 * time.Second) + + out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c), "--filter", fmt.Sprintf("daemon=%s", daemonID)) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, fmt.Sprintf("daemon reload %s", daemonID)) + + out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c), "--filter", fmt.Sprintf("daemon=%s", daemonName)) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, fmt.Sprintf("daemon reload %s", daemonID)) + + out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c), "--filter", "daemon=foo") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), fmt.Sprintf("daemon reload %s", daemonID)) + + out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c), "--filter", "type=daemon") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, fmt.Sprintf("daemon reload %s", daemonID)) + + out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c), "--filter", "type=container") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), fmt.Sprintf("daemon reload %s", daemonID)) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_exec_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_exec_test.go new file mode 100644 index 000000000..be228ab2d --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_exec_test.go @@ -0,0 +1,602 @@ +// +build !test_no_exec + +package main + +import ( + "bufio" + "fmt" + "net/http" + "os" + "os/exec" + "reflect" + "runtime" + "sort" + "strings" + "sync" + "time" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/integration-cli/request" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestExec(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "sh", "-c", "echo test > /tmp/file && top") + c.Assert(waitRun(strings.TrimSpace(out)), check.IsNil) + + out, _ = dockerCmd(c, "exec", "testing", "cat", "/tmp/file") + out = strings.Trim(out, "\r\n") + c.Assert(out, checker.Equals, "test") + +} + +func (s *DockerSuite) TestExecInteractive(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "sh", "-c", "echo test > /tmp/file && top") + + execCmd := exec.Command(dockerBinary, "exec", "-i", "testing", "sh") + stdin, err := execCmd.StdinPipe() + c.Assert(err, checker.IsNil) + stdout, err := execCmd.StdoutPipe() + c.Assert(err, checker.IsNil) + + err = execCmd.Start() + c.Assert(err, checker.IsNil) + _, err = stdin.Write([]byte("cat /tmp/file\n")) + c.Assert(err, checker.IsNil) + + r := bufio.NewReader(stdout) + line, err := r.ReadString('\n') + c.Assert(err, checker.IsNil) + line = strings.TrimSpace(line) + c.Assert(line, checker.Equals, "test") + err = stdin.Close() + c.Assert(err, checker.IsNil) + errChan := make(chan error) + go func() { + errChan <- execCmd.Wait() + close(errChan) + }() + select { + case err := <-errChan: + c.Assert(err, checker.IsNil) + case <-time.After(1 * time.Second): + c.Fatal("docker exec failed to exit on stdin close") + } + +} + +func (s *DockerSuite) 
TestExecAfterContainerRestart(c *check.C) {
+	out := runSleepingContainer(c)
+	cleanedContainerID := strings.TrimSpace(out)
+	c.Assert(waitRun(cleanedContainerID), check.IsNil)
+	dockerCmd(c, "restart", cleanedContainerID)
+	c.Assert(waitRun(cleanedContainerID), check.IsNil)
+
+	out, _ = dockerCmd(c, "exec", cleanedContainerID, "echo", "hello")
+	outStr := strings.TrimSpace(out)
+	c.Assert(outStr, checker.Equals, "hello")
+}
+
+func (s *DockerDaemonSuite) TestExecAfterDaemonRestart(c *check.C) {
+	// TODO Windows CI: Requires a little work to get this ported.
+	testRequires(c, DaemonIsLinux, SameHostDaemon)
+	s.d.StartWithBusybox(c)
+
+	out, err := s.d.Cmd("run", "-d", "--name", "top", "-p", "80", "busybox:latest", "top")
+	c.Assert(err, checker.IsNil, check.Commentf("Could not run top: %s", out))
+
+	s.d.Restart(c)
+
+	out, err = s.d.Cmd("start", "top")
+	c.Assert(err, checker.IsNil, check.Commentf("Could not start top after daemon restart: %s", out))
+
+	out, err = s.d.Cmd("exec", "top", "echo", "hello")
+	c.Assert(err, checker.IsNil, check.Commentf("Could not exec on container top: %s", out))
+
+	outStr := strings.TrimSpace(string(out))
+	c.Assert(outStr, checker.Equals, "hello")
+}
+
+// Regression test for #9155, #9044
+func (s *DockerSuite) TestExecEnv(c *check.C) {
+	// TODO Windows CI: This one is interesting and may just end up being a feature
+	// difference between Windows and Linux. On Windows, the environment is passed
+	// into the process that is launched, not into the machine environment. Hence
+	// a subsequent exec will not have LALA set.
+	testRequires(c, DaemonIsLinux)
+	runSleepingContainer(c, "-e", "LALA=value1", "-e", "LALA=value2", "-d", "--name", "testing")
+	c.Assert(waitRun("testing"), check.IsNil)
+
+	out, _ := dockerCmd(c, "exec", "testing", "env")
+	c.Assert(out, checker.Not(checker.Contains), "LALA=value1")
+	c.Assert(out, checker.Contains, "LALA=value2")
+	c.Assert(out, checker.Contains, "HOME=/root")
+}
+
+func (s *DockerSuite) TestExecSetEnv(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	runSleepingContainer(c, "-e", "HOME=/root", "-d", "--name", "testing")
+	c.Assert(waitRun("testing"), check.IsNil)
+
+	out, _ := dockerCmd(c, "exec", "-e", "HOME=/another", "-e", "ABC=xyz", "testing", "env")
+	c.Assert(out, checker.Not(checker.Contains), "HOME=/root")
+	c.Assert(out, checker.Contains, "HOME=/another")
+	c.Assert(out, checker.Contains, "ABC=xyz")
+}
+
+func (s *DockerSuite) TestExecExitStatus(c *check.C) {
+	runSleepingContainer(c, "-d", "--name", "top")
+
+	result := icmd.RunCommand(dockerBinary, "exec", "top", "sh", "-c", "exit 23")
+	c.Assert(result, icmd.Matches, icmd.Expected{ExitCode: 23, Error: "exit status 23"})
+}
+
+func (s *DockerSuite) TestExecPausedContainer(c *check.C) {
+	testRequires(c, IsPausable)
+
+	out := runSleepingContainer(c, "-d", "--name", "testing")
+	ContainerID := strings.TrimSpace(out)
+
+	dockerCmd(c, "pause", "testing")
+	out, _, err := dockerCmdWithError("exec", ContainerID, "echo", "hello")
+	c.Assert(err, checker.NotNil, check.Commentf("container should fail to exec new command if it is paused"))
+
+	expected := ContainerID + " is paused, unpause the container before exec"
+	c.Assert(out, checker.Contains, expected, check.Commentf("container should not exec new command if it is paused"))
+}
+
+// regression test for #9476
+func (s *DockerSuite) TestExecTTYCloseStdin(c *check.C) {
+	// TODO Windows CI: This requires some work to port to Windows.
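+	// Closing the exec's stdin must terminate the exec'd process rather than
+	// leave it running in the container; the `docker top` assertions below
+	// fail if a stray cat process survives.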
+ testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "-it", "--name", "exec_tty_stdin", "busybox") + + cmd := exec.Command(dockerBinary, "exec", "-i", "exec_tty_stdin", "cat") + stdinRw, err := cmd.StdinPipe() + c.Assert(err, checker.IsNil) + + stdinRw.Write([]byte("test")) + stdinRw.Close() + + out, _, err := runCommandWithOutput(cmd) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, _ = dockerCmd(c, "top", "exec_tty_stdin") + outArr := strings.Split(out, "\n") + c.Assert(len(outArr), checker.LessOrEqualThan, 3, check.Commentf("exec process left running")) + c.Assert(out, checker.Not(checker.Contains), "nsenter-exec") +} + +func (s *DockerSuite) TestExecTTYWithoutStdin(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "-ti", "busybox") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + errChan := make(chan error) + go func() { + defer close(errChan) + + cmd := exec.Command(dockerBinary, "exec", "-ti", id, "true") + if _, err := cmd.StdinPipe(); err != nil { + errChan <- err + return + } + + expected := "the input device is not a TTY" + if runtime.GOOS == "windows" { + expected += ". If you are using mintty, try prefixing the command with 'winpty'" + } + if out, _, err := runCommandWithOutput(cmd); err == nil { + errChan <- fmt.Errorf("exec should have failed") + return + } else if !strings.Contains(out, expected) { + errChan <- fmt.Errorf("exec failed with error %q: expected %q", out, expected) + return + } + }() + + select { + case err := <-errChan: + c.Assert(err, check.IsNil) + case <-time.After(3 * time.Second): + c.Fatal("exec is running but should have failed") + } +} + +// FIXME(vdemeester) this should be a unit tests on cli/command/container package +func (s *DockerSuite) TestExecParseError(c *check.C) { + // TODO Windows CI: Requires some extra work. Consider copying the + // runSleepingContainer helper to have an exec version. + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name", "top", "busybox", "top") + + // Test normal (non-detached) case first + icmd.RunCommand(dockerBinary, "exec", "top").Assert(c, icmd.Expected{ + ExitCode: 1, + Error: "exit status 1", + Err: "See 'docker exec --help'", + }) +} + +func (s *DockerSuite) TestExecStopNotHanging(c *check.C) { + // TODO Windows CI: Requires some extra work. Consider copying the + // runSleepingContainer helper to have an exec version. 
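+	// `docker stop` must be able to kill a container with a live exec
+	// attached: the stop is raced against a 3-second timer below and the
+	// test fails if it hangs on the outstanding `top` exec.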
+ testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "top") + + err := exec.Command(dockerBinary, "exec", "testing", "top").Start() + c.Assert(err, checker.IsNil) + + type dstop struct { + out []byte + err error + } + + ch := make(chan dstop) + go func() { + out, err := exec.Command(dockerBinary, "stop", "testing").CombinedOutput() + ch <- dstop{out, err} + close(ch) + }() + select { + case <-time.After(3 * time.Second): + c.Fatal("Container stop timed out") + case s := <-ch: + c.Assert(s.err, check.IsNil) + } +} + +func (s *DockerSuite) TestExecCgroup(c *check.C) { + // Not applicable on Windows - using Linux specific functionality + testRequires(c, NotUserNamespace) + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "top") + + out, _ := dockerCmd(c, "exec", "testing", "cat", "/proc/1/cgroup") + containerCgroups := sort.StringSlice(strings.Split(out, "\n")) + + var wg sync.WaitGroup + var mu sync.Mutex + execCgroups := []sort.StringSlice{} + errChan := make(chan error) + // exec a few times concurrently to get consistent failure + for i := 0; i < 5; i++ { + wg.Add(1) + go func() { + out, _, err := dockerCmdWithError("exec", "testing", "cat", "/proc/self/cgroup") + if err != nil { + errChan <- err + return + } + cg := sort.StringSlice(strings.Split(out, "\n")) + + mu.Lock() + execCgroups = append(execCgroups, cg) + mu.Unlock() + wg.Done() + }() + } + wg.Wait() + close(errChan) + + for err := range errChan { + c.Assert(err, checker.IsNil) + } + + for _, cg := range execCgroups { + if !reflect.DeepEqual(cg, containerCgroups) { + fmt.Println("exec cgroups:") + for _, name := range cg { + fmt.Printf(" %s\n", name) + } + + fmt.Println("container cgroups:") + for _, name := range containerCgroups { + fmt.Printf(" %s\n", name) + } + c.Fatal("cgroups mismatched") + } + } +} + +func (s *DockerSuite) TestExecInspectID(c *check.C) { + out := runSleepingContainer(c, "-d") + id := strings.TrimSuffix(out, "\n") + + out = inspectField(c, id, "ExecIDs") + c.Assert(out, checker.Equals, "[]", check.Commentf("ExecIDs should be empty, got: %s", out)) + + // Start an exec, have it block waiting so we can do some checking + cmd := exec.Command(dockerBinary, "exec", id, "sh", "-c", + "while ! 
+
+	err := cmd.Start()
+	c.Assert(err, checker.IsNil, check.Commentf("failed to start the exec cmd"))
+
+	// Give the exec 10 chances/seconds to start then give up and stop the test
+	tries := 10
+	for i := 0; i < tries; i++ {
+		// Since it's still running we should see the exec as part of the container
+		out = strings.TrimSpace(inspectField(c, id, "ExecIDs"))
+
+		if out != "[]" && out != "" {
+			break
+		}
+		c.Assert(i+1, checker.Not(checker.Equals), tries, check.Commentf("ExecIDs still empty after 10 seconds"))
+		time.Sleep(1 * time.Second)
+	}
+
+	// Save execID for later
+	execID, err := inspectFilter(id, "index .ExecIDs 0")
+	c.Assert(err, checker.IsNil, check.Commentf("failed to get the exec id"))
+
+	// End the exec by creating the missing file
+	err = exec.Command(dockerBinary, "exec", id, "sh", "-c", "touch /execid1").Run()
+	c.Assert(err, checker.IsNil, check.Commentf("failed to run the 2nd exec cmd"))
+
+	// Wait for 1st exec to complete
+	cmd.Wait()
+
+	// Give the exec 10 chances/seconds to stop then give up and stop the test
+	for i := 0; i < tries; i++ {
+		// Once the exec exits it should no longer show up in the container's ExecIDs
+		out = strings.TrimSpace(inspectField(c, id, "ExecIDs"))
+
+		if out == "[]" {
+			break
+		}
+		c.Assert(i+1, checker.Not(checker.Equals), tries, check.Commentf("ExecIDs still not empty after 10 seconds"))
+		time.Sleep(1 * time.Second)
+	}
+
+	// But we should still be able to query the execID
+	sc, body, _ := request.SockRequest("GET", "/exec/"+execID+"/json", nil, daemonHost())
+	c.Assert(sc, checker.Equals, http.StatusOK, check.Commentf("received status != 200 OK: %d\n%s", sc, body))
+
+	// Now delete the container and then an 'inspect' on the exec should
+	// result in a 404 (not 'container not running')
+	out, ec := dockerCmd(c, "rm", "-f", id)
+	c.Assert(ec, checker.Equals, 0, check.Commentf("error removing container: %s", out))
+	sc, body, _ = request.SockRequest("GET", "/exec/"+execID+"/json", nil, daemonHost())
+	c.Assert(sc, checker.Equals, http.StatusNotFound, check.Commentf("received status != 404: %d\n%s", sc, body))
+}
+
+func (s *DockerSuite) TestLinksPingLinkedContainersOnRename(c *check.C) {
+	// Problematic on Windows as Windows does not support links
+	testRequires(c, DaemonIsLinux)
+	var out string
+	out, _ = dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top")
+	idA := strings.TrimSpace(out)
+	c.Assert(idA, checker.Not(checker.Equals), "", check.Commentf("%s, id should not be empty", out))
+	out, _ = dockerCmd(c, "run", "-d", "--link", "container1:alias1", "--name", "container2", "busybox", "top")
+	idB := strings.TrimSpace(out)
+	c.Assert(idB, checker.Not(checker.Equals), "", check.Commentf("%s, id should not be empty", out))
+
+	dockerCmd(c, "exec", "container2", "ping", "-c", "1", "alias1", "-W", "1")
+	dockerCmd(c, "rename", "container1", "container_new")
+	dockerCmd(c, "exec", "container2", "ping", "-c", "1", "alias1", "-W", "1")
+}
+
+func (s *DockerSuite) TestRunMutableNetworkFiles(c *check.C) {
+	// Not applicable on Windows CI.
+	testRequires(c, SameHostDaemon, DaemonIsLinux)
+	for _, fn := range []string{"resolv.conf", "hosts"} {
+		containers := cli.DockerCmd(c, "ps", "-q", "-a").Combined()
+		if containers != "" {
+			cli.DockerCmd(c, append([]string{"rm", "-fv"}, strings.Split(strings.TrimSpace(containers), "\n")...)...)
+		}
+
+		content := runCommandAndReadContainerFile(c, fn, dockerBinary, "run", "-d", "--name", "c1", "busybox", "sh", "-c", fmt.Sprintf("echo success >/etc/%s && top", fn))
+		c.Assert(strings.TrimSpace(string(content)), checker.Equals, "success", check.Commentf("Content was not what was modified in the container: %s", string(content)))
+
+		out, _ := dockerCmd(c, "run", "-d", "--name", "c2", "busybox", "top")
+		contID := strings.TrimSpace(out)
+		netFilePath := containerStorageFile(contID, fn)
+
+		f, err := os.OpenFile(netFilePath, os.O_WRONLY|os.O_SYNC|os.O_APPEND, 0644)
+		c.Assert(err, checker.IsNil)
+
+		if _, err := f.Seek(0, 0); err != nil {
+			f.Close()
+			c.Fatal(err)
+		}
+
+		if err := f.Truncate(0); err != nil {
+			f.Close()
+			c.Fatal(err)
+		}
+
+		if _, err := f.Write([]byte("success2\n")); err != nil {
+			f.Close()
+			c.Fatal(err)
+		}
+		f.Close()
+
+		res, _ := dockerCmd(c, "exec", contID, "cat", "/etc/"+fn)
+		c.Assert(res, checker.Equals, "success2\n")
+	}
+}
+
+func (s *DockerSuite) TestExecWithUser(c *check.C) {
+	// TODO Windows CI: This may be fixable in the future once Windows
+	// supports users
+	testRequires(c, DaemonIsLinux)
+	dockerCmd(c, "run", "-d", "--name", "parent", "busybox", "top")
+
+	out, _ := dockerCmd(c, "exec", "-u", "1", "parent", "id")
+	c.Assert(out, checker.Contains, "uid=1(daemon) gid=1(daemon)")
+
+	out, _ = dockerCmd(c, "exec", "-u", "root", "parent", "id")
+	c.Assert(out, checker.Contains, "uid=0(root) gid=0(root)", check.Commentf("exec with user by name expected root user, got %s", out))
+}
+
+func (s *DockerSuite) TestExecWithPrivileged(c *check.C) {
+	// Not applicable on Windows
+	testRequires(c, DaemonIsLinux, NotUserNamespace)
+	// Start main loop which attempts mknod repeatedly
+	dockerCmd(c, "run", "-d", "--name", "parent", "--cap-drop=ALL", "busybox", "sh", "-c", `while (true); do if [ -e /exec_priv ]; then cat /exec_priv && mknod /tmp/sda b 8 0 && echo "Success"; else echo "Privileged exec has not run yet"; fi; usleep 10000; done`)
+
+	// Check exec mknod doesn't work
+	icmd.RunCommand(dockerBinary, "exec", "parent", "sh", "-c", "mknod /tmp/sdb b 8 16").Assert(c, icmd.Expected{
+		ExitCode: 1,
+		Err:      "Operation not permitted",
+	})
+
+	// Check exec mknod does work with --privileged
+	result := icmd.RunCommand(dockerBinary, "exec", "--privileged", "parent", "sh", "-c", `echo "Running exec --privileged" > /exec_priv && mknod /tmp/sdb b 8 16 && usleep 50000 && echo "Finished exec --privileged" > /exec_priv && echo ok`)
+	result.Assert(c, icmd.Success)
+
+	actual := strings.TrimSpace(result.Combined())
+	c.Assert(actual, checker.Equals, "ok", check.Commentf("exec mknod in --cap-drop=ALL container with --privileged failed, output: %q", result.Combined()))
+
+	// Check subsequent unprivileged exec cannot mknod
+	icmd.RunCommand(dockerBinary, "exec", "parent", "sh", "-c", "mknod /tmp/sdc b 8 32").Assert(c, icmd.Expected{
+		ExitCode: 1,
+		Err:      "Operation not permitted",
+	})
+	// Confirm at no point was mknod allowed
+	result = icmd.RunCommand(dockerBinary, "logs", "parent")
+	result.Assert(c, icmd.Success)
+	c.Assert(result.Combined(), checker.Not(checker.Contains), "Success")
+}
+
+func (s *DockerSuite) TestExecWithImageUser(c *check.C) {
+	// Not applicable on Windows
+	testRequires(c, DaemonIsLinux)
+	name := "testbuilduser"
+	buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
+	RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
+	USER dockerio`))
+	dockerCmd(c, "run", "-d", "--name", "dockerioexec", name, "top")
+
+	out, _ := dockerCmd(c, "exec", "dockerioexec", "whoami")
dockerCmd(c, "exec", "dockerioexec", "whoami") + c.Assert(out, checker.Contains, "dockerio", check.Commentf("exec with user by id expected dockerio user got %s", out)) +} + +func (s *DockerSuite) TestExecOnReadonlyContainer(c *check.C) { + // Windows does not support read-only + // --read-only + userns has remount issues + testRequires(c, DaemonIsLinux, NotUserNamespace) + dockerCmd(c, "run", "-d", "--read-only", "--name", "parent", "busybox", "top") + dockerCmd(c, "exec", "parent", "true") +} + +func (s *DockerSuite) TestExecUlimits(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testexeculimits" + runSleepingContainer(c, "-d", "--ulimit", "nofile=511:511", "--name", name) + c.Assert(waitRun(name), checker.IsNil) + + out, _, err := dockerCmdWithError("exec", name, "sh", "-c", "ulimit -n") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "511") +} + +// #15750 +func (s *DockerSuite) TestExecStartFails(c *check.C) { + // TODO Windows CI. This test should be portable. Figure out why it fails + // currently. + testRequires(c, DaemonIsLinux) + name := "exec-15750" + runSleepingContainer(c, "-d", "--name", name) + c.Assert(waitRun(name), checker.IsNil) + + out, _, err := dockerCmdWithError("exec", name, "no-such-cmd") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "executable file not found") +} + +// Fix regression in https://github.com/docker/docker/pull/26461#issuecomment-250287297 +func (s *DockerSuite) TestExecWindowsPathNotWiped(c *check.C) { + testRequires(c, DaemonIsWindows) + out, _ := dockerCmd(c, "run", "-d", "--name", "testing", minimalBaseImage(), "powershell", "start-sleep", "60") + c.Assert(waitRun(strings.TrimSpace(out)), check.IsNil) + + out, _ = dockerCmd(c, "exec", "testing", "powershell", "write-host", "$env:PATH") + out = strings.ToLower(strings.Trim(out, "\r\n")) + c.Assert(out, checker.Contains, `windowspowershell\v1.0`) +} + +func (s *DockerSuite) TestExecEnvLinksHost(c *check.C) { + testRequires(c, DaemonIsLinux) + runSleepingContainer(c, "-d", "--name", "foo") + runSleepingContainer(c, "-d", "--link", "foo:db", "--hostname", "myhost", "--name", "bar") + out, _ := dockerCmd(c, "exec", "bar", "env") + c.Assert(out, checker.Contains, "HOSTNAME=myhost") + c.Assert(out, checker.Contains, "DB_NAME=/bar/db") +} + +func (s *DockerSuite) TestExecWindowsOpenHandles(c *check.C) { + testRequires(c, DaemonIsWindows) + runSleepingContainer(c, "-d", "--name", "test") + exec := make(chan bool) + go func() { + dockerCmd(c, "exec", "test", "cmd", "/c", "start sleep 10") + exec <- true + }() + + count := 0 + for { + top := make(chan string) + var out string + go func() { + out, _ := dockerCmd(c, "top", "test") + top <- out + }() + + select { + case <-time.After(time.Second * 5): + c.Fatal("timed out waiting for top while exec is exiting") + case out = <-top: + break + } + + if strings.Count(out, "busybox.exe") == 2 && !strings.Contains(out, "cmd.exe") { + // The initial exec process (cmd.exe) has exited, and both sleeps are currently running + break + } + count++ + if count >= 30 { + c.Fatal("too many retries") + } + time.Sleep(1 * time.Second) + } + + inspect := make(chan bool) + go func() { + dockerCmd(c, "inspect", "test") + inspect <- true + }() + + select { + case <-time.After(time.Second * 5): + c.Fatal("timed out waiting for inspect while exec is exiting") + case <-inspect: + break + } + + // Ensure the background sleep is still running + out, _ := dockerCmd(c, "top", "test") + 
+	c.Assert(strings.Count(out, "busybox.exe"), checker.Equals, 2)
+
+	// The exec should exit when the background sleep exits
+	select {
+	case <-time.After(time.Second * 15):
+		c.Fatal("timed out waiting for async exec to exit")
+	case <-execDone:
+		// Ensure the background sleep has actually exited
+		out, _ := dockerCmd(c, "top", "test")
+		c.Assert(strings.Count(out, "busybox.exe"), checker.Equals, 1)
+	}
+}
diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_exec_unix_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_exec_unix_test.go
new file mode 100644
index 000000000..5d8efc70d
--- /dev/null
+++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_exec_unix_test.go
@@ -0,0 +1,93 @@
+// +build !windows,!test_no_exec
+
+package main
+
+import (
+	"bytes"
+	"io"
+	"os/exec"
+	"strings"
+	"time"
+
+	"github.com/docker/docker/integration-cli/checker"
+	"github.com/go-check/check"
+	"github.com/kr/pty"
+)
+
+// regression test for #12546
+func (s *DockerSuite) TestExecInteractiveStdinClose(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	out, _ := dockerCmd(c, "run", "-itd", "busybox", "/bin/cat")
+	contID := strings.TrimSpace(out)
+
+	cmd := exec.Command(dockerBinary, "exec", "-i", contID, "echo", "-n", "hello")
+	p, err := pty.Start(cmd)
+	c.Assert(err, checker.IsNil)
+
+	b := bytes.NewBuffer(nil)
+	go io.Copy(b, p)
+
+	ch := make(chan error)
+	go func() { ch <- cmd.Wait() }()
+
+	select {
+	case err := <-ch:
+		c.Assert(err, checker.IsNil)
+		output := b.String()
+		c.Assert(strings.TrimSpace(output), checker.Equals, "hello")
+	case <-time.After(5 * time.Second):
+		c.Fatal("timed out running docker exec")
+	}
+}
+
+func (s *DockerSuite) TestExecTTY(c *check.C) {
+	testRequires(c, DaemonIsLinux, SameHostDaemon)
+	dockerCmd(c, "run", "-d", "--name=test", "busybox", "sh", "-c", "echo hello > /foo && top")
+
+	cmd := exec.Command(dockerBinary, "exec", "-it", "test", "sh")
+	p, err := pty.Start(cmd)
+	c.Assert(err, checker.IsNil)
+	defer p.Close()
+
+	_, err = p.Write([]byte("cat /foo && exit\n"))
+	c.Assert(err, checker.IsNil)
+
+	chErr := make(chan error)
+	go func() {
+		chErr <- cmd.Wait()
+	}()
+	select {
+	case err := <-chErr:
+		c.Assert(err, checker.IsNil)
+	case <-time.After(3 * time.Second):
+		c.Fatal("timeout waiting for exec to exit")
+	}
+
+	buf := make([]byte, 256)
+	read, err := p.Read(buf)
+	c.Assert(err, checker.IsNil)
+	c.Assert(bytes.Contains(buf, []byte("hello")), checker.Equals, true, check.Commentf(string(buf[:read])))
+}
+
+// Test that the TERM env var is set when -t is provided on exec
+func (s *DockerSuite) TestExecWithTERM(c *check.C) {
+	testRequires(c, DaemonIsLinux, SameHostDaemon)
+	out, _ := dockerCmd(c, "run", "-id", "busybox", "/bin/cat")
+	contID := strings.TrimSpace(out)
+	cmd := exec.Command(dockerBinary, "exec", "-t", contID, "sh", "-c", "if [ -z $TERM ]; then exit 1; else exit 0; fi")
+	c.Assert(cmd.Run(), checker.IsNil)
+}
+
+// Test that the TERM env var is not set on exec when -t is not provided, even if it was set
+// on run
+func (s *DockerSuite) TestExecWithNoTERM(c *check.C) {
+	testRequires(c, DaemonIsLinux, SameHostDaemon)
+	out, _ := dockerCmd(c, "run", "-itd", "busybox", "/bin/cat")
+	contID := strings.TrimSpace(out)
+	cmd := exec.Command(dockerBinary, "exec", contID, "sh", "-c", "if [ -z $TERM ]; then exit 0; else exit 1; fi")
+	c.Assert(cmd.Run(), checker.IsNil)
+}
diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_experimental_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_experimental_test.go
new file mode 100644
index 000000000..0a496fd26
--- /dev/null
+++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_experimental_test.go
@@ -0,0 +1,29 @@
+package main
+
+import (
+	"strings"
+
+	"github.com/docker/docker/integration-cli/checker"
+	"github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestExperimentalVersionTrue(c *check.C) {
+	testExperimentalInVersion(c, ExperimentalDaemon, "*true")
+}
+
+func (s *DockerSuite) TestExperimentalVersionFalse(c *check.C) {
+	testExperimentalInVersion(c, NotExperimentalDaemon, "*false")
+}
+
+func testExperimentalInVersion(c *check.C, requirement func() bool, expectedValue string) {
+	testRequires(c, requirement)
+	out, _ := dockerCmd(c, "version")
+	for _, line := range strings.Split(out, "\n") {
+		if strings.HasPrefix(strings.TrimSpace(line), "Experimental:") {
+			c.Assert(line, checker.Matches, expectedValue)
+			return
+		}
+	}
+
+	c.Fatal(`"Experimental" not found in version output`)
+}
diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_export_import_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_export_import_test.go
new file mode 100644
index 000000000..fe117b9ae
--- /dev/null
+++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_export_import_test.go
@@ -0,0 +1,51 @@
+package main
+
+import (
+	"os"
+	"strings"
+
+	"github.com/docker/docker/integration-cli/checker"
+	icmd "github.com/docker/docker/pkg/testutil/cmd"
+	"github.com/go-check/check"
+)
+
+// export a container and try to import the result as a new image
+func (s *DockerSuite) TestExportContainerAndImportImage(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	containerID := "testexportcontainerandimportimage"
+
+	dockerCmd(c, "run", "--name", containerID, "busybox", "true")
+
+	out, _ := dockerCmd(c, "export", containerID)
+
+	result := icmd.RunCmd(icmd.Cmd{
+		Command: []string{dockerBinary, "import", "-", "repo/testexp:v1"},
+		Stdin:   strings.NewReader(out),
+	})
+	result.Assert(c, icmd.Success)
+
+	cleanedImageID := strings.TrimSpace(result.Combined())
+	c.Assert(cleanedImageID, checker.Not(checker.Equals), "", check.Commentf("output should have been an image id"))
+}
+
+// Tests the --output flag of the export command
+func (s *DockerSuite) TestExportContainerWithOutputAndImportImage(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	containerID := "testexportcontainerwithoutputandimportimage"
+
+	dockerCmd(c, "run", "--name", containerID, "busybox", "true")
+	dockerCmd(c, "export", "--output=testexp.tar", containerID)
+	defer os.Remove("testexp.tar")
+
+	resultCat := icmd.RunCommand("cat", "testexp.tar")
+	resultCat.Assert(c, icmd.Success)
+
+	result := icmd.RunCmd(icmd.Cmd{
+		Command: []string{dockerBinary, "import", "-", "repo/testexp:v1"},
+		Stdin:   strings.NewReader(resultCat.Combined()),
+	})
+	result.Assert(c, icmd.Success)
+
+	cleanedImageID := strings.TrimSpace(result.Combined())
+	c.Assert(cleanedImageID, checker.Not(checker.Equals), "", check.Commentf("output should have been an image id"))
+}
diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_external_graphdriver_unix_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_external_graphdriver_unix_test.go
new file mode 100644
index 000000000..16023c9a7
--- /dev/null
+++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_external_graphdriver_unix_test.go
@@ -0,0 +1,406 @@
+// +build !windows
+
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/http/httptest"
+	"os"
+	"strings"
+
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/daemon/graphdriver/vfs"
+	"github.com/docker/docker/integration-cli/daemon"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/plugins"
+	"github.com/go-check/check"
+)
+
+func init() {
+	check.Suite(&DockerExternalGraphdriverSuite{
+		ds: &DockerSuite{},
+	})
+}
+
+type DockerExternalGraphdriverSuite struct {
+	server  *httptest.Server
+	jserver *httptest.Server
+	ds      *DockerSuite
+	d       *daemon.Daemon
+	ec      map[string]*graphEventsCounter
+}
+
+type graphEventsCounter struct {
+	activations int
+	creations   int
+	removals    int
+	gets        int
+	puts        int
+	stats       int
+	cleanups    int
+	exists      int
+	init        int
+	metadata    int
+	diff        int
+	applydiff   int
+	changes     int
+	diffsize    int
+}
+
+func (s *DockerExternalGraphdriverSuite) SetUpTest(c *check.C) {
+	s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
+		Experimental: testEnv.ExperimentalDaemon(),
+	})
+}
+
+func (s *DockerExternalGraphdriverSuite) OnTimeout(c *check.C) {
+	s.d.DumpStackAndQuit()
+}
+
+func (s *DockerExternalGraphdriverSuite) TearDownTest(c *check.C) {
+	if s.d != nil {
+		s.d.Stop(c)
+		s.ds.TearDownTest(c)
+	}
+}
+
+func (s *DockerExternalGraphdriverSuite) SetUpSuite(c *check.C) {
+	s.ec = make(map[string]*graphEventsCounter)
+	s.setUpPluginViaSpecFile(c)
+	s.setUpPluginViaJSONFile(c)
+}
+
+func (s *DockerExternalGraphdriverSuite) setUpPluginViaSpecFile(c *check.C) {
+	mux := http.NewServeMux()
+	s.server = httptest.NewServer(mux)
+
+	s.setUpPlugin(c, "test-external-graph-driver", "spec", mux, []byte(s.server.URL))
+}
+
+func (s *DockerExternalGraphdriverSuite) setUpPluginViaJSONFile(c *check.C) {
+	mux := http.NewServeMux()
+	s.jserver = httptest.NewServer(mux)
+
+	p := plugins.NewLocalPlugin("json-external-graph-driver", s.jserver.URL)
+	b, err := json.Marshal(p)
+	c.Assert(err, check.IsNil)
+
+	s.setUpPlugin(c, "json-external-graph-driver", "json", mux, b)
+}
+
+func (s *DockerExternalGraphdriverSuite) setUpPlugin(c *check.C, name string, ext string, mux *http.ServeMux, b []byte) {
+	type graphDriverRequest struct {
+		ID         string `json:",omitempty"`
+		Parent     string `json:",omitempty"`
+		MountLabel string `json:",omitempty"`
+		ReadOnly   bool   `json:",omitempty"`
+	}
+
+	type graphDriverResponse struct {
+		Err      error             `json:",omitempty"`
+		Dir      string            `json:",omitempty"`
+		Exists   bool              `json:",omitempty"`
+		Status   [][2]string       `json:",omitempty"`
+		Metadata map[string]string `json:",omitempty"`
+		Changes  []archive.Change  `json:",omitempty"`
+		Size     int64             `json:",omitempty"`
+	}
+
+	respond := func(w http.ResponseWriter, data interface{}) {
+		w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
+		switch t := data.(type) {
+		case error:
+			fmt.Fprintln(w, fmt.Sprintf(`{"Err": %q}`, t.Error()))
+		case string:
+			fmt.Fprintln(w, t)
+		default:
+			json.NewEncoder(w).Encode(&data)
+		}
+	}
+
+	decReq := func(b io.ReadCloser, out interface{}, w http.ResponseWriter) error {
+		defer b.Close()
+		if err := json.NewDecoder(b).Decode(&out); err != nil {
+			http.Error(w, fmt.Sprintf("error decoding json: %s", err.Error()), 500)
+			// Return the error so callers skip the request instead of
+			// proceeding with a zero-value payload.
+			return err
+		}
+		return nil
+	}
+
+	base, err := ioutil.TempDir("", name)
+	c.Assert(err, check.IsNil)
+	vfsProto, err := vfs.Init(base, []string{}, nil, nil)
+	c.Assert(err, check.IsNil, check.Commentf("error initializing graph driver"))
+	driver := graphdriver.NewNaiveDiffDriver(vfsProto, nil, nil)
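+
+	// The handlers below proxy each GraphDriver plugin endpoint to the local
+	// vfs driver while counting invocations, so the tests can assert how
+	// often the daemon called each endpoint.
+	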
s.ec[ext] = &graphEventsCounter{} + mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].activations++ + respond(w, `{"Implements": ["GraphDriver"]}`) + }) + + mux.HandleFunc("/GraphDriver.Init", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].init++ + respond(w, "{}") + }) + + mux.HandleFunc("/GraphDriver.CreateReadWrite", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].creations++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + if err := driver.CreateReadWrite(req.ID, req.Parent, nil); err != nil { + respond(w, err) + return + } + respond(w, "{}") + }) + + mux.HandleFunc("/GraphDriver.Create", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].creations++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + if err := driver.Create(req.ID, req.Parent, nil); err != nil { + respond(w, err) + return + } + respond(w, "{}") + }) + + mux.HandleFunc("/GraphDriver.Remove", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].removals++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + + if err := driver.Remove(req.ID); err != nil { + respond(w, err) + return + } + respond(w, "{}") + }) + + mux.HandleFunc("/GraphDriver.Get", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].gets++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + + dir, err := driver.Get(req.ID, req.MountLabel) + if err != nil { + respond(w, err) + return + } + respond(w, &graphDriverResponse{Dir: dir}) + }) + + mux.HandleFunc("/GraphDriver.Put", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].puts++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + + if err := driver.Put(req.ID); err != nil { + respond(w, err) + return + } + respond(w, "{}") + }) + + mux.HandleFunc("/GraphDriver.Exists", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].exists++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + respond(w, &graphDriverResponse{Exists: driver.Exists(req.ID)}) + }) + + mux.HandleFunc("/GraphDriver.Status", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].stats++ + respond(w, &graphDriverResponse{Status: driver.Status()}) + }) + + mux.HandleFunc("/GraphDriver.Cleanup", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].cleanups++ + err := driver.Cleanup() + if err != nil { + respond(w, err) + return + } + respond(w, `{}`) + }) + + mux.HandleFunc("/GraphDriver.GetMetadata", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].metadata++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + + data, err := driver.GetMetadata(req.ID) + if err != nil { + respond(w, err) + return + } + respond(w, &graphDriverResponse{Metadata: data}) + }) + + mux.HandleFunc("/GraphDriver.Diff", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].diff++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + + diff, err := driver.Diff(req.ID, req.Parent) + if err != nil { + respond(w, err) + return + } + io.Copy(w, diff) + }) + + mux.HandleFunc("/GraphDriver.Changes", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].changes++ + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + + changes, err := driver.Changes(req.ID, 
req.Parent)
+		if err != nil {
+			respond(w, err)
+			return
+		}
+		respond(w, &graphDriverResponse{Changes: changes})
+	})
+
+	mux.HandleFunc("/GraphDriver.ApplyDiff", func(w http.ResponseWriter, r *http.Request) {
+		s.ec[ext].applydiff++
+		diff := r.Body
+		defer r.Body.Close()
+
+		id := r.URL.Query().Get("id")
+		parent := r.URL.Query().Get("parent")
+
+		if id == "" {
+			http.Error(w, "missing id", 409)
+			return
+		}
+
+		size, err := driver.ApplyDiff(id, parent, diff)
+		if err != nil {
+			respond(w, err)
+			return
+		}
+		respond(w, &graphDriverResponse{Size: size})
+	})
+
+	mux.HandleFunc("/GraphDriver.DiffSize", func(w http.ResponseWriter, r *http.Request) {
+		s.ec[ext].diffsize++
+
+		var req graphDriverRequest
+		if err := decReq(r.Body, &req, w); err != nil {
+			return
+		}
+
+		size, err := driver.DiffSize(req.ID, req.Parent)
+		if err != nil {
+			respond(w, err)
+			return
+		}
+		respond(w, &graphDriverResponse{Size: size})
+	})
+
+	err = os.MkdirAll("/etc/docker/plugins", 0755)
+	c.Assert(err, check.IsNil, check.Commentf("error creating /etc/docker/plugins"))
+
+	specFile := "/etc/docker/plugins/" + name + "." + ext
+	err = ioutil.WriteFile(specFile, b, 0644)
+	c.Assert(err, check.IsNil, check.Commentf("error writing to %s", specFile))
+}
+
+func (s *DockerExternalGraphdriverSuite) TearDownSuite(c *check.C) {
+	s.server.Close()
+	s.jserver.Close()
+
+	err := os.RemoveAll("/etc/docker/plugins")
+	c.Assert(err, check.IsNil, check.Commentf("error removing /etc/docker/plugins"))
+}
+
+func (s *DockerExternalGraphdriverSuite) TestExternalGraphDriver(c *check.C) {
+	testRequires(c, ExperimentalDaemon)
+
+	s.testExternalGraphDriver("test-external-graph-driver", "spec", c)
+	s.testExternalGraphDriver("json-external-graph-driver", "json", c)
+}
+
+func (s *DockerExternalGraphdriverSuite) testExternalGraphDriver(name string, ext string, c *check.C) {
+	s.d.StartWithBusybox(c, "-s", name)
+
+	out, err := s.d.Cmd("run", "--name=graphtest", "busybox", "sh", "-c", "echo hello > /hello")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+
+	s.d.Restart(c, "-s", name)
+
+	out, err = s.d.Cmd("inspect", "--format={{.GraphDriver.Name}}", "graphtest")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+	c.Assert(strings.TrimSpace(out), check.Equals, name)
+
+	out, err = s.d.Cmd("diff", "graphtest")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+	c.Assert(strings.Contains(out, "A /hello"), check.Equals, true, check.Commentf("diff output: %s", out))
+
+	out, err = s.d.Cmd("rm", "-f", "graphtest")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+
+	out, err = s.d.Cmd("info")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+
+	s.d.Stop(c)
+
+	// Don't check s.ec.exists, because the daemon no longer calls the
+	// Exists function.
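+	// The daemon is started twice above (the initial start plus the restart),
+	// and the plugin is activated and initialized once per daemon start,
+	// hence the expected counts of 2 below.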
+ c.Assert(s.ec[ext].activations, check.Equals, 2) + c.Assert(s.ec[ext].init, check.Equals, 2) + c.Assert(s.ec[ext].creations >= 1, check.Equals, true) + c.Assert(s.ec[ext].removals >= 1, check.Equals, true) + c.Assert(s.ec[ext].gets >= 1, check.Equals, true) + c.Assert(s.ec[ext].puts >= 1, check.Equals, true) + c.Assert(s.ec[ext].stats, check.Equals, 5) + c.Assert(s.ec[ext].cleanups, check.Equals, 2) + c.Assert(s.ec[ext].applydiff >= 1, check.Equals, true) + c.Assert(s.ec[ext].changes, check.Equals, 1) + c.Assert(s.ec[ext].diffsize, check.Equals, 0) + c.Assert(s.ec[ext].diff, check.Equals, 0) + c.Assert(s.ec[ext].metadata, check.Equals, 1) +} + +func (s *DockerExternalGraphdriverSuite) TestExternalGraphDriverPull(c *check.C) { + testRequires(c, Network, ExperimentalDaemon) + + s.d.Start(c) + + out, err := s.d.Cmd("pull", "busybox:latest") + c.Assert(err, check.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf(out)) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_external_volume_driver_unix_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_external_volume_driver_unix_test.go new file mode 100644 index 000000000..5fe417c2c --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_external_volume_driver_unix_test.go @@ -0,0 +1,633 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "os/exec" + "path/filepath" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/daemon" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/volume" + "github.com/go-check/check" +) + +const volumePluginName = "test-external-volume-driver" + +func init() { + check.Suite(&DockerExternalVolumeSuite{ + ds: &DockerSuite{}, + }) +} + +type eventCounter struct { + activations int + creations int + removals int + mounts int + unmounts int + paths int + lists int + gets int + caps int +} + +type DockerExternalVolumeSuite struct { + ds *DockerSuite + d *daemon.Daemon + *volumePlugin +} + +func (s *DockerExternalVolumeSuite) SetUpTest(c *check.C) { + s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: testEnv.ExperimentalDaemon(), + }) + s.ec = &eventCounter{} +} + +func (s *DockerExternalVolumeSuite) TearDownTest(c *check.C) { + if s.d != nil { + s.d.Stop(c) + s.ds.TearDownTest(c) + } +} + +func (s *DockerExternalVolumeSuite) SetUpSuite(c *check.C) { + s.volumePlugin = newVolumePlugin(c, volumePluginName) +} + +type volumePlugin struct { + ec *eventCounter + *httptest.Server + vols map[string]vol +} + +type vol struct { + Name string + Mountpoint string + Ninja bool // hack used to trigger a null volume return on `Get` + Status map[string]interface{} + Options map[string]string +} + +func (p *volumePlugin) Close() { + p.Server.Close() +} + +func newVolumePlugin(c *check.C, name string) *volumePlugin { + mux := http.NewServeMux() + s := &volumePlugin{Server: httptest.NewServer(mux), ec: &eventCounter{}, vols: make(map[string]vol)} + + type pluginRequest struct { + Name string + Opts map[string]string + ID string + } + + type pluginResp struct { + Mountpoint string `json:",omitempty"` + Err string `json:",omitempty"` + } + + read := func(b io.ReadCloser) (pluginRequest, error) { + defer b.Close() + var pr pluginRequest + err := json.NewDecoder(b).Decode(&pr) 
+		return pr, err
+	}
+
+	send := func(w http.ResponseWriter, data interface{}) {
+		switch t := data.(type) {
+		case error:
+			http.Error(w, t.Error(), 500)
+		case string:
+			w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
+			fmt.Fprintln(w, t)
+		default:
+			w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
+			json.NewEncoder(w).Encode(&data)
+		}
+	}
+
+	mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) {
+		s.ec.activations++
+		send(w, `{"Implements": ["VolumeDriver"]}`)
+	})
+
+	mux.HandleFunc("/VolumeDriver.Create", func(w http.ResponseWriter, r *http.Request) {
+		s.ec.creations++
+		pr, err := read(r.Body)
+		if err != nil {
+			send(w, err)
+			return
+		}
+		_, isNinja := pr.Opts["ninja"]
+		status := map[string]interface{}{"Hello": "world"}
+		s.vols[pr.Name] = vol{Name: pr.Name, Ninja: isNinja, Status: status, Options: pr.Opts}
+		send(w, nil)
+	})
+
+	mux.HandleFunc("/VolumeDriver.List", func(w http.ResponseWriter, r *http.Request) {
+		s.ec.lists++
+		vols := make([]vol, 0, len(s.vols))
+		for _, v := range s.vols {
+			if v.Ninja {
+				continue
+			}
+			vols = append(vols, v)
+		}
+		send(w, map[string][]vol{"Volumes": vols})
+	})
+
+	mux.HandleFunc("/VolumeDriver.Get", func(w http.ResponseWriter, r *http.Request) {
+		s.ec.gets++
+		pr, err := read(r.Body)
+		if err != nil {
+			send(w, err)
+			return
+		}
+
+		v, exists := s.vols[pr.Name]
+		if !exists {
+			send(w, `{"Err": "no such volume"}`)
+			// Return here so we don't also write a success response below.
+			return
+		}
+
+		if v.Ninja {
+			send(w, map[string]vol{})
+			return
+		}
+
+		v.Mountpoint = hostVolumePath(pr.Name)
+		send(w, map[string]vol{"Volume": v})
+	})
+
+	mux.HandleFunc("/VolumeDriver.Remove", func(w http.ResponseWriter, r *http.Request) {
+		s.ec.removals++
+		pr, err := read(r.Body)
+		if err != nil {
+			send(w, err)
+			return
+		}
+
+		v, ok := s.vols[pr.Name]
+		if !ok {
+			send(w, nil)
+			return
+		}
+
+		if err := os.RemoveAll(hostVolumePath(v.Name)); err != nil {
+			send(w, &pluginResp{Err: err.Error()})
+			return
+		}
+		delete(s.vols, v.Name)
+		send(w, nil)
+	})
+
+	mux.HandleFunc("/VolumeDriver.Path", func(w http.ResponseWriter, r *http.Request) {
+		s.ec.paths++
+
+		pr, err := read(r.Body)
+		if err != nil {
+			send(w, err)
+			return
+		}
+		p := hostVolumePath(pr.Name)
+		send(w, &pluginResp{Mountpoint: p})
+	})
+
+	mux.HandleFunc("/VolumeDriver.Mount", func(w http.ResponseWriter, r *http.Request) {
+		s.ec.mounts++
+
+		pr, err := read(r.Body)
+		if err != nil {
+			send(w, err)
+			return
+		}
+
+		if v, exists := s.vols[pr.Name]; exists {
+			// Use this to simulate a mount failure
+			if _, exists := v.Options["invalidOption"]; exists {
+				send(w, fmt.Errorf("invalid argument"))
+				return
+			}
+		}
+
+		p := hostVolumePath(pr.Name)
+		if err := os.MkdirAll(p, 0755); err != nil {
+			send(w, &pluginResp{Err: err.Error()})
+			return
+		}
+
+		if err := ioutil.WriteFile(filepath.Join(p, "test"), []byte(s.Server.URL), 0644); err != nil {
+			send(w, err)
+			return
+		}
+
+		if err := ioutil.WriteFile(filepath.Join(p, "mountID"), []byte(pr.ID), 0644); err != nil {
+			send(w, err)
+			return
+		}
+
+		send(w, &pluginResp{Mountpoint: p})
+	})
+
+	mux.HandleFunc("/VolumeDriver.Unmount", func(w http.ResponseWriter, r *http.Request) {
+		s.ec.unmounts++
+
+		_, err := read(r.Body)
+		if err != nil {
+			send(w, err)
+			return
+		}
+
+		send(w, nil)
+	})
+
+	mux.HandleFunc("/VolumeDriver.Capabilities", func(w http.ResponseWriter, r *http.Request) {
+		s.ec.caps++
+
+		_, err := read(r.Body)
+		if err != nil {
+			send(w, err)
+			return
+		}
+
+		send(w, `{"Capabilities": { "Scope": "global" }}`)
+	})
+
+	err := os.MkdirAll("/etc/docker/plugins", 0755)
+	c.Assert(err, checker.IsNil)
+
+	err = ioutil.WriteFile("/etc/docker/plugins/"+name+".spec", []byte(s.Server.URL), 0644)
+	c.Assert(err, checker.IsNil)
+	return s
+}
+
+func (s *DockerExternalVolumeSuite) TearDownSuite(c *check.C) {
+	s.volumePlugin.Close()
+
+	err := os.RemoveAll("/etc/docker/plugins")
+	c.Assert(err, checker.IsNil)
+}
+
+func (s *DockerExternalVolumeSuite) TestVolumeCLICreateOptionConflict(c *check.C) {
+	dockerCmd(c, "volume", "create", "test")
+
+	out, _, err := dockerCmdWithError("volume", "create", "test", "--driver", volumePluginName)
+	c.Assert(err, check.NotNil, check.Commentf("expected an error when creating a volume whose name is already in use by another driver"))
+	c.Assert(out, checker.Contains, "must be unique")
+
+	out, _ = dockerCmd(c, "volume", "inspect", "--format={{ .Driver }}", "test")
+	_, _, err = dockerCmdWithError("volume", "create", "test", "--driver", strings.TrimSpace(out))
+	c.Assert(err, check.IsNil)
+}
+
+func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverNamed(c *check.C) {
+	s.d.StartWithBusybox(c)
+
+	out, err := s.d.Cmd("run", "--rm", "--name", "test-data", "-v", "external-volume-test:/tmp/external-volume-test", "--volume-driver", volumePluginName, "busybox:latest", "cat", "/tmp/external-volume-test/test")
+	c.Assert(err, checker.IsNil, check.Commentf(out))
+	c.Assert(out, checker.Contains, s.Server.URL)
+
+	_, err = s.d.Cmd("volume", "rm", "external-volume-test")
+	c.Assert(err, checker.IsNil)
+
+	p := hostVolumePath("external-volume-test")
+	_, err = os.Lstat(p)
+	c.Assert(err, checker.NotNil)
+	c.Assert(os.IsNotExist(err), checker.True, check.Commentf("Expected volume path in host to not exist: %s, %v\n", p, err))
+
+	c.Assert(s.ec.activations, checker.Equals, 1)
+	c.Assert(s.ec.creations, checker.Equals, 1)
+	c.Assert(s.ec.removals, checker.Equals, 1)
+	c.Assert(s.ec.mounts, checker.Equals, 1)
+	c.Assert(s.ec.unmounts, checker.Equals, 1)
+}
+
+func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverUnnamed(c *check.C) {
+	s.d.StartWithBusybox(c)
+
+	out, err := s.d.Cmd("run", "--rm", "--name", "test-data", "-v", "/tmp/external-volume-test", "--volume-driver", volumePluginName, "busybox:latest", "cat", "/tmp/external-volume-test/test")
+	c.Assert(err, checker.IsNil, check.Commentf(out))
+	c.Assert(out, checker.Contains, s.Server.URL)
+
+	c.Assert(s.ec.activations, checker.Equals, 1)
+	c.Assert(s.ec.creations, checker.Equals, 1)
+	c.Assert(s.ec.removals, checker.Equals, 1)
+	c.Assert(s.ec.mounts, checker.Equals, 1)
+	c.Assert(s.ec.unmounts, checker.Equals, 1)
+}
+
+func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverVolumesFrom(c *check.C) {
+	s.d.StartWithBusybox(c)
+
+	out, err := s.d.Cmd("run", "--name", "vol-test1", "-v", "/foo", "--volume-driver", volumePluginName, "busybox:latest")
+	c.Assert(err, checker.IsNil, check.Commentf(out))
+
+	out, err = s.d.Cmd("run", "--rm", "--volumes-from", "vol-test1", "--name", "vol-test2", "busybox", "ls", "/tmp")
+	c.Assert(err, checker.IsNil, check.Commentf(out))
+
+	out, err = s.d.Cmd("rm", "-fv", "vol-test1")
+	c.Assert(err, checker.IsNil, check.Commentf(out))
+
+	c.Assert(s.ec.activations, checker.Equals, 1)
+	c.Assert(s.ec.creations, checker.Equals, 1)
+	c.Assert(s.ec.removals, checker.Equals, 1)
+	c.Assert(s.ec.mounts, checker.Equals, 2)
+	c.Assert(s.ec.unmounts, checker.Equals, 2)
+}
+
+func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverDeleteContainer(c *check.C) {
+	s.d.StartWithBusybox(c)
+
+	out, err := s.d.Cmd("run", "--name", 
"vol-test1", "-v", "/foo", "--volume-driver", volumePluginName, "busybox:latest") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("rm", "-fv", "vol-test1") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + c.Assert(s.ec.activations, checker.Equals, 1) + c.Assert(s.ec.creations, checker.Equals, 1) + c.Assert(s.ec.removals, checker.Equals, 1) + c.Assert(s.ec.mounts, checker.Equals, 1) + c.Assert(s.ec.unmounts, checker.Equals, 1) +} + +func hostVolumePath(name string) string { + return fmt.Sprintf("/var/lib/docker/volumes/%s", name) +} + +// Make sure a request to use a down driver doesn't block other requests +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverLookupNotBlocked(c *check.C) { + specPath := "/etc/docker/plugins/down-driver.spec" + err := ioutil.WriteFile(specPath, []byte("tcp://127.0.0.7:9999"), 0644) + c.Assert(err, check.IsNil) + defer os.RemoveAll(specPath) + + chCmd1 := make(chan struct{}) + chCmd2 := make(chan error) + cmd1 := exec.Command(dockerBinary, "volume", "create", "-d", "down-driver") + cmd2 := exec.Command(dockerBinary, "volume", "create") + + c.Assert(cmd1.Start(), checker.IsNil) + defer cmd1.Process.Kill() + time.Sleep(100 * time.Millisecond) // ensure API has been called + c.Assert(cmd2.Start(), checker.IsNil) + + go func() { + cmd1.Wait() + close(chCmd1) + }() + go func() { + chCmd2 <- cmd2.Wait() + }() + + select { + case <-chCmd1: + cmd2.Process.Kill() + c.Fatalf("volume create with down driver finished unexpectedly") + case err := <-chCmd2: + c.Assert(err, checker.IsNil) + case <-time.After(5 * time.Second): + cmd2.Process.Kill() + c.Fatal("volume creates are blocked by previous create requests when previous driver is down") + } +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverRetryNotImmediatelyExists(c *check.C) { + s.d.StartWithBusybox(c) + driverName := "test-external-volume-driver-retry" + + errchan := make(chan error) + started := make(chan struct{}) + go func() { + close(started) + if out, err := s.d.Cmd("run", "--rm", "--name", "test-data-retry", "-v", "external-volume-test:/tmp/external-volume-test", "--volume-driver", driverName, "busybox:latest"); err != nil { + errchan <- fmt.Errorf("%v:\n%s", err, out) + } + close(errchan) + }() + + <-started + // wait for a retry to occur, then create spec to allow plugin to register + time.Sleep(2 * time.Second) + p := newVolumePlugin(c, driverName) + defer p.Close() + + select { + case err := <-errchan: + c.Assert(err, checker.IsNil) + case <-time.After(8 * time.Second): + c.Fatal("volume creates fail when plugin not immediately available") + } + + _, err := s.d.Cmd("volume", "rm", "external-volume-test") + c.Assert(err, checker.IsNil) + + c.Assert(p.ec.activations, checker.Equals, 1) + c.Assert(p.ec.creations, checker.Equals, 1) + c.Assert(p.ec.removals, checker.Equals, 1) + c.Assert(p.ec.mounts, checker.Equals, 1) + c.Assert(p.ec.unmounts, checker.Equals, 1) +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverBindExternalVolume(c *check.C) { + dockerCmd(c, "volume", "create", "-d", volumePluginName, "foo") + dockerCmd(c, "run", "-d", "--name", "testing", "-v", "foo:/bar", "busybox", "top") + + var mounts []struct { + Name string + Driver string + } + out := inspectFieldJSON(c, "testing", "Mounts") + c.Assert(json.NewDecoder(strings.NewReader(out)).Decode(&mounts), checker.IsNil) + c.Assert(len(mounts), checker.Equals, 1, check.Commentf(out)) + c.Assert(mounts[0].Name, checker.Equals, "foo") + c.Assert(mounts[0].Driver, 
checker.Equals, volumePluginName)
+}
+
+func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverList(c *check.C) {
+	dockerCmd(c, "volume", "create", "-d", volumePluginName, "abc3")
+	out, _ := dockerCmd(c, "volume", "ls")
+	ls := strings.Split(strings.TrimSpace(out), "\n")
+	c.Assert(len(ls), check.Equals, 2, check.Commentf("\n%s", out))
+
+	vol := strings.Fields(ls[len(ls)-1])
+	c.Assert(len(vol), check.Equals, 2, check.Commentf("%v", vol))
+	c.Assert(vol[0], check.Equals, volumePluginName)
+	c.Assert(vol[1], check.Equals, "abc3")
+
+	c.Assert(s.ec.lists, check.Equals, 1)
+}
+
+func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverGet(c *check.C) {
+	out, _, err := dockerCmdWithError("volume", "inspect", "dummy")
+	c.Assert(err, check.NotNil, check.Commentf(out))
+	c.Assert(out, checker.Contains, "No such volume")
+	c.Assert(s.ec.gets, check.Equals, 1)
+
+	dockerCmd(c, "volume", "create", "test", "-d", volumePluginName)
+	out, _ = dockerCmd(c, "volume", "inspect", "test")
+
+	type vol struct {
+		Status map[string]string
+	}
+	var st []vol
+
+	c.Assert(json.Unmarshal([]byte(out), &st), checker.IsNil)
+	c.Assert(st, checker.HasLen, 1)
+	c.Assert(st[0].Status, checker.HasLen, 1, check.Commentf("%v", st[0]))
+	c.Assert(st[0].Status["Hello"], checker.Equals, "world", check.Commentf("%v", st[0].Status))
+}
+
+func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverWithDaemonRestart(c *check.C) {
+	dockerCmd(c, "volume", "create", "-d", volumePluginName, "abc1")
+	s.d.Restart(c)
+
+	dockerCmd(c, "run", "--name=test", "-v", "abc1:/foo", "busybox", "true")
+	var mounts []types.MountPoint
+	inspectFieldAndUnmarshall(c, "test", "Mounts", &mounts)
+	c.Assert(mounts, checker.HasLen, 1)
+	c.Assert(mounts[0].Driver, checker.Equals, volumePluginName)
+}
+
+// Ensures that the daemon handles a plugin responding to a `Get` request with a null volume and a null error.
+// Previously, the daemon would panic in this scenario.
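+// The test plugin returns an empty body from its Get handler for volumes
+// created with the "ninja" option (see the /VolumeDriver.Get handler above).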
+func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverGetEmptyResponse(c *check.C) { + s.d.Start(c) + + out, err := s.d.Cmd("volume", "create", "-d", volumePluginName, "abc2", "--opt", "ninja=1") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("volume", "inspect", "abc2") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "No such volume") +} + +// Ensure only cached paths are used in volume list to prevent N+1 calls to `VolumeDriver.Path` +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverPathCalls(c *check.C) { + s.d.Start(c) + c.Assert(s.ec.paths, checker.Equals, 0) + + out, err := s.d.Cmd("volume", "create", "test", "--driver=test-external-volume-driver") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(s.ec.paths, checker.Equals, 1) + + out, err = s.d.Cmd("volume", "ls") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(s.ec.paths, checker.Equals, 1) + + out, err = s.d.Cmd("volume", "inspect", "--format='{{.Mountpoint}}'", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + c.Assert(s.ec.paths, checker.Equals, 1) +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverMountID(c *check.C) { + s.d.StartWithBusybox(c) + + out, err := s.d.Cmd("run", "--rm", "-v", "external-volume-test:/tmp/external-volume-test", "--volume-driver", volumePluginName, "busybox:latest", "cat", "/tmp/external-volume-test/test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") +} + +// Check that VolumeDriver.Capabilities gets called, and only called once +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverCapabilities(c *check.C) { + s.d.Start(c) + c.Assert(s.ec.caps, checker.Equals, 0) + + for i := 0; i < 3; i++ { + out, err := s.d.Cmd("volume", "create", "-d", volumePluginName, fmt.Sprintf("test%d", i)) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(s.ec.caps, checker.Equals, 1) + out, err = s.d.Cmd("volume", "inspect", "--format={{.Scope}}", fmt.Sprintf("test%d", i)) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, volume.GlobalScope) + } +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverOutOfBandDelete(c *check.C) { + driverName := stringid.GenerateNonCryptoID() + p := newVolumePlugin(c, driverName) + defer p.Close() + + s.d.StartWithBusybox(c) + + out, err := s.d.Cmd("volume", "create", "-d", driverName, "--name", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("volume", "create", "-d", "local", "--name", "test") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "must be unique") + + // simulate out of band volume deletion on plugin level + delete(p.vols, "test") + + // test re-create with same driver + out, err = s.d.Cmd("volume", "create", "-d", driverName, "--opt", "foo=bar", "--name", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + out, err = s.d.Cmd("volume", "inspect", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + var vs []types.Volume + err = json.Unmarshal([]byte(out), &vs) + c.Assert(err, checker.IsNil) + c.Assert(vs, checker.HasLen, 1) + c.Assert(vs[0].Driver, checker.Equals, driverName) + c.Assert(vs[0].Options, checker.NotNil) + c.Assert(vs[0].Options["foo"], checker.Equals, "bar") + c.Assert(vs[0].Driver, checker.Equals, driverName) + + // simulate out of 
band volume deletion on plugin level + delete(p.vols, "test") + + // test create with different driver + out, err = s.d.Cmd("volume", "create", "-d", "local", "--name", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("volume", "inspect", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + vs = nil + err = json.Unmarshal([]byte(out), &vs) + c.Assert(err, checker.IsNil) + c.Assert(vs, checker.HasLen, 1) + c.Assert(vs[0].Options, checker.HasLen, 0) + c.Assert(vs[0].Driver, checker.Equals, "local") +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverUnmountOnMountFail(c *check.C) { + s.d.StartWithBusybox(c) + s.d.Cmd("volume", "create", "-d", "test-external-volume-driver", "--opt=invalidOption=1", "--name=testumount") + + out, _ := s.d.Cmd("run", "-v", "testumount:/foo", "busybox", "true") + c.Assert(s.ec.unmounts, checker.Equals, 0, check.Commentf(out)) + out, _ = s.d.Cmd("run", "-w", "/foo", "-v", "testumount:/foo", "busybox", "true") + c.Assert(s.ec.unmounts, checker.Equals, 0, check.Commentf(out)) +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverUnmountOnCp(c *check.C) { + s.d.StartWithBusybox(c) + s.d.Cmd("volume", "create", "-d", "test-external-volume-driver", "--name=test") + + out, _ := s.d.Cmd("run", "-d", "--name=test", "-v", "test:/foo", "busybox", "/bin/sh", "-c", "touch /test && top") + c.Assert(s.ec.mounts, checker.Equals, 1, check.Commentf(out)) + + out, _ = s.d.Cmd("cp", "test:/test", "/tmp/test") + c.Assert(s.ec.mounts, checker.Equals, 2, check.Commentf(out)) + c.Assert(s.ec.unmounts, checker.Equals, 1, check.Commentf(out)) + + out, _ = s.d.Cmd("kill", "test") + c.Assert(s.ec.unmounts, checker.Equals, 2, check.Commentf(out)) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_health_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_health_test.go new file mode 100644 index 000000000..0f78a41d8 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_health_test.go @@ -0,0 +1,164 @@ +package main + +import ( + "encoding/json" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/go-check/check" +) + +func waitForHealthStatus(c *check.C, name string, prev string, expected string) { + prev = prev + "\n" + expected = expected + "\n" + for { + out, _ := dockerCmd(c, "inspect", "--format={{.State.Health.Status}}", name) + if out == expected { + return + } + c.Check(out, checker.Equals, prev) + if out != prev { + return + } + time.Sleep(100 * time.Millisecond) + } +} + +func getHealth(c *check.C, name string) *types.Health { + out, _ := dockerCmd(c, "inspect", "--format={{json .State.Health}}", name) + var health types.Health + err := json.Unmarshal([]byte(out), &health) + c.Check(err, checker.Equals, nil) + return &health +} + +func (s *DockerSuite) TestHealth(c *check.C) { + testRequires(c, DaemonIsLinux) // busybox doesn't work on Windows + + imageName := "testhealth" + buildImageSuccessfully(c, imageName, build.WithDockerfile(`FROM busybox + RUN echo OK > /status + CMD ["/bin/sleep", "120"] + STOPSIGNAL SIGKILL + HEALTHCHECK --interval=1s --timeout=30s \ + CMD cat /status`)) + + // No health status before starting + name := "test_health" + dockerCmd(c, "create", "--name", name, imageName) + out, _ := dockerCmd(c, "ps", "-a", "--format={{.Status}}") + c.Check(out, checker.Equals, "Created\n") + + // Inspect the 
options + out, _ = dockerCmd(c, "inspect", + "--format=timeout={{.Config.Healthcheck.Timeout}} interval={{.Config.Healthcheck.Interval}} retries={{.Config.Healthcheck.Retries}} test={{.Config.Healthcheck.Test}}", name) + c.Check(out, checker.Equals, "timeout=30s interval=1s retries=0 test=[CMD-SHELL cat /status]\n") + + // Start + dockerCmd(c, "start", name) + waitForHealthStatus(c, name, "starting", "healthy") + + // Make it fail + dockerCmd(c, "exec", name, "rm", "/status") + waitForHealthStatus(c, name, "healthy", "unhealthy") + + // Inspect the status + out, _ = dockerCmd(c, "inspect", "--format={{.State.Health.Status}}", name) + c.Check(out, checker.Equals, "unhealthy\n") + + // Make it healthy again + dockerCmd(c, "exec", name, "touch", "/status") + waitForHealthStatus(c, name, "unhealthy", "healthy") + + // Remove container + dockerCmd(c, "rm", "-f", name) + + // Disable the check from the CLI + out, _ = dockerCmd(c, "create", "--name=noh", "--no-healthcheck", imageName) + out, _ = dockerCmd(c, "inspect", "--format={{.Config.Healthcheck.Test}}", "noh") + c.Check(out, checker.Equals, "[NONE]\n") + dockerCmd(c, "rm", "noh") + + // Disable the check with a new build + buildImageSuccessfully(c, "no_healthcheck", build.WithDockerfile(`FROM testhealth + HEALTHCHECK NONE`)) + + out, _ = dockerCmd(c, "inspect", "--format={{.ContainerConfig.Healthcheck.Test}}", "no_healthcheck") + c.Check(out, checker.Equals, "[NONE]\n") + + // Enable the checks from the CLI + _, _ = dockerCmd(c, "run", "-d", "--name=fatal_healthcheck", + "--health-interval=1s", + "--health-retries=3", + "--health-cmd=cat /status", + "no_healthcheck") + waitForHealthStatus(c, "fatal_healthcheck", "starting", "healthy") + health := getHealth(c, "fatal_healthcheck") + c.Check(health.Status, checker.Equals, "healthy") + c.Check(health.FailingStreak, checker.Equals, 0) + last := health.Log[len(health.Log)-1] + c.Check(last.ExitCode, checker.Equals, 0) + c.Check(last.Output, checker.Equals, "OK\n") + + // Fail the check + dockerCmd(c, "exec", "fatal_healthcheck", "rm", "/status") + waitForHealthStatus(c, "fatal_healthcheck", "healthy", "unhealthy") + + failsStr, _ := dockerCmd(c, "inspect", "--format={{.State.Health.FailingStreak}}", "fatal_healthcheck") + fails, err := strconv.Atoi(strings.TrimSpace(failsStr)) + c.Check(err, check.IsNil) + c.Check(fails >= 3, checker.Equals, true) + dockerCmd(c, "rm", "-f", "fatal_healthcheck") + + // Check timeout + // Note: if the interval is too small, it seems that Docker spends all its time running health + // checks and never gets around to killing it. 
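+	// The probe (sleep 5m) always exceeds the 1s timeout, so every check is
+	// killed and recorded as unhealthy with exit code -1.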
+ _, _ = dockerCmd(c, "run", "-d", "--name=test", + "--health-interval=1s", "--health-cmd=sleep 5m", "--health-timeout=1s", imageName) + waitForHealthStatus(c, "test", "starting", "unhealthy") + health = getHealth(c, "test") + last = health.Log[len(health.Log)-1] + c.Check(health.Status, checker.Equals, "unhealthy") + c.Check(last.ExitCode, checker.Equals, -1) + c.Check(last.Output, checker.Equals, "Health check exceeded timeout (1s)") + dockerCmd(c, "rm", "-f", "test") + + // Check JSON-format + buildImageSuccessfully(c, imageName, build.WithDockerfile(`FROM busybox + RUN echo OK > /status + CMD ["/bin/sleep", "120"] + STOPSIGNAL SIGKILL + HEALTHCHECK --interval=1s --timeout=30s \ + CMD ["cat", "/my status"]`)) + out, _ = dockerCmd(c, "inspect", + "--format={{.Config.Healthcheck.Test}}", imageName) + c.Check(out, checker.Equals, "[CMD cat /my status]\n") + +} + +// Github #33021 +func (s *DockerSuite) TestUnsetEnvVarHealthCheck(c *check.C) { + testRequires(c, DaemonIsLinux) // busybox doesn't work on Windows + + imageName := "testhealth" + buildImageSuccessfully(c, imageName, build.WithDockerfile(`FROM busybox +HEALTHCHECK --interval=1s --timeout=5s --retries=5 CMD /bin/sh -c "sleep 1" +ENTRYPOINT /bin/sh -c "sleep 600"`)) + + name := "env_test_health" + // No health status before starting + dockerCmd(c, "run", "-d", "--name", name, "-e", "FOO", imageName) + defer func() { + dockerCmd(c, "rm", "-f", name) + dockerCmd(c, "rmi", imageName) + }() + + // Start + dockerCmd(c, "start", name) + waitForHealthStatus(c, name, "starting", "healthy") + +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_help_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_help_test.go new file mode 100644 index 000000000..d1dcd8c7d --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_help_test.go @@ -0,0 +1,319 @@ +package main + +import ( + "fmt" + "runtime" + "strings" + "unicode" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/pkg/homedir" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestHelpTextVerify(c *check.C) { + // FIXME(vdemeester) should be a unit test, probably using golden files ? + testRequires(c, DaemonIsLinux) + + // Make sure main help text fits within 80 chars and that + // on non-windows system we use ~ when possible (to shorten things). + // Test for HOME set to its default value and set to "/" on linux + // Yes on windows setting up an array and looping (right now) isn't + // necessary because we just have one value, but we'll need the + // array/loop on linux so we might as well set it up so that we can + // test any number of home dirs later on and all we need to do is + // modify the array - the rest of the testing infrastructure should work + homes := []string{homedir.Get()} + + // Non-Windows machines need to test for this special case of $HOME + if runtime.GOOS != "windows" { + homes = append(homes, "/") + } + + homeKey := homedir.Key() + baseEnvs := appendBaseEnv(true) + + // Remove HOME env var from list so we can add a new value later. + for i, env := range baseEnvs { + if strings.HasPrefix(env, homeKey+"=") { + baseEnvs = append(baseEnvs[:i], baseEnvs[i+1:]...) 
+ break + } + } + + for _, home := range homes { + + // Dup baseEnvs and add our new HOME value + newEnvs := make([]string, len(baseEnvs)+1) + copy(newEnvs, baseEnvs) + newEnvs[len(newEnvs)-1] = homeKey + "=" + home + + scanForHome := runtime.GOOS != "windows" && home != "/" + + // Check main help text to make sure its not over 80 chars + result := icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "help"}, + Env: newEnvs, + }) + result.Assert(c, icmd.Success) + lines := strings.Split(result.Combined(), "\n") + for _, line := range lines { + // All lines should not end with a space + c.Assert(line, checker.Not(checker.HasSuffix), " ", check.Commentf("Line should not end with a space")) + + if scanForHome && strings.Contains(line, `=`+home) { + c.Fatalf("Line should use '%q' instead of %q:\n%s", homedir.GetShortcutString(), home, line) + } + if runtime.GOOS != "windows" { + i := strings.Index(line, homedir.GetShortcutString()) + if i >= 0 && i != len(line)-1 && line[i+1] != '/' { + c.Fatalf("Main help should not have used home shortcut:\n%s", line) + } + } + } + + // Make sure each cmd's help text fits within 90 chars and that + // on non-windows system we use ~ when possible (to shorten things). + // Pull the list of commands from the "Commands:" section of docker help + // FIXME(vdemeester) Why re-run help ? + //helpCmd = exec.Command(dockerBinary, "help") + //helpCmd.Env = newEnvs + //out, _, err = runCommandWithOutput(helpCmd) + //c.Assert(err, checker.IsNil, check.Commentf(out)) + i := strings.Index(result.Combined(), "Commands:") + c.Assert(i, checker.GreaterOrEqualThan, 0, check.Commentf("Missing 'Commands:' in:\n%s", result.Combined())) + + cmds := []string{} + // Grab all chars starting at "Commands:" + helpOut := strings.Split(result.Combined()[i:], "\n") + // Skip first line, it is just "Commands:" + helpOut = helpOut[1:] + + // Create the list of commands we want to test + cmdsToTest := []string{} + for _, cmd := range helpOut { + // Stop on blank line or non-indented line + if cmd == "" || !unicode.IsSpace(rune(cmd[0])) { + break + } + + // Grab just the first word of each line + cmd = strings.Split(strings.TrimSpace(cmd), " ")[0] + cmds = append(cmds, cmd) // Saving count for later + + cmdsToTest = append(cmdsToTest, cmd) + } + + // Add some 'two word' commands - would be nice to automatically + // calculate this list - somehow + cmdsToTest = append(cmdsToTest, "volume create") + cmdsToTest = append(cmdsToTest, "volume inspect") + cmdsToTest = append(cmdsToTest, "volume ls") + cmdsToTest = append(cmdsToTest, "volume rm") + cmdsToTest = append(cmdsToTest, "network connect") + cmdsToTest = append(cmdsToTest, "network create") + cmdsToTest = append(cmdsToTest, "network disconnect") + cmdsToTest = append(cmdsToTest, "network inspect") + cmdsToTest = append(cmdsToTest, "network ls") + cmdsToTest = append(cmdsToTest, "network rm") + + if testEnv.ExperimentalDaemon() { + cmdsToTest = append(cmdsToTest, "checkpoint create") + cmdsToTest = append(cmdsToTest, "checkpoint ls") + cmdsToTest = append(cmdsToTest, "checkpoint rm") + } + + // Divide the list of commands into go routines and run the func testcommand on the commands in parallel + // to save runtime of test + + errChan := make(chan error) + + for index := 0; index < len(cmdsToTest); index++ { + go func(index int) { + errChan <- testCommand(cmdsToTest[index], newEnvs, scanForHome, home) + }(index) + } + + for index := 0; index < len(cmdsToTest); index++ { + err := <-errChan + if err != nil { + c.Fatal(err) + } + } + } +} + 
+func (s *DockerSuite) TestHelpExitCodesHelpOutput(c *check.C) {
+ // Test to make sure the exit code and output (stdout vs stderr) of
+ // various good and bad cases are what we expect
+
+ // docker : stdout=all, stderr=empty, rc=0
+ out := cli.DockerCmd(c).Combined()
+ // Be really picky
+ c.Assert(out, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line at the end of 'docker'\n"))
+
+ // docker help: stdout=all, stderr=empty, rc=0
+ out = cli.DockerCmd(c, "help").Combined()
+ // Be really picky
+ c.Assert(out, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line at the end of 'docker help'\n"))
+
+ // docker --help: stdout=all, stderr=empty, rc=0
+ out = cli.DockerCmd(c, "--help").Combined()
+ // Be really picky
+ c.Assert(out, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line at the end of 'docker --help'\n"))
+
+ // docker inspect busybox: stdout=all, stderr=empty, rc=0
+ // Just making sure stderr is empty on valid cmd
+ out = cli.DockerCmd(c, "inspect", "busybox").Combined()
+ // Be really picky
+ c.Assert(out, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line at the end of 'docker inspect busybox'\n"))
+
+ // docker rm: stdout=empty, stderr=all, rc!=0
+ // testing the minimum-argument error msg
+ cli.Docker(cli.Args("rm")).Assert(c, icmd.Expected{
+ ExitCode: 1,
+ Error: "exit status 1",
+ Out: "",
+ // Should not contain full help text but should contain info about
+ // # of args and Usage line
+ Err: "requires at least 1 argument",
+ })
+
+ // docker rm NoSuchContainer: stdout=empty, stderr=all, rc!=0
+ // testing to make sure there is no blank line on error
+ result := cli.Docker(cli.Args("rm", "NoSuchContainer")).Assert(c, icmd.Expected{
+ ExitCode: 1,
+ Error: "exit status 1",
+ Out: "",
+ })
+ // Be really picky
+ c.Assert(len(result.Stderr()), checker.Not(checker.Equals), 0)
+ c.Assert(result.Stderr(), checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line at the end of 'docker rm'\n"))
+
+ // docker BadCmd: stdout=empty, stderr=all, rc!=0
+ cli.Docker(cli.Args("BadCmd")).Assert(c, icmd.Expected{
+ ExitCode: 1,
+ Error: "exit status 1",
+ Out: "",
+ Err: "docker: 'BadCmd' is not a docker command.\nSee 'docker --help'\n",
+ })
+}
+
+func testCommand(cmd string, newEnvs []string, scanForHome bool, home string) error {
+
+ args := strings.Split(cmd+" --help", " ")
+
+ // Check the full usage text
+ result := icmd.RunCmd(icmd.Cmd{
+ Command: append([]string{dockerBinary}, args...),
+ Env: newEnvs,
+ })
+ err := result.Error
+ out := result.Stdout()
+ stderr := result.Stderr()
+ if len(stderr) != 0 {
+ return fmt.Errorf("Error on %q help: non-empty stderr: %q\n", cmd, stderr)
+ }
+ if strings.HasSuffix(out, "\n\n") {
+ return fmt.Errorf("Should not have a blank line on %q\n", cmd)
+ }
+ if !strings.Contains(out, "--help") {
+ return fmt.Errorf("All commands should mention '--help'. Command '%v' did not.\n", cmd)
+ }
+
+ if err != nil {
+ return fmt.Errorf("%s", out)
+ }
+
+ // Check each line of the help output for style problems
+ lines := strings.Split(out, "\n")
+ for _, line := range lines {
+ i := strings.Index(line, "~")
+ if i >= 0 && i != len(line)-1 && line[i+1] != '/' {
+ return fmt.Errorf("Help for %q should not have used ~:\n%s", cmd, line)
+ }
+
+ // Options should NOT end with a period
+ if strings.HasPrefix(line, " -") && strings.HasSuffix(line, ".") {
+ return fmt.Errorf("Help for %q should not end with a period: %s", cmd, line)
+ }
+
+ // Options should NOT end with a space
+ if strings.HasSuffix(line, " ") {
+ return fmt.Errorf("Help for %q should not end with a space: %s", cmd, line)
+ }
+
+ }
+
+ // For each command make sure we generate an error
+ // if we give a bad arg
+ args = strings.Split(cmd+" --badArg", " ")
+
+ out, _, err = dockerCmdWithError(args...)
+ if err == nil {
+ return fmt.Errorf("%s", out)
+ }
+
+ // Be really picky
+ if strings.HasSuffix(stderr, "\n\n") {
+ return fmt.Errorf("Should not have a blank line at the end of %q help\n", cmd)
+ }
+
+ // Now make sure that each command will print a short-usage
+ // (not a full usage - meaning no opts section) if we
+ // are missing a required arg or pass in a bad arg
+
+ // These commands will never print a short-usage so don't test them
+ noShortUsage := map[string]string{
+ "images": "",
+ "login": "",
+ "logout": "",
+ "network": "",
+ "stats": "",
+ "volume create": "",
+ }
+
+ if _, ok := noShortUsage[cmd]; !ok {
+ // skipNoArgs are ones that we don't want to try w/o
+ // any args. Either because it'll hang the test or
+ // lead to an incorrect test result (like a false negative).
+ // Whatever the reason, skip trying to run w/o args and
+ // jump to trying with a bogus arg.
+ skipNoArgs := map[string]struct{}{
+ "daemon": {},
+ "events": {},
+ "load": {},
+ }
+
+ var result *icmd.Result
+ if _, ok := skipNoArgs[cmd]; !ok {
+ result = dockerCmdWithResult(strings.Split(cmd, " ")...)
+ }
+
+ // If it's ok w/o any args then try again with an arg
+ if result == nil || result.ExitCode == 0 {
+ result = dockerCmdWithResult(strings.Split(cmd+" badArg", " ")...)
+ }
+
+ if err := result.Compare(icmd.Expected{
+ Out: icmd.None,
+ Err: "\nUsage:",
+ ExitCode: 1,
+ }); err != nil {
+ return err
+ }
+
+ stderr := result.Stderr()
+ // Shouldn't have full usage
+ if strings.Contains(stderr, "--help=false") {
+ return fmt.Errorf("Should not have full usage on %q:%v", result.Cmd.Args, stderr)
+ }
+ if strings.HasSuffix(stderr, "\n\n") {
+ return fmt.Errorf("Should not have a blank line on %q\n%v", result.Cmd.Args, stderr)
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_history_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_history_test.go
new file mode 100644
index 000000000..43c4b9433
--- /dev/null
+++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_history_test.go
@@ -0,0 +1,119 @@
+package main
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/docker/docker/integration-cli/checker"
+ "github.com/docker/docker/integration-cli/cli/build"
+ "github.com/go-check/check"
+)
+
+// This is a heisen-test: because the created timestamps of the images and
+// the behavior of sort are not predictable, it doesn't always fail.
+func (s *DockerSuite) TestBuildHistory(c *check.C) {
+ name := "testbuildhistory"
+ buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+`
+LABEL label.A="A"
+LABEL label.B="B"
+LABEL label.C="C"
+LABEL label.D="D"
+LABEL label.E="E"
+LABEL label.F="F"
+LABEL label.G="G"
+LABEL label.H="H"
+LABEL label.I="I"
+LABEL label.J="J"
+LABEL label.K="K"
+LABEL label.L="L"
+LABEL label.M="M"
+LABEL label.N="N"
+LABEL label.O="O"
+LABEL label.P="P"
+LABEL label.Q="Q"
+LABEL label.R="R"
+LABEL label.S="S"
+LABEL label.T="T"
+LABEL label.U="U"
+LABEL label.V="V"
+LABEL label.W="W"
+LABEL label.X="X"
+LABEL label.Y="Y"
+LABEL label.Z="Z"`))
+
+ out, _ := dockerCmd(c, "history", name)
+ actualValues := strings.Split(out, "\n")[1:27]
+ expectedValues := [26]string{"Z", "Y", "X", "W", "V", "U", "T", "S", "R", "Q", "P", "O", "N", "M", "L", "K", "J", "I", "H", "G", "F", "E", "D", "C", "B", "A"}
+
+ for i := 0; i < 26; i++ {
+ echoValue := fmt.Sprintf("LABEL label.%s=%s", expectedValues[i], expectedValues[i])
+ actualValue := actualValues[i]
+ c.Assert(actualValue, checker.Contains, echoValue)
+ }
+
+}
+
+func (s *DockerSuite) TestHistoryExistentImage(c *check.C) {
+ dockerCmd(c, "history", "busybox")
+}
+
+func (s *DockerSuite) TestHistoryNonExistentImage(c *check.C) {
+ _, _, err := dockerCmdWithError("history", "testHistoryNonExistentImage")
+ c.Assert(err, checker.NotNil, check.Commentf("history on a non-existent image should fail."))
+}
+
+func (s *DockerSuite) TestHistoryImageWithComment(c *check.C) {
+ name := "testhistoryimagewithcomment"
+
+ // make an image through docker commit [-m message]
+
+ dockerCmd(c, "run", "--name", name, "busybox", "true")
+ dockerCmd(c, "wait", name)
+
+ comment := "This_is_a_comment"
+ dockerCmd(c, "commit", "-m="+comment, name, name)
+
+ // test docker history to check the comment message
+
+ out, _ := dockerCmd(c, "history", name)
+ outputTabs := strings.Fields(strings.Split(out, "\n")[1])
+ actualValue := outputTabs[len(outputTabs)-1]
+ c.Assert(actualValue, checker.Contains, comment)
+}
+
+func (s *DockerSuite) TestHistoryHumanOptionFalse(c *check.C) {
+ out, _ := dockerCmd(c, "history", "--human=false", "busybox")
+ lines := strings.Split(out, "\n")
+ sizeColumnRegex := regexp.MustCompile("SIZE +")
+ indices := sizeColumnRegex.FindStringIndex(lines[0])
+ startIndex := indices[0]
+ endIndex := indices[1]
+ for i := 1; i < len(lines)-1; i++ {
+ if endIndex > len(lines[i]) {
+ endIndex = len(lines[i])
+ }
+ sizeString := lines[i][startIndex:endIndex]
+
+ _, err := strconv.Atoi(strings.TrimSpace(sizeString))
+ c.Assert(err, checker.IsNil, check.Commentf("The size '%s' was not an integer", sizeString))
+ }
+}
+
+func (s *DockerSuite) TestHistoryHumanOptionTrue(c *check.C) {
+ out, _ := dockerCmd(c, "history", "--human=true", "busybox")
+ lines := strings.Split(out, "\n")
+ sizeColumnRegex := regexp.MustCompile("SIZE +")
+ humanSizeRegexRaw := "\\d+.*B" // Matches human sizes like 10 MB, 3.2 KB, etc.
+ indices := sizeColumnRegex.FindStringIndex(lines[0])
+ startIndex := indices[0]
+ endIndex := indices[1]
+ for i := 1; i < len(lines)-1; i++ {
+ if endIndex > len(lines[i]) {
+ endIndex = len(lines[i])
+ }
+ sizeString := lines[i][startIndex:endIndex]
+ c.Assert(strings.TrimSpace(sizeString), checker.Matches, humanSizeRegexRaw, check.Commentf("The size '%s' was not in human format", sizeString))
+ }
+}
diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_images_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_images_test.go
new file mode 100644
index 000000000..dccbe1262
--- /dev/null
+++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_images_test.go
@@ -0,0 +1,366 @@
+package main
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "reflect"
+ "sort"
+ "strings"
+ "time"
+
+ "github.com/docker/docker/integration-cli/checker"
+ "github.com/docker/docker/integration-cli/cli/build"
+ "github.com/docker/docker/pkg/stringid"
+ icmd "github.com/docker/docker/pkg/testutil/cmd"
+ "github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestImagesEnsureImageIsListed(c *check.C) {
+ imagesOut, _ := dockerCmd(c, "images")
+ c.Assert(imagesOut, checker.Contains, "busybox")
+}
+
+func (s *DockerSuite) TestImagesEnsureImageWithTagIsListed(c *check.C) {
+ name := "imagewithtag"
+ dockerCmd(c, "tag", "busybox", name+":v1")
+ dockerCmd(c, "tag", "busybox", name+":v1v1")
+ dockerCmd(c, "tag", "busybox", name+":v2")
+
+ imagesOut, _ := dockerCmd(c, "images", name+":v1")
+ c.Assert(imagesOut, checker.Contains, name)
+ c.Assert(imagesOut, checker.Contains, "v1")
+ c.Assert(imagesOut, checker.Not(checker.Contains), "v2")
+ c.Assert(imagesOut, checker.Not(checker.Contains), "v1v1")
+
+ imagesOut, _ = dockerCmd(c, "images", name)
+ c.Assert(imagesOut, checker.Contains, name)
+ c.Assert(imagesOut, checker.Contains, "v1")
+ c.Assert(imagesOut, checker.Contains, "v1v1")
+ c.Assert(imagesOut, checker.Contains, "v2")
+}
+
+func (s *DockerSuite) TestImagesEnsureImageWithBadTagIsNotListed(c *check.C) {
+ imagesOut, _ := dockerCmd(c, "images", "busybox:nonexistent")
+ c.Assert(imagesOut, checker.Not(checker.Contains), "busybox")
+}
+
+func (s *DockerSuite) TestImagesOrderedByCreationDate(c *check.C) {
+ buildImageSuccessfully(c, "order:test_a", build.WithDockerfile(`FROM busybox
+ MAINTAINER dockerio1`))
+ id1 := getIDByName(c, "order:test_a")
+ time.Sleep(1 * time.Second)
+ buildImageSuccessfully(c, "order:test_c", build.WithDockerfile(`FROM busybox
+ MAINTAINER dockerio2`))
+ id2 := getIDByName(c, "order:test_c")
+ time.Sleep(1 * time.Second)
+ buildImageSuccessfully(c, "order:test_b", build.WithDockerfile(`FROM busybox
+ MAINTAINER dockerio3`))
+ id3 := getIDByName(c, "order:test_b")
+
+ out, _ := dockerCmd(c, "images", "-q", "--no-trunc")
+ imgs := strings.Split(out, "\n")
+ c.Assert(imgs[0], checker.Equals, id3, check.Commentf("First image must be %s, got %s", id3, imgs[0]))
+ c.Assert(imgs[1], checker.Equals, id2, check.Commentf("Second image must be %s, got %s", id2, imgs[1]))
+ c.Assert(imgs[2], checker.Equals, id1, check.Commentf("Third image must be %s, got %s", id1, imgs[2]))
+}
+
+func (s *DockerSuite) TestImagesErrorWithInvalidFilterNameTest(c *check.C) {
+ out, _, err := dockerCmdWithError("images", "-f", "FOO=123")
+ c.Assert(err, checker.NotNil)
+ c.Assert(out, checker.Contains, "Invalid filter")
+}
+
+func (s *DockerSuite) TestImagesFilterLabelMatch(c *check.C) {
+ imageName1 := "images_filter_test1"
+ imageName2 := "images_filter_test2"
+ imageName3 := "images_filter_test3"
+ buildImageSuccessfully(c, imageName1, build.WithDockerfile(`FROM busybox
+ LABEL match me`))
+ image1ID := getIDByName(c, imageName1)
+
+ buildImageSuccessfully(c, imageName2, build.WithDockerfile(`FROM busybox
+ LABEL match="me too"`))
+ image2ID := getIDByName(c, imageName2)
+
+ buildImageSuccessfully(c, imageName3, build.WithDockerfile(`FROM busybox
+ LABEL nomatch me`))
+ image3ID := getIDByName(c, imageName3)
+
+ out, _ := dockerCmd(c, "images", "--no-trunc", "-q", "-f", "label=match")
"-q", "-f", "label=match") + out = strings.TrimSpace(out) + c.Assert(out, check.Matches, fmt.Sprintf("[\\s\\w:]*%s[\\s\\w:]*", image1ID)) + c.Assert(out, check.Matches, fmt.Sprintf("[\\s\\w:]*%s[\\s\\w:]*", image2ID)) + c.Assert(out, check.Not(check.Matches), fmt.Sprintf("[\\s\\w:]*%s[\\s\\w:]*", image3ID)) + + out, _ = dockerCmd(c, "images", "--no-trunc", "-q", "-f", "label=match=me too") + out = strings.TrimSpace(out) + c.Assert(out, check.Equals, image2ID) +} + +// Regression : #15659 +func (s *DockerSuite) TestCommitWithFilterLabel(c *check.C) { + // Create a container + dockerCmd(c, "run", "--name", "bar", "busybox", "/bin/sh") + // Commit with labels "using changes" + out, _ := dockerCmd(c, "commit", "-c", "LABEL foo.version=1.0.0-1", "-c", "LABEL foo.name=bar", "-c", "LABEL foo.author=starlord", "bar", "bar:1.0.0-1") + imageID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "images", "--no-trunc", "-q", "-f", "label=foo.version=1.0.0-1") + out = strings.TrimSpace(out) + c.Assert(out, check.Equals, imageID) +} + +func (s *DockerSuite) TestImagesFilterSinceAndBefore(c *check.C) { + buildImageSuccessfully(c, "image:1", build.WithDockerfile(`FROM `+minimalBaseImage()+` +LABEL number=1`)) + imageID1 := getIDByName(c, "image:1") + buildImageSuccessfully(c, "image:2", build.WithDockerfile(`FROM `+minimalBaseImage()+` +LABEL number=2`)) + imageID2 := getIDByName(c, "image:2") + buildImageSuccessfully(c, "image:3", build.WithDockerfile(`FROM `+minimalBaseImage()+` +LABEL number=3`)) + imageID3 := getIDByName(c, "image:3") + + expected := []string{imageID3, imageID2} + + out, _ := dockerCmd(c, "images", "-f", "since=image:1", "image") + c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Image list is not in the correct order: %v\n%s", expected, out)) + + out, _ = dockerCmd(c, "images", "-f", "since="+imageID1, "image") + c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Image list is not in the correct order: %v\n%s", expected, out)) + + expected = []string{imageID3} + + out, _ = dockerCmd(c, "images", "-f", "since=image:2", "image") + c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Image list is not in the correct order: %v\n%s", expected, out)) + + out, _ = dockerCmd(c, "images", "-f", "since="+imageID2, "image") + c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Image list is not in the correct order: %v\n%s", expected, out)) + + expected = []string{imageID2, imageID1} + + out, _ = dockerCmd(c, "images", "-f", "before=image:3", "image") + c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter: Image list is not in the correct order: %v\n%s", expected, out)) + + out, _ = dockerCmd(c, "images", "-f", "before="+imageID3, "image") + c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter: Image list is not in the correct order: %v\n%s", expected, out)) + + expected = []string{imageID1} + + out, _ = dockerCmd(c, "images", "-f", "before=image:2", "image") + c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter: Image list is not in the correct order: %v\n%s", expected, out)) + + out, _ = dockerCmd(c, "images", "-f", "before="+imageID2, "image") + c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter: Image list is not in the correct order: %v\n%s", expected, out)) +} + 
+func assertImageList(out string, expected []string) bool {
+ lines := strings.Split(strings.Trim(out, "\n "), "\n")
+
+ if len(lines)-1 != len(expected) {
+ return false
+ }
+
+ imageIDIndex := strings.Index(lines[0], "IMAGE ID")
+ for i := 0; i < len(expected); i++ {
+ imageID := lines[i+1][imageIDIndex : imageIDIndex+12]
+ found := false
+ for _, e := range expected {
+ if imageID == e[7:19] {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return false
+ }
+ }
+
+ return true
+}
+
+// FIXME(vdemeester) should be a unit test on `docker image ls`
+func (s *DockerSuite) TestImagesFilterSpaceTrimCase(c *check.C) {
+ imageName := "images_filter_test"
+ // Build an image and make the build fail so that we have dangling images
+ buildImage(imageName, build.WithDockerfile(`FROM busybox
+ RUN touch /test/foo
+ RUN touch /test/bar
+ RUN touch /test/baz`)).Assert(c, icmd.Expected{
+ ExitCode: 1,
+ })
+
+ filters := []string{
+ "dangling=true",
+ "Dangling=true",
+ " dangling=true",
+ "dangling=true ",
+ "dangling = true",
+ }
+
+ imageListings := make([][]string, len(filters))
+ for idx, filter := range filters {
+ out, _ := dockerCmd(c, "images", "-q", "-f", filter)
+ listing := strings.Split(out, "\n")
+ sort.Strings(listing)
+ imageListings[idx] = listing
+ }
+
+ for idx, listing := range imageListings {
+ if idx < 4 && !reflect.DeepEqual(listing, imageListings[idx+1]) {
+ for idx, errListing := range imageListings {
+ fmt.Printf("out %d\n", idx)
+ for _, image := range errListing {
+ fmt.Print(image)
+ }
+ fmt.Println()
+ }
+ c.Fatalf("All output must be the same")
+ }
+ }
+}
+
+func (s *DockerSuite) TestImagesEnsureDanglingImageOnlyListedOnce(c *check.C) {
+ testRequires(c, DaemonIsLinux)
+ // create container 1
+ out, _ := dockerCmd(c, "run", "-d", "busybox", "true")
+ containerID1 := strings.TrimSpace(out)
+
+ // commit as foobox
+ out, _ = dockerCmd(c, "commit", containerID1, "foobox")
+ imageID := stringid.TruncateID(strings.TrimSpace(out))
+
+ // overwrite the tag, making the previous image dangling
+ dockerCmd(c, "tag", "busybox", "foobox")
+
+ out, _ = dockerCmd(c, "images", "-q", "-f", "dangling=true")
+ // Expect one dangling image
+ c.Assert(strings.Count(out, imageID), checker.Equals, 1)
+
+ out, _ = dockerCmd(c, "images", "-q", "-f", "dangling=false")
+ // dangling=false must not include dangling images
+ c.Assert(out, checker.Not(checker.Contains), imageID)
+
+ out, _ = dockerCmd(c, "images")
+ // plain docker images still includes dangling images
+ c.Assert(out, checker.Contains, imageID)
+
+}
+
+// FIXME(vdemeester) should be a unit test for `docker image ls`
+func (s *DockerSuite) TestImagesWithIncorrectFilter(c *check.C) {
+ out, _, err := dockerCmdWithError("images", "-f", "dangling=invalid")
+ c.Assert(err, check.NotNil)
+ c.Assert(out, checker.Contains, "Invalid filter")
+}
+
+func (s *DockerSuite) TestImagesEnsureOnlyHeadsImagesShown(c *check.C) {
+ dockerfile := `
+ FROM busybox
+ MAINTAINER docker
+ ENV foo bar`
+ name := "scratch-image"
+ result := buildImage(name, build.WithDockerfile(dockerfile))
+ result.Assert(c, icmd.Success)
+ id := getIDByName(c, name)
+
+ // this is just the output of docker build
+ // we're interested in getting the image id of the MAINTAINER instruction
+ // and that's located in the output at line 5, from column 7 to the end
+ split := strings.Split(result.Combined(), "\n")
+ intermediate := strings.TrimSpace(split[5][7:])
+
+ out, _ := dockerCmd(c, "images")
+ // images shouldn't show non-heads images
+ c.Assert(out, checker.Not(checker.Contains), intermediate)
+ // images should
contain final built images + c.Assert(out, checker.Contains, stringid.TruncateID(id)) +} + +func (s *DockerSuite) TestImagesEnsureImagesFromScratchShown(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support FROM scratch + dockerfile := ` + FROM scratch + MAINTAINER docker` + + name := "scratch-image" + buildImageSuccessfully(c, name, build.WithDockerfile(dockerfile)) + id := getIDByName(c, name) + + out, _ := dockerCmd(c, "images") + // images should contain images built from scratch + c.Assert(out, checker.Contains, stringid.TruncateID(id)) +} + +// For W2W - equivalent to TestImagesEnsureImagesFromScratchShown but Windows +// doesn't support from scratch +func (s *DockerSuite) TestImagesEnsureImagesFromBusyboxShown(c *check.C) { + dockerfile := ` + FROM busybox + MAINTAINER docker` + name := "busybox-image" + + buildImageSuccessfully(c, name, build.WithDockerfile(dockerfile)) + id := getIDByName(c, name) + + out, _ := dockerCmd(c, "images") + // images should contain images built from busybox + c.Assert(out, checker.Contains, stringid.TruncateID(id)) +} + +// #18181 +func (s *DockerSuite) TestImagesFilterNameWithPort(c *check.C) { + tag := "a.b.c.d:5000/hello" + dockerCmd(c, "tag", "busybox", tag) + out, _ := dockerCmd(c, "images", tag) + c.Assert(out, checker.Contains, tag) + + out, _ = dockerCmd(c, "images", tag+":latest") + c.Assert(out, checker.Contains, tag) + + out, _ = dockerCmd(c, "images", tag+":no-such-tag") + c.Assert(out, checker.Not(checker.Contains), tag) +} + +func (s *DockerSuite) TestImagesFormat(c *check.C) { + // testRequires(c, DaemonIsLinux) + tag := "myimage" + dockerCmd(c, "tag", "busybox", tag+":v1") + dockerCmd(c, "tag", "busybox", tag+":v2") + + out, _ := dockerCmd(c, "images", "--format", "{{.Repository}}", tag) + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + + expected := []string{"myimage", "myimage"} + var names []string + names = append(names, lines...) 
+ c.Assert(names, checker.DeepEquals, expected, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names))
+}
+
+// ImagesDefaultFormatAndQuiet
+func (s *DockerSuite) TestImagesFormatDefaultFormat(c *check.C) {
+ testRequires(c, DaemonIsLinux)
+
+ // create container 1
+ out, _ := dockerCmd(c, "run", "-d", "busybox", "true")
+ containerID1 := strings.TrimSpace(out)
+
+ // commit as myimage
+ out, _ = dockerCmd(c, "commit", containerID1, "myimage")
+ imageID := stringid.TruncateID(strings.TrimSpace(out))
+
+ config := `{
+ "imagesFormat": "{{ .ID }} default"
+}`
+ d, err := ioutil.TempDir("", "integration-cli-")
+ c.Assert(err, checker.IsNil)
+ defer os.RemoveAll(d)
+
+ err = ioutil.WriteFile(filepath.Join(d, "config.json"), []byte(config), 0644)
+ c.Assert(err, checker.IsNil)
+
+ out, _ = dockerCmd(c, "--config", d, "images", "-q", "myimage")
+ c.Assert(out, checker.Equals, imageID+"\n", check.Commentf("Expected to print only the image id, got %v\n", out))
+}
diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_import_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_import_test.go
new file mode 100644
index 000000000..711f39b87
--- /dev/null
+++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_import_test.go
@@ -0,0 +1,143 @@
+package main
+
+import (
+ "bufio"
+ "compress/gzip"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "regexp"
+ "strings"
+
+ "github.com/docker/docker/integration-cli/checker"
+ "github.com/docker/docker/integration-cli/cli"
+ "github.com/docker/docker/pkg/testutil"
+ icmd "github.com/docker/docker/pkg/testutil/cmd"
+ "github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestImportDisplay(c *check.C) {
+ testRequires(c, DaemonIsLinux)
+ out, _ := dockerCmd(c, "run", "-d", "busybox", "true")
+ cleanedContainerID := strings.TrimSpace(out)
+
+ out, _, err := testutil.RunCommandPipelineWithOutput(
+ exec.Command(dockerBinary, "export", cleanedContainerID),
+ exec.Command(dockerBinary, "import", "-"),
+ )
+ c.Assert(err, checker.IsNil)
+
+ c.Assert(out, checker.Count, "\n", 1, check.Commentf("output should contain exactly one '\\n', but didn't"))
+
+ image := strings.TrimSpace(out)
+ out, _ = dockerCmd(c, "run", "--rm", image, "true")
+ c.Assert(out, checker.Equals, "", check.Commentf("command output should've been nothing."))
+}
+
+func (s *DockerSuite) TestImportBadURL(c *check.C) {
+ out, _, err := dockerCmdWithError("import", "http://nourl/bad")
+ c.Assert(err, checker.NotNil, check.Commentf("import was supposed to fail but didn't"))
+ // Depending on your system you can get either of these errors
+ if !strings.Contains(out, "dial tcp") &&
+ !strings.Contains(out, "ApplyLayer exit status 1 stdout: stderr: archive/tar: invalid tar header") &&
+ !strings.Contains(out, "Error processing tar file") {
+ c.Fatalf("expected an error msg but didn't get one.\nErr: %v\nOut: %v", err, out)
+ }
+}
+
+func (s *DockerSuite) TestImportFile(c *check.C) {
+ testRequires(c, DaemonIsLinux)
+ dockerCmd(c, "run", "--name", "test-import", "busybox", "true")
+
+ temporaryFile, err := ioutil.TempFile("", "exportImportTest")
+ c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary file"))
+ defer os.Remove(temporaryFile.Name())
+
+ icmd.RunCmd(icmd.Cmd{
+ Command: []string{dockerBinary, "export", "test-import"},
+ Stdout: bufio.NewWriter(temporaryFile),
+ }).Assert(c, icmd.Success)
+
+ out, _ := dockerCmd(c, "import", temporaryFile.Name())
+ c.Assert(out, checker.Count, "\n", 1, check.Commentf("output should contain exactly one '\\n', but didn't"))
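+ // The single line of output is the ID of the newly imported image.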
+ image := strings.TrimSpace(out)
+
+ out, _ = dockerCmd(c, "run", "--rm", image, "true")
+ c.Assert(out, checker.Equals, "", check.Commentf("command output should've been nothing."))
+}
+
+func (s *DockerSuite) TestImportGzipped(c *check.C) {
+ testRequires(c, DaemonIsLinux)
+ dockerCmd(c, "run", "--name", "test-import", "busybox", "true")
+
+ temporaryFile, err := ioutil.TempFile("", "exportImportTest")
+ c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary file"))
+ defer os.Remove(temporaryFile.Name())
+
+ w := gzip.NewWriter(temporaryFile)
+ icmd.RunCmd(icmd.Cmd{
+ Command: []string{dockerBinary, "export", "test-import"},
+ Stdout: w,
+ }).Assert(c, icmd.Success)
+ c.Assert(w.Close(), checker.IsNil, check.Commentf("failed to close gzip writer"))
+ temporaryFile.Close()
+ out, _ := dockerCmd(c, "import", temporaryFile.Name())
+ c.Assert(out, checker.Count, "\n", 1, check.Commentf("output should contain exactly one '\\n', but didn't"))
+ image := strings.TrimSpace(out)
+
+ out, _ = dockerCmd(c, "run", "--rm", image, "true")
+ c.Assert(out, checker.Equals, "", check.Commentf("command output should've been nothing."))
+}
+
+func (s *DockerSuite) TestImportFileWithMessage(c *check.C) {
+ testRequires(c, DaemonIsLinux)
+ dockerCmd(c, "run", "--name", "test-import", "busybox", "true")
+
+ temporaryFile, err := ioutil.TempFile("", "exportImportTest")
+ c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary file"))
+ defer os.Remove(temporaryFile.Name())
+
+ icmd.RunCmd(icmd.Cmd{
+ Command: []string{dockerBinary, "export", "test-import"},
+ Stdout: bufio.NewWriter(temporaryFile),
+ }).Assert(c, icmd.Success)
+
+ message := "Testing commit message"
+ out, _ := dockerCmd(c, "import", "-m", message, temporaryFile.Name())
+ c.Assert(out, checker.Count, "\n", 1, check.Commentf("output should contain exactly one '\\n', but didn't"))
+ image := strings.TrimSpace(out)
+
+ out, _ = dockerCmd(c, "history", image)
+ split := strings.Split(out, "\n")
+
+ c.Assert(split, checker.HasLen, 3, check.Commentf("expected 3 lines from image history"))
+ r := regexp.MustCompile("[\\s]{2,}")
+ split = r.Split(split[1], -1)
+
+ c.Assert(message, checker.Equals, split[3], check.Commentf("didn't get expected value in commit message"))
+
+ out, _ = dockerCmd(c, "run", "--rm", image, "true")
+ c.Assert(out, checker.Equals, "", check.Commentf("command output should've been nothing"))
+}
+
+func (s *DockerSuite) TestImportFileNonExistentFile(c *check.C) {
+ _, _, err := dockerCmdWithError("import", "example.com/myImage.tar")
+ c.Assert(err, checker.NotNil, check.Commentf("importing a non-existent file must fail"))
+}
+
+func (s *DockerSuite) TestImportWithQuotedChanges(c *check.C) {
+ testRequires(c, DaemonIsLinux)
+ cli.DockerCmd(c, "run", "--name", "test-import", "busybox", "true")
+
+ temporaryFile, err := ioutil.TempFile("", "exportImportTest")
+ c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary file"))
+ defer os.Remove(temporaryFile.Name())
+
+ cli.Docker(cli.Args("export", "test-import"), cli.WithStdout(bufio.NewWriter(temporaryFile))).Assert(c, icmd.Success)
+
+ result := cli.DockerCmd(c, "import", "-c", `ENTRYPOINT ["/bin/sh", "-c"]`, temporaryFile.Name())
+ image := strings.TrimSpace(result.Stdout())
+
+ result = cli.DockerCmd(c, "run", "--rm", image, "true")
+ c.Assert(result, icmd.Matches, icmd.Expected{Out: icmd.None})
+}
diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_info_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_info_test.go
new file mode 100644
index 000000000..d75974dfc
--- /dev/null
+++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_info_test.go
@@ -0,0 +1,239 @@
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "net"
+ "strings"
+
+ "github.com/docker/docker/integration-cli/checker"
+ "github.com/docker/docker/integration-cli/daemon"
+ "github.com/go-check/check"
+)
+
+// ensure docker info succeeds
+func (s *DockerSuite) TestInfoEnsureSucceeds(c *check.C) {
+ out, _ := dockerCmd(c, "info")
+
+ // always shown fields
+ stringsToCheck := []string{
+ "ID:",
+ "Containers:",
+ " Running:",
+ " Paused:",
+ " Stopped:",
+ "Images:",
+ "OSType:",
+ "Architecture:",
+ "Logging Driver:",
+ "Operating System:",
+ "CPUs:",
+ "Total Memory:",
+ "Kernel Version:",
+ "Storage Driver:",
+ "Volume:",
+ "Network:",
+ "Live Restore Enabled:",
+ }
+
+ if testEnv.DaemonPlatform() == "linux" {
+ stringsToCheck = append(stringsToCheck, "Init Binary:", "Security Options:", "containerd version:", "runc version:", "init version:")
+ }
+
+ if DaemonIsLinux() {
+ stringsToCheck = append(stringsToCheck, "Runtimes:", "Default Runtime: runc")
+ }
+
+ if testEnv.ExperimentalDaemon() {
+ stringsToCheck = append(stringsToCheck, "Experimental: true")
+ } else {
+ stringsToCheck = append(stringsToCheck, "Experimental: false")
+ }
+
+ for _, linePrefix := range stringsToCheck {
+ c.Assert(out, checker.Contains, linePrefix, check.Commentf("couldn't find string %v in output", linePrefix))
+ }
+}
+
+// TestInfoFormat tests `docker info --format`
+func (s *DockerSuite) TestInfoFormat(c *check.C) {
+ out, status := dockerCmd(c, "info", "--format", "{{json .}}")
+ c.Assert(status, checker.Equals, 0)
+ var m map[string]interface{}
+ err := json.Unmarshal([]byte(out), &m)
+ c.Assert(err, checker.IsNil)
+ _, _, err = dockerCmdWithError("info", "--format", "{{.badString}}")
+ c.Assert(err, checker.NotNil)
+}
+
+// TestInfoDiscoveryBackend verifies that a daemon run with `--cluster-advertise` and
+// `--cluster-store` properly shows the backend's endpoint in the info output.
+func (s *DockerSuite) TestInfoDiscoveryBackend(c *check.C) {
+ testRequires(c, SameHostDaemon, DaemonIsLinux)
+
+ d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
+ Experimental: testEnv.ExperimentalDaemon(),
+ })
+ discoveryBackend := "consul://consuladdr:consulport/some/path"
+ discoveryAdvertise := "1.1.1.1:2375"
+ d.Start(c, fmt.Sprintf("--cluster-store=%s", discoveryBackend), fmt.Sprintf("--cluster-advertise=%s", discoveryAdvertise))
+ defer d.Stop(c)
+
+ out, err := d.Cmd("info")
+ c.Assert(err, checker.IsNil)
+ c.Assert(out, checker.Contains, fmt.Sprintf("Cluster Store: %s\n", discoveryBackend))
+ c.Assert(out, checker.Contains, fmt.Sprintf("Cluster Advertise: %s\n", discoveryAdvertise))
+}
+
+// TestInfoDiscoveryInvalidAdvertise verifies that a daemon run with
+// an invalid `--cluster-advertise` configuration fails to start
+func (s *DockerSuite) TestInfoDiscoveryInvalidAdvertise(c *check.C) {
+ testRequires(c, SameHostDaemon, DaemonIsLinux)
+
+ d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
+ Experimental: testEnv.ExperimentalDaemon(),
+ })
+ discoveryBackend := "consul://consuladdr:consulport/some/path"
+
+ // --cluster-advertise with an invalid string is an error
+ err := d.StartWithError(fmt.Sprintf("--cluster-store=%s", discoveryBackend), "--cluster-advertise=invalid")
+ c.Assert(err, checker.NotNil)
+
+ // --cluster-advertise without --cluster-store is also an error
+ err = d.StartWithError("--cluster-advertise=1.1.1.1:2375")
+ c.Assert(err, checker.NotNil)
+}
+
+// TestInfoDiscoveryAdvertiseInterfaceName verifies that a daemon run with `--cluster-advertise`
+// configured with an interface name properly shows the advertised IP address in the info output.
+func (s *DockerSuite) TestInfoDiscoveryAdvertiseInterfaceName(c *check.C) {
+ testRequires(c, SameHostDaemon, Network, DaemonIsLinux)
+
+ d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
+ Experimental: testEnv.ExperimentalDaemon(),
+ })
+ discoveryBackend := "consul://consuladdr:consulport/some/path"
+ discoveryAdvertise := "eth0"
+
+ d.Start(c, fmt.Sprintf("--cluster-store=%s", discoveryBackend), fmt.Sprintf("--cluster-advertise=%s:2375", discoveryAdvertise))
+ defer d.Stop(c)
+
+ iface, err := net.InterfaceByName(discoveryAdvertise)
+ c.Assert(err, checker.IsNil)
+ addrs, err := iface.Addrs()
+ c.Assert(err, checker.IsNil)
+ c.Assert(len(addrs), checker.GreaterThan, 0)
+ ip, _, err := net.ParseCIDR(addrs[0].String())
+ c.Assert(err, checker.IsNil)
+
+ out, err := d.Cmd("info")
+ c.Assert(err, checker.IsNil)
+ c.Assert(out, checker.Contains, fmt.Sprintf("Cluster Store: %s\n", discoveryBackend))
+ c.Assert(out, checker.Contains, fmt.Sprintf("Cluster Advertise: %s:2375\n", ip.String()))
+}
+
+func (s *DockerSuite) TestInfoDisplaysRunningContainers(c *check.C) {
+ testRequires(c, DaemonIsLinux)
+
+ dockerCmd(c, "run", "-d", "busybox", "top")
+ out, _ := dockerCmd(c, "info")
+ c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", 1))
+ c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", 1))
+ c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", 0))
+ c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", 0))
+}
+
+func (s *DockerSuite) TestInfoDisplaysPausedContainers(c *check.C) {
+ testRequires(c, IsPausable)
+
+ out := runSleepingContainer(c, "-d")
+ cleanedContainerID := strings.TrimSpace(out)
+
+ dockerCmd(c, "pause", cleanedContainerID)
+
+ out, _ = dockerCmd(c, "info")
+ c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", 1))
+ c.Assert(out,
checker.Contains, fmt.Sprintf(" Running: %d\n", 0)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", 1)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", 0)) +} + +func (s *DockerSuite) TestInfoDisplaysStoppedContainers(c *check.C) { + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + cleanedContainerID := strings.TrimSpace(out) + + dockerCmd(c, "stop", cleanedContainerID) + + out, _ = dockerCmd(c, "info") + c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", 1)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", 0)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", 0)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", 1)) +} + +func (s *DockerSuite) TestInfoDebug(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: testEnv.ExperimentalDaemon(), + }) + d.Start(c, "--debug") + defer d.Stop(c) + + out, err := d.Cmd("--debug", "info") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Debug Mode (client): true\n") + c.Assert(out, checker.Contains, "Debug Mode (server): true\n") + c.Assert(out, checker.Contains, "File Descriptors") + c.Assert(out, checker.Contains, "Goroutines") + c.Assert(out, checker.Contains, "System Time") + c.Assert(out, checker.Contains, "EventsListeners") + c.Assert(out, checker.Contains, "Docker Root Dir") +} + +func (s *DockerSuite) TestInsecureRegistries(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + registryCIDR := "192.168.1.0/24" + registryHost := "insecurehost.com:5000" + + d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: testEnv.ExperimentalDaemon(), + }) + d.Start(c, "--insecure-registry="+registryCIDR, "--insecure-registry="+registryHost) + defer d.Stop(c) + + out, err := d.Cmd("info") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Insecure Registries:\n") + c.Assert(out, checker.Contains, fmt.Sprintf(" %s\n", registryHost)) + c.Assert(out, checker.Contains, fmt.Sprintf(" %s\n", registryCIDR)) +} + +func (s *DockerDaemonSuite) TestRegistryMirrors(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + registryMirror1 := "https://192.168.1.2" + registryMirror2 := "http://registry.mirror.com:5000" + + s.d.Start(c, "--registry-mirror="+registryMirror1, "--registry-mirror="+registryMirror2) + + out, err := s.d.Cmd("info") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Registry Mirrors:\n") + c.Assert(out, checker.Contains, fmt.Sprintf(" %s", registryMirror1)) + c.Assert(out, checker.Contains, fmt.Sprintf(" %s", registryMirror2)) +} + +// Test case for #24392 +func (s *DockerDaemonSuite) TestInfoLabels(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + s.d.Start(c, "--label", `test.empty=`, "--label", `test.empty=`, "--label", `test.label="1"`, "--label", `test.label="2"`) + + out, err := s.d.Cmd("info") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "WARNING: labels with duplicate keys and conflicting values have been deprecated") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_info_unix_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_info_unix_test.go new file mode 100644 index 000000000..d55c05c4a --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_info_unix_test.go @@ -0,0 +1,15 @@ +// +build !windows + +package main + +import ( + 
"github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestInfoSecurityOptions(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled, Apparmor, DaemonIsLinux) + + out, _ := dockerCmd(c, "info") + c.Assert(out, checker.Contains, "Security Options:\n apparmor\n seccomp\n Profile: default\n") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_inspect_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_inspect_test.go new file mode 100644 index 000000000..96e2ee451 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_inspect_test.go @@ -0,0 +1,468 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/integration-cli/checker" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +func checkValidGraphDriver(c *check.C, name string) { + if name != "devicemapper" && name != "overlay" && name != "vfs" && name != "zfs" && name != "btrfs" && name != "aufs" { + c.Fatalf("%v is not a valid graph driver name", name) + } +} + +func (s *DockerSuite) TestInspectImage(c *check.C) { + testRequires(c, DaemonIsLinux) + imageTest := "emptyfs" + // It is important that this ID remain stable. If a code change causes + // it to be different, this is equivalent to a cache bust when pulling + // a legacy-format manifest. If the check at the end of this function + // fails, fix the difference in the image serialization instead of + // updating this hash. + imageTestID := "sha256:11f64303f0f7ffdc71f001788132bca5346831939a956e3e975c93267d89a16d" + id := inspectField(c, imageTest, "Id") + + c.Assert(id, checker.Equals, imageTestID) +} + +func (s *DockerSuite) TestInspectInt64(c *check.C) { + dockerCmd(c, "run", "-d", "-m=300M", "--name", "inspectTest", "busybox", "true") + inspectOut := inspectField(c, "inspectTest", "HostConfig.Memory") + c.Assert(inspectOut, checker.Equals, "314572800") +} + +func (s *DockerSuite) TestInspectDefault(c *check.C) { + //Both the container and image are named busybox. docker inspect will fetch the container JSON. + //If the container JSON is not available, it will go for the image JSON. + + out, _ := dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "true") + containerID := strings.TrimSpace(out) + + inspectOut := inspectField(c, "busybox", "Id") + c.Assert(strings.TrimSpace(inspectOut), checker.Equals, containerID) +} + +func (s *DockerSuite) TestInspectStatus(c *check.C) { + out := runSleepingContainer(c, "-d") + out = strings.TrimSpace(out) + + inspectOut := inspectField(c, out, "State.Status") + c.Assert(inspectOut, checker.Equals, "running") + + // Windows does not support pause/unpause on Windows Server Containers. + // (RS1 does for Hyper-V Containers, but production CI is not setup for that) + if testEnv.DaemonPlatform() != "windows" { + dockerCmd(c, "pause", out) + inspectOut = inspectField(c, out, "State.Status") + c.Assert(inspectOut, checker.Equals, "paused") + + dockerCmd(c, "unpause", out) + inspectOut = inspectField(c, out, "State.Status") + c.Assert(inspectOut, checker.Equals, "running") + } + + dockerCmd(c, "stop", out) + inspectOut = inspectField(c, out, "State.Status") + c.Assert(inspectOut, checker.Equals, "exited") + +} + +func (s *DockerSuite) TestInspectTypeFlagContainer(c *check.C) { + //Both the container and image are named busybox. 
docker inspect will fetch the container
+ //JSON State.Running field. If the field is true, it's a container.
+ runSleepingContainer(c, "--name=busybox", "-d")
+
+ formatStr := "--format={{.State.Running}}"
+ out, _ := dockerCmd(c, "inspect", "--type=container", formatStr, "busybox")
+ c.Assert(out, checker.Equals, "true\n") // the container's State.Running value
+}
+
+func (s *DockerSuite) TestInspectTypeFlagWithNoContainer(c *check.C) {
+ //Run this test against an image named busybox. docker inspect will try to fetch container
+ //JSON. Since there is no container named busybox and --type=container, docker inspect will
+ //not try to get the image JSON. It will throw an error.
+
+ dockerCmd(c, "run", "-d", "busybox", "true")
+
+ _, _, err := dockerCmdWithError("inspect", "--type=container", "busybox")
+ // docker inspect should fail, as there is no container named busybox
+ c.Assert(err, checker.NotNil)
+}
+
+func (s *DockerSuite) TestInspectTypeFlagWithImage(c *check.C) {
+ //Both the container and image are named busybox. docker inspect will fetch the image
+ //JSON as --type=image. If there is no image with the name busybox, docker inspect
+ //will throw an error.
+
+ dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "true")
+
+ out, _ := dockerCmd(c, "inspect", "--type=image", "busybox")
+ c.Assert(out, checker.Not(checker.Contains), "State") // not an image JSON
+}
+
+func (s *DockerSuite) TestInspectTypeFlagWithInvalidValue(c *check.C) {
+ //Both the container and image are named busybox. docker inspect will fail
+ //as --type=foobar is not a valid value for the flag.
+
+ dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "true")
+
+ out, exitCode, err := dockerCmdWithError("inspect", "--type=foobar", "busybox")
+ c.Assert(err, checker.NotNil, check.Commentf("%s", exitCode))
+ c.Assert(exitCode, checker.Equals, 1, check.Commentf("%s", err))
+ c.Assert(out, checker.Contains, "not a valid value for --type")
+}
+
+func (s *DockerSuite) TestInspectImageFilterInt(c *check.C) {
+ testRequires(c, DaemonIsLinux)
+ imageTest := "emptyfs"
+ out := inspectField(c, imageTest, "Size")
+
+ size, err := strconv.Atoi(out)
+ c.Assert(err, checker.IsNil, check.Commentf("failed to inspect size of the image: %s, %v", out, err))
+
+ //now see if the size turns out to be the same
+ formatStr := fmt.Sprintf("--format={{eq .Size %d}}", size)
+ out, _ = dockerCmd(c, "inspect", formatStr, imageTest)
+ result, err := strconv.ParseBool(strings.TrimSuffix(out, "\n"))
+ c.Assert(err, checker.IsNil)
+ c.Assert(result, checker.Equals, true)
+}
+
+func (s *DockerSuite) TestInspectContainerFilterInt(c *check.C) {
+ result := icmd.RunCmd(icmd.Cmd{
+ Command: []string{dockerBinary, "run", "-i", "-a", "stdin", "busybox", "cat"},
+ Stdin: strings.NewReader("blahblah"),
+ })
+ result.Assert(c, icmd.Success)
+ out := result.Stdout()
+ id := strings.TrimSpace(out)
+
+ out = inspectField(c, id, "State.ExitCode")
+
+ exitCode, err := strconv.Atoi(out)
+ c.Assert(err, checker.IsNil, check.Commentf("failed to inspect exitcode of the container: %s, %v", out, err))
+
+ //now get the exit code to verify
+ formatStr := fmt.Sprintf("--format={{eq .State.ExitCode %d}}", exitCode)
+ out, _ = dockerCmd(c, "inspect", formatStr, id)
+ inspectResult, err := strconv.ParseBool(strings.TrimSuffix(out, "\n"))
+ c.Assert(err, checker.IsNil)
+ c.Assert(inspectResult, checker.Equals, true)
+}
+
+func (s *DockerSuite) TestInspectImageGraphDriver(c *check.C) {
+ testRequires(c, DaemonIsLinux, Devicemapper)
+ imageTest := "emptyfs"
+ name := inspectField(c, imageTest, "GraphDriver.Name")
"GraphDriver.Name") + + checkValidGraphDriver(c, name) + + deviceID := inspectField(c, imageTest, "GraphDriver.Data.DeviceId") + + _, err := strconv.Atoi(deviceID) + c.Assert(err, checker.IsNil, check.Commentf("failed to inspect DeviceId of the image: %s, %v", deviceID, err)) + + deviceSize := inspectField(c, imageTest, "GraphDriver.Data.DeviceSize") + + _, err = strconv.ParseUint(deviceSize, 10, 64) + c.Assert(err, checker.IsNil, check.Commentf("failed to inspect DeviceSize of the image: %s, %v", deviceSize, err)) +} + +func (s *DockerSuite) TestInspectContainerGraphDriver(c *check.C) { + testRequires(c, DaemonIsLinux, Devicemapper) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + out = strings.TrimSpace(out) + + name := inspectField(c, out, "GraphDriver.Name") + + checkValidGraphDriver(c, name) + + imageDeviceID := inspectField(c, "busybox", "GraphDriver.Data.DeviceId") + + deviceID := inspectField(c, out, "GraphDriver.Data.DeviceId") + + c.Assert(imageDeviceID, checker.Not(checker.Equals), deviceID) + + _, err := strconv.Atoi(deviceID) + c.Assert(err, checker.IsNil, check.Commentf("failed to inspect DeviceId of the image: %s, %v", deviceID, err)) + + deviceSize := inspectField(c, out, "GraphDriver.Data.DeviceSize") + + _, err = strconv.ParseUint(deviceSize, 10, 64) + c.Assert(err, checker.IsNil, check.Commentf("failed to inspect DeviceSize of the image: %s, %v", deviceSize, err)) +} + +func (s *DockerSuite) TestInspectBindMountPoint(c *check.C) { + modifier := ",z" + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + if testEnv.DaemonPlatform() == "windows" { + modifier = "" + // Linux creates the host directory if it doesn't exist. Windows does not. + os.Mkdir(`c:\data`, os.ModeDir) + } + + dockerCmd(c, "run", "-d", "--name", "test", "-v", prefix+slash+"data:"+prefix+slash+"data:ro"+modifier, "busybox", "cat") + + vol := inspectFieldJSON(c, "test", "Mounts") + + var mp []types.MountPoint + err := json.Unmarshal([]byte(vol), &mp) + c.Assert(err, checker.IsNil) + + // check that there is only one mountpoint + c.Assert(mp, check.HasLen, 1) + + m := mp[0] + + c.Assert(m.Name, checker.Equals, "") + c.Assert(m.Driver, checker.Equals, "") + c.Assert(m.Source, checker.Equals, prefix+slash+"data") + c.Assert(m.Destination, checker.Equals, prefix+slash+"data") + if testEnv.DaemonPlatform() != "windows" { // Windows does not set mode + c.Assert(m.Mode, checker.Equals, "ro"+modifier) + } + c.Assert(m.RW, checker.Equals, false) +} + +func (s *DockerSuite) TestInspectNamedMountPoint(c *check.C) { + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + + dockerCmd(c, "run", "-d", "--name", "test", "-v", "data:"+prefix+slash+"data", "busybox", "cat") + + vol := inspectFieldJSON(c, "test", "Mounts") + + var mp []types.MountPoint + err := json.Unmarshal([]byte(vol), &mp) + c.Assert(err, checker.IsNil) + + // check that there is only one mountpoint + c.Assert(mp, checker.HasLen, 1) + + m := mp[0] + + c.Assert(m.Name, checker.Equals, "data") + c.Assert(m.Driver, checker.Equals, "local") + c.Assert(m.Source, checker.Not(checker.Equals), "") + c.Assert(m.Destination, checker.Equals, prefix+slash+"data") + c.Assert(m.RW, checker.Equals, true) +} + +// #14947 +func (s *DockerSuite) TestInspectTimesAsRFC3339Nano(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + id := strings.TrimSpace(out) + startedAt := inspectField(c, id, "State.StartedAt") + finishedAt := inspectField(c, id, "State.FinishedAt") + created := inspectField(c, id, "Created") + + _, err := 
+ c.Assert(err, checker.IsNil)
+ _, err = time.Parse(time.RFC3339Nano, finishedAt)
+ c.Assert(err, checker.IsNil)
+ _, err = time.Parse(time.RFC3339Nano, created)
+ c.Assert(err, checker.IsNil)
+
+ created = inspectField(c, "busybox", "Created")
+
+ _, err = time.Parse(time.RFC3339Nano, created)
+ c.Assert(err, checker.IsNil)
+}
+
+// #15633
+func (s *DockerSuite) TestInspectLogConfigNoType(c *check.C) {
+ dockerCmd(c, "create", "--name=test", "--log-opt", "max-file=42", "busybox")
+ var logConfig container.LogConfig
+
+ out := inspectFieldJSON(c, "test", "HostConfig.LogConfig")
+
+ err := json.NewDecoder(strings.NewReader(out)).Decode(&logConfig)
+ c.Assert(err, checker.IsNil, check.Commentf("%v", out))
+
+ c.Assert(logConfig.Type, checker.Equals, "json-file")
+ c.Assert(logConfig.Config["max-file"], checker.Equals, "42", check.Commentf("%v", logConfig))
+}
+
+func (s *DockerSuite) TestInspectNoSizeFlagContainer(c *check.C) {
+
+ //Both the container and image are named busybox. docker inspect will fetch the container
+ //JSON SizeRw and SizeRootFs fields. If there is no --size/-s flag, there are no size fields.
+
+ runSleepingContainer(c, "--name=busybox", "-d")
+
+ formatStr := "--format={{.SizeRw}},{{.SizeRootFs}}"
+ out, _ := dockerCmd(c, "inspect", "--type=container", formatStr, "busybox")
+ c.Assert(strings.TrimSpace(out), check.Equals, ",", check.Commentf("Expected not to display size info: %s", out))
+}
+
+func (s *DockerSuite) TestInspectSizeFlagContainer(c *check.C) {
+ runSleepingContainer(c, "--name=busybox", "-d")
+
+ formatStr := "--format='{{.SizeRw}},{{.SizeRootFs}}'"
+ out, _ := dockerCmd(c, "inspect", "-s", "--type=container", formatStr, "busybox")
+ sz := strings.Split(out, ",")
+
+ c.Assert(strings.TrimSpace(sz[0]), check.Not(check.Equals), "")
+ c.Assert(strings.TrimSpace(sz[1]), check.Not(check.Equals), "")
+}
+
+func (s *DockerSuite) TestInspectTemplateError(c *check.C) {
+ // Template parsing error for both the container and image.
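+ // Referencing a nonexistent field such as {{.ThisDoesNotExist}} must make
+ // the CLI fail with a "Template parsing error" rather than print an empty value.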
+
+ runSleepingContainer(c, "--name=container1", "-d")
+
+ out, _, err := dockerCmdWithError("inspect", "--type=container", "--format='Format container: {{.ThisDoesNotExist}}'", "container1")
+ c.Assert(err, check.Not(check.IsNil))
+ c.Assert(out, checker.Contains, "Template parsing error")
+
+ out, _, err = dockerCmdWithError("inspect", "--type=image", "--format='Format container: {{.ThisDoesNotExist}}'", "busybox")
+ c.Assert(err, check.Not(check.IsNil))
+ c.Assert(out, checker.Contains, "Template parsing error")
+}
+
+func (s *DockerSuite) TestInspectJSONFields(c *check.C) {
+ runSleepingContainer(c, "--name=busybox", "-d")
+ out, _, err := dockerCmdWithError("inspect", "--type=container", "--format={{.HostConfig.Dns}}", "busybox")
+
+ c.Assert(err, check.IsNil)
+ c.Assert(out, checker.Equals, "[]\n")
+}
+
+func (s *DockerSuite) TestInspectByPrefix(c *check.C) {
+ id := inspectField(c, "busybox", "Id")
+ c.Assert(id, checker.HasPrefix, "sha256:")
+
+ id2 := inspectField(c, id[:12], "Id")
+ c.Assert(id, checker.Equals, id2)
+
+ id3 := inspectField(c, strings.TrimPrefix(id, "sha256:")[:12], "Id")
+ c.Assert(id, checker.Equals, id3)
+}
+
+func (s *DockerSuite) TestInspectStopWhenNotFound(c *check.C) {
+ runSleepingContainer(c, "--name=busybox1", "-d")
+ runSleepingContainer(c, "--name=busybox2", "-d")
+ result := dockerCmdWithResult("inspect", "--type=container", "--format='{{.Name}}'", "busybox1", "busybox2", "missing")
+
+ c.Assert(result.Error, checker.Not(check.IsNil))
+ c.Assert(result.Stdout(), checker.Contains, "busybox1")
+ c.Assert(result.Stdout(), checker.Contains, "busybox2")
+ c.Assert(result.Stderr(), checker.Contains, "Error: No such container: missing")
+
+ // inspect should not fail fast; it should still report the containers it found
+ result = dockerCmdWithResult("inspect", "--type=container", "--format='{{.Name}}'", "missing", "busybox1", "busybox2")
+
+ c.Assert(result.Error, checker.Not(check.IsNil))
+ c.Assert(result.Stdout(), checker.Contains, "busybox1")
+ c.Assert(result.Stdout(), checker.Contains, "busybox2")
+ c.Assert(result.Stderr(), checker.Contains, "Error: No such container: missing")
+}
+
+func (s *DockerSuite) TestInspectHistory(c *check.C) {
+ dockerCmd(c, "run", "--name=testcont", "busybox", "echo", "hello")
+ dockerCmd(c, "commit", "-m", "test comment", "testcont", "testimg")
+ out, _, err := dockerCmdWithError("inspect", "--format='{{.Comment}}'", "testimg")
+ c.Assert(err, check.IsNil)
+ c.Assert(out, checker.Contains, "test comment")
+}
+
+func (s *DockerSuite) TestInspectContainerNetworkDefault(c *check.C) {
+ testRequires(c, DaemonIsLinux)
+
+ contName := "test1"
+ dockerCmd(c, "run", "--name", contName, "-d", "busybox", "top")
+ netOut, _ := dockerCmd(c, "network", "inspect", "--format={{.ID}}", "bridge")
+ out := inspectField(c, contName, "NetworkSettings.Networks")
+ c.Assert(out, checker.Contains, "bridge")
+ out = inspectField(c, contName, "NetworkSettings.Networks.bridge.NetworkID")
+ c.Assert(strings.TrimSpace(out), checker.Equals, strings.TrimSpace(netOut))
+}
+
+func (s *DockerSuite) TestInspectContainerNetworkCustom(c *check.C) {
+ testRequires(c, DaemonIsLinux)
+
+ netOut, _ := dockerCmd(c, "network", "create", "net1")
+ dockerCmd(c, "run", "--name=container1", "--net=net1", "-d", "busybox", "top")
+ out := inspectField(c, "container1", "NetworkSettings.Networks")
+ c.Assert(out, checker.Contains, "net1")
+ out = inspectField(c, "container1", "NetworkSettings.Networks.net1.NetworkID")
+ c.Assert(strings.TrimSpace(out), checker.Equals, strings.TrimSpace(netOut))
+}
+
+func (s *DockerSuite) TestInspectRootFS(c *check.C) {
+ out, _, err := dockerCmdWithError("inspect", "busybox")
+ c.Assert(err, check.IsNil)
+
+ var imageJSON []types.ImageInspect
+ err = json.Unmarshal([]byte(out), &imageJSON)
+ c.Assert(err, checker.IsNil)
+
+ c.Assert(len(imageJSON[0].RootFS.Layers), checker.GreaterOrEqualThan, 1)
+}
+
+func (s *DockerSuite) TestInspectAmpersand(c *check.C) {
+ testRequires(c, DaemonIsLinux)
+
+ name := "test"
+ out, _ := dockerCmd(c, "run", "--name", name, "--env", `TEST_ENV="soanni&rtr"`, "busybox", "env")
+ c.Assert(out, checker.Contains, `soanni&rtr`)
+ out, _ = dockerCmd(c, "inspect", name)
+ c.Assert(out, checker.Contains, `soanni&rtr`)
+}
+
+func (s *DockerSuite) TestInspectPlugin(c *check.C) {
+ testRequires(c, DaemonIsLinux, IsAmd64, Network)
+ _, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pNameWithTag)
+ c.Assert(err, checker.IsNil)
+
+ out, _, err := dockerCmdWithError("inspect", "--type", "plugin", "--format", "{{.Name}}", pNameWithTag)
+ c.Assert(err, checker.IsNil)
+ c.Assert(strings.TrimSpace(out), checker.Equals, pNameWithTag)
+
+ out, _, err = dockerCmdWithError("inspect", "--format", "{{.Name}}", pNameWithTag)
+ c.Assert(err, checker.IsNil)
+ c.Assert(strings.TrimSpace(out), checker.Equals, pNameWithTag)
+
+ // Even without the tag, inspect still works
+ out, _, err = dockerCmdWithError("inspect", "--type", "plugin", "--format", "{{.Name}}", pNameWithTag)
+ c.Assert(err, checker.IsNil)
+ c.Assert(strings.TrimSpace(out), checker.Equals, pNameWithTag)
+
+ out, _, err = dockerCmdWithError("inspect", "--format", "{{.Name}}", pNameWithTag)
+ c.Assert(err, checker.IsNil)
+ c.Assert(strings.TrimSpace(out), checker.Equals, pNameWithTag)
+
+ _, _, err = dockerCmdWithError("plugin", "disable", pNameWithTag)
+ c.Assert(err, checker.IsNil)
+
+ out, _, err = dockerCmdWithError("plugin", "remove", pNameWithTag)
+ c.Assert(err, checker.IsNil)
+ c.Assert(out, checker.Contains, pNameWithTag)
+}
+
+// Test case for #29185
+func (s *DockerSuite) TestInspectUnknownObject(c *check.C) {
+ // This test should work on both Windows and Linux
+ out, _, err := dockerCmdWithError("inspect", "foobar")
+ c.Assert(err, checker.NotNil)
+ c.Assert(out, checker.Contains, "Error: No such object: foobar")
+ c.Assert(err.Error(), checker.Contains, "Error: No such object: foobar")
+}
+
+func (s *DockerSuite) TestInspectInvalidReference(c *check.C) {
+ // This test should work on both Windows and Linux
+ out, _, err := dockerCmdWithError("inspect", "FooBar")
+ c.Assert(err, checker.NotNil)
+ c.Assert(out, checker.Contains, "Error: No such object: FooBar")
+ c.Assert(err.Error(), checker.Contains, "Error: No such object: FooBar")
+}
diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_kill_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_kill_test.go
new file mode 100644
index 000000000..3273ecf1f
--- /dev/null
+++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_kill_test.go
@@ -0,0 +1,138 @@
+package main
+
+import (
+ "fmt"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/docker/docker/integration-cli/checker"
+ "github.com/docker/docker/integration-cli/cli"
+ "github.com/docker/docker/integration-cli/request"
+ icmd "github.com/docker/docker/pkg/testutil/cmd"
+ "github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestKillContainer(c *check.C) {
+ out := runSleepingContainer(c, "-d")
+ cleanedContainerID := strings.TrimSpace(out)
+ cli.WaitRun(c, cleanedContainerID)
+
+ cli.DockerCmd(c, "kill", cleanedContainerID)
cleanedContainerID) + cli.WaitExited(c, cleanedContainerID, 10*time.Second) + + out = cli.DockerCmd(c, "ps", "-q").Combined() + c.Assert(out, checker.Not(checker.Contains), cleanedContainerID, check.Commentf("killed container is still running")) + +} + +func (s *DockerSuite) TestKillOffStoppedContainer(c *check.C) { + out := runSleepingContainer(c, "-d") + cleanedContainerID := strings.TrimSpace(out) + + cli.DockerCmd(c, "stop", cleanedContainerID) + cli.WaitExited(c, cleanedContainerID, 10*time.Second) + + cli.Docker(cli.Args("kill", "-s", "30", cleanedContainerID)).Assert(c, icmd.Expected{ + ExitCode: 1, + }) +} + +func (s *DockerSuite) TestKillDifferentUserContainer(c *check.C) { + // TODO Windows: Windows does not yet support -u (Feb 2016). + testRequires(c, DaemonIsLinux) + out := cli.DockerCmd(c, "run", "-u", "daemon", "-d", "busybox", "top").Combined() + cleanedContainerID := strings.TrimSpace(out) + cli.WaitRun(c, cleanedContainerID) + + cli.DockerCmd(c, "kill", cleanedContainerID) + cli.WaitExited(c, cleanedContainerID, 10*time.Second) + + out = cli.DockerCmd(c, "ps", "-q").Combined() + c.Assert(out, checker.Not(checker.Contains), cleanedContainerID, check.Commentf("killed container is still running")) + +} + +// Regression test for correct signal parsing; see #13665 +func (s *DockerSuite) TestKillWithSignal(c *check.C) { + // Cannot port to Windows - does not support signals in the same way Linux does + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + cid := strings.TrimSpace(out) + c.Assert(waitRun(cid), check.IsNil) + + dockerCmd(c, "kill", "-s", "SIGWINCH", cid) + time.Sleep(250 * time.Millisecond) + + running := inspectField(c, cid, "State.Running") + + c.Assert(running, checker.Equals, "true", check.Commentf("Container should be in running state after SIGWINCH")) +} + +func (s *DockerSuite) TestKillWithStopSignalWithSameSignalShouldDisableRestartPolicy(c *check.C) { + // Cannot port to Windows - does not support signals in the same way as Linux does + testRequires(c, DaemonIsLinux) + out := cli.DockerCmd(c, "run", "-d", "--stop-signal=TERM", "--restart=always", "busybox", "top").Combined() + cid := strings.TrimSpace(out) + cli.WaitRun(c, cid) + + // Let docker send a TERM signal to the container + // It will kill the process and disable the restart policy + cli.DockerCmd(c, "kill", "-s", "TERM", cid) + cli.WaitExited(c, cid, 10*time.Second) + + out = cli.DockerCmd(c, "ps", "-q").Combined() + c.Assert(out, checker.Not(checker.Contains), cid, check.Commentf("killed container is still running")) +} + +func (s *DockerSuite) TestKillWithStopSignalWithDifferentSignalShouldKeepRestartPolicy(c *check.C) { + // Cannot port to Windows - does not support signals in the same way as Linux does + testRequires(c, DaemonIsLinux) + out := cli.DockerCmd(c, "run", "-d", "--stop-signal=CONT", "--restart=always", "busybox", "top").Combined() + cid := strings.TrimSpace(out) + cli.WaitRun(c, cid) + + // Let docker send a TERM signal to the container + // It will kill the process, but not disable the restart policy + cli.DockerCmd(c, "kill", "-s", "TERM", cid) + cli.WaitRestart(c, cid, 10*time.Second) + + // Restart policy should still be in place, so it should still be running + cli.WaitRun(c, cid) +} + +// FIXME(vdemeester) should be a unit test +func (s *DockerSuite) TestKillWithInvalidSignal(c *check.C) { + out := runSleepingContainer(c, "-d") + cid := strings.TrimSpace(out) + c.Assert(waitRun(cid), check.IsNil) + + out, _, err :=
dockerCmdWithError("kill", "-s", "0", cid) + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "Invalid signal: 0", check.Commentf("Kill with an invalid signal didn't error out correctly")) + + running := inspectField(c, cid, "State.Running") + c.Assert(running, checker.Equals, "true", check.Commentf("Container should be in running state after an invalid signal")) + + out = runSleepingContainer(c, "-d") + cid = strings.TrimSpace(out) + c.Assert(waitRun(cid), check.IsNil) + + out, _, err = dockerCmdWithError("kill", "-s", "SIG42", cid) + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "Invalid signal: SIG42", check.Commentf("Kill with an invalid signal error out correctly")) + + running = inspectField(c, cid, "State.Running") + c.Assert(running, checker.Equals, "true", check.Commentf("Container should be in running state after an invalid signal")) + +} + +func (s *DockerSuite) TestKillStoppedContainerAPIPre120(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later + runSleepingContainer(c, "--name", "docker-kill-test-api", "-d") + dockerCmd(c, "stop", "docker-kill-test-api") + + status, _, err := request.SockRequest("POST", fmt.Sprintf("/v1.19/containers/%s/kill", "docker-kill-test-api"), nil, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusNoContent) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_links_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_links_test.go new file mode 100644 index 000000000..b43c6d1fb --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_links_test.go @@ -0,0 +1,236 @@ +package main + +import ( + "encoding/json" + "fmt" + "regexp" + "strings" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/pkg/testutil" + "github.com/docker/docker/runconfig" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestLinksPingUnlinkedContainers(c *check.C) { + testRequires(c, DaemonIsLinux) + _, exitCode, err := dockerCmdWithError("run", "--rm", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1") + + // run ping failed with error + c.Assert(exitCode, checker.Equals, 1, check.Commentf("error: %v", err)) +} + +// Test for appropriate error when calling --link with an invalid target container +func (s *DockerSuite) TestLinksInvalidContainerTarget(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--link", "bogus:alias", "busybox", "true") + + // an invalid container target should produce an error + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + // an invalid container target should produce an error + c.Assert(out, checker.Contains, "Could not get container") +} + +func (s *DockerSuite) TestLinksPingLinkedContainers(c *check.C) { + testRequires(c, DaemonIsLinux) + // Test with the three different ways of specifying the default network on Linux + testLinkPingOnNetwork(c, "") + testLinkPingOnNetwork(c, "default") + testLinkPingOnNetwork(c, "bridge") +} + +func testLinkPingOnNetwork(c *check.C, network string) { + var postArgs []string + if network != "" { + postArgs = append(postArgs, []string{"--net", network}...) + } + postArgs = append(postArgs, []string{"busybox", "top"}...) + runArgs1 := append([]string{"run", "-d", "--name", "container1", "--hostname", "fred"}, postArgs...) + runArgs2 := append([]string{"run", "-d", "--name", "container2", "--hostname", "wilma"}, postArgs...) 
+ + // Run the two named containers + dockerCmd(c, runArgs1...) + dockerCmd(c, runArgs2...) + + postArgs = []string{} + if network != "" { + postArgs = append(postArgs, []string{"--net", network}...) + } + postArgs = append(postArgs, []string{"busybox", "sh", "-c"}...) + + // Format a run for a container which links to the other two + runArgs := append([]string{"run", "--rm", "--link", "container1:alias1", "--link", "container2:alias2"}, postArgs...) + pingCmd := "ping -c 1 %s -W 1 && ping -c 1 %s -W 1" + + // test ping by alias, ping by name, and ping by hostname + // 1. Ping by alias + dockerCmd(c, append(runArgs, fmt.Sprintf(pingCmd, "alias1", "alias2"))...) + // 2. Ping by container name + dockerCmd(c, append(runArgs, fmt.Sprintf(pingCmd, "container1", "container2"))...) + // 3. Ping by hostname + dockerCmd(c, append(runArgs, fmt.Sprintf(pingCmd, "fred", "wilma"))...) + + // Clean for next round + dockerCmd(c, "rm", "-f", "container1") + dockerCmd(c, "rm", "-f", "container2") +} + +func (s *DockerSuite) TestLinksPingLinkedContainersAfterRename(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") + idA := strings.TrimSpace(out) + out, _ = dockerCmd(c, "run", "-d", "--name", "container2", "busybox", "top") + idB := strings.TrimSpace(out) + dockerCmd(c, "rename", "container1", "container_new") + dockerCmd(c, "run", "--rm", "--link", "container_new:alias1", "--link", "container2:alias2", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1") + dockerCmd(c, "kill", idA) + dockerCmd(c, "kill", idB) + +} + +func (s *DockerSuite) TestLinksInspectLinksStarted(c *check.C) { + testRequires(c, DaemonIsLinux) + var ( + expected = map[string]struct{}{"/container1:/testinspectlink/alias1": {}, "/container2:/testinspectlink/alias2": {}} + result []string + ) + dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") + dockerCmd(c, "run", "-d", "--name", "container2", "busybox", "top") + dockerCmd(c, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "top") + links := inspectFieldJSON(c, "testinspectlink", "HostConfig.Links") + + err := json.Unmarshal([]byte(links), &result) + c.Assert(err, checker.IsNil) + + output := testutil.ConvertSliceOfStringsToMap(result) + + c.Assert(output, checker.DeepEquals, expected) +} + +func (s *DockerSuite) TestLinksInspectLinksStopped(c *check.C) { + testRequires(c, DaemonIsLinux) + var ( + expected = map[string]struct{}{"/container1:/testinspectlink/alias1": {}, "/container2:/testinspectlink/alias2": {}} + result []string + ) + dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") + dockerCmd(c, "run", "-d", "--name", "container2", "busybox", "top") + dockerCmd(c, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "true") + links := inspectFieldJSON(c, "testinspectlink", "HostConfig.Links") + + err := json.Unmarshal([]byte(links), &result) + c.Assert(err, checker.IsNil) + + output := testutil.ConvertSliceOfStringsToMap(result) + + c.Assert(output, checker.DeepEquals, expected) +} + +func (s *DockerSuite) TestLinksNotStartedParentNotFail(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "create", "--name=first", "busybox", "top") + dockerCmd(c, "create", "--name=second", "--link=first:first", "busybox", "top") + dockerCmd(c, "start", "first") + +} + +func (s *DockerSuite) TestLinksHostsFilesInject(c *check.C) { + 
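// What the test below asserts: starting container "two" with --link one:onetwo
// makes the daemon inject an /etc/hosts entry for the alias "onetwo" into
// container two; container one's hosts file is read as well, but only two's
// content is checked.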
testRequires(c, DaemonIsLinux) + testRequires(c, SameHostDaemon, ExecSupport) + + out, _ := dockerCmd(c, "run", "-itd", "--name", "one", "busybox", "top") + idOne := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "run", "-itd", "--name", "two", "--link", "one:onetwo", "busybox", "top") + idTwo := strings.TrimSpace(out) + + c.Assert(waitRun(idTwo), checker.IsNil) + + readContainerFileWithExec(c, idOne, "/etc/hosts") + contentTwo := readContainerFileWithExec(c, idTwo, "/etc/hosts") + // Host is not present in updated hosts file + c.Assert(string(contentTwo), checker.Contains, "onetwo") +} + +func (s *DockerSuite) TestLinksUpdateOnRestart(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, SameHostDaemon, ExecSupport) + dockerCmd(c, "run", "-d", "--name", "one", "busybox", "top") + out, _ := dockerCmd(c, "run", "-d", "--name", "two", "--link", "one:onetwo", "--link", "one:one", "busybox", "top") + id := strings.TrimSpace(string(out)) + + realIP := inspectField(c, "one", "NetworkSettings.Networks.bridge.IPAddress") + content := readContainerFileWithExec(c, id, "/etc/hosts") + + getIP := func(hosts []byte, hostname string) string { + re := regexp.MustCompile(fmt.Sprintf(`(\S*)\t%s`, regexp.QuoteMeta(hostname))) + matches := re.FindSubmatch(hosts) + c.Assert(matches, checker.NotNil, check.Commentf("Hostname %s have no matches in hosts", hostname)) + return string(matches[1]) + } + ip := getIP(content, "one") + c.Assert(ip, checker.Equals, realIP) + + ip = getIP(content, "onetwo") + c.Assert(ip, checker.Equals, realIP) + + dockerCmd(c, "restart", "one") + realIP = inspectField(c, "one", "NetworkSettings.Networks.bridge.IPAddress") + + content = readContainerFileWithExec(c, id, "/etc/hosts") + ip = getIP(content, "one") + c.Assert(ip, checker.Equals, realIP) + + ip = getIP(content, "onetwo") + c.Assert(ip, checker.Equals, realIP) +} + +func (s *DockerSuite) TestLinksEnvs(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "-e", "e1=", "-e", "e2=v2", "-e", "e3=v3=v3", "--name=first", "busybox", "top") + out, _ := dockerCmd(c, "run", "--name=second", "--link=first:first", "busybox", "env") + c.Assert(out, checker.Contains, "FIRST_ENV_e1=\n") + c.Assert(out, checker.Contains, "FIRST_ENV_e2=v2") + c.Assert(out, checker.Contains, "FIRST_ENV_e3=v3=v3") +} + +func (s *DockerSuite) TestLinkShortDefinition(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "--name", "shortlinkdef", "busybox", "top") + + cid := strings.TrimSpace(out) + c.Assert(waitRun(cid), checker.IsNil) + + out, _ = dockerCmd(c, "run", "-d", "--name", "link2", "--link", "shortlinkdef", "busybox", "top") + + cid2 := strings.TrimSpace(out) + c.Assert(waitRun(cid2), checker.IsNil) + + links := inspectFieldJSON(c, cid2, "HostConfig.Links") + c.Assert(links, checker.Equals, "[\"/shortlinkdef:/link2/shortlinkdef\"]") +} + +func (s *DockerSuite) TestLinksNetworkHostContainer(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + dockerCmd(c, "run", "-d", "--net", "host", "--name", "host_container", "busybox", "top") + out, _, err := dockerCmdWithError("run", "--name", "should_fail", "--link", "host_container:tester", "busybox", "true") + + // Running container linking to a container with --net host should have failed + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + // Running container linking to a container with --net host should have failed + c.Assert(out, checker.Contains, runconfig.ErrConflictHostNetworkAndLinks.Error()) +} + +func (s 
*DockerSuite) TestLinksEtcHostsRegularFile(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "--net=host", "busybox", "ls", "-la", "/etc/hosts") + // /etc/hosts should be a regular file + c.Assert(out, checker.Matches, "^-.+\n") +} + +func (s *DockerSuite) TestLinksMultipleWithSameName(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name=upstream-a", "busybox", "top") + dockerCmd(c, "run", "-d", "--name=upstream-b", "busybox", "top") + dockerCmd(c, "run", "--link", "upstream-a:upstream", "--link", "upstream-b:upstream", "busybox", "sh", "-c", "ping -c 1 upstream") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_links_unix_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_links_unix_test.go new file mode 100644 index 000000000..dbff2911a --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_links_unix_test.go @@ -0,0 +1,26 @@ +// +build !windows + +package main + +import ( + "io/ioutil" + "os" + + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestLinksEtcHostsContentMatch(c *check.C) { + // In a _unix file as using Unix specific files, and must be on the + // same host as the daemon. + testRequires(c, SameHostDaemon, NotUserNamespace) + + out, _ := dockerCmd(c, "run", "--net=host", "busybox", "cat", "/etc/hosts") + hosts, err := ioutil.ReadFile("/etc/hosts") + if os.IsNotExist(err) { + c.Skip("/etc/hosts does not exist, skip this test") + } + + c.Assert(out, checker.Equals, string(hosts), check.Commentf("container: %s\n\nhost:%s", out, hosts)) + +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_login_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_login_test.go new file mode 100644 index 000000000..cb261bed8 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_login_test.go @@ -0,0 +1,30 @@ +package main + +import ( + "bytes" + "os/exec" + + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestLoginWithoutTTY(c *check.C) { + cmd := exec.Command(dockerBinary, "login") + + // Send to stdin so the process does not get the TTY + cmd.Stdin = bytes.NewBufferString("buffer test string \n") + + // run the command and block until it's done + err := cmd.Run() + c.Assert(err, checker.NotNil) //"Expected non nil err when logging in & TTY not available" +} + +func (s *DockerRegistryAuthHtpasswdSuite) TestLoginToPrivateRegistry(c *check.C) { + // wrong credentials + out, _, err := dockerCmdWithError("login", "-u", s.reg.Username(), "-p", "WRONGPASSWORD", privateRegistryURL) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "401 Unauthorized") + + // now it's fine + dockerCmd(c, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), privateRegistryURL) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_logout_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_logout_test.go new file mode 100644 index 000000000..5076ceba0 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_logout_test.go @@ -0,0 +1,108 @@ +package main + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerRegistryAuthHtpasswdSuite) TestLogoutWithExternalAuth(c *check.C) { + + // @TODO 
TestLogoutWithExternalAuth expects docker to fall back to a v1 registry, so has to be updated for v17.12, when v1 registries are no longer supported + s.d.StartWithBusybox(c, "--disable-legacy-registry=false") + + osPath := os.Getenv("PATH") + defer os.Setenv("PATH", osPath) + + workingDir, err := os.Getwd() + c.Assert(err, checker.IsNil) + absolute, err := filepath.Abs(filepath.Join(workingDir, "fixtures", "auth")) + c.Assert(err, checker.IsNil) + testPath := fmt.Sprintf("%s%c%s", osPath, filepath.ListSeparator, absolute) + + os.Setenv("PATH", testPath) + + repoName := fmt.Sprintf("%v/dockercli/busybox:authtest", privateRegistryURL) + + tmp, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmp) + + externalAuthConfig := `{ "credsStore": "shell-test" }` + + configPath := filepath.Join(tmp, "config.json") + err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644) + c.Assert(err, checker.IsNil) + + _, err = s.d.Cmd("--config", tmp, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), privateRegistryURL) + c.Assert(err, checker.IsNil) + + b, err := ioutil.ReadFile(configPath) + c.Assert(err, checker.IsNil) + c.Assert(string(b), checker.Not(checker.Contains), "\"auth\":") + c.Assert(string(b), checker.Contains, privateRegistryURL) + + _, err = s.d.Cmd("--config", tmp, "tag", "busybox", repoName) + c.Assert(err, checker.IsNil) + _, err = s.d.Cmd("--config", tmp, "push", repoName) + c.Assert(err, checker.IsNil) + _, err = s.d.Cmd("--config", tmp, "logout", privateRegistryURL) + c.Assert(err, checker.IsNil) + + b, err = ioutil.ReadFile(configPath) + c.Assert(err, checker.IsNil) + c.Assert(string(b), checker.Not(checker.Contains), privateRegistryURL) + + // check I cannot pull anymore + out, err := s.d.Cmd("--config", tmp, "pull", repoName) + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Error: image dockercli/busybox:authtest not found") +} + +// #23100 +func (s *DockerRegistryAuthHtpasswdSuite) TestLogoutWithWrongHostnamesStored(c *check.C) { + osPath := os.Getenv("PATH") + defer os.Setenv("PATH", osPath) + + workingDir, err := os.Getwd() + c.Assert(err, checker.IsNil) + absolute, err := filepath.Abs(filepath.Join(workingDir, "fixtures", "auth")) + c.Assert(err, checker.IsNil) + testPath := fmt.Sprintf("%s%c%s", osPath, filepath.ListSeparator, absolute) + + os.Setenv("PATH", testPath) + + cmd := exec.Command("docker-credential-shell-test", "store") + stdin := bytes.NewReader([]byte(fmt.Sprintf(`{"ServerURL": "https://%s", "Username": "%s", "Secret": "%s"}`, privateRegistryURL, s.reg.Username(), s.reg.Password()))) + cmd.Stdin = stdin + c.Assert(cmd.Run(), checker.IsNil) + + tmp, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + + externalAuthConfig := fmt.Sprintf(`{ "auths": {"https://%s": {}}, "credsStore": "shell-test" }`, privateRegistryURL) + + configPath := filepath.Join(tmp, "config.json") + err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "--config", tmp, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), privateRegistryURL) + + b, err := ioutil.ReadFile(configPath) + c.Assert(err, checker.IsNil) + c.Assert(string(b), checker.Contains, fmt.Sprintf("\"https://%s\": {}", privateRegistryURL)) + c.Assert(string(b), checker.Contains, fmt.Sprintf("\"%s\": {}", privateRegistryURL)) + + dockerCmd(c, "--config", tmp, "logout", privateRegistryURL) + + b, err = ioutil.ReadFile(configPath) + 
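For context, the credential-helper protocol these two tests drive: helpers are standalone binaries named docker-credential-<suffix> that accept a single verb (store, get, erase, list); store reads a JSON payload {"ServerURL":..., "Username":..., "Secret":...} on stdin, as the exec.Command call above shows, while get and erase read a bare server URL on stdin. With "credsStore" set in config.json, login and logout shell out to the helper instead of writing an "auth" entry to the file. A minimal sketch of querying a helper (the shell-test helper name and the registry URL are assumptions taken from this fixture):

package main

import (
	"bytes"
	"fmt"
	"os/exec"
)

// credentialGet asks docker-credential-<helper> for the credentials it has
// stored for serverURL; on success the helper prints a JSON document of the
// form {"ServerURL":...,"Username":...,"Secret":...} on stdout.
func credentialGet(helper, serverURL string) (string, error) {
	cmd := exec.Command("docker-credential-"+helper, "get")
	cmd.Stdin = bytes.NewBufferString(serverURL)
	out, err := cmd.CombinedOutput()
	return string(out), err
}

func main() {
	out, err := credentialGet("shell-test", "127.0.0.1:5000")
	fmt.Printf("out=%s err=%v\n", out, err)
}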
c.Assert(err, checker.IsNil) + c.Assert(string(b), checker.Not(checker.Contains), fmt.Sprintf("\"https://%s\": {}", privateRegistryURL)) + c.Assert(string(b), checker.Not(checker.Contains), fmt.Sprintf("\"%s\": {}", privateRegistryURL)) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_logs_bench_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_logs_bench_test.go new file mode 100644 index 000000000..eeb008de7 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_logs_bench_test.go @@ -0,0 +1,32 @@ +package main + +import ( + "fmt" + "strings" + "time" + + "github.com/go-check/check" +) + +func (s *DockerSuite) BenchmarkLogsCLIRotateFollow(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "--log-opt", "max-size=1b", "--log-opt", "max-file=10", "busybox", "sh", "-c", "while true; do usleep 50000; echo hello; done") + id := strings.TrimSpace(out) + ch := make(chan error, 1) + go func() { + ch <- nil + out, _, _ := dockerCmdWithError("logs", "-f", id) + // if this returns at all, it's an error + ch <- fmt.Errorf(out) + }() + + <-ch + select { + case <-time.After(30 * time.Second): + // ran for 30 seconds with no problem + return + case err := <-ch: + if err != nil { + c.Fatal(err) + } + } +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_logs_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_logs_test.go new file mode 100644 index 000000000..a8b8f9054 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_logs_test.go @@ -0,0 +1,307 @@ +package main + +import ( + "fmt" + "io" + "os/exec" + "regexp" + "strings" + "time" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/pkg/testutil" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +// This used to work, it test a log of PageSize-1 (gh#4851) +func (s *DockerSuite) TestLogsContainerSmallerThanPage(c *check.C) { + testLogsContainerPagination(c, 32767) +} + +// Regression test: When going over the PageSize, it used to panic (gh#4851) +func (s *DockerSuite) TestLogsContainerBiggerThanPage(c *check.C) { + testLogsContainerPagination(c, 32768) +} + +// Regression test: When going much over the PageSize, it used to block (gh#4851) +func (s *DockerSuite) TestLogsContainerMuchBiggerThanPage(c *check.C) { + testLogsContainerPagination(c, 33000) +} + +func testLogsContainerPagination(c *check.C, testLen int) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n = >> a.a; done; echo >> a.a; cat a.a", testLen)) + id := strings.TrimSpace(out) + dockerCmd(c, "wait", id) + out, _ = dockerCmd(c, "logs", id) + c.Assert(out, checker.HasLen, testLen+1) +} + +func (s *DockerSuite) TestLogsTimestamps(c *check.C) { + testLen := 100 + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo = >> a.a; done; cat a.a", testLen)) + + id := strings.TrimSpace(out) + dockerCmd(c, "wait", id) + + out, _ = dockerCmd(c, "logs", "-t", id) + + lines := strings.Split(out, "\n") + + c.Assert(lines, checker.HasLen, testLen+1) + + ts := regexp.MustCompile(`^.* `) + + for _, l := range lines { + if l != "" { + _, err := time.Parse(jsonlog.RFC3339NanoFixed+" ", ts.FindString(l)) + c.Assert(err, checker.IsNil, check.Commentf("Failed to parse timestamp from %v", l)) + // ensure we have padded 0's + c.Assert(l[29], 
checker.Equals, uint8('Z')) + } + } +} + +func (s *DockerSuite) TestLogsSeparateStderr(c *check.C) { + msg := "stderr_log" + out := cli.DockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("echo %s 1>&2", msg)).Combined() + id := strings.TrimSpace(out) + cli.DockerCmd(c, "wait", id) + cli.DockerCmd(c, "logs", id).Assert(c, icmd.Expected{ + Out: "", + Err: msg, + }) +} + +func (s *DockerSuite) TestLogsStderrInStdout(c *check.C) { + // TODO Windows: Needs investigation why this fails. Obtained string includes + // a bunch of ANSI escape sequences before the "stderr_log" message. + testRequires(c, DaemonIsLinux) + msg := "stderr_log" + out := cli.DockerCmd(c, "run", "-d", "-t", "busybox", "sh", "-c", fmt.Sprintf("echo %s 1>&2", msg)).Combined() + id := strings.TrimSpace(out) + cli.DockerCmd(c, "wait", id) + + cli.DockerCmd(c, "logs", id).Assert(c, icmd.Expected{ + Out: msg, + Err: "", + }) +} + +func (s *DockerSuite) TestLogsTail(c *check.C) { + testLen := 100 + out := cli.DockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo =; done;", testLen)).Combined() + + id := strings.TrimSpace(out) + cli.DockerCmd(c, "wait", id) + + out = cli.DockerCmd(c, "logs", "--tail", "0", id).Combined() + lines := strings.Split(out, "\n") + c.Assert(lines, checker.HasLen, 1) + + out = cli.DockerCmd(c, "logs", "--tail", "5", id).Combined() + lines = strings.Split(out, "\n") + c.Assert(lines, checker.HasLen, 6) + + out = cli.DockerCmd(c, "logs", "--tail", "99", id).Combined() + lines = strings.Split(out, "\n") + c.Assert(lines, checker.HasLen, 100) + + out = cli.DockerCmd(c, "logs", "--tail", "all", id).Combined() + lines = strings.Split(out, "\n") + c.Assert(lines, checker.HasLen, testLen+1) + + out = cli.DockerCmd(c, "logs", "--tail", "-1", id).Combined() + lines = strings.Split(out, "\n") + c.Assert(lines, checker.HasLen, testLen+1) + + out = cli.DockerCmd(c, "logs", "--tail", "random", id).Combined() + lines = strings.Split(out, "\n") + c.Assert(lines, checker.HasLen, testLen+1) +} + +func (s *DockerSuite) TestLogsFollowStopped(c *check.C) { + dockerCmd(c, "run", "--name=test", "busybox", "echo", "hello") + id := getIDByName(c, "test") + + logsCmd := exec.Command(dockerBinary, "logs", "-f", id) + c.Assert(logsCmd.Start(), checker.IsNil) + + errChan := make(chan error) + go func() { + errChan <- logsCmd.Wait() + close(errChan) + }() + + select { + case err := <-errChan: + c.Assert(err, checker.IsNil) + case <-time.After(30 * time.Second): + c.Fatal("Following logs is hanged") + } +} + +func (s *DockerSuite) TestLogsSince(c *check.C) { + name := "testlogssince" + dockerCmd(c, "run", "--name="+name, "busybox", "/bin/sh", "-c", "for i in $(seq 1 3); do sleep 2; echo log$i; done") + out, _ := dockerCmd(c, "logs", "-t", name) + + log2Line := strings.Split(strings.Split(out, "\n")[1], " ") + t, err := time.Parse(time.RFC3339Nano, log2Line[0]) // the timestamp log2 is written + c.Assert(err, checker.IsNil) + since := t.Unix() + 1 // add 1s so log1 & log2 doesn't show up + out, _ = dockerCmd(c, "logs", "-t", fmt.Sprintf("--since=%v", since), name) + + // Skip 2 seconds + unexpected := []string{"log1", "log2"} + for _, v := range unexpected { + c.Assert(out, checker.Not(checker.Contains), v, check.Commentf("unexpected log message returned, since=%v", since)) + } + + // Test to make sure a bad since format is caught by the client + out, _, _ = dockerCmdWithError("logs", "-t", "--since=2006-01-02T15:04:0Z", name) + c.Assert(out, checker.Contains, "cannot parse \"0Z\" as 
\"05\"", check.Commentf("bad since format passed to server")) + + // Test with default value specified and parameter omitted + expected := []string{"log1", "log2", "log3"} + for _, cmd := range [][]string{ + {"logs", "-t", name}, + {"logs", "-t", "--since=0", name}, + } { + result := icmd.RunCommand(dockerBinary, cmd...) + result.Assert(c, icmd.Success) + for _, v := range expected { + c.Assert(result.Combined(), checker.Contains, v) + } + } +} + +func (s *DockerSuite) TestLogsSinceFutureFollow(c *check.C) { + // TODO Windows TP5 - Figure out why this test is so flakey. Disabled for now. + testRequires(c, DaemonIsLinux) + name := "testlogssincefuturefollow" + out, _ := dockerCmd(c, "run", "-d", "--name", name, "busybox", "/bin/sh", "-c", `for i in $(seq 1 5); do echo log$i; sleep 1; done`) + + // Extract one timestamp from the log file to give us a starting point for + // our `--since` argument. Because the log producer runs in the background, + // we need to check repeatedly for some output to be produced. + var timestamp string + for i := 0; i != 100 && timestamp == ""; i++ { + if out, _ = dockerCmd(c, "logs", "-t", name); out == "" { + time.Sleep(time.Millisecond * 100) // Retry + } else { + timestamp = strings.Split(strings.Split(out, "\n")[0], " ")[0] + } + } + + c.Assert(timestamp, checker.Not(checker.Equals), "") + t, err := time.Parse(time.RFC3339Nano, timestamp) + c.Assert(err, check.IsNil) + + since := t.Unix() + 2 + out, _ = dockerCmd(c, "logs", "-t", "-f", fmt.Sprintf("--since=%v", since), name) + c.Assert(out, checker.Not(checker.HasLen), 0, check.Commentf("cannot read from empty log")) + lines := strings.Split(strings.TrimSpace(out), "\n") + for _, v := range lines { + ts, err := time.Parse(time.RFC3339Nano, strings.Split(v, " ")[0]) + c.Assert(err, checker.IsNil, check.Commentf("cannot parse timestamp output from log: '%v'", v)) + c.Assert(ts.Unix() >= since, checker.Equals, true, check.Commentf("earlier log found. since=%v logdate=%v", since, ts)) + } +} + +// Regression test for #8832 +func (s *DockerSuite) TestLogsFollowSlowStdoutConsumer(c *check.C) { + // TODO Windows: Fix this test for TP5. 
+ testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", `usleep 600000;yes X | head -c 200000`) + + id := strings.TrimSpace(out) + + stopSlowRead := make(chan bool) + + go func() { + exec.Command(dockerBinary, "wait", id).Run() + stopSlowRead <- true + }() + + logCmd := exec.Command(dockerBinary, "logs", "-f", id) + stdout, err := logCmd.StdoutPipe() + c.Assert(err, checker.IsNil) + c.Assert(logCmd.Start(), checker.IsNil) + + // First read slowly + bytes1, err := testutil.ConsumeWithSpeed(stdout, 10, 50*time.Millisecond, stopSlowRead) + c.Assert(err, checker.IsNil) + + // After the container has finished we can continue reading fast + bytes2, err := testutil.ConsumeWithSpeed(stdout, 32*1024, 0, nil) + c.Assert(err, checker.IsNil) + + actual := bytes1 + bytes2 + expected := 200000 + c.Assert(actual, checker.Equals, expected) +} + +func (s *DockerSuite) TestLogsFollowGoroutinesWithStdout(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "while true; do echo hello; sleep 2; done") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + nroutines, err := getGoroutineNumber() + c.Assert(err, checker.IsNil) + cmd := exec.Command(dockerBinary, "logs", "-f", id) + r, w := io.Pipe() + cmd.Stdout = w + c.Assert(cmd.Start(), checker.IsNil) + + // Make sure pipe is written to + chErr := make(chan error) + go func() { + b := make([]byte, 1) + _, err := r.Read(b) + chErr <- err + }() + c.Assert(<-chErr, checker.IsNil) + c.Assert(cmd.Process.Kill(), checker.IsNil) + r.Close() + // NGoroutines is not updated right away, so we need to wait before failing + c.Assert(waitForGoroutines(nroutines), checker.IsNil) +} + +func (s *DockerSuite) TestLogsFollowGoroutinesNoOutput(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "while true; do sleep 2; done") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + nroutines, err := getGoroutineNumber() + c.Assert(err, checker.IsNil) + cmd := exec.Command(dockerBinary, "logs", "-f", id) + c.Assert(cmd.Start(), checker.IsNil) + time.Sleep(200 * time.Millisecond) + c.Assert(cmd.Process.Kill(), checker.IsNil) + + // NGoroutines is not updated right away, so we need to wait before failing + c.Assert(waitForGoroutines(nroutines), checker.IsNil) +} + +func (s *DockerSuite) TestLogsCLIContainerNotFound(c *check.C) { + name := "testlogsnocontainer" + out, _, _ := dockerCmdWithError("logs", name) + message := fmt.Sprintf("No such container: %s\n", name) + c.Assert(out, checker.Contains, message) +} + +func (s *DockerSuite) TestLogsWithDetails(c *check.C) { + dockerCmd(c, "run", "--name=test", "--label", "foo=bar", "-e", "baz=qux", "--log-opt", "labels=foo", "--log-opt", "env=baz", "busybox", "echo", "hello") + out, _ := dockerCmd(c, "logs", "--details", "--timestamps", "test") + + logFields := strings.Fields(strings.TrimSpace(out)) + c.Assert(len(logFields), checker.Equals, 3, check.Commentf(out)) + + details := strings.Split(logFields[1], ",") + c.Assert(details, checker.HasLen, 2) + c.Assert(details[0], checker.Equals, "baz=qux") + c.Assert(details[1], checker.Equals, "foo=bar") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_nat_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_nat_test.go new file mode 100644 index 000000000..bb6eca13b --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_nat_test.go @@ -0,0 +1,88 @@ +package main + +import ( + "fmt" + "io/ioutil" + "net" + 
"strings" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/go-check/check" +) + +func startServerContainer(c *check.C, msg string, port int) string { + name := "server" + cmd := []string{ + "run", + "--name", + name, + "-d", + "-p", fmt.Sprintf("%d:%d", port, port), + "busybox", + "sh", "-c", fmt.Sprintf("echo %q | nc -lp %d", msg, port), + } + cli.DockerCmd(c, cmd...) + cli.WaitRun(c, name) + return name +} + +func getExternalAddress(c *check.C) net.IP { + iface, err := net.InterfaceByName("eth0") + if err != nil { + c.Skip(fmt.Sprintf("Test not running with `make test`. Interface eth0 not found: %v", err)) + } + + ifaceAddrs, err := iface.Addrs() + c.Assert(err, check.IsNil) + c.Assert(ifaceAddrs, checker.Not(checker.HasLen), 0) + + ifaceIP, _, err := net.ParseCIDR(ifaceAddrs[0].String()) + c.Assert(err, check.IsNil) + + return ifaceIP +} + +func (s *DockerSuite) TestNetworkNat(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + msg := "it works" + startServerContainer(c, msg, 8080) + endpoint := getExternalAddress(c) + conn, err := net.Dial("tcp", fmt.Sprintf("%s:%d", endpoint.String(), 8080)) + c.Assert(err, check.IsNil) + + data, err := ioutil.ReadAll(conn) + conn.Close() + c.Assert(err, check.IsNil) + + final := strings.TrimRight(string(data), "\n") + c.Assert(final, checker.Equals, msg) +} + +func (s *DockerSuite) TestNetworkLocalhostTCPNat(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + var ( + msg = "hi yall" + ) + startServerContainer(c, msg, 8081) + conn, err := net.Dial("tcp", "localhost:8081") + c.Assert(err, check.IsNil) + + data, err := ioutil.ReadAll(conn) + conn.Close() + c.Assert(err, check.IsNil) + + final := strings.TrimRight(string(data), "\n") + c.Assert(final, checker.Equals, msg) +} + +func (s *DockerSuite) TestNetworkLoopbackNat(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace) + msg := "it works" + startServerContainer(c, msg, 8080) + endpoint := getExternalAddress(c) + out, _ := dockerCmd(c, "run", "-t", "--net=container:server", "busybox", + "sh", "-c", fmt.Sprintf("stty raw && nc -w 5 %s 8080", endpoint.String())) + final := strings.TrimRight(string(out), "\n") + c.Assert(final, checker.Equals, msg) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_netmode_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_netmode_test.go new file mode 100644 index 000000000..deb8f6916 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_netmode_test.go @@ -0,0 +1,94 @@ +package main + +import ( + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/runconfig" + "github.com/go-check/check" +) + +// GH14530. Validates combinations of --net= with other options + +// stringCheckPS is how the output of PS starts in order to validate that +// the command executed in a container did really run PS correctly. +const stringCheckPS = "PID USER" + +// DockerCmdWithFail executes a docker command that is supposed to fail and returns +// the output, the exit code. If the command returns a Nil error, it will fail and +// stop the tests. +func dockerCmdWithFail(c *check.C, args ...string) (string, int) { + out, status, err := dockerCmdWithError(args...) 
+ c.Assert(err, check.NotNil, check.Commentf("%v", out)) + return out, status +} + +func (s *DockerSuite) TestNetHostnameWithNetHost(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + + out, _ := dockerCmd(c, "run", "--net=host", "busybox", "ps") + c.Assert(out, checker.Contains, stringCheckPS) +} + +func (s *DockerSuite) TestNetHostname(c *check.C) { + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-h=name", "busybox", "ps") + c.Assert(out, checker.Contains, stringCheckPS) + + out, _ = dockerCmd(c, "run", "-h=name", "--net=bridge", "busybox", "ps") + c.Assert(out, checker.Contains, stringCheckPS) + + out, _ = dockerCmd(c, "run", "-h=name", "--net=none", "busybox", "ps") + c.Assert(out, checker.Contains, stringCheckPS) + + out, _ = dockerCmdWithFail(c, "run", "-h=name", "--net=container:other", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkHostname.Error()) + + out, _ = dockerCmdWithFail(c, "run", "--net=container", "busybox", "ps") + c.Assert(out, checker.Contains, "Invalid network mode: invalid container format container:") + + out, _ = dockerCmdWithFail(c, "run", "--net=weird", "busybox", "ps") + c.Assert(out, checker.Contains, "network weird not found") +} + +func (s *DockerSuite) TestConflictContainerNetworkAndLinks(c *check.C) { + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmdWithFail(c, "run", "--net=container:other", "--link=zip:zap", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictContainerNetworkAndLinks.Error()) +} + +func (s *DockerSuite) TestConflictContainerNetworkHostAndLinks(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + + out, _ := dockerCmdWithFail(c, "run", "--net=host", "--link=zip:zap", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictHostNetworkAndLinks.Error()) +} + +func (s *DockerSuite) TestConflictNetworkModeNetHostAndOptions(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + + out, _ := dockerCmdWithFail(c, "run", "--net=host", "--mac-address=92:d0:c6:0a:29:33", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictContainerNetworkAndMac.Error()) +} + +func (s *DockerSuite) TestConflictNetworkModeAndOptions(c *check.C) { + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmdWithFail(c, "run", "--net=container:other", "--dns=8.8.8.8", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkAndDNS.Error()) + + out, _ = dockerCmdWithFail(c, "run", "--net=container:other", "--add-host=name:8.8.8.8", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkHosts.Error()) + + out, _ = dockerCmdWithFail(c, "run", "--net=container:other", "--mac-address=92:d0:c6:0a:29:33", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictContainerNetworkAndMac.Error()) + + out, _ = dockerCmdWithFail(c, "run", "--net=container:other", "-P", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkPublishPorts.Error()) + + out, _ = dockerCmdWithFail(c, "run", "--net=container:other", "-p", "8080", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkPublishPorts.Error()) + + out, _ = dockerCmdWithFail(c, "run", "--net=container:other", "--expose", "8000-9000", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkExposePorts.Error()) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_network_unix_test.go 
b/vendor/github.com/moby/moby/integration-cli/docker_cli_network_unix_test.go new file mode 100644 index 000000000..05cc078bc --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_network_unix_test.go @@ -0,0 +1,1843 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions/v1p20" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/daemon" + "github.com/docker/docker/pkg/stringid" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/docker/docker/runconfig" + "github.com/docker/libnetwork/driverapi" + remoteapi "github.com/docker/libnetwork/drivers/remote/api" + "github.com/docker/libnetwork/ipamapi" + remoteipam "github.com/docker/libnetwork/ipams/remote/api" + "github.com/docker/libnetwork/netlabel" + "github.com/go-check/check" + "github.com/vishvananda/netlink" + "golang.org/x/sys/unix" +) + +const dummyNetworkDriver = "dummy-network-driver" +const dummyIPAMDriver = "dummy-ipam-driver" + +var remoteDriverNetworkRequest remoteapi.CreateNetworkRequest + +func init() { + check.Suite(&DockerNetworkSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerNetworkSuite struct { + server *httptest.Server + ds *DockerSuite + d *daemon.Daemon +} + +func (s *DockerNetworkSuite) SetUpTest(c *check.C) { + s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: testEnv.ExperimentalDaemon(), + }) +} + +func (s *DockerNetworkSuite) TearDownTest(c *check.C) { + if s.d != nil { + s.d.Stop(c) + s.ds.TearDownTest(c) + } +} + +func (s *DockerNetworkSuite) SetUpSuite(c *check.C) { + mux := http.NewServeMux() + s.server = httptest.NewServer(mux) + c.Assert(s.server, check.NotNil, check.Commentf("Failed to start an HTTP Server")) + setupRemoteNetworkDrivers(c, mux, s.server.URL, dummyNetworkDriver, dummyIPAMDriver) +} + +func setupRemoteNetworkDrivers(c *check.C, mux *http.ServeMux, url, netDrv, ipamDrv string) { + + mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, `{"Implements": ["%s", "%s"]}`, driverapi.NetworkPluginEndpointType, ipamapi.PluginEndpointType) + }) + + // Network driver implementation + mux.HandleFunc(fmt.Sprintf("/%s.GetCapabilities", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, `{"Scope":"local"}`) + }) + + mux.HandleFunc(fmt.Sprintf("/%s.CreateNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&remoteDriverNetworkRequest) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, "null") + }) + + mux.HandleFunc(fmt.Sprintf("/%s.DeleteNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, "null") + }) + + mux.HandleFunc(fmt.Sprintf("/%s.CreateEndpoint", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r 
*http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, `{"Interface":{"MacAddress":"a0:b1:c2:d3:e4:f5"}}`) + }) + + mux.HandleFunc(fmt.Sprintf("/%s.Join", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + + veth := &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{Name: "randomIfName", TxQLen: 0}, PeerName: "cnt0"} + if err := netlink.LinkAdd(veth); err != nil { + fmt.Fprintf(w, `{"Error":"failed to add veth pair: `+err.Error()+`"}`) + } else { + fmt.Fprintf(w, `{"InterfaceName":{ "SrcName":"cnt0", "DstPrefix":"veth"}}`) + } + }) + + mux.HandleFunc(fmt.Sprintf("/%s.Leave", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, "null") + }) + + mux.HandleFunc(fmt.Sprintf("/%s.DeleteEndpoint", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + if link, err := netlink.LinkByName("cnt0"); err == nil { + netlink.LinkDel(link) + } + fmt.Fprintf(w, "null") + }) + + // IPAM Driver implementation + var ( + poolRequest remoteipam.RequestPoolRequest + poolReleaseReq remoteipam.ReleasePoolRequest + addressRequest remoteipam.RequestAddressRequest + addressReleaseReq remoteipam.ReleaseAddressRequest + lAS = "localAS" + gAS = "globalAS" + pool = "172.28.0.0/16" + poolID = lAS + "/" + pool + gw = "172.28.255.254/16" + ) + + mux.HandleFunc(fmt.Sprintf("/%s.GetDefaultAddressSpaces", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, `{"LocalDefaultAddressSpace":"`+lAS+`", "GlobalDefaultAddressSpace": "`+gAS+`"}`) + }) + + mux.HandleFunc(fmt.Sprintf("/%s.RequestPool", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&poolRequest) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + if poolRequest.AddressSpace != lAS && poolRequest.AddressSpace != gAS { + fmt.Fprintf(w, `{"Error":"Unknown address space in pool request: `+poolRequest.AddressSpace+`"}`) + } else if poolRequest.Pool != "" && poolRequest.Pool != pool { + fmt.Fprintf(w, `{"Error":"Cannot handle explicit pool requests yet"}`) + } else { + fmt.Fprintf(w, `{"PoolID":"`+poolID+`", "Pool":"`+pool+`"}`) + } + }) + + mux.HandleFunc(fmt.Sprintf("/%s.RequestAddress", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&addressRequest) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + // make sure libnetwork is now querying on the expected pool id + if addressRequest.PoolID != poolID { + fmt.Fprintf(w, `{"Error":"unknown pool id"}`) + } else if addressRequest.Address != "" { + fmt.Fprintf(w, `{"Error":"Cannot handle explicit address requests yet"}`) + } else { + fmt.Fprintf(w, `{"Address":"`+gw+`"}`) + } + }) + + mux.HandleFunc(fmt.Sprintf("/%s.ReleaseAddress", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := 
json.NewDecoder(r.Body).Decode(&addressReleaseReq) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + // make sure libnetwork is now asking to release the expected address from the expected poolid + if addressRequest.PoolID != poolID { + fmt.Fprintf(w, `{"Error":"unknown pool id"}`) + } else if addressReleaseReq.Address != gw { + fmt.Fprintf(w, `{"Error":"unknown address"}`) + } else { + fmt.Fprintf(w, "null") + } + }) + + mux.HandleFunc(fmt.Sprintf("/%s.ReleasePool", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&poolReleaseReq) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + // make sure libnetwork is now asking to release the expected poolid + if addressRequest.PoolID != poolID { + fmt.Fprintf(w, `{"Error":"unknown pool id"}`) + } else { + fmt.Fprintf(w, "null") + } + }) + + err := os.MkdirAll("/etc/docker/plugins", 0755) + c.Assert(err, checker.IsNil) + + fileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", netDrv) + err = ioutil.WriteFile(fileName, []byte(url), 0644) + c.Assert(err, checker.IsNil) + + ipamFileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", ipamDrv) + err = ioutil.WriteFile(ipamFileName, []byte(url), 0644) + c.Assert(err, checker.IsNil) +} + +func (s *DockerNetworkSuite) TearDownSuite(c *check.C) { + if s.server == nil { + return + } + + s.server.Close() + + err := os.RemoveAll("/etc/docker/plugins") + c.Assert(err, checker.IsNil) +} + +func assertNwIsAvailable(c *check.C, name string) { + if !isNwPresent(c, name) { + c.Fatalf("Network %s not found in network ls o/p", name) + } +} + +func assertNwNotAvailable(c *check.C, name string) { + if isNwPresent(c, name) { + c.Fatalf("Found network %s in network ls o/p", name) + } +} + +func isNwPresent(c *check.C, name string) bool { + out, _ := dockerCmd(c, "network", "ls") + lines := strings.Split(out, "\n") + for i := 1; i < len(lines)-1; i++ { + netFields := strings.Fields(lines[i]) + if netFields[1] == name { + return true + } + } + return false +} + +// assertNwList checks network list retrieved with ls command +// equals to expected network list +// note: out should be `network ls [option]` result +func assertNwList(c *check.C, out string, expectNws []string) { + lines := strings.Split(out, "\n") + var nwList []string + for _, line := range lines[1 : len(lines)-1] { + netFields := strings.Fields(line) + // wrap all network name in nwList + nwList = append(nwList, netFields[1]) + } + + // network ls should contains all expected networks + c.Assert(nwList, checker.DeepEquals, expectNws) +} + +func getNwResource(c *check.C, name string) *types.NetworkResource { + out, _ := dockerCmd(c, "network", "inspect", name) + nr := []types.NetworkResource{} + err := json.Unmarshal([]byte(out), &nr) + c.Assert(err, check.IsNil) + return &nr[0] +} + +func (s *DockerNetworkSuite) TestDockerNetworkLsDefault(c *check.C) { + defaults := []string{"bridge", "host", "none"} + for _, nn := range defaults { + assertNwIsAvailable(c, nn) + } +} + +func (s *DockerSuite) TestNetworkLsFormat(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "network", "ls", "--format", "{{.Name}}") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + + expected := 
[]string{"bridge", "host", "none"} + var names []string + names = append(names, lines...) + c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names)) +} + +func (s *DockerSuite) TestNetworkLsFormatDefaultFormat(c *check.C) { + testRequires(c, DaemonIsLinux) + + config := `{ + "networksFormat": "{{ .Name }} default" +}` + d, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(d) + + err = ioutil.WriteFile(filepath.Join(d, "config.json"), []byte(config), 0644) + c.Assert(err, checker.IsNil) + + out, _ := dockerCmd(c, "--config", d, "network", "ls") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + + expected := []string{"bridge default", "host default", "none default"} + var names []string + names = append(names, lines...) + c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names)) +} + +func (s *DockerNetworkSuite) TestDockerNetworkCreatePredefined(c *check.C) { + predefined := []string{"bridge", "host", "none", "default"} + for _, net := range predefined { + // predefined networks can't be created again + out, _, err := dockerCmdWithError("network", "create", net) + c.Assert(err, checker.NotNil, check.Commentf("%v", out)) + } +} + +func (s *DockerNetworkSuite) TestDockerNetworkCreateHostBind(c *check.C) { + dockerCmd(c, "network", "create", "--subnet=192.168.10.0/24", "--gateway=192.168.10.1", "-o", "com.docker.network.bridge.host_binding_ipv4=192.168.10.1", "testbind") + assertNwIsAvailable(c, "testbind") + + out := runSleepingContainer(c, "--net=testbind", "-p", "5000:5000") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + out, _ = dockerCmd(c, "ps") + c.Assert(out, checker.Contains, "192.168.10.1:5000->5000/tcp") +} + +func (s *DockerNetworkSuite) TestDockerNetworkRmPredefined(c *check.C) { + predefined := []string{"bridge", "host", "none", "default"} + for _, net := range predefined { + // predefined networks can't be removed + out, _, err := dockerCmdWithError("network", "rm", net) + c.Assert(err, checker.NotNil, check.Commentf("%v", out)) + } +} + +func (s *DockerNetworkSuite) TestDockerNetworkLsFilter(c *check.C) { + testNet := "testnet1" + testLabel := "foo" + testValue := "bar" + out, _ := dockerCmd(c, "network", "create", "dev") + defer func() { + dockerCmd(c, "network", "rm", "dev") + dockerCmd(c, "network", "rm", testNet) + }() + networkID := strings.TrimSpace(out) + + // filter with partial ID + // only show 'dev' network + out, _ = dockerCmd(c, "network", "ls", "-f", "id="+networkID[0:5]) + assertNwList(c, out, []string{"dev"}) + + out, _ = dockerCmd(c, "network", "ls", "-f", "name=dge") + assertNwList(c, out, []string{"bridge"}) + + // only show built-in network (bridge, none, host) + out, _ = dockerCmd(c, "network", "ls", "-f", "type=builtin") + assertNwList(c, out, []string{"bridge", "host", "none"}) + + // only show custom networks (dev) + out, _ = dockerCmd(c, "network", "ls", "-f", "type=custom") + assertNwList(c, out, []string{"dev"}) + + // show all networks with filter + // it should be equivalent of ls without option + out, _ = dockerCmd(c, "network", "ls", "-f", "type=custom", "-f", "type=builtin") + assertNwList(c, out, []string{"bridge", "dev", "host", "none"}) + + out, _ = dockerCmd(c, "network", "create", "--label", testLabel+"="+testValue, testNet) + assertNwIsAvailable(c, testNet) + + out, _ = dockerCmd(c, "network", "ls", "-f", 
"label="+testLabel) + assertNwList(c, out, []string{testNet}) + + out, _ = dockerCmd(c, "network", "ls", "-f", "label="+testLabel+"="+testValue) + assertNwList(c, out, []string{testNet}) + + out, _ = dockerCmd(c, "network", "ls", "-f", "label=nonexistent") + outArr := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 1, check.Commentf("%s\n", out)) + + out, _ = dockerCmd(c, "network", "ls", "-f", "driver=null") + assertNwList(c, out, []string{"none"}) + + out, _ = dockerCmd(c, "network", "ls", "-f", "driver=host") + assertNwList(c, out, []string{"host"}) + + out, _ = dockerCmd(c, "network", "ls", "-f", "driver=bridge") + assertNwList(c, out, []string{"bridge", "dev", testNet}) +} + +func (s *DockerNetworkSuite) TestDockerNetworkCreateDelete(c *check.C) { + dockerCmd(c, "network", "create", "test") + assertNwIsAvailable(c, "test") + + dockerCmd(c, "network", "rm", "test") + assertNwNotAvailable(c, "test") +} + +func (s *DockerNetworkSuite) TestDockerNetworkCreateLabel(c *check.C) { + testNet := "testnetcreatelabel" + testLabel := "foo" + testValue := "bar" + + dockerCmd(c, "network", "create", "--label", testLabel+"="+testValue, testNet) + assertNwIsAvailable(c, testNet) + + out, _, err := dockerCmdWithError("network", "inspect", "--format={{ .Labels."+testLabel+" }}", testNet) + c.Assert(err, check.IsNil) + c.Assert(strings.TrimSpace(out), check.Equals, testValue) + + dockerCmd(c, "network", "rm", testNet) + assertNwNotAvailable(c, testNet) +} + +func (s *DockerSuite) TestDockerNetworkDeleteNotExists(c *check.C) { + out, _, err := dockerCmdWithError("network", "rm", "test") + c.Assert(err, checker.NotNil, check.Commentf("%v", out)) +} + +func (s *DockerSuite) TestDockerNetworkDeleteMultiple(c *check.C) { + dockerCmd(c, "network", "create", "testDelMulti0") + assertNwIsAvailable(c, "testDelMulti0") + dockerCmd(c, "network", "create", "testDelMulti1") + assertNwIsAvailable(c, "testDelMulti1") + dockerCmd(c, "network", "create", "testDelMulti2") + assertNwIsAvailable(c, "testDelMulti2") + out, _ := dockerCmd(c, "run", "-d", "--net", "testDelMulti2", "busybox", "top") + containerID := strings.TrimSpace(out) + waitRun(containerID) + + // delete three networks at the same time, since testDelMulti2 + // contains active container, its deletion should fail. + out, _, err := dockerCmdWithError("network", "rm", "testDelMulti0", "testDelMulti1", "testDelMulti2") + // err should not be nil due to deleting testDelMulti2 failed. 
+ c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + // testDelMulti2 should fail because the network has active endpoints + c.Assert(out, checker.Contains, "has active endpoints") + assertNwNotAvailable(c, "testDelMulti0") + assertNwNotAvailable(c, "testDelMulti1") + // testDelMulti2 can't be deleted, so it should exist + assertNwIsAvailable(c, "testDelMulti2") +} + +func (s *DockerSuite) TestDockerNetworkInspect(c *check.C) { + out, _ := dockerCmd(c, "network", "inspect", "host") + networkResources := []types.NetworkResource{} + err := json.Unmarshal([]byte(out), &networkResources) + c.Assert(err, check.IsNil) + c.Assert(networkResources, checker.HasLen, 1) + + out, _ = dockerCmd(c, "network", "inspect", "--format={{ .Name }}", "host") + c.Assert(strings.TrimSpace(out), check.Equals, "host") +} + +func (s *DockerSuite) TestDockerNetworkInspectWithID(c *check.C) { + out, _ := dockerCmd(c, "network", "create", "test2") + networkID := strings.TrimSpace(out) + assertNwIsAvailable(c, "test2") + out, _ = dockerCmd(c, "network", "inspect", "--format={{ .Id }}", "test2") + c.Assert(strings.TrimSpace(out), check.Equals, networkID) + + out, _ = dockerCmd(c, "network", "inspect", "--format={{ .ID }}", "test2") + c.Assert(strings.TrimSpace(out), check.Equals, networkID) +} + +func (s *DockerSuite) TestDockerInspectMultipleNetwork(c *check.C) { + result := dockerCmdWithResult("network", "inspect", "host", "none") + c.Assert(result, icmd.Matches, icmd.Success) + + networkResources := []types.NetworkResource{} + err := json.Unmarshal([]byte(result.Stdout()), &networkResources) + c.Assert(err, check.IsNil) + c.Assert(networkResources, checker.HasLen, 2) +} + +func (s *DockerSuite) TestDockerInspectMultipleNetworksIncludingNonexistent(c *check.C) { + // non-existent network was not at the beginning of the inspect list + // This should print an error, return exit code 1, and print the host network + result := dockerCmdWithResult("network", "inspect", "host", "nonexistent") + c.Assert(result, icmd.Matches, icmd.Expected{ + ExitCode: 1, + Err: "Error: No such network: nonexistent", + Out: "host", + }) + + networkResources := []types.NetworkResource{} + err := json.Unmarshal([]byte(result.Stdout()), &networkResources) + c.Assert(err, check.IsNil) + c.Assert(networkResources, checker.HasLen, 1) + + // Only one non-existent network to inspect + // Should print an error and return exit code 1, nothing else + result = dockerCmdWithResult("network", "inspect", "nonexistent") + c.Assert(result, icmd.Matches, icmd.Expected{ + ExitCode: 1, + Err: "Error: No such network: nonexistent", + Out: "[]", + }) + + // non-existent network was at the beginning of the inspect list + // Should not fail fast: it should still print the host network, along with an error + result = dockerCmdWithResult("network", "inspect", "nonexistent", "host") + c.Assert(result, icmd.Matches, icmd.Expected{ + ExitCode: 1, + Err: "Error: No such network: nonexistent", + Out: "host", + }) + + networkResources = []types.NetworkResource{} + err = json.Unmarshal([]byte(result.Stdout()), &networkResources) + c.Assert(err, check.IsNil) + c.Assert(networkResources, checker.HasLen, 1) +} + +func (s *DockerSuite) TestDockerInspectNetworkWithContainerName(c *check.C) { + dockerCmd(c, "network", "create", "brNetForInspect") + assertNwIsAvailable(c, "brNetForInspect") + defer func() { + dockerCmd(c, "network", "rm", "brNetForInspect") + assertNwNotAvailable(c, "brNetForInspect") + }() + + out, _ := dockerCmd(c, "run", "-d", "--name", "testNetInspect1", "--net",
"brNetForInspect", "busybox", "top") + c.Assert(waitRun("testNetInspect1"), check.IsNil) + containerID := strings.TrimSpace(out) + defer func() { + // we don't stop container by name, because we'll rename it later + dockerCmd(c, "stop", containerID) + }() + + out, _ = dockerCmd(c, "network", "inspect", "brNetForInspect") + networkResources := []types.NetworkResource{} + err := json.Unmarshal([]byte(out), &networkResources) + c.Assert(err, check.IsNil) + c.Assert(networkResources, checker.HasLen, 1) + container, ok := networkResources[0].Containers[containerID] + c.Assert(ok, checker.True) + c.Assert(container.Name, checker.Equals, "testNetInspect1") + + // rename container and check docker inspect output update + newName := "HappyNewName" + dockerCmd(c, "rename", "testNetInspect1", newName) + + // check whether network inspect works properly + out, _ = dockerCmd(c, "network", "inspect", "brNetForInspect") + newNetRes := []types.NetworkResource{} + err = json.Unmarshal([]byte(out), &newNetRes) + c.Assert(err, check.IsNil) + c.Assert(newNetRes, checker.HasLen, 1) + container1, ok := newNetRes[0].Containers[containerID] + c.Assert(ok, checker.True) + c.Assert(container1.Name, checker.Equals, newName) + +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectDisconnect(c *check.C) { + dockerCmd(c, "network", "create", "test") + assertNwIsAvailable(c, "test") + nr := getNwResource(c, "test") + + c.Assert(nr.Name, checker.Equals, "test") + c.Assert(len(nr.Containers), checker.Equals, 0) + + // run a container + out, _ := dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top") + c.Assert(waitRun("test"), check.IsNil) + containerID := strings.TrimSpace(out) + + // connect the container to the test network + dockerCmd(c, "network", "connect", "test", containerID) + + // inspect the network to make sure container is connected + nr = getNetworkResource(c, nr.ID) + c.Assert(len(nr.Containers), checker.Equals, 1) + c.Assert(nr.Containers[containerID], check.NotNil) + + // check if container IP matches network inspect + ip, _, err := net.ParseCIDR(nr.Containers[containerID].IPv4Address) + c.Assert(err, check.IsNil) + containerIP := findContainerIP(c, "test", "test") + c.Assert(ip.String(), checker.Equals, containerIP) + + // disconnect container from the network + dockerCmd(c, "network", "disconnect", "test", containerID) + nr = getNwResource(c, "test") + c.Assert(nr.Name, checker.Equals, "test") + c.Assert(len(nr.Containers), checker.Equals, 0) + + // run another container + out, _ = dockerCmd(c, "run", "-d", "--net", "test", "--name", "test2", "busybox", "top") + c.Assert(waitRun("test2"), check.IsNil) + containerID = strings.TrimSpace(out) + + nr = getNwResource(c, "test") + c.Assert(nr.Name, checker.Equals, "test") + c.Assert(len(nr.Containers), checker.Equals, 1) + + // force disconnect the container to the test network + dockerCmd(c, "network", "disconnect", "-f", "test", containerID) + + nr = getNwResource(c, "test") + c.Assert(nr.Name, checker.Equals, "test") + c.Assert(len(nr.Containers), checker.Equals, 0) + + dockerCmd(c, "network", "rm", "test") + assertNwNotAvailable(c, "test") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIPAMMultipleNetworks(c *check.C) { + // test0 bridge network + dockerCmd(c, "network", "create", "--subnet=192.168.0.0/16", "test1") + assertNwIsAvailable(c, "test1") + + // test2 bridge network does not overlap + dockerCmd(c, "network", "create", "--subnet=192.169.0.0/16", "test2") + assertNwIsAvailable(c, "test2") + + // for networks w/o ipam specified, 
docker will choose proper non-overlapping subnets + dockerCmd(c, "network", "create", "test3") + assertNwIsAvailable(c, "test3") + dockerCmd(c, "network", "create", "test4") + assertNwIsAvailable(c, "test4") + dockerCmd(c, "network", "create", "test5") + assertNwIsAvailable(c, "test5") + + // test network with multiple subnets + // bridge network doesn't support multiple subnets. hence, use a dummy driver that supports + + dockerCmd(c, "network", "create", "-d", dummyNetworkDriver, "--subnet=192.168.0.0/16", "--subnet=192.170.0.0/16", "test6") + assertNwIsAvailable(c, "test6") + + // test network with multiple subnets with valid ipam combinations + // also check same subnet across networks when the driver supports it. + dockerCmd(c, "network", "create", "-d", dummyNetworkDriver, + "--subnet=192.168.0.0/16", "--subnet=192.170.0.0/16", + "--gateway=192.168.0.100", "--gateway=192.170.0.100", + "--ip-range=192.168.1.0/24", + "--aux-address", "a=192.168.1.5", "--aux-address", "b=192.168.1.6", + "--aux-address", "c=192.170.1.5", "--aux-address", "d=192.170.1.6", + "test7") + assertNwIsAvailable(c, "test7") + + // cleanup + for i := 1; i < 8; i++ { + dockerCmd(c, "network", "rm", fmt.Sprintf("test%d", i)) + } +} + +func (s *DockerNetworkSuite) TestDockerNetworkCustomIPAM(c *check.C) { + // Create a bridge network using custom ipam driver + dockerCmd(c, "network", "create", "--ipam-driver", dummyIPAMDriver, "br0") + assertNwIsAvailable(c, "br0") + + // Verify expected network ipam fields are there + nr := getNetworkResource(c, "br0") + c.Assert(nr.Driver, checker.Equals, "bridge") + c.Assert(nr.IPAM.Driver, checker.Equals, dummyIPAMDriver) + + // remove network and exercise remote ipam driver + dockerCmd(c, "network", "rm", "br0") + assertNwNotAvailable(c, "br0") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIPAMOptions(c *check.C) { + // Create a bridge network using custom ipam driver and options + dockerCmd(c, "network", "create", "--ipam-driver", dummyIPAMDriver, "--ipam-opt", "opt1=drv1", "--ipam-opt", "opt2=drv2", "br0") + assertNwIsAvailable(c, "br0") + + // Verify expected network ipam options + nr := getNetworkResource(c, "br0") + opts := nr.IPAM.Options + c.Assert(opts["opt1"], checker.Equals, "drv1") + c.Assert(opts["opt2"], checker.Equals, "drv2") +} + +func (s *DockerNetworkSuite) TestDockerNetworkInspectDefault(c *check.C) { + nr := getNetworkResource(c, "none") + c.Assert(nr.Driver, checker.Equals, "null") + c.Assert(nr.Scope, checker.Equals, "local") + c.Assert(nr.Internal, checker.Equals, false) + c.Assert(nr.EnableIPv6, checker.Equals, false) + c.Assert(nr.IPAM.Driver, checker.Equals, "default") + c.Assert(len(nr.IPAM.Config), checker.Equals, 0) + + nr = getNetworkResource(c, "host") + c.Assert(nr.Driver, checker.Equals, "host") + c.Assert(nr.Scope, checker.Equals, "local") + c.Assert(nr.Internal, checker.Equals, false) + c.Assert(nr.EnableIPv6, checker.Equals, false) + c.Assert(nr.IPAM.Driver, checker.Equals, "default") + c.Assert(len(nr.IPAM.Config), checker.Equals, 0) + + nr = getNetworkResource(c, "bridge") + c.Assert(nr.Driver, checker.Equals, "bridge") + c.Assert(nr.Scope, checker.Equals, "local") + c.Assert(nr.Internal, checker.Equals, false) + c.Assert(nr.EnableIPv6, checker.Equals, false) + c.Assert(nr.IPAM.Driver, checker.Equals, "default") + c.Assert(len(nr.IPAM.Config), checker.Equals, 1) + c.Assert(nr.IPAM.Config[0].Subnet, checker.NotNil) + c.Assert(nr.IPAM.Config[0].Gateway, checker.NotNil) +} + +func (s *DockerNetworkSuite) 
TestDockerNetworkInspectCustomUnspecified(c *check.C) { + // if unspecified, network subnet will be selected from inside preferred pool + dockerCmd(c, "network", "create", "test01") + assertNwIsAvailable(c, "test01") + + nr := getNetworkResource(c, "test01") + c.Assert(nr.Driver, checker.Equals, "bridge") + c.Assert(nr.Scope, checker.Equals, "local") + c.Assert(nr.Internal, checker.Equals, false) + c.Assert(nr.EnableIPv6, checker.Equals, false) + c.Assert(nr.IPAM.Driver, checker.Equals, "default") + c.Assert(len(nr.IPAM.Config), checker.Equals, 1) + c.Assert(nr.IPAM.Config[0].Subnet, checker.NotNil) + c.Assert(nr.IPAM.Config[0].Gateway, checker.NotNil) + + dockerCmd(c, "network", "rm", "test01") + assertNwNotAvailable(c, "test01") +} + +func (s *DockerNetworkSuite) TestDockerNetworkInspectCustomSpecified(c *check.C) { + dockerCmd(c, "network", "create", "--driver=bridge", "--ipv6", "--subnet=fd80:24e2:f998:72d6::/64", "--subnet=172.28.0.0/16", "--ip-range=172.28.5.0/24", "--gateway=172.28.5.254", "br0") + assertNwIsAvailable(c, "br0") + + nr := getNetworkResource(c, "br0") + c.Assert(nr.Driver, checker.Equals, "bridge") + c.Assert(nr.Scope, checker.Equals, "local") + c.Assert(nr.Internal, checker.Equals, false) + c.Assert(nr.EnableIPv6, checker.Equals, true) + c.Assert(nr.IPAM.Driver, checker.Equals, "default") + c.Assert(len(nr.IPAM.Config), checker.Equals, 2) + c.Assert(nr.IPAM.Config[0].Subnet, checker.Equals, "172.28.0.0/16") + c.Assert(nr.IPAM.Config[0].IPRange, checker.Equals, "172.28.5.0/24") + c.Assert(nr.IPAM.Config[0].Gateway, checker.Equals, "172.28.5.254") + dockerCmd(c, "network", "rm", "br0") + assertNwNotAvailable(c, "br0") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIPAMInvalidCombinations(c *check.C) { + // network with ip-range out of subnet range + _, _, err := dockerCmdWithError("network", "create", "--subnet=192.168.0.0/16", "--ip-range=192.170.0.0/16", "test") + c.Assert(err, check.NotNil) + + // network with multiple gateways for a single subnet + _, _, err = dockerCmdWithError("network", "create", "--subnet=192.168.0.0/16", "--gateway=192.168.0.1", "--gateway=192.168.0.2", "test") + c.Assert(err, check.NotNil) + + // Multiple overlapping subnets in the same network must fail + _, _, err = dockerCmdWithError("network", "create", "--subnet=192.168.0.0/16", "--subnet=192.168.1.0/16", "test") + c.Assert(err, check.NotNil) + + // overlapping subnets across networks must fail + // create a valid test0 network + dockerCmd(c, "network", "create", "--subnet=192.168.0.0/16", "test0") + assertNwIsAvailable(c, "test0") + // create an overlapping test1 network + _, _, err = dockerCmdWithError("network", "create", "--subnet=192.168.128.0/17", "test1") + c.Assert(err, check.NotNil) + dockerCmd(c, "network", "rm", "test0") + assertNwNotAvailable(c, "test0") +} + +func (s *DockerNetworkSuite) TestDockerNetworkDriverOptions(c *check.C) { + dockerCmd(c, "network", "create", "-d", dummyNetworkDriver, "-o", "opt1=drv1", "-o", "opt2=drv2", "testopt") + assertNwIsAvailable(c, "testopt") + gopts := remoteDriverNetworkRequest.Options[netlabel.GenericData] + c.Assert(gopts, checker.NotNil) + opts, ok := gopts.(map[string]interface{}) + c.Assert(ok, checker.Equals, true) + c.Assert(opts["opt1"], checker.Equals, "drv1") + c.Assert(opts["opt2"], checker.Equals, "drv2") + dockerCmd(c, "network", "rm", "testopt") + assertNwNotAvailable(c, "testopt") +} + +func (s *DockerNetworkSuite) TestDockerPluginV2NetworkDriver(c *check.C) { +
testRequires(c, DaemonIsLinux, IsAmd64, Network) + + var ( + npName = "tiborvass/test-docker-netplugin" + npTag = "latest" + npNameWithTag = npName + ":" + npTag + ) + _, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", npNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err := dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, npName) + c.Assert(out, checker.Contains, npTag) + c.Assert(out, checker.Contains, "true") + + dockerCmd(c, "network", "create", "-d", npNameWithTag, "v2net") + assertNwIsAvailable(c, "v2net") + dockerCmd(c, "network", "rm", "v2net") + assertNwNotAvailable(c, "v2net") + +} + +func (s *DockerDaemonSuite) TestDockerNetworkNoDiscoveryDefaultBridgeNetwork(c *check.C) { + testRequires(c, ExecSupport) + // On default bridge network built-in service discovery should not happen + hostsFile := "/etc/hosts" + bridgeName := "external-bridge" + bridgeIP := "192.169.255.254/24" + createInterface(c, "bridge", bridgeName, bridgeIP) + defer deleteInterface(c, bridgeName) + + s.d.StartWithBusybox(c, "--bridge", bridgeName) + defer s.d.Restart(c) + + // run two containers and store first container's etc/hosts content + out, err := s.d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil) + cid1 := strings.TrimSpace(out) + defer s.d.Cmd("stop", cid1) + + hosts, err := s.d.Cmd("exec", cid1, "cat", hostsFile) + c.Assert(err, checker.IsNil) + + out, err = s.d.Cmd("run", "-d", "--name", "container2", "busybox", "top") + c.Assert(err, check.IsNil) + cid2 := strings.TrimSpace(out) + + // verify first container's etc/hosts file has not changed after spawning the second named container + hostsPost, err := s.d.Cmd("exec", cid1, "cat", hostsFile) + c.Assert(err, checker.IsNil) + c.Assert(string(hosts), checker.Equals, string(hostsPost), + check.Commentf("Unexpected %s change on second container creation", hostsFile)) + + // stop container 2 and verify first container's etc/hosts has not changed + _, err = s.d.Cmd("stop", cid2) + c.Assert(err, check.IsNil) + + hostsPost, err = s.d.Cmd("exec", cid1, "cat", hostsFile) + c.Assert(err, checker.IsNil) + c.Assert(string(hosts), checker.Equals, string(hostsPost), + check.Commentf("Unexpected %s change on second container creation", hostsFile)) + + // but discovery is on when connecting to non default bridge network + network := "anotherbridge" + out, err = s.d.Cmd("network", "create", network) + c.Assert(err, check.IsNil, check.Commentf(out)) + defer s.d.Cmd("network", "rm", network) + + out, err = s.d.Cmd("network", "connect", network, cid1) + c.Assert(err, check.IsNil, check.Commentf(out)) + + hosts, err = s.d.Cmd("exec", cid1, "cat", hostsFile) + c.Assert(err, checker.IsNil) + + hostsPost, err = s.d.Cmd("exec", cid1, "cat", hostsFile) + c.Assert(err, checker.IsNil) + c.Assert(string(hosts), checker.Equals, string(hostsPost), + check.Commentf("Unexpected %s change on second network connection", hostsFile)) +} + +func (s *DockerNetworkSuite) TestDockerNetworkAnonymousEndpoint(c *check.C) { + testRequires(c, ExecSupport, NotArm) + hostsFile := "/etc/hosts" + cstmBridgeNw := "custom-bridge-nw" + cstmBridgeNw1 := "custom-bridge-nw1" + + dockerCmd(c, "network", "create", "-d", "bridge", cstmBridgeNw) + assertNwIsAvailable(c, cstmBridgeNw) + + // run two anonymous containers and store their etc/hosts content + out, _ := dockerCmd(c, "run", "-d", "--net", cstmBridgeNw, "busybox", "top") + cid1 := strings.TrimSpace(out) + + hosts1 := readContainerFileWithExec(c, 
cid1, hostsFile) + + out, _ = dockerCmd(c, "run", "-d", "--net", cstmBridgeNw, "busybox", "top") + cid2 := strings.TrimSpace(out) + + hosts2 := readContainerFileWithExec(c, cid2, hostsFile) + + // verify first container etc/hosts file has not changed + hosts1post := readContainerFileWithExec(c, cid1, hostsFile) + c.Assert(string(hosts1), checker.Equals, string(hosts1post), + check.Commentf("Unexpected %s change on anonymous container creation", hostsFile)) + + // Connect the 2nd container to a new network and verify the + // first container /etc/hosts file still hasn't changed. + dockerCmd(c, "network", "create", "-d", "bridge", cstmBridgeNw1) + assertNwIsAvailable(c, cstmBridgeNw1) + + dockerCmd(c, "network", "connect", cstmBridgeNw1, cid2) + + hosts2 = readContainerFileWithExec(c, cid2, hostsFile) + hosts1post = readContainerFileWithExec(c, cid1, hostsFile) + c.Assert(string(hosts1), checker.Equals, string(hosts1post), + check.Commentf("Unexpected %s change on container connect", hostsFile)) + + // start a named container + cName := "AnyName" + out, _ = dockerCmd(c, "run", "-d", "--net", cstmBridgeNw, "--name", cName, "busybox", "top") + cid3 := strings.TrimSpace(out) + + // verify that container 1 and 2 can ping the named container + dockerCmd(c, "exec", cid1, "ping", "-c", "1", cName) + dockerCmd(c, "exec", cid2, "ping", "-c", "1", cName) + + // Stop named container and verify first two containers' etc/hosts file hasn't changed + dockerCmd(c, "stop", cid3) + hosts1post = readContainerFileWithExec(c, cid1, hostsFile) + c.Assert(string(hosts1), checker.Equals, string(hosts1post), + check.Commentf("Unexpected %s change on name container creation", hostsFile)) + + hosts2post := readContainerFileWithExec(c, cid2, hostsFile) + c.Assert(string(hosts2), checker.Equals, string(hosts2post), + check.Commentf("Unexpected %s change on name container creation", hostsFile)) + + // verify that container 1 and 2 can't ping the named container now + _, _, err := dockerCmdWithError("exec", cid1, "ping", "-c", "1", cName) + c.Assert(err, check.NotNil) + _, _, err = dockerCmdWithError("exec", cid2, "ping", "-c", "1", cName) + c.Assert(err, check.NotNil) +} + +func (s *DockerNetworkSuite) TestDockerNetworkLinkOnDefaultNetworkOnly(c *check.C) { + // Legacy Link feature must work only on default network, and not across networks + cnt1 := "container1" + cnt2 := "container2" + network := "anotherbridge" + + // Run first container on default network + dockerCmd(c, "run", "-d", "--name", cnt1, "busybox", "top") + + // Create another network and run the second container on it + dockerCmd(c, "network", "create", network) + assertNwIsAvailable(c, network) + dockerCmd(c, "run", "-d", "--net", network, "--name", cnt2, "busybox", "top") + + // Try launching a container on default network, linking to the first container. Must succeed + dockerCmd(c, "run", "-d", "--link", fmt.Sprintf("%s:%s", cnt1, cnt1), "busybox", "top") + + // Try launching a container on default network, linking to the second container. Must fail + _, _, err := dockerCmdWithError("run", "-d", "--link", fmt.Sprintf("%s:%s", cnt2, cnt2), "busybox", "top") + c.Assert(err, checker.NotNil) + + // Connect second container to default network. 
Now a container on default network can link to it + dockerCmd(c, "network", "connect", "bridge", cnt2) + dockerCmd(c, "run", "-d", "--link", fmt.Sprintf("%s:%s", cnt2, cnt2), "busybox", "top") +} + +func (s *DockerNetworkSuite) TestDockerNetworkOverlayPortMapping(c *check.C) { + // Verify exposed ports are present in ps output when running a container on + // a network managed by a driver which does not provide the default gateway + // for the container + nwn := "ov" + ctn := "bb" + port1 := 80 + port2 := 443 + expose1 := fmt.Sprintf("--expose=%d", port1) + expose2 := fmt.Sprintf("--expose=%d", port2) + + dockerCmd(c, "network", "create", "-d", dummyNetworkDriver, nwn) + assertNwIsAvailable(c, nwn) + + dockerCmd(c, "run", "-d", "--net", nwn, "--name", ctn, expose1, expose2, "busybox", "top") + + // Check docker ps o/p for last created container reports the unpublished ports + unpPort1 := fmt.Sprintf("%d/tcp", port1) + unpPort2 := fmt.Sprintf("%d/tcp", port2) + out, _ := dockerCmd(c, "ps", "-n=1") + // Missing unpublished ports in docker ps output + c.Assert(out, checker.Contains, unpPort1) + // Missing unpublished ports in docker ps output + c.Assert(out, checker.Contains, unpPort2) +} + +func (s *DockerNetworkSuite) TestDockerNetworkDriverUngracefulRestart(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + dnd := "dnd" + did := "did" + + mux := http.NewServeMux() + server := httptest.NewServer(mux) + setupRemoteNetworkDrivers(c, mux, server.URL, dnd, did) + + s.d.StartWithBusybox(c) + _, err := s.d.Cmd("network", "create", "-d", dnd, "--subnet", "1.1.1.0/24", "net1") + c.Assert(err, checker.IsNil) + + _, err = s.d.Cmd("run", "-itd", "--net", "net1", "--name", "foo", "--ip", "1.1.1.10", "busybox", "sh") + c.Assert(err, checker.IsNil) + + // Kill daemon and restart + c.Assert(s.d.Kill(), checker.IsNil) + + server.Close() + + startTime := time.Now().Unix() + s.d.Restart(c) + lapse := time.Now().Unix() - startTime + if lapse > 60 { + // In normal scenarios, daemon restart takes ~1 second. + // Plugin retry mechanism can delay the daemon start. systemd may not like it. 
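+ // the slow restart is therefore only logged below, not treated as a test failure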
+ // Avoid accessing plugins during daemon bootup + c.Logf("daemon restart took too long : %d seconds", lapse) + } + + // Restart the custom dummy plugin + mux = http.NewServeMux() + server = httptest.NewServer(mux) + setupRemoteNetworkDrivers(c, mux, server.URL, dnd, did) + + // trying to reuse the same ip must succeed + _, err = s.d.Cmd("run", "-itd", "--net", "net1", "--name", "bar", "--ip", "1.1.1.10", "busybox", "sh") + c.Assert(err, checker.IsNil) +} + +func (s *DockerNetworkSuite) TestDockerNetworkMacInspect(c *check.C) { + // Verify endpoint MAC address is correctly populated in container's network settings + nwn := "ov" + ctn := "bb" + + dockerCmd(c, "network", "create", "-d", dummyNetworkDriver, nwn) + assertNwIsAvailable(c, nwn) + + dockerCmd(c, "run", "-d", "--net", nwn, "--name", ctn, "busybox", "top") + + mac := inspectField(c, ctn, "NetworkSettings.Networks."+nwn+".MacAddress") + c.Assert(mac, checker.Equals, "a0:b1:c2:d3:e4:f5") +} + +func (s *DockerSuite) TestInspectAPIMultipleNetworks(c *check.C) { + dockerCmd(c, "network", "create", "mybridge1") + dockerCmd(c, "network", "create", "mybridge2") + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + + dockerCmd(c, "network", "connect", "mybridge1", id) + dockerCmd(c, "network", "connect", "mybridge2", id) + + body := getInspectBody(c, "v1.20", id) + var inspect120 v1p20.ContainerJSON + err := json.Unmarshal(body, &inspect120) + c.Assert(err, checker.IsNil) + + versionedIP := inspect120.NetworkSettings.IPAddress + + body = getInspectBody(c, "v1.21", id) + var inspect121 types.ContainerJSON + err = json.Unmarshal(body, &inspect121) + c.Assert(err, checker.IsNil) + c.Assert(inspect121.NetworkSettings.Networks, checker.HasLen, 3) + + bridge := inspect121.NetworkSettings.Networks["bridge"] + c.Assert(bridge.IPAddress, checker.Equals, versionedIP) + c.Assert(bridge.IPAddress, checker.Equals, inspect121.NetworkSettings.IPAddress) +} + +func connectContainerToNetworks(c *check.C, d *daemon.Daemon, cName string, nws []string) { + // Run a container on the default network + out, err := d.Cmd("run", "-d", "--name", cName, "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Attach the container to other networks + for _, nw := range nws { + out, err = d.Cmd("network", "create", nw) + c.Assert(err, checker.IsNil, check.Commentf(out)) + out, err = d.Cmd("network", "connect", nw, cName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + } +} + +func verifyContainerIsConnectedToNetworks(c *check.C, d *daemon.Daemon, cName string, nws []string) { + // Verify container is connected to all the networks + for _, nw := range nws { + out, err := d.Cmd("inspect", "-f", fmt.Sprintf("{{.NetworkSettings.Networks.%s}}", nw), cName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Not(checker.Equals), "\n") + } +} + +func (s *DockerNetworkSuite) TestDockerNetworkMultipleNetworksGracefulDaemonRestart(c *check.C) { + cName := "bb" + nwList := []string{"nw1", "nw2", "nw3"} + + s.d.StartWithBusybox(c) + + connectContainerToNetworks(c, s.d, cName, nwList) + verifyContainerIsConnectedToNetworks(c, s.d, cName, nwList) + + // Reload daemon + s.d.Restart(c) + + _, err := s.d.Cmd("start", cName) + c.Assert(err, checker.IsNil) + + verifyContainerIsConnectedToNetworks(c, s.d, cName, nwList) +} + +func (s *DockerNetworkSuite) TestDockerNetworkMultipleNetworksUngracefulDaemonRestart(c *check.C) { + cName := "cc" + nwList := []string{"nw1", 
"nw2", "nw3"} + + s.d.StartWithBusybox(c) + + connectContainerToNetworks(c, s.d, cName, nwList) + verifyContainerIsConnectedToNetworks(c, s.d, cName, nwList) + + // Kill daemon and restart + c.Assert(s.d.Kill(), checker.IsNil) + s.d.Restart(c) + + // Restart container + _, err := s.d.Cmd("start", cName) + c.Assert(err, checker.IsNil) + + verifyContainerIsConnectedToNetworks(c, s.d, cName, nwList) +} + +func (s *DockerNetworkSuite) TestDockerNetworkRunNetByID(c *check.C) { + out, _ := dockerCmd(c, "network", "create", "one") + containerOut, _, err := dockerCmdWithError("run", "-d", "--net", strings.TrimSpace(out), "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(containerOut)) +} + +func (s *DockerNetworkSuite) TestDockerNetworkHostModeUngracefulDaemonRestart(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + s.d.StartWithBusybox(c) + + // Run a few containers on host network + for i := 0; i < 10; i++ { + cName := fmt.Sprintf("hostc-%d", i) + out, err := s.d.Cmd("run", "-d", "--name", cName, "--net=host", "--restart=always", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // verify container has finished starting before killing daemon + err = s.d.WaitRun(cName) + c.Assert(err, checker.IsNil) + } + + // Kill daemon ungracefully and restart + c.Assert(s.d.Kill(), checker.IsNil) + s.d.Restart(c) + + // make sure all the containers are up and running + for i := 0; i < 10; i++ { + err := s.d.WaitRun(fmt.Sprintf("hostc-%d", i)) + c.Assert(err, checker.IsNil) + } +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectToHostFromOtherNetwork(c *check.C) { + dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") + c.Assert(waitRun("container1"), check.IsNil) + dockerCmd(c, "network", "disconnect", "bridge", "container1") + out, _, err := dockerCmdWithError("network", "connect", "host", "container1") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, runconfig.ErrConflictHostNetwork.Error()) +} + +func (s *DockerNetworkSuite) TestDockerNetworkDisconnectFromHost(c *check.C) { + dockerCmd(c, "run", "-d", "--name", "container1", "--net=host", "busybox", "top") + c.Assert(waitRun("container1"), check.IsNil) + out, _, err := dockerCmdWithError("network", "disconnect", "host", "container1") + c.Assert(err, checker.NotNil, check.Commentf("Should err out disconnect from host")) + c.Assert(out, checker.Contains, runconfig.ErrConflictHostNetwork.Error()) +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectWithPortMapping(c *check.C) { + testRequires(c, NotArm) + dockerCmd(c, "network", "create", "test1") + dockerCmd(c, "run", "-d", "--name", "c1", "-p", "5000:5000", "busybox", "top") + c.Assert(waitRun("c1"), check.IsNil) + dockerCmd(c, "network", "connect", "test1", "c1") +} + +func verifyPortMap(c *check.C, container, port, originalMapping string, mustBeEqual bool) { + chk := checker.Equals + if !mustBeEqual { + chk = checker.Not(checker.Equals) + } + currentMapping, _ := dockerCmd(c, "port", container, port) + c.Assert(currentMapping, chk, originalMapping) +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectDisconnectWithPortMapping(c *check.C) { + // Connect and disconnect a container with explicit and non-explicit + // host port mapping to/from networks which do cause and do not cause + // the container default gateway to change, and verify docker port cmd + // returns congruent information + testRequires(c, NotArm) + cnt := "c1" + dockerCmd(c, "network", "create", "aaa") + 
dockerCmd(c, "network", "create", "ccc") + + dockerCmd(c, "run", "-d", "--name", cnt, "-p", "9000:90", "-p", "70", "busybox", "top") + c.Assert(waitRun(cnt), check.IsNil) + curPortMap, _ := dockerCmd(c, "port", cnt, "70") + curExplPortMap, _ := dockerCmd(c, "port", cnt, "90") + + // Connect to a network which causes the container's default gw switch + dockerCmd(c, "network", "connect", "aaa", cnt) + verifyPortMap(c, cnt, "70", curPortMap, false) + verifyPortMap(c, cnt, "90", curExplPortMap, true) + + // Read current mapping + curPortMap, _ = dockerCmd(c, "port", cnt, "70") + + // Disconnect from a network which causes the container's default gw switch + dockerCmd(c, "network", "disconnect", "aaa", cnt) + verifyPortMap(c, cnt, "70", curPortMap, false) + verifyPortMap(c, cnt, "90", curExplPortMap, true) + + // Read current mapping + curPortMap, _ = dockerCmd(c, "port", cnt, "70") + + // Connect to a network which does not cause the container's default gw switch + dockerCmd(c, "network", "connect", "ccc", cnt) + verifyPortMap(c, cnt, "70", curPortMap, true) + verifyPortMap(c, cnt, "90", curExplPortMap, true) +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectWithMac(c *check.C) { + macAddress := "02:42:ac:11:00:02" + dockerCmd(c, "network", "create", "mynetwork") + dockerCmd(c, "run", "--name=test", "-d", "--mac-address", macAddress, "busybox", "top") + c.Assert(waitRun("test"), check.IsNil) + mac1 := inspectField(c, "test", "NetworkSettings.Networks.bridge.MacAddress") + c.Assert(strings.TrimSpace(mac1), checker.Equals, macAddress) + dockerCmd(c, "network", "connect", "mynetwork", "test") + mac2 := inspectField(c, "test", "NetworkSettings.Networks.mynetwork.MacAddress") + c.Assert(strings.TrimSpace(mac2), checker.Not(checker.Equals), strings.TrimSpace(mac1)) +} + +func (s *DockerNetworkSuite) TestDockerNetworkInspectCreatedContainer(c *check.C) { + dockerCmd(c, "create", "--name", "test", "busybox") + networks := inspectField(c, "test", "NetworkSettings.Networks") + c.Assert(networks, checker.Contains, "bridge", check.Commentf("Should return 'bridge' network")) +} + +func (s *DockerNetworkSuite) TestDockerNetworkRestartWithMultipleNetworks(c *check.C) { + dockerCmd(c, "network", "create", "test") + dockerCmd(c, "run", "--name=foo", "-d", "busybox", "top") + c.Assert(waitRun("foo"), checker.IsNil) + dockerCmd(c, "network", "connect", "test", "foo") + dockerCmd(c, "restart", "foo") + networks := inspectField(c, "foo", "NetworkSettings.Networks") + c.Assert(networks, checker.Contains, "bridge", check.Commentf("Should contain 'bridge' network")) + c.Assert(networks, checker.Contains, "test", check.Commentf("Should contain 'test' network")) +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectDisconnectToStoppedContainer(c *check.C) { + dockerCmd(c, "network", "create", "test") + dockerCmd(c, "create", "--name=foo", "busybox", "top") + dockerCmd(c, "network", "connect", "test", "foo") + networks := inspectField(c, "foo", "NetworkSettings.Networks") + c.Assert(networks, checker.Contains, "test", check.Commentf("Should contain 'test' network")) + + // Restart docker daemon to test the config has persisted to disk + s.d.Restart(c) + networks = inspectField(c, "foo", "NetworkSettings.Networks") + c.Assert(networks, checker.Contains, "test", check.Commentf("Should contain 'test' network")) + + // start the container and test if we can ping it from another container in the same network + dockerCmd(c, "start", "foo") + c.Assert(waitRun("foo"), checker.IsNil) + ip := inspectField(c, "foo", 
"NetworkSettings.Networks.test.IPAddress") + ip = strings.TrimSpace(ip) + dockerCmd(c, "run", "--net=test", "busybox", "sh", "-c", fmt.Sprintf("ping -c 1 %s", ip)) + + dockerCmd(c, "stop", "foo") + + // Test disconnect + dockerCmd(c, "network", "disconnect", "test", "foo") + networks = inspectField(c, "foo", "NetworkSettings.Networks") + c.Assert(networks, checker.Not(checker.Contains), "test", check.Commentf("Should not contain 'test' network")) + + // Restart docker daemon to test the config has persisted to disk + s.d.Restart(c) + networks = inspectField(c, "foo", "NetworkSettings.Networks") + c.Assert(networks, checker.Not(checker.Contains), "test", check.Commentf("Should not contain 'test' network")) + +} + +func (s *DockerNetworkSuite) TestDockerNetworkDisconnectContainerNonexistingNetwork(c *check.C) { + dockerCmd(c, "network", "create", "test") + dockerCmd(c, "run", "--net=test", "-d", "--name=foo", "busybox", "top") + networks := inspectField(c, "foo", "NetworkSettings.Networks") + c.Assert(networks, checker.Contains, "test", check.Commentf("Should contain 'test' network")) + + // Stop container and remove network + dockerCmd(c, "stop", "foo") + dockerCmd(c, "network", "rm", "test") + + // Test disconnecting stopped container from nonexisting network + dockerCmd(c, "network", "disconnect", "-f", "test", "foo") + networks = inspectField(c, "foo", "NetworkSettings.Networks") + c.Assert(networks, checker.Not(checker.Contains), "test", check.Commentf("Should not contain 'test' network")) +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectPreferredIP(c *check.C) { + // create two networks + dockerCmd(c, "network", "create", "--ipv6", "--subnet=172.28.0.0/16", "--subnet=2001:db8:1234::/64", "n0") + assertNwIsAvailable(c, "n0") + + dockerCmd(c, "network", "create", "--ipv6", "--subnet=172.30.0.0/16", "--ip-range=172.30.5.0/24", "--subnet=2001:db8:abcd::/64", "--ip-range=2001:db8:abcd::/80", "n1") + assertNwIsAvailable(c, "n1") + + // run a container on first network specifying the ip addresses + dockerCmd(c, "run", "-d", "--name", "c0", "--net=n0", "--ip", "172.28.99.88", "--ip6", "2001:db8:1234::9988", "busybox", "top") + c.Assert(waitRun("c0"), check.IsNil) + verifyIPAddressConfig(c, "c0", "n0", "172.28.99.88", "2001:db8:1234::9988") + verifyIPAddresses(c, "c0", "n0", "172.28.99.88", "2001:db8:1234::9988") + + // connect the container to the second network specifying an ip addresses + dockerCmd(c, "network", "connect", "--ip", "172.30.55.44", "--ip6", "2001:db8:abcd::5544", "n1", "c0") + verifyIPAddressConfig(c, "c0", "n1", "172.30.55.44", "2001:db8:abcd::5544") + verifyIPAddresses(c, "c0", "n1", "172.30.55.44", "2001:db8:abcd::5544") + + // Stop and restart the container + dockerCmd(c, "stop", "c0") + dockerCmd(c, "start", "c0") + + // verify requested addresses are applied and configs are still there + verifyIPAddressConfig(c, "c0", "n0", "172.28.99.88", "2001:db8:1234::9988") + verifyIPAddresses(c, "c0", "n0", "172.28.99.88", "2001:db8:1234::9988") + verifyIPAddressConfig(c, "c0", "n1", "172.30.55.44", "2001:db8:abcd::5544") + verifyIPAddresses(c, "c0", "n1", "172.30.55.44", "2001:db8:abcd::5544") + + // Still it should fail to connect to the default network with a specified IP (whatever ip) + out, _, err := dockerCmdWithError("network", "connect", "--ip", "172.21.55.44", "bridge", "c0") + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndIP.Error()) + +} + +func (s *DockerNetworkSuite) 
TestDockerNetworkConnectPreferredIPStoppedContainer(c *check.C) { + // create a container + dockerCmd(c, "create", "--name", "c0", "busybox", "top") + + // create a network + dockerCmd(c, "network", "create", "--ipv6", "--subnet=172.30.0.0/16", "--subnet=2001:db8:abcd::/64", "n0") + assertNwIsAvailable(c, "n0") + + // connect the container to the network specifying an ip addresses + dockerCmd(c, "network", "connect", "--ip", "172.30.55.44", "--ip6", "2001:db8:abcd::5544", "n0", "c0") + verifyIPAddressConfig(c, "c0", "n0", "172.30.55.44", "2001:db8:abcd::5544") + + // start the container, verify config has not changed and ip addresses are assigned + dockerCmd(c, "start", "c0") + c.Assert(waitRun("c0"), check.IsNil) + verifyIPAddressConfig(c, "c0", "n0", "172.30.55.44", "2001:db8:abcd::5544") + verifyIPAddresses(c, "c0", "n0", "172.30.55.44", "2001:db8:abcd::5544") + + // stop the container and check ip config has not changed + dockerCmd(c, "stop", "c0") + verifyIPAddressConfig(c, "c0", "n0", "172.30.55.44", "2001:db8:abcd::5544") +} + +func (s *DockerNetworkSuite) TestDockerNetworkUnsupportedRequiredIP(c *check.C) { + // requested IP is not supported on predefined networks + for _, mode := range []string{"none", "host", "bridge", "default"} { + checkUnsupportedNetworkAndIP(c, mode) + } + + // requested IP is not supported on networks with no user defined subnets + dockerCmd(c, "network", "create", "n0") + assertNwIsAvailable(c, "n0") + + out, _, err := dockerCmdWithError("run", "-d", "--ip", "172.28.99.88", "--net", "n0", "busybox", "top") + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkNoSubnetAndIP.Error()) + + out, _, err = dockerCmdWithError("run", "-d", "--ip6", "2001:db8:1234::9988", "--net", "n0", "busybox", "top") + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkNoSubnetAndIP.Error()) + + dockerCmd(c, "network", "rm", "n0") + assertNwNotAvailable(c, "n0") +} + +func checkUnsupportedNetworkAndIP(c *check.C, nwMode string) { + out, _, err := dockerCmdWithError("run", "-d", "--net", nwMode, "--ip", "172.28.99.88", "--ip6", "2001:db8:1234::9988", "busybox", "top") + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndIP.Error()) +} + +func verifyIPAddressConfig(c *check.C, cName, nwname, ipv4, ipv6 string) { + if ipv4 != "" { + out := inspectField(c, cName, fmt.Sprintf("NetworkSettings.Networks.%s.IPAMConfig.IPv4Address", nwname)) + c.Assert(strings.TrimSpace(out), check.Equals, ipv4) + } + + if ipv6 != "" { + out := inspectField(c, cName, fmt.Sprintf("NetworkSettings.Networks.%s.IPAMConfig.IPv6Address", nwname)) + c.Assert(strings.TrimSpace(out), check.Equals, ipv6) + } +} + +func verifyIPAddresses(c *check.C, cName, nwname, ipv4, ipv6 string) { + out := inspectField(c, cName, fmt.Sprintf("NetworkSettings.Networks.%s.IPAddress", nwname)) + c.Assert(strings.TrimSpace(out), check.Equals, ipv4) + + out = inspectField(c, cName, fmt.Sprintf("NetworkSettings.Networks.%s.GlobalIPv6Address", nwname)) + c.Assert(strings.TrimSpace(out), check.Equals, ipv6) +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectLinkLocalIP(c *check.C) { + // create one test network + dockerCmd(c, "network", "create", "--ipv6", "--subnet=2001:db8:1234::/64", "n0") + assertNwIsAvailable(c, "n0") + + // run a container with incorrect link-local address + _, _, err := 
dockerCmdWithError("run", "--link-local-ip", "169.253.5.5", "busybox", "top") + c.Assert(err, check.NotNil) + _, _, err = dockerCmdWithError("run", "--link-local-ip", "2001:db8::89", "busybox", "top") + c.Assert(err, check.NotNil) + + // run two containers with link-local ip on the test network + dockerCmd(c, "run", "-d", "--name", "c0", "--net=n0", "--link-local-ip", "169.254.7.7", "--link-local-ip", "fe80::254:77", "busybox", "top") + c.Assert(waitRun("c0"), check.IsNil) + dockerCmd(c, "run", "-d", "--name", "c1", "--net=n0", "--link-local-ip", "169.254.8.8", "--link-local-ip", "fe80::254:88", "busybox", "top") + c.Assert(waitRun("c1"), check.IsNil) + + // run a container on the default network and connect it to the test network specifying a link-local address + dockerCmd(c, "run", "-d", "--name", "c2", "busybox", "top") + c.Assert(waitRun("c2"), check.IsNil) + dockerCmd(c, "network", "connect", "--link-local-ip", "169.254.9.9", "n0", "c2") + + // verify the three containers can ping each other via the link-local addresses + _, _, err = dockerCmdWithError("exec", "c0", "ping", "-c", "1", "169.254.8.8") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "c1", "ping", "-c", "1", "169.254.9.9") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "c2", "ping", "-c", "1", "169.254.7.7") + c.Assert(err, check.IsNil) + + // Stop and restart the three containers + dockerCmd(c, "stop", "c0") + dockerCmd(c, "stop", "c1") + dockerCmd(c, "stop", "c2") + dockerCmd(c, "start", "c0") + dockerCmd(c, "start", "c1") + dockerCmd(c, "start", "c2") + + // verify the ping again + _, _, err = dockerCmdWithError("exec", "c0", "ping", "-c", "1", "169.254.8.8") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "c1", "ping", "-c", "1", "169.254.9.9") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "c2", "ping", "-c", "1", "169.254.7.7") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestUserDefinedNetworkConnectDisconnectLink(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + dockerCmd(c, "network", "create", "-d", "bridge", "foo1") + dockerCmd(c, "network", "create", "-d", "bridge", "foo2") + + dockerCmd(c, "run", "-d", "--net=foo1", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + + // run a container in a user-defined network with a link for an existing container + // and a link for a container that doesn't exist + dockerCmd(c, "run", "-d", "--net=foo1", "--name=second", "--link=first:FirstInFoo1", + "--link=third:bar", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // ping to first and its alias FirstInFoo1 must succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "FirstInFoo1") + c.Assert(err, check.IsNil) + + // connect first container to foo2 network + dockerCmd(c, "network", "connect", "foo2", "first") + // connect second container to foo2 network with a different alias for first container + dockerCmd(c, "network", "connect", "--link=first:FirstInFoo2", "foo2", "second") + + // ping the new alias in network foo2 + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "FirstInFoo2") + c.Assert(err, check.IsNil) + + // disconnect first container from foo1 network + dockerCmd(c, "network", "disconnect", "foo1", "first") + + // link in foo1 network must fail + _, _, err = dockerCmdWithError("exec", "second", 
"ping", "-c", "1", "FirstInFoo1") + c.Assert(err, check.NotNil) + + // link in foo2 network must succeed + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "FirstInFoo2") + c.Assert(err, check.IsNil) +} + +func (s *DockerNetworkSuite) TestDockerNetworkDisconnectDefault(c *check.C) { + netWorkName1 := "test1" + netWorkName2 := "test2" + containerName := "foo" + + dockerCmd(c, "network", "create", netWorkName1) + dockerCmd(c, "network", "create", netWorkName2) + dockerCmd(c, "create", "--name", containerName, "busybox", "top") + dockerCmd(c, "network", "connect", netWorkName1, containerName) + dockerCmd(c, "network", "connect", netWorkName2, containerName) + dockerCmd(c, "network", "disconnect", "bridge", containerName) + + dockerCmd(c, "start", containerName) + c.Assert(waitRun(containerName), checker.IsNil) + networks := inspectField(c, containerName, "NetworkSettings.Networks") + c.Assert(networks, checker.Contains, netWorkName1, check.Commentf(fmt.Sprintf("Should contain '%s' network", netWorkName1))) + c.Assert(networks, checker.Contains, netWorkName2, check.Commentf(fmt.Sprintf("Should contain '%s' network", netWorkName2))) + c.Assert(networks, checker.Not(checker.Contains), "bridge", check.Commentf("Should not contain 'bridge' network")) +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectWithAliasOnDefaultNetworks(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + + defaults := []string{"bridge", "host", "none"} + out, _ := dockerCmd(c, "run", "-d", "--net=none", "busybox", "top") + containerID := strings.TrimSpace(out) + for _, net := range defaults { + res, _, err := dockerCmdWithError("network", "connect", "--alias", "alias"+net, net, containerID) + c.Assert(err, checker.NotNil) + c.Assert(res, checker.Contains, runconfig.ErrUnsupportedNetworkAndAlias.Error()) + } +} + +func (s *DockerSuite) TestUserDefinedNetworkConnectDisconnectAlias(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + dockerCmd(c, "network", "create", "-d", "bridge", "net1") + dockerCmd(c, "network", "create", "-d", "bridge", "net2") + + cid, _ := dockerCmd(c, "run", "-d", "--net=net1", "--name=first", "--net-alias=foo", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + + dockerCmd(c, "run", "-d", "--net=net1", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // ping first container and its alias + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") + c.Assert(err, check.IsNil) + + // ping first container's short-id alias + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", stringid.TruncateID(cid)) + c.Assert(err, check.IsNil) + + // connect first container to net2 network + dockerCmd(c, "network", "connect", "--alias=bar", "net2", "first") + // connect second container to foo2 network with a different alias for first container + dockerCmd(c, "network", "connect", "net2", "second") + + // ping the new alias in network foo2 + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "bar") + c.Assert(err, check.IsNil) + + // disconnect first container from net1 network + dockerCmd(c, "network", "disconnect", "net1", "first") + + // ping to net1 scoped alias "foo" must fail + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") + c.Assert(err, check.NotNil) + + // ping to net2 scoped alias "bar" must still succeed + _, 
_, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "bar") + c.Assert(err, check.IsNil) + // ping to net2 scoped alias short-id must still succeed + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", stringid.TruncateID(cid)) + c.Assert(err, check.IsNil) + + // verify the alias option is rejected when running on predefined network + out, _, err := dockerCmdWithError("run", "--rm", "--name=any", "--net-alias=any", "busybox", "top") + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndAlias.Error()) + + // verify the alias option is rejected when connecting to predefined network + out, _, err = dockerCmdWithError("network", "connect", "--alias=any", "bridge", "first") + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndAlias.Error()) +} + +func (s *DockerSuite) TestUserDefinedNetworkConnectivity(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + dockerCmd(c, "network", "create", "-d", "bridge", "br.net1") + + dockerCmd(c, "run", "-d", "--net=br.net1", "--name=c1.net1", "busybox", "top") + c.Assert(waitRun("c1.net1"), check.IsNil) + + dockerCmd(c, "run", "-d", "--net=br.net1", "--name=c2.net1", "busybox", "top") + c.Assert(waitRun("c2.net1"), check.IsNil) + + // ping first container by its unqualified name + _, _, err := dockerCmdWithError("exec", "c2.net1", "ping", "-c", "1", "c1.net1") + c.Assert(err, check.IsNil) + + // ping first container by its qualified name + _, _, err = dockerCmdWithError("exec", "c2.net1", "ping", "-c", "1", "c1.net1.br.net1") + c.Assert(err, check.IsNil) + + // ping with first qualified name masked by an additional domain. 
should fail + _, _, err = dockerCmdWithError("exec", "c2.net1", "ping", "-c", "1", "c1.net1.br.net1.google.com") + c.Assert(err, check.NotNil) +} + +func (s *DockerSuite) TestEmbeddedDNSInvalidInput(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + dockerCmd(c, "network", "create", "-d", "bridge", "nw1") + + // Sending garbage to embedded DNS shouldn't crash the daemon + dockerCmd(c, "run", "-i", "--net=nw1", "--name=c1", "debian:jessie", "bash", "-c", "echo InvalidQuery > /dev/udp/127.0.0.11/53") +} + +func (s *DockerSuite) TestDockerNetworkConnectFailsNoInspectChange(c *check.C) { + dockerCmd(c, "run", "-d", "--name=bb", "busybox", "top") + c.Assert(waitRun("bb"), check.IsNil) + + ns0 := inspectField(c, "bb", "NetworkSettings.Networks.bridge") + + // A failing redundant network connect should not alter the current container's endpoint settings + _, _, err := dockerCmdWithError("network", "connect", "bridge", "bb") + c.Assert(err, check.NotNil) + + ns1 := inspectField(c, "bb", "NetworkSettings.Networks.bridge") + c.Assert(ns1, check.Equals, ns0) +} + +func (s *DockerSuite) TestDockerNetworkInternalMode(c *check.C) { + dockerCmd(c, "network", "create", "--driver=bridge", "--internal", "internal") + assertNwIsAvailable(c, "internal") + nr := getNetworkResource(c, "internal") + c.Assert(nr.Internal, checker.True) + + dockerCmd(c, "run", "-d", "--net=internal", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--net=internal", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + out, _, err := dockerCmdWithError("exec", "first", "ping", "-W", "4", "-c", "1", "www.google.com") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "ping: bad address") + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) +} + +// Test for #21401 +func (s *DockerNetworkSuite) TestDockerNetworkCreateDeleteSpecialCharacters(c *check.C) { + dockerCmd(c, "network", "create", "test@#$") + assertNwIsAvailable(c, "test@#$") + dockerCmd(c, "network", "rm", "test@#$") + assertNwNotAvailable(c, "test@#$") + + dockerCmd(c, "network", "create", "kiwl$%^") + assertNwIsAvailable(c, "kiwl$%^") + dockerCmd(c, "network", "rm", "kiwl$%^") + assertNwNotAvailable(c, "kiwl$%^") +} + +func (s *DockerDaemonSuite) TestDaemonRestartRestoreBridgeNetwork(t *check.C) { + testRequires(t, DaemonIsLinux) + s.d.StartWithBusybox(t, "--live-restore") + defer s.d.Stop(t) + oldCon := "old" + + _, err := s.d.Cmd("run", "-d", "--name", oldCon, "-p", "80:80", "busybox", "top") + if err != nil { + t.Fatal(err) + } + oldContainerIP, err := s.d.Cmd("inspect", "-f", "{{ .NetworkSettings.Networks.bridge.IPAddress }}", oldCon) + if err != nil { + t.Fatal(err) + } + // Kill the daemon + if err := s.d.Kill(); err != nil { + t.Fatal(err) + } + + // restart the daemon + s.d.Start(t, "--live-restore") + + // start a new container; its IP should not be the same as + // the old running container's.
+ newCon := "new" + _, err = s.d.Cmd("run", "-d", "--name", newCon, "busybox", "top") + if err != nil { + t.Fatal(err) + } + newContainerIP, err := s.d.Cmd("inspect", "-f", "{{ .NetworkSettings.Networks.bridge.IPAddress }}", newCon) + if err != nil { + t.Fatal(err) + } + if strings.TrimSpace(oldContainerIP) == strings.TrimSpace(newContainerIP) { + t.Fatalf("the new container's IP should not equal the old running container's IP") + } + + // start a new container; it should be able to ping the old running container + _, err = s.d.Cmd("run", "-t", "busybox", "ping", "-c", "1", oldContainerIP) + if err != nil { + t.Fatal(err) + } + + // start a new container, trying to publish port 80:80 should fail + out, err := s.d.Cmd("run", "-p", "80:80", "-d", "busybox", "top") + if err == nil || !strings.Contains(out, "Bind for 0.0.0.0:80 failed: port is already allocated") { + t.Fatalf("port 80 is still allocated to the old running container, so allocating it to a new container should fail") + } + + // kill old running container and try to allocate again + _, err = s.d.Cmd("kill", oldCon) + if err != nil { + t.Fatal(err) + } + id, err := s.d.Cmd("run", "-p", "80:80", "-d", "busybox", "top") + if err != nil { + t.Fatal(err) + } + + // Cleanup because these containers will not be shut down by the daemon + out, err = s.d.Cmd("stop", newCon) + if err != nil { + t.Fatalf("err: %v %v", err, string(out)) + } + _, err = s.d.Cmd("stop", strings.TrimSpace(id)) + if err != nil { + t.Fatal(err) + } +} + +func (s *DockerNetworkSuite) TestDockerNetworkFlagAlias(c *check.C) { + dockerCmd(c, "network", "create", "user") + output, status := dockerCmd(c, "run", "--rm", "--network=user", "--network-alias=foo", "busybox", "true") + c.Assert(status, checker.Equals, 0, check.Commentf("unexpected status code %d (%s)", status, output)) + + output, status, _ = dockerCmdWithError("run", "--rm", "--net=user", "--network=user", "busybox", "true") + c.Assert(status, checker.Equals, 0, check.Commentf("unexpected status code %d (%s)", status, output)) + + output, status, _ = dockerCmdWithError("run", "--rm", "--network=user", "--net-alias=foo", "--network-alias=bar", "busybox", "true") + c.Assert(status, checker.Equals, 0, check.Commentf("unexpected status code %d (%s)", status, output)) +} + +func (s *DockerNetworkSuite) TestDockerNetworkValidateIP(c *check.C) { + _, _, err := dockerCmdWithError("network", "create", "--ipv6", "--subnet=172.28.0.0/16", "--subnet=2001:db8:1234::/64", "mynet") + c.Assert(err, check.IsNil) + assertNwIsAvailable(c, "mynet") + + _, _, err = dockerCmdWithError("run", "-d", "--name", "mynet0", "--net=mynet", "--ip", "172.28.99.88", "--ip6", "2001:db8:1234::9988", "busybox", "top") + c.Assert(err, check.IsNil) + c.Assert(waitRun("mynet0"), check.IsNil) + verifyIPAddressConfig(c, "mynet0", "mynet", "172.28.99.88", "2001:db8:1234::9988") + verifyIPAddresses(c, "mynet0", "mynet", "172.28.99.88", "2001:db8:1234::9988") + + _, _, err = dockerCmdWithError("run", "--net=mynet", "--ip", "mynet_ip", "--ip6", "2001:db8:1234::9999", "busybox", "top") + c.Assert(err.Error(), checker.Contains, "invalid IPv4 address") + _, _, err = dockerCmdWithError("run", "--net=mynet", "--ip", "172.28.99.99", "--ip6", "mynet_ip6", "busybox", "top") + c.Assert(err.Error(), checker.Contains, "invalid IPv6 address") + // This is a case of passing an IPv4 address to `--ip6` + _, _, err = dockerCmdWithError("run", "--net=mynet", "--ip6", "172.28.99.99", "busybox", "top") + c.Assert(err.Error(), checker.Contains, "invalid IPv6 address") + // This is a
special case of an IPv4-mapped IPv6 address + _, _, err = dockerCmdWithError("run", "--net=mynet", "--ip6", "::ffff:172.28.99.99", "busybox", "top") + c.Assert(err.Error(), checker.Contains, "invalid IPv6 address") +} + +// Test case for #26220 +func (s *DockerNetworkSuite) TestDockerNetworkDisconnectFromBridge(c *check.C) { + out, _ := dockerCmd(c, "network", "inspect", "--format", "{{.Id}}", "bridge") + + network := strings.TrimSpace(out) + + name := "test" + dockerCmd(c, "create", "--name", name, "busybox", "top") + + _, _, err := dockerCmdWithError("network", "disconnect", network, name) + c.Assert(err, check.IsNil) +} + +// TestConntrackFlowsLeak covers the failure scenario of ticket: https://github.com/docker/docker/issues/8795 +// Validates that conntrack is correctly cleaned once a container is destroyed +func (s *DockerNetworkSuite) TestConntrackFlowsLeak(c *check.C) { + testRequires(c, IsAmd64, DaemonIsLinux, Network) + + // Create a new network + cli.DockerCmd(c, "network", "create", "--subnet=192.168.10.0/24", "--gateway=192.168.10.1", "-o", "com.docker.network.bridge.host_binding_ipv4=192.168.10.1", "testbind") + assertNwIsAvailable(c, "testbind") + + // Launch the server; it will remain listening on an exposed port and reply to any request in a ping/pong fashion + cmd := "while true; do echo hello | nc -w 1 -lu 8080; done" + cli.DockerCmd(c, "run", "-d", "--name", "server", "--net", "testbind", "-p", "8080:8080/udp", "appropriate/nc", "sh", "-c", cmd) + + // Launch a client container; the objective is to create a flow that is NATed, in order to expose the bug + cmd = "echo world | nc -q 1 -u 192.168.10.1 8080" + cli.DockerCmd(c, "run", "-d", "--name", "client", "--net=host", "appropriate/nc", "sh", "-c", cmd) + + // Get all the flows using netlink + flows, err := netlink.ConntrackTableList(netlink.ConntrackTable, unix.AF_INET) + c.Assert(err, check.IsNil) + var flowMatch int + for _, flow := range flows { + // count only the flows that we are interested in, skipping others that can be lying around the host + if flow.Forward.Protocol == unix.IPPROTO_UDP && + flow.Forward.DstIP.Equal(net.ParseIP("192.168.10.1")) && + flow.Forward.DstPort == 8080 { + flowMatch++ + } + } + // The client should have created only 1 flow + c.Assert(flowMatch, checker.Equals, 1) + + // Now delete the server; this will trigger the conntrack cleanup + cli.DockerCmd(c, "rm", "-fv", "server") + + // Fetch all the flows again and validate that no server flow is left lying around in conntrack + flows, err = netlink.ConntrackTableList(netlink.ConntrackTable, unix.AF_INET) + c.Assert(err, check.IsNil) + flowMatch = 0 + for _, flow := range flows { + if flow.Forward.Protocol == unix.IPPROTO_UDP && + flow.Forward.DstIP.Equal(net.ParseIP("192.168.10.1")) && + flow.Forward.DstPort == 8080 { + flowMatch++ + } + } + // All the flows have to be gone + c.Assert(flowMatch, checker.Equals, 0) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_oom_killed_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_oom_killed_test.go new file mode 100644 index 000000000..54c34d206 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_oom_killed_test.go @@ -0,0 +1,30 @@ +// +build !windows + +package main + +import ( + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestInspectOomKilledTrue(c *check.C) { + testRequires(c, DaemonIsLinux, memoryLimitSupport, swapMemorySupport) + + name := "testoomkilled" +
_, exitCode, _ := dockerCmdWithError("run", "--name", name, "--memory", "32MB", "busybox", "sh", "-c", "x=a; while true; do x=$x$x$x$x; done") + + c.Assert(exitCode, checker.Equals, 137, check.Commentf("OOM exit should be 137")) + + oomKilled := inspectField(c, name, "State.OOMKilled") + c.Assert(oomKilled, checker.Equals, "true") +} + +func (s *DockerSuite) TestInspectOomKilledFalse(c *check.C) { + testRequires(c, DaemonIsLinux, memoryLimitSupport, swapMemorySupport) + + name := "testoomkilled" + dockerCmd(c, "run", "--name", name, "--memory", "32MB", "busybox", "sh", "-c", "echo hello world") + + oomKilled := inspectField(c, name, "State.OOMKilled") + c.Assert(oomKilled, checker.Equals, "false") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_pause_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_pause_test.go new file mode 100644 index 000000000..682384fc1 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_pause_test.go @@ -0,0 +1,78 @@ +package main + +import ( + "strings" + "time" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestPause(c *check.C) { + testRequires(c, IsPausable) + + name := "testeventpause" + runSleepingContainer(c, "-d", "--name", name) + + cli.DockerCmd(c, "pause", name) + pausedContainers := strings.Fields( + cli.DockerCmd(c, "ps", "-f", "status=paused", "-q", "-a").Combined(), + ) + c.Assert(len(pausedContainers), checker.Equals, 1) + + cli.DockerCmd(c, "unpause", name) + + out := cli.DockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c)).Combined() + events := strings.Split(strings.TrimSpace(out), "\n") + actions := eventActionsByIDAndType(c, events, name, "container") + + c.Assert(actions[len(actions)-2], checker.Equals, "pause") + c.Assert(actions[len(actions)-1], checker.Equals, "unpause") +} + +func (s *DockerSuite) TestPauseMultipleContainers(c *check.C) { + testRequires(c, IsPausable) + + containers := []string{ + "testpausewithmorecontainers1", + "testpausewithmorecontainers2", + } + for _, name := range containers { + runSleepingContainer(c, "-d", "--name", name) + } + cli.DockerCmd(c, append([]string{"pause"}, containers...)...) + pausedContainers := strings.Fields( + cli.DockerCmd(c, "ps", "-f", "status=paused", "-q", "-a").Combined(), + ) + c.Assert(len(pausedContainers), checker.Equals, len(containers)) + + cli.DockerCmd(c, append([]string{"unpause"}, containers...)...) 
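+ // The daemon event log should now record a pause followed by an unpause for every container in the batch.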
+ + out := cli.DockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c)).Combined() + events := strings.Split(strings.TrimSpace(out), "\n") + + for _, name := range containers { + actions := eventActionsByIDAndType(c, events, name, "container") + + c.Assert(actions[len(actions)-2], checker.Equals, "pause") + c.Assert(actions[len(actions)-1], checker.Equals, "unpause") + } +} + +func (s *DockerSuite) TestPauseFailsOnWindowsServerContainers(c *check.C) { + testRequires(c, DaemonIsWindows, NotPausable) + runSleepingContainer(c, "-d", "--name=test") + out, _, _ := dockerCmdWithError("pause", "test") + c.Assert(out, checker.Contains, "cannot pause Windows Server Containers") +} + +func (s *DockerSuite) TestStopPausedContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + + id := runSleepingContainer(c) + cli.WaitRun(c, id) + cli.DockerCmd(c, "pause", id) + cli.DockerCmd(c, "stop", id) + cli.WaitForInspectResult(c, id, "{{.State.Running}}", "false", 30*time.Second) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_plugins_logdriver_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_plugins_logdriver_test.go new file mode 100644 index 000000000..d74256656 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_plugins_logdriver_test.go @@ -0,0 +1,49 @@ +package main + +import ( + "encoding/json" + "net/http" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestPluginLogDriver(c *check.C) { + testRequires(c, IsAmd64, DaemonIsLinux) + + pluginName := "cpuguy83/docker-logdriver-test:latest" + + dockerCmd(c, "plugin", "install", pluginName) + dockerCmd(c, "run", "--log-driver", pluginName, "--name=test", "busybox", "echo", "hello") + out, _ := dockerCmd(c, "logs", "test") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello") + + dockerCmd(c, "start", "-a", "test") + out, _ = dockerCmd(c, "logs", "test") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello\nhello") + + dockerCmd(c, "rm", "test") + dockerCmd(c, "plugin", "disable", pluginName) + dockerCmd(c, "plugin", "rm", pluginName) +} + +// Make sure log drivers are listed in info, and v2 plugins are not. 
+func (s *DockerSuite) TestPluginLogDriverInfoList(c *check.C) { + testRequires(c, IsAmd64, DaemonIsLinux) + pluginName := "cpuguy83/docker-logdriver-test" + + dockerCmd(c, "plugin", "install", pluginName) + status, body, err := request.SockRequest("GET", "/info", nil, daemonHost()) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) + + var info types.Info + err = json.Unmarshal(body, &info) + c.Assert(err, checker.IsNil) + drivers := strings.Join(info.Plugins.Log, " ") + c.Assert(drivers, checker.Contains, "json-file") + c.Assert(drivers, checker.Not(checker.Contains), pluginName) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_plugins_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_plugins_test.go new file mode 100644 index 000000000..38b4af8f1 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_plugins_test.go @@ -0,0 +1,515 @@ +package main + +import ( + "fmt" + "io/ioutil" + "net/http" + "os" + "path" + "path/filepath" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/daemon" + "github.com/docker/docker/integration-cli/fixtures/plugin" + "github.com/docker/docker/integration-cli/request" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" + "golang.org/x/net/context" +) + +var ( + pluginProcessName = "sample-volume-plugin" + pName = "tiborvass/sample-volume-plugin" + npName = "tiborvass/test-docker-netplugin" + pTag = "latest" + pNameWithTag = pName + ":" + pTag + npNameWithTag = npName + ":" + pTag +) + +func (ps *DockerPluginSuite) TestPluginBasicOps(c *check.C) { + plugin := ps.getPluginRepoWithTag() + _, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", plugin) + c.Assert(err, checker.IsNil) + + out, _, err := dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, plugin) + c.Assert(out, checker.Contains, "true") + + id, _, err := dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", plugin) + id = strings.TrimSpace(id) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "remove", plugin) + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "is enabled") + + _, _, err = dockerCmdWithError("plugin", "disable", plugin) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "remove", plugin) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, plugin) + + _, err = os.Stat(filepath.Join(testEnv.DockerBasePath(), "plugins", id)) + if !os.IsNotExist(err) { + c.Fatal(err) + } +} + +func (ps *DockerPluginSuite) TestPluginForceRemove(c *check.C) { + pNameWithTag := ps.getPluginRepoWithTag() + + out, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "remove", pNameWithTag) + c.Assert(out, checker.Contains, "is enabled") + + out, _, err = dockerCmdWithError("plugin", "remove", "--force", pNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pNameWithTag) +} + +func (s *DockerSuite) TestPluginActive(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + + _, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pNameWithTag) + c.Assert(err, checker.IsNil) + + _, _, err = 
dockerCmdWithError("volume", "create", "-d", pNameWithTag, "--name", "testvol1") + c.Assert(err, checker.IsNil) + + out, _, err := dockerCmdWithError("plugin", "disable", pNameWithTag) + c.Assert(out, checker.Contains, "in use") + + _, _, err = dockerCmdWithError("volume", "rm", "testvol1") + c.Assert(err, checker.IsNil) + + _, _, err = dockerCmdWithError("plugin", "disable", pNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "remove", pNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pNameWithTag) +} + +func (s *DockerSuite) TestPluginActiveNetwork(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + out, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", npNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("network", "create", "-d", npNameWithTag, "test") + c.Assert(err, checker.IsNil) + + nID := strings.TrimSpace(out) + + out, _, err = dockerCmdWithError("plugin", "remove", npNameWithTag) + c.Assert(out, checker.Contains, "is in use") + + _, _, err = dockerCmdWithError("network", "rm", nID) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "remove", npNameWithTag) + c.Assert(out, checker.Contains, "is enabled") + + _, _, err = dockerCmdWithError("plugin", "disable", npNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "remove", npNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, npNameWithTag) +} + +func (ps *DockerPluginSuite) TestPluginInstallDisable(c *check.C) { + pName := ps.getPluginRepoWithTag() + + out, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", "--disable", pName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, pName) + + out, _, err = dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "false") + + out, _, err = dockerCmdWithError("plugin", "enable", pName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, pName) + + out, _, err = dockerCmdWithError("plugin", "disable", pName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, pName) + + out, _, err = dockerCmdWithError("plugin", "remove", pName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, pName) +} + +func (s *DockerSuite) TestPluginInstallDisableVolumeLs(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + out, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", "--disable", pName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, pName) + + dockerCmd(c, "volume", "ls") +} + +func (ps *DockerPluginSuite) TestPluginSet(c *check.C) { + // Create a new plugin with extra settings + client, err := request.NewClient() + c.Assert(err, checker.IsNil, check.Commentf("failed to create test client")) + + name := "test" + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + initialValue := "0" + err = plugin.Create(ctx, client, name, func(cfg *plugin.Config) { + cfg.Env = []types.PluginEnv{{Name: "DEBUG", Value: &initialValue, Settable: []string{"value"}}} + }) + c.Assert(err, checker.IsNil, check.Commentf("failed to create test plugin")) + + env, _ := dockerCmd(c, "plugin", "inspect", "-f", "{{.Settings.Env}}", name) + c.Assert(strings.TrimSpace(env), checker.Equals, 
"[DEBUG=0]") + + dockerCmd(c, "plugin", "set", name, "DEBUG=1") + + env, _ = dockerCmd(c, "plugin", "inspect", "-f", "{{.Settings.Env}}", name) + c.Assert(strings.TrimSpace(env), checker.Equals, "[DEBUG=1]") +} + +func (ps *DockerPluginSuite) TestPluginInstallArgs(c *check.C) { + pName := path.Join(ps.registryHost(), "plugin", "testplugininstallwithargs") + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + plugin.CreateInRegistry(ctx, pName, nil, func(cfg *plugin.Config) { + cfg.Env = []types.PluginEnv{{Name: "DEBUG", Settable: []string{"value"}}} + }) + + out, _ := dockerCmd(c, "plugin", "install", "--grant-all-permissions", "--disable", pName, "DEBUG=1") + c.Assert(strings.TrimSpace(out), checker.Contains, pName) + + env, _ := dockerCmd(c, "plugin", "inspect", "-f", "{{.Settings.Env}}", pName) + c.Assert(strings.TrimSpace(env), checker.Equals, "[DEBUG=1]") +} + +func (ps *DockerPluginSuite) TestPluginInstallImage(c *check.C) { + testRequires(c, IsAmd64) + + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + // tag the image to upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + // push the image to the registry + dockerCmd(c, "push", repoName) + + out, _, err := dockerCmdWithError("plugin", "install", repoName) + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, `Encountered remote "application/vnd.docker.container.image.v1+json"(image) when fetching`) +} + +func (ps *DockerPluginSuite) TestPluginEnableDisableNegative(c *check.C) { + pName := ps.getPluginRepoWithTag() + + out, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, pName) + + out, _, err = dockerCmdWithError("plugin", "enable", pName) + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Contains, "already enabled") + + _, _, err = dockerCmdWithError("plugin", "disable", pName) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "disable", pName) + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Contains, "already disabled") + + _, _, err = dockerCmdWithError("plugin", "remove", pName) + c.Assert(err, checker.IsNil) +} + +func (ps *DockerPluginSuite) TestPluginCreate(c *check.C) { + name := "foo/bar-driver" + temp, err := ioutil.TempDir("", "foo") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(temp) + + data := `{"description": "foo plugin"}` + err = ioutil.WriteFile(filepath.Join(temp, "config.json"), []byte(data), 0644) + c.Assert(err, checker.IsNil) + + err = os.MkdirAll(filepath.Join(temp, "rootfs"), 0700) + c.Assert(err, checker.IsNil) + + out, _, err := dockerCmdWithError("plugin", "create", name, temp) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + + out, _, err = dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + + out, _, err = dockerCmdWithError("plugin", "create", name, temp) + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "already exist") + + out, _, err = dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + // The output will consists of one HEADER line and one line of foo/bar-driver + c.Assert(len(strings.Split(strings.TrimSpace(out), "\n")), checker.Equals, 2) +} + +func (ps *DockerPluginSuite) TestPluginInspect(c *check.C) { + pNameWithTag := 
ps.getPluginRepoWithTag() + + _, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err := dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pNameWithTag) + c.Assert(out, checker.Contains, "true") + + // Find the ID first + out, _, err = dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", pNameWithTag) + c.Assert(err, checker.IsNil) + id := strings.TrimSpace(out) + c.Assert(id, checker.Not(checker.Equals), "") + + // Long form + out, _, err = dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", id) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, id) + + // Short form + out, _, err = dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", id[:5]) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, id) + + // Name with tag form + out, _, err = dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", pNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, id) + + // Name without tag form + out, _, err = dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", ps.getPluginRepo()) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, id) + + _, _, err = dockerCmdWithError("plugin", "disable", pNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "remove", pNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pNameWithTag) + + // After remove nothing should be found + _, _, err = dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", id[:5]) + c.Assert(err, checker.NotNil) +} + +// Test case for https://github.com/docker/docker/pull/29186#discussion_r91277345 +func (s *DockerSuite) TestPluginInspectOnWindows(c *check.C) { + // This test should work on Windows only + testRequires(c, DaemonIsWindows) + + out, _, err := dockerCmdWithError("plugin", "inspect", "foobar") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "plugins are not supported on this platform") + c.Assert(err.Error(), checker.Contains, "plugins are not supported on this platform") +} + +func (s *DockerTrustSuite) TestPluginTrustedInstall(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + + trustedName := s.setupTrustedplugin(c, pNameWithTag, "trusted-plugin-install") + + cli.Docker(cli.Args("plugin", "install", "--grant-all-permissions", trustedName), trustedCmd).Assert(c, icmd.Expected{ + Out: trustedName, + }) + + out := cli.DockerCmd(c, "plugin", "ls").Combined() + c.Assert(out, checker.Contains, "true") + + out = cli.DockerCmd(c, "plugin", "disable", trustedName).Combined() + c.Assert(strings.TrimSpace(out), checker.Contains, trustedName) + + out = cli.DockerCmd(c, "plugin", "enable", trustedName).Combined() + c.Assert(strings.TrimSpace(out), checker.Contains, trustedName) + + out = cli.DockerCmd(c, "plugin", "rm", "-f", trustedName).Combined() + c.Assert(strings.TrimSpace(out), checker.Contains, trustedName) + + // Try untrusted pull to ensure we pushed the tag to the registry + cli.Docker(cli.Args("plugin", "install", "--disable-content-trust=true", "--grant-all-permissions", trustedName), trustedCmd).Assert(c, SuccessDownloaded) + + out = cli.DockerCmd(c, "plugin", "ls").Combined() + c.Assert(out, checker.Contains, "true") + +} + +func (s *DockerTrustSuite) TestPluginUntrustedInstall(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + + 
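+ // Push an unsigned copy of the plugin to the private registry, then verify that an install with content trust enabled refuses it because no trust data exists for the tag.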
pluginName := fmt.Sprintf("%v/dockercliuntrusted/plugintest:latest", privateRegistryURL) + // install locally and push to private registry + cli.DockerCmd(c, "plugin", "install", "--grant-all-permissions", "--alias", pluginName, pNameWithTag) + cli.DockerCmd(c, "plugin", "push", pluginName) + cli.DockerCmd(c, "plugin", "rm", "-f", pluginName) + + // Try trusted install on untrusted plugin + cli.Docker(cli.Args("plugin", "install", "--grant-all-permissions", pluginName), trustedCmd).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Error: remote trust data does not exist", + }) +} + +func (ps *DockerPluginSuite) TestPluginIDPrefix(c *check.C) { + name := "test" + client, err := request.NewClient() + c.Assert(err, checker.IsNil, check.Commentf("error creating test client")) + + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + initialValue := "0" + err = plugin.Create(ctx, client, name, func(cfg *plugin.Config) { + cfg.Env = []types.PluginEnv{{Name: "DEBUG", Value: &initialValue, Settable: []string{"value"}}} + }) + cancel() + + c.Assert(err, checker.IsNil, check.Commentf("failed to create test plugin")) + + // Find ID first + id, _, err := dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", name) + id = strings.TrimSpace(id) + c.Assert(err, checker.IsNil) + + // List current state + out, _, err := dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + c.Assert(out, checker.Contains, "false") + + env, _ := dockerCmd(c, "plugin", "inspect", "-f", "{{.Settings.Env}}", id[:5]) + c.Assert(strings.TrimSpace(env), checker.Equals, "[DEBUG=0]") + + dockerCmd(c, "plugin", "set", id[:5], "DEBUG=1") + + env, _ = dockerCmd(c, "plugin", "inspect", "-f", "{{.Settings.Env}}", id[:5]) + c.Assert(strings.TrimSpace(env), checker.Equals, "[DEBUG=1]") + + // Enable + _, _, err = dockerCmdWithError("plugin", "enable", id[:5]) + c.Assert(err, checker.IsNil) + out, _, err = dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + c.Assert(out, checker.Contains, "true") + + // Disable + _, _, err = dockerCmdWithError("plugin", "disable", id[:5]) + c.Assert(err, checker.IsNil) + out, _, err = dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + c.Assert(out, checker.Contains, "false") + + // Remove + out, _, err = dockerCmdWithError("plugin", "remove", id[:5]) + c.Assert(err, checker.IsNil) + // List returns none + out, _, err = dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name) +} + +func (ps *DockerPluginSuite) TestPluginListDefaultFormat(c *check.C) { + config, err := ioutil.TempDir("", "config-file-") + c.Assert(err, check.IsNil) + defer os.RemoveAll(config) + + err = ioutil.WriteFile(filepath.Join(config, "config.json"), []byte(`{"pluginsFormat": "raw"}`), 0644) + c.Assert(err, check.IsNil) + + name := "test:latest" + client, err := request.NewClient() + c.Assert(err, checker.IsNil, check.Commentf("error creating test client")) + + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + err = plugin.Create(ctx, client, name, func(cfg *plugin.Config) { + cfg.Description = "test plugin" + }) + c.Assert(err, checker.IsNil, check.Commentf("failed to create test plugin")) + + out, _ := dockerCmd(c, "plugin", "inspect", "--format", "{{.ID}}", name) + id := strings.TrimSpace(out) + + // We expect the format to be in `raw + 
--no-trunc` + expectedOutput := fmt.Sprintf(`plugin_id: %s +name: %s +description: test plugin +enabled: false`, id, name) + + out, _ = dockerCmd(c, "--config", config, "plugin", "ls", "--no-trunc") + c.Assert(strings.TrimSpace(out), checker.Contains, expectedOutput) +} + +func (s *DockerSuite) TestPluginUpgrade(c *check.C) { + testRequires(c, DaemonIsLinux, Network, SameHostDaemon, IsAmd64) + plugin := "cpuguy83/docker-volume-driver-plugin-local:latest" + pluginV2 := "cpuguy83/docker-volume-driver-plugin-local:v2" + + dockerCmd(c, "plugin", "install", "--grant-all-permissions", plugin) + dockerCmd(c, "volume", "create", "--driver", plugin, "bananas") + dockerCmd(c, "run", "--rm", "-v", "bananas:/apple", "busybox", "sh", "-c", "touch /apple/core") + + out, _, err := dockerCmdWithError("plugin", "upgrade", "--grant-all-permissions", plugin, pluginV2) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "disabled before upgrading") + + out, _ = dockerCmd(c, "plugin", "inspect", "--format={{.ID}}", plugin) + id := strings.TrimSpace(out) + + // make sure "v2" does not exist yet + _, err = os.Stat(filepath.Join(testEnv.DockerBasePath(), "plugins", id, "rootfs", "v2")) + c.Assert(os.IsNotExist(err), checker.True, check.Commentf(out)) + + dockerCmd(c, "plugin", "disable", "-f", plugin) + dockerCmd(c, "plugin", "upgrade", "--grant-all-permissions", "--skip-remote-check", plugin, pluginV2) + + // make sure the "v2" file exists + _, err = os.Stat(filepath.Join(testEnv.DockerBasePath(), "plugins", id, "rootfs", "v2")) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "plugin", "enable", plugin) + dockerCmd(c, "volume", "inspect", "bananas") + dockerCmd(c, "run", "--rm", "-v", "bananas:/apple", "busybox", "sh", "-c", "ls -lh /apple/core") +} + +func (s *DockerSuite) TestPluginMetricsCollector(c *check.C) { + testRequires(c, DaemonIsLinux, Network, SameHostDaemon, IsAmd64) + d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{}) + d.Start(c) + defer d.Stop(c) + + name := "cpuguy83/docker-metrics-plugin-test:latest" + r := cli.Docker(cli.Args("plugin", "install", "--grant-all-permissions", name), cli.Daemon(d)) + c.Assert(r.Error, checker.IsNil, check.Commentf(r.Combined())) + + // the plugin listens on localhost:19393 and proxies the metrics + resp, err := http.Get("http://localhost:19393/metrics") + c.Assert(err, checker.IsNil) + defer resp.Body.Close() + + b, err := ioutil.ReadAll(resp.Body) + c.Assert(err, checker.IsNil) + // check that a known metric is there; this metric is unlikely to change over time, so it is 
probably safe to assert on + c.Assert(string(b), checker.Contains, "container_actions") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_port_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_port_test.go new file mode 100644 index 000000000..bcb87f5f3 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_port_test.go @@ -0,0 +1,319 @@ +package main + +import ( + "fmt" + "net" + "regexp" + "sort" + "strings" + + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestPortList(c *check.C) { + testRequires(c, DaemonIsLinux) + // one port + out, _ := dockerCmd(c, "run", "-d", "-p", "9876:80", "busybox", "top") + firstID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", firstID, "80") + + err := assertPortList(c, out, []string{"0.0.0.0:9876"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + + out, _ = dockerCmd(c, "port", firstID) + + err = assertPortList(c, out, []string{"80/tcp -> 0.0.0.0:9876"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + + dockerCmd(c, "rm", "-f", firstID) + + // three ports + out, _ = dockerCmd(c, "run", "-d", + "-p", "9876:80", + "-p", "9877:81", + "-p", "9878:82", + "busybox", "top") + ID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", ID, "80") + + err = assertPortList(c, out, []string{"0.0.0.0:9876"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + + out, _ = dockerCmd(c, "port", ID) + + err = assertPortList(c, out, []string{ + "80/tcp -> 0.0.0.0:9876", + "81/tcp -> 0.0.0.0:9877", + "82/tcp -> 0.0.0.0:9878"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + + dockerCmd(c, "rm", "-f", ID) + + // more than one port mapped to the same container port + out, _ = dockerCmd(c, "run", "-d", + "-p", "9876:80", + "-p", "9999:80", + "-p", "9877:81", + "-p", "9878:82", + "busybox", "top") + ID = strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", ID, "80") + + err = assertPortList(c, out, []string{"0.0.0.0:9876", "0.0.0.0:9999"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + + out, _ = dockerCmd(c, "port", ID) + + err = assertPortList(c, out, []string{ + "80/tcp -> 0.0.0.0:9876", + "80/tcp -> 0.0.0.0:9999", + "81/tcp -> 0.0.0.0:9877", + "82/tcp -> 0.0.0.0:9878"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + dockerCmd(c, "rm", "-f", ID) + + testRange := func() { + // host port ranges used + IDs := make([]string, 3) + for i := 0; i < 3; i++ { + out, _ = dockerCmd(c, "run", "-d", + "-p", "9090-9092:80", + "busybox", "top") + IDs[i] = strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", IDs[i]) + + err = assertPortList(c, out, []string{fmt.Sprintf("80/tcp -> 0.0.0.0:%d", 9090+i)}) + // Port list is not correct + c.Assert(err, checker.IsNil) + } + + // test port range exhaustion + out, _, err = dockerCmdWithError("run", "-d", + "-p", "9090-9092:80", + "busybox", "top") + // Exhausted port range did not return an error + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + + for i := 0; i < 3; i++ { + dockerCmd(c, "rm", "-f", IDs[i]) + } + } + testRange() + // Verify we can re-use port ranges after they are no longer in use. 
+ testRange() + + // test invalid port ranges + for _, invalidRange := range []string{"9090-9089:80", "9090-:80", "-9090:80"} { + out, _, err = dockerCmdWithError("run", "-d", + "-p", invalidRange, + "busybox", "top") + // Port range should have returned an error + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + } + + // test host range:container range spec. + out, _ = dockerCmd(c, "run", "-d", + "-p", "9800-9803:80-83", + "busybox", "top") + ID = strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", ID) + + err = assertPortList(c, out, []string{ + "80/tcp -> 0.0.0.0:9800", + "81/tcp -> 0.0.0.0:9801", + "82/tcp -> 0.0.0.0:9802", + "83/tcp -> 0.0.0.0:9803"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + dockerCmd(c, "rm", "-f", ID) + + // test mixing protocols in same port range + out, _ = dockerCmd(c, "run", "-d", + "-p", "8000-8080:80", + "-p", "8000-8080:80/udp", + "busybox", "top") + ID = strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", ID) + + err = assertPortList(c, out, []string{ + "80/tcp -> 0.0.0.0:8000", + "80/udp -> 0.0.0.0:8000"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + dockerCmd(c, "rm", "-f", ID) +} + +func assertPortList(c *check.C, out string, expected []string) error { + lines := strings.Split(strings.Trim(out, "\n "), "\n") + if len(lines) != len(expected) { + return fmt.Errorf("different size lists %s, %d, %d", out, len(lines), len(expected)) + } + sort.Strings(lines) + sort.Strings(expected) + + for i := 0; i < len(expected); i++ { + if lines[i] != expected[i] { + return fmt.Errorf("|" + lines[i] + "!=" + expected[i] + "|") + } + } + + return nil +} + +func stopRemoveContainer(id string, c *check.C) { + dockerCmd(c, "rm", "-f", id) +} + +func (s *DockerSuite) TestUnpublishedPortsInPsOutput(c *check.C) { + testRequires(c, DaemonIsLinux) + // Run busybox with command line expose (equivalent to EXPOSE in image's Dockerfile) for the following ports + port1 := 80 + port2 := 443 + expose1 := fmt.Sprintf("--expose=%d", port1) + expose2 := fmt.Sprintf("--expose=%d", port2) + dockerCmd(c, "run", "-d", expose1, expose2, "busybox", "sleep", "5") + + // Check docker ps o/p for last created container reports the unpublished ports + unpPort1 := fmt.Sprintf("%d/tcp", port1) + unpPort2 := fmt.Sprintf("%d/tcp", port2) + out, _ := dockerCmd(c, "ps", "-n=1") + // Missing unpublished ports in docker ps output + c.Assert(out, checker.Contains, unpPort1) + // Missing unpublished ports in docker ps output + c.Assert(out, checker.Contains, unpPort2) + + // Run the container forcing to publish the exposed ports + dockerCmd(c, "run", "-d", "-P", expose1, expose2, "busybox", "sleep", "5") + + // Check docker ps o/p for last created container reports the exposed ports in the port bindings + expBndRegx1 := regexp.MustCompile(`0.0.0.0:\d\d\d\d\d->` + unpPort1) + expBndRegx2 := regexp.MustCompile(`0.0.0.0:\d\d\d\d\d->` + unpPort2) + out, _ = dockerCmd(c, "ps", "-n=1") + // Cannot find expected port binding port (0.0.0.0:xxxxx->unpPort1) in docker ps output + c.Assert(expBndRegx1.MatchString(out), checker.Equals, true, check.Commentf("out: %s; unpPort1: %s", out, unpPort1)) + // Cannot find expected port binding port (0.0.0.0:xxxxx->unpPort2) in docker ps output + c.Assert(expBndRegx2.MatchString(out), checker.Equals, true, check.Commentf("out: %s; unpPort2: %s", out, unpPort2)) + + // Run the container specifying explicit port bindings for the exposed ports + offset := 10000 + pFlag1 := fmt.Sprintf("%d:%d", offset+port1, port1) + 
pFlag2 := fmt.Sprintf("%d:%d", offset+port2, port2) + out, _ = dockerCmd(c, "run", "-d", "-p", pFlag1, "-p", pFlag2, expose1, expose2, "busybox", "sleep", "5") + id := strings.TrimSpace(out) + + // Check docker ps o/p for last created container reports the specified port mappings + expBnd1 := fmt.Sprintf("0.0.0.0:%d->%s", offset+port1, unpPort1) + expBnd2 := fmt.Sprintf("0.0.0.0:%d->%s", offset+port2, unpPort2) + out, _ = dockerCmd(c, "ps", "-n=1") + // Cannot find expected port binding (expBnd1) in docker ps output + c.Assert(out, checker.Contains, expBnd1) + // Cannot find expected port binding (expBnd2) in docker ps output + c.Assert(out, checker.Contains, expBnd2) + + // Remove container now otherwise it will interfere with next test + stopRemoveContainer(id, c) + + // Run the container with explicit port bindings and no exposed ports + out, _ = dockerCmd(c, "run", "-d", "-p", pFlag1, "-p", pFlag2, "busybox", "sleep", "5") + id = strings.TrimSpace(out) + + // Check docker ps o/p for last created container reports the specified port mappings + out, _ = dockerCmd(c, "ps", "-n=1") + // Cannot find expected port binding (expBnd1) in docker ps output + c.Assert(out, checker.Contains, expBnd1) + // Cannot find expected port binding (expBnd2) in docker ps output + c.Assert(out, checker.Contains, expBnd2) + // Remove container now otherwise it will interfere with next test + stopRemoveContainer(id, c) + + // Run the container with one unpublished exposed port and one explicit port binding + dockerCmd(c, "run", "-d", expose1, "-p", pFlag2, "busybox", "sleep", "5") + + // Check docker ps o/p for last created container reports the specified unpublished port and port mapping + out, _ = dockerCmd(c, "ps", "-n=1") + // Missing unpublished exposed ports (unpPort1) in docker ps output + c.Assert(out, checker.Contains, unpPort1) + // Missing port binding (expBnd2) in docker ps output + c.Assert(out, checker.Contains, expBnd2) +} + +func (s *DockerSuite) TestPortHostBinding(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "-d", "-p", "9876:80", "busybox", + "nc", "-l", "-p", "80") + firstID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", firstID, "80") + + err := assertPortList(c, out, []string{"0.0.0.0:9876"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + + dockerCmd(c, "run", "--net=host", "busybox", + "nc", "localhost", "9876") + + dockerCmd(c, "rm", "-f", firstID) + + out, _, err = dockerCmdWithError("run", "--net=host", "busybox", "nc", "localhost", "9876") + // Port is still bound after the Container is removed + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) +} + +func (s *DockerSuite) TestPortExposeHostBinding(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "-d", "-P", "--expose", "80", "busybox", + "nc", "-l", "-p", "80") + firstID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", firstID, "80") + + _, exposedPort, err := net.SplitHostPort(out) + c.Assert(err, checker.IsNil, check.Commentf("out: %s", out)) + + dockerCmd(c, "run", "--net=host", "busybox", + "nc", "localhost", strings.TrimSpace(exposedPort)) + + dockerCmd(c, "rm", "-f", firstID) + + out, _, err = dockerCmdWithError("run", "--net=host", "busybox", + "nc", "localhost", strings.TrimSpace(exposedPort)) + // Port is still bound after the Container is removed + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) +} + +func (s *DockerSuite) TestPortBindingOnSandbox(c *check.C) { 
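+ // An --internal network provides no connectivity to the host, so a port published by a container attached only to it should not be reachable; once the container is also connected to a regular bridge network, the mapping is expected to work.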
+ testRequires(c, DaemonIsLinux, NotUserNamespace) + dockerCmd(c, "network", "create", "--internal", "-d", "bridge", "internal-net") + nr := getNetworkResource(c, "internal-net") + c.Assert(nr.Internal, checker.Equals, true) + + dockerCmd(c, "run", "--net", "internal-net", "-d", "--name", "c1", + "-p", "8080:8080", "busybox", "nc", "-l", "-p", "8080") + c.Assert(waitRun("c1"), check.IsNil) + + _, _, err := dockerCmdWithError("run", "--net=host", "busybox", "nc", "localhost", "8080") + c.Assert(err, check.NotNil, + check.Commentf("Port mapping on internal network is expected to fail")) + + // Connect container to another normal bridge network + dockerCmd(c, "network", "create", "-d", "bridge", "foo-net") + dockerCmd(c, "network", "connect", "foo-net", "c1") + + _, _, err = dockerCmdWithError("run", "--net=host", "busybox", "nc", "localhost", "8080") + c.Assert(err, check.IsNil, + check.Commentf("Port mapping on the new network is expected to succeed")) + +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_proxy_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_proxy_test.go new file mode 100644 index 000000000..3344985a0 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_proxy_test.go @@ -0,0 +1,51 @@ +package main + +import ( + "net" + "strings" + + "github.com/docker/docker/integration-cli/checker" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestCLIProxyDisableProxyUnixSock(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "info"}, + Env: appendBaseEnv(false, "HTTP_PROXY=http://127.0.0.1:9999"), + }).Assert(c, icmd.Success) +} + +// Can't use localhost here since go has a special case to not use proxy if connecting to localhost +// See https://golang.org/pkg/net/http/#ProxyFromEnvironment +func (s *DockerDaemonSuite) TestCLIProxyProxyTCPSock(c *check.C) { + testRequires(c, SameHostDaemon) + // get the IP to use to connect since we can't use localhost + addrs, err := net.InterfaceAddrs() + c.Assert(err, checker.IsNil) + var ip string + for _, addr := range addrs { + sAddr := addr.String() + if !strings.Contains(sAddr, "127.0.0.1") { + addrArr := strings.Split(sAddr, "/") + ip = addrArr[0] + break + } + } + + c.Assert(ip, checker.Not(checker.Equals), "") + + s.d.Start(c, "-H", "tcp://"+ip+":2375") + + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "info"}, + Env: []string{"DOCKER_HOST=tcp://" + ip + ":2375", "HTTP_PROXY=127.0.0.1:9999"}, + }).Assert(c, icmd.Expected{Error: "exit status 1", ExitCode: 1}) + // Test with no_proxy + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "info"}, + Env: []string{"DOCKER_HOST=tcp://" + ip + ":2375", "HTTP_PROXY=127.0.0.1:9999", "NO_PROXY=" + ip}, + }).Assert(c, icmd.Success) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_prune_unix_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_prune_unix_test.go new file mode 100644 index 000000000..bea4f4fbd --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_prune_unix_test.go @@ -0,0 +1,292 @@ +// +build !windows + +package main + +import ( + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/daemon" + "github.com/go-check/check" +) + +func pruneNetworkAndVerify(c *check.C, d 
*daemon.Swarm, kept, pruned []string) { + _, err := d.Cmd("network", "prune", "--force") + c.Assert(err, checker.IsNil) + out, err := d.Cmd("network", "ls", "--format", "{{.Name}}") + c.Assert(err, checker.IsNil) + for _, s := range kept { + c.Assert(out, checker.Contains, s) + } + for _, s := range pruned { + c.Assert(out, checker.Not(checker.Contains), s) + } +} + +func (s *DockerSwarmSuite) TestPruneNetwork(c *check.C) { + d := s.AddDaemon(c, true, true) + _, err := d.Cmd("network", "create", "n1") // used by container (testprune) + c.Assert(err, checker.IsNil) + _, err = d.Cmd("network", "create", "n2") + c.Assert(err, checker.IsNil) + _, err = d.Cmd("network", "create", "n3", "--driver", "overlay") // used by service (testprunesvc) + c.Assert(err, checker.IsNil) + _, err = d.Cmd("network", "create", "n4", "--driver", "overlay") + c.Assert(err, checker.IsNil) + + cName := "testprune" + _, err = d.Cmd("run", "-d", "--name", cName, "--net", "n1", "busybox", "top") + c.Assert(err, checker.IsNil) + + serviceName := "testprunesvc" + replicas := 1 + out, err := d.Cmd("service", "create", "--no-resolve-image", + "--name", serviceName, + "--replicas", strconv.Itoa(replicas), + "--network", "n3", + "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, replicas+1) + + // prune and verify + pruneNetworkAndVerify(c, d, []string{"n1", "n3"}, []string{"n2", "n4"}) + + // remove containers, then prune and verify again + _, err = d.Cmd("rm", "-f", cName) + c.Assert(err, checker.IsNil) + _, err = d.Cmd("service", "rm", serviceName) + c.Assert(err, checker.IsNil) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 0) + pruneNetworkAndVerify(c, d, []string{}, []string{"n1", "n3"}) +} + +func (s *DockerDaemonSuite) TestPruneImageDangling(c *check.C) { + s.d.StartWithBusybox(c) + + out, _, err := s.d.BuildImageWithOut("test", + `FROM busybox + LABEL foo=bar`, true, "-q") + c.Assert(err, checker.IsNil) + id := strings.TrimSpace(out) + + out, err = s.d.Cmd("images", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, id) + + out, err = s.d.Cmd("image", "prune", "--force") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id) + + out, err = s.d.Cmd("images", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, id) + + out, err = s.d.Cmd("image", "prune", "--force", "--all") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, id) + + out, err = s.d.Cmd("images", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id) +} + +func (s *DockerSuite) TestPruneContainerUntil(c *check.C) { + out := cli.DockerCmd(c, "run", "-d", "busybox").Combined() + id1 := strings.TrimSpace(out) + cli.WaitExited(c, id1, 5*time.Second) + + until := daemonUnixTime(c) + + out = cli.DockerCmd(c, "run", "-d", "busybox").Combined() + id2 := strings.TrimSpace(out) + cli.WaitExited(c, id2, 5*time.Second) + + out = cli.DockerCmd(c, "container", "prune", "--force", "--filter", "until="+until).Combined() + c.Assert(strings.TrimSpace(out), checker.Contains, id1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id2) + + out = cli.DockerCmd(c, "ps", "-a", "-q", "--no-trunc").Combined() + 
c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id1) + c.Assert(strings.TrimSpace(out), checker.Contains, id2) +} + +func (s *DockerSuite) TestPruneContainerLabel(c *check.C) { + out := cli.DockerCmd(c, "run", "-d", "--label", "foo", "busybox").Combined() + id1 := strings.TrimSpace(out) + cli.WaitExited(c, id1, 5*time.Second) + + out = cli.DockerCmd(c, "run", "-d", "--label", "bar", "busybox").Combined() + id2 := strings.TrimSpace(out) + cli.WaitExited(c, id2, 5*time.Second) + + out = cli.DockerCmd(c, "run", "-d", "busybox").Combined() + id3 := strings.TrimSpace(out) + cli.WaitExited(c, id3, 5*time.Second) + + out = cli.DockerCmd(c, "run", "-d", "--label", "foobar", "busybox").Combined() + id4 := strings.TrimSpace(out) + cli.WaitExited(c, id4, 5*time.Second) + + // Add a config file with label=foobar; it will have no impact if the CLI filter is label!=foobar + config := `{"pruneFilters": ["label=foobar"]}` + d, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(d) + err = ioutil.WriteFile(filepath.Join(d, "config.json"), []byte(config), 0644) + c.Assert(err, checker.IsNil) + + // With config.json only, prune based on label=foobar + out = cli.DockerCmd(c, "--config", d, "container", "prune", "--force").Combined() + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id2) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id3) + c.Assert(strings.TrimSpace(out), checker.Contains, id4) + + out = cli.DockerCmd(c, "container", "prune", "--force", "--filter", "label=foo").Combined() + c.Assert(strings.TrimSpace(out), checker.Contains, id1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id2) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id3) + + out = cli.DockerCmd(c, "ps", "-a", "-q", "--no-trunc").Combined() + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id1) + c.Assert(strings.TrimSpace(out), checker.Contains, id2) + c.Assert(strings.TrimSpace(out), checker.Contains, id3) + + out = cli.DockerCmd(c, "container", "prune", "--force", "--filter", "label!=bar").Combined() + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id2) + c.Assert(strings.TrimSpace(out), checker.Contains, id3) + + out = cli.DockerCmd(c, "ps", "-a", "-q", "--no-trunc").Combined() + c.Assert(strings.TrimSpace(out), checker.Contains, id2) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id3) + + // With config.json label=foobar and CLI label!=foobar, the CLI label!=foobar supersedes + out = cli.DockerCmd(c, "--config", d, "container", "prune", "--force", "--filter", "label!=foobar").Combined() + c.Assert(strings.TrimSpace(out), checker.Contains, id2) + + out = cli.DockerCmd(c, "ps", "-a", "-q", "--no-trunc").Combined() + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id2) +} + +func (s *DockerSuite) TestPruneVolumeLabel(c *check.C) { + out, _ := dockerCmd(c, "volume", "create", "--label", "foo") + id1 := strings.TrimSpace(out) + c.Assert(id1, checker.Not(checker.Equals), "") + + out, _ = dockerCmd(c, "volume", "create", "--label", "bar") + id2 := strings.TrimSpace(out) + c.Assert(id2, checker.Not(checker.Equals), "") + + out, _ = dockerCmd(c, "volume", "create") + id3 := strings.TrimSpace(out) + c.Assert(id3, checker.Not(checker.Equals), "") + + out, _ = dockerCmd(c, "volume", "create", "--label", "foobar") + id4 := strings.TrimSpace(out) + c.Assert(id4, 
checker.Not(checker.Equals), "") + + // Add a config file of label=foobar, that will have no impact if cli is label!=foobar + config := `{"pruneFilters": ["label=foobar"]}` + d, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(d) + err = ioutil.WriteFile(filepath.Join(d, "config.json"), []byte(config), 0644) + c.Assert(err, checker.IsNil) + + // With config.json only, prune based on label=foobar + out, _ = dockerCmd(c, "--config", d, "volume", "prune", "--force") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id2) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id3) + c.Assert(strings.TrimSpace(out), checker.Contains, id4) + + out, _ = dockerCmd(c, "volume", "prune", "--force", "--filter", "label=foo") + c.Assert(strings.TrimSpace(out), checker.Contains, id1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id2) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id3) + + out, _ = dockerCmd(c, "volume", "ls", "--format", "{{.Name}}") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id1) + c.Assert(strings.TrimSpace(out), checker.Contains, id2) + c.Assert(strings.TrimSpace(out), checker.Contains, id3) + + out, _ = dockerCmd(c, "volume", "prune", "--force", "--filter", "label!=bar") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id2) + c.Assert(strings.TrimSpace(out), checker.Contains, id3) + + out, _ = dockerCmd(c, "volume", "ls", "--format", "{{.Name}}") + c.Assert(strings.TrimSpace(out), checker.Contains, id2) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id3) + + // With config.json label=foobar and CLI label!=foobar, CLI label!=foobar supersede + out, _ = dockerCmd(c, "--config", d, "volume", "prune", "--force", "--filter", "label!=foobar") + c.Assert(strings.TrimSpace(out), checker.Contains, id2) + + out, _ = dockerCmd(c, "volume", "ls", "--format", "{{.Name}}") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id2) +} + +func (s *DockerSuite) TestPruneNetworkLabel(c *check.C) { + dockerCmd(c, "network", "create", "--label", "foo", "n1") + dockerCmd(c, "network", "create", "--label", "bar", "n2") + dockerCmd(c, "network", "create", "n3") + + out, _ := dockerCmd(c, "network", "prune", "--force", "--filter", "label=foo") + c.Assert(strings.TrimSpace(out), checker.Contains, "n1") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), "n2") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), "n3") + + out, _ = dockerCmd(c, "network", "prune", "--force", "--filter", "label!=bar") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), "n1") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), "n2") + c.Assert(strings.TrimSpace(out), checker.Contains, "n3") + + out, _ = dockerCmd(c, "network", "prune", "--force") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), "n1") + c.Assert(strings.TrimSpace(out), checker.Contains, "n2") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), "n3") +} + +func (s *DockerDaemonSuite) TestPruneImageLabel(c *check.C) { + s.d.StartWithBusybox(c) + + out, _, err := s.d.BuildImageWithOut("test1", + `FROM busybox + LABEL foo=bar`, true, "-q") + c.Assert(err, checker.IsNil) + id1 := strings.TrimSpace(out) + out, err = s.d.Cmd("images", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), 
checker.Contains, id1) + + out, _, err = s.d.BuildImageWithOut("test2", + `FROM busybox + LABEL bar=foo`, true, "-q") + c.Assert(err, checker.IsNil) + id2 := strings.TrimSpace(out) + out, err = s.d.Cmd("images", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, id2) + + out, err = s.d.Cmd("image", "prune", "--force", "--all", "--filter", "label=foo=bar") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, id1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id2) + + out, err = s.d.Cmd("image", "prune", "--force", "--all", "--filter", "label!=bar=foo") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id2) + + out, err = s.d.Cmd("image", "prune", "--force", "--all", "--filter", "label=bar=foo") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id1) + c.Assert(strings.TrimSpace(out), checker.Contains, id2) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_ps_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_ps_test.go new file mode 100644 index 000000000..98a20f426 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_ps_test.go @@ -0,0 +1,967 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "time" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/pkg/stringid" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestPsListContainersBase(c *check.C) { + out := runSleepingContainer(c, "-d") + firstID := strings.TrimSpace(out) + + out = runSleepingContainer(c, "-d") + secondID := strings.TrimSpace(out) + + // not long running + out, _ = dockerCmd(c, "run", "-d", "busybox", "true") + thirdID := strings.TrimSpace(out) + + out = runSleepingContainer(c, "-d") + fourthID := strings.TrimSpace(out) + + // make sure the second is running + c.Assert(waitRun(secondID), checker.IsNil) + + // make sure the third one is not running + dockerCmd(c, "wait", thirdID) + + // make sure the fourth is running + c.Assert(waitRun(fourthID), checker.IsNil) + + // all + out, _ = dockerCmd(c, "ps", "-a") + c.Assert(assertContainerList(out, []string{fourthID, thirdID, secondID, firstID}), checker.Equals, true, check.Commentf("ALL: Container list is not in the correct order: \n%s", out)) + + // running + out, _ = dockerCmd(c, "ps") + c.Assert(assertContainerList(out, []string{fourthID, secondID, firstID}), checker.Equals, true, check.Commentf("RUNNING: Container list is not in the correct order: \n%s", out)) + + // limit + out, _ = dockerCmd(c, "ps", "-n=2", "-a") + expected := []string{fourthID, thirdID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("LIMIT & ALL: Container list is not in the correct order: \n%s", out)) + + out, _ = dockerCmd(c, "ps", "-n=2") + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("LIMIT: Container list is not in the correct order: \n%s", out)) + + // filter since + out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-a") + expected = []string{fourthID, thirdID, secondID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, 
check.Commentf("SINCE filter & ALL: Container list is not in the correct order: \n%s", out)) + + out, _ = dockerCmd(c, "ps", "-f", "since="+firstID) + expected = []string{fourthID, secondID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Container list is not in the correct order: \n%s", out)) + + out, _ = dockerCmd(c, "ps", "-f", "since="+thirdID) + expected = []string{fourthID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Container list is not in the correct order: \n%s", out)) + + // filter before + out, _ = dockerCmd(c, "ps", "-f", "before="+fourthID, "-a") + expected = []string{thirdID, secondID, firstID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter & ALL: Container list is not in the correct order: \n%s", out)) + + out, _ = dockerCmd(c, "ps", "-f", "before="+fourthID) + expected = []string{secondID, firstID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter: Container list is not in the correct order: \n%s", out)) + + out, _ = dockerCmd(c, "ps", "-f", "before="+thirdID) + expected = []string{secondID, firstID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Container list is not in the correct order: \n%s", out)) + + // filter since & before + out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-f", "before="+fourthID, "-a") + expected = []string{thirdID, secondID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter & ALL: Container list is not in the correct order: \n%s", out)) + + out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-f", "before="+fourthID) + expected = []string{secondID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter: Container list is not in the correct order: \n%s", out)) + + // filter since & limit + out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-n=2", "-a") + expected = []string{fourthID, thirdID} + + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, LIMIT & ALL: Container list is not in the correct order: \n%s", out)) + + out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-n=2") + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, LIMIT: Container list is not in the correct order: \n%s", out)) + + // filter before & limit + out, _ = dockerCmd(c, "ps", "-f", "before="+fourthID, "-n=1", "-a") + expected = []string{thirdID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter, LIMIT & ALL: Container list is not in the correct order: \n%s", out)) + + out, _ = dockerCmd(c, "ps", "-f", "before="+fourthID, "-n=1") + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter, LIMIT: Container list is not in the correct order: \n%s", out)) + + // filter since & filter before & limit + out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-f", "before="+fourthID, "-n=1", "-a") + expected = []string{thirdID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter, LIMIT & ALL: Container list is not in the correct order: \n%s", out)) + + out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-f", "before="+fourthID, "-n=1") + 
c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter, LIMIT: Container list is not in the correct order: \n%s", out)) + +} + +func assertContainerList(out string, expected []string) bool { + lines := strings.Split(strings.Trim(out, "\n "), "\n") + + if len(lines)-1 != len(expected) { + return false + } + + containerIDIndex := strings.Index(lines[0], "CONTAINER ID") + for i := 0; i < len(expected); i++ { + foundID := lines[i+1][containerIDIndex : containerIDIndex+12] + if foundID != expected[i][:12] { + return false + } + } + + return true +} + +// FIXME(vdemeester) Move this into a unit test in daemon package +func (s *DockerSuite) TestPsListContainersInvalidFilterName(c *check.C) { + out, _, err := dockerCmdWithError("ps", "-f", "invalidFilter=test") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "Invalid filter") +} + +func (s *DockerSuite) TestPsListContainersSize(c *check.C) { + // Problematic on Windows as it doesn't report the size correctly @swernli + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "busybox") + + baseOut, _ := dockerCmd(c, "ps", "-s", "-n=1") + baseLines := strings.Split(strings.Trim(baseOut, "\n "), "\n") + baseSizeIndex := strings.Index(baseLines[0], "SIZE") + baseFoundsize := baseLines[1][baseSizeIndex:] + baseBytes, err := strconv.Atoi(strings.Split(baseFoundsize, "B")[0]) + c.Assert(err, checker.IsNil) + + name := "test_size" + dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", "echo 1 > test") + id := getIDByName(c, name) + + var result *icmd.Result + + wait := make(chan struct{}) + go func() { + result = icmd.RunCommand(dockerBinary, "ps", "-s", "-n=1") + close(wait) + }() + select { + case <-wait: + case <-time.After(3 * time.Second): + c.Fatalf("Calling \"docker ps -s\" timed out!") + } + result.Assert(c, icmd.Success) + lines := strings.Split(strings.Trim(result.Combined(), "\n "), "\n") + c.Assert(lines, checker.HasLen, 2, check.Commentf("Expected 2 lines for 'ps -s -n=1' output, got %d", len(lines))) + sizeIndex := strings.Index(lines[0], "SIZE") + idIndex := strings.Index(lines[0], "CONTAINER ID") + foundID := lines[1][idIndex : idIndex+12] + c.Assert(foundID, checker.Equals, id[:12], check.Commentf("Expected id %s, got %s", id[:12], foundID)) + expectedSize := fmt.Sprintf("%dB", (2 + baseBytes)) + foundSize := lines[1][sizeIndex:] + c.Assert(foundSize, checker.Contains, expectedSize, check.Commentf("Expected size %q, got %q", expectedSize, foundSize)) +} + +func (s *DockerSuite) TestPsListContainersFilterStatus(c *check.C) { + // start exited container + out := cli.DockerCmd(c, "run", "-d", "busybox").Combined() + firstID := strings.TrimSpace(out) + + // make sure the exited container is not running + cli.DockerCmd(c, "wait", firstID) + + // start running container + out = cli.DockerCmd(c, "run", "-itd", "busybox").Combined() + secondID := strings.TrimSpace(out) + + // filter containers by exited + out = cli.DockerCmd(c, "ps", "--no-trunc", "-q", "--filter=status=exited").Combined() + containerOut := strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, firstID) + + out = cli.DockerCmd(c, "ps", "-a", "--no-trunc", "-q", "--filter=status=running").Combined() + containerOut = strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, secondID) + + result := cli.Docker(cli.Args("ps", "-a", "-q", "--filter=status=rubbish"), cli.WithTimeout(time.Second*60)) + c.Assert(result, icmd.Matches, icmd.Expected{ + ExitCode: 1, + Err: "Unrecognised filter 
value for status", + }) + + // Windows doesn't support pausing of containers + if testEnv.DaemonPlatform() != "windows" { + // pause running container + out = cli.DockerCmd(c, "run", "-itd", "busybox").Combined() + pausedID := strings.TrimSpace(out) + cli.DockerCmd(c, "pause", pausedID) + // make sure the container is unpaused to let the daemon stop it properly + defer func() { cli.DockerCmd(c, "unpause", pausedID) }() + + out = cli.DockerCmd(c, "ps", "--no-trunc", "-q", "--filter=status=paused").Combined() + containerOut = strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, pausedID) + } +} + +func (s *DockerSuite) TestPsListContainersFilterHealth(c *check.C) { + // Test legacy no health check + out := runSleepingContainer(c, "--name=none_legacy") + containerID := strings.TrimSpace(out) + + cli.WaitRun(c, containerID) + + out = cli.DockerCmd(c, "ps", "-q", "-l", "--no-trunc", "--filter=health=none").Combined() + containerOut := strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, containerID, check.Commentf("Expected id %s, got %s for legacy none filter, output: %q", containerID, containerOut, out)) + + // Test no health check specified explicitly + out = runSleepingContainer(c, "--name=none", "--no-healthcheck") + containerID = strings.TrimSpace(out) + + cli.WaitRun(c, containerID) + + out = cli.DockerCmd(c, "ps", "-q", "-l", "--no-trunc", "--filter=health=none").Combined() + containerOut = strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, containerID, check.Commentf("Expected id %s, got %s for none filter, output: %q", containerID, containerOut, out)) + + // Test failing health check + out = runSleepingContainer(c, "--name=failing_container", "--health-cmd=exit 1", "--health-interval=1s") + containerID = strings.TrimSpace(out) + + waitForHealthStatus(c, "failing_container", "starting", "unhealthy") + + out = cli.DockerCmd(c, "ps", "-q", "--no-trunc", "--filter=health=unhealthy").Combined() + containerOut = strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, containerID, check.Commentf("Expected containerID %s, got %s for unhealthy filter, output: %q", containerID, containerOut, out)) + + // Check passing healthcheck + out = runSleepingContainer(c, "--name=passing_container", "--health-cmd=exit 0", "--health-interval=1s") + containerID = strings.TrimSpace(out) + + waitForHealthStatus(c, "passing_container", "starting", "healthy") + + out = cli.DockerCmd(c, "ps", "-q", "--no-trunc", "--filter=health=healthy").Combined() + containerOut = strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, containerID, check.Commentf("Expected containerID %s, got %s for healthy filter, output: %q", containerID, containerOut, out)) +} + +func (s *DockerSuite) TestPsListContainersFilterID(c *check.C) { + // start container + out, _ := dockerCmd(c, "run", "-d", "busybox") + firstID := strings.TrimSpace(out) + + // start another container + runSleepingContainer(c) + + // filter containers by id + out, _ = dockerCmd(c, "ps", "-a", "-q", "--filter=id="+firstID) + containerOut := strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, firstID[:12], check.Commentf("Expected id %s, got %s for exited filter, output: %q", firstID[:12], containerOut, out)) +} + +func (s *DockerSuite) TestPsListContainersFilterName(c *check.C) { + // start container + dockerCmd(c, "run", "--name=a_name_to_match", "busybox") + id := getIDByName(c, "a_name_to_match") + + // start another container + runSleepingContainer(c, "--name=b_name_to_match") + + // filter 
containers by name + out, _ := dockerCmd(c, "ps", "-a", "-q", "--filter=name=a_name_to_match") + containerOut := strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, id[:12], check.Commentf("Expected id %s, got %s for name filter, output: %q", id[:12], containerOut, out)) +} + +// Test for the ancestor filter for ps. +// The same test, using image:tag@digest, also exists in docker_cli_by_digest_test.go +// +// What the test sets up: +// - Create 2 images based on busybox using the same repository but different tags +// - Create an image based on the previous image (images_ps_filter_test2) +// - Run containers for each of those images (busybox, images_ps_filter_test1, images_ps_filter_test2) +// - Filter them with the ancestor filter +func (s *DockerSuite) TestPsListContainersFilterAncestorImage(c *check.C) { + // Build images + imageName1 := "images_ps_filter_test1" + buildImageSuccessfully(c, imageName1, build.WithDockerfile(`FROM busybox + LABEL match me 1`)) + imageID1 := getIDByName(c, imageName1) + + imageName1Tagged := "images_ps_filter_test1:tag" + buildImageSuccessfully(c, imageName1Tagged, build.WithDockerfile(`FROM busybox + LABEL match me 1 tagged`)) + imageID1Tagged := getIDByName(c, imageName1Tagged) + + imageName2 := "images_ps_filter_test2" + buildImageSuccessfully(c, imageName2, build.WithDockerfile(fmt.Sprintf(`FROM %s + LABEL match me 2`, imageName1))) + imageID2 := getIDByName(c, imageName2) + + // start containers + dockerCmd(c, "run", "--name=first", "busybox", "echo", "hello") + firstID := getIDByName(c, "first") + + // start another container + dockerCmd(c, "run", "--name=second", "busybox", "echo", "hello") + secondID := getIDByName(c, "second") + + // start third container + dockerCmd(c, "run", "--name=third", imageName1, "echo", "hello") + thirdID := getIDByName(c, "third") + + // start fourth container + dockerCmd(c, "run", "--name=fourth", imageName1Tagged, "echo", "hello") + fourthID := getIDByName(c, "fourth") + + // start fifth container + dockerCmd(c, "run", "--name=fifth", imageName2, "echo", "hello") + fifthID := getIDByName(c, "fifth") + + var filterTestSuite = []struct { + filterName string + expectedIDs []string + }{ + // nonexistent images + {"nonexistent", []string{}}, + {"nonexistent:tag", []string{}}, + // image + {"busybox", []string{firstID, secondID, thirdID, fourthID, fifthID}}, + {imageName1, []string{thirdID, fifthID}}, + {imageName2, []string{fifthID}}, + // image:tag + {fmt.Sprintf("%s:latest", imageName1), []string{thirdID, fifthID}}, + {imageName1Tagged, []string{fourthID}}, + // short-id + {stringid.TruncateID(imageID1), []string{thirdID, fifthID}}, + {stringid.TruncateID(imageID2), []string{fifthID}}, + // full-id + {imageID1, []string{thirdID, fifthID}}, + {imageID1Tagged, []string{fourthID}}, + {imageID2, []string{fifthID}}, + } + + var out string + for _, filter := range filterTestSuite { + out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=ancestor="+filter.filterName) + checkPsAncestorFilterOutput(c, out, filter.filterName, filter.expectedIDs) + } + + // Multiple ancestor filter + out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=ancestor="+imageName2, "--filter=ancestor="+imageName1Tagged) + checkPsAncestorFilterOutput(c, out, imageName2+","+imageName1Tagged, []string{fourthID, fifthID}) +} + +func checkPsAncestorFilterOutput(c *check.C, out string, filterName string, expectedIDs []string) { + actualIDs := []string{} + if out != "" { + actualIDs = strings.Split(out[:len(out)-1], "\n") + } +
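+ // Sort both slices so the comparison below is order-insensitive:
+ // this helper only verifies which containers match the ancestor
+ // filter, not the order in which `docker ps` happens to list them.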
sort.Strings(actualIDs) + sort.Strings(expectedIDs) + + c.Assert(actualIDs, checker.HasLen, len(expectedIDs), check.Commentf("Expected filtered container(s) for %s ancestor filter to be %v:%v, got %v:%v", filterName, len(expectedIDs), expectedIDs, len(actualIDs), actualIDs)) + if len(expectedIDs) > 0 { + same := true + for i := range expectedIDs { + if actualIDs[i] != expectedIDs[i] { + c.Logf("%s, %s", actualIDs[i], expectedIDs[i]) + same = false + break + } + } + c.Assert(same, checker.Equals, true, check.Commentf("Expected filtered container(s) for %s ancestor filter to be %v, got %v", filterName, expectedIDs, actualIDs)) + } +} + +func (s *DockerSuite) TestPsListContainersFilterLabel(c *check.C) { + // start container + dockerCmd(c, "run", "--name=first", "-l", "match=me", "-l", "second=tag", "busybox") + firstID := getIDByName(c, "first") + + // start another container + dockerCmd(c, "run", "--name=second", "-l", "match=me too", "busybox") + secondID := getIDByName(c, "second") + + // start third container + dockerCmd(c, "run", "--name=third", "-l", "nomatch=me", "busybox") + thirdID := getIDByName(c, "third") + + // filter containers by exact match + out, _ := dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=label=match=me") + containerOut := strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, firstID, check.Commentf("Expected id %s, got %s for label filter, output: %q", firstID, containerOut, out)) + + // filter containers by two labels + out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=label=match=me", "--filter=label=second=tag") + containerOut = strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, firstID, check.Commentf("Expected id %s, got %s for label filter, output: %q", firstID, containerOut, out)) + + // filter containers by two labels, but expect not found because of AND behavior + out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=label=match=me", "--filter=label=second=tag-no") + containerOut = strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, "", check.Commentf("Expected nothing, got %s for label filter, output: %q", containerOut, out)) + + // filter containers by exact key + out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=label=match") + containerOut = strings.TrimSpace(out) + c.Assert(containerOut, checker.Contains, firstID) + c.Assert(containerOut, checker.Contains, secondID) + c.Assert(containerOut, checker.Not(checker.Contains), thirdID) +} + +func (s *DockerSuite) TestPsListContainersFilterExited(c *check.C) { + runSleepingContainer(c, "--name=sleep") + + dockerCmd(c, "run", "--name", "zero1", "busybox", "true") + firstZero := getIDByName(c, "zero1") + + dockerCmd(c, "run", "--name", "zero2", "busybox", "true") + secondZero := getIDByName(c, "zero2") + + out, _, err := dockerCmdWithError("run", "--name", "nonzero1", "busybox", "false") + c.Assert(err, checker.NotNil, check.Commentf("Should fail. out: %s, err: %v", out, err)) + + firstNonZero := getIDByName(c, "nonzero1") + + out, _, err = dockerCmdWithError("run", "--name", "nonzero2", "busybox", "false") + c.Assert(err, checker.NotNil, check.Commentf("Should fail. out: %s, err: %v", out, err)) + secondNonZero := getIDByName(c, "nonzero2") + + // filter containers by exited=0 + out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=exited=0") + ids := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(ids, checker.HasLen, 2, check.Commentf("Should be 2 zero-exited containers, got %d: %s", len(ids), out)) + c.Assert(ids[0], checker.Equals,
secondZero, check.Commentf("First in list should be %q, got %q", secondZero, ids[0])) + c.Assert(ids[1], checker.Equals, firstZero, check.Commentf("Second in list should be %q, got %q", firstZero, ids[1])) + + out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=exited=1") + ids = strings.Split(strings.TrimSpace(out), "\n") + c.Assert(ids, checker.HasLen, 2, check.Commentf("Should be 2 non-zero exited containers, got %d", len(ids))) + c.Assert(ids[0], checker.Equals, secondNonZero, check.Commentf("First in list should be %q, got %q", secondNonZero, ids[0])) + c.Assert(ids[1], checker.Equals, firstNonZero, check.Commentf("Second in list should be %q, got %q", firstNonZero, ids[1])) + +} + +func (s *DockerSuite) TestPsRightTagName(c *check.C) { + // TODO Investigate further why this fails on Windows to Windows CI + testRequires(c, DaemonIsLinux) + tag := "asybox:shmatest" + dockerCmd(c, "tag", "busybox", tag) + + var id1 string + out := runSleepingContainer(c) + id1 = strings.TrimSpace(string(out)) + + var id2 string + out = runSleepingContainerInImage(c, tag) + id2 = strings.TrimSpace(string(out)) + + var imageID string + out = inspectField(c, "busybox", "Id") + imageID = strings.TrimSpace(string(out)) + + var id3 string + out = runSleepingContainerInImage(c, imageID) + id3 = strings.TrimSpace(string(out)) + + out, _ = dockerCmd(c, "ps", "--no-trunc") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + // skip header + lines = lines[1:] + c.Assert(lines, checker.HasLen, 3, check.Commentf("There should be 3 running containers, got %d", len(lines))) + for _, line := range lines { + f := strings.Fields(line) + switch f[0] { + case id1: + c.Assert(f[1], checker.Equals, "busybox", check.Commentf("Expected %s tag for id %s, got %s", "busybox", id1, f[1])) + case id2: + c.Assert(f[1], checker.Equals, tag, check.Commentf("Expected %s tag for id %s, got %s", tag, id2, f[1])) + case id3: + c.Assert(f[1], checker.Equals, imageID, check.Commentf("Expected %s imageID for id %s, got %s", imageID, id3, f[1])) + default: + c.Fatalf("Unexpected id %s, expected %s and %s and %s", f[0], id1, id2, id3) + } + } +} + +func (s *DockerSuite) TestPsLinkedWithNoTrunc(c *check.C) { + // Problematic on Windows as it doesn't support links as of Jan 2016 + testRequires(c, DaemonIsLinux) + runSleepingContainer(c, "--name=first") + runSleepingContainer(c, "--name=second", "--link=first:first") + + out, _ := dockerCmd(c, "ps", "--no-trunc") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + // strip header + lines = lines[1:] + expected := []string{"second", "first,second/first"} + var names []string + for _, l := range lines { + fields := strings.Fields(l) + names = append(names, fields[len(fields)-1]) + } + c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array: %v, got: %v", expected, names)) +} + +func (s *DockerSuite) TestPsGroupPortRange(c *check.C) { + // Problematic on Windows as it doesn't support port ranges as of Jan 2016 + testRequires(c, DaemonIsLinux) + portRange := "3850-3900" + dockerCmd(c, "run", "-d", "--name", "porttest", "-p", portRange+":"+portRange, "busybox", "top") + + out, _ := dockerCmd(c, "ps") + + c.Assert(string(out), checker.Contains, portRange, check.Commentf("docker ps output should have had the port range %q: %s", portRange, string(out))) + +} + +func (s *DockerSuite) TestPsWithSize(c *check.C) { + // Problematic on Windows as it doesn't report the size correctly @swernli + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name",
"sizetest", "busybox", "top") + + out, _ := dockerCmd(c, "ps", "--size") + c.Assert(out, checker.Contains, "virtual", check.Commentf("docker ps with --size should show virtual size of container")) +} + +func (s *DockerSuite) TestPsListContainersFilterCreated(c *check.C) { + // create a container + out, _ := dockerCmd(c, "create", "busybox") + cID := strings.TrimSpace(out) + shortCID := cID[:12] + + // Make sure it DOESN'T show up w/o a '-a' for normal 'ps' + out, _ = dockerCmd(c, "ps", "-q") + c.Assert(out, checker.Not(checker.Contains), shortCID, check.Commentf("Should have not seen '%s' in ps output:\n%s", shortCID, out)) + + // Make sure it DOES show up as 'Created' for 'ps -a' + out, _ = dockerCmd(c, "ps", "-a") + + hits := 0 + for _, line := range strings.Split(out, "\n") { + if !strings.Contains(line, shortCID) { + continue + } + hits++ + c.Assert(line, checker.Contains, "Created", check.Commentf("Missing 'Created' on '%s'", line)) + } + + c.Assert(hits, checker.Equals, 1, check.Commentf("Should have seen '%s' in ps -a output once:%d\n%s", shortCID, hits, out)) + + // filter containers by 'create' - note, no -a needed + out, _ = dockerCmd(c, "ps", "-q", "-f", "status=created") + containerOut := strings.TrimSpace(out) + c.Assert(cID, checker.HasPrefix, containerOut) +} + +func (s *DockerSuite) TestPsFormatMultiNames(c *check.C) { + // Problematic on Windows as it doesn't support link as of Jan 2016 + testRequires(c, DaemonIsLinux) + //create 2 containers and link them + dockerCmd(c, "run", "--name=child", "-d", "busybox", "top") + dockerCmd(c, "run", "--name=parent", "--link=child:linkedone", "-d", "busybox", "top") + + //use the new format capabilities to only list the names and --no-trunc to get all names + out, _ := dockerCmd(c, "ps", "--format", "{{.Names}}", "--no-trunc") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + expected := []string{"parent", "child,parent/linkedone"} + var names []string + names = append(names, lines...) + c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with non-truncated names: %v, got: %v", expected, names)) + + //now list without turning off truncation and make sure we only get the non-link names + out, _ = dockerCmd(c, "ps", "--format", "{{.Names}}") + lines = strings.Split(strings.TrimSpace(string(out)), "\n") + expected = []string{"parent", "child"} + var truncNames []string + truncNames = append(truncNames, lines...) + c.Assert(expected, checker.DeepEquals, truncNames, check.Commentf("Expected array with truncated names: %v, got: %v", expected, truncNames)) +} + +// Test for GitHub issue #21772 +func (s *DockerSuite) TestPsNamesMultipleTime(c *check.C) { + runSleepingContainer(c, "--name=test1") + runSleepingContainer(c, "--name=test2") + + //use the new format capabilities to list the names twice + out, _ := dockerCmd(c, "ps", "--format", "{{.Names}} {{.Names}}") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + expected := []string{"test2 test2", "test1 test1"} + var names []string + names = append(names, lines...) 
+ c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with names displayed twice: %v, got: %v", expected, names)) +} + +func (s *DockerSuite) TestPsFormatHeaders(c *check.C) { + // make sure no-container "docker ps" still prints the header row + out, _ := dockerCmd(c, "ps", "--format", "table {{.ID}}") + c.Assert(out, checker.Equals, "CONTAINER ID\n", check.Commentf(`Expected 'CONTAINER ID\n', got %v`, out)) + + // verify that "docker ps" with a container still prints the header row also + runSleepingContainer(c, "--name=test") + out, _ = dockerCmd(c, "ps", "--format", "table {{.Names}}") + c.Assert(out, checker.Equals, "NAMES\ntest\n", check.Commentf(`Expected 'NAMES\ntest\n', got %v`, out)) +} + +func (s *DockerSuite) TestPsDefaultFormatAndQuiet(c *check.C) { + config := `{ + "psFormat": "default {{ .ID }}" +}` + d, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(d) + + err = ioutil.WriteFile(filepath.Join(d, "config.json"), []byte(config), 0644) + c.Assert(err, checker.IsNil) + + out := runSleepingContainer(c, "--name=test") + id := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "--config", d, "ps", "-q") + c.Assert(id, checker.HasPrefix, strings.TrimSpace(out), check.Commentf("Expected to print only the container id, got %v\n", out)) +} + +// Test for GitHub issue #12595 +func (s *DockerSuite) TestPsImageIDAfterUpdate(c *check.C) { + // TODO: Investigate why this fails on Windows to Windows CI further. + testRequires(c, DaemonIsLinux) + originalImageName := "busybox:TestPsImageIDAfterUpdate-original" + updatedImageName := "busybox:TestPsImageIDAfterUpdate-updated" + + icmd.RunCommand(dockerBinary, "tag", "busybox:latest", originalImageName).Assert(c, icmd.Success) + + originalImageID := getIDByName(c, originalImageName) + + result := icmd.RunCommand(dockerBinary, append([]string{"run", "-d", originalImageName}, sleepCommandForDaemonPlatform()...)...) 
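+ // sleepCommandForDaemonPlatform supplies a platform-appropriate
+ // long-running command, so the container stays up for the two
+ // `docker ps --no-trunc` listings taken below.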
+ result.Assert(c, icmd.Success) + containerID := strings.TrimSpace(result.Combined()) + + result = icmd.RunCommand(dockerBinary, "ps", "--no-trunc") + result.Assert(c, icmd.Success) + + lines := strings.Split(strings.TrimSpace(string(result.Combined())), "\n") + // skip header + lines = lines[1:] + c.Assert(len(lines), checker.Equals, 1) + + for _, line := range lines { + f := strings.Fields(line) + c.Assert(f[1], checker.Equals, originalImageName) + } + + icmd.RunCommand(dockerBinary, "commit", containerID, updatedImageName).Assert(c, icmd.Success) + icmd.RunCommand(dockerBinary, "tag", updatedImageName, originalImageName).Assert(c, icmd.Success) + + result = icmd.RunCommand(dockerBinary, "ps", "--no-trunc") + result.Assert(c, icmd.Success) + + lines = strings.Split(strings.TrimSpace(string(result.Combined())), "\n") + // skip header + lines = lines[1:] + c.Assert(len(lines), checker.Equals, 1) + + for _, line := range lines { + f := strings.Fields(line) + c.Assert(f[1], checker.Equals, originalImageID) + } + +} + +func (s *DockerSuite) TestPsNotShowPortsOfStoppedContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "--name=foo", "-d", "-p", "5000:5000", "busybox", "top") + c.Assert(waitRun("foo"), checker.IsNil) + out, _ := dockerCmd(c, "ps") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + expected := "0.0.0.0:5000->5000/tcp" + fields := strings.Fields(lines[1]) + c.Assert(fields[len(fields)-2], checker.Equals, expected, check.Commentf("Expected: %v, got: %v", expected, fields[len(fields)-2])) + + dockerCmd(c, "kill", "foo") + dockerCmd(c, "wait", "foo") + out, _ = dockerCmd(c, "ps", "-l") + lines = strings.Split(strings.TrimSpace(string(out)), "\n") + fields = strings.Fields(lines[1]) + c.Assert(fields[len(fields)-2], checker.Not(checker.Equals), expected, check.Commentf("Should not got %v", expected)) +} + +func (s *DockerSuite) TestPsShowMounts(c *check.C) { + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + + mp := prefix + slash + "test" + + dockerCmd(c, "volume", "create", "ps-volume-test") + // volume mount containers + runSleepingContainer(c, "--name=volume-test-1", "--volume", "ps-volume-test:"+mp) + c.Assert(waitRun("volume-test-1"), checker.IsNil) + runSleepingContainer(c, "--name=volume-test-2", "--volume", mp) + c.Assert(waitRun("volume-test-2"), checker.IsNil) + // bind mount container + var bindMountSource string + var bindMountDestination string + if DaemonIsWindows() { + bindMountSource = "c:\\" + bindMountDestination = "c:\\t" + } else { + bindMountSource = "/tmp" + bindMountDestination = "/t" + } + runSleepingContainer(c, "--name=bind-mount-test", "-v", bindMountSource+":"+bindMountDestination) + c.Assert(waitRun("bind-mount-test"), checker.IsNil) + + out, _ := dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}") + + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + c.Assert(lines, checker.HasLen, 3) + + fields := strings.Fields(lines[0]) + c.Assert(fields, checker.HasLen, 2) + c.Assert(fields[0], checker.Equals, "bind-mount-test") + c.Assert(fields[1], checker.Equals, bindMountSource) + + fields = strings.Fields(lines[1]) + c.Assert(fields, checker.HasLen, 2) + + anonymousVolumeID := fields[1] + + fields = strings.Fields(lines[2]) + c.Assert(fields[1], checker.Equals, "ps-volume-test") + + // filter by volume name + out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume=ps-volume-test") + + lines = strings.Split(strings.TrimSpace(string(out)), "\n") + c.Assert(lines, 
checker.HasLen, 1) + + fields = strings.Fields(lines[0]) + c.Assert(fields[1], checker.Equals, "ps-volume-test") + + // empty results filtering by unknown volume + out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume=this-volume-should-not-exist") + c.Assert(strings.TrimSpace(string(out)), checker.HasLen, 0) + + // filter by mount destination + out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume="+mp) + + lines = strings.Split(strings.TrimSpace(string(out)), "\n") + c.Assert(lines, checker.HasLen, 2) + + fields = strings.Fields(lines[0]) + c.Assert(fields[1], checker.Equals, anonymousVolumeID) + fields = strings.Fields(lines[1]) + c.Assert(fields[1], checker.Equals, "ps-volume-test") + + // filter by bind mount source + out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume="+bindMountSource) + + lines = strings.Split(strings.TrimSpace(string(out)), "\n") + c.Assert(lines, checker.HasLen, 1) + + fields = strings.Fields(lines[0]) + c.Assert(fields, checker.HasLen, 2) + c.Assert(fields[0], checker.Equals, "bind-mount-test") + c.Assert(fields[1], checker.Equals, bindMountSource) + + // filter by bind mount destination + out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume="+bindMountDestination) + + lines = strings.Split(strings.TrimSpace(string(out)), "\n") + c.Assert(lines, checker.HasLen, 1) + + fields = strings.Fields(lines[0]) + c.Assert(fields, checker.HasLen, 2) + c.Assert(fields[0], checker.Equals, "bind-mount-test") + c.Assert(fields[1], checker.Equals, bindMountSource) + + // empty results filtering by unknown mount point + out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume="+prefix+slash+"this-path-was-never-mounted") + c.Assert(strings.TrimSpace(string(out)), checker.HasLen, 0) +} + +func (s *DockerSuite) TestPsFormatSize(c *check.C) { + testRequires(c, DaemonIsLinux) + runSleepingContainer(c) + + out, _ := dockerCmd(c, "ps", "--format", "table {{.Size}}") + lines := strings.Split(out, "\n") + c.Assert(lines[1], checker.Not(checker.Equals), "0 B", check.Commentf("Should not display a size of 0 B")) + + out, _ = dockerCmd(c, "ps", "--size", "--format", "table {{.Size}}") + lines = strings.Split(out, "\n") + c.Assert(lines[0], checker.Equals, "SIZE", check.Commentf("Should only have one size column")) + + out, _ = dockerCmd(c, "ps", "--size", "--format", "raw") + lines = strings.Split(out, "\n") + c.Assert(lines[8], checker.HasPrefix, "size:", check.Commentf("Size should be appended on a newline")) +} + +func (s *DockerSuite) TestPsListContainersFilterNetwork(c *check.C) { + // TODO default network on Windows is not called "bridge", and creating a + // custom network fails on Windows fails with "Error response from daemon: plugin not found") + testRequires(c, DaemonIsLinux) + + // create some containers + runSleepingContainer(c, "--net=bridge", "--name=onbridgenetwork") + runSleepingContainer(c, "--net=none", "--name=onnonenetwork") + + // Filter docker ps on non existing network + out, _ := dockerCmd(c, "ps", "--filter", "network=doesnotexist") + containerOut := strings.TrimSpace(string(out)) + lines := strings.Split(containerOut, "\n") + + // skip header + lines = lines[1:] + + // ps output should have no containers + c.Assert(lines, checker.HasLen, 0) + + // Filter docker ps on network bridge + out, _ = dockerCmd(c, "ps", "--filter", "network=bridge") + containerOut = strings.TrimSpace(string(out)) + + lines = 
strings.Split(containerOut, "\n") + + // skip header + lines = lines[1:] + + // ps output should have only one container + c.Assert(lines, checker.HasLen, 1) + + // Make sure onbridgenetwork is in the output + c.Assert(containerOut, checker.Contains, "onbridgenetwork", check.Commentf("Missing the container on network\n")) + + // Filter docker ps on networks bridge and none + out, _ = dockerCmd(c, "ps", "--filter", "network=bridge", "--filter", "network=none") + containerOut = strings.TrimSpace(string(out)) + + lines = strings.Split(containerOut, "\n") + + // skip header + lines = lines[1:] + + // ps output should have both containers + c.Assert(lines, checker.HasLen, 2) + + // Make sure onbridgenetwork and onnonenetwork are in the output + c.Assert(containerOut, checker.Contains, "onnonenetwork", check.Commentf("Missing the container on none network\n")) + c.Assert(containerOut, checker.Contains, "onbridgenetwork", check.Commentf("Missing the container on bridge network\n")) + + nwID, _ := dockerCmd(c, "network", "inspect", "--format", "{{.ID}}", "bridge") + + // Filter by network ID + out, _ = dockerCmd(c, "ps", "--filter", "network="+nwID) + containerOut = strings.TrimSpace(string(out)) + + c.Assert(containerOut, checker.Contains, "onbridgenetwork") + + // Filter by partial network ID + partialnwID := string(nwID[0:4]) + + out, _ = dockerCmd(c, "ps", "--filter", "network="+partialnwID) + containerOut = strings.TrimSpace(string(out)) + + lines = strings.Split(containerOut, "\n") + // skip header + lines = lines[1:] + + // ps output should have only one container + c.Assert(lines, checker.HasLen, 1) + + // Make sure onbridgenetwork is in the output + c.Assert(containerOut, checker.Contains, "onbridgenetwork", check.Commentf("Missing the container on network\n")) + +} + +func (s *DockerSuite) TestPsByOrder(c *check.C) { + name1 := "xyz-abc" + out := runSleepingContainer(c, "--name", name1) + container1 := strings.TrimSpace(out) + + name2 := "xyz-123" + out = runSleepingContainer(c, "--name", name2) + container2 := strings.TrimSpace(out) + + name3 := "789-abc" + out = runSleepingContainer(c, "--name", name3) + + name4 := "789-123" + out = runSleepingContainer(c, "--name", name4) + + // Running multiple times should give the same result + out = cli.DockerCmd(c, "ps", "--no-trunc", "-q", "-f", "name=xyz").Combined() + c.Assert(strings.TrimSpace(out), checker.Equals, fmt.Sprintf("%s\n%s", container2, container1)) + + // Running multiple times should give the same result + out = cli.DockerCmd(c, "ps", "--no-trunc", "-q", "-f", "name=xyz").Combined() + c.Assert(strings.TrimSpace(out), checker.Equals, fmt.Sprintf("%s\n%s", container2, container1)) +} + +func (s *DockerSuite) TestPsFilterMissingArgErrorCode(c *check.C) { + _, errCode, _ := dockerCmdWithError("ps", "--filter") + c.Assert(errCode, checker.Equals, 125) +} + +// Test case for #30291 +func (s *DockerSuite) TestPsFormatTemplateWithArg(c *check.C) { + runSleepingContainer(c, "-d", "--name", "top", "--label", "some.label=label.foo-bar") + out, _ := dockerCmd(c, "ps", "--format", `{{.Names}} {{.Label "some.label"}}`) + c.Assert(strings.TrimSpace(out), checker.Equals, "top label.foo-bar") +} + +func (s *DockerSuite) TestPsListContainersFilterPorts(c *check.C) { + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "--publish=80", "busybox", "top") + id1 := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "run", "-d", "--expose=8080", "busybox", "top") + id2 := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "ps",
"--no-trunc", "-q") + c.Assert(strings.TrimSpace(out), checker.Contains, id1) + c.Assert(strings.TrimSpace(out), checker.Contains, id2) + + out, _ = dockerCmd(c, "ps", "--no-trunc", "-q", "--filter", "publish=80-8080/udp") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), id1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), id2) + + out, _ = dockerCmd(c, "ps", "--no-trunc", "-q", "--filter", "expose=8081") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), id1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), id2) + + out, _ = dockerCmd(c, "ps", "--no-trunc", "-q", "--filter", "publish=80-81") + c.Assert(strings.TrimSpace(out), checker.Equals, id1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), id2) + + out, _ = dockerCmd(c, "ps", "--no-trunc", "-q", "--filter", "expose=80/tcp") + c.Assert(strings.TrimSpace(out), checker.Equals, id1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), id2) + + out, _ = dockerCmd(c, "ps", "--no-trunc", "-q", "--filter", "expose=8080/tcp") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), id1) + c.Assert(strings.TrimSpace(out), checker.Equals, id2) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_pull_local_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_pull_local_test.go new file mode 100644 index 000000000..a45e31359 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_pull_local_test.go @@ -0,0 +1,470 @@ +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli/build" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" + "github.com/opencontainers/go-digest" +) + +// testPullImageWithAliases pulls a specific image tag and verifies that any aliases (i.e., other +// tags for the same image) are not also pulled down. +// +// Ref: docker/docker#8141 +func testPullImageWithAliases(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + + repos := []string{} + for _, tag := range []string{"recent", "fresh"} { + repos = append(repos, fmt.Sprintf("%v:%v", repoName, tag)) + } + + // Tag and push the same image multiple times. + for _, repo := range repos { + dockerCmd(c, "tag", "busybox", repo) + dockerCmd(c, "push", repo) + } + + // Clear local images store. + args := append([]string{"rmi"}, repos...) + dockerCmd(c, args...) + + // Pull a single tag and verify it doesn't bring down all aliases. + dockerCmd(c, "pull", repos[0]) + dockerCmd(c, "inspect", repos[0]) + for _, repo := range repos[1:] { + _, _, err := dockerCmdWithError("inspect", repo) + c.Assert(err, checker.NotNil, check.Commentf("Image %v shouldn't have been pulled down", repo)) + } +} + +func (s *DockerRegistrySuite) TestPullImageWithAliases(c *check.C) { + testPullImageWithAliases(c) +} + +func (s *DockerSchema1RegistrySuite) TestPullImageWithAliases(c *check.C) { + testPullImageWithAliases(c) +} + +// testConcurrentPullWholeRepo pulls the same repo concurrently. 
+func testConcurrentPullWholeRepo(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + + repos := []string{} + for _, tag := range []string{"recent", "fresh", "todays"} { + repo := fmt.Sprintf("%v:%v", repoName, tag) + buildImageSuccessfully(c, repo, build.WithDockerfile(fmt.Sprintf(` + FROM busybox + ENTRYPOINT ["/bin/echo"] + ENV FOO foo + ENV BAR bar + CMD echo %s + `, repo))) + dockerCmd(c, "push", repo) + repos = append(repos, repo) + } + + // Clear local images store. + args := append([]string{"rmi"}, repos...) + dockerCmd(c, args...) + + // Run multiple re-pulls concurrently + results := make(chan error) + numPulls := 3 + + for i := 0; i != numPulls; i++ { + go func() { + result := icmd.RunCommand(dockerBinary, "pull", "-a", repoName) + results <- result.Error + }() + } + + // These checks are separate from the loop above because the check + // package is not goroutine-safe. + for i := 0; i != numPulls; i++ { + err := <-results + c.Assert(err, checker.IsNil, check.Commentf("concurrent pull failed with error: %v", err)) + } + + // Ensure all tags were pulled successfully + for _, repo := range repos { + dockerCmd(c, "inspect", repo) + out, _ := dockerCmd(c, "run", "--rm", repo) + c.Assert(strings.TrimSpace(out), checker.Equals, "/bin/sh -c echo "+repo) + } +} + +func (s *DockerRegistrySuite) testConcurrentPullWholeRepo(c *check.C) { + testConcurrentPullWholeRepo(c) +} + +func (s *DockerSchema1RegistrySuite) testConcurrentPullWholeRepo(c *check.C) { + testConcurrentPullWholeRepo(c) +} + +// testConcurrentFailingPull tries a concurrent pull that doesn't succeed. +func testConcurrentFailingPull(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + + // Run multiple pulls concurrently + results := make(chan error) + numPulls := 3 + + for i := 0; i != numPulls; i++ { + go func() { + result := icmd.RunCommand(dockerBinary, "pull", repoName+":asdfasdf") + results <- result.Error + }() + } + + // These checks are separate from the loop above because the check + // package is not goroutine-safe. + for i := 0; i != numPulls; i++ { + err := <-results + c.Assert(err, checker.NotNil, check.Commentf("expected pull to fail")) + } +} + +func (s *DockerRegistrySuite) testConcurrentFailingPull(c *check.C) { + testConcurrentFailingPull(c) +} + +func (s *DockerSchema1RegistrySuite) testConcurrentFailingPull(c *check.C) { + testConcurrentFailingPull(c) +} + +// testConcurrentPullMultipleTags pulls multiple tags from the same repo +// concurrently. +func testConcurrentPullMultipleTags(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + + repos := []string{} + for _, tag := range []string{"recent", "fresh", "todays"} { + repo := fmt.Sprintf("%v:%v", repoName, tag) + buildImageSuccessfully(c, repo, build.WithDockerfile(fmt.Sprintf(` + FROM busybox + ENTRYPOINT ["/bin/echo"] + ENV FOO foo + ENV BAR bar + CMD echo %s + `, repo))) + dockerCmd(c, "push", repo) + repos = append(repos, repo) + } + + // Clear local images store. + args := append([]string{"rmi"}, repos...) + dockerCmd(c, args...) + + // Re-pull individual tags, in parallel + results := make(chan error) + + for _, repo := range repos { + go func(repo string) { + result := icmd.RunCommand(dockerBinary, "pull", repo) + results <- result.Error + }(repo) + } + + // These checks are separate from the loop above because the check + // package is not goroutine-safe. 
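+ // Fan-in: receive exactly one result per worker so the channel drains
+ // fully and no goroutine leaks; all assertions stay on the main test
+ // goroutine.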
+ for range repos { + err := <-results + c.Assert(err, checker.IsNil, check.Commentf("concurrent pull failed with error: %v", err)) + } + + // Ensure all tags were pulled successfully + for _, repo := range repos { + dockerCmd(c, "inspect", repo) + out, _ := dockerCmd(c, "run", "--rm", repo) + c.Assert(strings.TrimSpace(out), checker.Equals, "/bin/sh -c echo "+repo) + } +} + +func (s *DockerRegistrySuite) TestConcurrentPullMultipleTags(c *check.C) { + testConcurrentPullMultipleTags(c) +} + +func (s *DockerSchema1RegistrySuite) TestConcurrentPullMultipleTags(c *check.C) { + testConcurrentPullMultipleTags(c) +} + +// testPullIDStability verifies that pushing an image and pulling it back +// preserves the image ID. +func testPullIDStability(c *check.C) { + derivedImage := privateRegistryURL + "/dockercli/id-stability" + baseImage := "busybox" + + buildImageSuccessfully(c, derivedImage, build.WithDockerfile(fmt.Sprintf(` + FROM %s + ENV derived true + ENV asdf true + RUN dd if=/dev/zero of=/file bs=1024 count=1024 + CMD echo %s + `, baseImage, derivedImage))) + + originalID := getIDByName(c, derivedImage) + dockerCmd(c, "push", derivedImage) + + // Pull + out, _ := dockerCmd(c, "pull", derivedImage) + if strings.Contains(out, "Pull complete") { + c.Fatalf("repull redownloaded a layer: %s", out) + } + + derivedIDAfterPull := getIDByName(c, derivedImage) + + if derivedIDAfterPull != originalID { + c.Fatal("image's ID unexpectedly changed after a repush/repull") + } + + // Make sure the image runs correctly + out, _ = dockerCmd(c, "run", "--rm", derivedImage) + if strings.TrimSpace(out) != derivedImage { + c.Fatalf("expected %s; got %s", derivedImage, out) + } + + // Confirm that repushing and repulling does not change the computed ID + dockerCmd(c, "push", derivedImage) + dockerCmd(c, "rmi", derivedImage) + dockerCmd(c, "pull", derivedImage) + + derivedIDAfterPull = getIDByName(c, derivedImage) + + if derivedIDAfterPull != originalID { + c.Fatal("image's ID unexpectedly changed after a repush/repull") + } + + // Make sure the image still runs + out, _ = dockerCmd(c, "run", "--rm", derivedImage) + if strings.TrimSpace(out) != derivedImage { + c.Fatalf("expected %s; got %s", derivedImage, out) + } +} + +func (s *DockerRegistrySuite) TestPullIDStability(c *check.C) { + testPullIDStability(c) +} + +func (s *DockerSchema1RegistrySuite) TestPullIDStability(c *check.C) { + testPullIDStability(c) +} + +// #21213 +func testPullNoLayers(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/scratch", privateRegistryURL) + + buildImageSuccessfully(c, repoName, build.WithDockerfile(` + FROM scratch + ENV foo bar`)) + dockerCmd(c, "push", repoName) + dockerCmd(c, "rmi", repoName) + dockerCmd(c, "pull", repoName) +} + +func (s *DockerRegistrySuite) TestPullNoLayers(c *check.C) { + testPullNoLayers(c) +} + +func (s *DockerSchema1RegistrySuite) TestPullNoLayers(c *check.C) { + testPullNoLayers(c) +} + +func (s *DockerRegistrySuite) TestPullManifestList(c *check.C) { + testRequires(c, NotArm) + pushDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + // Inject a manifest list into the registry + manifestList := &manifestlist.ManifestList{ + Versioned: manifest.Versioned{ + SchemaVersion: 2, + MediaType: manifestlist.MediaTypeManifestList, + }, + Manifests: []manifestlist.ManifestDescriptor{ + { + Descriptor: distribution.Descriptor{ + Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", + Size: 3253, + MediaType: 
schema2.MediaTypeManifest, + }, + Platform: manifestlist.PlatformSpec{ + Architecture: "bogus_arch", + OS: "bogus_os", + }, + }, + { + Descriptor: distribution.Descriptor{ + Digest: pushDigest, + Size: 3253, + MediaType: schema2.MediaTypeManifest, + }, + Platform: manifestlist.PlatformSpec{ + Architecture: runtime.GOARCH, + OS: runtime.GOOS, + }, + }, + }, + } + + manifestListJSON, err := json.MarshalIndent(manifestList, "", " ") + c.Assert(err, checker.IsNil, check.Commentf("error marshalling manifest list")) + + manifestListDigest := digest.FromBytes(manifestListJSON) + hexDigest := manifestListDigest.Hex() + + registryV2Path := s.reg.Path() + + // Write manifest list to blob store + blobDir := filepath.Join(registryV2Path, "blobs", "sha256", hexDigest[:2], hexDigest) + err = os.MkdirAll(blobDir, 0755) + c.Assert(err, checker.IsNil, check.Commentf("error creating blob dir")) + blobPath := filepath.Join(blobDir, "data") + err = ioutil.WriteFile(blobPath, []byte(manifestListJSON), 0644) + c.Assert(err, checker.IsNil, check.Commentf("error writing manifest list")) + + // Add to revision store + revisionDir := filepath.Join(registryV2Path, "repositories", remoteRepoName, "_manifests", "revisions", "sha256", hexDigest) + err = os.Mkdir(revisionDir, 0755) + c.Assert(err, checker.IsNil, check.Commentf("error creating revision dir")) + revisionPath := filepath.Join(revisionDir, "link") + err = ioutil.WriteFile(revisionPath, []byte(manifestListDigest.String()), 0644) + c.Assert(err, checker.IsNil, check.Commentf("error writing revision link")) + + // Update tag + tagPath := filepath.Join(registryV2Path, "repositories", remoteRepoName, "_manifests", "tags", "latest", "current", "link") + err = ioutil.WriteFile(tagPath, []byte(manifestListDigest.String()), 0644) + c.Assert(err, checker.IsNil, check.Commentf("error writing tag link")) + + // Verify that the image can be pulled through the manifest list. + out, _ := dockerCmd(c, "pull", repoName) + + // The pull output includes "Digest: ", so find that + matches := digestRegex.FindStringSubmatch(out) + c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", out)) + pullDigest := matches[1] + + // Make sure the pushed and pull digests match + c.Assert(manifestListDigest.String(), checker.Equals, pullDigest) + + // Was the image actually created? 
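+ // dockerCmd asserts a zero exit code, so a clean `docker inspect`
+ // here is enough to prove the manifest-list pull materialized a
+ // local image.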
+ dockerCmd(c, "inspect", repoName) + + dockerCmd(c, "rmi", repoName) +} + +// #23100 +func (s *DockerRegistryAuthHtpasswdSuite) TestPullWithExternalAuthLoginWithScheme(c *check.C) { + osPath := os.Getenv("PATH") + defer os.Setenv("PATH", osPath) + + workingDir, err := os.Getwd() + c.Assert(err, checker.IsNil) + absolute, err := filepath.Abs(filepath.Join(workingDir, "fixtures", "auth")) + c.Assert(err, checker.IsNil) + testPath := fmt.Sprintf("%s%c%s", osPath, filepath.ListSeparator, absolute) + + os.Setenv("PATH", testPath) + + repoName := fmt.Sprintf("%v/dockercli/busybox:authtest", privateRegistryURL) + + tmp, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + + externalAuthConfig := `{ "credsStore": "shell-test" }` + + configPath := filepath.Join(tmp, "config.json") + err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "--config", tmp, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), privateRegistryURL) + + b, err := ioutil.ReadFile(configPath) + c.Assert(err, checker.IsNil) + c.Assert(string(b), checker.Not(checker.Contains), "\"auth\":") + + dockerCmd(c, "--config", tmp, "tag", "busybox", repoName) + dockerCmd(c, "--config", tmp, "push", repoName) + + dockerCmd(c, "--config", tmp, "logout", privateRegistryURL) + dockerCmd(c, "--config", tmp, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), "https://"+privateRegistryURL) + dockerCmd(c, "--config", tmp, "pull", repoName) + + // likewise push should work + repoName2 := fmt.Sprintf("%v/dockercli/busybox:nocreds", privateRegistryURL) + dockerCmd(c, "tag", repoName, repoName2) + dockerCmd(c, "--config", tmp, "push", repoName2) + + // logout should work w scheme also because it will be stripped + dockerCmd(c, "--config", tmp, "logout", "https://"+privateRegistryURL) +} + +func (s *DockerRegistryAuthHtpasswdSuite) TestPullWithExternalAuth(c *check.C) { + osPath := os.Getenv("PATH") + defer os.Setenv("PATH", osPath) + + workingDir, err := os.Getwd() + c.Assert(err, checker.IsNil) + absolute, err := filepath.Abs(filepath.Join(workingDir, "fixtures", "auth")) + c.Assert(err, checker.IsNil) + testPath := fmt.Sprintf("%s%c%s", osPath, filepath.ListSeparator, absolute) + + os.Setenv("PATH", testPath) + + repoName := fmt.Sprintf("%v/dockercli/busybox:authtest", privateRegistryURL) + + tmp, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + + externalAuthConfig := `{ "credsStore": "shell-test" }` + + configPath := filepath.Join(tmp, "config.json") + err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "--config", tmp, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), privateRegistryURL) + + b, err := ioutil.ReadFile(configPath) + c.Assert(err, checker.IsNil) + c.Assert(string(b), checker.Not(checker.Contains), "\"auth\":") + + dockerCmd(c, "--config", tmp, "tag", "busybox", repoName) + dockerCmd(c, "--config", tmp, "push", repoName) + + dockerCmd(c, "--config", tmp, "pull", repoName) +} + +// TestRunImplicitPullWithNoTag should pull implicitly only the default tag (latest) +func (s *DockerRegistrySuite) TestRunImplicitPullWithNoTag(c *check.C) { + testRequires(c, DaemonIsLinux) + repo := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + repoTag1 := fmt.Sprintf("%v:latest", repo) + repoTag2 := fmt.Sprintf("%v:t1", repo) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoTag1) + dockerCmd(c, 
"tag", "busybox", repoTag2) + dockerCmd(c, "push", repo) + dockerCmd(c, "rmi", repoTag1) + dockerCmd(c, "rmi", repoTag2) + + out, _ := dockerCmd(c, "run", repo) + c.Assert(out, checker.Contains, fmt.Sprintf("Unable to find image '%s:latest' locally", repo)) + + // There should be only one line for repo, the one with repo:latest + outImageCmd, _ := dockerCmd(c, "images", repo) + splitOutImageCmd := strings.Split(strings.TrimSpace(outImageCmd), "\n") + c.Assert(splitOutImageCmd, checker.HasLen, 2) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_pull_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_pull_test.go new file mode 100644 index 000000000..fd91edb81 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_pull_test.go @@ -0,0 +1,284 @@ +package main + +import ( + "fmt" + "regexp" + "strings" + "sync" + "time" + + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" + "github.com/opencontainers/go-digest" +) + +// TestPullFromCentralRegistry pulls an image from the central registry and verifies that the client +// prints all expected output. +func (s *DockerHubPullSuite) TestPullFromCentralRegistry(c *check.C) { + testRequires(c, DaemonIsLinux) + out := s.Cmd(c, "pull", "hello-world") + defer deleteImages("hello-world") + + c.Assert(out, checker.Contains, "Using default tag: latest", check.Commentf("expected the 'latest' tag to be automatically assumed")) + c.Assert(out, checker.Contains, "Pulling from library/hello-world", check.Commentf("expected the 'library/' prefix to be automatically assumed")) + c.Assert(out, checker.Contains, "Downloaded newer image for hello-world:latest") + + matches := regexp.MustCompile(`Digest: (.+)\n`).FindAllStringSubmatch(out, -1) + c.Assert(len(matches), checker.Equals, 1, check.Commentf("expected exactly one image digest in the output")) + c.Assert(len(matches[0]), checker.Equals, 2, check.Commentf("unexpected number of submatches for the digest")) + _, err := digest.Parse(matches[0][1]) + c.Check(err, checker.IsNil, check.Commentf("invalid digest %q in output", matches[0][1])) + + // We should have a single entry in images. + img := strings.TrimSpace(s.Cmd(c, "images")) + splitImg := strings.Split(img, "\n") + c.Assert(splitImg, checker.HasLen, 2) + c.Assert(splitImg[1], checker.Matches, `hello-world\s+latest.*?`, check.Commentf("invalid output for `docker images` (expected image and tag name")) +} + +// TestPullNonExistingImage pulls non-existing images from the central registry, with different +// combinations of implicit tag and library prefix. +func (s *DockerHubPullSuite) TestPullNonExistingImage(c *check.C) { + testRequires(c, DaemonIsLinux) + + type entry struct { + repo string + alias string + tag string + } + + entries := []entry{ + {"asdfasdf", "asdfasdf", "foobar"}, + {"asdfasdf", "library/asdfasdf", "foobar"}, + {"asdfasdf", "asdfasdf", ""}, + {"asdfasdf", "asdfasdf", "latest"}, + {"asdfasdf", "library/asdfasdf", ""}, + {"asdfasdf", "library/asdfasdf", "latest"}, + } + + // The option field indicates "-a" or not. + type record struct { + e entry + option string + out string + err error + } + + // Execute 'docker pull' in parallel, pass results (out, err) and + // necessary information ("-a" or not, and the image name) to channel. 
+ var group sync.WaitGroup + recordChan := make(chan record, len(entries)*2) + for _, e := range entries { + group.Add(1) + go func(e entry) { + defer group.Done() + repoName := e.alias + if e.tag != "" { + repoName += ":" + e.tag + } + out, err := s.CmdWithError("pull", repoName) + recordChan <- record{e, "", out, err} + }(e) + if e.tag == "" { + // pull -a on a nonexistent registry should fall back as well + group.Add(1) + go func(e entry) { + defer group.Done() + out, err := s.CmdWithError("pull", "-a", e.alias) + recordChan <- record{e, "-a", out, err} + }(e) + } + } + + // Wait for completion + group.Wait() + close(recordChan) + + // Process the results (out, err). + for record := range recordChan { + if len(record.option) == 0 { + c.Assert(record.err, checker.NotNil, check.Commentf("expected non-zero exit status when pulling non-existing image: %s", record.out)) + c.Assert(record.out, checker.Contains, fmt.Sprintf("pull access denied for %s, repository does not exist or may require 'docker login'", record.e.repo), check.Commentf("expected image not found error messages")) + } else { + // pull -a on a nonexistent registry should fall back as well + c.Assert(record.err, checker.NotNil, check.Commentf("expected non-zero exit status when pulling non-existing image: %s", record.out)) + c.Assert(record.out, checker.Contains, fmt.Sprintf("pull access denied for %s, repository does not exist or may require 'docker login'", record.e.repo), check.Commentf("expected image not found error messages")) + c.Assert(record.out, checker.Not(checker.Contains), "unauthorized", check.Commentf(`message should not contain "unauthorized"`)) + } + } + +} + +// TestPullFromCentralRegistryImplicitRefParts pulls an image from the central registry and verifies +// that pulling the same image with different combinations of implicit elements of the image +// reference (tag, repository, central registry url, ...) doesn't trigger a new pull nor leads to +// multiple images. +func (s *DockerHubPullSuite) TestPullFromCentralRegistryImplicitRefParts(c *check.C) { + testRequires(c, DaemonIsLinux) + + // Pull hello-world from v2 + pullFromV2 := func(ref string) (int, string) { + out := s.Cmd(c, "pull", "hello-world") + v1Retries := 0 + for strings.Contains(out, "this image was pulled from a legacy registry") { + // Some network errors may cause fallbacks to the v1 + // protocol, which would violate the test's assumption + // that it will get the same images. To make the test + // more robust against these network glitches, allow a + // few retries if we end up with a v1 pull. + + if v1Retries > 2 { + c.Fatalf("too many v1 fallback incidents when pulling %s", ref) + } + + s.Cmd(c, "rmi", ref) + out = s.Cmd(c, "pull", ref) + + v1Retries++ + } + + return v1Retries, out + } + + pullFromV2("hello-world") + defer deleteImages("hello-world") + + s.Cmd(c, "tag", "hello-world", "hello-world-backup") + + for _, ref := range []string{ + "hello-world", + "hello-world:latest", + "library/hello-world", + "library/hello-world:latest", + "docker.io/library/hello-world", + "index.docker.io/library/hello-world", + } { + var out string + for { + var v1Retries int + v1Retries, out = pullFromV2(ref) + + // Keep repeating the test case until we don't hit a v1 + // fallback case. We won't get the right "Image is up + // to date" message if the local image was replaced + // with one pulled from v1. 
+ if v1Retries == 0 { + break + } + s.Cmd(c, "rmi", ref) + s.Cmd(c, "tag", "hello-world-backup", "hello-world") + } + c.Assert(out, checker.Contains, "Image is up to date for hello-world:latest") + } + + s.Cmd(c, "rmi", "hello-world-backup") + + // We should have a single entry in images. + img := strings.TrimSpace(s.Cmd(c, "images")) + splitImg := strings.Split(img, "\n") + c.Assert(splitImg, checker.HasLen, 2) + c.Assert(splitImg[1], checker.Matches, `hello-world\s+latest.*?`, check.Commentf("invalid output for `docker images` (expected image and tag name")) +} + +// TestPullScratchNotAllowed verifies that pulling 'scratch' is rejected. +func (s *DockerHubPullSuite) TestPullScratchNotAllowed(c *check.C) { + testRequires(c, DaemonIsLinux) + out, err := s.CmdWithError("pull", "scratch") + c.Assert(err, checker.NotNil, check.Commentf("expected pull of scratch to fail")) + c.Assert(out, checker.Contains, "'scratch' is a reserved name") + c.Assert(out, checker.Not(checker.Contains), "Pulling repository scratch") +} + +// TestPullAllTagsFromCentralRegistry pulls using `all-tags` for a given image and verifies that it +// results in more images than a naked pull. +func (s *DockerHubPullSuite) TestPullAllTagsFromCentralRegistry(c *check.C) { + testRequires(c, DaemonIsLinux) + s.Cmd(c, "pull", "busybox") + outImageCmd := s.Cmd(c, "images", "busybox") + splitOutImageCmd := strings.Split(strings.TrimSpace(outImageCmd), "\n") + c.Assert(splitOutImageCmd, checker.HasLen, 2) + + s.Cmd(c, "pull", "--all-tags=true", "busybox") + outImageAllTagCmd := s.Cmd(c, "images", "busybox") + linesCount := strings.Count(outImageAllTagCmd, "\n") + c.Assert(linesCount, checker.GreaterThan, 2, check.Commentf("pulling all tags should provide more than two images, got %s", outImageAllTagCmd)) + + // Verify that the line for 'busybox:latest' is left unchanged. + var latestLine string + for _, line := range strings.Split(outImageAllTagCmd, "\n") { + if strings.HasPrefix(line, "busybox") && strings.Contains(line, "latest") { + latestLine = line + break + } + } + c.Assert(latestLine, checker.Not(checker.Equals), "", check.Commentf("no entry for busybox:latest found after pulling all tags")) + splitLatest := strings.Fields(latestLine) + splitCurrent := strings.Fields(splitOutImageCmd[1]) + + // Clear relative creation times, since these can easily change between + // two invocations of "docker images". Without this, the test can fail + // like this: + // ... obtained []string = []string{"busybox", "latest", "d9551b4026f0", "27", "minutes", "ago", "1.113", "MB"} + // ... expected []string = []string{"busybox", "latest", "d9551b4026f0", "26", "minutes", "ago", "1.113", "MB"} + splitLatest[3] = "" + splitLatest[4] = "" + splitLatest[5] = "" + splitCurrent[3] = "" + splitCurrent[4] = "" + splitCurrent[5] = "" + + c.Assert(splitLatest, checker.DeepEquals, splitCurrent, check.Commentf("busybox:latest was changed after pulling all tags")) +} + +// TestPullClientDisconnect kills the client during a pull operation and verifies that the operation +// gets cancelled. +// +// Ref: docker/docker#15589 +func (s *DockerHubPullSuite) TestPullClientDisconnect(c *check.C) { + testRequires(c, DaemonIsLinux) + repoName := "hello-world:latest" + + pullCmd := s.MakeCmd("pull", repoName) + stdout, err := pullCmd.StdoutPipe() + c.Assert(err, checker.IsNil) + err = pullCmd.Start() + c.Assert(err, checker.IsNil) + + // Cancel as soon as we get some output. 
+ buf := make([]byte, 10) + _, err = stdout.Read(buf) + c.Assert(err, checker.IsNil) + + err = pullCmd.Process.Kill() + c.Assert(err, checker.IsNil) + + time.Sleep(2 * time.Second) + _, err = s.CmdWithError("inspect", repoName) + c.Assert(err, checker.NotNil, check.Commentf("image was pulled after client disconnected")) +} + +func (s *DockerRegistryAuthHtpasswdSuite) TestPullNoCredentialsNotFound(c *check.C) { + // @TODO TestPullNoCredentialsNotFound expects docker to fall back to a v1 registry, so has to be updated for v17.12, when v1 registries are no longer supported + s.d.StartWithBusybox(c, "--disable-legacy-registry=false") + + // we don't care about the actual image, we just want to see image not found + // because that means v2 call returned 401 and we fell back to v1 which usually + // gives a 404 (in this case the test registry doesn't handle v1 at all) + out, err := s.d.Cmd("pull", privateRegistryURL+"/busybox") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Error: image busybox:latest not found") +} + +// Regression test for https://github.com/docker/docker/issues/26429 +func (s *DockerSuite) TestPullLinuxImageFailsOnWindows(c *check.C) { + testRequires(c, DaemonIsWindows, Network) + _, _, err := dockerCmdWithError("pull", "ubuntu") + c.Assert(err.Error(), checker.Contains, "cannot be used on this platform") +} + +// Regression test for https://github.com/docker/docker/issues/28892 +func (s *DockerSuite) TestPullWindowsImageFailsOnLinux(c *check.C) { + testRequires(c, DaemonIsLinux, Network) + _, _, err := dockerCmdWithError("pull", "microsoft/nanoserver") + c.Assert(err.Error(), checker.Contains, "cannot be used on this platform") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_pull_trusted_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_pull_trusted_test.go new file mode 100644 index 000000000..d9628d971 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_pull_trusted_test.go @@ -0,0 +1,222 @@ +package main + +import ( + "fmt" + "io/ioutil" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +func (s *DockerTrustSuite) TestTrustedPull(c *check.C) { + repoName := s.setupTrustedImage(c, "trusted-pull") + + // Try pull + cli.Docker(cli.Args("pull", repoName), trustedCmd).Assert(c, SuccessTagging) + + cli.DockerCmd(c, "rmi", repoName) + // Try untrusted pull to ensure we pushed the tag to the registry + cli.Docker(cli.Args("pull", "--disable-content-trust=true", repoName), trustedCmd).Assert(c, SuccessDownloaded) +} + +func (s *DockerTrustSuite) TestTrustedIsolatedPull(c *check.C) { + repoName := s.setupTrustedImage(c, "trusted-isolated-pull") + + // Try pull (run from isolated directory without trust information) + cli.Docker(cli.Args("--config", "/tmp/docker-isolated", "pull", repoName), trustedCmd).Assert(c, SuccessTagging) + + cli.DockerCmd(c, "rmi", repoName) +} + +func (s *DockerTrustSuite) TestUntrustedPull(c *check.C) { + repoName := fmt.Sprintf("%v/dockercliuntrusted/pulltest:latest", privateRegistryURL) + // tag the image and upload it to the private registry + cli.DockerCmd(c, "tag", "busybox", repoName) + cli.DockerCmd(c, "push", repoName) + cli.DockerCmd(c, "rmi", repoName) + + // Try trusted pull on untrusted tag + cli.Docker(cli.Args("pull", repoName), trustedCmd).Assert(c, 
icmd.Expected{ + ExitCode: 1, + Err: "Error: remote trust data does not exist", + }) +} + +func (s *DockerTrustSuite) TestTrustedPullFromBadTrustServer(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclievilpull/trusted:latest", privateRegistryURL) + evilLocalConfigDir, err := ioutil.TempDir("", "evil-local-config-dir") + if err != nil { + c.Fatalf("Failed to create local temp dir") + } + + // tag the image and upload it to the private registry + cli.DockerCmd(c, "tag", "busybox", repoName) + + cli.Docker(cli.Args("push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + cli.DockerCmd(c, "rmi", repoName) + + // Try pull + cli.Docker(cli.Args("pull", repoName), trustedCmd).Assert(c, SuccessTagging) + cli.DockerCmd(c, "rmi", repoName) + + // Kill the notary server, start a new "evil" one. + s.not.Close() + s.not, err = newTestNotary(c) + + c.Assert(err, check.IsNil, check.Commentf("Restarting notary server failed.")) + + // In order to make an evil server, lets re-init a client (with a different trust dir) and push new data. + // tag an image and upload it to the private registry + cli.DockerCmd(c, "--config", evilLocalConfigDir, "tag", "busybox", repoName) + + // Push up to the new server + cli.Docker(cli.Args("--config", evilLocalConfigDir, "push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + + // Now, try pulling with the original client from this new trust server. This should fail because the new root is invalid. + cli.Docker(cli.Args("pull", repoName), trustedCmd).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "could not rotate trust to a new trusted root", + }) +} + +func (s *DockerTrustSuite) TestTrustedOfflinePull(c *check.C) { + repoName := s.setupTrustedImage(c, "trusted-offline-pull") + + cli.Docker(cli.Args("pull", repoName), trustedCmdWithServer("https://invalidnotaryserver")).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "error contacting notary server", + }) + // Do valid trusted pull to warm cache + cli.Docker(cli.Args("pull", repoName), trustedCmd).Assert(c, SuccessTagging) + cli.DockerCmd(c, "rmi", repoName) + + // Try pull again with invalid notary server, should use cache + cli.Docker(cli.Args("pull", repoName), trustedCmdWithServer("https://invalidnotaryserver")).Assert(c, SuccessTagging) +} + +func (s *DockerTrustSuite) TestTrustedPullDelete(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/%s:latest", privateRegistryURL, "trusted-pull-delete") + // tag the image and upload it to the private registry + cli.BuildCmd(c, repoName, build.WithDockerfile(` + FROM busybox + CMD echo trustedpulldelete + `)) + cli.Docker(cli.Args("push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + + cli.DockerCmd(c, "rmi", repoName) + + // Try pull + result := cli.Docker(cli.Args("pull", repoName), trustedCmd).Assert(c, icmd.Success) + + matches := digestRegex.FindStringSubmatch(result.Combined()) + c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", result.Combined())) + pullDigest := matches[1] + + imageID := inspectField(c, repoName, "Id") + + imageByDigest := repoName + "@" + pullDigest + byDigestID := inspectField(c, imageByDigest, "Id") + + c.Assert(byDigestID, checker.Equals, imageID) + + // rmi of tag should also remove the digest reference + cli.DockerCmd(c, "rmi", repoName) + + _, err := inspectFieldWithError(imageByDigest, "Id") + c.Assert(err, checker.NotNil, check.Commentf("digest reference should have been removed")) + + _, err = inspectFieldWithError(imageID, "Id") + c.Assert(err, 
checker.NotNil, check.Commentf("image should have been deleted")) +} + +func (s *DockerTrustSuite) TestTrustedPullReadsFromReleasesRole(c *check.C) { + testRequires(c, NotaryHosting) + repoName := fmt.Sprintf("%v/dockerclireleasesdelegationpulling/trusted", privateRegistryURL) + targetName := fmt.Sprintf("%s:latest", repoName) + + // Push with targets first, initializing the repo + cli.DockerCmd(c, "tag", "busybox", targetName) + cli.Docker(cli.Args("push", targetName), trustedCmd).Assert(c, icmd.Success) + s.assertTargetInRoles(c, repoName, "latest", "targets") + + // Try pull, check we retrieve from targets role + cli.Docker(cli.Args("-D", "pull", repoName), trustedCmd).Assert(c, icmd.Expected{ + Err: "retrieving target for targets role", + }) + + // Now we'll create the releases role, and try pushing and pulling + s.notaryCreateDelegation(c, repoName, "targets/releases", s.not.keys[0].Public) + s.notaryImportKey(c, repoName, "targets/releases", s.not.keys[0].Private) + s.notaryPublish(c, repoName) + + // try a pull, check that we can still pull because we can still read the + // old tag in the targets role + cli.Docker(cli.Args("-D", "pull", repoName), trustedCmd).Assert(c, icmd.Expected{ + Err: "retrieving target for targets role", + }) + + // try a pull -a, check that it succeeds because we can still pull from the + // targets role + cli.Docker(cli.Args("-D", "pull", "-a", repoName), trustedCmd).Assert(c, icmd.Success) + + // Push, should sign with targets/releases + cli.DockerCmd(c, "tag", "busybox", targetName) + cli.Docker(cli.Args("push", targetName), trustedCmd).Assert(c, icmd.Success) + s.assertTargetInRoles(c, repoName, "latest", "targets", "targets/releases") + + // Try pull, check we retrieve from targets/releases role + cli.Docker(cli.Args("-D", "pull", repoName), trustedCmd).Assert(c, icmd.Expected{ + Err: "retrieving target for targets/releases role", + }) + + // Create another delegation that we'll sign with + s.notaryCreateDelegation(c, repoName, "targets/other", s.not.keys[1].Public) + s.notaryImportKey(c, repoName, "targets/other", s.not.keys[1].Private) + s.notaryPublish(c, repoName) + + cli.DockerCmd(c, "tag", "busybox", targetName) + cli.Docker(cli.Args("push", targetName), trustedCmd).Assert(c, icmd.Success) + s.assertTargetInRoles(c, repoName, "latest", "targets", "targets/releases", "targets/other") + + // Try pull, check we retrieve from targets/releases role + cli.Docker(cli.Args("-D", "pull", repoName), trustedCmd).Assert(c, icmd.Expected{ + Err: "retrieving target for targets/releases role", + }) +} + +func (s *DockerTrustSuite) TestTrustedPullIgnoresOtherDelegationRoles(c *check.C) { + testRequires(c, NotaryHosting) + repoName := fmt.Sprintf("%v/dockerclipullotherdelegation/trusted", privateRegistryURL) + targetName := fmt.Sprintf("%s:latest", repoName) + + // We'll create a repo first with a non-release delegation role, so that when we + // push we'll sign it into the delegation role + s.notaryInitRepo(c, repoName) + s.notaryCreateDelegation(c, repoName, "targets/other", s.not.keys[0].Public) + s.notaryImportKey(c, repoName, "targets/other", s.not.keys[0].Private) + s.notaryPublish(c, repoName) + + // Push should write to the delegation role, not targets + cli.DockerCmd(c, "tag", "busybox", targetName) + cli.Docker(cli.Args("push", targetName), trustedCmd).Assert(c, icmd.Success) + s.assertTargetInRoles(c, repoName, "latest", "targets/other") + s.assertTargetNotInRoles(c, repoName, "latest", "targets") + + // Try pull - we should fail, since pull will only 
pull from the targets/releases + // role or the targets role + cli.DockerCmd(c, "tag", "busybox", targetName) + cli.Docker(cli.Args("-D", "pull", repoName), trustedCmd).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "No trust data for", + }) + + // try a pull -a: we should fail since pull will only pull from the targets/releases + // role or the targets role + cli.Docker(cli.Args("-D", "pull", "-a", repoName), trustedCmd).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "No trusted tags for", + }) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_push_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_push_test.go new file mode 100644 index 000000000..2ae206df7 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_push_test.go @@ -0,0 +1,604 @@ +package main + +import ( + "archive/tar" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/cli/config" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +// Pushing an image to a private registry. +func testPushBusyboxImage(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + // tag the image to upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + // push the image to the registry + dockerCmd(c, "push", repoName) +} + +func (s *DockerRegistrySuite) TestPushBusyboxImage(c *check.C) { + testPushBusyboxImage(c) +} + +func (s *DockerSchema1RegistrySuite) TestPushBusyboxImage(c *check.C) { + testPushBusyboxImage(c) +} + +// pushing an image without a prefix should throw an error +func (s *DockerSuite) TestPushUnprefixedRepo(c *check.C) { + out, _, err := dockerCmdWithError("push", "busybox") + c.Assert(err, check.NotNil, check.Commentf("pushing an unprefixed repo didn't result in a non-zero exit status: %s", out)) +} + +func testPushUntagged(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + expected := "An image does not exist locally with the tag" + + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.NotNil, check.Commentf("pushing the image to the private registry should have failed: output %q", out)) + c.Assert(out, checker.Contains, expected, check.Commentf("pushing the image failed")) +} + +func (s *DockerRegistrySuite) TestPushUntagged(c *check.C) { + testPushUntagged(c) +} + +func (s *DockerSchema1RegistrySuite) TestPushUntagged(c *check.C) { + testPushUntagged(c) +} + +func testPushBadTag(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox:latest", privateRegistryURL) + expected := "does not exist" + + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.NotNil, check.Commentf("pushing the image to the private registry should have failed: output %q", out)) + c.Assert(out, checker.Contains, expected, check.Commentf("pushing the image failed")) +} + +func (s *DockerRegistrySuite) TestPushBadTag(c *check.C) { + testPushBadTag(c) +} + +func (s *DockerSchema1RegistrySuite) TestPushBadTag(c *check.C) { + testPushBadTag(c) +} + +func testPushMultipleTags(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + repoTag1 := fmt.Sprintf("%v/dockercli/busybox:t1", privateRegistryURL) + repoTag2 := 
fmt.Sprintf("%v/dockercli/busybox:t2", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoTag1) + + dockerCmd(c, "tag", "busybox", repoTag2) + + dockerCmd(c, "push", repoName) + + // Ensure layer list is equivalent for repoTag1 and repoTag2 + out1, _ := dockerCmd(c, "pull", repoTag1) + + imageAlreadyExists := ": Image already exists" + var out1Lines []string + for _, outputLine := range strings.Split(out1, "\n") { + if strings.Contains(outputLine, imageAlreadyExists) { + out1Lines = append(out1Lines, outputLine) + } + } + + out2, _ := dockerCmd(c, "pull", repoTag2) + + var out2Lines []string + for _, outputLine := range strings.Split(out2, "\n") { + if strings.Contains(outputLine, imageAlreadyExists) { + out1Lines = append(out1Lines, outputLine) + } + } + c.Assert(out2Lines, checker.HasLen, len(out1Lines)) + + for i := range out1Lines { + c.Assert(out1Lines[i], checker.Equals, out2Lines[i]) + } +} + +func (s *DockerRegistrySuite) TestPushMultipleTags(c *check.C) { + testPushMultipleTags(c) +} + +func (s *DockerSchema1RegistrySuite) TestPushMultipleTags(c *check.C) { + testPushMultipleTags(c) +} + +func testPushEmptyLayer(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/emptylayer", privateRegistryURL) + emptyTarball, err := ioutil.TempFile("", "empty_tarball") + c.Assert(err, check.IsNil, check.Commentf("Unable to create test file")) + + tw := tar.NewWriter(emptyTarball) + err = tw.Close() + c.Assert(err, check.IsNil, check.Commentf("Error creating empty tarball")) + + freader, err := os.Open(emptyTarball.Name()) + c.Assert(err, check.IsNil, check.Commentf("Could not open test tarball")) + defer freader.Close() + + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "import", "-", repoName}, + Stdin: freader, + }).Assert(c, icmd.Success) + + // Now verify we can push it + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out)) +} + +func (s *DockerRegistrySuite) TestPushEmptyLayer(c *check.C) { + testPushEmptyLayer(c) +} + +func (s *DockerSchema1RegistrySuite) TestPushEmptyLayer(c *check.C) { + testPushEmptyLayer(c) +} + +// testConcurrentPush pushes multiple tags to the same repo +// concurrently. +func testConcurrentPush(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + + repos := []string{} + for _, tag := range []string{"push1", "push2", "push3"} { + repo := fmt.Sprintf("%v:%v", repoName, tag) + buildImageSuccessfully(c, repo, build.WithDockerfile(fmt.Sprintf(` + FROM busybox + ENTRYPOINT ["/bin/echo"] + ENV FOO foo + ENV BAR bar + CMD echo %s +`, repo))) + repos = append(repos, repo) + } + + // Push tags, in parallel + results := make(chan error) + + for _, repo := range repos { + go func(repo string) { + result := icmd.RunCommand(dockerBinary, "push", repo) + results <- result.Error + }(repo) + } + + for range repos { + err := <-results + c.Assert(err, checker.IsNil, check.Commentf("concurrent push failed with error: %v", err)) + } + + // Clear local images store. + args := append([]string{"rmi"}, repos...) + dockerCmd(c, args...) 
+ + // Re-pull and run individual tags, to make sure pushes succeeded + for _, repo := range repos { + dockerCmd(c, "pull", repo) + dockerCmd(c, "inspect", repo) + out, _ := dockerCmd(c, "run", "--rm", repo) + c.Assert(strings.TrimSpace(out), checker.Equals, "/bin/sh -c echo "+repo) + } +} + +func (s *DockerRegistrySuite) TestConcurrentPush(c *check.C) { + testConcurrentPush(c) +} + +func (s *DockerSchema1RegistrySuite) TestConcurrentPush(c *check.C) { + testConcurrentPush(c) +} + +func (s *DockerRegistrySuite) TestCrossRepositoryLayerPush(c *check.C) { + sourceRepoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + // tag the image to upload it to the private registry + dockerCmd(c, "tag", "busybox", sourceRepoName) + // push the image to the registry + out1, _, err := dockerCmdWithError("push", sourceRepoName) + c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out1)) + // ensure that none of the layers were mounted from another repository during push + c.Assert(strings.Contains(out1, "Mounted from"), check.Equals, false) + + digest1 := reference.DigestRegexp.FindString(out1) + c.Assert(len(digest1), checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) + + destRepoName := fmt.Sprintf("%v/dockercli/crossrepopush", privateRegistryURL) + // retag the image to upload the same layers to another repo in the same registry + dockerCmd(c, "tag", "busybox", destRepoName) + // push the image to the registry + out2, _, err := dockerCmdWithError("push", destRepoName) + c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out2)) + // ensure that layers were mounted from the first repo during push + c.Assert(strings.Contains(out2, "Mounted from dockercli/busybox"), check.Equals, true) + + digest2 := reference.DigestRegexp.FindString(out2) + c.Assert(len(digest2), checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) + c.Assert(digest1, check.Equals, digest2) + + // ensure that pushing again produces the same digest + out3, _, err := dockerCmdWithError("push", destRepoName) + c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out3)) + + digest3 := reference.DigestRegexp.FindString(out3) + c.Assert(len(digest3), checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) + c.Assert(digest3, check.Equals, digest2) + + // ensure that we can pull and run the cross-repo-pushed repository + dockerCmd(c, "rmi", destRepoName) + dockerCmd(c, "pull", destRepoName) + out4, _ := dockerCmd(c, "run", destRepoName, "echo", "-n", "hello world") + c.Assert(out4, check.Equals, "hello world") +} + +func (s *DockerSchema1RegistrySuite) TestCrossRepositoryLayerPushNotSupported(c *check.C) { + sourceRepoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + // tag the image to upload it to the private registry + dockerCmd(c, "tag", "busybox", sourceRepoName) + // push the image to the registry + out1, _, err := dockerCmdWithError("push", sourceRepoName) + c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out1)) + // ensure that none of the layers were mounted from another repository during push + c.Assert(strings.Contains(out1, "Mounted from"), check.Equals, false) + + digest1 := reference.DigestRegexp.FindString(out1) + c.Assert(len(digest1), checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) + + 
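// On a schema1 registry the second push re-uploads the layers instead of cross-repo mounting them, so the assertions below expect no "Mounted from" output and a different manifest digest. + 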
destRepoName := fmt.Sprintf("%v/dockercli/crossrepopush", privateRegistryURL) + // retag the image to upload the same layers to another repo in the same registry + dockerCmd(c, "tag", "busybox", destRepoName) + // push the image to the registry + out2, _, err := dockerCmdWithError("push", destRepoName) + c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out2)) + // schema1 registry should not support cross-repo layer mounts, so ensure that this does not happen + c.Assert(strings.Contains(out2, "Mounted from"), check.Equals, false) + + digest2 := reference.DigestRegexp.FindString(out2) + c.Assert(len(digest2), checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) + c.Assert(digest1, check.Not(check.Equals), digest2) + + // ensure that we can pull and run the second pushed repository + dockerCmd(c, "rmi", destRepoName) + dockerCmd(c, "pull", destRepoName) + out3, _ := dockerCmd(c, "run", destRepoName, "echo", "-n", "hello world") + c.Assert(out3, check.Equals, "hello world") +} + +func (s *DockerTrustSuite) TestTrustedPush(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclitrusted/pushtest:latest", privateRegistryURL) + // tag the image and upload it to the private registry + cli.DockerCmd(c, "tag", "busybox", repoName) + + cli.Docker(cli.Args("push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + + // Try pull after push + cli.Docker(cli.Args("pull", repoName), trustedCmd).Assert(c, icmd.Expected{ + Out: "Status: Image is up to date", + }) + + // Assert that we rotated the snapshot key to the server by checking our local keystore + contents, err := ioutil.ReadDir(filepath.Join(config.Dir(), "trust/private/tuf_keys", privateRegistryURL, "dockerclitrusted/pushtest")) + c.Assert(err, check.IsNil, check.Commentf("Unable to read local tuf key files")) + // Check that we only have 1 key (targets key) + c.Assert(contents, checker.HasLen, 1) +} + +func (s *DockerTrustSuite) TestTrustedPushWithEnvPasswords(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclienv/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + cli.DockerCmd(c, "tag", "busybox", repoName) + + cli.Docker(cli.Args("push", repoName), trustedCmdWithPassphrases("12345678", "12345678")).Assert(c, SuccessSigningAndPushing) + + // Try pull after push + cli.Docker(cli.Args("pull", repoName), trustedCmd).Assert(c, icmd.Expected{ + Out: "Status: Image is up to date", + }) +} + +func (s *DockerTrustSuite) TestTrustedPushWithFailingServer(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclitrusted/failingserver:latest", privateRegistryURL) + // tag the image and upload it to the private registry + cli.DockerCmd(c, "tag", "busybox", repoName) + + // Using a name that doesn't resolve to an address makes this test faster + cli.Docker(cli.Args("push", repoName), trustedCmdWithServer("https://server.invalid:81/")).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "error contacting notary server", + }) +} + +func (s *DockerTrustSuite) TestTrustedPushWithoutServerAndUntrusted(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclitrusted/trustedandnot:latest", privateRegistryURL) + // tag the image and upload it to the private registry + cli.DockerCmd(c, "tag", "busybox", repoName) + + result := cli.Docker(cli.Args("push", "--disable-content-trust", repoName), trustedCmdWithServer("https://server.invalid:81/")) + result.Assert(c, icmd.Success) + c.Assert(result.Combined(), check.Not(checker.Contains), "Error 
establishing connection to notary repository", check.Commentf("Missing expected output on trusted push with --disable-content-trust:")) +} + +func (s *DockerTrustSuite) TestTrustedPushWithExistingTag(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclitag/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + cli.DockerCmd(c, "tag", "busybox", repoName) + cli.DockerCmd(c, "push", repoName) + + cli.Docker(cli.Args("push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + + // Try pull after push + cli.Docker(cli.Args("pull", repoName), trustedCmd).Assert(c, icmd.Expected{ + Out: "Status: Image is up to date", + }) +} + +func (s *DockerTrustSuite) TestTrustedPushWithExistingSignedTag(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclipushpush/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + cli.DockerCmd(c, "tag", "busybox", repoName) + + // Do a trusted push + cli.Docker(cli.Args("push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + + // Do another trusted push + cli.Docker(cli.Args("push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + cli.DockerCmd(c, "rmi", repoName) + + // Try pull to ensure the double push did not break our ability to pull + cli.Docker(cli.Args("pull", repoName), trustedCmd).Assert(c, SuccessDownloaded) +} + +func (s *DockerTrustSuite) TestTrustedPushWithIncorrectPassphraseForNonRoot(c *check.C) { + repoName := fmt.Sprintf("%v/dockercliincorretpwd/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + cli.DockerCmd(c, "tag", "busybox", repoName) + + // Push with default passphrases + cli.Docker(cli.Args("push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + + // Push with wrong passphrases + cli.Docker(cli.Args("push", repoName), trustedCmdWithPassphrases("12345678", "87654321")).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "could not find necessary signing keys", + }) +} + +func (s *DockerTrustSuite) TestTrustedPushWithReleasesDelegationOnly(c *check.C) { + testRequires(c, NotaryHosting) + repoName := fmt.Sprintf("%v/dockerclireleasedelegationinitfirst/trusted", privateRegistryURL) + targetName := fmt.Sprintf("%s:latest", repoName) + s.notaryInitRepo(c, repoName) + s.notaryCreateDelegation(c, repoName, "targets/releases", s.not.keys[0].Public) + s.notaryPublish(c, repoName) + + s.notaryImportKey(c, repoName, "targets/releases", s.not.keys[0].Private) + + // tag the image and upload it to the private registry + cli.DockerCmd(c, "tag", "busybox", targetName) + + cli.Docker(cli.Args("push", targetName), trustedCmd).Assert(c, SuccessSigningAndPushing) + // check to make sure that the target has been added to targets/releases and not targets + s.assertTargetInRoles(c, repoName, "latest", "targets/releases") + s.assertTargetNotInRoles(c, repoName, "latest", "targets") + + // Try pull after push + os.RemoveAll(filepath.Join(config.Dir(), "trust")) + + cli.Docker(cli.Args("pull", targetName), trustedCmd).Assert(c, icmd.Expected{ + Out: "Status: Image is up to date", + }) +} + +func (s *DockerTrustSuite) TestTrustedPushSignsAllFirstLevelRolesWeHaveKeysFor(c *check.C) { + testRequires(c, NotaryHosting) + repoName := fmt.Sprintf("%v/dockerclimanyroles/trusted", privateRegistryURL) + targetName := fmt.Sprintf("%s:latest", repoName) + s.notaryInitRepo(c, repoName) + s.notaryCreateDelegation(c, repoName, "targets/role1", s.not.keys[0].Public) + s.notaryCreateDelegation(c, repoName, 
"targets/role2", s.not.keys[1].Public) + s.notaryCreateDelegation(c, repoName, "targets/role3", s.not.keys[2].Public) + + // import everything except the third key + s.notaryImportKey(c, repoName, "targets/role1", s.not.keys[0].Private) + s.notaryImportKey(c, repoName, "targets/role2", s.not.keys[1].Private) + + s.notaryCreateDelegation(c, repoName, "targets/role1/subrole", s.not.keys[3].Public) + s.notaryImportKey(c, repoName, "targets/role1/subrole", s.not.keys[3].Private) + + s.notaryPublish(c, repoName) + + // tag the image and upload it to the private registry + cli.DockerCmd(c, "tag", "busybox", targetName) + + cli.Docker(cli.Args("push", targetName), trustedCmd).Assert(c, SuccessSigningAndPushing) + + // check to make sure that the target has been added to targets/role1 and targets/role2, and + // not targets (because there are delegations) or targets/role3 (due to missing key) or + // targets/role1/subrole (due to it being a second level delegation) + s.assertTargetInRoles(c, repoName, "latest", "targets/role1", "targets/role2") + s.assertTargetNotInRoles(c, repoName, "latest", "targets") + + // Try pull after push + os.RemoveAll(filepath.Join(config.Dir(), "trust")) + + // pull should fail because none of these are the releases role + cli.Docker(cli.Args("pull", targetName), trustedCmd).Assert(c, icmd.Expected{ + ExitCode: 1, + }) +} + +func (s *DockerTrustSuite) TestTrustedPushSignsForRolesWithKeysAndValidPaths(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclirolesbykeysandpaths/trusted", privateRegistryURL) + targetName := fmt.Sprintf("%s:latest", repoName) + s.notaryInitRepo(c, repoName) + s.notaryCreateDelegation(c, repoName, "targets/role1", s.not.keys[0].Public, "l", "z") + s.notaryCreateDelegation(c, repoName, "targets/role2", s.not.keys[1].Public, "x", "y") + s.notaryCreateDelegation(c, repoName, "targets/role3", s.not.keys[2].Public, "latest") + s.notaryCreateDelegation(c, repoName, "targets/role4", s.not.keys[3].Public, "latest") + + // import everything except the third key + s.notaryImportKey(c, repoName, "targets/role1", s.not.keys[0].Private) + s.notaryImportKey(c, repoName, "targets/role2", s.not.keys[1].Private) + s.notaryImportKey(c, repoName, "targets/role4", s.not.keys[3].Private) + + s.notaryPublish(c, repoName) + + // tag the image and upload it to the private registry + cli.DockerCmd(c, "tag", "busybox", targetName) + + cli.Docker(cli.Args("push", targetName), trustedCmd).Assert(c, SuccessSigningAndPushing) + + // check to make sure that the target has been added to targets/role1 and targets/role4, and + // not targets (because there are delegations) or targets/role2 (due to path restrictions) or + // targets/role3 (due to missing key) + s.assertTargetInRoles(c, repoName, "latest", "targets/role1", "targets/role4") + s.assertTargetNotInRoles(c, repoName, "latest", "targets") + + // Try pull after push + os.RemoveAll(filepath.Join(config.Dir(), "trust")) + + // pull should fail because none of these are the releases role + cli.Docker(cli.Args("pull", targetName), trustedCmd).Assert(c, icmd.Expected{ + ExitCode: 1, + }) +} + +func (s *DockerTrustSuite) TestTrustedPushDoesntSignTargetsIfDelegationsExist(c *check.C) { + testRequires(c, NotaryHosting) + repoName := fmt.Sprintf("%v/dockerclireleasedelegationnotsignable/trusted", privateRegistryURL) + targetName := fmt.Sprintf("%s:latest", repoName) + s.notaryInitRepo(c, repoName) + s.notaryCreateDelegation(c, repoName, "targets/role1", s.not.keys[0].Public) + s.notaryPublish(c, repoName) + + // do not import 
any delegations key + + // tag the image and upload it to the private registry + cli.DockerCmd(c, "tag", "busybox", targetName) + + cli.Docker(cli.Args("push", targetName), trustedCmd).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "no valid signing keys", + }) + s.assertTargetNotInRoles(c, repoName, "latest", "targets", "targets/role1") +} + +func (s *DockerRegistryAuthHtpasswdSuite) TestPushNoCredentialsNoRetry(c *check.C) { + repoName := fmt.Sprintf("%s/busybox", privateRegistryURL) + dockerCmd(c, "tag", "busybox", repoName) + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, check.Not(checker.Contains), "Retrying") + c.Assert(out, checker.Contains, "no basic auth credentials") +} + +// This may be flaky, but it's needed to make sure we don't regress on unauthorized push; see #21054 +func (s *DockerSuite) TestPushToCentralRegistryUnauthorized(c *check.C) { + testRequires(c, Network) + repoName := "test/busybox" + dockerCmd(c, "tag", "busybox", repoName) + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, check.Not(checker.Contains), "Retrying") +} + +func getTestTokenService(status int, body string, retries int) *httptest.Server { + var mu sync.Mutex + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + mu.Lock() + if retries > 0 { + // Headers must be set before WriteHeader, or they are silently dropped. + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusServiceUnavailable) + w.Write([]byte(`{"errors":[{"code":"UNAVAILABLE","message":"cannot create token at this time"}]}`)) + retries-- + } else { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(status) + w.Write([]byte(body)) + } + mu.Unlock() + })) +} + +func (s *DockerRegistryAuthTokenSuite) TestPushTokenServiceUnauthResponse(c *check.C) { + ts := getTestTokenService(http.StatusUnauthorized, `{"errors": [{"Code":"UNAUTHORIZED", "message": "a message", "detail": null}]}`, 0) + defer ts.Close() + s.setupRegistryWithTokenService(c, ts.URL) + repoName := fmt.Sprintf("%s/busybox", privateRegistryURL) + dockerCmd(c, "tag", "busybox", repoName) + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Not(checker.Contains), "Retrying") + c.Assert(out, checker.Contains, "unauthorized: a message") +} + +func (s *DockerRegistryAuthTokenSuite) TestPushMisconfiguredTokenServiceResponseUnauthorized(c *check.C) { + ts := getTestTokenService(http.StatusUnauthorized, `{"error": "unauthorized"}`, 0) + defer ts.Close() + s.setupRegistryWithTokenService(c, ts.URL) + repoName := fmt.Sprintf("%s/busybox", privateRegistryURL) + dockerCmd(c, "tag", "busybox", repoName) + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Not(checker.Contains), "Retrying") + split := strings.Split(out, "\n") + c.Assert(split[len(split)-2], check.Equals, "unauthorized: authentication required") +} + +func (s *DockerRegistryAuthTokenSuite) TestPushMisconfiguredTokenServiceResponseError(c *check.C) { + ts := getTestTokenService(http.StatusTooManyRequests, `{"errors": [{"code":"TOOMANYREQUESTS","message":"out of tokens"}]}`, 3) + defer ts.Close() + s.setupRegistryWithTokenService(c, ts.URL) + repoName := fmt.Sprintf("%s/busybox", privateRegistryURL) + dockerCmd(c, "tag", "busybox", repoName) + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.NotNil, check.Commentf(out)) + // 
TODO: isolate test so that it can be guaranteed that the 503 will trigger xfer retries + //c.Assert(out, checker.Contains, "Retrying") + //c.Assert(out, checker.Not(checker.Contains), "Retrying in 15") + split := strings.Split(out, "\n") + c.Assert(split[len(split)-2], check.Equals, "toomanyrequests: out of tokens") +} + +func (s *DockerRegistryAuthTokenSuite) TestPushMisconfiguredTokenServiceResponseUnparsable(c *check.C) { + ts := getTestTokenService(http.StatusForbidden, `no way`, 0) + defer ts.Close() + s.setupRegistryWithTokenService(c, ts.URL) + repoName := fmt.Sprintf("%s/busybox", privateRegistryURL) + dockerCmd(c, "tag", "busybox", repoName) + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Not(checker.Contains), "Retrying") + split := strings.Split(out, "\n") + c.Assert(split[len(split)-2], checker.Contains, "error parsing HTTP 403 response body: ") +} + +func (s *DockerRegistryAuthTokenSuite) TestPushMisconfiguredTokenServiceResponseNoToken(c *check.C) { + ts := getTestTokenService(http.StatusOK, `{"something": "wrong"}`, 0) + defer ts.Close() + s.setupRegistryWithTokenService(c, ts.URL) + repoName := fmt.Sprintf("%s/busybox", privateRegistryURL) + dockerCmd(c, "tag", "busybox", repoName) + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Not(checker.Contains), "Retrying") + split := strings.Split(out, "\n") + c.Assert(split[len(split)-2], check.Equals, "authorization server did not include a token in the response") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_registry_user_agent_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_registry_user_agent_test.go new file mode 100644 index 000000000..6cbe6e7e6 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_registry_user_agent_test.go @@ -0,0 +1,103 @@ +package main + +import ( + "fmt" + "io/ioutil" + "net/http" + "os" + "regexp" + + "github.com/docker/docker/integration-cli/registry" + "github.com/go-check/check" +) + +// unescapeBackslashSemicolonParens unescapes \;() +func unescapeBackslashSemicolonParens(s string) string { + re := regexp.MustCompile(`\\;`) + ret := re.ReplaceAll([]byte(s), []byte(";")) + + re = regexp.MustCompile(`\\\(`) + ret = re.ReplaceAll([]byte(ret), []byte("(")) + + re = regexp.MustCompile(`\\\)`) + ret = re.ReplaceAll([]byte(ret), []byte(")")) + + re = regexp.MustCompile(`\\\\`) + ret = re.ReplaceAll([]byte(ret), []byte(`\`)) + + return string(ret) +} + +func regexpCheckUA(c *check.C, ua string) { + re := regexp.MustCompile("(?P<dockerUA>.+) UpstreamClient(?P<upstreamUA>.+)") + substrArr := re.FindStringSubmatch(ua) + + c.Assert(substrArr, check.HasLen, 3, check.Commentf("Expected 'UpstreamClient()' with upstream client UA")) + dockerUA := substrArr[1] + upstreamUAEscaped := substrArr[2] + + // check dockerUA looks correct + reDockerUA := regexp.MustCompile("^docker/[0-9A-Za-z+]") + bMatchDockerUA := reDockerUA.MatchString(dockerUA) + c.Assert(bMatchDockerUA, check.Equals, true, check.Commentf("Docker Engine User-Agent malformed")) + + // check upstreamUA looks correct + // Expecting something like: Docker-Client/1.11.0-dev (linux) + upstreamUA := unescapeBackslashSemicolonParens(upstreamUAEscaped) + reUpstreamUA := regexp.MustCompile("^\\(Docker-Client/[0-9A-Za-z+]") + bMatchUpstreamUA := reUpstreamUA.MatchString(upstreamUA) + c.Assert(bMatchUpstreamUA, check.Equals, true, check.Commentf("(Upstream) Docker
Client User-Agent malformed")) +} + +// registerUserAgentHandler registers a handler for the `/v2/*` endpoint. +// Note that a 404 is returned to prevent the client to proceed. +// We are only checking if the client sent a valid User Agent string along +// with the request. +func registerUserAgentHandler(reg *registry.Mock, result *string) { + reg.RegisterHandler("/v2/", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(404) + w.Write([]byte(`{"errors":[{"code": "UNSUPPORTED","message": "this is a mock registry"}]}`)) + var ua string + for k, v := range r.Header { + if k == "User-Agent" { + ua = v[0] + } + } + *result = ua + }) +} + +// TestUserAgentPassThrough verifies that when an image is pulled from +// a registry, the registry should see a User-Agent string of the form +// [docker engine UA] UpstreamClientSTREAM-CLIENT([client UA]) +func (s *DockerRegistrySuite) TestUserAgentPassThrough(c *check.C) { + var ua string + + reg, err := registry.NewMock(c) + defer reg.Close() + c.Assert(err, check.IsNil) + registerUserAgentHandler(reg, &ua) + repoName := fmt.Sprintf("%s/busybox", reg.URL()) + + s.d.StartWithBusybox(c, "--insecure-registry", reg.URL()) + + tmp, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, check.IsNil) + defer os.RemoveAll(tmp) + + dockerfile, err := makefile(tmp, fmt.Sprintf("FROM %s", repoName)) + c.Assert(err, check.IsNil, check.Commentf("Unable to create test dockerfile")) + + s.d.Cmd("build", "--file", dockerfile, tmp) + regexpCheckUA(c, ua) + + s.d.Cmd("login", "-u", "richard", "-p", "testtest", reg.URL()) + regexpCheckUA(c, ua) + + s.d.Cmd("pull", repoName) + regexpCheckUA(c, ua) + + s.d.Cmd("tag", "busybox", repoName) + s.d.Cmd("push", repoName) + regexpCheckUA(c, ua) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_rename_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_rename_test.go new file mode 100644 index 000000000..ea430227d --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_rename_test.go @@ -0,0 +1,138 @@ +package main + +import ( + "strings" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/pkg/stringid" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestRenameStoppedContainer(c *check.C) { + out, _ := dockerCmd(c, "run", "--name", "first_name", "-d", "busybox", "sh") + + cleanedContainerID := strings.TrimSpace(out) + dockerCmd(c, "wait", cleanedContainerID) + + name := inspectField(c, cleanedContainerID, "Name") + newName := "new_name" + stringid.GenerateNonCryptoID() + dockerCmd(c, "rename", "first_name", newName) + + name = inspectField(c, cleanedContainerID, "Name") + c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container %s", name)) + +} + +func (s *DockerSuite) TestRenameRunningContainer(c *check.C) { + out, _ := dockerCmd(c, "run", "--name", "first_name", "-d", "busybox", "sh") + + newName := "new_name" + stringid.GenerateNonCryptoID() + cleanedContainerID := strings.TrimSpace(out) + dockerCmd(c, "rename", "first_name", newName) + + name := inspectField(c, cleanedContainerID, "Name") + c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container %s", name)) +} + +func (s *DockerSuite) TestRenameRunningContainerAndReuse(c *check.C) { + out := runSleepingContainer(c, "--name", "first_name") + c.Assert(waitRun("first_name"), check.IsNil) + + newName := "new_name" + ContainerID := strings.TrimSpace(out) + 
dockerCmd(c, "rename", "first_name", newName) + + name := inspectField(c, ContainerID, "Name") + c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container")) + + out = runSleepingContainer(c, "--name", "first_name") + c.Assert(waitRun("first_name"), check.IsNil) + newContainerID := strings.TrimSpace(out) + name = inspectField(c, newContainerID, "Name") + c.Assert(name, checker.Equals, "/first_name", check.Commentf("Failed to reuse container name")) +} + +func (s *DockerSuite) TestRenameCheckNames(c *check.C) { + dockerCmd(c, "run", "--name", "first_name", "-d", "busybox", "sh") + + newName := "new_name" + stringid.GenerateNonCryptoID() + dockerCmd(c, "rename", "first_name", newName) + + name := inspectField(c, newName, "Name") + c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container %s", name)) + + result := dockerCmdWithResult("inspect", "-f={{.Name}}", "--type=container", "first_name") + c.Assert(result, icmd.Matches, icmd.Expected{ + ExitCode: 1, + Err: "No such container: first_name", + }) +} + +func (s *DockerSuite) TestRenameInvalidName(c *check.C) { + runSleepingContainer(c, "--name", "myname") + + out, _, err := dockerCmdWithError("rename", "myname", "new:invalid") + c.Assert(err, checker.NotNil, check.Commentf("Renaming container to invalid name should have failed: %s", out)) + c.Assert(out, checker.Contains, "Invalid container name", check.Commentf("%v", err)) + + out, _, err = dockerCmdWithError("rename", "myname") + c.Assert(err, checker.NotNil, check.Commentf("Renaming container to invalid name should have failed: %s", out)) + c.Assert(out, checker.Contains, "requires exactly 2 argument(s).", check.Commentf("%v", err)) + + out, _, err = dockerCmdWithError("rename", "myname", "") + c.Assert(err, checker.NotNil, check.Commentf("Renaming container to invalid name should have failed: %s", out)) + c.Assert(out, checker.Contains, "may be empty", check.Commentf("%v", err)) + + out, _, err = dockerCmdWithError("rename", "", "newname") + c.Assert(err, checker.NotNil, check.Commentf("Renaming container with empty name should have failed: %s", out)) + c.Assert(out, checker.Contains, "may be empty", check.Commentf("%v", err)) + + out, _ = dockerCmd(c, "ps", "-a") + c.Assert(out, checker.Contains, "myname", check.Commentf("Output of docker ps should have included 'myname': %s", out)) +} + +func (s *DockerSuite) TestRenameAnonymousContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + + dockerCmd(c, "network", "create", "network1") + out, _ := dockerCmd(c, "create", "-it", "--net", "network1", "busybox", "top") + + anonymousContainerID := strings.TrimSpace(out) + + dockerCmd(c, "rename", anonymousContainerID, "container1") + dockerCmd(c, "start", "container1") + + count := "-c" + if testEnv.DaemonPlatform() == "windows" { + count = "-n" + } + + _, _, err := dockerCmdWithError("run", "--net", "network1", "busybox", "ping", count, "1", "container1") + c.Assert(err, check.IsNil, check.Commentf("Embedded DNS lookup fails after renaming anonymous container: %v", err)) +} + +func (s *DockerSuite) TestRenameContainerWithSameName(c *check.C) { + out := runSleepingContainer(c, "--name", "old") + ContainerID := strings.TrimSpace(out) + + out, _, err := dockerCmdWithError("rename", "old", "old") + c.Assert(err, checker.NotNil, check.Commentf("Renaming a container with the same name should have failed")) + c.Assert(out, checker.Contains, "Renaming a container with the same name", check.Commentf("%v", err)) + + out, _, err = 
dockerCmdWithError("rename", ContainerID, "old") + c.Assert(err, checker.NotNil, check.Commentf("Renaming a container with the same name should have failed")) + c.Assert(out, checker.Contains, "Renaming a container with the same name", check.Commentf("%v", err)) +} + +// Test case for #23973 +func (s *DockerSuite) TestRenameContainerWithLinkedContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + + db1, _ := dockerCmd(c, "run", "--name", "db1", "-d", "busybox", "top") + dockerCmd(c, "run", "--name", "app1", "-d", "--link", "db1:/mysql", "busybox", "top") + dockerCmd(c, "rename", "app1", "app2") + out, _, err := dockerCmdWithError("inspect", "--format={{ .Id }}", "app2/mysql") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, strings.TrimSpace(db1)) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_restart_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_restart_test.go new file mode 100644 index 000000000..cf6b135ed --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_restart_test.go @@ -0,0 +1,309 @@ +package main + +import ( + "os" + "strconv" + "strings" + "time" + + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestRestartStoppedContainer(c *check.C) { + dockerCmd(c, "run", "--name=test", "busybox", "echo", "foobar") + cleanedContainerID := getIDByName(c, "test") + + out, _ := dockerCmd(c, "logs", cleanedContainerID) + c.Assert(out, checker.Equals, "foobar\n") + + dockerCmd(c, "restart", cleanedContainerID) + + // Wait until the container has stopped + err := waitInspect(cleanedContainerID, "{{.State.Running}}", "false", 20*time.Second) + c.Assert(err, checker.IsNil) + + out, _ = dockerCmd(c, "logs", cleanedContainerID) + c.Assert(out, checker.Equals, "foobar\nfoobar\n") +} + +func (s *DockerSuite) TestRestartRunningContainer(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "echo foobar && sleep 30 && echo 'should not print this'") + + cleanedContainerID := strings.TrimSpace(out) + + c.Assert(waitRun(cleanedContainerID), checker.IsNil) + + getLogs := func(c *check.C) (interface{}, check.CommentInterface) { + out, _ := dockerCmd(c, "logs", cleanedContainerID) + return out, nil + } + + // Wait 10 seconds for the 'echo' to appear in the logs + waitAndAssert(c, 10*time.Second, getLogs, checker.Equals, "foobar\n") + + dockerCmd(c, "restart", "-t", "1", cleanedContainerID) + c.Assert(waitRun(cleanedContainerID), checker.IsNil) + + // Wait 10 seconds for first 'echo' appear (again) in the logs + waitAndAssert(c, 10*time.Second, getLogs, checker.Equals, "foobar\nfoobar\n") +} + +// Test that restarting a container with a volume does not create a new volume on restart. Regression test for #819. 
+func (s *DockerSuite) TestRestartWithVolumes(c *check.C) { + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + out := runSleepingContainer(c, "-d", "-v", prefix+slash+"test") + + cleanedContainerID := strings.TrimSpace(out) + out, err := inspectFilter(cleanedContainerID, "len .Mounts") + c.Assert(err, check.IsNil, check.Commentf("failed to inspect %s: %s", cleanedContainerID, out)) + out = strings.Trim(out, " \n\r") + c.Assert(out, checker.Equals, "1") + + source, err := inspectMountSourceField(cleanedContainerID, prefix+slash+"test") + c.Assert(err, checker.IsNil) + + dockerCmd(c, "restart", cleanedContainerID) + + out, err = inspectFilter(cleanedContainerID, "len .Mounts") + c.Assert(err, check.IsNil, check.Commentf("failed to inspect %s: %s", cleanedContainerID, out)) + out = strings.Trim(out, " \n\r") + c.Assert(out, checker.Equals, "1") + + sourceAfterRestart, err := inspectMountSourceField(cleanedContainerID, prefix+slash+"test") + c.Assert(err, checker.IsNil) + c.Assert(source, checker.Equals, sourceAfterRestart) +} + +func (s *DockerSuite) TestRestartDisconnectedContainer(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace, NotArm) + + // Run a container on the default bridge network + out, _ := dockerCmd(c, "run", "-d", "--name", "c0", "busybox", "top") + cleanedContainerID := strings.TrimSpace(out) + c.Assert(waitRun(cleanedContainerID), checker.IsNil) + + // Disconnect the container from the network; dockerCmd fails the test on a non-zero exit code + dockerCmd(c, "network", "disconnect", "bridge", "c0") + + // Restart the container + dockerCmd(c, "restart", "c0") +} + +func (s *DockerSuite) TestRestartPolicyNO(c *check.C) { + out, _ := dockerCmd(c, "create", "--restart=no", "busybox") + + id := strings.TrimSpace(string(out)) + name := inspectField(c, id, "HostConfig.RestartPolicy.Name") + c.Assert(name, checker.Equals, "no") +} + +func (s *DockerSuite) TestRestartPolicyAlways(c *check.C) { + out, _ := dockerCmd(c, "create", "--restart=always", "busybox") + + id := strings.TrimSpace(string(out)) + name := inspectField(c, id, "HostConfig.RestartPolicy.Name") + c.Assert(name, checker.Equals, "always") + + MaximumRetryCount := inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount") + + // MaximumRetryCount=0 if the restart policy is always + c.Assert(MaximumRetryCount, checker.Equals, "0") +} + +func (s *DockerSuite) TestRestartPolicyOnFailure(c *check.C) { + out, _, err := dockerCmdWithError("create", "--restart=on-failure:-1", "busybox") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "maximum retry count cannot be negative") + + out, _ = dockerCmd(c, "create", "--restart=on-failure:1", "busybox") + + id := strings.TrimSpace(string(out)) + name := inspectField(c, id, "HostConfig.RestartPolicy.Name") + maxRetry := inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount") + + c.Assert(name, checker.Equals, "on-failure") + c.Assert(maxRetry, checker.Equals, "1") + + out, _ = dockerCmd(c, "create", "--restart=on-failure:0", "busybox") + + id = strings.TrimSpace(string(out)) + name = inspectField(c, id, "HostConfig.RestartPolicy.Name") + maxRetry = inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount") + + c.Assert(name, checker.Equals, "on-failure") + c.Assert(maxRetry, checker.Equals, "0") + + out, _ = dockerCmd(c, "create", "--restart=on-failure", "busybox") + + id = strings.TrimSpace(string(out)) + name = 
inspectField(c, id, "HostConfig.RestartPolicy.Name") + maxRetry = inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount") + + c.Assert(name, checker.Equals, "on-failure") + c.Assert(maxRetry, checker.Equals, "0") +} + +// a good container with --restart=on-failure:3 +// MaximumRetryCount!=0; RestartCount=0 +func (s *DockerSuite) TestRestartContainerwithGoodContainer(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "--restart=on-failure:3", "busybox", "true") + + id := strings.TrimSpace(string(out)) + err := waitInspect(id, "{{ .State.Restarting }} {{ .State.Running }}", "false false", 30*time.Second) + c.Assert(err, checker.IsNil) + + count := inspectField(c, id, "RestartCount") + c.Assert(count, checker.Equals, "0") + + MaximumRetryCount := inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount") + c.Assert(MaximumRetryCount, checker.Equals, "3") + +} + +func (s *DockerSuite) TestRestartContainerSuccess(c *check.C) { + testRequires(c, SameHostDaemon) + + out := runSleepingContainer(c, "-d", "--restart=always") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + + pidStr := inspectField(c, id, "State.Pid") + + pid, err := strconv.Atoi(pidStr) + c.Assert(err, check.IsNil) + + p, err := os.FindProcess(pid) + c.Assert(err, check.IsNil) + c.Assert(p, check.NotNil) + + err = p.Kill() + c.Assert(err, check.IsNil) + + err = waitInspect(id, "{{.RestartCount}}", "1", 30*time.Second) + c.Assert(err, check.IsNil) + + err = waitInspect(id, "{{.State.Status}}", "running", 30*time.Second) + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestRestartWithPolicyUserDefinedNetwork(c *check.C) { + // TODO Windows. This may be portable following HNS integration post TP5. + testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace, NotArm) + dockerCmd(c, "network", "create", "-d", "bridge", "udNet") + + dockerCmd(c, "run", "-d", "--net=udNet", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + + dockerCmd(c, "run", "-d", "--restart=always", "--net=udNet", "--name=second", + "--link=first:foo", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // ping to first and its alias foo must succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") + c.Assert(err, check.IsNil) + + // Now kill the second container and let the restart policy kick in + pidStr := inspectField(c, "second", "State.Pid") + + pid, err := strconv.Atoi(pidStr) + c.Assert(err, check.IsNil) + + p, err := os.FindProcess(pid) + c.Assert(err, check.IsNil) + c.Assert(p, check.NotNil) + + err = p.Kill() + c.Assert(err, check.IsNil) + + err = waitInspect("second", "{{.RestartCount}}", "1", 5*time.Second) + c.Assert(err, check.IsNil) + + err = waitInspect("second", "{{.State.Status}}", "running", 5*time.Second) + + // ping to first and its alias foo must still succeed + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestRestartPolicyAfterRestart(c *check.C) { + testRequires(c, SameHostDaemon) + + out := runSleepingContainer(c, "-d", "--restart=always") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + + dockerCmd(c, "restart", id) + + c.Assert(waitRun(id), check.IsNil) + + pidStr := inspectField(c, id, "State.Pid") + + 
pid, err := strconv.Atoi(pidStr) + c.Assert(err, check.IsNil) + + p, err := os.FindProcess(pid) + c.Assert(err, check.IsNil) + c.Assert(p, check.NotNil) + + err = p.Kill() + c.Assert(err, check.IsNil) + + err = waitInspect(id, "{{.RestartCount}}", "1", 30*time.Second) + c.Assert(err, check.IsNil) + + err = waitInspect(id, "{{.State.Status}}", "running", 30*time.Second) + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestRestartContainerwithRestartPolicy(c *check.C) { + out1, _ := dockerCmd(c, "run", "-d", "--restart=on-failure:3", "busybox", "false") + out2, _ := dockerCmd(c, "run", "-d", "--restart=always", "busybox", "false") + + id1 := strings.TrimSpace(string(out1)) + id2 := strings.TrimSpace(string(out2)) + waitTimeout := 15 * time.Second + if testEnv.DaemonPlatform() == "windows" { + waitTimeout = 150 * time.Second + } + err := waitInspect(id1, "{{ .State.Restarting }} {{ .State.Running }}", "false false", waitTimeout) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "restart", id1) + dockerCmd(c, "restart", id2) + + // Make sure we can stop/start (regression test from a705e166cf3bcca62543150c2b3f9bfeae45ecfa) + dockerCmd(c, "stop", id1) + dockerCmd(c, "stop", id2) + dockerCmd(c, "start", id1) + dockerCmd(c, "start", id2) + + // Kill the containers, making sure they are stopped at the end of the test + dockerCmd(c, "kill", id1) + dockerCmd(c, "kill", id2) + err = waitInspect(id1, "{{ .State.Restarting }} {{ .State.Running }}", "false false", waitTimeout) + c.Assert(err, checker.IsNil) + err = waitInspect(id2, "{{ .State.Restarting }} {{ .State.Running }}", "false false", waitTimeout) + c.Assert(err, checker.IsNil) +} + +func (s *DockerSuite) TestRestartAutoRemoveContainer(c *check.C) { + out := runSleepingContainer(c, "--rm") + + id := strings.TrimSpace(string(out)) + dockerCmd(c, "restart", id) + err := waitInspect(id, "{{ .State.Restarting }} {{ .State.Running }}", "false true", 15*time.Second) + c.Assert(err, checker.IsNil) + + out, _ = dockerCmd(c, "ps") + c.Assert(out, checker.Contains, id[:12], check.Commentf("container should be restarted instead of removed: %v", out)) + + // Kill the container to make sure it will be removed + dockerCmd(c, "kill", id) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_rm_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_rm_test.go new file mode 100644 index 000000000..d281704a7 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_rm_test.go @@ -0,0 +1,87 @@ +package main + +import ( + "io/ioutil" + "os" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/go-check/check" ) + +func (s *DockerSuite) TestRmContainerWithRemovedVolume(c *check.C) { + testRequires(c, SameHostDaemon) + + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + + tempDir, err := ioutil.TempDir("", "test-rm-container-with-removed-volume-") + if err != nil { + c.Fatalf("failed to create temporary directory: %v", err) + } + defer os.RemoveAll(tempDir) + + dockerCmd(c, "run", "--name", "losemyvolumes", "-v", tempDir+":"+prefix+slash+"test", "busybox", "true") + + err = os.RemoveAll(tempDir) + c.Assert(err, check.IsNil) + + dockerCmd(c, "rm", "-v", "losemyvolumes") +} + +func (s *DockerSuite) TestRmContainerWithVolume(c *check.C) { + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + + dockerCmd(c, "run", "--name", "foo", "-v", prefix+slash+"srv", "busybox", "true") + + dockerCmd(c, "rm", "-v", "foo") +} + +func (s *DockerSuite) 
TestRmContainerRunning(c *check.C) { + createRunningContainer(c, "foo") + + res, _, err := dockerCmdWithError("rm", "foo") + c.Assert(err, checker.NotNil, check.Commentf("Expected error, can't rm a running container")) + c.Assert(res, checker.Contains, "cannot remove a running container") +} + +func (s *DockerSuite) TestRmContainerForceRemoveRunning(c *check.C) { + createRunningContainer(c, "foo") + + // Stop then remove with -f + dockerCmd(c, "rm", "-f", "foo") +} + +func (s *DockerSuite) TestRmContainerOrphaning(c *check.C) { + dockerfile1 := `FROM busybox:latest + ENTRYPOINT ["true"]` + img := "test-container-orphaning" + dockerfile2 := `FROM busybox:latest + ENTRYPOINT ["true"] + MAINTAINER Integration Tests` + + // build first dockerfile + buildImageSuccessfully(c, img, build.WithDockerfile(dockerfile1)) + img1 := getIDByName(c, img) + // run container on first image + dockerCmd(c, "run", img) + // rebuild dockerfile with a small addition at the end + buildImageSuccessfully(c, img, build.WithDockerfile(dockerfile2)) + // try to remove the image, should not error out. + out, _, err := dockerCmdWithError("rmi", img) + c.Assert(err, check.IsNil, check.Commentf("Expected to remove the image, but failed: %s", out)) + + // check if we deleted the first image + out, _ = dockerCmd(c, "images", "-q", "--no-trunc") + c.Assert(out, checker.Contains, img1, check.Commentf("Orphaned container (could not find %q in docker images): %s", img1, out)) +} + +func (s *DockerSuite) TestRmInvalidContainer(c *check.C) { + out, _, err := dockerCmdWithError("rm", "unknown") + c.Assert(err, checker.NotNil, check.Commentf("Expected error on rm unknown container, got none")) + c.Assert(out, checker.Contains, "No such container") +} + +func createRunningContainer(c *check.C, name string) { + runSleepingContainer(c, "-dt", "--name", name) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_rmi_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_rmi_test.go new file mode 100644 index 000000000..afbc4c2fa --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_rmi_test.go @@ -0,0 +1,338 @@ +package main + +import ( + "fmt" + "strings" + "time" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/pkg/stringid" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" ) + +func (s *DockerSuite) TestRmiWithContainerFails(c *check.C) { + errSubstr := "is using it" + + // create a container + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + + cleanedContainerID := strings.TrimSpace(out) + + // try to delete the image + out, _, err := dockerCmdWithError("rmi", "busybox") + // Container is using image, should not be able to rmi + c.Assert(err, checker.NotNil) + // Container is using image, error message should contain errSubstr + c.Assert(out, checker.Contains, errSubstr, check.Commentf("Container: %q", cleanedContainerID)) + + // make sure it didn't delete the busybox name + images, _ := dockerCmd(c, "images") + // The name 'busybox' should not have been removed from images + c.Assert(images, checker.Contains, "busybox") +} + +func (s *DockerSuite) TestRmiTag(c *check.C) { + imagesBefore, _ := dockerCmd(c, "images", "-a") + dockerCmd(c, "tag", "busybox", "utest:tag1") + dockerCmd(c, "tag", "busybox", "utest/docker:tag2") + dockerCmd(c, "tag", "busybox", "utest:5000/docker:tag3") + { + imagesAfter, _ := 
dockerCmd(c, "images", "-a") + c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n")+3, check.Commentf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)) + } + dockerCmd(c, "rmi", "utest/docker:tag2") + { + imagesAfter, _ := dockerCmd(c, "images", "-a") + c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n")+2, check.Commentf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)) + } + dockerCmd(c, "rmi", "utest:5000/docker:tag3") + { + imagesAfter, _ := dockerCmd(c, "images", "-a") + c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n")+1, check.Commentf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)) + + } + dockerCmd(c, "rmi", "utest:tag1") + { + imagesAfter, _ := dockerCmd(c, "images", "-a") + c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n"), check.Commentf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)) + + } +} + +func (s *DockerSuite) TestRmiImgIDMultipleTag(c *check.C) { + out := cli.DockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir '/busybox-one'").Combined() + containerID := strings.TrimSpace(out) + + // Wait for it to exit as cannot commit a running container on Windows, and + // it will take a few seconds to exit + if testEnv.DaemonPlatform() == "windows" { + cli.WaitExited(c, containerID, 60*time.Second) + } + + cli.DockerCmd(c, "commit", containerID, "busybox-one") + + imagesBefore := cli.DockerCmd(c, "images", "-a").Combined() + cli.DockerCmd(c, "tag", "busybox-one", "busybox-one:tag1") + cli.DockerCmd(c, "tag", "busybox-one", "busybox-one:tag2") + + imagesAfter := cli.DockerCmd(c, "images", "-a").Combined() + // tag busybox to create 2 more images with same imageID + c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n")+2, check.Commentf("docker images shows: %q\n", imagesAfter)) + + imgID := inspectField(c, "busybox-one:tag1", "Id") + + // run a container with the image + out = runSleepingContainerInImage(c, "busybox-one") + containerID = strings.TrimSpace(out) + + // first checkout without force it fails + // rmi tagged in multiple repos should have failed without force + cli.Docker(cli.Args("rmi", imgID)).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: fmt.Sprintf("conflict: unable to delete %s (cannot be forced) - image is being used by running container %s", stringid.TruncateID(imgID), stringid.TruncateID(containerID)), + }) + + cli.DockerCmd(c, "stop", containerID) + cli.DockerCmd(c, "rmi", "-f", imgID) + + imagesAfter = cli.DockerCmd(c, "images", "-a").Combined() + // rmi -f failed, image still exists + c.Assert(imagesAfter, checker.Not(checker.Contains), imgID[:12], check.Commentf("ImageID:%q; ImagesAfter: %q", imgID, imagesAfter)) +} + +func (s *DockerSuite) TestRmiImgIDForce(c *check.C) { + out := cli.DockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir '/busybox-test'").Combined() + containerID := strings.TrimSpace(out) + + // Wait for it to exit as cannot commit a running container on Windows, and + // it will take a few seconds to exit + if testEnv.DaemonPlatform() == "windows" { + cli.WaitExited(c, containerID, 60*time.Second) + } + + cli.DockerCmd(c, "commit", containerID, "busybox-test") + + imagesBefore := cli.DockerCmd(c, "images", "-a").Combined() + cli.DockerCmd(c, "tag", "busybox-test", "utest:tag1") + cli.DockerCmd(c, "tag", "busybox-test", "utest:tag2") + cli.DockerCmd(c, "tag", "busybox-test", 
"utest/docker:tag3") + cli.DockerCmd(c, "tag", "busybox-test", "utest:5000/docker:tag4") + { + imagesAfter := cli.DockerCmd(c, "images", "-a").Combined() + c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n")+4, check.Commentf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)) + } + imgID := inspectField(c, "busybox-test", "Id") + + // first checkout without force it fails + cli.Docker(cli.Args("rmi", imgID)).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "(must be forced) - image is referenced in multiple repositories", + }) + + cli.DockerCmd(c, "rmi", "-f", imgID) + { + imagesAfter := cli.DockerCmd(c, "images", "-a").Combined() + // rmi failed, image still exists + c.Assert(imagesAfter, checker.Not(checker.Contains), imgID[:12]) + } +} + +// See https://github.com/docker/docker/issues/14116 +func (s *DockerSuite) TestRmiImageIDForceWithRunningContainersAndMultipleTags(c *check.C) { + dockerfile := "FROM busybox\nRUN echo test 14116\n" + buildImageSuccessfully(c, "test-14116", build.WithDockerfile(dockerfile)) + imgID := getIDByName(c, "test-14116") + + newTag := "newtag" + dockerCmd(c, "tag", imgID, newTag) + runSleepingContainerInImage(c, imgID) + + out, _, err := dockerCmdWithError("rmi", "-f", imgID) + // rmi -f should not delete image with running containers + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "(cannot be forced) - image is being used by running container") +} + +func (s *DockerSuite) TestRmiTagWithExistingContainers(c *check.C) { + container := "test-delete-tag" + newtag := "busybox:newtag" + bb := "busybox:latest" + dockerCmd(c, "tag", bb, newtag) + + dockerCmd(c, "run", "--name", container, bb, "/bin/true") + + out, _ := dockerCmd(c, "rmi", newtag) + c.Assert(strings.Count(out, "Untagged: "), checker.Equals, 1) +} + +func (s *DockerSuite) TestRmiForceWithExistingContainers(c *check.C) { + image := "busybox-clone" + + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "build", "--no-cache", "-t", image, "-"}, + Stdin: strings.NewReader(`FROM busybox +MAINTAINER foo`), + }).Assert(c, icmd.Success) + + dockerCmd(c, "run", "--name", "test-force-rmi", image, "/bin/true") + + dockerCmd(c, "rmi", "-f", image) +} + +func (s *DockerSuite) TestRmiWithMultipleRepositories(c *check.C) { + newRepo := "127.0.0.1:5000/busybox" + oldRepo := "busybox" + newTag := "busybox:test" + dockerCmd(c, "tag", oldRepo, newRepo) + + dockerCmd(c, "run", "--name", "test", oldRepo, "touch", "/abcd") + + dockerCmd(c, "commit", "test", newTag) + + out, _ := dockerCmd(c, "rmi", newTag) + c.Assert(out, checker.Contains, "Untagged: "+newTag) +} + +func (s *DockerSuite) TestRmiForceWithMultipleRepositories(c *check.C) { + imageName := "rmiimage" + tag1 := imageName + ":tag1" + tag2 := imageName + ":tag2" + + buildImageSuccessfully(c, tag1, build.WithDockerfile(`FROM busybox + MAINTAINER "docker"`)) + dockerCmd(c, "tag", tag1, tag2) + + out, _ := dockerCmd(c, "rmi", "-f", tag2) + c.Assert(out, checker.Contains, "Untagged: "+tag2) + c.Assert(out, checker.Not(checker.Contains), "Untagged: "+tag1) + + // Check built image still exists + images, _ := dockerCmd(c, "images", "-a") + c.Assert(images, checker.Contains, imageName, check.Commentf("Built image missing %q; Images: %q", imageName, images)) +} + +func (s *DockerSuite) TestRmiBlank(c *check.C) { + out, _, err := dockerCmdWithError("rmi", " ") + // Should have failed to delete ' ' image + c.Assert(err, checker.NotNil) + // Wrong error message generated + c.Assert(out, 
checker.Not(checker.Contains), "no such id", check.Commentf("out: %s", out)) + // the expected blank-name error message must be present + c.Assert(out, checker.Contains, "image name cannot be blank", check.Commentf("out: %s", out)) +} + +func (s *DockerSuite) TestRmiContainerImageNotFound(c *check.C) { + // Build 2 images for testing. + imageNames := []string{"test1", "test2"} + imageIds := make([]string, 2) + for i, name := range imageNames { + dockerfile := fmt.Sprintf("FROM busybox\nMAINTAINER %s\nRUN echo %s\n", name, name) + buildImageSuccessfully(c, name, build.WithoutCache, build.WithDockerfile(dockerfile)) + id := getIDByName(c, name) + imageIds[i] = id + } + + // Create a long-running container. + runSleepingContainerInImage(c, imageNames[0]) + + // Create a stopped container, and then force remove its image. + dockerCmd(c, "run", imageNames[1], "true") + dockerCmd(c, "rmi", "-f", imageIds[1]) + + // Try to remove the image of the running container and see if it fails as expected. + out, _, err := dockerCmdWithError("rmi", "-f", imageIds[0]) + // The image of the running container should not be removed. + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "image is being used by running container", check.Commentf("out: %s", out)) +} + +// #13422 +func (s *DockerSuite) TestRmiUntagHistoryLayer(c *check.C) { + image := "tmp1" + // Build an image for testing. + dockerfile := `FROM busybox +MAINTAINER foo +RUN echo 0 #layer0 +RUN echo 1 #layer1 +RUN echo 2 #layer2 +` + buildImageSuccessfully(c, image, build.WithoutCache, build.WithDockerfile(dockerfile)) + out, _ := dockerCmd(c, "history", "-q", image) + ids := strings.Split(out, "\n") + idToTag := ids[2] + + // Tag layer0 to "tmp2". + newTag := "tmp2" + dockerCmd(c, "tag", idToTag, newTag) + // Create a container based on "tmp1". + dockerCmd(c, "run", "-d", image, "true") + + // See if "tmp2" can be untagged. + out, _ = dockerCmd(c, "rmi", newTag) + // Expected 1 untagged entry + c.Assert(strings.Count(out, "Untagged: "), checker.Equals, 1, check.Commentf("out: %s", out)) + + // Now let's add the tag again and create a container based on it. + dockerCmd(c, "tag", idToTag, newTag) + out, _ = dockerCmd(c, "run", "-d", newTag, "true") + cid := strings.TrimSpace(out) + + // At this point we have 2 containers, one based on layer2 and another based on layer0. + // Try to untag "tmp2" without the -f flag. + out, _, err := dockerCmdWithError("rmi", newTag) + // should not be untagged without the -f flag + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, cid[:12]) + c.Assert(out, checker.Contains, "(must force)") + + // Add the -f flag and test again. 
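+ // With -f, untagging succeeds even though a container created from the tag still exists.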
+ out, _ = dockerCmd(c, "rmi", "-f", newTag) + // should be allowed to untag with the -f flag + c.Assert(out, checker.Contains, fmt.Sprintf("Untagged: %s:latest", newTag)) +} + +func (*DockerSuite) TestRmiParentImageFail(c *check.C) { + buildImageSuccessfully(c, "test", build.WithDockerfile(` + FROM busybox + RUN echo hello`)) + + id := inspectField(c, "busybox", "ID") + out, _, err := dockerCmdWithError("rmi", id) + c.Assert(err, check.NotNil) + if !strings.Contains(out, "image has dependent child images") { + c.Fatalf("rmi should have failed because it's a parent image, got %s", out) + } +} + +func (s *DockerSuite) TestRmiWithParentInUse(c *check.C) { + out, _ := dockerCmd(c, "create", "busybox") + cID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "commit", cID) + imageID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "create", imageID) + cID = strings.TrimSpace(out) + + out, _ = dockerCmd(c, "commit", cID) + imageID = strings.TrimSpace(out) + + dockerCmd(c, "rmi", imageID) +} + +// #18873 +func (s *DockerSuite) TestRmiByIDHardConflict(c *check.C) { + dockerCmd(c, "create", "busybox") + + imgID := inspectField(c, "busybox:latest", "Id") + + _, _, err := dockerCmdWithError("rmi", imgID[:12]) + c.Assert(err, checker.NotNil) + + // check that tag was not removed + imgID2 := inspectField(c, "busybox:latest", "Id") + c.Assert(imgID, checker.Equals, imgID2) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_run_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_run_test.go new file mode 100644 index 000000000..544cfdf9a --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_run_test.go @@ -0,0 +1,4619 @@ +package main + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "os/exec" + "path" + "path/filepath" + "reflect" + "regexp" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/integration-cli/cli/build/fakecontext" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/docker/pkg/testutil" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/docker/docker/runconfig" + "github.com/docker/go-connections/nat" + "github.com/docker/libnetwork/resolvconf" + "github.com/docker/libnetwork/types" + "github.com/go-check/check" + libcontainerUser "github.com/opencontainers/runc/libcontainer/user" +) + +// "test123" should be printed by docker run +func (s *DockerSuite) TestRunEchoStdout(c *check.C) { + out, _ := dockerCmd(c, "run", "busybox", "echo", "test123") + if out != "test123\n" { + c.Fatalf("container should've printed 'test123', got '%s'", out) + } +} + +// "test" should be printed +func (s *DockerSuite) TestRunEchoNamedContainer(c *check.C) { + out, _ := dockerCmd(c, "run", "--name", "testfoonamedcontainer", "busybox", "echo", "test") + if out != "test\n" { + c.Errorf("container should've printed 'test'") + } +} + +// docker run should not leak file descriptors. This test relies on Unix +// specific functionality and cannot run on Windows. 
+func (s *DockerSuite) TestRunLeakyFileDescriptors(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "busybox", "ls", "-C", "/proc/self/fd") + + // normally, we should only get 0, 1, and 2, but 3 gets created by "ls" when it does "opendir" on the "fd" directory + if out != "0 1 2 3\n" { + c.Errorf("container should've printed '0 1 2 3', not: %s", out) + } +} + +// it should be possible to lookup Google DNS +// this will fail when Internet access is unavailable +func (s *DockerSuite) TestRunLookupGoogleDNS(c *check.C) { + testRequires(c, Network, NotArm) + if testEnv.DaemonPlatform() == "windows" { + // nslookup isn't present in Windows busybox. Is built-in. Further, + // nslookup isn't present in nanoserver. Hence just use PowerShell... + dockerCmd(c, "run", testEnv.MinimalBaseImage(), "powershell", "Resolve-DNSName", "google.com") + } else { + dockerCmd(c, "run", "busybox", "nslookup", "google.com") + } + +} + +// the exit code should be 0 +func (s *DockerSuite) TestRunExitCodeZero(c *check.C) { + dockerCmd(c, "run", "busybox", "true") +} + +// the exit code should be 1 +func (s *DockerSuite) TestRunExitCodeOne(c *check.C) { + _, exitCode, err := dockerCmdWithError("run", "busybox", "false") + c.Assert(err, checker.NotNil) + c.Assert(exitCode, checker.Equals, 1) +} + +// it should be possible to pipe in data via stdin to a process running in a container +func (s *DockerSuite) TestRunStdinPipe(c *check.C) { + // TODO Windows: This needs some work to make compatible. + testRequires(c, DaemonIsLinux) + result := icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "run", "-i", "-a", "stdin", "busybox", "cat"}, + Stdin: strings.NewReader("blahblah"), + }) + result.Assert(c, icmd.Success) + out := result.Stdout() + + out = strings.TrimSpace(out) + dockerCmd(c, "wait", out) + + logsOut, _ := dockerCmd(c, "logs", out) + + containerLogs := strings.TrimSpace(logsOut) + if containerLogs != "blahblah" { + c.Errorf("logs didn't print the container's logs %s", containerLogs) + } + + dockerCmd(c, "rm", out) +} + +// the container's ID should be printed when starting a container in detached mode +func (s *DockerSuite) TestRunDetachedContainerIDPrinting(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + + out = strings.TrimSpace(out) + dockerCmd(c, "wait", out) + + rmOut, _ := dockerCmd(c, "rm", out) + + rmOut = strings.TrimSpace(rmOut) + if rmOut != out { + c.Errorf("rm didn't print the container ID %s %s", out, rmOut) + } +} + +// the working directory should be set correctly +func (s *DockerSuite) TestRunWorkingDirectory(c *check.C) { + dir := "/root" + image := "busybox" + if testEnv.DaemonPlatform() == "windows" { + dir = `C:/Windows` + } + + // First with -w + out, _ := dockerCmd(c, "run", "-w", dir, image, "pwd") + out = strings.TrimSpace(out) + if out != dir { + c.Errorf("-w failed to set working directory") + } + + // Then with --workdir + out, _ = dockerCmd(c, "run", "--workdir", dir, image, "pwd") + out = strings.TrimSpace(out) + if out != dir { + c.Errorf("--workdir failed to set working directory") + } +} + +// pinging Google's DNS resolver should fail when we disable the networking +func (s *DockerSuite) TestRunWithoutNetworking(c *check.C) { + count := "-c" + image := "busybox" + if testEnv.DaemonPlatform() == "windows" { + count = "-n" + image = testEnv.MinimalBaseImage() + } + + // First using the long form --net + out, exitCode, err := dockerCmdWithError("run", "--net=none", image, "ping", count, "1", "8.8.8.8") + if err != nil && 
exitCode != 1 { + c.Fatal(out, err) + } + if exitCode != 1 { + c.Errorf("--net=none should've disabled the network; the container shouldn't have been able to ping 8.8.8.8") + } +} + +// test that --link can use a container name as the link target +func (s *DockerSuite) TestRunLinksContainerWithContainerName(c *check.C) { + // TODO Windows: This test cannot run on a Windows daemon as the networking + // settings are not populated back yet on inspect. + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-i", "-t", "-d", "--name", "parent", "busybox") + + ip := inspectField(c, "parent", "NetworkSettings.Networks.bridge.IPAddress") + + out, _ := dockerCmd(c, "run", "--link", "parent:test", "busybox", "/bin/cat", "/etc/hosts") + if !strings.Contains(out, ip+" test") { + c.Fatalf("linking to the target container by name failed") + } +} + +// test that --link can use a container ID as the link target +func (s *DockerSuite) TestRunLinksContainerWithContainerID(c *check.C) { + // TODO Windows: This test cannot run on a Windows daemon as the networking + // settings are not populated back yet on inspect. + testRequires(c, DaemonIsLinux) + cID, _ := dockerCmd(c, "run", "-i", "-t", "-d", "busybox") + + cID = strings.TrimSpace(cID) + ip := inspectField(c, cID, "NetworkSettings.Networks.bridge.IPAddress") + + out, _ := dockerCmd(c, "run", "--link", cID+":test", "busybox", "/bin/cat", "/etc/hosts") + if !strings.Contains(out, ip+" test") { + c.Fatalf("linking to the target container by ID failed") + } +} + +func (s *DockerSuite) TestUserDefinedNetworkLinks(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + dockerCmd(c, "network", "create", "-d", "bridge", "udlinkNet") + + dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + + // run a container in user-defined network udlinkNet with a link for an existing container + // and a link for a container that doesn't exist + dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=second", "--link=first:foo", + "--link=third:bar", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // ping to first and its alias foo must succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") + c.Assert(err, check.IsNil) + + // ping to third and its alias must fail + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "third") + c.Assert(err, check.NotNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "bar") + c.Assert(err, check.NotNil) + + // start third container now + dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=third", "busybox", "top") + c.Assert(waitRun("third"), check.IsNil) + + // ping to third and its alias must succeed now + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "third") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "bar") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestUserDefinedNetworkLinksWithRestart(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + dockerCmd(c, "network", "create", "-d", "bridge", "udlinkNet") + + dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + + dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=second", "--link=first:foo", + "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // ping to 
first and its alias foo must succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") + c.Assert(err, check.IsNil) + + // Restart first container + dockerCmd(c, "restart", "first") + c.Assert(waitRun("first"), check.IsNil) + + // ping to first and its alias foo must still succeed + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") + c.Assert(err, check.IsNil) + + // Restart second container + dockerCmd(c, "restart", "second") + c.Assert(waitRun("second"), check.IsNil) + + // ping to first and its alias foo must still succeed + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestRunWithNetAliasOnDefaultNetworks(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + + defaults := []string{"bridge", "host", "none"} + for _, net := range defaults { + out, _, err := dockerCmdWithError("run", "-d", "--net", net, "--net-alias", "alias_"+net, "busybox", "top") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndAlias.Error()) + } +} + +func (s *DockerSuite) TestUserDefinedNetworkAlias(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + dockerCmd(c, "network", "create", "-d", "bridge", "net1") + + cid1, _ := dockerCmd(c, "run", "-d", "--net=net1", "--name=first", "--net-alias=foo1", "--net-alias=foo2", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + + // Check if default short-id alias is added automatically + id := strings.TrimSpace(cid1) + aliases := inspectField(c, id, "NetworkSettings.Networks.net1.Aliases") + c.Assert(aliases, checker.Contains, stringid.TruncateID(id)) + + cid2, _ := dockerCmd(c, "run", "-d", "--net=net1", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // Check if default short-id alias is added automatically + id = strings.TrimSpace(cid2) + aliases = inspectField(c, id, "NetworkSettings.Networks.net1.Aliases") + c.Assert(aliases, checker.Contains, stringid.TruncateID(id)) + + // ping to first and its network-scoped aliases + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo1") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo2") + c.Assert(err, check.IsNil) + // ping first container's short-id alias + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", stringid.TruncateID(cid1)) + c.Assert(err, check.IsNil) + + // Restart first container + dockerCmd(c, "restart", "first") + c.Assert(waitRun("first"), check.IsNil) + + // ping to first and its network-scoped aliases must succeed + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo1") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo2") + c.Assert(err, check.IsNil) + // ping first container's short-id alias + _, _, err = dockerCmdWithError("exec", "second", 
"ping", "-c", "1", stringid.TruncateID(cid1)) + c.Assert(err, check.IsNil) +} + +// Issue 9677. +func (s *DockerSuite) TestRunWithDaemonFlags(c *check.C) { + out, _, err := dockerCmdWithError("--exec-opt", "foo=bar", "run", "-i", "busybox", "true") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "unknown flag: --exec-opt") +} + +// Regression test for #4979 +func (s *DockerSuite) TestRunWithVolumesFromExited(c *check.C) { + + var ( + out string + exitCode int + ) + + // Create a file in a volume + if testEnv.DaemonPlatform() == "windows" { + out, exitCode = dockerCmd(c, "run", "--name", "test-data", "--volume", `c:\some\dir`, testEnv.MinimalBaseImage(), "cmd", "/c", `echo hello > c:\some\dir\file`) + } else { + out, exitCode = dockerCmd(c, "run", "--name", "test-data", "--volume", "/some/dir", "busybox", "touch", "/some/dir/file") + } + if exitCode != 0 { + c.Fatal("1", out, exitCode) + } + + // Read the file from another container using --volumes-from to access the volume in the second container + if testEnv.DaemonPlatform() == "windows" { + out, exitCode = dockerCmd(c, "run", "--volumes-from", "test-data", testEnv.MinimalBaseImage(), "cmd", "/c", `type c:\some\dir\file`) + } else { + out, exitCode = dockerCmd(c, "run", "--volumes-from", "test-data", "busybox", "cat", "/some/dir/file") + } + if exitCode != 0 { + c.Fatal("2", out, exitCode) + } +} + +// Volume path is a symlink which also exists on the host, and the host side is a file not a dir +// But the volume call is just a normal volume, not a bind mount +func (s *DockerSuite) TestRunCreateVolumesInSymlinkDir(c *check.C) { + var ( + dockerFile string + containerPath string + cmd string + ) + // This test cannot run on a Windows daemon as + // Windows does not support symlinks inside a volume path + testRequires(c, SameHostDaemon, DaemonIsLinux) + name := "test-volume-symlink" + + dir, err := ioutil.TempDir("", name) + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(dir) + + // In the case of Windows to Windows CI, if the machine is setup so that + // the temp directory is not the C: drive, this test is invalid and will + // not work. 
+ if testEnv.DaemonPlatform() == "windows" && strings.ToLower(dir[:1]) != "c" { + c.Skip("Requires TEMP to point to C: drive") + } + + f, err := os.OpenFile(filepath.Join(dir, "test"), os.O_CREATE, 0700) + if err != nil { + c.Fatal(err) + } + f.Close() + + if testEnv.DaemonPlatform() == "windows" { + dockerFile = fmt.Sprintf("FROM %s\nRUN mkdir %s\nRUN mklink /D c:\\test %s", testEnv.MinimalBaseImage(), dir, dir) + containerPath = `c:\test\test` + cmd = "tasklist" + } else { + dockerFile = fmt.Sprintf("FROM busybox\nRUN mkdir -p %s\nRUN ln -s %s /test", dir, dir) + containerPath = "/test/test" + cmd = "true" + } + buildImageSuccessfully(c, name, build.WithDockerfile(dockerFile)) + dockerCmd(c, "run", "-v", containerPath, name, cmd) +} + +// Volume path is a symlink in the container +func (s *DockerSuite) TestRunCreateVolumesInSymlinkDir2(c *check.C) { + var ( + dockerFile string + containerPath string + cmd string + ) + // This test cannot run on a Windows daemon as + // Windows does not support symlinks inside a volume path + testRequires(c, SameHostDaemon, DaemonIsLinux) + name := "test-volume-symlink2" + + if testEnv.DaemonPlatform() == "windows" { + dockerFile = fmt.Sprintf("FROM %s\nRUN mkdir c:\\%s\nRUN mklink /D c:\\test c:\\%s", testEnv.MinimalBaseImage(), name, name) + containerPath = `c:\test\test` + cmd = "tasklist" + } else { + dockerFile = fmt.Sprintf("FROM busybox\nRUN mkdir -p /%s\nRUN ln -s /%s /test", name, name) + containerPath = "/test/test" + cmd = "true" + } + buildImageSuccessfully(c, name, build.WithDockerfile(dockerFile)) + dockerCmd(c, "run", "-v", containerPath, name, cmd) +} + +func (s *DockerSuite) TestRunVolumesMountedAsReadonly(c *check.C) { + if _, code, err := dockerCmdWithError("run", "-v", "/test:/test:ro", "busybox", "touch", "/test/somefile"); err == nil || code == 0 { + c.Fatalf("run should fail because volume is ro: exit code %d", code) + } +} + +func (s *DockerSuite) TestRunVolumesFromInReadonlyModeFails(c *check.C) { + var ( + volumeDir string + fileInVol string + ) + if testEnv.DaemonPlatform() == "windows" { + volumeDir = `c:/test` // Forward-slash as using busybox + fileInVol = `c:/test/file` + } else { + testRequires(c, DaemonIsLinux) + volumeDir = "/test" + fileInVol = `/test/file` + } + dockerCmd(c, "run", "--name", "parent", "-v", volumeDir, "busybox", "true") + + if _, code, err := dockerCmdWithError("run", "--volumes-from", "parent:ro", "busybox", "touch", fileInVol); err == nil || code == 0 { + c.Fatalf("run should fail because volume is ro: exit code %d", code) + } +} + +// Regression test for #1201 +func (s *DockerSuite) TestRunVolumesFromInReadWriteMode(c *check.C) { + var ( + volumeDir string + fileInVol string + ) + if testEnv.DaemonPlatform() == "windows" { + volumeDir = `c:/test` // Forward-slash as using busybox + fileInVol = `c:/test/file` + } else { + volumeDir = "/test" + fileInVol = "/test/file" + } + + dockerCmd(c, "run", "--name", "parent", "-v", volumeDir, "busybox", "true") + dockerCmd(c, "run", "--volumes-from", "parent:rw", "busybox", "touch", fileInVol) + + if out, _, err := dockerCmdWithError("run", "--volumes-from", "parent:bar", "busybox", "touch", fileInVol); err == nil || !strings.Contains(out, `invalid mode: bar`) { + c.Fatalf("running --volumes-from parent:bar should have failed with invalid mode: %q", out) + } + + dockerCmd(c, "run", "--volumes-from", "parent", "busybox", "touch", fileInVol) +} + +func (s *DockerSuite) TestVolumesFromGetsProperMode(c *check.C) { + testRequires(c, SameHostDaemon) + prefix, slash := 
getPrefixAndSlashFromDaemonPlatform() + hostpath := testutil.RandomTmpDirPath("test", testEnv.DaemonPlatform()) + if err := os.MkdirAll(hostpath, 0755); err != nil { + c.Fatalf("Failed to create %s: %q", hostpath, err) + } + defer os.RemoveAll(hostpath) + + dockerCmd(c, "run", "--name", "parent", "-v", hostpath+":"+prefix+slash+"test:ro", "busybox", "true") + + // Expect this "rw" mode to be ignored since the inherited volume is "ro" + if _, _, err := dockerCmdWithError("run", "--volumes-from", "parent:rw", "busybox", "touch", prefix+slash+"test"+slash+"file"); err == nil { + c.Fatal("Expected volumes-from to inherit read-only volume even when passing in `rw`") + } + + dockerCmd(c, "run", "--name", "parent2", "-v", hostpath+":"+prefix+slash+"test:ro", "busybox", "true") + + // Expect this to be read-only since both are "ro" + if _, _, err := dockerCmdWithError("run", "--volumes-from", "parent2:ro", "busybox", "touch", prefix+slash+"test"+slash+"file"); err == nil { + c.Fatal("Expected volumes-from to inherit read-only volume even when passing in `ro`") + } +} + +// Test for GH#10618 +func (s *DockerSuite) TestRunNoDupVolumes(c *check.C) { + path1 := testutil.RandomTmpDirPath("test1", testEnv.DaemonPlatform()) + path2 := testutil.RandomTmpDirPath("test2", testEnv.DaemonPlatform()) + + someplace := ":/someplace" + if testEnv.DaemonPlatform() == "windows" { + // Windows requires that the source directory exists before calling HCS + testRequires(c, SameHostDaemon) + someplace = `:c:\someplace` + if err := os.MkdirAll(path1, 0755); err != nil { + c.Fatalf("Failed to create %s: %q", path1, err) + } + defer os.RemoveAll(path1) + if err := os.MkdirAll(path2, 0755); err != nil { + c.Fatalf("Failed to create %s: %q", path2, err) + } + defer os.RemoveAll(path2) + } + mountstr1 := path1 + someplace + mountstr2 := path2 + someplace + + if out, _, err := dockerCmdWithError("run", "-v", mountstr1, "-v", mountstr2, "busybox", "true"); err == nil { + c.Fatal("Expected error about duplicate mount definitions") + } else { + if !strings.Contains(out, "Duplicate mount point") { + c.Fatalf("Expected 'duplicate mount point' error, got %v", out) + } + } + + // Test for https://github.com/docker/docker/issues/22093 + volumename1 := "test1" + volumename2 := "test2" + volume1 := volumename1 + someplace + volume2 := volumename2 + someplace + if out, _, err := dockerCmdWithError("run", "-v", volume1, "-v", volume2, "busybox", "true"); err == nil { + c.Fatal("Expected error about duplicate mount definitions") + } else { + if !strings.Contains(out, "Duplicate mount point") { + c.Fatalf("Expected 'duplicate mount point' error, got %v", out) + } + } + // even though the run failed, one of the named volumes should have been created; + // remove whichever one exists + out, _ := dockerCmd(c, "volume", "ls") + if strings.Contains(out, volumename1) { + dockerCmd(c, "volume", "rm", volumename1) + } else { + dockerCmd(c, "volume", "rm", volumename2) + } +} + +// Test for #1351 +func (s *DockerSuite) TestRunApplyVolumesFromBeforeVolumes(c *check.C) { + prefix := "" + if testEnv.DaemonPlatform() == "windows" { + prefix = `c:` + } + dockerCmd(c, "run", "--name", "parent", "-v", prefix+"/test", "busybox", "touch", prefix+"/test/foo") + dockerCmd(c, "run", "--volumes-from", "parent", "-v", prefix+"/test", "busybox", "cat", prefix+"/test/foo") +} + +func (s *DockerSuite) TestRunMultipleVolumesFrom(c *check.C) { + prefix := "" + if testEnv.DaemonPlatform() == "windows" { + prefix = `c:` + } + dockerCmd(c, "run", 
"--name", "parent1", "-v", prefix+"/test", "busybox", "touch", prefix+"/test/foo") + dockerCmd(c, "run", "--name", "parent2", "-v", prefix+"/other", "busybox", "touch", prefix+"/other/bar") + dockerCmd(c, "run", "--volumes-from", "parent1", "--volumes-from", "parent2", "busybox", "sh", "-c", "cat /test/foo && cat /other/bar") +} + +// this tests verifies the ID format for the container +func (s *DockerSuite) TestRunVerifyContainerID(c *check.C) { + out, exit, err := dockerCmdWithError("run", "-d", "busybox", "true") + if err != nil { + c.Fatal(err) + } + if exit != 0 { + c.Fatalf("expected exit code 0 received %d", exit) + } + + match, err := regexp.MatchString("^[0-9a-f]{64}$", strings.TrimSuffix(out, "\n")) + if err != nil { + c.Fatal(err) + } + if !match { + c.Fatalf("Invalid container ID: %s", out) + } +} + +// Test that creating a container with a volume doesn't crash. Regression test for #995. +func (s *DockerSuite) TestRunCreateVolume(c *check.C) { + prefix := "" + if testEnv.DaemonPlatform() == "windows" { + prefix = `c:` + } + dockerCmd(c, "run", "-v", prefix+"/var/lib/data", "busybox", "true") +} + +// Test that creating a volume with a symlink in its path works correctly. Test for #5152. +// Note that this bug happens only with symlinks with a target that starts with '/'. +func (s *DockerSuite) TestRunCreateVolumeWithSymlink(c *check.C) { + // Cannot run on Windows as relies on Linux-specific functionality (sh -c mount...) + testRequires(c, DaemonIsLinux) + workingDirectory, err := ioutil.TempDir("", "TestRunCreateVolumeWithSymlink") + image := "docker-test-createvolumewithsymlink" + + buildCmd := exec.Command(dockerBinary, "build", "-t", image, "-") + buildCmd.Stdin = strings.NewReader(`FROM busybox + RUN ln -s home /bar`) + buildCmd.Dir = workingDirectory + err = buildCmd.Run() + if err != nil { + c.Fatalf("could not build '%s': %v", image, err) + } + + _, exitCode, err := dockerCmdWithError("run", "-v", "/bar/foo", "--name", "test-createvolumewithsymlink", image, "sh", "-c", "mount | grep -q /home/foo") + if err != nil || exitCode != 0 { + c.Fatalf("[run] err: %v, exitcode: %d", err, exitCode) + } + + volPath, err := inspectMountSourceField("test-createvolumewithsymlink", "/bar/foo") + c.Assert(err, checker.IsNil) + + _, exitCode, err = dockerCmdWithError("rm", "-v", "test-createvolumewithsymlink") + if err != nil || exitCode != 0 { + c.Fatalf("[rm] err: %v, exitcode: %d", err, exitCode) + } + + _, err = os.Stat(volPath) + if !os.IsNotExist(err) { + c.Fatalf("[open] (expecting 'file does not exist' error) err: %v, volPath: %s", err, volPath) + } +} + +// Tests that a volume path that has a symlink exists in a container mounting it with `--volumes-from`. 
+func (s *DockerSuite) TestRunVolumesFromSymlinkPath(c *check.C) { + // This test cannot run on a Windows daemon as + // Windows does not support symlinks inside a volume path + testRequires(c, DaemonIsLinux) + + workingDirectory, err := ioutil.TempDir("", "TestRunVolumesFromSymlinkPath") + c.Assert(err, checker.IsNil) + name := "docker-test-volumesfromsymlinkpath" + prefix := "" + dfContents := `FROM busybox + RUN ln -s home /foo + VOLUME ["/foo/bar"]` + + if testEnv.DaemonPlatform() == "windows" { + prefix = `c:` + dfContents = `FROM ` + testEnv.MinimalBaseImage() + ` + RUN mkdir c:\home + RUN mklink /D c:\foo c:\home + VOLUME ["c:/foo/bar"] + ENTRYPOINT c:\windows\system32\cmd.exe` + } + + buildCmd := exec.Command(dockerBinary, "build", "-t", name, "-") + buildCmd.Stdin = strings.NewReader(dfContents) + buildCmd.Dir = workingDirectory + err = buildCmd.Run() + if err != nil { + c.Fatalf("could not build 'docker-test-volumesfromsymlinkpath': %v", err) + } + + out, exitCode, err := dockerCmdWithError("run", "--name", "test-volumesfromsymlinkpath", name) + if err != nil || exitCode != 0 { + c.Fatalf("[run] (volume) err: %v, exitcode: %d, out: %s", err, exitCode, out) + } + + _, exitCode, err = dockerCmdWithError("run", "--volumes-from", "test-volumesfromsymlinkpath", "busybox", "sh", "-c", "ls "+prefix+"/foo | grep -q bar") + if err != nil || exitCode != 0 { + c.Fatalf("[run] err: %v, exitcode: %d", err, exitCode) + } +} + +func (s *DockerSuite) TestRunExitCode(c *check.C) { + var ( + exit int + err error + ) + + _, exit, err = dockerCmdWithError("run", "busybox", "/bin/sh", "-c", "exit 72") + + if err == nil { + c.Fatal("expected a non-nil error") + } + if exit != 72 { + c.Fatalf("expected exit code 72 received %d", exit) + } +} + +func (s *DockerSuite) TestRunUserDefaults(c *check.C) { + expected := "uid=0(root) gid=0(root)" + if testEnv.DaemonPlatform() == "windows" { + expected = "uid=1000(ContainerAdministrator) gid=1000(ContainerAdministrator)" + } + out, _ := dockerCmd(c, "run", "busybox", "id") + if !strings.Contains(out, expected) { + c.Fatalf("expected '%s' got %s", expected, out) + } +} + +func (s *DockerSuite) TestRunUserByName(c *check.C) { + // TODO Windows: This test cannot run on a Windows daemon as Windows does + // not support the use of -u + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-u", "root", "busybox", "id") + if !strings.Contains(out, "uid=0(root) gid=0(root)") { + c.Fatalf("expected root user got %s", out) + } +} + +func (s *DockerSuite) TestRunUserByID(c *check.C) { + // TODO Windows: This test cannot run on a Windows daemon as Windows does + // not support the use of -u + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-u", "1", "busybox", "id") + if !strings.Contains(out, "uid=1(daemon) gid=1(daemon)") { + c.Fatalf("expected daemon user got %s", out) + } +} + +func (s *DockerSuite) TestRunUserByIDBig(c *check.C) { + // TODO Windows: This test cannot run on a Windows daemon as Windows does + // not support the use of -u + testRequires(c, DaemonIsLinux, NotArm) + out, _, err := dockerCmdWithError("run", "-u", "2147483648", "busybox", "id") + if err == nil { + c.Fatal("expected an error, but got none:", out) + } + if !strings.Contains(strings.ToUpper(out), strings.ToUpper(libcontainerUser.ErrRange.Error())) { + c.Fatalf("expected error about uids range, got %s", out) + } +} + +func (s *DockerSuite) TestRunUserByIDNegative(c *check.C) { + // TODO Windows: This test cannot run on a Windows daemon as Windows does + // not support the 
use of -u + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "-u", "-1", "busybox", "id") + if err == nil { + c.Fatal("expected an error, but got none:", out) + } + if !strings.Contains(strings.ToUpper(out), strings.ToUpper(libcontainerUser.ErrRange.Error())) { + c.Fatalf("expected error about uids range, got %s", out) + } +} + +func (s *DockerSuite) TestRunUserByIDZero(c *check.C) { + // TODO Windows: This test cannot run on a Windows daemon as Windows does + // not support the use of -u + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "-u", "0", "busybox", "id") + if err != nil { + c.Fatal(err, out) + } + if !strings.Contains(out, "uid=0(root) gid=0(root) groups=10(wheel)") { + c.Fatalf("expected root user with wheel group, got %s", out) + } +} + +func (s *DockerSuite) TestRunUserNotFound(c *check.C) { + // TODO Windows: This test cannot run on a Windows daemon as Windows does + // not support the use of -u + testRequires(c, DaemonIsLinux) + _, _, err := dockerCmdWithError("run", "-u", "notme", "busybox", "id") + if err == nil { + c.Fatal("unknown user should cause container to fail") + } +} + +func (s *DockerSuite) TestRunTwoConcurrentContainers(c *check.C) { + sleepTime := "2" + group := sync.WaitGroup{} + group.Add(2) + + errChan := make(chan error, 2) + for i := 0; i < 2; i++ { + go func() { + defer group.Done() + _, _, err := dockerCmdWithError("run", "busybox", "sleep", sleepTime) + errChan <- err + }() + } + + group.Wait() + close(errChan) + + for err := range errChan { + c.Assert(err, check.IsNil) + } +} + +func (s *DockerSuite) TestRunEnvironment(c *check.C) { + // TODO Windows: Environment handling is different between Linux and + // Windows and this test currently relies on unix functionality. + testRequires(c, DaemonIsLinux) + result := icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "run", "-h", "testing", "-e=FALSE=true", "-e=TRUE", "-e=TRICKY", "-e=HOME=", "busybox", "env"}, + Env: append(os.Environ(), + "TRUE=false", + "TRICKY=tri\ncky\n", + ), + }) + result.Assert(c, icmd.Success) + + actualEnv := strings.Split(strings.TrimSpace(result.Combined()), "\n") + sort.Strings(actualEnv) + + goodEnv := []string{ + // The first two should not be tested here, those are "inherent" environment variables. This test validates + // the -e behavior, not the default environment variables (which could be subject to change) + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOSTNAME=testing", + "FALSE=true", + "TRUE=false", + "TRICKY=tri", + "cky", + "", + "HOME=/root", + } + sort.Strings(goodEnv) + if len(goodEnv) != len(actualEnv) { + c.Fatalf("Wrong environment: should be %d variables, not %d: %q", len(goodEnv), len(actualEnv), strings.Join(actualEnv, ", ")) + } + for i := range goodEnv { + if actualEnv[i] != goodEnv[i] { + c.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i]) + } + } +} + +func (s *DockerSuite) TestRunEnvironmentErase(c *check.C) { + // TODO Windows: Environment handling is different between Linux and + // Windows and this test currently relies on unix functionality. 
+ testRequires(c, DaemonIsLinux) + + // Make sure that env vars passed with -e that are not set in the local + // environment are removed from the container (if present) + + result := icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "run", "-e", "FOO", "-e", "HOSTNAME", "busybox", "env"}, + Env: appendBaseEnv(true), + }) + result.Assert(c, icmd.Success) + + actualEnv := strings.Split(strings.TrimSpace(result.Combined()), "\n") + sort.Strings(actualEnv) + + goodEnv := []string{ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOME=/root", + } + sort.Strings(goodEnv) + if len(goodEnv) != len(actualEnv) { + c.Fatalf("Wrong environment: should be %d variables, not %d: %q", len(goodEnv), len(actualEnv), strings.Join(actualEnv, ", ")) + } + for i := range goodEnv { + if actualEnv[i] != goodEnv[i] { + c.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i]) + } + } +} + +func (s *DockerSuite) TestRunEnvironmentOverride(c *check.C) { + // TODO Windows: Environment handling is different between Linux and + // Windows and this test currently relies on unix functionality. + testRequires(c, DaemonIsLinux) + + // Make sure that -e overrides env vars that are already set in the environment + + result := icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "run", "-e", "HOSTNAME", "-e", "HOME=/root2", "busybox", "env"}, + Env: appendBaseEnv(true, "HOSTNAME=bar"), + }) + result.Assert(c, icmd.Success) + + actualEnv := strings.Split(strings.TrimSpace(result.Combined()), "\n") + sort.Strings(actualEnv) + + goodEnv := []string{ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOME=/root2", + "HOSTNAME=bar", + } + sort.Strings(goodEnv) + if len(goodEnv) != len(actualEnv) { + c.Fatalf("Wrong environment: should be %d variables, not %d: %q", len(goodEnv), len(actualEnv), strings.Join(actualEnv, ", ")) + } + for i := range goodEnv { + if actualEnv[i] != goodEnv[i] { + c.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i]) + } + } +} + +func (s *DockerSuite) TestRunContainerNetwork(c *check.C) { + if testEnv.DaemonPlatform() == "windows" { + // Windows busybox does not have ping. Use built in ping instead. + dockerCmd(c, "run", testEnv.MinimalBaseImage(), "ping", "-n", "1", "127.0.0.1") + } else { + dockerCmd(c, "run", "busybox", "ping", "-c", "1", "127.0.0.1") + } +} + +func (s *DockerSuite) TestRunNetHostNotAllowedWithLinks(c *check.C) { + // TODO Windows: This is Linux specific as --link is not supported and + // this will be deprecated in favor of container networking model. + testRequires(c, DaemonIsLinux, NotUserNamespace) + dockerCmd(c, "run", "--name", "linked", "busybox", "true") + + _, _, err := dockerCmdWithError("run", "--net=host", "--link", "linked:linked", "busybox", "true") + if err == nil { + c.Fatal("Expected error") + } +} + +// #7851 hostname outside container shows FQDN, inside only shortname +// For testing purposes it is not required to set host's hostname directly +// and use "--net=host" (as the original issue submitter did), as the same +// codepath is executed with "docker run -h <hostname>". Both were manually +// tested, but this testcase takes the simpler path of using "run -h .." +func (s *DockerSuite) TestRunFullHostnameSet(c *check.C) { + // TODO Windows: -h is not yet functional. 
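+ // Expect `hostname` inside the container to print the full name passed via -h.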
+ testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-h", "foo.bar.baz", "busybox", "hostname") + if actual := strings.Trim(out, "\r\n"); actual != "foo.bar.baz" { + c.Fatalf("expected hostname 'foo.bar.baz', received %s", actual) + } +} + +func (s *DockerSuite) TestRunPrivilegedCanMknod(c *check.C) { + // Not applicable for Windows as Windows daemon does not support + // the concept of --privileged, and mknod is a Unix concept. + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "--privileged", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunUnprivilegedCanMknod(c *check.C) { + // Not applicable for Windows as Windows daemon does not support + // the concept of --privileged, and mknod is a Unix concept. + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunCapDropInvalid(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-drop + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--cap-drop=CHPASS", "busybox", "ls") + if err == nil { + c.Fatal(err, out) + } +} + +func (s *DockerSuite) TestRunCapDropCannotMknod(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-drop or mknod + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--cap-drop=MKNOD", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + + if err == nil { + c.Fatal(err, out) + } + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + c.Fatalf("expected output not ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunCapDropCannotMknodLowerCase(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-drop or mknod + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--cap-drop=mknod", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + + if err == nil { + c.Fatal(err, out) + } + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + c.Fatalf("expected output not ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunCapDropALLCannotMknod(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-drop or mknod + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--cap-drop=ALL", "--cap-add=SETGID", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + if err == nil { + c.Fatal(err, out) + } + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + c.Fatalf("expected output not ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunCapDropALLAddMknodCanMknod(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-drop or mknod + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "--cap-drop=ALL", "--cap-add=MKNOD", "--cap-add=SETGID", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunCapAddInvalid(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-add + testRequires(c, DaemonIsLinux) + out, _, err := 
dockerCmdWithError("run", "--cap-add=CHPASS", "busybox", "ls") + if err == nil { + c.Fatal(err, out) + } +} + +func (s *DockerSuite) TestRunCapAddCanDownInterface(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-add + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "--cap-add=NET_ADMIN", "busybox", "sh", "-c", "ip link set eth0 down && echo ok") + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunCapAddALLCanDownInterface(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-add + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "--cap-add=ALL", "busybox", "sh", "-c", "ip link set eth0 down && echo ok") + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunCapAddALLDropNetAdminCanDownInterface(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-add + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--cap-add=ALL", "--cap-drop=NET_ADMIN", "busybox", "sh", "-c", "ip link set eth0 down && echo ok") + if err == nil { + c.Fatal(err, out) + } + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + c.Fatalf("expected output not ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunGroupAdd(c *check.C) { + // Not applicable for Windows as there is no concept of --group-add + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "--group-add=audio", "--group-add=staff", "--group-add=777", "busybox", "sh", "-c", "id") + + groupsList := "uid=0(root) gid=0(root) groups=10(wheel),29(audio),50(staff),777" + if actual := strings.Trim(out, "\r\n"); actual != groupsList { + c.Fatalf("expected output %s received %s", groupsList, actual) + } +} + +func (s *DockerSuite) TestRunPrivilegedCanMount(c *check.C) { + // Not applicable for Windows as there is no concept of --privileged + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "--privileged", "busybox", "sh", "-c", "mount -t tmpfs none /tmp && echo ok") + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunUnprivilegedCannotMount(c *check.C) { + // Not applicable for Windows as there is no concept of unprivileged + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "busybox", "sh", "-c", "mount -t tmpfs none /tmp && echo ok") + + if err == nil { + c.Fatal(err, out) + } + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + c.Fatalf("expected output not ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunSysNotWritableInNonPrivilegedContainers(c *check.C) { + // Not applicable for Windows as there is no concept of unprivileged + testRequires(c, DaemonIsLinux, NotArm) + if _, code, err := dockerCmdWithError("run", "busybox", "touch", "/sys/kernel/profiling"); err == nil || code == 0 { + c.Fatal("sys should not be writable in a non privileged container") + } +} + +func (s *DockerSuite) TestRunSysWritableInPrivilegedContainers(c *check.C) { + // Not applicable for Windows as there is no concept of unprivileged + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + if _, code, err := dockerCmdWithError("run", "--privileged", "busybox", "touch", "/sys/kernel/profiling"); err != nil || code != 0 { + c.Fatalf("sys should be writable in 
privileged container") + } +} + +func (s *DockerSuite) TestRunProcNotWritableInNonPrivilegedContainers(c *check.C) { + // Not applicable for Windows as there is no concept of unprivileged + testRequires(c, DaemonIsLinux) + if _, code, err := dockerCmdWithError("run", "busybox", "touch", "/proc/sysrq-trigger"); err == nil || code == 0 { + c.Fatal("proc should not be writable in a non privileged container") + } +} + +func (s *DockerSuite) TestRunProcWritableInPrivilegedContainers(c *check.C) { + // Not applicable for Windows as there is no concept of --privileged + testRequires(c, DaemonIsLinux, NotUserNamespace) + if _, code := dockerCmd(c, "run", "--privileged", "busybox", "sh", "-c", "touch /proc/sysrq-trigger"); code != 0 { + c.Fatalf("proc should be writable in privileged container") + } +} + +func (s *DockerSuite) TestRunDeviceNumbers(c *check.C) { + // Not applicable on Windows as /dev/ is a Unix specific concept + // TODO: NotUserNamespace could be removed here if "root" "root" is replaced w user + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "busybox", "sh", "-c", "ls -l /dev/null") + deviceLineFields := strings.Fields(out) + deviceLineFields[6] = "" + deviceLineFields[7] = "" + deviceLineFields[8] = "" + expected := []string{"crw-rw-rw-", "1", "root", "root", "1,", "3", "", "", "", "/dev/null"} + + if !(reflect.DeepEqual(deviceLineFields, expected)) { + c.Fatalf("expected output\ncrw-rw-rw- 1 root root 1, 3 May 24 13:29 /dev/null\n received\n %s\n", out) + } +} + +func (s *DockerSuite) TestRunThatCharacterDevicesActLikeCharacterDevices(c *check.C) { + // Not applicable on Windows as /dev/ is a Unix specific concept + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "busybox", "sh", "-c", "dd if=/dev/zero of=/zero bs=1k count=5 2> /dev/null ; du -h /zero") + if actual := strings.Trim(out, "\r\n"); actual[0] == '0' { + c.Fatalf("expected a new file called /zero to be create that is greater than 0 bytes long, but du says: %s", actual) + } +} + +func (s *DockerSuite) TestRunUnprivilegedWithChroot(c *check.C) { + // Not applicable on Windows as it does not support chroot + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "busybox", "chroot", "/", "true") +} + +func (s *DockerSuite) TestRunAddingOptionalDevices(c *check.C) { + // Not applicable on Windows as Windows does not support --device + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "--device", "/dev/zero:/dev/nulo", "busybox", "sh", "-c", "ls /dev/nulo") + if actual := strings.Trim(out, "\r\n"); actual != "/dev/nulo" { + c.Fatalf("expected output /dev/nulo, received %s", actual) + } +} + +func (s *DockerSuite) TestRunAddingOptionalDevicesNoSrc(c *check.C) { + // Not applicable on Windows as Windows does not support --device + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "--device", "/dev/zero:rw", "busybox", "sh", "-c", "ls /dev/zero") + if actual := strings.Trim(out, "\r\n"); actual != "/dev/zero" { + c.Fatalf("expected output /dev/zero, received %s", actual) + } +} + +func (s *DockerSuite) TestRunAddingOptionalDevicesInvalidMode(c *check.C) { + // Not applicable on Windows as Windows does not support --device + testRequires(c, DaemonIsLinux, NotUserNamespace) + _, _, err := dockerCmdWithError("run", "--device", "/dev/zero:ro", "busybox", "sh", "-c", "ls /dev/zero") + if err == nil { + c.Fatalf("run container with device mode ro should fail") + } +} + +func (s *DockerSuite) TestRunModeHostname(c *check.C) { 
+ // Not applicable on Windows as Windows does not support -h + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + + out, _ := dockerCmd(c, "run", "-h=testhostname", "busybox", "cat", "/etc/hostname") + + if actual := strings.Trim(out, "\r\n"); actual != "testhostname" { + c.Fatalf("expected 'testhostname', but says: %q", actual) + } + + out, _ = dockerCmd(c, "run", "--net=host", "busybox", "cat", "/etc/hostname") + + hostname, err := os.Hostname() + if err != nil { + c.Fatal(err) + } + if actual := strings.Trim(out, "\r\n"); actual != hostname { + c.Fatalf("expected %q, but says: %q", hostname, actual) + } +} + +func (s *DockerSuite) TestRunRootWorkdir(c *check.C) { + out, _ := dockerCmd(c, "run", "--workdir", "/", "busybox", "pwd") + expected := "/\n" + if testEnv.DaemonPlatform() == "windows" { + expected = "C:" + expected + } + if out != expected { + c.Fatalf("pwd returned %q (expected %s)", out, expected) + } +} + +func (s *DockerSuite) TestRunAllowBindMountingRoot(c *check.C) { + if testEnv.DaemonPlatform() == "windows" { + // Windows busybox will fail with Permission Denied on items such as pagefile.sys + dockerCmd(c, "run", "-v", `c:\:c:\host`, testEnv.MinimalBaseImage(), "cmd", "-c", "dir", `c:\host`) + } else { + dockerCmd(c, "run", "-v", "/:/host", "busybox", "ls", "/host") + } +} + +func (s *DockerSuite) TestRunDisallowBindMountingRootToRoot(c *check.C) { + mount := "/:/" + targetDir := "/host" + if testEnv.DaemonPlatform() == "windows" { + mount = `c:\:c\` + targetDir = "c:/host" // Forward slash as using busybox + } + out, _, err := dockerCmdWithError("run", "-v", mount, "busybox", "ls", targetDir) + if err == nil { + c.Fatal(out, err) + } +} + +// Verify that a container gets default DNS when only localhost resolvers exist +func (s *DockerSuite) TestRunDNSDefaultOptions(c *check.C) { + // Not applicable on Windows as this is testing Unix specific functionality + testRequires(c, SameHostDaemon, DaemonIsLinux) + + // preserve original resolv.conf for restoring after test + origResolvConf, err := ioutil.ReadFile("/etc/resolv.conf") + if os.IsNotExist(err) { + c.Fatalf("/etc/resolv.conf does not exist") + } + // restore the original conf when the test completes + defer func() { + if err := ioutil.WriteFile("/etc/resolv.conf", origResolvConf, 0644); err != nil { + c.Fatal(err) + } + }() + + // test 3 cases: standard IPv4 localhost, commented out localhost, and IPv6 localhost + // 2 are removed from the file at container start, and the 3rd (commented out) one is ignored by + // GetNameservers(), leading to a replacement of nameservers with the default set + tmpResolvConf := []byte("nameserver 127.0.0.1\n#nameserver 127.0.2.1\nnameserver ::1") + if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf, 0644); err != nil { + c.Fatal(err) + } + + actual, _ := dockerCmd(c, "run", "busybox", "cat", "/etc/resolv.conf") + // check that the actual defaults are appended to the commented out + // localhost resolver (which should be preserved) + // NOTE: if we ever change the defaults from google dns, this will break + expected := "#nameserver 127.0.2.1\n\nnameserver 8.8.8.8\nnameserver 8.8.4.4\n" + if actual != expected { + c.Fatalf("expected resolv.conf to be: %q, but was: %q", expected, actual) + } +} + +func (s *DockerSuite) TestRunDNSOptions(c *check.C) { + // Not applicable on Windows as Windows does not support --dns*, or + // the Unix-specific functionality of resolv.conf. 
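+ // --dns, --dns-search and --dns-opt must all end up in the container's + // /etc/resolv.conf; a localhost DNS address must also trigger a client-side warning.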
+	testRequires(c, DaemonIsLinux)
+	result := cli.DockerCmd(c, "run", "--dns=127.0.0.1", "--dns-search=mydomain", "--dns-opt=ndots:9", "busybox", "cat", "/etc/resolv.conf")
+
+	// The client will get a warning on stderr when setting DNS to a localhost address; verify this:
+	if !strings.Contains(result.Stderr(), "Localhost DNS setting") {
+		c.Fatalf("Expected warning on stderr about localhost resolver, but got %q", result.Stderr())
+	}
+
+	actual := strings.Replace(strings.Trim(result.Stdout(), "\r\n"), "\n", " ", -1)
+	if actual != "search mydomain nameserver 127.0.0.1 options ndots:9" {
+		c.Fatalf("expected 'search mydomain nameserver 127.0.0.1 options ndots:9', but says: %q", actual)
+	}
+
+	out := cli.DockerCmd(c, "run", "--dns=1.1.1.1", "--dns-search=.", "--dns-opt=ndots:3", "busybox", "cat", "/etc/resolv.conf").Combined()
+
+	actual = strings.Replace(strings.Trim(strings.Trim(out, "\r\n"), " "), "\n", " ", -1)
+	if actual != "nameserver 1.1.1.1 options ndots:3" {
+		c.Fatalf("expected 'nameserver 1.1.1.1 options ndots:3', but says: %q", actual)
+	}
+}
+
+func (s *DockerSuite) TestRunDNSRepeatOptions(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	out := cli.DockerCmd(c, "run", "--dns=1.1.1.1", "--dns=2.2.2.2", "--dns-search=mydomain", "--dns-search=mydomain2", "--dns-opt=ndots:9", "--dns-opt=timeout:3", "busybox", "cat", "/etc/resolv.conf").Stdout()
+
+	actual := strings.Replace(strings.Trim(out, "\r\n"), "\n", " ", -1)
+	if actual != "search mydomain mydomain2 nameserver 1.1.1.1 nameserver 2.2.2.2 options ndots:9 timeout:3" {
+		c.Fatalf("expected 'search mydomain mydomain2 nameserver 1.1.1.1 nameserver 2.2.2.2 options ndots:9 timeout:3', but says: %q", actual)
+	}
+}
+
+func (s *DockerSuite) TestRunDNSOptionsBasedOnHostResolvConf(c *check.C) {
+	// Not applicable on Windows as testing Unix specific functionality
+	testRequires(c, SameHostDaemon, DaemonIsLinux)
+
+	origResolvConf, err := ioutil.ReadFile("/etc/resolv.conf")
+	if os.IsNotExist(err) {
+		c.Fatalf("/etc/resolv.conf does not exist")
+	}
+
+	hostNameservers := resolvconf.GetNameservers(origResolvConf, types.IP)
+	hostSearch := resolvconf.GetSearchDomains(origResolvConf)
+
+	var out string
+	out, _ = dockerCmd(c, "run", "--dns=127.0.0.1", "busybox", "cat", "/etc/resolv.conf")
+
+	if actualNameservers := resolvconf.GetNameservers([]byte(out), types.IP); string(actualNameservers[0]) != "127.0.0.1" {
+		c.Fatalf("expected '127.0.0.1', but says: %q", string(actualNameservers[0]))
+	}
+
+	actualSearch := resolvconf.GetSearchDomains([]byte(out))
+	if len(actualSearch) != len(hostSearch) {
+		c.Fatalf("expected %d search domain(s), but it has: %d", len(hostSearch), len(actualSearch))
+	}
+	for i := range actualSearch {
+		if actualSearch[i] != hostSearch[i] {
+			c.Fatalf("expected %q domain, but says: %q", hostSearch[i], actualSearch[i])
+		}
+	}
+
+	out, _ = dockerCmd(c, "run", "--dns-search=mydomain", "busybox", "cat", "/etc/resolv.conf")
+
+	actualNameservers := resolvconf.GetNameservers([]byte(out), types.IP)
+	if len(actualNameservers) != len(hostNameservers) {
+		c.Fatalf("expected %d nameserver(s), but it has: %d", len(hostNameservers), len(actualNameservers))
+	}
+	for i := range actualNameservers {
+		if actualNameservers[i] != hostNameservers[i] {
+			c.Fatalf("expected %q nameserver, but says: %q", hostNameservers[i], actualNameservers[i])
+		}
+	}
+
+	if actualSearch = resolvconf.GetSearchDomains([]byte(out)); string(actualSearch[0]) != "mydomain" {
+		c.Fatalf("expected 'mydomain', but says: %q", string(actualSearch[0]))
+	}
+
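+	// The remainder of this test rewrites the host's /etc/resolv.conf and
+	// checks that a localhost nameserver in it is filtered out of the
+	// container copy while the other entries are passed through.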
+	// test with file
+	tmpResolvConf := []byte("search example.com\nnameserver 12.34.56.78\nnameserver 127.0.0.1")
+	if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf, 0644); err != nil {
+		c.Fatal(err)
+	}
+	// put the old resolvconf back
+	defer func() {
+		if err := ioutil.WriteFile("/etc/resolv.conf", origResolvConf, 0644); err != nil {
+			c.Fatal(err)
+		}
+	}()
+
+	resolvConf, err := ioutil.ReadFile("/etc/resolv.conf")
+	if os.IsNotExist(err) {
+		c.Fatalf("/etc/resolv.conf does not exist")
+	}
+
+	hostSearch = resolvconf.GetSearchDomains(resolvConf)
+
+	out, _ = dockerCmd(c, "run", "busybox", "cat", "/etc/resolv.conf")
+	if actualNameservers = resolvconf.GetNameservers([]byte(out), types.IP); string(actualNameservers[0]) != "12.34.56.78" || len(actualNameservers) != 1 {
+		c.Fatalf("expected '12.34.56.78', but has: %v", actualNameservers)
+	}
+
+	actualSearch = resolvconf.GetSearchDomains([]byte(out))
+	if len(actualSearch) != len(hostSearch) {
+		c.Fatalf("expected %d search domain(s), but it has: %d", len(hostSearch), len(actualSearch))
+	}
+	for i := range actualSearch {
+		if actualSearch[i] != hostSearch[i] {
+			c.Fatalf("expected %q domain, but says: %q", hostSearch[i], actualSearch[i])
+		}
+	}
+}
+
+// Test to see if a non-root user can resolve a DNS name. Also
+// check if the container resolv.conf file has at least 0644 perm.
+func (s *DockerSuite) TestRunNonRootUserResolvName(c *check.C) {
+	// Not applicable on Windows as Windows does not support --user
+	testRequires(c, SameHostDaemon, Network, DaemonIsLinux, NotArm)
+
+	dockerCmd(c, "run", "--name=testperm", "--user=nobody", "busybox", "nslookup", "apt.dockerproject.org")
+
+	cID := getIDByName(c, "testperm")
+
+	fmode := (os.FileMode)(0644)
+	finfo, err := os.Stat(containerStorageFile(cID, "resolv.conf"))
+	if err != nil {
+		c.Fatal(err)
+	}
+
+	if (finfo.Mode() & fmode) != fmode {
+		c.Fatalf("Expected container resolv.conf mode to be at least %s, instead got %s", fmode.String(), finfo.Mode().String())
+	}
+}
+
+// Test if container resolv.conf gets updated the next time it restarts
+// if host /etc/resolv.conf has changed. This only applies if the container
+// uses the host's /etc/resolv.conf and does not have any dns options provided.
+func (s *DockerSuite) TestRunResolvconfUpdate(c *check.C) {
+	// Not applicable on Windows as testing unix specific functionality
+	testRequires(c, SameHostDaemon, DaemonIsLinux)
+	c.Skip("Unstable test, to be re-activated once #19937 is resolved")
+
+	tmpResolvConf := []byte("search pommesfrites.fr\nnameserver 12.34.56.78\n")
+	tmpLocalhostResolvConf := []byte("nameserver 127.0.0.1")
+
+	// take a copy of resolv.conf for restoring after test completes
+	resolvConfSystem, err := ioutil.ReadFile("/etc/resolv.conf")
+	if err != nil {
+		c.Fatal(err)
+	}
+
+	// This test case is meant to test monitoring resolv.conf when it is
+	// a regular file, not a bind mount. So we unmount resolv.conf and replace
+	// it with a file containing the original settings.
+	mounted, err := mount.Mounted("/etc/resolv.conf")
+	if err != nil {
+		c.Fatal(err)
+	}
+	if mounted {
+		icmd.RunCommand("umount", "/etc/resolv.conf").Assert(c, icmd.Success)
+	}
+
+	// cleanup
+	defer func() {
+		if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil {
+			c.Fatal(err)
+		}
+	}()
+
+	//1. test that a restarting container gets an updated resolv.conf
+	dockerCmd(c, "run", "--name=first", "busybox", "true")
+	containerID1 := getIDByName(c, "first")
+
+	// replace resolv.conf with our temporary copy
+	bytesResolvConf := []byte(tmpResolvConf)
+	if err := ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil {
+		c.Fatal(err)
+	}
+
+	// start the container again to pick up changes
+	dockerCmd(c, "start", "first")
+
+	// check for update in container
+	containerResolv := readContainerFile(c, containerID1, "resolv.conf")
+	if !bytes.Equal(containerResolv, bytesResolvConf) {
+		c.Fatalf("Restarted container does not have updated resolv.conf; expected %q, got %q", tmpResolvConf, string(containerResolv))
+	}
+
+	/* //make a change to resolv.conf (in this case replacing our tmp copy with orig copy)
+	if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil {
+		c.Fatal(err)
+	} */
+	//2. test that a restarting container does not receive resolv.conf updates
+	// if it modified the container copy of the starting point resolv.conf
+	dockerCmd(c, "run", "--name=second", "busybox", "sh", "-c", "echo 'search mylittlepony.com' >>/etc/resolv.conf")
+	containerID2 := getIDByName(c, "second")
+
+	// make a change to resolv.conf (in this case replacing our tmp copy with orig copy)
+	if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil {
+		c.Fatal(err)
+	}
+
+	// start the container again
+	dockerCmd(c, "start", "second")
+
+	// check for update in container
+	containerResolv = readContainerFile(c, containerID2, "resolv.conf")
+	if bytes.Equal(containerResolv, resolvConfSystem) {
+		c.Fatalf("Container's resolv.conf should not have been updated with host resolv.conf: %q", string(containerResolv))
+	}
+
+	//3. test that a running container's resolv.conf is not modified while running
+	out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
+	runningContainerID := strings.TrimSpace(out)
+
+	// replace resolv.conf
+	if err := ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil {
+		c.Fatal(err)
+	}
+
+	// check for update in container
+	containerResolv = readContainerFile(c, runningContainerID, "resolv.conf")
+	if bytes.Equal(containerResolv, bytesResolvConf) {
+		c.Fatalf("Running container should not have updated resolv.conf; expected %q, got %q", string(resolvConfSystem), string(containerResolv))
+	}
+
+	//4. test that a running container's resolv.conf is updated upon restart
+	// (the above container is still running...)
+	dockerCmd(c, "restart", runningContainerID)
+
+	// check for update in container
+	containerResolv = readContainerFile(c, runningContainerID, "resolv.conf")
+	if !bytes.Equal(containerResolv, bytesResolvConf) {
+		c.Fatalf("Restarted container should have updated resolv.conf; expected %q, got %q", string(bytesResolvConf), string(containerResolv))
+	}
+
+	//5. test that additions of a localhost resolver are cleaned from
+	// host resolv.conf before updating container's resolv.conf copies
+
+	// replace resolv.conf with a localhost-only nameserver copy
+	bytesResolvConf = []byte(tmpLocalhostResolvConf)
+	if err = ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil {
+		c.Fatal(err)
+	}
+
+	// start the container again to pick up changes
+	dockerCmd(c, "start", "first")
+
+	// our first exited container ID should have been updated, but with default DNS
+	// after the cleanup of resolv.conf found only a localhost nameserver:
+	containerResolv = readContainerFile(c, containerID1, "resolv.conf")
+	expected := "\nnameserver 8.8.8.8\nnameserver 8.8.4.4\n"
+	if !bytes.Equal(containerResolv, []byte(expected)) {
+		c.Fatalf("Container does not have cleaned/replaced DNS in resolv.conf; expected %q, got %q", expected, string(containerResolv))
+	}
+
+	//6. Test that replacing (as opposed to modifying) resolv.conf triggers an update
+	// of containers' resolv.conf.
+
+	// Restore the original resolv.conf
+	if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil {
+		c.Fatal(err)
+	}
+
+	// Run the container so it picks up the old settings
+	dockerCmd(c, "run", "--name=third", "busybox", "true")
+	containerID3 := getIDByName(c, "third")
+
+	// Create a modified resolv.conf.aside and override resolv.conf with it
+	bytesResolvConf = []byte(tmpResolvConf)
+	if err := ioutil.WriteFile("/etc/resolv.conf.aside", bytesResolvConf, 0644); err != nil {
+		c.Fatal(err)
+	}
+
+	err = os.Rename("/etc/resolv.conf.aside", "/etc/resolv.conf")
+	if err != nil {
+		c.Fatal(err)
+	}
+
+	// start the container again to pick up changes
+	dockerCmd(c, "start", "third")
+
+	// check for update in container
+	containerResolv = readContainerFile(c, containerID3, "resolv.conf")
+	if !bytes.Equal(containerResolv, bytesResolvConf) {
+		c.Fatalf("Stopped container does not have updated resolv.conf; expected\n%q\n got\n%q", tmpResolvConf, string(containerResolv))
+	}
+
+	// cleanup: restoring the original resolv.conf happens in the defer func()
+}
+
+func (s *DockerSuite) TestRunAddHost(c *check.C) {
+	// Not applicable on Windows as it does not support --add-host
+	testRequires(c, DaemonIsLinux)
+	out, _ := dockerCmd(c, "run", "--add-host=extra:86.75.30.9", "busybox", "grep", "extra", "/etc/hosts")
+
+	actual := strings.Trim(out, "\r\n")
+	if actual != "86.75.30.9\textra" {
+		c.Fatalf("expected '86.75.30.9\textra', but says: %q", actual)
+	}
+}
+
+// Regression test for #6983
+func (s *DockerSuite) TestRunAttachStdErrOnlyTTYMode(c *check.C) {
+	_, exitCode := dockerCmd(c, "run", "-t", "-a", "stderr", "busybox", "true")
+	if exitCode != 0 {
+		c.Fatalf("Container should have exited with error code 0")
+	}
+}
+
+// Regression test for #6983
+func (s *DockerSuite) TestRunAttachStdOutOnlyTTYMode(c *check.C) {
+	_, exitCode := dockerCmd(c, "run", "-t", "-a", "stdout", "busybox", "true")
+	if exitCode != 0 {
+		c.Fatalf("Container should have exited with error code 0")
+	}
+}
+
+// Regression test for #6983
+func (s *DockerSuite) TestRunAttachStdOutAndErrTTYMode(c *check.C) {
+	_, exitCode := dockerCmd(c, "run", "-t", "-a", "stdout", "-a", "stderr", "busybox", "true")
+	if exitCode != 0 {
+		c.Fatalf("Container should have exited with error code 0")
+	}
+}
+
+// Test for #10388 - this will run the same test as TestRunAttachStdOutAndErrTTYMode
+// but using --attach instead of -a to make sure we read the flag correctly
+func (s *DockerSuite) TestRunAttachWithDetach(c *check.C) {
+	icmd.RunCommand(dockerBinary, "run", "-d", "--attach", "stdout", "busybox", "true").Assert(c, icmd.Expected{
+		ExitCode: 1,
+		Error:    "exit status 1",
+		Err:      "Conflicting options: -a and -d",
+	})
+}
+
+func (s *DockerSuite) TestRunState(c *check.C) {
+	// TODO Windows: This needs some rework as Windows busybox does not support top
+	testRequires(c, DaemonIsLinux)
+	out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
+
+	id := strings.TrimSpace(out)
+	state := inspectField(c, id, "State.Running")
+	if state != "true" {
+		c.Fatal("Container state is 'not running'")
+	}
+	pid1 := inspectField(c, id, "State.Pid")
+	if pid1 == "0" {
+		c.Fatal("Container state Pid 0")
+	}
+
+	dockerCmd(c, "stop", id)
+	state = inspectField(c, id, "State.Running")
+	if state != "false" {
+		c.Fatal("Container state is 'running'")
+	}
+	pid2 := inspectField(c, id, "State.Pid")
+	if pid2 == pid1 {
+		c.Fatalf("Container state Pid %s, but expected %s", pid2, pid1)
+	}
+
+	dockerCmd(c, "start", id)
+	state = inspectField(c, id, "State.Running")
+	if state != "true" {
+		c.Fatal("Container state is 'not running'")
+	}
+	pid3 := inspectField(c, id, "State.Pid")
+	if pid3 == pid1 {
+		c.Fatalf("Container state Pid %s, but expected %s", pid3, pid1)
+	}
+}
+
+// Test for #1737
+func (s *DockerSuite) TestRunCopyVolumeUIDGID(c *check.C) {
+	// Not applicable on Windows as it does not support uid or gid in this way
+	testRequires(c, DaemonIsLinux)
+	name := "testrunvolumesuidgid"
+	buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
+	RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
+	RUN echo 'dockerio:x:1001:' >> /etc/group
+	RUN mkdir -p /hello && touch /hello/test && chown dockerio.dockerio /hello`))
+
+	// Test that the uid and gid is copied from the image to the volume
+	out, _ := dockerCmd(c, "run", "--rm", "-v", "/hello", name, "sh", "-c", "ls -l / | grep hello | awk '{print $3\":\"$4}'")
+	out = strings.TrimSpace(out)
+	if out != "dockerio:dockerio" {
+		c.Fatalf("Wrong /hello ownership: %s, expected dockerio:dockerio", out)
+	}
+}
+
+// Test for #1582
+func (s *DockerSuite) TestRunCopyVolumeContent(c *check.C) {
+	// TODO Windows, post RS1. Windows does not yet support volume functionality
+	// that copies from the image to the volume.
+	testRequires(c, DaemonIsLinux)
+	name := "testruncopyvolumecontent"
+	buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
+	RUN mkdir -p /hello/local && echo hello > /hello/local/world`))
+
+	// Test that the content is copied from the image to the volume
+	out, _ := dockerCmd(c, "run", "--rm", "-v", "/hello", name, "find", "/hello")
+	if !(strings.Contains(out, "/hello/local/world") && strings.Contains(out, "/hello/local")) {
+		c.Fatal("Container failed to transfer content to volume")
+	}
+}
+
+func (s *DockerSuite) TestRunCleanupCmdOnEntrypoint(c *check.C) {
+	name := "testrunmdcleanuponentrypoint"
+	buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
+	ENTRYPOINT ["echo"]
+	CMD ["testingpoint"]`))
+
+	out, exit := dockerCmd(c, "run", "--entrypoint", "whoami", name)
+	if exit != 0 {
+		c.Fatalf("expected exit code 0 received %d, out: %q", exit, out)
+	}
+	out = strings.TrimSpace(out)
+	expected := "root"
+	if testEnv.DaemonPlatform() == "windows" {
+		if strings.Contains(testEnv.MinimalBaseImage(), "windowsservercore") {
+			expected = `user manager\containeradministrator`
+		} else {
+			expected = `ContainerAdministrator` // nanoserver
+		}
+	}
+	if out != expected {
%s", expected, out, testEnv.MinimalBaseImage()) + } +} + +// TestRunWorkdirExistsAndIsFile checks that if 'docker run -w' with existing file can be detected +func (s *DockerSuite) TestRunWorkdirExistsAndIsFile(c *check.C) { + existingFile := "/bin/cat" + expected := "not a directory" + if testEnv.DaemonPlatform() == "windows" { + existingFile = `\windows\system32\ntdll.dll` + expected = `The directory name is invalid.` + } + + out, exitCode, err := dockerCmdWithError("run", "-w", existingFile, "busybox") + if !(err != nil && exitCode == 125 && strings.Contains(out, expected)) { + c.Fatalf("Existing binary as a directory should error out with exitCode 125; we got: %s, exitCode: %d", out, exitCode) + } +} + +func (s *DockerSuite) TestRunExitOnStdinClose(c *check.C) { + name := "testrunexitonstdinclose" + + meow := "/bin/cat" + delay := 60 + if testEnv.DaemonPlatform() == "windows" { + meow = "cat" + } + runCmd := exec.Command(dockerBinary, "run", "--name", name, "-i", "busybox", meow) + + stdin, err := runCmd.StdinPipe() + if err != nil { + c.Fatal(err) + } + stdout, err := runCmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + + if err := runCmd.Start(); err != nil { + c.Fatal(err) + } + if _, err := stdin.Write([]byte("hello\n")); err != nil { + c.Fatal(err) + } + + r := bufio.NewReader(stdout) + line, err := r.ReadString('\n') + if err != nil { + c.Fatal(err) + } + line = strings.TrimSpace(line) + if line != "hello" { + c.Fatalf("Output should be 'hello', got '%q'", line) + } + if err := stdin.Close(); err != nil { + c.Fatal(err) + } + finish := make(chan error) + go func() { + finish <- runCmd.Wait() + close(finish) + }() + select { + case err := <-finish: + c.Assert(err, check.IsNil) + case <-time.After(time.Duration(delay) * time.Second): + c.Fatal("docker run failed to exit on stdin close") + } + state := inspectField(c, name, "State.Running") + + if state != "false" { + c.Fatal("Container must be stopped after stdin closing") + } +} + +// Test run -i --restart xxx doesn't hang +func (s *DockerSuite) TestRunInteractiveWithRestartPolicy(c *check.C) { + name := "test-inter-restart" + + result := icmd.StartCmd(icmd.Cmd{ + Command: []string{dockerBinary, "run", "-i", "--name", name, "--restart=always", "busybox", "sh"}, + Stdin: bytes.NewBufferString("exit 11"), + }) + c.Assert(result.Error, checker.IsNil) + defer func() { + dockerCmdWithResult("stop", name).Assert(c, icmd.Success) + }() + + result = icmd.WaitOnCmd(60*time.Second, result) + c.Assert(result, icmd.Matches, icmd.Expected{ExitCode: 11}) +} + +// Test for #2267 +func (s *DockerSuite) TestRunWriteSpecialFilesAndNotCommit(c *check.C) { + // Cannot run on Windows as this files are not present in Windows + testRequires(c, DaemonIsLinux) + + testRunWriteSpecialFilesAndNotCommit(c, "writehosts", "/etc/hosts") + testRunWriteSpecialFilesAndNotCommit(c, "writehostname", "/etc/hostname") + testRunWriteSpecialFilesAndNotCommit(c, "writeresolv", "/etc/resolv.conf") +} + +func testRunWriteSpecialFilesAndNotCommit(c *check.C, name, path string) { + command := fmt.Sprintf("echo test2267 >> %s && cat %s", path, path) + out, _ := dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", command) + if !strings.Contains(out, "test2267") { + c.Fatalf("%s should contain 'test2267'", path) + } + + out, _ = dockerCmd(c, "diff", name) + if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, c) { + c.Fatal("diff should be empty") + } +} + +func eqToBaseDiff(out string, c *check.C) bool { + name := "eqToBaseDiff" + 
stringutils.GenerateRandomAlphaOnlyString(32) + dockerCmd(c, "run", "--name", name, "busybox", "echo", "hello") + cID := getIDByName(c, name) + baseDiff, _ := dockerCmd(c, "diff", cID) + baseArr := strings.Split(baseDiff, "\n") + sort.Strings(baseArr) + outArr := strings.Split(out, "\n") + sort.Strings(outArr) + return sliceEq(baseArr, outArr) +} + +func sliceEq(a, b []string) bool { + if len(a) != len(b) { + return false + } + + for i := range a { + if a[i] != b[i] { + return false + } + } + + return true +} + +func (s *DockerSuite) TestRunWithBadDevice(c *check.C) { + // Cannot run on Windows as Windows does not support --device + testRequires(c, DaemonIsLinux) + name := "baddevice" + out, _, err := dockerCmdWithError("run", "--name", name, "--device", "/etc", "busybox", "true") + + if err == nil { + c.Fatal("Run should fail with bad device") + } + expected := `"/etc": not a device node` + if !strings.Contains(out, expected) { + c.Fatalf("Output should contain %q, actual out: %q", expected, out) + } +} + +func (s *DockerSuite) TestRunEntrypoint(c *check.C) { + name := "entrypoint" + + out, _ := dockerCmd(c, "run", "--name", name, "--entrypoint", "echo", "busybox", "-n", "foobar") + expected := "foobar" + + if out != expected { + c.Fatalf("Output should be %q, actual out: %q", expected, out) + } +} + +func (s *DockerSuite) TestRunBindMounts(c *check.C) { + testRequires(c, SameHostDaemon) + if testEnv.DaemonPlatform() == "linux" { + testRequires(c, DaemonIsLinux, NotUserNamespace) + } + + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + + tmpDir, err := ioutil.TempDir("", "docker-test-container") + if err != nil { + c.Fatal(err) + } + + defer os.RemoveAll(tmpDir) + writeFile(path.Join(tmpDir, "touch-me"), "", c) + + // Test reading from a read-only bind mount + out, _ := dockerCmd(c, "run", "-v", fmt.Sprintf("%s:%s/tmp:ro", tmpDir, prefix), "busybox", "ls", prefix+"/tmp") + if !strings.Contains(out, "touch-me") { + c.Fatal("Container failed to read from bind mount") + } + + // test writing to bind mount + if testEnv.DaemonPlatform() == "windows" { + dockerCmd(c, "run", "-v", fmt.Sprintf(`%s:c:\tmp:rw`, tmpDir), "busybox", "touch", "c:/tmp/holla") + } else { + dockerCmd(c, "run", "-v", fmt.Sprintf("%s:/tmp:rw", tmpDir), "busybox", "touch", "/tmp/holla") + } + + readFile(path.Join(tmpDir, "holla"), c) // Will fail if the file doesn't exist + + // test mounting to an illegal destination directory + _, _, err = dockerCmdWithError("run", "-v", fmt.Sprintf("%s:.", tmpDir), "busybox", "ls", ".") + if err == nil { + c.Fatal("Container bind mounted illegal directory") + } + + // Windows does not (and likely never will) support mounting a single file + if testEnv.DaemonPlatform() != "windows" { + // test mount a file + dockerCmd(c, "run", "-v", fmt.Sprintf("%s/holla:/tmp/holla:rw", tmpDir), "busybox", "sh", "-c", "echo -n 'yotta' > /tmp/holla") + content := readFile(path.Join(tmpDir, "holla"), c) // Will fail if the file doesn't exist + expected := "yotta" + if content != expected { + c.Fatalf("Output should be %q, actual out: %q", expected, content) + } + } +} + +// Ensure that CIDFile gets deleted if it's empty +// Perform this test by making `docker run` fail +func (s *DockerSuite) TestRunCidFileCleanupIfEmpty(c *check.C) { + // Skip on Windows. Base image on Windows has a CMD set in the image. 
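+	// The run below fails before a container is created ("No command
+	// specified"), so nothing is ever written to the cidfile; the CLI is
+	// expected to remove the empty file rather than leave it behind.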
+	testRequires(c, DaemonIsLinux)
+
+	tmpDir, err := ioutil.TempDir("", "TestRunCidFile")
+	if err != nil {
+		c.Fatal(err)
+	}
+	defer os.RemoveAll(tmpDir)
+	tmpCidFile := path.Join(tmpDir, "cid")
+
+	image := "emptyfs"
+	if testEnv.DaemonPlatform() == "windows" {
+		// Windows can't support an emptyfs image. Just use the regular Windows image
+		image = testEnv.MinimalBaseImage()
+	}
+	out, _, err := dockerCmdWithError("run", "--cidfile", tmpCidFile, image)
+	if err == nil {
+		c.Fatalf("Run without command must fail. out=%s", out)
+	} else if !strings.Contains(out, "No command specified") {
+		c.Fatalf("Run without command failed with wrong output. out=%s\nerr=%v", out, err)
+	}
+
+	if _, err := os.Stat(tmpCidFile); err == nil {
+		c.Fatalf("empty CIDFile %q should've been deleted", tmpCidFile)
+	}
+}
+
+// #2098 - Docker cidFiles only contain short version of the containerId
+// sudo docker run --cidfile /tmp/docker_test.cid ubuntu echo "test"
+// TestRunCidFileCheckIDLength tests that run --cidfile returns the long id
+func (s *DockerSuite) TestRunCidFileCheckIDLength(c *check.C) {
+	tmpDir, err := ioutil.TempDir("", "TestRunCidFile")
+	if err != nil {
+		c.Fatal(err)
+	}
+	tmpCidFile := path.Join(tmpDir, "cid")
+	defer os.RemoveAll(tmpDir)
+
+	out, _ := dockerCmd(c, "run", "-d", "--cidfile", tmpCidFile, "busybox", "true")
+
+	id := strings.TrimSpace(out)
+	buffer, err := ioutil.ReadFile(tmpCidFile)
+	if err != nil {
+		c.Fatal(err)
+	}
+	cid := string(buffer)
+	if len(cid) != 64 {
+		c.Fatalf("--cidfile should be a long id, not %q", cid)
+	}
+	if cid != id {
+		c.Fatalf("cid must be equal to %s, got %s", id, cid)
+	}
+}
+
+func (s *DockerSuite) TestRunSetMacAddress(c *check.C) {
+	mac := "12:34:56:78:9a:bc"
+	var out string
+	if testEnv.DaemonPlatform() == "windows" {
+		out, _ = dockerCmd(c, "run", "-i", "--rm", fmt.Sprintf("--mac-address=%s", mac), "busybox", "sh", "-c", "ipconfig /all | grep 'Physical Address' | awk '{print $12}'")
+		mac = strings.Replace(strings.ToUpper(mac), ":", "-", -1) // To Windows-style MACs
+	} else {
+		out, _ = dockerCmd(c, "run", "-i", "--rm", fmt.Sprintf("--mac-address=%s", mac), "busybox", "/bin/sh", "-c", "ip link show eth0 | tail -1 | awk '{print $2}'")
+	}
+
+	actualMac := strings.TrimSpace(out)
+	if actualMac != mac {
+		c.Fatalf("Set MAC address with --mac-address failed. The container has an incorrect MAC address: %q, expected: %q", actualMac, mac)
+	}
+}
+
+func (s *DockerSuite) TestRunInspectMacAddress(c *check.C) {
+	// TODO Windows. Network settings are not propagated back to inspect.
+	testRequires(c, DaemonIsLinux)
+	mac := "12:34:56:78:9a:bc"
+	out, _ := dockerCmd(c, "run", "-d", "--mac-address="+mac, "busybox", "top")
+
+	id := strings.TrimSpace(out)
+	inspectedMac := inspectField(c, id, "NetworkSettings.Networks.bridge.MacAddress")
+	if inspectedMac != mac {
+		c.Fatalf("docker inspect outputs wrong MAC address: %q, should be: %q", inspectedMac, mac)
+	}
+}
+
+// test docker run with an invalid mac address
+func (s *DockerSuite) TestRunWithInvalidMacAddress(c *check.C) {
+	out, _, err := dockerCmdWithError("run", "--mac-address", "92:d0:c6:0a:29", "busybox")
+	// running with an invalid mac address should error out
+	if err == nil || !strings.Contains(out, "is not a valid mac address") {
+		c.Fatalf("run with an invalid --mac-address should error out")
+	}
+}
+
+func (s *DockerSuite) TestRunDeallocatePortOnMissingIptablesRule(c *check.C) {
+	// TODO Windows. Network settings are not propagated back to inspect.
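+	// Scenario: delete the daemon's DOCKER-chain ACCEPT rule for a published
+	// port, then remove the container. Re-publishing the same host port must
+	// still succeed, i.e. the mapping was deallocated even though its
+	// iptables rule had already disappeared.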
+	testRequires(c, SameHostDaemon, DaemonIsLinux)
+
+	out := cli.DockerCmd(c, "run", "-d", "-p", "23:23", "busybox", "top").Combined()
+
+	id := strings.TrimSpace(out)
+	ip := inspectField(c, id, "NetworkSettings.Networks.bridge.IPAddress")
+	icmd.RunCommand("iptables", "-D", "DOCKER", "-d", fmt.Sprintf("%s/32", ip),
+		"!", "-i", "docker0", "-o", "docker0", "-p", "tcp", "-m", "tcp", "--dport", "23", "-j", "ACCEPT").Assert(c, icmd.Success)
+
+	cli.DockerCmd(c, "rm", "-fv", id)
+
+	cli.DockerCmd(c, "run", "-d", "-p", "23:23", "busybox", "top")
+}
+
+func (s *DockerSuite) TestRunPortInUse(c *check.C) {
+	// TODO Windows. The duplicate NAT message returned by Windows will be
+	// changing as it is currently completely undecipherable. Does need modifying
+	// to run sh rather than top though as top isn't in Windows busybox.
+	testRequires(c, SameHostDaemon, DaemonIsLinux)
+
+	port := "1234"
+	dockerCmd(c, "run", "-d", "-p", port+":80", "busybox", "top")
+
+	out, _, err := dockerCmdWithError("run", "-d", "-p", port+":80", "busybox", "top")
+	if err == nil {
+		c.Fatalf("Binding on used port must fail")
+	}
+	if !strings.Contains(out, "port is already allocated") {
+		c.Fatalf("Out must be about \"port is already allocated\", got %s", out)
+	}
+}
+
+// https://github.com/docker/docker/issues/12148
+func (s *DockerSuite) TestRunAllocatePortInReservedRange(c *check.C) {
+	// TODO Windows. -P is not yet supported
+	testRequires(c, DaemonIsLinux)
+	// allocate a dynamic port to learn the allocator's most recent position
+	out, _ := dockerCmd(c, "run", "-d", "-P", "-p", "80", "busybox", "top")
+
+	id := strings.TrimSpace(out)
+	out, _ = dockerCmd(c, "port", id, "80")
+
+	strPort := strings.Split(strings.TrimSpace(out), ":")[1]
+	port, err := strconv.ParseInt(strPort, 10, 64)
+	if err != nil {
+		c.Fatalf("invalid port, got: %s, error: %s", strPort, err)
+	}
+
+	// allocate a static port and a dynamic port together, with the static
+	// port taking the next port in the dynamic port range.
+	dockerCmd(c, "run", "-d", "-P", "-p", "80", "-p", fmt.Sprintf("%d:8080", port+1), "busybox", "top")
+}
+
+// Regression test for #7792
+func (s *DockerSuite) TestRunMountOrdering(c *check.C) {
+	// TODO Windows: Post RS1. Windows does not support nested mounts.
+	testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)
+	prefix, _ := getPrefixAndSlashFromDaemonPlatform()
+
+	tmpDir, err := ioutil.TempDir("", "docker_nested_mount_test")
+	if err != nil {
+		c.Fatal(err)
+	}
+	defer os.RemoveAll(tmpDir)
+
+	tmpDir2, err := ioutil.TempDir("", "docker_nested_mount_test2")
+	if err != nil {
+		c.Fatal(err)
+	}
+	defer os.RemoveAll(tmpDir2)
+
+	// Create a temporary tmpfs mount.
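+	// The run below layers four binds: /tmp, /tmp/foo, /tmp/tmp2 and
+	// /tmp/tmp2/foo. The daemon has to order them so that parents are
+	// mounted before children regardless of flag order; the ls checks
+	// then verify each level is visible.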
+ fooDir := filepath.Join(tmpDir, "foo") + if err := os.MkdirAll(filepath.Join(tmpDir, "foo"), 0755); err != nil { + c.Fatalf("failed to mkdir at %s - %s", fooDir, err) + } + + if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", fooDir), []byte{}, 0644); err != nil { + c.Fatal(err) + } + + if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", tmpDir), []byte{}, 0644); err != nil { + c.Fatal(err) + } + + if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", tmpDir2), []byte{}, 0644); err != nil { + c.Fatal(err) + } + + dockerCmd(c, "run", + "-v", fmt.Sprintf("%s:"+prefix+"/tmp", tmpDir), + "-v", fmt.Sprintf("%s:"+prefix+"/tmp/foo", fooDir), + "-v", fmt.Sprintf("%s:"+prefix+"/tmp/tmp2", tmpDir2), + "-v", fmt.Sprintf("%s:"+prefix+"/tmp/tmp2/foo", fooDir), + "busybox:latest", "sh", "-c", + "ls "+prefix+"/tmp/touch-me && ls "+prefix+"/tmp/foo/touch-me && ls "+prefix+"/tmp/tmp2/touch-me && ls "+prefix+"/tmp/tmp2/foo/touch-me") +} + +// Regression test for https://github.com/docker/docker/issues/8259 +func (s *DockerSuite) TestRunReuseBindVolumeThatIsSymlink(c *check.C) { + // Not applicable on Windows as Windows does not support volumes + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + + tmpDir, err := ioutil.TempDir(os.TempDir(), "testlink") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + linkPath := os.TempDir() + "/testlink2" + if err := os.Symlink(tmpDir, linkPath); err != nil { + c.Fatal(err) + } + defer os.RemoveAll(linkPath) + + // Create first container + dockerCmd(c, "run", "-v", fmt.Sprintf("%s:"+prefix+"/tmp/test", linkPath), "busybox", "ls", prefix+"/tmp/test") + + // Create second container with same symlinked path + // This will fail if the referenced issue is hit with a "Volume exists" error + dockerCmd(c, "run", "-v", fmt.Sprintf("%s:"+prefix+"/tmp/test", linkPath), "busybox", "ls", prefix+"/tmp/test") +} + +//GH#10604: Test an "/etc" volume doesn't overlay special bind mounts in container +func (s *DockerSuite) TestRunCreateVolumeEtc(c *check.C) { + // While Windows supports volumes, it does not support --add-host hence + // this test is not applicable on Windows. + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "--dns=127.0.0.1", "-v", "/etc", "busybox", "cat", "/etc/resolv.conf") + if !strings.Contains(out, "nameserver 127.0.0.1") { + c.Fatal("/etc volume mount hides /etc/resolv.conf") + } + + out, _ = dockerCmd(c, "run", "-h=test123", "-v", "/etc", "busybox", "cat", "/etc/hostname") + if !strings.Contains(out, "test123") { + c.Fatal("/etc volume mount hides /etc/hostname") + } + + out, _ = dockerCmd(c, "run", "--add-host=test:192.168.0.1", "-v", "/etc", "busybox", "cat", "/etc/hosts") + out = strings.Replace(out, "\n", " ", -1) + if !strings.Contains(out, "192.168.0.1\ttest") || !strings.Contains(out, "127.0.0.1\tlocalhost") { + c.Fatal("/etc volume mount hides /etc/hosts") + } +} + +func (s *DockerSuite) TestVolumesNoCopyData(c *check.C) { + // TODO Windows (Post RS1). Windows does not support volumes which + // are pre-populated such as is built in the dockerfile used in this test. 
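+	// Copy-on-create only applies to a fresh anonymous/named volume mounted
+	// over existing image data. Neither --volumes-from nor a host bind mount
+	// should ever copy image content, which is what both checks below assert
+	// via the missing /foo/bar.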
+ testRequires(c, DaemonIsLinux) + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + buildImageSuccessfully(c, "dataimage", build.WithDockerfile(`FROM busybox + RUN ["mkdir", "-p", "/foo"] + RUN ["touch", "/foo/bar"]`)) + dockerCmd(c, "run", "--name", "test", "-v", prefix+slash+"foo", "busybox") + + if out, _, err := dockerCmdWithError("run", "--volumes-from", "test", "dataimage", "ls", "-lh", "/foo/bar"); err == nil || !strings.Contains(out, "No such file or directory") { + c.Fatalf("Data was copied on volumes-from but shouldn't be:\n%q", out) + } + + tmpDir := testutil.RandomTmpDirPath("docker_test_bind_mount_copy_data", testEnv.DaemonPlatform()) + if out, _, err := dockerCmdWithError("run", "-v", tmpDir+":/foo", "dataimage", "ls", "-lh", "/foo/bar"); err == nil || !strings.Contains(out, "No such file or directory") { + c.Fatalf("Data was copied on bind-mount but shouldn't be:\n%q", out) + } +} + +func (s *DockerSuite) TestRunNoOutputFromPullInStdout(c *check.C) { + // just run with unknown image + cmd := exec.Command(dockerBinary, "run", "asdfsg") + stdout := bytes.NewBuffer(nil) + cmd.Stdout = stdout + if err := cmd.Run(); err == nil { + c.Fatal("Run with unknown image should fail") + } + if stdout.Len() != 0 { + c.Fatalf("Stdout contains output from pull: %s", stdout) + } +} + +func (s *DockerSuite) TestRunVolumesCleanPaths(c *check.C) { + testRequires(c, SameHostDaemon) + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + buildImageSuccessfully(c, "run_volumes_clean_paths", build.WithDockerfile(`FROM busybox + VOLUME `+prefix+`/foo/`)) + dockerCmd(c, "run", "-v", prefix+"/foo", "-v", prefix+"/bar/", "--name", "dark_helmet", "run_volumes_clean_paths") + + out, err := inspectMountSourceField("dark_helmet", prefix+slash+"foo"+slash) + if err != errMountNotFound { + c.Fatalf("Found unexpected volume entry for '%s/foo/' in volumes\n%q", prefix, out) + } + + out, err = inspectMountSourceField("dark_helmet", prefix+slash+`foo`) + c.Assert(err, check.IsNil) + if !strings.Contains(strings.ToLower(out), strings.ToLower(testEnv.VolumesConfigPath())) { + c.Fatalf("Volume was not defined for %s/foo\n%q", prefix, out) + } + + out, err = inspectMountSourceField("dark_helmet", prefix+slash+"bar"+slash) + if err != errMountNotFound { + c.Fatalf("Found unexpected volume entry for '%s/bar/' in volumes\n%q", prefix, out) + } + + out, err = inspectMountSourceField("dark_helmet", prefix+slash+"bar") + c.Assert(err, check.IsNil) + if !strings.Contains(strings.ToLower(out), strings.ToLower(testEnv.VolumesConfigPath())) { + c.Fatalf("Volume was not defined for %s/bar\n%q", prefix, out) + } +} + +// Regression test for #3631 +func (s *DockerSuite) TestRunSlowStdoutConsumer(c *check.C) { + // TODO Windows: This should be able to run on Windows if can find an + // alternate to /dev/zero and /dev/stdout. + testRequires(c, DaemonIsLinux) + cont := exec.Command(dockerBinary, "run", "--rm", "busybox", "/bin/sh", "-c", "dd if=/dev/zero of=/dev/stdout bs=1024 count=2000 | catv") + + stdout, err := cont.StdoutPipe() + if err != nil { + c.Fatal(err) + } + + if err := cont.Start(); err != nil { + c.Fatal(err) + } + n, err := testutil.ConsumeWithSpeed(stdout, 10000, 5*time.Millisecond, nil) + if err != nil { + c.Fatal(err) + } + + expected := 2 * 1024 * 2000 + if n != expected { + c.Fatalf("Expected %d, got %d", expected, n) + } +} + +func (s *DockerSuite) TestRunAllowPortRangeThroughExpose(c *check.C) { + // TODO Windows: -P is not currently supported. Also network + // settings are not propagated back. 
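+	// NetworkSettings.Ports unmarshals into a nat.PortMap; for this run the
+	// inspected JSON looks roughly like (host ports vary per run):
+	//   {"3000/tcp":[{"HostIp":"0.0.0.0","HostPort":"32768"}], ..., "3003/tcp":[...]}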
+ testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "--expose", "3000-3003", "-P", "busybox", "top") + + id := strings.TrimSpace(out) + portstr := inspectFieldJSON(c, id, "NetworkSettings.Ports") + var ports nat.PortMap + if err := json.Unmarshal([]byte(portstr), &ports); err != nil { + c.Fatal(err) + } + for port, binding := range ports { + portnum, _ := strconv.Atoi(strings.Split(string(port), "/")[0]) + if portnum < 3000 || portnum > 3003 { + c.Fatalf("Port %d is out of range ", portnum) + } + if binding == nil || len(binding) != 1 || len(binding[0].HostPort) == 0 { + c.Fatalf("Port is not mapped for the port %s", port) + } + } +} + +func (s *DockerSuite) TestRunExposePort(c *check.C) { + out, _, err := dockerCmdWithError("run", "--expose", "80000", "busybox") + c.Assert(err, checker.NotNil, check.Commentf("--expose with an invalid port should error out")) + c.Assert(out, checker.Contains, "invalid range format for --expose") +} + +func (s *DockerSuite) TestRunModeIpcHost(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + + hostIpc, err := os.Readlink("/proc/1/ns/ipc") + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "--ipc=host", "busybox", "readlink", "/proc/self/ns/ipc") + out = strings.Trim(out, "\n") + if hostIpc != out { + c.Fatalf("IPC different with --ipc=host %s != %s\n", hostIpc, out) + } + + out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/ipc") + out = strings.Trim(out, "\n") + if hostIpc == out { + c.Fatalf("IPC should be different without --ipc=host %s == %s\n", hostIpc, out) + } +} + +func (s *DockerSuite) TestRunModeIpcContainer(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "echo -n test > /dev/shm/test && touch /dev/mqueue/toto && top") + + id := strings.TrimSpace(out) + state := inspectField(c, id, "State.Running") + if state != "true" { + c.Fatal("Container state is 'not running'") + } + pid1 := inspectField(c, id, "State.Pid") + + parentContainerIpc, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/ipc", pid1)) + if err != nil { + c.Fatal(err) + } + + out, _ = dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "readlink", "/proc/self/ns/ipc") + out = strings.Trim(out, "\n") + if parentContainerIpc != out { + c.Fatalf("IPC different with --ipc=container:%s %s != %s\n", id, parentContainerIpc, out) + } + + catOutput, _ := dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "cat", "/dev/shm/test") + if catOutput != "test" { + c.Fatalf("Output of /dev/shm/test expected test but found: %s", catOutput) + } + + // check that /dev/mqueue is actually of mqueue type + grepOutput, _ := dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "grep", "/dev/mqueue", "/proc/mounts") + if !strings.HasPrefix(grepOutput, "mqueue /dev/mqueue mqueue rw") { + c.Fatalf("Output of 'grep /proc/mounts' expected 'mqueue /dev/mqueue mqueue rw' but found: %s", grepOutput) + } + + lsOutput, _ := dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "ls", "/dev/mqueue") + lsOutput = strings.Trim(lsOutput, "\n") + if lsOutput != "toto" { + c.Fatalf("Output of 'ls /dev/mqueue' expected 'toto' but found: %s", lsOutput) + } +} + +func (s *DockerSuite) TestRunModeIpcContainerNotExists(c *check.C) { + // Not applicable on Windows as uses 
Unix-specific capabilities
+	testRequires(c, DaemonIsLinux)
+	out, _, err := dockerCmdWithError("run", "-d", "--ipc", "container:abcd1234", "busybox", "top")
+	if !strings.Contains(out, "abcd1234") || err == nil {
+		c.Fatalf("run with --ipc pointing to a nonexistent container should error out and name the container: %s, %v", out, err)
+	}
+}
+
+func (s *DockerSuite) TestRunModeIpcContainerNotRunning(c *check.C) {
+	// Not applicable on Windows as uses Unix-specific capabilities
+	testRequires(c, SameHostDaemon, DaemonIsLinux)
+
+	out, _ := dockerCmd(c, "create", "busybox")
+
+	id := strings.TrimSpace(out)
+	out, _, err := dockerCmdWithError("run", fmt.Sprintf("--ipc=container:%s", id), "busybox")
+	if err == nil {
+		c.Fatalf("Run with --ipc=container: should fail when the target container is not running: %s\n%s", out, err)
+	}
+}
+
+func (s *DockerSuite) TestRunModePIDContainer(c *check.C) {
+	// Not applicable on Windows as uses Unix-specific capabilities
+	testRequires(c, SameHostDaemon, DaemonIsLinux)
+
+	out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "top")
+
+	id := strings.TrimSpace(out)
+	state := inspectField(c, id, "State.Running")
+	if state != "true" {
+		c.Fatal("Container state is 'not running'")
+	}
+	pid1 := inspectField(c, id, "State.Pid")
+
+	parentContainerPid, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/pid", pid1))
+	if err != nil {
+		c.Fatal(err)
+	}
+
+	out, _ = dockerCmd(c, "run", fmt.Sprintf("--pid=container:%s", id), "busybox", "readlink", "/proc/self/ns/pid")
+	out = strings.Trim(out, "\n")
+	if parentContainerPid != out {
+		c.Fatalf("PID different with --pid=container:%s %s != %s\n", id, parentContainerPid, out)
+	}
+}
+
+func (s *DockerSuite) TestRunModePIDContainerNotExists(c *check.C) {
+	// Not applicable on Windows as uses Unix-specific capabilities
+	testRequires(c, DaemonIsLinux)
+	out, _, err := dockerCmdWithError("run", "-d", "--pid", "container:abcd1234", "busybox", "top")
+	if !strings.Contains(out, "abcd1234") || err == nil {
+		c.Fatalf("run with --pid pointing to a nonexistent container should error out and name the container: %s, %v", out, err)
+	}
+}
+
+func (s *DockerSuite) TestRunModePIDContainerNotRunning(c *check.C) {
+	// Not applicable on Windows as uses Unix-specific capabilities
+	testRequires(c, SameHostDaemon, DaemonIsLinux)
+
+	out, _ := dockerCmd(c, "create", "busybox")
+
+	id := strings.TrimSpace(out)
+	out, _, err := dockerCmdWithError("run", fmt.Sprintf("--pid=container:%s", id), "busybox")
+	if err == nil {
+		c.Fatalf("Run with --pid=container: should fail when the target container is not running: %s\n%s", out, err)
+	}
+}
+
+func (s *DockerSuite) TestRunMountShmMqueueFromHost(c *check.C) {
+	// Not applicable on Windows as uses Unix-specific capabilities
+	testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)
+
+	dockerCmd(c, "run", "-d", "--name", "shmfromhost", "-v", "/dev/shm:/dev/shm", "-v", "/dev/mqueue:/dev/mqueue", "busybox", "sh", "-c", "echo -n test > /dev/shm/test && touch /dev/mqueue/toto && top")
+	defer os.Remove("/dev/mqueue/toto")
+	defer os.Remove("/dev/shm/test")
+	volPath, err := inspectMountSourceField("shmfromhost", "/dev/shm")
+	c.Assert(err, checker.IsNil)
+	if volPath != "/dev/shm" {
+		c.Fatalf("volumePath should have been /dev/shm, was %s", volPath)
+	}
+
+	out, _ := dockerCmd(c, "run", "--name", "ipchost", "--ipc", "host", "busybox", "cat", "/dev/shm/test")
+	if out != "test" {
+		c.Fatalf("Output of /dev/shm/test expected test but found: %s", out)
+	}
+
+	// Check that the mq was created
+	if _, err := os.Stat("/dev/mqueue/toto"); err != nil {
+		c.Fatalf("Failed to confirm 
'/dev/mqueue/toto' presence on host: %s", err.Error()) + } +} + +func (s *DockerSuite) TestContainerNetworkMode(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + pid1 := inspectField(c, id, "State.Pid") + + parentContainerNet, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/net", pid1)) + if err != nil { + c.Fatal(err) + } + + out, _ = dockerCmd(c, "run", fmt.Sprintf("--net=container:%s", id), "busybox", "readlink", "/proc/self/ns/net") + out = strings.Trim(out, "\n") + if parentContainerNet != out { + c.Fatalf("NET different with --net=container:%s %s != %s\n", id, parentContainerNet, out) + } +} + +func (s *DockerSuite) TestRunModePIDHost(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + + hostPid, err := os.Readlink("/proc/1/ns/pid") + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "--pid=host", "busybox", "readlink", "/proc/self/ns/pid") + out = strings.Trim(out, "\n") + if hostPid != out { + c.Fatalf("PID different with --pid=host %s != %s\n", hostPid, out) + } + + out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/pid") + out = strings.Trim(out, "\n") + if hostPid == out { + c.Fatalf("PID should be different without --pid=host %s == %s\n", hostPid, out) + } +} + +func (s *DockerSuite) TestRunModeUTSHost(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux) + + hostUTS, err := os.Readlink("/proc/1/ns/uts") + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "--uts=host", "busybox", "readlink", "/proc/self/ns/uts") + out = strings.Trim(out, "\n") + if hostUTS != out { + c.Fatalf("UTS different with --uts=host %s != %s\n", hostUTS, out) + } + + out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/uts") + out = strings.Trim(out, "\n") + if hostUTS == out { + c.Fatalf("UTS should be different without --uts=host %s == %s\n", hostUTS, out) + } + + out, _ = dockerCmdWithFail(c, "run", "-h=name", "--uts=host", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictUTSHostname.Error()) +} + +func (s *DockerSuite) TestRunTLSVerify(c *check.C) { + // Remote daemons use TLS and this test is not applicable when TLS is required. + testRequires(c, SameHostDaemon) + if out, code, err := dockerCmdWithError("ps"); err != nil || code != 0 { + c.Fatalf("Should have worked: %v:\n%v", err, out) + } + + // Regardless of whether we specify true or false we need to + // test to make sure tls is turned on if --tlsverify is specified at all + result := dockerCmdWithResult("--tlsverify=false", "ps") + result.Assert(c, icmd.Expected{ExitCode: 1, Err: "error during connect"}) + + result = dockerCmdWithResult("--tlsverify=true", "ps") + result.Assert(c, icmd.Expected{ExitCode: 1, Err: "cert"}) +} + +func (s *DockerSuite) TestRunPortFromDockerRangeInUse(c *check.C) { + // TODO Windows. Once moved to libnetwork/CNM, this may be able to be + // re-instated. 
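+	// Strategy: publish an ephemeral port to learn the allocator's current
+	// position, bind the next port on the host ourselves, then publish again;
+	// the daemon is expected to skip the occupied port rather than fail the run.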
+ testRequires(c, DaemonIsLinux) + // first find allocator current position + out, _ := dockerCmd(c, "run", "-d", "-p", ":80", "busybox", "top") + + id := strings.TrimSpace(out) + out, _ = dockerCmd(c, "port", id) + + out = strings.TrimSpace(out) + if out == "" { + c.Fatal("docker port command output is empty") + } + out = strings.Split(out, ":")[1] + lastPort, err := strconv.Atoi(out) + if err != nil { + c.Fatal(err) + } + port := lastPort + 1 + l, err := net.Listen("tcp", ":"+strconv.Itoa(port)) + if err != nil { + c.Fatal(err) + } + defer l.Close() + + out, _ = dockerCmd(c, "run", "-d", "-p", ":80", "busybox", "top") + + id = strings.TrimSpace(out) + dockerCmd(c, "port", id) +} + +func (s *DockerSuite) TestRunTTYWithPipe(c *check.C) { + errChan := make(chan error) + go func() { + defer close(errChan) + + cmd := exec.Command(dockerBinary, "run", "-ti", "busybox", "true") + if _, err := cmd.StdinPipe(); err != nil { + errChan <- err + return + } + + expected := "the input device is not a TTY" + if runtime.GOOS == "windows" { + expected += ". If you are using mintty, try prefixing the command with 'winpty'" + } + if out, _, err := runCommandWithOutput(cmd); err == nil { + errChan <- fmt.Errorf("run should have failed") + return + } else if !strings.Contains(out, expected) { + errChan <- fmt.Errorf("run failed with error %q: expected %q", out, expected) + return + } + }() + + select { + case err := <-errChan: + c.Assert(err, check.IsNil) + case <-time.After(30 * time.Second): + c.Fatal("container is running but should have failed") + } +} + +func (s *DockerSuite) TestRunNonLocalMacAddress(c *check.C) { + addr := "00:16:3E:08:00:50" + args := []string{"run", "--mac-address", addr} + expected := addr + + if testEnv.DaemonPlatform() != "windows" { + args = append(args, "busybox", "ifconfig") + } else { + args = append(args, testEnv.MinimalBaseImage(), "ipconfig", "/all") + expected = strings.Replace(strings.ToUpper(addr), ":", "-", -1) + } + + if out, _ := dockerCmd(c, args...); !strings.Contains(out, expected) { + c.Fatalf("Output should have contained %q: %s", expected, out) + } +} + +func (s *DockerSuite) TestRunNetHost(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + + hostNet, err := os.Readlink("/proc/1/ns/net") + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "--net=host", "busybox", "readlink", "/proc/self/ns/net") + out = strings.Trim(out, "\n") + if hostNet != out { + c.Fatalf("Net namespace different with --net=host %s != %s\n", hostNet, out) + } + + out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/net") + out = strings.Trim(out, "\n") + if hostNet == out { + c.Fatalf("Net namespace should be different without --net=host %s == %s\n", hostNet, out) + } +} + +func (s *DockerSuite) TestRunNetHostTwiceSameName(c *check.C) { + // TODO Windows. As Windows networking evolves and converges towards + // CNM, this test may be possible to enable on Windows. 
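+	// With --rm the name is released when the container exits, so the same
+	// --name (and the host network namespace) must be reusable immediately
+	// by the second run.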
+ testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + + dockerCmd(c, "run", "--rm", "--name=thost", "--net=host", "busybox", "true") + dockerCmd(c, "run", "--rm", "--name=thost", "--net=host", "busybox", "true") +} + +func (s *DockerSuite) TestRunNetContainerWhichHost(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + + hostNet, err := os.Readlink("/proc/1/ns/net") + if err != nil { + c.Fatal(err) + } + + dockerCmd(c, "run", "-d", "--net=host", "--name=test", "busybox", "top") + + out, _ := dockerCmd(c, "run", "--net=container:test", "busybox", "readlink", "/proc/self/ns/net") + out = strings.Trim(out, "\n") + if hostNet != out { + c.Fatalf("Container should have host network namespace") + } +} + +func (s *DockerSuite) TestRunAllowPortRangeThroughPublish(c *check.C) { + // TODO Windows. This may be possible to enable in the future. However, + // Windows does not currently support --expose, or populate the network + // settings seen through inspect. + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "--expose", "3000-3003", "-p", "3000-3003", "busybox", "top") + + id := strings.TrimSpace(out) + portstr := inspectFieldJSON(c, id, "NetworkSettings.Ports") + + var ports nat.PortMap + err := json.Unmarshal([]byte(portstr), &ports) + c.Assert(err, checker.IsNil, check.Commentf("failed to unmarshal: %v", portstr)) + for port, binding := range ports { + portnum, _ := strconv.Atoi(strings.Split(string(port), "/")[0]) + if portnum < 3000 || portnum > 3003 { + c.Fatalf("Port %d is out of range ", portnum) + } + if binding == nil || len(binding) != 1 || len(binding[0].HostPort) == 0 { + c.Fatal("Port is not mapped for the port "+port, out) + } + } +} + +func (s *DockerSuite) TestRunSetDefaultRestartPolicy(c *check.C) { + runSleepingContainer(c, "--name=testrunsetdefaultrestartpolicy") + out := inspectField(c, "testrunsetdefaultrestartpolicy", "HostConfig.RestartPolicy.Name") + if out != "no" { + c.Fatalf("Set default restart policy failed") + } +} + +func (s *DockerSuite) TestRunRestartMaxRetries(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "--restart=on-failure:3", "busybox", "false") + timeout := 10 * time.Second + if testEnv.DaemonPlatform() == "windows" { + timeout = 120 * time.Second + } + + id := strings.TrimSpace(string(out)) + if err := waitInspect(id, "{{ .State.Restarting }} {{ .State.Running }}", "false false", timeout); err != nil { + c.Fatal(err) + } + + count := inspectField(c, id, "RestartCount") + if count != "3" { + c.Fatalf("Container was restarted %s times, expected %d", count, 3) + } + + MaximumRetryCount := inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount") + if MaximumRetryCount != "3" { + c.Fatalf("Container Maximum Retry Count is %s, expected %s", MaximumRetryCount, "3") + } +} + +func (s *DockerSuite) TestRunContainerWithWritableRootfs(c *check.C) { + dockerCmd(c, "run", "--rm", "busybox", "touch", "/file") +} + +func (s *DockerSuite) TestRunContainerWithReadonlyRootfs(c *check.C) { + // Not applicable on Windows which does not support --read-only + testRequires(c, DaemonIsLinux, UserNamespaceROMount) + + testPriv := true + // don't test privileged mode subtest if user namespaces enabled + if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { + testPriv = false + } + testReadOnlyFile(c, testPriv, "/file", "/etc/hosts", "/etc/resolv.conf", "/etc/hostname", "/sys/kernel", "/dev/.dont.touch.me") +} + +func (s *DockerSuite) 
TestPermissionsPtsReadonlyRootfs(c *check.C) { + // Not applicable on Windows due to use of Unix specific functionality, plus + // the use of --read-only which is not supported. + testRequires(c, DaemonIsLinux, UserNamespaceROMount) + + // Ensure we have not broken writing /dev/pts + out, status := dockerCmd(c, "run", "--read-only", "--rm", "busybox", "mount") + if status != 0 { + c.Fatal("Could not obtain mounts when checking /dev/pts mntpnt.") + } + expected := "type devpts (rw," + if !strings.Contains(string(out), expected) { + c.Fatalf("expected output to contain %s but contains %s", expected, out) + } +} + +func testReadOnlyFile(c *check.C, testPriv bool, filenames ...string) { + touch := "touch " + strings.Join(filenames, " ") + out, _, err := dockerCmdWithError("run", "--read-only", "--rm", "busybox", "sh", "-c", touch) + c.Assert(err, checker.NotNil) + + for _, f := range filenames { + expected := "touch: " + f + ": Read-only file system" + c.Assert(out, checker.Contains, expected) + } + + if !testPriv { + return + } + + out, _, err = dockerCmdWithError("run", "--read-only", "--privileged", "--rm", "busybox", "sh", "-c", touch) + c.Assert(err, checker.NotNil) + + for _, f := range filenames { + expected := "touch: " + f + ": Read-only file system" + c.Assert(out, checker.Contains, expected) + } +} + +func (s *DockerSuite) TestRunContainerWithReadonlyEtcHostsAndLinkedContainer(c *check.C) { + // Not applicable on Windows which does not support --link + testRequires(c, DaemonIsLinux, UserNamespaceROMount) + + dockerCmd(c, "run", "-d", "--name", "test-etc-hosts-ro-linked", "busybox", "top") + + out, _ := dockerCmd(c, "run", "--read-only", "--link", "test-etc-hosts-ro-linked:testlinked", "busybox", "cat", "/etc/hosts") + if !strings.Contains(string(out), "testlinked") { + c.Fatal("Expected /etc/hosts to be updated even if --read-only enabled") + } +} + +func (s *DockerSuite) TestRunContainerWithReadonlyRootfsWithDNSFlag(c *check.C) { + // Not applicable on Windows which does not support either --read-only or --dns. 
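+	// /etc/resolv.conf (like /etc/hosts and /etc/hostname) is bind-mounted
+	// by the daemon over the rootfs, so --dns still takes effect even when
+	// the rootfs itself is read-only.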
+ testRequires(c, DaemonIsLinux, UserNamespaceROMount) + + out, _ := dockerCmd(c, "run", "--read-only", "--dns", "1.1.1.1", "busybox", "/bin/cat", "/etc/resolv.conf") + if !strings.Contains(string(out), "1.1.1.1") { + c.Fatal("Expected /etc/resolv.conf to be updated even if --read-only enabled and --dns flag used") + } +} + +func (s *DockerSuite) TestRunContainerWithReadonlyRootfsWithAddHostFlag(c *check.C) { + // Not applicable on Windows which does not support --read-only + testRequires(c, DaemonIsLinux, UserNamespaceROMount) + + out, _ := dockerCmd(c, "run", "--read-only", "--add-host", "testreadonly:127.0.0.1", "busybox", "/bin/cat", "/etc/hosts") + if !strings.Contains(string(out), "testreadonly") { + c.Fatal("Expected /etc/hosts to be updated even if --read-only enabled and --add-host flag used") + } +} + +func (s *DockerSuite) TestRunVolumesFromRestartAfterRemoved(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + runSleepingContainer(c, "--name=voltest", "-v", prefix+"/foo") + runSleepingContainer(c, "--name=restarter", "--volumes-from", "voltest") + + // Remove the main volume container and restart the consuming container + dockerCmd(c, "rm", "-f", "voltest") + + // This should not fail since the volumes-from were already applied + dockerCmd(c, "restart", "restarter") +} + +// run container with --rm should remove container if exit code != 0 +func (s *DockerSuite) TestRunContainerWithRmFlagExitCodeNotEqualToZero(c *check.C) { + name := "flowers" + cli.Docker(cli.Args("run", "--name", name, "--rm", "busybox", "ls", "/notexists")).Assert(c, icmd.Expected{ + ExitCode: 1, + }) + + out := cli.DockerCmd(c, "ps", "-q", "-a").Combined() + if out != "" { + c.Fatal("Expected not to have containers", out) + } +} + +func (s *DockerSuite) TestRunContainerWithRmFlagCannotStartContainer(c *check.C) { + name := "sparkles" + cli.Docker(cli.Args("run", "--name", name, "--rm", "busybox", "commandNotFound")).Assert(c, icmd.Expected{ + ExitCode: 127, + }) + out := cli.DockerCmd(c, "ps", "-q", "-a").Combined() + if out != "" { + c.Fatal("Expected not to have containers", out) + } +} + +func (s *DockerSuite) TestRunPIDHostWithChildIsKillable(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux, NotUserNamespace) + name := "ibuildthecloud" + dockerCmd(c, "run", "-d", "--pid=host", "--name", name, "busybox", "sh", "-c", "sleep 30; echo hi") + + c.Assert(waitRun(name), check.IsNil) + + errchan := make(chan error) + go func() { + if out, _, err := dockerCmdWithError("kill", name); err != nil { + errchan <- fmt.Errorf("%v:\n%s", err, out) + } + close(errchan) + }() + select { + case err := <-errchan: + c.Assert(err, check.IsNil) + case <-time.After(5 * time.Second): + c.Fatal("Kill container timed out") + } +} + +func (s *DockerSuite) TestRunWithTooSmallMemoryLimit(c *check.C) { + // TODO Windows. 
This may be possible to enable once Windows supports + // memory limits on containers + testRequires(c, DaemonIsLinux) + // this memory limit is 1 byte less than the min, which is 4MB + // https://github.com/docker/docker/blob/v1.5.0/daemon/create.go#L22 + out, _, err := dockerCmdWithError("run", "-m", "4194303", "busybox") + if err == nil || !strings.Contains(out, "Minimum memory limit allowed is 4MB") { + c.Fatalf("expected run to fail when using too low a memory limit: %q", out) + } +} + +func (s *DockerSuite) TestRunWriteToProcAsound(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + _, code, err := dockerCmdWithError("run", "busybox", "sh", "-c", "echo 111 >> /proc/asound/version") + if err == nil || code == 0 { + c.Fatal("standard container should not be able to write to /proc/asound") + } +} + +func (s *DockerSuite) TestRunReadProcTimer(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + out, code, err := dockerCmdWithError("run", "busybox", "cat", "/proc/timer_stats") + if code != 0 { + return + } + if err != nil { + c.Fatal(err) + } + if strings.Trim(out, "\n ") != "" { + c.Fatalf("expected to receive no output from /proc/timer_stats but received %q", out) + } +} + +func (s *DockerSuite) TestRunReadProcLatency(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + // some kernels don't have this configured so skip the test if this file is not found + // on the host running the tests. + if _, err := os.Stat("/proc/latency_stats"); err != nil { + c.Skip("kernel doesn't have latency_stats configured") + return + } + out, code, err := dockerCmdWithError("run", "busybox", "cat", "/proc/latency_stats") + if code != 0 { + return + } + if err != nil { + c.Fatal(err) + } + if strings.Trim(out, "\n ") != "" { + c.Fatalf("expected to receive no output from /proc/latency_stats but received %q", out) + } +} + +func (s *DockerSuite) TestRunReadFilteredProc(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, Apparmor, DaemonIsLinux, NotUserNamespace) + + testReadPaths := []string{ + "/proc/latency_stats", + "/proc/timer_stats", + "/proc/kcore", + } + for i, filePath := range testReadPaths { + name := fmt.Sprintf("procsieve-%d", i) + shellCmd := fmt.Sprintf("exec 3<%s", filePath) + + out, exitCode, err := dockerCmdWithError("run", "--privileged", "--security-opt", "apparmor=docker-default", "--name", name, "busybox", "sh", "-c", shellCmd) + if exitCode != 0 { + return + } + if err != nil { + c.Fatalf("Open FD for read should have failed with permission denied, got: %s, %v", out, err) + } + } +} + +func (s *DockerSuite) TestMountIntoProc(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + _, code, err := dockerCmdWithError("run", "-v", "/proc//sys", "busybox", "true") + if err == nil || code == 0 { + c.Fatal("container should not be able to mount into /proc") + } +} + +func (s *DockerSuite) TestMountIntoSys(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + testRequires(c, NotUserNamespace) + dockerCmd(c, "run", "-v", "/sys/fs/cgroup", "busybox", "true") +} + +func (s *DockerSuite) TestRunUnshareProc(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, Apparmor, DaemonIsLinux, 
NotUserNamespace) + + // In this test goroutines are used to run test cases in parallel to prevent the test from taking a long time to run. + errChan := make(chan error) + + go func() { + name := "acidburn" + out, _, err := dockerCmdWithError("run", "--name", name, "--security-opt", "seccomp=unconfined", "debian:jessie", "unshare", "-p", "-m", "-f", "-r", "--mount-proc=/proc", "mount") + if err == nil || + !(strings.Contains(strings.ToLower(out), "permission denied") || + strings.Contains(strings.ToLower(out), "operation not permitted")) { + errChan <- fmt.Errorf("unshare with --mount-proc should have failed with 'permission denied' or 'operation not permitted', got: %s, %v", out, err) + } else { + errChan <- nil + } + }() + + go func() { + name := "cereal" + out, _, err := dockerCmdWithError("run", "--name", name, "--security-opt", "seccomp=unconfined", "debian:jessie", "unshare", "-p", "-m", "-f", "-r", "mount", "-t", "proc", "none", "/proc") + if err == nil || + !(strings.Contains(strings.ToLower(out), "mount: cannot mount none") || + strings.Contains(strings.ToLower(out), "permission denied") || + strings.Contains(strings.ToLower(out), "operation not permitted")) { + errChan <- fmt.Errorf("unshare and mount of /proc should have failed with 'mount: cannot mount none' or 'permission denied', got: %s, %v", out, err) + } else { + errChan <- nil + } + }() + + /* Ensure still fails if running privileged with the default policy */ + go func() { + name := "crashoverride" + out, _, err := dockerCmdWithError("run", "--privileged", "--security-opt", "seccomp=unconfined", "--security-opt", "apparmor=docker-default", "--name", name, "debian:jessie", "unshare", "-p", "-m", "-f", "-r", "mount", "-t", "proc", "none", "/proc") + if err == nil || + !(strings.Contains(strings.ToLower(out), "mount: cannot mount none") || + strings.Contains(strings.ToLower(out), "permission denied") || + strings.Contains(strings.ToLower(out), "operation not permitted")) { + errChan <- fmt.Errorf("privileged unshare with apparmor should have failed with 'mount: cannot mount none' or 'permission denied', got: %s, %v", out, err) + } else { + errChan <- nil + } + }() + + var retErr error + for i := 0; i < 3; i++ { + err := <-errChan + if retErr == nil && err != nil { + retErr = err + } + } + if retErr != nil { + c.Fatal(retErr) + } +} + +func (s *DockerSuite) TestRunPublishPort(c *check.C) { + // TODO Windows: This may be possible once Windows moves to libnetwork and CNM + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name", "test", "--expose", "8080", "busybox", "top") + out, _ := dockerCmd(c, "port", "test") + out = strings.Trim(out, "\r\n") + if out != "" { + c.Fatalf("run without --publish-all should not publish port, out should be nil, but got: %s", out) + } +} + +// Issue #10184. 
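+// In the --device mapping exercised below, the ":mrw" suffix grants mknod,
+// read, and write access through the devices cgroup, so the node should show
+// up inside the container as a character device with crw-rw-rw- permissions.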
+func (s *DockerSuite) TestDevicePermissions(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + const permissions = "crw-rw-rw-" + out, status := dockerCmd(c, "run", "--device", "/dev/fuse:/dev/fuse:mrw", "busybox:latest", "ls", "-l", "/dev/fuse") + if status != 0 { + c.Fatalf("expected status 0, got %d", status) + } + if !strings.HasPrefix(out, permissions) { + c.Fatalf("output should begin with %q, got %q", permissions, out) + } +} + +func (s *DockerSuite) TestRunCapAddCHOWN(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "--cap-drop=ALL", "--cap-add=CHOWN", "busybox", "sh", "-c", "adduser -D -H newuser && chown newuser /home && echo ok") + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +// https://github.com/docker/docker/pull/14498 +func (s *DockerSuite) TestVolumeFromMixedRWOptions(c *check.C) { + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + + dockerCmd(c, "run", "--name", "parent", "-v", prefix+"/test", "busybox", "true") + + dockerCmd(c, "run", "--volumes-from", "parent:ro", "--name", "test-volumes-1", "busybox", "true") + dockerCmd(c, "run", "--volumes-from", "parent:rw", "--name", "test-volumes-2", "busybox", "true") + + if testEnv.DaemonPlatform() != "windows" { + mRO, err := inspectMountPoint("test-volumes-1", prefix+slash+"test") + c.Assert(err, checker.IsNil, check.Commentf("failed to inspect mount point")) + if mRO.RW { + c.Fatalf("Expected RO volume was RW") + } + } + + mRW, err := inspectMountPoint("test-volumes-2", prefix+slash+"test") + c.Assert(err, checker.IsNil, check.Commentf("failed to inspect mount point")) + if !mRW.RW { + c.Fatalf("Expected RW volume was RO") + } +} + +func (s *DockerSuite) TestRunWriteFilteredProc(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, Apparmor, DaemonIsLinux, NotUserNamespace) + + testWritePaths := []string{ + /* modprobe and core_pattern should both be denied by generic + * policy of denials for /proc/sys/kernel. 
These files have been + * picked to be checked as they are particularly sensitive to writes */ + "/proc/sys/kernel/modprobe", + "/proc/sys/kernel/core_pattern", + "/proc/sysrq-trigger", + "/proc/kcore", + } + for i, filePath := range testWritePaths { + name := fmt.Sprintf("writeprocsieve-%d", i) + + shellCmd := fmt.Sprintf("exec 3>%s", filePath) + out, code, err := dockerCmdWithError("run", "--privileged", "--security-opt", "apparmor=docker-default", "--name", name, "busybox", "sh", "-c", shellCmd) + if code != 0 { + return + } + if err != nil { + c.Fatalf("Open FD for write should have failed with permission denied, got: %s, %v", out, err) + } + } +} + +func (s *DockerSuite) TestRunNetworkFilesBindMount(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, SameHostDaemon, DaemonIsLinux) + + expected := "test123" + + filename := createTmpFile(c, expected) + defer os.Remove(filename) + + nwfiles := []string{"/etc/resolv.conf", "/etc/hosts", "/etc/hostname"} + + for i := range nwfiles { + actual, _ := dockerCmd(c, "run", "-v", filename+":"+nwfiles[i], "busybox", "cat", nwfiles[i]) + if actual != expected { + c.Fatalf("expected %s be: %q, but was: %q", nwfiles[i], expected, actual) + } + } +} + +func (s *DockerSuite) TestRunNetworkFilesBindMountRO(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, SameHostDaemon, DaemonIsLinux) + + filename := createTmpFile(c, "test123") + defer os.Remove(filename) + + nwfiles := []string{"/etc/resolv.conf", "/etc/hosts", "/etc/hostname"} + + for i := range nwfiles { + _, exitCode, err := dockerCmdWithError("run", "-v", filename+":"+nwfiles[i]+":ro", "busybox", "touch", nwfiles[i]) + if err == nil || exitCode == 0 { + c.Fatalf("run should fail because bind mount of %s is ro: exit code %d", nwfiles[i], exitCode) + } + } +} + +func (s *DockerSuite) TestRunNetworkFilesBindMountROFilesystem(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, SameHostDaemon, DaemonIsLinux, UserNamespaceROMount) + + filename := createTmpFile(c, "test123") + defer os.Remove(filename) + + nwfiles := []string{"/etc/resolv.conf", "/etc/hosts", "/etc/hostname"} + + for i := range nwfiles { + _, exitCode := dockerCmd(c, "run", "-v", filename+":"+nwfiles[i], "--read-only", "busybox", "touch", nwfiles[i]) + if exitCode != 0 { + c.Fatalf("run should not fail because %s is mounted writable on read-only root filesystem: exit code %d", nwfiles[i], exitCode) + } + } + + for i := range nwfiles { + _, exitCode, err := dockerCmdWithError("run", "-v", filename+":"+nwfiles[i]+":ro", "--read-only", "busybox", "touch", nwfiles[i]) + if err == nil || exitCode == 0 { + c.Fatalf("run should fail because %s is mounted read-only on read-only root filesystem: exit code %d", nwfiles[i], exitCode) + } + } +} + +func (s *DockerTrustSuite) TestTrustedRun(c *check.C) { + // Windows does not support this functionality + testRequires(c, DaemonIsLinux) + repoName := s.setupTrustedImage(c, "trusted-run") + + // Try run + cli.Docker(cli.Args("run", repoName), trustedCmd).Assert(c, SuccessTagging) + cli.DockerCmd(c, "rmi", repoName) + + // Try untrusted run to ensure we pushed the tag to the registry + cli.Docker(cli.Args("run", "--disable-content-trust=true", repoName), trustedCmd).Assert(c, SuccessDownloadedOnStderr) +} + +func (s *DockerTrustSuite) TestUntrustedRun(c *check.C) { + // Windows does not support this functionality + testRequires(c, DaemonIsLinux) + repoName := 
fmt.Sprintf("%v/dockercliuntrusted/runtest:latest", privateRegistryURL) + // tag the image and upload it to the private registry + cli.DockerCmd(c, "tag", "busybox", repoName) + cli.DockerCmd(c, "push", repoName) + cli.DockerCmd(c, "rmi", repoName) + + // Try trusted run on untrusted tag + cli.Docker(cli.Args("run", repoName), trustedCmd).Assert(c, icmd.Expected{ + ExitCode: 125, + Err: "does not have trust data for", + }) +} + +func (s *DockerTrustSuite) TestTrustedRunFromBadTrustServer(c *check.C) { + // Windows does not support this functionality + testRequires(c, DaemonIsLinux) + repoName := fmt.Sprintf("%v/dockerclievilrun/trusted:latest", privateRegistryURL) + evilLocalConfigDir, err := ioutil.TempDir("", "evilrun-local-config-dir") + if err != nil { + c.Fatalf("Failed to create local temp dir") + } + + // tag the image and upload it to the private registry + cli.DockerCmd(c, "tag", "busybox", repoName) + + cli.Docker(cli.Args("push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + cli.DockerCmd(c, "rmi", repoName) + + // Try run + cli.Docker(cli.Args("run", repoName), trustedCmd).Assert(c, SuccessTagging) + cli.DockerCmd(c, "rmi", repoName) + + // Kill the notary server, start a new "evil" one. + s.not.Close() + s.not, err = newTestNotary(c) + if err != nil { + c.Fatalf("Restarting notary server failed.") + } + + // In order to make an evil server, lets re-init a client (with a different trust dir) and push new data. + // tag an image and upload it to the private registry + cli.DockerCmd(c, "--config", evilLocalConfigDir, "tag", "busybox", repoName) + + // Push up to the new server + cli.Docker(cli.Args("--config", evilLocalConfigDir, "push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + + // Now, try running with the original client from this new trust server. This should fail because the new root is invalid. + cli.Docker(cli.Args("run", repoName), trustedCmd).Assert(c, icmd.Expected{ + ExitCode: 125, + Err: "could not rotate trust to a new trusted root", + }) +} + +func (s *DockerSuite) TestPtraceContainerProcsFromHost(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux, SameHostDaemon) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + pid1 := inspectField(c, id, "State.Pid") + + _, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/net", pid1)) + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestAppArmorDeniesPtrace(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, SameHostDaemon, Apparmor, DaemonIsLinux) + + // Run through 'sh' so we are NOT pid 1. Pid 1 may be able to trace + // itself, but pid>1 should not be able to trace pid1. 
+ _, exitCode, _ := dockerCmdWithError("run", "busybox", "sh", "-c", "sh -c readlink /proc/1/ns/net") + if exitCode == 0 { + c.Fatal("ptrace was not successfully restricted by AppArmor") + } +} + +func (s *DockerSuite) TestAppArmorTraceSelf(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux, SameHostDaemon, Apparmor) + + _, exitCode, _ := dockerCmdWithError("run", "busybox", "readlink", "/proc/1/ns/net") + if exitCode != 0 { + c.Fatal("ptrace of self failed.") + } +} + +func (s *DockerSuite) TestAppArmorDeniesChmodProc(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, SameHostDaemon, Apparmor, DaemonIsLinux, NotUserNamespace) + _, exitCode, _ := dockerCmdWithError("run", "busybox", "chmod", "744", "/proc/cpuinfo") + if exitCode == 0 { + // If our test failed, attempt to repair the host system... + _, exitCode, _ := dockerCmdWithError("run", "busybox", "chmod", "444", "/proc/cpuinfo") + if exitCode == 0 { + c.Fatal("AppArmor was unsuccessful in prohibiting chmod of /proc/* files.") + } + } +} + +func (s *DockerSuite) TestRunCapAddSYSTIME(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + + dockerCmd(c, "run", "--cap-drop=ALL", "--cap-add=SYS_TIME", "busybox", "sh", "-c", "grep ^CapEff /proc/self/status | sed 's/^CapEff:\t//' | grep ^0000000002000000$") +} + +// run create container failed should clean up the container +func (s *DockerSuite) TestRunCreateContainerFailedCleanUp(c *check.C) { + // TODO Windows. This may be possible to enable once link is supported + testRequires(c, DaemonIsLinux) + name := "unique_name" + _, _, err := dockerCmdWithError("run", "--name", name, "--link", "nothing:nothing", "busybox") + c.Assert(err, check.NotNil, check.Commentf("Expected docker run to fail!")) + + containerID, err := inspectFieldWithError(name, "Id") + c.Assert(err, checker.NotNil, check.Commentf("Expected not to have this container: %s!", containerID)) + c.Assert(containerID, check.Equals, "", check.Commentf("Expected not to have this container: %s!", containerID)) +} + +func (s *DockerSuite) TestRunNamedVolume(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "--name=test", "-v", "testing:"+prefix+"/foo", "busybox", "sh", "-c", "echo hello > "+prefix+"/foo/bar") + + out, _ := dockerCmd(c, "run", "--volumes-from", "test", "busybox", "sh", "-c", "cat "+prefix+"/foo/bar") + c.Assert(strings.TrimSpace(out), check.Equals, "hello") + + out, _ = dockerCmd(c, "run", "-v", "testing:"+prefix+"/foo", "busybox", "sh", "-c", "cat "+prefix+"/foo/bar") + c.Assert(strings.TrimSpace(out), check.Equals, "hello") +} + +func (s *DockerSuite) TestRunWithUlimits(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "--name=testulimits", "--ulimit", "nofile=42", "busybox", "/bin/sh", "-c", "ulimit -n") + ul := strings.TrimSpace(out) + if ul != "42" { + c.Fatalf("expected `ulimit -n` to be 42, got %s", ul) + } +} + +func (s *DockerSuite) TestRunContainerWithCgroupParent(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + + // cgroup-parent relative path + testRunContainerWithCgroupParent(c, "test", "cgroup-test") + + // cgroup-parent absolute path + testRunContainerWithCgroupParent(c, "/cgroup-parent/test", 
"cgroup-test-absolute") +} + +func testRunContainerWithCgroupParent(c *check.C, cgroupParent, name string) { + out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup") + if err != nil { + c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) + } + cgroupPaths := testutil.ParseCgroupPaths(string(out)) + if len(cgroupPaths) == 0 { + c.Fatalf("unexpected output - %q", string(out)) + } + id := getIDByName(c, name) + expectedCgroup := path.Join(cgroupParent, id) + found := false + for _, path := range cgroupPaths { + if strings.HasSuffix(path, expectedCgroup) { + found = true + break + } + } + if !found { + c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. Cgroup Paths: %v", expectedCgroup, cgroupPaths) + } +} + +// TestRunInvalidCgroupParent checks that a specially-crafted cgroup parent doesn't cause Docker to crash or start modifying /. +func (s *DockerSuite) TestRunInvalidCgroupParent(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + + testRunInvalidCgroupParent(c, "../../../../../../../../SHOULD_NOT_EXIST", "SHOULD_NOT_EXIST", "cgroup-invalid-test") + + testRunInvalidCgroupParent(c, "/../../../../../../../../SHOULD_NOT_EXIST", "/SHOULD_NOT_EXIST", "cgroup-absolute-invalid-test") +} + +func testRunInvalidCgroupParent(c *check.C, cgroupParent, cleanCgroupParent, name string) { + out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup") + if err != nil { + // XXX: This may include a daemon crash. + c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) + } + + // We expect "/SHOULD_NOT_EXIST" to not exist. If not, we have a security issue. + if _, err := os.Stat("/SHOULD_NOT_EXIST"); err == nil || !os.IsNotExist(err) { + c.Fatalf("SECURITY: --cgroup-parent with ../../ relative paths cause files to be created in the host (this is bad) !!") + } + + cgroupPaths := testutil.ParseCgroupPaths(string(out)) + if len(cgroupPaths) == 0 { + c.Fatalf("unexpected output - %q", string(out)) + } + id := getIDByName(c, name) + expectedCgroup := path.Join(cleanCgroupParent, id) + found := false + for _, path := range cgroupPaths { + if strings.HasSuffix(path, expectedCgroup) { + found = true + break + } + } + if !found { + c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. 
Cgroup Paths: %v", expectedCgroup, cgroupPaths) + } +} + +func (s *DockerSuite) TestRunContainerWithCgroupMountRO(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + // --read-only + userns has remount issues + testRequires(c, DaemonIsLinux, NotUserNamespace) + + filename := "/sys/fs/cgroup/devices/test123" + out, _, err := dockerCmdWithError("run", "busybox", "touch", filename) + if err == nil { + c.Fatal("expected cgroup mount point to be read-only, touch file should fail") + } + expected := "Read-only file system" + if !strings.Contains(out, expected) { + c.Fatalf("expected output from failure to contain %s but contains %s", expected, out) + } +} + +func (s *DockerSuite) TestRunContainerNetworkModeToSelf(c *check.C) { + // Not applicable on Windows which does not support --net=container + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--name=me", "--net=container:me", "busybox", "true") + if err == nil || !strings.Contains(out, "cannot join own network") { + c.Fatalf("using container net mode to self should result in an error\nerr: %q\nout: %s", err, out) + } +} + +func (s *DockerSuite) TestRunContainerNetModeWithDNSMacHosts(c *check.C) { + // Not applicable on Windows which does not support --net=container + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "-d", "--name", "parent", "busybox", "top") + if err != nil { + c.Fatalf("failed to run container: %v, output: %q", err, out) + } + + out, _, err = dockerCmdWithError("run", "--dns", "1.2.3.4", "--net=container:parent", "busybox") + if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkAndDNS.Error()) { + c.Fatalf("run --net=container with --dns should error out") + } + + out, _, err = dockerCmdWithError("run", "--mac-address", "92:d0:c6:0a:29:33", "--net=container:parent", "busybox") + if err == nil || !strings.Contains(out, runconfig.ErrConflictContainerNetworkAndMac.Error()) { + c.Fatalf("run --net=container with --mac-address should error out") + } + + out, _, err = dockerCmdWithError("run", "--add-host", "test:192.168.2.109", "--net=container:parent", "busybox") + if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkHosts.Error()) { + c.Fatalf("run --net=container with --add-host should error out") + } +} + +func (s *DockerSuite) TestRunContainerNetModeWithExposePort(c *check.C) { + // Not applicable on Windows which does not support --net=container + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name", "parent", "busybox", "top") + + out, _, err := dockerCmdWithError("run", "-p", "5000:5000", "--net=container:parent", "busybox") + if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkPublishPorts.Error()) { + c.Fatalf("run --net=container with -p should error out") + } + + out, _, err = dockerCmdWithError("run", "-P", "--net=container:parent", "busybox") + if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkPublishPorts.Error()) { + c.Fatalf("run --net=container with -P should error out") + } + + out, _, err = dockerCmdWithError("run", "--expose", "5000", "--net=container:parent", "busybox") + if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkExposePorts.Error()) { + c.Fatalf("run --net=container with --expose should error out") + } +} + +func (s *DockerSuite) TestRunLinkToContainerNetMode(c *check.C) { + // Not applicable on Windows which does not support --net=container or --link + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "--name", "test", 
"-d", "busybox", "top") + dockerCmd(c, "run", "--name", "parent", "-d", "--net=container:test", "busybox", "top") + dockerCmd(c, "run", "-d", "--link=parent:parent", "busybox", "top") + dockerCmd(c, "run", "--name", "child", "-d", "--net=container:parent", "busybox", "top") + dockerCmd(c, "run", "-d", "--link=child:child", "busybox", "top") +} + +func (s *DockerSuite) TestRunLoopbackOnlyExistsWhenNetworkingDisabled(c *check.C) { + // TODO Windows: This may be possible to convert. + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "--net=none", "busybox", "ip", "-o", "-4", "a", "show", "up") + + var ( + count = 0 + parts = strings.Split(out, "\n") + ) + + for _, l := range parts { + if l != "" { + count++ + } + } + + if count != 1 { + c.Fatalf("Wrong interface count in container %d", count) + } + + if !strings.HasPrefix(out, "1: lo") { + c.Fatalf("Wrong interface in test container: expected [1: lo], got %s", out) + } +} + +// Issue #4681 +func (s *DockerSuite) TestRunLoopbackWhenNetworkDisabled(c *check.C) { + if testEnv.DaemonPlatform() == "windows" { + dockerCmd(c, "run", "--net=none", testEnv.MinimalBaseImage(), "ping", "-n", "1", "127.0.0.1") + } else { + dockerCmd(c, "run", "--net=none", "busybox", "ping", "-c", "1", "127.0.0.1") + } +} + +func (s *DockerSuite) TestRunModeNetContainerHostname(c *check.C) { + // Windows does not support --net=container + testRequires(c, DaemonIsLinux, ExecSupport) + + dockerCmd(c, "run", "-i", "-d", "--name", "parent", "busybox", "top") + out, _ := dockerCmd(c, "exec", "parent", "cat", "/etc/hostname") + out1, _ := dockerCmd(c, "run", "--net=container:parent", "busybox", "cat", "/etc/hostname") + + if out1 != out { + c.Fatal("containers with shared net namespace should have same hostname") + } +} + +func (s *DockerSuite) TestRunNetworkNotInitializedNoneMode(c *check.C) { + // TODO Windows: Network settings are not currently propagated. This may + // be resolved in the future with the move to libnetwork and CNM. 
+	testRequires(c, DaemonIsLinux)
+	out, _ := dockerCmd(c, "run", "-d", "--net=none", "busybox", "top")
+	id := strings.TrimSpace(out)
+	res := inspectField(c, id, "NetworkSettings.Networks.none.IPAddress")
+	if res != "" {
+		c.Fatalf("For 'none' mode the network must not be initialized, but container got IP: %s", res)
+	}
+}
+
+func (s *DockerSuite) TestTwoContainersInNetHost(c *check.C) {
+	// Not applicable as Windows does not support --net=host
+	testRequires(c, DaemonIsLinux, NotUserNamespace)
+	dockerCmd(c, "run", "-d", "--net=host", "--name=first", "busybox", "top")
+	dockerCmd(c, "run", "-d", "--net=host", "--name=second", "busybox", "top")
+	dockerCmd(c, "stop", "first")
+	dockerCmd(c, "stop", "second")
+}
+
+func (s *DockerSuite) TestContainersInUserDefinedNetwork(c *check.C) {
+	testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
+	dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork")
+	dockerCmd(c, "run", "-d", "--net=testnetwork", "--name=first", "busybox", "top")
+	c.Assert(waitRun("first"), check.IsNil)
+	dockerCmd(c, "run", "-t", "--net=testnetwork", "--name=second", "busybox", "ping", "-c", "1", "first")
+}
+
+func (s *DockerSuite) TestContainersInMultipleNetworks(c *check.C) {
+	testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
+	// Create 2 networks using bridge driver
+	dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1")
+	dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork2")
+	// Run and connect containers to testnetwork1
+	dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=first", "busybox", "top")
+	c.Assert(waitRun("first"), check.IsNil)
+	dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=second", "busybox", "top")
+	c.Assert(waitRun("second"), check.IsNil)
+	// Check connectivity between containers in testnetwork1
+	dockerCmd(c, "exec", "first", "ping", "-c", "1", "second.testnetwork1")
+	// Connect containers to testnetwork2
+	dockerCmd(c, "network", "connect", "testnetwork2", "first")
+	dockerCmd(c, "network", "connect", "testnetwork2", "second")
+	// Check connectivity between containers in testnetwork2
+	dockerCmd(c, "exec", "second", "ping", "-c", "1", "first.testnetwork2")
+}
+
+func (s *DockerSuite) TestContainersNetworkIsolation(c *check.C) {
+	testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
+	// Create 2 networks using bridge driver
+	dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1")
+	dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork2")
+	// Run 1 container in testnetwork1 and another in testnetwork2
+	dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=first", "busybox", "top")
+	c.Assert(waitRun("first"), check.IsNil)
+	dockerCmd(c, "run", "-d", "--net=testnetwork2", "--name=second", "busybox", "top")
+	c.Assert(waitRun("second"), check.IsNil)
+
+	// Check isolation between containers: ping must fail
+	_, _, err := dockerCmdWithError("exec", "first", "ping", "-c", "1", "second")
+	c.Assert(err, check.NotNil)
+	// Connect first container to testnetwork2
+	dockerCmd(c, "network", "connect", "testnetwork2", "first")
+	// ping must succeed now
+	_, _, err = dockerCmdWithError("exec", "first", "ping", "-c", "1", "second")
+	c.Assert(err, check.IsNil)
+
+	// Disconnect first container from testnetwork2
+	dockerCmd(c, "network", "disconnect", "testnetwork2", "first")
+	// ping must fail again
+	_, _, err = dockerCmdWithError("exec", "first", "ping", "-c", "1", "second")
+	c.Assert(err, check.NotNil)
+}
+
+func (s *DockerSuite) TestNetworkRmWithActiveContainers(c *check.C) {
+	testRequires(c, DaemonIsLinux, NotUserNamespace)
+	// Create a network using the bridge driver
+	dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1")
+	// Run and connect containers to testnetwork1
+	dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=first", "busybox", "top")
+	c.Assert(waitRun("first"), check.IsNil)
+	dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=second", "busybox", "top")
+	c.Assert(waitRun("second"), check.IsNil)
+	// Network delete with active containers must fail
+	_, _, err := dockerCmdWithError("network", "rm", "testnetwork1")
+	c.Assert(err, check.NotNil)
+
+	dockerCmd(c, "stop", "first")
+	// "second" is still running, so removal must keep failing
+	_, _, err = dockerCmdWithError("network", "rm", "testnetwork1")
+	c.Assert(err, check.NotNil)
+}
+
+func (s *DockerSuite) TestContainerRestartInMultipleNetworks(c *check.C) {
+	testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
+	// Create 2 networks using bridge driver
+	dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1")
+	dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork2")
+
+	// Run and connect containers to testnetwork1
+	dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=first", "busybox", "top")
+	c.Assert(waitRun("first"), check.IsNil)
+	dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=second", "busybox", "top")
+	c.Assert(waitRun("second"), check.IsNil)
+	// Check connectivity between containers in testnetwork1
+	dockerCmd(c, "exec", "first", "ping", "-c", "1", "second.testnetwork1")
+	// Connect containers to testnetwork2
+	dockerCmd(c, "network", "connect", "testnetwork2", "first")
+	dockerCmd(c, "network", "connect", "testnetwork2", "second")
+	// Check connectivity between containers in testnetwork2
+	dockerCmd(c, "exec", "second", "ping", "-c", "1", "first.testnetwork2")
+
+	// Stop second container and test ping failures on both networks
+	dockerCmd(c, "stop", "second")
+	_, _, err := dockerCmdWithError("exec", "first", "ping", "-c", "1", "second.testnetwork1")
+	c.Assert(err, check.NotNil)
+	_, _, err = dockerCmdWithError("exec", "first", "ping", "-c", "1", "second.testnetwork2")
+	c.Assert(err, check.NotNil)
+
+	// Start the second container; connectivity must be restored on both networks
+	dockerCmd(c, "start", "second")
+	dockerCmd(c, "exec", "first", "ping", "-c", "1", "second.testnetwork1")
+	dockerCmd(c, "exec", "second", "ping", "-c", "1", "first.testnetwork2")
+}
+
+func (s *DockerSuite) TestContainerWithConflictingHostNetworks(c *check.C) {
+	testRequires(c, DaemonIsLinux, NotUserNamespace)
+	// Run a container with --net=host
+	dockerCmd(c, "run", "-d", "--net=host", "--name=first", "busybox", "top")
+	c.Assert(waitRun("first"), check.IsNil)
+
+	// Create a network using bridge driver
+	dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1")
+
+	// Connecting to the user defined network must fail
+	_, _, err := dockerCmdWithError("network", "connect", "testnetwork1", "first")
+	c.Assert(err, check.NotNil)
+}
+
+func (s *DockerSuite) TestContainerWithConflictingSharedNetwork(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	dockerCmd(c, "run", "-d", "--name=first", "busybox", "top")
+	c.Assert(waitRun("first"), check.IsNil)
+	// Run second container in first container's network namespace
+	dockerCmd(c, "run", "-d", "--net=container:first", "--name=second", "busybox", "top")
+	c.Assert(waitRun("second"), check.IsNil)
+
+	// Create a network using bridge driver
+	dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1")
+
+	// Connecting to the user defined network must fail
+	out, _, err := dockerCmdWithError("network", "connect", "testnetwork1", "second")
+	c.Assert(err, check.NotNil)
+	c.Assert(out, checker.Contains, runconfig.ErrConflictSharedNetwork.Error())
+}
+
+func (s *DockerSuite) TestContainerWithConflictingNoneNetwork(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	dockerCmd(c, "run", "-d", "--net=none", "--name=first", "busybox", "top")
+	c.Assert(waitRun("first"), check.IsNil)
+
+	// Create a network using bridge driver
+	dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1")
+
+	// Connecting to the user defined network must fail
+	out, _, err := dockerCmdWithError("network", "connect", "testnetwork1", "first")
+	c.Assert(err, check.NotNil)
+	c.Assert(out, checker.Contains, runconfig.ErrConflictNoNetwork.Error())
+
+	// Create a container connected to testnetwork1
+	dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=second", "busybox", "top")
+	c.Assert(waitRun("second"), check.IsNil)
+
+	// Connecting the second container to the none network must fail as well
+	_, _, err = dockerCmdWithError("network", "connect", "none", "second")
+	c.Assert(err, check.NotNil)
+}
+
+// #11957 - stdin with no tty does not exit if stdin is not closed, even though the container has exited
+func (s *DockerSuite) TestRunStdinBlockedAfterContainerExit(c *check.C) {
+	cmd := exec.Command(dockerBinary, "run", "-i", "--name=test", "busybox", "true")
+	in, err := cmd.StdinPipe()
+	c.Assert(err, check.IsNil)
+	defer in.Close()
+	stdout := bytes.NewBuffer(nil)
+	cmd.Stdout = stdout
+	cmd.Stderr = stdout
+	c.Assert(cmd.Start(), check.IsNil)
+
+	waitChan := make(chan error)
+	go func() {
+		waitChan <- cmd.Wait()
+	}()
+
+	select {
+	case err := <-waitChan:
+		c.Assert(err, check.IsNil, check.Commentf(stdout.String()))
+	case <-time.After(30 * time.Second):
+		c.Fatal("timeout waiting for command to exit")
+	}
+}
+
+func (s *DockerSuite) TestRunWrongCpusetCpusFlagValue(c *check.C) {
+	// TODO Windows: This needs validation (error out) in the daemon.
+	testRequires(c, DaemonIsLinux)
+	out, exitCode, err := dockerCmdWithError("run", "--cpuset-cpus", "1-10,11--", "busybox", "true")
+	c.Assert(err, check.NotNil)
+	expected := "Error response from daemon: Invalid value 1-10,11-- for cpuset cpus.\n"
+	if !(strings.Contains(out, expected) || exitCode == 125) {
+		c.Fatalf("Expected output to contain %q with exitCode 125, got out: %q exitCode: %v", expected, out, exitCode)
+	}
+}
+
+func (s *DockerSuite) TestRunWrongCpusetMemsFlagValue(c *check.C) {
+	// TODO Windows: This needs validation (error out) in the daemon.
+	testRequires(c, DaemonIsLinux)
+	out, exitCode, err := dockerCmdWithError("run", "--cpuset-mems", "1-42--", "busybox", "true")
+	c.Assert(err, check.NotNil)
+	expected := "Error response from daemon: Invalid value 1-42-- for cpuset mems.\n"
+	if !(strings.Contains(out, expected) || exitCode == 125) {
+		c.Fatalf("Expected output to contain %q with exitCode 125, got out: %q exitCode: %v", expected, out, exitCode)
+	}
+}
+
+// TestRunNonExecutableCmd checks that 'docker run busybox foo' exits with error code 127.
+func (s *DockerSuite) TestRunNonExecutableCmd(c *check.C) {
+	name := "testNonExecutableCmd"
+	icmd.RunCommand(dockerBinary, "run", "--name", name, "busybox", "foo").Assert(c, icmd.Expected{
+		ExitCode: 127,
+		Error:    "exit status 127",
+	})
+}
+
+// TestRunNonExistingCmd checks that 'docker run busybox /bin/foo' exits with code 127.
+func (s *DockerSuite) TestRunNonExistingCmd(c *check.C) {
+	name := "testNonExistingCmd"
+	icmd.RunCommand(dockerBinary, "run", "--name", name, "busybox", "/bin/foo").Assert(c, icmd.Expected{
+		ExitCode: 127,
+		Error:    "exit status 127",
+	})
+}
+
+// TestCmdCannotBeInvoked checks that 'docker run busybox /etc' exits with 126, or
+// 127 on Windows. The difference is that on Windows the container must actually be
+// started before the check is made (and yes, that is by design...)
+func (s *DockerSuite) TestCmdCannotBeInvoked(c *check.C) {
+	expected := 126
+	if testEnv.DaemonPlatform() == "windows" {
+		expected = 127
+	}
+	name := "testCmdCannotBeInvoked"
+	icmd.RunCommand(dockerBinary, "run", "--name", name, "busybox", "/etc").Assert(c, icmd.Expected{
+		ExitCode: expected,
+		Error:    fmt.Sprintf("exit status %d", expected),
+	})
+}
+
+// TestRunNonExistingImage checks that 'docker run foo' exits with status 125 and
+// that the output contains 'Unable to find image'
+// FIXME(vdemeester) should be a unit test
+func (s *DockerSuite) TestRunNonExistingImage(c *check.C) {
+	icmd.RunCommand(dockerBinary, "run", "foo").Assert(c, icmd.Expected{
+		ExitCode: 125,
+		Err:      "Unable to find image",
+	})
+}
+
+// TestDockerFails checks that 'docker run -foo busybox' exits with 125 to signal that docker run failed
+// FIXME(vdemeester) should be a unit test
+func (s *DockerSuite) TestDockerFails(c *check.C) {
+	icmd.RunCommand(dockerBinary, "run", "-foo", "busybox").Assert(c, icmd.Expected{
+		ExitCode: 125,
+		Error:    "exit status 125",
+	})
+}
+
+// TestRunInvalidReference invokes docker run with a bad reference.
+func (s *DockerSuite) TestRunInvalidReference(c *check.C) {
+	out, exit, _ := dockerCmdWithError("run", "busybox@foo")
+	if exit == 0 {
+		c.Fatalf("expected non-zero exit code; received %d", exit)
+	}
+
+	if !strings.Contains(out, "invalid reference format") {
+		c.Fatalf(`Expected "invalid reference format" in output; got: %s`, out)
+	}
+}
+
+// Test fix for issue #17854
+func (s *DockerSuite) TestRunInitLayerPathOwnership(c *check.C) {
+	// Not applicable on Windows as it does not support Linux uid/gid ownership
+	testRequires(c, DaemonIsLinux)
+	name := "testetcfileownership"
+	buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
+	RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
+	RUN echo 'dockerio:x:1001:' >> /etc/group
+	RUN chown dockerio:dockerio /etc`))
+
+	// Test that dockerio ownership of /etc is retained at runtime
+	out, _ := dockerCmd(c, "run", "--rm", name, "stat", "-c", "%U:%G", "/etc")
+	out = strings.TrimSpace(out)
+	if out != "dockerio:dockerio" {
+		c.Fatalf("Wrong /etc ownership: expected dockerio:dockerio, got %q", out)
+	}
+}
+
+func (s *DockerSuite) TestRunWithOomScoreAdj(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+
+	expected := "642"
+	out, _ := dockerCmd(c, "run", "--oom-score-adj", expected, "busybox", "cat", "/proc/self/oom_score_adj")
+	oomScoreAdj := strings.TrimSpace(out)
+	if oomScoreAdj != expected {
+		c.Fatalf("Expected oom_score_adj set to %q, got %q instead", expected, oomScoreAdj)
+	}
+}
+
+func (s *DockerSuite) TestRunWithOomScoreAdjInvalidRange(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+
+	out, _, err := dockerCmdWithError("run", "--oom-score-adj", "1001", "busybox", "true")
+	c.Assert(err, check.NotNil)
+	expected := "Invalid value 1001, range for oom score adj is [-1000, 1000]."
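+	// The kernel clamps oom_score_adj to [-1000, 1000], and the daemon rejects
+	// out-of-range values up front rather than failing at container start.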
+ if !strings.Contains(out, expected) { + c.Fatalf("Expected output to contain %q, got %q instead", expected, out) + } + out, _, err = dockerCmdWithError("run", "--oom-score-adj", "-1001", "busybox", "true") + c.Assert(err, check.NotNil) + expected = "Invalid value -1001, range for oom score adj is [-1000, 1000]." + if !strings.Contains(out, expected) { + c.Fatalf("Expected output to contain %q, got %q instead", expected, out) + } +} + +func (s *DockerSuite) TestRunVolumesMountedAsShared(c *check.C) { + // Volume propagation is linux only. Also it creates directories for + // bind mounting, so needs to be same host. + testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace) + + // Prepare a source directory to bind mount + tmpDir, err := ioutil.TempDir("", "volume-source") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + if err := os.Mkdir(path.Join(tmpDir, "mnt1"), 0755); err != nil { + c.Fatal(err) + } + + // Convert this directory into a shared mount point so that we do + // not rely on propagation properties of parent mount. + icmd.RunCommand("mount", "--bind", tmpDir, tmpDir).Assert(c, icmd.Success) + icmd.RunCommand("mount", "--make-private", "--make-shared", tmpDir).Assert(c, icmd.Success) + + dockerCmd(c, "run", "--privileged", "-v", fmt.Sprintf("%s:/volume-dest:shared", tmpDir), "busybox", "mount", "--bind", "/volume-dest/mnt1", "/volume-dest/mnt1") + + // Make sure a bind mount under a shared volume propagated to host. + if mounted, _ := mount.Mounted(path.Join(tmpDir, "mnt1")); !mounted { + c.Fatalf("Bind mount under shared volume did not propagate to host") + } + + mount.Unmount(path.Join(tmpDir, "mnt1")) +} + +func (s *DockerSuite) TestRunVolumesMountedAsSlave(c *check.C) { + // Volume propagation is linux only. Also it creates directories for + // bind mounting, so needs to be same host. + testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace) + + // Prepare a source directory to bind mount + tmpDir, err := ioutil.TempDir("", "volume-source") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + if err := os.Mkdir(path.Join(tmpDir, "mnt1"), 0755); err != nil { + c.Fatal(err) + } + + // Prepare a source directory with file in it. We will bind mount this + // directory and see if file shows up. + tmpDir2, err := ioutil.TempDir("", "volume-source2") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir2) + + if err := ioutil.WriteFile(path.Join(tmpDir2, "slave-testfile"), []byte("Test"), 0644); err != nil { + c.Fatal(err) + } + + // Convert this directory into a shared mount point so that we do + // not rely on propagation properties of parent mount. + icmd.RunCommand("mount", "--bind", tmpDir, tmpDir).Assert(c, icmd.Success) + icmd.RunCommand("mount", "--make-private", "--make-shared", tmpDir).Assert(c, icmd.Success) + + dockerCmd(c, "run", "-i", "-d", "--name", "parent", "-v", fmt.Sprintf("%s:/volume-dest:slave", tmpDir), "busybox", "top") + + // Bind mount tmpDir2/ onto tmpDir/mnt1. 
If mount propagates inside + // container then contents of tmpDir2/slave-testfile should become + // visible at "/volume-dest/mnt1/slave-testfile" + icmd.RunCommand("mount", "--bind", tmpDir2, path.Join(tmpDir, "mnt1")).Assert(c, icmd.Success) + + out, _ := dockerCmd(c, "exec", "parent", "cat", "/volume-dest/mnt1/slave-testfile") + + mount.Unmount(path.Join(tmpDir, "mnt1")) + + if out != "Test" { + c.Fatalf("Bind mount under slave volume did not propagate to container") + } +} + +func (s *DockerSuite) TestRunNamedVolumesMountedAsShared(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, exitCode, _ := dockerCmdWithError("run", "-v", "foo:/test:shared", "busybox", "touch", "/test/somefile") + c.Assert(exitCode, checker.Not(checker.Equals), 0) + c.Assert(out, checker.Contains, "invalid mount config") +} + +func (s *DockerSuite) TestRunNamedVolumeCopyImageData(c *check.C) { + testRequires(c, DaemonIsLinux) + + testImg := "testvolumecopy" + buildImageSuccessfully(c, testImg, build.WithDockerfile(` + FROM busybox + RUN mkdir -p /foo && echo hello > /foo/hello + `)) + + dockerCmd(c, "run", "-v", "foo:/foo", testImg) + out, _ := dockerCmd(c, "run", "-v", "foo:/foo", "busybox", "cat", "/foo/hello") + c.Assert(strings.TrimSpace(out), check.Equals, "hello") +} + +func (s *DockerSuite) TestRunNamedVolumeNotRemoved(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + + dockerCmd(c, "volume", "create", "test") + + dockerCmd(c, "run", "--rm", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true") + dockerCmd(c, "volume", "inspect", "test") + out, _ := dockerCmd(c, "volume", "ls", "-q") + c.Assert(strings.TrimSpace(out), checker.Equals, "test") + + dockerCmd(c, "run", "--name=test", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true") + dockerCmd(c, "rm", "-fv", "test") + dockerCmd(c, "volume", "inspect", "test") + out, _ = dockerCmd(c, "volume", "ls", "-q") + c.Assert(strings.TrimSpace(out), checker.Equals, "test") +} + +func (s *DockerSuite) TestRunNamedVolumesFromNotRemoved(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + + dockerCmd(c, "volume", "create", "test") + dockerCmd(c, "run", "--name=parent", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true") + dockerCmd(c, "run", "--name=child", "--volumes-from=parent", "busybox", "true") + + // Remove the parent so there are not other references to the volumes + dockerCmd(c, "rm", "-f", "parent") + // now remove the child and ensure the named volume (and only the named volume) still exists + dockerCmd(c, "rm", "-fv", "child") + dockerCmd(c, "volume", "inspect", "test") + out, _ := dockerCmd(c, "volume", "ls", "-q") + c.Assert(strings.TrimSpace(out), checker.Equals, "test") +} + +func (s *DockerSuite) TestRunAttachFailedNoLeak(c *check.C) { + nroutines, err := getGoroutineNumber() + c.Assert(err, checker.IsNil) + + runSleepingContainer(c, "--name=test", "-p", "8000:8000") + + // Wait until container is fully up and running + c.Assert(waitRun("test"), check.IsNil) + + out, _, err := dockerCmdWithError("run", "--name=fail", "-p", "8000:8000", "busybox", "true") + // We will need the following `inspect` to diagnose the issue if test fails (#21247) + out1, err1 := dockerCmd(c, "inspect", "--format", "{{json .State}}", "test") + out2, err2 := dockerCmd(c, "inspect", "--format", "{{json .State}}", "fail") + c.Assert(err, checker.NotNil, check.Commentf("Command should have failed but succeeded with: %s\nContainer 'test' [%+v]: %s\nContainer 'fail' [%+v]: %s", 
out, err1, out1, err2, out2)) + // check for windows error as well + // TODO Windows Post TP5. Fix the error message string + c.Assert(strings.Contains(string(out), "port is already allocated") || + strings.Contains(string(out), "were not connected because a duplicate name exists") || + strings.Contains(string(out), "HNS failed with error : Failed to create endpoint") || + strings.Contains(string(out), "HNS failed with error : The object already exists"), checker.Equals, true, check.Commentf("Output: %s", out)) + dockerCmd(c, "rm", "-f", "test") + + // NGoroutines is not updated right away, so we need to wait before failing + c.Assert(waitForGoroutines(nroutines), checker.IsNil) +} + +// Test for one character directory name case (#20122) +func (s *DockerSuite) TestRunVolumeWithOneCharacter(c *check.C) { + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-v", "/tmp/q:/foo", "busybox", "sh", "-c", "find /foo") + c.Assert(strings.TrimSpace(out), checker.Equals, "/foo") +} + +func (s *DockerSuite) TestRunVolumeCopyFlag(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support copying data from image to the volume + buildImageSuccessfully(c, "volumecopy", build.WithDockerfile(`FROM busybox + RUN mkdir /foo && echo hello > /foo/bar + CMD cat /foo/bar`)) + dockerCmd(c, "volume", "create", "test") + + // test with the nocopy flag + out, _, err := dockerCmdWithError("run", "-v", "test:/foo:nocopy", "volumecopy") + c.Assert(err, checker.NotNil, check.Commentf(out)) + // test default behavior which is to copy for non-binds + out, _ = dockerCmd(c, "run", "-v", "test:/foo", "volumecopy") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello") + // error out when the volume is already populated + out, _, err = dockerCmdWithError("run", "-v", "test:/foo:copy", "volumecopy") + c.Assert(err, checker.NotNil, check.Commentf(out)) + // do not error out when copy isn't explicitly set even though it's already populated + out, _ = dockerCmd(c, "run", "-v", "test:/foo", "volumecopy") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello") + + // do not allow copy modes on volumes-from + dockerCmd(c, "run", "--name=test", "-v", "/foo", "busybox", "true") + out, _, err = dockerCmdWithError("run", "--volumes-from=test:copy", "busybox", "true") + c.Assert(err, checker.NotNil, check.Commentf(out)) + out, _, err = dockerCmdWithError("run", "--volumes-from=test:nocopy", "busybox", "true") + c.Assert(err, checker.NotNil, check.Commentf(out)) + + // do not allow copy modes on binds + out, _, err = dockerCmdWithError("run", "-v", "/foo:/bar:copy", "busybox", "true") + c.Assert(err, checker.NotNil, check.Commentf(out)) + out, _, err = dockerCmdWithError("run", "-v", "/foo:/bar:nocopy", "busybox", "true") + c.Assert(err, checker.NotNil, check.Commentf(out)) +} + +// Test case for #21976 +func (s *DockerSuite) TestRunDNSInHostMode(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + + expectedOutput := "nameserver 127.0.0.1" + expectedWarning := "Localhost DNS setting" + cli.DockerCmd(c, "run", "--dns=127.0.0.1", "--net=host", "busybox", "cat", "/etc/resolv.conf").Assert(c, icmd.Expected{ + Out: expectedOutput, + Err: expectedWarning, + }) + + expectedOutput = "nameserver 1.2.3.4" + cli.DockerCmd(c, "run", "--dns=1.2.3.4", "--net=host", "busybox", "cat", "/etc/resolv.conf").Assert(c, icmd.Expected{ + Out: expectedOutput, + }) + + expectedOutput = "search example.com" + cli.DockerCmd(c, "run", "--dns-search=example.com", "--net=host", "busybox", "cat", 
"/etc/resolv.conf").Assert(c, icmd.Expected{ + Out: expectedOutput, + }) + + expectedOutput = "options timeout:3" + cli.DockerCmd(c, "run", "--dns-opt=timeout:3", "--net=host", "busybox", "cat", "/etc/resolv.conf").Assert(c, icmd.Expected{ + Out: expectedOutput, + }) + + expectedOutput1 := "nameserver 1.2.3.4" + expectedOutput2 := "search example.com" + expectedOutput3 := "options timeout:3" + out := cli.DockerCmd(c, "run", "--dns=1.2.3.4", "--dns-search=example.com", "--dns-opt=timeout:3", "--net=host", "busybox", "cat", "/etc/resolv.conf").Combined() + c.Assert(out, checker.Contains, expectedOutput1, check.Commentf("Expected '%s', but got %q", expectedOutput1, out)) + c.Assert(out, checker.Contains, expectedOutput2, check.Commentf("Expected '%s', but got %q", expectedOutput2, out)) + c.Assert(out, checker.Contains, expectedOutput3, check.Commentf("Expected '%s', but got %q", expectedOutput3, out)) +} + +// Test case for #21976 +func (s *DockerSuite) TestRunAddHostInHostMode(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + + expectedOutput := "1.2.3.4\textra" + out, _ := dockerCmd(c, "run", "--add-host=extra:1.2.3.4", "--net=host", "busybox", "cat", "/etc/hosts") + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) +} + +func (s *DockerSuite) TestRunRmAndWait(c *check.C) { + dockerCmd(c, "run", "--name=test", "--rm", "-d", "busybox", "sh", "-c", "sleep 3;exit 2") + + out, code, err := dockerCmdWithError("wait", "test") + c.Assert(err, checker.IsNil, check.Commentf("out: %s; exit code: %d", out, code)) + c.Assert(out, checker.Equals, "2\n", check.Commentf("exit code: %d", code)) + c.Assert(code, checker.Equals, 0) +} + +// Test that auto-remove is performed by the daemon (API 1.25 and above) +func (s *DockerSuite) TestRunRm(c *check.C) { + name := "miss-me-when-im-gone" + cli.DockerCmd(c, "run", "--name="+name, "--rm", "busybox") + + cli.Docker(cli.Inspect(name), cli.Format(".name")).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "No such object: " + name, + }) +} + +// Test that auto-remove is performed by the client on API versions that do not support daemon-side api-remove (API < 1.25) +func (s *DockerSuite) TestRunRmPre125Api(c *check.C) { + name := "miss-me-when-im-gone" + envs := appendBaseEnv(false, "DOCKER_API_VERSION=1.24") + cli.Docker(cli.Args("run", "--name="+name, "--rm", "busybox"), cli.WithEnvironmentVariables(envs...)).Assert(c, icmd.Success) + + cli.Docker(cli.Inspect(name), cli.Format(".name")).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "No such object: " + name, + }) +} + +// Test case for #23498 +func (s *DockerSuite) TestRunUnsetEntrypoint(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "test-entrypoint" + dockerfile := `FROM busybox +ADD entrypoint.sh /entrypoint.sh +RUN chmod 755 /entrypoint.sh +ENTRYPOINT ["/entrypoint.sh"] +CMD echo foobar` + + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "entrypoint.sh": `#!/bin/sh +echo "I am an entrypoint" +exec "$@"`, + })) + defer ctx.Close() + + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + + out := cli.DockerCmd(c, "run", "--entrypoint=", "-t", name, "echo", "foo").Combined() + c.Assert(strings.TrimSpace(out), check.Equals, "foo") + + // CMD will be reset as well (the same as setting a custom entrypoint) + cli.Docker(cli.Args("run", "--entrypoint=", "-t", name)).Assert(c, icmd.Expected{ + ExitCode: 125, + Err: "No command specified", + }) +} + 
+func (s *DockerDaemonSuite) TestRunWithUlimitAndDaemonDefault(c *check.C) { + s.d.StartWithBusybox(c, "--debug", "--default-ulimit=nofile=65535") + + name := "test-A" + _, err := s.d.Cmd("run", "--name", name, "-d", "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(s.d.WaitRun(name), check.IsNil) + + out, err := s.d.Cmd("inspect", "--format", "{{.HostConfig.Ulimits}}", name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "[nofile=65535:65535]") + + name = "test-B" + _, err = s.d.Cmd("run", "--name", name, "--ulimit=nofile=42", "-d", "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(s.d.WaitRun(name), check.IsNil) + + out, err = s.d.Cmd("inspect", "--format", "{{.HostConfig.Ulimits}}", name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "[nofile=42:42]") +} + +func (s *DockerSuite) TestRunStoppedLoggingDriverNoLeak(c *check.C) { + nroutines, err := getGoroutineNumber() + c.Assert(err, checker.IsNil) + + out, _, err := dockerCmdWithError("run", "--name=fail", "--log-driver=splunk", "busybox", "true") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "failed to initialize logging driver", check.Commentf("error should be about logging driver, got output %s", out)) + + // NGoroutines is not updated right away, so we need to wait before failing + c.Assert(waitForGoroutines(nroutines), checker.IsNil) +} + +// Handles error conditions for --credentialspec. Validating E2E success cases +// requires additional infrastructure (AD for example) on CI servers. +func (s *DockerSuite) TestRunCredentialSpecFailures(c *check.C) { + testRequires(c, DaemonIsWindows) + attempts := []struct{ value, expectedError string }{ + {"rubbish", "invalid credential spec security option - value must be prefixed file:// or registry://"}, + {"rubbish://", "invalid credential spec security option - value must be prefixed file:// or registry://"}, + {"file://", "no value supplied for file:// credential spec security option"}, + {"registry://", "no value supplied for registry:// credential spec security option"}, + {`file://c:\blah.txt`, "path cannot be absolute"}, + {`file://doesnotexist.txt`, "The system cannot find the file specified"}, + } + for _, attempt := range attempts { + _, _, err := dockerCmdWithError("run", "--security-opt=credentialspec="+attempt.value, "busybox", "true") + c.Assert(err, checker.NotNil, check.Commentf("%s expected non-nil err", attempt.value)) + c.Assert(err.Error(), checker.Contains, attempt.expectedError, check.Commentf("%s expected %s got %s", attempt.value, attempt.expectedError, err)) + } +} + +// Windows specific test to validate credential specs with a well-formed spec. +// Note it won't actually do anything in CI configuration with the spec, but +// it should not fail to run a container. +func (s *DockerSuite) TestRunCredentialSpecWellFormed(c *check.C) { + testRequires(c, DaemonIsWindows, SameHostDaemon) + validCS := readFile(`fixtures\credentialspecs\valid.json`, c) + writeFile(filepath.Join(testEnv.DockerBasePath(), `credentialspecs\valid.json`), validCS, c) + dockerCmd(c, "run", `--security-opt=credentialspec=file://valid.json`, "busybox", "true") +} + +// Windows specific test to ensure that a servicing app container is started +// if necessary once a container exits. 
It does this by forcing a no-op +// servicing event and verifying the event from Hyper-V-Compute +func (s *DockerSuite) TestRunServicingContainer(c *check.C) { + testRequires(c, DaemonIsWindows, SameHostDaemon) + + out := cli.DockerCmd(c, "run", "-d", testEnv.MinimalBaseImage(), "cmd", "/c", "mkdir c:\\programdata\\Microsoft\\Windows\\ContainerUpdates\\000_000_d99f45d0-ffc8-4af7-bd9c-ea6a62e035c9_200 && sc control cexecsvc 255").Combined() + containerID := strings.TrimSpace(out) + cli.WaitExited(c, containerID, 60*time.Second) + + result := icmd.RunCommand("powershell", "echo", `(Get-WinEvent -ProviderName "Microsoft-Windows-Hyper-V-Compute" -FilterXPath 'Event[System[EventID=2010]]' -MaxEvents 1).Message`) + result.Assert(c, icmd.Success) + out2 := result.Combined() + c.Assert(out2, checker.Contains, `"Servicing":true`, check.Commentf("Servicing container does not appear to have been started: %s", out2)) + c.Assert(out2, checker.Contains, `Windows Container (Servicing)`, check.Commentf("Didn't find 'Windows Container (Servicing): %s", out2)) + c.Assert(out2, checker.Contains, containerID+"_servicing", check.Commentf("Didn't find '%s_servicing': %s", containerID+"_servicing", out2)) +} + +func (s *DockerSuite) TestRunDuplicateMount(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + + tmpFile, err := ioutil.TempFile("", "touch-me") + c.Assert(err, checker.IsNil) + defer tmpFile.Close() + + data := "touch-me-foo-bar\n" + if _, err := tmpFile.Write([]byte(data)); err != nil { + c.Fatal(err) + } + + name := "test" + out, _ := dockerCmd(c, "run", "--name", name, "-v", "/tmp:/tmp", "-v", "/tmp:/tmp", "busybox", "sh", "-c", "cat "+tmpFile.Name()+" && ls /") + c.Assert(out, checker.Not(checker.Contains), "tmp:") + c.Assert(out, checker.Contains, data) + + out = inspectFieldJSON(c, name, "Config.Volumes") + c.Assert(out, checker.Contains, "null") +} + +func (s *DockerSuite) TestRunWindowsWithCPUCount(c *check.C) { + testRequires(c, DaemonIsWindows) + + out, _ := dockerCmd(c, "run", "--cpu-count=1", "--name", "test", "busybox", "echo", "testing") + c.Assert(strings.TrimSpace(out), checker.Equals, "testing") + + out = inspectField(c, "test", "HostConfig.CPUCount") + c.Assert(out, check.Equals, "1") +} + +func (s *DockerSuite) TestRunWindowsWithCPUShares(c *check.C) { + testRequires(c, DaemonIsWindows) + + out, _ := dockerCmd(c, "run", "--cpu-shares=1000", "--name", "test", "busybox", "echo", "testing") + c.Assert(strings.TrimSpace(out), checker.Equals, "testing") + + out = inspectField(c, "test", "HostConfig.CPUShares") + c.Assert(out, check.Equals, "1000") +} + +func (s *DockerSuite) TestRunWindowsWithCPUPercent(c *check.C) { + testRequires(c, DaemonIsWindows) + + out, _ := dockerCmd(c, "run", "--cpu-percent=80", "--name", "test", "busybox", "echo", "testing") + c.Assert(strings.TrimSpace(out), checker.Equals, "testing") + + out = inspectField(c, "test", "HostConfig.CPUPercent") + c.Assert(out, check.Equals, "80") +} + +func (s *DockerSuite) TestRunProcessIsolationWithCPUCountCPUSharesAndCPUPercent(c *check.C) { + testRequires(c, DaemonIsWindows, IsolationIsProcess) + + out, _ := dockerCmd(c, "run", "--cpu-count=1", "--cpu-shares=1000", "--cpu-percent=80", "--name", "test", "busybox", "echo", "testing") + c.Assert(strings.TrimSpace(out), checker.Contains, "WARNING: Conflicting options: CPU count takes priority over CPU shares on Windows Server Containers. 
CPU shares discarded") + c.Assert(strings.TrimSpace(out), checker.Contains, "WARNING: Conflicting options: CPU count takes priority over CPU percent on Windows Server Containers. CPU percent discarded") + c.Assert(strings.TrimSpace(out), checker.Contains, "testing") + + out = inspectField(c, "test", "HostConfig.CPUCount") + c.Assert(out, check.Equals, "1") + + out = inspectField(c, "test", "HostConfig.CPUShares") + c.Assert(out, check.Equals, "0") + + out = inspectField(c, "test", "HostConfig.CPUPercent") + c.Assert(out, check.Equals, "0") +} + +func (s *DockerSuite) TestRunHypervIsolationWithCPUCountCPUSharesAndCPUPercent(c *check.C) { + testRequires(c, DaemonIsWindows, IsolationIsHyperv) + + out, _ := dockerCmd(c, "run", "--cpu-count=1", "--cpu-shares=1000", "--cpu-percent=80", "--name", "test", "busybox", "echo", "testing") + c.Assert(strings.TrimSpace(out), checker.Contains, "testing") + + out = inspectField(c, "test", "HostConfig.CPUCount") + c.Assert(out, check.Equals, "1") + + out = inspectField(c, "test", "HostConfig.CPUShares") + c.Assert(out, check.Equals, "1000") + + out = inspectField(c, "test", "HostConfig.CPUPercent") + c.Assert(out, check.Equals, "80") +} + +// Test for #25099 +func (s *DockerSuite) TestRunEmptyEnv(c *check.C) { + testRequires(c, DaemonIsLinux) + + expectedOutput := "invalid environment variable:" + + out, _, err := dockerCmdWithError("run", "-e", "", "busybox", "true") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, expectedOutput) + + out, _, err = dockerCmdWithError("run", "-e", "=", "busybox", "true") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, expectedOutput) + + out, _, err = dockerCmdWithError("run", "-e", "=foo", "busybox", "true") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, expectedOutput) +} + +// #28658 +func (s *DockerSuite) TestSlowStdinClosing(c *check.C) { + name := "testslowstdinclosing" + repeat := 3 // regression happened 50% of the time + for i := 0; i < repeat; i++ { + cmd := icmd.Cmd{ + Command: []string{dockerBinary, "run", "--rm", "--name", name, "-i", "busybox", "cat"}, + Stdin: &delayedReader{}, + } + done := make(chan error, 1) + go func() { + err := icmd.RunCmd(cmd).Error + done <- err + }() + + select { + case <-time.After(15 * time.Second): + c.Fatal("running container timed out") // cleanup in teardown + case err := <-done: + c.Assert(err, checker.IsNil) + } + } +} + +type delayedReader struct{} + +func (s *delayedReader) Read([]byte) (int, error) { + time.Sleep(500 * time.Millisecond) + return 0, io.EOF +} + +// #28823 (originally #28639) +func (s *DockerSuite) TestRunMountReadOnlyDevShm(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + emptyDir, err := ioutil.TempDir("", "test-read-only-dev-shm") + c.Assert(err, check.IsNil) + defer os.RemoveAll(emptyDir) + out, _, err := dockerCmdWithError("run", "--rm", "--read-only", + "-v", fmt.Sprintf("%s:/dev/shm:ro", emptyDir), + "busybox", "touch", "/dev/shm/foo") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Read-only file system") +} + +func (s *DockerSuite) TestRunMount(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace) + + // mnt1, mnt2, and testCatFooBar are commonly used in multiple test cases + tmpDir, err := ioutil.TempDir("", "mount") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir) + mnt1, mnt2 := path.Join(tmpDir, "mnt1"), path.Join(tmpDir, "mnt2") + if err := os.Mkdir(mnt1, 0755); err != nil { 
+ c.Fatal(err) + } + if err := os.Mkdir(mnt2, 0755); err != nil { + c.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(mnt1, "test1"), []byte("test1"), 0644); err != nil { + c.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(mnt2, "test2"), []byte("test2"), 0644); err != nil { + c.Fatal(err) + } + testCatFooBar := func(cName string) error { + out, _ := dockerCmd(c, "exec", cName, "cat", "/foo/test1") + if out != "test1" { + return fmt.Errorf("%s not mounted on /foo", mnt1) + } + out, _ = dockerCmd(c, "exec", cName, "cat", "/bar/test2") + if out != "test2" { + return fmt.Errorf("%s not mounted on /bar", mnt2) + } + return nil + } + + type testCase struct { + equivalents [][]string + valid bool + // fn should be nil if valid==false + fn func(cName string) error + } + cases := []testCase{ + { + equivalents: [][]string{ + { + "--mount", fmt.Sprintf("type=bind,src=%s,dst=/foo", mnt1), + "--mount", fmt.Sprintf("type=bind,src=%s,dst=/bar", mnt2), + }, + { + "--mount", fmt.Sprintf("type=bind,src=%s,dst=/foo", mnt1), + "--mount", fmt.Sprintf("type=bind,src=%s,target=/bar", mnt2), + }, + { + "--volume", mnt1 + ":/foo", + "--mount", fmt.Sprintf("type=bind,src=%s,target=/bar", mnt2), + }, + }, + valid: true, + fn: testCatFooBar, + }, + { + equivalents: [][]string{ + { + "--mount", fmt.Sprintf("type=volume,src=%s,dst=/foo", mnt1), + "--mount", fmt.Sprintf("type=volume,src=%s,dst=/bar", mnt2), + }, + { + "--mount", fmt.Sprintf("type=volume,src=%s,dst=/foo", mnt1), + "--mount", fmt.Sprintf("type=volume,src=%s,target=/bar", mnt2), + }, + }, + valid: false, + }, + { + equivalents: [][]string{ + { + "--mount", fmt.Sprintf("type=bind,src=%s,dst=/foo", mnt1), + "--mount", fmt.Sprintf("type=volume,src=%s,dst=/bar", mnt2), + }, + { + "--volume", mnt1 + ":/foo", + "--mount", fmt.Sprintf("type=volume,src=%s,target=/bar", mnt2), + }, + }, + valid: false, + fn: testCatFooBar, + }, + { + equivalents: [][]string{ + { + "--read-only", + "--mount", "type=volume,dst=/bar", + }, + }, + valid: true, + fn: func(cName string) error { + _, _, err := dockerCmdWithError("exec", cName, "touch", "/bar/icanwritehere") + return err + }, + }, + { + equivalents: [][]string{ + { + "--read-only", + "--mount", fmt.Sprintf("type=bind,src=%s,dst=/foo", mnt1), + "--mount", "type=volume,dst=/bar", + }, + { + "--read-only", + "--volume", fmt.Sprintf("%s:/foo", mnt1), + "--mount", "type=volume,dst=/bar", + }, + }, + valid: true, + fn: func(cName string) error { + out, _ := dockerCmd(c, "exec", cName, "cat", "/foo/test1") + if out != "test1" { + return fmt.Errorf("%s not mounted on /foo", mnt1) + } + _, _, err := dockerCmdWithError("exec", cName, "touch", "/bar/icanwritehere") + return err + }, + }, + { + equivalents: [][]string{ + { + "--mount", fmt.Sprintf("type=bind,src=%s,dst=/foo", mnt1), + "--mount", fmt.Sprintf("type=bind,src=%s,dst=/foo", mnt2), + }, + { + "--mount", fmt.Sprintf("type=bind,src=%s,dst=/foo", mnt1), + "--mount", fmt.Sprintf("type=bind,src=%s,target=/foo", mnt2), + }, + { + "--volume", fmt.Sprintf("%s:/foo", mnt1), + "--mount", fmt.Sprintf("type=bind,src=%s,target=/foo", mnt2), + }, + }, + valid: false, + }, + { + equivalents: [][]string{ + { + "--volume", fmt.Sprintf("%s:/foo", mnt1), + "--mount", fmt.Sprintf("type=volume,src=%s,target=/foo", mnt2), + }, + }, + valid: false, + }, + { + equivalents: [][]string{ + { + "--mount", "type=volume,target=/foo", + "--mount", "type=volume,target=/foo", + }, + }, + valid: false, + }, + } + + for i, testCase := range cases { + for j, opts := range testCase.equivalents { + 
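// Editor's note: each equivalent flag set runs in its own container named
+			// mount-<case>-<variant>, so a failure pinpoints the exact flag spelling.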
+			cName := fmt.Sprintf("mount-%d-%d", i, j)
+			_, _, err := dockerCmdWithError(append([]string{"run", "-i", "-d", "--name", cName},
+				append(opts, []string{"busybox", "top"}...)...)...)
+			if testCase.valid {
+				c.Assert(err, check.IsNil,
+					check.Commentf("got error while creating a container with %v (%s)", opts, cName))
+				c.Assert(testCase.fn(cName), check.IsNil,
+					check.Commentf("got error while executing test for %v (%s)", opts, cName))
+				dockerCmd(c, "rm", "-f", cName)
+			} else {
+				c.Assert(err, checker.NotNil,
+					check.Commentf("got nil while creating a container with %v (%s)", opts, cName))
+			}
+		}
+	}
+}
+
+// Test that passing a FQDN as hostname properly sets hostname, and
+// /etc/hostname. Test case for #29100
+func (s *DockerSuite) TestRunHostnameFQDN(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+
+	expectedOutput := "foobar.example.com\nfoobar.example.com\nfoobar\nexample.com\nfoobar.example.com"
+	out, _ := dockerCmd(c, "run", "--hostname=foobar.example.com", "busybox", "sh", "-c", `cat /etc/hostname && hostname && hostname -s && hostname -d && hostname -f`)
+	c.Assert(strings.TrimSpace(out), checker.Equals, expectedOutput)
+
+	out, _ = dockerCmd(c, "run", "--hostname=foobar.example.com", "busybox", "sh", "-c", `cat /etc/hosts`)
+	expectedOutput = "foobar.example.com foobar"
+	c.Assert(strings.TrimSpace(out), checker.Contains, expectedOutput)
+}
+
+// Test case for #29129
+func (s *DockerSuite) TestRunHostnameInHostMode(c *check.C) {
+	testRequires(c, DaemonIsLinux, NotUserNamespace)
+
+	expectedOutput := "foobar\nfoobar"
+	out, _ := dockerCmd(c, "run", "--net=host", "--hostname=foobar", "busybox", "sh", "-c", `echo $HOSTNAME && hostname`)
+	c.Assert(strings.TrimSpace(out), checker.Equals, expectedOutput)
+}
+
+func (s *DockerSuite) TestRunAddDeviceCgroupRule(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+
+	deviceRule := "c 7:128 rwm"
+
+	out, _ := dockerCmd(c, "run", "--rm", "busybox", "cat", "/sys/fs/cgroup/devices/devices.list")
+	if strings.Contains(out, deviceRule) {
+		c.Fatalf("%s shouldn't be in the devices.list", deviceRule)
+	}
+
+	out, _ = dockerCmd(c, "run", "--rm", fmt.Sprintf("--device-cgroup-rule=%s", deviceRule), "busybox", "grep", deviceRule, "/sys/fs/cgroup/devices/devices.list")
+	c.Assert(strings.TrimSpace(out), checker.Equals, deviceRule)
+}
+
+// Verifies that running as local system is operating correctly on Windows
+func (s *DockerSuite) TestWindowsRunAsSystem(c *check.C) {
+	testRequires(c, DaemonIsWindows)
+	if testEnv.DaemonKernelVersionNumeric() < 15000 {
+		c.Skip("Requires build 15000 or later")
+	}
+	out, _ := dockerCmd(c, "run", "--net=none", `--user=nt authority\system`, "--hostname=XYZZY", minimalBaseImage(), "cmd", "/c", `@echo %USERNAME%`)
+	c.Assert(strings.TrimSpace(out), checker.Equals, "XYZZY$")
+}
diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_run_unix_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_run_unix_test.go
new file mode 100644
index 000000000..b3d1b0721
--- /dev/null
+++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_run_unix_test.go
@@ -0,0 +1,1576 @@
+// +build !windows
+
+package main
+
+import (
+	"bufio"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"regexp"
+	"strconv"
+	"strings"
+	"syscall"
+	"time"
+
+	"github.com/docker/docker/integration-cli/checker"
+	"github.com/docker/docker/integration-cli/cli"
+	"github.com/docker/docker/integration-cli/cli/build"
+	"github.com/docker/docker/pkg/homedir"
+	"github.com/docker/docker/pkg/mount"
+
"github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/sysinfo" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" + "github.com/kr/pty" +) + +// #6509 +func (s *DockerSuite) TestRunRedirectStdout(c *check.C) { + checkRedirect := func(command string) { + _, tty, err := pty.Open() + c.Assert(err, checker.IsNil, check.Commentf("Could not open pty")) + cmd := exec.Command("sh", "-c", command) + cmd.Stdin = tty + cmd.Stdout = tty + cmd.Stderr = tty + c.Assert(cmd.Start(), checker.IsNil) + ch := make(chan error) + go func() { + ch <- cmd.Wait() + close(ch) + }() + + select { + case <-time.After(10 * time.Second): + c.Fatal("command timeout") + case err := <-ch: + c.Assert(err, checker.IsNil, check.Commentf("wait err")) + } + } + + checkRedirect(dockerBinary + " run -i busybox cat /etc/passwd | grep -q root") + checkRedirect(dockerBinary + " run busybox cat /etc/passwd | grep -q root") +} + +// Test recursive bind mount works by default +func (s *DockerSuite) TestRunWithVolumesIsRecursive(c *check.C) { + // /tmp gets permission denied + testRequires(c, NotUserNamespace, SameHostDaemon) + tmpDir, err := ioutil.TempDir("", "docker_recursive_mount_test") + c.Assert(err, checker.IsNil) + + defer os.RemoveAll(tmpDir) + + // Create a temporary tmpfs mount. + tmpfsDir := filepath.Join(tmpDir, "tmpfs") + c.Assert(os.MkdirAll(tmpfsDir, 0777), checker.IsNil, check.Commentf("failed to mkdir at %s", tmpfsDir)) + c.Assert(mount.Mount("tmpfs", tmpfsDir, "tmpfs", ""), checker.IsNil, check.Commentf("failed to create a tmpfs mount at %s", tmpfsDir)) + + f, err := ioutil.TempFile(tmpfsDir, "touch-me") + c.Assert(err, checker.IsNil) + defer f.Close() + + out, _ := dockerCmd(c, "run", "--name", "test-data", "--volume", fmt.Sprintf("%s:/tmp:ro", tmpDir), "busybox:latest", "ls", "/tmp/tmpfs") + c.Assert(out, checker.Contains, filepath.Base(f.Name()), check.Commentf("Recursive bind mount test failed. Expected file not found")) +} + +func (s *DockerSuite) TestRunDeviceDirectory(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + if _, err := os.Stat("/dev/snd"); err != nil { + c.Skip("Host does not have /dev/snd") + } + + out, _ := dockerCmd(c, "run", "--device", "/dev/snd:/dev/snd", "busybox", "sh", "-c", "ls /dev/snd/") + c.Assert(strings.Trim(out, "\r\n"), checker.Contains, "timer", check.Commentf("expected output /dev/snd/timer")) + + out, _ = dockerCmd(c, "run", "--device", "/dev/snd:/dev/othersnd", "busybox", "sh", "-c", "ls /dev/othersnd/") + c.Assert(strings.Trim(out, "\r\n"), checker.Contains, "seq", check.Commentf("expected output /dev/othersnd/seq")) +} + +// TestRunAttachDetach checks attaching and detaching with the default escape sequence. 
+func (s *DockerSuite) TestRunAttachDetach(c *check.C) { + name := "attach-detach" + + dockerCmd(c, "run", "--name", name, "-itd", "busybox", "cat") + + cmd := exec.Command(dockerBinary, "attach", name) + stdout, err := cmd.StdoutPipe() + c.Assert(err, checker.IsNil) + cpty, tty, err := pty.Open() + c.Assert(err, checker.IsNil) + defer cpty.Close() + cmd.Stdin = tty + c.Assert(cmd.Start(), checker.IsNil) + c.Assert(waitRun(name), check.IsNil) + + _, err = cpty.Write([]byte("hello\n")) + c.Assert(err, checker.IsNil) + + out, err := bufio.NewReader(stdout).ReadString('\n') + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "hello") + + // escape sequence + _, err = cpty.Write([]byte{16}) + c.Assert(err, checker.IsNil) + time.Sleep(100 * time.Millisecond) + _, err = cpty.Write([]byte{17}) + c.Assert(err, checker.IsNil) + + ch := make(chan struct{}) + go func() { + cmd.Wait() + ch <- struct{}{} + }() + + select { + case <-ch: + case <-time.After(10 * time.Second): + c.Fatal("timed out waiting for container to exit") + } + + running := inspectField(c, name, "State.Running") + c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) + + out, _ = dockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c), "-f", "container="+name) + // attach and detach event should be monitored + c.Assert(out, checker.Contains, "attach") + c.Assert(out, checker.Contains, "detach") +} + +// TestRunAttachDetachFromFlag checks attaching and detaching with the escape sequence specified via flags. +func (s *DockerSuite) TestRunAttachDetachFromFlag(c *check.C) { + name := "attach-detach" + keyCtrlA := []byte{1} + keyA := []byte{97} + + dockerCmd(c, "run", "--name", name, "-itd", "busybox", "cat") + + cmd := exec.Command(dockerBinary, "attach", "--detach-keys=ctrl-a,a", name) + stdout, err := cmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + cpty, tty, err := pty.Open() + if err != nil { + c.Fatal(err) + } + defer cpty.Close() + cmd.Stdin = tty + if err := cmd.Start(); err != nil { + c.Fatal(err) + } + c.Assert(waitRun(name), check.IsNil) + + if _, err := cpty.Write([]byte("hello\n")); err != nil { + c.Fatal(err) + } + + out, err := bufio.NewReader(stdout).ReadString('\n') + if err != nil { + c.Fatal(err) + } + if strings.TrimSpace(out) != "hello" { + c.Fatalf("expected 'hello', got %q", out) + } + + // escape sequence + if _, err := cpty.Write(keyCtrlA); err != nil { + c.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + if _, err := cpty.Write(keyA); err != nil { + c.Fatal(err) + } + + ch := make(chan struct{}) + go func() { + cmd.Wait() + ch <- struct{}{} + }() + + select { + case <-ch: + case <-time.After(10 * time.Second): + c.Fatal("timed out waiting for container to exit") + } + + running := inspectField(c, name, "State.Running") + c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) +} + +// TestRunAttachDetachFromInvalidFlag checks attaching and detaching with the escape sequence specified via flags. 
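+// The upper-case "ctrl-A" below is not a supported key name, so a warning is printed and the default keys remain in effect.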
+func (s *DockerSuite) TestRunAttachDetachFromInvalidFlag(c *check.C) { + name := "attach-detach" + dockerCmd(c, "run", "--name", name, "-itd", "busybox", "top") + c.Assert(waitRun(name), check.IsNil) + + // specify an invalid detach key, container will ignore it and use default + cmd := exec.Command(dockerBinary, "attach", "--detach-keys=ctrl-A,a", name) + stdout, err := cmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + cpty, tty, err := pty.Open() + if err != nil { + c.Fatal(err) + } + defer cpty.Close() + cmd.Stdin = tty + if err := cmd.Start(); err != nil { + c.Fatal(err) + } + + bufReader := bufio.NewReader(stdout) + out, err := bufReader.ReadString('\n') + if err != nil { + c.Fatal(err) + } + // it should print a warning to indicate the detach key flag is invalid + errStr := "Invalid detach keys (ctrl-A,a) provided" + c.Assert(strings.TrimSpace(out), checker.Equals, errStr) +} + +// TestRunAttachDetachFromConfig checks attaching and detaching with the escape sequence specified via config file. +func (s *DockerSuite) TestRunAttachDetachFromConfig(c *check.C) { + keyCtrlA := []byte{1} + keyA := []byte{97} + + // Setup config + homeKey := homedir.Key() + homeVal := homedir.Get() + tmpDir, err := ioutil.TempDir("", "fake-home") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir) + + dotDocker := filepath.Join(tmpDir, ".docker") + os.Mkdir(dotDocker, 0600) + tmpCfg := filepath.Join(dotDocker, "config.json") + + defer func() { os.Setenv(homeKey, homeVal) }() + os.Setenv(homeKey, tmpDir) + + data := `{ + "detachKeys": "ctrl-a,a" + }` + + err = ioutil.WriteFile(tmpCfg, []byte(data), 0600) + c.Assert(err, checker.IsNil) + + // Then do the work + name := "attach-detach" + dockerCmd(c, "run", "--name", name, "-itd", "busybox", "cat") + + cmd := exec.Command(dockerBinary, "attach", name) + stdout, err := cmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + cpty, tty, err := pty.Open() + if err != nil { + c.Fatal(err) + } + defer cpty.Close() + cmd.Stdin = tty + if err := cmd.Start(); err != nil { + c.Fatal(err) + } + c.Assert(waitRun(name), check.IsNil) + + if _, err := cpty.Write([]byte("hello\n")); err != nil { + c.Fatal(err) + } + + out, err := bufio.NewReader(stdout).ReadString('\n') + if err != nil { + c.Fatal(err) + } + if strings.TrimSpace(out) != "hello" { + c.Fatalf("expected 'hello', got %q", out) + } + + // escape sequence + if _, err := cpty.Write(keyCtrlA); err != nil { + c.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + if _, err := cpty.Write(keyA); err != nil { + c.Fatal(err) + } + + ch := make(chan struct{}) + go func() { + cmd.Wait() + ch <- struct{}{} + }() + + select { + case <-ch: + case <-time.After(10 * time.Second): + c.Fatal("timed out waiting for container to exit") + } + + running := inspectField(c, name, "State.Running") + c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) +} + +// TestRunAttachDetachKeysOverrideConfig checks attaching and detaching with the detach flags, making sure it overrides config file +func (s *DockerSuite) TestRunAttachDetachKeysOverrideConfig(c *check.C) { + keyCtrlA := []byte{1} + keyA := []byte{97} + + // Setup config + homeKey := homedir.Key() + homeVal := homedir.Get() + tmpDir, err := ioutil.TempDir("", "fake-home") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir) + + dotDocker := filepath.Join(tmpDir, ".docker") + os.Mkdir(dotDocker, 0600) + tmpCfg := filepath.Join(dotDocker, "config.json") + + defer func() { os.Setenv(homeKey, homeVal) }() + 
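// Re-home the CLI to the temp dir so it picks up the fake config.json written below. +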
os.Setenv(homeKey, tmpDir) + + data := `{ + "detachKeys": "ctrl-e,e" + }` + + err = ioutil.WriteFile(tmpCfg, []byte(data), 0600) + c.Assert(err, checker.IsNil) + + // Then do the work + name := "attach-detach" + dockerCmd(c, "run", "--name", name, "-itd", "busybox", "cat") + + cmd := exec.Command(dockerBinary, "attach", "--detach-keys=ctrl-a,a", name) + stdout, err := cmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + cpty, tty, err := pty.Open() + if err != nil { + c.Fatal(err) + } + defer cpty.Close() + cmd.Stdin = tty + if err := cmd.Start(); err != nil { + c.Fatal(err) + } + c.Assert(waitRun(name), check.IsNil) + + if _, err := cpty.Write([]byte("hello\n")); err != nil { + c.Fatal(err) + } + + out, err := bufio.NewReader(stdout).ReadString('\n') + if err != nil { + c.Fatal(err) + } + if strings.TrimSpace(out) != "hello" { + c.Fatalf("expected 'hello', got %q", out) + } + + // escape sequence + if _, err := cpty.Write(keyCtrlA); err != nil { + c.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + if _, err := cpty.Write(keyA); err != nil { + c.Fatal(err) + } + + ch := make(chan struct{}) + go func() { + cmd.Wait() + ch <- struct{}{} + }() + + select { + case <-ch: + case <-time.After(10 * time.Second): + c.Fatal("timed out waiting for container to exit") + } + + running := inspectField(c, name, "State.Running") + c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) +} + +func (s *DockerSuite) TestRunAttachInvalidDetachKeySequencePreserved(c *check.C) { + name := "attach-detach" + keyA := []byte{97} + keyB := []byte{98} + + dockerCmd(c, "run", "--name", name, "-itd", "busybox", "cat") + + cmd := exec.Command(dockerBinary, "attach", "--detach-keys=a,b,c", name) + stdout, err := cmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + cpty, tty, err := pty.Open() + if err != nil { + c.Fatal(err) + } + defer cpty.Close() + cmd.Stdin = tty + if err := cmd.Start(); err != nil { + c.Fatal(err) + } + c.Assert(waitRun(name), check.IsNil) + + // Invalid escape sequence aba, should print aba in output + if _, err := cpty.Write(keyA); err != nil { + c.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + if _, err := cpty.Write(keyB); err != nil { + c.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + if _, err := cpty.Write(keyA); err != nil { + c.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + if _, err := cpty.Write([]byte("\n")); err != nil { + c.Fatal(err) + } + + out, err := bufio.NewReader(stdout).ReadString('\n') + if err != nil { + c.Fatal(err) + } + if strings.TrimSpace(out) != "aba" { + c.Fatalf("expected 'aba', got %q", out) + } +} + +// "test" should be printed +func (s *DockerSuite) TestRunWithCPUQuota(c *check.C) { + testRequires(c, cpuCfsQuota) + + file := "/sys/fs/cgroup/cpu/cpu.cfs_quota_us" + out, _ := dockerCmd(c, "run", "--cpu-quota", "8000", "--name", "test", "busybox", "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "8000") + + out = inspectField(c, "test", "HostConfig.CpuQuota") + c.Assert(out, checker.Equals, "8000", check.Commentf("setting the CPU CFS quota failed")) +} + +func (s *DockerSuite) TestRunWithCpuPeriod(c *check.C) { + testRequires(c, cpuCfsPeriod) + + file := "/sys/fs/cgroup/cpu/cpu.cfs_period_us" + out, _ := dockerCmd(c, "run", "--cpu-period", "50000", "--name", "test", "busybox", "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "50000") + + out, _ = dockerCmd(c, "run", "--cpu-period", "0", "busybox", "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, 
"100000") + + out = inspectField(c, "test", "HostConfig.CpuPeriod") + c.Assert(out, checker.Equals, "50000", check.Commentf("setting the CPU CFS period failed")) +} + +func (s *DockerSuite) TestRunWithInvalidCpuPeriod(c *check.C) { + testRequires(c, cpuCfsPeriod) + out, _, err := dockerCmdWithError("run", "--cpu-period", "900", "busybox", "true") + c.Assert(err, check.NotNil) + expected := "CPU cfs period can not be less than 1ms (i.e. 1000) or larger than 1s (i.e. 1000000)" + c.Assert(out, checker.Contains, expected) + + out, _, err = dockerCmdWithError("run", "--cpu-period", "2000000", "busybox", "true") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, expected) + + out, _, err = dockerCmdWithError("run", "--cpu-period", "-3", "busybox", "true") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerSuite) TestRunWithKernelMemory(c *check.C) { + testRequires(c, kernelMemorySupport) + + file := "/sys/fs/cgroup/memory/memory.kmem.limit_in_bytes" + cli.DockerCmd(c, "run", "--kernel-memory", "50M", "--name", "test1", "busybox", "cat", file).Assert(c, icmd.Expected{ + Out: "52428800", + }) + + cli.InspectCmd(c, "test1", cli.Format(".HostConfig.KernelMemory")).Assert(c, icmd.Expected{ + Out: "52428800", + }) +} + +func (s *DockerSuite) TestRunWithInvalidKernelMemory(c *check.C) { + testRequires(c, kernelMemorySupport) + + out, _, err := dockerCmdWithError("run", "--kernel-memory", "2M", "busybox", "true") + c.Assert(err, check.NotNil) + expected := "Minimum kernel memory limit allowed is 4MB" + c.Assert(out, checker.Contains, expected) + + out, _, err = dockerCmdWithError("run", "--kernel-memory", "-16m", "--name", "test2", "busybox", "echo", "test") + c.Assert(err, check.NotNil) + expected = "invalid size" + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerSuite) TestRunWithCPUShares(c *check.C) { + testRequires(c, cpuShare) + + file := "/sys/fs/cgroup/cpu/cpu.shares" + out, _ := dockerCmd(c, "run", "--cpu-shares", "1000", "--name", "test", "busybox", "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "1000") + + out = inspectField(c, "test", "HostConfig.CPUShares") + c.Assert(out, check.Equals, "1000") +} + +// "test" should be printed +func (s *DockerSuite) TestRunEchoStdoutWithCPUSharesAndMemoryLimit(c *check.C) { + testRequires(c, cpuShare) + testRequires(c, memoryLimitSupport) + cli.DockerCmd(c, "run", "--cpu-shares", "1000", "-m", "32m", "busybox", "echo", "test").Assert(c, icmd.Expected{ + Out: "test\n", + }) +} + +func (s *DockerSuite) TestRunWithCpusetCpus(c *check.C) { + testRequires(c, cgroupCpuset) + + file := "/sys/fs/cgroup/cpuset/cpuset.cpus" + out, _ := dockerCmd(c, "run", "--cpuset-cpus", "0", "--name", "test", "busybox", "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + out = inspectField(c, "test", "HostConfig.CpusetCpus") + c.Assert(out, check.Equals, "0") +} + +func (s *DockerSuite) TestRunWithCpusetMems(c *check.C) { + testRequires(c, cgroupCpuset) + + file := "/sys/fs/cgroup/cpuset/cpuset.mems" + out, _ := dockerCmd(c, "run", "--cpuset-mems", "0", "--name", "test", "busybox", "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + out = inspectField(c, "test", "HostConfig.CpusetMems") + c.Assert(out, check.Equals, "0") +} + +func (s *DockerSuite) TestRunWithBlkioWeight(c *check.C) { + testRequires(c, blkioWeight) + + file := "/sys/fs/cgroup/blkio/blkio.weight" + out, _ := dockerCmd(c, "run", "--blkio-weight", "300", "--name", "test", "busybox", 
"cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "300") + + out = inspectField(c, "test", "HostConfig.BlkioWeight") + c.Assert(out, check.Equals, "300") +} + +func (s *DockerSuite) TestRunWithInvalidBlkioWeight(c *check.C) { + testRequires(c, blkioWeight) + out, _, err := dockerCmdWithError("run", "--blkio-weight", "5", "busybox", "true") + c.Assert(err, check.NotNil, check.Commentf(out)) + expected := "Range of blkio weight is from 10 to 1000" + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerSuite) TestRunWithInvalidPathforBlkioWeightDevice(c *check.C) { + testRequires(c, blkioWeight) + out, _, err := dockerCmdWithError("run", "--blkio-weight-device", "/dev/sdX:100", "busybox", "true") + c.Assert(err, check.NotNil, check.Commentf(out)) +} + +func (s *DockerSuite) TestRunWithInvalidPathforBlkioDeviceReadBps(c *check.C) { + testRequires(c, blkioWeight) + out, _, err := dockerCmdWithError("run", "--device-read-bps", "/dev/sdX:500", "busybox", "true") + c.Assert(err, check.NotNil, check.Commentf(out)) +} + +func (s *DockerSuite) TestRunWithInvalidPathforBlkioDeviceWriteBps(c *check.C) { + testRequires(c, blkioWeight) + out, _, err := dockerCmdWithError("run", "--device-write-bps", "/dev/sdX:500", "busybox", "true") + c.Assert(err, check.NotNil, check.Commentf(out)) +} + +func (s *DockerSuite) TestRunWithInvalidPathforBlkioDeviceReadIOps(c *check.C) { + testRequires(c, blkioWeight) + out, _, err := dockerCmdWithError("run", "--device-read-iops", "/dev/sdX:500", "busybox", "true") + c.Assert(err, check.NotNil, check.Commentf(out)) +} + +func (s *DockerSuite) TestRunWithInvalidPathforBlkioDeviceWriteIOps(c *check.C) { + testRequires(c, blkioWeight) + out, _, err := dockerCmdWithError("run", "--device-write-iops", "/dev/sdX:500", "busybox", "true") + c.Assert(err, check.NotNil, check.Commentf(out)) +} + +func (s *DockerSuite) TestRunOOMExitCode(c *check.C) { + testRequires(c, memoryLimitSupport, swapMemorySupport) + errChan := make(chan error) + go func() { + defer close(errChan) + out, exitCode, _ := dockerCmdWithError("run", "-m", "4MB", "busybox", "sh", "-c", "x=a; while true; do x=$x$x$x$x; done") + if expected := 137; exitCode != expected { + errChan <- fmt.Errorf("wrong exit code for OOM container: expected %d, got %d (output: %q)", expected, exitCode, out) + } + }() + + select { + case err := <-errChan: + c.Assert(err, check.IsNil) + case <-time.After(600 * time.Second): + c.Fatal("Timeout waiting for container to die on OOM") + } +} + +func (s *DockerSuite) TestRunWithMemoryLimit(c *check.C) { + testRequires(c, memoryLimitSupport) + + file := "/sys/fs/cgroup/memory/memory.limit_in_bytes" + cli.DockerCmd(c, "run", "-m", "32M", "--name", "test", "busybox", "cat", file).Assert(c, icmd.Expected{ + Out: "33554432", + }) + cli.InspectCmd(c, "test", cli.Format(".HostConfig.Memory")).Assert(c, icmd.Expected{ + Out: "33554432", + }) +} + +// TestRunWithoutMemoryswapLimit sets memory limit and disables swap +// memory limit, this means the processes in the container can use +// 16M memory and as much swap memory as they need (if the host +// supports swap memory). 
+func (s *DockerSuite) TestRunWithoutMemoryswapLimit(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	testRequires(c, memoryLimitSupport)
+	testRequires(c, swapMemorySupport)
+	dockerCmd(c, "run", "-m", "32m", "--memory-swap", "-1", "busybox", "true")
+}
+
+func (s *DockerSuite) TestRunWithSwappiness(c *check.C) {
+	testRequires(c, memorySwappinessSupport)
+	file := "/sys/fs/cgroup/memory/memory.swappiness"
+	out, _ := dockerCmd(c, "run", "--memory-swappiness", "0", "--name", "test", "busybox", "cat", file)
+	c.Assert(strings.TrimSpace(out), checker.Equals, "0")
+
+	out = inspectField(c, "test", "HostConfig.MemorySwappiness")
+	c.Assert(out, check.Equals, "0")
+}
+
+func (s *DockerSuite) TestRunWithSwappinessInvalid(c *check.C) {
+	testRequires(c, memorySwappinessSupport)
+	out, _, err := dockerCmdWithError("run", "--memory-swappiness", "101", "busybox", "true")
+	c.Assert(err, check.NotNil)
+	expected := "Valid memory swappiness range is 0-100"
+	c.Assert(out, checker.Contains, expected, check.Commentf("Expected output to contain %q, not %q", expected, out))
+
+	out, _, err = dockerCmdWithError("run", "--memory-swappiness", "-10", "busybox", "true")
+	c.Assert(err, check.NotNil)
+	c.Assert(out, checker.Contains, expected, check.Commentf("Expected output to contain %q, not %q", expected, out))
+}
+
+func (s *DockerSuite) TestRunWithMemoryReservation(c *check.C) {
+	testRequires(c, memoryReservationSupport)
+
+	file := "/sys/fs/cgroup/memory/memory.soft_limit_in_bytes"
+	out, _ := dockerCmd(c, "run", "--memory-reservation", "200M", "--name", "test", "busybox", "cat", file)
+	c.Assert(strings.TrimSpace(out), checker.Equals, "209715200")
+
+	out = inspectField(c, "test", "HostConfig.MemoryReservation")
+	c.Assert(out, check.Equals, "209715200")
+}
+
+func (s *DockerSuite) TestRunWithMemoryReservationInvalid(c *check.C) {
+	testRequires(c, memoryLimitSupport)
+	testRequires(c, memoryReservationSupport)
+	out, _, err := dockerCmdWithError("run", "-m", "500M", "--memory-reservation", "800M", "busybox", "true")
+	c.Assert(err, check.NotNil)
+	expected := "Minimum memory limit can not be less than memory reservation limit"
+	c.Assert(strings.TrimSpace(out), checker.Contains, expected, check.Commentf("run container should fail with invalid memory reservation"))
+
+	out, _, err = dockerCmdWithError("run", "--memory-reservation", "1k", "busybox", "true")
+	c.Assert(err, check.NotNil)
+	expected = "Minimum memory reservation allowed is 4MB"
+	c.Assert(strings.TrimSpace(out), checker.Contains, expected, check.Commentf("run container should fail with invalid memory reservation"))
+}
+
+func (s *DockerSuite) TestStopContainerSignal(c *check.C) {
+	out, _ := dockerCmd(c, "run", "--stop-signal", "SIGUSR1", "-d", "busybox", "/bin/sh", "-c", `trap 'echo "exit trapped"; exit 0' USR1; while true; do sleep 1; done`)
+	containerID := strings.TrimSpace(out)
+
+	c.Assert(waitRun(containerID), checker.IsNil)
+
+	dockerCmd(c, "stop", containerID)
+	out, _ = dockerCmd(c, "logs", containerID)
+
+	c.Assert(out, checker.Contains, "exit trapped", check.Commentf("Expected `exit trapped` in the log"))
+}
+
+func (s *DockerSuite) TestRunSwapLessThanMemoryLimit(c *check.C) {
+	testRequires(c, memoryLimitSupport)
+	testRequires(c, swapMemorySupport)
+	out, _, err := dockerCmdWithError("run", "-m", "16m", "--memory-swap", "15m", "busybox", "echo", "test")
+	expected := "Minimum memoryswap limit should be larger than memory limit"
+	c.Assert(err, check.NotNil)
+
+	c.Assert(out, checker.Contains, expected)
+}
+
+func (s
*DockerSuite) TestRunInvalidCpusetCpusFlagValue(c *check.C) { + testRequires(c, cgroupCpuset, SameHostDaemon) + + sysInfo := sysinfo.New(true) + cpus, err := parsers.ParseUintList(sysInfo.Cpus) + c.Assert(err, check.IsNil) + var invalid int + for i := 0; i <= len(cpus)+1; i++ { + if !cpus[i] { + invalid = i + break + } + } + out, _, err := dockerCmdWithError("run", "--cpuset-cpus", strconv.Itoa(invalid), "busybox", "true") + c.Assert(err, check.NotNil) + expected := fmt.Sprintf("Error response from daemon: Requested CPUs are not available - requested %s, available: %s", strconv.Itoa(invalid), sysInfo.Cpus) + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerSuite) TestRunInvalidCpusetMemsFlagValue(c *check.C) { + testRequires(c, cgroupCpuset) + + sysInfo := sysinfo.New(true) + mems, err := parsers.ParseUintList(sysInfo.Mems) + c.Assert(err, check.IsNil) + var invalid int + for i := 0; i <= len(mems)+1; i++ { + if !mems[i] { + invalid = i + break + } + } + out, _, err := dockerCmdWithError("run", "--cpuset-mems", strconv.Itoa(invalid), "busybox", "true") + c.Assert(err, check.NotNil) + expected := fmt.Sprintf("Error response from daemon: Requested memory nodes are not available - requested %s, available: %s", strconv.Itoa(invalid), sysInfo.Mems) + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerSuite) TestRunInvalidCPUShares(c *check.C) { + testRequires(c, cpuShare, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--cpu-shares", "1", "busybox", "echo", "test") + c.Assert(err, check.NotNil, check.Commentf(out)) + expected := "The minimum allowed cpu-shares is 2" + c.Assert(out, checker.Contains, expected) + + out, _, err = dockerCmdWithError("run", "--cpu-shares", "-1", "busybox", "echo", "test") + c.Assert(err, check.NotNil, check.Commentf(out)) + expected = "shares: invalid argument" + c.Assert(out, checker.Contains, expected) + + out, _, err = dockerCmdWithError("run", "--cpu-shares", "99999999", "busybox", "echo", "test") + c.Assert(err, check.NotNil, check.Commentf(out)) + expected = "The maximum allowed cpu-shares is" + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerSuite) TestRunWithDefaultShmSize(c *check.C) { + testRequires(c, DaemonIsLinux) + + name := "shm-default" + out, _ := dockerCmd(c, "run", "--name", name, "busybox", "mount") + shmRegex := regexp.MustCompile(`shm on /dev/shm type tmpfs(.*)size=65536k`) + if !shmRegex.MatchString(out) { + c.Fatalf("Expected shm of 64MB in mount command, got %v", out) + } + shmSize := inspectField(c, name, "HostConfig.ShmSize") + c.Assert(shmSize, check.Equals, "67108864") +} + +func (s *DockerSuite) TestRunWithShmSize(c *check.C) { + testRequires(c, DaemonIsLinux) + + name := "shm" + out, _ := dockerCmd(c, "run", "--name", name, "--shm-size=1G", "busybox", "mount") + shmRegex := regexp.MustCompile(`shm on /dev/shm type tmpfs(.*)size=1048576k`) + if !shmRegex.MatchString(out) { + c.Fatalf("Expected shm of 1GB in mount command, got %v", out) + } + shmSize := inspectField(c, name, "HostConfig.ShmSize") + c.Assert(shmSize, check.Equals, "1073741824") +} + +func (s *DockerSuite) TestRunTmpfsMountsEnsureOrdered(c *check.C) { + tmpFile, err := ioutil.TempFile("", "test") + c.Assert(err, check.IsNil) + defer tmpFile.Close() + out, _ := dockerCmd(c, "run", "--tmpfs", "/run", "-v", tmpFile.Name()+":/run/test", "busybox", "ls", "/run") + c.Assert(out, checker.Contains, "test") +} + +func (s *DockerSuite) TestRunTmpfsMounts(c *check.C) { + // TODO Windows (Post TP5): This test cannot run on a Windows 
daemon as
+	// Windows does not support tmpfs mounts.
+	testRequires(c, DaemonIsLinux)
+	if out, _, err := dockerCmdWithError("run", "--tmpfs", "/run", "busybox", "touch", "/run/somefile"); err != nil {
+		c.Fatalf("/run directory not mounted on tmpfs %q %s", err, out)
+	}
+	if out, _, err := dockerCmdWithError("run", "--tmpfs", "/run:noexec", "busybox", "touch", "/run/somefile"); err != nil {
+		c.Fatalf("/run directory not mounted on tmpfs %q %s", err, out)
+	}
+	if out, _, err := dockerCmdWithError("run", "--tmpfs", "/run:noexec,nosuid,rw,size=5k,mode=700", "busybox", "touch", "/run/somefile"); err != nil {
+		c.Fatalf("/run failed to mount on tmpfs with valid options %q %s", err, out)
+	}
+	if _, _, err := dockerCmdWithError("run", "--tmpfs", "/run:foobar", "busybox", "touch", "/run/somefile"); err == nil {
+		c.Fatalf("/run mounted on tmpfs when it should have failed with an invalid mount option")
+	}
+	if _, _, err := dockerCmdWithError("run", "--tmpfs", "/run", "-v", "/run:/run", "busybox", "touch", "/run/somefile"); err == nil {
+		c.Fatalf("Should have generated an error saying Duplicate mount points")
+	}
+}
+
+func (s *DockerSuite) TestRunTmpfsMountsOverrideImageVolumes(c *check.C) {
+	name := "img-with-volumes"
+	buildImageSuccessfully(c, name, build.WithDockerfile(`
+	FROM busybox
+	VOLUME /run
+	RUN touch /run/stuff
+	`))
+	out, _ := dockerCmd(c, "run", "--tmpfs", "/run", name, "ls", "/run")
+	c.Assert(out, checker.Not(checker.Contains), "stuff")
+}
+
+// Test case for #22420
+func (s *DockerSuite) TestRunTmpfsMountsWithOptions(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+
+	expectedOptions := []string{"rw", "nosuid", "nodev", "noexec", "relatime"}
+	out, _ := dockerCmd(c, "run", "--tmpfs", "/tmp", "busybox", "sh", "-c", "mount | grep 'tmpfs on /tmp'")
+	for _, option := range expectedOptions {
+		c.Assert(out, checker.Contains, option)
+	}
+	c.Assert(out, checker.Not(checker.Contains), "size=")
+
+	expectedOptions = []string{"rw", "nosuid", "nodev", "noexec", "relatime"}
+	out, _ = dockerCmd(c, "run", "--tmpfs", "/tmp:rw", "busybox", "sh", "-c", "mount | grep 'tmpfs on /tmp'")
+	for _, option := range expectedOptions {
+		c.Assert(out, checker.Contains, option)
+	}
+	c.Assert(out, checker.Not(checker.Contains), "size=")
+
+	expectedOptions = []string{"rw", "nosuid", "nodev", "relatime", "size=8192k"}
+	out, _ = dockerCmd(c, "run", "--tmpfs", "/tmp:rw,exec,size=8192k", "busybox", "sh", "-c", "mount | grep 'tmpfs on /tmp'")
+	for _, option := range expectedOptions {
+		c.Assert(out, checker.Contains, option)
+	}
+
+	expectedOptions = []string{"rw", "nosuid", "nodev", "noexec", "relatime", "size=4096k"}
+	out, _ = dockerCmd(c, "run", "--tmpfs", "/tmp:rw,size=8192k,exec,size=4096k,noexec", "busybox", "sh", "-c", "mount | grep 'tmpfs on /tmp'")
+	for _, option := range expectedOptions {
+		c.Assert(out, checker.Contains, option)
+	}
+
+	// We use debian:jessie as there is no findmnt in busybox. Also the output will be in the format of
+	// TARGET PROPAGATION
+	// /tmp   shared
+	// so we only capture `shared` here.
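+	// (The PROPAGATION column reports the mount's shared-subtree propagation mode.)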
+ expectedOptions = []string{"shared"} + out, _ = dockerCmd(c, "run", "--tmpfs", "/tmp:shared", "debian:jessie", "findmnt", "-o", "TARGET,PROPAGATION", "/tmp") + for _, option := range expectedOptions { + c.Assert(out, checker.Contains, option) + } +} + +func (s *DockerSuite) TestRunSysctls(c *check.C) { + testRequires(c, DaemonIsLinux) + var err error + + out, _ := dockerCmd(c, "run", "--sysctl", "net.ipv4.ip_forward=1", "--name", "test", "busybox", "cat", "/proc/sys/net/ipv4/ip_forward") + c.Assert(strings.TrimSpace(out), check.Equals, "1") + + out = inspectFieldJSON(c, "test", "HostConfig.Sysctls") + + sysctls := make(map[string]string) + err = json.Unmarshal([]byte(out), &sysctls) + c.Assert(err, check.IsNil) + c.Assert(sysctls["net.ipv4.ip_forward"], check.Equals, "1") + + out, _ = dockerCmd(c, "run", "--sysctl", "net.ipv4.ip_forward=0", "--name", "test1", "busybox", "cat", "/proc/sys/net/ipv4/ip_forward") + c.Assert(strings.TrimSpace(out), check.Equals, "0") + + out = inspectFieldJSON(c, "test1", "HostConfig.Sysctls") + + err = json.Unmarshal([]byte(out), &sysctls) + c.Assert(err, check.IsNil) + c.Assert(sysctls["net.ipv4.ip_forward"], check.Equals, "0") + + icmd.RunCommand(dockerBinary, "run", "--sysctl", "kernel.foobar=1", "--name", "test2", + "busybox", "cat", "/proc/sys/kernel/foobar").Assert(c, icmd.Expected{ + ExitCode: 125, + Err: "invalid argument", + }) +} + +// TestRunSeccompProfileDenyUnshare checks that 'docker run --security-opt seccomp=/tmp/profile.json debian:jessie unshare' exits with operation not permitted. +func (s *DockerSuite) TestRunSeccompProfileDenyUnshare(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled, NotArm, Apparmor) + jsonData := `{ + "defaultAction": "SCMP_ACT_ALLOW", + "syscalls": [ + { + "name": "unshare", + "action": "SCMP_ACT_ERRNO" + } + ] +}` + tmpFile, err := ioutil.TempFile("", "profile.json") + if err != nil { + c.Fatal(err) + } + defer tmpFile.Close() + + if _, err := tmpFile.Write([]byte(jsonData)); err != nil { + c.Fatal(err) + } + icmd.RunCommand(dockerBinary, "run", "--security-opt", "apparmor=unconfined", + "--security-opt", "seccomp="+tmpFile.Name(), + "debian:jessie", "unshare", "-p", "-m", "-f", "-r", "mount", "-t", "proc", "none", "/proc").Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Operation not permitted", + }) +} + +// TestRunSeccompProfileDenyChmod checks that 'docker run --security-opt seccomp=/tmp/profile.json busybox chmod 400 /etc/hostname' exits with operation not permitted. +func (s *DockerSuite) TestRunSeccompProfileDenyChmod(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled) + jsonData := `{ + "defaultAction": "SCMP_ACT_ALLOW", + "syscalls": [ + { + "name": "chmod", + "action": "SCMP_ACT_ERRNO" + }, + { + "name":"fchmod", + "action": "SCMP_ACT_ERRNO" + }, + { + "name": "fchmodat", + "action":"SCMP_ACT_ERRNO" + } + ] +}` + tmpFile, err := ioutil.TempFile("", "profile.json") + c.Assert(err, check.IsNil) + defer tmpFile.Close() + + if _, err := tmpFile.Write([]byte(jsonData)); err != nil { + c.Fatal(err) + } + icmd.RunCommand(dockerBinary, "run", "--security-opt", "seccomp="+tmpFile.Name(), + "busybox", "chmod", "400", "/etc/hostname").Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Operation not permitted", + }) +} + +// TestRunSeccompProfileDenyUnshareUserns checks that 'docker run debian:jessie unshare --map-root-user --user sh -c whoami' with a specific profile to +// deny unshare of a userns exits with operation not permitted. 
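+// The arg filter below matches unshare(2) only when its first argument equals CLONE_NEWUSER (0x10000000).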
+func (s *DockerSuite) TestRunSeccompProfileDenyUnshareUserns(c *check.C) {
+	testRequires(c, SameHostDaemon, seccompEnabled, NotArm, Apparmor)
+	// from sched.h
+	jsonData := fmt.Sprintf(`{
+	"defaultAction": "SCMP_ACT_ALLOW",
+	"syscalls": [
+		{
+			"name": "unshare",
+			"action": "SCMP_ACT_ERRNO",
+			"args": [
+				{
+					"index": 0,
+					"value": %d,
+					"op": "SCMP_CMP_EQ"
+				}
+			]
+		}
+	]
+}`, uint64(0x10000000))
+	tmpFile, err := ioutil.TempFile("", "profile.json")
+	if err != nil {
+		c.Fatal(err)
+	}
+	defer tmpFile.Close()
+
+	if _, err := tmpFile.Write([]byte(jsonData)); err != nil {
+		c.Fatal(err)
+	}
+	icmd.RunCommand(dockerBinary, "run",
+		"--security-opt", "apparmor=unconfined", "--security-opt", "seccomp="+tmpFile.Name(),
+		"debian:jessie", "unshare", "--map-root-user", "--user", "sh", "-c", "whoami").Assert(c, icmd.Expected{
+		ExitCode: 1,
+		Err:      "Operation not permitted",
+	})
+}
+
+// TestRunSeccompProfileDenyCloneUserns checks that 'docker run syscall-test'
+// with the default seccomp profile exits with operation not permitted.
+func (s *DockerSuite) TestRunSeccompProfileDenyCloneUserns(c *check.C) {
+	testRequires(c, SameHostDaemon, seccompEnabled)
+	ensureSyscallTest(c)
+
+	icmd.RunCommand(dockerBinary, "run", "syscall-test", "userns-test", "id").Assert(c, icmd.Expected{
+		ExitCode: 1,
+		Err:      "clone failed: Operation not permitted",
+	})
+}
+
+// TestRunSeccompUnconfinedCloneUserns checks that
+// 'docker run --security-opt seccomp=unconfined syscall-test' allows creating a userns.
+func (s *DockerSuite) TestRunSeccompUnconfinedCloneUserns(c *check.C) {
+	testRequires(c, SameHostDaemon, seccompEnabled, UserNamespaceInKernel, NotUserNamespace, unprivilegedUsernsClone)
+	ensureSyscallTest(c)
+
+	// make sure running with seccomp=unconfined is ok
+	icmd.RunCommand(dockerBinary, "run", "--security-opt", "seccomp=unconfined",
+		"syscall-test", "userns-test", "id").Assert(c, icmd.Expected{
+		Out: "nobody",
+	})
+}
+
+// TestRunSeccompAllowPrivCloneUserns checks that 'docker run --privileged syscall-test'
+// allows creating a userns.
+func (s *DockerSuite) TestRunSeccompAllowPrivCloneUserns(c *check.C) {
+	testRequires(c, SameHostDaemon, seccompEnabled, UserNamespaceInKernel, NotUserNamespace)
+	ensureSyscallTest(c)
+
+	// make sure running with privileged is ok
+	icmd.RunCommand(dockerBinary, "run", "--privileged", "syscall-test", "userns-test", "id").Assert(c, icmd.Expected{
+		Out: "nobody",
+	})
+}
+
+// TestRunSeccompProfileAllow32Bit checks that 32 bit code can run on x86_64
+// with the default seccomp profile.
+func (s *DockerSuite) TestRunSeccompProfileAllow32Bit(c *check.C) {
+	testRequires(c, SameHostDaemon, seccompEnabled, IsAmd64)
+	ensureSyscallTest(c)
+
+	icmd.RunCommand(dockerBinary, "run", "syscall-test", "exit32-test", "id").Assert(c, icmd.Success)
+}
+
+// TestRunSeccompAllowSetrlimit checks that 'docker run debian:jessie ulimit -v 1048510' succeeds.
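+// ulimit -v is implemented with setrlimit(2), which the default profile permits.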
+func (s *DockerSuite) TestRunSeccompAllowSetrlimit(c *check.C) {
+	testRequires(c, SameHostDaemon, seccompEnabled)
+
+	// ulimit uses setrlimit, so we want to make sure we don't break it
+	icmd.RunCommand(dockerBinary, "run", "debian:jessie", "bash", "-c", "ulimit -v 1048510").Assert(c, icmd.Success)
+}
+
+func (s *DockerSuite) TestRunSeccompDefaultProfileAcct(c *check.C) {
+	testRequires(c, SameHostDaemon, seccompEnabled, NotUserNamespace)
+	ensureSyscallTest(c)
+
+	out, _, err := dockerCmdWithError("run", "syscall-test", "acct-test")
+	if err == nil || !strings.Contains(out, "Operation not permitted") {
+		c.Fatalf("test 0: expected Operation not permitted, got: %s", out)
+	}
+
+	out, _, err = dockerCmdWithError("run", "--cap-add", "sys_admin", "syscall-test", "acct-test")
+	if err == nil || !strings.Contains(out, "Operation not permitted") {
+		c.Fatalf("test 1: expected Operation not permitted, got: %s", out)
+	}
+
+	out, _, err = dockerCmdWithError("run", "--cap-add", "sys_pacct", "syscall-test", "acct-test")
+	if err == nil || !strings.Contains(out, "No such file or directory") {
+		c.Fatalf("test 2: expected No such file or directory, got: %s", out)
+	}
+
+	out, _, err = dockerCmdWithError("run", "--cap-add", "ALL", "syscall-test", "acct-test")
+	if err == nil || !strings.Contains(out, "No such file or directory") {
+		c.Fatalf("test 3: expected No such file or directory, got: %s", out)
+	}
+
+	out, _, err = dockerCmdWithError("run", "--cap-drop", "ALL", "--cap-add", "sys_pacct", "syscall-test", "acct-test")
+	if err == nil || !strings.Contains(out, "No such file or directory") {
+		c.Fatalf("test 4: expected No such file or directory, got: %s", out)
+	}
+}
+
+func (s *DockerSuite) TestRunSeccompDefaultProfileNS(c *check.C) {
+	testRequires(c, SameHostDaemon, seccompEnabled, NotUserNamespace)
+	ensureSyscallTest(c)
+
+	out, _, err := dockerCmdWithError("run", "syscall-test", "ns-test", "echo", "hello0")
+	if err == nil || !strings.Contains(out, "Operation not permitted") {
+		c.Fatalf("test 0: expected Operation not permitted, got: %s", out)
+	}
+
+	out, _, err = dockerCmdWithError("run", "--cap-add", "sys_admin", "syscall-test", "ns-test", "echo", "hello1")
+	if err != nil || !strings.Contains(out, "hello1") {
+		c.Fatalf("test 1: expected hello1, got: %s, %v", out, err)
+	}
+
+	out, _, err = dockerCmdWithError("run", "--cap-drop", "all", "--cap-add", "sys_admin", "syscall-test", "ns-test", "echo", "hello2")
+	if err != nil || !strings.Contains(out, "hello2") {
+		c.Fatalf("test 2: expected hello2, got: %s, %v", out, err)
+	}
+
+	out, _, err = dockerCmdWithError("run", "--cap-add", "ALL", "syscall-test", "ns-test", "echo", "hello3")
+	if err != nil || !strings.Contains(out, "hello3") {
+		c.Fatalf("test 3: expected hello3, got: %s, %v", out, err)
+	}
+
+	out, _, err = dockerCmdWithError("run", "--cap-add", "ALL", "--security-opt", "seccomp=unconfined", "syscall-test", "acct-test")
+	if err == nil || !strings.Contains(out, "No such file or directory") {
+		c.Fatalf("test 4: expected No such file or directory, got: %s", out)
+	}
+
+	out, _, err = dockerCmdWithError("run", "--cap-add", "ALL", "--security-opt", "seccomp=unconfined", "syscall-test", "ns-test", "echo", "hello4")
+	if err != nil || !strings.Contains(out, "hello4") {
+		c.Fatalf("test 5: expected hello4, got: %s, %v", out, err)
+	}
+}
+
+// TestRunNoNewPrivSetuid checks that --security-opt='no-new-privileges=true' prevents
+// effective uid transitions on executing setuid binaries.
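+// With no_new_privs set, the setuid-root nnp-test binary reports the caller's EUID (1000 here) instead of escalating to 0.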
+func (s *DockerSuite) TestRunNoNewPrivSetuid(c *check.C) {
+	testRequires(c, DaemonIsLinux, NotUserNamespace, SameHostDaemon)
+	ensureNNPTest(c)
+
+	// test that running a setuid binary results in no effective uid transition
+	icmd.RunCommand(dockerBinary, "run", "--security-opt", "no-new-privileges=true", "--user", "1000",
+		"nnp-test", "/usr/bin/nnp-test").Assert(c, icmd.Expected{
+		Out: "EUID=1000",
+	})
+}
+
+// TestLegacyRunNoNewPrivSetuid checks that --security-opt=no-new-privileges prevents
+// effective uid transitions on executing setuid binaries.
+func (s *DockerSuite) TestLegacyRunNoNewPrivSetuid(c *check.C) {
+	testRequires(c, DaemonIsLinux, NotUserNamespace, SameHostDaemon)
+	ensureNNPTest(c)
+
+	// test that running a setuid binary results in no effective uid transition
+	icmd.RunCommand(dockerBinary, "run", "--security-opt", "no-new-privileges", "--user", "1000",
+		"nnp-test", "/usr/bin/nnp-test").Assert(c, icmd.Expected{
+		Out: "EUID=1000",
+	})
+}
+
+func (s *DockerSuite) TestUserNoEffectiveCapabilitiesChown(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	ensureSyscallTest(c)
+
+	// test that a root user has default capability CAP_CHOWN
+	dockerCmd(c, "run", "busybox", "chown", "100", "/tmp")
+	// test that non root user does not have default capability CAP_CHOWN
+	icmd.RunCommand(dockerBinary, "run", "--user", "1000:1000", "busybox", "chown", "100", "/tmp").Assert(c, icmd.Expected{
+		ExitCode: 1,
+		Err:      "Operation not permitted",
+	})
+	// test that root user can drop default capability CAP_CHOWN
+	icmd.RunCommand(dockerBinary, "run", "--cap-drop", "chown", "busybox", "chown", "100", "/tmp").Assert(c, icmd.Expected{
+		ExitCode: 1,
+		Err:      "Operation not permitted",
+	})
+}
+
+func (s *DockerSuite) TestUserNoEffectiveCapabilitiesDacOverride(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	ensureSyscallTest(c)
+
+	// test that a root user has default capability CAP_DAC_OVERRIDE
+	dockerCmd(c, "run", "busybox", "sh", "-c", "echo test > /etc/passwd")
+	// test that non root user does not have default capability CAP_DAC_OVERRIDE
+	icmd.RunCommand(dockerBinary, "run", "--user", "1000:1000", "busybox", "sh", "-c", "echo test > /etc/passwd").Assert(c, icmd.Expected{
+		ExitCode: 1,
+		Err:      "Permission denied",
+	})
+}
+
+func (s *DockerSuite) TestUserNoEffectiveCapabilitiesFowner(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	ensureSyscallTest(c)
+
+	// test that a root user has default capability CAP_FOWNER
+	dockerCmd(c, "run", "busybox", "chmod", "777", "/etc/passwd")
+	// test that non root user does not have default capability CAP_FOWNER
+	icmd.RunCommand(dockerBinary, "run", "--user", "1000:1000", "busybox", "chmod", "777", "/etc/passwd").Assert(c, icmd.Expected{
+		ExitCode: 1,
+		Err:      "Operation not permitted",
+	})
+	// TODO test that root user can drop default capability CAP_FOWNER
+}
+
+// TODO CAP_KILL
+
+func (s *DockerSuite) TestUserNoEffectiveCapabilitiesSetuid(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	ensureSyscallTest(c)
+
+	// test that a root user has default capability CAP_SETUID
+	dockerCmd(c, "run", "syscall-test", "setuid-test")
+	// test that non root user does not have default capability CAP_SETUID
+	icmd.RunCommand(dockerBinary, "run", "--user", "1000:1000", "syscall-test", "setuid-test").Assert(c, icmd.Expected{
+		ExitCode: 1,
+		Err:      "Operation not permitted",
+	})
+	// test that root user can drop default capability CAP_SETUID
+	icmd.RunCommand(dockerBinary, "run", "--cap-drop", "setuid", "syscall-test", "setuid-test").Assert(c, icmd.Expected{
ExitCode: 1, + Err: "Operation not permitted", + }) +} + +func (s *DockerSuite) TestUserNoEffectiveCapabilitiesSetgid(c *check.C) { + testRequires(c, DaemonIsLinux) + ensureSyscallTest(c) + + // test that a root user has default capability CAP_SETGID + dockerCmd(c, "run", "syscall-test", "setgid-test") + // test that non root user does not have default capability CAP_SETGID + icmd.RunCommand(dockerBinary, "run", "--user", "1000:1000", "syscall-test", "setgid-test").Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Operation not permitted", + }) + // test that root user can drop default capability CAP_SETGID + icmd.RunCommand(dockerBinary, "run", "--cap-drop", "setgid", "syscall-test", "setgid-test").Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Operation not permitted", + }) +} + +// TODO CAP_SETPCAP + +func (s *DockerSuite) TestUserNoEffectiveCapabilitiesNetBindService(c *check.C) { + testRequires(c, DaemonIsLinux) + ensureSyscallTest(c) + + // test that a root user has default capability CAP_NET_BIND_SERVICE + dockerCmd(c, "run", "syscall-test", "socket-test") + // test that non root user does not have default capability CAP_NET_BIND_SERVICE + icmd.RunCommand(dockerBinary, "run", "--user", "1000:1000", "syscall-test", "socket-test").Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Permission denied", + }) + // test that root user can drop default capability CAP_NET_BIND_SERVICE + icmd.RunCommand(dockerBinary, "run", "--cap-drop", "net_bind_service", "syscall-test", "socket-test").Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Permission denied", + }) +} + +func (s *DockerSuite) TestUserNoEffectiveCapabilitiesNetRaw(c *check.C) { + testRequires(c, DaemonIsLinux) + ensureSyscallTest(c) + + // test that a root user has default capability CAP_NET_RAW + dockerCmd(c, "run", "syscall-test", "raw-test") + // test that non root user does not have default capability CAP_NET_RAW + icmd.RunCommand(dockerBinary, "run", "--user", "1000:1000", "syscall-test", "raw-test").Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Operation not permitted", + }) + // test that root user can drop default capability CAP_NET_RAW + icmd.RunCommand(dockerBinary, "run", "--cap-drop", "net_raw", "syscall-test", "raw-test").Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Operation not permitted", + }) +} + +func (s *DockerSuite) TestUserNoEffectiveCapabilitiesChroot(c *check.C) { + testRequires(c, DaemonIsLinux) + ensureSyscallTest(c) + + // test that a root user has default capability CAP_SYS_CHROOT + dockerCmd(c, "run", "busybox", "chroot", "/", "/bin/true") + // test that non root user does not have default capability CAP_SYS_CHROOT + icmd.RunCommand(dockerBinary, "run", "--user", "1000:1000", "busybox", "chroot", "/", "/bin/true").Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Operation not permitted", + }) + // test that root user can drop default capability CAP_SYS_CHROOT + icmd.RunCommand(dockerBinary, "run", "--cap-drop", "sys_chroot", "busybox", "chroot", "/", "/bin/true").Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Operation not permitted", + }) +} + +func (s *DockerSuite) TestUserNoEffectiveCapabilitiesMknod(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + ensureSyscallTest(c) + + // test that a root user has default capability CAP_MKNOD + dockerCmd(c, "run", "busybox", "mknod", "/tmp/node", "b", "1", "2") + // test that non root user does not have default capability CAP_MKNOD + // test that root user can drop default capability CAP_SYS_CHROOT + icmd.RunCommand(dockerBinary, "run", 
"--user", "1000:1000", "busybox", "mknod", "/tmp/node", "b", "1", "2").Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Operation not permitted", + }) + // test that root user can drop default capability CAP_MKNOD + icmd.RunCommand(dockerBinary, "run", "--cap-drop", "mknod", "busybox", "mknod", "/tmp/node", "b", "1", "2").Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Operation not permitted", + }) +} + +// TODO CAP_AUDIT_WRITE +// TODO CAP_SETFCAP + +func (s *DockerSuite) TestRunApparmorProcDirectory(c *check.C) { + testRequires(c, SameHostDaemon, Apparmor) + + // running w seccomp unconfined tests the apparmor profile + result := icmd.RunCommand(dockerBinary, "run", "--security-opt", "seccomp=unconfined", "busybox", "chmod", "777", "/proc/1/cgroup") + result.Assert(c, icmd.Expected{ExitCode: 1}) + if !(strings.Contains(result.Combined(), "Permission denied") || strings.Contains(result.Combined(), "Operation not permitted")) { + c.Fatalf("expected chmod 777 /proc/1/cgroup to fail, got %s: %v", result.Combined(), result.Error) + } + + result = icmd.RunCommand(dockerBinary, "run", "--security-opt", "seccomp=unconfined", "busybox", "chmod", "777", "/proc/1/attr/current") + result.Assert(c, icmd.Expected{ExitCode: 1}) + if !(strings.Contains(result.Combined(), "Permission denied") || strings.Contains(result.Combined(), "Operation not permitted")) { + c.Fatalf("expected chmod 777 /proc/1/attr/current to fail, got %s: %v", result.Combined(), result.Error) + } +} + +// make sure the default profile can be successfully parsed (using unshare as it is +// something which we know is blocked in the default profile) +func (s *DockerSuite) TestRunSeccompWithDefaultProfile(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled) + + out, _, err := dockerCmdWithError("run", "--security-opt", "seccomp=../profiles/seccomp/default.json", "debian:jessie", "unshare", "--map-root-user", "--user", "sh", "-c", "whoami") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "unshare: unshare failed: Operation not permitted") +} + +// TestRunDeviceSymlink checks run with device that follows symlink (#13840 and #22271) +func (s *DockerSuite) TestRunDeviceSymlink(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm, SameHostDaemon) + if _, err := os.Stat("/dev/zero"); err != nil { + c.Skip("Host does not have /dev/zero") + } + + // Create a temporary directory to create symlink + tmpDir, err := ioutil.TempDir("", "docker_device_follow_symlink_tests") + c.Assert(err, checker.IsNil) + + defer os.RemoveAll(tmpDir) + + // Create a symbolic link to /dev/zero + symZero := filepath.Join(tmpDir, "zero") + err = os.Symlink("/dev/zero", symZero) + c.Assert(err, checker.IsNil) + + // Create a temporary file "temp" inside tmpDir, write some data to "tmpDir/temp", + // then create a symlink "tmpDir/file" to the temporary file "tmpDir/temp". 
+ tmpFile := filepath.Join(tmpDir, "temp") + err = ioutil.WriteFile(tmpFile, []byte("temp"), 0666) + c.Assert(err, checker.IsNil) + symFile := filepath.Join(tmpDir, "file") + err = os.Symlink(tmpFile, symFile) + c.Assert(err, checker.IsNil) + + // Create a symbolic link to /dev/zero, this time with a relative path (#22271) + err = os.Symlink("zero", "/dev/symzero") + if err != nil { + c.Fatal("/dev/symzero creation failed") + } + // We need to remove this symbolic link here as it is created in /dev/, not temporary directory as above + defer os.Remove("/dev/symzero") + + // md5sum of 'dd if=/dev/zero bs=4K count=8' is bb7df04e1b0a2570657527a7e108ae23 + out, _ := dockerCmd(c, "run", "--device", symZero+":/dev/symzero", "busybox", "sh", "-c", "dd if=/dev/symzero bs=4K count=8 | md5sum") + c.Assert(strings.Trim(out, "\r\n"), checker.Contains, "bb7df04e1b0a2570657527a7e108ae23", check.Commentf("expected output bb7df04e1b0a2570657527a7e108ae23")) + + // symlink "tmpDir/file" to a file "tmpDir/temp" will result in an error as it is not a device. + out, _, err = dockerCmdWithError("run", "--device", symFile+":/dev/symzero", "busybox", "sh", "-c", "dd if=/dev/symzero bs=4K count=8 | md5sum") + c.Assert(err, check.NotNil) + c.Assert(strings.Trim(out, "\r\n"), checker.Contains, "not a device node", check.Commentf("expected output 'not a device node'")) + + // md5sum of 'dd if=/dev/zero bs=4K count=8' is bb7df04e1b0a2570657527a7e108ae23 (this time check with relative path backed, see #22271) + out, _ = dockerCmd(c, "run", "--device", "/dev/symzero:/dev/symzero", "busybox", "sh", "-c", "dd if=/dev/symzero bs=4K count=8 | md5sum") + c.Assert(strings.Trim(out, "\r\n"), checker.Contains, "bb7df04e1b0a2570657527a7e108ae23", check.Commentf("expected output bb7df04e1b0a2570657527a7e108ae23")) +} + +// TestRunPIDsLimit makes sure the pids cgroup is set with --pids-limit +func (s *DockerSuite) TestRunPIDsLimit(c *check.C) { + testRequires(c, pidsLimit) + + file := "/sys/fs/cgroup/pids/pids.max" + out, _ := dockerCmd(c, "run", "--name", "skittles", "--pids-limit", "4", "busybox", "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "4") + + out = inspectField(c, "skittles", "HostConfig.PidsLimit") + c.Assert(out, checker.Equals, "4", check.Commentf("setting the pids limit failed")) +} + +func (s *DockerSuite) TestRunPrivilegedAllowedDevices(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + + file := "/sys/fs/cgroup/devices/devices.list" + out, _ := dockerCmd(c, "run", "--privileged", "busybox", "cat", file) + c.Logf("out: %q", out) + c.Assert(strings.TrimSpace(out), checker.Equals, "a *:* rwm") +} + +func (s *DockerSuite) TestRunUserDeviceAllowed(c *check.C) { + testRequires(c, DaemonIsLinux) + + fi, err := os.Stat("/dev/snd/timer") + if err != nil { + c.Skip("Host does not have /dev/snd/timer") + } + stat, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + c.Skip("Could not stat /dev/snd/timer") + } + + file := "/sys/fs/cgroup/devices/devices.list" + out, _ := dockerCmd(c, "run", "--device", "/dev/snd/timer:w", "busybox", "cat", file) + c.Assert(out, checker.Contains, fmt.Sprintf("c %d:%d w", stat.Rdev/256, stat.Rdev%256)) +} + +func (s *DockerDaemonSuite) TestRunSeccompJSONNewFormat(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled) + + s.d.StartWithBusybox(c) + + jsonData := `{ + "defaultAction": "SCMP_ACT_ALLOW", + "syscalls": [ + { + "names": ["chmod", "fchmod", "fchmodat"], + "action": "SCMP_ACT_ERRNO" + } + ] +}` + tmpFile, err := ioutil.TempFile("", "profile.json") + 
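+	// With chmod, fchmod and fchmodat all denied via SCMP_ACT_ERRNO in the profile above,
+	// the chmod invocation below is expected to fail with "Operation not permitted". The
+	// profile is written to a temporary file so it can be passed by path to
+	// --security-opt seccomp=<file>.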
c.Assert(err, check.IsNil) + defer tmpFile.Close() + _, err = tmpFile.Write([]byte(jsonData)) + c.Assert(err, check.IsNil) + + out, err := s.d.Cmd("run", "--security-opt", "seccomp="+tmpFile.Name(), "busybox", "chmod", "777", ".") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "Operation not permitted") +} + +func (s *DockerDaemonSuite) TestRunSeccompJSONNoNameAndNames(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled) + + s.d.StartWithBusybox(c) + + jsonData := `{ + "defaultAction": "SCMP_ACT_ALLOW", + "syscalls": [ + { + "name": "chmod", + "names": ["fchmod", "fchmodat"], + "action": "SCMP_ACT_ERRNO" + } + ] +}` + tmpFile, err := ioutil.TempFile("", "profile.json") + c.Assert(err, check.IsNil) + defer tmpFile.Close() + _, err = tmpFile.Write([]byte(jsonData)) + c.Assert(err, check.IsNil) + + out, err := s.d.Cmd("run", "--security-opt", "seccomp="+tmpFile.Name(), "busybox", "chmod", "777", ".") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "'name' and 'names' were specified in the seccomp profile, use either 'name' or 'names'") +} + +func (s *DockerDaemonSuite) TestRunSeccompJSONNoArchAndArchMap(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled) + + s.d.StartWithBusybox(c) + + jsonData := `{ + "archMap": [ + { + "architecture": "SCMP_ARCH_X86_64", + "subArchitectures": [ + "SCMP_ARCH_X86", + "SCMP_ARCH_X32" + ] + } + ], + "architectures": [ + "SCMP_ARCH_X32" + ], + "defaultAction": "SCMP_ACT_ALLOW", + "syscalls": [ + { + "names": ["chmod", "fchmod", "fchmodat"], + "action": "SCMP_ACT_ERRNO" + } + ] +}` + tmpFile, err := ioutil.TempFile("", "profile.json") + c.Assert(err, check.IsNil) + defer tmpFile.Close() + _, err = tmpFile.Write([]byte(jsonData)) + c.Assert(err, check.IsNil) + + out, err := s.d.Cmd("run", "--security-opt", "seccomp="+tmpFile.Name(), "busybox", "chmod", "777", ".") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "'architectures' and 'archMap' were specified in the seccomp profile, use either 'architectures' or 'archMap'") +} + +func (s *DockerDaemonSuite) TestRunWithDaemonDefaultSeccompProfile(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled) + + s.d.StartWithBusybox(c) + + // 1) verify I can run containers with the Docker default shipped profile which allows chmod + _, err := s.d.Cmd("run", "busybox", "chmod", "777", ".") + c.Assert(err, check.IsNil) + + jsonData := `{ + "defaultAction": "SCMP_ACT_ALLOW", + "syscalls": [ + { + "name": "chmod", + "action": "SCMP_ACT_ERRNO" + } + ] +}` + tmpFile, err := ioutil.TempFile("", "profile.json") + c.Assert(err, check.IsNil) + defer tmpFile.Close() + _, err = tmpFile.Write([]byte(jsonData)) + c.Assert(err, check.IsNil) + + // 2) restart the daemon and add a custom seccomp profile in which we deny chmod + s.d.Restart(c, "--seccomp-profile="+tmpFile.Name()) + + out, err := s.d.Cmd("run", "busybox", "chmod", "777", ".") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "Operation not permitted") +} + +func (s *DockerSuite) TestRunWithNanoCPUs(c *check.C) { + testRequires(c, cpuCfsQuota, cpuCfsPeriod) + + file1 := "/sys/fs/cgroup/cpu/cpu.cfs_quota_us" + file2 := "/sys/fs/cgroup/cpu/cpu.cfs_period_us" + out, _ := dockerCmd(c, "run", "--cpus", "0.5", "--name", "test", "busybox", "sh", "-c", fmt.Sprintf("cat %s && cat %s", file1, file2)) + c.Assert(strings.TrimSpace(out), checker.Equals, "50000\n100000") + + out = inspectField(c, "test", "HostConfig.NanoCpus") + c.Assert(out, checker.Equals, "5e+08", check.Commentf("setting the Nano 
CPUs failed")) + out = inspectField(c, "test", "HostConfig.CpuQuota") + c.Assert(out, checker.Equals, "0", check.Commentf("CPU CFS quota should be 0")) + out = inspectField(c, "test", "HostConfig.CpuPeriod") + c.Assert(out, checker.Equals, "0", check.Commentf("CPU CFS period should be 0")) + + out, _, err := dockerCmdWithError("run", "--cpus", "0.5", "--cpu-quota", "50000", "--cpu-period", "100000", "busybox", "sh") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "Conflicting options: Nano CPUs and CPU Period cannot both be set") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_save_load_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_save_load_test.go new file mode 100644 index 000000000..3b14576f7 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_save_load_test.go @@ -0,0 +1,385 @@ +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "reflect" + "regexp" + "sort" + "strings" + "time" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/pkg/testutil" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" + "github.com/opencontainers/go-digest" +) + +// save a repo using gz compression and try to load it using stdout +func (s *DockerSuite) TestSaveXzAndLoadRepoStdout(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "test-save-xz-and-load-repo-stdout" + dockerCmd(c, "run", "--name", name, "busybox", "true") + + repoName := "foobar-save-load-test-xz-gz" + out, _ := dockerCmd(c, "commit", name, repoName) + + dockerCmd(c, "inspect", repoName) + + repoTarball, _, err := testutil.RunCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", repoName), + exec.Command("xz", "-c"), + exec.Command("gzip", "-c")) + c.Assert(err, checker.IsNil, check.Commentf("failed to save repo: %v %v", out, err)) + deleteImages(repoName) + + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "load"}, + Stdin: strings.NewReader(repoTarball), + }).Assert(c, icmd.Expected{ + ExitCode: 1, + }) + + after, _, err := dockerCmdWithError("inspect", repoName) + c.Assert(err, checker.NotNil, check.Commentf("the repo should not exist: %v", after)) +} + +// save a repo using xz+gz compression and try to load it using stdout +func (s *DockerSuite) TestSaveXzGzAndLoadRepoStdout(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "test-save-xz-gz-and-load-repo-stdout" + dockerCmd(c, "run", "--name", name, "busybox", "true") + + repoName := "foobar-save-load-test-xz-gz" + dockerCmd(c, "commit", name, repoName) + + dockerCmd(c, "inspect", repoName) + + out, _, err := testutil.RunCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", repoName), + exec.Command("xz", "-c"), + exec.Command("gzip", "-c")) + c.Assert(err, checker.IsNil, check.Commentf("failed to save repo: %v %v", out, err)) + + deleteImages(repoName) + + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "load"}, + Stdin: strings.NewReader(out), + }).Assert(c, icmd.Expected{ + ExitCode: 1, + }) + + after, _, err := dockerCmdWithError("inspect", repoName) + c.Assert(err, checker.NotNil, check.Commentf("the repo should not exist: %v", after)) +} + +func (s *DockerSuite) TestSaveSingleTag(c *check.C) { + testRequires(c, DaemonIsLinux) + repoName := "foobar-save-single-tag-test" + dockerCmd(c, "tag", "busybox:latest", fmt.Sprintf("%v:latest", repoName)) + + out, _ := dockerCmd(c, "images", "-q", 
"--no-trunc", repoName) + cleanedImageID := strings.TrimSpace(out) + + out, _, err := testutil.RunCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", fmt.Sprintf("%v:latest", repoName)), + exec.Command("tar", "t"), + exec.Command("grep", "-E", fmt.Sprintf("(^repositories$|%v)", cleanedImageID))) + c.Assert(err, checker.IsNil, check.Commentf("failed to save repo with image ID and 'repositories' file: %s, %v", out, err)) +} + +func (s *DockerSuite) TestSaveCheckTimes(c *check.C) { + testRequires(c, DaemonIsLinux) + repoName := "busybox:latest" + out, _ := dockerCmd(c, "inspect", repoName) + data := []struct { + ID string + Created time.Time + }{} + err := json.Unmarshal([]byte(out), &data) + c.Assert(err, checker.IsNil, check.Commentf("failed to marshal from %q: err %v", repoName, err)) + c.Assert(len(data), checker.Not(checker.Equals), 0, check.Commentf("failed to marshal the data from %q", repoName)) + tarTvTimeFormat := "2006-01-02 15:04" + out, _, err = testutil.RunCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", repoName), + exec.Command("tar", "tv"), + exec.Command("grep", "-E", fmt.Sprintf("%s %s", data[0].Created.Format(tarTvTimeFormat), digest.Digest(data[0].ID).Hex()))) + c.Assert(err, checker.IsNil, check.Commentf("failed to save repo with image ID and 'repositories' file: %s, %v", out, err)) +} + +func (s *DockerSuite) TestSaveImageId(c *check.C) { + testRequires(c, DaemonIsLinux) + repoName := "foobar-save-image-id-test" + dockerCmd(c, "tag", "emptyfs:latest", fmt.Sprintf("%v:latest", repoName)) + + out, _ := dockerCmd(c, "images", "-q", "--no-trunc", repoName) + cleanedLongImageID := strings.TrimPrefix(strings.TrimSpace(out), "sha256:") + + out, _ = dockerCmd(c, "images", "-q", repoName) + cleanedShortImageID := strings.TrimSpace(out) + + // Make sure IDs are not empty + c.Assert(cleanedLongImageID, checker.Not(check.Equals), "", check.Commentf("Id should not be empty.")) + c.Assert(cleanedShortImageID, checker.Not(check.Equals), "", check.Commentf("Id should not be empty.")) + + saveCmd := exec.Command(dockerBinary, "save", cleanedShortImageID) + tarCmd := exec.Command("tar", "t") + + var err error + tarCmd.Stdin, err = saveCmd.StdoutPipe() + c.Assert(err, checker.IsNil, check.Commentf("cannot set stdout pipe for tar: %v", err)) + grepCmd := exec.Command("grep", cleanedLongImageID) + grepCmd.Stdin, err = tarCmd.StdoutPipe() + c.Assert(err, checker.IsNil, check.Commentf("cannot set stdout pipe for grep: %v", err)) + + c.Assert(tarCmd.Start(), checker.IsNil, check.Commentf("tar failed with error: %v", err)) + c.Assert(saveCmd.Start(), checker.IsNil, check.Commentf("docker save failed with error: %v", err)) + defer func() { + saveCmd.Wait() + tarCmd.Wait() + dockerCmd(c, "rmi", repoName) + }() + + out, _, err = runCommandWithOutput(grepCmd) + + c.Assert(err, checker.IsNil, check.Commentf("failed to save repo with image ID: %s, %v", out, err)) +} + +// save a repo and try to load it using flags +func (s *DockerSuite) TestSaveAndLoadRepoFlags(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "test-save-and-load-repo-flags" + dockerCmd(c, "run", "--name", name, "busybox", "true") + + repoName := "foobar-save-load-test" + + deleteImages(repoName) + dockerCmd(c, "commit", name, repoName) + + before, _ := dockerCmd(c, "inspect", repoName) + + out, _, err := testutil.RunCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", repoName), + exec.Command(dockerBinary, "load")) + c.Assert(err, checker.IsNil, check.Commentf("failed to save and load 
repo: %s, %v", out, err)) + + after, _ := dockerCmd(c, "inspect", repoName) + c.Assert(before, checker.Equals, after, check.Commentf("inspect is not the same after a save / load")) +} + +func (s *DockerSuite) TestSaveWithNoExistImage(c *check.C) { + testRequires(c, DaemonIsLinux) + + imgName := "foobar-non-existing-image" + + out, _, err := dockerCmdWithError("save", "-o", "test-img.tar", imgName) + c.Assert(err, checker.NotNil, check.Commentf("save image should fail for non-existing image")) + c.Assert(out, checker.Contains, fmt.Sprintf("No such image: %s", imgName)) +} + +func (s *DockerSuite) TestSaveMultipleNames(c *check.C) { + testRequires(c, DaemonIsLinux) + repoName := "foobar-save-multi-name-test" + + // Make one image + dockerCmd(c, "tag", "emptyfs:latest", fmt.Sprintf("%v-one:latest", repoName)) + + // Make two images + dockerCmd(c, "tag", "emptyfs:latest", fmt.Sprintf("%v-two:latest", repoName)) + + out, _, err := testutil.RunCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", fmt.Sprintf("%v-one", repoName), fmt.Sprintf("%v-two:latest", repoName)), + exec.Command("tar", "xO", "repositories"), + exec.Command("grep", "-q", "-E", "(-one|-two)"), + ) + c.Assert(err, checker.IsNil, check.Commentf("failed to save multiple repos: %s, %v", out, err)) +} + +func (s *DockerSuite) TestSaveRepoWithMultipleImages(c *check.C) { + testRequires(c, DaemonIsLinux) + makeImage := func(from string, tag string) string { + var ( + out string + ) + out, _ = dockerCmd(c, "run", "-d", from, "true") + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "commit", cleanedContainerID, tag) + imageID := strings.TrimSpace(out) + return imageID + } + + repoName := "foobar-save-multi-images-test" + tagFoo := repoName + ":foo" + tagBar := repoName + ":bar" + + idFoo := makeImage("busybox:latest", tagFoo) + idBar := makeImage("busybox:latest", tagBar) + + deleteImages(repoName) + + // create the archive + out, _, err := testutil.RunCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", repoName, "busybox:latest"), + exec.Command("tar", "t")) + c.Assert(err, checker.IsNil, check.Commentf("failed to save multiple images: %s, %v", out, err)) + + lines := strings.Split(strings.TrimSpace(out), "\n") + var actual []string + for _, l := range lines { + if regexp.MustCompile("^[a-f0-9]{64}\\.json$").Match([]byte(l)) { + actual = append(actual, strings.TrimSuffix(l, ".json")) + } + } + + // make the list of expected layers + out = inspectField(c, "busybox:latest", "Id") + expected := []string{strings.TrimSpace(out), idFoo, idBar} + + // prefixes are not in tar + for i := range expected { + expected[i] = digest.Digest(expected[i]).Hex() + } + + sort.Strings(actual) + sort.Strings(expected) + c.Assert(actual, checker.DeepEquals, expected, check.Commentf("archive does not contains the right layers: got %v, expected %v, output: %q", actual, expected, out)) +} + +// Issue #6722 #5892 ensure directories are included in changes +func (s *DockerSuite) TestSaveDirectoryPermissions(c *check.C) { + testRequires(c, DaemonIsLinux) + layerEntries := []string{"opt/", "opt/a/", "opt/a/b/", "opt/a/b/c"} + layerEntriesAUFS := []string{"./", ".wh..wh.aufs", ".wh..wh.orph/", ".wh..wh.plnk/", "opt/", "opt/a/", "opt/a/b/", "opt/a/b/c"} + + name := "save-directory-permissions" + tmpDir, err := ioutil.TempDir("", "save-layers-with-directories") + c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary directory: %s", err)) + extractionDirectory := filepath.Join(tmpDir, 
"image-extraction-dir") + os.Mkdir(extractionDirectory, 0777) + + defer os.RemoveAll(tmpDir) + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + RUN adduser -D user && mkdir -p /opt/a/b && chown -R user:user /opt/a + RUN touch /opt/a/b/c && chown user:user /opt/a/b/c`)) + + out, _, err := testutil.RunCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", name), + exec.Command("tar", "-xf", "-", "-C", extractionDirectory), + ) + c.Assert(err, checker.IsNil, check.Commentf("failed to save and extract image: %s", out)) + + dirs, err := ioutil.ReadDir(extractionDirectory) + c.Assert(err, checker.IsNil, check.Commentf("failed to get a listing of the layer directories: %s", err)) + + found := false + for _, entry := range dirs { + var entriesSansDev []string + if entry.IsDir() { + layerPath := filepath.Join(extractionDirectory, entry.Name(), "layer.tar") + + f, err := os.Open(layerPath) + c.Assert(err, checker.IsNil, check.Commentf("failed to open %s: %s", layerPath, err)) + defer f.Close() + + entries, err := testutil.ListTar(f) + for _, e := range entries { + if !strings.Contains(e, "dev/") { + entriesSansDev = append(entriesSansDev, e) + } + } + c.Assert(err, checker.IsNil, check.Commentf("encountered error while listing tar entries: %s", err)) + + if reflect.DeepEqual(entriesSansDev, layerEntries) || reflect.DeepEqual(entriesSansDev, layerEntriesAUFS) { + found = true + break + } + } + } + + c.Assert(found, checker.Equals, true, check.Commentf("failed to find the layer with the right content listing")) + +} + +// Test loading a weird image where one of the layers is of zero size. +// The layer.tar file is actually zero bytes, no padding or anything else. +// See issue: 18170 +func (s *DockerSuite) TestLoadZeroSizeLayer(c *check.C) { + testRequires(c, DaemonIsLinux) + + dockerCmd(c, "load", "-i", "fixtures/load/emptyLayer.tar") +} + +func (s *DockerSuite) TestSaveLoadParents(c *check.C) { + testRequires(c, DaemonIsLinux) + + makeImage := func(from string, addfile string) string { + var ( + out string + ) + out, _ = dockerCmd(c, "run", "-d", from, "touch", addfile) + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "commit", cleanedContainerID) + imageID := strings.TrimSpace(out) + + dockerCmd(c, "rm", "-f", cleanedContainerID) + return imageID + } + + idFoo := makeImage("busybox", "foo") + idBar := makeImage(idFoo, "bar") + + tmpDir, err := ioutil.TempDir("", "save-load-parents") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir) + + c.Log("tmpdir", tmpDir) + + outfile := filepath.Join(tmpDir, "out.tar") + + dockerCmd(c, "save", "-o", outfile, idBar, idFoo) + dockerCmd(c, "rmi", idBar) + dockerCmd(c, "load", "-i", outfile) + + inspectOut := inspectField(c, idBar, "Parent") + c.Assert(inspectOut, checker.Equals, idFoo) + + inspectOut = inspectField(c, idFoo, "Parent") + c.Assert(inspectOut, checker.Equals, "") +} + +func (s *DockerSuite) TestSaveLoadNoTag(c *check.C) { + testRequires(c, DaemonIsLinux) + + name := "saveloadnotag" + + buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nENV foo=bar")) + id := inspectField(c, name, "Id") + + // Test to make sure that save w/o name just shows imageID during load + out, _, err := testutil.RunCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", id), + exec.Command(dockerBinary, "load")) + c.Assert(err, checker.IsNil, check.Commentf("failed to save and load repo: %s, %v", out, err)) + + // Should not show 'name' but should show the image ID during the load + 
c.Assert(out, checker.Not(checker.Contains), "Loaded image: ") + c.Assert(out, checker.Contains, "Loaded image ID:") + c.Assert(out, checker.Contains, id) + + // Test to make sure that save by name shows that name during load + out, _, err = testutil.RunCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", name), + exec.Command(dockerBinary, "load")) + c.Assert(err, checker.IsNil, check.Commentf("failed to save and load repo: %s, %v", out, err)) + c.Assert(out, checker.Contains, "Loaded image: "+name+":latest") + c.Assert(out, checker.Not(checker.Contains), "Loaded image ID:") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_save_load_unix_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_save_load_unix_test.go new file mode 100644 index 000000000..deb061682 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_save_load_unix_test.go @@ -0,0 +1,107 @@ +// +build !windows + +package main + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "os/exec" + "strings" + "time" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli/build" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" + "github.com/kr/pty" +) + +// save a repo and try to load it using stdout +func (s *DockerSuite) TestSaveAndLoadRepoStdout(c *check.C) { + name := "test-save-and-load-repo-stdout" + dockerCmd(c, "run", "--name", name, "busybox", "true") + + repoName := "foobar-save-load-test" + before, _ := dockerCmd(c, "commit", name, repoName) + before = strings.TrimRight(before, "\n") + + tmpFile, err := ioutil.TempFile("", "foobar-save-load-test.tar") + c.Assert(err, check.IsNil) + defer os.Remove(tmpFile.Name()) + + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "save", repoName}, + Stdout: tmpFile, + }).Assert(c, icmd.Success) + + tmpFile, err = os.Open(tmpFile.Name()) + c.Assert(err, check.IsNil) + defer tmpFile.Close() + + deleteImages(repoName) + + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "load"}, + Stdin: tmpFile, + }).Assert(c, icmd.Success) + + after := inspectField(c, repoName, "Id") + after = strings.TrimRight(after, "\n") + + c.Assert(after, check.Equals, before) //inspect is not the same after a save / load + + deleteImages(repoName) + + pty, tty, err := pty.Open() + c.Assert(err, check.IsNil) + cmd := exec.Command(dockerBinary, "save", repoName) + cmd.Stdin = tty + cmd.Stdout = tty + cmd.Stderr = tty + c.Assert(cmd.Start(), check.IsNil) + c.Assert(cmd.Wait(), check.NotNil) //did not break writing to a TTY + + buf := make([]byte, 1024) + + n, err := pty.Read(buf) + c.Assert(err, check.IsNil) //could not read tty output + c.Assert(string(buf[:n]), checker.Contains, "cowardly refusing", check.Commentf("help output is not being yielded")) +} + +func (s *DockerSuite) TestSaveAndLoadWithProgressBar(c *check.C) { + name := "test-load" + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + RUN touch aa + `)) + + tmptar := name + ".tar" + dockerCmd(c, "save", "-o", tmptar, name) + defer os.Remove(tmptar) + + dockerCmd(c, "rmi", name) + dockerCmd(c, "tag", "busybox", name) + out, _ := dockerCmd(c, "load", "-i", tmptar) + expected := fmt.Sprintf("The image %s:latest already exists, renaming the old one with ID", name) + c.Assert(out, checker.Contains, expected) +} + +// fail because load didn't receive data from stdin +func (s *DockerSuite) TestLoadNoStdinFail(c *check.C) { + pty, tty, err := pty.Open() + c.Assert(err, check.IsNil) + ctx, 
cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	cmd := exec.CommandContext(ctx, dockerBinary, "load")
+	cmd.Stdin = tty
+	cmd.Stdout = tty
+	cmd.Stderr = tty
+	c.Assert(cmd.Run(), check.NotNil) // docker-load should fail
+
+	buf := make([]byte, 1024)
+
+	n, err := pty.Read(buf)
+	c.Assert(err, check.IsNil) //could not read tty output
+	c.Assert(string(buf[:n]), checker.Contains, "requested load from stdin, but stdin is empty")
+}
diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_search_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_search_test.go
new file mode 100644
index 000000000..2c3312d9e
--- /dev/null
+++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_search_test.go
@@ -0,0 +1,131 @@
+package main
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/docker/docker/integration-cli/checker"
+	"github.com/go-check/check"
+)
+
+// search for repos matching "busybox" on the central registry
+func (s *DockerSuite) TestSearchOnCentralRegistry(c *check.C) {
+	testRequires(c, Network, DaemonIsLinux)
+
+	out, _ := dockerCmd(c, "search", "busybox")
+	c.Assert(out, checker.Contains, "Busybox base image.", check.Commentf("couldn't find any repository with the description 'Busybox base image.'"))
+}
+
+func (s *DockerSuite) TestSearchStarsOptionWithWrongParameter(c *check.C) {
+	out, _, err := dockerCmdWithError("search", "--filter", "stars=a", "busybox")
+	c.Assert(err, check.NotNil, check.Commentf(out))
+	c.Assert(out, checker.Contains, "Invalid filter", check.Commentf("couldn't find the invalid filter warning"))
+
+	out, _, err = dockerCmdWithError("search", "-f", "stars=a", "busybox")
+	c.Assert(err, check.NotNil, check.Commentf(out))
+	c.Assert(out, checker.Contains, "Invalid filter", check.Commentf("couldn't find the invalid filter warning"))
+
+	out, _, err = dockerCmdWithError("search", "-f", "is-automated=a", "busybox")
+	c.Assert(err, check.NotNil, check.Commentf(out))
+	c.Assert(out, checker.Contains, "Invalid filter", check.Commentf("couldn't find the invalid filter warning"))
+
+	out, _, err = dockerCmdWithError("search", "-f", "is-official=a", "busybox")
+	c.Assert(err, check.NotNil, check.Commentf(out))
+	c.Assert(out, checker.Contains, "Invalid filter", check.Commentf("couldn't find the invalid filter warning"))
+
+	// -s --stars deprecated since Docker 1.13
+	out, _, err = dockerCmdWithError("search", "--stars=a", "busybox")
+	c.Assert(err, check.NotNil, check.Commentf(out))
+	c.Assert(out, checker.Contains, "invalid syntax", check.Commentf("couldn't find the invalid value warning"))
+
+	// -s --stars deprecated since Docker 1.13
+	out, _, err = dockerCmdWithError("search", "-s=-1", "busybox")
+	c.Assert(err, check.NotNil, check.Commentf(out))
+	c.Assert(out, checker.Contains, "invalid syntax", check.Commentf("couldn't find the invalid value warning"))
+}
+
+func (s *DockerSuite) TestSearchCmdOptions(c *check.C) {
+	testRequires(c, Network, DaemonIsLinux)
+
+	out, _ := dockerCmd(c, "search", "--help")
+	c.Assert(out, checker.Contains, "Usage:\tdocker search [OPTIONS] TERM")
+
+	outSearchCmd, _ := dockerCmd(c, "search", "busybox")
+	outSearchCmdNotrunc, _ := dockerCmd(c, "search", "--no-trunc=true", "busybox")
+
+	c.Assert(len(outSearchCmd) > len(outSearchCmdNotrunc), check.Equals, false, check.Commentf("The --no-trunc option did not take effect."))
+
+	outSearchCmdautomated, _ := dockerCmd(c, "search", "--filter", "is-automated=true", "busybox") //The busybox is a busybox base image, not an AUTOMATED image.
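+	// The header row never begins with "busybox ", so scanning every output line and rejecting
+	// that prefix (below) asserts that the official busybox image is absent from the
+	// is-automated=true results.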
+ outSearchCmdautomatedSlice := strings.Split(outSearchCmdautomated, "\n") + for i := range outSearchCmdautomatedSlice { + c.Assert(strings.HasPrefix(outSearchCmdautomatedSlice[i], "busybox "), check.Equals, false, check.Commentf("The busybox is not an AUTOMATED image: %s", outSearchCmdautomated)) + } + + outSearchCmdNotOfficial, _ := dockerCmd(c, "search", "--filter", "is-official=false", "busybox") //The busybox is a busybox base image, official image. + outSearchCmdNotOfficialSlice := strings.Split(outSearchCmdNotOfficial, "\n") + for i := range outSearchCmdNotOfficialSlice { + c.Assert(strings.HasPrefix(outSearchCmdNotOfficialSlice[i], "busybox "), check.Equals, false, check.Commentf("The busybox is not an OFFICIAL image: %s", outSearchCmdNotOfficial)) + } + + outSearchCmdOfficial, _ := dockerCmd(c, "search", "--filter", "is-official=true", "busybox") //The busybox is a busybox base image, official image. + outSearchCmdOfficialSlice := strings.Split(outSearchCmdOfficial, "\n") + c.Assert(outSearchCmdOfficialSlice, checker.HasLen, 3) // 1 header, 1 line, 1 carriage return + c.Assert(strings.HasPrefix(outSearchCmdOfficialSlice[1], "busybox "), check.Equals, true, check.Commentf("The busybox is an OFFICIAL image: %s", outSearchCmdNotOfficial)) + + outSearchCmdStars, _ := dockerCmd(c, "search", "--filter", "stars=2", "busybox") + c.Assert(strings.Count(outSearchCmdStars, "[OK]") > strings.Count(outSearchCmd, "[OK]"), check.Equals, false, check.Commentf("The quantity of images with stars should be less than that of all images: %s", outSearchCmdStars)) + + dockerCmd(c, "search", "--filter", "is-automated=true", "--filter", "stars=2", "--no-trunc=true", "busybox") + + // --automated deprecated since Docker 1.13 + outSearchCmdautomated1, _ := dockerCmd(c, "search", "--automated=true", "busybox") //The busybox is a busybox base image, not an AUTOMATED image. 
+ outSearchCmdautomatedSlice1 := strings.Split(outSearchCmdautomated1, "\n") + for i := range outSearchCmdautomatedSlice1 { + c.Assert(strings.HasPrefix(outSearchCmdautomatedSlice1[i], "busybox "), check.Equals, false, check.Commentf("The busybox is not an AUTOMATED image: %s", outSearchCmdautomated)) + } + + // -s --stars deprecated since Docker 1.13 + outSearchCmdStars1, _ := dockerCmd(c, "search", "--stars=2", "busybox") + c.Assert(strings.Count(outSearchCmdStars1, "[OK]") > strings.Count(outSearchCmd, "[OK]"), check.Equals, false, check.Commentf("The quantity of images with stars should be less than that of all images: %s", outSearchCmdStars1)) + + // -s --stars deprecated since Docker 1.13 + dockerCmd(c, "search", "--stars=2", "--automated=true", "--no-trunc=true", "busybox") +} + +// search for repos which start with "ubuntu-" on the central registry +func (s *DockerSuite) TestSearchOnCentralRegistryWithDash(c *check.C) { + testRequires(c, Network, DaemonIsLinux) + + dockerCmd(c, "search", "ubuntu-") +} + +// test case for #23055 +func (s *DockerSuite) TestSearchWithLimit(c *check.C) { + testRequires(c, Network, DaemonIsLinux) + + limit := 10 + out, _, err := dockerCmdWithError("search", fmt.Sprintf("--limit=%d", limit), "docker") + c.Assert(err, checker.IsNil) + outSlice := strings.Split(out, "\n") + c.Assert(outSlice, checker.HasLen, limit+2) // 1 header, 1 carriage return + + limit = 50 + out, _, err = dockerCmdWithError("search", fmt.Sprintf("--limit=%d", limit), "docker") + c.Assert(err, checker.IsNil) + outSlice = strings.Split(out, "\n") + c.Assert(outSlice, checker.HasLen, limit+2) // 1 header, 1 carriage return + + limit = 100 + out, _, err = dockerCmdWithError("search", fmt.Sprintf("--limit=%d", limit), "docker") + c.Assert(err, checker.IsNil) + outSlice = strings.Split(out, "\n") + c.Assert(outSlice, checker.HasLen, limit+2) // 1 header, 1 carriage return + + limit = 0 + _, _, err = dockerCmdWithError("search", fmt.Sprintf("--limit=%d", limit), "docker") + c.Assert(err, checker.Not(checker.IsNil)) + + limit = 200 + _, _, err = dockerCmdWithError("search", fmt.Sprintf("--limit=%d", limit), "docker") + c.Assert(err, checker.Not(checker.IsNil)) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_secret_create_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_secret_create_test.go new file mode 100644 index 000000000..839c3922a --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_secret_create_test.go @@ -0,0 +1,131 @@ +// +build !windows + +package main + +import ( + "io/ioutil" + "os" + "strings" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestSecretCreate(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_secret" + id := d.CreateSecret(c, swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + secret := d.GetSecret(c, id) + c.Assert(secret.Spec.Name, checker.Equals, testName) +} + +func (s *DockerSwarmSuite) TestSecretCreateWithLabels(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_secret" + id := d.CreateSecret(c, swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: testName, + Labels: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, 
checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + secret := d.GetSecret(c, id) + c.Assert(secret.Spec.Name, checker.Equals, testName) + c.Assert(len(secret.Spec.Labels), checker.Equals, 2) + c.Assert(secret.Spec.Labels["key1"], checker.Equals, "value1") + c.Assert(secret.Spec.Labels["key2"], checker.Equals, "value2") +} + +// Test case for 28884 +func (s *DockerSwarmSuite) TestSecretCreateResolve(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "test_secret" + id := d.CreateSecret(c, swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: name, + }, + Data: []byte("foo"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + fake := d.CreateSecret(c, swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: id, + }, + Data: []byte("fake foo"), + }) + c.Assert(fake, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", fake)) + + out, err := d.Cmd("secret", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + c.Assert(out, checker.Contains, fake) + + out, err = d.Cmd("secret", "rm", id) + c.Assert(out, checker.Contains, id) + + // Fake one will remain + out, err = d.Cmd("secret", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name) + c.Assert(out, checker.Contains, fake) + + // Remove based on name prefix of the fake one + // (which is the same as the ID of foo one) should not work + // as search is only done based on: + // - Full ID + // - Full Name + // - Partial ID (prefix) + out, err = d.Cmd("secret", "rm", id[:5]) + c.Assert(out, checker.Not(checker.Contains), id) + out, err = d.Cmd("secret", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name) + c.Assert(out, checker.Contains, fake) + + // Remove based on ID prefix of the fake one should succeed + out, err = d.Cmd("secret", "rm", fake[:5]) + c.Assert(out, checker.Contains, fake[:5]) + out, err = d.Cmd("secret", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name) + c.Assert(out, checker.Not(checker.Contains), id) + c.Assert(out, checker.Not(checker.Contains), fake) +} + +func (s *DockerSwarmSuite) TestSecretCreateWithFile(c *check.C) { + d := s.AddDaemon(c, true, true) + + testFile, err := ioutil.TempFile("", "secretCreateTest") + c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary file")) + defer os.Remove(testFile.Name()) + + testData := "TESTINGDATA" + _, err = testFile.Write([]byte(testData)) + c.Assert(err, checker.IsNil, check.Commentf("failed to write to temporary file")) + + testName := "test_secret" + out, err := d.Cmd("secret", "create", testName, testFile.Name()) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "", check.Commentf(out)) + + id := strings.TrimSpace(out) + secret := d.GetSecret(c, id) + c.Assert(secret.Spec.Name, checker.Equals, testName) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_secret_inspect_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_secret_inspect_test.go new file mode 100644 index 000000000..218463bea --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_secret_inspect_test.go @@ -0,0 +1,68 @@ +// +build !windows + +package main + +import ( + "encoding/json" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestSecretInspect(c *check.C) { + 
d := s.AddDaemon(c, true, true) + + testName := "test_secret" + id := d.CreateSecret(c, swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + secret := d.GetSecret(c, id) + c.Assert(secret.Spec.Name, checker.Equals, testName) + + out, err := d.Cmd("secret", "inspect", testName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + var secrets []swarm.Secret + c.Assert(json.Unmarshal([]byte(out), &secrets), checker.IsNil) + c.Assert(secrets, checker.HasLen, 1) +} + +func (s *DockerSwarmSuite) TestSecretInspectMultiple(c *check.C) { + d := s.AddDaemon(c, true, true) + + testNames := []string{ + "test0", + "test1", + } + for _, n := range testNames { + id := d.CreateSecret(c, swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: n, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + secret := d.GetSecret(c, id) + c.Assert(secret.Spec.Name, checker.Equals, n) + + } + + args := []string{ + "secret", + "inspect", + } + args = append(args, testNames...) + out, err := d.Cmd(args...) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + var secrets []swarm.Secret + c.Assert(json.Unmarshal([]byte(out), &secrets), checker.IsNil) + c.Assert(secrets, checker.HasLen, 2) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_secret_ls_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_secret_ls_test.go new file mode 100644 index 000000000..f3201f7d7 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_secret_ls_test.go @@ -0,0 +1,125 @@ +// +build !windows + +package main + +import ( + "strings" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestSecretList(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName0 := "test0" + testName1 := "test1" + + // create secret test0 + id0 := d.CreateSecret(c, swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: testName0, + Labels: map[string]string{"type": "test"}, + }, + Data: []byte("TESTINGDATA0"), + }) + c.Assert(id0, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id0)) + + secret := d.GetSecret(c, id0) + c.Assert(secret.Spec.Name, checker.Equals, testName0) + + // create secret test1 + id1 := d.CreateSecret(c, swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: testName1, + Labels: map[string]string{"type": "production"}, + }, + Data: []byte("TESTINGDATA1"), + }) + c.Assert(id1, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id1)) + + secret = d.GetSecret(c, id1) + c.Assert(secret.Spec.Name, checker.Equals, testName1) + + // test by command `docker secret ls` + out, err := d.Cmd("secret", "ls") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Contains, testName0) + c.Assert(strings.TrimSpace(out), checker.Contains, testName1) + + // test filter by name `docker secret ls --filter name=xxx` + args := []string{ + "secret", + "ls", + "--filter", + "name=test0", + } + out, err = d.Cmd(args...) 
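+	// Only test0 should survive the name=test0 filter; the assertions below expect test0 to be
+	// listed and test1 to be filtered out.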
+ c.Assert(err, checker.IsNil, check.Commentf(out)) + + c.Assert(strings.TrimSpace(out), checker.Contains, testName0) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), testName1) + + // test filter by id `docker secret ls --filter id=xxx` + args = []string{ + "secret", + "ls", + "--filter", + "id=" + id1, + } + out, err = d.Cmd(args...) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), testName0) + c.Assert(strings.TrimSpace(out), checker.Contains, testName1) + + // test filter by label `docker secret ls --filter label=xxx` + args = []string{ + "secret", + "ls", + "--filter", + "label=type", + } + out, err = d.Cmd(args...) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + c.Assert(strings.TrimSpace(out), checker.Contains, testName0) + c.Assert(strings.TrimSpace(out), checker.Contains, testName1) + + args = []string{ + "secret", + "ls", + "--filter", + "label=type=test", + } + out, err = d.Cmd(args...) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + c.Assert(strings.TrimSpace(out), checker.Contains, testName0) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), testName1) + + args = []string{ + "secret", + "ls", + "--filter", + "label=type=production", + } + out, err = d.Cmd(args...) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), testName0) + c.Assert(strings.TrimSpace(out), checker.Contains, testName1) + + // test invalid filter `docker secret ls --filter noexisttype=xxx` + args = []string{ + "secret", + "ls", + "--filter", + "noexisttype=test0", + } + out, err = d.Cmd(args...) + c.Assert(err, checker.NotNil, check.Commentf(out)) + + c.Assert(strings.TrimSpace(out), checker.Contains, "Error response from daemon: Invalid filter 'noexisttype'") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_service_create_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_service_create_test.go new file mode 100644 index 000000000..6fc92c237 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_service_create_test.go @@ -0,0 +1,447 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "path/filepath" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestServiceCreateMountVolume(c *check.C) { + d := s.AddDaemon(c, true, true) + out, err := d.Cmd("service", "create", "--no-resolve-image", "--detach=true", "--mount", "type=volume,source=foo,target=/foo,volume-nocopy", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + id := strings.TrimSpace(out) + + var tasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.GetServiceTasks(c, id) + return len(tasks) > 0, nil + }, checker.Equals, true) + + task := tasks[0] + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + if task.NodeID == "" || task.Status.ContainerStatus.ContainerID == "" { + task = d.GetTask(c, task.ID) + } + return task.NodeID != "" && task.Status.ContainerStatus.ContainerID != "", nil + }, checker.Equals, true) + + // check container mount config + out, err = s.nodeCmd(c, task.NodeID, "inspect", "--format", "{{json 
.HostConfig.Mounts}}", task.Status.ContainerStatus.ContainerID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + var mountConfig []mount.Mount + c.Assert(json.Unmarshal([]byte(out), &mountConfig), checker.IsNil) + c.Assert(mountConfig, checker.HasLen, 1) + + c.Assert(mountConfig[0].Source, checker.Equals, "foo") + c.Assert(mountConfig[0].Target, checker.Equals, "/foo") + c.Assert(mountConfig[0].Type, checker.Equals, mount.TypeVolume) + c.Assert(mountConfig[0].VolumeOptions, checker.NotNil) + c.Assert(mountConfig[0].VolumeOptions.NoCopy, checker.True) + + // check container mounts actual + out, err = s.nodeCmd(c, task.NodeID, "inspect", "--format", "{{json .Mounts}}", task.Status.ContainerStatus.ContainerID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + var mounts []types.MountPoint + c.Assert(json.Unmarshal([]byte(out), &mounts), checker.IsNil) + c.Assert(mounts, checker.HasLen, 1) + + c.Assert(mounts[0].Type, checker.Equals, mount.TypeVolume) + c.Assert(mounts[0].Name, checker.Equals, "foo") + c.Assert(mounts[0].Destination, checker.Equals, "/foo") + c.Assert(mounts[0].RW, checker.Equals, true) +} + +func (s *DockerSwarmSuite) TestServiceCreateWithSecretSimple(c *check.C) { + d := s.AddDaemon(c, true, true) + + serviceName := "test-service-secret" + testName := "test_secret" + id := d.CreateSecret(c, swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", serviceName, "--secret", testName, "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName) + c.Assert(err, checker.IsNil) + + var refs []swarm.SecretReference + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, 1) + + c.Assert(refs[0].SecretName, checker.Equals, testName) + c.Assert(refs[0].File, checker.Not(checker.IsNil)) + c.Assert(refs[0].File.Name, checker.Equals, testName) + c.Assert(refs[0].File.UID, checker.Equals, "0") + c.Assert(refs[0].File.GID, checker.Equals, "0") + + out, err = d.Cmd("service", "rm", serviceName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + d.DeleteSecret(c, testName) +} + +func (s *DockerSwarmSuite) TestServiceCreateWithSecretSourceTargetPaths(c *check.C) { + d := s.AddDaemon(c, true, true) + + testPaths := map[string]string{ + "app": "/etc/secret", + "test_secret": "test_secret", + "relative_secret": "relative/secret", + "escapes_in_container": "../secret", + } + + var secretFlags []string + + for testName, testTarget := range testPaths { + id := d.CreateSecret(c, swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA " + testName + " " + testTarget), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + secretFlags = append(secretFlags, "--secret", fmt.Sprintf("source=%s,target=%s", testName, testTarget)) + } + + serviceName := "svc" + serviceCmd := []string{"service", "create", "--no-resolve-image", "--name", serviceName} + serviceCmd = append(serviceCmd, secretFlags...) + serviceCmd = append(serviceCmd, "busybox", "top") + out, err := d.Cmd(serviceCmd...) 
+ c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName) + c.Assert(err, checker.IsNil) + + var refs []swarm.SecretReference + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, len(testPaths)) + + var tasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.GetServiceTasks(c, serviceName) + return len(tasks) > 0, nil + }, checker.Equals, true) + + task := tasks[0] + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + if task.NodeID == "" || task.Status.ContainerStatus.ContainerID == "" { + task = d.GetTask(c, task.ID) + } + return task.NodeID != "" && task.Status.ContainerStatus.ContainerID != "", nil + }, checker.Equals, true) + + for testName, testTarget := range testPaths { + path := testTarget + if !filepath.IsAbs(path) { + path = filepath.Join("/run/secrets", path) + } + out, err := d.Cmd("exec", task.Status.ContainerStatus.ContainerID, "cat", path) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Equals, "TESTINGDATA "+testName+" "+testTarget) + } + + out, err = d.Cmd("service", "rm", serviceName) + c.Assert(err, checker.IsNil, check.Commentf(out)) +} + +func (s *DockerSwarmSuite) TestServiceCreateWithSecretReferencedTwice(c *check.C) { + d := s.AddDaemon(c, true, true) + + id := d.CreateSecret(c, swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: "mysecret", + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + serviceName := "svc" + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", serviceName, "--secret", "source=mysecret,target=target1", "--secret", "source=mysecret,target=target2", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName) + c.Assert(err, checker.IsNil) + + var refs []swarm.SecretReference + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, 2) + + var tasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.GetServiceTasks(c, serviceName) + return len(tasks) > 0, nil + }, checker.Equals, true) + + task := tasks[0] + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + if task.NodeID == "" || task.Status.ContainerStatus.ContainerID == "" { + task = d.GetTask(c, task.ID) + } + return task.NodeID != "" && task.Status.ContainerStatus.ContainerID != "", nil + }, checker.Equals, true) + + for _, target := range []string{"target1", "target2"} { + c.Assert(err, checker.IsNil, check.Commentf(out)) + path := filepath.Join("/run/secrets", target) + out, err := d.Cmd("exec", task.Status.ContainerStatus.ContainerID, "cat", path) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Equals, "TESTINGDATA") + } + + out, err = d.Cmd("service", "rm", serviceName) + c.Assert(err, checker.IsNil, check.Commentf(out)) +} + +func (s *DockerSwarmSuite) TestServiceCreateWithConfigSimple(c *check.C) { + d := s.AddDaemon(c, true, true) + + serviceName := "test-service-config" + testName := "test_config" + id := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + 
Name: testName, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", serviceName, "--config", testName, "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Configs }}", serviceName) + c.Assert(err, checker.IsNil) + + var refs []swarm.ConfigReference + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, 1) + + c.Assert(refs[0].ConfigName, checker.Equals, testName) + c.Assert(refs[0].File, checker.Not(checker.IsNil)) + c.Assert(refs[0].File.Name, checker.Equals, testName) + c.Assert(refs[0].File.UID, checker.Equals, "0") + c.Assert(refs[0].File.GID, checker.Equals, "0") + + out, err = d.Cmd("service", "rm", serviceName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + d.DeleteConfig(c, testName) +} + +func (s *DockerSwarmSuite) TestServiceCreateWithConfigSourceTargetPaths(c *check.C) { + d := s.AddDaemon(c, true, true) + + testPaths := map[string]string{ + "app": "/etc/config", + "test_config": "test_config", + "relative_config": "relative/config", + } + + var configFlags []string + + for testName, testTarget := range testPaths { + id := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA " + testName + " " + testTarget), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + + configFlags = append(configFlags, "--config", fmt.Sprintf("source=%s,target=%s", testName, testTarget)) + } + + serviceName := "svc" + serviceCmd := []string{"service", "create", "--no-resolve-image", "--name", serviceName} + serviceCmd = append(serviceCmd, configFlags...) + serviceCmd = append(serviceCmd, "busybox", "top") + out, err := d.Cmd(serviceCmd...) 
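+ // as with the secrets test above, verify the create, the config references in the service spec, and the file contents at each target path inside the task container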
+ c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Configs }}", serviceName) + c.Assert(err, checker.IsNil) + + var refs []swarm.ConfigReference + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, len(testPaths)) + + var tasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.GetServiceTasks(c, serviceName) + return len(tasks) > 0, nil + }, checker.Equals, true) + + task := tasks[0] + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + if task.NodeID == "" || task.Status.ContainerStatus.ContainerID == "" { + task = d.GetTask(c, task.ID) + } + return task.NodeID != "" && task.Status.ContainerStatus.ContainerID != "", nil + }, checker.Equals, true) + + for testName, testTarget := range testPaths { + path := testTarget + if !filepath.IsAbs(path) { + path = filepath.Join("/", path) + } + out, err := d.Cmd("exec", task.Status.ContainerStatus.ContainerID, "cat", path) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Equals, "TESTINGDATA "+testName+" "+testTarget) + } + + out, err = d.Cmd("service", "rm", serviceName) + c.Assert(err, checker.IsNil, check.Commentf(out)) +} + +func (s *DockerSwarmSuite) TestServiceCreateWithConfigReferencedTwice(c *check.C) { + d := s.AddDaemon(c, true, true) + + id := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: "myconfig", + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + + serviceName := "svc" + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", serviceName, "--config", "source=myconfig,target=target1", "--config", "source=myconfig,target=target2", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Configs }}", serviceName) + c.Assert(err, checker.IsNil) + + var refs []swarm.ConfigReference + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, 2) + + var tasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.GetServiceTasks(c, serviceName) + return len(tasks) > 0, nil + }, checker.Equals, true) + + task := tasks[0] + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + if task.NodeID == "" || task.Status.ContainerStatus.ContainerID == "" { + task = d.GetTask(c, task.ID) + } + return task.NodeID != "" && task.Status.ContainerStatus.ContainerID != "", nil + }, checker.Equals, true) + + for _, target := range []string{"target1", "target2"} { + c.Assert(err, checker.IsNil, check.Commentf(out)) + path := filepath.Join("/", target) + out, err := d.Cmd("exec", task.Status.ContainerStatus.ContainerID, "cat", path) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Equals, "TESTINGDATA") + } + + out, err = d.Cmd("service", "rm", serviceName) + c.Assert(err, checker.IsNil, check.Commentf(out)) +} + +func (s *DockerSwarmSuite) TestServiceCreateMountTmpfs(c *check.C) { + d := s.AddDaemon(c, true, true) + out, err := d.Cmd("service", "create", "--no-resolve-image", "--detach=true", "--mount", "type=tmpfs,target=/foo,tmpfs-size=1MB", "busybox", "sh", "-c", "mount | grep foo; tail -f 
/dev/null") + c.Assert(err, checker.IsNil, check.Commentf(out)) + id := strings.TrimSpace(out) + + var tasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.GetServiceTasks(c, id) + return len(tasks) > 0, nil + }, checker.Equals, true) + + task := tasks[0] + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + if task.NodeID == "" || task.Status.ContainerStatus.ContainerID == "" { + task = d.GetTask(c, task.ID) + } + return task.NodeID != "" && task.Status.ContainerStatus.ContainerID != "", nil + }, checker.Equals, true) + + // check container mount config + out, err = s.nodeCmd(c, task.NodeID, "inspect", "--format", "{{json .HostConfig.Mounts}}", task.Status.ContainerStatus.ContainerID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + var mountConfig []mount.Mount + c.Assert(json.Unmarshal([]byte(out), &mountConfig), checker.IsNil) + c.Assert(mountConfig, checker.HasLen, 1) + + c.Assert(mountConfig[0].Source, checker.Equals, "") + c.Assert(mountConfig[0].Target, checker.Equals, "/foo") + c.Assert(mountConfig[0].Type, checker.Equals, mount.TypeTmpfs) + c.Assert(mountConfig[0].TmpfsOptions, checker.NotNil) + c.Assert(mountConfig[0].TmpfsOptions.SizeBytes, checker.Equals, int64(1048576)) + + // check container mounts actual + out, err = s.nodeCmd(c, task.NodeID, "inspect", "--format", "{{json .Mounts}}", task.Status.ContainerStatus.ContainerID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + var mounts []types.MountPoint + c.Assert(json.Unmarshal([]byte(out), &mounts), checker.IsNil) + c.Assert(mounts, checker.HasLen, 1) + + c.Assert(mounts[0].Type, checker.Equals, mount.TypeTmpfs) + c.Assert(mounts[0].Name, checker.Equals, "") + c.Assert(mounts[0].Destination, checker.Equals, "/foo") + c.Assert(mounts[0].RW, checker.Equals, true) + + out, err = s.nodeCmd(c, task.NodeID, "logs", task.Status.ContainerStatus.ContainerID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.HasPrefix, "tmpfs on /foo type tmpfs") + c.Assert(strings.TrimSpace(out), checker.Contains, "size=1024k") +} + +func (s *DockerSwarmSuite) TestServiceCreateWithNetworkAlias(c *check.C) { + d := s.AddDaemon(c, true, true) + out, err := d.Cmd("network", "create", "--scope=swarm", "test_swarm_br") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "create", "--no-resolve-image", "--detach=true", "--network=name=test_swarm_br,alias=srv_alias", "--name=alias_tst_container", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + id := strings.TrimSpace(out) + + var tasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.GetServiceTasks(c, id) + return len(tasks) > 0, nil + }, checker.Equals, true) + + task := tasks[0] + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + if task.NodeID == "" || task.Status.ContainerStatus.ContainerID == "" { + task = d.GetTask(c, task.ID) + } + return task.NodeID != "" && task.Status.ContainerStatus.ContainerID != "", nil + }, checker.Equals, true) + + // check container alias config + out, err = s.nodeCmd(c, task.NodeID, "inspect", "--format", "{{json .NetworkSettings.Networks.test_swarm_br.Aliases}}", task.Status.ContainerStatus.ContainerID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Make sure the only alias 
seen is the container-id + var aliases []string + c.Assert(json.Unmarshal([]byte(out), &aliases), checker.IsNil) + c.Assert(aliases, checker.HasLen, 1) + + c.Assert(task.Status.ContainerStatus.ContainerID, checker.Contains, aliases[0]) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_service_health_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_service_health_test.go new file mode 100644 index 000000000..789838545 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_service_health_test.go @@ -0,0 +1,134 @@ +// +build !windows + +package main + +import ( + "strconv" + "strings" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/executor/container" + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +// start a service, then make its task unhealthy while it is running; +// the unhealthy task should then be detected and killed +func (s *DockerSwarmSuite) TestServiceHealthRun(c *check.C) { + testRequires(c, DaemonIsLinux) // busybox doesn't work on Windows + + d := s.AddDaemon(c, true, true) + + // build image with health-check + // note: use `daemon.buildImageWithOut`, not `buildImage`, to build it + imageName := "testhealth" + _, _, err := d.BuildImageWithOut(imageName, + `FROM busybox + RUN touch /status + HEALTHCHECK --interval=1s --timeout=1s --retries=1\ + CMD cat /status`, + true) + c.Check(err, check.IsNil) + + serviceName := "healthServiceRun" + out, err := d.Cmd("service", "create", "--no-resolve-image", "--detach=true", "--name", serviceName, imageName, "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + id := strings.TrimSpace(out) + + var tasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.GetServiceTasks(c, id) + return tasks, nil + }, checker.HasLen, 1) + + task := tasks[0] + + // wait for task to start + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + task = d.GetTask(c, task.ID) + return task.Status.State, nil + }, checker.Equals, swarm.TaskStateRunning) + containerID := task.Status.ContainerStatus.ContainerID + + // wait for container to be healthy + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + out, _ := d.Cmd("inspect", "--format={{.State.Health.Status}}", containerID) + return strings.TrimSpace(out), nil + }, checker.Equals, "healthy") + + // make it fail + d.Cmd("exec", containerID, "rm", "/status") + // wait for container to be unhealthy + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + out, _ := d.Cmd("inspect", "--format={{.State.Health.Status}}", containerID) + return strings.TrimSpace(out), nil + }, checker.Equals, "unhealthy") + + // Task should be terminated + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + task = d.GetTask(c, task.ID) + return task.Status.State, nil + }, checker.Equals, swarm.TaskStateFailed) + + if !strings.Contains(task.Status.Err, container.ErrContainerUnhealthy.Error()) { + c.Fatal("unhealthy task exited because of an unrelated error") + } +} + +// start a service whose task is unhealthy at the beginning; +// its tasks should be blocked in the starting stage until the health check passes +func (s *DockerSwarmSuite) TestServiceHealthStart(c *check.C) { + testRequires(c, DaemonIsLinux) // busybox
doesn't work on Windows + + d := s.AddDaemon(c, true, true) + + // service started from this image won't pass health check + imageName := "testhealth" + _, _, err := d.BuildImageWithOut(imageName, + `FROM busybox + HEALTHCHECK --interval=1s --timeout=1s --retries=1024\ + CMD cat /status`, + true) + c.Check(err, check.IsNil) + + serviceName := "healthServiceStart" + out, err := d.Cmd("service", "create", "--no-resolve-image", "--detach=true", "--name", serviceName, imageName, "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + id := strings.TrimSpace(out) + + var tasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.GetServiceTasks(c, id) + return tasks, nil + }, checker.HasLen, 1) + + task := tasks[0] + + // wait for task to start + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + task = d.GetTask(c, task.ID) + return task.Status.State, nil + }, checker.Equals, swarm.TaskStateStarting) + + containerID := task.Status.ContainerStatus.ContainerID + + // wait for health check to work + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + out, _ := d.Cmd("inspect", "--format={{.State.Health.FailingStreak}}", containerID) + failingStreak, _ := strconv.Atoi(strings.TrimSpace(out)) + return failingStreak, nil + }, checker.GreaterThan, 0) + + // task should be blocked at starting status + task = d.GetTask(c, task.ID) + c.Assert(task.Status.State, check.Equals, swarm.TaskStateStarting) + + // make it healthy + d.Cmd("exec", containerID, "touch", "/status") + + // Task should be at running status + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + task = d.GetTask(c, task.ID) + return task.Status.State, nil + }, checker.Equals, swarm.TaskStateRunning) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_service_logs_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_service_logs_test.go new file mode 100644 index 000000000..d2ce36def --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_service_logs_test.go @@ -0,0 +1,387 @@ +// +build !windows + +package main + +import ( + "bufio" + "fmt" + "io" + "os/exec" + "strings" + "time" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/daemon" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +type logMessage struct { + err error + data []byte +} + +func (s *DockerSwarmSuite) TestServiceLogs(c *check.C) { + d := s.AddDaemon(c, true, true) + + // we have multiple services here for detecting the goroutine issue #28915 + services := map[string]string{ + "TestServiceLogs1": "hello1", + "TestServiceLogs2": "hello2", + } + + for name, message := range services { + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "busybox", + "sh", "-c", fmt.Sprintf("echo %s; tail -f /dev/null", message)) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + } + + // make sure task has been deployed. 
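+ // (CheckRunningTaskImages reports a map of image name to running-task count, so this waits until each service has a running busybox task)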
+ waitAndAssert(c, defaultReconciliationTimeout, + d.CheckRunningTaskImages, checker.DeepEquals, + map[string]int{"busybox": len(services)}) + + for name, message := range services { + out, err := d.Cmd("service", "logs", name) + c.Assert(err, checker.IsNil) + c.Logf("log for %q: %q", name, out) + c.Assert(out, checker.Contains, message) + } +} + +// countLogLines returns a closure that can be used with waitAndAssert to +// verify that a minimum number of expected container log messages have been +// output. +func countLogLines(d *daemon.Swarm, name string) func(*check.C) (interface{}, check.CommentInterface) { + return func(c *check.C) (interface{}, check.CommentInterface) { + result := icmd.RunCmd(d.Command("service", "logs", "-t", "--raw", name)) + result.Assert(c, icmd.Expected{}) + // if this returns an empty string, trying to split it later will return + // an array containing an empty string. a valid log line will NEVER be + // empty because we ask for the timestamp. + if result.Stdout() == "" { + return 0, check.Commentf("Empty stdout") + } + lines := strings.Split(strings.TrimSpace(result.Stdout()), "\n") + return len(lines), check.Commentf("output, %q", string(result.Stdout())) + } +} + +func (s *DockerSwarmSuite) TestServiceLogsCompleteness(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "TestServiceLogsCompleteness" + + // make a service that prints 6 lines + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "busybox", "sh", "-c", "for line in $(seq 0 5); do echo log test $line; done; sleep 100000") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + // and make sure we have all the log lines + waitAndAssert(c, defaultReconciliationTimeout, countLogLines(d, name), checker.Equals, 6) + + out, err = d.Cmd("service", "logs", name) + c.Assert(err, checker.IsNil) + lines := strings.Split(strings.TrimSpace(out), "\n") + + // i have heard anecdotal reports that logs may come back from the engine + // mis-ordered. if this test fails, consider the possibility that that + // might be occurring + for i, line := range lines { + c.Assert(line, checker.Contains, fmt.Sprintf("log test %v", i)) + } +} + +func (s *DockerSwarmSuite) TestServiceLogsTail(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "TestServiceLogsTail" + + // make a service that prints 6 lines + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "busybox", "sh", "-c", "for line in $(seq 1 6); do echo log test $line; done; sleep 100000") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, countLogLines(d, name), checker.Equals, 6) + + out, err = d.Cmd("service", "logs", "--tail=2", name) + c.Assert(err, checker.IsNil) + lines := strings.Split(strings.TrimSpace(out), "\n") + + for i, line := range lines { + // doing i+5 is hacky but not too fragile, it's good enough.
if it flakes something else is wrong + c.Assert(line, checker.Contains, fmt.Sprintf("log test %v", i+5)) + } +} + +func (s *DockerSwarmSuite) TestServiceLogsSince(c *check.C) { + // See DockerSuite.TestLogsSince, which is where this comes from + d := s.AddDaemon(c, true, true) + + name := "TestServiceLogsSince" + + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "busybox", "sh", "-c", "for i in $(seq 1 3); do sleep .1; echo log$i; done; sleep 10000000") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + // wait a sec for the logs to come in + waitAndAssert(c, defaultReconciliationTimeout, countLogLines(d, name), checker.Equals, 3) + + out, err = d.Cmd("service", "logs", "-t", name) + c.Assert(err, checker.IsNil) + + log2Line := strings.Split(strings.Split(out, "\n")[1], " ") + t, err := time.Parse(time.RFC3339Nano, log2Line[0]) // timestamp log2 is written + c.Assert(err, checker.IsNil) + u := t.Add(50 * time.Millisecond) // add .05s so log1 & log2 don't show up + since := u.Format(time.RFC3339Nano) + + out, err = d.Cmd("service", "logs", "-t", fmt.Sprintf("--since=%v", since), name) + c.Assert(err, checker.IsNil) + + unexpected := []string{"log1", "log2"} + expected := []string{"log3"} + for _, v := range unexpected { + c.Assert(out, checker.Not(checker.Contains), v, check.Commentf("unexpected log message returned, since=%v", u)) + } + for _, v := range expected { + c.Assert(out, checker.Contains, v, check.Commentf("expected log message %v, was not present, since=%v", u)) + } +} + +func (s *DockerSwarmSuite) TestServiceLogsFollow(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "TestServiceLogsFollow" + + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "busybox", "sh", "-c", "while true; do echo log test; sleep 0.1; done") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + + args := []string{"service", "logs", "-f", name} + cmd := exec.Command(dockerBinary, d.PrependHostArg(args)...) 
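+ // stream the followed logs through an in-memory pipe so each line can be read and checked as it arrives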
+ r, w := io.Pipe() + cmd.Stdout = w + cmd.Stderr = w + c.Assert(cmd.Start(), checker.IsNil) + + // Make sure pipe is written to + ch := make(chan *logMessage) + done := make(chan struct{}) + go func() { + reader := bufio.NewReader(r) + for { + msg := &logMessage{} + msg.data, _, msg.err = reader.ReadLine() + select { + case ch <- msg: + case <-done: + return + } + } + }() + + for i := 0; i < 3; i++ { + msg := <-ch + c.Assert(msg.err, checker.IsNil) + c.Assert(string(msg.data), checker.Contains, "log test") + } + close(done) + + c.Assert(cmd.Process.Kill(), checker.IsNil) +} + +func (s *DockerSwarmSuite) TestServiceLogsTaskLogs(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "TestServicelogsTaskLogs" + replicas := 2 + + result := icmd.RunCmd(d.Command( + // create a service with the name + "service", "create", "--no-resolve-image", "--name", name, + // which has some number of replicas + fmt.Sprintf("--replicas=%v", replicas), + // which has the task id templated in as an environment variable + "--env", "TASK={{.Task.ID}}", + // and runs this command to print exactly 6 log lines + "busybox", "sh", "-c", "for line in $(seq 0 5); do echo $TASK log test $line; done; sleep 100000", + )) + result.Assert(c, icmd.Expected{}) + // ^^ verify that we get no error + // then verify that we have an id in stdout + id := strings.TrimSpace(result.Stdout()) + c.Assert(id, checker.Not(checker.Equals), "") + // so, right here, we're basically inspecting by id and returning only + // the ID. if they don't match, the service doesn't exist. + result = icmd.RunCmd(d.Command("service", "inspect", "--format=\"{{.ID}}\"", id)) + result.Assert(c, icmd.Expected{Out: id}) + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, replicas) + waitAndAssert(c, defaultReconciliationTimeout, countLogLines(d, name), checker.Equals, 6*replicas) + + // get the task ids + result = icmd.RunCmd(d.Command("service", "ps", "-q", name)) + result.Assert(c, icmd.Expected{}) + // make sure we have two tasks + taskIDs := strings.Split(strings.TrimSpace(result.Stdout()), "\n") + c.Assert(taskIDs, checker.HasLen, replicas) + + for _, taskID := range taskIDs { + c.Logf("checking task %v", taskID) + result := icmd.RunCmd(d.Command("service", "logs", taskID)) + result.Assert(c, icmd.Expected{}) + lines := strings.Split(strings.TrimSpace(result.Stdout()), "\n") + + c.Logf("checking messages for %v", taskID) + for i, line := range lines { + // make sure the message is in order + c.Assert(line, checker.Contains, fmt.Sprintf("log test %v", i)) + // make sure it contains the task id + c.Assert(line, checker.Contains, taskID) + } + } +} + +func (s *DockerSwarmSuite) TestServiceLogsTTY(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "TestServiceLogsTTY" + + result := icmd.RunCmd(d.Command( + // create a service + "service", "create", "--no-resolve-image", + // name it $name + "--name", name, + // use a TTY + "-t", + // busybox image, shell string + "busybox", "sh", "-c", + // echo to stdout and stderr + "echo out; (echo err 1>&2); sleep 10000", + )) + + result.Assert(c, icmd.Expected{}) + id := strings.TrimSpace(result.Stdout()) + c.Assert(id, checker.Not(checker.Equals), "") + // so, right here, we're basically inspecting by id and returning only + // the ID. if they don't match, the service doesn't exist.
+ result = icmd.RunCmd(d.Command("service", "inspect", "--format=\"{{.ID}}\"", id)) + result.Assert(c, icmd.Expected{Out: id}) + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + // and make sure we have all the log lines + waitAndAssert(c, defaultReconciliationTimeout, countLogLines(d, name), checker.Equals, 2) + + cmd := d.Command("service", "logs", "--raw", name) + result = icmd.RunCmd(cmd) + // for some reason there is carriage return in the output. i think this is + // just expected. + c.Assert(result, icmd.Matches, icmd.Expected{Out: "out\r\nerr\r\n"}) +} + +func (s *DockerSwarmSuite) TestServiceLogsNoHangDeletedContainer(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "TestServiceLogsNoHangDeletedContainer" + + result := icmd.RunCmd(d.Command( + // create a service + "service", "create", "--no-resolve-image", + // name it $name + "--name", name, + // busybox image, shell string + "busybox", "sh", "-c", + // echo to stdout and stderr + "while true; do echo line; sleep 2; done", + )) + + // confirm that the command succeeded + c.Assert(result, icmd.Matches, icmd.Expected{}) + // get the service id + id := strings.TrimSpace(result.Stdout()) + c.Assert(id, checker.Not(checker.Equals), "") + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + // and make sure we have all the log lines + waitAndAssert(c, defaultReconciliationTimeout, countLogLines(d, name), checker.Equals, 2) + + // now find and nuke the container + result = icmd.RunCmd(d.Command("ps", "-q")) + containerID := strings.TrimSpace(result.Stdout()) + c.Assert(containerID, checker.Not(checker.Equals), "") + result = icmd.RunCmd(d.Command("stop", containerID)) + c.Assert(result, icmd.Matches, icmd.Expected{Out: containerID}) + result = icmd.RunCmd(d.Command("rm", containerID)) + c.Assert(result, icmd.Matches, icmd.Expected{Out: containerID}) + + // run logs. use tail 2 to make sure we don't try to get a bunch of logs + // somehow and slow down execution time + cmd := d.Command("service", "logs", "--tail", "2", id) + // start the command and then wait for it to finish with a 3 second timeout + result = icmd.StartCmd(cmd) + result = icmd.WaitOnCmd(3*time.Second, result) + + // then, assert that the result matches expected. 
if the command timed out, + // result.Timeout will be true, but the Expected defaults to false + c.Assert(result, icmd.Matches, icmd.Expected{}) +} + +func (s *DockerSwarmSuite) TestServiceLogsDetails(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "TestServiceLogsDetails" + + result := icmd.RunCmd(d.Command( + // create a service + "service", "create", "--no-resolve-image", + // name it $name + "--name", name, + // add an environment variable + "--env", "asdf=test1", + // add a log driver (without explicitly setting a driver, log-opt doesn't work) + "--log-driver", "json-file", + // add a log option to print the environment variable + "--log-opt", "env=asdf", + // busybox image, shell string + "busybox", "sh", "-c", + // make a log line + "echo LogLine; while true; do sleep 1; done;", + )) + + result.Assert(c, icmd.Expected{}) + id := strings.TrimSpace(result.Stdout()) + c.Assert(id, checker.Not(checker.Equals), "") + + // make sure task has been deployed + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + // and make sure we have all the log lines + waitAndAssert(c, defaultReconciliationTimeout, countLogLines(d, name), checker.Equals, 1) + + // First, test without pretty printing + // call service logs with details. set raw to skip pretty printing + result = icmd.RunCmd(d.Command("service", "logs", "--raw", "--details", name)) + // in this case, we should get details and we should get log message, but + // there will also be context as details (which will fall after the detail + // we inserted in alphabetical order) + c.Assert(result, icmd.Matches, icmd.Expected{Out: "asdf=test1"}) + c.Assert(result, icmd.Matches, icmd.Expected{Out: "LogLine"}) + + // call service logs with details. this time, don't pass raw + result = icmd.RunCmd(d.Command("service", "logs", "--details", id)) + // in this case, we should get the details, a space, and then the log message. the context + // is part of the pretty-printed portion of the log line + c.Assert(result, icmd.Matches, icmd.Expected{Out: "asdf=test1 LogLine"}) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_service_scale_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_service_scale_test.go new file mode 100644 index 000000000..8fb84fed8 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_service_scale_test.go @@ -0,0 +1,57 @@ +// +build !windows + +package main + +import ( + "fmt" + "strings" + + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestServiceScale(c *check.C) { + d := s.AddDaemon(c, true, true) + + service1Name := "TestService1" + service1Args := append([]string{"service", "create", "--no-resolve-image", "--name", service1Name, defaultSleepImage}, sleepCommandForDaemonPlatform()...) + + // global mode + service2Name := "TestService2" + service2Args := append([]string{"service", "create", "--no-resolve-image", "--name", service2Name, "--mode=global", defaultSleepImage}, sleepCommandForDaemonPlatform()...) + + // Create services + out, err := d.Cmd(service1Args...) + c.Assert(err, checker.IsNil) + + out, err = d.Cmd(service2Args...)
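+ // both services are now created; the scale subcommand is exercised below with valid, non-numeric, negative, and global-mode arguments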
+ c.Assert(err, checker.IsNil) + + out, err = d.Cmd("service", "scale", "TestService1=2") + c.Assert(err, checker.IsNil) + + out, err = d.Cmd("service", "scale", "TestService1=foobar") + c.Assert(err, checker.NotNil) + + str := fmt.Sprintf("%s: invalid replicas value %s", service1Name, "foobar") + if !strings.Contains(out, str) { + c.Errorf("got: %s, expected has sub string: %s", out, str) + } + + out, err = d.Cmd("service", "scale", "TestService1=-1") + c.Assert(err, checker.NotNil) + + str = fmt.Sprintf("%s: invalid replicas value %s", service1Name, "-1") + if !strings.Contains(out, str) { + c.Errorf("got: %s, expected has sub string: %s", out, str) + } + + // TestService2 is a global mode + out, err = d.Cmd("service", "scale", "TestService2=2") + c.Assert(err, checker.NotNil) + + str = fmt.Sprintf("%s: scale can only be used with replicated mode\n", service2Name) + if out != str { + c.Errorf("got: %s, expected: %s", out, str) + } +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_service_update_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_service_update_test.go new file mode 100644 index 000000000..086ae773e --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_service_update_test.go @@ -0,0 +1,172 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestServiceUpdatePort(c *check.C) { + d := s.AddDaemon(c, true, true) + + serviceName := "TestServiceUpdatePort" + serviceArgs := append([]string{"service", "create", "--no-resolve-image", "--name", serviceName, "-p", "8080:8081", defaultSleepImage}, sleepCommandForDaemonPlatform()...) + + // Create a service with a port mapping of 8080:8081. + out, err := d.Cmd(serviceArgs...) + c.Assert(err, checker.IsNil) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + + // Update the service: changed the port mapping from 8080:8081 to 8082:8083. 
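+ // note that --publish-rm is given the container-side target port (8081) here, not the published port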
+ _, err = d.Cmd("service", "update", "--publish-add", "8082:8083", "--publish-rm", "8081", serviceName) + c.Assert(err, checker.IsNil) + + // Inspect the service and verify port mapping + expected := []swarm.PortConfig{ + { + Protocol: "tcp", + PublishedPort: 8082, + TargetPort: 8083, + PublishMode: "ingress", + }, + } + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.EndpointSpec.Ports }}", serviceName) + c.Assert(err, checker.IsNil) + + var portConfig []swarm.PortConfig + if err := json.Unmarshal([]byte(out), &portConfig); err != nil { + c.Fatalf("invalid JSON in inspect result: %v (%s)", err, out) + } + c.Assert(portConfig, checker.DeepEquals, expected) +} + +func (s *DockerSwarmSuite) TestServiceUpdateLabel(c *check.C) { + d := s.AddDaemon(c, true, true) + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name=test", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + service := d.GetService(c, "test") + c.Assert(service.Spec.Labels, checker.HasLen, 0) + + // add label to empty set + out, err = d.Cmd("service", "update", "test", "--label-add", "foo=bar") + c.Assert(err, checker.IsNil, check.Commentf(out)) + service = d.GetService(c, "test") + c.Assert(service.Spec.Labels, checker.HasLen, 1) + c.Assert(service.Spec.Labels["foo"], checker.Equals, "bar") + + // add label to non-empty set + out, err = d.Cmd("service", "update", "test", "--label-add", "foo2=bar") + c.Assert(err, checker.IsNil, check.Commentf(out)) + service = d.GetService(c, "test") + c.Assert(service.Spec.Labels, checker.HasLen, 2) + c.Assert(service.Spec.Labels["foo2"], checker.Equals, "bar") + + out, err = d.Cmd("service", "update", "test", "--label-rm", "foo2") + c.Assert(err, checker.IsNil, check.Commentf(out)) + service = d.GetService(c, "test") + c.Assert(service.Spec.Labels, checker.HasLen, 1) + c.Assert(service.Spec.Labels["foo2"], checker.Equals, "") + + out, err = d.Cmd("service", "update", "test", "--label-rm", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + service = d.GetService(c, "test") + c.Assert(service.Spec.Labels, checker.HasLen, 0) + c.Assert(service.Spec.Labels["foo"], checker.Equals, "") + + // now make sure we can add again + out, err = d.Cmd("service", "update", "test", "--label-add", "foo=bar") + c.Assert(err, checker.IsNil, check.Commentf(out)) + service = d.GetService(c, "test") + c.Assert(service.Spec.Labels, checker.HasLen, 1) + c.Assert(service.Spec.Labels["foo"], checker.Equals, "bar") +} + +func (s *DockerSwarmSuite) TestServiceUpdateSecrets(c *check.C) { + d := s.AddDaemon(c, true, true) + testName := "test_secret" + id := d.CreateSecret(c, swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + testTarget := "testing" + serviceName := "test" + + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", serviceName, "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // add secret + out, err = d.CmdRetryOutOfSequence("service", "update", "test", "--secret-add", fmt.Sprintf("source=%s,target=%s", testName, testTarget)) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName) + c.Assert(err, checker.IsNil) + + var refs []swarm.SecretReference + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, 
1) + + c.Assert(refs[0].SecretName, checker.Equals, testName) + c.Assert(refs[0].File, checker.Not(checker.IsNil)) + c.Assert(refs[0].File.Name, checker.Equals, testTarget) + + // remove + out, err = d.CmdRetryOutOfSequence("service", "update", "test", "--secret-rm", testName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName) + c.Assert(err, checker.IsNil) + + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, 0) +} + +func (s *DockerSwarmSuite) TestServiceUpdateConfigs(c *check.C) { + d := s.AddDaemon(c, true, true) + testName := "test_config" + id := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + testTarget := "/testing" + serviceName := "test" + + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", serviceName, "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // add config + out, err = d.CmdRetryOutOfSequence("service", "update", "test", "--config-add", fmt.Sprintf("source=%s,target=%s", testName, testTarget)) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Configs }}", serviceName) + c.Assert(err, checker.IsNil) + + var refs []swarm.ConfigReference + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, 1) + + c.Assert(refs[0].ConfigName, checker.Equals, testName) + c.Assert(refs[0].File, checker.Not(checker.IsNil)) + c.Assert(refs[0].File.Name, checker.Equals, testTarget) + + // remove + out, err = d.CmdRetryOutOfSequence("service", "update", "test", "--config-rm", testName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Configs }}", serviceName) + c.Assert(err, checker.IsNil) + + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, 0) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_sni_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_sni_test.go new file mode 100644 index 000000000..fb896d52d --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_sni_test.go @@ -0,0 +1,44 @@ +package main + +import ( + "fmt" + "io/ioutil" + "log" + "net/http" + "net/http/httptest" + "net/url" + "os/exec" + "strings" + + "github.com/go-check/check" +) + +func (s *DockerSuite) TestClientSetsTLSServerName(c *check.C) { + c.Skip("Flakey test") + // there may be more than one hit to the server for each registry request + serverNameReceived := []string{} + var serverName string + + virtualHostServer := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + serverNameReceived = append(serverNameReceived, r.TLS.ServerName) + })) + defer virtualHostServer.Close() + // discard TLS handshake errors written by default to os.Stderr + virtualHostServer.Config.ErrorLog = log.New(ioutil.Discard, "", 0) + + u, err := url.Parse(virtualHostServer.URL) + c.Assert(err, check.IsNil) + hostPort := u.Host + serverName = strings.Split(hostPort, ":")[0] + + repoName := fmt.Sprintf("%v/dockercli/image:latest", hostPort) + cmd := exec.Command(dockerBinary, "pull", repoName) + cmd.Run() + + 
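// the pull itself is expected to fail; only the ServerName values recorded by the fake registry matter +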
// check that the fake server was hit at least once + c.Assert(len(serverNameReceived) > 0, check.Equals, true) + // check that for each hit the right server name was received + for _, item := range serverNameReceived { + c.Check(item, check.Equals, serverName) + } +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_stack_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_stack_test.go new file mode 100644 index 000000000..91fe4d75c --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_stack_test.go @@ -0,0 +1,206 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "io/ioutil" + "os" + "sort" + "strings" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/integration-cli/checker" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +var cleanSpaces = func(s string) string { + lines := strings.Split(s, "\n") + for i, line := range lines { + spaceIx := strings.Index(line, " ") + if spaceIx > 0 { + lines[i] = line[:spaceIx+1] + strings.TrimLeft(line[spaceIx:], " ") + } + } + return strings.Join(lines, "\n") +} + +func (s *DockerSwarmSuite) TestStackRemoveUnknown(c *check.C) { + d := s.AddDaemon(c, true, true) + + stackArgs := append([]string{"stack", "remove", "UNKNOWN_STACK"}) + + out, err := d.Cmd(stackArgs...) + c.Assert(err, checker.IsNil) + c.Assert(out, check.Equals, "Nothing found in stack: UNKNOWN_STACK\n") +} + +func (s *DockerSwarmSuite) TestStackPSUnknown(c *check.C) { + d := s.AddDaemon(c, true, true) + + stackArgs := append([]string{"stack", "ps", "UNKNOWN_STACK"}) + + out, err := d.Cmd(stackArgs...) + c.Assert(err, checker.IsNil) + c.Assert(out, check.Equals, "Nothing found in stack: UNKNOWN_STACK\n") +} + +func (s *DockerSwarmSuite) TestStackServicesUnknown(c *check.C) { + d := s.AddDaemon(c, true, true) + + stackArgs := append([]string{"stack", "services", "UNKNOWN_STACK"}) + + out, err := d.Cmd(stackArgs...) + c.Assert(err, checker.IsNil) + c.Assert(out, check.Equals, "Nothing found in stack: UNKNOWN_STACK\n") +} + +func (s *DockerSwarmSuite) TestStackDeployComposeFile(c *check.C) { + d := s.AddDaemon(c, true, true) + + testStackName := "testdeploy" + stackArgs := []string{ + "stack", "deploy", + "--compose-file", "fixtures/deploy/default.yaml", + testStackName, + } + out, err := d.Cmd(stackArgs...) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("stack", "ls") + c.Assert(err, checker.IsNil) + c.Assert(cleanSpaces(out), check.Equals, "NAME SERVICES\n"+"testdeploy 2\n") + + out, err = d.Cmd("stack", "rm", testStackName) + c.Assert(err, checker.IsNil) + out, err = d.Cmd("stack", "ls") + c.Assert(err, checker.IsNil) + c.Assert(cleanSpaces(out), check.Equals, "NAME SERVICES\n") +} + +func (s *DockerSwarmSuite) TestStackDeployWithSecretsTwice(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("secret", "create", "outside", "fixtures/secrets/default") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + testStackName := "testdeploy" + stackArgs := []string{ + "stack", "deploy", + "--compose-file", "fixtures/deploy/secrets.yaml", + testStackName, + } + out, err = d.Cmd(stackArgs...) 
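+ // the deployed web service's secret references are unmarshalled and sorted by name below so the assertions are order-independent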
+ c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", "testdeploy_web") + c.Assert(err, checker.IsNil) + + var refs []swarm.SecretReference + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, 3) + + sort.Sort(sortSecrets(refs)) + c.Assert(refs[0].SecretName, checker.Equals, "outside") + c.Assert(refs[1].SecretName, checker.Equals, "testdeploy_special") + c.Assert(refs[1].File.Name, checker.Equals, "special") + c.Assert(refs[2].SecretName, checker.Equals, "testdeploy_super") + c.Assert(refs[2].File.Name, checker.Equals, "foo.txt") + c.Assert(refs[2].File.Mode, checker.Equals, os.FileMode(0400)) + + // Deploy again to ensure there are no errors when secret hasn't changed + out, err = d.Cmd(stackArgs...) + c.Assert(err, checker.IsNil, check.Commentf(out)) +} + +func (s *DockerSwarmSuite) TestStackRemove(c *check.C) { + d := s.AddDaemon(c, true, true) + + stackName := "testdeploy" + stackArgs := []string{ + "stack", "deploy", + "--compose-file", "fixtures/deploy/remove.yaml", + stackName, + } + result := icmd.RunCmd(d.Command(stackArgs...)) + result.Assert(c, icmd.Expected{ + Err: icmd.None, + Out: "Creating service testdeploy_web", + }) + + result = icmd.RunCmd(d.Command("service", "ls")) + result.Assert(c, icmd.Success) + c.Assert( + strings.Split(strings.TrimSpace(result.Stdout()), "\n"), + checker.HasLen, 2) + + result = icmd.RunCmd(d.Command("stack", "rm", stackName)) + result.Assert(c, icmd.Success) + stderr := result.Stderr() + c.Assert(stderr, checker.Contains, "Removing service testdeploy_web") + c.Assert(stderr, checker.Contains, "Removing network testdeploy_default") + c.Assert(stderr, checker.Contains, "Removing secret testdeploy_special") +} + +type sortSecrets []swarm.SecretReference + +func (s sortSecrets) Len() int { return len(s) } +func (s sortSecrets) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s sortSecrets) Less(i, j int) bool { return s[i].SecretName < s[j].SecretName } + +// testDAB is the DAB JSON used for testing. +// TODO: Use template/text and substitute "Image" with the result of +// `docker inspect --format '{{index .RepoDigests 0}}' busybox:latest` +const testDAB = `{ + "Version": "0.1", + "Services": { + "srv1": { + "Image": "busybox@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0", + "Command": ["top"] + }, + "srv2": { + "Image": "busybox@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0", + "Command": ["tail"], + "Args": ["-f", "/dev/null"] + } + } +}` + +func (s *DockerSwarmSuite) TestStackDeployWithDAB(c *check.C) { + testRequires(c, ExperimentalDaemon) + // setup + testStackName := "test" + testDABFileName := testStackName + ".dab" + defer os.RemoveAll(testDABFileName) + err := ioutil.WriteFile(testDABFileName, []byte(testDAB), 0444) + c.Assert(err, checker.IsNil) + d := s.AddDaemon(c, true, true) + // deploy + stackArgs := []string{ + "stack", "deploy", + "--bundle-file", testDABFileName, + testStackName, + } + out, err := d.Cmd(stackArgs...) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Loading bundle from test.dab\n") + c.Assert(out, checker.Contains, "Creating service test_srv1\n") + c.Assert(out, checker.Contains, "Creating service test_srv2\n") + // ls + stackArgs = []string{"stack", "ls"} + out, err = d.Cmd(stackArgs...) 
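+ // stack ls output is normalized with cleanSpaces before comparison, since column padding varies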
+ c.Assert(err, checker.IsNil) + c.Assert(cleanSpaces(out), check.Equals, "NAME SERVICES\n"+"test 2\n") + // rm + stackArgs = []string{"stack", "rm", testStackName} + out, err = d.Cmd(stackArgs...) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Removing service test_srv1\n") + c.Assert(out, checker.Contains, "Removing service test_srv2\n") + // ls (empty) + stackArgs = []string{"stack", "ls"} + out, err = d.Cmd(stackArgs...) + c.Assert(err, checker.IsNil) + c.Assert(cleanSpaces(out), check.Equals, "NAME SERVICES\n") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_start_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_start_test.go new file mode 100644 index 000000000..2dd5fdf5f --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_start_test.go @@ -0,0 +1,199 @@ +package main + +import ( + "fmt" + "strings" + "time" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +// Regression test for https://github.com/docker/docker/issues/7843 +func (s *DockerSuite) TestStartAttachReturnsOnError(c *check.C) { + // Windows does not support link + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "--name", "test", "busybox") + + // Expect this to fail because the above container is stopped; this is what we want + out, _, err := dockerCmdWithError("run", "--name", "test2", "--link", "test:test", "busybox") + // err shouldn't be nil because container test2 tries to link to a stopped container + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + + ch := make(chan error) + go func() { + // Attempt to start attached to the container that won't start + // This should return an error immediately since the container can't be started + if out, _, err := dockerCmdWithError("start", "-a", "test2"); err == nil { + ch <- fmt.Errorf("Expected error but got none:\n%s", out) + } + close(ch) + }() + + select { + case err := <-ch: + c.Assert(err, check.IsNil) + case <-time.After(5 * time.Second): + c.Fatalf("Attach did not exit properly") + } +} + +// gh#8555: Exit code should be passed through when using start -a +func (s *DockerSuite) TestStartAttachCorrectExitCode(c *check.C) { + testRequires(c, DaemonIsLinux) + out := cli.DockerCmd(c, "run", "-d", "busybox", "sh", "-c", "sleep 2; exit 1").Stdout() + out = strings.TrimSpace(out) + + // make sure the container has exited before trying the "start -a" + cli.DockerCmd(c, "wait", out) + + cli.Docker(cli.Args("start", "-a", out)).Assert(c, icmd.Expected{ + ExitCode: 1, + }) +} + +func (s *DockerSuite) TestStartAttachSilent(c *check.C) { + name := "teststartattachcorrectexitcode" + dockerCmd(c, "run", "--name", name, "busybox", "echo", "test") + + // make sure the container has exited before trying the "start -a" + dockerCmd(c, "wait", name) + + startOut, _ := dockerCmd(c, "start", "-a", name) + // start -a produced unexpected output + c.Assert(startOut, checker.Equals, "test\n") +} + +func (s *DockerSuite) TestStartRecordError(c *check.C) { + // TODO Windows CI: Requires further porting work. Should be possible.
+ testRequires(c, DaemonIsLinux) + // when a container runs successfully, we should not have state.Error + dockerCmd(c, "run", "-d", "-p", "9999:9999", "--name", "test", "busybox", "top") + stateErr := inspectField(c, "test", "State.Error") + // Expected to not have state error + c.Assert(stateErr, checker.Equals, "") + + // Expect this to fail and record an error because of the port conflict + out, _, err := dockerCmdWithError("run", "-d", "--name", "test2", "-p", "9999:9999", "busybox", "top") + // err shouldn't be nil because docker run will fail + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + + stateErr = inspectField(c, "test2", "State.Error") + c.Assert(stateErr, checker.Contains, "port is already allocated") + + // Expect the conflict to be resolved when we stop the initial container + dockerCmd(c, "stop", "test") + dockerCmd(c, "start", "test2") + stateErr = inspectField(c, "test2", "State.Error") + // Expected to not have state error but got one + c.Assert(stateErr, checker.Equals, "") +} + +func (s *DockerSuite) TestStartPausedContainer(c *check.C) { + // Windows does not support pausing containers + testRequires(c, IsPausable) + + runSleepingContainer(c, "-d", "--name", "testing") + + dockerCmd(c, "pause", "testing") + + out, _, err := dockerCmdWithError("start", "testing") + // an error should have been shown that you cannot start a paused container + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + c.Assert(out, checker.Contains, "Cannot start a paused container, try unpause instead.") +} + +func (s *DockerSuite) TestStartMultipleContainers(c *check.C) { + // Windows does not support --link + testRequires(c, DaemonIsLinux) + // run a container named 'parent' and create two containers linked to `parent` + dockerCmd(c, "run", "-d", "--name", "parent", "busybox", "top") + + for _, container := range []string{"child_first", "child_second"} { + dockerCmd(c, "create", "--name", container, "--link", "parent:parent", "busybox", "top") + } + + // stop 'parent' container + dockerCmd(c, "stop", "parent") + + out := inspectField(c, "parent", "State.Running") + // Container should be stopped + c.Assert(out, checker.Equals, "false") + + // start all three containers; `child_first` starts first and should fail, + // then container 'parent' starts, followed by container 'child_second' + expOut := "Cannot link to a non running container" + expErr := "failed to start containers: [child_first]" + out, _, err := dockerCmdWithError("start", "child_first", "parent", "child_second") + // err shouldn't be nil because start will fail + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + // output does not correspond to what was expected + if !(strings.Contains(out, expOut) || strings.Contains(err.Error(), expErr)) { + c.Fatalf("Expected out: %v with err: %v but got out: %v with err: %v", expOut, expErr, out, err) + } + + for container, expected := range map[string]string{"parent": "true", "child_first": "false", "child_second": "true"} { + out := inspectField(c, container, "State.Running") + // Container running state wrong + c.Assert(out, checker.Equals, expected) + } +} + +func (s *DockerSuite) TestStartAttachMultipleContainers(c *check.C) { + // run multiple containers to test + for _, container := range []string{"test1", "test2", "test3"} { + runSleepingContainer(c, "--name", container) + } + + // stop all the containers + for _, container := range []string{"test1", "test2", "test3"} { + dockerCmd(c, "stop", container) + } + + // test starting and attaching multiple containers at once; an error is expected + for _, option := range []string{"-a", "-i", "-ai"} { + out, _, err := dockerCmdWithError("start", option, "test1", "test2", "test3") + // err shouldn't be nil because start will fail + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + // output does not correspond to what was expected + c.Assert(out, checker.Contains, "you cannot start and attach multiple containers at once") + } + + // confirm that all the containers remain stopped + for container, expected := range map[string]string{"test1": "false", "test2": "false", "test3": "false"} { + out := inspectField(c, container, "State.Running") + // Container running state wrong + c.Assert(out, checker.Equals, expected) + } +} + +// Test case for #23716 +func (s *DockerSuite) TestStartAttachWithRename(c *check.C) { + testRequires(c, DaemonIsLinux) + cli.DockerCmd(c, "create", "-t", "--name", "before", "busybox") + go func() { + cli.WaitRun(c, "before") + cli.DockerCmd(c, "rename", "before", "after") + cli.DockerCmd(c, "stop", "--time=2", "after") + }() + // FIXME(vdemeester) the intent is not clear and potentially racy + result := cli.Docker(cli.Args("start", "-a", "before")).Assert(c, icmd.Expected{ + ExitCode: 137, + }) + c.Assert(result.Stderr(), checker.Not(checker.Contains), "No such container") +} + +func (s *DockerSuite) TestStartReturnCorrectExitCode(c *check.C) { + dockerCmd(c, "create", "--restart=on-failure:2", "--name", "withRestart", "busybox", "sh", "-c", "exit 11") + dockerCmd(c, "create", "--rm", "--name", "withRm", "busybox", "sh", "-c", "exit 12") + + _, exitCode, err := dockerCmdWithError("start", "-a", "withRestart") + c.Assert(err, checker.NotNil) + c.Assert(exitCode, checker.Equals, 11) + _, exitCode, err = dockerCmdWithError("start", "-a", "withRm") + c.Assert(err, checker.NotNil) + c.Assert(exitCode, checker.Equals, 12) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_stats_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_stats_test.go new file mode 100644 index 000000000..9d40ce028 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_stats_test.go @@ -0,0 +1,179 @@ +package main + +import ( + "bufio" + "os/exec" + "regexp" + "strings" + "time" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestStatsNoStream(c *check.C) { + // Windows does not support stats + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + statsCmd := exec.Command(dockerBinary, "stats", "--no-stream", id) + type output struct { + out []byte + err error + } + + ch := make(chan output) + go func() { + out, err := statsCmd.Output() + ch <- output{out, err} + }() + + select { + case outerr := <-ch: + c.Assert(outerr.err, checker.IsNil, check.Commentf("Error running stats: %v", outerr.err)) + c.Assert(string(outerr.out), checker.Contains, id) // running container wasn't present in output + case <-time.After(3 * time.Second): + statsCmd.Process.Kill() + c.Fatalf("stats did not return immediately when not streaming") + } +} + +func (s *DockerSuite) TestStatsContainerNotFound(c *check.C) { + // Windows does not support stats + testRequires(c, DaemonIsLinux) + + out, _, err := dockerCmdWithError("stats", "notfound") +
c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "No such container: notfound", check.Commentf("Expected to fail on not found container stats, got %q instead", out)) + + out, _, err = dockerCmdWithError("stats", "--no-stream", "notfound") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "No such container: notfound", check.Commentf("Expected to fail on not found container stats with --no-stream, got %q instead", out)) +} + +func (s *DockerSuite) TestStatsAllRunningNoStream(c *check.C) { + // Windows does not support stats + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + id1 := strings.TrimSpace(out)[:12] + c.Assert(waitRun(id1), check.IsNil) + out, _ = dockerCmd(c, "run", "-d", "busybox", "top") + id2 := strings.TrimSpace(out)[:12] + c.Assert(waitRun(id2), check.IsNil) + out, _ = dockerCmd(c, "run", "-d", "busybox", "top") + id3 := strings.TrimSpace(out)[:12] + c.Assert(waitRun(id3), check.IsNil) + dockerCmd(c, "stop", id3) + + out, _ = dockerCmd(c, "stats", "--no-stream") + if !strings.Contains(out, id1) || !strings.Contains(out, id2) { + c.Fatalf("Expected stats output to contain both %s and %s, got %s", id1, id2, out) + } + if strings.Contains(out, id3) { + c.Fatalf("Did not expect %s in stats, got %s", id3, out) + } + + // check output contains real data, but not all zeros + reg, _ := regexp.Compile("[1-9]+") + // split output with "\n", outLines[1] is id2's output + // outLines[2] is id1's output + outLines := strings.Split(out, "\n") + // check stat result of id2 contains real data + realData := reg.Find([]byte(outLines[1][12:])) + c.Assert(realData, checker.NotNil, check.Commentf("stat result are empty: %s", out)) + // check stat result of id1 contains real data + realData = reg.Find([]byte(outLines[2][12:])) + c.Assert(realData, checker.NotNil, check.Commentf("stat result are empty: %s", out)) +} + +func (s *DockerSuite) TestStatsAllNoStream(c *check.C) { + // Windows does not support stats + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + id1 := strings.TrimSpace(out)[:12] + c.Assert(waitRun(id1), check.IsNil) + dockerCmd(c, "stop", id1) + out, _ = dockerCmd(c, "run", "-d", "busybox", "top") + id2 := strings.TrimSpace(out)[:12] + c.Assert(waitRun(id2), check.IsNil) + + out, _ = dockerCmd(c, "stats", "--all", "--no-stream") + if !strings.Contains(out, id1) || !strings.Contains(out, id2) { + c.Fatalf("Expected stats output to contain both %s and %s, got %s", id1, id2, out) + } + + // check output contains real data, but not all zeros + reg, _ := regexp.Compile("[1-9]+") + // split output with "\n", outLines[1] is id2's output + outLines := strings.Split(out, "\n") + // check stat result of id2 contains real data + realData := reg.Find([]byte(outLines[1][12:])) + c.Assert(realData, checker.NotNil, check.Commentf("stat result of %s is empty: %s", id2, out)) + // check stat result of id1 contains all zero + realData = reg.Find([]byte(outLines[2][12:])) + c.Assert(realData, checker.IsNil, check.Commentf("stat result of %s should be empty : %s", id1, out)) +} + +func (s *DockerSuite) TestStatsAllNewContainersAdded(c *check.C) { + // Windows does not support stats + testRequires(c, DaemonIsLinux) + + id := make(chan string) + addedChan := make(chan struct{}) + + runSleepingContainer(c, "-d") + statsCmd := exec.Command(dockerBinary, "stats") + stdout, err := statsCmd.StdoutPipe() + c.Assert(err, check.IsNil) + c.Assert(statsCmd.Start(), check.IsNil) + defer 
statsCmd.Process.Kill() + + go func() { + containerID := <-id + matchID := regexp.MustCompile(containerID) + + scanner := bufio.NewScanner(stdout) + for scanner.Scan() { + switch { + case matchID.MatchString(scanner.Text()): + close(addedChan) + return + } + } + }() + + out := runSleepingContainer(c, "-d") + c.Assert(waitRun(strings.TrimSpace(out)), check.IsNil) + id <- strings.TrimSpace(out)[:12] + + select { + case <-time.After(30 * time.Second): + c.Fatal("failed to observe new container created added to stats") + case <-addedChan: + // ignore, done + } +} + +func (s *DockerSuite) TestStatsFormatAll(c *check.C) { + // Windows does not support stats + testRequires(c, DaemonIsLinux) + + cli.DockerCmd(c, "run", "-d", "--name=RunningOne", "busybox", "top") + cli.WaitRun(c, "RunningOne") + cli.DockerCmd(c, "run", "-d", "--name=ExitedOne", "busybox", "top") + cli.DockerCmd(c, "stop", "ExitedOne") + cli.WaitExited(c, "ExitedOne", 5*time.Second) + + out := cli.DockerCmd(c, "stats", "--no-stream", "--format", "{{.Name}}").Combined() + c.Assert(out, checker.Contains, "RunningOne") + c.Assert(out, checker.Not(checker.Contains), "ExitedOne") + + out = cli.DockerCmd(c, "stats", "--all", "--no-stream", "--format", "{{.Name}}").Combined() + c.Assert(out, checker.Contains, "RunningOne") + c.Assert(out, checker.Contains, "ExitedOne") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_stop_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_stop_test.go new file mode 100644 index 000000000..1be41203b --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_stop_test.go @@ -0,0 +1,17 @@ +package main + +import ( + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestStopContainerWithRestartPolicyAlways(c *check.C) { + dockerCmd(c, "run", "--name", "verifyRestart1", "-d", "--restart=always", "busybox", "false") + dockerCmd(c, "run", "--name", "verifyRestart2", "-d", "--restart=always", "busybox", "false") + + c.Assert(waitRun("verifyRestart1"), checker.IsNil) + c.Assert(waitRun("verifyRestart2"), checker.IsNil) + + dockerCmd(c, "stop", "verifyRestart1") + dockerCmd(c, "stop", "verifyRestart2") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_swarm_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_swarm_test.go new file mode 100644 index 000000000..a0bb7a228 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_swarm_test.go @@ -0,0 +1,2230 @@ +// +build !windows + +package main + +import ( + "bytes" + "crypto/x509" + "encoding/json" + "encoding/pem" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "os/exec" + "path/filepath" + "strings" + "time" + + "github.com/cloudflare/cfssl/helpers" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/daemon" + "github.com/docker/docker/pkg/testutil" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/docker/docker/pkg/testutil/tempfile" + "github.com/docker/libnetwork/driverapi" + "github.com/docker/libnetwork/ipamapi" + remoteipam "github.com/docker/libnetwork/ipams/remote/api" + "github.com/go-check/check" + "github.com/vishvananda/netlink" +) + +func (s *DockerSwarmSuite) TestSwarmUpdate(c *check.C) { + d := s.AddDaemon(c, true, true) + + getSpec := func() swarm.Spec { + sw := d.GetSwarm(c) + 
return sw.Spec + } + + out, err := d.Cmd("swarm", "update", "--cert-expiry", "30h", "--dispatcher-heartbeat", "11s") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + + spec := getSpec() + c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 30*time.Hour) + c.Assert(spec.Dispatcher.HeartbeatPeriod, checker.Equals, 11*time.Second) + + // setting anything under 30m for cert-expiry is not allowed + out, err = d.Cmd("swarm", "update", "--cert-expiry", "15m") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "minimum certificate expiry time") + spec = getSpec() + c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 30*time.Hour) + + // passing an external CA (this is without starting a root rotation) does not fail + cli.Docker(cli.Args("swarm", "update", "--external-ca", "protocol=cfssl,url=https://something.org", + "--external-ca", "protocol=cfssl,url=https://somethingelse.org,cacert=fixtures/https/ca.pem"), + cli.Daemon(d.Daemon)).Assert(c, icmd.Success) + + expected, err := ioutil.ReadFile("fixtures/https/ca.pem") + c.Assert(err, checker.IsNil) + + spec = getSpec() + c.Assert(spec.CAConfig.ExternalCAs, checker.HasLen, 2) + c.Assert(spec.CAConfig.ExternalCAs[0].CACert, checker.Equals, "") + c.Assert(spec.CAConfig.ExternalCAs[1].CACert, checker.Equals, string(expected)) + + // passing an invalid external CA fails + tempFile := tempfile.NewTempFile(c, "testfile", "fakecert") + defer tempFile.Remove() + + result := cli.Docker(cli.Args("swarm", "update", + "--external-ca", fmt.Sprintf("protocol=cfssl,url=https://something.org,cacert=%s", tempFile.Name())), + cli.Daemon(d.Daemon)) + result.Assert(c, icmd.Expected{ + ExitCode: 125, + Err: "must be in PEM format", + }) +} + +func (s *DockerSwarmSuite) TestSwarmInit(c *check.C) { + d := s.AddDaemon(c, false, false) + + getSpec := func() swarm.Spec { + sw := d.GetSwarm(c) + return sw.Spec + } + + // passing an invalid external CA fails + tempFile := tempfile.NewTempFile(c, "testfile", "fakecert") + defer tempFile.Remove() + + result := cli.Docker(cli.Args("swarm", "init", "--cert-expiry", "30h", "--dispatcher-heartbeat", "11s", + "--external-ca", fmt.Sprintf("protocol=cfssl,url=https://somethingelse.org,cacert=%s", tempFile.Name())), + cli.Daemon(d.Daemon)) + result.Assert(c, icmd.Expected{ + ExitCode: 125, + Err: "must be in PEM format", + }) + + cli.Docker(cli.Args("swarm", "init", "--cert-expiry", "30h", "--dispatcher-heartbeat", "11s", + "--external-ca", "protocol=cfssl,url=https://something.org", + "--external-ca", "protocol=cfssl,url=https://somethingelse.org,cacert=fixtures/https/ca.pem"), + cli.Daemon(d.Daemon)).Assert(c, icmd.Success) + + expected, err := ioutil.ReadFile("fixtures/https/ca.pem") + c.Assert(err, checker.IsNil) + + spec := getSpec() + c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 30*time.Hour) + c.Assert(spec.Dispatcher.HeartbeatPeriod, checker.Equals, 11*time.Second) + c.Assert(spec.CAConfig.ExternalCAs, checker.HasLen, 2) + c.Assert(spec.CAConfig.ExternalCAs[0].CACert, checker.Equals, "") + c.Assert(spec.CAConfig.ExternalCAs[1].CACert, checker.Equals, string(expected)) + + c.Assert(d.Leave(true), checker.IsNil) + cli.Docker(cli.Args("swarm", "init"), cli.Daemon(d.Daemon)).Assert(c, icmd.Success) + + spec = getSpec() + c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 90*24*time.Hour) + c.Assert(spec.Dispatcher.HeartbeatPeriod, checker.Equals, 5*time.Second) +} + +func (s *DockerSwarmSuite) TestSwarmInitIPv6(c *check.C) { + testRequires(c, IPv6) + d1 := s.AddDaemon(c, false, 
false)
+	cli.Docker(cli.Args("swarm", "init", "--listen-addr", "::1"), cli.Daemon(d1.Daemon)).Assert(c, icmd.Success)
+
+	d2 := s.AddDaemon(c, false, false)
+	cli.Docker(cli.Args("swarm", "join", "::1"), cli.Daemon(d2.Daemon)).Assert(c, icmd.Success)
+
+	out := cli.Docker(cli.Args("info"), cli.Daemon(d2.Daemon)).Assert(c, icmd.Success).Combined()
+	c.Assert(out, checker.Contains, "Swarm: active")
+}
+
+func (s *DockerSwarmSuite) TestSwarmInitUnspecifiedAdvertiseAddr(c *check.C) {
+	d := s.AddDaemon(c, false, false)
+	out, err := d.Cmd("swarm", "init", "--advertise-addr", "0.0.0.0")
+	c.Assert(err, checker.NotNil)
+	c.Assert(out, checker.Contains, "advertise address must be a non-zero IP address")
+}
+
+func (s *DockerSwarmSuite) TestSwarmIncompatibleDaemon(c *check.C) {
+	// init swarm mode and stop a daemon
+	d := s.AddDaemon(c, true, true)
+	info, err := d.SwarmInfo()
+	c.Assert(err, checker.IsNil)
+	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
+	d.Stop(c)
+
+	// start a daemon with --cluster-store and --cluster-advertise
+	err = d.StartWithError("--cluster-store=consul://consuladdr:consulport/some/path", "--cluster-advertise=1.1.1.1:2375")
+	c.Assert(err, checker.NotNil)
+	content, err := d.ReadLogFile()
+	c.Assert(err, checker.IsNil)
+	c.Assert(string(content), checker.Contains, "--cluster-store and --cluster-advertise daemon configurations are incompatible with swarm mode")
+
+	// start a daemon with --live-restore
+	err = d.StartWithError("--live-restore")
+	c.Assert(err, checker.NotNil)
+	content, err = d.ReadLogFile()
+	c.Assert(err, checker.IsNil)
+	c.Assert(string(content), checker.Contains, "--live-restore daemon configuration is incompatible with swarm mode")
+	// restart for teardown
+	d.Start(c)
+}
+
+func (s *DockerSwarmSuite) TestSwarmServiceTemplatingHostname(c *check.C) {
+	d := s.AddDaemon(c, true, true)
+
+	out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", "test", "--hostname", "{{.Service.Name}}-{{.Task.Slot}}", "busybox", "top")
+	c.Assert(err, checker.IsNil, check.Commentf(out))
+
+	// make sure task has been deployed.
+	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1)
+
+	containers := d.ActiveContainers()
+	out, err = d.Cmd("inspect", "--type", "container", "--format", "{{.Config.Hostname}}", containers[0])
+	c.Assert(err, checker.IsNil, check.Commentf(out))
+	c.Assert(strings.Split(out, "\n")[0], checker.Equals, "test-1", check.Commentf("hostname with templating invalid"))
+}
+
+// Test case for #24270
+func (s *DockerSwarmSuite) TestSwarmServiceListFilter(c *check.C) {
+	d := s.AddDaemon(c, true, true)
+
+	name1 := "redis-cluster-md5"
+	name2 := "redis-cluster"
+	name3 := "other-cluster"
+	out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name1, "busybox", "top")
+	c.Assert(err, checker.IsNil)
+	c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "")
+
+	out, err = d.Cmd("service", "create", "--no-resolve-image", "--name", name2, "busybox", "top")
+	c.Assert(err, checker.IsNil)
+	c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "")
+
+	out, err = d.Cmd("service", "create", "--no-resolve-image", "--name", name3, "busybox", "top")
+	c.Assert(err, checker.IsNil)
+	c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "")
+
+	filter1 := "name=redis-cluster-md5"
+	filter2 := "name=redis-cluster"
+
+	// We search with checker.Contains on `name+" "` to avoid matching on a name prefix only.
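+	// For example, filter1 should match only "redis-cluster-md5 ", while
+	// filter2 is a prefix of both redis service names and should match
+	// "redis-cluster-md5 " and "redis-cluster " but never "other-cluster ".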
+ out, err = d.Cmd("service", "ls", "--filter", filter1) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name1+" ") + c.Assert(out, checker.Not(checker.Contains), name2+" ") + c.Assert(out, checker.Not(checker.Contains), name3+" ") + + out, err = d.Cmd("service", "ls", "--filter", filter2) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name1+" ") + c.Assert(out, checker.Contains, name2+" ") + c.Assert(out, checker.Not(checker.Contains), name3+" ") + + out, err = d.Cmd("service", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name1+" ") + c.Assert(out, checker.Contains, name2+" ") + c.Assert(out, checker.Contains, name3+" ") +} + +func (s *DockerSwarmSuite) TestSwarmNodeListFilter(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("node", "inspect", "--format", "{{ .Description.Hostname }}", "self") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + name := strings.TrimSpace(out) + + filter := "name=" + name[:4] + + out, err = d.Cmd("node", "ls", "--filter", filter) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + + out, err = d.Cmd("node", "ls", "--filter", "name=none") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name) +} + +func (s *DockerSwarmSuite) TestSwarmNodeTaskListFilter(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "redis-cluster-md5" + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "--replicas=3", "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 3) + + filter := "name=redis-cluster" + + out, err = d.Cmd("node", "ps", "--filter", filter, "self") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name+".1") + c.Assert(out, checker.Contains, name+".2") + c.Assert(out, checker.Contains, name+".3") + + out, err = d.Cmd("node", "ps", "--filter", "name=none", "self") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name+".1") + c.Assert(out, checker.Not(checker.Contains), name+".2") + c.Assert(out, checker.Not(checker.Contains), name+".3") +} + +// Test case for #25375 +func (s *DockerSwarmSuite) TestSwarmPublishAdd(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "top" + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "--label", "x=y", "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + out, err = d.Cmd("service", "update", "--publish-add", "80:80", name) + c.Assert(err, checker.IsNil) + + out, err = d.CmdRetryOutOfSequence("service", "update", "--publish-add", "80:80", name) + c.Assert(err, checker.IsNil) + + out, err = d.CmdRetryOutOfSequence("service", "update", "--publish-add", "80:80", "--publish-add", "80:20", name) + c.Assert(err, checker.NotNil) + + out, err = d.Cmd("service", "inspect", "--format", "{{ .Spec.EndpointSpec.Ports }}", name) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "[{ tcp 80 80 ingress}]") +} + +func (s *DockerSwarmSuite) TestSwarmServiceWithGroup(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "top" + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "--user", "root:root", "--group", "wheel", "--group", 
"audio", "--group", "staff", "--group", "777", "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + + out, err = d.Cmd("ps", "-q") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + container := strings.TrimSpace(out) + + out, err = d.Cmd("exec", container, "id") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "uid=0(root) gid=0(root) groups=10(wheel),29(audio),50(staff),777") +} + +func (s *DockerSwarmSuite) TestSwarmContainerAutoStart(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("network", "create", "--attachable", "-d", "overlay", "foo") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + out, err = d.Cmd("run", "-id", "--restart=always", "--net=foo", "--name=test", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + out, err = d.Cmd("ps", "-q") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + d.Restart(c) + + out, err = d.Cmd("ps", "-q") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") +} + +func (s *DockerSwarmSuite) TestSwarmContainerEndpointOptions(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("network", "create", "--attachable", "-d", "overlay", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + _, err = d.Cmd("run", "-d", "--net=foo", "--name=first", "--net-alias=first-alias", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + _, err = d.Cmd("run", "-d", "--net=foo", "--name=second", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + _, err = d.Cmd("run", "-d", "--net=foo", "--net-alias=third-alias", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // ping first container and its alias, also ping third and anonymous container by its alias + _, err = d.Cmd("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil, check.Commentf(out)) + _, err = d.Cmd("exec", "second", "ping", "-c", "1", "first-alias") + c.Assert(err, check.IsNil, check.Commentf(out)) + _, err = d.Cmd("exec", "second", "ping", "-c", "1", "third-alias") + c.Assert(err, check.IsNil, check.Commentf(out)) +} + +func (s *DockerSwarmSuite) TestSwarmContainerAttachByNetworkId(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("network", "create", "--attachable", "-d", "overlay", "testnet") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + networkID := strings.TrimSpace(out) + + out, err = d.Cmd("run", "-d", "--net", networkID, "busybox", "top") + c.Assert(err, checker.IsNil) + cID := strings.TrimSpace(out) + d.WaitRun(cID) + + _, err = d.Cmd("rm", "-f", cID) + c.Assert(err, checker.IsNil) + + _, err = d.Cmd("network", "rm", "testnet") + c.Assert(err, checker.IsNil) + + checkNetwork := func(*check.C) (interface{}, check.CommentInterface) { + out, err := d.Cmd("network", "ls") + c.Assert(err, checker.IsNil) + return out, nil + } + + waitAndAssert(c, 3*time.Second, checkNetwork, 
checker.Not(checker.Contains), "testnet") +} + +func (s *DockerSwarmSuite) TestOverlayAttachable(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("network", "create", "-d", "overlay", "--attachable", "ovnet") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // validate attachable + out, err = d.Cmd("network", "inspect", "--format", "{{json .Attachable}}", "ovnet") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "true") + + // validate containers can attache to this overlay network + out, err = d.Cmd("run", "-d", "--network", "ovnet", "--name", "c1", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // redo validation, there was a bug that the value of attachable changes after + // containers attach to the network + out, err = d.Cmd("network", "inspect", "--format", "{{json .Attachable}}", "ovnet") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "true") +} + +func (s *DockerSwarmSuite) TestOverlayAttachableOnSwarmLeave(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Create an attachable swarm network + nwName := "attovl" + out, err := d.Cmd("network", "create", "-d", "overlay", "--attachable", nwName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Connect a container to the network + out, err = d.Cmd("run", "-d", "--network", nwName, "--name", "c1", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Leave the swarm + err = d.Leave(true) + c.Assert(err, checker.IsNil) + + // Check the container is disconnected + out, err = d.Cmd("inspect", "c1", "--format", "{{.NetworkSettings.Networks."+nwName+"}}") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "") + + // Check the network is gone + out, err = d.Cmd("network", "ls", "--format", "{{.Name}}") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), nwName) +} + +func (s *DockerSwarmSuite) TestOverlayAttachableReleaseResourcesOnFailure(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Create attachable network + out, err := d.Cmd("network", "create", "-d", "overlay", "--attachable", "--subnet", "10.10.9.0/24", "ovnet") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Attach a container with specific IP + out, err = d.Cmd("run", "-d", "--network", "ovnet", "--name", "c1", "--ip", "10.10.9.33", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Attempt to attach another container with same IP, must fail + _, err = d.Cmd("run", "-d", "--network", "ovnet", "--name", "c2", "--ip", "10.10.9.33", "busybox", "top") + c.Assert(err, checker.NotNil) + + // Remove first container + out, err = d.Cmd("rm", "-f", "c1") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Verify the network can be removed, no phantom network attachment task left over + out, err = d.Cmd("network", "rm", "ovnet") + c.Assert(err, checker.IsNil, check.Commentf(out)) +} + +func (s *DockerSwarmSuite) TestSwarmIngressNetwork(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Ingress network can be removed + out, _, err := testutil.RunCommandPipelineWithOutput( + exec.Command("echo", "Y"), + exec.Command("docker", "-H", d.Sock(), "network", "rm", "ingress"), + ) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // And recreated + out, err = d.Cmd("network", "create", "-d", "overlay", "--ingress", "new-ingress") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // 
But only one is allowed + out, err = d.Cmd("network", "create", "-d", "overlay", "--ingress", "another-ingress") + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Contains, "is already present") + + // It cannot be removed if it is being used + out, err = d.Cmd("service", "create", "--no-resolve-image", "--name", "srv1", "-p", "9000:8000", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + out, _, err = testutil.RunCommandPipelineWithOutput( + exec.Command("echo", "Y"), + exec.Command("docker", "-H", d.Sock(), "network", "rm", "new-ingress"), + ) + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Contains, "ingress network cannot be removed because service") + + // But it can be removed once no more services depend on it + out, err = d.Cmd("service", "update", "--publish-rm", "9000:8000", "srv1") + c.Assert(err, checker.IsNil, check.Commentf(out)) + out, _, err = testutil.RunCommandPipelineWithOutput( + exec.Command("echo", "Y"), + exec.Command("docker", "-H", d.Sock(), "network", "rm", "new-ingress"), + ) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // A service which needs the ingress network cannot be created if no ingress is present + out, err = d.Cmd("service", "create", "--no-resolve-image", "--name", "srv2", "-p", "500:500", "busybox", "top") + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Contains, "no ingress network is present") + + // An existing service cannot be updated to use the ingress nw if the nw is not present + out, err = d.Cmd("service", "update", "--publish-add", "9000:8000", "srv1") + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Contains, "no ingress network is present") + + // But services which do not need routing mesh can be created regardless + out, err = d.Cmd("service", "create", "--no-resolve-image", "--name", "srv3", "--endpoint-mode", "dnsrr", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) +} + +func (s *DockerSwarmSuite) TestSwarmCreateServiceWithNoIngressNetwork(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Remove ingress network + out, _, err := testutil.RunCommandPipelineWithOutput( + exec.Command("echo", "Y"), + exec.Command("docker", "-H", d.Sock(), "network", "rm", "ingress"), + ) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Create a overlay network and launch a service on it + // Make sure nothing panics because ingress network is missing + out, err = d.Cmd("network", "create", "-d", "overlay", "another-network") + c.Assert(err, checker.IsNil, check.Commentf(out)) + out, err = d.Cmd("service", "create", "--no-resolve-image", "--name", "srv4", "--network", "another-network", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) +} + +// Test case for #24108, also the case from: +// https://github.com/docker/docker/pull/24620#issuecomment-233715656 +func (s *DockerSwarmSuite) TestSwarmTaskListFilter(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "redis-cluster-md5" + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "--replicas=3", "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + filter := "name=redis-cluster" + + checkNumTasks := func(*check.C) (interface{}, check.CommentInterface) { + out, err := d.Cmd("service", "ps", "--filter", filter, name) + c.Assert(err, checker.IsNil) + return len(strings.Split(out, "\n")) - 2, nil // includes header and 
nl in last line + } + + // wait until all tasks have been created + waitAndAssert(c, defaultReconciliationTimeout, checkNumTasks, checker.Equals, 3) + + out, err = d.Cmd("service", "ps", "--filter", filter, name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name+".1") + c.Assert(out, checker.Contains, name+".2") + c.Assert(out, checker.Contains, name+".3") + + out, err = d.Cmd("service", "ps", "--filter", "name="+name+".1", name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name+".1") + c.Assert(out, checker.Not(checker.Contains), name+".2") + c.Assert(out, checker.Not(checker.Contains), name+".3") + + out, err = d.Cmd("service", "ps", "--filter", "name=none", name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name+".1") + c.Assert(out, checker.Not(checker.Contains), name+".2") + c.Assert(out, checker.Not(checker.Contains), name+".3") + + name = "redis-cluster-sha1" + out, err = d.Cmd("service", "create", "--no-resolve-image", "--name", name, "--mode=global", "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + waitAndAssert(c, defaultReconciliationTimeout, checkNumTasks, checker.Equals, 1) + + filter = "name=redis-cluster" + out, err = d.Cmd("service", "ps", "--filter", filter, name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + + out, err = d.Cmd("service", "ps", "--filter", "name="+name, name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + + out, err = d.Cmd("service", "ps", "--filter", "name=none", name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name) +} + +func (s *DockerSwarmSuite) TestPsListContainersFilterIsTask(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Create a bare container + out, err := d.Cmd("run", "-d", "--name=bare-container", "busybox", "top") + c.Assert(err, checker.IsNil) + bareID := strings.TrimSpace(out)[:12] + // Create a service + name := "busybox-top" + out, err = d.Cmd("service", "create", "--no-resolve-image", "--name", name, "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + // make sure task has been deployed. 
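+	// (CheckServiceRunningTasks, as used here, counts the service's tasks in
+	// the running state, so this waits for the single replica to be up before
+	// relying on `ps --filter is-task` below.)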
+ waitAndAssert(c, defaultReconciliationTimeout, d.CheckServiceRunningTasks(name), checker.Equals, 1) + + // Filter non-tasks + out, err = d.Cmd("ps", "-a", "-q", "--filter=is-task=false") + c.Assert(err, checker.IsNil) + psOut := strings.TrimSpace(out) + c.Assert(psOut, checker.Equals, bareID, check.Commentf("Expected id %s, got %s for is-task label, output %q", bareID, psOut, out)) + + // Filter tasks + out, err = d.Cmd("ps", "-a", "-q", "--filter=is-task=true") + c.Assert(err, checker.IsNil) + lines := strings.Split(strings.Trim(out, "\n "), "\n") + c.Assert(lines, checker.HasLen, 1) + c.Assert(lines[0], checker.Not(checker.Equals), bareID, check.Commentf("Expected not %s, but got it for is-task label, output %q", bareID, out)) +} + +const globalNetworkPlugin = "global-network-plugin" +const globalIPAMPlugin = "global-ipam-plugin" + +func setupRemoteGlobalNetworkPlugin(c *check.C, mux *http.ServeMux, url, netDrv, ipamDrv string) { + + mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, `{"Implements": ["%s", "%s"]}`, driverapi.NetworkPluginEndpointType, ipamapi.PluginEndpointType) + }) + + // Network driver implementation + mux.HandleFunc(fmt.Sprintf("/%s.GetCapabilities", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, `{"Scope":"global"}`) + }) + + mux.HandleFunc(fmt.Sprintf("/%s.AllocateNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&remoteDriverNetworkRequest) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, "null") + }) + + mux.HandleFunc(fmt.Sprintf("/%s.FreeNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, "null") + }) + + mux.HandleFunc(fmt.Sprintf("/%s.CreateNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&remoteDriverNetworkRequest) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, "null") + }) + + mux.HandleFunc(fmt.Sprintf("/%s.DeleteNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, "null") + }) + + mux.HandleFunc(fmt.Sprintf("/%s.CreateEndpoint", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, `{"Interface":{"MacAddress":"a0:b1:c2:d3:e4:f5"}}`) + }) + + mux.HandleFunc(fmt.Sprintf("/%s.Join", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + + veth := &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{Name: "randomIfName", TxQLen: 0}, PeerName: "cnt0"} + if err := netlink.LinkAdd(veth); err != nil { + fmt.Fprintf(w, `{"Error":"failed to add 
veth pair: `+err.Error()+`"}`) + } else { + fmt.Fprintf(w, `{"InterfaceName":{ "SrcName":"cnt0", "DstPrefix":"veth"}}`) + } + }) + + mux.HandleFunc(fmt.Sprintf("/%s.Leave", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, "null") + }) + + mux.HandleFunc(fmt.Sprintf("/%s.DeleteEndpoint", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + if link, err := netlink.LinkByName("cnt0"); err == nil { + netlink.LinkDel(link) + } + fmt.Fprintf(w, "null") + }) + + // IPAM Driver implementation + var ( + poolRequest remoteipam.RequestPoolRequest + poolReleaseReq remoteipam.ReleasePoolRequest + addressRequest remoteipam.RequestAddressRequest + addressReleaseReq remoteipam.ReleaseAddressRequest + lAS = "localAS" + gAS = "globalAS" + pool = "172.28.0.0/16" + poolID = lAS + "/" + pool + gw = "172.28.255.254/16" + ) + + mux.HandleFunc(fmt.Sprintf("/%s.GetDefaultAddressSpaces", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, `{"LocalDefaultAddressSpace":"`+lAS+`", "GlobalDefaultAddressSpace": "`+gAS+`"}`) + }) + + mux.HandleFunc(fmt.Sprintf("/%s.RequestPool", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&poolRequest) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + if poolRequest.AddressSpace != lAS && poolRequest.AddressSpace != gAS { + fmt.Fprintf(w, `{"Error":"Unknown address space in pool request: `+poolRequest.AddressSpace+`"}`) + } else if poolRequest.Pool != "" && poolRequest.Pool != pool { + fmt.Fprintf(w, `{"Error":"Cannot handle explicit pool requests yet"}`) + } else { + fmt.Fprintf(w, `{"PoolID":"`+poolID+`", "Pool":"`+pool+`"}`) + } + }) + + mux.HandleFunc(fmt.Sprintf("/%s.RequestAddress", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&addressRequest) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + // make sure libnetwork is now querying on the expected pool id + if addressRequest.PoolID != poolID { + fmt.Fprintf(w, `{"Error":"unknown pool id"}`) + } else if addressRequest.Address != "" { + fmt.Fprintf(w, `{"Error":"Cannot handle explicit address requests yet"}`) + } else { + fmt.Fprintf(w, `{"Address":"`+gw+`"}`) + } + }) + + mux.HandleFunc(fmt.Sprintf("/%s.ReleaseAddress", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&addressReleaseReq) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + // make sure libnetwork is now asking to release the expected address from the expected poolid + if addressRequest.PoolID != poolID { + fmt.Fprintf(w, `{"Error":"unknown pool id"}`) + } else if addressReleaseReq.Address != gw { + fmt.Fprintf(w, `{"Error":"unknown address"}`) + } else { + fmt.Fprintf(w, "null") + } + }) + + 
mux.HandleFunc(fmt.Sprintf("/%s.ReleasePool", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&poolReleaseReq) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + // make sure libnetwork is now asking to release the expected poolid + if addressRequest.PoolID != poolID { + fmt.Fprintf(w, `{"Error":"unknown pool id"}`) + } else { + fmt.Fprintf(w, "null") + } + }) + + err := os.MkdirAll("/etc/docker/plugins", 0755) + c.Assert(err, checker.IsNil) + + fileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", netDrv) + err = ioutil.WriteFile(fileName, []byte(url), 0644) + c.Assert(err, checker.IsNil) + + ipamFileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", ipamDrv) + err = ioutil.WriteFile(ipamFileName, []byte(url), 0644) + c.Assert(err, checker.IsNil) +} + +func (s *DockerSwarmSuite) TestSwarmNetworkPlugin(c *check.C) { + mux := http.NewServeMux() + s.server = httptest.NewServer(mux) + c.Assert(s.server, check.NotNil, check.Commentf("Failed to start an HTTP Server")) + setupRemoteGlobalNetworkPlugin(c, mux, s.server.URL, globalNetworkPlugin, globalIPAMPlugin) + defer func() { + s.server.Close() + err := os.RemoveAll("/etc/docker/plugins") + c.Assert(err, checker.IsNil) + }() + + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("network", "create", "-d", globalNetworkPlugin, "foo") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "not supported in swarm mode") +} + +// Test case for #24712 +func (s *DockerSwarmSuite) TestSwarmServiceEnvFile(c *check.C) { + d := s.AddDaemon(c, true, true) + + path := filepath.Join(d.Folder, "env.txt") + err := ioutil.WriteFile(path, []byte("VAR1=A\nVAR2=A\n"), 0644) + c.Assert(err, checker.IsNil) + + name := "worker" + out, err := d.Cmd("service", "create", "--no-resolve-image", "--env-file", path, "--env", "VAR1=B", "--env", "VAR1=C", "--env", "VAR2=", "--env", "VAR2", "--name", name, "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + // The complete env is [VAR1=A VAR2=A VAR1=B VAR1=C VAR2= VAR2] and duplicates will be removed => [VAR1=C VAR2] + out, err = d.Cmd("inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.Env }}", name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "[VAR1=C VAR2]") +} + +func (s *DockerSwarmSuite) TestSwarmServiceTTY(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "top" + + ttyCheck := "if [ -t 0 ]; then echo TTY > /status && top; else echo none > /status && top; fi" + + // Without --tty + expectedOutput := "none" + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "busybox", "sh", "-c", ttyCheck) + c.Assert(err, checker.IsNil) + + // Make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + + // We need to get the container id. + out, err = d.Cmd("ps", "-a", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + id := strings.TrimSpace(out) + + out, err = d.Cmd("exec", id, "cat", "/status") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) + + // Remove service + out, err = d.Cmd("service", "rm", name) + c.Assert(err, checker.IsNil) + // Make sure container has been destroyed. 
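+	// Waiting for the active-container count to drop back to zero matters:
+	// the `ps -a -q --no-trunc` below must only pick up the container of the
+	// --tty service created next, not a leftover from the service just removed.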
+ waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 0) + + // With --tty + expectedOutput = "TTY" + out, err = d.Cmd("service", "create", "--no-resolve-image", "--name", name, "--tty", "busybox", "sh", "-c", ttyCheck) + c.Assert(err, checker.IsNil) + + // Make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + + // We need to get the container id. + out, err = d.Cmd("ps", "-a", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + id = strings.TrimSpace(out) + + out, err = d.Cmd("exec", id, "cat", "/status") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) +} + +func (s *DockerSwarmSuite) TestSwarmServiceTTYUpdate(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Create a service + name := "top" + _, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "busybox", "top") + c.Assert(err, checker.IsNil) + + // Make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + + out, err := d.Cmd("service", "inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.TTY }}", name) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "false") + + _, err = d.Cmd("service", "update", "--tty", name) + c.Assert(err, checker.IsNil) + + out, err = d.Cmd("service", "inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.TTY }}", name) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "true") +} + +func (s *DockerSwarmSuite) TestSwarmServiceNetworkUpdate(c *check.C) { + d := s.AddDaemon(c, true, true) + + result := icmd.RunCmd(d.Command("network", "create", "-d", "overlay", "foo")) + result.Assert(c, icmd.Success) + fooNetwork := strings.TrimSpace(string(result.Combined())) + + result = icmd.RunCmd(d.Command("network", "create", "-d", "overlay", "bar")) + result.Assert(c, icmd.Success) + barNetwork := strings.TrimSpace(string(result.Combined())) + + result = icmd.RunCmd(d.Command("network", "create", "-d", "overlay", "baz")) + result.Assert(c, icmd.Success) + bazNetwork := strings.TrimSpace(string(result.Combined())) + + // Create a service + name := "top" + result = icmd.RunCmd(d.Command("service", "create", "--no-resolve-image", "--network", "foo", "--network", "bar", "--name", name, "busybox", "top")) + result.Assert(c, icmd.Success) + + // Make sure task has been deployed. 
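+	// (CheckRunningTaskNetworks, as used here, appears to yield a map from
+	// network ID to the number of running tasks attached to that network,
+	// hence the expected map literals in the assertions below.)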
+ waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskNetworks, checker.DeepEquals, + map[string]int{fooNetwork: 1, barNetwork: 1}) + + // Remove a network + result = icmd.RunCmd(d.Command("service", "update", "--network-rm", "foo", name)) + result.Assert(c, icmd.Success) + + waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskNetworks, checker.DeepEquals, + map[string]int{barNetwork: 1}) + + // Add a network + result = icmd.RunCmd(d.Command("service", "update", "--network-add", "baz", name)) + result.Assert(c, icmd.Success) + + waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskNetworks, checker.DeepEquals, + map[string]int{barNetwork: 1, bazNetwork: 1}) +} + +func (s *DockerSwarmSuite) TestDNSConfig(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Create a service + name := "top" + _, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "--dns=1.2.3.4", "--dns-search=example.com", "--dns-option=timeout:3", "busybox", "top") + c.Assert(err, checker.IsNil) + + // Make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + + // We need to get the container id. + out, err := d.Cmd("ps", "-a", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + id := strings.TrimSpace(out) + + // Compare against expected output. + expectedOutput1 := "nameserver 1.2.3.4" + expectedOutput2 := "search example.com" + expectedOutput3 := "options timeout:3" + out, err = d.Cmd("exec", id, "cat", "/etc/resolv.conf") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, expectedOutput1, check.Commentf("Expected '%s', but got %q", expectedOutput1, out)) + c.Assert(out, checker.Contains, expectedOutput2, check.Commentf("Expected '%s', but got %q", expectedOutput2, out)) + c.Assert(out, checker.Contains, expectedOutput3, check.Commentf("Expected '%s', but got %q", expectedOutput3, out)) +} + +func (s *DockerSwarmSuite) TestDNSConfigUpdate(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Create a service + name := "top" + _, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "busybox", "top") + c.Assert(err, checker.IsNil) + + // Make sure task has been deployed. 
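+	// (the inspect below reads DNSConfig back from the service spec rather
+	// than from a task container, so only the spec update itself is verified
+	// after "service update")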
+ waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + + _, err = d.Cmd("service", "update", "--dns-add=1.2.3.4", "--dns-search-add=example.com", "--dns-option-add=timeout:3", name) + c.Assert(err, checker.IsNil) + + out, err := d.Cmd("service", "inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.DNSConfig }}", name) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "{[1.2.3.4] [example.com] [timeout:3]}") +} + +func getNodeStatus(c *check.C, d *daemon.Swarm) swarm.LocalNodeState { + info, err := d.SwarmInfo() + c.Assert(err, checker.IsNil) + return info.LocalNodeState +} + +func checkKeyIsEncrypted(d *daemon.Swarm) func(*check.C) (interface{}, check.CommentInterface) { + return func(c *check.C) (interface{}, check.CommentInterface) { + keyBytes, err := ioutil.ReadFile(filepath.Join(d.Folder, "root", "swarm", "certificates", "swarm-node.key")) + if err != nil { + return fmt.Errorf("error reading key: %v", err), nil + } + + keyBlock, _ := pem.Decode(keyBytes) + if keyBlock == nil { + return fmt.Errorf("invalid PEM-encoded private key"), nil + } + + return x509.IsEncryptedPEMBlock(keyBlock), nil + } +} + +func checkSwarmLockedToUnlocked(c *check.C, d *daemon.Swarm, unlockKey string) { + // Wait for the PEM file to become unencrypted + waitAndAssert(c, defaultReconciliationTimeout, checkKeyIsEncrypted(d), checker.Equals, false) + + d.Restart(c) + c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateActive) +} + +func checkSwarmUnlockedToLocked(c *check.C, d *daemon.Swarm) { + // Wait for the PEM file to become encrypted + waitAndAssert(c, defaultReconciliationTimeout, checkKeyIsEncrypted(d), checker.Equals, true) + + d.Restart(c) + c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateLocked) +} + +func (s *DockerSwarmSuite) TestUnlockEngineAndUnlockedSwarm(c *check.C) { + d := s.AddDaemon(c, false, false) + + // unlocking a normal engine should return an error - it does not even ask for the key + cmd := d.Command("swarm", "unlock") + result := icmd.RunCmd(cmd) + result.Assert(c, icmd.Expected{ + ExitCode: 1, + }) + c.Assert(result.Combined(), checker.Contains, "Error: This node is not part of a swarm") + c.Assert(result.Combined(), checker.Not(checker.Contains), "Please enter unlock key") + + _, err := d.Cmd("swarm", "init") + c.Assert(err, checker.IsNil) + + // unlocking an unlocked swarm should return an error - it does not even ask for the key + cmd = d.Command("swarm", "unlock") + result = icmd.RunCmd(cmd) + result.Assert(c, icmd.Expected{ + ExitCode: 1, + }) + c.Assert(result.Combined(), checker.Contains, "Error: swarm is not locked") + c.Assert(result.Combined(), checker.Not(checker.Contains), "Please enter unlock key") +} + +func (s *DockerSwarmSuite) TestSwarmInitLocked(c *check.C) { + d := s.AddDaemon(c, false, false) + + outs, err := d.Cmd("swarm", "init", "--autolock") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + + c.Assert(outs, checker.Contains, "docker swarm unlock") + + var unlockKey string + for _, line := range strings.Split(outs, "\n") { + if strings.Contains(line, "SWMKEY") { + unlockKey = strings.TrimSpace(line) + break + } + } + + c.Assert(unlockKey, checker.Not(checker.Equals), "") + + outs, err = d.Cmd("swarm", "unlock-key", "-q") + c.Assert(outs, checker.Equals, unlockKey+"\n") + + c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateActive) + + // It starts off locked + d.Restart(c) + c.Assert(getNodeStatus(c, d), 
checker.Equals, swarm.LocalNodeStateLocked)
+
+	cmd := d.Command("swarm", "unlock")
+	cmd.Stdin = bytes.NewBufferString("wrong-secret-key")
+	icmd.RunCmd(cmd).Assert(c, icmd.Expected{
+		ExitCode: 1,
+		Err:      "invalid key",
+	})
+
+	c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateLocked)
+
+	cmd = d.Command("swarm", "unlock")
+	cmd.Stdin = bytes.NewBufferString(unlockKey)
+	icmd.RunCmd(cmd).Assert(c, icmd.Success)
+
+	c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateActive)
+
+	outs, err = d.Cmd("node", "ls")
+	c.Assert(err, checker.IsNil)
+	c.Assert(outs, checker.Not(checker.Contains), "Swarm is encrypted and needs to be unlocked")
+
+	outs, err = d.Cmd("swarm", "update", "--autolock=false")
+	c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs))
+
+	checkSwarmLockedToUnlocked(c, d, unlockKey)
+
+	outs, err = d.Cmd("node", "ls")
+	c.Assert(err, checker.IsNil)
+	c.Assert(outs, checker.Not(checker.Contains), "Swarm is encrypted and needs to be unlocked")
+}
+
+func (s *DockerSwarmSuite) TestSwarmLeaveLocked(c *check.C) {
+	d := s.AddDaemon(c, false, false)
+
+	outs, err := d.Cmd("swarm", "init", "--autolock")
+	c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs))
+
+	// It starts off locked
+	d.Restart(c, "--swarm-default-advertise-addr=lo")
+
+	info, err := d.SwarmInfo()
+	c.Assert(err, checker.IsNil)
+	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateLocked)
+
+	outs, _ = d.Cmd("node", "ls")
+	c.Assert(outs, checker.Contains, "Swarm is encrypted and needs to be unlocked")
+
+	// `docker swarm leave` a locked swarm without --force will return an error
+	outs, _ = d.Cmd("swarm", "leave")
+	c.Assert(outs, checker.Contains, "Swarm is encrypted and locked.")
+
+	// It is OK for a user to leave a locked swarm with --force
+	outs, err = d.Cmd("swarm", "leave", "--force")
+	c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs))
+
+	info, err = d.SwarmInfo()
+	c.Assert(err, checker.IsNil)
+	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
+
+	outs, err = d.Cmd("swarm", "init")
+	c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs))
+
+	info, err = d.SwarmInfo()
+	c.Assert(err, checker.IsNil)
+	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
+}
+
+func (s *DockerSwarmSuite) TestSwarmLockUnlockCluster(c *check.C) {
+	d1 := s.AddDaemon(c, true, true)
+	d2 := s.AddDaemon(c, true, true)
+	d3 := s.AddDaemon(c, true, true)
+
+	// they start off unlocked
+	d2.Restart(c)
+	c.Assert(getNodeStatus(c, d2), checker.Equals, swarm.LocalNodeStateActive)
+
+	// stop this one so it does not get autolock info
+	d2.Stop(c)
+
+	// enable autolock
+	outs, err := d1.Cmd("swarm", "update", "--autolock")
+	c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs))
+
+	c.Assert(outs, checker.Contains, "docker swarm unlock")
+
+	var unlockKey string
+	for _, line := range strings.Split(outs, "\n") {
+		if strings.Contains(line, "SWMKEY") {
+			unlockKey = strings.TrimSpace(line)
+			break
+		}
+	}
+
+	c.Assert(unlockKey, checker.Not(checker.Equals), "")
+
+	outs, err = d1.Cmd("swarm", "unlock-key", "-q")
+	c.Assert(outs, checker.Equals, unlockKey+"\n")
+
+	// The ones that got the cluster update should be set to locked
+	for _, d := range []*daemon.Swarm{d1, d3} {
+		checkSwarmUnlockedToLocked(c, d)
+
+		cmd := d.Command("swarm", "unlock")
+		cmd.Stdin = bytes.NewBufferString(unlockKey)
+		icmd.RunCmd(cmd).Assert(c, icmd.Success)
+		c.Assert(getNodeStatus(c, d), checker.Equals,
swarm.LocalNodeStateActive) + } + + // d2 never got the cluster update, so it is still set to unlocked + d2.Start(c) + c.Assert(getNodeStatus(c, d2), checker.Equals, swarm.LocalNodeStateActive) + + // d2 is now set to lock + checkSwarmUnlockedToLocked(c, d2) + + // leave it locked, and set the cluster to no longer autolock + outs, err = d1.Cmd("swarm", "update", "--autolock=false") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + + // the ones that got the update are now set to unlocked + for _, d := range []*daemon.Swarm{d1, d3} { + checkSwarmLockedToUnlocked(c, d, unlockKey) + } + + // d2 still locked + c.Assert(getNodeStatus(c, d2), checker.Equals, swarm.LocalNodeStateLocked) + + // unlock it + cmd := d2.Command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString(unlockKey) + icmd.RunCmd(cmd).Assert(c, icmd.Success) + c.Assert(getNodeStatus(c, d2), checker.Equals, swarm.LocalNodeStateActive) + + // once it's caught up, d2 is set to not be locked + checkSwarmLockedToUnlocked(c, d2, unlockKey) + + // managers who join now are never set to locked in the first place + d4 := s.AddDaemon(c, true, true) + d4.Restart(c) + c.Assert(getNodeStatus(c, d4), checker.Equals, swarm.LocalNodeStateActive) +} + +func (s *DockerSwarmSuite) TestSwarmJoinPromoteLocked(c *check.C) { + d1 := s.AddDaemon(c, true, true) + + // enable autolock + outs, err := d1.Cmd("swarm", "update", "--autolock") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + + c.Assert(outs, checker.Contains, "docker swarm unlock") + + var unlockKey string + for _, line := range strings.Split(outs, "\n") { + if strings.Contains(line, "SWMKEY") { + unlockKey = strings.TrimSpace(line) + break + } + } + + c.Assert(unlockKey, checker.Not(checker.Equals), "") + + outs, err = d1.Cmd("swarm", "unlock-key", "-q") + c.Assert(outs, checker.Equals, unlockKey+"\n") + + // joined workers start off unlocked + d2 := s.AddDaemon(c, true, false) + d2.Restart(c) + c.Assert(getNodeStatus(c, d2), checker.Equals, swarm.LocalNodeStateActive) + + // promote worker + outs, err = d1.Cmd("node", "promote", d2.Info.NodeID) + c.Assert(err, checker.IsNil) + c.Assert(outs, checker.Contains, "promoted to a manager in the swarm") + + // join new manager node + d3 := s.AddDaemon(c, true, true) + + // both new nodes are locked + for _, d := range []*daemon.Swarm{d2, d3} { + checkSwarmUnlockedToLocked(c, d) + + cmd := d.Command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString(unlockKey) + icmd.RunCmd(cmd).Assert(c, icmd.Success) + c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateActive) + } + + // demote manager back to worker - workers are not locked + outs, err = d1.Cmd("node", "demote", d3.Info.NodeID) + c.Assert(err, checker.IsNil) + c.Assert(outs, checker.Contains, "demoted in the swarm") + + // Wait for it to actually be demoted, for the key and cert to be replaced. + // Then restart and assert that the node is not locked. 
If we don't wait for the cert + // to be replaced, then the node still has the manager TLS key which is still locked + // (because we never want a manager TLS key to be on disk unencrypted if the cluster + // is set to autolock) + waitAndAssert(c, defaultReconciliationTimeout, d3.CheckControlAvailable, checker.False) + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + certBytes, err := ioutil.ReadFile(filepath.Join(d3.Folder, "root", "swarm", "certificates", "swarm-node.crt")) + if err != nil { + return "", check.Commentf("error: %v", err) + } + certs, err := helpers.ParseCertificatesPEM(certBytes) + if err == nil && len(certs) > 0 && len(certs[0].Subject.OrganizationalUnit) > 0 { + return certs[0].Subject.OrganizationalUnit[0], nil + } + return "", check.Commentf("could not get organizational unit from certificate") + }, checker.Equals, "swarm-worker") + + // by now, it should *never* be locked on restart + d3.Restart(c) + c.Assert(getNodeStatus(c, d3), checker.Equals, swarm.LocalNodeStateActive) +} + +func (s *DockerSwarmSuite) TestSwarmRotateUnlockKey(c *check.C) { + d := s.AddDaemon(c, true, true) + + outs, err := d.Cmd("swarm", "update", "--autolock") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + + c.Assert(outs, checker.Contains, "docker swarm unlock") + + var unlockKey string + for _, line := range strings.Split(outs, "\n") { + if strings.Contains(line, "SWMKEY") { + unlockKey = strings.TrimSpace(line) + break + } + } + + c.Assert(unlockKey, checker.Not(checker.Equals), "") + + outs, err = d.Cmd("swarm", "unlock-key", "-q") + c.Assert(outs, checker.Equals, unlockKey+"\n") + + // Rotate multiple times + for i := 0; i != 3; i++ { + outs, err = d.Cmd("swarm", "unlock-key", "-q", "--rotate") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + // Strip \n + newUnlockKey := outs[:len(outs)-1] + c.Assert(newUnlockKey, checker.Not(checker.Equals), "") + c.Assert(newUnlockKey, checker.Not(checker.Equals), unlockKey) + + d.Restart(c) + c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateLocked) + + outs, _ = d.Cmd("node", "ls") + c.Assert(outs, checker.Contains, "Swarm is encrypted and needs to be unlocked") + + cmd := d.Command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString(unlockKey) + result := icmd.RunCmd(cmd) + + if result.Error == nil { + // On occasion, the daemon may not have finished + // rotating the KEK before restarting. The test is + // intentionally written to explore this behavior. + // When this happens, unlocking with the old key will + // succeed. If we wait for the rotation to happen and + // restart again, the new key should be required this + // time. 
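+			// The 3-second sleep below is a heuristic grace period for the
+			// rotated KEK to be flushed to disk; there is no event to wait
+			// on here.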
+ + time.Sleep(3 * time.Second) + + d.Restart(c) + + cmd = d.Command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString(unlockKey) + result = icmd.RunCmd(cmd) + } + result.Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "invalid key", + }) + + outs, _ = d.Cmd("node", "ls") + c.Assert(outs, checker.Contains, "Swarm is encrypted and needs to be unlocked") + + cmd = d.Command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString(newUnlockKey) + icmd.RunCmd(cmd).Assert(c, icmd.Success) + + c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateActive) + + outs, err = d.Cmd("node", "ls") + c.Assert(err, checker.IsNil) + c.Assert(outs, checker.Not(checker.Contains), "Swarm is encrypted and needs to be unlocked") + + unlockKey = newUnlockKey + } +} + +// This differs from `TestSwarmRotateUnlockKey` because that one rotates a single node, which is the leader. +// This one keeps the leader up, and asserts that other manager nodes in the cluster also have their unlock +// key rotated. +func (s *DockerSwarmSuite) TestSwarmClusterRotateUnlockKey(c *check.C) { + d1 := s.AddDaemon(c, true, true) // leader - don't restart this one, we don't want leader election delays + d2 := s.AddDaemon(c, true, true) + d3 := s.AddDaemon(c, true, true) + + outs, err := d1.Cmd("swarm", "update", "--autolock") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + + c.Assert(outs, checker.Contains, "docker swarm unlock") + + var unlockKey string + for _, line := range strings.Split(outs, "\n") { + if strings.Contains(line, "SWMKEY") { + unlockKey = strings.TrimSpace(line) + break + } + } + + c.Assert(unlockKey, checker.Not(checker.Equals), "") + + outs, err = d1.Cmd("swarm", "unlock-key", "-q") + c.Assert(outs, checker.Equals, unlockKey+"\n") + + // Rotate multiple times + for i := 0; i != 3; i++ { + outs, err = d1.Cmd("swarm", "unlock-key", "-q", "--rotate") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + // Strip \n + newUnlockKey := outs[:len(outs)-1] + c.Assert(newUnlockKey, checker.Not(checker.Equals), "") + c.Assert(newUnlockKey, checker.Not(checker.Equals), unlockKey) + + d2.Restart(c) + d3.Restart(c) + + for _, d := range []*daemon.Swarm{d2, d3} { + c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateLocked) + + outs, _ := d.Cmd("node", "ls") + c.Assert(outs, checker.Contains, "Swarm is encrypted and needs to be unlocked") + + cmd := d.Command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString(unlockKey) + result := icmd.RunCmd(cmd) + + if result.Error == nil { + // On occasion, the daemon may not have finished + // rotating the KEK before restarting. The test is + // intentionally written to explore this behavior. + // When this happens, unlocking with the old key will + // succeed. If we wait for the rotation to happen and + // restart again, the new key should be required this + // time. 
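+				// As above: allow the rotated KEK a moment to land on disk
+				// before restarting and retrying with the old key.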
+ + time.Sleep(3 * time.Second) + + d.Restart(c) + + cmd = d.Command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString(unlockKey) + result = icmd.RunCmd(cmd) + } + result.Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "invalid key", + }) + + outs, _ = d.Cmd("node", "ls") + c.Assert(outs, checker.Contains, "Swarm is encrypted and needs to be unlocked") + + cmd = d.Command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString(newUnlockKey) + icmd.RunCmd(cmd).Assert(c, icmd.Success) + + c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateActive) + + outs, err = d.Cmd("node", "ls") + c.Assert(err, checker.IsNil) + c.Assert(outs, checker.Not(checker.Contains), "Swarm is encrypted and needs to be unlocked") + } + + unlockKey = newUnlockKey + } +} + +func (s *DockerSwarmSuite) TestSwarmAlternateLockUnlock(c *check.C) { + d := s.AddDaemon(c, true, true) + + var unlockKey string + for i := 0; i < 2; i++ { + // set to lock + outs, err := d.Cmd("swarm", "update", "--autolock") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + c.Assert(outs, checker.Contains, "docker swarm unlock") + + for _, line := range strings.Split(outs, "\n") { + if strings.Contains(line, "SWMKEY") { + unlockKey = strings.TrimSpace(line) + break + } + } + + c.Assert(unlockKey, checker.Not(checker.Equals), "") + checkSwarmUnlockedToLocked(c, d) + + cmd := d.Command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString(unlockKey) + icmd.RunCmd(cmd).Assert(c, icmd.Success) + + c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateActive) + + outs, err = d.Cmd("swarm", "update", "--autolock=false") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + + checkSwarmLockedToUnlocked(c, d, unlockKey) + } +} + +func (s *DockerSwarmSuite) TestExtraHosts(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Create a service + name := "top" + _, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "--host=example.com:1.2.3.4", "busybox", "top") + c.Assert(err, checker.IsNil) + + // Make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + + // We need to get the container id. + out, err := d.Cmd("ps", "-a", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + id := strings.TrimSpace(out) + + // Compare against expected output. 
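+ // --host entries are rendered into the container's /etc/hosts as "IP<TAB>hostname", hence the tab in the expected string below.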
+ expectedOutput := "1.2.3.4\texample.com" + out, err = d.Cmd("exec", id, "cat", "/etc/hosts") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) +} + +func (s *DockerSwarmSuite) TestSwarmManagerAddress(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, false) + d3 := s.AddDaemon(c, true, false) + + // Manager Addresses will always show Node 1's address + expectedOutput := fmt.Sprintf("Manager Addresses:\n 127.0.0.1:%d\n", d1.Port) + + out, err := d1.Cmd("info") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, expectedOutput) + + out, err = d2.Cmd("info") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, expectedOutput) + + out, err = d3.Cmd("info") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, expectedOutput) +} + +func (s *DockerSwarmSuite) TestSwarmServiceInspectPretty(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "top" + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "--limit-cpu=0.5", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + expectedOutput := ` +Resources: + Limits: + CPU: 0.5` + out, err = d.Cmd("service", "inspect", "--pretty", name) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, expectedOutput, check.Commentf(out)) +} + +func (s *DockerSwarmSuite) TestSwarmNetworkIPAMOptions(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("network", "create", "-d", "overlay", "--ipam-opt", "foo=bar", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + out, err = d.Cmd("network", "inspect", "--format", "{{.IPAM.Options}}", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "map[foo:bar]") + + out, err = d.Cmd("service", "create", "--no-resolve-image", "--network=foo", "--name", "top", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + + out, err = d.Cmd("network", "inspect", "--format", "{{.IPAM.Options}}", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "map[foo:bar]") +} + +func (s *DockerTrustedSwarmSuite) TestTrustedServiceCreate(c *check.C) { + d := s.swarmSuite.AddDaemon(c, true, true) + + // Attempt creating a service from an image that is known to notary. + repoName := s.trustSuite.setupTrustedImage(c, "trusted-pull") + + name := "trusted" + cli.Docker(cli.Args("-D", "service", "create", "--no-resolve-image", "--name", name, repoName, "top"), trustedCmd, cli.Daemon(d.Daemon)).Assert(c, icmd.Expected{ + Err: "resolved image tag to", + }) + + out, err := d.Cmd("service", "inspect", "--pretty", name) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, repoName+"@", check.Commentf(out)) + + // Try trusted service create on an untrusted tag. 
+ + repoName = fmt.Sprintf("%v/untrustedservicecreate/createtest:latest", privateRegistryURL) + // tag the image and upload it to the private registry + cli.DockerCmd(c, "tag", "busybox", repoName) + cli.DockerCmd(c, "push", repoName) + cli.DockerCmd(c, "rmi", repoName) + + name = "untrusted" + cli.Docker(cli.Args("service", "create", "--no-resolve-image", "--name", name, repoName, "top"), trustedCmd, cli.Daemon(d.Daemon)).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Error: remote trust data does not exist", + }) + + out, err = d.Cmd("service", "inspect", "--pretty", name) + c.Assert(err, checker.NotNil, check.Commentf(out)) +} + +func (s *DockerTrustedSwarmSuite) TestTrustedServiceUpdate(c *check.C) { + d := s.swarmSuite.AddDaemon(c, true, true) + + // Attempt creating a service from an image that is known to notary. + repoName := s.trustSuite.setupTrustedImage(c, "trusted-pull") + + name := "myservice" + + // Create a service without content trust + cli.Docker(cli.Args("service", "create", "--no-resolve-image", "--name", name, repoName, "top"), cli.Daemon(d.Daemon)).Assert(c, icmd.Success) + + result := cli.Docker(cli.Args("service", "inspect", "--pretty", name), cli.Daemon(d.Daemon)) + c.Assert(result.Error, checker.IsNil, check.Commentf(result.Combined())) + // Daemon won't insert the digest because this is disabled by + // DOCKER_SERVICE_PREFER_OFFLINE_IMAGE. + c.Assert(result.Combined(), check.Not(checker.Contains), repoName+"@", check.Commentf(result.Combined())) + + cli.Docker(cli.Args("-D", "service", "update", "--no-resolve-image", "--image", repoName, name), trustedCmd, cli.Daemon(d.Daemon)).Assert(c, icmd.Expected{ + Err: "resolved image tag to", + }) + + cli.Docker(cli.Args("service", "inspect", "--pretty", name), cli.Daemon(d.Daemon)).Assert(c, icmd.Expected{ + Out: repoName + "@", + }) + + // Try trusted service update on an untrusted tag. + + repoName = fmt.Sprintf("%v/untrustedservicecreate/createtest:latest", privateRegistryURL) + // tag the image and upload it to the private registry + cli.DockerCmd(c, "tag", "busybox", repoName) + cli.DockerCmd(c, "push", repoName) + cli.DockerCmd(c, "rmi", repoName) + + cli.Docker(cli.Args("service", "update", "--no-resolve-image", "--image", repoName, name), trustedCmd, cli.Daemon(d.Daemon)).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Error: remote trust data does not exist", + }) +} + +// Test case for issue #27866, which did not allow NW name that is the prefix of a swarm NW ID. +// e.g. if the ingress ID starts with "n1", it was impossible to create a NW named "n1". 
+func (s *DockerSwarmSuite) TestSwarmNetworkCreateIssue27866(c *check.C) { + d := s.AddDaemon(c, true, true) + out, err := d.Cmd("network", "inspect", "-f", "{{.Id}}", "ingress") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + ingressID := strings.TrimSpace(out) + c.Assert(ingressID, checker.Not(checker.Equals), "") + + // create a network whose name is a prefix of the ID of an overlay network + // (ingressID in this case) + newNetName := ingressID[0:2] + out, err = d.Cmd("network", "create", "--driver", "overlay", newNetName) + // In #27866, it was failing because of "network with name %s already exists" + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + out, err = d.Cmd("network", "rm", newNetName) + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) +} + +// Test case for https://github.com/docker/docker/pull/27938#issuecomment-265768303 +// This test creates two networks with the same name sequentially, with various drivers. +// Since the operations in this test are done sequentially, the 2nd call should fail with +// "network with name FOO already exists". +// Note that it is ok to have multiple networks with the same name if the operations are done +// in parallel. (#18864) +func (s *DockerSwarmSuite) TestSwarmNetworkCreateDup(c *check.C) { + d := s.AddDaemon(c, true, true) + drivers := []string{"bridge", "overlay"} + for i, driver1 := range drivers { + nwName := fmt.Sprintf("network-test-%d", i) + for _, driver2 := range drivers { + c.Logf("Creating a network named %q with %q, then %q", + nwName, driver1, driver2) + out, err := d.Cmd("network", "create", "--driver", driver1, nwName) + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + out, err = d.Cmd("network", "create", "--driver", driver2, nwName) + c.Assert(out, checker.Contains, + fmt.Sprintf("network with name %s already exists", nwName)) + c.Assert(err, checker.NotNil) + c.Logf("As expected, the attempt to create network %q with %q failed: %s", + nwName, driver2, out) + out, err = d.Cmd("network", "rm", nwName) + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + } + } +} + +func (s *DockerSwarmSuite) TestSwarmServicePsMultipleServiceIDs(c *check.C) { + d := s.AddDaemon(c, true, true) + + name1 := "top1" + out, err := d.Cmd("service", "create", "--no-resolve-image", "--detach=true", "--name", name1, "--replicas=3", "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + id1 := strings.TrimSpace(out) + + name2 := "top2" + out, err = d.Cmd("service", "create", "--no-resolve-image", "--detach=true", "--name", name2, "--replicas=3", "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + id2 := strings.TrimSpace(out) + + // make sure task has been deployed.
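+ // two services with three replicas each should yield six running containers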
+ waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 6) + + out, err = d.Cmd("service", "ps", name1) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name1+".1") + c.Assert(out, checker.Contains, name1+".2") + c.Assert(out, checker.Contains, name1+".3") + c.Assert(out, checker.Not(checker.Contains), name2+".1") + c.Assert(out, checker.Not(checker.Contains), name2+".2") + c.Assert(out, checker.Not(checker.Contains), name2+".3") + + out, err = d.Cmd("service", "ps", name1, name2) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name1+".1") + c.Assert(out, checker.Contains, name1+".2") + c.Assert(out, checker.Contains, name1+".3") + c.Assert(out, checker.Contains, name2+".1") + c.Assert(out, checker.Contains, name2+".2") + c.Assert(out, checker.Contains, name2+".3") + + // Name Prefix + out, err = d.Cmd("service", "ps", "to") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name1+".1") + c.Assert(out, checker.Contains, name1+".2") + c.Assert(out, checker.Contains, name1+".3") + c.Assert(out, checker.Contains, name2+".1") + c.Assert(out, checker.Contains, name2+".2") + c.Assert(out, checker.Contains, name2+".3") + + // Name Prefix (no hit) + out, err = d.Cmd("service", "ps", "noname") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "no such services: noname") + + out, err = d.Cmd("service", "ps", id1) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name1+".1") + c.Assert(out, checker.Contains, name1+".2") + c.Assert(out, checker.Contains, name1+".3") + c.Assert(out, checker.Not(checker.Contains), name2+".1") + c.Assert(out, checker.Not(checker.Contains), name2+".2") + c.Assert(out, checker.Not(checker.Contains), name2+".3") + + out, err = d.Cmd("service", "ps", id1, id2) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name1+".1") + c.Assert(out, checker.Contains, name1+".2") + c.Assert(out, checker.Contains, name1+".3") + c.Assert(out, checker.Contains, name2+".1") + c.Assert(out, checker.Contains, name2+".2") + c.Assert(out, checker.Contains, name2+".3") +} + +func (s *DockerSwarmSuite) TestSwarmPublishDuplicatePorts(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("service", "create", "--no-resolve-image", "--detach=true", "--publish", "5005:80", "--publish", "5006:80", "--publish", "80", "--publish", "80", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf(out)) + id := strings.TrimSpace(out) + + // make sure task has been deployed. 
+ waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + + // Total len = 4, with 2 dynamic ports and 2 non-dynamic ports + // Dynamic ports are likely to be 30000 and 30001 but doesn't matter + out, err = d.Cmd("service", "inspect", "--format", "{{.Endpoint.Ports}} len={{len .Endpoint.Ports}}", id) + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "len=4") + c.Assert(out, checker.Contains, "{ tcp 80 5005 ingress}") + c.Assert(out, checker.Contains, "{ tcp 80 5006 ingress}") +} + +func (s *DockerSwarmSuite) TestSwarmJoinWithDrain(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("node", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), "Drain") + + out, err = d.Cmd("swarm", "join-token", "-q", "manager") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + token := strings.TrimSpace(out) + + d1 := s.AddDaemon(c, false, false) + + out, err = d1.Cmd("swarm", "join", "--availability=drain", "--token", token, d.ListenAddr) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + out, err = d.Cmd("node", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Drain") + + out, err = d1.Cmd("node", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Drain") +} + +func (s *DockerSwarmSuite) TestSwarmInitWithDrain(c *check.C) { + d := s.AddDaemon(c, false, false) + + out, err := d.Cmd("swarm", "init", "--availability", "drain") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + + out, err = d.Cmd("node", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Drain") +} + +func (s *DockerSwarmSuite) TestSwarmReadonlyRootfs(c *check.C) { + testRequires(c, DaemonIsLinux, UserNamespaceROMount) + + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", "top", "--read-only", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // make sure task has been deployed. 
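+ // --read-only should propagate both to the service spec (ContainerSpec.ReadOnly) and to the task container's HostConfig.ReadonlyRootfs; both are asserted below.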
+ waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + + out, err = d.Cmd("service", "inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.ReadOnly }}", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "true") + + containers := d.ActiveContainers() + out, err = d.Cmd("inspect", "--type", "container", "--format", "{{.HostConfig.ReadonlyRootfs}}", containers[0]) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "true") +} + +func (s *DockerSwarmSuite) TestNetworkInspectWithDuplicateNames(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "foo" + networkCreateRequest := types.NetworkCreateRequest{ + Name: name, + NetworkCreate: types.NetworkCreate{ + CheckDuplicate: false, + Driver: "bridge", + }, + } + + var n1 types.NetworkCreateResponse + status, body, err := d.SockRequest("POST", "/networks/create", networkCreateRequest) + c.Assert(err, checker.IsNil, check.Commentf(string(body))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(body))) + c.Assert(json.Unmarshal(body, &n1), checker.IsNil) + + // Full ID always works + out, err := d.Cmd("network", "inspect", "--format", "{{.ID}}", n1.ID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, n1.ID) + + // Name works if it is unique + out, err = d.Cmd("network", "inspect", "--format", "{{.ID}}", name) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, n1.ID) + + var n2 types.NetworkCreateResponse + status, body, err = d.SockRequest("POST", "/networks/create", networkCreateRequest) + c.Assert(err, checker.IsNil, check.Commentf(string(body))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(body))) + c.Assert(json.Unmarshal(body, &n2), checker.IsNil) + + // Full ID always works + out, err = d.Cmd("network", "inspect", "--format", "{{.ID}}", n1.ID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, n1.ID) + + out, err = d.Cmd("network", "inspect", "--format", "{{.ID}}", n2.ID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, n2.ID) + + // Name with duplicates + out, err = d.Cmd("network", "inspect", "--format", "{{.ID}}", name) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "network foo is ambiguous (2 matches found based on name)") + + out, err = d.Cmd("network", "rm", n2.ID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Duplicates with name but with different driver + networkCreateRequest.NetworkCreate.Driver = "overlay" + + status, body, err = d.SockRequest("POST", "/networks/create", networkCreateRequest) + c.Assert(err, checker.IsNil, check.Commentf(string(body))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(body))) + c.Assert(json.Unmarshal(body, &n2), checker.IsNil) + + // Full ID always works + out, err = d.Cmd("network", "inspect", "--format", "{{.ID}}", n1.ID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, n1.ID) + + out, err = d.Cmd("network", "inspect", "--format", "{{.ID}}", n2.ID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, n2.ID) + + // Name with duplicates + out, err = d.Cmd("network", "inspect", 
"--format", "{{.ID}}", name) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "network foo is ambiguous (2 matches found based on name)") +} + +func (s *DockerSwarmSuite) TestSwarmStopSignal(c *check.C) { + testRequires(c, DaemonIsLinux, UserNamespaceROMount) + + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", "top", "--stop-signal=SIGHUP", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + + out, err = d.Cmd("service", "inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.StopSignal }}", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "SIGHUP") + + containers := d.ActiveContainers() + out, err = d.Cmd("inspect", "--type", "container", "--format", "{{.Config.StopSignal}}", containers[0]) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "SIGHUP") + + out, err = d.Cmd("service", "update", "--stop-signal=SIGUSR1", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.StopSignal }}", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "SIGUSR1") +} + +func (s *DockerSwarmSuite) TestSwarmServiceLsFilterMode(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", "top1", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + out, err = d.Cmd("service", "create", "--no-resolve-image", "--name", "top2", "--mode=global", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + // make sure task has been deployed. 
+ waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 2) + + out, err = d.Cmd("service", "ls") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "top1") + c.Assert(out, checker.Contains, "top2") + c.Assert(out, checker.Not(checker.Contains), "localnet") + + out, err = d.Cmd("service", "ls", "--filter", "mode=global") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Not(checker.Contains), "top1") + c.Assert(out, checker.Contains, "top2") + + out, err = d.Cmd("service", "ls", "--filter", "mode=replicated") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "top1") + c.Assert(out, checker.Not(checker.Contains), "top2") +} + +func (s *DockerSwarmSuite) TestSwarmInitUnspecifiedDataPathAddr(c *check.C) { + d := s.AddDaemon(c, false, false) + + out, err := d.Cmd("swarm", "init", "--data-path-addr", "0.0.0.0") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "data path address must be a non-zero IP") + + out, err = d.Cmd("swarm", "init", "--data-path-addr", "0.0.0.0:2000") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "data path address must be a non-zero IP") +} + +func (s *DockerSwarmSuite) TestSwarmJoinLeave(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("swarm", "join-token", "-q", "worker") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + token := strings.TrimSpace(out) + + // Verify that back-to-back join/leave does not cause panics + d1 := s.AddDaemon(c, false, false) + for i := 0; i < 10; i++ { + out, err = d1.Cmd("swarm", "join", "--token", token, d.ListenAddr) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + _, err = d1.Cmd("swarm", "leave") + c.Assert(err, checker.IsNil) + } +} + +const defaultRetryCount = 10 + +func waitForEvent(c *check.C, d *daemon.Swarm, since string, filter string, event string, retry int) string { + if retry < 1 { + c.Fatalf("retry count %d is invalid.
It should be no less than 1", retry) + return "" + } + var out string + for i := 0; i < retry; i++ { + until := daemonUnixTime(c) + var err error + if len(filter) > 0 { + out, err = d.Cmd("events", "--since", since, "--until", until, filter) + } else { + out, err = d.Cmd("events", "--since", since, "--until", until) + } + c.Assert(err, checker.IsNil, check.Commentf(out)) + if strings.Contains(out, event) { + return strings.TrimSpace(out) + } + // no need to sleep after last retry + if i < retry-1 { + time.Sleep(200 * time.Millisecond) + } + } + c.Fatalf("docker events output '%s' doesn't contain event '%s'", out, event) + return "" +} + +func (s *DockerSwarmSuite) TestSwarmClusterEventsSource(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, true) + d3 := s.AddDaemon(c, true, false) + + // create a network + out, err := d1.Cmd("network", "create", "--attachable", "-d", "overlay", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + networkID := strings.TrimSpace(out) + c.Assert(networkID, checker.Not(checker.Equals), "") + + // d1, d2 are managers that can get swarm events + waitForEvent(c, d1, "0", "-f scope=swarm", "network create "+networkID, defaultRetryCount) + waitForEvent(c, d2, "0", "-f scope=swarm", "network create "+networkID, defaultRetryCount) + + // d3 is a worker, not able to get cluster events + out = waitForEvent(c, d3, "0", "-f scope=swarm", "", 1) + c.Assert(out, checker.Not(checker.Contains), "network create ") +} + +func (s *DockerSwarmSuite) TestSwarmClusterEventsScope(c *check.C) { + d := s.AddDaemon(c, true, true) + + // create a service + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", "test", "--detach=false", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + serviceID := strings.Split(out, "\n")[0] + + // scope swarm filters cluster events + out = waitForEvent(c, d, "0", "-f scope=swarm", "service create "+serviceID, defaultRetryCount) + c.Assert(out, checker.Not(checker.Contains), "container create ") + + // all events are returned if scope is not specified + waitForEvent(c, d, "0", "", "service create "+serviceID, 1) + waitForEvent(c, d, "0", "", "container create ", defaultRetryCount) + + // scope local only shows non-cluster events + out = waitForEvent(c, d, "0", "-f scope=local", "container create ", 1) + c.Assert(out, checker.Not(checker.Contains), "service create ") +} + +func (s *DockerSwarmSuite) TestSwarmClusterEventsType(c *check.C) { + d := s.AddDaemon(c, true, true) + + // create a service + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", "test", "--detach=false", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + serviceID := strings.Split(out, "\n")[0] + + // create a network + out, err = d.Cmd("network", "create", "--attachable", "-d", "overlay", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + networkID := strings.TrimSpace(out) + c.Assert(networkID, checker.Not(checker.Equals), "") + + // filter by service + out = waitForEvent(c, d, "0", "-f type=service", "service create "+serviceID, defaultRetryCount) + c.Assert(out, checker.Not(checker.Contains), "network create") + + // filter by network + out = waitForEvent(c, d, "0", "-f type=network", "network create "+networkID, defaultRetryCount) + c.Assert(out, checker.Not(checker.Contains), "service create") +} + +func (s *DockerSwarmSuite) TestSwarmClusterEventsService(c *check.C) { + d := s.AddDaemon(c, true, true) + + // create a service + out, err := 
d.Cmd("service", "create", "--no-resolve-image", "--name", "test", "--detach=false", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + serviceID := strings.Split(out, "\n")[0] + + // validate service create event + waitForEvent(c, d, "0", "-f scope=swarm", "service create "+serviceID, defaultRetryCount) + + t1 := daemonUnixTime(c) + out, err = d.Cmd("service", "update", "--force", "--detach=false", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // wait for service update start + out = waitForEvent(c, d, t1, "-f scope=swarm", "service update "+serviceID, defaultRetryCount) + c.Assert(out, checker.Contains, "updatestate.new=updating") + + // allow service update complete. This is a service with 1 instance + time.Sleep(400 * time.Millisecond) + out = waitForEvent(c, d, t1, "-f scope=swarm", "service update "+serviceID, defaultRetryCount) + c.Assert(out, checker.Contains, "updatestate.new=completed, updatestate.old=updating") + + // scale service + t2 := daemonUnixTime(c) + out, err = d.Cmd("service", "scale", "test=3") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out = waitForEvent(c, d, t2, "-f scope=swarm", "service update "+serviceID, defaultRetryCount) + c.Assert(out, checker.Contains, "replicas.new=3, replicas.old=1") + + // remove service + t3 := daemonUnixTime(c) + out, err = d.Cmd("service", "rm", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + waitForEvent(c, d, t3, "-f scope=swarm", "service remove "+serviceID, defaultRetryCount) +} + +func (s *DockerSwarmSuite) TestSwarmClusterEventsNode(c *check.C) { + d1 := s.AddDaemon(c, true, true) + s.AddDaemon(c, true, true) + d3 := s.AddDaemon(c, true, true) + + d3ID := d3.NodeID + waitForEvent(c, d1, "0", "-f scope=swarm", "node create "+d3ID, defaultRetryCount) + + t1 := daemonUnixTime(c) + out, err := d1.Cmd("node", "update", "--availability=pause", d3ID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // filter by type + out = waitForEvent(c, d1, t1, "-f type=node", "node update "+d3ID, defaultRetryCount) + c.Assert(out, checker.Contains, "availability.new=pause, availability.old=active") + + t2 := daemonUnixTime(c) + out, err = d1.Cmd("node", "demote", d3ID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + waitForEvent(c, d1, t2, "-f type=node", "node update "+d3ID, defaultRetryCount) + + t3 := daemonUnixTime(c) + out, err = d1.Cmd("node", "rm", "-f", d3ID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // filter by scope + waitForEvent(c, d1, t3, "-f scope=swarm", "node remove "+d3ID, defaultRetryCount) +} + +func (s *DockerSwarmSuite) TestSwarmClusterEventsNetwork(c *check.C) { + d := s.AddDaemon(c, true, true) + + // create a network + out, err := d.Cmd("network", "create", "--attachable", "-d", "overlay", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + networkID := strings.TrimSpace(out) + + waitForEvent(c, d, "0", "-f scope=swarm", "network create "+networkID, defaultRetryCount) + + // remove network + t1 := daemonUnixTime(c) + out, err = d.Cmd("network", "rm", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // filtered by network + waitForEvent(c, d, t1, "-f type=network", "network remove "+networkID, defaultRetryCount) +} + +func (s *DockerSwarmSuite) TestSwarmClusterEventsSecret(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_secret" + id := d.CreateSecret(c, swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, 
checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + waitForEvent(c, d, "0", "-f scope=swarm", "secret create "+id, defaultRetryCount) + + t1 := daemonUnixTime(c) + d.DeleteSecret(c, id) + // filtered by secret + waitForEvent(c, d, t1, "-f type=secret", "secret remove "+id, defaultRetryCount) +} + +func (s *DockerSwarmSuite) TestSwarmClusterEventsConfig(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_config" + id := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + + waitForEvent(c, d, "0", "-f scope=swarm", "config create "+id, defaultRetryCount) + + t1 := daemonUnixTime(c) + d.DeleteConfig(c, id) + // filtered by config + waitForEvent(c, d, t1, "-f type=config", "config remove "+id, defaultRetryCount) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_swarm_unix_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_swarm_unix_test.go new file mode 100644 index 000000000..cffabcc2a --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_swarm_unix_test.go @@ -0,0 +1,104 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "strings" + "time" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestSwarmVolumePlugin(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("service", "create", "--no-resolve-image", "--mount", "type=volume,source=my-volume,destination=/foo,volume-driver=customvolumedriver", "--name", "top", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Make sure task stays pending before plugin is available + waitAndAssert(c, defaultReconciliationTimeout, d.CheckServiceTasksInState("top", swarm.TaskStatePending, "missing plugin on 1 node"), checker.Equals, 1) + + plugin := newVolumePlugin(c, "customvolumedriver") + defer plugin.Close() + + // create a dummy volume to trigger lazy loading of the plugin + out, err = d.Cmd("volume", "create", "-d", "customvolumedriver", "hello") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // TODO(aaronl): It will take about 15 seconds for swarm to realize the + // plugin was loaded. Switching the test over to plugin v2 would avoid + // this long delay. + + // make sure task has been deployed.
+ waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + + out, err = d.Cmd("ps", "-q") + c.Assert(err, checker.IsNil) + containerID := strings.TrimSpace(out) + + out, err = d.Cmd("inspect", "-f", "{{json .Mounts}}", containerID) + c.Assert(err, checker.IsNil) + + var mounts []struct { + Name string + Driver string + } + + c.Assert(json.NewDecoder(strings.NewReader(out)).Decode(&mounts), checker.IsNil) + c.Assert(len(mounts), checker.Equals, 1, check.Commentf(out)) + c.Assert(mounts[0].Name, checker.Equals, "my-volume") + c.Assert(mounts[0].Driver, checker.Equals, "customvolumedriver") +} + +// Test network plugin filter in swarm +func (s *DockerSwarmSuite) TestSwarmNetworkPluginV2(c *check.C) { + testRequires(c, IsAmd64) + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, false) + + // install plugin on d1 and d2 + pluginName := "aragunathan/global-net-plugin:latest" + + _, err := d1.Cmd("plugin", "install", pluginName, "--grant-all-permissions") + c.Assert(err, checker.IsNil) + + _, err = d2.Cmd("plugin", "install", pluginName, "--grant-all-permissions") + c.Assert(err, checker.IsNil) + + // create network + networkName := "globalnet" + _, err = d1.Cmd("network", "create", "--driver", pluginName, networkName) + c.Assert(err, checker.IsNil) + + // create a global service to ensure that both nodes will have an instance + serviceName := "my-service" + _, err = d1.Cmd("service", "create", "--no-resolve-image", "--name", serviceName, "--mode=global", "--network", networkName, "busybox", "top") + c.Assert(err, checker.IsNil) + + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, 2) + + // remove service + _, err = d1.Cmd("service", "rm", serviceName) + c.Assert(err, checker.IsNil) + + // wait to ensure all containers have exited before removing the plugin. Else there's a + // possibility of container exits erroring out due to plugins being unavailable. + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, 0) + + // disable plugin on worker + _, err = d2.Cmd("plugin", "disable", "-f", pluginName) + c.Assert(err, checker.IsNil) + + time.Sleep(20 * time.Second) + + image := "busybox" + // create a new global service again. 
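+ // with the plugin disabled on the worker d2, only the manager d1 should be able to run a task, hence the single-entry image count asserted below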
+ _, err = d1.Cmd("service", "create", "--no-resolve-image", "--name", serviceName, "--mode=global", "--network", networkName, image, "top") + c.Assert(err, checker.IsNil) + + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image: 1}) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_tag_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_tag_test.go new file mode 100644 index 000000000..907977fc0 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_tag_test.go @@ -0,0 +1,168 @@ +package main + +import ( + "fmt" + "strings" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/stringutils" + "github.com/go-check/check" +) + +// tagging a named image in a new unprefixed repo should work +func (s *DockerSuite) TestTagUnprefixedRepoByName(c *check.C) { + dockerCmd(c, "tag", "busybox:latest", "testfoobarbaz") +} + +// tagging an image by ID in a new unprefixed repo should work +func (s *DockerSuite) TestTagUnprefixedRepoByID(c *check.C) { + imageID := inspectField(c, "busybox", "Id") + dockerCmd(c, "tag", imageID, "testfoobarbaz") +} + +// ensure we don't allow the use of invalid repository names; these tag operations should fail +func (s *DockerSuite) TestTagInvalidUnprefixedRepo(c *check.C) { + invalidRepos := []string{"fo$z$", "Foo@3cc", "Foo$3", "Foo*3", "Fo^3", "Foo!3", "F)xcz(", "fo%asd", "FOO/bar"} + + for _, repo := range invalidRepos { + out, _, err := dockerCmdWithError("tag", "busybox", repo) + c.Assert(err, checker.NotNil, check.Commentf("tag busybox %v should have failed: %v", repo, out)) + } +} + +// ensure we don't allow the use of invalid tags; these tag operations should fail +func (s *DockerSuite) TestTagInvalidPrefixedRepo(c *check.C) { + longTag := stringutils.GenerateRandomAlphaOnlyString(121) + + invalidTags := []string{"repo:fo$z$", "repo:Foo@3cc", "repo:Foo$3", "repo:Foo*3", "repo:Fo^3", "repo:Foo!3", "repo:%goodbye", "repo:#hashtagit", "repo:F)xcz(", "repo:-foo", "repo:..", longTag} + + for _, repotag := range invalidTags { + out, _, err := dockerCmdWithError("tag", "busybox", repotag) + c.Assert(err, checker.NotNil, check.Commentf("tag busybox %v should have failed: %v", repotag, out)) + } +} + +// ensure we allow the use of valid tags +func (s *DockerSuite) TestTagValidPrefixedRepo(c *check.C) { + validRepos := []string{"fooo/bar", "fooaa/test", "foooo:t", "HOSTNAME.DOMAIN.COM:443/foo/bar"} + + for _, repo := range validRepos { + _, _, err := dockerCmdWithError("tag", "busybox:latest", repo) + if err != nil { + c.Errorf("tag busybox %v should have worked: %s", repo, err) + continue + } + deleteImages(repo) + } +} + +// tagging an image with an existing tag name without the -f option should work +func (s *DockerSuite) TestTagExistedNameWithoutForce(c *check.C) { + dockerCmd(c, "tag", "busybox:latest", "busybox:test") +} + +func (s *DockerSuite) TestTagWithPrefixHyphen(c *check.C) { + // test repository name beginning with '-' + out, _, err := dockerCmdWithError("tag", "busybox:latest", "-busybox:test") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Error parsing reference", check.Commentf("tagging a name beginning with '-' should fail")) + + // test namespace name beginning with '-' + out, _, err = dockerCmdWithError("tag", "busybox:latest", "-test/busybox:test") + c.Assert(err, checker.NotNil,
check.Commentf(out)) + c.Assert(out, checker.Contains, "Error parsing reference", check.Commentf("tagging a name beginning with '-' should fail")) + + // test index name beginning with '-' + out, _, err = dockerCmdWithError("tag", "busybox:latest", "-index:5000/busybox:test") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Error parsing reference", check.Commentf("tagging a name beginning with '-' should fail")) +} + +// ensure tagging using official names works +// ensure all tags result in the same name +func (s *DockerSuite) TestTagOfficialNames(c *check.C) { + names := []string{ + "docker.io/busybox", + "index.docker.io/busybox", + "library/busybox", + "docker.io/library/busybox", + "index.docker.io/library/busybox", + } + + for _, name := range names { + out, exitCode, err := dockerCmdWithError("tag", "busybox:latest", name+":latest") + if err != nil || exitCode != 0 { + c.Errorf("tag busybox %v should have worked: %s, %s", name, err, out) + continue + } + + // ensure we don't have multiple tag names. + out, _, err = dockerCmdWithError("images") + if err != nil { + c.Errorf("listing images failed with errors: %v, %s", err, out) + } else if strings.Contains(out, name) { + c.Errorf("images should not have listed '%s'", name) + deleteImages(name + ":latest") + } + } + + for _, name := range names { + _, exitCode, err := dockerCmdWithError("tag", name+":latest", "fooo/bar:latest") + if err != nil || exitCode != 0 { + c.Errorf("tag %v fooo/bar should have worked: %s", name, err) + continue + } + deleteImages("fooo/bar:latest") + } +} + +// ensure tags cannot match digests +func (s *DockerSuite) TestTagMatchesDigest(c *check.C) { + digest := "busybox@sha256:abcdef76720241213f5303bda7704ec4c2ef75613173910a56fb1b6e20251507" + // tagging with a digest reference should fail + _, _, err := dockerCmdWithError("tag", "busybox:latest", digest) + if err == nil { + c.Fatal("tagging a name with a digest should have failed") + } + // check that no new image matches the digest + _, _, err = dockerCmdWithError("inspect", digest) + if err == nil { + c.Fatal("inspecting by digest should have failed") + } +} + +func (s *DockerSuite) TestTagInvalidRepoName(c *check.C) { + // test setting tag fails + _, _, err := dockerCmdWithError("tag", "busybox:latest", "sha256:sometag") + if err == nil { + c.Fatal("tagging with image named \"sha256\" should have failed") + } +} + +// ensure tags cannot create ambiguity with image ids +func (s *DockerSuite) TestTagTruncationAmbiguity(c *check.C) { + buildImageSuccessfully(c, "notbusybox:latest", build.WithDockerfile(`FROM busybox + MAINTAINER dockerio`)) + imageID := getIDByName(c, "notbusybox:latest") + truncatedImageID := stringid.TruncateID(imageID) + truncatedTag := fmt.Sprintf("notbusybox:%s", truncatedImageID) + + id := inspectField(c, truncatedTag, "Id") + + // Ensure inspect by image id returns image for image id + c.Assert(id, checker.Equals, imageID) + c.Logf("Built image: %s", imageID) + + // tagging with a truncated image ID as the tag name should work + _, _, err := dockerCmdWithError("tag", "busybox:latest", truncatedTag) + if err != nil { + c.Fatalf("Error tagging with an image id: %s", err) + } + + id = inspectField(c, truncatedTag, "Id") + + // Ensure id is imageID and not busybox:latest + c.Assert(id, checker.Not(checker.Equals), imageID) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_top_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_top_test.go new file mode 100644 index 000000000..ea32fc672 --- /dev/null +++
b/vendor/github.com/moby/moby/integration-cli/docker_cli_top_test.go @@ -0,0 +1,73 @@ +package main + +import ( + "strings" + + "github.com/docker/docker/integration-cli/checker" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestTopMultipleArgs(c *check.C) { + out := runSleepingContainer(c, "-d") + cleanedContainerID := strings.TrimSpace(out) + + var expected icmd.Expected + switch testEnv.DaemonPlatform() { + case "windows": + expected = icmd.Expected{ExitCode: 1, Err: "Windows does not support arguments to top"} + default: + expected = icmd.Expected{Out: "PID"} + } + result := dockerCmdWithResult("top", cleanedContainerID, "-o", "pid") + c.Assert(result, icmd.Matches, expected) +} + +func (s *DockerSuite) TestTopNonPrivileged(c *check.C) { + out := runSleepingContainer(c, "-d") + cleanedContainerID := strings.TrimSpace(out) + + out1, _ := dockerCmd(c, "top", cleanedContainerID) + out2, _ := dockerCmd(c, "top", cleanedContainerID) + dockerCmd(c, "kill", cleanedContainerID) + + // Windows will list the name of the launched executable which in this case is busybox.exe, without the parameters. + // Linux will display the command executed in the container + var lookingFor string + if testEnv.DaemonPlatform() == "windows" { + lookingFor = "busybox.exe" + } else { + lookingFor = "top" + } + + c.Assert(out1, checker.Contains, lookingFor, check.Commentf("top should've listed `%s` in the process list, but failed the first time", lookingFor)) + c.Assert(out2, checker.Contains, lookingFor, check.Commentf("top should've listed `%s` in the process list, but failed the second time", lookingFor)) +} + +// TestTopWindowsCoreProcesses validates that there are lines for the critical +// processes which are found in a Windows container. Note Windows is architecturally +// very different to Linux in this regard. +func (s *DockerSuite) TestTopWindowsCoreProcesses(c *check.C) { + testRequires(c, DaemonIsWindows) + out := runSleepingContainer(c, "-d") + cleanedContainerID := strings.TrimSpace(out) + out1, _ := dockerCmd(c, "top", cleanedContainerID) + lookingFor := []string{"smss.exe", "csrss.exe", "wininit.exe", "services.exe", "lsass.exe", "CExecSvc.exe"} + for i, s := range lookingFor { + c.Assert(out1, checker.Contains, s, check.Commentf("top should've listed `%s` in the process list, but failed. 
Test case %d", s, i)) + } +} + +func (s *DockerSuite) TestTopPrivileged(c *check.C) { + // Windows does not support --privileged + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "--privileged", "-i", "-d", "busybox", "top") + cleanedContainerID := strings.TrimSpace(out) + + out1, _ := dockerCmd(c, "top", cleanedContainerID) + out2, _ := dockerCmd(c, "top", cleanedContainerID) + dockerCmd(c, "kill", cleanedContainerID) + + c.Assert(out1, checker.Contains, "top", check.Commentf("top should've listed `top` in the process list, but failed the first time")) + c.Assert(out2, checker.Contains, "top", check.Commentf("top should've listed `top` in the process list, but failed the second time")) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_update_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_update_test.go new file mode 100644 index 000000000..c898690c5 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_update_test.go @@ -0,0 +1,43 @@ +package main + +import ( + "strings" + "time" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestUpdateRestartPolicy(c *check.C) { + out := cli.DockerCmd(c, "run", "-d", "--restart=on-failure:3", "busybox", "sh", "-c", "sleep 1 && false").Combined() + timeout := 60 * time.Second + if testEnv.DaemonPlatform() == "windows" { + timeout = 180 * time.Second + } + + id := strings.TrimSpace(string(out)) + + // update restart policy to on-failure:5 + cli.DockerCmd(c, "update", "--restart=on-failure:5", id) + + cli.WaitExited(c, id, timeout) + + count := inspectField(c, id, "RestartCount") + c.Assert(count, checker.Equals, "5") + + maximumRetryCount := inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount") + c.Assert(maximumRetryCount, checker.Equals, "5") +} + +func (s *DockerSuite) TestUpdateRestartWithAutoRemoveFlag(c *check.C) { + out := runSleepingContainer(c, "--rm") + id := strings.TrimSpace(out) + + // update restart policy for an AutoRemove container + cli.Docker(cli.Args("update", "--restart=always", id)).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Restart policy cannot be updated because AutoRemove is enabled for the container", + }) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_update_unix_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_update_unix_test.go new file mode 100644 index 000000000..be2274bb3 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_update_unix_test.go @@ -0,0 +1,319 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "os/exec" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/go-check/check" + "github.com/kr/pty" +) + +func (s *DockerSuite) TestUpdateRunningContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "-m", "300M", "busybox", "top") + dockerCmd(c, "update", "-m", "500M", name) + + c.Assert(inspectField(c, name, "HostConfig.Memory"), checker.Equals, "524288000") + + file := "/sys/fs/cgroup/memory/memory.limit_in_bytes" + out, _ := dockerCmd(c, "exec", name, "cat", file) 
+ c.Assert(strings.TrimSpace(out), checker.Equals, "524288000") +} + +func (s *DockerSuite) TestUpdateRunningContainerWithRestart(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "-m", "300M", "busybox", "top") + dockerCmd(c, "update", "-m", "500M", name) + dockerCmd(c, "restart", name) + + c.Assert(inspectField(c, name, "HostConfig.Memory"), checker.Equals, "524288000") + + file := "/sys/fs/cgroup/memory/memory.limit_in_bytes" + out, _ := dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "524288000") +} + +func (s *DockerSuite) TestUpdateStoppedContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + + name := "test-update-container" + file := "/sys/fs/cgroup/memory/memory.limit_in_bytes" + dockerCmd(c, "run", "--name", name, "-m", "300M", "busybox", "cat", file) + dockerCmd(c, "update", "-m", "500M", name) + + c.Assert(inspectField(c, name, "HostConfig.Memory"), checker.Equals, "524288000") + + out, _ := dockerCmd(c, "start", "-a", name) + c.Assert(strings.TrimSpace(out), checker.Equals, "524288000") +} + +func (s *DockerSuite) TestUpdatePausedContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, cpuShare) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "--cpu-shares", "1000", "busybox", "top") + dockerCmd(c, "pause", name) + dockerCmd(c, "update", "--cpu-shares", "500", name) + + c.Assert(inspectField(c, name, "HostConfig.CPUShares"), checker.Equals, "500") + + dockerCmd(c, "unpause", name) + file := "/sys/fs/cgroup/cpu/cpu.shares" + out, _ := dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "500") +} + +func (s *DockerSuite) TestUpdateWithUntouchedFields(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + testRequires(c, cpuShare) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "-m", "300M", "--cpu-shares", "800", "busybox", "top") + dockerCmd(c, "update", "-m", "500M", name) + + // Update memory without touching cpu shares; `cpu.shares` should still have the old value + out := inspectField(c, name, "HostConfig.CPUShares") + c.Assert(out, check.Equals, "800") + + file := "/sys/fs/cgroup/cpu/cpu.shares" + out, _ = dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "800") +} + +func (s *DockerSuite) TestUpdateContainerInvalidValue(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "-m", "300M", "busybox", "true") + out, _, err := dockerCmdWithError("update", "-m", "2M", name) + c.Assert(err, check.NotNil) + expected := "Minimum memory limit allowed is 4MB" + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerSuite) TestUpdateContainerWithoutFlags(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "-m", "300M", "busybox", "true") + _, _, err := dockerCmdWithError("update", name) + c.Assert(err, check.NotNil) +} + +func (s *DockerSuite) TestUpdateKernelMemory(c *check.C) { + testRequires(c, DaemonIsLinux, kernelMemorySupport) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "--kernel-memory", "50M", "busybox", "top") + dockerCmd(c,
"update", "--kernel-memory", "100M", name) + + c.Assert(inspectField(c, name, "HostConfig.KernelMemory"), checker.Equals, "104857600") + + file := "/sys/fs/cgroup/memory/memory.kmem.limit_in_bytes" + out, _ := dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "104857600") +} + +func (s *DockerSuite) TestUpdateKernelMemoryUninitialized(c *check.C) { + testRequires(c, DaemonIsLinux, kernelMemorySupport) + + isNewKernel := kernel.CheckKernelVersion(4, 6, 0) + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "busybox", "top") + _, _, err := dockerCmdWithError("update", "--kernel-memory", "100M", name) + // Update kernel memory to a running container without kernel memory initialized + // is not allowed before kernel version 4.6. + if !isNewKernel { + c.Assert(err, check.NotNil) + } else { + c.Assert(err, check.IsNil) + } + + dockerCmd(c, "pause", name) + _, _, err = dockerCmdWithError("update", "--kernel-memory", "200M", name) + if !isNewKernel { + c.Assert(err, check.NotNil) + } else { + c.Assert(err, check.IsNil) + } + dockerCmd(c, "unpause", name) + + dockerCmd(c, "stop", name) + dockerCmd(c, "update", "--kernel-memory", "300M", name) + dockerCmd(c, "start", name) + + c.Assert(inspectField(c, name, "HostConfig.KernelMemory"), checker.Equals, "314572800") + + file := "/sys/fs/cgroup/memory/memory.kmem.limit_in_bytes" + out, _ := dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "314572800") +} + +func (s *DockerSuite) TestUpdateSwapMemoryOnly(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + testRequires(c, swapMemorySupport) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "--memory", "300M", "--memory-swap", "500M", "busybox", "top") + dockerCmd(c, "update", "--memory-swap", "600M", name) + + c.Assert(inspectField(c, name, "HostConfig.MemorySwap"), checker.Equals, "629145600") + + file := "/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes" + out, _ := dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "629145600") +} + +func (s *DockerSuite) TestUpdateInvalidSwapMemory(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + testRequires(c, swapMemorySupport) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "--memory", "300M", "--memory-swap", "500M", "busybox", "top") + _, _, err := dockerCmdWithError("update", "--memory-swap", "200M", name) + // Update invalid swap memory should fail. 
+ // This will pass docker config validation, but fail at kernel validation + c.Assert(err, check.NotNil) + + // A failed swap memory update should not change HostConfig + c.Assert(inspectField(c, name, "HostConfig.Memory"), checker.Equals, "314572800") + c.Assert(inspectField(c, name, "HostConfig.MemorySwap"), checker.Equals, "524288000") + + dockerCmd(c, "update", "--memory-swap", "600M", name) + + c.Assert(inspectField(c, name, "HostConfig.MemorySwap"), checker.Equals, "629145600") + + file := "/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes" + out, _ := dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "629145600") +} + +func (s *DockerSuite) TestUpdateStats(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + testRequires(c, cpuCfsQuota) + name := "foo" + dockerCmd(c, "run", "-d", "-ti", "--name", name, "-m", "500m", "busybox") + + c.Assert(waitRun(name), checker.IsNil) + + getMemLimit := func(id string) uint64 { + resp, body, err := request.Get(fmt.Sprintf("/containers/%s/stats?stream=false", id)) + c.Assert(err, checker.IsNil) + c.Assert(resp.Header.Get("Content-Type"), checker.Equals, "application/json") + + var v *types.Stats + err = json.NewDecoder(body).Decode(&v) + c.Assert(err, checker.IsNil) + body.Close() + + return v.MemoryStats.Limit + } + preMemLimit := getMemLimit(name) + + dockerCmd(c, "update", "--cpu-quota", "2000", name) + + curMemLimit := getMemLimit(name) + + c.Assert(preMemLimit, checker.Equals, curMemLimit) +} + +func (s *DockerSuite) TestUpdateMemoryWithSwapMemory(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + testRequires(c, swapMemorySupport) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "--memory", "300M", "busybox", "top") + out, _, err := dockerCmdWithError("update", "--memory", "800M", name) + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "Memory limit should be smaller than already set memoryswap limit") + + dockerCmd(c, "update", "--memory", "800M", "--memory-swap", "1000M", name) +} + +func (s *DockerSuite) TestUpdateNotAffectMonitorRestartPolicy(c *check.C) { + testRequires(c, DaemonIsLinux, cpuShare) + + out, _ := dockerCmd(c, "run", "-tid", "--restart=always", "busybox", "sh") + id := strings.TrimSpace(string(out)) + dockerCmd(c, "update", "--cpu-shares", "512", id) + + cpty, tty, err := pty.Open() + c.Assert(err, checker.IsNil) + defer cpty.Close() + + cmd := exec.Command(dockerBinary, "attach", id) + cmd.Stdin = tty + + c.Assert(cmd.Start(), checker.IsNil) + defer cmd.Process.Kill() + + _, err = cpty.Write([]byte("exit\n")) + c.Assert(err, checker.IsNil) + + c.Assert(cmd.Wait(), checker.IsNil) + + // container should restart again and keep running + err = waitInspect(id, "{{.RestartCount}}", "1", 30*time.Second) + c.Assert(err, checker.IsNil) + c.Assert(waitRun(id), checker.IsNil) +} + +func (s *DockerSuite) TestUpdateWithNanoCPUs(c *check.C) { + testRequires(c, cpuCfsQuota, cpuCfsPeriod) + + file1 := "/sys/fs/cgroup/cpu/cpu.cfs_quota_us" + file2 := "/sys/fs/cgroup/cpu/cpu.cfs_period_us" + + out, _ := dockerCmd(c, "run", "-d", "--cpus", "0.5", "--name", "top", "busybox", "top") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + out, _ = dockerCmd(c, "exec", "top", "sh", "-c", fmt.Sprintf("cat %s && cat %s", file1, file2)) + c.Assert(strings.TrimSpace(out), checker.Equals, "50000\n100000") + + out = inspectField(c, "top", "HostConfig.NanoCpus") +
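+ // NanoCPUs are billionths of a CPU, so --cpus 0.5 is stored as 0.5 * 1e9 = 5e+08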
c.Assert(out, checker.Equals, "5e+08", check.Commentf("setting the Nano CPUs failed")) + out = inspectField(c, "top", "HostConfig.CpuQuota") + c.Assert(out, checker.Equals, "0", check.Commentf("CPU CFS quota should be 0")) + out = inspectField(c, "top", "HostConfig.CpuPeriod") + c.Assert(out, checker.Equals, "0", check.Commentf("CPU CFS period should be 0")) + + out, _, err := dockerCmdWithError("update", "--cpu-quota", "80000", "top") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "Conflicting options: CPU Quota cannot be updated as NanoCPUs has already been set") + + out, _ = dockerCmd(c, "update", "--cpus", "0.8", "top") + out = inspectField(c, "top", "HostConfig.NanoCpus") + c.Assert(out, checker.Equals, "8e+08", check.Commentf("updating the Nano CPUs failed")) + out = inspectField(c, "top", "HostConfig.CpuQuota") + c.Assert(out, checker.Equals, "0", check.Commentf("CPU CFS quota should be 0")) + out = inspectField(c, "top", "HostConfig.CpuPeriod") + c.Assert(out, checker.Equals, "0", check.Commentf("CPU CFS period should be 0")) + + out, _ = dockerCmd(c, "exec", "top", "sh", "-c", fmt.Sprintf("cat %s && cat %s", file1, file2)) + c.Assert(strings.TrimSpace(out), checker.Equals, "80000\n100000") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_userns_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_userns_test.go new file mode 100644 index 000000000..8311401d0 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_userns_test.go @@ -0,0 +1,99 @@ +// +build !windows + +package main + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strconv" + "strings" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/testutil" + "github.com/go-check/check" +) + +// user namespaces test: run daemon with remapped root setting +// 1. validate uid/gid maps are set properly +// 2. 
verify that files created are owned by remapped root +func (s *DockerDaemonSuite) TestDaemonUserNamespaceRootSetting(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon, UserNamespaceInKernel) + + s.d.StartWithBusybox(c, "--userns-remap", "default") + + tmpDir, err := ioutil.TempDir("", "userns") + c.Assert(err, checker.IsNil) + + defer os.RemoveAll(tmpDir) + + // Set a non-existent path + tmpDirNotExists := path.Join(os.TempDir(), "userns"+stringid.GenerateRandomID()) + defer os.RemoveAll(tmpDirNotExists) + + // we need to find the uid and gid of the remapped root from the daemon's root dir info + uidgid := strings.Split(filepath.Base(s.d.Root), ".") + c.Assert(uidgid, checker.HasLen, 2, check.Commentf("Should have gotten uid/gid strings from root dirname: %s", filepath.Base(s.d.Root))) + uid, err := strconv.Atoi(uidgid[0]) + c.Assert(err, checker.IsNil, check.Commentf("Can't parse uid")) + gid, err := strconv.Atoi(uidgid[1]) + c.Assert(err, checker.IsNil, check.Commentf("Can't parse gid")) + + // writable by the remapped root UID/GID pair + c.Assert(os.Chown(tmpDir, uid, gid), checker.IsNil) + + out, err := s.d.Cmd("run", "-d", "--name", "userns", "-v", tmpDir+":/goofy", "-v", tmpDirNotExists+":/donald", "busybox", "sh", "-c", "touch /goofy/testfile; top") + c.Assert(err, checker.IsNil, check.Commentf("Output: %s", out)) + user := s.findUser(c, "userns") + c.Assert(uidgid[0], checker.Equals, user) + + // check that the created directory is owned by remapped uid:gid + statNotExists, err := system.Stat(tmpDirNotExists) + c.Assert(err, checker.IsNil) + c.Assert(statNotExists.UID(), checker.Equals, uint32(uid), check.Commentf("Created directory not owned by remapped root UID")) + c.Assert(statNotExists.GID(), checker.Equals, uint32(gid), check.Commentf("Created directory not owned by remapped root GID")) + + pid, err := s.d.Cmd("inspect", "--format={{.State.Pid}}", "userns") + c.Assert(err, checker.IsNil, check.Commentf("Could not inspect running container: out: %q", pid)) + // check the uid and gid maps for the PID to ensure root is remapped + // (cmd = cat /proc/<pid>/uid_map | grep -E '0\s+9999\s+1') + out, rc1, err := testutil.RunCommandPipelineWithOutput( + exec.Command("cat", "/proc/"+strings.TrimSpace(pid)+"/uid_map"), + exec.Command("grep", "-E", fmt.Sprintf("0[[:space:]]+%d[[:space:]]+", uid))) + c.Assert(rc1, checker.Equals, 0, check.Commentf("Didn't match uid_map: output: %s", out)) + + out, rc2, err := testutil.RunCommandPipelineWithOutput( + exec.Command("cat", "/proc/"+strings.TrimSpace(pid)+"/gid_map"), + exec.Command("grep", "-E", fmt.Sprintf("0[[:space:]]+%d[[:space:]]+", gid))) + c.Assert(rc2, checker.Equals, 0, check.Commentf("Didn't match gid_map: output: %s", out)) + + // check that the touched file is owned by remapped uid:gid + stat, err := system.Stat(filepath.Join(tmpDir, "testfile")) + c.Assert(err, checker.IsNil) + c.Assert(stat.UID(), checker.Equals, uint32(uid), check.Commentf("Touched file not owned by remapped root UID")) + c.Assert(stat.GID(), checker.Equals, uint32(gid), check.Commentf("Touched file not owned by remapped root GID")) + + // use host usernamespace + out, err = s.d.Cmd("run", "-d", "--name", "userns_skip", "--userns", "host", "busybox", "sh", "-c", "touch /goofy/testfile; top") + c.Assert(err, checker.IsNil, check.Commentf("Output: %s", out)) + user = s.findUser(c, "userns_skip") + // userns are skipped, user is root + c.Assert(user, checker.Equals, "root") +} + +// findUser finds the uid or name of the user of the first process that runs 
in a container +func (s *DockerDaemonSuite) findUser(c *check.C, container string) string { + out, err := s.d.Cmd("top", container) + c.Assert(err, checker.IsNil, check.Commentf("Output: %s", out)) + rows := strings.Split(out, "\n") + if len(rows) < 2 { + // No process rows found + c.FailNow() + } + return strings.Fields(rows[1])[0] +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_v2_only_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_v2_only_test.go new file mode 100644 index 000000000..b82cdbde1 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_v2_only_test.go @@ -0,0 +1,120 @@ +package main + +import ( + "fmt" + "io/ioutil" + "net/http" + "os" + + "github.com/docker/docker/integration-cli/registry" + "github.com/go-check/check" +) + +func makefile(path string, contents string) (string, error) { + f, err := ioutil.TempFile(path, "tmp") + if err != nil { + return "", err + } + err = ioutil.WriteFile(f.Name(), []byte(contents), os.ModePerm) + if err != nil { + return "", err + } + return f.Name(), nil +} + +// TestV2Only ensures that a daemon by default does not +// attempt to contact any v1 registry endpoints. +func (s *DockerRegistrySuite) TestV2Only(c *check.C) { + reg, err := registry.NewMock(c) + c.Assert(err, check.IsNil) + defer reg.Close() + + reg.RegisterHandler("/v2/", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(404) + }) + + reg.RegisterHandler("/v1/.*", func(w http.ResponseWriter, r *http.Request) { + c.Fatal("V1 registry contacted") + }) + + repoName := fmt.Sprintf("%s/busybox", reg.URL()) + + s.d.Start(c, "--insecure-registry", reg.URL()) + + tmp, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, check.IsNil) + defer os.RemoveAll(tmp) + + dockerfileName, err := makefile(tmp, fmt.Sprintf("FROM %s/busybox", reg.URL())) + c.Assert(err, check.IsNil, check.Commentf("Unable to create test dockerfile")) + + s.d.Cmd("build", "--file", dockerfileName, tmp) + + s.d.Cmd("run", repoName) + s.d.Cmd("login", "-u", "richard", "-p", "testtest", reg.URL()) + s.d.Cmd("tag", "busybox", repoName) + s.d.Cmd("push", repoName) + s.d.Cmd("pull", repoName) +} + +// TestV1 starts a daemon with legacy registries enabled +// and ensures v1 endpoints are hit for the following operations: +// login, push, pull, build & run +func (s *DockerRegistrySuite) TestV1(c *check.C) { + reg, err := registry.NewMock(c) + c.Assert(err, check.IsNil) + defer reg.Close() + + v2Pings := 0 + reg.RegisterHandler("/v2/", func(w http.ResponseWriter, r *http.Request) { + v2Pings++ + // V2 ping 404 causes fallback to v1 + w.WriteHeader(404) + }) + + v1Pings := 0 + reg.RegisterHandler("/v1/_ping", func(w http.ResponseWriter, r *http.Request) { + v1Pings++ + }) + + v1Logins := 0 + reg.RegisterHandler("/v1/users/", func(w http.ResponseWriter, r *http.Request) { + v1Logins++ + }) + + v1Repo := 0 + reg.RegisterHandler("/v1/repositories/busybox/", func(w http.ResponseWriter, r *http.Request) { + v1Repo++ + }) + + reg.RegisterHandler("/v1/repositories/busybox/images", func(w http.ResponseWriter, r *http.Request) { + v1Repo++ + }) + + s.d.Start(c, "--insecure-registry", reg.URL(), "--disable-legacy-registry=false") + + tmp, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, check.IsNil) + defer os.RemoveAll(tmp) + + dockerfileName, err := makefile(tmp, fmt.Sprintf("FROM %s/busybox", reg.URL())) + c.Assert(err, check.IsNil, check.Commentf("Unable to create test dockerfile")) + + s.d.Cmd("build", "--file", dockerfileName, tmp) + 
c.Assert(v1Repo, check.Equals, 1, check.Commentf("Expected v1 repository access after build")) + + repoName := fmt.Sprintf("%s/busybox", reg.URL()) + s.d.Cmd("run", repoName) + c.Assert(v1Repo, check.Equals, 2, check.Commentf("Expected v1 repository access after run")) + + s.d.Cmd("login", "-u", "richard", "-p", "testtest", reg.URL()) + c.Assert(v1Logins, check.Equals, 1, check.Commentf("Expected v1 login attempt")) + + s.d.Cmd("tag", "busybox", repoName) + s.d.Cmd("push", repoName) + + c.Assert(v1Repo, check.Equals, 2) + + s.d.Cmd("pull", repoName) + c.Assert(v1Repo, check.Equals, 3, check.Commentf("Expected v1 repository access after pull")) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_version_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_version_test.go new file mode 100644 index 000000000..074a7db47 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_version_test.go @@ -0,0 +1,58 @@ +package main + +import ( + "strings" + + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +// ensure docker version works +func (s *DockerSuite) TestVersionEnsureSucceeds(c *check.C) { + out, _ := dockerCmd(c, "version") + stringsToCheck := map[string]int{ + "Client:": 1, + "Server:": 1, + " Version:": 2, + " API version:": 2, + " Go version:": 2, + " Git commit:": 2, + " OS/Arch:": 2, + " Built:": 2, + } + + for k, v := range stringsToCheck { + c.Assert(strings.Count(out, k), checker.Equals, v, check.Commentf("The count of %v in %s does not match expected", k, out)) + } +} + +// ensure the Windows daemon returns the correct platform string +func (s *DockerSuite) TestVersionPlatform_w(c *check.C) { + testRequires(c, DaemonIsWindows) + testVersionPlatform(c, "windows/amd64") } + +// ensure the Linux daemon returns the correct platform string +func (s *DockerSuite) TestVersionPlatform_l(c *check.C) { + testRequires(c, DaemonIsLinux) + testVersionPlatform(c, "linux") +} + +func testVersionPlatform(c *check.C, platform string) { + out, _ := dockerCmd(c, "version") + expected := "OS/Arch: " + platform + + split := strings.Split(out, "\n") + c.Assert(len(split) >= 14, checker.Equals, true, check.Commentf("got %d lines from version", len(split))) + + // Verify the second 'OS/Arch' matches the platform. 
Experimental has + // more lines of output than 'regular' + bFound := false + for i := 14; i < len(split); i++ { + if strings.Contains(split[i], expected) { + bFound = true + break + } + } + c.Assert(bFound, checker.Equals, true, check.Commentf("Could not find server '%s' in '%s'", expected, out)) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_volume_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_volume_test.go new file mode 100644 index 000000000..e0bf7cafe --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_volume_test.go @@ -0,0 +1,643 @@ +package main + +import ( + "fmt" + "io/ioutil" + "net/http" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/integration-cli/request" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestVolumeCLICreate(c *check.C) { + dockerCmd(c, "volume", "create") + + _, _, err := dockerCmdWithError("volume", "create", "-d", "nosuchdriver") + c.Assert(err, check.NotNil) + + // test using hidden --name option + out, _ := dockerCmd(c, "volume", "create", "--name=test") + name := strings.TrimSpace(out) + c.Assert(name, check.Equals, "test") + + out, _ = dockerCmd(c, "volume", "create", "test2") + name = strings.TrimSpace(out) + c.Assert(name, check.Equals, "test2") +} + +func (s *DockerSuite) TestVolumeCLIInspect(c *check.C) { + c.Assert( + exec.Command(dockerBinary, "volume", "inspect", "doesnotexist").Run(), + check.Not(check.IsNil), + check.Commentf("volume inspect should error on non-existent volume"), + ) + + out, _ := dockerCmd(c, "volume", "create") + name := strings.TrimSpace(out) + out, _ = dockerCmd(c, "volume", "inspect", "--format={{ .Name }}", name) + c.Assert(strings.TrimSpace(out), check.Equals, name) + + dockerCmd(c, "volume", "create", "test") + out, _ = dockerCmd(c, "volume", "inspect", "--format={{ .Name }}", "test") + c.Assert(strings.TrimSpace(out), check.Equals, "test") +} + +func (s *DockerSuite) TestVolumeCLIInspectMulti(c *check.C) { + dockerCmd(c, "volume", "create", "test1") + dockerCmd(c, "volume", "create", "test2") + dockerCmd(c, "volume", "create", "test3") + + result := dockerCmdWithResult("volume", "inspect", "--format={{ .Name }}", "test1", "test2", "doesnotexist", "test3") + c.Assert(result, icmd.Matches, icmd.Expected{ + ExitCode: 1, + Err: "No such volume: doesnotexist", + }) + + out := result.Stdout() + outArr := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 3, check.Commentf("\n%s", out)) + + c.Assert(out, checker.Contains, "test1") + c.Assert(out, checker.Contains, "test2") + c.Assert(out, checker.Contains, "test3") +} + +func (s *DockerSuite) TestVolumeCLILs(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + dockerCmd(c, "volume", "create", "aaa") + + dockerCmd(c, "volume", "create", "test") + + dockerCmd(c, "volume", "create", "soo") + dockerCmd(c, "run", "-v", "soo:"+prefix+"/foo", "busybox", "ls", "/") + + out, _ := dockerCmd(c, "volume", "ls") + outArr := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 4, check.Commentf("\n%s", out)) + + assertVolList(c, out, []string{"aaa", "soo", "test"}) +} + +func (s *DockerSuite) TestVolumeLsFormat(c *check.C) { + dockerCmd(c, "volume", "create", "aaa") + dockerCmd(c, "volume", "create", "test") + dockerCmd(c, "volume", "create", "soo") + + 
out, _ := dockerCmd(c, "volume", "ls", "--format", "{{.Name}}") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + + expected := []string{"aaa", "soo", "test"} + var names []string + names = append(names, lines...) + c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names)) +} + +func (s *DockerSuite) TestVolumeLsFormatDefaultFormat(c *check.C) { + dockerCmd(c, "volume", "create", "aaa") + dockerCmd(c, "volume", "create", "test") + dockerCmd(c, "volume", "create", "soo") + + config := `{ + "volumesFormat": "{{ .Name }} default" +}` + d, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(d) + + err = ioutil.WriteFile(filepath.Join(d, "config.json"), []byte(config), 0644) + c.Assert(err, checker.IsNil) + + out, _ := dockerCmd(c, "--config", d, "volume", "ls") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + + expected := []string{"aaa default", "soo default", "test default"} + var names []string + names = append(names, lines...) + c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names)) +} + +// assertVolList checks volume retrieved with ls command +// equals to expected volume list +// note: out should be `volume ls [option]` result +func assertVolList(c *check.C, out string, expectVols []string) { + lines := strings.Split(out, "\n") + var volList []string + for _, line := range lines[1 : len(lines)-1] { + volFields := strings.Fields(line) + // wrap all volume name in volList + volList = append(volList, volFields[1]) + } + + // volume ls should contains all expected volumes + c.Assert(volList, checker.DeepEquals, expectVols) +} + +func (s *DockerSuite) TestVolumeCLILsFilterDangling(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + dockerCmd(c, "volume", "create", "testnotinuse1") + dockerCmd(c, "volume", "create", "testisinuse1") + dockerCmd(c, "volume", "create", "testisinuse2") + + // Make sure both "created" (but not started), and started + // containers are included in reference counting + dockerCmd(c, "run", "--name", "volume-test1", "-v", "testisinuse1:"+prefix+"/foo", "busybox", "true") + dockerCmd(c, "create", "--name", "volume-test2", "-v", "testisinuse2:"+prefix+"/foo", "busybox", "true") + + out, _ := dockerCmd(c, "volume", "ls") + + // No filter, all volumes should show + c.Assert(out, checker.Contains, "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) + c.Assert(out, checker.Contains, "testisinuse1\n", check.Commentf("expected volume 'testisinuse1' in output")) + c.Assert(out, checker.Contains, "testisinuse2\n", check.Commentf("expected volume 'testisinuse2' in output")) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "dangling=false") + + // Explicitly disabling dangling + c.Assert(out, check.Not(checker.Contains), "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) + c.Assert(out, checker.Contains, "testisinuse1\n", check.Commentf("expected volume 'testisinuse1' in output")) + c.Assert(out, checker.Contains, "testisinuse2\n", check.Commentf("expected volume 'testisinuse2' in output")) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "dangling=true") + + // Filter "dangling" volumes; only "dangling" (unused) volumes should be in the output + c.Assert(out, checker.Contains, "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) + c.Assert(out, 
check.Not(checker.Contains), "testisinuse1\n", check.Commentf("volume 'testisinuse1' in output, but not expected")) + c.Assert(out, check.Not(checker.Contains), "testisinuse2\n", check.Commentf("volume 'testisinuse2' in output, but not expected")) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "dangling=1") + // Filter "dangling" volumes; only "dangling" (unused) volumes should be in the output, dangling also accept 1 + c.Assert(out, checker.Contains, "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) + c.Assert(out, check.Not(checker.Contains), "testisinuse1\n", check.Commentf("volume 'testisinuse1' in output, but not expected")) + c.Assert(out, check.Not(checker.Contains), "testisinuse2\n", check.Commentf("volume 'testisinuse2' in output, but not expected")) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "dangling=0") + // dangling=0 is same as dangling=false case + c.Assert(out, check.Not(checker.Contains), "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) + c.Assert(out, checker.Contains, "testisinuse1\n", check.Commentf("expected volume 'testisinuse1' in output")) + c.Assert(out, checker.Contains, "testisinuse2\n", check.Commentf("expected volume 'testisinuse2' in output")) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "name=testisin") + c.Assert(out, check.Not(checker.Contains), "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) + c.Assert(out, checker.Contains, "testisinuse1\n", check.Commentf("expected volume 'testisinuse1' in output")) + c.Assert(out, checker.Contains, "testisinuse2\n", check.Commentf("expected volume 'testisinuse2' in output")) +} + +func (s *DockerSuite) TestVolumeCLILsErrorWithInvalidFilterName(c *check.C) { + out, _, err := dockerCmdWithError("volume", "ls", "-f", "FOO=123") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "Invalid filter") +} + +func (s *DockerSuite) TestVolumeCLILsWithIncorrectFilterValue(c *check.C) { + out, _, err := dockerCmdWithError("volume", "ls", "-f", "dangling=invalid") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "Invalid filter") +} + +func (s *DockerSuite) TestVolumeCLIRm(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + out, _ := dockerCmd(c, "volume", "create") + id := strings.TrimSpace(out) + + dockerCmd(c, "volume", "create", "test") + dockerCmd(c, "volume", "rm", id) + dockerCmd(c, "volume", "rm", "test") + + out, _ = dockerCmd(c, "volume", "ls") + outArr := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 1, check.Commentf("%s\n", out)) + + volumeID := "testing" + dockerCmd(c, "run", "-v", volumeID+":"+prefix+"/foo", "--name=test", "busybox", "sh", "-c", "echo hello > /foo/bar") + + icmd.RunCommand(dockerBinary, "volume", "rm", "testing").Assert(c, icmd.Expected{ + ExitCode: 1, + Error: "exit status 1", + }) + + out, _ = dockerCmd(c, "run", "--volumes-from=test", "--name=test2", "busybox", "sh", "-c", "cat /foo/bar") + c.Assert(strings.TrimSpace(out), check.Equals, "hello") + dockerCmd(c, "rm", "-fv", "test2") + dockerCmd(c, "volume", "inspect", volumeID) + dockerCmd(c, "rm", "-f", "test") + + out, _ = dockerCmd(c, "run", "--name=test2", "-v", volumeID+":"+prefix+"/foo", "busybox", "sh", "-c", "cat /foo/bar") + c.Assert(strings.TrimSpace(out), check.Equals, "hello", check.Commentf("volume data was removed")) + dockerCmd(c, "rm", "test2") + + dockerCmd(c, "volume", "rm", volumeID) + c.Assert( + exec.Command("volume", "rm", 
"doesnotexist").Run(), + check.Not(check.IsNil), + check.Commentf("volume rm should fail with non-existent volume"), + ) +} + +// FIXME(vdemeester) should be a unit test in cli/command/volume package +func (s *DockerSuite) TestVolumeCLINoArgs(c *check.C) { + out, _ := dockerCmd(c, "volume") + // no args should produce the cmd usage output + usage := "Usage: docker volume COMMAND" + c.Assert(out, checker.Contains, usage) + + // invalid arg should error and show the command usage on stderr + icmd.RunCommand(dockerBinary, "volume", "somearg").Assert(c, icmd.Expected{ + ExitCode: 1, + Error: "exit status 1", + Err: usage, + }) + + // invalid flag should error and show the flag error and cmd usage + result := icmd.RunCommand(dockerBinary, "volume", "--no-such-flag") + result.Assert(c, icmd.Expected{ + ExitCode: 125, + Error: "exit status 125", + Err: usage, + }) + c.Assert(result.Stderr(), checker.Contains, "unknown flag: --no-such-flag") +} + +func (s *DockerSuite) TestVolumeCLIInspectTmplError(c *check.C) { + out, _ := dockerCmd(c, "volume", "create") + name := strings.TrimSpace(out) + + out, exitCode, err := dockerCmdWithError("volume", "inspect", "--format='{{ .FooBar }}'", name) + c.Assert(err, checker.NotNil, check.Commentf("Output: %s", out)) + c.Assert(exitCode, checker.Equals, 1, check.Commentf("Output: %s", out)) + c.Assert(out, checker.Contains, "Template parsing error") +} + +func (s *DockerSuite) TestVolumeCLICreateWithOpts(c *check.C) { + testRequires(c, DaemonIsLinux) + + dockerCmd(c, "volume", "create", "-d", "local", "test", "--opt=type=tmpfs", "--opt=device=tmpfs", "--opt=o=size=1m,uid=1000") + out, _ := dockerCmd(c, "run", "-v", "test:/foo", "busybox", "mount") + + mounts := strings.Split(out, "\n") + var found bool + for _, m := range mounts { + if strings.Contains(m, "/foo") { + found = true + info := strings.Fields(m) + // tmpfs on type tmpfs (rw,relatime,size=1024k,uid=1000) + c.Assert(info[0], checker.Equals, "tmpfs") + c.Assert(info[2], checker.Equals, "/foo") + c.Assert(info[4], checker.Equals, "tmpfs") + c.Assert(info[5], checker.Contains, "uid=1000") + c.Assert(info[5], checker.Contains, "size=1024k") + break + } + } + c.Assert(found, checker.Equals, true) +} + +func (s *DockerSuite) TestVolumeCLICreateLabel(c *check.C) { + testVol := "testvolcreatelabel" + testLabel := "foo" + testValue := "bar" + + out, _, err := dockerCmdWithError("volume", "create", "--label", testLabel+"="+testValue, testVol) + c.Assert(err, check.IsNil) + + out, _ = dockerCmd(c, "volume", "inspect", "--format={{ .Labels."+testLabel+" }}", testVol) + c.Assert(strings.TrimSpace(out), check.Equals, testValue) +} + +func (s *DockerSuite) TestVolumeCLICreateLabelMultiple(c *check.C) { + testVol := "testvolcreatelabel" + + testLabels := map[string]string{ + "foo": "bar", + "baz": "foo", + } + + args := []string{ + "volume", + "create", + testVol, + } + + for k, v := range testLabels { + args = append(args, "--label", k+"="+v) + } + + out, _, err := dockerCmdWithError(args...) 
+ c.Assert(err, check.IsNil) + + for k, v := range testLabels { + out, _ = dockerCmd(c, "volume", "inspect", "--format={{ .Labels."+k+" }}", testVol) + c.Assert(strings.TrimSpace(out), check.Equals, v) + } +} + +func (s *DockerSuite) TestVolumeCLILsFilterLabels(c *check.C) { + testVol1 := "testvolcreatelabel-1" + out, _, err := dockerCmdWithError("volume", "create", "--label", "foo=bar1", testVol1) + c.Assert(err, check.IsNil) + + testVol2 := "testvolcreatelabel-2" + out, _, err = dockerCmdWithError("volume", "create", "--label", "foo=bar2", testVol2) + c.Assert(err, check.IsNil) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "label=foo") + + // filter with label=key + c.Assert(out, checker.Contains, "testvolcreatelabel-1\n", check.Commentf("expected volume 'testvolcreatelabel-1' in output")) + c.Assert(out, checker.Contains, "testvolcreatelabel-2\n", check.Commentf("expected volume 'testvolcreatelabel-2' in output")) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "label=foo=bar1") + + // filter with label=key=value + c.Assert(out, checker.Contains, "testvolcreatelabel-1\n", check.Commentf("expected volume 'testvolcreatelabel-1' in output")) + c.Assert(out, check.Not(checker.Contains), "testvolcreatelabel-2\n", check.Commentf("volume 'testvolcreatelabel-2' in output, but not expected")) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "label=non-exist") + outArr := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 1, check.Commentf("\n%s", out)) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "label=foo=non-exist") + outArr = strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 1, check.Commentf("\n%s", out)) +} + +func (s *DockerSuite) TestVolumeCLILsFilterDrivers(c *check.C) { + // using default volume driver local to create volumes + testVol1 := "testvol-1" + out, _, err := dockerCmdWithError("volume", "create", testVol1) + c.Assert(err, check.IsNil) + + testVol2 := "testvol-2" + out, _, err = dockerCmdWithError("volume", "create", testVol2) + c.Assert(err, check.IsNil) + + // filter with driver=local + out, _ = dockerCmd(c, "volume", "ls", "--filter", "driver=local") + c.Assert(out, checker.Contains, "testvol-1\n", check.Commentf("expected volume 'testvol-1' in output")) + c.Assert(out, checker.Contains, "testvol-2\n", check.Commentf("expected volume 'testvol-2' in output")) + + // filter with driver=invaliddriver + out, _ = dockerCmd(c, "volume", "ls", "--filter", "driver=invaliddriver") + outArr := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 1, check.Commentf("\n%s", out)) + + // filter with driver=loca + out, _ = dockerCmd(c, "volume", "ls", "--filter", "driver=loca") + outArr = strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 1, check.Commentf("\n%s", out)) + + // filter with driver= + out, _ = dockerCmd(c, "volume", "ls", "--filter", "driver=") + outArr = strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 1, check.Commentf("\n%s", out)) +} + +func (s *DockerSuite) TestVolumeCLIRmForceUsage(c *check.C) { + out, _ := dockerCmd(c, "volume", "create") + id := strings.TrimSpace(out) + + dockerCmd(c, "volume", "rm", "-f", id) + dockerCmd(c, "volume", "rm", "--force", "nonexist") + + out, _ = dockerCmd(c, "volume", "ls") + outArr := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 1, check.Commentf("%s\n", out)) +} + +func (s *DockerSuite) TestVolumeCLIRmForce(c *check.C) { + 
testRequires(c, SameHostDaemon, DaemonIsLinux) + + name := "test" + out, _ := dockerCmd(c, "volume", "create", name) + id := strings.TrimSpace(out) + c.Assert(id, checker.Equals, name) + + out, _ = dockerCmd(c, "volume", "inspect", "--format", "{{.Mountpoint}}", name) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + // Mountpoint is in the form of "/var/lib/docker/volumes/.../_data", removing `/_data` + path := strings.TrimSuffix(strings.TrimSpace(out), "/_data") + icmd.RunCommand("rm", "-rf", path).Assert(c, icmd.Success) + + dockerCmd(c, "volume", "rm", "-f", name) + out, _ = dockerCmd(c, "volume", "ls") + c.Assert(out, checker.Not(checker.Contains), name) + dockerCmd(c, "volume", "create", name) + out, _ = dockerCmd(c, "volume", "ls") + c.Assert(out, checker.Contains, name) +} + +// TestVolumeCLIRmForceInUse verifies that repeated `docker volume rm -f` calls do not remove a volume +// if it is in use. Test case for https://github.com/docker/docker/issues/31446 +func (s *DockerSuite) TestVolumeCLIRmForceInUse(c *check.C) { + name := "testvolume" + out, _ := dockerCmd(c, "volume", "create", name) + id := strings.TrimSpace(out) + c.Assert(id, checker.Equals, name) + + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + out, e := dockerCmd(c, "create", "-v", "testvolume:"+prefix+slash+"foo", "busybox") + cid := strings.TrimSpace(out) + + _, _, err := dockerCmdWithError("volume", "rm", "-f", name) + c.Assert(err, check.NotNil) + c.Assert(err.Error(), checker.Contains, "volume is in use") + out, _ = dockerCmd(c, "volume", "ls") + c.Assert(out, checker.Contains, name) + + // The original issue did not _remove_ the volume from the list + // the first time. But a second call to `volume rm` removed it. + // Calling `volume rm` a second time to confirm it's not removed + // when calling twice. 
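+ // It should keep failing with "volume is in use" while the created container still references the volume.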
+ _, _, err = dockerCmdWithError("volume", "rm", "-f", name) + c.Assert(err, check.NotNil) + c.Assert(err.Error(), checker.Contains, "volume is in use") + out, _ = dockerCmd(c, "volume", "ls") + c.Assert(out, checker.Contains, name) + + // Verify removing the volume after the container is removed works + _, e = dockerCmd(c, "rm", cid) + c.Assert(e, check.Equals, 0) + + _, e = dockerCmd(c, "volume", "rm", "-f", name) + c.Assert(e, check.Equals, 0) + + out, e = dockerCmd(c, "volume", "ls") + c.Assert(e, check.Equals, 0) + c.Assert(out, checker.Not(checker.Contains), name) +} + +func (s *DockerSuite) TestVolumeCliInspectWithVolumeOpts(c *check.C) { + testRequires(c, DaemonIsLinux) + + // Without options + name := "test1" + dockerCmd(c, "volume", "create", "-d", "local", name) + out, _ := dockerCmd(c, "volume", "inspect", "--format={{ .Options }}", name) + c.Assert(strings.TrimSpace(out), checker.Contains, "map[]") + + // With options + name = "test2" + k1, v1 := "type", "tmpfs" + k2, v2 := "device", "tmpfs" + k3, v3 := "o", "size=1m,uid=1000" + dockerCmd(c, "volume", "create", "-d", "local", name, "--opt", fmt.Sprintf("%s=%s", k1, v1), "--opt", fmt.Sprintf("%s=%s", k2, v2), "--opt", fmt.Sprintf("%s=%s", k3, v3)) + out, _ = dockerCmd(c, "volume", "inspect", "--format={{ .Options }}", name) + c.Assert(strings.TrimSpace(out), checker.Contains, fmt.Sprintf("%s:%s", k1, v1)) + c.Assert(strings.TrimSpace(out), checker.Contains, fmt.Sprintf("%s:%s", k2, v2)) + c.Assert(strings.TrimSpace(out), checker.Contains, fmt.Sprintf("%s:%s", k3, v3)) +} + +// Test case (1) for 21845: duplicate targets for --volumes-from +func (s *DockerSuite) TestDuplicateMountpointsForVolumesFrom(c *check.C) { + testRequires(c, DaemonIsLinux) + + image := "vimage" + buildImageSuccessfully(c, image, build.WithDockerfile(` + FROM busybox + VOLUME ["/tmp/data"]`)) + + dockerCmd(c, "run", "--name=data1", image, "true") + dockerCmd(c, "run", "--name=data2", image, "true") + + out, _ := dockerCmd(c, "inspect", "--format", "{{(index .Mounts 0).Name}}", "data1") + data1 := strings.TrimSpace(out) + c.Assert(data1, checker.Not(checker.Equals), "") + + out, _ = dockerCmd(c, "inspect", "--format", "{{(index .Mounts 0).Name}}", "data2") + data2 := strings.TrimSpace(out) + c.Assert(data2, checker.Not(checker.Equals), "") + + // Both volumes should exist + out, _ = dockerCmd(c, "volume", "ls", "-q") + c.Assert(strings.TrimSpace(out), checker.Contains, data1) + c.Assert(strings.TrimSpace(out), checker.Contains, data2) + + out, _, err := dockerCmdWithError("run", "--name=app", "--volumes-from=data1", "--volumes-from=data2", "-d", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf("Out: %s", out)) + + // Only the second volume will be referenced, this is backward compatible + out, _ = dockerCmd(c, "inspect", "--format", "{{(index .Mounts 0).Name}}", "app") + c.Assert(strings.TrimSpace(out), checker.Equals, data2) + + dockerCmd(c, "rm", "-f", "-v", "app") + dockerCmd(c, "rm", "-f", "-v", "data1") + dockerCmd(c, "rm", "-f", "-v", "data2") + + // Both volumes should not exist + out, _ = dockerCmd(c, "volume", "ls", "-q") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), data1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), data2) +} + +// Test case (2) for 21845: duplicate targets for --volumes-from and -v (bind) +func (s *DockerSuite) TestDuplicateMountpointsForVolumesFromAndBind(c *check.C) { + testRequires(c, DaemonIsLinux) + + image := "vimage" + buildImageSuccessfully(c, image, 
build.WithDockerfile(` + FROM busybox + VOLUME ["/tmp/data"]`)) + + dockerCmd(c, "run", "--name=data1", image, "true") + dockerCmd(c, "run", "--name=data2", image, "true") + + out, _ := dockerCmd(c, "inspect", "--format", "{{(index .Mounts 0).Name}}", "data1") + data1 := strings.TrimSpace(out) + c.Assert(data1, checker.Not(checker.Equals), "") + + out, _ = dockerCmd(c, "inspect", "--format", "{{(index .Mounts 0).Name}}", "data2") + data2 := strings.TrimSpace(out) + c.Assert(data2, checker.Not(checker.Equals), "") + + // Both volumes should exist + out, _ = dockerCmd(c, "volume", "ls", "-q") + c.Assert(strings.TrimSpace(out), checker.Contains, data1) + c.Assert(strings.TrimSpace(out), checker.Contains, data2) + + // /tmp/data is automatically created, because we are not using the modern mount API here + out, _, err := dockerCmdWithError("run", "--name=app", "--volumes-from=data1", "--volumes-from=data2", "-v", "/tmp/data:/tmp/data", "-d", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf("Out: %s", out)) + + // No volume will be referenced (mount is /tmp/data), this is backward compatible + out, _ = dockerCmd(c, "inspect", "--format", "{{(index .Mounts 0).Name}}", "app") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), data1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), data2) + + dockerCmd(c, "rm", "-f", "-v", "app") + dockerCmd(c, "rm", "-f", "-v", "data1") + dockerCmd(c, "rm", "-f", "-v", "data2") + + // Both volumes should not exist + out, _ = dockerCmd(c, "volume", "ls", "-q") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), data1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), data2) +} + +// Test case (3) for 21845: duplicate targets for --volumes-from and `Mounts` (API only) +func (s *DockerSuite) TestDuplicateMountpointsForVolumesFromAndMounts(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + image := "vimage" + buildImageSuccessfully(c, image, build.WithDockerfile(` + FROM busybox + VOLUME ["/tmp/data"]`)) + + dockerCmd(c, "run", "--name=data1", image, "true") + dockerCmd(c, "run", "--name=data2", image, "true") + + out, _ := dockerCmd(c, "inspect", "--format", "{{(index .Mounts 0).Name}}", "data1") + data1 := strings.TrimSpace(out) + c.Assert(data1, checker.Not(checker.Equals), "") + + out, _ = dockerCmd(c, "inspect", "--format", "{{(index .Mounts 0).Name}}", "data2") + data2 := strings.TrimSpace(out) + c.Assert(data2, checker.Not(checker.Equals), "") + + // Both volumes should exist + out, _ = dockerCmd(c, "volume", "ls", "-q") + c.Assert(strings.TrimSpace(out), checker.Contains, data1) + c.Assert(strings.TrimSpace(out), checker.Contains, data2) + + err := os.MkdirAll("/tmp/data", 0755) + c.Assert(err, checker.IsNil) + // Mounts is available in API + status, body, err := request.SockRequest("POST", "/containers/create?name=app", map[string]interface{}{ + "Image": "busybox", + "Cmd": []string{"top"}, + "HostConfig": map[string]interface{}{ + "VolumesFrom": []string{ + "data1", + "data2", + }, + "Mounts": []map[string]interface{}{ + { + "Type": "bind", + "Source": "/tmp/data", + "Target": "/tmp/data", + }, + }}, + }, daemonHost()) + + c.Assert(err, checker.IsNil, check.Commentf(string(body))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(body))) + + // No volume will be referenced (mount is /tmp/data), this is backward compatible + out, _ = dockerCmd(c, "inspect", "--format", "{{(index .Mounts 0).Name}}", "app") + c.Assert(strings.TrimSpace(out), 
checker.Not(checker.Contains), data1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), data2) + + dockerCmd(c, "rm", "-f", "-v", "app") + dockerCmd(c, "rm", "-f", "-v", "data1") + dockerCmd(c, "rm", "-f", "-v", "data2") + + // Both volumes should not exist + out, _ = dockerCmd(c, "volume", "ls", "-q") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), data1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), data2) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_cli_wait_test.go b/vendor/github.com/moby/moby/integration-cli/docker_cli_wait_test.go new file mode 100644 index 000000000..6f45bf07a --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_cli_wait_test.go @@ -0,0 +1,98 @@ +package main + +import ( + "bytes" + "os/exec" + "strings" + "time" + + "github.com/docker/docker/integration-cli/checker" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +// non-blocking wait with 0 exit code +func (s *DockerSuite) TestWaitNonBlockedExitZero(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "true") + containerID := strings.TrimSpace(out) + + err := waitInspect(containerID, "{{.State.Running}}", "false", 30*time.Second) + c.Assert(err, checker.IsNil) //Container should have stopped by now + + out, _ = dockerCmd(c, "wait", containerID) + c.Assert(strings.TrimSpace(out), checker.Equals, "0", check.Commentf("failed to set up container, %v", out)) + +} + +// blocking wait with 0 exit code +func (s *DockerSuite) TestWaitBlockedExitZero(c *check.C) { + // Windows busybox does not support trap in this way, nor sleep with sub-second + // granularity. It will always exit 0x40010004. + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "trap 'exit 0' TERM; while true; do usleep 10; done") + containerID := strings.TrimSpace(out) + + c.Assert(waitRun(containerID), checker.IsNil) + + chWait := make(chan string) + go func() { + chWait <- "" + out := icmd.RunCommand(dockerBinary, "wait", containerID).Combined() + chWait <- out + }() + + <-chWait // make sure the goroutine is started + time.Sleep(100 * time.Millisecond) + dockerCmd(c, "stop", containerID) + + select { + case status := <-chWait: + c.Assert(strings.TrimSpace(status), checker.Equals, "0", check.Commentf("expected exit 0, got %s", status)) + case <-time.After(2 * time.Second): + c.Fatal("timeout waiting for `docker wait` to exit") + } + +} + +// non-blocking wait with random exit code +func (s *DockerSuite) TestWaitNonBlockedExitRandom(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "exit 99") + containerID := strings.TrimSpace(out) + + err := waitInspect(containerID, "{{.State.Running}}", "false", 30*time.Second) + c.Assert(err, checker.IsNil) //Container should have stopped by now + out, _ = dockerCmd(c, "wait", containerID) + c.Assert(strings.TrimSpace(out), checker.Equals, "99", check.Commentf("failed to set up container, %v", out)) + +} + +// blocking wait with random exit code +func (s *DockerSuite) TestWaitBlockedExitRandom(c *check.C) { + // Cannot run on Windows as Windows busybox does not support trap in this way. 
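+ // The container traps TERM and exits 99, so a blocking "docker wait" should report 99 once "docker stop" delivers the signal.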
+ testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "trap 'exit 99' TERM; while true; do usleep 10; done") + containerID := strings.TrimSpace(out) + c.Assert(waitRun(containerID), checker.IsNil) + + chWait := make(chan error) + waitCmd := exec.Command(dockerBinary, "wait", containerID) + waitCmdOut := bytes.NewBuffer(nil) + waitCmd.Stdout = waitCmdOut + c.Assert(waitCmd.Start(), checker.IsNil) + go func() { + chWait <- waitCmd.Wait() + }() + + dockerCmd(c, "stop", containerID) + + select { + case err := <-chWait: + c.Assert(err, checker.IsNil, check.Commentf(waitCmdOut.String())) + status, err := waitCmdOut.ReadString('\n') + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(status), checker.Equals, "99", check.Commentf("expected exit 99, got %s", status)) + case <-time.After(2 * time.Second): + waitCmd.Process.Kill() + c.Fatal("timeout waiting for `docker wait` to exit") + } +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_deprecated_api_v124_test.go b/vendor/github.com/moby/moby/integration-cli/docker_deprecated_api_v124_test.go new file mode 100644 index 000000000..a3a8fbdb6 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_deprecated_api_v124_test.go @@ -0,0 +1,229 @@ +// This file will be removed when we completely drop support for +// passing HostConfig to container start API. + +package main + +import ( + "net/http" + "strings" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/docker/docker/pkg/testutil" + "github.com/go-check/check" +) + +func formatV123StartAPIURL(url string) string { + return "/v1.23" + url +} + +func (s *DockerSuite) TestDeprecatedContainerAPIStartHostConfig(c *check.C) { + name := "test-deprecated-api-124" + dockerCmd(c, "create", "--name", name, "busybox") + config := map[string]interface{}{ + "Binds": []string{"/aa:/bb"}, + } + status, body, err := request.SockRequest("POST", "/containers/"+name+"/start", config, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusBadRequest) + c.Assert(string(body), checker.Contains, "was deprecated since v1.10") +} + +func (s *DockerSuite) TestDeprecatedContainerAPIStartVolumeBinds(c *check.C) { + // TODO Windows CI: Investigate further why this fails on Windows to Windows CI. 
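+ // The create request only declares the volume path; the deprecated start body below supplies the host bind for it.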
+ testRequires(c, DaemonIsLinux) + path := "/foo" + if testEnv.DaemonPlatform() == "windows" { + path = `c:\foo` + } + name := "testing" + config := map[string]interface{}{ + "Image": "busybox", + "Volumes": map[string]struct{}{path: {}}, + } + + status, _, err := request.SockRequest("POST", formatV123StartAPIURL("/containers/create?name="+name), config, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + bindPath := testutil.RandomTmpDirPath("test", testEnv.DaemonPlatform()) + config = map[string]interface{}{ + "Binds": []string{bindPath + ":" + path}, + } + status, _, err = request.SockRequest("POST", formatV123StartAPIURL("/containers/"+name+"/start"), config, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + + pth, err := inspectMountSourceField(name, path) + c.Assert(err, checker.IsNil) + c.Assert(pth, checker.Equals, bindPath, check.Commentf("expected volume host path to be %s, got %s", bindPath, pth)) +} + +// Test for GH#10618 +func (s *DockerSuite) TestDeprecatedContainerAPIStartDupVolumeBinds(c *check.C) { + // TODO Windows to Windows CI - Port this + testRequires(c, DaemonIsLinux) + name := "testdups" + config := map[string]interface{}{ + "Image": "busybox", + "Volumes": map[string]struct{}{"/tmp": {}}, + } + + status, _, err := request.SockRequest("POST", formatV123StartAPIURL("/containers/create?name="+name), config, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + bindPath1 := testutil.RandomTmpDirPath("test1", testEnv.DaemonPlatform()) + bindPath2 := testutil.RandomTmpDirPath("test2", testEnv.DaemonPlatform()) + + config = map[string]interface{}{ + "Binds": []string{bindPath1 + ":/tmp", bindPath2 + ":/tmp"}, + } + status, body, err := request.SockRequest("POST", formatV123StartAPIURL("/containers/"+name+"/start"), config, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) + c.Assert(string(body), checker.Contains, "Duplicate mount point", check.Commentf("Expected failure due to duplicate bind mounts to same path, instead got: %q with error: %v", string(body), err)) +} + +func (s *DockerSuite) TestDeprecatedContainerAPIStartVolumesFrom(c *check.C) { + // TODO Windows to Windows CI - Port this + testRequires(c, DaemonIsLinux) + volName := "voltst" + volPath := "/tmp" + + dockerCmd(c, "run", "--name", volName, "-v", volPath, "busybox") + + name := "TestContainerAPIStartVolumesFrom" + config := map[string]interface{}{ + "Image": "busybox", + "Volumes": map[string]struct{}{volPath: {}}, + } + + status, _, err := request.SockRequest("POST", formatV123StartAPIURL("/containers/create?name="+name), config, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + config = map[string]interface{}{ + "VolumesFrom": []string{volName}, + } + status, _, err = request.SockRequest("POST", formatV123StartAPIURL("/containers/"+name+"/start"), config, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + + pth, err := inspectMountSourceField(name, volPath) + c.Assert(err, checker.IsNil) + pth2, err := inspectMountSourceField(volName, volPath) + c.Assert(err, checker.IsNil) + c.Assert(pth, checker.Equals, pth2, check.Commentf("expected volume host path to be %s, got %s", pth, pth2)) +} + +// #9981 - Allow a docker created volume (ie, one in /var/lib/docker/volumes) to be used to 
overwrite (via passing in Binds on api start) an existing volume +func (s *DockerSuite) TestDeprecatedPostContainerBindNormalVolume(c *check.C) { + // TODO Windows to Windows CI - Port this + testRequires(c, DaemonIsLinux) + dockerCmd(c, "create", "-v", "/foo", "--name=one", "busybox") + + fooDir, err := inspectMountSourceField("one", "/foo") + c.Assert(err, checker.IsNil) + + dockerCmd(c, "create", "-v", "/foo", "--name=two", "busybox") + + bindSpec := map[string][]string{"Binds": {fooDir + ":/foo"}} + status, _, err := request.SockRequest("POST", formatV123StartAPIURL("/containers/two/start"), bindSpec, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + + fooDir2, err := inspectMountSourceField("two", "/foo") + c.Assert(err, checker.IsNil) + c.Assert(fooDir2, checker.Equals, fooDir, check.Commentf("expected volume path to be %s, got: %s", fooDir, fooDir2)) +} + +func (s *DockerSuite) TestDeprecatedStartWithTooLowMemoryLimit(c *check.C) { + // TODO Windows: Port once memory is supported + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "create", "busybox") + + containerID := strings.TrimSpace(out) + + config := `{ + "CpuShares": 100, + "Memory": 524287 + }` + + res, body, err := request.Post(formatV123StartAPIURL("/containers/"+containerID+"/start"), request.RawString(config), request.JSON) + c.Assert(err, checker.IsNil) + b, err2 := testutil.ReadBody(body) + c.Assert(err2, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + c.Assert(string(b), checker.Contains, "Minimum memory limit allowed is 4MB") +} + +// #14640 +func (s *DockerSuite) TestDeprecatedPostContainersStartWithoutLinksInHostConfig(c *check.C) { + // TODO Windows: Windows doesn't support supplying a hostconfig on start. + // An alternate test could be written to validate the negative testing aspect of this + testRequires(c, DaemonIsLinux) + name := "test-host-config-links" + dockerCmd(c, append([]string{"create", "--name", name, "busybox"}, sleepCommandForDaemonPlatform()...)...) + + hc := inspectFieldJSON(c, name, "HostConfig") + config := `{"HostConfig":` + hc + `}` + + res, b, err := request.Post(formatV123StartAPIURL("/containers/"+name+"/start"), request.RawString(config), request.JSON) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusNoContent) + b.Close() +} + +// #14640 +func (s *DockerSuite) TestDeprecatedPostContainersStartWithLinksInHostConfig(c *check.C) { + // TODO Windows: Windows doesn't support supplying a hostconfig on start. 
+ // An alternate test could be written to validate the negative testing aspect of this + testRequires(c, DaemonIsLinux) + name := "test-host-config-links" + dockerCmd(c, "run", "--name", "foo", "-d", "busybox", "top") + dockerCmd(c, "create", "--name", name, "--link", "foo:bar", "busybox", "top") + + hc := inspectFieldJSON(c, name, "HostConfig") + config := `{"HostConfig":` + hc + `}` + + res, b, err := request.Post(formatV123StartAPIURL("/containers/"+name+"/start"), request.RawString(config), request.JSON) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusNoContent) + b.Close() +} + +// #14640 +func (s *DockerSuite) TestDeprecatedPostContainersStartWithLinksInHostConfigIdLinked(c *check.C) { + // Windows does not support links + testRequires(c, DaemonIsLinux) + name := "test-host-config-links" + out, _ := dockerCmd(c, "run", "--name", "link0", "-d", "busybox", "top") + id := strings.TrimSpace(out) + dockerCmd(c, "create", "--name", name, "--link", id, "busybox", "top") + + hc := inspectFieldJSON(c, name, "HostConfig") + config := `{"HostConfig":` + hc + `}` + + res, b, err := request.Post(formatV123StartAPIURL("/containers/"+name+"/start"), request.RawString(config), request.JSON) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusNoContent) + b.Close() +} + +func (s *DockerSuite) TestDeprecatedStartWithNilDNS(c *check.C) { + // TODO Windows: Add once DNS is supported + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "create", "busybox") + containerID := strings.TrimSpace(out) + + config := `{"HostConfig": {"Dns": null}}` + + res, b, err := request.Post(formatV123StartAPIURL("/containers/"+containerID+"/start"), request.RawString(config), request.JSON) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusNoContent) + b.Close() + + dns := inspectFieldJSON(c, containerID, "HostConfig.Dns") + c.Assert(dns, checker.Equals, "[]") +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_deprecated_api_v124_unix_test.go b/vendor/github.com/moby/moby/integration-cli/docker_deprecated_api_v124_unix_test.go new file mode 100644 index 000000000..5fc6c2ddf --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_deprecated_api_v124_unix_test.go @@ -0,0 +1,31 @@ +// +build !windows + +package main + +import ( + "fmt" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/go-check/check" +) + +// #19100 This is a deprecated feature test, it should be removed in Docker 1.12 +func (s *DockerNetworkSuite) TestDeprecatedDockerNetworkStartAPIWithHostconfig(c *check.C) { + netName := "test" + conName := "foo" + dockerCmd(c, "network", "create", netName) + dockerCmd(c, "create", "--name", conName, "busybox", "top") + + config := map[string]interface{}{ + "HostConfig": map[string]interface{}{ + "NetworkMode": netName, + }, + } + _, _, err := request.SockRequest("POST", formatV123StartAPIURL("/containers/"+conName+"/start"), config, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(waitRun(conName), checker.IsNil) + networks := inspectField(c, conName, "NetworkSettings.Networks") + c.Assert(networks, checker.Contains, netName, check.Commentf(fmt.Sprintf("Should contain '%s' network", netName))) + c.Assert(networks, checker.Not(checker.Contains), "bridge", check.Commentf("Should not contain 'bridge' network")) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_experimental_network_test.go 
b/vendor/github.com/moby/moby/integration-cli/docker_experimental_network_test.go new file mode 100644 index 000000000..f352050d3 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_experimental_network_test.go @@ -0,0 +1,533 @@ +// +build !windows + +package main + +import ( + "strings" + "time" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/pkg/parsers/kernel" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +// ensure Kernel version is >= v3.9 for macvlan support +func macvlanKernelSupport() bool { + return checkKernelMajorVersionGreaterOrEqualThen(3, 9) +} + +// ensure Kernel version is >= v4.2 for ipvlan support +func ipvlanKernelSupport() bool { + return checkKernelMajorVersionGreaterOrEqualThen(4, 2) +} + +func checkKernelMajorVersionGreaterOrEqualThen(kernelVersion int, majorVersion int) bool { + kv, err := kernel.GetKernelVersion() + if err != nil { + return false + } + if kv.Kernel < kernelVersion || (kv.Kernel == kernelVersion && kv.Major < majorVersion) { + return false + } + return true +} + +func (s *DockerNetworkSuite) TestDockerNetworkMacvlanPersistance(c *check.C) { + // verify the driver automatically provisions the 802.1q link (dm-dummy0.60) + testRequires(c, DaemonIsLinux, macvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + + // master dummy interface 'dm' abbreviation represents 'docker macvlan' + master := "dm-dummy0" + // simulate the master link the vlan tagged subinterface parent link will use + createMasterDummy(c, master) + // create a network specifying the desired sub-interface name + dockerCmd(c, "network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0.60", "dm-persist") + assertNwIsAvailable(c, "dm-persist") + // Restart docker daemon to test the config has persisted to disk + s.d.Restart(c) + // verify network is recreated from persistence + assertNwIsAvailable(c, "dm-persist") + // cleanup the master interface that also collects the slave dev + deleteInterface(c, "dm-dummy0") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIpvlanPersistance(c *check.C) { + // verify the driver automatically provisions the 802.1q link (di-dummy0.70) + testRequires(c, DaemonIsLinux, ipvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + // master dummy interface 'di' notation represents 'docker ipvlan' + master := "di-dummy0" + // simulate the master link the vlan tagged subinterface parent link will use + createMasterDummy(c, master) + // create a network specifying the desired sub-interface name + dockerCmd(c, "network", "create", "--driver=ipvlan", "-o", "parent=di-dummy0.70", "di-persist") + assertNwIsAvailable(c, "di-persist") + // Restart docker daemon to test the config has persisted to disk + s.d.Restart(c) + // verify network is recreated from persistence + assertNwIsAvailable(c, "di-persist") + // cleanup the master interface that also collects the slave dev + deleteInterface(c, "di-dummy0") +} + +func (s *DockerNetworkSuite) TestDockerNetworkMacvlanSubIntCreate(c *check.C) { + // verify the driver automatically provisions the 802.1q link (dm-dummy0.50) + testRequires(c, DaemonIsLinux, macvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + // master dummy interface 'dm' abbreviation represents 'docker macvlan' + master := "dm-dummy0" + // simulate the master link the vlan tagged subinterface parent link will use + createMasterDummy(c, master) + // create a network 
specifying the desired sub-interface name + dockerCmd(c, "network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0.50", "dm-subinterface") + assertNwIsAvailable(c, "dm-subinterface") + // cleanup the master interface which also collects the slave dev + deleteInterface(c, "dm-dummy0") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIpvlanSubIntCreate(c *check.C) { + // verify the driver automatically provisions the 802.1q link (di-dummy0.60) + testRequires(c, DaemonIsLinux, ipvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + // master dummy interface 'di' abbreviation represents 'docker ipvlan' + master := "di-dummy0" + // simulate the master link the vlan tagged subinterface parent link will use + createMasterDummy(c, master) + // create a network specifying the desired sub-interface name + dockerCmd(c, "network", "create", "--driver=ipvlan", "-o", "parent=di-dummy0.60", "di-subinterface") + assertNwIsAvailable(c, "di-subinterface") + // cleanup the master interface which also collects the slave dev + deleteInterface(c, "di-dummy0") +} + +func (s *DockerNetworkSuite) TestDockerNetworkMacvlanOverlapParent(c *check.C) { + // verify the same parent interface cannot be used if already in use by an existing network + testRequires(c, DaemonIsLinux, macvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + // master dummy interface 'dm' abbreviation represents 'docker macvlan' + master := "dm-dummy0" + createMasterDummy(c, master) + createVlanInterface(c, master, "dm-dummy0.40", "40") + // create a network using an existing parent interface + dockerCmd(c, "network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0.40", "dm-subinterface") + assertNwIsAvailable(c, "dm-subinterface") + // attempt to create another network using the same parent iface that should fail + out, _, err := dockerCmdWithError("network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0.40", "dm-parent-net-overlap") + // verify that the overlap returns an error + c.Assert(err, check.NotNil, check.Commentf(out)) + // cleanup the master interface which also collects the slave dev + deleteInterface(c, "dm-dummy0") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIpvlanOverlapParent(c *check.C) { + // verify the same parent interface cannot be used if already in use by an existing network + testRequires(c, DaemonIsLinux, ipvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + // master dummy interface 'di' abbreviation represents 'docker ipvlan' + master := "di-dummy0" + createMasterDummy(c, master) + createVlanInterface(c, master, "di-dummy0.30", "30") + // create a network using an existing parent interface + dockerCmd(c, "network", "create", "--driver=ipvlan", "-o", "parent=di-dummy0.30", "di-subinterface") + assertNwIsAvailable(c, "di-subinterface") + // attempt to create another network using the same parent iface that should fail + out, _, err := dockerCmdWithError("network", "create", "--driver=ipvlan", "-o", "parent=di-dummy0.30", "di-parent-net-overlap") + // verify that the overlap returns an error + c.Assert(err, check.NotNil, check.Commentf(out)) + // cleanup the master interface which also collects the slave dev + deleteInterface(c, "di-dummy0") +} + +func (s *DockerNetworkSuite) TestDockerNetworkMacvlanMultiSubnet(c *check.C) { + // create a dual stack multi-subnet Macvlan bridge mode network and validate connectivity between four containers, two on each subnet + testRequires(c, DaemonIsLinux, IPv6, macvlanKernelSupport, NotUserNamespace, NotArm, 
ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=macvlan", "--ipv6", "--subnet=172.28.100.0/24", "--subnet=172.28.102.0/24", "--gateway=172.28.102.254", + "--subnet=2001:db8:abc2::/64", "--subnet=2001:db8:abc4::/64", "--gateway=2001:db8:abc4::254", "dualstackbridge") + // Ensure the network was created + assertNwIsAvailable(c, "dualstackbridge") + // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.100.0/24 and 2001:db8:abc2::/64 + dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=first", "--ip", "172.28.100.20", "--ip6", "2001:db8:abc2::20", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=second", "--ip", "172.28.100.21", "--ip6", "2001:db8:abc2::21", "busybox", "top") + + // Inspect and store the v4 address from specified container on the network dualstackbridge + ip := inspectField(c, "first", "NetworkSettings.Networks.dualstackbridge.IPAddress") + // Inspect and store the v6 address from specified container on the network dualstackbridge + ip6 := inspectField(c, "first", "NetworkSettings.Networks.dualstackbridge.GlobalIPv6Address") + + // verify ipv4 connectivity to the explicit --ipv address second to first + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", strings.TrimSpace(ip)) + c.Assert(err, check.IsNil) + // verify ipv6 connectivity to the explicit --ipv6 address second to first + c.Skip("Temporarily skipping while investigating sporadic v6 CI issues") + _, _, err = dockerCmdWithError("exec", "second", "ping6", "-c", "1", strings.TrimSpace(ip6)) + c.Assert(err, check.IsNil) + + // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.102.0/24 and 2001:db8:abc4::/64 + dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=third", "--ip", "172.28.102.20", "--ip6", "2001:db8:abc4::20", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=fourth", "--ip", "172.28.102.21", "--ip6", "2001:db8:abc4::21", "busybox", "top") + + // Inspect and store the v4 address from specified container on the network dualstackbridge + ip = inspectField(c, "third", "NetworkSettings.Networks.dualstackbridge.IPAddress") + // Inspect and store the v6 address from specified container on the network dualstackbridge + ip6 = inspectField(c, "third", "NetworkSettings.Networks.dualstackbridge.GlobalIPv6Address") + + // verify ipv4 connectivity to the explicit --ipv address from third to fourth + _, _, err = dockerCmdWithError("exec", "fourth", "ping", "-c", "1", strings.TrimSpace(ip)) + c.Assert(err, check.IsNil) + // verify ipv6 connectivity to the explicit --ipv6 address from third to fourth + _, _, err = dockerCmdWithError("exec", "fourth", "ping6", "-c", "1", strings.TrimSpace(ip6)) + c.Assert(err, check.IsNil) + + // Inspect the v4 gateway to ensure the proper default GW was assigned + ip4gw := inspectField(c, "first", "NetworkSettings.Networks.dualstackbridge.Gateway") + c.Assert(strings.TrimSpace(ip4gw), check.Equals, "172.28.100.1") + // Inspect the v6 gateway to ensure the proper default GW was assigned + ip6gw := inspectField(c, "first", "NetworkSettings.Networks.dualstackbridge.IPv6Gateway") + c.Assert(strings.TrimSpace(ip6gw), check.Equals, "2001:db8:abc2::1") + + // Inspect the v4 gateway to ensure the proper explicitly assigned default GW was assigned + ip4gw = inspectField(c, "third", "NetworkSettings.Networks.dualstackbridge.Gateway") + c.Assert(strings.TrimSpace(ip4gw), check.Equals, 
"172.28.102.254") + // Inspect the v6 gateway to ensure the proper explicitly assigned default GW was assigned + ip6gw = inspectField(c, "third", "NetworkSettings.Networks.dualstackbridge.IPv6Gateway") + c.Assert(strings.TrimSpace(ip6gw), check.Equals, "2001:db8:abc4::254") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIpvlanL2MultiSubnet(c *check.C) { + // create a dual stack multi-subnet Ipvlan L2 network and validate connectivity within the subnets, two on each subnet + testRequires(c, DaemonIsLinux, IPv6, ipvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=ipvlan", "--ipv6", "--subnet=172.28.200.0/24", "--subnet=172.28.202.0/24", "--gateway=172.28.202.254", + "--subnet=2001:db8:abc8::/64", "--subnet=2001:db8:abc6::/64", "--gateway=2001:db8:abc6::254", "dualstackl2") + // Ensure the network was created + assertNwIsAvailable(c, "dualstackl2") + // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.200.0/24 and 2001:db8:abc8::/64 + dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=first", "--ip", "172.28.200.20", "--ip6", "2001:db8:abc8::20", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=second", "--ip", "172.28.200.21", "--ip6", "2001:db8:abc8::21", "busybox", "top") + + // Inspect and store the v4 address from specified container on the network dualstackl2 + ip := inspectField(c, "first", "NetworkSettings.Networks.dualstackl2.IPAddress") + // Inspect and store the v6 address from specified container on the network dualstackl2 + ip6 := inspectField(c, "first", "NetworkSettings.Networks.dualstackl2.GlobalIPv6Address") + + // verify ipv4 connectivity to the explicit --ipv address second to first + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", strings.TrimSpace(ip)) + c.Assert(err, check.IsNil) + // verify ipv6 connectivity to the explicit --ipv6 address second to first + _, _, err = dockerCmdWithError("exec", "second", "ping6", "-c", "1", strings.TrimSpace(ip6)) + c.Assert(err, check.IsNil) + + // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.202.0/24 and 2001:db8:abc6::/64 + dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=third", "--ip", "172.28.202.20", "--ip6", "2001:db8:abc6::20", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=fourth", "--ip", "172.28.202.21", "--ip6", "2001:db8:abc6::21", "busybox", "top") + + // Inspect and store the v4 address from specified container on the network dualstackl2 + ip = inspectField(c, "third", "NetworkSettings.Networks.dualstackl2.IPAddress") + // Inspect and store the v6 address from specified container on the network dualstackl2 + ip6 = inspectField(c, "third", "NetworkSettings.Networks.dualstackl2.GlobalIPv6Address") + + // verify ipv4 connectivity to the explicit --ipv address from third to fourth + _, _, err = dockerCmdWithError("exec", "fourth", "ping", "-c", "1", strings.TrimSpace(ip)) + c.Assert(err, check.IsNil) + // verify ipv6 connectivity to the explicit --ipv6 address from third to fourth + _, _, err = dockerCmdWithError("exec", "fourth", "ping6", "-c", "1", strings.TrimSpace(ip6)) + c.Assert(err, check.IsNil) + + // Inspect the v4 gateway to ensure the proper default GW was assigned + ip4gw := inspectField(c, "first", "NetworkSettings.Networks.dualstackl2.Gateway") + c.Assert(strings.TrimSpace(ip4gw), check.Equals, "172.28.200.1") + // Inspect the v6 gateway to ensure the 
proper default GW was assigned
+	ip6gw := inspectField(c, "first", "NetworkSettings.Networks.dualstackl2.IPv6Gateway")
+	c.Assert(strings.TrimSpace(ip6gw), check.Equals, "2001:db8:abc8::1")
+
+	// Inspect the v4 gateway to ensure the proper explicitly assigned default GW was assigned
+	ip4gw = inspectField(c, "third", "NetworkSettings.Networks.dualstackl2.Gateway")
+	c.Assert(strings.TrimSpace(ip4gw), check.Equals, "172.28.202.254")
+	// Inspect the v6 gateway to ensure the proper explicitly assigned default GW was assigned
+	ip6gw = inspectField(c, "third", "NetworkSettings.Networks.dualstackl2.IPv6Gateway")
+	c.Assert(strings.TrimSpace(ip6gw), check.Equals, "2001:db8:abc6::254")
+}
+
+func (s *DockerNetworkSuite) TestDockerNetworkIpvlanL3MultiSubnet(c *check.C) {
+	// create a dual stack multi-subnet Ipvlan L3 network and validate connectivity between all four containers per L3 mode
+	testRequires(c, DaemonIsLinux, IPv6, ipvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon)
+	dockerCmd(c, "network", "create", "--driver=ipvlan", "--ipv6", "--subnet=172.28.10.0/24", "--subnet=172.28.12.0/24", "--gateway=172.28.12.254",
+		"--subnet=2001:db8:abc9::/64", "--subnet=2001:db8:abc7::/64", "--gateway=2001:db8:abc7::254", "-o", "ipvlan_mode=l3", "dualstackl3")
+	// Ensure the network was created
+	assertNwIsAvailable(c, "dualstackl3")
+
+	// start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.10.0/24 and 2001:db8:abc9::/64
+	dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=first", "--ip", "172.28.10.20", "--ip6", "2001:db8:abc9::20", "busybox", "top")
+	dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=second", "--ip", "172.28.10.21", "--ip6", "2001:db8:abc9::21", "busybox", "top")
+
+	// Inspect and store the v4 address from specified container on the network dualstackl3
+	ip := inspectField(c, "first", "NetworkSettings.Networks.dualstackl3.IPAddress")
+	// Inspect and store the v6 address from specified container on the network dualstackl3
+	ip6 := inspectField(c, "first", "NetworkSettings.Networks.dualstackl3.GlobalIPv6Address")
+
+	// verify ipv4 connectivity to the explicit --ip address second to first
+	_, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", strings.TrimSpace(ip))
+	c.Assert(err, check.IsNil)
+	// verify ipv6 connectivity to the explicit --ipv6 address second to first
+	_, _, err = dockerCmdWithError("exec", "second", "ping6", "-c", "1", strings.TrimSpace(ip6))
+	c.Assert(err, check.IsNil)
+
+	// start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.12.0/24 and 2001:db8:abc7::/64
+	dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=third", "--ip", "172.28.12.20", "--ip6", "2001:db8:abc7::20", "busybox", "top")
+	dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=fourth", "--ip", "172.28.12.21", "--ip6", "2001:db8:abc7::21", "busybox", "top")
+
+	// Inspect and store the v4 address from specified container on the network dualstackl3
+	ip = inspectField(c, "third", "NetworkSettings.Networks.dualstackl3.IPAddress")
+	// Inspect and store the v6 address from specified container on the network dualstackl3
+	ip6 = inspectField(c, "third", "NetworkSettings.Networks.dualstackl3.GlobalIPv6Address")
+
+	// verify ipv4 connectivity to the explicit --ip address from third to fourth
+	_, _, err = dockerCmdWithError("exec", "fourth", "ping", "-c", "1", strings.TrimSpace(ip))
+	c.Assert(err, check.IsNil)
+	// verify ipv6 connectivity to the
explicit --ipv6 address from third to fourth + _, _, err = dockerCmdWithError("exec", "fourth", "ping6", "-c", "1", strings.TrimSpace(ip6)) + c.Assert(err, check.IsNil) + + // Inspect and store the v4 address from specified container on the network dualstackl3 + ip = inspectField(c, "second", "NetworkSettings.Networks.dualstackl3.IPAddress") + // Inspect and store the v6 address from specified container on the network dualstackl3 + ip6 = inspectField(c, "second", "NetworkSettings.Networks.dualstackl3.GlobalIPv6Address") + + // Verify connectivity across disparate subnets which is unique to L3 mode only + _, _, err = dockerCmdWithError("exec", "third", "ping", "-c", "1", strings.TrimSpace(ip)) + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "third", "ping6", "-c", "1", strings.TrimSpace(ip6)) + c.Assert(err, check.IsNil) + + // Inspect the v4 gateway to ensure no next hop is assigned in L3 mode + ip4gw := inspectField(c, "first", "NetworkSettings.Networks.dualstackl3.Gateway") + c.Assert(strings.TrimSpace(ip4gw), check.Equals, "") + // Inspect the v6 gateway to ensure the explicitly specified default GW is ignored per L3 mode enabled + ip6gw := inspectField(c, "third", "NetworkSettings.Networks.dualstackl3.IPv6Gateway") + c.Assert(strings.TrimSpace(ip6gw), check.Equals, "") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIpvlanAddressing(c *check.C) { + // Ensure the default gateways, next-hops and default dev devices are properly set + testRequires(c, DaemonIsLinux, IPv6, ipvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=macvlan", "--ipv6", "--subnet=172.28.130.0/24", + "--subnet=2001:db8:abca::/64", "--gateway=2001:db8:abca::254", "-o", "macvlan_mode=bridge", "dualstackbridge") + assertNwIsAvailable(c, "dualstackbridge") + dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=first", "busybox", "top") + // Validate macvlan bridge mode defaults gateway sets the default IPAM next-hop inferred from the subnet + out, _, err := dockerCmdWithError("exec", "first", "ip", "route") + c.Assert(err, check.IsNil) + c.Assert(out, checker.Contains, "default via 172.28.130.1 dev eth0") + // Validate macvlan bridge mode sets the v6 gateway to the user specified default gateway/next-hop + out, _, err = dockerCmdWithError("exec", "first", "ip", "-6", "route") + c.Assert(err, check.IsNil) + c.Assert(out, checker.Contains, "default via 2001:db8:abca::254 dev eth0") + + // Verify ipvlan l2 mode sets the proper default gateway routes via netlink + // for either an explicitly set route by the user or inferred via default IPAM + dockerCmd(c, "network", "create", "--driver=ipvlan", "--ipv6", "--subnet=172.28.140.0/24", "--gateway=172.28.140.254", + "--subnet=2001:db8:abcb::/64", "-o", "ipvlan_mode=l2", "dualstackl2") + assertNwIsAvailable(c, "dualstackl2") + dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=second", "busybox", "top") + // Validate ipvlan l2 mode defaults gateway sets the default IPAM next-hop inferred from the subnet + out, _, err = dockerCmdWithError("exec", "second", "ip", "route") + c.Assert(err, check.IsNil) + c.Assert(out, checker.Contains, "default via 172.28.140.254 dev eth0") + // Validate ipvlan l2 mode sets the v6 gateway to the user specified default gateway/next-hop + out, _, err = dockerCmdWithError("exec", "second", "ip", "-6", "route") + c.Assert(err, check.IsNil) + c.Assert(out, checker.Contains, "default via 2001:db8:abcb::1 dev eth0") + + // Validate ipvlan l3 mode sets the v4 
gateway to dev eth0 and disregards any explicit or inferred next-hops + dockerCmd(c, "network", "create", "--driver=ipvlan", "--ipv6", "--subnet=172.28.160.0/24", "--gateway=172.28.160.254", + "--subnet=2001:db8:abcd::/64", "--gateway=2001:db8:abcd::254", "-o", "ipvlan_mode=l3", "dualstackl3") + assertNwIsAvailable(c, "dualstackl3") + dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=third", "busybox", "top") + // Validate ipvlan l3 mode sets the v4 gateway to dev eth0 and disregards any explicit or inferred next-hops + out, _, err = dockerCmdWithError("exec", "third", "ip", "route") + c.Assert(err, check.IsNil) + c.Assert(out, checker.Contains, "default dev eth0") + // Validate ipvlan l3 mode sets the v6 gateway to dev eth0 and disregards any explicit or inferred next-hops + out, _, err = dockerCmdWithError("exec", "third", "ip", "-6", "route") + c.Assert(err, check.IsNil) + c.Assert(out, checker.Contains, "default dev eth0") +} + +func (s *DockerSuite) TestDockerNetworkMacVlanBridgeNilParent(c *check.C) { + // macvlan bridge mode - dummy parent interface is provisioned dynamically + testRequires(c, DaemonIsLinux, macvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=macvlan", "dm-nil-parent") + assertNwIsAvailable(c, "dm-nil-parent") + + // start two containers on the same subnet + dockerCmd(c, "run", "-d", "--net=dm-nil-parent", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--net=dm-nil-parent", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // intra-network communications should succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestDockerNetworkMacVlanBridgeInternalMode(c *check.C) { + // macvlan bridge mode --internal containers can communicate inside the network but not externally + testRequires(c, DaemonIsLinux, macvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + cli.DockerCmd(c, "network", "create", "--driver=macvlan", "--internal", "dm-internal") + assertNwIsAvailable(c, "dm-internal") + nr := getNetworkResource(c, "dm-internal") + c.Assert(nr.Internal, checker.True) + + // start two containers on the same subnet + cli.DockerCmd(c, "run", "-d", "--net=dm-internal", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + cli.DockerCmd(c, "run", "-d", "--net=dm-internal", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // access outside of the network should fail + result := cli.Docker(cli.Args("exec", "first", "ping", "-c", "1", "-w", "1", "8.8.8.8"), cli.WithTimeout(time.Second)) + c.Assert(result, icmd.Matches, icmd.Expected{Timeout: true}) + + // intra-network communications should succeed + cli.DockerCmd(c, "exec", "second", "ping", "-c", "1", "first") +} + +func (s *DockerSuite) TestDockerNetworkIpvlanL2NilParent(c *check.C) { + // ipvlan l2 mode - dummy parent interface is provisioned dynamically + testRequires(c, DaemonIsLinux, ipvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=ipvlan", "di-nil-parent") + assertNwIsAvailable(c, "di-nil-parent") + + // start two containers on the same subnet + dockerCmd(c, "run", "-d", "--net=di-nil-parent", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--net=di-nil-parent", "--name=second", "busybox", 
"top") + c.Assert(waitRun("second"), check.IsNil) + + // intra-network communications should succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestDockerNetworkIpvlanL2InternalMode(c *check.C) { + // ipvlan l2 mode --internal containers can communicate inside the network but not externally + testRequires(c, DaemonIsLinux, ipvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + cli.DockerCmd(c, "network", "create", "--driver=ipvlan", "--internal", "di-internal") + assertNwIsAvailable(c, "di-internal") + nr := getNetworkResource(c, "di-internal") + c.Assert(nr.Internal, checker.True) + + // start two containers on the same subnet + cli.DockerCmd(c, "run", "-d", "--net=di-internal", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + cli.DockerCmd(c, "run", "-d", "--net=di-internal", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // access outside of the network should fail + result := cli.Docker(cli.Args("exec", "first", "ping", "-c", "1", "-w", "1", "8.8.8.8"), cli.WithTimeout(time.Second)) + c.Assert(result, icmd.Matches, icmd.Expected{Timeout: true}) + // intra-network communications should succeed + cli.DockerCmd(c, "exec", "second", "ping", "-c", "1", "first") +} + +func (s *DockerSuite) TestDockerNetworkIpvlanL3NilParent(c *check.C) { + // ipvlan l3 mode - dummy parent interface is provisioned dynamically + testRequires(c, DaemonIsLinux, ipvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=ipvlan", "--subnet=172.28.230.0/24", + "--subnet=172.28.220.0/24", "-o", "ipvlan_mode=l3", "di-nil-parent-l3") + assertNwIsAvailable(c, "di-nil-parent-l3") + + // start two containers on separate subnets + dockerCmd(c, "run", "-d", "--ip=172.28.220.10", "--net=di-nil-parent-l3", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--ip=172.28.230.10", "--net=di-nil-parent-l3", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // intra-network communications should succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestDockerNetworkIpvlanL3InternalMode(c *check.C) { + // ipvlan l3 mode --internal containers can communicate inside the network but not externally + testRequires(c, DaemonIsLinux, ipvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + cli.DockerCmd(c, "network", "create", "--driver=ipvlan", "--subnet=172.28.230.0/24", + "--subnet=172.28.220.0/24", "-o", "ipvlan_mode=l3", "--internal", "di-internal-l3") + assertNwIsAvailable(c, "di-internal-l3") + nr := getNetworkResource(c, "di-internal-l3") + c.Assert(nr.Internal, checker.True) + + // start two containers on separate subnets + cli.DockerCmd(c, "run", "-d", "--ip=172.28.220.10", "--net=di-internal-l3", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + cli.DockerCmd(c, "run", "-d", "--ip=172.28.230.10", "--net=di-internal-l3", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // access outside of the network should fail + result := cli.Docker(cli.Args("exec", "first", "ping", "-c", "1", "-w", "1", "8.8.8.8"), cli.WithTimeout(time.Second)) + c.Assert(result, icmd.Matches, icmd.Expected{Timeout: true}) + // intra-network communications should succeed + cli.DockerCmd(c, 
"exec", "second", "ping", "-c", "1", "first") +} + +func (s *DockerSuite) TestDockerNetworkMacVlanExistingParent(c *check.C) { + // macvlan bridge mode - empty parent interface containers can reach each other internally but not externally + testRequires(c, DaemonIsLinux, macvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + netName := "dm-parent-exists" + createMasterDummy(c, "dm-dummy0") + //out, err := createVlanInterface(c, "dm-parent", "dm-slave", "macvlan", "bridge") + // create a network using an existing parent interface + dockerCmd(c, "network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0", netName) + assertNwIsAvailable(c, netName) + // delete the network while preserving the parent link + dockerCmd(c, "network", "rm", netName) + assertNwNotAvailable(c, netName) + // verify the network delete did not delete the predefined link + linkExists(c, "dm-dummy0") + deleteInterface(c, "dm-dummy0") +} + +func (s *DockerSuite) TestDockerNetworkMacVlanSubinterface(c *check.C) { + // macvlan bridge mode - empty parent interface containers can reach each other internally but not externally + testRequires(c, DaemonIsLinux, macvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + netName := "dm-subinterface" + createMasterDummy(c, "dm-dummy0") + createVlanInterface(c, "dm-dummy0", "dm-dummy0.20", "20") + // create a network using an existing parent interface + dockerCmd(c, "network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0.20", netName) + assertNwIsAvailable(c, netName) + + // start containers on 802.1q tagged '-o parent' sub-interface + dockerCmd(c, "run", "-d", "--net=dm-subinterface", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--net=dm-subinterface", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + // verify containers can communicate + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + + // remove the containers + dockerCmd(c, "rm", "-f", "first") + dockerCmd(c, "rm", "-f", "second") + // delete the network while preserving the parent link + dockerCmd(c, "network", "rm", netName) + assertNwNotAvailable(c, netName) + // verify the network delete did not delete the predefined sub-interface + linkExists(c, "dm-dummy0.20") + // delete the parent interface which also collects the slave + deleteInterface(c, "dm-dummy0") +} + +func createMasterDummy(c *check.C, master string) { + // ip link add type dummy + icmd.RunCommand("ip", "link", "add", master, "type", "dummy").Assert(c, icmd.Success) + icmd.RunCommand("ip", "link", "set", master, "up").Assert(c, icmd.Success) +} + +func createVlanInterface(c *check.C, master, slave, id string) { + // ip link add link name . 
type vlan id + icmd.RunCommand("ip", "link", "add", "link", master, "name", slave, "type", "vlan", "id", id).Assert(c, icmd.Success) + // ip link set up + icmd.RunCommand("ip", "link", "set", slave, "up").Assert(c, icmd.Success) +} + +func linkExists(c *check.C, master string) { + // verify the specified link exists, ip link show + icmd.RunCommand("ip", "link", "show", master).Assert(c, icmd.Success) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_hub_pull_suite_test.go b/vendor/github.com/moby/moby/integration-cli/docker_hub_pull_suite_test.go new file mode 100644 index 000000000..263372087 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_hub_pull_suite_test.go @@ -0,0 +1,91 @@ +package main + +import ( + "os/exec" + "runtime" + "strings" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/daemon" + "github.com/go-check/check" +) + +func init() { + // FIXME. Temporarily turning this off for Windows as GH16039 was breaking + // Windows to Linux CI @icecrime + if runtime.GOOS != "windows" { + check.Suite(newDockerHubPullSuite()) + } +} + +// DockerHubPullSuite provides an isolated daemon that doesn't have all the +// images that are baked into our 'global' test environment daemon (e.g., +// busybox, httpserver, ...). +// +// We use it for push/pull tests where we want to start fresh, and measure the +// relative impact of each individual operation. As part of this suite, all +// images are removed after each test. +type DockerHubPullSuite struct { + d *daemon.Daemon + ds *DockerSuite +} + +// newDockerHubPullSuite returns a new instance of a DockerHubPullSuite. +func newDockerHubPullSuite() *DockerHubPullSuite { + return &DockerHubPullSuite{ + ds: &DockerSuite{}, + } +} + +// SetUpSuite starts the suite daemon. +func (s *DockerHubPullSuite) SetUpSuite(c *check.C) { + testRequires(c, DaemonIsLinux) + s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: testEnv.ExperimentalDaemon(), + }) + s.d.Start(c) +} + +// TearDownSuite stops the suite daemon. +func (s *DockerHubPullSuite) TearDownSuite(c *check.C) { + if s.d != nil { + s.d.Stop(c) + } +} + +// SetUpTest declares that all tests of this suite require network. +func (s *DockerHubPullSuite) SetUpTest(c *check.C) { + testRequires(c, Network) +} + +// TearDownTest removes all images from the suite daemon. +func (s *DockerHubPullSuite) TearDownTest(c *check.C) { + out := s.Cmd(c, "images", "-aq") + images := strings.Split(out, "\n") + images = append([]string{"rmi", "-f"}, images...) + s.d.Cmd(images...) + s.ds.TearDownTest(c) +} + +// Cmd executes a command against the suite daemon and returns the combined +// output. The function fails the test when the command returns an error. +func (s *DockerHubPullSuite) Cmd(c *check.C, name string, arg ...string) string { + out, err := s.CmdWithError(name, arg...) + c.Assert(err, checker.IsNil, check.Commentf("%q failed with errors: %s, %v", strings.Join(arg, " "), out, err)) + return out +} + +// CmdWithError executes a command against the suite daemon and returns the +// combined output as well as any error. +func (s *DockerHubPullSuite) CmdWithError(name string, arg ...string) (string, error) { + c := s.MakeCmd(name, arg...) + b, err := c.CombinedOutput() + return string(b), err +} + +// MakeCmd returns an exec.Cmd command to run against the suite daemon. 
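+//
+// A minimal usage sketch (the image name is hypothetical):
+//
+//	cmd := s.MakeCmd("pull", "busybox")
+//	out, err := cmd.CombinedOutput()
+//	c.Assert(err, check.IsNil, check.Commentf("%s", out))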
+func (s *DockerHubPullSuite) MakeCmd(name string, arg ...string) *exec.Cmd { + args := []string{"--host", s.d.Sock(), name} + args = append(args, arg...) + return exec.Command(dockerBinary, args...) +} diff --git a/vendor/github.com/moby/moby/integration-cli/docker_utils_test.go b/vendor/github.com/moby/moby/integration-cli/docker_utils_test.go new file mode 100644 index 000000000..0c0a16483 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/docker_utils_test.go @@ -0,0 +1,502 @@ +package main + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "path" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/daemon" + "github.com/docker/docker/integration-cli/registry" + "github.com/docker/docker/integration-cli/request" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +// Deprecated +func daemonHost() string { + return request.DaemonHost() +} + +func deleteImages(images ...string) error { + args := []string{dockerBinary, "rmi", "-f"} + return icmd.RunCmd(icmd.Cmd{Command: append(args, images...)}).Error +} + +// Deprecated: use cli.Docker or cli.DockerCmd +func dockerCmdWithError(args ...string) (string, int, error) { + result := cli.Docker(cli.Args(args...)) + if result.Error != nil { + return result.Combined(), result.ExitCode, result.Compare(icmd.Success) + } + return result.Combined(), result.ExitCode, result.Error +} + +// Deprecated: use cli.Docker or cli.DockerCmd +func dockerCmd(c *check.C, args ...string) (string, int) { + result := cli.DockerCmd(c, args...) + return result.Combined(), result.ExitCode +} + +// Deprecated: use cli.Docker or cli.DockerCmd +func dockerCmdWithResult(args ...string) *icmd.Result { + return cli.Docker(cli.Args(args...)) +} + +func findContainerIP(c *check.C, id string, network string) string { + out, _ := dockerCmd(c, "inspect", fmt.Sprintf("--format='{{ .NetworkSettings.Networks.%s.IPAddress }}'", network), id) + return strings.Trim(out, " \r\n'") +} + +func getContainerCount(c *check.C) int { + const containers = "Containers:" + + result := icmd.RunCommand(dockerBinary, "info") + result.Assert(c, icmd.Success) + + lines := strings.Split(result.Combined(), "\n") + for _, line := range lines { + if strings.Contains(line, containers) { + output := strings.TrimSpace(line) + output = strings.TrimLeft(output, containers) + output = strings.Trim(output, " ") + containerCount, err := strconv.Atoi(output) + c.Assert(err, checker.IsNil) + return containerCount + } + } + return 0 +} + +func inspectFieldAndUnmarshall(c *check.C, name, field string, output interface{}) { + str := inspectFieldJSON(c, name, field) + err := json.Unmarshal([]byte(str), output) + if c != nil { + c.Assert(err, check.IsNil, check.Commentf("failed to unmarshal: %v", err)) + } +} + +// Deprecated: use cli.Inspect +func inspectFilter(name, filter string) (string, error) { + format := fmt.Sprintf("{{%s}}", filter) + result := icmd.RunCommand(dockerBinary, "inspect", "-f", format, name) + if result.Error != nil || result.ExitCode != 0 { + return "", fmt.Errorf("failed to inspect %s: %s", name, result.Combined()) + } + return strings.TrimSpace(result.Combined()), nil +} + +// Deprecated: use cli.Inspect +func inspectFieldWithError(name, field string) (string, error) { + return inspectFilter(name, fmt.Sprintf(".%s", field)) +} 
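+
+// A sketch of composing the filter helpers above; the container name "web"
+// and the inspected field are hypothetical:
+//
+//	created, err := inspectFieldWithError("web", "Created")
+//	if err == nil {
+//		fmt.Println("created at", created)
+//	}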
+ +// Deprecated: use cli.Inspect +func inspectField(c *check.C, name, field string) string { + out, err := inspectFilter(name, fmt.Sprintf(".%s", field)) + if c != nil { + c.Assert(err, check.IsNil) + } + return out +} + +// Deprecated: use cli.Inspect +func inspectFieldJSON(c *check.C, name, field string) string { + out, err := inspectFilter(name, fmt.Sprintf("json .%s", field)) + if c != nil { + c.Assert(err, check.IsNil) + } + return out +} + +// Deprecated: use cli.Inspect +func inspectFieldMap(c *check.C, name, path, field string) string { + out, err := inspectFilter(name, fmt.Sprintf("index .%s %q", path, field)) + if c != nil { + c.Assert(err, check.IsNil) + } + return out +} + +// Deprecated: use cli.Inspect +func inspectMountSourceField(name, destination string) (string, error) { + m, err := inspectMountPoint(name, destination) + if err != nil { + return "", err + } + return m.Source, nil +} + +// Deprecated: use cli.Inspect +func inspectMountPoint(name, destination string) (types.MountPoint, error) { + out, err := inspectFilter(name, "json .Mounts") + if err != nil { + return types.MountPoint{}, err + } + + return inspectMountPointJSON(out, destination) +} + +var errMountNotFound = errors.New("mount point not found") + +// Deprecated: use cli.Inspect +func inspectMountPointJSON(j, destination string) (types.MountPoint, error) { + var mp []types.MountPoint + if err := json.Unmarshal([]byte(j), &mp); err != nil { + return types.MountPoint{}, err + } + + var m *types.MountPoint + for _, c := range mp { + if c.Destination == destination { + m = &c + break + } + } + + if m == nil { + return types.MountPoint{}, errMountNotFound + } + + return *m, nil +} + +// Deprecated: use cli.Inspect +func inspectImage(c *check.C, name, filter string) string { + args := []string{"inspect", "--type", "image"} + if filter != "" { + format := fmt.Sprintf("{{%s}}", filter) + args = append(args, "-f", format) + } + args = append(args, name) + result := icmd.RunCommand(dockerBinary, args...) + result.Assert(c, icmd.Success) + return strings.TrimSpace(result.Combined()) +} + +func getIDByName(c *check.C, name string) string { + id, err := inspectFieldWithError(name, "Id") + c.Assert(err, checker.IsNil) + return id +} + +// Deprecated: use cli.Build +func buildImageSuccessfully(c *check.C, name string, cmdOperators ...cli.CmdOperator) { + buildImage(name, cmdOperators...).Assert(c, icmd.Success) +} + +// Deprecated: use cli.Build +func buildImage(name string, cmdOperators ...cli.CmdOperator) *icmd.Result { + return cli.Docker(cli.Build(name), cmdOperators...) +} + +// Deprecated: use trustedcmd +func trustedBuild(cmd *icmd.Cmd) func() { + trustedCmd(cmd) + return nil +} + +// Write `content` to the file at path `dst`, creating it if necessary, +// as well as any missing directories. +// The file is truncated if it already exists. +// Fail the test when error occurs. +func writeFile(dst, content string, c *check.C) { + // Create subdirectories if necessary + c.Assert(os.MkdirAll(path.Dir(dst), 0700), check.IsNil) + f, err := os.OpenFile(dst, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0700) + c.Assert(err, check.IsNil) + defer f.Close() + // Write content (truncate if it exists) + _, err = io.Copy(f, strings.NewReader(content)) + c.Assert(err, check.IsNil) +} + +// Return the contents of file at path `src`. +// Fail the test when error occurs. 
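+//
+// Round-trip sketch with writeFile above (path and payload are hypothetical):
+//
+//	writeFile("/tmp/test-dir/config.json", `{"debug": true}`, c)
+//	content := readFile("/tmp/test-dir/config.json", c)
+//	c.Assert(content, checker.Contains, "debug")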
+func readFile(src string, c *check.C) (content string) { + data, err := ioutil.ReadFile(src) + c.Assert(err, check.IsNil) + + return string(data) +} + +func containerStorageFile(containerID, basename string) string { + return filepath.Join(testEnv.ContainerStoragePath(), containerID, basename) +} + +// docker commands that use this function must be run with the '-d' switch. +func runCommandAndReadContainerFile(c *check.C, filename string, command string, args ...string) []byte { + result := icmd.RunCommand(command, args...) + result.Assert(c, icmd.Success) + contID := strings.TrimSpace(result.Combined()) + if err := waitRun(contID); err != nil { + c.Fatalf("%v: %q", contID, err) + } + return readContainerFile(c, contID, filename) +} + +func readContainerFile(c *check.C, containerID, filename string) []byte { + f, err := os.Open(containerStorageFile(containerID, filename)) + c.Assert(err, checker.IsNil) + defer f.Close() + + content, err := ioutil.ReadAll(f) + c.Assert(err, checker.IsNil) + return content +} + +func readContainerFileWithExec(c *check.C, containerID, filename string) []byte { + result := icmd.RunCommand(dockerBinary, "exec", containerID, "cat", filename) + result.Assert(c, icmd.Success) + return []byte(result.Combined()) +} + +// daemonTime provides the current time on the daemon host +func daemonTime(c *check.C) time.Time { + if testEnv.LocalDaemon() { + return time.Now() + } + + status, body, err := request.SockRequest("GET", "/info", nil, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusOK) + + type infoJSON struct { + SystemTime string + } + var info infoJSON + err = json.Unmarshal(body, &info) + c.Assert(err, check.IsNil, check.Commentf("unable to unmarshal GET /info response")) + + dt, err := time.Parse(time.RFC3339Nano, info.SystemTime) + c.Assert(err, check.IsNil, check.Commentf("invalid time format in GET /info response")) + return dt +} + +// daemonUnixTime returns the current time on the daemon host with nanoseconds precision. +// It return the time formatted how the client sends timestamps to the server. +func daemonUnixTime(c *check.C) string { + return parseEventTime(daemonTime(c)) +} + +func parseEventTime(t time.Time) string { + return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())) +} + +func setupRegistry(c *check.C, schema1 bool, auth, tokenURL string) *registry.V2 { + reg, err := registry.NewV2(schema1, auth, tokenURL, privateRegistryURL) + c.Assert(err, check.IsNil) + + // Wait for registry to be ready to serve requests. + for i := 0; i != 50; i++ { + if err = reg.Ping(); err == nil { + break + } + time.Sleep(100 * time.Millisecond) + } + + c.Assert(err, check.IsNil, check.Commentf("Timeout waiting for test registry to become available: %v", err)) + return reg +} + +func setupNotary(c *check.C) *testNotary { + ts, err := newTestNotary(c) + c.Assert(err, check.IsNil) + + return ts +} + +// appendBaseEnv appends the minimum set of environment variables to exec the +// docker cli binary for testing with correct configuration to the given env +// list. +func appendBaseEnv(isTLS bool, env ...string) []string { + preserveList := []string{ + // preserve remote test host + "DOCKER_HOST", + + // windows: requires preserving SystemRoot, otherwise dial tcp fails + // with "GetAddrInfoW: A non-recoverable error occurred during a database lookup." 
+ "SystemRoot", + + // testing help text requires the $PATH to dockerd is set + "PATH", + } + if isTLS { + preserveList = append(preserveList, "DOCKER_TLS_VERIFY", "DOCKER_CERT_PATH") + } + + for _, key := range preserveList { + if val := os.Getenv(key); val != "" { + env = append(env, fmt.Sprintf("%s=%s", key, val)) + } + } + return env +} + +func createTmpFile(c *check.C, content string) string { + f, err := ioutil.TempFile("", "testfile") + c.Assert(err, check.IsNil) + + filename := f.Name() + + err = ioutil.WriteFile(filename, []byte(content), 0644) + c.Assert(err, check.IsNil) + + return filename +} + +// waitRun will wait for the specified container to be running, maximum 5 seconds. +// Deprecated: use cli.WaitFor +func waitRun(contID string) error { + return waitInspect(contID, "{{.State.Running}}", "true", 5*time.Second) +} + +// waitInspect will wait for the specified container to have the specified string +// in the inspect output. It will wait until the specified timeout (in seconds) +// is reached. +// Deprecated: use cli.WaitFor +func waitInspect(name, expr, expected string, timeout time.Duration) error { + return waitInspectWithArgs(name, expr, expected, timeout) +} + +// Deprecated: use cli.WaitFor +func waitInspectWithArgs(name, expr, expected string, timeout time.Duration, arg ...string) error { + return daemon.WaitInspectWithArgs(dockerBinary, name, expr, expected, timeout, arg...) +} + +func getInspectBody(c *check.C, version, id string) []byte { + endpoint := fmt.Sprintf("/%s/containers/%s/json", version, id) + status, body, err := request.SockRequest("GET", endpoint, nil, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusOK) + return body +} + +// Run a long running idle task in a background container using the +// system-specific default image and command. +func runSleepingContainer(c *check.C, extraArgs ...string) string { + return runSleepingContainerInImage(c, defaultSleepImage, extraArgs...) +} + +// Run a long running idle task in a background container using the specified +// image and the system-specific command. +func runSleepingContainerInImage(c *check.C, image string, extraArgs ...string) string { + args := []string{"run", "-d"} + args = append(args, extraArgs...) + args = append(args, image) + args = append(args, sleepCommandForDaemonPlatform()...) + return strings.TrimSpace(cli.DockerCmd(c, args...).Combined()) +} + +// minimalBaseImage returns the name of the minimal base image for the current +// daemon platform. 
+func minimalBaseImage() string { + return testEnv.MinimalBaseImage() +} + +func getGoroutineNumber() (int, error) { + i := struct { + NGoroutines int + }{} + status, b, err := request.SockRequest("GET", "/info", nil, daemonHost()) + if err != nil { + return 0, err + } + if status != http.StatusOK { + return 0, fmt.Errorf("http status code: %d", status) + } + if err := json.Unmarshal(b, &i); err != nil { + return 0, err + } + return i.NGoroutines, nil +} + +func waitForGoroutines(expected int) error { + t := time.After(30 * time.Second) + for { + select { + case <-t: + n, err := getGoroutineNumber() + if err != nil { + return err + } + if n > expected { + return fmt.Errorf("leaked goroutines: expected less than or equal to %d, got: %d", expected, n) + } + default: + n, err := getGoroutineNumber() + if err != nil { + return err + } + if n <= expected { + return nil + } + time.Sleep(200 * time.Millisecond) + } + } +} + +// getErrorMessage returns the error message from an error API response +func getErrorMessage(c *check.C, body []byte) string { + var resp types.ErrorResponse + c.Assert(json.Unmarshal(body, &resp), check.IsNil) + return strings.TrimSpace(resp.Message) +} + +func waitAndAssert(c *check.C, timeout time.Duration, f checkF, checker check.Checker, args ...interface{}) { + after := time.After(timeout) + for { + v, comment := f(c) + assert, _ := checker.Check(append([]interface{}{v}, args...), checker.Info().Params) + select { + case <-after: + assert = true + default: + } + if assert { + if comment != nil { + args = append(args, comment) + } + c.Assert(v, checker, args...) + return + } + time.Sleep(100 * time.Millisecond) + } +} + +type checkF func(*check.C) (interface{}, check.CommentInterface) +type reducer func(...interface{}) interface{} + +func reducedCheck(r reducer, funcs ...checkF) checkF { + return func(c *check.C) (interface{}, check.CommentInterface) { + var values []interface{} + var comments []string + for _, f := range funcs { + v, comment := f(c) + values = append(values, v) + if comment != nil { + comments = append(comments, comment.CheckCommentString()) + } + } + return r(values...), check.Commentf("%v", strings.Join(comments, ", ")) + } +} + +func sumAsIntegers(vals ...interface{}) interface{} { + var s int + for _, v := range vals { + s += v.(int) + } + return s +} diff --git a/vendor/github.com/moby/moby/integration-cli/environment/clean.go b/vendor/github.com/moby/moby/integration-cli/environment/clean.go new file mode 100644 index 000000000..809baa7b5 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/environment/clean.go @@ -0,0 +1,216 @@ +package environment + +import ( + "encoding/json" + "fmt" + "net/http" + "regexp" + "strings" + + "github.com/docker/docker/api/types" + volumetypes "github.com/docker/docker/api/types/volume" + "github.com/docker/docker/integration-cli/request" + icmd "github.com/docker/docker/pkg/testutil/cmd" +) + +type testingT interface { + logT + Fatalf(string, ...interface{}) +} + +type logT interface { + Logf(string, ...interface{}) +} + +// Clean the environment, preserving protected objects (images, containers, ...) +// and removing everything else. It's meant to run after any tests so that they don't +// depend on each others. 
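+//
+// Typically wired into a test teardown; a sketch (the suite type is
+// hypothetical):
+//
+//	func (s *SomeSuite) TearDownTest(c *check.C) {
+//		testEnv.Clean(c, testEnv.DockerBinary())
+//	}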
+func (e *Execution) Clean(t testingT, dockerBinary string) { + if (e.DaemonPlatform() != "windows") || (e.DaemonPlatform() == "windows" && e.Isolation() == "hyperv") { + unpauseAllContainers(t, dockerBinary) + } + deleteAllContainers(t, dockerBinary) + deleteAllImages(t, dockerBinary, e.protectedElements.images) + deleteAllVolumes(t, dockerBinary) + deleteAllNetworks(t, dockerBinary, e.DaemonPlatform()) + if e.DaemonPlatform() == "linux" { + deleteAllPlugins(t, dockerBinary) + } +} + +func unpauseAllContainers(t testingT, dockerBinary string) { + containers := getPausedContainers(t, dockerBinary) + if len(containers) > 0 { + icmd.RunCommand(dockerBinary, append([]string{"unpause"}, containers...)...).Assert(t, icmd.Success) + } +} + +func getPausedContainers(t testingT, dockerBinary string) []string { + result := icmd.RunCommand(dockerBinary, "ps", "-f", "status=paused", "-q", "-a") + result.Assert(t, icmd.Success) + return strings.Fields(result.Combined()) +} + +var alreadyExists = regexp.MustCompile(`Error response from daemon: removal of container (\w+) is already in progress`) + +func deleteAllContainers(t testingT, dockerBinary string) { + containers := getAllContainers(t, dockerBinary) + if len(containers) > 0 { + result := icmd.RunCommand(dockerBinary, append([]string{"rm", "-fv"}, containers...)...) + if result.Error != nil { + // If the error is "No such container: ..." this means the container doesn't exists anymore, + // or if it is "... removal of container ... is already in progress" it will be removed eventually. + // We can safely ignore those. + if strings.Contains(result.Stderr(), "No such container") || alreadyExists.MatchString(result.Stderr()) { + return + } + t.Fatalf("error removing containers %v : %v (%s)", containers, result.Error, result.Combined()) + } + } +} + +func getAllContainers(t testingT, dockerBinary string) []string { + result := icmd.RunCommand(dockerBinary, "ps", "-q", "-a") + result.Assert(t, icmd.Success) + return strings.Fields(result.Combined()) +} + +func deleteAllImages(t testingT, dockerBinary string, protectedImages map[string]struct{}) { + result := icmd.RunCommand(dockerBinary, "images", "--digests") + result.Assert(t, icmd.Success) + lines := strings.Split(string(result.Combined()), "\n")[1:] + imgMap := map[string]struct{}{} + for _, l := range lines { + if l == "" { + continue + } + fields := strings.Fields(l) + imgTag := fields[0] + ":" + fields[1] + if _, ok := protectedImages[imgTag]; !ok { + if fields[0] == "" || fields[1] == "" { + if fields[2] != "" { + imgMap[fields[0]+"@"+fields[2]] = struct{}{} + } else { + imgMap[fields[3]] = struct{}{} + } + // continue + } else { + imgMap[imgTag] = struct{}{} + } + } + } + if len(imgMap) != 0 { + imgs := make([]string, 0, len(imgMap)) + for k := range imgMap { + imgs = append(imgs, k) + } + icmd.RunCommand(dockerBinary, append([]string{"rmi", "-f"}, imgs...)...).Assert(t, icmd.Success) + } +} + +func deleteAllVolumes(t testingT, dockerBinary string) { + volumes, err := getAllVolumes() + if err != nil { + t.Fatalf("%v", err) + } + var errs []string + for _, v := range volumes { + status, b, err := request.SockRequest("DELETE", "/volumes/"+v.Name, nil, request.DaemonHost()) + if err != nil { + errs = append(errs, err.Error()) + continue + } + if status != http.StatusNoContent { + errs = append(errs, fmt.Sprintf("error deleting volume %s: %s", v.Name, string(b))) + } + } + if len(errs) > 0 { + t.Fatalf("%v", strings.Join(errs, "\n")) + } +} + +func getAllVolumes() ([]*types.Volume, error) { + var 
volumes volumetypes.VolumesListOKBody + _, b, err := request.SockRequest("GET", "/volumes", nil, request.DaemonHost()) + if err != nil { + return nil, err + } + if err := json.Unmarshal(b, &volumes); err != nil { + return nil, err + } + return volumes.Volumes, nil +} + +func deleteAllNetworks(t testingT, dockerBinary string, daemonPlatform string) { + networks, err := getAllNetworks() + if err != nil { + t.Fatalf("%v", err) + } + var errs []string + for _, n := range networks { + if n.Name == "bridge" || n.Name == "none" || n.Name == "host" { + continue + } + if daemonPlatform == "windows" && strings.ToLower(n.Name) == "nat" { + // nat is a pre-defined network on Windows and cannot be removed + continue + } + status, b, err := request.SockRequest("DELETE", "/networks/"+n.Name, nil, request.DaemonHost()) + if err != nil { + errs = append(errs, err.Error()) + continue + } + if status != http.StatusNoContent { + errs = append(errs, fmt.Sprintf("error deleting network %s: %s", n.Name, string(b))) + } + } + if len(errs) > 0 { + t.Fatalf("%v", strings.Join(errs, "\n")) + } +} + +func getAllNetworks() ([]types.NetworkResource, error) { + var networks []types.NetworkResource + _, b, err := request.SockRequest("GET", "/networks", nil, request.DaemonHost()) + if err != nil { + return nil, err + } + if err := json.Unmarshal(b, &networks); err != nil { + return nil, err + } + return networks, nil +} + +func deleteAllPlugins(t testingT, dockerBinary string) { + plugins, err := getAllPlugins() + if err != nil { + t.Fatalf("%v", err) + } + var errs []string + for _, p := range plugins { + pluginName := p.Name + status, b, err := request.SockRequest("DELETE", "/plugins/"+pluginName+"?force=1", nil, request.DaemonHost()) + if err != nil { + errs = append(errs, err.Error()) + continue + } + if status != http.StatusOK { + errs = append(errs, fmt.Sprintf("error deleting plugin %s: %s", p.Name, string(b))) + } + } + if len(errs) > 0 { + t.Fatalf("%v", strings.Join(errs, "\n")) + } +} + +func getAllPlugins() (types.PluginsListResponse, error) { + var plugins types.PluginsListResponse + _, b, err := request.SockRequest("GET", "/plugins", nil, request.DaemonHost()) + if err != nil { + return nil, err + } + if err := json.Unmarshal(b, &plugins); err != nil { + return nil, err + } + return plugins, nil +} diff --git a/vendor/github.com/moby/moby/integration-cli/environment/environment.go b/vendor/github.com/moby/moby/integration-cli/environment/environment.go new file mode 100644 index 000000000..a8a104590 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/environment/environment.go @@ -0,0 +1,229 @@ +package environment + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/client" + "github.com/docker/docker/opts" + "golang.org/x/net/context" +) + +var ( + // DefaultClientBinary is the name of the docker binary + DefaultClientBinary = os.Getenv("TEST_CLIENT_BINARY") +) + +func init() { + if DefaultClientBinary == "" { + // TODO: to be removed once we no longer depend on the docker cli for integration tests + //panic("TEST_CLIENT_BINARY must be set") + DefaultClientBinary = "docker" + } +} + +// Execution holds informations about the test execution environment. 
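+//
+// A sketch of constructing and querying one:
+//
+//	env, err := environment.New()
+//	if err != nil {
+//		panic(err)
+//	}
+//	fmt.Println(env.DaemonPlatform(), env.DaemonStorageDriver())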
+type Execution struct { + daemonPlatform string + localDaemon bool + experimentalDaemon bool + daemonStorageDriver string + isolation container.Isolation + daemonPid int + daemonKernelVersion string + // For a local daemon on Linux, these values will be used for testing + // user namespace support as the standard graph path(s) will be + // appended with the root remapped uid.gid prefix + dockerBasePath string + volumesConfigPath string + containerStoragePath string + // baseImage is the name of the base image for testing + // Environment variable WINDOWS_BASE_IMAGE can override this + baseImage string + dockerBinary string + + protectedElements protectedElements +} + +// New creates a new Execution struct +func New() (*Execution, error) { + localDaemon := true + // Deterministically working out the environment in which CI is running + // to evaluate whether the daemon is local or remote is not possible through + // a build tag. + // + // For example Windows to Linux CI under Jenkins tests the 64-bit + // Windows binary build with the daemon build tag, but calls a remote + // Linux daemon. + // + // We can't just say if Windows then assume the daemon is local as at + // some point, we will be testing the Windows CLI against a Windows daemon. + // + // Similarly, it will be perfectly valid to also run CLI tests from + // a Linux CLI (built with the daemon tag) against a Windows daemon. + if len(os.Getenv("DOCKER_REMOTE_DAEMON")) > 0 { + localDaemon = false + } + info, err := getDaemonDockerInfo() + if err != nil { + return nil, err + } + daemonPlatform := info.OSType + if daemonPlatform != "linux" && daemonPlatform != "windows" { + return nil, fmt.Errorf("Cannot run tests against platform: %s", daemonPlatform) + } + baseImage := "scratch" + volumesConfigPath := filepath.Join(info.DockerRootDir, "volumes") + containerStoragePath := filepath.Join(info.DockerRootDir, "containers") + // Make sure in context of daemon, not the local platform. Note we can't + // use filepath.FromSlash or ToSlash here as they are a no-op on Unix. 
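+	// illustrative (hypothetical paths): a Windows daemon reporting
+	// "C:/ProgramData/docker/volumes" must be rewritten with back-slashes as
+	// `C:\ProgramData\docker\volumes`; on Linux the replacement runs the other
+	// way, turning any stray `\` into `/`.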
+ if daemonPlatform == "windows" { + volumesConfigPath = strings.Replace(volumesConfigPath, `/`, `\`, -1) + containerStoragePath = strings.Replace(containerStoragePath, `/`, `\`, -1) + + baseImage = "microsoft/windowsservercore" + if len(os.Getenv("WINDOWS_BASE_IMAGE")) > 0 { + baseImage = os.Getenv("WINDOWS_BASE_IMAGE") + fmt.Println("INFO: Windows Base image is ", baseImage) + } + } else { + volumesConfigPath = strings.Replace(volumesConfigPath, `\`, `/`, -1) + containerStoragePath = strings.Replace(containerStoragePath, `\`, `/`, -1) + } + + var daemonPid int + dest := os.Getenv("DEST") + b, err := ioutil.ReadFile(filepath.Join(dest, "docker.pid")) + if err == nil { + if p, err := strconv.ParseInt(string(b), 10, 32); err == nil { + daemonPid = int(p) + } + } + + dockerBinary, err := exec.LookPath(DefaultClientBinary) + if err != nil { + return nil, err + } + + return &Execution{ + localDaemon: localDaemon, + daemonPlatform: daemonPlatform, + daemonStorageDriver: info.Driver, + daemonKernelVersion: info.KernelVersion, + dockerBasePath: info.DockerRootDir, + volumesConfigPath: volumesConfigPath, + containerStoragePath: containerStoragePath, + isolation: info.Isolation, + daemonPid: daemonPid, + experimentalDaemon: info.ExperimentalBuild, + baseImage: baseImage, + dockerBinary: dockerBinary, + protectedElements: protectedElements{ + images: map[string]struct{}{}, + }, + }, nil +} +func getDaemonDockerInfo() (types.Info, error) { + // FIXME(vdemeester) should be safe to use as is + client, err := client.NewEnvClient() + if err != nil { + return types.Info{}, err + } + return client.Info(context.Background()) +} + +// LocalDaemon is true if the daemon under test is on the same +// host as the CLI. +func (e *Execution) LocalDaemon() bool { + return e.localDaemon +} + +// DaemonPlatform is held globally so that tests can make intelligent +// decisions on how to configure themselves according to the platform +// of the daemon. This is initialized in docker_utils by sending +// a version call to the daemon and examining the response header. +func (e *Execution) DaemonPlatform() string { + return e.daemonPlatform +} + +// DockerBasePath is the base path of the docker folder (by default it is -/var/run/docker) +func (e *Execution) DockerBasePath() string { + return e.dockerBasePath +} + +// VolumesConfigPath is the path of the volume configuration for the testing daemon +func (e *Execution) VolumesConfigPath() string { + return e.volumesConfigPath +} + +// ContainerStoragePath is the path where the container are stored for the testing daemon +func (e *Execution) ContainerStoragePath() string { + return e.containerStoragePath +} + +// DaemonStorageDriver is held globally so that tests can know the storage +// driver of the daemon. This is initialized in docker_utils by sending +// a version call to the daemon and examining the response header. 
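+//
+// A sketch of gating a test on the driver (the driver name is illustrative):
+//
+//	if testEnv.DaemonStorageDriver() == "vfs" {
+//		c.Skip("scenario not supported on vfs")
+//	}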
+func (e *Execution) DaemonStorageDriver() string { + return e.daemonStorageDriver +} + +// Isolation is the isolation mode of the daemon under test +func (e *Execution) Isolation() container.Isolation { + return e.isolation +} + +// DaemonPID is the pid of the main test daemon +func (e *Execution) DaemonPID() int { + return e.daemonPid +} + +// ExperimentalDaemon tell whether the main daemon has +// experimental features enabled or not +func (e *Execution) ExperimentalDaemon() bool { + return e.experimentalDaemon +} + +// MinimalBaseImage is the image used for minimal builds (it depends on the platform) +func (e *Execution) MinimalBaseImage() string { + return e.baseImage +} + +// DaemonKernelVersion is the kernel version of the daemon as a string, as returned +// by an INFO call to the daemon. +func (e *Execution) DaemonKernelVersion() string { + return e.daemonKernelVersion +} + +// DaemonKernelVersionNumeric is the kernel version of the daemon as an integer. +// Mostly useful on Windows where DaemonKernelVersion holds the full string such +// as `10.0 14393 (14393.447.amd64fre.rs1_release_inmarket.161102-0100)`, but +// integration tests really only need the `14393` piece to make decisions. +func (e *Execution) DaemonKernelVersionNumeric() int { + if e.daemonPlatform != "windows" { + return -1 + } + v, _ := strconv.Atoi(strings.Split(e.daemonKernelVersion, " ")[1]) + return v +} + +// DockerBinary returns the docker binary for this testing environment +func (e *Execution) DockerBinary() string { + return e.dockerBinary +} + +// DaemonHost return the daemon host string for this test execution +func DaemonHost() string { + daemonURLStr := "unix://" + opts.DefaultUnixSocket + if daemonHostVar := os.Getenv("DOCKER_HOST"); daemonHostVar != "" { + daemonURLStr = daemonHostVar + } + return daemonURLStr +} diff --git a/vendor/github.com/moby/moby/integration-cli/environment/protect.go b/vendor/github.com/moby/moby/integration-cli/environment/protect.go new file mode 100644 index 000000000..2b0dd6df2 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/environment/protect.go @@ -0,0 +1,12 @@ +package environment + +// ProtectImage adds the specified image(s) to be protected in case of clean +func (e *Execution) ProtectImage(t testingT, images ...string) { + for _, image := range images { + e.protectedElements.images[image] = struct{}{} + } +} + +type protectedElements struct { + images map[string]struct{} +} diff --git a/vendor/github.com/moby/moby/integration-cli/events_utils_test.go b/vendor/github.com/moby/moby/integration-cli/events_utils_test.go new file mode 100644 index 000000000..9350edcb9 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/events_utils_test.go @@ -0,0 +1,206 @@ +package main + +import ( + "bufio" + "bytes" + "io" + "os/exec" + "regexp" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + eventstestutils "github.com/docker/docker/daemon/events/testutils" + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +// eventMatcher is a function that tries to match an event input. +// It returns true if the event matches and a map with +// a set of key/value to identify the match. +type eventMatcher func(text string) (map[string]string, bool) + +// eventMatchProcessor is a function to handle an event match. +// It receives a map of key/value with the information extracted in a match. +type eventMatchProcessor func(matches map[string]string) + +// eventObserver runs an events commands and observes its output. 
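+//
+// Typical lifecycle, as the matcher helpers below expect (sketch; the id and
+// the action set are hypothetical):
+//
+//	actions := map[string]chan bool{"die": make(chan bool, 1)}
+//	observer, err := newEventObserver(c, "--filter", "container="+id)
+//	c.Assert(err, checker.IsNil)
+//	c.Assert(observer.Start(), checker.IsNil)
+//	defer observer.Stop()
+//	go observer.Match(matchEventLine(id, "container", actions), processEventMatch(actions))
+//	// ... then wait on <-actions["die"] with a timeout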
+type eventObserver struct {
+	buffer             *bytes.Buffer
+	command            *exec.Cmd
+	scanner            *bufio.Scanner
+	startTime          string
+	disconnectionError error
+}
+
+// newEventObserver creates the observer and initializes the command
+// without running it. Users must call `eventObserver.Start` to start the command.
+func newEventObserver(c *check.C, args ...string) (*eventObserver, error) {
+	since := daemonTime(c).Unix()
+	return newEventObserverWithBacklog(c, since, args...)
+}
+
+// newEventObserverWithBacklog creates a new observer, changing the start time of the backlog to return.
+func newEventObserverWithBacklog(c *check.C, since int64, args ...string) (*eventObserver, error) {
+	startTime := strconv.FormatInt(since, 10)
+	cmdArgs := []string{"events", "--since", startTime}
+	if len(args) > 0 {
+		cmdArgs = append(cmdArgs, args...)
+	}
+	eventsCmd := exec.Command(dockerBinary, cmdArgs...)
+	stdout, err := eventsCmd.StdoutPipe()
+	if err != nil {
+		return nil, err
+	}
+
+	return &eventObserver{
+		buffer:    new(bytes.Buffer),
+		command:   eventsCmd,
+		scanner:   bufio.NewScanner(stdout),
+		startTime: startTime,
+	}, nil
+}
+
+// Start starts the events command.
+func (e *eventObserver) Start() error {
+	return e.command.Start()
+}
+
+// Stop stops the events command.
+func (e *eventObserver) Stop() {
+	e.command.Process.Kill()
+	e.command.Process.Release()
+}
+
+// Match tries to match the events output with a given matcher.
+func (e *eventObserver) Match(match eventMatcher, process eventMatchProcessor) {
+	for e.scanner.Scan() {
+		text := e.scanner.Text()
+		e.buffer.WriteString(text)
+		e.buffer.WriteString("\n")
+
+		if matches, ok := match(text); ok {
+			process(matches)
+		}
+	}
+
+	err := e.scanner.Err()
+	if err == nil {
+		err = io.EOF
+	}
+
+	logrus.Debugf("EventObserver scanner loop finished: %v", err)
+	e.disconnectionError = err
+}
+
+func (e *eventObserver) CheckEventError(c *check.C, id, event string, match eventMatcher) {
+	var foundEvent bool
+	scannerOut := e.buffer.String()
+
+	if e.disconnectionError != nil {
+		until := daemonUnixTime(c)
+		out, _ := dockerCmd(c, "events", "--since", e.startTime, "--until", until)
+		events := strings.Split(strings.TrimSpace(out), "\n")
+		for _, e := range events {
+			if _, ok := match(e); ok {
+				foundEvent = true
+				break
+			}
+		}
+		scannerOut = out
+	}
+	if !foundEvent {
+		c.Fatalf("failed to observe event `%s` for %s. Disconnection error: %v\nout:\n%v", event, id, e.disconnectionError, scannerOut)
+	}
+}
+
+// matchEventLine matches a text with the event regular expression.
+// It returns the matches and true if the regular expression matches with the given id and event type.
+// It returns an empty map and false if there is no match.
+func matchEventLine(id, eventType string, actions map[string]chan bool) eventMatcher {
+	return func(text string) (map[string]string, bool) {
+		matches := eventstestutils.ScanMap(text)
+		if len(matches) == 0 {
+			return matches, false
+		}
+
+		if matchIDAndEventType(matches, id, eventType) {
+			if _, ok := actions[matches["action"]]; ok {
+				return matches, true
+			}
+		}
+		return matches, false
+	}
+}
+
+// processEventMatch closes an action channel when an event line matches the expected action.
+func processEventMatch(actions map[string]chan bool) eventMatchProcessor {
+	return func(matches map[string]string) {
+		if ch, ok := actions[matches["action"]]; ok {
+			ch <- true
+		}
+	}
+}
+
+// parseEventAction parses an event text and returns the action.
+// It fails if the text is not in the event format.
+func parseEventAction(c *check.C, text string) string {
+	matches := eventstestutils.ScanMap(text)
+	return matches["action"]
+}
+
+// eventActionsByIDAndType returns the actions for a given id and type.
+// It fails if the text is not in the event format.
+func eventActionsByIDAndType(c *check.C, events []string, id, eventType string) []string {
+	var filtered []string
+	for _, event := range events {
+		matches := eventstestutils.ScanMap(event)
+		c.Assert(matches, checker.Not(checker.IsNil))
+		if matchIDAndEventType(matches, id, eventType) {
+			filtered = append(filtered, matches["action"])
+		}
+	}
+	return filtered
+}
+
+// matchIDAndEventType returns true if an event matches a given id and type.
+// It also resolves names in the event attributes if the id doesn't match.
+func matchIDAndEventType(matches map[string]string, id, eventType string) bool {
+	return matchEventID(matches, id) && matches["eventType"] == eventType
+}
+
+func matchEventID(matches map[string]string, id string) bool {
+	matchID := matches["id"] == id || strings.HasPrefix(matches["id"], id)
+	if !matchID && matches["attributes"] != "" {
+		// try matching a name in the attributes
+		attributes := map[string]string{}
+		for _, a := range strings.Split(matches["attributes"], ", ") {
+			kv := strings.Split(a, "=")
+			attributes[kv[0]] = kv[1]
+		}
+		matchID = attributes["name"] == id
+	}
+	return matchID
+}
+
+func parseEvents(c *check.C, out, match string) {
+	events := strings.Split(strings.TrimSpace(out), "\n")
+	for _, event := range events {
+		matches := eventstestutils.ScanMap(event)
+		matched, err := regexp.MatchString(match, matches["action"])
+		c.Assert(err, checker.IsNil)
+		c.Assert(matched, checker.True, check.Commentf("Matcher: %s did not match %s", match, matches["action"]))
+	}
+}
+
+func parseEventsWithID(c *check.C, out, match, id string) {
+	events := strings.Split(strings.TrimSpace(out), "\n")
+	for _, event := range events {
+		matches := eventstestutils.ScanMap(event)
+		c.Assert(matchEventID(matches, id), checker.True)
+
+		matched, err := regexp.MatchString(match, matches["action"])
+		c.Assert(err, checker.IsNil)
+		c.Assert(matched, checker.True, check.Commentf("Matcher: %s did not match %s", match, matches["action"]))
+	}
+}
diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/auth/docker-credential-shell-test b/vendor/github.com/moby/moby/integration-cli/fixtures/auth/docker-credential-shell-test
new file mode 100755
index 000000000..97b3f1483
--- /dev/null
+++ b/vendor/github.com/moby/moby/integration-cli/fixtures/auth/docker-credential-shell-test
@@ -0,0 +1,55 @@
+#!/usr/bin/env bash
+
+set -e
+
+listFile=shell_test_list.json
+
+case $1 in
+	"store")
+		in=$(</dev/stdin)
+		server=$(echo "$in" | jq --raw-output ".ServerURL")
+		serverHash=$(echo "$server" | sha1sum - | awk '{print $1}')
+
+		username=$(echo "$in" | jq --raw-output ".Username")
+		password=$(echo "$in" | jq --raw-output ".Secret")
+
+		echo "{ \"Username\": \"${username}\", \"Secret\": \"${password}\" }" > $TEMP/$serverHash
+		# add the server to the list file
+		if [[ ! -f $TEMP/$listFile ]]; then
+			echo "{ \"${server}\": \"${username}\" }" > $TEMP/$listFile
+		else
+			list=$(<$TEMP/$listFile)
+			echo "$list" | jq ". + {\"${server}\": \"${username}\"}" > $TEMP/$listFile
+		fi
+		;;
+	"get")
+		in=$(</dev/stdin)
+		serverHash=$(echo "$in" | sha1sum - | awk '{print $1}')
+		payload=$(<$TEMP/$serverHash)
+
+		echo "$payload"
+		;;
+	"erase")
+		in=$(</dev/stdin)
+		serverHash=$(echo "$in" | sha1sum - | awk '{print $1}')
+		rm -f $TEMP/$serverHash
+
+		# remove the server from the list
+		list=$(<$TEMP/$listFile)
+		server=$(echo "$in" | jq --raw-output ".ServerURL")
+		echo "$list" | jq "del(.[\"${server}\"])" > $TEMP/$listFile
+		;;
+	"list")
+		if [[ !
-f $TEMP/$listFile ]]; then + echo "{}" + else + payload=$(<$TEMP/$listFile) + echo "$payload" + fi + ;; + *) + echo "unknown credential option" + exit 1 + ;; +esac diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/credentialspecs/valid.json b/vendor/github.com/moby/moby/integration-cli/fixtures/credentialspecs/valid.json new file mode 100644 index 000000000..28913e49d --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/fixtures/credentialspecs/valid.json @@ -0,0 +1,25 @@ +{ + "CmsPlugins": [ + "ActiveDirectory" + ], + "DomainJoinConfig": { + "Sid": "S-1-5-21-4288985-3632099173-1864715694", + "MachineAccountName": "MusicStoreAcct", + "Guid": "3705d4c3-0b80-42a9-ad97-ebc1801c74b9", + "DnsTreeName": "hyperv.local", + "DnsName": "hyperv.local", + "NetBiosName": "hyperv" + }, + "ActiveDirectoryConfig": { + "GroupManagedServiceAccounts": [ + { + "Name": "MusicStoreAcct", + "Scope": "hyperv.local" + }, + { + "Name": "MusicStoreAcct", + "Scope": "hyperv" + } + ] + } +} diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/deploy/default.yaml b/vendor/github.com/moby/moby/integration-cli/fixtures/deploy/default.yaml new file mode 100644 index 000000000..f30c04f8f --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/fixtures/deploy/default.yaml @@ -0,0 +1,9 @@ + +version: "3" +services: + web: + image: busybox@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0 + command: top + db: + image: busybox@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0 + command: "tail -f /dev/null" diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/deploy/remove.yaml b/vendor/github.com/moby/moby/integration-cli/fixtures/deploy/remove.yaml new file mode 100644 index 000000000..4337581bf --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/fixtures/deploy/remove.yaml @@ -0,0 +1,11 @@ + +version: "3.1" +services: + web: + image: busybox:latest + command: top + secrets: + - special +secrets: + special: + file: fixtures/secrets/default diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/deploy/secrets.yaml b/vendor/github.com/moby/moby/integration-cli/fixtures/deploy/secrets.yaml new file mode 100644 index 000000000..6ac92cdde --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/fixtures/deploy/secrets.yaml @@ -0,0 +1,20 @@ + +version: "3.1" +services: + web: + image: busybox@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0 + command: top + secrets: + - special + - source: super + target: foo.txt + mode: 0400 + - star +secrets: + special: + file: fixtures/secrets/default + super: + file: fixtures/secrets/default + star: + external: + name: outside diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/load/emptyLayer.tar b/vendor/github.com/moby/moby/integration-cli/fixtures/load/emptyLayer.tar new file mode 100644 index 0000000000000000000000000000000000000000..beabb569ac1e7fea69848f79ffd77fb17b2487b0 GIT binary patch literal 30720 zcmeI4ZExeo5y$c* zovW*2`my-BS#-6VC#(TWylUoH;FtY>E>gSBdD(R?T)*vNRTtCjW{dQq*rhc~JiEJz zsl`J#iI?4n-S>-|RPWcb+PkO&p2Vj7>-wi+S}$j_7sXd|yt?(*H*av1 z+nj$j?T=M`^{U#O?d!U0Kio7`Pi=kdMg6yBHQQD7cjwb=bNHthMQp;4h~=l$E~*AE zuwWB8>5;iMTvuHR-Lmal{@LpHT4MUleZcZ73oou6S9)q>^d-k>oX_}Qcxyo^p;T#I z4yBjEs}MP78A@rGh|&6(B$v4q&IKEaPbcYiRLK(|Fm&Lbue#U-=g_}>K?m>u=o9SE z|2Wfh|Nm{EC(M}~AUk1=_?|0jAs(MS-F>HlGq1Fv!G%u}F zxq~RdI2NK#f|*#Nn~(V{vFLOO8UqEuV@wWH0lWbP;+XE!kN?B|zvfDCV+{KLI33XJ zG420+{r|!CKbik8jnddl(C`@lF&t?A{}{IbApid{FhAKg29N*Y{y*OI0q1{v<6mif 
z{ttd8A4C0_$Nx6||IluLzUGZ<*Y1BAOzlPXfBaM3-28d; z^3|(Ru4ZY9j6K(!m62PF7=SBvvY>6u9_MkwI>$54a7OCT$T3-m=lJE( zmUnm}hhi;&u@=Bq+_JlF`T)-5bsBwzUH;iw#FV;!sB`|AkTbLnLJ;j*XJ0q(@q+&r#AY7K_tidj`Je@64Y zt^SAcHvH+*phv{y_9nO&aXZ27@H1gNA7uuCn|I7u4AJ4h5%>!(#?o4@01sM9=?Y<}U-wbn}~IT=+q) zpnU-y`(0?Hj|bqk-Cup*+8>Y5g-CYfCvx)B0vO)01+SpM1Tko z0U|&IhyW2F0z`la5CI}U1c(3;cp3zTzMXo^%Y*&eXctdy*>?6A8Sq zp|r}Cz7&k%LWj}|qk_p@3pT0=6BEu*_dHZBTzvXOYO{wlC)G^^hyW2F0z`la5CI}U k1c(3;AOb{y2oM1xKm>>Y5g-CYfCvx)B0vO)z%wTBe{YUtx&QzG literal 0 HcmV?d00001 diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/load/frozen.go b/vendor/github.com/moby/moby/integration-cli/fixtures/load/frozen.go new file mode 100644 index 000000000..13cd393f3 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/fixtures/load/frozen.go @@ -0,0 +1,182 @@ +package load + +import ( + "bufio" + "bytes" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" + + "github.com/pkg/errors" +) + +var frozenImgDir = "/docker-frozen-images" + +// FrozenImagesLinux loads the frozen image set for the integration suite +// If the images are not available locally it will download them +// TODO: This loads whatever is in the frozen image dir, regardless of what +// images were passed in. If the images need to be downloaded, then it will respect +// the passed in images +func FrozenImagesLinux(dockerBinary string, images ...string) error { + imgNS := os.Getenv("TEST_IMAGE_NAMESPACE") + var loadImages []struct{ srcName, destName string } + for _, img := range images { + if err := exec.Command(dockerBinary, "inspect", "--type=image", img).Run(); err != nil { + srcName := img + // hello-world:latest gets re-tagged as hello-world:frozen + // there are some tests that use hello-world:latest specifically so it pulls + // the image and hello-world:frozen is used for when we just want a super + // small image + if img == "hello-world:frozen" { + srcName = "hello-world:latest" + } + if imgNS != "" { + srcName = imgNS + "/" + srcName + } + loadImages = append(loadImages, struct{ srcName, destName string }{ + srcName: srcName, + destName: img, + }) + } + } + if len(loadImages) == 0 { + // everything is loaded, we're done + return nil + } + + fi, err := os.Stat(frozenImgDir) + if err != nil || !fi.IsDir() { + srcImages := make([]string, 0, len(loadImages)) + for _, img := range loadImages { + srcImages = append(srcImages, img.srcName) + } + if err := pullImages(dockerBinary, srcImages); err != nil { + return errors.Wrap(err, "error pulling image list") + } + } else { + if err := loadFrozenImages(dockerBinary); err != nil { + return err + } + } + + for _, img := range loadImages { + if img.srcName != img.destName { + if out, err := exec.Command(dockerBinary, "tag", img.srcName, img.destName).CombinedOutput(); err != nil { + return errors.Errorf("%v: %s", err, string(out)) + } + if out, err := exec.Command(dockerBinary, "rmi", img.srcName).CombinedOutput(); err != nil { + return errors.Errorf("%v: %s", err, string(out)) + } + } + } + return nil +} + +func loadFrozenImages(dockerBinary string) error { + tar, err := exec.LookPath("tar") + if err != nil { + return errors.Wrap(err, "could not find tar binary") + } + tarCmd := exec.Command(tar, "-cC", frozenImgDir, ".") + out, err := tarCmd.StdoutPipe() + if err != nil { + return errors.Wrap(err, "error getting stdout pipe for tar command") + } + + errBuf := bytes.NewBuffer(nil) + tarCmd.Stderr = errBuf + tarCmd.Start() + defer tarCmd.Wait() + + cmd := exec.Command(dockerBinary, "load") + cmd.Stdin = out + if out, err 
:= cmd.CombinedOutput(); err != nil { + return errors.Errorf("%v: %s", err, string(out)) + } + return nil +} + +func pullImages(dockerBinary string, images []string) error { + cwd, err := os.Getwd() + if err != nil { + return errors.Wrap(err, "error getting path to dockerfile") + } + dockerfile := os.Getenv("DOCKERFILE") + if dockerfile == "" { + dockerfile = "Dockerfile" + } + dockerfilePath := filepath.Join(filepath.Dir(filepath.Clean(cwd)), dockerfile) + pullRefs, err := readFrozenImageList(dockerfilePath, images) + if err != nil { + return errors.Wrap(err, "error reading frozen image list") + } + + var wg sync.WaitGroup + chErr := make(chan error, len(images)) + for tag, ref := range pullRefs { + wg.Add(1) + go func(tag, ref string) { + defer wg.Done() + if out, err := exec.Command(dockerBinary, "pull", ref).CombinedOutput(); err != nil { + chErr <- errors.Errorf("%v: %s", string(out), err) + return + } + if out, err := exec.Command(dockerBinary, "tag", ref, tag).CombinedOutput(); err != nil { + chErr <- errors.Errorf("%v: %s", string(out), err) + return + } + if out, err := exec.Command(dockerBinary, "rmi", ref).CombinedOutput(); err != nil { + chErr <- errors.Errorf("%v: %s", string(out), err) + return + } + }(tag, ref) + } + wg.Wait() + close(chErr) + return <-chErr +} + +func readFrozenImageList(dockerfilePath string, images []string) (map[string]string, error) { + f, err := os.Open(dockerfilePath) + if err != nil { + return nil, errors.Wrap(err, "error reading dockerfile") + } + defer f.Close() + ls := make(map[string]string) + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := strings.Fields(scanner.Text()) + if len(line) < 3 { + continue + } + if !(line[0] == "RUN" && line[1] == "./contrib/download-frozen-image-v2.sh") { + continue + } + + frozenImgDir = line[2] + if line[2] == frozenImgDir { + frozenImgDir = filepath.Join(os.Getenv("DEST"), "frozen-images") + } + + for scanner.Scan() { + img := strings.TrimSpace(scanner.Text()) + img = strings.TrimSuffix(img, "\\") + img = strings.TrimSpace(img) + split := strings.Split(img, "@") + if len(split) < 2 { + break + } + + for _, i := range images { + if split[0] == i { + ls[i] = img + break + } + } + } + } + return ls, nil +} diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey1.crt b/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey1.crt new file mode 100644 index 000000000..2218f23c8 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey1.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDhTCCAm2gAwIBAgIJAP2EcMN2UXPcMA0GCSqGSIb3DQEBCwUAMFcxCzAJBgNV +BAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMQ8wDQYD +VQQKEwZEb2NrZXIxEzARBgNVBAMTCmRlbGVnYXRpb24wHhcNMTYwOTI4MTc0ODQ4 +WhcNMjYwNjI4MTc0ODQ4WjBXMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFTAT +BgNVBAcTDFNhbkZyYW5jaXNjbzEPMA0GA1UEChMGRG9ja2VyMRMwEQYDVQQDEwpk +ZWxlZ2F0aW9uMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvgewhaYs +Ke5s2AM7xxKrT4A6n7hW17qSnBjonCcPcwTFmYqIOdxWjYITgJuHrTwB4ZhBqWS7 +tTsUUu6hWLMeB7Uo5/GEQAAZspKkT9G/rNKF9lbWK9PPhGGkeR01c/Q932m92Hsn +fCQ0Pp/OzD3nVTh0v9HKk+PObNMOCcqG87eYs4ylPRxs0RrE/rP+bEGssKQSbeCZ +wazDnO+kiatVgKQZ2CK23iFdRE1z2rzqVDeaFWdvBqrRdWnkOZClhlLgEQ5nK2yV +B6tSqOiI3MmHyHzIkGOQJp2/s7Pe0ckEkzsjTsJW8oKHlBBl6pRxHIKzNN4VFbeB +vvYvrogrDrC/owIDAQABo1QwUjAMBgNVHRMBAf8EAjAAMA4GA1UdDwEB/wQEAwIF +oDATBgNVHSUEDDAKBggrBgEFBQcDAzAdBgNVHQ4EFgQUFoHfukRa6qGk1ncON64Z +ASKlZdkwDQYJKoZIhvcNAQELBQADggEBAEq9Adpd03CPmpbRtTAJGAkjjLFr60sV 
+2r+/l/m9R31ZCN9ymM9nxToQ8zfMdeAh/nnPcErziil2gDVqXueCNDkRj09tmDIE +Q1Oc92uyNZNgcECow77cKZCTZSTku+qsJrYaykH5vSnia8ltcKj8inJedIcpBR+p +608HEQvF0Eg5eaLPJwH48BCb0Gqdri1dJgrNnqptz7MDr8M+u7tHVulbAd3YxLlq +JH1W2bkVUx6esbn/MUE5HL5iTuOYREEINvBSmLdmmFkampmCnCB/bDEyJeL9bAkt +ZPIi0UNSnqFKLSP1Vf8AGLXt6iO7+1OGvtsDXEEYdXVOMsSXZtUuT7A= +-----END CERTIFICATE----- diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey1.key b/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey1.key new file mode 100644 index 000000000..cb37efc94 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey1.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAvgewhaYsKe5s2AM7xxKrT4A6n7hW17qSnBjonCcPcwTFmYqI +OdxWjYITgJuHrTwB4ZhBqWS7tTsUUu6hWLMeB7Uo5/GEQAAZspKkT9G/rNKF9lbW +K9PPhGGkeR01c/Q932m92HsnfCQ0Pp/OzD3nVTh0v9HKk+PObNMOCcqG87eYs4yl +PRxs0RrE/rP+bEGssKQSbeCZwazDnO+kiatVgKQZ2CK23iFdRE1z2rzqVDeaFWdv +BqrRdWnkOZClhlLgEQ5nK2yVB6tSqOiI3MmHyHzIkGOQJp2/s7Pe0ckEkzsjTsJW +8oKHlBBl6pRxHIKzNN4VFbeBvvYvrogrDrC/owIDAQABAoIBAB/o8KZwsgfUhqh7 +WoViSCwQb0e0z7hoFwhpUl4uXPTGf1v6HEgDDPG0PwwgkdbwNaypQZVtWevj4NTQ +R326jjdjH1xbfQa2PZpz722L3jDqJR6plEtFxRoIv3KrCffPsrgabIu2mnnJJpDB +ixtW5cq0sT4ov2i4H0i85CWWwbSY/G/MHsvCuK9PhoCj9uToVqrf1KrAESE5q4fh +mPSYUL99KVnj7SZkUz+79rc8sLLPVks3szZACMlm1n05ZTj/d6Nd2ZZUO45DllIj +1XJghfWmnChrB/P/KYXgQ3Y9BofIAw1ra2y3wOZeqRFNsbmojcGldfdtN/iQzhEj +uk4ThokCgYEA9FTmv36N8qSPWuqX/KzkixDQ8WrDGohcB54kK98Wx4ijXx3i38SY +tFjO8YUS9GVo1+UgmRjZbzVX7xeum6+TdBBwOjNOxEQ4tzwiQBWDdGpli8BccdJ2 +OOIVxSslWhiUWfpYloXVetrR88iHbT882g795pbonDaJdXSLnij4UW8CgYEAxxrr +QFpsmOEZvI/yPSOGdG7A1RIsCeH+cEOf4cKghs7+aCtAHlIweztNOrqirl3oKI1r +I0zQl46WsaW8S/y99v9lmmnZbWwqLa4vIu0NWs0zaZdzKZw3xljMhgp4Ge69hHa2 +utCtAxcX+7q/yLlHoTiYwKdxX54iLkheCB8csw0CgYEAleEG820kkjXUIodJ2JwO +Tihwo8dEC6CeI6YktizRgnEVFqH0rCOjMO5Rc+KX8AfNOrK5PnD54LguSuKSH7qi +j04OKgWTSd43lF90+y63RtCFnibQDpp2HwrBJAQFk7EEP/XMJfnPLN/SbuMSADgM +kg8kPTFRW5Iw3DYz9z9WpE0CgYAkn6/8Q2XMbUOFqti9JEa8Lg8sYk5VdwuNbPMA +3QMYKQUk9ieyLB4c3Nik3+XCuyVUKEc31A5egmz3umu7cn8i6vGuiJ/k/8t2YZ7s +Bry5Ihu95Yzab5DW3Eiqs0xKQN79ebS9AluAwQO5Wy2h52rknfuDHIm/M+BHsSoS +xl5KFQKBgQCokCsYuX1z2GojHw369/R2aX3ovCGuHqy4k7fWxUrpHTHvth2+qNPr +84qLJ9rLWoZE5sUiZ5YdwCgW877EdfkT+v4aaBX79ixso5VdqgJ/PdnoNntah/Vq +njQiW1skn6/P5V/eyimN2n0VsyBr/zMDEtYTRP/Tb1zi/njFLQkZEA== +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey2.crt b/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey2.crt new file mode 100644 index 000000000..bec084790 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey2.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDhTCCAm2gAwIBAgIJAIq8naKlYAQfMA0GCSqGSIb3DQEBCwUAMFcxCzAJBgNV +BAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMQ8wDQYD +VQQKEwZEb2NrZXIxEzARBgNVBAMTCmRlbGVnYXRpb24wHhcNMTYwOTI4MTc0ODQ4 +WhcNMjYwNjI4MTc0ODQ4WjBXMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFTAT +BgNVBAcTDFNhbkZyYW5jaXNjbzEPMA0GA1UEChMGRG9ja2VyMRMwEQYDVQQDEwpk +ZWxlZ2F0aW9uMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyY2EWYTW +5VHipw08t675upmD6a+akiuZ1z+XpuOxZCgjZ0aHfoOe8wGKg3Ohz7UCBdD5Mob/ +L/qvRlsCaqPHGZKIyyX1HDO4mpuQQFBhYxt+ZAO3AaawEUOw2rwwMDEjLnDDTSZM +z8jxCMvsJjBDqgb8g3z+AmjducQ/OH6llldgHIBY8ioRbROCL2PGgqywWq2fThav +c70YMxtKviBGDNCouYeQ8JMK/PuLwPNDXNQAagFHVARXiUv/ILHk7ImYnSGJUcuk +JTUGN2MBnpY0eakg7i+4za8sjjqOdn+2I6aVzlGJDSiRP72nkg/cE4BqMl9FrMwK +9iS8xa9yMDLUvwIDAQABo1QwUjAMBgNVHRMBAf8EAjAAMA4GA1UdDwEB/wQEAwIF 
+oDATBgNVHSUEDDAKBggrBgEFBQcDAzAdBgNVHQ4EFgQUvQzzFmh3Sv3HcdExY3wx +/1u6JLAwDQYJKoZIhvcNAQELBQADggEBAJcmDme2Xj/HPUPwaN/EyCmjhY73EiHO +x6Pm16tscg5JGn5A+u3CZ1DmxUYl8Hp6MaW/sWzdtL0oKJg76pynadCWh5EacFR8 +u+2GV/IcN9mSX6JQzvrqbjSqo5/FehqBD+W5h3euwwApWA3STAadYeyEfmdOA3SQ +W1vzrA1y7i8qgTqeJ7UX1sEAXlIhBK2zPYaMB+en+ZOiPyNxJYj6IDdGdD2paC9L +6H9wKC+GAUTSdCWp89HP7ETSXEGr94AXkrwU+qNsiN+OyK8ke0EMngEPh5IQoplw +/7zEZCth3oKxvR1/4S5LmTVaHI2ZlbU4q9bnY72G4tw8YQr2gcBGo4w= +-----END CERTIFICATE----- diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey2.key b/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey2.key new file mode 100644 index 000000000..5ccabe908 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey2.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAyY2EWYTW5VHipw08t675upmD6a+akiuZ1z+XpuOxZCgjZ0aH +foOe8wGKg3Ohz7UCBdD5Mob/L/qvRlsCaqPHGZKIyyX1HDO4mpuQQFBhYxt+ZAO3 +AaawEUOw2rwwMDEjLnDDTSZMz8jxCMvsJjBDqgb8g3z+AmjducQ/OH6llldgHIBY +8ioRbROCL2PGgqywWq2fThavc70YMxtKviBGDNCouYeQ8JMK/PuLwPNDXNQAagFH +VARXiUv/ILHk7ImYnSGJUcukJTUGN2MBnpY0eakg7i+4za8sjjqOdn+2I6aVzlGJ +DSiRP72nkg/cE4BqMl9FrMwK9iS8xa9yMDLUvwIDAQABAoIBAHmffvzx7ydESWwa +zcfdu26BkptiTvjjfJrqEd4wSewxWGPKqJqMXE8xX99A2KTZClZuKuH1mmnecQQY +iRXGrK9ewFMuHYGeKEiLlPlqR8ohXhyGLVm+t0JDwaXMp5t9G0i73O5iLTm5fNGd +FGxa9YnVW20Q8MqNczbVGH1D1zInhxzzOyFzBd4bBBJ8PdrUdyLpd7+RxY2ghnbT +p9ZANR2vk5zmDLJgZx72n/u+miJWuhY6p0v3Vq4z/HHgdhf+K6vpDdzTcYlA0rO4 +c/c+RKED3ZadGUD5QoLsmEN0e3FVSMPN1kt4ZRTqWfH8f2X4mLz33aBryTjktP6+ +1rX6ThECgYEA74wc1Tq23B5R0/GaMm1AK3Ko2zzTD8wK7NSCElh2dls02B+GzrEB +aE3A2GMQSuzb+EA0zkipwANBaqs3ZemH5G1pu4hstQsXCMd4jAJn0TmTXlplXBCf +PSc8ZUU6XcJENRr9Q7O9/TGlgahX+z0ndxYx/CMCsSu7XsMg4IZsbAcCgYEA12Vb +wKOVG15GGp7pMshr+2rQfVimARUP4gf3JnQmenktI4PfdnMW3a4L3DEHfLhIerwT +6lRp/NpxSADmuT4h1UO1l2lc+gmTVPw0Vbl6VwHpgS5Kfu4ZyM6n3S66f/dE4nu7 +hQF9yZz7vn5Agghak4p6a1wC1gdMzR1tvxFzk4kCgYByBMTskWfcWeok8Yitm+bB +R3Ar+kWT7VD97SCETusD5uG+RTNLSmEbHnc+B9kHcLo67YS0800pAeOvPBPARGnU +RmffRU5I1iB+o0MzkSmNItSMQoagTaEd4IEUyuC/I+qHRHNsOC+kRm86ycAm67LP +MhdUpe1wGxqyPjp15EXTHQKBgDKzFu+3EWfJvvKRKQ7dAh3BvKVkcl6a2Iw5l8Ej +YdM+JpPPfI/i8yTmzL/dgoem0Nii4IUtrWzo9fUe0TAVId2S/HFRSaNJEbbVTnRH +HjbQqmfPv5U08jjD+9siHp/0UfCFc1QRT8xe+RqTmReCY9+KntoaZEiAm2FEZgqt +TukRAoGAf7QqbTP5/UH1KSkX89F5qy/6GS3pw6TLj9Ufm/l/NO8Um8gag6YhEKWR +7HpkpCqjfWj8Av8ESR9cqddPGrbdqXFm9z7dCjlAd5T3Q3h/h+v+JzLQWbsI6WOb +SsOSWNyE006ZZdIiFwO6GfxpLI24sVtYKgyob6Q71oxSqfnrnT0= +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey3.crt b/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey3.crt new file mode 100644 index 000000000..f434b45fc --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey3.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDhTCCAm2gAwIBAgIJAKHt/jxiWqMtMA0GCSqGSIb3DQEBCwUAMFcxCzAJBgNV +BAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMQ8wDQYD +VQQKEwZEb2NrZXIxEzARBgNVBAMTCmRlbGVnYXRpb24wHhcNMTYwOTI4MTc0ODQ5 +WhcNMjYwNjI4MTc0ODQ5WjBXMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFTAT +BgNVBAcTDFNhbkZyYW5jaXNjbzEPMA0GA1UEChMGRG9ja2VyMRMwEQYDVQQDEwpk +ZWxlZ2F0aW9uMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqfbJk2Dk +C9FJVjV2+Q2CQrJphG3vFc1Qlu9jgVA5RhGmF9jJzetsclsV/95nBhinIGcSmPQA +l318G7Bz/cG/6O2n5+hj+S1+YOvQweReZj3d4kCeS86SOyLNTpMD9gsF0S8nR1RN +h0jD4t1vxAVeGD1o61U8/k0O5eDoeOfOSWZagKk5PhyrMZgNip4IrG46umCkFlrw +zMMcgQdwTQXywPqkr/LmYpqT1WpMlzHYTQEY8rKorIJQbPtHVYdr4UxYnNmk6fbU 
+biEP1DQlwjBWcFTsDLqXKP/K+e3O0/e/hMB0y7Tj9fZ7Viw0t5IKXZPsxMhwknUT +9vmPzIJO6NiniwIDAQABo1QwUjAMBgNVHRMBAf8EAjAAMA4GA1UdDwEB/wQEAwIF +oDATBgNVHSUEDDAKBggrBgEFBQcDAzAdBgNVHQ4EFgQUdTXRP1EzxQ+UDZSoheVo +Mobud1cwDQYJKoZIhvcNAQELBQADggEBADV9asTWWdbmpkeRuKyi0xGho39ONK88 +xxkFlco766BVgemo/rGQj3oPuw6M6SzHFoJ6JUPjmLiAQDIGEU/2/b6LcOuLjP+4 +YejCcDTY3lSW/HMNoAmzr2foo/LngNGfe/qhVFUqV7GjFT9+XzFFBfIZ1cQiL2ed +kc8rgQxFPwWXFCSwaENWeFnMDugkd+7xanoAHq8GsJpg5fTruDTmJkUqC2RNiMLn +WM7QaqW7+lmUnMnc1IBoz0hFhgoiadWM/1RQxx51zTVw6Au1koIm4ZXu5a+/WyC8 +K1+HyUbc0AVaDaRBpRSOR9aHRwLGh6WQ4aUZQNyJroc999qfYrDEEV8= +-----END CERTIFICATE----- diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey3.key b/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey3.key new file mode 100644 index 000000000..a61d18cc3 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey3.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEAqfbJk2DkC9FJVjV2+Q2CQrJphG3vFc1Qlu9jgVA5RhGmF9jJ +zetsclsV/95nBhinIGcSmPQAl318G7Bz/cG/6O2n5+hj+S1+YOvQweReZj3d4kCe +S86SOyLNTpMD9gsF0S8nR1RNh0jD4t1vxAVeGD1o61U8/k0O5eDoeOfOSWZagKk5 +PhyrMZgNip4IrG46umCkFlrwzMMcgQdwTQXywPqkr/LmYpqT1WpMlzHYTQEY8rKo +rIJQbPtHVYdr4UxYnNmk6fbUbiEP1DQlwjBWcFTsDLqXKP/K+e3O0/e/hMB0y7Tj +9fZ7Viw0t5IKXZPsxMhwknUT9vmPzIJO6NiniwIDAQABAoIBAQCAr/ed3A2umO7T +FDYZik3nXBiiiW4t7r+nGGgZ3/kNgY1lnuHlROxehXLZwbX1mrLnyML/BjhwezV9 +7ZNVPd6laVPpNj6DyxtWHRZ5yARlm1Al39E7CpQTrF0QsiWcpGnqIa62xjDRTpnq +askV/Q5qggyvqmE9FnFCQpEiAjlhvp7F0kVHVJm9s3MK3zSyR0UTZ3cpYus2Jr2z +OotHgAMHq5Hgb3dvxOeE2xRMeYAVDujbkNzXm2SddAtiRdLhWDh7JIr3zXhp0HyN +4rLOyhlgz00oIGeDt/C0q3fRmghr3iZOG+7m2sUx0FD1Ru1dI9v2A+jYmIVNW6+x +YJk5PzxJAoGBANDj7AGdcHSci/LDBPoTTUiz3uucAd27/IJma/iy8mdbVfOAb0Fy +PRSPvoozlpZyOxg2J4eH/o4QxQR4lVKtnLKZLNHK2tg3LarwyBX1LiI3vVlB+DT1 +AmV8i5bJAckDhqFeEH5qdWZFi03oZsSXWEqX5iMYCrdK5lTZggcrFZeHAoGBANBL +fkk3knAdcVfTYpmHx18GBi2AsCWTd20KD49YBdbVy0Y2Jaa1EJAmGWpTUKdYx40R +H5CuGgcAviXQz3bugdTU1I3tAclBtpJNU7JkhuE+Epz0CM/6WERJrE0YxcGQA5ui +6fOguFyiXD1/85jrDBOKy74aoS7lYz9r/a6eqmjdAoGBAJpm/nmrIAZx+Ff2ouUe +A1Ar9Ch/Zjm5zEmu3zwzOU4AiyWz14iuoktifNq2iyalRNz+mnVpplToPFizsNwu +C9dPtXtU0DJlhtIFrD/evLz6KnGhe4/ZUm4lgyBvb2xfuNHqL5Lhqelwmil6EQxb +Oh3Y7XkfOjyFln89TwlxZUJdAoGAJRMa4kta7EvBTeGZLjyltvsqhFTghX+vBSCC +ToBbYbbiHJgssXSPAylU4sD7nR3HPwuqM6VZip+OOMrm8oNXZpuPTce+xqTEq1vK +JvmPrG3RAFDLdMFZjqYSXhKnuGE60yv3Ol8EEbDwfB3XLQPBPYU56Jdy0xcPSE2f +dMJXEJ0CgYEAisZw0nXw6lFeYecu642EGuU0wv1O9i21p7eho9QwOcsoTl4Q9l+M +M8iBv+qTHO+D19l4JbkGvy2H2diKoYduUFACcuiFYs8fjrT+4Z6DyOQAQGAf6Ylw +BFbU15k6KbA9v4mZDfd1tY9x62L/XO55ZxYG+J+q0e26tEThgD8cEog= +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey4.crt b/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey4.crt new file mode 100644 index 000000000..c8cbe46bd --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey4.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDhTCCAm2gAwIBAgIJANae++ZkUEWMMA0GCSqGSIb3DQEBCwUAMFcxCzAJBgNV +BAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMQ8wDQYD +VQQKEwZEb2NrZXIxEzARBgNVBAMTCmRlbGVnYXRpb24wHhcNMTYwOTI4MTc0ODQ5 +WhcNMjYwNjI4MTc0ODQ5WjBXMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFTAT +BgNVBAcTDFNhbkZyYW5jaXNjbzEPMA0GA1UEChMGRG9ja2VyMRMwEQYDVQQDEwpk +ZWxlZ2F0aW9uMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqULAjgba +Y2I10WfqdmYnPfEqEe6iMDbzcgECb2xKafXcI4ltkQj1iO4zBTs0Ft9EzXFc5ZBh +pTjZrL6vrIa0y/CH2BiIHBJ0wRHx/40HXp4DSj3HZpVOlEMI3npRfBGNIBllUaRN 
+PWG7zL7DcKMIepBfPXyjBsxzH3yNiISq0W5hSiy+ImhSo3aipJUHHcp9Z9NgvpNC +3QvnxsGKRnECmDRDlxkq+FQu9Iqs/HWFYWgyfcsw+YTrWZq3qVnnqUouHO//c9PG +Ry3sZSDU97MwvkjvWys1e01Xvd3AbHx08YAsxih58i/OBKe81eD9NuZDP2KrjTxI +5xkXKhj6DV2NnQIDAQABo1QwUjAMBgNVHRMBAf8EAjAAMA4GA1UdDwEB/wQEAwIF +oDATBgNVHSUEDDAKBggrBgEFBQcDAzAdBgNVHQ4EFgQUDt95hiqbQvi0KcvZGAUu +VisnztQwDQYJKoZIhvcNAQELBQADggEBAGi7qHai7MWbfeu6SlXhzIP3AIMa8TMi +lp/+mvPUFPswIVqYJ71MAN8uA7CTH3z50a2vYupGeOEtZqVJeRf+xgOEpwycncxp +Qz6wc6TWPVIoT5q1Hqxw1RD2MyKL+Y+QBDYwFxFkthpDMlX48I9frcqoJUWFxBF2 +lnRr/cE7BbPE3sMbXV3wGPlH7+eUf+CgzXJo2HB6THzagyEgNrDiz/0rCQa1ipFd +mNU3D/U6BFGmJNxhvSOtXX9escg8yjr05YwwzokHS2K4jE0ZuJPBd50C/Rvo3Mf4 +0h7/2Q95e7d42zPe9WYPu2F8KTWsf4r+6ddhKrKhYzXIcTAfHIOiO+U= +-----END CERTIFICATE----- diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey4.key b/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey4.key new file mode 100644 index 000000000..f473cc495 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/fixtures/notary/delgkey4.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAqULAjgbaY2I10WfqdmYnPfEqEe6iMDbzcgECb2xKafXcI4lt +kQj1iO4zBTs0Ft9EzXFc5ZBhpTjZrL6vrIa0y/CH2BiIHBJ0wRHx/40HXp4DSj3H +ZpVOlEMI3npRfBGNIBllUaRNPWG7zL7DcKMIepBfPXyjBsxzH3yNiISq0W5hSiy+ +ImhSo3aipJUHHcp9Z9NgvpNC3QvnxsGKRnECmDRDlxkq+FQu9Iqs/HWFYWgyfcsw ++YTrWZq3qVnnqUouHO//c9PGRy3sZSDU97MwvkjvWys1e01Xvd3AbHx08YAsxih5 +8i/OBKe81eD9NuZDP2KrjTxI5xkXKhj6DV2NnQIDAQABAoIBAGK0ZKnuYSiXux60 +5MvK4pOCsa/nY3mOcgVHhW4IzpRgJdIrcFOlz9ncXrBsSAIWjX7o3u2Ydvjs4DOW +t8d6frB3QiDInYcRVDjLCD6otWV97Bk9Ua0G4N4hAWkMF7ysV4oihS1JDSoAdo39 +qOdki6s9yeyHZGKwk2oHLlowU5TxQMBA8DHmxqBII1HTm+8xRz45bcEqRXydYSUn +P1JuSU9jFqdylxU+Nrq6ehslMQ3y7qNWQyiLGxu6EmR+vgrzSU0s3iAOqCHthaOS +VBBXPL3DNEYUS+0QGnGrACuJhanOMBfdiO6Orelx6ZzWZm38PNGv0yBt0WCM+8/A +TtQNGkECgYEA1LqR6AH9XikUQ0+rM4526BgVuYqtjw21h4Lj9alaA+YTQntBBJOv +iAcUpnJiV4T8jzAMLeqpK8R/rbxRnK5S9jOV2gr+puk4L6tH46cgahBUESDigDp8 +6vK8ur6ubBcXNPh3AT6rsPj+Ph2EU3raqiYdouvCdga/OCYZb+jr6UkCgYEAy7Cr +l8WssI/8/ORcQ4MFJFNyfz/Y2beNXyLd1PX0H+wRSiGcKzeUuTHNtzFFpMbrK/nx +ZOPCT2ROdHsBHzp1L+WquCb0fyMVSiYiXBU+VCFDbUU5tBr3ycTc7VwuFPENOiha +IdlWgew/aW110FQHIaqe9g+htRe+mXe++faZtbUCgYB/MSJmNzJX53XvHSZ/CBJ+ +iVAMBSfq3caJRLCqRNzGcf1YBbwFUYxlZ95n+wJj0+byckcF+UW3HqE8rtmZNf3y +qTtTCLnj8JQgpGeybU4LPMIXD7N9+fqQvBwuCC7gABpnGJyHCQK9KNNTLnDdPRqb +G3ki3ZYC3dvdZaJV8E2FyQKBgQCMa5Mf4kqWvezueo+QizZ0QILibqWUEhIH0AWV +1qkhiKCytlDvCjYhJdBnxjP40Jk3i+t6XfmKud/MNTAk0ywOhQoYQeKz8v+uSnPN +f2ekn/nXzq1lGGJSWsDjcXTjQvqXaVIZm7cjgjaE+80IfaUc9H75qvUT3vaq3f5u +XC7DMQKBgQDMAzCCpWlEPbZoFMl6F49+7jG0/TiqM/WRUSQnNtufPMbrR9Je4QM1 +L1UCANCPaHFOncKYer15NfIV1ctt5MZKImevDsUaQO8CUlO+dzd5H8KvHw9E29gA +B22v8k3jIjsYeRL+UJ/sBnWHgxdAe/NEM+TdlP2oP9D1gTifutPqAg== +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/moby/moby/integration-cli/fixtures/notary/gen.sh b/vendor/github.com/moby/moby/integration-cli/fixtures/notary/gen.sh new file mode 100755 index 000000000..8d6381cec --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/fixtures/notary/gen.sh @@ -0,0 +1,18 @@ +for selfsigned in delgkey1 delgkey2 delgkey3 delgkey4; do + subj='/C=US/ST=CA/L=SanFrancisco/O=Docker/CN=delegation' + + openssl genrsa -out "${selfsigned}.key" 2048 + openssl req -new -key "${selfsigned}.key" -out "${selfsigned}.csr" -sha256 -subj "${subj}" + cat > "${selfsigned}.cnf" < 1 && buf[0] == 'Y' +} + +func NotaryHosting() bool { + // for now notary binary is built only if we're running inside + // container through `make test`. 
Figure that out by testing if
+	// notary binary is in PATH.
+	_, err := exec.LookPath(notaryBinary)
+	return err == nil
+}
+
+func NotaryServerHosting() bool {
+	// for now notary-server binary is built only if we're running inside
+	// container through `make test`. Figure that out by testing if
+	// notary-server binary is in PATH.
+	_, err := exec.LookPath(notaryServerBinary)
+	return err == nil
+}
+
+func NotOverlay() bool {
+	return StorageDriverIsNot("overlay")
+}
+
+func Devicemapper() bool {
+	return StorageDriverIs("devicemapper")
+}
+
+func IPv6() bool {
+	cmd := exec.Command("test", "-f", "/proc/net/if_inet6")
+	return cmd.Run() != nil
+}
+
+func UserNamespaceROMount() bool {
+	// quick case--userns not enabled in this test run
+	if os.Getenv("DOCKER_REMAP_ROOT") == "" {
+		return true
+	}
+	if _, _, err := dockerCmdWithError("run", "--rm", "--read-only", "busybox", "date"); err != nil {
+		return false
+	}
+	return true
+}
+
+func NotUserNamespace() bool {
+	root := os.Getenv("DOCKER_REMAP_ROOT")
+	return root == ""
+}
+
+func UserNamespaceInKernel() bool {
+	if _, err := os.Stat("/proc/self/uid_map"); os.IsNotExist(err) {
+		/*
+		 * This kernel-provided file only exists if user namespaces are
+		 * supported
+		 */
+		return false
+	}
+
+	// We need extra check on redhat based distributions
+	if f, err := os.Open("/sys/module/user_namespace/parameters/enable"); err == nil {
+		defer f.Close()
+		b := make([]byte, 1)
+		_, _ = f.Read(b)
+		return string(b) != "N"
+	}
+
+	return true
+}
+
+func IsPausable() bool {
+	if testEnv.DaemonPlatform() == "windows" {
+		return testEnv.Isolation() == "hyperv"
+	}
+	return true
+}
+
+func NotPausable() bool {
+	if testEnv.DaemonPlatform() == "windows" {
+		return testEnv.Isolation() == "process"
+	}
+	return false
+}
+
+func IsolationIs(expectedIsolation string) bool {
+	return testEnv.DaemonPlatform() == "windows" && string(testEnv.Isolation()) == expectedIsolation
+}
+
+func IsolationIsHyperv() bool {
+	return IsolationIs("hyperv")
+}
+
+func IsolationIsProcess() bool {
+	return IsolationIs("process")
+}
+
+// testRequires checks if the environment satisfies the requirements
+// for the test to run or skips the tests.
+func testRequires(c *check.C, requirements ...requirement.Test) {
+	requirement.Is(c, requirements...)
+}
diff --git a/vendor/github.com/moby/moby/integration-cli/requirements_unix_test.go b/vendor/github.com/moby/moby/integration-cli/requirements_unix_test.go
new file mode 100644
index 000000000..2ed04f6e1
--- /dev/null
+++ b/vendor/github.com/moby/moby/integration-cli/requirements_unix_test.go
@@ -0,0 +1,115 @@
+// +build !windows
+
+package main
+
+import (
+	"bytes"
+	"io/ioutil"
+	"os/exec"
+	"strings"
+
+	"github.com/docker/docker/pkg/parsers/kernel"
+	"github.com/docker/docker/pkg/sysinfo"
+)
+
+var (
+	// SysInfo stores information about which features a kernel supports.
+ SysInfo *sysinfo.SysInfo +) + +func cpuCfsPeriod() bool { + return SysInfo.CPUCfsPeriod +} + +func cpuCfsQuota() bool { + return SysInfo.CPUCfsQuota +} + +func cpuShare() bool { + return SysInfo.CPUShares +} + +func oomControl() bool { + return SysInfo.OomKillDisable +} + +func pidsLimit() bool { + return SysInfo.PidsLimit +} + +func kernelMemorySupport() bool { + return SysInfo.KernelMemory +} + +func memoryLimitSupport() bool { + return SysInfo.MemoryLimit +} + +func memoryReservationSupport() bool { + return SysInfo.MemoryReservation +} + +func swapMemorySupport() bool { + return SysInfo.SwapLimit +} + +func memorySwappinessSupport() bool { + return SysInfo.MemorySwappiness +} + +func blkioWeight() bool { + return SysInfo.BlkioWeight +} + +func cgroupCpuset() bool { + return SysInfo.Cpuset +} + +func seccompEnabled() bool { + return supportsSeccomp && SysInfo.Seccomp +} + +func bridgeNfIptables() bool { + return !SysInfo.BridgeNFCallIPTablesDisabled +} + +func bridgeNfIP6tables() bool { + return !SysInfo.BridgeNFCallIP6TablesDisabled +} + +func unprivilegedUsernsClone() bool { + content, err := ioutil.ReadFile("/proc/sys/kernel/unprivileged_userns_clone") + return err != nil || !strings.Contains(string(content), "0") +} + +func ambientCapabilities() bool { + content, err := ioutil.ReadFile("/proc/self/status") + return err != nil || strings.Contains(string(content), "CapAmb:") +} + +func overlayFSSupported() bool { + cmd := exec.Command(dockerBinary, "run", "--rm", "busybox", "/bin/sh", "-c", "cat /proc/filesystems") + out, err := cmd.CombinedOutput() + if err != nil { + return false + } + return bytes.Contains(out, []byte("overlay\n")) +} + +func overlay2Supported() bool { + if !overlayFSSupported() { + return false + } + + daemonV, err := kernel.ParseRelease(testEnv.DaemonKernelVersion()) + if err != nil { + return false + } + requiredV := kernel.VersionInfo{Kernel: 4} + return kernel.CompareKernelVersion(*daemonV, requiredV) > -1 + +} + +func init() { + SysInfo = sysinfo.New(true) +} diff --git a/vendor/github.com/moby/moby/integration-cli/test_vars_exec_test.go b/vendor/github.com/moby/moby/integration-cli/test_vars_exec_test.go new file mode 100644 index 000000000..7633b346b --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/test_vars_exec_test.go @@ -0,0 +1,8 @@ +// +build !test_no_exec + +package main + +const ( + // indicates docker daemon tested supports 'docker exec' + supportsExec = true +) diff --git a/vendor/github.com/moby/moby/integration-cli/test_vars_noexec_test.go b/vendor/github.com/moby/moby/integration-cli/test_vars_noexec_test.go new file mode 100644 index 000000000..084509052 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/test_vars_noexec_test.go @@ -0,0 +1,8 @@ +// +build test_no_exec + +package main + +const ( + // indicates docker daemon tested supports 'docker exec' + supportsExec = false +) diff --git a/vendor/github.com/moby/moby/integration-cli/test_vars_noseccomp_test.go b/vendor/github.com/moby/moby/integration-cli/test_vars_noseccomp_test.go new file mode 100644 index 000000000..2f47ab07a --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/test_vars_noseccomp_test.go @@ -0,0 +1,8 @@ +// +build !seccomp + +package main + +const ( + // indicates docker daemon built with seccomp support + supportsSeccomp = false +) diff --git a/vendor/github.com/moby/moby/integration-cli/test_vars_seccomp_test.go b/vendor/github.com/moby/moby/integration-cli/test_vars_seccomp_test.go new file mode 100644 index 000000000..00cf69720 
--- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/test_vars_seccomp_test.go @@ -0,0 +1,8 @@ +// +build seccomp + +package main + +const ( + // indicates docker daemon built with seccomp support + supportsSeccomp = true +) diff --git a/vendor/github.com/moby/moby/integration-cli/test_vars_test.go b/vendor/github.com/moby/moby/integration-cli/test_vars_test.go new file mode 100644 index 000000000..139279ccd --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/test_vars_test.go @@ -0,0 +1,11 @@ +package main + +// sleepCommandForDaemonPlatform is a helper function that determines what +// the command is for a sleeping container based on the daemon platform. +// The Windows busybox image does not have a `top` command. +func sleepCommandForDaemonPlatform() []string { + if testEnv.DaemonPlatform() == "windows" { + return []string{"sleep", "240"} + } + return []string{"top"} +} diff --git a/vendor/github.com/moby/moby/integration-cli/test_vars_unix_test.go b/vendor/github.com/moby/moby/integration-cli/test_vars_unix_test.go new file mode 100644 index 000000000..f9ecc0112 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/test_vars_unix_test.go @@ -0,0 +1,14 @@ +// +build !windows + +package main + +const ( + // identifies if test suite is running on a unix platform + isUnixCli = true + + expectedFileChmod = "-rw-r--r--" + + // On Unix variants, the busybox image comes with the `top` command which + // runs indefinitely while still being interruptible by a signal. + defaultSleepImage = "busybox" +) diff --git a/vendor/github.com/moby/moby/integration-cli/test_vars_windows_test.go b/vendor/github.com/moby/moby/integration-cli/test_vars_windows_test.go new file mode 100644 index 000000000..bfc9a5a91 --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/test_vars_windows_test.go @@ -0,0 +1,15 @@ +// +build windows + +package main + +const ( + // identifies if test suite is running on a unix platform + isUnixCli = false + + // this is the expected file permission set on windows: gh#11395 + expectedFileChmod = "-rwxr-xr-x" + + // On Windows, the busybox image doesn't have the `top` command, so we rely + // on `sleep` with a high duration. 
+ defaultSleepImage = "busybox" +) diff --git a/vendor/github.com/moby/moby/integration-cli/trust_server_test.go b/vendor/github.com/moby/moby/integration-cli/trust_server_test.go new file mode 100644 index 000000000..9a999323f --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/trust_server_test.go @@ -0,0 +1,336 @@ +package main + +import ( + "context" + "fmt" + "io/ioutil" + "net" + "net/http" + "os" + "os/exec" + "path/filepath" + "strings" + "time" + + "github.com/docker/docker/api/types" + cliconfig "github.com/docker/docker/cli/config" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/fixtures/plugin" + "github.com/docker/docker/integration-cli/request" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/docker/go-connections/tlsconfig" + "github.com/go-check/check" +) + +var notaryBinary = "notary" +var notaryServerBinary = "notary-server" + +type keyPair struct { + Public string + Private string +} + +type testNotary struct { + cmd *exec.Cmd + dir string + keys []keyPair +} + +const notaryHost = "localhost:4443" +const notaryURL = "https://" + notaryHost + +var SuccessTagging = icmd.Expected{ + Out: "Tagging", +} + +var SuccessSigningAndPushing = icmd.Expected{ + Out: "Signing and pushing trust metadata", +} + +var SuccessDownloaded = icmd.Expected{ + Out: "Status: Downloaded", +} + +var SuccessDownloadedOnStderr = icmd.Expected{ + Err: "Status: Downloaded", +} + +func newTestNotary(c *check.C) (*testNotary, error) { + // generate server config + template := `{ + "server": { + "http_addr": "%s", + "tls_key_file": "%s", + "tls_cert_file": "%s" + }, + "trust_service": { + "type": "local", + "hostname": "", + "port": "", + "key_algorithm": "ed25519" + }, + "logging": { + "level": "debug" + }, + "storage": { + "backend": "memory" + } +}` + tmp, err := ioutil.TempDir("", "notary-test-") + if err != nil { + return nil, err + } + confPath := filepath.Join(tmp, "config.json") + config, err := os.Create(confPath) + if err != nil { + return nil, err + } + defer config.Close() + + workingDir, err := os.Getwd() + if err != nil { + return nil, err + } + if _, err := fmt.Fprintf(config, template, notaryHost, filepath.Join(workingDir, "fixtures/notary/localhost.key"), filepath.Join(workingDir, "fixtures/notary/localhost.cert")); err != nil { + os.RemoveAll(tmp) + return nil, err + } + + // generate client config + clientConfPath := filepath.Join(tmp, "client-config.json") + clientConfig, err := os.Create(clientConfPath) + if err != nil { + return nil, err + } + defer clientConfig.Close() + + template = `{ + "trust_dir" : "%s", + "remote_server": { + "url": "%s", + "skipTLSVerify": true + } +}` + if _, err = fmt.Fprintf(clientConfig, template, filepath.Join(cliconfig.Dir(), "trust"), notaryURL); err != nil { + os.RemoveAll(tmp) + return nil, err + } + + // load key fixture filenames + var keys []keyPair + for i := 1; i < 5; i++ { + keys = append(keys, keyPair{ + Public: filepath.Join(workingDir, fmt.Sprintf("fixtures/notary/delgkey%v.crt", i)), + Private: filepath.Join(workingDir, fmt.Sprintf("fixtures/notary/delgkey%v.key", i)), + }) + } + + // run notary-server + cmd := exec.Command(notaryServerBinary, "-config", confPath) + if err := cmd.Start(); err != nil { + os.RemoveAll(tmp) + if os.IsNotExist(err) { + c.Skip(err.Error()) + } + return nil, err + } + + testNotary := &testNotary{ + cmd: cmd, + dir: tmp, + keys: keys, + } + + // Wait for notary to be ready to serve 
requests. + for i := 1; i <= 20; i++ { + if err = testNotary.Ping(); err == nil { + break + } + time.Sleep(10 * time.Millisecond * time.Duration(i*i)) + } + + if err != nil { + c.Fatalf("Timeout waiting for test notary to become available: %s", err) + } + + return testNotary, nil +} + +func (t *testNotary) Ping() error { + tlsConfig := tlsconfig.ClientDefault() + tlsConfig.InsecureSkipVerify = true + client := http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: tlsConfig, + }, + } + resp, err := client.Get(fmt.Sprintf("%s/v2/", notaryURL)) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("notary ping replied with an unexpected status code %d", resp.StatusCode) + } + return nil +} + +func (t *testNotary) Close() { + t.cmd.Process.Kill() + t.cmd.Process.Wait() + os.RemoveAll(t.dir) +} + +func trustedCmd(cmd *icmd.Cmd) func() { + pwd := "12345678" + cmd.Env = append(cmd.Env, trustEnv(notaryURL, pwd, pwd)...) + return nil +} + +func trustedCmdWithServer(server string) func(*icmd.Cmd) func() { + return func(cmd *icmd.Cmd) func() { + pwd := "12345678" + cmd.Env = append(cmd.Env, trustEnv(server, pwd, pwd)...) + return nil + } +} + +func trustedCmdWithPassphrases(rootPwd, repositoryPwd string) func(*icmd.Cmd) func() { + return func(cmd *icmd.Cmd) func() { + cmd.Env = append(cmd.Env, trustEnv(notaryURL, rootPwd, repositoryPwd)...) + return nil + } +} + +func trustEnv(server, rootPwd, repositoryPwd string) []string { + env := append(os.Environ(), []string{ + "DOCKER_CONTENT_TRUST=1", + fmt.Sprintf("DOCKER_CONTENT_TRUST_SERVER=%s", server), + fmt.Sprintf("DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE=%s", rootPwd), + fmt.Sprintf("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE=%s", repositoryPwd), + }...) 
+ return env +} + +func (s *DockerTrustSuite) setupTrustedImage(c *check.C, name string) string { + repoName := fmt.Sprintf("%v/dockercli/%s:latest", privateRegistryURL, name) + // tag the image and upload it to the private registry + cli.DockerCmd(c, "tag", "busybox", repoName) + cli.Docker(cli.Args("push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + cli.DockerCmd(c, "rmi", repoName) + return repoName +} + +func (s *DockerTrustSuite) setupTrustedplugin(c *check.C, source, name string) string { + repoName := fmt.Sprintf("%v/dockercli/%s:latest", privateRegistryURL, name) + + client, err := request.NewClient() + c.Assert(err, checker.IsNil, check.Commentf("could not create test client")) + + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + err = plugin.Create(ctx, client, repoName) + cancel() + c.Assert(err, checker.IsNil, check.Commentf("could not create test plugin")) + + // tag the image and upload it to the private registry + // TODO: shouldn't need to use the CLI to do trust + cli.Docker(cli.Args("plugin", "push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + + ctx, cancel = context.WithTimeout(context.Background(), 60*time.Second) + err = client.PluginRemove(ctx, repoName, types.PluginRemoveOptions{Force: true}) + cancel() + c.Assert(err, checker.IsNil, check.Commentf("failed to cleanup test plugin for trust suite")) + return repoName +} + +func (s *DockerTrustSuite) notaryCmd(c *check.C, args ...string) string { + pwd := "12345678" + env := []string{ + fmt.Sprintf("NOTARY_ROOT_PASSPHRASE=%s", pwd), + fmt.Sprintf("NOTARY_TARGETS_PASSPHRASE=%s", pwd), + fmt.Sprintf("NOTARY_SNAPSHOT_PASSPHRASE=%s", pwd), + fmt.Sprintf("NOTARY_DELEGATION_PASSPHRASE=%s", pwd), + } + result := icmd.RunCmd(icmd.Cmd{ + Command: append([]string{notaryBinary, "-c", filepath.Join(s.not.dir, "client-config.json")}, args...), + Env: append(os.Environ(), env...), + }) + result.Assert(c, icmd.Success) + return result.Combined() +} + +func (s *DockerTrustSuite) notaryInitRepo(c *check.C, repoName string) { + s.notaryCmd(c, "init", repoName) +} + +func (s *DockerTrustSuite) notaryCreateDelegation(c *check.C, repoName, role string, pubKey string, paths ...string) { + pathsArg := "--all-paths" + if len(paths) > 0 { + pathsArg = "--paths=" + strings.Join(paths, ",") + } + + s.notaryCmd(c, "delegation", "add", repoName, role, pubKey, pathsArg) +} + +func (s *DockerTrustSuite) notaryPublish(c *check.C, repoName string) { + s.notaryCmd(c, "publish", repoName) +} + +func (s *DockerTrustSuite) notaryImportKey(c *check.C, repoName, role string, privKey string) { + s.notaryCmd(c, "key", "import", privKey, "-g", repoName, "-r", role) +} + +func (s *DockerTrustSuite) notaryListTargetsInRole(c *check.C, repoName, role string) map[string]string { + out := s.notaryCmd(c, "list", repoName, "-r", role) + + // should look something like: + // NAME DIGEST SIZE (BYTES) ROLE + // ------------------------------------------------------------------------------------------------------ + // latest 24a36bbc059b1345b7e8be0df20f1b23caa3602e85d42fff7ecd9d0bd255de56 1377 targets + + targets := make(map[string]string) + + // no target + lines := strings.Split(strings.TrimSpace(out), "\n") + if len(lines) == 1 && strings.Contains(out, "No targets present in this repository.") { + return targets + } + + // otherwise, there is at least one target + c.Assert(len(lines), checker.GreaterOrEqualThan, 3) + + for _, line := range lines[2:] { + tokens := strings.Fields(line) + c.Assert(tokens, 
checker.HasLen, 4) + targets[tokens[0]] = tokens[3] + } + + return targets +} + +func (s *DockerTrustSuite) assertTargetInRoles(c *check.C, repoName, target string, roles ...string) { + // check all the roles + for _, role := range roles { + targets := s.notaryListTargetsInRole(c, repoName, role) + roleName, ok := targets[target] + c.Assert(ok, checker.True) + c.Assert(roleName, checker.Equals, role) + } +} + +func (s *DockerTrustSuite) assertTargetNotInRoles(c *check.C, repoName, target string, roles ...string) { + targets := s.notaryListTargetsInRole(c, repoName, "targets") + + roleName, ok := targets[target] + if ok { + for _, role := range roles { + c.Assert(roleName, checker.Not(checker.Equals), role) + } + } +} diff --git a/vendor/github.com/moby/moby/integration-cli/utils_test.go b/vendor/github.com/moby/moby/integration-cli/utils_test.go new file mode 100644 index 000000000..2725ddf4f --- /dev/null +++ b/vendor/github.com/moby/moby/integration-cli/utils_test.go @@ -0,0 +1,32 @@ +package main + +import ( + "os/exec" + + "github.com/docker/docker/pkg/testutil/cmd" +) + +func getPrefixAndSlashFromDaemonPlatform() (prefix, slash string) { + if testEnv.DaemonPlatform() == "windows" { + return "c:", `\` + } + return "", "/" +} + +// TODO: update code to call cmd.RunCmd directly, and remove this function +// Deprecated: use pkg/testutil/cmd instead +func runCommandWithOutput(execCmd *exec.Cmd) (string, int, error) { + result := cmd.RunCmd(transformCmd(execCmd)) + return result.Combined(), result.ExitCode, result.Error +} + +// Temporary shim for migrating commands to the new function +func transformCmd(execCmd *exec.Cmd) cmd.Cmd { + return cmd.Cmd{ + Command: execCmd.Args, + Env: execCmd.Env, + Dir: execCmd.Dir, + Stdin: execCmd.Stdin, + Stdout: execCmd.Stdout, + } +} diff --git a/vendor/github.com/moby/moby/layer/empty.go b/vendor/github.com/moby/moby/layer/empty.go new file mode 100644 index 000000000..cf04aa12f --- /dev/null +++ b/vendor/github.com/moby/moby/layer/empty.go @@ -0,0 +1,65 @@ +package layer + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" +) + +// DigestSHA256EmptyTar is the canonical sha256 digest of empty tar file - +// (1024 NULL bytes) +const DigestSHA256EmptyTar = DiffID("sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef") + +type emptyLayer struct{} + +// EmptyLayer is a layer that corresponds to empty tar. 
+var EmptyLayer = &emptyLayer{} + +func (el *emptyLayer) TarStream() (io.ReadCloser, error) { + buf := new(bytes.Buffer) + tarWriter := tar.NewWriter(buf) + tarWriter.Close() + return ioutil.NopCloser(buf), nil +} + +func (el *emptyLayer) TarStreamFrom(p ChainID) (io.ReadCloser, error) { + if p == "" { + return el.TarStream() + } + return nil, fmt.Errorf("can't get parent tar stream of an empty layer") +} + +func (el *emptyLayer) ChainID() ChainID { + return ChainID(DigestSHA256EmptyTar) +} + +func (el *emptyLayer) DiffID() DiffID { + return DigestSHA256EmptyTar +} + +func (el *emptyLayer) Parent() Layer { + return nil +} + +func (el *emptyLayer) Size() (size int64, err error) { + return 0, nil +} + +func (el *emptyLayer) DiffSize() (size int64, err error) { + return 0, nil +} + +func (el *emptyLayer) Metadata() (map[string]string, error) { + return make(map[string]string), nil +} + +func (el *emptyLayer) Platform() Platform { + return "" +} + +// IsEmpty returns true if the layer is an EmptyLayer +func IsEmpty(diffID DiffID) bool { + return diffID == DigestSHA256EmptyTar +} diff --git a/vendor/github.com/moby/moby/layer/empty_test.go b/vendor/github.com/moby/moby/layer/empty_test.go new file mode 100644 index 000000000..5555dbd8a --- /dev/null +++ b/vendor/github.com/moby/moby/layer/empty_test.go @@ -0,0 +1,46 @@ +package layer + +import ( + "io" + "testing" + + "github.com/opencontainers/go-digest" +) + +func TestEmptyLayer(t *testing.T) { + if EmptyLayer.ChainID() != ChainID(DigestSHA256EmptyTar) { + t.Fatal("wrong ChainID for empty layer") + } + + if EmptyLayer.DiffID() != DigestSHA256EmptyTar { + t.Fatal("wrong DiffID for empty layer") + } + + if EmptyLayer.Parent() != nil { + t.Fatal("expected no parent for empty layer") + } + + if size, err := EmptyLayer.Size(); err != nil || size != 0 { + t.Fatal("expected zero size for empty layer") + } + + if diffSize, err := EmptyLayer.DiffSize(); err != nil || diffSize != 0 { + t.Fatal("expected zero diffsize for empty layer") + } + + tarStream, err := EmptyLayer.TarStream() + if err != nil { + t.Fatalf("error streaming tar for empty layer: %v", err) + } + + digester := digest.Canonical.Digester() + _, err = io.Copy(digester.Hash(), tarStream) + + if err != nil { + t.Fatalf("error hashing empty tar layer: %v", err) + } + + if digester.Digest() != digest.Digest(DigestSHA256EmptyTar) { + t.Fatal("empty layer tar stream hashes to wrong value") + } +} diff --git a/vendor/github.com/moby/moby/layer/filestore.go b/vendor/github.com/moby/moby/layer/filestore.go new file mode 100644 index 000000000..533f45481 --- /dev/null +++ b/vendor/github.com/moby/moby/layer/filestore.go @@ -0,0 +1,355 @@ +package layer + +import ( + "compress/gzip" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/docker/pkg/ioutils" + "github.com/opencontainers/go-digest" +) + +var ( + stringIDRegexp = regexp.MustCompile(`^[a-f0-9]{64}(-init)?$`) + supportedAlgorithms = []digest.Algorithm{ + digest.SHA256, + // digest.SHA384, // Currently not used + // digest.SHA512, // Currently not used + } +) + +type fileMetadataStore struct { + root string +} + +type fileMetadataTransaction struct { + store *fileMetadataStore + ws *ioutils.AtomicWriteSet +} + +// NewFSMetadataStore returns an instance of a metadata store +// which is backed by files on disk using the provided root +// as the root of metadata files. 
+func NewFSMetadataStore(root string) (MetadataStore, error) { + if err := os.MkdirAll(root, 0700); err != nil { + return nil, err + } + return &fileMetadataStore{ + root: root, + }, nil +} + +func (fms *fileMetadataStore) getLayerDirectory(layer ChainID) string { + dgst := digest.Digest(layer) + return filepath.Join(fms.root, string(dgst.Algorithm()), dgst.Hex()) +} + +func (fms *fileMetadataStore) getLayerFilename(layer ChainID, filename string) string { + return filepath.Join(fms.getLayerDirectory(layer), filename) +} + +func (fms *fileMetadataStore) getMountDirectory(mount string) string { + return filepath.Join(fms.root, "mounts", mount) +} + +func (fms *fileMetadataStore) getMountFilename(mount, filename string) string { + return filepath.Join(fms.getMountDirectory(mount), filename) +} + +func (fms *fileMetadataStore) StartTransaction() (MetadataTransaction, error) { + tmpDir := filepath.Join(fms.root, "tmp") + if err := os.MkdirAll(tmpDir, 0755); err != nil { + return nil, err + } + ws, err := ioutils.NewAtomicWriteSet(tmpDir) + if err != nil { + return nil, err + } + + return &fileMetadataTransaction{ + store: fms, + ws: ws, + }, nil +} + +func (fm *fileMetadataTransaction) SetSize(size int64) error { + content := fmt.Sprintf("%d", size) + return fm.ws.WriteFile("size", []byte(content), 0644) +} + +func (fm *fileMetadataTransaction) SetParent(parent ChainID) error { + return fm.ws.WriteFile("parent", []byte(digest.Digest(parent).String()), 0644) +} + +func (fm *fileMetadataTransaction) SetDiffID(diff DiffID) error { + return fm.ws.WriteFile("diff", []byte(digest.Digest(diff).String()), 0644) +} + +func (fm *fileMetadataTransaction) SetCacheID(cacheID string) error { + return fm.ws.WriteFile("cache-id", []byte(cacheID), 0644) +} + +func (fm *fileMetadataTransaction) SetDescriptor(ref distribution.Descriptor) error { + jsonRef, err := json.Marshal(ref) + if err != nil { + return err + } + return fm.ws.WriteFile("descriptor.json", jsonRef, 0644) +} + +func (fm *fileMetadataTransaction) TarSplitWriter(compressInput bool) (io.WriteCloser, error) { + f, err := fm.ws.FileWriter("tar-split.json.gz", os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return nil, err + } + var wc io.WriteCloser + if compressInput { + wc = gzip.NewWriter(f) + } else { + wc = f + } + + return ioutils.NewWriteCloserWrapper(wc, func() error { + wc.Close() + return f.Close() + }), nil +} + +func (fm *fileMetadataTransaction) Commit(layer ChainID) error { + finalDir := fm.store.getLayerDirectory(layer) + if err := os.MkdirAll(filepath.Dir(finalDir), 0755); err != nil { + return err + } + + return fm.ws.Commit(finalDir) +} + +func (fm *fileMetadataTransaction) Cancel() error { + return fm.ws.Cancel() +} + +func (fm *fileMetadataTransaction) String() string { + return fm.ws.String() +} + +func (fms *fileMetadataStore) GetSize(layer ChainID) (int64, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "size")) + if err != nil { + return 0, err + } + + size, err := strconv.ParseInt(string(content), 10, 64) + if err != nil { + return 0, err + } + + return size, nil +} + +func (fms *fileMetadataStore) GetParent(layer ChainID) (ChainID, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "parent")) + if err != nil { + if os.IsNotExist(err) { + return "", nil + } + return "", err + } + + dgst, err := digest.Parse(strings.TrimSpace(string(content))) + if err != nil { + return "", err + } + + return ChainID(dgst), nil +} + +func (fms *fileMetadataStore) GetDiffID(layer 
ChainID) (DiffID, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "diff")) + if err != nil { + return "", err + } + + dgst, err := digest.Parse(strings.TrimSpace(string(content))) + if err != nil { + return "", err + } + + return DiffID(dgst), nil +} + +func (fms *fileMetadataStore) GetCacheID(layer ChainID) (string, error) { + contentBytes, err := ioutil.ReadFile(fms.getLayerFilename(layer, "cache-id")) + if err != nil { + return "", err + } + content := strings.TrimSpace(string(contentBytes)) + + if !stringIDRegexp.MatchString(content) { + return "", errors.New("invalid cache id value") + } + + return content, nil +} + +func (fms *fileMetadataStore) GetDescriptor(layer ChainID) (distribution.Descriptor, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "descriptor.json")) + if err != nil { + if os.IsNotExist(err) { + // only return empty descriptor to represent what is stored + return distribution.Descriptor{}, nil + } + return distribution.Descriptor{}, err + } + + var ref distribution.Descriptor + err = json.Unmarshal(content, &ref) + if err != nil { + return distribution.Descriptor{}, err + } + return ref, err +} + +func (fms *fileMetadataStore) TarSplitReader(layer ChainID) (io.ReadCloser, error) { + fz, err := os.Open(fms.getLayerFilename(layer, "tar-split.json.gz")) + if err != nil { + return nil, err + } + f, err := gzip.NewReader(fz) + if err != nil { + fz.Close() + return nil, err + } + + return ioutils.NewReadCloserWrapper(f, func() error { + f.Close() + return fz.Close() + }), nil +} + +func (fms *fileMetadataStore) SetMountID(mount string, mountID string) error { + if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { + return err + } + return ioutil.WriteFile(fms.getMountFilename(mount, "mount-id"), []byte(mountID), 0644) +} + +func (fms *fileMetadataStore) SetInitID(mount string, init string) error { + if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { + return err + } + return ioutil.WriteFile(fms.getMountFilename(mount, "init-id"), []byte(init), 0644) +} + +func (fms *fileMetadataStore) SetMountParent(mount string, parent ChainID) error { + if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { + return err + } + return ioutil.WriteFile(fms.getMountFilename(mount, "parent"), []byte(digest.Digest(parent).String()), 0644) +} + +func (fms *fileMetadataStore) GetMountID(mount string) (string, error) { + contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "mount-id")) + if err != nil { + return "", err + } + content := strings.TrimSpace(string(contentBytes)) + + if !stringIDRegexp.MatchString(content) { + return "", errors.New("invalid mount id value") + } + + return content, nil +} + +func (fms *fileMetadataStore) GetInitID(mount string) (string, error) { + contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "init-id")) + if err != nil { + if os.IsNotExist(err) { + return "", nil + } + return "", err + } + content := strings.TrimSpace(string(contentBytes)) + + if !stringIDRegexp.MatchString(content) { + return "", errors.New("invalid init id value") + } + + return content, nil +} + +func (fms *fileMetadataStore) GetMountParent(mount string) (ChainID, error) { + content, err := ioutil.ReadFile(fms.getMountFilename(mount, "parent")) + if err != nil { + if os.IsNotExist(err) { + return "", nil + } + return "", err + } + + dgst, err := digest.Parse(strings.TrimSpace(string(content))) + if err != nil { + return "", err + } + + return ChainID(dgst), nil +} + 
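+// The helpers above and the accessors in this file imply the following
+// on-disk layout (an informal sketch derived from this file, not a
+// normative spec):
+//
+//	<root>/sha256/<chainID-hex>/size               layer size as a decimal int64
+//	<root>/sha256/<chainID-hex>/parent             parent ChainID digest
+//	<root>/sha256/<chainID-hex>/diff               DiffID digest
+//	<root>/sha256/<chainID-hex>/cache-id           graphdriver cache ID
+//	<root>/sha256/<chainID-hex>/descriptor.json    distribution.Descriptor (optional)
+//	<root>/sha256/<chainID-hex>/tar-split.json.gz  tar-split metadata (optional)
+//	<root>/mounts/<name>/{mount-id,init-id,parent} read-write mount state
+//	<root>/tmp                                     transaction scratch space
+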
+func (fms *fileMetadataStore) List() ([]ChainID, []string, error) { + var ids []ChainID + for _, algorithm := range supportedAlgorithms { + fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, string(algorithm))) + if err != nil { + if os.IsNotExist(err) { + continue + } + return nil, nil, err + } + + for _, fi := range fileInfos { + if fi.IsDir() && fi.Name() != "mounts" { + dgst := digest.NewDigestFromHex(string(algorithm), fi.Name()) + if err := dgst.Validate(); err != nil { + logrus.Debugf("Ignoring invalid digest %s:%s", algorithm, fi.Name()) + } else { + ids = append(ids, ChainID(dgst)) + } + } + } + } + + fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, "mounts")) + if err != nil { + if os.IsNotExist(err) { + return ids, []string{}, nil + } + return nil, nil, err + } + + var mounts []string + for _, fi := range fileInfos { + if fi.IsDir() { + mounts = append(mounts, fi.Name()) + } + } + + return ids, mounts, nil +} + +func (fms *fileMetadataStore) Remove(layer ChainID) error { + return os.RemoveAll(fms.getLayerDirectory(layer)) +} + +func (fms *fileMetadataStore) RemoveMount(mount string) error { + return os.RemoveAll(fms.getMountDirectory(mount)) +} diff --git a/vendor/github.com/moby/moby/layer/filestore_test.go b/vendor/github.com/moby/moby/layer/filestore_test.go new file mode 100644 index 000000000..2126a20b8 --- /dev/null +++ b/vendor/github.com/moby/moby/layer/filestore_test.go @@ -0,0 +1,104 @@ +package layer + +import ( + "fmt" + "io/ioutil" + "math/rand" + "os" + "path/filepath" + "strings" + "syscall" + "testing" + + "github.com/opencontainers/go-digest" +) + +func randomLayerID(seed int64) ChainID { + r := rand.New(rand.NewSource(seed)) + + return ChainID(digest.FromBytes([]byte(fmt.Sprintf("%d", r.Int63())))) +} + +func newFileMetadataStore(t *testing.T) (*fileMetadataStore, string, func()) { + td, err := ioutil.TempDir("", "layers-") + if err != nil { + t.Fatal(err) + } + fms, err := NewFSMetadataStore(td) + if err != nil { + t.Fatal(err) + } + + return fms.(*fileMetadataStore), td, func() { + if err := os.RemoveAll(td); err != nil { + t.Logf("Failed to cleanup %q: %s", td, err) + } + } +} + +func assertNotDirectoryError(t *testing.T, err error) { + perr, ok := err.(*os.PathError) + if !ok { + t.Fatalf("Unexpected error %#v, expected path error", err) + } + + if perr.Err != syscall.ENOTDIR { + t.Fatalf("Unexpected error %s, expected %s", perr.Err, syscall.ENOTDIR) + } +} + +func TestCommitFailure(t *testing.T) { + fms, td, cleanup := newFileMetadataStore(t) + defer cleanup() + + if err := ioutil.WriteFile(filepath.Join(td, "sha256"), []byte("was here first!"), 0644); err != nil { + t.Fatal(err) + } + + tx, err := fms.StartTransaction() + if err != nil { + t.Fatal(err) + } + + if err := tx.SetSize(0); err != nil { + t.Fatal(err) + } + + err = tx.Commit(randomLayerID(5)) + if err == nil { + t.Fatalf("Expected error committing with invalid layer parent directory") + } + assertNotDirectoryError(t, err) +} + +func TestStartTransactionFailure(t *testing.T) { + fms, td, cleanup := newFileMetadataStore(t) + defer cleanup() + + if err := ioutil.WriteFile(filepath.Join(td, "tmp"), []byte("was here first!"), 0644); err != nil { + t.Fatal(err) + } + + _, err := fms.StartTransaction() + if err == nil { + t.Fatalf("Expected error starting transaction with invalid layer parent directory") + } + assertNotDirectoryError(t, err) + + if err := os.Remove(filepath.Join(td, "tmp")); err != nil { + t.Fatal(err) + } + + tx, err := fms.StartTransaction() + if err != nil { + 
t.Fatal(err)
+	}
+
+	if expected := filepath.Join(td, "tmp"); !strings.HasPrefix(tx.String(), expected) {
+		t.Fatalf("Unexpected transaction string %q, expected prefix %q", tx.String(), expected)
+	}
+
+	if err := tx.Cancel(); err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/vendor/github.com/moby/moby/layer/filestore_unix.go b/vendor/github.com/moby/moby/layer/filestore_unix.go
new file mode 100644
index 000000000..fe8a4f8b2
--- /dev/null
+++ b/vendor/github.com/moby/moby/layer/filestore_unix.go
@@ -0,0 +1,13 @@
+// +build !windows
+
+package layer
+
+// SetPlatform is a no-op on non-Windows platforms; no "platform" file is written
+func (fm *fileMetadataTransaction) SetPlatform(platform Platform) error {
+	return nil
+}
+
+// GetPlatform is a no-op on non-Windows platforms and always returns an empty platform
+func (fms *fileMetadataStore) GetPlatform(layer ChainID) (Platform, error) {
+	return "", nil
+}
diff --git a/vendor/github.com/moby/moby/layer/filestore_windows.go b/vendor/github.com/moby/moby/layer/filestore_windows.go
new file mode 100644
index 000000000..066456d8d
--- /dev/null
+++ b/vendor/github.com/moby/moby/layer/filestore_windows.go
@@ -0,0 +1,35 @@
+package layer
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"strings"
+)
+
+// SetPlatform writes the "platform" file to the layer filestore
+func (fm *fileMetadataTransaction) SetPlatform(platform Platform) error {
+	if platform == "" {
+		return nil
+	}
+	return fm.ws.WriteFile("platform", []byte(platform), 0644)
+}
+
+// GetPlatform reads the "platform" file from the layer filestore
+func (fms *fileMetadataStore) GetPlatform(layer ChainID) (Platform, error) {
+	contentBytes, err := ioutil.ReadFile(fms.getLayerFilename(layer, "platform"))
+	if err != nil {
+		// For backwards compatibility, the platform file may not exist. Default to "windows" if missing.
+		if os.IsNotExist(err) {
+			return "windows", nil
+		}
+		return "", err
+	}
+	content := strings.TrimSpace(string(contentBytes))
+
+	if content != "windows" && content != "linux" {
+		return "", fmt.Errorf("invalid platform value: %s", content)
+	}
+
+	return Platform(content), nil
+}
diff --git a/vendor/github.com/moby/moby/layer/layer.go b/vendor/github.com/moby/moby/layer/layer.go
new file mode 100644
index 000000000..b3480a0cc
--- /dev/null
+++ b/vendor/github.com/moby/moby/layer/layer.go
@@ -0,0 +1,295 @@
+// Package layer is a package for managing read-only
+// and read-write mounts on the union file system
+// driver. Read-only mounts are referenced using a
+// content hash and are protected from mutation in
+// the exposed interface. The tar format is used
+// to create read-only layers and export both
+// read-only and writable layers. The exported
+// tar data for a read-only layer should match
+// the tar used to create the layer.
+package layer
+
+import (
+	"errors"
+	"io"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/opencontainers/go-digest"
+)
+
+var (
+	// ErrLayerDoesNotExist is used when an operation is
+	// attempted on a layer which does not exist.
+	ErrLayerDoesNotExist = errors.New("layer does not exist")
+
+	// ErrLayerNotRetained is used when a release is
+	// attempted on a layer which is not retained.
+	ErrLayerNotRetained = errors.New("layer not retained")
+
+	// ErrMountDoesNotExist is used when an operation is
+	// attempted on a mount layer which does not exist.
+	ErrMountDoesNotExist = errors.New("mount does not exist")
+
+	// ErrMountNameConflict is used when a mount is attempted
+	// to be created but there is already a mount with the name
+	// used for creation.
+	ErrMountNameConflict = errors.New("mount already exists with name")
+
+	// ErrActiveMount is used when an operation on a
+	// mount is attempted but the layer is still
+	// mounted and the operation cannot be performed.
+	ErrActiveMount = errors.New("mount still active")
+
+	// ErrNotMounted is used when requesting an active
+	// mount but the layer is not mounted.
+	ErrNotMounted = errors.New("not mounted")
+
+	// ErrMaxDepthExceeded is used when a layer is attempted
+	// to be created which would result in a layer depth
+	// greater than the 125 max.
+	ErrMaxDepthExceeded = errors.New("max depth exceeded")
+
+	// ErrNotSupported is used when the action is not supported
+	// on the current platform
+	ErrNotSupported = errors.New("not supported on this platform")
+)
+
+// ChainID is the content-addressable ID of a layer.
+type ChainID digest.Digest
+
+// String returns a string rendition of a layer ID
+func (id ChainID) String() string {
+	return string(id)
+}
+
+// Platform is the platform of a layer
+type Platform string
+
+// String returns a string rendition of a layer's target platform
+func (id Platform) String() string {
+	return string(id)
+}
+
+// DiffID is the hash of an individual layer tar.
+type DiffID digest.Digest
+
+// String returns a string rendition of a layer DiffID
+func (diffID DiffID) String() string {
+	return string(diffID)
+}
+
+// TarStreamer represents an object which may
+// have its contents exported as a tar stream.
+type TarStreamer interface {
+	// TarStream returns a tar archive stream
+	// for the contents of a layer.
+	TarStream() (io.ReadCloser, error)
+}
+
+// Layer represents a read-only layer
+type Layer interface {
+	TarStreamer
+
+	// TarStreamFrom returns a tar archive stream for all the layer chain with
+	// arbitrary depth.
+	TarStreamFrom(ChainID) (io.ReadCloser, error)
+
+	// ChainID returns the content hash of the entire layer chain. The hash
+	// chain is made up of the DiffID of the top layer and all of its parents.
+	ChainID() ChainID
+
+	// DiffID returns the content hash of the layer
+	// tar stream used to create this layer.
+	DiffID() DiffID
+
+	// Parent returns the next layer in the layer chain.
+	Parent() Layer
+
+	// Platform returns the platform of the layer
+	Platform() Platform
+
+	// Size returns the size of the entire layer chain. The size
+	// is calculated from the total size of all files in the layers.
+	Size() (int64, error)
+
+	// DiffSize returns the size difference of the top layer
+	// from its parent layer.
+	DiffSize() (int64, error)
+
+	// Metadata returns the low level storage metadata associated
+	// with the layer.
+	Metadata() (map[string]string, error)
+}
+
+// RWLayer represents a layer which is
+// readable and writable
+type RWLayer interface {
+	TarStreamer
+
+	// Name of mounted layer
+	Name() string
+
+	// Parent returns the layer which the writable
+	// layer was created from.
+	Parent() Layer
+
+	// Mount mounts the RWLayer and returns the filesystem path
+	// to the writable layer.
+	Mount(mountLabel string) (string, error)
+
+	// Unmount unmounts the RWLayer. This should be called
+	// for every mount. If there are multiple mount calls
+	// this operation will only decrement the internal mount counter.
+ Unmount() error + + // Size represents the size of the writable layer + // as calculated by the total size of the files + // changed in the mutable layer. + Size() (int64, error) + + // Changes returns the set of changes for the mutable layer + // from the base layer. + Changes() ([]archive.Change, error) + + // Metadata returns the low level metadata for the mutable layer + Metadata() (map[string]string, error) +} + +// Metadata holds information about a +// read-only layer +type Metadata struct { + // ChainID is the content hash of the layer + ChainID ChainID + + // DiffID is the hash of the tar data used to + // create the layer + DiffID DiffID + + // Size is the size of the layer and all parents + Size int64 + + // DiffSize is the size of the top layer + DiffSize int64 +} + +// MountInit is a function to initialize a +// writable mount. Changes made here will +// not be included in the Tar stream of the +// RWLayer. +type MountInit func(root string) error + +// CreateRWLayerOpts contains optional arguments to be passed to CreateRWLayer +type CreateRWLayerOpts struct { + MountLabel string + InitFunc MountInit + StorageOpt map[string]string +} + +// Store represents a backend for managing both +// read-only and read-write layers. +type Store interface { + Register(io.Reader, ChainID, Platform) (Layer, error) + Get(ChainID) (Layer, error) + Map() map[ChainID]Layer + Release(Layer) ([]Metadata, error) + + CreateRWLayer(id string, parent ChainID, opts *CreateRWLayerOpts) (RWLayer, error) + GetRWLayer(id string) (RWLayer, error) + GetMountID(id string) (string, error) + ReleaseRWLayer(RWLayer) ([]Metadata, error) + + Cleanup() error + DriverStatus() [][2]string + DriverName() string +} + +// DescribableStore represents a layer store capable of storing +// descriptors for layers. +type DescribableStore interface { + RegisterWithDescriptor(io.Reader, ChainID, Platform, distribution.Descriptor) (Layer, error) +} + +// MetadataTransaction represents functions for setting layer metadata +// with a single transaction. +type MetadataTransaction interface { + SetSize(int64) error + SetParent(parent ChainID) error + SetDiffID(DiffID) error + SetCacheID(string) error + SetDescriptor(distribution.Descriptor) error + SetPlatform(Platform) error + TarSplitWriter(compressInput bool) (io.WriteCloser, error) + + Commit(ChainID) error + Cancel() error + String() string +} + +// MetadataStore represents a backend for persisting +// metadata about layers and providing the metadata +// for restoring a Store. +type MetadataStore interface { + // StartTransaction starts an update for new metadata + // which will be used to represent an ID on commit. 
+	StartTransaction() (MetadataTransaction, error)
+
+	GetSize(ChainID) (int64, error)
+	GetParent(ChainID) (ChainID, error)
+	GetDiffID(ChainID) (DiffID, error)
+	GetCacheID(ChainID) (string, error)
+	GetDescriptor(ChainID) (distribution.Descriptor, error)
+	GetPlatform(ChainID) (Platform, error)
+	TarSplitReader(ChainID) (io.ReadCloser, error)
+
+	SetMountID(string, string) error
+	SetInitID(string, string) error
+	SetMountParent(string, ChainID) error
+
+	GetMountID(string) (string, error)
+	GetInitID(string) (string, error)
+	GetMountParent(string) (ChainID, error)
+
+	// List returns the full list of referenced
+	// read-only and read-write layers
+	List() ([]ChainID, []string, error)
+
+	Remove(ChainID) error
+	RemoveMount(string) error
+}
+
+// CreateChainID returns the ChainID for a slice of layer DiffIDs
+func CreateChainID(dgsts []DiffID) ChainID {
+	return createChainIDFromParent("", dgsts...)
+}
+
+func createChainIDFromParent(parent ChainID, dgsts ...DiffID) ChainID {
+	if len(dgsts) == 0 {
+		return parent
+	}
+	if parent == "" {
+		return createChainIDFromParent(ChainID(dgsts[0]), dgsts[1:]...)
+	}
+	// H(n) = SHA256(H(n-1) + " " + DiffID(n))
+	dgst := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0])))
+	return createChainIDFromParent(ChainID(dgst), dgsts[1:]...)
+}
+
+// ReleaseAndLog releases the provided layer from the given layer
+// store, logging any error and release metadata
+func ReleaseAndLog(ls Store, l Layer) {
+	metadata, err := ls.Release(l)
+	if err != nil {
+		logrus.Errorf("Error releasing layer %s: %v", l.ChainID(), err)
+	}
+	LogReleaseMetadata(metadata)
+}
+
+// LogReleaseMetadata logs a metadata array; use it to
+// ensure consistent logging of release metadata
+func LogReleaseMetadata(metadatas []Metadata) {
+	for _, metadata := range metadatas {
+		logrus.Infof("Layer %s cleaned up", metadata.ChainID)
+	}
+}
diff --git a/vendor/github.com/moby/moby/layer/layer_store.go b/vendor/github.com/moby/moby/layer/layer_store.go
new file mode 100644
index 000000000..75ac1e4f4
--- /dev/null
+++ b/vendor/github.com/moby/moby/layer/layer_store.go
@@ -0,0 +1,753 @@
+package layer
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"strings"
+	"sync"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution"
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/docker/docker/pkg/plugingetter"
+	"github.com/docker/docker/pkg/stringid"
+	"github.com/docker/docker/pkg/system"
+	"github.com/opencontainers/go-digest"
+	"github.com/vbatts/tar-split/tar/asm"
+	"github.com/vbatts/tar-split/tar/storage"
+)
+
+// maxLayerDepth represents the maximum number of
+// layers which can be chained together. 125 was
+// chosen to account for the 127 max in some
+// graphdrivers plus the 2 additional layers
+// used to create an RWLayer.
+const maxLayerDepth = 125 + +type layerStore struct { + store MetadataStore + driver graphdriver.Driver + + layerMap map[ChainID]*roLayer + layerL sync.Mutex + + mounts map[string]*mountedLayer + mountL sync.Mutex + + useTarSplit bool + + platform string +} + +// StoreOptions are the options used to create a new Store instance +type StoreOptions struct { + StorePath string + MetadataStorePathTemplate string + GraphDriver string + GraphDriverOptions []string + IDMappings *idtools.IDMappings + PluginGetter plugingetter.PluginGetter + ExperimentalEnabled bool + Platform string +} + +// NewStoreFromOptions creates a new Store instance +func NewStoreFromOptions(options StoreOptions) (Store, error) { + driver, err := graphdriver.New(options.GraphDriver, options.PluginGetter, graphdriver.Options{ + Root: options.StorePath, + DriverOptions: options.GraphDriverOptions, + UIDMaps: options.IDMappings.UIDs(), + GIDMaps: options.IDMappings.GIDs(), + ExperimentalEnabled: options.ExperimentalEnabled, + }) + if err != nil { + return nil, fmt.Errorf("error initializing graphdriver: %v", err) + } + logrus.Debugf("Using graph driver %s", driver) + + fms, err := NewFSMetadataStore(fmt.Sprintf(options.MetadataStorePathTemplate, driver)) + if err != nil { + return nil, err + } + + return NewStoreFromGraphDriver(fms, driver, options.Platform) +} + +// NewStoreFromGraphDriver creates a new Store instance using the provided +// metadata store and graph driver. The metadata store will be used to restore +// the Store. +func NewStoreFromGraphDriver(store MetadataStore, driver graphdriver.Driver, platform string) (Store, error) { + caps := graphdriver.Capabilities{} + if capDriver, ok := driver.(graphdriver.CapabilityDriver); ok { + caps = capDriver.Capabilities() + } + + ls := &layerStore{ + store: store, + driver: driver, + layerMap: map[ChainID]*roLayer{}, + mounts: map[string]*mountedLayer{}, + useTarSplit: !caps.ReproducesExactDiffs, + platform: platform, + } + + ids, mounts, err := store.List() + if err != nil { + return nil, err + } + + for _, id := range ids { + l, err := ls.loadLayer(id) + if err != nil { + logrus.Debugf("Failed to load layer %s: %s", id, err) + continue + } + if l.parent != nil { + l.parent.referenceCount++ + } + } + + for _, mount := range mounts { + if err := ls.loadMount(mount); err != nil { + logrus.Debugf("Failed to load mount %s: %s", mount, err) + } + } + + return ls, nil +} + +func (ls *layerStore) loadLayer(layer ChainID) (*roLayer, error) { + cl, ok := ls.layerMap[layer] + if ok { + return cl, nil + } + + diff, err := ls.store.GetDiffID(layer) + if err != nil { + return nil, fmt.Errorf("failed to get diff id for %s: %s", layer, err) + } + + size, err := ls.store.GetSize(layer) + if err != nil { + return nil, fmt.Errorf("failed to get size for %s: %s", layer, err) + } + + cacheID, err := ls.store.GetCacheID(layer) + if err != nil { + return nil, fmt.Errorf("failed to get cache id for %s: %s", layer, err) + } + + parent, err := ls.store.GetParent(layer) + if err != nil { + return nil, fmt.Errorf("failed to get parent for %s: %s", layer, err) + } + + descriptor, err := ls.store.GetDescriptor(layer) + if err != nil { + return nil, fmt.Errorf("failed to get descriptor for %s: %s", layer, err) + } + + platform, err := ls.store.GetPlatform(layer) + if err != nil { + return nil, fmt.Errorf("failed to get platform for %s: %s", layer, err) + } + + cl = &roLayer{ + chainID: layer, + diffID: diff, + size: size, + cacheID: cacheID, + layerStore: ls, + references: map[Layer]struct{}{}, + 
descriptor: descriptor,
+		platform:   platform,
+	}
+
+	if parent != "" {
+		p, err := ls.loadLayer(parent)
+		if err != nil {
+			return nil, err
+		}
+		cl.parent = p
+	}
+
+	ls.layerMap[cl.chainID] = cl
+
+	return cl, nil
+}
+
+func (ls *layerStore) loadMount(mount string) error {
+	if _, ok := ls.mounts[mount]; ok {
+		return nil
+	}
+
+	mountID, err := ls.store.GetMountID(mount)
+	if err != nil {
+		return err
+	}
+
+	initID, err := ls.store.GetInitID(mount)
+	if err != nil {
+		return err
+	}
+
+	parent, err := ls.store.GetMountParent(mount)
+	if err != nil {
+		return err
+	}
+
+	ml := &mountedLayer{
+		name:       mount,
+		mountID:    mountID,
+		initID:     initID,
+		layerStore: ls,
+		references: map[RWLayer]*referencedRWLayer{},
+	}
+
+	if parent != "" {
+		p, err := ls.loadLayer(parent)
+		if err != nil {
+			return err
+		}
+		ml.parent = p
+
+		p.referenceCount++
+	}
+
+	ls.mounts[ml.name] = ml
+
+	return nil
+}
+
+func (ls *layerStore) applyTar(tx MetadataTransaction, ts io.Reader, parent string, layer *roLayer) error {
+	digester := digest.Canonical.Digester()
+	tr := io.TeeReader(ts, digester.Hash())
+
+	rdr := tr
+	if ls.useTarSplit {
+		tsw, err := tx.TarSplitWriter(true)
+		if err != nil {
+			return err
+		}
+		metaPacker := storage.NewJSONPacker(tsw)
+		defer tsw.Close()
+
+		// we're passing nil here for the file putter, because the ApplyDiff will
+		// handle the extraction of the archive
+		rdr, err = asm.NewInputTarStream(tr, metaPacker, nil)
+		if err != nil {
+			return err
+		}
+	}
+
+	applySize, err := ls.driver.ApplyDiff(layer.cacheID, parent, rdr)
+	if err != nil {
+		return err
+	}
+
+	// Discard trailing data but ensure metadata is picked up to reconstruct stream
+	io.Copy(ioutil.Discard, rdr) // ignore error as reader may be closed
+
+	layer.size = applySize
+	layer.diffID = DiffID(digester.Digest())
+
+	logrus.Debugf("Applied tar %s to %s, size: %d", layer.diffID, layer.cacheID, applySize)
+
+	return nil
+}
+
+func (ls *layerStore) Register(ts io.Reader, parent ChainID, platform Platform) (Layer, error) {
+	return ls.registerWithDescriptor(ts, parent, platform, distribution.Descriptor{})
+}
+
+func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, platform Platform, descriptor distribution.Descriptor) (Layer, error) {
+	// err is used to hold the error which will always trigger
+	// cleanup of created sources but may not be an error returned
+	// to the caller (already exists).
+ var err error + var pid string + var p *roLayer + + // Integrity check - ensure we are creating something for the correct platform + if system.LCOWSupported() { + if strings.ToLower(ls.platform) != strings.ToLower(string(platform)) { + return nil, fmt.Errorf("cannot create entry for platform %q in layer store for platform %q", platform, ls.platform) + } + } + + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return nil, ErrLayerDoesNotExist + } + pid = p.cacheID + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + if p.depth() >= maxLayerDepth { + err = ErrMaxDepthExceeded + return nil, err + } + } + + // Create new roLayer + layer := &roLayer{ + parent: p, + cacheID: stringid.GenerateRandomID(), + referenceCount: 1, + layerStore: ls, + references: map[Layer]struct{}{}, + descriptor: descriptor, + platform: platform, + } + + if err = ls.driver.Create(layer.cacheID, pid, nil); err != nil { + return nil, err + } + + tx, err := ls.store.StartTransaction() + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + logrus.Debugf("Cleaning up layer %s: %v", layer.cacheID, err) + if err := ls.driver.Remove(layer.cacheID); err != nil { + logrus.Errorf("Error cleaning up cache layer %s: %v", layer.cacheID, err) + } + if err := tx.Cancel(); err != nil { + logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) + } + } + }() + + if err = ls.applyTar(tx, ts, pid, layer); err != nil { + return nil, err + } + + if layer.parent == nil { + layer.chainID = ChainID(layer.diffID) + } else { + layer.chainID = createChainIDFromParent(layer.parent.chainID, layer.diffID) + } + + if err = storeLayer(tx, layer); err != nil { + return nil, err + } + + ls.layerL.Lock() + defer ls.layerL.Unlock() + + if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { + // Set error for cleanup, but do not return the error + err = errors.New("layer already exists") + return existingLayer.getReference(), nil + } + + if err = tx.Commit(layer.chainID); err != nil { + return nil, err + } + + ls.layerMap[layer.chainID] = layer + + return layer.getReference(), nil +} + +func (ls *layerStore) getWithoutLock(layer ChainID) *roLayer { + l, ok := ls.layerMap[layer] + if !ok { + return nil + } + + l.referenceCount++ + + return l +} + +func (ls *layerStore) get(l ChainID) *roLayer { + ls.layerL.Lock() + defer ls.layerL.Unlock() + return ls.getWithoutLock(l) +} + +func (ls *layerStore) Get(l ChainID) (Layer, error) { + ls.layerL.Lock() + defer ls.layerL.Unlock() + + layer := ls.getWithoutLock(l) + if layer == nil { + return nil, ErrLayerDoesNotExist + } + + return layer.getReference(), nil +} + +func (ls *layerStore) Map() map[ChainID]Layer { + ls.layerL.Lock() + defer ls.layerL.Unlock() + + layers := map[ChainID]Layer{} + + for k, v := range ls.layerMap { + layers[k] = v + } + + return layers +} + +func (ls *layerStore) deleteLayer(layer *roLayer, metadata *Metadata) error { + err := ls.driver.Remove(layer.cacheID) + if err != nil { + return err + } + err = ls.store.Remove(layer.chainID) + if err != nil { + return err + } + metadata.DiffID = layer.diffID + metadata.ChainID = layer.chainID + metadata.Size, err = layer.Size() + if err != nil { + return err + } + metadata.DiffSize = layer.size + + return nil +} + +func (ls *layerStore) releaseLayer(l *roLayer) ([]Metadata, error) { + depth := 0 + removed := []Metadata{} + for { + if l.referenceCount == 0 { + panic("layer not 
retained") + } + l.referenceCount-- + if l.referenceCount != 0 { + return removed, nil + } + + if len(removed) == 0 && depth > 0 { + panic("cannot remove layer with child") + } + if l.hasReferences() { + panic("cannot delete referenced layer") + } + var metadata Metadata + if err := ls.deleteLayer(l, &metadata); err != nil { + return nil, err + } + + delete(ls.layerMap, l.chainID) + removed = append(removed, metadata) + + if l.parent == nil { + return removed, nil + } + + depth++ + l = l.parent + } +} + +func (ls *layerStore) Release(l Layer) ([]Metadata, error) { + ls.layerL.Lock() + defer ls.layerL.Unlock() + layer, ok := ls.layerMap[l.ChainID()] + if !ok { + return []Metadata{}, nil + } + if !layer.hasReference(l) { + return nil, ErrLayerNotRetained + } + + layer.deleteReference(l) + + return ls.releaseLayer(layer) +} + +func (ls *layerStore) CreateRWLayer(name string, parent ChainID, opts *CreateRWLayerOpts) (RWLayer, error) { + var ( + storageOpt map[string]string + initFunc MountInit + mountLabel string + ) + + if opts != nil { + mountLabel = opts.MountLabel + storageOpt = opts.StorageOpt + initFunc = opts.InitFunc + } + + ls.mountL.Lock() + defer ls.mountL.Unlock() + m, ok := ls.mounts[name] + if ok { + return nil, ErrMountNameConflict + } + + var err error + var pid string + var p *roLayer + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return nil, ErrLayerDoesNotExist + } + pid = p.cacheID + + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + } + + m = &mountedLayer{ + name: name, + parent: p, + mountID: ls.mountID(name), + layerStore: ls, + references: map[RWLayer]*referencedRWLayer{}, + } + + if initFunc != nil { + pid, err = ls.initMount(m.mountID, pid, mountLabel, initFunc, storageOpt) + if err != nil { + return nil, err + } + m.initID = pid + } + + createOpts := &graphdriver.CreateOpts{ + StorageOpt: storageOpt, + } + + if err = ls.driver.CreateReadWrite(m.mountID, pid, createOpts); err != nil { + return nil, err + } + if err = ls.saveMount(m); err != nil { + return nil, err + } + + return m.getReference(), nil +} + +func (ls *layerStore) GetRWLayer(id string) (RWLayer, error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + mount, ok := ls.mounts[id] + if !ok { + return nil, ErrMountDoesNotExist + } + + return mount.getReference(), nil +} + +func (ls *layerStore) GetMountID(id string) (string, error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + mount, ok := ls.mounts[id] + if !ok { + return "", ErrMountDoesNotExist + } + logrus.Debugf("GetMountID id: %s -> mountID: %s", id, mount.mountID) + + return mount.mountID, nil +} + +func (ls *layerStore) ReleaseRWLayer(l RWLayer) ([]Metadata, error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + m, ok := ls.mounts[l.Name()] + if !ok { + return []Metadata{}, nil + } + + if err := m.deleteReference(l); err != nil { + return nil, err + } + + if m.hasReferences() { + return []Metadata{}, nil + } + + if err := ls.driver.Remove(m.mountID); err != nil { + logrus.Errorf("Error removing mounted layer %s: %s", m.name, err) + m.retakeReference(l) + return nil, err + } + + if m.initID != "" { + if err := ls.driver.Remove(m.initID); err != nil { + logrus.Errorf("Error removing init layer %s: %s", m.name, err) + m.retakeReference(l) + return nil, err + } + } + + if err := ls.store.RemoveMount(m.name); err != nil { + logrus.Errorf("Error removing mount metadata: %s: %s", m.name, err) + m.retakeReference(l) + return nil, err + } + + 
delete(ls.mounts, m.Name())

+	ls.layerL.Lock()
+	defer ls.layerL.Unlock()
+	if m.parent != nil {
+		return ls.releaseLayer(m.parent)
+	}
+
+	return []Metadata{}, nil
+}
+
+func (ls *layerStore) saveMount(mount *mountedLayer) error {
+	if err := ls.store.SetMountID(mount.name, mount.mountID); err != nil {
+		return err
+	}
+
+	if mount.initID != "" {
+		if err := ls.store.SetInitID(mount.name, mount.initID); err != nil {
+			return err
+		}
+	}
+
+	if mount.parent != nil {
+		if err := ls.store.SetMountParent(mount.name, mount.parent.chainID); err != nil {
+			return err
+		}
+	}
+
+	ls.mounts[mount.name] = mount
+
+	return nil
+}
+
+func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc MountInit, storageOpt map[string]string) (string, error) {
+	// Use "-init" to maintain compatibility with graph drivers
+	// which are expecting this layer with this special name. If all
+	// graph drivers can be updated to not rely on knowing about this layer
+	// then the initID should be randomly generated.
+	initID := fmt.Sprintf("%s-init", graphID)
+
+	createOpts := &graphdriver.CreateOpts{
+		MountLabel: mountLabel,
+		StorageOpt: storageOpt,
+	}
+
+	if err := ls.driver.CreateReadWrite(initID, parent, createOpts); err != nil {
+		return "", err
+	}
+	p, err := ls.driver.Get(initID, "")
+	if err != nil {
+		return "", err
+	}
+
+	if err := initFunc(p); err != nil {
+		ls.driver.Put(initID)
+		return "", err
+	}
+
+	if err := ls.driver.Put(initID); err != nil {
+		return "", err
+	}
+
+	return initID, nil
+}
+
+func (ls *layerStore) getTarStream(rl *roLayer) (io.ReadCloser, error) {
+	if !ls.useTarSplit {
+		var parentCacheID string
+		if rl.parent != nil {
+			parentCacheID = rl.parent.cacheID
+		}
+
+		return ls.driver.Diff(rl.cacheID, parentCacheID)
+	}
+
+	r, err := ls.store.TarSplitReader(rl.chainID)
+	if err != nil {
+		return nil, err
+	}
+
+	pr, pw := io.Pipe()
+	go func() {
+		err := ls.assembleTarTo(rl.cacheID, r, nil, pw)
+		if err != nil {
+			pw.CloseWithError(err)
+		} else {
+			pw.Close()
+		}
+	}()
+
+	return pr, nil
+}
+
+func (ls *layerStore) assembleTarTo(graphID string, metadata io.ReadCloser, size *int64, w io.Writer) error {
+	diffDriver, ok := ls.driver.(graphdriver.DiffGetterDriver)
+	if !ok {
+		diffDriver = &naiveDiffPathDriver{ls.driver}
+	}
+
+	defer metadata.Close()
+
+	// get a file getter rooted at this layer's data in the graph driver
+	fileGetCloser, err := diffDriver.DiffGetter(graphID)
+	if err != nil {
+		return err
+	}
+	defer fileGetCloser.Close()
+
+	metaUnpacker := storage.NewJSONUnpacker(metadata)
+	unpackerCounter := &unpackSizeCounter{metaUnpacker, size}
+	logrus.Debugf("Assembling tar data for %s", graphID)
+	return asm.WriteOutputTarStream(fileGetCloser, unpackerCounter, w)
+}
+
+func (ls *layerStore) Cleanup() error {
+	return ls.driver.Cleanup()
+}
+
+func (ls *layerStore) DriverStatus() [][2]string {
+	return ls.driver.Status()
+}
+
+func (ls *layerStore) DriverName() string {
+	return ls.driver.String()
+}
+
+type naiveDiffPathDriver struct {
+	graphdriver.Driver
+}
+
+type fileGetPutter struct {
+	storage.FileGetter
+	driver graphdriver.Driver
+	id     string
+}
+
+func (w *fileGetPutter) Close() error {
+	return w.driver.Put(w.id)
+}
+
+func (n *naiveDiffPathDriver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
+	p, err := n.Driver.Get(id, "")
+	if err != nil {
+		return nil, err
+	}
+	return &fileGetPutter{storage.NewPathFileGetter(p), n.Driver, id}, nil
+}
diff --git a/vendor/github.com/moby/moby/layer/layer_store_windows.go
b/vendor/github.com/moby/moby/layer/layer_store_windows.go new file mode 100644 index 000000000..ccbf6dd52 --- /dev/null +++ b/vendor/github.com/moby/moby/layer/layer_store_windows.go @@ -0,0 +1,11 @@ +package layer + +import ( + "io" + + "github.com/docker/distribution" +) + +func (ls *layerStore) RegisterWithDescriptor(ts io.Reader, parent ChainID, platform Platform, descriptor distribution.Descriptor) (Layer, error) { + return ls.registerWithDescriptor(ts, parent, platform, descriptor) +} diff --git a/vendor/github.com/moby/moby/layer/layer_test.go b/vendor/github.com/moby/moby/layer/layer_test.go new file mode 100644 index 000000000..8ec5b4df5 --- /dev/null +++ b/vendor/github.com/moby/moby/layer/layer_test.go @@ -0,0 +1,772 @@ +package layer + +import ( + "bytes" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/vfs" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/stringid" + "github.com/opencontainers/go-digest" +) + +func init() { + graphdriver.ApplyUncompressedLayer = archive.UnpackLayer + defaultArchiver := archive.NewDefaultArchiver() + vfs.CopyWithTar = defaultArchiver.CopyWithTar +} + +func newVFSGraphDriver(td string) (graphdriver.Driver, error) { + uidMap := []idtools.IDMap{ + { + ContainerID: 0, + HostID: os.Getuid(), + Size: 1, + }, + } + gidMap := []idtools.IDMap{ + { + ContainerID: 0, + HostID: os.Getgid(), + Size: 1, + }, + } + + options := graphdriver.Options{Root: td, UIDMaps: uidMap, GIDMaps: gidMap} + return graphdriver.GetDriver("vfs", nil, options) +} + +func newTestGraphDriver(t *testing.T) (graphdriver.Driver, func()) { + td, err := ioutil.TempDir("", "graph-") + if err != nil { + t.Fatal(err) + } + + driver, err := newVFSGraphDriver(td) + if err != nil { + t.Fatal(err) + } + + return driver, func() { + os.RemoveAll(td) + } +} + +func newTestStore(t *testing.T) (Store, string, func()) { + td, err := ioutil.TempDir("", "layerstore-") + if err != nil { + t.Fatal(err) + } + + graph, graphcleanup := newTestGraphDriver(t) + fms, err := NewFSMetadataStore(td) + if err != nil { + t.Fatal(err) + } + ls, err := NewStoreFromGraphDriver(fms, graph, runtime.GOOS) + if err != nil { + t.Fatal(err) + } + + return ls, td, func() { + graphcleanup() + os.RemoveAll(td) + } +} + +type layerInit func(root string) error + +func createLayer(ls Store, parent ChainID, layerFunc layerInit) (Layer, error) { + containerID := stringid.GenerateRandomID() + mount, err := ls.CreateRWLayer(containerID, parent, nil) + if err != nil { + return nil, err + } + + path, err := mount.Mount("") + if err != nil { + return nil, err + } + + if err := layerFunc(path); err != nil { + return nil, err + } + + ts, err := mount.TarStream() + if err != nil { + return nil, err + } + defer ts.Close() + + layer, err := ls.Register(ts, parent, Platform(runtime.GOOS)) + if err != nil { + return nil, err + } + + if err := mount.Unmount(); err != nil { + return nil, err + } + + if _, err := ls.ReleaseRWLayer(mount); err != nil { + return nil, err + } + + return layer, nil +} + +type FileApplier interface { + ApplyFile(root string) error +} + +type testFile struct { + name string + content []byte + permission os.FileMode +} + +func newTestFile(name string, content []byte, perm os.FileMode) FileApplier { + return &testFile{ + name: name, + content: content, + permission: perm, + } +} + +func (tf *testFile) ApplyFile(root string) 
error { + fullPath := filepath.Join(root, tf.name) + if err := os.MkdirAll(filepath.Dir(fullPath), 0755); err != nil { + return err + } + // Check if already exists + if stat, err := os.Stat(fullPath); err == nil && stat.Mode().Perm() != tf.permission { + if err := os.Chmod(fullPath, tf.permission); err != nil { + return err + } + } + if err := ioutil.WriteFile(fullPath, tf.content, tf.permission); err != nil { + return err + } + return nil +} + +func initWithFiles(files ...FileApplier) layerInit { + return func(root string) error { + for _, f := range files { + if err := f.ApplyFile(root); err != nil { + return err + } + } + return nil + } +} + +func getCachedLayer(l Layer) *roLayer { + if rl, ok := l.(*referencedCacheLayer); ok { + return rl.roLayer + } + return l.(*roLayer) +} + +func getMountLayer(l RWLayer) *mountedLayer { + return l.(*referencedRWLayer).mountedLayer +} + +func createMetadata(layers ...Layer) []Metadata { + metadata := make([]Metadata, len(layers)) + for i := range layers { + size, err := layers[i].Size() + if err != nil { + panic(err) + } + + metadata[i].ChainID = layers[i].ChainID() + metadata[i].DiffID = layers[i].DiffID() + metadata[i].Size = size + metadata[i].DiffSize = getCachedLayer(layers[i]).size + } + + return metadata +} + +func assertMetadata(t *testing.T, metadata, expectedMetadata []Metadata) { + if len(metadata) != len(expectedMetadata) { + t.Fatalf("Unexpected number of deletes %d, expected %d", len(metadata), len(expectedMetadata)) + } + + for i := range metadata { + if metadata[i] != expectedMetadata[i] { + t.Errorf("Unexpected metadata\n\tExpected: %#v\n\tActual: %#v", expectedMetadata[i], metadata[i]) + } + } + if t.Failed() { + t.FailNow() + } +} + +func releaseAndCheckDeleted(t *testing.T, ls Store, layer Layer, removed ...Layer) { + layerCount := len(ls.(*layerStore).layerMap) + expectedMetadata := createMetadata(removed...) 
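+	// Releasing the last reference should delete the whole layer chain; the
+	// returned metadata is expected to describe exactly the removed layers.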
+ metadata, err := ls.Release(layer) + if err != nil { + t.Fatal(err) + } + + assertMetadata(t, metadata, expectedMetadata) + + if expected := layerCount - len(removed); len(ls.(*layerStore).layerMap) != expected { + t.Fatalf("Unexpected number of layers %d, expected %d", len(ls.(*layerStore).layerMap), expected) + } +} + +func cacheID(l Layer) string { + return getCachedLayer(l).cacheID +} + +func assertLayerEqual(t *testing.T, l1, l2 Layer) { + if l1.ChainID() != l2.ChainID() { + t.Fatalf("Mismatched ChainID: %s vs %s", l1.ChainID(), l2.ChainID()) + } + if l1.DiffID() != l2.DiffID() { + t.Fatalf("Mismatched DiffID: %s vs %s", l1.DiffID(), l2.DiffID()) + } + + size1, err := l1.Size() + if err != nil { + t.Fatal(err) + } + + size2, err := l2.Size() + if err != nil { + t.Fatal(err) + } + + if size1 != size2 { + t.Fatalf("Mismatched size: %d vs %d", size1, size2) + } + + if cacheID(l1) != cacheID(l2) { + t.Fatalf("Mismatched cache id: %s vs %s", cacheID(l1), cacheID(l2)) + } + + p1 := l1.Parent() + p2 := l2.Parent() + if p1 != nil && p2 != nil { + assertLayerEqual(t, p1, p2) + } else if p1 != nil || p2 != nil { + t.Fatalf("Mismatched parents: %v vs %v", p1, p2) + } +} + +func TestMountAndRegister(t *testing.T) { + ls, _, cleanup := newTestStore(t) + defer cleanup() + + li := initWithFiles(newTestFile("testfile.txt", []byte("some test data"), 0644)) + layer, err := createLayer(ls, "", li) + if err != nil { + t.Fatal(err) + } + + size, _ := layer.Size() + t.Logf("Layer size: %d", size) + + mount2, err := ls.CreateRWLayer("new-test-mount", layer.ChainID(), nil) + if err != nil { + t.Fatal(err) + } + + path2, err := mount2.Mount("") + if err != nil { + t.Fatal(err) + } + + b, err := ioutil.ReadFile(filepath.Join(path2, "testfile.txt")) + if err != nil { + t.Fatal(err) + } + + if expected := "some test data"; string(b) != expected { + t.Fatalf("Wrong file data, expected %q, got %q", expected, string(b)) + } + + if err := mount2.Unmount(); err != nil { + t.Fatal(err) + } + + if _, err := ls.ReleaseRWLayer(mount2); err != nil { + t.Fatal(err) + } +} + +func TestLayerRelease(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + ls, _, cleanup := newTestStore(t) + defer cleanup() + + layer1, err := createLayer(ls, "", initWithFiles(newTestFile("layer1.txt", []byte("layer 1 file"), 0644))) + if err != nil { + t.Fatal(err) + } + + layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("layer2.txt", []byte("layer 2 file"), 0644))) + if err != nil { + t.Fatal(err) + } + + if _, err := ls.Release(layer1); err != nil { + t.Fatal(err) + } + + layer3a, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3a file"), 0644))) + if err != nil { + t.Fatal(err) + } + + layer3b, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3b file"), 0644))) + if err != nil { + t.Fatal(err) + } + + if _, err := ls.Release(layer2); err != nil { + t.Fatal(err) + } + + t.Logf("Layer1: %s", layer1.ChainID()) + t.Logf("Layer2: %s", layer2.ChainID()) + t.Logf("Layer3a: %s", layer3a.ChainID()) + t.Logf("Layer3b: %s", layer3b.ChainID()) + + if expected := 4; len(ls.(*layerStore).layerMap) != expected { + t.Fatalf("Unexpected number of layers %d, expected %d", len(ls.(*layerStore).layerMap), expected) + } + + releaseAndCheckDeleted(t, ls, layer3b, layer3b) + releaseAndCheckDeleted(t, ls, layer3a, layer3a, layer2, layer1) +} + +func TestStoreRestore(t 
*testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + ls, _, cleanup := newTestStore(t) + defer cleanup() + + layer1, err := createLayer(ls, "", initWithFiles(newTestFile("layer1.txt", []byte("layer 1 file"), 0644))) + if err != nil { + t.Fatal(err) + } + + layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("layer2.txt", []byte("layer 2 file"), 0644))) + if err != nil { + t.Fatal(err) + } + + if _, err := ls.Release(layer1); err != nil { + t.Fatal(err) + } + + layer3, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3 file"), 0644))) + if err != nil { + t.Fatal(err) + } + + if _, err := ls.Release(layer2); err != nil { + t.Fatal(err) + } + + m, err := ls.CreateRWLayer("some-mount_name", layer3.ChainID(), nil) + if err != nil { + t.Fatal(err) + } + + path, err := m.Mount("") + if err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(filepath.Join(path, "testfile.txt"), []byte("nothing here"), 0644); err != nil { + t.Fatal(err) + } + + if err := m.Unmount(); err != nil { + t.Fatal(err) + } + + ls2, err := NewStoreFromGraphDriver(ls.(*layerStore).store, ls.(*layerStore).driver, runtime.GOOS) + if err != nil { + t.Fatal(err) + } + + layer3b, err := ls2.Get(layer3.ChainID()) + if err != nil { + t.Fatal(err) + } + + assertLayerEqual(t, layer3b, layer3) + + // Create again with same name, should return error + if _, err := ls2.CreateRWLayer("some-mount_name", layer3b.ChainID(), nil); err == nil { + t.Fatal("Expected error creating mount with same name") + } else if err != ErrMountNameConflict { + t.Fatal(err) + } + + m2, err := ls2.GetRWLayer("some-mount_name") + if err != nil { + t.Fatal(err) + } + + if mountPath, err := m2.Mount(""); err != nil { + t.Fatal(err) + } else if path != mountPath { + t.Fatalf("Unexpected path %s, expected %s", mountPath, path) + } + + if mountPath, err := m2.Mount(""); err != nil { + t.Fatal(err) + } else if path != mountPath { + t.Fatalf("Unexpected path %s, expected %s", mountPath, path) + } + if err := m2.Unmount(); err != nil { + t.Fatal(err) + } + + b, err := ioutil.ReadFile(filepath.Join(path, "testfile.txt")) + if err != nil { + t.Fatal(err) + } + if expected := "nothing here"; string(b) != expected { + t.Fatalf("Unexpected content %q, expected %q", string(b), expected) + } + + if err := m2.Unmount(); err != nil { + t.Fatal(err) + } + + if metadata, err := ls2.ReleaseRWLayer(m2); err != nil { + t.Fatal(err) + } else if len(metadata) != 0 { + t.Fatalf("Unexpectedly deleted layers: %#v", metadata) + } + + if metadata, err := ls2.ReleaseRWLayer(m2); err != nil { + t.Fatal(err) + } else if len(metadata) != 0 { + t.Fatalf("Unexpectedly deleted layers: %#v", metadata) + } + + releaseAndCheckDeleted(t, ls2, layer3b, layer3, layer2, layer1) +} + +func TestTarStreamStability(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + ls, _, cleanup := newTestStore(t) + defer cleanup() + + files1 := []FileApplier{ + newTestFile("/etc/hosts", []byte("mydomain 10.0.0.1"), 0644), + newTestFile("/etc/profile", []byte("PATH=/usr/bin"), 0644), + } + addedFile := newTestFile("/etc/shadow", []byte("root:::::::"), 0644) + files2 := []FileApplier{ + newTestFile("/etc/hosts", []byte("mydomain 10.0.0.2"), 0644), + newTestFile("/etc/profile", []byte("PATH=/usr/bin"), 0664), + newTestFile("/root/.bashrc", []byte("PATH=/usr/sbin:/usr/bin"), 0644), + } + 
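+	// Note: this test exercises the tar-split path: layer1's on-disk contents
+	// are modified directly through the graph driver below, yet TarStream is
+	// still expected to reproduce each originally registered tar byte-for-byte.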
+	tar1, err := tarFromFiles(files1...)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	tar2, err := tarFromFiles(files2...)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	layer1, err := ls.Register(bytes.NewReader(tar1), "", Platform(runtime.GOOS))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// hack the layer on disk to add a file behind the store's back
+	p, err := ls.(*layerStore).driver.Get(layer1.(*referencedCacheLayer).cacheID, "")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := addedFile.ApplyFile(p); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := ls.(*layerStore).driver.Put(layer1.(*referencedCacheLayer).cacheID); err != nil {
+		t.Fatal(err)
+	}
+
+	layer2, err := ls.Register(bytes.NewReader(tar2), layer1.ChainID(), Platform(runtime.GOOS))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	id1 := layer1.ChainID()
+	t.Logf("Layer 1: %s", layer1.ChainID())
+	t.Logf("Layer 2: %s", layer2.ChainID())
+
+	if _, err := ls.Release(layer1); err != nil {
+		t.Fatal(err)
+	}
+
+	assertLayerDiff(t, tar2, layer2)
+
+	layer1b, err := ls.Get(id1)
+	if err != nil {
+		t.Logf("Content of layer map: %#v", ls.(*layerStore).layerMap)
+		t.Fatal(err)
+	}
+
+	if _, err := ls.Release(layer2); err != nil {
+		t.Fatal(err)
+	}
+
+	assertLayerDiff(t, tar1, layer1b)
+
+	if _, err := ls.Release(layer1b); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func assertLayerDiff(t *testing.T, expected []byte, layer Layer) {
+	expectedDigest := digest.FromBytes(expected)
+
+	if digest.Digest(layer.DiffID()) != expectedDigest {
+		t.Fatalf("Mismatched diff id for %s, got %s, expected %s", layer.ChainID(), layer.DiffID(), expectedDigest)
+	}
+
+	ts, err := layer.TarStream()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer ts.Close()
+
+	actual, err := ioutil.ReadAll(ts)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(actual) != len(expected) {
+		logByteDiff(t, actual, expected)
+		t.Fatalf("Mismatched tar stream size for %s, got %d, expected %d", layer.ChainID(), len(actual), len(expected))
+	}
+
+	actualDigest := digest.FromBytes(actual)
+
+	if actualDigest != expectedDigest {
+		logByteDiff(t, actual, expected)
+		t.Fatalf("Wrong digest of tar stream, got %s, expected %s", actualDigest, expectedDigest)
+	}
+}
+
+const maxByteLog = 4 * 1024
+
+func logByteDiff(t *testing.T, actual, expected []byte) {
+	d1, d2 := byteDiff(actual, expected)
+	if len(d1) == 0 && len(d2) == 0 {
+		return
+	}
+
+	prefix := len(actual) - len(d1)
+	if len(d1) > maxByteLog || len(d2) > maxByteLog {
+		t.Logf("Byte diff after %d matching bytes", prefix)
+	} else {
+		t.Logf("Byte diff after %d matching bytes\nActual bytes after prefix:\n%x\nExpected bytes after prefix:\n%x", prefix, d1, d2)
+	}
+}
+
+// byteDiff returns the differing bytes after the matching prefix
+func byteDiff(b1, b2 []byte) ([]byte, []byte) {
+	i := 0
+	for i < len(b1) && i < len(b2) {
+		if b1[i] != b2[i] {
+			break
+		}
+		i++
+	}
+
+	return b1[i:], b2[i:]
+}
+
+func tarFromFiles(files ...FileApplier) ([]byte, error) {
+	td, err := ioutil.TempDir("", "tar-")
+	if err != nil {
+		return nil, err
+	}
+	defer os.RemoveAll(td)
+
+	for _, f := range files {
+		if err := f.ApplyFile(td); err != nil {
+			return nil, err
+		}
+	}
+
+	r, err := archive.Tar(td, archive.Uncompressed)
+	if err != nil {
+		return nil, err
+	}
+
+	buf := bytes.NewBuffer(nil)
+	if _, err := io.Copy(buf, r); err != nil {
+		return nil, err
+	}
+
+	return buf.Bytes(), nil
+}
+
+// assertReferences asserts that all the references are to the same
+// image and represent the full set of references to that image.
+func assertReferences(t *testing.T, references ...Layer) { + if len(references) == 0 { + return + } + base := references[0].(*referencedCacheLayer).roLayer + seenReferences := map[Layer]struct{}{ + references[0]: {}, + } + for i := 1; i < len(references); i++ { + other := references[i].(*referencedCacheLayer).roLayer + if base != other { + t.Fatalf("Unexpected referenced cache layer %s, expecting %s", other.ChainID(), base.ChainID()) + } + if _, ok := base.references[references[i]]; !ok { + t.Fatalf("Reference not part of reference list: %v", references[i]) + } + if _, ok := seenReferences[references[i]]; ok { + t.Fatalf("Duplicated reference %v", references[i]) + } + } + if rc := len(base.references); rc != len(references) { + t.Fatalf("Unexpected number of references %d, expecting %d", rc, len(references)) + } +} + +func TestRegisterExistingLayer(t *testing.T) { + ls, _, cleanup := newTestStore(t) + defer cleanup() + + baseFiles := []FileApplier{ + newTestFile("/etc/profile", []byte("# Base configuration"), 0644), + } + + layerFiles := []FileApplier{ + newTestFile("/root/.bashrc", []byte("# Root configuration"), 0644), + } + + li := initWithFiles(baseFiles...) + layer1, err := createLayer(ls, "", li) + if err != nil { + t.Fatal(err) + } + + tar1, err := tarFromFiles(layerFiles...) + if err != nil { + t.Fatal(err) + } + + layer2a, err := ls.Register(bytes.NewReader(tar1), layer1.ChainID(), Platform(runtime.GOOS)) + if err != nil { + t.Fatal(err) + } + + layer2b, err := ls.Register(bytes.NewReader(tar1), layer1.ChainID(), Platform(runtime.GOOS)) + if err != nil { + t.Fatal(err) + } + + assertReferences(t, layer2a, layer2b) +} + +func TestTarStreamVerification(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + ls, tmpdir, cleanup := newTestStore(t) + defer cleanup() + + files1 := []FileApplier{ + newTestFile("/foo", []byte("abc"), 0644), + newTestFile("/bar", []byte("def"), 0644), + } + files2 := []FileApplier{ + newTestFile("/foo", []byte("abc"), 0644), + newTestFile("/bar", []byte("def"), 0600), // different perm + } + + tar1, err := tarFromFiles(files1...) + if err != nil { + t.Fatal(err) + } + + tar2, err := tarFromFiles(files2...) 
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	layer1, err := ls.Register(bytes.NewReader(tar1), "", Platform(runtime.GOOS))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	layer2, err := ls.Register(bytes.NewReader(tar2), "", Platform(runtime.GOOS))
+	if err != nil {
+		t.Fatal(err)
+	}
+	id1 := digest.Digest(layer1.ChainID())
+	id2 := digest.Digest(layer2.ChainID())
+
+	// Replace tar data files
+	src, err := os.Open(filepath.Join(tmpdir, id1.Algorithm().String(), id1.Hex(), "tar-split.json.gz"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer src.Close()
+
+	dst, err := os.Create(filepath.Join(tmpdir, id2.Algorithm().String(), id2.Hex(), "tar-split.json.gz"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer dst.Close()
+
+	if _, err := io.Copy(dst, src); err != nil {
+		t.Fatal(err)
+	}
+
+	src.Sync()
+	dst.Sync()
+
+	ts, err := layer2.TarStream()
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = io.Copy(ioutil.Discard, ts)
+	if err == nil {
+		t.Fatal("expected data verification to fail")
+	}
+	if !strings.Contains(err.Error(), "could not verify layer data") {
+		t.Fatalf("wrong error returned from tarstream: %q", err)
+	}
+}
diff --git a/vendor/github.com/moby/moby/layer/layer_unix.go b/vendor/github.com/moby/moby/layer/layer_unix.go
new file mode 100644
index 000000000..776b78ac0
--- /dev/null
+++ b/vendor/github.com/moby/moby/layer/layer_unix.go
@@ -0,0 +1,9 @@
+// +build linux freebsd darwin openbsd solaris
+
+package layer
+
+import "github.com/docker/docker/pkg/stringid"
+
+func (ls *layerStore) mountID(name string) string {
+	return stringid.GenerateRandomID()
+}
diff --git a/vendor/github.com/moby/moby/layer/layer_unix_test.go b/vendor/github.com/moby/moby/layer/layer_unix_test.go
new file mode 100644
index 000000000..9aa1afd59
--- /dev/null
+++ b/vendor/github.com/moby/moby/layer/layer_unix_test.go
@@ -0,0 +1,71 @@
+// +build !windows
+
+package layer
+
+import "testing"
+
+func graphDiffSize(ls Store, l Layer) (int64, error) {
+	cl := getCachedLayer(l)
+	var parent string
+	if cl.parent != nil {
+		parent = cl.parent.cacheID
+	}
+	return ls.(*layerStore).driver.DiffSize(cl.cacheID, parent)
+}
+
+// Unix only, as the Windows graph driver does not support Changes, which is
+// indirectly invoked by calling DiffSize on the driver
+func TestLayerSize(t *testing.T) {
+	ls, _, cleanup := newTestStore(t)
+	defer cleanup()
+
+	content1 := []byte("Base contents")
+	content2 := []byte("Added contents")
+
+	layer1, err := createLayer(ls, "", initWithFiles(newTestFile("file1", content1, 0644)))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("file2", content2, 0644)))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	layer1DiffSize, err := graphDiffSize(ls, layer1)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if int(layer1DiffSize) != len(content1) {
+		t.Fatalf("Unexpected diff size %d, expected %d", layer1DiffSize, len(content1))
+	}
+
+	layer1Size, err := layer1.Size()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if expected := len(content1); int(layer1Size) != expected {
+		t.Fatalf("Unexpected size %d, expected %d", layer1Size, expected)
+	}
+
+	layer2DiffSize, err := graphDiffSize(ls, layer2)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if int(layer2DiffSize) != len(content2) {
+		t.Fatalf("Unexpected diff size %d, expected %d", layer2DiffSize, len(content2))
+	}
+
+	layer2Size, err := layer2.Size()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if expected := len(content1) + len(content2); int(layer2Size) != expected {
+		t.Fatalf("Unexpected size %d, expected %d", layer2Size,
expected) + } + +} diff --git a/vendor/github.com/moby/moby/layer/layer_windows.go b/vendor/github.com/moby/moby/layer/layer_windows.go new file mode 100644 index 000000000..a1c195311 --- /dev/null +++ b/vendor/github.com/moby/moby/layer/layer_windows.go @@ -0,0 +1,34 @@ +package layer + +import "errors" + +// GetLayerPath returns the path to a layer +func GetLayerPath(s Store, layer ChainID) (string, error) { + ls, ok := s.(*layerStore) + if !ok { + return "", errors.New("unsupported layer store") + } + ls.layerL.Lock() + defer ls.layerL.Unlock() + + rl, ok := ls.layerMap[layer] + if !ok { + return "", ErrLayerDoesNotExist + } + + path, err := ls.driver.Get(rl.cacheID, "") + if err != nil { + return "", err + } + + if err := ls.driver.Put(rl.cacheID); err != nil { + return "", err + } + + return path, nil +} + +func (ls *layerStore) mountID(name string) string { + // windows has issues if container ID doesn't match mount ID + return name +} diff --git a/vendor/github.com/moby/moby/layer/migration.go b/vendor/github.com/moby/moby/layer/migration.go new file mode 100644 index 000000000..4803a1ae5 --- /dev/null +++ b/vendor/github.com/moby/moby/layer/migration.go @@ -0,0 +1,256 @@ +package layer + +import ( + "compress/gzip" + "errors" + "fmt" + "io" + "os" + + "github.com/Sirupsen/logrus" + "github.com/opencontainers/go-digest" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" +) + +// CreateRWLayerByGraphID creates a RWLayer in the layer store using +// the provided name with the given graphID. To get the RWLayer +// after migration the layer may be retrieved by the given name. +func (ls *layerStore) CreateRWLayerByGraphID(name string, graphID string, parent ChainID) (err error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + m, ok := ls.mounts[name] + if ok { + if m.parent.chainID != parent { + return errors.New("name conflict, mismatched parent") + } + if m.mountID != graphID { + return errors.New("mount already exists") + } + + return nil + } + + if !ls.driver.Exists(graphID) { + return fmt.Errorf("graph ID does not exist: %q", graphID) + } + + var p *roLayer + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return ErrLayerDoesNotExist + } + + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + } + + // TODO: Ensure graphID has correct parent + + m = &mountedLayer{ + name: name, + parent: p, + mountID: graphID, + layerStore: ls, + references: map[RWLayer]*referencedRWLayer{}, + } + + // Check for existing init layer + initID := fmt.Sprintf("%s-init", graphID) + if ls.driver.Exists(initID) { + m.initID = initID + } + + if err = ls.saveMount(m); err != nil { + return err + } + + return nil +} + +func (ls *layerStore) ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID DiffID, size int64, err error) { + defer func() { + if err != nil { + logrus.Debugf("could not get checksum for %q with tar-split: %q", id, err) + diffID, size, err = ls.checksumForGraphIDNoTarsplit(id, parent, newTarDataPath) + } + }() + + if oldTarDataPath == "" { + err = errors.New("no tar-split file") + return + } + + tarDataFile, err := os.Open(oldTarDataPath) + if err != nil { + return + } + defer tarDataFile.Close() + uncompressed, err := gzip.NewReader(tarDataFile) + if err != nil { + return + } + + dgst := digest.Canonical.Digester() + err = ls.assembleTarTo(id, uncompressed, &size, dgst.Hash()) + if err != nil { + return + } + + diffID = 
DiffID(dgst.Digest())
+	err = os.RemoveAll(newTarDataPath)
+	if err != nil {
+		return
+	}
+	err = os.Link(oldTarDataPath, newTarDataPath)
+
+	return
+}
+
+func (ls *layerStore) checksumForGraphIDNoTarsplit(id, parent, newTarDataPath string) (diffID DiffID, size int64, err error) {
+	rawarchive, err := ls.driver.Diff(id, parent)
+	if err != nil {
+		return
+	}
+	defer rawarchive.Close()
+
+	f, err := os.Create(newTarDataPath)
+	if err != nil {
+		return
+	}
+	defer f.Close()
+	mfz := gzip.NewWriter(f)
+	defer mfz.Close()
+	metaPacker := storage.NewJSONPacker(mfz)
+
+	packerCounter := &packSizeCounter{metaPacker, &size}
+
+	archive, err := asm.NewInputTarStream(rawarchive, packerCounter, nil)
+	if err != nil {
+		return
+	}
+	dgst, err := digest.FromReader(archive)
+	if err != nil {
+		return
+	}
+	diffID = DiffID(dgst)
+	return
+}
+
+func (ls *layerStore) RegisterByGraphID(graphID string, parent ChainID, diffID DiffID, tarDataFile string, size int64) (Layer, error) {
+	// err is used to hold the error which will always trigger
+	// cleanup of created sources but may not be an error returned
+	// to the caller (already exists).
+	var err error
+	var p *roLayer
+	if string(parent) != "" {
+		p = ls.get(parent)
+		if p == nil {
+			return nil, ErrLayerDoesNotExist
+		}
+
+		// Release parent chain if error
+		defer func() {
+			if err != nil {
+				ls.layerL.Lock()
+				ls.releaseLayer(p)
+				ls.layerL.Unlock()
+			}
+		}()
+	}
+
+	// Create new roLayer
+	layer := &roLayer{
+		parent:         p,
+		cacheID:        graphID,
+		referenceCount: 1,
+		layerStore:     ls,
+		references:     map[Layer]struct{}{},
+		diffID:         diffID,
+		size:           size,
+		chainID:        createChainIDFromParent(parent, diffID),
+	}
+
+	ls.layerL.Lock()
+	defer ls.layerL.Unlock()
+
+	if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil {
+		// Set error for cleanup, but do not return
+		err = errors.New("layer already exists")
+		return existingLayer.getReference(), nil
+	}
+
+	tx, err := ls.store.StartTransaction()
+	if err != nil {
+		return nil, err
+	}
+
+	defer func() {
+		if err != nil {
+			logrus.Debugf("Cleaning up transaction after failed migration for %s: %v", graphID, err)
+			if err := tx.Cancel(); err != nil {
+				logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err)
+			}
+		}
+	}()
+
+	tsw, err := tx.TarSplitWriter(false)
+	if err != nil {
+		return nil, err
+	}
+	defer tsw.Close()
+	tdf, err := os.Open(tarDataFile)
+	if err != nil {
+		return nil, err
+	}
+	defer tdf.Close()
+	_, err = io.Copy(tsw, tdf)
+	if err != nil {
+		return nil, err
+	}
+
+	if err = storeLayer(tx, layer); err != nil {
+		return nil, err
+	}
+
+	if err = tx.Commit(layer.chainID); err != nil {
+		return nil, err
+	}
+
+	ls.layerMap[layer.chainID] = layer
+
+	return layer.getReference(), nil
+}
+
+type unpackSizeCounter struct {
+	unpacker storage.Unpacker
+	size     *int64
+}
+
+func (u *unpackSizeCounter) Next() (*storage.Entry, error) {
+	e, err := u.unpacker.Next()
+	if err == nil && u.size != nil {
+		*u.size += e.Size
+	}
+	return e, err
+}
+
+type packSizeCounter struct {
+	packer storage.Packer
+	size   *int64
+}
+
+func (p *packSizeCounter) AddEntry(e storage.Entry) (int, error) {
+	n, err := p.packer.AddEntry(e)
+	if err == nil && p.size != nil {
+		*p.size += e.Size
+	}
+	return n, err
+}
diff --git a/vendor/github.com/moby/moby/layer/migration_test.go b/vendor/github.com/moby/moby/layer/migration_test.go
new file mode 100644
index 000000000..7364e6cdc
--- /dev/null
+++ b/vendor/github.com/moby/moby/layer/migration_test.go
@@ -0,0 +1,435 @@
+package layer
+
+import 
( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "testing" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/stringid" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" +) + +func writeTarSplitFile(name string, tarContent []byte) error { + f, err := os.OpenFile(name, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return err + } + defer f.Close() + + fz := gzip.NewWriter(f) + + metaPacker := storage.NewJSONPacker(fz) + defer fz.Close() + + rdr, err := asm.NewInputTarStream(bytes.NewReader(tarContent), metaPacker, nil) + if err != nil { + return err + } + + if _, err := io.Copy(ioutil.Discard, rdr); err != nil { + return err + } + + return nil +} + +func TestLayerMigration(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + td, err := ioutil.TempDir("", "migration-test-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(td) + + layer1Files := []FileApplier{ + newTestFile("/root/.bashrc", []byte("# Boring configuration"), 0644), + newTestFile("/etc/profile", []byte("# Base configuration"), 0644), + } + + layer2Files := []FileApplier{ + newTestFile("/root/.bashrc", []byte("# Updated configuration"), 0644), + } + + tar1, err := tarFromFiles(layer1Files...) + if err != nil { + t.Fatal(err) + } + + tar2, err := tarFromFiles(layer2Files...) + if err != nil { + t.Fatal(err) + } + + graph, err := newVFSGraphDriver(filepath.Join(td, "graphdriver-")) + if err != nil { + t.Fatal(err) + } + + graphID1 := stringid.GenerateRandomID() + if err := graph.Create(graphID1, "", nil); err != nil { + t.Fatal(err) + } + if _, err := graph.ApplyDiff(graphID1, "", bytes.NewReader(tar1)); err != nil { + t.Fatal(err) + } + + tf1 := filepath.Join(td, "tar1.json.gz") + if err := writeTarSplitFile(tf1, tar1); err != nil { + t.Fatal(err) + } + + fms, err := NewFSMetadataStore(filepath.Join(td, "layers")) + if err != nil { + t.Fatal(err) + } + ls, err := NewStoreFromGraphDriver(fms, graph, runtime.GOOS) + if err != nil { + t.Fatal(err) + } + + newTarDataPath := filepath.Join(td, ".migration-tardata") + diffID, size, err := ls.(*layerStore).ChecksumForGraphID(graphID1, "", tf1, newTarDataPath) + if err != nil { + t.Fatal(err) + } + + layer1a, err := ls.(*layerStore).RegisterByGraphID(graphID1, "", diffID, newTarDataPath, size) + if err != nil { + t.Fatal(err) + } + + layer1b, err := ls.Register(bytes.NewReader(tar1), "", Platform(runtime.GOOS)) + if err != nil { + t.Fatal(err) + } + + assertReferences(t, layer1a, layer1b) + // Attempt register, should be same + layer2a, err := ls.Register(bytes.NewReader(tar2), layer1a.ChainID(), Platform(runtime.GOOS)) + if err != nil { + t.Fatal(err) + } + + graphID2 := stringid.GenerateRandomID() + if err := graph.Create(graphID2, graphID1, nil); err != nil { + t.Fatal(err) + } + if _, err := graph.ApplyDiff(graphID2, graphID1, bytes.NewReader(tar2)); err != nil { + t.Fatal(err) + } + + tf2 := filepath.Join(td, "tar2.json.gz") + if err := writeTarSplitFile(tf2, tar2); err != nil { + t.Fatal(err) + } + diffID, size, err = ls.(*layerStore).ChecksumForGraphID(graphID2, graphID1, tf2, newTarDataPath) + if err != nil { + t.Fatal(err) + } + + layer2b, err := ls.(*layerStore).RegisterByGraphID(graphID2, layer1a.ChainID(), diffID, tf2, size) + if err != nil { + t.Fatal(err) + } + assertReferences(t, layer2a, layer2b) + + if metadata, 
err := ls.Release(layer2a); err != nil { + t.Fatal(err) + } else if len(metadata) > 0 { + t.Fatalf("Unexpected layer removal after first release: %#v", metadata) + } + + metadata, err := ls.Release(layer2b) + if err != nil { + t.Fatal(err) + } + + assertMetadata(t, metadata, createMetadata(layer2a)) +} + +func tarFromFilesInGraph(graph graphdriver.Driver, graphID, parentID string, files ...FileApplier) ([]byte, error) { + t, err := tarFromFiles(files...) + if err != nil { + return nil, err + } + + if err := graph.Create(graphID, parentID, nil); err != nil { + return nil, err + } + if _, err := graph.ApplyDiff(graphID, parentID, bytes.NewReader(t)); err != nil { + return nil, err + } + + ar, err := graph.Diff(graphID, parentID) + if err != nil { + return nil, err + } + defer ar.Close() + + return ioutil.ReadAll(ar) +} + +func TestLayerMigrationNoTarsplit(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + td, err := ioutil.TempDir("", "migration-test-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(td) + + layer1Files := []FileApplier{ + newTestFile("/root/.bashrc", []byte("# Boring configuration"), 0644), + newTestFile("/etc/profile", []byte("# Base configuration"), 0644), + } + + layer2Files := []FileApplier{ + newTestFile("/root/.bashrc", []byte("# Updated configuration"), 0644), + } + + graph, err := newVFSGraphDriver(filepath.Join(td, "graphdriver-")) + if err != nil { + t.Fatal(err) + } + graphID1 := stringid.GenerateRandomID() + graphID2 := stringid.GenerateRandomID() + + tar1, err := tarFromFilesInGraph(graph, graphID1, "", layer1Files...) + if err != nil { + t.Fatal(err) + } + + tar2, err := tarFromFilesInGraph(graph, graphID2, graphID1, layer2Files...) + if err != nil { + t.Fatal(err) + } + + fms, err := NewFSMetadataStore(filepath.Join(td, "layers")) + if err != nil { + t.Fatal(err) + } + ls, err := NewStoreFromGraphDriver(fms, graph, runtime.GOOS) + if err != nil { + t.Fatal(err) + } + + newTarDataPath := filepath.Join(td, ".migration-tardata") + diffID, size, err := ls.(*layerStore).ChecksumForGraphID(graphID1, "", "", newTarDataPath) + if err != nil { + t.Fatal(err) + } + + layer1a, err := ls.(*layerStore).RegisterByGraphID(graphID1, "", diffID, newTarDataPath, size) + if err != nil { + t.Fatal(err) + } + + layer1b, err := ls.Register(bytes.NewReader(tar1), "", Platform(runtime.GOOS)) + if err != nil { + t.Fatal(err) + } + + assertReferences(t, layer1a, layer1b) + + // Attempt register, should be same + layer2a, err := ls.Register(bytes.NewReader(tar2), layer1a.ChainID(), Platform(runtime.GOOS)) + if err != nil { + t.Fatal(err) + } + + diffID, size, err = ls.(*layerStore).ChecksumForGraphID(graphID2, graphID1, "", newTarDataPath) + if err != nil { + t.Fatal(err) + } + + layer2b, err := ls.(*layerStore).RegisterByGraphID(graphID2, layer1a.ChainID(), diffID, newTarDataPath, size) + if err != nil { + t.Fatal(err) + } + assertReferences(t, layer2a, layer2b) + + if metadata, err := ls.Release(layer2a); err != nil { + t.Fatal(err) + } else if len(metadata) > 0 { + t.Fatalf("Unexpected layer removal after first release: %#v", metadata) + } + + metadata, err := ls.Release(layer2b) + if err != nil { + t.Fatal(err) + } + + assertMetadata(t, metadata, createMetadata(layer2a)) +} + +func TestMountMigration(t *testing.T) { + // TODO Windows: Figure out why this is failing (obvious - paths... 
needs porting) + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + ls, _, cleanup := newTestStore(t) + defer cleanup() + + baseFiles := []FileApplier{ + newTestFile("/root/.bashrc", []byte("# Boring configuration"), 0644), + newTestFile("/etc/profile", []byte("# Base configuration"), 0644), + } + initFiles := []FileApplier{ + newTestFile("/etc/hosts", []byte{}, 0644), + newTestFile("/etc/resolv.conf", []byte{}, 0644), + } + mountFiles := []FileApplier{ + newTestFile("/etc/hosts", []byte("localhost 127.0.0.1"), 0644), + newTestFile("/root/.bashrc", []byte("# Updated configuration"), 0644), + newTestFile("/root/testfile1.txt", []byte("nothing valuable"), 0644), + } + + initTar, err := tarFromFiles(initFiles...) + if err != nil { + t.Fatal(err) + } + + mountTar, err := tarFromFiles(mountFiles...) + if err != nil { + t.Fatal(err) + } + + graph := ls.(*layerStore).driver + + layer1, err := createLayer(ls, "", initWithFiles(baseFiles...)) + if err != nil { + t.Fatal(err) + } + + graphID1 := layer1.(*referencedCacheLayer).cacheID + + containerID := stringid.GenerateRandomID() + containerInit := fmt.Sprintf("%s-init", containerID) + + if err := graph.Create(containerInit, graphID1, nil); err != nil { + t.Fatal(err) + } + if _, err := graph.ApplyDiff(containerInit, graphID1, bytes.NewReader(initTar)); err != nil { + t.Fatal(err) + } + + if err := graph.Create(containerID, containerInit, nil); err != nil { + t.Fatal(err) + } + if _, err := graph.ApplyDiff(containerID, containerInit, bytes.NewReader(mountTar)); err != nil { + t.Fatal(err) + } + + if err := ls.(*layerStore).CreateRWLayerByGraphID("migration-mount", containerID, layer1.ChainID()); err != nil { + t.Fatal(err) + } + + rwLayer1, err := ls.GetRWLayer("migration-mount") + if err != nil { + t.Fatal(err) + } + + if _, err := rwLayer1.Mount(""); err != nil { + t.Fatal(err) + } + + changes, err := rwLayer1.Changes() + if err != nil { + t.Fatal(err) + } + + if expected := 5; len(changes) != expected { + t.Logf("Changes %#v", changes) + t.Fatalf("Wrong number of changes %d, expected %d", len(changes), expected) + } + + sortChanges(changes) + + assertChange(t, changes[0], archive.Change{ + Path: "/etc", + Kind: archive.ChangeModify, + }) + assertChange(t, changes[1], archive.Change{ + Path: "/etc/hosts", + Kind: archive.ChangeModify, + }) + assertChange(t, changes[2], archive.Change{ + Path: "/root", + Kind: archive.ChangeModify, + }) + assertChange(t, changes[3], archive.Change{ + Path: "/root/.bashrc", + Kind: archive.ChangeModify, + }) + assertChange(t, changes[4], archive.Change{ + Path: "/root/testfile1.txt", + Kind: archive.ChangeAdd, + }) + + if _, err := ls.CreateRWLayer("migration-mount", layer1.ChainID(), nil); err == nil { + t.Fatal("Expected error creating mount with same name") + } else if err != ErrMountNameConflict { + t.Fatal(err) + } + + rwLayer2, err := ls.GetRWLayer("migration-mount") + if err != nil { + t.Fatal(err) + } + + if getMountLayer(rwLayer1) != getMountLayer(rwLayer2) { + t.Fatal("Expected same layer from get with same name as from migrate") + } + + if _, err := rwLayer2.Mount(""); err != nil { + t.Fatal(err) + } + + if _, err := rwLayer2.Mount(""); err != nil { + t.Fatal(err) + } + + if metadata, err := ls.Release(layer1); err != nil { + t.Fatal(err) + } else if len(metadata) > 0 { + t.Fatalf("Expected no layers to be deleted, deleted %#v", metadata) + } + + if err := rwLayer1.Unmount(); err != nil { + t.Fatal(err) + } + + if _, err := ls.ReleaseRWLayer(rwLayer1); err != nil { + t.Fatal(err) + } + + 
if err := rwLayer2.Unmount(); err != nil { + t.Fatal(err) + } + if err := rwLayer2.Unmount(); err != nil { + t.Fatal(err) + } + metadata, err := ls.ReleaseRWLayer(rwLayer2) + if err != nil { + t.Fatal(err) + } + if len(metadata) == 0 { + t.Fatal("Expected base layer to be deleted when deleting mount") + } + + assertMetadata(t, metadata, createMetadata(layer1)) +} diff --git a/vendor/github.com/moby/moby/layer/mount_test.go b/vendor/github.com/moby/moby/layer/mount_test.go new file mode 100644 index 000000000..f5799e7cd --- /dev/null +++ b/vendor/github.com/moby/moby/layer/mount_test.go @@ -0,0 +1,239 @@ +package layer + +import ( + "io/ioutil" + "os" + "path/filepath" + "runtime" + "sort" + "testing" + + "github.com/docker/docker/pkg/archive" +) + +func TestMountInit(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + ls, _, cleanup := newTestStore(t) + defer cleanup() + + basefile := newTestFile("testfile.txt", []byte("base data!"), 0644) + initfile := newTestFile("testfile.txt", []byte("init data!"), 0777) + + li := initWithFiles(basefile) + layer, err := createLayer(ls, "", li) + if err != nil { + t.Fatal(err) + } + + mountInit := func(root string) error { + return initfile.ApplyFile(root) + } + + rwLayerOpts := &CreateRWLayerOpts{ + InitFunc: mountInit, + } + m, err := ls.CreateRWLayer("fun-mount", layer.ChainID(), rwLayerOpts) + if err != nil { + t.Fatal(err) + } + + path, err := m.Mount("") + if err != nil { + t.Fatal(err) + } + + f, err := os.Open(filepath.Join(path, "testfile.txt")) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + fi, err := f.Stat() + if err != nil { + t.Fatal(err) + } + + b, err := ioutil.ReadAll(f) + if err != nil { + t.Fatal(err) + } + + if expected := "init data!"; string(b) != expected { + t.Fatalf("Unexpected test file contents %q, expected %q", string(b), expected) + } + + if fi.Mode().Perm() != 0777 { + t.Fatalf("Unexpected filemode %o, expecting %o", fi.Mode().Perm(), 0777) + } +} + +func TestMountSize(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + ls, _, cleanup := newTestStore(t) + defer cleanup() + + content1 := []byte("Base contents") + content2 := []byte("Mutable contents") + contentInit := []byte("why am I excluded from the size ☹") + + li := initWithFiles(newTestFile("file1", content1, 0644)) + layer, err := createLayer(ls, "", li) + if err != nil { + t.Fatal(err) + } + + mountInit := func(root string) error { + return newTestFile("file-init", contentInit, 0777).ApplyFile(root) + } + rwLayerOpts := &CreateRWLayerOpts{ + InitFunc: mountInit, + } + + m, err := ls.CreateRWLayer("mount-size", layer.ChainID(), rwLayerOpts) + if err != nil { + t.Fatal(err) + } + + path, err := m.Mount("") + if err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(filepath.Join(path, "file2"), content2, 0755); err != nil { + t.Fatal(err) + } + + mountSize, err := m.Size() + if err != nil { + t.Fatal(err) + } + + if expected := len(content2); int(mountSize) != expected { + t.Fatalf("Unexpected mount size %d, expected %d", int(mountSize), expected) + } +} + +func TestMountChanges(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + ls, _, cleanup := newTestStore(t) + defer cleanup() + + basefiles := []FileApplier{ + newTestFile("testfile1.txt", []byte("base data!"), 0644), + newTestFile("testfile2.txt", 
[]byte("base data!"), 0644), + newTestFile("testfile3.txt", []byte("base data!"), 0644), + } + initfile := newTestFile("testfile1.txt", []byte("init data!"), 0777) + + li := initWithFiles(basefiles...) + layer, err := createLayer(ls, "", li) + if err != nil { + t.Fatal(err) + } + + mountInit := func(root string) error { + return initfile.ApplyFile(root) + } + rwLayerOpts := &CreateRWLayerOpts{ + InitFunc: mountInit, + } + + m, err := ls.CreateRWLayer("mount-changes", layer.ChainID(), rwLayerOpts) + if err != nil { + t.Fatal(err) + } + + path, err := m.Mount("") + if err != nil { + t.Fatal(err) + } + + if err := os.Chmod(filepath.Join(path, "testfile1.txt"), 0755); err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(filepath.Join(path, "testfile1.txt"), []byte("mount data!"), 0755); err != nil { + t.Fatal(err) + } + + if err := os.Remove(filepath.Join(path, "testfile2.txt")); err != nil { + t.Fatal(err) + } + + if err := os.Chmod(filepath.Join(path, "testfile3.txt"), 0755); err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(filepath.Join(path, "testfile4.txt"), []byte("mount data!"), 0644); err != nil { + t.Fatal(err) + } + + changes, err := m.Changes() + if err != nil { + t.Fatal(err) + } + + if expected := 4; len(changes) != expected { + t.Fatalf("Wrong number of changes %d, expected %d", len(changes), expected) + } + + sortChanges(changes) + + assertChange(t, changes[0], archive.Change{ + Path: "/testfile1.txt", + Kind: archive.ChangeModify, + }) + assertChange(t, changes[1], archive.Change{ + Path: "/testfile2.txt", + Kind: archive.ChangeDelete, + }) + assertChange(t, changes[2], archive.Change{ + Path: "/testfile3.txt", + Kind: archive.ChangeModify, + }) + assertChange(t, changes[3], archive.Change{ + Path: "/testfile4.txt", + Kind: archive.ChangeAdd, + }) +} + +func assertChange(t *testing.T, actual, expected archive.Change) { + if actual.Path != expected.Path { + t.Fatalf("Unexpected change path %s, expected %s", actual.Path, expected.Path) + } + if actual.Kind != expected.Kind { + t.Fatalf("Unexpected change type %s, expected %s", actual.Kind, expected.Kind) + } +} + +func sortChanges(changes []archive.Change) { + cs := &changeSorter{ + changes: changes, + } + sort.Sort(cs) +} + +type changeSorter struct { + changes []archive.Change +} + +func (cs *changeSorter) Len() int { + return len(cs.changes) +} + +func (cs *changeSorter) Swap(i, j int) { + cs.changes[i], cs.changes[j] = cs.changes[j], cs.changes[i] +} + +func (cs *changeSorter) Less(i, j int) bool { + return cs.changes[i].Path < cs.changes[j].Path +} diff --git a/vendor/github.com/moby/moby/layer/mounted_layer.go b/vendor/github.com/moby/moby/layer/mounted_layer.go new file mode 100644 index 000000000..a5cfcfa9b --- /dev/null +++ b/vendor/github.com/moby/moby/layer/mounted_layer.go @@ -0,0 +1,99 @@ +package layer + +import ( + "io" + + "github.com/docker/docker/pkg/archive" +) + +type mountedLayer struct { + name string + mountID string + initID string + parent *roLayer + path string + layerStore *layerStore + + references map[RWLayer]*referencedRWLayer +} + +func (ml *mountedLayer) cacheParent() string { + if ml.initID != "" { + return ml.initID + } + if ml.parent != nil { + return ml.parent.cacheID + } + return "" +} + +func (ml *mountedLayer) TarStream() (io.ReadCloser, error) { + return ml.layerStore.driver.Diff(ml.mountID, ml.cacheParent()) +} + +func (ml *mountedLayer) Name() string { + return ml.name +} + +func (ml *mountedLayer) Parent() Layer { + if ml.parent != nil { + return ml.parent + } + + 
// Return a nil interface instead of an interface wrapping a nil
+	// pointer.
+	return nil
+}
+
+func (ml *mountedLayer) Size() (int64, error) {
+	return ml.layerStore.driver.DiffSize(ml.mountID, ml.cacheParent())
+}
+
+func (ml *mountedLayer) Changes() ([]archive.Change, error) {
+	return ml.layerStore.driver.Changes(ml.mountID, ml.cacheParent())
+}
+
+func (ml *mountedLayer) Metadata() (map[string]string, error) {
+	return ml.layerStore.driver.GetMetadata(ml.mountID)
+}
+
+func (ml *mountedLayer) getReference() RWLayer {
+	ref := &referencedRWLayer{
+		mountedLayer: ml,
+	}
+	ml.references[ref] = ref
+
+	return ref
+}
+
+func (ml *mountedLayer) hasReferences() bool {
+	return len(ml.references) > 0
+}
+
+func (ml *mountedLayer) deleteReference(ref RWLayer) error {
+	if _, ok := ml.references[ref]; !ok {
+		return ErrLayerNotRetained
+	}
+	delete(ml.references, ref)
+	return nil
+}
+
+func (ml *mountedLayer) retakeReference(r RWLayer) {
+	if ref, ok := r.(*referencedRWLayer); ok {
+		ml.references[ref] = ref
+	}
+}
+
+type referencedRWLayer struct {
+	*mountedLayer
+}
+
+func (rl *referencedRWLayer) Mount(mountLabel string) (string, error) {
+	return rl.layerStore.driver.Get(rl.mountedLayer.mountID, mountLabel)
+}
+
+// Unmount decrements the activity count and unmounts the underlying layer.
+// Callers should only call `Unmount` once per call to `Mount`, even on error.
+func (rl *referencedRWLayer) Unmount() error {
+	return rl.layerStore.driver.Put(rl.mountedLayer.mountID)
+}
diff --git a/vendor/github.com/moby/moby/layer/ro_layer.go b/vendor/github.com/moby/moby/layer/ro_layer.go
new file mode 100644
index 000000000..e03d78b4d
--- /dev/null
+++ b/vendor/github.com/moby/moby/layer/ro_layer.go
@@ -0,0 +1,183 @@
+package layer
+
+import (
+	"fmt"
+	"io"
+
+	"github.com/docker/distribution"
+	"github.com/opencontainers/go-digest"
+)
+
+type roLayer struct {
+	chainID    ChainID
+	diffID     DiffID
+	parent     *roLayer
+	cacheID    string
+	size       int64
+	layerStore *layerStore
+	descriptor distribution.Descriptor
+	platform   Platform
+
+	referenceCount int
+	references     map[Layer]struct{}
+}
+
+// TarStream for roLayer guarantees that the data that is produced is the exact
+// data that the layer was registered with.
+func (rl *roLayer) TarStream() (io.ReadCloser, error) {
+	rc, err := rl.layerStore.getTarStream(rl)
+	if err != nil {
+		return nil, err
+	}
+
+	vrc, err := newVerifiedReadCloser(rc, digest.Digest(rl.diffID))
+	if err != nil {
+		return nil, err
+	}
+	return vrc, nil
+}
+
+// TarStreamFrom does not make any guarantees about the correctness of the
+// produced data. As such it should not be used when the layer content must be
+// verified to be an exact match to the registered layer.
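+//
+// A minimal caller-side sketch (hypothetical variables; TarStream above is the
+// verified counterpart to prefer when content must match the registered DiffID):
+//
+//	rc, err := rl.TarStreamFrom(parentChainID)
+//	if err == nil {
+//		defer rc.Close()
+//		_, _ = io.Copy(ioutil.Discard, rc) // raw, unverified diff data
+//	}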
+func (rl *roLayer) TarStreamFrom(parent ChainID) (io.ReadCloser, error) { + var parentCacheID string + for pl := rl.parent; pl != nil; pl = pl.parent { + if pl.chainID == parent { + parentCacheID = pl.cacheID + break + } + } + + if parent != ChainID("") && parentCacheID == "" { + return nil, fmt.Errorf("layer ID '%s' is not a parent of the specified layer: cannot provide diff to non-parent", parent) + } + return rl.layerStore.driver.Diff(rl.cacheID, parentCacheID) +} + +func (rl *roLayer) ChainID() ChainID { + return rl.chainID +} + +func (rl *roLayer) DiffID() DiffID { + return rl.diffID +} + +func (rl *roLayer) Parent() Layer { + if rl.parent == nil { + return nil + } + return rl.parent +} + +func (rl *roLayer) Size() (size int64, err error) { + if rl.parent != nil { + size, err = rl.parent.Size() + if err != nil { + return + } + } + + return size + rl.size, nil +} + +func (rl *roLayer) DiffSize() (size int64, err error) { + return rl.size, nil +} + +func (rl *roLayer) Metadata() (map[string]string, error) { + return rl.layerStore.driver.GetMetadata(rl.cacheID) +} + +type referencedCacheLayer struct { + *roLayer +} + +func (rl *roLayer) getReference() Layer { + ref := &referencedCacheLayer{ + roLayer: rl, + } + rl.references[ref] = struct{}{} + + return ref +} + +func (rl *roLayer) hasReference(ref Layer) bool { + _, ok := rl.references[ref] + return ok +} + +func (rl *roLayer) hasReferences() bool { + return len(rl.references) > 0 +} + +func (rl *roLayer) deleteReference(ref Layer) { + delete(rl.references, ref) +} + +func (rl *roLayer) depth() int { + if rl.parent == nil { + return 1 + } + return rl.parent.depth() + 1 +} + +func storeLayer(tx MetadataTransaction, layer *roLayer) error { + if err := tx.SetDiffID(layer.diffID); err != nil { + return err + } + if err := tx.SetSize(layer.size); err != nil { + return err + } + if err := tx.SetCacheID(layer.cacheID); err != nil { + return err + } + // Do not store empty descriptors + if layer.descriptor.Digest != "" { + if err := tx.SetDescriptor(layer.descriptor); err != nil { + return err + } + } + if layer.parent != nil { + if err := tx.SetParent(layer.parent.chainID); err != nil { + return err + } + } + if err := tx.SetPlatform(layer.platform); err != nil { + return err + } + + return nil +} + +func newVerifiedReadCloser(rc io.ReadCloser, dgst digest.Digest) (io.ReadCloser, error) { + return &verifiedReadCloser{ + rc: rc, + dgst: dgst, + verifier: dgst.Verifier(), + }, nil +} + +type verifiedReadCloser struct { + rc io.ReadCloser + dgst digest.Digest + verifier digest.Verifier +} + +func (vrc *verifiedReadCloser) Read(p []byte) (n int, err error) { + n, err = vrc.rc.Read(p) + if n > 0 { + if n, err := vrc.verifier.Write(p[:n]); err != nil { + return n, err + } + } + if err == io.EOF { + if !vrc.verifier.Verified() { + err = fmt.Errorf("could not verify layer data for: %s. This may be because internal files in the layer store were modified. 
Re-pulling or rebuilding this image may resolve the issue", vrc.dgst) + } + } + return +} +func (vrc *verifiedReadCloser) Close() error { + return vrc.rc.Close() +} diff --git a/vendor/github.com/moby/moby/layer/ro_layer_unix.go b/vendor/github.com/moby/moby/layer/ro_layer_unix.go new file mode 100644 index 000000000..1b36856f9 --- /dev/null +++ b/vendor/github.com/moby/moby/layer/ro_layer_unix.go @@ -0,0 +1,7 @@ +// +build !windows + +package layer + +func (rl *roLayer) Platform() Platform { + return "" +} diff --git a/vendor/github.com/moby/moby/layer/ro_layer_windows.go b/vendor/github.com/moby/moby/layer/ro_layer_windows.go new file mode 100644 index 000000000..6679bdfe8 --- /dev/null +++ b/vendor/github.com/moby/moby/layer/ro_layer_windows.go @@ -0,0 +1,16 @@ +package layer + +import "github.com/docker/distribution" + +var _ distribution.Describable = &roLayer{} + +func (rl *roLayer) Descriptor() distribution.Descriptor { + return rl.descriptor +} + +func (rl *roLayer) Platform() Platform { + if rl.platform == "" { + return "windows" + } + return rl.platform +} diff --git a/vendor/github.com/moby/moby/libcontainerd/client.go b/vendor/github.com/moby/moby/libcontainerd/client.go new file mode 100644 index 000000000..c9004b813 --- /dev/null +++ b/vendor/github.com/moby/moby/libcontainerd/client.go @@ -0,0 +1,46 @@ +package libcontainerd + +import ( + "fmt" + "sync" + + "github.com/docker/docker/pkg/locker" +) + +// clientCommon contains the platform agnostic fields used in the client structure +type clientCommon struct { + backend Backend + containers map[string]*container + locker *locker.Locker + mapMutex sync.RWMutex // protects read/write operations from containers map +} + +func (clnt *client) lock(containerID string) { + clnt.locker.Lock(containerID) +} + +func (clnt *client) unlock(containerID string) { + clnt.locker.Unlock(containerID) +} + +// must hold a lock for cont.containerID +func (clnt *client) appendContainer(cont *container) { + clnt.mapMutex.Lock() + clnt.containers[cont.containerID] = cont + clnt.mapMutex.Unlock() +} +func (clnt *client) deleteContainer(containerID string) { + clnt.mapMutex.Lock() + delete(clnt.containers, containerID) + clnt.mapMutex.Unlock() +} + +func (clnt *client) getContainer(containerID string) (*container, error) { + clnt.mapMutex.RLock() + container, ok := clnt.containers[containerID] + defer clnt.mapMutex.RUnlock() + if !ok { + return nil, fmt.Errorf("invalid container: %s", containerID) // fixme: typed error + } + return container, nil +} diff --git a/vendor/github.com/moby/moby/libcontainerd/client_linux.go b/vendor/github.com/moby/moby/libcontainerd/client_linux.go new file mode 100644 index 000000000..54eaf3553 --- /dev/null +++ b/vendor/github.com/moby/moby/libcontainerd/client_linux.go @@ -0,0 +1,619 @@ +package libcontainerd + +import ( + "fmt" + "os" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + containerd "github.com/containerd/containerd/api/grpc/types" + containerd_runtime_types "github.com/containerd/containerd/runtime" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/mount" + "github.com/golang/protobuf/ptypes" + "github.com/golang/protobuf/ptypes/timestamp" + specs "github.com/opencontainers/runtime-spec/specs-go" + "golang.org/x/net/context" + "golang.org/x/sys/unix" +) + +type client struct { + clientCommon + + // Platform specific properties below here. 
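+	// (Descriptive note: remote wraps the connection to the containerd
+	// daemon, exitNotifiers lets Restore wait for a container's exit
+	// event, and liveRestore controls whether running containers are
+	// reattached or killed on Restore; summarized from usage below.)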
+ remote *remote + q queue + exitNotifiers map[string]*exitNotifier + liveRestore bool +} + +// GetServerVersion returns the connected server version information +func (clnt *client) GetServerVersion(ctx context.Context) (*ServerVersion, error) { + resp, err := clnt.remote.apiClient.GetServerVersion(ctx, &containerd.GetServerVersionRequest{}) + if err != nil { + return nil, err + } + + sv := &ServerVersion{ + GetServerVersionResponse: *resp, + } + + return sv, nil +} + +// AddProcess is the handler for adding a process to an already running +// container. It's called through docker exec. It returns the system pid of the +// exec'd process. +func (clnt *client) AddProcess(ctx context.Context, containerID, processFriendlyName string, specp Process, attachStdio StdioCallback) (pid int, err error) { + clnt.lock(containerID) + defer clnt.unlock(containerID) + container, err := clnt.getContainer(containerID) + if err != nil { + return -1, err + } + + spec, err := container.spec() + if err != nil { + return -1, err + } + sp := spec.Process + sp.Args = specp.Args + sp.Terminal = specp.Terminal + if len(specp.Env) > 0 { + sp.Env = specp.Env + } + if specp.Cwd != nil { + sp.Cwd = *specp.Cwd + } + if specp.User != nil { + sp.User = specs.User{ + UID: specp.User.UID, + GID: specp.User.GID, + AdditionalGids: specp.User.AdditionalGids, + } + } + if specp.Capabilities != nil { + sp.Capabilities.Bounding = specp.Capabilities + sp.Capabilities.Effective = specp.Capabilities + sp.Capabilities.Inheritable = specp.Capabilities + sp.Capabilities.Permitted = specp.Capabilities + } + + p := container.newProcess(processFriendlyName) + + r := &containerd.AddProcessRequest{ + Args: sp.Args, + Cwd: sp.Cwd, + Terminal: sp.Terminal, + Id: containerID, + Env: sp.Env, + User: &containerd.User{ + Uid: sp.User.UID, + Gid: sp.User.GID, + AdditionalGids: sp.User.AdditionalGids, + }, + Pid: processFriendlyName, + Stdin: p.fifo(unix.Stdin), + Stdout: p.fifo(unix.Stdout), + Stderr: p.fifo(unix.Stderr), + Capabilities: sp.Capabilities.Effective, + ApparmorProfile: sp.ApparmorProfile, + SelinuxLabel: sp.SelinuxLabel, + NoNewPrivileges: sp.NoNewPrivileges, + Rlimits: convertRlimits(sp.Rlimits), + } + + fifoCtx, cancel := context.WithCancel(context.Background()) + defer func() { + if err != nil { + cancel() + } + }() + + iopipe, err := p.openFifos(fifoCtx, sp.Terminal) + if err != nil { + return -1, err + } + + resp, err := clnt.remote.apiClient.AddProcess(ctx, r) + if err != nil { + p.closeFifos(iopipe) + return -1, err + } + + var stdinOnce sync.Once + stdin := iopipe.Stdin + iopipe.Stdin = ioutils.NewWriteCloserWrapper(stdin, func() error { + var err error + stdinOnce.Do(func() { // on error from attach we don't know if stdin was already closed + err = stdin.Close() + if err2 := p.sendCloseStdin(); err == nil { + err = err2 + } + }) + return err + }) + + container.processes[processFriendlyName] = p + + if err := attachStdio(*iopipe); err != nil { + p.closeFifos(iopipe) + return -1, err + } + + return int(resp.SystemPid), nil +} + +func (clnt *client) SignalProcess(containerID string, pid string, sig int) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + _, err := clnt.remote.apiClient.Signal(context.Background(), &containerd.SignalRequest{ + Id: containerID, + Pid: pid, + Signal: uint32(sig), + }) + return err +} + +func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + if _, err := 
clnt.getContainer(containerID); err != nil { + return err + } + _, err := clnt.remote.apiClient.UpdateProcess(context.Background(), &containerd.UpdateProcessRequest{ + Id: containerID, + Pid: processFriendlyName, + Width: uint32(width), + Height: uint32(height), + }) + return err +} + +func (clnt *client) Pause(containerID string) error { + return clnt.setState(containerID, StatePause) +} + +func (clnt *client) setState(containerID, state string) error { + clnt.lock(containerID) + container, err := clnt.getContainer(containerID) + if err != nil { + clnt.unlock(containerID) + return err + } + if container.systemPid == 0 { + clnt.unlock(containerID) + return fmt.Errorf("No active process for container %s", containerID) + } + st := "running" + if state == StatePause { + st = "paused" + } + chstate := make(chan struct{}) + _, err = clnt.remote.apiClient.UpdateContainer(context.Background(), &containerd.UpdateContainerRequest{ + Id: containerID, + Pid: InitFriendlyName, + Status: st, + }) + if err != nil { + clnt.unlock(containerID) + return err + } + container.pauseMonitor.append(state, chstate) + clnt.unlock(containerID) + <-chstate + return nil +} + +func (clnt *client) Resume(containerID string) error { + return clnt.setState(containerID, StateResume) +} + +func (clnt *client) Stats(containerID string) (*Stats, error) { + resp, err := clnt.remote.apiClient.Stats(context.Background(), &containerd.StatsRequest{containerID}) + if err != nil { + return nil, err + } + return (*Stats)(resp), nil +} + +// Take care of the old 1.11.0 behavior in case the version upgrade +// happened without a clean daemon shutdown +func (clnt *client) cleanupOldRootfs(containerID string) { + // Unmount and delete the bundle folder + if mts, err := mount.GetMounts(); err == nil { + for _, mts := range mts { + if strings.HasSuffix(mts.Mountpoint, containerID+"/rootfs") { + if err := unix.Unmount(mts.Mountpoint, unix.MNT_DETACH); err == nil { + os.RemoveAll(strings.TrimSuffix(mts.Mountpoint, "/rootfs")) + } + break + } + } + } +} + +func (clnt *client) setExited(containerID string, exitCode uint32) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + + err := clnt.backend.StateChanged(containerID, StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: StateExit, + ExitCode: exitCode, + }}) + + clnt.cleanupOldRootfs(containerID) + + return err +} + +func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) { + cont, err := clnt.getContainerdContainer(containerID) + if err != nil { + return nil, err + } + pids := make([]int, len(cont.Pids)) + for i, p := range cont.Pids { + pids[i] = int(p) + } + return pids, nil +} + +// Summary returns a summary of the processes running in a container. +// This is a no-op on Linux. 
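+// A nil slice with a nil error means there is nothing to report on this
+// platform; callers are expected to handle the empty result.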
+func (clnt *client) Summary(containerID string) ([]Summary, error) { + return nil, nil +} + +func (clnt *client) getContainerdContainer(containerID string) (*containerd.Container, error) { + resp, err := clnt.remote.apiClient.State(context.Background(), &containerd.StateRequest{Id: containerID}) + if err != nil { + return nil, err + } + for _, cont := range resp.Containers { + if cont.Id == containerID { + return cont, nil + } + } + return nil, fmt.Errorf("invalid state response") +} + +func (clnt *client) UpdateResources(containerID string, resources Resources) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + container, err := clnt.getContainer(containerID) + if err != nil { + return err + } + if container.systemPid == 0 { + return fmt.Errorf("No active process for container %s", containerID) + } + _, err = clnt.remote.apiClient.UpdateContainer(context.Background(), &containerd.UpdateContainerRequest{ + Id: containerID, + Pid: InitFriendlyName, + Resources: (*containerd.UpdateResource)(&resources), + }) + if err != nil { + return err + } + return nil +} + +func (clnt *client) getExitNotifier(containerID string) *exitNotifier { + clnt.mapMutex.RLock() + defer clnt.mapMutex.RUnlock() + return clnt.exitNotifiers[containerID] +} + +func (clnt *client) getOrCreateExitNotifier(containerID string) *exitNotifier { + clnt.mapMutex.Lock() + w, ok := clnt.exitNotifiers[containerID] + defer clnt.mapMutex.Unlock() + if !ok { + w = &exitNotifier{c: make(chan struct{}), client: clnt} + clnt.exitNotifiers[containerID] = w + } + return w +} + +func (clnt *client) restore(cont *containerd.Container, lastEvent *containerd.Event, attachStdio StdioCallback, options ...CreateOption) (err error) { + clnt.lock(cont.Id) + defer clnt.unlock(cont.Id) + + logrus.Debugf("libcontainerd: restore container %s state %s", cont.Id, cont.Status) + + containerID := cont.Id + if _, err := clnt.getContainer(containerID); err == nil { + return fmt.Errorf("container %s is already active", containerID) + } + + defer func() { + if err != nil { + clnt.deleteContainer(cont.Id) + } + }() + + container := clnt.newContainer(cont.BundlePath, options...) 
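+	// Rebuild the in-memory container object from containerd's record so
+	// stdio can be reattached and any backlog event can be replayed below.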
+	container.systemPid = systemPid(cont)
+
+	var terminal bool
+	for _, p := range cont.Processes {
+		if p.Pid == InitFriendlyName {
+			terminal = p.Terminal
+		}
+	}
+
+	fifoCtx, cancel := context.WithCancel(context.Background())
+	defer func() {
+		if err != nil {
+			cancel()
+		}
+	}()
+
+	iopipe, err := container.openFifos(fifoCtx, terminal)
+	if err != nil {
+		return err
+	}
+	var stdinOnce sync.Once
+	stdin := iopipe.Stdin
+	iopipe.Stdin = ioutils.NewWriteCloserWrapper(stdin, func() error {
+		var err error
+		stdinOnce.Do(func() { // on error from attach we don't know if stdin was already closed
+			err = stdin.Close()
+		})
+		return err
+	})
+
+	if err := attachStdio(*iopipe); err != nil {
+		container.closeFifos(iopipe)
+		return err
+	}
+
+	clnt.appendContainer(container)
+
+	err = clnt.backend.StateChanged(containerID, StateInfo{
+		CommonStateInfo: CommonStateInfo{
+			State: StateRestore,
+			Pid:   container.systemPid,
+		}})
+
+	if err != nil {
+		container.closeFifos(iopipe)
+		return err
+	}
+
+	if lastEvent != nil {
+		// This should only be a pause or resume event
+		if lastEvent.Type == StatePause || lastEvent.Type == StateResume {
+			return clnt.backend.StateChanged(containerID, StateInfo{
+				CommonStateInfo: CommonStateInfo{
+					State: lastEvent.Type,
+					Pid:   container.systemPid,
+				}})
+		}
+
+		logrus.Warnf("libcontainerd: unexpected backlog event: %#v", lastEvent)
+	}
+
+	return nil
+}
+
+func (clnt *client) getContainerLastEventSinceTime(id string, tsp *timestamp.Timestamp) (*containerd.Event, error) {
+	er := &containerd.EventsRequest{
+		Timestamp:  tsp,
+		StoredOnly: true,
+		Id:         id,
+	}
+	events, err := clnt.remote.apiClient.Events(context.Background(), er)
+	if err != nil {
+		logrus.Errorf("libcontainerd: failed to get container events stream for %s: %q", er.Id, err)
+		return nil, err
+	}
+
+	var ev *containerd.Event
+	for {
+		e, err := events.Recv()
+		if err != nil {
+			if err.Error() == "EOF" {
+				break
+			}
+			logrus.Errorf("libcontainerd: failed to get container event for %s: %q", id, err)
+			return nil, err
+		}
+		ev = e
+		logrus.Debugf("libcontainerd: received past event %#v", ev)
+	}
+
+	return ev, nil
+}
+
+func (clnt *client) getContainerLastEvent(id string) (*containerd.Event, error) {
+	ev, err := clnt.getContainerLastEventSinceTime(id, clnt.remote.restoreFromTimestamp)
+	if err == nil && ev == nil {
+		// If ev is nil and the container is running in containerd,
+		// we have already consumed all the events of the
+		// container, including the "exit" one.
+		// Thus, we request all events containerd has in memory for
+		// this container in order to get the last one (which should
+		// be an exit event)
+		logrus.Warnf("libcontainerd: client is out of sync, restore was called on a fully synced container (%s).", id)
+		// Request all events since beginning of time
+		t := time.Unix(0, 0)
+		tsp, err := ptypes.TimestampProto(t)
+		if err != nil {
+			logrus.Errorf("libcontainerd: getLastEventSinceTime() failed to convert timestamp: %q", err)
+			return nil, err
+		}
+
+		return clnt.getContainerLastEventSinceTime(id, tsp)
+	}
+
+	return ev, err
+}
+
+func (clnt *client) Restore(containerID string, attachStdio StdioCallback, options ...CreateOption) error {
+	// Synchronize with live events
+	clnt.remote.Lock()
+	defer clnt.remote.Unlock()
+	// Check that containerd still knows this container.
+	//
+	// In the unlikely event that Restore for this container processes
+	// its past event before the main loop, the event will be
+	// processed twice. 
However, this is not an issue as all those + // events will do is change the state of the container to be + // exactly the same. + cont, err := clnt.getContainerdContainer(containerID) + // Get its last event + ev, eerr := clnt.getContainerLastEvent(containerID) + if err != nil || containerd_runtime_types.State(cont.Status) == containerd_runtime_types.Stopped { + if err != nil { + logrus.Warnf("libcontainerd: failed to retrieve container %s state: %v", containerID, err) + } + if ev != nil && (ev.Pid != InitFriendlyName || ev.Type != StateExit) { + // Wait a while for the exit event + timeout := time.NewTimer(10 * time.Second) + tick := time.NewTicker(100 * time.Millisecond) + stop: + for { + select { + case <-timeout.C: + break stop + case <-tick.C: + ev, eerr = clnt.getContainerLastEvent(containerID) + if eerr != nil { + break stop + } + if ev != nil && ev.Pid == InitFriendlyName && ev.Type == StateExit { + break stop + } + } + } + timeout.Stop() + tick.Stop() + } + + // get the exit status for this container, if we don't have + // one, indicate an error + ec := uint32(255) + if eerr == nil && ev != nil && ev.Pid == InitFriendlyName && ev.Type == StateExit { + ec = ev.Status + } + clnt.setExited(containerID, ec) + + return nil + } + + // container is still alive + if clnt.liveRestore { + if err := clnt.restore(cont, ev, attachStdio, options...); err != nil { + logrus.Errorf("libcontainerd: error restoring %s: %v", containerID, err) + } + return nil + } + + // Kill the container if liveRestore == false + w := clnt.getOrCreateExitNotifier(containerID) + clnt.lock(cont.Id) + container := clnt.newContainer(cont.BundlePath) + container.systemPid = systemPid(cont) + clnt.appendContainer(container) + clnt.unlock(cont.Id) + + container.discardFifos() + + if err := clnt.Signal(containerID, int(unix.SIGTERM)); err != nil { + logrus.Errorf("libcontainerd: error sending sigterm to %v: %v", containerID, err) + } + + // Let the main loop handle the exit event + clnt.remote.Unlock() + + if ev != nil && ev.Type == StatePause { + // resume container, it depends on the main loop, so we do it after Unlock() + logrus.Debugf("libcontainerd: %s was paused, resuming it so it can die", containerID) + if err := clnt.Resume(containerID); err != nil { + return fmt.Errorf("failed to resume container: %v", err) + } + } + + select { + case <-time.After(10 * time.Second): + if err := clnt.Signal(containerID, int(unix.SIGKILL)); err != nil { + logrus.Errorf("libcontainerd: error sending sigkill to %v: %v", containerID, err) + } + select { + case <-time.After(2 * time.Second): + case <-w.wait(): + // relock because of the defer + clnt.remote.Lock() + return nil + } + case <-w.wait(): + // relock because of the defer + clnt.remote.Lock() + return nil + } + // relock because of the defer + clnt.remote.Lock() + + clnt.deleteContainer(containerID) + + return clnt.setExited(containerID, uint32(255)) +} + +func (clnt *client) CreateCheckpoint(containerID string, checkpointID string, checkpointDir string, exit bool) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + if _, err := clnt.getContainer(containerID); err != nil { + return err + } + + _, err := clnt.remote.apiClient.CreateCheckpoint(context.Background(), &containerd.CreateCheckpointRequest{ + Id: containerID, + Checkpoint: &containerd.Checkpoint{ + Name: checkpointID, + Exit: exit, + Tcp: true, + UnixSockets: true, + Shell: false, + EmptyNS: []string{"network"}, + }, + CheckpointDir: checkpointDir, + }) + return err +} + +func (clnt *client) 
DeleteCheckpoint(containerID string, checkpointID string, checkpointDir string) error {
+	clnt.lock(containerID)
+	defer clnt.unlock(containerID)
+	if _, err := clnt.getContainer(containerID); err != nil {
+		return err
+	}
+
+	_, err := clnt.remote.apiClient.DeleteCheckpoint(context.Background(), &containerd.DeleteCheckpointRequest{
+		Id:            containerID,
+		Name:          checkpointID,
+		CheckpointDir: checkpointDir,
+	})
+	return err
+}
+
+func (clnt *client) ListCheckpoints(containerID string, checkpointDir string) (*Checkpoints, error) {
+	clnt.lock(containerID)
+	defer clnt.unlock(containerID)
+	if _, err := clnt.getContainer(containerID); err != nil {
+		return nil, err
+	}
+
+	resp, err := clnt.remote.apiClient.ListCheckpoint(context.Background(), &containerd.ListCheckpointRequest{
+		Id:            containerID,
+		CheckpointDir: checkpointDir,
+	})
+	if err != nil {
+		return nil, err
+	}
+	return (*Checkpoints)(resp), nil
+}
diff --git a/vendor/github.com/moby/moby/libcontainerd/client_solaris.go b/vendor/github.com/moby/moby/libcontainerd/client_solaris.go
new file mode 100644
index 000000000..cb939975f
--- /dev/null
+++ b/vendor/github.com/moby/moby/libcontainerd/client_solaris.go
@@ -0,0 +1,101 @@
+package libcontainerd
+
+import (
+	containerd "github.com/containerd/containerd/api/grpc/types" // needed by GetServerVersion below
+	"golang.org/x/net/context"
+)
+
+type client struct {
+	clientCommon
+
+	// Platform specific properties below here.
+	remote        *remote
+	q             queue
+	exitNotifiers map[string]*exitNotifier
+	liveRestore   bool
+}
+
+// GetServerVersion returns the connected server version information
+func (clnt *client) GetServerVersion(ctx context.Context) (*ServerVersion, error) {
+	resp, err := clnt.remote.apiClient.GetServerVersion(ctx, &containerd.GetServerVersionRequest{})
+	if err != nil {
+		return nil, err
+	}
+
+	sv := &ServerVersion{
+		GetServerVersionResponse: *resp,
+	}
+
+	return sv, nil
+}
+
+func (clnt *client) AddProcess(ctx context.Context, containerID, processFriendlyName string, specp Process, attachStdio StdioCallback) (int, error) {
+	return -1, nil
+}
+
+func (clnt *client) SignalProcess(containerID string, pid string, sig int) error {
+	return nil
+}
+
+func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error {
+	return nil
+}
+
+func (clnt *client) Pause(containerID string) error {
+	return nil
+}
+
+func (clnt *client) Resume(containerID string) error {
+	return nil
+}
+
+func (clnt *client) Stats(containerID string) (*Stats, error) {
+	return nil, nil
+}
+
+func (clnt *client) getExitNotifier(containerID string) *exitNotifier {
+	clnt.mapMutex.RLock()
+	defer clnt.mapMutex.RUnlock()
+	return clnt.exitNotifiers[containerID]
+}
+
+func (clnt *client) getOrCreateExitNotifier(containerID string) *exitNotifier {
+	clnt.mapMutex.Lock()
+	defer clnt.mapMutex.Unlock()
+	w, ok := clnt.exitNotifiers[containerID]
+	if !ok {
+		w = &exitNotifier{c: make(chan struct{}), client: clnt}
+		clnt.exitNotifiers[containerID] = w
+	}
+	return w
+}
+
+// Restore is the handler for restoring a container
+func (clnt *client) Restore(containerID string, attachStdio StdioCallback, options ...CreateOption) error {
+	return nil
+}
+
+func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) {
+	return nil, nil
+}
+
+// Summary returns a summary of the processes running in a container.
+func (clnt *client) Summary(containerID string) ([]Summary, error) {
+	return nil, nil
+}
+
+// UpdateResources updates resources for a running container. 
+func (clnt *client) UpdateResources(containerID string, resources Resources) error { + // Updating resource isn't supported on Solaris + // but we should return nil for enabling updating container + return nil +} + +func (clnt *client) CreateCheckpoint(containerID string, checkpointID string, checkpointDir string, exit bool) error { + return nil +} + +func (clnt *client) DeleteCheckpoint(containerID string, checkpointID string, checkpointDir string) error { + return nil +} + +func (clnt *client) ListCheckpoints(containerID string, checkpointDir string) (*Checkpoints, error) { + return nil, nil +} diff --git a/vendor/github.com/moby/moby/libcontainerd/client_unix.go b/vendor/github.com/moby/moby/libcontainerd/client_unix.go new file mode 100644 index 000000000..6dbf3af06 --- /dev/null +++ b/vendor/github.com/moby/moby/libcontainerd/client_unix.go @@ -0,0 +1,141 @@ +// +build linux solaris + +package libcontainerd + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + containerd "github.com/containerd/containerd/api/grpc/types" + "github.com/docker/docker/pkg/idtools" + specs "github.com/opencontainers/runtime-spec/specs-go" + "golang.org/x/net/context" +) + +func (clnt *client) prepareBundleDir(uid, gid int) (string, error) { + root, err := filepath.Abs(clnt.remote.stateDir) + if err != nil { + return "", err + } + if uid == 0 && gid == 0 { + return root, nil + } + p := string(filepath.Separator) + for _, d := range strings.Split(root, string(filepath.Separator))[1:] { + p = filepath.Join(p, d) + fi, err := os.Stat(p) + if err != nil && !os.IsNotExist(err) { + return "", err + } + if os.IsNotExist(err) || fi.Mode()&1 == 0 { + p = fmt.Sprintf("%s.%d.%d", p, uid, gid) + if err := idtools.MkdirAndChown(p, 0700, idtools.IDPair{uid, gid}); err != nil && !os.IsExist(err) { + return "", err + } + } + } + return p, nil +} + +func (clnt *client) Create(containerID string, checkpoint string, checkpointDir string, spec specs.Spec, attachStdio StdioCallback, options ...CreateOption) (err error) { + clnt.lock(containerID) + defer clnt.unlock(containerID) + + if _, err := clnt.getContainer(containerID); err == nil { + return fmt.Errorf("Container %s is already active", containerID) + } + + uid, gid, err := getRootIDs(specs.Spec(spec)) + if err != nil { + return err + } + dir, err := clnt.prepareBundleDir(uid, gid) + if err != nil { + return err + } + + container := clnt.newContainer(filepath.Join(dir, containerID), options...) 
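+	// Start from a clean slate: clean() removes any state left behind by
+	// a previous container that used the same bundle directory.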
+	if err := container.clean(); err != nil {
+		return err
+	}
+
+	defer func() {
+		if err != nil {
+			container.clean()
+			clnt.deleteContainer(containerID)
+		}
+	}()
+
+	if err := idtools.MkdirAllAndChown(container.dir, 0700, idtools.IDPair{uid, gid}); err != nil && !os.IsExist(err) {
+		return err
+	}
+
+	f, err := os.Create(filepath.Join(container.dir, configFilename))
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	if err := json.NewEncoder(f).Encode(spec); err != nil {
+		return err
+	}
+	return container.start(&spec, checkpoint, checkpointDir, attachStdio)
+}
+
+func (clnt *client) Signal(containerID string, sig int) error {
+	clnt.lock(containerID)
+	defer clnt.unlock(containerID)
+	_, err := clnt.remote.apiClient.Signal(context.Background(), &containerd.SignalRequest{
+		Id:     containerID,
+		Pid:    InitFriendlyName,
+		Signal: uint32(sig),
+	})
+	return err
+}
+
+func (clnt *client) newContainer(dir string, options ...CreateOption) *container {
+	container := &container{
+		containerCommon: containerCommon{
+			process: process{
+				dir: dir,
+				processCommon: processCommon{
+					containerID:  filepath.Base(dir),
+					client:       clnt,
+					friendlyName: InitFriendlyName,
+				},
+			},
+			processes: make(map[string]*process),
+		},
+	}
+	for _, option := range options {
+		if err := option.Apply(container); err != nil {
+			logrus.Errorf("libcontainerd: newContainer(): %v", err)
+		}
+	}
+	return container
+}
+
+type exitNotifier struct {
+	id     string
+	client *client
+	c      chan struct{}
+	once   sync.Once
+}
+
+func (en *exitNotifier) close() {
+	en.once.Do(func() {
+		close(en.c)
+		en.client.mapMutex.Lock()
+		if en == en.client.exitNotifiers[en.id] {
+			delete(en.client.exitNotifiers, en.id)
+		}
+		en.client.mapMutex.Unlock()
+	})
+}
+func (en *exitNotifier) wait() <-chan struct{} {
+	return en.c
+}
diff --git a/vendor/github.com/moby/moby/libcontainerd/client_windows.go b/vendor/github.com/moby/moby/libcontainerd/client_windows.go
new file mode 100644
index 000000000..455e8e5e6
--- /dev/null
+++ b/vendor/github.com/moby/moby/libcontainerd/client_windows.go
@@ -0,0 +1,754 @@
+package libcontainerd
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+	"syscall"
+	"time"
+
+	"golang.org/x/net/context"
+
+	"github.com/Microsoft/hcsshim"
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/sysinfo"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+type client struct {
+	clientCommon
+
+	// Platform specific properties below here (none presently on Windows)
+}
+
+// Win32 error codes that are used for various workarounds.
+// These really should be ALL_CAPS to match golang's syscall library and standard
+// Win32 error conventions, but golint insists on CamelCase.
+const (
+	CoEClassstring     = syscall.Errno(0x800401F3) // Invalid class string
+	ErrorNoNetwork     = syscall.Errno(1222)       // The network is not present or not started
+	ErrorBadPathname   = syscall.Errno(161)        // The specified path is invalid
+	ErrorInvalidObject = syscall.Errno(0x800710D8) // The object identifier does not represent a valid object
+)
+
+// defaultOwner is a tag passed to HCS to allow it to differentiate between
+// container creator management stacks. We hard code "docker" in the case
+// of docker.
+const defaultOwner = "docker"
+
+// Create is the entrypoint to create a container from a spec, and if successfully
+// created, start it too. The table below shows the fields required for HCS JSON
+// calling parameters; fields that are not populated are omitted.
+// +-----------------+--------------------------------------------+---------------------------------------------------+ +// | | Isolation=Process | Isolation=Hyper-V | +// +-----------------+--------------------------------------------+---------------------------------------------------+ +// | VolumePath | \\?\\Volume{GUIDa} | | +// | LayerFolderPath | %root%\windowsfilter\containerID | %root%\windowsfilter\containerID (servicing only) | +// | Layers[] | ID=GUIDb;Path=%root%\windowsfilter\layerID | ID=GUIDb;Path=%root%\windowsfilter\layerID | +// | HvRuntime | | ImagePath=%root%\BaseLayerID\UtilityVM | +// +-----------------+--------------------------------------------+---------------------------------------------------+ +// +// Isolation=Process example: +// +// { +// "SystemType": "Container", +// "Name": "5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776", +// "Owner": "docker", +// "VolumePath": "\\\\\\\\?\\\\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}", +// "IgnoreFlushesDuringBoot": true, +// "LayerFolderPath": "C:\\\\control\\\\windowsfilter\\\\5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776", +// "Layers": [{ +// "ID": "18955d65-d45a-557b-bf1c-49d6dfefc526", +// "Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c" +// }], +// "HostName": "5e0055c814a6", +// "MappedDirectories": [], +// "HvPartition": false, +// "EndpointList": ["eef2649d-bb17-4d53-9937-295a8efe6f2c"], +// "Servicing": false +//} +// +// Isolation=Hyper-V example: +// +//{ +// "SystemType": "Container", +// "Name": "475c2c58933b72687a88a441e7e0ca4bd72d76413c5f9d5031fee83b98f6045d", +// "Owner": "docker", +// "IgnoreFlushesDuringBoot": true, +// "Layers": [{ +// "ID": "18955d65-d45a-557b-bf1c-49d6dfefc526", +// "Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c" +// }], +// "HostName": "475c2c58933b", +// "MappedDirectories": [], +// "HvPartition": true, +// "EndpointList": ["e1bb1e61-d56f-405e-b75d-fd520cefa0cb"], +// "DNSSearchList": "a.com,b.com,c.com", +// "HvRuntime": { +// "ImagePath": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c\\\\UtilityVM" +// }, +// "Servicing": false +//} +func (clnt *client) Create(containerID string, checkpoint string, checkpointDir string, spec specs.Spec, attachStdio StdioCallback, options ...CreateOption) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + if b, err := json.Marshal(spec); err == nil { + logrus.Debugln("libcontainerd: client.Create() with spec", string(b)) + } + osName := spec.Platform.OS + if osName == "windows" { + return clnt.createWindows(containerID, checkpoint, checkpointDir, spec, attachStdio, options...) + } + return clnt.createLinux(containerID, checkpoint, checkpointDir, spec, attachStdio, options...) 
+}
+
+func (clnt *client) createWindows(containerID string, checkpoint string, checkpointDir string, spec specs.Spec, attachStdio StdioCallback, options ...CreateOption) error {
+	configuration := &hcsshim.ContainerConfig{
+		SystemType: "Container",
+		Name:       containerID,
+		Owner:      defaultOwner,
+		IgnoreFlushesDuringBoot: false,
+		HostName:                spec.Hostname,
+		HvPartition:             false,
+	}
+
+	if spec.Windows.Resources != nil {
+		if spec.Windows.Resources.CPU != nil {
+			if spec.Windows.Resources.CPU.Count != nil {
+				// This check is being done here rather than in adaptContainerSettings
+				// because we don't want to update the HostConfig in case this container
+				// is moved to a host with more CPUs than this one.
+				cpuCount := *spec.Windows.Resources.CPU.Count
+				hostCPUCount := uint64(sysinfo.NumCPU())
+				if cpuCount > hostCPUCount {
+					logrus.Warnf("Changing requested CPUCount of %d to current number of processors, %d", cpuCount, hostCPUCount)
+					cpuCount = hostCPUCount
+				}
+				configuration.ProcessorCount = uint32(cpuCount)
+			}
+			if spec.Windows.Resources.CPU.Shares != nil {
+				configuration.ProcessorWeight = uint64(*spec.Windows.Resources.CPU.Shares)
+			}
+			if spec.Windows.Resources.CPU.Maximum != nil {
+				configuration.ProcessorMaximum = int64(*spec.Windows.Resources.CPU.Maximum)
+			}
+		}
+		if spec.Windows.Resources.Memory != nil {
+			if spec.Windows.Resources.Memory.Limit != nil {
+				configuration.MemoryMaximumInMB = int64(*spec.Windows.Resources.Memory.Limit) / 1024 / 1024
+			}
+		}
+		if spec.Windows.Resources.Storage != nil {
+			if spec.Windows.Resources.Storage.Bps != nil {
+				configuration.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps
+			}
+			if spec.Windows.Resources.Storage.Iops != nil {
+				configuration.StorageIOPSMaximum = *spec.Windows.Resources.Storage.Iops
+			}
+		}
+	}
+
+	var layerOpt *LayerOption
+	for _, option := range options {
+		if s, ok := option.(*ServicingOption); ok {
+			configuration.Servicing = s.IsServicing
+			continue
+		}
+		if f, ok := option.(*FlushOption); ok {
+			configuration.IgnoreFlushesDuringBoot = f.IgnoreFlushesDuringBoot
+			continue
+		}
+		if h, ok := option.(*HyperVIsolationOption); ok {
+			configuration.HvPartition = h.IsHyperV
+			continue
+		}
+		if l, ok := option.(*LayerOption); ok {
+			layerOpt = l
+		}
+		if n, ok := option.(*NetworkEndpointsOption); ok {
+			configuration.EndpointList = n.Endpoints
+			configuration.AllowUnqualifiedDNSQuery = n.AllowUnqualifiedDNSQuery
+			if n.DNSSearchList != nil {
+				configuration.DNSSearchList = strings.Join(n.DNSSearchList, ",")
+			}
+			configuration.NetworkSharedContainerName = n.NetworkSharedContainerID
+			continue
+		}
+		if c, ok := option.(*CredentialsOption); ok {
+			configuration.Credentials = c.Credentials
+			continue
+		}
+	}
+
+	// We must have a layer option with at least one path
+	if layerOpt == nil || layerOpt.LayerPaths == nil {
+		return fmt.Errorf("no layer option or paths were supplied to the runtime")
+	}
+
+	if configuration.HvPartition {
+		// Find the upper-most utility VM image, since the utility VM does not
+		// use layering in RS1.
+		// TODO @swernli/jhowardmsft at some point post RS1 this may be re-locatable.
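		// As a hypothetical example of the search below: given
		//
		//	LayerPaths = []string{
		//		`C:\control\windowsfilter\app-layer`,     // no UtilityVM subdirectory
		//		`C:\control\windowsfilter\base-os-layer`, // contains UtilityVM
		//	}
		//
		// the base OS layer is selected and HvRuntime.ImagePath becomes
		// `C:\control\windowsfilter\base-os-layer\UtilityVM`.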
+ var uvmImagePath string + for _, path := range layerOpt.LayerPaths { + fullPath := filepath.Join(path, "UtilityVM") + _, err := os.Stat(fullPath) + if err == nil { + uvmImagePath = fullPath + break + } + if !os.IsNotExist(err) { + return err + } + } + if uvmImagePath == "" { + return errors.New("utility VM image could not be found") + } + configuration.HvRuntime = &hcsshim.HvRuntime{ImagePath: uvmImagePath} + } else { + configuration.VolumePath = spec.Root.Path + } + + configuration.LayerFolderPath = layerOpt.LayerFolderPath + + for _, layerPath := range layerOpt.LayerPaths { + _, filename := filepath.Split(layerPath) + g, err := hcsshim.NameToGuid(filename) + if err != nil { + return err + } + configuration.Layers = append(configuration.Layers, hcsshim.Layer{ + ID: g.ToString(), + Path: layerPath, + }) + } + + // Add the mounts (volumes, bind mounts etc) to the structure + mds := make([]hcsshim.MappedDir, len(spec.Mounts)) + for i, mount := range spec.Mounts { + mds[i] = hcsshim.MappedDir{ + HostPath: mount.Source, + ContainerPath: mount.Destination, + ReadOnly: false, + } + for _, o := range mount.Options { + if strings.ToLower(o) == "ro" { + mds[i].ReadOnly = true + } + } + } + configuration.MappedDirectories = mds + + hcsContainer, err := hcsshim.CreateContainer(containerID, configuration) + if err != nil { + return err + } + + // Construct a container object for calling start on it. + container := &container{ + containerCommon: containerCommon{ + process: process{ + processCommon: processCommon{ + containerID: containerID, + client: clnt, + friendlyName: InitFriendlyName, + }, + }, + processes: make(map[string]*process), + }, + ociSpec: spec, + hcsContainer: hcsContainer, + } + + container.options = options + for _, option := range options { + if err := option.Apply(container); err != nil { + logrus.Errorf("libcontainerd: %v", err) + } + } + + // Call start, and if it fails, delete the container from our + // internal structure, start will keep HCS in sync by deleting the + // container there. + logrus.Debugf("libcontainerd: createWindows() id=%s, Calling start()", containerID) + if err := container.start(attachStdio); err != nil { + clnt.deleteContainer(containerID) + return err + } + + logrus.Debugf("libcontainerd: createWindows() id=%s completed successfully", containerID) + return nil + +} + +func (clnt *client) createLinux(containerID string, checkpoint string, checkpointDir string, spec specs.Spec, attachStdio StdioCallback, options ...CreateOption) error { + logrus.Debugf("libcontainerd: createLinux(): containerId %s ", containerID) + + // TODO @jhowardmsft LCOW Support: This needs to be configurable, not hard-coded. + // However, good-enough for the LCOW bring-up. 
+	configuration := &hcsshim.ContainerConfig{
+		HvPartition:                 true,
+		Name:                        containerID,
+		SystemType:                  "container",
+		ContainerType:               "linux",
+		Owner:                       defaultOwner,
+		TerminateOnLastHandleClosed: true,
+		HvRuntime: &hcsshim.HvRuntime{
+			ImagePath:       `c:\Program Files\Linux Containers`,
+			LinuxKernelFile: `bootx64.efi`,
+			LinuxInitrdFile: `initrd.img`,
+		},
+	}
+
+	var layerOpt *LayerOption
+	for _, option := range options {
+		if l, ok := option.(*LayerOption); ok {
+			layerOpt = l
+		}
+	}
+
+	// We must have a layer option with at least one path
+	if layerOpt == nil || layerOpt.LayerPaths == nil {
+		return fmt.Errorf("no layer option or paths were supplied to the runtime")
+	}
+
+	// LayerFolderPath (writeable layer) + Layers (Guid + path)
+	configuration.LayerFolderPath = layerOpt.LayerFolderPath
+	for _, layerPath := range layerOpt.LayerPaths {
+		_, filename := filepath.Split(layerPath)
+		g, err := hcsshim.NameToGuid(filename)
+		if err != nil {
+			return err
+		}
+		configuration.Layers = append(configuration.Layers, hcsshim.Layer{
+			ID:   g.ToString(),
+			Path: filepath.Join(layerPath, "layer.vhd"),
+		})
+	}
+
+	for _, option := range options {
+		if n, ok := option.(*NetworkEndpointsOption); ok {
+			configuration.EndpointList = n.Endpoints
+			configuration.AllowUnqualifiedDNSQuery = n.AllowUnqualifiedDNSQuery
+			if n.DNSSearchList != nil {
+				configuration.DNSSearchList = strings.Join(n.DNSSearchList, ",")
+			}
+			configuration.NetworkSharedContainerName = n.NetworkSharedContainerID
+			break
+		}
+	}
+
+	hcsContainer, err := hcsshim.CreateContainer(containerID, configuration)
+	if err != nil {
+		return err
+	}
+
+	// Construct a container object for calling start on it.
+	container := &container{
+		containerCommon: containerCommon{
+			process: process{
+				processCommon: processCommon{
+					containerID:  containerID,
+					client:       clnt,
+					friendlyName: InitFriendlyName,
+				},
+			},
+			processes: make(map[string]*process),
+		},
+		ociSpec:      spec,
+		hcsContainer: hcsContainer,
+	}
+
+	container.options = options
+	for _, option := range options {
+		if err := option.Apply(container); err != nil {
+			logrus.Errorf("libcontainerd: createLinux() %v", err)
+		}
+	}
+
+	// Call start, and if it fails, delete the container from our
+	// internal structure, start will keep HCS in sync by deleting the
+	// container there.
+	logrus.Debugf("libcontainerd: createLinux() id=%s, Calling start()", containerID)
+	if err := container.start(attachStdio); err != nil {
+		clnt.deleteContainer(containerID)
+		return err
+	}
+
+	logrus.Debugf("libcontainerd: createLinux() id=%s completed successfully", containerID)
+	return nil
+}
+
+// AddProcess is the handler for adding a process to an already running
+// container. It's called through docker exec. It returns the system pid of the
+// exec'd process.
+func (clnt *client) AddProcess(ctx context.Context, containerID, processFriendlyName string, procToAdd Process, attachStdio StdioCallback) (int, error) {
+	clnt.lock(containerID)
+	defer clnt.unlock(containerID)
+	container, err := clnt.getContainer(containerID)
+	if err != nil {
+		return -1, err
+	}
+	// Note we always tell HCS to
+	// create stdout as it's required regardless of '-i' or '-t' options, so that
+	// docker can always grab the output through logs. We also tell HCS to always
+	// create stdin, even if it's not used - it will be closed shortly. Stderr
+	// is only created if we're not running with -t.
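	// Sketch of the resulting pipe configuration:
	//
	//	procToAdd.Terminal == true  -> stdin + stdout pipes, console emulation, no stderr
	//	procToAdd.Terminal == false -> stdin + stdout + stderr pipes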
+ createProcessParms := hcsshim.ProcessConfig{ + CreateStdInPipe: true, + CreateStdOutPipe: true, + CreateStdErrPipe: !procToAdd.Terminal, + } + if procToAdd.Terminal { + createProcessParms.EmulateConsole = true + createProcessParms.ConsoleSize[0] = uint(procToAdd.ConsoleSize.Height) + createProcessParms.ConsoleSize[1] = uint(procToAdd.ConsoleSize.Width) + } + + // Take working directory from the process to add if it is defined, + // otherwise take from the first process. + if procToAdd.Cwd != "" { + createProcessParms.WorkingDirectory = procToAdd.Cwd + } else { + createProcessParms.WorkingDirectory = container.ociSpec.Process.Cwd + } + + // Configure the environment for the process + createProcessParms.Environment = setupEnvironmentVariables(procToAdd.Env) + if container.ociSpec.Platform.OS == "windows" { + createProcessParms.CommandLine = strings.Join(procToAdd.Args, " ") + } else { + createProcessParms.CommandArgs = procToAdd.Args + } + createProcessParms.User = procToAdd.User.Username + + logrus.Debugf("libcontainerd: commandLine: %s", createProcessParms.CommandLine) + + // Start the command running in the container. + var stdout, stderr io.ReadCloser + var stdin io.WriteCloser + newProcess, err := container.hcsContainer.CreateProcess(&createProcessParms) + if err != nil { + logrus.Errorf("libcontainerd: AddProcess(%s) CreateProcess() failed %s", containerID, err) + return -1, err + } + + pid := newProcess.Pid() + + stdin, stdout, stderr, err = newProcess.Stdio() + if err != nil { + logrus.Errorf("libcontainerd: %s getting std pipes failed %s", containerID, err) + return -1, err + } + + iopipe := &IOPipe{Terminal: procToAdd.Terminal} + iopipe.Stdin = createStdInCloser(stdin, newProcess) + + // Convert io.ReadClosers to io.Readers + if stdout != nil { + iopipe.Stdout = ioutil.NopCloser(&autoClosingReader{ReadCloser: stdout}) + } + if stderr != nil { + iopipe.Stderr = ioutil.NopCloser(&autoClosingReader{ReadCloser: stderr}) + } + + proc := &process{ + processCommon: processCommon{ + containerID: containerID, + friendlyName: processFriendlyName, + client: clnt, + systemPid: uint32(pid), + }, + hcsProcess: newProcess, + } + + // Add the process to the container's list of processes + container.processes[processFriendlyName] = proc + + // Tell the engine to attach streams back to the client + if err := attachStdio(*iopipe); err != nil { + return -1, err + } + + // Spin up a go routine waiting for exit to handle cleanup + go container.waitExit(proc, false) + + return pid, nil +} + +// Signal handles `docker stop` on Windows. While Linux has support for +// the full range of signals, signals aren't really implemented on Windows. +// We fake supporting regular stop and -9 to force kill. +func (clnt *client) Signal(containerID string, sig int) error { + var ( + cont *container + err error + ) + + // Get the container as we need it to get the container handle. 
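	// Once the handle is obtained, the mapping below is deliberately coarse;
	// as a sketch:
	//
	//	syscall.Signal(sig) == syscall.SIGKILL -> cont.hcsContainer.Terminate()
	//	any other signal                       -> cont.hcsContainer.Shutdown()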
+ clnt.lock(containerID) + defer clnt.unlock(containerID) + if cont, err = clnt.getContainer(containerID); err != nil { + return err + } + + cont.manualStopRequested = true + + logrus.Debugf("libcontainerd: Signal() containerID=%s sig=%d pid=%d", containerID, sig, cont.systemPid) + + if syscall.Signal(sig) == syscall.SIGKILL { + // Terminate the compute system + if err := cont.hcsContainer.Terminate(); err != nil { + if !hcsshim.IsPending(err) { + logrus.Errorf("libcontainerd: failed to terminate %s - %q", containerID, err) + } + } + } else { + // Shut down the container + if err := cont.hcsContainer.Shutdown(); err != nil { + if !hcsshim.IsPending(err) && !hcsshim.IsAlreadyStopped(err) { + // ignore errors + logrus.Warnf("libcontainerd: failed to shutdown container %s: %q", containerID, err) + } + } + } + + return nil +} + +// While Linux has support for the full range of signals, signals aren't really implemented on Windows. +// We try to terminate the specified process whatever signal is requested. +func (clnt *client) SignalProcess(containerID string, processFriendlyName string, sig int) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + cont, err := clnt.getContainer(containerID) + if err != nil { + return err + } + + for _, p := range cont.processes { + if p.friendlyName == processFriendlyName { + return p.hcsProcess.Kill() + } + } + + return fmt.Errorf("SignalProcess could not find process %s in %s", processFriendlyName, containerID) +} + +// Resize handles a CLI event to resize an interactive docker run or docker exec +// window. +func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error { + // Get the libcontainerd container object + clnt.lock(containerID) + defer clnt.unlock(containerID) + cont, err := clnt.getContainer(containerID) + if err != nil { + return err + } + + h, w := uint16(height), uint16(width) + + if processFriendlyName == InitFriendlyName { + logrus.Debugln("libcontainerd: resizing systemPID in", containerID, cont.process.systemPid) + return cont.process.hcsProcess.ResizeConsole(w, h) + } + + for _, p := range cont.processes { + if p.friendlyName == processFriendlyName { + logrus.Debugln("libcontainerd: resizing exec'd process", containerID, p.systemPid) + return p.hcsProcess.ResizeConsole(w, h) + } + } + + return fmt.Errorf("Resize could not find containerID %s to resize", containerID) + +} + +// Pause handles pause requests for containers +func (clnt *client) Pause(containerID string) error { + unlockContainer := true + // Get the libcontainerd container object + clnt.lock(containerID) + defer func() { + if unlockContainer { + clnt.unlock(containerID) + } + }() + container, err := clnt.getContainer(containerID) + if err != nil { + return err + } + + for _, option := range container.options { + if h, ok := option.(*HyperVIsolationOption); ok { + if !h.IsHyperV { + return errors.New("cannot pause Windows Server Containers") + } + break + } + } + + err = container.hcsContainer.Pause() + if err != nil { + return err + } + + // Unlock container before calling back into the daemon + unlockContainer = false + clnt.unlock(containerID) + + return clnt.backend.StateChanged(containerID, StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: StatePause, + }}) +} + +// Resume handles resume requests for containers +func (clnt *client) Resume(containerID string) error { + unlockContainer := true + // Get the libcontainerd container object + clnt.lock(containerID) + defer func() { + if unlockContainer { + 
clnt.unlock(containerID)
+		}
+	}()
+	container, err := clnt.getContainer(containerID)
+	if err != nil {
+		return err
+	}
+
+	// This should never happen, since Windows Server Containers cannot be paused
+	for _, option := range container.options {
+		if h, ok := option.(*HyperVIsolationOption); ok {
+			if !h.IsHyperV {
+				return errors.New("cannot resume Windows Server Containers")
+			}
+			break
+		}
+	}
+
+	err = container.hcsContainer.Resume()
+	if err != nil {
+		return err
+	}
+
+	// Unlock container before calling back into the daemon
+	unlockContainer = false
+	clnt.unlock(containerID)
+
+	return clnt.backend.StateChanged(containerID, StateInfo{
+		CommonStateInfo: CommonStateInfo{
+			State: StateResume,
+		}})
+}
+
+// Stats handles stats requests for containers
+func (clnt *client) Stats(containerID string) (*Stats, error) {
+	// Get the libcontainerd container object
+	clnt.lock(containerID)
+	defer clnt.unlock(containerID)
+	container, err := clnt.getContainer(containerID)
+	if err != nil {
+		return nil, err
+	}
+	s, err := container.hcsContainer.Statistics()
+	if err != nil {
+		return nil, err
+	}
+	st := Stats(s)
+	return &st, nil
+}
+
+// Restore is the handler for restoring a container
+func (clnt *client) Restore(containerID string, _ StdioCallback, unusedOnWindows ...CreateOption) error {
+	logrus.Debugf("libcontainerd: Restore(%s)", containerID)
+
+	// TODO Windows: On RS1, a re-attach isn't possible.
+	// However, there is a scenario in which there is an issue.
+	// Consider a background container. The daemon dies unexpectedly.
+	// HCS will still have the compute service alive and running.
+	// For consistency, we call in to shut it down regardless of whether
+	// HCS knows about it.
+	// We explicitly just log a warning if the terminate fails.
+	// Then we tell the backend the container exited.
+	if hc, err := hcsshim.OpenContainer(containerID); err == nil {
+		const terminateTimeout = time.Minute * 2
+		err := hc.Terminate()
+
+		if hcsshim.IsPending(err) {
+			err = hc.WaitTimeout(terminateTimeout)
+		} else if hcsshim.IsAlreadyStopped(err) {
+			err = nil
+		}
+
+		if err != nil {
+			logrus.Warnf("libcontainerd: failed to terminate %s on restore - %q", containerID, err)
+			return err
+		}
+	}
+	return clnt.backend.StateChanged(containerID, StateInfo{
+		CommonStateInfo: CommonStateInfo{
+			State:    StateExit,
+			ExitCode: 1 << 31,
+		}})
+}
+
+// GetPidsForContainer returns a list of process IDs running in a container.
+// Not used on Windows.
+func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) {
+	return nil, errors.New("not implemented on Windows")
+}
+
+// Summary returns a summary of the processes running in a container.
+// This is present in Windows to support docker top. On Linux, the
+// engine shells out to ps to get process information. On Windows, as
+// the containers could be Hyper-V containers, they would not be
+// visible on the container host. However, libcontainerd does have
+// that information.
+func (clnt *client) Summary(containerID string) ([]Summary, error) {
+
+	// Get the libcontainerd container object
+	clnt.lock(containerID)
+	defer clnt.unlock(containerID)
+	container, err := clnt.getContainer(containerID)
+	if err != nil {
+		return nil, err
+	}
+	p, err := container.hcsContainer.ProcessList()
+	if err != nil {
+		return nil, err
+	}
+	pl := make([]Summary, len(p))
+	for i := range p {
+		pl[i] = Summary(p[i])
+	}
+	return pl, nil
+}
+
+// UpdateResources updates resources for a running container.
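// Like the Solaris client, this is a deliberate no-op: returning nil (rather
// than an error) keeps `docker update` usable even though nothing is applied
// on this platform. A hypothetical caller-side sketch:
//
//	if err := clnt.UpdateResources(id, res); err != nil {
//		// never reached on Windows or Solaris; both stubs return nil
//	}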
+func (clnt *client) UpdateResources(containerID string, resources Resources) error {
+	// Updating resources isn't supported on Windows, but we return nil
+	// so that callers attempting to update a container still succeed.
+	return nil
+}
+
+func (clnt *client) CreateCheckpoint(containerID string, checkpointID string, checkpointDir string, exit bool) error {
+	return errors.New("Windows: Containers do not support checkpoints")
+}
+
+func (clnt *client) DeleteCheckpoint(containerID string, checkpointID string, checkpointDir string) error {
+	return errors.New("Windows: Containers do not support checkpoints")
+}
+
+func (clnt *client) ListCheckpoints(containerID string, checkpointDir string) (*Checkpoints, error) {
+	return nil, errors.New("Windows: Containers do not support checkpoints")
+}
+
+func (clnt *client) GetServerVersion(ctx context.Context) (*ServerVersion, error) {
+	return &ServerVersion{}, nil
+}
diff --git a/vendor/github.com/moby/moby/libcontainerd/container.go b/vendor/github.com/moby/moby/libcontainerd/container.go
new file mode 100644
index 000000000..b40321389
--- /dev/null
+++ b/vendor/github.com/moby/moby/libcontainerd/container.go
@@ -0,0 +1,13 @@
+package libcontainerd
+
+const (
+	// InitFriendlyName is the name given in the lookup map of processes
+	// for the first process started in a container.
+	InitFriendlyName = "init"
+	configFilename   = "config.json"
+)
+
+type containerCommon struct {
+	process
+	processes map[string]*process
+}
diff --git a/vendor/github.com/moby/moby/libcontainerd/container_unix.go b/vendor/github.com/moby/moby/libcontainerd/container_unix.go
new file mode 100644
index 000000000..869f88523
--- /dev/null
+++ b/vendor/github.com/moby/moby/libcontainerd/container_unix.go
@@ -0,0 +1,246 @@
+// +build linux solaris
+
+package libcontainerd
+
+import (
+	"encoding/json"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sync"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	containerd "github.com/containerd/containerd/api/grpc/types"
+	"github.com/docker/docker/pkg/ioutils"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/tonistiigi/fifo"
+	"golang.org/x/net/context"
+	"golang.org/x/sys/unix"
+)
+
+type container struct {
+	containerCommon
+
+	// Platform specific fields are below here.
+	pauseMonitor
+	oom         bool
+	runtime     string
+	runtimeArgs []string
+}
+
+type runtime struct {
+	path string
+	args []string
+}
+
+// WithRuntime sets the runtime to be used for the created container
+func WithRuntime(path string, args []string) CreateOption {
+	return runtime{path, args}
+}
+
+func (rt runtime) Apply(p interface{}) error {
+	if pr, ok := p.(*container); ok {
+		pr.runtime = rt.path
+		pr.runtimeArgs = rt.args
+	}
+	return nil
+}
+
+func (ctr *container) clean() error {
+	if os.Getenv("LIBCONTAINERD_NOCLEAN") == "1" {
+		return nil
+	}
+	if _, err := os.Lstat(ctr.dir); err != nil {
+		if os.IsNotExist(err) {
+			return nil
+		}
+		return err
+	}
+
+	if err := os.RemoveAll(ctr.dir); err != nil {
+		return err
+	}
+	return nil
+}
+
+// cleanProcess removes the fifos used by an additional process.
+// Caller needs to lock container ID before calling this method.
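// For illustration, with an exec process named "exec-1" (a hypothetical
// friendlyName) the fifos removed below would be, following fdNames in
// process_unix.go:
//
//	<ctr.dir>/exec-1-stdin
//	<ctr.dir>/exec-1-stdout
//	<ctr.dir>/exec-1-stderr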
+func (ctr *container) cleanProcess(id string) { + if p, ok := ctr.processes[id]; ok { + for _, i := range []int{unix.Stdin, unix.Stdout, unix.Stderr} { + if err := os.Remove(p.fifo(i)); err != nil && !os.IsNotExist(err) { + logrus.Warnf("libcontainerd: failed to remove %v for process %v: %v", p.fifo(i), id, err) + } + } + } + delete(ctr.processes, id) +} + +func (ctr *container) spec() (*specs.Spec, error) { + var spec specs.Spec + dt, err := ioutil.ReadFile(filepath.Join(ctr.dir, configFilename)) + if err != nil { + return nil, err + } + if err := json.Unmarshal(dt, &spec); err != nil { + return nil, err + } + return &spec, nil +} + +func (ctr *container) start(spec *specs.Spec, checkpoint, checkpointDir string, attachStdio StdioCallback) (err error) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ready := make(chan struct{}) + + fifoCtx, cancel := context.WithCancel(context.Background()) + defer func() { + if err != nil { + cancel() + } + }() + + iopipe, err := ctr.openFifos(fifoCtx, spec.Process.Terminal) + if err != nil { + return err + } + + var stdinOnce sync.Once + + // we need to delay stdin closure after container start or else "stdin close" + // event will be rejected by containerd. + // stdin closure happens in attachStdio + stdin := iopipe.Stdin + iopipe.Stdin = ioutils.NewWriteCloserWrapper(stdin, func() error { + var err error + stdinOnce.Do(func() { // on error from attach we don't know if stdin was already closed + err = stdin.Close() + go func() { + select { + case <-ready: + case <-ctx.Done(): + } + select { + case <-ready: + if err := ctr.sendCloseStdin(); err != nil { + logrus.Warnf("failed to close stdin: %+v", err) + } + default: + } + }() + }) + return err + }) + + r := &containerd.CreateContainerRequest{ + Id: ctr.containerID, + BundlePath: ctr.dir, + Stdin: ctr.fifo(unix.Stdin), + Stdout: ctr.fifo(unix.Stdout), + Stderr: ctr.fifo(unix.Stderr), + Checkpoint: checkpoint, + CheckpointDir: checkpointDir, + // check to see if we are running in ramdisk to disable pivot root + NoPivotRoot: os.Getenv("DOCKER_RAMDISK") != "", + Runtime: ctr.runtime, + RuntimeArgs: ctr.runtimeArgs, + } + ctr.client.appendContainer(ctr) + + if err := attachStdio(*iopipe); err != nil { + ctr.closeFifos(iopipe) + return err + } + + resp, err := ctr.client.remote.apiClient.CreateContainer(context.Background(), r) + if err != nil { + ctr.closeFifos(iopipe) + return err + } + ctr.systemPid = systemPid(resp.Container) + close(ready) + + return ctr.client.backend.StateChanged(ctr.containerID, StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: StateStart, + Pid: ctr.systemPid, + }}) + +} + +func (ctr *container) newProcess(friendlyName string) *process { + return &process{ + dir: ctr.dir, + processCommon: processCommon{ + containerID: ctr.containerID, + friendlyName: friendlyName, + client: ctr.client, + }, + } +} + +func (ctr *container) handleEvent(e *containerd.Event) error { + ctr.client.lock(ctr.containerID) + defer ctr.client.unlock(ctr.containerID) + switch e.Type { + case StateExit, StatePause, StateResume, StateOOM: + st := StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: e.Type, + ExitCode: e.Status, + }, + OOMKilled: e.Type == StateExit && ctr.oom, + } + if e.Type == StateOOM { + ctr.oom = true + } + if e.Type == StateExit && e.Pid != InitFriendlyName { + st.ProcessID = e.Pid + st.State = StateExitProcess + } + + // Remove process from list if we have exited + switch st.State { + case StateExit: + ctr.clean() + ctr.client.deleteContainer(e.Id) + 
case StateExitProcess:
+			ctr.cleanProcess(st.ProcessID)
+		}
+		ctr.client.q.append(e.Id, func() {
+			if err := ctr.client.backend.StateChanged(e.Id, st); err != nil {
+				logrus.Errorf("libcontainerd: backend.StateChanged(): %v", err)
+			}
+			if e.Type == StatePause || e.Type == StateResume {
+				ctr.pauseMonitor.handle(e.Type)
+			}
+			if e.Type == StateExit {
+				if en := ctr.client.getExitNotifier(e.Id); en != nil {
+					en.close()
+				}
+			}
+		})
+
+	default:
+		logrus.Debugf("libcontainerd: event unhandled: %+v", e)
+	}
+	return nil
+}
+
+// discardFifos attempts to fully read the container fifos to unblock processes
+// that may be blocked on the writer side.
+func (ctr *container) discardFifos() {
+	ctx, _ := context.WithTimeout(context.Background(), 3*time.Second)
+	for _, i := range []int{unix.Stdout, unix.Stderr} {
+		f, err := fifo.OpenFifo(ctx, ctr.fifo(i), unix.O_RDONLY|unix.O_NONBLOCK, 0)
+		if err != nil {
+			logrus.Warnf("error opening fifo %v for discarding: %+v", f, err)
+			continue
+		}
+		go func() {
+			io.Copy(ioutil.Discard, f)
+		}()
+	}
+}
diff --git a/vendor/github.com/moby/moby/libcontainerd/container_windows.go b/vendor/github.com/moby/moby/libcontainerd/container_windows.go
new file mode 100644
index 000000000..e895fa030
--- /dev/null
+++ b/vendor/github.com/moby/moby/libcontainerd/container_windows.go
@@ -0,0 +1,330 @@
+package libcontainerd
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"strings"
+	"time"
+
+	"github.com/Microsoft/hcsshim"
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/system"
+	"github.com/opencontainers/runtime-spec/specs-go"
+	"golang.org/x/sys/windows"
+)
+
+type container struct {
+	containerCommon
+
+	// Platform specific fields are below here.
+	options []CreateOption
+
+	// The ociSpec is required, as client.Create() needs a spec,
+	// but can be called from the RestartManager context which does not
+	// otherwise have access to the Spec
+	ociSpec specs.Spec
+
+	manualStopRequested bool
+	hcsContainer        hcsshim.Container
+}
+
+func (ctr *container) newProcess(friendlyName string) *process {
+	return &process{
+		processCommon: processCommon{
+			containerID:  ctr.containerID,
+			friendlyName: friendlyName,
+			client:       ctr.client,
+		},
+	}
+}
+
+// start starts a created container.
+// Caller needs to lock container ID before calling this method.
+func (ctr *container) start(attachStdio StdioCallback) error {
+	var err error
+	isServicing := false
+
+	for _, option := range ctr.options {
+		if s, ok := option.(*ServicingOption); ok && s.IsServicing {
+			isServicing = true
+		}
+	}
+
+	// Start the container. If this is a servicing container, this call will block
+	// until the container is done with the servicing execution.
+	logrus.Debugln("libcontainerd: starting container ", ctr.containerID)
+	if err = ctr.hcsContainer.Start(); err != nil {
+		logrus.Errorf("libcontainerd: failed to start container: %s", err)
+		if err := ctr.terminate(); err != nil {
+			logrus.Errorf("libcontainerd: failed to cleanup after a failed Start. %s", err)
+		} else {
+			logrus.Debugln("libcontainerd: cleaned up after failed Start by calling Terminate")
+		}
+		return err
+	}
+
+	// Note we always tell HCS to
+	// create stdout as it's required regardless of '-i' or '-t' options, so that
+	// docker can always grab the output through logs. We also tell HCS to always
+	// create stdin, even if it's not used - it will be closed shortly. Stderr
+	// is only created if we're not running with -t.
+ createProcessParms := &hcsshim.ProcessConfig{ + EmulateConsole: ctr.ociSpec.Process.Terminal, + WorkingDirectory: ctr.ociSpec.Process.Cwd, + CreateStdInPipe: !isServicing, + CreateStdOutPipe: !isServicing, + CreateStdErrPipe: !ctr.ociSpec.Process.Terminal && !isServicing, + } + createProcessParms.ConsoleSize[0] = uint(ctr.ociSpec.Process.ConsoleSize.Height) + createProcessParms.ConsoleSize[1] = uint(ctr.ociSpec.Process.ConsoleSize.Width) + + // Configure the environment for the process + createProcessParms.Environment = setupEnvironmentVariables(ctr.ociSpec.Process.Env) + if ctr.ociSpec.Platform.OS == "windows" { + createProcessParms.CommandLine = strings.Join(ctr.ociSpec.Process.Args, " ") + } else { + createProcessParms.CommandArgs = ctr.ociSpec.Process.Args + } + createProcessParms.User = ctr.ociSpec.Process.User.Username + + // LCOW requires the raw OCI spec passed through HCS and onwards to GCS for the utility VM. + if system.LCOWSupported() && ctr.ociSpec.Platform.OS == "linux" { + ociBuf, err := json.Marshal(ctr.ociSpec) + if err != nil { + return err + } + ociRaw := json.RawMessage(ociBuf) + createProcessParms.OCISpecification = &ociRaw + } + + // Start the command running in the container. + newProcess, err := ctr.hcsContainer.CreateProcess(createProcessParms) + if err != nil { + logrus.Errorf("libcontainerd: CreateProcess() failed %s", err) + if err := ctr.terminate(); err != nil { + logrus.Errorf("libcontainerd: failed to cleanup after a failed CreateProcess. %s", err) + } else { + logrus.Debugln("libcontainerd: cleaned up after failed CreateProcess by calling Terminate") + } + return err + } + + pid := newProcess.Pid() + + // Save the hcs Process and PID + ctr.process.friendlyName = InitFriendlyName + ctr.process.hcsProcess = newProcess + + // If this is a servicing container, wait on the process synchronously here and + // if it succeeds, wait for it cleanly shutdown and merge into the parent container. + if isServicing { + exitCode := ctr.waitProcessExitCode(&ctr.process) + + if exitCode != 0 { + if err := ctr.terminate(); err != nil { + logrus.Warnf("libcontainerd: terminating servicing container %s failed: %s", ctr.containerID, err) + } + return fmt.Errorf("libcontainerd: servicing container %s returned non-zero exit code %d", ctr.containerID, exitCode) + } + + return ctr.hcsContainer.WaitTimeout(time.Minute * 5) + } + + var stdout, stderr io.ReadCloser + var stdin io.WriteCloser + stdin, stdout, stderr, err = newProcess.Stdio() + if err != nil { + logrus.Errorf("libcontainerd: failed to get stdio pipes: %s", err) + if err := ctr.terminate(); err != nil { + logrus.Errorf("libcontainerd: failed to cleanup after a failed Stdio. %s", err) + } + return err + } + + iopipe := &IOPipe{Terminal: ctr.ociSpec.Process.Terminal} + + iopipe.Stdin = createStdInCloser(stdin, newProcess) + + // Convert io.ReadClosers to io.Readers + if stdout != nil { + iopipe.Stdout = ioutil.NopCloser(&autoClosingReader{ReadCloser: stdout}) + } + if stderr != nil { + iopipe.Stderr = ioutil.NopCloser(&autoClosingReader{ReadCloser: stderr}) + } + + // Save the PID + logrus.Debugf("libcontainerd: process started - PID %d", pid) + ctr.systemPid = uint32(pid) + + // Spin up a go routine waiting for exit to handle cleanup + go ctr.waitExit(&ctr.process, true) + + ctr.client.appendContainer(ctr) + + if err := attachStdio(*iopipe); err != nil { + // OK to return the error here, as waitExit will handle tear-down in HCS + return err + } + + // Tell the docker engine that the container has started. 
+	si := StateInfo{
+		CommonStateInfo: CommonStateInfo{
+			State: StateStart,
+			Pid:   ctr.systemPid, // Not sure this is needed? Double-check monitor.go in daemon BUGBUG @jhowardmsft
+		}}
+	logrus.Debugf("libcontainerd: start() completed OK, %+v", si)
+	return ctr.client.backend.StateChanged(ctr.containerID, si)
+
+}
+
+// waitProcessExitCode will wait for the given process to exit and return its exit code.
+func (ctr *container) waitProcessExitCode(process *process) int {
+	// Block indefinitely for the process to exit.
+	err := process.hcsProcess.Wait()
+	if err != nil {
+		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != windows.ERROR_BROKEN_PIPE {
+			logrus.Warnf("libcontainerd: Wait() failed (container may have been killed): %s", err)
+		}
+		// Fall through here, do not return. This ensures we attempt to continue the
+		// shutdown in HCS and tell the docker engine that the process/container
+		// has exited to avoid a container being dropped on the floor.
+	}
+
+	exitCode, err := process.hcsProcess.ExitCode()
+	if err != nil {
+		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != windows.ERROR_BROKEN_PIPE {
+			logrus.Warnf("libcontainerd: unable to get exit code from container %s", ctr.containerID)
+		}
+		// Since we got an error retrieving the exit code, make sure that the code we return
+		// doesn't incorrectly indicate success.
+		exitCode = -1
+
+		// Fall through here, do not return. This ensures we attempt to continue the
+		// shutdown in HCS and tell the docker engine that the process/container
+		// has exited to avoid a container being dropped on the floor.
+	}
+
+	return exitCode
+}
+
+// waitExit runs as a goroutine waiting for the process to exit. It is the
+// Windows counterpart of the Linux containerd world, where exit events
+// arrive as state-change notifications from containerd.
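// A sketch of the StateInfo it reports back to the backend:
//
//	first container process exits -> State: StateExit, ExitCode, Pid
//	exec'd process exits          -> State: StateExitProcess, plus ProcessID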
+func (ctr *container) waitExit(process *process, isFirstProcessToStart bool) error { + logrus.Debugln("libcontainerd: waitExit() on pid", process.systemPid) + + exitCode := ctr.waitProcessExitCode(process) + // Lock the container while removing the process/container from the list + ctr.client.lock(ctr.containerID) + + if !isFirstProcessToStart { + ctr.cleanProcess(process.friendlyName) + } else { + ctr.client.deleteContainer(ctr.containerID) + } + + // Unlock here so other threads are unblocked + ctr.client.unlock(ctr.containerID) + + // Assume the container has exited + si := StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: StateExit, + ExitCode: uint32(exitCode), + Pid: process.systemPid, + ProcessID: process.friendlyName, + }, + UpdatePending: false, + } + + // But it could have been an exec'd process which exited + if !isFirstProcessToStart { + si.State = StateExitProcess + } else { + // Pending updates is only applicable for WCOW + if ctr.ociSpec.Platform.OS == "windows" { + updatePending, err := ctr.hcsContainer.HasPendingUpdates() + if err != nil { + logrus.Warnf("libcontainerd: HasPendingUpdates() failed (container may have been killed): %s", err) + } else { + si.UpdatePending = updatePending + } + } + + logrus.Debugf("libcontainerd: shutting down container %s", ctr.containerID) + if err := ctr.shutdown(); err != nil { + logrus.Debugf("libcontainerd: failed to shutdown container %s", ctr.containerID) + } else { + logrus.Debugf("libcontainerd: completed shutting down container %s", ctr.containerID) + } + if err := ctr.hcsContainer.Close(); err != nil { + logrus.Error(err) + } + } + + if err := process.hcsProcess.Close(); err != nil { + logrus.Errorf("libcontainerd: hcsProcess.Close(): %v", err) + } + + // Call into the backend to notify it of the state change. + logrus.Debugf("libcontainerd: waitExit() calling backend.StateChanged %+v", si) + if err := ctr.client.backend.StateChanged(ctr.containerID, si); err != nil { + logrus.Error(err) + } + + logrus.Debugf("libcontainerd: waitExit() completed OK, %+v", si) + + return nil +} + +// cleanProcess removes process from the map. +// Caller needs to lock container ID before calling this method. +func (ctr *container) cleanProcess(id string) { + delete(ctr.processes, id) +} + +// shutdown shuts down the container in HCS +// Caller needs to lock container ID before calling this method. +func (ctr *container) shutdown() error { + const shutdownTimeout = time.Minute * 5 + err := ctr.hcsContainer.Shutdown() + if hcsshim.IsPending(err) { + // Explicit timeout to avoid a (remote) possibility that shutdown hangs indefinitely. + err = ctr.hcsContainer.WaitTimeout(shutdownTimeout) + } else if hcsshim.IsAlreadyStopped(err) { + err = nil + } + + if err != nil { + logrus.Debugf("libcontainerd: error shutting down container %s %v calling terminate", ctr.containerID, err) + if err := ctr.terminate(); err != nil { + return err + } + return err + } + + return nil +} + +// terminate terminates the container in HCS +// Caller needs to lock container ID before calling this method. 
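// shutdown and terminate share the same HCS completion pattern; as a minimal
// sketch (hc being an hcsshim.Container):
//
//	err := hc.Terminate()
//	if hcsshim.IsPending(err) {
//		err = hc.WaitTimeout(timeout) // wait for async completion
//	} else if hcsshim.IsAlreadyStopped(err) {
//		err = nil // already gone counts as success here
//	}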
+func (ctr *container) terminate() error { + const terminateTimeout = time.Minute * 5 + err := ctr.hcsContainer.Terminate() + + if hcsshim.IsPending(err) { + err = ctr.hcsContainer.WaitTimeout(terminateTimeout) + } else if hcsshim.IsAlreadyStopped(err) { + err = nil + } + + if err != nil { + logrus.Debugf("libcontainerd: error terminating container %s %v", ctr.containerID, err) + return err + } + + return nil +} diff --git a/vendor/github.com/moby/moby/libcontainerd/oom_linux.go b/vendor/github.com/moby/moby/libcontainerd/oom_linux.go new file mode 100644 index 000000000..e126b7a55 --- /dev/null +++ b/vendor/github.com/moby/moby/libcontainerd/oom_linux.go @@ -0,0 +1,31 @@ +package libcontainerd + +import ( + "fmt" + "os" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/opencontainers/runc/libcontainer/system" +) + +func setOOMScore(pid, score int) error { + oomScoreAdjPath := fmt.Sprintf("/proc/%d/oom_score_adj", pid) + f, err := os.OpenFile(oomScoreAdjPath, os.O_WRONLY, 0) + if err != nil { + return err + } + stringScore := strconv.Itoa(score) + _, err = f.WriteString(stringScore) + f.Close() + if os.IsPermission(err) { + // Setting oom_score_adj does not work in an + // unprivileged container. Ignore the error, but log + // it if we appear not to be in that situation. + if !system.RunningInUserNS() { + logrus.Debugf("Permission denied writing %q to %s", stringScore, oomScoreAdjPath) + } + return nil + } + return err +} diff --git a/vendor/github.com/moby/moby/libcontainerd/oom_solaris.go b/vendor/github.com/moby/moby/libcontainerd/oom_solaris.go new file mode 100644 index 000000000..2ebe5e87c --- /dev/null +++ b/vendor/github.com/moby/moby/libcontainerd/oom_solaris.go @@ -0,0 +1,5 @@ +package libcontainerd + +func setOOMScore(pid, score int) error { + return nil +} diff --git a/vendor/github.com/moby/moby/libcontainerd/pausemonitor_unix.go b/vendor/github.com/moby/moby/libcontainerd/pausemonitor_unix.go new file mode 100644 index 000000000..4f3766d95 --- /dev/null +++ b/vendor/github.com/moby/moby/libcontainerd/pausemonitor_unix.go @@ -0,0 +1,42 @@ +// +build !windows + +package libcontainerd + +import ( + "sync" +) + +// pauseMonitor is helper to get notifications from pause state changes. +type pauseMonitor struct { + sync.Mutex + waiters map[string][]chan struct{} +} + +func (m *pauseMonitor) handle(t string) { + m.Lock() + defer m.Unlock() + if m.waiters == nil { + return + } + q, ok := m.waiters[t] + if !ok { + return + } + if len(q) > 0 { + close(q[0]) + m.waiters[t] = q[1:] + } +} + +func (m *pauseMonitor) append(t string, waiter chan struct{}) { + m.Lock() + defer m.Unlock() + if m.waiters == nil { + m.waiters = make(map[string][]chan struct{}) + } + _, ok := m.waiters[t] + if !ok { + m.waiters[t] = make([]chan struct{}, 0) + } + m.waiters[t] = append(m.waiters[t], waiter) +} diff --git a/vendor/github.com/moby/moby/libcontainerd/process.go b/vendor/github.com/moby/moby/libcontainerd/process.go new file mode 100644 index 000000000..57562c878 --- /dev/null +++ b/vendor/github.com/moby/moby/libcontainerd/process.go @@ -0,0 +1,18 @@ +package libcontainerd + +// processCommon are the platform common fields as part of the process structure +// which keeps the state for the main container process, as well as any exec +// processes. 
+type processCommon struct {
+	client *client
+
+	// containerID is the Container ID
+	containerID string
+
+	// friendlyName is an identifier for the process (or `InitFriendlyName`
+	// for the first process)
+	friendlyName string
+
+	// systemPid is the PID of the main container process
+	systemPid uint32
+}
diff --git a/vendor/github.com/moby/moby/libcontainerd/process_unix.go b/vendor/github.com/moby/moby/libcontainerd/process_unix.go
new file mode 100644
index 000000000..3b54e325b
--- /dev/null
+++ b/vendor/github.com/moby/moby/libcontainerd/process_unix.go
@@ -0,0 +1,107 @@
+// +build linux solaris
+
+package libcontainerd
+
+import (
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	goruntime "runtime"
+	"strings"
+
+	containerd "github.com/containerd/containerd/api/grpc/types"
+	"github.com/tonistiigi/fifo"
+	"golang.org/x/net/context"
+	"golang.org/x/sys/unix"
+)
+
+var fdNames = map[int]string{
+	unix.Stdin:  "stdin",
+	unix.Stdout: "stdout",
+	unix.Stderr: "stderr",
+}
+
+// process keeps the state for both the main container process and exec processes.
+type process struct {
+	processCommon
+
+	// Platform specific fields are below here.
+	dir string
+}
+
+func (p *process) openFifos(ctx context.Context, terminal bool) (pipe *IOPipe, err error) {
+	if err := os.MkdirAll(p.dir, 0700); err != nil {
+		return nil, err
+	}
+
+	io := &IOPipe{}
+
+	io.Stdin, err = fifo.OpenFifo(ctx, p.fifo(unix.Stdin), unix.O_WRONLY|unix.O_CREAT|unix.O_NONBLOCK, 0700)
+	if err != nil {
+		return nil, err
+	}
+
+	defer func() {
+		if err != nil {
+			io.Stdin.Close()
+		}
+	}()
+
+	io.Stdout, err = fifo.OpenFifo(ctx, p.fifo(unix.Stdout), unix.O_RDONLY|unix.O_CREAT|unix.O_NONBLOCK, 0700)
+	if err != nil {
+		return nil, err
+	}
+
+	defer func() {
+		if err != nil {
+			io.Stdout.Close()
+		}
+	}()
+
+	if goruntime.GOOS == "solaris" || !terminal {
+		// On Solaris, terminal handling is done exclusively by the runtime, so we
+		// make no distinction between the terminal and !terminal cases there.
+		io.Stderr, err = fifo.OpenFifo(ctx, p.fifo(unix.Stderr), unix.O_RDONLY|unix.O_CREAT|unix.O_NONBLOCK, 0700)
+		if err != nil {
+			return nil, err
+		}
+		defer func() {
+			if err != nil {
+				io.Stderr.Close()
+			}
+		}()
+	} else {
+		io.Stderr = ioutil.NopCloser(emptyReader{})
+	}
+
+	return io, nil
+}
+
+func (p *process) sendCloseStdin() error {
+	_, err := p.client.remote.apiClient.UpdateProcess(context.Background(), &containerd.UpdateProcessRequest{
+		Id:         p.containerID,
+		Pid:        p.friendlyName,
+		CloseStdin: true,
+	})
+	if err != nil && (strings.Contains(err.Error(), "container not found") || strings.Contains(err.Error(), "process not found")) {
+		return nil
+	}
+	return err
+}
+
+func (p *process) closeFifos(io *IOPipe) {
+	io.Stdin.Close()
+	io.Stdout.Close()
+	io.Stderr.Close()
+}
+
+type emptyReader struct{}
+
+func (r emptyReader) Read(b []byte) (int, error) {
+	return 0, io.EOF
+}
+
+func (p *process) fifo(index int) string {
+	return filepath.Join(p.dir, p.friendlyName+"-"+fdNames[index])
+}
diff --git a/vendor/github.com/moby/moby/libcontainerd/process_windows.go b/vendor/github.com/moby/moby/libcontainerd/process_windows.go
new file mode 100644
index 000000000..854c4dd1f
--- /dev/null
+++ b/vendor/github.com/moby/moby/libcontainerd/process_windows.go
@@ -0,0 +1,48 @@
+package libcontainerd
+
+import (
+	"io"
+	"sync"
+
+	"github.com/Microsoft/hcsshim"
+	"github.com/docker/docker/pkg/ioutils"
+)
+
+// process keeps the state for both the main container process and exec processes.
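// The hcsshim.Process handle held below is what the client methods drive;
// for example (both taken from client_windows.go):
//
//	p.hcsProcess.Kill()              // SignalProcess
//	p.hcsProcess.ResizeConsole(w, h) // Resize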
+type process struct {
+	processCommon
+
+	// Platform specific fields are below here.
+	hcsProcess hcsshim.Process
+}
+
+type autoClosingReader struct {
+	io.ReadCloser
+	sync.Once
+}
+
+func (r *autoClosingReader) Read(b []byte) (n int, err error) {
+	n, err = r.ReadCloser.Read(b)
+	if err == io.EOF {
+		r.Once.Do(func() { r.ReadCloser.Close() })
+	}
+	return
+}
+
+func createStdInCloser(pipe io.WriteCloser, process hcsshim.Process) io.WriteCloser {
+	return ioutils.NewWriteCloserWrapper(pipe, func() error {
+		if err := pipe.Close(); err != nil {
+			return err
+		}
+
+		err := process.CloseStdin()
+		if err != nil && !hcsshim.IsNotExist(err) && !hcsshim.IsAlreadyClosed(err) {
+			// This error will occur if the compute system is currently shutting down
+			if perr, ok := err.(*hcsshim.ProcessError); ok && perr.Err != hcsshim.ErrVmcomputeOperationInvalidState {
+				return err
+			}
+		}
+
+		return nil
+	})
+}
diff --git a/vendor/github.com/moby/moby/libcontainerd/queue_unix.go b/vendor/github.com/moby/moby/libcontainerd/queue_unix.go
new file mode 100644
index 000000000..66765f75e
--- /dev/null
+++ b/vendor/github.com/moby/moby/libcontainerd/queue_unix.go
@@ -0,0 +1,37 @@
+// +build linux solaris
+
+package libcontainerd
+
+import "sync"
+
+type queue struct {
+	sync.Mutex
+	fns map[string]chan struct{}
+}
+
+func (q *queue) append(id string, f func()) {
+	q.Lock()
+	defer q.Unlock()
+
+	if q.fns == nil {
+		q.fns = make(map[string]chan struct{})
+	}
+
+	done := make(chan struct{})
+
+	fn, ok := q.fns[id]
+	q.fns[id] = done
+	go func() {
+		if ok {
+			<-fn
+		}
+		f()
+		close(done)
+
+		q.Lock()
+		if q.fns[id] == done {
+			delete(q.fns, id)
+		}
+		q.Unlock()
+	}()
+}
diff --git a/vendor/github.com/moby/moby/libcontainerd/queue_unix_test.go b/vendor/github.com/moby/moby/libcontainerd/queue_unix_test.go
new file mode 100644
index 000000000..bb49a5d4c
--- /dev/null
+++ b/vendor/github.com/moby/moby/libcontainerd/queue_unix_test.go
@@ -0,0 +1,33 @@
+// +build linux solaris
+
+package libcontainerd
+
+import (
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestSerialization(t *testing.T) {
+	var (
+		q             queue
+		serialization = 1
+	)
+
+	q.append("aaa", func() {
+		// simulate a long-running task
+		time.Sleep(10 * time.Millisecond)
+		require.EqualValues(t, serialization, 1)
+		serialization = 2
+	})
+	q.append("aaa", func() {
+		require.EqualValues(t, serialization, 2)
+		serialization = 3
+	})
+	q.append("aaa", func() {
+		require.EqualValues(t, serialization, 3)
+		serialization = 4
+	})
+	time.Sleep(20 * time.Millisecond)
+}
diff --git a/vendor/github.com/moby/moby/libcontainerd/remote.go b/vendor/github.com/moby/moby/libcontainerd/remote.go
new file mode 100644
index 000000000..9031e3ae7
--- /dev/null
+++ b/vendor/github.com/moby/moby/libcontainerd/remote.go
@@ -0,0 +1,20 @@
+package libcontainerd
+
+// Remote on Linux defines the access point to the containerd grpc API.
+// Remote on Windows is largely an unimplemented interface as there is
+// no remote containerd.
+type Remote interface {
+	// Client returns a new Client instance connected with given Backend.
+	Client(Backend) (Client, error)
+	// Cleanup stops containerd if it was started by libcontainerd.
+	// Note this is not used on Windows as there is no remote containerd.
+	Cleanup()
+	// UpdateOptions allows various remote options to be updated at runtime.
+	UpdateOptions(...RemoteOption) error
+}
+
+// RemoteOption allows configuring parameters of remotes.
+// This is unused on Windows.
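// A minimal wiring sketch on Linux (the state dir path and backend value are
// assumptions of this example, not mandated by the package):
//
//	r, err := New("/var/run/docker/libcontainerd", WithStartDaemon(true))
//	if err != nil {
//		return err
//	}
//	defer r.Cleanup()
//	c, err := r.Client(b) // b implements Backend
//
// See remote_unix.go below for New and the concrete options.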
+type RemoteOption interface { + Apply(Remote) error +} diff --git a/vendor/github.com/moby/moby/libcontainerd/remote_unix.go b/vendor/github.com/moby/moby/libcontainerd/remote_unix.go new file mode 100644 index 000000000..24fbc5ad6 --- /dev/null +++ b/vendor/github.com/moby/moby/libcontainerd/remote_unix.go @@ -0,0 +1,565 @@ +// +build linux solaris + +package libcontainerd + +import ( + "fmt" + "io" + "io/ioutil" + "log" + "net" + "os" + "os/exec" + "path/filepath" + goruntime "runtime" + "strconv" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + containerd "github.com/containerd/containerd/api/grpc/types" + "github.com/docker/docker/pkg/locker" + "github.com/docker/docker/pkg/system" + "github.com/golang/protobuf/ptypes" + "github.com/golang/protobuf/ptypes/timestamp" + "golang.org/x/net/context" + "golang.org/x/sys/unix" + "google.golang.org/grpc" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/transport" +) + +const ( + maxConnectionRetryCount = 3 + containerdHealthCheckTimeout = 3 * time.Second + containerdShutdownTimeout = 15 * time.Second + containerdBinary = "docker-containerd" + containerdPidFilename = "docker-containerd.pid" + containerdSockFilename = "docker-containerd.sock" + containerdStateDir = "containerd" + eventTimestampFilename = "event.ts" +) + +type remote struct { + sync.RWMutex + apiClient containerd.APIClient + daemonPid int + stateDir string + rpcAddr string + startDaemon bool + closedManually bool + debugLog bool + rpcConn *grpc.ClientConn + clients []*client + eventTsPath string + runtime string + runtimeArgs []string + daemonWaitCh chan struct{} + liveRestore bool + oomScore int + restoreFromTimestamp *timestamp.Timestamp +} + +// New creates a fresh instance of libcontainerd remote. +func New(stateDir string, options ...RemoteOption) (_ Remote, err error) { + defer func() { + if err != nil { + err = fmt.Errorf("Failed to connect to containerd. Please make sure containerd is installed in your PATH or you have specified the correct address. Got error: %v", err) + } + }() + r := &remote{ + stateDir: stateDir, + daemonPid: -1, + eventTsPath: filepath.Join(stateDir, eventTimestampFilename), + } + for _, option := range options { + if err := option.Apply(r); err != nil { + return nil, err + } + } + + if err := system.MkdirAll(stateDir, 0700, ""); err != nil { + return nil, err + } + + if r.rpcAddr == "" { + r.rpcAddr = filepath.Join(stateDir, containerdSockFilename) + } + + if r.startDaemon { + if err := r.runContainerdDaemon(); err != nil { + return nil, err + } + } + + // don't output the grpc reconnect logging + grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags)) + dialOpts := []grpc.DialOption{ + grpc.WithInsecure(), + grpc.WithBackoffMaxDelay(2 * time.Second), + grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { + return net.DialTimeout("unix", addr, timeout) + }), + } + conn, err := grpc.Dial(r.rpcAddr, dialOpts...) 
+ if err != nil { + return nil, fmt.Errorf("error connecting to containerd: %v", err) + } + + r.rpcConn = conn + r.apiClient = containerd.NewAPIClient(conn) + + // Get the timestamp to restore from + t := r.getLastEventTimestamp() + tsp, err := ptypes.TimestampProto(t) + if err != nil { + logrus.Errorf("libcontainerd: failed to convert timestamp: %q", err) + } + r.restoreFromTimestamp = tsp + + go r.handleConnectionChange() + + if err := r.startEventsMonitor(); err != nil { + return nil, err + } + + return r, nil +} + +func (r *remote) UpdateOptions(options ...RemoteOption) error { + for _, option := range options { + if err := option.Apply(r); err != nil { + return err + } + } + return nil +} + +func (r *remote) handleConnectionChange() { + var transientFailureCount = 0 + + ticker := time.NewTicker(500 * time.Millisecond) + defer ticker.Stop() + healthClient := grpc_health_v1.NewHealthClient(r.rpcConn) + + for { + <-ticker.C + ctx, cancel := context.WithTimeout(context.Background(), containerdHealthCheckTimeout) + _, err := healthClient.Check(ctx, &grpc_health_v1.HealthCheckRequest{}) + cancel() + if err == nil { + continue + } + + logrus.Debugf("libcontainerd: containerd health check returned error: %v", err) + + if r.daemonPid != -1 { + if r.closedManually { + // Well, we asked for it to stop, just return + return + } + // all other errors are transient + // Reset state to be notified of next failure + transientFailureCount++ + if transientFailureCount >= maxConnectionRetryCount { + transientFailureCount = 0 + if system.IsProcessAlive(r.daemonPid) { + system.KillProcess(r.daemonPid) + } + <-r.daemonWaitCh + if err := r.runContainerdDaemon(); err != nil { //FIXME: Handle error + logrus.Errorf("libcontainerd: error restarting containerd: %v", err) + } + continue + } + } + } +} + +func (r *remote) Cleanup() { + if r.daemonPid == -1 { + return + } + r.closedManually = true + r.rpcConn.Close() + // Ask the daemon to quit + unix.Kill(r.daemonPid, unix.SIGTERM) + + // Wait up to 15secs for it to stop + for i := time.Duration(0); i < containerdShutdownTimeout; i += time.Second { + if !system.IsProcessAlive(r.daemonPid) { + break + } + time.Sleep(time.Second) + } + + if system.IsProcessAlive(r.daemonPid) { + logrus.Warnf("libcontainerd: containerd (%d) didn't stop within 15 secs, killing it\n", r.daemonPid) + unix.Kill(r.daemonPid, unix.SIGKILL) + } + + // cleanup some files + os.Remove(filepath.Join(r.stateDir, containerdPidFilename)) + os.Remove(filepath.Join(r.stateDir, containerdSockFilename)) +} + +func (r *remote) Client(b Backend) (Client, error) { + c := &client{ + clientCommon: clientCommon{ + backend: b, + containers: make(map[string]*container), + locker: locker.New(), + }, + remote: r, + exitNotifiers: make(map[string]*exitNotifier), + liveRestore: r.liveRestore, + } + + r.Lock() + r.clients = append(r.clients, c) + r.Unlock() + return c, nil +} + +func (r *remote) updateEventTimestamp(t time.Time) { + f, err := os.OpenFile(r.eventTsPath, unix.O_CREAT|unix.O_WRONLY|unix.O_TRUNC, 0600) + if err != nil { + logrus.Warnf("libcontainerd: failed to open event timestamp file: %v", err) + return + } + defer f.Close() + + b, err := t.MarshalText() + if err != nil { + logrus.Warnf("libcontainerd: failed to encode timestamp: %v", err) + return + } + + n, err := f.Write(b) + if err != nil || n != len(b) { + logrus.Warnf("libcontainerd: failed to update event timestamp file: %v", err) + f.Truncate(0) + return + } +} + +func (r *remote) getLastEventTimestamp() time.Time { + t := time.Now() + + fi, 
err := os.Stat(r.eventTsPath) + if os.IsNotExist(err) || fi.Size() == 0 { + return t + } + + f, err := os.Open(r.eventTsPath) + if err != nil { + logrus.Warnf("libcontainerd: Unable to access last event ts: %v", err) + return t + } + defer f.Close() + + b := make([]byte, fi.Size()) + n, err := f.Read(b) + if err != nil || n != len(b) { + logrus.Warnf("libcontainerd: Unable to read last event ts: %v", err) + return t + } + + t.UnmarshalText(b) + + return t +} + +func (r *remote) startEventsMonitor() error { + // First, get past events + t := r.getLastEventTimestamp() + tsp, err := ptypes.TimestampProto(t) + if err != nil { + logrus.Errorf("libcontainerd: failed to convert timestamp: %q", err) + } + er := &containerd.EventsRequest{ + Timestamp: tsp, + } + + var events containerd.API_EventsClient + for { + events, err = r.apiClient.Events(context.Background(), er, grpc.FailFast(false)) + if err == nil { + break + } + logrus.Warnf("libcontainerd: failed to get events from containerd: %q", err) + + if r.closedManually { + // ignore error if grpc remote connection is closed manually + return nil + } + + <-time.After(100 * time.Millisecond) + } + + go r.handleEventStream(events) + return nil +} + +func (r *remote) handleEventStream(events containerd.API_EventsClient) { + for { + e, err := events.Recv() + if err != nil { + if grpc.ErrorDesc(err) == transport.ErrConnClosing.Desc && + r.closedManually { + // ignore error if grpc remote connection is closed manually + return + } + logrus.Errorf("libcontainerd: failed to receive event from containerd: %v", err) + go r.startEventsMonitor() + return + } + + logrus.Debugf("libcontainerd: received containerd event: %#v", e) + + var container *container + var c *client + r.RLock() + for _, c = range r.clients { + container, err = c.getContainer(e.Id) + if err == nil { + break + } + } + r.RUnlock() + if container == nil { + logrus.Warnf("libcontainerd: unknown container %s", e.Id) + continue + } + + if err := container.handleEvent(e); err != nil { + logrus.Errorf("libcontainerd: error processing state change for %s: %v", e.Id, err) + } + + tsp, err := ptypes.Timestamp(e.Timestamp) + if err != nil { + logrus.Errorf("libcontainerd: failed to convert event timestamp: %q", err) + continue + } + + r.updateEventTimestamp(tsp) + } +} + +func (r *remote) runContainerdDaemon() error { + pidFilename := filepath.Join(r.stateDir, containerdPidFilename) + f, err := os.OpenFile(pidFilename, os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + return err + } + defer f.Close() + + // File exist, check if the daemon is alive + b := make([]byte, 8) + n, err := f.Read(b) + if err != nil && err != io.EOF { + return err + } + + if n > 0 { + pid, err := strconv.ParseUint(string(b[:n]), 10, 64) + if err != nil { + return err + } + if system.IsProcessAlive(int(pid)) { + logrus.Infof("libcontainerd: previous instance of containerd still alive (%d)", pid) + r.daemonPid = int(pid) + return nil + } + } + + // rewind the file + _, err = f.Seek(0, os.SEEK_SET) + if err != nil { + return err + } + + // Truncate it + err = f.Truncate(0) + if err != nil { + return err + } + + // Start a new instance + args := []string{ + "-l", fmt.Sprintf("unix://%s", r.rpcAddr), + "--metrics-interval=0", + "--start-timeout", "2m", + "--state-dir", filepath.Join(r.stateDir, containerdStateDir), + } + if goruntime.GOOS == "solaris" { + args = append(args, "--shim", "containerd-shim", "--runtime", "runc") + } else { + args = append(args, "--shim", "docker-containerd-shim") + if r.runtime != "" { + args = 
append(args, "--runtime") + args = append(args, r.runtime) + } + } + if r.debugLog { + args = append(args, "--debug") + } + if len(r.runtimeArgs) > 0 { + for _, v := range r.runtimeArgs { + args = append(args, "--runtime-args") + args = append(args, v) + } + logrus.Debugf("libcontainerd: runContainerdDaemon: runtimeArgs: %s", args) + } + + cmd := exec.Command(containerdBinary, args...) + // redirect containerd logs to docker logs + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.SysProcAttr = setSysProcAttr(true) + cmd.Env = nil + // clear the NOTIFY_SOCKET from the env when starting containerd + for _, e := range os.Environ() { + if !strings.HasPrefix(e, "NOTIFY_SOCKET") { + cmd.Env = append(cmd.Env, e) + } + } + if err := cmd.Start(); err != nil { + return err + } + + // unless strictly necessary, do not add anything in between here + // as the reaper goroutine below needs to kick in as soon as possible + // and any "return" from code paths added here will defeat the reaper + // process. + + r.daemonWaitCh = make(chan struct{}) + go func() { + cmd.Wait() + close(r.daemonWaitCh) + }() // Reap our child when needed + + logrus.Infof("libcontainerd: new containerd process, pid: %d", cmd.Process.Pid) + if err := setOOMScore(cmd.Process.Pid, r.oomScore); err != nil { + system.KillProcess(cmd.Process.Pid) + return err + } + if _, err := f.WriteString(fmt.Sprintf("%d", cmd.Process.Pid)); err != nil { + system.KillProcess(cmd.Process.Pid) + return err + } + + r.daemonPid = cmd.Process.Pid + return nil +} + +// WithRemoteAddr sets the external containerd socket to connect to. +func WithRemoteAddr(addr string) RemoteOption { + return rpcAddr(addr) +} + +type rpcAddr string + +func (a rpcAddr) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.rpcAddr = string(a) + return nil + } + return fmt.Errorf("WithRemoteAddr option not supported for this remote") +} + +// WithRuntimePath sets the path of the runtime to be used as the +// default by containerd +func WithRuntimePath(rt string) RemoteOption { + return runtimePath(rt) +} + +type runtimePath string + +func (rt runtimePath) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.runtime = string(rt) + return nil + } + return fmt.Errorf("WithRuntime option not supported for this remote") +} + +// WithRuntimeArgs sets the list of runtime args passed to containerd +func WithRuntimeArgs(args []string) RemoteOption { + return runtimeArgs(args) +} + +type runtimeArgs []string + +func (rt runtimeArgs) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.runtimeArgs = rt + return nil + } + return fmt.Errorf("WithRuntimeArgs option not supported for this remote") +} + +// WithStartDaemon defines if libcontainerd should also run containerd daemon. +func WithStartDaemon(start bool) RemoteOption { + return startDaemon(start) +} + +type startDaemon bool + +func (s startDaemon) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.startDaemon = bool(s) + return nil + } + return fmt.Errorf("WithStartDaemon option not supported for this remote") +} + +// WithDebugLog defines if containerd debug logs will be enabled for daemon. 
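+//
+// Editor's sketch (illustrative, not part of the vendored source): each
+// RemoteOption here is a functional option; New applies every option to the
+// remote it constructs, and UpdateOptions re-applies options on a live
+// remote. stateDir is a placeholder for the daemon's libcontainerd state
+// directory:
+//
+//	r, err := New(stateDir, WithStartDaemon(true), WithDebugLog(true))
+//	if err == nil {
+//		err = r.UpdateOptions(WithOOMScore(-500))
+//	}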
+func WithDebugLog(debug bool) RemoteOption { + return debugLog(debug) +} + +type debugLog bool + +func (d debugLog) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.debugLog = bool(d) + return nil + } + return fmt.Errorf("WithDebugLog option not supported for this remote") +} + +// WithLiveRestore defines if containers are stopped on shutdown or restored. +func WithLiveRestore(v bool) RemoteOption { + return liveRestore(v) +} + +type liveRestore bool + +func (l liveRestore) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.liveRestore = bool(l) + for _, c := range remote.clients { + c.liveRestore = bool(l) + } + return nil + } + return fmt.Errorf("WithLiveRestore option not supported for this remote") +} + +// WithOOMScore defines the oom_score_adj to set for the containerd process. +func WithOOMScore(score int) RemoteOption { + return oomScore(score) +} + +type oomScore int + +func (o oomScore) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.oomScore = int(o) + return nil + } + return fmt.Errorf("WithOOMScore option not supported for this remote") +} diff --git a/vendor/github.com/moby/moby/libcontainerd/remote_windows.go b/vendor/github.com/moby/moby/libcontainerd/remote_windows.go new file mode 100644 index 000000000..74c10447b --- /dev/null +++ b/vendor/github.com/moby/moby/libcontainerd/remote_windows.go @@ -0,0 +1,36 @@ +package libcontainerd + +import "github.com/docker/docker/pkg/locker" + +type remote struct { +} + +func (r *remote) Client(b Backend) (Client, error) { + c := &client{ + clientCommon: clientCommon{ + backend: b, + containers: make(map[string]*container), + locker: locker.New(), + }, + } + return c, nil +} + +// Cleanup is a no-op on Windows. It is here to implement the interface. +func (r *remote) Cleanup() { +} + +func (r *remote) UpdateOptions(opts ...RemoteOption) error { + return nil +} + +// New creates a fresh instance of libcontainerd remote. On Windows, +// this is not used as there is no remote containerd process. +func New(_ string, _ ...RemoteOption) (Remote, error) { + return &remote{}, nil +} + +// WithLiveRestore is a noop on windows. +func WithLiveRestore(v bool) RemoteOption { + return nil +} diff --git a/vendor/github.com/moby/moby/libcontainerd/types.go b/vendor/github.com/moby/moby/libcontainerd/types.go new file mode 100644 index 000000000..c7ade6b18 --- /dev/null +++ b/vendor/github.com/moby/moby/libcontainerd/types.go @@ -0,0 +1,75 @@ +package libcontainerd + +import ( + "io" + + containerd "github.com/containerd/containerd/api/grpc/types" + "github.com/opencontainers/runtime-spec/specs-go" + "golang.org/x/net/context" +) + +// State constants used in state change reporting. +const ( + StateStart = "start-container" + StatePause = "pause" + StateResume = "resume" + StateExit = "exit" + StateRestore = "restore" + StateExitProcess = "exit-process" + StateOOM = "oom" // fake state +) + +// CommonStateInfo contains the state info common to all platforms. +type CommonStateInfo struct { // FIXME: event? + State string + Pid uint32 + ExitCode uint32 + ProcessID string +} + +// Backend defines callbacks that the client of the library needs to implement. +type Backend interface { + StateChanged(containerID string, state StateInfo) error +} + +// Client provides access to containerd features. 
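+//
+// Editor's sketch (illustrative only): a Client is obtained from a Remote and
+// reports state transitions to the Backend passed to Remote.Client:
+//
+//	c, err := r.Client(b) // b implements Backend
+//	if err == nil {
+//		err = c.Signal(containerID, int(unix.SIGTERM))
+//	}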
+type Client interface {
+	GetServerVersion(ctx context.Context) (*ServerVersion, error)
+	Create(containerID string, checkpoint string, checkpointDir string, spec specs.Spec, attachStdio StdioCallback, options ...CreateOption) error
+	Signal(containerID string, sig int) error
+	SignalProcess(containerID string, processFriendlyName string, sig int) error
+	AddProcess(ctx context.Context, containerID, processFriendlyName string, process Process, attachStdio StdioCallback) (int, error)
+	Resize(containerID, processFriendlyName string, width, height int) error
+	Pause(containerID string) error
+	Resume(containerID string) error
+	Restore(containerID string, attachStdio StdioCallback, options ...CreateOption) error
+	Stats(containerID string) (*Stats, error)
+	GetPidsForContainer(containerID string) ([]int, error)
+	Summary(containerID string) ([]Summary, error)
+	UpdateResources(containerID string, resources Resources) error
+	CreateCheckpoint(containerID string, checkpointID string, checkpointDir string, exit bool) error
+	DeleteCheckpoint(containerID string, checkpointID string, checkpointDir string) error
+	ListCheckpoints(containerID string, checkpointDir string) (*Checkpoints, error)
+}
+
+// CreateOption allows configuring parameters of container creation.
+type CreateOption interface {
+	Apply(interface{}) error
+}
+
+// StdioCallback is called to connect a container or process stdio.
+type StdioCallback func(IOPipe) error
+
+// IOPipe contains the stdio streams.
+type IOPipe struct {
+	Stdin    io.WriteCloser
+	Stdout   io.ReadCloser
+	Stderr   io.ReadCloser
+	Terminal bool // Whether stderr is connected on Windows
+}
+
+// ServerVersion contains version information as retrieved from the
+// server.
+type ServerVersion struct {
+	containerd.GetServerVersionResponse
+}
diff --git a/vendor/github.com/moby/moby/libcontainerd/types_linux.go b/vendor/github.com/moby/moby/libcontainerd/types_linux.go
new file mode 100644
index 000000000..4f0635835
--- /dev/null
+++ b/vendor/github.com/moby/moby/libcontainerd/types_linux.go
@@ -0,0 +1,49 @@
+package libcontainerd
+
+import (
+	containerd "github.com/containerd/containerd/api/grpc/types"
+	"github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// Process contains information to start a specific application inside the container.
+type Process struct {
+	// Terminal creates an interactive terminal for the container.
+	Terminal bool `json:"terminal"`
+	// User specifies user information for the process.
+	User *specs.User `json:"user"`
+	// Args specifies the binary and arguments for the application to execute.
+	Args []string `json:"args"`
+	// Env populates the process environment for the process.
+	Env []string `json:"env,omitempty"`
+	// Cwd is the current working directory for the process and must be
+	// relative to the container's root.
+	Cwd *string `json:"cwd"`
+	// Capabilities are linux capabilities that are kept for the container.
+	Capabilities []string `json:"capabilities,omitempty"`
+	// Rlimits specifies rlimit options to apply to the process.
+	Rlimits []specs.LinuxRlimit `json:"rlimits,omitempty"`
+	// ApparmorProfile specifies the apparmor profile for the container.
+	ApparmorProfile *string `json:"apparmorProfile,omitempty"`
+	// SelinuxLabel specifies the selinux context that the container process is run as.
+	SelinuxLabel *string `json:"selinuxLabel,omitempty"`
+}
+
+// StateInfo contains a description of the new state the container has entered.
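+//
+// Editor's sketch (illustrative only; daemonBackend is a hypothetical
+// implementer): a Backend typically switches on the State constants when
+// StateChanged delivers a StateInfo:
+//
+//	func (b *daemonBackend) StateChanged(id string, si StateInfo) error {
+//		switch si.State {
+//		case StateExit:
+//			// si.ExitCode carries the exit status
+//		case StateOOM:
+//			// record the out-of-memory event
+//		}
+//		return nil
+//	}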
+type StateInfo struct {
+	CommonStateInfo
+
+	// Platform specific StateInfo
+	OOMKilled bool
+}
+
+// Stats contains stats properties from containerd.
+type Stats containerd.StatsResponse
+
+// Summary contains a container summary from containerd
+type Summary struct{}
+
+// Resources defines updatable container resource values.
+type Resources containerd.UpdateResource
+
+// Checkpoints contains the details of a checkpoint
+type Checkpoints containerd.ListCheckpointResponse
diff --git a/vendor/github.com/moby/moby/libcontainerd/types_solaris.go b/vendor/github.com/moby/moby/libcontainerd/types_solaris.go
new file mode 100644
index 000000000..2ab18eb0d
--- /dev/null
+++ b/vendor/github.com/moby/moby/libcontainerd/types_solaris.go
@@ -0,0 +1,43 @@
+package libcontainerd
+
+import (
+	containerd "github.com/containerd/containerd/api/grpc/types"
+	"github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// Process contains information to start a specific application inside the container.
+type Process struct {
+	// Terminal creates an interactive terminal for the container.
+	Terminal bool `json:"terminal"`
+	// User specifies user information for the process.
+	User *specs.User `json:"user"`
+	// Args specifies the binary and arguments for the application to execute.
+	Args []string `json:"args"`
+	// Env populates the process environment for the process.
+	Env []string `json:"env,omitempty"`
+	// Cwd is the current working directory for the process and must be
+	// relative to the container's root.
+	Cwd *string `json:"cwd"`
+	// Capabilities are linux capabilities that are kept for the container.
+	Capabilities []string `json:"capabilities,omitempty"`
+}
+
+// Stats contains stats properties from containerd.
+type Stats struct{}
+
+// Summary contains a container summary from containerd
+type Summary struct{}
+
+// StateInfo contains a description of the new state the container has entered.
+type StateInfo struct {
+	CommonStateInfo
+
+	// Platform specific StateInfo
+	OOMKilled bool
+}
+
+// Resources defines updatable container resource values.
+type Resources struct{}
+
+// Checkpoints contains the details of a checkpoint
+type Checkpoints containerd.ListCheckpointResponse
diff --git a/vendor/github.com/moby/moby/libcontainerd/types_windows.go b/vendor/github.com/moby/moby/libcontainerd/types_windows.go
new file mode 100644
index 000000000..317bfb020
--- /dev/null
+++ b/vendor/github.com/moby/moby/libcontainerd/types_windows.go
@@ -0,0 +1,79 @@
+package libcontainerd
+
+import (
+	"github.com/Microsoft/hcsshim"
+	"github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// Process contains information to start a specific application inside the container.
+type Process specs.Process
+
+// Summary contains a ProcessList item from HCS to support `top`
+type Summary hcsshim.ProcessListItem
+
+// StateInfo contains a description of the new state the container has entered.
+type StateInfo struct {
+	CommonStateInfo
+
+	// Platform specific StateInfo
+	UpdatePending bool // Indicates that there are some update operations pending that should be completed by a servicing container.
+}
+
+// Stats contains statistics from HCS
+type Stats hcsshim.Statistics
+
+// Resources defines updatable container resource values.
+type Resources struct{}
+
+// ServicingOption is a CreateOption with a no-op application that signifies
+// the container needs to be used for a Windows servicing operation.
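+//
+// Editor's sketch (illustrative only): Windows-specific options such as this
+// are passed through Client.Create as CreateOption values:
+//
+//	err := c.Create(id, "", "", spec, attachStdio,
+//		&ServicingOption{IsServicing: true},
+//		&FlushOption{IgnoreFlushesDuringBoot: true})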
+type ServicingOption struct {
+	IsServicing bool
+}
+
+// FlushOption is a CreateOption that signifies whether the container should be
+// started with flushes ignored until boot has completed. This is an optimisation
+// for first boot of a container.
+type FlushOption struct {
+	IgnoreFlushesDuringBoot bool
+}
+
+// HyperVIsolationOption is a CreateOption that indicates whether the runtime
+// should start the container as a Hyper-V container.
+type HyperVIsolationOption struct {
+	IsHyperV bool
+}
+
+// LayerOption is a CreateOption that indicates to the runtime the layer folder
+// and layer paths for a container.
+type LayerOption struct {
+	// LayerFolderPath is the path to the current layer folder. Empty for Hyper-V containers.
+	LayerFolderPath string `json:",omitempty"`
+	// Layer paths of the parent layers
+	LayerPaths []string
+}
+
+// NetworkEndpointsOption is a CreateOption that provides the runtime list
+// of network endpoints to which a container should be attached during its creation.
+type NetworkEndpointsOption struct {
+	Endpoints                []string
+	AllowUnqualifiedDNSQuery bool
+	DNSSearchList            []string
+	NetworkSharedContainerID string
+}
+
+// CredentialsOption is a CreateOption that indicates the credentials from
+// a credential spec to be used by the runtime
+type CredentialsOption struct {
+	Credentials string
+}
+
+// Checkpoint holds the details of a checkpoint (not supported on Windows)
+type Checkpoint struct {
+	Name string
+}
+
+// Checkpoints contains the details of a checkpoint
+type Checkpoints struct {
+	Checkpoints []*Checkpoint
+}
diff --git a/vendor/github.com/moby/moby/libcontainerd/utils_linux.go b/vendor/github.com/moby/moby/libcontainerd/utils_linux.go
new file mode 100644
index 000000000..170f90b8e
--- /dev/null
+++ b/vendor/github.com/moby/moby/libcontainerd/utils_linux.go
@@ -0,0 +1,63 @@
+package libcontainerd
+
+import (
+	"syscall"
+
+	containerd "github.com/containerd/containerd/api/grpc/types"
+	"github.com/opencontainers/runtime-spec/specs-go"
+	"golang.org/x/sys/unix"
+)
+
+func getRootIDs(s specs.Spec) (int, int, error) {
+	var hasUserns bool
+	for _, ns := range s.Linux.Namespaces {
+		if ns.Type == specs.UserNamespace {
+			hasUserns = true
+			break
+		}
+	}
+	if !hasUserns {
+		return 0, 0, nil
+	}
+	uid := hostIDFromMap(0, s.Linux.UIDMappings)
+	gid := hostIDFromMap(0, s.Linux.GIDMappings)
+	return uid, gid, nil
+}
+
+func hostIDFromMap(id uint32, mp []specs.LinuxIDMapping) int {
+	for _, m := range mp {
+		if id >= m.ContainerID && id <= m.ContainerID+m.Size-1 {
+			return int(m.HostID + id - m.ContainerID)
+		}
+	}
+	return 0
+}
+
+func systemPid(ctr *containerd.Container) uint32 {
+	var pid uint32
+	for _, p := range ctr.Processes {
+		if p.Pid == InitFriendlyName {
+			pid = p.SystemPid
+		}
+	}
+	return pid
+}
+
+func convertRlimits(sr []specs.LinuxRlimit) (cr []*containerd.Rlimit) {
+	for _, r := range sr {
+		cr = append(cr, &containerd.Rlimit{
+			Type: r.Type,
+			Hard: r.Hard,
+			Soft: r.Soft,
+		})
+	}
+	return
+}
+
+// setSysProcAttr returns a SysProcAttr that starts a new session when sid is
+// true and sets the parent-death signal to SIGKILL.
+func setSysProcAttr(sid bool) *syscall.SysProcAttr {
+	return &syscall.SysProcAttr{
+		Setsid:    sid,
+		Pdeathsig: unix.SIGKILL,
+	}
+}
diff --git a/vendor/github.com/moby/moby/libcontainerd/utils_solaris.go b/vendor/github.com/moby/moby/libcontainerd/utils_solaris.go
new file mode 100644
index 000000000..10ae59980
--- /dev/null
+++ b/vendor/github.com/moby/moby/libcontainerd/utils_solaris.go
@@ -0,0 +1,27 @@
+package libcontainerd
+
+import (
+	"syscall"
+
+	containerd "github.com/containerd/containerd/api/grpc/types"
"github.com/containerd/containerd/api/grpc/types" + "github.com/opencontainers/runtime-spec/specs-go" +) + +func getRootIDs(s specs.Spec) (int, int, error) { + return 0, 0, nil +} + +func systemPid(ctr *containerd.Container) uint32 { + var pid uint32 + for _, p := range ctr.Processes { + if p.Pid == InitFriendlyName { + pid = p.SystemPid + } + } + return pid +} + +// setPDeathSig sets the parent death signal to SIGKILL +func setSysProcAttr(sid bool) *syscall.SysProcAttr { + return nil +} diff --git a/vendor/github.com/moby/moby/libcontainerd/utils_windows.go b/vendor/github.com/moby/moby/libcontainerd/utils_windows.go new file mode 100644 index 000000000..41ac40d2c --- /dev/null +++ b/vendor/github.com/moby/moby/libcontainerd/utils_windows.go @@ -0,0 +1,46 @@ +package libcontainerd + +import "strings" + +// setupEnvironmentVariables converts a string array of environment variables +// into a map as required by the HCS. Source array is in format [v1=k1] [v2=k2] etc. +func setupEnvironmentVariables(a []string) map[string]string { + r := make(map[string]string) + for _, s := range a { + arr := strings.SplitN(s, "=", 2) + if len(arr) == 2 { + r[arr[0]] = arr[1] + } + } + return r +} + +// Apply for a servicing option is a no-op. +func (s *ServicingOption) Apply(interface{}) error { + return nil +} + +// Apply for the flush option is a no-op. +func (f *FlushOption) Apply(interface{}) error { + return nil +} + +// Apply for the hypervisolation option is a no-op. +func (h *HyperVIsolationOption) Apply(interface{}) error { + return nil +} + +// Apply for the layer option is a no-op. +func (h *LayerOption) Apply(interface{}) error { + return nil +} + +// Apply for the network endpoints option is a no-op. +func (s *NetworkEndpointsOption) Apply(interface{}) error { + return nil +} + +// Apply for the credentials option is a no-op. 
+func (s *CredentialsOption) Apply(interface{}) error { + return nil +} diff --git a/vendor/github.com/moby/moby/libcontainerd/utils_windows_test.go b/vendor/github.com/moby/moby/libcontainerd/utils_windows_test.go new file mode 100644 index 000000000..f3679bfb7 --- /dev/null +++ b/vendor/github.com/moby/moby/libcontainerd/utils_windows_test.go @@ -0,0 +1,13 @@ +package libcontainerd + +import ( + "testing" +) + +func TestEnvironmentParsing(t *testing.T) { + env := []string{"foo=bar", "car=hat", "a=b=c"} + result := setupEnvironmentVariables(env) + if len(result) != 3 || result["foo"] != "bar" || result["car"] != "hat" || result["a"] != "b=c" { + t.Fatalf("Expected map[foo:bar car:hat a:b=c], got %v", result) + } +} diff --git a/vendor/github.com/moby/moby/migrate/v1/migratev1.go b/vendor/github.com/moby/moby/migrate/v1/migratev1.go new file mode 100644 index 000000000..3cb882891 --- /dev/null +++ b/vendor/github.com/moby/moby/migrate/v1/migratev1.go @@ -0,0 +1,506 @@ +package v1 + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strconv" + "sync" + "time" + + "encoding/json" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/image" + imagev1 "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + refstore "github.com/docker/docker/reference" + "github.com/opencontainers/go-digest" +) + +type graphIDRegistrar interface { + RegisterByGraphID(string, layer.ChainID, layer.DiffID, string, int64) (layer.Layer, error) + Release(layer.Layer) ([]layer.Metadata, error) +} + +type graphIDMounter interface { + CreateRWLayerByGraphID(string, string, layer.ChainID) error +} + +type checksumCalculator interface { + ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID layer.DiffID, size int64, err error) +} + +const ( + graphDirName = "graph" + tarDataFileName = "tar-data.json.gz" + migrationFileName = ".migration-v1-images.json" + migrationTagsFileName = ".migration-v1-tags" + migrationDiffIDFileName = ".migration-diffid" + migrationSizeFileName = ".migration-size" + migrationTarDataFileName = ".migration-tardata" + containersDirName = "containers" + configFileNameLegacy = "config.json" + configFileName = "config.v2.json" + repositoriesFilePrefixLegacy = "repositories-" +) + +var ( + errUnsupported = errors.New("migration is not supported") +) + +// Migrate takes an old graph directory and transforms the metadata into the +// new format. 
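+//
+// Editor's sketch (illustrative call only; construction of the stores is
+// elided, and the root path is a typical default rather than a requirement):
+//
+//	// ls, is, rs, ms: layer, image, reference and distribution-metadata stores
+//	if err := v1.Migrate("/var/lib/docker", "overlay", ls, is, rs, ms); err != nil {
+//		logrus.Errorf("v1 migration failed: %v", err)
+//	}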
+func Migrate(root, driverName string, ls layer.Store, is image.Store, rs refstore.Store, ms metadata.Store) error { + graphDir := filepath.Join(root, graphDirName) + if _, err := os.Lstat(graphDir); os.IsNotExist(err) { + return nil + } + + mappings, err := restoreMappings(root) + if err != nil { + return err + } + + if cc, ok := ls.(checksumCalculator); ok { + CalculateLayerChecksums(root, cc, mappings) + } + + if registrar, ok := ls.(graphIDRegistrar); !ok { + return errUnsupported + } else if err := migrateImages(root, registrar, is, ms, mappings); err != nil { + return err + } + + err = saveMappings(root, mappings) + if err != nil { + return err + } + + if mounter, ok := ls.(graphIDMounter); !ok { + return errUnsupported + } else if err := migrateContainers(root, mounter, is, mappings); err != nil { + return err + } + + if err := migrateRefs(root, driverName, rs, mappings); err != nil { + return err + } + + return nil +} + +// CalculateLayerChecksums walks an old graph directory and calculates checksums +// for each layer. These checksums are later used for migration. +func CalculateLayerChecksums(root string, ls checksumCalculator, mappings map[string]image.ID) { + graphDir := filepath.Join(root, graphDirName) + // spawn some extra workers also for maximum performance because the process is bounded by both cpu and io + workers := runtime.NumCPU() * 3 + workQueue := make(chan string, workers) + + wg := sync.WaitGroup{} + + for i := 0; i < workers; i++ { + wg.Add(1) + go func() { + for id := range workQueue { + start := time.Now() + if err := calculateLayerChecksum(graphDir, id, ls); err != nil { + logrus.Errorf("could not calculate checksum for %q, %q", id, err) + } + elapsed := time.Since(start) + logrus.Debugf("layer %s took %.2f seconds", id, elapsed.Seconds()) + } + wg.Done() + }() + } + + dir, err := ioutil.ReadDir(graphDir) + if err != nil { + logrus.Errorf("could not read directory %q", graphDir) + return + } + for _, v := range dir { + v1ID := v.Name() + if err := imagev1.ValidateID(v1ID); err != nil { + continue + } + if _, ok := mappings[v1ID]; ok { // support old migrations without helper files + continue + } + workQueue <- v1ID + } + close(workQueue) + wg.Wait() +} + +func calculateLayerChecksum(graphDir, id string, ls checksumCalculator) error { + diffIDFile := filepath.Join(graphDir, id, migrationDiffIDFileName) + if _, err := os.Lstat(diffIDFile); err == nil { + return nil + } else if !os.IsNotExist(err) { + return err + } + + parent, err := getParent(filepath.Join(graphDir, id)) + if err != nil { + return err + } + + diffID, size, err := ls.ChecksumForGraphID(id, parent, filepath.Join(graphDir, id, tarDataFileName), filepath.Join(graphDir, id, migrationTarDataFileName)) + if err != nil { + return err + } + + if err := ioutil.WriteFile(filepath.Join(graphDir, id, migrationSizeFileName), []byte(strconv.Itoa(int(size))), 0600); err != nil { + return err + } + + if err := ioutils.AtomicWriteFile(filepath.Join(graphDir, id, migrationDiffIDFileName), []byte(diffID), 0600); err != nil { + return err + } + + logrus.Infof("calculated checksum for layer %s: %s", id, diffID) + return nil +} + +func restoreMappings(root string) (map[string]image.ID, error) { + mappings := make(map[string]image.ID) + + mfile := filepath.Join(root, migrationFileName) + f, err := os.Open(mfile) + if err != nil && !os.IsNotExist(err) { + return nil, err + } else if err == nil { + err := json.NewDecoder(f).Decode(&mappings) + if err != nil { + f.Close() + return nil, err + } + f.Close() + } + + return 
mappings, nil
+}
+
+func saveMappings(root string, mappings map[string]image.ID) error {
+	mfile := filepath.Join(root, migrationFileName)
+	f, err := os.OpenFile(mfile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	return json.NewEncoder(f).Encode(mappings)
+}
+
+func migrateImages(root string, ls graphIDRegistrar, is image.Store, ms metadata.Store, mappings map[string]image.ID) error {
+	graphDir := filepath.Join(root, graphDirName)
+
+	dir, err := ioutil.ReadDir(graphDir)
+	if err != nil {
+		return err
+	}
+	for _, v := range dir {
+		v1ID := v.Name()
+		if err := imagev1.ValidateID(v1ID); err != nil {
+			continue
+		}
+		if _, exists := mappings[v1ID]; exists {
+			continue
+		}
+		if err := migrateImage(v1ID, root, ls, is, ms, mappings); err != nil {
+			continue
+		}
+	}
+
+	return nil
+}
+
+func migrateContainers(root string, ls graphIDMounter, is image.Store, imageMappings map[string]image.ID) error {
+	containersDir := filepath.Join(root, containersDirName)
+	dir, err := ioutil.ReadDir(containersDir)
+	if err != nil {
+		return err
+	}
+	for _, v := range dir {
+		id := v.Name()
+
+		if _, err := os.Stat(filepath.Join(containersDir, id, configFileName)); err == nil {
+			continue
+		}
+
+		containerJSON, err := ioutil.ReadFile(filepath.Join(containersDir, id, configFileNameLegacy))
+		if err != nil {
+			logrus.Errorf("migrate container error: %v", err)
+			continue
+		}
+
+		var c map[string]*json.RawMessage
+		if err := json.Unmarshal(containerJSON, &c); err != nil {
+			logrus.Errorf("migrate container error: %v", err)
+			continue
+		}
+
+		imageStrJSON, ok := c["Image"]
+		if !ok {
+			return fmt.Errorf("invalid container configuration for %v", id)
+		}
+
+		var image string
+		if err := json.Unmarshal([]byte(*imageStrJSON), &image); err != nil {
+			logrus.Errorf("migrate container error: %v", err)
+			continue
+		}
+
+		imageID, ok := imageMappings[image]
+		if !ok {
+			logrus.Errorf("image not migrated %v", image) // non-fatal error
+			continue
+		}
+
+		c["Image"] = rawJSON(imageID)
+
+		containerJSON, err = json.Marshal(c)
+		if err != nil {
+			return err
+		}
+
+		if err := ioutil.WriteFile(filepath.Join(containersDir, id, configFileName), containerJSON, 0600); err != nil {
+			return err
+		}
+
+		img, err := is.Get(imageID)
+		if err != nil {
+			return err
+		}
+
+		if err := ls.CreateRWLayerByGraphID(id, id, img.RootFS.ChainID()); err != nil {
+			logrus.Errorf("migrate container error: %v", err)
+			continue
+		}
+
+		logrus.Infof("migrated container %s to point to %s", id, imageID)
+
+	}
+	return nil
+}
+
+type refAdder interface {
+	AddTag(ref reference.Named, id digest.Digest, force bool) error
+	AddDigest(ref reference.Canonical, id digest.Digest, force bool) error
+}
+
+func migrateRefs(root, driverName string, rs refAdder, mappings map[string]image.ID) error {
+	migrationFile := filepath.Join(root, migrationTagsFileName)
+	if _, err := os.Lstat(migrationFile); !os.IsNotExist(err) {
+		return err
+	}
+
+	type repositories struct {
+		Repositories map[string]map[string]string
+	}
+
+	var repos repositories
+
+	f, err := os.Open(filepath.Join(root, repositoriesFilePrefixLegacy+driverName))
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil
+		}
+		return err
+	}
+	defer f.Close()
+	if err := json.NewDecoder(f).Decode(&repos); err != nil {
+		return err
+	}
+
+	for name, repo := range repos.Repositories {
+		for tag, id := range repo {
+			if strongID, exists := mappings[id]; exists {
+				ref, err := reference.ParseNormalizedNamed(name)
+				if err != nil {
+					logrus.Errorf("migrate
tags: invalid name %q, %q", name, err) + continue + } + if !reference.IsNameOnly(ref) { + logrus.Errorf("migrate tags: invalid name %q, unexpected tag or digest", name) + continue + } + if dgst, err := digest.Parse(tag); err == nil { + canonical, err := reference.WithDigest(reference.TrimNamed(ref), dgst) + if err != nil { + logrus.Errorf("migrate tags: invalid digest %q, %q", dgst, err) + continue + } + if err := rs.AddDigest(canonical, strongID.Digest(), false); err != nil { + logrus.Errorf("can't migrate digest %q for %q, err: %q", reference.FamiliarString(ref), strongID, err) + } + } else { + tagRef, err := reference.WithTag(ref, tag) + if err != nil { + logrus.Errorf("migrate tags: invalid tag %q, %q", tag, err) + continue + } + if err := rs.AddTag(tagRef, strongID.Digest(), false); err != nil { + logrus.Errorf("can't migrate tag %q for %q, err: %q", reference.FamiliarString(ref), strongID, err) + } + } + logrus.Infof("migrated tag %s:%s to point to %s", name, tag, strongID) + } + } + } + + mf, err := os.Create(migrationFile) + if err != nil { + return err + } + mf.Close() + + return nil +} + +func getParent(confDir string) (string, error) { + jsonFile := filepath.Join(confDir, "json") + imageJSON, err := ioutil.ReadFile(jsonFile) + if err != nil { + return "", err + } + var parent struct { + Parent string + ParentID digest.Digest `json:"parent_id"` + } + if err := json.Unmarshal(imageJSON, &parent); err != nil { + return "", err + } + if parent.Parent == "" && parent.ParentID != "" { // v1.9 + parent.Parent = parent.ParentID.Hex() + } + // compatibilityID for parent + parentCompatibilityID, err := ioutil.ReadFile(filepath.Join(confDir, "parent")) + if err == nil && len(parentCompatibilityID) > 0 { + parent.Parent = string(parentCompatibilityID) + } + return parent.Parent, nil +} + +func migrateImage(id, root string, ls graphIDRegistrar, is image.Store, ms metadata.Store, mappings map[string]image.ID) (err error) { + defer func() { + if err != nil { + logrus.Errorf("migration failed for %v, err: %v", id, err) + } + }() + + parent, err := getParent(filepath.Join(root, graphDirName, id)) + if err != nil { + return err + } + + var parentID image.ID + if parent != "" { + var exists bool + if parentID, exists = mappings[parent]; !exists { + if err := migrateImage(parent, root, ls, is, ms, mappings); err != nil { + // todo: fail or allow broken chains? 
+ return err + } + parentID = mappings[parent] + } + } + + rootFS := image.NewRootFS() + var history []image.History + + if parentID != "" { + parentImg, err := is.Get(parentID) + if err != nil { + return err + } + + rootFS = parentImg.RootFS + history = parentImg.History + } + + diffIDData, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, migrationDiffIDFileName)) + if err != nil { + return err + } + diffID, err := digest.Parse(string(diffIDData)) + if err != nil { + return err + } + + sizeStr, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, migrationSizeFileName)) + if err != nil { + return err + } + size, err := strconv.ParseInt(string(sizeStr), 10, 64) + if err != nil { + return err + } + + layer, err := ls.RegisterByGraphID(id, rootFS.ChainID(), layer.DiffID(diffID), filepath.Join(root, graphDirName, id, migrationTarDataFileName), size) + if err != nil { + return err + } + logrus.Infof("migrated layer %s to %s", id, layer.DiffID()) + + jsonFile := filepath.Join(root, graphDirName, id, "json") + imageJSON, err := ioutil.ReadFile(jsonFile) + if err != nil { + return err + } + + h, err := imagev1.HistoryFromConfig(imageJSON, false) + if err != nil { + return err + } + history = append(history, h) + + rootFS.Append(layer.DiffID()) + + config, err := imagev1.MakeConfigFromV1Config(imageJSON, rootFS, history) + if err != nil { + return err + } + strongID, err := is.Create(config) + if err != nil { + return err + } + logrus.Infof("migrated image %s to %s", id, strongID) + + if parentID != "" { + if err := is.SetParent(strongID, parentID); err != nil { + return err + } + } + + checksum, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, "checksum")) + if err == nil { // best effort + dgst, err := digest.Parse(string(checksum)) + if err == nil { + V2MetadataService := metadata.NewV2MetadataService(ms) + V2MetadataService.Add(layer.DiffID(), metadata.V2Metadata{Digest: dgst}) + } + } + _, err = ls.Release(layer) + if err != nil { + return err + } + + mappings[id] = strongID + return +} + +func rawJSON(value interface{}) *json.RawMessage { + jsonval, err := json.Marshal(value) + if err != nil { + return nil + } + return (*json.RawMessage)(&jsonval) +} diff --git a/vendor/github.com/moby/moby/migrate/v1/migratev1_test.go b/vendor/github.com/moby/moby/migrate/v1/migratev1_test.go new file mode 100644 index 000000000..51b674122 --- /dev/null +++ b/vendor/github.com/moby/moby/migrate/v1/migratev1_test.go @@ -0,0 +1,442 @@ +package v1 + +import ( + "crypto/rand" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "runtime" + "testing" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/opencontainers/go-digest" +) + +func TestMigrateRefs(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "migrate-tags") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + ioutil.WriteFile(filepath.Join(tmpdir, "repositories-generic"), []byte(`{"Repositories":{"busybox":{"latest":"b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108","sha256:16a2a52884c2a9481ed267c2d46483eac7693b813a63132368ab098a71303f8a":"b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108"},"registry":{"2":"5d165b8e4b203685301c815e95663231691d383fd5e3d3185d1ce3f8dddead3d","latest":"8d5547a9f329b1d3f93198cd661fb5117e5a96b721c5cf9a2c389e7dd4877128"}}}`), 0600) + + ta := &mockTagAdder{} + err = 
migrateRefs(tmpdir, "generic", ta, map[string]image.ID{ + "5d165b8e4b203685301c815e95663231691d383fd5e3d3185d1ce3f8dddead3d": image.ID("sha256:2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"), + "b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108": image.ID("sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9"), + "abcdef3434c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108": image.ID("sha256:56434342345ae68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"), + }) + if err != nil { + t.Fatal(err) + } + + expected := map[string]string{ + "docker.io/library/busybox:latest": "sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9", + "docker.io/library/busybox@sha256:16a2a52884c2a9481ed267c2d46483eac7693b813a63132368ab098a71303f8a": "sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9", + "docker.io/library/registry:2": "sha256:2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae", + } + + if !reflect.DeepEqual(expected, ta.refs) { + t.Fatalf("Invalid migrated tags: expected %q, got %q", expected, ta.refs) + } + + // second migration is no-op + ioutil.WriteFile(filepath.Join(tmpdir, "repositories-generic"), []byte(`{"Repositories":{"busybox":{"latest":"b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108"`), 0600) + err = migrateRefs(tmpdir, "generic", ta, map[string]image.ID{ + "b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108": image.ID("sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9"), + }) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(expected, ta.refs) { + t.Fatalf("Invalid migrated tags: expected %q, got %q", expected, ta.refs) + } +} + +func TestMigrateContainers(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + if runtime.GOARCH != "amd64" { + t.Skip("Test tailored to amd64 architecture") + } + tmpdir, err := ioutil.TempDir("", "migrate-containers") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + err = addContainer(tmpdir, 
`{"State":{"Running":false,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":0,"ExitCode":0,"Error":"","StartedAt":"2015-11-10T21:42:40.604267436Z","FinishedAt":"2015-11-10T21:42:41.869265487Z"},"ID":"f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c","Created":"2015-11-10T21:42:40.433831551Z","Path":"sh","Args":[],"Config":{"Hostname":"f780ee3f80e6","Domainname":"","User":"","AttachStdin":true,"AttachStdout":true,"AttachStderr":true,"Tty":true,"OpenStdin":true,"StdinOnce":true,"Env":null,"Cmd":["sh"],"Image":"busybox","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":{}},"Image":"2c5ac3f849df8627fcf2822727f87c57f38b7129d3604fbc11d861fe856ff093","NetworkSettings":{"Bridge":"","EndpointID":"","Gateway":"","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"HairpinMode":false,"IPAddress":"","IPPrefixLen":0,"IPv6Gateway":"","LinkLocalIPv6Address":"","LinkLocalIPv6PrefixLen":0,"MacAddress":"","NetworkID":"","PortMapping":null,"Ports":null,"SandboxKey":"","SecondaryIPAddresses":null,"SecondaryIPv6Addresses":null},"ResolvConfPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/resolv.conf","HostnamePath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/hostname","HostsPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/hosts","LogPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c-json.log","Name":"/determined_euclid","Driver":"overlay","ExecDriver":"native-0.2","MountLabel":"","ProcessLabel":"","RestartCount":0,"UpdateDns":false,"HasBeenStartedBefore":false,"MountPoints":{},"Volumes":{},"VolumesRW":{},"AppArmorProfile":""}`) + if err != nil { + t.Fatal(err) + } + + // container with invalid image + err = addContainer(tmpdir, 
`{"State":{"Running":false,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":0,"ExitCode":0,"Error":"","StartedAt":"2015-11-10T21:42:40.604267436Z","FinishedAt":"2015-11-10T21:42:41.869265487Z"},"ID":"e780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c","Created":"2015-11-10T21:42:40.433831551Z","Path":"sh","Args":[],"Config":{"Hostname":"f780ee3f80e6","Domainname":"","User":"","AttachStdin":true,"AttachStdout":true,"AttachStderr":true,"Tty":true,"OpenStdin":true,"StdinOnce":true,"Env":null,"Cmd":["sh"],"Image":"busybox","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":{}},"Image":"4c5ac3f849df8627fcf2822727f87c57f38b7129d3604fbc11d861fe856ff093","NetworkSettings":{"Bridge":"","EndpointID":"","Gateway":"","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"HairpinMode":false,"IPAddress":"","IPPrefixLen":0,"IPv6Gateway":"","LinkLocalIPv6Address":"","LinkLocalIPv6PrefixLen":0,"MacAddress":"","NetworkID":"","PortMapping":null,"Ports":null,"SandboxKey":"","SecondaryIPAddresses":null,"SecondaryIPv6Addresses":null},"ResolvConfPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/resolv.conf","HostnamePath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/hostname","HostsPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/hosts","LogPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c-json.log","Name":"/determined_euclid","Driver":"overlay","ExecDriver":"native-0.2","MountLabel":"","ProcessLabel":"","RestartCount":0,"UpdateDns":false,"HasBeenStartedBefore":false,"MountPoints":{},"Volumes":{},"VolumesRW":{},"AppArmorProfile":""}`) + if err != nil { + t.Fatal(err) + } + + ls := &mockMounter{} + + ifs, err := image.NewFSStoreBackend(filepath.Join(tmpdir, "imagedb")) + if err != nil { + t.Fatal(err) + } + + is, err := image.NewImageStore(ifs, runtime.GOOS, ls) + if err != nil { + t.Fatal(err) + } + + imgID, err := is.Create([]byte(`{"architecture":"amd64","config":{"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Cmd":["sh"],"Entrypoint":null,"Env":null,"Hostname":"23304fc829f9","Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Labels":null,"OnBuild":null,"OpenStdin":false,"StdinOnce":false,"Tty":false,"Volumes":null,"WorkingDir":"","Domainname":"","User":""},"container":"349b014153779e30093d94f6df2a43c7a0a164e05aa207389917b540add39b51","container_config":{"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Cmd":["/bin/sh","-c","#(nop) CMD [\"sh\"]"],"Entrypoint":null,"Env":null,"Hostname":"23304fc829f9","Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Labels":null,"OnBuild":null,"OpenStdin":false,"StdinOnce":false,"Tty":false,"Volumes":null,"WorkingDir":"","Domainname":"","User":""},"created":"2015-10-31T22:22:55.613815829Z","docker_version":"1.8.2","history":[{"created":"2015-10-31T22:22:54.690851953Z","created_by":"/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"},{"created":"2015-10-31T22:22:55.613815829Z","created_by":"/bin/sh -c #(nop) CMD [\"sh\"]"}],"os":"linux","rootfs":{"type":"layers","diff_ids":["sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1","sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"]}}`)) + if err != nil { + 
t.Fatal(err) + } + + err = migrateContainers(tmpdir, ls, is, map[string]image.ID{ + "2c5ac3f849df8627fcf2822727f87c57f38b7129d3604fbc11d861fe856ff093": imgID, + }) + if err != nil { + t.Fatal(err) + } + + expected := []mountInfo{{ + "f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c", + "f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c", + "sha256:c3191d32a37d7159b2e30830937d2e30268ad6c375a773a8994911a3aba9b93f", + }} + if !reflect.DeepEqual(expected, ls.mounts) { + t.Fatalf("invalid mounts: expected %q, got %q", expected, ls.mounts) + } + + if actual, expected := ls.count, 0; actual != expected { + t.Fatalf("invalid active mounts: expected %d, got %d", expected, actual) + } + + config2, err := ioutil.ReadFile(filepath.Join(tmpdir, "containers", "f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c", "config.v2.json")) + if err != nil { + t.Fatal(err) + } + var config struct{ Image string } + err = json.Unmarshal(config2, &config) + if err != nil { + t.Fatal(err) + } + + if actual, expected := config.Image, string(imgID); actual != expected { + t.Fatalf("invalid image pointer in migrated config: expected %q, got %q", expected, actual) + } + +} + +func TestMigrateImages(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + if runtime.GOARCH != "amd64" { + t.Skip("Test tailored to amd64 architecture") + } + tmpdir, err := ioutil.TempDir("", "migrate-images") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + // busybox from 1.9 + id1, err := addImage(tmpdir, `{"architecture":"amd64","config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"container":"23304fc829f9b9349416f6eb1afec162907eba3a328f51d53a17f8986f865d65","container_config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"],"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"created":"2015-10-31T22:22:54.690851953Z","docker_version":"1.8.2","layer_id":"sha256:55dc925c23d1ed82551fd018c27ac3ee731377b6bad3963a2a4e76e753d70e57","os":"linux"}`, "", "") + if err != nil { + t.Fatal(err) + } + + id2, err := addImage(tmpdir, `{"architecture":"amd64","config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["sh"],"Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"container":"349b014153779e30093d94f6df2a43c7a0a164e05aa207389917b540add39b51","container_config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) CMD 
[\"sh\"]"],"Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"created":"2015-10-31T22:22:55.613815829Z","docker_version":"1.8.2","layer_id":"sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4","os":"linux","parent_id":"sha256:039b63dd2cbaa10d6015ea574392530571ed8d7b174090f032211285a71881d0"}`, id1, "") + if err != nil { + t.Fatal(err) + } + + ls := &mockRegistrar{} + + ifs, err := image.NewFSStoreBackend(filepath.Join(tmpdir, "imagedb")) + if err != nil { + t.Fatal(err) + } + + is, err := image.NewImageStore(ifs, runtime.GOOS, ls) + if err != nil { + t.Fatal(err) + } + + ms, err := metadata.NewFSMetadataStore(filepath.Join(tmpdir, "distribution"), runtime.GOOS) + if err != nil { + t.Fatal(err) + } + mappings := make(map[string]image.ID) + + err = migrateImages(tmpdir, ls, is, ms, mappings) + if err != nil { + t.Fatal(err) + } + + expected := map[string]image.ID{ + id1: image.ID("sha256:ca406eaf9c26898414ff5b7b3a023c33310759d6203be0663dbf1b3a712f432d"), + id2: image.ID("sha256:a488bec94bb96b26a968f913d25ef7d8d204d727ca328b52b4b059c7d03260b6"), + } + + if !reflect.DeepEqual(mappings, expected) { + t.Fatalf("invalid image mappings: expected %q, got %q", expected, mappings) + } + + if actual, expected := ls.count, 2; actual != expected { + t.Fatalf("invalid register count: expected %q, got %q", expected, actual) + } + ls.count = 0 + + // next images are busybox from 1.8.2 + _, err = addImage(tmpdir, `{"id":"17583c7dd0dae6244203b8029733bdb7d17fccbb2b5d93e2b24cf48b8bfd06e2","parent":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","created":"2015-10-31T22:22:55.613815829Z","container":"349b014153779e30093d94f6df2a43c7a0a164e05aa207389917b540add39b51","container_config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"PublishService":"","Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) CMD [\"sh\"]"],"Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":null},"docker_version":"1.8.2","config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"PublishService":"","Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["sh"],"Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":null},"architecture":"amd64","os":"linux","Size":0}`, "", "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") + if err != nil { + t.Fatal(err) + } + + _, err = addImage(tmpdir, `{"id":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","created":"2015-10-31T22:22:54.690851953Z","container":"23304fc829f9b9349416f6eb1afec162907eba3a328f51d53a17f8986f865d65","container_config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"PublishService":"","Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in 
/"],"Image":"","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":null},"docker_version":"1.8.2","config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"PublishService":"","Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":null},"architecture":"amd64","os":"linux","Size":1108935}`, "", "sha256:55dc925c23d1ed82551fd018c27ac3ee731377b6bad3963a2a4e76e753d70e57") + if err != nil { + t.Fatal(err) + } + + err = migrateImages(tmpdir, ls, is, ms, mappings) + if err != nil { + t.Fatal(err) + } + + expected["d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498"] = image.ID("sha256:c091bb33854e57e6902b74c08719856d30b5593c7db6143b2b48376b8a588395") + expected["17583c7dd0dae6244203b8029733bdb7d17fccbb2b5d93e2b24cf48b8bfd06e2"] = image.ID("sha256:d963020e755ff2715b936065949472c1f8a6300144b922992a1a421999e71f07") + + if actual, expected := ls.count, 2; actual != expected { + t.Fatalf("invalid register count: expected %q, got %q", expected, actual) + } + + v2MetadataService := metadata.NewV2MetadataService(ms) + receivedMetadata, err := v2MetadataService.GetMetadata(layer.EmptyLayer.DiffID()) + if err != nil { + t.Fatal(err) + } + + expectedMetadata := []metadata.V2Metadata{ + {Digest: digest.Digest("sha256:55dc925c23d1ed82551fd018c27ac3ee731377b6bad3963a2a4e76e753d70e57")}, + {Digest: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + } + + if !reflect.DeepEqual(expectedMetadata, receivedMetadata) { + t.Fatalf("invalid metadata: expected %q, got %q", expectedMetadata, receivedMetadata) + } + +} + +func TestMigrateUnsupported(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "migrate-empty") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + err = os.MkdirAll(filepath.Join(tmpdir, "graph"), 0700) + if err != nil { + t.Fatal(err) + } + + err = Migrate(tmpdir, "generic", nil, nil, nil, nil) + if err != errUnsupported { + t.Fatalf("expected unsupported error, got %q", err) + } +} + +func TestMigrateEmptyDir(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "migrate-empty") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + err = Migrate(tmpdir, "generic", nil, nil, nil, nil) + if err != nil { + t.Fatal(err) + } +} + +func addImage(dest, jsonConfig, parent, checksum string) (string, error) { + var config struct{ ID string } + if err := json.Unmarshal([]byte(jsonConfig), &config); err != nil { + return "", err + } + if config.ID == "" { + b := make([]byte, 32) + rand.Read(b) + config.ID = hex.EncodeToString(b) + } + contDir := filepath.Join(dest, "graph", config.ID) + if err := os.MkdirAll(contDir, 0700); err != nil { + return "", err + } + if err := ioutil.WriteFile(filepath.Join(contDir, "json"), []byte(jsonConfig), 0600); err != nil { + return "", err + } + if checksum != "" { + if err := ioutil.WriteFile(filepath.Join(contDir, "checksum"), []byte(checksum), 0600); err != nil { + return "", err + } + } + if err := ioutil.WriteFile(filepath.Join(contDir, ".migration-diffid"), []byte(layer.EmptyLayer.DiffID()), 0600); err != nil { + return "", err + } + if err := ioutil.WriteFile(filepath.Join(contDir, ".migration-size"), []byte("0"), 0600); err != nil { + return "", err + } + if parent != 
"" { + if err := ioutil.WriteFile(filepath.Join(contDir, "parent"), []byte(parent), 0600); err != nil { + return "", err + } + } + if checksum != "" { + if err := ioutil.WriteFile(filepath.Join(contDir, "checksum"), []byte(checksum), 0600); err != nil { + return "", err + } + } + return config.ID, nil +} + +func addContainer(dest, jsonConfig string) error { + var config struct{ ID string } + if err := json.Unmarshal([]byte(jsonConfig), &config); err != nil { + return err + } + contDir := filepath.Join(dest, "containers", config.ID) + if err := os.MkdirAll(contDir, 0700); err != nil { + return err + } + if err := ioutil.WriteFile(filepath.Join(contDir, "config.json"), []byte(jsonConfig), 0600); err != nil { + return err + } + return nil +} + +type mockTagAdder struct { + refs map[string]string +} + +func (t *mockTagAdder) AddTag(ref reference.Named, id digest.Digest, force bool) error { + if t.refs == nil { + t.refs = make(map[string]string) + } + t.refs[ref.String()] = id.String() + return nil +} +func (t *mockTagAdder) AddDigest(ref reference.Canonical, id digest.Digest, force bool) error { + return t.AddTag(ref, id, force) +} + +type mockRegistrar struct { + layers map[layer.ChainID]*mockLayer + count int +} + +func (r *mockRegistrar) RegisterByGraphID(graphID string, parent layer.ChainID, diffID layer.DiffID, tarDataFile string, size int64) (layer.Layer, error) { + r.count++ + l := &mockLayer{} + if parent != "" { + p, exists := r.layers[parent] + if !exists { + return nil, fmt.Errorf("invalid parent %q", parent) + } + l.parent = p + l.diffIDs = append(l.diffIDs, p.diffIDs...) + } + l.diffIDs = append(l.diffIDs, diffID) + if r.layers == nil { + r.layers = make(map[layer.ChainID]*mockLayer) + } + r.layers[l.ChainID()] = l + return l, nil +} +func (r *mockRegistrar) Release(l layer.Layer) ([]layer.Metadata, error) { + return nil, nil +} +func (r *mockRegistrar) Get(layer.ChainID) (layer.Layer, error) { + return nil, nil +} + +type mountInfo struct { + name, graphID, parent string +} +type mockMounter struct { + mounts []mountInfo + count int +} + +func (r *mockMounter) CreateRWLayerByGraphID(name string, graphID string, parent layer.ChainID) error { + r.mounts = append(r.mounts, mountInfo{name, graphID, string(parent)}) + return nil +} +func (r *mockMounter) Unmount(string) error { + r.count-- + return nil +} +func (r *mockMounter) Get(layer.ChainID) (layer.Layer, error) { + return nil, nil +} + +func (r *mockMounter) Release(layer.Layer) ([]layer.Metadata, error) { + return nil, nil +} + +type mockLayer struct { + diffIDs []layer.DiffID + parent *mockLayer +} + +func (l *mockLayer) TarStream() (io.ReadCloser, error) { + return nil, nil +} +func (l *mockLayer) TarStreamFrom(layer.ChainID) (io.ReadCloser, error) { + return nil, nil +} + +func (l *mockLayer) ChainID() layer.ChainID { + return layer.CreateChainID(l.diffIDs) +} + +func (l *mockLayer) DiffID() layer.DiffID { + return l.diffIDs[len(l.diffIDs)-1] +} + +func (l *mockLayer) Parent() layer.Layer { + if l.parent == nil { + return nil + } + return l.parent +} + +func (l *mockLayer) Size() (int64, error) { + return 0, nil +} + +func (l *mockLayer) DiffSize() (int64, error) { + return 0, nil +} + +func (l *mockLayer) Platform() layer.Platform { + return "" +} + +func (l *mockLayer) Metadata() (map[string]string, error) { + return nil, nil +} diff --git a/vendor/github.com/moby/moby/oci/defaults.go b/vendor/github.com/moby/moby/oci/defaults.go new file mode 100644 index 000000000..083726e12 --- /dev/null +++ 
b/vendor/github.com/moby/moby/oci/defaults.go @@ -0,0 +1,221 @@ +package oci + +import ( + "os" + "runtime" + + "github.com/opencontainers/runtime-spec/specs-go" +) + +func iPtr(i int64) *int64 { return &i } +func u32Ptr(i int64) *uint32 { u := uint32(i); return &u } +func fmPtr(i int64) *os.FileMode { fm := os.FileMode(i); return &fm } + +func defaultCapabilities() []string { + return []string{ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FSETID", + "CAP_FOWNER", + "CAP_MKNOD", + "CAP_NET_RAW", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETFCAP", + "CAP_SETPCAP", + "CAP_NET_BIND_SERVICE", + "CAP_SYS_CHROOT", + "CAP_KILL", + "CAP_AUDIT_WRITE", + } +} + +// DefaultSpec returns the default spec used by docker for the current Platform +func DefaultSpec() specs.Spec { + return DefaultOSSpec(runtime.GOOS) +} + +// DefaultOSSpec returns the spec for a given OS +func DefaultOSSpec(osName string) specs.Spec { + if osName == "windows" { + return DefaultWindowsSpec() + } else if osName == "solaris" { + return DefaultSolarisSpec() + } else { + return DefaultLinuxSpec() + } +} + +// DefaultWindowsSpec create a default spec for running Windows containers +func DefaultWindowsSpec() specs.Spec { + return specs.Spec{ + Version: specs.Version, + Platform: specs.Platform{ + OS: runtime.GOOS, + Arch: runtime.GOARCH, + }, + Windows: &specs.Windows{}, + } +} + +// DefaultSolarisSpec create a default spec for running Solaris containers +func DefaultSolarisSpec() specs.Spec { + s := specs.Spec{ + Version: "0.6.0", + Platform: specs.Platform{ + OS: "SunOS", + Arch: runtime.GOARCH, + }, + } + s.Solaris = &specs.Solaris{} + return s +} + +// DefaultLinuxSpec create a default spec for running Linux containers +func DefaultLinuxSpec() specs.Spec { + s := specs.Spec{ + Version: specs.Version, + Platform: specs.Platform{ + OS: "linux", + Arch: runtime.GOARCH, + }, + } + s.Mounts = []specs.Mount{ + { + Destination: "/proc", + Type: "proc", + Source: "proc", + Options: []string{"nosuid", "noexec", "nodev"}, + }, + { + Destination: "/dev", + Type: "tmpfs", + Source: "tmpfs", + Options: []string{"nosuid", "strictatime", "mode=755", "size=65536k"}, + }, + { + Destination: "/dev/pts", + Type: "devpts", + Source: "devpts", + Options: []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"}, + }, + { + Destination: "/sys", + Type: "sysfs", + Source: "sysfs", + Options: []string{"nosuid", "noexec", "nodev", "ro"}, + }, + { + Destination: "/sys/fs/cgroup", + Type: "cgroup", + Source: "cgroup", + Options: []string{"ro", "nosuid", "noexec", "nodev"}, + }, + { + Destination: "/dev/mqueue", + Type: "mqueue", + Source: "mqueue", + Options: []string{"nosuid", "noexec", "nodev"}, + }, + } + s.Process.Capabilities = &specs.LinuxCapabilities{ + Bounding: defaultCapabilities(), + Permitted: defaultCapabilities(), + Inheritable: defaultCapabilities(), + Effective: defaultCapabilities(), + } + + s.Linux = &specs.Linux{ + MaskedPaths: []string{ + "/proc/kcore", + "/proc/latency_stats", + "/proc/timer_list", + "/proc/timer_stats", + "/proc/sched_debug", + }, + ReadonlyPaths: []string{ + "/proc/asound", + "/proc/bus", + "/proc/fs", + "/proc/irq", + "/proc/sys", + "/proc/sysrq-trigger", + }, + Namespaces: []specs.LinuxNamespace{ + {Type: "mount"}, + {Type: "network"}, + {Type: "uts"}, + {Type: "pid"}, + {Type: "ipc"}, + }, + // Devices implicitly contains the following devices: + // null, zero, full, random, urandom, tty, console, and ptmx. + // ptmx is a bind-mount or symlink of the container's ptmx. 
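+ // The device-cgroup rules under Resources below allow, by conventional
+ // Linux device number, 1:3 (null), 1:5 (zero), 1:8 (random), 1:9 (urandom),
+ // 5:0 (tty) and 5:1 (console), and explicitly deny 10:229 (fuse).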
+ // See also: https://github.com/opencontainers/runtime-spec/blob/master/config-linux.md#default-devices + Devices: []specs.LinuxDevice{}, + Resources: &specs.LinuxResources{ + Devices: []specs.LinuxDeviceCgroup{ + { + Allow: false, + Access: "rwm", + }, + { + Allow: true, + Type: "c", + Major: iPtr(1), + Minor: iPtr(5), + Access: "rwm", + }, + { + Allow: true, + Type: "c", + Major: iPtr(1), + Minor: iPtr(3), + Access: "rwm", + }, + { + Allow: true, + Type: "c", + Major: iPtr(1), + Minor: iPtr(9), + Access: "rwm", + }, + { + Allow: true, + Type: "c", + Major: iPtr(1), + Minor: iPtr(8), + Access: "rwm", + }, + { + Allow: true, + Type: "c", + Major: iPtr(5), + Minor: iPtr(0), + Access: "rwm", + }, + { + Allow: true, + Type: "c", + Major: iPtr(5), + Minor: iPtr(1), + Access: "rwm", + }, + { + Allow: false, + Type: "c", + Major: iPtr(10), + Minor: iPtr(229), + Access: "rwm", + }, + }, + }, + } + + // For LCOW support, don't mask /sys/firmware + if runtime.GOOS != "windows" { + s.Linux.MaskedPaths = append(s.Linux.MaskedPaths, "/sys/firmware") + } + + return s +} diff --git a/vendor/github.com/moby/moby/oci/devices_linux.go b/vendor/github.com/moby/moby/oci/devices_linux.go new file mode 100644 index 000000000..fa9c72698 --- /dev/null +++ b/vendor/github.com/moby/moby/oci/devices_linux.go @@ -0,0 +1,86 @@ +package oci + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/runc/libcontainer/devices" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// Device transforms a libcontainer configs.Device to a specs.LinuxDevice object. +func Device(d *configs.Device) specs.LinuxDevice { + return specs.LinuxDevice{ + Type: string(d.Type), + Path: d.Path, + Major: d.Major, + Minor: d.Minor, + FileMode: fmPtr(int64(d.FileMode)), + UID: u32Ptr(int64(d.Uid)), + GID: u32Ptr(int64(d.Gid)), + } +} + +func deviceCgroup(d *configs.Device) specs.LinuxDeviceCgroup { + t := string(d.Type) + return specs.LinuxDeviceCgroup{ + Allow: true, + Type: t, + Major: &d.Major, + Minor: &d.Minor, + Access: d.Permissions, + } +} + +// DevicesFromPath computes a list of devices and device permissions from paths (pathOnHost and pathInContainer) and cgroup permissions. 
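+// For example (illustrative paths, not taken from the tests): a request for
+// ("/dev/fuse", "/dev/fuse", "rwm") resolves to a single device entry, while a
+// directory argument such as "/dev/snd" is walked recursively and every device
+// node under it is added, with the host prefix rewritten to the in-container path.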
+func DevicesFromPath(pathOnHost, pathInContainer, cgroupPermissions string) (devs []specs.LinuxDevice, devPermissions []specs.LinuxDeviceCgroup, err error) { + resolvedPathOnHost := pathOnHost + + // check if it is a symbolic link + if src, e := os.Lstat(pathOnHost); e == nil && src.Mode()&os.ModeSymlink == os.ModeSymlink { + if linkedPathOnHost, e := filepath.EvalSymlinks(pathOnHost); e == nil { + resolvedPathOnHost = linkedPathOnHost + } + } + + device, err := devices.DeviceFromPath(resolvedPathOnHost, cgroupPermissions) + // if there was no error, return the device + if err == nil { + device.Path = pathInContainer + return append(devs, Device(device)), append(devPermissions, deviceCgroup(device)), nil + } + + // if the device is not a device node + // try to see if it's a directory holding many devices + if err == devices.ErrNotADevice { + + // check if it is a directory + if src, e := os.Stat(resolvedPathOnHost); e == nil && src.IsDir() { + + // mount the internal devices recursively + filepath.Walk(resolvedPathOnHost, func(dpath string, f os.FileInfo, e error) error { + childDevice, e := devices.DeviceFromPath(dpath, cgroupPermissions) + if e != nil { + // ignore the device + return nil + } + + // add the device to userSpecified devices + childDevice.Path = strings.Replace(dpath, resolvedPathOnHost, pathInContainer, 1) + devs = append(devs, Device(childDevice)) + devPermissions = append(devPermissions, deviceCgroup(childDevice)) + + return nil + }) + } + } + + if len(devs) > 0 { + return devs, devPermissions, nil + } + + return devs, devPermissions, fmt.Errorf("error gathering device information while adding custom device %q: %s", pathOnHost, err) +} diff --git a/vendor/github.com/moby/moby/oci/devices_unsupported.go b/vendor/github.com/moby/moby/oci/devices_unsupported.go new file mode 100644 index 000000000..b5d3fab59 --- /dev/null +++ b/vendor/github.com/moby/moby/oci/devices_unsupported.go @@ -0,0 +1,20 @@ +// +build !linux + +package oci + +import ( + "errors" + + "github.com/opencontainers/runc/libcontainer/configs" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// Device transforms a libcontainer configs.Device to a specs.Device object. +// Not implemented +func Device(d *configs.Device) specs.LinuxDevice { return specs.LinuxDevice{} } + +// DevicesFromPath computes a list of devices and device permissions from paths (pathOnHost and pathInContainer) and cgroup permissions. +// Not implemented +func DevicesFromPath(pathOnHost, pathInContainer, cgroupPermissions string) (devs []specs.LinuxDevice, devPermissions []specs.LinuxDeviceCgroup, err error) { + return nil, nil, errors.New("oci/devices: unsupported platform") +} diff --git a/vendor/github.com/moby/moby/oci/namespaces.go b/vendor/github.com/moby/moby/oci/namespaces.go new file mode 100644 index 000000000..cb222dcee --- /dev/null +++ b/vendor/github.com/moby/moby/oci/namespaces.go @@ -0,0 +1,13 @@ +package oci + +import specs "github.com/opencontainers/runtime-spec/specs-go" + +// RemoveNamespace removes the `nsType` namespace from OCI spec `s` +func RemoveNamespace(s *specs.Spec, nsType specs.LinuxNamespaceType) { + for i, n := range s.Linux.Namespaces { + if n.Type == nsType { + s.Linux.Namespaces = append(s.Linux.Namespaces[:i], s.Linux.Namespaces[i+1:]...) 
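+ // a spec is expected to hold at most one namespace entry per type,
+ // so the first match is removed and the search stops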
+ return + } + } +} diff --git a/vendor/github.com/moby/moby/opts/env.go b/vendor/github.com/moby/moby/opts/env.go new file mode 100644 index 000000000..e6ddd7330 --- /dev/null +++ b/vendor/github.com/moby/moby/opts/env.go @@ -0,0 +1,46 @@ +package opts + +import ( + "fmt" + "os" + "runtime" + "strings" +) + +// ValidateEnv validates an environment variable and returns it. +// If no value is specified, it returns the current value using os.Getenv. +// +// As on ParseEnvFile and related to #16585, environment variable names +// are not validate what so ever, it's up to application inside docker +// to validate them or not. +// +// The only validation here is to check if name is empty, per #25099 +func ValidateEnv(val string) (string, error) { + arr := strings.Split(val, "=") + if arr[0] == "" { + return "", fmt.Errorf("invalid environment variable: %s", val) + } + if len(arr) > 1 { + return val, nil + } + if !doesEnvExist(val) { + return val, nil + } + return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil +} + +func doesEnvExist(name string) bool { + for _, entry := range os.Environ() { + parts := strings.SplitN(entry, "=", 2) + if runtime.GOOS == "windows" { + // Environment variable are case-insensitive on Windows. PaTh, path and PATH are equivalent. + if strings.EqualFold(parts[0], name) { + return true + } + } + if parts[0] == name { + return true + } + } + return false +} diff --git a/vendor/github.com/moby/moby/opts/env_test.go b/vendor/github.com/moby/moby/opts/env_test.go new file mode 100644 index 000000000..6f6c7a7a2 --- /dev/null +++ b/vendor/github.com/moby/moby/opts/env_test.go @@ -0,0 +1,42 @@ +package opts + +import ( + "fmt" + "os" + "runtime" + "testing" +) + +func TestValidateEnv(t *testing.T) { + valids := map[string]string{ + "a": "a", + "something": "something", + "_=a": "_=a", + "env1=value1": "env1=value1", + "_env1=value1": "_env1=value1", + "env2=value2=value3": "env2=value2=value3", + "env3=abc!qwe": "env3=abc!qwe", + "env_4=value 4": "env_4=value 4", + "PATH": fmt.Sprintf("PATH=%v", os.Getenv("PATH")), + "PATH=something": "PATH=something", + "asd!qwe": "asd!qwe", + "1asd": "1asd", + "123": "123", + "some space": "some space", + " some space before": " some space before", + "some space after ": "some space after ", + } + // Environment variables are case in-sensitive on Windows + if runtime.GOOS == "windows" { + valids["PaTh"] = fmt.Sprintf("PaTh=%v", os.Getenv("PATH")) + } + for value, expected := range valids { + actual, err := ValidateEnv(value) + if err != nil { + t.Fatal(err) + } + if actual != expected { + t.Fatalf("Expected [%v], got [%v]", expected, actual) + } + } +} diff --git a/vendor/github.com/moby/moby/opts/hosts.go b/vendor/github.com/moby/moby/opts/hosts.go new file mode 100644 index 000000000..594cccf2f --- /dev/null +++ b/vendor/github.com/moby/moby/opts/hosts.go @@ -0,0 +1,165 @@ +package opts + +import ( + "fmt" + "net" + "net/url" + "strconv" + "strings" +) + +var ( + // DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. dockerd -H tcp:// + // These are the IANA registered port numbers for use with Docker + // see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker + DefaultHTTPPort = 2375 // Default HTTP Port + // DefaultTLSHTTPPort Default HTTP Port used when TLS enabled + DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port + // DefaultUnixSocket Path for the unix socket. 
+ // Docker daemon by default always listens on the default unix socket + DefaultUnixSocket = "/var/run/docker.sock" + // DefaultTCPHost constant defines the default host string used by docker on Windows + DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort) + // DefaultTLSHost constant defines the default host string used by docker for TLS sockets + DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort) + // DefaultNamedPipe defines the default named pipe used by docker on Windows + DefaultNamedPipe = `//./pipe/docker_engine` +) + +// ValidateHost validates that the specified string is a valid host and returns it. +func ValidateHost(val string) (string, error) { + host := strings.TrimSpace(val) + // The empty string means default and is not handled by parseDockerDaemonHost + if host != "" { + _, err := parseDockerDaemonHost(host) + if err != nil { + return val, err + } + } + // Note: unlike most flag validators, we don't return the mutated value here + // we need to know what the user entered later (using ParseHost) to adjust for TLS + return val, nil +} + +// ParseHost and set defaults for a Daemon host string +func ParseHost(defaultToTLS bool, val string) (string, error) { + host := strings.TrimSpace(val) + if host == "" { + if defaultToTLS { + host = DefaultTLSHost + } else { + host = DefaultHost + } + } else { + var err error + host, err = parseDockerDaemonHost(host) + if err != nil { + return val, err + } + } + return host, nil +} + +// parseDockerDaemonHost parses the specified address and returns an address that will be used as the host. +// Depending of the address specified, this may return one of the global Default* strings defined in hosts.go. +func parseDockerDaemonHost(addr string) (string, error) { + addrParts := strings.SplitN(addr, "://", 2) + if len(addrParts) == 1 && addrParts[0] != "" { + addrParts = []string{"tcp", addrParts[0]} + } + + switch addrParts[0] { + case "tcp": + return ParseTCPAddr(addrParts[1], DefaultTCPHost) + case "unix": + return parseSimpleProtoAddr("unix", addrParts[1], DefaultUnixSocket) + case "npipe": + return parseSimpleProtoAddr("npipe", addrParts[1], DefaultNamedPipe) + case "fd": + return addr, nil + default: + return "", fmt.Errorf("Invalid bind address format: %s", addr) + } +} + +// parseSimpleProtoAddr parses and validates that the specified address is a valid +// socket address for simple protocols like unix and npipe. It returns a formatted +// socket address, either using the address parsed from addr, or the contents of +// defaultAddr if addr is a blank string. +func parseSimpleProtoAddr(proto, addr, defaultAddr string) (string, error) { + addr = strings.TrimPrefix(addr, proto+"://") + if strings.Contains(addr, "://") { + return "", fmt.Errorf("Invalid proto, expected %s: %s", proto, addr) + } + if addr == "" { + addr = defaultAddr + } + return fmt.Sprintf("%s://%s", proto, addr), nil +} + +// ParseTCPAddr parses and validates that the specified address is a valid TCP +// address. It returns a formatted TCP address, either using the address parsed +// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string. 
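+// For example, with defaultAddr "tcp://127.0.0.1:2376" (the values exercised
+// by TestParseTCP below), ":6666" becomes "tcp://127.0.0.1:6666" and a bare
+// "tcp://" returns the default unchanged.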
+// tryAddr is expected to have already been Trim()'d +// defaultAddr must be in the full `tcp://host:port` form +func ParseTCPAddr(tryAddr string, defaultAddr string) (string, error) { + if tryAddr == "" || tryAddr == "tcp://" { + return defaultAddr, nil + } + addr := strings.TrimPrefix(tryAddr, "tcp://") + if strings.Contains(addr, "://") || addr == "" { + return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr) + } + + defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://") + defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr) + if err != nil { + return "", err + } + // url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but + // not 1.4. See https://github.com/golang/go/issues/12200 and + // https://github.com/golang/go/issues/6530. + if strings.HasSuffix(addr, "]:") { + addr += defaultPort + } + + u, err := url.Parse("tcp://" + addr) + if err != nil { + return "", err + } + host, port, err := net.SplitHostPort(u.Host) + if err != nil { + // try port addition once + host, port, err = net.SplitHostPort(net.JoinHostPort(u.Host, defaultPort)) + } + if err != nil { + return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) + } + + if host == "" { + host = defaultHost + } + if port == "" { + port = defaultPort + } + p, err := strconv.Atoi(port) + if err != nil && p == 0 { + return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) + } + + return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil +} + +// ValidateExtraHost validates that the specified string is a valid extrahost and returns it. +// ExtraHost is in the form of name:ip where the ip has to be a valid ip (IPv4 or IPv6). +func ValidateExtraHost(val string) (string, error) { + // allow for IPv6 addresses in extra hosts by only splitting on first ":" + arr := strings.SplitN(val, ":", 2) + if len(arr) != 2 || len(arr[0]) == 0 { + return "", fmt.Errorf("bad format for add-host: %q", val) + } + if _, err := ValidateIPAddress(arr[1]); err != nil { + return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1]) + } + return val, nil +} diff --git a/vendor/github.com/moby/moby/opts/hosts_test.go b/vendor/github.com/moby/moby/opts/hosts_test.go new file mode 100644 index 000000000..8aada6a95 --- /dev/null +++ b/vendor/github.com/moby/moby/opts/hosts_test.go @@ -0,0 +1,181 @@ +package opts + +import ( + "fmt" + "strings" + "testing" +) + +func TestParseHost(t *testing.T) { + invalid := []string{ + "something with spaces", + "://", + "unknown://", + "tcp://:port", + "tcp://invalid:port", + } + + valid := map[string]string{ + "": DefaultHost, + " ": DefaultHost, + " ": DefaultHost, + "fd://": "fd://", + "fd://something": "fd://something", + "tcp://host:": fmt.Sprintf("tcp://host:%d", DefaultHTTPPort), + "tcp://": DefaultTCPHost, + "tcp://:2375": fmt.Sprintf("tcp://%s:2375", DefaultHTTPHost), + "tcp://:2376": fmt.Sprintf("tcp://%s:2376", DefaultHTTPHost), + "tcp://0.0.0.0:8080": "tcp://0.0.0.0:8080", + "tcp://192.168.0.0:12000": "tcp://192.168.0.0:12000", + "tcp://192.168:8080": "tcp://192.168:8080", + "tcp://0.0.0.0:1234567890": "tcp://0.0.0.0:1234567890", // yeah it's valid :P + " tcp://:7777/path ": fmt.Sprintf("tcp://%s:7777/path", DefaultHTTPHost), + "tcp://docker.com:2375": "tcp://docker.com:2375", + "unix://": "unix://" + DefaultUnixSocket, + "unix://path/to/socket": "unix://path/to/socket", + "npipe://": "npipe://" + DefaultNamedPipe, + "npipe:////./pipe/foo": "npipe:////./pipe/foo", + } + + for _, value := range invalid { + if _, err := ParseHost(false, value); 
err == nil { + t.Errorf("Expected an error for %v, got [nil]", value) + } + } + + for value, expected := range valid { + if actual, err := ParseHost(false, value); err != nil || actual != expected { + t.Errorf("Expected for %v [%v], got [%v, %v]", value, expected, actual, err) + } + } +} + +func TestParseDockerDaemonHost(t *testing.T) { + invalids := map[string]string{ + + "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d", + "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path", + "udp://127.0.0.1": "Invalid bind address format: udp://127.0.0.1", + "udp://127.0.0.1:2375": "Invalid bind address format: udp://127.0.0.1:2375", + "tcp://unix:///run/docker.sock": "Invalid proto, expected tcp: unix:///run/docker.sock", + " tcp://:7777/path ": "Invalid bind address format: tcp://:7777/path ", + "": "Invalid bind address format: ", + } + valids := map[string]string{ + "0.0.0.1:": "tcp://0.0.0.1:2375", + "0.0.0.1:5555": "tcp://0.0.0.1:5555", + "0.0.0.1:5555/path": "tcp://0.0.0.1:5555/path", + "[::1]:": "tcp://[::1]:2375", + "[::1]:5555/path": "tcp://[::1]:5555/path", + "[0:0:0:0:0:0:0:1]:": "tcp://[0:0:0:0:0:0:0:1]:2375", + "[0:0:0:0:0:0:0:1]:5555/path": "tcp://[0:0:0:0:0:0:0:1]:5555/path", + ":6666": fmt.Sprintf("tcp://%s:6666", DefaultHTTPHost), + ":6666/path": fmt.Sprintf("tcp://%s:6666/path", DefaultHTTPHost), + "tcp://": DefaultTCPHost, + "tcp://:7777": fmt.Sprintf("tcp://%s:7777", DefaultHTTPHost), + "tcp://:7777/path": fmt.Sprintf("tcp://%s:7777/path", DefaultHTTPHost), + "unix:///run/docker.sock": "unix:///run/docker.sock", + "unix://": "unix://" + DefaultUnixSocket, + "fd://": "fd://", + "fd://something": "fd://something", + "localhost:": "tcp://localhost:2375", + "localhost:5555": "tcp://localhost:5555", + "localhost:5555/path": "tcp://localhost:5555/path", + } + for invalidAddr, expectedError := range invalids { + if addr, err := parseDockerDaemonHost(invalidAddr); err == nil || err.Error() != expectedError { + t.Errorf("tcp %v address expected error %q return, got %q and addr %v", invalidAddr, expectedError, err, addr) + } + } + for validAddr, expectedAddr := range valids { + if addr, err := parseDockerDaemonHost(validAddr); err != nil || addr != expectedAddr { + t.Errorf("%v -> expected %v, got (%v) addr (%v)", validAddr, expectedAddr, err, addr) + } + } +} + +func TestParseTCP(t *testing.T) { + var ( + defaultHTTPHost = "tcp://127.0.0.1:2376" + ) + invalids := map[string]string{ + "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d", + "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path", + "udp://127.0.0.1": "Invalid proto, expected tcp: udp://127.0.0.1", + "udp://127.0.0.1:2375": "Invalid proto, expected tcp: udp://127.0.0.1:2375", + } + valids := map[string]string{ + "": defaultHTTPHost, + "tcp://": defaultHTTPHost, + "0.0.0.1:": "tcp://0.0.0.1:2376", + "0.0.0.1:5555": "tcp://0.0.0.1:5555", + "0.0.0.1:5555/path": "tcp://0.0.0.1:5555/path", + ":6666": "tcp://127.0.0.1:6666", + ":6666/path": "tcp://127.0.0.1:6666/path", + "tcp://:7777": "tcp://127.0.0.1:7777", + "tcp://:7777/path": "tcp://127.0.0.1:7777/path", + "[::1]:": "tcp://[::1]:2376", + "[::1]:5555": "tcp://[::1]:5555", + "[::1]:5555/path": "tcp://[::1]:5555/path", + "[0:0:0:0:0:0:0:1]:": "tcp://[0:0:0:0:0:0:0:1]:2376", + "[0:0:0:0:0:0:0:1]:5555": "tcp://[0:0:0:0:0:0:0:1]:5555", + "[0:0:0:0:0:0:0:1]:5555/path": "tcp://[0:0:0:0:0:0:0:1]:5555/path", + "localhost:": "tcp://localhost:2376", + "localhost:5555": "tcp://localhost:5555", + "localhost:5555/path": "tcp://localhost:5555/path", + 
} + for invalidAddr, expectedError := range invalids { + if addr, err := ParseTCPAddr(invalidAddr, defaultHTTPHost); err == nil || err.Error() != expectedError { + t.Errorf("tcp %v address expected error %v return, got %s and addr %v", invalidAddr, expectedError, err, addr) + } + } + for validAddr, expectedAddr := range valids { + if addr, err := ParseTCPAddr(validAddr, defaultHTTPHost); err != nil || addr != expectedAddr { + t.Errorf("%v -> expected %v, got %v and addr %v", validAddr, expectedAddr, err, addr) + } + } +} + +func TestParseInvalidUnixAddrInvalid(t *testing.T) { + if _, err := parseSimpleProtoAddr("unix", "tcp://127.0.0.1", "unix:///var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" { + t.Fatalf("Expected an error, got %v", err) + } + if _, err := parseSimpleProtoAddr("unix", "unix://tcp://127.0.0.1", "/var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" { + t.Fatalf("Expected an error, got %v", err) + } + if v, err := parseSimpleProtoAddr("unix", "", "/var/run/docker.sock"); err != nil || v != "unix:///var/run/docker.sock" { + t.Fatalf("Expected an %v, got %v", v, "unix:///var/run/docker.sock") + } +} + +func TestValidateExtraHosts(t *testing.T) { + valid := []string{ + `myhost:192.168.0.1`, + `thathost:10.0.2.1`, + `anipv6host:2003:ab34:e::1`, + `ipv6local:::1`, + } + + invalid := map[string]string{ + `myhost:192.notanipaddress.1`: `invalid IP`, + `thathost-nosemicolon10.0.0.1`: `bad format`, + `anipv6host:::::1`: `invalid IP`, + `ipv6local:::0::`: `invalid IP`, + } + + for _, extrahost := range valid { + if _, err := ValidateExtraHost(extrahost); err != nil { + t.Fatalf("ValidateExtraHost(`"+extrahost+"`) should succeed: error %v", err) + } + } + + for extraHost, expectedError := range invalid { + if _, err := ValidateExtraHost(extraHost); err == nil { + t.Fatalf("ValidateExtraHost(`%q`) should have failed validation", extraHost) + } else { + if !strings.Contains(err.Error(), expectedError) { + t.Fatalf("ValidateExtraHost(`%q`) error should contain %q", extraHost, expectedError) + } + } + } +} diff --git a/vendor/github.com/moby/moby/opts/hosts_unix.go b/vendor/github.com/moby/moby/opts/hosts_unix.go new file mode 100644 index 000000000..611407a9d --- /dev/null +++ b/vendor/github.com/moby/moby/opts/hosts_unix.go @@ -0,0 +1,8 @@ +// +build !windows + +package opts + +import "fmt" + +// DefaultHost constant defines the default host string used by docker on other hosts than Windows +var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket) diff --git a/vendor/github.com/moby/moby/opts/hosts_windows.go b/vendor/github.com/moby/moby/opts/hosts_windows.go new file mode 100644 index 000000000..7c239e00f --- /dev/null +++ b/vendor/github.com/moby/moby/opts/hosts_windows.go @@ -0,0 +1,6 @@ +// +build windows + +package opts + +// DefaultHost constant defines the default host string used by docker on Windows +var DefaultHost = "npipe://" + DefaultNamedPipe diff --git a/vendor/github.com/moby/moby/opts/ip.go b/vendor/github.com/moby/moby/opts/ip.go new file mode 100644 index 000000000..109506397 --- /dev/null +++ b/vendor/github.com/moby/moby/opts/ip.go @@ -0,0 +1,47 @@ +package opts + +import ( + "fmt" + "net" +) + +// IPOpt holds an IP. It is used to store values from CLI flags. +type IPOpt struct { + *net.IP +} + +// NewIPOpt creates a new IPOpt from a reference net.IP and a +// string representation of an IP. 
If the string is not a valid +// IP it will fallback to the specified reference. +func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt { + o := &IPOpt{ + IP: ref, + } + o.Set(defaultVal) + return o +} + +// Set sets an IPv4 or IPv6 address from a given string. If the given +// string is not parsable as an IP address it returns an error. +func (o *IPOpt) Set(val string) error { + ip := net.ParseIP(val) + if ip == nil { + return fmt.Errorf("%s is not an ip address", val) + } + *o.IP = ip + return nil +} + +// String returns the IP address stored in the IPOpt. If stored IP is a +// nil pointer, it returns an empty string. +func (o *IPOpt) String() string { + if *o.IP == nil { + return "" + } + return o.IP.String() +} + +// Type returns the type of the option +func (o *IPOpt) Type() string { + return "ip" +} diff --git a/vendor/github.com/moby/moby/opts/ip_test.go b/vendor/github.com/moby/moby/opts/ip_test.go new file mode 100644 index 000000000..1027d84a0 --- /dev/null +++ b/vendor/github.com/moby/moby/opts/ip_test.go @@ -0,0 +1,54 @@ +package opts + +import ( + "net" + "testing" +) + +func TestIpOptString(t *testing.T) { + addresses := []string{"", "0.0.0.0"} + var ip net.IP + + for _, address := range addresses { + stringAddress := NewIPOpt(&ip, address).String() + if stringAddress != address { + t.Fatalf("IpOpt string should be `%s`, not `%s`", address, stringAddress) + } + } +} + +func TestNewIpOptInvalidDefaultVal(t *testing.T) { + ip := net.IPv4(127, 0, 0, 1) + defaultVal := "Not an ip" + + ipOpt := NewIPOpt(&ip, defaultVal) + + expected := "127.0.0.1" + if ipOpt.String() != expected { + t.Fatalf("Expected [%v], got [%v]", expected, ipOpt.String()) + } +} + +func TestNewIpOptValidDefaultVal(t *testing.T) { + ip := net.IPv4(127, 0, 0, 1) + defaultVal := "192.168.1.1" + + ipOpt := NewIPOpt(&ip, defaultVal) + + expected := "192.168.1.1" + if ipOpt.String() != expected { + t.Fatalf("Expected [%v], got [%v]", expected, ipOpt.String()) + } +} + +func TestIpOptSetInvalidVal(t *testing.T) { + ip := net.IPv4(127, 0, 0, 1) + ipOpt := &IPOpt{IP: &ip} + + invalidIP := "invalid ip" + expectedError := "invalid ip is not an ip address" + err := ipOpt.Set(invalidIP) + if err == nil || err.Error() != expectedError { + t.Fatalf("Expected an Error with [%v], got [%v]", expectedError, err.Error()) + } +} diff --git a/vendor/github.com/moby/moby/opts/opts.go b/vendor/github.com/moby/moby/opts/opts.go new file mode 100644 index 000000000..300fb426a --- /dev/null +++ b/vendor/github.com/moby/moby/opts/opts.go @@ -0,0 +1,346 @@ +package opts + +import ( + "fmt" + "net" + "path" + "regexp" + "strings" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/convert" + units "github.com/docker/go-units" + "github.com/docker/swarmkit/api/genericresource" +) + +var ( + alphaRegexp = regexp.MustCompile(`[a-zA-Z]`) + domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`) +) + +// ListOpts holds a list of values and a validation function. +type ListOpts struct { + values *[]string + validator ValidatorFctType +} + +// NewListOpts creates a new ListOpts with the specified validator. +func NewListOpts(validator ValidatorFctType) ListOpts { + var values []string + return *NewListOptsRef(&values, validator) +} + +// NewListOptsRef creates a new ListOpts with the specified values and validator. 
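+// A minimal usage sketch (adapted from TestListOptsWithoutValidator below):
+//
+//	var v []string
+//	o := NewListOptsRef(&v, nil)
+//	_ = o.Set("foo") // v == []string{"foo"}, o.String() == "[foo]"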
+func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts { + return &ListOpts{ + values: values, + validator: validator, + } +} + +func (opts *ListOpts) String() string { + if len(*opts.values) == 0 { + return "" + } + return fmt.Sprintf("%v", *opts.values) +} + +// Set validates if needed the input value and adds it to the +// internal slice. +func (opts *ListOpts) Set(value string) error { + if opts.validator != nil { + v, err := opts.validator(value) + if err != nil { + return err + } + value = v + } + (*opts.values) = append((*opts.values), value) + return nil +} + +// Delete removes the specified element from the slice. +func (opts *ListOpts) Delete(key string) { + for i, k := range *opts.values { + if k == key { + (*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...) + return + } + } +} + +// GetMap returns the content of values in a map in order to avoid +// duplicates. +func (opts *ListOpts) GetMap() map[string]struct{} { + ret := make(map[string]struct{}) + for _, k := range *opts.values { + ret[k] = struct{}{} + } + return ret +} + +// GetAll returns the values of slice. +func (opts *ListOpts) GetAll() []string { + return (*opts.values) +} + +// GetAllOrEmpty returns the values of the slice +// or an empty slice when there are no values. +func (opts *ListOpts) GetAllOrEmpty() []string { + v := *opts.values + if v == nil { + return make([]string, 0) + } + return v +} + +// Get checks the existence of the specified key. +func (opts *ListOpts) Get(key string) bool { + for _, k := range *opts.values { + if k == key { + return true + } + } + return false +} + +// Len returns the amount of element in the slice. +func (opts *ListOpts) Len() int { + return len((*opts.values)) +} + +// Type returns a string name for this Option type +func (opts *ListOpts) Type() string { + return "list" +} + +// WithValidator returns the ListOpts with validator set. +func (opts *ListOpts) WithValidator(validator ValidatorFctType) *ListOpts { + opts.validator = validator + return opts +} + +// NamedOption is an interface that list and map options +// with names implement. +type NamedOption interface { + Name() string +} + +// NamedListOpts is a ListOpts with a configuration name. +// This struct is useful to keep reference to the assigned +// field name in the internal configuration struct. +type NamedListOpts struct { + name string + ListOpts +} + +var _ NamedOption = &NamedListOpts{} + +// NewNamedListOptsRef creates a reference to a new NamedListOpts struct. +func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts { + return &NamedListOpts{ + name: name, + ListOpts: *NewListOptsRef(values, validator), + } +} + +// Name returns the name of the NamedListOpts in the configuration. +func (o *NamedListOpts) Name() string { + return o.name +} + +// MapOpts holds a map of values and a validation function. +type MapOpts struct { + values map[string]string + validator ValidatorFctType +} + +// Set validates if needed the input value and add it to the +// internal map, by splitting on '='. +func (opts *MapOpts) Set(value string) error { + if opts.validator != nil { + v, err := opts.validator(value) + if err != nil { + return err + } + value = v + } + vals := strings.SplitN(value, "=", 2) + if len(vals) == 1 { + (opts.values)[vals[0]] = "" + } else { + (opts.values)[vals[0]] = vals[1] + } + return nil +} + +// GetAll returns the values of MapOpts as a map. 
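+// For example (as exercised in TestMapOpts below), after Set("max-size=1") and
+// Set("max-file=2") this returns map[string]string{"max-size": "1", "max-file": "2"}.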
+func (opts *MapOpts) GetAll() map[string]string { + return opts.values +} + +func (opts *MapOpts) String() string { + return fmt.Sprintf("%v", map[string]string((opts.values))) +} + +// Type returns a string name for this Option type +func (opts *MapOpts) Type() string { + return "map" +} + +// NewMapOpts creates a new MapOpts with the specified map of values and a validator. +func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts { + if values == nil { + values = make(map[string]string) + } + return &MapOpts{ + values: values, + validator: validator, + } +} + +// NamedMapOpts is a MapOpts struct with a configuration name. +// This struct is useful to keep reference to the assigned +// field name in the internal configuration struct. +type NamedMapOpts struct { + name string + MapOpts +} + +var _ NamedOption = &NamedMapOpts{} + +// NewNamedMapOpts creates a reference to a new NamedMapOpts struct. +func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts { + return &NamedMapOpts{ + name: name, + MapOpts: *NewMapOpts(values, validator), + } +} + +// Name returns the name of the NamedMapOpts in the configuration. +func (o *NamedMapOpts) Name() string { + return o.name +} + +// ValidatorFctType defines a validator function that returns a validated string and/or an error. +type ValidatorFctType func(val string) (string, error) + +// ValidatorFctListType defines a validator function that returns a validated list of string and/or an error +type ValidatorFctListType func(val string) ([]string, error) + +// ValidateIPAddress validates an Ip address. +func ValidateIPAddress(val string) (string, error) { + var ip = net.ParseIP(strings.TrimSpace(val)) + if ip != nil { + return ip.String(), nil + } + return "", fmt.Errorf("%s is not an ip address", val) +} + +// ValidateDNSSearch validates domain for resolvconf search configuration. +// A zero length domain is represented by a dot (.). +func ValidateDNSSearch(val string) (string, error) { + if val = strings.Trim(val, " "); val == "." { + return val, nil + } + return validateDomain(val) +} + +func validateDomain(val string) (string, error) { + if alphaRegexp.FindString(val) == "" { + return "", fmt.Errorf("%s is not a valid domain", val) + } + ns := domainRegexp.FindSubmatch([]byte(val)) + if len(ns) > 0 && len(ns[1]) < 255 { + return string(ns[1]), nil + } + return "", fmt.Errorf("%s is not a valid domain", val) +} + +// ValidateLabel validates that the specified string is a valid label, and returns it. +// Labels are in the form on key=value. 
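+// For example, "key1=value1" passes (as does "key1=value1=value2", since only
+// the presence of "=" is checked), while a bare "label" is rejected; see
+// TestValidateLabel.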
+func ValidateLabel(val string) (string, error) { + if strings.Count(val, "=") < 1 { + return "", fmt.Errorf("bad attribute format: %s", val) + } + return val, nil +} + +// ParseLink parses and validates the specified string as a link format (name:alias) +func ParseLink(val string) (string, string, error) { + if val == "" { + return "", "", fmt.Errorf("empty string specified for links") + } + arr := strings.Split(val, ":") + if len(arr) > 2 { + return "", "", fmt.Errorf("bad format for links: %s", val) + } + if len(arr) == 1 { + return val, val, nil + } + // This is kept because we can actually get a HostConfig with links + // from an already created container and the format is not `foo:bar` + // but `/foo:/c1/bar` + if strings.HasPrefix(arr[0], "/") { + _, alias := path.Split(arr[1]) + return arr[0][1:], alias, nil + } + return arr[0], arr[1], nil +} + +// MemBytes is a type for human readable memory bytes (like 128M, 2g, etc) +type MemBytes int64 + +// String returns the string format of the human readable memory bytes +func (m *MemBytes) String() string { + // NOTE: In spf13/pflag/flag.go, "0" is considered as "zero value" while "0 B" is not. + // We return "0" in case value is 0 here so that the default value is hidden. + // (Sometimes "default 0 B" is actually misleading) + if m.Value() != 0 { + return units.BytesSize(float64(m.Value())) + } + return "0" +} + +// Set sets the value of the MemBytes by passing a string +func (m *MemBytes) Set(value string) error { + val, err := units.RAMInBytes(value) + *m = MemBytes(val) + return err +} + +// Type returns the type +func (m *MemBytes) Type() string { + return "bytes" +} + +// Value returns the value in int64 +func (m *MemBytes) Value() int64 { + return int64(*m) +} + +// UnmarshalJSON is the customized unmarshaler for MemBytes +func (m *MemBytes) UnmarshalJSON(s []byte) error { + if len(s) <= 2 || s[0] != '"' || s[len(s)-1] != '"' { + return fmt.Errorf("invalid size: %q", s) + } + val, err := units.RAMInBytes(string(s[1 : len(s)-1])) + *m = MemBytes(val) + return err +} + +// ParseGenericResources parses and validates the specified string as a list of GenericResource +func ParseGenericResources(value string) ([]swarm.GenericResource, error) { + if value == "" { + return nil, nil + } + + resources, err := genericresource.Parse(value) + if err != nil { + return nil, err + } + + obj := convert.GenericResourcesFromGRPC(resources) + + return obj, nil +} diff --git a/vendor/github.com/moby/moby/opts/opts_test.go b/vendor/github.com/moby/moby/opts/opts_test.go new file mode 100644 index 000000000..269f88639 --- /dev/null +++ b/vendor/github.com/moby/moby/opts/opts_test.go @@ -0,0 +1,264 @@ +package opts + +import ( + "fmt" + "strings" + "testing" +) + +func TestValidateIPAddress(t *testing.T) { + if ret, err := ValidateIPAddress(`1.2.3.4`); err != nil || ret == "" { + t.Fatalf("ValidateIPAddress(`1.2.3.4`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`127.0.0.1`); err != nil || ret == "" { + t.Fatalf("ValidateIPAddress(`127.0.0.1`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`::1`); err != nil || ret == "" { + t.Fatalf("ValidateIPAddress(`::1`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`127`); err == nil || ret != "" { + t.Fatalf("ValidateIPAddress(`127`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`random invalid string`); err == nil || ret != "" { + t.Fatalf("ValidateIPAddress(`random invalid string`) got %s %s", ret, err) + } + +} + +func TestMapOpts(t *testing.T) 
{ + tmpMap := make(map[string]string) + o := NewMapOpts(tmpMap, logOptsValidator) + o.Set("max-size=1") + if o.String() != "map[max-size:1]" { + t.Errorf("%s != [map[max-size:1]", o.String()) + } + + o.Set("max-file=2") + if len(tmpMap) != 2 { + t.Errorf("map length %d != 2", len(tmpMap)) + } + + if tmpMap["max-file"] != "2" { + t.Errorf("max-file = %s != 2", tmpMap["max-file"]) + } + + if tmpMap["max-size"] != "1" { + t.Errorf("max-size = %s != 1", tmpMap["max-size"]) + } + if o.Set("dummy-val=3") == nil { + t.Error("validator is not being called") + } +} + +func TestListOptsWithoutValidator(t *testing.T) { + o := NewListOpts(nil) + o.Set("foo") + if o.String() != "[foo]" { + t.Errorf("%s != [foo]", o.String()) + } + o.Set("bar") + if o.Len() != 2 { + t.Errorf("%d != 2", o.Len()) + } + o.Set("bar") + if o.Len() != 3 { + t.Errorf("%d != 3", o.Len()) + } + if !o.Get("bar") { + t.Error("o.Get(\"bar\") == false") + } + if o.Get("baz") { + t.Error("o.Get(\"baz\") == true") + } + o.Delete("foo") + if o.String() != "[bar bar]" { + t.Errorf("%s != [bar bar]", o.String()) + } + listOpts := o.GetAll() + if len(listOpts) != 2 || listOpts[0] != "bar" || listOpts[1] != "bar" { + t.Errorf("Expected [[bar bar]], got [%v]", listOpts) + } + mapListOpts := o.GetMap() + if len(mapListOpts) != 1 { + t.Errorf("Expected [map[bar:{}]], got [%v]", mapListOpts) + } + +} + +func TestListOptsWithValidator(t *testing.T) { + // Re-using logOptsvalidator (used by MapOpts) + o := NewListOpts(logOptsValidator) + o.Set("foo") + if o.String() != "" { + t.Errorf(`%s != ""`, o.String()) + } + o.Set("foo=bar") + if o.String() != "" { + t.Errorf(`%s != ""`, o.String()) + } + o.Set("max-file=2") + if o.Len() != 1 { + t.Errorf("%d != 1", o.Len()) + } + if !o.Get("max-file=2") { + t.Error("o.Get(\"max-file=2\") == false") + } + if o.Get("baz") { + t.Error("o.Get(\"baz\") == true") + } + o.Delete("max-file=2") + if o.String() != "" { + t.Errorf(`%s != ""`, o.String()) + } +} + +func TestValidateDNSSearch(t *testing.T) { + valid := []string{ + `.`, + `a`, + `a.`, + `1.foo`, + `17.foo`, + `foo.bar`, + `foo.bar.baz`, + `foo.bar.`, + `foo.bar.baz`, + `foo1.bar2`, + `foo1.bar2.baz`, + `1foo.2bar.`, + `1foo.2bar.baz`, + `foo-1.bar-2`, + `foo-1.bar-2.baz`, + `foo-1.bar-2.`, + `foo-1.bar-2.baz`, + `1-foo.2-bar`, + `1-foo.2-bar.baz`, + `1-foo.2-bar.`, + `1-foo.2-bar.baz`, + } + + invalid := []string{ + ``, + ` `, + ` `, + `17`, + `17.`, + `.17`, + `17-.`, + `17-.foo`, + `.foo`, + `foo-.bar`, + `-foo.bar`, + `foo.bar-`, + `foo.bar-.baz`, + `foo.-bar`, + `foo.-bar.baz`, + `foo.bar.baz.this.should.fail.on.long.name.because.it.is.longer.thanitshouldbethis.should.fail.on.long.name.because.it.is.longer.thanitshouldbethis.should.fail.on.long.name.because.it.is.longer.thanitshouldbethis.should.fail.on.long.name.because.it.is.longer.thanitshouldbe`, + } + + for _, domain := range valid { + if ret, err := ValidateDNSSearch(domain); err != nil || ret == "" { + t.Fatalf("ValidateDNSSearch(`"+domain+"`) got %s %s", ret, err) + } + } + + for _, domain := range invalid { + if ret, err := ValidateDNSSearch(domain); err == nil || ret != "" { + t.Fatalf("ValidateDNSSearch(`"+domain+"`) got %s %s", ret, err) + } + } +} + +func TestValidateLabel(t *testing.T) { + if _, err := ValidateLabel("label"); err == nil || err.Error() != "bad attribute format: label" { + t.Fatalf("Expected an error [bad attribute format: label], go %v", err) + } + if actual, err := ValidateLabel("key1=value1"); err != nil || actual != "key1=value1" { + t.Fatalf("Expected 
[key1=value1], got [%v,%v]", actual, err) + } + // Validate it's working with more than one = + if actual, err := ValidateLabel("key1=value1=value2"); err != nil { + t.Fatalf("Expected [key1=value1=value2], got [%v,%v]", actual, err) + } + // Validate it's working with one more + if actual, err := ValidateLabel("key1=value1=value2=value3"); err != nil { + t.Fatalf("Expected [key1=value1=value2=value2], got [%v,%v]", actual, err) + } +} + +func logOptsValidator(val string) (string, error) { + allowedKeys := map[string]string{"max-size": "1", "max-file": "2"} + vals := strings.Split(val, "=") + if allowedKeys[vals[0]] != "" { + return val, nil + } + return "", fmt.Errorf("invalid key %s", vals[0]) +} + +func TestNamedListOpts(t *testing.T) { + var v []string + o := NewNamedListOptsRef("foo-name", &v, nil) + + o.Set("foo") + if o.String() != "[foo]" { + t.Errorf("%s != [foo]", o.String()) + } + if o.Name() != "foo-name" { + t.Errorf("%s != foo-name", o.Name()) + } + if len(v) != 1 { + t.Errorf("expected foo to be in the values, got %v", v) + } +} + +func TestNamedMapOpts(t *testing.T) { + tmpMap := make(map[string]string) + o := NewNamedMapOpts("max-name", tmpMap, nil) + + o.Set("max-size=1") + if o.String() != "map[max-size:1]" { + t.Errorf("%s != [map[max-size:1]", o.String()) + } + if o.Name() != "max-name" { + t.Errorf("%s != max-name", o.Name()) + } + if _, exist := tmpMap["max-size"]; !exist { + t.Errorf("expected map-size to be in the values, got %v", tmpMap) + } +} + +func TestParseLink(t *testing.T) { + name, alias, err := ParseLink("name:alias") + if err != nil { + t.Fatalf("Expected not to error out on a valid name:alias format but got: %v", err) + } + if name != "name" { + t.Fatalf("Link name should have been name, got %s instead", name) + } + if alias != "alias" { + t.Fatalf("Link alias should have been alias, got %s instead", alias) + } + // short format definition + name, alias, err = ParseLink("name") + if err != nil { + t.Fatalf("Expected not to error out on a valid name only format but got: %v", err) + } + if name != "name" { + t.Fatalf("Link name should have been name, got %s instead", name) + } + if alias != "name" { + t.Fatalf("Link alias should have been name, got %s instead", alias) + } + // empty string link definition is not allowed + if _, _, err := ParseLink(""); err == nil || !strings.Contains(err.Error(), "empty string specified for links") { + t.Fatalf("Expected error 'empty string specified for links' but got: %v", err) + } + // more than two colons are not allowed + if _, _, err := ParseLink("link:alias:wrong"); err == nil || !strings.Contains(err.Error(), "bad format for links: link:alias:wrong") { + t.Fatalf("Expected error 'bad format for links: link:alias:wrong' but got: %v", err) + } +} diff --git a/vendor/github.com/moby/moby/opts/opts_unix.go b/vendor/github.com/moby/moby/opts/opts_unix.go new file mode 100644 index 000000000..2766a43a0 --- /dev/null +++ b/vendor/github.com/moby/moby/opts/opts_unix.go @@ -0,0 +1,6 @@ +// +build !windows + +package opts + +// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. dockerd -H tcp://:8080 +const DefaultHTTPHost = "localhost" diff --git a/vendor/github.com/moby/moby/opts/opts_windows.go b/vendor/github.com/moby/moby/opts/opts_windows.go new file mode 100644 index 000000000..98b7251a9 --- /dev/null +++ b/vendor/github.com/moby/moby/opts/opts_windows.go @@ -0,0 +1,56 @@ +package opts + +// TODO Windows. Identify bug in GOLang 1.5.1+ and/or Windows Server 2016 TP5. 
+// @jhowardmsft, @swernli. +// +// On Windows, this mitigates a problem with the default options of running +// a docker client against a local docker daemon on TP5. +// +// What was found that if the default host is "localhost", even if the client +// (and daemon as this is local) is not physically on a network, and the DNS +// cache is flushed (ipconfig /flushdns), then the client will pause for +// exactly one second when connecting to the daemon for calls. For example +// using docker run windowsservercore cmd, the CLI will send a create followed +// by an attach. You see the delay between the attach finishing and the attach +// being seen by the daemon. +// +// Here's some daemon debug logs with additional debug spew put in. The +// AfterWriteJSON log is the very last thing the daemon does as part of the +// create call. The POST /attach is the second CLI call. Notice the second +// time gap. +// +// time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs" +// time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig" +// time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...." +// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=ToDiskLocking.... +// time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...." +// time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...." +// time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func" +// time="2015-11-06T13:38:37.282628100-08:00" level=debug msg="After daemon.create" +// time="2015-11-06T13:38:37.286651700-08:00" level=debug msg="return 2" +// time="2015-11-06T13:38:37.289629500-08:00" level=debug msg="Returned from daemon.ContainerCreate" +// time="2015-11-06T13:38:37.311629100-08:00" level=debug msg="After WriteJSON" +// ... 1 second gap here.... +// time="2015-11-06T13:38:38.317866200-08:00" level=debug msg="Calling POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach" +// time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1" +// +// We suspect this is either a bug introduced in GOLang 1.5.1, or that a change +// in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows. In theory, +// the Windows networking stack is supposed to resolve "localhost" internally, +// without hitting DNS, or even reading the hosts file (which is why localhost +// is commented out in the hosts file on Windows). +// +// We have validated that working around this using the actual IPv4 localhost +// address does not cause the delay. +// +// This does not occur with the docker client built with 1.4.3 on the same +// Windows build, regardless of whether the daemon is built using 1.5.1 +// or 1.4.3. It does not occur on Linux. We also verified we see the same thing +// on a cross-compiled Windows binary (from Linux). +// +// Final note: This is a mitigation, not a 'real' fix. It is still susceptible +// to the delay if a user were to do 'docker run -H=tcp://localhost:2375...' +// explicitly. + +// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. 
dockerd -H tcp://:8080 +const DefaultHTTPHost = "127.0.0.1" diff --git a/vendor/github.com/moby/moby/opts/quotedstring.go b/vendor/github.com/moby/moby/opts/quotedstring.go new file mode 100644 index 000000000..fb1e5374b --- /dev/null +++ b/vendor/github.com/moby/moby/opts/quotedstring.go @@ -0,0 +1,37 @@ +package opts + +// QuotedString is a string that may have extra quotes around the value. The +// quotes are stripped from the value. +type QuotedString struct { + value *string +} + +// Set sets a new value +func (s *QuotedString) Set(val string) error { + *s.value = trimQuotes(val) + return nil +} + +// Type returns the type of the value +func (s *QuotedString) Type() string { + return "string" +} + +func (s *QuotedString) String() string { + return string(*s.value) +} + +func trimQuotes(value string) string { + lastIndex := len(value) - 1 + for _, char := range []byte{'\'', '"'} { + if value[0] == char && value[lastIndex] == char { + return value[1:lastIndex] + } + } + return value +} + +// NewQuotedString returns a new quoted string option +func NewQuotedString(value *string) *QuotedString { + return &QuotedString{value: value} +} diff --git a/vendor/github.com/moby/moby/opts/quotedstring_test.go b/vendor/github.com/moby/moby/opts/quotedstring_test.go new file mode 100644 index 000000000..54dcbc19b --- /dev/null +++ b/vendor/github.com/moby/moby/opts/quotedstring_test.go @@ -0,0 +1,29 @@ +package opts + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestQuotedStringSetWithQuotes(t *testing.T) { + value := "" + qs := NewQuotedString(&value) + assert.NoError(t, qs.Set(`"something"`)) + assert.Equal(t, "something", qs.String()) + assert.Equal(t, "something", value) +} + +func TestQuotedStringSetWithMismatchedQuotes(t *testing.T) { + value := "" + qs := NewQuotedString(&value) + assert.NoError(t, qs.Set(`"something'`)) + assert.Equal(t, `"something'`, qs.String()) +} + +func TestQuotedStringSetWithNoQuotes(t *testing.T) { + value := "" + qs := NewQuotedString(&value) + assert.NoError(t, qs.Set("something")) + assert.Equal(t, "something", qs.String()) +} diff --git a/vendor/github.com/moby/moby/opts/runtime.go b/vendor/github.com/moby/moby/opts/runtime.go new file mode 100644 index 000000000..4361b3ce0 --- /dev/null +++ b/vendor/github.com/moby/moby/opts/runtime.go @@ -0,0 +1,79 @@ +package opts + +import ( + "fmt" + "strings" + + "github.com/docker/docker/api/types" +) + +// RuntimeOpt defines a map of Runtimes +type RuntimeOpt struct { + name string + stockRuntimeName string + values *map[string]types.Runtime +} + +// NewNamedRuntimeOpt creates a new RuntimeOpt +func NewNamedRuntimeOpt(name string, ref *map[string]types.Runtime, stockRuntime string) *RuntimeOpt { + if ref == nil { + ref = &map[string]types.Runtime{} + } + return &RuntimeOpt{name: name, values: ref, stockRuntimeName: stockRuntime} +} + +// Name returns the name of the NamedListOpts in the configuration. 
+func (o *RuntimeOpt) Name() string { + return o.name +} + +// Set validates and updates the list of Runtimes +func (o *RuntimeOpt) Set(val string) error { + parts := strings.SplitN(val, "=", 2) + if len(parts) != 2 { + return fmt.Errorf("invalid runtime argument: %s", val) + } + + parts[0] = strings.TrimSpace(parts[0]) + parts[1] = strings.TrimSpace(parts[1]) + if parts[0] == "" || parts[1] == "" { + return fmt.Errorf("invalid runtime argument: %s", val) + } + + parts[0] = strings.ToLower(parts[0]) + if parts[0] == o.stockRuntimeName { + return fmt.Errorf("runtime name '%s' is reserved", o.stockRuntimeName) + } + + if _, ok := (*o.values)[parts[0]]; ok { + return fmt.Errorf("runtime '%s' was already defined", parts[0]) + } + + (*o.values)[parts[0]] = types.Runtime{Path: parts[1]} + + return nil +} + +// String returns Runtime values as a string. +func (o *RuntimeOpt) String() string { + var out []string + for k := range *o.values { + out = append(out, k) + } + + return fmt.Sprintf("%v", out) +} + +// GetMap returns a map of Runtimes (name: path) +func (o *RuntimeOpt) GetMap() map[string]types.Runtime { + if o.values != nil { + return *o.values + } + + return map[string]types.Runtime{} +} + +// Type returns the type of the option +func (o *RuntimeOpt) Type() string { + return "runtime" +} diff --git a/vendor/github.com/moby/moby/opts/ulimit.go b/vendor/github.com/moby/moby/opts/ulimit.go new file mode 100644 index 000000000..a2a65fcd2 --- /dev/null +++ b/vendor/github.com/moby/moby/opts/ulimit.go @@ -0,0 +1,81 @@ +package opts + +import ( + "fmt" + + "github.com/docker/go-units" +) + +// UlimitOpt defines a map of Ulimits +type UlimitOpt struct { + values *map[string]*units.Ulimit +} + +// NewUlimitOpt creates a new UlimitOpt +func NewUlimitOpt(ref *map[string]*units.Ulimit) *UlimitOpt { + if ref == nil { + ref = &map[string]*units.Ulimit{} + } + return &UlimitOpt{ref} +} + +// Set validates a Ulimit and sets its name as a key in UlimitOpt +func (o *UlimitOpt) Set(val string) error { + l, err := units.ParseUlimit(val) + if err != nil { + return err + } + + (*o.values)[l.Name] = l + + return nil +} + +// String returns Ulimit values as a string. +func (o *UlimitOpt) String() string { + var out []string + for _, v := range *o.values { + out = append(out, v.String()) + } + + return fmt.Sprintf("%v", out) +} + +// GetList returns a slice of pointers to Ulimits. 
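+// For example (see TestUlimitOpt), an opt seeded with nofile=512:1024 that has
+// then had Set("core=1024:1024") called returns both the "nofile" and "core"
+// limits in the list.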
+func (o *UlimitOpt) GetList() []*units.Ulimit { + var ulimits []*units.Ulimit + for _, v := range *o.values { + ulimits = append(ulimits, v) + } + + return ulimits +} + +// Type returns the option type +func (o *UlimitOpt) Type() string { + return "ulimit" +} + +// NamedUlimitOpt defines a named map of Ulimits +type NamedUlimitOpt struct { + name string + UlimitOpt +} + +var _ NamedOption = &NamedUlimitOpt{} + +// NewNamedUlimitOpt creates a new NamedUlimitOpt +func NewNamedUlimitOpt(name string, ref *map[string]*units.Ulimit) *NamedUlimitOpt { + if ref == nil { + ref = &map[string]*units.Ulimit{} + } + return &NamedUlimitOpt{ + name: name, + UlimitOpt: *NewUlimitOpt(ref), + } +} + +// Name returns the option name +func (o *NamedUlimitOpt) Name() string { + return o.name +} diff --git a/vendor/github.com/moby/moby/opts/ulimit_test.go b/vendor/github.com/moby/moby/opts/ulimit_test.go new file mode 100644 index 000000000..0aa3facdf --- /dev/null +++ b/vendor/github.com/moby/moby/opts/ulimit_test.go @@ -0,0 +1,42 @@ +package opts + +import ( + "testing" + + "github.com/docker/go-units" +) + +func TestUlimitOpt(t *testing.T) { + ulimitMap := map[string]*units.Ulimit{ + "nofile": {"nofile", 1024, 512}, + } + + ulimitOpt := NewUlimitOpt(&ulimitMap) + + expected := "[nofile=512:1024]" + if ulimitOpt.String() != expected { + t.Fatalf("Expected %v, got %v", expected, ulimitOpt) + } + + // Valid ulimit append to opts + if err := ulimitOpt.Set("core=1024:1024"); err != nil { + t.Fatal(err) + } + + // Invalid ulimit type returns an error and do not append to opts + if err := ulimitOpt.Set("notavalidtype=1024:1024"); err == nil { + t.Fatalf("Expected error on invalid ulimit type") + } + expected = "[nofile=512:1024 core=1024:1024]" + expected2 := "[core=1024:1024 nofile=512:1024]" + result := ulimitOpt.String() + if result != expected && result != expected2 { + t.Fatalf("Expected %v or %v, got %v", expected, expected2, ulimitOpt) + } + + // And test GetList + ulimits := ulimitOpt.GetList() + if len(ulimits) != 2 { + t.Fatalf("Expected a ulimit list of 2, got %v", ulimits) + } +} diff --git a/vendor/github.com/moby/moby/pkg/README.md b/vendor/github.com/moby/moby/pkg/README.md new file mode 100644 index 000000000..c4b78a8ad --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/README.md @@ -0,0 +1,11 @@ +pkg/ is a collection of utility packages used by the Docker project without being specific to its internals. + +Utility packages are kept separate from the docker core codebase to keep it as small and concise as possible. +If some utilities grow larger and their APIs stabilize, they may be moved to their own repository under the +Docker organization, to facilitate re-use by other projects. However that is not the priority. + +The directory `pkg` is named after the same directory in the camlistore project. Since Brad is a core +Go maintainer, we thought it made sense to copy his methods for organizing Go code :) Thanks Brad! + +Because utility packages are small and neatly separated from the rest of the codebase, they are a good +place to start for aspiring maintainers and contributors. Get in touch if you want to help maintain them! diff --git a/vendor/github.com/moby/moby/pkg/aaparser/aaparser.go b/vendor/github.com/moby/moby/pkg/aaparser/aaparser.go new file mode 100644 index 000000000..5de4a4d79 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/aaparser/aaparser.go @@ -0,0 +1,89 @@ +// Package aaparser is a convenience package interacting with `apparmor_parser`. 
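+// For example, an apparmor_parser reporting "AppArmor parser version 2.9.95"
+// yields 209095 from GetVersion (major*10^5 + minor*10^3 + patch), as covered
+// by TestParseVersion.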
+package aaparser + +import ( + "fmt" + "os/exec" + "strconv" + "strings" +) + +const ( + binary = "apparmor_parser" +) + +// GetVersion returns the major and minor version of apparmor_parser. +func GetVersion() (int, error) { + output, err := cmd("", "--version") + if err != nil { + return -1, err + } + + return parseVersion(output) +} + +// LoadProfile runs `apparmor_parser -Kr` on a specified apparmor profile to +// replace the profile. The `-K` is necessary to make sure that apparmor_parser +// doesn't try to write to a read-only filesystem. +func LoadProfile(profilePath string) error { + _, err := cmd("", "-Kr", profilePath) + return err +} + +// cmd runs `apparmor_parser` with the passed arguments. +func cmd(dir string, arg ...string) (string, error) { + c := exec.Command(binary, arg...) + c.Dir = dir + + output, err := c.CombinedOutput() + if err != nil { + return "", fmt.Errorf("running `%s %s` failed with output: %s\nerror: %v", c.Path, strings.Join(c.Args, " "), output, err) + } + + return string(output), nil +} + +// parseVersion takes the output from `apparmor_parser --version` and returns +// a representation of the {major, minor, patch} version as a single number of +// the form MMmmPPP {major, minor, patch}. +func parseVersion(output string) (int, error) { + // output is in the form of the following: + // AppArmor parser version 2.9.1 + // Copyright (C) 1999-2008 Novell Inc. + // Copyright 2009-2012 Canonical Ltd. + + lines := strings.SplitN(output, "\n", 2) + words := strings.Split(lines[0], " ") + version := words[len(words)-1] + + // split by major minor version + v := strings.Split(version, ".") + if len(v) == 0 || len(v) > 3 { + return -1, fmt.Errorf("parsing version failed for output: `%s`", output) + } + + // Default the versions to 0. + var majorVersion, minorVersion, patchLevel int + + majorVersion, err := strconv.Atoi(v[0]) + if err != nil { + return -1, err + } + + if len(v) > 1 { + minorVersion, err = strconv.Atoi(v[1]) + if err != nil { + return -1, err + } + } + if len(v) > 2 { + patchLevel, err = strconv.Atoi(v[2]) + if err != nil { + return -1, err + } + } + + // major*10^5 + minor*10^3 + patch*10^0 + numericVersion := majorVersion*1e5 + minorVersion*1e3 + patchLevel + return numericVersion, nil +} diff --git a/vendor/github.com/moby/moby/pkg/aaparser/aaparser_test.go b/vendor/github.com/moby/moby/pkg/aaparser/aaparser_test.go new file mode 100644 index 000000000..69bc8d2fd --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/aaparser/aaparser_test.go @@ -0,0 +1,73 @@ +package aaparser + +import ( + "testing" +) + +type versionExpected struct { + output string + version int +} + +func TestParseVersion(t *testing.T) { + versions := []versionExpected{ + { + output: `AppArmor parser version 2.10 +Copyright (C) 1999-2008 Novell Inc. +Copyright 2009-2012 Canonical Ltd. + +`, + version: 210000, + }, + { + output: `AppArmor parser version 2.8 +Copyright (C) 1999-2008 Novell Inc. +Copyright 2009-2012 Canonical Ltd. + +`, + version: 208000, + }, + { + output: `AppArmor parser version 2.20 +Copyright (C) 1999-2008 Novell Inc. +Copyright 2009-2012 Canonical Ltd. + +`, + version: 220000, + }, + { + output: `AppArmor parser version 2.05 +Copyright (C) 1999-2008 Novell Inc. +Copyright 2009-2012 Canonical Ltd. + +`, + version: 205000, + }, + { + output: `AppArmor parser version 2.9.95 +Copyright (C) 1999-2008 Novell Inc. +Copyright 2009-2012 Canonical Ltd. + +`, + version: 209095, + }, + { + output: `AppArmor parser version 3.14.159 +Copyright (C) 1999-2008 Novell Inc. 
+Copyright 2009-2012 Canonical Ltd. + +`, + version: 314159, + }, + } + + for _, v := range versions { + version, err := parseVersion(v.output) + if err != nil { + t.Fatalf("expected error to be nil for %#v, got: %v", v, err) + } + if version != v.version { + t.Fatalf("expected version to be %d, was %d, for: %#v\n", v.version, version, v) + } + } +} diff --git a/vendor/github.com/moby/moby/pkg/archive/README.md b/vendor/github.com/moby/moby/pkg/archive/README.md new file mode 100644 index 000000000..7307d9694 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/README.md @@ -0,0 +1 @@ +This code provides helper functions for dealing with archive files. diff --git a/vendor/github.com/moby/moby/pkg/archive/archive.go b/vendor/github.com/moby/moby/pkg/archive/archive.go new file mode 100644 index 000000000..6cbc2e2bd --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/archive.go @@ -0,0 +1,1219 @@ +package archive + +import ( + "archive/tar" + "bufio" + "bytes" + "compress/bzip2" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/system" +) + +type ( + // Compression is the state represents if compressed or not. + Compression int + // WhiteoutFormat is the format of whiteouts unpacked + WhiteoutFormat int + + // TarOptions wraps the tar options. + TarOptions struct { + IncludeFiles []string + ExcludePatterns []string + Compression Compression + NoLchown bool + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + ChownOpts *idtools.IDPair + IncludeSourceDir bool + // WhiteoutFormat is the expected on disk format for whiteout files. + // This format will be converted to the standard format on pack + // and from the standard format on unpack. + WhiteoutFormat WhiteoutFormat + // When unpacking, specifies whether overwriting a directory with a + // non-directory is allowed and vice versa. + NoOverwriteDirNonDir bool + // For each include when creating an archive, the included name will be + // replaced with the matching name from this map. + RebaseNames map[string]string + InUserNS bool + } +) + +// Archiver allows the reuse of most utility functions of this package +// with a pluggable Untar function. Also, to facilitate the passing of +// specific id mappings for untar, an archiver can be created with maps +// which will then be passed to Untar operations +type Archiver struct { + Untar func(io.Reader, string, *TarOptions) error + IDMappings *idtools.IDMappings +} + +// NewDefaultArchiver returns a new Archiver without any IDMappings +func NewDefaultArchiver() *Archiver { + return &Archiver{Untar: Untar, IDMappings: &idtools.IDMappings{}} +} + +// breakoutError is used to differentiate errors related to breaking out +// When testing archive breakout in the unit tests, this error is expected +// in order for the test to pass. +type breakoutError error + +const ( + // Uncompressed represents the uncompressed. + Uncompressed Compression = iota + // Bzip2 is bzip2 compression algorithm. + Bzip2 + // Gzip is gzip compression algorithm. + Gzip + // Xz is xz compression algorithm. 
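+	// (Xz input can be decompressed via the external xz binary, but
+	// CompressStream below deliberately refuses to produce bzip2 or xz
+	// output.)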
+ Xz +) + +const ( + // AUFSWhiteoutFormat is the default format for whiteouts + AUFSWhiteoutFormat WhiteoutFormat = iota + // OverlayWhiteoutFormat formats whiteout according to the overlay + // standard. + OverlayWhiteoutFormat +) + +const ( + modeISDIR = 040000 // Directory + modeISFIFO = 010000 // FIFO + modeISREG = 0100000 // Regular file + modeISLNK = 0120000 // Symbolic link + modeISBLK = 060000 // Block special file + modeISCHR = 020000 // Character special file + modeISSOCK = 0140000 // Socket +) + +// IsArchivePath checks if the (possibly compressed) file at the given path +// starts with a tar file header. +func IsArchivePath(path string) bool { + file, err := os.Open(path) + if err != nil { + return false + } + defer file.Close() + rdr, err := DecompressStream(file) + if err != nil { + return false + } + r := tar.NewReader(rdr) + _, err = r.Next() + return err == nil +} + +// DetectCompression detects the compression algorithm of the source. +func DetectCompression(source []byte) Compression { + for compression, m := range map[Compression][]byte{ + Bzip2: {0x42, 0x5A, 0x68}, + Gzip: {0x1F, 0x8B, 0x08}, + Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, + } { + if len(source) < len(m) { + logrus.Debug("Len too short") + continue + } + if bytes.Equal(m, source[:len(m)]) { + return compression + } + } + return Uncompressed +} + +func xzDecompress(archive io.Reader) (io.ReadCloser, <-chan struct{}, error) { + args := []string{"xz", "-d", "-c", "-q"} + + return cmdStream(exec.Command(args[0], args[1:]...), archive) +} + +// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. +func DecompressStream(archive io.Reader) (io.ReadCloser, error) { + p := pools.BufioReader32KPool + buf := p.Get(archive) + bs, err := buf.Peek(10) + if err != nil && err != io.EOF { + // Note: we'll ignore any io.EOF error because there are some odd + // cases where the layer.tar file will be empty (zero bytes) and + // that results in an io.EOF from the Peek() call. So, in those + // cases we'll just treat it as a non-compressed stream and + // that means just create an empty layer. + // See Issue 18170 + return nil, err + } + + compression := DetectCompression(bs) + switch compression { + case Uncompressed: + readBufWrapper := p.NewReadCloserWrapper(buf, buf) + return readBufWrapper, nil + case Gzip: + gzReader, err := gzip.NewReader(buf) + if err != nil { + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) + return readBufWrapper, nil + case Bzip2: + bz2Reader := bzip2.NewReader(buf) + readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) + return readBufWrapper, nil + case Xz: + xzReader, chdone, err := xzDecompress(buf) + if err != nil { + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) + return ioutils.NewReadCloserWrapper(readBufWrapper, func() error { + <-chdone + return readBufWrapper.Close() + }), nil + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +// CompressStream compresses the dest with specified compression algorithm. 
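+// Only Uncompressed and Gzip are supported for writing; Bzip2 and Xz return
+// an "Unsupported compression format" error.
+//
+// A minimal usage sketch (hypothetical src and dst):
+//
+//	w, err := CompressStream(dst, Gzip)
+//	if err != nil {
+//		return err
+//	}
+//	defer w.Close()
+//	_, err = io.Copy(w, src)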
+func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { + p := pools.BufioWriter32KPool + buf := p.Get(dest) + switch compression { + case Uncompressed: + writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) + return writeBufWrapper, nil + case Gzip: + gzWriter := gzip.NewWriter(dest) + writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) + return writeBufWrapper, nil + case Bzip2, Xz: + // archive/bzip2 does not support writing, and there is no xz support at all + // However, this is not a problem as docker only currently generates gzipped tars + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to +// modify the contents or header of an entry in the archive. If the file already +// exists in the archive the TarModifierFunc will be called with the Header and +// a reader which will return the files content. If the file does not exist both +// header and content will be nil. +type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) + +// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the +// tar stream are modified if they match any of the keys in mods. +func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { + pipeReader, pipeWriter := io.Pipe() + + go func() { + tarReader := tar.NewReader(inputTarStream) + tarWriter := tar.NewWriter(pipeWriter) + defer inputTarStream.Close() + defer tarWriter.Close() + + modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { + header, data, err := modifier(name, original, tarReader) + switch { + case err != nil: + return err + case header == nil: + return nil + } + + header.Name = name + header.Size = int64(len(data)) + if err := tarWriter.WriteHeader(header); err != nil { + return err + } + if len(data) != 0 { + if _, err := tarWriter.Write(data); err != nil { + return err + } + } + return nil + } + + var err error + var originalHeader *tar.Header + for { + originalHeader, err = tarReader.Next() + if err == io.EOF { + break + } + if err != nil { + pipeWriter.CloseWithError(err) + return + } + + modifier, ok := mods[originalHeader.Name] + if !ok { + // No modifiers for this file, copy the header and data + if err := tarWriter.WriteHeader(originalHeader); err != nil { + pipeWriter.CloseWithError(err) + return + } + if _, err := pools.Copy(tarWriter, tarReader); err != nil { + pipeWriter.CloseWithError(err) + return + } + continue + } + delete(mods, originalHeader.Name) + + if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + + // Apply the modifiers that haven't matched any files in the archive + for name, modifier := range mods { + if err := modify(name, nil, modifier, nil); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + + pipeWriter.Close() + + }() + return pipeReader +} + +// Extension returns the extension of a file that uses the specified compression algorithm. 
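+// For example:
+//
+//	c := Gzip
+//	c.Extension() // "tar.gz"; an unrecognized value yields ""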
+func (compression *Compression) Extension() string { + switch *compression { + case Uncompressed: + return "tar" + case Bzip2: + return "tar.bz2" + case Gzip: + return "tar.gz" + case Xz: + return "tar.xz" + } + return "" +} + +// FileInfoHeader creates a populated Header from fi. +// Compared to archive pkg this function fills in more information. +// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), +// which have been deleted since Go 1.9 archive/tar. +func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { + hdr, err := tar.FileInfoHeader(fi, link) + if err != nil { + return nil, err + } + hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) + name, err = canonicalTarName(name, fi.IsDir()) + if err != nil { + return nil, fmt.Errorf("tar: cannot canonicalize path: %v", err) + } + hdr.Name = name + if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { + return nil, err + } + return hdr, nil +} + +// fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar +// https://github.com/golang/go/commit/66b5a2f +func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { + fm := fi.Mode() + switch { + case fm.IsRegular(): + mode |= modeISREG + case fi.IsDir(): + mode |= modeISDIR + case fm&os.ModeSymlink != 0: + mode |= modeISLNK + case fm&os.ModeDevice != 0: + if fm&os.ModeCharDevice != 0 { + mode |= modeISCHR + } else { + mode |= modeISBLK + } + case fm&os.ModeNamedPipe != 0: + mode |= modeISFIFO + case fm&os.ModeSocket != 0: + mode |= modeISSOCK + } + return mode +} + +// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem +// to a tar header +func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { + capability, _ := system.Lgetxattr(path, "security.capability") + if capability != nil { + hdr.Xattrs = make(map[string]string) + hdr.Xattrs["security.capability"] = string(capability) + } + return nil +} + +type tarWhiteoutConverter interface { + ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) + ConvertRead(*tar.Header, string) (bool, error) +} + +type tarAppender struct { + TarWriter *tar.Writer + Buffer *bufio.Writer + + // for hardlink mapping + SeenFiles map[uint64]string + IDMappings *idtools.IDMappings + + // For packing and unpacking whiteout files in the + // non standard format. The whiteout files defined + // by the AUFS standard are used as the tar whiteout + // standard. + WhiteoutConverter tarWhiteoutConverter +} + +func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer) *tarAppender { + return &tarAppender{ + SeenFiles: make(map[uint64]string), + TarWriter: tar.NewWriter(writer), + Buffer: pools.BufioWriter32KPool.Get(nil), + IDMappings: idMapping, + } +} + +// canonicalTarName provides a platform-independent and consistent posix-style +//path for files and directories to be archived regardless of the platform. 
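+// Directories gain a trailing "/" if they lack one; on Windows,
+// CanonicalTarNameForPath is additionally expected to rewrite `\` separators
+// to `/` (see the platform-specific implementation).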
+func canonicalTarName(name string, isDir bool) (string, error) { + name, err := CanonicalTarNameForPath(name) + if err != nil { + return "", err + } + + // suffix with '/' for directories + if isDir && !strings.HasSuffix(name, "/") { + name += "/" + } + return name, nil +} + +// addTarFile adds to the tar archive a file from `path` as `name` +func (ta *tarAppender) addTarFile(path, name string) error { + fi, err := os.Lstat(path) + if err != nil { + return err + } + + var link string + if fi.Mode()&os.ModeSymlink != 0 { + var err error + link, err = os.Readlink(path) + if err != nil { + return err + } + } + + hdr, err := FileInfoHeader(name, fi, link) + if err != nil { + return err + } + if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { + return err + } + + // if it's not a directory and has more than 1 link, + // it's hard linked, so set the type flag accordingly + if !fi.IsDir() && hasHardlinks(fi) { + inode, err := getInodeFromStat(fi.Sys()) + if err != nil { + return err + } + // a link should have a name that it links too + // and that linked name should be first in the tar archive + if oldpath, ok := ta.SeenFiles[inode]; ok { + hdr.Typeflag = tar.TypeLink + hdr.Linkname = oldpath + hdr.Size = 0 // This Must be here for the writer math to add up! + } else { + ta.SeenFiles[inode] = name + } + } + + //handle re-mapping container ID mappings back to host ID mappings before + //writing tar headers/files. We skip whiteout files because they were written + //by the kernel and already have proper ownership relative to the host + if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IDMappings.Empty() { + fileIDPair, err := getFileUIDGID(fi.Sys()) + if err != nil { + return err + } + hdr.Uid, hdr.Gid, err = ta.IDMappings.ToContainer(fileIDPair) + if err != nil { + return err + } + } + + if ta.WhiteoutConverter != nil { + wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) + if err != nil { + return err + } + + // If a new whiteout file exists, write original hdr, then + // replace hdr with wo to be written after. Whiteouts should + // always be written after the original. Note the original + // hdr may have been updated to be a whiteout with returning + // a whiteout header + if wo != nil { + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { + return fmt.Errorf("tar: cannot use whiteout for non-empty file") + } + hdr = wo + } + } + + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + + if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { + // We use system.OpenSequential to ensure we use sequential file + // access on Windows to avoid depleting the standby list. + // On Linux, this equates to a regular os.Open. + file, err := system.OpenSequential(path) + if err != nil { + return err + } + + ta.Buffer.Reset(ta.TarWriter) + defer ta.Buffer.Reset(nil) + _, err = io.Copy(ta.Buffer, file) + file.Close() + if err != nil { + return err + } + err = ta.Buffer.Flush() + if err != nil { + return err + } + } + + return nil +} + +func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns bool) error { + // hdr.Mode is in linux format, which we can use for sycalls, + // but for os.Foo() calls we need the mode converted to os.FileMode, + // so use hdrInfo.Mode() (they differ for e.g. 
setuid bits) + hdrInfo := hdr.FileInfo() + + switch hdr.Typeflag { + case tar.TypeDir: + // Create directory unless it exists as a directory already. + // In that case we just want to merge the two + if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { + if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { + return err + } + } + + case tar.TypeReg, tar.TypeRegA: + // Source is regular file. We use system.OpenFileSequential to use sequential + // file access to avoid depleting the standby list on Windows. + // On Linux, this equates to a regular os.OpenFile + file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) + if err != nil { + return err + } + if _, err := io.Copy(file, reader); err != nil { + file.Close() + return err + } + file.Close() + + case tar.TypeBlock, tar.TypeChar: + if inUserns { // cannot create devices in a userns + return nil + } + // Handle this is an OS-specific way + if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { + return err + } + + case tar.TypeFifo: + // Handle this is an OS-specific way + if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { + return err + } + + case tar.TypeLink: + targetPath := filepath.Join(extractDir, hdr.Linkname) + // check for hardlink breakout + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) + } + if err := os.Link(targetPath, path); err != nil { + return err + } + + case tar.TypeSymlink: + // path -> hdr.Linkname = targetPath + // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file + targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) + + // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because + // that symlink would first have to be created, which would be caught earlier, at this very check: + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) + } + if err := os.Symlink(hdr.Linkname, path); err != nil { + return err + } + + case tar.TypeXGlobalHeader: + logrus.Debug("PAX Global Extended Headers found and ignored") + return nil + + default: + return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag) + } + + // Lchown is not supported on Windows. + if Lchown && runtime.GOOS != "windows" { + if chownOpts == nil { + chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid} + } + if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { + return err + } + } + + var errors []string + for key, value := range hdr.Xattrs { + if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { + if err == syscall.ENOTSUP { + // We ignore errors here because not all graphdrivers support + // xattrs *cough* old versions of AUFS *cough*. However only + // ENOTSUP should be emitted in that case, otherwise we still + // bail. + errors = append(errors, err.Error()) + continue + } + return err + } + + } + + if len(errors) > 0 { + logrus.WithFields(logrus.Fields{ + "errors": errors, + }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") + } + + // There is no LChmod, so ignore mode for symlink. Also, this + // must happen after chown, as that can modify the file mode + if err := handleLChmod(hdr, path, hdrInfo); err != nil { + return err + } + + aTime := hdr.AccessTime + if aTime.Before(hdr.ModTime) { + // Last access time should never be before last modified time. 
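+			// Clamp it to ModTime so the Chtimes/LUtimesNano calls below
+			// receive a consistent (atime, mtime) pair.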
+ aTime = hdr.ModTime + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} + if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + return err + } + } + return nil +} + +// Tar creates an archive from the directory at `path`, and returns it as a +// stream of bytes. +func Tar(path string, compression Compression) (io.ReadCloser, error) { + return TarWithOptions(path, &TarOptions{Compression: compression}) +} + +// TarWithOptions creates an archive from the directory at `path`, only including files whose relative +// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. +func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { + + // Fix the source path to work with long path names. This is a no-op + // on platforms other than Windows. + srcPath = fixVolumePathPrefix(srcPath) + + pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) + if err != nil { + return nil, err + } + + pipeReader, pipeWriter := io.Pipe() + + compressWriter, err := CompressStream(pipeWriter, options.Compression) + if err != nil { + return nil, err + } + + go func() { + ta := newTarAppender( + idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), + compressWriter, + ) + ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat) + + defer func() { + // Make sure to check the error on Close. + if err := ta.TarWriter.Close(); err != nil { + logrus.Errorf("Can't close tar writer: %s", err) + } + if err := compressWriter.Close(); err != nil { + logrus.Errorf("Can't close compress writer: %s", err) + } + if err := pipeWriter.Close(); err != nil { + logrus.Errorf("Can't close pipe writer: %s", err) + } + }() + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + + stat, err := os.Lstat(srcPath) + if err != nil { + return + } + + if !stat.IsDir() { + // We can't later join a non-dir with any includes because the + // 'walk' will error if "file/." is stat-ed and "file" is not a + // directory. So, we must split the source path and use the + // basename as the include. 
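+		// E.g. tarring "/tmp/foo.txt" walks "/tmp" with
+		// IncludeFiles = []string{"foo.txt"}.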
+ if len(options.IncludeFiles) > 0 { + logrus.Warn("Tar: Can't archive a file with includes") + } + + dir, base := SplitPathDirEntry(srcPath) + srcPath = dir + options.IncludeFiles = []string{base} + } + + if len(options.IncludeFiles) == 0 { + options.IncludeFiles = []string{"."} + } + + seen := make(map[string]bool) + + for _, include := range options.IncludeFiles { + rebaseName := options.RebaseNames[include] + + walkRoot := getWalkRoot(srcPath, include) + filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { + if err != nil { + logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) + return nil + } + + relFilePath, err := filepath.Rel(srcPath, filePath) + if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { + // Error getting relative path OR we are looking + // at the source directory path. Skip in both situations. + return nil + } + + if options.IncludeSourceDir && include == "." && relFilePath != "." { + relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) + } + + skip := false + + // If "include" is an exact match for the current file + // then even if there's an "excludePatterns" pattern that + // matches it, don't skip it. IOW, assume an explicit 'include' + // is asking for that file no matter what - which is true + // for some files, like .dockerignore and Dockerfile (sometimes) + if include != relFilePath { + skip, err = pm.Matches(relFilePath) + if err != nil { + logrus.Errorf("Error matching %s: %v", relFilePath, err) + return err + } + } + + if skip { + // If we want to skip this file and its a directory + // then we should first check to see if there's an + // excludes pattern (e.g. !dir/file) that starts with this + // dir. If so then we can't skip this dir. + + // Its not a dir then so we can just return/skip. + if !f.IsDir() { + return nil + } + + // No exceptions (!...) in patterns so just skip dir + if !pm.Exclusions() { + return filepath.SkipDir + } + + dirSlash := relFilePath + string(filepath.Separator) + + for _, pat := range pm.Patterns() { + if !pat.Exclusion() { + continue + } + if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { + // found a match - so can't skip this dir + return nil + } + } + + // No matching exclusion dir so just skip dir + return filepath.SkipDir + } + + if seen[relFilePath] { + return nil + } + seen[relFilePath] = true + + // Rename the base resource. + if rebaseName != "" { + var replacement string + if rebaseName != string(filepath.Separator) { + // Special case the root directory to replace with an + // empty string instead so that we don't end up with + // double slashes in the paths. + replacement = rebaseName + } + + relFilePath = strings.Replace(relFilePath, include, replacement, 1) + } + + if err := ta.addTarFile(filePath, relFilePath); err != nil { + logrus.Errorf("Can't add file %s to tar: %s", filePath, err) + // if pipe is broken, stop writing tar stream to it + if err == io.ErrClosedPipe { + return err + } + } + return nil + }) + } + }() + + return pipeReader, nil +} + +// Unpack unpacks the decompressedArchive to dest with options. 
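+// Entries whose cleaned path would escape dest are rejected with a
+// breakoutError, and directory mtimes are applied only after all files have
+// been written.
+//
+// A sketch of the common call path (hypothetical f and destination):
+//
+//	r, err := DecompressStream(f)
+//	if err != nil {
+//		return err
+//	}
+//	defer r.Close()
+//	err = Unpack(r, "/some/dest", &TarOptions{})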
+func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { + tr := tar.NewReader(decompressedArchive) + trBuf := pools.BufioReader32KPool.Get(nil) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + rootIDs := idMappings.RootPair() + whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat) + + // Iterate through the files in the archive. +loop: + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return err + } + + // Normalize name, for safety and for a simple is-root check + // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: + // This keeps "..\" as-is, but normalizes "\..\" to "\". + hdr.Name = filepath.Clean(hdr.Name) + + for _, exclude := range options.ExcludePatterns { + if strings.HasPrefix(hdr.Name, exclude) { + continue loop + } + } + + // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in + // the filepath format for the OS on which the daemon is running. Hence + // the check for a slash-suffix MUST be done in an OS-agnostic way. + if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { + // Not the root directory, ensure that the parent directory exists + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + err = idtools.MkdirAllAndChownNew(parentPath, 0777, rootIDs) + if err != nil { + return err + } + } + } + + path := filepath.Join(dest, hdr.Name) + rel, err := filepath.Rel(dest, path) + if err != nil { + return err + } + if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { + return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) + } + + // If path exits we almost always just want to remove and replace it + // The only exception is when it is a directory *and* the file from + // the layer is also a directory. Then we want to merge them (i.e. + // just apply the metadata from the layer). + if fi, err := os.Lstat(path); err == nil { + if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { + // If NoOverwriteDirNonDir is true then we cannot replace + // an existing directory with a non-directory from the archive. + return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) + } + + if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { + // If NoOverwriteDirNonDir is true then we cannot replace + // an existing non-directory with a directory from the archive. + return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) + } + + if fi.IsDir() && hdr.Name == "." 
{ + continue + } + + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return err + } + } + } + trBuf.Reset(tr) + + if err := remapIDs(idMappings, hdr); err != nil { + return err + } + + if whiteoutConverter != nil { + writeFile, err := whiteoutConverter.ConvertRead(hdr, path) + if err != nil { + return err + } + if !writeFile { + continue + } + } + + if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { + return err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return err + } + } + return nil +} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// identity (uncompressed), gzip, bzip2, xz. +// FIXME: specify behavior when target path exists vs. doesn't exist. +func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, true) +} + +// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive must be an uncompressed stream. +func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, false) +} + +// Handler for teasing out the automatic decompression +func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { + if tarArchive == nil { + return fmt.Errorf("Empty archive") + } + dest = filepath.Clean(dest) + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + r := tarArchive + if decompress { + decompressedArchive, err := DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() + r = decompressedArchive + } + + return Unpack(r, dest, options) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func (archiver *Archiver) TarUntar(src, dst string) error { + logrus.Debugf("TarUntar(%s %s)", src, dst) + archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) + if err != nil { + return err + } + defer archive.Close() + options := &TarOptions{ + UIDMaps: archiver.IDMappings.UIDs(), + GIDMaps: archiver.IDMappings.GIDs(), + } + return archiver.Untar(archive, dst, options) +} + +// UntarPath untar a file from path to a destination, src is the source tar file path. +func (archiver *Archiver) UntarPath(src, dst string) error { + archive, err := os.Open(src) + if err != nil { + return err + } + defer archive.Close() + options := &TarOptions{ + UIDMaps: archiver.IDMappings.UIDs(), + GIDMaps: archiver.IDMappings.GIDs(), + } + return archiver.Untar(archive, dst, options) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. 
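+// If src is a regular file it delegates to CopyFileWithTar; otherwise dst is
+// created first (owned by the remapped root pair when ID mappings are set)
+// and the tree is copied via TarUntar.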
+func (archiver *Archiver) CopyWithTar(src, dst string) error { + srcSt, err := os.Stat(src) + if err != nil { + return err + } + if !srcSt.IsDir() { + return archiver.CopyFileWithTar(src, dst) + } + + // if this archiver is set up with ID mapping we need to create + // the new destination directory with the remapped root UID/GID pair + // as owner + rootIDs := archiver.IDMappings.RootPair() + // Create dst, copy src's content into it + logrus.Debugf("Creating dest directory: %s", dst) + if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { + return err + } + logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) + return archiver.TarUntar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { + logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) + srcSt, err := os.Stat(src) + if err != nil { + return err + } + + if srcSt.IsDir() { + return fmt.Errorf("Can't copy a directory") + } + + // Clean up the trailing slash. This must be done in an operating + // system specific manner. + if dst[len(dst)-1] == os.PathSeparator { + dst = filepath.Join(dst, filepath.Base(src)) + } + // Create the holding directory if necessary + if err := system.MkdirAll(filepath.Dir(dst), 0700, ""); err != nil { + return err + } + + r, w := io.Pipe() + errC := promise.Go(func() error { + defer w.Close() + + srcF, err := os.Open(src) + if err != nil { + return err + } + defer srcF.Close() + + hdr, err := tar.FileInfoHeader(srcSt, "") + if err != nil { + return err + } + hdr.Name = filepath.Base(dst) + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + + if err := remapIDs(archiver.IDMappings, hdr); err != nil { + return err + } + + tw := tar.NewWriter(w) + defer tw.Close() + if err := tw.WriteHeader(hdr); err != nil { + return err + } + if _, err := io.Copy(tw, srcF); err != nil { + return err + } + return nil + }) + defer func() { + if er := <-errC; err == nil && er != nil { + err = er + } + }() + + err = archiver.Untar(r, filepath.Dir(dst), nil) + if err != nil { + r.CloseWithError(err) + } + return err +} + +func remapIDs(idMappings *idtools.IDMappings, hdr *tar.Header) error { + ids, err := idMappings.ToHost(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}) + hdr.Uid, hdr.Gid = ids.UID, ids.GID + return err +} + +// cmdStream executes a command, and returns its stdout as a stream. +// If the command fails to run or doesn't complete successfully, an error +// will be returned, including anything written on stderr. +func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, <-chan struct{}, error) { + chdone := make(chan struct{}) + cmd.Stdin = input + pipeR, pipeW := io.Pipe() + cmd.Stdout = pipeW + var errBuf bytes.Buffer + cmd.Stderr = &errBuf + + // Run the command and return the pipe + if err := cmd.Start(); err != nil { + return nil, nil, err + } + + // Copy stdout to the returned pipe + go func() { + if err := cmd.Wait(); err != nil { + pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) + } else { + pipeW.Close() + } + close(chdone) + }() + + return pipeR, chdone, nil +} + +// NewTempArchive reads the content of src into a temporary file, and returns the contents +// of that file as an archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. 
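+//
+// A minimal sketch (hypothetical; src may be any io.Reader):
+//
+//	ta, err := NewTempArchive(src, "") // "" uses the default temp directory
+//	if err != nil {
+//		return err
+//	}
+//	io.Copy(dst, ta) // the backing file is removed once fully read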
+func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { + f, err := ioutil.TempFile(dir, "") + if err != nil { + return nil, err + } + if _, err := io.Copy(f, src); err != nil { + return nil, err + } + if _, err := f.Seek(0, 0); err != nil { + return nil, err + } + st, err := f.Stat() + if err != nil { + return nil, err + } + size := st.Size() + return &TempArchive{File: f, Size: size}, nil +} + +// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. +type TempArchive struct { + *os.File + Size int64 // Pre-computed from Stat().Size() as a convenience + read int64 + closed bool +} + +// Close closes the underlying file if it's still open, or does a no-op +// to allow callers to try to close the TempArchive multiple times safely. +func (archive *TempArchive) Close() error { + if archive.closed { + return nil + } + + archive.closed = true + + return archive.File.Close() +} + +func (archive *TempArchive) Read(data []byte) (int, error) { + n, err := archive.File.Read(data) + archive.read += int64(n) + if err != nil || archive.read == archive.Size { + archive.Close() + os.Remove(archive.File.Name()) + } + return n, err +} diff --git a/vendor/github.com/moby/moby/pkg/archive/archive_linux.go b/vendor/github.com/moby/moby/pkg/archive/archive_linux.go new file mode 100644 index 000000000..6e950e93c --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/archive_linux.go @@ -0,0 +1,92 @@ +package archive + +import ( + "archive/tar" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/system" + "golang.org/x/sys/unix" +) + +func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { + if format == OverlayWhiteoutFormat { + return overlayWhiteoutConverter{} + } + return nil +} + +type overlayWhiteoutConverter struct{} + +func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) { + // convert whiteouts to AUFS format + if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 { + // we just rename the file and make it normal + dir, filename := filepath.Split(hdr.Name) + hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename) + hdr.Mode = 0600 + hdr.Typeflag = tar.TypeReg + hdr.Size = 0 + } + + if fi.Mode()&os.ModeDir != 0 { + // convert opaque dirs to AUFS format by writing an empty file with the prefix + opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque") + if err != nil { + return nil, err + } + if len(opaque) == 1 && opaque[0] == 'y' { + if hdr.Xattrs != nil { + delete(hdr.Xattrs, "trusted.overlay.opaque") + } + + // create a header for the whiteout file + // it should inherit some properties from the parent, but be a regular file + wo = &tar.Header{ + Typeflag: tar.TypeReg, + Mode: hdr.Mode & int64(os.ModePerm), + Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir), + Size: 0, + Uid: hdr.Uid, + Uname: hdr.Uname, + Gid: hdr.Gid, + Gname: hdr.Gname, + AccessTime: hdr.AccessTime, + ChangeTime: hdr.ChangeTime, + } + } + } + + return +} + +func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) { + base := filepath.Base(path) + dir := filepath.Dir(path) + + // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay + if base == WhiteoutOpaqueDir { + err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0) + // don't write the file itself + return false, err + } + + // if a file was deleted and we are 
using overlay, we need to create a character device + if strings.HasPrefix(base, WhiteoutPrefix) { + originalBase := base[len(WhiteoutPrefix):] + originalPath := filepath.Join(dir, originalBase) + + if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil { + return false, err + } + if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil { + return false, err + } + + // don't write the file itself + return false, nil + } + + return true, nil +} diff --git a/vendor/github.com/moby/moby/pkg/archive/archive_linux_test.go b/vendor/github.com/moby/moby/pkg/archive/archive_linux_test.go new file mode 100644 index 000000000..f219b3e67 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/archive_linux_test.go @@ -0,0 +1,188 @@ +package archive + +import ( + "io/ioutil" + "os" + "path/filepath" + "syscall" + "testing" + + "github.com/docker/docker/pkg/system" + "golang.org/x/sys/unix" +) + +// setupOverlayTestDir creates files in a directory with overlay whiteouts +// Tree layout +// . +// ├── d1 # opaque, 0700 +// │   └── f1 # empty file, 0600 +// ├── d2 # opaque, 0750 +// │   └── f1 # empty file, 0660 +// └── d3 # 0700 +// └── f1 # whiteout, 0644 +func setupOverlayTestDir(t *testing.T, src string) { + // Create opaque directory containing single file and permission 0700 + if err := os.Mkdir(filepath.Join(src, "d1"), 0700); err != nil { + t.Fatal(err) + } + + if err := system.Lsetxattr(filepath.Join(src, "d1"), "trusted.overlay.opaque", []byte("y"), 0); err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(filepath.Join(src, "d1", "f1"), []byte{}, 0600); err != nil { + t.Fatal(err) + } + + // Create another opaque directory containing single file but with permission 0750 + if err := os.Mkdir(filepath.Join(src, "d2"), 0750); err != nil { + t.Fatal(err) + } + + if err := system.Lsetxattr(filepath.Join(src, "d2"), "trusted.overlay.opaque", []byte("y"), 0); err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(filepath.Join(src, "d2", "f1"), []byte{}, 0660); err != nil { + t.Fatal(err) + } + + // Create regular directory with deleted file + if err := os.Mkdir(filepath.Join(src, "d3"), 0700); err != nil { + t.Fatal(err) + } + + if err := system.Mknod(filepath.Join(src, "d3", "f1"), unix.S_IFCHR, 0); err != nil { + t.Fatal(err) + } +} + +func checkOpaqueness(t *testing.T, path string, opaque string) { + xattrOpaque, err := system.Lgetxattr(path, "trusted.overlay.opaque") + if err != nil { + t.Fatal(err) + } + if string(xattrOpaque) != opaque { + t.Fatalf("Unexpected opaque value: %q, expected %q", string(xattrOpaque), opaque) + } + +} + +func checkOverlayWhiteout(t *testing.T, path string) { + stat, err := os.Stat(path) + if err != nil { + t.Fatal(err) + } + statT, ok := stat.Sys().(*syscall.Stat_t) + if !ok { + t.Fatalf("Unexpected type: %t, expected *syscall.Stat_t", stat.Sys()) + } + if statT.Rdev != 0 { + t.Fatalf("Non-zero device number for whiteout") + } +} + +func checkFileMode(t *testing.T, path string, perm os.FileMode) { + stat, err := os.Stat(path) + if err != nil { + t.Fatal(err) + } + if stat.Mode() != perm { + t.Fatalf("Unexpected file mode for %s: %o, expected %o", path, stat.Mode(), perm) + } +} + +func TestOverlayTarUntar(t *testing.T) { + oldmask, err := system.Umask(0) + if err != nil { + t.Fatal(err) + } + defer system.Umask(oldmask) + + src, err := ioutil.TempDir("", "docker-test-overlay-tar-src") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(src) + + setupOverlayTestDir(t, src) + + dst, err := ioutil.TempDir("", 
"docker-test-overlay-tar-dst") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dst) + + options := &TarOptions{ + Compression: Uncompressed, + WhiteoutFormat: OverlayWhiteoutFormat, + } + archive, err := TarWithOptions(src, options) + if err != nil { + t.Fatal(err) + } + defer archive.Close() + + if err := Untar(archive, dst, options); err != nil { + t.Fatal(err) + } + + checkFileMode(t, filepath.Join(dst, "d1"), 0700|os.ModeDir) + checkFileMode(t, filepath.Join(dst, "d2"), 0750|os.ModeDir) + checkFileMode(t, filepath.Join(dst, "d3"), 0700|os.ModeDir) + checkFileMode(t, filepath.Join(dst, "d1", "f1"), 0600) + checkFileMode(t, filepath.Join(dst, "d2", "f1"), 0660) + checkFileMode(t, filepath.Join(dst, "d3", "f1"), os.ModeCharDevice|os.ModeDevice) + + checkOpaqueness(t, filepath.Join(dst, "d1"), "y") + checkOpaqueness(t, filepath.Join(dst, "d2"), "y") + checkOpaqueness(t, filepath.Join(dst, "d3"), "") + checkOverlayWhiteout(t, filepath.Join(dst, "d3", "f1")) +} + +func TestOverlayTarAUFSUntar(t *testing.T) { + oldmask, err := system.Umask(0) + if err != nil { + t.Fatal(err) + } + defer system.Umask(oldmask) + + src, err := ioutil.TempDir("", "docker-test-overlay-tar-src") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(src) + + setupOverlayTestDir(t, src) + + dst, err := ioutil.TempDir("", "docker-test-overlay-tar-dst") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dst) + + archive, err := TarWithOptions(src, &TarOptions{ + Compression: Uncompressed, + WhiteoutFormat: OverlayWhiteoutFormat, + }) + if err != nil { + t.Fatal(err) + } + defer archive.Close() + + if err := Untar(archive, dst, &TarOptions{ + Compression: Uncompressed, + WhiteoutFormat: AUFSWhiteoutFormat, + }); err != nil { + t.Fatal(err) + } + + checkFileMode(t, filepath.Join(dst, "d1"), 0700|os.ModeDir) + checkFileMode(t, filepath.Join(dst, "d1", WhiteoutOpaqueDir), 0700) + checkFileMode(t, filepath.Join(dst, "d2"), 0750|os.ModeDir) + checkFileMode(t, filepath.Join(dst, "d2", WhiteoutOpaqueDir), 0750) + checkFileMode(t, filepath.Join(dst, "d3"), 0700|os.ModeDir) + checkFileMode(t, filepath.Join(dst, "d1", "f1"), 0600) + checkFileMode(t, filepath.Join(dst, "d2", "f1"), 0660) + checkFileMode(t, filepath.Join(dst, "d3", WhiteoutPrefix+"f1"), 0600) +} diff --git a/vendor/github.com/moby/moby/pkg/archive/archive_other.go b/vendor/github.com/moby/moby/pkg/archive/archive_other.go new file mode 100644 index 000000000..54acbf285 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/archive_other.go @@ -0,0 +1,7 @@ +// +build !linux + +package archive + +func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/archive/archive_test.go b/vendor/github.com/moby/moby/pkg/archive/archive_test.go new file mode 100644 index 000000000..1371b8ab1 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/archive_test.go @@ -0,0 +1,1279 @@ +package archive + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var tmp string + +func init() { + tmp = "/tmp/" + if runtime.GOOS == "windows" { + tmp = os.Getenv("TEMP") + `\` + } +} + +var defaultArchiver = NewDefaultArchiver() + +func defaultTarUntar(src, dst string) error { + return defaultArchiver.TarUntar(src, dst) +} + +func defaultUntarPath(src, dst string) error { + return defaultArchiver.UntarPath(src, 
dst) +} + +func defaultCopyFileWithTar(src, dst string) (err error) { + return defaultArchiver.CopyFileWithTar(src, dst) +} + +func defaultCopyWithTar(src, dst string) error { + return defaultArchiver.CopyWithTar(src, dst) +} + +func TestIsArchivePathDir(t *testing.T) { + cmd := exec.Command("sh", "-c", "mkdir -p /tmp/archivedir") + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Fail to create an archive file for test : %s.", output) + } + if IsArchivePath(tmp + "archivedir") { + t.Fatalf("Incorrectly recognised directory as an archive") + } +} + +func TestIsArchivePathInvalidFile(t *testing.T) { + cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1024 count=1 of=/tmp/archive && gzip --stdout /tmp/archive > /tmp/archive.gz") + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Fail to create an archive file for test : %s.", output) + } + if IsArchivePath(tmp + "archive") { + t.Fatalf("Incorrectly recognised invalid tar path as archive") + } + if IsArchivePath(tmp + "archive.gz") { + t.Fatalf("Incorrectly recognised invalid compressed tar path as archive") + } +} + +func TestIsArchivePathTar(t *testing.T) { + var whichTar string + if runtime.GOOS == "solaris" { + whichTar = "gtar" + } else { + whichTar = "tar" + } + cmdStr := fmt.Sprintf("touch /tmp/archivedata && %s -cf /tmp/archive /tmp/archivedata && gzip --stdout /tmp/archive > /tmp/archive.gz", whichTar) + cmd := exec.Command("sh", "-c", cmdStr) + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Fail to create an archive file for test : %s.", output) + } + if !IsArchivePath(tmp + "/archive") { + t.Fatalf("Did not recognise valid tar path as archive") + } + if !IsArchivePath(tmp + "archive.gz") { + t.Fatalf("Did not recognise valid compressed tar path as archive") + } +} + +func testDecompressStream(t *testing.T, ext, compressCommand string) { + cmd := exec.Command("sh", "-c", + fmt.Sprintf("touch /tmp/archive && %s /tmp/archive", compressCommand)) + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Failed to create an archive file for test : %s.", output) + } + filename := "archive." 
+ ext + archive, err := os.Open(tmp + filename) + if err != nil { + t.Fatalf("Failed to open file %s: %v", filename, err) + } + defer archive.Close() + + r, err := DecompressStream(archive) + if err != nil { + t.Fatalf("Failed to decompress %s: %v", filename, err) + } + if _, err = ioutil.ReadAll(r); err != nil { + t.Fatalf("Failed to read the decompressed stream: %v ", err) + } + if err = r.Close(); err != nil { + t.Fatalf("Failed to close the decompressed stream: %v ", err) + } +} + +func TestDecompressStreamGzip(t *testing.T) { + testDecompressStream(t, "gz", "gzip -f") +} + +func TestDecompressStreamBzip2(t *testing.T) { + testDecompressStream(t, "bz2", "bzip2 -f") +} + +func TestDecompressStreamXz(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Xz not present in msys2") + } + testDecompressStream(t, "xz", "xz -f") +} + +func TestCompressStreamXzUnsupported(t *testing.T) { + dest, err := os.Create(tmp + "dest") + if err != nil { + t.Fatalf("Fail to create the destination file") + } + defer dest.Close() + + _, err = CompressStream(dest, Xz) + if err == nil { + t.Fatalf("Should fail as xz is unsupported for compression format.") + } +} + +func TestCompressStreamBzip2Unsupported(t *testing.T) { + dest, err := os.Create(tmp + "dest") + if err != nil { + t.Fatalf("Fail to create the destination file") + } + defer dest.Close() + + _, err = CompressStream(dest, Xz) + if err == nil { + t.Fatalf("Should fail as xz is unsupported for compression format.") + } +} + +func TestCompressStreamInvalid(t *testing.T) { + dest, err := os.Create(tmp + "dest") + if err != nil { + t.Fatalf("Fail to create the destination file") + } + defer dest.Close() + + _, err = CompressStream(dest, -1) + if err == nil { + t.Fatalf("Should fail as xz is unsupported for compression format.") + } +} + +func TestExtensionInvalid(t *testing.T) { + compression := Compression(-1) + output := compression.Extension() + if output != "" { + t.Fatalf("The extension of an invalid compression should be an empty string.") + } +} + +func TestExtensionUncompressed(t *testing.T) { + compression := Uncompressed + output := compression.Extension() + if output != "tar" { + t.Fatalf("The extension of an uncompressed archive should be 'tar'.") + } +} +func TestExtensionBzip2(t *testing.T) { + compression := Bzip2 + output := compression.Extension() + if output != "tar.bz2" { + t.Fatalf("The extension of a bzip2 archive should be 'tar.bz2'") + } +} +func TestExtensionGzip(t *testing.T) { + compression := Gzip + output := compression.Extension() + if output != "tar.gz" { + t.Fatalf("The extension of a bzip2 archive should be 'tar.gz'") + } +} +func TestExtensionXz(t *testing.T) { + compression := Xz + output := compression.Extension() + if output != "tar.xz" { + t.Fatalf("The extension of a bzip2 archive should be 'tar.xz'") + } +} + +func TestCmdStreamLargeStderr(t *testing.T) { + cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") + out, _, err := cmdStream(cmd, nil) + if err != nil { + t.Fatalf("Failed to start command: %s", err) + } + errCh := make(chan error) + go func() { + _, err := io.Copy(ioutil.Discard, out) + errCh <- err + }() + select { + case err := <-errCh: + if err != nil { + t.Fatalf("Command should not have failed (err=%.100s...)", err) + } + case <-time.After(5 * time.Second): + t.Fatalf("Command did not complete in 5 seconds; probable deadlock") + } +} + +func TestCmdStreamBad(t *testing.T) { + // TODO Windows: Figure out why this is failing in CI but not locally + if 
runtime.GOOS == "windows" { + t.Skip("Failing on Windows CI machines") + } + badCmd := exec.Command("sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1") + out, _, err := cmdStream(badCmd, nil) + if err != nil { + t.Fatalf("Failed to start command: %s", err) + } + if output, err := ioutil.ReadAll(out); err == nil { + t.Fatalf("Command should have failed") + } else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" { + t.Fatalf("Wrong error value (%s)", err) + } else if s := string(output); s != "hello\n" { + t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) + } +} + +func TestCmdStreamGood(t *testing.T) { + cmd := exec.Command("sh", "-c", "echo hello; exit 0") + out, _, err := cmdStream(cmd, nil) + if err != nil { + t.Fatal(err) + } + if output, err := ioutil.ReadAll(out); err != nil { + t.Fatalf("Command should not have failed (err=%s)", err) + } else if s := string(output); s != "hello\n" { + t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) + } +} + +func TestUntarPathWithInvalidDest(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempFolder) + invalidDestFolder := filepath.Join(tempFolder, "invalidDest") + // Create a src file + srcFile := filepath.Join(tempFolder, "src") + tarFile := filepath.Join(tempFolder, "src.tar") + os.Create(srcFile) + os.Create(invalidDestFolder) // being a file (not dir) should cause an error + + // Translate back to Unix semantics as next exec.Command is run under sh + srcFileU := srcFile + tarFileU := tarFile + if runtime.GOOS == "windows" { + tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" + srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" + } + + cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) + _, err = cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + + err = defaultUntarPath(tarFile, invalidDestFolder) + if err == nil { + t.Fatalf("UntarPath with invalid destination path should throw an error.") + } +} + +func TestUntarPathWithInvalidSrc(t *testing.T) { + dest, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatalf("Fail to create the destination file") + } + defer os.RemoveAll(dest) + err = defaultUntarPath("/invalid/path", dest) + if err == nil { + t.Fatalf("UntarPath with invalid src path should throw an error.") + } +} + +func TestUntarPath(t *testing.T) { + tmpFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpFolder) + srcFile := filepath.Join(tmpFolder, "src") + tarFile := filepath.Join(tmpFolder, "src.tar") + os.Create(filepath.Join(tmpFolder, "src")) + + destFolder := filepath.Join(tmpFolder, "dest") + err = os.MkdirAll(destFolder, 0740) + if err != nil { + t.Fatalf("Fail to create the destination file") + } + + // Translate back to Unix semantics as next exec.Command is run under sh + srcFileU := srcFile + tarFileU := tarFile + if runtime.GOOS == "windows" { + tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" + srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" + } + cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) + _, err = cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + + err = defaultUntarPath(tarFile, destFolder) + if err != nil { + t.Fatalf("UntarPath shouldn't throw an error, %s.", err) + } + expectedFile := 
filepath.Join(destFolder, srcFileU) + _, err = os.Stat(expectedFile) + if err != nil { + t.Fatalf("Destination folder should contain the source file but did not.") + } +} + +// Do the same test as above, but with the destination as a file; it should fail +func TestUntarPathWithDestinationFile(t *testing.T) { + tmpFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpFolder) + srcFile := filepath.Join(tmpFolder, "src") + tarFile := filepath.Join(tmpFolder, "src.tar") + os.Create(filepath.Join(tmpFolder, "src")) + + // Translate back to Unix semantics as next exec.Command is run under sh + srcFileU := srcFile + tarFileU := tarFile + if runtime.GOOS == "windows" { + tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" + srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" + } + cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) + _, err = cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + destFile := filepath.Join(tmpFolder, "dest") + _, err = os.Create(destFile) + if err != nil { + t.Fatalf("Failed to create the destination file") + } + err = defaultUntarPath(tarFile, destFile) + if err == nil { + t.Fatalf("UntarPath should throw an error if the destination is a file") + } +} + +// Do the same test as above, but with a destination folder that already exists +// and where the destination file is a directory +// It's working, see https://github.com/docker/docker/issues/10040 +func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) { + tmpFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpFolder) + srcFile := filepath.Join(tmpFolder, "src") + tarFile := filepath.Join(tmpFolder, "src.tar") + os.Create(srcFile) + + // Translate back to Unix semantics as next exec.Command is run under sh + srcFileU := srcFile + tarFileU := tarFile + if runtime.GOOS == "windows" { + tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" + srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" + } + + cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) + _, err = cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + destFolder := filepath.Join(tmpFolder, "dest") + err = os.MkdirAll(destFolder, 0740) + if err != nil { + t.Fatalf("Failed to create the destination folder") + } + // Let's create a folder that has the same path as the extracted file (from tar) + destSrcFileAsFolder := filepath.Join(destFolder, srcFileU) + err = os.MkdirAll(destSrcFileAsFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = defaultUntarPath(tarFile, destFolder) + if err != nil { + t.Fatalf("UntarPath should not throw an error if the extracted file already exists and is a folder") + } +} + +func TestCopyWithTarInvalidSrc(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempFolder) + destFolder := filepath.Join(tempFolder, "dest") + invalidSrc := filepath.Join(tempFolder, "doesnotexists") + err = os.MkdirAll(destFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = defaultCopyWithTar(invalidSrc, destFolder) + if err == nil { + t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") + } +} + +func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempFolder) + srcFolder := filepath.Join(tempFolder, "src") + inexistentDestFolder :=
filepath.Join(tempFolder, "doesnotexists") + err = os.MkdirAll(srcFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = defaultCopyWithTar(srcFolder, inexistentDestFolder) + if err != nil { + t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") + } + _, err = os.Stat(inexistentDestFolder) + if err != nil { + t.Fatalf("CopyWithTar with an inexistent folder should create it.") + } +} + +// Test CopyWithTar with a file as src +func TestCopyWithTarSrcFile(t *testing.T) { + folder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(folder) + dest := filepath.Join(folder, "dest") + srcFolder := filepath.Join(folder, "src") + src := filepath.Join(folder, filepath.Join("src", "src")) + err = os.MkdirAll(srcFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = os.MkdirAll(dest, 0740) + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(src, []byte("content"), 0777) + err = defaultCopyWithTar(src, dest) + if err != nil { + t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) + } + _, err = os.Stat(dest) + // FIXME Check the content + if err != nil { + t.Fatalf("Destination file should be the same as the source.") + } +} + +// Test CopyWithTar with a folder as src +func TestCopyWithTarSrcFolder(t *testing.T) { + folder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(folder) + dest := filepath.Join(folder, "dest") + src := filepath.Join(folder, filepath.Join("src", "folder")) + err = os.MkdirAll(src, 0740) + if err != nil { + t.Fatal(err) + } + err = os.MkdirAll(dest, 0740) + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(filepath.Join(src, "file"), []byte("content"), 0777) + err = defaultCopyWithTar(src, dest) + if err != nil { + t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) + } + _, err = os.Stat(dest) + // FIXME Check the content (the file inside) + if err != nil { + t.Fatalf("Destination folder should contain the source file but did not.") + } +} + +func TestCopyFileWithTarInvalidSrc(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempFolder) + destFolder := filepath.Join(tempFolder, "dest") + err = os.MkdirAll(destFolder, 0740) + if err != nil { + t.Fatal(err) + } + invalidFile := filepath.Join(tempFolder, "doesnotexists") + err = defaultCopyFileWithTar(invalidFile, destFolder) + if err == nil { + t.Fatalf("archiver.CopyFileWithTar with invalid src path should throw an error.") + } +} + +func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempFolder) + srcFile := filepath.Join(tempFolder, "src") + inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") + _, err = os.Create(srcFile) + if err != nil { + t.Fatal(err) + } + err = defaultCopyFileWithTar(srcFile, inexistentDestFolder) + if err != nil { + t.Fatalf("CopyFileWithTar with an inexistent folder shouldn't fail.") + } + _, err = os.Stat(inexistentDestFolder) + if err != nil { + t.Fatalf("CopyFileWithTar with an inexistent folder should create it.") + } + // FIXME Test the src file and content +} + +func TestCopyFileWithTarSrcFolder(t *testing.T) { + folder, err := ioutil.TempDir("", "docker-archive-copyfilewithtar-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(folder) + dest := filepath.Join(folder, "dest") + src :=
filepath.Join(folder, "srcfolder") + err = os.MkdirAll(src, 0740) + if err != nil { + t.Fatal(err) + } + err = os.MkdirAll(dest, 0740) + if err != nil { + t.Fatal(err) + } + err = defaultCopyFileWithTar(src, dest) + if err == nil { + t.Fatalf("CopyFileWithTar should throw an error with a folder.") + } +} + +func TestCopyFileWithTarSrcFile(t *testing.T) { + folder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(folder) + dest := filepath.Join(folder, "dest") + srcFolder := filepath.Join(folder, "src") + src := filepath.Join(folder, filepath.Join("src", "src")) + err = os.MkdirAll(srcFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = os.MkdirAll(dest, 0740) + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(src, []byte("content"), 0777) + err = defaultCopyFileWithTar(src, dest+"/") + if err != nil { + t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err) + } + _, err = os.Stat(dest) + if err != nil { + t.Fatalf("Destination folder should contain the source file but did not.") + } +} + +func TestTarFiles(t *testing.T) { + // TODO Windows: Figure out how to port this test. + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + // try without hardlinks + if err := checkNoChanges(1000, false); err != nil { + t.Fatal(err) + } + // try with hardlinks + if err := checkNoChanges(1000, true); err != nil { + t.Fatal(err) + } +} + +func checkNoChanges(fileNum int, hardlinks bool) error { + srcDir, err := ioutil.TempDir("", "docker-test-srcDir") + if err != nil { + return err + } + defer os.RemoveAll(srcDir) + + destDir, err := ioutil.TempDir("", "docker-test-destDir") + if err != nil { + return err + } + defer os.RemoveAll(destDir) + + _, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks) + if err != nil { + return err + } + + err = defaultTarUntar(srcDir, destDir) + if err != nil { + return err + } + + changes, err := ChangesDirs(destDir, srcDir) + if err != nil { + return err + } + if len(changes) > 0 { + return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes)) + } + return nil +} + +func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) { + archive, err := TarWithOptions(origin, options) + if err != nil { + t.Fatal(err) + } + defer archive.Close() + + buf := make([]byte, 10) + if _, err := archive.Read(buf); err != nil { + return nil, err + } + wrap := io.MultiReader(bytes.NewReader(buf), archive) + + detectedCompression := DetectCompression(buf) + compression := options.Compression + if detectedCompression.Extension() != compression.Extension() { + return nil, fmt.Errorf("Wrong compression detected. Expected %s, found %s", compression.Extension(), detectedCompression.Extension()) + } + + tmp, err := ioutil.TempDir("", "docker-test-untar") + if err != nil { + return nil, err + } + defer os.RemoveAll(tmp) + if err := Untar(wrap, tmp, nil); err != nil { + return nil, err + } + if _, err := os.Stat(tmp); err != nil { + return nil, err + } + + return ChangesDirs(origin, tmp) +} + +func TestTarUntar(t *testing.T) { + // TODO Windows: Figure out how to fix this test.
+ if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { + t.Fatal(err) + } + + for _, c := range []Compression{ + Uncompressed, + Gzip, + } { + changes, err := tarUntar(t, origin, &TarOptions{ + Compression: c, + ExcludePatterns: []string{"3"}, + }) + + if err != nil { + t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) + } + + if len(changes) != 1 || changes[0].Path != "/3" { + t.Fatalf("Unexpected differences after tarUntar: %v", changes) + } + } +} + +func TestTarWithOptions(t *testing.T) { + // TODO Windows: Figure out how to fix this test. + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + t.Fatal(err) + } + if _, err := ioutil.TempDir(origin, "folder"); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { + t.Fatal(err) + } + + cases := []struct { + opts *TarOptions + numChanges int + }{ + {&TarOptions{IncludeFiles: []string{"1"}}, 2}, + {&TarOptions{ExcludePatterns: []string{"2"}}, 1}, + {&TarOptions{ExcludePatterns: []string{"1", "folder*"}}, 2}, + {&TarOptions{IncludeFiles: []string{"1", "1"}}, 2}, + {&TarOptions{IncludeFiles: []string{"1"}, RebaseNames: map[string]string{"1": "test"}}, 4}, + } + for _, testCase := range cases { + changes, err := tarUntar(t, origin, testCase.opts) + if err != nil { + t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err) + } + if len(changes) != testCase.numChanges { + t.Errorf("Expected %d changes, got %d for %+v:", + testCase.numChanges, len(changes), testCase.opts) + } + } +} + +// Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz +// use PAX Global Extended Headers. +// Failing prevents the archives from being uncompressed during ADD +func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { + hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} + tmpDir, err := ioutil.TempDir("", "docker-test-archive-pax-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true, nil, false) + if err != nil { + t.Fatal(err) + } +} + +// Some tar have both GNU specific (huge uid) and Ustar specific (long name) things. +// Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work. +func TestUntarUstarGnuConflict(t *testing.T) { + f, err := os.Open("testdata/broken.tar") + if err != nil { + t.Fatal(err) + } + defer f.Close() + + found := false + tr := tar.NewReader(f) + // Iterate through the files in the archive. 
+ for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + t.Fatal(err) + } + if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { + found = true + break + } + } + if !found { + t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") + } +} + +func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { + fileData := []byte("fooo") + for n := 0; n < numberOfFiles; n++ { + fileName := fmt.Sprintf("file-%d", n) + if err := ioutil.WriteFile(filepath.Join(targetPath, fileName), fileData, 0700); err != nil { + return 0, err + } + if makeLinks { + if err := os.Link(filepath.Join(targetPath, fileName), filepath.Join(targetPath, fileName+"-link")); err != nil { + return 0, err + } + } + } + totalSize := numberOfFiles * len(fileData) + return totalSize, nil +} + +func BenchmarkTarUntar(b *testing.B) { + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + b.Fatal(err) + } + tempDir, err := ioutil.TempDir("", "docker-test-untar-destination") + if err != nil { + b.Fatal(err) + } + target := filepath.Join(tempDir, "dest") + n, err := prepareUntarSourceDirectory(100, origin, false) + if err != nil { + b.Fatal(err) + } + defer os.RemoveAll(origin) + defer os.RemoveAll(tempDir) + + b.ResetTimer() + b.SetBytes(int64(n)) + for n := 0; n < b.N; n++ { + err := defaultTarUntar(origin, target) + if err != nil { + b.Fatal(err) + } + os.RemoveAll(target) + } +} + +func BenchmarkTarUntarWithLinks(b *testing.B) { + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + b.Fatal(err) + } + tempDir, err := ioutil.TempDir("", "docker-test-untar-destination") + if err != nil { + b.Fatal(err) + } + target := filepath.Join(tempDir, "dest") + n, err := prepareUntarSourceDirectory(100, origin, true) + if err != nil { + b.Fatal(err) + } + defer os.RemoveAll(origin) + defer os.RemoveAll(tempDir) + + b.ResetTimer() + b.SetBytes(int64(n)) + for n := 0; n < b.N; n++ { + err := defaultTarUntar(origin, target) + if err != nil { + b.Fatal(err) + } + os.RemoveAll(target) + } +} + +func TestUntarInvalidFilenames(t *testing.T) { + // TODO Windows: Figure out how to fix this test. + if runtime.GOOS == "windows" { + t.Skip("Passes but hits breakoutError: platform and architecture is not supported") + } + for i, headers := range [][]*tar.Header{ + { + { + Name: "../victim/dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { + { + // Note the leading slash + Name: "/../victim/slash-dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestUntarHardlinkToSymlink(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + if runtime.GOOS == "windows" { + t.Skip("hardlinks on Windows") + } + for i, headers := range [][]*tar.Header{ + { + { + Name: "symlink1", + Typeflag: tar.TypeSymlink, + Linkname: "regfile", + Mode: 0644, + }, + { + Name: "symlink2", + Typeflag: tar.TypeLink, + Linkname: "symlink1", + Mode: 0644, + }, + { + Name: "regfile", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarHardlinkToSymlink", headers); err != nil { + t.Fatalf("i=%d. 
%v", i, err) + } + } +} + +func TestUntarInvalidHardlink(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + if runtime.GOOS == "windows" { + t.Skip("hardlinks on Windows") + } + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeLink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeLink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (hardlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try reading victim/hello (hardlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try removing victim directory (hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestUntarInvalidSymlink(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + if runtime.GOOS == "windows" { + t.Skip("hardlinks on Windows") + } + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeSymlink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeSymlink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try removing victim directory (symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try writing to victim/newdir/newfile with a symlink in the path + { + // this header needs to be before the next one, or else there is an error + Name: "dir/loophole", + Typeflag: tar.TypeSymlink, + Linkname: "../../victim", + Mode: 0755, + }, + { + Name: "dir/loophole/newdir/newfile", + 
Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestTempArchiveCloseMultipleTimes(t *testing.T) { + reader := ioutil.NopCloser(strings.NewReader("hello")) + tempArchive, err := NewTempArchive(reader, "") + if err != nil { + t.Fatal(err) + } + buf := make([]byte, 10) + n, err := tempArchive.Read(buf) + if n != 5 { + t.Fatalf("Expected to read 5 bytes. Read %d instead", n) + } + for i := 0; i < 3; i++ { + if err = tempArchive.Close(); err != nil { + t.Fatalf("i=%d. Unexpected error closing temp archive: %v", i, err) + } + } +} + +func TestReplaceFileTarWrapper(t *testing.T) { + filesInArchive := 20 + testcases := []struct { + doc string + filename string + modifier TarModifierFunc + expected string + fileCount int + }{ + { + doc: "Modifier creates a new file", + filename: "newfile", + modifier: createModifier(t), + expected: "the new content", + fileCount: filesInArchive + 1, + }, + { + doc: "Modifier replaces a file", + filename: "file-2", + modifier: createOrReplaceModifier, + expected: "the new content", + fileCount: filesInArchive, + }, + { + doc: "Modifier replaces the last file", + filename: fmt.Sprintf("file-%d", filesInArchive-1), + modifier: createOrReplaceModifier, + expected: "the new content", + fileCount: filesInArchive, + }, + { + doc: "Modifier appends to a file", + filename: "file-3", + modifier: appendModifier, + expected: "fooo\nnext line", + fileCount: filesInArchive, + }, + } + + for _, testcase := range testcases { + sourceArchive, cleanup := buildSourceArchive(t, filesInArchive) + defer cleanup() + + resultArchive := ReplaceFileTarWrapper( + sourceArchive, + map[string]TarModifierFunc{testcase.filename: testcase.modifier}) + + actual := readFileFromArchive(t, resultArchive, testcase.filename, testcase.fileCount, testcase.doc) + assert.Equal(t, testcase.expected, actual, testcase.doc) + } +} + +// TestPrefixHeaderReadable tests that files that could be created with the +// version of this package that was built with <=go1.7 are still readable.
+func TestPrefixHeaderReadable(t *testing.T) { + // https://gist.github.com/stevvooe/e2a790ad4e97425896206c0816e1a882#file-out-go + var testFile = []byte("\x1f\x8b\x08\x08\x44\x21\x68\x59\x00\x03\x74\x2e\x74\x61\x72\x00\x4b\xcb\xcf\x67\xa0\x35\x30\x80\x00\x86\x06\x10\x47\x01\xc1\x37\x40\x00\x54\xb6\xb1\xa1\xa9\x99\x09\x48\x25\x1d\x40\x69\x71\x49\x62\x91\x02\xe5\x76\xa1\x79\x84\x21\x91\xd6\x80\x72\xaf\x8f\x82\x51\x30\x0a\x46\x36\x00\x00\xf0\x1c\x1e\x95\x00\x06\x00\x00") + + tmpDir, err := ioutil.TempDir("", "prefix-test") + require.NoError(t, err) + defer os.RemoveAll(tmpDir) + err = Untar(bytes.NewReader(testFile), tmpDir, nil) + require.NoError(t, err) + + baseName := "foo" + pth := strings.Repeat("a", 100-len(baseName)) + "/" + baseName + + _, err = os.Lstat(filepath.Join(tmpDir, pth)) + require.NoError(t, err) +} + +func buildSourceArchive(t *testing.T, numberOfFiles int) (io.ReadCloser, func()) { + srcDir, err := ioutil.TempDir("", "docker-test-srcDir") + require.NoError(t, err) + + _, err = prepareUntarSourceDirectory(numberOfFiles, srcDir, false) + require.NoError(t, err) + + sourceArchive, err := TarWithOptions(srcDir, &TarOptions{}) + require.NoError(t, err) + return sourceArchive, func() { + os.RemoveAll(srcDir) + sourceArchive.Close() + } +} + +func createOrReplaceModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { + return &tar.Header{ + Mode: 0600, + Typeflag: tar.TypeReg, + }, []byte("the new content"), nil +} + +func createModifier(t *testing.T) TarModifierFunc { + return func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { + assert.Nil(t, content) + return createOrReplaceModifier(path, header, content) + } +} + +func appendModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { + buffer := bytes.Buffer{} + if content != nil { + if _, err := buffer.ReadFrom(content); err != nil { + return nil, nil, err + } + } + buffer.WriteString("\nnext line") + return &tar.Header{Mode: 0600, Typeflag: tar.TypeReg}, buffer.Bytes(), nil +} + +func readFileFromArchive(t *testing.T, archive io.ReadCloser, name string, expectedCount int, doc string) string { + destDir, err := ioutil.TempDir("", "docker-test-destDir") + require.NoError(t, err) + defer os.RemoveAll(destDir) + + err = Untar(archive, destDir, nil) + require.NoError(t, err) + + files, _ := ioutil.ReadDir(destDir) + assert.Len(t, files, expectedCount, doc) + + content, err := ioutil.ReadFile(filepath.Join(destDir, name)) + assert.NoError(t, err) + return string(content) +} diff --git a/vendor/github.com/moby/moby/pkg/archive/archive_unix.go b/vendor/github.com/moby/moby/pkg/archive/archive_unix.go new file mode 100644 index 000000000..121317432 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/archive_unix.go @@ -0,0 +1,122 @@ +// +build !windows + +package archive + +import ( + "archive/tar" + "errors" + "os" + "path/filepath" + "syscall" + + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/system" + rsystem "github.com/opencontainers/runc/libcontainer/system" + "golang.org/x/sys/unix" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return srcPath +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. 
On Linux, we +// can't use filepath.Join(srcPath,include) because this will clean away +// a trailing "." or "/" which may be important. +func getWalkRoot(srcPath string, include string) string { + return srcPath + string(filepath.Separator) + include +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + return p, nil // already unix-style +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. + +func chmodTarEntry(perm os.FileMode) os.FileMode { + return perm // noop for unix as golang APIs provide perm bits correctly +} + +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { + s, ok := stat.(*syscall.Stat_t) + + if ok { + // Currently go does not fill in the major/minors + if s.Mode&unix.S_IFBLK != 0 || + s.Mode&unix.S_IFCHR != 0 { + hdr.Devmajor = int64(major(uint64(s.Rdev))) + hdr.Devminor = int64(minor(uint64(s.Rdev))) + } + } + + return +} + +func getInodeFromStat(stat interface{}) (inode uint64, err error) { + s, ok := stat.(*syscall.Stat_t) + + if ok { + inode = uint64(s.Ino) + } + + return +} + +func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + return idtools.IDPair{}, errors.New("cannot convert stat value to syscall.Stat_t") + } + return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + if rsystem.RunningInUserNS() { + // cannot create a device if running in user namespace + return nil + } + + mode := uint32(hdr.Mode & 07777) + switch hdr.Typeflag { + case tar.TypeBlock: + mode |= unix.S_IFBLK + case tar.TypeChar: + mode |= unix.S_IFCHR + case tar.TypeFifo: + mode |= unix.S_IFIFO + } + + return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))) +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/archive/archive_unix_test.go b/vendor/github.com/moby/moby/pkg/archive/archive_unix_test.go new file mode 100644 index 000000000..90f8adaa5 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/archive_unix_test.go @@ -0,0 +1,250 @@ +// +build !windows + +package archive + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "syscall" + "testing" + + "github.com/docker/docker/pkg/system" + "golang.org/x/sys/unix" +) + +func TestCanonicalTarNameForPath(t *testing.T) { + cases := []struct{ in, expected string }{ + {"foo", "foo"}, + {"foo/bar", "foo/bar"}, + {"foo/dir/", "foo/dir/"}, + } + for _, v := range cases { + if out, err := CanonicalTarNameForPath(v.in); err != nil { + t.Fatalf("cannot get canonical name for 
path: %s: %v", v.in, err) + } else if out != v.expected { + t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) + } + } +} + +func TestCanonicalTarName(t *testing.T) { + cases := []struct { + in string + isDir bool + expected string + }{ + {"foo", false, "foo"}, + {"foo", true, "foo/"}, + {"foo/bar", false, "foo/bar"}, + {"foo/bar", true, "foo/bar/"}, + } + for _, v := range cases { + if out, err := canonicalTarName(v.in, v.isDir); err != nil { + t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) + } else if out != v.expected { + t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) + } + } +} + +func TestChmodTarEntry(t *testing.T) { + cases := []struct { + in, expected os.FileMode + }{ + {0000, 0000}, + {0777, 0777}, + {0644, 0644}, + {0755, 0755}, + {0444, 0444}, + } + for _, v := range cases { + if out := chmodTarEntry(v.in); out != v.expected { + t.Fatalf("wrong chmod. expected:%v got:%v", v.expected, out) + } + } +} + +func TestTarWithHardLink(t *testing.T) { + origin, err := ioutil.TempDir("", "docker-test-tar-hardlink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := os.Link(filepath.Join(origin, "1"), filepath.Join(origin, "2")); err != nil { + t.Fatal(err) + } + + var i1, i2 uint64 + if i1, err = getNlink(filepath.Join(origin, "1")); err != nil { + t.Fatal(err) + } + // sanity check that we can hardlink + if i1 != 2 { + t.Skipf("skipping since hardlinks don't work here; expected 2 links, got %d", i1) + } + + dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dest) + + // we'll do this in two steps to separate failure + fh, err := Tar(origin, Uncompressed) + if err != nil { + t.Fatal(err) + } + + // ensure we can read the whole thing with no error, before writing back out + buf, err := ioutil.ReadAll(fh) + if err != nil { + t.Fatal(err) + } + + bRdr := bytes.NewReader(buf) + err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed}) + if err != nil { + t.Fatal(err) + } + + if i1, err = getInode(filepath.Join(dest, "1")); err != nil { + t.Fatal(err) + } + if i2, err = getInode(filepath.Join(dest, "2")); err != nil { + t.Fatal(err) + } + + if i1 != i2 { + t.Errorf("expected matching inodes, but got %d and %d", i1, i2) + } +} + +func getNlink(path string) (uint64, error) { + stat, err := os.Stat(path) + if err != nil { + return 0, err + } + statT, ok := stat.Sys().(*syscall.Stat_t) + if !ok { + return 0, fmt.Errorf("expected type *syscall.Stat_t, got %t", stat.Sys()) + } + // We need this conversion on ARM64 + return uint64(statT.Nlink), nil +} + +func getInode(path string) (uint64, error) { + stat, err := os.Stat(path) + if err != nil { + return 0, err + } + statT, ok := stat.Sys().(*syscall.Stat_t) + if !ok { + return 0, fmt.Errorf("expected type *syscall.Stat_t, got %t", stat.Sys()) + } + return statT.Ino, nil +} + +func TestTarWithBlockCharFifo(t *testing.T) { + origin, err := ioutil.TempDir("", "docker-test-tar-hardlink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := system.Mknod(filepath.Join(origin, "2"), unix.S_IFBLK, int(system.Mkdev(int64(12), int64(5)))); err != nil { + t.Fatal(err) + } + if err := system.Mknod(filepath.Join(origin, "3"), 
unix.S_IFCHR, int(system.Mkdev(int64(12), int64(5)))); err != nil { + t.Fatal(err) + } + if err := system.Mknod(filepath.Join(origin, "4"), unix.S_IFIFO, int(system.Mkdev(int64(12), int64(5)))); err != nil { + t.Fatal(err) + } + + dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dest) + + // we'll do this in two steps to separate failure + fh, err := Tar(origin, Uncompressed) + if err != nil { + t.Fatal(err) + } + + // ensure we can read the whole thing with no error, before writing back out + buf, err := ioutil.ReadAll(fh) + if err != nil { + t.Fatal(err) + } + + bRdr := bytes.NewReader(buf) + err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed}) + if err != nil { + t.Fatal(err) + } + + changes, err := ChangesDirs(origin, dest) + if err != nil { + t.Fatal(err) + } + if len(changes) > 0 { + t.Fatalf("Tar with special device (block, char, fifo) should keep them (recreate them when untar) : %v", changes) + } +} + +// TestTarUntarWithXattr is Unix-only, as Lsetxattr is not supported on Windows +func TestTarUntarWithXattr(t *testing.T) { + if runtime.GOOS == "solaris" { + t.Skip() + } + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { + t.Fatal(err) + } + if err := system.Lsetxattr(filepath.Join(origin, "2"), "security.capability", []byte{0x00}, 0); err != nil { + t.Fatal(err) + } + + for _, c := range []Compression{ + Uncompressed, + Gzip, + } { + changes, err := tarUntar(t, origin, &TarOptions{ + Compression: c, + ExcludePatterns: []string{"3"}, + }) + + if err != nil { + t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) + } + + if len(changes) != 1 || changes[0].Path != "/3" { + t.Fatalf("Unexpected differences after tarUntar: %v", changes) + } + capability, _ := system.Lgetxattr(filepath.Join(origin, "2"), "security.capability") + if capability == nil || capability[0] != 0x00 { + t.Fatalf("Untar should have kept the 'security.capability' xattr.") + } + } +} diff --git a/vendor/github.com/moby/moby/pkg/archive/archive_windows.go b/vendor/github.com/moby/moby/pkg/archive/archive_windows.go new file mode 100644 index 000000000..a22410c03 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/archive_windows.go @@ -0,0 +1,79 @@ +// +build windows + +package archive + +import ( + "archive/tar" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/longpath" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return longpath.AddPrefix(srcPath) +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. +func getWalkRoot(srcPath string, include string) string { + return filepath.Join(srcPath, include) +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path.
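The Windows implementation that follows rejects forward slashes and rewrites backslashes to produce a posix-style tar name. A minimal standalone sketch of the same conversion (the helper name toTarName is hypothetical, for illustration only):

package main

import (
	"fmt"
	"strings"
)

// toTarName is a hypothetical stand-in for the Windows branch of
// CanonicalTarNameForPath: reject '/' in the relative path, then
// swap every '\' for '/'.
func toTarName(p string) (string, error) {
	if strings.Contains(p, "/") {
		return "", fmt.Errorf("windows path contains forward slash: %s", p)
	}
	return strings.Replace(p, `\`, "/", -1), nil
}

func main() {
	out, _ := toTarName(`foo\bar`)
	fmt.Println(out) // foo/bar
	if _, err := toTarName("foo/bar"); err != nil {
		fmt.Println(err) // unix-style input is rejected
	}
}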
+func CanonicalTarNameForPath(p string) (string, error) { + // windows: convert windows style relative path with backslashes + // into forward slashes. Since windows does not allow '/' or '\' + // in file names, it is mostly safe to replace however we must + // check just in case + if strings.Contains(p, "/") { + return "", fmt.Errorf("Windows path contains forward slash: %s", p) + } + return strings.Replace(p, string(os.PathSeparator), "/", -1), nil + +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. +func chmodTarEntry(perm os.FileMode) os.FileMode { + //perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.) + permPart := perm & os.ModePerm + noPermPart := perm &^ os.ModePerm + // Add the x bit: make everything +x from windows + permPart |= 0111 + permPart &= 0755 + + return noPermPart | permPart +} + +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { + // do nothing. no notion of Rdev, Nlink in stat on Windows + return +} + +func getInodeFromStat(stat interface{}) (inode uint64, err error) { + // do nothing. no notion of Inode in stat on Windows + return +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + return nil +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + return nil +} + +func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { + // no notion of file ownership mapping yet on Windows + return idtools.IDPair{0, 0}, nil +} diff --git a/vendor/github.com/moby/moby/pkg/archive/archive_windows_test.go b/vendor/github.com/moby/moby/pkg/archive/archive_windows_test.go new file mode 100644 index 000000000..685e114ba --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/archive_windows_test.go @@ -0,0 +1,93 @@ +// +build windows + +package archive + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func TestCopyFileWithInvalidDest(t *testing.T) { + // TODO Windows: This is currently failing. Not sure what has + // recently changed in CopyWithTar as used to pass. Further investigation + // is required. + t.Skip("Currently fails") + folder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(folder) + dest := "c:dest" + srcFolder := filepath.Join(folder, "src") + src := filepath.Join(folder, "src", "src") + err = os.MkdirAll(srcFolder, 0740) + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(src, []byte("content"), 0777) + err = defaultCopyWithTar(src, dest) + if err == nil { + t.Fatalf("archiver.CopyWithTar should throw an error on invalid dest.") + } +} + +func TestCanonicalTarNameForPath(t *testing.T) { + cases := []struct { + in, expected string + shouldFail bool + }{ + {"foo", "foo", false}, + {"foo/bar", "___", true}, // unix-styled windows path must fail + {`foo\bar`, "foo/bar", false}, + } + for _, v := range cases { + if out, err := CanonicalTarNameForPath(v.in); err != nil && !v.shouldFail { + t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) + } else if v.shouldFail && err == nil { + t.Fatalf("canonical path call should have failed with error. in=%s out=%s", v.in, out) + } else if !v.shouldFail && out != v.expected { + t.Fatalf("wrong canonical tar name. 
expected:%s got:%s", v.expected, out) + } + } +} + +func TestCanonicalTarName(t *testing.T) { + cases := []struct { + in string + isDir bool + expected string + }{ + {"foo", false, "foo"}, + {"foo", true, "foo/"}, + {`foo\bar`, false, "foo/bar"}, + {`foo\bar`, true, "foo/bar/"}, + } + for _, v := range cases { + if out, err := canonicalTarName(v.in, v.isDir); err != nil { + t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) + } else if out != v.expected { + t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) + } + } +} + +func TestChmodTarEntry(t *testing.T) { + cases := []struct { + in, expected os.FileMode + }{ + {0000, 0111}, + {0777, 0755}, + {0644, 0755}, + {0755, 0755}, + {0444, 0555}, + {0755 | os.ModeDir, 0755 | os.ModeDir}, + {0755 | os.ModeSymlink, 0755 | os.ModeSymlink}, + } + for _, v := range cases { + if out := chmodTarEntry(v.in); out != v.expected { + t.Fatalf("wrong chmod. expected:%v got:%v", v.expected, out) + } + } +} diff --git a/vendor/github.com/moby/moby/pkg/archive/changes.go b/vendor/github.com/moby/moby/pkg/archive/changes.go new file mode 100644 index 000000000..5ca39b721 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/changes.go @@ -0,0 +1,441 @@ +package archive + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" +) + +// ChangeType represents the change type. +type ChangeType int + +const ( + // ChangeModify represents the modify operation. + ChangeModify = iota + // ChangeAdd represents the add operation. + ChangeAdd + // ChangeDelete represents the delete operation. + ChangeDelete +) + +func (c ChangeType) String() string { + switch c { + case ChangeModify: + return "C" + case ChangeAdd: + return "A" + case ChangeDelete: + return "D" + } + return "" +} + +// Change represents a change, it wraps the change type and path. +// It describes changes of the files in the path respect to the +// parent layers. The change could be modify, add, delete. +// This is used for layer diff. 
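A Change prints as its kind followed by its path, where the kinds render as "C", "A", and "D" for modify, add, and delete. A minimal sketch of that contract (the types here mirror the declarations just below only so the example compiles on its own):

package main

import "fmt"

// ChangeType and Change mirror the types declared below, for illustration.
type ChangeType int

const (
	ChangeModify ChangeType = iota // rendered as "C"
	ChangeAdd                      // rendered as "A"
	ChangeDelete                   // rendered as "D"
)

// String is a compact equivalent of the switch-based String below.
func (c ChangeType) String() string {
	return [...]string{"C", "A", "D"}[c]
}

type Change struct {
	Path string
	Kind ChangeType
}

func main() {
	c := Change{Path: "/etc/passwd", Kind: ChangeModify}
	fmt.Printf("%s %s\n", c.Kind, c.Path) // C /etc/passwd
}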
+type Change struct { + Path string + Kind ChangeType +} + +func (change *Change) String() string { + return fmt.Sprintf("%s %s", change.Kind, change.Path) +} + +// for sort.Sort +type changesByPath []Change + +func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } +func (c changesByPath) Len() int { return len(c) } +func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } + +// Gnu tar and the go tar writer don't have sub-second mtime +// precision, which is problematic when we apply changes via tar +// files, we handle this by comparing for exact times, *or* same +// second count and either a or b having exactly 0 nanoseconds +func sameFsTime(a, b time.Time) bool { + return a == b || + (a.Unix() == b.Unix() && + (a.Nanosecond() == 0 || b.Nanosecond() == 0)) +} + +func sameFsTimeSpec(a, b syscall.Timespec) bool { + return a.Sec == b.Sec && + (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) +} + +// Changes walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func Changes(layers []string, rw string) ([]Change, error) { + return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip) +} + +func aufsMetadataSkip(path string) (skip bool, err error) { + skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path) + if err != nil { + skip = true + } + return +} + +func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) { + f := filepath.Base(path) + + // If there is a whiteout, then the file was removed + if strings.HasPrefix(f, WhiteoutPrefix) { + originalFile := f[len(WhiteoutPrefix):] + return filepath.Join(filepath.Dir(path), originalFile), nil + } + + return "", nil +} + +type skipChange func(string) (bool, error) +type deleteChange func(string, string, os.FileInfo) (string, error) + +func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) { + var ( + changes []Change + changedDirs = make(map[string]struct{}) + ) + + err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + path, err = filepath.Rel(rw, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + path = filepath.Join(string(os.PathSeparator), path) + + // Skip root + if path == string(os.PathSeparator) { + return nil + } + + if sc != nil { + if skip, err := sc(path); skip { + return err + } + } + + change := Change{ + Path: path, + } + + deletedFile, err := dc(rw, path, f) + if err != nil { + return err + } + + // Find out what kind of modification happened + if deletedFile != "" { + change.Path = deletedFile + change.Kind = ChangeDelete + } else { + // Otherwise, the file was added + change.Kind = ChangeAdd + + // ...Unless it already existed in a top layer, in which case, it's a modification + for _, layer := range layers { + stat, err := os.Stat(filepath.Join(layer, path)) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + // The file existed in the top layer, so that's a modification + + // However, if it's a directory, maybe it wasn't actually modified. 
+ // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar + if stat.IsDir() && f.IsDir() { + if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { + // Both directories are the same, don't record the change + return nil + } + } + change.Kind = ChangeModify + break + } + } + } + + // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. + // This block is here to ensure the change is recorded even if the + // modify time, mode and size of the parent directory in the rw and ro layers are all equal. + // Check https://github.com/docker/docker/pull/13590 for details. + if f.IsDir() { + changedDirs[path] = struct{}{} + } + if change.Kind == ChangeAdd || change.Kind == ChangeDelete { + parent := filepath.Dir(path) + if _, ok := changedDirs[parent]; !ok && parent != "/" { + changes = append(changes, Change{Path: parent, Kind: ChangeModify}) + changedDirs[parent] = struct{}{} + } + } + + // Record change + changes = append(changes, change) + return nil + }) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + return changes, nil +} + +// FileInfo describes the information of a file. +type FileInfo struct { + parent *FileInfo + name string + stat *system.StatT + children map[string]*FileInfo + capability []byte + added bool +} + +// LookUp looks up the file information of a file. +func (info *FileInfo) LookUp(path string) *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + parent := info + if path == string(os.PathSeparator) { + return info + } + + pathElements := strings.Split(path, string(os.PathSeparator)) + for _, elem := range pathElements { + if elem != "" { + child := parent.children[elem] + if child == nil { + return nil + } + parent = child + } + } + return parent +} + +func (info *FileInfo) path() string { + if info.parent == nil { + // As this runs on the daemon side, file paths are OS specific. + return string(os.PathSeparator) + } + return filepath.Join(info.parent.path(), info.name) +} + +func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { + + sizeAtEntry := len(*changes) + + if oldInfo == nil { + // add + change := Change{ + Path: info.path(), + Kind: ChangeAdd, + } + *changes = append(*changes, change) + info.added = true + } + + // We make a copy so we can modify it to detect additions + // also, we only recurse on the old dir if the new info is a directory + // otherwise any previous delete/change is considered recursive + oldChildren := make(map[string]*FileInfo) + if oldInfo != nil && info.isDir() { + for k, v := range oldInfo.children { + oldChildren[k] = v + } + } + + for name, newChild := range info.children { + oldChild := oldChildren[name] + if oldChild != nil { + // change? + oldStat := oldChild.stat + newStat := newChild.stat + // Note: We can't compare inode or ctime or blocksize here, because these change + // when copying a file into a container. However, that is not generally a problem + // because any content change will change mtime, and any status change should + // be visible when actually comparing the stat fields. 
The only time this + // breaks down is if some code intentionally hides a change by setting + // back mtime + if statDifferent(oldStat, newStat) || + !bytes.Equal(oldChild.capability, newChild.capability) { + change := Change{ + Path: newChild.path(), + Kind: ChangeModify, + } + *changes = append(*changes, change) + newChild.added = true + } + + // Remove from copy so we can detect deletions + delete(oldChildren, name) + } + + newChild.addChanges(oldChild, changes) + } + for _, oldChild := range oldChildren { + // delete + change := Change{ + Path: oldChild.path(), + Kind: ChangeDelete, + } + *changes = append(*changes, change) + } + + // If there were changes inside this directory, we need to add it, even if the directory + // itself wasn't changed. This is needed to properly save and restore filesystem permissions. + // As this runs on the daemon side, file paths are OS specific. + if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { + change := Change{ + Path: info.path(), + Kind: ChangeModify, + } + // Let's insert the directory entry before the recently added entries located inside this dir + *changes = append(*changes, change) // just to resize the slice, will be overwritten + copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) + (*changes)[sizeAtEntry] = change + } + +} + +// Changes add changes to file information. +func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { + var changes []Change + + info.addChanges(oldInfo, &changes) + + return changes +} + +func newRootFileInfo() *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + root := &FileInfo{ + name: string(os.PathSeparator), + children: make(map[string]*FileInfo), + } + return root +} + +// ChangesDirs compares two directories and generates an array of Change objects describing the changes. +// If oldDir is "", then all files in newDir will be Add-Changes. +func ChangesDirs(newDir, oldDir string) ([]Change, error) { + var ( + oldRoot, newRoot *FileInfo + ) + if oldDir == "" { + emptyDir, err := ioutil.TempDir("", "empty") + if err != nil { + return nil, err + } + defer os.Remove(emptyDir) + oldDir = emptyDir + } + oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir) + if err != nil { + return nil, err + } + + return newRoot.Changes(oldRoot), nil +} + +// ChangesSize calculates the size in bytes of the provided changes, based on newDir. +func ChangesSize(newDir string, changes []Change) int64 { + var ( + size int64 + sf = make(map[uint64]struct{}) + ) + for _, change := range changes { + if change.Kind == ChangeModify || change.Kind == ChangeAdd { + file := filepath.Join(newDir, change.Path) + fileInfo, err := os.Lstat(file) + if err != nil { + logrus.Errorf("Can not stat %q: %s", file, err) + continue + } + + if fileInfo != nil && !fileInfo.IsDir() { + if hasHardlinks(fileInfo) { + inode := getIno(fileInfo) + if _, ok := sf[inode]; !ok { + size += fileInfo.Size() + sf[inode] = struct{}{} + } + } else { + size += fileInfo.Size() + } + } + } + } + return size +} + +// ExportChanges produces an Archive from the provided changes, relative to dir. 
+func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) { + reader, writer := io.Pipe() + go func() { + ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer) + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + sort.Sort(changesByPath(changes)) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + for _, change := range changes { + if change.Kind == ChangeDelete { + whiteOutDir := filepath.Dir(change.Path) + whiteOutBase := filepath.Base(change.Path) + whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase) + timestamp := time.Now() + hdr := &tar.Header{ + Name: whiteOut[1:], + Size: 0, + ModTime: timestamp, + AccessTime: timestamp, + ChangeTime: timestamp, + } + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + logrus.Debugf("Can't write whiteout header: %s", err) + } + } else { + path := filepath.Join(dir, change.Path) + if err := ta.addTarFile(path, change.Path[1:]); err != nil { + logrus.Debugf("Can't add file %s to tar: %s", path, err) + } + } + } + + // Make sure to check the error on Close. + if err := ta.TarWriter.Close(); err != nil { + logrus.Debugf("Can't close layer: %s", err) + } + if err := writer.Close(); err != nil { + logrus.Debugf("failed close Changes writer: %s", err) + } + }() + return reader, nil +} diff --git a/vendor/github.com/moby/moby/pkg/archive/changes_linux.go b/vendor/github.com/moby/moby/pkg/archive/changes_linux.go new file mode 100644 index 000000000..b987e5224 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/changes_linux.go @@ -0,0 +1,313 @@ +package archive + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "sort" + "syscall" + "unsafe" + + "github.com/docker/docker/pkg/system" + "golang.org/x/sys/unix" +) + +// walker is used to implement collectFileInfoForChanges on linux. Where this +// method in general returns the entire contents of two directory trees, we +// optimize some FS calls out on linux. In particular, we take advantage of the +// fact that getdents(2) returns the inode of each file in the directory being +// walked, which, when walking two trees in parallel to generate a list of +// changes, can be used to prune subtrees without ever having to lstat(2) them +// directly. Eliminating stat calls in this way can save up to seconds on large +// images. +type walker struct { + dir1 string + dir2 string + root1 *FileInfo + root2 *FileInfo +} + +// collectFileInfoForChanges returns a complete representation of the trees +// rooted at dir1 and dir2, with one important exception: any subtree or +// leaf where the inode and device numbers are an exact match between dir1 +// and dir2 will be pruned from the results. This method is *only* to be used +// to generating a list of changes between the two directories, as it does not +// reflect the full contents. 
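The pruning described above hinges on one predicate: two entries at the same relative path that share a device and inode pair cannot differ, so their subtrees are skipped. A hedged, Linux-only sketch of that check (samePrunable is a hypothetical name, not part of this package):

package main

import (
	"fmt"
	"os"
	"syscall"
)

// samePrunable mirrors the walker's merge logic below: identical device
// and inode numbers mean the entry is unchanged between the two trees
// and need not be recorded or descended into.
func samePrunable(a, b os.FileInfo) bool {
	sa, okA := a.Sys().(*syscall.Stat_t)
	sb, okB := b.Sys().(*syscall.Stat_t)
	if !okA || !okB {
		return false
	}
	return sa.Dev == sb.Dev && sa.Ino == sb.Ino
}

func main() {
	fi1, err1 := os.Lstat("/etc/hosts")
	fi2, err2 := os.Lstat("/etc/hosts")
	if err1 == nil && err2 == nil {
		fmt.Println(samePrunable(fi1, fi2)) // true: same file, same inode
	}
}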
+func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) { + w := &walker{ + dir1: dir1, + dir2: dir2, + root1: newRootFileInfo(), + root2: newRootFileInfo(), + } + + i1, err := os.Lstat(w.dir1) + if err != nil { + return nil, nil, err + } + i2, err := os.Lstat(w.dir2) + if err != nil { + return nil, nil, err + } + + if err := w.walk("/", i1, i2); err != nil { + return nil, nil, err + } + + return w.root1, w.root2, nil +} + +// Given a FileInfo, its path info, and a reference to the root of the tree +// being constructed, register this file with the tree. +func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { + if fi == nil { + return nil + } + parent := root.LookUp(filepath.Dir(path)) + if parent == nil { + return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path) + } + info := &FileInfo{ + name: filepath.Base(path), + children: make(map[string]*FileInfo), + parent: parent, + } + cpath := filepath.Join(dir, path) + stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) + if err != nil { + return err + } + info.stat = stat + info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access + parent.children[info.name] = info + return nil +} + +// Walk a subtree rooted at the same path in both trees being iterated. For +// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d +func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { + // Register these nodes with the return trees, unless we're still at the + // (already-created) roots: + if path != "/" { + if err := walkchunk(path, i1, w.dir1, w.root1); err != nil { + return err + } + if err := walkchunk(path, i2, w.dir2, w.root2); err != nil { + return err + } + } + + is1Dir := i1 != nil && i1.IsDir() + is2Dir := i2 != nil && i2.IsDir() + + sameDevice := false + if i1 != nil && i2 != nil { + si1 := i1.Sys().(*syscall.Stat_t) + si2 := i2.Sys().(*syscall.Stat_t) + if si1.Dev == si2.Dev { + sameDevice = true + } + } + + // If these files are both non-existent, or leaves (non-dirs), we are done. + if !is1Dir && !is2Dir { + return nil + } + + // Fetch the names of all the files contained in both directories being walked: + var names1, names2 []nameIno + if is1Dir { + names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access + if err != nil { + return err + } + } + if is2Dir { + names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access + if err != nil { + return err + } + } + + // We have lists of the files contained in both parallel directories, sorted + // in the same order. Walk them in parallel, generating a unique merged list + // of all items present in either or both directories. 
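+ // Example: merging names1 = [a b d] with names2 = [b c] yields [a b c d]; the shared name b is kept for further comparison only when its inode differs or the trees sit on different devices, and is otherwise pruned (see case 0 below).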
+ var names []string + ix1 := 0 + ix2 := 0 + + for { + if ix1 >= len(names1) { + break + } + if ix2 >= len(names2) { + break + } + + ni1 := names1[ix1] + ni2 := names2[ix2] + + switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) { + case -1: // ni1 < ni2 -- advance ni1 + // we will not encounter ni1 in names2 + names = append(names, ni1.name) + ix1++ + case 0: // ni1 == ni2 + if ni1.ino != ni2.ino || !sameDevice { + names = append(names, ni1.name) + } + ix1++ + ix2++ + case 1: // ni1 > ni2 -- advance ni2 + // we will not encounter ni2 in names1 + names = append(names, ni2.name) + ix2++ + } + } + for ix1 < len(names1) { + names = append(names, names1[ix1].name) + ix1++ + } + for ix2 < len(names2) { + names = append(names, names2[ix2].name) + ix2++ + } + + // For each of the names present in either or both of the directories being + // iterated, stat the name under each root, and recurse the pair of them: + for _, name := range names { + fname := filepath.Join(path, name) + var cInfo1, cInfo2 os.FileInfo + if is1Dir { + cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if is2Dir { + cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if err = w.walk(fname, cInfo1, cInfo2); err != nil { + return err + } + } + return nil +} + +// {name,inode} pairs used to support the early-pruning logic of the walker type +type nameIno struct { + name string + ino uint64 +} + +type nameInoSlice []nameIno + +func (s nameInoSlice) Len() int { return len(s) } +func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } + +// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode +// numbers further up the stack when reading directory contents. Unlike +// os.Readdirnames, which returns a list of filenames, this function returns a +// list of {filename,inode} pairs. +func readdirnames(dirname string) (names []nameIno, err error) { + var ( + size = 100 + buf = make([]byte, 4096) + nbuf int + bufp int + nb int + ) + + f, err := os.Open(dirname) + if err != nil { + return nil, err + } + defer f.Close() + + names = make([]nameIno, 0, size) // Empty with room to grow. + for { + // Refill the buffer if necessary + if bufp >= nbuf { + bufp = 0 + nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux + if nbuf < 0 { + nbuf = 0 + } + if err != nil { + return nil, os.NewSyscallError("readdirent", err) + } + if nbuf <= 0 { + break // EOF + } + } + + // Drain the buffer + nb, names = parseDirent(buf[bufp:nbuf], names) + bufp += nb + } + + sl := nameInoSlice(names) + sort.Sort(sl) + return sl, nil +} + +// parseDirent is a minor modification of unix.ParseDirent (linux version) +// which returns {name,inode} pairs instead of just names. +func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { + origlen := len(buf) + for len(buf) > 0 { + dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0])) + buf = buf[dirent.Reclen:] + if dirent.Ino == 0 { // File absent in directory. + continue + } + bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) + var name = string(bytes[0:clen(bytes[:])]) + if name == "." || name == ".." 
{ // Useless names + continue + } + names = append(names, nameIno{name, dirent.Ino}) + } + return origlen - len(buf), names +} + +func clen(n []byte) int { + for i := 0; i < len(n); i++ { + if n[i] == 0 { + return i + } + } + return len(n) +} + +// OverlayChanges walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func OverlayChanges(layers []string, rw string) ([]Change, error) { + return changes(layers, rw, overlayDeletedFile, nil) +} + +func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) { + if fi.Mode()&os.ModeCharDevice != 0 { + s := fi.Sys().(*syscall.Stat_t) + if major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0 { + return path, nil + } + } + if fi.Mode()&os.ModeDir != 0 { + opaque, err := system.Lgetxattr(filepath.Join(root, path), "trusted.overlay.opaque") + if err != nil { + return "", err + } + if len(opaque) == 1 && opaque[0] == 'y' { + return path, nil + } + } + + return "", nil + +} diff --git a/vendor/github.com/moby/moby/pkg/archive/changes_other.go b/vendor/github.com/moby/moby/pkg/archive/changes_other.go new file mode 100644 index 000000000..da70ed37c --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/changes_other.go @@ -0,0 +1,97 @@ +// +build !linux + +package archive + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/pkg/system" +) + +func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) { + var ( + oldRoot, newRoot *FileInfo + err1, err2 error + errs = make(chan error, 2) + ) + go func() { + oldRoot, err1 = collectFileInfo(oldDir) + errs <- err1 + }() + go func() { + newRoot, err2 = collectFileInfo(newDir) + errs <- err2 + }() + + // block until both routines have returned + for i := 0; i < 2; i++ { + if err := <-errs; err != nil { + return nil, nil, err + } + } + + return oldRoot, newRoot, nil +} + +func collectFileInfo(sourceDir string) (*FileInfo, error) { + root := newRootFileInfo() + + err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(sourceDir, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + relPath = filepath.Join(string(os.PathSeparator), relPath) + + // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. + // Temporary workaround. If the returned path starts with two backslashes, + // trim it down to a single backslash. Only relevant on Windows. 
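+		// (For example, a returned relPath of `\\dir1` becomes `\dir1`.)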
+ if runtime.GOOS == "windows" { + if strings.HasPrefix(relPath, `\\`) { + relPath = relPath[1:] + } + } + + if relPath == string(os.PathSeparator) { + return nil + } + + parent := root.LookUp(filepath.Dir(relPath)) + if parent == nil { + return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) + } + + info := &FileInfo{ + name: filepath.Base(relPath), + children: make(map[string]*FileInfo), + parent: parent, + } + + s, err := system.Lstat(path) + if err != nil { + return err + } + info.stat = s + + info.capability, _ = system.Lgetxattr(path, "security.capability") + + parent.children[info.name] = info + + return nil + }) + if err != nil { + return nil, err + } + return root, nil +} diff --git a/vendor/github.com/moby/moby/pkg/archive/changes_posix_test.go b/vendor/github.com/moby/moby/pkg/archive/changes_posix_test.go new file mode 100644 index 000000000..095102e57 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/changes_posix_test.go @@ -0,0 +1,132 @@ +package archive + +import ( + "archive/tar" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "runtime" + "sort" + "testing" +) + +func TestHardLinkOrder(t *testing.T) { + //TODO Should run for Solaris + if runtime.GOOS == "solaris" { + t.Skip("gcp failures on Solaris") + } + names := []string{"file1.txt", "file2.txt", "file3.txt"} + msg := []byte("Hey y'all") + + // Create dir + src, err := ioutil.TempDir("", "docker-hardlink-test-src-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(src) + for _, name := range names { + func() { + fh, err := os.Create(path.Join(src, name)) + if err != nil { + t.Fatal(err) + } + defer fh.Close() + if _, err = fh.Write(msg); err != nil { + t.Fatal(err) + } + }() + } + // Create dest, with changes that includes hardlinks + dest, err := ioutil.TempDir("", "docker-hardlink-test-dest-") + if err != nil { + t.Fatal(err) + } + os.RemoveAll(dest) // we just want the name, at first + if err := copyDir(src, dest); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dest) + for _, name := range names { + for i := 0; i < 5; i++ { + if err := os.Link(path.Join(dest, name), path.Join(dest, fmt.Sprintf("%s.link%d", name, i))); err != nil { + t.Fatal(err) + } + } + } + + // get changes + changes, err := ChangesDirs(dest, src) + if err != nil { + t.Fatal(err) + } + + // sort + sort.Sort(changesByPath(changes)) + + // ExportChanges + ar, err := ExportChanges(dest, changes, nil, nil) + if err != nil { + t.Fatal(err) + } + hdrs, err := walkHeaders(ar) + if err != nil { + t.Fatal(err) + } + + // reverse sort + sort.Sort(sort.Reverse(changesByPath(changes))) + // ExportChanges + arRev, err := ExportChanges(dest, changes, nil, nil) + if err != nil { + t.Fatal(err) + } + hdrsRev, err := walkHeaders(arRev) + if err != nil { + t.Fatal(err) + } + + // line up the two sets + sort.Sort(tarHeaders(hdrs)) + sort.Sort(tarHeaders(hdrsRev)) + + // compare Size and LinkName + for i := range hdrs { + if hdrs[i].Name != hdrsRev[i].Name { + t.Errorf("headers - expected name %q; but got %q", hdrs[i].Name, hdrsRev[i].Name) + } + if hdrs[i].Size != hdrsRev[i].Size { + t.Errorf("headers - %q expected size %d; but got %d", hdrs[i].Name, hdrs[i].Size, hdrsRev[i].Size) + } + if hdrs[i].Typeflag != hdrsRev[i].Typeflag { + t.Errorf("headers - %q expected type %d; but got %d", hdrs[i].Name, hdrs[i].Typeflag, hdrsRev[i].Typeflag) + } + if hdrs[i].Linkname != hdrsRev[i].Linkname { + t.Errorf("headers - %q expected linkname %q; but got %q", hdrs[i].Name, hdrs[i].Linkname, hdrsRev[i].Linkname) + } + } + +} + +type 
tarHeaders []tar.Header + +func (th tarHeaders) Len() int { return len(th) } +func (th tarHeaders) Swap(i, j int) { th[j], th[i] = th[i], th[j] } +func (th tarHeaders) Less(i, j int) bool { return th[i].Name < th[j].Name } + +func walkHeaders(r io.Reader) ([]tar.Header, error) { + t := tar.NewReader(r) + headers := []tar.Header{} + for { + hdr, err := t.Next() + if err != nil { + if err == io.EOF { + break + } + return headers, err + } + headers = append(headers, *hdr) + } + return headers, nil +} diff --git a/vendor/github.com/moby/moby/pkg/archive/changes_test.go b/vendor/github.com/moby/moby/pkg/archive/changes_test.go new file mode 100644 index 000000000..c5d1629e7 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/changes_test.go @@ -0,0 +1,572 @@ +package archive + +import ( + "io/ioutil" + "os" + "os/exec" + "path" + "runtime" + "sort" + "testing" + "time" + + "github.com/docker/docker/pkg/system" +) + +func max(x, y int) int { + if x >= y { + return x + } + return y +} + +func copyDir(src, dst string) error { + cmd := exec.Command("cp", "-a", src, dst) + if runtime.GOOS == "solaris" { + cmd = exec.Command("gcp", "-a", src, dst) + } + + if err := cmd.Run(); err != nil { + return err + } + return nil +} + +type FileType uint32 + +const ( + Regular FileType = iota + Dir + Symlink +) + +type FileData struct { + filetype FileType + path string + contents string + permissions os.FileMode +} + +func createSampleDir(t *testing.T, root string) { + files := []FileData{ + {Regular, "file1", "file1\n", 0600}, + {Regular, "file2", "file2\n", 0666}, + {Regular, "file3", "file3\n", 0404}, + {Regular, "file4", "file4\n", 0600}, + {Regular, "file5", "file5\n", 0600}, + {Regular, "file6", "file6\n", 0600}, + {Regular, "file7", "file7\n", 0600}, + {Dir, "dir1", "", 0740}, + {Regular, "dir1/file1-1", "file1-1\n", 01444}, + {Regular, "dir1/file1-2", "file1-2\n", 0666}, + {Dir, "dir2", "", 0700}, + {Regular, "dir2/file2-1", "file2-1\n", 0666}, + {Regular, "dir2/file2-2", "file2-2\n", 0666}, + {Dir, "dir3", "", 0700}, + {Regular, "dir3/file3-1", "file3-1\n", 0666}, + {Regular, "dir3/file3-2", "file3-2\n", 0666}, + {Dir, "dir4", "", 0700}, + {Regular, "dir4/file3-1", "file4-1\n", 0666}, + {Regular, "dir4/file3-2", "file4-2\n", 0666}, + {Symlink, "symlink1", "target1", 0666}, + {Symlink, "symlink2", "target2", 0666}, + {Symlink, "symlink3", root + "/file1", 0666}, + {Symlink, "symlink4", root + "/symlink3", 0666}, + {Symlink, "dirSymlink", root + "/dir1", 0740}, + } + + now := time.Now() + for _, info := range files { + p := path.Join(root, info.path) + if info.filetype == Dir { + if err := os.MkdirAll(p, info.permissions); err != nil { + t.Fatal(err) + } + } else if info.filetype == Regular { + if err := ioutil.WriteFile(p, []byte(info.contents), info.permissions); err != nil { + t.Fatal(err) + } + } else if info.filetype == Symlink { + if err := os.Symlink(info.contents, p); err != nil { + t.Fatal(err) + } + } + + if info.filetype != Symlink { + // Set a consistent ctime, atime for all files and dirs + if err := system.Chtimes(p, now, now); err != nil { + t.Fatal(err) + } + } + } +} + +func TestChangeString(t *testing.T) { + modifyChange := Change{"change", ChangeModify} + toString := modifyChange.String() + if toString != "C change" { + t.Fatalf("String() of a change with ChangeModify Kind should have been %s but was %s", "C change", toString) + } + addChange := Change{"change", ChangeAdd} + toString = addChange.String() + if toString != "A change" { + t.Fatalf("String() of a change with 
ChangeAdd Kind should have been %s but was %s", "A change", toString)
+	}
+	deleteChange := Change{"change", ChangeDelete}
+	toString = deleteChange.String()
+	if toString != "D change" {
+		t.Fatalf("String() of a change with ChangeDelete Kind should have been %s but was %s", "D change", toString)
+	}
+}
+
+func TestChangesWithNoChanges(t *testing.T) {
+	// TODO Windows. There may be a way of running this, but turning off for now
+	// as createSampleDir uses symlinks.
+	if runtime.GOOS == "windows" {
+		t.Skip("symlinks on Windows")
+	}
+	rwLayer, err := ioutil.TempDir("", "docker-changes-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(rwLayer)
+	layer, err := ioutil.TempDir("", "docker-changes-test-layer")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(layer)
+	createSampleDir(t, layer)
+	changes, err := Changes([]string{layer}, rwLayer)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(changes) != 0 {
+		t.Fatalf("Changes with no difference should have detected no changes, but detected %d", len(changes))
+	}
+}
+
+func TestChangesWithChanges(t *testing.T) {
+	// TODO Windows. There may be a way of running this, but turning off for now
+	// as createSampleDir uses symlinks.
+	if runtime.GOOS == "windows" {
+		t.Skip("symlinks on Windows")
+	}
+	// Mock the readonly layer
+	layer, err := ioutil.TempDir("", "docker-changes-test-layer")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(layer)
+	createSampleDir(t, layer)
+	os.MkdirAll(path.Join(layer, "dir1/subfolder"), 0740)
+
+	// Mock the RW layer
+	rwLayer, err := ioutil.TempDir("", "docker-changes-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(rwLayer)
+
+	// Create a folder in RW layer
+	dir1 := path.Join(rwLayer, "dir1")
+	os.MkdirAll(dir1, 0740)
+	deletedFile := path.Join(dir1, ".wh.file1-2")
+	ioutil.WriteFile(deletedFile, []byte{}, 0600)
+	modifiedFile := path.Join(dir1, "file1-1")
+	ioutil.WriteFile(modifiedFile, []byte{0x00}, 01444)
+	// Let's add a subfolder for a newFile
+	subfolder := path.Join(dir1, "subfolder")
+	os.MkdirAll(subfolder, 0740)
+	newFile := path.Join(subfolder, "newFile")
+	ioutil.WriteFile(newFile, []byte{}, 0740)
+
+	changes, err := Changes([]string{layer}, rwLayer)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	expectedChanges := []Change{
+		{"/dir1", ChangeModify},
+		{"/dir1/file1-1", ChangeModify},
+		{"/dir1/file1-2", ChangeDelete},
+		{"/dir1/subfolder", ChangeModify},
+		{"/dir1/subfolder/newFile", ChangeAdd},
+	}
+	checkChanges(expectedChanges, changes, t)
+}
+
+// See https://github.com/docker/docker/pull/13590
+func TestChangesWithChangesGH13590(t *testing.T) {
+	// TODO Windows. There may be a way of running this, but turning off for now
+	// as createSampleDir uses symlinks.
+	if runtime.GOOS == "windows" {
+		t.Skip("symlinks on Windows")
+	}
+	baseLayer, err := ioutil.TempDir("", "docker-changes-test.")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(baseLayer)
+
+	dir3 := path.Join(baseLayer, "dir1/dir2/dir3")
+	os.MkdirAll(dir3, 0740)
+
+	file := path.Join(dir3, "file.txt")
+	ioutil.WriteFile(file, []byte("hello"), 0666)
+
+	layer, err := ioutil.TempDir("", "docker-changes-test2.")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(layer)
+
+	// Test creating a new file
+	if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil {
+		t.Fatalf("Cmd failed: %q", err)
+	}
+
+	os.Remove(path.Join(layer, "dir1/dir2/dir3/file.txt"))
+	file = path.Join(layer, "dir1/dir2/dir3/file1.txt")
+	ioutil.WriteFile(file, []byte("bye"), 0666)
+
+	changes, err := Changes([]string{baseLayer}, layer)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	expectedChanges := []Change{
+		{"/dir1/dir2/dir3", ChangeModify},
+		{"/dir1/dir2/dir3/file1.txt", ChangeAdd},
+	}
+	checkChanges(expectedChanges, changes, t)
+
+	// Now test changing a file
+	layer, err = ioutil.TempDir("", "docker-changes-test3.")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(layer)
+
+	if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil {
+		t.Fatalf("Cmd failed: %q", err)
+	}
+
+	file = path.Join(layer, "dir1/dir2/dir3/file.txt")
+	ioutil.WriteFile(file, []byte("bye"), 0666)
+
+	changes, err = Changes([]string{baseLayer}, layer)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	expectedChanges = []Change{
+		{"/dir1/dir2/dir3/file.txt", ChangeModify},
+	}
+	checkChanges(expectedChanges, changes, t)
+}
+
+// Create a directory, copy it, make sure we report no changes between the two
+func TestChangesDirsEmpty(t *testing.T) {
+	// TODO Windows. There may be a way of running this, but turning off for now
+	// as createSampleDir uses symlinks.
+	// TODO Should work for Solaris
+	if runtime.GOOS == "windows" || runtime.GOOS == "solaris" {
+		t.Skip("symlinks on Windows; gcp failure on Solaris")
+	}
+	src, err := ioutil.TempDir("", "docker-changes-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(src)
+	createSampleDir(t, src)
+	dst := src + "-copy"
+	if err := copyDir(src, dst); err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(dst)
+	changes, err := ChangesDirs(dst, src)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(changes) != 0 {
+		t.Fatalf("Reported changes for identical dirs: %v", changes)
+	}
+	os.RemoveAll(src)
+	os.RemoveAll(dst)
+}
+
+func mutateSampleDir(t *testing.T, root string) {
+	// Remove a regular file
+	if err := os.RemoveAll(path.Join(root, "file1")); err != nil {
+		t.Fatal(err)
+	}
+
+	// Remove a directory
+	if err := os.RemoveAll(path.Join(root, "dir1")); err != nil {
+		t.Fatal(err)
+	}
+
+	// Remove a symlink
+	if err := os.RemoveAll(path.Join(root, "symlink1")); err != nil {
+		t.Fatal(err)
+	}
+
+	// Rewrite a file
+	if err := ioutil.WriteFile(path.Join(root, "file2"), []byte("fileNN\n"), 0777); err != nil {
+		t.Fatal(err)
+	}
+
+	// Replace a file
+	if err := os.RemoveAll(path.Join(root, "file3")); err != nil {
+		t.Fatal(err)
+	}
+	if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileMM\n"), 0404); err != nil {
+		t.Fatal(err)
+	}
+
+	// Touch file
+	if err := system.Chtimes(path.Join(root, "file4"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil {
+		t.Fatal(err)
+	}
+
+	// Replace file with dir
+	if err := os.RemoveAll(path.Join(root, "file5")); err != nil {
+		t.Fatal(err)
+	}
+	if err := os.MkdirAll(path.Join(root, "file5"), 0666); err != nil {
+		t.Fatal(err)
+	}
+
+	// Create new file
+	if err := ioutil.WriteFile(path.Join(root, 
"filenew"), []byte("filenew\n"), 0777); err != nil { + t.Fatal(err) + } + + // Create new dir + if err := os.MkdirAll(path.Join(root, "dirnew"), 0766); err != nil { + t.Fatal(err) + } + + // Create a new symlink + if err := os.Symlink("targetnew", path.Join(root, "symlinknew")); err != nil { + t.Fatal(err) + } + + // Change a symlink + if err := os.RemoveAll(path.Join(root, "symlink2")); err != nil { + t.Fatal(err) + } + if err := os.Symlink("target2change", path.Join(root, "symlink2")); err != nil { + t.Fatal(err) + } + + // Replace dir with file + if err := os.RemoveAll(path.Join(root, "dir2")); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(root, "dir2"), []byte("dir2\n"), 0777); err != nil { + t.Fatal(err) + } + + // Touch dir + if err := system.Chtimes(path.Join(root, "dir3"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { + t.Fatal(err) + } +} + +func TestChangesDirsMutated(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + // as createSampleDir uses symlinks. + // TODO Should work for Solaris + if runtime.GOOS == "windows" || runtime.GOOS == "solaris" { + t.Skip("symlinks on Windows; gcp failures on Solaris") + } + src, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + createSampleDir(t, src) + dst := src + "-copy" + if err := copyDir(src, dst); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(src) + defer os.RemoveAll(dst) + + mutateSampleDir(t, dst) + + changes, err := ChangesDirs(dst, src) + if err != nil { + t.Fatal(err) + } + + sort.Sort(changesByPath(changes)) + + expectedChanges := []Change{ + {"/dir1", ChangeDelete}, + {"/dir2", ChangeModify}, + {"/dirnew", ChangeAdd}, + {"/file1", ChangeDelete}, + {"/file2", ChangeModify}, + {"/file3", ChangeModify}, + {"/file4", ChangeModify}, + {"/file5", ChangeModify}, + {"/filenew", ChangeAdd}, + {"/symlink1", ChangeDelete}, + {"/symlink2", ChangeModify}, + {"/symlinknew", ChangeAdd}, + } + + for i := 0; i < max(len(changes), len(expectedChanges)); i++ { + if i >= len(expectedChanges) { + t.Fatalf("unexpected change %s\n", changes[i].String()) + } + if i >= len(changes) { + t.Fatalf("no change for expected change %s\n", expectedChanges[i].String()) + } + if changes[i].Path == expectedChanges[i].Path { + if changes[i] != expectedChanges[i] { + t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String()) + } + } else if changes[i].Path < expectedChanges[i].Path { + t.Fatalf("unexpected change %s\n", changes[i].String()) + } else { + t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String()) + } + } +} + +func TestApplyLayer(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + // as createSampleDir uses symlinks. 
+	// TODO Should work for Solaris
+	if runtime.GOOS == "windows" || runtime.GOOS == "solaris" {
+		t.Skip("symlinks on Windows; gcp failures on Solaris")
+	}
+	src, err := ioutil.TempDir("", "docker-changes-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	createSampleDir(t, src)
+	defer os.RemoveAll(src)
+	dst := src + "-copy"
+	if err := copyDir(src, dst); err != nil {
+		t.Fatal(err)
+	}
+	mutateSampleDir(t, dst)
+	defer os.RemoveAll(dst)
+
+	changes, err := ChangesDirs(dst, src)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	layer, err := ExportChanges(dst, changes, nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	layerCopy, err := NewTempArchive(layer, "")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if _, err := ApplyLayer(src, layerCopy); err != nil {
+		t.Fatal(err)
+	}
+
+	changes2, err := ChangesDirs(src, dst)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(changes2) != 0 {
+		t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2)
+	}
+}
+
+func TestChangesSizeWithHardlinks(t *testing.T) {
+	// TODO Windows. There may be a way of running this, but turning off for now
+	// as createSampleDir uses symlinks.
+	if runtime.GOOS == "windows" {
+		t.Skip("hardlinks on Windows")
+	}
+	srcDir, err := ioutil.TempDir("", "docker-test-srcDir")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(srcDir)
+
+	destDir, err := ioutil.TempDir("", "docker-test-destDir")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(destDir)
+
+	creationSize, err := prepareUntarSourceDirectory(100, destDir, true)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	changes, err := ChangesDirs(destDir, srcDir)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	got := ChangesSize(destDir, changes)
+	if got != int64(creationSize) {
+		t.Errorf("Expected %d bytes of changes, got %d", creationSize, got)
+	}
+}
+
+func TestChangesSizeWithNoChanges(t *testing.T) {
+	size := ChangesSize("/tmp", nil)
+	if size != 0 {
+		t.Fatalf("ChangesSize with no changes should be 0, was %d", size)
+	}
+}
+
+func TestChangesSizeWithOnlyDeleteChanges(t *testing.T) {
+	changes := []Change{
+		{Path: "deletedPath", Kind: ChangeDelete},
+	}
+	size := ChangesSize("/tmp", changes)
+	if size != 0 {
+		t.Fatalf("ChangesSize with only delete changes should be 0, was %d", size)
+	}
+}
+
+func TestChangesSize(t *testing.T) {
+	parentPath, err := ioutil.TempDir("", "docker-changes-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(parentPath)
+	addition := path.Join(parentPath, "addition")
+	if err := ioutil.WriteFile(addition, []byte{0x01, 0x01, 0x01}, 0744); err != nil {
+		t.Fatal(err)
+	}
+	modification := path.Join(parentPath, "modification")
+	if err = ioutil.WriteFile(modification, []byte{0x01, 0x01, 0x01}, 0744); err != nil {
+		t.Fatal(err)
+	}
+	changes := []Change{
+		{Path: "addition", Kind: ChangeAdd},
+		{Path: "modification", Kind: ChangeModify},
+	}
+	size := ChangesSize(parentPath, changes)
+	if size != 6 {
+		t.Fatalf("Expected 6 bytes of changes, got %d", size)
+	}
+}
+
+func checkChanges(expectedChanges, changes []Change, t *testing.T) {
+	sort.Sort(changesByPath(expectedChanges))
+	sort.Sort(changesByPath(changes))
+	for i := 0; i < max(len(changes), len(expectedChanges)); i++ {
+		if i >= len(expectedChanges) {
+			t.Fatalf("unexpected change %s\n", changes[i].String())
+		}
+		if i >= len(changes) {
+			t.Fatalf("no change for expected change %s\n", expectedChanges[i].String())
+		}
+		if changes[i].Path == expectedChanges[i].Path {
+			if changes[i] != expectedChanges[i] {
+				t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, expectedChanges[i].String(), 
changes[i].String())
+			}
+		} else if changes[i].Path < expectedChanges[i].Path {
+			t.Fatalf("unexpected change %s\n", changes[i].String())
+		} else {
+			t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String())
+		}
+	}
+}
diff --git a/vendor/github.com/moby/moby/pkg/archive/changes_unix.go b/vendor/github.com/moby/moby/pkg/archive/changes_unix.go
new file mode 100644
index 000000000..98e2b39ae
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/archive/changes_unix.go
@@ -0,0 +1,37 @@
+// +build !windows
+
+package archive
+
+import (
+	"os"
+	"syscall"
+
+	"github.com/docker/docker/pkg/system"
+	"golang.org/x/sys/unix"
+)
+
+func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
+	if oldStat.Mode() != newStat.Mode() ||
+		oldStat.UID() != newStat.UID() ||
+		oldStat.GID() != newStat.GID() ||
+		oldStat.Rdev() != newStat.Rdev() ||
+		// Don't look at size for dirs, it's not a good measure of change
+		(oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR &&
+			(!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
+		return true
+	}
+	return false
+}
+
+func (info *FileInfo) isDir() bool {
+	return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0
+}
+
+func getIno(fi os.FileInfo) uint64 {
+	return uint64(fi.Sys().(*syscall.Stat_t).Ino)
+}
+
+func hasHardlinks(fi os.FileInfo) bool {
+	return fi.Sys().(*syscall.Stat_t).Nlink > 1
+}
diff --git a/vendor/github.com/moby/moby/pkg/archive/changes_windows.go b/vendor/github.com/moby/moby/pkg/archive/changes_windows.go
new file mode 100644
index 000000000..6fd353269
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/archive/changes_windows.go
@@ -0,0 +1,30 @@
+package archive
+
+import (
+	"os"
+
+	"github.com/docker/docker/pkg/system"
+)
+
+func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
+
+	// Don't look at size for dirs, it's not a good measure of change
+	if oldStat.Mtim() != newStat.Mtim() ||
+		oldStat.Mode() != newStat.Mode() ||
+		oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() {
+		return true
+	}
+	return false
+}
+
+func (info *FileInfo) isDir() bool {
+	return info.parent == nil || info.stat.Mode().IsDir()
+}
+
+func getIno(fi os.FileInfo) (inode uint64) {
+	return
+}
+
+func hasHardlinks(fi os.FileInfo) bool {
+	return false
+}
diff --git a/vendor/github.com/moby/moby/pkg/archive/copy.go b/vendor/github.com/moby/moby/pkg/archive/copy.go
new file mode 100644
index 000000000..5281e29d1
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/archive/copy.go
@@ -0,0 +1,461 @@
+package archive
+
+import (
+	"archive/tar"
+	"errors"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/system"
+)
+
+// Errors used or returned by this file.
+var (
+	ErrNotDirectory      = errors.New("not a directory")
+	ErrDirNotExists      = errors.New("no such directory")
+	ErrCannotCopyDir     = errors.New("cannot copy directory")
+	ErrInvalidCopySource = errors.New("invalid copy source content")
+)
+
+// PreserveTrailingDotOrSeparator returns the given cleaned path (after
+// processing using any utility functions from the path or filepath stdlib
+// packages) and appends a trailing `/.` or `/` if its corresponding original
+// path (from before being processed by utility functions from the path or
+// filepath stdlib packages) ends with a trailing `/.` or `/`. 
If the cleaned
+// path already ends in a `.` path segment, then another is not added. If the
+// cleaned path already ends in a path separator, then another is not added.
+func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string {
+	// Ensure paths are in platform semantics
+	cleanedPath = normalizePath(cleanedPath)
+	originalPath = normalizePath(originalPath)
+
+	if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) {
+		if !hasTrailingPathSeparator(cleanedPath) {
+			// Add a separator if it doesn't already end with one (a cleaned
+			// path would only end in a separator if it is the root).
+			cleanedPath += string(filepath.Separator)
+		}
+		cleanedPath += "."
+	}
+
+	if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) {
+		cleanedPath += string(filepath.Separator)
+	}
+
+	return cleanedPath
+}
+
+// assertsDirectory returns whether the given path is
+// asserted to be a directory, i.e., the path ends with
+// a trailing '/' or `/.`, assuming a path separator of `/`.
+func assertsDirectory(path string) bool {
+	return hasTrailingPathSeparator(path) || specifiesCurrentDir(path)
+}
+
+// hasTrailingPathSeparator returns whether the given
+// path ends with the system's path separator character.
+func hasTrailingPathSeparator(path string) bool {
+	return len(path) > 0 && os.IsPathSeparator(path[len(path)-1])
+}
+
+// specifiesCurrentDir returns whether the given path specifies
+// a "current directory", i.e., the last path segment is `.`.
+func specifiesCurrentDir(path string) bool {
+	return filepath.Base(path) == "."
+}
+
+// SplitPathDirEntry splits the given path between its directory name and its
+// basename by first cleaning the path but preserves a trailing "." if the
+// original path specified the current directory.
+func SplitPathDirEntry(path string) (dir, base string) {
+	cleanedPath := filepath.Clean(normalizePath(path))
+
+	if specifiesCurrentDir(path) {
+		cleanedPath += string(filepath.Separator) + "."
+	}
+
+	return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
+}
+
+// TarResource archives the resource described by the given CopyInfo to a Tar
+// archive. A non-nil error is returned if sourcePath does not exist or is
+// asserted to be a directory but exists as another type of file.
+//
+// This function acts as a convenient wrapper around TarWithOptions, which
+// requires a directory as the source path. TarResource accepts either a
+// directory or a file path and correctly sets the Tar options.
+func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) {
+	return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
+}
+
+// TarResourceRebase is like TarResource but renames the first path element of
+// items in the resulting tar archive to match the given rebaseName if not "".
+func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) {
+	sourcePath = normalizePath(sourcePath)
+	if _, err = os.Lstat(sourcePath); err != nil {
+		// Catches the case where the source does not exist or is not a
+		// directory if asserted to be a directory, as this also causes an
+		// error.
+		return
+	}
+
+	// Separate the source path between its directory and
+	// the entry in that directory which we are archiving.
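+	//
+	// For example (illustrative): a sourcePath of "/a/b/c" splits into
+	// sourceDir "/a/b" and sourceBase "c", while "/a/b/." splits into
+	// "/a/b" and ".".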
+ sourceDir, sourceBase := SplitPathDirEntry(sourcePath) + + filter := []string{sourceBase} + + logrus.Debugf("copying %q from %q", sourceBase, sourceDir) + + return TarWithOptions(sourceDir, &TarOptions{ + Compression: Uncompressed, + IncludeFiles: filter, + IncludeSourceDir: true, + RebaseNames: map[string]string{ + sourceBase: rebaseName, + }, + }) +} + +// CopyInfo holds basic info about the source +// or destination path of a copy operation. +type CopyInfo struct { + Path string + Exists bool + IsDir bool + RebaseName string +} + +// CopyInfoSourcePath stats the given path to create a CopyInfo +// struct representing that resource for the source of an archive copy +// operation. The given path should be an absolute local path. A source path +// has all symlinks evaluated that appear before the last path separator ("/" +// on Unix). As it is to be a copy source, the path must exist. +func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) { + // normalize the file path and then evaluate the symbol link + // we will use the target file instead of the symbol link if + // followLink is set + path = normalizePath(path) + + resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink) + if err != nil { + return CopyInfo{}, err + } + + stat, err := os.Lstat(resolvedPath) + if err != nil { + return CopyInfo{}, err + } + + return CopyInfo{ + Path: resolvedPath, + Exists: true, + IsDir: stat.IsDir(), + RebaseName: rebaseName, + }, nil +} + +// CopyInfoDestinationPath stats the given path to create a CopyInfo +// struct representing that resource for the destination of an archive copy +// operation. The given path should be an absolute local path. +func CopyInfoDestinationPath(path string) (info CopyInfo, err error) { + maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot. + path = normalizePath(path) + originalPath := path + + stat, err := os.Lstat(path) + + if err == nil && stat.Mode()&os.ModeSymlink == 0 { + // The path exists and is not a symlink. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil + } + + // While the path is a symlink. + for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ { + if n > maxSymlinkIter { + // Don't follow symlinks more than this arbitrary number of times. + return CopyInfo{}, errors.New("too many symlinks in " + originalPath) + } + + // The path is a symbolic link. We need to evaluate it so that the + // destination of the copy operation is the link target and not the + // link itself. This is notably different than CopyInfoSourcePath which + // only evaluates symlinks before the last appearing path separator. + // Also note that it is okay if the last path element is a broken + // symlink as the copy operation should create the target. + var linkTarget string + + linkTarget, err = os.Readlink(path) + if err != nil { + return CopyInfo{}, err + } + + if !system.IsAbs(linkTarget) { + // Join with the parent directory. + dstParent, _ := SplitPathDirEntry(path) + linkTarget = filepath.Join(dstParent, linkTarget) + } + + path = linkTarget + stat, err = os.Lstat(path) + } + + if err != nil { + // It's okay if the destination path doesn't exist. We can still + // continue the copy operation if the parent directory exists. + if !os.IsNotExist(err) { + return CopyInfo{}, err + } + + // Ensure destination parent dir exists. 
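+		// For example (illustrative): a non-existent dstPath "/a/b/newfile"
+		// is acceptable as long as its parent "/a/b" exists and is a
+		// directory.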
+		dstParent, _ := SplitPathDirEntry(path)
+
+		parentDirStat, err := os.Lstat(dstParent)
+		if err != nil {
+			return CopyInfo{}, err
+		}
+		if !parentDirStat.IsDir() {
+			return CopyInfo{}, ErrNotDirectory
+		}
+
+		return CopyInfo{Path: path}, nil
+	}
+
+	// The path exists after resolving symlinks.
+	return CopyInfo{
+		Path:   path,
+		Exists: true,
+		IsDir:  stat.IsDir(),
+	}, nil
+}
+
+// PrepareArchiveCopy prepares the given srcContent archive, which should
+// contain the archived resource described by srcInfo, to the destination
+// described by dstInfo. Returns the possibly modified content archive along
+// with the path to the destination directory which it should be extracted to.
+func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) {
+	// Ensure in platform semantics
+	srcInfo.Path = normalizePath(srcInfo.Path)
+	dstInfo.Path = normalizePath(dstInfo.Path)
+
+	// Separate the destination path between its directory and base
+	// components in case the source archive contents need to be rebased.
+	dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
+	_, srcBase := SplitPathDirEntry(srcInfo.Path)
+
+	switch {
+	case dstInfo.Exists && dstInfo.IsDir:
+		// The destination exists as a directory. No alteration
+		// to srcContent is needed as its contents can be
+		// simply extracted to the destination directory.
+		return dstInfo.Path, ioutil.NopCloser(srcContent), nil
+	case dstInfo.Exists && srcInfo.IsDir:
+		// The destination exists as some type of file and the source
+		// content is a directory. This is an error condition since
+		// you cannot copy a directory to an existing file location.
+		return "", nil, ErrCannotCopyDir
+	case dstInfo.Exists:
+		// The destination exists as some type of file and the source content
+		// is also a file. The source content entry will have to be renamed to
+		// have a basename which matches the destination path's basename.
+		if len(srcInfo.RebaseName) != 0 {
+			srcBase = srcInfo.RebaseName
+		}
+		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+	case srcInfo.IsDir:
+		// The destination does not exist and the source content is an archive
+		// of a directory. The archive should be extracted to the parent of
+		// the destination path instead, and when it is, the directory that is
+		// created as a result should take the name of the destination path.
+		// The source content entries will have to be renamed to have a
+		// basename which matches the destination path's basename.
+		if len(srcInfo.RebaseName) != 0 {
+			srcBase = srcInfo.RebaseName
+		}
+		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+	case assertsDirectory(dstInfo.Path):
+		// The destination does not exist and is asserted to be created as a
+		// directory, but the source content is not a directory. This is an
+		// error condition since you cannot create a directory from a file
+		// source.
+		return "", nil, ErrDirNotExists
+	default:
+		// The last remaining case is when the destination does not exist, is
+		// not asserted to be a directory, and the source content is not an
+		// archive of a directory. In this case, the destination file will need
+		// to be created when the archive is extracted and the source content
+		// entry will have to be renamed to have a basename which matches the
+		// destination path's basename.
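+		//
+		// For example (illustrative): copying "/tmp/file1" to the
+		// non-existent "/tmp/dst/renamed" rebases the entry "file1" to
+		// "renamed" and extracts it into "/tmp/dst".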
+		if len(srcInfo.RebaseName) != 0 {
+			srcBase = srcInfo.RebaseName
+		}
+		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+	}
+}
+
+// RebaseArchiveEntries rewrites the given srcContent archive replacing
+// the first occurrence of oldBase with newBase at the beginning of entry names.
+func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser {
+	if oldBase == string(os.PathSeparator) {
+		// If oldBase specifies the root directory, use an empty string as
+		// oldBase instead so that newBase doesn't replace the path separator
+		// that all paths will start with.
+		oldBase = ""
+	}
+
+	rebased, w := io.Pipe()
+
+	go func() {
+		srcTar := tar.NewReader(srcContent)
+		rebasedTar := tar.NewWriter(w)
+
+		for {
+			hdr, err := srcTar.Next()
+			if err == io.EOF {
+				// Signals end of archive.
+				rebasedTar.Close()
+				w.Close()
+				return
+			}
+			if err != nil {
+				w.CloseWithError(err)
+				return
+			}
+
+			hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
+			if hdr.Typeflag == tar.TypeLink {
+				hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1)
+			}
+
+			if err = rebasedTar.WriteHeader(hdr); err != nil {
+				w.CloseWithError(err)
+				return
+			}
+
+			if _, err = io.Copy(rebasedTar, srcTar); err != nil {
+				w.CloseWithError(err)
+				return
+			}
+		}
+	}()
+
+	return rebased
+}
+
+// CopyResource performs an archive copy from the given source path to the
+// given destination path. The source path MUST exist and the destination
+// path's parent directory must exist.
+func CopyResource(srcPath, dstPath string, followLink bool) error {
+	var (
+		srcInfo CopyInfo
+		err     error
+	)
+
+	// Ensure in platform semantics
+	srcPath = normalizePath(srcPath)
+	dstPath = normalizePath(dstPath)
+
+	// Clean the source and destination paths.
+	srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath)
+	dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath)
+
+	if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
+		return err
+	}
+
+	content, err := TarResource(srcInfo)
+	if err != nil {
+		return err
+	}
+	defer content.Close()
+
+	return CopyTo(content, srcInfo, dstPath)
+}
+
+// CopyTo handles extracting the given content whose
+// entries should be sourced from srcInfo to dstPath.
+func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error {
+	// The destination path need not exist, but CopyInfoDestinationPath will
+	// ensure that at least the parent directory exists.
+	dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
+	if err != nil {
+		return err
+	}
+
+	dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
+	if err != nil {
+		return err
+	}
+	defer copyArchive.Close()
+
+	options := &TarOptions{
+		NoLchown:             true,
+		NoOverwriteDirNonDir: true,
+	}
+
+	return Untar(copyArchive, dstDir, options)
+}
+
+// ResolveHostSourcePath decides the real path to be copied, given parameters
+// such as whether to follow symlinks. If followLink is true, resolvedPath is
+// the target of any symlink in path; otherwise only symlinks in the parent
+// directory are resolved, and a trailing symlink is returned as-is without
+// being resolved.
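+//
+// For example (illustrative): with "/a/link" pointing at "/a/target",
+// followLink=true resolves to "/a/target", while followLink=false resolves
+// only the parent ("/a") and keeps the base name "link".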
+func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) { + if followLink { + resolvedPath, err = filepath.EvalSymlinks(path) + if err != nil { + return + } + + resolvedPath, rebaseName = GetRebaseName(path, resolvedPath) + } else { + dirPath, basePath := filepath.Split(path) + + // if not follow symbol link, then resolve symbol link of parent dir + var resolvedDirPath string + resolvedDirPath, err = filepath.EvalSymlinks(dirPath) + if err != nil { + return + } + // resolvedDirPath will have been cleaned (no trailing path separators) so + // we can manually join it with the base path element. + resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath + if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) { + rebaseName = filepath.Base(path) + } + } + return resolvedPath, rebaseName, nil +} + +// GetRebaseName normalizes and compares path and resolvedPath, +// return completed resolved path and rebased file name +func GetRebaseName(path, resolvedPath string) (string, string) { + // linkTarget will have been cleaned (no trailing path separators and dot) so + // we can manually join it with them + var rebaseName string + if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) { + resolvedPath += string(filepath.Separator) + "." + } + + if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) { + resolvedPath += string(filepath.Separator) + } + + if filepath.Base(path) != filepath.Base(resolvedPath) { + // In the case where the path had a trailing separator and a symlink + // evaluation has changed the last path component, we will need to + // rebase the name in the archive that is being copied to match the + // originally requested name. + rebaseName = filepath.Base(path) + } + return resolvedPath, rebaseName +} diff --git a/vendor/github.com/moby/moby/pkg/archive/copy_unix.go b/vendor/github.com/moby/moby/pkg/archive/copy_unix.go new file mode 100644 index 000000000..e305b5e4a --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/copy_unix.go @@ -0,0 +1,11 @@ +// +build !windows + +package archive + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.ToSlash(path) +} diff --git a/vendor/github.com/moby/moby/pkg/archive/copy_unix_test.go b/vendor/github.com/moby/moby/pkg/archive/copy_unix_test.go new file mode 100644 index 000000000..4d5ae79cd --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/copy_unix_test.go @@ -0,0 +1,978 @@ +// +build !windows + +// TODO Windows: Some of these tests may be salvageable and portable to Windows. + +package archive + +import ( + "bytes" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" +) + +func removeAllPaths(paths ...string) { + for _, path := range paths { + os.RemoveAll(path) + } +} + +func getTestTempDirs(t *testing.T) (tmpDirA, tmpDirB string) { + var err error + + if tmpDirA, err = ioutil.TempDir("", "archive-copy-test"); err != nil { + t.Fatal(err) + } + + if tmpDirB, err = ioutil.TempDir("", "archive-copy-test"); err != nil { + t.Fatal(err) + } + + return +} + +func isNotDir(err error) bool { + return strings.Contains(err.Error(), "not a directory") +} + +func joinTrailingSep(pathElements ...string) string { + joined := filepath.Join(pathElements...) 
+ + return fmt.Sprintf("%s%c", joined, filepath.Separator) +} + +func fileContentsEqual(t *testing.T, filenameA, filenameB string) (err error) { + t.Logf("checking for equal file contents: %q and %q\n", filenameA, filenameB) + + fileA, err := os.Open(filenameA) + if err != nil { + return + } + defer fileA.Close() + + fileB, err := os.Open(filenameB) + if err != nil { + return + } + defer fileB.Close() + + hasher := sha256.New() + + if _, err = io.Copy(hasher, fileA); err != nil { + return + } + + hashA := hasher.Sum(nil) + hasher.Reset() + + if _, err = io.Copy(hasher, fileB); err != nil { + return + } + + hashB := hasher.Sum(nil) + + if !bytes.Equal(hashA, hashB) { + err = fmt.Errorf("file content hashes not equal - expected %s, got %s", hex.EncodeToString(hashA), hex.EncodeToString(hashB)) + } + + return +} + +func dirContentsEqual(t *testing.T, newDir, oldDir string) (err error) { + t.Logf("checking for equal directory contents: %q and %q\n", newDir, oldDir) + + var changes []Change + + if changes, err = ChangesDirs(newDir, oldDir); err != nil { + return + } + + if len(changes) != 0 { + err = fmt.Errorf("expected no changes between directories, but got: %v", changes) + } + + return +} + +func logDirContents(t *testing.T, dirPath string) { + logWalkedPaths := filepath.WalkFunc(func(path string, info os.FileInfo, err error) error { + if err != nil { + t.Errorf("stat error for path %q: %s", path, err) + return nil + } + + if info.IsDir() { + path = joinTrailingSep(path) + } + + t.Logf("\t%s", path) + + return nil + }) + + t.Logf("logging directory contents: %q", dirPath) + + if err := filepath.Walk(dirPath, logWalkedPaths); err != nil { + t.Fatal(err) + } +} + +func testCopyHelper(t *testing.T, srcPath, dstPath string) (err error) { + t.Logf("copying from %q to %q (not follow symbol link)", srcPath, dstPath) + + return CopyResource(srcPath, dstPath, false) +} + +func testCopyHelperFSym(t *testing.T, srcPath, dstPath string) (err error) { + t.Logf("copying from %q to %q (follow symbol link)", srcPath, dstPath) + + return CopyResource(srcPath, dstPath, true) +} + +// Basic assumptions about SRC and DST: +// 1. SRC must exist. +// 2. If SRC ends with a trailing separator, it must be a directory. +// 3. DST parent directory must exist. +// 4. If DST exists as a file, it must not end with a trailing separator. + +// First get these easy error cases out of the way. + +// Test for error when SRC does not exist. +func TestCopyErrSrcNotExists(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + if _, err := CopyInfoSourcePath(filepath.Join(tmpDirA, "file1"), false); !os.IsNotExist(err) { + t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) + } +} + +// Test for error when SRC ends in a trailing +// path separator but it exists as a file. +func TestCopyErrSrcNotDir(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + if _, err := CopyInfoSourcePath(joinTrailingSep(tmpDirA, "file1"), false); !isNotDir(err) { + t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) + } +} + +// Test for error when SRC is a valid file or directory, +// but the DST parent directory does not exist. +func TestCopyErrDstParentNotExists(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. 
+ createSampleDir(t, tmpDirA) + + srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false} + + // Try with a file source. + content, err := TarResource(srcInfo) + if err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + defer content.Close() + + // Copy to a file whose parent does not exist. + if err = CopyTo(content, srcInfo, filepath.Join(tmpDirB, "fakeParentDir", "file1")); err == nil { + t.Fatal("expected IsNotExist error, but got nil instead") + } + + if !os.IsNotExist(err) { + t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) + } + + // Try with a directory source. + srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true} + + content, err = TarResource(srcInfo) + if err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + defer content.Close() + + // Copy to a directory whose parent does not exist. + if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "fakeParentDir", "fakeDstDir")); err == nil { + t.Fatal("expected IsNotExist error, but got nil instead") + } + + if !os.IsNotExist(err) { + t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) + } +} + +// Test for error when DST ends in a trailing +// path separator but exists as a file. +func TestCopyErrDstNotDir(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + // Try with a file source. + srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false} + + content, err := TarResource(srcInfo) + if err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + defer content.Close() + + if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil { + t.Fatal("expected IsNotDir error, but got nil instead") + } + + if !isNotDir(err) { + t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) + } + + // Try with a directory source. + srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true} + + content, err = TarResource(srcInfo) + if err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + defer content.Close() + + if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil { + t.Fatal("expected IsNotDir error, but got nil instead") + } + + if !isNotDir(err) { + t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) + } +} + +// Possibilities are reduced to the remaining 10 cases: +// +// case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action +// =================================================================================================== +// A | no | - | no | - | no | create file +// B | no | - | no | - | yes | error +// C | no | - | yes | no | - | overwrite file +// D | no | - | yes | yes | - | create file in dst dir +// E | yes | no | no | - | - | create dir, copy contents +// F | yes | no | yes | no | - | error +// G | yes | no | yes | yes | - | copy dir and contents +// H | yes | yes | no | - | - | create dir, copy contents +// I | yes | yes | yes | no | - | error +// J | yes | yes | yes | yes | - | copy dir contents +// + +// A. SRC specifies a file and DST (no trailing path separator) doesn't +// exist. This should create a file with the name DST and copy the +// contents of the source file into it. 
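+//
+// A minimal sketch of case A outside the test harness (illustrative only;
+// paths are hypothetical):
+//
+//	// Creates /tmp/b/itWorks.txt with the contents of /tmp/a/file1.
+//	if err := CopyResource("/tmp/a/file1", "/tmp/b/itWorks.txt", false); err != nil {
+//		log.Fatal(err)
+//	}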
+func TestCopyCaseA(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcPath := filepath.Join(tmpDirA, "file1") + dstPath := filepath.Join(tmpDirB, "itWorks.txt") + + var err error + + if err = testCopyHelper(t, srcPath, dstPath); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, srcPath, dstPath); err != nil { + t.Fatal(err) + } + os.Remove(dstPath) + + symlinkPath := filepath.Join(tmpDirA, "symlink3") + symlinkPath1 := filepath.Join(tmpDirA, "symlink4") + linkTarget := filepath.Join(tmpDirA, "file1") + + if err = testCopyHelperFSym(t, symlinkPath, dstPath); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { + t.Fatal(err) + } + os.Remove(dstPath) + if err = testCopyHelperFSym(t, symlinkPath1, dstPath); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { + t.Fatal(err) + } +} + +// B. SRC specifies a file and DST (with trailing path separator) doesn't +// exist. This should cause an error because the copy operation cannot +// create a directory when copying a single file. +func TestCopyCaseB(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcPath := filepath.Join(tmpDirA, "file1") + dstDir := joinTrailingSep(tmpDirB, "testDir") + + var err error + + if err = testCopyHelper(t, srcPath, dstDir); err == nil { + t.Fatal("expected ErrDirNotExists error, but got nil instead") + } + + if err != ErrDirNotExists { + t.Fatalf("expected ErrDirNotExists error, but got %T: %s", err, err) + } + + symlinkPath := filepath.Join(tmpDirA, "symlink3") + + if err = testCopyHelperFSym(t, symlinkPath, dstDir); err == nil { + t.Fatal("expected ErrDirNotExists error, but got nil instead") + } + if err != ErrDirNotExists { + t.Fatalf("expected ErrDirNotExists error, but got %T: %s", err, err) + } + +} + +// C. SRC specifies a file and DST exists as a file. This should overwrite +// the file at DST with the contents of the source file. +func TestCopyCaseC(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcPath := filepath.Join(tmpDirA, "file1") + dstPath := filepath.Join(tmpDirB, "file2") + + var err error + + // Ensure they start out different. + if err = fileContentsEqual(t, srcPath, dstPath); err == nil { + t.Fatal("expected different file contents") + } + + if err = testCopyHelper(t, srcPath, dstPath); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, srcPath, dstPath); err != nil { + t.Fatal(err) + } +} + +// C. Symbol link following version: +// SRC specifies a file and DST exists as a file. This should overwrite +// the file at DST with the contents of the source file. +func TestCopyCaseCFSym(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. 
+ createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + symlinkPathBad := filepath.Join(tmpDirA, "symlink1") + symlinkPath := filepath.Join(tmpDirA, "symlink3") + linkTarget := filepath.Join(tmpDirA, "file1") + dstPath := filepath.Join(tmpDirB, "file2") + + var err error + + // first to test broken link + if err = testCopyHelperFSym(t, symlinkPathBad, dstPath); err == nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + // test symbol link -> symbol link -> target + // Ensure they start out different. + if err = fileContentsEqual(t, linkTarget, dstPath); err == nil { + t.Fatal("expected different file contents") + } + + if err = testCopyHelperFSym(t, symlinkPath, dstPath); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { + t.Fatal(err) + } +} + +// D. SRC specifies a file and DST exists as a directory. This should place +// a copy of the source file inside it using the basename from SRC. Ensure +// this works whether DST has a trailing path separator or not. +func TestCopyCaseD(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcPath := filepath.Join(tmpDirA, "file1") + dstDir := filepath.Join(tmpDirB, "dir1") + dstPath := filepath.Join(dstDir, "file1") + + var err error + + // Ensure that dstPath doesn't exist. + if _, err = os.Stat(dstPath); !os.IsNotExist(err) { + t.Fatalf("did not expect dstPath %q to exist", dstPath) + } + + if err = testCopyHelper(t, srcPath, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, srcPath, dstPath); err != nil { + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "dir1") + + if err = testCopyHelper(t, srcPath, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, srcPath, dstPath); err != nil { + t.Fatal(err) + } +} + +// D. Symbol link following version: +// SRC specifies a file and DST exists as a directory. This should place +// a copy of the source file inside it using the basename from SRC. Ensure +// this works whether DST has a trailing path separator or not. +func TestCopyCaseDFSym(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcPath := filepath.Join(tmpDirA, "symlink4") + linkTarget := filepath.Join(tmpDirA, "file1") + dstDir := filepath.Join(tmpDirB, "dir1") + dstPath := filepath.Join(dstDir, "symlink4") + + var err error + + // Ensure that dstPath doesn't exist. + if _, err = os.Stat(dstPath); !os.IsNotExist(err) { + t.Fatalf("did not expect dstPath %q to exist", dstPath) + } + + if err = testCopyHelperFSym(t, srcPath, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. 
+ + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "dir1") + + if err = testCopyHelperFSym(t, srcPath, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { + t.Fatal(err) + } +} + +// E. SRC specifies a directory and DST does not exist. This should create a +// directory at DST and copy the contents of the SRC directory into the DST +// directory. Ensure this works whether DST has a trailing path separator or +// not. +func TestCopyCaseE(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcDir := filepath.Join(tmpDirA, "dir1") + dstDir := filepath.Join(tmpDirB, "testDir") + + var err error + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "testDir") + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Fatal(err) + } +} + +// E. Symbol link following version: +// SRC specifies a directory and DST does not exist. This should create a +// directory at DST and copy the contents of the SRC directory into the DST +// directory. Ensure this works whether DST has a trailing path separator or +// not. +func TestCopyCaseEFSym(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcDir := filepath.Join(tmpDirA, "dirSymlink") + linkTarget := filepath.Join(tmpDirA, "dir1") + dstDir := filepath.Join(tmpDirB, "testDir") + + var err error + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "testDir") + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { + t.Fatal(err) + } +} + +// F. SRC specifies a directory and DST exists as a file. This should cause an +// error as it is not possible to overwrite a file with a directory. +func TestCopyCaseF(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. 
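+	// Editorial summary of the lettered matrix, reconstructed from the case
+	// comments in this file (each FSym variant reruns its case with symlink
+	// following enabled):
+	//
+	//	 SRC        DST                     expected
+	//	 A: file    missing                 file created at DST
+	//	 B: file    missing, trailing sep   ErrDirNotExists
+	//	 C: file    existing file           DST overwritten
+	//	 D: file    existing dir            copy placed inside DST
+	//	 E: dir     missing                 dir created, contents copied
+	//	 F: dir     existing file           ErrCannotCopyDir
+	//	 G: dir     existing dir            SRC dir copied into DST
+	//	 H: dir/.   missing                 dir created, contents only
+	//	 I: dir/.   existing file           ErrCannotCopyDir
+	//	 J: dir/.   existing dir            contents copied into DST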
+ createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcDir := filepath.Join(tmpDirA, "dir1") + symSrcDir := filepath.Join(tmpDirA, "dirSymlink") + dstFile := filepath.Join(tmpDirB, "file1") + + var err error + + if err = testCopyHelper(t, srcDir, dstFile); err == nil { + t.Fatal("expected ErrCannotCopyDir error, but got nil instead") + } + + if err != ErrCannotCopyDir { + t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) + } + + // now test with symbol link + if err = testCopyHelperFSym(t, symSrcDir, dstFile); err == nil { + t.Fatal("expected ErrCannotCopyDir error, but got nil instead") + } + + if err != ErrCannotCopyDir { + t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) + } +} + +// G. SRC specifies a directory and DST exists as a directory. This should copy +// the SRC directory and all its contents to the DST directory. Ensure this +// works whether DST has a trailing path separator or not. +func TestCopyCaseG(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcDir := filepath.Join(tmpDirA, "dir1") + dstDir := filepath.Join(tmpDirB, "dir2") + resultDir := filepath.Join(dstDir, "dir1") + + var err error + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, resultDir, srcDir); err != nil { + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "dir2") + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, resultDir, srcDir); err != nil { + t.Fatal(err) + } +} + +// G. Symbol link version: +// SRC specifies a directory and DST exists as a directory. This should copy +// the SRC directory and all its contents to the DST directory. Ensure this +// works whether DST has a trailing path separator or not. +func TestCopyCaseGFSym(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcDir := filepath.Join(tmpDirA, "dirSymlink") + linkTarget := filepath.Join(tmpDirA, "dir1") + dstDir := filepath.Join(tmpDirB, "dir2") + resultDir := filepath.Join(dstDir, "dirSymlink") + + var err error + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, resultDir, linkTarget); err != nil { + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "dir2") + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, resultDir, linkTarget); err != nil { + t.Fatal(err) + } +} + +// H. 
SRC specifies a directory's contents only and DST does not exist. This +// should create a directory at DST and copy the contents of the SRC +// directory (but not the directory itself) into the DST directory. Ensure +// this works whether DST has a trailing path separator or not. +func TestCopyCaseH(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcDir := joinTrailingSep(tmpDirA, "dir1") + "." + dstDir := filepath.Join(tmpDirB, "testDir") + + var err error + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "testDir") + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } +} + +// H. Symbol link following version: +// SRC specifies a directory's contents only and DST does not exist. This +// should create a directory at DST and copy the contents of the SRC +// directory (but not the directory itself) into the DST directory. Ensure +// this works whether DST has a trailing path separator or not. +func TestCopyCaseHFSym(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcDir := joinTrailingSep(tmpDirA, "dirSymlink") + "." + linkTarget := filepath.Join(tmpDirA, "dir1") + dstDir := filepath.Join(tmpDirB, "testDir") + + var err error + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "testDir") + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } +} + +// I. SRC specifies a directory's contents only and DST exists as a file. This +// should cause an error as it is not possible to overwrite a file with a +// directory. +func TestCopyCaseI(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcDir := joinTrailingSep(tmpDirA, "dir1") + "." 
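+	// Editorial note: the trailing "/." marks "contents of the directory, not
+	// the directory itself". The copy implementation presumably detects this
+	// with something along the lines of:
+	//
+	//	func specifiesCurrentDir(path string) bool {
+	//		return filepath.Base(path) == "."
+	//	}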
+ symSrcDir := filepath.Join(tmpDirB, "dirSymlink") + dstFile := filepath.Join(tmpDirB, "file1") + + var err error + + if err = testCopyHelper(t, srcDir, dstFile); err == nil { + t.Fatal("expected ErrCannotCopyDir error, but got nil instead") + } + + if err != ErrCannotCopyDir { + t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) + } + + // now try with symbol link of dir + if err = testCopyHelperFSym(t, symSrcDir, dstFile); err == nil { + t.Fatal("expected ErrCannotCopyDir error, but got nil instead") + } + + if err != ErrCannotCopyDir { + t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) + } +} + +// J. SRC specifies a directory's contents only and DST exists as a directory. +// This should copy the contents of the SRC directory (but not the directory +// itself) into the DST directory. Ensure this works whether DST has a +// trailing path separator or not. +func TestCopyCaseJ(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcDir := joinTrailingSep(tmpDirA, "dir1") + "." + dstDir := filepath.Join(tmpDirB, "dir5") + + var err error + + // first to create an empty dir + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "dir5") + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Fatal(err) + } +} + +// J. Symbol link following version: +// SRC specifies a directory's contents only and DST exists as a directory. +// This should copy the contents of the SRC directory (but not the directory +// itself) into the DST directory. Ensure this works whether DST has a +// trailing path separator or not. +func TestCopyCaseJFSym(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcDir := joinTrailingSep(tmpDirA, "dirSymlink") + "." + linkTarget := filepath.Join(tmpDirA, "dir1") + dstDir := filepath.Join(tmpDirB, "dir5") + + var err error + + // first to create an empty dir + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. 
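+	// Editorial note: with follow-link enabled, a source of "dirSymlink/."
+	// behaves exactly like "dir1/.": the link is resolved first, then the
+	// target directory's contents (not the directory itself) are copied, which
+	// is why the assertions compare dstDir against linkTarget. Roughly:
+	//
+	//	// src: tmpDirA/dirSymlink/.  (dirSymlink -> dir1)
+	//	// dst: tmpDirB/dir5          ==> dir5 contents == dir1 contents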
+ + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "dir5") + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/pkg/archive/copy_windows.go b/vendor/github.com/moby/moby/pkg/archive/copy_windows.go new file mode 100644 index 000000000..2b775b45c --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/copy_windows.go @@ -0,0 +1,9 @@ +package archive + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.FromSlash(path) +} diff --git a/vendor/github.com/moby/moby/pkg/archive/diff.go b/vendor/github.com/moby/moby/pkg/archive/diff.go new file mode 100644 index 000000000..a2766b592 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/diff.go @@ -0,0 +1,256 @@ +package archive + +import ( + "archive/tar" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" +) + +// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. +func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) { + tr := tar.NewReader(layer) + trBuf := pools.BufioReader32KPool.Get(tr) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + unpackedPaths := make(map[string]struct{}) + + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + + aufsTempdir := "" + aufsHardlinks := make(map[string]*tar.Header) + + // Iterate through the files in the archive. + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return 0, err + } + + size += hdr.Size + + // Normalize name, for safety and for a simple is-root check + hdr.Name = filepath.Clean(hdr.Name) + + // Windows does not support filenames with colons in them. Ignore + // these files. This is not a problem though (although it might + // appear that it is). Let's suppose a client is running docker pull. + // The daemon it points to is Windows. Would it make sense for the + // client to be doing a docker pull Ubuntu for example (which has files + // with colons in the name under /usr/share/man/man3)? No, absolutely + // not as it would really only make sense that they were pulling a + // Windows image. However, for development, it is necessary to be able + // to pull Linux images which are in the repository. + // + // TODO Windows. Once the registry is aware of what images are Windows- + // specific or Linux-specific, this warning should be changed to an error + // to cater for the situation where someone does manage to upload a Linux + // image but have it tagged as Windows inadvertently. 
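+		// Editorial note: the filepath.Clean above is what makes the later
+		// breakout check sufficient; any "dir/../../x" style name collapses
+		// into a "../"-prefixed path first, for example:
+		//
+		//	filepath.Clean("a/../../etc/passwd") // "../etc/passwd"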
+ if runtime.GOOS == "windows" { + if strings.Contains(hdr.Name, ":") { + logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name) + continue + } + } + + // Note as these operations are platform specific, so must the slash be. + if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { + // Not the root directory, ensure that the parent directory exists. + // This happened in some tests where an image had a tarfile without any + // parent directories. + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + err = system.MkdirAll(parentPath, 0600, "") + if err != nil { + return 0, err + } + } + } + + // Skip AUFS metadata dirs + if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) { + // Regular files inside /.wh..wh.plnk can be used as hardlink targets + // We don't want this directory, but we need the files in them so that + // such hardlinks can be resolved. + if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg { + basename := filepath.Base(hdr.Name) + aufsHardlinks[basename] = hdr + if aufsTempdir == "" { + if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil { + return 0, err + } + defer os.RemoveAll(aufsTempdir) + } + if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil { + return 0, err + } + } + + if hdr.Name != WhiteoutOpaqueDir { + continue + } + } + path := filepath.Join(dest, hdr.Name) + rel, err := filepath.Rel(dest, path) + if err != nil { + return 0, err + } + + // Note as these operations are platform specific, so must the slash be. + if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { + return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) + } + base := filepath.Base(path) + + if strings.HasPrefix(base, WhiteoutPrefix) { + dir := filepath.Dir(path) + if base == WhiteoutOpaqueDir { + _, err := os.Lstat(dir) + if err != nil { + return 0, err + } + err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if err != nil { + if os.IsNotExist(err) { + err = nil // parent was deleted + } + return err + } + if path == dir { + return nil + } + if _, exists := unpackedPaths[path]; !exists { + err := os.RemoveAll(path) + return err + } + return nil + }) + if err != nil { + return 0, err + } + } else { + originalBase := base[len(WhiteoutPrefix):] + originalPath := filepath.Join(dir, originalBase) + if err := os.RemoveAll(originalPath); err != nil { + return 0, err + } + } + } else { + // If path exits we almost always just want to remove and replace it. + // The only exception is when it is a directory *and* the file from + // the layer is also a directory. Then we want to merge them (i.e. + // just apply the metadata from the layer). 
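+			// Editorial illustration of the whiteout handling above: an entry
+			// named ".wh.foo" deletes its sibling "foo" from the unpacked
+			// tree, and the opaque marker resets a directory:
+			//
+			//	lower layer: dir/a, dir/b
+			//	this layer:  dir/.wh.a        -> result: dir/b
+			//	this layer:  dir/.wh..wh..opq -> result: dir/ emptied, then
+			//	                                 refilled only by this layer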
+ if fi, err := os.Lstat(path); err == nil { + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return 0, err + } + } + } + + trBuf.Reset(tr) + srcData := io.Reader(trBuf) + srcHdr := hdr + + // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so + // we manually retarget these into the temporary files we extracted them into + if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) { + linkBasename := filepath.Base(hdr.Linkname) + srcHdr = aufsHardlinks[linkBasename] + if srcHdr == nil { + return 0, fmt.Errorf("Invalid aufs hardlink") + } + tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) + if err != nil { + return 0, err + } + defer tmpFile.Close() + srcData = tmpFile + } + + if err := remapIDs(idMappings, srcHdr); err != nil { + return 0, err + } + + if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS); err != nil { + return 0, err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + unpackedPaths[path] = struct{}{} + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return 0, err + } + } + + return size, nil +} + +// ApplyLayer parses a diff in the standard layer format from `layer`, +// and applies it to the directory `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyLayer(dest string, layer io.Reader) (int64, error) { + return applyLayerHandler(dest, layer, &TarOptions{}, true) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. The stream `layer` +// can only be uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) { + return applyLayerHandler(dest, layer, options, false) +} + +// do the bulk load of ApplyLayer, but allow for not calling DecompressStream +func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) { + dest = filepath.Clean(dest) + + // We need to be able to set any perms + oldmask, err := system.Umask(0) + if err != nil { + return 0, err + } + defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform + + if decompress { + layer, err = DecompressStream(layer) + if err != nil { + return 0, err + } + } + return UnpackLayer(dest, layer, options) +} diff --git a/vendor/github.com/moby/moby/pkg/archive/diff_test.go b/vendor/github.com/moby/moby/pkg/archive/diff_test.go new file mode 100644 index 000000000..8167941ac --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/diff_test.go @@ -0,0 +1,386 @@ +package archive + +import ( + "archive/tar" + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "runtime" + "testing" + + "github.com/docker/docker/pkg/ioutils" +) + +func TestApplyLayerInvalidFilenames(t *testing.T) { + // TODO Windows: Figure out how to fix this test. 
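+	// Editorial note: testBreakout (utils_test.go, later in this diff) builds a
+	// tar stream from each header slice below, extracts it next to a "victim"
+	// directory, and passes only if the attempt is rejected with this package's
+	// breakoutError. In-package callers could detect that condition roughly as:
+	//
+	//	if _, err := ApplyLayer(dest, layer); err != nil {
+	//		if _, ok := err.(breakoutError); ok {
+	//			// malicious archive: an entry tried to escape dest
+	//		}
+	//	}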
+ if runtime.GOOS == "windows" { + t.Skip("Passes but hits breakoutError: platform and architecture is not supported") + } + for i, headers := range [][]*tar.Header{ + { + { + Name: "../victim/dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { + { + // Note the leading slash + Name: "/../victim/slash-dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidFilenames", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestApplyLayerInvalidHardlink(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("TypeLink support on Windows") + } + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeLink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeLink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (hardlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try reading victim/hello (hardlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try removing victim directory (hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidHardlink", headers); err != nil { + t.Fatalf("i=%d. 
%v", i, err) + } + } +} + +func TestApplyLayerInvalidSymlink(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("TypeSymLink support on Windows") + } + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeSymlink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeSymlink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try removing victim directory (symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidSymlink", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestApplyLayerWhiteouts(t *testing.T) { + // TODO Windows: Figure out why this test fails + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + + wd, err := ioutil.TempDir("", "graphdriver-test-whiteouts") + if err != nil { + return + } + defer os.RemoveAll(wd) + + base := []string{ + ".baz", + "bar/", + "bar/bax", + "bar/bay/", + "baz", + "foo/", + "foo/.abc", + "foo/.bcd/", + "foo/.bcd/a", + "foo/cde/", + "foo/cde/def", + "foo/cde/efg", + "foo/fgh", + "foobar", + } + + type tcase struct { + change, expected []string + } + + tcases := []tcase{ + { + base, + base, + }, + { + []string{ + ".bay", + ".wh.baz", + "foo/", + "foo/.bce", + "foo/.wh..wh..opq", + "foo/cde/", + "foo/cde/efg", + }, + []string{ + ".bay", + ".baz", + "bar/", + "bar/bax", + "bar/bay/", + "foo/", + "foo/.bce", + "foo/cde/", + "foo/cde/efg", + "foobar", + }, + }, + { + []string{ + ".bay", + ".wh..baz", + ".wh.foobar", + "foo/", + "foo/.abc", + "foo/.wh.cde", + "bar/", + }, + []string{ + ".bay", + "bar/", + "bar/bax", + "bar/bay/", + "foo/", + "foo/.abc", + "foo/.bce", + }, + }, + { + []string{ + ".abc", + ".wh..wh..opq", + "foobar", + }, + []string{ + ".abc", + "foobar", + }, + }, + } + + for i, tc := range tcases { + l, err := makeTestLayer(tc.change) + if err != nil { + t.Fatal(err) + } + + _, err = UnpackLayer(wd, l, nil) + if err != nil { + t.Fatal(err) + } + err = l.Close() + if err != nil { + t.Fatal(err) + } + + paths, err := readDirContents(wd) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(tc.expected, paths) { + t.Fatalf("invalid files for layer %d: expected %q, got %q", i, tc.expected, paths) + } + } + +} + +func makeTestLayer(paths []string) (rc io.ReadCloser, err error) { + tmpDir, err := ioutil.TempDir("", "graphdriver-test-mklayer") + if err != nil { + return + } + defer func() { + if err != nil { + os.RemoveAll(tmpDir) + } + }() + for _, p 
:= range paths { + if p[len(p)-1] == filepath.Separator { + if err = os.MkdirAll(filepath.Join(tmpDir, p), 0700); err != nil { + return + } + } else { + if err = ioutil.WriteFile(filepath.Join(tmpDir, p), nil, 0600); err != nil { + return + } + } + } + archive, err := Tar(tmpDir, Uncompressed) + if err != nil { + return + } + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + os.RemoveAll(tmpDir) + return err + }), nil +} + +func readDirContents(root string) ([]string, error) { + var files []string + err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if path == root { + return nil + } + rel, err := filepath.Rel(root, path) + if err != nil { + return err + } + if info.IsDir() { + rel = rel + "/" + } + files = append(files, rel) + return nil + }) + if err != nil { + return nil, err + } + return files, nil +} diff --git a/vendor/github.com/moby/moby/pkg/archive/example_changes.go b/vendor/github.com/moby/moby/pkg/archive/example_changes.go new file mode 100644 index 000000000..cedd46a40 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/example_changes.go @@ -0,0 +1,97 @@ +// +build ignore + +// Simple tool to create an archive stream from an old and new directory +// +// By default it will stream the comparison of two temporary directories with junk files +package main + +import ( + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "path" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/archive" +) + +var ( + flDebug = flag.Bool("D", false, "debugging output") + flNewDir = flag.String("newdir", "", "") + flOldDir = flag.String("olddir", "", "") + log = logrus.New() +) + +func main() { + flag.Usage = func() { + fmt.Println("Produce a tar from comparing two directory paths. 
By default a demo tar is created of around 200 files (including hardlinks)") + fmt.Printf("%s [OPTIONS]\n", os.Args[0]) + flag.PrintDefaults() + } + flag.Parse() + log.Out = os.Stderr + if (len(os.Getenv("DEBUG")) > 0) || *flDebug { + logrus.SetLevel(logrus.DebugLevel) + } + var newDir, oldDir string + + if len(*flNewDir) == 0 { + var err error + newDir, err = ioutil.TempDir("", "docker-test-newDir") + if err != nil { + log.Fatal(err) + } + defer os.RemoveAll(newDir) + if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil { + log.Fatal(err) + } + } else { + newDir = *flNewDir + } + + if len(*flOldDir) == 0 { + oldDir, err := ioutil.TempDir("", "docker-test-oldDir") + if err != nil { + log.Fatal(err) + } + defer os.RemoveAll(oldDir) + } else { + oldDir = *flOldDir + } + + changes, err := archive.ChangesDirs(newDir, oldDir) + if err != nil { + log.Fatal(err) + } + + a, err := archive.ExportChanges(newDir, changes) + if err != nil { + log.Fatal(err) + } + defer a.Close() + + i, err := io.Copy(os.Stdout, a) + if err != nil && err != io.EOF { + log.Fatal(err) + } + fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i) +} + +func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { + fileData := []byte("fooo") + for n := 0; n < numberOfFiles; n++ { + fileName := fmt.Sprintf("file-%d", n) + if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { + return 0, err + } + if makeLinks { + if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { + return 0, err + } + } + } + totalSize := numberOfFiles * len(fileData) + return totalSize, nil +} diff --git a/vendor/github.com/moby/moby/pkg/archive/testdata/broken.tar b/vendor/github.com/moby/moby/pkg/archive/testdata/broken.tar new file mode 100644 index 0000000000000000000000000000000000000000..8f10ea6b87d3eb4fed572349dfe87695603b10a5 GIT binary patch literal 13824 zcmeHN>rxv>7UtLfn5Q@^l8gXrG&7O_li)0oQBai)6v9rjo&-ixOPXbFo(r;aaqT1Q zi|o_vJM6y3ey8Um2^?(fm~vH66==Hq^tqqYr_U$~f~3CkaX-4=)VFkfMbAE0zj=1W zFdGeXOK)!KtrgwSO|!8=t&huAhCPiFI|54|O6#g{AByje_D5`gZ4lbN_tD%y+P?+6 zW}mCyJbT6dM$<6v?SB_8uxS5j5M6u>C%C=+&BoS!{NIK7SFYLLXgq9fL;u??&1{)C_QVb?f0pB4xfD_C1pX2f z=LE&>$4O)llEszRik&8tAi~^>9~IXb2tQsXkop&XF!hz8gWXO)O@R9>nS~7H1w&*U zWf1ryXPidjED|qMClc|F!YuB;N}eT-8}IBqwJ!w!F&$m$r;a;(N7!YIEb7h<=ej}& zT~f;Cd!ZOC&mX2n zv4)UvkOa{z8}jxVC6bTq+3^R;Sok8c6EQsN&k9^`&h(Hc32JVwt-Hrj<{`vG3V< zCk?#){6BW>!9@+(L2u}{Jos}CZh!u_HaA;$dH(--^ZzaF-*=tS5&i^O)@Me!3BwBQ`@=VE zIl)Fp0MG z@%2K`G+^8HA?T&;xGZB%_q<@Vt&(_!w-gfXxk@mb9|fb)1BuBGk_ptuvx%G~pq0Kb zb&?6Szj_3#ClOiI_3vu1e+mOX z9k`Og2B5RmN7LGZ)c;3%E%Ip__9KKUf&G&zD9jkJNr-{ibNby{ds> zUrSU_0z^Wf<)}gE{Jb22kgArW_I#nO79{eFvL6rZP*4oJ7H%7}fn5i&1ZT@5hDK4~ z(U`5S#`Fws86Z{2P=gP6usiI=mKaOr@4W|(?6Ye5$Oayf(LUxEb zaN*HO8gZBg{sZJ1)pg4>36^kmC*dQ2;oE@^#)cw_*aI^!cM=y1Rqga(?Ey`Mja44@ zco?Vs7`J_y5ir%m6vXp*y&Gb{4lfBvR0R>wjxNBA^zHAzdc;~eK6(s=AB|{$OM8p} zp9LwiIkAyG5Q$+F3`7h$CPJbL(j-h1h61!ZViYo4dBXOg@lop12w4VYz!&$vL+Po-n0lE6B8Y;6$Ar89(FQ zU43m0VVC)g+}A0GY(H3=vGXH;5|6sFnZk+NN-WF&+)64KnDBNmlR?P<{j247c6ZGs zY`hF!K4&Hi(0r~#=6sH0f#>;~|6uT_GuPArovwt~PT&t2-pNh;x9aMe7i;!lK!(<$ z?d`g5*7a@bJ?(y(Y4ln98)|Cinp8V=gdKs-N$TT&k8N344C6y&*H}a~{9Pg&%cB8( zs3gwCMEH-=;aI?u+)#>TQj}R!`jyO-QsK*KZS|lK9+9#7oV0B(la+@sRbyfJf~*mY z#+u;OA2B@66aq^nOW6`=t5qYdRV{oFkE8T+GhJI-*NldTtcr!I|PQf({z2i zZs;`}x~m6ks)bXh@+($$(s>pJ`5X6~16{UfoJC(mW1b(MtJcpN$ZBT3r1B`&Cx9{-iF=!{A}z(ob033DW~d!*9$cfm zVNC%z6l$8Qz0LiPv&`A!8a*yd3zi-in+*e-!2$MiQNyE>1xX!65{vsnGKkf9!|0+OGBAb= 
z5*&U!Rl91sZq^%6Di#9<<87G)rv;99!{p6oE&}gq)LXeeJT)kYlsjz{ehkbMY(O`q zGvc6vviAh-6>EFt+I|*)$Z&%o;(ob2LAmI= zd);1Ux&vAHF3sW+ZYtInM5`7V!gWe@@A3}gzBN4OzKHcFXhsnBZ62vkM}c;c8?C16|}T)I>F_`E4y<`7O_Uv z_IIGuK3}j6k8x0(NE^)|N^6ztuoF5wcqyCPP4-b>1H5)kQM(q_kYzo37tjs2w1@@5 z)pou5q*BNKlggS#-4TOxF*--bZwQgZIP>8>Wh4R6qJg1trGj7P+M9C-U$bgV0-Bbc zM}8SyaI1`5o3Hn=gK~dij~yq2v7>PXETRIqq!En36W>+P9az*N;)5;FK054lzkPPH zcY4hR*Orc{l5us$Y*nZ!(@__9wdDn6|B~BL+;v!B^Cr(N`)UtH54-56s#rGO&e@Q}~KNYPdQ94MZxA|gP9PSIqe@Ff$9bNNvws)xH zUYfZ#^MIJly?f4ly_CL`QQoB~o&>3jKAlL=*#tHX$;*%#;^sVnJHGU0={L0dh$?du z$V*u|2o=sbG6HQV;$?~-5Xh?Gjf~m#{@1wY+1@T!Us<#xZ;2Rn{Y@!B=|jZ;TY#GL zQet9G=4h_z5?#7$NWf6BJyZ3f$1aFp02S_lpyVtB;|niLX54VbZP`xU1YMSiGnf#! zBhWBJBLfCg3eCtIG~av^x3Yo4twnBx#0a&E>6G9&~+z{;Wn%CtG>DYD1(pjqYiYL oJsf9Rk?Q4-IWqA2mih3}{ZBUT=3UD@m3s}`Yv5i3pOOat4?XSI`2YX_ literal 0 HcmV?d00001 diff --git a/vendor/github.com/moby/moby/pkg/archive/time_linux.go b/vendor/github.com/moby/moby/pkg/archive/time_linux.go new file mode 100644 index 000000000..3448569b1 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/time_linux.go @@ -0,0 +1,16 @@ +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + if time.IsZero() { + // Return UTIME_OMIT special value + ts.Sec = 0 + ts.Nsec = ((1 << 30) - 2) + return + } + return syscall.NsecToTimespec(time.UnixNano()) +} diff --git a/vendor/github.com/moby/moby/pkg/archive/time_unsupported.go b/vendor/github.com/moby/moby/pkg/archive/time_unsupported.go new file mode 100644 index 000000000..e85aac054 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/time_unsupported.go @@ -0,0 +1,16 @@ +// +build !linux + +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + nsec := int64(0) + if !time.IsZero() { + nsec = time.UnixNano() + } + return syscall.NsecToTimespec(nsec) +} diff --git a/vendor/github.com/moby/moby/pkg/archive/utils_test.go b/vendor/github.com/moby/moby/pkg/archive/utils_test.go new file mode 100644 index 000000000..01b9e92d1 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/utils_test.go @@ -0,0 +1,166 @@ +package archive + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "time" +) + +var testUntarFns = map[string]func(string, io.Reader) error{ + "untar": func(dest string, r io.Reader) error { + return Untar(r, dest, nil) + }, + "applylayer": func(dest string, r io.Reader) error { + _, err := ApplyLayer(dest, r) + return err + }, +} + +// testBreakout is a helper function that, within the provided `tmpdir` directory, +// creates a `victim` folder with a generated `hello` file in it. +// `untar` extracts to a directory named `dest`, the tar file created from `headers`. +// +// Here are the tested scenarios: +// - removed `victim` folder (write) +// - removed files from `victim` folder (write) +// - new files in `victim` folder (write) +// - modified files in `victim` folder (write) +// - file in `dest` with same content as `victim/hello` (read) +// +// When using testBreakout make sure you cover one of the scenarios listed above. 
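+// An illustrative (editorial) example of the kind of malicious header these
+// tests construct; the hardlink's Linkname points outside the extraction
+// root:
+//
+//	&tar.Header{
+//		Name:     "dotdot",
+//		Typeflag: tar.TypeLink,
+//		Linkname: "../victim/hello",
+//		Mode:     0644,
+//	}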
+func testBreakout(untarFn string, tmpdir string, headers []*tar.Header) error { + tmpdir, err := ioutil.TempDir("", tmpdir) + if err != nil { + return err + } + defer os.RemoveAll(tmpdir) + + dest := filepath.Join(tmpdir, "dest") + if err := os.Mkdir(dest, 0755); err != nil { + return err + } + + victim := filepath.Join(tmpdir, "victim") + if err := os.Mkdir(victim, 0755); err != nil { + return err + } + hello := filepath.Join(victim, "hello") + helloData, err := time.Now().MarshalText() + if err != nil { + return err + } + if err := ioutil.WriteFile(hello, helloData, 0644); err != nil { + return err + } + helloStat, err := os.Stat(hello) + if err != nil { + return err + } + + reader, writer := io.Pipe() + go func() { + t := tar.NewWriter(writer) + for _, hdr := range headers { + t.WriteHeader(hdr) + } + t.Close() + }() + + untar := testUntarFns[untarFn] + if untar == nil { + return fmt.Errorf("could not find untar function %q in testUntarFns", untarFn) + } + if err := untar(dest, reader); err != nil { + if _, ok := err.(breakoutError); !ok { + // If untar returns an error unrelated to an archive breakout, + // then consider this an unexpected error and abort. + return err + } + // Here, untar detected the breakout. + // Let's move on verifying that indeed there was no breakout. + fmt.Printf("breakoutError: %v\n", err) + } + + // Check victim folder + f, err := os.Open(victim) + if err != nil { + // codepath taken if victim folder was removed + return fmt.Errorf("archive breakout: error reading %q: %v", victim, err) + } + defer f.Close() + + // Check contents of victim folder + // + // We are only interested in getting 2 files from the victim folder, because if all is well + // we expect only one result, the `hello` file. If there is a second result, it cannot + // hold the same name `hello` and we assume that a new file got created in the victim folder. + // That is enough to detect an archive breakout. + names, err := f.Readdirnames(2) + if err != nil { + // codepath taken if victim is not a folder + return fmt.Errorf("archive breakout: error reading directory content of %q: %v", victim, err) + } + for _, name := range names { + if name != "hello" { + // codepath taken if new file was created in victim folder + return fmt.Errorf("archive breakout: new file %q", name) + } + } + + // Check victim/hello + f, err = os.Open(hello) + if err != nil { + // codepath taken if read permissions were removed + return fmt.Errorf("archive breakout: could not lstat %q: %v", hello, err) + } + defer f.Close() + b, err := ioutil.ReadAll(f) + if err != nil { + return err + } + fi, err := f.Stat() + if err != nil { + return err + } + if helloStat.IsDir() != fi.IsDir() || + // TODO: cannot check for fi.ModTime() change + helloStat.Mode() != fi.Mode() || + helloStat.Size() != fi.Size() || + !bytes.Equal(helloData, b) { + // codepath taken if hello has been modified + return fmt.Errorf("archive breakout: file %q has been modified. Contents: expected=%q, got=%q. FileInfo: expected=%#v, got=%#v", hello, helloData, b, helloStat, fi) + } + + // Check that nothing in dest/ has the same content as victim/hello. + // Since victim/hello was generated with time.Now(), it is safe to assume + // that any file whose content matches exactly victim/hello, managed somehow + // to access victim/hello. 
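+	// Editorial note: helloData is a fresh time.Now() marshalling, so any file
+	// under dest whose bytes match it can only have come from reading
+	// victim/hello; the walk below uses that as its read-breakout oracle:
+	//
+	//	if bytes.Equal(helloData, b) { /* dest file was filled from victim/hello */ }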
+ return filepath.Walk(dest, func(path string, info os.FileInfo, err error) error { + if info.IsDir() { + if err != nil { + // skip directory if error + return filepath.SkipDir + } + // enter directory + return nil + } + if err != nil { + // skip file if error + return nil + } + b, err := ioutil.ReadFile(path) + if err != nil { + // Houston, we have a problem. Aborting (space)walk. + return err + } + if bytes.Equal(helloData, b) { + return fmt.Errorf("archive breakout: file %q has been accessed via %q", hello, path) + } + return nil + }) +} diff --git a/vendor/github.com/moby/moby/pkg/archive/whiteouts.go b/vendor/github.com/moby/moby/pkg/archive/whiteouts.go new file mode 100644 index 000000000..d20478a10 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/whiteouts.go @@ -0,0 +1,23 @@ +package archive + +// Whiteouts are files with a special meaning for the layered filesystem. +// Docker uses AUFS whiteout files inside exported archives. In other +// filesystems these files are generated/handled on tar creation/extraction. + +// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a +// filename this means that file has been removed from the base layer. +const WhiteoutPrefix = ".wh." + +// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not +// for removing an actual file. Normally these files are excluded from exported +// archives. +const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix + +// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other +// layers. Normally these should not go into exported archives and all changed +// hardlinks should be copied to the top layer. +const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk" + +// WhiteoutOpaqueDir file means directory has been made opaque - meaning +// readdir calls to this directory do not follow to lower layers. +const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq" diff --git a/vendor/github.com/moby/moby/pkg/archive/wrap.go b/vendor/github.com/moby/moby/pkg/archive/wrap.go new file mode 100644 index 000000000..b39d12c87 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/wrap.go @@ -0,0 +1,59 @@ +package archive + +import ( + "archive/tar" + "bytes" + "io" +) + +// Generate generates a new archive from the content provided +// as input. +// +// `files` is a sequence of path/content pairs. A new file is +// added to the archive for each pair. +// If the last pair is incomplete, the file is created with an +// empty content. For example: +// +// Generate("foo.txt", "hello world", "emptyfile") +// +// The above call will return an archive with 2 files: +// * ./foo.txt with content "hello world" +// * ./empty with empty content +// +// FIXME: stream content instead of buffering +// FIXME: specify permissions and other archive metadata +func Generate(input ...string) (io.Reader, error) { + files := parseStringPairs(input...) 
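+	// Editorial note: parseStringPairs (defined below) pads a trailing odd
+	// element with empty content, which is what makes the doc comment's
+	// "emptyfile" example work:
+	//
+	//	parseStringPairs("foo.txt", "hello world", "emptyfile")
+	//	// => [["foo.txt", "hello world"], ["emptyfile", ""]]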
+ buf := new(bytes.Buffer) + tw := tar.NewWriter(buf) + for _, file := range files { + name, content := file[0], file[1] + hdr := &tar.Header{ + Name: name, + Size: int64(len(content)), + } + if err := tw.WriteHeader(hdr); err != nil { + return nil, err + } + if _, err := tw.Write([]byte(content)); err != nil { + return nil, err + } + } + if err := tw.Close(); err != nil { + return nil, err + } + return buf, nil +} + +func parseStringPairs(input ...string) (output [][2]string) { + output = make([][2]string, 0, len(input)/2+1) + for i := 0; i < len(input); i += 2 { + var pair [2]string + pair[0] = input[i] + if i+1 < len(input) { + pair[1] = input[i+1] + } + output = append(output, pair) + } + return +} diff --git a/vendor/github.com/moby/moby/pkg/archive/wrap_test.go b/vendor/github.com/moby/moby/pkg/archive/wrap_test.go new file mode 100644 index 000000000..46ab36697 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/archive/wrap_test.go @@ -0,0 +1,98 @@ +package archive + +import ( + "archive/tar" + "bytes" + "io" + "testing" +) + +func TestGenerateEmptyFile(t *testing.T) { + archive, err := Generate("emptyFile") + if err != nil { + t.Fatal(err) + } + if archive == nil { + t.Fatal("The generated archive should not be nil.") + } + + expectedFiles := [][]string{ + {"emptyFile", ""}, + } + + tr := tar.NewReader(archive) + actualFiles := make([][]string, 0, 10) + i := 0 + for { + hdr, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + buf := new(bytes.Buffer) + buf.ReadFrom(tr) + content := buf.String() + actualFiles = append(actualFiles, []string{hdr.Name, content}) + i++ + } + if len(actualFiles) != len(expectedFiles) { + t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles)) + } + for i := 0; i < len(expectedFiles); i++ { + actual := actualFiles[i] + expected := expectedFiles[i] + if actual[0] != expected[0] { + t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0]) + } + if actual[1] != expected[1] { + t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1]) + } + } +} + +func TestGenerateWithContent(t *testing.T) { + archive, err := Generate("file", "content") + if err != nil { + t.Fatal(err) + } + if archive == nil { + t.Fatal("The generated archive should not be nil.") + } + + expectedFiles := [][]string{ + {"file", "content"}, + } + + tr := tar.NewReader(archive) + actualFiles := make([][]string, 0, 10) + i := 0 + for { + hdr, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + buf := new(bytes.Buffer) + buf.ReadFrom(tr) + content := buf.String() + actualFiles = append(actualFiles, []string{hdr.Name, content}) + i++ + } + if len(actualFiles) != len(expectedFiles) { + t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles)) + } + for i := 0; i < len(expectedFiles); i++ { + actual := actualFiles[i] + expected := expectedFiles[i] + if actual[0] != expected[0] { + t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0]) + } + if actual[1] != expected[1] { + t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1]) + } + } +} diff --git a/vendor/github.com/moby/moby/pkg/authorization/api.go b/vendor/github.com/moby/moby/pkg/authorization/api.go new file mode 100644 index 000000000..e8d7996c6 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/authorization/api.go @@ -0,0 +1,88 @@ +package authorization + +import ( + "crypto/x509" + "encoding/json" + "encoding/pem" +) + +const ( + 
// AuthZApiRequest is the url for daemon request authorization + AuthZApiRequest = "AuthZPlugin.AuthZReq" + + // AuthZApiResponse is the url for daemon response authorization + AuthZApiResponse = "AuthZPlugin.AuthZRes" + + // AuthZApiImplements is the name of the interface all AuthZ plugins implement + AuthZApiImplements = "authz" +) + +// PeerCertificate is a wrapper around x509.Certificate which provides a sane +// encoding/decoding to/from PEM format and JSON. +type PeerCertificate x509.Certificate + +// MarshalJSON returns the JSON encoded pem bytes of a PeerCertificate. +func (pc *PeerCertificate) MarshalJSON() ([]byte, error) { + b := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: pc.Raw}) + return json.Marshal(b) +} + +// UnmarshalJSON populates a new PeerCertificate struct from JSON data. +func (pc *PeerCertificate) UnmarshalJSON(b []byte) error { + var buf []byte + if err := json.Unmarshal(b, &buf); err != nil { + return err + } + derBytes, _ := pem.Decode(buf) + c, err := x509.ParseCertificate(derBytes.Bytes) + if err != nil { + return err + } + *pc = PeerCertificate(*c) + return nil +} + +// Request holds data required for authZ plugins +type Request struct { + // User holds the user extracted by AuthN mechanism + User string `json:"User,omitempty"` + + // UserAuthNMethod holds the mechanism used to extract user details (e.g., krb) + UserAuthNMethod string `json:"UserAuthNMethod,omitempty"` + + // RequestMethod holds the HTTP method (GET/POST/PUT) + RequestMethod string `json:"RequestMethod,omitempty"` + + // RequestUri holds the full HTTP uri (e.g., /v1.21/version) + RequestURI string `json:"RequestUri,omitempty"` + + // RequestBody stores the raw request body sent to the docker daemon + RequestBody []byte `json:"RequestBody,omitempty"` + + // RequestHeaders stores the raw request headers sent to the docker daemon + RequestHeaders map[string]string `json:"RequestHeaders,omitempty"` + + // RequestPeerCertificates stores the request's TLS peer certificates in PEM format + RequestPeerCertificates []*PeerCertificate `json:"RequestPeerCertificates,omitempty"` + + // ResponseStatusCode stores the status code returned from docker daemon + ResponseStatusCode int `json:"ResponseStatusCode,omitempty"` + + // ResponseBody stores the raw response body sent from docker daemon + ResponseBody []byte `json:"ResponseBody,omitempty"` + + // ResponseHeaders stores the response headers sent to the docker daemon + ResponseHeaders map[string]string `json:"ResponseHeaders,omitempty"` +} + +// Response represents authZ plugin response +type Response struct { + // Allow indicating whether the user is allowed or not + Allow bool `json:"Allow"` + + // Msg stores the authorization message + Msg string `json:"Msg,omitempty"` + + // Err stores a message in case there's an error + Err string `json:"Err,omitempty"` +} diff --git a/vendor/github.com/moby/moby/pkg/authorization/api_test.go b/vendor/github.com/moby/moby/pkg/authorization/api_test.go new file mode 100644 index 000000000..103194906 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/authorization/api_test.go @@ -0,0 +1,75 @@ +package authorization + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "math/big" + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestPeerCertificateMarshalJSON(t *testing.T) { + template := &x509.Certificate{ + IsCA: true, + BasicConstraintsValid: true, + SubjectKeyId: []byte{1, 2, 3}, + SerialNumber: big.NewInt(1234), + 
Subject: pkix.Name{ + Country: []string{"Earth"}, + Organization: []string{"Mother Nature"}, + }, + NotBefore: time.Now(), + NotAfter: time.Now().AddDate(5, 5, 5), + + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + } + // generate private key + privatekey, err := rsa.GenerateKey(rand.Reader, 2048) + require.NoError(t, err) + publickey := &privatekey.PublicKey + + // create a self-signed certificate. template = parent + var parent = template + raw, err := x509.CreateCertificate(rand.Reader, template, parent, publickey, privatekey) + require.NoError(t, err) + + cert, err := x509.ParseCertificate(raw) + require.NoError(t, err) + + var certs = []*x509.Certificate{cert} + addr := "www.authz.com/auth" + req, err := http.NewRequest("GET", addr, nil) + require.NoError(t, err) + + req.RequestURI = addr + req.TLS = &tls.ConnectionState{} + req.TLS.PeerCertificates = certs + req.Header.Add("header", "value") + + for _, c := range req.TLS.PeerCertificates { + pcObj := PeerCertificate(*c) + + t.Run("Marshalling :", func(t *testing.T) { + raw, err = pcObj.MarshalJSON() + require.NotNil(t, raw) + require.Nil(t, err) + }) + + t.Run("UnMarshalling :", func(t *testing.T) { + err := pcObj.UnmarshalJSON(raw) + require.Nil(t, err) + require.Equal(t, "Earth", pcObj.Subject.Country[0]) + require.Equal(t, true, pcObj.IsCA) + + }) + + } + +} diff --git a/vendor/github.com/moby/moby/pkg/authorization/authz.go b/vendor/github.com/moby/moby/pkg/authorization/authz.go new file mode 100644 index 000000000..dc9a9ae56 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/authorization/authz.go @@ -0,0 +1,186 @@ +package authorization + +import ( + "bufio" + "bytes" + "fmt" + "io" + "net/http" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/ioutils" +) + +const maxBodySize = 1048576 // 1MB + +// NewCtx creates new authZ context, it is used to store authorization information related to a specific docker +// REST http session +// A context provides two method: +// Authenticate Request: +// Call authZ plugins with current REST request and AuthN response +// Request contains full HTTP packet sent to the docker daemon +// https://docs.docker.com/engine/reference/api/ +// +// Authenticate Response: +// Call authZ plugins with full info about current REST request, REST response and AuthN response +// The response from this method may contains content that overrides the daemon response +// This allows authZ plugins to filter privileged content +// +// If multiple authZ plugins are specified, the block/allow decision is based on ANDing all plugin results +// For response manipulation, the response from each plugin is piped between plugins. 
Plugin execution order +// is determined according to daemon parameters +func NewCtx(authZPlugins []Plugin, user, userAuthNMethod, requestMethod, requestURI string) *Ctx { + return &Ctx{ + plugins: authZPlugins, + user: user, + userAuthNMethod: userAuthNMethod, + requestMethod: requestMethod, + requestURI: requestURI, + } +} + +// Ctx stores a single request-response interaction context +type Ctx struct { + user string + userAuthNMethod string + requestMethod string + requestURI string + plugins []Plugin + // authReq stores the cached request object for the current transaction + authReq *Request +} + +// AuthZRequest authorized the request to the docker daemon using authZ plugins +func (ctx *Ctx) AuthZRequest(w http.ResponseWriter, r *http.Request) error { + var body []byte + if sendBody(ctx.requestURI, r.Header) && r.ContentLength > 0 && r.ContentLength < maxBodySize { + var err error + body, r.Body, err = drainBody(r.Body) + if err != nil { + return err + } + } + + var h bytes.Buffer + if err := r.Header.Write(&h); err != nil { + return err + } + + ctx.authReq = &Request{ + User: ctx.user, + UserAuthNMethod: ctx.userAuthNMethod, + RequestMethod: ctx.requestMethod, + RequestURI: ctx.requestURI, + RequestBody: body, + RequestHeaders: headers(r.Header), + } + + if r.TLS != nil { + for _, c := range r.TLS.PeerCertificates { + pc := PeerCertificate(*c) + ctx.authReq.RequestPeerCertificates = append(ctx.authReq.RequestPeerCertificates, &pc) + } + } + + for _, plugin := range ctx.plugins { + logrus.Debugf("AuthZ request using plugin %s", plugin.Name()) + + authRes, err := plugin.AuthZRequest(ctx.authReq) + if err != nil { + return fmt.Errorf("plugin %s failed with error: %s", plugin.Name(), err) + } + + if !authRes.Allow { + return newAuthorizationError(plugin.Name(), authRes.Msg) + } + } + + return nil +} + +// AuthZResponse authorized and manipulates the response from docker daemon using authZ plugins +func (ctx *Ctx) AuthZResponse(rm ResponseModifier, r *http.Request) error { + ctx.authReq.ResponseStatusCode = rm.StatusCode() + ctx.authReq.ResponseHeaders = headers(rm.Header()) + + if sendBody(ctx.requestURI, rm.Header()) { + ctx.authReq.ResponseBody = rm.RawBody() + } + + for _, plugin := range ctx.plugins { + logrus.Debugf("AuthZ response using plugin %s", plugin.Name()) + + authRes, err := plugin.AuthZResponse(ctx.authReq) + if err != nil { + return fmt.Errorf("plugin %s failed with error: %s", plugin.Name(), err) + } + + if !authRes.Allow { + return newAuthorizationError(plugin.Name(), authRes.Msg) + } + } + + rm.FlushAll() + + return nil +} + +// drainBody dump the body (if its length is less than 1MB) without modifying the request state +func drainBody(body io.ReadCloser) ([]byte, io.ReadCloser, error) { + bufReader := bufio.NewReaderSize(body, maxBodySize) + newBody := ioutils.NewReadCloserWrapper(bufReader, func() error { return body.Close() }) + + data, err := bufReader.Peek(maxBodySize) + // Body size exceeds max body size + if err == nil { + logrus.Warnf("Request body is larger than: '%d' skipping body", maxBodySize) + return nil, newBody, nil + } + // Body size is less than maximum size + if err == io.EOF { + return data, newBody, nil + } + // Unknown error + return nil, newBody, err +} + +// sendBody returns true when request/response body should be sent to AuthZPlugin +func sendBody(url string, header http.Header) bool { + // Skip body for auth endpoint + if strings.HasSuffix(url, "/auth") { + return false + } + + // body is sent only for text or json messages + return 
header.Get("Content-Type") == "application/json" +} + +// headers returns flatten version of the http headers excluding authorization +func headers(header http.Header) map[string]string { + v := make(map[string]string, 0) + for k, values := range header { + // Skip authorization headers + if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "X-Registry-Config") || strings.EqualFold(k, "X-Registry-Auth") { + continue + } + for _, val := range values { + v[k] = val + } + } + return v +} + +// authorizationError represents an authorization deny error +type authorizationError struct { + error +} + +// HTTPErrorStatusCode returns the authorization error status code (forbidden) +func (e authorizationError) HTTPErrorStatusCode() int { + return http.StatusForbidden +} + +func newAuthorizationError(plugin, msg string) authorizationError { + return authorizationError{error: fmt.Errorf("authorization denied by plugin %s: %s", plugin, msg)} +} diff --git a/vendor/github.com/moby/moby/pkg/authorization/authz_unix_test.go b/vendor/github.com/moby/moby/pkg/authorization/authz_unix_test.go new file mode 100644 index 000000000..a692802d5 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/authorization/authz_unix_test.go @@ -0,0 +1,282 @@ +// +build !windows + +// TODO Windows: This uses a Unix socket for testing. This might be possible +// to port to Windows using a named pipe instead. + +package authorization + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "os" + "path" + "reflect" + "strings" + "testing" + + "github.com/docker/docker/pkg/plugins" + "github.com/docker/go-connections/tlsconfig" + "github.com/gorilla/mux" +) + +const ( + pluginAddress = "authz-test-plugin.sock" +) + +func TestAuthZRequestPluginError(t *testing.T) { + server := authZPluginTestServer{t: t} + server.start() + defer server.stop() + + authZPlugin := createTestPlugin(t) + + request := Request{ + User: "user", + RequestBody: []byte("sample body"), + RequestURI: "www.authz.com/auth", + RequestMethod: "GET", + RequestHeaders: map[string]string{"header": "value"}, + } + server.replayResponse = Response{ + Err: "an error", + } + + actualResponse, err := authZPlugin.AuthZRequest(&request) + if err != nil { + t.Fatalf("Failed to authorize request %v", err) + } + + if !reflect.DeepEqual(server.replayResponse, *actualResponse) { + t.Fatal("Response must be equal") + } + if !reflect.DeepEqual(request, server.recordedRequest) { + t.Fatal("Requests must be equal") + } +} + +func TestAuthZRequestPlugin(t *testing.T) { + server := authZPluginTestServer{t: t} + server.start() + defer server.stop() + + authZPlugin := createTestPlugin(t) + + request := Request{ + User: "user", + RequestBody: []byte("sample body"), + RequestURI: "www.authz.com/auth", + RequestMethod: "GET", + RequestHeaders: map[string]string{"header": "value"}, + } + server.replayResponse = Response{ + Allow: true, + Msg: "Sample message", + } + + actualResponse, err := authZPlugin.AuthZRequest(&request) + if err != nil { + t.Fatalf("Failed to authorize request %v", err) + } + + if !reflect.DeepEqual(server.replayResponse, *actualResponse) { + t.Fatal("Response must be equal") + } + if !reflect.DeepEqual(request, server.recordedRequest) { + t.Fatal("Requests must be equal") + } +} + +func TestAuthZResponsePlugin(t *testing.T) { + server := authZPluginTestServer{t: t} + server.start() + defer server.stop() + + authZPlugin := createTestPlugin(t) + + request := Request{ + User: "user", + RequestURI: 
"something.com/auth", + RequestBody: []byte("sample body"), + } + server.replayResponse = Response{ + Allow: true, + Msg: "Sample message", + } + + actualResponse, err := authZPlugin.AuthZResponse(&request) + if err != nil { + t.Fatalf("Failed to authorize request %v", err) + } + + if !reflect.DeepEqual(server.replayResponse, *actualResponse) { + t.Fatal("Response must be equal") + } + if !reflect.DeepEqual(request, server.recordedRequest) { + t.Fatal("Requests must be equal") + } +} + +func TestResponseModifier(t *testing.T) { + r := httptest.NewRecorder() + m := NewResponseModifier(r) + m.Header().Set("h1", "v1") + m.Write([]byte("body")) + m.WriteHeader(http.StatusInternalServerError) + + m.FlushAll() + if r.Header().Get("h1") != "v1" { + t.Fatalf("Header value must exists %s", r.Header().Get("h1")) + } + if !reflect.DeepEqual(r.Body.Bytes(), []byte("body")) { + t.Fatalf("Body value must exists %s", r.Body.Bytes()) + } + if r.Code != http.StatusInternalServerError { + t.Fatalf("Status code must be correct %d", r.Code) + } +} + +func TestDrainBody(t *testing.T) { + tests := []struct { + length int // length is the message length send to drainBody + expectedBodyLength int // expectedBodyLength is the expected body length after drainBody is called + }{ + {10, 10}, // Small message size + {maxBodySize - 1, maxBodySize - 1}, // Max message size + {maxBodySize * 2, 0}, // Large message size (skip copying body) + + } + + for _, test := range tests { + msg := strings.Repeat("a", test.length) + body, closer, err := drainBody(ioutil.NopCloser(bytes.NewReader([]byte(msg)))) + if err != nil { + t.Fatal(err) + } + if len(body) != test.expectedBodyLength { + t.Fatalf("Body must be copied, actual length: '%d'", len(body)) + } + if closer == nil { + t.Fatal("Closer must not be nil") + } + modified, err := ioutil.ReadAll(closer) + if err != nil { + t.Fatalf("Error must not be nil: '%v'", err) + } + if len(modified) != len(msg) { + t.Fatalf("Result should not be truncated. 
Original length: '%d', new length: '%d'", len(msg), len(modified)) + } + } +} + +func TestResponseModifierOverride(t *testing.T) { + r := httptest.NewRecorder() + m := NewResponseModifier(r) + m.Header().Set("h1", "v1") + m.Write([]byte("body")) + m.WriteHeader(http.StatusInternalServerError) + + overrideHeader := make(http.Header) + overrideHeader.Add("h1", "v2") + overrideHeaderBytes, err := json.Marshal(overrideHeader) + if err != nil { + t.Fatalf("override header failed %v", err) + } + + m.OverrideHeader(overrideHeaderBytes) + m.OverrideBody([]byte("override body")) + m.OverrideStatusCode(http.StatusNotFound) + m.FlushAll() + if r.Header().Get("h1") != "v2" { + t.Fatalf("Header value must exists %s", r.Header().Get("h1")) + } + if !reflect.DeepEqual(r.Body.Bytes(), []byte("override body")) { + t.Fatalf("Body value must exists %s", r.Body.Bytes()) + } + if r.Code != http.StatusNotFound { + t.Fatalf("Status code must be correct %d", r.Code) + } +} + +// createTestPlugin creates a new sample authorization plugin +func createTestPlugin(t *testing.T) *authorizationPlugin { + pwd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + + client, err := plugins.NewClient("unix:///"+path.Join(pwd, pluginAddress), &tlsconfig.Options{InsecureSkipVerify: true}) + if err != nil { + t.Fatalf("Failed to create client %v", err) + } + + return &authorizationPlugin{name: "plugin", plugin: client} +} + +// AuthZPluginTestServer is a simple server that implements the authZ plugin interface +type authZPluginTestServer struct { + listener net.Listener + t *testing.T + // request stores the request sent from the daemon to the plugin + recordedRequest Request + // response stores the response sent from the plugin to the daemon + replayResponse Response + server *httptest.Server +} + +// start starts the test server that implements the plugin +func (t *authZPluginTestServer) start() { + r := mux.NewRouter() + l, err := net.Listen("unix", pluginAddress) + if err != nil { + t.t.Fatal(err) + } + t.listener = l + r.HandleFunc("/Plugin.Activate", t.activate) + r.HandleFunc("/"+AuthZApiRequest, t.auth) + r.HandleFunc("/"+AuthZApiResponse, t.auth) + t.server = &httptest.Server{ + Listener: l, + Config: &http.Server{ + Handler: r, + Addr: pluginAddress, + }, + } + t.server.Start() +} + +// stop stops the test server that implements the plugin +func (t *authZPluginTestServer) stop() { + t.server.Close() + os.Remove(pluginAddress) + if t.listener != nil { + t.listener.Close() + } +} + +// auth is a used to record/replay the authentication api messages +func (t *authZPluginTestServer) auth(w http.ResponseWriter, r *http.Request) { + t.recordedRequest = Request{} + body, err := ioutil.ReadAll(r.Body) + if err != nil { + t.t.Fatal(err) + } + r.Body.Close() + json.Unmarshal(body, &t.recordedRequest) + b, err := json.Marshal(t.replayResponse) + if err != nil { + t.t.Fatal(err) + } + w.Write(b) +} + +func (t *authZPluginTestServer) activate(w http.ResponseWriter, r *http.Request) { + b, err := json.Marshal(plugins.Manifest{Implements: []string{AuthZApiImplements}}) + if err != nil { + t.t.Fatal(err) + } + w.Write(b) +} diff --git a/vendor/github.com/moby/moby/pkg/authorization/middleware.go b/vendor/github.com/moby/moby/pkg/authorization/middleware.go new file mode 100644 index 000000000..7789a758d --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/authorization/middleware.go @@ -0,0 +1,110 @@ +package authorization + +import ( + "net/http" + "sync" + + "github.com/Sirupsen/logrus" + 
"github.com/docker/docker/pkg/plugingetter" + "golang.org/x/net/context" +) + +// Middleware uses a list of plugins to +// handle authorization in the API requests. +type Middleware struct { + mu sync.Mutex + plugins []Plugin +} + +// NewMiddleware creates a new Middleware +// with a slice of plugins names. +func NewMiddleware(names []string, pg plugingetter.PluginGetter) *Middleware { + SetPluginGetter(pg) + return &Middleware{ + plugins: newPlugins(names), + } +} + +func (m *Middleware) getAuthzPlugins() []Plugin { + m.mu.Lock() + defer m.mu.Unlock() + return m.plugins +} + +// SetPlugins sets the plugin used for authorization +func (m *Middleware) SetPlugins(names []string) { + m.mu.Lock() + m.plugins = newPlugins(names) + m.mu.Unlock() +} + +// RemovePlugin removes a single plugin from this authz middleware chain +func (m *Middleware) RemovePlugin(name string) { + m.mu.Lock() + defer m.mu.Unlock() + plugins := m.plugins[:0] + for _, authPlugin := range m.plugins { + if authPlugin.Name() != name { + plugins = append(plugins, authPlugin) + } + } + m.plugins = plugins +} + +// WrapHandler returns a new handler function wrapping the previous one in the request chain. +func (m *Middleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + plugins := m.getAuthzPlugins() + if len(plugins) == 0 { + return handler(ctx, w, r, vars) + } + + user := "" + userAuthNMethod := "" + + // Default authorization using existing TLS connection credentials + // FIXME: Non trivial authorization mechanisms (such as advanced certificate validations, kerberos support + // and ldap) will be extracted using AuthN feature, which is tracked under: + // https://github.com/docker/docker/pull/20883 + if r.TLS != nil && len(r.TLS.PeerCertificates) > 0 { + user = r.TLS.PeerCertificates[0].Subject.CommonName + userAuthNMethod = "TLS" + } + + authCtx := NewCtx(plugins, user, userAuthNMethod, r.Method, r.RequestURI) + + if err := authCtx.AuthZRequest(w, r); err != nil { + logrus.Errorf("AuthZRequest for %s %s returned error: %s", r.Method, r.RequestURI, err) + return err + } + + rw := NewResponseModifier(w) + + var errD error + + if errD = handler(ctx, rw, r, vars); errD != nil { + logrus.Errorf("Handler for %s %s returned error: %s", r.Method, r.RequestURI, errD) + } + + // There's a chance that the authCtx.plugins was updated. One of the reasons + // this can happen is when an authzplugin is disabled. 
+ plugins = m.getAuthzPlugins() + if len(plugins) == 0 { + logrus.Debug("There are no authz plugins in the chain") + return nil + } + + authCtx.plugins = plugins + + if err := authCtx.AuthZResponse(rw, r); errD == nil && err != nil { + logrus.Errorf("AuthZResponse for %s %s returned error: %s", r.Method, r.RequestURI, err) + return err + } + + if errD != nil { + return errD + } + + return nil + } +} diff --git a/vendor/github.com/moby/moby/pkg/authorization/middleware_test.go b/vendor/github.com/moby/moby/pkg/authorization/middleware_test.go new file mode 100644 index 000000000..fc7401135 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/authorization/middleware_test.go @@ -0,0 +1,53 @@ +package authorization + +import ( + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/docker/docker/pkg/plugingetter" + "github.com/stretchr/testify/require" +) + +func TestMiddleware(t *testing.T) { + pluginNames := []string{"testPlugin1", "testPlugin2"} + var pluginGetter plugingetter.PluginGetter + m := NewMiddleware(pluginNames, pluginGetter) + authPlugins := m.getAuthzPlugins() + require.Equal(t, 2, len(authPlugins)) + require.EqualValues(t, pluginNames[0], authPlugins[0].Name()) + require.EqualValues(t, pluginNames[1], authPlugins[1].Name()) +} + +func TestNewResponseModifier(t *testing.T) { + recorder := httptest.NewRecorder() + modifier := NewResponseModifier(recorder) + modifier.Header().Set("H1", "V1") + modifier.Write([]byte("body")) + require.False(t, modifier.Hijacked()) + modifier.WriteHeader(http.StatusInternalServerError) + require.NotNil(t, modifier.RawBody()) + + raw, err := modifier.RawHeaders() + require.NotNil(t, raw) + require.Nil(t, err) + + headerData := strings.Split(strings.TrimSpace(string(raw)), ":") + require.EqualValues(t, "H1", strings.TrimSpace(headerData[0])) + require.EqualValues(t, "V1", strings.TrimSpace(headerData[1])) + + modifier.Flush() + modifier.FlushAll() + + if recorder.Header().Get("H1") != "V1" { + t.Fatalf("Header value must exists %s", recorder.Header().Get("H1")) + } + +} + +func setAuthzPlugins(m *Middleware, plugins []Plugin) { + m.mu.Lock() + m.plugins = plugins + m.mu.Unlock() +} diff --git a/vendor/github.com/moby/moby/pkg/authorization/middleware_unix_test.go b/vendor/github.com/moby/moby/pkg/authorization/middleware_unix_test.go new file mode 100644 index 000000000..fd684f120 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/authorization/middleware_unix_test.go @@ -0,0 +1,65 @@ +// +build !windows + +package authorization + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/docker/docker/pkg/plugingetter" + "github.com/stretchr/testify/require" + "golang.org/x/net/context" +) + +func TestMiddlewareWrapHandler(t *testing.T) { + server := authZPluginTestServer{t: t} + server.start() + defer server.stop() + + authZPlugin := createTestPlugin(t) + pluginNames := []string{authZPlugin.name} + + var pluginGetter plugingetter.PluginGetter + middleWare := NewMiddleware(pluginNames, pluginGetter) + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return nil + } + + authList := []Plugin{authZPlugin} + middleWare.SetPlugins([]string{"My Test Plugin"}) + setAuthzPlugins(middleWare, authList) + mdHandler := middleWare.WrapHandler(handler) + require.NotNil(t, mdHandler) + + addr := "www.example.com/auth" + req, _ := http.NewRequest("GET", addr, nil) + req.RequestURI = addr + req.Header.Add("header", "value") + + resp := httptest.NewRecorder() + ctx := 
context.Background() + + t.Run("Error Test Case :", func(t *testing.T) { + server.replayResponse = Response{ + Allow: false, + Msg: "Server Auth Not Allowed", + } + if err := mdHandler(ctx, resp, req, map[string]string{}); err == nil { + require.Error(t, err) + } + + }) + + t.Run("Positive Test Case :", func(t *testing.T) { + server.replayResponse = Response{ + Allow: true, + Msg: "Server Auth Allowed", + } + if err := mdHandler(ctx, resp, req, map[string]string{}); err != nil { + require.NoError(t, err) + } + + }) + +} diff --git a/vendor/github.com/moby/moby/pkg/authorization/plugin.go b/vendor/github.com/moby/moby/pkg/authorization/plugin.go new file mode 100644 index 000000000..939f92674 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/authorization/plugin.go @@ -0,0 +1,118 @@ +package authorization + +import ( + "sync" + + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/plugins" +) + +// Plugin allows third party plugins to authorize requests and responses +// in the context of docker API +type Plugin interface { + // Name returns the registered plugin name + Name() string + + // AuthZRequest authorizes the request from the client to the daemon + AuthZRequest(*Request) (*Response, error) + + // AuthZResponse authorizes the response from the daemon to the client + AuthZResponse(*Request) (*Response, error) +} + +// newPlugins constructs and initializes the authorization plugins based on plugin names +func newPlugins(names []string) []Plugin { + plugins := []Plugin{} + pluginsMap := make(map[string]struct{}) + for _, name := range names { + if _, ok := pluginsMap[name]; ok { + continue + } + pluginsMap[name] = struct{}{} + plugins = append(plugins, newAuthorizationPlugin(name)) + } + return plugins +} + +var getter plugingetter.PluginGetter + +// SetPluginGetter sets the plugingetter +func SetPluginGetter(pg plugingetter.PluginGetter) { + getter = pg +} + +// GetPluginGetter gets the plugingetter +func GetPluginGetter() plugingetter.PluginGetter { + return getter +} + +// authorizationPlugin is an internal adapter to docker plugin system +type authorizationPlugin struct { + plugin *plugins.Client + name string + once sync.Once +} + +func newAuthorizationPlugin(name string) Plugin { + return &authorizationPlugin{name: name} +} + +func (a *authorizationPlugin) Name() string { + return a.name +} + +// Set the remote for an authz pluginv2 +func (a *authorizationPlugin) SetName(remote string) { + a.name = remote +} + +func (a *authorizationPlugin) AuthZRequest(authReq *Request) (*Response, error) { + if err := a.initPlugin(); err != nil { + return nil, err + } + + authRes := &Response{} + if err := a.plugin.Call(AuthZApiRequest, authReq, authRes); err != nil { + return nil, err + } + + return authRes, nil +} + +func (a *authorizationPlugin) AuthZResponse(authReq *Request) (*Response, error) { + if err := a.initPlugin(); err != nil { + return nil, err + } + + authRes := &Response{} + if err := a.plugin.Call(AuthZApiResponse, authReq, authRes); err != nil { + return nil, err + } + + return authRes, nil +} + +// initPlugin initializes the authorization plugin if needed +func (a *authorizationPlugin) initPlugin() error { + // Lazy loading of plugins + var err error + a.once.Do(func() { + if a.plugin == nil { + var plugin plugingetter.CompatPlugin + var e error + + if pg := GetPluginGetter(); pg != nil { + plugin, e = pg.Get(a.name, AuthZApiImplements, plugingetter.Lookup) + a.SetName(plugin.Name()) + } else { + plugin, e = plugins.Get(a.name, AuthZApiImplements) + 
} + if e != nil { + err = e + return + } + a.plugin = plugin.Client() + } + }) + return err +} diff --git a/vendor/github.com/moby/moby/pkg/authorization/response.go b/vendor/github.com/moby/moby/pkg/authorization/response.go new file mode 100644 index 000000000..129bf2f41 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/authorization/response.go @@ -0,0 +1,203 @@ +package authorization + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "net" + "net/http" + + "github.com/Sirupsen/logrus" +) + +// ResponseModifier allows authorization plugins to read and modify the content of the http.response +type ResponseModifier interface { + http.ResponseWriter + http.Flusher + http.CloseNotifier + + // RawBody returns the current http content + RawBody() []byte + + // RawHeaders returns the current content of the http headers + RawHeaders() ([]byte, error) + + // StatusCode returns the current status code + StatusCode() int + + // OverrideBody replaces the body of the HTTP reply + OverrideBody(b []byte) + + // OverrideHeader replaces the headers of the HTTP reply + OverrideHeader(b []byte) error + + // OverrideStatusCode replaces the status code of the HTTP reply + OverrideStatusCode(statusCode int) + + // FlushAll flushes all data to the HTTP response + FlushAll() error + + // Hijacked indicates the response has been hijacked by the Docker daemon + Hijacked() bool +} + +// NewResponseModifier creates a wrapper to an http.ResponseWriter to allow inspecting and modifying the content +func NewResponseModifier(rw http.ResponseWriter) ResponseModifier { + return &responseModifier{rw: rw, header: make(http.Header)} +} + +// responseModifier is used as an adapter to http.ResponseWriter in order to manipulate and explore +// the http request/response from docker daemon +type responseModifier struct { + // The original response writer + rw http.ResponseWriter + // body holds the response body + body []byte + // header holds the response header + header http.Header + // statusCode holds the response status code + statusCode int + // hijacked indicates the request has been hijacked + hijacked bool +} + +func (rm *responseModifier) Hijacked() bool { + return rm.hijacked +} + +// WriterHeader stores the http status code +func (rm *responseModifier) WriteHeader(s int) { + + // Use original request if hijacked + if rm.hijacked { + rm.rw.WriteHeader(s) + return + } + + rm.statusCode = s +} + +// Header returns the internal http header +func (rm *responseModifier) Header() http.Header { + + // Use original header if hijacked + if rm.hijacked { + return rm.rw.Header() + } + + return rm.header +} + +// StatusCode returns the http status code +func (rm *responseModifier) StatusCode() int { + return rm.statusCode +} + +// OverrideBody replaces the body of the HTTP response +func (rm *responseModifier) OverrideBody(b []byte) { + rm.body = b +} + +// OverrideStatusCode replaces the status code of the HTTP response +func (rm *responseModifier) OverrideStatusCode(statusCode int) { + rm.statusCode = statusCode +} + +// OverrideHeader replaces the headers of the HTTP response +func (rm *responseModifier) OverrideHeader(b []byte) error { + header := http.Header{} + if err := json.Unmarshal(b, &header); err != nil { + return err + } + rm.header = header + return nil +} + +// Write stores the byte array inside content +func (rm *responseModifier) Write(b []byte) (int, error) { + + if rm.hijacked { + return rm.rw.Write(b) + } + + rm.body = append(rm.body, b...) 
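+	// The bytes are buffered here instead of being written to the client, so
+	// authZ plugins can still inspect or override the body before FlushAll
+	// sends it out.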
+ return len(b), nil +} + +// Body returns the response body +func (rm *responseModifier) RawBody() []byte { + return rm.body +} + +func (rm *responseModifier) RawHeaders() ([]byte, error) { + var b bytes.Buffer + if err := rm.header.Write(&b); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// Hijack returns the internal connection of the wrapped http.ResponseWriter +func (rm *responseModifier) Hijack() (net.Conn, *bufio.ReadWriter, error) { + + rm.hijacked = true + rm.FlushAll() + + hijacker, ok := rm.rw.(http.Hijacker) + if !ok { + return nil, nil, fmt.Errorf("Internal response writer doesn't support the Hijacker interface") + } + return hijacker.Hijack() +} + +// CloseNotify uses the internal close notify API of the wrapped http.ResponseWriter +func (rm *responseModifier) CloseNotify() <-chan bool { + closeNotifier, ok := rm.rw.(http.CloseNotifier) + if !ok { + logrus.Error("Internal response writer doesn't support the CloseNotifier interface") + return nil + } + return closeNotifier.CloseNotify() +} + +// Flush uses the internal flush API of the wrapped http.ResponseWriter +func (rm *responseModifier) Flush() { + flusher, ok := rm.rw.(http.Flusher) + if !ok { + logrus.Error("Internal response writer doesn't support the Flusher interface") + return + } + + rm.FlushAll() + flusher.Flush() +} + +// FlushAll flushes all data to the HTTP response +func (rm *responseModifier) FlushAll() error { + // Copy the header + for k, vv := range rm.header { + for _, v := range vv { + rm.rw.Header().Add(k, v) + } + } + + // Copy the status code + // Also WriteHeader needs to be done after all the headers + // have been copied (above). + if rm.statusCode > 0 { + rm.rw.WriteHeader(rm.statusCode) + } + + var err error + if len(rm.body) > 0 { + // Write body + _, err = rm.rw.Write(rm.body) + } + + // Clean previous data + rm.body = nil + rm.statusCode = 0 + rm.header = http.Header{} + return err +} diff --git a/vendor/github.com/moby/moby/pkg/broadcaster/unbuffered.go b/vendor/github.com/moby/moby/pkg/broadcaster/unbuffered.go new file mode 100644 index 000000000..784d65d6f --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/broadcaster/unbuffered.go @@ -0,0 +1,49 @@ +package broadcaster + +import ( + "io" + "sync" +) + +// Unbuffered accumulates multiple io.WriteCloser by stream. +type Unbuffered struct { + mu sync.Mutex + writers []io.WriteCloser +} + +// Add adds new io.WriteCloser. +func (w *Unbuffered) Add(writer io.WriteCloser) { + w.mu.Lock() + w.writers = append(w.writers, writer) + w.mu.Unlock() +} + +// Write writes bytes to all writers. Failed writers will be evicted during +// this call. +func (w *Unbuffered) Write(p []byte) (n int, err error) { + w.mu.Lock() + var evict []int + for i, sw := range w.writers { + if n, err := sw.Write(p); err != nil || n != len(p) { + // On error, evict the writer + evict = append(evict, i) + } + } + for n, i := range evict { + w.writers = append(w.writers[:i-n], w.writers[i-n+1:]...) + } + w.mu.Unlock() + return len(p), nil +} + +// Clean closes and removes all writers. Last non-eol-terminated part of data +// will be saved. 
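+// A minimal usage sketch (illustrative only; any io.WriteCloser works):
+//
+//	var w Unbuffered
+//	w.Add(os.Stdout)           // *os.File satisfies io.WriteCloser
+//	w.Write([]byte("hello\n")) // fans out to every registered writer
+//	w.Clean()                  // closes and drops all writers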
+func (w *Unbuffered) Clean() error { + w.mu.Lock() + for _, sw := range w.writers { + sw.Close() + } + w.writers = nil + w.mu.Unlock() + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/broadcaster/unbuffered_test.go b/vendor/github.com/moby/moby/pkg/broadcaster/unbuffered_test.go new file mode 100644 index 000000000..9f8e72bc0 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/broadcaster/unbuffered_test.go @@ -0,0 +1,162 @@ +package broadcaster + +import ( + "bytes" + "errors" + "strings" + + "testing" +) + +type dummyWriter struct { + buffer bytes.Buffer + failOnWrite bool +} + +func (dw *dummyWriter) Write(p []byte) (n int, err error) { + if dw.failOnWrite { + return 0, errors.New("Fake fail") + } + return dw.buffer.Write(p) +} + +func (dw *dummyWriter) String() string { + return dw.buffer.String() +} + +func (dw *dummyWriter) Close() error { + return nil +} + +func TestUnbuffered(t *testing.T) { + writer := new(Unbuffered) + + // Test 1: Both bufferA and bufferB should contain "foo" + bufferA := &dummyWriter{} + writer.Add(bufferA) + bufferB := &dummyWriter{} + writer.Add(bufferB) + writer.Write([]byte("foo")) + + if bufferA.String() != "foo" { + t.Errorf("Buffer contains %v", bufferA.String()) + } + + if bufferB.String() != "foo" { + t.Errorf("Buffer contains %v", bufferB.String()) + } + + // Test2: bufferA and bufferB should contain "foobar", + // while bufferC should only contain "bar" + bufferC := &dummyWriter{} + writer.Add(bufferC) + writer.Write([]byte("bar")) + + if bufferA.String() != "foobar" { + t.Errorf("Buffer contains %v", bufferA.String()) + } + + if bufferB.String() != "foobar" { + t.Errorf("Buffer contains %v", bufferB.String()) + } + + if bufferC.String() != "bar" { + t.Errorf("Buffer contains %v", bufferC.String()) + } + + // Test3: Test eviction on failure + bufferA.failOnWrite = true + writer.Write([]byte("fail")) + if bufferA.String() != "foobar" { + t.Errorf("Buffer contains %v", bufferA.String()) + } + if bufferC.String() != "barfail" { + t.Errorf("Buffer contains %v", bufferC.String()) + } + // Even though we reset the flag, no more writes should go in there + bufferA.failOnWrite = false + writer.Write([]byte("test")) + if bufferA.String() != "foobar" { + t.Errorf("Buffer contains %v", bufferA.String()) + } + if bufferC.String() != "barfailtest" { + t.Errorf("Buffer contains %v", bufferC.String()) + } + + // Test4: Test eviction on multiple simultaneous failures + bufferB.failOnWrite = true + bufferC.failOnWrite = true + bufferD := &dummyWriter{} + writer.Add(bufferD) + writer.Write([]byte("yo")) + writer.Write([]byte("ink")) + if strings.Contains(bufferB.String(), "yoink") { + t.Errorf("bufferB received write. contents: %q", bufferB) + } + if strings.Contains(bufferC.String(), "yoink") { + t.Errorf("bufferC received write. contents: %q", bufferC) + } + if g, w := bufferD.String(), "yoink"; g != w { + t.Errorf("bufferD = %q, want %q", g, w) + } + + writer.Clean() +} + +type devNullCloser int + +func (d devNullCloser) Close() error { + return nil +} + +func (d devNullCloser) Write(buf []byte) (int, error) { + return len(buf), nil +} + +// This test checks for races. It is only useful when run with the race detector. 
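+// Run it under the race detector, e.g.:
+//
+//	go test -race -run TestRaceUnbuffered ./pkg/broadcaster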
+func TestRaceUnbuffered(t *testing.T) { + writer := new(Unbuffered) + c := make(chan bool) + go func() { + writer.Add(devNullCloser(0)) + c <- true + }() + writer.Write([]byte("hello")) + <-c +} + +func BenchmarkUnbuffered(b *testing.B) { + writer := new(Unbuffered) + setUpWriter := func() { + for i := 0; i < 100; i++ { + writer.Add(devNullCloser(0)) + writer.Add(devNullCloser(0)) + writer.Add(devNullCloser(0)) + } + } + testLine := "Line that thinks that it is log line from docker" + var buf bytes.Buffer + for i := 0; i < 100; i++ { + buf.Write([]byte(testLine + "\n")) + } + // line without eol + buf.Write([]byte(testLine)) + testText := buf.Bytes() + b.SetBytes(int64(5 * len(testText))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + setUpWriter() + b.StartTimer() + + for j := 0; j < 5; j++ { + if _, err := writer.Write(testText); err != nil { + b.Fatal(err) + } + } + + b.StopTimer() + writer.Clean() + b.StartTimer() + } +} diff --git a/vendor/github.com/moby/moby/pkg/chrootarchive/archive.go b/vendor/github.com/moby/moby/pkg/chrootarchive/archive.go new file mode 100644 index 000000000..760441876 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/chrootarchive/archive.go @@ -0,0 +1,70 @@ +package chrootarchive + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" +) + +// NewArchiver returns a new Archiver which uses chrootarchive.Untar +func NewArchiver(idMappings *idtools.IDMappings) *archive.Archiver { + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + return &archive.Archiver{Untar: Untar, IDMappings: idMappings} +} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// identity (uncompressed), gzip, bzip2, xz. +func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + return untarHandler(tarArchive, dest, options, true) +} + +// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive must be an uncompressed stream. 
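+// A hypothetical call site, assuming `f` is an already-decompressed tar
+// stream (a nil options value is replaced with defaults by untarHandler):
+//
+//	err := UntarUncompressed(f, "/var/lib/docker/tmp/extract", nil)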
+func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + return untarHandler(tarArchive, dest, options, false) +} + +// Handler for teasing out the automatic decompression +func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error { + if tarArchive == nil { + return fmt.Errorf("Empty archive") + } + if options == nil { + options = &archive.TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + rootIDs := idMappings.RootPair() + + dest = filepath.Clean(dest) + if _, err := os.Stat(dest); os.IsNotExist(err) { + if err := idtools.MkdirAllAndChownNew(dest, 0755, rootIDs); err != nil { + return err + } + } + + r := ioutil.NopCloser(tarArchive) + if decompress { + decompressedArchive, err := archive.DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() + r = decompressedArchive + } + + return invokeUnpack(r, dest, options) +} diff --git a/vendor/github.com/moby/moby/pkg/chrootarchive/archive_test.go b/vendor/github.com/moby/moby/pkg/chrootarchive/archive_test.go new file mode 100644 index 000000000..bd2deb2dd --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/chrootarchive/archive_test.go @@ -0,0 +1,412 @@ +package chrootarchive + +import ( + "bytes" + "fmt" + "hash/crc32" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + "time" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/docker/pkg/system" +) + +func init() { + reexec.Init() +} + +var chrootArchiver = NewArchiver(nil) + +func TarUntar(src, dst string) error { + return chrootArchiver.TarUntar(src, dst) +} + +func CopyFileWithTar(src, dst string) (err error) { + return chrootArchiver.CopyFileWithTar(src, dst) +} + +func UntarPath(src, dst string) error { + return chrootArchiver.UntarPath(src, dst) +} + +func CopyWithTar(src, dst string) error { + return chrootArchiver.CopyWithTar(src, dst) +} + +func TestChrootTarUntar(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootTarUntar") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(src, 0700, ""); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(src, "toto"), []byte("hello toto"), 0644); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(src, "lolo"), []byte("hello lolo"), 0644); err != nil { + t.Fatal(err) + } + stream, err := archive.Tar(src, archive.Uncompressed) + if err != nil { + t.Fatal(err) + } + dest := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(dest, 0700, ""); err != nil { + t.Fatal(err) + } + if err := Untar(stream, dest, &archive.TarOptions{ExcludePatterns: []string{"lolo"}}); err != nil { + t.Fatal(err) + } +} + +// gh#10426: Verify the fix for having a huge excludes list (like on `docker load` with large # of +// local images) +func TestChrootUntarWithHugeExcludesList(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarHugeExcludes") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(src, 0700, ""); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(src, "toto"), []byte("hello toto"), 0644); err != nil { + t.Fatal(err) + } + stream, err := 
archive.Tar(src, archive.Uncompressed) + if err != nil { + t.Fatal(err) + } + dest := filepath.Join(tmpdir, "dest") + if err := system.MkdirAll(dest, 0700, ""); err != nil { + t.Fatal(err) + } + options := &archive.TarOptions{} + //65534 entries of 64-byte strings ~= 4MB of environment space which should overflow + //on most systems when passed via environment or command line arguments + excludes := make([]string, 65534) + for i := 0; i < 65534; i++ { + excludes[i] = strings.Repeat(string(i), 64) + } + options.ExcludePatterns = excludes + if err := Untar(stream, dest, options); err != nil { + t.Fatal(err) + } +} + +func TestChrootUntarEmptyArchive(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarEmptyArchive") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := Untar(nil, tmpdir, nil); err == nil { + t.Fatal("expected error on empty archive") + } +} + +func prepareSourceDirectory(numberOfFiles int, targetPath string, makeSymLinks bool) (int, error) { + fileData := []byte("fooo") + for n := 0; n < numberOfFiles; n++ { + fileName := fmt.Sprintf("file-%d", n) + if err := ioutil.WriteFile(filepath.Join(targetPath, fileName), fileData, 0700); err != nil { + return 0, err + } + if makeSymLinks { + if err := os.Symlink(filepath.Join(targetPath, fileName), filepath.Join(targetPath, fileName+"-link")); err != nil { + return 0, err + } + } + } + totalSize := numberOfFiles * len(fileData) + return totalSize, nil +} + +func getHash(filename string) (uint32, error) { + stream, err := ioutil.ReadFile(filename) + if err != nil { + return 0, err + } + hash := crc32.NewIEEE() + hash.Write(stream) + return hash.Sum32(), nil +} + +func compareDirectories(src string, dest string) error { + changes, err := archive.ChangesDirs(dest, src) + if err != nil { + return err + } + if len(changes) > 0 { + return fmt.Errorf("Unexpected differences after untar: %v", changes) + } + return nil +} + +func compareFiles(src string, dest string) error { + srcHash, err := getHash(src) + if err != nil { + return err + } + destHash, err := getHash(dest) + if err != nil { + return err + } + if srcHash != destHash { + return fmt.Errorf("%s is different from %s", src, dest) + } + return nil +} + +func TestChrootTarUntarWithSymlink(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + tmpdir, err := ioutil.TempDir("", "docker-TestChrootTarUntarWithSymlink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(src, 0700, ""); err != nil { + t.Fatal(err) + } + if _, err := prepareSourceDirectory(10, src, false); err != nil { + t.Fatal(err) + } + dest := filepath.Join(tmpdir, "dest") + if err := TarUntar(src, dest); err != nil { + t.Fatal(err) + } + if err := compareDirectories(src, dest); err != nil { + t.Fatal(err) + } +} + +func TestChrootCopyWithTar(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" || runtime.GOOS == "solaris" { + t.Skip("Failing on Windows and Solaris") + } + tmpdir, err := ioutil.TempDir("", "docker-TestChrootCopyWithTar") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(src, 0700, ""); err != nil { + t.Fatal(err) + } + if _, err := prepareSourceDirectory(10, src, true); err != nil { + t.Fatal(err) + } + + // Copy directory + dest := filepath.Join(tmpdir, "dest") + if err := 
CopyWithTar(src, dest); err != nil { + t.Fatal(err) + } + if err := compareDirectories(src, dest); err != nil { + t.Fatal(err) + } + + // Copy file + srcfile := filepath.Join(src, "file-1") + dest = filepath.Join(tmpdir, "destFile") + destfile := filepath.Join(dest, "file-1") + if err := CopyWithTar(srcfile, destfile); err != nil { + t.Fatal(err) + } + if err := compareFiles(srcfile, destfile); err != nil { + t.Fatal(err) + } + + // Copy symbolic link + srcLinkfile := filepath.Join(src, "file-1-link") + dest = filepath.Join(tmpdir, "destSymlink") + destLinkfile := filepath.Join(dest, "file-1-link") + if err := CopyWithTar(srcLinkfile, destLinkfile); err != nil { + t.Fatal(err) + } + if err := compareFiles(srcLinkfile, destLinkfile); err != nil { + t.Fatal(err) + } +} + +func TestChrootCopyFileWithTar(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootCopyFileWithTar") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(src, 0700, ""); err != nil { + t.Fatal(err) + } + if _, err := prepareSourceDirectory(10, src, true); err != nil { + t.Fatal(err) + } + + // Copy directory + dest := filepath.Join(tmpdir, "dest") + if err := CopyFileWithTar(src, dest); err == nil { + t.Fatal("Expected error on copying directory") + } + + // Copy file + srcfile := filepath.Join(src, "file-1") + dest = filepath.Join(tmpdir, "destFile") + destfile := filepath.Join(dest, "file-1") + if err := CopyFileWithTar(srcfile, destfile); err != nil { + t.Fatal(err) + } + if err := compareFiles(srcfile, destfile); err != nil { + t.Fatal(err) + } + + // Copy symbolic link + srcLinkfile := filepath.Join(src, "file-1-link") + dest = filepath.Join(tmpdir, "destSymlink") + destLinkfile := filepath.Join(dest, "file-1-link") + if err := CopyFileWithTar(srcLinkfile, destLinkfile); err != nil { + t.Fatal(err) + } + if err := compareFiles(srcLinkfile, destLinkfile); err != nil { + t.Fatal(err) + } +} + +func TestChrootUntarPath(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarPath") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(src, 0700, ""); err != nil { + t.Fatal(err) + } + if _, err := prepareSourceDirectory(10, src, false); err != nil { + t.Fatal(err) + } + dest := filepath.Join(tmpdir, "dest") + // Untar a directory + if err := UntarPath(src, dest); err == nil { + t.Fatal("Expected error on untaring a directory") + } + + // Untar a tar file + stream, err := archive.Tar(src, archive.Uncompressed) + if err != nil { + t.Fatal(err) + } + buf := new(bytes.Buffer) + buf.ReadFrom(stream) + tarfile := filepath.Join(tmpdir, "src.tar") + if err := ioutil.WriteFile(tarfile, buf.Bytes(), 0644); err != nil { + t.Fatal(err) + } + if err := UntarPath(tarfile, dest); err != nil { + t.Fatal(err) + } + if err := compareDirectories(src, dest); err != nil { + t.Fatal(err) + } +} + +type slowEmptyTarReader struct { + size int + offset int + chunkSize int +} + +// Read is a slow reader of an empty tar (like the output of "tar c --files-from /dev/null") +func (s *slowEmptyTarReader) Read(p []byte) (int, error) { + time.Sleep(100 * time.Millisecond) + count := s.chunkSize + if len(p) < s.chunkSize { + count = len(p) + } + for i := 0; i < count; i++ { + p[i] = 0 + } + s.offset += count + if s.offset > s.size { + return count, 
io.EOF + } + return count, nil +} + +func TestChrootUntarEmptyArchiveFromSlowReader(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarEmptyArchiveFromSlowReader") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + dest := filepath.Join(tmpdir, "dest") + if err := system.MkdirAll(dest, 0700, ""); err != nil { + t.Fatal(err) + } + stream := &slowEmptyTarReader{size: 10240, chunkSize: 1024} + if err := Untar(stream, dest, nil); err != nil { + t.Fatal(err) + } +} + +func TestChrootApplyEmptyArchiveFromSlowReader(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootApplyEmptyArchiveFromSlowReader") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + dest := filepath.Join(tmpdir, "dest") + if err := system.MkdirAll(dest, 0700, ""); err != nil { + t.Fatal(err) + } + stream := &slowEmptyTarReader{size: 10240, chunkSize: 1024} + if _, err := ApplyLayer(dest, stream); err != nil { + t.Fatal(err) + } +} + +func TestChrootApplyDotDotFile(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootApplyDotDotFile") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(src, 0700, ""); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(src, "..gitme"), []byte(""), 0644); err != nil { + t.Fatal(err) + } + stream, err := archive.Tar(src, archive.Uncompressed) + if err != nil { + t.Fatal(err) + } + dest := filepath.Join(tmpdir, "dest") + if err := system.MkdirAll(dest, 0700, ""); err != nil { + t.Fatal(err) + } + if _, err := ApplyLayer(dest, stream); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/pkg/chrootarchive/archive_unix.go b/vendor/github.com/moby/moby/pkg/chrootarchive/archive_unix.go new file mode 100644 index 000000000..f2325abd7 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/chrootarchive/archive_unix.go @@ -0,0 +1,86 @@ +// +build !windows + +package chrootarchive + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" +) + +// untar is the entry-point for docker-untar on re-exec. This is not used on +// Windows as it does not support chroot, hence no point sandboxing through +// chroot and rexec. +func untar() { + runtime.LockOSThread() + flag.Parse() + + var options *archive.TarOptions + + //read the options from the pipe "ExtraFiles" + if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil { + fatal(err) + } + + if err := chroot(flag.Arg(0)); err != nil { + fatal(err) + } + + if err := archive.Unpack(os.Stdin, "/", options); err != nil { + fatal(err) + } + // fully consume stdin in case it is zero padded + if _, err := flush(os.Stdin); err != nil { + fatal(err) + } + + os.Exit(0) +} + +func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions) error { + + // We can't pass a potentially large exclude list directly via cmd line + // because we easily overrun the kernel's max argument/environment size + // when the full image list is passed (e.g. when this is used by + // `docker load`). 
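+	// (TestChrootUntarWithHugeExcludesList above drives roughly 4MB of
+	// exclude patterns through this path, which is exactly the overflow
+	// case being avoided.)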
We will marshall the options via a pipe to the + // child + r, w, err := os.Pipe() + if err != nil { + return fmt.Errorf("Untar pipe failure: %v", err) + } + + cmd := reexec.Command("docker-untar", dest) + cmd.Stdin = decompressedArchive + + cmd.ExtraFiles = append(cmd.ExtraFiles, r) + output := bytes.NewBuffer(nil) + cmd.Stdout = output + cmd.Stderr = output + + if err := cmd.Start(); err != nil { + return fmt.Errorf("Untar error on re-exec cmd: %v", err) + } + //write the options to the pipe for the untar exec to read + if err := json.NewEncoder(w).Encode(options); err != nil { + return fmt.Errorf("Untar json encode to pipe failed: %v", err) + } + w.Close() + + if err := cmd.Wait(); err != nil { + // when `xz -d -c -q | docker-untar ...` failed on docker-untar side, + // we need to exhaust `xz`'s output, otherwise the `xz` side will be + // pending on write pipe forever + io.Copy(ioutil.Discard, decompressedArchive) + + return fmt.Errorf("Error processing tar file(%v): %s", err, output) + } + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/chrootarchive/archive_windows.go b/vendor/github.com/moby/moby/pkg/chrootarchive/archive_windows.go new file mode 100644 index 000000000..0a500ed5c --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/chrootarchive/archive_windows.go @@ -0,0 +1,22 @@ +package chrootarchive + +import ( + "io" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/longpath" +) + +// chroot is not supported by Windows +func chroot(path string) error { + return nil +} + +func invokeUnpack(decompressedArchive io.ReadCloser, + dest string, + options *archive.TarOptions) error { + // Windows is different to Linux here because Windows does not support + // chroot. Hence there is no point sandboxing a chrooted process to + // do the unpack. We call inline instead within the daemon process. + return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options) +} diff --git a/vendor/github.com/moby/moby/pkg/chrootarchive/chroot_linux.go b/vendor/github.com/moby/moby/pkg/chrootarchive/chroot_linux.go new file mode 100644 index 000000000..ebc3b8446 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/chrootarchive/chroot_linux.go @@ -0,0 +1,108 @@ +package chrootarchive + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/mount" + rsystem "github.com/opencontainers/runc/libcontainer/system" + "golang.org/x/sys/unix" +) + +// chroot on linux uses pivot_root instead of chroot +// pivot_root takes a new root and an old root. +// Old root must be a sub-dir of new root, it is where the current rootfs will reside after the call to pivot_root. +// New root is where the new rootfs is set to. +// Old root is removed after the call to pivot_root so it is no longer available under the new root. 
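+// The sequence below is therefore: unshare a private mount namespace,
+// bind-mount the target onto itself so it is a valid pivot target, pivot_root
+// into it, then detach and remove the old root.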
+// This is similar to how libcontainer sets up a container's rootfs +func chroot(path string) (err error) { + // if the engine is running in a user namespace we need to use actual chroot + if rsystem.RunningInUserNS() { + return realChroot(path) + } + if err := unix.Unshare(unix.CLONE_NEWNS); err != nil { + return fmt.Errorf("Error creating mount namespace before pivot: %v", err) + } + + // make everything in new ns private + if err := mount.MakeRPrivate("/"); err != nil { + return err + } + + if mounted, _ := mount.Mounted(path); !mounted { + if err := mount.Mount(path, path, "bind", "rbind,rw"); err != nil { + return realChroot(path) + } + } + + // setup oldRoot for pivot_root + pivotDir, err := ioutil.TempDir(path, ".pivot_root") + if err != nil { + return fmt.Errorf("Error setting up pivot dir: %v", err) + } + + var mounted bool + defer func() { + if mounted { + // make sure pivotDir is not mounted before we try to remove it + if errCleanup := unix.Unmount(pivotDir, unix.MNT_DETACH); errCleanup != nil { + if err == nil { + err = errCleanup + } + return + } + } + + errCleanup := os.Remove(pivotDir) + // pivotDir doesn't exist if pivot_root failed and chroot+chdir was successful + // because we already cleaned it up on failed pivot_root + if errCleanup != nil && !os.IsNotExist(errCleanup) { + errCleanup = fmt.Errorf("Error cleaning up after pivot: %v", errCleanup) + if err == nil { + err = errCleanup + } + } + }() + + if err := unix.PivotRoot(path, pivotDir); err != nil { + // If pivot fails, fall back to the normal chroot after cleaning up temp dir + if err := os.Remove(pivotDir); err != nil { + return fmt.Errorf("Error cleaning up after failed pivot: %v", err) + } + return realChroot(path) + } + mounted = true + + // This is the new path for where the old root (prior to the pivot) has been moved to + // This dir contains the rootfs of the caller, which we need to remove so it is not visible during extraction + pivotDir = filepath.Join("/", filepath.Base(pivotDir)) + + if err := unix.Chdir("/"); err != nil { + return fmt.Errorf("Error changing to new root: %v", err) + } + + // Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host + if err := unix.Mount("", pivotDir, "", unix.MS_PRIVATE|unix.MS_REC, ""); err != nil { + return fmt.Errorf("Error making old root private after pivot: %v", err) + } + + // Now unmount the old root so it's no longer visible from the new root + if err := unix.Unmount(pivotDir, unix.MNT_DETACH); err != nil { + return fmt.Errorf("Error while unmounting old root after pivot: %v", err) + } + mounted = false + + return nil +} + +func realChroot(path string) error { + if err := unix.Chroot(path); err != nil { + return fmt.Errorf("Error after fallback to chroot: %v", err) + } + if err := unix.Chdir("/"); err != nil { + return fmt.Errorf("Error changing to new root after chroot: %v", err) + } + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/chrootarchive/chroot_unix.go b/vendor/github.com/moby/moby/pkg/chrootarchive/chroot_unix.go new file mode 100644 index 000000000..f9b5dece8 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/chrootarchive/chroot_unix.go @@ -0,0 +1,12 @@ +// +build !windows,!linux + +package chrootarchive + +import "golang.org/x/sys/unix" + +func chroot(path string) error { + if err := unix.Chroot(path); err != nil { + return err + } + return unix.Chdir("/") +} diff --git a/vendor/github.com/moby/moby/pkg/chrootarchive/diff.go b/vendor/github.com/moby/moby/pkg/chrootarchive/diff.go 
new file mode 100644 index 000000000..49acad79f --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/chrootarchive/diff.go @@ -0,0 +1,23 @@ +package chrootarchive + +import ( + "io" + + "github.com/docker/docker/pkg/archive" +) + +// ApplyLayer parses a diff in the standard layer format from `layer`, +// and applies it to the directory `dest`. The stream `layer` can only be +// uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyLayer(dest string, layer io.Reader) (size int64, err error) { + return applyLayerHandler(dest, layer, &archive.TarOptions{}, true) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. The stream `layer` +// can only be uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyUncompressedLayer(dest string, layer io.Reader, options *archive.TarOptions) (int64, error) { + return applyLayerHandler(dest, layer, options, false) +} diff --git a/vendor/github.com/moby/moby/pkg/chrootarchive/diff_unix.go b/vendor/github.com/moby/moby/pkg/chrootarchive/diff_unix.go new file mode 100644 index 000000000..33098b33e --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/chrootarchive/diff_unix.go @@ -0,0 +1,130 @@ +//+build !windows + +package chrootarchive + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/docker/pkg/system" + rsystem "github.com/opencontainers/runc/libcontainer/system" +) + +type applyLayerResponse struct { + LayerSize int64 `json:"layerSize"` +} + +// applyLayer is the entry-point for docker-applylayer on re-exec. This is not +// used on Windows as it does not support chroot, hence no point sandboxing +// through chroot and rexec. +func applyLayer() { + + var ( + tmpDir string + err error + options *archive.TarOptions + ) + runtime.LockOSThread() + flag.Parse() + + inUserns := rsystem.RunningInUserNS() + if err := chroot(flag.Arg(0)); err != nil { + fatal(err) + } + + // We need to be able to set any perms + oldmask, err := system.Umask(0) + defer system.Umask(oldmask) + if err != nil { + fatal(err) + } + + if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil { + fatal(err) + } + + if inUserns { + options.InUserNS = true + } + + if tmpDir, err = ioutil.TempDir("/", "temp-docker-extract"); err != nil { + fatal(err) + } + + os.Setenv("TMPDIR", tmpDir) + size, err := archive.UnpackLayer("/", os.Stdin, options) + os.RemoveAll(tmpDir) + if err != nil { + fatal(err) + } + + encoder := json.NewEncoder(os.Stdout) + if err := encoder.Encode(applyLayerResponse{size}); err != nil { + fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err)) + } + + if _, err := flush(os.Stdin); err != nil { + fatal(err) + } + + os.Exit(0) +} + +// applyLayerHandler parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. Returns the size in bytes of the +// contents of the layer. 
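+// The unpacking itself happens in the re-exec'd "docker-applyLayer" child
+// (applyLayer above); this side only marshals the options into the OPT
+// environment variable, streams the layer over stdin, and decodes the size
+// from the child's JSON reply.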
+func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { + dest = filepath.Clean(dest) + if decompress { + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompressed.Close() + + layer = decompressed + } + if options == nil { + options = &archive.TarOptions{} + if rsystem.RunningInUserNS() { + options.InUserNS = true + } + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + data, err := json.Marshal(options) + if err != nil { + return 0, fmt.Errorf("ApplyLayer json encode: %v", err) + } + + cmd := reexec.Command("docker-applyLayer", dest) + cmd.Stdin = layer + cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data)) + + outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer) + cmd.Stdout, cmd.Stderr = outBuf, errBuf + + if err = cmd.Run(); err != nil { + return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf) + } + + // Stdout should be a valid JSON struct representing an applyLayerResponse. + response := applyLayerResponse{} + decoder := json.NewDecoder(outBuf) + if err = decoder.Decode(&response); err != nil { + return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err) + } + + return response.LayerSize, nil +} diff --git a/vendor/github.com/moby/moby/pkg/chrootarchive/diff_windows.go b/vendor/github.com/moby/moby/pkg/chrootarchive/diff_windows.go new file mode 100644 index 000000000..dc07eb680 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/chrootarchive/diff_windows.go @@ -0,0 +1,45 @@ +package chrootarchive + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/longpath" +) + +// applyLayerHandler parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. Returns the size in bytes of the +// contents of the layer. +func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { + dest = filepath.Clean(dest) + + // Ensure it is a Windows-style volume path + dest = longpath.AddPrefix(dest) + + if decompress { + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompressed.Close() + + layer = decompressed + } + + tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-docker-extract") + if err != nil { + return 0, fmt.Errorf("ApplyLayer failed to create temp-docker-extract under %s. 
%s", dest, err) + } + + s, err := archive.UnpackLayer(dest, layer, nil) + os.RemoveAll(tmpDir) + if err != nil { + return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %s", layer, dest, err) + } + + return s, nil +} diff --git a/vendor/github.com/moby/moby/pkg/chrootarchive/init_unix.go b/vendor/github.com/moby/moby/pkg/chrootarchive/init_unix.go new file mode 100644 index 000000000..4f637f17b --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/chrootarchive/init_unix.go @@ -0,0 +1,28 @@ +// +build !windows + +package chrootarchive + +import ( + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/docker/docker/pkg/reexec" +) + +func init() { + reexec.Register("docker-applyLayer", applyLayer) + reexec.Register("docker-untar", untar) +} + +func fatal(err error) { + fmt.Fprint(os.Stderr, err) + os.Exit(1) +} + +// flush consumes all the bytes from the reader discarding +// any errors +func flush(r io.Reader) (bytes int64, err error) { + return io.Copy(ioutil.Discard, r) +} diff --git a/vendor/github.com/moby/moby/pkg/chrootarchive/init_windows.go b/vendor/github.com/moby/moby/pkg/chrootarchive/init_windows.go new file mode 100644 index 000000000..fa17c9bf8 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/chrootarchive/init_windows.go @@ -0,0 +1,4 @@ +package chrootarchive + +func init() { +} diff --git a/vendor/github.com/moby/moby/pkg/devicemapper/devmapper.go b/vendor/github.com/moby/moby/pkg/devicemapper/devmapper.go new file mode 100644 index 000000000..f33197080 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/devicemapper/devmapper.go @@ -0,0 +1,819 @@ +// +build linux,cgo + +package devicemapper + +import ( + "errors" + "fmt" + "os" + "runtime" + "unsafe" + + "github.com/Sirupsen/logrus" + "golang.org/x/sys/unix" +) + +const ( + deviceCreate TaskType = iota + deviceReload + deviceRemove + deviceRemoveAll + deviceSuspend + deviceResume + deviceInfo + deviceDeps + deviceRename + deviceVersion + deviceStatus + deviceTable + deviceWaitevent + deviceList + deviceClear + deviceMknodes + deviceListVersions + deviceTargetMsg + deviceSetGeometry +) + +const ( + addNodeOnResume AddNodeType = iota + addNodeOnCreate +) + +// List of errors returned when using devicemapper. 
+var (
+ ErrTaskRun = errors.New("dm_task_run failed")
+ ErrTaskSetName = errors.New("dm_task_set_name failed")
+ ErrTaskSetMessage = errors.New("dm_task_set_message failed")
+ ErrTaskSetAddNode = errors.New("dm_task_set_add_node failed")
+ ErrTaskSetRo = errors.New("dm_task_set_ro failed")
+ ErrTaskAddTarget = errors.New("dm_task_add_target failed")
+ ErrTaskSetSector = errors.New("dm_task_set_sector failed")
+ ErrTaskGetDeps = errors.New("dm_task_get_deps failed")
+ ErrTaskGetInfo = errors.New("dm_task_get_info failed")
+ ErrTaskGetDriverVersion = errors.New("dm_task_get_driver_version failed")
+ ErrTaskDeferredRemove = errors.New("dm_task_deferred_remove failed")
+ ErrTaskSetCookie = errors.New("dm_task_set_cookie failed")
+ ErrNilCookie = errors.New("cookie ptr can't be nil")
+ ErrGetBlockSize = errors.New("Can't get block size")
+ ErrUdevWait = errors.New("wait on udev cookie failed")
+ ErrSetDevDir = errors.New("dm_set_dev_dir failed")
+ ErrGetLibraryVersion = errors.New("dm_get_library_version failed")
+ ErrCreateRemoveTask = errors.New("Can't create task of type deviceRemove")
+ ErrRunRemoveDevice = errors.New("running RemoveDevice failed")
+ ErrInvalidAddNode = errors.New("Invalid AddNode type")
+ ErrBusy = errors.New("Device is Busy")
+ ErrDeviceIDExists = errors.New("Device Id Exists")
+ ErrEnxio = errors.New("No such device or address")
+)
+
+var (
+ dmSawBusy bool
+ dmSawExist bool
+ dmSawEnxio bool // No Such Device or Address
+)
+
+type (
+ // Task represents a devicemapper task (like lvcreate, etc.); a task is needed for each ioctl
+ // command to execute.
+ Task struct {
+ unmanaged *cdmTask
+ }
+ // Deps represents dependents (layer) of a device.
+ Deps struct {
+ Count uint32
+ Filler uint32
+ Device []uint64
+ }
+ // Info represents information about a device.
+ Info struct {
+ Exists int
+ Suspended int
+ LiveTable int
+ InactiveTable int
+ OpenCount int32
+ EventNr uint32
+ Major uint32
+ Minor uint32
+ ReadOnly int
+ TargetCount int32
+ DeferredRemove int
+ }
+ // TaskType represents a type of task.
+ TaskType int
+ // AddNodeType represents a type of node to be added.
+ AddNodeType int
+)
+
+// DeviceIDExists returns whether the error conveys that the device ID
+// already exists. This will be true if a device-create or snapshot-create
+// operation fails because the device or snapshot device already exists in
+// the pool. The current implementation is a little crude, as it scans the
+// error string for an exact pattern match; replacing it with a more robust
+// implementation is desirable.
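The comment above describes the intended use of the check defined just below: when a create call fails because the ID is already taken in the pool, the caller picks another ID and retries. A minimal sketch of that retry loop, assuming the exported `CreateDevice` helper defined later in this file; `tryCreateDevice`, the pool path, and the ID range are illustrative, not part of the package:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/devicemapper"
)

// tryCreateDevice is a hypothetical helper: it retries CreateDevice with the
// next candidate ID whenever the pool reports the ID as already taken.
func tryCreateDevice(poolName string, startID, maxID int) (int, error) {
	for id := startID; id < maxID; id++ {
		err := devicemapper.CreateDevice(poolName, id)
		if err == nil {
			return id, nil // this ID was free
		}
		if devicemapper.DeviceIDExists(err) {
			continue // ID collision in the pool; try the next candidate
		}
		return 0, err // any other failure is fatal
	}
	return 0, fmt.Errorf("no free device id in [%d,%d)", startID, maxID)
}

func main() {
	// "/dev/mapper/docker-thinpool" is an assumed pool device path.
	if id, err := tryCreateDevice("/dev/mapper/docker-thinpool", 1, 100); err == nil {
		fmt.Println("created thin device with id", id)
	}
}
```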
+func DeviceIDExists(err error) bool { + return fmt.Sprint(err) == fmt.Sprint(ErrDeviceIDExists) +} + +func (t *Task) destroy() { + if t != nil { + DmTaskDestroy(t.unmanaged) + runtime.SetFinalizer(t, nil) + } +} + +// TaskCreateNamed is a convenience function for TaskCreate when a name +// will be set on the task as well +func TaskCreateNamed(t TaskType, name string) (*Task, error) { + task := TaskCreate(t) + if task == nil { + return nil, fmt.Errorf("devicemapper: Can't create task of type %d", int(t)) + } + if err := task.setName(name); err != nil { + return nil, fmt.Errorf("devicemapper: Can't set task name %s", name) + } + return task, nil +} + +// TaskCreate initializes a devicemapper task of tasktype +func TaskCreate(tasktype TaskType) *Task { + Ctask := DmTaskCreate(int(tasktype)) + if Ctask == nil { + return nil + } + task := &Task{unmanaged: Ctask} + runtime.SetFinalizer(task, (*Task).destroy) + return task +} + +func (t *Task) run() error { + if res := DmTaskRun(t.unmanaged); res != 1 { + return ErrTaskRun + } + runtime.KeepAlive(t) + return nil +} + +func (t *Task) setName(name string) error { + if res := DmTaskSetName(t.unmanaged, name); res != 1 { + return ErrTaskSetName + } + return nil +} + +func (t *Task) setMessage(message string) error { + if res := DmTaskSetMessage(t.unmanaged, message); res != 1 { + return ErrTaskSetMessage + } + return nil +} + +func (t *Task) setSector(sector uint64) error { + if res := DmTaskSetSector(t.unmanaged, sector); res != 1 { + return ErrTaskSetSector + } + return nil +} + +func (t *Task) setCookie(cookie *uint, flags uint16) error { + if cookie == nil { + return ErrNilCookie + } + if res := DmTaskSetCookie(t.unmanaged, cookie, flags); res != 1 { + return ErrTaskSetCookie + } + return nil +} + +func (t *Task) setAddNode(addNode AddNodeType) error { + if addNode != addNodeOnResume && addNode != addNodeOnCreate { + return ErrInvalidAddNode + } + if res := DmTaskSetAddNode(t.unmanaged, addNode); res != 1 { + return ErrTaskSetAddNode + } + return nil +} + +func (t *Task) setRo() error { + if res := DmTaskSetRo(t.unmanaged); res != 1 { + return ErrTaskSetRo + } + return nil +} + +func (t *Task) addTarget(start, size uint64, ttype, params string) error { + if res := DmTaskAddTarget(t.unmanaged, start, size, + ttype, params); res != 1 { + return ErrTaskAddTarget + } + return nil +} + +func (t *Task) getDeps() (*Deps, error) { + var deps *Deps + if deps = DmTaskGetDeps(t.unmanaged); deps == nil { + return nil, ErrTaskGetDeps + } + return deps, nil +} + +func (t *Task) getInfo() (*Info, error) { + info := &Info{} + if res := DmTaskGetInfo(t.unmanaged, info); res != 1 { + return nil, ErrTaskGetInfo + } + return info, nil +} + +func (t *Task) getInfoWithDeferred() (*Info, error) { + info := &Info{} + if res := DmTaskGetInfoWithDeferred(t.unmanaged, info); res != 1 { + return nil, ErrTaskGetInfo + } + return info, nil +} + +func (t *Task) getDriverVersion() (string, error) { + res := DmTaskGetDriverVersion(t.unmanaged) + if res == "" { + return "", ErrTaskGetDriverVersion + } + return res, nil +} + +func (t *Task) getNextTarget(next unsafe.Pointer) (nextPtr unsafe.Pointer, start uint64, + length uint64, targetType string, params string) { + + return DmGetNextTarget(t.unmanaged, next, &start, &length, + &targetType, ¶ms), + start, length, targetType, params +} + +// UdevWait waits for any processes that are waiting for udev to complete the specified cookie. 
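The function below is one half of a strict pairing: every successful `task.setCookie` allocates a System V semaphore that only `UdevWait` releases (the comments in `RemoveDeviceDeferred` further down spell out why). A condensed, hypothetical in-package sketch of the pattern that the exported helpers in this file follow, using `deviceRemove` as just one example task type:

```go
package devicemapper // in-package sketch; not part of the vendored file

import "fmt"

func removeWithUdevSync(name string) error {
	task, err := TaskCreateNamed(deviceRemove, name)
	if task == nil {
		return err
	}

	cookie := new(uint)
	if err := task.setCookie(cookie, 0); err != nil {
		return fmt.Errorf("devicemapper: Can not set cookie: %s", err)
	}
	// UdevWait both waits for udev to process the event and frees the
	// semaphore that setCookie allocated, so the calls must come in pairs.
	defer UdevWait(cookie)

	return task.run()
}
```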
+func UdevWait(cookie *uint) error {
+ if res := DmUdevWait(*cookie); res != 1 {
+ logrus.Debugf("devicemapper: Failed to wait on udev cookie %d, %d", *cookie, res)
+ return ErrUdevWait
+ }
+ return nil
+}
+
+// SetDevDir sets the dev folder for the device mapper library (usually /dev).
+func SetDevDir(dir string) error {
+ if res := DmSetDevDir(dir); res != 1 {
+ logrus.Debug("devicemapper: Error dm_set_dev_dir")
+ return ErrSetDevDir
+ }
+ return nil
+}
+
+// GetLibraryVersion returns the device mapper library version.
+func GetLibraryVersion() (string, error) {
+ var version string
+ if res := DmGetLibraryVersion(&version); res != 1 {
+ return "", ErrGetLibraryVersion
+ }
+ return version, nil
+}
+
+// UdevSyncSupported returns whether device-mapper is able to sync with udev.
+//
+// This is essential; otherwise race conditions can arise where both udev and
+// device-mapper attempt to create and destroy devices.
+func UdevSyncSupported() bool {
+ return DmUdevGetSyncSupport() != 0
+}
+
+// UdevSetSyncSupport allows setting whether the udev sync should be enabled.
+// The returned bool indicates whether the sync is now enabled.
+func UdevSetSyncSupport(enable bool) bool {
+ if enable {
+ DmUdevSetSyncSupport(1)
+ } else {
+ DmUdevSetSyncSupport(0)
+ }
+
+ return UdevSyncSupported()
+}
+
+// CookieSupported returns whether the version of device-mapper supports the
+// use of cookies in tasks.
+// This is largely a lower-level call that other functions use.
+func CookieSupported() bool {
+ return DmCookieSupported() != 0
+}
+
+// RemoveDevice is a useful helper for cleaning up a device.
+func RemoveDevice(name string) error {
+ task, err := TaskCreateNamed(deviceRemove, name)
+ if task == nil {
+ return err
+ }
+
+ cookie := new(uint)
+ if err := task.setCookie(cookie, 0); err != nil {
+ return fmt.Errorf("devicemapper: Can not set cookie: %s", err)
+ }
+ defer UdevWait(cookie)
+
+ dmSawBusy = false // reset before the task is run
+ dmSawEnxio = false
+ if err = task.run(); err != nil {
+ if dmSawBusy {
+ return ErrBusy
+ }
+ if dmSawEnxio {
+ return ErrEnxio
+ }
+ return fmt.Errorf("devicemapper: Error running RemoveDevice %s", err)
+ }
+
+ return nil
+}
+
+// RemoveDeviceDeferred is a useful helper for cleaning up a device, but deferred.
+func RemoveDeviceDeferred(name string) error {
+ logrus.Debugf("devicemapper: RemoveDeviceDeferred START(%s)", name)
+ defer logrus.Debugf("devicemapper: RemoveDeviceDeferred END(%s)", name)
+ task, err := TaskCreateNamed(deviceRemove, name)
+ if task == nil {
+ return err
+ }
+
+ if err := DmTaskDeferredRemove(task.unmanaged); err != 1 {
+ return ErrTaskDeferredRemove
+ }
+
+ // set a task cookie and disable library fallback, or else libdevmapper will
+ // disable udev dm rules and delete the symlink under /dev/mapper by itself,
+ // even if the removal is deferred by the kernel.
+ cookie := new(uint)
+ var flags uint16
+ flags = DmUdevDisableLibraryFallback
+ if err := task.setCookie(cookie, flags); err != nil {
+ return fmt.Errorf("devicemapper: Can not set cookie: %s", err)
+ }
+
+ // libdevmapper and udev rely on System V semaphores for synchronization;
+ // the semaphore created by `task.setCookie` will be cleaned up in `UdevWait`.
+ // These two calls must therefore come in pairs, otherwise semaphores are
+ // leaked, the limit on the number of semaphores defined in
+ // `/proc/sys/kernel/sem` is eventually reached, and every following call
+ // to `task.setCookie` fails.
+ // This call does not wait for the deferred removal to finish executing:
+ // no udev event will be generated, so udev will never increment the
+ // semaphore's value; here UdevWait merely cleans up the semaphore.
+ defer UdevWait(cookie)
+
+ dmSawEnxio = false
+ if err = task.run(); err != nil {
+ if dmSawEnxio {
+ return ErrEnxio
+ }
+ return fmt.Errorf("devicemapper: Error running RemoveDeviceDeferred %s", err)
+ }
+
+ return nil
+}
+
+// CancelDeferredRemove cancels a deferred remove for a device.
+func CancelDeferredRemove(deviceName string) error {
+ task, err := TaskCreateNamed(deviceTargetMsg, deviceName)
+ if task == nil {
+ return err
+ }
+
+ if err := task.setSector(0); err != nil {
+ return fmt.Errorf("devicemapper: Can't set sector %s", err)
+ }
+
+ if err := task.setMessage(fmt.Sprintf("@cancel_deferred_remove")); err != nil {
+ return fmt.Errorf("devicemapper: Can't set message %s", err)
+ }
+
+ dmSawBusy = false
+ dmSawEnxio = false
+ if err := task.run(); err != nil {
+ // The device might already be in the process of being deleted
+ if dmSawBusy {
+ return ErrBusy
+ } else if dmSawEnxio {
+ return ErrEnxio
+ }
+ return fmt.Errorf("devicemapper: Error running CancelDeferredRemove %s", err)
+
+ }
+ return nil
+}
+
+// GetBlockDeviceSize returns the size of a block device identified by the specified file.
+func GetBlockDeviceSize(file *os.File) (uint64, error) {
+ size, err := ioctlBlkGetSize64(file.Fd())
+ if err != nil {
+ logrus.Errorf("devicemapper: Error getblockdevicesize: %s", err)
+ return 0, ErrGetBlockSize
+ }
+ return uint64(size), nil
+}
+
+// BlockDeviceDiscard runs discard for the given path.
+// This is used as a workaround for the kernel not discarding blocks on
+// the thin pool when we remove a thinp device, so we do it
+// manually.
+func BlockDeviceDiscard(path string) error {
+ file, err := os.OpenFile(path, os.O_RDWR, 0)
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+
+ size, err := GetBlockDeviceSize(file)
+ if err != nil {
+ return err
+ }
+
+ if err := ioctlBlkDiscard(file.Fd(), 0, size); err != nil {
+ return err
+ }
+
+ // Without this sometimes the remove of the device that happens after
+ // discard fails with EBUSY.
+ unix.Sync()
+
+ return nil
+}
+
+// CreatePool is the programmatic example of "dmsetup create".
+// It creates a device with the specified poolName, data and metadata file and block size.
+func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error {
+ task, err := TaskCreateNamed(deviceCreate, poolName)
+ if task == nil {
+ return err
+ }
+
+ size, err := GetBlockDeviceSize(dataFile)
+ if err != nil {
+ return fmt.Errorf("devicemapper: Can't get data size %s", err)
+ }
+
+ params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize)
+ if err := task.addTarget(0, size/512, "thin-pool", params); err != nil {
+ return fmt.Errorf("devicemapper: Can't add target %s", err)
+ }
+
+ cookie := new(uint)
+ var flags uint16
+ flags = DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag
+ if err := task.setCookie(cookie, flags); err != nil {
+ return fmt.Errorf("devicemapper: Can't set cookie %s", err)
+ }
+ defer UdevWait(cookie)
+
+ if err := task.run(); err != nil {
+ return fmt.Errorf("devicemapper: Error running deviceCreate (CreatePool) %s", err)
+ }
+
+ return nil
+}
+
+// ReloadPool is the programmatic example of "dmsetup reload".
+// It reloads the table with the specified poolName, data and metadata file and block size. +func ReloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { + task, err := TaskCreateNamed(deviceReload, poolName) + if task == nil { + return err + } + + size, err := GetBlockDeviceSize(dataFile) + if err != nil { + return fmt.Errorf("devicemapper: Can't get data size %s", err) + } + + params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) + if err := task.addTarget(0, size/512, "thin-pool", params); err != nil { + return fmt.Errorf("devicemapper: Can't add target %s", err) + } + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running ReloadPool %s", err) + } + + return nil +} + +// GetDeps is the programmatic example of "dmsetup deps". +// It outputs a list of devices referenced by the live table for the specified device. +func GetDeps(name string) (*Deps, error) { + task, err := TaskCreateNamed(deviceDeps, name) + if task == nil { + return nil, err + } + if err := task.run(); err != nil { + return nil, err + } + return task.getDeps() +} + +// GetInfo is the programmatic example of "dmsetup info". +// It outputs some brief information about the device. +func GetInfo(name string) (*Info, error) { + task, err := TaskCreateNamed(deviceInfo, name) + if task == nil { + return nil, err + } + if err := task.run(); err != nil { + return nil, err + } + return task.getInfo() +} + +// GetInfoWithDeferred is the programmatic example of "dmsetup info", but deferred. +// It outputs some brief information about the device. +func GetInfoWithDeferred(name string) (*Info, error) { + task, err := TaskCreateNamed(deviceInfo, name) + if task == nil { + return nil, err + } + if err := task.run(); err != nil { + return nil, err + } + return task.getInfoWithDeferred() +} + +// GetDriverVersion is the programmatic example of "dmsetup version". +// It outputs version information of the driver. +func GetDriverVersion() (string, error) { + task := TaskCreate(deviceVersion) + if task == nil { + return "", fmt.Errorf("devicemapper: Can't create deviceVersion task") + } + if err := task.run(); err != nil { + return "", err + } + return task.getDriverVersion() +} + +// GetStatus is the programmatic example of "dmsetup status". +// It outputs status information for the specified device name. +func GetStatus(name string) (uint64, uint64, string, string, error) { + task, err := TaskCreateNamed(deviceStatus, name) + if task == nil { + logrus.Debugf("devicemapper: GetStatus() Error TaskCreateNamed: %s", err) + return 0, 0, "", "", err + } + if err := task.run(); err != nil { + logrus.Debugf("devicemapper: GetStatus() Error Run: %s", err) + return 0, 0, "", "", err + } + + devinfo, err := task.getInfo() + if err != nil { + logrus.Debugf("devicemapper: GetStatus() Error GetInfo: %s", err) + return 0, 0, "", "", err + } + if devinfo.Exists == 0 { + logrus.Debugf("devicemapper: GetStatus() Non existing device %s", name) + return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name) + } + + _, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil)) + return start, length, targetType, params, nil +} + +// GetTable is the programmatic example for "dmsetup table". +// It outputs the current table for the specified device name. 
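Callers typically feed the four return values of `GetStatus` above (and of `GetTable`, defined next) straight into logging or parsing code. A small hypothetical example; the device name is assumed and the shape of the printed params string is only illustrative of a thin-pool target:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/devicemapper"
)

func main() {
	// For a thin-pool device, targetType is "thin-pool" and params carries
	// the pool's usage counters; length is in 512-byte sectors, mirroring
	// the addTarget(0, size/512, ...) calls elsewhere in this file.
	start, length, targetType, params, err := devicemapper.GetStatus("docker-thinpool")
	if err != nil {
		panic(err)
	}
	fmt.Printf("start=%d sectors=%d type=%s params=%q\n", start, length, targetType, params)
}
```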
+func GetTable(name string) (uint64, uint64, string, string, error) { + task, err := TaskCreateNamed(deviceTable, name) + if task == nil { + logrus.Debugf("devicemapper: GetTable() Error TaskCreateNamed: %s", err) + return 0, 0, "", "", err + } + if err := task.run(); err != nil { + logrus.Debugf("devicemapper: GetTable() Error Run: %s", err) + return 0, 0, "", "", err + } + + devinfo, err := task.getInfo() + if err != nil { + logrus.Debugf("devicemapper: GetTable() Error GetInfo: %s", err) + return 0, 0, "", "", err + } + if devinfo.Exists == 0 { + logrus.Debugf("devicemapper: GetTable() Non existing device %s", name) + return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name) + } + + _, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil)) + return start, length, targetType, params, nil +} + +// SetTransactionID sets a transaction id for the specified device name. +func SetTransactionID(poolName string, oldID uint64, newID uint64) error { + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("set_transaction_id %d %d", oldID, newID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running SetTransactionID %s", err) + } + return nil +} + +// SuspendDevice is the programmatic example of "dmsetup suspend". +// It suspends the specified device. +func SuspendDevice(name string) error { + task, err := TaskCreateNamed(deviceSuspend, name) + if task == nil { + return err + } + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceSuspend %s", err) + } + return nil +} + +// ResumeDevice is the programmatic example of "dmsetup resume". +// It un-suspends the specified device. +func ResumeDevice(name string) error { + task, err := TaskCreateNamed(deviceResume, name) + if task == nil { + return err + } + + cookie := new(uint) + if err := task.setCookie(cookie, 0); err != nil { + return fmt.Errorf("devicemapper: Can't set cookie %s", err) + } + defer UdevWait(cookie) + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceResume %s", err) + } + + return nil +} + +// CreateDevice creates a device with the specified poolName with the specified device id. +func CreateDevice(poolName string, deviceID int) error { + logrus.Debugf("devicemapper: CreateDevice(poolName=%v, deviceID=%v)", poolName, deviceID) + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("create_thin %d", deviceID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + dmSawExist = false // reset before the task is run + if err := task.run(); err != nil { + // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id. + if dmSawExist { + return ErrDeviceIDExists + } + + return fmt.Errorf("devicemapper: Error running CreateDevice %s", err) + + } + return nil +} + +// DeleteDevice deletes a device with the specified poolName with the specified device id. 
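Taken together, the message-based helpers in this file implement the whole thin-device lifecycle. A hypothetical end-to-end sketch under assumed names: the pool device path, device name, ID, and size are illustrative, and `DeleteDevice` is the function defined just below:

```go
package main

import "github.com/docker/docker/pkg/devicemapper"

func provisionAndDiscard() error {
	pool := "/dev/mapper/docker-thinpool" // assumed thin-pool device path
	const id = 1
	const size = uint64(1 << 30) // 1 GiB; ActivateDevice converts to sectors

	// Allocate the thin device inside the pool...
	if err := devicemapper.CreateDevice(pool, id); err != nil {
		return err
	}
	// ...map it as a visible device-mapper device...
	if err := devicemapper.ActivateDevice(pool, "thin-1", id, size); err != nil {
		return err
	}
	// ...then tear the mapping down and release the ID in the pool.
	if err := devicemapper.RemoveDevice("thin-1"); err != nil {
		return err
	}
	return devicemapper.DeleteDevice(pool, id)
}

func main() { _ = provisionAndDiscard() }
```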
+func DeleteDevice(poolName string, deviceID int) error { + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("delete %d", deviceID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + dmSawBusy = false + if err := task.run(); err != nil { + if dmSawBusy { + return ErrBusy + } + return fmt.Errorf("devicemapper: Error running DeleteDevice %s", err) + } + return nil +} + +// ActivateDevice activates the device identified by the specified +// poolName, name and deviceID with the specified size. +func ActivateDevice(poolName string, name string, deviceID int, size uint64) error { + return activateDevice(poolName, name, deviceID, size, "") +} + +// ActivateDeviceWithExternal activates the device identified by the specified +// poolName, name and deviceID with the specified size. +func ActivateDeviceWithExternal(poolName string, name string, deviceID int, size uint64, external string) error { + return activateDevice(poolName, name, deviceID, size, external) +} + +func activateDevice(poolName string, name string, deviceID int, size uint64, external string) error { + task, err := TaskCreateNamed(deviceCreate, name) + if task == nil { + return err + } + + var params string + if len(external) > 0 { + params = fmt.Sprintf("%s %d %s", poolName, deviceID, external) + } else { + params = fmt.Sprintf("%s %d", poolName, deviceID) + } + if err := task.addTarget(0, size/512, "thin", params); err != nil { + return fmt.Errorf("devicemapper: Can't add target %s", err) + } + if err := task.setAddNode(addNodeOnCreate); err != nil { + return fmt.Errorf("devicemapper: Can't add node %s", err) + } + + cookie := new(uint) + if err := task.setCookie(cookie, 0); err != nil { + return fmt.Errorf("devicemapper: Can't set cookie %s", err) + } + + defer UdevWait(cookie) + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceCreate (ActivateDevice) %s", err) + } + + return nil +} + +// CreateSnapDeviceRaw creates a snapshot device. Caller needs to suspend and resume the origin device if it is active. +func CreateSnapDeviceRaw(poolName string, deviceID int, baseDeviceID int) error { + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("create_snap %d %d", deviceID, baseDeviceID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + dmSawExist = false // reset before the task is run + if err := task.run(); err != nil { + // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id. 
+ if dmSawExist {
+ return ErrDeviceIDExists
+ }
+ return fmt.Errorf("devicemapper: Error running deviceCreate (CreateSnapDeviceRaw) %s", err)
+ }
+
+ return nil
+}
+
+// CreateSnapDevice creates a snapshot based on the device identified by baseName and baseDeviceID.
+func CreateSnapDevice(poolName string, deviceID int, baseName string, baseDeviceID int) error {
+ devinfo, _ := GetInfo(baseName)
+ doSuspend := devinfo != nil && devinfo.Exists != 0
+
+ if doSuspend {
+ if err := SuspendDevice(baseName); err != nil {
+ return err
+ }
+ }
+
+ if err := CreateSnapDeviceRaw(poolName, deviceID, baseDeviceID); err != nil {
+ if doSuspend {
+ if err2 := ResumeDevice(baseName); err2 != nil {
+ return fmt.Errorf("CreateSnapDeviceRaw Error: (%v): ResumeDevice Error: (%v)", err, err2)
+ }
+ }
+ return err
+ }
+
+ if doSuspend {
+ if err := ResumeDevice(baseName); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/moby/moby/pkg/devicemapper/devmapper_log.go b/vendor/github.com/moby/moby/pkg/devicemapper/devmapper_log.go
new file mode 100644
index 000000000..098d2405e
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/devicemapper/devmapper_log.go
@@ -0,0 +1,121 @@
+// +build linux,cgo
+
+package devicemapper
+
+import "C"
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/Sirupsen/logrus"
+)
+
+// DevmapperLogger defines methods required to register as a callback for
+// logging events received from devicemapper. Note that devicemapper sends
+// *all* logs to callbacks regardless of level (including debug logs), so it's
+// recommended not to spam the console with the output.
+type DevmapperLogger interface {
+ // DMLog is the logging callback containing all of the information from
+ // devicemapper. The interface is identical to the C libdm counterpart.
+ DMLog(level int, file string, line int, dmError int, message string)
+}
+
+// dmLogger is the current logger in use that our messages are forwarded to.
+var dmLogger DevmapperLogger
+
+// LogInit changes the logging callback that is invoked after libdm logs have
+// been processed for error-message information. The default logger simply
+// forwards all logs to logrus. Calling LogInit(nil) disables the calling of
+// callbacks.
+func LogInit(logger DevmapperLogger) {
+ dmLogger = logger
+}
+
+// Due to the way cgo works this has to be in a separate file, as devmapper.go has
+// definitions in the cgo block, which is incompatible with using "//export"
+
+// DevmapperLogCallback exports the devmapper log callback for cgo. Note that
+// because we are using callbacks, this function will be called for *every* log
+// in libdm (even debug ones because there's no way of setting the verbosity
+// level for an external logging callback).
+//export DevmapperLogCallback
+func DevmapperLogCallback(level C.int, file *C.char, line, dmErrnoOrClass C.int, message *C.char) {
+ msg := C.GoString(message)
+
+ // Track what errno libdm saw, because the library only gives us 0 or 1.
+ if level < LogLevelDebug {
+ if strings.Contains(msg, "busy") {
+ dmSawBusy = true
+ }
+
+ if strings.Contains(msg, "File exists") {
+ dmSawExist = true
+ }
+
+ if strings.Contains(msg, "No such device or address") {
+ dmSawEnxio = true
+ }
+ }
+
+ if dmLogger != nil {
+ dmLogger.DMLog(int(level), C.GoString(file), int(line), int(dmErrnoOrClass), msg)
+ }
+}
+
+// DefaultLogger is the default logger used by pkg/devicemapper. It forwards
+// all logs that are of higher or equal priority to the given level to the
+// corresponding logrus level.
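Before the type definition below, a short usage sketch: swapping in a more verbose `DefaultLogger` is how a caller would surface libdm's chatter during debugging, since the logger registered in this file's `init()` only forwards `LogLevelFatal`. All of the names used here are part of this package:

```go
package main

import "github.com/docker/docker/pkg/devicemapper"

func main() {
	// Forward everything up to and including debug output to logrus instead
	// of the default fatal-only filter.
	devicemapper.LogInit(devicemapper.DefaultLogger{
		Level: devicemapper.LogLevelDebug,
	})
}
```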
+type DefaultLogger struct {
+ // Level corresponds to the highest libdm level that will be forwarded to
+ // logrus. In order to change this, register a new DefaultLogger.
+ Level int
+}
+
+// DMLog is the logging callback containing all of the information from
+// devicemapper. The interface is identical to the C libdm counterpart.
+func (l DefaultLogger) DMLog(level int, file string, line, dmError int, message string) {
+ if int(level) <= l.Level {
+ // Forward the log to the correct logrus level, if allowed by dmLogLevel.
+ logMsg := fmt.Sprintf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
+ switch level {
+ case LogLevelFatal, LogLevelErr:
+ logrus.Error(logMsg)
+ case LogLevelWarn:
+ logrus.Warn(logMsg)
+ case LogLevelNotice, LogLevelInfo:
+ logrus.Info(logMsg)
+ case LogLevelDebug:
+ logrus.Debug(logMsg)
+ default:
+ // Don't drop any "unknown" levels.
+ logrus.Info(logMsg)
+ }
+ }
+}
+
+// registerLogCallback registers our own logging callback function for libdm
+// (which is DevmapperLogCallback).
+//
+// Because libdm only gives us {0,1} error codes we need to parse the logs
+// produced by libdm (to set dmSawBusy and so on). Note that by registering a
+// callback using DevmapperLogCallback, libdm will no longer output logs to
+// stderr so we have to log everything ourselves. None of this handling is
+// optional because we depend on log callbacks to parse the logs, and if we
+// don't forward the log information we'll be in a lot of trouble when
+// debugging things.
+func registerLogCallback() {
+ LogWithErrnoInit()
+}
+
+func init() {
+ // Use the default logger by default. We only allow LogLevelFatal by
+ // default, because internally we mask a lot of libdm errors by retrying
+ // and similar tricks. Also, libdm is very chatty and we don't want to
+ // worry users for no reason.
+ dmLogger = DefaultLogger{
+ Level: LogLevelFatal,
+ }
+
+ // Register as early as possible so we don't miss anything.
+ registerLogCallback()
+}
diff --git a/vendor/github.com/moby/moby/pkg/devicemapper/devmapper_wrapper.go b/vendor/github.com/moby/moby/pkg/devicemapper/devmapper_wrapper.go
new file mode 100644
index 000000000..da3b43f79
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/devicemapper/devmapper_wrapper.go
@@ -0,0 +1,253 @@
+// +build linux,cgo
+
+package devicemapper
+
+/*
+#cgo LDFLAGS: -L. -ldevmapper
+#define _GNU_SOURCE
+#include <libdevmapper.h>
+#include <linux/fs.h> // FIXME: present only for BLKGETSIZE64, maybe we can remove it?
+
+// FIXME: Can't we find a way to do the logging in pure Go?
+extern void DevmapperLogCallback(int level, char *file, int line, int dm_errno_or_class, char *str);
+
+static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...)
+{
+ char *buffer = NULL;
+ va_list ap;
+ int ret;
+
+ va_start(ap, f);
+ ret = vasprintf(&buffer, f, ap);
+ va_end(ap);
+ if (ret < 0) {
+ // memory allocation failed -- should never happen?
+ return;
+ }
+
+ DevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer);
+ free(buffer);
+}
+
+static void log_with_errno_init()
+{
+ dm_log_with_errno_init(log_cb);
+}
+*/
+import "C"
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+type (
+ cdmTask C.struct_dm_task
+)
+
+// IOCTL consts
+const (
+ BlkGetSize64 = C.BLKGETSIZE64
+ BlkDiscard = C.BLKDISCARD
+)
+
+// Devicemapper cookie flags.
+const ( + DmUdevDisableSubsystemRulesFlag = C.DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG + DmUdevDisableDiskRulesFlag = C.DM_UDEV_DISABLE_DISK_RULES_FLAG + DmUdevDisableOtherRulesFlag = C.DM_UDEV_DISABLE_OTHER_RULES_FLAG + DmUdevDisableLibraryFallback = C.DM_UDEV_DISABLE_LIBRARY_FALLBACK +) + +// DeviceMapper mapped functions. +var ( + DmGetLibraryVersion = dmGetLibraryVersionFct + DmGetNextTarget = dmGetNextTargetFct + DmSetDevDir = dmSetDevDirFct + DmTaskAddTarget = dmTaskAddTargetFct + DmTaskCreate = dmTaskCreateFct + DmTaskDestroy = dmTaskDestroyFct + DmTaskGetDeps = dmTaskGetDepsFct + DmTaskGetInfo = dmTaskGetInfoFct + DmTaskGetDriverVersion = dmTaskGetDriverVersionFct + DmTaskRun = dmTaskRunFct + DmTaskSetAddNode = dmTaskSetAddNodeFct + DmTaskSetCookie = dmTaskSetCookieFct + DmTaskSetMessage = dmTaskSetMessageFct + DmTaskSetName = dmTaskSetNameFct + DmTaskSetRo = dmTaskSetRoFct + DmTaskSetSector = dmTaskSetSectorFct + DmUdevWait = dmUdevWaitFct + DmUdevSetSyncSupport = dmUdevSetSyncSupportFct + DmUdevGetSyncSupport = dmUdevGetSyncSupportFct + DmCookieSupported = dmCookieSupportedFct + LogWithErrnoInit = logWithErrnoInitFct + DmTaskDeferredRemove = dmTaskDeferredRemoveFct + DmTaskGetInfoWithDeferred = dmTaskGetInfoWithDeferredFct +) + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func dmTaskDestroyFct(task *cdmTask) { + C.dm_task_destroy((*C.struct_dm_task)(task)) +} + +func dmTaskCreateFct(taskType int) *cdmTask { + return (*cdmTask)(C.dm_task_create(C.int(taskType))) +} + +func dmTaskRunFct(task *cdmTask) int { + ret, _ := C.dm_task_run((*C.struct_dm_task)(task)) + return int(ret) +} + +func dmTaskSetNameFct(task *cdmTask, name string) int { + Cname := C.CString(name) + defer free(Cname) + + return int(C.dm_task_set_name((*C.struct_dm_task)(task), Cname)) +} + +func dmTaskSetMessageFct(task *cdmTask, message string) int { + Cmessage := C.CString(message) + defer free(Cmessage) + + return int(C.dm_task_set_message((*C.struct_dm_task)(task), Cmessage)) +} + +func dmTaskSetSectorFct(task *cdmTask, sector uint64) int { + return int(C.dm_task_set_sector((*C.struct_dm_task)(task), C.uint64_t(sector))) +} + +func dmTaskSetCookieFct(task *cdmTask, cookie *uint, flags uint16) int { + cCookie := C.uint32_t(*cookie) + defer func() { + *cookie = uint(cCookie) + }() + return int(C.dm_task_set_cookie((*C.struct_dm_task)(task), &cCookie, C.uint16_t(flags))) +} + +func dmTaskSetAddNodeFct(task *cdmTask, addNode AddNodeType) int { + return int(C.dm_task_set_add_node((*C.struct_dm_task)(task), C.dm_add_node_t(addNode))) +} + +func dmTaskSetRoFct(task *cdmTask) int { + return int(C.dm_task_set_ro((*C.struct_dm_task)(task))) +} + +func dmTaskAddTargetFct(task *cdmTask, + start, size uint64, ttype, params string) int { + + Cttype := C.CString(ttype) + defer free(Cttype) + + Cparams := C.CString(params) + defer free(Cparams) + + return int(C.dm_task_add_target((*C.struct_dm_task)(task), C.uint64_t(start), C.uint64_t(size), Cttype, Cparams)) +} + +func dmTaskGetDepsFct(task *cdmTask) *Deps { + Cdeps := C.dm_task_get_deps((*C.struct_dm_task)(task)) + if Cdeps == nil { + return nil + } + + // golang issue: https://github.com/golang/go/issues/11925 + hdr := reflect.SliceHeader{ + Data: uintptr(unsafe.Pointer(uintptr(unsafe.Pointer(Cdeps)) + unsafe.Sizeof(*Cdeps))), + Len: int(Cdeps.count), + Cap: int(Cdeps.count), + } + devices := *(*[]C.uint64_t)(unsafe.Pointer(&hdr)) + + deps := &Deps{ + Count: uint32(Cdeps.count), + Filler: uint32(Cdeps.filler), + } + for _, device := range devices { + deps.Device 
= append(deps.Device, uint64(device))
+ }
+ return deps
+}
+
+func dmTaskGetInfoFct(task *cdmTask, info *Info) int {
+ Cinfo := C.struct_dm_info{}
+ defer func() {
+ info.Exists = int(Cinfo.exists)
+ info.Suspended = int(Cinfo.suspended)
+ info.LiveTable = int(Cinfo.live_table)
+ info.InactiveTable = int(Cinfo.inactive_table)
+ info.OpenCount = int32(Cinfo.open_count)
+ info.EventNr = uint32(Cinfo.event_nr)
+ info.Major = uint32(Cinfo.major)
+ info.Minor = uint32(Cinfo.minor)
+ info.ReadOnly = int(Cinfo.read_only)
+ info.TargetCount = int32(Cinfo.target_count)
+ }()
+ return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo))
+}
+
+func dmTaskGetDriverVersionFct(task *cdmTask) string {
+ buffer := C.malloc(128)
+ defer C.free(buffer)
+ res := C.dm_task_get_driver_version((*C.struct_dm_task)(task), (*C.char)(buffer), 128)
+ if res == 0 {
+ return ""
+ }
+ return C.GoString((*C.char)(buffer))
+}
+
+func dmGetNextTargetFct(task *cdmTask, next unsafe.Pointer, start, length *uint64, target, params *string) unsafe.Pointer {
+ var (
+ Cstart, Clength C.uint64_t
+ CtargetType, Cparams *C.char
+ )
+ defer func() {
+ *start = uint64(Cstart)
+ *length = uint64(Clength)
+ *target = C.GoString(CtargetType)
+ *params = C.GoString(Cparams)
+ }()
+
+ nextp := C.dm_get_next_target((*C.struct_dm_task)(task), next, &Cstart, &Clength, &CtargetType, &Cparams)
+ return nextp
+}
+
+func dmUdevSetSyncSupportFct(syncWithUdev int) {
+ (C.dm_udev_set_sync_support(C.int(syncWithUdev)))
+}
+
+func dmUdevGetSyncSupportFct() int {
+ return int(C.dm_udev_get_sync_support())
+}
+
+func dmUdevWaitFct(cookie uint) int {
+ return int(C.dm_udev_wait(C.uint32_t(cookie)))
+}
+
+func dmCookieSupportedFct() int {
+ return int(C.dm_cookie_supported())
+}
+
+func logWithErrnoInitFct() {
+ C.log_with_errno_init()
+}
+
+func dmSetDevDirFct(dir string) int {
+ Cdir := C.CString(dir)
+ defer free(Cdir)
+
+ return int(C.dm_set_dev_dir(Cdir))
+}
+
+func dmGetLibraryVersionFct(version *string) int {
+ buffer := C.CString(string(make([]byte, 128)))
+ defer free(buffer)
+ defer func() {
+ *version = C.GoString(buffer)
+ }()
+ return int(C.dm_get_library_version(buffer, 128))
+}
diff --git a/vendor/github.com/moby/moby/pkg/devicemapper/devmapper_wrapper_deferred_remove.go b/vendor/github.com/moby/moby/pkg/devicemapper/devmapper_wrapper_deferred_remove.go
new file mode 100644
index 000000000..5bdd97d9c
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/devicemapper/devmapper_wrapper_deferred_remove.go
@@ -0,0 +1,34 @@
+// +build linux,cgo,!libdm_no_deferred_remove
+
+package devicemapper
+
+/*
+#cgo LDFLAGS: -L. -ldevmapper
+#include <libdevmapper.h>
+*/
+import "C"
+
+// LibraryDeferredRemovalSupport is supported when statically linked.
+const LibraryDeferredRemovalSupport = true + +func dmTaskDeferredRemoveFct(task *cdmTask) int { + return int(C.dm_task_deferred_remove((*C.struct_dm_task)(task))) +} + +func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int { + Cinfo := C.struct_dm_info{} + defer func() { + info.Exists = int(Cinfo.exists) + info.Suspended = int(Cinfo.suspended) + info.LiveTable = int(Cinfo.live_table) + info.InactiveTable = int(Cinfo.inactive_table) + info.OpenCount = int32(Cinfo.open_count) + info.EventNr = uint32(Cinfo.event_nr) + info.Major = uint32(Cinfo.major) + info.Minor = uint32(Cinfo.minor) + info.ReadOnly = int(Cinfo.read_only) + info.TargetCount = int32(Cinfo.target_count) + info.DeferredRemove = int(Cinfo.deferred_remove) + }() + return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) +} diff --git a/vendor/github.com/moby/moby/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go b/vendor/github.com/moby/moby/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go new file mode 100644 index 000000000..968b2ce0c --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go @@ -0,0 +1,15 @@ +// +build linux,cgo,libdm_no_deferred_remove + +package devicemapper + +// LibraryDeferredRemovalSupport is not supported when statically linked. +const LibraryDeferredRemovalSupport = false + +func dmTaskDeferredRemoveFct(task *cdmTask) int { + // Error. Nobody should be calling it. + return -1 +} + +func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int { + return -1 +} diff --git a/vendor/github.com/moby/moby/pkg/devicemapper/ioctl.go b/vendor/github.com/moby/moby/pkg/devicemapper/ioctl.go new file mode 100644 index 000000000..50ea7c482 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/devicemapper/ioctl.go @@ -0,0 +1,28 @@ +// +build linux,cgo + +package devicemapper + +import ( + "unsafe" + + "golang.org/x/sys/unix" +) + +func ioctlBlkGetSize64(fd uintptr) (int64, error) { + var size int64 + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 { + return 0, err + } + return size, nil +} + +func ioctlBlkDiscard(fd uintptr, offset, length uint64) error { + var r [2]uint64 + r[0] = offset + r[1] = length + + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 { + return err + } + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/devicemapper/log.go b/vendor/github.com/moby/moby/pkg/devicemapper/log.go new file mode 100644 index 000000000..cee5e5454 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/devicemapper/log.go @@ -0,0 +1,11 @@ +package devicemapper + +// definitions from lvm2 lib/log/log.h +const ( + LogLevelFatal = 2 + iota // _LOG_FATAL + LogLevelErr // _LOG_ERR + LogLevelWarn // _LOG_WARN + LogLevelNotice // _LOG_NOTICE + LogLevelInfo // _LOG_INFO + LogLevelDebug // _LOG_DEBUG +) diff --git a/vendor/github.com/moby/moby/pkg/directory/directory.go b/vendor/github.com/moby/moby/pkg/directory/directory.go new file mode 100644 index 000000000..1715ef45d --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/directory/directory.go @@ -0,0 +1,26 @@ +package directory + +import ( + "io/ioutil" + "os" + "path/filepath" +) + +// MoveToSubdir moves all contents of a directory to a subdirectory underneath the original path +func MoveToSubdir(oldpath, subdir string) error { + + infos, err := ioutil.ReadDir(oldpath) + if err != nil { + return err + } + for _, info := range infos { + if info.Name() != subdir { + oldName := 
filepath.Join(oldpath, info.Name()) + newName := filepath.Join(oldpath, subdir, info.Name()) + if err := os.Rename(oldName, newName); err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/directory/directory_test.go b/vendor/github.com/moby/moby/pkg/directory/directory_test.go new file mode 100644 index 000000000..2b7a4657b --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/directory/directory_test.go @@ -0,0 +1,192 @@ +package directory + +import ( + "io/ioutil" + "os" + "path/filepath" + "reflect" + "sort" + "testing" +) + +// Size of an empty directory should be 0 +func TestSizeEmpty(t *testing.T) { + var dir string + var err error + if dir, err = ioutil.TempDir(os.TempDir(), "testSizeEmptyDirectory"); err != nil { + t.Fatalf("failed to create directory: %s", err) + } + + var size int64 + if size, _ = Size(dir); size != 0 { + t.Fatalf("empty directory has size: %d", size) + } +} + +// Size of a directory with one empty file should be 0 +func TestSizeEmptyFile(t *testing.T) { + var dir string + var err error + if dir, err = ioutil.TempDir(os.TempDir(), "testSizeEmptyFile"); err != nil { + t.Fatalf("failed to create directory: %s", err) + } + + var file *os.File + if file, err = ioutil.TempFile(dir, "file"); err != nil { + t.Fatalf("failed to create file: %s", err) + } + + var size int64 + if size, _ = Size(file.Name()); size != 0 { + t.Fatalf("directory with one file has size: %d", size) + } +} + +// Size of a directory with one 5-byte file should be 5 +func TestSizeNonemptyFile(t *testing.T) { + var dir string + var err error + if dir, err = ioutil.TempDir(os.TempDir(), "testSizeNonemptyFile"); err != nil { + t.Fatalf("failed to create directory: %s", err) + } + + var file *os.File + if file, err = ioutil.TempFile(dir, "file"); err != nil { + t.Fatalf("failed to create file: %s", err) + } + + d := []byte{97, 98, 99, 100, 101} + file.Write(d) + + var size int64 + if size, _ = Size(file.Name()); size != 5 { + t.Fatalf("directory with one 5-byte file has size: %d", size) + } +} + +// Size of a directory with one empty directory should be 0 +func TestSizeNestedDirectoryEmpty(t *testing.T) { + var dir string + var err error + if dir, err = ioutil.TempDir(os.TempDir(), "testSizeNestedDirectoryEmpty"); err != nil { + t.Fatalf("failed to create directory: %s", err) + } + if dir, err = ioutil.TempDir(dir, "nested"); err != nil { + t.Fatalf("failed to create nested directory: %s", err) + } + + var size int64 + if size, _ = Size(dir); size != 0 { + t.Fatalf("directory with one empty directory has size: %d", size) + } +} + +// Test directory with 1 file and 1 empty directory +func TestSizeFileAndNestedDirectoryEmpty(t *testing.T) { + var dir string + var err error + if dir, err = ioutil.TempDir(os.TempDir(), "testSizeFileAndNestedDirectoryEmpty"); err != nil { + t.Fatalf("failed to create directory: %s", err) + } + if dir, err = ioutil.TempDir(dir, "nested"); err != nil { + t.Fatalf("failed to create nested directory: %s", err) + } + + var file *os.File + if file, err = ioutil.TempFile(dir, "file"); err != nil { + t.Fatalf("failed to create file: %s", err) + } + + d := []byte{100, 111, 99, 107, 101, 114} + file.Write(d) + + var size int64 + if size, _ = Size(dir); size != 6 { + t.Fatalf("directory with 6-byte file and empty directory has size: %d", size) + } +} + +// Test directory with 1 file and 1 non-empty directory +func TestSizeFileAndNestedDirectoryNonempty(t *testing.T) { + var dir, dirNested string + var err error + if dir, err = 
ioutil.TempDir(os.TempDir(), "TestSizeFileAndNestedDirectoryNonempty"); err != nil { + t.Fatalf("failed to create directory: %s", err) + } + if dirNested, err = ioutil.TempDir(dir, "nested"); err != nil { + t.Fatalf("failed to create nested directory: %s", err) + } + + var file *os.File + if file, err = ioutil.TempFile(dir, "file"); err != nil { + t.Fatalf("failed to create file: %s", err) + } + + data := []byte{100, 111, 99, 107, 101, 114} + file.Write(data) + + var nestedFile *os.File + if nestedFile, err = ioutil.TempFile(dirNested, "file"); err != nil { + t.Fatalf("failed to create file in nested directory: %s", err) + } + + nestedData := []byte{100, 111, 99, 107, 101, 114} + nestedFile.Write(nestedData) + + var size int64 + if size, _ = Size(dir); size != 12 { + t.Fatalf("directory with 6-byte file and nested directory with 6-byte file has size: %d", size) + } +} + +// Test migration of directory to a subdir underneath itself +func TestMoveToSubdir(t *testing.T) { + var outerDir, subDir string + var err error + + if outerDir, err = ioutil.TempDir(os.TempDir(), "TestMoveToSubdir"); err != nil { + t.Fatalf("failed to create directory: %v", err) + } + + if subDir, err = ioutil.TempDir(outerDir, "testSub"); err != nil { + t.Fatalf("failed to create subdirectory: %v", err) + } + + // write 4 temp files in the outer dir to get moved + filesList := []string{"a", "b", "c", "d"} + for _, fName := range filesList { + if file, err := os.Create(filepath.Join(outerDir, fName)); err != nil { + t.Fatalf("couldn't create temp file %q: %v", fName, err) + } else { + file.WriteString(fName) + file.Close() + } + } + + if err = MoveToSubdir(outerDir, filepath.Base(subDir)); err != nil { + t.Fatalf("Error during migration of content to subdirectory: %v", err) + } + // validate that the files were moved to the subdirectory + infos, err := ioutil.ReadDir(subDir) + if err != nil { + t.Fatal(err) + } + if len(infos) != 4 { + t.Fatalf("Should be four files in the subdir after the migration: actual length: %d", len(infos)) + } + var results []string + for _, info := range infos { + results = append(results, info.Name()) + } + sort.Sort(sort.StringSlice(results)) + if !reflect.DeepEqual(filesList, results) { + t.Fatalf("Results after migration do not equal list of files: expected: %v, got: %v", filesList, results) + } +} + +// Test a non-existing directory +func TestSizeNonExistingDirectory(t *testing.T) { + if _, err := Size("/thisdirectoryshouldnotexist/TestSizeNonExistingDirectory"); err == nil { + t.Fatalf("error is expected") + } +} diff --git a/vendor/github.com/moby/moby/pkg/directory/directory_unix.go b/vendor/github.com/moby/moby/pkg/directory/directory_unix.go new file mode 100644 index 000000000..397251bdb --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/directory/directory_unix.go @@ -0,0 +1,48 @@ +// +build linux freebsd solaris + +package directory + +import ( + "os" + "path/filepath" + "syscall" +) + +// Size walks a directory tree and returns its total size in bytes. +func Size(dir string) (size int64, err error) { + data := make(map[uint64]struct{}) + err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error { + if err != nil { + // if dir does not exist, Size() returns the error. + // if dir/x disappeared while walking, Size() ignores dir/x. 
+ if os.IsNotExist(err) && d != dir {
+ return nil
+ }
+ return err
+ }
+
+ // Ignore directory sizes
+ if fileInfo == nil {
+ return nil
+ }
+
+ s := fileInfo.Size()
+ if fileInfo.IsDir() || s == 0 {
+ return nil
+ }
+
+ // Check inode to handle hard links correctly
+ inode := fileInfo.Sys().(*syscall.Stat_t).Ino
+ // inode is not a uint64 on all platforms. Cast it to avoid issues.
+ if _, exists := data[uint64(inode)]; exists {
+ return nil
+ }
+ // inode is not a uint64 on all platforms. Cast it to avoid issues.
+ data[uint64(inode)] = struct{}{}
+
+ size += s
+
+ return nil
+ })
+ return
+}
diff --git a/vendor/github.com/moby/moby/pkg/directory/directory_windows.go b/vendor/github.com/moby/moby/pkg/directory/directory_windows.go
new file mode 100644
index 000000000..6fb0917c4
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/directory/directory_windows.go
@@ -0,0 +1,37 @@
+// +build windows
+
+package directory
+
+import (
+ "os"
+ "path/filepath"
+)
+
+// Size walks a directory tree and returns its total size in bytes.
+func Size(dir string) (size int64, err error) {
+ err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error {
+ if err != nil {
+ // if dir does not exist, Size() returns the error.
+ // if dir/x disappeared while walking, Size() ignores dir/x.
+ if os.IsNotExist(err) && d != dir {
+ return nil
+ }
+ return err
+ }
+
+ // Ignore directory sizes
+ if fileInfo == nil {
+ return nil
+ }
+
+ s := fileInfo.Size()
+ if fileInfo.IsDir() || s == 0 {
+ return nil
+ }
+
+ size += s
+
+ return nil
+ })
+ return
+}
diff --git a/vendor/github.com/moby/moby/pkg/discovery/README.md b/vendor/github.com/moby/moby/pkg/discovery/README.md
new file mode 100644
index 000000000..d8ed9ce71
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/discovery/README.md
@@ -0,0 +1,41 @@
+---
+page_title: Docker discovery
+page_description: discovery
+page_keywords: docker, clustering, discovery
+---
+
+# Discovery
+
+Docker comes with multiple Discovery backends.
+
+## Backends
+
+### Using etcd
+
+Point your Docker Engine instances to a common etcd instance. You can specify
+the address Docker uses to advertise the node using the `--cluster-advertise`
+flag.
+
+```bash
+$ dockerd -H=<node_ip:2376> --cluster-advertise=<node_ip:2376> --cluster-store etcd://<etcd_ip1>,<etcd_ip2>/<path>
+```
+
+### Using consul
+
+Point your Docker Engine instances to a common Consul instance. You can specify
+the address Docker uses to advertise the node using the `--cluster-advertise`
+flag.
+
+```bash
+$ dockerd -H=<node_ip:2376> --cluster-advertise=<node_ip:2376> --cluster-store consul://<consul_ip>/<path>
+```
+
+### Using zookeeper
+
+Point your Docker Engine instances to a common Zookeeper instance. You can specify
+the address Docker uses to advertise the node using the `--cluster-advertise`
+flag.
+
+```bash
+$ dockerd -H=<node_ip:2376> --cluster-advertise=<node_ip:2376> --cluster-store zk://<zk_addr1>,<zk_addr2>/<path>
+```
diff --git a/vendor/github.com/moby/moby/pkg/discovery/backends.go b/vendor/github.com/moby/moby/pkg/discovery/backends.go
new file mode 100644
index 000000000..2eab550e2
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/discovery/backends.go
@@ -0,0 +1,107 @@
+package discovery
+
+import (
+ "fmt"
+ "net"
+ "strings"
+ "time"
+
+ "github.com/Sirupsen/logrus"
+)
+
+var (
+ // Backends is a global map of discovery backends indexed by their
+ // associated scheme.
+ backends = make(map[string]Backend)
+)
+
+// Register makes a discovery backend available by the provided scheme.
+// If Register is called twice with the same scheme an error is returned.
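Backends call `Register` (defined next) from their `init()`, so importing a backend package for side effects is enough to make its scheme available to `New`. A hedged usage sketch of that wiring, assuming the file backend that appears later in this patch; the path and timing values are illustrative:

```go
package main

import (
	"time"

	"github.com/docker/docker/pkg/discovery"
	// Importing a backend registers its scheme ("file") via init().
	_ "github.com/docker/docker/pkg/discovery/file"
)

func main() {
	// "file:///tmp/cluster" parses to scheme "file" and URI "/tmp/cluster".
	backend, err := discovery.New("file:///tmp/cluster", 10*time.Second, 60*time.Second, nil)
	if err != nil {
		panic(err)
	}
	_ = backend // Watch/Register would be called on this Backend
}
```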
+func Register(scheme string, d Backend) error {
+ if _, exists := backends[scheme]; exists {
+ return fmt.Errorf("scheme already registered %s", scheme)
+ }
+ logrus.WithField("name", scheme).Debugf("Registering discovery service")
+ backends[scheme] = d
+ return nil
+}
+
+func parse(rawurl string) (string, string) {
+ parts := strings.SplitN(rawurl, "://", 2)
+
+ // node1:port,node2:port => nodes://node1:port,node2:port
+ if len(parts) == 1 {
+ return "nodes", parts[0]
+ }
+ return parts[0], parts[1]
+}
+
+// ParseAdvertise parses the --cluster-advertise daemon config which accepts
+// <ip-address>:<port> or <interface-name>:<port>
+func ParseAdvertise(advertise string) (string, error) {
+ var (
+ iface *net.Interface
+ addrs []net.Addr
+ err error
+ )
+
+ addr, port, err := net.SplitHostPort(advertise)
+
+ if err != nil {
+ return "", fmt.Errorf("invalid --cluster-advertise configuration: %s: %v", advertise, err)
+ }
+
+ ip := net.ParseIP(addr)
+ // If it is a valid ip-address, use it as is
+ if ip != nil {
+ return advertise, nil
+ }
+
+ // If advertise is a valid interface name, get the valid IPv4 address and use it to advertise
+ ifaceName := addr
+ iface, err = net.InterfaceByName(ifaceName)
+ if err != nil {
+ return "", fmt.Errorf("invalid cluster advertise IP address or interface name (%s) : %v", advertise, err)
+ }
+
+ addrs, err = iface.Addrs()
+ if err != nil {
+ return "", fmt.Errorf("unable to get advertise IP address from interface (%s) : %v", advertise, err)
+ }
+
+ if len(addrs) == 0 {
+ return "", fmt.Errorf("no available advertise IP address in interface (%s)", advertise)
+ }
+
+ addr = ""
+ for _, a := range addrs {
+ ip, _, err := net.ParseCIDR(a.String())
+ if err != nil {
+ return "", fmt.Errorf("error deriving advertise ip-address in interface (%s) : %v", advertise, err)
+ }
+ if ip.To4() == nil || ip.IsLoopback() {
+ continue
+ }
+ addr = ip.String()
+ break
+ }
+ if addr == "" {
+ return "", fmt.Errorf("could not find a valid ip-address in interface %s", advertise)
+ }
+
+ addr = net.JoinHostPort(addr, port)
+ return addr, nil
+}
+
+// New returns a new Discovery given a URL, heartbeat and ttl settings.
+// Returns an error if the URL scheme is not supported.
+func New(rawurl string, heartbeat time.Duration, ttl time.Duration, clusterOpts map[string]string) (Backend, error) {
+ scheme, uri := parse(rawurl)
+ if backend, exists := backends[scheme]; exists {
+ logrus.WithFields(logrus.Fields{"name": scheme, "uri": uri}).Debugf("Initializing discovery service")
+ err := backend.Initialize(uri, heartbeat, ttl, clusterOpts)
+ return backend, err
+ }
+
+ return nil, ErrNotSupported
+}
diff --git a/vendor/github.com/moby/moby/pkg/discovery/discovery.go b/vendor/github.com/moby/moby/pkg/discovery/discovery.go
new file mode 100644
index 000000000..ca7f58745
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/discovery/discovery.go
@@ -0,0 +1,35 @@
+package discovery
+
+import (
+ "errors"
+ "time"
+)
+
+var (
+ // ErrNotSupported is returned when a discovery service is not supported.
+ ErrNotSupported = errors.New("discovery service not supported")
+
+ // ErrNotImplemented is returned when a discovery feature is not implemented
+ // by the discovery backend.
+ ErrNotImplemented = errors.New("not implemented in this discovery service")
+)
+
+// Watcher provides watching over a cluster for nodes joining and leaving.
+type Watcher interface {
+ // Watch the discovery for entry changes.
+ // Returns a channel that will receive changes or an error.
+ // Providing a non-nil stopCh can be used to stop watching.
+ Watch(stopCh <-chan struct{}) (<-chan Entries, <-chan error) +} + +// Backend is implemented by discovery backends which manage cluster entries. +type Backend interface { + // Watcher must be provided by every backend. + Watcher + + // Initialize the discovery with URIs, a heartbeat, a ttl and optional settings. + Initialize(string, time.Duration, time.Duration, map[string]string) error + + // Register to the discovery. + Register(string) error +} diff --git a/vendor/github.com/moby/moby/pkg/discovery/discovery_test.go b/vendor/github.com/moby/moby/pkg/discovery/discovery_test.go new file mode 100644 index 000000000..6084f3ef0 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/discovery/discovery_test.go @@ -0,0 +1,137 @@ +package discovery + +import ( + "testing" + + "github.com/go-check/check" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } + +type DiscoverySuite struct{} + +var _ = check.Suite(&DiscoverySuite{}) + +func (s *DiscoverySuite) TestNewEntry(c *check.C) { + entry, err := NewEntry("127.0.0.1:2375") + c.Assert(err, check.IsNil) + c.Assert(entry.Equals(&Entry{Host: "127.0.0.1", Port: "2375"}), check.Equals, true) + c.Assert(entry.String(), check.Equals, "127.0.0.1:2375") + + entry, err = NewEntry("[2001:db8:0:f101::2]:2375") + c.Assert(err, check.IsNil) + c.Assert(entry.Equals(&Entry{Host: "2001:db8:0:f101::2", Port: "2375"}), check.Equals, true) + c.Assert(entry.String(), check.Equals, "[2001:db8:0:f101::2]:2375") + + _, err = NewEntry("127.0.0.1") + c.Assert(err, check.NotNil) +} + +func (s *DiscoverySuite) TestParse(c *check.C) { + scheme, uri := parse("127.0.0.1:2375") + c.Assert(scheme, check.Equals, "nodes") + c.Assert(uri, check.Equals, "127.0.0.1:2375") + + scheme, uri = parse("localhost:2375") + c.Assert(scheme, check.Equals, "nodes") + c.Assert(uri, check.Equals, "localhost:2375") + + scheme, uri = parse("scheme://127.0.0.1:2375") + c.Assert(scheme, check.Equals, "scheme") + c.Assert(uri, check.Equals, "127.0.0.1:2375") + + scheme, uri = parse("scheme://localhost:2375") + c.Assert(scheme, check.Equals, "scheme") + c.Assert(uri, check.Equals, "localhost:2375") + + scheme, uri = parse("") + c.Assert(scheme, check.Equals, "nodes") + c.Assert(uri, check.Equals, "") +} + +func (s *DiscoverySuite) TestCreateEntries(c *check.C) { + entries, err := CreateEntries(nil) + c.Assert(entries, check.DeepEquals, Entries{}) + c.Assert(err, check.IsNil) + + entries, err = CreateEntries([]string{"127.0.0.1:2375", "127.0.0.2:2375", "[2001:db8:0:f101::2]:2375", ""}) + c.Assert(err, check.IsNil) + expected := Entries{ + &Entry{Host: "127.0.0.1", Port: "2375"}, + &Entry{Host: "127.0.0.2", Port: "2375"}, + &Entry{Host: "2001:db8:0:f101::2", Port: "2375"}, + } + c.Assert(entries.Equals(expected), check.Equals, true) + + _, err = CreateEntries([]string{"127.0.0.1", "127.0.0.2"}) + c.Assert(err, check.NotNil) +} + +func (s *DiscoverySuite) TestContainsEntry(c *check.C) { + entries, err := CreateEntries([]string{"127.0.0.1:2375", "127.0.0.2:2375", ""}) + c.Assert(err, check.IsNil) + c.Assert(entries.Contains(&Entry{Host: "127.0.0.1", Port: "2375"}), check.Equals, true) + c.Assert(entries.Contains(&Entry{Host: "127.0.0.3", Port: "2375"}), check.Equals, false) +} + +func (s *DiscoverySuite) TestEntriesEquality(c *check.C) { + entries := Entries{ + &Entry{Host: "127.0.0.1", Port: "2375"}, + &Entry{Host: "127.0.0.2", Port: "2375"}, + } + + // Same + c.Assert(entries.Equals(Entries{ + &Entry{Host: "127.0.0.1", Port: "2375"}, + &Entry{Host: 
"127.0.0.2", Port: "2375"}, + }), check. + Equals, true) + + // Different size + c.Assert(entries.Equals(Entries{ + &Entry{Host: "127.0.0.1", Port: "2375"}, + &Entry{Host: "127.0.0.2", Port: "2375"}, + &Entry{Host: "127.0.0.3", Port: "2375"}, + }), check. + Equals, false) + + // Different content + c.Assert(entries.Equals(Entries{ + &Entry{Host: "127.0.0.1", Port: "2375"}, + &Entry{Host: "127.0.0.42", Port: "2375"}, + }), check. + Equals, false) + +} + +func (s *DiscoverySuite) TestEntriesDiff(c *check.C) { + entry1 := &Entry{Host: "1.1.1.1", Port: "1111"} + entry2 := &Entry{Host: "2.2.2.2", Port: "2222"} + entry3 := &Entry{Host: "3.3.3.3", Port: "3333"} + entries := Entries{entry1, entry2} + + // No diff + added, removed := entries.Diff(Entries{entry2, entry1}) + c.Assert(added, check.HasLen, 0) + c.Assert(removed, check.HasLen, 0) + + // Add + added, removed = entries.Diff(Entries{entry2, entry3, entry1}) + c.Assert(added, check.HasLen, 1) + c.Assert(added.Contains(entry3), check.Equals, true) + c.Assert(removed, check.HasLen, 0) + + // Remove + added, removed = entries.Diff(Entries{entry2}) + c.Assert(added, check.HasLen, 0) + c.Assert(removed, check.HasLen, 1) + c.Assert(removed.Contains(entry1), check.Equals, true) + + // Add and remove + added, removed = entries.Diff(Entries{entry1, entry3}) + c.Assert(added, check.HasLen, 1) + c.Assert(added.Contains(entry3), check.Equals, true) + c.Assert(removed, check.HasLen, 1) + c.Assert(removed.Contains(entry2), check.Equals, true) +} diff --git a/vendor/github.com/moby/moby/pkg/discovery/entry.go b/vendor/github.com/moby/moby/pkg/discovery/entry.go new file mode 100644 index 000000000..ce23bbf89 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/discovery/entry.go @@ -0,0 +1,94 @@ +package discovery + +import "net" + +// NewEntry creates a new entry. +func NewEntry(url string) (*Entry, error) { + host, port, err := net.SplitHostPort(url) + if err != nil { + return nil, err + } + return &Entry{host, port}, nil +} + +// An Entry represents a host. +type Entry struct { + Host string + Port string +} + +// Equals returns true if cmp contains the same data. +func (e *Entry) Equals(cmp *Entry) bool { + return e.Host == cmp.Host && e.Port == cmp.Port +} + +// String returns the string form of an entry. +func (e *Entry) String() string { + return net.JoinHostPort(e.Host, e.Port) +} + +// Entries is a list of *Entry with some helpers. +type Entries []*Entry + +// Equals returns true if cmp contains the same data. +func (e Entries) Equals(cmp Entries) bool { + // Check if the file has really changed. + if len(e) != len(cmp) { + return false + } + for i := range e { + if !e[i].Equals(cmp[i]) { + return false + } + } + return true +} + +// Contains returns true if the Entries contain a given Entry. +func (e Entries) Contains(entry *Entry) bool { + for _, curr := range e { + if curr.Equals(entry) { + return true + } + } + return false +} + +// Diff compares two entries and returns the added and removed entries. +func (e Entries) Diff(cmp Entries) (Entries, Entries) { + added := Entries{} + for _, entry := range cmp { + if !e.Contains(entry) { + added = append(added, entry) + } + } + + removed := Entries{} + for _, entry := range e { + if !cmp.Contains(entry) { + removed = append(removed, entry) + } + } + + return added, removed +} + +// CreateEntries returns an array of entries based on the given addresses. 
+func CreateEntries(addrs []string) (Entries, error) { + entries := Entries{} + if addrs == nil { + return entries, nil + } + + for _, addr := range addrs { + if len(addr) == 0 { + continue + } + entry, err := NewEntry(addr) + if err != nil { + return nil, err + } + entries = append(entries, entry) + } + return entries, nil +} diff --git a/vendor/github.com/moby/moby/pkg/discovery/file/file.go b/vendor/github.com/moby/moby/pkg/discovery/file/file.go new file mode 100644 index 000000000..2b8e27b75 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/discovery/file/file.go @@ -0,0 +1,107 @@ +package file + +import ( + "fmt" + "io/ioutil" + "strings" + "time" + + "github.com/docker/docker/pkg/discovery" +) + +// Discovery is exported +type Discovery struct { + heartbeat time.Duration + path string +} + +func init() { + Init() +} + +// Init is exported +func Init() { + discovery.Register("file", &Discovery{}) +} + +// Initialize is exported +func (s *Discovery) Initialize(path string, heartbeat time.Duration, ttl time.Duration, _ map[string]string) error { + s.path = path + s.heartbeat = heartbeat + return nil +} + +func parseFileContent(content []byte) []string { + var result []string + for _, line := range strings.Split(strings.TrimSpace(string(content)), "\n") { + line = strings.TrimSpace(line) + // Ignoring line starts with # + if strings.HasPrefix(line, "#") { + continue + } + // Inlined # comment also ignored. + if strings.Contains(line, "#") { + line = line[0:strings.Index(line, "#")] + // Trim additional spaces caused by above stripping. + line = strings.TrimSpace(line) + } + result = append(result, discovery.Generate(line)...) + } + return result +} + +func (s *Discovery) fetch() (discovery.Entries, error) { + fileContent, err := ioutil.ReadFile(s.path) + if err != nil { + return nil, fmt.Errorf("failed to read '%s': %v", s.path, err) + } + return discovery.CreateEntries(parseFileContent(fileContent)) +} + +// Watch is exported +func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { + ch := make(chan discovery.Entries) + errCh := make(chan error) + ticker := time.NewTicker(s.heartbeat) + + go func() { + defer close(errCh) + defer close(ch) + + // Send the initial entries if available. + currentEntries, err := s.fetch() + if err != nil { + errCh <- err + } else { + ch <- currentEntries + } + + // Periodically send updates. + for { + select { + case <-ticker.C: + newEntries, err := s.fetch() + if err != nil { + errCh <- err + continue + } + + // Check if the file has really changed. + if !newEntries.Equals(currentEntries) { + ch <- newEntries + } + currentEntries = newEntries + case <-stopCh: + ticker.Stop() + return + } + } + }() + + return ch, errCh +} + +// Register is exported +func (s *Discovery) Register(addr string) error { + return discovery.ErrNotImplemented +} diff --git a/vendor/github.com/moby/moby/pkg/discovery/file/file_test.go b/vendor/github.com/moby/moby/pkg/discovery/file/file_test.go new file mode 100644 index 000000000..667f00ba0 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/discovery/file/file_test.go @@ -0,0 +1,114 @@ +package file + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/docker/docker/pkg/discovery" + + "github.com/go-check/check" +) + +// Hook up gocheck into the "go test" runner. 
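+//
+// The tests below exercise the plain-text host-list format the file backend
+// reads; a sketch of what such a file can contain:
+//
+//	# full-line and inline comments are stripped
+//	1.1.1.1:2375          # a single node
+//	2.2.2.[2:4]:2375      # a [from:to] range, expanded by discovery.Generate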
+func Test(t *testing.T) { check.TestingT(t) } + +type DiscoverySuite struct{} + +var _ = check.Suite(&DiscoverySuite{}) + +func (s *DiscoverySuite) TestInitialize(c *check.C) { + d := &Discovery{} + d.Initialize("/path/to/file", 1000, 0, nil) + c.Assert(d.path, check.Equals, "/path/to/file") +} + +func (s *DiscoverySuite) TestNew(c *check.C) { + d, err := discovery.New("file:///path/to/file", 0, 0, nil) + c.Assert(err, check.IsNil) + c.Assert(d.(*Discovery).path, check.Equals, "/path/to/file") +} + +func (s *DiscoverySuite) TestContent(c *check.C) { + data := ` +1.1.1.[1:2]:1111 +2.2.2.[2:4]:2222 +` + ips := parseFileContent([]byte(data)) + c.Assert(ips, check.HasLen, 5) + c.Assert(ips[0], check.Equals, "1.1.1.1:1111") + c.Assert(ips[1], check.Equals, "1.1.1.2:1111") + c.Assert(ips[2], check.Equals, "2.2.2.2:2222") + c.Assert(ips[3], check.Equals, "2.2.2.3:2222") + c.Assert(ips[4], check.Equals, "2.2.2.4:2222") +} + +func (s *DiscoverySuite) TestRegister(c *check.C) { + discovery := &Discovery{path: "/path/to/file"} + c.Assert(discovery.Register("0.0.0.0"), check.NotNil) +} + +func (s *DiscoverySuite) TestParsingContentsWithComments(c *check.C) { + data := ` +### test ### +1.1.1.1:1111 # inline comment +# 2.2.2.2:2222 + ### empty line with comment + 3.3.3.3:3333 +### test ### +` + ips := parseFileContent([]byte(data)) + c.Assert(ips, check.HasLen, 2) + c.Assert("1.1.1.1:1111", check.Equals, ips[0]) + c.Assert("3.3.3.3:3333", check.Equals, ips[1]) +} + +func (s *DiscoverySuite) TestWatch(c *check.C) { + data := ` +1.1.1.1:1111 +2.2.2.2:2222 +` + expected := discovery.Entries{ + &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, + &discovery.Entry{Host: "2.2.2.2", Port: "2222"}, + } + + // Create a temporary file and remove it. + tmp, err := ioutil.TempFile(os.TempDir(), "discovery-file-test") + c.Assert(err, check.IsNil) + c.Assert(tmp.Close(), check.IsNil) + c.Assert(os.Remove(tmp.Name()), check.IsNil) + + // Set up file discovery. + d := &Discovery{} + d.Initialize(tmp.Name(), 1000, 0, nil) + stopCh := make(chan struct{}) + ch, errCh := d.Watch(stopCh) + + // Make sure it fires errors since the file doesn't exist. + c.Assert(<-errCh, check.NotNil) + // We have to drain the error channel otherwise Watch will get stuck. + go func() { + for range errCh { + } + }() + + // Write the file and make sure we get the expected value back. + c.Assert(ioutil.WriteFile(tmp.Name(), []byte(data), 0600), check.IsNil) + c.Assert(<-ch, check.DeepEquals, expected) + + // Add a new entry and look it up. + expected = append(expected, &discovery.Entry{Host: "3.3.3.3", Port: "3333"}) + f, err := os.OpenFile(tmp.Name(), os.O_APPEND|os.O_WRONLY, 0600) + c.Assert(err, check.IsNil) + c.Assert(f, check.NotNil) + _, err = f.WriteString("\n3.3.3.3:3333\n") + c.Assert(err, check.IsNil) + f.Close() + c.Assert(<-ch, check.DeepEquals, expected) + + // Stop and make sure it closes all channels. 
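+	// Closing stopCh makes the watch goroutine return, which closes both
+	// ch and errCh; the zero-value receives below confirm the channels
+	// were closed rather than left blocked.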
+ close(stopCh) + c.Assert(<-ch, check.IsNil) + c.Assert(<-errCh, check.IsNil) +} diff --git a/vendor/github.com/moby/moby/pkg/discovery/generator.go b/vendor/github.com/moby/moby/pkg/discovery/generator.go new file mode 100644 index 000000000..d22298298 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/discovery/generator.go @@ -0,0 +1,35 @@ +package discovery + +import ( + "fmt" + "regexp" + "strconv" +) + +// Generate takes care of IP generation +func Generate(pattern string) []string { + re, _ := regexp.Compile(`\[(.+):(.+)\]`) + submatch := re.FindStringSubmatch(pattern) + if submatch == nil { + return []string{pattern} + } + + from, err := strconv.Atoi(submatch[1]) + if err != nil { + return []string{pattern} + } + to, err := strconv.Atoi(submatch[2]) + if err != nil { + return []string{pattern} + } + + template := re.ReplaceAllString(pattern, "%d") + + var result []string + for val := from; val <= to; val++ { + entry := fmt.Sprintf(template, val) + result = append(result, entry) + } + + return result +} diff --git a/vendor/github.com/moby/moby/pkg/discovery/generator_test.go b/vendor/github.com/moby/moby/pkg/discovery/generator_test.go new file mode 100644 index 000000000..6281c4666 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/discovery/generator_test.go @@ -0,0 +1,53 @@ +package discovery + +import ( + "github.com/go-check/check" +) + +func (s *DiscoverySuite) TestGeneratorNotGenerate(c *check.C) { + ips := Generate("127.0.0.1") + c.Assert(len(ips), check.Equals, 1) + c.Assert(ips[0], check.Equals, "127.0.0.1") +} + +func (s *DiscoverySuite) TestGeneratorWithPortNotGenerate(c *check.C) { + ips := Generate("127.0.0.1:8080") + c.Assert(len(ips), check.Equals, 1) + c.Assert(ips[0], check.Equals, "127.0.0.1:8080") +} + +func (s *DiscoverySuite) TestGeneratorMatchFailedNotGenerate(c *check.C) { + ips := Generate("127.0.0.[1]") + c.Assert(len(ips), check.Equals, 1) + c.Assert(ips[0], check.Equals, "127.0.0.[1]") +} + +func (s *DiscoverySuite) TestGeneratorWithPort(c *check.C) { + ips := Generate("127.0.0.[1:11]:2375") + c.Assert(len(ips), check.Equals, 11) + c.Assert(ips[0], check.Equals, "127.0.0.1:2375") + c.Assert(ips[1], check.Equals, "127.0.0.2:2375") + c.Assert(ips[2], check.Equals, "127.0.0.3:2375") + c.Assert(ips[3], check.Equals, "127.0.0.4:2375") + c.Assert(ips[4], check.Equals, "127.0.0.5:2375") + c.Assert(ips[5], check.Equals, "127.0.0.6:2375") + c.Assert(ips[6], check.Equals, "127.0.0.7:2375") + c.Assert(ips[7], check.Equals, "127.0.0.8:2375") + c.Assert(ips[8], check.Equals, "127.0.0.9:2375") + c.Assert(ips[9], check.Equals, "127.0.0.10:2375") + c.Assert(ips[10], check.Equals, "127.0.0.11:2375") +} + +func (s *DiscoverySuite) TestGenerateWithMalformedInputAtRangeStart(c *check.C) { + malformedInput := "127.0.0.[x:11]:2375" + ips := Generate(malformedInput) + c.Assert(len(ips), check.Equals, 1) + c.Assert(ips[0], check.Equals, malformedInput) +} + +func (s *DiscoverySuite) TestGenerateWithMalformedInputAtRangeEnd(c *check.C) { + malformedInput := "127.0.0.[1:x]:2375" + ips := Generate(malformedInput) + c.Assert(len(ips), check.Equals, 1) + c.Assert(ips[0], check.Equals, malformedInput) +} diff --git a/vendor/github.com/moby/moby/pkg/discovery/kv/kv.go b/vendor/github.com/moby/moby/pkg/discovery/kv/kv.go new file mode 100644 index 000000000..77eee7d45 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/discovery/kv/kv.go @@ -0,0 +1,192 @@ +package kv + +import ( + "fmt" + "path" + "strings" + "time" + + "github.com/Sirupsen/logrus" + 
"github.com/docker/docker/pkg/discovery" + "github.com/docker/go-connections/tlsconfig" + "github.com/docker/libkv" + "github.com/docker/libkv/store" + "github.com/docker/libkv/store/consul" + "github.com/docker/libkv/store/etcd" + "github.com/docker/libkv/store/zookeeper" +) + +const ( + defaultDiscoveryPath = "docker/nodes" +) + +// Discovery is exported +type Discovery struct { + backend store.Backend + store store.Store + heartbeat time.Duration + ttl time.Duration + prefix string + path string +} + +func init() { + Init() +} + +// Init is exported +func Init() { + // Register to libkv + zookeeper.Register() + consul.Register() + etcd.Register() + + // Register to internal discovery service + discovery.Register("zk", &Discovery{backend: store.ZK}) + discovery.Register("consul", &Discovery{backend: store.CONSUL}) + discovery.Register("etcd", &Discovery{backend: store.ETCD}) +} + +// Initialize is exported +func (s *Discovery) Initialize(uris string, heartbeat time.Duration, ttl time.Duration, clusterOpts map[string]string) error { + var ( + parts = strings.SplitN(uris, "/", 2) + addrs = strings.Split(parts[0], ",") + err error + ) + + // A custom prefix to the path can be optionally used. + if len(parts) == 2 { + s.prefix = parts[1] + } + + s.heartbeat = heartbeat + s.ttl = ttl + + // Use a custom path if specified in discovery options + dpath := defaultDiscoveryPath + if clusterOpts["kv.path"] != "" { + dpath = clusterOpts["kv.path"] + } + + s.path = path.Join(s.prefix, dpath) + + var config *store.Config + if clusterOpts["kv.cacertfile"] != "" && clusterOpts["kv.certfile"] != "" && clusterOpts["kv.keyfile"] != "" { + logrus.Info("Initializing discovery with TLS") + tlsConfig, err := tlsconfig.Client(tlsconfig.Options{ + CAFile: clusterOpts["kv.cacertfile"], + CertFile: clusterOpts["kv.certfile"], + KeyFile: clusterOpts["kv.keyfile"], + }) + if err != nil { + return err + } + config = &store.Config{ + // Set ClientTLS to trigger https (bug in libkv/etcd) + ClientTLS: &store.ClientTLSConfig{ + CACertFile: clusterOpts["kv.cacertfile"], + CertFile: clusterOpts["kv.certfile"], + KeyFile: clusterOpts["kv.keyfile"], + }, + // The actual TLS config that will be used + TLS: tlsConfig, + } + } else { + logrus.Info("Initializing discovery without TLS") + } + + // Creates a new store, will ignore options given + // if not supported by the chosen store + s.store, err = libkv.NewStore(s.backend, addrs, config) + return err +} + +// Watch the store until either there's a store error or we receive a stop request. +// Returns false if we shouldn't attempt watching the store anymore (stop request received). +func (s *Discovery) watchOnce(stopCh <-chan struct{}, watchCh <-chan []*store.KVPair, discoveryCh chan discovery.Entries, errCh chan error) bool { + for { + select { + case pairs := <-watchCh: + if pairs == nil { + return true + } + + logrus.WithField("discovery", s.backend).Debugf("Watch triggered with %d nodes", len(pairs)) + + // Convert `KVPair` into `discovery.Entry`. + addrs := make([]string, len(pairs)) + for _, pair := range pairs { + addrs = append(addrs, string(pair.Value)) + } + + entries, err := discovery.CreateEntries(addrs) + if err != nil { + errCh <- err + } else { + discoveryCh <- entries + } + case <-stopCh: + // We were requested to stop watching. 
+ return false + } + } +} + +// Watch is exported +func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { + ch := make(chan discovery.Entries) + errCh := make(chan error) + + go func() { + defer close(ch) + defer close(errCh) + + // Forever: Create a store watch, watch until we get an error and then try again. + // Will only stop if we receive a stopCh request. + for { + // Create the path to watch if it does not exist yet + exists, err := s.store.Exists(s.path) + if err != nil { + errCh <- err + } + if !exists { + if err := s.store.Put(s.path, []byte(""), &store.WriteOptions{IsDir: true}); err != nil { + errCh <- err + } + } + + // Set up a watch. + watchCh, err := s.store.WatchTree(s.path, stopCh) + if err != nil { + errCh <- err + } else { + if !s.watchOnce(stopCh, watchCh, ch, errCh) { + return + } + } + + // If we get here it means the store watch channel was closed. This + // is unexpected so let's retry later. + errCh <- fmt.Errorf("Unexpected watch error") + time.Sleep(s.heartbeat) + } + }() + return ch, errCh +} + +// Register is exported +func (s *Discovery) Register(addr string) error { + opts := &store.WriteOptions{TTL: s.ttl} + return s.store.Put(path.Join(s.path, addr), []byte(addr), opts) +} + +// Store returns the underlying store used by KV discovery. +func (s *Discovery) Store() store.Store { + return s.store +} + +// Prefix returns the store prefix +func (s *Discovery) Prefix() string { + return s.prefix +} diff --git a/vendor/github.com/moby/moby/pkg/discovery/kv/kv_test.go b/vendor/github.com/moby/moby/pkg/discovery/kv/kv_test.go new file mode 100644 index 000000000..dab3939dd --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/discovery/kv/kv_test.go @@ -0,0 +1,324 @@ +package kv + +import ( + "errors" + "io/ioutil" + "os" + "path" + "testing" + "time" + + "github.com/docker/docker/pkg/discovery" + "github.com/docker/libkv" + "github.com/docker/libkv/store" + + "github.com/go-check/check" +) + +// Hook up gocheck into the "go test" runner. 
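+//
+// The kv backend under test takes a comma-separated endpoint list with an
+// optional path prefix; a sketch of a direct initialization:
+//
+//	d := &Discovery{backend: store.CONSUL}
+//	_ = d.Initialize("1.1.1.1:8500,2.2.2.2:8500/custom/prefix", 0, 0,
+//		map[string]string{"kv.path": "docker/nodes"})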
+func Test(t *testing.T) { check.TestingT(t) } + +type DiscoverySuite struct{} + +var _ = check.Suite(&DiscoverySuite{}) + +func (ds *DiscoverySuite) TestInitialize(c *check.C) { + storeMock := &FakeStore{ + Endpoints: []string{"127.0.0.1"}, + } + d := &Discovery{backend: store.CONSUL} + d.Initialize("127.0.0.1", 0, 0, nil) + d.store = storeMock + + s := d.store.(*FakeStore) + c.Assert(s.Endpoints, check.HasLen, 1) + c.Assert(s.Endpoints[0], check.Equals, "127.0.0.1") + c.Assert(d.path, check.Equals, defaultDiscoveryPath) + + storeMock = &FakeStore{ + Endpoints: []string{"127.0.0.1:1234"}, + } + d = &Discovery{backend: store.CONSUL} + d.Initialize("127.0.0.1:1234/path", 0, 0, nil) + d.store = storeMock + + s = d.store.(*FakeStore) + c.Assert(s.Endpoints, check.HasLen, 1) + c.Assert(s.Endpoints[0], check.Equals, "127.0.0.1:1234") + c.Assert(d.path, check.Equals, "path/"+defaultDiscoveryPath) + + storeMock = &FakeStore{ + Endpoints: []string{"127.0.0.1:1234", "127.0.0.2:1234", "127.0.0.3:1234"}, + } + d = &Discovery{backend: store.CONSUL} + d.Initialize("127.0.0.1:1234,127.0.0.2:1234,127.0.0.3:1234/path", 0, 0, nil) + d.store = storeMock + + s = d.store.(*FakeStore) + c.Assert(s.Endpoints, check.HasLen, 3) + c.Assert(s.Endpoints[0], check.Equals, "127.0.0.1:1234") + c.Assert(s.Endpoints[1], check.Equals, "127.0.0.2:1234") + c.Assert(s.Endpoints[2], check.Equals, "127.0.0.3:1234") + + c.Assert(d.path, check.Equals, "path/"+defaultDiscoveryPath) +} + +// Extremely limited mock store so we can test initialization +type Mock struct { + // Endpoints passed to InitializeMock + Endpoints []string + + // Options passed to InitializeMock + Options *store.Config +} + +func NewMock(endpoints []string, options *store.Config) (store.Store, error) { + s := &Mock{} + s.Endpoints = endpoints + s.Options = options + return s, nil +} +func (s *Mock) Put(key string, value []byte, opts *store.WriteOptions) error { + return errors.New("Put not supported") +} +func (s *Mock) Get(key string) (*store.KVPair, error) { + return nil, errors.New("Get not supported") +} +func (s *Mock) Delete(key string) error { + return errors.New("Delete not supported") +} + +// Exists mock +func (s *Mock) Exists(key string) (bool, error) { + return false, errors.New("Exists not supported") +} + +// Watch mock +func (s *Mock) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) { + return nil, errors.New("Watch not supported") +} + +// WatchTree mock +func (s *Mock) WatchTree(prefix string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) { + return nil, errors.New("WatchTree not supported") +} + +// NewLock mock +func (s *Mock) NewLock(key string, options *store.LockOptions) (store.Locker, error) { + return nil, errors.New("NewLock not supported") +} + +// List mock +func (s *Mock) List(prefix string) ([]*store.KVPair, error) { + return nil, errors.New("List not supported") +} + +// DeleteTree mock +func (s *Mock) DeleteTree(prefix string) error { + return errors.New("DeleteTree not supported") +} + +// AtomicPut mock +func (s *Mock) AtomicPut(key string, value []byte, previous *store.KVPair, opts *store.WriteOptions) (bool, *store.KVPair, error) { + return false, nil, errors.New("AtomicPut not supported") +} + +// AtomicDelete mock +func (s *Mock) AtomicDelete(key string, previous *store.KVPair) (bool, error) { + return false, errors.New("AtomicDelete not supported") +} + +// Close mock +func (s *Mock) Close() { + return +} + +func (ds *DiscoverySuite) TestInitializeWithCerts(c *check.C) { + cert := 
`-----BEGIN CERTIFICATE----- +MIIDCDCCAfKgAwIBAgIICifG7YeiQOEwCwYJKoZIhvcNAQELMBIxEDAOBgNVBAMT +B1Rlc3QgQ0EwHhcNMTUxMDAxMjMwMDAwWhcNMjAwOTI5MjMwMDAwWjASMRAwDgYD +VQQDEwdUZXN0IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1wRC +O+flnLTK5ImjTurNRHwSejuqGbc4CAvpB0hS+z0QlSs4+zE9h80aC4hz+6caRpds ++J908Q+RvAittMHbpc7VjbZP72G6fiXk7yPPl6C10HhRSoSi3nY+B7F2E8cuz14q +V2e+ejhWhSrBb/keyXpcyjoW1BOAAJ2TIclRRkICSCZrpXUyXxAvzXfpFXo1RhSb +UywN11pfiCQzDUN7sPww9UzFHuAHZHoyfTr27XnJYVUerVYrCPq8vqfn//01qz55 +Xs0hvzGdlTFXhuabFtQnKFH5SNwo/fcznhB7rePOwHojxOpXTBepUCIJLbtNnWFT +V44t9gh5IqIWtoBReQIDAQABo2YwZDAOBgNVHQ8BAf8EBAMCAAYwEgYDVR0TAQH/ +BAgwBgEB/wIBAjAdBgNVHQ4EFgQUZKUI8IIjIww7X/6hvwggQK4bD24wHwYDVR0j +BBgwFoAUZKUI8IIjIww7X/6hvwggQK4bD24wCwYJKoZIhvcNAQELA4IBAQDES2cz +7sCQfDCxCIWH7X8kpi/JWExzUyQEJ0rBzN1m3/x8ySRxtXyGekimBqQwQdFqlwMI +xzAQKkh3ue8tNSzRbwqMSyH14N1KrSxYS9e9szJHfUasoTpQGPmDmGIoRJuq1h6M +ej5x1SCJ7GWCR6xEXKUIE9OftXm9TdFzWa7Ja3OHz/mXteii8VXDuZ5ACq6EE5bY +8sP4gcICfJ5fTrpTlk9FIqEWWQrCGa5wk95PGEj+GJpNogjXQ97wVoo/Y3p1brEn +t5zjN9PAq4H1fuCMdNNA+p1DHNwd+ELTxcMAnb2ajwHvV6lKPXutrTFc4umJToBX +FpTxDmJHEV4bzUzh +-----END CERTIFICATE----- +` + key := `-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEA1wRCO+flnLTK5ImjTurNRHwSejuqGbc4CAvpB0hS+z0QlSs4 ++zE9h80aC4hz+6caRpds+J908Q+RvAittMHbpc7VjbZP72G6fiXk7yPPl6C10HhR +SoSi3nY+B7F2E8cuz14qV2e+ejhWhSrBb/keyXpcyjoW1BOAAJ2TIclRRkICSCZr +pXUyXxAvzXfpFXo1RhSbUywN11pfiCQzDUN7sPww9UzFHuAHZHoyfTr27XnJYVUe +rVYrCPq8vqfn//01qz55Xs0hvzGdlTFXhuabFtQnKFH5SNwo/fcznhB7rePOwHoj +xOpXTBepUCIJLbtNnWFTV44t9gh5IqIWtoBReQIDAQABAoIBAHSWipORGp/uKFXj +i/mut776x8ofsAxhnLBARQr93ID+i49W8H7EJGkOfaDjTICYC1dbpGrri61qk8sx +qX7p3v/5NzKwOIfEpirgwVIqSNYe/ncbxnhxkx6tXtUtFKmEx40JskvSpSYAhmmO +1XSx0E/PWaEN/nLgX/f1eWJIlxlQkk3QeqL+FGbCXI48DEtlJ9+MzMu4pAwZTpj5 +5qtXo5JJ0jRGfJVPAOznRsYqv864AhMdMIWguzk6EGnbaCWwPcfcn+h9a5LMdony +MDHfBS7bb5tkF3+AfnVY3IBMVx7YlsD9eAyajlgiKu4zLbwTRHjXgShy+4Oussz0 +ugNGnkECgYEA/hi+McrZC8C4gg6XqK8+9joD8tnyDZDz88BQB7CZqABUSwvjDqlP +L8hcwo/lzvjBNYGkqaFPUICGWKjeCtd8pPS2DCVXxDQX4aHF1vUur0uYNncJiV3N +XQz4Iemsa6wnKf6M67b5vMXICw7dw0HZCdIHD1hnhdtDz0uVpeevLZ8CgYEA2KCT +Y43lorjrbCgMqtlefkr3GJA9dey+hTzCiWEOOqn9RqGoEGUday0sKhiLofOgmN2B +LEukpKIey8s+Q/cb6lReajDVPDsMweX8i7hz3Wa4Ugp4Xa5BpHqu8qIAE2JUZ7bU +t88aQAYE58pUF+/Lq1QzAQdrjjzQBx6SrBxieecCgYEAvukoPZEC8mmiN1VvbTX+ +QFHmlZha3QaDxChB+QUe7bMRojEUL/fVnzkTOLuVFqSfxevaI/km9n0ac5KtAchV +xjp2bTnBb5EUQFqjopYktWA+xO07JRJtMfSEmjZPbbay1kKC7rdTfBm961EIHaRj +xZUf6M+rOE8964oGrdgdLlECgYEA046GQmx6fh7/82FtdZDRQp9tj3SWQUtSiQZc +qhO59Lq8mjUXz+MgBuJXxkiwXRpzlbaFB0Bca1fUoYw8o915SrDYf/Zu2OKGQ/qa +V81sgiVmDuEgycR7YOlbX6OsVUHrUlpwhY3hgfMe6UtkMvhBvHF/WhroBEIJm1pV +PXZ/CbMCgYEApNWVktFBjOaYfY6SNn4iSts1jgsQbbpglg3kT7PLKjCAhI6lNsbk +dyT7ut01PL6RaW4SeQWtrJIVQaM6vF3pprMKqlc5XihOGAmVqH7rQx9rtQB5TicL +BFrwkQE4HQtQBV60hYQUzzlSk44VFDz+jxIEtacRHaomDRh2FtOTz+I= +-----END RSA PRIVATE KEY----- +` + certFile, err := ioutil.TempFile("", "cert") + c.Assert(err, check.IsNil) + defer os.Remove(certFile.Name()) + certFile.Write([]byte(cert)) + certFile.Close() + keyFile, err := ioutil.TempFile("", "key") + c.Assert(err, check.IsNil) + defer os.Remove(keyFile.Name()) + keyFile.Write([]byte(key)) + keyFile.Close() + + libkv.AddStore("mock", NewMock) + d := &Discovery{backend: "mock"} + err = d.Initialize("127.0.0.3:1234", 0, 0, map[string]string{ + "kv.cacertfile": certFile.Name(), + "kv.certfile": certFile.Name(), + "kv.keyfile": keyFile.Name(), + }) + c.Assert(err, check.IsNil) + s := d.store.(*Mock) + c.Assert(s.Options.TLS, check.NotNil) + c.Assert(s.Options.TLS.RootCAs, check.NotNil) + 
c.Assert(s.Options.TLS.Certificates, check.HasLen, 1) +} + +func (ds *DiscoverySuite) TestWatch(c *check.C) { + mockCh := make(chan []*store.KVPair) + + storeMock := &FakeStore{ + Endpoints: []string{"127.0.0.1:1234"}, + mockKVChan: mockCh, + } + + d := &Discovery{backend: store.CONSUL} + d.Initialize("127.0.0.1:1234/path", 0, 0, nil) + d.store = storeMock + + expected := discovery.Entries{ + &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, + &discovery.Entry{Host: "2.2.2.2", Port: "2222"}, + } + kvs := []*store.KVPair{ + {Key: path.Join("path", defaultDiscoveryPath, "1.1.1.1"), Value: []byte("1.1.1.1:1111")}, + {Key: path.Join("path", defaultDiscoveryPath, "2.2.2.2"), Value: []byte("2.2.2.2:2222")}, + } + + stopCh := make(chan struct{}) + ch, errCh := d.Watch(stopCh) + + // It should fire an error since the first WatchTree call failed. + c.Assert(<-errCh, check.ErrorMatches, "test error") + // We have to drain the error channel otherwise Watch will get stuck. + go func() { + for range errCh { + } + }() + + // Push the entries into the store channel and make sure discovery emits. + mockCh <- kvs + c.Assert(<-ch, check.DeepEquals, expected) + + // Add a new entry. + expected = append(expected, &discovery.Entry{Host: "3.3.3.3", Port: "3333"}) + kvs = append(kvs, &store.KVPair{Key: path.Join("path", defaultDiscoveryPath, "3.3.3.3"), Value: []byte("3.3.3.3:3333")}) + mockCh <- kvs + c.Assert(<-ch, check.DeepEquals, expected) + + close(mockCh) + // Give it enough time to call WatchTree. + time.Sleep(3 * time.Second) + + // Stop and make sure it closes all channels. + close(stopCh) + c.Assert(<-ch, check.IsNil) + c.Assert(<-errCh, check.IsNil) +} + +// FakeStore implements store.Store methods. It mocks all store +// function in a simple, naive way. +type FakeStore struct { + Endpoints []string + Options *store.Config + mockKVChan <-chan []*store.KVPair + + watchTreeCallCount int +} + +func (s *FakeStore) Put(key string, value []byte, options *store.WriteOptions) error { + return nil +} + +func (s *FakeStore) Get(key string) (*store.KVPair, error) { + return nil, nil +} + +func (s *FakeStore) Delete(key string) error { + return nil +} + +func (s *FakeStore) Exists(key string) (bool, error) { + return true, nil +} + +func (s *FakeStore) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) { + return nil, nil +} + +// WatchTree will fail the first time, and return the mockKVchan afterwards. +// This is the behavior we need for testing.. If we need 'moar', should update this. 
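+// The one-shot failure lets TestWatch verify that Watch surfaces the error on
+// errCh and then retries WatchTree after s.heartbeat.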
+func (s *FakeStore) WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) { + if s.watchTreeCallCount == 0 { + s.watchTreeCallCount = 1 + return nil, errors.New("test error") + } + // First calls error + return s.mockKVChan, nil +} + +func (s *FakeStore) NewLock(key string, options *store.LockOptions) (store.Locker, error) { + return nil, nil +} + +func (s *FakeStore) List(directory string) ([]*store.KVPair, error) { + return []*store.KVPair{}, nil +} + +func (s *FakeStore) DeleteTree(directory string) error { + return nil +} + +func (s *FakeStore) AtomicPut(key string, value []byte, previous *store.KVPair, options *store.WriteOptions) (bool, *store.KVPair, error) { + return true, nil, nil +} + +func (s *FakeStore) AtomicDelete(key string, previous *store.KVPair) (bool, error) { + return true, nil +} + +func (s *FakeStore) Close() { +} diff --git a/vendor/github.com/moby/moby/pkg/discovery/memory/memory.go b/vendor/github.com/moby/moby/pkg/discovery/memory/memory.go new file mode 100644 index 000000000..ba8b1f55f --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/discovery/memory/memory.go @@ -0,0 +1,93 @@ +package memory + +import ( + "sync" + "time" + + "github.com/docker/docker/pkg/discovery" +) + +// Discovery implements a discovery backend that keeps +// data in memory. +type Discovery struct { + heartbeat time.Duration + values []string + mu sync.Mutex +} + +func init() { + Init() +} + +// Init registers the memory backend on demand. +func Init() { + discovery.Register("memory", &Discovery{}) +} + +// Initialize sets the heartbeat for the memory backend. +func (s *Discovery) Initialize(_ string, heartbeat time.Duration, _ time.Duration, _ map[string]string) error { + s.heartbeat = heartbeat + s.values = make([]string, 0) + return nil +} + +// Watch sends periodic discovery updates to a channel. +func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { + ch := make(chan discovery.Entries) + errCh := make(chan error) + ticker := time.NewTicker(s.heartbeat) + + go func() { + defer close(errCh) + defer close(ch) + + // Send the initial entries if available. + var currentEntries discovery.Entries + var err error + + s.mu.Lock() + if len(s.values) > 0 { + currentEntries, err = discovery.CreateEntries(s.values) + } + s.mu.Unlock() + + if err != nil { + errCh <- err + } else if currentEntries != nil { + ch <- currentEntries + } + + // Periodically send updates. + for { + select { + case <-ticker.C: + s.mu.Lock() + newEntries, err := discovery.CreateEntries(s.values) + s.mu.Unlock() + if err != nil { + errCh <- err + continue + } + + // Check if the file has really changed. + if !newEntries.Equals(currentEntries) { + ch <- newEntries + } + currentEntries = newEntries + case <-stopCh: + ticker.Stop() + return + } + } + }() + + return ch, errCh +} + +// Register adds a new address to the discovery. +func (s *Discovery) Register(addr string) error { + s.mu.Lock() + s.values = append(s.values, addr) + s.mu.Unlock() + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/discovery/memory/memory_test.go b/vendor/github.com/moby/moby/pkg/discovery/memory/memory_test.go new file mode 100644 index 000000000..c2da0a068 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/discovery/memory/memory_test.go @@ -0,0 +1,48 @@ +package memory + +import ( + "testing" + + "github.com/docker/docker/pkg/discovery" + "github.com/go-check/check" +) + +// Hook up gocheck into the "go test" runner. 
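+//
+// Typical use of the memory backend (sketch): register addresses, then watch
+// for heartbeat-driven updates:
+//
+//	d := &Discovery{}
+//	_ = d.Initialize("", 1000, 0, nil) // untyped 1000 is a tiny (ns-scale) poll interval, handy in tests
+//	_ = d.Register("1.1.1.1:1111")
+//	ch, errCh := d.Watch(stopCh)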
+func Test(t *testing.T) { check.TestingT(t) } + +type discoverySuite struct{} + +var _ = check.Suite(&discoverySuite{}) + +func (s *discoverySuite) TestWatch(c *check.C) { + d := &Discovery{} + d.Initialize("foo", 1000, 0, nil) + stopCh := make(chan struct{}) + ch, errCh := d.Watch(stopCh) + + // We have to drain the error channel otherwise Watch will get stuck. + go func() { + for range errCh { + } + }() + + expected := discovery.Entries{ + &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, + } + + c.Assert(d.Register("1.1.1.1:1111"), check.IsNil) + c.Assert(<-ch, check.DeepEquals, expected) + + expected = discovery.Entries{ + &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, + &discovery.Entry{Host: "2.2.2.2", Port: "2222"}, + } + + c.Assert(d.Register("2.2.2.2:2222"), check.IsNil) + c.Assert(<-ch, check.DeepEquals, expected) + + // Stop and make sure it closes all channels. + close(stopCh) + c.Assert(<-ch, check.IsNil) + c.Assert(<-errCh, check.IsNil) +} diff --git a/vendor/github.com/moby/moby/pkg/discovery/nodes/nodes.go b/vendor/github.com/moby/moby/pkg/discovery/nodes/nodes.go new file mode 100644 index 000000000..c0e3c07b2 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/discovery/nodes/nodes.go @@ -0,0 +1,54 @@ +package nodes + +import ( + "fmt" + "strings" + "time" + + "github.com/docker/docker/pkg/discovery" +) + +// Discovery is exported +type Discovery struct { + entries discovery.Entries +} + +func init() { + Init() +} + +// Init is exported +func Init() { + discovery.Register("nodes", &Discovery{}) +} + +// Initialize is exported +func (s *Discovery) Initialize(uris string, _ time.Duration, _ time.Duration, _ map[string]string) error { + for _, input := range strings.Split(uris, ",") { + for _, ip := range discovery.Generate(input) { + entry, err := discovery.NewEntry(ip) + if err != nil { + return fmt.Errorf("%s, please check you are using the correct discovery (missing token:// ?)", err.Error()) + } + s.entries = append(s.entries, entry) + } + } + + return nil +} + +// Watch is exported +func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { + ch := make(chan discovery.Entries) + go func() { + defer close(ch) + ch <- s.entries + <-stopCh + }() + return ch, nil +} + +// Register is exported +func (s *Discovery) Register(addr string) error { + return discovery.ErrNotImplemented +} diff --git a/vendor/github.com/moby/moby/pkg/discovery/nodes/nodes_test.go b/vendor/github.com/moby/moby/pkg/discovery/nodes/nodes_test.go new file mode 100644 index 000000000..e26568cf5 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/discovery/nodes/nodes_test.go @@ -0,0 +1,51 @@ +package nodes + +import ( + "testing" + + "github.com/docker/docker/pkg/discovery" + + "github.com/go-check/check" +) + +// Hook up gocheck into the "go test" runner. 
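+//
+// The nodes backend is the fallback for scheme-less discovery URLs; a sketch
+// of reaching it through the top-level API:
+//
+//	b, _ := discovery.New("1.1.1.[1:2]:2375,2.2.2.2:2375", 0, 0, nil) // "nodes://" implied
+//	ch, _ := b.Watch(nil)
+//	entries := <-ch // the static, range-expanded entry list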
+func Test(t *testing.T) { check.TestingT(t) } + +type DiscoverySuite struct{} + +var _ = check.Suite(&DiscoverySuite{}) + +func (s *DiscoverySuite) TestInitialize(c *check.C) { + d := &Discovery{} + d.Initialize("1.1.1.1:1111,2.2.2.2:2222", 0, 0, nil) + c.Assert(len(d.entries), check.Equals, 2) + c.Assert(d.entries[0].String(), check.Equals, "1.1.1.1:1111") + c.Assert(d.entries[1].String(), check.Equals, "2.2.2.2:2222") +} + +func (s *DiscoverySuite) TestInitializeWithPattern(c *check.C) { + d := &Discovery{} + d.Initialize("1.1.1.[1:2]:1111,2.2.2.[2:4]:2222", 0, 0, nil) + c.Assert(len(d.entries), check.Equals, 5) + c.Assert(d.entries[0].String(), check.Equals, "1.1.1.1:1111") + c.Assert(d.entries[1].String(), check.Equals, "1.1.1.2:1111") + c.Assert(d.entries[2].String(), check.Equals, "2.2.2.2:2222") + c.Assert(d.entries[3].String(), check.Equals, "2.2.2.3:2222") + c.Assert(d.entries[4].String(), check.Equals, "2.2.2.4:2222") +} + +func (s *DiscoverySuite) TestWatch(c *check.C) { + d := &Discovery{} + d.Initialize("1.1.1.1:1111,2.2.2.2:2222", 0, 0, nil) + expected := discovery.Entries{ + &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, + &discovery.Entry{Host: "2.2.2.2", Port: "2222"}, + } + ch, _ := d.Watch(nil) + c.Assert(expected.Equals(<-ch), check.Equals, true) +} + +func (s *DiscoverySuite) TestRegister(c *check.C) { + d := &Discovery{} + c.Assert(d.Register("0.0.0.0"), check.NotNil) +} diff --git a/vendor/github.com/moby/moby/pkg/filenotify/filenotify.go b/vendor/github.com/moby/moby/pkg/filenotify/filenotify.go new file mode 100644 index 000000000..7a81cbda9 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/filenotify/filenotify.go @@ -0,0 +1,40 @@ +// Package filenotify provides a mechanism for watching file(s) for changes. +// Generally leans on fsnotify, but provides a poll-based notifier which fsnotify does not support. +// These are wrapped up in a common interface so that either can be used interchangeably in your code. 
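+//
+// A typical use (sketch; error handling elided, path hypothetical):
+//
+//	w, _ := filenotify.New() // fs-event watcher if available, else poller
+//	_ = w.Add("/var/log/app.log")
+//	for {
+//		select {
+//		case e := <-w.Events():
+//			// react to e.Op (Create, Write, Chmod, Remove)
+//		case err := <-w.Errors():
+//			// handle watch errors
+//		}
+//	}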
+package filenotify + +import "github.com/fsnotify/fsnotify" + +// FileWatcher is an interface for implementing file notification watchers +type FileWatcher interface { + Events() <-chan fsnotify.Event + Errors() <-chan error + Add(name string) error + Remove(name string) error + Close() error +} + +// New tries to use an fs-event watcher, and falls back to the poller if there is an error +func New() (FileWatcher, error) { + if watcher, err := NewEventWatcher(); err == nil { + return watcher, nil + } + return NewPollingWatcher(), nil +} + +// NewPollingWatcher returns a poll-based file watcher +func NewPollingWatcher() FileWatcher { + return &filePoller{ + events: make(chan fsnotify.Event), + errors: make(chan error), + } +} + +// NewEventWatcher returns an fs-event based file watcher +func NewEventWatcher() (FileWatcher, error) { + watcher, err := fsnotify.NewWatcher() + if err != nil { + return nil, err + } + return &fsNotifyWatcher{watcher}, nil +} diff --git a/vendor/github.com/moby/moby/pkg/filenotify/fsnotify.go b/vendor/github.com/moby/moby/pkg/filenotify/fsnotify.go new file mode 100644 index 000000000..2614e05d3 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/filenotify/fsnotify.go @@ -0,0 +1,18 @@ +package filenotify + +import "github.com/fsnotify/fsnotify" + +// fsNotifyWatcher wraps the fsnotify package to satisfy the FileNotifier interface +type fsNotifyWatcher struct { + *fsnotify.Watcher +} + +// Events returns the fsnotify event channel receiver +func (w *fsNotifyWatcher) Events() <-chan fsnotify.Event { + return w.Watcher.Events +} + +// Errors returns the fsnotify error channel receiver +func (w *fsNotifyWatcher) Errors() <-chan error { + return w.Watcher.Errors +} diff --git a/vendor/github.com/moby/moby/pkg/filenotify/poller.go b/vendor/github.com/moby/moby/pkg/filenotify/poller.go new file mode 100644 index 000000000..b90111bb4 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/filenotify/poller.go @@ -0,0 +1,204 @@ +package filenotify + +import ( + "errors" + "fmt" + "os" + "sync" + "time" + + "github.com/Sirupsen/logrus" + + "github.com/fsnotify/fsnotify" +) + +var ( + // errPollerClosed is returned when the poller is closed + errPollerClosed = errors.New("poller is closed") + // errNoSuchWatch is returned when trying to remove a watch that doesn't exist + errNoSuchWatch = errors.New("watch does not exist") +) + +// watchWaitTime is the time to wait between file poll loops +const watchWaitTime = 200 * time.Millisecond + +// filePoller is used to poll files for changes, especially in cases where fsnotify +// can't be run (e.g. 
when inotify handles are exhausted) +// filePoller satisfies the FileWatcher interface +type filePoller struct { + // watches is the list of files currently being polled, close the associated channel to stop the watch + watches map[string]chan struct{} + // events is the channel to listen to for watch events + events chan fsnotify.Event + // errors is the channel to listen to for watch errors + errors chan error + // mu locks the poller for modification + mu sync.Mutex + // closed is used to specify when the poller has already closed + closed bool +} + +// Add adds a filename to the list of watches +// once added the file is polled for changes in a separate goroutine +func (w *filePoller) Add(name string) error { + w.mu.Lock() + defer w.mu.Unlock() + + if w.closed { + return errPollerClosed + } + + f, err := os.Open(name) + if err != nil { + return err + } + fi, err := os.Stat(name) + if err != nil { + return err + } + + if w.watches == nil { + w.watches = make(map[string]chan struct{}) + } + if _, exists := w.watches[name]; exists { + return fmt.Errorf("watch exists") + } + chClose := make(chan struct{}) + w.watches[name] = chClose + + go w.watch(f, fi, chClose) + return nil +} + +// Remove stops and removes watch with the specified name +func (w *filePoller) Remove(name string) error { + w.mu.Lock() + defer w.mu.Unlock() + return w.remove(name) +} + +func (w *filePoller) remove(name string) error { + if w.closed { + return errPollerClosed + } + + chClose, exists := w.watches[name] + if !exists { + return errNoSuchWatch + } + close(chClose) + delete(w.watches, name) + return nil +} + +// Events returns the event channel +// This is used for notifications on events about watched files +func (w *filePoller) Events() <-chan fsnotify.Event { + return w.events +} + +// Errors returns the errors channel +// This is used for notifications about errors on watched files +func (w *filePoller) Errors() <-chan error { + return w.errors +} + +// Close closes the poller +// All watches are stopped, removed, and the poller cannot be added to +func (w *filePoller) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + + if w.closed { + return nil + } + + w.closed = true + for name := range w.watches { + w.remove(name) + delete(w.watches, name) + } + return nil +} + +// sendEvent publishes the specified event to the events channel +func (w *filePoller) sendEvent(e fsnotify.Event, chClose <-chan struct{}) error { + select { + case w.events <- e: + case <-chClose: + return fmt.Errorf("closed") + } + return nil +} + +// sendErr publishes the specified error to the errors channel +func (w *filePoller) sendErr(e error, chClose <-chan struct{}) error { + select { + case w.errors <- e: + case <-chClose: + return fmt.Errorf("closed") + } + return nil +} + +// watch is responsible for polling the specified file for changes +// upon finding changes to a file or errors, sendEvent/sendErr is called +func (w *filePoller) watch(f *os.File, lastFi os.FileInfo, chClose chan struct{}) { + defer f.Close() + for { + time.Sleep(watchWaitTime) + select { + case <-chClose: + logrus.Debugf("watch for %s closed", f.Name()) + return + default: + } + + fi, err := os.Stat(f.Name()) + if err != nil { + // if we got an error here and lastFi is not set, we can presume that nothing has changed + // This should be safe since before `watch()` is called, a stat is performed, there is any error `watch` is not called + if lastFi == nil { + continue + } + // If it doesn't exist at this point, it must have been removed + // no need to send the 
error here since this is a valid operation + if os.IsNotExist(err) { + if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Remove, Name: f.Name()}, chClose); err != nil { + return + } + lastFi = nil + continue + } + // at this point, send the error + if err := w.sendErr(err, chClose); err != nil { + return + } + continue + } + + if lastFi == nil { + if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Create, Name: fi.Name()}, chClose); err != nil { + return + } + lastFi = fi + continue + } + + if fi.Mode() != lastFi.Mode() { + if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Chmod, Name: fi.Name()}, chClose); err != nil { + return + } + lastFi = fi + continue + } + + if fi.ModTime() != lastFi.ModTime() || fi.Size() != lastFi.Size() { + if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Write, Name: fi.Name()}, chClose); err != nil { + return + } + lastFi = fi + continue + } + } +} diff --git a/vendor/github.com/moby/moby/pkg/filenotify/poller_test.go b/vendor/github.com/moby/moby/pkg/filenotify/poller_test.go new file mode 100644 index 000000000..d85420199 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/filenotify/poller_test.go @@ -0,0 +1,119 @@ +package filenotify + +import ( + "fmt" + "io/ioutil" + "os" + "runtime" + "testing" + "time" + + "github.com/fsnotify/fsnotify" +) + +func TestPollerAddRemove(t *testing.T) { + w := NewPollingWatcher() + + if err := w.Add("no-such-file"); err == nil { + t.Fatal("should have gotten error when adding a non-existent file") + } + if err := w.Remove("no-such-file"); err == nil { + t.Fatal("should have gotten error when removing non-existent watch") + } + + f, err := ioutil.TempFile("", "asdf") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(f.Name()) + + if err := w.Add(f.Name()); err != nil { + t.Fatal(err) + } + + if err := w.Remove(f.Name()); err != nil { + t.Fatal(err) + } +} + +func TestPollerEvent(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("No chmod on Windows") + } + w := NewPollingWatcher() + + f, err := ioutil.TempFile("", "test-poller") + if err != nil { + t.Fatal("error creating temp file") + } + defer os.RemoveAll(f.Name()) + f.Close() + + if err := w.Add(f.Name()); err != nil { + t.Fatal(err) + } + + select { + case <-w.Events(): + t.Fatal("got event before anything happened") + case <-w.Errors(): + t.Fatal("got error before anything happened") + default: + } + + if err := ioutil.WriteFile(f.Name(), []byte("hello"), 0644); err != nil { + t.Fatal(err) + } + if err := assertEvent(w, fsnotify.Write); err != nil { + t.Fatal(err) + } + + if err := os.Chmod(f.Name(), 600); err != nil { + t.Fatal(err) + } + if err := assertEvent(w, fsnotify.Chmod); err != nil { + t.Fatal(err) + } + + if err := os.Remove(f.Name()); err != nil { + t.Fatal(err) + } + if err := assertEvent(w, fsnotify.Remove); err != nil { + t.Fatal(err) + } +} + +func TestPollerClose(t *testing.T) { + w := NewPollingWatcher() + if err := w.Close(); err != nil { + t.Fatal(err) + } + // test double-close + if err := w.Close(); err != nil { + t.Fatal(err) + } + + f, err := ioutil.TempFile("", "asdf") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(f.Name()) + if err := w.Add(f.Name()); err == nil { + t.Fatal("should have gotten error adding watch for closed watcher") + } +} + +func assertEvent(w FileWatcher, eType fsnotify.Op) error { + var err error + select { + case e := <-w.Events(): + if e.Op != eType { + err = fmt.Errorf("got wrong event type, expected %q: %v", eType, e.Op) + } + case e := <-w.Errors(): + err = fmt.Errorf("got unexpected error 
waiting for events %v: %v", eType, e) + case <-time.After(watchWaitTime * 3): + err = fmt.Errorf("timeout waiting for event %v", eType) + } + return err +} diff --git a/vendor/github.com/moby/moby/pkg/fileutils/fileutils.go b/vendor/github.com/moby/moby/pkg/fileutils/fileutils.go new file mode 100644 index 000000000..57cc08734 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/fileutils/fileutils.go @@ -0,0 +1,298 @@ +package fileutils + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "strings" + "text/scanner" + + "github.com/Sirupsen/logrus" +) + +// PatternMatcher allows checking paths agaist a list of patterns +type PatternMatcher struct { + patterns []*Pattern + exclusions bool +} + +// NewPatternMatcher creates a new matcher object for specific patterns that can +// be used later to match against patterns against paths +func NewPatternMatcher(patterns []string) (*PatternMatcher, error) { + pm := &PatternMatcher{ + patterns: make([]*Pattern, 0, len(patterns)), + } + for _, p := range patterns { + // Eliminate leading and trailing whitespace. + p = strings.TrimSpace(p) + if p == "" { + continue + } + p = filepath.Clean(p) + newp := &Pattern{} + if p[0] == '!' { + if len(p) == 1 { + return nil, errors.New("illegal exclusion pattern: \"!\"") + } + newp.exclusion = true + p = p[1:] + pm.exclusions = true + } + // Do some syntax checking on the pattern. + // filepath's Match() has some really weird rules that are inconsistent + // so instead of trying to dup their logic, just call Match() for its + // error state and if there is an error in the pattern return it. + // If this becomes an issue we can remove this since its really only + // needed in the error (syntax) case - which isn't really critical. + if _, err := filepath.Match(p, "."); err != nil { + return nil, err + } + newp.cleanedPattern = p + newp.dirs = strings.Split(p, string(os.PathSeparator)) + pm.patterns = append(pm.patterns, newp) + } + return pm, nil +} + +// Matches matches path against all the patterns. Matches is not safe to be +// called concurrently +func (pm *PatternMatcher) Matches(file string) (bool, error) { + matched := false + file = filepath.FromSlash(file) + parentPath := filepath.Dir(file) + parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) + + for _, pattern := range pm.patterns { + negative := false + + if pattern.exclusion { + negative = true + } + + match, err := pattern.match(file) + if err != nil { + return false, err + } + + if !match && parentPath != "." { + // Check to see if the pattern matches one of our parent dirs. + if len(pattern.dirs) <= len(parentPathDirs) { + match, _ = pattern.match(strings.Join(parentPathDirs[:len(pattern.dirs)], string(os.PathSeparator))) + } + } + + if match { + matched = !negative + } + } + + if matched { + logrus.Debugf("Skipping excluded path: %s", file) + } + + return matched, nil +} + +// Exclusions returns true if any of the patterns define exclusions +func (pm *PatternMatcher) Exclusions() bool { + return pm.exclusions +} + +// Patterns returns array of active patterns +func (pm *PatternMatcher) Patterns() []*Pattern { + return pm.patterns +} + +// Pattern defines a single regexp used used to filter file paths. 
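+// compile translates the .dockerignore-style syntax into a regexp, roughly
+// (with "/" as the separator):
+//
+//	*     => [^/]*     anything except the path separator
+//	?     => [^/]      any single non-separator character
+//	**    => (.*/)?    any number of directories, including none
+//	. $   => \. \$     escaped: special to regexp, not to filepath.Match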
+type Pattern struct { + cleanedPattern string + dirs []string + regexp *regexp.Regexp + exclusion bool +} + +func (p *Pattern) String() string { + return p.cleanedPattern +} + +// Exclusion returns true if this pattern defines exclusion +func (p *Pattern) Exclusion() bool { + return p.exclusion +} + +func (p *Pattern) match(path string) (bool, error) { + + if p.regexp == nil { + if err := p.compile(); err != nil { + return false, filepath.ErrBadPattern + } + } + + b := p.regexp.MatchString(path) + + return b, nil +} + +func (p *Pattern) compile() error { + regStr := "^" + pattern := p.cleanedPattern + // Go through the pattern and convert it to a regexp. + // We use a scanner so we can support utf-8 chars. + var scan scanner.Scanner + scan.Init(strings.NewReader(pattern)) + + sl := string(os.PathSeparator) + escSL := sl + if sl == `\` { + escSL += `\` + } + + for scan.Peek() != scanner.EOF { + ch := scan.Next() + + if ch == '*' { + if scan.Peek() == '*' { + // is some flavor of "**" + scan.Next() + + // Treat **/ as ** so eat the "/" + if string(scan.Peek()) == sl { + scan.Next() + } + + if scan.Peek() == scanner.EOF { + // is "**EOF" - to align with .gitignore just accept all + regStr += ".*" + } else { + // is "**" + // Note that this allows for any # of /'s (even 0) because + // the .* will eat everything, even /'s + regStr += "(.*" + escSL + ")?" + } + } else { + // is "*" so map it to anything but "/" + regStr += "[^" + escSL + "]*" + } + } else if ch == '?' { + // "?" is any char except "/" + regStr += "[^" + escSL + "]" + } else if ch == '.' || ch == '$' { + // Escape some regexp special chars that have no meaning + // in golang's filepath.Match + regStr += `\` + string(ch) + } else if ch == '\\' { + // escape next char. Note that a trailing \ in the pattern + // will be left alone (but need to escape it) + if sl == `\` { + // On windows map "\" to "\\", meaning an escaped backslash, + // and then just continue because filepath.Match on + // Windows doesn't allow escaping at all + regStr += escSL + continue + } + if scan.Peek() != scanner.EOF { + regStr += `\` + string(scan.Next()) + } else { + regStr += `\` + } + } else { + regStr += string(ch) + } + } + + regStr += "$" + + re, err := regexp.Compile(regStr) + if err != nil { + return err + } + + p.regexp = re + return nil +} + +// Matches returns true if file matches any of the patterns +// and isn't excluded by any of the subsequent patterns. +func Matches(file string, patterns []string) (bool, error) { + pm, err := NewPatternMatcher(patterns) + if err != nil { + return false, err + } + file = filepath.Clean(file) + + if file == "." { + // Don't let them exclude everything, kind of silly. + return false, nil + } + + return pm.Matches(file) +} + +// CopyFile copies from src to dst until either EOF is reached +// on src or an error occurs. It verifies src exists and removes +// the dst if it exists. +func CopyFile(src, dst string) (int64, error) { + cleanSrc := filepath.Clean(src) + cleanDst := filepath.Clean(dst) + if cleanSrc == cleanDst { + return 0, nil + } + sf, err := os.Open(cleanSrc) + if err != nil { + return 0, err + } + defer sf.Close() + if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) { + return 0, err + } + df, err := os.Create(cleanDst) + if err != nil { + return 0, err + } + defer df.Close() + return io.Copy(df, sf) +} + +// ReadSymlinkedDirectory returns the target directory of a symlink. +// The target of the symbolic link may not be a file. 
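+// For example, with a hypothetical link /docker -> /var/lib/docker (a
+// directory):
+//
+//	target, err := ReadSymlinkedDirectory("/docker") // "/var/lib/docker", nil
+//
+// A link whose target is a regular file yields an error instead.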
+func ReadSymlinkedDirectory(path string) (string, error) { + var realPath string + var err error + if realPath, err = filepath.Abs(path); err != nil { + return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err) + } + if realPath, err = filepath.EvalSymlinks(realPath); err != nil { + return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err) + } + realPathInfo, err := os.Stat(realPath) + if err != nil { + return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err) + } + if !realPathInfo.Mode().IsDir() { + return "", fmt.Errorf("canonical path points to a file '%s'", realPath) + } + return realPath, nil +} + +// CreateIfNotExists creates a file or a directory only if it does not already exist. +func CreateIfNotExists(path string, isDir bool) error { + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + if isDir { + return os.MkdirAll(path, 0755) + } + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + f, err := os.OpenFile(path, os.O_CREATE, 0755) + if err != nil { + return err + } + f.Close() + } + } + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/fileutils/fileutils_darwin.go b/vendor/github.com/moby/moby/pkg/fileutils/fileutils_darwin.go new file mode 100644 index 000000000..ccd648fac --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/fileutils/fileutils_darwin.go @@ -0,0 +1,27 @@ +package fileutils + +import ( + "os" + "os/exec" + "strconv" + "strings" +) + +// GetTotalUsedFds returns the number of used File Descriptors by +// executing `lsof -p PID` +func GetTotalUsedFds() int { + pid := os.Getpid() + + cmd := exec.Command("lsof", "-p", strconv.Itoa(pid)) + + output, err := cmd.CombinedOutput() + if err != nil { + return -1 + } + + outputStr := strings.TrimSpace(string(output)) + + fds := strings.Split(outputStr, "\n") + + return len(fds) - 1 +} diff --git a/vendor/github.com/moby/moby/pkg/fileutils/fileutils_solaris.go b/vendor/github.com/moby/moby/pkg/fileutils/fileutils_solaris.go new file mode 100644 index 000000000..0f2cb7ab9 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/fileutils/fileutils_solaris.go @@ -0,0 +1,7 @@ +package fileutils + +// GetTotalUsedFds Returns the number of used File Descriptors. 
+// On Solaris these limits are per process and not systemwide +func GetTotalUsedFds() int { + return -1 +} diff --git a/vendor/github.com/moby/moby/pkg/fileutils/fileutils_test.go b/vendor/github.com/moby/moby/pkg/fileutils/fileutils_test.go new file mode 100644 index 000000000..3d61d55c3 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/fileutils/fileutils_test.go @@ -0,0 +1,591 @@ +package fileutils + +import ( + "io/ioutil" + "os" + "path" + "path/filepath" + "runtime" + "strings" + "testing" + + "fmt" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// CopyFile with invalid src +func TestCopyFileWithInvalidSrc(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile("/invalid/file/path", path.Join(tempFolder, "dest")) + if err == nil { + t.Fatal("Should have fail to copy an invalid src file") + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes") + } + +} + +// CopyFile with invalid dest +func TestCopyFileWithInvalidDest(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + src := path.Join(tempFolder, "file") + err = ioutil.WriteFile(src, []byte("content"), 0740) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile(src, path.Join(tempFolder, "/invalid/dest/path")) + if err == nil { + t.Fatal("Should have fail to copy an invalid src file") + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes") + } + +} + +// CopyFile with same src and dest +func TestCopyFileWithSameSrcAndDest(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + file := path.Join(tempFolder, "file") + err = ioutil.WriteFile(file, []byte("content"), 0740) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile(file, file) + if err != nil { + t.Fatal(err) + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes as it is the same file.") + } +} + +// CopyFile with same src and dest but path is different and not clean +func TestCopyFileWithSameSrcAndDestWithPathNameDifferent(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + testFolder := path.Join(tempFolder, "test") + err = os.MkdirAll(testFolder, 0740) + if err != nil { + t.Fatal(err) + } + file := path.Join(testFolder, "file") + sameFile := testFolder + "/../test/file" + err = ioutil.WriteFile(file, []byte("content"), 0740) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile(file, sameFile) + if err != nil { + t.Fatal(err) + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes as it is the same file.") + } +} + +func TestCopyFile(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + src := path.Join(tempFolder, "src") + dest := path.Join(tempFolder, "dest") + ioutil.WriteFile(src, []byte("content"), 0777) + ioutil.WriteFile(dest, []byte("destContent"), 0777) + bytes, err := CopyFile(src, dest) + if err != nil { + t.Fatal(err) + } + if bytes != 7 { + t.Fatalf("Should have written %d bytes but wrote %d", 7, bytes) + } + actual, err := ioutil.ReadFile(dest) + if err != nil { + t.Fatal(err) + } + if string(actual) != "content" { + t.Fatalf("Dest content was '%s', expected 
'%s'", string(actual), "content") + } +} + +// Reading a symlink to a directory must return the directory +func TestReadSymlinkedDirectoryExistingDirectory(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + var err error + if err = os.Mkdir("/tmp/testReadSymlinkToExistingDirectory", 0777); err != nil { + t.Errorf("failed to create directory: %s", err) + } + + if err = os.Symlink("/tmp/testReadSymlinkToExistingDirectory", "/tmp/dirLinkTest"); err != nil { + t.Errorf("failed to create symlink: %s", err) + } + + var path string + if path, err = ReadSymlinkedDirectory("/tmp/dirLinkTest"); err != nil { + t.Fatalf("failed to read symlink to directory: %s", err) + } + + if path != "/tmp/testReadSymlinkToExistingDirectory" { + t.Fatalf("symlink returned unexpected directory: %s", path) + } + + if err = os.Remove("/tmp/testReadSymlinkToExistingDirectory"); err != nil { + t.Errorf("failed to remove temporary directory: %s", err) + } + + if err = os.Remove("/tmp/dirLinkTest"); err != nil { + t.Errorf("failed to remove symlink: %s", err) + } +} + +// Reading a non-existing symlink must fail +func TestReadSymlinkedDirectoryNonExistingSymlink(t *testing.T) { + var path string + var err error + if path, err = ReadSymlinkedDirectory("/tmp/test/foo/Non/ExistingPath"); err == nil { + t.Fatalf("error expected for non-existing symlink") + } + + if path != "" { + t.Fatalf("expected empty path, but '%s' was returned", path) + } +} + +// Reading a symlink to a file must fail +func TestReadSymlinkedDirectoryToFile(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + var err error + var file *os.File + + if file, err = os.Create("/tmp/testReadSymlinkToFile"); err != nil { + t.Fatalf("failed to create file: %s", err) + } + + file.Close() + + if err = os.Symlink("/tmp/testReadSymlinkToFile", "/tmp/fileLinkTest"); err != nil { + t.Errorf("failed to create symlink: %s", err) + } + + var path string + if path, err = ReadSymlinkedDirectory("/tmp/fileLinkTest"); err == nil { + t.Fatalf("ReadSymlinkedDirectory on a symlink to a file should've failed") + } + + if path != "" { + t.Fatalf("path should've been empty: %s", path) + } + + if err = os.Remove("/tmp/testReadSymlinkToFile"); err != nil { + t.Errorf("failed to remove file: %s", err) + } + + if err = os.Remove("/tmp/fileLinkTest"); err != nil { + t.Errorf("failed to remove symlink: %s", err) + } +} + +func TestWildcardMatches(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"*"}) + if !match { + t.Errorf("failed to get a wildcard match, got %v", match) + } +} + +// A simple pattern match should return true. +func TestPatternMatches(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"*.go"}) + if !match { + t.Errorf("failed to get a match, got %v", match) + } +} + +// An exclusion followed by an inclusion should return true. +func TestExclusionPatternMatchesPatternBefore(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"!fileutils.go", "*.go"}) + if !match { + t.Errorf("failed to get true match on exclusion pattern, got %v", match) + } +} + +// A folder pattern followed by an exception should return false. 
+func TestPatternMatchesFolderExclusions(t *testing.T) {
+	match, _ := Matches("docs/README.md", []string{"docs", "!docs/README.md"})
+	if match {
+		t.Errorf("failed to get a false match on exclusion pattern, got %v", match)
+	}
+}
+
+// A folder pattern followed by an exception should return false.
+func TestPatternMatchesFolderWithSlashExclusions(t *testing.T) {
+	match, _ := Matches("docs/README.md", []string{"docs/", "!docs/README.md"})
+	if match {
+		t.Errorf("failed to get a false match on exclusion pattern, got %v", match)
+	}
+}
+
+// A folder pattern followed by an exception should return false.
+func TestPatternMatchesFolderWildcardExclusions(t *testing.T) {
+	match, _ := Matches("docs/README.md", []string{"docs/*", "!docs/README.md"})
+	if match {
+		t.Errorf("failed to get a false match on exclusion pattern, got %v", match)
+	}
+}
+
+// A pattern followed by an exclusion should return false.
+func TestExclusionPatternMatchesPatternAfter(t *testing.T) {
+	match, _ := Matches("fileutils.go", []string{"*.go", "!fileutils.go"})
+	if match {
+		t.Errorf("failed to get false match on exclusion pattern, got %v", match)
+	}
+}
+
+// A filename evaluating to . should return false.
+func TestExclusionPatternMatchesWholeDirectory(t *testing.T) {
+	match, _ := Matches(".", []string{"*.go"})
+	if match {
+		t.Errorf("failed to get false match on ., got %v", match)
+	}
+}
+
+// A single ! pattern should return an error.
+func TestSingleExclamationError(t *testing.T) {
+	_, err := Matches("fileutils.go", []string{"!"})
+	if err == nil {
+		t.Errorf("failed to get an error for a single exclamation point, got %v", err)
+	}
+}
+
+// Matches with no patterns
+func TestMatchesWithNoPatterns(t *testing.T) {
+	matches, err := Matches("/any/path/there", []string{})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if matches {
+		t.Fatalf("Should not have matched anything")
+	}
+}
+
+// Matches with malformed patterns
+func TestMatchesWithMalformedPatterns(t *testing.T) {
+	matches, err := Matches("/any/path/there", []string{"["})
+	if err == nil {
+		t.Fatal("Should have failed because of a malformed syntax in the pattern")
+	}
+	if matches {
+		t.Fatalf("Should not have matched anything")
+	}
+}
+
+type matchesTestCase struct {
+	pattern string
+	text    string
+	pass    bool
+}
+
+func TestMatches(t *testing.T) {
+	tests := []matchesTestCase{
+		{"**", "file", true},
+		{"**", "file/", true},
+		{"**/", "file", true}, // weird one
+		{"**/", "file/", true},
+		{"**", "/", true},
+		{"**/", "/", true},
+		{"**", "dir/file", true},
+		{"**/", "dir/file", true},
+		{"**", "dir/file/", true},
+		{"**/", "dir/file/", true},
+		{"**/**", "dir/file", true},
+		{"**/**", "dir/file/", true},
+		{"dir/**", "dir/file", true},
+		{"dir/**", "dir/file/", true},
+		{"dir/**", "dir/dir2/file", true},
+		{"dir/**", "dir/dir2/file/", true},
+		{"**/dir2/*", "dir/dir2/file", true},
+		{"**/dir2/*", "dir/dir2/file/", true},
+		{"**/dir2/**", "dir/dir2/dir3/file", true},
+		{"**/dir2/**", "dir/dir2/dir3/file/", true},
+		{"**file", "file", true},
+		{"**file", "dir/file", true},
+		{"**/file", "dir/file", true},
+		{"**file", "dir/dir/file", true},
+		{"**/file", "dir/dir/file", true},
+		{"**/file*", "dir/dir/file", true},
+		{"**/file*", "dir/dir/file.txt", true},
+		{"**/file*txt", "dir/dir/file.txt", true},
+		{"**/file*.txt", "dir/dir/file.txt", true},
+		{"**/file*.txt*", "dir/dir/file.txt", true},
+		{"**/**/*.txt", "dir/dir/file.txt", true},
+		{"**/**/*.txt2", "dir/dir/file.txt", false},
+		{"**/*.txt", "file.txt", true},
+		{"**/**/*.txt", "file.txt", true},
{"a**/*.txt", "a/file.txt", true}, + {"a**/*.txt", "a/dir/file.txt", true}, + {"a**/*.txt", "a/dir/dir/file.txt", true}, + {"a/*.txt", "a/dir/file.txt", false}, + {"a/*.txt", "a/file.txt", true}, + {"a/*.txt**", "a/file.txt", true}, + {"a[b-d]e", "ae", false}, + {"a[b-d]e", "ace", true}, + {"a[b-d]e", "aae", false}, + {"a[^b-d]e", "aze", true}, + {".*", ".foo", true}, + {".*", "foo", false}, + {"abc.def", "abcdef", false}, + {"abc.def", "abc.def", true}, + {"abc.def", "abcZdef", false}, + {"abc?def", "abcZdef", true}, + {"abc?def", "abcdef", false}, + {"a\\\\", "a\\", true}, + {"**/foo/bar", "foo/bar", true}, + {"**/foo/bar", "dir/foo/bar", true}, + {"**/foo/bar", "dir/dir2/foo/bar", true}, + {"abc/**", "abc", false}, + {"abc/**", "abc/def", true}, + {"abc/**", "abc/def/ghi", true}, + {"**/.foo", ".foo", true}, + {"**/.foo", "bar.foo", false}, + } + + if runtime.GOOS != "windows" { + tests = append(tests, []matchesTestCase{ + {"a\\*b", "a*b", true}, + {"a\\", "a", false}, + {"a\\", "a\\", false}, + }...) + } + + for _, test := range tests { + desc := fmt.Sprintf("pattern=%q text=%q", test.pattern, test.text) + pm, err := NewPatternMatcher([]string{test.pattern}) + require.NoError(t, err, desc) + res, _ := pm.Matches(test.text) + assert.Equal(t, test.pass, res, desc) + } +} + +func TestCleanPatterns(t *testing.T) { + patterns := []string{"docs", "config"} + pm, err := NewPatternMatcher(patterns) + if err != nil { + t.Fatalf("invalid pattern %v", patterns) + } + cleaned := pm.Patterns() + if len(cleaned) != 2 { + t.Errorf("expected 2 element slice, got %v", len(cleaned)) + } +} + +func TestCleanPatternsStripEmptyPatterns(t *testing.T) { + patterns := []string{"docs", "config", ""} + pm, err := NewPatternMatcher(patterns) + if err != nil { + t.Fatalf("invalid pattern %v", patterns) + } + cleaned := pm.Patterns() + if len(cleaned) != 2 { + t.Errorf("expected 2 element slice, got %v", len(cleaned)) + } +} + +func TestCleanPatternsExceptionFlag(t *testing.T) { + patterns := []string{"docs", "!docs/README.md"} + pm, err := NewPatternMatcher(patterns) + if err != nil { + t.Fatalf("invalid pattern %v", patterns) + } + if !pm.Exclusions() { + t.Errorf("expected exceptions to be true, got %v", pm.Exclusions()) + } +} + +func TestCleanPatternsLeadingSpaceTrimmed(t *testing.T) { + patterns := []string{"docs", " !docs/README.md"} + pm, err := NewPatternMatcher(patterns) + if err != nil { + t.Fatalf("invalid pattern %v", patterns) + } + if !pm.Exclusions() { + t.Errorf("expected exceptions to be true, got %v", pm.Exclusions()) + } +} + +func TestCleanPatternsTrailingSpaceTrimmed(t *testing.T) { + patterns := []string{"docs", "!docs/README.md "} + pm, err := NewPatternMatcher(patterns) + if err != nil { + t.Fatalf("invalid pattern %v", patterns) + } + if !pm.Exclusions() { + t.Errorf("expected exceptions to be true, got %v", pm.Exclusions()) + } +} + +func TestCleanPatternsErrorSingleException(t *testing.T) { + patterns := []string{"!"} + _, err := NewPatternMatcher(patterns) + if err == nil { + t.Errorf("expected error on single exclamation point, got %v", err) + } +} + +func TestCreateIfNotExistsDir(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempFolder) + + folderToCreate := filepath.Join(tempFolder, "tocreate") + + if err := CreateIfNotExists(folderToCreate, true); err != nil { + t.Fatal(err) + } + fileinfo, err := os.Stat(folderToCreate) + if err != nil { + t.Fatalf("Should have create a folder, got %v", 
+	}
+
+	if !fileinfo.IsDir() {
+		t.Fatalf("Should have been a dir, seems it's not")
+	}
+}
+
+func TestCreateIfNotExistsFile(t *testing.T) {
+	tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tempFolder)
+
+	fileToCreate := filepath.Join(tempFolder, "file/to/create")
+
+	if err := CreateIfNotExists(fileToCreate, false); err != nil {
+		t.Fatal(err)
+	}
+	fileinfo, err := os.Stat(fileToCreate)
+	if err != nil {
+		t.Fatalf("Should have created a file, got %v", err)
+	}
+
+	if fileinfo.IsDir() {
+		t.Fatalf("Should have been a file, seems it's not")
+	}
+}
+
+// These matchTests are stolen from go's filepath Match tests.
+type matchTest struct {
+	pattern, s string
+	match      bool
+	err        error
+}
+
+var matchTests = []matchTest{
+	{"abc", "abc", true, nil},
+	{"*", "abc", true, nil},
+	{"*c", "abc", true, nil},
+	{"a*", "a", true, nil},
+	{"a*", "abc", true, nil},
+	{"a*", "ab/c", true, nil},
+	{"a*/b", "abc/b", true, nil},
+	{"a*/b", "a/c/b", false, nil},
+	{"a*b*c*d*e*/f", "axbxcxdxe/f", true, nil},
+	{"a*b*c*d*e*/f", "axbxcxdxexxx/f", true, nil},
+	{"a*b*c*d*e*/f", "axbxcxdxe/xxx/f", false, nil},
+	{"a*b*c*d*e*/f", "axbxcxdxexxx/fff", false, nil},
+	{"a*b?c*x", "abxbbxdbxebxczzx", true, nil},
+	{"a*b?c*x", "abxbbxdbxebxczzy", false, nil},
+	{"ab[c]", "abc", true, nil},
+	{"ab[b-d]", "abc", true, nil},
+	{"ab[e-g]", "abc", false, nil},
+	{"ab[^c]", "abc", false, nil},
+	{"ab[^b-d]", "abc", false, nil},
+	{"ab[^e-g]", "abc", true, nil},
+	{"a\\*b", "a*b", true, nil},
+	{"a\\*b", "ab", false, nil},
+	{"a?b", "a☺b", true, nil},
+	{"a[^a]b", "a☺b", true, nil},
+	{"a???b", "a☺b", false, nil},
+	{"a[^a][^a][^a]b", "a☺b", false, nil},
+	{"[a-ζ]*", "α", true, nil},
+	{"*[a-ζ]", "A", false, nil},
+	{"a?b", "a/b", false, nil},
+	{"a*b", "a/b", false, nil},
+	{"[\\]a]", "]", true, nil},
+	{"[\\-]", "-", true, nil},
+	{"[x\\-]", "x", true, nil},
+	{"[x\\-]", "-", true, nil},
+	{"[x\\-]", "z", false, nil},
+	{"[\\-x]", "x", true, nil},
+	{"[\\-x]", "-", true, nil},
+	{"[\\-x]", "a", false, nil},
+	{"[]a]", "]", false, filepath.ErrBadPattern},
+	{"[-]", "-", false, filepath.ErrBadPattern},
+	{"[x-]", "x", false, filepath.ErrBadPattern},
+	{"[x-]", "-", false, filepath.ErrBadPattern},
+	{"[x-]", "z", false, filepath.ErrBadPattern},
+	{"[-x]", "x", false, filepath.ErrBadPattern},
+	{"[-x]", "-", false, filepath.ErrBadPattern},
+	{"[-x]", "a", false, filepath.ErrBadPattern},
+	{"\\", "a", false, filepath.ErrBadPattern},
+	{"[a-b-c]", "a", false, filepath.ErrBadPattern},
+	{"[", "a", false, filepath.ErrBadPattern},
+	{"[^", "a", false, filepath.ErrBadPattern},
+	{"[^bc", "a", false, filepath.ErrBadPattern},
+	{"a[", "a", false, filepath.ErrBadPattern}, // was nil but IMO it's wrong
+	{"a[", "ab", false, filepath.ErrBadPattern},
+	{"*x", "xxx", true, nil},
+}
+
+func errp(e error) string {
+	if e == nil {
+		return "<nil>"
+	}
+	return e.Error()
+}
+
+// TestMatch tests our version of filepath.Match, called regexpMatch.
+func TestMatch(t *testing.T) {
+	for _, tt := range matchTests {
+		pattern := tt.pattern
+		s := tt.s
+		if runtime.GOOS == "windows" {
+			if strings.Contains(pattern, "\\") {
+				// no escape allowed on windows.
+ continue + } + pattern = filepath.Clean(pattern) + s = filepath.Clean(s) + } + ok, err := Matches(s, []string{pattern}) + if ok != tt.match || err != tt.err { + t.Fatalf("Match(%#q, %#q) = %v, %q want %v, %q", pattern, s, ok, errp(err), tt.match, errp(tt.err)) + } + } +} diff --git a/vendor/github.com/moby/moby/pkg/fileutils/fileutils_unix.go b/vendor/github.com/moby/moby/pkg/fileutils/fileutils_unix.go new file mode 100644 index 000000000..d5c3abf56 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/fileutils/fileutils_unix.go @@ -0,0 +1,22 @@ +// +build linux freebsd + +package fileutils + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/Sirupsen/logrus" +) + +// GetTotalUsedFds Returns the number of used File Descriptors by +// reading it via /proc filesystem. +func GetTotalUsedFds() int { + if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { + logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) + } else { + return len(fds) + } + return -1 +} diff --git a/vendor/github.com/moby/moby/pkg/fileutils/fileutils_windows.go b/vendor/github.com/moby/moby/pkg/fileutils/fileutils_windows.go new file mode 100644 index 000000000..5ec21cace --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/fileutils/fileutils_windows.go @@ -0,0 +1,7 @@ +package fileutils + +// GetTotalUsedFds Returns the number of used File Descriptors. Not supported +// on Windows. +func GetTotalUsedFds() int { + return -1 +} diff --git a/vendor/github.com/moby/moby/pkg/fsutils/fsutils_linux.go b/vendor/github.com/moby/moby/pkg/fsutils/fsutils_linux.go new file mode 100644 index 000000000..e6094b55b --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/fsutils/fsutils_linux.go @@ -0,0 +1,88 @@ +// +build linux + +package fsutils + +import ( + "fmt" + "io/ioutil" + "os" + "unsafe" + + "golang.org/x/sys/unix" +) + +func locateDummyIfEmpty(path string) (string, error) { + children, err := ioutil.ReadDir(path) + if err != nil { + return "", err + } + if len(children) != 0 { + return "", nil + } + dummyFile, err := ioutil.TempFile(path, "fsutils-dummy") + if err != nil { + return "", err + } + name := dummyFile.Name() + err = dummyFile.Close() + return name, err +} + +// SupportsDType returns whether the filesystem mounted on path supports d_type +func SupportsDType(path string) (bool, error) { + // locate dummy so that we have at least one dirent + dummy, err := locateDummyIfEmpty(path) + if err != nil { + return false, err + } + if dummy != "" { + defer os.Remove(dummy) + } + + visited := 0 + supportsDType := true + fn := func(ent *unix.Dirent) bool { + visited++ + if ent.Type == unix.DT_UNKNOWN { + supportsDType = false + // stop iteration + return true + } + // continue iteration + return false + } + if err = iterateReadDir(path, fn); err != nil { + return false, err + } + if visited == 0 { + return false, fmt.Errorf("did not hit any dirent during iteration %s", path) + } + return supportsDType, nil +} + +func iterateReadDir(path string, fn func(*unix.Dirent) bool) error { + d, err := os.Open(path) + if err != nil { + return err + } + defer d.Close() + fd := int(d.Fd()) + buf := make([]byte, 4096) + for { + nbytes, err := unix.ReadDirent(fd, buf) + if err != nil { + return err + } + if nbytes == 0 { + break + } + for off := 0; off < nbytes; { + ent := (*unix.Dirent)(unsafe.Pointer(&buf[off])) + if stop := fn(ent); stop { + return nil + } + off += int(ent.Reclen) + } + } + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/fsutils/fsutils_linux_test.go 
b/vendor/github.com/moby/moby/pkg/fsutils/fsutils_linux_test.go new file mode 100644 index 000000000..816752e41 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/fsutils/fsutils_linux_test.go @@ -0,0 +1,92 @@ +// +build linux + +package fsutils + +import ( + "io/ioutil" + "os" + "os/exec" + "testing" + + "golang.org/x/sys/unix" +) + +func testSupportsDType(t *testing.T, expected bool, mkfsCommand string, mkfsArg ...string) { + // check whether mkfs is installed + if _, err := exec.LookPath(mkfsCommand); err != nil { + t.Skipf("%s not installed: %v", mkfsCommand, err) + } + + // create a sparse image + imageSize := int64(32 * 1024 * 1024) + imageFile, err := ioutil.TempFile("", "fsutils-image") + if err != nil { + t.Fatal(err) + } + imageFileName := imageFile.Name() + defer os.Remove(imageFileName) + if _, err = imageFile.Seek(imageSize-1, 0); err != nil { + t.Fatal(err) + } + if _, err = imageFile.Write([]byte{0}); err != nil { + t.Fatal(err) + } + if err = imageFile.Close(); err != nil { + t.Fatal(err) + } + + // create a mountpoint + mountpoint, err := ioutil.TempDir("", "fsutils-mountpoint") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(mountpoint) + + // format the image + args := append(mkfsArg, imageFileName) + t.Logf("Executing `%s %v`", mkfsCommand, args) + out, err := exec.Command(mkfsCommand, args...).CombinedOutput() + if len(out) > 0 { + t.Log(string(out)) + } + if err != nil { + t.Fatal(err) + } + + // loopback-mount the image. + // for ease of setting up loopback device, we use os/exec rather than unix.Mount + out, err = exec.Command("mount", "-o", "loop", imageFileName, mountpoint).CombinedOutput() + if len(out) > 0 { + t.Log(string(out)) + } + if err != nil { + t.Skip("skipping the test because mount failed") + } + defer func() { + if err := unix.Unmount(mountpoint, 0); err != nil { + t.Fatal(err) + } + }() + + // check whether it supports d_type + result, err := SupportsDType(mountpoint) + if err != nil { + t.Fatal(err) + } + t.Logf("Supports d_type: %v", result) + if result != expected { + t.Fatalf("expected %v, got %v", expected, result) + } +} + +func TestSupportsDTypeWithFType0XFS(t *testing.T) { + testSupportsDType(t, false, "mkfs.xfs", "-m", "crc=0", "-n", "ftype=0") +} + +func TestSupportsDTypeWithFType1XFS(t *testing.T) { + testSupportsDType(t, true, "mkfs.xfs", "-m", "crc=0", "-n", "ftype=1") +} + +func TestSupportsDTypeWithExt4(t *testing.T) { + testSupportsDType(t, true, "mkfs.ext4") +} diff --git a/vendor/github.com/moby/moby/pkg/homedir/homedir_linux.go b/vendor/github.com/moby/moby/pkg/homedir/homedir_linux.go new file mode 100644 index 000000000..012fe52a2 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/homedir/homedir_linux.go @@ -0,0 +1,23 @@ +// +build linux + +package homedir + +import ( + "os" + + "github.com/docker/docker/pkg/idtools" +) + +// GetStatic returns the home directory for the current user without calling +// os/user.Current(). This is useful for static-linked binary on glibc-based +// system, because a call to os/user.Current() in a static binary leads to +// segfault due to a glibc issue that won't be fixed in a short term. 
+// (#29344, golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341) +func GetStatic() (string, error) { + uid := os.Getuid() + usr, err := idtools.LookupUID(uid) + if err != nil { + return "", err + } + return usr.Home, nil +} diff --git a/vendor/github.com/moby/moby/pkg/homedir/homedir_others.go b/vendor/github.com/moby/moby/pkg/homedir/homedir_others.go new file mode 100644 index 000000000..6b96b856f --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/homedir/homedir_others.go @@ -0,0 +1,13 @@ +// +build !linux + +package homedir + +import ( + "errors" +) + +// GetStatic is not needed for non-linux systems. +// (Precisely, it is needed only for glibc-based linux systems.) +func GetStatic() (string, error) { + return "", errors.New("homedir.GetStatic() is not supported on this system") +} diff --git a/vendor/github.com/moby/moby/pkg/homedir/homedir_test.go b/vendor/github.com/moby/moby/pkg/homedir/homedir_test.go new file mode 100644 index 000000000..7a95cb2bd --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/homedir/homedir_test.go @@ -0,0 +1,24 @@ +package homedir + +import ( + "path/filepath" + "testing" +) + +func TestGet(t *testing.T) { + home := Get() + if home == "" { + t.Fatal("returned home directory is empty") + } + + if !filepath.IsAbs(home) { + t.Fatalf("returned path is not absolute: %s", home) + } +} + +func TestGetShortcutString(t *testing.T) { + shortcut := GetShortcutString() + if shortcut == "" { + t.Fatal("returned shortcut string is empty") + } +} diff --git a/vendor/github.com/moby/moby/pkg/homedir/homedir_unix.go b/vendor/github.com/moby/moby/pkg/homedir/homedir_unix.go new file mode 100644 index 000000000..f2a20ea8f --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/homedir/homedir_unix.go @@ -0,0 +1,34 @@ +// +build !windows + +package homedir + +import ( + "os" + + "github.com/opencontainers/runc/libcontainer/user" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + return "HOME" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +func Get() string { + home := os.Getenv(Key()) + if home == "" { + if u, err := user.CurrentUser(); err == nil { + return u.Home + } + } + return home +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. +func GetShortcutString() string { + return "~" +} diff --git a/vendor/github.com/moby/moby/pkg/homedir/homedir_windows.go b/vendor/github.com/moby/moby/pkg/homedir/homedir_windows.go new file mode 100644 index 000000000..fafdb2bbf --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/homedir/homedir_windows.go @@ -0,0 +1,24 @@ +package homedir + +import ( + "os" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + return "USERPROFILE" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +func Get() string { + return os.Getenv(Key()) +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. 
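+//
+// Illustrative note (not part of the vendored source): the returned value
+// contains literal '%' characters, so it should be passed to printf-style
+// functions as an argument, never as the format string itself:
+//
+//	fmt.Printf("home: %s\n", GetShortcutString()) // safe
+//	fmt.Printf(GetShortcutString())               // %U and %E would be parsed as verbs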
+func GetShortcutString() string {
+	return "%USERPROFILE%" // be careful while using in format functions
+}
diff --git a/vendor/github.com/moby/moby/pkg/idtools/idtools.go b/vendor/github.com/moby/moby/pkg/idtools/idtools.go
new file mode 100644
index 000000000..68a072db2
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/idtools/idtools.go
@@ -0,0 +1,279 @@
+package idtools
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+// IDMap contains a single entry for user namespace range remapping. An array
+// of IDMap entries represents the structure that will be provided to the Linux
+// kernel for creating a user namespace.
+type IDMap struct {
+	ContainerID int `json:"container_id"`
+	HostID      int `json:"host_id"`
+	Size        int `json:"size"`
+}
+
+type subIDRange struct {
+	Start  int
+	Length int
+}
+
+type ranges []subIDRange
+
+func (e ranges) Len() int           { return len(e) }
+func (e ranges) Swap(i, j int)      { e[i], e[j] = e[j], e[i] }
+func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start }
+
+const (
+	subuidFileName string = "/etc/subuid"
+	subgidFileName string = "/etc/subgid"
+)
+
+// MkdirAllAs creates a directory (including any along the path) and then modifies
+// ownership to the requested uid/gid. If the directory already exists, this
+// function will still change ownership to the requested uid/gid pair.
+// Deprecated: Use MkdirAllAndChown
+func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
+	return mkdirAs(path, mode, ownerUID, ownerGID, true, true)
+}
+
+// MkdirAs creates a directory and then modifies ownership to the requested uid/gid.
+// If the directory already exists, this function still changes ownership
+// Deprecated: Use MkdirAndChown with an IDPair
+func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
+	return mkdirAs(path, mode, ownerUID, ownerGID, false, true)
+}
+
+// MkdirAllAndChown creates a directory (including any along the path) and then modifies
+// ownership to the requested uid/gid. If the directory already exists, this
+// function will still change ownership to the requested uid/gid pair.
+func MkdirAllAndChown(path string, mode os.FileMode, ids IDPair) error {
+	return mkdirAs(path, mode, ids.UID, ids.GID, true, true)
+}
+
+// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid.
+// If the directory already exists, this function still changes ownership
+func MkdirAndChown(path string, mode os.FileMode, ids IDPair) error {
+	return mkdirAs(path, mode, ids.UID, ids.GID, false, true)
+}
+
+// MkdirAllAndChownNew creates a directory (including any along the path) and then modifies
+// ownership ONLY of newly created directories to the requested uid/gid. If the
+// directories along the path exist, no change of ownership will be performed
+func MkdirAllAndChownNew(path string, mode os.FileMode, ids IDPair) error {
+	return mkdirAs(path, mode, ids.UID, ids.GID, true, false)
+}
+
+// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps.
+// If the maps are empty, then the root uid/gid will default to "real" 0/0
+func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
+	uid, err := toHost(0, uidMap)
+	if err != nil {
+		return -1, -1, err
+	}
+	gid, err := toHost(0, gidMap)
+	if err != nil {
+		return -1, -1, err
+	}
+	return uid, gid, nil
+}
+
+// toContainer takes an id mapping, and uses it to translate a
+// host ID to the remapped ID. If no map is provided, then the translation
+// assumes a 1-to-1 mapping and returns the passed in id
+func toContainer(hostID int, idMap []IDMap) (int, error) {
+	if idMap == nil {
+		return hostID, nil
+	}
+	for _, m := range idMap {
+		if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) {
+			contID := m.ContainerID + (hostID - m.HostID)
+			return contID, nil
+		}
+	}
+	return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID)
+}
+
+// toHost takes an id mapping and a remapped ID, and translates the
+// ID to the mapped host ID. If no map is provided, then the translation
+// assumes a 1-to-1 mapping and returns the passed in id
+func toHost(contID int, idMap []IDMap) (int, error) {
+	if idMap == nil {
+		return contID, nil
+	}
+	for _, m := range idMap {
+		if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) {
+			hostID := m.HostID + (contID - m.ContainerID)
+			return hostID, nil
+		}
+	}
+	return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID)
+}
+
+// IDPair is a UID and GID pair
+type IDPair struct {
+	UID int
+	GID int
+}
+
+// IDMappings contains the mappings of UIDs and GIDs
+type IDMappings struct {
+	uids []IDMap
+	gids []IDMap
+}
+
+// NewIDMappings takes a requested user and group name and
+// using the data from /etc/sub{uid,gid} ranges, creates the
+// proper uid and gid remapping ranges for that user/group pair
+func NewIDMappings(username, groupname string) (*IDMappings, error) {
+	subuidRanges, err := parseSubuid(username)
+	if err != nil {
+		return nil, err
+	}
+	subgidRanges, err := parseSubgid(groupname)
+	if err != nil {
+		return nil, err
+	}
+	if len(subuidRanges) == 0 {
+		return nil, fmt.Errorf("No subuid ranges found for user %q", username)
+	}
+	if len(subgidRanges) == 0 {
+		return nil, fmt.Errorf("No subgid ranges found for group %q", groupname)
+	}
+
+	return &IDMappings{
+		uids: createIDMap(subuidRanges),
+		gids: createIDMap(subgidRanges),
+	}, nil
+}
+
+// NewIDMappingsFromMaps creates a new mapping from two slices
+// Deprecated: this is a temporary shim while transitioning to IDMapping
+func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IDMappings {
+	return &IDMappings{uids: uids, gids: gids}
+}
+
+// RootPair returns a uid and gid pair for the root user. The error is ignored
+// because a root user always exists, and the defaults are correct when the uid
+// and gid maps are empty.
+func (i *IDMappings) RootPair() IDPair {
+	uid, gid, _ := GetRootUIDGID(i.uids, i.gids)
+	return IDPair{UID: uid, GID: gid}
+}
+
+// ToHost returns the host UID and GID for the container uid, gid.
+// Remapping is only performed if the ids aren't already the remapped root ids
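+//
+// Illustrative sketch (not part of the vendored source), assuming mappings
+// were built for a hypothetical "dockremap" user via NewIDMappings:
+//
+//	m, _ := NewIDMappings("dockremap", "dockremap")
+//	host, _ := m.ToHost(IDPair{UID: 1000, GID: 1000})
+//	// host holds the uid/gid on the host that back container id 1000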
+func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) {
+	var err error
+	target := i.RootPair()
+
+	if pair.UID != target.UID {
+		target.UID, err = toHost(pair.UID, i.uids)
+		if err != nil {
+			return target, err
+		}
+	}
+
+	if pair.GID != target.GID {
+		target.GID, err = toHost(pair.GID, i.gids)
+	}
+	return target, err
+}
+
+// ToContainer returns the container UID and GID for the host uid and gid
+func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) {
+	uid, err := toContainer(pair.UID, i.uids)
+	if err != nil {
+		return -1, -1, err
+	}
+	gid, err := toContainer(pair.GID, i.gids)
+	return uid, gid, err
+}
+
+// Empty returns true if there are no id mappings
+func (i *IDMappings) Empty() bool {
+	return len(i.uids) == 0 && len(i.gids) == 0
+}
+
+// UIDs return the UID mapping
+// TODO: remove this once everything has been refactored to use pairs
+func (i *IDMappings) UIDs() []IDMap {
+	return i.uids
+}
+
+// GIDs return the GID mapping
+// TODO: remove this once everything has been refactored to use pairs
+func (i *IDMappings) GIDs() []IDMap {
+	return i.gids
+}
+
+func createIDMap(subidRanges ranges) []IDMap {
+	idMap := []IDMap{}
+
+	// sort the ranges by lowest ID first
+	sort.Sort(subidRanges)
+	containerID := 0
+	for _, idrange := range subidRanges {
+		idMap = append(idMap, IDMap{
+			ContainerID: containerID,
+			HostID:      idrange.Start,
+			Size:        idrange.Length,
+		})
+		containerID = containerID + idrange.Length
+	}
+	return idMap
+}
+
+func parseSubuid(username string) (ranges, error) {
+	return parseSubidFile(subuidFileName, username)
+}
+
+func parseSubgid(username string) (ranges, error) {
+	return parseSubidFile(subgidFileName, username)
+}
+
+// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid)
+// and return all found ranges for a specified username.
If the special value +// "ALL" is supplied for username, then all ranges in the file will be returned +func parseSubidFile(path, username string) (ranges, error) { + var rangeList ranges + + subidFile, err := os.Open(path) + if err != nil { + return rangeList, err + } + defer subidFile.Close() + + s := bufio.NewScanner(subidFile) + for s.Scan() { + if err := s.Err(); err != nil { + return rangeList, err + } + + text := strings.TrimSpace(s.Text()) + if text == "" || strings.HasPrefix(text, "#") { + continue + } + parts := strings.Split(text, ":") + if len(parts) != 3 { + return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path) + } + if parts[0] == username || username == "ALL" { + startid, err := strconv.Atoi(parts[1]) + if err != nil { + return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + } + length, err := strconv.Atoi(parts[2]) + if err != nil { + return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + } + rangeList = append(rangeList, subIDRange{startid, length}) + } + } + return rangeList, nil +} diff --git a/vendor/github.com/moby/moby/pkg/idtools/idtools_unix.go b/vendor/github.com/moby/moby/pkg/idtools/idtools_unix.go new file mode 100644 index 000000000..8701bb7fa --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/idtools/idtools_unix.go @@ -0,0 +1,204 @@ +// +build !windows + +package idtools + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/docker/docker/pkg/system" + "github.com/opencontainers/runc/libcontainer/user" +) + +var ( + entOnce sync.Once + getentCmd string +) + +func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { + // make an array containing the original path asked for, plus (for mkAll == true) + // all path components leading up to the complete path that don't exist before we MkdirAll + // so that we can chown all of them properly at the end. 
If chownExisting is false, we won't + // chown the full directory path if it exists + var paths []string + if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { + paths = []string{path} + } else if err == nil && chownExisting { + // short-circuit--we were called with an existing directory and chown was requested + return os.Chown(path, ownerUID, ownerGID) + } else if err == nil { + // nothing to do; directory path fully exists already and chown was NOT requested + return nil + } + + if mkAll { + // walk back to "/" looking for directories which do not exist + // and add them to the paths array for chown after creation + dirPath := path + for { + dirPath = filepath.Dir(dirPath) + if dirPath == "/" { + break + } + if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { + paths = append(paths, dirPath) + } + } + if err := system.MkdirAll(path, mode, ""); err != nil && !os.IsExist(err) { + return err + } + } else { + if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { + return err + } + } + // even if it existed, we will chown the requested path + any subpaths that + // didn't exist when we called MkdirAll + for _, pathComponent := range paths { + if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil { + return err + } + } + return nil +} + +// CanAccess takes a valid (existing) directory and a uid, gid pair and determines +// if that uid, gid pair has access (execute bit) to the directory +func CanAccess(path string, pair IDPair) bool { + statInfo, err := system.Stat(path) + if err != nil { + return false + } + fileMode := os.FileMode(statInfo.Mode()) + permBits := fileMode.Perm() + return accessible(statInfo.UID() == uint32(pair.UID), + statInfo.GID() == uint32(pair.GID), permBits) +} + +func accessible(isOwner, isGroup bool, perms os.FileMode) bool { + if isOwner && (perms&0100 == 0100) { + return true + } + if isGroup && (perms&0010 == 0010) { + return true + } + if perms&0001 == 0001 { + return true + } + return false +} + +// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupUser(username string) (user.User, error) { + // first try a local system files lookup using existing capabilities + usr, err := user.LookupUser(username) + if err == nil { + return usr, nil + } + // local files lookup failed; attempt to call `getent` to query configured passwd dbs + usr, err = getentUser(fmt.Sprintf("%s %s", "passwd", username)) + if err != nil { + return user.User{}, err + } + return usr, nil +} + +// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupUID(uid int) (user.User, error) { + // first try a local system files lookup using existing capabilities + usr, err := user.LookupUid(uid) + if err == nil { + return usr, nil + } + // local files lookup failed; attempt to call `getent` to query configured passwd dbs + return getentUser(fmt.Sprintf("%s %d", "passwd", uid)) +} + +func getentUser(args string) (user.User, error) { + reader, err := callGetent(args) + if err != nil { + return user.User{}, err + } + users, err := user.ParsePasswd(reader) + if err != nil { + return user.User{}, err + } + if len(users) == 0 { + return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", strings.Split(args, " ")[1]) + } + return users[0], nil +} + +// 
LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name,
+// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
+func LookupGroup(groupname string) (user.Group, error) {
+	// first try a local system files lookup using existing capabilities
+	group, err := user.LookupGroup(groupname)
+	if err == nil {
+		return group, nil
+	}
+	// local files lookup failed; attempt to call `getent` to query configured group dbs
+	return getentGroup(fmt.Sprintf("%s %s", "group", groupname))
+}
+
+// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID,
+// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
+func LookupGID(gid int) (user.Group, error) {
+	// first try a local system files lookup using existing capabilities
+	group, err := user.LookupGid(gid)
+	if err == nil {
+		return group, nil
+	}
+	// local files lookup failed; attempt to call `getent` to query configured group dbs
+	return getentGroup(fmt.Sprintf("%s %d", "group", gid))
+}
+
+func getentGroup(args string) (user.Group, error) {
+	reader, err := callGetent(args)
+	if err != nil {
+		return user.Group{}, err
+	}
+	groups, err := user.ParseGroup(reader)
+	if err != nil {
+		return user.Group{}, err
+	}
+	if len(groups) == 0 {
+		return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", strings.Split(args, " ")[1])
+	}
+	return groups[0], nil
+}
+
+func callGetent(args string) (io.Reader, error) {
+	entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") })
+	// if no `getent` command on host, can't do anything else
+	if getentCmd == "" {
+		return nil, fmt.Errorf("unable to find getent command on the host")
+	}
+	out, err := execCmd(getentCmd, args)
+	if err != nil {
+		exitCode, errC := system.GetExitCode(err)
+		if errC != nil {
+			return nil, err
+		}
+		switch exitCode {
+		case 1:
+			return nil, fmt.Errorf("getent reported invalid parameters/database unknown")
+		case 2:
+			terms := strings.Split(args, " ")
+			return nil, fmt.Errorf("getent unable to find entry %q in %s database", terms[1], terms[0])
+		case 3:
+			return nil, fmt.Errorf("getent database doesn't support enumeration")
+		default:
+			return nil, err
+		}
+
+	}
+	return bytes.NewReader(out), nil
+}
diff --git a/vendor/github.com/moby/moby/pkg/idtools/idtools_unix_test.go b/vendor/github.com/moby/moby/pkg/idtools/idtools_unix_test.go
new file mode 100644
index 000000000..31522a547
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/idtools/idtools_unix_test.go
@@ -0,0 +1,253 @@
+// +build !windows
+
+package idtools
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"syscall"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+type node struct {
+	uid int
+	gid int
+}
+
+func TestMkdirAllAs(t *testing.T) {
+	dirName, err := ioutil.TempDir("", "mkdirall")
+	if err != nil {
+		t.Fatalf("Couldn't create temp dir: %v", err)
+	}
+	defer os.RemoveAll(dirName)
+
+	testTree := map[string]node{
+		"usr":              {0, 0},
+		"usr/bin":          {0, 0},
+		"lib":              {33, 33},
+		"lib/x86_64":       {45, 45},
+		"lib/x86_64/share": {1, 1},
+	}
+
+	if err := buildTree(dirName, testTree); err != nil {
+		t.Fatal(err)
+	}
+
+	// test adding a directory to a pre-existing dir; only the new dir is owned by the uid/gid
+	if err := MkdirAllAs(filepath.Join(dirName, "usr", "share"), 0755, 99, 99); err != nil {
+		t.Fatal(err)
+	}
+	testTree["usr/share"] = node{99, 99}
+	verifyTree, err := readTree(dirName, "")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := compareTrees(testTree,
verifyTree); err != nil { + t.Fatal(err) + } + + // test 2-deep new directories--both should be owned by the uid/gid pair + if err := MkdirAllAs(filepath.Join(dirName, "lib", "some", "other"), 0755, 101, 101); err != nil { + t.Fatal(err) + } + testTree["lib/some"] = node{101, 101} + testTree["lib/some/other"] = node{101, 101} + verifyTree, err = readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } + + // test a directory that already exists; should be chowned, but nothing else + if err := MkdirAllAs(filepath.Join(dirName, "usr"), 0755, 102, 102); err != nil { + t.Fatal(err) + } + testTree["usr"] = node{102, 102} + verifyTree, err = readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } +} + +func TestMkdirAllAndChownNew(t *testing.T) { + dirName, err := ioutil.TempDir("", "mkdirnew") + require.NoError(t, err) + defer os.RemoveAll(dirName) + + testTree := map[string]node{ + "usr": {0, 0}, + "usr/bin": {0, 0}, + "lib": {33, 33}, + "lib/x86_64": {45, 45}, + "lib/x86_64/share": {1, 1}, + } + require.NoError(t, buildTree(dirName, testTree)) + + // test adding a directory to a pre-existing dir; only the new dir is owned by the uid/gid + err = MkdirAllAndChownNew(filepath.Join(dirName, "usr", "share"), 0755, IDPair{99, 99}) + require.NoError(t, err) + + testTree["usr/share"] = node{99, 99} + verifyTree, err := readTree(dirName, "") + require.NoError(t, err) + require.NoError(t, compareTrees(testTree, verifyTree)) + + // test 2-deep new directories--both should be owned by the uid/gid pair + err = MkdirAllAndChownNew(filepath.Join(dirName, "lib", "some", "other"), 0755, IDPair{101, 101}) + require.NoError(t, err) + testTree["lib/some"] = node{101, 101} + testTree["lib/some/other"] = node{101, 101} + verifyTree, err = readTree(dirName, "") + require.NoError(t, err) + require.NoError(t, compareTrees(testTree, verifyTree)) + + // test a directory that already exists; should NOT be chowned + err = MkdirAllAndChownNew(filepath.Join(dirName, "usr"), 0755, IDPair{102, 102}) + require.NoError(t, err) + verifyTree, err = readTree(dirName, "") + require.NoError(t, err) + require.NoError(t, compareTrees(testTree, verifyTree)) +} + +func TestMkdirAs(t *testing.T) { + + dirName, err := ioutil.TempDir("", "mkdir") + if err != nil { + t.Fatalf("Couldn't create temp dir: %v", err) + } + defer os.RemoveAll(dirName) + + testTree := map[string]node{ + "usr": {0, 0}, + } + if err := buildTree(dirName, testTree); err != nil { + t.Fatal(err) + } + + // test a directory that already exists; should just chown to the requested uid/gid + if err := MkdirAs(filepath.Join(dirName, "usr"), 0755, 99, 99); err != nil { + t.Fatal(err) + } + testTree["usr"] = node{99, 99} + verifyTree, err := readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } + + // create a subdir under a dir which doesn't exist--should fail + if err := MkdirAs(filepath.Join(dirName, "usr", "bin", "subdir"), 0755, 102, 102); err == nil { + t.Fatalf("Trying to create a directory with Mkdir where the parent doesn't exist should have failed") + } + + // create a subdir under an existing dir; should only change the ownership of the new subdir + if err := MkdirAs(filepath.Join(dirName, "usr", "bin"), 0755, 102, 102); err != nil { + t.Fatal(err) + } + testTree["usr/bin"] = node{102, 102} + verifyTree, err = 
readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } +} + +func buildTree(base string, tree map[string]node) error { + for path, node := range tree { + fullPath := filepath.Join(base, path) + if err := os.MkdirAll(fullPath, 0755); err != nil { + return fmt.Errorf("Couldn't create path: %s; error: %v", fullPath, err) + } + if err := os.Chown(fullPath, node.uid, node.gid); err != nil { + return fmt.Errorf("Couldn't chown path: %s; error: %v", fullPath, err) + } + } + return nil +} + +func readTree(base, root string) (map[string]node, error) { + tree := make(map[string]node) + + dirInfos, err := ioutil.ReadDir(base) + if err != nil { + return nil, fmt.Errorf("Couldn't read directory entries for %q: %v", base, err) + } + + for _, info := range dirInfos { + s := &syscall.Stat_t{} + if err := syscall.Stat(filepath.Join(base, info.Name()), s); err != nil { + return nil, fmt.Errorf("Can't stat file %q: %v", filepath.Join(base, info.Name()), err) + } + tree[filepath.Join(root, info.Name())] = node{int(s.Uid), int(s.Gid)} + if info.IsDir() { + // read the subdirectory + subtree, err := readTree(filepath.Join(base, info.Name()), filepath.Join(root, info.Name())) + if err != nil { + return nil, err + } + for path, nodeinfo := range subtree { + tree[path] = nodeinfo + } + } + } + return tree, nil +} + +func compareTrees(left, right map[string]node) error { + if len(left) != len(right) { + return fmt.Errorf("Trees aren't the same size") + } + for path, nodeLeft := range left { + if nodeRight, ok := right[path]; ok { + if nodeRight.uid != nodeLeft.uid || nodeRight.gid != nodeLeft.gid { + // mismatch + return fmt.Errorf("mismatched ownership for %q: expected: %d:%d, got: %d:%d", path, + nodeLeft.uid, nodeLeft.gid, nodeRight.uid, nodeRight.gid) + } + continue + } + return fmt.Errorf("right tree didn't contain path %q", path) + } + return nil +} + +func TestParseSubidFileWithNewlinesAndComments(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "parsesubid") + if err != nil { + t.Fatal(err) + } + fnamePath := filepath.Join(tmpDir, "testsubuid") + fcontent := `tss:100000:65536 +# empty default subuid/subgid file + +dockremap:231072:65536` + if err := ioutil.WriteFile(fnamePath, []byte(fcontent), 0644); err != nil { + t.Fatal(err) + } + ranges, err := parseSubidFile(fnamePath, "dockremap") + if err != nil { + t.Fatal(err) + } + if len(ranges) != 1 { + t.Fatalf("wanted 1 element in ranges, got %d instead", len(ranges)) + } + if ranges[0].Start != 231072 { + t.Fatalf("wanted 231072, got %d instead", ranges[0].Start) + } + if ranges[0].Length != 65536 { + t.Fatalf("wanted 65536, got %d instead", ranges[0].Length) + } +} diff --git a/vendor/github.com/moby/moby/pkg/idtools/idtools_windows.go b/vendor/github.com/moby/moby/pkg/idtools/idtools_windows.go new file mode 100644 index 000000000..45d2878e3 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/idtools/idtools_windows.go @@ -0,0 +1,25 @@ +// +build windows + +package idtools + +import ( + "os" + + "github.com/docker/docker/pkg/system" +) + +// Platforms such as Windows do not support the UID/GID concept. So make this +// just a wrapper around system.MkdirAll. 
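+//
+// Illustrative note (not part of the vendored source): the owner arguments
+// are accepted but ignored, so the call below reduces to system.MkdirAll,
+// with "already exists" errors tolerated:
+//
+//	mkdirAs(somePath, 0755, 0, 0, true, true) // ownership args have no effect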
+func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { + if err := system.MkdirAll(path, mode, ""); err != nil && !os.IsExist(err) { + return err + } + return nil +} + +// CanAccess takes a valid (existing) directory and a uid, gid pair and determines +// if that uid, gid pair has access (execute bit) to the directory +// Windows does not require/support this function, so always return true +func CanAccess(path string, pair IDPair) bool { + return true +} diff --git a/vendor/github.com/moby/moby/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/moby/moby/pkg/idtools/usergroupadd_linux.go new file mode 100644 index 000000000..9da7975e2 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/idtools/usergroupadd_linux.go @@ -0,0 +1,164 @@ +package idtools + +import ( + "fmt" + "regexp" + "sort" + "strconv" + "strings" + "sync" +) + +// add a user and/or group to Linux /etc/passwd, /etc/group using standard +// Linux distribution commands: +// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group +// useradd -r -s /bin/false + +var ( + once sync.Once + userCommand string + + cmdTemplates = map[string]string{ + "adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s", + "useradd": "-r -s /bin/false %s", + "usermod": "-%s %d-%d %s", + } + + idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`) + // default length for a UID/GID subordinate range + defaultRangeLen = 65536 + defaultRangeStart = 100000 + userMod = "usermod" +) + +// AddNamespaceRangesUser takes a username and uses the standard system +// utility to create a system user/group pair used to hold the +// /etc/sub{uid,gid} ranges which will be used for user namespace +// mapping ranges in containers. 
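+//
+// Illustrative call (not part of the vendored source; assumes adduser/useradd
+// and usermod are available on the host, and "dockremap" is just an example):
+//
+//	uid, gid, err := AddNamespaceRangesUser("dockremap")
+//	// on success, /etc/subuid and /etc/subgid now hold ranges for dockremap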
+func AddNamespaceRangesUser(name string) (int, int, error) { + if err := addUser(name); err != nil { + return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err) + } + + // Query the system for the created uid and gid pair + out, err := execCmd("id", name) + if err != nil { + return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err) + } + matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out))) + if len(matches) != 3 { + return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out)) + } + uid, err := strconv.Atoi(matches[1]) + if err != nil { + return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err) + } + gid, err := strconv.Atoi(matches[2]) + if err != nil { + return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err) + } + + // Now we need to create the subuid/subgid ranges for our new user/group (system users + // do not get auto-created ranges in subuid/subgid) + + if err := createSubordinateRanges(name); err != nil { + return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err) + } + return uid, gid, nil +} + +func addUser(userName string) error { + once.Do(func() { + // set up which commands are used for adding users/groups dependent on distro + if _, err := resolveBinary("adduser"); err == nil { + userCommand = "adduser" + } else if _, err := resolveBinary("useradd"); err == nil { + userCommand = "useradd" + } + }) + if userCommand == "" { + return fmt.Errorf("Cannot add user; no useradd/adduser binary found") + } + args := fmt.Sprintf(cmdTemplates[userCommand], userName) + out, err := execCmd(userCommand, args) + if err != nil { + return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out)) + } + return nil +} + +func createSubordinateRanges(name string) error { + + // first, we should verify that ranges weren't automatically created + // by the distro tooling + ranges, err := parseSubuid(name) + if err != nil { + return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err) + } + if len(ranges) == 0 { + // no UID ranges; let's create one + startID, err := findNextUIDRange() + if err != nil { + return fmt.Errorf("Can't find available subuid range: %v", err) + } + out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name)) + if err != nil { + return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err) + } + } + + ranges, err = parseSubgid(name) + if err != nil { + return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err) + } + if len(ranges) == 0 { + // no GID ranges; let's create one + startID, err := findNextGIDRange() + if err != nil { + return fmt.Errorf("Can't find available subgid range: %v", err) + } + out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name)) + if err != nil { + return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err) + } + } + return nil +} + +func findNextUIDRange() (int, error) { + ranges, err := parseSubuid("ALL") + if err != nil { + return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err) + } + sort.Sort(ranges) + return findNextRangeStart(ranges) +} + +func findNextGIDRange() (int, error) { + ranges, err := parseSubgid("ALL") + if err != nil { + return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err) + } + 
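+	// sort ascending by Start so findNextRangeStart can scan for the first
+	// free 65536-id slot in a single pass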
sort.Sort(ranges) + return findNextRangeStart(ranges) +} + +func findNextRangeStart(rangeList ranges) (int, error) { + startID := defaultRangeStart + for _, arange := range rangeList { + if wouldOverlap(arange, startID) { + startID = arange.Start + arange.Length + } + } + return startID, nil +} + +func wouldOverlap(arange subIDRange, ID int) bool { + low := ID + high := ID + defaultRangeLen + if (low >= arange.Start && low <= arange.Start+arange.Length) || + (high <= arange.Start+arange.Length && high >= arange.Start) { + return true + } + return false +} diff --git a/vendor/github.com/moby/moby/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/moby/moby/pkg/idtools/usergroupadd_unsupported.go new file mode 100644 index 000000000..d98b354cb --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/idtools/usergroupadd_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux + +package idtools + +import "fmt" + +// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair +// and calls the appropriate helper function to add the group and then +// the user to the group in /etc/group and /etc/passwd respectively. +func AddNamespaceRangesUser(name string) (int, int, error) { + return -1, -1, fmt.Errorf("No support for adding users or groups on this OS") +} diff --git a/vendor/github.com/moby/moby/pkg/idtools/utils_unix.go b/vendor/github.com/moby/moby/pkg/idtools/utils_unix.go new file mode 100644 index 000000000..9703ecbd9 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/idtools/utils_unix.go @@ -0,0 +1,32 @@ +// +build !windows + +package idtools + +import ( + "fmt" + "os/exec" + "path/filepath" + "strings" +) + +func resolveBinary(binname string) (string, error) { + binaryPath, err := exec.LookPath(binname) + if err != nil { + return "", err + } + resolvedPath, err := filepath.EvalSymlinks(binaryPath) + if err != nil { + return "", err + } + //only return no error if the final resolved binary basename + //matches what was searched for + if filepath.Base(resolvedPath) == binname { + return resolvedPath, nil + } + return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) +} + +func execCmd(cmd, args string) ([]byte, error) { + execCmd := exec.Command(cmd, strings.Split(args, " ")...) 
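+	// the args string is split on single spaces, so no individual argument may
+	// contain a space; the fixed command templates used by this package satisfy that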
+ return execCmd.CombinedOutput() +} diff --git a/vendor/github.com/moby/moby/pkg/ioutils/buffer.go b/vendor/github.com/moby/moby/pkg/ioutils/buffer.go new file mode 100644 index 000000000..3d737b3e1 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/ioutils/buffer.go @@ -0,0 +1,51 @@ +package ioutils + +import ( + "errors" + "io" +) + +var errBufferFull = errors.New("buffer is full") + +type fixedBuffer struct { + buf []byte + pos int + lastRead int +} + +func (b *fixedBuffer) Write(p []byte) (int, error) { + n := copy(b.buf[b.pos:cap(b.buf)], p) + b.pos += n + + if n < len(p) { + if b.pos == cap(b.buf) { + return n, errBufferFull + } + return n, io.ErrShortWrite + } + return n, nil +} + +func (b *fixedBuffer) Read(p []byte) (int, error) { + n := copy(p, b.buf[b.lastRead:b.pos]) + b.lastRead += n + return n, nil +} + +func (b *fixedBuffer) Len() int { + return b.pos - b.lastRead +} + +func (b *fixedBuffer) Cap() int { + return cap(b.buf) +} + +func (b *fixedBuffer) Reset() { + b.pos = 0 + b.lastRead = 0 + b.buf = b.buf[:0] +} + +func (b *fixedBuffer) String() string { + return string(b.buf[b.lastRead:b.pos]) +} diff --git a/vendor/github.com/moby/moby/pkg/ioutils/buffer_test.go b/vendor/github.com/moby/moby/pkg/ioutils/buffer_test.go new file mode 100644 index 000000000..f68712438 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/ioutils/buffer_test.go @@ -0,0 +1,153 @@ +package ioutils + +import ( + "bytes" + "testing" +) + +func TestFixedBufferCap(t *testing.T) { + buf := &fixedBuffer{buf: make([]byte, 0, 5)} + + n := buf.Cap() + if n != 5 { + t.Fatalf("expected buffer capacity to be 5 bytes, got %d", n) + } +} + +func TestFixedBufferLen(t *testing.T) { + buf := &fixedBuffer{buf: make([]byte, 0, 10)} + + buf.Write([]byte("hello")) + l := buf.Len() + if l != 5 { + t.Fatalf("expected buffer length to be 5 bytes, got %d", l) + } + + buf.Write([]byte("world")) + l = buf.Len() + if l != 10 { + t.Fatalf("expected buffer length to be 10 bytes, got %d", l) + } + + // read 5 bytes + b := make([]byte, 5) + buf.Read(b) + + l = buf.Len() + if l != 5 { + t.Fatalf("expected buffer length to be 5 bytes, got %d", l) + } + + n, err := buf.Write([]byte("i-wont-fit")) + if n != 0 { + t.Fatalf("expected no bytes to be written to buffer, got %d", n) + } + if err != errBufferFull { + t.Fatalf("expected errBufferFull, got %v", err) + } + + l = buf.Len() + if l != 5 { + t.Fatalf("expected buffer length to still be 5 bytes, got %d", l) + } + + buf.Reset() + l = buf.Len() + if l != 0 { + t.Fatalf("expected buffer length to still be 0 bytes, got %d", l) + } +} + +func TestFixedBufferString(t *testing.T) { + buf := &fixedBuffer{buf: make([]byte, 0, 10)} + + buf.Write([]byte("hello")) + buf.Write([]byte("world")) + + out := buf.String() + if out != "helloworld" { + t.Fatalf("expected output to be \"helloworld\", got %q", out) + } + + // read 5 bytes + b := make([]byte, 5) + buf.Read(b) + + // test that fixedBuffer.String() only returns the part that hasn't been read + out = buf.String() + if out != "world" { + t.Fatalf("expected output to be \"world\", got %q", out) + } +} + +func TestFixedBufferWrite(t *testing.T) { + buf := &fixedBuffer{buf: make([]byte, 0, 64)} + n, err := buf.Write([]byte("hello")) + if err != nil { + t.Fatal(err) + } + + if n != 5 { + t.Fatalf("expected 5 bytes written, got %d", n) + } + + if string(buf.buf[:5]) != "hello" { + t.Fatalf("expected \"hello\", got %q", string(buf.buf[:5])) + } + + n, err = buf.Write(bytes.Repeat([]byte{1}, 64)) + if n != 59 { + t.Fatalf("expected 59 bytes 
written before buffer is full, got %d", n) + } + if err != errBufferFull { + t.Fatalf("expected errBufferFull, got %v - %v", err, buf.buf[:64]) + } +} + +func TestFixedBufferRead(t *testing.T) { + buf := &fixedBuffer{buf: make([]byte, 0, 64)} + if _, err := buf.Write([]byte("hello world")); err != nil { + t.Fatal(err) + } + + b := make([]byte, 5) + n, err := buf.Read(b) + if err != nil { + t.Fatal(err) + } + + if n != 5 { + t.Fatalf("expected 5 bytes read, got %d - %s", n, buf.String()) + } + + if string(b) != "hello" { + t.Fatalf("expected \"hello\", got %q", string(b)) + } + + n, err = buf.Read(b) + if err != nil { + t.Fatal(err) + } + + if n != 5 { + t.Fatalf("expected 5 bytes read, got %d", n) + } + + if string(b) != " worl" { + t.Fatalf("expected \" worl\", got %s", string(b)) + } + + b = b[:1] + n, err = buf.Read(b) + if err != nil { + t.Fatal(err) + } + + if n != 1 { + t.Fatalf("expected 1 byte read, got %d - %s", n, buf.String()) + } + + if string(b) != "d" { + t.Fatalf("expected \"d\", got %s", string(b)) + } +} diff --git a/vendor/github.com/moby/moby/pkg/ioutils/bytespipe.go b/vendor/github.com/moby/moby/pkg/ioutils/bytespipe.go new file mode 100644 index 000000000..72a04f349 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/ioutils/bytespipe.go @@ -0,0 +1,186 @@ +package ioutils + +import ( + "errors" + "io" + "sync" +) + +// maxCap is the highest capacity to use in byte slices that buffer data. +const maxCap = 1e6 + +// minCap is the lowest capacity to use in byte slices that buffer data. +const minCap = 64 + +// blockThreshold is the minimum number of bytes in the buffer which will cause +// a write to BytesPipe to block when allocating a new slice. +const blockThreshold = 1e6 + +var ( + // ErrClosed is returned when Write is called on a closed BytesPipe. + ErrClosed = errors.New("write to closed BytesPipe") + + bufPools = make(map[int]*sync.Pool) + bufPoolsLock sync.Mutex +) + +// BytesPipe is an io.ReadWriteCloser that works like a pipe (a FIFO queue). +// All written data may be read at most once. Also, BytesPipe allocates +// and releases new byte slices to adjust to current needs, so the buffer +// won't be overgrown after peak loads. +type BytesPipe struct { + mu sync.Mutex + wait *sync.Cond + buf []*fixedBuffer + bufLen int + closeErr error // error to return from the next Read; nil while the pipe is open. +} + +// NewBytesPipe creates a new BytesPipe. Its internal buffer starts as a +// single slice with capacity minCap (64 bytes) and grows as needed. +func NewBytesPipe() *BytesPipe { + bp := &BytesPipe{} + bp.buf = append(bp.buf, getBuffer(minCap)) + bp.wait = sync.NewCond(&bp.mu) + return bp +} + +// Write writes p to BytesPipe. +// It can allocate new []byte slices in the process of writing.
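Not part of the vendored file — a minimal usage sketch of the BytesPipe API defined in this file, under the assumption that it is imported via its vendor path: one goroutine writes while another drains, and Close lets the reader finish with io.EOF once the buffered data is consumed.

package main

import (
	"fmt"
	"io"

	"github.com/moby/moby/pkg/ioutils"
)

func main() {
	bp := ioutils.NewBytesPipe()

	// Producer: write a few chunks, then close so the reader sees io.EOF
	// once the buffered data has been drained.
	go func() {
		bp.Write([]byte("hello, "))
		bp.Write([]byte("pipe"))
		bp.Close()
	}()

	var out []byte
	chunk := make([]byte, 4)
	for {
		n, err := bp.Read(chunk)
		out = append(out, chunk[:n]...)
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
	}
	fmt.Println(string(out)) // hello, pipe
}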
+func (bp *BytesPipe) Write(p []byte) (int, error) { + bp.mu.Lock() + + written := 0 +loop0: + for { + if bp.closeErr != nil { + bp.mu.Unlock() + return written, ErrClosed + } + + if len(bp.buf) == 0 { + bp.buf = append(bp.buf, getBuffer(64)) + } + // get the last buffer + b := bp.buf[len(bp.buf)-1] + + n, err := b.Write(p) + written += n + bp.bufLen += n + + // errBufferFull is an error we expect to get if the buffer is full + if err != nil && err != errBufferFull { + bp.wait.Broadcast() + bp.mu.Unlock() + return written, err + } + + // if there was enough room to write all then break + if len(p) == n { + break + } + + // more data: write to the next slice + p = p[n:] + + // make sure the buffer doesn't grow too big from this write + for bp.bufLen >= blockThreshold { + bp.wait.Wait() + if bp.closeErr != nil { + continue loop0 + } + } + + // add new byte slice to the buffers slice and continue writing + nextCap := b.Cap() * 2 + if nextCap > maxCap { + nextCap = maxCap + } + bp.buf = append(bp.buf, getBuffer(nextCap)) + } + bp.wait.Broadcast() + bp.mu.Unlock() + return written, nil +} + +// CloseWithError causes further reads from a BytesPipe to return immediately. +func (bp *BytesPipe) CloseWithError(err error) error { + bp.mu.Lock() + if err != nil { + bp.closeErr = err + } else { + bp.closeErr = io.EOF + } + bp.wait.Broadcast() + bp.mu.Unlock() + return nil +} + +// Close causes further reads from a BytesPipe to return immediately. +func (bp *BytesPipe) Close() error { + return bp.CloseWithError(nil) +} + +// Read reads bytes from BytesPipe. +// Data could be read only once. +func (bp *BytesPipe) Read(p []byte) (n int, err error) { + bp.mu.Lock() + if bp.bufLen == 0 { + if bp.closeErr != nil { + bp.mu.Unlock() + return 0, bp.closeErr + } + bp.wait.Wait() + if bp.bufLen == 0 && bp.closeErr != nil { + err := bp.closeErr + bp.mu.Unlock() + return 0, err + } + } + + for bp.bufLen > 0 { + b := bp.buf[0] + read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error + n += read + bp.bufLen -= read + + if b.Len() == 0 { + // it's empty so return it to the pool and move to the next one + returnBuffer(b) + bp.buf[0] = nil + bp.buf = bp.buf[1:] + } + + if len(p) == read { + break + } + + p = p[read:] + } + + bp.wait.Broadcast() + bp.mu.Unlock() + return +} + +func returnBuffer(b *fixedBuffer) { + b.Reset() + bufPoolsLock.Lock() + pool := bufPools[b.Cap()] + bufPoolsLock.Unlock() + if pool != nil { + pool.Put(b) + } +} + +func getBuffer(size int) *fixedBuffer { + bufPoolsLock.Lock() + pool, ok := bufPools[size] + if !ok { + pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }} + bufPools[size] = pool + } + bufPoolsLock.Unlock() + return pool.Get().(*fixedBuffer) +} diff --git a/vendor/github.com/moby/moby/pkg/ioutils/bytespipe_test.go b/vendor/github.com/moby/moby/pkg/ioutils/bytespipe_test.go new file mode 100644 index 000000000..300fb5f6d --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/ioutils/bytespipe_test.go @@ -0,0 +1,159 @@ +package ioutils + +import ( + "crypto/sha1" + "encoding/hex" + "math/rand" + "testing" + "time" +) + +func TestBytesPipeRead(t *testing.T) { + buf := NewBytesPipe() + buf.Write([]byte("12")) + buf.Write([]byte("34")) + buf.Write([]byte("56")) + buf.Write([]byte("78")) + buf.Write([]byte("90")) + rd := make([]byte, 4) + n, err := buf.Read(rd) + if err != nil { + t.Fatal(err) + } + if n != 4 { + t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 4) + } + if string(rd) != "1234" { + 
t.Fatalf("Read %s, but must be %s", rd, "1234") + } + n, err = buf.Read(rd) + if err != nil { + t.Fatal(err) + } + if n != 4 { + t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 4) + } + if string(rd) != "5678" { + t.Fatalf("Read %s, but must be %s", rd, "5679") + } + n, err = buf.Read(rd) + if err != nil { + t.Fatal(err) + } + if n != 2 { + t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 2) + } + if string(rd[:n]) != "90" { + t.Fatalf("Read %s, but must be %s", rd, "90") + } +} + +func TestBytesPipeWrite(t *testing.T) { + buf := NewBytesPipe() + buf.Write([]byte("12")) + buf.Write([]byte("34")) + buf.Write([]byte("56")) + buf.Write([]byte("78")) + buf.Write([]byte("90")) + if buf.buf[0].String() != "1234567890" { + t.Fatalf("Buffer %q, must be %q", buf.buf[0].String(), "1234567890") + } +} + +// Write and read in different speeds/chunk sizes and check valid data is read. +func TestBytesPipeWriteRandomChunks(t *testing.T) { + cases := []struct{ iterations, writesPerLoop, readsPerLoop int }{ + {100, 10, 1}, + {1000, 10, 5}, + {1000, 100, 0}, + {1000, 5, 6}, + {10000, 50, 25}, + } + + testMessage := []byte("this is a random string for testing") + // random slice sizes to read and write + writeChunks := []int{25, 35, 15, 20} + readChunks := []int{5, 45, 20, 25} + + for _, c := range cases { + // first pass: write directly to hash + hash := sha1.New() + for i := 0; i < c.iterations*c.writesPerLoop; i++ { + if _, err := hash.Write(testMessage[:writeChunks[i%len(writeChunks)]]); err != nil { + t.Fatal(err) + } + } + expected := hex.EncodeToString(hash.Sum(nil)) + + // write/read through buffer + buf := NewBytesPipe() + hash.Reset() + + done := make(chan struct{}) + + go func() { + // random delay before read starts + <-time.After(time.Duration(rand.Intn(10)) * time.Millisecond) + for i := 0; ; i++ { + p := make([]byte, readChunks[(c.iterations*c.readsPerLoop+i)%len(readChunks)]) + n, _ := buf.Read(p) + if n == 0 { + break + } + hash.Write(p[:n]) + } + + close(done) + }() + + for i := 0; i < c.iterations; i++ { + for w := 0; w < c.writesPerLoop; w++ { + buf.Write(testMessage[:writeChunks[(i*c.writesPerLoop+w)%len(writeChunks)]]) + } + } + buf.Close() + <-done + + actual := hex.EncodeToString(hash.Sum(nil)) + + if expected != actual { + t.Fatalf("BytesPipe returned invalid data. 
Expected checksum %v, got %v", expected, actual) + } + + } +} + +func BenchmarkBytesPipeWrite(b *testing.B) { + testData := []byte("pretty short line, because why not?") + for i := 0; i < b.N; i++ { + readBuf := make([]byte, 1024) + buf := NewBytesPipe() + go func() { + var err error + for err == nil { + _, err = buf.Read(readBuf) + } + }() + for j := 0; j < 1000; j++ { + buf.Write(testData) + } + buf.Close() + } +} + +func BenchmarkBytesPipeRead(b *testing.B) { + rd := make([]byte, 512) + for i := 0; i < b.N; i++ { + b.StopTimer() + buf := NewBytesPipe() + for j := 0; j < 500; j++ { + buf.Write(make([]byte, 1024)) + } + b.StartTimer() + for j := 0; j < 1000; j++ { + if n, _ := buf.Read(rd); n != 512 { + b.Fatalf("Wrong number of bytes: %d", n) + } + } + } +} diff --git a/vendor/github.com/moby/moby/pkg/ioutils/fswriters.go b/vendor/github.com/moby/moby/pkg/ioutils/fswriters.go new file mode 100644 index 000000000..a56c46265 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/ioutils/fswriters.go @@ -0,0 +1,162 @@ +package ioutils + +import ( + "io" + "io/ioutil" + "os" + "path/filepath" +) + +// NewAtomicFileWriter returns WriteCloser so that writing to it writes to a +// temporary file and closing it atomically changes the temporary file to +// destination path. Writing and closing concurrently is not allowed. +func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) { + f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) + if err != nil { + return nil, err + } + + abspath, err := filepath.Abs(filename) + if err != nil { + return nil, err + } + return &atomicFileWriter{ + f: f, + fn: abspath, + perm: perm, + }, nil +} + +// AtomicWriteFile atomically writes data to a file named by filename. +func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error { + f, err := NewAtomicFileWriter(filename, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + f.(*atomicFileWriter).writeErr = err + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +type atomicFileWriter struct { + f *os.File + fn string + writeErr error + perm os.FileMode +} + +func (w *atomicFileWriter) Write(dt []byte) (int, error) { + n, err := w.f.Write(dt) + if err != nil { + w.writeErr = err + } + return n, err +} + +func (w *atomicFileWriter) Close() (retErr error) { + defer func() { + if retErr != nil || w.writeErr != nil { + os.Remove(w.f.Name()) + } + }() + if err := w.f.Sync(); err != nil { + w.f.Close() + return err + } + if err := w.f.Close(); err != nil { + return err + } + if err := os.Chmod(w.f.Name(), w.perm); err != nil { + return err + } + if w.writeErr == nil { + return os.Rename(w.f.Name(), w.fn) + } + return nil +} + +// AtomicWriteSet is used to atomically write a set +// of files and ensure they are visible at the same time. +// Must be committed to a new directory. +type AtomicWriteSet struct { + root string +} + +// NewAtomicWriteSet creates a new atomic write set to +// atomically create a set of files. The given directory +// is used as the base directory for storing files before +// commit. If no temporary directory is given the system +// default is used. 
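A hedged sketch of the AtomicWriteSet flow described above (not part of the vendored source; writeConfigSet, the file name, and the target path are illustrative): stage files into the set, then Commit them so they become visible in a single rename, or Cancel to discard everything.

package main

import "github.com/moby/moby/pkg/ioutils"

// writeConfigSet is a hypothetical caller: it stages one file in the set and
// then commits the whole set to target in a single rename.
func writeConfigSet(target string) error {
	ws, err := ioutils.NewAtomicWriteSet("") // "" falls back to the system temp dir
	if err != nil {
		return err
	}
	if err := ws.WriteFile("config.json", []byte(`{"debug":true}`), 0600); err != nil {
		ws.Cancel() // discard the partially written set
		return err
	}
	// target must not exist yet; its parent directory must.
	return ws.Commit(target)
}

func main() {
	if err := writeConfigSet("/tmp/config-set"); err != nil {
		panic(err)
	}
}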
+func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) { + td, err := ioutil.TempDir(tmpDir, "write-set-") + if err != nil { + return nil, err + } + + return &AtomicWriteSet{ + root: td, + }, nil +} + +// WriteFile writes a file to the set, guaranteeing the file +// has been synced. +func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error { + f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +type syncFileCloser struct { + *os.File +} + +func (w syncFileCloser) Close() error { + err := w.File.Sync() + if err1 := w.File.Close(); err == nil { + err = err1 + } + return err +} + +// FileWriter opens a file writer inside the set. The file +// should be synced and closed before calling commit. +func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) { + f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm) + if err != nil { + return nil, err + } + return syncFileCloser{f}, nil +} + +// Cancel cancels the set and removes all temporary data +// created in the set. +func (ws *AtomicWriteSet) Cancel() error { + return os.RemoveAll(ws.root) +} + +// Commit moves all created files to the target directory. The +// target directory must not exist and the parent of the target +// directory must exist. +func (ws *AtomicWriteSet) Commit(target string) error { + return os.Rename(ws.root, target) +} + +// String returns the location the set is writing to. +func (ws *AtomicWriteSet) String() string { + return ws.root +} diff --git a/vendor/github.com/moby/moby/pkg/ioutils/fswriters_test.go b/vendor/github.com/moby/moby/pkg/ioutils/fswriters_test.go new file mode 100644 index 000000000..5d286005d --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/ioutils/fswriters_test.go @@ -0,0 +1,132 @@ +package ioutils + +import ( + "bytes" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "testing" +) + +var ( + testMode os.FileMode = 0640 +) + +func init() { + // Windows does not support full Linux file mode + if runtime.GOOS == "windows" { + testMode = 0666 + } +} + +func TestAtomicWriteToFile(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "atomic-writers-test") + if err != nil { + t.Fatalf("Error when creating temporary directory: %s", err) + } + defer os.RemoveAll(tmpDir) + + expected := []byte("barbaz") + if err := AtomicWriteFile(filepath.Join(tmpDir, "foo"), expected, testMode); err != nil { + t.Fatalf("Error writing to file: %v", err) + } + + actual, err := ioutil.ReadFile(filepath.Join(tmpDir, "foo")) + if err != nil { + t.Fatalf("Error reading from file: %v", err) + } + + if !bytes.Equal(actual, expected) { + t.Fatalf("Data mismatch, expected %q, got %q", expected, actual) + } + + st, err := os.Stat(filepath.Join(tmpDir, "foo")) + if err != nil { + t.Fatalf("Error statting file: %v", err) + } + if expected := os.FileMode(testMode); st.Mode() != expected { + t.Fatalf("Mode mismatched, expected %o, got %o", expected, st.Mode()) + } +} + +func TestAtomicWriteSetCommit(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "atomic-writerset-test") + if err != nil { + t.Fatalf("Error when creating temporary directory: %s", err) + } + defer os.RemoveAll(tmpDir) + + if err := os.Mkdir(filepath.Join(tmpDir, "tmp"), 0700); err != nil { + t.Fatalf("Error creating tmp directory: %s", err) + } + + 
targetDir := filepath.Join(tmpDir, "target") + ws, err := NewAtomicWriteSet(filepath.Join(tmpDir, "tmp")) + if err != nil { + t.Fatalf("Error creating atomic write set: %s", err) + } + + expected := []byte("barbaz") + if err := ws.WriteFile("foo", expected, testMode); err != nil { + t.Fatalf("Error writing to file: %v", err) + } + + if _, err := ioutil.ReadFile(filepath.Join(targetDir, "foo")); err == nil { + t.Fatalf("Expected error reading file where should not exist") + } + + if err := ws.Commit(targetDir); err != nil { + t.Fatalf("Error committing file: %s", err) + } + + actual, err := ioutil.ReadFile(filepath.Join(targetDir, "foo")) + if err != nil { + t.Fatalf("Error reading from file: %v", err) + } + + if !bytes.Equal(actual, expected) { + t.Fatalf("Data mismatch, expected %q, got %q", expected, actual) + } + + st, err := os.Stat(filepath.Join(targetDir, "foo")) + if err != nil { + t.Fatalf("Error statting file: %v", err) + } + if expected := os.FileMode(testMode); st.Mode() != expected { + t.Fatalf("Mode mismatched, expected %o, got %o", expected, st.Mode()) + } + +} + +func TestAtomicWriteSetCancel(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "atomic-writerset-test") + if err != nil { + t.Fatalf("Error when creating temporary directory: %s", err) + } + defer os.RemoveAll(tmpDir) + + if err := os.Mkdir(filepath.Join(tmpDir, "tmp"), 0700); err != nil { + t.Fatalf("Error creating tmp directory: %s", err) + } + + ws, err := NewAtomicWriteSet(filepath.Join(tmpDir, "tmp")) + if err != nil { + t.Fatalf("Error creating atomic write set: %s", err) + } + + expected := []byte("barbaz") + if err := ws.WriteFile("foo", expected, testMode); err != nil { + t.Fatalf("Error writing to file: %v", err) + } + + if err := ws.Cancel(); err != nil { + t.Fatalf("Error committing file: %s", err) + } + + if _, err := ioutil.ReadFile(filepath.Join(tmpDir, "target", "foo")); err == nil { + t.Fatalf("Expected error reading file where should not exist") + } else if !os.IsNotExist(err) { + t.Fatalf("Unexpected error reading file: %s", err) + } +} diff --git a/vendor/github.com/moby/moby/pkg/ioutils/readers.go b/vendor/github.com/moby/moby/pkg/ioutils/readers.go new file mode 100644 index 000000000..63f3c07f4 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/ioutils/readers.go @@ -0,0 +1,154 @@ +package ioutils + +import ( + "crypto/sha256" + "encoding/hex" + "io" + + "golang.org/x/net/context" +) + +type readCloserWrapper struct { + io.Reader + closer func() error +} + +func (r *readCloserWrapper) Close() error { + return r.closer() +} + +// NewReadCloserWrapper returns a new io.ReadCloser. +func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { + return &readCloserWrapper{ + Reader: r, + closer: closer, + } +} + +type readerErrWrapper struct { + reader io.Reader + closer func() +} + +func (r *readerErrWrapper) Read(p []byte) (int, error) { + n, err := r.reader.Read(p) + if err != nil { + r.closer() + } + return n, err +} + +// NewReaderErrWrapper returns a new io.Reader. +func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { + return &readerErrWrapper{ + reader: r, + closer: closer, + } +} + +// HashData returns the sha256 sum of src. +func HashData(src io.Reader) (string, error) { + h := sha256.New() + if _, err := io.Copy(h, src); err != nil { + return "", err + } + return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil +} + +// OnEOFReader wraps an io.ReadCloser and a function +// the function will run at the end of file or close the file. 
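Not part of the vendored source — a small sketch of the OnEOFReader declared next, assuming the vendor import path: the callback fires exactly once, at EOF or on Close, whichever comes first.

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"

	"github.com/moby/moby/pkg/ioutils"
)

func main() {
	r := &ioutils.OnEOFReader{
		Rc: ioutil.NopCloser(strings.NewReader("payload")),
		Fn: func() { fmt.Println("cleanup ran") },
	}
	io.Copy(ioutil.Discard, r) // Fn fires here, at EOF
	r.Close()                  // no-op for Fn: it is cleared after its first run
}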
+type OnEOFReader struct { + Rc io.ReadCloser + Fn func() +} + +func (r *OnEOFReader) Read(p []byte) (n int, err error) { + n, err = r.Rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +// Close closes the underlying ReadCloser and runs the function. +func (r *OnEOFReader) Close() error { + err := r.Rc.Close() + r.runFunc() + return err +} + +func (r *OnEOFReader) runFunc() { + if fn := r.Fn; fn != nil { + fn() + r.Fn = nil + } +} + +// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read +// operations. +type cancelReadCloser struct { + cancel func() + pR *io.PipeReader // Stream to read from + pW *io.PipeWriter +} + +// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the +// context is cancelled. The returned io.ReadCloser must be closed when it is +// no longer needed. +func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser { + pR, pW := io.Pipe() + + // Create a context used to signal when the pipe is closed + doneCtx, cancel := context.WithCancel(context.Background()) + + p := &cancelReadCloser{ + cancel: cancel, + pR: pR, + pW: pW, + } + + go func() { + _, err := io.Copy(pW, in) + select { + case <-ctx.Done(): + // If the context was closed, p.closeWithError + // was already called. Calling it again would + // change the error that Read returns. + default: + p.closeWithError(err) + } + in.Close() + }() + go func() { + for { + select { + case <-ctx.Done(): + p.closeWithError(ctx.Err()) + case <-doneCtx.Done(): + return + } + } + }() + + return p +} + +// Read wraps the Read method of the pipe that provides data from the wrapped +// ReadCloser. +func (p *cancelReadCloser) Read(buf []byte) (n int, err error) { + return p.pR.Read(buf) +} + +// closeWithError closes the wrapper and its underlying reader. It will +// cause future calls to Read to return err. +func (p *cancelReadCloser) closeWithError(err error) { + p.pW.CloseWithError(err) + p.cancel() +} + +// Close closes the wrapper and its underlying reader. It will cause +// future calls to Read to return io.EOF.
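A hedged sketch of NewCancelReadCloser from above (not part of the vendored file; the blocked pipe reader is illustrative): a Read that would otherwise block forever returns with the context's error once the timeout fires.

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"time"

	"golang.org/x/net/context"

	"github.com/moby/moby/pkg/ioutils"
)

func main() {
	// A reader that never yields data: nothing ever writes to the pipe.
	pr, _ := io.Pipe()

	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	rc := ioutils.NewCancelReadCloser(ctx, ioutil.NopCloser(pr))
	defer rc.Close()

	_, err := rc.Read(make([]byte, 32))
	fmt.Println(err) // context deadline exceeded
}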
+func (p *cancelReadCloser) Close() error { + p.closeWithError(io.EOF) + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/ioutils/readers_test.go b/vendor/github.com/moby/moby/pkg/ioutils/readers_test.go new file mode 100644 index 000000000..9abc1054d --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/ioutils/readers_test.go @@ -0,0 +1,94 @@ +package ioutils + +import ( + "fmt" + "io/ioutil" + "strings" + "testing" + "time" + + "golang.org/x/net/context" +) + +// Implement io.Reader +type errorReader struct{} + +func (r *errorReader) Read(p []byte) (int, error) { + return 0, fmt.Errorf("Error reader always fail.") +} + +func TestReadCloserWrapperClose(t *testing.T) { + reader := strings.NewReader("A string reader") + wrapper := NewReadCloserWrapper(reader, func() error { + return fmt.Errorf("This will be called when closing") + }) + err := wrapper.Close() + if err == nil || !strings.Contains(err.Error(), "This will be called when closing") { + t.Fatalf("readCloserWrapper should have call the anonymous func and thus, fail.") + } +} + +func TestReaderErrWrapperReadOnError(t *testing.T) { + called := false + reader := &errorReader{} + wrapper := NewReaderErrWrapper(reader, func() { + called = true + }) + _, err := wrapper.Read([]byte{}) + if err == nil || !strings.Contains(err.Error(), "Error reader always fail.") { + t.Fatalf("readErrWrapper should returned an error") + } + if !called { + t.Fatalf("readErrWrapper should have call the anonymous function on failure") + } +} + +func TestReaderErrWrapperRead(t *testing.T) { + reader := strings.NewReader("a string reader.") + wrapper := NewReaderErrWrapper(reader, func() { + t.Fatalf("readErrWrapper should not have called the anonymous function") + }) + // Read 20 byte (should be ok with the string above) + num, err := wrapper.Read(make([]byte, 20)) + if err != nil { + t.Fatal(err) + } + if num != 16 { + t.Fatalf("readerErrWrapper should have read 16 byte, but read %d", num) + } +} + +func TestHashData(t *testing.T) { + reader := strings.NewReader("hash-me") + actual, err := HashData(reader) + if err != nil { + t.Fatal(err) + } + expected := "sha256:4d11186aed035cc624d553e10db358492c84a7cd6b9670d92123c144930450aa" + if actual != expected { + t.Fatalf("Expecting %s, got %s", expected, actual) + } +} + +type perpetualReader struct{} + +func (p *perpetualReader) Read(buf []byte) (n int, err error) { + for i := 0; i != len(buf); i++ { + buf[i] = 'a' + } + return len(buf), nil +} + +func TestCancelReadCloser(t *testing.T) { + ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond) + cancelReadCloser := NewCancelReadCloser(ctx, ioutil.NopCloser(&perpetualReader{})) + for { + var buf [128]byte + _, err := cancelReadCloser.Read(buf[:]) + if err == context.DeadlineExceeded { + break + } else if err != nil { + t.Fatalf("got unexpected error: %v", err) + } + } +} diff --git a/vendor/github.com/moby/moby/pkg/ioutils/temp_unix.go b/vendor/github.com/moby/moby/pkg/ioutils/temp_unix.go new file mode 100644 index 000000000..1539ad21b --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/ioutils/temp_unix.go @@ -0,0 +1,10 @@ +// +build !windows + +package ioutils + +import "io/ioutil" + +// TempDir on Unix systems is equivalent to ioutil.TempDir. 
+func TempDir(dir, prefix string) (string, error) { + return ioutil.TempDir(dir, prefix) +} diff --git a/vendor/github.com/moby/moby/pkg/ioutils/temp_windows.go b/vendor/github.com/moby/moby/pkg/ioutils/temp_windows.go new file mode 100644 index 000000000..c258e5fdd --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/ioutils/temp_windows.go @@ -0,0 +1,18 @@ +// +build windows + +package ioutils + +import ( + "io/ioutil" + + "github.com/docker/docker/pkg/longpath" +) + +// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format. +func TempDir(dir, prefix string) (string, error) { + tempDir, err := ioutil.TempDir(dir, prefix) + if err != nil { + return "", err + } + return longpath.AddPrefix(tempDir), nil +} diff --git a/vendor/github.com/moby/moby/pkg/ioutils/writeflusher.go b/vendor/github.com/moby/moby/pkg/ioutils/writeflusher.go new file mode 100644 index 000000000..52a4901ad --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/ioutils/writeflusher.go @@ -0,0 +1,92 @@ +package ioutils + +import ( + "io" + "sync" +) + +// WriteFlusher wraps the Write and Flush operations, ensuring that every write +// is a flush. In addition, the Close method can be called to intercept +// Read/Write calls if the target's lifecycle has already ended. +type WriteFlusher struct { + w io.Writer + flusher flusher + flushed chan struct{} + flushedOnce sync.Once + closed chan struct{} + closeLock sync.Mutex +} + +type flusher interface { + Flush() +} + +var errWriteFlusherClosed = io.EOF + +func (wf *WriteFlusher) Write(b []byte) (n int, err error) { + select { + case <-wf.closed: + return 0, errWriteFlusherClosed + default: + } + + n, err = wf.w.Write(b) + wf.Flush() // every write is a flush. + return n, err +} + +// Flush flushes the stream immediately. +func (wf *WriteFlusher) Flush() { + select { + case <-wf.closed: + return + default: + } + + wf.flushedOnce.Do(func() { + close(wf.flushed) + }) + wf.flusher.Flush() +} + +// Flushed reports whether the stream has been flushed: it returns true once +// Flush has been called at least once, and false before that. +func (wf *WriteFlusher) Flushed() bool { + // BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to + // be used to detect whether or not a response code has been issued. + // Another hook should be used instead. + var flushed bool + select { + case <-wf.flushed: + flushed = true + default: + } + return flushed +} + +// Close closes the write flusher, disallowing any further writes to the +// target. After the flusher is closed, all calls to write or flush will +// result in an error. +func (wf *WriteFlusher) Close() error { + wf.closeLock.Lock() + defer wf.closeLock.Unlock() + + select { + case <-wf.closed: + return errWriteFlusherClosed + default: + close(wf.closed) + } + return nil +} + +// NewWriteFlusher returns a new WriteFlusher. +func NewWriteFlusher(w io.Writer) *WriteFlusher { + var fl flusher + if f, ok := w.(flusher); ok { + fl = f + } else { + fl = &NopFlusher{} + } + return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})} +} diff --git a/vendor/github.com/moby/moby/pkg/ioutils/writers.go b/vendor/github.com/moby/moby/pkg/ioutils/writers.go new file mode 100644 index 000000000..ccc7f9c23 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/ioutils/writers.go @@ -0,0 +1,66 @@ +package ioutils + +import "io" + +// NopWriter represents a type whose Write operation is a no-op.
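Not part of the vendored file — a sketch of the WriteFlusher defined just above in its typical role, streaming an HTTP response where every Write must reach the client immediately; the handler path and port are illustrative. net/http's concrete ResponseWriter provides Flush at runtime, so NewWriteFlusher picks it up instead of the NopFlusher fallback.

package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/moby/moby/pkg/ioutils"
)

func main() {
	http.HandleFunc("/ticks", func(w http.ResponseWriter, r *http.Request) {
		wf := ioutils.NewWriteFlusher(w)
		defer wf.Close()
		for i := 0; i < 3; i++ {
			fmt.Fprintf(wf, "tick %d\n", i) // pushed to the client immediately
			time.Sleep(time.Second)
		}
	})
	http.ListenAndServe(":8080", nil)
}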
+type NopWriter struct{} + +func (*NopWriter) Write(buf []byte) (int, error) { + return len(buf), nil +} + +type nopWriteCloser struct { + io.Writer +} + +func (w *nopWriteCloser) Close() error { return nil } + +// NopWriteCloser returns a nopWriteCloser. +func NopWriteCloser(w io.Writer) io.WriteCloser { + return &nopWriteCloser{w} +} + +// NopFlusher represents a type which flush operation is nop. +type NopFlusher struct{} + +// Flush is a nop operation. +func (f *NopFlusher) Flush() {} + +type writeCloserWrapper struct { + io.Writer + closer func() error +} + +func (r *writeCloserWrapper) Close() error { + return r.closer() +} + +// NewWriteCloserWrapper returns a new io.WriteCloser. +func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { + return &writeCloserWrapper{ + Writer: r, + closer: closer, + } +} + +// WriteCounter wraps a concrete io.Writer and hold a count of the number +// of bytes written to the writer during a "session". +// This can be convenient when write return is masked +// (e.g., json.Encoder.Encode()) +type WriteCounter struct { + Count int64 + Writer io.Writer +} + +// NewWriteCounter returns a new WriteCounter. +func NewWriteCounter(w io.Writer) *WriteCounter { + return &WriteCounter{ + Writer: w, + } +} + +func (wc *WriteCounter) Write(p []byte) (count int, err error) { + count, err = wc.Writer.Write(p) + wc.Count += int64(count) + return +} diff --git a/vendor/github.com/moby/moby/pkg/ioutils/writers_test.go b/vendor/github.com/moby/moby/pkg/ioutils/writers_test.go new file mode 100644 index 000000000..564b1cd4f --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/ioutils/writers_test.go @@ -0,0 +1,65 @@ +package ioutils + +import ( + "bytes" + "strings" + "testing" +) + +func TestWriteCloserWrapperClose(t *testing.T) { + called := false + writer := bytes.NewBuffer([]byte{}) + wrapper := NewWriteCloserWrapper(writer, func() error { + called = true + return nil + }) + if err := wrapper.Close(); err != nil { + t.Fatal(err) + } + if !called { + t.Fatalf("writeCloserWrapper should have call the anonymous function.") + } +} + +func TestNopWriteCloser(t *testing.T) { + writer := bytes.NewBuffer([]byte{}) + wrapper := NopWriteCloser(writer) + if err := wrapper.Close(); err != nil { + t.Fatal("NopWriteCloser always return nil on Close.") + } + +} + +func TestNopWriter(t *testing.T) { + nw := &NopWriter{} + l, err := nw.Write([]byte{'c'}) + if err != nil { + t.Fatal(err) + } + if l != 1 { + t.Fatalf("Expected 1 got %d", l) + } +} + +func TestWriteCounter(t *testing.T) { + dummy1 := "This is a dummy string." + dummy2 := "This is another dummy string." + totalLength := int64(len(dummy1) + len(dummy2)) + + reader1 := strings.NewReader(dummy1) + reader2 := strings.NewReader(dummy2) + + var buffer bytes.Buffer + wc := NewWriteCounter(&buffer) + + reader1.WriteTo(wc) + reader2.WriteTo(wc) + + if wc.Count != totalLength { + t.Errorf("Wrong count: %d vs. %d", wc.Count, totalLength) + } + + if buffer.String() != dummy1+dummy2 { + t.Error("Wrong message written") + } +} diff --git a/vendor/github.com/moby/moby/pkg/jsonlog/jsonlog.go b/vendor/github.com/moby/moby/pkg/jsonlog/jsonlog.go new file mode 100644 index 000000000..4734c3111 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/jsonlog/jsonlog.go @@ -0,0 +1,42 @@ +package jsonlog + +import ( + "encoding/json" + "fmt" + "time" +) + +// JSONLog represents a log message, typically a single entry from a given log stream. +// JSONLogs can be easily serialized to and from JSON and support custom formatting. 
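A hedged usage sketch of the JSONLog type declared below (not part of the vendored source; the timestamp and the outputs shown in comments are indicative): Format("") returns the raw message, "json" marshals the whole entry, and any other value is used as a time layout for the prefix.

package main

import (
	"fmt"
	"time"

	"github.com/moby/moby/pkg/jsonlog"
)

func main() {
	l := &jsonlog.JSONLog{
		Log:     "container started\n",
		Stream:  "stdout",
		Created: time.Date(2017, 7, 26, 12, 0, 0, 0, time.UTC),
	}

	asJSON, _ := l.Format("json")
	fmt.Println(asJSON) // {"log":"container started\n","stream":"stdout","time":"2017-07-26T12:00:00Z"}

	prefixed, _ := l.Format(jsonlog.RFC3339NanoFixed)
	fmt.Print(prefixed) // 2017-07-26T12:00:00.000000000Z container started
}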
+type JSONLog struct { + // Log is the log message + Log string `json:"log,omitempty"` + // Stream is the log source + Stream string `json:"stream,omitempty"` + // Created is the created timestamp of log + Created time.Time `json:"time"` + // Attrs is the list of extra attributes provided by the user + Attrs map[string]string `json:"attrs,omitempty"` +} + +// Format returns the log formatted according to format +// If format is nil, returns the log message +// If format is json, returns the log marshaled in json format +// By default, returns the log with the log time formatted according to format. +func (jl *JSONLog) Format(format string) (string, error) { + if format == "" { + return jl.Log, nil + } + if format == "json" { + m, err := json.Marshal(jl) + return string(m), err + } + return fmt.Sprintf("%s %s", jl.Created.Format(format), jl.Log), nil +} + +// Reset resets the log to nil. +func (jl *JSONLog) Reset() { + jl.Log = "" + jl.Stream = "" + jl.Created = time.Time{} +} diff --git a/vendor/github.com/moby/moby/pkg/jsonlog/jsonlog_marshalling.go b/vendor/github.com/moby/moby/pkg/jsonlog/jsonlog_marshalling.go new file mode 100644 index 000000000..83ce684a8 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/jsonlog/jsonlog_marshalling.go @@ -0,0 +1,178 @@ +// This code was initially generated by ffjson +// This code was generated via the following steps: +// $ go get -u github.com/pquerna/ffjson +// $ make BIND_DIR=. shell +// $ ffjson pkg/jsonlog/jsonlog.go +// $ mv pkg/jsonglog/jsonlog_ffjson.go pkg/jsonlog/jsonlog_marshalling.go +// +// It has been modified to improve the performance of time marshalling to JSON +// and to clean it up. +// Should this code need to be regenerated when the JSONLog struct is changed, +// the relevant changes which have been made are: +// import ( +// "bytes" +//- +// "unicode/utf8" +// ) +// +// func (mj *JSONLog) MarshalJSON() ([]byte, error) { +//@@ -20,13 +16,13 @@ func (mj *JSONLog) MarshalJSON() ([]byte, error) { +// } +// return buf.Bytes(), nil +// } +//+ +// func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { +//- var err error +//- var obj []byte +//- var first bool = true +//- _ = obj +//- _ = err +//- _ = first +//+ var ( +//+ err error +//+ timestamp string +//+ first bool = true +//+ ) +// buf.WriteString(`{`) +// if len(mj.Log) != 0 { +// if first == true { +//@@ -52,11 +48,11 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { +// buf.WriteString(`,`) +// } +// buf.WriteString(`"time":`) +//- obj, err = mj.Created.MarshalJSON() +//+ timestamp, err = FastTimeMarshalJSON(mj.Created) +// if err != nil { +// return err +// } +//- buf.Write(obj) +//+ buf.WriteString(timestamp) +// buf.WriteString(`}`) +// return nil +// } +// @@ -81,9 +81,10 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { +// if len(mj.Log) != 0 { +// - if first == true { +// - first = false +// - } else { +// - buf.WriteString(`,`) +// - } +// + first = false +// buf.WriteString(`"log":`) +// ffjsonWriteJSONString(buf, mj.Log) +// } + +package jsonlog + +import ( + "bytes" + "unicode/utf8" +) + +// MarshalJSON marshals the JSONLog. +func (mj *JSONLog) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + buf.Grow(1024) + if err := mj.MarshalJSONBuf(&buf); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshals the JSONLog and stores the result to a bytes.Buffer. 
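Illustrative only — the reason MarshalJSONBuf exists alongside MarshalJSON is buffer reuse: a caller serializing many entries can Reset a single bytes.Buffer instead of allocating a fresh slice per entry. A minimal sketch, assuming the vendor import path:

package main

import (
	"bytes"
	"fmt"
	"time"

	"github.com/moby/moby/pkg/jsonlog"
)

func main() {
	var buf bytes.Buffer
	for _, line := range []string{"one\n", "two\n"} {
		buf.Reset() // reuse the same backing array for every entry
		entry := jsonlog.JSONLog{Log: line, Created: time.Now().UTC()}
		if err := entry.MarshalJSONBuf(&buf); err != nil {
			panic(err)
		}
		fmt.Println(buf.String())
	}
}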
+func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { + var ( + err error + timestamp string + first = true + ) + buf.WriteString(`{`) + if len(mj.Log) != 0 { + first = false + buf.WriteString(`"log":`) + ffjsonWriteJSONString(buf, mj.Log) + } + if len(mj.Stream) != 0 { + if first { + first = false + } else { + buf.WriteString(`,`) + } + buf.WriteString(`"stream":`) + ffjsonWriteJSONString(buf, mj.Stream) + } + if !first { + buf.WriteString(`,`) + } + buf.WriteString(`"time":`) + timestamp, err = FastTimeMarshalJSON(mj.Created) + if err != nil { + return err + } + buf.WriteString(timestamp) + buf.WriteString(`}`) + return nil +} + +func ffjsonWriteJSONString(buf *bytes.Buffer, s string) { + const hex = "0123456789abcdef" + + buf.WriteByte('"') + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + i++ + continue + } + if start < i { + buf.WriteString(s[start:i]) + } + switch b { + case '\\', '"': + buf.WriteByte('\\') + buf.WriteByte(b) + case '\n': + buf.WriteByte('\\') + buf.WriteByte('n') + case '\r': + buf.WriteByte('\\') + buf.WriteByte('r') + default: + + buf.WriteString(`\u00`) + buf.WriteByte(hex[b>>4]) + buf.WriteByte(hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRuneInString(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + buf.WriteString(s[start:i]) + } + buf.WriteString(`\ufffd`) + i += size + start = i + continue + } + + if c == '\u2028' || c == '\u2029' { + if start < i { + buf.WriteString(s[start:i]) + } + buf.WriteString(`\u202`) + buf.WriteByte(hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + buf.WriteString(s[start:]) + } + buf.WriteByte('"') +} diff --git a/vendor/github.com/moby/moby/pkg/jsonlog/jsonlog_marshalling_test.go b/vendor/github.com/moby/moby/pkg/jsonlog/jsonlog_marshalling_test.go new file mode 100644 index 000000000..8b0d072cd --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/jsonlog/jsonlog_marshalling_test.go @@ -0,0 +1,34 @@ +package jsonlog + +import ( + "regexp" + "testing" +) + +func TestJSONLogMarshalJSON(t *testing.T) { + logs := map[*JSONLog]string{ + {Log: `"A log line with \\"`}: `^{\"log\":\"\\\"A log line with \\\\\\\\\\\"\",\"time\":\".{20,}\"}$`, + {Log: "A log line"}: `^{\"log\":\"A log line\",\"time\":\".{20,}\"}$`, + {Log: "A log line with \r"}: `^{\"log\":\"A log line with \\r\",\"time\":\".{20,}\"}$`, + {Log: "A log line with & < >"}: `^{\"log\":\"A log line with \\u0026 \\u003c \\u003e\",\"time\":\".{20,}\"}$`, + {Log: "A log line with utf8 : 🚀 ψ ω β"}: `^{\"log\":\"A log line with utf8 : 🚀 ψ ω β\",\"time\":\".{20,}\"}$`, + {Stream: "stdout"}: `^{\"stream\":\"stdout\",\"time\":\".{20,}\"}$`, + {}: `^{\"time\":\".{20,}\"}$`, + // These ones are a little weird + {Log: "\u2028 \u2029"}: `^{\"log\":\"\\u2028 \\u2029\",\"time\":\".{20,}\"}$`, + {Log: string([]byte{0xaF})}: `^{\"log\":\"\\ufffd\",\"time\":\".{20,}\"}$`, + {Log: string([]byte{0x7F})}: `^{\"log\":\"\x7f\",\"time\":\".{20,}\"}$`, + } + for jsonLog, expression := range logs { + data, err := jsonLog.MarshalJSON() + if err != nil { + t.Fatal(err) + } + res := string(data) + t.Logf("Result of WriteLog: %q", res) + logRe := regexp.MustCompile(expression) + if !logRe.MatchString(res) { + t.Fatalf("Log line not in expected format [%v]: %q", expression, res) + } + } +} diff --git a/vendor/github.com/moby/moby/pkg/jsonlog/jsonlogbytes.go b/vendor/github.com/moby/moby/pkg/jsonlog/jsonlogbytes.go 
new file mode 100644 index 000000000..0ba716f26 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/jsonlog/jsonlogbytes.go @@ -0,0 +1,122 @@ +package jsonlog + +import ( + "bytes" + "encoding/json" + "unicode/utf8" +) + +// JSONLogs is based on JSONLog. +// It allows marshalling JSONLog from Log as []byte +// and an already marshalled Created timestamp. +type JSONLogs struct { + Log []byte `json:"log,omitempty"` + Stream string `json:"stream,omitempty"` + Created string `json:"time"` + + // json-encoded bytes + RawAttrs json.RawMessage `json:"attrs,omitempty"` +} + +// MarshalJSONBuf is based on the same method from JSONLog +// It has been modified to take into account the necessary changes. +func (mj *JSONLogs) MarshalJSONBuf(buf *bytes.Buffer) error { + var first = true + + buf.WriteString(`{`) + if len(mj.Log) != 0 { + first = false + buf.WriteString(`"log":`) + ffjsonWriteJSONBytesAsString(buf, mj.Log) + } + if len(mj.Stream) != 0 { + if first { + first = false + } else { + buf.WriteString(`,`) + } + buf.WriteString(`"stream":`) + ffjsonWriteJSONString(buf, mj.Stream) + } + if len(mj.RawAttrs) > 0 { + if first { + first = false + } else { + buf.WriteString(`,`) + } + buf.WriteString(`"attrs":`) + buf.Write(mj.RawAttrs) + } + if !first { + buf.WriteString(`,`) + } + buf.WriteString(`"time":`) + buf.WriteString(mj.Created) + buf.WriteString(`}`) + return nil +} + +// This is based on ffjsonWriteJSONBytesAsString. It has been changed +// to accept a string passed as a slice of bytes. +func ffjsonWriteJSONBytesAsString(buf *bytes.Buffer, s []byte) { + const hex = "0123456789abcdef" + + buf.WriteByte('"') + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + i++ + continue + } + if start < i { + buf.Write(s[start:i]) + } + switch b { + case '\\', '"': + buf.WriteByte('\\') + buf.WriteByte(b) + case '\n': + buf.WriteByte('\\') + buf.WriteByte('n') + case '\r': + buf.WriteByte('\\') + buf.WriteByte('r') + default: + + buf.WriteString(`\u00`) + buf.WriteByte(hex[b>>4]) + buf.WriteByte(hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRune(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + buf.Write(s[start:i]) + } + buf.WriteString(`\ufffd`) + i += size + start = i + continue + } + + if c == '\u2028' || c == '\u2029' { + if start < i { + buf.Write(s[start:i]) + } + buf.WriteString(`\u202`) + buf.WriteByte(hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + buf.Write(s[start:]) + } + buf.WriteByte('"') +} diff --git a/vendor/github.com/moby/moby/pkg/jsonlog/jsonlogbytes_test.go b/vendor/github.com/moby/moby/pkg/jsonlog/jsonlogbytes_test.go new file mode 100644 index 000000000..41049aaea --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/jsonlog/jsonlogbytes_test.go @@ -0,0 +1,39 @@ +package jsonlog + +import ( + "bytes" + "regexp" + "testing" +) + +func TestJSONLogsMarshalJSONBuf(t *testing.T) { + logs := map[*JSONLogs]string{ + {Log: []byte(`"A log line with \\"`)}: `^{\"log\":\"\\\"A log line with \\\\\\\\\\\"\",\"time\":}$`, + {Log: []byte("A log line")}: `^{\"log\":\"A log line\",\"time\":}$`, + {Log: []byte("A log line with \r")}: `^{\"log\":\"A log line with \\r\",\"time\":}$`, + {Log: []byte("A log line with & < >")}: `^{\"log\":\"A log line with \\u0026 \\u003c \\u003e\",\"time\":}$`, + {Log: []byte("A log line with utf8 : 🚀 ψ ω β")}: `^{\"log\":\"A log line with utf8 : 🚀 ψ ω β\",\"time\":}$`, + {Stream: 
"stdout"}: `^{\"stream\":\"stdout\",\"time\":}$`, + {Stream: "stdout", Log: []byte("A log line")}: `^{\"log\":\"A log line\",\"stream\":\"stdout\",\"time\":}$`, + {Created: "time"}: `^{\"time\":time}$`, + {}: `^{\"time\":}$`, + // These ones are a little weird + {Log: []byte("\u2028 \u2029")}: `^{\"log\":\"\\u2028 \\u2029\",\"time\":}$`, + {Log: []byte{0xaF}}: `^{\"log\":\"\\ufffd\",\"time\":}$`, + {Log: []byte{0x7F}}: `^{\"log\":\"\x7f\",\"time\":}$`, + // with raw attributes + {Log: []byte("A log line"), RawAttrs: []byte(`{"hello":"world","value":1234}`)}: `^{\"log\":\"A log line\",\"attrs\":{\"hello\":\"world\",\"value\":1234},\"time\":}$`, + } + for jsonLog, expression := range logs { + var buf bytes.Buffer + if err := jsonLog.MarshalJSONBuf(&buf); err != nil { + t.Fatal(err) + } + res := buf.String() + t.Logf("Result of WriteLog: %q", res) + logRe := regexp.MustCompile(expression) + if !logRe.MatchString(res) { + t.Fatalf("Log line not in expected format [%v]: %q", expression, res) + } + } +} diff --git a/vendor/github.com/moby/moby/pkg/jsonlog/time_marshalling.go b/vendor/github.com/moby/moby/pkg/jsonlog/time_marshalling.go new file mode 100644 index 000000000..211733814 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/jsonlog/time_marshalling.go @@ -0,0 +1,27 @@ +// Package jsonlog provides helper functions to parse and print time (time.Time) as JSON. +package jsonlog + +import ( + "errors" + "time" +) + +const ( + // RFC3339NanoFixed is our own version of RFC339Nano because we want one + // that pads the nano seconds part with zeros to ensure + // the timestamps are aligned in the logs. + RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" + // JSONFormat is the format used by FastMarshalJSON + JSONFormat = `"` + time.RFC3339Nano + `"` +) + +// FastTimeMarshalJSON avoids one of the extra allocations that +// time.MarshalJSON is making. +func FastTimeMarshalJSON(t time.Time) (string, error) { + if y := t.Year(); y < 0 || y >= 10000 { + // RFC 3339 is clear that years are 4 digits exactly. + // See golang.org/issue/4556#c15 for more discussion. 
+ return "", errors.New("time.MarshalJSON: year outside of range [0,9999]") + } + return t.Format(JSONFormat), nil +} diff --git a/vendor/github.com/moby/moby/pkg/jsonlog/time_marshalling_test.go b/vendor/github.com/moby/moby/pkg/jsonlog/time_marshalling_test.go new file mode 100644 index 000000000..02d0302c4 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/jsonlog/time_marshalling_test.go @@ -0,0 +1,47 @@ +package jsonlog + +import ( + "testing" + "time" +) + +// Testing to ensure 'year' fields is between 0 and 9999 +func TestFastTimeMarshalJSONWithInvalidDate(t *testing.T) { + aTime := time.Date(-1, 1, 1, 0, 0, 0, 0, time.Local) + json, err := FastTimeMarshalJSON(aTime) + if err == nil { + t.Fatalf("FastTimeMarshalJSON should throw an error, but was '%v'", json) + } + anotherTime := time.Date(10000, 1, 1, 0, 0, 0, 0, time.Local) + json, err = FastTimeMarshalJSON(anotherTime) + if err == nil { + t.Fatalf("FastTimeMarshalJSON should throw an error, but was '%v'", json) + } + +} + +func TestFastTimeMarshalJSON(t *testing.T) { + aTime := time.Date(2015, 5, 29, 11, 1, 2, 3, time.UTC) + json, err := FastTimeMarshalJSON(aTime) + if err != nil { + t.Fatal(err) + } + expected := "\"2015-05-29T11:01:02.000000003Z\"" + if json != expected { + t.Fatalf("Expected %v, got %v", expected, json) + } + + location, err := time.LoadLocation("Europe/Paris") + if err != nil { + t.Fatal(err) + } + aTime = time.Date(2015, 5, 29, 11, 1, 2, 3, location) + json, err = FastTimeMarshalJSON(aTime) + if err != nil { + t.Fatal(err) + } + expected = "\"2015-05-29T11:01:02.000000003+02:00\"" + if json != expected { + t.Fatalf("Expected %v, got %v", expected, json) + } +} diff --git a/vendor/github.com/moby/moby/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/moby/moby/pkg/jsonmessage/jsonmessage.go new file mode 100644 index 000000000..dc785d618 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/jsonmessage/jsonmessage.go @@ -0,0 +1,315 @@ +package jsonmessage + +import ( + "encoding/json" + "fmt" + "io" + "os" + "strings" + "time" + + "github.com/Nvveen/Gotty" + + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/pkg/term" + "github.com/docker/go-units" +) + +// JSONError wraps a concrete Code and Message, `Code` is +// is an integer error code, `Message` is the error message. +type JSONError struct { + Code int `json:"code,omitempty"` + Message string `json:"message,omitempty"` +} + +func (e *JSONError) Error() string { + return e.Message +} + +// JSONProgress describes a Progress. terminalFd is the fd of the current terminal, +// Start is the initial value for the operation. Current is the current status and +// value of the progress made towards Total. Total is the end value describing when +// we made 100% progress for an operation. 
+type JSONProgress struct { + terminalFd uintptr + Current int64 `json:"current,omitempty"` + Total int64 `json:"total,omitempty"` + Start int64 `json:"start,omitempty"` + // If true, don't show xB/yB + HideCounts bool `json:"hidecounts,omitempty"` + Units string `json:"units,omitempty"` +} + +func (p *JSONProgress) String() string { + var ( + width = 200 + pbBox string + numbersBox string + timeLeftBox string + ) + + ws, err := term.GetWinsize(p.terminalFd) + if err == nil { + width = int(ws.Width) + } + + if p.Current <= 0 && p.Total <= 0 { + return "" + } + if p.Total <= 0 { + switch p.Units { + case "": + current := units.HumanSize(float64(p.Current)) + return fmt.Sprintf("%8v", current) + default: + return fmt.Sprintf("%d %s", p.Current, p.Units) + } + } + + percentage := int(float64(p.Current)/float64(p.Total)*100) / 2 + if percentage > 50 { + percentage = 50 + } + if width > 110 { + // this number can't be negative gh#7136 + numSpaces := 0 + if 50-percentage > 0 { + numSpaces = 50 - percentage + } + pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces)) + } + + switch { + case p.HideCounts: + case p.Units == "": // no units, use bytes + current := units.HumanSize(float64(p.Current)) + total := units.HumanSize(float64(p.Total)) + + numbersBox = fmt.Sprintf("%8v/%v", current, total) + + if p.Current > p.Total { + // remove total display if the reported current is wonky. + numbersBox = fmt.Sprintf("%8v", current) + } + default: + numbersBox = fmt.Sprintf("%d/%d %s", p.Current, p.Total, p.Units) + + if p.Current > p.Total { + // remove total display if the reported current is wonky. + numbersBox = fmt.Sprintf("%d %s", p.Current, p.Units) + } + } + + if p.Current > 0 && p.Start > 0 && percentage < 50 { + fromStart := time.Now().UTC().Sub(time.Unix(p.Start, 0)) + perEntry := fromStart / time.Duration(p.Current) + left := time.Duration(p.Total-p.Current) * perEntry + left = (left / time.Second) * time.Second + + if width > 50 { + timeLeftBox = " " + left.String() + } + } + return pbBox + numbersBox + timeLeftBox +} + +// JSONMessage defines a message struct. It describes +// the created time, where it from, status, ID of the +// message. It's used for docker events. +type JSONMessage struct { + Stream string `json:"stream,omitempty"` + Status string `json:"status,omitempty"` + Progress *JSONProgress `json:"progressDetail,omitempty"` + ProgressMessage string `json:"progress,omitempty"` //deprecated + ID string `json:"id,omitempty"` + From string `json:"from,omitempty"` + Time int64 `json:"time,omitempty"` + TimeNano int64 `json:"timeNano,omitempty"` + Error *JSONError `json:"errorDetail,omitempty"` + ErrorMessage string `json:"error,omitempty"` //deprecated + // Aux contains out-of-band data, such as digests for push signing and image id after building. + Aux *json.RawMessage `json:"aux,omitempty"` +} + +/* Satisfied by gotty.TermInfo as well as noTermInfo from below */ +type termInfo interface { + Parse(attr string, params ...interface{}) (string, error) +} + +type noTermInfo struct{} // canary used when no terminfo. + +func (ti *noTermInfo) Parse(attr string, params ...interface{}) (string, error) { + return "", fmt.Errorf("noTermInfo") +} + +func clearLine(out io.Writer, ti termInfo) { + // el2 (clear whole line) is not exposed by terminfo. 
+ + // First clear line from beginning to cursor + if attr, err := ti.Parse("el1"); err == nil { + fmt.Fprintf(out, "%s", attr) + } else { + fmt.Fprintf(out, "\x1b[1K") + } + // Then clear line from cursor to end + if attr, err := ti.Parse("el"); err == nil { + fmt.Fprintf(out, "%s", attr) + } else { + fmt.Fprintf(out, "\x1b[K") + } +} + +func cursorUp(out io.Writer, ti termInfo, l int) { + if l == 0 { // Should never be the case, but be tolerant + return + } + if attr, err := ti.Parse("cuu", l); err == nil { + fmt.Fprintf(out, "%s", attr) + } else { + fmt.Fprintf(out, "\x1b[%dA", l) + } +} + +func cursorDown(out io.Writer, ti termInfo, l int) { + if l == 0 { // Should never be the case, but be tolerant + return + } + if attr, err := ti.Parse("cud", l); err == nil { + fmt.Fprintf(out, "%s", attr) + } else { + fmt.Fprintf(out, "\x1b[%dB", l) + } +} + +// Display displays the JSONMessage to `out`. `termInfo` is non-nil if `out` +// is a terminal. If this is the case, it will erase the entire current line +// when displaying the progressbar. +func (jm *JSONMessage) Display(out io.Writer, termInfo termInfo) error { + if jm.Error != nil { + if jm.Error.Code == 401 { + return fmt.Errorf("Authentication is required.") + } + return jm.Error + } + var endl string + if termInfo != nil && jm.Stream == "" && jm.Progress != nil { + clearLine(out, termInfo) + endl = "\r" + fmt.Fprintf(out, endl) + } else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal + return nil + } + if jm.TimeNano != 0 { + fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(jsonlog.RFC3339NanoFixed)) + } else if jm.Time != 0 { + fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(jsonlog.RFC3339NanoFixed)) + } + if jm.ID != "" { + fmt.Fprintf(out, "%s: ", jm.ID) + } + if jm.From != "" { + fmt.Fprintf(out, "(from %s) ", jm.From) + } + if jm.Progress != nil && termInfo != nil { + fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) + } else if jm.ProgressMessage != "" { //deprecated + fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) + } else if jm.Stream != "" { + fmt.Fprintf(out, "%s%s", jm.Stream, endl) + } else { + fmt.Fprintf(out, "%s%s\n", jm.Status, endl) + } + return nil +} + +// DisplayJSONMessagesStream displays a json message stream from `in` to `out`, `isTerminal` +// describes if `out` is a terminal. If this is the case, it will print `\n` at the end of +// each line and move the cursor while displaying. 
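A hedged sketch of DisplayJSONMessagesStream (not part of the vendored file; the sample messages mimic the daemon's newline-delimited JSON): with isTerminal=false, progress frames are suppressed and only plain status lines are printed.

package main

import (
	"os"
	"strings"

	"github.com/moby/moby/pkg/jsonmessage"
)

func main() {
	// Newline-delimited JSON in the shape the Docker daemon emits.
	in := strings.NewReader(
		`{"id":"abc123","status":"Downloading","progressDetail":{"current":10,"total":100}}` + "\n" +
			`{"id":"abc123","status":"Downloading","progressDetail":{"current":100,"total":100}}` + "\n" +
			`{"status":"Download complete"}` + "\n")

	// isTerminal=false: progress bars are suppressed and plain status lines
	// are printed; pass a real fd and true to get in-place cursor updates.
	if err := jsonmessage.DisplayJSONMessagesStream(in, os.Stdout, 0, false, nil); err != nil {
		panic(err)
	}
}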
+func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(*json.RawMessage)) error { + var ( + dec = json.NewDecoder(in) + ids = make(map[string]int) + ) + + var termInfo termInfo + + if isTerminal { + term := os.Getenv("TERM") + if term == "" { + term = "vt102" + } + + var err error + if termInfo, err = gotty.OpenTermInfo(term); err != nil { + termInfo = &noTermInfo{} + } + } + + for { + diff := 0 + var jm JSONMessage + if err := dec.Decode(&jm); err != nil { + if err == io.EOF { + break + } + return err + } + + if jm.Aux != nil { + if auxCallback != nil { + auxCallback(jm.Aux) + } + continue + } + + if jm.Progress != nil { + jm.Progress.terminalFd = terminalFd + } + if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") { + line, ok := ids[jm.ID] + if !ok { + // NOTE: This approach of using len(id) to + // figure out the number of lines of history + // only works as long as we clear the history + // when we output something that's not + // accounted for in the map, such as a line + // with no ID. + line = len(ids) + ids[jm.ID] = line + if termInfo != nil { + fmt.Fprintf(out, "\n") + } + } + diff = len(ids) - line + if termInfo != nil { + cursorUp(out, termInfo, diff) + } + } else { + // When outputting something that isn't progress + // output, clear the history of previous lines. We + // don't want progress entries from some previous + // operation to be updated (for example, pull -a + // with multiple tags). + ids = make(map[string]int) + } + err := jm.Display(out, termInfo) + if jm.ID != "" && termInfo != nil { + cursorDown(out, termInfo, diff) + } + if err != nil { + return err + } + } + return nil +} + +type stream interface { + io.Writer + FD() uintptr + IsTerminal() bool +} + +// DisplayJSONMessagesToStream prints json messages to the output stream +func DisplayJSONMessagesToStream(in io.Reader, stream stream, auxCallback func(*json.RawMessage)) error { + return DisplayJSONMessagesStream(in, stream, stream.FD(), stream.IsTerminal(), auxCallback) +} diff --git a/vendor/github.com/moby/moby/pkg/jsonmessage/jsonmessage_test.go b/vendor/github.com/moby/moby/pkg/jsonmessage/jsonmessage_test.go new file mode 100644 index 000000000..c3ed6c046 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/jsonmessage/jsonmessage_test.go @@ -0,0 +1,281 @@ +package jsonmessage + +import ( + "bytes" + "fmt" + "os" + "strings" + "testing" + "time" + + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/pkg/term" +) + +func TestError(t *testing.T) { + je := JSONError{404, "Not found"} + if je.Error() != "Not found" { + t.Fatalf("Expected 'Not found' got '%s'", je.Error()) + } +} + +func TestProgress(t *testing.T) { + termsz, err := term.GetWinsize(0) + if err != nil { + // we can safely ignore the err here + termsz = nil + } + jp := JSONProgress{} + if jp.String() != "" { + t.Fatalf("Expected empty string, got '%s'", jp.String()) + } + + expected := " 1B" + jp2 := JSONProgress{Current: 1} + if jp2.String() != expected { + t.Fatalf("Expected %q, got %q", expected, jp2.String()) + } + + expectedStart := "[==========> ] 20B/100B" + if termsz != nil && termsz.Width <= 110 { + expectedStart = " 20B/100B" + } + jp3 := JSONProgress{Current: 20, Total: 100, Start: time.Now().Unix()} + // Just look at the start of the string + // (the remaining time is really hard to test -_-) + if jp3.String()[:len(expectedStart)] != expectedStart { + t.Fatalf("Expected to start with %q, got %q", expectedStart, jp3.String()) + } + + expected = 
"[=========================> ] 50B/100B" + if termsz != nil && termsz.Width <= 110 { + expected = " 50B/100B" + } + jp4 := JSONProgress{Current: 50, Total: 100} + if jp4.String() != expected { + t.Fatalf("Expected %q, got %q", expected, jp4.String()) + } + + // this number can't be negative gh#7136 + expected = "[==================================================>] 50B" + if termsz != nil && termsz.Width <= 110 { + expected = " 50B" + } + jp5 := JSONProgress{Current: 50, Total: 40} + if jp5.String() != expected { + t.Fatalf("Expected %q, got %q", expected, jp5.String()) + } + + expected = "[=========================> ] 50/100 units" + if termsz != nil && termsz.Width <= 110 { + expected = " 50/100 units" + } + jp6 := JSONProgress{Current: 50, Total: 100, Units: "units"} + if jp6.String() != expected { + t.Fatalf("Expected %q, got %q", expected, jp6.String()) + } + + // this number can't be negative + expected = "[==================================================>] 50 units" + if termsz != nil && termsz.Width <= 110 { + expected = " 50 units" + } + jp7 := JSONProgress{Current: 50, Total: 40, Units: "units"} + if jp7.String() != expected { + t.Fatalf("Expected %q, got %q", expected, jp7.String()) + } + + expected = "[=========================> ] " + if termsz != nil && termsz.Width <= 110 { + expected = "" + } + jp8 := JSONProgress{Current: 50, Total: 100, HideCounts: true} + if jp8.String() != expected { + t.Fatalf("Expected %q, got %q", expected, jp8.String()) + } +} + +func TestJSONMessageDisplay(t *testing.T) { + now := time.Now() + messages := map[JSONMessage][]string{ + // Empty + {}: {"\n", "\n"}, + // Status + { + Status: "status", + }: { + "status\n", + "status\n", + }, + // General + { + Time: now.Unix(), + ID: "ID", + From: "From", + Status: "status", + }: { + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(now.Unix(), 0).Format(jsonlog.RFC3339NanoFixed)), + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(now.Unix(), 0).Format(jsonlog.RFC3339NanoFixed)), + }, + // General, with nano precision time + { + TimeNano: now.UnixNano(), + ID: "ID", + From: "From", + Status: "status", + }: { + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)), + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)), + }, + // General, with both times Nano is preferred + { + Time: now.Unix(), + TimeNano: now.UnixNano(), + ID: "ID", + From: "From", + Status: "status", + }: { + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)), + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)), + }, + // Stream over status + { + Status: "status", + Stream: "stream", + }: { + "stream", + "stream", + }, + // With progress message + { + Status: "status", + ProgressMessage: "progressMessage", + }: { + "status progressMessage", + "status progressMessage", + }, + // With progress, stream empty + { + Status: "status", + Stream: "", + Progress: &JSONProgress{Current: 1}, + }: { + "", + fmt.Sprintf("%c[1K%c[K\rstatus 1B\r", 27, 27), + }, + } + + // The tests :) + for jsonMessage, expectedMessages := range messages { + // Without terminal + data := bytes.NewBuffer([]byte{}) + if err := jsonMessage.Display(data, nil); err != nil { + t.Fatal(err) + } + if data.String() != expectedMessages[0] { + t.Fatalf("Expected %q,got %q", expectedMessages[0], data.String()) + } + // With terminal + data = 
bytes.NewBuffer([]byte{})
+		if err := jsonMessage.Display(data, &noTermInfo{}); err != nil {
+			t.Fatal(err)
+		}
+		if data.String() != expectedMessages[1] {
+			t.Fatalf("\nExpected %q\n got %q", expectedMessages[1], data.String())
+		}
+	}
+}
+
+// Test JSONMessage with an Error. It will return an error with the text as error, not the meaning of the HTTP code.
+func TestJSONMessageDisplayWithJSONError(t *testing.T) {
+	data := bytes.NewBuffer([]byte{})
+	jsonMessage := JSONMessage{Error: &JSONError{404, "Can't find it"}}
+
+	err := jsonMessage.Display(data, &noTermInfo{})
+	if err == nil || err.Error() != "Can't find it" {
+		t.Fatalf("Expected a JSONError 404, got %q", err)
+	}
+
+	jsonMessage = JSONMessage{Error: &JSONError{401, "Anything"}}
+	err = jsonMessage.Display(data, &noTermInfo{})
+	if err == nil || err.Error() != "Authentication is required." {
+		t.Fatalf("Expected an error \"Authentication is required.\", got %q", err)
+	}
+}
+
+func TestDisplayJSONMessagesStreamInvalidJSON(t *testing.T) {
+	var (
+		inFd uintptr
+	)
+	data := bytes.NewBuffer([]byte{})
+	reader := strings.NewReader("This is not a 'valid' JSON []")
+	inFd, _ = term.GetFdInfo(reader)
+
+	err := DisplayJSONMessagesStream(reader, data, inFd, false, nil)
+	if err == nil || !strings.HasPrefix(err.Error(), "invalid character") {
+		t.Fatalf("Should have thrown an error (invalid character in ..), got %q", err)
+	}
+}
+
+func TestDisplayJSONMessagesStream(t *testing.T) {
+	var (
+		inFd uintptr
+	)
+
+	messages := map[string][]string{
+		// empty string
+		"": {
+			"",
+			""},
+		// Without progress & ID
+		"{ \"status\": \"status\" }": {
+			"status\n",
+			"status\n",
+		},
+		// Without progress, with ID
+		"{ \"id\": \"ID\",\"status\": \"status\" }": {
+			"ID: status\n",
+			"ID: status\n",
+		},
+		// With progress
+		"{ \"id\": \"ID\", \"status\": \"status\", \"progress\": \"ProgressMessage\" }": {
+			"ID: status ProgressMessage",
+			fmt.Sprintf("\n%c[%dAID: status ProgressMessage%c[%dB", 27, 1, 27, 1),
+		},
+		// With progressDetail
+		"{ \"id\": \"ID\", \"status\": \"status\", \"progressDetail\": { \"Current\": 1} }": {
+			"", // progressbar is disabled in non-terminal
+			fmt.Sprintf("\n%c[%dA%c[1K%c[K\rID: status 1B\r%c[%dB", 27, 1, 27, 27, 27, 1),
+		},
+	}
+
+	// Use $TERM which is unlikely to exist, forcing DisplayJSONMessagesStream to
+	// (hopefully) use &noTermInfo.
+ origTerm := os.Getenv("TERM") + os.Setenv("TERM", "xyzzy-non-existent-terminfo") + + for jsonMessage, expectedMessages := range messages { + data := bytes.NewBuffer([]byte{}) + reader := strings.NewReader(jsonMessage) + inFd, _ = term.GetFdInfo(reader) + + // Without terminal + if err := DisplayJSONMessagesStream(reader, data, inFd, false, nil); err != nil { + t.Fatal(err) + } + if data.String() != expectedMessages[0] { + t.Fatalf("Expected an %q, got %q", expectedMessages[0], data.String()) + } + + // With terminal + data = bytes.NewBuffer([]byte{}) + reader = strings.NewReader(jsonMessage) + if err := DisplayJSONMessagesStream(reader, data, inFd, true, nil); err != nil { + t.Fatal(err) + } + if data.String() != expectedMessages[1] { + t.Fatalf("\nExpected %q\n got %q", expectedMessages[1], data.String()) + } + } + os.Setenv("TERM", origTerm) + +} diff --git a/vendor/github.com/moby/moby/pkg/listeners/group_unix.go b/vendor/github.com/moby/moby/pkg/listeners/group_unix.go new file mode 100644 index 000000000..e1d8774ca --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/listeners/group_unix.go @@ -0,0 +1,34 @@ +// +build !windows + +package listeners + +import ( + "fmt" + "strconv" + + "github.com/opencontainers/runc/libcontainer/user" + "github.com/pkg/errors" +) + +const defaultSocketGroup = "docker" + +func lookupGID(name string) (int, error) { + groupFile, err := user.GetGroupPath() + if err != nil { + return -1, errors.Wrap(err, "error looking up groups") + } + groups, err := user.ParseGroupFileFilter(groupFile, func(g user.Group) bool { + return g.Name == name || strconv.Itoa(g.Gid) == name + }) + if err != nil { + return -1, errors.Wrapf(err, "error parsing groups for %s", name) + } + if len(groups) > 0 { + return groups[0].Gid, nil + } + gid, err := strconv.Atoi(name) + if err == nil { + return gid, nil + } + return -1, fmt.Errorf("group %s not found", name) +} diff --git a/vendor/github.com/moby/moby/pkg/listeners/listeners_solaris.go b/vendor/github.com/moby/moby/pkg/listeners/listeners_solaris.go new file mode 100644 index 000000000..c9003bcf6 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/listeners/listeners_solaris.go @@ -0,0 +1,43 @@ +package listeners + +import ( + "crypto/tls" + "fmt" + "net" + "os" + + "github.com/Sirupsen/logrus" + "github.com/docker/go-connections/sockets" +) + +// Init creates new listeners for the server. 
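+//
+// A usage sketch (illustrative only; the address is hypothetical, and srv is
+// an assumed value with a Serve(net.Listener) method, e.g. an http.Server):
+//
+//	ls, err := listeners.Init("tcp", "0.0.0.0:2376", "", tlsConfig)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, l := range ls {
+//		go srv.Serve(l)
+//	}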
+func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) (ls []net.Listener, err error) { + switch proto { + case "tcp": + l, err := sockets.NewTCPSocket(addr, tlsConfig) + if err != nil { + return nil, err + } + ls = append(ls, l) + case "unix": + gid, err := lookupGID(socketGroup) + if err != nil { + if socketGroup != "" { + if socketGroup != defaultSocketGroup { + return nil, err + } + logrus.Warnf("could not change group %s to %s: %v", addr, defaultSocketGroup, err) + } + gid = os.Getgid() + } + l, err := sockets.NewUnixSocket(addr, gid) + if err != nil { + return nil, fmt.Errorf("can't create unix socket %s: %v", addr, err) + } + ls = append(ls, l) + default: + return nil, fmt.Errorf("Invalid protocol format: %q", proto) + } + + return +} diff --git a/vendor/github.com/moby/moby/pkg/listeners/listeners_unix.go b/vendor/github.com/moby/moby/pkg/listeners/listeners_unix.go new file mode 100644 index 000000000..25c98fba1 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/listeners/listeners_unix.go @@ -0,0 +1,104 @@ +// +build !windows,!solaris + +package listeners + +import ( + "crypto/tls" + "fmt" + "net" + "os" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/coreos/go-systemd/activation" + "github.com/docker/go-connections/sockets" +) + +// Init creates new listeners for the server. +// TODO: Clean up the fact that socketGroup and tlsConfig aren't always used. +func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) ([]net.Listener, error) { + ls := []net.Listener{} + + switch proto { + case "fd": + fds, err := listenFD(addr, tlsConfig) + if err != nil { + return nil, err + } + ls = append(ls, fds...) + case "tcp": + l, err := sockets.NewTCPSocket(addr, tlsConfig) + if err != nil { + return nil, err + } + ls = append(ls, l) + case "unix": + gid, err := lookupGID(socketGroup) + if err != nil { + if socketGroup != "" { + if socketGroup != defaultSocketGroup { + return nil, err + } + logrus.Warnf("could not change group %s to %s: %v", addr, defaultSocketGroup, err) + } + gid = os.Getgid() + } + l, err := sockets.NewUnixSocket(addr, gid) + if err != nil { + return nil, fmt.Errorf("can't create unix socket %s: %v", addr, err) + } + ls = append(ls, l) + default: + return nil, fmt.Errorf("invalid protocol format: %q", proto) + } + + return ls, nil +} + +// listenFD returns the specified socket activated files as a slice of +// net.Listeners or all of the activated files if "*" is given. 
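+//
+// For example (illustrative): if systemd passes three activated sockets, addr
+// "" or "*" returns all three listeners, while addr "4" selects only the
+// second passed socket (systemd numbers activated fds from 3, so fd 4 is
+// offset 1) and closes the rest.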
+func listenFD(addr string, tlsConfig *tls.Config) ([]net.Listener, error) { + var ( + err error + listeners []net.Listener + ) + // socket activation + if tlsConfig != nil { + listeners, err = activation.TLSListeners(false, tlsConfig) + } else { + listeners, err = activation.Listeners(false) + } + if err != nil { + return nil, err + } + + if len(listeners) == 0 { + return nil, fmt.Errorf("no sockets found via socket activation: make sure the service was started by systemd") + } + + // default to all fds just like unix:// and tcp:// + if addr == "" || addr == "*" { + return listeners, nil + } + + fdNum, err := strconv.Atoi(addr) + if err != nil { + return nil, fmt.Errorf("failed to parse systemd fd address: should be a number: %v", addr) + } + fdOffset := fdNum - 3 + if len(listeners) < int(fdOffset)+1 { + return nil, fmt.Errorf("too few socket activated files passed in by systemd") + } + if listeners[fdOffset] == nil { + return nil, fmt.Errorf("failed to listen on systemd activated file: fd %d", fdOffset+3) + } + for i, ls := range listeners { + if i == fdOffset || ls == nil { + continue + } + if err := ls.Close(); err != nil { + return nil, fmt.Errorf("failed to close systemd activated file: fd %d: %v", fdOffset+3, err) + } + } + return []net.Listener{listeners[fdOffset]}, nil +} diff --git a/vendor/github.com/moby/moby/pkg/listeners/listeners_windows.go b/vendor/github.com/moby/moby/pkg/listeners/listeners_windows.go new file mode 100644 index 000000000..5b5a470fc --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/listeners/listeners_windows.go @@ -0,0 +1,54 @@ +package listeners + +import ( + "crypto/tls" + "fmt" + "net" + "strings" + + "github.com/Microsoft/go-winio" + "github.com/docker/go-connections/sockets" +) + +// Init creates new listeners for the server. +func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) ([]net.Listener, error) { + ls := []net.Listener{} + + switch proto { + case "tcp": + l, err := sockets.NewTCPSocket(addr, tlsConfig) + if err != nil { + return nil, err + } + ls = append(ls, l) + + case "npipe": + // allow Administrators and SYSTEM, plus whatever additional users or groups were specified + sddl := "D:P(A;;GA;;;BA)(A;;GA;;;SY)" + if socketGroup != "" { + for _, g := range strings.Split(socketGroup, ",") { + sid, err := winio.LookupSidByName(g) + if err != nil { + return nil, err + } + sddl += fmt.Sprintf("(A;;GRGW;;;%s)", sid) + } + } + c := winio.PipeConfig{ + SecurityDescriptor: sddl, + MessageMode: true, // Use message mode so that CloseWrite() is supported + InputBufferSize: 65536, // Use 64KB buffers to improve performance + OutputBufferSize: 65536, + } + l, err := winio.ListenPipe(addr, &c) + if err != nil { + return nil, err + } + ls = append(ls, l) + + default: + return nil, fmt.Errorf("invalid protocol format: windows only supports tcp and npipe") + } + + return ls, nil +} diff --git a/vendor/github.com/moby/moby/pkg/locker/README.md b/vendor/github.com/moby/moby/pkg/locker/README.md new file mode 100644 index 000000000..c8dbddc57 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/locker/README.md @@ -0,0 +1,65 @@ +Locker +===== + +locker provides a mechanism for creating finer-grained locking to help +free up more global locks to handle other tasks. + +The implementation looks close to a sync.Mutex, however, the user must provide a +reference to use to refer to the underlying lock when locking and unlocking, +and unlock may generate an error. + +If a lock with a given name does not exist when `Lock` is called, one is +created. 
+Lock references are automatically cleaned up on `Unlock` if nothing else is
+waiting for the lock.
+
+
+## Usage
+
+```go
+package important
+
+import (
+	"sync"
+	"time"
+
+	"github.com/docker/docker/pkg/locker"
+)
+
+type important struct {
+	locks *locker.Locker
+	data  map[string]interface{}
+	mu    sync.Mutex
+}
+
+func (i *important) Get(name string) interface{} {
+	i.locks.Lock(name)
+	defer i.locks.Unlock(name)
+	return i.data[name]
+}
+
+func (i *important) Create(name string, data interface{}) {
+	i.locks.Lock(name)
+	defer i.locks.Unlock(name)
+
+	i.createImportant(data)
+
+	i.mu.Lock()
+	i.data[name] = data
+	i.mu.Unlock()
+}
+
+func (i *important) createImportant(data interface{}) {
+	time.Sleep(10 * time.Second)
+}
+```
+
+For functions dealing with a given name, always lock at the beginning of the
+function (or before touching the underlying state); this ensures that any other
+function dealing with the same name will block.
+
+When modifying the underlying data, use the global lock to ensure nothing else
+is modifying it at the same time. Since the name lock is already held, no reads
+will occur while the modification is being performed.
+
diff --git a/vendor/github.com/moby/moby/pkg/locker/locker.go b/vendor/github.com/moby/moby/pkg/locker/locker.go
new file mode 100644
index 000000000..0b22ddfab
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/locker/locker.go
@@ -0,0 +1,112 @@
+/*
+Package locker provides a mechanism for creating finer-grained locking to help
+free up more global locks to handle other tasks.
+
+The implementation looks close to a sync.Mutex, however the user must provide a
+reference to use to refer to the underlying lock when locking and unlocking,
+and unlock may generate an error.
+
+If a lock with a given name does not exist when `Lock` is called, one is
+created.
+Lock references are automatically cleaned up on `Unlock` if nothing else is
+waiting for the lock.
+*/
+package locker
+
+import (
+	"errors"
+	"sync"
+	"sync/atomic"
+)
+
+// ErrNoSuchLock is returned when the requested lock does not exist
+var ErrNoSuchLock = errors.New("no such lock")
+
+// Locker provides a locking mechanism based on the passed in reference name
+type Locker struct {
+	mu    sync.Mutex
+	locks map[string]*lockCtr
+}
+
+// lockCtr is used by Locker to represent a lock with a given name.
+type lockCtr struct {
+	mu sync.Mutex
+	// waiters is the number of waiters waiting to acquire the lock
+	// this is int32 instead of uint32 so we can add `-1` in `dec()`
+	waiters int32
+}
+
+// inc increments the number of waiters waiting for the lock
+func (l *lockCtr) inc() {
+	atomic.AddInt32(&l.waiters, 1)
+}
+
+// dec decrements the number of waiters waiting on the lock
+func (l *lockCtr) dec() {
+	atomic.AddInt32(&l.waiters, -1)
+}
+
+// count gets the current number of waiters
+func (l *lockCtr) count() int32 {
+	return atomic.LoadInt32(&l.waiters)
+}
+
+// Lock locks the mutex
+func (l *lockCtr) Lock() {
+	l.mu.Lock()
+}
+
+// Unlock unlocks the mutex
+func (l *lockCtr) Unlock() {
+	l.mu.Unlock()
+}
+
+// New creates a new Locker
+func New() *Locker {
+	return &Locker{
+		locks: make(map[string]*lockCtr),
+	}
+}
+
+// Lock locks a mutex with the given name.
If it doesn't exist, one is created +func (l *Locker) Lock(name string) { + l.mu.Lock() + if l.locks == nil { + l.locks = make(map[string]*lockCtr) + } + + nameLock, exists := l.locks[name] + if !exists { + nameLock = &lockCtr{} + l.locks[name] = nameLock + } + + // increment the nameLock waiters while inside the main mutex + // this makes sure that the lock isn't deleted if `Lock` and `Unlock` are called concurrently + nameLock.inc() + l.mu.Unlock() + + // Lock the nameLock outside the main mutex so we don't block other operations + // once locked then we can decrement the number of waiters for this lock + nameLock.Lock() + nameLock.dec() +} + +// Unlock unlocks the mutex with the given name +// If the given lock is not being waited on by any other callers, it is deleted +func (l *Locker) Unlock(name string) error { + l.mu.Lock() + nameLock, exists := l.locks[name] + if !exists { + l.mu.Unlock() + return ErrNoSuchLock + } + + if nameLock.count() == 0 { + delete(l.locks, name) + } + nameLock.Unlock() + + l.mu.Unlock() + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/locker/locker_test.go b/vendor/github.com/moby/moby/pkg/locker/locker_test.go new file mode 100644 index 000000000..5a297dd47 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/locker/locker_test.go @@ -0,0 +1,124 @@ +package locker + +import ( + "sync" + "testing" + "time" +) + +func TestLockCounter(t *testing.T) { + l := &lockCtr{} + l.inc() + + if l.waiters != 1 { + t.Fatal("counter inc failed") + } + + l.dec() + if l.waiters != 0 { + t.Fatal("counter dec failed") + } +} + +func TestLockerLock(t *testing.T) { + l := New() + l.Lock("test") + ctr := l.locks["test"] + + if ctr.count() != 0 { + t.Fatalf("expected waiters to be 0, got :%d", ctr.waiters) + } + + chDone := make(chan struct{}) + go func() { + l.Lock("test") + close(chDone) + }() + + chWaiting := make(chan struct{}) + go func() { + for range time.Tick(1 * time.Millisecond) { + if ctr.count() == 1 { + close(chWaiting) + break + } + } + }() + + select { + case <-chWaiting: + case <-time.After(3 * time.Second): + t.Fatal("timed out waiting for lock waiters to be incremented") + } + + select { + case <-chDone: + t.Fatal("lock should not have returned while it was still held") + default: + } + + if err := l.Unlock("test"); err != nil { + t.Fatal(err) + } + + select { + case <-chDone: + case <-time.After(3 * time.Second): + t.Fatalf("lock should have completed") + } + + if ctr.count() != 0 { + t.Fatalf("expected waiters to be 0, got: %d", ctr.count()) + } +} + +func TestLockerUnlock(t *testing.T) { + l := New() + + l.Lock("test") + l.Unlock("test") + + chDone := make(chan struct{}) + go func() { + l.Lock("test") + close(chDone) + }() + + select { + case <-chDone: + case <-time.After(3 * time.Second): + t.Fatalf("lock should not be blocked") + } +} + +func TestLockerConcurrency(t *testing.T) { + l := New() + + var wg sync.WaitGroup + for i := 0; i <= 10000; i++ { + wg.Add(1) + go func() { + l.Lock("test") + // if there is a concurrency issue, will very likely panic here + l.Unlock("test") + wg.Done() + }() + } + + chDone := make(chan struct{}) + go func() { + wg.Wait() + close(chDone) + }() + + select { + case <-chDone: + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for locks to complete") + } + + // Since everything has unlocked this should not exist anymore + if ctr, exists := l.locks["test"]; exists { + t.Fatalf("lock should not exist: %v", ctr) + } +} diff --git a/vendor/github.com/moby/moby/pkg/longpath/longpath.go 
b/vendor/github.com/moby/moby/pkg/longpath/longpath.go new file mode 100644 index 000000000..9b15bfff4 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/longpath/longpath.go @@ -0,0 +1,26 @@ +// longpath introduces some constants and helper functions for handling long paths +// in Windows, which are expected to be prepended with `\\?\` and followed by either +// a drive letter, a UNC server\share, or a volume identifier. + +package longpath + +import ( + "strings" +) + +// Prefix is the longpath prefix for Windows file paths. +const Prefix = `\\?\` + +// AddPrefix will add the Windows long path prefix to the path provided if +// it does not already have it. +func AddPrefix(path string) string { + if !strings.HasPrefix(path, Prefix) { + if strings.HasPrefix(path, `\\`) { + // This is a UNC path, so we need to add 'UNC' to the path as well. + path = Prefix + `UNC` + path[1:] + } else { + path = Prefix + path + } + } + return path +} diff --git a/vendor/github.com/moby/moby/pkg/longpath/longpath_test.go b/vendor/github.com/moby/moby/pkg/longpath/longpath_test.go new file mode 100644 index 000000000..01865eff0 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/longpath/longpath_test.go @@ -0,0 +1,22 @@ +package longpath + +import ( + "strings" + "testing" +) + +func TestStandardLongPath(t *testing.T) { + c := `C:\simple\path` + longC := AddPrefix(c) + if !strings.EqualFold(longC, `\\?\C:\simple\path`) { + t.Errorf("Wrong long path returned. Original = %s ; Long = %s", c, longC) + } +} + +func TestUNCLongPath(t *testing.T) { + c := `\\server\share\path` + longC := AddPrefix(c) + if !strings.EqualFold(longC, `\\?\UNC\server\share\path`) { + t.Errorf("Wrong UNC long path returned. Original = %s ; Long = %s", c, longC) + } +} diff --git a/vendor/github.com/moby/moby/pkg/loopback/attach_loopback.go b/vendor/github.com/moby/moby/pkg/loopback/attach_loopback.go new file mode 100644 index 000000000..6ea9a309d --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/loopback/attach_loopback.go @@ -0,0 +1,137 @@ +// +build linux,cgo + +package loopback + +import ( + "errors" + "fmt" + "os" + + "github.com/Sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// Loopback related errors +var ( + ErrAttachLoopbackDevice = errors.New("loopback attach failed") + ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file") + ErrSetCapacity = errors.New("Unable set loopback capacity") +) + +func stringToLoopName(src string) [LoNameSize]uint8 { + var dst [LoNameSize]uint8 + copy(dst[:], src[:]) + return dst +} + +func getNextFreeLoopbackIndex() (int, error) { + f, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0644) + if err != nil { + return 0, err + } + defer f.Close() + + index, err := ioctlLoopCtlGetFree(f.Fd()) + if index < 0 { + index = 0 + } + return index, err +} + +func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.File, err error) { + // Start looking for a free /dev/loop + for { + target := fmt.Sprintf("/dev/loop%d", index) + index++ + + fi, err := os.Stat(target) + if err != nil { + if os.IsNotExist(err) { + logrus.Error("There are no more loopback devices available.") + } + return nil, ErrAttachLoopbackDevice + } + + if fi.Mode()&os.ModeDevice != os.ModeDevice { + logrus.Errorf("Loopback device %s is not a block device.", target) + continue + } + + // OpenFile adds O_CLOEXEC + loopFile, err = os.OpenFile(target, os.O_RDWR, 0644) + if err != nil { + logrus.Errorf("Error opening loopback device: %s", err) + return nil, ErrAttachLoopbackDevice + } + + // Try 
to attach to the loop file + if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil { + loopFile.Close() + + // If the error is EBUSY, then try the next loopback + if err != unix.EBUSY { + logrus.Errorf("Cannot set up loopback device %s: %s", target, err) + return nil, ErrAttachLoopbackDevice + } + + // Otherwise, we keep going with the loop + continue + } + // In case of success, we finished. Break the loop. + break + } + + // This can't happen, but let's be sure + if loopFile == nil { + logrus.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name()) + return nil, ErrAttachLoopbackDevice + } + + return loopFile, nil +} + +// AttachLoopDevice attaches the given sparse file to the next +// available loopback device. It returns an opened *os.File. +func AttachLoopDevice(sparseName string) (loop *os.File, err error) { + + // Try to retrieve the next available loopback device via syscall. + // If it fails, we discard error and start looping for a + // loopback from index 0. + startIndex, err := getNextFreeLoopbackIndex() + if err != nil { + logrus.Debugf("Error retrieving the next available loopback: %s", err) + } + + // OpenFile adds O_CLOEXEC + sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644) + if err != nil { + logrus.Errorf("Error opening sparse file %s: %s", sparseName, err) + return nil, ErrAttachLoopbackDevice + } + defer sparseFile.Close() + + loopFile, err := openNextAvailableLoopback(startIndex, sparseFile) + if err != nil { + return nil, err + } + + // Set the status of the loopback device + loopInfo := &loopInfo64{ + loFileName: stringToLoopName(loopFile.Name()), + loOffset: 0, + loFlags: LoFlagsAutoClear, + } + + if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil { + logrus.Errorf("Cannot set up loopback device info: %s", err) + + // If the call failed, then free the loopback device + if err := ioctlLoopClrFd(loopFile.Fd()); err != nil { + logrus.Error("Error while cleaning up the loopback device") + } + loopFile.Close() + return nil, ErrAttachLoopbackDevice + } + + return loopFile, nil +} diff --git a/vendor/github.com/moby/moby/pkg/loopback/ioctl.go b/vendor/github.com/moby/moby/pkg/loopback/ioctl.go new file mode 100644 index 000000000..fa744f0a6 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/loopback/ioctl.go @@ -0,0 +1,54 @@ +// +build linux,cgo + +package loopback + +import ( + "unsafe" + + "golang.org/x/sys/unix" +) + +func ioctlLoopCtlGetFree(fd uintptr) (int, error) { + index, err := unix.IoctlGetInt(int(fd), LoopCtlGetFree) + if err != nil { + return 0, err + } + return index, nil +} + +func ioctlLoopSetFd(loopFd, sparseFd uintptr) error { + if err := unix.IoctlSetInt(int(loopFd), LoopSetFd, int(sparseFd)); err != nil { + return err + } + return nil +} + +func ioctlLoopSetStatus64(loopFd uintptr, loopInfo *loopInfo64) error { + if _, _, err := unix.Syscall(unix.SYS_IOCTL, loopFd, LoopSetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { + return err + } + return nil +} + +func ioctlLoopClrFd(loopFd uintptr) error { + if _, _, err := unix.Syscall(unix.SYS_IOCTL, loopFd, LoopClrFd, 0); err != 0 { + return err + } + return nil +} + +func ioctlLoopGetStatus64(loopFd uintptr) (*loopInfo64, error) { + loopInfo := &loopInfo64{} + + if _, _, err := unix.Syscall(unix.SYS_IOCTL, loopFd, LoopGetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { + return nil, err + } + return loopInfo, nil +} + +func ioctlLoopSetCapacity(loopFd uintptr, value int) error { + if err := 
unix.IoctlSetInt(int(loopFd), LoopSetCapacity, value); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/vendor/github.com/moby/moby/pkg/loopback/loop_wrapper.go b/vendor/github.com/moby/moby/pkg/loopback/loop_wrapper.go
new file mode 100644
index 000000000..a50de7f07
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/loopback/loop_wrapper.go
@@ -0,0 +1,52 @@
+// +build linux,cgo
+
+package loopback
+
+/*
+#include <linux/loop.h> // FIXME: present only for defines, maybe we can remove it?
+
+#ifndef LOOP_CTL_GET_FREE
+  #define LOOP_CTL_GET_FREE 0x4C82
+#endif
+
+#ifndef LO_FLAGS_PARTSCAN
+  #define LO_FLAGS_PARTSCAN 8
+#endif
+
+*/
+import "C"
+
+type loopInfo64 struct {
+	loDevice         uint64 /* ioctl r/o */
+	loInode          uint64 /* ioctl r/o */
+	loRdevice        uint64 /* ioctl r/o */
+	loOffset         uint64
+	loSizelimit      uint64 /* bytes, 0 == max available */
+	loNumber         uint32 /* ioctl r/o */
+	loEncryptType    uint32
+	loEncryptKeySize uint32 /* ioctl w/o */
+	loFlags          uint32 /* ioctl r/o */
+	loFileName       [LoNameSize]uint8
+	loCryptName      [LoNameSize]uint8
+	loEncryptKey     [LoKeySize]uint8 /* ioctl w/o */
+	loInit           [2]uint64
+}
+
+// IOCTL consts
+const (
+	LoopSetFd       = C.LOOP_SET_FD
+	LoopCtlGetFree  = C.LOOP_CTL_GET_FREE
+	LoopGetStatus64 = C.LOOP_GET_STATUS64
+	LoopSetStatus64 = C.LOOP_SET_STATUS64
+	LoopClrFd       = C.LOOP_CLR_FD
+	LoopSetCapacity = C.LOOP_SET_CAPACITY
+)
+
+// LOOP consts.
+const (
+	LoFlagsAutoClear = C.LO_FLAGS_AUTOCLEAR
+	LoFlagsReadOnly  = C.LO_FLAGS_READ_ONLY
+	LoFlagsPartScan  = C.LO_FLAGS_PARTSCAN
+	LoKeySize        = C.LO_KEY_SIZE
+	LoNameSize       = C.LO_NAME_SIZE
+)
diff --git a/vendor/github.com/moby/moby/pkg/loopback/loopback.go b/vendor/github.com/moby/moby/pkg/loopback/loopback.go
new file mode 100644
index 000000000..c2d91da6f
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/loopback/loopback.go
@@ -0,0 +1,63 @@
+// +build linux,cgo
+
+package loopback
+
+import (
+	"fmt"
+	"os"
+	"syscall"
+
+	"github.com/Sirupsen/logrus"
+)
+
+func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) {
+	loopInfo, err := ioctlLoopGetStatus64(file.Fd())
+	if err != nil {
+		logrus.Errorf("Error getting loopback backing file: %s", err)
+		return 0, 0, ErrGetLoopbackBackingFile
+	}
+	return loopInfo.loDevice, loopInfo.loInode, nil
+}
+
+// SetCapacity reloads the size for the loopback device.
+func SetCapacity(file *os.File) error {
+	if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil {
+		logrus.Errorf("Error loopbackSetCapacity: %s", err)
+		return ErrSetCapacity
+	}
+	return nil
+}
+
+// FindLoopDeviceFor returns a loopback device file for the specified file which
+// is the backing file of a loop back device.
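+//
+// A usage sketch (illustrative only; the backing-file path is hypothetical,
+// and the caller must close the returned *os.File when done):
+//
+//	backing, err := os.Open("/var/lib/example/sparse.img")
+//	if err == nil {
+//		if loop := FindLoopDeviceFor(backing); loop != nil {
+//			// loop now refers to the /dev/loopN device backed by the file
+//			defer loop.Close()
+//		}
+//		backing.Close()
+//	}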
+func FindLoopDeviceFor(file *os.File) *os.File { + stat, err := file.Stat() + if err != nil { + return nil + } + targetInode := stat.Sys().(*syscall.Stat_t).Ino + targetDevice := stat.Sys().(*syscall.Stat_t).Dev + + for i := 0; true; i++ { + path := fmt.Sprintf("/dev/loop%d", i) + + file, err := os.OpenFile(path, os.O_RDWR, 0) + if err != nil { + if os.IsNotExist(err) { + return nil + } + + // Ignore all errors until the first not-exist + // we want to continue looking for the file + continue + } + + dev, inode, err := getLoopbackBackingFile(file) + if err == nil && dev == targetDevice && inode == targetInode { + return file + } + file.Close() + } + + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/mount/flags.go b/vendor/github.com/moby/moby/pkg/mount/flags.go new file mode 100644 index 000000000..607dbed43 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/mount/flags.go @@ -0,0 +1,149 @@ +package mount + +import ( + "fmt" + "strings" +) + +var flags = map[string]struct { + clear bool + flag int +}{ + "defaults": {false, 0}, + "ro": {false, RDONLY}, + "rw": {true, RDONLY}, + "suid": {true, NOSUID}, + "nosuid": {false, NOSUID}, + "dev": {true, NODEV}, + "nodev": {false, NODEV}, + "exec": {true, NOEXEC}, + "noexec": {false, NOEXEC}, + "sync": {false, SYNCHRONOUS}, + "async": {true, SYNCHRONOUS}, + "dirsync": {false, DIRSYNC}, + "remount": {false, REMOUNT}, + "mand": {false, MANDLOCK}, + "nomand": {true, MANDLOCK}, + "atime": {true, NOATIME}, + "noatime": {false, NOATIME}, + "diratime": {true, NODIRATIME}, + "nodiratime": {false, NODIRATIME}, + "bind": {false, BIND}, + "rbind": {false, RBIND}, + "unbindable": {false, UNBINDABLE}, + "runbindable": {false, RUNBINDABLE}, + "private": {false, PRIVATE}, + "rprivate": {false, RPRIVATE}, + "shared": {false, SHARED}, + "rshared": {false, RSHARED}, + "slave": {false, SLAVE}, + "rslave": {false, RSLAVE}, + "relatime": {false, RELATIME}, + "norelatime": {true, RELATIME}, + "strictatime": {false, STRICTATIME}, + "nostrictatime": {true, STRICTATIME}, +} + +var validFlags = map[string]bool{ + "": true, + "size": true, + "mode": true, + "uid": true, + "gid": true, + "nr_inodes": true, + "nr_blocks": true, + "mpol": true, +} + +var propagationFlags = map[string]bool{ + "bind": true, + "rbind": true, + "unbindable": true, + "runbindable": true, + "private": true, + "rprivate": true, + "shared": true, + "rshared": true, + "slave": true, + "rslave": true, +} + +// MergeTmpfsOptions merge mount options to make sure there is no duplicate. +func MergeTmpfsOptions(options []string) ([]string, error) { + // We use collisions maps to remove duplicates. + // For flag, the key is the flag value (the key for propagation flag is -1) + // For data=value, the key is the data + flagCollisions := map[int]bool{} + dataCollisions := map[string]bool{} + + var newOptions []string + // We process in reverse order + for i := len(options) - 1; i >= 0; i-- { + option := options[i] + if option == "defaults" { + continue + } + if f, ok := flags[option]; ok && f.flag != 0 { + // There is only one propagation mode + key := f.flag + if propagationFlags[option] { + key = -1 + } + // Check to see if there is collision for flag + if !flagCollisions[key] { + // We prepend the option and add to collision map + newOptions = append([]string{option}, newOptions...) 
+				flagCollisions[key] = true
+			}
+			continue
+		}
+		opt := strings.SplitN(option, "=", 2)
+		if len(opt) != 2 || !validFlags[opt[0]] {
+			return nil, fmt.Errorf("Invalid tmpfs option %q", opt)
+		}
+		if !dataCollisions[opt[0]] {
+			// We prepend the option and add to collision map
+			newOptions = append([]string{option}, newOptions...)
+			dataCollisions[opt[0]] = true
+		}
+	}
+
+	return newOptions, nil
+}
+
+// Parse fstab type mount options into mount() flags
+// and device specific data
+func parseOptions(options string) (int, string) {
+	var (
+		flag int
+		data []string
+	)
+
+	for _, o := range strings.Split(options, ",") {
+		// If the option does not exist in the flags table or the flag
+		// is not supported on the platform,
+		// then it is a data value for a specific fs type
+		if f, exists := flags[o]; exists && f.flag != 0 {
+			if f.clear {
+				flag &= ^f.flag
+			} else {
+				flag |= f.flag
+			}
+		} else {
+			data = append(data, o)
+		}
+	}
+	return flag, strings.Join(data, ",")
+}
+
+// ParseTmpfsOptions parses fstab type mount options into flags and data
+func ParseTmpfsOptions(options string) (int, string, error) {
+	flags, data := parseOptions(options)
+	for _, o := range strings.Split(data, ",") {
+		opt := strings.SplitN(o, "=", 2)
+		if !validFlags[opt[0]] {
+			return 0, "", fmt.Errorf("Invalid tmpfs option %q", opt)
+		}
+	}
+	return flags, data, nil
+}
diff --git a/vendor/github.com/moby/moby/pkg/mount/flags_freebsd.go b/vendor/github.com/moby/moby/pkg/mount/flags_freebsd.go
new file mode 100644
index 000000000..5f76f331b
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/mount/flags_freebsd.go
@@ -0,0 +1,49 @@
+// +build freebsd,cgo
+
+package mount
+
+/*
+#include <sys/mount.h>
+*/
+import "C"
+
+const (
+	// RDONLY will mount the filesystem as read-only.
+	RDONLY = C.MNT_RDONLY
+
+	// NOSUID will not allow set-user-identifier or set-group-identifier bits to
+	// take effect.
+	NOSUID = C.MNT_NOSUID
+
+	// NOEXEC will not allow execution of any binaries on the mounted file system.
+	NOEXEC = C.MNT_NOEXEC
+
+	// SYNCHRONOUS will allow any I/O to the file system to be done synchronously.
+	SYNCHRONOUS = C.MNT_SYNCHRONOUS
+
+	// NOATIME will not update the file access time when reading from a file.
+	NOATIME = C.MNT_NOATIME
+)
+
+// These flags are unsupported.
+const (
+	BIND        = 0
+	DIRSYNC     = 0
+	MANDLOCK    = 0
+	NODEV       = 0
+	NODIRATIME  = 0
+	UNBINDABLE  = 0
+	RUNBINDABLE = 0
+	PRIVATE     = 0
+	RPRIVATE    = 0
+	SHARED      = 0
+	RSHARED     = 0
+	SLAVE       = 0
+	RSLAVE      = 0
+	RBIND       = 0
+	RELATIVE    = 0
+	RELATIME    = 0
+	REMOUNT     = 0
+	STRICTATIME = 0
+	mntDetach   = 0
+)
diff --git a/vendor/github.com/moby/moby/pkg/mount/flags_linux.go b/vendor/github.com/moby/moby/pkg/mount/flags_linux.go
new file mode 100644
index 000000000..0425d0dd6
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/mount/flags_linux.go
@@ -0,0 +1,87 @@
+package mount
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+const (
+	// RDONLY will mount the file system read-only.
+	RDONLY = unix.MS_RDONLY
+
+	// NOSUID will not allow set-user-identifier or set-group-identifier bits to
+	// take effect.
+	NOSUID = unix.MS_NOSUID
+
+	// NODEV will not interpret character or block special devices on the file
+	// system.
+	NODEV = unix.MS_NODEV
+
+	// NOEXEC will not allow execution of any binaries on the mounted file system.
+	NOEXEC = unix.MS_NOEXEC
+
+	// SYNCHRONOUS will allow I/O to the file system to be done synchronously.
+	SYNCHRONOUS = unix.MS_SYNCHRONOUS
+
+	// DIRSYNC will force all directory updates within the file system to be done
This affects the following system calls: create, link, + // unlink, symlink, mkdir, rmdir, mknod and rename. + DIRSYNC = unix.MS_DIRSYNC + + // REMOUNT will attempt to remount an already-mounted file system. This is + // commonly used to change the mount flags for a file system, especially to + // make a readonly file system writeable. It does not change device or mount + // point. + REMOUNT = unix.MS_REMOUNT + + // MANDLOCK will force mandatory locks on a filesystem. + MANDLOCK = unix.MS_MANDLOCK + + // NOATIME will not update the file access time when reading from a file. + NOATIME = unix.MS_NOATIME + + // NODIRATIME will not update the directory access time. + NODIRATIME = unix.MS_NODIRATIME + + // BIND remounts a subtree somewhere else. + BIND = unix.MS_BIND + + // RBIND remounts a subtree and all possible submounts somewhere else. + RBIND = unix.MS_BIND | unix.MS_REC + + // UNBINDABLE creates a mount which cannot be cloned through a bind operation. + UNBINDABLE = unix.MS_UNBINDABLE + + // RUNBINDABLE marks the entire mount tree as UNBINDABLE. + RUNBINDABLE = unix.MS_UNBINDABLE | unix.MS_REC + + // PRIVATE creates a mount which carries no propagation abilities. + PRIVATE = unix.MS_PRIVATE + + // RPRIVATE marks the entire mount tree as PRIVATE. + RPRIVATE = unix.MS_PRIVATE | unix.MS_REC + + // SLAVE creates a mount which receives propagation from its master, but not + // vice versa. + SLAVE = unix.MS_SLAVE + + // RSLAVE marks the entire mount tree as SLAVE. + RSLAVE = unix.MS_SLAVE | unix.MS_REC + + // SHARED creates a mount which provides the ability to create mirrors of + // that mount such that mounts and unmounts within any of the mirrors + // propagate to the other mirrors. + SHARED = unix.MS_SHARED + + // RSHARED marks the entire mount tree as SHARED. + RSHARED = unix.MS_SHARED | unix.MS_REC + + // RELATIME updates inode access times relative to modify or change time. + RELATIME = unix.MS_RELATIME + + // STRICTATIME allows to explicitly request full atime updates. This makes + // it possible for the kernel to default to relatime or noatime but still + // allow userspace to override it. + STRICTATIME = unix.MS_STRICTATIME + + mntDetach = unix.MNT_DETACH +) diff --git a/vendor/github.com/moby/moby/pkg/mount/flags_unsupported.go b/vendor/github.com/moby/moby/pkg/mount/flags_unsupported.go new file mode 100644 index 000000000..9ed741e3f --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/mount/flags_unsupported.go @@ -0,0 +1,31 @@ +// +build !linux,!freebsd freebsd,!cgo solaris,!cgo + +package mount + +// These flags are unsupported. +const ( + BIND = 0 + DIRSYNC = 0 + MANDLOCK = 0 + NOATIME = 0 + NODEV = 0 + NODIRATIME = 0 + NOEXEC = 0 + NOSUID = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 + PRIVATE = 0 + RPRIVATE = 0 + SHARED = 0 + RSHARED = 0 + SLAVE = 0 + RSLAVE = 0 + RBIND = 0 + RELATIME = 0 + RELATIVE = 0 + REMOUNT = 0 + STRICTATIME = 0 + SYNCHRONOUS = 0 + RDONLY = 0 + mntDetach = 0 +) diff --git a/vendor/github.com/moby/moby/pkg/mount/mount.go b/vendor/github.com/moby/moby/pkg/mount/mount.go new file mode 100644 index 000000000..c9fdfd694 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/mount/mount.go @@ -0,0 +1,86 @@ +package mount + +import ( + "sort" + "strings" +) + +// GetMounts retrieves a list of mounts for the current running process. +func GetMounts() ([]*Info, error) { + return parseMountTable() +} + +// Mounted determines if a specified mountpoint has been mounted. +// On Linux it looks at /proc/self/mountinfo and on Solaris at mnttab. 
+func Mounted(mountpoint string) (bool, error) {
+	entries, err := parseMountTable()
+	if err != nil {
+		return false, err
+	}
+
+	// Search the table for the mountpoint
+	for _, e := range entries {
+		if e.Mountpoint == mountpoint {
+			return true, nil
+		}
+	}
+	return false, nil
+}
+
+// Mount will mount a filesystem according to the specified configuration, on the
+// condition that the target path is *not* already mounted. Options must be
+// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
+// flags.go for supported option flags.
+func Mount(device, target, mType, options string) error {
+	flag, _ := parseOptions(options)
+	if flag&REMOUNT != REMOUNT {
+		if mounted, err := Mounted(target); err != nil || mounted {
+			return err
+		}
+	}
+	return ForceMount(device, target, mType, options)
+}
+
+// ForceMount will mount a filesystem according to the specified configuration,
+// *regardless* of whether the target path is already mounted. Options must be
+// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
+// flags.go for supported option flags.
+func ForceMount(device, target, mType, options string) error {
+	flag, data := parseOptions(options)
+	return mount(device, target, mType, uintptr(flag), data)
+}
+
+// Unmount lazily unmounts a filesystem on supported platforms, otherwise
+// does a normal unmount.
+func Unmount(target string) error {
+	if mounted, err := Mounted(target); err != nil || !mounted {
+		return err
+	}
+	return unmount(target, mntDetach)
+}
+
+// RecursiveUnmount unmounts the target and all mounts underneath, starting with
+// the deepest mount first.
+func RecursiveUnmount(target string) error {
+	mounts, err := GetMounts()
+	if err != nil {
+		return err
+	}
+
+	// Make the deepest mount be first
+	sort.Sort(sort.Reverse(byMountpoint(mounts)))
+
+	for i, m := range mounts {
+		if !strings.HasPrefix(m.Mountpoint, target) {
+			continue
+		}
+		if err := Unmount(m.Mountpoint); err != nil && i == len(mounts)-1 {
+			if mounted, err := Mounted(m.Mountpoint); err != nil || mounted {
+				return err
+			}
+			// Ignore errors for submounts and continue trying to unmount others
+			// The final unmount should fail if there are any submounts remaining
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/moby/moby/pkg/mount/mount_unix_test.go b/vendor/github.com/moby/moby/pkg/mount/mount_unix_test.go
new file mode 100644
index 000000000..253aff3b8
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/mount/mount_unix_test.go
@@ -0,0 +1,162 @@
+// +build !windows,!solaris
+
+package mount
+
+import (
+	"os"
+	"path"
+	"testing"
+)
+
+func TestMountOptionsParsing(t *testing.T) {
+	options := "noatime,ro,size=10k"
+
+	flag, data := parseOptions(options)
+
+	if data != "size=10k" {
+		t.Fatalf("Expected size=10k got %s", data)
+	}
+
+	expectedFlag := NOATIME | RDONLY
+
+	if flag != expectedFlag {
+		t.Fatalf("Expected %d got %d", expectedFlag, flag)
+	}
+}
+
+func TestMounted(t *testing.T) {
+	tmp := path.Join(os.TempDir(), "mount-tests")
+	if err := os.MkdirAll(tmp, 0777); err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmp)
+
+	var (
+		sourceDir  = path.Join(tmp, "source")
+		targetDir  = path.Join(tmp, "target")
+		sourcePath = path.Join(sourceDir, "file.txt")
+		targetPath = path.Join(targetDir, "file.txt")
+	)
+
+	os.Mkdir(sourceDir, 0777)
+	os.Mkdir(targetDir, 0777)
+
+	f, err := os.Create(sourcePath)
+	if err != nil {
+		t.Fatal(err)
+	}
+	f.WriteString("hello")
+	f.Close()
+
+	f, err = os.Create(targetPath)
+	if err != nil {
+		t.Fatal(err)
+	}
+	f.Close()
+
+	if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil {
+		t.Fatal(err)
+	}
+	defer func() {
+		if err := Unmount(targetDir); err != nil {
+			t.Fatal(err)
+		}
+	}()
+
+	mounted, err := Mounted(targetDir)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !mounted {
+		t.Fatalf("Expected %s to be mounted", targetDir)
+	}
+	if _, err := os.Stat(targetDir); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestMountReadonly(t *testing.T) {
+	tmp := path.Join(os.TempDir(), "mount-tests")
+	if err := os.MkdirAll(tmp, 0777); err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmp)
+
+	var (
+		sourceDir  = path.Join(tmp, "source")
+		targetDir  = path.Join(tmp, "target")
+		sourcePath = path.Join(sourceDir, "file.txt")
+		targetPath = path.Join(targetDir, "file.txt")
+	)
+
+	os.Mkdir(sourceDir, 0777)
+	os.Mkdir(targetDir, 0777)
+
+	f, err := os.Create(sourcePath)
+	if err != nil {
+		t.Fatal(err)
+	}
+	f.WriteString("hello")
+	f.Close()
+
+	f, err = os.Create(targetPath)
+	if err != nil {
+		t.Fatal(err)
+	}
+	f.Close()
+
+	if err := Mount(sourceDir, targetDir, "none", "bind,ro"); err != nil {
+		t.Fatal(err)
+	}
+	defer func() {
+		if err := Unmount(targetDir); err != nil {
+			t.Fatal(err)
+		}
+	}()
+
+	f, err = os.OpenFile(targetPath, os.O_RDWR, 0777)
+	if err == nil {
+		t.Fatal("Should not be able to open a ro file as rw")
+	}
+}
+
+func TestGetMounts(t *testing.T) {
+	mounts, err := GetMounts()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	root := false
+	for _, entry := range mounts {
+		if entry.Mountpoint == "/" {
+			root = true
+		}
+	}
+
+	if !root {
+		t.Fatal("/ should be mounted at least")
+	}
+}
+
+func TestMergeTmpfsOptions(t *testing.T) {
+	options := []string{"noatime", "ro", "size=10k", "defaults", "atime", "defaults", "rw", "rprivate", "size=1024k", "slave"}
+	expected := []string{"atime", "rw", "size=1024k", "slave"}
+	merged, err := MergeTmpfsOptions(options)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(expected) != len(merged) {
+		t.Fatalf("Expected %s got %s", expected, merged)
+	}
+	for index := range merged {
+		if merged[index] != expected[index] {
+			t.Fatalf("Expected %s for the %dth option, got %s", expected, index, merged)
+		}
+	}
+
+	options = []string{"noatime", "ro", "size=10k", "atime", "rw", "rprivate", "size=1024k", "slave", "size"}
+	_, err = MergeTmpfsOptions(options)
+	if err == nil {
+		t.Fatal("Expected error got nil")
+	}
+}
diff --git a/vendor/github.com/moby/moby/pkg/mount/mounter_freebsd.go b/vendor/github.com/moby/moby/pkg/mount/mounter_freebsd.go
new file mode 100644
index 000000000..814896cc9
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/mount/mounter_freebsd.go
@@ -0,0 +1,60 @@
+package mount
+
+/*
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/_iovec.h>
+#include <sys/mount.h>
+#include <sys/param.h>
+*/
+import "C"
+
+import (
+	"fmt"
+	"strings"
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+func allocateIOVecs(options []string) []C.struct_iovec {
+	out := make([]C.struct_iovec, len(options))
+	for i, option := range options {
+		out[i].iov_base = unsafe.Pointer(C.CString(option))
+		out[i].iov_len = C.size_t(len(option) + 1)
+	}
+	return out
+}
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+	isNullFS := false
+
+	xs := strings.Split(data, ",")
+	for _, x := range xs {
+		if x == "bind" {
+			isNullFS = true
+		}
+	}
+
+	options := []string{"fspath", target}
+	if isNullFS {
+		options = append(options, "fstype", "nullfs", "target", device)
+	} else {
+		options = append(options, "fstype", mType, "from", device)
+	}
+	rawOptions := allocateIOVecs(options)
+	for _, rawOption
:= range rawOptions { + defer C.free(rawOption.iov_base) + } + + if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 { + reason := C.GoString(C.strerror(*C.__error())) + return fmt.Errorf("Failed to call nmount: %s", reason) + } + return nil +} + +func unmount(target string, flag int) error { + return unix.Unmount(target, flag) +} diff --git a/vendor/github.com/moby/moby/pkg/mount/mounter_linux.go b/vendor/github.com/moby/moby/pkg/mount/mounter_linux.go new file mode 100644 index 000000000..39c36d472 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/mount/mounter_linux.go @@ -0,0 +1,57 @@ +package mount + +import ( + "golang.org/x/sys/unix" +) + +const ( + // ptypes is the set propagation types. + ptypes = unix.MS_SHARED | unix.MS_PRIVATE | unix.MS_SLAVE | unix.MS_UNBINDABLE + + // pflags is the full set valid flags for a change propagation call. + pflags = ptypes | unix.MS_REC | unix.MS_SILENT + + // broflags is the combination of bind and read only + broflags = unix.MS_BIND | unix.MS_RDONLY +) + +// isremount returns true if either device name or flags identify a remount request, false otherwise. +func isremount(device string, flags uintptr) bool { + switch { + // We treat device "" and "none" as a remount request to provide compatibility with + // requests that don't explicitly set MS_REMOUNT such as those manipulating bind mounts. + case flags&unix.MS_REMOUNT != 0, device == "", device == "none": + return true + default: + return false + } +} + +func mount(device, target, mType string, flags uintptr, data string) error { + oflags := flags &^ ptypes + if !isremount(device, flags) || data != "" { + // Initial call applying all non-propagation flags for mount + // or remount with changed data + if err := unix.Mount(device, target, mType, oflags, data); err != nil { + return err + } + } + + if flags&ptypes != 0 { + // Change the propagation type. + if err := unix.Mount("", target, "", flags&pflags, ""); err != nil { + return err + } + } + + if oflags&broflags == broflags { + // Remount the bind to apply read only. 
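+		// (A read-only bind mount cannot be created in a single call: the
+		// kernel ignores MS_RDONLY while MS_BIND is being applied, so the
+		// read-only flag only takes effect on this extra MS_REMOUNT pass.)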
+ return unix.Mount("", target, "", oflags|unix.MS_REMOUNT, "") + } + + return nil +} + +func unmount(target string, flag int) error { + return unix.Unmount(target, flag) +} diff --git a/vendor/github.com/moby/moby/pkg/mount/mounter_linux_test.go b/vendor/github.com/moby/moby/pkg/mount/mounter_linux_test.go new file mode 100644 index 000000000..47c03b363 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/mount/mounter_linux_test.go @@ -0,0 +1,228 @@ +// +build linux + +package mount + +import ( + "fmt" + "io/ioutil" + "os" + "strings" + "testing" +) + +func TestMount(t *testing.T) { + if os.Getuid() != 0 { + t.Skip("not root tests would fail") + } + + source, err := ioutil.TempDir("", "mount-test-source-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(source) + + // Ensure we have a known start point by mounting tmpfs with given options + if err := Mount("tmpfs", source, "tmpfs", "private"); err != nil { + t.Fatal(err) + } + defer ensureUnmount(t, source) + validateMount(t, source, "", "", "") + if t.Failed() { + t.FailNow() + } + + target, err := ioutil.TempDir("", "mount-test-target-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(target) + + tests := []struct { + source string + ftype string + options string + expectedOpts string + expectedOptional string + expectedVFS string + }{ + // No options + {"tmpfs", "tmpfs", "", "", "", ""}, + // Default rw / ro test + {source, "", "bind", "", "", ""}, + {source, "", "bind,private", "", "", ""}, + {source, "", "bind,shared", "", "shared", ""}, + {source, "", "bind,slave", "", "master", ""}, + {source, "", "bind,unbindable", "", "unbindable", ""}, + // Read Write tests + {source, "", "bind,rw", "rw", "", ""}, + {source, "", "bind,rw,private", "rw", "", ""}, + {source, "", "bind,rw,shared", "rw", "shared", ""}, + {source, "", "bind,rw,slave", "rw", "master", ""}, + {source, "", "bind,rw,unbindable", "rw", "unbindable", ""}, + // Read Only tests + {source, "", "bind,ro", "ro", "", ""}, + {source, "", "bind,ro,private", "ro", "", ""}, + {source, "", "bind,ro,shared", "ro", "shared", ""}, + {source, "", "bind,ro,slave", "ro", "master", ""}, + {source, "", "bind,ro,unbindable", "ro", "unbindable", ""}, + // Remount tests to change per filesystem options + {"", "", "remount,size=128k", "rw", "", "rw,size=128k"}, + {"", "", "remount,ro,size=128k", "ro", "", "ro,size=128k"}, + } + + for _, tc := range tests { + ftype, options := tc.ftype, tc.options + if tc.ftype == "" { + ftype = "none" + } + if tc.options == "" { + options = "none" + } + + t.Run(fmt.Sprintf("%v-%v", ftype, options), func(t *testing.T) { + if strings.Contains(tc.options, "slave") { + // Slave requires a shared source + if err := MakeShared(source); err != nil { + t.Fatal(err) + } + defer func() { + if err := MakePrivate(source); err != nil { + t.Fatal(err) + } + }() + } + if strings.Contains(tc.options, "remount") { + // create a new mount to remount first + if err := Mount("tmpfs", target, "tmpfs", ""); err != nil { + t.Fatal(err) + } + } + if err := Mount(tc.source, target, tc.ftype, tc.options); err != nil { + t.Fatal(err) + } + defer ensureUnmount(t, target) + validateMount(t, target, tc.expectedOpts, tc.expectedOptional, tc.expectedVFS) + }) + } +} + +// ensureUnmount umounts mnt checking for errors +func ensureUnmount(t *testing.T, mnt string) { + if err := Unmount(mnt); err != nil { + t.Error(err) + } +} + +// validateMount checks that mnt has the given options +func validateMount(t *testing.T, mnt string, opts, optional, vfs string) { + info, err := 
GetMounts()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	wantedOpts := make(map[string]struct{})
+	if opts != "" {
+		for _, opt := range strings.Split(opts, ",") {
+			wantedOpts[opt] = struct{}{}
+		}
+	}
+
+	wantedOptional := make(map[string]struct{})
+	if optional != "" {
+		for _, opt := range strings.Split(optional, ",") {
+			wantedOptional[opt] = struct{}{}
+		}
+	}
+
+	wantedVFS := make(map[string]struct{})
+	if vfs != "" {
+		for _, opt := range strings.Split(vfs, ",") {
+			wantedVFS[opt] = struct{}{}
+		}
+	}
+
+	mnts := make(map[int]*Info, len(info))
+	for _, mi := range info {
+		mnts[mi.ID] = mi
+	}
+
+	for _, mi := range info {
+		if mi.Mountpoint != mnt {
+			continue
+		}
+
+		// Use parent info as the defaults
+		p := mnts[mi.Parent]
+		pOpts := make(map[string]struct{})
+		if p.Opts != "" {
+			for _, opt := range strings.Split(p.Opts, ",") {
+				pOpts[clean(opt)] = struct{}{}
+			}
+		}
+		pOptional := make(map[string]struct{})
+		if p.Optional != "" {
+			for _, field := range strings.Split(p.Optional, ",") {
+				pOptional[clean(field)] = struct{}{}
+			}
+		}
+
+		// Validate Opts
+		if mi.Opts != "" {
+			for _, opt := range strings.Split(mi.Opts, ",") {
+				opt = clean(opt)
+				if !has(wantedOpts, opt) && !has(pOpts, opt) {
+					t.Errorf("unexpected mount option %q, expected %q", opt, opts)
+				}
+				delete(wantedOpts, opt)
+			}
+		}
+		for opt := range wantedOpts {
+			t.Errorf("missing mount option %q, found %q", opt, mi.Opts)
+		}
+
+		// Validate Optional
+		if mi.Optional != "" {
+			for _, field := range strings.Split(mi.Optional, ",") {
+				field = clean(field)
+				if !has(wantedOptional, field) && !has(pOptional, field) {
+					t.Errorf("unexpected optional field %q, expected %q", field, optional)
+				}
+				delete(wantedOptional, field)
+			}
+		}
+		for field := range wantedOptional {
+			t.Errorf("missing optional field %q, found %q", field, mi.Optional)
+		}
+
+		// Validate VFS if set
+		if vfs != "" {
+			if mi.VfsOpts != "" {
+				for _, opt := range strings.Split(mi.VfsOpts, ",") {
+					opt = clean(opt)
+					if !has(wantedVFS, opt) {
+						t.Errorf("unexpected mount option %q, expected %q", opt, vfs)
+					}
+					delete(wantedVFS, opt)
+				}
+			}
+			for opt := range wantedVFS {
+				t.Errorf("missing mount option %q, found %q", opt, mi.VfsOpts)
+			}
+		}
+
+		return
+	}
+
+	t.Errorf("failed to find mount %q", mnt)
+}
+
+// clean strips off any value param after the colon
+func clean(v string) string {
+	return strings.SplitN(v, ":", 2)[0]
+}
+
+// has returns true if key is a member of m
+func has(m map[string]struct{}, key string) bool {
+	_, ok := m[key]
+	return ok
+}
diff --git a/vendor/github.com/moby/moby/pkg/mount/mounter_solaris.go b/vendor/github.com/moby/moby/pkg/mount/mounter_solaris.go
new file mode 100644
index 000000000..c684aa81f
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/mount/mounter_solaris.go
@@ -0,0 +1,33 @@
+// +build solaris,cgo
+
+package mount
+
+import (
+	"golang.org/x/sys/unix"
+	"unsafe"
+)
+
+// #include <stdlib.h>
+// #include <errno.h>
+// #include <sys/mount.h>
+// int Mount(const char *spec, const char *dir, int mflag,
+// char *fstype, char *dataptr, int datalen, char *optptr, int optlen) {
+//     return mount(spec, dir, mflag, fstype, dataptr, datalen, optptr, optlen);
+// }
+import "C"
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+	spec := C.CString(device)
+	dir := C.CString(target)
+	fstype := C.CString(mType)
+	_, err := C.Mount(spec, dir, C.int(flag), fstype, nil, 0, nil, 0)
+	C.free(unsafe.Pointer(spec))
+	C.free(unsafe.Pointer(dir))
+	C.free(unsafe.Pointer(fstype))
+	return err
+}
+
+func unmount(target string, flag int) error {
+	err := unix.Unmount(target, flag)
+	return err
+}
diff --git a/vendor/github.com/moby/moby/pkg/mount/mounter_unsupported.go b/vendor/github.com/moby/moby/pkg/mount/mounter_unsupported.go
new file mode 100644
index 000000000..a2a3bb457
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/mount/mounter_unsupported.go
@@ -0,0 +1,11 @@
+// +build !linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo
+
+package mount
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+	panic("Not implemented")
+}
+
+func unmount(target string, flag int) error {
+	panic("Not implemented")
+}
diff --git a/vendor/github.com/moby/moby/pkg/mount/mountinfo.go b/vendor/github.com/moby/moby/pkg/mount/mountinfo.go
new file mode 100644
index 000000000..ff4cc1d86
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/mount/mountinfo.go
@@ -0,0 +1,54 @@
+package mount
+
+// Info reveals information about a particular mounted filesystem. This
+// struct is populated from the content in the /proc/<pid>/mountinfo file.
+type Info struct {
+	// ID is a unique identifier of the mount (may be reused after umount).
+	ID int
+
+	// Parent indicates the ID of the mount parent (or of self for the top of the
+	// mount tree).
+	Parent int
+
+	// Major indicates one half of the device ID which identifies the device class.
+	Major int
+
+	// Minor indicates one half of the device ID which identifies a specific
+	// instance of device.
+	Minor int
+
+	// Root of the mount within the filesystem.
+	Root string
+
+	// Mountpoint indicates the mount point relative to the process's root.
+	Mountpoint string
+
+	// Opts represents mount-specific options.
+	Opts string
+
+	// Optional represents optional fields.
+	Optional string
+
+	// Fstype indicates the type of filesystem, such as EXT3.
+	Fstype string
+
+	// Source indicates filesystem specific information or "none".
+	Source string
+
+	// VfsOpts represents per super block options.
+	VfsOpts string
+}
+
+type byMountpoint []*Info
+
+func (by byMountpoint) Len() int {
+	return len(by)
+}
+
+func (by byMountpoint) Less(i, j int) bool {
+	return by[i].Mountpoint < by[j].Mountpoint
+}
+
+func (by byMountpoint) Swap(i, j int) {
+	by[i], by[j] = by[j], by[i]
+}
diff --git a/vendor/github.com/moby/moby/pkg/mount/mountinfo_freebsd.go b/vendor/github.com/moby/moby/pkg/mount/mountinfo_freebsd.go
new file mode 100644
index 000000000..4f32edcd9
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/mount/mountinfo_freebsd.go
@@ -0,0 +1,41 @@
+package mount
+
+/*
+#include <sys/param.h>
+#include <sys/ucred.h>
+#include <sys/mount.h>
+*/
+import "C"
+
+import (
+	"fmt"
+	"reflect"
+	"unsafe"
+)
+
+// Parse the mount table via getmntinfo(3), since comparing Dev and ino does
+// not work from bind mounts.
+func parseMountTable() ([]*Info, error) { + var rawEntries *C.struct_statfs + + count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT)) + if count == 0 { + return nil, fmt.Errorf("Failed to call getmntinfo") + } + + var entries []C.struct_statfs + header := (*reflect.SliceHeader)(unsafe.Pointer(&entries)) + header.Cap = count + header.Len = count + header.Data = uintptr(unsafe.Pointer(rawEntries)) + + var out []*Info + for _, entry := range entries { + var mountinfo Info + mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0]) + mountinfo.Source = C.GoString(&entry.f_mntfromname[0]) + mountinfo.Fstype = C.GoString(&entry.f_fstypename[0]) + out = append(out, &mountinfo) + } + return out, nil +} diff --git a/vendor/github.com/moby/moby/pkg/mount/mountinfo_linux.go b/vendor/github.com/moby/moby/pkg/mount/mountinfo_linux.go new file mode 100644 index 000000000..be69fee1d --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/mount/mountinfo_linux.go @@ -0,0 +1,95 @@ +// +build linux + +package mount + +import ( + "bufio" + "fmt" + "io" + "os" + "strings" +) + +const ( + /* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue + (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) + + (1) mount ID: unique identifier of the mount (may be reused after umount) + (2) parent ID: ID of parent (or of self for the top of the mount tree) + (3) major:minor: value of st_dev for files on filesystem + (4) root: root of the mount within the filesystem + (5) mount point: mount point relative to the process's root + (6) mount options: per mount options + (7) optional fields: zero or more fields of the form "tag[:value]" + (8) separator: marks the end of the optional fields + (9) filesystem type: name of filesystem of the form "type[.subtype]" + (10) mount source: filesystem specific information or "none" + (11) super options: per super block options*/ + mountinfoFormat = "%d %d %d:%d %s %s %s %s" +) + +// Parse /proc/self/mountinfo because comparing Dev and ino does not work from +// bind mounts +func parseMountTable() ([]*Info, error) { + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f) +} + +func parseInfoFile(r io.Reader) ([]*Info, error) { + var ( + s = bufio.NewScanner(r) + out = []*Info{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + var ( + p = &Info{} + text = s.Text() + optionalFields string + ) + + if _, err := fmt.Sscanf(text, mountinfoFormat, + &p.ID, &p.Parent, &p.Major, &p.Minor, + &p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil { + return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err) + } + // Safe as mountinfo encodes mountpoints with spaces as \040. + index := strings.Index(text, " - ") + postSeparatorFields := strings.Fields(text[index+3:]) + if len(postSeparatorFields) < 3 { + return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text) + } + + if optionalFields != "-" { + p.Optional = optionalFields + } + + p.Fstype = postSeparatorFields[0] + p.Source = postSeparatorFields[1] + p.VfsOpts = strings.Join(postSeparatorFields[2:], " ") + out = append(out, p) + } + return out, nil +} + +// PidMountInfo collects the mounts for a specific process ID. If the process +// ID is unknown, it is better to use `GetMounts` which will inspect +// "/proc/self/mountinfo" instead. 
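+//
+// A usage sketch (the pid value is illustrative only):
+//
+//	infos, err := mount.PidMountInfo(1)
+//	if err == nil {
+//		for _, info := range infos {
+//			fmt.Println(info.Mountpoint, info.Fstype)
+//		}
+//	}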
diff --git a/vendor/github.com/moby/moby/pkg/mount/mountinfo_linux_test.go b/vendor/github.com/moby/moby/pkg/mount/mountinfo_linux_test.go
new file mode 100644
index 000000000..bd100e1d4
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/mount/mountinfo_linux_test.go
@@ -0,0 +1,476 @@
+// +build linux
+
+package mount
+
+import (
+	"bytes"
+	"testing"
+)
+
+const (
+	fedoraMountinfo = `15 35 0:3 / /proc rw,nosuid,nodev,noexec,relatime shared:5 - proc proc rw
+ 16 35 0:14 / /sys rw,nosuid,nodev,noexec,relatime shared:6 - sysfs sysfs rw,seclabel
+ 17 35 0:5 / /dev rw,nosuid shared:2 - devtmpfs devtmpfs rw,seclabel,size=8056484k,nr_inodes=2014121,mode=755
+ 18 16 0:15 / /sys/kernel/security rw,nosuid,nodev,noexec,relatime shared:7 - securityfs securityfs rw
+ 19 16 0:13 / /sys/fs/selinux rw,relatime shared:8 - selinuxfs selinuxfs rw
+ 20 17 0:16 / /dev/shm rw,nosuid,nodev shared:3 - tmpfs tmpfs rw,seclabel
+ 21 17 0:10 / /dev/pts rw,nosuid,noexec,relatime shared:4 - devpts devpts rw,seclabel,gid=5,mode=620,ptmxmode=000
+ 22 35 0:17 / /run rw,nosuid,nodev shared:21 - tmpfs tmpfs rw,seclabel,mode=755
+ 23 16 0:18 / /sys/fs/cgroup rw,nosuid,nodev,noexec shared:9 - tmpfs tmpfs rw,seclabel,mode=755
+ 24 23 0:19 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:10 - cgroup cgroup rw,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd
+ 25 16 0:20 / /sys/fs/pstore rw,nosuid,nodev,noexec,relatime shared:20 - pstore pstore rw
+ 26 23 0:21 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:11 - cgroup cgroup rw,cpuset,clone_children
+ 27 23 0:22 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:12 - cgroup cgroup rw,cpuacct,cpu,clone_children
+ 28 23 0:23 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:13 - cgroup cgroup rw,memory,clone_children
+ 29 23 0:24 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:14 - cgroup cgroup rw,devices,clone_children
+ 30 23 0:25 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime shared:15 - cgroup cgroup rw,freezer,clone_children
+ 31 23 0:26 / /sys/fs/cgroup/net_cls rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,net_cls,clone_children
+ 32 23 0:27 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,blkio,clone_children
+ 33 23 0:28 / /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,perf_event,clone_children
+ 34 23 0:29 / /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime shared:19 - cgroup cgroup rw,hugetlb,clone_children
+ 35 1 253:2 / / rw,relatime shared:1 - ext4 /dev/mapper/ssd-root--f20 rw,seclabel,data=ordered
+ 36 15 0:30 / /proc/sys/fs/binfmt_misc rw,relatime shared:22 - autofs systemd-1 rw,fd=38,pgrp=1,timeout=300,minproto=5,maxproto=5,direct
+ 37 17 0:12 / /dev/mqueue rw,relatime shared:23 - mqueue mqueue rw,seclabel
+ 38 35 0:31 / /tmp rw shared:24 - tmpfs tmpfs rw,seclabel
+ 39 17 0:32 / /dev/hugepages rw,relatime shared:25 - hugetlbfs hugetlbfs rw,seclabel
+ 40 16 0:7 / /sys/kernel/debug rw,relatime shared:26 - debugfs debugfs rw
+ 41 16 0:33 / /sys/kernel/config rw,relatime shared:27 - configfs configfs rw
+ 42 35 0:34 / /var/lib/nfs/rpc_pipefs rw,relatime shared:28 - rpc_pipefs sunrpc rw
+ 43 15 0:35 / /proc/fs/nfsd rw,relatime shared:29 - nfsd sunrpc rw
+ 45 35 8:17 /
/boot rw,relatime shared:30 - ext4 /dev/sdb1 rw,seclabel,data=ordered + 46 35 253:4 / /home rw,relatime shared:31 - ext4 /dev/mapper/ssd-home rw,seclabel,data=ordered + 47 35 253:5 / /var/lib/libvirt/images rw,noatime,nodiratime shared:32 - ext4 /dev/mapper/ssd-virt rw,seclabel,discard,data=ordered + 48 35 253:12 / /mnt/old rw,relatime shared:33 - ext4 /dev/mapper/HelpDeskRHEL6-FedoraRoot rw,seclabel,data=ordered + 121 22 0:36 / /run/user/1000/gvfs rw,nosuid,nodev,relatime shared:104 - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000 + 124 16 0:37 / /sys/fs/fuse/connections rw,relatime shared:107 - fusectl fusectl rw + 165 38 253:3 / /tmp/mnt rw,relatime shared:147 - ext4 /dev/mapper/ssd-root rw,seclabel,data=ordered + 167 35 253:15 / /var/lib/docker/devicemapper/mnt/aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,relatime shared:149 - ext4 /dev/mapper/docker-253:2-425882-aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,seclabel,discard,stripe=16,data=ordered + 171 35 253:16 / /var/lib/docker/devicemapper/mnt/c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,relatime shared:153 - ext4 /dev/mapper/docker-253:2-425882-c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,seclabel,discard,stripe=16,data=ordered + 175 35 253:17 / /var/lib/docker/devicemapper/mnt/1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,relatime shared:157 - ext4 /dev/mapper/docker-253:2-425882-1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,seclabel,discard,stripe=16,data=ordered + 179 35 253:18 / /var/lib/docker/devicemapper/mnt/d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,relatime shared:161 - ext4 /dev/mapper/docker-253:2-425882-d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,seclabel,discard,stripe=16,data=ordered + 183 35 253:19 / /var/lib/docker/devicemapper/mnt/6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,relatime shared:165 - ext4 /dev/mapper/docker-253:2-425882-6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,seclabel,discard,stripe=16,data=ordered + 187 35 253:20 / /var/lib/docker/devicemapper/mnt/8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,relatime shared:169 - ext4 /dev/mapper/docker-253:2-425882-8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,seclabel,discard,stripe=16,data=ordered + 191 35 253:21 / /var/lib/docker/devicemapper/mnt/c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,relatime shared:173 - ext4 /dev/mapper/docker-253:2-425882-c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,seclabel,discard,stripe=16,data=ordered + 195 35 253:22 / /var/lib/docker/devicemapper/mnt/2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,relatime shared:177 - ext4 /dev/mapper/docker-253:2-425882-2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,seclabel,discard,stripe=16,data=ordered + 199 35 253:23 / /var/lib/docker/devicemapper/mnt/37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,relatime shared:181 - ext4 /dev/mapper/docker-253:2-425882-37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,seclabel,discard,stripe=16,data=ordered + 203 35 253:24 / /var/lib/docker/devicemapper/mnt/aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,relatime shared:185 - ext4 /dev/mapper/docker-253:2-425882-aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b 
rw,seclabel,discard,stripe=16,data=ordered + 207 35 253:25 / /var/lib/docker/devicemapper/mnt/928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,relatime shared:189 - ext4 /dev/mapper/docker-253:2-425882-928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,seclabel,discard,stripe=16,data=ordered + 211 35 253:26 / /var/lib/docker/devicemapper/mnt/0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,relatime shared:193 - ext4 /dev/mapper/docker-253:2-425882-0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,seclabel,discard,stripe=16,data=ordered + 215 35 253:27 / /var/lib/docker/devicemapper/mnt/d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,relatime shared:197 - ext4 /dev/mapper/docker-253:2-425882-d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,seclabel,discard,stripe=16,data=ordered + 219 35 253:28 / /var/lib/docker/devicemapper/mnt/bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,relatime shared:201 - ext4 /dev/mapper/docker-253:2-425882-bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,seclabel,discard,stripe=16,data=ordered + 223 35 253:29 / /var/lib/docker/devicemapper/mnt/7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,relatime shared:205 - ext4 /dev/mapper/docker-253:2-425882-7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,seclabel,discard,stripe=16,data=ordered + 227 35 253:30 / /var/lib/docker/devicemapper/mnt/c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,relatime shared:209 - ext4 /dev/mapper/docker-253:2-425882-c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,seclabel,discard,stripe=16,data=ordered + 231 35 253:31 / /var/lib/docker/devicemapper/mnt/8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,relatime shared:213 - ext4 /dev/mapper/docker-253:2-425882-8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,seclabel,discard,stripe=16,data=ordered + 235 35 253:32 / /var/lib/docker/devicemapper/mnt/1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,relatime shared:217 - ext4 /dev/mapper/docker-253:2-425882-1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,seclabel,discard,stripe=16,data=ordered + 239 35 253:33 / /var/lib/docker/devicemapper/mnt/e9aa60c60128cad1 rw,relatime shared:221 - ext4 /dev/mapper/docker-253:2-425882-e9aa60c60128cad1 rw,seclabel,discard,stripe=16,data=ordered + 243 35 253:34 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,relatime shared:225 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,seclabel,discard,stripe=16,data=ordered + 247 35 253:35 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,relatime shared:229 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,seclabel,discard,stripe=16,data=ordered + 31 21 0:23 / /DATA/foo_bla_bla rw,relatime - cifs //foo/BLA\040BLA\040BLA/ rw,sec=ntlm,cache=loose,unc=\\foo\BLA BLA BLA,username=my_login,domain=mydomain.com,uid=12345678,forceuid,gid=12345678,forcegid,addr=10.1.30.10,file_mode=0755,dir_mode=0755,nounix,rsize=61440,wsize=65536,actimeo=1` + + ubuntuMountInfo = `15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw +16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw +17 
20 0:5 / /dev rw,relatime - devtmpfs udev rw,size=1015140k,nr_inodes=253785,mode=755 +18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 +19 20 0:15 / /run rw,nosuid,noexec,relatime - tmpfs tmpfs rw,size=205044k,mode=755 +20 1 253:0 / / rw,relatime - ext4 /dev/disk/by-label/DOROOT rw,errors=remount-ro,data=ordered +21 15 0:16 / /sys/fs/cgroup rw,relatime - tmpfs none rw,size=4k,mode=755 +22 15 0:17 / /sys/fs/fuse/connections rw,relatime - fusectl none rw +23 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw +24 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw +25 19 0:18 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k +26 21 0:19 / /sys/fs/cgroup/cpuset rw,relatime - cgroup cgroup rw,cpuset,clone_children +27 19 0:20 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw +28 21 0:21 / /sys/fs/cgroup/cpu rw,relatime - cgroup cgroup rw,cpu +29 19 0:22 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755 +30 15 0:23 / /sys/fs/pstore rw,relatime - pstore none rw +31 21 0:24 / /sys/fs/cgroup/cpuacct rw,relatime - cgroup cgroup rw,cpuacct +32 21 0:25 / /sys/fs/cgroup/memory rw,relatime - cgroup cgroup rw,memory +33 21 0:26 / /sys/fs/cgroup/devices rw,relatime - cgroup cgroup rw,devices +34 21 0:27 / /sys/fs/cgroup/freezer rw,relatime - cgroup cgroup rw,freezer +35 21 0:28 / /sys/fs/cgroup/blkio rw,relatime - cgroup cgroup rw,blkio +36 21 0:29 / /sys/fs/cgroup/perf_event rw,relatime - cgroup cgroup rw,perf_event +37 21 0:30 / /sys/fs/cgroup/hugetlb rw,relatime - cgroup cgroup rw,hugetlb +38 21 0:31 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime - cgroup systemd rw,name=systemd +39 20 0:32 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=caafa54fdc06525 +40 20 0:33 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8-init rw,relatime - aufs none rw,si=caafa54f882b525 +41 20 0:34 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8 rw,relatime - aufs none rw,si=caafa54f8829525 +42 20 0:35 / /var/lib/docker/aufs/mnt/16f4d7e96dd612903f425bfe856762f291ff2e36a8ecd55a2209b7d7cd81c30b rw,relatime - aufs none rw,si=caafa54f882d525 +43 20 0:36 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e-init rw,relatime - aufs none rw,si=caafa54f882f525 +44 20 0:37 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e rw,relatime - aufs none rw,si=caafa54f88ba525 +45 20 0:38 / /var/lib/docker/aufs/mnt/283f35a910233c756409313be71ecd8fcfef0df57108b8d740b61b3e88860452 rw,relatime - aufs none rw,si=caafa54f88b8525 +46 20 0:39 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1-init rw,relatime - aufs none rw,si=caafa54f88be525 +47 20 0:40 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1 rw,relatime - aufs none rw,si=caafa54f882c525 +48 20 0:41 / /var/lib/docker/aufs/mnt/de2b538c97d6366cc80e8658547c923ea1d042f85580df379846f36a4df7049d rw,relatime - aufs none rw,si=caafa54f85bb525 +49 20 0:42 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49-init rw,relatime - aufs none rw,si=caafa54fdc00525 +50 20 0:43 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49 rw,relatime - aufs none rw,si=caafa54fbaec525 +51 20 0:44 
/ /var/lib/docker/aufs/mnt/6ac1cace985c9fc9bea32234de8b36dba49bdd5e29a2972b327ff939d78a6274 rw,relatime - aufs none rw,si=caafa54f8e1a525 +52 20 0:45 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b-init rw,relatime - aufs none rw,si=caafa54f8e1d525 +53 20 0:46 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b rw,relatime - aufs none rw,si=caafa54f8e1b525 +54 20 0:47 / /var/lib/docker/aufs/mnt/cabb117d997f0f93519185aea58389a9762770b7496ed0b74a3e4a083fa45902 rw,relatime - aufs none rw,si=caafa54f810a525 +55 20 0:48 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33-init rw,relatime - aufs none rw,si=caafa54f8529525 +56 20 0:49 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33 rw,relatime - aufs none rw,si=caafa54f852f525 +57 20 0:50 / /var/lib/docker/aufs/mnt/16a1526fa445b84ce84f89506d219e87fa488a814063baf045d88b02f21166b3 rw,relatime - aufs none rw,si=caafa54f9e1d525 +58 20 0:51 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f-init rw,relatime - aufs none rw,si=caafa54f854d525 +59 20 0:52 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f rw,relatime - aufs none rw,si=caafa54f854e525 +60 20 0:53 / /var/lib/docker/aufs/mnt/e370c3e286bea027917baa0e4d251262681a472a87056e880dfd0513516dffd9 rw,relatime - aufs none rw,si=caafa54f840a525 +61 20 0:54 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e-init rw,relatime - aufs none rw,si=caafa54f8408525 +62 20 0:55 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e rw,relatime - aufs none rw,si=caafa54f8409525 +63 20 0:56 / /var/lib/docker/aufs/mnt/abd0b5ea5d355a67f911475e271924a5388ee60c27185fcd60d095afc4a09dc7 rw,relatime - aufs none rw,si=caafa54f9eb1525 +64 20 0:57 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2-init rw,relatime - aufs none rw,si=caafa54f85bf525 +65 20 0:58 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2 rw,relatime - aufs none rw,si=caafa54f85b8525 +66 20 0:59 / /var/lib/docker/aufs/mnt/912e1bf28b80a09644503924a8a1a4fb8ed10b808ca847bda27a369919aa52fa rw,relatime - aufs none rw,si=caafa54fbaea525 +67 20 0:60 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576-init rw,relatime - aufs none rw,si=caafa54f8472525 +68 20 0:61 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576 rw,relatime - aufs none rw,si=caafa54f8474525 +69 20 0:62 / /var/lib/docker/aufs/mnt/5aaebb79ef3097dfca377889aeb61a0c9d5e3795117d2b08d0751473c671dfb2 rw,relatime - aufs none rw,si=caafa54f8c5e525 +70 20 0:63 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2-init rw,relatime - aufs none rw,si=caafa54f8c3b525 +71 20 0:64 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2 rw,relatime - aufs none rw,si=caafa54f8c3d525 +72 20 0:65 / /var/lib/docker/aufs/mnt/2777f0763da4de93f8bebbe1595cc77f739806a158657b033eca06f827b6028a rw,relatime - aufs none rw,si=caafa54f8c3e525 +73 20 0:66 / /var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e-init rw,relatime - aufs none rw,si=caafa54f8c39525 +74 20 0:67 / 
/var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e rw,relatime - aufs none rw,si=caafa54f854f525 +75 20 0:68 / /var/lib/docker/aufs/mnt/06400b526ec18b66639c96efc41a84f4ae0b117cb28dafd56be420651b4084a0 rw,relatime - aufs none rw,si=caafa54f840b525 +76 20 0:69 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785-init rw,relatime - aufs none rw,si=caafa54fdddf525 +77 20 0:70 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785 rw,relatime - aufs none rw,si=caafa54f854b525 +78 20 0:71 / /var/lib/docker/aufs/mnt/1ff414fa93fd61ec81b0ab7b365a841ff6545accae03cceac702833aaeaf718f rw,relatime - aufs none rw,si=caafa54f8d85525 +79 20 0:72 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8-init rw,relatime - aufs none rw,si=caafa54f8da3525 +80 20 0:73 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8 rw,relatime - aufs none rw,si=caafa54f8da2525 +81 20 0:74 / /var/lib/docker/aufs/mnt/b68b1d4fe4d30016c552398e78b379a39f651661d8e1fa5f2460c24a5e723420 rw,relatime - aufs none rw,si=caafa54f8d81525 +82 20 0:75 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739-init rw,relatime - aufs none rw,si=caafa54f8da1525 +83 20 0:76 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739 rw,relatime - aufs none rw,si=caafa54f8da0525 +84 20 0:77 / /var/lib/docker/aufs/mnt/53e10b0329afc0e0d3322d31efaed4064139dc7027fe6ae445cffd7104bcc94f rw,relatime - aufs none rw,si=caafa54f8c35525 +85 20 0:78 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494-init rw,relatime - aufs none rw,si=caafa54f8db8525 +86 20 0:79 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494 rw,relatime - aufs none rw,si=caafa54f8dba525 +87 20 0:80 / /var/lib/docker/aufs/mnt/90fdd2c03eeaf65311f88f4200e18aef6d2772482712d9aea01cd793c64781b5 rw,relatime - aufs none rw,si=caafa54f8315525 +88 20 0:81 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f-init rw,relatime - aufs none rw,si=caafa54f8fc6525 +89 20 0:82 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f rw,relatime - aufs none rw,si=caafa54f8468525 +90 20 0:83 / /var/lib/docker/aufs/mnt/8cf9a993f50f3305abad3da268c0fc44ff78a1e7bba595ef9de963497496c3f9 rw,relatime - aufs none rw,si=caafa54f8c59525 +91 20 0:84 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173-init rw,relatime - aufs none rw,si=caafa54f846a525 +92 20 0:85 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173 rw,relatime - aufs none rw,si=caafa54f846b525 +93 20 0:86 / /var/lib/docker/aufs/mnt/d8c8288ec920439a48b5796bab5883ee47a019240da65e8d8f33400c31bac5df rw,relatime - aufs none rw,si=caafa54f8dbf525 +94 20 0:87 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6-init rw,relatime - aufs none rw,si=caafa54f810f525 +95 20 0:88 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6 rw,relatime - aufs none rw,si=caafa54fbae9525 +96 20 0:89 / /var/lib/docker/aufs/mnt/befc1c67600df449dddbe796c0d06da7caff1d2bbff64cde1f0ba82d224996b5 rw,relatime - aufs none rw,si=caafa54f8dab525 +97 20 0:90 / 
/var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562-init rw,relatime - aufs none rw,si=caafa54fdc02525 +98 20 0:91 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562 rw,relatime - aufs none rw,si=caafa54f9eb0525 +99 20 0:92 / /var/lib/docker/aufs/mnt/2a31f10029f04ff9d4381167a9b739609853d7220d55a56cb654779a700ee246 rw,relatime - aufs none rw,si=caafa54f8c37525 +100 20 0:93 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927-init rw,relatime - aufs none rw,si=caafa54fd173525 +101 20 0:94 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927 rw,relatime - aufs none rw,si=caafa54f8108525 +102 20 0:95 / /var/lib/docker/aufs/mnt/eaa0f57403a3dc685268f91df3fbcd7a8423cee50e1a9ee5c3e1688d9d676bb4 rw,relatime - aufs none rw,si=caafa54f852d525 +103 20 0:96 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b-init rw,relatime - aufs none rw,si=caafa54f8d80525 +104 20 0:97 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b rw,relatime - aufs none rw,si=caafa54f8fc3525 +105 20 0:98 / /var/lib/docker/aufs/mnt/d1b322ae17613c6adee84e709641a9244ac56675244a89a64dc0075075fcbb83 rw,relatime - aufs none rw,si=caafa54f8c58525 +106 20 0:99 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd-init rw,relatime - aufs none rw,si=caafa54f8c63525 +107 20 0:100 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd rw,relatime - aufs none rw,si=caafa54f8c67525 +108 20 0:101 / /var/lib/docker/aufs/mnt/bc9d2a264158f83a617a069bf17cbbf2a2ba453db7d3951d9dc63cc1558b1c2b rw,relatime - aufs none rw,si=caafa54f8dbe525 +109 20 0:102 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99-init rw,relatime - aufs none rw,si=caafa54f9e0d525 +110 20 0:103 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99 rw,relatime - aufs none rw,si=caafa54f9e1b525 +111 20 0:104 / /var/lib/docker/aufs/mnt/d4dca7b02569c732e740071e1c654d4ad282de5c41edb619af1f0aafa618be26 rw,relatime - aufs none rw,si=caafa54f8dae525 +112 20 0:105 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7-init rw,relatime - aufs none rw,si=caafa54f8c5c525 +113 20 0:106 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7 rw,relatime - aufs none rw,si=caafa54fd172525 +114 20 0:107 / /var/lib/docker/aufs/mnt/e60c57499c0b198a6734f77f660cdbbd950a5b78aa23f470ca4f0cfcc376abef rw,relatime - aufs none rw,si=caafa54909c4525 +115 20 0:108 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35-init rw,relatime - aufs none rw,si=caafa54909c3525 +116 20 0:109 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35 rw,relatime - aufs none rw,si=caafa54909c7525 +117 20 0:110 / /var/lib/docker/aufs/mnt/2997be666d58b9e71469759bcb8bd9608dad0e533a1a7570a896919ba3388825 rw,relatime - aufs none rw,si=caafa54f8557525 +118 20 0:111 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93-init rw,relatime - aufs none rw,si=caafa54c6e88525 +119 20 0:112 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93 rw,relatime - aufs none rw,si=caafa54c6e8e525 +120 20 0:113 / 
/var/lib/docker/aufs/mnt/a672a1e2f2f051f6e19ed1dfbe80860a2d774174c49f7c476695f5dd1d5b2f67 rw,relatime - aufs none rw,si=caafa54c6e15525 +121 20 0:114 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420-init rw,relatime - aufs none rw,si=caafa54f8dad525 +122 20 0:115 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420 rw,relatime - aufs none rw,si=caafa54f8d84525 +123 20 0:116 / /var/lib/docker/aufs/mnt/2abc86007aca46fb4a817a033e2a05ccacae40b78ea4b03f8ea616b9ada40e2e rw,relatime - aufs none rw,si=caafa54c6e8b525 +124 20 0:117 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374-init rw,relatime - aufs none rw,si=caafa54c6e8d525 +125 20 0:118 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374 rw,relatime - aufs none rw,si=caafa54f8c34525 +126 20 0:119 / /var/lib/docker/aufs/mnt/2f95ca1a629cea8363b829faa727dd52896d5561f2c96ddee4f697ea2fc872c2 rw,relatime - aufs none rw,si=caafa54c6e8a525 +127 20 0:120 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2-init rw,relatime - aufs none rw,si=caafa54f8e19525 +128 20 0:121 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2 rw,relatime - aufs none rw,si=caafa54fa8c6525 +129 20 0:122 / /var/lib/docker/aufs/mnt/c1d04dfdf8cccb3676d5a91e84e9b0781ce40623d127d038bcfbe4c761b27401 rw,relatime - aufs none rw,si=caafa54f8c30525 +130 20 0:123 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a-init rw,relatime - aufs none rw,si=caafa54c6e1a525 +131 20 0:124 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a rw,relatime - aufs none rw,si=caafa54c6e1c525 +132 20 0:125 / /var/lib/docker/aufs/mnt/5ae3b6fccb1539fc02d420e86f3e9637bef5b711fed2ca31a2f426c8f5deddbf rw,relatime - aufs none rw,si=caafa54c4fea525 +133 20 0:126 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0-init rw,relatime - aufs none rw,si=caafa54c6e1e525 +134 20 0:127 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0 rw,relatime - aufs none rw,si=caafa54fa8c0525 +135 20 0:128 / /var/lib/docker/aufs/mnt/f382bd5aaccaf2d04a59089ac7cb12ec87efd769fd0c14d623358fbfd2a3f896 rw,relatime - aufs none rw,si=caafa54c4fec525 +136 20 0:129 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735-init rw,relatime - aufs none rw,si=caafa54c4fef525 +137 20 0:130 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735 rw,relatime - aufs none rw,si=caafa54c4feb525 +138 20 0:131 / /var/lib/docker/aufs/mnt/a9c5ee0854dc083b6bf62b7eb1e5291aefbb10702289a446471ce73aba0d5d7d rw,relatime - aufs none rw,si=caafa54909c6525 +139 20 0:134 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0-init rw,relatime - aufs none rw,si=caafa54804fe525 +140 20 0:135 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0 rw,relatime - aufs none rw,si=caafa54804fa525 +141 20 0:136 / /var/lib/docker/aufs/mnt/7ec3277e5c04c907051caf9c9c35889f5fcd6463e5485971b25404566830bb70 rw,relatime - aufs none rw,si=caafa54804f9525 +142 20 0:139 / /var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8-init rw,relatime - aufs none rw,si=caafa54c6ef6525 +143 20 0:140 / 
/var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8 rw,relatime - aufs none rw,si=caafa54c6ef5525 +144 20 0:356 / /var/lib/docker/aufs/mnt/e6ecde9e2c18cd3c75f424c67b6d89685cfee0fc67abf2cb6bdc0867eb998026 rw,relatime - aufs none rw,si=caafa548068e525` + + gentooMountinfo = `15 1 8:6 / / rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +16 15 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw +17 15 0:14 / /run rw,nosuid,nodev,relatime - tmpfs tmpfs rw,size=3292172k,mode=755 +18 15 0:5 / /dev rw,nosuid,relatime - devtmpfs udev rw,size=10240k,nr_inodes=4106451,mode=755 +19 18 0:12 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw +20 18 0:10 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 +21 18 0:15 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw +22 15 0:16 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw +23 22 0:7 / /sys/kernel/debug rw,nosuid,nodev,noexec,relatime - debugfs debugfs rw +24 22 0:17 / /sys/fs/cgroup rw,nosuid,nodev,noexec,relatime - tmpfs cgroup_root rw,size=10240k,mode=755 +25 24 0:18 / /sys/fs/cgroup/openrc rw,nosuid,nodev,noexec,relatime - cgroup openrc rw,release_agent=/lib64/rc/sh/cgroup-release-agent.sh,name=openrc +26 24 0:19 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime - cgroup cpuset rw,cpuset,clone_children +27 24 0:20 / /sys/fs/cgroup/cpu rw,nosuid,nodev,noexec,relatime - cgroup cpu rw,cpu,clone_children +28 24 0:21 / /sys/fs/cgroup/cpuacct rw,nosuid,nodev,noexec,relatime - cgroup cpuacct rw,cpuacct,clone_children +29 24 0:22 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime - cgroup memory rw,memory,clone_children +30 24 0:23 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime - cgroup devices rw,devices,clone_children +31 24 0:24 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime - cgroup freezer rw,freezer,clone_children +32 24 0:25 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime - cgroup blkio rw,blkio,clone_children +33 15 8:1 / /boot rw,noatime,nodiratime - vfat /dev/sda1 rw,fmask=0022,dmask=0022,codepage=437,iocharset=iso8859-1,shortname=mixed,errors=remount-ro +34 15 8:18 / /mnt/xfs rw,noatime,nodiratime - xfs /dev/sdb2 rw,attr2,inode64,noquota +35 15 0:26 / /tmp rw,relatime - tmpfs tmpfs rw +36 16 0:27 / /proc/sys/fs/binfmt_misc rw,nosuid,nodev,noexec,relatime - binfmt_misc binfmt_misc rw +42 15 0:33 / /var/lib/nfs/rpc_pipefs rw,relatime - rpc_pipefs rpc_pipefs rw +43 16 0:34 / /proc/fs/nfsd rw,nosuid,nodev,noexec,relatime - nfsd nfsd rw +44 15 0:35 / /home/tianon/.gvfs rw,nosuid,nodev,relatime - fuse.gvfs-fuse-daemon gvfs-fuse-daemon rw,user_id=1000,group_id=1000 +68 15 0:3336 / /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd rw,relatime - aufs none rw,si=9b4a7640128db39c +86 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/config.env /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/.dockerenv rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +87 68 8:6 /etc/resolv.conf /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/resolv.conf rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +88 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hostname /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hostname rw,noatime,nodiratime - 
ext4 /dev/sda6 rw,data=ordered +89 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hosts /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hosts rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +38 15 0:3384 / /var/lib/docker/aufs/mnt/0292005a9292401bb5197657f2b682d97d8edcb3b72b5e390d2a680139985b55 rw,relatime - aufs none rw,si=9b4a7642b584939c +39 15 0:3385 / /var/lib/docker/aufs/mnt/59db98c889de5f71b70cfb82c40cbe47b64332f0f56042a2987a9e5df6e5e3aa rw,relatime - aufs none rw,si=9b4a7642b584e39c +40 15 0:3386 / /var/lib/docker/aufs/mnt/0545f0f2b6548eb9601d08f35a08f5a0a385407d36027a28f58e06e9f61e0278 rw,relatime - aufs none rw,si=9b4a7642b584b39c +41 15 0:3387 / /var/lib/docker/aufs/mnt/d882cfa16d1aa8fe0331a36e79be3d80b151e49f24fc39a39c3fed1735d5feb5 rw,relatime - aufs none rw,si=9b4a76453040039c +45 15 0:3388 / /var/lib/docker/aufs/mnt/055ca3befcb1626e74f5344b3398724ff05c0de0e20021683d04305c9e70a3f6 rw,relatime - aufs none rw,si=9b4a76453040739c +46 15 0:3389 / /var/lib/docker/aufs/mnt/b899e4567a351745d4285e7f1c18fdece75d877deb3041981cd290be348b7aa6 rw,relatime - aufs none rw,si=9b4a7647def4039c +47 15 0:3390 / /var/lib/docker/aufs/mnt/067ca040292c58954c5129f953219accfae0d40faca26b4d05e76ca76a998f16 rw,relatime - aufs none rw,si=9b4a7647def4239c +48 15 0:3391 / /var/lib/docker/aufs/mnt/8c995e7cb6e5082742daeea720e340b021d288d25d92e0412c03d200df308a11 rw,relatime - aufs none rw,si=9b4a764479c1639c +49 15 0:3392 / /var/lib/docker/aufs/mnt/07cc54dfae5b45300efdacdd53cc72c01b9044956a86ce7bff42d087e426096d rw,relatime - aufs none rw,si=9b4a764479c1739c +50 15 0:3393 / /var/lib/docker/aufs/mnt/0a9c95cf4c589c05b06baa79150b0cc1d8e7102759fe3ce4afaabb8247ca4f85 rw,relatime - aufs none rw,si=9b4a7644059c839c +51 15 0:3394 / /var/lib/docker/aufs/mnt/468fa98cececcf4e226e8370f18f4f848d63faf287fb8321a07f73086441a3a0 rw,relatime - aufs none rw,si=9b4a7644059ca39c +52 15 0:3395 / /var/lib/docker/aufs/mnt/0b826192231c5ce066fffb5beff4397337b5fc19a377aa7c6282c7c0ce7f111f rw,relatime - aufs none rw,si=9b4a764479c1339c +53 15 0:3396 / /var/lib/docker/aufs/mnt/93b8ba1b772fbe79709b909c43ea4b2c30d712e53548f467db1ffdc7a384f196 rw,relatime - aufs none rw,si=9b4a7640798a739c +54 15 0:3397 / /var/lib/docker/aufs/mnt/0c0d0acfb506859b12ef18cdfef9ebed0b43a611482403564224bde9149d373c rw,relatime - aufs none rw,si=9b4a7640798a039c +55 15 0:3398 / /var/lib/docker/aufs/mnt/33648c39ab6c7c74af0243d6d6a81b052e9e25ad1e04b19892eb2dde013e358b rw,relatime - aufs none rw,si=9b4a7644b439b39c +56 15 0:3399 / /var/lib/docker/aufs/mnt/0c12bea97a1c958a3c739fb148536c1c89351d48e885ecda8f0499b5cc44407e rw,relatime - aufs none rw,si=9b4a7640798a239c +57 15 0:3400 / /var/lib/docker/aufs/mnt/ed443988ce125f172d7512e84a4de2627405990fd767a16adefa8ce700c19ce8 rw,relatime - aufs none rw,si=9b4a7644c8ed339c +59 15 0:3402 / /var/lib/docker/aufs/mnt/f61612c324ff3c924d3f7a82fb00a0f8d8f73c248c41897061949e9f5ab7e3b1 rw,relatime - aufs none rw,si=9b4a76442810c39c +60 15 0:3403 / /var/lib/docker/aufs/mnt/0f1ee55c6c4e25027b80de8e64b8b6fb542b3b41aa0caab9261da75752e22bfd rw,relatime - aufs none rw,si=9b4a76442810e39c +61 15 0:3404 / /var/lib/docker/aufs/mnt/956f6cc4af5785cb3ee6963dcbca668219437d9b28f513290b1453ac64a34f97 rw,relatime - aufs none rw,si=9b4a7644303ec39c +62 15 0:3405 / /var/lib/docker/aufs/mnt/1099769158c4b4773e2569e38024e8717e400f87a002c41d8cf47cb81b051ba6 rw,relatime - aufs none rw,si=9b4a7644303ee39c +63 15 0:3406 / 
/var/lib/docker/aufs/mnt/11890ceb98d4442595b676085cd7b21550ab85c5df841e0fba997ff54e3d522d rw,relatime - aufs none rw,si=9b4a7644303ed39c +64 15 0:3407 / /var/lib/docker/aufs/mnt/acdb90dc378e8ed2420b43a6d291f1c789a081cd1904018780cc038fcd7aae53 rw,relatime - aufs none rw,si=9b4a76434be2139c +65 15 0:3408 / /var/lib/docker/aufs/mnt/120e716f19d4714fbe63cc1ed246204f2c1106eefebc6537ba2587d7e7711959 rw,relatime - aufs none rw,si=9b4a76434be2339c +66 15 0:3409 / /var/lib/docker/aufs/mnt/b197b7fffb61d89e0ba1c40de9a9fc0d912e778b3c1bd828cf981ff37c1963bc rw,relatime - aufs none rw,si=9b4a76434be2039c +70 15 0:3412 / /var/lib/docker/aufs/mnt/1434b69d2e1bb18a9f0b96b9cdac30132b2688f5d1379f68a39a5e120c2f93eb rw,relatime - aufs none rw,si=9b4a76434be2639c +71 15 0:3413 / /var/lib/docker/aufs/mnt/16006e83caf33ab5eb0cd6afc92ea2ee8edeff897496b0bb3ec3a75b767374b3 rw,relatime - aufs none rw,si=9b4a7644d790439c +72 15 0:3414 / /var/lib/docker/aufs/mnt/55bfa5f44e94d27f91f79ba901b118b15098449165c87abf1b53ffff147ff164 rw,relatime - aufs none rw,si=9b4a7644d790239c +73 15 0:3415 / /var/lib/docker/aufs/mnt/1912b97a07ab21ccd98a2a27bc779bf3cf364a3138afa3c3e6f7f169a3c3eab5 rw,relatime - aufs none rw,si=9b4a76441822739c +76 15 0:3418 / /var/lib/docker/aufs/mnt/1a7c3292e8879bd91ffd9282e954f643b1db5683093574c248ff14a9609f2f56 rw,relatime - aufs none rw,si=9b4a76438cb7239c +77 15 0:3419 / /var/lib/docker/aufs/mnt/bb1faaf0d076ddba82c2318305a85f490dafa4e8a8640a8db8ed657c439120cc rw,relatime - aufs none rw,si=9b4a76438cb7339c +78 15 0:3420 / /var/lib/docker/aufs/mnt/1ab869f21d2241a73ac840c7f988490313f909ac642eba71d092204fec66dd7c rw,relatime - aufs none rw,si=9b4a76438cb7639c +79 15 0:3421 / /var/lib/docker/aufs/mnt/fd7245b2cfe3890fa5f5b452260e4edf9e7fb7746532ed9d83f7a0d7dbaa610e rw,relatime - aufs none rw,si=9b4a7644bdc0139c +80 15 0:3422 / /var/lib/docker/aufs/mnt/1e5686c5301f26b9b3cd24e322c608913465cc6c5d0dcd7c5e498d1314747d61 rw,relatime - aufs none rw,si=9b4a7644bdc0639c +81 15 0:3423 / /var/lib/docker/aufs/mnt/52edf6ee6e40bfec1e9301a4d4a92ab83d144e2ae4ce5099e99df6138cb844bf rw,relatime - aufs none rw,si=9b4a7644bdc0239c +82 15 0:3424 / /var/lib/docker/aufs/mnt/1ea10fb7085d28cda4904657dff0454e52598d28e1d77e4f2965bbc3666e808f rw,relatime - aufs none rw,si=9b4a76438cb7139c +83 15 0:3425 / /var/lib/docker/aufs/mnt/9c03e98c3593946dbd4087f8d83f9ca262f4a2efdc952ce60690838b9ba6c526 rw,relatime - aufs none rw,si=9b4a76443020639c +84 15 0:3426 / /var/lib/docker/aufs/mnt/220a2344d67437602c6d2cee9a98c46be13f82c2a8063919dd2fad52bf2fb7dd rw,relatime - aufs none rw,si=9b4a76434bff339c +94 15 0:3427 / /var/lib/docker/aufs/mnt/3b32876c5b200312c50baa476ff342248e88c8ea96e6a1032cd53a88738a1cf2 rw,relatime - aufs none rw,si=9b4a76434bff139c +95 15 0:3428 / /var/lib/docker/aufs/mnt/23ee2b8b0d4ae8db6f6d1e168e2c6f79f8a18f953b09f65e0d22cc1e67a3a6fa rw,relatime - aufs none rw,si=9b4a7646c305c39c +96 15 0:3429 / /var/lib/docker/aufs/mnt/e86e6daa70b61b57945fa178222615f3c3d6bcef12c9f28e9f8623d44dc2d429 rw,relatime - aufs none rw,si=9b4a7646c305f39c +97 15 0:3430 / /var/lib/docker/aufs/mnt/2413d07623e80860bb2e9e306fbdee699afd07525785c025c591231e864aa162 rw,relatime - aufs none rw,si=9b4a76434bff039c +98 15 0:3431 / /var/lib/docker/aufs/mnt/adfd622eb22340fc80b429e5564b125668e260bf9068096c46dd59f1386a4b7d rw,relatime - aufs none rw,si=9b4a7646a7a1039c +102 15 0:3435 / /var/lib/docker/aufs/mnt/27cd92e7a91d02e2d6b44d16679a00fb6d169b19b88822891084e7fd1a84882d rw,relatime - aufs none rw,si=9b4a7646f25ec39c +103 15 0:3436 / 
/var/lib/docker/aufs/mnt/27dfdaf94cfbf45055c748293c37dd68d9140240bff4c646cb09216015914a88 rw,relatime - aufs none rw,si=9b4a7646732f939c +104 15 0:3437 / /var/lib/docker/aufs/mnt/5ed7524aff68dfbf0fc601cbaeac01bab14391850a973dabf3653282a627920f rw,relatime - aufs none rw,si=9b4a7646732f839c +105 15 0:3438 / /var/lib/docker/aufs/mnt/2a0d4767e536beb5785b60e071e3ac8e5e812613ab143a9627bee77d0c9ab062 rw,relatime - aufs none rw,si=9b4a7646732fe39c +106 15 0:3439 / /var/lib/docker/aufs/mnt/dea3fc045d9f4ae51ba952450b948a822cf85c39411489ca5224f6d9a8d02bad rw,relatime - aufs none rw,si=9b4a764012ad839c +107 15 0:3440 / /var/lib/docker/aufs/mnt/2d140a787160798da60cb67c21b1210054ad4dafecdcf832f015995b9aa99cfd rw,relatime - aufs none rw,si=9b4a764012add39c +108 15 0:3441 / /var/lib/docker/aufs/mnt/cb190b2a8e984475914430fbad2382e0d20b9b659f8ef83ae8d170cc672e519c rw,relatime - aufs none rw,si=9b4a76454d9c239c +109 15 0:3442 / /var/lib/docker/aufs/mnt/2f4a012d5a7ffd90256a6e9aa479054b3dddbc3c6a343f26dafbf3196890223b rw,relatime - aufs none rw,si=9b4a76454d9c439c +110 15 0:3443 / /var/lib/docker/aufs/mnt/63cc77904b80c4ffbf49cb974c5d8733dc52ad7640d3ae87554b325d7312d87f rw,relatime - aufs none rw,si=9b4a76454d9c339c +111 15 0:3444 / /var/lib/docker/aufs/mnt/30333e872c451482ea2d235ff2192e875bd234006b238ae2bdde3b91a86d7522 rw,relatime - aufs none rw,si=9b4a76422cebf39c +112 15 0:3445 / /var/lib/docker/aufs/mnt/6c54fc1125da3925cae65b5c9a98f3be55b0a2c2666082e5094a4ba71beb5bff rw,relatime - aufs none rw,si=9b4a7646dd5a439c +113 15 0:3446 / /var/lib/docker/aufs/mnt/3087d48cb01cda9d0a83a9ca301e6ea40e8593d18c4921be4794c91a420ab9a3 rw,relatime - aufs none rw,si=9b4a7646dd5a739c +114 15 0:3447 / /var/lib/docker/aufs/mnt/cc2607462a8f55b179a749b144c3fdbb50678e1a4f3065ea04e283e9b1f1d8e2 rw,relatime - aufs none rw,si=9b4a7646dd5a239c +117 15 0:3450 / /var/lib/docker/aufs/mnt/310c5e8392b29e8658a22e08d96d63936633b7e2c38e8d220047928b00a03d24 rw,relatime - aufs none rw,si=9b4a7647932d739c +118 15 0:3451 / /var/lib/docker/aufs/mnt/38a1f0029406ba9c3b6058f2f406d8a1d23c855046cf355c91d87d446fcc1460 rw,relatime - aufs none rw,si=9b4a76445abc939c +119 15 0:3452 / /var/lib/docker/aufs/mnt/42e109ab7914ae997a11ccd860fd18e4d488c50c044c3240423ce15774b8b62e rw,relatime - aufs none rw,si=9b4a76445abca39c +120 15 0:3453 / /var/lib/docker/aufs/mnt/365d832af0402d052b389c1e9c0d353b48487533d20cd4351df8e24ec4e4f9d8 rw,relatime - aufs none rw,si=9b4a7644066aa39c +121 15 0:3454 / /var/lib/docker/aufs/mnt/d3fa8a24d695b6cda9b64f96188f701963d28bef0473343f8b212df1a2cf1d2b rw,relatime - aufs none rw,si=9b4a7644066af39c +122 15 0:3455 / /var/lib/docker/aufs/mnt/37d4f491919abc49a15d0c7a7cc8383f087573525d7d288accd14f0b4af9eae0 rw,relatime - aufs none rw,si=9b4a7644066ad39c +123 15 0:3456 / /var/lib/docker/aufs/mnt/93902707fe12cbdd0068ce73f2baad4b3a299189b1b19cb5f8a2025e106ae3f5 rw,relatime - aufs none rw,si=9b4a76444445f39c +126 15 0:3459 / /var/lib/docker/aufs/mnt/3b49291670a625b9bbb329ffba99bf7fa7abff80cefef040f8b89e2b3aad4f9f rw,relatime - aufs none rw,si=9b4a7640798a339c +127 15 0:3460 / /var/lib/docker/aufs/mnt/8d9c7b943cc8f854f4d0d4ec19f7c16c13b0cc4f67a41472a072648610cecb59 rw,relatime - aufs none rw,si=9b4a76427383039c +128 15 0:3461 / /var/lib/docker/aufs/mnt/3b6c90036526c376307df71d49c9f5fce334c01b926faa6a78186842de74beac rw,relatime - aufs none rw,si=9b4a7644badd439c +130 15 0:3463 / /var/lib/docker/aufs/mnt/7b24158eeddfb5d31b7e932e406ea4899fd728344335ff8e0765e89ddeb351dd rw,relatime - aufs none rw,si=9b4a7644badd539c +131 15 0:3464 / 
/var/lib/docker/aufs/mnt/3ead6dd5773765c74850cf6c769f21fe65c29d622ffa712664f9f5b80364ce27 rw,relatime - aufs none rw,si=9b4a7642f469939c +132 15 0:3465 / /var/lib/docker/aufs/mnt/3f825573b29547744a37b65597a9d6d15a8350be4429b7038d126a4c9a8e178f rw,relatime - aufs none rw,si=9b4a7642f469c39c +133 15 0:3466 / /var/lib/docker/aufs/mnt/f67aaaeb3681e5dcb99a41f847087370bd1c206680cb8c7b6a9819fd6c97a331 rw,relatime - aufs none rw,si=9b4a7647cc25939c +134 15 0:3467 / /var/lib/docker/aufs/mnt/41afe6cfb3c1fc2280b869db07699da88552786e28793f0bc048a265c01bd942 rw,relatime - aufs none rw,si=9b4a7647cc25c39c +135 15 0:3468 / /var/lib/docker/aufs/mnt/b8092ea59da34a40b120e8718c3ae9fa8436996edc4fc50e4b99c72dfd81e1af rw,relatime - aufs none rw,si=9b4a76445abc439c +136 15 0:3469 / /var/lib/docker/aufs/mnt/42c69d2cc179e2684458bb8596a9da6dad182c08eae9b74d5f0e615b399f75a5 rw,relatime - aufs none rw,si=9b4a76455ddbe39c +137 15 0:3470 / /var/lib/docker/aufs/mnt/ea0871954acd2d62a211ac60e05969622044d4c74597870c4f818fbb0c56b09b rw,relatime - aufs none rw,si=9b4a76455ddbf39c +138 15 0:3471 / /var/lib/docker/aufs/mnt/4307906b275ab3fc971786b3841ae3217ac85b6756ddeb7ad4ba09cd044c2597 rw,relatime - aufs none rw,si=9b4a76455ddb839c +139 15 0:3472 / /var/lib/docker/aufs/mnt/4390b872928c53500a5035634f3421622ed6299dc1472b631fc45de9f56dc180 rw,relatime - aufs none rw,si=9b4a76402f2fd39c +140 15 0:3473 / /var/lib/docker/aufs/mnt/6bb41e78863b85e4aa7da89455314855c8c3bda64e52a583bab15dc1fa2e80c2 rw,relatime - aufs none rw,si=9b4a76402f2fa39c +141 15 0:3474 / /var/lib/docker/aufs/mnt/4444f583c2a79c66608f4673a32c9c812154f027045fbd558c2d69920c53f835 rw,relatime - aufs none rw,si=9b4a764479dbd39c +142 15 0:3475 / /var/lib/docker/aufs/mnt/6f11883af4a05ea362e0c54df89058da4859f977efd07b6f539e1f55c1d2a668 rw,relatime - aufs none rw,si=9b4a76402f30b39c +143 15 0:3476 / /var/lib/docker/aufs/mnt/453490dd32e7c2e9ef906f995d8fb3c2753923d1a5e0ba3fd3296e2e4dc238e7 rw,relatime - aufs none rw,si=9b4a76402f30c39c +144 15 0:3477 / /var/lib/docker/aufs/mnt/45e5945735ee102b5e891c91650c57ec4b52bb53017d68f02d50ea8a6e230610 rw,relatime - aufs none rw,si=9b4a76423260739c +147 15 0:3480 / /var/lib/docker/aufs/mnt/4727a64a5553a1125f315b96bed10d3073d6988225a292cce732617c925b56ab rw,relatime - aufs none rw,si=9b4a76443030339c +150 15 0:3483 / /var/lib/docker/aufs/mnt/4e348b5187b9a567059306afc72d42e0ec5c893b0d4abd547526d5f9b6fb4590 rw,relatime - aufs none rw,si=9b4a7644f5d8c39c +151 15 0:3484 / /var/lib/docker/aufs/mnt/4efc616bfbc3f906718b052da22e4335f8e9f91ee9b15866ed3a8029645189ef rw,relatime - aufs none rw,si=9b4a7644f5d8939c +152 15 0:3485 / /var/lib/docker/aufs/mnt/83e730ae9754d5adb853b64735472d98dfa17136b8812ac9cfcd1eba7f4e7d2d rw,relatime - aufs none rw,si=9b4a76469aa7139c +153 15 0:3486 / /var/lib/docker/aufs/mnt/4fc5ba8a5b333be2b7eefacccb626772eeec0ae8a6975112b56c9fb36c0d342f rw,relatime - aufs none rw,si=9b4a7640128dc39c +154 15 0:3487 / /var/lib/docker/aufs/mnt/50200d5edff5dfe8d1ef3c78b0bbd709793ac6e936aa16d74ff66f7ea577b6f9 rw,relatime - aufs none rw,si=9b4a7640128da39c +155 15 0:3488 / /var/lib/docker/aufs/mnt/51e5e51604361448f0b9777f38329f414bc5ba9cf238f26d465ff479bd574b61 rw,relatime - aufs none rw,si=9b4a76444f68939c +156 15 0:3489 / /var/lib/docker/aufs/mnt/52a142149aa98bba83df8766bbb1c629a97b9799944ead90dd206c4bdf0b8385 rw,relatime - aufs none rw,si=9b4a76444f68b39c +157 15 0:3490 / /var/lib/docker/aufs/mnt/52dd21a94a00f58a1ed489312fcfffb91578089c76c5650364476f1d5de031bc rw,relatime - aufs none rw,si=9b4a76444f68f39c +158 15 0:3491 / 
/var/lib/docker/aufs/mnt/ee562415ddaad353ed22c88d0ca768a0c74bfba6333b6e25c46849ee22d990da rw,relatime - aufs none rw,si=9b4a7640128d839c +159 15 0:3492 / /var/lib/docker/aufs/mnt/db47a9e87173f7554f550c8a01891de79cf12acdd32e01f95c1a527a08bdfb2c rw,relatime - aufs none rw,si=9b4a764405a1d39c +160 15 0:3493 / /var/lib/docker/aufs/mnt/55e827bf6d44d930ec0b827c98356eb8b68c3301e2d60d1429aa72e05b4c17df rw,relatime - aufs none rw,si=9b4a764405a1a39c +162 15 0:3495 / /var/lib/docker/aufs/mnt/578dc4e0a87fc37ec081ca098430499a59639c09f6f12a8f48de29828a091aa6 rw,relatime - aufs none rw,si=9b4a76406d7d439c +163 15 0:3496 / /var/lib/docker/aufs/mnt/728cc1cb04fa4bc6f7bf7a90980beda6d8fc0beb71630874c0747b994efb0798 rw,relatime - aufs none rw,si=9b4a76444f20e39c +164 15 0:3497 / /var/lib/docker/aufs/mnt/5850cc4bd9b55aea46c7ad598f1785117607974084ea643580f58ce3222e683a rw,relatime - aufs none rw,si=9b4a7644a824239c +165 15 0:3498 / /var/lib/docker/aufs/mnt/89443b3f766d5a37bc8b84e29da8b84e6a3ea8486d3cf154e2aae1816516e4a8 rw,relatime - aufs none rw,si=9b4a7644a824139c +166 15 0:3499 / /var/lib/docker/aufs/mnt/f5ae8fd5a41a337907d16515bc3162525154b59c32314c695ecd092c3b47943d rw,relatime - aufs none rw,si=9b4a7644a824439c +167 15 0:3500 / /var/lib/docker/aufs/mnt/5a430854f2a03a9e5f7cbc9f3fb46a8ebca526a5b3f435236d8295e5998798f5 rw,relatime - aufs none rw,si=9b4a7647fc82439c +168 15 0:3501 / /var/lib/docker/aufs/mnt/eda16901ae4cead35070c39845cbf1e10bd6b8cb0ffa7879ae2d8a186e460f91 rw,relatime - aufs none rw,si=9b4a76441e0df39c +169 15 0:3502 / /var/lib/docker/aufs/mnt/5a593721430c2a51b119ff86a7e06ea2b37e3b4131f8f1344d402b61b0c8d868 rw,relatime - aufs none rw,si=9b4a764248bad39c +170 15 0:3503 / /var/lib/docker/aufs/mnt/d662ad0a30fbfa902e0962108685b9330597e1ee2abb16dc9462eb5a67fdd23f rw,relatime - aufs none rw,si=9b4a764248bae39c +171 15 0:3504 / /var/lib/docker/aufs/mnt/5bc9de5c79812843fb36eee96bef1ddba812407861f572e33242f4ee10da2c15 rw,relatime - aufs none rw,si=9b4a764248ba839c +172 15 0:3505 / /var/lib/docker/aufs/mnt/5e763de8e9b0f7d58d2e12a341e029ab4efb3b99788b175090d8209e971156c1 rw,relatime - aufs none rw,si=9b4a764248baa39c +173 15 0:3506 / /var/lib/docker/aufs/mnt/b4431dc2739936f1df6387e337f5a0c99cf051900c896bd7fd46a870ce61c873 rw,relatime - aufs none rw,si=9b4a76401263539c +174 15 0:3507 / /var/lib/docker/aufs/mnt/5f37830e5a02561ab8c67ea3113137ba69f67a60e41c05cb0e7a0edaa1925b24 rw,relatime - aufs none rw,si=9b4a76401263639c +184 15 0:3508 / /var/lib/docker/aufs/mnt/62ea10b957e6533538a4633a1e1d678502f50ddcdd354b2ca275c54dd7a7793a rw,relatime - aufs none rw,si=9b4a76401263039c +187 15 0:3509 / /var/lib/docker/aufs/mnt/d56ee9d44195fe390e042fda75ec15af5132adb6d5c69468fa8792f4e54a6953 rw,relatime - aufs none rw,si=9b4a76401263239c +188 15 0:3510 / /var/lib/docker/aufs/mnt/6a300930673174549c2b62f36c933f0332a20735978c007c805a301f897146c5 rw,relatime - aufs none rw,si=9b4a76455d4c539c +189 15 0:3511 / /var/lib/docker/aufs/mnt/64496c45c84d348c24d410015456d101601c30cab4d1998c395591caf7e57a70 rw,relatime - aufs none rw,si=9b4a76455d4c639c +190 15 0:3512 / /var/lib/docker/aufs/mnt/65a6a645883fe97a7422cd5e71ebe0bc17c8e6302a5361edf52e89747387e908 rw,relatime - aufs none rw,si=9b4a76455d4c039c +191 15 0:3513 / /var/lib/docker/aufs/mnt/672be40695f7b6e13b0a3ed9fc996c73727dede3481f58155950fcfad57ed616 rw,relatime - aufs none rw,si=9b4a76455d4c239c +192 15 0:3514 / /var/lib/docker/aufs/mnt/d42438acb2bfb2169e1c0d8e917fc824f7c85d336dadb0b0af36dfe0f001b3ba rw,relatime - aufs none rw,si=9b4a7642bfded39c +193 15 0:3515 / 
/var/lib/docker/aufs/mnt/b48a54abf26d01cb2ddd908b1ed6034d17397c1341bf0eb2b251a3e5b79be854 rw,relatime - aufs none rw,si=9b4a7642bfdee39c +194 15 0:3516 / /var/lib/docker/aufs/mnt/76f27134491f052bfb87f59092126e53ef875d6851990e59195a9da16a9412f8 rw,relatime - aufs none rw,si=9b4a7642bfde839c +195 15 0:3517 / /var/lib/docker/aufs/mnt/6bd626a5462b4f8a8e1cc7d10351326dca97a59b2758e5ea549a4f6350ce8a90 rw,relatime - aufs none rw,si=9b4a7642bfdea39c +196 15 0:3518 / /var/lib/docker/aufs/mnt/f1fe3549dbd6f5ca615e9139d9b53f0c83a3b825565df37628eacc13e70cbd6d rw,relatime - aufs none rw,si=9b4a7642bfdf539c +197 15 0:3519 / /var/lib/docker/aufs/mnt/6d0458c8426a9e93d58d0625737e6122e725c9408488ed9e3e649a9984e15c34 rw,relatime - aufs none rw,si=9b4a7642bfdf639c +198 15 0:3520 / /var/lib/docker/aufs/mnt/6e4c97db83aa82145c9cf2bafc20d500c0b5389643b689e3ae84188c270a48c5 rw,relatime - aufs none rw,si=9b4a7642bfdf039c +199 15 0:3521 / /var/lib/docker/aufs/mnt/eb94d6498f2c5969eaa9fa11ac2934f1ab90ef88e2d002258dca08e5ba74ea27 rw,relatime - aufs none rw,si=9b4a7642bfdf239c +200 15 0:3522 / /var/lib/docker/aufs/mnt/fe3f88f0c511608a2eec5f13a98703aa16e55dbf930309723d8a37101f539fe1 rw,relatime - aufs none rw,si=9b4a7642bfc3539c +201 15 0:3523 / /var/lib/docker/aufs/mnt/6f40c229fb9cad85fabf4b64a2640a5403ec03fe5ac1a57d0609fb8b606b9c83 rw,relatime - aufs none rw,si=9b4a7642bfc3639c +202 15 0:3524 / /var/lib/docker/aufs/mnt/7513e9131f7a8acf58ff15248237feb767c78732ca46e159f4d791e6ef031dbc rw,relatime - aufs none rw,si=9b4a7642bfc3039c +203 15 0:3525 / /var/lib/docker/aufs/mnt/79f48b00aa713cdf809c6bb7c7cb911b66e9a8076c81d6c9d2504139984ea2da rw,relatime - aufs none rw,si=9b4a7642bfc3239c +204 15 0:3526 / /var/lib/docker/aufs/mnt/c3680418350d11358f0a96c676bc5aa74fa00a7c89e629ef5909d3557b060300 rw,relatime - aufs none rw,si=9b4a7642f47cd39c +205 15 0:3527 / /var/lib/docker/aufs/mnt/7a1744dd350d7fcc0cccb6f1757ca4cbe5453f203a5888b0f1014d96ad5a5ef9 rw,relatime - aufs none rw,si=9b4a7642f47ce39c +206 15 0:3528 / /var/lib/docker/aufs/mnt/7fa99662db046be9f03c33c35251afda9ccdc0085636bbba1d90592cec3ff68d rw,relatime - aufs none rw,si=9b4a7642f47c839c +207 15 0:3529 / /var/lib/docker/aufs/mnt/f815021ef20da9c9b056bd1d52d8aaf6e2c0c19f11122fc793eb2b04eb995e35 rw,relatime - aufs none rw,si=9b4a7642f47ca39c +208 15 0:3530 / /var/lib/docker/aufs/mnt/801086ae3110192d601dfcebdba2db92e86ce6b6a9dba6678ea04488e4513669 rw,relatime - aufs none rw,si=9b4a7642dc6dd39c +209 15 0:3531 / /var/lib/docker/aufs/mnt/822ba7db69f21daddda87c01cfbfbf73013fc03a879daf96d16cdde6f9b1fbd6 rw,relatime - aufs none rw,si=9b4a7642dc6de39c +210 15 0:3532 / /var/lib/docker/aufs/mnt/834227c1a950fef8cae3827489129d0dd220541e60c6b731caaa765bf2e6a199 rw,relatime - aufs none rw,si=9b4a7642dc6d839c +211 15 0:3533 / /var/lib/docker/aufs/mnt/83dccbc385299bd1c7cf19326e791b33a544eea7b4cdfb6db70ea94eed4389fb rw,relatime - aufs none rw,si=9b4a7642dc6da39c +212 15 0:3534 / /var/lib/docker/aufs/mnt/f1b8e6f0e7c8928b5dcdab944db89306ebcae3e0b32f9ff40d2daa8329f21600 rw,relatime - aufs none rw,si=9b4a7645a126039c +213 15 0:3535 / /var/lib/docker/aufs/mnt/970efb262c7a020c2404cbcc5b3259efba0d110a786079faeef05bc2952abf3a rw,relatime - aufs none rw,si=9b4a7644c8ed139c +214 15 0:3536 / /var/lib/docker/aufs/mnt/84b6d73af7450f3117a77e15a5ca1255871fea6182cd8e8a7be6bc744be18c2c rw,relatime - aufs none rw,si=9b4a76406559139c +215 15 0:3537 / /var/lib/docker/aufs/mnt/88be2716e026bc681b5e63fe7942068773efbd0b6e901ca7ba441412006a96b6 rw,relatime - aufs none rw,si=9b4a76406559339c +216 15 0:3538 / 
/var/lib/docker/aufs/mnt/c81939aa166ce50cd8bca5cfbbcc420a78e0318dd5cd7c755209b9166a00a752 rw,relatime - aufs none rw,si=9b4a76406559239c +217 15 0:3539 / /var/lib/docker/aufs/mnt/e0f241645d64b7dc5ff6a8414087cca226be08fb54ce987d1d1f6350c57083aa rw,relatime - aufs none rw,si=9b4a7647cfc0f39c +218 15 0:3540 / /var/lib/docker/aufs/mnt/e10e2bf75234ed51d8a6a4bb39e465404fecbe318e54400d3879cdb2b0679c78 rw,relatime - aufs none rw,si=9b4a7647cfc0939c +219 15 0:3541 / /var/lib/docker/aufs/mnt/8f71d74c8cfc3228b82564aa9f09b2e576cff0083ddfb6aa5cb350346063f080 rw,relatime - aufs none rw,si=9b4a7647cfc0a39c +220 15 0:3542 / /var/lib/docker/aufs/mnt/9159f1eba2aef7f5205cc18d015cda7f5933cd29bba3b1b8aed5ccb5824c69ee rw,relatime - aufs none rw,si=9b4a76468cedd39c +221 15 0:3543 / /var/lib/docker/aufs/mnt/932cad71e652e048e500d9fbb5b8ea4fc9a269d42a3134ce527ceef42a2be56b rw,relatime - aufs none rw,si=9b4a76468cede39c +222 15 0:3544 / /var/lib/docker/aufs/mnt/bf1e1b5f529e8943cc0144ee86dbaaa37885c1ddffcef29537e0078ee7dd316a rw,relatime - aufs none rw,si=9b4a76468ced839c +223 15 0:3545 / /var/lib/docker/aufs/mnt/949d93ecf3322e09f858ce81d5f4b434068ec44ff84c375de03104f7b45ee955 rw,relatime - aufs none rw,si=9b4a76468ceda39c +224 15 0:3546 / /var/lib/docker/aufs/mnt/d65c6087f92dc2a3841b5251d2fe9ca07d4c6e5b021597692479740816e4e2a1 rw,relatime - aufs none rw,si=9b4a7645a126239c +225 15 0:3547 / /var/lib/docker/aufs/mnt/98a0153119d0651c193d053d254f6e16a68345a141baa80c87ae487e9d33f290 rw,relatime - aufs none rw,si=9b4a7640787cf39c +226 15 0:3548 / /var/lib/docker/aufs/mnt/99daf7fe5847c017392f6e59aa9706b3dfdd9e6d1ba11dae0f7fffde0a60b5e5 rw,relatime - aufs none rw,si=9b4a7640787c839c +227 15 0:3549 / /var/lib/docker/aufs/mnt/9ad1f2fe8a5599d4e10c5a6effa7f03d932d4e92ee13149031a372087a359079 rw,relatime - aufs none rw,si=9b4a7640787ca39c +228 15 0:3550 / /var/lib/docker/aufs/mnt/c26d64494da782ddac26f8370d86ac93e7c1666d88a7b99110fc86b35ea6a85d rw,relatime - aufs none rw,si=9b4a7642fc6b539c +229 15 0:3551 / /var/lib/docker/aufs/mnt/a49e4a8275133c230ec640997f35f172312eb0ea5bd2bbe10abf34aae98f30eb rw,relatime - aufs none rw,si=9b4a7642fc6b639c +230 15 0:3552 / /var/lib/docker/aufs/mnt/b5e2740c867ed843025f49d84e8d769de9e8e6039b3c8cb0735b5bf358994bc7 rw,relatime - aufs none rw,si=9b4a7642fc6b039c +231 15 0:3553 / /var/lib/docker/aufs/mnt/a826fdcf3a7039b30570054579b65763db605a314275d7aef31b872c13311b4b rw,relatime - aufs none rw,si=9b4a7642fc6b239c +232 15 0:3554 / /var/lib/docker/aufs/mnt/addf3025babf5e43b5a3f4a0da7ad863dda3c01fb8365c58fd8d28bb61dc11bc rw,relatime - aufs none rw,si=9b4a76407871d39c +233 15 0:3555 / /var/lib/docker/aufs/mnt/c5b6c6813ab3e5ebdc6d22cb2a3d3106a62095f2c298be52b07a3b0fa20ff690 rw,relatime - aufs none rw,si=9b4a76407871e39c +234 15 0:3556 / /var/lib/docker/aufs/mnt/af0609eaaf64e2392060cb46f5a9f3d681a219bb4c651d4f015bf573fbe6c4cf rw,relatime - aufs none rw,si=9b4a76407871839c +235 15 0:3557 / /var/lib/docker/aufs/mnt/e7f20e3c37ecad39cd90a97cd3549466d0d106ce4f0a930b8495442634fa4a1f rw,relatime - aufs none rw,si=9b4a76407871a39c +237 15 0:3559 / /var/lib/docker/aufs/mnt/b57a53d440ffd0c1295804fa68cdde35d2fed5409484627e71b9c37e4249fd5c rw,relatime - aufs none rw,si=9b4a76444445a39c +238 15 0:3560 / /var/lib/docker/aufs/mnt/b5e7d7b8f35e47efbba3d80c5d722f5e7bd43e54c824e54b4a4b351714d36d42 rw,relatime - aufs none rw,si=9b4a7647932d439c +239 15 0:3561 / /var/lib/docker/aufs/mnt/f1b136def157e9465640658f277f3347de593c6ae76412a2e79f7002f091cae2 rw,relatime - aufs none rw,si=9b4a76445abcd39c +240 15 0:3562 / 
/var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=9b4a7644403b339c +241 15 0:3563 / /var/lib/docker/aufs/mnt/b89b140cdbc95063761864e0a23346207fa27ee4c5c63a1ae85c9069a9d9cf1d rw,relatime - aufs none rw,si=9b4a7644aa19739c +242 15 0:3564 / /var/lib/docker/aufs/mnt/bc6a69ed51c07f5228f6b4f161c892e6a949c0e7e86a9c3432049d4c0e5cd298 rw,relatime - aufs none rw,si=9b4a7644aa19139c +243 15 0:3565 / /var/lib/docker/aufs/mnt/be4e2ba3f136933e239f7cf3d136f484fb9004f1fbdfee24a62a2c7b0ab30670 rw,relatime - aufs none rw,si=9b4a7644aa19339c +244 15 0:3566 / /var/lib/docker/aufs/mnt/e04ca1a4a5171e30d20f0c92f90a50b8b6f8600af5459c4b4fb25e42e864dfe1 rw,relatime - aufs none rw,si=9b4a7647932d139c +245 15 0:3567 / /var/lib/docker/aufs/mnt/be61576b31db893129aaffcd3dcb5ce35e49c4b71b30c392a78609a45c7323d8 rw,relatime - aufs none rw,si=9b4a7642d85f739c +246 15 0:3568 / /var/lib/docker/aufs/mnt/dda42c191e56becf672327658ab84fcb563322db3764b91c2fefe4aaef04c624 rw,relatime - aufs none rw,si=9b4a7642d85f139c +247 15 0:3569 / /var/lib/docker/aufs/mnt/c0a7995053330f3d88969247a2e72b07e2dd692133f5668a4a35ea3905561072 rw,relatime - aufs none rw,si=9b4a7642d85f339c +249 15 0:3571 / /var/lib/docker/aufs/mnt/c3594b2e5f08c59ff5ed338a1ba1eceeeb1f7fc5d180068338110c00b1eb8502 rw,relatime - aufs none rw,si=9b4a7642738c739c +250 15 0:3572 / /var/lib/docker/aufs/mnt/c58dce03a0ab0a7588393880379dc3bce9f96ec08ed3f99cf1555260ff0031e8 rw,relatime - aufs none rw,si=9b4a7642738c139c +251 15 0:3573 / /var/lib/docker/aufs/mnt/c73e9f1d109c9d14cb36e1c7489df85649be3911116d76c2fd3648ec8fd94e23 rw,relatime - aufs none rw,si=9b4a7642738c339c +252 15 0:3574 / /var/lib/docker/aufs/mnt/c9eef28c344877cd68aa09e543c0710ab2b305a0ff96dbb859bfa7808c3e8d01 rw,relatime - aufs none rw,si=9b4a7642d85f439c +253 15 0:3575 / /var/lib/docker/aufs/mnt/feb67148f548d70cb7484f2aaad2a86051cd6867a561741a2f13b552457d666e rw,relatime - aufs none rw,si=9b4a76468c55739c +254 15 0:3576 / /var/lib/docker/aufs/mnt/cdf1f96c36d35a96041a896bf398ec0f7dc3b0fb0643612a0f4b6ff96e04e1bb rw,relatime - aufs none rw,si=9b4a76468c55139c +255 15 0:3577 / /var/lib/docker/aufs/mnt/ec6e505872353268451ac4bc034c1df00f3bae4a3ea2261c6e48f7bd5417c1b3 rw,relatime - aufs none rw,si=9b4a76468c55339c +256 15 0:3578 / /var/lib/docker/aufs/mnt/d6dc8aca64efd90e0bc10274001882d0efb310d42ccbf5712b99b169053b8b1a rw,relatime - aufs none rw,si=9b4a7642738c439c +257 15 0:3579 / /var/lib/docker/aufs/mnt/d712594e2ff6eaeb895bfd150d694bd1305fb927e7a186b2dab7df2ea95f8f81 rw,relatime - aufs none rw,si=9b4a76401268f39c +259 15 0:3581 / /var/lib/docker/aufs/mnt/dbfa1174cd78cde2d7410eae442af0b416c4a0e6f87ed4ff1e9f169a0029abc0 rw,relatime - aufs none rw,si=9b4a76401268b39c +260 15 0:3582 / /var/lib/docker/aufs/mnt/e883f5a82316d7856fbe93ee8c0af5a920b7079619dd95c4ffd88bbd309d28dd rw,relatime - aufs none rw,si=9b4a76468c55439c +261 15 0:3583 / /var/lib/docker/aufs/mnt/fdec3eff581c4fc2b09f87befa2fa021f3f2d373bea636a87f1fb5b367d6347a rw,relatime - aufs none rw,si=9b4a7644aa1af39c +262 15 0:3584 / /var/lib/docker/aufs/mnt/ef764e26712184653067ecf7afea18a80854c41331ca0f0ef03e1bacf90a6ffc rw,relatime - aufs none rw,si=9b4a7644aa1a939c +263 15 0:3585 / /var/lib/docker/aufs/mnt/f3176b40c41fce8ce6942936359a2001a6f1b5c1bb40ee224186db0789ec2f76 rw,relatime - aufs none rw,si=9b4a7644aa1ab39c +264 15 0:3586 / /var/lib/docker/aufs/mnt/f5daf06785d3565c6dd18ea7d953d9a8b9606107781e63270fe0514508736e6a rw,relatime - aufs none rw,si=9b4a76401268c39c +58 15 0:3587 / 
/var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8-init rw,relatime - aufs none rw,si=9b4a76444445839c +67 15 0:3588 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8 rw,relatime - aufs none rw,si=9b4a7644badd339c +265 15 0:3610 / /var/lib/docker/aufs/mnt/e812472cd2c8c4748d1ef71fac4e77e50d661b9349abe66ce3e23511ed44f414 rw,relatime - aufs none rw,si=9b4a76427937d39c +270 15 0:3615 / /var/lib/docker/aufs/mnt/997636e7c5c9d0d1376a217e295c14c205350b62bc12052804fb5f90abe6f183 rw,relatime - aufs none rw,si=9b4a76406540739c +273 15 0:3618 / /var/lib/docker/aufs/mnt/d5794d080417b6e52e69227c3873e0e4c1ff0d5a845ebe3860ec2f89a47a2a1e rw,relatime - aufs none rw,si=9b4a76454814039c +278 15 0:3623 / /var/lib/docker/aufs/mnt/586bdd48baced671bb19bc4d294ec325f26c55545ae267db426424f157d59c48 rw,relatime - aufs none rw,si=9b4a7644b439f39c +281 15 0:3626 / /var/lib/docker/aufs/mnt/69739d022f89f8586908bbd5edbbdd95ea5256356f177f9ffcc6ef9c0ea752d2 rw,relatime - aufs none rw,si=9b4a7644a0f1b39c +286 15 0:3631 / /var/lib/docker/aufs/mnt/ff28c27d5f894363993622de26d5dd352dba072f219e4691d6498c19bbbc15a9 rw,relatime - aufs none rw,si=9b4a7642265b339c +289 15 0:3634 / /var/lib/docker/aufs/mnt/aa128fe0e64fdede333aa48fd9de39530c91a9244a0f0649a3c411c61e372daa rw,relatime - aufs none rw,si=9b4a764012ada39c +99 15 8:33 / /media/REMOVE\040ME rw,nosuid,nodev,relatime - fuseblk /dev/sdc1 rw,user_id=0,group_id=0,allow_other,blksize=4096` +) + +func TestParseFedoraMountinfo(t *testing.T) { + r := bytes.NewBuffer([]byte(fedoraMountinfo)) + _, err := parseInfoFile(r) + if err != nil { + t.Fatal(err) + } +} + +func TestParseUbuntuMountinfo(t *testing.T) { + r := bytes.NewBuffer([]byte(ubuntuMountInfo)) + _, err := parseInfoFile(r) + if err != nil { + t.Fatal(err) + } +} + +func TestParseGentooMountinfo(t *testing.T) { + r := bytes.NewBuffer([]byte(gentooMountinfo)) + _, err := parseInfoFile(r) + if err != nil { + t.Fatal(err) + } +} + +func TestParseFedoraMountinfoFields(t *testing.T) { + r := bytes.NewBuffer([]byte(fedoraMountinfo)) + infos, err := parseInfoFile(r) + if err != nil { + t.Fatal(err) + } + expectedLength := 58 + if len(infos) != expectedLength { + t.Fatalf("Expected %d entries, got %d", expectedLength, len(infos)) + } + mi := Info{ + ID: 15, + Parent: 35, + Major: 0, + Minor: 3, + Root: "/", + Mountpoint: "/proc", + Opts: "rw,nosuid,nodev,noexec,relatime", + Optional: "shared:5", + Fstype: "proc", + Source: "proc", + VfsOpts: "rw", + } + + if *infos[0] != mi { + t.Fatalf("expected %#v, got %#v", mi, infos[0]) + } +} diff --git a/vendor/github.com/moby/moby/pkg/mount/mountinfo_solaris.go b/vendor/github.com/moby/moby/pkg/mount/mountinfo_solaris.go new file mode 100644 index 000000000..ad9ab57f8 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/mount/mountinfo_solaris.go @@ -0,0 +1,37 @@ +// +build solaris,cgo + +package mount + +/* +#include <stdio.h> +#include <sys/mnttab.h> +*/ +import "C" + +import ( + "fmt" +) + +func parseMountTable() ([]*Info, error) { + mnttab := C.fopen(C.CString(C.MNTTAB), C.CString("r")) + if mnttab == nil { + return nil, fmt.Errorf("Failed to open %s", C.MNTTAB) + } + + var out []*Info + var mp C.struct_mnttab + + ret := C.getmntent(mnttab, &mp) + for ret == 0 { + var mountinfo Info + mountinfo.Mountpoint = C.GoString(mp.mnt_mountp) + mountinfo.Source = C.GoString(mp.mnt_special) + mountinfo.Fstype = C.GoString(mp.mnt_fstype) + mountinfo.Opts = C.GoString(mp.mnt_mntopts) + out = append(out, &mountinfo) + ret = C.getmntent(mnttab, &mp)
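+ // getmntent fills mp with the next mnttab entry; it returns 0 for each entry successfully read and a non-zero value at EOF or on a malformed entry, which is what terminates this loop once the table is exhausted.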
+ } + + C.fclose(mnttab) + return out, nil +} diff --git a/vendor/github.com/moby/moby/pkg/mount/mountinfo_unsupported.go b/vendor/github.com/moby/moby/pkg/mount/mountinfo_unsupported.go new file mode 100644 index 000000000..7fbcf1921 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/mount/mountinfo_unsupported.go @@ -0,0 +1,12 @@ +// +build !windows,!linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo + +package mount + +import ( + "fmt" + "runtime" +) + +func parseMountTable() ([]*Info, error) { + return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} diff --git a/vendor/github.com/moby/moby/pkg/mount/mountinfo_windows.go b/vendor/github.com/moby/moby/pkg/mount/mountinfo_windows.go new file mode 100644 index 000000000..dab8a37ed --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/mount/mountinfo_windows.go @@ -0,0 +1,6 @@ +package mount + +func parseMountTable() ([]*Info, error) { + // Do NOT return an error! + return nil, nil +} diff --git a/vendor/github.com/moby/moby/pkg/mount/sharedsubtree_linux.go b/vendor/github.com/moby/moby/pkg/mount/sharedsubtree_linux.go new file mode 100644 index 000000000..8ceec84bc --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/mount/sharedsubtree_linux.go @@ -0,0 +1,69 @@ +// +build linux + +package mount + +// MakeShared ensures a mounted filesystem has the SHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "shared") +} + +// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "rshared") +} + +// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. +// See the supported options in flags.go for further reference. +func MakePrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "private") +} + +// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeRPrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "rprivate") +} + +// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "slave") +} + +// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "rslave") +} + +// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "unbindable") +} + +// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount +// option enabled. See the supported options in flags.go for further reference. 
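+// +// A minimal usage sketch for these propagation helpers (the path is hypothetical and error handling is elided): +// +//	if err := MakeRShared("/var/lib/docker"); err != nil { +//		// handle the error +//	} +// +// All of the helpers above delegate to ensureMountedAs, which bind-mounts the path onto itself when it is not already a mount point (propagation flags can only be applied to an existing mount) and then remounts it with the requested option via ForceMount.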
+func MakeRUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "runbindable") +} + +func ensureMountedAs(mountPoint, options string) error { + mounted, err := Mounted(mountPoint) + if err != nil { + return err + } + + if !mounted { + if err := Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil { + return err + } + } + if _, err = Mounted(mountPoint); err != nil { + return err + } + + return ForceMount("", mountPoint, "none", options) +} diff --git a/vendor/github.com/moby/moby/pkg/mount/sharedsubtree_linux_test.go b/vendor/github.com/moby/moby/pkg/mount/sharedsubtree_linux_test.go new file mode 100644 index 000000000..f25ab19fe --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/mount/sharedsubtree_linux_test.go @@ -0,0 +1,332 @@ +// +build linux + +package mount + +import ( + "os" + "path" + "testing" + + "golang.org/x/sys/unix" +) + +// nothing is propagated in or out +func TestSubtreePrivate(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + outside1Dir = path.Join(tmp, "outside1") + outside2Dir = path.Join(tmp, "outside2") + + outside1Path = path.Join(outside1Dir, "file.txt") + outside2Path = path.Join(outside2Dir, "file.txt") + outside1CheckPath = path.Join(targetDir, "a", "file.txt") + outside2CheckPath = path.Join(sourceDir, "b", "file.txt") + ) + if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(path.Join(sourceDir, "b"), 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(targetDir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outside1Dir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outside2Dir, 0777); err != nil { + t.Fatal(err) + } + + if err := createFile(outside1Path); err != nil { + t.Fatal(err) + } + if err := createFile(outside2Path); err != nil { + t.Fatal(err) + } + + // mount the shared directory to a target + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // next, make the target private + if err := MakePrivate(targetDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // mount in an outside path to a mounted path inside the _source_ + if err := Mount(outside1Dir, path.Join(sourceDir, "a"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(sourceDir, "a")); err != nil { + t.Fatal(err) + } + }() + + // check that this file _does_not_ show in the _target_ + if _, err := os.Stat(outside1CheckPath); err != nil && !os.IsNotExist(err) { + t.Fatal(err) + } else if err == nil { + t.Fatalf("%q should not be visible, but is", outside1CheckPath) + } + + // next mount outside2Dir into the _target_ + if err := Mount(outside2Dir, path.Join(targetDir, "b"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(targetDir, "b")); err != nil { + t.Fatal(err) + } + }() + + // check that this file _does_not_ show in the _source_ + if _, err := os.Stat(outside2CheckPath); err != nil && !os.IsNotExist(err) { + t.Fatal(err) + } else if err == nil { + t.Fatalf("%q should not be visible, but is", outside2CheckPath) + } +} + +// Testing that when a target is a shared 
mount, +// then child mounts propagate to the source +func TestSubtreeShared(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + outsideDir = path.Join(tmp, "outside") + + outsidePath = path.Join(outsideDir, "file.txt") + sourceCheckPath = path.Join(sourceDir, "a", "file.txt") + ) + + if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(targetDir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outsideDir, 0777); err != nil { + t.Fatal(err) + } + + if err := createFile(outsidePath); err != nil { + t.Fatal(err) + } + + // mount the source as shared + if err := MakeShared(sourceDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(sourceDir); err != nil { + t.Fatal(err) + } + }() + + // mount the shared directory to a target + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // mount in an outside path to a mounted path inside the target + if err := Mount(outsideDir, path.Join(targetDir, "a"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(targetDir, "a")); err != nil { + t.Fatal(err) + } + }() + + // NOW, check that the file from the outside directory is available in the source directory + if _, err := os.Stat(sourceCheckPath); err != nil { + t.Fatal(err) + } +} + +// testing that mounts to a shared source show up in the slave target, +// and that mounts into a slave target do _not_ show up in the shared source +func TestSubtreeSharedSlave(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + outside1Dir = path.Join(tmp, "outside1") + outside2Dir = path.Join(tmp, "outside2") + + outside1Path = path.Join(outside1Dir, "file.txt") + outside2Path = path.Join(outside2Dir, "file.txt") + outside1CheckPath = path.Join(targetDir, "a", "file.txt") + outside2CheckPath = path.Join(sourceDir, "b", "file.txt") + ) + if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(path.Join(sourceDir, "b"), 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(targetDir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outside1Dir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outside2Dir, 0777); err != nil { + t.Fatal(err) + } + + if err := createFile(outside1Path); err != nil { + t.Fatal(err) + } + if err := createFile(outside2Path); err != nil { + t.Fatal(err) + } + + // mount the source as shared + if err := MakeShared(sourceDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(sourceDir); err != nil { + t.Fatal(err) + } + }() + + // mount the shared directory to a target + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // next, make the target slave + if err := MakeSlave(targetDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // mount in an 
outside path to a mounted path inside the _source_ + if err := Mount(outside1Dir, path.Join(sourceDir, "a"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(sourceDir, "a")); err != nil { + t.Fatal(err) + } + }() + + // check that this file _does_ show in the _target_ + if _, err := os.Stat(outside1CheckPath); err != nil { + t.Fatal(err) + } + + // next mount outside2Dir into the _target_ + if err := Mount(outside2Dir, path.Join(targetDir, "b"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(targetDir, "b")); err != nil { + t.Fatal(err) + } + }() + + // check that this file _does_not_ show in the _source_ + if _, err := os.Stat(outside2CheckPath); err != nil && !os.IsNotExist(err) { + t.Fatal(err) + } else if err == nil { + t.Fatalf("%q should not be visible, but is", outside2CheckPath) + } +} + +func TestSubtreeUnbindable(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + ) + if err := os.MkdirAll(sourceDir, 0777); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(targetDir, 0777); err != nil { + t.Fatal(err) + } + + // next, make the source unbindable + if err := MakeUnbindable(sourceDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(sourceDir); err != nil { + t.Fatal(err) + } + }() + + // then attempt to mount it to target. It should fail + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil && err != unix.EINVAL { + t.Fatal(err) + } else if err == nil { + t.Fatalf("%q should not have been bindable", sourceDir) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() +} + +func createFile(path string) error { + f, err := os.Create(path) + if err != nil { + return err + } + f.WriteString("hello world!") + return f.Close() +} diff --git a/vendor/github.com/moby/moby/pkg/mount/sharedsubtree_solaris.go b/vendor/github.com/moby/moby/pkg/mount/sharedsubtree_solaris.go new file mode 100644 index 000000000..09f6b03cb --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/mount/sharedsubtree_solaris.go @@ -0,0 +1,58 @@ +// +build solaris + +package mount + +// MakeShared ensures a mounted filesystem has the SHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "shared") +} + +// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "rshared") +} + +// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. +// See the supported options in flags.go for further reference. +func MakePrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "private") +} + +// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeRPrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "rprivate") +} + +// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. +// See the supported options in flags.go for further reference. 
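+// +// Note that on Solaris these helpers currently succeed without changing the mount: ensureMountedAs at the end of this file is a stub pending an evaluation of lofs and the relevant mount flags (see its TODO).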
+func MakeSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "slave") +} + +// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "rslave") +} + +// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "unbindable") +} + +// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount +// option enabled. See the supported options in flags.go for further reference. +func MakeRUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "runbindable") +} + +func ensureMountedAs(mountPoint, options string) error { + // TODO: Solaris does not support bind mounts. + // Evaluate lofs and also look at the relevant + // mount flags to be supported. + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/namesgenerator/cmd/names-generator/main.go b/vendor/github.com/moby/moby/pkg/namesgenerator/cmd/names-generator/main.go new file mode 100644 index 000000000..18a939b70 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/namesgenerator/cmd/names-generator/main.go @@ -0,0 +1,11 @@ +package main + +import ( + "fmt" + + "github.com/docker/docker/pkg/namesgenerator" +) + +func main() { + fmt.Println(namesgenerator.GetRandomName(0)) +} diff --git a/vendor/github.com/moby/moby/pkg/namesgenerator/names-generator.go b/vendor/github.com/moby/moby/pkg/namesgenerator/names-generator.go new file mode 100644 index 000000000..2f869ed92 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/namesgenerator/names-generator.go @@ -0,0 +1,606 @@ +package namesgenerator + +import ( + "fmt" + "math/rand" +) + +var ( + left = [...]string{ + "admiring", + "adoring", + "affectionate", + "agitated", + "amazing", + "angry", + "awesome", + "blissful", + "boring", + "brave", + "clever", + "cocky", + "compassionate", + "competent", + "condescending", + "confident", + "cranky", + "dazzling", + "determined", + "distracted", + "dreamy", + "eager", + "ecstatic", + "elastic", + "elated", + "elegant", + "eloquent", + "epic", + "fervent", + "festive", + "flamboyant", + "focused", + "friendly", + "frosty", + "gallant", + "gifted", + "goofy", + "gracious", + "happy", + "hardcore", + "heuristic", + "hopeful", + "hungry", + "infallible", + "inspiring", + "jolly", + "jovial", + "keen", + "kind", + "laughing", + "loving", + "lucid", + "mystifying", + "modest", + "musing", + "naughty", + "nervous", + "nifty", + "nostalgic", + "objective", + "optimistic", + "peaceful", + "pedantic", + "pensive", + "practical", + "priceless", + "quirky", + "quizzical", + "relaxed", + "reverent", + "romantic", + "sad", + "serene", + "sharp", + "silly", + "sleepy", + "stoic", + "stupefied", + "suspicious", + "tender", + "thirsty", + "trusting", + "unruffled", + "upbeat", + "vibrant", + "vigilant", + "vigorous", + "wizardly", + "wonderful", + "xenodochial", + "youthful", + "zealous", + "zen", + } + + // Docker, starting from 0.7.x, generates names from notable scientists and hackers. + // Please, for any amazing man that you add to the list, consider adding an equally amazing woman to it, and vice versa. + right = [...]string{ + // Muhammad ibn Jābir al-Ḥarrānī al-Battānī was a founding father of astronomy. 
https://en.wikipedia.org/wiki/Mu%E1%B8%A5ammad_ibn_J%C4%81bir_al-%E1%B8%A4arr%C4%81n%C4%AB_al-Batt%C4%81n%C4%AB + "albattani", + + // Frances E. Allen, became the first female IBM Fellow in 1989. In 2006, she became the first female recipient of the ACM's Turing Award. https://en.wikipedia.org/wiki/Frances_E._Allen + "allen", + + // June Almeida - Scottish virologist who took the first pictures of the rubella virus - https://en.wikipedia.org/wiki/June_Almeida + "almeida", + + // Maria Gaetana Agnesi - Italian mathematician, philosopher, theologian and humanitarian. She was the first woman to write a mathematics handbook and the first woman appointed as a Mathematics Professor at a University. https://en.wikipedia.org/wiki/Maria_Gaetana_Agnesi + "agnesi", + + // Archimedes was a physicist, engineer and mathematician who invented too many things to list them here. https://en.wikipedia.org/wiki/Archimedes + "archimedes", + + // Maria Ardinghelli - Italian translator, mathematician and physicist - https://en.wikipedia.org/wiki/Maria_Ardinghelli + "ardinghelli", + + // Aryabhata - Ancient Indian mathematician-astronomer during 476-550 CE https://en.wikipedia.org/wiki/Aryabhata + "aryabhata", + + // Wanda Austin - Wanda Austin is the President and CEO of The Aerospace Corporation, a leading architect for the US security space programs. https://en.wikipedia.org/wiki/Wanda_Austin + "austin", + + // Charles Babbage invented the concept of a programmable computer. https://en.wikipedia.org/wiki/Charles_Babbage. + "babbage", + + // Stefan Banach - Polish mathematician, was one of the founders of modern functional analysis. https://en.wikipedia.org/wiki/Stefan_Banach + "banach", + + // John Bardeen co-invented the transistor - https://en.wikipedia.org/wiki/John_Bardeen + "bardeen", + + // Jean Bartik, born Betty Jean Jennings, was one of the original programmers for the ENIAC computer. https://en.wikipedia.org/wiki/Jean_Bartik + "bartik", + + // Laura Bassi, the world's first female professor https://en.wikipedia.org/wiki/Laura_Bassi + "bassi", + + // Hugh Beaver, British engineer, founder of the Guinness Book of World Records https://en.wikipedia.org/wiki/Hugh_Beaver + "beaver", + + // Alexander Graham Bell - an eminent Scottish-born scientist, inventor, engineer and innovator who is credited with inventing the first practical telephone - https://en.wikipedia.org/wiki/Alexander_Graham_Bell + "bell", + + // Karl Friedrich Benz - a German automobile engineer. Inventor of the first practical motorcar. https://en.wikipedia.org/wiki/Karl_Benz + "benz", + + // Homi J Bhabha - was an Indian nuclear physicist, founding director, and professor of physics at the Tata Institute of Fundamental Research. Colloquially known as "father of Indian nuclear programme"- https://en.wikipedia.org/wiki/Homi_J._Bhabha + "bhabha", + + // Bhaskara II - Ancient Indian mathematician-astronomer whose work on calculus predates Newton and Leibniz by over half a millennium - https://en.wikipedia.org/wiki/Bh%C4%81skara_II#Calculus + "bhaskara", + + // Elizabeth Blackwell - American doctor and first American woman to receive a medical degree - https://en.wikipedia.org/wiki/Elizabeth_Blackwell + "blackwell", + + // Niels Bohr is the father of quantum theory. https://en.wikipedia.org/wiki/Niels_Bohr. + "bohr", + + // Kathleen Booth, she's credited with writing the first assembly language. 
https://en.wikipedia.org/wiki/Kathleen_Booth + "booth", + + // Anita Borg - Anita Borg was the founding director of the Institute for Women and Technology (IWT). https://en.wikipedia.org/wiki/Anita_Borg + "borg", + + // Satyendra Nath Bose - He provided the foundation for Bose–Einstein statistics and the theory of the Bose–Einstein condensate. - https://en.wikipedia.org/wiki/Satyendra_Nath_Bose + "bose", + + // Evelyn Boyd Granville - She was one of the first African-American woman to receive a Ph.D. in mathematics; she earned it in 1949 from Yale University. https://en.wikipedia.org/wiki/Evelyn_Boyd_Granville + "boyd", + + // Brahmagupta - Ancient Indian mathematician during 598-670 CE who gave rules to compute with zero - https://en.wikipedia.org/wiki/Brahmagupta#Zero + "brahmagupta", + + // Walter Houser Brattain co-invented the transistor - https://en.wikipedia.org/wiki/Walter_Houser_Brattain + "brattain", + + // Emmett Brown invented time travel. https://en.wikipedia.org/wiki/Emmett_Brown (thanks Brian Goff) + "brown", + + // Rachel Carson - American marine biologist and conservationist, her book Silent Spring and other writings are credited with advancing the global environmental movement. https://en.wikipedia.org/wiki/Rachel_Carson + "carson", + + // Subrahmanyan Chandrasekhar - Astrophysicist known for his mathematical theory on different stages and evolution in structures of the stars. He has won nobel prize for physics - https://en.wikipedia.org/wiki/Subrahmanyan_Chandrasekhar + "chandrasekhar", + + //Claude Shannon - The father of information theory and founder of digital circuit design theory. (https://en.wikipedia.org/wiki/Claude_Shannon) + "shannon", + + // Joan Clarke - Bletchley Park code breaker during the Second World War who pioneered techniques that remained top secret for decades. Also an accomplished numismatist https://en.wikipedia.org/wiki/Joan_Clarke + "clarke", + + // Jane Colden - American botanist widely considered the first female American botanist - https://en.wikipedia.org/wiki/Jane_Colden + "colden", + + // Gerty Theresa Cori - American biochemist who became the third woman—and first American woman—to win a Nobel Prize in science, and the first woman to be awarded the Nobel Prize in Physiology or Medicine. Cori was born in Prague. https://en.wikipedia.org/wiki/Gerty_Cori + "cori", + + // Seymour Roger Cray was an American electrical engineer and supercomputer architect who designed a series of computers that were the fastest in the world for decades. https://en.wikipedia.org/wiki/Seymour_Cray + "cray", + + // This entry reflects a husband and wife team who worked together: + // Joan Curran was a Welsh scientist who developed radar and invented chaff, a radar countermeasure. https://en.wikipedia.org/wiki/Joan_Curran + // Samuel Curran was an Irish physicist who worked alongside his wife during WWII and invented the proximity fuse. https://en.wikipedia.org/wiki/Samuel_Curran + "curran", + + // Marie Curie discovered radioactivity. https://en.wikipedia.org/wiki/Marie_Curie. + "curie", + + // Charles Darwin established the principles of natural evolution. https://en.wikipedia.org/wiki/Charles_Darwin. + "darwin", + + // Leonardo Da Vinci invented too many things to list here. https://en.wikipedia.org/wiki/Leonardo_da_Vinci. + "davinci", + + // Edsger Wybe Dijkstra was a Dutch computer scientist and mathematical scientist. https://en.wikipedia.org/wiki/Edsger_W._Dijkstra. 
+ "dijkstra", + + // Donna Dubinsky - played an integral role in the development of personal digital assistants (PDAs) serving as CEO of Palm, Inc. and co-founding Handspring. https://en.wikipedia.org/wiki/Donna_Dubinsky + "dubinsky", + + // Annie Easley - She was a leading member of the team which developed software for the Centaur rocket stage and one of the first African-Americans in her field. https://en.wikipedia.org/wiki/Annie_Easley + "easley", + + // Thomas Alva Edison, prolific inventor https://en.wikipedia.org/wiki/Thomas_Edison + "edison", + + // Albert Einstein invented the general theory of relativity. https://en.wikipedia.org/wiki/Albert_Einstein + "einstein", + + // Gertrude Elion - American biochemist, pharmacologist and the 1988 recipient of the Nobel Prize in Medicine - https://en.wikipedia.org/wiki/Gertrude_Elion + "elion", + + // Douglas Engelbart gave the mother of all demos: https://en.wikipedia.org/wiki/Douglas_Engelbart + "engelbart", + + // Euclid invented geometry. https://en.wikipedia.org/wiki/Euclid + "euclid", + + // Leonhard Euler invented large parts of modern mathematics. https://de.wikipedia.org/wiki/Leonhard_Euler + "euler", + + // Pierre de Fermat pioneered several aspects of modern mathematics. https://en.wikipedia.org/wiki/Pierre_de_Fermat + "fermat", + + // Enrico Fermi invented the first nuclear reactor. https://en.wikipedia.org/wiki/Enrico_Fermi. + "fermi", + + // Richard Feynman was a key contributor to quantum mechanics and particle physics. https://en.wikipedia.org/wiki/Richard_Feynman + "feynman", + + // Benjamin Franklin is famous for his experiments in electricity and the invention of the lightning rod. + "franklin", + + // Galileo was a founding father of modern astronomy, and faced politics and obscurantism to establish scientific truth. https://en.wikipedia.org/wiki/Galileo_Galilei + "galileo", + + // William Henry "Bill" Gates III is an American business magnate, philanthropist, investor, computer programmer, and inventor. https://en.wikipedia.org/wiki/Bill_Gates + "gates", + + // Adele Goldberg, was one of the designers and developers of the Smalltalk language. https://en.wikipedia.org/wiki/Adele_Goldberg_(computer_scientist) + "goldberg", + + // Adele Goldstine, born Adele Katz, wrote the complete technical description for the first electronic digital computer, ENIAC. https://en.wikipedia.org/wiki/Adele_Goldstine + "goldstine", + + // Shafi Goldwasser is a computer scientist known for creating theoretical foundations of modern cryptography. Winner of 2012 ACM Turing Award. https://en.wikipedia.org/wiki/Shafi_Goldwasser + "goldwasser", + + // James Golick, all around gangster. + "golick", + + // Jane Goodall - British primatologist, ethologist, and anthropologist who is considered to be the world's foremost expert on chimpanzees - https://en.wikipedia.org/wiki/Jane_Goodall + "goodall", + + // Lois Haibt - American computer scientist, part of the team at IBM that developed FORTRAN - https://en.wikipedia.org/wiki/Lois_Haibt + "haibt", + + // Margaret Hamilton - Director of the Software Engineering Division of the MIT Instrumentation Laboratory, which developed on-board flight software for the Apollo space program. https://en.wikipedia.org/wiki/Margaret_Hamilton_(scientist) + "hamilton", + + // Stephen Hawking pioneered the field of cosmology by combining general relativity and quantum mechanics. https://en.wikipedia.org/wiki/Stephen_Hawking + "hawking", + + // Werner Heisenberg was a founding father of quantum mechanics. 
https://en.wikipedia.org/wiki/Werner_Heisenberg + "heisenberg", + + // Grete Hermann was a German philosopher noted for her philosophical work on the foundations of quantum mechanics. https://en.wikipedia.org/wiki/Grete_Hermann + "hermann", + + // Jaroslav Heyrovský was the inventor of the polarographic method, father of the electroanalytical method, and recipient of the Nobel Prize in 1959. His main field of work was polarography. https://en.wikipedia.org/wiki/Jaroslav_Heyrovsk%C3%BD + "heyrovsky", + + // Dorothy Hodgkin was a British biochemist, credited with the development of protein crystallography. She was awarded the Nobel Prize in Chemistry in 1964. https://en.wikipedia.org/wiki/Dorothy_Hodgkin + "hodgkin", + + // Erna Schneider Hoover revolutionized modern communication by inventing a computerized telephone switching method. https://en.wikipedia.org/wiki/Erna_Schneider_Hoover + "hoover", + + // Grace Hopper developed the first compiler for a computer programming language and is credited with popularizing the term "debugging" for fixing computer glitches. https://en.wikipedia.org/wiki/Grace_Hopper + "hopper", + + // Frances Hugle, she was an American scientist, engineer, and inventor who contributed to the understanding of semiconductors, integrated circuitry, and the unique electrical principles of microscopic materials. https://en.wikipedia.org/wiki/Frances_Hugle + "hugle", + + // Hypatia - Greek Alexandrine Neoplatonist philosopher in Egypt who was one of the earliest mothers of mathematics - https://en.wikipedia.org/wiki/Hypatia + "hypatia", + + // Mary Jackson, American mathematician and aerospace engineer who earned the highest title within NASA's engineering department - https://en.wikipedia.org/wiki/Mary_Jackson_(engineer) + "jackson", + + // Yeong-Sil Jang was a Korean scientist and astronomer during the Joseon Dynasty; he invented the first metal printing press and water gauge. https://en.wikipedia.org/wiki/Jang_Yeong-sil + "jang", + + // Betty Jennings - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Jean_Bartik + "jennings", + + // Mary Lou Jepsen, was the founder and chief technology officer of One Laptop Per Child (OLPC), and the founder of Pixel Qi. https://en.wikipedia.org/wiki/Mary_Lou_Jepsen + "jepsen", + + // Katherine Coleman Goble Johnson - American physicist and mathematician contributed to the NASA. https://en.wikipedia.org/wiki/Katherine_Johnson + "johnson", + + // Irène Joliot-Curie - French scientist who was awarded the Nobel Prize for Chemistry in 1935. Daughter of Marie and Pierre Curie. https://en.wikipedia.org/wiki/Ir%C3%A8ne_Joliot-Curie + "joliot", + + // Karen Spärck Jones came up with the concept of inverse document frequency, which is used in most search engines today. https://en.wikipedia.org/wiki/Karen_Sp%C3%A4rck_Jones + "jones", + + // A. P. J. Abdul Kalam - is an Indian scientist aka Missile Man of India for his work on the development of ballistic missile and launch vehicle technology - https://en.wikipedia.org/wiki/A._P._J._Abdul_Kalam + "kalam", + + // Susan Kare, created the icons and many of the interface elements for the original Apple Macintosh in the 1980s, and was an original employee of NeXT, working as the Creative Director. https://en.wikipedia.org/wiki/Susan_Kare + "kare", + + // Mary Kenneth Keller, Sister Mary Kenneth Keller became the first American woman to earn a PhD in Computer Science in 1965. 
https://en.wikipedia.org/wiki/Mary_Kenneth_Keller + "keller", + + // Johannes Kepler, German astronomer known for his three laws of planetary motion - https://en.wikipedia.org/wiki/Johannes_Kepler + "kepler", + + // Har Gobind Khorana - Indian-American biochemist who shared the 1968 Nobel Prize for Physiology - https://en.wikipedia.org/wiki/Har_Gobind_Khorana + "khorana", + + // Jack Kilby invented silicone integrated circuits and gave Silicon Valley its name. - https://en.wikipedia.org/wiki/Jack_Kilby + "kilby", + + // Maria Kirch - German astronomer and first woman to discover a comet - https://en.wikipedia.org/wiki/Maria_Margarethe_Kirch + "kirch", + + // Donald Knuth - American computer scientist, author of "The Art of Computer Programming" and creator of the TeX typesetting system. https://en.wikipedia.org/wiki/Donald_Knuth + "knuth", + + // Sophie Kowalevski - Russian mathematician responsible for important original contributions to analysis, differential equations and mechanics - https://en.wikipedia.org/wiki/Sofia_Kovalevskaya + "kowalevski", + + // Marie-Jeanne de Lalande - French astronomer, mathematician and cataloguer of stars - https://en.wikipedia.org/wiki/Marie-Jeanne_de_Lalande + "lalande", + + // Hedy Lamarr - Actress and inventor. The principles of her work are now incorporated into modern Wi-Fi, CDMA and Bluetooth technology. https://en.wikipedia.org/wiki/Hedy_Lamarr + "lamarr", + + // Leslie B. Lamport - American computer scientist. Lamport is best known for his seminal work in distributed systems and was the winner of the 2013 Turing Award. https://en.wikipedia.org/wiki/Leslie_Lamport + "lamport", + + // Mary Leakey - British paleoanthropologist who discovered the first fossilized Proconsul skull - https://en.wikipedia.org/wiki/Mary_Leakey + "leakey", + + // Henrietta Swan Leavitt - she was an American astronomer who discovered the relation between the luminosity and the period of Cepheid variable stars. https://en.wikipedia.org/wiki/Henrietta_Swan_Leavitt + "leavitt", + + //Daniel Lewin - Mathematician, Akamai co-founder, soldier, 9/11 victim-- Developed optimization techniques for routing traffic on the internet. Died attempting to stop the 9-11 hijackers. https://en.wikipedia.org/wiki/Daniel_Lewin + "lewin", + + // Ruth Lichterman - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Ruth_Teitelbaum + "lichterman", + + // Barbara Liskov - co-developed the Liskov substitution principle. Liskov was also the winner of the Turing Prize in 2008. - https://en.wikipedia.org/wiki/Barbara_Liskov + "liskov", + + // Ada Lovelace invented the first algorithm. https://en.wikipedia.org/wiki/Ada_Lovelace (thanks James Turnbull) + "lovelace", + + // Auguste and Louis Lumière - the first filmmakers in history - https://en.wikipedia.org/wiki/Auguste_and_Louis_Lumi%C3%A8re + "lumiere", + + // Mahavira - Ancient Indian mathematician during 9th century AD who discovered basic algebraic identities - https://en.wikipedia.org/wiki/Mah%C4%81v%C4%ABra_(mathematician) + "mahavira", + + // Maria Mayer - American theoretical physicist and Nobel laureate in Physics for proposing the nuclear shell model of the atomic nucleus - https://en.wikipedia.org/wiki/Maria_Mayer + "mayer", + + // John McCarthy invented LISP: https://en.wikipedia.org/wiki/John_McCarthy_(computer_scientist) + "mccarthy", + + // Barbara McClintock - a distinguished American cytogeneticist, 1983 Nobel Laureate in Physiology or Medicine for discovering transposons. 
https://en.wikipedia.org/wiki/Barbara_McClintock + "mcclintock", + + // Malcolm McLean invented the modern shipping container: https://en.wikipedia.org/wiki/Malcom_McLean + "mclean", + + // Kay McNulty - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Kathleen_Antonelli + "mcnulty", + + // Lise Meitner - Austrian/Swedish physicist who was involved in the discovery of nuclear fission. The element meitnerium is named after her - https://en.wikipedia.org/wiki/Lise_Meitner + "meitner", + + // Carla Meninsky, was the game designer and programmer for Atari 2600 games Dodge 'Em and Warlords. https://en.wikipedia.org/wiki/Carla_Meninsky + "meninsky", + + // Johanna Mestorf - German prehistoric archaeologist and first female museum director in Germany - https://en.wikipedia.org/wiki/Johanna_Mestorf + "mestorf", + + // Marvin Minsky - Pioneer in Artificial Intelligence, co-founder of the MIT's AI Lab, won the Turing Award in 1969. https://en.wikipedia.org/wiki/Marvin_Minsky + "minsky", + + // Maryam Mirzakhani - an Iranian mathematician and the first woman to win the Fields Medal. https://en.wikipedia.org/wiki/Maryam_Mirzakhani + "mirzakhani", + + // Samuel Morse - contributed to the invention of a single-wire telegraph system based on European telegraphs and was a co-developer of the Morse code - https://en.wikipedia.org/wiki/Samuel_Morse + "morse", + + // Ian Murdock - founder of the Debian project - https://en.wikipedia.org/wiki/Ian_Murdock + "murdock", + + // John von Neumann - todays computer architectures are based on the von Neumann architecture. https://en.wikipedia.org/wiki/Von_Neumann_architecture + "neumann", + + // Isaac Newton invented classic mechanics and modern optics. https://en.wikipedia.org/wiki/Isaac_Newton + "newton", + + // Florence Nightingale, more prominently known as a nurse, was also the first female member of the Royal Statistical Society and a pioneer in statistical graphics https://en.wikipedia.org/wiki/Florence_Nightingale#Statistics_and_sanitary_reform + "nightingale", + + // Alfred Nobel - a Swedish chemist, engineer, innovator, and armaments manufacturer (inventor of dynamite) - https://en.wikipedia.org/wiki/Alfred_Nobel + "nobel", + + // Emmy Noether, German mathematician. Noether's Theorem is named after her. https://en.wikipedia.org/wiki/Emmy_Noether + "noether", + + // Poppy Northcutt. Poppy Northcutt was the first woman to work as part of NASA’s Mission Control. http://www.businessinsider.com/poppy-northcutt-helped-apollo-astronauts-2014-12?op=1 + "northcutt", + + // Robert Noyce invented silicone integrated circuits and gave Silicon Valley its name. - https://en.wikipedia.org/wiki/Robert_Noyce + "noyce", + + // Panini - Ancient Indian linguist and grammarian from 4th century CE who worked on the world's first formal system - https://en.wikipedia.org/wiki/P%C4%81%E1%B9%87ini#Comparison_with_modern_formal_systems + "panini", + + // Ambroise Pare invented modern surgery. https://en.wikipedia.org/wiki/Ambroise_Par%C3%A9 + "pare", + + // Louis Pasteur discovered vaccination, fermentation and pasteurization. https://en.wikipedia.org/wiki/Louis_Pasteur. + "pasteur", + + // Cecilia Payne-Gaposchkin was an astronomer and astrophysicist who, in 1925, proposed in her Ph.D. thesis an explanation for the composition of stars in terms of the relative abundances of hydrogen and helium. 
https://en.wikipedia.org/wiki/Cecilia_Payne-Gaposchkin + "payne", + + // Radia Perlman is a software designer and network engineer and most famous for her invention of the spanning-tree protocol (STP). https://en.wikipedia.org/wiki/Radia_Perlman + "perlman", + + // Rob Pike was a key contributor to Unix, Plan 9, the X graphic system, utf-8, and the Go programming language. https://en.wikipedia.org/wiki/Rob_Pike + "pike", + + // Henri Poincaré made fundamental contributions in several fields of mathematics. https://en.wikipedia.org/wiki/Henri_Poincar%C3%A9 + "poincare", + + // Laura Poitras is a director and producer whose work, made possible by open source crypto tools, advances the causes of truth and freedom of information by reporting disclosures by whistleblowers such as Edward Snowden. https://en.wikipedia.org/wiki/Laura_Poitras + "poitras", + + // Claudius Ptolemy - a Greco-Egyptian writer of Alexandria, known as a mathematician, astronomer, geographer, astrologer, and poet of a single epigram in the Greek Anthology - https://en.wikipedia.org/wiki/Ptolemy + "ptolemy", + + // C. V. Raman - Indian physicist who won the Nobel Prize in 1930 for proposing the Raman effect. - https://en.wikipedia.org/wiki/C._V._Raman + "raman", + + // Srinivasa Ramanujan - Indian mathematician and autodidact who made extraordinary contributions to mathematical analysis, number theory, infinite series, and continued fractions. - https://en.wikipedia.org/wiki/Srinivasa_Ramanujan + "ramanujan", + + // Sally Kristen Ride was an American physicist and astronaut. She was the first American woman in space, and the youngest American astronaut. https://en.wikipedia.org/wiki/Sally_Ride + "ride", + + // Rita Levi-Montalcini - Won Nobel Prize in Physiology or Medicine jointly with colleague Stanley Cohen for the discovery of nerve growth factor (https://en.wikipedia.org/wiki/Rita_Levi-Montalcini) + "montalcini", + + // Dennis Ritchie - co-creator of UNIX and the C programming language. - https://en.wikipedia.org/wiki/Dennis_Ritchie + "ritchie", + + // Wilhelm Conrad Röntgen - German physicist who was awarded the first Nobel Prize in Physics in 1901 for the discovery of X-rays (Röntgen rays). https://en.wikipedia.org/wiki/Wilhelm_R%C3%B6ntgen + "roentgen", + + // Rosalind Franklin - British biophysicist and X-ray crystallographer whose research was critical to the understanding of DNA - https://en.wikipedia.org/wiki/Rosalind_Franklin + "rosalind", + + // Meghnad Saha - Indian astrophysicist best known for his development of the Saha equation, used to describe chemical and physical conditions in stars - https://en.wikipedia.org/wiki/Meghnad_Saha + "saha", + + // Jean E. Sammet developed FORMAC, the first widely used computer language for symbolic manipulation of mathematical formulas. https://en.wikipedia.org/wiki/Jean_E._Sammet + "sammet", + + // Carol Shaw - Originally an Atari employee, Carol Shaw is said to be the first female video game designer. https://en.wikipedia.org/wiki/Carol_Shaw_(video_game_designer) + "shaw", + + // Dame Stephanie "Steve" Shirley - Founded a software company in 1962 employing women working from home. https://en.wikipedia.org/wiki/Steve_Shirley + "shirley", + + // William Shockley co-invented the transistor - https://en.wikipedia.org/wiki/William_Shockley + "shockley", + + // Françoise Barré-Sinoussi - French virologist and Nobel Prize Laureate in Physiology or Medicine; her work was fundamental in identifying HIV as the cause of AIDS. 
https://en.wikipedia.org/wiki/Fran%C3%A7oise_Barr%C3%A9-Sinoussi + "sinoussi", + + // Betty Snyder - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Betty_Holberton + "snyder", + + // Frances Spence - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Frances_Spence + "spence", + + // Richard Matthew Stallman - the founder of the Free Software movement, the GNU project, the Free Software Foundation, and the League for Programming Freedom. He also invented the concept of copyleft to protect the ideals of this movement, and enshrined this concept in the widely-used GPL (General Public License) for software. https://en.wikiquote.org/wiki/Richard_Stallman + "stallman", + + // Michael Stonebraker is a database research pioneer and architect of Ingres, Postgres, VoltDB and SciDB. Winner of 2014 ACM Turing Award. https://en.wikipedia.org/wiki/Michael_Stonebraker + "stonebraker", + + // Janese Swanson (with others) developed the first of the Carmen Sandiego games. She went on to found Girl Tech. https://en.wikipedia.org/wiki/Janese_Swanson + "swanson", + + // Aaron Swartz was influential in creating RSS, Markdown, Creative Commons, Reddit, and much of the internet as we know it today. He was devoted to freedom of information on the web. https://en.wikiquote.org/wiki/Aaron_Swartz + "swartz", + + // Bertha Swirles was a theoretical physicist who made a number of contributions to early quantum theory. https://en.wikipedia.org/wiki/Bertha_Swirles + "swirles", + + // Nikola Tesla invented the AC electric system and every gadget ever used by a James Bond villain. https://en.wikipedia.org/wiki/Nikola_Tesla + "tesla", + + // Ken Thompson - co-creator of UNIX and the C programming language - https://en.wikipedia.org/wiki/Ken_Thompson + "thompson", + + // Linus Torvalds invented Linux and Git. https://en.wikipedia.org/wiki/Linus_Torvalds + "torvalds", + + // Alan Turing was a founding father of computer science. https://en.wikipedia.org/wiki/Alan_Turing. + "turing", + + // Varahamihira - Ancient Indian mathematician who discovered trigonometric formulae during 505-587 CE - https://en.wikipedia.org/wiki/Var%C4%81hamihira#Contributions + "varahamihira", + + // Sir Mokshagundam Visvesvaraya - is a notable Indian engineer. He is a recipient of the Indian Republic's highest honour, the Bharat Ratna, in 1955. On his birthday, 15 September is celebrated as Engineer's Day in India in his memory - https://en.wikipedia.org/wiki/Visvesvaraya + "visvesvaraya", + + // Christiane Nüsslein-Volhard - German biologist, won Nobel Prize in Physiology or Medicine in 1995 for research on the genetic control of embryonic development. https://en.wikipedia.org/wiki/Christiane_N%C3%BCsslein-Volhard + "volhard", + + // Marlyn Wescoff - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Marlyn_Meltzer + "wescoff", + + // Andrew Wiles - Notable British mathematician who proved the enigmatic Fermat's Last Theorem - https://en.wikipedia.org/wiki/Andrew_Wiles + "wiles", + + // Roberta Williams, did pioneering work in graphical adventure games for personal computers, particularly the King's Quest series. https://en.wikipedia.org/wiki/Roberta_Williams + "williams", + + // Sophie Wilson designed the first Acorn Micro-Computer and the instruction set for ARM processors. 
https://en.wikipedia.org/wiki/Sophie_Wilson + "wilson", + + // Jeannette Wing - co-developed the Liskov substitution principle. - https://en.wikipedia.org/wiki/Jeannette_Wing + "wing", + + // Steve Wozniak invented the Apple I and Apple II. https://en.wikipedia.org/wiki/Steve_Wozniak + "wozniak", + + // The Wright brothers, Orville and Wilbur - credited with inventing and building the world's first successful airplane and making the first controlled, powered and sustained heavier-than-air human flight - https://en.wikipedia.org/wiki/Wright_brothers + "wright", + + // Rosalyn Sussman Yalow - Rosalyn Sussman Yalow was an American medical physicist, and a co-winner of the 1977 Nobel Prize in Physiology or Medicine for development of the radioimmunoassay technique. https://en.wikipedia.org/wiki/Rosalyn_Sussman_Yalow + "yalow", + + // Ada Yonath - an Israeli crystallographer, the first woman from the Middle East to win a Nobel prize in the sciences. https://en.wikipedia.org/wiki/Ada_Yonath + "yonath", + } +) + +// GetRandomName generates a random name from the list of adjectives and surnames in this package +// formatted as "adjective_surname". For example 'focused_turing'. If retry is non-zero, a random +// integer between 0 and 10 will be added to the end of the name, e.g `focused_turing3` +func GetRandomName(retry int) string { +begin: + name := fmt.Sprintf("%s_%s", left[rand.Intn(len(left))], right[rand.Intn(len(right))]) + if name == "boring_wozniak" /* Steve Wozniak is not boring */ { + goto begin + } + + if retry > 0 { + name = fmt.Sprintf("%s%d", name, rand.Intn(10)) + } + return name +} diff --git a/vendor/github.com/moby/moby/pkg/namesgenerator/names-generator_test.go b/vendor/github.com/moby/moby/pkg/namesgenerator/names-generator_test.go new file mode 100644 index 000000000..d1a94977d --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/namesgenerator/names-generator_test.go @@ -0,0 +1,27 @@ +package namesgenerator + +import ( + "strings" + "testing" +) + +func TestNameFormat(t *testing.T) { + name := GetRandomName(0) + if !strings.Contains(name, "_") { + t.Fatalf("Generated name does not contain an underscore") + } + if strings.ContainsAny(name, "0123456789") { + t.Fatalf("Generated name contains numbers!") + } +} + +func TestNameRetries(t *testing.T) { + name := GetRandomName(1) + if !strings.Contains(name, "_") { + t.Fatalf("Generated name does not contain an underscore") + } + if !strings.ContainsAny(name, "0123456789") { + t.Fatalf("Generated name doesn't contain a number") + } + +} diff --git a/vendor/github.com/moby/moby/pkg/parsers/kernel/kernel.go b/vendor/github.com/moby/moby/pkg/parsers/kernel/kernel.go new file mode 100644 index 000000000..7738fc741 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/parsers/kernel/kernel.go @@ -0,0 +1,74 @@ +// +build !windows + +// Package kernel provides helper function to get, parse and compare kernel +// versions for different platforms. +package kernel + +import ( + "errors" + "fmt" +) + +// VersionInfo holds information about the kernel. +type VersionInfo struct { + Kernel int // Version of the kernel (e.g. 4.1.2-generic -> 4) + Major int // Major part of the kernel version (e.g. 4.1.2-generic -> 1) + Minor int // Minor part of the kernel version (e.g. 4.1.2-generic -> 2) + Flavor string // Flavor of the kernel version (e.g. 
4.1.2-generic -> generic) +} + +func (k *VersionInfo) String() string { + return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor) +} + +// CompareKernelVersion compares two kernel.VersionInfo structs. +// Returns -1 if a < b, 0 if a == b, 1 it a > b +func CompareKernelVersion(a, b VersionInfo) int { + if a.Kernel < b.Kernel { + return -1 + } else if a.Kernel > b.Kernel { + return 1 + } + + if a.Major < b.Major { + return -1 + } else if a.Major > b.Major { + return 1 + } + + if a.Minor < b.Minor { + return -1 + } else if a.Minor > b.Minor { + return 1 + } + + return 0 +} + +// ParseRelease parses a string and creates a VersionInfo based on it. +func ParseRelease(release string) (*VersionInfo, error) { + var ( + kernel, major, minor, parsed int + flavor, partial string + ) + + // Ignore error from Sscanf to allow an empty flavor. Instead, just + // make sure we got all the version numbers. + parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial) + if parsed < 2 { + return nil, errors.New("Can't parse kernel version " + release) + } + + // sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64 + parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor) + if parsed < 1 { + flavor = partial + } + + return &VersionInfo{ + Kernel: kernel, + Major: major, + Minor: minor, + Flavor: flavor, + }, nil +} diff --git a/vendor/github.com/moby/moby/pkg/parsers/kernel/kernel_darwin.go b/vendor/github.com/moby/moby/pkg/parsers/kernel/kernel_darwin.go new file mode 100644 index 000000000..71f205b28 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/parsers/kernel/kernel_darwin.go @@ -0,0 +1,56 @@ +// +build darwin + +// Package kernel provides helper function to get, parse and compare kernel +// versions for different platforms. +package kernel + +import ( + "fmt" + "os/exec" + "strings" + + "github.com/mattn/go-shellwords" +) + +// GetKernelVersion gets the current kernel version. +func GetKernelVersion() (*VersionInfo, error) { + release, err := getRelease() + if err != nil { + return nil, err + } + + return ParseRelease(release) +} + +// getRelease uses `system_profiler SPSoftwareDataType` to get OSX kernel version +func getRelease() (string, error) { + cmd := exec.Command("system_profiler", "SPSoftwareDataType") + osName, err := cmd.Output() + if err != nil { + return "", err + } + + var release string + data := strings.Split(string(osName), "\n") + for _, line := range data { + if strings.Contains(line, "Kernel Version") { + // It has the format like ' Kernel Version: Darwin 14.5.0' + content := strings.SplitN(line, ":", 2) + if len(content) != 2 { + return "", fmt.Errorf("Kernel Version is invalid") + } + + prettyNames, err := shellwords.Parse(content[1]) + if err != nil { + return "", fmt.Errorf("Kernel Version is invalid: %s", err.Error()) + } + + if len(prettyNames) != 2 { + return "", fmt.Errorf("Kernel Version needs to be 'Darwin x.x.x' ") + } + release = prettyNames[1] + } + } + + return release, nil +} diff --git a/vendor/github.com/moby/moby/pkg/parsers/kernel/kernel_unix.go b/vendor/github.com/moby/moby/pkg/parsers/kernel/kernel_unix.go new file mode 100644 index 000000000..bd137dfb6 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/parsers/kernel/kernel_unix.go @@ -0,0 +1,45 @@ +// +build linux freebsd solaris openbsd + +// Package kernel provides helper function to get, parse and compare kernel +// versions for different platforms. 
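+// +// A hedged usage sketch for the parsing and comparison helpers in this package, with values mirrored from the tests below: +// +//	v, _ := ParseRelease("3.12-1-amd64") +//	// v -> &VersionInfo{Kernel: 3, Major: 12, Minor: 0, Flavor: "-1-amd64"} +//	CompareKernelVersion(*v, VersionInfo{Kernel: 3, Major: 8, Minor: 0}) // -> 1, i.e. newer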
+package kernel + +import ( + "bytes" + + "github.com/Sirupsen/logrus" +) + +// GetKernelVersion gets the current kernel version. +func GetKernelVersion() (*VersionInfo, error) { + uts, err := uname() + if err != nil { + return nil, err + } + + release := make([]byte, len(uts.Release)) + + i := 0 + for _, c := range uts.Release { + release[i] = byte(c) + i++ + } + + // Remove the \x00 from the release for Atoi to parse correctly + release = release[:bytes.IndexByte(release, 0)] + + return ParseRelease(string(release)) +} + +// CheckKernelVersion checks if current kernel is newer than (or equal to) +// the given version. +func CheckKernelVersion(k, major, minor int) bool { + if v, err := GetKernelVersion(); err != nil { + logrus.Warnf("error getting kernel version: %s", err) + } else { + if CompareKernelVersion(*v, VersionInfo{Kernel: k, Major: major, Minor: minor}) < 0 { + return false + } + } + return true +} diff --git a/vendor/github.com/moby/moby/pkg/parsers/kernel/kernel_unix_test.go b/vendor/github.com/moby/moby/pkg/parsers/kernel/kernel_unix_test.go new file mode 100644 index 000000000..dc8c0e307 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/parsers/kernel/kernel_unix_test.go @@ -0,0 +1,96 @@ +// +build !windows + +package kernel + +import ( + "fmt" + "testing" +) + +func assertParseRelease(t *testing.T, release string, b *VersionInfo, result int) { + var ( + a *VersionInfo + ) + a, _ = ParseRelease(release) + + if r := CompareKernelVersion(*a, *b); r != result { + t.Fatalf("Unexpected kernel version comparison result for (%v,%v). Found %d, expected %d", release, b, r, result) + } + if a.Flavor != b.Flavor { + t.Fatalf("Unexpected parsed kernel flavor. Found %s, expected %s", a.Flavor, b.Flavor) + } +} + +// TestParseRelease tests the ParseRelease() function +func TestParseRelease(t *testing.T) { + assertParseRelease(t, "3.8.0", &VersionInfo{Kernel: 3, Major: 8, Minor: 0}, 0) + assertParseRelease(t, "3.4.54.longterm-1", &VersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0) + assertParseRelease(t, "3.4.54.longterm-1", &VersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0) + assertParseRelease(t, "3.8.0-19-generic", &VersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "-19-generic"}, 0) + assertParseRelease(t, "3.12.8tag", &VersionInfo{Kernel: 3, Major: 12, Minor: 8, Flavor: "tag"}, 0) + assertParseRelease(t, "3.12-1-amd64", &VersionInfo{Kernel: 3, Major: 12, Minor: 0, Flavor: "-1-amd64"}, 0) + assertParseRelease(t, "3.8.0", &VersionInfo{Kernel: 4, Major: 8, Minor: 0}, -1) + // Errors + invalids := []string{ + "3", + "a", + "a.a", + "a.a.a-a", + } + for _, invalid := range invalids { + expectedMessage := fmt.Sprintf("Can't parse kernel version %v", invalid) + if _, err := ParseRelease(invalid); err == nil || err.Error() != expectedMessage { + t.Fatalf("Expected error %q, got %v", expectedMessage, err) + } + } +} + +func assertKernelVersion(t *testing.T, a, b VersionInfo, result int) { + if r := CompareKernelVersion(a, b); r != result { + t.Fatalf("Unexpected kernel version comparison result. 
+	}
+}
+
+// TestCompareKernelVersion tests the CompareKernelVersion() function
+func TestCompareKernelVersion(t *testing.T) {
+	assertKernelVersion(t,
+		VersionInfo{Kernel: 3, Major: 8, Minor: 0},
+		VersionInfo{Kernel: 3, Major: 8, Minor: 0},
+		0)
+	assertKernelVersion(t,
+		VersionInfo{Kernel: 2, Major: 6, Minor: 0},
+		VersionInfo{Kernel: 3, Major: 8, Minor: 0},
+		-1)
+	assertKernelVersion(t,
+		VersionInfo{Kernel: 3, Major: 8, Minor: 0},
+		VersionInfo{Kernel: 2, Major: 6, Minor: 0},
+		1)
+	assertKernelVersion(t,
+		VersionInfo{Kernel: 3, Major: 8, Minor: 0},
+		VersionInfo{Kernel: 3, Major: 8, Minor: 0},
+		0)
+	assertKernelVersion(t,
+		VersionInfo{Kernel: 3, Major: 8, Minor: 5},
+		VersionInfo{Kernel: 3, Major: 8, Minor: 0},
+		1)
+	assertKernelVersion(t,
+		VersionInfo{Kernel: 3, Major: 0, Minor: 20},
+		VersionInfo{Kernel: 3, Major: 8, Minor: 0},
+		-1)
+	assertKernelVersion(t,
+		VersionInfo{Kernel: 3, Major: 7, Minor: 20},
+		VersionInfo{Kernel: 3, Major: 8, Minor: 0},
+		-1)
+	assertKernelVersion(t,
+		VersionInfo{Kernel: 3, Major: 8, Minor: 20},
+		VersionInfo{Kernel: 3, Major: 7, Minor: 0},
+		1)
+	assertKernelVersion(t,
+		VersionInfo{Kernel: 3, Major: 8, Minor: 20},
+		VersionInfo{Kernel: 3, Major: 8, Minor: 0},
+		1)
+	assertKernelVersion(t,
+		VersionInfo{Kernel: 3, Major: 8, Minor: 0},
+		VersionInfo{Kernel: 3, Major: 8, Minor: 20},
+		-1)
+}
diff --git a/vendor/github.com/moby/moby/pkg/parsers/kernel/kernel_windows.go b/vendor/github.com/moby/moby/pkg/parsers/kernel/kernel_windows.go
new file mode 100644
index 000000000..e59867277
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/parsers/kernel/kernel_windows.go
@@ -0,0 +1,70 @@
+// +build windows
+
+package kernel
+
+import (
+	"fmt"
+	"unsafe"
+
+	"golang.org/x/sys/windows"
+)
+
+// VersionInfo holds information about the kernel.
+type VersionInfo struct {
+	kvi   string // Full version string from the registry (the BuildLabEx value)
+	major int    // Major part of the kernel version (e.g. 6.1.7601.17592 -> 6)
+	minor int    // Minor part of the kernel version (e.g. 6.1.7601.17592 -> 1)
+	build int    // Build number of the kernel version (e.g. 6.1.7601.17592 -> 7601)
+}
+
+func (k *VersionInfo) String() string {
+	return fmt.Sprintf("%d.%d %d (%s)", k.major, k.minor, k.build, k.kvi)
+}
+
+// GetKernelVersion gets the current kernel version.
+func GetKernelVersion() (*VersionInfo, error) {
+
+	var (
+		h         windows.Handle
+		dwVersion uint32
+		err       error
+	)
+
+	KVI := &VersionInfo{"Unknown", 0, 0, 0}
+
+	if err = windows.RegOpenKeyEx(windows.HKEY_LOCAL_MACHINE,
+		windows.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`),
+		0,
+		windows.KEY_READ,
+		&h); err != nil {
+		return KVI, err
+	}
+	defer windows.RegCloseKey(h)
+
+	var buf [1 << 10]uint16
+	var typ uint32
+	n := uint32(len(buf) * 2) // api expects array of bytes, not uint16
+
+	if err = windows.RegQueryValueEx(h,
+		windows.StringToUTF16Ptr("BuildLabEx"),
+		nil,
+		&typ,
+		(*byte)(unsafe.Pointer(&buf[0])),
+		&n); err != nil {
+		return KVI, err
+	}
+
+	KVI.kvi = windows.UTF16ToString(buf[:])
+
+	// Important - docker.exe MUST be manifested for this API to return
+	// the correct information.
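+	// Worked example (editorial sketch, no extra logic): on Windows 7 SP1,
+	// GetVersion can return dwVersion = 0x1DB10106. The low byte (0x06) is
+	// the major version, the next byte (0x01) the minor version, and the
+	// high word (0x1DB1 = 7601) the build number, i.e. "6.1 7601".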
+ if dwVersion, err = windows.GetVersion(); err != nil { + return KVI, err + } + + KVI.major = int(dwVersion & 0xFF) + KVI.minor = int((dwVersion & 0XFF00) >> 8) + KVI.build = int((dwVersion & 0xFFFF0000) >> 16) + + return KVI, nil +} diff --git a/vendor/github.com/moby/moby/pkg/parsers/kernel/uname_linux.go b/vendor/github.com/moby/moby/pkg/parsers/kernel/uname_linux.go new file mode 100644 index 000000000..e913fad00 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/parsers/kernel/uname_linux.go @@ -0,0 +1,17 @@ +package kernel + +import "golang.org/x/sys/unix" + +// Utsname represents the system name structure. +// It is passthrough for unix.Utsname in order to make it portable with +// other platforms where it is not available. +type Utsname unix.Utsname + +func uname() (*unix.Utsname, error) { + uts := &unix.Utsname{} + + if err := unix.Uname(uts); err != nil { + return nil, err + } + return uts, nil +} diff --git a/vendor/github.com/moby/moby/pkg/parsers/kernel/uname_solaris.go b/vendor/github.com/moby/moby/pkg/parsers/kernel/uname_solaris.go new file mode 100644 index 000000000..49370bd3d --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/parsers/kernel/uname_solaris.go @@ -0,0 +1,14 @@ +package kernel + +import ( + "golang.org/x/sys/unix" +) + +func uname() (*unix.Utsname, error) { + uts := &unix.Utsname{} + + if err := unix.Uname(uts); err != nil { + return nil, err + } + return uts, nil +} diff --git a/vendor/github.com/moby/moby/pkg/parsers/kernel/uname_unsupported.go b/vendor/github.com/moby/moby/pkg/parsers/kernel/uname_unsupported.go new file mode 100644 index 000000000..1da3f239f --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/parsers/kernel/uname_unsupported.go @@ -0,0 +1,18 @@ +// +build !linux,!solaris + +package kernel + +import ( + "errors" +) + +// Utsname represents the system name structure. +// It is defined here to make it portable as it is available on linux but not +// on windows. +type Utsname struct { + Release [65]byte +} + +func uname() (*Utsname, error) { + return nil, errors.New("Kernel version detection is available only on linux") +} diff --git a/vendor/github.com/moby/moby/pkg/parsers/operatingsystem/operatingsystem_linux.go b/vendor/github.com/moby/moby/pkg/parsers/operatingsystem/operatingsystem_linux.go new file mode 100644 index 000000000..e04a3499a --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/parsers/operatingsystem/operatingsystem_linux.go @@ -0,0 +1,77 @@ +// Package operatingsystem provides helper function to get the operating system +// name for different platforms. +package operatingsystem + +import ( + "bufio" + "bytes" + "fmt" + "io/ioutil" + "os" + "strings" + + "github.com/mattn/go-shellwords" +) + +var ( + // file to use to detect if the daemon is running in a container + proc1Cgroup = "/proc/1/cgroup" + + // file to check to determine Operating System + etcOsRelease = "/etc/os-release" + + // used by stateless systems like Clear Linux + altOsRelease = "/usr/lib/os-release" +) + +// GetOperatingSystem gets the name of the current operating system. 
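+// As an illustrative note (editorial, not upstream documentation): given an
+// /etc/os-release containing the line
+//
+//	PRETTY_NAME="Ubuntu 14.04 LTS"
+//
+// this returns "Ubuntu 14.04 LTS"; when no PRETTY_NAME is set it falls back
+// to "Linux", the documented os-release default.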
+func GetOperatingSystem() (string, error) { + osReleaseFile, err := os.Open(etcOsRelease) + if err != nil { + if !os.IsNotExist(err) { + return "", fmt.Errorf("Error opening %s: %v", etcOsRelease, err) + } + osReleaseFile, err = os.Open(altOsRelease) + if err != nil { + return "", fmt.Errorf("Error opening %s: %v", altOsRelease, err) + } + } + defer osReleaseFile.Close() + + var prettyName string + scanner := bufio.NewScanner(osReleaseFile) + for scanner.Scan() { + line := scanner.Text() + if strings.HasPrefix(line, "PRETTY_NAME=") { + data := strings.SplitN(line, "=", 2) + prettyNames, err := shellwords.Parse(data[1]) + if err != nil { + return "", fmt.Errorf("PRETTY_NAME is invalid: %s", err.Error()) + } + if len(prettyNames) != 1 { + return "", fmt.Errorf("PRETTY_NAME needs to be enclosed by quotes if they have spaces: %s", data[1]) + } + prettyName = prettyNames[0] + } + } + if prettyName != "" { + return prettyName, nil + } + // If not set, defaults to PRETTY_NAME="Linux" + // c.f. http://www.freedesktop.org/software/systemd/man/os-release.html + return "Linux", nil +} + +// IsContainerized returns true if we are running inside a container. +func IsContainerized() (bool, error) { + b, err := ioutil.ReadFile(proc1Cgroup) + if err != nil { + return false, err + } + for _, line := range bytes.Split(b, []byte{'\n'}) { + if len(line) > 0 && !bytes.HasSuffix(line, []byte{'/'}) && !bytes.HasSuffix(line, []byte("init.scope")) { + return true, nil + } + } + return false, nil +} diff --git a/vendor/github.com/moby/moby/pkg/parsers/operatingsystem/operatingsystem_solaris.go b/vendor/github.com/moby/moby/pkg/parsers/operatingsystem/operatingsystem_solaris.go new file mode 100644 index 000000000..d08ad1486 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/parsers/operatingsystem/operatingsystem_solaris.go @@ -0,0 +1,37 @@ +// +build solaris,cgo + +package operatingsystem + +/* +#include +*/ +import "C" + +import ( + "bytes" + "errors" + "io/ioutil" +) + +var etcOsRelease = "/etc/release" + +// GetOperatingSystem gets the name of the current operating system. +func GetOperatingSystem() (string, error) { + b, err := ioutil.ReadFile(etcOsRelease) + if err != nil { + return "", err + } + if i := bytes.Index(b, []byte("\n")); i >= 0 { + b = bytes.Trim(b[:i], " ") + return string(b), nil + } + return "", errors.New("release not found") +} + +// IsContainerized returns true if we are running inside a container. +func IsContainerized() (bool, error) { + if C.getzoneid() != 0 { + return true, nil + } + return false, nil +} diff --git a/vendor/github.com/moby/moby/pkg/parsers/operatingsystem/operatingsystem_unix.go b/vendor/github.com/moby/moby/pkg/parsers/operatingsystem/operatingsystem_unix.go new file mode 100644 index 000000000..bc91c3c53 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/parsers/operatingsystem/operatingsystem_unix.go @@ -0,0 +1,25 @@ +// +build freebsd darwin + +package operatingsystem + +import ( + "errors" + "os/exec" +) + +// GetOperatingSystem gets the name of the current operating system. +func GetOperatingSystem() (string, error) { + cmd := exec.Command("uname", "-s") + osName, err := cmd.Output() + if err != nil { + return "", err + } + return string(osName), nil +} + +// IsContainerized returns true if we are running inside a container. +// No-op on FreeBSD and Darwin, always returns false. 
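+// (Editorial note: this stub in fact returns false together with an error,
+// so callers can tell "not containerized" apart from "detection unsupported".)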
+func IsContainerized() (bool, error) { + // TODO: Implement jail detection for freeBSD + return false, errors.New("Cannot detect if we are in container") +} diff --git a/vendor/github.com/moby/moby/pkg/parsers/operatingsystem/operatingsystem_unix_test.go b/vendor/github.com/moby/moby/pkg/parsers/operatingsystem/operatingsystem_unix_test.go new file mode 100644 index 000000000..e7120c65c --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/parsers/operatingsystem/operatingsystem_unix_test.go @@ -0,0 +1,247 @@ +// +build linux freebsd + +package operatingsystem + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func TestGetOperatingSystem(t *testing.T) { + var backup = etcOsRelease + + invalids := []struct { + content string + errorExpected string + }{ + { + `PRETTY_NAME=Source Mage GNU/Linux +PRETTY_NAME=Ubuntu 14.04.LTS`, + "PRETTY_NAME needs to be enclosed by quotes if they have spaces: Source Mage GNU/Linux", + }, + { + `PRETTY_NAME="Ubuntu Linux +PRETTY_NAME=Ubuntu 14.04.LTS`, + "PRETTY_NAME is invalid: invalid command line string", + }, + { + `PRETTY_NAME=Ubuntu' +PRETTY_NAME=Ubuntu 14.04.LTS`, + "PRETTY_NAME is invalid: invalid command line string", + }, + { + `PRETTY_NAME' +PRETTY_NAME=Ubuntu 14.04.LTS`, + "PRETTY_NAME needs to be enclosed by quotes if they have spaces: Ubuntu 14.04.LTS", + }, + } + + valids := []struct { + content string + expected string + }{ + { + `NAME="Ubuntu" +PRETTY_NAME_AGAIN="Ubuntu 14.04.LTS" +VERSION="14.04, Trusty Tahr" +ID=ubuntu +ID_LIKE=debian +VERSION_ID="14.04" +HOME_URL="http://www.ubuntu.com/" +SUPPORT_URL="http://help.ubuntu.com/" +BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`, + "Linux", + }, + { + `NAME="Ubuntu" +VERSION="14.04, Trusty Tahr" +ID=ubuntu +ID_LIKE=debian +VERSION_ID="14.04" +HOME_URL="http://www.ubuntu.com/" +SUPPORT_URL="http://help.ubuntu.com/" +BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`, + "Linux", + }, + { + `NAME=Gentoo +ID=gentoo +PRETTY_NAME="Gentoo/Linux" +ANSI_COLOR="1;32" +HOME_URL="http://www.gentoo.org/" +SUPPORT_URL="http://www.gentoo.org/main/en/support.xml" +BUG_REPORT_URL="https://bugs.gentoo.org/" +`, + "Gentoo/Linux", + }, + { + `NAME="Ubuntu" +VERSION="14.04, Trusty Tahr" +ID=ubuntu +ID_LIKE=debian +PRETTY_NAME="Ubuntu 14.04 LTS" +VERSION_ID="14.04" +HOME_URL="http://www.ubuntu.com/" +SUPPORT_URL="http://help.ubuntu.com/" +BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`, + "Ubuntu 14.04 LTS", + }, + { + `NAME="Ubuntu" +VERSION="14.04, Trusty Tahr" +ID=ubuntu +ID_LIKE=debian +PRETTY_NAME='Ubuntu 14.04 LTS'`, + "Ubuntu 14.04 LTS", + }, + { + `PRETTY_NAME=Source +NAME="Source Mage"`, + "Source", + }, + { + `PRETTY_NAME=Source +PRETTY_NAME="Source Mage"`, + "Source Mage", + }, + } + + dir := os.TempDir() + etcOsRelease = filepath.Join(dir, "etcOsRelease") + + defer func() { + os.Remove(etcOsRelease) + etcOsRelease = backup + }() + + for _, elt := range invalids { + if err := ioutil.WriteFile(etcOsRelease, []byte(elt.content), 0600); err != nil { + t.Fatalf("failed to write to %s: %v", etcOsRelease, err) + } + s, err := GetOperatingSystem() + if err == nil || err.Error() != elt.errorExpected { + t.Fatalf("Expected an error %q, got %q (err: %v)", elt.errorExpected, s, err) + } + } + + for _, elt := range valids { + if err := ioutil.WriteFile(etcOsRelease, []byte(elt.content), 0600); err != nil { + t.Fatalf("failed to write to %s: %v", etcOsRelease, err) + } + s, err := GetOperatingSystem() + if err != nil || s != elt.expected { + t.Fatalf("Expected %q, got %q (err: %v)", 
elt.expected, s, err) + } + } +} + +func TestIsContainerized(t *testing.T) { + var ( + backup = proc1Cgroup + nonContainerizedProc1Cgroupsystemd226 = []byte(`9:memory:/init.scope +8:net_cls,net_prio:/ +7:cpuset:/ +6:freezer:/ +5:devices:/init.scope +4:blkio:/init.scope +3:cpu,cpuacct:/init.scope +2:perf_event:/ +1:name=systemd:/init.scope +`) + nonContainerizedProc1Cgroup = []byte(`14:name=systemd:/ +13:hugetlb:/ +12:net_prio:/ +11:perf_event:/ +10:bfqio:/ +9:blkio:/ +8:net_cls:/ +7:freezer:/ +6:devices:/ +5:memory:/ +4:cpuacct:/ +3:cpu:/ +2:cpuset:/ +`) + containerizedProc1Cgroup = []byte(`9:perf_event:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +8:blkio:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +7:net_cls:/ +6:freezer:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +5:devices:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +4:memory:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +3:cpuacct:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +2:cpu:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +1:cpuset:/`) + ) + + dir := os.TempDir() + proc1Cgroup = filepath.Join(dir, "proc1Cgroup") + + defer func() { + os.Remove(proc1Cgroup) + proc1Cgroup = backup + }() + + if err := ioutil.WriteFile(proc1Cgroup, nonContainerizedProc1Cgroup, 0600); err != nil { + t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) + } + inContainer, err := IsContainerized() + if err != nil { + t.Fatal(err) + } + if inContainer { + t.Fatal("Wrongly assuming containerized") + } + + if err := ioutil.WriteFile(proc1Cgroup, nonContainerizedProc1Cgroupsystemd226, 0600); err != nil { + t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) + } + inContainer, err = IsContainerized() + if err != nil { + t.Fatal(err) + } + if inContainer { + t.Fatal("Wrongly assuming containerized for systemd /init.scope cgroup layout") + } + + if err := ioutil.WriteFile(proc1Cgroup, containerizedProc1Cgroup, 0600); err != nil { + t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) + } + inContainer, err = IsContainerized() + if err != nil { + t.Fatal(err) + } + if !inContainer { + t.Fatal("Wrongly assuming non-containerized") + } +} + +func TestOsReleaseFallback(t *testing.T) { + var backup = etcOsRelease + var altBackup = altOsRelease + dir := os.TempDir() + etcOsRelease = filepath.Join(dir, "etcOsRelease") + altOsRelease = filepath.Join(dir, "altOsRelease") + + defer func() { + os.Remove(dir) + etcOsRelease = backup + altOsRelease = altBackup + }() + content := `NAME=Gentoo +ID=gentoo +PRETTY_NAME="Gentoo/Linux" +ANSI_COLOR="1;32" +HOME_URL="http://www.gentoo.org/" +SUPPORT_URL="http://www.gentoo.org/main/en/support.xml" +BUG_REPORT_URL="https://bugs.gentoo.org/" +` + if err := ioutil.WriteFile(altOsRelease, []byte(content), 0600); err != nil { + t.Fatalf("failed to write to %s: %v", etcOsRelease, err) + } + s, err := GetOperatingSystem() + if err != nil || s != "Gentoo/Linux" { + t.Fatalf("Expected %q, got %q (err: %v)", "Gentoo/Linux", s, err) + } +} diff --git a/vendor/github.com/moby/moby/pkg/parsers/operatingsystem/operatingsystem_windows.go b/vendor/github.com/moby/moby/pkg/parsers/operatingsystem/operatingsystem_windows.go new file mode 100644 index 000000000..5d8b42cc3 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/parsers/operatingsystem/operatingsystem_windows.go @@ -0,0 +1,50 @@ +package operatingsystem + +import ( + "unsafe" + + 
"golang.org/x/sys/windows" +) + +// See https://code.google.com/p/go/source/browse/src/pkg/mime/type_windows.go?r=d14520ac25bf6940785aabb71f5be453a286f58c +// for a similar sample + +// GetOperatingSystem gets the name of the current operating system. +func GetOperatingSystem() (string, error) { + + var h windows.Handle + + // Default return value + ret := "Unknown Operating System" + + if err := windows.RegOpenKeyEx(windows.HKEY_LOCAL_MACHINE, + windows.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`), + 0, + windows.KEY_READ, + &h); err != nil { + return ret, err + } + defer windows.RegCloseKey(h) + + var buf [1 << 10]uint16 + var typ uint32 + n := uint32(len(buf) * 2) // api expects array of bytes, not uint16 + + if err := windows.RegQueryValueEx(h, + windows.StringToUTF16Ptr("ProductName"), + nil, + &typ, + (*byte)(unsafe.Pointer(&buf[0])), + &n); err != nil { + return ret, err + } + ret = windows.UTF16ToString(buf[:]) + + return ret, nil +} + +// IsContainerized returns true if we are running inside a container. +// No-op on Windows, always returns false. +func IsContainerized() (bool, error) { + return false, nil +} diff --git a/vendor/github.com/moby/moby/pkg/parsers/parsers.go b/vendor/github.com/moby/moby/pkg/parsers/parsers.go new file mode 100644 index 000000000..acc897168 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/parsers/parsers.go @@ -0,0 +1,69 @@ +// Package parsers provides helper functions to parse and validate different type +// of string. It can be hosts, unix addresses, tcp addresses, filters, kernel +// operating system versions. +package parsers + +import ( + "fmt" + "strconv" + "strings" +) + +// ParseKeyValueOpt parses and validates the specified string as a key/value pair (key=value) +func ParseKeyValueOpt(opt string) (string, string, error) { + parts := strings.SplitN(opt, "=", 2) + if len(parts) != 2 { + return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt) + } + return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil +} + +// ParseUintList parses and validates the specified string as the value +// found in some cgroup file (e.g. `cpuset.cpus`, `cpuset.mems`), which could be +// one of the formats below. Note that duplicates are actually allowed in the +// input string. It returns a `map[int]bool` with available elements from `val` +// set to `true`. 
+// Supported formats: +// 7 +// 1-6 +// 0,3-4,7,8-10 +// 0-0,0,1-7 +// 03,1-3 <- this is gonna get parsed as [1,2,3] +// 3,2,1 +// 0-2,3,1 +func ParseUintList(val string) (map[int]bool, error) { + if val == "" { + return map[int]bool{}, nil + } + + availableInts := make(map[int]bool) + split := strings.Split(val, ",") + errInvalidFormat := fmt.Errorf("invalid format: %s", val) + + for _, r := range split { + if !strings.Contains(r, "-") { + v, err := strconv.Atoi(r) + if err != nil { + return nil, errInvalidFormat + } + availableInts[v] = true + } else { + split := strings.SplitN(r, "-", 2) + min, err := strconv.Atoi(split[0]) + if err != nil { + return nil, errInvalidFormat + } + max, err := strconv.Atoi(split[1]) + if err != nil { + return nil, errInvalidFormat + } + if max < min { + return nil, errInvalidFormat + } + for i := min; i <= max; i++ { + availableInts[i] = true + } + } + } + return availableInts, nil +} diff --git a/vendor/github.com/moby/moby/pkg/parsers/parsers_test.go b/vendor/github.com/moby/moby/pkg/parsers/parsers_test.go new file mode 100644 index 000000000..7f19e9027 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/parsers/parsers_test.go @@ -0,0 +1,70 @@ +package parsers + +import ( + "reflect" + "testing" +) + +func TestParseKeyValueOpt(t *testing.T) { + invalids := map[string]string{ + "": "Unable to parse key/value option: ", + "key": "Unable to parse key/value option: key", + } + for invalid, expectedError := range invalids { + if _, _, err := ParseKeyValueOpt(invalid); err == nil || err.Error() != expectedError { + t.Fatalf("Expected error %v for %v, got %v", expectedError, invalid, err) + } + } + valids := map[string][]string{ + "key=value": {"key", "value"}, + " key = value ": {"key", "value"}, + "key=value1=value2": {"key", "value1=value2"}, + " key = value1 = value2 ": {"key", "value1 = value2"}, + } + for valid, expectedKeyValue := range valids { + key, value, err := ParseKeyValueOpt(valid) + if err != nil { + t.Fatal(err) + } + if key != expectedKeyValue[0] || value != expectedKeyValue[1] { + t.Fatalf("Expected {%v: %v} got {%v: %v}", expectedKeyValue[0], expectedKeyValue[1], key, value) + } + } +} + +func TestParseUintList(t *testing.T) { + valids := map[string]map[int]bool{ + "": {}, + "7": {7: true}, + "1-6": {1: true, 2: true, 3: true, 4: true, 5: true, 6: true}, + "0-7": {0: true, 1: true, 2: true, 3: true, 4: true, 5: true, 6: true, 7: true}, + "0,3-4,7,8-10": {0: true, 3: true, 4: true, 7: true, 8: true, 9: true, 10: true}, + "0-0,0,1-4": {0: true, 1: true, 2: true, 3: true, 4: true}, + "03,1-3": {1: true, 2: true, 3: true}, + "3,2,1": {1: true, 2: true, 3: true}, + "0-2,3,1": {0: true, 1: true, 2: true, 3: true}, + } + for k, v := range valids { + out, err := ParseUintList(k) + if err != nil { + t.Fatalf("Expected not to fail, got %v", err) + } + if !reflect.DeepEqual(out, v) { + t.Fatalf("Expected %v, got %v", v, out) + } + } + + invalids := []string{ + "this", + "1--", + "1-10,,10", + "10-1", + "-1", + "-1,0", + } + for _, v := range invalids { + if out, err := ParseUintList(v); err == nil { + t.Fatalf("Expected failure with %s but got %v", v, out) + } + } +} diff --git a/vendor/github.com/moby/moby/pkg/pidfile/pidfile.go b/vendor/github.com/moby/moby/pkg/pidfile/pidfile.go new file mode 100644 index 000000000..0fc3997a1 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/pidfile/pidfile.go @@ -0,0 +1,53 @@ +// Package pidfile provides structure and helper functions to create and remove +// PID file. 
A PID file is usually a file used to store the process ID of a +// running process. +package pidfile + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/docker/docker/pkg/system" +) + +// PIDFile is a file used to store the process ID of a running process. +type PIDFile struct { + path string +} + +func checkPIDFileAlreadyExists(path string) error { + if pidByte, err := ioutil.ReadFile(path); err == nil { + pidString := strings.TrimSpace(string(pidByte)) + if pid, err := strconv.Atoi(pidString); err == nil { + if processExists(pid) { + return fmt.Errorf("pid file found, ensure docker is not running or delete %s", path) + } + } + } + return nil +} + +// New creates a PIDfile using the specified path. +func New(path string) (*PIDFile, error) { + if err := checkPIDFileAlreadyExists(path); err != nil { + return nil, err + } + // Note MkdirAll returns nil if a directory already exists + if err := system.MkdirAll(filepath.Dir(path), os.FileMode(0755), ""); err != nil { + return nil, err + } + if err := ioutil.WriteFile(path, []byte(fmt.Sprintf("%d", os.Getpid())), 0644); err != nil { + return nil, err + } + + return &PIDFile{path: path}, nil +} + +// Remove removes the PIDFile. +func (file PIDFile) Remove() error { + return os.Remove(file.path) +} diff --git a/vendor/github.com/moby/moby/pkg/pidfile/pidfile_darwin.go b/vendor/github.com/moby/moby/pkg/pidfile/pidfile_darwin.go new file mode 100644 index 000000000..2cd001a70 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/pidfile/pidfile_darwin.go @@ -0,0 +1,14 @@ +// +build darwin + +package pidfile + +import ( + "golang.org/x/sys/unix" +) + +func processExists(pid int) bool { + // OS X does not have a proc filesystem. + // Use kill -0 pid to judge if the process exists. 
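+	// (Editorial sketch of the semantics: signal 0 performs error checking
+	// only and delivers no signal, so a nil error means the process exists.)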
+ err := unix.Kill(pid, 0) + return err == nil +} diff --git a/vendor/github.com/moby/moby/pkg/pidfile/pidfile_test.go b/vendor/github.com/moby/moby/pkg/pidfile/pidfile_test.go new file mode 100644 index 000000000..73e8af76d --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/pidfile/pidfile_test.go @@ -0,0 +1,38 @@ +package pidfile + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func TestNewAndRemove(t *testing.T) { + dir, err := ioutil.TempDir(os.TempDir(), "test-pidfile") + if err != nil { + t.Fatal("Could not create test directory") + } + + path := filepath.Join(dir, "testfile") + file, err := New(path) + if err != nil { + t.Fatal("Could not create test file", err) + } + + _, err = New(path) + if err == nil { + t.Fatal("Test file creation not blocked") + } + + if err := file.Remove(); err != nil { + t.Fatal("Could not delete created test file") + } +} + +func TestRemoveInvalidPath(t *testing.T) { + file := PIDFile{path: filepath.Join("foo", "bar")} + + if err := file.Remove(); err == nil { + t.Fatal("Non-existing file doesn't give an error on delete") + } +} diff --git a/vendor/github.com/moby/moby/pkg/pidfile/pidfile_unix.go b/vendor/github.com/moby/moby/pkg/pidfile/pidfile_unix.go new file mode 100644 index 000000000..1bf5221e3 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/pidfile/pidfile_unix.go @@ -0,0 +1,16 @@ +// +build !windows,!darwin + +package pidfile + +import ( + "os" + "path/filepath" + "strconv" +) + +func processExists(pid int) bool { + if _, err := os.Stat(filepath.Join("/proc", strconv.Itoa(pid))); err == nil { + return true + } + return false +} diff --git a/vendor/github.com/moby/moby/pkg/pidfile/pidfile_windows.go b/vendor/github.com/moby/moby/pkg/pidfile/pidfile_windows.go new file mode 100644 index 000000000..86850d465 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/pidfile/pidfile_windows.go @@ -0,0 +1,25 @@ +package pidfile + +import ( + "golang.org/x/sys/windows" +) + +const ( + processQueryLimitedInformation = 0x1000 + + stillActive = 259 +) + +func processExists(pid int) bool { + h, err := windows.OpenProcess(processQueryLimitedInformation, false, uint32(pid)) + if err != nil { + return false + } + var c uint32 + err = windows.GetExitCodeProcess(h, &c) + windows.Close(h) + if err != nil { + return c == stillActive + } + return true +} diff --git a/vendor/github.com/moby/moby/pkg/platform/architecture_linux.go b/vendor/github.com/moby/moby/pkg/platform/architecture_linux.go new file mode 100644 index 000000000..061127cd2 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/platform/architecture_linux.go @@ -0,0 +1,16 @@ +// Package platform provides helper function to get the runtime architecture +// for different platforms. +package platform + +import ( + "golang.org/x/sys/unix" +) + +// runtimeArchitecture gets the name of the current architecture (x86, x86_64, …) +func runtimeArchitecture() (string, error) { + utsname := &unix.Utsname{} + if err := unix.Uname(utsname); err != nil { + return "", err + } + return charsToString(utsname.Machine), nil +} diff --git a/vendor/github.com/moby/moby/pkg/platform/architecture_unix.go b/vendor/github.com/moby/moby/pkg/platform/architecture_unix.go new file mode 100644 index 000000000..45bbcf153 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/platform/architecture_unix.go @@ -0,0 +1,20 @@ +// +build freebsd solaris darwin + +// Package platform provides helper function to get the runtime architecture +// for different platforms. 
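+//
+// As an illustrative sketch (editorial): on these platforms the architecture
+// is read by shelling out to `/usr/bin/uname -m` and trimming the output,
+// e.g. "x86_64" on 64-bit x86 or "i86pc" on Solaris.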
+package platform + +import ( + "os/exec" + "strings" +) + +// runtimeArchitecture gets the name of the current architecture (x86, x86_64, i86pc, sun4v, ...) +func runtimeArchitecture() (string, error) { + cmd := exec.Command("/usr/bin/uname", "-m") + machine, err := cmd.Output() + if err != nil { + return "", err + } + return strings.TrimSpace(string(machine)), nil +} diff --git a/vendor/github.com/moby/moby/pkg/platform/architecture_windows.go b/vendor/github.com/moby/moby/pkg/platform/architecture_windows.go new file mode 100644 index 000000000..c5f684ddf --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/platform/architecture_windows.go @@ -0,0 +1,60 @@ +package platform + +import ( + "fmt" + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + procGetSystemInfo = modkernel32.NewProc("GetSystemInfo") +) + +// see http://msdn.microsoft.com/en-us/library/windows/desktop/ms724958(v=vs.85).aspx +type systeminfo struct { + wProcessorArchitecture uint16 + wReserved uint16 + dwPageSize uint32 + lpMinimumApplicationAddress uintptr + lpMaximumApplicationAddress uintptr + dwActiveProcessorMask uintptr + dwNumberOfProcessors uint32 + dwProcessorType uint32 + dwAllocationGranularity uint32 + wProcessorLevel uint16 + wProcessorRevision uint16 +} + +// Constants +const ( + ProcessorArchitecture64 = 9 // PROCESSOR_ARCHITECTURE_AMD64 + ProcessorArchitectureIA64 = 6 // PROCESSOR_ARCHITECTURE_IA64 + ProcessorArchitecture32 = 0 // PROCESSOR_ARCHITECTURE_INTEL + ProcessorArchitectureArm = 5 // PROCESSOR_ARCHITECTURE_ARM +) + +// runtimeArchitecture gets the name of the current architecture (x86, x86_64, …) +func runtimeArchitecture() (string, error) { + var sysinfo systeminfo + syscall.Syscall(procGetSystemInfo.Addr(), 1, uintptr(unsafe.Pointer(&sysinfo)), 0, 0) + switch sysinfo.wProcessorArchitecture { + case ProcessorArchitecture64, ProcessorArchitectureIA64: + return "x86_64", nil + case ProcessorArchitecture32: + return "i686", nil + case ProcessorArchitectureArm: + return "arm", nil + default: + return "", fmt.Errorf("Unknown processor architecture") + } +} + +// NumProcs returns the number of processors on the system +func NumProcs() uint32 { + var sysinfo systeminfo + syscall.Syscall(procGetSystemInfo.Addr(), 1, uintptr(unsafe.Pointer(&sysinfo)), 0, 0) + return sysinfo.dwNumberOfProcessors +} diff --git a/vendor/github.com/moby/moby/pkg/platform/platform.go b/vendor/github.com/moby/moby/pkg/platform/platform.go new file mode 100644 index 000000000..e4b03122f --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/platform/platform.go @@ -0,0 +1,23 @@ +package platform + +import ( + "runtime" + + "github.com/Sirupsen/logrus" +) + +var ( + // Architecture holds the runtime architecture of the process. + Architecture string + // OSType holds the runtime operating system type (Linux, …) of the process. 
+ OSType string +) + +func init() { + var err error + Architecture, err = runtimeArchitecture() + if err != nil { + logrus.Errorf("Could not read system architecture info: %v", err) + } + OSType = runtime.GOOS +} diff --git a/vendor/github.com/moby/moby/pkg/platform/utsname_int8.go b/vendor/github.com/moby/moby/pkg/platform/utsname_int8.go new file mode 100644 index 000000000..a948873cd --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/platform/utsname_int8.go @@ -0,0 +1,18 @@ +// +build linux,386 linux,amd64 linux,arm64 s390x +// see golang's sources golang.org/x/sys/unix/ztypes_linux_*.go that use int8 + +package platform + +// Convert the OS/ARCH-specific utsname.Machine to string +// given as an array of signed int8 +func charsToString(ca [65]int8) string { + s := make([]byte, len(ca)) + var lens int + for ; lens < len(ca); lens++ { + if ca[lens] == 0 { + break + } + s[lens] = uint8(ca[lens]) + } + return string(s[0:lens]) +} diff --git a/vendor/github.com/moby/moby/pkg/platform/utsname_int8_test.go b/vendor/github.com/moby/moby/pkg/platform/utsname_int8_test.go new file mode 100644 index 000000000..7cd7208f6 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/platform/utsname_int8_test.go @@ -0,0 +1,16 @@ +// +build linux,386 linux,amd64 linux,arm64 s390x + +package platform + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestCharToString(t *testing.T) { + machineInBytes := [65]int8{120, 56, 54, 95, 54, 52} + machineInString := charsToString(machineInBytes) + assert.NotNil(t, machineInString, "Unable to convert char into string.") + assert.Equal(t, string("x86_64"), machineInString, "Parsed machine code not equal.") +} diff --git a/vendor/github.com/moby/moby/pkg/platform/utsname_uint8.go b/vendor/github.com/moby/moby/pkg/platform/utsname_uint8.go new file mode 100644 index 000000000..b4af7a5c8 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/platform/utsname_uint8.go @@ -0,0 +1,18 @@ +// +build linux,arm linux,ppc64 linux,ppc64le +// see golang's sources golang.org/x/sys/unix/ztypes_linux_*.go that use uint8 + +package platform + +// Convert the OS/ARCH-specific utsname.Machine to string +// given as an array of unsigned uint8 +func charsToString(ca [65]uint8) string { + s := make([]byte, len(ca)) + var lens int + for ; lens < len(ca); lens++ { + if ca[lens] == 0 { + break + } + s[lens] = ca[lens] + } + return string(s[0:lens]) +} diff --git a/vendor/github.com/moby/moby/pkg/platform/utsname_uint8_test.go b/vendor/github.com/moby/moby/pkg/platform/utsname_uint8_test.go new file mode 100644 index 000000000..835eaa930 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/platform/utsname_uint8_test.go @@ -0,0 +1,16 @@ +// +build linux,arm linux,ppc64 linux,ppc64le + +package platform + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestTestCharToString(t *testing.T) { + machineInBytes := [65]uint8{120, 56, 54, 95, 54, 52} + machineInString := charsToString(machineInBytes) + assert.NotNil(t, machineInString, "Unable to convert char into string.") + assert.Equal(t, string("x86_64"), machineInString, "Parsed machine code not equal.") +} diff --git a/vendor/github.com/moby/moby/pkg/plugingetter/getter.go b/vendor/github.com/moby/moby/pkg/plugingetter/getter.go new file mode 100644 index 000000000..b04b7bc82 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/plugingetter/getter.go @@ -0,0 +1,35 @@ +package plugingetter + +import "github.com/docker/docker/pkg/plugins" + +const ( + // Lookup doesn't update RefCount + Lookup = 0 + // 
Acquire increments RefCount + Acquire = 1 + // Release decrements RefCount + Release = -1 +) + +// CompatPlugin is an abstraction to handle both v2(new) and v1(legacy) plugins. +type CompatPlugin interface { + Client() *plugins.Client + Name() string + BasePath() string + IsV1() bool +} + +// CountedPlugin is a plugin which is reference counted. +type CountedPlugin interface { + Acquire() + Release() + CompatPlugin +} + +// PluginGetter is the interface implemented by Store +type PluginGetter interface { + Get(name, capability string, mode int) (CompatPlugin, error) + GetAllByCap(capability string) ([]CompatPlugin, error) + GetAllManagedPluginsByCap(capability string) []CompatPlugin + Handle(capability string, callback func(string, *plugins.Client)) +} diff --git a/vendor/github.com/moby/moby/pkg/plugins/client.go b/vendor/github.com/moby/moby/pkg/plugins/client.go new file mode 100644 index 000000000..f221a46fc --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/plugins/client.go @@ -0,0 +1,205 @@ +package plugins + +import ( + "bytes" + "encoding/json" + "io" + "io/ioutil" + "net/http" + "net/url" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/plugins/transport" + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" +) + +const ( + defaultTimeOut = 30 +) + +func newTransport(addr string, tlsConfig *tlsconfig.Options) (transport.Transport, error) { + tr := &http.Transport{} + + if tlsConfig != nil { + c, err := tlsconfig.Client(*tlsConfig) + if err != nil { + return nil, err + } + tr.TLSClientConfig = c + } + + u, err := url.Parse(addr) + if err != nil { + return nil, err + } + socket := u.Host + if socket == "" { + // valid local socket addresses have the host empty. + socket = u.Path + } + if err := sockets.ConfigureTransport(tr, u.Scheme, socket); err != nil { + return nil, err + } + scheme := httpScheme(u) + + return transport.NewHTTPTransport(tr, scheme, socket), nil +} + +// NewClient creates a new plugin client (http). +func NewClient(addr string, tlsConfig *tlsconfig.Options) (*Client, error) { + clientTransport, err := newTransport(addr, tlsConfig) + if err != nil { + return nil, err + } + return newClientWithTransport(clientTransport, 0), nil +} + +// NewClientWithTimeout creates a new plugin client (http). +func NewClientWithTimeout(addr string, tlsConfig *tlsconfig.Options, timeout time.Duration) (*Client, error) { + clientTransport, err := newTransport(addr, tlsConfig) + if err != nil { + return nil, err + } + return newClientWithTransport(clientTransport, timeout), nil +} + +// newClientWithTransport creates a new plugin client with a given transport. +func newClientWithTransport(tr transport.Transport, timeout time.Duration) *Client { + return &Client{ + http: &http.Client{ + Transport: tr, + Timeout: timeout, + }, + requestFactory: tr, + } +} + +// Client represents a plugin client. +type Client struct { + http *http.Client // http client to use + requestFactory transport.RequestFactory +} + +// Call calls the specified method with the specified arguments for the plugin. +// It will retry for 30 seconds if a failure occurs when calling. 
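+// Retries back off exponentially (1s, 2s, 4s, ... capped at 30s between
+// attempts). An illustrative call (driverRequest/driverResponse are
+// editorial placeholder types, not part of this package):
+//
+//	var resp driverResponse
+//	err := client.Call("VolumeDriver.Mount", driverRequest{Name: "vol1"}, &resp)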
+func (c *Client) Call(serviceMethod string, args interface{}, ret interface{}) error { + var buf bytes.Buffer + if args != nil { + if err := json.NewEncoder(&buf).Encode(args); err != nil { + return err + } + } + body, err := c.callWithRetry(serviceMethod, &buf, true) + if err != nil { + return err + } + defer body.Close() + if ret != nil { + if err := json.NewDecoder(body).Decode(&ret); err != nil { + logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err) + return err + } + } + return nil +} + +// Stream calls the specified method with the specified arguments for the plugin and returns the response body +func (c *Client) Stream(serviceMethod string, args interface{}) (io.ReadCloser, error) { + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(args); err != nil { + return nil, err + } + return c.callWithRetry(serviceMethod, &buf, true) +} + +// SendFile calls the specified method, and passes through the IO stream +func (c *Client) SendFile(serviceMethod string, data io.Reader, ret interface{}) error { + body, err := c.callWithRetry(serviceMethod, data, true) + if err != nil { + return err + } + defer body.Close() + if err := json.NewDecoder(body).Decode(&ret); err != nil { + logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err) + return err + } + return nil +} + +func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool) (io.ReadCloser, error) { + var retries int + start := time.Now() + + for { + req, err := c.requestFactory.NewRequest(serviceMethod, data) + if err != nil { + return nil, err + } + + resp, err := c.http.Do(req) + if err != nil { + if !retry { + return nil, err + } + + timeOff := backoff(retries) + if abort(start, timeOff) { + return nil, err + } + retries++ + logrus.Warnf("Unable to connect to plugin: %s%s: %v, retrying in %v", req.URL.Host, req.URL.Path, err, timeOff) + time.Sleep(timeOff) + continue + } + + if resp.StatusCode != http.StatusOK { + b, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return nil, &statusError{resp.StatusCode, serviceMethod, err.Error()} + } + + // Plugins' Response(s) should have an Err field indicating what went + // wrong. Try to unmarshal into ResponseErr. Otherwise fallback to just + // return the string(body) + type responseErr struct { + Err string + } + remoteErr := responseErr{} + if err := json.Unmarshal(b, &remoteErr); err == nil { + if remoteErr.Err != "" { + return nil, &statusError{resp.StatusCode, serviceMethod, remoteErr.Err} + } + } + // old way... 
+ return nil, &statusError{resp.StatusCode, serviceMethod, string(b)} + } + return resp.Body, nil + } +} + +func backoff(retries int) time.Duration { + b, max := 1, defaultTimeOut + for b < max && retries > 0 { + b *= 2 + retries-- + } + if b > max { + b = max + } + return time.Duration(b) * time.Second +} + +func abort(start time.Time, timeOff time.Duration) bool { + return timeOff+time.Since(start) >= time.Duration(defaultTimeOut)*time.Second +} + +func httpScheme(u *url.URL) string { + scheme := u.Scheme + if scheme != "https" { + scheme = "http" + } + return scheme +} diff --git a/vendor/github.com/moby/moby/pkg/plugins/client_test.go b/vendor/github.com/moby/moby/pkg/plugins/client_test.go new file mode 100644 index 000000000..7c519a276 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/plugins/client_test.go @@ -0,0 +1,234 @@ +package plugins + +import ( + "bytes" + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "net/url" + "strings" + "testing" + "time" + + "github.com/docker/docker/pkg/plugins/transport" + "github.com/docker/go-connections/tlsconfig" + "github.com/stretchr/testify/assert" +) + +var ( + mux *http.ServeMux + server *httptest.Server +) + +func setupRemotePluginServer() string { + mux = http.NewServeMux() + server = httptest.NewServer(mux) + return server.URL +} + +func teardownRemotePluginServer() { + if server != nil { + server.Close() + } +} + +func TestFailedConnection(t *testing.T) { + c, _ := NewClient("tcp://127.0.0.1:1", &tlsconfig.Options{InsecureSkipVerify: true}) + _, err := c.callWithRetry("Service.Method", nil, false) + if err == nil { + t.Fatal("Unexpected successful connection") + } +} + +func TestFailOnce(t *testing.T) { + addr := setupRemotePluginServer() + defer teardownRemotePluginServer() + + failed := false + mux.HandleFunc("/Test.FailOnce", func(w http.ResponseWriter, r *http.Request) { + if !failed { + failed = true + panic("Plugin not ready") + } + }) + + c, _ := NewClient(addr, &tlsconfig.Options{InsecureSkipVerify: true}) + b := strings.NewReader("body") + _, err := c.callWithRetry("Test.FailOnce", b, true) + if err != nil { + t.Fatal(err) + } +} + +func TestEchoInputOutput(t *testing.T) { + addr := setupRemotePluginServer() + defer teardownRemotePluginServer() + + m := Manifest{[]string{"VolumeDriver", "NetworkDriver"}} + + mux.HandleFunc("/Test.Echo", func(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + t.Fatalf("Expected POST, got %s\n", r.Method) + } + + header := w.Header() + header.Set("Content-Type", transport.VersionMimetype) + + io.Copy(w, r.Body) + }) + + c, _ := NewClient(addr, &tlsconfig.Options{InsecureSkipVerify: true}) + var output Manifest + err := c.Call("Test.Echo", m, &output) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, m, output) + err = c.Call("Test.Echo", nil, nil) + if err != nil { + t.Fatal(err) + } +} + +func TestBackoff(t *testing.T) { + cases := []struct { + retries int + expTimeOff time.Duration + }{ + {0, time.Duration(1)}, + {1, time.Duration(2)}, + {2, time.Duration(4)}, + {4, time.Duration(16)}, + {6, time.Duration(30)}, + {10, time.Duration(30)}, + } + + for _, c := range cases { + s := c.expTimeOff * time.Second + if d := backoff(c.retries); d != s { + t.Fatalf("Retry %v, expected %v, was %v\n", c.retries, s, d) + } + } +} + +func TestAbortRetry(t *testing.T) { + cases := []struct { + timeOff time.Duration + expAbort bool + }{ + {time.Duration(1), false}, + {time.Duration(2), false}, + {time.Duration(10), false}, + {time.Duration(30), true}, + 
{time.Duration(40), true}, + } + + for _, c := range cases { + s := c.timeOff * time.Second + if a := abort(time.Now(), s); a != c.expAbort { + t.Fatalf("Duration %v, expected %v, was %v\n", c.timeOff, s, a) + } + } +} + +func TestClientScheme(t *testing.T) { + cases := map[string]string{ + "tcp://127.0.0.1:8080": "http", + "unix:///usr/local/plugins/foo": "http", + "http://127.0.0.1:8080": "http", + "https://127.0.0.1:8080": "https", + } + + for addr, scheme := range cases { + u, err := url.Parse(addr) + if err != nil { + t.Fatal(err) + } + s := httpScheme(u) + + if s != scheme { + t.Fatalf("URL scheme mismatch, expected %s, got %s", scheme, s) + } + } +} + +func TestNewClientWithTimeout(t *testing.T) { + addr := setupRemotePluginServer() + defer teardownRemotePluginServer() + + m := Manifest{[]string{"VolumeDriver", "NetworkDriver"}} + + mux.HandleFunc("/Test.Echo", func(w http.ResponseWriter, r *http.Request) { + time.Sleep(time.Duration(600) * time.Millisecond) + io.Copy(w, r.Body) + }) + + // setting timeout of 500ms + timeout := time.Duration(500) * time.Millisecond + c, _ := NewClientWithTimeout(addr, &tlsconfig.Options{InsecureSkipVerify: true}, timeout) + var output Manifest + err := c.Call("Test.Echo", m, &output) + if err == nil { + t.Fatal("Expected timeout error") + } +} + +func TestClientStream(t *testing.T) { + addr := setupRemotePluginServer() + defer teardownRemotePluginServer() + + m := Manifest{[]string{"VolumeDriver", "NetworkDriver"}} + var output Manifest + + mux.HandleFunc("/Test.Echo", func(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + t.Fatalf("Expected POST, got %s", r.Method) + } + + header := w.Header() + header.Set("Content-Type", transport.VersionMimetype) + + io.Copy(w, r.Body) + }) + + c, _ := NewClient(addr, &tlsconfig.Options{InsecureSkipVerify: true}) + body, err := c.Stream("Test.Echo", m) + if err != nil { + t.Fatal(err) + } + defer body.Close() + if err := json.NewDecoder(body).Decode(&output); err != nil { + t.Fatalf("Test.Echo: error reading plugin resp: %v", err) + } + assert.Equal(t, m, output) +} + +func TestClientSendFile(t *testing.T) { + addr := setupRemotePluginServer() + defer teardownRemotePluginServer() + + m := Manifest{[]string{"VolumeDriver", "NetworkDriver"}} + var output Manifest + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(m); err != nil { + t.Fatal(err) + } + mux.HandleFunc("/Test.Echo", func(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + t.Fatalf("Expected POST, got %s\n", r.Method) + } + + header := w.Header() + header.Set("Content-Type", transport.VersionMimetype) + + io.Copy(w, r.Body) + }) + + c, _ := NewClient(addr, &tlsconfig.Options{InsecureSkipVerify: true}) + if err := c.SendFile("Test.Echo", &buf, &output); err != nil { + t.Fatal(err) + } + assert.Equal(t, m, output) +} diff --git a/vendor/github.com/moby/moby/pkg/plugins/discovery.go b/vendor/github.com/moby/moby/pkg/plugins/discovery.go new file mode 100644 index 000000000..e99581c57 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/plugins/discovery.go @@ -0,0 +1,131 @@ +package plugins + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/url" + "os" + "path/filepath" + "strings" + "sync" +) + +var ( + // ErrNotFound plugin not found + ErrNotFound = errors.New("plugin not found") + socketsPath = "/run/docker/plugins" +) + +// localRegistry defines a registry that is local (using unix socket). 
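+// As an illustrative sketch (editorial, using this package's defaults): a
+// plugin named "foo" is discovered either as a unix socket
+// /run/docker/plugins/foo.sock, or via a spec file such as
+// /etc/docker/plugins/foo.spec whose whole content is a single address,
+// e.g.
+//
+//	tcp://localhost:8080
+//
+// or via a foo.json file carrying Name, Addr and optional TLSConfig fields.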
+type localRegistry struct{} + +func newLocalRegistry() localRegistry { + return localRegistry{} +} + +// Scan scans all the plugin paths and returns all the names it found +func Scan() ([]string, error) { + var names []string + if err := filepath.Walk(socketsPath, func(path string, fi os.FileInfo, err error) error { + if err != nil { + return nil + } + + if fi.Mode()&os.ModeSocket != 0 { + name := strings.TrimSuffix(fi.Name(), filepath.Ext(fi.Name())) + names = append(names, name) + } + return nil + }); err != nil { + return nil, err + } + + for _, path := range specsPaths { + if err := filepath.Walk(path, func(p string, fi os.FileInfo, err error) error { + if err != nil || fi.IsDir() { + return nil + } + name := strings.TrimSuffix(fi.Name(), filepath.Ext(fi.Name())) + names = append(names, name) + return nil + }); err != nil { + return nil, err + } + } + return names, nil +} + +// Plugin returns the plugin registered with the given name (or returns an error). +func (l *localRegistry) Plugin(name string) (*Plugin, error) { + socketpaths := pluginPaths(socketsPath, name, ".sock") + + for _, p := range socketpaths { + if fi, err := os.Stat(p); err == nil && fi.Mode()&os.ModeSocket != 0 { + return NewLocalPlugin(name, "unix://"+p), nil + } + } + + var txtspecpaths []string + for _, p := range specsPaths { + txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".spec")...) + txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".json")...) + } + + for _, p := range txtspecpaths { + if _, err := os.Stat(p); err == nil { + if strings.HasSuffix(p, ".json") { + return readPluginJSONInfo(name, p) + } + return readPluginInfo(name, p) + } + } + return nil, ErrNotFound +} + +func readPluginInfo(name, path string) (*Plugin, error) { + content, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + addr := strings.TrimSpace(string(content)) + + u, err := url.Parse(addr) + if err != nil { + return nil, err + } + + if len(u.Scheme) == 0 { + return nil, fmt.Errorf("Unknown protocol") + } + + return NewLocalPlugin(name, addr), nil +} + +func readPluginJSONInfo(name, path string) (*Plugin, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + var p Plugin + if err := json.NewDecoder(f).Decode(&p); err != nil { + return nil, err + } + p.name = name + if p.TLSConfig != nil && len(p.TLSConfig.CAFile) == 0 { + p.TLSConfig.InsecureSkipVerify = true + } + p.activateWait = sync.NewCond(&sync.Mutex{}) + + return &p, nil +} + +func pluginPaths(base, name, ext string) []string { + return []string{ + filepath.Join(base, name+ext), + filepath.Join(base, name, name+ext), + } +} diff --git a/vendor/github.com/moby/moby/pkg/plugins/discovery_test.go b/vendor/github.com/moby/moby/pkg/plugins/discovery_test.go new file mode 100644 index 000000000..1a23faaea --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/plugins/discovery_test.go @@ -0,0 +1,152 @@ +package plugins + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func Setup(t *testing.T) (string, func()) { + tmpdir, err := ioutil.TempDir("", "docker-test") + if err != nil { + t.Fatal(err) + } + backup := socketsPath + socketsPath = tmpdir + specsPaths = []string{tmpdir} + + return tmpdir, func() { + socketsPath = backup + os.RemoveAll(tmpdir) + } +} + +func TestFileSpecPlugin(t *testing.T) { + tmpdir, unregister := Setup(t) + defer unregister() + + cases := []struct { + path string + name string + addr string + fail bool + }{ + // TODO Windows: Factor out the unix:// variants. 
+ {filepath.Join(tmpdir, "echo.spec"), "echo", "unix://var/lib/docker/plugins/echo.sock", false}, + {filepath.Join(tmpdir, "echo", "echo.spec"), "echo", "unix://var/lib/docker/plugins/echo.sock", false}, + {filepath.Join(tmpdir, "foo.spec"), "foo", "tcp://localhost:8080", false}, + {filepath.Join(tmpdir, "foo", "foo.spec"), "foo", "tcp://localhost:8080", false}, + {filepath.Join(tmpdir, "bar.spec"), "bar", "localhost:8080", true}, // unknown transport + } + + for _, c := range cases { + if err := os.MkdirAll(filepath.Dir(c.path), 0755); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(c.path, []byte(c.addr), 0644); err != nil { + t.Fatal(err) + } + + r := newLocalRegistry() + p, err := r.Plugin(c.name) + if c.fail && err == nil { + continue + } + + if err != nil { + t.Fatal(err) + } + + if p.name != c.name { + t.Fatalf("Expected plugin `%s`, got %s\n", c.name, p.name) + } + + if p.Addr != c.addr { + t.Fatalf("Expected plugin addr `%s`, got %s\n", c.addr, p.Addr) + } + + if !p.TLSConfig.InsecureSkipVerify { + t.Fatalf("Expected TLS verification to be skipped") + } + } +} + +func TestFileJSONSpecPlugin(t *testing.T) { + tmpdir, unregister := Setup(t) + defer unregister() + + p := filepath.Join(tmpdir, "example.json") + spec := `{ + "Name": "plugin-example", + "Addr": "https://example.com/docker/plugin", + "TLSConfig": { + "CAFile": "/usr/shared/docker/certs/example-ca.pem", + "CertFile": "/usr/shared/docker/certs/example-cert.pem", + "KeyFile": "/usr/shared/docker/certs/example-key.pem" + } +}` + + if err := ioutil.WriteFile(p, []byte(spec), 0644); err != nil { + t.Fatal(err) + } + + r := newLocalRegistry() + plugin, err := r.Plugin("example") + if err != nil { + t.Fatal(err) + } + + if expected, actual := "example", plugin.name; expected != actual { + t.Fatalf("Expected plugin %q, got %s\n", expected, actual) + } + + if plugin.Addr != "https://example.com/docker/plugin" { + t.Fatalf("Expected plugin addr `https://example.com/docker/plugin`, got %s\n", plugin.Addr) + } + + if plugin.TLSConfig.CAFile != "/usr/shared/docker/certs/example-ca.pem" { + t.Fatalf("Expected plugin CA `/usr/shared/docker/certs/example-ca.pem`, got %s\n", plugin.TLSConfig.CAFile) + } + + if plugin.TLSConfig.CertFile != "/usr/shared/docker/certs/example-cert.pem" { + t.Fatalf("Expected plugin Certificate `/usr/shared/docker/certs/example-cert.pem`, got %s\n", plugin.TLSConfig.CertFile) + } + + if plugin.TLSConfig.KeyFile != "/usr/shared/docker/certs/example-key.pem" { + t.Fatalf("Expected plugin Key `/usr/shared/docker/certs/example-key.pem`, got %s\n", plugin.TLSConfig.KeyFile) + } +} + +func TestFileJSONSpecPluginWithoutTLSConfig(t *testing.T) { + tmpdir, unregister := Setup(t) + defer unregister() + + p := filepath.Join(tmpdir, "example.json") + spec := `{ + "Name": "plugin-example", + "Addr": "https://example.com/docker/plugin" +}` + + if err := ioutil.WriteFile(p, []byte(spec), 0644); err != nil { + t.Fatal(err) + } + + r := newLocalRegistry() + plugin, err := r.Plugin("example") + if err != nil { + t.Fatal(err) + } + + if expected, actual := "example", plugin.name; expected != actual { + t.Fatalf("Expected plugin %q, got %s\n", expected, actual) + } + + if plugin.Addr != "https://example.com/docker/plugin" { + t.Fatalf("Expected plugin addr `https://example.com/docker/plugin`, got %s\n", plugin.Addr) + } + + if plugin.TLSConfig != nil { + t.Fatalf("Expected plugin TLSConfig nil, got %v\n", plugin.TLSConfig) + } +} diff --git a/vendor/github.com/moby/moby/pkg/plugins/discovery_unix.go 
b/vendor/github.com/moby/moby/pkg/plugins/discovery_unix.go new file mode 100644 index 000000000..693a47e39 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/plugins/discovery_unix.go @@ -0,0 +1,5 @@ +// +build !windows + +package plugins + +var specsPaths = []string{"/etc/docker/plugins", "/usr/lib/docker/plugins"} diff --git a/vendor/github.com/moby/moby/pkg/plugins/discovery_unix_test.go b/vendor/github.com/moby/moby/pkg/plugins/discovery_unix_test.go new file mode 100644 index 000000000..66f50353c --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/plugins/discovery_unix_test.go @@ -0,0 +1,100 @@ +// +build !windows + +package plugins + +import ( + "fmt" + "io/ioutil" + "net" + "os" + "path/filepath" + "reflect" + "testing" +) + +func TestLocalSocket(t *testing.T) { + // TODO Windows: Enable a similar version for Windows named pipes + tmpdir, unregister := Setup(t) + defer unregister() + + cases := []string{ + filepath.Join(tmpdir, "echo.sock"), + filepath.Join(tmpdir, "echo", "echo.sock"), + } + + for _, c := range cases { + if err := os.MkdirAll(filepath.Dir(c), 0755); err != nil { + t.Fatal(err) + } + + l, err := net.Listen("unix", c) + if err != nil { + t.Fatal(err) + } + + r := newLocalRegistry() + p, err := r.Plugin("echo") + if err != nil { + t.Fatal(err) + } + + pp, err := r.Plugin("echo") + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(p, pp) { + t.Fatalf("Expected %v, was %v\n", p, pp) + } + + if p.name != "echo" { + t.Fatalf("Expected plugin `echo`, got %s\n", p.name) + } + + addr := fmt.Sprintf("unix://%s", c) + if p.Addr != addr { + t.Fatalf("Expected plugin addr `%s`, got %s\n", addr, p.Addr) + } + if !p.TLSConfig.InsecureSkipVerify { + t.Fatalf("Expected TLS verification to be skipped") + } + l.Close() + } +} + +func TestScan(t *testing.T) { + tmpdir, unregister := Setup(t) + defer unregister() + + pluginNames, err := Scan() + if err != nil { + t.Fatal(err) + } + if pluginNames != nil { + t.Fatal("Plugin names should be empty.") + } + + path := filepath.Join(tmpdir, "echo.spec") + addr := "unix://var/lib/docker/plugins/echo.sock" + name := "echo" + + err = os.MkdirAll(filepath.Dir(path), 0755) + if err != nil { + t.Fatal(err) + } + + err = ioutil.WriteFile(path, []byte(addr), 0644) + if err != nil { + t.Fatal(err) + } + + r := newLocalRegistry() + p, err := r.Plugin(name) + + pluginNamesNotEmpty, err := Scan() + if err != nil { + t.Fatal(err) + } + if p.Name() != pluginNamesNotEmpty[0] { + t.Fatalf("Unable to scan plugin with name %s", p.name) + } +} diff --git a/vendor/github.com/moby/moby/pkg/plugins/discovery_windows.go b/vendor/github.com/moby/moby/pkg/plugins/discovery_windows.go new file mode 100644 index 000000000..d7c1fe494 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/plugins/discovery_windows.go @@ -0,0 +1,8 @@ +package plugins + +import ( + "os" + "path/filepath" +) + +var specsPaths = []string{filepath.Join(os.Getenv("programdata"), "docker", "plugins")} diff --git a/vendor/github.com/moby/moby/pkg/plugins/errors.go b/vendor/github.com/moby/moby/pkg/plugins/errors.go new file mode 100644 index 000000000..798847102 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/plugins/errors.go @@ -0,0 +1,33 @@ +package plugins + +import ( + "fmt" + "net/http" +) + +type statusError struct { + status int + method string + err string +} + +// Error returns a formatted string for this error type +func (e *statusError) Error() string { + return fmt.Sprintf("%s: %v", e.method, e.err) +} + +// IsNotFound indicates if the passed in error is from an 
http.StatusNotFound from the plugin +func IsNotFound(err error) bool { + return isStatusError(err, http.StatusNotFound) +} + +func isStatusError(err error, status int) bool { + if err == nil { + return false + } + e, ok := err.(*statusError) + if !ok { + return false + } + return e.status == status +} diff --git a/vendor/github.com/moby/moby/pkg/plugins/plugin_test.go b/vendor/github.com/moby/moby/pkg/plugins/plugin_test.go new file mode 100644 index 000000000..00fcb85f5 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/plugins/plugin_test.go @@ -0,0 +1,156 @@ +package plugins + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "io/ioutil" + "net/http" + "path/filepath" + "runtime" + "sync" + "testing" + "time" + + "github.com/docker/docker/pkg/plugins/transport" + "github.com/docker/go-connections/tlsconfig" + "github.com/stretchr/testify/assert" +) + +const ( + fruitPlugin = "fruit" + fruitImplements = "apple" +) + +// regression test for deadlock in handlers +func TestPluginAddHandler(t *testing.T) { + // make a plugin which is pre-activated + p := &Plugin{activateWait: sync.NewCond(&sync.Mutex{})} + p.Manifest = &Manifest{Implements: []string{"bananas"}} + storage.plugins["qwerty"] = p + + testActive(t, p) + Handle("bananas", func(_ string, _ *Client) {}) + testActive(t, p) +} + +func TestPluginWaitBadPlugin(t *testing.T) { + p := &Plugin{activateWait: sync.NewCond(&sync.Mutex{})} + p.activateErr = errors.New("some junk happened") + testActive(t, p) +} + +func testActive(t *testing.T, p *Plugin) { + done := make(chan struct{}) + go func() { + p.waitActive() + close(done) + }() + + select { + case <-time.After(100 * time.Millisecond): + _, f, l, _ := runtime.Caller(1) + t.Fatalf("%s:%d: deadlock in waitActive", filepath.Base(f), l) + case <-done: + } + +} + +func TestGet(t *testing.T) { + p := &Plugin{name: fruitPlugin, activateWait: sync.NewCond(&sync.Mutex{})} + p.Manifest = &Manifest{Implements: []string{fruitImplements}} + storage.plugins[fruitPlugin] = p + + plugin, err := Get(fruitPlugin, fruitImplements) + if err != nil { + t.Fatal(err) + } + if p.Name() != plugin.Name() { + t.Fatalf("No matching plugin with name %s found", plugin.Name()) + } + if plugin.Client() != nil { + t.Fatal("expected nil Client but found one") + } + if !plugin.IsV1() { + t.Fatal("Expected true for V1 plugin") + } + + // check negative case where plugin fruit doesn't implement banana + _, err = Get("fruit", "banana") + assert.Equal(t, err, ErrNotImplements) + + // check negative case where plugin vegetable doesn't exist + _, err = Get("vegetable", "potato") + assert.Equal(t, err, ErrNotFound) + +} + +func TestPluginWithNoManifest(t *testing.T) { + addr := setupRemotePluginServer() + defer teardownRemotePluginServer() + + m := Manifest{[]string{fruitImplements}} + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(m); err != nil { + t.Fatal(err) + } + + mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + t.Fatalf("Expected POST, got %s\n", r.Method) + } + + header := w.Header() + header.Set("Content-Type", transport.VersionMimetype) + + io.Copy(w, &buf) + }) + + p := &Plugin{ + name: fruitPlugin, + activateWait: sync.NewCond(&sync.Mutex{}), + Addr: addr, + TLSConfig: &tlsconfig.Options{InsecureSkipVerify: true}, + } + storage.plugins[fruitPlugin] = p + + plugin, err := Get(fruitPlugin, fruitImplements) + if err != nil { + t.Fatal(err) + } + if p.Name() != plugin.Name() { + t.Fatalf("No matching plugin with name %s found", 
plugin.Name())
+	}
+}
+
+func TestGetAll(t *testing.T) {
+	tmpdir, unregister := Setup(t)
+	defer unregister()
+
+	p := filepath.Join(tmpdir, "example.json")
+	spec := `{
+  "Name": "example",
+  "Addr": "https://example.com/docker/plugin"
+}`
+
+	if err := ioutil.WriteFile(p, []byte(spec), 0644); err != nil {
+		t.Fatal(err)
+	}
+
+	r := newLocalRegistry()
+	plugin, err := r.Plugin("example")
+	if err != nil {
+		t.Fatal(err)
+	}
+	plugin.Manifest = &Manifest{Implements: []string{"apple"}}
+	storage.plugins["example"] = plugin
+
+	fetchedPlugins, err := GetAll("apple")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if fetchedPlugins[0].Name() != plugin.Name() {
+		t.Fatalf("Expected to get plugin with name %s", plugin.Name())
+	}
+}
diff --git a/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/README.md b/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/README.md
new file mode 100644
index 000000000..5f6a421f1
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/README.md
@@ -0,0 +1,58 @@
+Plugin RPC Generator
+====================
+
+Generates Go code from a Go interface definition for proxying between the plugin
+API and the subsystem being extended.
+
+## Usage
+
+Given an interface definition:
+
+```go
+type volumeDriver interface {
+	Create(name string, opts opts) (err error)
+	Remove(name string) (err error)
+	Path(name string) (mountpoint string, err error)
+	Mount(name string) (mountpoint string, err error)
+	Unmount(name string) (err error)
+}
+```
+
+**Note**: All function arguments and return values must be named in the definition.
+
+Run the generator:
+
+```bash
+$ pluginrpc-gen --type volumeDriver --name VolumeDriver -i volumes/drivers/extpoint.go -o volumes/drivers/proxy.go
+```
+
+Where:
+- `--type` is the name of the interface to use
+- `--name` is the subsystem that the plugin "Implements"
+- `-i` is the input file containing the interface definition
+- `-o` is the output file where the generated code should go
+
+**Note**: The generated code will use the same package name as the one defined in the input file.
+
+Optionally, you can skip functions on the interface that should not be
+implemented in the generated proxy code by passing in the function name to `--skip`.
+This flag can be specified multiple times.
+
+You can also add build tags that should be prepended to the generated code by
+supplying `--tag`. This flag can be specified multiple times.
+
+## Known issues
+
+## go-generate
+
+You can also use this with go-generate, which is pretty awesome.
+To do so, place the code at the top of the file which contains the interface
+definition (i.e., the input file):
+
+```go
+//go:generate pluginrpc-gen -i $GOFILE -o proxy.go -type volumeDriver -name VolumeDriver
+```
+
+Then cd to the package dir and run `go generate`.
+
+**Note**: the `pluginrpc-gen` binary must be within your `$PATH`.
diff --git a/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/fixtures/foo.go b/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/fixtures/foo.go
new file mode 100644
index 000000000..5695dcc2d
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/fixtures/foo.go
@@ -0,0 +1,89 @@
+package foo
+
+import (
+	"fmt"
+
+	aliasedio "io"
+
+	"github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/otherfixture"
+)
+
+var (
+	errFakeImport = fmt.Errorf("just to import fmt for imports tests")
+)
+
+type wobble struct {
+	Some      string
+	Val       string
+	Inception *wobble
+}
+
+// Fooer is an empty interface used for tests.
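+// An interface with no methods should yield a proxy with no generated call
+// methods, since the generator's template only emits code per parsed function.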
+type Fooer interface{} + +// Fooer2 is an interface used for tests. +type Fooer2 interface { + Foo() +} + +// Fooer3 is an interface used for tests. +type Fooer3 interface { + Foo() + Bar(a string) + Baz(a string) (err error) + Qux(a, b string) (val string, err error) + Wobble() (w *wobble) + Wiggle() (w wobble) + WiggleWobble(a []*wobble, b []wobble, c map[string]*wobble, d map[*wobble]wobble, e map[string][]wobble, f []*otherfixture.Spaceship) (g map[*wobble]wobble, h [][]*wobble, i otherfixture.Spaceship, j *otherfixture.Spaceship, k map[*otherfixture.Spaceship]otherfixture.Spaceship, l []otherfixture.Spaceship) +} + +// Fooer4 is an interface used for tests. +type Fooer4 interface { + Foo() error +} + +// Bar is an interface used for tests. +type Bar interface { + Boo(a string, b string) (s string, err error) +} + +// Fooer5 is an interface used for tests. +type Fooer5 interface { + Foo() + Bar +} + +// Fooer6 is an interface used for tests. +type Fooer6 interface { + Foo(a otherfixture.Spaceship) +} + +// Fooer7 is an interface used for tests. +type Fooer7 interface { + Foo(a *otherfixture.Spaceship) +} + +// Fooer8 is an interface used for tests. +type Fooer8 interface { + Foo(a map[string]otherfixture.Spaceship) +} + +// Fooer9 is an interface used for tests. +type Fooer9 interface { + Foo(a map[string]*otherfixture.Spaceship) +} + +// Fooer10 is an interface used for tests. +type Fooer10 interface { + Foo(a []otherfixture.Spaceship) +} + +// Fooer11 is an interface used for tests. +type Fooer11 interface { + Foo(a []*otherfixture.Spaceship) +} + +// Fooer12 is an interface used for tests. +type Fooer12 interface { + Foo(a aliasedio.Reader) +} diff --git a/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/fixtures/otherfixture/spaceship.go b/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/fixtures/otherfixture/spaceship.go new file mode 100644 index 000000000..1937d1786 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/fixtures/otherfixture/spaceship.go @@ -0,0 +1,4 @@ +package otherfixture + +// Spaceship is a fixture for tests +type Spaceship struct{} diff --git a/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/main.go b/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/main.go new file mode 100644 index 000000000..e77a7d45f --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/main.go @@ -0,0 +1,91 @@ +package main + +import ( + "bytes" + "flag" + "fmt" + "go/format" + "io/ioutil" + "os" + "unicode" + "unicode/utf8" +) + +type stringSet struct { + values map[string]struct{} +} + +func (s stringSet) String() string { + return "" +} + +func (s stringSet) Set(value string) error { + s.values[value] = struct{}{} + return nil +} +func (s stringSet) GetValues() map[string]struct{} { + return s.values +} + +var ( + typeName = flag.String("type", "", "interface type to generate plugin rpc proxy for") + rpcName = flag.String("name", *typeName, "RPC name, set if different from type") + inputFile = flag.String("i", "", "input file path") + outputFile = flag.String("o", *inputFile+"_proxy.go", "output file path") + + skipFuncs map[string]struct{} + flSkipFuncs = stringSet{make(map[string]struct{})} + + flBuildTags = stringSet{make(map[string]struct{})} +) + +func errorOut(msg string, err error) { + if err == nil { + return + } + fmt.Fprintf(os.Stderr, "%s: %v\n", msg, err) + os.Exit(1) +} + +func checkFlags() error { + if *outputFile == "" { + return fmt.Errorf("missing required flag `-o`") + } + if *inputFile == "" { + return 
fmt.Errorf("missing required flag `-i`") + } + return nil +} + +func main() { + flag.Var(flSkipFuncs, "skip", "skip parsing for function") + flag.Var(flBuildTags, "tag", "build tags to add to generated files") + flag.Parse() + skipFuncs = flSkipFuncs.GetValues() + + errorOut("error", checkFlags()) + + pkg, err := Parse(*inputFile, *typeName) + errorOut(fmt.Sprintf("error parsing requested type %s", *typeName), err) + + var analysis = struct { + InterfaceType string + RPCName string + BuildTags map[string]struct{} + *ParsedPkg + }{toLower(*typeName), *rpcName, flBuildTags.GetValues(), pkg} + var buf bytes.Buffer + + errorOut("parser error", generatedTempl.Execute(&buf, analysis)) + src, err := format.Source(buf.Bytes()) + errorOut("error formatting generated source:\n"+buf.String(), err) + errorOut("error writing file", ioutil.WriteFile(*outputFile, src, 0644)) +} + +func toLower(s string) string { + if s == "" { + return "" + } + r, n := utf8.DecodeRuneInString(s) + return string(unicode.ToLower(r)) + s[n:] +} diff --git a/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/parser.go b/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/parser.go new file mode 100644 index 000000000..6c547e18c --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/parser.go @@ -0,0 +1,263 @@ +package main + +import ( + "errors" + "fmt" + "go/ast" + "go/parser" + "go/token" + "path" + "reflect" + "strings" +) + +var errBadReturn = errors.New("found return arg with no name: all args must be named") + +type errUnexpectedType struct { + expected string + actual interface{} +} + +func (e errUnexpectedType) Error() string { + return fmt.Sprintf("got wrong type expecting %s, got: %v", e.expected, reflect.TypeOf(e.actual)) +} + +// ParsedPkg holds information about a package that has been parsed, +// its name and the list of functions. +type ParsedPkg struct { + Name string + Functions []function + Imports []importSpec +} + +type function struct { + Name string + Args []arg + Returns []arg + Doc string +} + +type arg struct { + Name string + ArgType string + PackageSelector string +} + +func (a *arg) String() string { + return a.Name + " " + a.ArgType +} + +type importSpec struct { + Name string + Path string +} + +func (s *importSpec) String() string { + var ss string + if len(s.Name) != 0 { + ss += s.Name + } + ss += s.Path + return ss +} + +// Parse parses the given file for an interface definition with the given name. +func Parse(filePath string, objName string) (*ParsedPkg, error) { + fs := token.NewFileSet() + pkg, err := parser.ParseFile(fs, filePath, nil, parser.AllErrors) + if err != nil { + return nil, err + } + p := &ParsedPkg{} + p.Name = pkg.Name.Name + obj, exists := pkg.Scope.Objects[objName] + if !exists { + return nil, fmt.Errorf("could not find object %s in %s", objName, filePath) + } + if obj.Kind != ast.Typ { + return nil, fmt.Errorf("exected type, got %s", obj.Kind) + } + spec, ok := obj.Decl.(*ast.TypeSpec) + if !ok { + return nil, errUnexpectedType{"*ast.TypeSpec", obj.Decl} + } + iface, ok := spec.Type.(*ast.InterfaceType) + if !ok { + return nil, errUnexpectedType{"*ast.InterfaceType", spec.Type} + } + + p.Functions, err = parseInterface(iface) + if err != nil { + return nil, err + } + + // figure out what imports will be needed + imports := make(map[string]importSpec) + for _, f := range p.Functions { + args := append(f.Args, f.Returns...) 
+		for _, arg := range args {
+			if len(arg.PackageSelector) == 0 {
+				continue
+			}
+
+			for _, i := range pkg.Imports {
+				if i.Name != nil {
+					if i.Name.Name != arg.PackageSelector {
+						continue
+					}
+					imports[i.Path.Value] = importSpec{Name: arg.PackageSelector, Path: i.Path.Value}
+					break
+				}
+
+				_, name := path.Split(i.Path.Value)
+				splitName := strings.Split(name, "-")
+				if len(splitName) > 1 {
+					name = splitName[len(splitName)-1]
+				}
+				// import paths have quotes already added in, so need to remove them for name comparison
+				name = strings.TrimPrefix(name, `"`)
+				name = strings.TrimSuffix(name, `"`)
+				if name == arg.PackageSelector {
+					imports[i.Path.Value] = importSpec{Path: i.Path.Value}
+					break
+				}
+			}
+		}
+	}
+
+	for _, spec := range imports {
+		p.Imports = append(p.Imports, spec)
+	}
+
+	return p, nil
+}
+
+func parseInterface(iface *ast.InterfaceType) ([]function, error) {
+	var functions []function
+	for _, field := range iface.Methods.List {
+		switch f := field.Type.(type) {
+		case *ast.FuncType:
+			method, err := parseFunc(field)
+			if err != nil {
+				return nil, err
+			}
+			if method == nil {
+				continue
+			}
+			functions = append(functions, *method)
+		case *ast.Ident:
+			spec, ok := f.Obj.Decl.(*ast.TypeSpec)
+			if !ok {
+				return nil, errUnexpectedType{"*ast.TypeSpec", f.Obj.Decl}
+			}
+			iface, ok := spec.Type.(*ast.InterfaceType)
+			if !ok {
+				return nil, errUnexpectedType{"*ast.InterfaceType", spec.Type}
+			}
+			funcs, err := parseInterface(iface)
+			if err != nil {
+				fmt.Println(err)
+				continue
+			}
+			functions = append(functions, funcs...)
+		default:
+			return nil, errUnexpectedType{"*ast.FuncType or *ast.Ident", f}
+		}
+	}
+	return functions, nil
+}
+
+func parseFunc(field *ast.Field) (*function, error) {
+	f := field.Type.(*ast.FuncType)
+	method := &function{Name: field.Names[0].Name}
+	if _, exists := skipFuncs[method.Name]; exists {
+		fmt.Println("skipping:", method.Name)
+		return nil, nil
+	}
+	if f.Params != nil {
+		args, err := parseArgs(f.Params.List)
+		if err != nil {
+			return nil, err
+		}
+		method.Args = args
+	}
+	if f.Results != nil {
+		returns, err := parseArgs(f.Results.List)
+		if err != nil {
+			return nil, fmt.Errorf("error parsing function returns for %q: %v", method.Name, err)
+		}
+		method.Returns = returns
+	}
+	return method, nil
+}
+
+func parseArgs(fields []*ast.Field) ([]arg, error) {
+	var args []arg
+	for _, f := range fields {
+		if len(f.Names) == 0 {
+			return nil, errBadReturn
+		}
+		for _, name := range f.Names {
+			p, err := parseExpr(f.Type)
+			if err != nil {
+				return nil, err
+			}
+			args = append(args, arg{name.Name, p.value, p.pkg})
+		}
+	}
+	return args, nil
+}
+
+type parsedExpr struct {
+	value string
+	pkg   string
+}
+
+func parseExpr(e ast.Expr) (parsedExpr, error) {
+	var parsed parsedExpr
+	switch i := e.(type) {
+	case *ast.Ident:
+		parsed.value += i.Name
+	case *ast.StarExpr:
+		p, err := parseExpr(i.X)
+		if err != nil {
+			return parsed, err
+		}
+		parsed.value += "*"
+		parsed.value += p.value
+		parsed.pkg = p.pkg
+	case *ast.SelectorExpr:
+		p, err := parseExpr(i.X)
+		if err != nil {
+			return parsed, err
+		}
+		parsed.pkg = p.value
+		parsed.value += p.value + "."
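+		// i.Sel is the selected identifier, so for example otherfixture.Spaceship
+		// yields pkg "otherfixture" and value "otherfixture.Spaceship".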
+ parsed.value += i.Sel.Name + case *ast.MapType: + parsed.value += "map[" + p, err := parseExpr(i.Key) + if err != nil { + return parsed, err + } + parsed.value += p.value + parsed.value += "]" + p, err = parseExpr(i.Value) + if err != nil { + return parsed, err + } + parsed.value += p.value + parsed.pkg = p.pkg + case *ast.ArrayType: + parsed.value += "[]" + p, err := parseExpr(i.Elt) + if err != nil { + return parsed, err + } + parsed.value += p.value + parsed.pkg = p.pkg + default: + return parsed, errUnexpectedType{"*ast.Ident or *ast.StarExpr", i} + } + return parsed, nil +} diff --git a/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/parser_test.go b/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/parser_test.go new file mode 100644 index 000000000..fe7fa5ade --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/parser_test.go @@ -0,0 +1,222 @@ +package main + +import ( + "fmt" + "path/filepath" + "runtime" + "strings" + "testing" +) + +const testFixture = "fixtures/foo.go" + +func TestParseEmptyInterface(t *testing.T) { + pkg, err := Parse(testFixture, "Fooer") + if err != nil { + t.Fatal(err) + } + + assertName(t, "foo", pkg.Name) + assertNum(t, 0, len(pkg.Functions)) +} + +func TestParseNonInterfaceType(t *testing.T) { + _, err := Parse(testFixture, "wobble") + if _, ok := err.(errUnexpectedType); !ok { + t.Fatal("expected type error when parsing non-interface type") + } +} + +func TestParseWithOneFunction(t *testing.T) { + pkg, err := Parse(testFixture, "Fooer2") + if err != nil { + t.Fatal(err) + } + + assertName(t, "foo", pkg.Name) + assertNum(t, 1, len(pkg.Functions)) + assertName(t, "Foo", pkg.Functions[0].Name) + assertNum(t, 0, len(pkg.Functions[0].Args)) + assertNum(t, 0, len(pkg.Functions[0].Returns)) +} + +func TestParseWithMultipleFuncs(t *testing.T) { + pkg, err := Parse(testFixture, "Fooer3") + if err != nil { + t.Fatal(err) + } + + assertName(t, "foo", pkg.Name) + assertNum(t, 7, len(pkg.Functions)) + + f := pkg.Functions[0] + assertName(t, "Foo", f.Name) + assertNum(t, 0, len(f.Args)) + assertNum(t, 0, len(f.Returns)) + + f = pkg.Functions[1] + assertName(t, "Bar", f.Name) + assertNum(t, 1, len(f.Args)) + assertNum(t, 0, len(f.Returns)) + arg := f.Args[0] + assertName(t, "a", arg.Name) + assertName(t, "string", arg.ArgType) + + f = pkg.Functions[2] + assertName(t, "Baz", f.Name) + assertNum(t, 1, len(f.Args)) + assertNum(t, 1, len(f.Returns)) + arg = f.Args[0] + assertName(t, "a", arg.Name) + assertName(t, "string", arg.ArgType) + arg = f.Returns[0] + assertName(t, "err", arg.Name) + assertName(t, "error", arg.ArgType) + + f = pkg.Functions[3] + assertName(t, "Qux", f.Name) + assertNum(t, 2, len(f.Args)) + assertNum(t, 2, len(f.Returns)) + arg = f.Args[0] + assertName(t, "a", f.Args[0].Name) + assertName(t, "string", f.Args[0].ArgType) + arg = f.Args[1] + assertName(t, "b", arg.Name) + assertName(t, "string", arg.ArgType) + arg = f.Returns[0] + assertName(t, "val", arg.Name) + assertName(t, "string", arg.ArgType) + arg = f.Returns[1] + assertName(t, "err", arg.Name) + assertName(t, "error", arg.ArgType) + + f = pkg.Functions[4] + assertName(t, "Wobble", f.Name) + assertNum(t, 0, len(f.Args)) + assertNum(t, 1, len(f.Returns)) + arg = f.Returns[0] + assertName(t, "w", arg.Name) + assertName(t, "*wobble", arg.ArgType) + + f = pkg.Functions[5] + assertName(t, "Wiggle", f.Name) + assertNum(t, 0, len(f.Args)) + assertNum(t, 1, len(f.Returns)) + arg = f.Returns[0] + assertName(t, "w", arg.Name) + assertName(t, "wobble", arg.ArgType) + + f = 
pkg.Functions[6] + assertName(t, "WiggleWobble", f.Name) + assertNum(t, 6, len(f.Args)) + assertNum(t, 6, len(f.Returns)) + expectedArgs := [][]string{ + {"a", "[]*wobble"}, + {"b", "[]wobble"}, + {"c", "map[string]*wobble"}, + {"d", "map[*wobble]wobble"}, + {"e", "map[string][]wobble"}, + {"f", "[]*otherfixture.Spaceship"}, + } + for i, arg := range f.Args { + assertName(t, expectedArgs[i][0], arg.Name) + assertName(t, expectedArgs[i][1], arg.ArgType) + } + expectedReturns := [][]string{ + {"g", "map[*wobble]wobble"}, + {"h", "[][]*wobble"}, + {"i", "otherfixture.Spaceship"}, + {"j", "*otherfixture.Spaceship"}, + {"k", "map[*otherfixture.Spaceship]otherfixture.Spaceship"}, + {"l", "[]otherfixture.Spaceship"}, + } + for i, ret := range f.Returns { + assertName(t, expectedReturns[i][0], ret.Name) + assertName(t, expectedReturns[i][1], ret.ArgType) + } +} + +func TestParseWithUnnamedReturn(t *testing.T) { + _, err := Parse(testFixture, "Fooer4") + if !strings.HasSuffix(err.Error(), errBadReturn.Error()) { + t.Fatalf("expected ErrBadReturn, got %v", err) + } +} + +func TestEmbeddedInterface(t *testing.T) { + pkg, err := Parse(testFixture, "Fooer5") + if err != nil { + t.Fatal(err) + } + + assertName(t, "foo", pkg.Name) + assertNum(t, 2, len(pkg.Functions)) + + f := pkg.Functions[0] + assertName(t, "Foo", f.Name) + assertNum(t, 0, len(f.Args)) + assertNum(t, 0, len(f.Returns)) + + f = pkg.Functions[1] + assertName(t, "Boo", f.Name) + assertNum(t, 2, len(f.Args)) + assertNum(t, 2, len(f.Returns)) + + arg := f.Args[0] + assertName(t, "a", arg.Name) + assertName(t, "string", arg.ArgType) + + arg = f.Args[1] + assertName(t, "b", arg.Name) + assertName(t, "string", arg.ArgType) + + arg = f.Returns[0] + assertName(t, "s", arg.Name) + assertName(t, "string", arg.ArgType) + + arg = f.Returns[1] + assertName(t, "err", arg.Name) + assertName(t, "error", arg.ArgType) +} + +func TestParsedImports(t *testing.T) { + cases := []string{"Fooer6", "Fooer7", "Fooer8", "Fooer9", "Fooer10", "Fooer11"} + for _, testCase := range cases { + pkg, err := Parse(testFixture, testCase) + if err != nil { + t.Fatal(err) + } + + assertNum(t, 1, len(pkg.Imports)) + importPath := strings.Split(pkg.Imports[0].Path, "/") + assertName(t, "otherfixture\"", importPath[len(importPath)-1]) + assertName(t, "", pkg.Imports[0].Name) + } +} + +func TestAliasedImports(t *testing.T) { + pkg, err := Parse(testFixture, "Fooer12") + if err != nil { + t.Fatal(err) + } + + assertNum(t, 1, len(pkg.Imports)) + assertName(t, "aliasedio", pkg.Imports[0].Name) +} + +func assertName(t *testing.T, expected, actual string) { + if expected != actual { + fatalOut(t, fmt.Sprintf("expected name to be `%s`, got: %s", expected, actual)) + } +} + +func assertNum(t *testing.T, expected, actual int) { + if expected != actual { + fatalOut(t, fmt.Sprintf("expected number to be %d, got: %d", expected, actual)) + } +} + +func fatalOut(t *testing.T, msg string) { + _, file, ln, _ := runtime.Caller(2) + t.Fatalf("%s:%d: %s", filepath.Base(file), ln, msg) +} diff --git a/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/template.go b/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/template.go new file mode 100644 index 000000000..50ed9293c --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/plugins/pluginrpc-gen/template.go @@ -0,0 +1,118 @@ +package main + +import ( + "strings" + "text/template" +) + +func printArgs(args []arg) string { + var argStr []string + for _, arg := range args { + argStr = append(argStr, arg.String()) + } + return 
strings.Join(argStr, ", ")
+}
+
+func buildImports(specs []importSpec) string {
+	if len(specs) == 0 {
+		return `import "errors"`
+	}
+	imports := "import(\n"
+	imports += "\t\"errors\"\n"
+	for _, i := range specs {
+		imports += "\t" + i.String() + "\n"
+	}
+	imports += ")"
+	return imports
+}
+
+func marshalType(t string) string {
+	switch t {
+	case "error":
+		// convert error types to plain strings to ensure the values are encoded/decoded properly
+		return "string"
+	default:
+		return t
+	}
+}
+
+func isErr(t string) bool {
+	switch t {
+	case "error":
+		return true
+	default:
+		return false
+	}
+}
+
+// Need to use this helper due to issues with go-vet
+func buildTag(s string) string {
+	return "+build " + s
+}
+
+var templFuncs = template.FuncMap{
+	"printArgs":   printArgs,
+	"marshalType": marshalType,
+	"isErr":       isErr,
+	"lower":       strings.ToLower,
+	"title":       title,
+	"tag":         buildTag,
+	"imports":     buildImports,
+}
+
+func title(s string) string {
+	if strings.ToLower(s) == "id" {
+		return "ID"
+	}
+	return strings.Title(s)
+}
+
+var generatedTempl = template.Must(template.New("rpc_client").Funcs(templFuncs).Parse(`
+// generated code - DO NOT EDIT
+{{ range $k, $v := .BuildTags }}
+	// {{ tag $k }} {{ end }}
+
+package {{ .Name }}
+
+{{ imports .Imports }}
+
+type client interface{
+	Call(string, interface{}, interface{}) error
+}
+
+type {{ .InterfaceType }}Proxy struct {
+	client
+}
+
+{{ range .Functions }}
+	type {{ $.InterfaceType }}Proxy{{ .Name }}Request struct{
+		{{ range .Args }}
+		{{ title .Name }} {{ .ArgType }} {{ end }}
+	}
+
+	type {{ $.InterfaceType }}Proxy{{ .Name }}Response struct{
+		{{ range .Returns }}
+		{{ title .Name }} {{ marshalType .ArgType }} {{ end }}
+	}
+
+	func (pp *{{ $.InterfaceType }}Proxy) {{ .Name }}({{ printArgs .Args }}) ({{ printArgs .Returns }}) {
+		var(
+			req {{ $.InterfaceType }}Proxy{{ .Name }}Request
+			ret {{ $.InterfaceType }}Proxy{{ .Name }}Response
+		)
+		{{ range .Args }}
+		req.{{ title .Name }} = {{ lower .Name }} {{ end }}
+		if err = pp.Call("{{ $.RPCName }}.{{ .Name }}", req, &ret); err != nil {
+			return
+		}
+		{{ range $r := .Returns }}
+		{{ if isErr .ArgType }}
+		if ret.{{ title .Name }} != "" {
+			{{ lower .Name }} = errors.New(ret.{{ title .Name }})
+		} {{ end }}
+		{{ if isErr .ArgType | not }} {{ lower .Name }} = ret.{{ title .Name }} {{ end }} {{ end }}
+
+		return
+	}
+{{ end }}
+`))
diff --git a/vendor/github.com/moby/moby/pkg/plugins/plugins.go b/vendor/github.com/moby/moby/pkg/plugins/plugins.go
new file mode 100644
index 000000000..c0059cba7
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/plugins/plugins.go
@@ -0,0 +1,329 @@
+// Package plugins provides structures and helper functions to manage Docker
+// plugins.
+//
+// Docker discovers plugins by looking for them in the plugin directory whenever
+// a user or container tries to use one by name. UNIX domain socket files must
+// be located under /run/docker/plugins, whereas spec files can be located
+// either under /etc/docker/plugins or /usr/lib/docker/plugins. This is handled
+// by the Registry interface, which lets you list all plugins or get a plugin by
+// its name if it exists.
+//
+// The plugins need to implement an HTTP server and bind this to the UNIX socket
+// or the address specified in the spec files.
+// A handshake is sent at /Plugin.Activate, and plugins are expected to return
+// a Manifest with a list of Docker subsystems which this plugin implements.
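+//
+// Spec files come in two forms: plain-text ".spec" files containing just the
+// plugin address, and ".json" files describing the plugin's Name, Addr and
+// optional TLSConfig (the discovery tests above exercise both forms).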
+//
+// In order to use a plugin, you can use ``Get`` with the name of the
+// plugin and the subsystem it implements.
+//
+//	plugin, err := plugins.Get("example", "VolumeDriver")
+//	if err != nil {
+//		return fmt.Errorf("Error looking up volume plugin example: %v", err)
+//	}
+package plugins
+
+import (
+	"errors"
+	"sync"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/go-connections/tlsconfig"
+)
+
+var (
+	// ErrNotImplements is returned if the plugin does not implement the requested driver.
+	ErrNotImplements = errors.New("Plugin does not implement the requested driver")
+)
+
+type plugins struct {
+	sync.Mutex
+	plugins map[string]*Plugin
+}
+
+type extpointHandlers struct {
+	sync.RWMutex
+	extpointHandlers map[string][]func(string, *Client)
+}
+
+var (
+	storage  = plugins{plugins: make(map[string]*Plugin)}
+	handlers = extpointHandlers{extpointHandlers: make(map[string][]func(string, *Client))}
+)
+
+// Manifest lists what a plugin implements.
+type Manifest struct {
+	// List of subsystem the plugin implements.
+	Implements []string
+}
+
+// Plugin is the definition of a Docker plugin.
+type Plugin struct {
+	// Name of the plugin
+	name string
+	// Address of the plugin
+	Addr string
+	// TLS configuration of the plugin
+	TLSConfig *tlsconfig.Options
+	// Client attached to the plugin
+	client *Client
+	// Manifest of the plugin (see above)
+	Manifest *Manifest `json:"-"`
+
+	// wait for activation to finish
+	activateWait *sync.Cond
+	// error produced by activation
+	activateErr error
+	// keeps track of callback handlers run against this plugin
+	handlersRun bool
+}
+
+// Name returns the name of the plugin.
+func (p *Plugin) Name() string {
+	return p.name
+}
+
+// Client returns a ready-to-use plugin client that can be used to communicate with the plugin.
+func (p *Plugin) Client() *Client {
+	return p.client
+}
+
+// IsV1 returns true for V1 plugins and false otherwise.
+func (p *Plugin) IsV1() bool {
+	return true
+}
+
+// NewLocalPlugin creates a new local plugin.
+func NewLocalPlugin(name, addr string) *Plugin {
+	return &Plugin{
+		name: name,
+		Addr: addr,
+		// TODO: change to nil
+		TLSConfig:    &tlsconfig.Options{InsecureSkipVerify: true},
+		activateWait: sync.NewCond(&sync.Mutex{}),
+	}
+}
+
+func (p *Plugin) activate() error {
+	p.activateWait.L.Lock()
+
+	if p.activated() {
+		p.runHandlers()
+		p.activateWait.L.Unlock()
+		return p.activateErr
+	}
+
+	p.activateErr = p.activateWithLock()
+
+	p.runHandlers()
+	p.activateWait.L.Unlock()
+	p.activateWait.Broadcast()
+	return p.activateErr
+}
+
+// runHandlers runs the registered handlers for the implemented plugin types.
+// This should only be run after activation, and while the activation lock is held.
+func (p *Plugin) runHandlers() {
+	if !p.activated() {
+		return
+	}
+
+	handlers.RLock()
+	if !p.handlersRun {
+		for _, iface := range p.Manifest.Implements {
+			hdlrs, handled := handlers.extpointHandlers[iface]
+			if !handled {
+				continue
+			}
+			for _, handler := range hdlrs {
+				handler(p.name, p.client)
+			}
+		}
+		p.handlersRun = true
+	}
+	handlers.RUnlock()
+}
+
+// activated returns whether the plugin has already been activated.
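+// Activation success is recorded by setting Manifest, so a nil Manifest means
+// the handshake has not completed yet.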
+// This should only be called with the activation lock held +func (p *Plugin) activated() bool { + return p.Manifest != nil +} + +func (p *Plugin) activateWithLock() error { + c, err := NewClient(p.Addr, p.TLSConfig) + if err != nil { + return err + } + p.client = c + + m := new(Manifest) + if err = p.client.Call("Plugin.Activate", nil, m); err != nil { + return err + } + + p.Manifest = m + return nil +} + +func (p *Plugin) waitActive() error { + p.activateWait.L.Lock() + for !p.activated() && p.activateErr == nil { + p.activateWait.Wait() + } + p.activateWait.L.Unlock() + return p.activateErr +} + +func (p *Plugin) implements(kind string) bool { + if p.Manifest == nil { + return false + } + for _, driver := range p.Manifest.Implements { + if driver == kind { + return true + } + } + return false +} + +func load(name string) (*Plugin, error) { + return loadWithRetry(name, true) +} + +func loadWithRetry(name string, retry bool) (*Plugin, error) { + registry := newLocalRegistry() + start := time.Now() + + var retries int + for { + pl, err := registry.Plugin(name) + if err != nil { + if !retry { + return nil, err + } + + timeOff := backoff(retries) + if abort(start, timeOff) { + return nil, err + } + retries++ + logrus.Warnf("Unable to locate plugin: %s, retrying in %v", name, timeOff) + time.Sleep(timeOff) + continue + } + + storage.Lock() + if pl, exists := storage.plugins[name]; exists { + storage.Unlock() + return pl, pl.activate() + } + storage.plugins[name] = pl + storage.Unlock() + + err = pl.activate() + + if err != nil { + storage.Lock() + delete(storage.plugins, name) + storage.Unlock() + } + + return pl, err + } +} + +func get(name string) (*Plugin, error) { + storage.Lock() + pl, ok := storage.plugins[name] + storage.Unlock() + if ok { + return pl, pl.activate() + } + return load(name) +} + +// Get returns the plugin given the specified name and requested implementation. +func Get(name, imp string) (*Plugin, error) { + pl, err := get(name) + if err != nil { + return nil, err + } + if err := pl.waitActive(); err == nil && pl.implements(imp) { + logrus.Debugf("%s implements: %s", name, imp) + return pl, nil + } + return nil, ErrNotImplements +} + +// Handle adds the specified function to the extpointHandlers. 
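+// Plugins that are already activated and implement iface get handlersRun reset,
+// so the newly added handler also runs for them on their next activation check.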
+func Handle(iface string, fn func(string, *Client)) {
+	handlers.Lock()
+	hdlrs, ok := handlers.extpointHandlers[iface]
+	if !ok {
+		hdlrs = []func(string, *Client){}
+	}
+
+	hdlrs = append(hdlrs, fn)
+	handlers.extpointHandlers[iface] = hdlrs
+
+	storage.Lock()
+	for _, p := range storage.plugins {
+		p.activateWait.L.Lock()
+		if p.activated() && p.implements(iface) {
+			p.handlersRun = false
+		}
+		p.activateWait.L.Unlock()
+	}
+	storage.Unlock()
+
+	handlers.Unlock()
+}
+
+// GetAll returns all the plugins for the specified implementation
+func GetAll(imp string) ([]*Plugin, error) {
+	pluginNames, err := Scan()
+	if err != nil {
+		return nil, err
+	}
+
+	type plLoad struct {
+		pl  *Plugin
+		err error
+	}
+
+	chPl := make(chan *plLoad, len(pluginNames))
+	var wg sync.WaitGroup
+	for _, name := range pluginNames {
+		storage.Lock()
+		pl, ok := storage.plugins[name]
+		storage.Unlock()
+		if ok {
+			chPl <- &plLoad{pl, nil}
+			continue
+		}
+
+		wg.Add(1)
+		go func(name string) {
+			defer wg.Done()
+			pl, err := loadWithRetry(name, false)
+			chPl <- &plLoad{pl, err}
+		}(name)
+	}
+
+	wg.Wait()
+	close(chPl)
+
+	var out []*Plugin
+	for pl := range chPl {
+		if pl.err != nil {
+			logrus.Error(pl.err)
+			continue
+		}
+		if err := pl.pl.waitActive(); err == nil && pl.pl.implements(imp) {
+			out = append(out, pl.pl)
+		}
+	}
+	return out, nil
+}
diff --git a/vendor/github.com/moby/moby/pkg/plugins/plugins_unix.go b/vendor/github.com/moby/moby/pkg/plugins/plugins_unix.go
new file mode 100644
index 000000000..02f1da69a
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/plugins/plugins_unix.go
@@ -0,0 +1,9 @@
+// +build !windows
+
+package plugins
+
+// BasePath returns the path to which all paths returned by the plugin are relative.
+// For v1 plugins, this always returns the host's root directory.
+func (p *Plugin) BasePath() string {
+	return "/"
+}
diff --git a/vendor/github.com/moby/moby/pkg/plugins/plugins_windows.go b/vendor/github.com/moby/moby/pkg/plugins/plugins_windows.go
new file mode 100644
index 000000000..3c8d8feb8
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/plugins/plugins_windows.go
@@ -0,0 +1,8 @@
+package plugins
+
+// BasePath returns the path to which all paths returned by the plugin are relative.
+// For Windows v1 plugins, this returns an empty string, since the plugin is already aware
+// of the absolute path of the mount.
+func (p *Plugin) BasePath() string {
+	return ""
+}
diff --git a/vendor/github.com/moby/moby/pkg/plugins/transport/http.go b/vendor/github.com/moby/moby/pkg/plugins/transport/http.go
new file mode 100644
index 000000000..5be146af6
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/plugins/transport/http.go
@@ -0,0 +1,36 @@
+package transport
+
+import (
+	"io"
+	"net/http"
+)
+
+// httpTransport holds an http.RoundTripper
+// and information about the scheme and address the transport
+// sends requests to.
+type httpTransport struct {
+	http.RoundTripper
+	scheme string
+	addr   string
+}
+
+// NewHTTPTransport creates a new httpTransport.
+func NewHTTPTransport(r http.RoundTripper, scheme, addr string) Transport {
+	return httpTransport{
+		RoundTripper: r,
+		scheme:       scheme,
+		addr:         addr,
+	}
+}
+
+// NewRequest creates a new http.Request and sets the URL
+// scheme and address with the transport's fields.
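+// The returned request is a POST with the plugin Accept header already set by
+// newHTTPRequest (see transport.go).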
+func (t httpTransport) NewRequest(path string, data io.Reader) (*http.Request, error) { + req, err := newHTTPRequest(path, data) + if err != nil { + return nil, err + } + req.URL.Scheme = t.scheme + req.URL.Host = t.addr + return req, nil +} diff --git a/vendor/github.com/moby/moby/pkg/plugins/transport/http_test.go b/vendor/github.com/moby/moby/pkg/plugins/transport/http_test.go new file mode 100644 index 000000000..b724fd0df --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/plugins/transport/http_test.go @@ -0,0 +1,20 @@ +package transport + +import ( + "io" + "net/http" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestHTTPTransport(t *testing.T) { + var r io.Reader + roundTripper := &http.Transport{} + newTransport := NewHTTPTransport(roundTripper, "http", "0.0.0.0") + request, err := newTransport.NewRequest("", r) + if err != nil { + t.Fatal(err) + } + assert.Equal(t, "POST", request.Method) +} diff --git a/vendor/github.com/moby/moby/pkg/plugins/transport/transport.go b/vendor/github.com/moby/moby/pkg/plugins/transport/transport.go new file mode 100644 index 000000000..d7f1e2100 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/plugins/transport/transport.go @@ -0,0 +1,36 @@ +package transport + +import ( + "io" + "net/http" + "strings" +) + +// VersionMimetype is the Content-Type the engine sends to plugins. +const VersionMimetype = "application/vnd.docker.plugins.v1.2+json" + +// RequestFactory defines an interface that +// transports can implement to create new requests. +type RequestFactory interface { + NewRequest(path string, data io.Reader) (*http.Request, error) +} + +// Transport defines an interface that plugin transports +// must implement. +type Transport interface { + http.RoundTripper + RequestFactory +} + +// newHTTPRequest creates a new request with a path and a body. +func newHTTPRequest(path string, data io.Reader) (*http.Request, error) { + if !strings.HasPrefix(path, "/") { + path = "/" + path + } + req, err := http.NewRequest("POST", path, data) + if err != nil { + return nil, err + } + req.Header.Add("Accept", VersionMimetype) + return req, nil +} diff --git a/vendor/github.com/moby/moby/pkg/pools/pools.go b/vendor/github.com/moby/moby/pkg/pools/pools.go new file mode 100644 index 000000000..6a111a3ba --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/pools/pools.go @@ -0,0 +1,137 @@ +// Package pools provides a collection of pools which provide various +// data types with buffers. These can be used to lower the number of +// memory allocations and reuse buffers. +// +// New pools should be added to this package to allow them to be +// shared across packages. +// +// Utility functions which operate on pools should be added to this +// package to allow them to be reused. +package pools + +import ( + "bufio" + "io" + "sync" + + "github.com/docker/docker/pkg/ioutils" +) + +const buffer32K = 32 * 1024 + +var ( + // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer. + BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) + // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer. + BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) + buffer32KPool = newBufferPoolWithSize(buffer32K) +) + +// BufioReaderPool is a bufio reader that uses sync.Pool. +type BufioReaderPool struct { + pool sync.Pool +} + +// newBufioReaderPoolWithSize is unexported because new pools should be +// added here to be shared where required. 
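+// The pool's New function lazily allocates a bufio.Reader with the configured
+// buffer size when Get finds the pool empty.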
+func newBufioReaderPoolWithSize(size int) *BufioReaderPool {
+	return &BufioReaderPool{
+		pool: sync.Pool{
+			New: func() interface{} { return bufio.NewReaderSize(nil, size) },
+		},
+	}
+}
+
+// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool.
+func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader {
+	buf := bufPool.pool.Get().(*bufio.Reader)
+	buf.Reset(r)
+	return buf
+}
+
+// Put puts the bufio.Reader back into the pool.
+func (bufPool *BufioReaderPool) Put(b *bufio.Reader) {
+	b.Reset(nil)
+	bufPool.pool.Put(b)
+}
+
+type bufferPool struct {
+	pool sync.Pool
+}
+
+func newBufferPoolWithSize(size int) *bufferPool {
+	return &bufferPool{
+		pool: sync.Pool{
+			New: func() interface{} { return make([]byte, size) },
+		},
+	}
+}
+
+func (bp *bufferPool) Get() []byte {
+	return bp.pool.Get().([]byte)
+}
+
+func (bp *bufferPool) Put(b []byte) {
+	bp.pool.Put(b)
+}
+
+// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy.
+func Copy(dst io.Writer, src io.Reader) (written int64, err error) {
+	buf := buffer32KPool.Get()
+	written, err = io.CopyBuffer(dst, src, buf)
+	buffer32KPool.Put(buf)
+	return
+}
+
+// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back
+// into the pool and closes the reader if it's an io.ReadCloser.
+func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser {
+	return ioutils.NewReadCloserWrapper(r, func() error {
+		if readCloser, ok := r.(io.ReadCloser); ok {
+			readCloser.Close()
+		}
+		bufPool.Put(buf)
+		return nil
+	})
+}
+
+// BufioWriterPool is a bufio writer that uses sync.Pool.
+type BufioWriterPool struct {
+	pool sync.Pool
+}
+
+// newBufioWriterPoolWithSize is unexported because new pools should be
+// added here to be shared where required.
+func newBufioWriterPoolWithSize(size int) *BufioWriterPool {
+	return &BufioWriterPool{
+		pool: sync.Pool{
+			New: func() interface{} { return bufio.NewWriterSize(nil, size) },
+		},
+	}
+}
+
+// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool.
+func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer {
+	buf := bufPool.pool.Get().(*bufio.Writer)
+	buf.Reset(w)
+	return buf
+}
+
+// Put puts the bufio.Writer back into the pool.
+func (bufPool *BufioWriterPool) Put(b *bufio.Writer) {
+	b.Reset(nil)
+	bufPool.pool.Put(b)
+}
+
+// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back
+// into the pool and closes the writer if it's an io.WriteCloser.
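+// The bufio.Writer is flushed before the underlying writer is closed, so no
+// buffered data is lost.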
+func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser {
+	return ioutils.NewWriteCloserWrapper(w, func() error {
+		buf.Flush()
+		if writeCloser, ok := w.(io.WriteCloser); ok {
+			writeCloser.Close()
+		}
+		bufPool.Put(buf)
+		return nil
+	})
+}
diff --git a/vendor/github.com/moby/moby/pkg/pools/pools_test.go b/vendor/github.com/moby/moby/pkg/pools/pools_test.go
new file mode 100644
index 000000000..d71cb99ac
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/pools/pools_test.go
@@ -0,0 +1,166 @@
+package pools
+
+import (
+	"bufio"
+	"bytes"
+	"io"
+	"strings"
+	"testing"
+)
+
+func TestBufioReaderPoolGetWithNoReaderShouldCreateOne(t *testing.T) {
+	reader := BufioReader32KPool.Get(nil)
+	if reader == nil {
+		t.Fatalf("BufioReaderPool should have created a bufio.Reader but did not.")
+	}
+}
+
+func TestBufioReaderPoolPutAndGet(t *testing.T) {
+	sr := bufio.NewReader(strings.NewReader("foobar"))
+	reader := BufioReader32KPool.Get(sr)
+	if reader == nil {
+		t.Fatalf("BufioReaderPool should not return a nil reader.")
+	}
+	// verify the first 3 bytes
+	buf1 := make([]byte, 3)
+	_, err := reader.Read(buf1)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if actual := string(buf1); actual != "foo" {
+		t.Fatalf("The first 3 letters should have been 'foo' but were %v", actual)
+	}
+	BufioReader32KPool.Put(reader)
+	// Try to read the next 3 bytes
+	_, err = sr.Read(make([]byte, 3))
+	if err == nil || err != io.EOF {
+		t.Fatalf("The buffer should have been empty, issuing an EOF error.")
+	}
+}
+
+type simpleReaderCloser struct {
+	io.Reader
+	closed bool
+}
+
+func (r *simpleReaderCloser) Close() error {
+	r.closed = true
+	return nil
+}
+
+func TestNewReadCloserWrapperWithAReadCloser(t *testing.T) {
+	br := bufio.NewReader(strings.NewReader(""))
+	sr := &simpleReaderCloser{
+		Reader: strings.NewReader("foobar"),
+		closed: false,
+	}
+	reader := BufioReader32KPool.NewReadCloserWrapper(br, sr)
+	if reader == nil {
+		t.Fatalf("NewReadCloserWrapper should not return a nil reader.")
+	}
+	// Verify the content of reader
+	buf := make([]byte, 3)
+	_, err := reader.Read(buf)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if actual := string(buf); actual != "foo" {
+		t.Fatalf("The first 3 letters should have been 'foo' but were %v", actual)
+	}
+	reader.Close()
+	// Read 3 more bytes "bar"
+	_, err = reader.Read(buf)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if actual := string(buf); actual != "bar" {
+		t.Fatalf("The next 3 letters should have been 'bar' but were %v", actual)
+	}
+	if !sr.closed {
+		t.Fatalf("The ReadCloser should have been closed, it is not.")
+	}
+}
+
+func TestBufioWriterPoolGetWithNoReaderShouldCreateOne(t *testing.T) {
+	writer := BufioWriter32KPool.Get(nil)
+	if writer == nil {
+		t.Fatalf("BufioWriterPool should have created a bufio.Writer but did not.")
+	}
+}
+
+func TestBufioWriterPoolPutAndGet(t *testing.T) {
+	buf := new(bytes.Buffer)
+	bw := bufio.NewWriter(buf)
+	writer := BufioWriter32KPool.Get(bw)
+	if writer == nil {
+		t.Fatalf("BufioWriterPool should not return a nil writer.")
+	}
+	written, err := writer.Write([]byte("foobar"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	if written != 6 {
+		t.Fatalf("Should have written 6 bytes, but wrote %v bytes", written)
+	}
+	// Flush both writers to make sure all data reaches the underlying buffer
+	writer.Flush()
+	bw.Flush()
+	if len(buf.Bytes()) != 6 {
+		t.Fatalf("The buffer should contain 6 bytes ('foobar') but contains %v ('%v')", buf.Bytes(), string(buf.Bytes()))
+	}
+	// Reset the buffer
+	buf.Reset()
+	BufioWriter32KPool.Put(writer)
+	// Try to write something
+	if _, err = writer.Write([]byte("barfoo")); err != nil {
+		t.Fatal(err)
+	}
+	// If we now try to flush it, it should panic (the writer is nil)
+	// recover it
+	defer func() {
+		if r := recover(); r == nil {
+			t.Fatal("Trying to flush the writer should have panicked, but it did not.")
+		}
+	}()
+	writer.Flush()
+}
+
+type simpleWriterCloser struct {
+	io.Writer
+	closed bool
+}
+
+func (r *simpleWriterCloser) Close() error {
+	r.closed = true
+	return nil
+}
+
+func TestNewWriteCloserWrapperWithAWriteCloser(t *testing.T) {
+	buf := new(bytes.Buffer)
+	bw := bufio.NewWriter(buf)
+	sw := &simpleWriterCloser{
+		Writer: new(bytes.Buffer),
+		closed: false,
+	}
+	bw.Flush()
+	writer := BufioWriter32KPool.NewWriteCloserWrapper(bw, sw)
+	if writer == nil {
+		t.Fatalf("BufioWriterPool should not return a nil writer.")
+	}
+	written, err := writer.Write([]byte("foobar"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	if written != 6 {
+		t.Fatalf("Should have written 6 bytes, but wrote %v bytes", written)
+	}
+	writer.Close()
+	if !sw.closed {
+		t.Fatalf("The WriteCloser should have been closed, it is not.")
+	}
+}
+
+func TestBufferPoolPutAndGet(t *testing.T) {
+	buf := buffer32KPool.Get()
+	buffer32KPool.Put(buf)
+}
diff --git a/vendor/github.com/moby/moby/pkg/progress/progress.go b/vendor/github.com/moby/moby/pkg/progress/progress.go
new file mode 100644
index 000000000..7c3d3a514
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/progress/progress.go
@@ -0,0 +1,89 @@
+package progress
+
+import (
+	"fmt"
+)
+
+// Progress represents the progress of a transfer.
+type Progress struct {
+	ID string
+
+	// Progress contains a Message or...
+	Message string
+
+	// ...progress of an action
+	Action  string
+	Current int64
+	Total   int64
+
+	// If true, don't show xB/yB
+	HideCounts bool
+	// If not empty, use units instead of bytes for counts
+	Units string
+
+	// Aux contains extra information not presented to the user, such as
+	// digests for push signing.
+	Aux interface{}
+
+	LastUpdate bool
+}
+
+// Output is an interface for writing progress information. It's
+// like a writer for progress, but we don't call it Writer because
+// that would be confusing next to ProgressReader (also, because it
+// doesn't implement the io.Writer interface).
+type Output interface {
+	WriteProgress(Progress) error
+}
+
+type chanOutput chan<- Progress
+
+func (out chanOutput) WriteProgress(p Progress) error {
+	out <- p
+	return nil
+}
+
+// ChanOutput returns an Output that writes progress updates to the
+// supplied channel.
+func ChanOutput(progressChan chan<- Progress) Output {
+	return chanOutput(progressChan)
+}
+
+type discardOutput struct{}
+
+func (discardOutput) WriteProgress(Progress) error {
+	return nil
+}
+
+// DiscardOutput returns an Output that discards progress.
+func DiscardOutput() Output {
+	return discardOutput{}
+}
+
+// Update is a convenience function to write a progress update to the channel.
+func Update(out Output, id, action string) {
+	out.WriteProgress(Progress{ID: id, Action: action})
+}
+
+// Updatef is a convenience function to write a printf-formatted progress update
+// to the channel.
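+// A hypothetical call: Updatef(out, layerID, "%d/%d layers", done, total).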
+func Updatef(out Output, id, format string, a ...interface{}) {
+	Update(out, id, fmt.Sprintf(format, a...))
+}
+
+// Message is a convenience function to write a progress message to the channel.
+func Message(out Output, id, message string) {
+	out.WriteProgress(Progress{ID: id, Message: message})
+}
+
+// Messagef is a convenience function to write a printf-formatted progress
+// message to the channel.
+func Messagef(out Output, id, format string, a ...interface{}) {
+	Message(out, id, fmt.Sprintf(format, a...))
+}
+
+// Aux sends auxiliary information over a progress interface, which will not be
+// formatted for the UI. This is used for things such as push signing.
+func Aux(out Output, a interface{}) {
+	out.WriteProgress(Progress{Aux: a})
+}
diff --git a/vendor/github.com/moby/moby/pkg/progress/progressreader.go b/vendor/github.com/moby/moby/pkg/progress/progressreader.go
new file mode 100644
index 000000000..6b3927eec
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/progress/progressreader.go
@@ -0,0 +1,66 @@
+package progress
+
+import (
+	"io"
+	"time"
+
+	"golang.org/x/time/rate"
+)
+
+// Reader is a Reader with a progress bar.
+type Reader struct {
+	in          io.ReadCloser // Stream to read from
+	out         Output        // Where to send progress bar to
+	size        int64
+	current     int64
+	lastUpdate  int64
+	id          string
+	action      string
+	rateLimiter *rate.Limiter
+}
+
+// NewProgressReader creates a new ProgressReader.
+func NewProgressReader(in io.ReadCloser, out Output, size int64, id, action string) *Reader {
+	return &Reader{
+		in:          in,
+		out:         out,
+		size:        size,
+		id:          id,
+		action:      action,
+		rateLimiter: rate.NewLimiter(rate.Every(100*time.Millisecond), 1),
+	}
+}
+
+func (p *Reader) Read(buf []byte) (n int, err error) {
+	read, err := p.in.Read(buf)
+	p.current += int64(read)
+	updateEvery := int64(1024 * 512) // 512kB
+	if p.size > 0 {
+		// Update progress for every 1% read if 1% < 512kB
+		if increment := int64(0.01 * float64(p.size)); increment < updateEvery {
+			updateEvery = increment
+		}
+	}
+	if p.current-p.lastUpdate > updateEvery || err != nil {
+		p.updateProgress(err != nil && read == 0)
+		p.lastUpdate = p.current
+	}
+
+	return read, err
+}
+
+// Close closes the progress reader and its underlying reader.
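+// If the stream was not fully consumed, one final update with Current == Total
+// is emitted first so the progress bar completes.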
+func (p *Reader) Close() error {
+	if p.current < p.size {
+		// print a full progress bar when closing prematurely
+		p.current = p.size
+		p.updateProgress(false)
+	}
+	return p.in.Close()
+}
+
+func (p *Reader) updateProgress(last bool) {
+	if last || p.current == p.size || p.rateLimiter.Allow() {
+		p.out.WriteProgress(Progress{ID: p.id, Action: p.action, Current: p.current, Total: p.size, LastUpdate: last})
+	}
+}
diff --git a/vendor/github.com/moby/moby/pkg/progress/progressreader_test.go b/vendor/github.com/moby/moby/pkg/progress/progressreader_test.go
new file mode 100644
index 000000000..690e70596
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/progress/progressreader_test.go
@@ -0,0 +1,75 @@
+package progress
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"testing"
+)
+
+func TestOutputOnPrematureClose(t *testing.T) {
+	content := []byte("TESTING")
+	reader := ioutil.NopCloser(bytes.NewReader(content))
+	progressChan := make(chan Progress, 10)
+
+	pr := NewProgressReader(reader, ChanOutput(progressChan), int64(len(content)), "Test", "Read")
+
+	part := make([]byte, 4)
+	_, err := io.ReadFull(pr, part)
+	if err != nil {
+		pr.Close()
+		t.Fatal(err)
+	}
+
+drainLoop:
+	for {
+		select {
+		case <-progressChan:
+		default:
+			break drainLoop
+		}
+	}
+
+	pr.Close()
+
+	select {
+	case <-progressChan:
+	default:
+		t.Fatalf("Expected some output when closing prematurely")
+	}
+}
+
+func TestCompleteSilently(t *testing.T) {
+	content := []byte("TESTING")
+	reader := ioutil.NopCloser(bytes.NewReader(content))
+	progressChan := make(chan Progress, 10)
+
+	pr := NewProgressReader(reader, ChanOutput(progressChan), int64(len(content)), "Test", "Read")
+
+	out, err := ioutil.ReadAll(pr)
+	if err != nil {
+		pr.Close()
+		t.Fatal(err)
+	}
+	if string(out) != "TESTING" {
+		pr.Close()
+		t.Fatalf("Unexpected output %q from reader", string(out))
+	}
+
+drainLoop:
+	for {
+		select {
+		case <-progressChan:
+		default:
+			break drainLoop
+		}
+	}
+
+	pr.Close()
+
+	select {
+	case <-progressChan:
+		t.Fatalf("Should have closed silently when read is complete")
+	default:
+	}
+}
diff --git a/vendor/github.com/moby/moby/pkg/promise/promise.go b/vendor/github.com/moby/moby/pkg/promise/promise.go
new file mode 100644
index 000000000..dd52b9082
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/promise/promise.go
@@ -0,0 +1,11 @@
+package promise
+
+// Go is a basic promise implementation: it wraps a function call in a goroutine
+// and returns a channel which will later return the function's return value.
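+// The returned channel has capacity 1, so the goroutine can deliver its result
+// and exit even if the caller never receives from the channel.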
+func Go(f func() error) chan error {
+	ch := make(chan error, 1)
+	go func() {
+		ch <- f()
+	}()
+	return ch
+}
diff --git a/vendor/github.com/moby/moby/pkg/promise/promise_test.go b/vendor/github.com/moby/moby/pkg/promise/promise_test.go
new file mode 100644
index 000000000..287213b50
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/promise/promise_test.go
@@ -0,0 +1,25 @@
+package promise
+
+import (
+	"errors"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestGo(t *testing.T) {
+	errCh := Go(functionWithError)
+	er := <-errCh
+	require.EqualValues(t, "Error Occurred", er.Error())
+
+	noErrCh := Go(functionWithNoError)
+	er = <-noErrCh
+	require.Nil(t, er)
+}
+
+func functionWithError() (err error) {
+	return errors.New("Error Occurred")
+}
+
+func functionWithNoError() (err error) {
+	return nil
+}
diff --git a/vendor/github.com/moby/moby/pkg/pubsub/publisher.go b/vendor/github.com/moby/moby/pkg/pubsub/publisher.go
new file mode 100644
index 000000000..8e30d16ae
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/pubsub/publisher.go
@@ -0,0 +1,121 @@
+package pubsub
+
+import (
+	"sync"
+	"time"
+)
+
+var wgPool = sync.Pool{New: func() interface{} { return new(sync.WaitGroup) }}
+
+// NewPublisher creates a new pub/sub publisher to broadcast messages.
+// The duration is used as the send timeout so as not to block the publisher publishing
+// messages to other clients if one client is slow or unresponsive.
+// The buffer is used when creating new channels for subscribers.
+func NewPublisher(publishTimeout time.Duration, buffer int) *Publisher {
+	return &Publisher{
+		buffer:      buffer,
+		timeout:     publishTimeout,
+		subscribers: make(map[subscriber]topicFunc),
+	}
+}
+
+type subscriber chan interface{}
+type topicFunc func(v interface{}) bool
+
+// Publisher is a basic pub/sub structure. It allows sending events and subscribing
+// to them. It can be safely used from multiple goroutines.
+type Publisher struct {
+	m           sync.RWMutex
+	buffer      int
+	timeout     time.Duration
+	subscribers map[subscriber]topicFunc
+}
+
+// Len returns the number of subscribers for the publisher
+func (p *Publisher) Len() int {
+	p.m.RLock()
+	i := len(p.subscribers)
+	p.m.RUnlock()
+	return i
+}
+
+// Subscribe adds a new subscriber to the publisher returning the channel.
+func (p *Publisher) Subscribe() chan interface{} {
+	return p.SubscribeTopic(nil)
+}
+
+// SubscribeTopic adds a new subscriber that filters messages sent by a topic.
+func (p *Publisher) SubscribeTopic(topic topicFunc) chan interface{} {
+	ch := make(chan interface{}, p.buffer)
+	p.m.Lock()
+	p.subscribers[ch] = topic
+	p.m.Unlock()
+	return ch
+}
+
+// SubscribeTopicWithBuffer adds a new subscriber that filters messages sent by a topic.
+// The returned channel has a buffer of the specified size.
+func (p *Publisher) SubscribeTopicWithBuffer(topic topicFunc, buffer int) chan interface{} {
+	ch := make(chan interface{}, buffer)
+	p.m.Lock()
+	p.subscribers[ch] = topic
+	p.m.Unlock()
+	return ch
+}
+
+// Evict removes the specified subscriber from receiving any more messages.
+func (p *Publisher) Evict(sub chan interface{}) {
+	p.m.Lock()
+	delete(p.subscribers, sub)
+	close(sub)
+	p.m.Unlock()
+}
+
+// Publish sends the data in v to all subscribers currently registered with the publisher.
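+// Each subscriber is served in its own goroutine: with a non-zero timeout a slow
+// subscriber is skipped once the timeout elapses, and with a zero timeout the
+// send is dropped if the subscriber is not ready (see sendTopic).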
+func (p *Publisher) Publish(v interface{}) {
+	p.m.RLock()
+	if len(p.subscribers) == 0 {
+		p.m.RUnlock()
+		return
+	}
+
+	wg := wgPool.Get().(*sync.WaitGroup)
+	for sub, topic := range p.subscribers {
+		wg.Add(1)
+		go p.sendTopic(sub, topic, v, wg)
+	}
+	wg.Wait()
+	wgPool.Put(wg)
+	p.m.RUnlock()
+}
+
+// Close closes the channels to all subscribers registered with the publisher.
+func (p *Publisher) Close() {
+	p.m.Lock()
+	for sub := range p.subscribers {
+		delete(p.subscribers, sub)
+		close(sub)
+	}
+	p.m.Unlock()
+}
+
+func (p *Publisher) sendTopic(sub subscriber, topic topicFunc, v interface{}, wg *sync.WaitGroup) {
+	defer wg.Done()
+	if topic != nil && !topic(v) {
+		return
+	}
+
+	// send under a select so as not to block if the receiver is unavailable
+	if p.timeout > 0 {
+		select {
+		case sub <- v:
+		case <-time.After(p.timeout):
+		}
+		return
+	}
+
+	select {
+	case sub <- v:
+	default:
+	}
+}
diff --git a/vendor/github.com/moby/moby/pkg/pubsub/publisher_test.go b/vendor/github.com/moby/moby/pkg/pubsub/publisher_test.go
new file mode 100644
index 000000000..d6b0a1d59
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/pubsub/publisher_test.go
@@ -0,0 +1,142 @@
+package pubsub
+
+import (
+	"fmt"
+	"testing"
+	"time"
+)
+
+func TestSendToOneSub(t *testing.T) {
+	p := NewPublisher(100*time.Millisecond, 10)
+	c := p.Subscribe()
+
+	p.Publish("hi")
+
+	msg := <-c
+	if msg.(string) != "hi" {
+		t.Fatalf("expected message hi but received %v", msg)
+	}
+}
+
+func TestSendToMultipleSubs(t *testing.T) {
+	p := NewPublisher(100*time.Millisecond, 10)
+	subs := []chan interface{}{}
+	subs = append(subs, p.Subscribe(), p.Subscribe(), p.Subscribe())
+
+	p.Publish("hi")
+
+	for _, c := range subs {
+		msg := <-c
+		if msg.(string) != "hi" {
+			t.Fatalf("expected message hi but received %v", msg)
+		}
+	}
+}
+
+func TestEvictOneSub(t *testing.T) {
+	p := NewPublisher(100*time.Millisecond, 10)
+	s1 := p.Subscribe()
+	s2 := p.Subscribe()
+
+	p.Evict(s1)
+	p.Publish("hi")
+	if _, ok := <-s1; ok {
+		t.Fatal("expected s1 to not receive the published message")
+	}
+
+	msg := <-s2
+	if msg.(string) != "hi" {
+		t.Fatalf("expected message hi but received %v", msg)
+	}
+}
+
+func TestClosePublisher(t *testing.T) {
+	p := NewPublisher(100*time.Millisecond, 10)
+	subs := []chan interface{}{}
+	subs = append(subs, p.Subscribe(), p.Subscribe(), p.Subscribe())
+	p.Close()
+
+	for _, c := range subs {
+		if _, ok := <-c; ok {
+			t.Fatal("expected all subscriber channels to be closed")
+		}
+	}
+}
+
+const sampleText = "test"
+
+type testSubscriber struct {
+	dataCh chan interface{}
+	ch     chan error
+}
+
+func (s *testSubscriber) Wait() error {
+	return <-s.ch
+}
+
+func newTestSubscriber(p *Publisher) *testSubscriber {
+	ts := &testSubscriber{
+		dataCh: p.Subscribe(),
+		ch:     make(chan error),
+	}
+	go func() {
+		for data := range ts.dataCh {
+			s, ok := data.(string)
+			if !ok {
+				ts.ch <- fmt.Errorf("Unexpected type %T", data)
+				break
+			}
+			if s != sampleText {
+				ts.ch <- fmt.Errorf("Unexpected text %s", s)
+				break
+			}
+		}
+		close(ts.ch)
+	}()
+	return ts
+}
+
+// for testing with -race
+func TestPubSubRace(t *testing.T) {
+	p := NewPublisher(0, 1024)
+	var subs [](*testSubscriber)
+	for j := 0; j < 50; j++ {
+		subs = append(subs, newTestSubscriber(p))
+	}
+	for j := 0; j < 1000; j++ {
+		p.Publish(sampleText)
+	}
+	time.AfterFunc(1*time.Second, func() {
+		for _, s := range subs {
+			p.Evict(s.dataCh)
+		}
+	})
+	for _, s := range subs {
+		s.Wait()
+	}
+}
+
+func BenchmarkPubSub(b *testing.B) {
+	for i := 0; i < b.N; i++ {
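+		// The setup below (a fresh publisher plus 50 subscribers) is excluded
+		// from the timed region via StopTimer/StartTimer, so the benchmark
+		// measures only the cost of Publish.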
+		b.StopTimer()
+		p := NewPublisher(0, 1024)
+		var subs [](*testSubscriber)
+		for j := 0; j < 50; j++ {
+			subs = append(subs, newTestSubscriber(p))
+		}
+		b.StartTimer()
+		for j := 0; j < 1000; j++ {
+			p.Publish(sampleText)
+		}
+		time.AfterFunc(1*time.Second, func() {
+			for _, s := range subs {
+				p.Evict(s.dataCh)
+			}
+		})
+		for _, s := range subs {
+			if err := s.Wait(); err != nil {
+				b.Fatal(err)
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/moby/moby/pkg/reexec/README.md b/vendor/github.com/moby/moby/pkg/reexec/README.md
new file mode 100644
index 000000000..6658f69b6
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/reexec/README.md
@@ -0,0 +1,5 @@
+# reexec
+
+The `reexec` package facilitates the busybox-style re-exec of the docker binary that we require
+because of the forking limitations of using Go. Handlers can be registered under a name, and
+argv[0] of the re-exec'd binary is used to find and execute the matching initializer.
diff --git a/vendor/github.com/moby/moby/pkg/reexec/command_linux.go b/vendor/github.com/moby/moby/pkg/reexec/command_linux.go
new file mode 100644
index 000000000..05319eacc
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/reexec/command_linux.go
@@ -0,0 +1,30 @@
+// +build linux
+
+package reexec
+
+import (
+	"os/exec"
+	"syscall"
+
+	"golang.org/x/sys/unix"
+)
+
+// Self returns the path to the current process's binary.
+// Returns "/proc/self/exe".
+func Self() string {
+	return "/proc/self/exe"
+}
+
+// Command returns *exec.Cmd with its Path set to the current binary.
+// It also sets SysProcAttr.Pdeathsig to SIGTERM.
+// This will use the in-memory version (/proc/self/exe) of the current binary,
+// it is thus safe to delete or replace the on-disk binary (os.Args[0]).
+func Command(args ...string) *exec.Cmd {
+	return &exec.Cmd{
+		Path: Self(),
+		Args: args,
+		SysProcAttr: &syscall.SysProcAttr{
+			Pdeathsig: unix.SIGTERM,
+		},
+	}
+}
diff --git a/vendor/github.com/moby/moby/pkg/reexec/command_unix.go b/vendor/github.com/moby/moby/pkg/reexec/command_unix.go
new file mode 100644
index 000000000..778a720e3
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/reexec/command_unix.go
@@ -0,0 +1,23 @@
+// +build freebsd solaris darwin
+
+package reexec
+
+import (
+	"os/exec"
+)
+
+// Self returns the path to the current process's binary.
+// Uses os.Args[0].
+func Self() string {
+	return naiveSelf()
+}
+
+// Command returns *exec.Cmd with its Path set to the current binary.
+// For example if the current binary is "docker" at "/usr/bin/", then cmd.Path will
+// be set to "/usr/bin/docker".
+func Command(args ...string) *exec.Cmd {
+	return &exec.Cmd{
+		Path: Self(),
+		Args: args,
+	}
+}
diff --git a/vendor/github.com/moby/moby/pkg/reexec/command_unsupported.go b/vendor/github.com/moby/moby/pkg/reexec/command_unsupported.go
new file mode 100644
index 000000000..76edd8242
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/reexec/command_unsupported.go
@@ -0,0 +1,12 @@
+// +build !linux,!windows,!freebsd,!solaris,!darwin
+
+package reexec
+
+import (
+	"os/exec"
+)
+
+// Command is unsupported on operating systems apart from Linux, Windows, FreeBSD, Solaris and Darwin.
+func Command(args ...string) *exec.Cmd { + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/reexec/command_windows.go b/vendor/github.com/moby/moby/pkg/reexec/command_windows.go new file mode 100644 index 000000000..ca871c422 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/reexec/command_windows.go @@ -0,0 +1,23 @@ +// +build windows + +package reexec + +import ( + "os/exec" +) + +// Self returns the path to the current process's binary. +// Uses os.Args[0]. +func Self() string { + return naiveSelf() +} + +// Command returns *exec.Cmd which has Path as current binary. +// For example if current binary is "docker.exe" at "C:\", then cmd.Path will +// be set to "C:\docker.exe". +func Command(args ...string) *exec.Cmd { + return &exec.Cmd{ + Path: Self(), + Args: args, + } +} diff --git a/vendor/github.com/moby/moby/pkg/reexec/reexec.go b/vendor/github.com/moby/moby/pkg/reexec/reexec.go new file mode 100644 index 000000000..c56671d91 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/reexec/reexec.go @@ -0,0 +1,47 @@ +package reexec + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" +) + +var registeredInitializers = make(map[string]func()) + +// Register adds an initialization func under the specified name +func Register(name string, initializer func()) { + if _, exists := registeredInitializers[name]; exists { + panic(fmt.Sprintf("reexec func already registered under name %q", name)) + } + + registeredInitializers[name] = initializer +} + +// Init is called as the first part of the exec process and returns true if an +// initialization function was called. +func Init() bool { + initializer, exists := registeredInitializers[os.Args[0]] + if exists { + initializer() + + return true + } + return false +} + +func naiveSelf() string { + name := os.Args[0] + if filepath.Base(name) == name { + if lp, err := exec.LookPath(name); err == nil { + return lp + } + } + // handle conversion of relative paths to absolute + if absName, err := filepath.Abs(name); err == nil { + return absName + } + // if we couldn't get absolute name, return original + // (NOTE: Go only errors on Abs() if os.Getwd fails) + return name +} diff --git a/vendor/github.com/moby/moby/pkg/reexec/reexec_test.go b/vendor/github.com/moby/moby/pkg/reexec/reexec_test.go new file mode 100644 index 000000000..39e87a4a2 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/reexec/reexec_test.go @@ -0,0 +1,53 @@ +package reexec + +import ( + "os" + "os/exec" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func init() { + Register("reexec", func() { + panic("Return Error") + }) + Init() +} + +func TestRegister(t *testing.T) { + defer func() { + if r := recover(); r != nil { + require.Equal(t, `reexec func already registered under name "reexec"`, r) + } + }() + Register("reexec", func() {}) +} + +func TestCommand(t *testing.T) { + cmd := Command("reexec") + w, err := cmd.StdinPipe() + require.NoError(t, err, "Error on pipe creation: %v", err) + defer w.Close() + + err = cmd.Start() + require.NoError(t, err, "Error on re-exec cmd: %v", err) + err = cmd.Wait() + require.EqualError(t, err, "exit status 2") +} + +func TestNaiveSelf(t *testing.T) { + if os.Getenv("TEST_CHECK") == "1" { + os.Exit(2) + } + cmd := exec.Command(naiveSelf(), "-test.run=TestNaiveSelf") + cmd.Env = append(os.Environ(), "TEST_CHECK=1") + err := cmd.Start() + require.NoError(t, err, "Unable to start command") + err = cmd.Wait() + require.EqualError(t, err, "exit status 2") + + os.Args[0] = "mkdir" + 
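+	// With a bare command name in os.Args[0], naiveSelf resolves it via
+	// exec.LookPath, so the result should differ from the unqualified "mkdir".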
assert.NotEqual(t, naiveSelf(), os.Args[0]) +} diff --git a/vendor/github.com/moby/moby/pkg/signal/README.md b/vendor/github.com/moby/moby/pkg/signal/README.md new file mode 100644 index 000000000..2b237a594 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/signal/README.md @@ -0,0 +1 @@ +This package provides helper functions for dealing with signals across various operating systems \ No newline at end of file diff --git a/vendor/github.com/moby/moby/pkg/signal/signal.go b/vendor/github.com/moby/moby/pkg/signal/signal.go new file mode 100644 index 000000000..68bb77cf5 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/signal/signal.go @@ -0,0 +1,54 @@ +// Package signal provides helper functions for dealing with signals across +// various operating systems. +package signal + +import ( + "fmt" + "os" + "os/signal" + "strconv" + "strings" + "syscall" +) + +// CatchAll catches all signals and relays them to the specified channel. +func CatchAll(sigc chan os.Signal) { + handledSigs := []os.Signal{} + for _, s := range SignalMap { + handledSigs = append(handledSigs, s) + } + signal.Notify(sigc, handledSigs...) +} + +// StopCatch stops catching the signals and closes the specified channel. +func StopCatch(sigc chan os.Signal) { + signal.Stop(sigc) + close(sigc) +} + +// ParseSignal translates a string to a valid syscall signal. +// It returns an error if the signal map doesn't include the given signal. +func ParseSignal(rawSignal string) (syscall.Signal, error) { + s, err := strconv.Atoi(rawSignal) + if err == nil { + if s == 0 { + return -1, fmt.Errorf("Invalid signal: %s", rawSignal) + } + return syscall.Signal(s), nil + } + signal, ok := SignalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")] + if !ok { + return -1, fmt.Errorf("Invalid signal: %s", rawSignal) + } + return signal, nil +} + +// ValidSignalForPlatform returns true if a signal is valid on the platform +func ValidSignalForPlatform(sig syscall.Signal) bool { + for _, v := range SignalMap { + if v == sig { + return true + } + } + return false +} diff --git a/vendor/github.com/moby/moby/pkg/signal/signal_darwin.go b/vendor/github.com/moby/moby/pkg/signal/signal_darwin.go new file mode 100644 index 000000000..946de87e9 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/signal/signal_darwin.go @@ -0,0 +1,41 @@ +package signal + +import ( + "syscall" +) + +// SignalMap is a map of Darwin signals. 
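+// Keys are signal names without the "SIG" prefix; ParseSignal upper-cases its
+// input and strips any leading "SIG" before the lookup, so "term" and
+// "SIGTERM" both resolve through this map.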
+var SignalMap = map[string]syscall.Signal{
+	"ABRT":   syscall.SIGABRT,
+	"ALRM":   syscall.SIGALRM,
+	"BUS":    syscall.SIGBUS,
+	"CHLD":   syscall.SIGCHLD,
+	"CONT":   syscall.SIGCONT,
+	"EMT":    syscall.SIGEMT,
+	"FPE":    syscall.SIGFPE,
+	"HUP":    syscall.SIGHUP,
+	"ILL":    syscall.SIGILL,
+	"INFO":   syscall.SIGINFO,
+	"INT":    syscall.SIGINT,
+	"IO":     syscall.SIGIO,
+	"IOT":    syscall.SIGIOT,
+	"KILL":   syscall.SIGKILL,
+	"PIPE":   syscall.SIGPIPE,
+	"PROF":   syscall.SIGPROF,
+	"QUIT":   syscall.SIGQUIT,
+	"SEGV":   syscall.SIGSEGV,
+	"STOP":   syscall.SIGSTOP,
+	"SYS":    syscall.SIGSYS,
+	"TERM":   syscall.SIGTERM,
+	"TRAP":   syscall.SIGTRAP,
+	"TSTP":   syscall.SIGTSTP,
+	"TTIN":   syscall.SIGTTIN,
+	"TTOU":   syscall.SIGTTOU,
+	"URG":    syscall.SIGURG,
+	"USR1":   syscall.SIGUSR1,
+	"USR2":   syscall.SIGUSR2,
+	"VTALRM": syscall.SIGVTALRM,
+	"WINCH":  syscall.SIGWINCH,
+	"XCPU":   syscall.SIGXCPU,
+	"XFSZ":   syscall.SIGXFSZ,
+}
diff --git a/vendor/github.com/moby/moby/pkg/signal/signal_freebsd.go b/vendor/github.com/moby/moby/pkg/signal/signal_freebsd.go
new file mode 100644
index 000000000..6b9569bb7
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/signal/signal_freebsd.go
@@ -0,0 +1,43 @@
+package signal
+
+import (
+	"syscall"
+)
+
+// SignalMap is a map of FreeBSD signals.
+var SignalMap = map[string]syscall.Signal{
+	"ABRT":   syscall.SIGABRT,
+	"ALRM":   syscall.SIGALRM,
+	"BUS":    syscall.SIGBUS,
+	"CHLD":   syscall.SIGCHLD,
+	"CONT":   syscall.SIGCONT,
+	"EMT":    syscall.SIGEMT,
+	"FPE":    syscall.SIGFPE,
+	"HUP":    syscall.SIGHUP,
+	"ILL":    syscall.SIGILL,
+	"INFO":   syscall.SIGINFO,
+	"INT":    syscall.SIGINT,
+	"IO":     syscall.SIGIO,
+	"IOT":    syscall.SIGIOT,
+	"KILL":   syscall.SIGKILL,
+	"LWP":    syscall.SIGLWP,
+	"PIPE":   syscall.SIGPIPE,
+	"PROF":   syscall.SIGPROF,
+	"QUIT":   syscall.SIGQUIT,
+	"SEGV":   syscall.SIGSEGV,
+	"STOP":   syscall.SIGSTOP,
+	"SYS":    syscall.SIGSYS,
+	"TERM":   syscall.SIGTERM,
+	"THR":    syscall.SIGTHR,
+	"TRAP":   syscall.SIGTRAP,
+	"TSTP":   syscall.SIGTSTP,
+	"TTIN":   syscall.SIGTTIN,
+	"TTOU":   syscall.SIGTTOU,
+	"URG":    syscall.SIGURG,
+	"USR1":   syscall.SIGUSR1,
+	"USR2":   syscall.SIGUSR2,
+	"VTALRM": syscall.SIGVTALRM,
+	"WINCH":  syscall.SIGWINCH,
+	"XCPU":   syscall.SIGXCPU,
+	"XFSZ":   syscall.SIGXFSZ,
+}
diff --git a/vendor/github.com/moby/moby/pkg/signal/signal_linux.go b/vendor/github.com/moby/moby/pkg/signal/signal_linux.go
new file mode 100644
index 000000000..3594796ca
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/signal/signal_linux.go
@@ -0,0 +1,82 @@
+package signal
+
+import (
+	"syscall"
+
+	"golang.org/x/sys/unix"
+)
+
+const (
+	sigrtmin = 34
+	sigrtmax = 64
+)
+
+// SignalMap is a map of Linux signals.
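+// Besides the standard names it also exposes the POSIX real-time range as
+// "RTMIN", "RTMIN+1" ... "RTMAX-1", "RTMAX", derived from the sigrtmin and
+// sigrtmax constants above.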
+var SignalMap = map[string]syscall.Signal{ + "ABRT": unix.SIGABRT, + "ALRM": unix.SIGALRM, + "BUS": unix.SIGBUS, + "CHLD": unix.SIGCHLD, + "CLD": unix.SIGCLD, + "CONT": unix.SIGCONT, + "FPE": unix.SIGFPE, + "HUP": unix.SIGHUP, + "ILL": unix.SIGILL, + "INT": unix.SIGINT, + "IO": unix.SIGIO, + "IOT": unix.SIGIOT, + "KILL": unix.SIGKILL, + "PIPE": unix.SIGPIPE, + "POLL": unix.SIGPOLL, + "PROF": unix.SIGPROF, + "PWR": unix.SIGPWR, + "QUIT": unix.SIGQUIT, + "SEGV": unix.SIGSEGV, + "STKFLT": unix.SIGSTKFLT, + "STOP": unix.SIGSTOP, + "SYS": unix.SIGSYS, + "TERM": unix.SIGTERM, + "TRAP": unix.SIGTRAP, + "TSTP": unix.SIGTSTP, + "TTIN": unix.SIGTTIN, + "TTOU": unix.SIGTTOU, + "UNUSED": unix.SIGUNUSED, + "URG": unix.SIGURG, + "USR1": unix.SIGUSR1, + "USR2": unix.SIGUSR2, + "VTALRM": unix.SIGVTALRM, + "WINCH": unix.SIGWINCH, + "XCPU": unix.SIGXCPU, + "XFSZ": unix.SIGXFSZ, + "RTMIN": sigrtmin, + "RTMIN+1": sigrtmin + 1, + "RTMIN+2": sigrtmin + 2, + "RTMIN+3": sigrtmin + 3, + "RTMIN+4": sigrtmin + 4, + "RTMIN+5": sigrtmin + 5, + "RTMIN+6": sigrtmin + 6, + "RTMIN+7": sigrtmin + 7, + "RTMIN+8": sigrtmin + 8, + "RTMIN+9": sigrtmin + 9, + "RTMIN+10": sigrtmin + 10, + "RTMIN+11": sigrtmin + 11, + "RTMIN+12": sigrtmin + 12, + "RTMIN+13": sigrtmin + 13, + "RTMIN+14": sigrtmin + 14, + "RTMIN+15": sigrtmin + 15, + "RTMAX-14": sigrtmax - 14, + "RTMAX-13": sigrtmax - 13, + "RTMAX-12": sigrtmax - 12, + "RTMAX-11": sigrtmax - 11, + "RTMAX-10": sigrtmax - 10, + "RTMAX-9": sigrtmax - 9, + "RTMAX-8": sigrtmax - 8, + "RTMAX-7": sigrtmax - 7, + "RTMAX-6": sigrtmax - 6, + "RTMAX-5": sigrtmax - 5, + "RTMAX-4": sigrtmax - 4, + "RTMAX-3": sigrtmax - 3, + "RTMAX-2": sigrtmax - 2, + "RTMAX-1": sigrtmax - 1, + "RTMAX": sigrtmax, +} diff --git a/vendor/github.com/moby/moby/pkg/signal/signal_linux_test.go b/vendor/github.com/moby/moby/pkg/signal/signal_linux_test.go new file mode 100644 index 000000000..32c056fe4 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/signal/signal_linux_test.go @@ -0,0 +1,58 @@ +// +build darwin linux solaris + +package signal + +import ( + "os" + "syscall" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestCatchAll(t *testing.T) { + sigs := make(chan os.Signal, 1) + CatchAll(sigs) + defer StopCatch(sigs) + + listOfSignals := map[string]string{ + "CONT": syscall.SIGCONT.String(), + "HUP": syscall.SIGHUP.String(), + "CHLD": syscall.SIGCHLD.String(), + "ILL": syscall.SIGILL.String(), + "FPE": syscall.SIGFPE.String(), + "CLD": syscall.SIGCLD.String(), + } + + for sigStr := range listOfSignals { + signal, ok := SignalMap[sigStr] + if ok { + go func() { + time.Sleep(1 * time.Millisecond) + syscall.Kill(syscall.Getpid(), signal) + }() + + s := <-sigs + assert.EqualValues(t, s.String(), signal.String()) + } + + } +} + +func TestStopCatch(t *testing.T) { + signal, _ := SignalMap["HUP"] + channel := make(chan os.Signal, 1) + CatchAll(channel) + go func() { + + time.Sleep(1 * time.Millisecond) + syscall.Kill(syscall.Getpid(), signal) + }() + signalString := <-channel + assert.EqualValues(t, signalString.String(), signal.String()) + + StopCatch(channel) + _, ok := <-channel + assert.EqualValues(t, ok, false) +} diff --git a/vendor/github.com/moby/moby/pkg/signal/signal_solaris.go b/vendor/github.com/moby/moby/pkg/signal/signal_solaris.go new file mode 100644 index 000000000..89576b9e3 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/signal/signal_solaris.go @@ -0,0 +1,42 @@ +package signal + +import ( + "syscall" +) + +// SignalMap is a map of Solaris signals. 
+// SIGINFO and SIGTHR are not defined for Solaris.
+var SignalMap = map[string]syscall.Signal{
+	"ABRT":   syscall.SIGABRT,
+	"ALRM":   syscall.SIGALRM,
+	"BUS":    syscall.SIGBUS,
+	"CHLD":   syscall.SIGCHLD,
+	"CONT":   syscall.SIGCONT,
+	"EMT":    syscall.SIGEMT,
+	"FPE":    syscall.SIGFPE,
+	"HUP":    syscall.SIGHUP,
+	"ILL":    syscall.SIGILL,
+	"INT":    syscall.SIGINT,
+	"IO":     syscall.SIGIO,
+	"IOT":    syscall.SIGIOT,
+	"KILL":   syscall.SIGKILL,
+	"LWP":    syscall.SIGLWP,
+	"PIPE":   syscall.SIGPIPE,
+	"PROF":   syscall.SIGPROF,
+	"QUIT":   syscall.SIGQUIT,
+	"SEGV":   syscall.SIGSEGV,
+	"STOP":   syscall.SIGSTOP,
+	"SYS":    syscall.SIGSYS,
+	"TERM":   syscall.SIGTERM,
+	"TRAP":   syscall.SIGTRAP,
+	"TSTP":   syscall.SIGTSTP,
+	"TTIN":   syscall.SIGTTIN,
+	"TTOU":   syscall.SIGTTOU,
+	"URG":    syscall.SIGURG,
+	"USR1":   syscall.SIGUSR1,
+	"USR2":   syscall.SIGUSR2,
+	"VTALRM": syscall.SIGVTALRM,
+	"WINCH":  syscall.SIGWINCH,
+	"XCPU":   syscall.SIGXCPU,
+	"XFSZ":   syscall.SIGXFSZ,
+}
diff --git a/vendor/github.com/moby/moby/pkg/signal/signal_test.go b/vendor/github.com/moby/moby/pkg/signal/signal_test.go
new file mode 100644
index 000000000..df02f5bed
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/signal/signal_test.go
@@ -0,0 +1,33 @@
+package signal
+
+import (
+	"syscall"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestParseSignal(t *testing.T) {
+	_, checkAtoiError := ParseSignal("0")
+	assert.EqualError(t, checkAtoiError, "Invalid signal: 0")
+
+	_, error := ParseSignal("SIG")
+	assert.EqualError(t, error, "Invalid signal: SIG")
+
+	for sigStr := range SignalMap {
+		responseSignal, error := ParseSignal(sigStr)
+		assert.NoError(t, error)
+		signal := SignalMap[sigStr]
+		assert.EqualValues(t, signal, responseSignal)
+	}
+}
+
+func TestValidSignalForPlatform(t *testing.T) {
+	isValidSignal := ValidSignalForPlatform(syscall.Signal(0))
+	assert.EqualValues(t, false, isValidSignal)
+
+	for _, sigN := range SignalMap {
+		isValidSignal = ValidSignalForPlatform(syscall.Signal(sigN))
+		assert.EqualValues(t, true, isValidSignal)
+	}
+}
diff --git a/vendor/github.com/moby/moby/pkg/signal/signal_unix.go b/vendor/github.com/moby/moby/pkg/signal/signal_unix.go
new file mode 100644
index 000000000..5d058fd56
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/signal/signal_unix.go
@@ -0,0 +1,21 @@
+// +build !windows
+
+package signal
+
+import (
+	"syscall"
+)
+
+// Signals used in cli/command (no windows equivalent, use
+// invalid signals so they don't get handled)
+
+const (
+	// SIGCHLD is a signal sent to a process when a child process terminates, is interrupted, or resumes after being interrupted.
+	SIGCHLD = syscall.SIGCHLD
+	// SIGWINCH is a signal sent to a process when its controlling terminal changes its size
+	SIGWINCH = syscall.SIGWINCH
+	// SIGPIPE is a signal sent to a process when a pipe is written to before the other end is open for reading
+	SIGPIPE = syscall.SIGPIPE
+	// DefaultStopSignal is the syscall signal used to stop a container in unix systems.
+	DefaultStopSignal = "SIGTERM"
+)
diff --git a/vendor/github.com/moby/moby/pkg/signal/signal_unsupported.go b/vendor/github.com/moby/moby/pkg/signal/signal_unsupported.go
new file mode 100644
index 000000000..c592d37df
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/signal/signal_unsupported.go
@@ -0,0 +1,10 @@
+// +build !linux,!darwin,!freebsd,!windows,!solaris
+
+package signal
+
+import (
+	"syscall"
+)
+
+// SignalMap is an empty map of signals for unsupported platforms.
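+// With an empty map, ParseSignal rejects every signal name on these
+// platforms, although numeric strings such as "9" still parse.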
+var SignalMap = map[string]syscall.Signal{}
diff --git a/vendor/github.com/moby/moby/pkg/signal/signal_windows.go b/vendor/github.com/moby/moby/pkg/signal/signal_windows.go
new file mode 100644
index 000000000..440f2700e
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/signal/signal_windows.go
@@ -0,0 +1,28 @@
+// +build windows
+
+package signal
+
+import (
+	"syscall"
+)
+
+// Signals used in cli/command (no windows equivalent, use
+// invalid signals so they don't get handled)
+const (
+	SIGCHLD  = syscall.Signal(0xff)
+	SIGWINCH = syscall.Signal(0xff)
+	SIGPIPE  = syscall.Signal(0xff)
+	// DefaultStopSignal is the syscall signal used to stop a container in windows systems.
+	DefaultStopSignal = "15"
+)
+
+// SignalMap is a map of "supported" signals. As per the comment in Go's
+// ztypes_windows.go: "More invented values for signals". Windows doesn't
+// really support signals in any way, shape or form that Unix does.
+//
+// We have these so that docker kill can be used to gracefully (TERM) and
+// forcibly (KILL) terminate a container on Windows.
+var SignalMap = map[string]syscall.Signal{
+	"KILL": syscall.SIGKILL,
+	"TERM": syscall.SIGTERM,
+}
diff --git a/vendor/github.com/moby/moby/pkg/signal/trap.go b/vendor/github.com/moby/moby/pkg/signal/trap.go
new file mode 100644
index 000000000..638a1ab66
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/signal/trap.go
@@ -0,0 +1,103 @@
+package signal
+
+import (
+	"fmt"
+	"os"
+	gosignal "os/signal"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"sync/atomic"
+	"syscall"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/pkg/errors"
+)
+
+// Trap sets up a simplified signal "trap", appropriate for common
+// behavior expected from a vanilla unix command-line tool in general
+// (and the Docker engine in particular).
+//
+// * If SIGINT or SIGTERM are received, `cleanup` is called, then the process is terminated.
+// * If SIGINT or SIGTERM are received 3 times before cleanup is complete, then cleanup is
+//   skipped and the process is terminated immediately (allows force quit of stuck daemon).
+// * A SIGQUIT always causes an exit without cleanup, with a goroutine dump preceding exit.
+// * SIGPIPE events are ignored. These are generated by systemd when journald is restarted
+//   while the docker daemon is not restarted and is also running under systemd.
+//   Fixes https://github.com/docker/docker/issues/19728
+//
+func Trap(cleanup func()) {
+	c := make(chan os.Signal, 1)
+	// we will handle INT, TERM, QUIT, SIGPIPE here
+	signals := []os.Signal{os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGPIPE}
+	gosignal.Notify(c, signals...)
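+	// The loop below runs in its own goroutine so Trap returns immediately;
+	// each received signal is then dispatched on a further goroutine.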
+	go func() {
+		interruptCount := uint32(0)
+		for sig := range c {
+			if sig == syscall.SIGPIPE {
+				continue
+			}
+
+			go func(sig os.Signal) {
+				logrus.Infof("Processing signal '%v'", sig)
+				switch sig {
+				case os.Interrupt, syscall.SIGTERM:
+					if atomic.LoadUint32(&interruptCount) < 3 {
+						// Initiate the cleanup only once
+						if atomic.AddUint32(&interruptCount, 1) == 1 {
+							// Call the provided cleanup handler
+							cleanup()
+							os.Exit(0)
+						} else {
+							return
+						}
+					} else {
+						// 3 SIGTERM/INT signals received; force exit without cleanup
+						logrus.Info("Forcing docker daemon shutdown without cleanup; 3 interrupts received")
+					}
+				case syscall.SIGQUIT:
+					DumpStacks("")
+					logrus.Info("Forcing docker daemon shutdown without cleanup on SIGQUIT")
+				}
+				// for the SIGINT/TERM and SIGQUIT non-clean shutdown cases, exit with 128 + signal #
+				os.Exit(128 + int(sig.(syscall.Signal)))
+			}(sig)
+		}
+	}()
+}
+
+const stacksLogNameTemplate = "goroutine-stacks-%s.log"
+
+// DumpStacks appends the runtime stack into a file in dir and returns the
+// full path to that file.
+func DumpStacks(dir string) (string, error) {
+	var (
+		buf       []byte
+		stackSize int
+	)
+	bufferLen := 16384
+	for stackSize == len(buf) {
+		buf = make([]byte, bufferLen)
+		stackSize = runtime.Stack(buf, true)
+		bufferLen *= 2
+	}
+	buf = buf[:stackSize]
+	var f *os.File
+	if dir != "" {
+		path := filepath.Join(dir, fmt.Sprintf(stacksLogNameTemplate, strings.Replace(time.Now().Format(time.RFC3339), ":", "", -1)))
+		var err error
+		f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0666)
+		if err != nil {
+			return "", errors.Wrap(err, "failed to open file to write the goroutine stacks")
+		}
+		defer f.Close()
+		defer f.Sync()
+	} else {
+		f = os.Stderr
+	}
+	if _, err := f.Write(buf); err != nil {
+		return "", errors.Wrap(err, "failed to write goroutine stacks")
+	}
+	return f.Name(), nil
+}
diff --git a/vendor/github.com/moby/moby/pkg/stdcopy/stdcopy.go b/vendor/github.com/moby/moby/pkg/stdcopy/stdcopy.go
new file mode 100644
index 000000000..a018a203f
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/stdcopy/stdcopy.go
@@ -0,0 +1,190 @@
+package stdcopy
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"sync"
+)
+
+// StdType is the type of standard stream
+// a writer can multiplex to.
+type StdType byte
+
+const (
+	// Stdin represents standard input stream type.
+	Stdin StdType = iota
+	// Stdout represents standard output stream type.
+	Stdout
+	// Stderr represents standard error stream type.
+	Stderr
+	// Systemerr represents errors originating from the system that make it
+	// into the multiplexed stream.
+	Systemerr
+
+	stdWriterPrefixLen = 8
+	stdWriterFdIndex   = 0
+	stdWriterSizeIndex = 4
+
+	startingBufLen = 32*1024 + stdWriterPrefixLen + 1
+)
+
+var bufPool = &sync.Pool{New: func() interface{} { return bytes.NewBuffer(nil) }}
+
+// stdWriter is a wrapper of io.Writer with extra customized info.
+type stdWriter struct {
+	io.Writer
+	prefix byte
+}
+
+// Write sends the buffer to the underlying writer.
+// It inserts the prefix header before the buffer,
+// so stdcopy.StdCopy knows where to multiplex the output.
+// It makes stdWriter implement io.Writer.
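+//
+// The prefix header written before each payload (see the constants above) is:
+//
+//	[0]   stream ID byte (stdWriterFdIndex)
+//	[1:4] unused padding
+//	[4:8] big-endian uint32 payload length (stdWriterSizeIndex)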
+func (w *stdWriter) Write(p []byte) (n int, err error) { + if w == nil || w.Writer == nil { + return 0, errors.New("Writer not instantiated") + } + if p == nil { + return 0, nil + } + + header := [stdWriterPrefixLen]byte{stdWriterFdIndex: w.prefix} + binary.BigEndian.PutUint32(header[stdWriterSizeIndex:], uint32(len(p))) + buf := bufPool.Get().(*bytes.Buffer) + buf.Write(header[:]) + buf.Write(p) + + n, err = w.Writer.Write(buf.Bytes()) + n -= stdWriterPrefixLen + if n < 0 { + n = 0 + } + + buf.Reset() + bufPool.Put(buf) + return +} + +// NewStdWriter instantiates a new Writer. +// Everything written to it will be encapsulated using a custom format, +// and written to the underlying `w` stream. +// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection. +// `t` indicates the id of the stream to encapsulate. +// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr. +func NewStdWriter(w io.Writer, t StdType) io.Writer { + return &stdWriter{ + Writer: w, + prefix: byte(t), + } +} + +// StdCopy is a modified version of io.Copy. +// +// StdCopy will demultiplex `src`, assuming that it contains two streams, +// previously multiplexed together using a StdWriter instance. +// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`. +// +// StdCopy will read until it hits EOF on `src`. It will then return a nil error. +// In other words: if `err` is non nil, it indicates a real underlying error. +// +// `written` will hold the total number of bytes written to `dstout` and `dsterr`. +func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) { + var ( + buf = make([]byte, startingBufLen) + bufLen = len(buf) + nr, nw int + er, ew error + out io.Writer + frameSize int + ) + + for { + // Make sure we have at least a full header + for nr < stdWriterPrefixLen { + var nr2 int + nr2, er = src.Read(buf[nr:]) + nr += nr2 + if er == io.EOF { + if nr < stdWriterPrefixLen { + return written, nil + } + break + } + if er != nil { + return 0, er + } + } + + stream := StdType(buf[stdWriterFdIndex]) + // Check the first byte to know where to write + switch stream { + case Stdin: + fallthrough + case Stdout: + // Write on stdout + out = dstout + case Stderr: + // Write on stderr + out = dsterr + case Systemerr: + // If we're on Systemerr, we won't write anywhere. + // NB: if this code changes later, make sure you don't try to write + // to outstream if Systemerr is the stream + out = nil + default: + return 0, fmt.Errorf("Unrecognized input header: %d", buf[stdWriterFdIndex]) + } + + // Retrieve the size of the frame + frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4])) + + // Check if the buffer is big enough to read the frame. + // Extend it if necessary. + if frameSize+stdWriterPrefixLen > bufLen { + buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-bufLen+1)...) + bufLen = len(buf) + } + + // While the amount of bytes read is less than the size of the frame + header, we keep reading + for nr < frameSize+stdWriterPrefixLen { + var nr2 int + nr2, er = src.Read(buf[nr:]) + nr += nr2 + if er == io.EOF { + if nr < frameSize+stdWriterPrefixLen { + return written, nil + } + break + } + if er != nil { + return 0, er + } + } + + // we might have an error from the source mixed up in our multiplexed + // stream. if we do, return it. 
+		if stream == Systemerr {
+			return written, fmt.Errorf("error from daemon in stream: %s", string(buf[stdWriterPrefixLen:frameSize+stdWriterPrefixLen]))
+		}
+
+		// Write the retrieved frame (without header)
+		nw, ew = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen])
+		if ew != nil {
+			return 0, ew
+		}
+
+		// If the frame has not been fully written: error
+		if nw != frameSize {
+			return 0, io.ErrShortWrite
+		}
+		written += int64(nw)
+
+		// Move the rest of the buffer to the beginning
+		copy(buf, buf[frameSize+stdWriterPrefixLen:])
+		// Move the index
+		nr -= frameSize + stdWriterPrefixLen
+	}
+}
diff --git a/vendor/github.com/moby/moby/pkg/stdcopy/stdcopy_test.go b/vendor/github.com/moby/moby/pkg/stdcopy/stdcopy_test.go
new file mode 100644
index 000000000..3f992fda6
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/stdcopy/stdcopy_test.go
@@ -0,0 +1,289 @@
+package stdcopy
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"io/ioutil"
+	"strings"
+	"testing"
+)
+
+func TestNewStdWriter(t *testing.T) {
+	writer := NewStdWriter(ioutil.Discard, Stdout)
+	if writer == nil {
+		t.Fatalf("NewStdWriter should not return nil.")
+	}
+}
+
+func TestWriteWithUninitializedStdWriter(t *testing.T) {
+	writer := stdWriter{
+		Writer: nil,
+		prefix: byte(Stdout),
+	}
+	n, err := writer.Write([]byte("Something here"))
+	if n != 0 || err == nil {
+		t.Fatalf("Should fail when given an incomplete or uninitialized StdWriter")
+	}
+}
+
+func TestWriteWithNilBytes(t *testing.T) {
+	writer := NewStdWriter(ioutil.Discard, Stdout)
+	n, err := writer.Write(nil)
+	if err != nil {
+		t.Fatalf("Shouldn't have failed when given no data")
+	}
+	if n > 0 {
+		t.Fatalf("Write should have written 0 bytes, but has written %d", n)
+	}
+}
+
+func TestWrite(t *testing.T) {
+	writer := NewStdWriter(ioutil.Discard, Stdout)
+	data := []byte("Test StdWrite.Write")
+	n, err := writer.Write(data)
+	if err != nil {
+		t.Fatalf("Error while writing with StdWrite")
+	}
+	if n != len(data) {
+		t.Fatalf("Write should have written %d bytes but wrote %d.", len(data), n)
+	}
+}
+
+type errWriter struct {
+	n   int
+	err error
+}
+
+func (f *errWriter) Write(buf []byte) (int, error) {
+	return f.n, f.err
+}
+
+func TestWriteWithWriterError(t *testing.T) {
+	expectedError := errors.New("expected")
+	expectedReturnedBytes := 10
+	writer := NewStdWriter(&errWriter{
+		n:   stdWriterPrefixLen + expectedReturnedBytes,
+		err: expectedError}, Stdout)
+	data := []byte("This won't get written, sigh")
+	n, err := writer.Write(data)
+	if err != expectedError {
+		t.Fatalf("Didn't get expected error.")
+	}
+	if n != expectedReturnedBytes {
+		t.Fatalf("Didn't get expected written bytes %d, got %d.",
+			expectedReturnedBytes, n)
+	}
+}
+
+func TestWriteDoesNotReturnNegativeWrittenBytes(t *testing.T) {
+	writer := NewStdWriter(&errWriter{n: -1}, Stdout)
+	data := []byte("This won't get written, sigh")
+	actual, _ := writer.Write(data)
+	if actual != 0 {
+		t.Fatalf("Expected returned written bytes equal to 0, got %d", actual)
+	}
+}
+
+func getSrcBuffer(stdOutBytes, stdErrBytes []byte) (buffer *bytes.Buffer, err error) {
+	buffer = new(bytes.Buffer)
+	dstOut := NewStdWriter(buffer, Stdout)
+	_, err = dstOut.Write(stdOutBytes)
+	if err != nil {
+		return
+	}
+	dstErr := NewStdWriter(buffer, Stderr)
+	_, err = dstErr.Write(stdErrBytes)
+	return
+}
+
+func TestStdCopyWriteAndRead(t *testing.T) {
+	stdOutBytes := []byte(strings.Repeat("o", startingBufLen))
+	stdErrBytes := []byte(strings.Repeat("e", startingBufLen))
+	buffer, err :=
getSrcBuffer(stdOutBytes, stdErrBytes) + if err != nil { + t.Fatal(err) + } + written, err := StdCopy(ioutil.Discard, ioutil.Discard, buffer) + if err != nil { + t.Fatal(err) + } + expectedTotalWritten := len(stdOutBytes) + len(stdErrBytes) + if written != int64(expectedTotalWritten) { + t.Fatalf("Expected to have total of %d bytes written, got %d", expectedTotalWritten, written) + } +} + +type customReader struct { + n int + err error + totalCalls int + correctCalls int + src *bytes.Buffer +} + +func (f *customReader) Read(buf []byte) (int, error) { + f.totalCalls++ + if f.totalCalls <= f.correctCalls { + return f.src.Read(buf) + } + return f.n, f.err +} + +func TestStdCopyReturnsErrorReadingHeader(t *testing.T) { + expectedError := errors.New("error") + reader := &customReader{ + err: expectedError} + written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader) + if written != 0 { + t.Fatalf("Expected 0 bytes read, got %d", written) + } + if err != expectedError { + t.Fatalf("Didn't get expected error") + } +} + +func TestStdCopyReturnsErrorReadingFrame(t *testing.T) { + expectedError := errors.New("error") + stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) + stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) + buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) + if err != nil { + t.Fatal(err) + } + reader := &customReader{ + correctCalls: 1, + n: stdWriterPrefixLen + 1, + err: expectedError, + src: buffer} + written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader) + if written != 0 { + t.Fatalf("Expected 0 bytes read, got %d", written) + } + if err != expectedError { + t.Fatalf("Didn't get expected error") + } +} + +func TestStdCopyDetectsCorruptedFrame(t *testing.T) { + stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) + stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) + buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) + if err != nil { + t.Fatal(err) + } + reader := &customReader{ + correctCalls: 1, + n: stdWriterPrefixLen + 1, + err: io.EOF, + src: buffer} + written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader) + if written != startingBufLen { + t.Fatalf("Expected %d bytes read, got %d", startingBufLen, written) + } + if err != nil { + t.Fatal("Didn't get nil error") + } +} + +func TestStdCopyWithInvalidInputHeader(t *testing.T) { + dstOut := NewStdWriter(ioutil.Discard, Stdout) + dstErr := NewStdWriter(ioutil.Discard, Stderr) + src := strings.NewReader("Invalid input") + _, err := StdCopy(dstOut, dstErr, src) + if err == nil { + t.Fatal("StdCopy with invalid input header should fail.") + } +} + +func TestStdCopyWithCorruptedPrefix(t *testing.T) { + data := []byte{0x01, 0x02, 0x03} + src := bytes.NewReader(data) + written, err := StdCopy(nil, nil, src) + if err != nil { + t.Fatalf("StdCopy should not return an error with corrupted prefix.") + } + if written != 0 { + t.Fatalf("StdCopy should have written 0, but has written %d", written) + } +} + +func TestStdCopyReturnsWriteErrors(t *testing.T) { + stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) + stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) + buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) + if err != nil { + t.Fatal(err) + } + expectedError := errors.New("expected") + + dstOut := &errWriter{err: expectedError} + + written, err := StdCopy(dstOut, ioutil.Discard, buffer) + if written != 0 { + t.Fatalf("StdCopy should have written 0, but has written %d", written) + } + if err != expectedError { + t.Fatalf("Didn't get expected error, got %v", 
err)
+	}
+}
+
+func TestStdCopyDetectsNotFullyWrittenFrames(t *testing.T) {
+	stdOutBytes := []byte(strings.Repeat("o", startingBufLen))
+	stdErrBytes := []byte(strings.Repeat("e", startingBufLen))
+	buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes)
+	if err != nil {
+		t.Fatal(err)
+	}
+	dstOut := &errWriter{n: startingBufLen - 10}
+
+	written, err := StdCopy(dstOut, ioutil.Discard, buffer)
+	if written != 0 {
+		t.Fatalf("StdCopy should have returned 0 written bytes, but returned %d", written)
+	}
+	if err != io.ErrShortWrite {
+		t.Fatalf("Didn't get expected io.ErrShortWrite error")
+	}
+}
+
+// TestStdCopyReturnsErrorFromSystem tests that StdCopy correctly returns an
+// error when that error is muxed into the Systemerr stream.
+func TestStdCopyReturnsErrorFromSystem(t *testing.T) {
+	// write in the basic messages, just so there's some fluff in there
+	stdOutBytes := []byte(strings.Repeat("o", startingBufLen))
+	stdErrBytes := []byte(strings.Repeat("e", startingBufLen))
+	buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// add in an error message on the Systemerr stream
+	systemErrBytes := []byte(strings.Repeat("S", startingBufLen))
+	systemWriter := NewStdWriter(buffer, Systemerr)
+	_, err = systemWriter.Write(systemErrBytes)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// now copy and demux. we should expect an error containing the string we
+	// wrote out
+	_, err = StdCopy(ioutil.Discard, ioutil.Discard, buffer)
+	if err == nil {
+		t.Fatal("expected error, got none")
+	}
+	if !strings.Contains(err.Error(), string(systemErrBytes)) {
+		t.Fatal("expected error to contain message")
+	}
+}
+
+func BenchmarkWrite(b *testing.B) {
+	w := NewStdWriter(ioutil.Discard, Stdout)
+	data := []byte("Test line for testing stdwriter performance\n")
+	data = bytes.Repeat(data, 100)
+	b.SetBytes(int64(len(data)))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		if _, err := w.Write(data); err != nil {
+			b.Fatal(err)
+		}
+	}
+}
diff --git a/vendor/github.com/moby/moby/pkg/streamformatter/streamformatter.go b/vendor/github.com/moby/moby/pkg/streamformatter/streamformatter.go
new file mode 100644
index 000000000..c4f55755e
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/streamformatter/streamformatter.go
@@ -0,0 +1,159 @@
+// Package streamformatter provides helper functions to format a stream.
+package streamformatter
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+
+	"github.com/docker/docker/pkg/jsonmessage"
+	"github.com/docker/docker/pkg/progress"
+)
+
+const streamNewline = "\r\n"
+
+type jsonProgressFormatter struct{}
+
+func appendNewline(source []byte) []byte {
+	return append(source, []byte(streamNewline)...)
+}
+
+// FormatStatus formats the specified objects according to the specified format (and id).
+func FormatStatus(id, format string, a ...interface{}) []byte {
+	str := fmt.Sprintf(format, a...)
+	b, err := json.Marshal(&jsonmessage.JSONMessage{ID: id, Status: str})
+	if err != nil {
+		return FormatError(err)
+	}
+	return appendNewline(b)
+}
+
+// FormatError formats the error as a JSON object
+func FormatError(err error) []byte {
+	jsonError, ok := err.(*jsonmessage.JSONError)
+	if !ok {
+		jsonError = &jsonmessage.JSONError{Message: err.Error()}
+	}
+	if b, err := json.Marshal(&jsonmessage.JSONMessage{Error: jsonError, ErrorMessage: err.Error()}); err == nil {
+		return appendNewline(b)
+	}
+	return []byte(`{"error":"format error"}` + streamNewline)
+}
+
+func (sf *jsonProgressFormatter) formatStatus(id, format string, a ...interface{}) []byte {
+	return FormatStatus(id, format, a...)
+}
+
+// formatProgress formats the progress information for a specified action.
+func (sf *jsonProgressFormatter) formatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte {
+	if progress == nil {
+		progress = &jsonmessage.JSONProgress{}
+	}
+	var auxJSON *json.RawMessage
+	if aux != nil {
+		auxJSONBytes, err := json.Marshal(aux)
+		if err != nil {
+			return nil
+		}
+		auxJSON = new(json.RawMessage)
+		*auxJSON = auxJSONBytes
+	}
+	b, err := json.Marshal(&jsonmessage.JSONMessage{
+		Status:          action,
+		ProgressMessage: progress.String(),
+		Progress:        progress,
+		ID:              id,
+		Aux:             auxJSON,
+	})
+	if err != nil {
+		return nil
+	}
+	return appendNewline(b)
+}
+
+type rawProgressFormatter struct{}
+
+func (sf *rawProgressFormatter) formatStatus(id, format string, a ...interface{}) []byte {
+	return []byte(fmt.Sprintf(format, a...) + streamNewline)
+}
+
+func (sf *rawProgressFormatter) formatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte {
+	if progress == nil {
+		progress = &jsonmessage.JSONProgress{}
+	}
+	endl := "\r"
+	if progress.String() == "" {
+		endl += "\n"
+	}
+	return []byte(action + " " + progress.String() + endl)
+}
+
+// NewProgressOutput returns a progress.Output object that can be passed to
+// progress.NewProgressReader.
+func NewProgressOutput(out io.Writer) progress.Output {
+	return &progressOutput{sf: &rawProgressFormatter{}, out: out, newLines: true}
+}
+
+// NewJSONProgressOutput returns a progress.Output that formats output
+// using JSON objects
+func NewJSONProgressOutput(out io.Writer, newLines bool) progress.Output {
+	return &progressOutput{sf: &jsonProgressFormatter{}, out: out, newLines: newLines}
+}
+
+type formatProgress interface {
+	formatStatus(id, format string, a ...interface{}) []byte
+	formatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte
+}
+
+type progressOutput struct {
+	sf       formatProgress
+	out      io.Writer
+	newLines bool
+}
+
+// WriteProgress formats progress information from a ProgressReader.
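+//
+// A typical wiring (illustrative sketch; rc and size stand in for an assumed
+// io.ReadCloser and its total length):
+//
+//	out := NewProgressOutput(os.Stdout)
+//	pr := progress.NewProgressReader(rc, out, size, "layer", "Downloading")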
+func (out *progressOutput) WriteProgress(prog progress.Progress) error { + var formatted []byte + if prog.Message != "" { + formatted = out.sf.formatStatus(prog.ID, prog.Message) + } else { + jsonProgress := jsonmessage.JSONProgress{Current: prog.Current, Total: prog.Total, HideCounts: prog.HideCounts, Units: prog.Units} + formatted = out.sf.formatProgress(prog.ID, prog.Action, &jsonProgress, prog.Aux) + } + _, err := out.out.Write(formatted) + if err != nil { + return err + } + + if out.newLines && prog.LastUpdate { + _, err = out.out.Write(out.sf.formatStatus("", "")) + return err + } + + return nil +} + +// AuxFormatter is a streamFormatter that writes aux progress messages +type AuxFormatter struct { + io.Writer +} + +// Emit emits the given interface as an aux progress message +func (sf *AuxFormatter) Emit(aux interface{}) error { + auxJSONBytes, err := json.Marshal(aux) + if err != nil { + return err + } + auxJSON := new(json.RawMessage) + *auxJSON = auxJSONBytes + msgJSON, err := json.Marshal(&jsonmessage.JSONMessage{Aux: auxJSON}) + if err != nil { + return err + } + msgJSON = appendNewline(msgJSON) + n, err := sf.Writer.Write(msgJSON) + if n != len(msgJSON) { + return io.ErrShortWrite + } + return err +} diff --git a/vendor/github.com/moby/moby/pkg/streamformatter/streamformatter_test.go b/vendor/github.com/moby/moby/pkg/streamformatter/streamformatter_test.go new file mode 100644 index 000000000..c5c70d7e1 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/streamformatter/streamformatter_test.go @@ -0,0 +1,109 @@ +package streamformatter + +import ( + "bytes" + "encoding/json" + "errors" + "strings" + "testing" + + "github.com/docker/docker/pkg/jsonmessage" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestRawProgressFormatterFormatStatus(t *testing.T) { + sf := rawProgressFormatter{} + res := sf.formatStatus("ID", "%s%d", "a", 1) + assert.Equal(t, "a1\r\n", string(res)) +} + +func TestRawProgressFormatterFormatProgress(t *testing.T) { + sf := rawProgressFormatter{} + jsonProgress := &jsonmessage.JSONProgress{ + Current: 15, + Total: 30, + Start: 1, + } + res := sf.formatProgress("id", "action", jsonProgress, nil) + out := string(res) + assert.True(t, strings.HasPrefix(out, "action [====")) + assert.Contains(t, out, "15B/30B") + assert.True(t, strings.HasSuffix(out, "\r")) +} + +func TestFormatStatus(t *testing.T) { + res := FormatStatus("ID", "%s%d", "a", 1) + expected := `{"status":"a1","id":"ID"}` + streamNewline + assert.Equal(t, expected, string(res)) +} + +func TestFormatError(t *testing.T) { + res := FormatError(errors.New("Error for formatter")) + expected := `{"errorDetail":{"message":"Error for formatter"},"error":"Error for formatter"}` + "\r\n" + assert.Equal(t, expected, string(res)) +} + +func TestFormatJSONError(t *testing.T) { + err := &jsonmessage.JSONError{Code: 50, Message: "Json error"} + res := FormatError(err) + expected := `{"errorDetail":{"code":50,"message":"Json error"},"error":"Json error"}` + streamNewline + assert.Equal(t, expected, string(res)) +} + +func TestJsonProgressFormatterFormatProgress(t *testing.T) { + sf := &jsonProgressFormatter{} + jsonProgress := &jsonmessage.JSONProgress{ + Current: 15, + Total: 30, + Start: 1, + } + res := sf.formatProgress("id", "action", jsonProgress, &AuxFormatter{Writer: &bytes.Buffer{}}) + msg := &jsonmessage.JSONMessage{} + + require.NoError(t, json.Unmarshal(res, msg)) + assert.Equal(t, "id", msg.ID) + assert.Equal(t, "action", msg.Status) + + // jsonProgress will 
always be in the format of: + // [=========================> ] 15B/30B 412910h51m30s + // The last entry '404933h7m11s' is the timeLeftBox. + // However, the timeLeftBox field may change as jsonProgress.String() depends on time.Now(). + // Therefore, we have to strip the timeLeftBox from the strings to do the comparison. + + // Compare the jsonProgress strings before the timeLeftBox + expectedProgress := "[=========================> ] 15B/30B" + // if terminal column is <= 110, expectedProgressShort is expected. + expectedProgressShort := " 15B/30B" + if !(strings.HasPrefix(msg.ProgressMessage, expectedProgress) || + strings.HasPrefix(msg.ProgressMessage, expectedProgressShort)) { + t.Fatalf("ProgressMessage without the timeLeftBox must be %s or %s, got: %s", + expectedProgress, expectedProgressShort, msg.ProgressMessage) + } + + assert.Equal(t, jsonProgress, msg.Progress) +} + +func TestJsonProgressFormatterFormatStatus(t *testing.T) { + sf := jsonProgressFormatter{} + res := sf.formatStatus("ID", "%s%d", "a", 1) + assert.Equal(t, `{"status":"a1","id":"ID"}`+streamNewline, string(res)) +} + +func TestNewJSONProgressOutput(t *testing.T) { + b := bytes.Buffer{} + b.Write(FormatStatus("id", "Downloading")) + _ = NewJSONProgressOutput(&b, false) + assert.Equal(t, `{"status":"Downloading","id":"id"}`+streamNewline, b.String()) +} + +func TestAuxFormatterEmit(t *testing.T) { + b := bytes.Buffer{} + aux := &AuxFormatter{Writer: &b} + sampleAux := &struct { + Data string + }{"Additional data"} + err := aux.Emit(sampleAux) + require.NoError(t, err) + assert.Equal(t, `{"aux":{"Data":"Additional data"}}`+streamNewline, b.String()) +} diff --git a/vendor/github.com/moby/moby/pkg/streamformatter/streamwriter.go b/vendor/github.com/moby/moby/pkg/streamformatter/streamwriter.go new file mode 100644 index 000000000..141d12e20 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/streamformatter/streamwriter.go @@ -0,0 +1,47 @@ +package streamformatter + +import ( + "encoding/json" + "io" + + "github.com/docker/docker/pkg/jsonmessage" +) + +type streamWriter struct { + io.Writer + lineFormat func([]byte) string +} + +func (sw *streamWriter) Write(buf []byte) (int, error) { + formattedBuf := sw.format(buf) + n, err := sw.Writer.Write(formattedBuf) + if n != len(formattedBuf) { + return n, io.ErrShortWrite + } + return len(buf), err +} + +func (sw *streamWriter) format(buf []byte) []byte { + msg := &jsonmessage.JSONMessage{Stream: sw.lineFormat(buf)} + b, err := json.Marshal(msg) + if err != nil { + return FormatError(err) + } + return appendNewline(b) +} + +// NewStdoutWriter returns a writer which formats the output as json message +// representing stdout lines +func NewStdoutWriter(out io.Writer) io.Writer { + return &streamWriter{Writer: out, lineFormat: func(buf []byte) string { + return string(buf) + }} +} + +// NewStderrWriter returns a writer which formats the output as json message +// representing stderr lines +func NewStderrWriter(out io.Writer) io.Writer { + return &streamWriter{Writer: out, lineFormat: func(buf []byte) string { + return "\033[91m" + string(buf) + "\033[0m" + }} +} diff --git a/vendor/github.com/moby/moby/pkg/streamformatter/streamwriter_test.go b/vendor/github.com/moby/moby/pkg/streamformatter/streamwriter_test.go new file mode 100644 index 000000000..4935cc595 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/streamformatter/streamwriter_test.go @@ -0,0 +1,35 @@ +package streamformatter + +import ( + "testing" + + "bytes" + "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/require" +) + +func TestStreamWriterStdout(t *testing.T) { + buffer := &bytes.Buffer{} + content := "content" + sw := NewStdoutWriter(buffer) + size, err := sw.Write([]byte(content)) + + require.NoError(t, err) + assert.Equal(t, len(content), size) + + expected := `{"stream":"content"}` + streamNewline + assert.Equal(t, expected, buffer.String()) +} + +func TestStreamWriterStderr(t *testing.T) { + buffer := &bytes.Buffer{} + content := "content" + sw := NewStderrWriter(buffer) + size, err := sw.Write([]byte(content)) + + require.NoError(t, err) + assert.Equal(t, len(content), size) + + expected := `{"stream":"\u001b[91mcontent\u001b[0m"}` + streamNewline + assert.Equal(t, expected, buffer.String()) +} diff --git a/vendor/github.com/moby/moby/pkg/stringid/README.md b/vendor/github.com/moby/moby/pkg/stringid/README.md new file mode 100644 index 000000000..37a5098fd --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/stringid/README.md @@ -0,0 +1 @@ +This package provides helper functions for dealing with string identifiers diff --git a/vendor/github.com/moby/moby/pkg/stringid/stringid.go b/vendor/github.com/moby/moby/pkg/stringid/stringid.go new file mode 100644 index 000000000..a0c7c42a0 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/stringid/stringid.go @@ -0,0 +1,99 @@ +// Package stringid provides helper functions for dealing with string identifiers +package stringid + +import ( + cryptorand "crypto/rand" + "encoding/hex" + "fmt" + "io" + "math" + "math/big" + "math/rand" + "regexp" + "strconv" + "strings" + "time" +) + +const shortLen = 12 + +var ( + validShortID = regexp.MustCompile("^[a-f0-9]{12}$") + validHex = regexp.MustCompile(`^[a-f0-9]{64}$`) +) + +// IsShortID determines if an arbitrary string *looks like* a short ID. +func IsShortID(id string) bool { + return validShortID.MatchString(id) +} + +// TruncateID returns a shorthand version of a string identifier for convenience. +// A collision with other shorthands is very unlikely, but possible. +// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller +// will need to use a longer prefix, or the full-length Id. +func TruncateID(id string) string { + if i := strings.IndexRune(id, ':'); i >= 0 { + id = id[i+1:] + } + if len(id) > shortLen { + id = id[:shortLen] + } + return id +} + +func generateID(r io.Reader) string { + b := make([]byte, 32) + for { + if _, err := io.ReadFull(r, b); err != nil { + panic(err) // This shouldn't happen + } + id := hex.EncodeToString(b) + // if we try to parse the truncated for as an int and we don't have + // an error then the value is all numeric and causes issues when + // used as a hostname. ref #3869 + if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil { + continue + } + return id + } +} + +// GenerateRandomID returns a unique id. +func GenerateRandomID() string { + return generateID(cryptorand.Reader) +} + +// GenerateNonCryptoID generates unique id without using cryptographically +// secure sources of random. +// It helps you to save entropy. +func GenerateNonCryptoID() string { + return generateID(readerFunc(rand.Read)) +} + +// ValidateID checks whether an ID string is a valid image ID. +func ValidateID(id string) error { + if ok := validHex.MatchString(id); !ok { + return fmt.Errorf("image ID %q is invalid", id) + } + return nil +} + +func init() { + // safely set the seed globally so we generate random ids. Tries to use a + // crypto seed before falling back to time. 
+ var seed int64 + if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil { + // This should not happen, but worst-case fallback to time-based seed. + seed = time.Now().UnixNano() + } else { + seed = cryptoseed.Int64() + } + + rand.Seed(seed) +} + +type readerFunc func(p []byte) (int, error) + +func (fn readerFunc) Read(p []byte) (int, error) { + return fn(p) +} diff --git a/vendor/github.com/moby/moby/pkg/stringid/stringid_test.go b/vendor/github.com/moby/moby/pkg/stringid/stringid_test.go new file mode 100644 index 000000000..8ff6b4383 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/stringid/stringid_test.go @@ -0,0 +1,72 @@ +package stringid + +import ( + "strings" + "testing" +) + +func TestGenerateRandomID(t *testing.T) { + id := GenerateRandomID() + + if len(id) != 64 { + t.Fatalf("Id returned is incorrect: %s", id) + } +} + +func TestGenerateNonCryptoID(t *testing.T) { + id := GenerateNonCryptoID() + + if len(id) != 64 { + t.Fatalf("Id returned is incorrect: %s", id) + } +} + +func TestShortenId(t *testing.T) { + id := "90435eec5c4e124e741ef731e118be2fc799a68aba0466ec17717f24ce2ae6a2" + truncID := TruncateID(id) + if truncID != "90435eec5c4e" { + t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) + } +} + +func TestShortenSha256Id(t *testing.T) { + id := "sha256:4e38e38c8ce0b8d9041a9c4fefe786631d1416225e13b0bfe8cfa2321aec4bba" + truncID := TruncateID(id) + if truncID != "4e38e38c8ce0" { + t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) + } +} + +func TestShortenIdEmpty(t *testing.T) { + id := "" + truncID := TruncateID(id) + if len(truncID) > len(id) { + t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) + } +} + +func TestShortenIdInvalid(t *testing.T) { + id := "1234" + truncID := TruncateID(id) + if len(truncID) != len(id) { + t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) + } +} + +func TestIsShortIDNonHex(t *testing.T) { + id := "some non-hex value" + if IsShortID(id) { + t.Fatalf("%s is not a short ID", id) + } +} + +func TestIsShortIDNotCorrectSize(t *testing.T) { + id := strings.Repeat("a", shortLen+1) + if IsShortID(id) { + t.Fatalf("%s is not a short ID", id) + } + id = strings.Repeat("a", shortLen-1) + if IsShortID(id) { + t.Fatalf("%s is not a short ID", id) + } +} diff --git a/vendor/github.com/moby/moby/pkg/stringutils/README.md b/vendor/github.com/moby/moby/pkg/stringutils/README.md new file mode 100644 index 000000000..b3e454573 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/stringutils/README.md @@ -0,0 +1 @@ +This package provides helper functions for dealing with strings diff --git a/vendor/github.com/moby/moby/pkg/stringutils/stringutils.go b/vendor/github.com/moby/moby/pkg/stringutils/stringutils.go new file mode 100644 index 000000000..8c4c39875 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/stringutils/stringutils.go @@ -0,0 +1,99 @@ +// Package stringutils provides helper functions for dealing with strings. +package stringutils + +import ( + "bytes" + "math/rand" + "strings" +) + +// GenerateRandomAlphaOnlyString generates an alphabetical random string with length n. 
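+// The result is drawn from the fixed 52-letter alphabet below via math/rand,
+// so it is not suitable for cryptographic use.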
+func GenerateRandomAlphaOnlyString(n int) string {
+	// pick random letters from the upper- and lower-case alphabet
+	letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
+	b := make([]byte, n)
+	for i := range b {
+		b[i] = letters[rand.Intn(len(letters))]
+	}
+	return string(b)
+}
+
+// GenerateRandomASCIIString generates an ASCII random string with length n.
+func GenerateRandomASCIIString(n int) string {
+	chars := "abcdefghijklmnopqrstuvwxyz" +
+		"ABCDEFGHIJKLMNOPQRSTUVWXYZ" +
+		"~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` "
+	res := make([]byte, n)
+	for i := 0; i < n; i++ {
+		res[i] = chars[rand.Intn(len(chars))]
+	}
+	return string(res)
+}
+
+// Ellipsis truncates a string to fit within maxlen, and appends ellipsis (...).
+// For maxlen of 3 and lower, no ellipsis is appended.
+func Ellipsis(s string, maxlen int) string {
+	r := []rune(s)
+	if len(r) <= maxlen {
+		return s
+	}
+	if maxlen <= 3 {
+		return string(r[:maxlen])
+	}
+	return string(r[:maxlen-3]) + "..."
+}
+
+// Truncate truncates a string to maxlen.
+func Truncate(s string, maxlen int) string {
+	r := []rune(s)
+	if len(r) <= maxlen {
+		return s
+	}
+	return string(r[:maxlen])
+}
+
+// InSlice tests whether a string is contained in a slice of strings or not.
+// Comparison is case-insensitive.
+func InSlice(slice []string, s string) bool {
+	for _, ss := range slice {
+		if strings.ToLower(s) == strings.ToLower(ss) {
+			return true
+		}
+	}
+	return false
+}
+
+func quote(word string, buf *bytes.Buffer) {
+	// Bail out early for "simple" strings
+	if word != "" && !strings.ContainsAny(word, "\\'\"`${[|&;<>()~*?! \t\n") {
+		buf.WriteString(word)
+		return
+	}
+
+	buf.WriteString("'")
+
+	for i := 0; i < len(word); i++ {
+		b := word[i]
+		if b == '\'' {
+			// Replace literal ' with a close ', a \', and an open '
+			buf.WriteString("'\\''")
+		} else {
+			buf.WriteByte(b)
+		}
+	}
+
+	buf.WriteString("'")
+}
+
+// ShellQuoteArguments takes a list of strings and escapes them so they will be
+// handled correctly when passed as arguments to a program via a shell
+func ShellQuoteArguments(args []string) string {
+	var buf bytes.Buffer
+	for i, arg := range args {
+		if i != 0 {
+			buf.WriteByte(' ')
+		}
+		quote(arg, &buf)
+	}
+	return buf.String()
+}
diff --git a/vendor/github.com/moby/moby/pkg/stringutils/stringutils_test.go b/vendor/github.com/moby/moby/pkg/stringutils/stringutils_test.go
new file mode 100644
index 000000000..8af2bdcc0
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/stringutils/stringutils_test.go
@@ -0,0 +1,121 @@
+package stringutils
+
+import "testing"
+
+func testLengthHelper(generator func(int) string, t *testing.T) {
+	expectedLength := 20
+	s := generator(expectedLength)
+	if len(s) != expectedLength {
+		t.Fatalf("Length of %s was %d but expected length %d", s, len(s), expectedLength)
+	}
+}
+
+func testUniquenessHelper(generator func(int) string, t *testing.T) {
+	repeats := 25
+	set := make(map[string]struct{}, repeats)
+	for i := 0; i < repeats; i = i + 1 {
+		str := generator(64)
+		if len(str) != 64 {
+			t.Fatalf("Id returned is incorrect: %s", str)
+		}
+		if _, ok := set[str]; ok {
+			t.Fatalf("Random number is repeated")
+		}
+		set[str] = struct{}{}
+	}
+}
+
+func isASCII(s string) bool {
+	for _, c := range s {
+		if c > 127 {
+			return false
+		}
+	}
+	return true
+}
+
+func TestGenerateRandomAlphaOnlyStringLength(t *testing.T) {
+	testLengthHelper(GenerateRandomAlphaOnlyString, t)
+}
+
+func TestGenerateRandomAlphaOnlyStringUniqueness(t *testing.T) {
+	testUniquenessHelper(GenerateRandomAlphaOnlyString, t)
+}
+
+func TestGenerateRandomAsciiStringLength(t *testing.T) {
+	testLengthHelper(GenerateRandomASCIIString, t)
+}
+
+func TestGenerateRandomAsciiStringUniqueness(t *testing.T) {
+	testUniquenessHelper(GenerateRandomASCIIString, t)
+}
+
+func TestGenerateRandomAsciiStringIsAscii(t *testing.T) {
+	str := GenerateRandomASCIIString(64)
+	if !isASCII(str) {
+		t.Fatalf("%s contained non-ascii characters", str)
+	}
+}
+
+func TestEllipsis(t *testing.T) {
+	str := "t🐳ststring"
+	newstr := Ellipsis(str, 3)
+	if newstr != "t🐳s" {
+		t.Fatalf("Expected t🐳s, got %s", newstr)
+	}
+	newstr = Ellipsis(str, 8)
+	if newstr != "t🐳sts..." {
+		t.Fatalf("Expected t🐳sts..., got %s", newstr)
+	}
+	newstr = Ellipsis(str, 20)
+	if newstr != "t🐳ststring" {
+		t.Fatalf("Expected t🐳ststring, got %s", newstr)
+	}
+}
+
+func TestTruncate(t *testing.T) {
+	str := "t🐳ststring"
+	newstr := Truncate(str, 4)
+	if newstr != "t🐳st" {
+		t.Fatalf("Expected t🐳st, got %s", newstr)
+	}
+	newstr = Truncate(str, 20)
+	if newstr != "t🐳ststring" {
+		t.Fatalf("Expected t🐳ststring, got %s", newstr)
+	}
+}
+
+func TestInSlice(t *testing.T) {
+	slice := []string{"t🐳st", "in", "slice"}
+
+	test := InSlice(slice, "t🐳st")
+	if !test {
+		t.Fatalf("Expected string t🐳st to be in slice")
+	}
+	test = InSlice(slice, "SLICE")
+	if !test {
+		t.Fatalf("Expected string SLICE to be in slice")
+	}
+	test = InSlice(slice, "notinslice")
+	if test {
+		t.Fatalf("Expected string notinslice not to be in slice")
+	}
+}
+
+func TestShellQuoteArgumentsEmpty(t *testing.T) {
+	actual := ShellQuoteArguments([]string{})
+	expected := ""
+	if actual != expected {
+		t.Fatalf("Expected an empty string")
+	}
+}
+
+func TestShellQuoteArguments(t *testing.T) {
+	simpleString := "simpleString"
+	complexString := "This is a 'more' complex $tring with some special char *"
+	actual := ShellQuoteArguments([]string{simpleString, complexString})
+	expected := "simpleString 'This is a '\\''more'\\'' complex $tring with some special char *'"
+	if actual != expected {
+		t.Fatalf("Expected \"%v\", got \"%v\"", expected, actual)
+	}
+}
diff --git a/vendor/github.com/moby/moby/pkg/symlink/LICENSE.APACHE b/vendor/github.com/moby/moby/pkg/symlink/LICENSE.APACHE
new file mode 100644
index 000000000..b9fbf3c98
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/symlink/LICENSE.APACHE
@@ -0,0 +1,191 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014-2017 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/moby/moby/pkg/symlink/LICENSE.BSD b/vendor/github.com/moby/moby/pkg/symlink/LICENSE.BSD new file mode 100644 index 000000000..4c056c5ed --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/symlink/LICENSE.BSD @@ -0,0 +1,27 @@ +Copyright (c) 2014-2017 The Docker & Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/moby/moby/pkg/symlink/README.md b/vendor/github.com/moby/moby/pkg/symlink/README.md
new file mode 100644
index 000000000..8dba54fd0
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/symlink/README.md
@@ -0,0 +1,6 @@
+Package symlink implements EvalSymlinksInScope which is an extension of filepath.EvalSymlinks,
+as well as a Windows long-path aware version of filepath.EvalSymlinks
+from the [Go standard library](https://golang.org/pkg/path/filepath).
+
+The code from filepath.EvalSymlinks has been adapted in fs.go.
+Please read the LICENSE.BSD file that governs fs.go and LICENSE.APACHE for fs_test.go.
diff --git a/vendor/github.com/moby/moby/pkg/symlink/fs.go b/vendor/github.com/moby/moby/pkg/symlink/fs.go
new file mode 100644
index 000000000..52fb9a691
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/symlink/fs.go
@@ -0,0 +1,144 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.BSD file.
+
+// This code is a modified version of path/filepath/symlink.go from the Go standard library.
+
+package symlink
+
+import (
+	"bytes"
+	"errors"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/docker/docker/pkg/system"
+)
+
+// FollowSymlinkInScope is a wrapper around evalSymlinksInScope that returns an
+// absolute path. This function handles paths in a platform-agnostic manner.
+func FollowSymlinkInScope(path, root string) (string, error) {
+	path, err := filepath.Abs(filepath.FromSlash(path))
+	if err != nil {
+		return "", err
+	}
+	root, err = filepath.Abs(filepath.FromSlash(root))
+	if err != nil {
+		return "", err
+	}
+	return evalSymlinksInScope(path, root)
+}
+
+// evalSymlinksInScope will evaluate symlinks in `path` within a scope `root` and return
+// a result guaranteed to be contained within the scope `root`, at the time of the call.
+// Symlinks in `root` are not evaluated and left as-is.
+// Errors encountered while attempting to evaluate symlinks in path will be returned.
+// Non-existing paths are valid and do not constitute an error.
+// `path` has to contain `root` as a prefix, or else an error will be returned.
+// Trying to break out from `root` does not constitute an error.
+//
+// Example:
+//   If /foo/bar -> /outside,
+//   FollowSymlinkInScope("/foo/bar", "/foo") == "/foo/outside" instead of "/outside"
+//
+// IMPORTANT: it is the caller's responsibility to call evalSymlinksInScope *after* relevant
+// symlinks are created, and not to subsequently create additional symlinks that could
+// potentially make a previously-safe path unsafe. Example: if /foo/bar does not exist,
+// evalSymlinksInScope("/foo/bar", "/foo") would return "/foo/bar". If one subsequently
+// makes /foo/bar a symlink to /baz, then "/foo/bar" should
+// no longer be considered safely contained in "/foo".
+func evalSymlinksInScope(path, root string) (string, error) {
+	root = filepath.Clean(root)
+	if path == root {
+		return path, nil
+	}
+	if !strings.HasPrefix(path, root) {
+		return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root)
+	}
+	const maxIter = 255
+	originalPath := path
+	// given root of "/a" and path of "/a/b/../../c" we want path to be "/b/../../c"
+	path = path[len(root):]
+	if root == string(filepath.Separator) {
+		path = string(filepath.Separator) + path
+	}
+	if !strings.HasPrefix(path, string(filepath.Separator)) {
+		return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root)
+	}
+	path = filepath.Clean(path)
+	// consume path by taking each frontmost path element,
+	// expanding it if it's a symlink, and appending it to b
+	var b bytes.Buffer
+	// b here will always be considered to be the "current absolute path inside
+	// root"; when we append paths to it, we also append a slash, and we use
+	// filepath.Clean after the loop to trim any trailing slash
+	for n := 0; path != ""; n++ {
+		if n > maxIter {
+			return "", errors.New("evalSymlinksInScope: too many links in " + originalPath)
+		}
+
+		// find next path component, p
+		i := strings.IndexRune(path, filepath.Separator)
+		var p string
+		if i == -1 {
+			p, path = path, ""
+		} else {
+			p, path = path[:i], path[i+1:]
+		}
+
+		if p == "" {
+			continue
+		}
+
+		// this takes a b.String() like "b/../" and a p like "c" and turns it
+		// into "/b/../c" which then gets filepath.Cleaned into "/c" and then
+		// root gets prepended and we Clean again (to remove any trailing slash
+		// if the first Clean gave us just "/")
+		cleanP := filepath.Clean(string(filepath.Separator) + b.String() + p)
+		if isDriveOrRoot(cleanP) {
+			// never Lstat "/" itself, or drive letters on Windows
+			b.Reset()
+			continue
+		}
+		fullP := filepath.Clean(root + cleanP)
+
+		fi, err := os.Lstat(fullP)
+		if os.IsNotExist(err) {
+			// if p does not exist, accept it
+			b.WriteString(p)
+			b.WriteRune(filepath.Separator)
+			continue
+		}
+		if err != nil {
+			return "", err
+		}
+		if fi.Mode()&os.ModeSymlink == 0 {
+			b.WriteString(p)
+			b.WriteRune(filepath.Separator)
+			continue
+		}
+
+		// it's a symlink, put it at the front of path
+		dest, err := os.Readlink(fullP)
+		if err != nil {
+			return "", err
+		}
+		if system.IsAbs(dest) {
+			b.Reset()
+		}
+		path = dest + string(filepath.Separator) + path
+	}
+
+	// see note above on "fullP := ..." for why this is double-cleaned and
+	// what's happening here
+	return filepath.Clean(root + filepath.Clean(string(filepath.Separator)+b.String())), nil
+}
+
+// EvalSymlinks returns the path name after the evaluation of any symbolic
+// links.
+// If path is relative, the result will be relative to the current directory,
+// unless one of the components is an absolute symbolic link.
+// This version has been updated to support long paths prepended with `\\?\`.
+func EvalSymlinks(path string) (string, error) { + return evalSymlinks(path) +} diff --git a/vendor/github.com/moby/moby/pkg/symlink/fs_unix.go b/vendor/github.com/moby/moby/pkg/symlink/fs_unix.go new file mode 100644 index 000000000..22708273d --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/symlink/fs_unix.go @@ -0,0 +1,15 @@ +// +build !windows + +package symlink + +import ( + "path/filepath" +) + +func evalSymlinks(path string) (string, error) { + return filepath.EvalSymlinks(path) +} + +func isDriveOrRoot(p string) bool { + return p == string(filepath.Separator) +} diff --git a/vendor/github.com/moby/moby/pkg/symlink/fs_unix_test.go b/vendor/github.com/moby/moby/pkg/symlink/fs_unix_test.go new file mode 100644 index 000000000..7085c0b66 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/symlink/fs_unix_test.go @@ -0,0 +1,407 @@ +// +build !windows + +// Licensed under the Apache License, Version 2.0; See LICENSE.APACHE + +package symlink + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +// TODO Windows: This needs some serious work to port to Windows. For now, +// turning off testing in this package. + +type dirOrLink struct { + path string + target string +} + +func makeFs(tmpdir string, fs []dirOrLink) error { + for _, s := range fs { + s.path = filepath.Join(tmpdir, s.path) + if s.target == "" { + os.MkdirAll(s.path, 0755) + continue + } + if err := os.MkdirAll(filepath.Dir(s.path), 0755); err != nil { + return err + } + if err := os.Symlink(s.target, s.path); err != nil && !os.IsExist(err) { + return err + } + } + return nil +} + +func testSymlink(tmpdir, path, expected, scope string) error { + rewrite, err := FollowSymlinkInScope(filepath.Join(tmpdir, path), filepath.Join(tmpdir, scope)) + if err != nil { + return err + } + expected, err = filepath.Abs(filepath.Join(tmpdir, expected)) + if err != nil { + return err + } + if expected != rewrite { + return fmt.Errorf("Expected %q got %q", expected, rewrite) + } + return nil +} + +func TestFollowSymlinkAbsolute(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkAbsolute") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/a/d/c/data", "testdata/b/c/data", "testdata"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRelativePath(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/i", target: "a"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/i", "testdata/fs/a", "testdata"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkSkipSymlinksOutsideScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSkipSymlinksOutsideScope") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{ + {path: "linkdir", target: "realdir"}, + {path: "linkdir/foo/bar"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "linkdir/foo/bar", "linkdir/foo/bar", "linkdir/foo"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkInvalidScopePathPair(t *testing.T) { + if _, err := FollowSymlinkInScope("toto", "testdata"); err == nil { + t.Fatal("expected an error") + } +} + +func TestFollowSymlinkLastLink(t *testing.T) { + tmpdir, err := 
ioutil.TempDir("", "TestFollowSymlinkLastLink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/a/d", "testdata/b", "testdata"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRelativeLinkChangeScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChangeScope") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/e", target: "../b"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/a/e/c/data", "testdata/fs/b/c/data", "testdata"); err != nil { + t.Fatal(err) + } + // avoid letting allowing symlink e lead us to ../b + // normalize to the "testdata/fs/a" + if err := testSymlink(tmpdir, "testdata/fs/a/e", "testdata/fs/a/b", "testdata/fs/a"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkDeepRelativeLinkChangeScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDeepRelativeLinkChangeScope") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/f", target: "../../../../test"}}); err != nil { + t.Fatal(err) + } + // avoid letting symlink f lead us out of the "testdata" scope + // we don't normalize because symlink f is in scope and there is no + // information leak + if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/test", "testdata"); err != nil { + t.Fatal(err) + } + // avoid letting symlink f lead us out of the "testdata/fs" scope + // we don't normalize because symlink f is in scope and there is no + // information leak + if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/fs/test", "testdata/fs"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRelativeLinkChain(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChain") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + // avoid letting symlink g (pointed at by symlink h) take out of scope + // TODO: we should probably normalize to scope here because ../[....]/root + // is out of scope and we leak information + if err := makeFs(tmpdir, []dirOrLink{ + {path: "testdata/fs/b/h", target: "../g"}, + {path: "testdata/fs/g", target: "../../../../../../../../../../../../root"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/b/h", "testdata/root", "testdata"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkBreakoutPath(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkBreakoutPath") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + // avoid letting symlink -> ../directory/file escape from scope + // normalize to "testdata/fs/j" + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/j/k", target: "../i/a"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/j/k", "testdata/fs/j/i/a", "testdata/fs/j"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkToRoot(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkToRoot") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + // make sure we don't allow escaping to / + // normalize to dir + if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "foo", "", ""); 
err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkSlashDotdot(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSlashDotdot") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + tmpdir = filepath.Join(tmpdir, "dir", "subdir") + + // make sure we don't allow escaping to / + // normalize to dir + if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/../../"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "foo", "", ""); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkDotdot(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDotdot") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + tmpdir = filepath.Join(tmpdir, "dir", "subdir") + + // make sure we stay in scope without leaking information + // this also checks for escaping to / + // normalize to dir + if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "../../"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "foo", "", ""); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRelativePath2(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath2") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{{path: "bar/foo", target: "baz/target"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "bar/foo", "bar/baz/target", ""); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkScopeLink(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkScopeLink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root2"}, + {path: "root", target: "root2"}, + {path: "root2/foo", target: "../bar"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/foo", "root/bar", "root"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRootScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRootScope") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + expected, err := filepath.EvalSymlinks(tmpdir) + if err != nil { + t.Fatal(err) + } + rewrite, err := FollowSymlinkInScope(tmpdir, "/") + if err != nil { + t.Fatal(err) + } + if rewrite != expected { + t.Fatalf("expected %q got %q", expected, rewrite) + } +} + +func TestFollowSymlinkEmpty(t *testing.T) { + res, err := FollowSymlinkInScope("", "") + if err != nil { + t.Fatal(err) + } + wd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + if res != wd { + t.Fatalf("expected %q got %q", wd, res) + } +} + +func TestFollowSymlinkCircular(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkCircular") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{{path: "root/foo", target: "foo"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/foo", "", "root"); err == nil { + t.Fatal("expected an error for foo -> foo") + } + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root/bar", target: "baz"}, + {path: "root/baz", target: "../bak"}, + {path: "root/bak", target: "/bar"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/foo", "", "root"); err == nil { + t.Fatal("expected an error for bar -> baz -> bak -> bar") + } +} + +func TestFollowSymlinkComplexChainWithTargetPathsContainingLinks(t *testing.T) { + tmpdir, err := ioutil.TempDir("", 
"TestFollowSymlinkComplexChainWithTargetPathsContainingLinks") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root2"}, + {path: "root", target: "root2"}, + {path: "root/a", target: "r/s"}, + {path: "root/r", target: "../root/t"}, + {path: "root/root/t/s/b", target: "/../u"}, + {path: "root/u/c", target: "."}, + {path: "root/u/x/y", target: "../v"}, + {path: "root/u/v", target: "/../w"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/a/b/c/x/y/z", "root/w/z", "root"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkBreakoutNonExistent(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkBreakoutNonExistent") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root/slash", target: "/"}, + {path: "root/sym", target: "/idontexist/../slash"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/sym/file", "root/file", "root"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkNoLexicalCleaning(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkNoLexicalCleaning") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root/sym", target: "/foo/bar"}, + {path: "root/hello", target: "/sym/../baz"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/hello", "root/foo/baz", "root"); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/pkg/symlink/fs_windows.go b/vendor/github.com/moby/moby/pkg/symlink/fs_windows.go new file mode 100644 index 000000000..31523ade9 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/symlink/fs_windows.go @@ -0,0 +1,169 @@ +package symlink + +import ( + "bytes" + "errors" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/longpath" + "golang.org/x/sys/windows" +) + +func toShort(path string) (string, error) { + p, err := windows.UTF16FromString(path) + if err != nil { + return "", err + } + b := p // GetShortPathName says we can reuse buffer + n, err := windows.GetShortPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + if n > uint32(len(b)) { + b = make([]uint16, n) + if _, err = windows.GetShortPathName(&p[0], &b[0], uint32(len(b))); err != nil { + return "", err + } + } + return windows.UTF16ToString(b), nil +} + +func toLong(path string) (string, error) { + p, err := windows.UTF16FromString(path) + if err != nil { + return "", err + } + b := p // GetLongPathName says we can reuse buffer + n, err := windows.GetLongPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + if n > uint32(len(b)) { + b = make([]uint16, n) + n, err = windows.GetLongPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + } + b = b[:n] + return windows.UTF16ToString(b), nil +} + +func evalSymlinks(path string) (string, error) { + path, err := walkSymlinks(path) + if err != nil { + return "", err + } + + p, err := toShort(path) + if err != nil { + return "", err + } + p, err = toLong(p) + if err != nil { + return "", err + } + // windows.GetLongPathName does not change the case of the drive letter, + // but the result of EvalSymlinks must be unique, so we have + // EvalSymlinks(`c:\a`) == EvalSymlinks(`C:\a`). + // Make drive letter upper case. 
+ if len(p) >= 2 && p[1] == ':' && 'a' <= p[0] && p[0] <= 'z' { + p = string(p[0]+'A'-'a') + p[1:] + } else if len(p) >= 6 && p[5] == ':' && 'a' <= p[4] && p[4] <= 'z' { + p = p[:3] + string(p[4]+'A'-'a') + p[5:] + } + return filepath.Clean(p), nil +} + +const utf8RuneSelf = 0x80 + +func walkSymlinks(path string) (string, error) { + const maxIter = 255 + originalPath := path + // consume path by taking each frontmost path element, + // expanding it if it's a symlink, and appending it to b + var b bytes.Buffer + for n := 0; path != ""; n++ { + if n > maxIter { + return "", errors.New("EvalSymlinks: too many links in " + originalPath) + } + + // A path beginning with `\\?\` represents the root, so automatically + // skip that part and begin processing the next segment. + if strings.HasPrefix(path, longpath.Prefix) { + b.WriteString(longpath.Prefix) + path = path[4:] + continue + } + + // find next path component, p + var i = -1 + for j, c := range path { + if c < utf8RuneSelf && os.IsPathSeparator(uint8(c)) { + i = j + break + } + } + var p string + if i == -1 { + p, path = path, "" + } else { + p, path = path[:i], path[i+1:] + } + + if p == "" { + if b.Len() == 0 { + // must be absolute path + b.WriteRune(filepath.Separator) + } + continue + } + + // If this is the first segment after the long path prefix, accept the + // current segment as a volume root or UNC share and move on to the next. + if b.String() == longpath.Prefix { + b.WriteString(p) + b.WriteRune(filepath.Separator) + continue + } + + fi, err := os.Lstat(b.String() + p) + if err != nil { + return "", err + } + if fi.Mode()&os.ModeSymlink == 0 { + b.WriteString(p) + if path != "" || (b.Len() == 2 && len(p) == 2 && p[1] == ':') { + b.WriteRune(filepath.Separator) + } + continue + } + + // it's a symlink, put it at the front of path + dest, err := os.Readlink(b.String() + p) + if err != nil { + return "", err + } + if filepath.IsAbs(dest) || os.IsPathSeparator(dest[0]) { + b.Reset() + } + path = dest + string(filepath.Separator) + path + } + return filepath.Clean(b.String()), nil +} + +func isDriveOrRoot(p string) bool { + if p == string(filepath.Separator) { + return true + } + + length := len(p) + if length >= 2 { + if p[length-1] == ':' && (('a' <= p[length-2] && p[length-2] <= 'z') || ('A' <= p[length-2] && p[length-2] <= 'Z')) { + return true + } + } + return false +} diff --git a/vendor/github.com/moby/moby/pkg/sysinfo/README.md b/vendor/github.com/moby/moby/pkg/sysinfo/README.md new file mode 100644 index 000000000..c1530cef0 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/sysinfo/README.md @@ -0,0 +1 @@ +SysInfo stores information about which features a kernel supports. 
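As a quick orientation to the sysinfo package vendored below: `New(quiet bool)` probes the running kernel and returns a populated `*SysInfo` (passing `quiet=true` suppresses the warnings it would otherwise log for missing features), and `NumCPU()` reports the CPUs usable by the calling process. A minimal consumer might look like the following sketch; the `package main` wrapper and printed labels are illustrative only, while the import path is the vendored one introduced by this patch.

package main

import (
	"fmt"

	"github.com/moby/moby/pkg/sysinfo"
)

func main() {
	// Probe the kernel without logging warnings for unsupported features.
	info := sysinfo.New(true)

	// Exported fields (including those promoted from the embedded cgroup
	// structs) report individual capabilities.
	fmt.Println("AppArmor supported: ", info.AppArmor)
	fmt.Println("Seccomp supported:  ", info.Seccomp)
	fmt.Println("Memory limit cgroup:", info.MemoryLimit)

	// NumCPU respects the process affinity mask where the platform allows it.
	fmt.Println("CPUs available:     ", sysinfo.NumCPU())
}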
diff --git a/vendor/github.com/moby/moby/pkg/sysinfo/numcpu.go b/vendor/github.com/moby/moby/pkg/sysinfo/numcpu.go
new file mode 100644
index 000000000..aeb1a3a80
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/sysinfo/numcpu.go
@@ -0,0 +1,12 @@
+// +build !linux,!windows
+
+package sysinfo
+
+import (
+	"runtime"
+)
+
+// NumCPU returns the number of CPUs
+func NumCPU() int {
+	return runtime.NumCPU()
+}
diff --git a/vendor/github.com/moby/moby/pkg/sysinfo/numcpu_linux.go b/vendor/github.com/moby/moby/pkg/sysinfo/numcpu_linux.go
new file mode 100644
index 000000000..f1d2d9db3
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/sysinfo/numcpu_linux.go
@@ -0,0 +1,44 @@
+// +build linux
+
+package sysinfo
+
+import (
+	"runtime"
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+// numCPU queries the system for the count of threads available
+// for use by this process.
+//
+// Issues two syscalls.
+// Returns 0 on errors. Use |runtime.NumCPU| in that case.
+func numCPU() int {
+	// Get the affinity mask for the current process: the very one invoking this function.
+	pid, _, _ := unix.RawSyscall(unix.SYS_GETPID, 0, 0, 0)
+
+	var mask [1024 / 64]uintptr
+	_, _, err := unix.RawSyscall(unix.SYS_SCHED_GETAFFINITY, pid, uintptr(len(mask)*8), uintptr(unsafe.Pointer(&mask[0])))
+	if err != 0 {
+		return 0
+	}
+
+	// For every available thread a bit is set in the mask.
+	ncpu := 0
+	for _, e := range mask {
+		if e == 0 {
+			continue
+		}
+		ncpu += int(popcnt(uint64(e)))
+	}
+	return ncpu
+}
+
+// NumCPU returns the number of CPUs which are currently online
+func NumCPU() int {
+	if ncpu := numCPU(); ncpu > 0 {
+		return ncpu
+	}
+	return runtime.NumCPU()
+}
diff --git a/vendor/github.com/moby/moby/pkg/sysinfo/numcpu_windows.go b/vendor/github.com/moby/moby/pkg/sysinfo/numcpu_windows.go
new file mode 100644
index 000000000..1d89dd550
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/sysinfo/numcpu_windows.go
@@ -0,0 +1,37 @@
+// +build windows
+
+package sysinfo
+
+import (
+	"runtime"
+	"unsafe"
+
+	"golang.org/x/sys/windows"
+)
+
+var (
+	kernel32                = windows.NewLazySystemDLL("kernel32.dll")
+	getCurrentProcess       = kernel32.NewProc("GetCurrentProcess")
+	getProcessAffinityMask  = kernel32.NewProc("GetProcessAffinityMask")
+)
+
+func numCPU() int {
+	// Get the affinity mask for the current process
+	var mask, sysmask uintptr
+	currentProcess, _, _ := getCurrentProcess.Call()
+	ret, _, _ := getProcessAffinityMask.Call(currentProcess, uintptr(unsafe.Pointer(&mask)), uintptr(unsafe.Pointer(&sysmask)))
+	if ret == 0 {
+		return 0
+	}
+	// For every available thread a bit is set in the mask.
+	ncpu := int(popcnt(uint64(mask)))
+	return ncpu
+}
+
+// NumCPU returns the number of CPUs which are currently online
+func NumCPU() int {
+	if ncpu := numCPU(); ncpu > 0 {
+		return ncpu
+	}
+	return runtime.NumCPU()
+}
diff --git a/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo.go b/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo.go
new file mode 100644
index 000000000..f046de4b1
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo.go
@@ -0,0 +1,144 @@
+package sysinfo
+
+import "github.com/docker/docker/pkg/parsers"
+
+// SysInfo stores information about which features a kernel supports.
+// TODO Windows: Factor out platform specific capabilities.
+type SysInfo struct {
+	// Whether the kernel supports AppArmor or not
+	AppArmor bool
+	// Whether the kernel supports Seccomp or not
+	Seccomp bool
+
+	cgroupMemInfo
+	cgroupCPUInfo
+	cgroupBlkioInfo
+	cgroupCpusetInfo
+	cgroupPids
+
+	// Whether IPv4 forwarding is supported or not; if it is disabled, networking will not work
+	IPv4ForwardingDisabled bool
+
+	// Whether bridge-nf-call-iptables is supported or not
+	BridgeNFCallIPTablesDisabled bool
+
+	// Whether bridge-nf-call-ip6tables is supported or not
+	BridgeNFCallIP6TablesDisabled bool
+
+	// Whether the "devices" cgroup mountpoint exists or not
+	CgroupDevicesEnabled bool
+}
+
+type cgroupMemInfo struct {
+	// Whether memory limit is supported or not
+	MemoryLimit bool
+
+	// Whether swap limit is supported or not
+	SwapLimit bool
+
+	// Whether soft limit is supported or not
+	MemoryReservation bool
+
+	// Whether OOM killer disable is supported or not
+	OomKillDisable bool
+
+	// Whether memory swappiness is supported or not
+	MemorySwappiness bool
+
+	// Whether kernel memory limit is supported or not
+	KernelMemory bool
+}
+
+type cgroupCPUInfo struct {
+	// Whether CPU shares is supported or not
+	CPUShares bool
+
+	// Whether CPU CFS (Completely Fair Scheduler) period is supported or not
+	CPUCfsPeriod bool
+
+	// Whether CPU CFS (Completely Fair Scheduler) quota is supported or not
+	CPUCfsQuota bool
+
+	// Whether CPU real-time period is supported or not
+	CPURealtimePeriod bool
+
+	// Whether CPU real-time runtime is supported or not
+	CPURealtimeRuntime bool
+}
+
+type cgroupBlkioInfo struct {
+	// Whether Block IO weight is supported or not
+	BlkioWeight bool
+
+	// Whether Block IO weight_device is supported or not
+	BlkioWeightDevice bool
+
+	// Whether Block IO read limit in bytes per second is supported or not
+	BlkioReadBpsDevice bool
+
+	// Whether Block IO write limit in bytes per second is supported or not
+	BlkioWriteBpsDevice bool
+
+	// Whether Block IO read limit in IO per second is supported or not
+	BlkioReadIOpsDevice bool
+
+	// Whether Block IO write limit in IO per second is supported or not
+	BlkioWriteIOpsDevice bool
+}
+
+type cgroupCpusetInfo struct {
+	// Whether Cpuset is supported or not
+	Cpuset bool
+
+	// Available Cpuset's cpus
+	Cpus string
+
+	// Available Cpuset's memory nodes
+	Mems string
+}
+
+type cgroupPids struct {
+	// Whether Pids Limit is supported or not
+	PidsLimit bool
+}
+
+// IsCpusetCpusAvailable returns `true` if the provided string set is contained
+// in cgroup's cpuset.cpus set, `false` otherwise.
+// If error is not nil, a parsing error occurred.
+func (c cgroupCpusetInfo) IsCpusetCpusAvailable(provided string) (bool, error) {
+	return isCpusetListAvailable(provided, c.Cpus)
+}
+
+// IsCpusetMemsAvailable returns `true` if the provided string set is contained
+// in cgroup's cpuset.mems set, `false` otherwise.
+// If error is not nil, a parsing error occurred.
+func (c cgroupCpusetInfo) IsCpusetMemsAvailable(provided string) (bool, error) { + return isCpusetListAvailable(provided, c.Mems) +} + +func isCpusetListAvailable(provided, available string) (bool, error) { + parsedProvided, err := parsers.ParseUintList(provided) + if err != nil { + return false, err + } + parsedAvailable, err := parsers.ParseUintList(available) + if err != nil { + return false, err + } + for k := range parsedProvided { + if !parsedAvailable[k] { + return false, nil + } + } + return true, nil +} + +// Returns bit count of 1, used by NumCPU +func popcnt(x uint64) (n byte) { + x -= (x >> 1) & 0x5555555555555555 + x = (x>>2)&0x3333333333333333 + x&0x3333333333333333 + x += x >> 4 + x &= 0x0f0f0f0f0f0f0f0f + x *= 0x0101010101010101 + return byte(x >> 56) +} diff --git a/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_linux.go b/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_linux.go new file mode 100644 index 000000000..50ae265bb --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_linux.go @@ -0,0 +1,254 @@ +package sysinfo + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/opencontainers/runc/libcontainer/cgroups" + "golang.org/x/sys/unix" +) + +func findCgroupMountpoints() (map[string]string, error) { + cgMounts, err := cgroups.GetCgroupMounts(false) + if err != nil { + return nil, fmt.Errorf("Failed to parse cgroup information: %v", err) + } + mps := make(map[string]string) + for _, m := range cgMounts { + for _, ss := range m.Subsystems { + mps[ss] = m.Mountpoint + } + } + return mps, nil +} + +// New returns a new SysInfo, using the filesystem to detect which features +// the kernel supports. If `quiet` is `false` warnings are printed in logs +// whenever an error occurs or misconfigurations are present. +func New(quiet bool) *SysInfo { + sysInfo := &SysInfo{} + cgMounts, err := findCgroupMountpoints() + if err != nil { + logrus.Warnf("Failed to parse cgroup information: %v", err) + } else { + sysInfo.cgroupMemInfo = checkCgroupMem(cgMounts, quiet) + sysInfo.cgroupCPUInfo = checkCgroupCPU(cgMounts, quiet) + sysInfo.cgroupBlkioInfo = checkCgroupBlkioInfo(cgMounts, quiet) + sysInfo.cgroupCpusetInfo = checkCgroupCpusetInfo(cgMounts, quiet) + sysInfo.cgroupPids = checkCgroupPids(quiet) + } + + _, ok := cgMounts["devices"] + sysInfo.CgroupDevicesEnabled = ok + + sysInfo.IPv4ForwardingDisabled = !readProcBool("/proc/sys/net/ipv4/ip_forward") + sysInfo.BridgeNFCallIPTablesDisabled = !readProcBool("/proc/sys/net/bridge/bridge-nf-call-iptables") + sysInfo.BridgeNFCallIP6TablesDisabled = !readProcBool("/proc/sys/net/bridge/bridge-nf-call-ip6tables") + + // Check if AppArmor is supported. + if _, err := os.Stat("/sys/kernel/security/apparmor"); !os.IsNotExist(err) { + sysInfo.AppArmor = true + } + + // Check if Seccomp is supported, via CONFIG_SECCOMP. + if err := unix.Prctl(unix.PR_GET_SECCOMP, 0, 0, 0, 0); err != unix.EINVAL { + // Make sure the kernel has CONFIG_SECCOMP_FILTER. + if err := unix.Prctl(unix.PR_SET_SECCOMP, unix.SECCOMP_MODE_FILTER, 0, 0, 0); err != unix.EINVAL { + sysInfo.Seccomp = true + } + } + + return sysInfo +} + +// checkCgroupMem reads the memory information from the memory cgroup mount point. 
+func checkCgroupMem(cgMounts map[string]string, quiet bool) cgroupMemInfo { + mountPoint, ok := cgMounts["memory"] + if !ok { + if !quiet { + logrus.Warn("Your kernel does not support cgroup memory limit") + } + return cgroupMemInfo{} + } + + swapLimit := cgroupEnabled(mountPoint, "memory.memsw.limit_in_bytes") + if !quiet && !swapLimit { + logrus.Warn("Your kernel does not support swap memory limit") + } + memoryReservation := cgroupEnabled(mountPoint, "memory.soft_limit_in_bytes") + if !quiet && !memoryReservation { + logrus.Warn("Your kernel does not support memory reservation") + } + oomKillDisable := cgroupEnabled(mountPoint, "memory.oom_control") + if !quiet && !oomKillDisable { + logrus.Warn("Your kernel does not support oom control") + } + memorySwappiness := cgroupEnabled(mountPoint, "memory.swappiness") + if !quiet && !memorySwappiness { + logrus.Warn("Your kernel does not support memory swappiness") + } + kernelMemory := cgroupEnabled(mountPoint, "memory.kmem.limit_in_bytes") + if !quiet && !kernelMemory { + logrus.Warn("Your kernel does not support kernel memory limit") + } + + return cgroupMemInfo{ + MemoryLimit: true, + SwapLimit: swapLimit, + MemoryReservation: memoryReservation, + OomKillDisable: oomKillDisable, + MemorySwappiness: memorySwappiness, + KernelMemory: kernelMemory, + } +} + +// checkCgroupCPU reads the cpu information from the cpu cgroup mount point. +func checkCgroupCPU(cgMounts map[string]string, quiet bool) cgroupCPUInfo { + mountPoint, ok := cgMounts["cpu"] + if !ok { + if !quiet { + logrus.Warn("Unable to find cpu cgroup in mounts") + } + return cgroupCPUInfo{} + } + + cpuShares := cgroupEnabled(mountPoint, "cpu.shares") + if !quiet && !cpuShares { + logrus.Warn("Your kernel does not support cgroup cpu shares") + } + + cpuCfsPeriod := cgroupEnabled(mountPoint, "cpu.cfs_period_us") + if !quiet && !cpuCfsPeriod { + logrus.Warn("Your kernel does not support cgroup cfs period") + } + + cpuCfsQuota := cgroupEnabled(mountPoint, "cpu.cfs_quota_us") + if !quiet && !cpuCfsQuota { + logrus.Warn("Your kernel does not support cgroup cfs quotas") + } + + cpuRealtimePeriod := cgroupEnabled(mountPoint, "cpu.rt_period_us") + if !quiet && !cpuRealtimePeriod { + logrus.Warn("Your kernel does not support cgroup rt period") + } + + cpuRealtimeRuntime := cgroupEnabled(mountPoint, "cpu.rt_runtime_us") + if !quiet && !cpuRealtimeRuntime { + logrus.Warn("Your kernel does not support cgroup rt runtime") + } + + return cgroupCPUInfo{ + CPUShares: cpuShares, + CPUCfsPeriod: cpuCfsPeriod, + CPUCfsQuota: cpuCfsQuota, + CPURealtimePeriod: cpuRealtimePeriod, + CPURealtimeRuntime: cpuRealtimeRuntime, + } +} + +// checkCgroupBlkioInfo reads the blkio information from the blkio cgroup mount point. 
+func checkCgroupBlkioInfo(cgMounts map[string]string, quiet bool) cgroupBlkioInfo { + mountPoint, ok := cgMounts["blkio"] + if !ok { + if !quiet { + logrus.Warn("Unable to find blkio cgroup in mounts") + } + return cgroupBlkioInfo{} + } + + weight := cgroupEnabled(mountPoint, "blkio.weight") + if !quiet && !weight { + logrus.Warn("Your kernel does not support cgroup blkio weight") + } + + weightDevice := cgroupEnabled(mountPoint, "blkio.weight_device") + if !quiet && !weightDevice { + logrus.Warn("Your kernel does not support cgroup blkio weight_device") + } + + readBpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.read_bps_device") + if !quiet && !readBpsDevice { + logrus.Warn("Your kernel does not support cgroup blkio throttle.read_bps_device") + } + + writeBpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.write_bps_device") + if !quiet && !writeBpsDevice { + logrus.Warn("Your kernel does not support cgroup blkio throttle.write_bps_device") + } + readIOpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.read_iops_device") + if !quiet && !readIOpsDevice { + logrus.Warn("Your kernel does not support cgroup blkio throttle.read_iops_device") + } + + writeIOpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.write_iops_device") + if !quiet && !writeIOpsDevice { + logrus.Warn("Your kernel does not support cgroup blkio throttle.write_iops_device") + } + return cgroupBlkioInfo{ + BlkioWeight: weight, + BlkioWeightDevice: weightDevice, + BlkioReadBpsDevice: readBpsDevice, + BlkioWriteBpsDevice: writeBpsDevice, + BlkioReadIOpsDevice: readIOpsDevice, + BlkioWriteIOpsDevice: writeIOpsDevice, + } +} + +// checkCgroupCpusetInfo reads the cpuset information from the cpuset cgroup mount point. +func checkCgroupCpusetInfo(cgMounts map[string]string, quiet bool) cgroupCpusetInfo { + mountPoint, ok := cgMounts["cpuset"] + if !ok { + if !quiet { + logrus.Warn("Unable to find cpuset cgroup in mounts") + } + return cgroupCpusetInfo{} + } + + cpus, err := ioutil.ReadFile(path.Join(mountPoint, "cpuset.cpus")) + if err != nil { + return cgroupCpusetInfo{} + } + + mems, err := ioutil.ReadFile(path.Join(mountPoint, "cpuset.mems")) + if err != nil { + return cgroupCpusetInfo{} + } + + return cgroupCpusetInfo{ + Cpuset: true, + Cpus: strings.TrimSpace(string(cpus)), + Mems: strings.TrimSpace(string(mems)), + } +} + +// checkCgroupPids reads the pids information from the pids cgroup mount point. 
+func checkCgroupPids(quiet bool) cgroupPids {
+	_, err := cgroups.FindCgroupMountpoint("pids")
+	if err != nil {
+		if !quiet {
+			logrus.Warn(err)
+		}
+		return cgroupPids{}
+	}
+
+	return cgroupPids{
+		PidsLimit: true,
+	}
+}
+
+func cgroupEnabled(mountPoint, name string) bool {
+	_, err := os.Stat(path.Join(mountPoint, name))
+	return err == nil
+}
+
+func readProcBool(path string) bool {
+	val, err := ioutil.ReadFile(path)
+	if err != nil {
+		return false
+	}
+	return strings.TrimSpace(string(val)) == "1"
+}
diff --git a/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_linux_test.go b/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_linux_test.go
new file mode 100644
index 000000000..860784f2a
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_linux_test.go
@@ -0,0 +1,104 @@
+package sysinfo
+
+import (
+	"io/ioutil"
+	"os"
+	"path"
+	"path/filepath"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+	"golang.org/x/sys/unix"
+)
+
+func TestReadProcBool(t *testing.T) {
+	tmpDir, err := ioutil.TempDir("", "test-sysinfo-proc")
+	require.NoError(t, err)
+	defer os.RemoveAll(tmpDir)
+
+	procFile := filepath.Join(tmpDir, "read-proc-bool")
+	err = ioutil.WriteFile(procFile, []byte("1"), 0644)
+	require.NoError(t, err)
+
+	if !readProcBool(procFile) {
+		t.Fatal("expected proc bool to be true, got false")
+	}
+
+	if err := ioutil.WriteFile(procFile, []byte("0"), 0644); err != nil {
+		t.Fatal(err)
+	}
+	if readProcBool(procFile) {
+		t.Fatal("expected proc bool to be false, got true")
+	}
+
+	if readProcBool(path.Join(tmpDir, "no-exist")) {
+		t.Fatal("should be false for non-existent entry")
+	}
+}
+
+func TestCgroupEnabled(t *testing.T) {
+	cgroupDir, err := ioutil.TempDir("", "cgroup-test")
+	require.NoError(t, err)
+	defer os.RemoveAll(cgroupDir)
+
+	if cgroupEnabled(cgroupDir, "test") {
+		t.Fatal("cgroupEnabled should be false")
+	}
+
+	err = ioutil.WriteFile(path.Join(cgroupDir, "test"), []byte{}, 0644)
+	require.NoError(t, err)
+
+	if !cgroupEnabled(cgroupDir, "test") {
+		t.Fatal("cgroupEnabled should be true")
+	}
+}
+
+func TestNew(t *testing.T) {
+	sysInfo := New(false)
+	require.NotNil(t, sysInfo)
+	checkSysInfo(t, sysInfo)
+
+	sysInfo = New(true)
+	require.NotNil(t, sysInfo)
+	checkSysInfo(t, sysInfo)
+}
+
+func checkSysInfo(t *testing.T, sysInfo *SysInfo) {
+	// If Seccomp is supported via CONFIG_SECCOMP, sysInfo.Seccomp must be true; otherwise it must be false.
+	if err := unix.Prctl(unix.PR_GET_SECCOMP, 0, 0, 0, 0); err != unix.EINVAL {
+		// Make sure the kernel has CONFIG_SECCOMP_FILTER.
+		if err := unix.Prctl(unix.PR_SET_SECCOMP, unix.SECCOMP_MODE_FILTER, 0, 0, 0); err != unix.EINVAL {
+			require.True(t, sysInfo.Seccomp)
+		}
+	} else {
+		require.False(t, sysInfo.Seccomp)
+	}
+}
+
+func TestNewAppArmorEnabled(t *testing.T) {
+	// AppArmor must be supported on the host for this test; skip otherwise.
+	if _, err := os.Stat("/sys/kernel/security/apparmor"); err != nil {
+		t.Skip("AppArmor must be enabled")
+	}
+
+	sysInfo := New(true)
+	require.True(t, sysInfo.AppArmor)
+}
+
+func TestNewAppArmorDisabled(t *testing.T) {
+	// AppArmor must be absent on the host for this test; skip otherwise.
then it must be TRUE , else FALSE + if _, err := os.Stat("/sys/kernel/security/apparmor"); !os.IsNotExist(err) { + t.Skip("App Armor Must be Disabled") + } + + sysInfo := New(true) + require.False(t, sysInfo.AppArmor) +} + +func TestNumCPU(t *testing.T) { + cpuNumbers := NumCPU() + if cpuNumbers <= 0 { + t.Fatal("CPU returned must be greater than zero") + } +} diff --git a/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_solaris.go b/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_solaris.go new file mode 100644 index 000000000..c858d57e0 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_solaris.go @@ -0,0 +1,121 @@ +// +build solaris,cgo + +package sysinfo + +import ( + "bytes" + "os/exec" + "strconv" + "strings" +) + +/* +#cgo LDFLAGS: -llgrp +#include +#include +#include +int getLgrpCount() { + lgrp_cookie_t lgrpcookie = LGRP_COOKIE_NONE; + uint_t nlgrps; + + if ((lgrpcookie = lgrp_init(LGRP_VIEW_OS)) == LGRP_COOKIE_NONE) { + return -1; + } + nlgrps = lgrp_nlgrps(lgrpcookie); + return nlgrps; +} +*/ +import "C" + +// IsCPUSharesAvailable returns whether CPUShares setting is supported. +// We need FSS to be set as default scheduling class to support CPU Shares +func IsCPUSharesAvailable() bool { + cmd := exec.Command("/usr/sbin/dispadmin", "-d") + outBuf := new(bytes.Buffer) + errBuf := new(bytes.Buffer) + cmd.Stderr = errBuf + cmd.Stdout = outBuf + + if err := cmd.Run(); err != nil { + return false + } + return (strings.Contains(outBuf.String(), "FSS")) +} + +// New returns a new SysInfo, using the filesystem to detect which features +// the kernel supports. +//NOTE Solaris: If we change the below capabilities be sure +// to update verifyPlatformContainerSettings() in daemon_solaris.go +func New(quiet bool) *SysInfo { + sysInfo := &SysInfo{} + sysInfo.cgroupMemInfo = setCgroupMem(quiet) + sysInfo.cgroupCPUInfo = setCgroupCPU(quiet) + sysInfo.cgroupBlkioInfo = setCgroupBlkioInfo(quiet) + sysInfo.cgroupCpusetInfo = setCgroupCPUsetInfo(quiet) + + sysInfo.IPv4ForwardingDisabled = false + + sysInfo.AppArmor = false + + return sysInfo +} + +// setCgroupMem reads the memory information for Solaris. +func setCgroupMem(quiet bool) cgroupMemInfo { + + return cgroupMemInfo{ + MemoryLimit: true, + SwapLimit: true, + MemoryReservation: false, + OomKillDisable: false, + MemorySwappiness: false, + KernelMemory: false, + } +} + +// setCgroupCPU reads the cpu information for Solaris. +func setCgroupCPU(quiet bool) cgroupCPUInfo { + + return cgroupCPUInfo{ + CPUShares: true, + CPUCfsPeriod: false, + CPUCfsQuota: true, + CPURealtimePeriod: false, + CPURealtimeRuntime: false, + } +} + +// blkio switches are not supported in Solaris. +func setCgroupBlkioInfo(quiet bool) cgroupBlkioInfo { + + return cgroupBlkioInfo{ + BlkioWeight: false, + BlkioWeightDevice: false, + } +} + +// setCgroupCPUsetInfo reads the cpuset information for Solaris. 
+func setCgroupCPUsetInfo(quiet bool) cgroupCpusetInfo { + + return cgroupCpusetInfo{ + Cpuset: true, + Cpus: getCPUCount(), + Mems: getLgrpCount(), + } +} + +func getCPUCount() string { + ncpus := C.sysconf(C._SC_NPROCESSORS_ONLN) + if ncpus <= 0 { + return "" + } + return strconv.FormatInt(int64(ncpus), 16) +} + +func getLgrpCount() string { + nlgrps := C.getLgrpCount() + if nlgrps <= 0 { + return "" + } + return strconv.FormatInt(int64(nlgrps), 16) +} diff --git a/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_test.go b/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_test.go new file mode 100644 index 000000000..b61fbcf54 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_test.go @@ -0,0 +1,26 @@ +package sysinfo + +import "testing" + +func TestIsCpusetListAvailable(t *testing.T) { + cases := []struct { + provided string + available string + res bool + err bool + }{ + {"1", "0-4", true, false}, + {"01,3", "0-4", true, false}, + {"", "0-7", true, false}, + {"1--42", "0-7", false, true}, + {"1-42", "00-1,8,,9", false, true}, + {"1,41-42", "43,45", false, false}, + {"0-3", "", false, false}, + } + for _, c := range cases { + r, err := isCpusetListAvailable(c.provided, c.available) + if (c.err && err == nil) && r != c.res { + t.Fatalf("Expected pair: %v, %v for %s, %s. Got %v, %v instead", c.res, c.err, c.provided, c.available, (c.err && err == nil), r) + } + } +} diff --git a/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_unix.go b/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_unix.go new file mode 100644 index 000000000..45f3ef1c6 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_unix.go @@ -0,0 +1,9 @@ +// +build !linux,!solaris,!windows + +package sysinfo + +// New returns an empty SysInfo for non linux nor solaris for now. +func New(quiet bool) *SysInfo { + sysInfo := &SysInfo{} + return sysInfo +} diff --git a/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_windows.go b/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_windows.go new file mode 100644 index 000000000..4e6255bc5 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/sysinfo/sysinfo_windows.go @@ -0,0 +1,9 @@ +// +build windows + +package sysinfo + +// New returns an empty SysInfo for windows for now. +func New(quiet bool) *SysInfo { + sysInfo := &SysInfo{} + return sysInfo +} diff --git a/vendor/github.com/moby/moby/pkg/system/chtimes.go b/vendor/github.com/moby/moby/pkg/system/chtimes.go new file mode 100644 index 000000000..056d19954 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/chtimes.go @@ -0,0 +1,35 @@ +package system + +import ( + "os" + "time" +) + +// Chtimes changes the access time and modified time of a file at the given path +func Chtimes(name string, atime time.Time, mtime time.Time) error { + unixMinTime := time.Unix(0, 0) + unixMaxTime := maxTime + + // If the modified time is prior to the Unix Epoch, or after the + // end of Unix Time, os.Chtimes has undefined behavior + // default to Unix Epoch in this case, just in case + + if atime.Before(unixMinTime) || atime.After(unixMaxTime) { + atime = unixMinTime + } + + if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) { + mtime = unixMinTime + } + + if err := os.Chtimes(name, atime, mtime); err != nil { + return err + } + + // Take platform specific action for setting create time. 
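+	// On unix this is a no-op (see chtimes_unix.go); on Windows it re-opens
+	// the file and calls SetFileTime (see chtimes_windows.go).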
+ if err := setCTime(name, mtime); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/system/chtimes_test.go b/vendor/github.com/moby/moby/pkg/system/chtimes_test.go new file mode 100644 index 000000000..5c87df32a --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/chtimes_test.go @@ -0,0 +1,94 @@ +package system + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + "time" +) + +// prepareTempFile creates a temporary file in a temporary directory. +func prepareTempFile(t *testing.T) (string, string) { + dir, err := ioutil.TempDir("", "docker-system-test") + if err != nil { + t.Fatal(err) + } + + file := filepath.Join(dir, "exist") + if err := ioutil.WriteFile(file, []byte("hello"), 0644); err != nil { + t.Fatal(err) + } + return file, dir +} + +// TestChtimes tests Chtimes on a tempfile. Test only mTime, because aTime is OS dependent +func TestChtimes(t *testing.T) { + file, dir := prepareTempFile(t) + defer os.RemoveAll(dir) + + beforeUnixEpochTime := time.Unix(0, 0).Add(-100 * time.Second) + unixEpochTime := time.Unix(0, 0) + afterUnixEpochTime := time.Unix(100, 0) + unixMaxTime := maxTime + + // Test both aTime and mTime set to Unix Epoch + Chtimes(file, unixEpochTime, unixEpochTime) + + f, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + + if f.ModTime() != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime()) + } + + // Test aTime before Unix Epoch and mTime set to Unix Epoch + Chtimes(file, beforeUnixEpochTime, unixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + if f.ModTime() != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime()) + } + + // Test aTime set to Unix Epoch and mTime before Unix Epoch + Chtimes(file, unixEpochTime, beforeUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + if f.ModTime() != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime()) + } + + // Test both aTime and mTime set to after Unix Epoch (valid time) + Chtimes(file, afterUnixEpochTime, afterUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + if f.ModTime() != afterUnixEpochTime { + t.Fatalf("Expected: %s, got: %s", afterUnixEpochTime, f.ModTime()) + } + + // Test both aTime and mTime set to Unix max time + Chtimes(file, unixMaxTime, unixMaxTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + if f.ModTime().Truncate(time.Second) != unixMaxTime.Truncate(time.Second) { + t.Fatalf("Expected: %s, got: %s", unixMaxTime.Truncate(time.Second), f.ModTime().Truncate(time.Second)) + } +} diff --git a/vendor/github.com/moby/moby/pkg/system/chtimes_unix.go b/vendor/github.com/moby/moby/pkg/system/chtimes_unix.go new file mode 100644 index 000000000..09d58bcbf --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/chtimes_unix.go @@ -0,0 +1,14 @@ +// +build !windows + +package system + +import ( + "time" +) + +//setCTime will set the create time on a file. On Unix, the create +//time is updated as a side effect of setting the modified time, so +//no action is required. 
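+//
+// (The ctime of an inode is the change time maintained by the kernel; it
+// cannot be set directly from user space on these platforms.)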
+func setCTime(path string, ctime time.Time) error { + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/system/chtimes_unix_test.go b/vendor/github.com/moby/moby/pkg/system/chtimes_unix_test.go new file mode 100644 index 000000000..6ec9a7173 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/chtimes_unix_test.go @@ -0,0 +1,91 @@ +// +build !windows + +package system + +import ( + "os" + "syscall" + "testing" + "time" +) + +// TestChtimesLinux tests Chtimes access time on a tempfile on Linux +func TestChtimesLinux(t *testing.T) { + file, dir := prepareTempFile(t) + defer os.RemoveAll(dir) + + beforeUnixEpochTime := time.Unix(0, 0).Add(-100 * time.Second) + unixEpochTime := time.Unix(0, 0) + afterUnixEpochTime := time.Unix(100, 0) + unixMaxTime := maxTime + + // Test both aTime and mTime set to Unix Epoch + Chtimes(file, unixEpochTime, unixEpochTime) + + f, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + + stat := f.Sys().(*syscall.Stat_t) + aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test aTime before Unix Epoch and mTime set to Unix Epoch + Chtimes(file, beforeUnixEpochTime, unixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + stat = f.Sys().(*syscall.Stat_t) + aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test aTime set to Unix Epoch and mTime before Unix Epoch + Chtimes(file, unixEpochTime, beforeUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + stat = f.Sys().(*syscall.Stat_t) + aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test both aTime and mTime set to after Unix Epoch (valid time) + Chtimes(file, afterUnixEpochTime, afterUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + stat = f.Sys().(*syscall.Stat_t) + aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + if aTime != afterUnixEpochTime { + t.Fatalf("Expected: %s, got: %s", afterUnixEpochTime, aTime) + } + + // Test both aTime and mTime set to Unix max time + Chtimes(file, unixMaxTime, unixMaxTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + stat = f.Sys().(*syscall.Stat_t) + aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + if aTime.Truncate(time.Second) != unixMaxTime.Truncate(time.Second) { + t.Fatalf("Expected: %s, got: %s", unixMaxTime.Truncate(time.Second), aTime.Truncate(time.Second)) + } +} diff --git a/vendor/github.com/moby/moby/pkg/system/chtimes_windows.go b/vendor/github.com/moby/moby/pkg/system/chtimes_windows.go new file mode 100644 index 000000000..45428c141 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/chtimes_windows.go @@ -0,0 +1,28 @@ +// +build windows + +package system + +import ( + "time" + + "golang.org/x/sys/windows" +) + +//setCTime will set the create time on a file. On Windows, this requires +//calling SetFileTime and explicitly including the create time. 
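+//
+// The flow below: convert the path to UTF-16, open a handle with
+// FILE_WRITE_ATTRIBUTES (using FILE_FLAG_BACKUP_SEMANTICS so that
+// directories can be opened too), and pass the time as the first
+// (creation time) argument of SetFileTime.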
+func setCTime(path string, ctime time.Time) error { + ctimespec := windows.NsecToTimespec(ctime.UnixNano()) + pathp, e := windows.UTF16PtrFromString(path) + if e != nil { + return e + } + h, e := windows.CreateFile(pathp, + windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil, + windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0) + if e != nil { + return e + } + defer windows.Close(h) + c := windows.NsecToFiletime(windows.TimespecToNsec(ctimespec)) + return windows.SetFileTime(h, &c, nil, nil) +} diff --git a/vendor/github.com/moby/moby/pkg/system/chtimes_windows_test.go b/vendor/github.com/moby/moby/pkg/system/chtimes_windows_test.go new file mode 100644 index 000000000..72d8a1061 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/chtimes_windows_test.go @@ -0,0 +1,86 @@ +// +build windows + +package system + +import ( + "os" + "syscall" + "testing" + "time" +) + +// TestChtimesWindows tests Chtimes access time on a tempfile on Windows +func TestChtimesWindows(t *testing.T) { + file, dir := prepareTempFile(t) + defer os.RemoveAll(dir) + + beforeUnixEpochTime := time.Unix(0, 0).Add(-100 * time.Second) + unixEpochTime := time.Unix(0, 0) + afterUnixEpochTime := time.Unix(100, 0) + unixMaxTime := maxTime + + // Test both aTime and mTime set to Unix Epoch + Chtimes(file, unixEpochTime, unixEpochTime) + + f, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + + aTime := time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test aTime before Unix Epoch and mTime set to Unix Epoch + Chtimes(file, beforeUnixEpochTime, unixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test aTime set to Unix Epoch and mTime before Unix Epoch + Chtimes(file, unixEpochTime, beforeUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test both aTime and mTime set to after Unix Epoch (valid time) + Chtimes(file, afterUnixEpochTime, afterUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) + if aTime != afterUnixEpochTime { + t.Fatalf("Expected: %s, got: %s", afterUnixEpochTime, aTime) + } + + // Test both aTime and mTime set to Unix max time + Chtimes(file, unixMaxTime, unixMaxTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) + if aTime.Truncate(time.Second) != unixMaxTime.Truncate(time.Second) { + t.Fatalf("Expected: %s, got: %s", unixMaxTime.Truncate(time.Second), aTime.Truncate(time.Second)) + } +} diff --git a/vendor/github.com/moby/moby/pkg/system/errors.go b/vendor/github.com/moby/moby/pkg/system/errors.go new file mode 100644 index 000000000..288318985 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/errors.go @@ -0,0 +1,10 @@ +package system + +import ( + "errors" +) + +var ( + // ErrNotSupportedPlatform means the platform is not supported. 
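+	// It is returned by the stubbed-out implementations in this package,
+	// e.g. ReadMemInfo in meminfo_unsupported.go and Mknod in
+	// mknod_windows.go.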
+ ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") +) diff --git a/vendor/github.com/moby/moby/pkg/system/events_windows.go b/vendor/github.com/moby/moby/pkg/system/events_windows.go new file mode 100644 index 000000000..192e36788 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/events_windows.go @@ -0,0 +1,85 @@ +package system + +// This file implements syscalls for Win32 events which are not implemented +// in golang. + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var ( + procCreateEvent = modkernel32.NewProc("CreateEventW") + procOpenEvent = modkernel32.NewProc("OpenEventW") + procSetEvent = modkernel32.NewProc("SetEvent") + procResetEvent = modkernel32.NewProc("ResetEvent") + procPulseEvent = modkernel32.NewProc("PulseEvent") +) + +// CreateEvent implements win32 CreateEventW func in golang. It will create an event object. +func CreateEvent(eventAttributes *windows.SecurityAttributes, manualReset bool, initialState bool, name string) (handle windows.Handle, err error) { + namep, _ := windows.UTF16PtrFromString(name) + var _p1 uint32 + if manualReset { + _p1 = 1 + } + var _p2 uint32 + if initialState { + _p2 = 1 + } + r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep))) + use(unsafe.Pointer(namep)) + handle = windows.Handle(r0) + if handle == windows.InvalidHandle { + err = e1 + } + return +} + +// OpenEvent implements win32 OpenEventW func in golang. It opens an event object. +func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle windows.Handle, err error) { + namep, _ := windows.UTF16PtrFromString(name) + var _p1 uint32 + if inheritHandle { + _p1 = 1 + } + r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep))) + use(unsafe.Pointer(namep)) + handle = windows.Handle(r0) + if handle == windows.InvalidHandle { + err = e1 + } + return +} + +// SetEvent implements win32 SetEvent func in golang. +func SetEvent(handle windows.Handle) (err error) { + return setResetPulse(handle, procSetEvent) +} + +// ResetEvent implements win32 ResetEvent func in golang. +func ResetEvent(handle windows.Handle) (err error) { + return setResetPulse(handle, procResetEvent) +} + +// PulseEvent implements win32 PulseEvent func in golang. +func PulseEvent(handle windows.Handle) (err error) { + return setResetPulse(handle, procPulseEvent) +} + +func setResetPulse(handle windows.Handle, proc *windows.LazyProc) (err error) { + r0, _, _ := proc.Call(uintptr(handle)) + if r0 != 0 { + err = syscall.Errno(r0) + } + return +} + +var temp unsafe.Pointer + +// use ensures a variable is kept alive without the GC freeing while still needed +func use(p unsafe.Pointer) { + temp = p +} diff --git a/vendor/github.com/moby/moby/pkg/system/exitcode.go b/vendor/github.com/moby/moby/pkg/system/exitcode.go new file mode 100644 index 000000000..60f0514b1 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/exitcode.go @@ -0,0 +1,33 @@ +package system + +import ( + "fmt" + "os/exec" + "syscall" +) + +// GetExitCode returns the ExitStatus of the specified error if its type is +// exec.ExitError, returns 0 and an error otherwise. 
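+//
+// A minimal sketch of the intended use (the command is illustrative only):
+//
+//	err := exec.Command("sh", "-c", "exit 3").Run()
+//	if code, cerr := GetExitCode(err); cerr == nil {
+//		fmt.Println(code) // prints 3 where Sys() is a syscall.WaitStatus
+//	}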
+func GetExitCode(err error) (int, error) { + exitCode := 0 + if exiterr, ok := err.(*exec.ExitError); ok { + if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok { + return procExit.ExitStatus(), nil + } + } + return exitCode, fmt.Errorf("failed to get exit code") +} + +// ProcessExitCode process the specified error and returns the exit status code +// if the error was of type exec.ExitError, returns nothing otherwise. +func ProcessExitCode(err error) (exitCode int) { + if err != nil { + var exiterr error + if exitCode, exiterr = GetExitCode(err); exiterr != nil { + // TODO: Fix this so we check the error's text. + // we've failed to retrieve exit code, so we set it to 127 + exitCode = 127 + } + } + return +} diff --git a/vendor/github.com/moby/moby/pkg/system/filesys.go b/vendor/github.com/moby/moby/pkg/system/filesys.go new file mode 100644 index 000000000..102565f76 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/filesys.go @@ -0,0 +1,67 @@ +// +build !windows + +package system + +import ( + "io/ioutil" + "os" + "path/filepath" +) + +// MkdirAllWithACL is a wrapper for MkdirAll on unix systems. +func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { + return MkdirAll(path, perm, sddl) +} + +// MkdirAll creates a directory named path along with any necessary parents, +// with permission specified by attribute perm for all dir created. +func MkdirAll(path string, perm os.FileMode, sddl string) error { + return os.MkdirAll(path, perm) +} + +// IsAbs is a platform-specific wrapper for filepath.IsAbs. +func IsAbs(path string) bool { + return filepath.IsAbs(path) +} + +// The functions below here are wrappers for the equivalents in the os and ioutils packages. +// They are passthrough on Unix platforms, and only relevant on Windows. + +// CreateSequential creates the named file with mode 0666 (before umask), truncating +// it if it already exists. If successful, methods on the returned +// File can be used for I/O; the associated file descriptor has mode +// O_RDWR. +// If there is an error, it will be of type *PathError. +func CreateSequential(name string) (*os.File, error) { + return os.Create(name) +} + +// OpenSequential opens the named file for reading. If successful, methods on +// the returned file can be used for reading; the associated file +// descriptor has mode O_RDONLY. +// If there is an error, it will be of type *PathError. +func OpenSequential(name string) (*os.File, error) { + return os.Open(name) +} + +// OpenFileSequential is the generalized open call; most users will use Open +// or Create instead. It opens the named file with specified flag +// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, +// methods on the returned File can be used for I/O. +// If there is an error, it will be of type *PathError. +func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) { + return os.OpenFile(name, flag, perm) +} + +// TempFileSequential creates a new temporary file in the directory dir +// with a name beginning with prefix, opens the file for reading +// and writing, and returns the resulting *os.File. +// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. It is the caller's responsibility +// to remove the file when no longer needed. 
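+//
+// On unix this is a straight passthrough to ioutil.TempFile, so the usual
+// pattern applies (the prefix is illustrative):
+//
+//	f, err := TempFileSequential("", "docker-layer-")
+//	if err == nil {
+//		defer os.Remove(f.Name())
+//		f.Close()
+//	}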
+func TempFileSequential(dir, prefix string) (f *os.File, err error) {
+	return ioutil.TempFile(dir, prefix)
+}
diff --git a/vendor/github.com/moby/moby/pkg/system/filesys_windows.go b/vendor/github.com/moby/moby/pkg/system/filesys_windows.go
new file mode 100644
index 000000000..a61b53d0b
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/system/filesys_windows.go
@@ -0,0 +1,298 @@
+// +build windows
+
+package system
+
+import (
+	"os"
+	"path/filepath"
+	"regexp"
+	"strconv"
+	"strings"
+	"sync"
+	"syscall"
+	"time"
+	"unsafe"
+
+	winio "github.com/Microsoft/go-winio"
+	"golang.org/x/sys/windows"
+)
+
+const (
+	// SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System
+	SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)"
+	// SddlNtvmAdministratorsLocalSystem is NT VIRTUAL MACHINE\Virtual Machines plus local administrators plus NT AUTHORITY\System
+	SddlNtvmAdministratorsLocalSystem = "D:P(A;OICI;GA;;;S-1-5-83-0)(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)"
+)
+
+// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory
+// with an appropriate SDDL defined ACL.
+func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error {
+	return mkdirall(path, true, sddl)
+}
+
+// MkdirAll is an implementation of os.MkdirAll that is volume path aware for
+// Windows.
+func MkdirAll(path string, _ os.FileMode, sddl string) error {
+	return mkdirall(path, false, sddl)
+}
+
+// mkdirall is a custom version of os.MkdirAll modified for use on Windows
+// so that it is both volume path aware, and can create a directory with
+// a DACL.
+func mkdirall(path string, applyACL bool, sddl string) error {
+	if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) {
+		return nil
+	}
+
+	// The rest of this method is largely copied from os.MkdirAll and should be kept
+	// as-is to ensure compatibility.
+
+	// Fast path: if we can tell whether path is a directory or file, stop with success or error.
+	dir, err := os.Stat(path)
+	if err == nil {
+		if dir.IsDir() {
+			return nil
+		}
+		return &os.PathError{
+			Op:   "mkdir",
+			Path: path,
+			Err:  syscall.ENOTDIR,
+		}
+	}
+
+	// Slow path: make sure parent exists and then call Mkdir for path.
+	i := len(path)
+	for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
+		i--
+	}
+
+	j := i
+	for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
+		j--
+	}
+
+	if j > 1 {
+		// Create parent
+		err = mkdirall(path[0:j-1], false, sddl)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Parent now exists; invoke os.Mkdir or mkdirWithACL and use its result.
+	if applyACL {
+		err = mkdirWithACL(path, sddl)
+	} else {
+		err = os.Mkdir(path, 0)
+	}
+
+	if err != nil {
+		// Handle arguments like "foo/." by
+		// double-checking that directory doesn't exist.
+		dir, err1 := os.Lstat(path)
+		if err1 == nil && dir.IsDir() {
+			return nil
+		}
+		return err
+	}
+	return nil
+}
+
+// mkdirWithACL creates a new directory. If there is an error, it will be of
+// type *PathError.
+//
+// This is a modified and combined version of os.Mkdir and windows.Mkdir
+// in golang to cater for creating a directory with an ACL permitting full
+// access, with inheritance, to any subfolder/file for Built-in Administrators
+// and Local System.
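+//
+// A hedged usage sketch via the exported wrapper (the path is illustrative):
+//
+//	_ = MkdirAllWithACL(`C:\ProgramData\docker`, 0,
+//		SddlAdministratorsLocalSystem)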
+func mkdirWithACL(name string, sddl string) error { + sa := windows.SecurityAttributes{Length: 0} + sd, err := winio.SddlToSecurityDescriptor(sddl) + if err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0])) + + namep, err := windows.UTF16PtrFromString(name) + if err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + + e := windows.CreateDirectory(namep, &sa) + if e != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: e} + } + return nil +} + +// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows, +// golang filepath.IsAbs does not consider a path \windows\system32 as absolute +// as it doesn't start with a drive-letter/colon combination. However, in +// docker we need to verify things such as WORKDIR /windows/system32 in +// a Dockerfile (which gets translated to \windows\system32 when being processed +// by the daemon. This SHOULD be treated as absolute from a docker processing +// perspective. +func IsAbs(path string) bool { + if !filepath.IsAbs(path) { + if !strings.HasPrefix(path, string(os.PathSeparator)) { + return false + } + } + return true +} + +// The origin of the functions below here are the golang OS and windows packages, +// slightly modified to only cope with files, not directories due to the +// specific use case. +// +// The alteration is to allow a file on Windows to be opened with +// FILE_FLAG_SEQUENTIAL_SCAN (particular for docker load), to avoid eating +// the standby list, particularly when accessing large files such as layer.tar. + +// CreateSequential creates the named file with mode 0666 (before umask), truncating +// it if it already exists. If successful, methods on the returned +// File can be used for I/O; the associated file descriptor has mode +// O_RDWR. +// If there is an error, it will be of type *PathError. +func CreateSequential(name string) (*os.File, error) { + return OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0) +} + +// OpenSequential opens the named file for reading. If successful, methods on +// the returned file can be used for reading; the associated file +// descriptor has mode O_RDONLY. +// If there is an error, it will be of type *PathError. +func OpenSequential(name string) (*os.File, error) { + return OpenFileSequential(name, os.O_RDONLY, 0) +} + +// OpenFileSequential is the generalized open call; most users will use Open +// or Create instead. +// If there is an error, it will be of type *PathError. 
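+//
+// A sketch of the typical pattern via OpenSequential (the file name is
+// illustrative):
+//
+//	f, err := OpenSequential(`C:\layers\layer.tar`)
+//	if err == nil {
+//		defer f.Close()
+//		// large sequential reads here avoid polluting the standby list
+//	}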
+func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) { + if name == "" { + return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT} + } + r, errf := windowsOpenFileSequential(name, flag, 0) + if errf == nil { + return r, nil + } + return nil, &os.PathError{Op: "open", Path: name, Err: errf} +} + +func windowsOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) { + r, e := windowsOpenSequential(name, flag|windows.O_CLOEXEC, 0) + if e != nil { + return nil, e + } + return os.NewFile(uintptr(r), name), nil +} + +func makeInheritSa() *windows.SecurityAttributes { + var sa windows.SecurityAttributes + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + return &sa +} + +func windowsOpenSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) { + if len(path) == 0 { + return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND + } + pathp, err := windows.UTF16PtrFromString(path) + if err != nil { + return windows.InvalidHandle, err + } + var access uint32 + switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) { + case windows.O_RDONLY: + access = windows.GENERIC_READ + case windows.O_WRONLY: + access = windows.GENERIC_WRITE + case windows.O_RDWR: + access = windows.GENERIC_READ | windows.GENERIC_WRITE + } + if mode&windows.O_CREAT != 0 { + access |= windows.GENERIC_WRITE + } + if mode&windows.O_APPEND != 0 { + access &^= windows.GENERIC_WRITE + access |= windows.FILE_APPEND_DATA + } + sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE) + var sa *windows.SecurityAttributes + if mode&windows.O_CLOEXEC == 0 { + sa = makeInheritSa() + } + var createmode uint32 + switch { + case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL): + createmode = windows.CREATE_NEW + case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC): + createmode = windows.CREATE_ALWAYS + case mode&windows.O_CREAT == windows.O_CREAT: + createmode = windows.OPEN_ALWAYS + case mode&windows.O_TRUNC == windows.O_TRUNC: + createmode = windows.TRUNCATE_EXISTING + default: + createmode = windows.OPEN_EXISTING + } + // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang. + //https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx + const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN + h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0) + return h, e +} + +// Helpers for TempFileSequential +var rand uint32 +var randmu sync.Mutex + +func reseed() uint32 { + return uint32(time.Now().UnixNano() + int64(os.Getpid())) +} +func nextSuffix() string { + randmu.Lock() + r := rand + if r == 0 { + r = reseed() + } + r = r*1664525 + 1013904223 // constants from Numerical Recipes + rand = r + randmu.Unlock() + return strconv.Itoa(int(1e9 + r%1e9))[1:] +} + +// TempFileSequential is a copy of ioutil.TempFile, modified to use sequential +// file access. Below is the original comment from golang: +// TempFile creates a new temporary file in the directory dir +// with a name beginning with prefix, opens the file for reading +// and writing, and returns the resulting *os.File. +// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. 
It is the caller's responsibility +// to remove the file when no longer needed. +func TempFileSequential(dir, prefix string) (f *os.File, err error) { + if dir == "" { + dir = os.TempDir() + } + + nconflict := 0 + for i := 0; i < 10000; i++ { + name := filepath.Join(dir, prefix+nextSuffix()) + f, err = OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if os.IsExist(err) { + if nconflict++; nconflict > 10 { + randmu.Lock() + rand = reseed() + randmu.Unlock() + } + continue + } + break + } + return +} diff --git a/vendor/github.com/moby/moby/pkg/system/init.go b/vendor/github.com/moby/moby/pkg/system/init.go new file mode 100644 index 000000000..17935088d --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/init.go @@ -0,0 +1,22 @@ +package system + +import ( + "syscall" + "time" + "unsafe" +) + +// Used by chtimes +var maxTime time.Time + +func init() { + // chtimes initialization + if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { + // This is a 64 bit timespec + // os.Chtimes limits time to the following + maxTime = time.Unix(0, 1<<63-1) + } else { + // This is a 32 bit timespec + maxTime = time.Unix(1<<31-1, 0) + } +} diff --git a/vendor/github.com/moby/moby/pkg/system/init_windows.go b/vendor/github.com/moby/moby/pkg/system/init_windows.go new file mode 100644 index 000000000..019c66441 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/init_windows.go @@ -0,0 +1,17 @@ +package system + +import "os" + +// LCOWSupported determines if Linux Containers on Windows are supported. +// Note: This feature is in development (06/17) and enabled through an +// environment variable. At a future time, it will be enabled based +// on build number. @jhowardmsft +var lcowSupported = false + +func init() { + // LCOW initialization + if os.Getenv("LCOW_SUPPORTED") != "" { + lcowSupported = true + } + +} diff --git a/vendor/github.com/moby/moby/pkg/system/lcow_unix.go b/vendor/github.com/moby/moby/pkg/system/lcow_unix.go new file mode 100644 index 000000000..cff33bb40 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/lcow_unix.go @@ -0,0 +1,8 @@ +// +build !windows + +package system + +// LCOWSupported returns true if Linux containers on Windows are supported. +func LCOWSupported() bool { + return false +} diff --git a/vendor/github.com/moby/moby/pkg/system/lcow_windows.go b/vendor/github.com/moby/moby/pkg/system/lcow_windows.go new file mode 100644 index 000000000..e54d01e69 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/lcow_windows.go @@ -0,0 +1,6 @@ +package system + +// LCOWSupported returns true if Linux containers on Windows are supported. +func LCOWSupported() bool { + return lcowSupported +} diff --git a/vendor/github.com/moby/moby/pkg/system/lstat_unix.go b/vendor/github.com/moby/moby/pkg/system/lstat_unix.go new file mode 100644 index 000000000..bd23c4d50 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/lstat_unix.go @@ -0,0 +1,19 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +// Lstat takes a path to a file and returns +// a system.StatT type pertaining to that file. 
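+// Unlike Stat, it does not follow symbolic links.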
+// +// Throws an error if the file does not exist +func Lstat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Lstat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/vendor/github.com/moby/moby/pkg/system/lstat_unix_test.go b/vendor/github.com/moby/moby/pkg/system/lstat_unix_test.go new file mode 100644 index 000000000..062cf53bf --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/lstat_unix_test.go @@ -0,0 +1,30 @@ +// +build linux freebsd + +package system + +import ( + "os" + "testing" +) + +// TestLstat tests Lstat for existing and non existing files +func TestLstat(t *testing.T) { + file, invalid, _, dir := prepareFiles(t) + defer os.RemoveAll(dir) + + statFile, err := Lstat(file) + if err != nil { + t.Fatal(err) + } + if statFile == nil { + t.Fatal("returned empty stat for existing file") + } + + statInvalid, err := Lstat(invalid) + if err == nil { + t.Fatal("did not return error for non-existing file") + } + if statInvalid != nil { + t.Fatal("returned non-nil stat for non-existing file") + } +} diff --git a/vendor/github.com/moby/moby/pkg/system/lstat_windows.go b/vendor/github.com/moby/moby/pkg/system/lstat_windows.go new file mode 100644 index 000000000..e51df0daf --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/lstat_windows.go @@ -0,0 +1,14 @@ +package system + +import "os" + +// Lstat calls os.Lstat to get a fileinfo interface back. +// This is then copied into our own locally defined structure. +func Lstat(path string) (*StatT, error) { + fi, err := os.Lstat(path) + if err != nil { + return nil, err + } + + return fromStatT(&fi) +} diff --git a/vendor/github.com/moby/moby/pkg/system/meminfo.go b/vendor/github.com/moby/moby/pkg/system/meminfo.go new file mode 100644 index 000000000..3b6e947e6 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/meminfo.go @@ -0,0 +1,17 @@ +package system + +// MemInfo contains memory statistics of the host system. +type MemInfo struct { + // Total usable RAM (i.e. physical RAM minus a few reserved bits and the + // kernel binary code). + MemTotal int64 + + // Amount of free memory. + MemFree int64 + + // Total amount of swap space available. + SwapTotal int64 + + // Amount of swap space that is currently unused. + SwapFree int64 +} diff --git a/vendor/github.com/moby/moby/pkg/system/meminfo_linux.go b/vendor/github.com/moby/moby/pkg/system/meminfo_linux.go new file mode 100644 index 000000000..385f1d5e7 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/meminfo_linux.go @@ -0,0 +1,65 @@ +package system + +import ( + "bufio" + "io" + "os" + "strconv" + "strings" + + "github.com/docker/go-units" +) + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. +func ReadMemInfo() (*MemInfo, error) { + file, err := os.Open("/proc/meminfo") + if err != nil { + return nil, err + } + defer file.Close() + return parseMemInfo(file) +} + +// parseMemInfo parses the /proc/meminfo file into +// a MemInfo object given an io.Reader to the file. +// Throws error if there are problems reading from the file +func parseMemInfo(reader io.Reader) (*MemInfo, error) { + meminfo := &MemInfo{} + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + // Expected format: ["MemTotal:", "1234", "kB"] + parts := strings.Fields(scanner.Text()) + + // Sanity checks: Skip malformed entries. + if len(parts) < 3 || parts[2] != "kB" { + continue + } + + // Convert to bytes. 
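+		// e.g. a "MemTotal: 16344056 kB" line yields 16344056 * 1024 bytes.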
+		size, err := strconv.Atoi(parts[1])
+		if err != nil {
+			continue
+		}
+		bytes := int64(size) * units.KiB
+
+		switch parts[0] {
+		case "MemTotal:":
+			meminfo.MemTotal = bytes
+		case "MemFree:":
+			meminfo.MemFree = bytes
+		case "SwapTotal:":
+			meminfo.SwapTotal = bytes
+		case "SwapFree:":
+			meminfo.SwapFree = bytes
+		}
+	}
+
+	// Handle errors that may have occurred during the reading of the file.
+	if err := scanner.Err(); err != nil {
+		return nil, err
+	}
+
+	return meminfo, nil
+}
diff --git a/vendor/github.com/moby/moby/pkg/system/meminfo_solaris.go b/vendor/github.com/moby/moby/pkg/system/meminfo_solaris.go
new file mode 100644
index 000000000..925776e78
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/system/meminfo_solaris.go
@@ -0,0 +1,129 @@
+// +build solaris,cgo
+
+package system
+
+import (
+	"fmt"
+	"unsafe"
+)
+
+// #cgo CFLAGS: -std=c99
+// #cgo LDFLAGS: -lkstat
+// #include <unistd.h>
+// #include <stdlib.h>
+// #include <stdio.h>
+// #include <kstat.h>
+// #include <sys/swap.h>
+// #include <sys/kstat.h>
+// struct swaptable *allocSwaptable(int num) {
+//	struct swaptable *st;
+//	struct swapent *swapent;
+//	st = (struct swaptable *)malloc(num * sizeof(swapent_t) + sizeof (int));
+//	swapent = st->swt_ent;
+//	for (int i = 0; i < num; i++,swapent++) {
+//		swapent->ste_path = (char *)malloc(MAXPATHLEN * sizeof (char));
+//	}
+//	st->swt_n = num;
+//	return st;
+// }
+// void freeSwaptable (struct swaptable *st) {
+//	struct swapent *swapent = st->swt_ent;
+//	for (int i = 0; i < st->swt_n; i++,swapent++) {
+//		free(swapent->ste_path);
+//	}
+//	free(st);
+// }
+// swapent_t getSwapEnt(swapent_t *ent, int i) {
+//	return ent[i];
+// }
+// int64_t getPpKernel() {
+//	int64_t pp_kernel = 0;
+//	kstat_ctl_t *ksc;
+//	kstat_t *ks;
+//	kstat_named_t *knp;
+//	kid_t kid;
+//
+//	if ((ksc = kstat_open()) == NULL) {
+//		return -1;
+//	}
+//	if ((ks = kstat_lookup(ksc, "unix", 0, "system_pages")) == NULL) {
+//		return -1;
+//	}
+//	if (((kid = kstat_read(ksc, ks, NULL)) == -1) ||
+//	    ((knp = kstat_data_lookup(ks, "pp_kernel")) == NULL)) {
+//		return -1;
+//	}
+//	switch (knp->data_type) {
+//	case KSTAT_DATA_UINT64:
+//		pp_kernel = knp->value.ui64;
+//		break;
+//	case KSTAT_DATA_UINT32:
+//		pp_kernel = knp->value.ui32;
+//		break;
+//	}
+//	pp_kernel *= sysconf(_SC_PAGESIZE);
+//	return (pp_kernel > 0 ? pp_kernel : -1);
+// }
+import "C"
+
+// getTotalMem gets the total system memory using sysconf, the same way
+// prtconf does.
+func getTotalMem() int64 {
+	pagesize := C.sysconf(C._SC_PAGESIZE)
+	npages := C.sysconf(C._SC_PHYS_PAGES)
+	return int64(pagesize * npages)
+}
+
+// getFreeMem gets the free system memory using sysconf.
+func getFreeMem() int64 {
+	pagesize := C.sysconf(C._SC_PAGESIZE)
+	npages := C.sysconf(C._SC_AVPHYS_PAGES)
+	return int64(pagesize * npages)
+}
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+// MemInfo type.
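+//
+// A caller sketch (the contract matches the linux implementation; the
+// output formatting is illustrative):
+//
+//	if mi, err := ReadMemInfo(); err == nil {
+//		fmt.Printf("total=%d free=%d\n", mi.MemTotal, mi.MemFree)
+//	}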
+func ReadMemInfo() (*MemInfo, error) {
+	ppKernel := C.getPpKernel()
+	MemTotal := getTotalMem()
+	MemFree := getFreeMem()
+	SwapTotal, SwapFree, err := getSysSwap()
+
+	if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 ||
+		SwapFree < 0 {
+		return nil, fmt.Errorf("error getting system memory info: %v", err)
+	}
+
+	meminfo := &MemInfo{}
+	// Total memory is total physical memory less the memory locked by the kernel
+	meminfo.MemTotal = MemTotal - int64(ppKernel)
+	meminfo.MemFree = MemFree
+	meminfo.SwapTotal = SwapTotal
+	meminfo.SwapFree = SwapFree
+
+	return meminfo, nil
+}
+
+func getSysSwap() (int64, int64, error) {
+	var tSwap int64
+	var fSwap int64
+	var diskblksPerPage int64
+	num, err := C.swapctl(C.SC_GETNSWP, nil)
+	if err != nil {
+		return -1, -1, err
+	}
+	st := C.allocSwaptable(num)
+	_, err = C.swapctl(C.SC_LIST, unsafe.Pointer(st))
+	if err != nil {
+		C.freeSwaptable(st)
+		return -1, -1, err
+	}
+
+	diskblksPerPage = int64(C.sysconf(C._SC_PAGESIZE) >> C.DEV_BSHIFT)
+	for i := 0; i < int(num); i++ {
+		swapent := C.getSwapEnt(&st.swt_ent[0], C.int(i))
+		tSwap += int64(swapent.ste_pages) * diskblksPerPage
+		fSwap += int64(swapent.ste_free) * diskblksPerPage
+	}
+	C.freeSwaptable(st)
+	return tSwap, fSwap, nil
+}
diff --git a/vendor/github.com/moby/moby/pkg/system/meminfo_unix_test.go b/vendor/github.com/moby/moby/pkg/system/meminfo_unix_test.go
new file mode 100644
index 000000000..44f556288
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/system/meminfo_unix_test.go
@@ -0,0 +1,40 @@
+// +build linux freebsd
+
+package system
+
+import (
+	"strings"
+	"testing"
+
+	"github.com/docker/go-units"
+)
+
+// TestMemInfo tests parseMemInfo with a static meminfo string
+func TestMemInfo(t *testing.T) {
+	const input = `
+	MemTotal:      1 kB
+	MemFree:       2 kB
+	SwapTotal:     3 kB
+	SwapFree:      4 kB
+	Malformed1:
+	Malformed2:    1
+	Malformed3:    2 MB
+	Malformed4:    X kB
+	`
+	meminfo, err := parseMemInfo(strings.NewReader(input))
+	if err != nil {
+		t.Fatal(err)
+	}
+	if meminfo.MemTotal != 1*units.KiB {
+		t.Fatalf("Unexpected MemTotal: %d", meminfo.MemTotal)
+	}
+	if meminfo.MemFree != 2*units.KiB {
+		t.Fatalf("Unexpected MemFree: %d", meminfo.MemFree)
+	}
+	if meminfo.SwapTotal != 3*units.KiB {
+		t.Fatalf("Unexpected SwapTotal: %d", meminfo.SwapTotal)
+	}
+	if meminfo.SwapFree != 4*units.KiB {
+		t.Fatalf("Unexpected SwapFree: %d", meminfo.SwapFree)
+	}
+}
diff --git a/vendor/github.com/moby/moby/pkg/system/meminfo_unsupported.go b/vendor/github.com/moby/moby/pkg/system/meminfo_unsupported.go
new file mode 100644
index 000000000..3ce019dff
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/system/meminfo_unsupported.go
@@ -0,0 +1,8 @@
+// +build !linux,!windows,!solaris
+
+package system
+
+// ReadMemInfo is not supported on platforms other than linux, windows and
+// solaris.
+func ReadMemInfo() (*MemInfo, error) { + return nil, ErrNotSupportedPlatform +} diff --git a/vendor/github.com/moby/moby/pkg/system/meminfo_windows.go b/vendor/github.com/moby/moby/pkg/system/meminfo_windows.go new file mode 100644 index 000000000..883944a4c --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/meminfo_windows.go @@ -0,0 +1,45 @@ +package system + +import ( + "unsafe" + + "golang.org/x/sys/windows" +) + +var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") +) + +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx +type memorystatusex struct { + dwLength uint32 + dwMemoryLoad uint32 + ullTotalPhys uint64 + ullAvailPhys uint64 + ullTotalPageFile uint64 + ullAvailPageFile uint64 + ullTotalVirtual uint64 + ullAvailVirtual uint64 + ullAvailExtendedVirtual uint64 +} + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. +func ReadMemInfo() (*MemInfo, error) { + msi := &memorystatusex{ + dwLength: 64, + } + r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) + if r1 == 0 { + return &MemInfo{}, nil + } + return &MemInfo{ + MemTotal: int64(msi.ullTotalPhys), + MemFree: int64(msi.ullAvailPhys), + SwapTotal: int64(msi.ullTotalPageFile), + SwapFree: int64(msi.ullAvailPageFile), + }, nil +} diff --git a/vendor/github.com/moby/moby/pkg/system/mknod.go b/vendor/github.com/moby/moby/pkg/system/mknod.go new file mode 100644 index 000000000..af79a6538 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/mknod.go @@ -0,0 +1,22 @@ +// +build !windows + +package system + +import ( + "golang.org/x/sys/unix" +) + +// Mknod creates a filesystem node (file, device special file or named pipe) named path +// with attributes specified by mode and dev. +func Mknod(path string, mode uint32, dev int) error { + return unix.Mknod(path, mode, dev) +} + +// Mkdev is used to build the value of linux devices (in /dev/) which specifies major +// and minor number of the newly created device special file. +// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. +// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, +// then the top 12 bits of the minor. +func Mkdev(major int64, minor int64) uint32 { + return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) +} diff --git a/vendor/github.com/moby/moby/pkg/system/mknod_windows.go b/vendor/github.com/moby/moby/pkg/system/mknod_windows.go new file mode 100644 index 000000000..2e863c021 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/mknod_windows.go @@ -0,0 +1,13 @@ +// +build windows + +package system + +// Mknod is not implemented on Windows. +func Mknod(path string, mode uint32, dev int) error { + return ErrNotSupportedPlatform +} + +// Mkdev is not implemented on Windows. 
+func Mkdev(major int64, minor int64) uint32 { + panic("Mkdev not implemented on Windows.") +} diff --git a/vendor/github.com/moby/moby/pkg/system/path.go b/vendor/github.com/moby/moby/pkg/system/path.go new file mode 100644 index 000000000..f634a6be6 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/path.go @@ -0,0 +1,21 @@ +package system + +import "runtime" + +const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + +// DefaultPathEnv is unix style list of directories to search for +// executables. Each directory is separated from the next by a colon +// ':' character . +func DefaultPathEnv(platform string) string { + if runtime.GOOS == "windows" { + if platform != runtime.GOOS && LCOWSupported() { + return defaultUnixPathEnv + } + // Deliberately empty on Windows containers on Windows as the default path will be set by + // the container. Docker has no context of what the default path should be. + return "" + } + return defaultUnixPathEnv + +} diff --git a/vendor/github.com/moby/moby/pkg/system/path_unix.go b/vendor/github.com/moby/moby/pkg/system/path_unix.go new file mode 100644 index 000000000..f3762e69d --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/path_unix.go @@ -0,0 +1,9 @@ +// +build !windows + +package system + +// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, +// is the system drive. This is a no-op on Linux. +func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { + return path, nil +} diff --git a/vendor/github.com/moby/moby/pkg/system/path_windows.go b/vendor/github.com/moby/moby/pkg/system/path_windows.go new file mode 100644 index 000000000..aab891522 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/path_windows.go @@ -0,0 +1,33 @@ +// +build windows + +package system + +import ( + "fmt" + "path/filepath" + "strings" +) + +// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. +// This is used, for example, when validating a user provided path in docker cp. +// If a drive letter is supplied, it must be the system drive. The drive letter +// is always removed. Also, it translates it to OS semantics (IOW / to \). We +// need the path in this syntax so that it can ultimately be concatenated with +// a Windows long-path which doesn't support drive-letters. Examples: +// C: --> Fail +// C:\ --> \ +// a --> a +// /a --> \a +// d:\ --> Fail +func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { + if len(path) == 2 && string(path[1]) == ":" { + return "", fmt.Errorf("No relative path specified in %q", path) + } + if !filepath.IsAbs(path) || len(path) < 2 { + return filepath.FromSlash(path), nil + } + if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { + return "", fmt.Errorf("The specified path is not on the system drive (C:)") + } + return filepath.FromSlash(path[2:]), nil +} diff --git a/vendor/github.com/moby/moby/pkg/system/path_windows_test.go b/vendor/github.com/moby/moby/pkg/system/path_windows_test.go new file mode 100644 index 000000000..eccb26aae --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/path_windows_test.go @@ -0,0 +1,78 @@ +// +build windows + +package system + +import "testing" + +// TestCheckSystemDriveAndRemoveDriveLetter tests CheckSystemDriveAndRemoveDriveLetter +func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) { + // Fails if not C drive. 
+ path, err := CheckSystemDriveAndRemoveDriveLetter(`d:\`) + if err == nil || (err != nil && err.Error() != "The specified path is not on the system drive (C:)") { + t.Fatalf("Expected error for d:") + } + + // Single character is unchanged + if path, err = CheckSystemDriveAndRemoveDriveLetter("z"); err != nil { + t.Fatalf("Single character should pass") + } + if path != "z" { + t.Fatalf("Single character should be unchanged") + } + + // Two characters without colon is unchanged + if path, err = CheckSystemDriveAndRemoveDriveLetter("AB"); err != nil { + t.Fatalf("2 characters without colon should pass") + } + if path != "AB" { + t.Fatalf("2 characters without colon should be unchanged") + } + + // Abs path without drive letter + if path, err = CheckSystemDriveAndRemoveDriveLetter(`\l`); err != nil { + t.Fatalf("abs path no drive letter should pass") + } + if path != `\l` { + t.Fatalf("abs path without drive letter should be unchanged") + } + + // Abs path without drive letter, linux style + if path, err = CheckSystemDriveAndRemoveDriveLetter(`/l`); err != nil { + t.Fatalf("abs path no drive letter linux style should pass") + } + if path != `\l` { + t.Fatalf("abs path without drive letter linux failed %s", path) + } + + // Drive-colon should be stripped + if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:\`); err != nil { + t.Fatalf("An absolute path should pass") + } + if path != `\` { + t.Fatalf(`An absolute path should have been shortened to \ %s`, path) + } + + // Verify with a linux-style path + if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:/`); err != nil { + t.Fatalf("An absolute path should pass") + } + if path != `\` { + t.Fatalf(`A linux style absolute path should have been shortened to \ %s`, path) + } + + // Failure on c: + if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:`); err == nil { + t.Fatalf("c: should fail") + } + if err.Error() != `No relative path specified in "c:"` { + t.Fatalf(path, err) + } + + // Failure on d: + if path, err = CheckSystemDriveAndRemoveDriveLetter(`d:`); err == nil { + t.Fatalf("c: should fail") + } + if err.Error() != `No relative path specified in "d:"` { + t.Fatalf(path, err) + } +} diff --git a/vendor/github.com/moby/moby/pkg/system/process_unix.go b/vendor/github.com/moby/moby/pkg/system/process_unix.go new file mode 100644 index 000000000..26c8b42c1 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/process_unix.go @@ -0,0 +1,24 @@ +// +build linux freebsd solaris darwin + +package system + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +// IsProcessAlive returns true if process with a given pid is running. +func IsProcessAlive(pid int) bool { + err := unix.Kill(pid, syscall.Signal(0)) + if err == nil || err == unix.EPERM { + return true + } + + return false +} + +// KillProcess force-stops a process. +func KillProcess(pid int) { + unix.Kill(pid, unix.SIGKILL) +} diff --git a/vendor/github.com/moby/moby/pkg/system/rm.go b/vendor/github.com/moby/moby/pkg/system/rm.go new file mode 100644 index 000000000..101b569a5 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/rm.go @@ -0,0 +1,80 @@ +package system + +import ( + "os" + "syscall" + "time" + + "github.com/docker/docker/pkg/mount" + "github.com/pkg/errors" +) + +// EnsureRemoveAll wraps `os.RemoveAll` to check for specific errors that can +// often be remedied. +// Only use `EnsureRemoveAll` if you really want to make every effort to remove +// a directory. 
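+// A call-site sketch (the path is illustrative):
+//
+//	_ = EnsureRemoveAll(filepath.Join(root, "containers", id))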
+// +// Because of the way `os.Remove` (and by extension `os.RemoveAll`) works, there +// can be a race between reading directory entries and then actually attempting +// to remove everything in the directory. +// These types of errors do not need to be returned since it's ok for the dir to +// be gone we can just retry the remove operation. +// +// This should not return a `os.ErrNotExist` kind of error under any circumstances +func EnsureRemoveAll(dir string) error { + notExistErr := make(map[string]bool) + + // track retries + exitOnErr := make(map[string]int) + maxRetry := 5 + + // Attempt to unmount anything beneath this dir first + mount.RecursiveUnmount(dir) + + for { + err := os.RemoveAll(dir) + if err == nil { + return err + } + + pe, ok := err.(*os.PathError) + if !ok { + return err + } + + if os.IsNotExist(err) { + if notExistErr[pe.Path] { + return err + } + notExistErr[pe.Path] = true + + // There is a race where some subdir can be removed but after the parent + // dir entries have been read. + // So the path could be from `os.Remove(subdir)` + // If the reported non-existent path is not the passed in `dir` we + // should just retry, but otherwise return with no error. + if pe.Path == dir { + return nil + } + continue + } + + if pe.Err != syscall.EBUSY { + return err + } + + if mounted, _ := mount.Mounted(pe.Path); mounted { + if e := mount.Unmount(pe.Path); e != nil { + if mounted, _ := mount.Mounted(pe.Path); mounted { + return errors.Wrapf(e, "error while removing %s", dir) + } + } + } + + if exitOnErr[pe.Path] == maxRetry { + return err + } + exitOnErr[pe.Path]++ + time.Sleep(100 * time.Millisecond) + } +} diff --git a/vendor/github.com/moby/moby/pkg/system/rm_test.go b/vendor/github.com/moby/moby/pkg/system/rm_test.go new file mode 100644 index 000000000..fc2821f89 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/rm_test.go @@ -0,0 +1,84 @@ +package system + +import ( + "io/ioutil" + "os" + "path/filepath" + "runtime" + "testing" + "time" + + "github.com/docker/docker/pkg/mount" +) + +func TestEnsureRemoveAllNotExist(t *testing.T) { + // should never return an error for a non-existent path + if err := EnsureRemoveAll("/non/existent/path"); err != nil { + t.Fatal(err) + } +} + +func TestEnsureRemoveAllWithDir(t *testing.T) { + dir, err := ioutil.TempDir("", "test-ensure-removeall-with-dir") + if err != nil { + t.Fatal(err) + } + if err := EnsureRemoveAll(dir); err != nil { + t.Fatal(err) + } +} + +func TestEnsureRemoveAllWithFile(t *testing.T) { + tmp, err := ioutil.TempFile("", "test-ensure-removeall-with-dir") + if err != nil { + t.Fatal(err) + } + tmp.Close() + if err := EnsureRemoveAll(tmp.Name()); err != nil { + t.Fatal(err) + } +} + +func TestEnsureRemoveAllWithMount(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("mount not supported on Windows") + } + + dir1, err := ioutil.TempDir("", "test-ensure-removeall-with-dir1") + if err != nil { + t.Fatal(err) + } + dir2, err := ioutil.TempDir("", "test-ensure-removeall-with-dir2") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir2) + + bindDir := filepath.Join(dir1, "bind") + if err := os.MkdirAll(bindDir, 0755); err != nil { + t.Fatal(err) + } + + if err := mount.Mount(dir2, bindDir, "none", "bind"); err != nil { + t.Fatal(err) + } + + done := make(chan struct{}) + go func() { + err = EnsureRemoveAll(dir1) + close(done) + }() + + select { + case <-done: + if err != nil { + t.Fatal(err) + } + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for EnsureRemoveAll to finish") + } + 
+ if _, err := os.Stat(dir1); !os.IsNotExist(err) { + t.Fatalf("expected %q to not exist", dir1) + } +} diff --git a/vendor/github.com/moby/moby/pkg/system/stat_darwin.go b/vendor/github.com/moby/moby/pkg/system/stat_darwin.go new file mode 100644 index 000000000..715f05b93 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/stat_darwin.go @@ -0,0 +1,13 @@ +package system + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} diff --git a/vendor/github.com/moby/moby/pkg/system/stat_freebsd.go b/vendor/github.com/moby/moby/pkg/system/stat_freebsd.go new file mode 100644 index 000000000..715f05b93 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/stat_freebsd.go @@ -0,0 +1,13 @@ +package system + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} diff --git a/vendor/github.com/moby/moby/pkg/system/stat_linux.go b/vendor/github.com/moby/moby/pkg/system/stat_linux.go new file mode 100644 index 000000000..66bf6e28e --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/stat_linux.go @@ -0,0 +1,19 @@ +package system + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtim}, nil +} + +// FromStatT converts a syscall.Stat_t type to a system.Stat_t type +// This is exposed on Linux as pkg/archive/changes uses it. +func FromStatT(s *syscall.Stat_t) (*StatT, error) { + return fromStatT(s) +} diff --git a/vendor/github.com/moby/moby/pkg/system/stat_openbsd.go b/vendor/github.com/moby/moby/pkg/system/stat_openbsd.go new file mode 100644 index 000000000..b607dea94 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/stat_openbsd.go @@ -0,0 +1,13 @@ +package system + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtim}, nil +} diff --git a/vendor/github.com/moby/moby/pkg/system/stat_solaris.go b/vendor/github.com/moby/moby/pkg/system/stat_solaris.go new file mode 100644 index 000000000..b607dea94 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/stat_solaris.go @@ -0,0 +1,13 @@ +package system + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtim}, nil +} diff --git a/vendor/github.com/moby/moby/pkg/system/stat_unix.go b/vendor/github.com/moby/moby/pkg/system/stat_unix.go new file mode 100644 index 000000000..91c7d121c --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/stat_unix.go @@ -0,0 +1,60 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +// StatT type contains status of a file. It contains metadata +// like permission, owner, group, size, etc about a file. 
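The per-platform `fromStatT` variants above differ only in which `syscall.Stat_t` field carries the modification time (`Mtimespec` on Darwin and FreeBSD, `Mtim` elsewhere); everything funnels into the portable accessors defined next. A hedged, Unix-only usage sketch (the path is illustrative, and the import path is assumed from the vendored tree above):

```go
package main

import (
	"fmt"
	"log"

	"github.com/moby/moby/pkg/system" // import path assumed from the vendored tree above
)

func main() {
	// system.Stat hides the platform-specific syscall.Stat_t layout
	// behind the StatT getters.
	st, err := system.Stat("/etc/hosts")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("mode=%o uid=%d gid=%d size=%d\n",
		st.Mode(), st.UID(), st.GID(), st.Size())
}
```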
+type StatT struct { + mode uint32 + uid uint32 + gid uint32 + rdev uint64 + size int64 + mtim syscall.Timespec +} + +// Mode returns file's permission mode. +func (s StatT) Mode() uint32 { + return s.mode +} + +// UID returns file's user id of owner. +func (s StatT) UID() uint32 { + return s.uid +} + +// GID returns file's group id of owner. +func (s StatT) GID() uint32 { + return s.gid +} + +// Rdev returns file's device ID (if it's special file). +func (s StatT) Rdev() uint64 { + return s.rdev +} + +// Size returns file's size. +func (s StatT) Size() int64 { + return s.size +} + +// Mtim returns file's last modification time. +func (s StatT) Mtim() syscall.Timespec { + return s.mtim +} + +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Stat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/vendor/github.com/moby/moby/pkg/system/stat_unix_test.go b/vendor/github.com/moby/moby/pkg/system/stat_unix_test.go new file mode 100644 index 000000000..dee8d30a1 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/stat_unix_test.go @@ -0,0 +1,39 @@ +// +build linux freebsd + +package system + +import ( + "os" + "syscall" + "testing" +) + +// TestFromStatT tests fromStatT for a tempfile +func TestFromStatT(t *testing.T) { + file, _, _, dir := prepareFiles(t) + defer os.RemoveAll(dir) + + stat := &syscall.Stat_t{} + err := syscall.Lstat(file, stat) + + s, err := fromStatT(stat) + if err != nil { + t.Fatal(err) + } + + if stat.Mode != s.Mode() { + t.Fatal("got invalid mode") + } + if stat.Uid != s.UID() { + t.Fatal("got invalid uid") + } + if stat.Gid != s.GID() { + t.Fatal("got invalid gid") + } + if stat.Rdev != s.Rdev() { + t.Fatal("got invalid rdev") + } + if stat.Mtim != s.Mtim() { + t.Fatal("got invalid mtim") + } +} diff --git a/vendor/github.com/moby/moby/pkg/system/stat_windows.go b/vendor/github.com/moby/moby/pkg/system/stat_windows.go new file mode 100644 index 000000000..6c6397268 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/stat_windows.go @@ -0,0 +1,49 @@ +package system + +import ( + "os" + "time" +) + +// StatT type contains status of a file. It contains metadata +// like permission, size, etc about a file. +type StatT struct { + mode os.FileMode + size int64 + mtim time.Time +} + +// Size returns file's size. +func (s StatT) Size() int64 { + return s.size +} + +// Mode returns file's permission mode. +func (s StatT) Mode() os.FileMode { + return os.FileMode(s.mode) +} + +// Mtim returns file's last modification time. +func (s StatT) Mtim() time.Time { + return time.Time(s.mtim) +} + +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. 
+// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + fi, err := os.Stat(path) + if err != nil { + return nil, err + } + return fromStatT(&fi) +} + +// fromStatT converts a os.FileInfo type to a system.StatT type +func fromStatT(fi *os.FileInfo) (*StatT, error) { + return &StatT{ + size: (*fi).Size(), + mode: (*fi).Mode(), + mtim: (*fi).ModTime()}, nil +} diff --git a/vendor/github.com/moby/moby/pkg/system/syscall_unix.go b/vendor/github.com/moby/moby/pkg/system/syscall_unix.go new file mode 100644 index 000000000..49dbdd378 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/syscall_unix.go @@ -0,0 +1,17 @@ +// +build linux freebsd + +package system + +import "golang.org/x/sys/unix" + +// Unmount is a platform-specific helper function to call +// the unmount syscall. +func Unmount(dest string) error { + return unix.Unmount(dest, 0) +} + +// CommandLineToArgv should not be used on Unix. +// It simply returns commandLine in the only element in the returned array. +func CommandLineToArgv(commandLine string) ([]string, error) { + return []string{commandLine}, nil +} diff --git a/vendor/github.com/moby/moby/pkg/system/syscall_windows.go b/vendor/github.com/moby/moby/pkg/system/syscall_windows.go new file mode 100644 index 000000000..eded233b9 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/syscall_windows.go @@ -0,0 +1,122 @@ +package system + +import ( + "unsafe" + + "github.com/Sirupsen/logrus" + "golang.org/x/sys/windows" +) + +var ( + ntuserApiset = windows.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") + procGetVersionExW = modkernel32.NewProc("GetVersionExW") + procGetProductInfo = modkernel32.NewProc("GetProductInfo") +) + +// OSVersion is a wrapper for Windows version information +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx +type OSVersion struct { + Version uint32 + MajorVersion uint8 + MinorVersion uint8 + Build uint16 +} + +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx +type osVersionInfoEx struct { + OSVersionInfoSize uint32 + MajorVersion uint32 + MinorVersion uint32 + BuildNumber uint32 + PlatformID uint32 + CSDVersion [128]uint16 + ServicePackMajor uint16 + ServicePackMinor uint16 + SuiteMask uint16 + ProductType byte + Reserve byte +} + +// GetOSVersion gets the operating system version on Windows. Note that +// docker.exe must be manifested to get the correct version information. +func GetOSVersion() OSVersion { + var err error + osv := OSVersion{} + osv.Version, err = windows.GetVersion() + if err != nil { + // GetVersion never fails. + panic(err) + } + osv.MajorVersion = uint8(osv.Version & 0xFF) + osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) + osv.Build = uint16(osv.Version >> 16) + return osv +} + +// IsWindowsClient returns true if the SKU is client +// @engine maintainers - this function should not be removed or modified as it +// is used to enforce licensing restrictions on Windows. +func IsWindowsClient() bool { + osviex := &osVersionInfoEx{OSVersionInfoSize: 284} + r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex))) + if r1 == 0 { + logrus.Warnf("GetVersionExW failed - assuming server SKU: %v", err) + return false + } + const verNTWorkstation = 0x00000001 + return osviex.ProductType == verNTWorkstation +} + +// IsIoTCore returns true if the currently running image is based off of +// Windows 10 IoT Core. 
+// @engine maintainers - this function should not be removed or modified as it +// is used to enforce licensing restrictions on Windows. +func IsIoTCore() bool { + var returnedProductType uint32 + r1, _, err := procGetProductInfo.Call(6, 1, 0, 0, uintptr(unsafe.Pointer(&returnedProductType))) + if r1 == 0 { + logrus.Warnf("GetProductInfo failed - assuming this is not IoT: %v", err) + return false + } + const productIoTUAP = 0x0000007B + const productIoTUAPCommercial = 0x00000083 + return returnedProductType == productIoTUAP || returnedProductType == productIoTUAPCommercial +} + +// Unmount is a platform-specific helper function to call +// the unmount syscall. Not supported on Windows +func Unmount(dest string) error { + return nil +} + +// CommandLineToArgv wraps the Windows syscall to turn a commandline into an argument array. +func CommandLineToArgv(commandLine string) ([]string, error) { + var argc int32 + + argsPtr, err := windows.UTF16PtrFromString(commandLine) + if err != nil { + return nil, err + } + + argv, err := windows.CommandLineToArgv(argsPtr, &argc) + if err != nil { + return nil, err + } + defer windows.LocalFree(windows.Handle(uintptr(unsafe.Pointer(argv)))) + + newArgs := make([]string, argc) + for i, v := range (*argv)[:argc] { + newArgs[i] = string(windows.UTF16ToString((*v)[:])) + } + + return newArgs, nil +} + +// HasWin32KSupport determines whether containers that depend on win32k can +// run on this machine. Win32k is the driver used to implement windowing. +func HasWin32KSupport() bool { + // For now, check for ntuser API support on the host. In the future, a host + // may support win32k in containers even if the host does not support ntuser + // APIs. + return ntuserApiset.Load() == nil +} diff --git a/vendor/github.com/moby/moby/pkg/system/syscall_windows_test.go b/vendor/github.com/moby/moby/pkg/system/syscall_windows_test.go new file mode 100644 index 000000000..4886b2b9b --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/syscall_windows_test.go @@ -0,0 +1,9 @@ +package system + +import "testing" + +func TestHasWin32KSupport(t *testing.T) { + s := HasWin32KSupport() // make sure this doesn't panic + + t.Logf("win32k: %v", s) // will be different on different platforms -- informative only +} diff --git a/vendor/github.com/moby/moby/pkg/system/umask.go b/vendor/github.com/moby/moby/pkg/system/umask.go new file mode 100644 index 000000000..5a10eda5a --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/umask.go @@ -0,0 +1,13 @@ +// +build !windows + +package system + +import ( + "golang.org/x/sys/unix" +) + +// Umask sets current process's file mode creation mask to newmask +// and returns oldmask. +func Umask(newmask int) (oldmask int, err error) { + return unix.Umask(newmask), nil +} diff --git a/vendor/github.com/moby/moby/pkg/system/umask_windows.go b/vendor/github.com/moby/moby/pkg/system/umask_windows.go new file mode 100644 index 000000000..13f1de176 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/umask_windows.go @@ -0,0 +1,9 @@ +// +build windows + +package system + +// Umask is not supported on the windows platform. 
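The bit arithmetic in `GetOSVersion` above unpacks the DWORD that `GetVersion` returns: low byte is the major version, the next byte the minor version, and the high word the build number. A worked example with an illustrative literal:

```go
package main

import "fmt"

func main() {
	// 0x3839000A is Windows 10.0 build 14393 packed the way GetVersion
	// reports it: build<<16 | minor<<8 | major.
	const v uint32 = 0x3839000A

	major := uint8(v & 0xFF)      // 10
	minor := uint8(v >> 8 & 0xFF) // 0
	build := uint16(v >> 16)      // 14393
	fmt.Println(major, minor, build)
}
```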
+func Umask(newmask int) (oldmask int, err error) { + // should not be called on cli code path + return 0, ErrNotSupportedPlatform +} diff --git a/vendor/github.com/moby/moby/pkg/system/utimes_freebsd.go b/vendor/github.com/moby/moby/pkg/system/utimes_freebsd.go new file mode 100644 index 000000000..6a7752437 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/utimes_freebsd.go @@ -0,0 +1,24 @@ +package system + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/unix" +) + +// LUtimesNano is used to change access and modification time of the specified path. +// It's used for symbol link file because unix.UtimesNano doesn't support a NOFOLLOW flag atm. +func LUtimesNano(path string, ts []syscall.Timespec) error { + var _path *byte + _path, err := unix.BytePtrFromString(path) + if err != nil { + return err + } + + if _, _, err := unix.Syscall(unix.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != unix.ENOSYS { + return err + } + + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/system/utimes_linux.go b/vendor/github.com/moby/moby/pkg/system/utimes_linux.go new file mode 100644 index 000000000..edc588a63 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/utimes_linux.go @@ -0,0 +1,25 @@ +package system + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/unix" +) + +// LUtimesNano is used to change access and modification time of the specified path. +// It's used for symbol link file because unix.UtimesNano doesn't support a NOFOLLOW flag atm. +func LUtimesNano(path string, ts []syscall.Timespec) error { + atFdCwd := unix.AT_FDCWD + + var _path *byte + _path, err := unix.BytePtrFromString(path) + if err != nil { + return err + } + if _, _, err := unix.Syscall6(unix.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), unix.AT_SYMLINK_NOFOLLOW, 0, 0); err != 0 && err != unix.ENOSYS { + return err + } + + return nil +} diff --git a/vendor/github.com/moby/moby/pkg/system/utimes_unix_test.go b/vendor/github.com/moby/moby/pkg/system/utimes_unix_test.go new file mode 100644 index 000000000..a73ed118c --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/system/utimes_unix_test.go @@ -0,0 +1,68 @@ +// +build linux freebsd + +package system + +import ( + "io/ioutil" + "os" + "path/filepath" + "syscall" + "testing" +) + +// prepareFiles creates files for testing in the temp directory +func prepareFiles(t *testing.T) (string, string, string, string) { + dir, err := ioutil.TempDir("", "docker-system-test") + if err != nil { + t.Fatal(err) + } + + file := filepath.Join(dir, "exist") + if err := ioutil.WriteFile(file, []byte("hello"), 0644); err != nil { + t.Fatal(err) + } + + invalid := filepath.Join(dir, "doesnt-exist") + + symlink := filepath.Join(dir, "symlink") + if err := os.Symlink(file, symlink); err != nil { + t.Fatal(err) + } + + return file, invalid, symlink, dir +} + +func TestLUtimesNano(t *testing.T) { + file, invalid, symlink, dir := prepareFiles(t) + defer os.RemoveAll(dir) + + before, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + + ts := []syscall.Timespec{{Sec: 0, Nsec: 0}, {Sec: 0, Nsec: 0}} + if err := LUtimesNano(symlink, ts); err != nil { + t.Fatal(err) + } + + symlinkInfo, err := os.Lstat(symlink) + if err != nil { + t.Fatal(err) + } + if before.ModTime().Unix() == symlinkInfo.ModTime().Unix() { + t.Fatal("The modification time of the symlink should be different") + } + + fileInfo, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + if 
before.ModTime().Unix() != fileInfo.ModTime().Unix() {
+		t.Fatal("The modification time of the file should be the same")
+	}
+
+	if err := LUtimesNano(invalid, ts); err == nil {
+		t.Fatal("Expected an error for a non-existent file")
+	}
+}
diff --git a/vendor/github.com/moby/moby/pkg/system/utimes_unsupported.go b/vendor/github.com/moby/moby/pkg/system/utimes_unsupported.go
new file mode 100644
index 000000000..139714544
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/system/utimes_unsupported.go
@@ -0,0 +1,10 @@
+// +build !linux,!freebsd
+
+package system
+
+import "syscall"
+
+// LUtimesNano is only supported on linux and freebsd.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+	return ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/moby/moby/pkg/system/xattrs_linux.go b/vendor/github.com/moby/moby/pkg/system/xattrs_linux.go
new file mode 100644
index 000000000..98b111be4
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/system/xattrs_linux.go
@@ -0,0 +1,29 @@
+package system
+
+import "golang.org/x/sys/unix"
+
+// Lgetxattr retrieves the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
+// It returns a nil slice and nil error if the xattr is not set.
+func Lgetxattr(path string, attr string) ([]byte, error) {
+	dest := make([]byte, 128)
+	sz, errno := unix.Lgetxattr(path, attr, dest)
+	if errno == unix.ENODATA {
+		return nil, nil
+	}
+	if errno == unix.ERANGE {
+		dest = make([]byte, sz) // the 128-byte buffer was too small; retry at the reported size
+		sz, errno = unix.Lgetxattr(path, attr, dest)
+	}
+	if errno != nil {
+		return nil, errno
+	}
+
+	return dest[:sz], nil
+}
+
+// Lsetxattr sets the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+	return unix.Lsetxattr(path, attr, data, flags)
+}
diff --git a/vendor/github.com/moby/moby/pkg/system/xattrs_unsupported.go b/vendor/github.com/moby/moby/pkg/system/xattrs_unsupported.go
new file mode 100644
index 000000000..0114f2227
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/system/xattrs_unsupported.go
@@ -0,0 +1,13 @@
+// +build !linux
+
+package system
+
+// Lgetxattr is not supported on platforms other than linux.
+func Lgetxattr(path string, attr string) ([]byte, error) {
+	return nil, ErrNotSupportedPlatform
+}
+
+// Lsetxattr is not supported on platforms other than linux.
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+	return ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/moby/moby/pkg/tailfile/tailfile.go b/vendor/github.com/moby/moby/pkg/tailfile/tailfile.go
new file mode 100644
index 000000000..09eb393ab
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/tailfile/tailfile.go
@@ -0,0 +1,66 @@
+// Package tailfile provides helper functions to read the last n lines of any
+// ReadSeeker.
+package tailfile
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"os"
+)
+
+const blockSize = 1024
+
+var eol = []byte("\n")
+
+// ErrNonPositiveLinesNumber is the error returned when the requested number of lines is not positive.
+var ErrNonPositiveLinesNumber = errors.New("The number of lines to extract from the file must be positive")
+
+// TailFile returns the last n lines of reader f (typically a file); see the usage sketch below.
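A hedged usage sketch for `TailFile` as defined next; the log path is hypothetical, and any `io.ReadSeeker` works, which is why the tests further down can drive it with a temp file:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/moby/moby/pkg/tailfile" // import path assumed from the vendored tree above
)

func main() {
	f, err := os.Open("/var/log/app.log") // hypothetical file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	lines, err := tailfile.TailFile(f, 10) // last 10 lines, as [][]byte
	if err != nil {
		log.Fatal(err)
	}
	for _, l := range lines {
		fmt.Printf("%s\n", l)
	}
}
```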
+func TailFile(f io.ReadSeeker, n int) ([][]byte, error) { + if n <= 0 { + return nil, ErrNonPositiveLinesNumber + } + size, err := f.Seek(0, os.SEEK_END) + if err != nil { + return nil, err + } + block := -1 + var data []byte + var cnt int + for { + var b []byte + step := int64(block * blockSize) + left := size + step // how many bytes to beginning + if left < 0 { + if _, err := f.Seek(0, os.SEEK_SET); err != nil { + return nil, err + } + b = make([]byte, blockSize+left) + if _, err := f.Read(b); err != nil { + return nil, err + } + data = append(b, data...) + break + } else { + b = make([]byte, blockSize) + if _, err := f.Seek(left, os.SEEK_SET); err != nil { + return nil, err + } + if _, err := f.Read(b); err != nil { + return nil, err + } + data = append(b, data...) + } + cnt += bytes.Count(b, eol) + if cnt > n { + break + } + block-- + } + lines := bytes.Split(data, eol) + if n < len(lines) { + return lines[len(lines)-n-1 : len(lines)-1], nil + } + return lines[:len(lines)-1], nil +} diff --git a/vendor/github.com/moby/moby/pkg/tailfile/tailfile_test.go b/vendor/github.com/moby/moby/pkg/tailfile/tailfile_test.go new file mode 100644 index 000000000..31217c036 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/tailfile/tailfile_test.go @@ -0,0 +1,148 @@ +package tailfile + +import ( + "io/ioutil" + "os" + "testing" +) + +func TestTailFile(t *testing.T) { + f, err := ioutil.TempFile("", "tail-test") + if err != nil { + t.Fatal(err) + } + defer f.Close() + defer os.RemoveAll(f.Name()) + testFile := []byte(`first line +second line +third line +fourth line +fifth line +next first line +next second line +next third line +next fourth line +next fifth line +last first line +next first line +next second line +next third line +next fourth line +next fifth line +next first line +next second line +next third line +next fourth line +next fifth line +last second line +last third line +last fourth line +last fifth line +truncated line`) + if _, err := f.Write(testFile); err != nil { + t.Fatal(err) + } + if _, err := f.Seek(0, os.SEEK_SET); err != nil { + t.Fatal(err) + } + expected := []string{"last fourth line", "last fifth line"} + res, err := TailFile(f, 2) + if err != nil { + t.Fatal(err) + } + for i, l := range res { + t.Logf("%s", l) + if expected[i] != string(l) { + t.Fatalf("Expected line %s, got %s", expected[i], l) + } + } +} + +func TestTailFileManyLines(t *testing.T) { + f, err := ioutil.TempFile("", "tail-test") + if err != nil { + t.Fatal(err) + } + defer f.Close() + defer os.RemoveAll(f.Name()) + testFile := []byte(`first line +second line +truncated line`) + if _, err := f.Write(testFile); err != nil { + t.Fatal(err) + } + if _, err := f.Seek(0, os.SEEK_SET); err != nil { + t.Fatal(err) + } + expected := []string{"first line", "second line"} + res, err := TailFile(f, 10000) + if err != nil { + t.Fatal(err) + } + for i, l := range res { + t.Logf("%s", l) + if expected[i] != string(l) { + t.Fatalf("Expected line %s, got %s", expected[i], l) + } + } +} + +func TestTailEmptyFile(t *testing.T) { + f, err := ioutil.TempFile("", "tail-test") + if err != nil { + t.Fatal(err) + } + defer f.Close() + defer os.RemoveAll(f.Name()) + res, err := TailFile(f, 10000) + if err != nil { + t.Fatal(err) + } + if len(res) != 0 { + t.Fatal("Must be empty slice from empty file") + } +} + +func TestTailNegativeN(t *testing.T) { + f, err := ioutil.TempFile("", "tail-test") + if err != nil { + t.Fatal(err) + } + defer f.Close() + defer os.RemoveAll(f.Name()) + testFile := []byte(`first line +second line 
+truncated line`) + if _, err := f.Write(testFile); err != nil { + t.Fatal(err) + } + if _, err := f.Seek(0, os.SEEK_SET); err != nil { + t.Fatal(err) + } + if _, err := TailFile(f, -1); err != ErrNonPositiveLinesNumber { + t.Fatalf("Expected ErrNonPositiveLinesNumber, got %s", err) + } + if _, err := TailFile(f, 0); err != ErrNonPositiveLinesNumber { + t.Fatalf("Expected ErrNonPositiveLinesNumber, got %s", err) + } +} + +func BenchmarkTail(b *testing.B) { + f, err := ioutil.TempFile("", "tail-test") + if err != nil { + b.Fatal(err) + } + defer f.Close() + defer os.RemoveAll(f.Name()) + for i := 0; i < 10000; i++ { + if _, err := f.Write([]byte("tailfile pretty interesting line\n")); err != nil { + b.Fatal(err) + } + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err := TailFile(f, 1000); err != nil { + b.Fatal(err) + } + } +} diff --git a/vendor/github.com/moby/moby/pkg/tarsum/builder_context.go b/vendor/github.com/moby/moby/pkg/tarsum/builder_context.go new file mode 100644 index 000000000..b42983e98 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/tarsum/builder_context.go @@ -0,0 +1,21 @@ +package tarsum + +// BuilderContext is an interface extending TarSum by adding the Remove method. +// In general there was concern about adding this method to TarSum itself +// so instead it is being added just to "BuilderContext" which will then +// only be used during the .dockerignore file processing +// - see builder/evaluator.go +type BuilderContext interface { + TarSum + Remove(string) +} + +func (bc *tarSum) Remove(filename string) { + for i, fis := range bc.sums { + if fis.Name() == filename { + bc.sums = append(bc.sums[:i], bc.sums[i+1:]...) + // Note, we don't just return because there could be + // more than one with this name + } + } +} diff --git a/vendor/github.com/moby/moby/pkg/tarsum/builder_context_test.go b/vendor/github.com/moby/moby/pkg/tarsum/builder_context_test.go new file mode 100644 index 000000000..f54bf3a1b --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/tarsum/builder_context_test.go @@ -0,0 +1,67 @@ +package tarsum + +import ( + "io" + "io/ioutil" + "os" + "testing" +) + +// Try to remove tarsum (in the BuilderContext) that do not exists, won't change a thing +func TestTarSumRemoveNonExistent(t *testing.T) { + filename := "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar" + reader, err := os.Open(filename) + if err != nil { + t.Fatal(err) + } + defer reader.Close() + + ts, err := NewTarSum(reader, false, Version0) + if err != nil { + t.Fatal(err) + } + + // Read and discard bytes so that it populates sums + _, err = io.Copy(ioutil.Discard, ts) + if err != nil { + t.Errorf("failed to read from %s: %s", filename, err) + } + + expected := len(ts.GetSums()) + + ts.(BuilderContext).Remove("") + ts.(BuilderContext).Remove("Anything") + + if len(ts.GetSums()) != expected { + t.Fatalf("Expected %v sums, go %v.", expected, ts.GetSums()) + } +} + +// Remove a tarsum (in the BuilderContext) +func TestTarSumRemove(t *testing.T) { + filename := "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar" + reader, err := os.Open(filename) + if err != nil { + t.Fatal(err) + } + defer reader.Close() + + ts, err := NewTarSum(reader, false, Version0) + if err != nil { + t.Fatal(err) + } + + // Read and discard bytes so that it populates sums + _, err = io.Copy(ioutil.Discard, ts) + if err != nil { + t.Errorf("failed to read from %s: %s", filename, err) + } + + expected := len(ts.GetSums()) - 1 + + 
ts.(BuilderContext).Remove("etc/sudoers")
+
+	if len(ts.GetSums()) != expected {
+		t.Fatalf("Expected %v sums, got %v.", expected, len(ts.GetSums()))
+	}
+}
diff --git a/vendor/github.com/moby/moby/pkg/tarsum/fileinfosums.go b/vendor/github.com/moby/moby/pkg/tarsum/fileinfosums.go
new file mode 100644
index 000000000..5abf5e7ba
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/tarsum/fileinfosums.go
@@ -0,0 +1,126 @@
+package tarsum
+
+import "sort"
+
+// FileInfoSumInterface provides an interface for accessing file checksum
+// information within a tar file. This info is accessed through an interface
+// so the actual name and sum cannot be meddled with.
+type FileInfoSumInterface interface {
+	// File name
+	Name() string
+	// Checksum of this particular file and its headers
+	Sum() string
+	// Position of file in the tar
+	Pos() int64
+}
+
+type fileInfoSum struct {
+	name string
+	sum  string
+	pos  int64
+}
+
+func (fis fileInfoSum) Name() string {
+	return fis.name
+}
+func (fis fileInfoSum) Sum() string {
+	return fis.sum
+}
+func (fis fileInfoSum) Pos() int64 {
+	return fis.pos
+}
+
+// FileInfoSums provides a list of FileInfoSumInterfaces.
+type FileInfoSums []FileInfoSumInterface
+
+// GetFile returns the first FileInfoSumInterface with a matching name.
+func (fis FileInfoSums) GetFile(name string) FileInfoSumInterface {
+	for i := range fis {
+		if fis[i].Name() == name {
+			return fis[i]
+		}
+	}
+	return nil
+}
+
+// GetAllFile returns a FileInfoSums with all matching names.
+func (fis FileInfoSums) GetAllFile(name string) FileInfoSums {
+	f := FileInfoSums{}
+	for i := range fis {
+		if fis[i].Name() == name {
+			f = append(f, fis[i])
+		}
+	}
+	return f
+}
+
+// GetDuplicatePaths returns a FileInfoSums with all duplicated paths.
+func (fis FileInfoSums) GetDuplicatePaths() (dups FileInfoSums) {
+	seen := make(map[string]int, len(fis)) // allocate early; no need to grow this map
+	for i := range fis {
+		f := fis[i]
+		if _, ok := seen[f.Name()]; ok {
+			dups = append(dups, f)
+		} else {
+			seen[f.Name()] = 0
+		}
+	}
+	return dups
+}
+
+// Len returns the size of the FileInfoSums.
+func (fis FileInfoSums) Len() int { return len(fis) }
+
+// Swap swaps two FileInfoSum values in a FileInfoSums list.
+func (fis FileInfoSums) Swap(i, j int) { fis[i], fis[j] = fis[j], fis[i] }
+
+// SortByPos sorts FileInfoSums content by position.
+func (fis FileInfoSums) SortByPos() {
+	sort.Sort(byPos{fis})
+}
+
+// SortByNames sorts FileInfoSums content by name.
+func (fis FileInfoSums) SortByNames() {
+	sort.Sort(byName{fis})
+}
+
+// SortBySums sorts FileInfoSums content by sums.
+func (fis FileInfoSums) SortBySums() {
+	dups := fis.GetDuplicatePaths()
+	if len(dups) > 0 {
+		sort.Sort(bySum{fis, dups})
+	} else {
+		sort.Sort(bySum{fis, nil})
+	}
+}
+
+// byName is a sort.Sort helper for sorting by file names.
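Since `fileInfoSum` is unexported, external code reaches this list through `TarSum.GetSums()`. A hedged sketch of spotting duplicate paths (a tar can legally contain the same path twice, as the collision test fixtures further down exercise); the archive name is hypothetical:

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"

	"github.com/moby/moby/pkg/tarsum" // import path assumed from the vendored tree above
)

func main() {
	f, err := os.Open("layer.tar") // hypothetical archive
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	ts, err := tarsum.NewTarSum(f, true, tarsum.Version0)
	if err != nil {
		log.Fatal(err)
	}
	// Drain the reader; sums are only populated as the tar is read.
	if _, err := io.Copy(ioutil.Discard, ts); err != nil {
		log.Fatal(err)
	}
	for _, d := range ts.GetSums().GetDuplicatePaths() {
		fmt.Printf("duplicate %s at pos %d\n", d.Name(), d.Pos())
	}
}
```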
+// If names are the same, order them by their appearance in the tar archive +type byName struct{ FileInfoSums } + +func (bn byName) Less(i, j int) bool { + if bn.FileInfoSums[i].Name() == bn.FileInfoSums[j].Name() { + return bn.FileInfoSums[i].Pos() < bn.FileInfoSums[j].Pos() + } + return bn.FileInfoSums[i].Name() < bn.FileInfoSums[j].Name() +} + +// bySum is a sort.Sort helper for sorting by the sums of all the fileinfos in the tar archive +type bySum struct { + FileInfoSums + dups FileInfoSums +} + +func (bs bySum) Less(i, j int) bool { + if bs.dups != nil && bs.FileInfoSums[i].Name() == bs.FileInfoSums[j].Name() { + return bs.FileInfoSums[i].Pos() < bs.FileInfoSums[j].Pos() + } + return bs.FileInfoSums[i].Sum() < bs.FileInfoSums[j].Sum() +} + +// byPos is a sort.Sort helper for sorting by the sums of all the fileinfos by their original order +type byPos struct{ FileInfoSums } + +func (bp byPos) Less(i, j int) bool { + return bp.FileInfoSums[i].Pos() < bp.FileInfoSums[j].Pos() +} diff --git a/vendor/github.com/moby/moby/pkg/tarsum/fileinfosums_test.go b/vendor/github.com/moby/moby/pkg/tarsum/fileinfosums_test.go new file mode 100644 index 000000000..2e243d53d --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/tarsum/fileinfosums_test.go @@ -0,0 +1,62 @@ +package tarsum + +import "testing" + +func newFileInfoSums() FileInfoSums { + return FileInfoSums{ + fileInfoSum{name: "file3", sum: "2abcdef1234567890", pos: 2}, + fileInfoSum{name: "dup1", sum: "deadbeef1", pos: 5}, + fileInfoSum{name: "file1", sum: "0abcdef1234567890", pos: 0}, + fileInfoSum{name: "file4", sum: "3abcdef1234567890", pos: 3}, + fileInfoSum{name: "dup1", sum: "deadbeef0", pos: 4}, + fileInfoSum{name: "file2", sum: "1abcdef1234567890", pos: 1}, + } +} + +func TestSortFileInfoSums(t *testing.T) { + dups := newFileInfoSums().GetAllFile("dup1") + if len(dups) != 2 { + t.Errorf("expected length 2, got %d", len(dups)) + } + dups.SortByNames() + if dups[0].Pos() != 4 { + t.Errorf("sorted dups should be ordered by position. Expected 4, got %d", dups[0].Pos()) + } + + fis := newFileInfoSums() + expected := "0abcdef1234567890" + fis.SortBySums() + got := fis[0].Sum() + if got != expected { + t.Errorf("Expected %q, got %q", expected, got) + } + + fis = newFileInfoSums() + expected = "dup1" + fis.SortByNames() + gotFis := fis[0] + if gotFis.Name() != expected { + t.Errorf("Expected %q, got %q", expected, gotFis.Name()) + } + // since a duplicate is first, ensure it is ordered first by position too + if gotFis.Pos() != 4 { + t.Errorf("Expected %d, got %d", 4, gotFis.Pos()) + } + + fis = newFileInfoSums() + fis.SortByPos() + if fis[0].Pos() != 0 { + t.Error("sorted fileInfoSums by Pos should order them by position.") + } + + fis = newFileInfoSums() + expected = "deadbeef1" + gotFileInfoSum := fis.GetFile("dup1") + if gotFileInfoSum.Sum() != expected { + t.Errorf("Expected %q, got %q", expected, gotFileInfoSum) + } + if fis.GetFile("noPresent") != nil { + t.Error("Should have return nil if name not found.") + } + +} diff --git a/vendor/github.com/moby/moby/pkg/tarsum/tarsum.go b/vendor/github.com/moby/moby/pkg/tarsum/tarsum.go new file mode 100644 index 000000000..154788db8 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/tarsum/tarsum.go @@ -0,0 +1,295 @@ +// Package tarsum provides algorithms to perform checksum calculation on +// filesystem layers. +// +// The transportation of filesystems, regarding Docker, is done with tar(1) +// archives. 
There are a variety of tar serialization formats [2], and a key +// concern here is ensuring a repeatable checksum given a set of inputs from a +// generic tar archive. Types of transportation include distribution to and from a +// registry endpoint, saving and loading through commands or Docker daemon APIs, +// transferring the build context from client to Docker daemon, and committing the +// filesystem of a container to become an image. +// +// As tar archives are used for transit, but not preserved in many situations, the +// focus of the algorithm is to ensure the integrity of the preserved filesystem, +// while maintaining a deterministic accountability. This includes neither +// constraining the ordering or manipulation of the files during the creation or +// unpacking of the archive, nor include additional metadata state about the file +// system attributes. +package tarsum + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "crypto" + "crypto/sha256" + "encoding/hex" + "errors" + "fmt" + "hash" + "io" + "path" + "strings" +) + +const ( + buf8K = 8 * 1024 + buf16K = 16 * 1024 + buf32K = 32 * 1024 +) + +// NewTarSum creates a new interface for calculating a fixed time checksum of a +// tar archive. +// +// This is used for calculating checksums of layers of an image, in some cases +// including the byte payload of the image's json metadata as well, and for +// calculating the checksums for buildcache. +func NewTarSum(r io.Reader, dc bool, v Version) (TarSum, error) { + return NewTarSumHash(r, dc, v, DefaultTHash) +} + +// NewTarSumHash creates a new TarSum, providing a THash to use rather than +// the DefaultTHash. +func NewTarSumHash(r io.Reader, dc bool, v Version, tHash THash) (TarSum, error) { + headerSelector, err := getTarHeaderSelector(v) + if err != nil { + return nil, err + } + ts := &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash} + err = ts.initTarSum() + return ts, err +} + +// NewTarSumForLabel creates a new TarSum using the provided TarSum version+hash label. +func NewTarSumForLabel(r io.Reader, disableCompression bool, label string) (TarSum, error) { + parts := strings.SplitN(label, "+", 2) + if len(parts) != 2 { + return nil, errors.New("tarsum label string should be of the form: {tarsum_version}+{hash_name}") + } + + versionName, hashName := parts[0], parts[1] + + version, ok := tarSumVersionsByName[versionName] + if !ok { + return nil, fmt.Errorf("unknown TarSum version name: %q", versionName) + } + + hashConfig, ok := standardHashConfigs[hashName] + if !ok { + return nil, fmt.Errorf("unknown TarSum hash name: %q", hashName) + } + + tHash := NewTHash(hashConfig.name, hashConfig.hash.New) + + return NewTarSumHash(r, disableCompression, version, tHash) +} + +// TarSum is the generic interface for calculating fixed time +// checksums of a tar archive. +type TarSum interface { + io.Reader + GetSums() FileInfoSums + Sum([]byte) string + Version() Version + Hash() THash +} + +// tarSum struct is the structure for a Version0 checksum calculation. +type tarSum struct { + io.Reader + tarR *tar.Reader + tarW *tar.Writer + writer writeCloseFlusher + bufTar *bytes.Buffer + bufWriter *bytes.Buffer + bufData []byte + h hash.Hash + tHash THash + sums FileInfoSums + fileCounter int64 + currentFile string + finished bool + first bool + DisableCompression bool // false by default. When false, the output gzip compressed. 
+ tarSumVersion Version // this field is not exported so it can not be mutated during use + headerSelector tarHeaderSelector // handles selecting and ordering headers for files in the archive +} + +func (ts tarSum) Hash() THash { + return ts.tHash +} + +func (ts tarSum) Version() Version { + return ts.tarSumVersion +} + +// THash provides a hash.Hash type generator and its name. +type THash interface { + Hash() hash.Hash + Name() string +} + +// NewTHash is a convenience method for creating a THash. +func NewTHash(name string, h func() hash.Hash) THash { + return simpleTHash{n: name, h: h} +} + +type tHashConfig struct { + name string + hash crypto.Hash +} + +var ( + // NOTE: DO NOT include MD5 or SHA1, which are considered insecure. + standardHashConfigs = map[string]tHashConfig{ + "sha256": {name: "sha256", hash: crypto.SHA256}, + "sha512": {name: "sha512", hash: crypto.SHA512}, + } +) + +// DefaultTHash is default TarSum hashing algorithm - "sha256". +var DefaultTHash = NewTHash("sha256", sha256.New) + +type simpleTHash struct { + n string + h func() hash.Hash +} + +func (sth simpleTHash) Name() string { return sth.n } +func (sth simpleTHash) Hash() hash.Hash { return sth.h() } + +func (ts *tarSum) encodeHeader(h *tar.Header) error { + for _, elem := range ts.headerSelector.selectHeaders(h) { + if _, err := ts.h.Write([]byte(elem[0] + elem[1])); err != nil { + return err + } + } + return nil +} + +func (ts *tarSum) initTarSum() error { + ts.bufTar = bytes.NewBuffer([]byte{}) + ts.bufWriter = bytes.NewBuffer([]byte{}) + ts.tarR = tar.NewReader(ts.Reader) + ts.tarW = tar.NewWriter(ts.bufTar) + if !ts.DisableCompression { + ts.writer = gzip.NewWriter(ts.bufWriter) + } else { + ts.writer = &nopCloseFlusher{Writer: ts.bufWriter} + } + if ts.tHash == nil { + ts.tHash = DefaultTHash + } + ts.h = ts.tHash.Hash() + ts.h.Reset() + ts.first = true + ts.sums = FileInfoSums{} + return nil +} + +func (ts *tarSum) Read(buf []byte) (int, error) { + if ts.finished { + return ts.bufWriter.Read(buf) + } + if len(ts.bufData) < len(buf) { + switch { + case len(buf) <= buf8K: + ts.bufData = make([]byte, buf8K) + case len(buf) <= buf16K: + ts.bufData = make([]byte, buf16K) + case len(buf) <= buf32K: + ts.bufData = make([]byte, buf32K) + default: + ts.bufData = make([]byte, len(buf)) + } + } + buf2 := ts.bufData[:len(buf)] + + n, err := ts.tarR.Read(buf2) + if err != nil { + if err == io.EOF { + if _, err := ts.h.Write(buf2[:n]); err != nil { + return 0, err + } + if !ts.first { + ts.sums = append(ts.sums, fileInfoSum{name: ts.currentFile, sum: hex.EncodeToString(ts.h.Sum(nil)), pos: ts.fileCounter}) + ts.fileCounter++ + ts.h.Reset() + } else { + ts.first = false + } + + currentHeader, err := ts.tarR.Next() + if err != nil { + if err == io.EOF { + if err := ts.tarW.Close(); err != nil { + return 0, err + } + if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + if err := ts.writer.Close(); err != nil { + return 0, err + } + ts.finished = true + return n, nil + } + return n, err + } + ts.currentFile = path.Clean(currentHeader.Name) + if err := ts.encodeHeader(currentHeader); err != nil { + return 0, err + } + if err := ts.tarW.WriteHeader(currentHeader); err != nil { + return 0, err + } + if _, err := ts.tarW.Write(buf2[:n]); err != nil { + return 0, err + } + ts.tarW.Flush() + if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + ts.writer.Flush() + + return ts.bufWriter.Read(buf) + } + return n, err + } + + // Filling the hash buffer + if _, err = 
ts.h.Write(buf2[:n]); err != nil { + return 0, err + } + + // Filling the tar writer + if _, err = ts.tarW.Write(buf2[:n]); err != nil { + return 0, err + } + ts.tarW.Flush() + + // Filling the output writer + if _, err = io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + ts.writer.Flush() + + return ts.bufWriter.Read(buf) +} + +func (ts *tarSum) Sum(extra []byte) string { + ts.sums.SortBySums() + h := ts.tHash.Hash() + if extra != nil { + h.Write(extra) + } + for _, fis := range ts.sums { + h.Write([]byte(fis.Sum())) + } + checksum := ts.Version().String() + "+" + ts.tHash.Name() + ":" + hex.EncodeToString(h.Sum(nil)) + return checksum +} + +func (ts *tarSum) GetSums() FileInfoSums { + return ts.sums +} diff --git a/vendor/github.com/moby/moby/pkg/tarsum/tarsum_spec.md b/vendor/github.com/moby/moby/pkg/tarsum/tarsum_spec.md new file mode 100644 index 000000000..89b2e49f9 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/tarsum/tarsum_spec.md @@ -0,0 +1,230 @@ +page_title: TarSum checksum specification +page_description: Documentation for algorithms used in the TarSum checksum calculation +page_keywords: docker, checksum, validation, tarsum + +# TarSum Checksum Specification + +## Abstract + +This document describes the algorithms used in performing the TarSum checksum +calculation on filesystem layers, the need for this method over existing +methods, and the versioning of this calculation. + +## Warning + +This checksum algorithm is for best-effort comparison of file trees with fuzzy logic. + +This is _not_ a cryptographic attestation, and should not be considered secure. + +## Introduction + +The transportation of filesystems, regarding Docker, is done with tar(1) +archives. There are a variety of tar serialization formats [2], and a key +concern here is ensuring a repeatable checksum given a set of inputs from a +generic tar archive. Types of transportation include distribution to and from a +registry endpoint, saving and loading through commands or Docker daemon APIs, +transferring the build context from client to Docker daemon, and committing the +filesystem of a container to become an image. + +As tar archives are used for transit, but not preserved in many situations, the +focus of the algorithm is to ensure the integrity of the preserved filesystem, +while maintaining a deterministic accountability. This includes neither +constraining the ordering or manipulation of the files during the creation or +unpacking of the archive, nor include additional metadata state about the file +system attributes. + +## Intended Audience + +This document is outlining the methods used for consistent checksum calculation +for filesystems transported via tar archives. + +Auditing these methodologies is an open and iterative process. This document +should accommodate the review of source code. Ultimately, this document should +be the starting point of further refinements to the algorithm and its future +versions. + +## Concept + +The checksum mechanism must ensure the integrity and assurance of the +filesystem payload. + +## Checksum Algorithm Profile + +A checksum mechanism must define the following operations and attributes: + +* Associated hashing cipher - used to checksum each file payload and attribute + information. +* Checksum list - each file of the filesystem archive has its checksum + calculated from the payload and attributes of the file. The final checksum is + calculated from this list, with specific ordering. 
+* Version - as the algorithm adapts to requirements, there are behaviors of the
+  algorithm to manage by versioning.
+* Archive being calculated - the tar archive having its checksum calculated
+
+## Elements of TarSum checksum
+
+The calculated sum output is a text string. The elements included in the output
+of the calculated sum comprise the information needed for validation of the sum
+(TarSum version and hashing cipher used) and the expected checksum in hexadecimal
+form.
+
+There are two delimiters used:
+* '+' separates TarSum version from hashing cipher
+* ':' separates calculation mechanics from expected hash
+
+Example:
+
+```
+	"tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e"
+	|         |       \                                                               |
+	|         |        \                                                              |
+	|_version_|_cipher__|__                                                           |
+	|                      \                                                          |
+	|_calculation_mechanics_|______________________expected_sum_______________________|
+```
+
+## Versioning
+
+Versioning was introduced [0] to accommodate differences in calculation needs,
+and the ability to maintain backward compatibility.
+
+The general algorithm is described further in the 'Calculation' section.
+
+### Version0
+
+This is the initial version of TarSum.
+
+Its element in the TarSum checksum string is `tarsum`.
+
+### Version1
+
+Its element in the TarSum checksum is `tarsum.v1`.
+
+The notable changes in this version:
+* Exclusion of file `mtime` from the file information headers, in each file
+  checksum calculation
+* Inclusion of extended attributes (`xattrs`, also seen as `SCHILY.xattr.`-prefixed pax
+  tar file info headers) keys and values in each file checksum calculation
+
+### VersionDev
+
+*Do not use unless validating refinements to the checksum algorithm*
+
+Its element in the TarSum checksum is `tarsum.dev`.
+
+This is a floating placeholder for a next version and grounds for testing
+changes. The methods used for calculation are subject to change without notice,
+and this version is for testing and not for production use.
+
+## Ciphers
+
+The official default and standard hashing cipher used in the calculation mechanic
+is `sha256`. This refers to the SHA256 hash algorithm as defined in FIPS 180-4.
+
+Though the TarSum algorithm itself is not exclusively bound to the single
+hashing cipher `sha256`, support for alternate hashing ciphers was later added
+[1]. Use cases for an alternate cipher could include future-proofing the TarSum
+checksum format and using faster cipher hashes for tar filesystem checksums.
+
+## Calculation
+
+### Requirement
+
+As mentioned earlier, the calculation is such that it takes into consideration
+the lifecycle of the tar archive, in that the tar archive is not an immutable,
+permanent artifact. Otherwise options like relying on a known hashing cipher
+checksum of the archive itself would be reliable enough. The tar archive of the
+filesystem is used as a transportation medium for Docker images, and the
+archive is discarded once its contents are extracted. Therefore, for consistent
+validation, items such as order of files in the tar archive and time stamps are
+subject to change once an image is received.
+
+### Process
+
+The method is typically iterative due to reading tar info headers from the
+archive stream, though this is not a strict requirement.
+
+#### Files
+
+Each file in the tar archive has its contents (headers and body) checksummed
+individually using the designated associated hashing cipher. The ordered
+headers of the file are written to the checksum calculation first, and then the
+payload of the file body, as sketched below.
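Illustrative only, not the exact code from tarsum.go: the per-file sum described here boils down to hashing the ordered header pairs and then the body. `selectedHeaders` is a hypothetical stand-in for the version-specific header selection, truncated to the first few entries of the header list in the next section:

```go
package main

import (
	"archive/tar"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"strconv"
	"strings"
)

// selectedHeaders is a hypothetical helper returning ordered {key, value}
// pairs; the real selection covers the full header list below and varies
// by TarSum version.
func selectedHeaders(hdr *tar.Header) [][2]string {
	return [][2]string{
		{"name", hdr.Name},
		{"mode", strconv.FormatInt(hdr.Mode, 10)},
		{"uid", strconv.Itoa(hdr.Uid)},
		{"gid", strconv.Itoa(hdr.Gid)},
		{"size", strconv.FormatInt(hdr.Size, 10)},
	}
}

// perFileSum sketches the spec's per-file calculation: ordered headers
// first, then the file body, into one hash.
func perFileSum(hdr *tar.Header, body io.Reader) (string, error) {
	h := sha256.New()
	for _, kv := range selectedHeaders(hdr) {
		h.Write([]byte(kv[0] + kv[1])) // "{.key}{.value}", no newline
	}
	if _, err := io.Copy(h, body); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
	hdr := &tar.Header{Name: "etc/hosts", Mode: 0644, Size: 5}
	sum, _ := perFileSum(hdr, strings.NewReader("hello"))
	fmt.Println(sum)
}
```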
+
+The resulting checksum of the file is appended to the list of file sums. The
+sum is encoded as a string of the hexadecimal digest. Additionally, the file
+name and position in the archive are kept as a reference for special ordering.
+
+#### Headers
+
+The following headers are read, in this
+order (with the corresponding representation of each value):
+* 'name' - string
+* 'mode' - string of the base10 integer
+* 'uid' - string of the integer
+* 'gid' - string of the integer
+* 'size' - string of the integer
+* 'mtime' (_Version0 only_) - string of integer of the seconds since 1970-01-01 00:00:00 UTC
+* 'typeflag' - string of the char
+* 'linkname' - string
+* 'uname' - string
+* 'gname' - string
+* 'devmajor' - string of the integer
+* 'devminor' - string of the integer
+
+For >= Version1, the extended attribute headers ("SCHILY.xattr." prefixed pax
+headers) are included after the above list. These xattr key/values are first
+sorted by key.
+
+#### Header Format
+
+The ordered headers are written to the hash in the format of
+
+    "{.key}{.value}"
+
+with no newline.
+
+#### Body
+
+After the ordered headers of the file have been added to the checksum for the
+file, the body of the file is written to the hash.
+
+#### List of file sums
+
+The list of file sums is sorted by the string of the hexadecimal digest.
+
+If there are two files in the tar with matching paths, the order of occurrence
+for that path is reflected for the sums of the corresponding file header and
+body.
+
+#### Final Checksum
+
+Begin with a fresh or initial state of the associated hash cipher. If there is
+additional payload to include in the TarSum calculation for the archive, it is
+written first. Then each checksum from the ordered list of file sums is written
+to the hash.
+
+The resulting digest is formatted per the Elements of TarSum checksum,
+including the TarSum version, the associated hash cipher and the hexadecimal
+encoded checksum digest.
+
+## Security Considerations
+
+The initial version of TarSum has undergone one update that could invalidate
+handcrafted tar archives. The tar archive format supports appending of files
+with same names as prior files in the archive. The latter file will clobber the
+prior file of the same path. Due to this, the algorithm now accounts for files
+with matching paths, and orders the list of file sums accordingly [3].
+
+## Footnotes
+
+* [0] Versioning https://github.com/docker/docker/commit/747f89cd327db9d50251b17797c4d825162226d0
+* [1] Alternate ciphers https://github.com/docker/docker/commit/4e9925d780665149b8bc940d5ba242ada1973c4e
+* [2] Tar http://en.wikipedia.org/wiki/Tar_%28computing%29
+* [3] Name collision https://github.com/docker/docker/commit/c5e6362c53cbbc09ddbabd5a7323e04438b57d31
+
+## Acknowledgments
+
+Joffrey F (shin-) and Guillaume J. Charmes (creack) for the initial work on the
+TarSum calculation.
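To tie the spec's two delimiters back to the code, here is a hedged sketch of splitting a TarSum string into its parts, mirroring how `NewTarSumForLabel` above parses the `{version}+{cipher}` label; the string is the spec's own example:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	s := "tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e"

	parts := strings.SplitN(s, ":", 2)        // ':' separates mechanics from digest
	label := strings.SplitN(parts[0], "+", 2) // '+' separates version from cipher

	fmt.Println("version:", label[0]) // tarsum.v1
	fmt.Println("cipher: ", label[1]) // sha256
	fmt.Println("digest: ", parts[1])
}
```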
+ diff --git a/vendor/github.com/moby/moby/pkg/tarsum/tarsum_test.go b/vendor/github.com/moby/moby/pkg/tarsum/tarsum_test.go new file mode 100644 index 000000000..86df0e2b8 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/tarsum/tarsum_test.go @@ -0,0 +1,664 @@ +package tarsum + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "crypto/md5" + "crypto/rand" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "testing" +) + +type testLayer struct { + filename string + options *sizedOptions + jsonfile string + gzip bool + tarsum string + version Version + hash THash +} + +var testLayers = []testLayer{ + { + filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", + jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", + version: Version0, + tarsum: "tarsum+sha256:4095cc12fa5fdb1ab2760377e1cd0c4ecdd3e61b4f9b82319d96fcea6c9a41c6"}, + { + filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", + jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", + version: VersionDev, + tarsum: "tarsum.dev+sha256:db56e35eec6ce65ba1588c20ba6b1ea23743b59e81fb6b7f358ccbde5580345c"}, + { + filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", + jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", + gzip: true, + tarsum: "tarsum+sha256:4095cc12fa5fdb1ab2760377e1cd0c4ecdd3e61b4f9b82319d96fcea6c9a41c6"}, + { + // Tests existing version of TarSum when xattrs are present + filename: "testdata/xattr/layer.tar", + jsonfile: "testdata/xattr/json", + version: Version0, + tarsum: "tarsum+sha256:07e304a8dbcb215b37649fde1a699f8aeea47e60815707f1cdf4d55d25ff6ab4"}, + { + // Tests next version of TarSum when xattrs are present + filename: "testdata/xattr/layer.tar", + jsonfile: "testdata/xattr/json", + version: VersionDev, + tarsum: "tarsum.dev+sha256:6c58917892d77b3b357b0f9ad1e28e1f4ae4de3a8006bd3beb8beda214d8fd16"}, + { + filename: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar", + jsonfile: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json", + tarsum: "tarsum+sha256:c66bd5ec9f87b8f4c6135ca37684618f486a3dd1d113b138d0a177bfa39c2571"}, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha256:8bf12d7e67c51ee2e8306cba569398b1b9f419969521a12ffb9d8875e8836738"}, + { + // this tar has two files with the same path + filename: "testdata/collision/collision-0.tar", + tarsum: "tarsum+sha256:08653904a68d3ab5c59e65ef58c49c1581caa3c34744f8d354b3f575ea04424a"}, + { + // this tar has the same two files (with the same path), but reversed order. 
ensuring is has different hash than above + filename: "testdata/collision/collision-1.tar", + tarsum: "tarsum+sha256:b51c13fbefe158b5ce420d2b930eef54c5cd55c50a2ee4abdddea8fa9f081e0d"}, + { + // this tar has newer of collider-0.tar, ensuring is has different hash + filename: "testdata/collision/collision-2.tar", + tarsum: "tarsum+sha256:381547080919bb82691e995508ae20ed33ce0f6948d41cafbeb70ce20c73ee8e"}, + { + // this tar has newer of collider-1.tar, ensuring is has different hash + filename: "testdata/collision/collision-3.tar", + tarsum: "tarsum+sha256:f886e431c08143164a676805205979cd8fa535dfcef714db5515650eea5a7c0f"}, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+md5:0d7529ec7a8360155b48134b8e599f53", + hash: md5THash, + }, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha1:f1fee39c5925807ff75ef1925e7a23be444ba4df", + hash: sha1Hash, + }, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha224:6319390c0b061d639085d8748b14cd55f697cf9313805218b21cf61c", + hash: sha224Hash, + }, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha384:a578ce3ce29a2ae03b8ed7c26f47d0f75b4fc849557c62454be4b5ffd66ba021e713b48ce71e947b43aab57afd5a7636", + hash: sha384Hash, + }, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha512:e9bfb90ca5a4dfc93c46ee061a5cf9837de6d2fdf82544d6460d3147290aecfabf7b5e415b9b6e72db9b8941f149d5d69fb17a394cbfaf2eac523bd9eae21855", + hash: sha512Hash, + }, +} + +type sizedOptions struct { + num int64 + size int64 + isRand bool + realFile bool +} + +// make a tar: +// * num is the number of files the tar should have +// * size is the bytes per file +// * isRand is whether the contents of the files should be a random chunk (otherwise it's all zeros) +// * realFile will write to a TempFile, instead of an in memory buffer +func sizedTar(opts sizedOptions) io.Reader { + var ( + fh io.ReadWriter + err error + ) + if opts.realFile { + fh, err = ioutil.TempFile("", "tarsum") + if err != nil { + return nil + } + } else { + fh = bytes.NewBuffer([]byte{}) + } + tarW := tar.NewWriter(fh) + defer tarW.Close() + for i := int64(0); i < opts.num; i++ { + err := tarW.WriteHeader(&tar.Header{ + Name: fmt.Sprintf("/testdata%d", i), + Mode: 0755, + Uid: 0, + Gid: 0, + Size: opts.size, + }) + if err != nil { + return nil + } + var rBuf []byte + if opts.isRand { + rBuf = make([]byte, 8) + _, err = rand.Read(rBuf) + if err != nil { + return nil + } + } else { + rBuf = []byte{0, 0, 0, 0, 0, 0, 0, 0} + } + + for i := int64(0); i < opts.size/int64(8); i++ { + tarW.Write(rBuf) + } + } + return fh +} + +func emptyTarSum(gzip bool) (TarSum, error) { + reader, writer := io.Pipe() + tarWriter := tar.NewWriter(writer) + + // Immediately close tarWriter and write-end of the + // Pipe in a separate goroutine so we don't block. 
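Why an empty archive still produces bytes: closing a `tar.Writer` emits the end-of-archive marker, two 512-byte zero blocks, which is exactly the 1024-byte `zeroBlock` that `TestEmptyTar` below compares against. A quick standalone check:

```go
package main

import (
	"archive/tar"
	"bytes"
	"fmt"
)

func main() {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	tw.Close() // writes the trailer even though nothing was added

	fmt.Println(buf.Len()) // 1024: two zero blocks end the archive
}
```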
+ go func() { + tarWriter.Close() + writer.Close() + }() + + return NewTarSum(reader, !gzip, Version0) +} + +// Test errors on NewTarsumForLabel +func TestNewTarSumForLabelInvalid(t *testing.T) { + reader := strings.NewReader("") + + if _, err := NewTarSumForLabel(reader, true, "invalidlabel"); err == nil { + t.Fatalf("Expected an error, got nothing.") + } + + if _, err := NewTarSumForLabel(reader, true, "invalid+sha256"); err == nil { + t.Fatalf("Expected an error, got nothing.") + } + if _, err := NewTarSumForLabel(reader, true, "tarsum.v1+invalid"); err == nil { + t.Fatalf("Expected an error, got nothing.") + } +} + +func TestNewTarSumForLabel(t *testing.T) { + + layer := testLayers[0] + + reader, err := os.Open(layer.filename) + if err != nil { + t.Fatal(err) + } + defer reader.Close() + + label := strings.Split(layer.tarsum, ":")[0] + ts, err := NewTarSumForLabel(reader, false, label) + if err != nil { + t.Fatal(err) + } + + // Make sure it actually worked by reading a little bit of it + nbByteToRead := 8 * 1024 + dBuf := make([]byte, nbByteToRead) + _, err = ts.Read(dBuf) + if err != nil { + t.Errorf("failed to read %vKB from %s: %s", nbByteToRead, layer.filename, err) + } +} + +// TestEmptyTar tests that tarsum does not fail to read an empty tar +// and correctly returns the hex digest of an empty hash. +func TestEmptyTar(t *testing.T) { + // Test without gzip. + ts, err := emptyTarSum(false) + if err != nil { + t.Fatal(err) + } + + zeroBlock := make([]byte, 1024) + buf := new(bytes.Buffer) + + n, err := io.Copy(buf, ts) + if err != nil { + t.Fatal(err) + } + + if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), zeroBlock) { + t.Fatalf("tarSum did not write the correct number of zeroed bytes: %d", n) + } + + expectedSum := ts.Version().String() + "+sha256:" + hex.EncodeToString(sha256.New().Sum(nil)) + resultSum := ts.Sum(nil) + + if resultSum != expectedSum { + t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) + } + + // Test with gzip. + ts, err = emptyTarSum(true) + if err != nil { + t.Fatal(err) + } + buf.Reset() + + n, err = io.Copy(buf, ts) + if err != nil { + t.Fatal(err) + } + + bufgz := new(bytes.Buffer) + gz := gzip.NewWriter(bufgz) + n, err = io.Copy(gz, bytes.NewBuffer(zeroBlock)) + gz.Close() + gzBytes := bufgz.Bytes() + + if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), gzBytes) { + t.Fatalf("tarSum did not write the correct number of gzipped-zeroed bytes: %d", n) + } + + resultSum = ts.Sum(nil) + + if resultSum != expectedSum { + t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) + } + + // Test without ever actually writing anything. 
+	if ts, err = NewTarSum(bytes.NewReader([]byte{}), true, Version0); err != nil {
+		t.Fatal(err)
+	}
+
+	resultSum = ts.Sum(nil)
+
+	if resultSum != expectedSum {
+		t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum)
+	}
+}
+
+var (
+	md5THash   = NewTHash("md5", md5.New)
+	sha1Hash   = NewTHash("sha1", sha1.New)
+	sha224Hash = NewTHash("sha224", sha256.New224)
+	sha384Hash = NewTHash("sha384", sha512.New384)
+	sha512Hash = NewTHash("sha512", sha512.New)
+)
+
+// Test all the built-in read sizes: buf8K, buf16K, buf32K and more
+func TestTarSumsReadSize(t *testing.T) {
+	// Test always on the same layer (that is big enough)
+	layer := testLayers[0]
+
+	for i := 0; i < 5; i++ {
+		reader, err := os.Open(layer.filename)
+		if err != nil {
+			t.Fatal(err)
+		}
+		defer reader.Close()
+
+		ts, err := NewTarSum(reader, false, layer.version)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		// Read and discard bytes so that it populates sums
+		nbByteToRead := (i + 1) * 8 * 1024
+		dBuf := make([]byte, nbByteToRead)
+		_, err = ts.Read(dBuf)
+		if err != nil {
+			t.Errorf("failed to read %v bytes from %s: %s", nbByteToRead, layer.filename, err)
+			continue
+		}
+	}
+}
+
+func TestTarSums(t *testing.T) {
+	for _, layer := range testLayers {
+		var (
+			fh  io.Reader
+			err error
+		)
+		if len(layer.filename) > 0 {
+			fh, err = os.Open(layer.filename)
+			if err != nil {
+				t.Errorf("failed to open %s: %s", layer.filename, err)
+				continue
+			}
+		} else if layer.options != nil {
+			fh = sizedTar(*layer.options)
+		} else {
+			// What else is there to test?
+			t.Errorf("what to do with %#v", layer)
+			continue
+		}
+		if file, ok := fh.(*os.File); ok {
+			defer file.Close()
+		}
+
+		var ts TarSum
+		if layer.hash == nil {
+			// double negatives! (the second argument disables compression, hence !layer.gzip)
+			ts, err = NewTarSum(fh, !layer.gzip, layer.version)
+		} else {
+			ts, err = NewTarSumHash(fh, !layer.gzip, layer.version, layer.hash)
+		}
+		if err != nil {
+			t.Errorf("%q :: %q", err, layer.filename)
+			continue
+		}
+
+		// Read variable number of bytes to test dynamic buffer
+		dBuf := make([]byte, 1)
+		_, err = ts.Read(dBuf)
+		if err != nil {
+			t.Errorf("failed to read 1B from %s: %s", layer.filename, err)
+			continue
+		}
+		dBuf = make([]byte, 16*1024)
+		_, err = ts.Read(dBuf)
+		if err != nil {
+			t.Errorf("failed to read 16KB from %s: %s", layer.filename, err)
+			continue
+		}
+
+		// Read and discard remaining bytes
+		_, err = io.Copy(ioutil.Discard, ts)
+		if err != nil {
+			t.Errorf("failed to copy from %s: %s", layer.filename, err)
+			continue
+		}
+		var gotSum string
+		if len(layer.jsonfile) > 0 {
+			jfh, err := os.Open(layer.jsonfile)
+			if err != nil {
+				t.Errorf("failed to open %s: %s", layer.jsonfile, err)
+				continue
+			}
+			defer jfh.Close()
+
+			buf, err := ioutil.ReadAll(jfh)
+			if err != nil {
+				t.Errorf("failed to readAll %s: %s", layer.jsonfile, err)
+				continue
+			}
+			gotSum = ts.Sum(buf)
+		} else {
+			gotSum = ts.Sum(nil)
+		}
+
+		if layer.tarsum != gotSum {
+			t.Errorf("expecting [%s], but got [%s]", layer.tarsum, gotSum)
+		}
+		var expectedHashName string
+		if layer.hash != nil {
+			expectedHashName = layer.hash.Name()
+		} else {
+			expectedHashName = DefaultTHash.Name()
+		}
+		if expectedHashName != ts.Hash().Name() {
+			t.Errorf("expecting hash [%v], but got [%s]", expectedHashName, ts.Hash().Name())
+		}
+	}
+}
+
+func TestIteration(t *testing.T) {
+	headerTests := []struct {
+		expectedSum string // TODO(vbatts) it would be nice to get individual sums of each
+		version     Version
+		hdr         *tar.Header
+		data        []byte
+	}{
+		{
+			"tarsum+sha256:626c4a2e9a467d65c33ae81f7f3dedd4de8ccaee72af73223c4bc4718cbc7bbd",
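+			// a zero-length regular file: only the selected header
+			// fields contribute bytes to this checksum.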
+ Version0, + &tar.Header{ + Name: "file.txt", + Size: 0, + Typeflag: tar.TypeReg, + Devminor: 0, + Devmajor: 0, + }, + []byte(""), + }, + { + "tarsum.dev+sha256:6ffd43a1573a9913325b4918e124ee982a99c0f3cba90fc032a65f5e20bdd465", + VersionDev, + &tar.Header{ + Name: "file.txt", + Size: 0, + Typeflag: tar.TypeReg, + Devminor: 0, + Devmajor: 0, + }, + []byte(""), + }, + { + "tarsum.dev+sha256:b38166c059e11fb77bef30bf16fba7584446e80fcc156ff46d47e36c5305d8ef", + VersionDev, + &tar.Header{ + Name: "another.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Devminor: 0, + Devmajor: 0, + }, + []byte("test"), + }, + { + "tarsum.dev+sha256:4cc2e71ac5d31833ab2be9b4f7842a14ce595ec96a37af4ed08f87bc374228cd", + VersionDev, + &tar.Header{ + Name: "xattrs.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Xattrs: map[string]string{ + "user.key1": "value1", + "user.key2": "value2", + }, + }, + []byte("test"), + }, + { + "tarsum.dev+sha256:65f4284fa32c0d4112dd93c3637697805866415b570587e4fd266af241503760", + VersionDev, + &tar.Header{ + Name: "xattrs.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Xattrs: map[string]string{ + "user.KEY1": "value1", // adding different case to ensure different sum + "user.key2": "value2", + }, + }, + []byte("test"), + }, + { + "tarsum+sha256:c12bb6f1303a9ddbf4576c52da74973c00d14c109bcfa76b708d5da1154a07fa", + Version0, + &tar.Header{ + Name: "xattrs.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Xattrs: map[string]string{ + "user.NOT": "CALCULATED", + }, + }, + []byte("test"), + }, + } + for _, htest := range headerTests { + s, err := renderSumForHeader(htest.version, htest.hdr, htest.data) + if err != nil { + t.Fatal(err) + } + + if s != htest.expectedSum { + t.Errorf("expected sum: %q, got: %q", htest.expectedSum, s) + } + } + +} + +func renderSumForHeader(v Version, h *tar.Header, data []byte) (string, error) { + buf := bytes.NewBuffer(nil) + // first build our test tar + tw := tar.NewWriter(buf) + if err := tw.WriteHeader(h); err != nil { + return "", err + } + if _, err := tw.Write(data); err != nil { + return "", err + } + tw.Close() + + ts, err := NewTarSum(buf, true, v) + if err != nil { + return "", err + } + tr := tar.NewReader(ts) + for { + hdr, err := tr.Next() + if hdr == nil || err == io.EOF { + // Signals the end of the archive. 
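+			// (tar.Reader.Next returns io.EOF once the trailer is
+			// consumed; the nil-header check is just a defensive guard.)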
+ break + } + if err != nil { + return "", err + } + if _, err = io.Copy(ioutil.Discard, tr); err != nil { + return "", err + } + } + return ts.Sum(nil), nil +} + +func Benchmark9kTar(b *testing.B) { + buf := bytes.NewBuffer([]byte{}) + fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") + if err != nil { + b.Error(err) + return + } + defer fh.Close() + + n, err := io.Copy(buf, fh) + if err != nil { + b.Error(err) + return + } + + reader := bytes.NewReader(buf.Bytes()) + + b.SetBytes(n) + b.ResetTimer() + for i := 0; i < b.N; i++ { + reader.Seek(0, 0) + ts, err := NewTarSum(reader, true, Version0) + if err != nil { + b.Error(err) + return + } + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + } +} + +func Benchmark9kTarGzip(b *testing.B) { + buf := bytes.NewBuffer([]byte{}) + fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") + if err != nil { + b.Error(err) + return + } + defer fh.Close() + + n, err := io.Copy(buf, fh) + if err != nil { + b.Error(err) + return + } + + reader := bytes.NewReader(buf.Bytes()) + + b.SetBytes(n) + b.ResetTimer() + for i := 0; i < b.N; i++ { + reader.Seek(0, 0) + ts, err := NewTarSum(reader, false, Version0) + if err != nil { + b.Error(err) + return + } + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + } +} + +// this is a single big file in the tar archive +func Benchmark1mbSingleFileTar(b *testing.B) { + benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, false) +} + +// this is a single big file in the tar archive +func Benchmark1mbSingleFileTarGzip(b *testing.B) { + benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, true) +} + +// this is 1024 1k files in the tar archive +func Benchmark1kFilesTar(b *testing.B) { + benchmarkTar(b, sizedOptions{1024, 1024, true, true}, false) +} + +// this is 1024 1k files in the tar archive +func Benchmark1kFilesTarGzip(b *testing.B) { + benchmarkTar(b, sizedOptions{1024, 1024, true, true}, true) +} + +func benchmarkTar(b *testing.B, opts sizedOptions, isGzip bool) { + var fh *os.File + tarReader := sizedTar(opts) + if br, ok := tarReader.(*os.File); ok { + fh = br + } + defer os.Remove(fh.Name()) + defer fh.Close() + + b.SetBytes(opts.size * opts.num) + b.ResetTimer() + for i := 0; i < b.N; i++ { + ts, err := NewTarSum(fh, !isGzip, Version0) + if err != nil { + b.Error(err) + return + } + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + fh.Seek(0, 0) + } +} diff --git a/vendor/github.com/moby/moby/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json b/vendor/github.com/moby/moby/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json new file mode 100644 index 000000000..48e2af349 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json @@ -0,0 +1 @@ +{"id":"46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457","parent":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","created":"2014-04-07T02:45:52.610504484Z","container":"e0f07f8d72cae171a3dcc35859960e7e956e0628bce6fedc4122bf55b2c287c7","container_config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","sed -ri 
's/^(%wheel.*)(ALL)$/\\1NOPASSWD: \\2/' /etc/sudoers"],"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"docker_version":"0.9.1-dev","config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":null,"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"architecture":"amd64","os":"linux","Size":3425} \ No newline at end of file diff --git a/vendor/github.com/moby/moby/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar b/vendor/github.com/moby/moby/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar new file mode 100644 index 0000000000000000000000000000000000000000..dfd5c204aea77673f13fdd2f81cb4af1c155c00c GIT binary patch literal 9216 zcmeHMYfsx)8s=;H6|bl&iYAZ?p-5<1$(y*vYHk~c?XX{vu}|fzRr1|&zyvK1! zQq)nWWVPA}63Myvy*}^F5Qtg*V8=g=M!Ru&adFTnf40B*^q|=~Z#CM@#>M%EgGRH_ zXtfULV#j(J_Jz`34wZgZ*0ym!%kRHL9{_(p&BZRoHJYu)<>loz?$!PU{9Bjp<^i?p zS)Tg!r=9Az$G@(0Ao6^75%A;qpMSV)ukcqQn%1X5y|oh!_xLmZX`y%GUBmQG;D6af z{a@yPg@1D=8t(B&ZtcXgE2ck=f9pf*x&ANlU$J}L#UB59rsJ=#>(otde**vZ1?PXJ z)y|dMh8z!Kfh=;zN!B|J)*y8)L$Hbq5c2K_rK=l{{8R8czxwV#$Odd zDsuJ8oS)h8`+U3IsNVOszdy8F?XCC!X1jHMK)Xr!XT8koFP{Hz-;!IxPhJ$Ib48h# zYv~t}ms6n-7Nk?ki-cxgF4IDhpT@D51d2R$2x=V)%F|Svhif#KI>gHaB|@O7JU(A% zo>KEP56(cuboN&-&LROexgfmf&txD1^0c9NNVQI5N~dNwm64!nnnQFH317=JF`{vu zi^$WUtCWHQq4Y!Yy@W{oRoV29sUd<=@!~sJ;!ok8>_qYfz|Ch12+9P6$8i`#qvqS zhsLT-8QL!zwhRx(aXaYF&PwD5LLOm%T#Ds>) z{YV0A>qPL*aFLnz9*nfyl@!I3_Ss=Y=MKNEA zG8|$lPj#9`#(W1sgCgK@f)P?2A)0uPB8Gf6TLITOAl@|29e$jAvBox=W-QCrr59N% zKg$7Xy=69F7QR_X7D_-i2hs*J)6%&RIBr9LDPPP_-? z-X`DPuwzY(j+Gk=rWL_Msfvvp-prW$3W(MwPPgEZO^EI!{*XIAuLp zlpj9k85vO{{2kR4hD{4c;~{+QmhNVfq;xeepJc>QQ@QJfEkdQVBbPJuiA~nsv9l~O zrN&UpxC9i`6;rQ>v?7%WUrr@(gXOs4JE=IN=}4(?RS=2GEd9-ogTEiuP>Fqyb6;vM ziV-Q;Z|ZT?Vz^rPk?`^}6a`cC_=9V1=*>jc&y0jq{h|=m&BK+Jpv}ea1?sKVi^Gj` zk<9K*;4?gK^?Jl6-g0L4kQcX>OZUHi{>Odi#u~f!gnqSdCpW{f zGr2q31WO6O$i;nz9#NH-D^8Rv6Xcv%XFkhmyBsZ;8k2ftd;fPtN1v+`G zPRv~5E)wm1y}~(Py9GwK;`;9K2C_2#(Rc=qFBTa z>?ZUNHvSmq9G9)M%0u+CW!J=jv1~Clz-avUIImk%<&=a9uI;2EY~~stiCKTsh|Oow<5; z$eY1%WV!B_?iFikc)C2TV46YQucl=WfmM#jY|_4sK>Njf)j#u#Y{x@V_A!c2o<`D? 
zX*2YQ4A)U054Qh4y3hVk?0?5^Us~rh*TViU9vl!r009ILKmY**5I_I{1Q0*~0R#|0 Y009ILKmY**5I_I{1Q0*~fqxTt0{2EK)Bpeg literal 0 HcmV?d00001 diff --git a/vendor/github.com/moby/moby/pkg/tarsum/testdata/collision/collision-2.tar b/vendor/github.com/moby/moby/pkg/tarsum/testdata/collision/collision-2.tar new file mode 100644 index 0000000000000000000000000000000000000000..7b5c04a9644808851fcccab5c3c240bf342abd93 GIT binary patch literal 10240 zcmeIuF%E+;425COJw=XS2L~?Dp<74P5hRe1I+e8NZ(w35>V(Abzr};)_<@(2e`|Ha`Z>GG~@_KYd${~ON w0tg_000IagfB*srAbVE5xzPBd+@To)G|2840byWhU|?oqf;;~Mb02E{2kHRk de~R-YhD)#rjPU%AB}7JrMnhmU1V%^*0091(G-Ch& literal 0 HcmV?d00001 diff --git a/vendor/github.com/moby/moby/pkg/tarsum/versioning.go b/vendor/github.com/moby/moby/pkg/tarsum/versioning.go new file mode 100644 index 000000000..a62cc3ebc --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/tarsum/versioning.go @@ -0,0 +1,158 @@ +package tarsum + +import ( + "archive/tar" + "errors" + "io" + "sort" + "strconv" + "strings" +) + +// Version is used for versioning of the TarSum algorithm +// based on the prefix of the hash used +// i.e. "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b" +type Version int + +// Prefix of "tarsum" +const ( + Version0 Version = iota + Version1 + // VersionDev this constant will be either the latest or an unsettled next-version of the TarSum calculation + VersionDev +) + +// WriteV1Header writes a tar header to a writer in V1 tarsum format. +func WriteV1Header(h *tar.Header, w io.Writer) { + for _, elem := range v1TarHeaderSelect(h) { + w.Write([]byte(elem[0] + elem[1])) + } +} + +// VersionLabelForChecksum returns the label for the given tarsum +// checksum, i.e., everything before the first `+` character in +// the string or an empty string if no label separator is found. +func VersionLabelForChecksum(checksum string) string { + // Checksums are in the form: {versionLabel}+{hashID}:{hex} + sepIndex := strings.Index(checksum, "+") + if sepIndex < 0 { + return "" + } + return checksum[:sepIndex] +} + +// GetVersions gets a list of all known tarsum versions. +func GetVersions() []Version { + v := []Version{} + for k := range tarSumVersions { + v = append(v, k) + } + return v +} + +var ( + tarSumVersions = map[Version]string{ + Version0: "tarsum", + Version1: "tarsum.v1", + VersionDev: "tarsum.dev", + } + tarSumVersionsByName = map[string]Version{ + "tarsum": Version0, + "tarsum.v1": Version1, + "tarsum.dev": VersionDev, + } +) + +func (tsv Version) String() string { + return tarSumVersions[tsv] +} + +// GetVersionFromTarsum returns the Version from the provided string. +func GetVersionFromTarsum(tarsum string) (Version, error) { + tsv := tarsum + if strings.Contains(tarsum, "+") { + tsv = strings.SplitN(tarsum, "+", 2)[0] + } + for v, s := range tarSumVersions { + if s == tsv { + return v, nil + } + } + return -1, ErrNotVersion +} + +// Errors that may be returned by functions in this package +var ( + ErrNotVersion = errors.New("string does not include a TarSum Version") + ErrVersionNotImplemented = errors.New("TarSum Version is not yet implemented") +) + +// tarHeaderSelector is the interface which different versions +// of tarsum should use for selecting and ordering tar headers +// for each item in the archive. 
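+// Under the v0 selector, for example, a header reduces to the ordered
+// ("name", ...), ("mode", ...), ... pairs below, whose concatenated values
+// feed the running hash; the v1 selector drops mtime and appends the
+// sorted xattrs.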
+type tarHeaderSelector interface {
+	selectHeaders(h *tar.Header) (orderedHeaders [][2]string)
+}
+
+type tarHeaderSelectFunc func(h *tar.Header) (orderedHeaders [][2]string)
+
+func (f tarHeaderSelectFunc) selectHeaders(h *tar.Header) (orderedHeaders [][2]string) {
+	return f(h)
+}
+
+func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) {
+	return [][2]string{
+		{"name", h.Name},
+		{"mode", strconv.FormatInt(h.Mode, 10)},
+		{"uid", strconv.Itoa(h.Uid)},
+		{"gid", strconv.Itoa(h.Gid)},
+		{"size", strconv.FormatInt(h.Size, 10)},
+		{"mtime", strconv.FormatInt(h.ModTime.UTC().Unix(), 10)},
+		{"typeflag", string([]byte{h.Typeflag})},
+		{"linkname", h.Linkname},
+		{"uname", h.Uname},
+		{"gname", h.Gname},
+		{"devmajor", strconv.FormatInt(h.Devmajor, 10)},
+		{"devminor", strconv.FormatInt(h.Devminor, 10)},
+	}
+}
+
+func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) {
+	// Get extended attributes.
+	// (zero length with capacity: a length-sized make would leave empty
+	// placeholder keys at the front of the sorted slice)
+	xAttrKeys := make([]string, 0, len(h.Xattrs))
+	for k := range h.Xattrs {
+		xAttrKeys = append(xAttrKeys, k)
+	}
+	sort.Strings(xAttrKeys)
+
+	// Make the slice with enough capacity to hold the 11 basic headers
+	// we want from the v0 selector plus however many xattrs we have.
+	orderedHeaders = make([][2]string, 0, 11+len(xAttrKeys))
+
+	// Copy all headers from v0 excluding the 'mtime' header (index 5).
+	v0headers := v0TarHeaderSelect(h)
+	orderedHeaders = append(orderedHeaders, v0headers[0:5]...)
+	orderedHeaders = append(orderedHeaders, v0headers[6:]...)
+
+	// Finally, append the sorted xattrs.
+	for _, k := range xAttrKeys {
+		orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]})
+	}
+
+	return
+}
+
+var registeredHeaderSelectors = map[Version]tarHeaderSelectFunc{
+	Version0:   v0TarHeaderSelect,
+	Version1:   v1TarHeaderSelect,
+	VersionDev: v1TarHeaderSelect,
+}
+
+func getTarHeaderSelector(v Version) (tarHeaderSelector, error) {
+	headerSelector, ok := registeredHeaderSelectors[v]
+	if !ok {
+		return nil, ErrVersionNotImplemented
+	}
+
+	return headerSelector, nil
+}
diff --git a/vendor/github.com/moby/moby/pkg/tarsum/versioning_test.go b/vendor/github.com/moby/moby/pkg/tarsum/versioning_test.go
new file mode 100644
index 000000000..88e0a5783
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/tarsum/versioning_test.go
@@ -0,0 +1,98 @@
+package tarsum
+
+import (
+	"testing"
+)
+
+func TestVersionLabelForChecksum(t *testing.T) {
+	version := VersionLabelForChecksum("tarsum+sha256:deadbeef")
+	if version != "tarsum" {
+		t.Fatalf("Version should have been 'tarsum', was %v", version)
+	}
+	version = VersionLabelForChecksum("tarsum.v1+sha256:deadbeef")
+	if version != "tarsum.v1" {
+		t.Fatalf("Version should have been 'tarsum.v1', was %v", version)
+	}
+	version = VersionLabelForChecksum("something+somethingelse")
+	if version != "something" {
+		t.Fatalf("Version should have been 'something', was %v", version)
+	}
+	version = VersionLabelForChecksum("invalidChecksum")
+	if version != "" {
+		t.Fatalf("Version should have been empty, was %v", version)
+	}
+}
+
+func TestVersion(t *testing.T) {
+	expected := "tarsum"
+	var v Version
+	if v.String() != expected {
+		t.Errorf("expected %q, got %q", expected, v.String())
+	}
+
+	expected = "tarsum.v1"
+	v = 1
+	if v.String() != expected {
+		t.Errorf("expected %q, got %q", expected, v.String())
+	}
+
+	expected = "tarsum.dev"
+	v = 2
+	if v.String() != expected {
+		t.Errorf("expected %q, got %q", expected, v.String())
+	}
+}
+
+func TestGetVersion(t *testing.T) {
+	testSet := []struct {
+		Str string
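+		// Expected is the Version the label portion of Str should map to.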
+		Expected Version
+	}{
+		{"tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", Version0},
+		{"tarsum+sha256", Version0},
+		{"tarsum", Version0},
+		{"tarsum.dev", VersionDev},
+		{"tarsum.dev+sha256:deadbeef", VersionDev},
+	}
+
+	for _, ts := range testSet {
+		v, err := GetVersionFromTarsum(ts.Str)
+		if err != nil {
+			t.Fatalf("%q : %s", err, ts.Str)
+		}
+		if v != ts.Expected {
+			t.Errorf("expected %d (%q), got %d (%q)", ts.Expected, ts.Expected, v, v)
+		}
+	}
+
+	// test one that does not exist, to ensure it errors
+	str := "weak+md5:abcdeabcde"
+	_, err := GetVersionFromTarsum(str)
+	if err != ErrNotVersion {
+		t.Fatalf("%q : %s", err, str)
+	}
+}
+
+func TestGetVersions(t *testing.T) {
+	expected := []Version{
+		Version0,
+		Version1,
+		VersionDev,
+	}
+	versions := GetVersions()
+	if len(versions) != len(expected) {
+		t.Fatalf("Expected %v versions, got %v", len(expected), len(versions))
+	}
+	if !containsVersion(versions, expected[0]) || !containsVersion(versions, expected[1]) || !containsVersion(versions, expected[2]) {
+		t.Fatalf("Expected [%v], got [%v]", expected, versions)
+	}
+}
+
+func containsVersion(versions []Version, version Version) bool {
+	for _, v := range versions {
+		if v == version {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/moby/moby/pkg/tarsum/writercloser.go b/vendor/github.com/moby/moby/pkg/tarsum/writercloser.go
new file mode 100644
index 000000000..9727ecde3
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/tarsum/writercloser.go
@@ -0,0 +1,22 @@
+package tarsum
+
+import (
+	"io"
+)
+
+type writeCloseFlusher interface {
+	io.WriteCloser
+	Flush() error
+}
+
+type nopCloseFlusher struct {
+	io.Writer
+}
+
+func (n *nopCloseFlusher) Close() error {
+	return nil
+}
+
+func (n *nopCloseFlusher) Flush() error {
+	return nil
+}
diff --git a/vendor/github.com/moby/moby/pkg/templates/templates.go b/vendor/github.com/moby/moby/pkg/templates/templates.go
new file mode 100644
index 000000000..d2d7e0c3d
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/templates/templates.go
@@ -0,0 +1,78 @@
+package templates
+
+import (
+	"bytes"
+	"encoding/json"
+	"strings"
+	"text/template"
+)
+
+// basicFunctions are the set of initial
+// functions provided to every template.
+var basicFunctions = template.FuncMap{
+	"json": func(v interface{}) string {
+		buf := &bytes.Buffer{}
+		enc := json.NewEncoder(buf)
+		enc.SetEscapeHTML(false)
+		enc.Encode(v)
+		// Remove the trailing newline added by the encoder
+		return strings.TrimSpace(buf.String())
+	},
+	"split":    strings.Split,
+	"join":     strings.Join,
+	"title":    strings.Title,
+	"lower":    strings.ToLower,
+	"upper":    strings.ToUpper,
+	"pad":      padWithSpace,
+	"truncate": truncateWithLength,
+}
+
+// HeaderFunctions are used to create the headers of a table.
+// This is a replacement for basicFunctions during header generation,
+// because we want the header to remain intact.
+// Some functions, like `split`, are irrelevant here, so they are not included.
+var HeaderFunctions = template.FuncMap{
+	"json": func(v string) string {
+		return v
+	},
+	"title": func(v string) string {
+		return v
+	},
+	"lower": func(v string) string {
+		return v
+	},
+	"upper": func(v string) string {
+		return v
+	},
+	"truncate": func(v string, l int) string {
+		return v
+	},
+}
+
+// Parse creates a new anonymous template with the basic functions
+// and parses the given format.
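+// An illustrative use (a sketch, not part of this changeset):
+//
+//	tmpl, err := Parse(`{{join (split . ":") "/"}}`)
+//	if err == nil {
+//		_ = tmpl.Execute(os.Stdout, "a:b:c") // writes "a/b/c"
+//	}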
+func Parse(format string) (*template.Template, error) { + return NewParse("", format) +} + +// NewParse creates a new tagged template with the basic functions +// and parses the given format. +func NewParse(tag, format string) (*template.Template, error) { + return template.New(tag).Funcs(basicFunctions).Parse(format) +} + +// padWithSpace adds whitespace to the input if the input is non-empty +func padWithSpace(source string, prefix, suffix int) string { + if source == "" { + return source + } + return strings.Repeat(" ", prefix) + source + strings.Repeat(" ", suffix) +} + +// truncateWithLength truncates the source string up to the length provided by the input +func truncateWithLength(source string, length int) string { + if len(source) < length { + return source + } + return source[:length] +} diff --git a/vendor/github.com/moby/moby/pkg/templates/templates_test.go b/vendor/github.com/moby/moby/pkg/templates/templates_test.go new file mode 100644 index 000000000..296bcb710 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/templates/templates_test.go @@ -0,0 +1,88 @@ +package templates + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" +) + +// Github #32120 +func TestParseJSONFunctions(t *testing.T) { + tm, err := Parse(`{{json .Ports}}`) + assert.NoError(t, err) + + var b bytes.Buffer + assert.NoError(t, tm.Execute(&b, map[string]string{"Ports": "0.0.0.0:2->8/udp"})) + want := "\"0.0.0.0:2->8/udp\"" + assert.Equal(t, want, b.String()) +} + +func TestParseStringFunctions(t *testing.T) { + tm, err := Parse(`{{join (split . ":") "/"}}`) + assert.NoError(t, err) + + var b bytes.Buffer + assert.NoError(t, tm.Execute(&b, "text:with:colon")) + want := "text/with/colon" + assert.Equal(t, want, b.String()) +} + +func TestNewParse(t *testing.T) { + tm, err := NewParse("foo", "this is a {{ . }}") + assert.NoError(t, err) + + var b bytes.Buffer + assert.NoError(t, tm.Execute(&b, "string")) + want := "this is a string" + assert.Equal(t, want, b.String()) +} + +func TestParseTruncateFunction(t *testing.T) { + source := "tupx5xzf6hvsrhnruz5cr8gwp" + + testCases := []struct { + template string + expected string + }{ + { + template: `{{truncate . 5}}`, + expected: "tupx5", + }, + { + template: `{{truncate . 25}}`, + expected: "tupx5xzf6hvsrhnruz5cr8gwp", + }, + { + template: `{{truncate . 30}}`, + expected: "tupx5xzf6hvsrhnruz5cr8gwp", + }, + { + template: `{{pad . 
3 3}}`,
+			expected: " tupx5xzf6hvsrhnruz5cr8gwp   ",
+		},
+	}
+
+	for _, testCase := range testCases {
+		tm, err := Parse(testCase.template)
+		assert.NoError(t, err)
+
+		t.Run("Non Empty Source Test with template: "+testCase.template, func(t *testing.T) {
+			var b bytes.Buffer
+			assert.NoError(t, tm.Execute(&b, source))
+			assert.Equal(t, testCase.expected, b.String())
+		})
+
+		t.Run("Empty Source Test with template: "+testCase.template, func(t *testing.T) {
+			var c bytes.Buffer
+			assert.NoError(t, tm.Execute(&c, ""))
+			assert.Equal(t, "", c.String())
+		})
+
+		t.Run("Nil Source Test with template: "+testCase.template, func(t *testing.T) {
+			var c bytes.Buffer
+			assert.Error(t, tm.Execute(&c, nil))
+			assert.Equal(t, "", c.String())
+		})
+	}
+}
diff --git a/vendor/github.com/moby/moby/pkg/term/ascii.go b/vendor/github.com/moby/moby/pkg/term/ascii.go
new file mode 100644
index 000000000..f5262bccf
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/term/ascii.go
@@ -0,0 +1,66 @@
+package term
+
+import (
+	"fmt"
+	"strings"
+)
+
+// ASCII lists the supported ASCII key sequences
+var ASCII = []string{
+	"ctrl-@",
+	"ctrl-a",
+	"ctrl-b",
+	"ctrl-c",
+	"ctrl-d",
+	"ctrl-e",
+	"ctrl-f",
+	"ctrl-g",
+	"ctrl-h",
+	"ctrl-i",
+	"ctrl-j",
+	"ctrl-k",
+	"ctrl-l",
+	"ctrl-m",
+	"ctrl-n",
+	"ctrl-o",
+	"ctrl-p",
+	"ctrl-q",
+	"ctrl-r",
+	"ctrl-s",
+	"ctrl-t",
+	"ctrl-u",
+	"ctrl-v",
+	"ctrl-w",
+	"ctrl-x",
+	"ctrl-y",
+	"ctrl-z",
+	"ctrl-[",
+	"ctrl-\\",
+	"ctrl-]",
+	"ctrl-^",
+	"ctrl-_",
+}
+
+// ToBytes converts a comma-separated string of key sequences into the corresponding ASCII codes.
+func ToBytes(keys string) ([]byte, error) {
+	codes := []byte{}
+next:
+	for _, key := range strings.Split(keys, ",") {
+		if len(key) != 1 {
+			for code, ctrl := range ASCII {
+				if ctrl == key {
+					codes = append(codes, byte(code))
+					continue next
+				}
+			}
+			if key == "DEL" {
+				codes = append(codes, 127)
+			} else {
+				return nil, fmt.Errorf("Unknown character: '%s'", key)
+			}
+		} else {
+			codes = append(codes, byte(key[0]))
+		}
+	}
+	return codes, nil
+}
diff --git a/vendor/github.com/moby/moby/pkg/term/ascii_test.go b/vendor/github.com/moby/moby/pkg/term/ascii_test.go
new file mode 100644
index 000000000..4a1e7f302
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/term/ascii_test.go
@@ -0,0 +1,43 @@
+package term
+
+import "testing"
+
+func TestToBytes(t *testing.T) {
+	codes, err := ToBytes("ctrl-a,a")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(codes) != 2 {
+		t.Fatalf("Expected 2 codes, got %d", len(codes))
+	}
+	if codes[0] != 1 || codes[1] != 97 {
+		t.Fatalf("Expected '1' '97', got '%d' '%d'", codes[0], codes[1])
+	}
+
+	codes, err = ToBytes("shift-z")
+	if err == nil {
+		t.Fatalf("Expected error, got none")
+	}
+
+	codes, err = ToBytes("ctrl-@,ctrl-[,~,ctrl-o")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(codes) != 4 {
+		t.Fatalf("Expected 4 codes, got %d", len(codes))
+	}
+	if codes[0] != 0 || codes[1] != 27 || codes[2] != 126 || codes[3] != 15 {
+		t.Fatalf("Expected '0' '27' '126' '15', got '%d' '%d' '%d' '%d'", codes[0], codes[1], codes[2], codes[3])
+	}
+
+	codes, err = ToBytes("DEL,+")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(codes) != 2 {
+		t.Fatalf("Expected 2 codes, got %d", len(codes))
+	}
+	if codes[0] != 127 || codes[1] != 43 {
+		t.Fatalf("Expected '127' '43', got '%d' '%d'", codes[0], codes[1])
+	}
+}
diff --git a/vendor/github.com/moby/moby/pkg/term/proxy.go b/vendor/github.com/moby/moby/pkg/term/proxy.go
new file mode 100644
index 000000000..e648eb812
--- /dev/null
+++
b/vendor/github.com/moby/moby/pkg/term/proxy.go @@ -0,0 +1,74 @@ +package term + +import ( + "io" +) + +// EscapeError is special error which returned by a TTY proxy reader's Read() +// method in case its detach escape sequence is read. +type EscapeError struct{} + +func (EscapeError) Error() string { + return "read escape sequence" +} + +// escapeProxy is used only for attaches with a TTY. It is used to proxy +// stdin keypresses from the underlying reader and look for the passed in +// escape key sequence to signal a detach. +type escapeProxy struct { + escapeKeys []byte + escapeKeyPos int + r io.Reader +} + +// NewEscapeProxy returns a new TTY proxy reader which wraps the given reader +// and detects when the specified escape keys are read, in which case the Read +// method will return an error of type EscapeError. +func NewEscapeProxy(r io.Reader, escapeKeys []byte) io.Reader { + return &escapeProxy{ + escapeKeys: escapeKeys, + r: r, + } +} + +func (r *escapeProxy) Read(buf []byte) (int, error) { + nr, err := r.r.Read(buf) + + preserve := func() { + // this preserves the original key presses in the passed in buffer + nr += r.escapeKeyPos + preserve := make([]byte, 0, r.escapeKeyPos+len(buf)) + preserve = append(preserve, r.escapeKeys[:r.escapeKeyPos]...) + preserve = append(preserve, buf...) + r.escapeKeyPos = 0 + copy(buf[0:nr], preserve) + } + + if nr != 1 || err != nil { + if r.escapeKeyPos > 0 { + preserve() + } + return nr, err + } + + if buf[0] != r.escapeKeys[r.escapeKeyPos] { + if r.escapeKeyPos > 0 { + preserve() + } + return nr, nil + } + + if r.escapeKeyPos == len(r.escapeKeys)-1 { + return 0, EscapeError{} + } + + // Looks like we've got an escape key, but we need to match again on the next + // read. + // Store the current escape key we found so we can look for the next one on + // the next read. 
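+	// For example, with escape keys ctrl-p,ctrl-q: reading ctrl-p returns
+	// n == 0 and arms the proxy; a following ctrl-q yields EscapeError,
+	// while any other byte causes preserve() to replay the held ctrl-p
+	// first.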
+ // Since this is an escape key, make sure we don't let the caller read it + // If later on we find that this is not the escape sequence, we'll add the + // keys back + r.escapeKeyPos++ + return nr - r.escapeKeyPos, nil +} diff --git a/vendor/github.com/moby/moby/pkg/term/proxy_test.go b/vendor/github.com/moby/moby/pkg/term/proxy_test.go new file mode 100644 index 000000000..baba193d1 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/term/proxy_test.go @@ -0,0 +1,92 @@ +package term + +import ( + "bytes" + "fmt" + "reflect" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestEscapeProxyRead(t *testing.T) { + escapeKeys, _ := ToBytes("DEL") + keys, _ := ToBytes("a,b,c,+") + reader := NewEscapeProxy(bytes.NewReader(keys), escapeKeys) + buf := make([]byte, len(keys)) + nr, err := reader.Read(buf) + require.NoError(t, err) + require.EqualValues(t, nr, len(keys), fmt.Sprintf("nr %d should be equal to the number of %d", nr, len(keys))) + require.Equal(t, keys, buf, "keys & the read buffer should be equal") + + keys, _ = ToBytes("") + reader = NewEscapeProxy(bytes.NewReader(keys), escapeKeys) + buf = make([]byte, len(keys)) + nr, err = reader.Read(buf) + require.Error(t, err, "Should throw error when no keys are to read") + require.EqualValues(t, nr, 0, "nr should be zero") + require.Condition(t, func() (success bool) { return len(keys) == 0 && len(buf) == 0 }, "keys & the read buffer size should be zero") + + escapeKeys, _ = ToBytes("ctrl-x,ctrl-@") + keys, _ = ToBytes("DEL") + reader = NewEscapeProxy(bytes.NewReader(keys), escapeKeys) + buf = make([]byte, len(keys)) + nr, err = reader.Read(buf) + require.NoError(t, err) + require.EqualValues(t, nr, 1, fmt.Sprintf("nr %d should be equal to the number of 1", nr)) + require.Equal(t, keys, buf, "keys & the read buffer should be equal") + + escapeKeys, _ = ToBytes("ctrl-c") + keys, _ = ToBytes("ctrl-c") + reader = NewEscapeProxy(bytes.NewReader(keys), escapeKeys) + buf = make([]byte, len(keys)) + nr, err = reader.Read(buf) + require.Condition(t, func() (success bool) { + return reflect.TypeOf(err).Name() == "EscapeError" + }, err) + require.EqualValues(t, nr, 0, "nr should be equal to 0") + require.Equal(t, keys, buf, "keys & the read buffer should be equal") + + escapeKeys, _ = ToBytes("ctrl-c,ctrl-z") + keys, _ = ToBytes("ctrl-c,ctrl-z") + reader = NewEscapeProxy(bytes.NewReader(keys), escapeKeys) + buf = make([]byte, 1) + nr, err = reader.Read(buf) + require.NoError(t, err) + require.EqualValues(t, nr, 0, "nr should be equal to 0") + require.Equal(t, keys[0:1], buf, "keys & the read buffer should be equal") + nr, err = reader.Read(buf) + require.Condition(t, func() (success bool) { + return reflect.TypeOf(err).Name() == "EscapeError" + }, err) + require.EqualValues(t, nr, 0, "nr should be equal to 0") + require.Equal(t, keys[1:], buf, "keys & the read buffer should be equal") + + escapeKeys, _ = ToBytes("ctrl-c,ctrl-z") + keys, _ = ToBytes("ctrl-c,DEL,+") + reader = NewEscapeProxy(bytes.NewReader(keys), escapeKeys) + buf = make([]byte, 1) + nr, err = reader.Read(buf) + require.NoError(t, err) + require.EqualValues(t, nr, 0, "nr should be equal to 0") + require.Equal(t, keys[0:1], buf, "keys & the read buffer should be equal") + buf = make([]byte, len(keys)) + nr, err = reader.Read(buf) + require.NoError(t, err) + require.EqualValues(t, nr, len(keys), fmt.Sprintf("nr should be equal to %d", len(keys))) + require.Equal(t, keys, buf, "keys & the read buffer should be equal") + + escapeKeys, _ = ToBytes("ctrl-c,ctrl-z") + keys, _ 
= ToBytes("ctrl-c,DEL")
+	reader = NewEscapeProxy(bytes.NewReader(keys), escapeKeys)
+	buf = make([]byte, 1)
+	nr, err = reader.Read(buf)
+	require.NoError(t, err)
+	require.EqualValues(t, nr, 0, "nr should be equal to 0")
+	require.Equal(t, keys[0:1], buf, "keys & the read buffer should be equal")
+	buf = make([]byte, len(keys))
+	nr, err = reader.Read(buf)
+	require.NoError(t, err)
+	require.EqualValues(t, nr, len(keys), fmt.Sprintf("nr should be equal to %d", len(keys)))
+	require.Equal(t, keys, buf, "keys & the read buffer should be equal")
+}
diff --git a/vendor/github.com/moby/moby/pkg/term/tc.go b/vendor/github.com/moby/moby/pkg/term/tc.go
new file mode 100644
index 000000000..6d2dfd3a8
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/term/tc.go
@@ -0,0 +1,21 @@
+// +build !windows
+// +build !solaris !cgo
+
+package term
+
+import (
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+func tcget(fd uintptr, p *Termios) syscall.Errno {
+	_, _, err := unix.Syscall(unix.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p)))
+	return err
+}
+
+func tcset(fd uintptr, p *Termios) syscall.Errno {
+	_, _, err := unix.Syscall(unix.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p)))
+	return err
+}
diff --git a/vendor/github.com/moby/moby/pkg/term/tc_solaris_cgo.go b/vendor/github.com/moby/moby/pkg/term/tc_solaris_cgo.go
new file mode 100644
index 000000000..50234affc
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/term/tc_solaris_cgo.go
@@ -0,0 +1,65 @@
+// +build solaris,cgo
+
+package term
+
+import (
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+// #include <termios.h>
+import "C"
+
+// Termios is the Unix API for terminal I/O.
+// It is a passthrough for unix.Termios in order to make it portable with
+// other platforms where it is not available or handled differently.
+type Termios unix.Termios
+
+// MakeRaw puts the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd uintptr) (*State, error) {
+	var oldState State
+	if err := tcget(fd, &oldState.termios); err != 0 {
+		return nil, err
+	}
+
+	newState := oldState.termios
+
+	newState.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON | unix.IXANY)
+	newState.Oflag &^= unix.OPOST
+	newState.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN)
+	newState.Cflag &^= (unix.CSIZE | unix.PARENB)
+	newState.Cflag |= unix.CS8
+
+	/*
+		VMIN is the minimum number of characters that need to be read in
+		non-canonical mode for a read to return. Since VMIN shares storage
+		with another element in canonical mode, it defaults to 4 when we
+		switch modes; it needs to be explicitly set to 1.
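+		With VMIN = 1 and VTIME = 0, a read blocks until at least one
+		byte is available and then returns immediately.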
+ */ + newState.Cc[C.VMIN] = 1 + newState.Cc[C.VTIME] = 0 + + if err := tcset(fd, &newState); err != 0 { + return nil, err + } + return &oldState, nil +} + +func tcget(fd uintptr, p *Termios) syscall.Errno { + ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p))) + if ret != 0 { + return err.(syscall.Errno) + } + return 0 +} + +func tcset(fd uintptr, p *Termios) syscall.Errno { + ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p))) + if ret != 0 { + return err.(syscall.Errno) + } + return 0 +} diff --git a/vendor/github.com/moby/moby/pkg/term/term.go b/vendor/github.com/moby/moby/pkg/term/term.go new file mode 100644 index 000000000..4f59d8d93 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/term/term.go @@ -0,0 +1,124 @@ +// +build !windows + +// Package term provides structures and helper functions to work with +// terminal (state, sizes). +package term + +import ( + "errors" + "fmt" + "io" + "os" + "os/signal" + + "golang.org/x/sys/unix" +) + +var ( + // ErrInvalidState is returned if the state of the terminal is invalid. + ErrInvalidState = errors.New("Invalid terminal state") +) + +// State represents the state of the terminal. +type State struct { + termios Termios +} + +// Winsize represents the size of the terminal window. +type Winsize struct { + Height uint16 + Width uint16 + x uint16 + y uint16 +} + +// StdStreams returns the standard streams (stdin, stdout, stderr). +func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { + return os.Stdin, os.Stdout, os.Stderr +} + +// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. +func GetFdInfo(in interface{}) (uintptr, bool) { + var inFd uintptr + var isTerminalIn bool + if file, ok := in.(*os.File); ok { + inFd = file.Fd() + isTerminalIn = IsTerminal(inFd) + } + return inFd, isTerminalIn +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + var termios Termios + return tcget(fd, &termios) == 0 +} + +// RestoreTerminal restores the terminal connected to the given file descriptor +// to a previous state. +func RestoreTerminal(fd uintptr, state *State) error { + if state == nil { + return ErrInvalidState + } + if err := tcset(fd, &state.termios); err != 0 { + return err + } + return nil +} + +// SaveState saves the state of the terminal connected to the given file descriptor. +func SaveState(fd uintptr) (*State, error) { + var oldState State + if err := tcget(fd, &oldState.termios); err != 0 { + return nil, err + } + + return &oldState, nil +} + +// DisableEcho applies the specified state to the terminal connected to the file +// descriptor, with echo disabled. +func DisableEcho(fd uintptr, state *State) error { + newState := state.termios + newState.Lflag &^= unix.ECHO + + if err := tcset(fd, &newState); err != 0 { + return err + } + handleInterrupt(fd, state) + return nil +} + +// SetRawTerminal puts the terminal connected to the given file descriptor into +// raw mode and returns the previous state. On UNIX, this puts both the input +// and output into raw mode. On Windows, it only puts the input into raw mode. +func SetRawTerminal(fd uintptr) (*State, error) { + oldState, err := MakeRaw(fd) + if err != nil { + return nil, err + } + handleInterrupt(fd, oldState) + return oldState, err +} + +// SetRawTerminalOutput puts the output of terminal connected to the given file +// descriptor into raw mode. 
On UNIX, this does nothing and returns nil for the
+// state. On Windows, it disables LF -> CRLF translation.
+func SetRawTerminalOutput(fd uintptr) (*State, error) {
+	return nil, nil
+}
+
+func handleInterrupt(fd uintptr, state *State) {
+	sigchan := make(chan os.Signal, 1)
+	signal.Notify(sigchan, os.Interrupt)
+	go func() {
+		for range sigchan {
+			// quit cleanly: print a newline so the next prompt starts on a fresh line
+			fmt.Println()
+			signal.Stop(sigchan)
+			close(sigchan)
+			RestoreTerminal(fd, state)
+			os.Exit(1)
+		}
+	}()
+}
diff --git a/vendor/github.com/moby/moby/pkg/term/term_linux_test.go b/vendor/github.com/moby/moby/pkg/term/term_linux_test.go
new file mode 100644
index 000000000..a1628c4c6
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/term/term_linux_test.go
@@ -0,0 +1,120 @@
+//+build linux
+
+package term
+
+import (
+	"flag"
+	"io/ioutil"
+	"os"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+var rootEnabled bool
+
+func init() {
+	flag.BoolVar(&rootEnabled, "test.root", false, "enable tests that require root")
+}
+
+// RequiresRoot skips tests that require root, unless the test.root flag has
+// been set
+func RequiresRoot(t *testing.T) {
+	if !rootEnabled {
+		t.Skip("skipping test that requires root")
+		return
+	}
+	assert.Equal(t, 0, os.Getuid(), "This test must be run as root.")
+}
+
+func newTtyForTest(t *testing.T) (*os.File, error) {
+	RequiresRoot(t)
+	return os.OpenFile("/dev/tty", os.O_RDWR, os.ModeDevice)
+}
+
+func newTempFile() (*os.File, error) {
+	return ioutil.TempFile(os.TempDir(), "temp")
+}
+
+func TestGetWinsize(t *testing.T) {
+	tty, err := newTtyForTest(t)
+	require.NoError(t, err)
+	defer tty.Close()
+	winSize, err := GetWinsize(tty.Fd())
+	require.NoError(t, err)
+	require.NotNil(t, winSize)
+	require.NotNil(t, winSize.Height)
+	require.NotNil(t, winSize.Width)
+	newSize := Winsize{Width: 200, Height: 200, x: winSize.x, y: winSize.y}
+	err = SetWinsize(tty.Fd(), &newSize)
+	require.NoError(t, err)
+	winSize, err = GetWinsize(tty.Fd())
+	require.NoError(t, err)
+	require.Equal(t, *winSize, newSize)
+}
+
+func TestSetWinsize(t *testing.T) {
+	tty, err := newTtyForTest(t)
+	require.NoError(t, err)
+	defer tty.Close()
+	winSize, err := GetWinsize(tty.Fd())
+	require.NoError(t, err)
+	require.NotNil(t, winSize)
+	newSize := Winsize{Width: 200, Height: 200, x: winSize.x, y: winSize.y}
+	err = SetWinsize(tty.Fd(), &newSize)
+	require.NoError(t, err)
+	winSize, err = GetWinsize(tty.Fd())
+	require.NoError(t, err)
+	require.Equal(t, *winSize, newSize)
+}
+
+func TestGetFdInfo(t *testing.T) {
+	tty, err := newTtyForTest(t)
+	require.NoError(t, err)
+	defer tty.Close()
+	inFd, isTerminal := GetFdInfo(tty)
+	require.Equal(t, inFd, tty.Fd())
+	require.Equal(t, isTerminal, true)
+	tmpFile, err := newTempFile()
+	require.NoError(t, err)
+	defer tmpFile.Close()
+	inFd, isTerminal = GetFdInfo(tmpFile)
+	require.Equal(t, inFd, tmpFile.Fd())
+	require.Equal(t, isTerminal, false)
+}
+
+func TestIsTerminal(t *testing.T) {
+	tty, err := newTtyForTest(t)
+	require.NoError(t, err)
+	defer tty.Close()
+	isTerminal := IsTerminal(tty.Fd())
+	require.Equal(t, isTerminal, true)
+	tmpFile, err := newTempFile()
+	require.NoError(t, err)
+	defer tmpFile.Close()
+	isTerminal = IsTerminal(tmpFile.Fd())
+	require.Equal(t, isTerminal, false)
+}
+
+func TestSaveState(t *testing.T) {
+	tty, err := newTtyForTest(t)
+	require.NoError(t, err)
+	defer tty.Close()
+	state, err := SaveState(tty.Fd())
+	require.NoError(t, err)
+	require.NotNil(t, state)
+	tty, err = newTtyForTest(t)
+	require.NoError(t, err)
+	defer tty.Close()
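+	// Restore the captured state onto a freshly opened tty handle.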
+ err = RestoreTerminal(tty.Fd(), state) + require.NoError(t, err) +} + +func TestDisableEcho(t *testing.T) { + tty, err := newTtyForTest(t) + defer tty.Close() + require.NoError(t, err) + state, err := SetRawTerminal(tty.Fd()) + require.NoError(t, err) + require.NotNil(t, state) + err = DisableEcho(tty.Fd(), state) + require.NoError(t, err) +} diff --git a/vendor/github.com/moby/moby/pkg/term/term_windows.go b/vendor/github.com/moby/moby/pkg/term/term_windows.go new file mode 100644 index 000000000..c0332c3cd --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/term/term_windows.go @@ -0,0 +1,237 @@ +// +build windows + +package term + +import ( + "io" + "os" + "os/signal" + "syscall" // used for STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and STD_ERROR_HANDLE + + "github.com/Azure/go-ansiterm/winterm" + "github.com/docker/docker/pkg/term/windows" +) + +// State holds the console mode for the terminal. +type State struct { + mode uint32 +} + +// Winsize is used for window size. +type Winsize struct { + Height uint16 + Width uint16 +} + +const ( + // https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx + enableVirtualTerminalInput = 0x0200 + enableVirtualTerminalProcessing = 0x0004 + disableNewlineAutoReturn = 0x0008 +) + +// vtInputSupported is true if enableVirtualTerminalInput is supported by the console +var vtInputSupported bool + +// StdStreams returns the standard streams (stdin, stdout, stderr). +func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { + // Turn on VT handling on all std handles, if possible. This might + // fail, in which case we will fall back to terminal emulation. + var emulateStdin, emulateStdout, emulateStderr bool + fd := os.Stdin.Fd() + if mode, err := winterm.GetConsoleMode(fd); err == nil { + // Validate that enableVirtualTerminalInput is supported, but do not set it. + if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalInput); err != nil { + emulateStdin = true + } else { + vtInputSupported = true + } + // Unconditionally set the console mode back even on failure because SetConsoleMode + // remembers invalid bits on input handles. + winterm.SetConsoleMode(fd, mode) + } + + fd = os.Stdout.Fd() + if mode, err := winterm.GetConsoleMode(fd); err == nil { + // Validate disableNewlineAutoReturn is supported, but do not set it. + if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil { + emulateStdout = true + } else { + winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing) + } + } + + fd = os.Stderr.Fd() + if mode, err := winterm.GetConsoleMode(fd); err == nil { + // Validate disableNewlineAutoReturn is supported, but do not set it. + if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil { + emulateStderr = true + } else { + winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing) + } + } + + if os.Getenv("ConEmuANSI") == "ON" || os.Getenv("ConsoleZVersion") != "" { + // The ConEmu and ConsoleZ terminals emulate ANSI on output streams well. + emulateStdin = true + emulateStdout = false + emulateStderr = false + } + + // Temporarily use STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and + // STD_ERROR_HANDLE from syscall rather than x/sys/windows as long as + // go-ansiterm hasn't switch to x/sys/windows. 
+ // TODO: switch back to x/sys/windows once go-ansiterm has switched + if emulateStdin { + stdIn = windowsconsole.NewAnsiReader(syscall.STD_INPUT_HANDLE) + } else { + stdIn = os.Stdin + } + + if emulateStdout { + stdOut = windowsconsole.NewAnsiWriter(syscall.STD_OUTPUT_HANDLE) + } else { + stdOut = os.Stdout + } + + if emulateStderr { + stdErr = windowsconsole.NewAnsiWriter(syscall.STD_ERROR_HANDLE) + } else { + stdErr = os.Stderr + } + + return +} + +// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. +func GetFdInfo(in interface{}) (uintptr, bool) { + return windowsconsole.GetHandleInfo(in) +} + +// GetWinsize returns the window size based on the specified file descriptor. +func GetWinsize(fd uintptr) (*Winsize, error) { + info, err := winterm.GetConsoleScreenBufferInfo(fd) + if err != nil { + return nil, err + } + + winsize := &Winsize{ + Width: uint16(info.Window.Right - info.Window.Left + 1), + Height: uint16(info.Window.Bottom - info.Window.Top + 1), + } + + return winsize, nil +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + return windowsconsole.IsConsole(fd) +} + +// RestoreTerminal restores the terminal connected to the given file descriptor +// to a previous state. +func RestoreTerminal(fd uintptr, state *State) error { + return winterm.SetConsoleMode(fd, state.mode) +} + +// SaveState saves the state of the terminal connected to the given file descriptor. +func SaveState(fd uintptr) (*State, error) { + mode, e := winterm.GetConsoleMode(fd) + if e != nil { + return nil, e + } + + return &State{mode: mode}, nil +} + +// DisableEcho disables echo for the terminal connected to the given file descriptor. +// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx +func DisableEcho(fd uintptr, state *State) error { + mode := state.mode + mode &^= winterm.ENABLE_ECHO_INPUT + mode |= winterm.ENABLE_PROCESSED_INPUT | winterm.ENABLE_LINE_INPUT + err := winterm.SetConsoleMode(fd, mode) + if err != nil { + return err + } + + // Register an interrupt handler to catch and restore prior state + restoreAtInterrupt(fd, state) + return nil +} + +// SetRawTerminal puts the terminal connected to the given file descriptor into +// raw mode and returns the previous state. On UNIX, this puts both the input +// and output into raw mode. On Windows, it only puts the input into raw mode. +func SetRawTerminal(fd uintptr) (*State, error) { + state, err := MakeRaw(fd) + if err != nil { + return nil, err + } + + // Register an interrupt handler to catch and restore prior state + restoreAtInterrupt(fd, state) + return state, err +} + +// SetRawTerminalOutput puts the output of terminal connected to the given file +// descriptor into raw mode. On UNIX, this does nothing and returns nil for the +// state. On Windows, it disables LF -> CRLF translation. +func SetRawTerminalOutput(fd uintptr) (*State, error) { + state, err := SaveState(fd) + if err != nil { + return nil, err + } + + // Ignore failures, since disableNewlineAutoReturn might not be supported on this + // version of Windows. + winterm.SetConsoleMode(fd, state.mode|disableNewlineAutoReturn) + return state, err +} + +// MakeRaw puts the terminal (Windows Console) connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be restored. 
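+// It does so by clearing the echo, line, mouse, window, and processed-input
+// modes and enabling extended flags, insert mode, and quick-edit (plus
+// virtual-terminal input when supported), as listed below.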
+func MakeRaw(fd uintptr) (*State, error) { + state, err := SaveState(fd) + if err != nil { + return nil, err + } + + mode := state.mode + + // See + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx + + // Disable these modes + mode &^= winterm.ENABLE_ECHO_INPUT + mode &^= winterm.ENABLE_LINE_INPUT + mode &^= winterm.ENABLE_MOUSE_INPUT + mode &^= winterm.ENABLE_WINDOW_INPUT + mode &^= winterm.ENABLE_PROCESSED_INPUT + + // Enable these modes + mode |= winterm.ENABLE_EXTENDED_FLAGS + mode |= winterm.ENABLE_INSERT_MODE + mode |= winterm.ENABLE_QUICK_EDIT_MODE + if vtInputSupported { + mode |= enableVirtualTerminalInput + } + + err = winterm.SetConsoleMode(fd, mode) + if err != nil { + return nil, err + } + return state, nil +} + +func restoreAtInterrupt(fd uintptr, state *State) { + sigchan := make(chan os.Signal, 1) + signal.Notify(sigchan, os.Interrupt) + + go func() { + _ = <-sigchan + RestoreTerminal(fd, state) + os.Exit(0) + }() +} diff --git a/vendor/github.com/moby/moby/pkg/term/termios_bsd.go b/vendor/github.com/moby/moby/pkg/term/termios_bsd.go new file mode 100644 index 000000000..c47341e87 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/term/termios_bsd.go @@ -0,0 +1,42 @@ +// +build darwin freebsd openbsd + +package term + +import ( + "unsafe" + + "golang.org/x/sys/unix" +) + +const ( + getTermios = unix.TIOCGETA + setTermios = unix.TIOCSETA +) + +// Termios is the Unix API for terminal I/O. +type Termios unix.Termios + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { + return nil, err + } + + newState := oldState.termios + newState.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON) + newState.Oflag &^= unix.OPOST + newState.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN) + newState.Cflag &^= (unix.CSIZE | unix.PARENB) + newState.Cflag |= unix.CS8 + newState.Cc[unix.VMIN] = 1 + newState.Cc[unix.VTIME] = 0 + + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 { + return nil, err + } + + return &oldState, nil +} diff --git a/vendor/github.com/moby/moby/pkg/term/termios_linux.go b/vendor/github.com/moby/moby/pkg/term/termios_linux.go new file mode 100644 index 000000000..3e25eb7a4 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/term/termios_linux.go @@ -0,0 +1,37 @@ +package term + +import ( + "golang.org/x/sys/unix" +) + +const ( + getTermios = unix.TCGETS + setTermios = unix.TCSETS +) + +// Termios is the Unix API for terminal I/O. +type Termios unix.Termios + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. 
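+// A typical call sequence (an illustrative sketch):
+//
+//	oldState, err := MakeRaw(os.Stdin.Fd())
+//	if err == nil {
+//		defer RestoreTerminal(os.Stdin.Fd(), oldState)
+//	}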
+func MakeRaw(fd uintptr) (*State, error) { + termios, err := unix.IoctlGetTermios(int(fd), getTermios) + if err != nil { + return nil, err + } + + var oldState State + oldState.termios = Termios(*termios) + + termios.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON) + termios.Oflag &^= unix.OPOST + termios.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN) + termios.Cflag &^= (unix.CSIZE | unix.PARENB) + termios.Cflag |= unix.CS8 + + if err := unix.IoctlSetTermios(int(fd), setTermios, termios); err != nil { + return nil, err + } + return &oldState, nil +} diff --git a/vendor/github.com/moby/moby/pkg/term/windows/ansi_reader.go b/vendor/github.com/moby/moby/pkg/term/windows/ansi_reader.go new file mode 100644 index 000000000..29d396318 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/term/windows/ansi_reader.go @@ -0,0 +1,263 @@ +// +build windows + +package windowsconsole + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "strings" + "unsafe" + + ansiterm "github.com/Azure/go-ansiterm" + "github.com/Azure/go-ansiterm/winterm" +) + +const ( + escapeSequence = ansiterm.KEY_ESC_CSI +) + +// ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation. +type ansiReader struct { + file *os.File + fd uintptr + buffer []byte + cbBuffer int + command []byte +} + +// NewAnsiReader returns an io.ReadCloser that provides VT100 terminal emulation on top of a +// Windows console input handle. +func NewAnsiReader(nFile int) io.ReadCloser { + initLogger() + file, fd := winterm.GetStdFile(nFile) + return &ansiReader{ + file: file, + fd: fd, + command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), + buffer: make([]byte, 0), + } +} + +// Close closes the wrapped file. +func (ar *ansiReader) Close() (err error) { + return ar.file.Close() +} + +// Fd returns the file descriptor of the wrapped file. +func (ar *ansiReader) Fd() uintptr { + return ar.fd +} + +// Read reads up to len(p) bytes of translated input events into p. 
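+// Translated bytes that do not fit in p are cached in ar.buffer and are
+// served on the next call before any new console events are read.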
+func (ar *ansiReader) Read(p []byte) (int, error) { + if len(p) == 0 { + return 0, nil + } + + // Previously read bytes exist, read as much as we can and return + if len(ar.buffer) > 0 { + logger.Debugf("Reading previously cached bytes") + + originalLength := len(ar.buffer) + copiedLength := copy(p, ar.buffer) + + if copiedLength == originalLength { + ar.buffer = make([]byte, 0, len(p)) + } else { + ar.buffer = ar.buffer[copiedLength:] + } + + logger.Debugf("Read from cache p[%d]: % x", copiedLength, p) + return copiedLength, nil + } + + // Read and translate key events + events, err := readInputEvents(ar.fd, len(p)) + if err != nil { + return 0, err + } else if len(events) == 0 { + logger.Debug("No input events detected") + return 0, nil + } + + keyBytes := translateKeyEvents(events, []byte(escapeSequence)) + + // Save excess bytes and right-size keyBytes + if len(keyBytes) > len(p) { + logger.Debugf("Received %d keyBytes, only room for %d bytes", len(keyBytes), len(p)) + ar.buffer = keyBytes[len(p):] + keyBytes = keyBytes[:len(p)] + } else if len(keyBytes) == 0 { + logger.Debug("No key bytes returned from the translator") + return 0, nil + } + + copiedLength := copy(p, keyBytes) + if copiedLength != len(keyBytes) { + return 0, errors.New("unexpected copy length encountered") + } + + logger.Debugf("Read p[%d]: % x", copiedLength, p) + logger.Debugf("Read keyBytes[%d]: % x", copiedLength, keyBytes) + return copiedLength, nil +} + +// readInputEvents polls until at least one event is available. +func readInputEvents(fd uintptr, maxBytes int) ([]winterm.INPUT_RECORD, error) { + // Determine the maximum number of records to retrieve + // -- Cast around the type system to obtain the size of a single INPUT_RECORD. + // unsafe.Sizeof requires an expression vs. a type-reference; the casting + // tricks the type system into believing it has such an expression. 
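+	// In effect recordSize == sizeof(INPUT_RECORD), computed without ever
+	// constructing a real record.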
+	recordSize := int(unsafe.Sizeof(*((*winterm.INPUT_RECORD)(unsafe.Pointer(&maxBytes)))))
+	countRecords := maxBytes / recordSize
+	if countRecords > ansiterm.MAX_INPUT_EVENTS {
+		countRecords = ansiterm.MAX_INPUT_EVENTS
+	} else if countRecords == 0 {
+		countRecords = 1
+	}
+	logger.Debugf("[windows] readInputEvents: Reading %v records (buffer size %v, record size %v)", countRecords, maxBytes, recordSize)
+
+	// Wait for and read input events
+	events := make([]winterm.INPUT_RECORD, countRecords)
+	nEvents := uint32(0)
+	eventsExist, err := winterm.WaitForSingleObject(fd, winterm.WAIT_INFINITE)
+	if err != nil {
+		return nil, err
+	}
+
+	if eventsExist {
+		err = winterm.ReadConsoleInput(fd, events, &nEvents)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Return a slice restricted to the number of returned records
+	logger.Debugf("[windows] readInputEvents: Read %v events", nEvents)
+	return events[:nEvents], nil
+}
+
+// KeyEvent Translation Helpers
+
+var arrowKeyMapPrefix = map[uint16]string{
+	winterm.VK_UP:    "%s%sA",
+	winterm.VK_DOWN:  "%s%sB",
+	winterm.VK_RIGHT: "%s%sC",
+	winterm.VK_LEFT:  "%s%sD",
+}
+
+var keyMapPrefix = map[uint16]string{
+	winterm.VK_UP:     "\x1B[%sA",
+	winterm.VK_DOWN:   "\x1B[%sB",
+	winterm.VK_RIGHT:  "\x1B[%sC",
+	winterm.VK_LEFT:   "\x1B[%sD",
+	winterm.VK_HOME:   "\x1B[1%s~", // showkey shows ^[[1
+	winterm.VK_END:    "\x1B[4%s~", // showkey shows ^[[4
+	winterm.VK_INSERT: "\x1B[2%s~",
+	winterm.VK_DELETE: "\x1B[3%s~",
+	winterm.VK_PRIOR:  "\x1B[5%s~",
+	winterm.VK_NEXT:   "\x1B[6%s~",
+	winterm.VK_F1:     "",
+	winterm.VK_F2:     "",
+	winterm.VK_F3:     "\x1B[13%s~",
+	winterm.VK_F4:     "\x1B[14%s~",
+	winterm.VK_F5:     "\x1B[15%s~",
+	winterm.VK_F6:     "\x1B[17%s~",
+	winterm.VK_F7:     "\x1B[18%s~",
+	winterm.VK_F8:     "\x1B[19%s~",
+	winterm.VK_F9:     "\x1B[20%s~",
+	winterm.VK_F10:    "\x1B[21%s~",
+	winterm.VK_F11:    "\x1B[23%s~",
+	winterm.VK_F12:    "\x1B[24%s~",
+}
+
+// translateKeyEvents converts the input events into the appropriate ANSI string.
+func translateKeyEvents(events []winterm.INPUT_RECORD, escapeSequence []byte) []byte {
+	var buffer bytes.Buffer
+	for _, event := range events {
+		if event.EventType == winterm.KEY_EVENT && event.KeyEvent.KeyDown != 0 {
+			buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence))
+		}
+	}
+
+	return buffer.Bytes()
+}
+
+// keyToString maps the given input event record to the corresponding string.
+func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) string {
+	if keyEvent.UnicodeChar == 0 {
+		return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence)
+	}
+
+	_, alt, control := getControlKeys(keyEvent.ControlKeyState)
+	if control {
+		// TODO(azlinux): Implement following control sequences
+		// <Ctrl>-D  Signals the end of input from the keyboard; also exits current shell.
+		// <Ctrl>-H  Deletes the first character to the left of the cursor. Also called the ERASE key.
+		// <Ctrl>-Q  Restarts printing after it has been stopped with <Ctrl>-s.
+		// <Ctrl>-S  Suspends printing on the screen (does not stop the program).
+		// <Ctrl>-U  Deletes all characters on the current line. Also called the KILL key.
+		// <Ctrl>-E  Quits current command and creates a core
+
+	}
+
+	// <Alt>+Key generates ESC N Key
+	if !control && alt {
+		return ansiterm.KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar))
+	}
+
+	return string(keyEvent.UnicodeChar)
+}
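To make the translation concrete, here is a hedged, package-internal probe; the winterm field names (EventType, KeyEvent, KeyDown, UnicodeChar, ControlKeyState) are assumptions based on the vendored Azure/go-ansiterm types used above. It synthesizes an <Alt>+a key-down event and runs it through translateKeyEvents:

// fragment, runnable only inside this package (e.g., in a test)
rec := winterm.INPUT_RECORD{
	EventType: winterm.KEY_EVENT,
	KeyEvent: winterm.KEY_EVENT_RECORD{
		KeyDown:         1,
		UnicodeChar:     uint16('a'),
		ControlKeyState: winterm.LEFT_ALT_PRESSED,
	},
}
out := translateKeyEvents([]winterm.INPUT_RECORD{rec}, []byte(escapeSequence))
fmt.Printf("%q\n", out) // expected: ansiterm.KEY_ESC_N followed by "a"

+// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string.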
+func formatVirtualKey(key uint16, controlState uint32, escapeSequence []byte) string { + shift, alt, control := getControlKeys(controlState) + modifier := getControlKeysModifier(shift, alt, control) + + if format, ok := arrowKeyMapPrefix[key]; ok { + return fmt.Sprintf(format, escapeSequence, modifier) + } + + if format, ok := keyMapPrefix[key]; ok { + return fmt.Sprintf(format, modifier) + } + + return "" +} + +// getControlKeys extracts the shift, alt, and ctrl key states. +func getControlKeys(controlState uint32) (shift, alt, control bool) { + shift = 0 != (controlState & winterm.SHIFT_PRESSED) + alt = 0 != (controlState & (winterm.LEFT_ALT_PRESSED | winterm.RIGHT_ALT_PRESSED)) + control = 0 != (controlState & (winterm.LEFT_CTRL_PRESSED | winterm.RIGHT_CTRL_PRESSED)) + return shift, alt, control +} + +// getControlKeysModifier returns the ANSI modifier for the given combination of control keys. +func getControlKeysModifier(shift, alt, control bool) string { + if shift && alt && control { + return ansiterm.KEY_CONTROL_PARAM_8 + } + if alt && control { + return ansiterm.KEY_CONTROL_PARAM_7 + } + if shift && control { + return ansiterm.KEY_CONTROL_PARAM_6 + } + if control { + return ansiterm.KEY_CONTROL_PARAM_5 + } + if shift && alt { + return ansiterm.KEY_CONTROL_PARAM_4 + } + if alt { + return ansiterm.KEY_CONTROL_PARAM_3 + } + if shift { + return ansiterm.KEY_CONTROL_PARAM_2 + } + return "" +} diff --git a/vendor/github.com/moby/moby/pkg/term/windows/ansi_writer.go b/vendor/github.com/moby/moby/pkg/term/windows/ansi_writer.go new file mode 100644 index 000000000..256577e1f --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/term/windows/ansi_writer.go @@ -0,0 +1,64 @@ +// +build windows + +package windowsconsole + +import ( + "io" + "os" + + ansiterm "github.com/Azure/go-ansiterm" + "github.com/Azure/go-ansiterm/winterm" +) + +// ansiWriter wraps a standard output file (e.g., os.Stdout) providing ANSI sequence translation. +type ansiWriter struct { + file *os.File + fd uintptr + infoReset *winterm.CONSOLE_SCREEN_BUFFER_INFO + command []byte + escapeSequence []byte + inAnsiSequence bool + parser *ansiterm.AnsiParser +} + +// NewAnsiWriter returns an io.Writer that provides VT100 terminal emulation on top of a +// Windows console output handle. +func NewAnsiWriter(nFile int) io.Writer { + initLogger() + file, fd := winterm.GetStdFile(nFile) + info, err := winterm.GetConsoleScreenBufferInfo(fd) + if err != nil { + return nil + } + + parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file)) + logger.Infof("newAnsiWriter: parser %p", parser) + + aw := &ansiWriter{ + file: file, + fd: fd, + infoReset: info, + command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), + escapeSequence: []byte(ansiterm.KEY_ESC_CSI), + parser: parser, + } + + logger.Infof("newAnsiWriter: aw.parser %p", aw.parser) + logger.Infof("newAnsiWriter: %v", aw) + return aw +} + +func (aw *ansiWriter) Fd() uintptr { + return aw.fd +} + +// Write writes len(p) bytes from p to the underlying data stream. 
+func (aw *ansiWriter) Write(p []byte) (total int, err error) {
+	if len(p) == 0 {
+		return 0, nil
+	}
+
+	logger.Infof("Write: % x", p)
+	logger.Infof("Write: %s", string(p))
+	return aw.parser.Parse(p)
+}
diff --git a/vendor/github.com/moby/moby/pkg/term/windows/console.go b/vendor/github.com/moby/moby/pkg/term/windows/console.go
new file mode 100644
index 000000000..4bad32ea7
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/term/windows/console.go
@@ -0,0 +1,35 @@
+// +build windows
+
+package windowsconsole
+
+import (
+	"os"
+
+	"github.com/Azure/go-ansiterm/winterm"
+)
+
+// GetHandleInfo returns the file descriptor and a bool indicating whether the file is a console.
+func GetHandleInfo(in interface{}) (uintptr, bool) {
+	switch t := in.(type) {
+	case *ansiReader:
+		return t.Fd(), true
+	case *ansiWriter:
+		return t.Fd(), true
+	}
+
+	var inFd uintptr
+	var isTerminal bool
+
+	if file, ok := in.(*os.File); ok {
+		inFd = file.Fd()
+		isTerminal = IsConsole(inFd)
+	}
+	return inFd, isTerminal
+}
+
+// IsConsole returns true if the given file descriptor is a Windows Console.
+// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console.
+func IsConsole(fd uintptr) bool {
+	_, e := winterm.GetConsoleMode(fd)
+	return e == nil
+}
diff --git a/vendor/github.com/moby/moby/pkg/term/windows/windows.go b/vendor/github.com/moby/moby/pkg/term/windows/windows.go
new file mode 100644
index 000000000..d67021e45
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/term/windows/windows.go
@@ -0,0 +1,33 @@
+// These files implement ANSI-aware input and output streams for use by the Docker Windows client.
+// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create
+// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls.
+
+package windowsconsole
+
+import (
+	"io/ioutil"
+	"os"
+	"sync"
+
+	ansiterm "github.com/Azure/go-ansiterm"
+	"github.com/Sirupsen/logrus"
+)
+
+var logger *logrus.Logger
+var initOnce sync.Once
+
+func initLogger() {
+	initOnce.Do(func() {
+		logFile := ioutil.Discard
+
+		if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" {
+			logFile, _ = os.Create("ansiReaderWriter.log")
+		}
+
+		logger = &logrus.Logger{
+			Out:       logFile,
+			Formatter: new(logrus.TextFormatter),
+			Level:     logrus.DebugLevel,
+		}
+	})
+}
diff --git a/vendor/github.com/moby/moby/pkg/term/windows/windows_test.go b/vendor/github.com/moby/moby/pkg/term/windows/windows_test.go
new file mode 100644
index 000000000..3c8084b3d
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/term/windows/windows_test.go
@@ -0,0 +1,3 @@
+// This file is necessary to pass the Docker tests.
+
+package windowsconsole
diff --git a/vendor/github.com/moby/moby/pkg/term/winsize.go b/vendor/github.com/moby/moby/pkg/term/winsize.go
new file mode 100644
index 000000000..f58367fe6
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/term/winsize.go
@@ -0,0 +1,30 @@
+// +build !solaris,!windows
+
+package term
+
+import (
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+// GetWinsize returns the window size based on the specified file descriptor.
+func GetWinsize(fd uintptr) (*Winsize, error) {
+	ws := &Winsize{}
+	_, _, err := unix.Syscall(unix.SYS_IOCTL, fd, uintptr(unix.TIOCGWINSZ), uintptr(unsafe.Pointer(ws)))
+	// Skip errno = 0
+	if err == 0 {
+		return ws, nil
+	}
+	return ws, err
+}
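Together with SetWinsize just below, this is the resize plumbing the client uses. A minimal sketch, assuming a Unix TTY on stdout, the package's Winsize fields (Height/Width), and the usual os/fmt/log imports:

ws, err := term.GetWinsize(os.Stdout.Fd())
if err != nil {
	log.Fatal(err)
}
fmt.Printf("terminal is %dx%d\n", ws.Width, ws.Height)
// Re-apply the same size; a real caller would change Width/Height first.
if err := term.SetWinsize(os.Stdout.Fd(), ws); err != nil {
	log.Fatal(err)
}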
+// SetWinsize tries to set the specified window size for the specified file descriptor.
+func SetWinsize(fd uintptr, ws *Winsize) error {
+	_, _, err := unix.Syscall(unix.SYS_IOCTL, fd, uintptr(unix.TIOCSWINSZ), uintptr(unsafe.Pointer(ws)))
+	// Skip errno = 0
+	if err == 0 {
+		return nil
+	}
+	return err
+}
diff --git a/vendor/github.com/moby/moby/pkg/term/winsize_solaris_cgo.go b/vendor/github.com/moby/moby/pkg/term/winsize_solaris_cgo.go
new file mode 100644
index 000000000..39c1d3207
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/term/winsize_solaris_cgo.go
@@ -0,0 +1,42 @@
+// +build solaris,cgo
+
+package term
+
+import (
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+/*
+#include <unistd.h>
+#include <stropts.h>
+#include <termios.h>
+
+// Small wrapper to get rid of variadic args of ioctl()
+int my_ioctl(int fd, int cmd, struct winsize *ws) {
+	return ioctl(fd, cmd, ws);
+}
+*/
+import "C"
+
+// GetWinsize returns the window size based on the specified file descriptor.
+func GetWinsize(fd uintptr) (*Winsize, error) {
+	ws := &Winsize{}
+	ret, err := C.my_ioctl(C.int(fd), C.int(unix.TIOCGWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws)))
+	// Skip retval = 0
+	if ret == 0 {
+		return ws, nil
+	}
+	return ws, err
+}
+
+// SetWinsize tries to set the specified window size for the specified file descriptor.
+func SetWinsize(fd uintptr, ws *Winsize) error {
+	ret, err := C.my_ioctl(C.int(fd), C.int(unix.TIOCSWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws)))
+	// Skip retval = 0
+	if ret == 0 {
+		return nil
+	}
+	return err
+}
diff --git a/vendor/github.com/moby/moby/pkg/testutil/cmd/command.go b/vendor/github.com/moby/moby/pkg/testutil/cmd/command.go
new file mode 100644
index 000000000..6f36d6790
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/testutil/cmd/command.go
@@ -0,0 +1,307 @@
+package cmd
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os/exec"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/docker/docker/pkg/system"
+	"github.com/go-check/check"
+)
+
+type testingT interface {
+	Fatalf(string, ...interface{})
+}
+
+const (
+	// None is a token to inform Result.Assert that the output should be empty
+	None string = ""
+)
+
+type lockedBuffer struct {
+	m   sync.RWMutex
+	buf bytes.Buffer
+}
+
+func (buf *lockedBuffer) Write(b []byte) (int, error) {
+	buf.m.Lock()
+	defer buf.m.Unlock()
+	return buf.buf.Write(b)
+}
+
+func (buf *lockedBuffer) String() string {
+	buf.m.RLock()
+	defer buf.m.RUnlock()
+	return buf.buf.String()
+}
+
+// Result stores the result of running a command
+type Result struct {
+	Cmd      *exec.Cmd
+	ExitCode int
+	Error    error
+	// Timeout is true if the command was killed because it ran for too long
+	Timeout   bool
+	outBuffer *lockedBuffer
+	errBuffer *lockedBuffer
+}
+
+// Assert compares the Result against the Expected struct, and fails the test if
+// any of the expectations are not met.
+func (r *Result) Assert(t testingT, exp Expected) *Result {
+	err := r.Compare(exp)
+	if err == nil {
+		return r
+	}
+	_, file, line, ok := runtime.Caller(1)
+	if ok {
+		t.Fatalf("at %s:%d - %s", filepath.Base(file), line, err.Error())
+	} else {
+		t.Fatalf("(no file/line info) - %s", err.Error())
+	}
+	return nil
+}
+
+// Compare checks the Result against the Expected struct. It returns nil when
+// all expectations are met, otherwise a formatted error with the command,
+// stdout, stderr, exit code, and the failed expectations.
+func (r *Result) Compare(exp Expected) error {
+	errors := []string{}
+	add := func(format string, args ...interface{}) {
+		errors = append(errors, fmt.Sprintf(format, args...))
+	}
+
+	if exp.ExitCode != r.ExitCode {
+		add("ExitCode was %d expected %d", r.ExitCode, exp.ExitCode)
+	}
+	if exp.Timeout != r.Timeout {
+		if exp.Timeout {
+			add("Expected command to timeout")
+		} else {
+			add("Expected command to finish, but it hit the timeout")
+		}
+	}
+	if !matchOutput(exp.Out, r.Stdout()) {
+		add("Expected stdout to contain %q", exp.Out)
+	}
+	if !matchOutput(exp.Err, r.Stderr()) {
+		add("Expected stderr to contain %q", exp.Err)
+	}
+	switch {
+	// If a non-zero exit code is expected there is going to be an error.
+	// Don't require an error message as well as an exit code because the
+	// error message is going to be "exit status <code>", which is not useful
+	case exp.Error == "" && exp.ExitCode != 0:
+	case exp.Error == "" && r.Error != nil:
+		add("Expected no error")
+	case exp.Error != "" && r.Error == nil:
+		add("Expected error to contain %q, but there was no error", exp.Error)
+	case exp.Error != "" && !strings.Contains(r.Error.Error(), exp.Error):
+		add("Expected error to contain %q", exp.Error)
+	}
+
+	if len(errors) == 0 {
+		return nil
+	}
+	return fmt.Errorf("%s\nFailures:\n%s\n", r, strings.Join(errors, "\n"))
+}
+
+func matchOutput(expected string, actual string) bool {
+	switch expected {
+	case None:
+		return actual == ""
+	default:
+		return strings.Contains(actual, expected)
+	}
+}
+
+func (r *Result) String() string {
+	var timeout string
+	if r.Timeout {
+		timeout = " (timeout)"
+	}
+
+	return fmt.Sprintf(`
Command: %s
ExitCode: %d%s
Error: %v
Stdout: %v
Stderr: %v
`,
+		strings.Join(r.Cmd.Args, " "),
+		r.ExitCode,
+		timeout,
+		r.Error,
+		r.Stdout(),
+		r.Stderr())
+}
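A sketch of how a test is meant to use Assert together with the Expected struct defined next (the command and values are illustrative):

func TestEchoHello(t *testing.T) {
	result := RunCommand("echo", "hello")
	// Success is Expected{}: exit code 0, no timeout, no error constraint.
	result.Assert(t, Expected{ExitCode: 0, Out: "hello"})
}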
+// Expected is the expected output from a Command. This struct is compared to a
+// Result struct by Result.Assert().
+type Expected struct {
+	ExitCode int
+	Timeout  bool
+	Error    string
+	Out      string
+	Err      string
+}
+
+// Success is the default expected result
+var Success = Expected{}
+
+// Stdout returns the stdout of the process as a string
+func (r *Result) Stdout() string {
+	return r.outBuffer.String()
+}
+
+// Stderr returns the stderr of the process as a string
+func (r *Result) Stderr() string {
+	return r.errBuffer.String()
+}
+
+// Combined returns the stdout and stderr combined into a single string
+func (r *Result) Combined() string {
+	return r.outBuffer.String() + r.errBuffer.String()
+}
+
+// SetExitError sets Error and ExitCode based on the given error
+func (r *Result) SetExitError(err error) {
+	if err == nil {
+		return
+	}
+	r.Error = err
+	r.ExitCode = system.ProcessExitCode(err)
+}
+
+type matches struct{}
+
+// Info returns the CheckerInfo
+func (m *matches) Info() *check.CheckerInfo {
+	return &check.CheckerInfo{
+		Name:   "CommandMatches",
+		Params: []string{"result", "expected"},
+	}
+}
+
+// Check compares a result against the expected
+func (m *matches) Check(params []interface{}, names []string) (bool, string) {
+	result, ok := params[0].(*Result)
+	if !ok {
+		return false, fmt.Sprintf("result must be a *Result, not %T", params[0])
+	}
+	expected, ok := params[1].(Expected)
+	if !ok {
+		return false, fmt.Sprintf("expected must be an Expected, not %T", params[1])
+	}
+
+	err := result.Compare(expected)
+	if err == nil {
+		return true, ""
+	}
+	return false, err.Error()
+}
+
+// Matches is a gocheck.Checker for comparing a Result against an Expected
+var Matches = &matches{}
+
+// Cmd contains the arguments and options for a process to run as part of a test
+// suite.
+type Cmd struct {
+	Command []string
+	Timeout time.Duration
+	Stdin   io.Reader
+	Stdout  io.Writer
+	Dir     string
+	Env     []string
+}
+
+// Command creates a simple Cmd with the specified command and arguments
+func Command(command string, args ...string) Cmd {
+	return Cmd{Command: append([]string{command}, args...)}
+}
+
+// RunCmd runs a command and returns a Result
+func RunCmd(cmd Cmd, cmdOperators ...func(*Cmd)) *Result {
+	for _, op := range cmdOperators {
+		op(&cmd)
+	}
+	result := StartCmd(cmd)
+	if result.Error != nil {
+		return result
+	}
+	return WaitOnCmd(cmd.Timeout, result)
+}
+
+// RunCommand runs the given command with arguments and returns a Result
+func RunCommand(command string, args ...string) *Result {
+	return RunCmd(Command(command, args...))
+}
+
+// StartCmd starts a command, but doesn't wait for it to finish
+func StartCmd(cmd Cmd) *Result {
+	result := buildCmd(cmd)
+	if result.Error != nil {
+		return result
+	}
+	result.SetExitError(result.Cmd.Start())
+	return result
+}
+
+func buildCmd(cmd Cmd) *Result {
+	var execCmd *exec.Cmd
+	switch len(cmd.Command) {
+	case 1:
+		execCmd = exec.Command(cmd.Command[0])
+	default:
+		execCmd = exec.Command(cmd.Command[0], cmd.Command[1:]...)
+	}
+	outBuffer := new(lockedBuffer)
+	errBuffer := new(lockedBuffer)
+
+	execCmd.Stdin = cmd.Stdin
+	execCmd.Dir = cmd.Dir
+	execCmd.Env = cmd.Env
+	if cmd.Stdout != nil {
+		execCmd.Stdout = io.MultiWriter(outBuffer, cmd.Stdout)
+	} else {
+		execCmd.Stdout = outBuffer
+	}
+	execCmd.Stderr = errBuffer
+	return &Result{
+		Cmd:       execCmd,
+		outBuffer: outBuffer,
+		errBuffer: errBuffer,
+	}
+}
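A sketch of the timeout path through RunCmd, including a command operator (the functional options applied before the process starts); the command and durations are illustrative:

func TestSleepTimesOut(t *testing.T) {
	// withEnv is a cmd operator: it mutates the Cmd before it starts.
	withEnv := func(c *Cmd) { c.Env = append(c.Env, "LC_ALL=C") }

	result := RunCmd(Cmd{
		Command: []string{"sleep", "10"},
		Timeout: 2 * time.Second,
	}, withEnv)
	result.Assert(t, Expected{Timeout: true})
}

+// WaitOnCmd waits for a command to complete. If timeout is non-zero, it
+// waits only until the timeout elapses.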
+func WaitOnCmd(timeout time.Duration, result *Result) *Result { + if timeout == time.Duration(0) { + result.SetExitError(result.Cmd.Wait()) + return result + } + + done := make(chan error, 1) + // Wait for command to exit in a goroutine + go func() { + done <- result.Cmd.Wait() + }() + + select { + case <-time.After(timeout): + killErr := result.Cmd.Process.Kill() + if killErr != nil { + fmt.Printf("failed to kill (pid=%d): %v\n", result.Cmd.Process.Pid, killErr) + } + result.Timeout = true + case err := <-done: + result.SetExitError(err) + } + return result +} diff --git a/vendor/github.com/moby/moby/pkg/testutil/cmd/command_test.go b/vendor/github.com/moby/moby/pkg/testutil/cmd/command_test.go new file mode 100644 index 000000000..d24b42b72 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/testutil/cmd/command_test.go @@ -0,0 +1,118 @@ +package cmd + +import ( + "runtime" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestRunCommand(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + + var cmd string + if runtime.GOOS == "solaris" { + cmd = "gls" + } else { + cmd = "ls" + } + result := RunCommand(cmd) + result.Assert(t, Expected{}) + + result = RunCommand("doesnotexists") + expectedError := `exec: "doesnotexists": executable file not found` + result.Assert(t, Expected{ExitCode: 127, Error: expectedError}) + + result = RunCommand(cmd, "-z") + result.Assert(t, Expected{ + ExitCode: 2, + Error: "exit status 2", + Err: "invalid option", + }) + assert.Contains(t, result.Combined(), "invalid option") +} + +func TestRunCommandWithCombined(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + + result := RunCommand("ls", "-a") + result.Assert(t, Expected{}) + + assert.Contains(t, result.Combined(), "..") + assert.Contains(t, result.Stdout(), "..") +} + +func TestRunCommandWithTimeoutFinished(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + + result := RunCmd(Cmd{ + Command: []string{"ls", "-a"}, + Timeout: 50 * time.Millisecond, + }) + result.Assert(t, Expected{Out: ".."}) +} + +func TestRunCommandWithTimeoutKilled(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + + command := []string{"sh", "-c", "while true ; do echo 1 ; sleep .5 ; done"} + result := RunCmd(Cmd{Command: command, Timeout: 1250 * time.Millisecond}) + result.Assert(t, Expected{Timeout: true}) + + ones := strings.Split(result.Stdout(), "\n") + assert.Len(t, ones, 4) +} + +func TestRunCommandWithErrors(t *testing.T) { + result := RunCommand("/foobar") + result.Assert(t, Expected{Error: "foobar", ExitCode: 127}) +} + +func TestRunCommandWithStdoutStderr(t *testing.T) { + result := RunCommand("echo", "hello", "world") + result.Assert(t, Expected{Out: "hello world\n", Err: None}) +} + +func TestRunCommandWithStdoutStderrError(t *testing.T) { + result := RunCommand("doesnotexists") + + expected := `exec: "doesnotexists": executable file not found` + result.Assert(t, Expected{Out: None, Err: None, ExitCode: 127, Error: expected}) + + switch runtime.GOOS { + case "windows": + expected = "ls: unknown option" + case "solaris": + expected = "gls: invalid option" + default: + expected = "ls: invalid option" + } + + var cmd string + if runtime.GOOS == "solaris" { + cmd = "gls" + } else { + cmd = "ls" + } + 
	result = RunCommand(cmd, "-z")
+	result.Assert(t, Expected{
+		Out:      None,
+		Err:      expected,
+		ExitCode: 2,
+		Error:    "exit status 2",
+	})
+}
diff --git a/vendor/github.com/moby/moby/pkg/testutil/golden/golden.go b/vendor/github.com/moby/moby/pkg/testutil/golden/golden.go
new file mode 100644
index 000000000..8f725da7b
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/testutil/golden/golden.go
@@ -0,0 +1,28 @@
+// Package golden provides functions and helpers for using golden files in
+// tests.
+package golden
+
+import (
+	"flag"
+	"io/ioutil"
+	"path/filepath"
+	"testing"
+)
+
+var update = flag.Bool("test.update", false, "update golden file")
+
+// Get returns the golden file content. If the `test.update` flag is set, it
+// first updates the golden file with the current output, then returns it.
+func Get(t *testing.T, actual []byte, filename string) []byte {
+	golden := filepath.Join("testdata", filename)
+	if *update {
+		if err := ioutil.WriteFile(golden, actual, 0644); err != nil {
+			t.Fatal(err)
+		}
+	}
+	expected, err := ioutil.ReadFile(golden)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return expected
+}
diff --git a/vendor/github.com/moby/moby/pkg/testutil/helpers.go b/vendor/github.com/moby/moby/pkg/testutil/helpers.go
new file mode 100644
index 000000000..c29114871
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/testutil/helpers.go
@@ -0,0 +1,33 @@
+package testutil
+
+import (
+	"strings"
+	"unicode"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// ErrorContains checks that the error is not nil, and contains the expected
+// substring.
+func ErrorContains(t require.TestingT, err error, expectedError string) {
+	require.Error(t, err)
+	assert.Contains(t, err.Error(), expectedError)
+}
+
+// EqualNormalizedString compares the actual value to the expected value after
+// applying the specified transform function. It fails the test if the two
+// transformed strings are not equal.
+// For example `EqualNormalizedString(t, RemoveSpace, "foo\n", "foo")` wouldn't
+// fail the test as spaces (and thus '\n') are removed before comparing the
+// strings.
+func EqualNormalizedString(t require.TestingT, transformFun func(rune) rune, actual, expected string) {
+	require.Equal(t, strings.Map(transformFun, expected), strings.Map(transformFun, actual))
+}
+
+// RemoveSpace returns -1 if the specified rune is a (unicode) space, and the
+// rune itself otherwise.
+func RemoveSpace(r rune) rune {
+	if unicode.IsSpace(r) {
+		return -1
+	}
+	return r
+}
diff --git a/vendor/github.com/moby/moby/pkg/testutil/pkg.go b/vendor/github.com/moby/moby/pkg/testutil/pkg.go
new file mode 100644
index 000000000..110b2e6a7
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/testutil/pkg.go
@@ -0,0 +1 @@
+package testutil
diff --git a/vendor/github.com/moby/moby/pkg/testutil/tempfile/tempfile.go b/vendor/github.com/moby/moby/pkg/testutil/tempfile/tempfile.go
new file mode 100644
index 000000000..01474babf
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/testutil/tempfile/tempfile.go
@@ -0,0 +1,56 @@
+package tempfile
+
+import (
+	"io/ioutil"
+	"os"
+
+	"github.com/stretchr/testify/require"
+)
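Ahead of the types defined next, a sketch of the intended test-side usage (the prefix and file content are illustrative):

func TestReadsConfig(t *testing.T) {
	f := tempfile.NewTempFile(t, "config", `{"debug": true}`)
	defer f.Remove()

	data, err := ioutil.ReadFile(f.Name())
	require.NoError(t, err)
	require.Contains(t, string(data), "debug")
}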
+// TempFile is a temporary file that can be used with unit tests. TempFile
+// reduces the boilerplate setup required in each test case by handling
+// setup errors.
+type TempFile struct {
+	File *os.File
+}
+
+// NewTempFile returns a new temp file with the given content
+func NewTempFile(t require.TestingT, prefix string, content string) *TempFile {
+	file, err := ioutil.TempFile("", prefix+"-")
+	require.NoError(t, err)
+
+	_, err = file.Write([]byte(content))
+	require.NoError(t, err)
+	file.Close()
+	return &TempFile{File: file}
+}
+
+// Name returns the filename
+func (f *TempFile) Name() string {
+	return f.File.Name()
+}
+
+// Remove removes the file
+func (f *TempFile) Remove() {
+	os.Remove(f.Name())
+}
+
+// TempDir is a temporary directory that can be used with unit tests. TempDir
+// reduces the boilerplate setup required in each test case by handling
+// setup errors.
+type TempDir struct {
+	Path string
+}
+
+// NewTempDir returns a new temp directory
+func NewTempDir(t require.TestingT, prefix string) *TempDir {
+	path, err := ioutil.TempDir("", prefix+"-")
+	require.NoError(t, err)
+
+	return &TempDir{Path: path}
+}
+
+// Remove removes the directory
+func (f *TempDir) Remove() {
+	os.Remove(f.Path)
+}
diff --git a/vendor/github.com/moby/moby/pkg/testutil/utils.go b/vendor/github.com/moby/moby/pkg/testutil/utils.go
new file mode 100644
index 000000000..0522dde2b
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/testutil/utils.go
@@ -0,0 +1,218 @@
+package testutil
+
+import (
+	"archive/tar"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"reflect"
+	"strings"
+	"syscall"
+	"time"
+
+	"github.com/docker/docker/pkg/stringutils"
+	"github.com/docker/docker/pkg/system"
+)
+
+// IsKilled processes the specified error and returns whether the process was
+// killed or not.
+func IsKilled(err error) bool {
+	if exitErr, ok := err.(*exec.ExitError); ok {
+		status, ok := exitErr.Sys().(syscall.WaitStatus)
+		if !ok {
+			return false
+		}
+		// status.ExitStatus() is required on Windows because it does not
+		// implement Signal() or Signaled(). Just checking for a bad exit
+		// status could mean it was killed (and in tests we do kill)
+		return (status.Signaled() && status.Signal() == os.Kill) || status.ExitStatus() != 0
+	}
+	return false
+}
+
+func runCommandWithOutput(cmd *exec.Cmd) (output string, exitCode int, err error) {
+	out, err := cmd.CombinedOutput()
+	exitCode = system.ProcessExitCode(err)
+	output = string(out)
+	return
+}
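A sketch of the pipeline helper defined next, emulating `echo -n 11 | wc -m` (the commands are illustrative; the same pairing appears in this package's tests):

out, exitCode, err := RunCommandPipelineWithOutput(
	exec.Command("echo", "-n", "11"),
	exec.Command("wc", "-m"),
)
if err != nil || exitCode != 0 {
	log.Fatalf("pipeline failed: code=%d err=%v", exitCode, err)
}
fmt.Print(out) // "2\n": wc counted the two bytes echo wrote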
+// RunCommandPipelineWithOutput runs the array of commands with the output
+// of each pipelined with the following (like cmd1 | cmd2 | cmd3 would do).
+// It returns the final output, a non-zero exit code, and an error if
+// something went wrong.
+func RunCommandPipelineWithOutput(cmds ...*exec.Cmd) (output string, exitCode int, err error) {
+	if len(cmds) < 2 {
+		return "", 0, errors.New("pipeline does not have multiple cmds")
+	}
+
+	// connect stdin of each cmd to stdout pipe of previous cmd
+	for i, cmd := range cmds {
+		if i > 0 {
+			prevCmd := cmds[i-1]
+			cmd.Stdin, err = prevCmd.StdoutPipe()
+
+			if err != nil {
+				return "", 0, fmt.Errorf("cannot set stdout pipe for %s: %v", cmd.Path, err)
+			}
+		}
+	}
+
+	// start all cmds except the last
+	for _, cmd := range cmds[:len(cmds)-1] {
+		if err = cmd.Start(); err != nil {
+			return "", 0, fmt.Errorf("starting %s failed with error: %v", cmd.Path, err)
+		}
+	}
+
+	defer func() {
+		var pipeErrMsgs []string
+		// wait all cmds except the last to release their resources
+		for _, cmd := range cmds[:len(cmds)-1] {
+			if pipeErr := cmd.Wait(); pipeErr != nil {
+				pipeErrMsgs = append(pipeErrMsgs, fmt.Sprintf("command %s failed with error: %v", cmd.Path, pipeErr))
+			}
+		}
+		if len(pipeErrMsgs) > 0 && err == nil {
+			err = fmt.Errorf("pipelineError from Wait: %v", strings.Join(pipeErrMsgs, ", "))
+		}
+	}()
+
+	// wait on last cmd
+	return runCommandWithOutput(cmds[len(cmds)-1])
+}
+
+// ConvertSliceOfStringsToMap converts a slice of strings into a map with the
+// strings as keys and empty structs as values.
+func ConvertSliceOfStringsToMap(input []string) map[string]struct{} {
+	output := make(map[string]struct{})
+	for _, v := range input {
+		output[v] = struct{}{}
+	}
+	return output
+}
+
+// CompareDirectoryEntries compares two sets of FileInfo (usually taken from a directory)
+// and returns an error if different.
+func CompareDirectoryEntries(e1 []os.FileInfo, e2 []os.FileInfo) error {
+	var (
+		e1Entries = make(map[string]struct{})
+		e2Entries = make(map[string]struct{})
+	)
+	for _, e := range e1 {
+		e1Entries[e.Name()] = struct{}{}
+	}
+	for _, e := range e2 {
+		e2Entries[e.Name()] = struct{}{}
+	}
+	if !reflect.DeepEqual(e1Entries, e2Entries) {
+		return fmt.Errorf("entries differ")
+	}
+	return nil
+}
+
+// ListTar lists the entries of a tar.
+func ListTar(f io.Reader) ([]string, error) {
+	tr := tar.NewReader(f)
+	var entries []string
+
+	for {
+		th, err := tr.Next()
+		if err == io.EOF {
+			// end of tar archive
+			return entries, nil
+		}
+		if err != nil {
+			return entries, err
+		}
+		entries = append(entries, th.Name)
+	}
+}
+
+// RandomTmpDirPath returns a temporary path with a random string appended.
+// It neither creates the path nor checks whether it exists.
+func RandomTmpDirPath(s string, platform string) string {
+	tmp := "/tmp"
+	if platform == "windows" {
+		tmp = os.Getenv("TEMP")
+	}
+	path := filepath.Join(tmp, fmt.Sprintf("%s.%s", s, stringutils.GenerateRandomAlphaOnlyString(10)))
+	if platform == "windows" {
+		return filepath.FromSlash(path) // Using \
+	}
+	return filepath.ToSlash(path) // Using /
+}
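A sketch of the throttled reader defined next: read a 10-byte source in 5-byte chunks, pausing between reads (values are illustrative):

stop := make(chan bool)
n, err := ConsumeWithSpeed(strings.NewReader("0123456789"), 5, 10*time.Millisecond, stop)
// n == 10 and err == nil once the reader hits EOF; sending true on stop
// would have returned early instead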
+// ConsumeWithSpeed reads chunkSize bytes from reader before sleeping
+// for interval duration. Returns the total number of bytes read. Send true to
+// the stop channel to return before reading to EOF on the reader.
+func ConsumeWithSpeed(reader io.Reader, chunkSize int, interval time.Duration, stop chan bool) (n int, err error) {
+	buffer := make([]byte, chunkSize)
+	for {
+		var readBytes int
+		readBytes, err = reader.Read(buffer)
+		n += readBytes
+		if err != nil {
+			if err == io.EOF {
+				err = nil
+			}
+			return
+		}
+		select {
+		case <-stop:
+			return
+		case <-time.After(interval):
+		}
+	}
+}
+
+// ParseCgroupPaths parses 'procCgroupData', which is the output of
+// '/proc/<pid>/cgroup', and returns a map with cgroup names as keys and
+// paths as values.
+func ParseCgroupPaths(procCgroupData string) map[string]string {
+	cgroupPaths := map[string]string{}
+	for _, line := range strings.Split(procCgroupData, "\n") {
+		parts := strings.Split(line, ":")
+		if len(parts) != 3 {
+			continue
+		}
+		cgroupPaths[parts[1]] = parts[2]
+	}
+	return cgroupPaths
+}
+
+// ChannelBuffer holds a channel of byte slices that can be populated from a
+// goroutine.
+type ChannelBuffer struct {
+	C chan []byte
+}
+
+// Write implements Writer.
+func (c *ChannelBuffer) Write(b []byte) (int, error) {
+	c.C <- b
+	return len(b), nil
+}
+
+// Close closes the go channel.
+func (c *ChannelBuffer) Close() error {
+	close(c.C)
+	return nil
+}
+
+// ReadTimeout reads the content of the channel into the specified byte slice,
+// with the specified duration as timeout.
+func (c *ChannelBuffer) ReadTimeout(p []byte, n time.Duration) (int, error) {
+	select {
+	case b := <-c.C:
+		return copy(p[0:], b), nil
+	case <-time.After(n):
+		return -1, fmt.Errorf("timeout reading from channel")
+	}
+}
+
+// ReadBody reads the specified ReadCloser's content and returns it
+func ReadBody(b io.ReadCloser) ([]byte, error) {
+	defer b.Close()
+	return ioutil.ReadAll(b)
+}
diff --git a/vendor/github.com/moby/moby/pkg/testutil/utils_test.go b/vendor/github.com/moby/moby/pkg/testutil/utils_test.go
new file mode 100644
index 000000000..d37f3f4f8
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/testutil/utils_test.go
@@ -0,0 +1,341 @@
+package testutil
+
+import (
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"testing"
+	"time"
+)
+
+func TestIsKilledFalseWithNonKilledProcess(t *testing.T) {
+	var lsCmd *exec.Cmd
+	if runtime.GOOS != "windows" {
+		lsCmd = exec.Command("ls")
+	} else {
+		lsCmd = exec.Command("cmd", "/c", "dir")
+	}
+
+	err := lsCmd.Run()
+	if IsKilled(err) {
+		t.Fatalf("Expected the ls command to not be killed, was.")
+	}
+}
+
+func TestIsKilledTrueWithKilledProcess(t *testing.T) {
+	var longCmd *exec.Cmd
+	if runtime.GOOS != "windows" {
+		longCmd = exec.Command("top")
+	} else {
+		longCmd = exec.Command("powershell", "while ($true) { sleep 1 }")
+	}
+
+	// Start a command
+	err := longCmd.Start()
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Capture the error when the process dies
+	done := make(chan error, 1)
+	go func() {
+		done <- longCmd.Wait()
+	}()
+	// Then kill it
+	longCmd.Process.Kill()
+	// Get the error
+	err = <-done
+	if !IsKilled(err) {
+		t.Fatalf("Expected the command to be killed, was not.")
+	}
+}
+
+func TestRunCommandPipelineWithOutputWithNotEnoughCmds(t *testing.T) {
+	_, _, err := RunCommandPipelineWithOutput(exec.Command("ls"))
+	expectedError := "pipeline does not have multiple cmds"
+	if err == nil || err.Error() != expectedError {
+		t.Fatalf("Expected an error with %s, got err:%s", expectedError, err)
+	}
+}
+
+func TestRunCommandPipelineWithOutputErrors(t *testing.T) {
+	p := "$PATH"
+	if runtime.GOOS == "windows" {
+		p = "%PATH%"
+	}
+	cmd1 := exec.Command("ls")
+	cmd1.Stdout = os.Stdout
+	cmd2 := exec.Command("anything really")
+	_, _, err := RunCommandPipelineWithOutput(cmd1, cmd2)
+	if err == nil || err.Error() != "cannot set stdout pipe for anything really: exec: Stdout already set" {
+		t.Fatalf("Expected an error, got %v", err)
+	}
+
+	cmdWithError := exec.Command("doesnotexists")
+	cmdCat := exec.Command("cat")
+	_, _, err = RunCommandPipelineWithOutput(cmdWithError, cmdCat)
+	if err == nil || err.Error() != `starting doesnotexists failed with error: exec: "doesnotexists": executable file not found in `+p {
+		t.Fatalf("Expected an error, got %v", err)
+	}
+}
+
+func TestRunCommandPipelineWithOutput(t *testing.T) {
+	// TODO: Should run on Solaris
+	if runtime.GOOS == "solaris" {
+		t.Skip()
+	}
+	cmds := []*exec.Cmd{
+		// Print 2 characters
+		exec.Command("echo", "-n", "11"),
+		// Count the number of chars from stdin (previous command)
+		exec.Command("wc", "-m"),
+	}
+	out, exitCode, err := RunCommandPipelineWithOutput(cmds...)
+	expectedOutput := "2\n"
+	if out != expectedOutput || exitCode != 0 || err != nil {
+		t.Fatalf("Expected %s for commands %v, got out:%s, exitCode:%d, err:%v", expectedOutput, cmds, out, exitCode, err)
+	}
+}
+
+func TestConvertSliceOfStringsToMap(t *testing.T) {
+	input := []string{"a", "b"}
+	actual := ConvertSliceOfStringsToMap(input)
+	for _, key := range input {
+		if _, ok := actual[key]; !ok {
+			t.Fatalf("Expected output to contains key %s, did not: %v", key, actual)
+		}
+	}
+}
+
+func TestCompareDirectoryEntries(t *testing.T) {
+	tmpFolder, err := ioutil.TempDir("", "integration-cli-utils-compare-directories")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpFolder)
+
+	file1 := filepath.Join(tmpFolder, "file1")
+	file2 := filepath.Join(tmpFolder, "file2")
+	os.Create(file1)
+	os.Create(file2)
+
+	fi1, err := os.Stat(file1)
+	if err != nil {
+		t.Fatal(err)
+	}
+	fi1bis, err := os.Stat(file1)
+	if err != nil {
+		t.Fatal(err)
+	}
+	fi2, err := os.Stat(file2)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	cases := []struct {
+		e1          []os.FileInfo
+		e2          []os.FileInfo
+		shouldError bool
+	}{
+		// Empty directories
+		{
+			[]os.FileInfo{},
+			[]os.FileInfo{},
+			false,
+		},
+		// Same FileInfos
+		{
+			[]os.FileInfo{fi1},
+			[]os.FileInfo{fi1},
+			false,
+		},
+		// Different FileInfos but same names
+		{
+			[]os.FileInfo{fi1},
+			[]os.FileInfo{fi1bis},
+			false,
+		},
+		// Different FileInfos, different names
+		{
+			[]os.FileInfo{fi1},
+			[]os.FileInfo{fi2},
+			true,
+		},
+	}
+	for _, elt := range cases {
+		err := CompareDirectoryEntries(elt.e1, elt.e2)
+		if elt.shouldError && err == nil {
+			t.Fatalf("Should have return an error, did not with %v and %v", elt.e1, elt.e2)
+		}
+		if !elt.shouldError && err != nil {
+			t.Fatalf("Should have not returned an error, but did : %v with %v and %v", err, elt.e1, elt.e2)
+		}
+	}
+}
+
+// FIXME make an "unhappy path" test for ListTar without "panicking" :-)
+func TestListTar(t *testing.T) {
+	// TODO Windows: Figure out why this fails. Should be portable.
+	if runtime.GOOS == "windows" {
+		t.Skip("Failing on Windows - needs further investigation")
+	}
+	tmpFolder, err := ioutil.TempDir("", "integration-cli-utils-list-tar")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpFolder)
+
+	// Let's create a Tar file
+	srcFile := filepath.Join(tmpFolder, "src")
+	tarFile := filepath.Join(tmpFolder, "src.tar")
+	os.Create(srcFile)
+	cmd := exec.Command("sh", "-c", "tar cf "+tarFile+" "+srcFile)
+	_, err = cmd.CombinedOutput()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	reader, err := os.Open(tarFile)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer reader.Close()
+
+	entries, err := ListTar(reader)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(entries) != 1 && entries[0] != "src" {
+		t.Fatalf("Expected a tar file with 1 entry (%s), got %v", srcFile, entries)
+	}
+}
+
+func TestRandomTmpDirPath(t *testing.T) {
+	path := RandomTmpDirPath("something", runtime.GOOS)
+
+	prefix := "/tmp/something"
+	if runtime.GOOS == "windows" {
+		prefix = os.Getenv("TEMP") + `\something`
+	}
+	expectedSize := len(prefix) + 11
+
+	if !strings.HasPrefix(path, prefix) {
+		t.Fatalf("Expected generated path to have '%s' as prefix, got '%s'", prefix, path)
+	}
+	if len(path) != expectedSize {
+		t.Fatalf("Expected generated path length to be %d, got %d", expectedSize, len(path))
+	}
+}
+
+func TestConsumeWithSpeed(t *testing.T) {
+	reader := strings.NewReader("1234567890")
+	chunksize := 2
+
+	bytes1, err := ConsumeWithSpeed(reader, chunksize, 10*time.Millisecond, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if bytes1 != 10 {
+		t.Fatalf("Expected to have read 10 bytes, got %d", bytes1)
+	}
+
+}
+
+func TestConsumeWithSpeedWithStop(t *testing.T) {
+	reader := strings.NewReader("1234567890")
+	chunksize := 2
+
+	stopIt := make(chan bool)
+
+	go func() {
+		time.Sleep(1 * time.Millisecond)
+		stopIt <- true
+	}()
+
+	bytes1, err := ConsumeWithSpeed(reader, chunksize, 20*time.Millisecond, stopIt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if bytes1 != 2 {
+		t.Fatalf("Expected to have read 2 bytes, got %d", bytes1)
+	}
+
+}
+
+func TestParseCgroupPathsEmpty(t *testing.T) {
+	cgroupMap := ParseCgroupPaths("")
+	if len(cgroupMap) != 0 {
+		t.Fatalf("Expected an empty map, got %v", cgroupMap)
+	}
+	cgroupMap = ParseCgroupPaths("\n")
+	if len(cgroupMap) != 0 {
+		t.Fatalf("Expected an empty map, got %v", cgroupMap)
+	}
+	cgroupMap = ParseCgroupPaths("something:else\nagain:here")
+	if len(cgroupMap) != 0 {
+		t.Fatalf("Expected an empty map, got %v", cgroupMap)
+	}
+}
+
+func TestParseCgroupPaths(t *testing.T) {
+	cgroupMap := ParseCgroupPaths("2:memory:/a\n1:cpuset:/b")
+	if len(cgroupMap) != 2 {
+		t.Fatalf("Expected a map with 2 entries, got %v", cgroupMap)
+	}
+	if value, ok := cgroupMap["memory"]; !ok || value != "/a" {
+		t.Fatalf("Expected cgroupMap to contains an entry for 'memory' with value '/a', got %v", cgroupMap)
+	}
+	if value, ok := cgroupMap["cpuset"]; !ok || value != "/b" {
+		t.Fatalf("Expected cgroupMap to contains an entry for 'cpuset' with value '/b', got %v", cgroupMap)
+	}
+}
+
+func TestChannelBufferTimeout(t *testing.T) {
+	expected := "11"
+
+	buf := &ChannelBuffer{make(chan []byte, 1)}
+	defer buf.Close()
+
+	done := make(chan struct{}, 1)
+	go func() {
+		time.Sleep(100 * time.Millisecond)
+		io.Copy(buf, strings.NewReader(expected))
+		done <- struct{}{}
+	}()
+
+	// Wait long enough
+	b := make([]byte, 2)
+	_, err := buf.ReadTimeout(b, 50*time.Millisecond)
+	if err == nil || err.Error() != "timeout reading from channel" {
+		t.Fatalf("Expected an error, got %s", err)
+	}
+	<-done
+}
+
+func TestChannelBuffer(t *testing.T) {
+	expected := "11"
+
+	buf := &ChannelBuffer{make(chan []byte, 1)}
+	defer buf.Close()
+
+	go func() {
+		time.Sleep(100 * time.Millisecond)
+		io.Copy(buf, strings.NewReader(expected))
+	}()
+
+	// Wait long enough
+	b := make([]byte, 2)
+	_, err := buf.ReadTimeout(b, 200*time.Millisecond)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if string(b) != expected {
+		t.Fatalf("Expected '%s', got '%s'", expected, string(b))
+	}
+}
diff --git a/vendor/github.com/moby/moby/pkg/tlsconfig/tlsconfig_clone.go b/vendor/github.com/moby/moby/pkg/tlsconfig/tlsconfig_clone.go
new file mode 100644
index 000000000..e4dec3a5d
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/tlsconfig/tlsconfig_clone.go
@@ -0,0 +1,11 @@
+// +build go1.8
+
+package tlsconfig
+
+import "crypto/tls"
+
+// Clone returns a clone of tls.Config. This function is provided for
+// compatibility with go1.7, which doesn't include this method in the stdlib.
+func Clone(c *tls.Config) *tls.Config {
+	return c.Clone()
+}
diff --git a/vendor/github.com/moby/moby/pkg/tlsconfig/tlsconfig_clone_go17.go b/vendor/github.com/moby/moby/pkg/tlsconfig/tlsconfig_clone_go17.go
new file mode 100644
index 000000000..0d5b448fe
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/tlsconfig/tlsconfig_clone_go17.go
@@ -0,0 +1,33 @@
+// +build go1.7,!go1.8
+
+package tlsconfig
+
+import "crypto/tls"
+
+// Clone returns a clone of tls.Config. This function is provided for
+// compatibility with go1.7, which doesn't include this method in the stdlib.
+func Clone(c *tls.Config) *tls.Config {
+	return &tls.Config{
+		Rand:                        c.Rand,
+		Time:                        c.Time,
+		Certificates:                c.Certificates,
+		NameToCertificate:           c.NameToCertificate,
+		GetCertificate:              c.GetCertificate,
+		RootCAs:                     c.RootCAs,
+		NextProtos:                  c.NextProtos,
+		ServerName:                  c.ServerName,
+		ClientAuth:                  c.ClientAuth,
+		ClientCAs:                   c.ClientCAs,
+		InsecureSkipVerify:          c.InsecureSkipVerify,
+		CipherSuites:                c.CipherSuites,
+		PreferServerCipherSuites:    c.PreferServerCipherSuites,
+		SessionTicketsDisabled:      c.SessionTicketsDisabled,
+		SessionTicketKey:            c.SessionTicketKey,
+		ClientSessionCache:          c.ClientSessionCache,
+		MinVersion:                  c.MinVersion,
+		MaxVersion:                  c.MaxVersion,
+		CurvePreferences:            c.CurvePreferences,
+		DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
+		Renegotiation:               c.Renegotiation,
+	}
+}
diff --git a/vendor/github.com/moby/moby/pkg/truncindex/truncindex.go b/vendor/github.com/moby/moby/pkg/truncindex/truncindex.go
new file mode 100644
index 000000000..74776e65e
--- /dev/null
+++ b/vendor/github.com/moby/moby/pkg/truncindex/truncindex.go
@@ -0,0 +1,139 @@
+// Package truncindex provides a general 'index tree', used by Docker
+// in order to be able to reference containers by only a few unambiguous
+// characters of their ID.
+package truncindex
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+	"sync"
+
+	"github.com/tchap/go-patricia/patricia"
+)
+
+var (
+	// ErrEmptyPrefix is an error returned if the prefix was empty.
+	ErrEmptyPrefix = errors.New("Prefix can't be empty")
+
+	// ErrIllegalChar is returned when a space is in the ID
+	ErrIllegalChar = errors.New("illegal character: ' '")
+
+	// ErrNotExist is returned when an ID or its prefix is not found in the index.
+	ErrNotExist = errors.New("ID does not exist")
+)
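A sketch of the API the rest of this file builds up (the IDs are illustrative 40-character hex strings, sharing a two-character prefix):

idx := truncindex.NewTruncIndex([]string{
	"19b36c2c326ccc11e726eee6ee78a0baf166ef96",
	"19d9e2f9f3a8c1f0f7b6c11e726eee6ee78a0ba2",
})
full, err := idx.Get("19b") // unambiguous prefix: resolves to the first full ID
_, err = idx.Get("19")      // ambiguous: returns an ErrAmbiguousPrefix error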
+// ErrAmbiguousPrefix is returned if the prefix was ambiguous
+// (multiple ids for the prefix).
+type ErrAmbiguousPrefix struct {
+	prefix string
+}
+
+func (e ErrAmbiguousPrefix) Error() string {
+	return fmt.Sprintf("Multiple IDs found with provided prefix: %s", e.prefix)
+}
+
+// TruncIndex allows the retrieval of string identifiers by any of their unique prefixes.
+// This is used to retrieve image and container IDs by more convenient shorthand prefixes.
+type TruncIndex struct {
+	sync.RWMutex
+	trie *patricia.Trie
+	ids  map[string]struct{}
+}
+
+// NewTruncIndex creates a new TruncIndex and initializes with a list of IDs.
+func NewTruncIndex(ids []string) (idx *TruncIndex) {
+	idx = &TruncIndex{
+		ids: make(map[string]struct{}),
+
+		// Change the patricia max prefix per node length,
+		// because our IDs are always 64 characters long.
+		trie: patricia.NewTrie(patricia.MaxPrefixPerNode(64)),
+	}
+	for _, id := range ids {
+		idx.addID(id)
+	}
+	return
+}
+
+func (idx *TruncIndex) addID(id string) error {
+	if strings.Contains(id, " ") {
+		return ErrIllegalChar
+	}
+	if id == "" {
+		return ErrEmptyPrefix
+	}
+	if _, exists := idx.ids[id]; exists {
+		return fmt.Errorf("id already exists: '%s'", id)
+	}
+	idx.ids[id] = struct{}{}
+	if inserted := idx.trie.Insert(patricia.Prefix(id), struct{}{}); !inserted {
+		return fmt.Errorf("failed to insert id: %s", id)
+	}
+	return nil
+}
+
+// Add adds a new ID to the TruncIndex.
+func (idx *TruncIndex) Add(id string) error {
+	idx.Lock()
+	defer idx.Unlock()
+	return idx.addID(id)
+}
+
+// Delete removes an ID from the TruncIndex. If the ID does not exist, an
+// error is returned.
+func (idx *TruncIndex) Delete(id string) error {
+	idx.Lock()
+	defer idx.Unlock()
+	if _, exists := idx.ids[id]; !exists || id == "" {
+		return fmt.Errorf("no such id: '%s'", id)
+	}
+	delete(idx.ids, id)
+	if deleted := idx.trie.Delete(patricia.Prefix(id)); !deleted {
+		return fmt.Errorf("no such id: '%s'", id)
+	}
+	return nil
+}
+
+// Get retrieves an ID from the TruncIndex. If there are multiple IDs
+// with the given prefix, an error is returned.
+func (idx *TruncIndex) Get(s string) (string, error) {
+	if s == "" {
+		return "", ErrEmptyPrefix
+	}
+	var (
+		id string
+	)
+	subTreeVisitFunc := func(prefix patricia.Prefix, item patricia.Item) error {
+		if id != "" {
+			// the prefix is ambiguous if two or more IDs match it
+			id = ""
+			return ErrAmbiguousPrefix{prefix: string(prefix)}
+		}
+		id = string(prefix)
+		return nil
+	}
+
+	idx.RLock()
+	defer idx.RUnlock()
+	if err := idx.trie.VisitSubtree(patricia.Prefix(s), subTreeVisitFunc); err != nil {
+		return "", err
+	}
+	if id != "" {
+		return id, nil
+	}
+	return "", ErrNotExist
+}
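A small sketch of safe usage of Iterate, defined next: snapshot the IDs inside the handler and touch the index again only after Iterate returns, per the locking caveat in its doc comment.

var snapshot []string
idx.Iterate(func(id string) {
	snapshot = append(snapshot, id) // no idx.Get/Add/Delete in here
})

+// Iterate iterates over all stored IDs and passes each of them to the given
+// handler. Take care that the handler method does not call any public
+// method on truncindex as the internal locking is not reentrant/recursive
+// and will result in deadlock.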
+func (idx *TruncIndex) Iterate(handler func(id string)) { + idx.Lock() + defer idx.Unlock() + idx.trie.Visit(func(prefix patricia.Prefix, item patricia.Item) error { + handler(string(prefix)) + return nil + }) +} diff --git a/vendor/github.com/moby/moby/pkg/truncindex/truncindex_test.go b/vendor/github.com/moby/moby/pkg/truncindex/truncindex_test.go new file mode 100644 index 000000000..89658cabb --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/truncindex/truncindex_test.go @@ -0,0 +1,453 @@ +package truncindex + +import ( + "math/rand" + "testing" + "time" + + "github.com/docker/docker/pkg/stringid" +) + +// Test the behavior of TruncIndex, an index for querying IDs from a non-conflicting prefix. +func TestTruncIndex(t *testing.T) { + ids := []string{} + index := NewTruncIndex(ids) + // Get on an empty index + if _, err := index.Get("foobar"); err == nil { + t.Fatal("Get on an empty index should return an error") + } + + // Spaces should be illegal in an id + if err := index.Add("I have a space"); err == nil { + t.Fatalf("Adding an id with ' ' should return an error") + } + + id := "99b36c2c326ccc11e726eee6ee78a0baf166ef96" + // Add an id + if err := index.Add(id); err != nil { + t.Fatal(err) + } + + // Add an empty id (should fail) + if err := index.Add(""); err == nil { + t.Fatalf("Adding an empty id should return an error") + } + + // Get a non-existing id + assertIndexGet(t, index, "abracadabra", "", true) + // Get an empty id + assertIndexGet(t, index, "", "", true) + // Get the exact id + assertIndexGet(t, index, id, id, false) + // The first letter should match + assertIndexGet(t, index, id[:1], id, false) + // The first half should match + assertIndexGet(t, index, id[:len(id)/2], id, false) + // The second half should NOT match + assertIndexGet(t, index, id[len(id)/2:], "", true) + + id2 := id[:6] + "blabla" + // Add an id + if err := index.Add(id2); err != nil { + t.Fatal(err) + } + // Both exact IDs should work + assertIndexGet(t, index, id, id, false) + assertIndexGet(t, index, id2, id2, false) + + // 6 characters or less should conflict + assertIndexGet(t, index, id[:6], "", true) + assertIndexGet(t, index, id[:4], "", true) + assertIndexGet(t, index, id[:1], "", true) + + // An ambiguous id prefix should return an error + if _, err := index.Get(id[:4]); err == nil { + t.Fatal("An ambiguous id prefix should return an error") + } + + // 7 characters should NOT conflict + assertIndexGet(t, index, id[:7], id, false) + assertIndexGet(t, index, id2[:7], id2, false) + + // Deleting a non-existing id should return an error + if err := index.Delete("non-existing"); err == nil { + t.Fatalf("Deleting a non-existing id should return an error") + } + + // Deleting an empty id should return an error + if err := index.Delete(""); err == nil { + t.Fatal("Deleting an empty id should return an error") + } + + // Deleting id2 should remove conflicts + if err := index.Delete(id2); err != nil { + t.Fatal(err) + } + // id2 should no longer work + assertIndexGet(t, index, id2, "", true) + assertIndexGet(t, index, id2[:7], "", true) + assertIndexGet(t, index, id2[:11], "", true) + + // conflicts between id and id2 should be gone + assertIndexGet(t, index, id[:6], id, false) + assertIndexGet(t, index, id[:4], id, false) + assertIndexGet(t, index, id[:1], id, false) + + // non-conflicting substrings should still not conflict + assertIndexGet(t, index, id[:7], id, false) + assertIndexGet(t, index, id[:15], id, false) + assertIndexGet(t, index, id, id, false) + + assertIndexIterate(t) + 
assertIndexIterateDoNotPanic(t) +} + +func assertIndexIterate(t *testing.T) { + ids := []string{ + "19b36c2c326ccc11e726eee6ee78a0baf166ef96", + "28b36c2c326ccc11e726eee6ee78a0baf166ef96", + "37b36c2c326ccc11e726eee6ee78a0baf166ef96", + "46b36c2c326ccc11e726eee6ee78a0baf166ef96", + } + + index := NewTruncIndex(ids) + + index.Iterate(func(targetId string) { + for _, id := range ids { + if targetId == id { + return + } + } + + t.Fatalf("An unknown ID '%s'", targetId) + }) +} + +func assertIndexIterateDoNotPanic(t *testing.T) { + ids := []string{ + "19b36c2c326ccc11e726eee6ee78a0baf166ef96", + "28b36c2c326ccc11e726eee6ee78a0baf166ef96", + } + + index := NewTruncIndex(ids) + iterationStarted := make(chan bool, 1) + + go func() { + <-iterationStarted + index.Delete("19b36c2c326ccc11e726eee6ee78a0baf166ef96") + }() + + index.Iterate(func(targetId string) { + if targetId == "19b36c2c326ccc11e726eee6ee78a0baf166ef96" { + iterationStarted <- true + time.Sleep(100 * time.Millisecond) + } + }) +} + +func assertIndexGet(t *testing.T, index *TruncIndex, input, expectedResult string, expectError bool) { + if result, err := index.Get(input); err != nil && !expectError { + t.Fatalf("Unexpected error getting '%s': %s", input, err) + } else if err == nil && expectError { + t.Fatalf("Getting '%s' should return an error, not '%s'", input, result) + } else if result != expectedResult { + t.Fatalf("Getting '%s' returned '%s' instead of '%s'", input, result, expectedResult) + } +} + +func BenchmarkTruncIndexAdd100(b *testing.B) { + var testSet []string + for i := 0; i < 100; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexAdd250(b *testing.B) { + var testSet []string + for i := 0; i < 250; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexAdd500(b *testing.B) { + var testSet []string + for i := 0; i < 500; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexGet100(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 100; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexGet250(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 250; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, id := range testKeys { + if res, err 
:= index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexGet500(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 500; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexDelete100(b *testing.B) { + var testSet []string + for i := 0; i < 100; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + b.StartTimer() + for _, id := range testSet { + if err := index.Delete(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexDelete250(b *testing.B) { + var testSet []string + for i := 0; i < 250; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + b.StartTimer() + for _, id := range testSet { + if err := index.Delete(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexDelete500(b *testing.B) { + var testSet []string + for i := 0; i < 500; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + b.StartTimer() + for _, id := range testSet { + if err := index.Delete(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexNew100(b *testing.B) { + var testSet []string + for i := 0; i < 100; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + NewTruncIndex(testSet) + } +} + +func BenchmarkTruncIndexNew250(b *testing.B) { + var testSet []string + for i := 0; i < 250; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + NewTruncIndex(testSet) + } +} + +func BenchmarkTruncIndexNew500(b *testing.B) { + var testSet []string + for i := 0; i < 500; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + NewTruncIndex(testSet) + } +} + +func BenchmarkTruncIndexAddGet100(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 500; i++ { + id := stringid.GenerateNonCryptoID() + testSet = append(testSet, id) + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexAddGet250(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 500; i++ { + id := stringid.GenerateNonCryptoID() + testSet = append(testSet, id) + l := 
rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexAddGet500(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 500; i++ { + id := stringid.GenerateNonCryptoID() + testSet = append(testSet, id) + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} diff --git a/vendor/github.com/moby/moby/pkg/urlutil/urlutil.go b/vendor/github.com/moby/moby/pkg/urlutil/urlutil.go new file mode 100644 index 000000000..cfcd58203 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/urlutil/urlutil.go @@ -0,0 +1,44 @@ +// Package urlutil provides helper functions to detect what kind of URL a string is. +// It supports HTTP(S) URLs, git URLs and transport URLs (tcp://, …) +package urlutil + +import ( + "regexp" + "strings" +) + +var ( + validPrefixes = map[string][]string{ + "url": {"http://", "https://"}, + "git": {"git://", "github.com/", "git@"}, + "transport": {"tcp://", "tcp+tls://", "udp://", "unix://", "unixgram://"}, + } + urlPathWithFragmentSuffix = regexp.MustCompile(".git(?:#.+)?$") +) + +// IsURL returns true if the provided str is an HTTP(S) URL. +func IsURL(str string) bool { + return checkURL(str, "url") +} + +// IsGitURL returns true if the provided str is a git repository URL. +func IsGitURL(str string) bool { + if IsURL(str) && urlPathWithFragmentSuffix.MatchString(str) { + return true + } + return checkURL(str, "git") +} + +// IsTransportURL returns true if the provided str is a transport (tcp, tcp+tls, udp, unix) URL. 
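+//
+// A usage sketch (editor's illustration, not part of the upstream file;
+// the inputs are hypothetical):
+//
+//	IsTransportURL("tcp://example.com:2375") // true
+//	IsTransportURL("http://example.com")     // false: an HTTP URL, not a transport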
+func IsTransportURL(str string) bool { + return checkURL(str, "transport") +} + +func checkURL(str, kind string) bool { + for _, prefix := range validPrefixes[kind] { + if strings.HasPrefix(str, prefix) { + return true + } + } + return false +} diff --git a/vendor/github.com/moby/moby/pkg/urlutil/urlutil_test.go b/vendor/github.com/moby/moby/pkg/urlutil/urlutil_test.go new file mode 100644 index 000000000..e7579f554 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/urlutil/urlutil_test.go @@ -0,0 +1,56 @@ +package urlutil + +import "testing" + +var ( + gitUrls = []string{ + "git://github.com/docker/docker", + "git@github.com:docker/docker.git", + "git@bitbucket.org:atlassianlabs/atlassian-docker.git", + "https://github.com/docker/docker.git", + "http://github.com/docker/docker.git", + "http://github.com/docker/docker.git#branch", + "http://github.com/docker/docker.git#:dir", + } + incompleteGitUrls = []string{ + "github.com/docker/docker", + } + invalidGitUrls = []string{ + "http://github.com/docker/docker.git:#branch", + } + transportUrls = []string{ + "tcp://example.com", + "tcp+tls://example.com", + "udp://example.com", + "unix:///example", + "unixgram:///example", + } +) + +func TestIsGIT(t *testing.T) { + for _, url := range gitUrls { + if !IsGitURL(url) { + t.Fatalf("%q should be detected as valid Git url", url) + } + } + + for _, url := range incompleteGitUrls { + if !IsGitURL(url) { + t.Fatalf("%q should be detected as valid Git url", url) + } + } + + for _, url := range invalidGitUrls { + if IsGitURL(url) { + t.Fatalf("%q should not be detected as valid Git prefix", url) + } + } +} + +func TestIsTransport(t *testing.T) { + for _, url := range transportUrls { + if !IsTransportURL(url) { + t.Fatalf("%q should be detected as valid Transport url", url) + } + } +} diff --git a/vendor/github.com/moby/moby/pkg/useragent/README.md b/vendor/github.com/moby/moby/pkg/useragent/README.md new file mode 100644 index 000000000..d9cb367d1 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/useragent/README.md @@ -0,0 +1 @@ +This package provides helper functions to pack version information into a single User-Agent header. diff --git a/vendor/github.com/moby/moby/pkg/useragent/useragent.go b/vendor/github.com/moby/moby/pkg/useragent/useragent.go new file mode 100644 index 000000000..1137db51b --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/useragent/useragent.go @@ -0,0 +1,55 @@ +// Package useragent provides helper functions to pack +// version information into a single User-Agent header. +package useragent + +import ( + "strings" +) + +// VersionInfo is used to model UserAgent versions. +type VersionInfo struct { + Name string + Version string +} + +func (vi *VersionInfo) isValid() bool { + const stopChars = " \t\r\n/" + name := vi.Name + vers := vi.Version + if len(name) == 0 || strings.ContainsAny(name, stopChars) { + return false + } + if len(vers) == 0 || strings.ContainsAny(vers, stopChars) { + return false + } + return true +} + +// AppendVersions converts versions to a string and appends the string to the string base. +// +// Each VersionInfo will be converted to a string in the format of +// "product/version", where "product" is taken from the Name field and +// "version" from the Version field. The resulting pieces of version +// information are concatenated and separated by spaces. +// +// Example: +// AppendVersions("base", VersionInfo{"foo", "1.0"}, VersionInfo{"bar", "2.0"}) +// results in "base foo/1.0 bar/2.0". 
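+//
+// Editor's note (illustrative, not upstream documentation): entries that fail
+// isValid above (an empty field, or one containing spaces, tabs, newlines or
+// '/') are silently skipped, so
+//
+//	AppendVersions("base", VersionInfo{"bad name", "1.0"})
+//
+// returns just "base".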
+func AppendVersions(base string, versions ...VersionInfo) string { + if len(versions) == 0 { + return base + } + + verstrs := make([]string, 0, 1+len(versions)) + if len(base) > 0 { + verstrs = append(verstrs, base) + } + + for _, v := range versions { + if !v.isValid() { + continue + } + verstrs = append(verstrs, v.Name+"/"+v.Version) + } + return strings.Join(verstrs, " ") +} diff --git a/vendor/github.com/moby/moby/pkg/useragent/useragent_test.go b/vendor/github.com/moby/moby/pkg/useragent/useragent_test.go new file mode 100644 index 000000000..0ad7243a6 --- /dev/null +++ b/vendor/github.com/moby/moby/pkg/useragent/useragent_test.go @@ -0,0 +1,31 @@ +package useragent + +import "testing" + +func TestVersionInfo(t *testing.T) { + vi := VersionInfo{"foo", "bar"} + if !vi.isValid() { + t.Fatalf("VersionInfo should be valid") + } + vi = VersionInfo{"", "bar"} + if vi.isValid() { + t.Fatalf("Expected VersionInfo to be invalid") + } + vi = VersionInfo{"foo", ""} + if vi.isValid() { + t.Fatalf("Expected VersionInfo to be invalid") + } +} + +func TestAppendVersions(t *testing.T) { + vis := []VersionInfo{ + {"foo", "1.0"}, + {"bar", "0.1"}, + {"pi", "3.1.4"}, + } + v := AppendVersions("base", vis...) + expect := "base foo/1.0 bar/0.1 pi/3.1.4" + if v != expect { + t.Fatalf("expected %q, got %q", expect, v) + } +} diff --git a/vendor/github.com/moby/moby/plugin/backend_linux.go b/vendor/github.com/moby/moby/plugin/backend_linux.go new file mode 100644 index 000000000..055b8e310 --- /dev/null +++ b/vendor/github.com/moby/moby/plugin/backend_linux.go @@ -0,0 +1,853 @@ +// +build linux + +package plugin + +import ( + "archive/tar" + "compress/gzip" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "path" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/distribution" + progressutils "github.com/docker/docker/distribution/utils" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/authorization" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/plugin/v2" + refstore "github.com/docker/docker/reference" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +var acceptedPluginFilterTags = map[string]bool{ + "enabled": true, + "capability": true, +} + +// Disable deactivates a plugin. This means resources (volumes, networks) can't use it. 
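+//
+// Illustrative call (editor's sketch; the plugin reference is hypothetical):
+//
+//	err := pm.Disable("sample/plugin:latest", &types.PluginDisableConfig{ForceDisable: false})
+//
+// With ForceDisable unset, the call fails while the plugin still has references.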
+func (pm *Manager) Disable(refOrID string, config *types.PluginDisableConfig) error { + p, err := pm.config.Store.GetV2Plugin(refOrID) + if err != nil { + return err + } + pm.mu.RLock() + c := pm.cMap[p] + pm.mu.RUnlock() + + if !config.ForceDisable && p.GetRefCount() > 0 { + return fmt.Errorf("plugin %s is in use", p.Name()) + } + + for _, typ := range p.GetTypes() { + if typ.Capability == authorization.AuthZApiImplements { + pm.config.AuthzMiddleware.RemovePlugin(p.Name()) + } + } + + if err := pm.disable(p, c); err != nil { + return err + } + pm.publisher.Publish(EventDisable{Plugin: p.PluginObj}) + pm.config.LogPluginEvent(p.GetID(), refOrID, "disable") + return nil +} + +// Enable activates a plugin, which implies that it is ready to be used by containers. +func (pm *Manager) Enable(refOrID string, config *types.PluginEnableConfig) error { + p, err := pm.config.Store.GetV2Plugin(refOrID) + if err != nil { + return err + } + + c := &controller{timeoutInSecs: config.Timeout} + if err := pm.enable(p, c, false); err != nil { + return err + } + pm.publisher.Publish(EventEnable{Plugin: p.PluginObj}) + pm.config.LogPluginEvent(p.GetID(), refOrID, "enable") + return nil +} + +// Inspect examines a plugin config +func (pm *Manager) Inspect(refOrID string) (tp *types.Plugin, err error) { + p, err := pm.config.Store.GetV2Plugin(refOrID) + if err != nil { + return nil, err + } + + return &p.PluginObj, nil +} + +func (pm *Manager) pull(ctx context.Context, ref reference.Named, config *distribution.ImagePullConfig, outStream io.Writer) error { + if outStream != nil { + // Include a buffer so that slow client connections don't affect + // transfer performance. + progressChan := make(chan progress.Progress, 100) + + writesDone := make(chan struct{}) + + defer func() { + close(progressChan) + <-writesDone + }() + + var cancelFunc context.CancelFunc + ctx, cancelFunc = context.WithCancel(ctx) + + go func() { + progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan) + close(writesDone) + }() + + config.ProgressOutput = progress.ChanOutput(progressChan) + } else { + config.ProgressOutput = progress.DiscardOutput() + } + return distribution.Pull(ctx, ref, config) +} + +type tempConfigStore struct { + config []byte + configDigest digest.Digest +} + +func (s *tempConfigStore) Put(c []byte) (digest.Digest, error) { + dgst := digest.FromBytes(c) + + s.config = c + s.configDigest = dgst + + return dgst, nil +} + +func (s *tempConfigStore) Get(d digest.Digest) ([]byte, error) { + if d != s.configDigest { + return nil, fmt.Errorf("digest not found") + } + return s.config, nil +} + +func (s *tempConfigStore) RootFSAndPlatformFromConfig(c []byte) (*image.RootFS, layer.Platform, error) { + return configToRootFS(c) +} + +func computePrivileges(c types.PluginConfig) (types.PluginPrivileges, error) { + var privileges types.PluginPrivileges + if c.Network.Type != "null" && c.Network.Type != "bridge" && c.Network.Type != "" { + privileges = append(privileges, types.PluginPrivilege{ + Name: "network", + Description: "permissions to access a network", + Value: []string{c.Network.Type}, + }) + } + if c.IpcHost { + privileges = append(privileges, types.PluginPrivilege{ + Name: "host ipc namespace", + Description: "allow access to host ipc namespace", + Value: []string{"true"}, + }) + } + if c.PidHost { + privileges = append(privileges, types.PluginPrivilege{ + Name: "host pid namespace", + Description: "allow access to host pid namespace", + Value: []string{"true"}, + }) + } + for _, mount := range 
c.Mounts { + if mount.Source != nil { + privileges = append(privileges, types.PluginPrivilege{ + Name: "mount", + Description: "host path to mount", + Value: []string{*mount.Source}, + }) + } + } + for _, device := range c.Linux.Devices { + if device.Path != nil { + privileges = append(privileges, types.PluginPrivilege{ + Name: "device", + Description: "host device to access", + Value: []string{*device.Path}, + }) + } + } + if c.Linux.AllowAllDevices { + privileges = append(privileges, types.PluginPrivilege{ + Name: "allow-all-devices", + Description: "allow 'rwm' access to all devices", + Value: []string{"true"}, + }) + } + if len(c.Linux.Capabilities) > 0 { + privileges = append(privileges, types.PluginPrivilege{ + Name: "capabilities", + Description: "list of additional capabilities required", + Value: c.Linux.Capabilities, + }) + } + + return privileges, nil +} + +// Privileges pulls a plugin config and computes the privileges required to install it. +func (pm *Manager) Privileges(ctx context.Context, ref reference.Named, metaHeader http.Header, authConfig *types.AuthConfig) (types.PluginPrivileges, error) { + // create image store instance + cs := &tempConfigStore{} + + // DownloadManager not defined because only pulling configuration. + pluginPullConfig := &distribution.ImagePullConfig{ + Config: distribution.Config{ + MetaHeaders: metaHeader, + AuthConfig: authConfig, + RegistryService: pm.config.RegistryService, + ImageEventLogger: func(string, string, string) {}, + ImageStore: cs, + }, + Schema2Types: distribution.PluginTypes, + } + + if err := pm.pull(ctx, ref, pluginPullConfig, nil); err != nil { + return nil, err + } + + if cs.config == nil { + return nil, errors.New("no configuration pulled") + } + var config types.PluginConfig + if err := json.Unmarshal(cs.config, &config); err != nil { + return nil, err + } + + return computePrivileges(config) +} + +// Upgrade upgrades a plugin +func (pm *Manager) Upgrade(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer) (err error) { + p, err := pm.config.Store.GetV2Plugin(name) + if err != nil { + return errors.Wrap(err, "plugin must be installed before upgrading") + } + + if p.IsEnabled() { + return fmt.Errorf("plugin must be disabled before upgrading") + } + + pm.muGC.RLock() + defer pm.muGC.RUnlock() + + // revalidate because Pull is public + if _, err := reference.ParseNormalizedNamed(name); err != nil { + return errors.Wrapf(err, "failed to parse %q", name) + } + + tmpRootFSDir, err := ioutil.TempDir(pm.tmpDir(), ".rootfs") + if err != nil { + return err + } + defer os.RemoveAll(tmpRootFSDir) + + dm := &downloadManager{ + tmpDir: tmpRootFSDir, + blobStore: pm.blobStore, + } + + pluginPullConfig := &distribution.ImagePullConfig{ + Config: distribution.Config{ + MetaHeaders: metaHeader, + AuthConfig: authConfig, + RegistryService: pm.config.RegistryService, + ImageEventLogger: pm.config.LogPluginEvent, + ImageStore: dm, + }, + DownloadManager: dm, // todo: reevaluate if possible to substitute distribution/xfer dependencies instead + Schema2Types: distribution.PluginTypes, + } + + err = pm.pull(ctx, ref, pluginPullConfig, outStream) + if err != nil { + go pm.GC() + return err + } + + if err := pm.upgradePlugin(p, dm.configDigest, dm.blobs, tmpRootFSDir, &privileges); err != nil { + return err + } + p.PluginObj.PluginReference = ref.String() + return nil +} + +// Pull pulls a plugin, checks that the correct privileges are
provided, and installs the plugin. +func (pm *Manager) Pull(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer, opts ...CreateOpt) (err error) { + pm.muGC.RLock() + defer pm.muGC.RUnlock() + + // revalidate because Pull is public + nameref, err := reference.ParseNormalizedNamed(name) + if err != nil { + return errors.Wrapf(err, "failed to parse %q", name) + } + name = reference.FamiliarString(reference.TagNameOnly(nameref)) + + if err := pm.config.Store.validateName(name); err != nil { + return err + } + + tmpRootFSDir, err := ioutil.TempDir(pm.tmpDir(), ".rootfs") + if err != nil { + return err + } + defer os.RemoveAll(tmpRootFSDir) + + dm := &downloadManager{ + tmpDir: tmpRootFSDir, + blobStore: pm.blobStore, + } + + pluginPullConfig := &distribution.ImagePullConfig{ + Config: distribution.Config{ + MetaHeaders: metaHeader, + AuthConfig: authConfig, + RegistryService: pm.config.RegistryService, + ImageEventLogger: pm.config.LogPluginEvent, + ImageStore: dm, + }, + DownloadManager: dm, // todo: reevaluate if possible to substitute distribution/xfer dependencies instead + Schema2Types: distribution.PluginTypes, + } + + err = pm.pull(ctx, ref, pluginPullConfig, outStream) + if err != nil { + go pm.GC() + return err + } + + refOpt := func(p *v2.Plugin) { + p.PluginObj.PluginReference = ref.String() + } + optsList := make([]CreateOpt, 0, len(opts)+1) + optsList = append(optsList, opts...) + optsList = append(optsList, refOpt) + + p, err := pm.createPlugin(name, dm.configDigest, dm.blobs, tmpRootFSDir, &privileges, optsList...) + if err != nil { + return err + } + + pm.publisher.Publish(EventCreate{Plugin: p.PluginObj}) + return nil +} + +// List displays the list of plugins and associated metadata. +func (pm *Manager) List(pluginFilters filters.Args) ([]types.Plugin, error) { + if err := pluginFilters.Validate(acceptedPluginFilterTags); err != nil { + return nil, err + } + + enabledOnly := false + disabledOnly := false + if pluginFilters.Include("enabled") { + if pluginFilters.ExactMatch("enabled", "true") { + enabledOnly = true + } else if pluginFilters.ExactMatch("enabled", "false") { + disabledOnly = true + } else { + return nil, fmt.Errorf("Invalid filter 'enabled=%s'", pluginFilters.Get("enabled")) + } + } + + plugins := pm.config.Store.GetAll() + out := make([]types.Plugin, 0, len(plugins)) + +next: + for _, p := range plugins { + if enabledOnly && !p.PluginObj.Enabled { + continue + } + if disabledOnly && p.PluginObj.Enabled { + continue + } + if pluginFilters.Include("capability") { + for _, f := range p.GetTypes() { + if !pluginFilters.Match("capability", f.Capability) { + continue next + } + } + } + out = append(out, p.PluginObj) + } + return out, nil +} + +// Push pushes a plugin to the store. +func (pm *Manager) Push(ctx context.Context, name string, metaHeader http.Header, authConfig *types.AuthConfig, outStream io.Writer) error { + p, err := pm.config.Store.GetV2Plugin(name) + if err != nil { + return err + } + + ref, err := reference.ParseNormalizedNamed(p.Name()) + if err != nil { + return errors.Wrapf(err, "plugin has invalid name %v for push", p.Name()) + } + + var po progress.Output + if outStream != nil { + // Include a buffer so that slow client connections don't affect + // transfer performance. 
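+ // (Editor's note: this mirrors the buffered-progress pattern in pull
+ // above; up to 100 progress events can queue before the producer blocks
+ // on the writer goroutine below.)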
+ progressChan := make(chan progress.Progress, 100) + + writesDone := make(chan struct{}) + + defer func() { + close(progressChan) + <-writesDone + }() + + var cancelFunc context.CancelFunc + ctx, cancelFunc = context.WithCancel(ctx) + + go func() { + progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan) + close(writesDone) + }() + + po = progress.ChanOutput(progressChan) + } else { + po = progress.DiscardOutput() + } + + // TODO: replace these with manager + is := &pluginConfigStore{ + pm: pm, + plugin: p, + } + ls := &pluginLayerProvider{ + pm: pm, + plugin: p, + } + rs := &pluginReference{ + name: ref, + pluginID: p.Config, + } + + uploadManager := xfer.NewLayerUploadManager(3) + + imagePushConfig := &distribution.ImagePushConfig{ + Config: distribution.Config{ + MetaHeaders: metaHeader, + AuthConfig: authConfig, + ProgressOutput: po, + RegistryService: pm.config.RegistryService, + ReferenceStore: rs, + ImageEventLogger: pm.config.LogPluginEvent, + ImageStore: is, + RequireSchema2: true, + }, + ConfigMediaType: schema2.MediaTypePluginConfig, + LayerStore: ls, + UploadManager: uploadManager, + } + + return distribution.Push(ctx, ref, imagePushConfig) +} + +type pluginReference struct { + name reference.Named + pluginID digest.Digest +} + +func (r *pluginReference) References(id digest.Digest) []reference.Named { + if r.pluginID != id { + return nil + } + return []reference.Named{r.name} +} + +func (r *pluginReference) ReferencesByName(ref reference.Named) []refstore.Association { + return []refstore.Association{ + { + Ref: r.name, + ID: r.pluginID, + }, + } +} + +func (r *pluginReference) Get(ref reference.Named) (digest.Digest, error) { + if r.name.String() != ref.String() { + return digest.Digest(""), refstore.ErrDoesNotExist + } + return r.pluginID, nil +} + +func (r *pluginReference) AddTag(ref reference.Named, id digest.Digest, force bool) error { + // Read only, ignore + return nil +} +func (r *pluginReference) AddDigest(ref reference.Canonical, id digest.Digest, force bool) error { + // Read only, ignore + return nil +} +func (r *pluginReference) Delete(ref reference.Named) (bool, error) { + // Read only, ignore + return false, nil +} + +type pluginConfigStore struct { + pm *Manager + plugin *v2.Plugin +} + +func (s *pluginConfigStore) Put([]byte) (digest.Digest, error) { + return digest.Digest(""), errors.New("cannot store config on push") +} + +func (s *pluginConfigStore) Get(d digest.Digest) ([]byte, error) { + if s.plugin.Config != d { + return nil, errors.New("plugin not found") + } + rwc, err := s.pm.blobStore.Get(d) + if err != nil { + return nil, err + } + defer rwc.Close() + return ioutil.ReadAll(rwc) +} + +func (s *pluginConfigStore) RootFSAndPlatformFromConfig(c []byte) (*image.RootFS, layer.Platform, error) { + return configToRootFS(c) +} + +type pluginLayerProvider struct { + pm *Manager + plugin *v2.Plugin +} + +func (p *pluginLayerProvider) Get(id layer.ChainID) (distribution.PushLayer, error) { + rootFS := rootFSFromPlugin(p.plugin.PluginObj.Config.Rootfs) + var i int + for i = 1; i <= len(rootFS.DiffIDs); i++ { + if layer.CreateChainID(rootFS.DiffIDs[:i]) == id { + break + } + } + if i > len(rootFS.DiffIDs) { + return nil, errors.New("layer not found") + } + return &pluginLayer{ + pm: p.pm, + diffIDs: rootFS.DiffIDs[:i], + blobs: p.plugin.Blobsums[:i], + }, nil +} + +type pluginLayer struct { + pm *Manager + diffIDs []layer.DiffID + blobs []digest.Digest +} + +func (l *pluginLayer) ChainID() layer.ChainID { + return 
layer.CreateChainID(l.diffIDs) +} + +func (l *pluginLayer) DiffID() layer.DiffID { + return l.diffIDs[len(l.diffIDs)-1] +} + +func (l *pluginLayer) Parent() distribution.PushLayer { + if len(l.diffIDs) == 1 { + return nil + } + return &pluginLayer{ + pm: l.pm, + diffIDs: l.diffIDs[:len(l.diffIDs)-1], + blobs: l.blobs[:len(l.diffIDs)-1], + } +} + +func (l *pluginLayer) Open() (io.ReadCloser, error) { + return l.pm.blobStore.Get(l.blobs[len(l.diffIDs)-1]) +} + +func (l *pluginLayer) Size() (int64, error) { + return l.pm.blobStore.Size(l.blobs[len(l.diffIDs)-1]) +} + +func (l *pluginLayer) MediaType() string { + return schema2.MediaTypeLayer +} + +func (l *pluginLayer) Release() { + // Nothing needs to be released, no references held +} + +// Remove deletes plugin's root directory. +func (pm *Manager) Remove(name string, config *types.PluginRmConfig) error { + p, err := pm.config.Store.GetV2Plugin(name) + pm.mu.RLock() + c := pm.cMap[p] + pm.mu.RUnlock() + + if err != nil { + return err + } + + if !config.ForceRemove { + if p.GetRefCount() > 0 { + return fmt.Errorf("plugin %s is in use", p.Name()) + } + if p.IsEnabled() { + return fmt.Errorf("plugin %s is enabled", p.Name()) + } + } + + if p.IsEnabled() { + if err := pm.disable(p, c); err != nil { + logrus.Errorf("failed to disable plugin '%s': %s", p.Name(), err) + } + } + + defer func() { + go pm.GC() + }() + + id := p.GetID() + pluginDir := filepath.Join(pm.config.Root, id) + + if err := mount.RecursiveUnmount(pluginDir); err != nil { + return errors.Wrap(err, "error unmounting plugin data") + } + + removeDir := pluginDir + "-removing" + if err := os.Rename(pluginDir, removeDir); err != nil { + return errors.Wrap(err, "error performing atomic remove of plugin dir") + } + + if err := system.EnsureRemoveAll(removeDir); err != nil { + return errors.Wrap(err, "error removing plugin dir") + } + pm.config.Store.Remove(p) + pm.config.LogPluginEvent(id, name, "remove") + pm.publisher.Publish(EventRemove{Plugin: p.PluginObj}) + return nil +} + +func getMounts(root string) ([]string, error) { + infos, err := mount.GetMounts() + if err != nil { + return nil, errors.Wrap(err, "failed to read mount table") + } + + var mounts []string + for _, m := range infos { + if strings.HasPrefix(m.Mountpoint, root) { + mounts = append(mounts, m.Mountpoint) + } + } + + return mounts, nil +} + +// Set sets plugin args +func (pm *Manager) Set(name string, args []string) error { + p, err := pm.config.Store.GetV2Plugin(name) + if err != nil { + return err + } + if err := p.Set(args); err != nil { + return err + } + return pm.save(p) +} + +// CreateFromContext creates a plugin from the given pluginDir which contains +// both the rootfs and the config.json and a repoName with optional tag. 
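+//
+// Illustrative layout of the tar context (editor's sketch, inferred from the
+// configFileName and rootFSFileName constants in manager.go):
+//
+//	config.json   - the plugin config
+//	rootfs/       - the plugin's root filesystem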
+func (pm *Manager) CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *types.PluginCreateOptions) (err error) { + pm.muGC.RLock() + defer pm.muGC.RUnlock() + + ref, err := reference.ParseNormalizedNamed(options.RepoName) + if err != nil { + return errors.Wrapf(err, "failed to parse reference %v", options.RepoName) + } + if _, ok := ref.(reference.Canonical); ok { + return errors.Errorf("canonical references are not permitted") + } + name := reference.FamiliarString(reference.TagNameOnly(ref)) + + if err := pm.config.Store.validateName(name); err != nil { // fast check, real check is in createPlugin() + return err + } + + tmpRootFSDir, err := ioutil.TempDir(pm.tmpDir(), ".rootfs") + if err != nil { + return errors.Wrap(err, "failed to create temp directory") + } + defer os.RemoveAll(tmpRootFSDir) + + var configJSON []byte + rootFS := splitConfigRootFSFromTar(tarCtx, &configJSON) + + rootFSBlob, err := pm.blobStore.New() + if err != nil { + return err + } + defer rootFSBlob.Close() + gzw := gzip.NewWriter(rootFSBlob) + layerDigester := digest.Canonical.Digester() + rootFSReader := io.TeeReader(rootFS, io.MultiWriter(gzw, layerDigester.Hash())) + + if err := chrootarchive.Untar(rootFSReader, tmpRootFSDir, nil); err != nil { + return err + } + if err := rootFS.Close(); err != nil { + return err + } + + if configJSON == nil { + return errors.New("config not found") + } + + if err := gzw.Close(); err != nil { + return errors.Wrap(err, "error closing gzip writer") + } + + var config types.PluginConfig + if err := json.Unmarshal(configJSON, &config); err != nil { + return errors.Wrap(err, "failed to parse config") + } + + if err := pm.validateConfig(config); err != nil { + return err + } + + pm.mu.Lock() + defer pm.mu.Unlock() + + rootFSBlobsum, err := rootFSBlob.Commit() + if err != nil { + return err + } + defer func() { + if err != nil { + go pm.GC() + } + }() + + config.Rootfs = &types.PluginConfigRootfs{ + Type: "layers", + DiffIds: []string{layerDigester.Digest().String()}, + } + + config.DockerVersion = dockerversion.Version + + configBlob, err := pm.blobStore.New() + if err != nil { + return err + } + defer configBlob.Close() + if err := json.NewEncoder(configBlob).Encode(config); err != nil { + return errors.Wrap(err, "error encoding json config") + } + configBlobsum, err := configBlob.Commit() + if err != nil { + return err + } + + p, err := pm.createPlugin(name, configBlobsum, []digest.Digest{rootFSBlobsum}, tmpRootFSDir, nil) + if err != nil { + return err + } + p.PluginObj.PluginReference = name + + pm.publisher.Publish(EventCreate{Plugin: p.PluginObj}) + pm.config.LogPluginEvent(p.PluginObj.ID, name, "create") + + return nil +} + +func (pm *Manager) validateConfig(config types.PluginConfig) error { + return nil // TODO: +} + +func splitConfigRootFSFromTar(in io.ReadCloser, config *[]byte) io.ReadCloser { + pr, pw := io.Pipe() + go func() { + tarReader := tar.NewReader(in) + tarWriter := tar.NewWriter(pw) + defer in.Close() + + hasRootFS := false + + for { + hdr, err := tarReader.Next() + if err == io.EOF { + if !hasRootFS { + pw.CloseWithError(errors.Wrap(err, "no rootfs found")) + return + } + // Signals end of archive. 
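+ // (Editor's note: closing tarWriter first flushes the trailing
+ // end-of-archive blocks into the pipe before the reader sees EOF.)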
+ tarWriter.Close() + pw.Close() + return + } + if err != nil { + pw.CloseWithError(errors.Wrap(err, "failed to read from tar")) + return + } + + content := io.Reader(tarReader) + name := path.Clean(hdr.Name) + if path.IsAbs(name) { + name = name[1:] + } + if name == configFileName { + dt, err := ioutil.ReadAll(content) + if err != nil { + pw.CloseWithError(errors.Wrapf(err, "failed to read %s", configFileName)) + return + } + *config = dt + } + if parts := strings.Split(name, "/"); len(parts) != 0 && parts[0] == rootFSFileName { + hdr.Name = path.Clean(path.Join(parts[1:]...)) + if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(strings.ToLower(hdr.Linkname), rootFSFileName+"/") { + hdr.Linkname = hdr.Linkname[len(rootFSFileName)+1:] + } + if err := tarWriter.WriteHeader(hdr); err != nil { + pw.CloseWithError(errors.Wrap(err, "error writing tar header")) + return + } + if _, err := pools.Copy(tarWriter, content); err != nil { + pw.CloseWithError(errors.Wrap(err, "error copying tar data")) + return + } + hasRootFS = true + } else { + io.Copy(ioutil.Discard, content) + } + } + }() + return pr +} diff --git a/vendor/github.com/moby/moby/plugin/backend_unsupported.go b/vendor/github.com/moby/moby/plugin/backend_unsupported.go new file mode 100644 index 000000000..e69bb883d --- /dev/null +++ b/vendor/github.com/moby/moby/plugin/backend_unsupported.go @@ -0,0 +1,72 @@ +// +build !linux + +package plugin + +import ( + "errors" + "io" + "net/http" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +var errNotSupported = errors.New("plugins are not supported on this platform") + +// Disable deactivates a plugin, which implies that it cannot be used by containers. +func (pm *Manager) Disable(name string, config *types.PluginDisableConfig) error { + return errNotSupported +} + +// Enable activates a plugin, which implies that it is ready to be used by containers. +func (pm *Manager) Enable(name string, config *types.PluginEnableConfig) error { + return errNotSupported +} + +// Inspect examines a plugin config +func (pm *Manager) Inspect(refOrID string) (tp *types.Plugin, err error) { + return nil, errNotSupported +} + +// Privileges pulls a plugin config and computes the privileges required to install it. +func (pm *Manager) Privileges(ctx context.Context, ref reference.Named, metaHeader http.Header, authConfig *types.AuthConfig) (types.PluginPrivileges, error) { + return nil, errNotSupported +} + +// Pull pulls a plugin, checks that the correct privileges are provided, and installs the plugin. +func (pm *Manager) Pull(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, out io.Writer, opts ...CreateOpt) error { + return errNotSupported +} + +// Upgrade pulls a plugin, checks that the correct privileges are provided, and upgrades the installed plugin. +func (pm *Manager) Upgrade(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer) error { + return errNotSupported +} + +// List displays the list of plugins and associated metadata. +func (pm *Manager) List(pluginFilters filters.Args) ([]types.Plugin, error) { + return nil, errNotSupported +} + +// Push pushes a plugin to the store. 
+func (pm *Manager) Push(ctx context.Context, name string, metaHeader http.Header, authConfig *types.AuthConfig, out io.Writer) error { + return errNotSupported +} + +// Remove deletes plugin's root directory. +func (pm *Manager) Remove(name string, config *types.PluginRmConfig) error { + return errNotSupported +} + +// Set sets plugin args +func (pm *Manager) Set(name string, args []string) error { + return errNotSupported +} + +// CreateFromContext creates a plugin from the given pluginDir which contains +// both the rootfs and the config.json and a repoName with optional tag. +func (pm *Manager) CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *types.PluginCreateOptions) error { + return errNotSupported +} diff --git a/vendor/github.com/moby/moby/plugin/blobstore.go b/vendor/github.com/moby/moby/plugin/blobstore.go new file mode 100644 index 000000000..2b79a4427 --- /dev/null +++ b/vendor/github.com/moby/moby/plugin/blobstore.go @@ -0,0 +1,184 @@ +package plugin + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/progress" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +type blobstore interface { + New() (WriteCommitCloser, error) + Get(dgst digest.Digest) (io.ReadCloser, error) + Size(dgst digest.Digest) (int64, error) +} + +type basicBlobStore struct { + path string +} + +func newBasicBlobStore(p string) (*basicBlobStore, error) { + tmpdir := filepath.Join(p, "tmp") + if err := os.MkdirAll(tmpdir, 0700); err != nil { + return nil, errors.Wrapf(err, "failed to mkdir %v", p) + } + return &basicBlobStore{path: p}, nil +} + +func (b *basicBlobStore) New() (WriteCommitCloser, error) { + f, err := ioutil.TempFile(filepath.Join(b.path, "tmp"), ".insertion") + if err != nil { + return nil, errors.Wrap(err, "failed to create temp file") + } + return newInsertion(f), nil +} + +func (b *basicBlobStore) Get(dgst digest.Digest) (io.ReadCloser, error) { + return os.Open(filepath.Join(b.path, string(dgst.Algorithm()), dgst.Hex())) +} + +func (b *basicBlobStore) Size(dgst digest.Digest) (int64, error) { + stat, err := os.Stat(filepath.Join(b.path, string(dgst.Algorithm()), dgst.Hex())) + if err != nil { + return 0, err + } + return stat.Size(), nil +} + +func (b *basicBlobStore) gc(whitelist map[digest.Digest]struct{}) { + for _, alg := range []string{string(digest.Canonical)} { + items, err := ioutil.ReadDir(filepath.Join(b.path, alg)) + if err != nil { + continue + } + for _, fi := range items { + if _, exists := whitelist[digest.Digest(alg+":"+fi.Name())]; !exists { + p := filepath.Join(b.path, alg, fi.Name()) + err := os.RemoveAll(p) + logrus.Debugf("cleaned up blob %v: %v", p, err) + } + } + } + +} + +// WriteCommitCloser defines object that can be committed to blobstore. 
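+//
+// Typical write path (editor's sketch based on basicBlobStore above; error
+// handling elided, and `store` is a hypothetical blobstore value):
+//
+//	w, _ := store.New()   // temp file under <path>/tmp
+//	io.Copy(w, content)
+//	dgst, _ := w.Commit() // renames the blob to <path>/<algorithm>/<hex>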
+type WriteCommitCloser interface { + io.WriteCloser + Commit() (digest.Digest, error) +} + +type insertion struct { + io.Writer + f *os.File + digester digest.Digester + closed bool +} + +func newInsertion(tempFile *os.File) *insertion { + digester := digest.Canonical.Digester() + return &insertion{f: tempFile, digester: digester, Writer: io.MultiWriter(tempFile, digester.Hash())} +} + +func (i *insertion) Commit() (digest.Digest, error) { + p := i.f.Name() + d := filepath.Join(filepath.Join(p, "../../")) + i.f.Sync() + defer os.RemoveAll(p) + if err := i.f.Close(); err != nil { + return "", err + } + i.closed = true + dgst := i.digester.Digest() + if err := os.MkdirAll(filepath.Join(d, string(dgst.Algorithm())), 0700); err != nil { + return "", errors.Wrapf(err, "failed to mkdir %v", d) + } + if err := os.Rename(p, filepath.Join(d, string(dgst.Algorithm()), dgst.Hex())); err != nil { + return "", errors.Wrapf(err, "failed to rename %v", p) + } + return dgst, nil +} + +func (i *insertion) Close() error { + if i.closed { + return nil + } + defer os.RemoveAll(i.f.Name()) + return i.f.Close() +} + +type downloadManager struct { + blobStore blobstore + tmpDir string + blobs []digest.Digest + configDigest digest.Digest +} + +func (dm *downloadManager) Download(ctx context.Context, initialRootFS image.RootFS, platform layer.Platform, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) { + // TODO @jhowardmsft LCOW: May need revisiting. + for _, l := range layers { + b, err := dm.blobStore.New() + if err != nil { + return initialRootFS, nil, err + } + defer b.Close() + rc, _, err := l.Download(ctx, progressOutput) + if err != nil { + return initialRootFS, nil, errors.Wrap(err, "failed to download") + } + defer rc.Close() + r := io.TeeReader(rc, b) + inflatedLayerData, err := archive.DecompressStream(r) + if err != nil { + return initialRootFS, nil, err + } + digester := digest.Canonical.Digester() + if _, err := chrootarchive.ApplyLayer(dm.tmpDir, io.TeeReader(inflatedLayerData, digester.Hash())); err != nil { + return initialRootFS, nil, err + } + initialRootFS.Append(layer.DiffID(digester.Digest())) + d, err := b.Commit() + if err != nil { + return initialRootFS, nil, err + } + dm.blobs = append(dm.blobs, d) + } + return initialRootFS, nil, nil +} + +func (dm *downloadManager) Put(dt []byte) (digest.Digest, error) { + b, err := dm.blobStore.New() + if err != nil { + return "", err + } + defer b.Close() + n, err := b.Write(dt) + if err != nil { + return "", err + } + if n != len(dt) { + return "", io.ErrShortWrite + } + d, err := b.Commit() + dm.configDigest = d + return d, err +} + +func (dm *downloadManager) Get(d digest.Digest) ([]byte, error) { + return nil, fmt.Errorf("digest not found") +} +func (dm *downloadManager) RootFSAndPlatformFromConfig(c []byte) (*image.RootFS, layer.Platform, error) { + return configToRootFS(c) +} diff --git a/vendor/github.com/moby/moby/plugin/defs.go b/vendor/github.com/moby/moby/plugin/defs.go new file mode 100644 index 000000000..3e930de04 --- /dev/null +++ b/vendor/github.com/moby/moby/plugin/defs.go @@ -0,0 +1,37 @@ +package plugin + +import ( + "sync" + + "github.com/docker/docker/pkg/plugins" + "github.com/docker/docker/plugin/v2" +) + +// Store manages the plugin inventory in memory and on-disk +type Store struct { + sync.RWMutex + plugins map[string]*v2.Plugin + /* handlers are necessary for transition path of legacy plugins + * to the new model. 
Legacy plugins use Handle() for registering an + * activation callback.*/ + handlers map[string][]func(string, *plugins.Client) +} + +// NewStore creates a Store. +func NewStore() *Store { + return &Store{ + plugins: make(map[string]*v2.Plugin), + handlers: make(map[string][]func(string, *plugins.Client)), + } +} + +// CreateOpt is used to configure specific plugin details when created +type CreateOpt func(p *v2.Plugin) + +// WithSwarmService is a CreateOpt that flags the passed-in plugin as a plugin +// managed by swarm +func WithSwarmService(id string) CreateOpt { + return func(p *v2.Plugin) { + p.SwarmServiceID = id + } +} diff --git a/vendor/github.com/moby/moby/plugin/events.go b/vendor/github.com/moby/moby/plugin/events.go new file mode 100644 index 000000000..92e603850 --- /dev/null +++ b/vendor/github.com/moby/moby/plugin/events.go @@ -0,0 +1,111 @@ +package plugin + +import ( + "fmt" + "reflect" + + "github.com/docker/docker/api/types" +) + +// Event is emitted for actions performed on the plugin manager +type Event interface { + matches(Event) bool +} + +// EventCreate is an event which is emitted when a plugin is created +// This is either by pull or create from context. +// +// Use the `Interfaces` field to match only plugins that implement a specific +// interface. +// These are matched against using "or" logic. +// If no interfaces are listed, all are matched. +type EventCreate struct { + Interfaces map[string]bool + Plugin types.Plugin +} + +func (e EventCreate) matches(observed Event) bool { + oe, ok := observed.(EventCreate) + if !ok { + return false + } + if len(e.Interfaces) == 0 { + return true + } + + var ifaceMatch bool + for _, in := range oe.Plugin.Config.Interface.Types { + if e.Interfaces[in.Capability] { + ifaceMatch = true + break + } + } + return ifaceMatch +} + +// EventRemove is an event which is emitted when a plugin is removed +// It matches on the passed-in plugin's ID only. +type EventRemove struct { + Plugin types.Plugin +} + +func (e EventRemove) matches(observed Event) bool { + oe, ok := observed.(EventRemove) + if !ok { + return false + } + return e.Plugin.ID == oe.Plugin.ID +} + +// EventDisable is an event that is emitted when a plugin is disabled +// It matches on the passed-in plugin's ID only. +type EventDisable struct { + Plugin types.Plugin +} + +func (e EventDisable) matches(observed Event) bool { + oe, ok := observed.(EventDisable) + if !ok { + return false + } + return e.Plugin.ID == oe.Plugin.ID +} + +// EventEnable is an event that is emitted when a plugin is enabled +// It matches on the passed-in plugin's ID only. +type EventEnable struct { + Plugin types.Plugin +} + +func (e EventEnable) matches(observed Event) bool { + oe, ok := observed.(EventEnable) + if !ok { + return false + } + return e.Plugin.ID == oe.Plugin.ID +} + +// SubscribeEvents provides an event channel to listen for structured events from +// the plugin manager actions, CRUD operations. +// The caller must call the returned `cancel()` function once done with the channel +// or this will leak resources. 
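+//
+// Illustrative subscription (editor's sketch): an EventCreate with no
+// Interfaces set matches every create event, per matches() above.
+//
+//	ch, cancel := pm.SubscribeEvents(1, EventCreate{})
+//	defer cancel()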
+func (pm *Manager) SubscribeEvents(buffer int, watchEvents ...Event) (eventCh <-chan interface{}, cancel func()) { + topic := func(i interface{}) bool { + observed, ok := i.(Event) + if !ok { + panic(fmt.Sprintf("unexpected type passed to event channel: %v", reflect.TypeOf(i))) + } + for _, e := range watchEvents { + if e.matches(observed) { + return true + } + } + // If no specific events are specified, always assume a matched event. + // If some events were specified and none matched above, then the event + // doesn't match. + return watchEvents == nil + } + ch := pm.publisher.SubscribeTopicWithBuffer(topic, buffer) + cancelFunc := func() { pm.publisher.Evict(ch) } + return ch, cancelFunc +} diff --git a/vendor/github.com/moby/moby/plugin/manager.go b/vendor/github.com/moby/moby/plugin/manager.go new file mode 100644 index 000000000..fada0d667 --- /dev/null +++ b/vendor/github.com/moby/moby/plugin/manager.go @@ -0,0 +1,411 @@ +package plugin + +import ( + "encoding/json" + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "regexp" + "runtime" + "sort" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/authorization" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/pubsub" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/plugin/v2" + "github.com/docker/docker/registry" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +const configFileName = "config.json" +const rootFSFileName = "rootfs" + +var validFullID = regexp.MustCompile(`^([a-f0-9]{64})$`) + +func (pm *Manager) restorePlugin(p *v2.Plugin) error { + if p.IsEnabled() { + return pm.restore(p) + } + return nil +} + +type eventLogger func(id, name, action string) + +// ManagerConfig defines configuration needed to start a new manager. +type ManagerConfig struct { + Store *Store // remove + Executor libcontainerd.Remote + RegistryService registry.Service + LiveRestoreEnabled bool // TODO: remove + LogPluginEvent eventLogger + Root string + ExecRoot string + AuthzMiddleware *authorization.Middleware +} + +// Manager controls the plugin subsystem. +type Manager struct { + config ManagerConfig + mu sync.RWMutex // protects cMap + muGC sync.RWMutex // protects blobstore deletions + cMap map[*v2.Plugin]*controller + containerdClient libcontainerd.Client + blobStore *basicBlobStore + publisher *pubsub.Publisher +} + +// controller represents the manager's control on a plugin. +type controller struct { + restart bool + exitChan chan bool + timeoutInSecs int +} + +// pluginRegistryService ensures that all resolved repositories +// are of the plugin class. +type pluginRegistryService struct { + registry.Service +} + +func (s pluginRegistryService) ResolveRepository(name reference.Named) (repoInfo *registry.RepositoryInfo, err error) { + repoInfo, err = s.Service.ResolveRepository(name) + if repoInfo != nil { + repoInfo.Class = "plugin" + } + return +} + +// NewManager returns a new plugin manager. 
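+//
+// Illustrative construction (editor's sketch; the paths are hypothetical, and
+// a real caller must also supply Executor and the other ManagerConfig fields
+// used above):
+//
+//	mgr, err := NewManager(ManagerConfig{
+//		Root:     "/var/lib/docker/plugins",
+//		ExecRoot: "/run/docker/plugins",
+//		Store:    NewStore(),
+//	})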
+func NewManager(config ManagerConfig) (*Manager, error) { + if config.RegistryService != nil { + config.RegistryService = pluginRegistryService{config.RegistryService} + } + manager := &Manager{ + config: config, + } + if err := os.MkdirAll(manager.config.Root, 0700); err != nil { + return nil, errors.Wrapf(err, "failed to mkdir %v", manager.config.Root) + } + if err := os.MkdirAll(manager.config.ExecRoot, 0700); err != nil { + return nil, errors.Wrapf(err, "failed to mkdir %v", manager.config.ExecRoot) + } + if err := os.MkdirAll(manager.tmpDir(), 0700); err != nil { + return nil, errors.Wrapf(err, "failed to mkdir %v", manager.tmpDir()) + } + var err error + manager.containerdClient, err = config.Executor.Client(manager) // todo: move to another struct + if err != nil { + return nil, errors.Wrap(err, "failed to create containerd client") + } + manager.blobStore, err = newBasicBlobStore(filepath.Join(manager.config.Root, "storage/blobs")) + if err != nil { + return nil, err + } + + manager.cMap = make(map[*v2.Plugin]*controller) + if err := manager.reload(); err != nil { + return nil, errors.Wrap(err, "failed to restore plugins") + } + + manager.publisher = pubsub.NewPublisher(0, 0) + return manager, nil +} + +func (pm *Manager) tmpDir() string { + return filepath.Join(pm.config.Root, "tmp") +} + +// StateChanged updates plugin internals using libcontainerd events. +func (pm *Manager) StateChanged(id string, e libcontainerd.StateInfo) error { + logrus.Debugf("plugin state changed %s %#v", id, e) + + switch e.State { + case libcontainerd.StateExit: + p, err := pm.config.Store.GetV2Plugin(id) + if err != nil { + return err + } + + os.RemoveAll(filepath.Join(pm.config.ExecRoot, id)) + + if p.PropagatedMount != "" { + if err := mount.Unmount(p.PropagatedMount); err != nil { + logrus.Warnf("Could not unmount %s: %v", p.PropagatedMount, err) + } + propRoot := filepath.Join(filepath.Dir(p.Rootfs), "propagated-mount") + if err := mount.Unmount(propRoot); err != nil { + logrus.Warnf("Could not unmount %s: %v", propRoot, err) + } + } + + pm.mu.RLock() + c := pm.cMap[p] + if c.exitChan != nil { + close(c.exitChan) + } + restart := c.restart + pm.mu.RUnlock() + + if restart { + pm.enable(p, c, true) + } + } + + return nil +} + +func handleLoadError(err error, id string) { + if err == nil { + return + } + logger := logrus.WithError(err).WithField("id", id) + if os.IsNotExist(errors.Cause(err)) { + // Likely some error while removing on an older version of docker + logger.Warn("missing plugin config, skipping: this may be caused by a failed remove and requires manual cleanup.") + return + } + logger.Error("error loading plugin, skipping") +} + +func (pm *Manager) reload() error { // todo: restore + dir, err := ioutil.ReadDir(pm.config.Root) + if err != nil { + return errors.Wrapf(err, "failed to read %v", pm.config.Root) + } + plugins := make(map[string]*v2.Plugin) + for _, v := range dir { + if validFullID.MatchString(v.Name()) { + p, err := pm.loadPlugin(v.Name()) + if err != nil { + handleLoadError(err, v.Name()) + continue + } + plugins[p.GetID()] = p + } else { + if validFullID.MatchString(strings.TrimSuffix(v.Name(), "-removing")) { + // There was likely some error while removing this plugin, let's try to remove again here + if err := system.EnsureRemoveAll(filepath.Join(pm.config.Root, v.Name())); err != nil { + logrus.WithError(err).WithField("id", v.Name()).Warn("error while attempting to clean up previously removed plugin") + } + } + } + } + + pm.config.Store.SetAll(plugins) + + var wg sync.WaitGroup + 
wg.Add(len(plugins)) + for _, p := range plugins { + c := &controller{} // todo: remove this + pm.cMap[p] = c + go func(p *v2.Plugin) { + defer wg.Done() + if err := pm.restorePlugin(p); err != nil { + logrus.Errorf("failed to restore plugin '%s': %s", p.Name(), err) + return + } + + if p.Rootfs != "" { + p.Rootfs = filepath.Join(pm.config.Root, p.PluginObj.ID, "rootfs") + } + + // We should only enable rootfs propagation for certain plugin types that need it. + for _, typ := range p.PluginObj.Config.Interface.Types { + if (typ.Capability == "volumedriver" || typ.Capability == "graphdriver") && typ.Prefix == "docker" && strings.HasPrefix(typ.Version, "1.") { + if p.PluginObj.Config.PropagatedMount != "" { + propRoot := filepath.Join(filepath.Dir(p.Rootfs), "propagated-mount") + + // check if we need to migrate an older propagated mount from before + // these mounts were stored outside the plugin rootfs + if _, err := os.Stat(propRoot); os.IsNotExist(err) { + if _, err := os.Stat(p.PropagatedMount); err == nil { + // make sure nothing is mounted here + // don't care about errors + mount.Unmount(p.PropagatedMount) + if err := os.Rename(p.PropagatedMount, propRoot); err != nil { + logrus.WithError(err).WithField("dir", propRoot).Error("error migrating propagated mount storage") + } + if err := os.MkdirAll(p.PropagatedMount, 0755); err != nil { + logrus.WithError(err).WithField("dir", p.PropagatedMount).Error("error migrating propagated mount storage") + } + } + } + + if err := os.MkdirAll(propRoot, 0755); err != nil { + logrus.Errorf("failed to create PropagatedMount directory at %s: %v", propRoot, err) + } + // TODO: sanitize PropagatedMount and prevent breakout + p.PropagatedMount = filepath.Join(p.Rootfs, p.PluginObj.Config.PropagatedMount) + if err := os.MkdirAll(p.PropagatedMount, 0755); err != nil { + logrus.Errorf("failed to create PropagatedMount directory at %s: %v", p.PropagatedMount, err) + return + } + } + } + } + + pm.save(p) + requiresManualRestore := !pm.config.LiveRestoreEnabled && p.IsEnabled() + + if requiresManualRestore { + // if liveRestore is not enabled, the plugin will be stopped now so we should enable it + if err := pm.enable(p, c, true); err != nil { + logrus.Errorf("failed to enable plugin '%s': %s", p.Name(), err) + } + } + }(p) + } + wg.Wait() + return nil +} + +// Get looks up the requested plugin in the store. +func (pm *Manager) Get(idOrName string) (*v2.Plugin, error) { + return pm.config.Store.GetV2Plugin(idOrName) +} + +func (pm *Manager) loadPlugin(id string) (*v2.Plugin, error) { + p := filepath.Join(pm.config.Root, id, configFileName) + dt, err := ioutil.ReadFile(p) + if err != nil { + return nil, errors.Wrapf(err, "error reading %v", p) + } + var plugin v2.Plugin + if err := json.Unmarshal(dt, &plugin); err != nil { + return nil, errors.Wrapf(err, "error decoding %v", p) + } + return &plugin, nil +} + +func (pm *Manager) save(p *v2.Plugin) error { + pluginJSON, err := json.Marshal(p) + if err != nil { + return errors.Wrap(err, "failed to marshal plugin json") + } + if err := ioutils.AtomicWriteFile(filepath.Join(pm.config.Root, p.GetID(), configFileName), pluginJSON, 0600); err != nil { + return errors.Wrap(err, "failed to write atomically plugin json") + } + return nil +} + +// GC cleans up unreferenced blobs. 
This is recommended to run in a goroutine. +func (pm *Manager) GC() { + pm.muGC.Lock() + defer pm.muGC.Unlock() + + whitelist := make(map[digest.Digest]struct{}) + for _, p := range pm.config.Store.GetAll() { + whitelist[p.Config] = struct{}{} + for _, b := range p.Blobsums { + whitelist[b] = struct{}{} + } + } + + pm.blobStore.gc(whitelist) +} + +type logHook struct{ id string } + +func (logHook) Levels() []logrus.Level { + return logrus.AllLevels +} + +func (l logHook) Fire(entry *logrus.Entry) error { + entry.Data = logrus.Fields{"plugin": l.id} + return nil +} + +func attachToLog(id string) func(libcontainerd.IOPipe) error { + return func(iop libcontainerd.IOPipe) error { + iop.Stdin.Close() + + logger := logrus.New() + logger.Hooks.Add(logHook{id}) + // TODO: cache writer per id + w := logger.Writer() + go func() { + io.Copy(w, iop.Stdout) + }() + go func() { + // TODO: update logrus and use logger.WriterLevel + io.Copy(w, iop.Stderr) + }() + return nil + } +} + +func validatePrivileges(requiredPrivileges, privileges types.PluginPrivileges) error { + if !isEqual(requiredPrivileges, privileges, isEqualPrivilege) { + return errors.New("incorrect privileges") + } + + return nil +} + +func isEqual(arrOne, arrOther types.PluginPrivileges, compare func(x, y types.PluginPrivilege) bool) bool { + if len(arrOne) != len(arrOther) { + return false + } + + sort.Sort(arrOne) + sort.Sort(arrOther) + + // Start at 0 so the first privilege is compared as well. + for i := 0; i < arrOne.Len(); i++ { + if !compare(arrOne[i], arrOther[i]) { + return false + } + } + + return true +} + +func isEqualPrivilege(a, b types.PluginPrivilege) bool { + if a.Name != b.Name { + return false + } + + return reflect.DeepEqual(a.Value, b.Value) +} + +func configToRootFS(c []byte) (*image.RootFS, layer.Platform, error) { + // TODO @jhowardmsft LCOW - Will need to revisit this. For now, calculate the platform. 
+ platform := layer.Platform(runtime.GOOS) + if system.LCOWSupported() { + platform = "linux" + } + var pluginConfig types.PluginConfig + if err := json.Unmarshal(c, &pluginConfig); err != nil { + return nil, "", err + } + // validation for empty rootfs is in distribution code + if pluginConfig.Rootfs == nil { + return nil, platform, nil + } + + return rootFSFromPlugin(pluginConfig.Rootfs), platform, nil +} + +func rootFSFromPlugin(pluginfs *types.PluginConfigRootfs) *image.RootFS { + rootFS := image.RootFS{ + Type: pluginfs.Type, + DiffIDs: make([]layer.DiffID, len(pluginfs.DiffIds)), + } + for i := range pluginfs.DiffIds { + rootFS.DiffIDs[i] = layer.DiffID(pluginfs.DiffIds[i]) + } + + return &rootFS +} diff --git a/vendor/github.com/moby/moby/plugin/manager_linux.go b/vendor/github.com/moby/moby/plugin/manager_linux.go new file mode 100644 index 000000000..9b84af68d --- /dev/null +++ b/vendor/github.com/moby/moby/plugin/manager_linux.go @@ -0,0 +1,323 @@ +// +build linux + +package plugin + +import ( + "encoding/json" + "fmt" + "net" + "os" + "path/filepath" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/daemon/initlayer" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/plugins" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/plugin/v2" + "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error { + p.Rootfs = filepath.Join(pm.config.Root, p.PluginObj.ID, "rootfs") + if p.IsEnabled() && !force { + return fmt.Errorf("plugin %s is already enabled", p.Name()) + } + spec, err := p.InitSpec(pm.config.ExecRoot) + if err != nil { + return err + } + + c.restart = true + c.exitChan = make(chan bool) + + pm.mu.Lock() + pm.cMap[p] = c + pm.mu.Unlock() + + var propRoot string + if p.PropagatedMount != "" { + propRoot = filepath.Join(filepath.Dir(p.Rootfs), "propagated-mount") + + if err := os.MkdirAll(propRoot, 0755); err != nil { + logrus.Errorf("failed to create PropagatedMount directory at %s: %v", propRoot, err) + } + + if err := mount.MakeRShared(propRoot); err != nil { + return errors.Wrap(err, "error setting up propagated mount dir") + } + + if err := mount.Mount(propRoot, p.PropagatedMount, "none", "rbind"); err != nil { + return errors.Wrap(err, "error creating mount for propagated mount") + } + } + + if err := initlayer.Setup(filepath.Join(pm.config.Root, p.PluginObj.ID, rootFSFileName), idtools.IDPair{0, 0}); err != nil { + return errors.WithStack(err) + } + + if err := pm.containerdClient.Create(p.GetID(), "", "", specs.Spec(*spec), attachToLog(p.GetID())); err != nil { + if p.PropagatedMount != "" { + if err := mount.Unmount(p.PropagatedMount); err != nil { + logrus.Warnf("Could not unmount %s: %v", p.PropagatedMount, err) + } + if err := mount.Unmount(propRoot); err != nil { + logrus.Warnf("Could not unmount %s: %v", propRoot, err) + } + } + return errors.WithStack(err) + } + + return pm.pluginPostStart(p, c) +} + +func (pm *Manager) pluginPostStart(p *v2.Plugin, c *controller) error { + sockAddr := filepath.Join(pm.config.ExecRoot, p.GetID(), p.GetSocket()) + client, err := plugins.NewClientWithTimeout("unix://"+sockAddr, nil, time.Duration(c.timeoutInSecs)*time.Second) + if err != nil { + c.restart = false + shutdownPlugin(p, c, 
pm.containerdClient)
+		return errors.WithStack(err)
+	}
+
+	p.SetPClient(client)
+
+	// Initial sleep before net Dial to allow plugin to listen on socket.
+	time.Sleep(500 * time.Millisecond)
+	maxRetries := 3
+	var retries int
+	for {
+		// net dial into the unix socket to see if someone's listening.
+		conn, err := net.Dial("unix", sockAddr)
+		if err == nil {
+			conn.Close()
+			break
+		}
+
+		time.Sleep(3 * time.Second)
+		retries++
+
+		if retries > maxRetries {
+			logrus.Debugf("error net dialing plugin: %v", err)
+			c.restart = false
+			// While restoring plugins, we need to explicitly set the state to disabled
+			pm.config.Store.SetState(p, false)
+			shutdownPlugin(p, c, pm.containerdClient)
+			return err
+		}
+
+	}
+	pm.config.Store.SetState(p, true)
+	pm.config.Store.CallHandler(p)
+
+	return pm.save(p)
+}
+
+func (pm *Manager) restore(p *v2.Plugin) error {
+	if err := pm.containerdClient.Restore(p.GetID(), attachToLog(p.GetID())); err != nil {
+		return err
+	}
+
+	if pm.config.LiveRestoreEnabled {
+		c := &controller{}
+		if pids, _ := pm.containerdClient.GetPidsForContainer(p.GetID()); len(pids) == 0 {
+			// plugin is not running, so follow normal startup procedure
+			return pm.enable(p, c, true)
+		}
+
+		c.exitChan = make(chan bool)
+		c.restart = true
+		pm.mu.Lock()
+		pm.cMap[p] = c
+		pm.mu.Unlock()
+		return pm.pluginPostStart(p, c)
+	}
+
+	return nil
+}
+
+func shutdownPlugin(p *v2.Plugin, c *controller, containerdClient libcontainerd.Client) {
+	pluginID := p.GetID()
+
+	err := containerdClient.Signal(pluginID, int(unix.SIGTERM))
+	if err != nil {
+		logrus.Errorf("Sending SIGTERM to plugin failed with error: %v", err)
+	} else {
+		select {
+		case <-c.exitChan:
+			logrus.Debug("Clean shutdown of plugin")
+		case <-time.After(time.Second * 10):
+			logrus.Debug("Force shutdown plugin")
+			if err := containerdClient.Signal(pluginID, int(unix.SIGKILL)); err != nil {
+				logrus.Errorf("Sending SIGKILL to plugin failed with error: %v", err)
+			}
+		}
+	}
+}
+
+func (pm *Manager) disable(p *v2.Plugin, c *controller) error {
+	if !p.IsEnabled() {
+		return fmt.Errorf("plugin %s is already disabled", p.Name())
+	}
+
+	c.restart = false
+	shutdownPlugin(p, c, pm.containerdClient)
+	pm.config.Store.SetState(p, false)
+	return pm.save(p)
+}
+
+// Shutdown stops all plugins and is called during daemon shutdown.
+func (pm *Manager) Shutdown() {
+	plugins := pm.config.Store.GetAll()
+	for _, p := range plugins {
+		pm.mu.RLock()
+		c := pm.cMap[p]
+		pm.mu.RUnlock()
+
+		if pm.config.LiveRestoreEnabled && p.IsEnabled() {
+			logrus.Debug("Plugin active when liveRestore is set, skipping shutdown")
+			continue
+		}
+		if pm.containerdClient != nil && p.IsEnabled() {
+			c.restart = false
+			shutdownPlugin(p, c, pm.containerdClient)
+		}
+	}
+}
+
+func (pm *Manager) upgradePlugin(p *v2.Plugin, configDigest digest.Digest, blobsums []digest.Digest, tmpRootFSDir string, privileges *types.PluginPrivileges) (err error) {
+	config, err := pm.setupNewPlugin(configDigest, blobsums, privileges)
+	if err != nil {
+		return err
+	}
+
+	pdir := filepath.Join(pm.config.Root, p.PluginObj.ID)
+	orig := filepath.Join(pdir, "rootfs")
+
+	// Make sure nothing is mounted.
+	// This could happen if the plugin was disabled with `-f` while it still had
+	// active mounts. If anything in `orig` is still mounted, this should error out.
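+	// (mount.RecursiveUnmount unmounts orig and anything still mounted beneath
+	// it, such as a leftover propagated mount, before the directory is renamed
+	// aside below.)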
+ if err := mount.RecursiveUnmount(orig); err != nil { + return err + } + + backup := orig + "-old" + if err := os.Rename(orig, backup); err != nil { + return errors.Wrap(err, "error backing up plugin data before upgrade") + } + + defer func() { + if err != nil { + if rmErr := os.RemoveAll(orig); rmErr != nil && !os.IsNotExist(rmErr) { + logrus.WithError(rmErr).WithField("dir", backup).Error("error cleaning up after failed upgrade") + return + } + if mvErr := os.Rename(backup, orig); mvErr != nil { + err = errors.Wrap(mvErr, "error restoring old plugin root on upgrade failure") + } + if rmErr := os.RemoveAll(tmpRootFSDir); rmErr != nil && !os.IsNotExist(rmErr) { + logrus.WithError(rmErr).WithField("plugin", p.Name()).Errorf("error cleaning up plugin upgrade dir: %s", tmpRootFSDir) + } + } else { + if rmErr := os.RemoveAll(backup); rmErr != nil && !os.IsNotExist(rmErr) { + logrus.WithError(rmErr).WithField("dir", backup).Error("error cleaning up old plugin root after successful upgrade") + } + + p.Config = configDigest + p.Blobsums = blobsums + } + }() + + if err := os.Rename(tmpRootFSDir, orig); err != nil { + return errors.Wrap(err, "error upgrading") + } + + p.PluginObj.Config = config + err = pm.save(p) + return errors.Wrap(err, "error saving upgraded plugin config") +} + +func (pm *Manager) setupNewPlugin(configDigest digest.Digest, blobsums []digest.Digest, privileges *types.PluginPrivileges) (types.PluginConfig, error) { + configRC, err := pm.blobStore.Get(configDigest) + if err != nil { + return types.PluginConfig{}, err + } + defer configRC.Close() + + var config types.PluginConfig + dec := json.NewDecoder(configRC) + if err := dec.Decode(&config); err != nil { + return types.PluginConfig{}, errors.Wrapf(err, "failed to parse config") + } + if dec.More() { + return types.PluginConfig{}, errors.New("invalid config json") + } + + requiredPrivileges, err := computePrivileges(config) + if err != nil { + return types.PluginConfig{}, err + } + if privileges != nil { + if err := validatePrivileges(requiredPrivileges, *privileges); err != nil { + return types.PluginConfig{}, err + } + } + + return config, nil +} + +// createPlugin creates a new plugin. take lock before calling. +func (pm *Manager) createPlugin(name string, configDigest digest.Digest, blobsums []digest.Digest, rootFSDir string, privileges *types.PluginPrivileges, opts ...CreateOpt) (p *v2.Plugin, err error) { + if err := pm.config.Store.validateName(name); err != nil { // todo: this check is wrong. 
remove store + return nil, err + } + + config, err := pm.setupNewPlugin(configDigest, blobsums, privileges) + if err != nil { + return nil, err + } + + p = &v2.Plugin{ + PluginObj: types.Plugin{ + Name: name, + ID: stringid.GenerateRandomID(), + Config: config, + }, + Config: configDigest, + Blobsums: blobsums, + } + p.InitEmptySettings() + for _, o := range opts { + o(p) + } + + pdir := filepath.Join(pm.config.Root, p.PluginObj.ID) + if err := os.MkdirAll(pdir, 0700); err != nil { + return nil, errors.Wrapf(err, "failed to mkdir %v", pdir) + } + + defer func() { + if err != nil { + os.RemoveAll(pdir) + } + }() + + if err := os.Rename(rootFSDir, filepath.Join(pdir, rootFSFileName)); err != nil { + return nil, errors.Wrap(err, "failed to rename rootfs") + } + + if err := pm.save(p); err != nil { + return nil, err + } + + pm.config.Store.Add(p) // todo: remove + + return p, nil +} diff --git a/vendor/github.com/moby/moby/plugin/manager_solaris.go b/vendor/github.com/moby/moby/plugin/manager_solaris.go new file mode 100644 index 000000000..72ccae72d --- /dev/null +++ b/vendor/github.com/moby/moby/plugin/manager_solaris.go @@ -0,0 +1,28 @@ +package plugin + +import ( + "fmt" + + "github.com/docker/docker/plugin/v2" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error { + return fmt.Errorf("Not implemented") +} + +func (pm *Manager) initSpec(p *v2.Plugin) (*specs.Spec, error) { + return nil, fmt.Errorf("Not implemented") +} + +func (pm *Manager) disable(p *v2.Plugin, c *controller) error { + return fmt.Errorf("Not implemented") +} + +func (pm *Manager) restore(p *v2.Plugin) error { + return fmt.Errorf("Not implemented") +} + +// Shutdown plugins +func (pm *Manager) Shutdown() { +} diff --git a/vendor/github.com/moby/moby/plugin/manager_test.go b/vendor/github.com/moby/moby/plugin/manager_test.go new file mode 100644 index 000000000..4efe76b44 --- /dev/null +++ b/vendor/github.com/moby/moby/plugin/manager_test.go @@ -0,0 +1,55 @@ +package plugin + +import ( + "testing" + + "github.com/docker/docker/api/types" +) + +func TestValidatePrivileges(t *testing.T) { + testData := map[string]struct { + requiredPrivileges types.PluginPrivileges + privileges types.PluginPrivileges + result bool + }{ + "diff-len": { + requiredPrivileges: []types.PluginPrivilege{ + {Name: "Privilege1", Description: "Description", Value: []string{"abc", "def", "ghi"}}, + }, + privileges: []types.PluginPrivilege{ + {Name: "Privilege1", Description: "Description", Value: []string{"abc", "def", "ghi"}}, + {Name: "Privilege2", Description: "Description", Value: []string{"123", "456", "789"}}, + }, + result: false, + }, + "diff-value": { + requiredPrivileges: []types.PluginPrivilege{ + {Name: "Privilege1", Description: "Description", Value: []string{"abc", "def", "GHI"}}, + {Name: "Privilege2", Description: "Description", Value: []string{"123", "456", "***"}}, + }, + privileges: []types.PluginPrivilege{ + {Name: "Privilege1", Description: "Description", Value: []string{"abc", "def", "ghi"}}, + {Name: "Privilege2", Description: "Description", Value: []string{"123", "456", "789"}}, + }, + result: false, + }, + "diff-order-but-same-value": { + requiredPrivileges: []types.PluginPrivilege{ + {Name: "Privilege1", Description: "Description", Value: []string{"abc", "def", "GHI"}}, + {Name: "Privilege2", Description: "Description", Value: []string{"123", "456", "789"}}, + }, + privileges: []types.PluginPrivilege{ + {Name: "Privilege2", Description: 
"Description", Value: []string{"123", "456", "789"}}, + {Name: "Privilege1", Description: "Description", Value: []string{"GHI", "abc", "def"}}, + }, + result: true, + }, + } + + for key, data := range testData { + err := validatePrivileges(data.requiredPrivileges, data.privileges) + if (err == nil) != data.result { + t.Fatalf("Test item %s expected result to be %t, got %t", key, data.result, (err == nil)) + } + } +} diff --git a/vendor/github.com/moby/moby/plugin/manager_windows.go b/vendor/github.com/moby/moby/plugin/manager_windows.go new file mode 100644 index 000000000..4469a671f --- /dev/null +++ b/vendor/github.com/moby/moby/plugin/manager_windows.go @@ -0,0 +1,30 @@ +// +build windows + +package plugin + +import ( + "fmt" + + "github.com/docker/docker/plugin/v2" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error { + return fmt.Errorf("Not implemented") +} + +func (pm *Manager) initSpec(p *v2.Plugin) (*specs.Spec, error) { + return nil, fmt.Errorf("Not implemented") +} + +func (pm *Manager) disable(p *v2.Plugin, c *controller) error { + return fmt.Errorf("Not implemented") +} + +func (pm *Manager) restore(p *v2.Plugin) error { + return fmt.Errorf("Not implemented") +} + +// Shutdown plugins +func (pm *Manager) Shutdown() { +} diff --git a/vendor/github.com/moby/moby/plugin/store.go b/vendor/github.com/moby/moby/plugin/store.go new file mode 100644 index 000000000..7f6e954bf --- /dev/null +++ b/vendor/github.com/moby/moby/plugin/store.go @@ -0,0 +1,270 @@ +package plugin + +import ( + "fmt" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/plugins" + "github.com/docker/docker/plugin/v2" + "github.com/pkg/errors" +) + +/* allowV1PluginsFallback determines daemon's support for V1 plugins. + * When the time comes to remove support for V1 plugins, flipping + * this bool is all that will be needed. + */ +const allowV1PluginsFallback bool = true + +/* defaultAPIVersion is the version of the plugin API for volume, network, + IPAM and authz. This is a very stable API. When we update this API, then + pluginType should include a version. e.g. "networkdriver/2.0". +*/ +const defaultAPIVersion string = "1.0" + +// ErrNotFound indicates that a plugin was not found locally. +type ErrNotFound string + +func (name ErrNotFound) Error() string { return fmt.Sprintf("plugin %q not found", string(name)) } + +// ErrAmbiguous indicates that more than one plugin was found +type ErrAmbiguous string + +func (name ErrAmbiguous) Error() string { + return fmt.Sprintf("multiple plugins found for %q", string(name)) +} + +// ErrDisabled indicates that a plugin was found but it is disabled +type ErrDisabled string + +func (name ErrDisabled) Error() string { + return fmt.Sprintf("plugin %s found but disabled", string(name)) +} + +// GetV2Plugin retrieves a plugin by name, id or partial ID. +func (ps *Store) GetV2Plugin(refOrID string) (*v2.Plugin, error) { + ps.RLock() + defer ps.RUnlock() + + id, err := ps.resolvePluginID(refOrID) + if err != nil { + return nil, err + } + + p, idOk := ps.plugins[id] + if !idOk { + return nil, errors.WithStack(ErrNotFound(id)) + } + + return p, nil +} + +// validateName returns error if name is already reserved. 
always call with lock and full name +func (ps *Store) validateName(name string) error { + for _, p := range ps.plugins { + if p.Name() == name { + return errors.Errorf("plugin %q already exists", name) + } + } + return nil +} + +// GetAll retrieves all plugins. +func (ps *Store) GetAll() map[string]*v2.Plugin { + ps.RLock() + defer ps.RUnlock() + return ps.plugins +} + +// SetAll initialized plugins during daemon restore. +func (ps *Store) SetAll(plugins map[string]*v2.Plugin) { + ps.Lock() + defer ps.Unlock() + ps.plugins = plugins +} + +func (ps *Store) getAllByCap(capability string) []plugingetter.CompatPlugin { + ps.RLock() + defer ps.RUnlock() + + result := make([]plugingetter.CompatPlugin, 0, 1) + for _, p := range ps.plugins { + if p.IsEnabled() { + if _, err := p.FilterByCap(capability); err == nil { + result = append(result, p) + } + } + } + return result +} + +// SetState sets the active state of the plugin and updates plugindb. +func (ps *Store) SetState(p *v2.Plugin, state bool) { + ps.Lock() + defer ps.Unlock() + + p.PluginObj.Enabled = state +} + +// Add adds a plugin to memory and plugindb. +// An error will be returned if there is a collision. +func (ps *Store) Add(p *v2.Plugin) error { + ps.Lock() + defer ps.Unlock() + + if v, exist := ps.plugins[p.GetID()]; exist { + return fmt.Errorf("plugin %q has the same ID %s as %q", p.Name(), p.GetID(), v.Name()) + } + ps.plugins[p.GetID()] = p + return nil +} + +// Remove removes a plugin from memory and plugindb. +func (ps *Store) Remove(p *v2.Plugin) { + ps.Lock() + delete(ps.plugins, p.GetID()) + ps.Unlock() +} + +// Get returns an enabled plugin matching the given name and capability. +func (ps *Store) Get(name, capability string, mode int) (plugingetter.CompatPlugin, error) { + var ( + p *v2.Plugin + err error + ) + + // Lookup using new model. + if ps != nil { + p, err = ps.GetV2Plugin(name) + if err == nil { + p.AddRefCount(mode) + if p.IsEnabled() { + return p.FilterByCap(capability) + } + // Plugin was found but it is disabled, so we should not fall back to legacy plugins + // but we should error out right away + return nil, ErrDisabled(name) + } + if _, ok := errors.Cause(err).(ErrNotFound); !ok { + return nil, err + } + } + + // Lookup using legacy model. + if allowV1PluginsFallback { + p, err := plugins.Get(name, capability) + if err != nil { + return nil, fmt.Errorf("legacy plugin: %v", err) + } + return p, nil + } + + return nil, err +} + +// GetAllManagedPluginsByCap returns a list of managed plugins matching the given capability. +func (ps *Store) GetAllManagedPluginsByCap(capability string) []plugingetter.CompatPlugin { + return ps.getAllByCap(capability) +} + +// GetAllByCap returns a list of enabled plugins matching the given capability. +func (ps *Store) GetAllByCap(capability string) ([]plugingetter.CompatPlugin, error) { + result := make([]plugingetter.CompatPlugin, 0, 1) + + /* Daemon start always calls plugin.Init thereby initializing a store. + * So store on experimental builds can never be nil, even while + * handling legacy plugins. However, there are legacy plugin unit + * tests where the volume subsystem directly talks with the plugin, + * bypassing the daemon. For such tests, this check is necessary. 
+ */ + if ps != nil { + ps.RLock() + result = ps.getAllByCap(capability) + ps.RUnlock() + } + + // Lookup with legacy model + if allowV1PluginsFallback { + pl, err := plugins.GetAll(capability) + if err != nil { + return nil, fmt.Errorf("legacy plugin: %v", err) + } + for _, p := range pl { + result = append(result, p) + } + } + return result, nil +} + +// Handle sets a callback for a given capability. It is only used by network +// and ipam drivers during plugin registration. The callback registers the +// driver with the subsystem (network, ipam). +func (ps *Store) Handle(capability string, callback func(string, *plugins.Client)) { + pluginType := fmt.Sprintf("docker.%s/%s", strings.ToLower(capability), defaultAPIVersion) + + // Register callback with new plugin model. + ps.Lock() + handlers, ok := ps.handlers[pluginType] + if !ok { + handlers = []func(string, *plugins.Client){} + } + handlers = append(handlers, callback) + ps.handlers[pluginType] = handlers + ps.Unlock() + + // Register callback with legacy plugin model. + if allowV1PluginsFallback { + plugins.Handle(capability, callback) + } +} + +// CallHandler calls the registered callback. It is invoked during plugin enable. +func (ps *Store) CallHandler(p *v2.Plugin) { + for _, typ := range p.GetTypes() { + for _, handler := range ps.handlers[typ.String()] { + handler(p.Name(), p.Client()) + } + } +} + +func (ps *Store) resolvePluginID(idOrName string) (string, error) { + ps.RLock() // todo: fix + defer ps.RUnlock() + + if validFullID.MatchString(idOrName) { + return idOrName, nil + } + + ref, err := reference.ParseNormalizedNamed(idOrName) + if err != nil { + return "", errors.WithStack(ErrNotFound(idOrName)) + } + if _, ok := ref.(reference.Canonical); ok { + logrus.Warnf("canonical references cannot be resolved: %v", reference.FamiliarString(ref)) + return "", errors.WithStack(ErrNotFound(idOrName)) + } + + ref = reference.TagNameOnly(ref) + + for _, p := range ps.plugins { + if p.PluginObj.Name == reference.FamiliarString(ref) { + return p.PluginObj.ID, nil + } + } + + var found *v2.Plugin + for id, p := range ps.plugins { // this can be optimized + if strings.HasPrefix(id, idOrName) { + if found != nil { + return "", errors.WithStack(ErrAmbiguous(idOrName)) + } + found = p + } + } + if found == nil { + return "", errors.WithStack(ErrNotFound(idOrName)) + } + return found.PluginObj.ID, nil +} diff --git a/vendor/github.com/moby/moby/plugin/store_test.go b/vendor/github.com/moby/moby/plugin/store_test.go new file mode 100644 index 000000000..d3876daa3 --- /dev/null +++ b/vendor/github.com/moby/moby/plugin/store_test.go @@ -0,0 +1,33 @@ +package plugin + +import ( + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/plugin/v2" +) + +func TestFilterByCapNeg(t *testing.T) { + p := v2.Plugin{PluginObj: types.Plugin{Name: "test:latest"}} + iType := types.PluginInterfaceType{Capability: "volumedriver", Prefix: "docker", Version: "1.0"} + i := types.PluginConfigInterface{Socket: "plugins.sock", Types: []types.PluginInterfaceType{iType}} + p.PluginObj.Config.Interface = i + + _, err := p.FilterByCap("foobar") + if err == nil { + t.Fatalf("expected inadequate error, got %v", err) + } +} + +func TestFilterByCapPos(t *testing.T) { + p := v2.Plugin{PluginObj: types.Plugin{Name: "test:latest"}} + + iType := types.PluginInterfaceType{Capability: "volumedriver", Prefix: "docker", Version: "1.0"} + i := types.PluginConfigInterface{Socket: "plugins.sock", Types: []types.PluginInterfaceType{iType}} + 
p.PluginObj.Config.Interface = i + + _, err := p.FilterByCap("volumedriver") + if err != nil { + t.Fatalf("expected no error, got %v", err) + } +} diff --git a/vendor/github.com/moby/moby/plugin/v2/plugin.go b/vendor/github.com/moby/moby/plugin/v2/plugin.go new file mode 100644 index 000000000..b77536c98 --- /dev/null +++ b/vendor/github.com/moby/moby/plugin/v2/plugin.go @@ -0,0 +1,246 @@ +package v2 + +import ( + "fmt" + "strings" + "sync" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/plugins" + "github.com/opencontainers/go-digest" +) + +// Plugin represents an individual plugin. +type Plugin struct { + mu sync.RWMutex + PluginObj types.Plugin `json:"plugin"` // todo: embed struct + pClient *plugins.Client + refCount int + PropagatedMount string // TODO: make private + Rootfs string // TODO: make private + + Config digest.Digest + Blobsums []digest.Digest + + SwarmServiceID string +} + +const defaultPluginRuntimeDestination = "/run/docker/plugins" + +// ErrInadequateCapability indicates that the plugin did not have the requested capability. +type ErrInadequateCapability struct { + cap string +} + +func (e ErrInadequateCapability) Error() string { + return fmt.Sprintf("plugin does not provide %q capability", e.cap) +} + +// BasePath returns the path to which all paths returned by the plugin are relative to. +// For Plugin objects this returns the host path of the plugin container's rootfs. +func (p *Plugin) BasePath() string { + return p.Rootfs +} + +// Client returns the plugin client. +func (p *Plugin) Client() *plugins.Client { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.pClient +} + +// SetPClient set the plugin client. +func (p *Plugin) SetPClient(client *plugins.Client) { + p.mu.Lock() + defer p.mu.Unlock() + + p.pClient = client +} + +// IsV1 returns true for V1 plugins and false otherwise. +func (p *Plugin) IsV1() bool { + return false +} + +// Name returns the plugin name. +func (p *Plugin) Name() string { + return p.PluginObj.Name +} + +// FilterByCap query the plugin for a given capability. +func (p *Plugin) FilterByCap(capability string) (*Plugin, error) { + capability = strings.ToLower(capability) + for _, typ := range p.PluginObj.Config.Interface.Types { + if typ.Capability == capability && typ.Prefix == "docker" { + return p, nil + } + } + return nil, ErrInadequateCapability{capability} +} + +// InitEmptySettings initializes empty settings for a plugin. +func (p *Plugin) InitEmptySettings() { + p.PluginObj.Settings.Mounts = make([]types.PluginMount, len(p.PluginObj.Config.Mounts)) + copy(p.PluginObj.Settings.Mounts, p.PluginObj.Config.Mounts) + p.PluginObj.Settings.Devices = make([]types.PluginDevice, len(p.PluginObj.Config.Linux.Devices)) + copy(p.PluginObj.Settings.Devices, p.PluginObj.Config.Linux.Devices) + p.PluginObj.Settings.Env = make([]string, 0, len(p.PluginObj.Config.Env)) + for _, env := range p.PluginObj.Config.Env { + if env.Value != nil { + p.PluginObj.Settings.Env = append(p.PluginObj.Settings.Env, fmt.Sprintf("%s=%s", env.Name, *env.Value)) + } + } + p.PluginObj.Settings.Args = make([]string, len(p.PluginObj.Config.Args.Value)) + copy(p.PluginObj.Settings.Args, p.PluginObj.Config.Args.Value) +} + +// Set is used to pass arguments to the plugin. 
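+// Arguments use the settable syntax parsed by newSettable; for example
+// (hypothetical names) "DEBUG=1" for an env var or "config-dir.source=/tmp"
+// for a mount. A field can only be changed if the plugin config marks it
+// settable.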
+func (p *Plugin) Set(args []string) error { + p.mu.Lock() + defer p.mu.Unlock() + + if p.PluginObj.Enabled { + return fmt.Errorf("cannot set on an active plugin, disable plugin before setting") + } + + sets, err := newSettables(args) + if err != nil { + return err + } + + // TODO(vieux): lots of code duplication here, needs to be refactored. + +next: + for _, s := range sets { + // range over all the envs in the config + for _, env := range p.PluginObj.Config.Env { + // found the env in the config + if env.Name == s.name { + // is it settable ? + if ok, err := s.isSettable(allowedSettableFieldsEnv, env.Settable); err != nil { + return err + } else if !ok { + return fmt.Errorf("%q is not settable", s.prettyName()) + } + // is it, so lets update the settings in memory + updateSettingsEnv(&p.PluginObj.Settings.Env, &s) + continue next + } + } + + // range over all the mounts in the config + for _, mount := range p.PluginObj.Config.Mounts { + // found the mount in the config + if mount.Name == s.name { + // is it settable ? + if ok, err := s.isSettable(allowedSettableFieldsMounts, mount.Settable); err != nil { + return err + } else if !ok { + return fmt.Errorf("%q is not settable", s.prettyName()) + } + + // it is, so lets update the settings in memory + *mount.Source = s.value + continue next + } + } + + // range over all the devices in the config + for _, device := range p.PluginObj.Config.Linux.Devices { + // found the device in the config + if device.Name == s.name { + // is it settable ? + if ok, err := s.isSettable(allowedSettableFieldsDevices, device.Settable); err != nil { + return err + } else if !ok { + return fmt.Errorf("%q is not settable", s.prettyName()) + } + + // it is, so lets update the settings in memory + *device.Path = s.value + continue next + } + } + + // found the name in the config + if p.PluginObj.Config.Args.Name == s.name { + // is it settable ? + if ok, err := s.isSettable(allowedSettableFieldsArgs, p.PluginObj.Config.Args.Settable); err != nil { + return err + } else if !ok { + return fmt.Errorf("%q is not settable", s.prettyName()) + } + + // it is, so lets update the settings in memory + p.PluginObj.Settings.Args = strings.Split(s.value, " ") + continue next + } + + return fmt.Errorf("setting %q not found in the plugin configuration", s.name) + } + + return nil +} + +// IsEnabled returns the active state of the plugin. +func (p *Plugin) IsEnabled() bool { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.PluginObj.Enabled +} + +// GetID returns the plugin's ID. +func (p *Plugin) GetID() string { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.PluginObj.ID +} + +// GetSocket returns the plugin socket. +func (p *Plugin) GetSocket() string { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.PluginObj.Config.Interface.Socket +} + +// GetTypes returns the interface types of a plugin. +func (p *Plugin) GetTypes() []types.PluginInterfaceType { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.PluginObj.Config.Interface.Types +} + +// GetRefCount returns the reference count. +func (p *Plugin) GetRefCount() int { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.refCount +} + +// AddRefCount adds to reference count. +func (p *Plugin) AddRefCount(count int) { + p.mu.Lock() + defer p.mu.Unlock() + + p.refCount += count +} + +// Acquire increments the plugin's reference count +// This should be followed up by `Release()` when the plugin is no longer in use. 
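+// Both Acquire and Release funnel through AddRefCount with the plugingetter
+// mode constants, so explicit acquisitions and store lookups made with
+// plugingetter.Acquire share a single reference counter.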
+func (p *Plugin) Acquire() { + p.AddRefCount(plugingetter.Acquire) +} + +// Release decrements the plugin's reference count +// This should only be called when the plugin is no longer in use, e.g. with +// via `Acquire()` or getter.Get("name", "type", plugingetter.Acquire) +func (p *Plugin) Release() { + p.AddRefCount(plugingetter.Release) +} diff --git a/vendor/github.com/moby/moby/plugin/v2/plugin_linux.go b/vendor/github.com/moby/moby/plugin/v2/plugin_linux.go new file mode 100644 index 000000000..9cae180e3 --- /dev/null +++ b/vendor/github.com/moby/moby/plugin/v2/plugin_linux.go @@ -0,0 +1,132 @@ +// +build linux + +package v2 + +import ( + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/oci" + "github.com/docker/docker/pkg/system" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +// InitSpec creates an OCI spec from the plugin's config. +func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) { + s := oci.DefaultSpec() + s.Root = specs.Root{ + Path: p.Rootfs, + Readonly: false, // TODO: all plugins should be readonly? settable in config? + } + + userMounts := make(map[string]struct{}, len(p.PluginObj.Settings.Mounts)) + for _, m := range p.PluginObj.Settings.Mounts { + userMounts[m.Destination] = struct{}{} + } + + execRoot = filepath.Join(execRoot, p.PluginObj.ID) + if err := os.MkdirAll(execRoot, 0700); err != nil { + return nil, errors.WithStack(err) + } + + mounts := append(p.PluginObj.Config.Mounts, types.PluginMount{ + Source: &execRoot, + Destination: defaultPluginRuntimeDestination, + Type: "bind", + Options: []string{"rbind", "rshared"}, + }) + + if p.PluginObj.Config.Network.Type != "" { + // TODO: if net == bridge, use libnetwork controller to create a new plugin-specific bridge, bind mount /etc/hosts and /etc/resolv.conf look at the docker code (allocateNetwork, initialize) + if p.PluginObj.Config.Network.Type == "host" { + oci.RemoveNamespace(&s, specs.LinuxNamespaceType("network")) + } + etcHosts := "/etc/hosts" + resolvConf := "/etc/resolv.conf" + mounts = append(mounts, + types.PluginMount{ + Source: &etcHosts, + Destination: etcHosts, + Type: "bind", + Options: []string{"rbind", "ro"}, + }, + types.PluginMount{ + Source: &resolvConf, + Destination: resolvConf, + Type: "bind", + Options: []string{"rbind", "ro"}, + }) + } + if p.PluginObj.Config.PidHost { + oci.RemoveNamespace(&s, specs.LinuxNamespaceType("pid")) + } + + if p.PluginObj.Config.IpcHost { + oci.RemoveNamespace(&s, specs.LinuxNamespaceType("ipc")) + } + + for _, mnt := range mounts { + m := specs.Mount{ + Destination: mnt.Destination, + Type: mnt.Type, + Options: mnt.Options, + } + if mnt.Source == nil { + return nil, errors.New("mount source is not specified") + } + m.Source = *mnt.Source + s.Mounts = append(s.Mounts, m) + } + + for i, m := range s.Mounts { + if strings.HasPrefix(m.Destination, "/dev/") { + if _, ok := userMounts[m.Destination]; ok { + s.Mounts = append(s.Mounts[:i], s.Mounts[i+1:]...) 
+			}
+		}
+	}
+
+	if p.PluginObj.Config.PropagatedMount != "" {
+		p.PropagatedMount = filepath.Join(p.Rootfs, p.PluginObj.Config.PropagatedMount)
+		s.Linux.RootfsPropagation = "rshared"
+	}
+
+	if p.PluginObj.Config.Linux.AllowAllDevices {
+		s.Linux.Resources.Devices = []specs.LinuxDeviceCgroup{{Allow: true, Access: "rwm"}}
+	}
+	for _, dev := range p.PluginObj.Settings.Devices {
+		path := *dev.Path
+		d, dPermissions, err := oci.DevicesFromPath(path, path, "rwm")
+		if err != nil {
+			return nil, errors.WithStack(err)
+		}
+		s.Linux.Devices = append(s.Linux.Devices, d...)
+		s.Linux.Resources.Devices = append(s.Linux.Resources.Devices, dPermissions...)
+	}
+
+	envs := make([]string, 1, len(p.PluginObj.Settings.Env)+1)
+	envs[0] = "PATH=" + system.DefaultPathEnv(runtime.GOOS)
+	envs = append(envs, p.PluginObj.Settings.Env...)
+
+	args := append(p.PluginObj.Config.Entrypoint, p.PluginObj.Settings.Args...)
+	cwd := p.PluginObj.Config.WorkDir
+	if len(cwd) == 0 {
+		cwd = "/"
+	}
+	s.Process.Terminal = false
+	s.Process.Args = args
+	s.Process.Cwd = cwd
+	s.Process.Env = envs
+
+	caps := s.Process.Capabilities
+	caps.Bounding = append(caps.Bounding, p.PluginObj.Config.Linux.Capabilities...)
+	caps.Permitted = append(caps.Permitted, p.PluginObj.Config.Linux.Capabilities...)
+	caps.Inheritable = append(caps.Inheritable, p.PluginObj.Config.Linux.Capabilities...)
+	caps.Effective = append(caps.Effective, p.PluginObj.Config.Linux.Capabilities...)
+
+	return &s, nil
+}
diff --git a/vendor/github.com/moby/moby/plugin/v2/plugin_unsupported.go b/vendor/github.com/moby/moby/plugin/v2/plugin_unsupported.go
new file mode 100644
index 000000000..e60fb8311
--- /dev/null
+++ b/vendor/github.com/moby/moby/plugin/v2/plugin_unsupported.go
@@ -0,0 +1,14 @@
+// +build !linux
+
+package v2
+
+import (
+	"errors"
+
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// InitSpec creates an OCI spec from the plugin's config.
+func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) {
+	return nil, errors.New("not supported")
+}
diff --git a/vendor/github.com/moby/moby/plugin/v2/settable.go b/vendor/github.com/moby/moby/plugin/v2/settable.go
new file mode 100644
index 000000000..79c6befc2
--- /dev/null
+++ b/vendor/github.com/moby/moby/plugin/v2/settable.go
@@ -0,0 +1,102 @@
+package v2
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+)
+
+type settable struct {
+	name  string
+	field string
+	value string
+}
+
+var (
+	allowedSettableFieldsEnv     = []string{"value"}
+	allowedSettableFieldsArgs    = []string{"value"}
+	allowedSettableFieldsDevices = []string{"path"}
+	allowedSettableFieldsMounts  = []string{"source"}
+
+	errMultipleFields = errors.New("multiple fields are settable, one must be specified")
+	errInvalidFormat  = errors.New("invalid format, must be <name>[.<field>][=<value>]")
+)
+
+func newSettables(args []string) ([]settable, error) {
+	sets := make([]settable, 0, len(args))
+	for _, arg := range args {
+		set, err := newSettable(arg)
+		if err != nil {
+			return nil, err
+		}
+		sets = append(sets, set)
+	}
+	return sets, nil
+}
+
+func newSettable(arg string) (settable, error) {
+	var set settable
+	if i := strings.Index(arg, "="); i == 0 {
+		return set, errInvalidFormat
+	} else if i < 0 {
+		set.name = arg
+	} else {
+		set.name = arg[:i]
+		set.value = arg[i+1:]
+	}
+
+	if i := strings.LastIndex(set.name, "."); i > 0 {
+		set.field = set.name[i+1:]
+		set.name = arg[:i]
+	}
+
+	return set, nil
+}
+
+// prettyName returns name.field if there is a field, otherwise name.
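+// For example, a settable parsed from "DEBUG.value=1" prints as "DEBUG.value",
+// while one parsed from "DEBUG=1" prints simply as "DEBUG".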
+func (set *settable) prettyName() string { + if set.field != "" { + return fmt.Sprintf("%s.%s", set.name, set.field) + } + return set.name +} + +func (set *settable) isSettable(allowedSettableFields []string, settable []string) (bool, error) { + if set.field == "" { + if len(settable) == 1 { + // if field is not specified and there only one settable, default to it. + set.field = settable[0] + } else if len(settable) > 1 { + return false, errMultipleFields + } + } + + isAllowed := false + for _, allowedSettableField := range allowedSettableFields { + if set.field == allowedSettableField { + isAllowed = true + break + } + } + + if isAllowed { + for _, settableField := range settable { + if set.field == settableField { + return true, nil + } + } + } + + return false, nil +} + +func updateSettingsEnv(env *[]string, set *settable) { + for i, e := range *env { + if parts := strings.SplitN(e, "=", 2); parts[0] == set.name { + (*env)[i] = fmt.Sprintf("%s=%s", set.name, set.value) + return + } + } + + *env = append(*env, fmt.Sprintf("%s=%s", set.name, set.value)) +} diff --git a/vendor/github.com/moby/moby/plugin/v2/settable_test.go b/vendor/github.com/moby/moby/plugin/v2/settable_test.go new file mode 100644 index 000000000..1094c472b --- /dev/null +++ b/vendor/github.com/moby/moby/plugin/v2/settable_test.go @@ -0,0 +1,91 @@ +package v2 + +import ( + "reflect" + "testing" +) + +func TestNewSettable(t *testing.T) { + contexts := []struct { + arg string + name string + field string + value string + err error + }{ + {"name=value", "name", "", "value", nil}, + {"name", "name", "", "", nil}, + {"name.field=value", "name", "field", "value", nil}, + {"name.field", "name", "field", "", nil}, + {"=value", "", "", "", errInvalidFormat}, + {"=", "", "", "", errInvalidFormat}, + } + + for _, c := range contexts { + s, err := newSettable(c.arg) + if err != c.err { + t.Fatalf("expected error to be %v, got %v", c.err, err) + } + + if s.name != c.name { + t.Fatalf("expected name to be %q, got %q", c.name, s.name) + } + + if s.field != c.field { + t.Fatalf("expected field to be %q, got %q", c.field, s.field) + } + + if s.value != c.value { + t.Fatalf("expected value to be %q, got %q", c.value, s.value) + } + + } +} + +func TestIsSettable(t *testing.T) { + contexts := []struct { + allowedSettableFields []string + set settable + settable []string + result bool + err error + }{ + {allowedSettableFieldsEnv, settable{}, []string{}, false, nil}, + {allowedSettableFieldsEnv, settable{field: "value"}, []string{}, false, nil}, + {allowedSettableFieldsEnv, settable{}, []string{"value"}, true, nil}, + {allowedSettableFieldsEnv, settable{field: "value"}, []string{"value"}, true, nil}, + {allowedSettableFieldsEnv, settable{field: "foo"}, []string{"value"}, false, nil}, + {allowedSettableFieldsEnv, settable{field: "foo"}, []string{"foo"}, false, nil}, + {allowedSettableFieldsEnv, settable{}, []string{"value1", "value2"}, false, errMultipleFields}, + } + + for _, c := range contexts { + if res, err := c.set.isSettable(c.allowedSettableFields, c.settable); res != c.result { + t.Fatalf("expected result to be %t, got %t", c.result, res) + } else if err != c.err { + t.Fatalf("expected error to be %v, got %v", c.err, err) + } + } +} + +func TestUpdateSettingsEnv(t *testing.T) { + contexts := []struct { + env []string + set settable + newEnv []string + }{ + {[]string{}, settable{name: "DEBUG", value: "1"}, []string{"DEBUG=1"}}, + {[]string{"DEBUG=0"}, settable{name: "DEBUG", value: "1"}, []string{"DEBUG=1"}}, + {[]string{"FOO=0"}, 
settable{name: "DEBUG", value: "1"}, []string{"FOO=0", "DEBUG=1"}}, + {[]string{"FOO=0", "DEBUG=0"}, settable{name: "DEBUG", value: "1"}, []string{"FOO=0", "DEBUG=1"}}, + {[]string{"FOO=0", "DEBUG=0", "BAR=1"}, settable{name: "DEBUG", value: "1"}, []string{"FOO=0", "DEBUG=1", "BAR=1"}}, + } + + for _, c := range contexts { + updateSettingsEnv(&c.env, &c.set) + + if !reflect.DeepEqual(c.env, c.newEnv) { + t.Fatalf("expected env to be %q, got %q", c.newEnv, c.env) + } + } +} diff --git a/vendor/github.com/moby/moby/poule.yml b/vendor/github.com/moby/moby/poule.yml new file mode 100644 index 000000000..2abf0df7f --- /dev/null +++ b/vendor/github.com/moby/moby/poule.yml @@ -0,0 +1,131 @@ +# Add a "status/0-triage" to every newly opened pull request. +- triggers: + pull_request: [ opened ] + operations: + - type: label + filters: { + ~labels: [ "status/0-triage", "status/1-design-review", "status/2-code-review", "status/3-docs-review", "status/4-merge" ], + } + settings: { + patterns: { + status/0-triage: [ ".*" ], + } + } + +# For every newly created or modified issue, assign label based on matching regexp using the `label` +# operation, as well as an Engine-specific version label using `version-label`. +- triggers: + issues: [ edited, opened, reopened ] + operations: + - type: label + settings: { + patterns: { + area/builder: [ "dockerfile", "docker build" ], + area/distribution: [ "docker login", "docker logout", "docker pull", "docker push", "docker search" ], + area/plugins: [ "docker plugin" ], + area/networking: [ "docker network", "ipvs", "vxlan" ], + area/runtime: [ "oci runtime error" ], + area/security/trust: [ "docker_content_trust" ], + area/swarm: [ "docker node", "docker swarm", "docker service create", "docker service inspect", "docker service logs", "docker service ls", "docker service ps", "docker service rm", "docker service scale", "docker service update" ], + platform/desktop: [ "docker for mac", "docker for windows" ], + platform/freebsd: [ "freebsd" ], + platform/windows: [ "nanoserver", "windowsservercore", "windows server" ], + platform/arm: [ "raspberry", "raspbian", "rpi", "beaglebone", "pine64" ], + } + } + - type: version-label + +# Labeling a PR with `rebuild/` triggers a rebuild job for the associated +# configuration. The label is automatically removed after the rebuild is initiated. There's no such +# thing as "templating" in this configuration, so we need one operation for each type of +# configuration that can be triggered. +- triggers: + pull_request: [ labeled ] + operations: + - type: rebuild + settings: { + # When configurations are empty, the `rebuild` operation rebuilds all the currently + # known statuses for that pull request. 
+      configurations: [],
+      label: "rebuild/*",
+    }
+  - type: rebuild
+    settings: {
+      configurations: [ arm ],
+      label: "rebuild/arm",
+    }
+  - type: rebuild
+    settings: {
+      configurations: [ experimental ],
+      label: "rebuild/experimental",
+    }
+  - type: rebuild
+    settings: {
+      configurations: [ janky ],
+      label: "rebuild/janky",
+    }
+  - type: rebuild
+    settings: {
+      configurations: [ powerpc ],
+      label: "rebuild/powerpc",
+    }
+  - type: rebuild
+    settings: {
+      configurations: [ userns ],
+      label: "rebuild/userns",
+    }
+  - type: rebuild
+    settings: {
+      configurations: [ vendor ],
+      label: "rebuild/vendor",
+    }
+  - type: rebuild
+    settings: {
+      configurations: [ win2lin ],
+      label: "rebuild/win2lin",
+    }
+  - type: rebuild
+    settings: {
+      configurations: [ windowsRS1 ],
+      label: "rebuild/windowsRS1",
+    }
+  - type: rebuild
+    settings: {
+      configurations: [ z ],
+      label: "rebuild/z",
+    }
+
+# Once a day, randomly assign pull requests older than 2 weeks.
+- schedule: "@daily"
+  operations:
+    - type: random-assign
+      filters: {
+        age: "2w",
+        is: "pr",
+      }
+      settings: {
+        users: [
+          "aaronlehmann",
+          "akihirosuda",
+          "aluzzardi",
+          "coolljt0725",
+          "cpuguy83",
+          "crosbymichael",
+          "dnephin",
+          "duglin",
+          "ehazlett",
+          "johnstep",
+          "justincormack",
+          "lk4d4",
+          "mhbauer",
+          "mlaventure",
+          "runcom",
+          "stevvooe",
+          "thajeztah",
+          "tiborvass",
+          "tonistiigi",
+          "vdemeester",
+          "vieux",
+          "yongtang",
+        ]
+      }
diff --git a/vendor/github.com/moby/moby/profiles/apparmor/apparmor.go b/vendor/github.com/moby/moby/profiles/apparmor/apparmor.go
new file mode 100644
index 000000000..48b41c5b2
--- /dev/null
+++ b/vendor/github.com/moby/moby/profiles/apparmor/apparmor.go
@@ -0,0 +1,114 @@
+// +build linux
+
+package apparmor
+
+import (
+	"bufio"
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+	"strings"
+
+	"github.com/docker/docker/pkg/aaparser"
+	"github.com/docker/docker/pkg/templates"
+)
+
+var (
+	// profileDirectory is the file store for apparmor profiles and macros.
+	profileDirectory = "/etc/apparmor.d"
+)
+
+// profileData holds information about the given profile for generation.
+type profileData struct {
+	// Name is profile name.
+	Name string
+	// Imports defines the apparmor functions to import, before defining the profile.
+	Imports []string
+	// InnerImports defines the apparmor functions to import in the profile.
+	InnerImports []string
+	// Version is the {major, minor, patch} version of apparmor_parser as a single number.
+	Version int
+}
+
+// generateDefault creates an apparmor profile from profileData.
+func (p *profileData) generateDefault(out io.Writer) error {
+	compiled, err := templates.NewParse("apparmor_profile", baseTemplate)
+	if err != nil {
+		return err
+	}
+
+	if macroExists("tunables/global") {
+		p.Imports = append(p.Imports, "#include <tunables/global>")
+	} else {
+		p.Imports = append(p.Imports, "@{PROC}=/proc/")
+	}
+
+	if macroExists("abstractions/base") {
+		p.InnerImports = append(p.InnerImports, "#include <abstractions/base>")
+	}
+
+	ver, err := aaparser.GetVersion()
+	if err != nil {
+		return err
+	}
+	p.Version = ver
+
+	return compiled.Execute(out, p)
+}
+
+// macroExists checks if the passed macro exists.
+func macroExists(m string) bool {
+	_, err := os.Stat(path.Join(profileDirectory, m))
+	return err == nil
+}
+
+// InstallDefault generates a default profile in a temp directory determined by
+// os.TempDir(), then loads the profile into the kernel using 'apparmor_parser'.
+func InstallDefault(name string) error {
+	p := profileData{
+		Name: name,
+	}
+
+	// Install to a temporary directory.
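+	// (The temporary file is deleted once apparmor_parser has consumed it; a
+	// loaded profile lives in the kernel, not on disk.)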
+ f, err := ioutil.TempFile("", name) + if err != nil { + return err + } + profilePath := f.Name() + + defer f.Close() + defer os.Remove(profilePath) + + if err := p.generateDefault(f); err != nil { + return err + } + + return aaparser.LoadProfile(profilePath) +} + +// IsLoaded checks if a profile with the given name has been loaded into the +// kernel. +func IsLoaded(name string) (bool, error) { + file, err := os.Open("/sys/kernel/security/apparmor/profiles") + if err != nil { + return false, err + } + defer file.Close() + + r := bufio.NewReader(file) + for { + p, err := r.ReadString('\n') + if err == io.EOF { + break + } + if err != nil { + return false, err + } + if strings.HasPrefix(p, name+" ") { + return true, nil + } + } + + return false, nil +} diff --git a/vendor/github.com/moby/moby/profiles/apparmor/template.go b/vendor/github.com/moby/moby/profiles/apparmor/template.go new file mode 100644 index 000000000..c5ea4584d --- /dev/null +++ b/vendor/github.com/moby/moby/profiles/apparmor/template.go @@ -0,0 +1,46 @@ +// +build linux + +package apparmor + +// baseTemplate defines the default apparmor profile for containers. +const baseTemplate = ` +{{range $value := .Imports}} +{{$value}} +{{end}} + +profile {{.Name}} flags=(attach_disconnected,mediate_deleted) { +{{range $value := .InnerImports}} + {{$value}} +{{end}} + + network, + capability, + file, + umount, + + deny @{PROC}/* w, # deny write for all files directly in /proc (not in a subdir) + # deny write to files not in /proc//** or /proc/sys/** + deny @{PROC}/{[^1-9],[^1-9][^0-9],[^1-9s][^0-9y][^0-9s],[^1-9][^0-9][^0-9][^0-9]*}/** w, + deny @{PROC}/sys/[^k]** w, # deny /proc/sys except /proc/sys/k* (effectively /proc/sys/kernel) + deny @{PROC}/sys/kernel/{?,??,[^s][^h][^m]**} w, # deny everything except shm* in /proc/sys/kernel/ + deny @{PROC}/sysrq-trigger rwklx, + deny @{PROC}/mem rwklx, + deny @{PROC}/kmem rwklx, + deny @{PROC}/kcore rwklx, + + deny mount, + + deny /sys/[^f]*/** wklx, + deny /sys/f[^s]*/** wklx, + deny /sys/fs/[^c]*/** wklx, + deny /sys/fs/c[^g]*/** wklx, + deny /sys/fs/cg[^r]*/** wklx, + deny /sys/firmware/** rwklx, + deny /sys/kernel/security/** rwklx, + +{{if ge .Version 208095}} + # suppress ptrace denials when using 'docker ps' or using 'ps' inside a container + ptrace (trace,read) peer={{.Name}}, +{{end}} +} +` diff --git a/vendor/github.com/moby/moby/profiles/seccomp/default.json b/vendor/github.com/moby/moby/profiles/seccomp/default.json new file mode 100755 index 000000000..b71a8718a --- /dev/null +++ b/vendor/github.com/moby/moby/profiles/seccomp/default.json @@ -0,0 +1,750 @@ +{ + "defaultAction": "SCMP_ACT_ERRNO", + "archMap": [ + { + "architecture": "SCMP_ARCH_X86_64", + "subArchitectures": [ + "SCMP_ARCH_X86", + "SCMP_ARCH_X32" + ] + }, + { + "architecture": "SCMP_ARCH_AARCH64", + "subArchitectures": [ + "SCMP_ARCH_ARM" + ] + }, + { + "architecture": "SCMP_ARCH_MIPS64", + "subArchitectures": [ + "SCMP_ARCH_MIPS", + "SCMP_ARCH_MIPS64N32" + ] + }, + { + "architecture": "SCMP_ARCH_MIPS64N32", + "subArchitectures": [ + "SCMP_ARCH_MIPS", + "SCMP_ARCH_MIPS64" + ] + }, + { + "architecture": "SCMP_ARCH_MIPSEL64", + "subArchitectures": [ + "SCMP_ARCH_MIPSEL", + "SCMP_ARCH_MIPSEL64N32" + ] + }, + { + "architecture": "SCMP_ARCH_MIPSEL64N32", + "subArchitectures": [ + "SCMP_ARCH_MIPSEL", + "SCMP_ARCH_MIPSEL64" + ] + }, + { + "architecture": "SCMP_ARCH_S390X", + "subArchitectures": [ + "SCMP_ARCH_S390" + ] + } + ], + "syscalls": [ + { + "names": [ + "accept", + "accept4", + "access", + "adjtimex", + 
"alarm", + "alarm", + "bind", + "brk", + "capget", + "capset", + "chdir", + "chmod", + "chown", + "chown32", + "clock_getres", + "clock_gettime", + "clock_nanosleep", + "close", + "connect", + "copy_file_range", + "creat", + "dup", + "dup2", + "dup3", + "epoll_create", + "epoll_create1", + "epoll_ctl", + "epoll_ctl_old", + "epoll_pwait", + "epoll_wait", + "epoll_wait_old", + "eventfd", + "eventfd2", + "execve", + "execveat", + "exit", + "exit_group", + "faccessat", + "fadvise64", + "fadvise64_64", + "fallocate", + "fanotify_mark", + "fchdir", + "fchmod", + "fchmodat", + "fchown", + "fchown32", + "fchownat", + "fcntl", + "fcntl64", + "fdatasync", + "fgetxattr", + "flistxattr", + "flock", + "fork", + "fremovexattr", + "fsetxattr", + "fstat", + "fstat64", + "fstatat64", + "fstatfs", + "fstatfs64", + "fsync", + "ftruncate", + "ftruncate64", + "futex", + "futimesat", + "getcpu", + "getcwd", + "getdents", + "getdents64", + "getegid", + "getegid32", + "geteuid", + "geteuid32", + "getgid", + "getgid32", + "getgroups", + "getgroups32", + "getitimer", + "getpeername", + "getpgid", + "getpgrp", + "getpid", + "getppid", + "getpriority", + "getrandom", + "getresgid", + "getresgid32", + "getresuid", + "getresuid32", + "getrlimit", + "get_robust_list", + "getrusage", + "getsid", + "getsockname", + "getsockopt", + "get_thread_area", + "gettid", + "gettimeofday", + "getuid", + "getuid32", + "getxattr", + "inotify_add_watch", + "inotify_init", + "inotify_init1", + "inotify_rm_watch", + "io_cancel", + "ioctl", + "io_destroy", + "io_getevents", + "ioprio_get", + "ioprio_set", + "io_setup", + "io_submit", + "ipc", + "kill", + "lchown", + "lchown32", + "lgetxattr", + "link", + "linkat", + "listen", + "listxattr", + "llistxattr", + "_llseek", + "lremovexattr", + "lseek", + "lsetxattr", + "lstat", + "lstat64", + "madvise", + "memfd_create", + "mincore", + "mkdir", + "mkdirat", + "mknod", + "mknodat", + "mlock", + "mlock2", + "mlockall", + "mmap", + "mmap2", + "mprotect", + "mq_getsetattr", + "mq_notify", + "mq_open", + "mq_timedreceive", + "mq_timedsend", + "mq_unlink", + "mremap", + "msgctl", + "msgget", + "msgrcv", + "msgsnd", + "msync", + "munlock", + "munlockall", + "munmap", + "nanosleep", + "newfstatat", + "_newselect", + "open", + "openat", + "pause", + "pipe", + "pipe2", + "poll", + "ppoll", + "prctl", + "pread64", + "preadv", + "preadv2", + "prlimit64", + "pselect6", + "pwrite64", + "pwritev", + "pwritev2", + "read", + "readahead", + "readlink", + "readlinkat", + "readv", + "recv", + "recvfrom", + "recvmmsg", + "recvmsg", + "remap_file_pages", + "removexattr", + "rename", + "renameat", + "renameat2", + "restart_syscall", + "rmdir", + "rt_sigaction", + "rt_sigpending", + "rt_sigprocmask", + "rt_sigqueueinfo", + "rt_sigreturn", + "rt_sigsuspend", + "rt_sigtimedwait", + "rt_tgsigqueueinfo", + "sched_getaffinity", + "sched_getattr", + "sched_getparam", + "sched_get_priority_max", + "sched_get_priority_min", + "sched_getscheduler", + "sched_rr_get_interval", + "sched_setaffinity", + "sched_setattr", + "sched_setparam", + "sched_setscheduler", + "sched_yield", + "seccomp", + "select", + "semctl", + "semget", + "semop", + "semtimedop", + "send", + "sendfile", + "sendfile64", + "sendmmsg", + "sendmsg", + "sendto", + "setfsgid", + "setfsgid32", + "setfsuid", + "setfsuid32", + "setgid", + "setgid32", + "setgroups", + "setgroups32", + "setitimer", + "setpgid", + "setpriority", + "setregid", + "setregid32", + "setresgid", + "setresgid32", + "setresuid", + "setresuid32", + "setreuid", + "setreuid32", + "setrlimit", + 
"set_robust_list", + "setsid", + "setsockopt", + "set_thread_area", + "set_tid_address", + "setuid", + "setuid32", + "setxattr", + "shmat", + "shmctl", + "shmdt", + "shmget", + "shutdown", + "sigaltstack", + "signalfd", + "signalfd4", + "sigreturn", + "socket", + "socketcall", + "socketpair", + "splice", + "stat", + "stat64", + "statfs", + "statfs64", + "symlink", + "symlinkat", + "sync", + "sync_file_range", + "syncfs", + "sysinfo", + "syslog", + "tee", + "tgkill", + "time", + "timer_create", + "timer_delete", + "timerfd_create", + "timerfd_gettime", + "timerfd_settime", + "timer_getoverrun", + "timer_gettime", + "timer_settime", + "times", + "tkill", + "truncate", + "truncate64", + "ugetrlimit", + "umask", + "uname", + "unlink", + "unlinkat", + "utime", + "utimensat", + "utimes", + "vfork", + "vmsplice", + "wait4", + "waitid", + "waitpid", + "write", + "writev" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 0, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 8, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 131072, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 131080, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 4294967295, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "sync_file_range2" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "arches": [ + "ppc64le" + ] + }, + "excludes": {} + }, + { + "names": [ + "arm_fadvise64_64", + "arm_sync_file_range", + "sync_file_range2", + "breakpoint", + "cacheflush", + "set_tls" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "arches": [ + "arm", + "arm64" + ] + }, + "excludes": {} + }, + { + "names": [ + "arch_prctl" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "arches": [ + "amd64", + "x32" + ] + }, + "excludes": {} + }, + { + "names": [ + "modify_ldt" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "arches": [ + "amd64", + "x32", + "x86" + ] + }, + "excludes": {} + }, + { + "names": [ + "s390_pci_mmio_read", + "s390_pci_mmio_write", + "s390_runtime_instr" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "arches": [ + "s390", + "s390x" + ] + }, + "excludes": {} + }, + { + "names": [ + "open_by_handle_at" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_DAC_READ_SEARCH" + ] + }, + "excludes": {} + }, + { + "names": [ + "bpf", + "clone", + "fanotify_init", + "lookup_dcookie", + "mount", + "name_to_handle_at", + "perf_event_open", + "setdomainname", + "sethostname", + "setns", + "umount", + "umount2", + "unshare" + ], + 
"action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_ADMIN" + ] + }, + "excludes": {} + }, + { + "names": [ + "clone" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 2080505856, + "valueTwo": 0, + "op": "SCMP_CMP_MASKED_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": { + "caps": [ + "CAP_SYS_ADMIN" + ], + "arches": [ + "s390", + "s390x" + ] + } + }, + { + "names": [ + "clone" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 1, + "value": 2080505856, + "valueTwo": 0, + "op": "SCMP_CMP_MASKED_EQ" + } + ], + "comment": "s390 parameter ordering for clone is different", + "includes": { + "arches": [ + "s390", + "s390x" + ] + }, + "excludes": { + "caps": [ + "CAP_SYS_ADMIN" + ] + } + }, + { + "names": [ + "reboot" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_BOOT" + ] + }, + "excludes": {} + }, + { + "names": [ + "chroot" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_CHROOT" + ] + }, + "excludes": {} + }, + { + "names": [ + "delete_module", + "init_module", + "finit_module", + "query_module" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_MODULE" + ] + }, + "excludes": {} + }, + { + "names": [ + "acct" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_PACCT" + ] + }, + "excludes": {} + }, + { + "names": [ + "kcmp", + "process_vm_readv", + "process_vm_writev", + "ptrace" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_PTRACE" + ] + }, + "excludes": {} + }, + { + "names": [ + "iopl", + "ioperm" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_RAWIO" + ] + }, + "excludes": {} + }, + { + "names": [ + "settimeofday", + "stime", + "clock_settime" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_TIME" + ] + }, + "excludes": {} + }, + { + "names": [ + "vhangup" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_TTY_CONFIG" + ] + }, + "excludes": {} + } + ] +} \ No newline at end of file diff --git a/vendor/github.com/moby/moby/profiles/seccomp/fixtures/example.json b/vendor/github.com/moby/moby/profiles/seccomp/fixtures/example.json new file mode 100755 index 000000000..674ca50fd --- /dev/null +++ b/vendor/github.com/moby/moby/profiles/seccomp/fixtures/example.json @@ -0,0 +1,27 @@ +{ + "defaultAction": "SCMP_ACT_ERRNO", + "syscalls": [ + { + "name": "clone", + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 2080505856, + "valueTwo": 0, + "op": "SCMP_CMP_MASKED_EQ" + } + ] + }, + { + "name": "open", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "close", + "action": "SCMP_ACT_ALLOW", + "args": [] + } + ] +} diff --git a/vendor/github.com/moby/moby/profiles/seccomp/generate.go b/vendor/github.com/moby/moby/profiles/seccomp/generate.go new file mode 100644 index 000000000..32f22bb37 --- /dev/null +++ b/vendor/github.com/moby/moby/profiles/seccomp/generate.go @@ -0,0 +1,32 @@ +// +build ignore + +package main + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/profiles/seccomp" +) + +// saves the default seccomp profile as a json file so people can use it as a +// base for their 
own custom profiles +func main() { + wd, err := os.Getwd() + if err != nil { + panic(err) + } + f := filepath.Join(wd, "default.json") + + // write the default profile to the file + b, err := json.MarshalIndent(seccomp.DefaultProfile(), "", "\t") + if err != nil { + panic(err) + } + + if err := ioutil.WriteFile(f, b, 0644); err != nil { + panic(err) + } +} diff --git a/vendor/github.com/moby/moby/profiles/seccomp/seccomp.go b/vendor/github.com/moby/moby/profiles/seccomp/seccomp.go new file mode 100644 index 000000000..90a385948 --- /dev/null +++ b/vendor/github.com/moby/moby/profiles/seccomp/seccomp.go @@ -0,0 +1,150 @@ +// +build linux + +package seccomp + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringutils" + "github.com/opencontainers/runtime-spec/specs-go" + libseccomp "github.com/seccomp/libseccomp-golang" +) + +//go:generate go run -tags 'seccomp' generate.go + +// GetDefaultProfile returns the default seccomp profile. +func GetDefaultProfile(rs *specs.Spec) (*specs.LinuxSeccomp, error) { + return setupSeccomp(DefaultProfile(), rs) +} + +// LoadProfile takes a json string and decodes the seccomp profile. +func LoadProfile(body string, rs *specs.Spec) (*specs.LinuxSeccomp, error) { + var config types.Seccomp + if err := json.Unmarshal([]byte(body), &config); err != nil { + return nil, fmt.Errorf("Decoding seccomp profile failed: %v", err) + } + return setupSeccomp(&config, rs) +} + +var nativeToSeccomp = map[string]types.Arch{ + "amd64": types.ArchX86_64, + "arm64": types.ArchAARCH64, + "mips64": types.ArchMIPS64, + "mips64n32": types.ArchMIPS64N32, + "mipsel64": types.ArchMIPSEL64, + "mipsel64n32": types.ArchMIPSEL64N32, + "s390x": types.ArchS390X, +} + +func setupSeccomp(config *types.Seccomp, rs *specs.Spec) (*specs.LinuxSeccomp, error) { + if config == nil { + return nil, nil + } + + // No default action specified, no syscalls listed, assume seccomp disabled + if config.DefaultAction == "" && len(config.Syscalls) == 0 { + return nil, nil + } + + newConfig := &specs.LinuxSeccomp{} + + var arch string + var native, err = libseccomp.GetNativeArch() + if err == nil { + arch = native.String() + } + + if len(config.Architectures) != 0 && len(config.ArchMap) != 0 { + return nil, errors.New("'architectures' and 'archMap' were specified in the seccomp profile, use either 'architectures' or 'archMap'") + } + + // if config.Architectures == 0 then libseccomp will figure out the architecture to use + if len(config.Architectures) != 0 { + for _, a := range config.Architectures { + newConfig.Architectures = append(newConfig.Architectures, specs.Arch(a)) + } + } + + if len(config.ArchMap) != 0 { + for _, a := range config.ArchMap { + seccompArch, ok := nativeToSeccomp[arch] + if ok { + if a.Arch == seccompArch { + newConfig.Architectures = append(newConfig.Architectures, specs.Arch(a.Arch)) + for _, sa := range a.SubArches { + newConfig.Architectures = append(newConfig.Architectures, specs.Arch(sa)) + } + break + } + } + } + } + + newConfig.DefaultAction = specs.LinuxSeccompAction(config.DefaultAction) + +Loop: + // Loop through all syscall blocks and convert them to libcontainer format after filtering them + for _, call := range config.Syscalls { + if len(call.Excludes.Arches) > 0 { + if stringutils.InSlice(call.Excludes.Arches, arch) { + continue Loop + } + } + if len(call.Excludes.Caps) > 0 { + for _, c := range call.Excludes.Caps { + if stringutils.InSlice(rs.Process.Capabilities.Effective, c) { + continue Loop + 
} + } + } + if len(call.Includes.Arches) > 0 { + if !stringutils.InSlice(call.Includes.Arches, arch) { + continue Loop + } + } + if len(call.Includes.Caps) > 0 { + for _, c := range call.Includes.Caps { + if !stringutils.InSlice(rs.Process.Capabilities.Effective, c) { + continue Loop + } + } + } + + if call.Name != "" && len(call.Names) != 0 { + return nil, errors.New("'name' and 'names' were specified in the seccomp profile, use either 'name' or 'names'") + } + + if call.Name != "" { + newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall(call.Name, call.Action, call.Args)) + } + + for _, n := range call.Names { + newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall(n, call.Action, call.Args)) + } + } + + return newConfig, nil +} + +func createSpecsSyscall(name string, action types.Action, args []*types.Arg) specs.LinuxSyscall { + newCall := specs.LinuxSyscall{ + Names: []string{name}, + Action: specs.LinuxSeccompAction(action), + } + + // Loop through all the arguments of the syscall and convert them + for _, arg := range args { + newArg := specs.LinuxSeccompArg{ + Index: arg.Index, + Value: arg.Value, + ValueTwo: arg.ValueTwo, + Op: specs.LinuxSeccompOperator(arg.Op), + } + + newCall.Args = append(newCall.Args, newArg) + } + return newCall +} diff --git a/vendor/github.com/moby/moby/profiles/seccomp/seccomp_default.go b/vendor/github.com/moby/moby/profiles/seccomp/seccomp_default.go new file mode 100644 index 000000000..1e6ea90e3 --- /dev/null +++ b/vendor/github.com/moby/moby/profiles/seccomp/seccomp_default.go @@ -0,0 +1,639 @@ +// +build linux,seccomp + +package seccomp + +import ( + "github.com/docker/docker/api/types" + "golang.org/x/sys/unix" +) + +func arches() []types.Architecture { + return []types.Architecture{ + { + Arch: types.ArchX86_64, + SubArches: []types.Arch{types.ArchX86, types.ArchX32}, + }, + { + Arch: types.ArchAARCH64, + SubArches: []types.Arch{types.ArchARM}, + }, + { + Arch: types.ArchMIPS64, + SubArches: []types.Arch{types.ArchMIPS, types.ArchMIPS64N32}, + }, + { + Arch: types.ArchMIPS64N32, + SubArches: []types.Arch{types.ArchMIPS, types.ArchMIPS64}, + }, + { + Arch: types.ArchMIPSEL64, + SubArches: []types.Arch{types.ArchMIPSEL, types.ArchMIPSEL64N32}, + }, + { + Arch: types.ArchMIPSEL64N32, + SubArches: []types.Arch{types.ArchMIPSEL, types.ArchMIPSEL64}, + }, + { + Arch: types.ArchS390X, + SubArches: []types.Arch{types.ArchS390}, + }, + } +} + +// DefaultProfile defines the whitelist for the default seccomp profile. 
+func DefaultProfile() *types.Seccomp {
+	syscalls := []*types.Syscall{
+		{
+			Names: []string{
+				"accept",
+				"accept4",
+				"access",
+				"adjtimex",
+				"alarm",
+				"bind",
+				"brk",
+				"capget",
+				"capset",
+				"chdir",
+				"chmod",
+				"chown",
+				"chown32",
+				"clock_getres",
+				"clock_gettime",
+				"clock_nanosleep",
+				"close",
+				"connect",
+				"copy_file_range",
+				"creat",
+				"dup",
+				"dup2",
+				"dup3",
+				"epoll_create",
+				"epoll_create1",
+				"epoll_ctl",
+				"epoll_ctl_old",
+				"epoll_pwait",
+				"epoll_wait",
+				"epoll_wait_old",
+				"eventfd",
+				"eventfd2",
+				"execve",
+				"execveat",
+				"exit",
+				"exit_group",
+				"faccessat",
+				"fadvise64",
+				"fadvise64_64",
+				"fallocate",
+				"fanotify_mark",
+				"fchdir",
+				"fchmod",
+				"fchmodat",
+				"fchown",
+				"fchown32",
+				"fchownat",
+				"fcntl",
+				"fcntl64",
+				"fdatasync",
+				"fgetxattr",
+				"flistxattr",
+				"flock",
+				"fork",
+				"fremovexattr",
+				"fsetxattr",
+				"fstat",
+				"fstat64",
+				"fstatat64",
+				"fstatfs",
+				"fstatfs64",
+				"fsync",
+				"ftruncate",
+				"ftruncate64",
+				"futex",
+				"futimesat",
+				"getcpu",
+				"getcwd",
+				"getdents",
+				"getdents64",
+				"getegid",
+				"getegid32",
+				"geteuid",
+				"geteuid32",
+				"getgid",
+				"getgid32",
+				"getgroups",
+				"getgroups32",
+				"getitimer",
+				"getpeername",
+				"getpgid",
+				"getpgrp",
+				"getpid",
+				"getppid",
+				"getpriority",
+				"getrandom",
+				"getresgid",
+				"getresgid32",
+				"getresuid",
+				"getresuid32",
+				"getrlimit",
+				"get_robust_list",
+				"getrusage",
+				"getsid",
+				"getsockname",
+				"getsockopt",
+				"get_thread_area",
+				"gettid",
+				"gettimeofday",
+				"getuid",
+				"getuid32",
+				"getxattr",
+				"inotify_add_watch",
+				"inotify_init",
+				"inotify_init1",
+				"inotify_rm_watch",
+				"io_cancel",
+				"ioctl",
+				"io_destroy",
+				"io_getevents",
+				"ioprio_get",
+				"ioprio_set",
+				"io_setup",
+				"io_submit",
+				"ipc",
+				"kill",
+				"lchown",
+				"lchown32",
+				"lgetxattr",
+				"link",
+				"linkat",
+				"listen",
+				"listxattr",
+				"llistxattr",
+				"_llseek",
+				"lremovexattr",
+				"lseek",
+				"lsetxattr",
+				"lstat",
+				"lstat64",
+				"madvise",
+				"memfd_create",
+				"mincore",
+				"mkdir",
+				"mkdirat",
+				"mknod",
+				"mknodat",
+				"mlock",
+				"mlock2",
+				"mlockall",
+				"mmap",
+				"mmap2",
+				"mprotect",
+				"mq_getsetattr",
+				"mq_notify",
+				"mq_open",
+				"mq_timedreceive",
+				"mq_timedsend",
+				"mq_unlink",
+				"mremap",
+				"msgctl",
+				"msgget",
+				"msgrcv",
+				"msgsnd",
+				"msync",
+				"munlock",
+				"munlockall",
+				"munmap",
+				"nanosleep",
+				"newfstatat",
+				"_newselect",
+				"open",
+				"openat",
+				"pause",
+				"pipe",
+				"pipe2",
+				"poll",
+				"ppoll",
+				"prctl",
+				"pread64",
+				"preadv",
+				"preadv2",
+				"prlimit64",
+				"pselect6",
+				"pwrite64",
+				"pwritev",
+				"pwritev2",
+				"read",
+				"readahead",
+				"readlink",
+				"readlinkat",
+				"readv",
+				"recv",
+				"recvfrom",
+				"recvmmsg",
+				"recvmsg",
+				"remap_file_pages",
+				"removexattr",
+				"rename",
+				"renameat",
+				"renameat2",
+				"restart_syscall",
+				"rmdir",
+				"rt_sigaction",
+				"rt_sigpending",
+				"rt_sigprocmask",
+				"rt_sigqueueinfo",
+				"rt_sigreturn",
+				"rt_sigsuspend",
+				"rt_sigtimedwait",
+				"rt_tgsigqueueinfo",
+				"sched_getaffinity",
+				"sched_getattr",
+				"sched_getparam",
+				"sched_get_priority_max",
+				"sched_get_priority_min",
+				"sched_getscheduler",
+				"sched_rr_get_interval",
+				"sched_setaffinity",
+				"sched_setattr",
+				"sched_setparam",
+				"sched_setscheduler",
+				"sched_yield",
+				"seccomp",
+				"select",
+				"semctl",
+				"semget",
+				"semop",
+				"semtimedop",
+				"send",
+				"sendfile",
+				"sendfile64",
+				"sendmmsg",
+				"sendmsg",
+				"sendto",
+				"setfsgid",
+				"setfsgid32",
+				"setfsuid",
+				"setfsuid32",
+				"setgid",
+				"setgid32",
+				"setgroups",
+				"setgroups32",
+				"setitimer",
+				"setpgid",
"setpriority", + "setregid", + "setregid32", + "setresgid", + "setresgid32", + "setresuid", + "setresuid32", + "setreuid", + "setreuid32", + "setrlimit", + "set_robust_list", + "setsid", + "setsockopt", + "set_thread_area", + "set_tid_address", + "setuid", + "setuid32", + "setxattr", + "shmat", + "shmctl", + "shmdt", + "shmget", + "shutdown", + "sigaltstack", + "signalfd", + "signalfd4", + "sigreturn", + "socket", + "socketcall", + "socketpair", + "splice", + "stat", + "stat64", + "statfs", + "statfs64", + "symlink", + "symlinkat", + "sync", + "sync_file_range", + "syncfs", + "sysinfo", + "syslog", + "tee", + "tgkill", + "time", + "timer_create", + "timer_delete", + "timerfd_create", + "timerfd_gettime", + "timerfd_settime", + "timer_getoverrun", + "timer_gettime", + "timer_settime", + "times", + "tkill", + "truncate", + "truncate64", + "ugetrlimit", + "umask", + "uname", + "unlink", + "unlinkat", + "utime", + "utimensat", + "utimes", + "vfork", + "vmsplice", + "wait4", + "waitid", + "waitpid", + "write", + "writev", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Names: []string{"personality"}, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: 0x0, + Op: types.OpEqualTo, + }, + }, + }, + { + Names: []string{"personality"}, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: 0x0008, + Op: types.OpEqualTo, + }, + }, + }, + { + Names: []string{"personality"}, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: 0x20000, + Op: types.OpEqualTo, + }, + }, + }, + { + Names: []string{"personality"}, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: 0x20008, + Op: types.OpEqualTo, + }, + }, + }, + { + Names: []string{"personality"}, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: 0xffffffff, + Op: types.OpEqualTo, + }, + }, + }, + { + Names: []string{ + "sync_file_range2", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Arches: []string{"ppc64le"}, + }, + }, + { + Names: []string{ + "arm_fadvise64_64", + "arm_sync_file_range", + "sync_file_range2", + "breakpoint", + "cacheflush", + "set_tls", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Arches: []string{"arm", "arm64"}, + }, + }, + { + Names: []string{ + "arch_prctl", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Arches: []string{"amd64", "x32"}, + }, + }, + { + Names: []string{ + "modify_ldt", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Arches: []string{"amd64", "x32", "x86"}, + }, + }, + { + Names: []string{ + "s390_pci_mmio_read", + "s390_pci_mmio_write", + "s390_runtime_instr", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Arches: []string{"s390", "s390x"}, + }, + }, + { + Names: []string{ + "open_by_handle_at", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_DAC_READ_SEARCH"}, + }, + }, + { + Names: []string{ + "bpf", + "clone", + "fanotify_init", + "lookup_dcookie", + "mount", + "name_to_handle_at", + "perf_event_open", + "setdomainname", + "sethostname", + "setns", + "umount", + "umount2", + "unshare", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_ADMIN"}, + }, + }, + { + Names: []string{ + "clone", + }, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: unix.CLONE_NEWNS | unix.CLONE_NEWUTS | 
unix.CLONE_NEWIPC | unix.CLONE_NEWUSER | unix.CLONE_NEWPID | unix.CLONE_NEWNET, + ValueTwo: 0, + Op: types.OpMaskedEqual, + }, + }, + Excludes: types.Filter{ + Caps: []string{"CAP_SYS_ADMIN"}, + Arches: []string{"s390", "s390x"}, + }, + }, + { + Names: []string{ + "clone", + }, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 1, + Value: unix.CLONE_NEWNS | unix.CLONE_NEWUTS | unix.CLONE_NEWIPC | unix.CLONE_NEWUSER | unix.CLONE_NEWPID | unix.CLONE_NEWNET, + ValueTwo: 0, + Op: types.OpMaskedEqual, + }, + }, + Comment: "s390 parameter ordering for clone is different", + Includes: types.Filter{ + Arches: []string{"s390", "s390x"}, + }, + Excludes: types.Filter{ + Caps: []string{"CAP_SYS_ADMIN"}, + }, + }, + { + Names: []string{ + "reboot", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_BOOT"}, + }, + }, + { + Names: []string{ + "chroot", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_CHROOT"}, + }, + }, + { + Names: []string{ + "delete_module", + "init_module", + "finit_module", + "query_module", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_MODULE"}, + }, + }, + { + Names: []string{ + "acct", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_PACCT"}, + }, + }, + { + Names: []string{ + "kcmp", + "process_vm_readv", + "process_vm_writev", + "ptrace", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_PTRACE"}, + }, + }, + { + Names: []string{ + "iopl", + "ioperm", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_RAWIO"}, + }, + }, + { + Names: []string{ + "settimeofday", + "stime", + "clock_settime", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_TIME"}, + }, + }, + { + Names: []string{ + "vhangup", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_TTY_CONFIG"}, + }, + }, + } + + return &types.Seccomp{ + DefaultAction: types.ActErrno, + ArchMap: arches(), + Syscalls: syscalls, + } +} diff --git a/vendor/github.com/moby/moby/profiles/seccomp/seccomp_test.go b/vendor/github.com/moby/moby/profiles/seccomp/seccomp_test.go new file mode 100644 index 000000000..134692147 --- /dev/null +++ b/vendor/github.com/moby/moby/profiles/seccomp/seccomp_test.go @@ -0,0 +1,32 @@ +// +build linux + +package seccomp + +import ( + "io/ioutil" + "testing" + + "github.com/docker/docker/oci" +) + +func TestLoadProfile(t *testing.T) { + f, err := ioutil.ReadFile("fixtures/example.json") + if err != nil { + t.Fatal(err) + } + rs := oci.DefaultSpec() + if _, err := LoadProfile(string(f), &rs); err != nil { + t.Fatal(err) + } +} + +func TestLoadDefaultProfile(t *testing.T) { + f, err := ioutil.ReadFile("default.json") + if err != nil { + t.Fatal(err) + } + rs := oci.DefaultSpec() + if _, err := LoadProfile(string(f), &rs); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/profiles/seccomp/seccomp_unsupported.go b/vendor/github.com/moby/moby/profiles/seccomp/seccomp_unsupported.go new file mode 100644 index 000000000..3c1d68b1f --- /dev/null +++ b/vendor/github.com/moby/moby/profiles/seccomp/seccomp_unsupported.go @@ -0,0 +1,13 @@ +// +build linux,!seccomp + +package seccomp + +import ( + "github.com/docker/docker/api/types" 
+ "github.com/opencontainers/runtime-spec/specs-go" +) + +// DefaultProfile returns a nil pointer on unsupported systems. +func DefaultProfile() *types.Seccomp { + return nil +} diff --git a/vendor/github.com/moby/moby/project/ARM.md b/vendor/github.com/moby/moby/project/ARM.md new file mode 100644 index 000000000..e61e3b184 --- /dev/null +++ b/vendor/github.com/moby/moby/project/ARM.md @@ -0,0 +1,45 @@ +# ARM support + +The ARM support should be considered experimental. It will be extended step by step in the coming weeks. + +Building a Docker Development Image works in the same fashion as for Intel platform (x86-64). +Currently we have initial support for 32bit ARMv7 devices. + +To work with the Docker Development Image you have to clone the Docker/Docker repo on a supported device. +It needs to have a Docker Engine installed to build the Docker Development Image. + +From the root of the Docker/Docker repo one can use make to execute the following make targets: +- make validate +- make binary +- make build +- make deb +- make bundles +- make default +- make shell +- make test-unit +- make test-integration-cli +- make + +The Makefile does include logic to determine on which OS and architecture the Docker Development Image is built. +Based on OS and architecture it chooses the correct Dockerfile. +For the ARM 32bit architecture it uses `Dockerfile.armhf`. + +So for example in order to build a Docker binary one has to: +1. clone the Docker/Docker repository on an ARM device `git clone https://github.com/docker/docker.git` +2. change into the checked out repository with `cd docker` +3. execute `make binary` to create a Docker Engine binary for ARM + +## Kernel modules +A few libnetwork integration tests require that the kernel be +configured with "dummy" network interface and has the module +loaded. However, the dummy module may be not loaded automatically. + +To load the kernel module permanently, run these commands as `root`. + + modprobe dummy + echo "dummy" >> /etc/modules + +On some systems you also have to sync your kernel modules. + + oc-sync-kernel-modules + depmod diff --git a/vendor/github.com/moby/moby/project/BRANCHES-AND-TAGS.md b/vendor/github.com/moby/moby/project/BRANCHES-AND-TAGS.md new file mode 100644 index 000000000..1c6f23252 --- /dev/null +++ b/vendor/github.com/moby/moby/project/BRANCHES-AND-TAGS.md @@ -0,0 +1,35 @@ +Branches and tags +================= + +Note: details of the release process for the Engine are documented in the +[RELEASE-CHECKLIST](https://github.com/docker/docker/blob/master/project/RELEASE-CHECKLIST.md). + +# Branches + +The docker/docker repository should normally have only three living branches at all time, including +the regular `master` branch: + +## `docs` branch + +The `docs` branch supports documentation updates between product releases. This branch allow us to +decouple documentation releases from product releases. + +## `release` branch + +The `release` branch contains the last _released_ version of the code for the project. + +The `release` branch is only updated at each public release of the project. The mechanism for this +is that the release is materialized by a pull request against the `release` branch which lives for +the duration of the code freeze period. When this pull request is merged, the `release` branch gets +updated, and its new state is tagged accordingly. + +# Tags + +Any public release of a compiled binary, with the logical exception of nightly builds, should have +a corresponding tag in the repository. 
+
+The general format of a tag is `vX.Y.Z[-suffix[N]]`:
+
+- All of `X`, `Y`, `Z` must be specified (example: `v1.0.0`)
+- First release candidate for version `1.8.0` should be tagged `v1.8.0-rc1`
+- Second alpha release of a product should be tagged `v1.0.0-alpha2`
diff --git a/vendor/github.com/moby/moby/project/CONTRIBUTING.md b/vendor/github.com/moby/moby/project/CONTRIBUTING.md
new file mode 120000
index 000000000..44fcc6343
--- /dev/null
+++ b/vendor/github.com/moby/moby/project/CONTRIBUTING.md
@@ -0,0 +1 @@
+../CONTRIBUTING.md
\ No newline at end of file
diff --git a/vendor/github.com/moby/moby/project/GOVERNANCE.md b/vendor/github.com/moby/moby/project/GOVERNANCE.md
new file mode 100644
index 000000000..6ae7baf74
--- /dev/null
+++ b/vendor/github.com/moby/moby/project/GOVERNANCE.md
@@ -0,0 +1,17 @@
+# Docker Governance Advisory Board Meetings
+
+In the spirit of openness, Docker created a Governance Advisory Board, and committed to making all materials and notes from the meetings of this group public.
+All output from the meetings should be considered proposals only, and is subject to the review and approval of the community and the project leadership.
+
+The materials from the first Docker Governance Advisory Board meeting, held on October 28, 2014, are available at
+[Google Docs Folder](https://goo.gl/Alfj8r)
+
+These include:
+
+* First Meeting Notes
+* DGAB Charter
+* Presentation 1: Introductory Presentation, including State of The Project
+* Presentation 2: Overall Contribution Structure/Docker Project Core Proposal
+* Presentation 3: Long Term Roadmap/Statement of Direction
+
+
diff --git a/vendor/github.com/moby/moby/project/IRC-ADMINISTRATION.md b/vendor/github.com/moby/moby/project/IRC-ADMINISTRATION.md
new file mode 100644
index 000000000..824a14bd5
--- /dev/null
+++ b/vendor/github.com/moby/moby/project/IRC-ADMINISTRATION.md
@@ -0,0 +1,37 @@
+# Freenode IRC Administration Guidelines and Tips
+
+This is not meant to be a general "Here's how to IRC" document, so if you're
+looking for that, check Google instead. ♥
+
+If you've been charged with helping maintain one of Docker's now many IRC
+channels, this might turn out to be useful. If there's information that you
+wish you'd known about how a particular channel is organized, you should add
+deets here! :)
+
+## `ChanServ`
+
+Most channel maintenance happens by talking to Freenode's `ChanServ` bot. For
+example, `/msg ChanServ ACCESS LIST` will show you a list of everyone
+with "access" privileges for a particular channel.
+
+A similar command is used to give someone a particular access level. For
+example, to add a new maintainer to the `#docker-maintainers` access list so
+that they can contribute to the discussions (after they've been merged
+appropriately in a `MAINTAINERS` file, of course), one would use `/msg ChanServ
+ACCESS #docker-maintainers ADD maintainer`.
+
+To set up a new channel with a similar `maintainer` access template, use a
+command like `/msg ChanServ TEMPLATE maintainer +AV` (`+A` for letting
+them view the `ACCESS LIST`, `+V` for auto-voice; see `/msg ChanServ HELP FLAGS`
+for more details).
+
+## Troubleshooting
+
+The most common cause of not-getting-auto-`+v` woes is people not being
+`IDENTIFY`ed with `NickServ` (or their current nickname not being `GROUP`ed with
+their main nickname) -- often manifested by `ChanServ` responding to an `ACCESS
+ADD` request with something like `xyz is not registered.`.
+
+This is easily fixed by doing `/msg NickServ IDENTIFY OldNick SecretPassword`
+followed by `/msg NickServ GROUP` to group the two nicknames together. See
+`/msg NickServ HELP GROUP` for more information.
diff --git a/vendor/github.com/moby/moby/project/ISSUE-TRIAGE.md b/vendor/github.com/moby/moby/project/ISSUE-TRIAGE.md
new file mode 100644
index 000000000..5ef2d317e
--- /dev/null
+++ b/vendor/github.com/moby/moby/project/ISSUE-TRIAGE.md
@@ -0,0 +1,132 @@
+Triaging of issues
+------------------
+
+Triage provides an important way to contribute to an open source project. Triage helps ensure issues are resolved quickly by:
+
+- Ensuring the issue's intent and purpose are conveyed precisely. This is necessary because it can be difficult for an issue to explain how an end user experiences a problem and what actions they took.
+- Giving a contributor the information they need before they commit to resolving an issue.
+- Lowering the issue count by preventing duplicate issues.
+- Streamlining the development process by preventing duplicate discussions.
+
+If you don't have time to code, consider helping with triage. The community will thank you for saving them time by spending some of yours.
+
+### 1. Ensure the issue contains basic information
+
+Before triaging an issue very far, make sure that the issue's author provided the standard issue information. This will help you make an educated recommendation on how to categorize the issue. Standard information that *must* be included in most issues includes things such as:
+
+- the output of `docker version`
+- the output of `docker info`
+- the output of `uname -a`
+- a reproducible case if this is a bug, Dockerfiles FTW
+- host distribution and version (Ubuntu 14.04, RHEL, Fedora 23)
+- page URL if this is a docs issue or the name of a man page
+
+Depending on the issue, you might not feel all this information is needed. Use your best judgement. If you cannot triage an issue using what its author provided, explain kindly to the author that they must provide the above information to clarify the problem.
+
+If the author provides the standard information but you are still unable to triage the issue, request additional information. Do this kindly and politely because you are asking for more of the author's time.
+
+If the author does not provide the requested information within a week, close the issue with a kind note stating that the author can request for the issue to be
+reopened when the necessary information is provided.
+
+### 2. Classify the Issue
+
+An issue can have multiple of the following labels. Typically, a properly classified issue should
+have:
+
+- One label identifying its kind (`kind/*`).
+- One or multiple labels identifying the functional areas of interest (`area/*`).
+- Where applicable, one label categorizing its difficulty (`exp/*`).
+
+#### Issue kind
+
+| Kind             | Description                                                                                                                        |
+|------------------|------------------------------------------------------------------------------------------------------------------------------------|
+| kind/bug         | Bugs are bugs. The cause may or may not be known at triage time, so debugging should be taken into account in the time estimate.  |
+| kind/enhancement | Enhancements are not bugs or new features but can drastically improve usability or performance of a project component.            |
+| kind/feature     | Functionality or other elements that the project does not currently support. Features are new and shiny.                           |
+| kind/question    | Contains a user or contributor question requiring a response.
|
+
+#### Functional area
+
+| Area                      |
+|---------------------------|
+| area/api                  |
+| area/builder              |
+| area/bundles              |
+| area/cli                  |
+| area/daemon               |
+| area/distribution         |
+| area/docs                 |
+| area/kernel               |
+| area/logging              |
+| area/networking           |
+| area/plugins              |
+| area/project              |
+| area/runtime              |
+| area/security             |
+| area/security/apparmor    |
+| area/security/seccomp     |
+| area/security/selinux     |
+| area/security/trust       |
+| area/storage              |
+| area/storage/aufs         |
+| area/storage/btrfs        |
+| area/storage/devicemapper |
+| area/storage/overlay      |
+| area/storage/zfs          |
+| area/swarm                |
+| area/testing              |
+| area/volumes              |
+
+#### Platform
+
+| Platform                  |
+|---------------------------|
+| platform/arm              |
+| platform/darwin           |
+| platform/ibm-power        |
+| platform/ibm-z            |
+| platform/windows          |
+
+#### Experience level
+
+Experience level is a way for a contributor to find an issue based on their
+skill set. Experience types are applied to the issue or pull request using
+labels.
+
+| Level            | Experience level guideline                                                                                                                                                    |
+|------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| exp/beginner     | New to Docker, and possibly Golang, and is looking to help while learning the basics.                                                                                        |
+| exp/intermediate | Comfortable with Golang, understands the core concepts of Docker, and is looking to dive deeper into the project.                                                            |
+| exp/expert       | Proficient with Docker and Golang and has been following, and active in, the community to understand the rationale behind design decisions and where the project is headed.  |
+
+As the table states, these labels are meant as guidelines. You might have
+written a whole plugin for Docker in a personal project and never contributed to
+Docker. With that kind of experience, you could take on an exp/expert level task.
+
+#### Triage status
+
+To communicate the triage status with other collaborators, you can apply status
+labels to issues. These labels prevent duplicating effort.
+
+| Status                        | Description                                                                                                                                                                   |
+|-------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| status/confirmed              | You triaged the issue, and were able to reproduce the issue. Always leave a comment describing how you reproduced it, so that the person working on resolving the issue has a way to set up a test-case.
+| status/accepted               | Apply to enhancements / feature requests that we think are good to have. Adding this label helps contributors find things to work on.
+| status/more-info-needed       | Apply this to issues that are missing information (e.g. no `docker version` or `docker info` output, or no steps to reproduce), or require feedback from the reporter. If the issue is not updated after a week, it can generally be closed.
+| status/needs-attention        | Apply this label if an issue (or PR) needs more eyes.
+
+### 3. Prioritizing issues
+
+When, and only when, an issue is attached to a specific milestone, the issue can be labeled with the
+following labels to indicate its degree of priority (from most urgent to least urgent).
+
+| Priority    | Description                                                                                                                         |
+|-------------|---------------------------------------------------------------------------------------------------------------------------------|
+| priority/P0 | Urgent: Security, critical bugs, blocking issues. P0 basically means drop everything you are doing until this issue is addressed.
|
+| priority/P1 | Important: P1 issues are a top priority and a must-have for the next release.                                                       |
+| priority/P2 | Normal priority: default priority applied.                                                                                          |
+| priority/P3 | Best effort: those are nice to have / minor issues.                                                                                 |
+
+And that's it. That should be all the information required for a new or existing contributor to come in and resolve an issue.
diff --git a/vendor/github.com/moby/moby/project/PACKAGE-REPO-MAINTENANCE.md b/vendor/github.com/moby/moby/project/PACKAGE-REPO-MAINTENANCE.md
new file mode 100644
index 000000000..458384a3d
--- /dev/null
+++ b/vendor/github.com/moby/moby/project/PACKAGE-REPO-MAINTENANCE.md
@@ -0,0 +1,74 @@
+# Apt & Yum Repository Maintenance
+## A maintainer's guide to managing Docker's package repos
+
+### How to clean up old experimental debs and rpms
+
+We release debs and rpms for experimental nightlies, so these can build up.
+To remove old experimental debs and rpms, and _ONLY_ keep the latest, follow the
+steps below.
+
+1. Check out docker master
+
+2. Run clean scripts
+
+```bash
+docker build --rm --force-rm -t docker-dev:master .
+docker run --rm -it --privileged \
+    -v /path/to/your/repos/dir:/volumes/repos \
+    -v $HOME/.gnupg:/root/.gnupg \
+    -e GPG_PASSPHRASE \
+    -e DOCKER_RELEASE_DIR=/volumes/repos \
+    docker-dev:master hack/make.sh clean-apt-repo clean-yum-repo generate-index-listing sign-repos
+```
+
+3. Upload the changed repos to `s3` (if you host on s3)
+
+4. Purge the cache, PURGE the cache, PURGE THE CACHE!
+
+### How to get out of a sticky situation
+
+Sh\*t happens. We know. Below are steps to get out of any "hash-sum mismatch" or
+"gpg sig error" or similar errors that might happen to the apt repo.
+
+**NOTE:** These are apt repo specific; we have had no experience with anything similar
+happening to the yum repo in the past, so you can rest easy.
+
+For each step listed below, move on to the next if the previous didn't work.
+Otherwise CELEBRATE!
+
+1. Purge the cache.
+
+2. Did you remember to sign the debs after releasing?
+
+Re-sign the repo with your gpg key:
+
+```bash
+docker build --rm --force-rm -t docker-dev:master .
+docker run --rm -it --privileged \
+    -v /path/to/your/repos/dir:/volumes/repos \
+    -v $HOME/.gnupg:/root/.gnupg \
+    -e GPG_PASSPHRASE \
+    -e DOCKER_RELEASE_DIR=/volumes/repos \
+    docker-dev:master hack/make.sh sign-repos
+```
+
+Upload the changed repo to `s3` (if that is where you host)
+
+PURGE THE CACHE.
+
+3. Run Jess' magical, save all, only in case of extreme emergencies, "you are
+going to have to break this glass to get it" script.
+
+```bash
+docker build --rm --force-rm -t docker-dev:master .
+docker run --rm -it --privileged \
+    -v /path/to/your/repos/dir:/volumes/repos \
+    -v $HOME/.gnupg:/root/.gnupg \
+    -e GPG_PASSPHRASE \
+    -e DOCKER_RELEASE_DIR=/volumes/repos \
+    docker-dev:master hack/make.sh update-apt-repo generate-index-listing sign-repos
+```
+
+4. Upload the changed repo to `s3` (if that is where you host)
+
+PURGE THE CACHE.
diff --git a/vendor/github.com/moby/moby/project/PACKAGERS.md b/vendor/github.com/moby/moby/project/PACKAGERS.md
new file mode 100644
index 000000000..a5b0018b5
--- /dev/null
+++ b/vendor/github.com/moby/moby/project/PACKAGERS.md
@@ -0,0 +1,307 @@
+# Dear Packager,
+
+If you are looking to make Docker available on your favorite software
+distribution, this document is for you. It summarizes the requirements for
+building and running the Docker client and the Docker daemon.
+
+## Getting Started
+
+We want to help you package Docker successfully.
Before doing any packaging, a
+good first step is to introduce yourself on the [docker-dev mailing
+list](https://groups.google.com/d/forum/docker-dev), explain what you're trying
+to achieve, and tell us how we can help. Don't worry, we don't bite! There might
+even be someone already working on packaging for the same distro!
+
+You can also join us on IRC - the #docker and #docker-dev channels on Freenode are both
+active and friendly.
+
+We like to refer to Tianon ("@tianon" on GitHub and "tianon" on IRC) as our
+"Packagers Relations", since he's always working to make sure our packagers have
+a good, healthy upstream to work with (both in our communication and in our
+build scripts). If you're having any kind of trouble, feel free to ping him
+directly. He also likes to keep track of what distributions we have packagers
+for, so feel free to reach out to him even just to say "Hi!"
+
+## Package Name
+
+If possible, your package should be called "docker". If that name is already
+taken, a second choice is "docker-engine". Another possible choice is "docker.io".
+
+## Official Build vs Distro Build
+
+The Docker project maintains its own build and release toolchain. It is pretty
+neat and entirely based on Docker (surprise!). This toolchain is the canonical
+way to build Docker. We encourage you to give it a try, and if the circumstances
+allow you to use it, we recommend that you do.
+
+You might not be able to use the official build toolchain - usually because your
+distribution has a toolchain and packaging policy of its own. We get it! Your
+house, your rules. The rest of this document should give you the information you
+need to package Docker your way, without denaturing it in the process.
+
+## Build Dependencies
+
+To build Docker, you will need the following:
+
+* A recent version of Git and Mercurial
+* Go version 1.6 or later
+* A clean checkout of the source added to a valid [Go
+  workspace](https://golang.org/doc/code.html#Workspaces) under the path
+  *src/github.com/docker/docker* (unless you plan to use `AUTO_GOPATH`,
+  explained in more detail below)
+
+To build the Docker daemon, you will additionally need:
+
+* An amd64/x86_64 machine running Linux
+* SQLite version 3.7.9 or later
+* libdevmapper version 1.02.68-cvs (2012-01-26) or later from lvm2 version
+  2.02.89 or later
+* btrfs-progs version 3.16.1 or later (unless using an older version is
+  absolutely necessary, in which case 3.8 is the minimum)
+* libseccomp version 2.2.1 or later (for build tag seccomp)
+
+Be sure to also check out Docker's Dockerfile for the most up-to-date list of
+these build-time dependencies.
+
+### Go Dependencies
+
+All Go dependencies are vendored under "./vendor". They are used by the official
+build, so the source of truth for the current version of each dependency is
+whatever is in "./vendor".
+
+To use the vendored dependencies, simply make sure the path to "./vendor" is
+included in `GOPATH` (or use `AUTO_GOPATH`, as explained below).
+
+If you would rather (or must, due to distro policy) package these dependencies
+yourself, take a look at "vendor.conf" for an easy-to-parse list of the
+exact version for each.
+
+NOTE: if you're not able to package the exact version (to the exact commit) of a
+given dependency, please get in touch so we can remediate! Who knows what
+discrepancies can be caused by even the slightest deviation. We promise to do
+our best to make everybody happy.
+
+## Stripping Binaries
+
+Please, please, please do not strip any compiled binaries.
This is really
+important.
+
+In our own testing, stripping the resulting binaries sometimes results in a
+binary that appears to work, but more often causes random panics, segfaults, and
+other issues. Even if the binary appears to work, please don't strip.
+
+See the following quotes from Dave Cheney, which explain this position better
+from the upstream Golang perspective.
+
+### [go issue #5855, comment #3](https://code.google.com/p/go/issues/detail?id=5855#c3)
+
+> Super super important: Do not strip go binaries or archives. It isn't tested,
+> often breaks, and doesn't work.
+
+### [launchpad golang issue #1200255, comment #8](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/8)
+
+> To quote myself: "Please do not strip Go binaries, it is not supported, not
+> tested, is often broken, and doesn't do what you want"
+>
+> To unpack that a bit
+>
+> * not supported, as in, we don't support it, and recommend against it when
+>   asked
+> * not tested, we don't test stripped binaries as part of the build CI process
+> * is often broken, stripping a go binary will produce anywhere from no, to
+>   subtle, to outright execution failure, see above
+
+### [launchpad golang issue #1200255, comment #13](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/13)
+
+> To clarify my previous statements.
+>
+> * I do not disagree with the debian policy, it is there for a good reason
+> * Having said that, it stripping Go binaries doesn't work, and nobody is
+>   looking at making it work, so there is that.
+>
+> Thanks for patching the build formula.
+
+## Building Docker
+
+Please use our build script ("./hack/make.sh") for all your compilation of
+Docker. If there's something you need that it isn't doing, or something it could
+be doing to make your life as a packager easier, please get in touch with Tianon
+and help us rectify the situation. Chances are good that other packagers have
+probably run into the same problems and a fix might already be in the works, but
+none of us will know for sure unless you harass Tianon about it. :)
+
+All the commands listed within this section should be run with the Docker source
+checkout as the current working directory.
+
+### `AUTO_GOPATH`
+
+If you'd rather not be bothered with the hassles that setting up `GOPATH`
+appropriately can be, and prefer to just get a "build that works", you should
+add something similar to this to whatever script or process you're using to
+build Docker:
+
+```bash
+export AUTO_GOPATH=1
+```
+
+This will cause the build scripts to set up a reasonable `GOPATH` that
+automatically and properly includes both docker/docker from the local
+directory, and the local "./vendor" directory as necessary.
+
+### `DOCKER_BUILDTAGS`
+
+If you're building a binary that may need to be used on platforms that include
+AppArmor, you will need to set `DOCKER_BUILDTAGS` as follows:
+```bash
+export DOCKER_BUILDTAGS='apparmor'
+```
+
+If you're building a binary that may need to be used on platforms that include
+SELinux, you will need to use the `selinux` build tag:
+```bash
+export DOCKER_BUILDTAGS='selinux'
+```
+
+If you're building a binary that may need to be used on platforms that include
+seccomp, you will need to use the `seccomp` build tag:
+```bash
+export DOCKER_BUILDTAGS='seccomp'
+```
+
+There are build tags for disabling graphdrivers as well. By default, support
+for all graphdrivers is built in.
+ +To disable btrfs: +```bash +export DOCKER_BUILDTAGS='exclude_graphdriver_btrfs' +``` + +To disable devicemapper: +```bash +export DOCKER_BUILDTAGS='exclude_graphdriver_devicemapper' +``` + +To disable aufs: +```bash +export DOCKER_BUILDTAGS='exclude_graphdriver_aufs' +``` + +NOTE: if you need to set more than one build tag, space separate them: +```bash +export DOCKER_BUILDTAGS='apparmor selinux exclude_graphdriver_aufs' +``` + +### Static Daemon + +If it is feasible within the constraints of your distribution, you should +seriously consider packaging Docker as a single static binary. A good comparison +is Busybox, which is often packaged statically as a feature to enable mass +portability. Because of the unique way Docker operates, being similarly static +is a "feature". + +To build a static Docker daemon binary, run the following command (first +ensuring that all the necessary libraries are available in static form for +linking - see the "Build Dependencies" section above, and the relevant lines +within Docker's own Dockerfile that set up our official build environment): + +```bash +./hack/make.sh binary +``` + +This will create a static binary under +"./bundles/$VERSION/binary/docker-$VERSION", where "$VERSION" is the contents of +the file "./VERSION". This binary is usually installed somewhere like +"/usr/bin/docker". + +### Dynamic Daemon / Client-only Binary + +If you are only interested in a Docker client binary, you can build using: + +```bash +./hack/make.sh binary-client +``` + +If you need to (due to distro policy, distro library availability, or for other +reasons) create a dynamically compiled daemon binary, or if you are only +interested in creating a client binary for Docker, use something similar to the +following: + +```bash +./hack/make.sh dynbinary-client +``` + +This will create "./bundles/$VERSION/dynbinary-client/docker-$VERSION", which for +client-only builds is the important file to grab and install as appropriate. + +## System Dependencies + +### Runtime Dependencies + +To function properly, the Docker daemon needs the following software to be +installed and available at runtime: + +* iptables version 1.4 or later +* procps (or similar provider of a "ps" executable) +* e2fsprogs version 1.4.12 or later (in use: mkfs.ext4, tune2fs) +* xfsprogs (in use: mkfs.xfs) +* XZ Utils version 4.9 or later +* a [properly + mounted](https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount) + cgroupfs hierarchy (having a single, all-encompassing "cgroup" mount point + [is](https://github.com/docker/docker/issues/2683) + [not](https://github.com/docker/docker/issues/3485) + [sufficient](https://github.com/docker/docker/issues/4568)) + +Additionally, the Docker client needs the following software to be installed and +available at runtime: + +* Git version 1.7 or later + +### Kernel Requirements + +The Docker daemon has very specific kernel requirements. Most pre-packaged +kernels already include the necessary options enabled. If you are building your +own kernel, you will either need to discover the options necessary via trial and +error, or check out the [Gentoo +ebuild](https://github.com/tianon/docker-overlay/blob/master/app-emulation/docker/docker-9999.ebuild), +in which a list is maintained (and if there are any issues or discrepancies in +that list, please contact Tianon so they can be rectified). + +Note that in client mode, there are no specific kernel requirements, and that +the client will even run on alternative platforms such as Mac OS X / Darwin. 
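+
+As a quick sanity check, the `check-config.sh` script shipped in the Docker source tree
+under "contrib/" can compare a kernel config against most of these requirements (a sketch;
+the config path varies by distro, e.g. `/proc/config.gz` or `/boot/config-$(uname -r)`):
+
+```bash
+# run from the root of the Docker source checkout
+./contrib/check-config.sh /boot/config-$(uname -r)
+```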
+
+### Optional Dependencies
+
+Some of Docker's features are activated by using optional command-line flags or
+by having support for them in the kernel or userspace. A few examples include:
+
+* AUFS graph driver (requires AUFS patches/support enabled in the kernel, and at
+  least the "auplink" utility from aufs-tools)
+* BTRFS graph driver (requires BTRFS support enabled in the kernel)
+* ZFS graph driver (requires userspace zfs-utils and a corresponding kernel module)
+* Libseccomp to allow running seccomp profiles with containers
+
+## Daemon Init Script
+
+Docker expects to run as a daemon at machine startup. Your package will need to
+include a script for your distro's process supervisor of choice. Be sure to
+check out the "contrib/init" folder in case a suitable init script already
+exists (and if one does not, contact Tianon about whether it might be
+appropriate for your distro's init script to live there too!).
+
+In general, Docker should be run as root, similar to the following:
+
+```bash
+dockerd
+```
+
+Generally, a `DOCKER_OPTS` variable of some kind is available for adding more
+flags (such as changing the graph driver to use BTRFS, switching the location of
+"/var/lib/docker", etc).
+
+## Communicate
+
+As a final note, please do feel free to reach out to Tianon at any time for
+pretty much anything. He really does love hearing from our packagers and wants
+to make sure we're not being a "hostile upstream". As should be a given, we
+appreciate the work our packagers do to make sure we have broad distribution!
diff --git a/vendor/github.com/moby/moby/project/PATCH-RELEASES.md b/vendor/github.com/moby/moby/project/PATCH-RELEASES.md
new file mode 100644
index 000000000..548db9ab4
--- /dev/null
+++ b/vendor/github.com/moby/moby/project/PATCH-RELEASES.md
@@ -0,0 +1,68 @@
+# Docker patch (bugfix) release process
+
+Patch releases (the 'Z' in vX.Y.Z) are intended to fix major issues in a
+release. Docker open source projects follow these procedures when creating a
+patch release:
+
+After each release (both "major" (vX.Y.0) and "patch" releases (vX.Y.Z)), a
+patch release milestone (vX.Y.Z + 1) is created.
+
+The creation of a patch release milestone is not an obligation to actually
+*create* a patch release. The purpose of these milestones is to collect
+issues and pull requests that can *justify* a patch release:
+
+- Any maintainer is allowed to add issues and PR's to the milestone; when
+  doing so, preferably leave a comment on the issue or PR explaining *why*
+  you think it should be considered for inclusion in a patch release.
+- Issues introduced in version vX.Y.0 get added to milestone X.Y.Z+1
+- Only *regressions* should be added. Issues *discovered* in version vX.Y.0,
+  but already present in version vX.Y-1.Z should not be added, unless
+  critical.
+- Patch releases can *only* contain bug-fixes. New features should
+  *never* be added to a patch release.
+
+The release captain of the "major" (X.Y.0) release is also responsible for
+patch releases. The release captain, together with another maintainer, will
+review issues and PRs on the milestone, and assign `priority/` labels. These
+review sessions take place on a weekly basis, more frequently if needed:
+
+- A P0 priority is assigned to critical issues. A maintainer *must* be
+  assigned to these issues. Maintainers should strive to fix a P0 within a week.
+- A P1 priority is assigned to major issues, but not critical. A maintainer
+  *must* be assigned to these issues.
+- P2 and P3 priorities are assigned to other issues.
A maintainer can be
+  assigned.
+- Non-critical issues and PR's can be removed from the milestone. Minor
+  changes, such as typo-fixes or omissions in the documentation, can be
+  considered for inclusion in a patch release.
+
+## Deciding if a patch release should be done
+
+- Only a P0 can justify proceeding with the patch release.
+- P1, P2, and P3 issues/PR's should not influence the decision, and
+  should be moved to the X.Y.Z+1 milestone, or removed from the
+  milestone.
+
+> **Note**: If the next "major" release is imminent, the release captain
+> can decide to cancel a patch release, and include the patches in the
+> upcoming major release.
+
+> **Note**: Security releases are also "patch releases", but follow
+> a different procedure. Security releases are developed in a private
+> repository, released and tested under embargo before they become
+> publicly available.
+
+## Deciding on the content of a patch release
+
+When the criteria for moving forward with a patch release are met, the release
+manager will decide on the exact content of the release.
+
+- Fixes to all P0 issues *must* be included in the release.
+- Fixes to *some* P1, P2, and P3 issues *may* be included as part of the patch
+  release depending on the severity of the issue and the risk associated with
+  the patch.
+
+Any code delivered as part of a patch release should make life easier for a
+significant number of users with zero chance of degrading anybody's experience.
+A good rule of thumb for that is to limit cherry-picking to small patches, which
+fix well-understood issues, and which come with verifiable tests.
diff --git a/vendor/github.com/moby/moby/project/PRINCIPLES.md b/vendor/github.com/moby/moby/project/PRINCIPLES.md
new file mode 100644
index 000000000..53f03018e
--- /dev/null
+++ b/vendor/github.com/moby/moby/project/PRINCIPLES.md
@@ -0,0 +1,19 @@
+# Docker principles
+
+In the design and development of Docker we try to follow these principles:
+
+(Work in progress)
+
+* Don't try to replace every tool. Instead, be an ingredient to improve them.
+* Less code is better.
+* Fewer components are better. Do you really need to add one more class?
+* 50 lines of straightforward, readable code is better than 10 lines of magic that nobody can understand.
+* Don't do later what you can do now. "//FIXME: refactor" is not acceptable in new code.
+* When hesitating between 2 options, choose the one that is easier to reverse.
+* No is temporary, Yes is forever. If you're not sure about a new feature, say no. You can change your mind later.
+* Containers must be portable to the greatest possible number of machines. Be suspicious of any change which makes machines less interchangeable.
+* The fewer moving parts in a container, the better.
+* Don't merge it unless you document it.
+* Don't document it unless you can keep it up-to-date.
+* Don't merge it unless you test it!
+* Everyone's problem is slightly different. Focus on the part that is the same for everyone, and solve that.
diff --git a/vendor/github.com/moby/moby/project/README.md b/vendor/github.com/moby/moby/project/README.md
new file mode 100644
index 000000000..0eb5e5890
--- /dev/null
+++ b/vendor/github.com/moby/moby/project/README.md
@@ -0,0 +1,24 @@
+# Hacking on Docker
+
+The `project/` directory holds information and tools for everyone involved in the process of creating and
+distributing Docker, specifically:
+
+## Guides
+
+If you're a *contributor* or aspiring contributor, you should read [CONTRIBUTING.md](../CONTRIBUTING.md).
+
+If you're a *maintainer* or aspiring maintainer, you should read [MAINTAINERS](../MAINTAINERS).
+
+If you're a *packager* or aspiring packager, you should read [PACKAGERS.md](./PACKAGERS.md).
+
+If you're a maintainer in charge of a *release*, you should read [RELEASE-CHECKLIST.md](./RELEASE-CHECKLIST.md).
+
+## Roadmap
+
+A high-level roadmap is available at [ROADMAP.md](../ROADMAP.md).
+
+
+## Build tools
+
+[hack/make.sh](../hack/make.sh) is the primary build tool for Docker. It is used for compiling the official binary,
+running the test suite, and pushing releases.
diff --git a/vendor/github.com/moby/moby/project/RELEASE-CHECKLIST.md b/vendor/github.com/moby/moby/project/RELEASE-CHECKLIST.md
new file mode 100644
index 000000000..5c73b5826
--- /dev/null
+++ b/vendor/github.com/moby/moby/project/RELEASE-CHECKLIST.md
@@ -0,0 +1,519 @@
+# Release Checklist
+## A maintainer's guide to releasing Docker
+
+So you're in charge of a Docker release? Cool. Here's what to do.
+
+If your experience deviates from this document, please document the changes
+to keep it up-to-date.
+
+It is important to note that this document assumes that the git remote in your
+repository that corresponds to "https://github.com/docker/docker" is named
+"origin". If yours is not (for example, if you've chosen to name it "upstream"
+or something similar instead), be sure to adjust the listed snippets for your
+local environment accordingly. If you are not sure what your upstream remote is
+named, use a command like `git remote -v` to find out.
+
+If you don't have an upstream remote, you can add one easily using something
+like:
+
+```bash
+export GITHUBUSER="YOUR_GITHUB_USER"
+git remote add origin https://github.com/docker/docker.git
+git remote add $GITHUBUSER git@github.com:$GITHUBUSER/docker.git
+```
+
+### 1. Pull from master and create a release branch
+
+All release version numbers will be of the form: vX.Y.Z where X is the major
+version number, Y is the minor version number and Z is the patch release version number.
+
+#### Major releases
+
+The release branch name is just vX.Y because it's going to be the basis for all .Z releases.
+
+```bash
+export BASE=vX.Y
+export VERSION=vX.Y.Z
+git fetch origin
+git checkout --track origin/master
+git checkout -b release/$BASE
+```
+
+This new branch is going to be the base for the release. We need to push it to origin so we
+can track the cherry-picked changes and the version bump:
+
+```bash
+git push origin release/$BASE
+```
+
+When you have the major release branch in origin, we need to create the bump fork branch
+that we'll push to our fork:
+
+```bash
+git checkout -b bump_$VERSION
+```
+
+#### Patch releases
+
+If we have the release branch in origin, we can create the forked bump branch from it directly:
+
+```bash
+export VERSION=vX.Y.Z
+export PATCH=vX.Y.Z+1
+git fetch origin
+git checkout --track origin/release/$BASE
+git checkout -b bump_$PATCH
+```
+
+We cherry-pick only the commits we want into the bump branch:
+
+```bash
+# get the commits ids we want to cherry-pick
+git log
+# cherry-pick the commits starting from the oldest one, without including merge commits
+git cherry-pick -s -x
+git cherry-pick -s -x
+...
+```
+
+### 2. Update the VERSION files and API version on master
+
+We don't want to stop contributions to master just because we are releasing.
+So, after the release branch is up, we bump the VERSION and API version to mark
+the start of the "next" release.
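+
+In practice this bump is a small commit on master; for example, right after the 1.5.0
+release branch is created (a sketch of what the next two subsections describe in detail):
+
+```bash
+echo '1.6.0-dev' > VERSION
+git add VERSION
+git commit -s -m 'Bump VERSION to 1.6.0-dev'
+```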
+ +#### 2.1 Update the VERSION files + +Update the content of the `VERSION` file to be the next minor (incrementing Y) +and add the `-dev` suffix. For example, after the release branch for 1.5.0 is +created, the `VERSION` file gets updated to `1.6.0-dev` (as in "1.6.0 in the +making"). + +#### 2.2 Update API version on master + +We don't want API changes to go to the now frozen API version. Create a new +entry in `docs/reference/api/` by copying the latest and bumping the version +number (in both the file's name and content), and submit this in a PR against +master. + +### 3. Update CHANGELOG.md + +You can run this command for reference with git 2.0: + +```bash +git fetch --tags +LAST_VERSION=$(git tag -l --sort=-version:refname "v*" | grep -E 'v[0-9\.]+$' | head -1) +git log --stat $LAST_VERSION..bump_$VERSION +``` + +If you don't have git 2.0 but have a sort command that supports `-V`: +```bash +git fetch --tags +LAST_VERSION=$(git tag -l | grep -E 'v[0-9\.]+$' | sort -rV | head -1) +git log --stat $LAST_VERSION..bump_$VERSION +``` + +If releasing a major version (X or Y increased in vX.Y.Z), simply listing notable user-facing features is sufficient. +```markdown +#### Notable features since +* New docker command to do something useful +* Engine API change (deprecating old version) +* Performance improvements in some usecases +* ... +``` + +For minor releases (only Z increases in vX.Y.Z), provide a list of user-facing changes. +Each change should be listed under a category heading formatted as `#### CATEGORY`. + +`CATEGORY` should describe which part of the project is affected. + Valid categories are: + * Builder + * Documentation + * Hack + * Packaging + * Engine API + * Runtime + * Other (please use this category sparingly) + +Each change should be formatted as `BULLET DESCRIPTION`, given: + +* BULLET: either `-`, `+` or `*`, to indicate a bugfix, new feature or + upgrade, respectively. + +* DESCRIPTION: a concise description of the change that is relevant to the + end-user, using the present tense. Changes should be described in terms + of how they affect the user, for example "Add new feature X which allows Y", + "Fix bug which caused X", "Increase performance of Y". + +EXAMPLES: + +```markdown +## 0.3.6 (1995-12-25) + +#### Builder + ++ 'docker build -t FOO .' applies the tag FOO to the newly built image + +#### Engine API + +- Fix a bug in the optional unix socket transport + +#### Runtime + +* Improve detection of kernel version +``` + +If you need a list of contributors between the last major release and the +current bump branch, use something like: +```bash +git log --format='%aN <%aE>' v0.7.0...bump_v0.8.0 | sort -uf +``` +Obviously, you'll need to adjust version numbers as necessary. If you just need +a count, add a simple `| wc -l`. + +### 4. Change the contents of the VERSION file + +Before the big thing, you'll want to make successive release candidates and get +people to test. The release candidate number `N` should be part of the version: + +```bash +export RC_VERSION=${VERSION}-rcN +echo ${RC_VERSION#v} > VERSION +``` + +### 5. Test the docs + +Make sure that your tree includes documentation for any modified or +new features, syntax or semantic changes. + +To test locally: + +```bash +make docs +``` + +To make a shared test at https://beta-docs.docker.io: + +(You will need the `awsconfig` file added to the `docs/` dir) + +```bash +make AWS_S3_BUCKET=beta-docs.docker.io BUILD_ROOT=yes docs-release +``` + +### 6. 
Commit and create a pull request to the "release" branch
+
+```bash
+git add VERSION CHANGELOG.md
+git commit -m "Bump version to $VERSION"
+git push $GITHUBUSER bump_$VERSION
+echo "https://github.com/$GITHUBUSER/docker/compare/docker:release/$BASE...$GITHUBUSER:bump_$VERSION?expand=1"
+```
+
+That last command will give you the proper link to visit to ensure that you
+open the PR against the "release" branch instead of accidentally against
+"master" (like so many brave souls before you already have).
+
+### 7. Create a PR to update the AUTHORS file for the release
+
+Update the AUTHORS file, by running the `hack/generate-authors.sh` on the
+release branch. To prevent duplicate entries, you may need to update the
+`.mailmap` file accordingly.
+
+### 8. Build release candidate rpms and debs
+
+**NOTE**: It will be a lot faster if you pass a different graphdriver with
+`DOCKER_GRAPHDRIVER` than `vfs`.
+
+```bash
+docker build -t docker .
+docker run \
+    --rm -t --privileged \
+    -e DOCKER_GRAPHDRIVER=aufs \
+    -v $(pwd)/bundles:/go/src/github.com/docker/docker/bundles \
+    docker \
+    hack/make.sh binary build-deb build-rpm
+```
+
+### 9. Publish release candidate rpms and debs
+
+With the rpms and debs you built from the last step you can release them on the
+same server, or ideally, move them to a dedicated release box via scp into
+another docker/docker directory in bundles. This next step assumes you have
+a checkout of the docker source code at the same commit you used to build, with
+the artifacts from the last step in `bundles`.
+
+**NOTE:** If you put a space before the command your `.bash_history` will not
+save it. (for the `GPG_PASSPHRASE`).
+
+```bash
+docker build -t docker .
+docker run --rm -it --privileged \
+    -v /volumes/repos:/volumes/repos \
+    -v $(pwd)/bundles:/go/src/github.com/docker/docker/bundles \
+    -v $HOME/.gnupg:/root/.gnupg \
+    -e DOCKER_RELEASE_DIR=/volumes/repos \
+    -e GPG_PASSPHRASE \
+    -e KEEPBUNDLE=1 \
+    docker \
+    hack/make.sh release-deb release-rpm sign-repos generate-index-listing
+```
+
+### 10. Upload the changed repos to wherever you host
+
+For example, above we bind mounted `/volumes/repos` as the storage for
+`DOCKER_RELEASE_DIR`. In this case `/volumes/repos/apt` can be synced with
+a specific s3 bucket for the apt repo and `/volumes/repos/yum` can be synced with
+a s3 bucket for the yum repo.
+
+### 11. Publish release candidate binaries
+
+To run this you will need access to the release credentials. Get them from the
+Core maintainers.
+
+```bash
+docker build -t docker .
+
+# static binaries are still pushed to s3
+docker run \
+    -e AWS_S3_BUCKET=test.docker.com \
+    -e AWS_ACCESS_KEY_ID \
+    -e AWS_SECRET_ACCESS_KEY \
+    -e AWS_DEFAULT_REGION \
+    -i -t --privileged \
+    docker \
+    hack/release.sh
+```
+
+It will run the test suite, build the binaries and upload to the specified bucket,
+so this is a good time to verify that you're running against **test**.docker.com.
+
+### 12. Purge the cache!
+
+After the binaries are uploaded to test.docker.com and the packages are on
+apt.dockerproject.org and yum.dockerproject.org, make sure
+they get tested in both Ubuntu and Debian for any obvious installation
+issues or runtime issues.
+
+If everything looks good, it's time to create a git tag for this candidate:
+
+```bash
+git tag -a $RC_VERSION -m $RC_VERSION bump_$VERSION
+git push origin $RC_VERSION
+```
+
+Announcing in multiple places is the best way to get some help testing!
+
+```bash
+echo "Ubuntu/Debian: curl -sSL https://test.docker.com/ | sh"
+echo "Linux 64bit binary: https://test.docker.com/builds/Linux/x86_64/docker-${VERSION#v}"
+echo "Darwin/OSX 64bit client binary: https://test.docker.com/builds/Darwin/x86_64/docker-${VERSION#v}"
+echo "Linux 64bit tgz: https://test.docker.com/builds/Linux/x86_64/docker-${VERSION#v}.tgz"
+echo "Windows 64bit client binary: https://test.docker.com/builds/Windows/x86_64/docker-${VERSION#v}.exe"
+echo "Windows 32bit client binary: https://test.docker.com/builds/Windows/i386/docker-${VERSION#v}.exe"
+```
+
+### 13. Announce the release candidate
+
+The release candidate should be announced on:
+
+- IRC on #docker, #docker-dev, #docker-maintainers
+- In a comment on the pull request to notify subscribed people on GitHub
+- The [docker-dev](https://groups.google.com/forum/#!forum/docker-dev) group
+- The [docker-maintainers](https://groups.google.com/a/dockerproject.org/forum/#!forum/maintainers) group
+- (Optional) Any social media that can bring some attention to the release candidate
+
+### 14. Iterate on successive release candidates
+
+Spend several days along with the community explicitly investing time and
+resources to try and break Docker in every possible way, documenting any
+findings pertinent to the release. This time should be spent testing and
+finding ways in which the release might have caused various features or upgrade
+environments to have issues, not coding. During this time, the release is in
+code freeze, and any additional code changes will be pushed out to the next
+release.
+
+It should include various levels of breaking Docker, beyond just using Docker
+by the book.
+
+Any issues found may still remain issues for this release, but they should be
+documented and given appropriate warnings.
+
+During this phase, the `bump_$VERSION` branch will keep evolving as you will
+produce new release candidates. The frequency of new candidates is up to the
+release manager: use your best judgement, taking into account the severity of
+reported issues, testers' availability, and the time to the scheduled release
+date.
+
+Each time you want to produce a new release candidate, you will start by
+adding commits to the branch, usually by cherry-picking from master:
+
+```bash
+git cherry-pick -s -x -m0
+```
+
+You want your "bump commit" (the one that updates the CHANGELOG and VERSION
+files) to remain on top, so you'll have to `git rebase -i` to bring it back up.
+
+Now that your bump commit is back on top, you will need to update the CHANGELOG
+file (if appropriate for this particular release candidate), and update the
+VERSION file to increment the RC number:
+
+```bash
+export RC_VERSION=$VERSION-rcN
+echo $RC_VERSION > VERSION
+```
+
+You can now amend your last commit and update the bump branch:
+
+```bash
+git commit --amend
+git push -f $GITHUBUSER bump_$VERSION
+```
+
+Repeat steps 6 to 14 to tag the code, publish new binaries, announce availability, and
+get help testing.
+
+### 15. Finalize the bump branch
+
+When you're happy with the quality of a release candidate, you can move on and
+create the real thing.
+
+You will first have to amend the "bump commit" to drop the release candidate
+suffix in the VERSION file:
+
+```bash
+echo $VERSION > VERSION
+git add VERSION
+git commit --amend
+```
+
+You will then repeat step 6 to publish the binaries to test.docker.com.
+
+### 16. Get 2 other maintainers to validate the pull request
+
+### 17. Build final rpms and debs
+
+```bash
+docker build -t docker .
+docker run \
+	--rm -t --privileged \
+	-v $(pwd)/bundles:/go/src/github.com/docker/docker/bundles \
+	docker \
+	hack/make.sh binary build-deb build-rpm
+```
+
+### 18. Publish final rpms and debs
+
+With the rpms and debs you built from the last step you can release them on the
+same server, or ideally, move them to a dedicated release box via scp into
+another docker/docker directory in bundles. This next step assumes you have
+a checkout of the docker source code at the same commit you used to build, with
+the artifacts from the last step in `bundles`.
+
+**NOTE:** If you put a space before the command, your `.bash_history` will not
+save it (useful for the `GPG_PASSPHRASE`).
+
+```bash
+docker build -t docker .
+docker run --rm -it --privileged \
+	-v /volumes/repos:/volumes/repos \
+	-v $(pwd)/bundles:/go/src/github.com/docker/docker/bundles \
+	-v $HOME/.gnupg:/root/.gnupg \
+	-e DOCKER_RELEASE_DIR=/volumes/repos \
+	-e GPG_PASSPHRASE \
+	-e KEEPBUNDLE=1 \
+	docker \
+	hack/make.sh release-deb release-rpm sign-repos generate-index-listing
+```
+
+### 19. Upload the changed repos to wherever you host
+
+For example, above we bind mounted `/volumes/repos` as the storage for
+`DOCKER_RELEASE_DIR`. In this case `/volumes/repos/apt` can be synced with
+a specific s3 bucket for the apt repo and `/volumes/repos/yum` can be synced
+with an s3 bucket for the yum repo.
+
+### 20. Publish final binaries
+
+Once they're tested and reasonably believed to be working, run against
+get.docker.com:
+
+```bash
+docker build -t docker .
+# static binaries are still pushed to s3
+docker run \
+	-e AWS_S3_BUCKET=get.docker.com \
+	-e AWS_ACCESS_KEY_ID \
+	-e AWS_SECRET_ACCESS_KEY \
+	-e AWS_DEFAULT_REGION \
+	-i -t --privileged \
+	docker \
+	hack/release.sh
+```
+
+### 21. Purge the cache!
+
+### 22. Apply tag and create release
+
+It's very important that we don't make the tag until after the official
+release is uploaded to get.docker.com!
+
+```bash
+git tag -a $VERSION -m $VERSION bump_$VERSION
+git push origin $VERSION
+```
+
+Once the tag is pushed, go to GitHub and create a [new release](https://github.com/docker/docker/releases/new).
+If the tag is for an RC, make sure you check `This is a pre-release` at the bottom of the form.
+
+Select the tag that you just pushed as the version and paste the changelog in the description of the release.
+You can see examples in these two links:
+
+https://github.com/docker/docker/releases/tag/v1.8.0
+https://github.com/docker/docker/releases/tag/v1.8.0-rc3
+
+### 23. Go to GitHub to merge the `bump_$VERSION` branch into release
+
+Don't forget to push that pretty blue button to delete the leftover
+branch afterwards!
+
+### 24. Update the docs branch
+
+You will need to point the docs branch to the newly created release tag:
+
+```bash
+git checkout origin/docs
+git reset --hard origin/$VERSION
+git push -f origin docs
+```
+
+The docs will appear on https://docs.docker.com/ (though there may be cached
+versions, so it's worth checking http://docs.docker.com.s3-website-us-east-1.amazonaws.com/).
+For more information about documentation releases, see `docs/README.md`.
+
+Note that the new docs will not appear live on the site until the cache (a complex,
+distributed CDN system) is flushed. The `make docs-release` command will do this
+_if_ the `DISTRIBUTION_ID` is set correctly - this will take at least 15 minutes to run
+and you can check its progress with the CDN Cloudfront Chrome addon.
+
+### 25. Create a new pull request to merge your bump commit back into master
+
+```bash
+git checkout master
+git fetch
+git reset --hard origin/master
+git cherry-pick -s -x $VERSION
+git push $GITHUBUSER merge_release_$VERSION
+echo "https://github.com/$GITHUBUSER/docker/compare/docker:master...$GITHUBUSER:merge_release_$VERSION?expand=1"
+```
+
+Again, get two maintainers to validate, then merge, then push that pretty
+blue button to delete your branch.
+
+### 26. Rejoice and Evangelize!
+
+Congratulations! You're done.
+
+Go forth and announce the glad tidings of the new release in `#docker`,
+`#docker-dev`, on the [dev mailing list](https://groups.google.com/forum/#!forum/docker-dev),
+the [announce mailing list](https://groups.google.com/forum/#!forum/docker-announce),
+and on Twitter!
diff --git a/vendor/github.com/moby/moby/project/RELEASE-PROCESS.md b/vendor/github.com/moby/moby/project/RELEASE-PROCESS.md
new file mode 100644
index 000000000..d764e9d00
--- /dev/null
+++ b/vendor/github.com/moby/moby/project/RELEASE-PROCESS.md
@@ -0,0 +1,78 @@
+# Docker Release Process
+
+This document describes how the Docker project is released. The Docker project
+release process targets the Engine, Compose, Kitematic, Machine, Swarm,
+Distribution, Notary and their underlying dependencies (libnetwork, libkv,
+etc.).
+
+Step-by-step technical details of the process are described in
+[RELEASE-CHECKLIST.md](https://github.com/docker/docker/blob/master/project/RELEASE-CHECKLIST.md).
+
+## Release cycle
+
+The Docker project follows a **time-based release cycle** and ships every nine
+weeks. A release cycle starts the same day the previous release cycle ends.
+
+The first six weeks of the cycle are dedicated to development and review. During
+this phase, new features and bugfixes submitted to any of the projects are
+**eligible** to be shipped as part of the next release. However, no changeset
+submitted during this period is guaranteed to be merged for the current release
+cycle.
+
+## The freeze period
+
+Six weeks after the beginning of the cycle, the codebase is officially frozen
+and the codebase reaches a state close to the final release. A Release Candidate
+(RC) gets created at the same time. The freeze period is used to find bugs and
+get feedback on the state of the RC before the release.
+
+During this freeze period, while the `master` branch will continue its normal
+development cycle, no new features are accepted into the RC. As bugs are fixed
+in `master` the release owner will selectively 'cherry-pick' critical ones to
+be included into the RC. As the RC changes, new candidates are made available
+for the community to test and review.
+
+This period lasts for three weeks.
+
+## How to maximize chances of being merged before the freeze date?
+
+First of all, there is never a guarantee that a specific changeset is going to
+be merged. However, there are several actions to follow to maximize the chances
+for a changeset to be merged:
+
+- The team gives priority to review the PRs aligned with the Roadmap (usually
+defined by a ROADMAP.md file at the root of the repository).
+- The earlier a PR is opened, the more time the maintainers have to review. For
+example, if a PR is opened the day before the freeze date, it’s very unlikely
+that it will be merged for the release.
+- Constant communication with the maintainers (mailing-list, IRC, GitHub issues,
+etc.) allows you to get early feedback on the design before getting into the
+implementation, which usually reduces the time needed to discuss a changeset.
+- If the code is commented, fully tested and, by extension, follows every single
+rule defined in the [CONTRIBUTING guide](
+https://github.com/docker/docker/blob/master/CONTRIBUTING.md), this will help
+the maintainers by speeding up the review.
+
+## The release
+
+At the end of the freeze (nine weeks after the start of the cycle), all the
+projects are released together.
+
+```
+                                    Codebase             Release
+Start of                            is frozen            (end of the
+the Cycle                           (7th week)           9th week)
++---------------------------------------+---------------------+
+|                                       |                     |
+|          Development phase            |    Freeze phase     |
+|                                       |                     |
++---------------------------------------+---------------------+
+                 6 weeks                        3 weeks
+<---------------------------------------><-------------------->
+```
+
+## Exceptions
+
+If a critical issue is found at the end of the freeze period and more time is
+needed to address it, the release will be pushed back. When a release gets
+pushed back, the next release cycle gets delayed as well.
diff --git a/vendor/github.com/moby/moby/project/REVIEWING.md b/vendor/github.com/moby/moby/project/REVIEWING.md
new file mode 100644
index 000000000..51ef4c59d
--- /dev/null
+++ b/vendor/github.com/moby/moby/project/REVIEWING.md
@@ -0,0 +1,246 @@
+# Pull request reviewing process
+
+## Labels
+
+Labels are carefully picked to optimize for:
+
+ - Readability: maintainers must immediately know the state of a PR
+ - Filtering simplicity: different labels represent many different aspects of
+   the reviewing work, and can even be targeted at different maintainer groups.
+
+A pull request should only be assigned labels documented in this section: other labels that may
+exist on the repository should apply to issues.
+
+### DCO labels
+
+ * `dco/no`: automatically set by a bot when one of the commits lacks a proper signature
+
+### Status labels
+
+ * `status/0-triage`
+ * `status/1-design-review`
+ * `status/2-code-review`
+ * `status/3-docs-review`
+ * `status/4-ready-to-merge`
+
+Special status labels:
+
+ * `status/failing-ci`: indicates that the PR in its current state fails the test suite
+ * `status/needs-attention`: calls for a collective discussion during a review session
+
+### Impact labels (apply to merged pull requests)
+
+ * `impact/api`
+ * `impact/changelog`
+ * `impact/cli`
+ * `impact/deprecation`
+ * `impact/distribution`
+ * `impact/dockerfile`
+
+### Process labels (apply to merged pull requests)
+
+Process labels are to assist in preparing (patch) releases. These labels should only be used for pull requests.
+
+Label                           | Use for
+------------------------------- | -------------------------------------------------------------------------
+`process/cherry-pick`           | PRs that should be cherry-picked in the bump/release branch. These pull-requests must also be assigned to a milestone.
+`process/cherry-picked`         | PRs that have been cherry-picked. This label is helpful to find PRs that have been added to release-candidates, and to update the change log
+`process/docs-cherry-pick`      | PRs that should be cherry-picked in the docs branch. Only apply this label for changes that apply to the *current* release, and generic documentation fixes, such as Markdown and spelling fixes.
+`process/docs-cherry-picked`    | PRs that have been cherry-picked in the docs branch
+`process/merge-to-master`       | PRs that are opened directly on the bump/release branch, but also need to be merged back to "master"
+`process/merged-to-master`      | PRs that have been merged back to "master"
+
+
+## Workflow
+
+An opened pull request can be in one of 5 distinct states, for each of which there is a corresponding
+label that needs to be applied.
+
+### Triage - `status/0-triage`
+
+Maintainers are expected to triage new incoming pull requests by removing the `status/0-triage`
+label and adding the correct labels (e.g. `status/1-design-review`) before any other interaction
+with the PR. The starting label may potentially skip some steps depending on the kind of pull
+request: use your best judgement.
+
+Maintainers should perform an initial, high-level overview of the pull request before moving it to
+the next appropriate stage:
+
+ - Has DCO
+ - Contains sufficient justification (e.g., use cases) for the proposed change
+ - References the GitHub issue it fixes (if any) in the commit or the first GitHub comment
+
+Possible transitions from this state:
+
+ * Close: e.g., unresponsive contributor without DCO
+ * `status/1-design-review`: general case
+ * `status/2-code-review`: e.g. trivial bugfix
+ * `status/3-docs-review`: non-proposal documentation-only change
+
+### Design review - `status/1-design-review`
+
+Maintainers are expected to comment on the design of the pull request. Review of documentation is
+expected only in the context of design validation, not for stylistic changes.
+
+Ideally, documentation should reflect the expected behavior of the code. No code review should
+take place in this step.
+
+There are no strict rules on the way a design is validated: we usually aim for a consensus,
+although a single maintainer approval is often sufficient for obviously reasonable changes. In
+general, strong disagreement expressed by any of the maintainers should not be taken lightly.
+
+Once design is approved, a maintainer should make sure to remove this label and add the next one.
+
+Possible transitions from this state:
+
+ * Close: design rejected
+ * `status/2-code-review`: general case
+ * `status/3-docs-review`: proposals with only documentation changes
+
+### Code review - `status/2-code-review`
+
+Maintainers are expected to review the code and ensure that it is of good quality and in accordance
+with the documentation in the PR.
+
+New test cases are expected to be added. Ideally, those test cases should fail when the new code is
+absent, and pass when present. The test cases should strive to test as many variants and code paths
+as possible to ensure maximum coverage.
+
+Changes to code must be reviewed and approved (LGTM'd) by a minimum of two code maintainers. When
+the author of a PR is a maintainer, they still need the approval of two other maintainers.
+
+Once code is approved according to the rules of the subsystem, a maintainer should make sure to
+remove this label and add the next one. If documentation is absent but expected, maintainers should
+ask for documentation and move to status `status/3-docs-review` for a docs maintainer to follow.
+
+Possible transitions from this state:
+
+ * Close
+ * `status/1-design-review`: new design concerns are raised
+ * `status/3-docs-review`: general case
+ * `status/4-ready-to-merge`: change not impacting documentation
+
+### Docs review - `status/3-docs-review`
+
+Maintainers are expected to review the documentation in its broader context, ensuring consistency,
+completeness, validity, and breadth of coverage across all existing and new documentation.
+
+They should ask for any editorial change that makes the documentation more consistent and easier to
+understand.
+
+The docker/docker repository only contains _reference documentation_; all
+"narrative" documentation is kept in a [unified documentation
+repository](https://github.com/docker/docker.github.io). Reviewers must
+therefore verify which parts of the documentation need to be updated. Any
+contribution that may require changing the narrative should get the
+`impact/documentation` label: this is the signal for documentation maintainers
+that a change will likely need to happen on the unified documentation
+repository. When in doubt, it’s better to add the label and leave it to
+documentation maintainers to decide whether it’s ok to skip. In all cases,
+leave a comment to explain what documentation changes you think might be needed.
+
+- If the pull request does not impact the documentation at all, the docs review
+  step is skipped, and the pull request is ready to merge.
+- If the changes in
+  the pull request require changes to the reference documentation (either
+  command-line reference, or API reference), those changes must be included as
+  part of the pull request and will be reviewed now. Keep in mind that the
+  narrative documentation may contain output examples of commands, so may need
+  to be updated as well, in which case the `impact/documentation` label must
+  be applied.
+- If the PR has the `impact/documentation` label, merging is delayed until a
+  documentation maintainer acknowledges that a corresponding documentation PR
+  (or issue) is opened on the documentation repository. Once a documentation
+  maintainer acknowledges the change, they will move the PR to `status/4-ready-to-merge`
+  for a code maintainer to push the green button.
+
+Changes and additions to docs must be reviewed and approved (LGTM'd) by a minimum of two docs
+sub-project maintainers. If the docs change originates with a docs maintainer, only one additional
+LGTM is required (since we assume a docs maintainer approves of their own PR).
+
+Once documentation is approved, a maintainer should make sure to remove this label and
+add the next one.
+
+Possible transitions from this state:
+
+ * Close
+ * `status/1-design-review`: new design concerns are raised
+ * `status/2-code-review`: requires more code changes
+ * `status/4-ready-to-merge`: general case
+
+### Merge - `status/4-ready-to-merge`
+
+Maintainers are expected to merge this pull request as soon as possible. They can ask for a rebase
+or carry the pull request themselves.
+
+Possible transitions from this state:
+
+ * Merge: general case
+ * Close: carry PR
+
+After merging a pull request, the maintainer should consider applying one or multiple impact labels
+to ease future classification:
+
+ * `impact/api` signifies the patch impacted the Engine API
+ * `impact/changelog` signifies the change is significant enough to make it in the changelog
+ * `impact/cli` signifies the patch impacted a CLI command
+ * `impact/dockerfile` signifies the patch impacted the Dockerfile syntax
+ * `impact/deprecation` signifies the patch participates in deprecating an existing feature
+
+### Close
+
+If a pull request is closed, it is expected that sufficient justification will be provided. In
+particular, if there are alternative ways of achieving the same net result, then those need to be
+spelled out. If the pull request is trying to solve a use case that is not one that we (as a
+community) want to support, then a justification for why should be provided.
+
+The number of maintainers it takes to decide and close a PR is deliberately left unspecified. We
+assume that the group of maintainers is bound by mutual trust and respect, and that opposition from
+any single maintainer should be taken into consideration. Similarly, we expect maintainers to
+justify their reasoning and to accept debate.
+
+## Escalation process
+
+Despite the previously described reviewing process, some PRs might not show any progress for various
+reasons:
+
+ - No strong opinion for or against the proposed patch
+ - Debates about the proper way to solve the problem at hand
+ - Lack of consensus
+ - ...
+
+All of these will eventually lead to a stalled PR, where no apparent progress is made across several
+weeks, or even months.
+
+Maintainers should use their best judgement and apply the `status/needs-attention` label. It must
+be used sparingly, as each PR with such a label will be discussed by a group of maintainers during a
+review session. The goal of that session is to agree on one of the following outcomes for the PR:
+
+ * Close, explaining the rationale for not pursuing further
+ * Continue, either by pushing the PR further in the workflow, or by deciding to carry the patch
+   (ideally, a maintainer should be immediately assigned to make sure that the PR keeps continued
+   attention)
+ * Escalate to Solomon by formulating a few specific questions on which his answers will allow
+   maintainers to decide.
+
+## Milestones
+
+Typically, every merged pull request gets shipped naturally with the next release cut from the
+`master` branch (either the next minor or major version, as indicated by the
+[`VERSION`](https://github.com/docker/docker/blob/master/VERSION) file at the root of the
+repository). However, the time-based nature of the release process provides no guarantee that a
+given pull request will get merged in time. In other words, all open pull requests are implicitly
+considered part of the next minor or major release milestone, and this is not materialized on
+GitHub.
+
+A merged pull request must be attached to the milestone corresponding to the release in which it
+will be shipped: this is both useful for tracking, and to help the release manager with the
+changelog generation.
+
+An open pull request may exceptionally get attached to a milestone to express a particular intent to
+get it merged in time for that release. This may for example be the case for an important feature to
+be included in a minor release, or a critical bugfix to be included in a patch release.
+
+Finally, and as documented by the [`PATCH-RELEASES.md`](PATCH-RELEASES.md) process, the existence of
+a milestone is not a guarantee that a release will happen, as some milestones will be created purely
+for the purpose of bookkeeping.
diff --git a/vendor/github.com/moby/moby/project/TOOLS.md b/vendor/github.com/moby/moby/project/TOOLS.md
new file mode 100644
index 000000000..dda0fc034
--- /dev/null
+++ b/vendor/github.com/moby/moby/project/TOOLS.md
@@ -0,0 +1,63 @@
+# Tools
+
+This page describes the tools we use and infrastructure that is in place for
+the Docker project.
+
+### CI
+
+The Docker project uses [Jenkins](https://jenkins.dockerproject.org/) as our
+continuous integration server. Each Pull Request to Docker is tested by running the
+equivalent of `make all`. We chose Jenkins because we can host it ourselves and
+we run Docker in Docker to test.
+
+#### Leeroy
+
+Leeroy is a Go application which integrates Jenkins with
+GitHub pull requests. Leeroy uses
+[GitHub hooks](https://developer.github.com/v3/repos/hooks/)
+to listen for pull request notifications and starts jobs on your Jenkins
+server. Using the Jenkins
+[notification plugin](https://wiki.jenkins-ci.org/display/JENKINS/Notification+Plugin),
+Leeroy updates the pull request using GitHub's
+[status API](https://developer.github.com/v3/repos/statuses/)
+with pending, success, failure, or error statuses.
+
+The leeroy repository is maintained at
+[github.com/docker/leeroy](https://github.com/docker/leeroy).
+
+#### GordonTheTurtle IRC Bot
+
+The GordonTheTurtle IRC Bot lives in the
+[#docker-maintainers](https://botbot.me/freenode/docker-maintainers/) channel
+on Freenode. He is built in Go and is based on the project at
+[github.com/fabioxgn/go-bot](https://github.com/fabioxgn/go-bot).
+
+His main command is `!rebuild`, which rebuilds a given Pull Request for a repository.
+This command works by integrating with Leeroy. He has a few other commands too, such
+as `!gif` or `!godoc`, but we are always looking for more fun commands to add.
+
+The gordon-bot repository is maintained at
+[github.com/docker/gordon-bot](https://github.com/docker/gordon-bot)
+
+### NSQ
+
+We use [NSQ](https://github.com/bitly/nsq) for various aspects of the project
+infrastructure.
+
+#### Hooks
+
+The hooks project,
+[github.com/crosbymichael/hooks](https://github.com/crosbymichael/hooks),
+is a small Go application that manages webhooks from GitHub, hub.docker.com, or
+other third-party services.
+
+It can be used for listening to GitHub webhooks and pushing them to a queue,
+archiving hooks to RethinkDB for processing, and broadcasting hooks to various
+jobs.
+
+#### Docker Master Binaries
+
+One of the things queued from the Hooks is the building of the Master
+Binaries. This happens on every push to the master branch of Docker. The
+repository for this is maintained at
+[github.com/docker/docker-bb](https://github.com/docker/docker-bb).
diff --git a/vendor/github.com/moby/moby/reference/store.go b/vendor/github.com/moby/moby/reference/store.go
new file mode 100644
index 000000000..5b68c437c
--- /dev/null
+++ b/vendor/github.com/moby/moby/reference/store.go
@@ -0,0 +1,343 @@
+package reference
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"sort"
+	"sync"
+
+	"github.com/docker/distribution/reference"
+	"github.com/docker/docker/pkg/ioutils"
+	"github.com/opencontainers/go-digest"
+)
+
+var (
+	// ErrDoesNotExist is returned if a reference is not found in the
+	// store.
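+	// Callers are expected to compare returned errors against this value
+	// directly (err == ErrDoesNotExist).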
+	ErrDoesNotExist = errors.New("reference does not exist")
+)
+
+// An Association is a tuple associating a reference with an image ID.
+type Association struct {
+	Ref reference.Named
+	ID  digest.Digest
+}
+
+// Store provides the set of methods which can operate on a tag store.
+type Store interface {
+	References(id digest.Digest) []reference.Named
+	ReferencesByName(ref reference.Named) []Association
+	AddTag(ref reference.Named, id digest.Digest, force bool) error
+	AddDigest(ref reference.Canonical, id digest.Digest, force bool) error
+	Delete(ref reference.Named) (bool, error)
+	Get(ref reference.Named) (digest.Digest, error)
+}
+
+type store struct {
+	mu sync.RWMutex
+	// jsonPath is the path to the file where the serialized tag data is
+	// stored.
+	jsonPath string
+	// Repositories is a map of repositories, indexed by name.
+	Repositories map[string]repository
+	// referencesByIDCache is a cache of references indexed by ID, to speed
+	// up References.
+	referencesByIDCache map[digest.Digest]map[string]reference.Named
+	// platform is the container target platform for this store (which may
+	// be different from the host operating system).
+	platform string
+}
+
+// Repository maps tags to digests. The key is a stringified Reference,
+// including the repository name.
+type repository map[string]digest.Digest
+
+type lexicalRefs []reference.Named
+
+func (a lexicalRefs) Len() int      { return len(a) }
+func (a lexicalRefs) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a lexicalRefs) Less(i, j int) bool {
+	return a[i].String() < a[j].String()
+}
+
+type lexicalAssociations []Association
+
+func (a lexicalAssociations) Len() int      { return len(a) }
+func (a lexicalAssociations) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a lexicalAssociations) Less(i, j int) bool {
+	return a[i].Ref.String() < a[j].Ref.String()
+}
+
+// NewReferenceStore creates a new reference store, tied to a file path where
+// the set of references is serialized in JSON format.
+func NewReferenceStore(jsonPath, platform string) (Store, error) {
+	abspath, err := filepath.Abs(jsonPath)
+	if err != nil {
+		return nil, err
+	}
+
+	store := &store{
+		jsonPath:            abspath,
+		Repositories:        make(map[string]repository),
+		referencesByIDCache: make(map[digest.Digest]map[string]reference.Named),
+		platform:            platform,
+	}
+	// Load the json file if it exists, otherwise create it.
+	if err := store.reload(); os.IsNotExist(err) {
+		if err := store.save(); err != nil {
+			return nil, err
+		}
+	} else if err != nil {
+		return nil, err
+	}
+	return store, nil
+}
+
+// AddTag adds a tag reference to the store. If force is set to true, existing
+// references can be overwritten. This only works for tags, not digests.
+func (store *store) AddTag(ref reference.Named, id digest.Digest, force bool) error {
+	if _, isCanonical := ref.(reference.Canonical); isCanonical {
+		return errors.New("refusing to create a tag with a digest reference")
+	}
+	return store.addReference(reference.TagNameOnly(ref), id, force)
+}
+
+// AddDigest adds a digest reference to the store.
+func (store *store) AddDigest(ref reference.Canonical, id digest.Digest, force bool) error {
+	return store.addReference(ref, id, force)
+}
+
+func favorDigest(originalRef reference.Named) (reference.Named, error) {
+	ref := originalRef
+	// If the reference includes a digest and a tag, we must store only the
+	// digest.
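+	// For example, "repo:tag@sha256:..." is reduced to "repo@sha256:...".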
+ canonical, isCanonical := originalRef.(reference.Canonical) + _, isNamedTagged := originalRef.(reference.NamedTagged) + + if isCanonical && isNamedTagged { + trimmed, err := reference.WithDigest(reference.TrimNamed(canonical), canonical.Digest()) + if err != nil { + // should never happen + return originalRef, err + } + ref = trimmed + } + return ref, nil +} + +func (store *store) addReference(ref reference.Named, id digest.Digest, force bool) error { + ref, err := favorDigest(ref) + if err != nil { + return err + } + + refName := reference.FamiliarName(ref) + refStr := reference.FamiliarString(ref) + + if refName == string(digest.Canonical) { + return errors.New("refusing to create an ambiguous tag using digest algorithm as name") + } + + store.mu.Lock() + defer store.mu.Unlock() + + repository, exists := store.Repositories[refName] + if !exists || repository == nil { + repository = make(map[string]digest.Digest) + store.Repositories[refName] = repository + } + + oldID, exists := repository[refStr] + + if exists { + // force only works for tags + if digested, isDigest := ref.(reference.Canonical); isDigest { + return fmt.Errorf("Cannot overwrite digest %s", digested.Digest().String()) + } + + if !force { + return fmt.Errorf("Conflict: Tag %s is already set to image %s, if you want to replace it, please use -f option", refStr, oldID.String()) + } + + if store.referencesByIDCache[oldID] != nil { + delete(store.referencesByIDCache[oldID], refStr) + if len(store.referencesByIDCache[oldID]) == 0 { + delete(store.referencesByIDCache, oldID) + } + } + } + + repository[refStr] = id + if store.referencesByIDCache[id] == nil { + store.referencesByIDCache[id] = make(map[string]reference.Named) + } + store.referencesByIDCache[id][refStr] = ref + + return store.save() +} + +// Delete deletes a reference from the store. It returns true if a deletion +// happened, or false otherwise. +func (store *store) Delete(ref reference.Named) (bool, error) { + ref, err := favorDigest(ref) + if err != nil { + return false, err + } + + ref = reference.TagNameOnly(ref) + + refName := reference.FamiliarName(ref) + refStr := reference.FamiliarString(ref) + + store.mu.Lock() + defer store.mu.Unlock() + + repository, exists := store.Repositories[refName] + if !exists { + return false, ErrDoesNotExist + } + + if id, exists := repository[refStr]; exists { + delete(repository, refStr) + if len(repository) == 0 { + delete(store.Repositories, refName) + } + if store.referencesByIDCache[id] != nil { + delete(store.referencesByIDCache[id], refStr) + if len(store.referencesByIDCache[id]) == 0 { + delete(store.referencesByIDCache, id) + } + } + return true, store.save() + } + + return false, ErrDoesNotExist +} + +// Get retrieves an item from the store by reference +func (store *store) Get(ref reference.Named) (digest.Digest, error) { + if canonical, ok := ref.(reference.Canonical); ok { + // If reference contains both tag and digest, only + // lookup by digest as it takes precedence over + // tag, until tag/digest combos are stored. 
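+		// When both are present, rebuild the lookup key as name@digest,
+		// dropping the tag.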
+ if _, ok := ref.(reference.Tagged); ok { + var err error + ref, err = reference.WithDigest(reference.TrimNamed(canonical), canonical.Digest()) + if err != nil { + return "", err + } + } + } else { + ref = reference.TagNameOnly(ref) + } + + refName := reference.FamiliarName(ref) + refStr := reference.FamiliarString(ref) + + store.mu.RLock() + defer store.mu.RUnlock() + + repository, exists := store.Repositories[refName] + if !exists || repository == nil { + return "", ErrDoesNotExist + } + + id, exists := repository[refStr] + if !exists { + return "", ErrDoesNotExist + } + + return id, nil +} + +// References returns a slice of references to the given ID. The slice +// will be nil if there are no references to this ID. +func (store *store) References(id digest.Digest) []reference.Named { + store.mu.RLock() + defer store.mu.RUnlock() + + // Convert the internal map to an array for two reasons: + // 1) We must not return a mutable + // 2) It would be ugly to expose the extraneous map keys to callers. + + var references []reference.Named + for _, ref := range store.referencesByIDCache[id] { + references = append(references, ref) + } + + sort.Sort(lexicalRefs(references)) + + return references +} + +// ReferencesByName returns the references for a given repository name. +// If there are no references known for this repository name, +// ReferencesByName returns nil. +func (store *store) ReferencesByName(ref reference.Named) []Association { + refName := reference.FamiliarName(ref) + + store.mu.RLock() + defer store.mu.RUnlock() + + repository, exists := store.Repositories[refName] + if !exists { + return nil + } + + var associations []Association + for refStr, refID := range repository { + ref, err := reference.ParseNormalizedNamed(refStr) + if err != nil { + // Should never happen + return nil + } + associations = append(associations, + Association{ + Ref: ref, + ID: refID, + }) + } + + sort.Sort(lexicalAssociations(associations)) + + return associations +} + +func (store *store) save() error { + // Store the json + jsonData, err := json.Marshal(store) + if err != nil { + return err + } + return ioutils.AtomicWriteFile(store.jsonPath, jsonData, 0600) +} + +func (store *store) reload() error { + f, err := os.Open(store.jsonPath) + if err != nil { + return err + } + defer f.Close() + if err := json.NewDecoder(f).Decode(&store); err != nil { + return err + } + + for _, repository := range store.Repositories { + for refStr, refID := range repository { + ref, err := reference.ParseNormalizedNamed(refStr) + if err != nil { + // Should never happen + continue + } + if store.referencesByIDCache[refID] == nil { + store.referencesByIDCache[refID] = make(map[string]reference.Named) + } + store.referencesByIDCache[refID][refStr] = ref + } + } + + return nil +} diff --git a/vendor/github.com/moby/moby/reference/store_test.go b/vendor/github.com/moby/moby/reference/store_test.go new file mode 100644 index 000000000..2c796e76f --- /dev/null +++ b/vendor/github.com/moby/moby/reference/store_test.go @@ -0,0 +1,358 @@ +package reference + +import ( + "bytes" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/docker/distribution/reference" + "github.com/opencontainers/go-digest" +) + +var ( + saveLoadTestCases = map[string]digest.Digest{ + "registry:5000/foobar:HEAD": "sha256:470022b8af682154f57a2163d030eb369549549cba00edc69e1b99b46bb924d6", + "registry:5000/foobar:alternate": "sha256:ae300ebc4a4f00693702cfb0a5e0b7bc527b353828dc86ad09fb95c8a681b793", + 
"registry:5000/foobar:latest": "sha256:6153498b9ac00968d71b66cca4eac37e990b5f9eb50c26877eb8799c8847451b", + "registry:5000/foobar:master": "sha256:6c9917af4c4e05001b346421959d7ea81b6dc9d25718466a37a6add865dfd7fc", + "jess/hollywood:latest": "sha256:ae7a5519a0a55a2d4ef20ddcbd5d0ca0888a1f7ab806acc8e2a27baf46f529fe", + "registry@sha256:367eb40fd0330a7e464777121e39d2f5b3e8e23a1e159342e53ab05c9e4d94e6": "sha256:24126a56805beb9711be5f4590cc2eb55ab8d4a85ebd618eed72bb19fc50631c", + "busybox:latest": "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + } + + marshalledSaveLoadTestCases = []byte(`{"Repositories":{"busybox":{"busybox:latest":"sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c"},"jess/hollywood":{"jess/hollywood:latest":"sha256:ae7a5519a0a55a2d4ef20ddcbd5d0ca0888a1f7ab806acc8e2a27baf46f529fe"},"registry":{"registry@sha256:367eb40fd0330a7e464777121e39d2f5b3e8e23a1e159342e53ab05c9e4d94e6":"sha256:24126a56805beb9711be5f4590cc2eb55ab8d4a85ebd618eed72bb19fc50631c"},"registry:5000/foobar":{"registry:5000/foobar:HEAD":"sha256:470022b8af682154f57a2163d030eb369549549cba00edc69e1b99b46bb924d6","registry:5000/foobar:alternate":"sha256:ae300ebc4a4f00693702cfb0a5e0b7bc527b353828dc86ad09fb95c8a681b793","registry:5000/foobar:latest":"sha256:6153498b9ac00968d71b66cca4eac37e990b5f9eb50c26877eb8799c8847451b","registry:5000/foobar:master":"sha256:6c9917af4c4e05001b346421959d7ea81b6dc9d25718466a37a6add865dfd7fc"}}}`) +) + +func TestLoad(t *testing.T) { + jsonFile, err := ioutil.TempFile("", "tag-store-test") + if err != nil { + t.Fatalf("error creating temp file: %v", err) + } + defer os.RemoveAll(jsonFile.Name()) + + // Write canned json to the temp file + _, err = jsonFile.Write(marshalledSaveLoadTestCases) + if err != nil { + t.Fatalf("error writing to temp file: %v", err) + } + jsonFile.Close() + + store, err := NewReferenceStore(jsonFile.Name(), runtime.GOOS) + if err != nil { + t.Fatalf("error creating tag store: %v", err) + } + + for refStr, expectedID := range saveLoadTestCases { + ref, err := reference.ParseNormalizedNamed(refStr) + if err != nil { + t.Fatalf("failed to parse reference: %v", err) + } + id, err := store.Get(ref) + if err != nil { + t.Fatalf("could not find reference %s: %v", refStr, err) + } + if id != expectedID { + t.Fatalf("expected %s - got %s", expectedID, id) + } + } +} + +func TestSave(t *testing.T) { + jsonFile, err := ioutil.TempFile("", "tag-store-test") + if err != nil { + t.Fatalf("error creating temp file: %v", err) + } + _, err = jsonFile.Write([]byte(`{}`)) + jsonFile.Close() + defer os.RemoveAll(jsonFile.Name()) + + store, err := NewReferenceStore(jsonFile.Name(), runtime.GOOS) + if err != nil { + t.Fatalf("error creating tag store: %v", err) + } + + for refStr, id := range saveLoadTestCases { + ref, err := reference.ParseNormalizedNamed(refStr) + if err != nil { + t.Fatalf("failed to parse reference: %v", err) + } + if canonical, ok := ref.(reference.Canonical); ok { + err = store.AddDigest(canonical, id, false) + if err != nil { + t.Fatalf("could not add digest reference %s: %v", refStr, err) + } + } else { + err = store.AddTag(ref, id, false) + if err != nil { + t.Fatalf("could not add reference %s: %v", refStr, err) + } + } + } + + jsonBytes, err := ioutil.ReadFile(jsonFile.Name()) + if err != nil { + t.Fatalf("could not read json file: %v", err) + } + + if !bytes.Equal(jsonBytes, marshalledSaveLoadTestCases) { + t.Fatalf("save output did not match expectations\nexpected:\n%s\ngot:\n%s", marshalledSaveLoadTestCases, 
jsonBytes) + } +} + +func TestAddDeleteGet(t *testing.T) { + jsonFile, err := ioutil.TempFile("", "tag-store-test") + if err != nil { + t.Fatalf("error creating temp file: %v", err) + } + _, err = jsonFile.Write([]byte(`{}`)) + jsonFile.Close() + defer os.RemoveAll(jsonFile.Name()) + + store, err := NewReferenceStore(jsonFile.Name(), runtime.GOOS) + if err != nil { + t.Fatalf("error creating tag store: %v", err) + } + + testImageID1 := digest.Digest("sha256:9655aef5fd742a1b4e1b7b163aa9f1c76c186304bf39102283d80927c916ca9c") + testImageID2 := digest.Digest("sha256:9655aef5fd742a1b4e1b7b163aa9f1c76c186304bf39102283d80927c916ca9d") + testImageID3 := digest.Digest("sha256:9655aef5fd742a1b4e1b7b163aa9f1c76c186304bf39102283d80927c916ca9e") + + // Try adding a reference with no tag or digest + nameOnly, err := reference.ParseNormalizedNamed("username/repo") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if err = store.AddTag(nameOnly, testImageID1, false); err != nil { + t.Fatalf("error adding to store: %v", err) + } + + // Add a few references + ref1, err := reference.ParseNormalizedNamed("username/repo1:latest") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if err = store.AddTag(ref1, testImageID1, false); err != nil { + t.Fatalf("error adding to store: %v", err) + } + + ref2, err := reference.ParseNormalizedNamed("username/repo1:old") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if err = store.AddTag(ref2, testImageID2, false); err != nil { + t.Fatalf("error adding to store: %v", err) + } + + ref3, err := reference.ParseNormalizedNamed("username/repo1:alias") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if err = store.AddTag(ref3, testImageID1, false); err != nil { + t.Fatalf("error adding to store: %v", err) + } + + ref4, err := reference.ParseNormalizedNamed("username/repo2:latest") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if err = store.AddTag(ref4, testImageID2, false); err != nil { + t.Fatalf("error adding to store: %v", err) + } + + ref5, err := reference.ParseNormalizedNamed("username/repo3@sha256:58153dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if err = store.AddDigest(ref5.(reference.Canonical), testImageID2, false); err != nil { + t.Fatalf("error adding to store: %v", err) + } + + // Attempt to overwrite with force == false + if err = store.AddTag(ref4, testImageID3, false); err == nil || !strings.HasPrefix(err.Error(), "Conflict:") { + t.Fatalf("did not get expected error on overwrite attempt - got %v", err) + } + // Repeat to overwrite with force == true + if err = store.AddTag(ref4, testImageID3, true); err != nil { + t.Fatalf("failed to force tag overwrite: %v", err) + } + + // Check references so far + id, err := store.Get(nameOnly) + if err != nil { + t.Fatalf("Get returned error: %v", err) + } + if id != testImageID1 { + t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID1.String()) + } + + id, err = store.Get(ref1) + if err != nil { + t.Fatalf("Get returned error: %v", err) + } + if id != testImageID1 { + t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID1.String()) + } + + id, err = store.Get(ref2) + if err != nil { + t.Fatalf("Get returned error: %v", err) + } + if id != testImageID2 { + t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID2.String()) + } + + id, err = store.Get(ref3) + if 
err != nil {
+		t.Fatalf("Get returned error: %v", err)
+	}
+	if id != testImageID1 {
+		t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID1.String())
+	}
+
+	id, err = store.Get(ref4)
+	if err != nil {
+		t.Fatalf("Get returned error: %v", err)
+	}
+	if id != testImageID3 {
+		t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID3.String())
+	}
+
+	id, err = store.Get(ref5)
+	if err != nil {
+		t.Fatalf("Get returned error: %v", err)
+	}
+	if id != testImageID2 {
+		t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID2.String())
+	}
+
+	// Get should return ErrDoesNotExist for a nonexistent repo
+	nonExistRepo, err := reference.ParseNormalizedNamed("username/nonexistrepo:latest")
+	if err != nil {
+		t.Fatalf("could not parse reference: %v", err)
+	}
+	if _, err = store.Get(nonExistRepo); err != ErrDoesNotExist {
+		t.Fatal("Expected ErrDoesNotExist from Get")
+	}
+
+	// Get should return ErrDoesNotExist for a nonexistent tag
+	nonExistTag, err := reference.ParseNormalizedNamed("username/repo1:nonexist")
+	if err != nil {
+		t.Fatalf("could not parse reference: %v", err)
+	}
+	if _, err = store.Get(nonExistTag); err != ErrDoesNotExist {
+		t.Fatal("Expected ErrDoesNotExist from Get")
+	}
+
+	// Check References
+	refs := store.References(testImageID1)
+	if len(refs) != 3 {
+		t.Fatal("unexpected number of references")
+	}
+	// Looking for the references in this order verifies that they are
+	// returned lexically sorted.
+	if refs[0].String() != ref3.String() {
+		t.Fatalf("unexpected reference: %v", refs[0].String())
+	}
+	if refs[1].String() != ref1.String() {
+		t.Fatalf("unexpected reference: %v", refs[1].String())
+	}
+	if refs[2].String() != nameOnly.String()+":latest" {
+		t.Fatalf("unexpected reference: %v", refs[2].String())
+	}
+
+	// Check ReferencesByName
+	repoName, err := reference.ParseNormalizedNamed("username/repo1")
+	if err != nil {
+		t.Fatalf("could not parse reference: %v", err)
+	}
+	associations := store.ReferencesByName(repoName)
+	if len(associations) != 3 {
+		t.Fatal("unexpected number of associations")
+	}
+	// Looking for the associations in this order verifies that they are
+	// returned lexically sorted.
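+	// ("alias" sorts before "latest", which sorts before "old".)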
+	if associations[0].Ref.String() != ref3.String() {
+		t.Fatalf("unexpected reference: %v", associations[0].Ref.String())
+	}
+	if associations[0].ID != testImageID1 {
+		t.Fatalf("unexpected association ID: %v", associations[0].ID.String())
+	}
+	if associations[1].Ref.String() != ref1.String() {
+		t.Fatalf("unexpected reference: %v", associations[1].Ref.String())
+	}
+	if associations[1].ID != testImageID1 {
+		t.Fatalf("unexpected association ID: %v", associations[1].ID.String())
+	}
+	if associations[2].Ref.String() != ref2.String() {
+		t.Fatalf("unexpected reference: %v", associations[2].Ref.String())
+	}
+	if associations[2].ID != testImageID2 {
+		t.Fatalf("unexpected association ID: %v", associations[2].ID.String())
+	}
+
+	// Delete should return ErrDoesNotExist for a nonexistent repo
+	if _, err = store.Delete(nonExistRepo); err != ErrDoesNotExist {
+		t.Fatal("Expected ErrDoesNotExist from Delete")
+	}
+
+	// Delete should return ErrDoesNotExist for a nonexistent tag
+	if _, err = store.Delete(nonExistTag); err != ErrDoesNotExist {
+		t.Fatal("Expected ErrDoesNotExist from Delete")
+	}
+
+	// Delete a few references
+	if deleted, err := store.Delete(ref1); err != nil || deleted != true {
+		t.Fatal("Delete failed")
+	}
+	if _, err := store.Get(ref1); err != ErrDoesNotExist {
+		t.Fatal("Expected ErrDoesNotExist from Get")
+	}
+	if deleted, err := store.Delete(ref5); err != nil || deleted != true {
+		t.Fatal("Delete failed")
+	}
+	if _, err := store.Get(ref5); err != ErrDoesNotExist {
+		t.Fatal("Expected ErrDoesNotExist from Get")
+	}
+	if deleted, err := store.Delete(nameOnly); err != nil || deleted != true {
+		t.Fatal("Delete failed")
+	}
+	if _, err := store.Get(nameOnly); err != ErrDoesNotExist {
+		t.Fatal("Expected ErrDoesNotExist from Get")
+	}
+}
+
+func TestInvalidTags(t *testing.T) {
+	tmpDir, err := ioutil.TempDir("", "tag-store-test")
+	if err != nil {
+		t.Fatalf("error creating temp dir: %v", err)
+	}
+	defer os.RemoveAll(tmpDir)
+
+	store, err := NewReferenceStore(filepath.Join(tmpDir, "repositories.json"), runtime.GOOS)
+	if err != nil {
+		t.Fatalf("error creating tag store: %v", err)
+	}
+	id := digest.Digest("sha256:470022b8af682154f57a2163d030eb369549549cba00edc69e1b99b46bb924d6")
+
+	// sha256 as repo name
+	ref, err := reference.ParseNormalizedNamed("sha256:abc")
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = store.AddTag(ref, id, true)
+	if err == nil {
+		t.Fatalf("expected setting tag %q to fail", ref)
+	}
+
+	// setting digest as a tag
+	ref, err = reference.ParseNormalizedNamed("registry@sha256:367eb40fd0330a7e464777121e39d2f5b3e8e23a1e159342e53ab05c9e4d94e6")
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = store.AddTag(ref, id, true)
+	if err == nil {
+		t.Fatalf("expected setting tag %q to fail", ref)
+	}
+}
diff --git a/vendor/github.com/moby/moby/registry/auth.go b/vendor/github.com/moby/moby/registry/auth.go
new file mode 100644
index 000000000..8cadd51ba
--- /dev/null
+++ b/vendor/github.com/moby/moby/registry/auth.go
@@ -0,0 +1,303 @@
+package registry
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/registry/client/auth"
+	"github.com/docker/distribution/registry/client/auth/challenge"
+	"github.com/docker/distribution/registry/client/transport"
+	"github.com/docker/docker/api/types"
+	registrytypes "github.com/docker/docker/api/types/registry"
+)
+
+const (
+	// AuthClientID is the ClientID used for the token server
+	AuthClientID = "docker"
+)
+
+// loginV1 tries to register/login to the v1 registry server.
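+// On success it returns a status message and an empty identity token; identity
+// tokens are only issued by the v2 login flow.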
+func loginV1(authConfig *types.AuthConfig, apiEndpoint APIEndpoint, userAgent string) (string, string, error) { + registryEndpoint, err := apiEndpoint.ToV1Endpoint(userAgent, nil) + if err != nil { + return "", "", err + } + + serverAddress := registryEndpoint.String() + + logrus.Debugf("attempting v1 login to registry endpoint %s", serverAddress) + + if serverAddress == "" { + return "", "", fmt.Errorf("Server Error: Server Address not set.") + } + + loginAgainstOfficialIndex := serverAddress == IndexServer + + req, err := http.NewRequest("GET", serverAddress+"users/", nil) + if err != nil { + return "", "", err + } + req.SetBasicAuth(authConfig.Username, authConfig.Password) + resp, err := registryEndpoint.client.Do(req) + if err != nil { + // fallback when request could not be completed + return "", "", fallbackError{ + err: err, + } + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", "", err + } + if resp.StatusCode == http.StatusOK { + return "Login Succeeded", "", nil + } else if resp.StatusCode == http.StatusUnauthorized { + if loginAgainstOfficialIndex { + return "", "", fmt.Errorf("Wrong login/password, please try again. Haven't got a Docker ID? Create one at https://hub.docker.com") + } + return "", "", fmt.Errorf("Wrong login/password, please try again") + } else if resp.StatusCode == http.StatusForbidden { + if loginAgainstOfficialIndex { + return "", "", fmt.Errorf("Login: Account is not active. Please check your e-mail for a confirmation link.") + } + // *TODO: Use registry configuration to determine what this says, if anything? + return "", "", fmt.Errorf("Login: Account is not active. Please see the documentation of the registry %s for instructions how to activate it.", serverAddress) + } else if resp.StatusCode == http.StatusInternalServerError { // Issue #14326 + logrus.Errorf("%s returned status code %d. Response Body :\n%s", req.URL.String(), resp.StatusCode, body) + return "", "", fmt.Errorf("Internal Server Error") + } + return "", "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, + resp.StatusCode, resp.Header) +} + +type loginCredentialStore struct { + authConfig *types.AuthConfig +} + +func (lcs loginCredentialStore) Basic(*url.URL) (string, string) { + return lcs.authConfig.Username, lcs.authConfig.Password +} + +func (lcs loginCredentialStore) RefreshToken(*url.URL, string) string { + return lcs.authConfig.IdentityToken +} + +func (lcs loginCredentialStore) SetRefreshToken(u *url.URL, service, token string) { + lcs.authConfig.IdentityToken = token +} + +type staticCredentialStore struct { + auth *types.AuthConfig +} + +// NewStaticCredentialStore returns a credential store +// which always returns the same credential values. +func NewStaticCredentialStore(auth *types.AuthConfig) auth.CredentialStore { + return staticCredentialStore{ + auth: auth, + } +} + +func (scs staticCredentialStore) Basic(*url.URL) (string, string) { + if scs.auth == nil { + return "", "" + } + return scs.auth.Username, scs.auth.Password +} + +func (scs staticCredentialStore) RefreshToken(*url.URL, string) string { + if scs.auth == nil { + return "" + } + return scs.auth.IdentityToken +} + +func (scs staticCredentialStore) SetRefreshToken(*url.URL, string, string) { +} + +type fallbackError struct { + err error +} + +func (err fallbackError) Error() string { + return err.err.Error() +} + +// loginV2 tries to login to the v2 registry server. The given registry +// endpoint will be pinged to get authorization challenges. 
These challenges
+// will be used to authenticate against the registry to validate credentials.
+func loginV2(authConfig *types.AuthConfig, endpoint APIEndpoint, userAgent string) (string, string, error) {
+	logrus.Debugf("attempting v2 login to registry endpoint %s", strings.TrimRight(endpoint.URL.String(), "/")+"/v2/")
+
+	modifiers := DockerHeaders(userAgent, nil)
+	authTransport := transport.NewTransport(NewTransport(endpoint.TLSConfig), modifiers...)
+
+	credentialAuthConfig := *authConfig
+	creds := loginCredentialStore{
+		authConfig: &credentialAuthConfig,
+	}
+
+	loginClient, foundV2, err := v2AuthHTTPClient(endpoint.URL, authTransport, modifiers, creds, nil)
+	if err != nil {
+		return "", "", err
+	}
+
+	endpointStr := strings.TrimRight(endpoint.URL.String(), "/") + "/v2/"
+	req, err := http.NewRequest("GET", endpointStr, nil)
+	if err != nil {
+		if !foundV2 {
+			err = fallbackError{err: err}
+		}
+		return "", "", err
+	}
+
+	resp, err := loginClient.Do(req)
+	if err != nil {
+		if !foundV2 {
+			err = fallbackError{err: err}
+		}
+		return "", "", err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		// TODO(dmcgowan): Attempt to further interpret result, status code and error code string
+		err := fmt.Errorf("login attempt to %s failed with status: %d %s", endpointStr, resp.StatusCode, http.StatusText(resp.StatusCode))
+		if !foundV2 {
+			err = fallbackError{err: err}
+		}
+		return "", "", err
+	}
+
+	return "Login Succeeded", credentialAuthConfig.IdentityToken, nil
+}
+
+func v2AuthHTTPClient(endpoint *url.URL, authTransport http.RoundTripper, modifiers []transport.RequestModifier, creds auth.CredentialStore, scopes []auth.Scope) (*http.Client, bool, error) {
+	challengeManager, foundV2, err := PingV2Registry(endpoint, authTransport)
+	if err != nil {
+		if !foundV2 {
+			err = fallbackError{err: err}
+		}
+		return nil, foundV2, err
+	}
+
+	tokenHandlerOptions := auth.TokenHandlerOptions{
+		Transport:     authTransport,
+		Credentials:   creds,
+		OfflineAccess: true,
+		ClientID:      AuthClientID,
+		Scopes:        scopes,
+	}
+	tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions)
+	basicHandler := auth.NewBasicHandler(creds)
+	modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler))
+	tr := transport.NewTransport(authTransport, modifiers...)
+
+	return &http.Client{
+		Transport: tr,
+		Timeout:   15 * time.Second,
+	}, foundV2, nil
+}
+
+// ConvertToHostname converts a registry URL which has http|https prepended
+// to just a hostname.
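+// For example, "https://registry.example.com/v1/" becomes "registry.example.com".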
+func ConvertToHostname(url string) string {
+	stripped := url
+	if strings.HasPrefix(url, "http://") {
+		stripped = strings.TrimPrefix(url, "http://")
+	} else if strings.HasPrefix(url, "https://") {
+		stripped = strings.TrimPrefix(url, "https://")
+	}
+
+	nameParts := strings.SplitN(stripped, "/", 2)
+
+	return nameParts[0]
+}
+
+// ResolveAuthConfig matches an auth configuration to a server address or a URL
+func ResolveAuthConfig(authConfigs map[string]types.AuthConfig, index *registrytypes.IndexInfo) types.AuthConfig {
+	configKey := GetAuthConfigKey(index)
+	// First try the happy case
+	if c, found := authConfigs[configKey]; found || index.Official {
+		return c
+	}
+
+	// Maybe they have a legacy config file; we will iterate the keys,
+	// converting them to the new format and testing
+	for registry, ac := range authConfigs {
+		if configKey == ConvertToHostname(registry) {
+			return ac
+		}
+	}
+
+	// When all else fails, return an empty auth config
+	return types.AuthConfig{}
+}
+
+// PingResponseError is used when the response from a ping
+// was received but invalid.
+type PingResponseError struct {
+	Err error
+}
+
+func (err PingResponseError) Error() string {
+	return err.Err.Error()
+}
+
+// PingV2Registry attempts to ping a v2 registry and on success returns a
+// challenge manager for the supported authentication types and
+// whether v2 was confirmed by the response. If a response is received but
+// cannot be interpreted, a PingResponseError will be returned.
+func PingV2Registry(endpoint *url.URL, transport http.RoundTripper) (challenge.Manager, bool, error) {
+	var (
+		foundV2   = false
+		v2Version = auth.APIVersion{
+			Type:    "registry",
+			Version: "2.0",
+		}
+	)
+
+	pingClient := &http.Client{
+		Transport: transport,
+		Timeout:   15 * time.Second,
+	}
+	endpointStr := strings.TrimRight(endpoint.String(), "/") + "/v2/"
+	req, err := http.NewRequest("GET", endpointStr, nil)
+	if err != nil {
+		return nil, false, err
+	}
+	resp, err := pingClient.Do(req)
+	if err != nil {
+		return nil, false, err
+	}
+	defer resp.Body.Close()
+
+	versions := auth.APIVersions(resp, DefaultRegistryVersionHeader)
+	for _, pingVersion := range versions {
+		if pingVersion == v2Version {
+			// The version header indicates we're definitely
+			// talking to a v2 registry. So don't allow future
+			// fallbacks to the v1 protocol.
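+			// Callers use the returned foundV2 flag to skip the v1 fallback.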
+ + foundV2 = true + break + } + } + + challengeManager := challenge.NewSimpleManager() + if err := challengeManager.AddResponse(resp); err != nil { + return nil, foundV2, PingResponseError{ + Err: err, + } + } + + return challengeManager, foundV2, nil +} diff --git a/vendor/github.com/moby/moby/registry/auth_test.go b/vendor/github.com/moby/moby/registry/auth_test.go new file mode 100644 index 000000000..9ab71aa4f --- /dev/null +++ b/vendor/github.com/moby/moby/registry/auth_test.go @@ -0,0 +1,124 @@ +// +build !solaris + +// TODO: Support Solaris + +package registry + +import ( + "testing" + + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" +) + +func buildAuthConfigs() map[string]types.AuthConfig { + authConfigs := map[string]types.AuthConfig{} + + for _, registry := range []string{"testIndex", IndexServer} { + authConfigs[registry] = types.AuthConfig{ + Username: "docker-user", + Password: "docker-pass", + } + } + + return authConfigs +} + +func TestSameAuthDataPostSave(t *testing.T) { + authConfigs := buildAuthConfigs() + authConfig := authConfigs["testIndex"] + if authConfig.Username != "docker-user" { + t.Fail() + } + if authConfig.Password != "docker-pass" { + t.Fail() + } + if authConfig.Auth != "" { + t.Fail() + } +} + +func TestResolveAuthConfigIndexServer(t *testing.T) { + authConfigs := buildAuthConfigs() + indexConfig := authConfigs[IndexServer] + + officialIndex := ®istrytypes.IndexInfo{ + Official: true, + } + privateIndex := ®istrytypes.IndexInfo{ + Official: false, + } + + resolved := ResolveAuthConfig(authConfigs, officialIndex) + assertEqual(t, resolved, indexConfig, "Expected ResolveAuthConfig to return IndexServer") + + resolved = ResolveAuthConfig(authConfigs, privateIndex) + assertNotEqual(t, resolved, indexConfig, "Expected ResolveAuthConfig to not return IndexServer") +} + +func TestResolveAuthConfigFullURL(t *testing.T) { + authConfigs := buildAuthConfigs() + + registryAuth := types.AuthConfig{ + Username: "foo-user", + Password: "foo-pass", + } + localAuth := types.AuthConfig{ + Username: "bar-user", + Password: "bar-pass", + } + officialAuth := types.AuthConfig{ + Username: "baz-user", + Password: "baz-pass", + } + authConfigs[IndexServer] = officialAuth + + expectedAuths := map[string]types.AuthConfig{ + "registry.example.com": registryAuth, + "localhost:8000": localAuth, + "registry.com": localAuth, + } + + validRegistries := map[string][]string{ + "registry.example.com": { + "https://registry.example.com/v1/", + "http://registry.example.com/v1/", + "registry.example.com", + "registry.example.com/v1/", + }, + "localhost:8000": { + "https://localhost:8000/v1/", + "http://localhost:8000/v1/", + "localhost:8000", + "localhost:8000/v1/", + }, + "registry.com": { + "https://registry.com/v1/", + "http://registry.com/v1/", + "registry.com", + "registry.com/v1/", + }, + } + + for configKey, registries := range validRegistries { + configured, ok := expectedAuths[configKey] + if !ok { + t.Fail() + } + index := ®istrytypes.IndexInfo{ + Name: configKey, + } + for _, registry := range registries { + authConfigs[registry] = configured + resolved := ResolveAuthConfig(authConfigs, index) + if resolved.Username != configured.Username || resolved.Password != configured.Password { + t.Errorf("%s -> %v != %v\n", registry, resolved, configured) + } + delete(authConfigs, registry) + resolved = ResolveAuthConfig(authConfigs, index) + if resolved.Username == configured.Username || resolved.Password == configured.Password { + 
t.Errorf("%s -> %v == %v\n", registry, resolved, configured) + } + } + } +} diff --git a/vendor/github.com/moby/moby/registry/config.go b/vendor/github.com/moby/moby/registry/config.go new file mode 100644 index 000000000..182599e38 --- /dev/null +++ b/vendor/github.com/moby/moby/registry/config.go @@ -0,0 +1,456 @@ +package registry + +import ( + "fmt" + "net" + "net/url" + "regexp" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/opts" + "github.com/pkg/errors" + "github.com/spf13/pflag" +) + +// ServiceOptions holds command line options. +type ServiceOptions struct { + AllowNondistributableArtifacts []string `json:"allow-nondistributable-artifacts,omitempty"` + Mirrors []string `json:"registry-mirrors,omitempty"` + InsecureRegistries []string `json:"insecure-registries,omitempty"` + + // V2Only controls access to legacy registries. If it is set to true via the + // command line flag the daemon will not attempt to contact v1 legacy registries + V2Only bool `json:"disable-legacy-registry,omitempty"` +} + +// serviceConfig holds daemon configuration for the registry service. +type serviceConfig struct { + registrytypes.ServiceConfig + V2Only bool +} + +var ( + // DefaultNamespace is the default namespace + DefaultNamespace = "docker.io" + // DefaultRegistryVersionHeader is the name of the default HTTP header + // that carries Registry version info + DefaultRegistryVersionHeader = "Docker-Distribution-Api-Version" + + // IndexHostname is the index hostname + IndexHostname = "index.docker.io" + // IndexServer is used for user auth and image search + IndexServer = "https://" + IndexHostname + "/v1/" + // IndexName is the name of the index + IndexName = "docker.io" + + // NotaryServer is the endpoint serving the Notary trust server + NotaryServer = "https://notary.docker.io" + + // DefaultV2Registry is the URI of the default v2 registry + DefaultV2Registry = &url.URL{ + Scheme: "https", + Host: "registry-1.docker.io", + } +) + +var ( + // ErrInvalidRepositoryName is an error returned if the repository name did + // not have the correct form + ErrInvalidRepositoryName = errors.New("Invalid repository name (ex: \"registry.domain.tld/myrepos\")") + + emptyServiceConfig = newServiceConfig(ServiceOptions{}) +) + +var ( + validHostPortRegex = regexp.MustCompile(`^` + reference.DomainRegexp.String() + `$`) +) + +// for mocking in unit tests +var lookupIP = net.LookupIP + +// InstallCliFlags adds command-line options to the top-level flag parser for +// the current process. 
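+// For example (illustrative values): a daemon started with +// --registry-mirror=https://mirror.example.com --insecure-registry=myregistry.example.com:5000 +// populates Mirrors and InsecureRegistries respectively.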
+func (options *ServiceOptions) InstallCliFlags(flags *pflag.FlagSet) { + ana := opts.NewNamedListOptsRef("allow-nondistributable-artifacts", &options.AllowNondistributableArtifacts, ValidateIndexName) + mirrors := opts.NewNamedListOptsRef("registry-mirrors", &options.Mirrors, ValidateMirror) + insecureRegistries := opts.NewNamedListOptsRef("insecure-registries", &options.InsecureRegistries, ValidateIndexName) + + flags.Var(ana, "allow-nondistributable-artifacts", "Allow push of nondistributable artifacts to registry") + flags.Var(mirrors, "registry-mirror", "Preferred Docker registry mirror") + flags.Var(insecureRegistries, "insecure-registry", "Enable insecure registry communication") + + options.installCliPlatformFlags(flags) +} + +// newServiceConfig returns a new instance of ServiceConfig +func newServiceConfig(options ServiceOptions) *serviceConfig { + config := &serviceConfig{ + ServiceConfig: registrytypes.ServiceConfig{ + InsecureRegistryCIDRs: make([]*registrytypes.NetIPNet, 0), + IndexConfigs: make(map[string]*registrytypes.IndexInfo, 0), + // Hack: Bypass setting the mirrors to IndexConfigs since they are going away + // and Mirrors are only for the official registry anyways. + }, + V2Only: options.V2Only, + } + + config.LoadAllowNondistributableArtifacts(options.AllowNondistributableArtifacts) + config.LoadMirrors(options.Mirrors) + config.LoadInsecureRegistries(options.InsecureRegistries) + + return config +} + +// LoadAllowNondistributableArtifacts loads allow-nondistributable-artifacts registries into config. +func (config *serviceConfig) LoadAllowNondistributableArtifacts(registries []string) error { + cidrs := map[string]*registrytypes.NetIPNet{} + hostnames := map[string]bool{} + + for _, r := range registries { + if _, err := ValidateIndexName(r); err != nil { + return err + } + if validateNoScheme(r) != nil { + return fmt.Errorf("allow-nondistributable-artifacts registry %s should not contain '://'", r) + } + + if _, ipnet, err := net.ParseCIDR(r); err == nil { + // Valid CIDR. + cidrs[ipnet.String()] = (*registrytypes.NetIPNet)(ipnet) + } else if err := validateHostPort(r); err == nil { + // Must be `host:port` if not CIDR. + hostnames[r] = true + } else { + return fmt.Errorf("allow-nondistributable-artifacts registry %s is not valid: %v", r, err) + } + } + + config.AllowNondistributableArtifactsCIDRs = make([]*(registrytypes.NetIPNet), 0) + for _, c := range cidrs { + config.AllowNondistributableArtifactsCIDRs = append(config.AllowNondistributableArtifactsCIDRs, c) + } + + config.AllowNondistributableArtifactsHostnames = make([]string, 0) + for h := range hostnames { + config.AllowNondistributableArtifactsHostnames = append(config.AllowNondistributableArtifactsHostnames, h) + } + + return nil +} + +// LoadMirrors loads mirrors to config, after removing duplicates. +// Returns an error if mirrors contains an invalid mirror. +func (config *serviceConfig) LoadMirrors(mirrors []string) error { + mMap := map[string]struct{}{} + unique := []string{} + + for _, mirror := range mirrors { + m, err := ValidateMirror(mirror) + if err != nil { + return err + } + if _, exist := mMap[m]; !exist { + mMap[m] = struct{}{} + unique = append(unique, m) + } + } + + config.Mirrors = unique + + // Configure public registry since mirrors may have changed. 
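+ // The official index entry is always rebuilt as secure and official; configured + // mirrors apply only to it, never to private registries.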
+ config.IndexConfigs[IndexName] = &registrytypes.IndexInfo{ + Name: IndexName, + Mirrors: config.Mirrors, + Secure: true, + Official: true, + } + + return nil +} + +// LoadInsecureRegistries loads insecure registries to config +func (config *serviceConfig) LoadInsecureRegistries(registries []string) error { + // Localhost is by default considered an insecure registry + // This is a stop-gap for people who are running a private registry on localhost (especially on Boot2docker). + // + // TODO: should we deprecate this once it is easier for people to set up a TLS registry or change + // daemon flags on boot2docker? + registries = append(registries, "127.0.0.0/8") + + // Store original InsecureRegistryCIDRs and IndexConfigs + // Clean InsecureRegistryCIDRs and IndexConfigs in config, as the passed registries have all insecure registry info. + originalCIDRs := config.ServiceConfig.InsecureRegistryCIDRs + originalIndexInfos := config.ServiceConfig.IndexConfigs + + config.ServiceConfig.InsecureRegistryCIDRs = make([]*registrytypes.NetIPNet, 0) + config.ServiceConfig.IndexConfigs = make(map[string]*registrytypes.IndexInfo, 0) + +skip: + for _, r := range registries { + // validate insecure registry + if _, err := ValidateIndexName(r); err != nil { + // before returning err, roll back to original data + config.ServiceConfig.InsecureRegistryCIDRs = originalCIDRs + config.ServiceConfig.IndexConfigs = originalIndexInfos + return err + } + if strings.HasPrefix(strings.ToLower(r), "http://") { + logrus.Warnf("insecure registry %s should not contain 'http://' and 'http://' has been removed from the insecure registry config", r) + r = r[7:] + } else if strings.HasPrefix(strings.ToLower(r), "https://") { + logrus.Warnf("insecure registry %s should not contain 'https://' and 'https://' has been removed from the insecure registry config", r) + r = r[8:] + } else if validateNoScheme(r) != nil { + // Insecure registry should not contain '://' + // before returning err, roll back to original data + config.ServiceConfig.InsecureRegistryCIDRs = originalCIDRs + config.ServiceConfig.IndexConfigs = originalIndexInfos + return fmt.Errorf("insecure registry %s should not contain '://'", r) + } + // Check if CIDR was passed to --insecure-registry + _, ipnet, err := net.ParseCIDR(r) + if err == nil { + // Valid CIDR. If ipnet is already in config.InsecureRegistryCIDRs, skip. + data := (*registrytypes.NetIPNet)(ipnet) + for _, value := range config.InsecureRegistryCIDRs { + if value.IP.String() == data.IP.String() && value.Mask.String() == data.Mask.String() { + continue skip + } + } + // ipnet is not found, add it in config.InsecureRegistryCIDRs + config.InsecureRegistryCIDRs = append(config.InsecureRegistryCIDRs, data) + + } else { + if err := validateHostPort(r); err != nil { + config.ServiceConfig.InsecureRegistryCIDRs = originalCIDRs + config.ServiceConfig.IndexConfigs = originalIndexInfos + return fmt.Errorf("insecure registry %s is not valid: %v", r, err) + + } + // Assume `host:port` if not CIDR. + config.IndexConfigs[r] = &registrytypes.IndexInfo{ + Name: r, + Mirrors: make([]string, 0), + Secure: false, + Official: false, + } + } + } + + // Configure public registry. + config.IndexConfigs[IndexName] = &registrytypes.IndexInfo{ + Name: IndexName, + Mirrors: config.Mirrors, + Secure: true, + Official: true, + } + + return nil +} + +// allowNondistributableArtifacts returns true if the provided hostname is part of the list of registries +// that allow push of nondistributable artifacts.
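+// (Typically these are foreign layers, for example Windows base images, whose licenses +// restrict pushing them to third-party registries.)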
+// +// The list can contain elements with CIDR notation to specify a whole subnet. If the subnet contains an IP +// of the registry specified by hostname, true is returned. +// +// hostname should be a URL.Host (`host:port` or `host`) where the `host` part can be either a domain name +// or an IP address. If it is a domain name, then it will be resolved to IP addresses for matching. If +// resolution fails, CIDR matching is not performed. +func allowNondistributableArtifacts(config *serviceConfig, hostname string) bool { + for _, h := range config.AllowNondistributableArtifactsHostnames { + if h == hostname { + return true + } + } + + return isCIDRMatch(config.AllowNondistributableArtifactsCIDRs, hostname) +} + +// isSecureIndex returns false if the provided indexName is part of the list of insecure registries +// Insecure registries accept HTTP and/or accept HTTPS with certificates from unknown CAs. +// +// The list of insecure registries can contain an element with CIDR notation to specify a whole subnet. +// If the subnet contains one of the IPs of the registry specified by indexName, the latter is considered +// insecure. +// +// indexName should be a URL.Host (`host:port` or `host`) where the `host` part can be either a domain name +// or an IP address. If it is a domain name, then it will be resolved in order to check if the IP is contained +// in a subnet. If the resolving is not successful, isSecureIndex will only try to match hostname to any element +// of insecureRegistries. +func isSecureIndex(config *serviceConfig, indexName string) bool { + // Check for configured index, first. This is needed in case isSecureIndex + // is called from anything besides newIndexInfo, in order to honor per-index configurations. + if index, ok := config.IndexConfigs[indexName]; ok { + return index.Secure + } + + return !isCIDRMatch(config.InsecureRegistryCIDRs, indexName) +} + +// isCIDRMatch returns true if URLHost matches an element of cidrs. URLHost is a URL.Host (`host:port` or `host`) +// where the `host` part can be either a domain name or an IP address. If it is a domain name, then it will be +// resolved to IP addresses for matching. If resolution fails, false is returned. +func isCIDRMatch(cidrs []*registrytypes.NetIPNet, URLHost string) bool { + host, _, err := net.SplitHostPort(URLHost) + if err != nil { + // Assume URLHost is of the form `host` without the port and go on. + host = URLHost + } + + addrs, err := lookupIP(host) + if err != nil { + ip := net.ParseIP(host) + if ip != nil { + addrs = []net.IP{ip} + } + + // if ip == nil, then `host` is neither an IP nor it could be looked up, + // either because the index is unreachable, or because the index is behind an HTTP proxy. + // So, len(addrs) == 0 and we're not aborting. + } + + // Try CIDR notation only if addrs has any elements, i.e. if `host`'s IP could be determined. 
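+ // For example (illustrative): with cidrs containing 127.0.0.0/8, the default insecure + // entry added by LoadInsecureRegistries, URLHost "127.0.0.1:5000" splits to host + // "127.0.0.1", which parses as an IP inside that subnet, so the loop reports a match.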
+ for _, addr := range addrs { + for _, ipnet := range cidrs { + // check if the addr falls in the subnet + if (*net.IPNet)(ipnet).Contains(addr) { + return true + } + } + } + + return false +} + +// ValidateMirror validates an HTTP(S) registry mirror +func ValidateMirror(val string) (string, error) { + uri, err := url.Parse(val) + if err != nil { + return "", fmt.Errorf("invalid mirror: %q is not a valid URI", val) + } + if uri.Scheme != "http" && uri.Scheme != "https" { + return "", fmt.Errorf("invalid mirror: unsupported scheme %q in %q", uri.Scheme, uri) + } + if (uri.Path != "" && uri.Path != "/") || uri.RawQuery != "" || uri.Fragment != "" { + return "", fmt.Errorf("invalid mirror: path, query, or fragment at end of the URI %q", uri) + } + if uri.User != nil { + // strip password from output + uri.User = url.UserPassword(uri.User.Username(), "xxxxx") + return "", fmt.Errorf("invalid mirror: username/password not allowed in URI %q", uri) + } + return strings.TrimSuffix(val, "/") + "/", nil +} + +// ValidateIndexName validates an index name. +func ValidateIndexName(val string) (string, error) { + // TODO: upstream this check to the reference package + if val == "index.docker.io" { + val = "docker.io" + } + if strings.HasPrefix(val, "-") || strings.HasSuffix(val, "-") { + return "", fmt.Errorf("Invalid index name (%s). Cannot begin or end with a hyphen.", val) + } + return val, nil +} + +func validateNoScheme(reposName string) error { + if strings.Contains(reposName, "://") { + // It cannot contain a scheme! + return ErrInvalidRepositoryName + } + return nil +} + +func validateHostPort(s string) error { + // Split host and port, and in case s cannot be split, assume host only + host, port, err := net.SplitHostPort(s) + if err != nil { + host = s + port = "" + } + // If match against the `host:port` pattern fails, + // it might be `IPv6:port`, which will be captured by net.ParseIP(host) + if !validHostPortRegex.MatchString(s) && net.ParseIP(host) == nil { + return fmt.Errorf("invalid host %q", host) + } + if port != "" { + v, err := strconv.Atoi(port) + if err != nil { + return err + } + if v < 0 || v > 65535 { + return fmt.Errorf("invalid port %q", port) + } + } + return nil +} + +// newIndexInfo returns IndexInfo configuration from indexName +func newIndexInfo(config *serviceConfig, indexName string) (*registrytypes.IndexInfo, error) { + var err error + indexName, err = ValidateIndexName(indexName) + if err != nil { + return nil, err + } + + // Return any configured index info, first. + if index, ok := config.IndexConfigs[indexName]; ok { + return index, nil + } + + // Construct a non-configured index info. + index := &registrytypes.IndexInfo{ + Name: indexName, + Mirrors: make([]string, 0), + Official: false, + } + index.Secure = isSecureIndex(config, indexName) + return index, nil +} + +// GetAuthConfigKey special-cases using the full index address of the official +// index as the AuthConfig key, and uses the (host)name[:port] for private indexes.
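+// For example (illustrative): +// key := GetAuthConfigKey(&registrytypes.IndexInfo{Name: "registry.example.com"}) +// yields key == "registry.example.com", while an official index yields IndexServer.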
+func GetAuthConfigKey(index *registrytypes.IndexInfo) string { + if index.Official { + return IndexServer + } + return index.Name +} + +// newRepositoryInfo validates and breaks down a repository name into a RepositoryInfo +func newRepositoryInfo(config *serviceConfig, name reference.Named) (*RepositoryInfo, error) { + index, err := newIndexInfo(config, reference.Domain(name)) + if err != nil { + return nil, err + } + official := !strings.ContainsRune(reference.FamiliarName(name), '/') + + return &RepositoryInfo{ + Name: reference.TrimNamed(name), + Index: index, + Official: official, + }, nil +} + +// ParseRepositoryInfo performs the breakdown of a repository name into a RepositoryInfo, but +// lacks registry configuration. +func ParseRepositoryInfo(reposName reference.Named) (*RepositoryInfo, error) { + return newRepositoryInfo(emptyServiceConfig, reposName) +} + +// ParseSearchIndexInfo will use repository name to get back an indexInfo. +func ParseSearchIndexInfo(reposName string) (*registrytypes.IndexInfo, error) { + indexName, _ := splitReposSearchTerm(reposName) + + indexInfo, err := newIndexInfo(emptyServiceConfig, indexName) + if err != nil { + return nil, err + } + return indexInfo, nil +} diff --git a/vendor/github.com/moby/moby/registry/config_test.go b/vendor/github.com/moby/moby/registry/config_test.go new file mode 100644 index 000000000..8cb7e5a54 --- /dev/null +++ b/vendor/github.com/moby/moby/registry/config_test.go @@ -0,0 +1,260 @@ +package registry + +import ( + "reflect" + "sort" + "strings" + "testing" +) + +func TestLoadAllowNondistributableArtifacts(t *testing.T) { + testCases := []struct { + registries []string + cidrStrs []string + hostnames []string + err string + }{ + { + registries: []string{"1.2.3.0/24"}, + cidrStrs: []string{"1.2.3.0/24"}, + }, + { + registries: []string{"2001:db8::/120"}, + cidrStrs: []string{"2001:db8::/120"}, + }, + { + registries: []string{"127.0.0.1"}, + hostnames: []string{"127.0.0.1"}, + }, + { + registries: []string{"127.0.0.1:8080"}, + hostnames: []string{"127.0.0.1:8080"}, + }, + { + registries: []string{"2001:db8::1"}, + hostnames: []string{"2001:db8::1"}, + }, + { + registries: []string{"[2001:db8::1]:80"}, + hostnames: []string{"[2001:db8::1]:80"}, + }, + { + registries: []string{"[2001:db8::1]:80"}, + hostnames: []string{"[2001:db8::1]:80"}, + }, + { + registries: []string{"1.2.3.0/24", "2001:db8::/120", "127.0.0.1", "127.0.0.1:8080"}, + cidrStrs: []string{"1.2.3.0/24", "2001:db8::/120"}, + hostnames: []string{"127.0.0.1", "127.0.0.1:8080"}, + }, + + { + registries: []string{"http://mytest.com"}, + err: "allow-nondistributable-artifacts registry http://mytest.com should not contain '://'", + }, + { + registries: []string{"https://mytest.com"}, + err: "allow-nondistributable-artifacts registry https://mytest.com should not contain '://'", + }, + { + registries: []string{"HTTP://mytest.com"}, + err: "allow-nondistributable-artifacts registry HTTP://mytest.com should not contain '://'", + }, + { + registries: []string{"svn://mytest.com"}, + err: "allow-nondistributable-artifacts registry svn://mytest.com should not contain '://'", + }, + { + registries: []string{"-invalid-registry"}, + err: "Cannot begin or end with a hyphen", + }, + { + registries: []string{`mytest-.com`}, + err: `allow-nondistributable-artifacts registry mytest-.com is not valid: invalid host "mytest-.com"`, + }, + { + registries: []string{`1200:0000:AB00:1234:0000:2552:7777:1313:8080`}, + err: `allow-nondistributable-artifacts registry 
1200:0000:AB00:1234:0000:2552:7777:1313:8080 is not valid: invalid host "1200:0000:AB00:1234:0000:2552:7777:1313:8080"`, + }, + { + registries: []string{`mytest.com:500000`}, + err: `allow-nondistributable-artifacts registry mytest.com:500000 is not valid: invalid port "500000"`, + }, + { + registries: []string{`"mytest.com"`}, + err: `allow-nondistributable-artifacts registry "mytest.com" is not valid: invalid host "\"mytest.com\""`, + }, + { + registries: []string{`"mytest.com:5000"`}, + err: `allow-nondistributable-artifacts registry "mytest.com:5000" is not valid: invalid host "\"mytest.com"`, + }, + } + for _, testCase := range testCases { + config := newServiceConfig(ServiceOptions{}) + err := config.LoadAllowNondistributableArtifacts(testCase.registries) + if testCase.err == "" { + if err != nil { + t.Fatalf("expect no error, got '%s'", err) + } + + cidrStrs := []string{} + for _, c := range config.AllowNondistributableArtifactsCIDRs { + cidrStrs = append(cidrStrs, c.String()) + } + + sort.Strings(testCase.cidrStrs) + sort.Strings(cidrStrs) + if (len(testCase.cidrStrs) > 0 || len(cidrStrs) > 0) && !reflect.DeepEqual(testCase.cidrStrs, cidrStrs) { + t.Fatalf("expect AllowNondistributableArtifactsCIDRs to be '%+v', got '%+v'", testCase.cidrStrs, cidrStrs) + } + + sort.Strings(testCase.hostnames) + sort.Strings(config.AllowNondistributableArtifactsHostnames) + if (len(testCase.hostnames) > 0 || len(config.AllowNondistributableArtifactsHostnames) > 0) && !reflect.DeepEqual(testCase.hostnames, config.AllowNondistributableArtifactsHostnames) { + t.Fatalf("expect AllowNondistributableArtifactsHostnames to be '%+v', got '%+v'", testCase.hostnames, config.AllowNondistributableArtifactsHostnames) + } + } else { + if err == nil { + t.Fatalf("expect error '%s', got no error", testCase.err) + } + if !strings.Contains(err.Error(), testCase.err) { + t.Fatalf("expect error '%s', got '%s'", testCase.err, err) + } + } + } +} + +func TestValidateMirror(t *testing.T) { + valid := []string{ + "http://mirror-1.com", + "http://mirror-1.com/", + "https://mirror-1.com", + "https://mirror-1.com/", + "http://localhost", + "https://localhost", + "http://localhost:5000", + "https://localhost:5000", + "http://127.0.0.1", + "https://127.0.0.1", + "http://127.0.0.1:5000", + "https://127.0.0.1:5000", + } + + invalid := []string{ + "!invalid!://%as%", + "ftp://mirror-1.com", + "http://mirror-1.com/?q=foo", + "http://mirror-1.com/v1/", + "http://mirror-1.com/v1/?q=foo", + "http://mirror-1.com/v1/?q=foo#frag", + "http://mirror-1.com?q=foo", + "https://mirror-1.com#frag", + "https://mirror-1.com/#frag", + "http://foo:bar@mirror-1.com/", + "https://mirror-1.com/v1/", + "https://mirror-1.com/v1/#", + "https://mirror-1.com?q", + } + + for _, address := range valid { + if ret, err := ValidateMirror(address); err != nil || ret == "" { + t.Errorf("ValidateMirror(`"+address+"`) got %s %s", ret, err) + } + } + + for _, address := range invalid { + if ret, err := ValidateMirror(address); err == nil || ret != "" { + t.Errorf("ValidateMirror(`"+address+"`) got %s %s", ret, err) + } + } +} + +func TestLoadInsecureRegistries(t *testing.T) { + testCases := []struct { + registries []string + index string + err string + }{ + { + registries: []string{"127.0.0.1"}, + index: "127.0.0.1", + }, + { + registries: []string{"127.0.0.1:8080"}, + index: "127.0.0.1:8080", + }, + { + registries: []string{"2001:db8::1"}, + index: "2001:db8::1", + }, + { + registries: []string{"[2001:db8::1]:80"}, + index: "[2001:db8::1]:80", + }, + { + registries: 
[]string{"http://mytest.com"}, + index: "mytest.com", + }, + { + registries: []string{"https://mytest.com"}, + index: "mytest.com", + }, + { + registries: []string{"HTTP://mytest.com"}, + index: "mytest.com", + }, + { + registries: []string{"svn://mytest.com"}, + err: "insecure registry svn://mytest.com should not contain '://'", + }, + { + registries: []string{"-invalid-registry"}, + err: "Cannot begin or end with a hyphen", + }, + { + registries: []string{`mytest-.com`}, + err: `insecure registry mytest-.com is not valid: invalid host "mytest-.com"`, + }, + { + registries: []string{`1200:0000:AB00:1234:0000:2552:7777:1313:8080`}, + err: `insecure registry 1200:0000:AB00:1234:0000:2552:7777:1313:8080 is not valid: invalid host "1200:0000:AB00:1234:0000:2552:7777:1313:8080"`, + }, + { + registries: []string{`mytest.com:500000`}, + err: `insecure registry mytest.com:500000 is not valid: invalid port "500000"`, + }, + { + registries: []string{`"mytest.com"`}, + err: `insecure registry "mytest.com" is not valid: invalid host "\"mytest.com\""`, + }, + { + registries: []string{`"mytest.com:5000"`}, + err: `insecure registry "mytest.com:5000" is not valid: invalid host "\"mytest.com"`, + }, + } + for _, testCase := range testCases { + config := newServiceConfig(ServiceOptions{}) + err := config.LoadInsecureRegistries(testCase.registries) + if testCase.err == "" { + if err != nil { + t.Fatalf("expect no error, got '%s'", err) + } + match := false + for index := range config.IndexConfigs { + if index == testCase.index { + match = true + } + } + if !match { + t.Fatalf("expect index configs to contain '%s', got %+v", testCase.index, config.IndexConfigs) + } + } else { + if err == nil { + t.Fatalf("expect error '%s', got no error", testCase.err) + } + if !strings.Contains(err.Error(), testCase.err) { + t.Fatalf("expect error '%s', got '%s'", testCase.err, err) + } + } + } +} diff --git a/vendor/github.com/moby/moby/registry/config_unix.go b/vendor/github.com/moby/moby/registry/config_unix.go new file mode 100644 index 000000000..fdc39a1d6 --- /dev/null +++ b/vendor/github.com/moby/moby/registry/config_unix.go @@ -0,0 +1,25 @@ +// +build !windows + +package registry + +import ( + "github.com/spf13/pflag" +) + +var ( + // CertsDir is the directory where certificates are stored + CertsDir = "/etc/docker/certs.d" +) + +// cleanPath is used to ensure that a directory name is valid on the target +// platform. It will be passed in something *similar* to a URL such as +// https:/index.docker.io/v1. Not all platforms support directory names +// which contain those characters (such as : on Windows) +func cleanPath(s string) string { + return s +} + +// installCliPlatformFlags handles any platform specific flags for the service. +func (options *ServiceOptions) installCliPlatformFlags(flags *pflag.FlagSet) { + flags.BoolVar(&options.V2Only, "disable-legacy-registry", true, "Disable contacting legacy registries") +} diff --git a/vendor/github.com/moby/moby/registry/config_windows.go b/vendor/github.com/moby/moby/registry/config_windows.go new file mode 100644 index 000000000..d1b313dc1 --- /dev/null +++ b/vendor/github.com/moby/moby/registry/config_windows.go @@ -0,0 +1,25 @@ +package registry + +import ( + "os" + "path/filepath" + "strings" + + "github.com/spf13/pflag" +) + +// CertsDir is the directory where certificates are stored +var CertsDir = os.Getenv("programdata") + `\docker\certs.d` + +// cleanPath is used to ensure that a directory name is valid on the target +// platform. 
It will be passed in something *similar* to a URL such as +// https:\index.docker.io\v1. Not all platforms support directory names +// which contain those characters (such as : on Windows) +func cleanPath(s string) string { + return filepath.FromSlash(strings.Replace(s, ":", "", -1)) +} + +// installCliPlatformFlags handles any platform specific flags for the service. +func (options *ServiceOptions) installCliPlatformFlags(flags *pflag.FlagSet) { + // No Windows specific flags. +} diff --git a/vendor/github.com/moby/moby/registry/endpoint_test.go b/vendor/github.com/moby/moby/registry/endpoint_test.go new file mode 100644 index 000000000..8451d3f67 --- /dev/null +++ b/vendor/github.com/moby/moby/registry/endpoint_test.go @@ -0,0 +1,78 @@ +package registry + +import ( + "net/http" + "net/http/httptest" + "net/url" + "testing" +) + +func TestEndpointParse(t *testing.T) { + testData := []struct { + str string + expected string + }{ + {IndexServer, IndexServer}, + {"http://0.0.0.0:5000/v1/", "http://0.0.0.0:5000/v1/"}, + {"http://0.0.0.0:5000", "http://0.0.0.0:5000/v1/"}, + {"0.0.0.0:5000", "https://0.0.0.0:5000/v1/"}, + {"http://0.0.0.0:5000/nonversion/", "http://0.0.0.0:5000/nonversion/v1/"}, + {"http://0.0.0.0:5000/v0/", "http://0.0.0.0:5000/v0/v1/"}, + } + for _, td := range testData { + e, err := newV1EndpointFromStr(td.str, nil, "", nil) + if err != nil { + t.Errorf("%q: %s", td.str, err) + } + if e == nil { + t.Logf("something's fishy, endpoint for %q is nil", td.str) + continue + } + if e.String() != td.expected { + t.Errorf("expected %q, got %q", td.expected, e.String()) + } + } +} + +func TestEndpointParseInvalid(t *testing.T) { + testData := []string{ + "http://0.0.0.0:5000/v2/", + } + for _, td := range testData { + e, err := newV1EndpointFromStr(td, nil, "", nil) + if err == nil { + t.Errorf("expected error parsing %q: parsed as %q", td, e) + } + } +} + +// Ensure that a registry endpoint that responds with a 401 only is determined +// to be a valid v1 registry endpoint +func TestValidateEndpoint(t *testing.T) { + requireBasicAuthHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("WWW-Authenticate", `Basic realm="localhost"`) + w.WriteHeader(http.StatusUnauthorized) + }) + + // Make a test server which should validate as a v1 server. + testServer := httptest.NewServer(requireBasicAuthHandler) + defer testServer.Close() + + testServerURL, err := url.Parse(testServer.URL) + if err != nil { + t.Fatal(err) + } + + testEndpoint := V1Endpoint{ + URL: testServerURL, + client: HTTPClient(NewTransport(nil)), + } + + if err = validateEndpoint(&testEndpoint); err != nil { + t.Fatal(err) + } + + if testEndpoint.URL.Scheme != "http" { + t.Fatalf("expecting to validate endpoint as http, got url %s", testEndpoint.String()) + } +} diff --git a/vendor/github.com/moby/moby/registry/endpoint_v1.go b/vendor/github.com/moby/moby/registry/endpoint_v1.go new file mode 100644 index 000000000..c5ca961dd --- /dev/null +++ b/vendor/github.com/moby/moby/registry/endpoint_v1.go @@ -0,0 +1,198 @@ +package registry + +import ( + "crypto/tls" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/client/transport" + registrytypes "github.com/docker/docker/api/types/registry" +) + +// V1Endpoint stores basic information about a V1 registry endpoint. 
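+// Construct one with NewV1Endpoint, which builds the TLS configuration for the index +// and pings the endpoint to validate it.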
+type V1Endpoint struct { + client *http.Client + URL *url.URL + IsSecure bool +} + +// NewV1Endpoint parses the given address to return a registry endpoint. +func NewV1Endpoint(index *registrytypes.IndexInfo, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { + tlsConfig, err := newTLSConfig(index.Name, index.Secure) + if err != nil { + return nil, err + } + + endpoint, err := newV1EndpointFromStr(GetAuthConfigKey(index), tlsConfig, userAgent, metaHeaders) + if err != nil { + return nil, err + } + + if err := validateEndpoint(endpoint); err != nil { + return nil, err + } + + return endpoint, nil +} + +func validateEndpoint(endpoint *V1Endpoint) error { + logrus.Debugf("pinging registry endpoint %s", endpoint) + + // Try HTTPS ping to registry + endpoint.URL.Scheme = "https" + if _, err := endpoint.Ping(); err != nil { + if endpoint.IsSecure { + // If registry is secure and HTTPS failed, show user the error and tell them about `--insecure-registry` + // in case that's what they need. DO NOT accept unknown CA certificates, and DO NOT fallback to HTTP. + return fmt.Errorf("invalid registry endpoint %s: %v. If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add `--insecure-registry %s` to the daemon's arguments. In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/%s/ca.crt", endpoint, err, endpoint.URL.Host, endpoint.URL.Host) + } + + // If registry is insecure and HTTPS failed, fallback to HTTP. + logrus.Debugf("Error from registry %q marked as insecure: %v. Insecurely falling back to HTTP", endpoint, err) + endpoint.URL.Scheme = "http" + + var err2 error + if _, err2 = endpoint.Ping(); err2 == nil { + return nil + } + + return fmt.Errorf("invalid registry endpoint %q. HTTPS attempt: %v. HTTP attempt: %v", endpoint, err, err2) + } + + return nil +} + +func newV1Endpoint(address url.URL, tlsConfig *tls.Config, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { + endpoint := &V1Endpoint{ + IsSecure: (tlsConfig == nil || !tlsConfig.InsecureSkipVerify), + URL: new(url.URL), + } + + *endpoint.URL = address + + // TODO(tiborvass): make sure a ConnectTimeout transport is used + tr := NewTransport(tlsConfig) + endpoint.client = HTTPClient(transport.NewTransport(tr, DockerHeaders(userAgent, metaHeaders)...)) + return endpoint, nil +} + +// trimV1Address trims the version off the address and returns the +// trimmed address or an error if there is a non-V1 version. 
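+// For example (illustrative): "https://registry.example.com/v1/" trims to +// "https://registry.example.com", while a recognized non-V1 suffix such as "/v2/" +// produces an error.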
+func trimV1Address(address string) (string, error) { + var ( + chunks []string + apiVersionStr string + ) + + if strings.HasSuffix(address, "/") { + address = address[:len(address)-1] + } + + chunks = strings.Split(address, "/") + apiVersionStr = chunks[len(chunks)-1] + if apiVersionStr == "v1" { + return strings.Join(chunks[:len(chunks)-1], "/"), nil + } + + for k, v := range apiVersions { + if k != APIVersion1 && apiVersionStr == v { + return "", fmt.Errorf("unsupported V1 version path %s", apiVersionStr) + } + } + + return address, nil +} + +func newV1EndpointFromStr(address string, tlsConfig *tls.Config, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { + if !strings.HasPrefix(address, "http://") && !strings.HasPrefix(address, "https://") { + address = "https://" + address + } + + address, err := trimV1Address(address) + if err != nil { + return nil, err + } + + uri, err := url.Parse(address) + if err != nil { + return nil, err + } + + endpoint, err := newV1Endpoint(*uri, tlsConfig, userAgent, metaHeaders) + if err != nil { + return nil, err + } + + return endpoint, nil +} + +// Get the formatted URL for the root of this registry Endpoint +func (e *V1Endpoint) String() string { + return e.URL.String() + "/v1/" +} + +// Path returns a formatted string for the URL +// of this endpoint with the given path appended. +func (e *V1Endpoint) Path(path string) string { + return e.URL.String() + "/v1/" + path +} + +// Ping returns a PingResult which indicates whether the registry is standalone or not. +func (e *V1Endpoint) Ping() (PingResult, error) { + logrus.Debugf("attempting v1 ping for registry endpoint %s", e) + + if e.String() == IndexServer { + // Skip the check, we know this one is valid + // (and we never want to fallback to http in case of error) + return PingResult{Standalone: false}, nil + } + + req, err := http.NewRequest("GET", e.Path("_ping"), nil) + if err != nil { + return PingResult{Standalone: false}, err + } + + resp, err := e.client.Do(req) + if err != nil { + return PingResult{Standalone: false}, err + } + + defer resp.Body.Close() + + jsonString, err := ioutil.ReadAll(resp.Body) + if err != nil { + return PingResult{Standalone: false}, fmt.Errorf("error while reading the http response: %s", err) + } + + // If the header is absent, we assume true for compatibility with earlier + // versions of the registry. default to true + info := PingResult{ + Standalone: true, + } + if err := json.Unmarshal(jsonString, &info); err != nil { + logrus.Debugf("Error unmarshaling the _ping PingResult: %s", err) + // don't stop here. Just assume sane defaults + } + if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" { + logrus.Debugf("Registry version header: '%s'", hdr) + info.Version = hdr + } + logrus.Debugf("PingResult.Version: %q", info.Version) + + standalone := resp.Header.Get("X-Docker-Registry-Standalone") + logrus.Debugf("Registry standalone header: '%s'", standalone) + // Accepted values are "true" (case-insensitive) and "1". 
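+ // Any other non-empty value is treated as explicitly not standalone.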
+ if strings.EqualFold(standalone, "true") || standalone == "1" { + info.Standalone = true + } else if len(standalone) > 0 { + // there is a header set, and it is not "true" or "1", so assume fails + info.Standalone = false + } + logrus.Debugf("PingResult.Standalone: %t", info.Standalone) + return info, nil +} diff --git a/vendor/github.com/moby/moby/registry/registry.go b/vendor/github.com/moby/moby/registry/registry.go new file mode 100644 index 000000000..17fa97ce3 --- /dev/null +++ b/vendor/github.com/moby/moby/registry/registry.go @@ -0,0 +1,191 @@ +// Package registry contains client primitives to interact with a remote Docker registry. +package registry + +import ( + "crypto/tls" + "errors" + "fmt" + "io/ioutil" + "net" + "net/http" + "os" + "path/filepath" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" +) + +var ( + // ErrAlreadyExists is an error returned if an image being pushed + // already exists on the remote side + ErrAlreadyExists = errors.New("Image already exists") +) + +func newTLSConfig(hostname string, isSecure bool) (*tls.Config, error) { + // PreferredServerCipherSuites should have no effect + tlsConfig := tlsconfig.ServerDefault() + + tlsConfig.InsecureSkipVerify = !isSecure + + if isSecure && CertsDir != "" { + hostDir := filepath.Join(CertsDir, cleanPath(hostname)) + logrus.Debugf("hostDir: %s", hostDir) + if err := ReadCertsDirectory(tlsConfig, hostDir); err != nil { + return nil, err + } + } + + return tlsConfig, nil +} + +func hasFile(files []os.FileInfo, name string) bool { + for _, f := range files { + if f.Name() == name { + return true + } + } + return false +} + +// ReadCertsDirectory reads the directory for TLS certificates +// including roots and certificate pairs and updates the +// provided TLS configuration. +func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error { + fs, err := ioutil.ReadDir(directory) + if err != nil && !os.IsNotExist(err) { + return err + } + + for _, f := range fs { + if strings.HasSuffix(f.Name(), ".crt") { + if tlsConfig.RootCAs == nil { + systemPool, err := tlsconfig.SystemCertPool() + if err != nil { + return fmt.Errorf("unable to get system cert pool: %v", err) + } + tlsConfig.RootCAs = systemPool + } + logrus.Debugf("crt: %s", filepath.Join(directory, f.Name())) + data, err := ioutil.ReadFile(filepath.Join(directory, f.Name())) + if err != nil { + return err + } + tlsConfig.RootCAs.AppendCertsFromPEM(data) + } + if strings.HasSuffix(f.Name(), ".cert") { + certName := f.Name() + keyName := certName[:len(certName)-5] + ".key" + logrus.Debugf("cert: %s", filepath.Join(directory, f.Name())) + if !hasFile(fs, keyName) { + return fmt.Errorf("Missing key %s for client certificate %s. 
Note that CA certificates should use the extension .crt.", keyName, certName) + } + cert, err := tls.LoadX509KeyPair(filepath.Join(directory, certName), filepath.Join(directory, keyName)) + if err != nil { + return err + } + tlsConfig.Certificates = append(tlsConfig.Certificates, cert) + } + if strings.HasSuffix(f.Name(), ".key") { + keyName := f.Name() + certName := keyName[:len(keyName)-4] + ".cert" + logrus.Debugf("key: %s", filepath.Join(directory, f.Name())) + if !hasFile(fs, certName) { + return fmt.Errorf("Missing client certificate %s for key %s", certName, keyName) + } + } + } + + return nil +} + +// DockerHeaders returns request modifiers with a User-Agent and metaHeaders +func DockerHeaders(userAgent string, metaHeaders http.Header) []transport.RequestModifier { + modifiers := []transport.RequestModifier{} + if userAgent != "" { + modifiers = append(modifiers, transport.NewHeaderRequestModifier(http.Header{ + "User-Agent": []string{userAgent}, + })) + } + if metaHeaders != nil { + modifiers = append(modifiers, transport.NewHeaderRequestModifier(metaHeaders)) + } + return modifiers +} + +// HTTPClient returns an HTTP client structure which uses the given transport +// and contains the necessary headers for redirected requests +func HTTPClient(transport http.RoundTripper) *http.Client { + return &http.Client{ + Transport: transport, + CheckRedirect: addRequiredHeadersToRedirectedRequests, + } +} + +func trustedLocation(req *http.Request) bool { + var ( + trusteds = []string{"docker.com", "docker.io"} + hostname = strings.SplitN(req.Host, ":", 2)[0] + ) + if req.URL.Scheme != "https" { + return false + } + + for _, trusted := range trusteds { + if hostname == trusted || strings.HasSuffix(hostname, "."+trusted) { + return true + } + } + return false +} + +// addRequiredHeadersToRedirectedRequests adds the necessary redirection headers +// for redirected requests +func addRequiredHeadersToRedirectedRequests(req *http.Request, via []*http.Request) error { + if via != nil && via[0] != nil { + if trustedLocation(req) && trustedLocation(via[0]) { + req.Header = via[0].Header + return nil + } + for k, v := range via[0].Header { + if k != "Authorization" { + for _, vv := range v { + req.Header.Add(k, vv) + } + } + } + } + return nil +} + +// NewTransport returns a new HTTP transport. If tlsConfig is nil, it uses the +// default TLS configuration. 
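+// The returned transport honors proxy settings from the environment and disables +// keep-alives (see the TODO in the function body about reusing idle connections).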
+func NewTransport(tlsConfig *tls.Config) *http.Transport { + if tlsConfig == nil { + tlsConfig = tlsconfig.ServerDefault() + } + + direct := &net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + } + + base := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: direct.Dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: tlsConfig, + // TODO(dmcgowan): Call close idle connections when complete and use keep alive + DisableKeepAlives: true, + } + + proxyDialer, err := sockets.DialerFromEnvironment(direct) + if err == nil { + base.Dial = proxyDialer.Dial + } + return base +} diff --git a/vendor/github.com/moby/moby/registry/registry_mock_test.go b/vendor/github.com/moby/moby/registry/registry_mock_test.go new file mode 100644 index 000000000..58b05d384 --- /dev/null +++ b/vendor/github.com/moby/moby/registry/registry_mock_test.go @@ -0,0 +1,478 @@ +// +build !solaris + +package registry + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "net/url" + "strconv" + "strings" + "testing" + "time" + + "github.com/docker/distribution/reference" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/gorilla/mux" + + "github.com/Sirupsen/logrus" +) + +var ( + testHTTPServer *httptest.Server + testHTTPSServer *httptest.Server + testLayers = map[string]map[string]string{ + "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20": { + "json": `{"id":"77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", + "comment":"test base image","created":"2013-03-23T12:53:11.10432-07:00", + "container_config":{"Hostname":"","User":"","Memory":0,"MemorySwap":0, + "CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false, + "Tty":false,"OpenStdin":false,"StdinOnce":false, + "Env":null,"Cmd":null,"Dns":null,"Image":"","Volumes":null, + "VolumesFrom":"","Entrypoint":null},"Size":424242}`, + "checksum_simple": "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", + "checksum_tarsum": "tarsum+sha256:4409a0685741ca86d38df878ed6f8cbba4c99de5dc73cd71aef04be3bb70be7c", + "ancestry": `["77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20"]`, + "layer": string([]byte{ + 0x1f, 0x8b, 0x08, 0x08, 0x0e, 0xb0, 0xee, 0x51, 0x02, 0x03, 0x6c, 0x61, 0x79, 0x65, + 0x72, 0x2e, 0x74, 0x61, 0x72, 0x00, 0xed, 0xd2, 0x31, 0x0e, 0xc2, 0x30, 0x0c, 0x05, + 0x50, 0xcf, 0x9c, 0xc2, 0x27, 0x48, 0xed, 0x38, 0x4e, 0xce, 0x13, 0x44, 0x2b, 0x66, + 0x62, 0x24, 0x8e, 0x4f, 0xa0, 0x15, 0x63, 0xb6, 0x20, 0x21, 0xfc, 0x96, 0xbf, 0x78, + 0xb0, 0xf5, 0x1d, 0x16, 0x98, 0x8e, 0x88, 0x8a, 0x2a, 0xbe, 0x33, 0xef, 0x49, 0x31, + 0xed, 0x79, 0x40, 0x8e, 0x5c, 0x44, 0x85, 0x88, 0x33, 0x12, 0x73, 0x2c, 0x02, 0xa8, + 0xf0, 0x05, 0xf7, 0x66, 0xf5, 0xd6, 0x57, 0x69, 0xd7, 0x7a, 0x19, 0xcd, 0xf5, 0xb1, + 0x6d, 0x1b, 0x1f, 0xf9, 0xba, 0xe3, 0x93, 0x3f, 0x22, 0x2c, 0xb6, 0x36, 0x0b, 0xf6, + 0xb0, 0xa9, 0xfd, 0xe7, 0x94, 0x46, 0xfd, 0xeb, 0xd1, 0x7f, 0x2c, 0xc4, 0xd2, 0xfb, + 0x97, 0xfe, 0x02, 0x80, 0xe4, 0xfd, 0x4f, 0x77, 0xae, 0x6d, 0x3d, 0x81, 0x73, 0xce, + 0xb9, 0x7f, 0xf3, 0x04, 0x41, 0xc1, 0xab, 0xc6, 0x00, 0x0a, 0x00, 0x00, + }), + }, + "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d": { + "json": `{"id":"42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", + "parent":"77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", + "comment":"test base image","created":"2013-03-23T12:55:11.10432-07:00", + 
"container_config":{"Hostname":"","User":"","Memory":0,"MemorySwap":0, + "CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false, + "Tty":false,"OpenStdin":false,"StdinOnce":false, + "Env":null,"Cmd":null,"Dns":null,"Image":"","Volumes":null, + "VolumesFrom":"","Entrypoint":null},"Size":424242}`, + "checksum_simple": "sha256:bea7bf2e4bacd479344b737328db47b18880d09096e6674165533aa994f5e9f2", + "checksum_tarsum": "tarsum+sha256:68fdb56fb364f074eec2c9b3f85ca175329c4dcabc4a6a452b7272aa613a07a2", + "ancestry": `["42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", + "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20"]`, + "layer": string([]byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xbd, 0xb3, 0xee, 0x51, 0x02, 0x03, 0x6c, 0x61, 0x79, 0x65, + 0x72, 0x2e, 0x74, 0x61, 0x72, 0x00, 0xed, 0xd1, 0x31, 0x0e, 0xc2, 0x30, 0x0c, 0x05, + 0x50, 0xcf, 0x9c, 0xc2, 0x27, 0x48, 0x9d, 0x38, 0x8e, 0xcf, 0x53, 0x51, 0xaa, 0x56, + 0xea, 0x44, 0x82, 0xc4, 0xf1, 0x09, 0xb4, 0xea, 0x98, 0x2d, 0x48, 0x08, 0xbf, 0xe5, + 0x2f, 0x1e, 0xfc, 0xf5, 0xdd, 0x00, 0xdd, 0x11, 0x91, 0x8a, 0xe0, 0x27, 0xd3, 0x9e, + 0x14, 0xe2, 0x9e, 0x07, 0xf4, 0xc1, 0x2b, 0x0b, 0xfb, 0xa4, 0x82, 0xe4, 0x3d, 0x93, + 0x02, 0x0a, 0x7c, 0xc1, 0x23, 0x97, 0xf1, 0x5e, 0x5f, 0xc9, 0xcb, 0x38, 0xb5, 0xee, + 0xea, 0xd9, 0x3c, 0xb7, 0x4b, 0xbe, 0x7b, 0x9c, 0xf9, 0x23, 0xdc, 0x50, 0x6e, 0xb9, + 0xb8, 0xf2, 0x2c, 0x5d, 0xf7, 0x4f, 0x31, 0xb6, 0xf6, 0x4f, 0xc7, 0xfe, 0x41, 0x55, + 0x63, 0xdd, 0x9f, 0x89, 0x09, 0x90, 0x6c, 0xff, 0xee, 0xae, 0xcb, 0xba, 0x4d, 0x17, + 0x30, 0xc6, 0x18, 0xf3, 0x67, 0x5e, 0xc1, 0xed, 0x21, 0x5d, 0x00, 0x0a, 0x00, 0x00, + }), + }, + } + testRepositories = map[string]map[string]string{ + "foo42/bar": { + "latest": "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", + "test": "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", + }, + } + mockHosts = map[string][]net.IP{ + "": {net.ParseIP("0.0.0.0")}, + "localhost": {net.ParseIP("127.0.0.1"), net.ParseIP("::1")}, + "example.com": {net.ParseIP("42.42.42.42")}, + "other.com": {net.ParseIP("43.43.43.43")}, + } +) + +func init() { + r := mux.NewRouter() + + // /v1/ + r.HandleFunc("/v1/_ping", handlerGetPing).Methods("GET") + r.HandleFunc("/v1/images/{image_id:[^/]+}/{action:json|layer|ancestry}", handlerGetImage).Methods("GET") + r.HandleFunc("/v1/images/{image_id:[^/]+}/{action:json|layer|checksum}", handlerPutImage).Methods("PUT") + r.HandleFunc("/v1/repositories/{repository:.+}/tags", handlerGetDeleteTags).Methods("GET", "DELETE") + r.HandleFunc("/v1/repositories/{repository:.+}/tags/{tag:.+}", handlerGetTag).Methods("GET") + r.HandleFunc("/v1/repositories/{repository:.+}/tags/{tag:.+}", handlerPutTag).Methods("PUT") + r.HandleFunc("/v1/users{null:.*}", handlerUsers).Methods("GET", "POST", "PUT") + r.HandleFunc("/v1/repositories/{repository:.+}{action:/images|/}", handlerImages).Methods("GET", "PUT", "DELETE") + r.HandleFunc("/v1/repositories/{repository:.+}/auth", handlerAuth).Methods("PUT") + r.HandleFunc("/v1/search", handlerSearch).Methods("GET") + + // /v2/ + r.HandleFunc("/v2/version", handlerGetPing).Methods("GET") + + testHTTPServer = httptest.NewServer(handlerAccessLog(r)) + testHTTPSServer = httptest.NewTLSServer(handlerAccessLog(r)) + + // override net.LookupIP + lookupIP = func(host string) ([]net.IP, error) { + if host == "127.0.0.1" { + // I believe in future Go versions this will fail, so let's fix it later + return net.LookupIP(host) + } + for h, addrs := range mockHosts { + if host == h { + return 
addrs, nil + } + for _, addr := range addrs { + if addr.String() == host { + return []net.IP{addr}, nil + } + } + } + return nil, errors.New("lookup: no such host") + } +} + +func handlerAccessLog(handler http.Handler) http.Handler { + logHandler := func(w http.ResponseWriter, r *http.Request) { + logrus.Debugf("%s \"%s %s\"", r.RemoteAddr, r.Method, r.URL) + handler.ServeHTTP(w, r) + } + return http.HandlerFunc(logHandler) +} + +func makeURL(req string) string { + return testHTTPServer.URL + req +} + +func makeHTTPSURL(req string) string { + return testHTTPSServer.URL + req +} + +func makeIndex(req string) *registrytypes.IndexInfo { + index := ®istrytypes.IndexInfo{ + Name: makeURL(req), + } + return index +} + +func makeHTTPSIndex(req string) *registrytypes.IndexInfo { + index := ®istrytypes.IndexInfo{ + Name: makeHTTPSURL(req), + } + return index +} + +func makePublicIndex() *registrytypes.IndexInfo { + index := ®istrytypes.IndexInfo{ + Name: IndexServer, + Secure: true, + Official: true, + } + return index +} + +func makeServiceConfig(mirrors []string, insecureRegistries []string) *serviceConfig { + options := ServiceOptions{ + Mirrors: mirrors, + InsecureRegistries: insecureRegistries, + } + + return newServiceConfig(options) +} + +func writeHeaders(w http.ResponseWriter) { + h := w.Header() + h.Add("Server", "docker-tests/mock") + h.Add("Expires", "-1") + h.Add("Content-Type", "application/json") + h.Add("Pragma", "no-cache") + h.Add("Cache-Control", "no-cache") + h.Add("X-Docker-Registry-Version", "0.0.0") + h.Add("X-Docker-Registry-Config", "mock") +} + +func writeResponse(w http.ResponseWriter, message interface{}, code int) { + writeHeaders(w) + w.WriteHeader(code) + body, err := json.Marshal(message) + if err != nil { + io.WriteString(w, err.Error()) + return + } + w.Write(body) +} + +func readJSON(r *http.Request, dest interface{}) error { + body, err := ioutil.ReadAll(r.Body) + if err != nil { + return err + } + return json.Unmarshal(body, dest) +} + +func apiError(w http.ResponseWriter, message string, code int) { + body := map[string]string{ + "error": message, + } + writeResponse(w, body, code) +} + +func assertEqual(t *testing.T, a interface{}, b interface{}, message string) { + if a == b { + return + } + if len(message) == 0 { + message = fmt.Sprintf("%v != %v", a, b) + } + t.Fatal(message) +} + +func assertNotEqual(t *testing.T, a interface{}, b interface{}, message string) { + if a != b { + return + } + if len(message) == 0 { + message = fmt.Sprintf("%v == %v", a, b) + } + t.Fatal(message) +} + +// Similar to assertEqual, but does not stop test +func checkEqual(t *testing.T, a interface{}, b interface{}, messagePrefix string) { + if a == b { + return + } + message := fmt.Sprintf("%v != %v", a, b) + if len(messagePrefix) != 0 { + message = messagePrefix + ": " + message + } + t.Error(message) +} + +// Similar to assertNotEqual, but does not stop test +func checkNotEqual(t *testing.T, a interface{}, b interface{}, messagePrefix string) { + if a != b { + return + } + message := fmt.Sprintf("%v == %v", a, b) + if len(messagePrefix) != 0 { + message = messagePrefix + ": " + message + } + t.Error(message) +} + +func requiresAuth(w http.ResponseWriter, r *http.Request) bool { + writeCookie := func() { + value := fmt.Sprintf("FAKE-SESSION-%d", time.Now().UnixNano()) + cookie := &http.Cookie{Name: "session", Value: value, MaxAge: 3600} + http.SetCookie(w, cookie) + //FIXME(sam): this should be sent only on Index routes + value = fmt.Sprintf("FAKE-TOKEN-%d", 
time.Now().UnixNano()) + w.Header().Add("X-Docker-Token", value) + } + if len(r.Cookies()) > 0 { + writeCookie() + return true + } + if len(r.Header.Get("Authorization")) > 0 { + writeCookie() + return true + } + w.Header().Add("WWW-Authenticate", "token") + apiError(w, "Wrong auth", 401) + return false +} + +func handlerGetPing(w http.ResponseWriter, r *http.Request) { + writeResponse(w, true, 200) +} + +func handlerGetImage(w http.ResponseWriter, r *http.Request) { + if !requiresAuth(w, r) { + return + } + vars := mux.Vars(r) + layer, exists := testLayers[vars["image_id"]] + if !exists { + http.NotFound(w, r) + return + } + writeHeaders(w) + layerSize := len(layer["layer"]) + w.Header().Add("X-Docker-Size", strconv.Itoa(layerSize)) + io.WriteString(w, layer[vars["action"]]) +} + +func handlerPutImage(w http.ResponseWriter, r *http.Request) { + if !requiresAuth(w, r) { + return + } + vars := mux.Vars(r) + imageID := vars["image_id"] + action := vars["action"] + layer, exists := testLayers[imageID] + if !exists { + if action != "json" { + http.NotFound(w, r) + return + } + layer = make(map[string]string) + testLayers[imageID] = layer + } + if checksum := r.Header.Get("X-Docker-Checksum"); checksum != "" { + if checksum != layer["checksum_simple"] && checksum != layer["checksum_tarsum"] { + apiError(w, "Wrong checksum", 400) + return + } + } + body, err := ioutil.ReadAll(r.Body) + if err != nil { + apiError(w, fmt.Sprintf("Error: %s", err), 500) + return + } + layer[action] = string(body) + writeResponse(w, true, 200) +} + +func handlerGetDeleteTags(w http.ResponseWriter, r *http.Request) { + if !requiresAuth(w, r) { + return + } + repositoryName, err := reference.WithName(mux.Vars(r)["repository"]) + if err != nil { + apiError(w, "Could not parse repository", 400) + return + } + tags, exists := testRepositories[repositoryName.String()] + if !exists { + apiError(w, "Repository not found", 404) + return + } + if r.Method == "DELETE" { + delete(testRepositories, repositoryName.String()) + writeResponse(w, true, 200) + return + } + writeResponse(w, tags, 200) +} + +func handlerGetTag(w http.ResponseWriter, r *http.Request) { + if !requiresAuth(w, r) { + return + } + vars := mux.Vars(r) + repositoryName, err := reference.WithName(vars["repository"]) + if err != nil { + apiError(w, "Could not parse repository", 400) + return + } + tagName := vars["tag"] + tags, exists := testRepositories[repositoryName.String()] + if !exists { + apiError(w, "Repository not found", 404) + return + } + tag, exists := tags[tagName] + if !exists { + apiError(w, "Tag not found", 404) + return + } + writeResponse(w, tag, 200) +} + +func handlerPutTag(w http.ResponseWriter, r *http.Request) { + if !requiresAuth(w, r) { + return + } + vars := mux.Vars(r) + repositoryName, err := reference.WithName(vars["repository"]) + if err != nil { + apiError(w, "Could not parse repository", 400) + return + } + tagName := vars["tag"] + tags, exists := testRepositories[repositoryName.String()] + if !exists { + tags = make(map[string]string) + testRepositories[repositoryName.String()] = tags + } + tagValue := "" + readJSON(r, tagValue) + tags[tagName] = tagValue + writeResponse(w, true, 200) +} + +func handlerUsers(w http.ResponseWriter, r *http.Request) { + code := 200 + if r.Method == "POST" { + code = 201 + } else if r.Method == "PUT" { + code = 204 + } + writeResponse(w, "", code) +} + +func handlerImages(w http.ResponseWriter, r *http.Request) { + u, _ := url.Parse(testHTTPServer.URL) + w.Header().Add("X-Docker-Endpoints", 
fmt.Sprintf("%s , %s ", u.Host, "test.example.com")) + w.Header().Add("X-Docker-Token", fmt.Sprintf("FAKE-SESSION-%d", time.Now().UnixNano())) + if r.Method == "PUT" { + if strings.HasSuffix(r.URL.Path, "images") { + writeResponse(w, "", 204) + return + } + writeResponse(w, "", 200) + return + } + if r.Method == "DELETE" { + writeResponse(w, "", 204) + return + } + images := []map[string]string{} + for imageID, layer := range testLayers { + image := make(map[string]string) + image["id"] = imageID + image["checksum"] = layer["checksum_tarsum"] + image["Tag"] = "latest" + images = append(images, image) + } + writeResponse(w, images, 200) +} + +func handlerAuth(w http.ResponseWriter, r *http.Request) { + writeResponse(w, "OK", 200) +} + +func handlerSearch(w http.ResponseWriter, r *http.Request) { + result := ®istrytypes.SearchResults{ + Query: "fakequery", + NumResults: 1, + Results: []registrytypes.SearchResult{{Name: "fakeimage", StarCount: 42}}, + } + writeResponse(w, result, 200) +} + +func TestPing(t *testing.T) { + res, err := http.Get(makeURL("/v1/_ping")) + if err != nil { + t.Fatal(err) + } + assertEqual(t, res.StatusCode, 200, "") + assertEqual(t, res.Header.Get("X-Docker-Registry-Config"), "mock", + "This is not a Mocked Registry") +} + +/* Uncomment this to test Mocked Registry locally with curl + * WARNING: Don't push on the repos uncommented, it'll block the tests + * +func TestWait(t *testing.T) { + logrus.Println("Test HTTP server ready and waiting:", testHTTPServer.URL) + c := make(chan int) + <-c +} + +//*/ diff --git a/vendor/github.com/moby/moby/registry/registry_test.go b/vendor/github.com/moby/moby/registry/registry_test.go new file mode 100644 index 000000000..d89c46c2c --- /dev/null +++ b/vendor/github.com/moby/moby/registry/registry_test.go @@ -0,0 +1,917 @@ +// +build !solaris + +package registry + +import ( + "fmt" + "net/http" + "net/http/httputil" + "net/url" + "strings" + "testing" + + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" +) + +var ( + token = []string{"fake-token"} +) + +const ( + imageID = "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d" + REPO = "foo42/bar" +) + +func spawnTestRegistrySession(t *testing.T) *Session { + authConfig := &types.AuthConfig{} + endpoint, err := NewV1Endpoint(makeIndex("/v1/"), "", nil) + if err != nil { + t.Fatal(err) + } + userAgent := "docker test client" + var tr http.RoundTripper = debugTransport{NewTransport(nil), t.Log} + tr = transport.NewTransport(AuthTransport(tr, authConfig, false), DockerHeaders(userAgent, nil)...) + client := HTTPClient(tr) + r, err := NewSession(client, authConfig, endpoint) + if err != nil { + t.Fatal(err) + } + // In a normal scenario for the v1 registry, the client should send a `X-Docker-Token: true` + // header while authenticating, in order to retrieve a token that can be later used to + // perform authenticated actions. + // + // The mock v1 registry does not support that, (TODO(tiborvass): support it), instead, + // it will consider authenticated any request with the header `X-Docker-Token: fake-token`. + // + // Because we know that the client's transport is an `*authTransport` we simply cast it, + // in order to set the internal cached token to the fake token, and thus send that fake token + // upon every subsequent requests. 
+	r.client.Transport.(*authTransport).token = token
+	return r
+}
+
+func TestPingRegistryEndpoint(t *testing.T) {
+	testPing := func(index *registrytypes.IndexInfo, expectedStandalone bool, assertMessage string) {
+		ep, err := NewV1Endpoint(index, "", nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+		regInfo, err := ep.Ping()
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		assertEqual(t, regInfo.Standalone, expectedStandalone, assertMessage)
+	}
+
+	testPing(makeIndex("/v1/"), true, "Expected standalone to be true (default)")
+	testPing(makeHTTPSIndex("/v1/"), true, "Expected standalone to be true (default)")
+	testPing(makePublicIndex(), false, "Expected standalone to be false for public index")
+}
+
+func TestEndpoint(t *testing.T) {
+	// Simple wrapper to fail test if err != nil
+	expandEndpoint := func(index *registrytypes.IndexInfo) *V1Endpoint {
+		endpoint, err := NewV1Endpoint(index, "", nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+		return endpoint
+	}
+
+	assertInsecureIndex := func(index *registrytypes.IndexInfo) {
+		index.Secure = true
+		_, err := NewV1Endpoint(index, "", nil)
+		assertNotEqual(t, err, nil, index.Name+": Expected error for insecure index")
+		assertEqual(t, strings.Contains(err.Error(), "insecure-registry"), true, index.Name+": Expected insecure-registry error for insecure index")
+		index.Secure = false
+	}
+
+	assertSecureIndex := func(index *registrytypes.IndexInfo) {
+		index.Secure = true
+		_, err := NewV1Endpoint(index, "", nil)
+		assertNotEqual(t, err, nil, index.Name+": Expected cert error for secure index")
+		assertEqual(t, strings.Contains(err.Error(), "certificate signed by unknown authority"), true, index.Name+": Expected cert error for secure index")
+		index.Secure = false
+	}
+
+	index := &registrytypes.IndexInfo{}
+	index.Name = makeURL("/v1/")
+	endpoint := expandEndpoint(index)
+	assertEqual(t, endpoint.String(), index.Name, "Expected endpoint to be "+index.Name)
+	assertInsecureIndex(index)
+
+	index.Name = makeURL("")
+	endpoint = expandEndpoint(index)
+	assertEqual(t, endpoint.String(), index.Name+"/v1/", index.Name+": Expected endpoint to be "+index.Name+"/v1/")
+	assertInsecureIndex(index)
+
+	httpURL := makeURL("")
+	index.Name = strings.SplitN(httpURL, "://", 2)[1]
+	endpoint = expandEndpoint(index)
+	assertEqual(t, endpoint.String(), httpURL+"/v1/", index.Name+": Expected endpoint to be "+httpURL+"/v1/")
+	assertInsecureIndex(index)
+
+	index.Name = makeHTTPSURL("/v1/")
+	endpoint = expandEndpoint(index)
+	assertEqual(t, endpoint.String(), index.Name, "Expected endpoint to be "+index.Name)
+	assertSecureIndex(index)
+
+	index.Name = makeHTTPSURL("")
+	endpoint = expandEndpoint(index)
+	assertEqual(t, endpoint.String(), index.Name+"/v1/", index.Name+": Expected endpoint to be "+index.Name+"/v1/")
+	assertSecureIndex(index)
+
+	httpsURL := makeHTTPSURL("")
+	index.Name = strings.SplitN(httpsURL, "://", 2)[1]
+	endpoint = expandEndpoint(index)
+	assertEqual(t, endpoint.String(), httpsURL+"/v1/", index.Name+": Expected endpoint to be "+httpsURL+"/v1/")
+	assertSecureIndex(index)
+
+	badEndpoints := []string{
+		"http://127.0.0.1/v1/",
+		"https://127.0.0.1/v1/",
+		"http://127.0.0.1",
+		"https://127.0.0.1",
+		"127.0.0.1",
+	}
+	for _, address := range badEndpoints {
+		index.Name = address
+		_, err := NewV1Endpoint(index, "", nil)
+		checkNotEqual(t, err, nil, "Expected error while expanding bad endpoint")
+	}
+}
+
+func TestGetRemoteHistory(t *testing.T) {
+	r := spawnTestRegistrySession(t)
+	hist, err := r.GetRemoteHistory(imageID, makeURL("/v1/"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	assertEqual(t, len(hist), 2, "Expected 2 images in history")
+	assertEqual(t, hist[0], imageID, "Expected "+imageID+" as first ancestry")
+	assertEqual(t, hist[1], "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20",
+		"Unexpected second ancestry")
+}
+
+func TestLookupRemoteImage(t *testing.T) {
+	r := spawnTestRegistrySession(t)
+	err := r.LookupRemoteImage(imageID, makeURL("/v1/"))
+	assertEqual(t, err, nil, "Expected no error on lookup of existing image")
+	if err := r.LookupRemoteImage("abcdef", makeURL("/v1/")); err == nil {
+		t.Fatal("Expected error on lookup of nonexistent image")
+	}
+}
+
+func TestGetRemoteImageJSON(t *testing.T) {
+	r := spawnTestRegistrySession(t)
+	json, size, err := r.GetRemoteImageJSON(imageID, makeURL("/v1/"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	assertEqual(t, size, int64(154), "Expected size 154")
+	if len(json) == 0 {
+		t.Fatal("Expected non-empty json")
+	}
+
+	_, _, err = r.GetRemoteImageJSON("abcdef", makeURL("/v1/"))
+	if err == nil {
+		t.Fatal("Expected image not found error")
+	}
+}
+
+func TestGetRemoteImageLayer(t *testing.T) {
+	r := spawnTestRegistrySession(t)
+	data, err := r.GetRemoteImageLayer(imageID, makeURL("/v1/"), 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if data == nil {
+		t.Fatal("Expected non-nil data result")
+	}
+
+	_, err = r.GetRemoteImageLayer("abcdef", makeURL("/v1/"), 0)
+	if err == nil {
+		t.Fatal("Expected image not found error")
+	}
+}
+
+func TestGetRemoteTag(t *testing.T) {
+	r := spawnTestRegistrySession(t)
+	repoRef, err := reference.ParseNormalizedNamed(REPO)
+	if err != nil {
+		t.Fatal(err)
+	}
+	tag, err := r.GetRemoteTag([]string{makeURL("/v1/")}, repoRef, "test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	assertEqual(t, tag, imageID, "Expected tag test to map to "+imageID)
+
+	bazRef, err := reference.ParseNormalizedNamed("foo42/baz")
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = r.GetRemoteTag([]string{makeURL("/v1/")}, bazRef, "foo")
+	if err != ErrRepoNotFound {
+		t.Fatal("Expected ErrRepoNotFound error when fetching tag for bogus repo")
+	}
+}
+
+func TestGetRemoteTags(t *testing.T) {
+	r := spawnTestRegistrySession(t)
+	repoRef, err := reference.ParseNormalizedNamed(REPO)
+	if err != nil {
+		t.Fatal(err)
+	}
+	tags, err := r.GetRemoteTags([]string{makeURL("/v1/")}, repoRef)
+	if err != nil {
+		t.Fatal(err)
+	}
+	assertEqual(t, len(tags), 2, "Expected two tags")
+	assertEqual(t, tags["latest"], imageID, "Expected tag latest to map to "+imageID)
+	assertEqual(t, tags["test"], imageID, "Expected tag test to map to "+imageID)
+
+	bazRef, err := reference.ParseNormalizedNamed("foo42/baz")
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = r.GetRemoteTags([]string{makeURL("/v1/")}, bazRef)
+	if err != ErrRepoNotFound {
+		t.Fatal("Expected ErrRepoNotFound error when fetching tags for bogus repo")
+	}
+}
+
+func TestGetRepositoryData(t *testing.T) {
+	r := spawnTestRegistrySession(t)
+	parsedURL, err := url.Parse(makeURL("/v1/"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	host := "http://" + parsedURL.Host + "/v1/"
+	repoRef, err := reference.ParseNormalizedNamed(REPO)
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := r.GetRepositoryData(repoRef)
+	if err != nil {
+		t.Fatal(err)
+	}
+	assertEqual(t, len(data.ImgList), 2, "Expected 2 images in ImgList")
+	assertEqual(t, len(data.Endpoints), 2,
+		fmt.Sprintf("Expected 2 endpoints in Endpoints, found %d instead", len(data.Endpoints)))
+	assertEqual(t, data.Endpoints[0], host,
+		fmt.Sprintf("Expected first endpoint to be %s but found %s instead", host, data.Endpoints[0]))
+	assertEqual(t, data.Endpoints[1], "http://test.example.com/v1/",
+		fmt.Sprintf("Expected second endpoint to be http://test.example.com/v1/ but found %s instead", data.Endpoints[1]))
+}
+
+func TestPushImageJSONRegistry(t *testing.T) {
+	r := spawnTestRegistrySession(t)
+	imgData := &ImgData{
+		ID:       "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20",
+		Checksum: "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37",
+	}
+
+	err := r.PushImageJSONRegistry(imgData, []byte{0x42, 0xdf, 0x0}, makeURL("/v1/"))
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestPushImageLayerRegistry(t *testing.T) {
+	r := spawnTestRegistrySession(t)
+	layer := strings.NewReader("")
+	_, _, err := r.PushImageLayerRegistry(imageID, layer, makeURL("/v1/"), []byte{})
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestParseRepositoryInfo(t *testing.T) {
+	type staticRepositoryInfo struct {
+		Index         *registrytypes.IndexInfo
+		RemoteName    string
+		CanonicalName string
+		LocalName     string
+		Official      bool
+	}
+
+	expectedRepoInfos := map[string]staticRepositoryInfo{
+		"fooo/bar": {
+			Index: &registrytypes.IndexInfo{
+				Name:     IndexName,
+				Official: true,
+			},
+			RemoteName:    "fooo/bar",
+			LocalName:     "fooo/bar",
+			CanonicalName: "docker.io/fooo/bar",
+			Official:      false,
+		},
+		"library/ubuntu": {
+			Index: &registrytypes.IndexInfo{
+				Name:     IndexName,
+				Official: true,
+			},
+			RemoteName:    "library/ubuntu",
+			LocalName:     "ubuntu",
+			CanonicalName: "docker.io/library/ubuntu",
+			Official:      true,
+		},
+		"nonlibrary/ubuntu": {
+			Index: &registrytypes.IndexInfo{
+				Name:     IndexName,
+				Official: true,
+			},
+			RemoteName:    "nonlibrary/ubuntu",
+			LocalName:     "nonlibrary/ubuntu",
+			CanonicalName: "docker.io/nonlibrary/ubuntu",
+			Official:      false,
+		},
+		"ubuntu": {
+			Index: &registrytypes.IndexInfo{
+				Name:     IndexName,
+				Official: true,
+			},
+			RemoteName:    "library/ubuntu",
+			LocalName:     "ubuntu",
+			CanonicalName: "docker.io/library/ubuntu",
+			Official:      true,
+		},
+		"other/library": {
+			Index: &registrytypes.IndexInfo{
+				Name:     IndexName,
+				Official: true,
+			},
+			RemoteName:    "other/library",
+			LocalName:     "other/library",
+			CanonicalName: "docker.io/other/library",
+			Official:      false,
+		},
+		"127.0.0.1:8000/private/moonbase": {
+			Index: &registrytypes.IndexInfo{
+				Name:     "127.0.0.1:8000",
+				Official: false,
+			},
+			RemoteName:    "private/moonbase",
+			LocalName:     "127.0.0.1:8000/private/moonbase",
+			CanonicalName: "127.0.0.1:8000/private/moonbase",
+			Official:      false,
+		},
+		"127.0.0.1:8000/privatebase": {
+			Index: &registrytypes.IndexInfo{
+				Name:     "127.0.0.1:8000",
+				Official: false,
+			},
+			RemoteName:    "privatebase",
+			LocalName:     "127.0.0.1:8000/privatebase",
+			CanonicalName: "127.0.0.1:8000/privatebase",
+			Official:      false,
+		},
+		"localhost:8000/private/moonbase": {
+			Index: &registrytypes.IndexInfo{
+				Name:     "localhost:8000",
+				Official: false,
+			},
+			RemoteName:    "private/moonbase",
+			LocalName:     "localhost:8000/private/moonbase",
+			CanonicalName: "localhost:8000/private/moonbase",
+			Official:      false,
+		},
+		"localhost:8000/privatebase": {
+			Index: &registrytypes.IndexInfo{
+				Name:     "localhost:8000",
+				Official: false,
+			},
+			RemoteName:    "privatebase",
+			LocalName:     "localhost:8000/privatebase",
+			CanonicalName: "localhost:8000/privatebase",
+			Official:      false,
+		},
+		"example.com/private/moonbase": {
+			Index: &registrytypes.IndexInfo{
+				Name:     "example.com",
+				Official: false,
+			},
+			RemoteName:    "private/moonbase",
+			LocalName:     "example.com/private/moonbase",
+			CanonicalName: "example.com/private/moonbase",
+			Official:      false,
+		},
+		"example.com/privatebase": {
+			Index: &registrytypes.IndexInfo{
+				Name:     "example.com",
+				Official: false,
+			},
+			RemoteName:    "privatebase",
+			LocalName:     "example.com/privatebase",
+			CanonicalName: "example.com/privatebase",
+			Official:      false,
+		},
+		"example.com:8000/private/moonbase": {
+			Index: &registrytypes.IndexInfo{
+				Name:     "example.com:8000",
+				Official: false,
+			},
+			RemoteName:    "private/moonbase",
+			LocalName:     "example.com:8000/private/moonbase",
+			CanonicalName: "example.com:8000/private/moonbase",
+			Official:      false,
+		},
+		"example.com:8000/privatebase": {
+			Index: &registrytypes.IndexInfo{
+				Name:     "example.com:8000",
+				Official: false,
+			},
+			RemoteName:    "privatebase",
+			LocalName:     "example.com:8000/privatebase",
+			CanonicalName: "example.com:8000/privatebase",
+			Official:      false,
+		},
+		"localhost/private/moonbase": {
+			Index: &registrytypes.IndexInfo{
+				Name:     "localhost",
+				Official: false,
+			},
+			RemoteName:    "private/moonbase",
+			LocalName:     "localhost/private/moonbase",
+			CanonicalName: "localhost/private/moonbase",
+			Official:      false,
+		},
+		"localhost/privatebase": {
+			Index: &registrytypes.IndexInfo{
+				Name:     "localhost",
+				Official: false,
+			},
+			RemoteName:    "privatebase",
+			LocalName:     "localhost/privatebase",
+			CanonicalName: "localhost/privatebase",
+			Official:      false,
+		},
+		IndexName + "/public/moonbase": {
+			Index: &registrytypes.IndexInfo{
+				Name:     IndexName,
+				Official: true,
+			},
+			RemoteName:    "public/moonbase",
+			LocalName:     "public/moonbase",
+			CanonicalName: "docker.io/public/moonbase",
+			Official:      false,
+		},
+		"index." + IndexName + "/public/moonbase": {
+			Index: &registrytypes.IndexInfo{
+				Name:     IndexName,
+				Official: true,
+			},
+			RemoteName:    "public/moonbase",
+			LocalName:     "public/moonbase",
+			CanonicalName: "docker.io/public/moonbase",
+			Official:      false,
+		},
+		"ubuntu-12.04-base": {
+			Index: &registrytypes.IndexInfo{
+				Name:     IndexName,
+				Official: true,
+			},
+			RemoteName:    "library/ubuntu-12.04-base",
+			LocalName:     "ubuntu-12.04-base",
+			CanonicalName: "docker.io/library/ubuntu-12.04-base",
+			Official:      true,
+		},
+		IndexName + "/ubuntu-12.04-base": {
+			Index: &registrytypes.IndexInfo{
+				Name:     IndexName,
+				Official: true,
+			},
+			RemoteName:    "library/ubuntu-12.04-base",
+			LocalName:     "ubuntu-12.04-base",
+			CanonicalName: "docker.io/library/ubuntu-12.04-base",
+			Official:      true,
+		},
+		"index." + IndexName + "/ubuntu-12.04-base": {
+			Index: &registrytypes.IndexInfo{
+				Name:     IndexName,
+				Official: true,
+			},
+			RemoteName:    "library/ubuntu-12.04-base",
+			LocalName:     "ubuntu-12.04-base",
+			CanonicalName: "docker.io/library/ubuntu-12.04-base",
+			Official:      true,
+		},
+	}
+
+	for reposName, expectedRepoInfo := range expectedRepoInfos {
+		named, err := reference.ParseNormalizedNamed(reposName)
+		if err != nil {
+			t.Error(err)
+		}
+
+		repoInfo, err := ParseRepositoryInfo(named)
+		if err != nil {
+			t.Error(err)
+		} else {
+			checkEqual(t, repoInfo.Index.Name, expectedRepoInfo.Index.Name, reposName)
+			checkEqual(t, reference.Path(repoInfo.Name), expectedRepoInfo.RemoteName, reposName)
+			checkEqual(t, reference.FamiliarName(repoInfo.Name), expectedRepoInfo.LocalName, reposName)
+			checkEqual(t, repoInfo.Name.Name(), expectedRepoInfo.CanonicalName, reposName)
+			checkEqual(t, repoInfo.Index.Official, expectedRepoInfo.Index.Official, reposName)
+			checkEqual(t, repoInfo.Official, expectedRepoInfo.Official, reposName)
+		}
+	}
+}
+
+func TestNewIndexInfo(t *testing.T) {
+	testIndexInfo := func(config *serviceConfig, expectedIndexInfos map[string]*registrytypes.IndexInfo) {
+		for indexName, expectedIndexInfo := range expectedIndexInfos {
+			index, err := newIndexInfo(config, indexName)
+			if err != nil {
+				t.Fatal(err)
+			} else {
+				checkEqual(t, index.Name, expectedIndexInfo.Name, indexName+" name")
+				checkEqual(t, index.Official, expectedIndexInfo.Official, indexName+" is official")
+				checkEqual(t, index.Secure, expectedIndexInfo.Secure, indexName+" is secure")
+				checkEqual(t, len(index.Mirrors), len(expectedIndexInfo.Mirrors), indexName+" mirrors")
+			}
+		}
+	}
+
+	config := newServiceConfig(ServiceOptions{})
+	noMirrors := []string{}
+	expectedIndexInfos := map[string]*registrytypes.IndexInfo{
+		IndexName: {
+			Name:     IndexName,
+			Official: true,
+			Secure:   true,
+			Mirrors:  noMirrors,
+		},
+		"index." + IndexName: {
+			Name:     IndexName,
+			Official: true,
+			Secure:   true,
+			Mirrors:  noMirrors,
+		},
+		"example.com": {
+			Name:     "example.com",
+			Official: false,
+			Secure:   true,
+			Mirrors:  noMirrors,
+		},
+		"127.0.0.1:5000": {
+			Name:     "127.0.0.1:5000",
+			Official: false,
+			Secure:   false,
+			Mirrors:  noMirrors,
+		},
+	}
+	testIndexInfo(config, expectedIndexInfos)
+
+	publicMirrors := []string{"http://mirror1.local", "http://mirror2.local"}
+	config = makeServiceConfig(publicMirrors, []string{"example.com"})
+
+	expectedIndexInfos = map[string]*registrytypes.IndexInfo{
+		IndexName: {
+			Name:     IndexName,
+			Official: true,
+			Secure:   true,
+			Mirrors:  publicMirrors,
+		},
+		"index." + IndexName: {
+			Name:     IndexName,
+			Official: true,
+			Secure:   true,
+			Mirrors:  publicMirrors,
+		},
+		"example.com": {
+			Name:     "example.com",
+			Official: false,
+			Secure:   false,
+			Mirrors:  noMirrors,
+		},
+		"example.com:5000": {
+			Name:     "example.com:5000",
+			Official: false,
+			Secure:   true,
+			Mirrors:  noMirrors,
+		},
+		"127.0.0.1": {
+			Name:     "127.0.0.1",
+			Official: false,
+			Secure:   false,
+			Mirrors:  noMirrors,
+		},
+		"127.0.0.1:5000": {
+			Name:     "127.0.0.1:5000",
+			Official: false,
+			Secure:   false,
+			Mirrors:  noMirrors,
+		},
+		"other.com": {
+			Name:     "other.com",
+			Official: false,
+			Secure:   true,
+			Mirrors:  noMirrors,
+		},
+	}
+	testIndexInfo(config, expectedIndexInfos)
+
+	config = makeServiceConfig(nil, []string{"42.42.0.0/16"})
+	expectedIndexInfos = map[string]*registrytypes.IndexInfo{
+		"example.com": {
+			Name:     "example.com",
+			Official: false,
+			Secure:   false,
+			Mirrors:  noMirrors,
+		},
+		"example.com:5000": {
+			Name:     "example.com:5000",
+			Official: false,
+			Secure:   false,
+			Mirrors:  noMirrors,
+		},
+		"127.0.0.1": {
+			Name:     "127.0.0.1",
+			Official: false,
+			Secure:   false,
+			Mirrors:  noMirrors,
+		},
+		"127.0.0.1:5000": {
+			Name:     "127.0.0.1:5000",
+			Official: false,
+			Secure:   false,
+			Mirrors:  noMirrors,
+		},
+		"other.com": {
+			Name:     "other.com",
+			Official: false,
+			Secure:   true,
+			Mirrors:  noMirrors,
+		},
+	}
+	testIndexInfo(config, expectedIndexInfos)
+}
+
+func TestMirrorEndpointLookup(t *testing.T) {
+	containsMirror := func(endpoints []APIEndpoint) bool {
+		for _, pe := range endpoints {
+			if pe.URL.Host == "my.mirror" {
+				return true
+			}
+		}
+		return false
+	}
+	s := DefaultService{config: makeServiceConfig([]string{"https://my.mirror"}, nil)}
+
+	imageName, err := reference.WithName(IndexName + "/test/image")
+	if err != nil {
+		t.Error(err)
+	}
+	pushAPIEndpoints, err := s.LookupPushEndpoints(reference.Domain(imageName))
+	if err != nil {
+		t.Fatal(err)
+	}
+	if containsMirror(pushAPIEndpoints) {
+		t.Fatal("Push endpoint should not contain mirror")
+	}
+
+	pullAPIEndpoints, err := s.LookupPullEndpoints(reference.Domain(imageName))
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !containsMirror(pullAPIEndpoints) {
+		t.Fatal("Pull endpoint should contain mirror")
+	}
+}
+
+func TestPushRegistryTag(t *testing.T) {
+	r := spawnTestRegistrySession(t)
+	repoRef, err := reference.ParseNormalizedNamed(REPO)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = r.PushRegistryTag(repoRef, imageID, "stable", makeURL("/v1/"))
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestPushImageJSONIndex(t *testing.T) {
+	r := spawnTestRegistrySession(t)
+	imgData := []*ImgData{
+		{
+			ID:       "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20",
+			Checksum: "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37",
+		},
+		{
+			ID:       "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d",
+			Checksum: "sha256:bea7bf2e4bacd479344b737328db47b18880d09096e6674165533aa994f5e9f2",
+		},
+	}
+	repoRef, err := reference.ParseNormalizedNamed(REPO)
+	if err != nil {
+		t.Fatal(err)
+	}
+	repoData, err := r.PushImageJSONIndex(repoRef, imgData, false, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if repoData == nil {
+		t.Fatal("Expected RepositoryData object")
+	}
+	repoData, err = r.PushImageJSONIndex(repoRef, imgData, true, []string{r.indexEndpoint.String()})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if repoData == nil {
+		t.Fatal("Expected RepositoryData object")
+	}
+}
+
+func TestSearchRepositories(t *testing.T) {
+	r := spawnTestRegistrySession(t)
+	results, err := r.SearchRepositories("fakequery", 25)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if results == nil {
+		t.Fatal("Expected non-nil SearchResults object")
+	}
+	assertEqual(t, results.NumResults, 1, "Expected 1 search result")
+	assertEqual(t, results.Query, "fakequery", "Expected 'fakequery' as query")
+	assertEqual(t, results.Results[0].StarCount, 42, "Expected 'fakeimage' to have 42 stars")
+}
+
+func TestTrustedLocation(t *testing.T) {
+	for _, url := range []string{"http://example.com", "https://example.com:7777", "http://docker.io", "http://test.docker.com", "https://fakedocker.com"} {
+		req, _ := http.NewRequest("GET", url, nil)
+		if trustedLocation(req) == true {
+			t.Fatalf("'%s' shouldn't be detected as a trusted location", url)
+		}
+	}
+
+	for _, url := range []string{"https://docker.io", "https://test.docker.com:80"} {
+		req, _ := http.NewRequest("GET", url, nil)
+		if trustedLocation(req) == false {
+			t.Fatalf("'%s' should be detected as a trusted location", url)
+		}
+	}
+}
+
+func TestAddRequiredHeadersToRedirectedRequests(t *testing.T) {
+	for _, urls := range [][]string{
+		{"http://docker.io", "https://docker.com"},
+		{"https://foo.docker.io:7777", "http://bar.docker.com"},
+		{"https://foo.docker.io", "https://example.com"},
+	} {
+		reqFrom, _ := http.NewRequest("GET", urls[0], nil)
+		reqFrom.Header.Add("Content-Type", "application/json")
+		reqFrom.Header.Add("Authorization", "super_secret")
+		reqTo, _ := http.NewRequest("GET", urls[1], nil)
+
+		addRequiredHeadersToRedirectedRequests(reqTo, []*http.Request{reqFrom})
+
+		if len(reqTo.Header) != 1 {
+			t.Fatalf("Expected 1 header, got %d", len(reqTo.Header))
+		}
+
+		if reqTo.Header.Get("Content-Type") != "application/json" {
+			t.Fatal("'Content-Type' should be 'application/json'")
+		}
+
+		if reqTo.Header.Get("Authorization") != "" {
+			t.Fatal("'Authorization' should be empty")
+		}
+	}
+
+	for _, urls := range [][]string{
+		{"https://docker.io", "https://docker.com"},
+		{"https://foo.docker.io:7777", "https://bar.docker.com"},
+	} {
+		reqFrom, _ := http.NewRequest("GET", urls[0], nil)
+		reqFrom.Header.Add("Content-Type", "application/json")
+		reqFrom.Header.Add("Authorization", "super_secret")
+		reqTo, _ := http.NewRequest("GET", urls[1], nil)
+
+		addRequiredHeadersToRedirectedRequests(reqTo, []*http.Request{reqFrom})
+
+		if len(reqTo.Header) != 2 {
+			t.Fatalf("Expected 2 headers, got %d", len(reqTo.Header))
+		}
+
+		if reqTo.Header.Get("Content-Type") != "application/json" {
+			t.Fatal("'Content-Type' should be 'application/json'")
+		}
+
+		if reqTo.Header.Get("Authorization") != "super_secret" {
+			t.Fatal("'Authorization' should be 'super_secret'")
+		}
+	}
+}
+
+func TestAllowNondistributableArtifacts(t *testing.T) {
+	tests := []struct {
+		addr       string
+		registries []string
+		expected   bool
+	}{
+		{IndexName, nil, false},
+		{"example.com", []string{}, false},
+		{"example.com", []string{"example.com"}, true},
+		{"localhost", []string{"localhost:5000"}, false},
+		{"localhost:5000", []string{"localhost:5000"}, true},
+		{"localhost", []string{"example.com"}, false},
+		{"127.0.0.1:5000", []string{"127.0.0.1:5000"}, true},
+		{"localhost", nil, false},
+		{"localhost:5000", nil, false},
+		{"127.0.0.1", nil, false},
+		{"localhost", []string{"example.com"}, false},
+		{"127.0.0.1", []string{"example.com"}, false},
+		{"example.com", nil, false},
+		{"example.com", []string{"example.com"}, true},
+		{"127.0.0.1", []string{"example.com"}, false},
+		{"127.0.0.1:5000", []string{"example.com"}, false},
+		{"example.com:5000", []string{"42.42.0.0/16"},
true}, + {"example.com", []string{"42.42.0.0/16"}, true}, + {"example.com:5000", []string{"42.42.42.42/8"}, true}, + {"127.0.0.1:5000", []string{"127.0.0.0/8"}, true}, + {"42.42.42.42:5000", []string{"42.1.1.1/8"}, true}, + {"invalid.domain.com", []string{"42.42.0.0/16"}, false}, + {"invalid.domain.com", []string{"invalid.domain.com"}, true}, + {"invalid.domain.com:5000", []string{"invalid.domain.com"}, false}, + {"invalid.domain.com:5000", []string{"invalid.domain.com:5000"}, true}, + } + for _, tt := range tests { + config := newServiceConfig(ServiceOptions{ + AllowNondistributableArtifacts: tt.registries, + }) + if v := allowNondistributableArtifacts(config, tt.addr); v != tt.expected { + t.Errorf("allowNondistributableArtifacts failed for %q %v, expected %v got %v", tt.addr, tt.registries, tt.expected, v) + } + } +} + +func TestIsSecureIndex(t *testing.T) { + tests := []struct { + addr string + insecureRegistries []string + expected bool + }{ + {IndexName, nil, true}, + {"example.com", []string{}, true}, + {"example.com", []string{"example.com"}, false}, + {"localhost", []string{"localhost:5000"}, false}, + {"localhost:5000", []string{"localhost:5000"}, false}, + {"localhost", []string{"example.com"}, false}, + {"127.0.0.1:5000", []string{"127.0.0.1:5000"}, false}, + {"localhost", nil, false}, + {"localhost:5000", nil, false}, + {"127.0.0.1", nil, false}, + {"localhost", []string{"example.com"}, false}, + {"127.0.0.1", []string{"example.com"}, false}, + {"example.com", nil, true}, + {"example.com", []string{"example.com"}, false}, + {"127.0.0.1", []string{"example.com"}, false}, + {"127.0.0.1:5000", []string{"example.com"}, false}, + {"example.com:5000", []string{"42.42.0.0/16"}, false}, + {"example.com", []string{"42.42.0.0/16"}, false}, + {"example.com:5000", []string{"42.42.42.42/8"}, false}, + {"127.0.0.1:5000", []string{"127.0.0.0/8"}, false}, + {"42.42.42.42:5000", []string{"42.1.1.1/8"}, false}, + {"invalid.domain.com", []string{"42.42.0.0/16"}, true}, + {"invalid.domain.com", []string{"invalid.domain.com"}, false}, + {"invalid.domain.com:5000", []string{"invalid.domain.com"}, true}, + {"invalid.domain.com:5000", []string{"invalid.domain.com:5000"}, false}, + } + for _, tt := range tests { + config := makeServiceConfig(nil, tt.insecureRegistries) + if sec := isSecureIndex(config, tt.addr); sec != tt.expected { + t.Errorf("isSecureIndex failed for %q %v, expected %v got %v", tt.addr, tt.insecureRegistries, tt.expected, sec) + } + } +} + +type debugTransport struct { + http.RoundTripper + log func(...interface{}) +} + +func (tr debugTransport) RoundTrip(req *http.Request) (*http.Response, error) { + dump, err := httputil.DumpRequestOut(req, false) + if err != nil { + tr.log("could not dump request") + } + tr.log(string(dump)) + resp, err := tr.RoundTripper.RoundTrip(req) + if err != nil { + return nil, err + } + dump, err = httputil.DumpResponse(resp, false) + if err != nil { + tr.log("could not dump response") + } + tr.log(string(dump)) + return resp, err +} diff --git a/vendor/github.com/moby/moby/registry/resumable/resumablerequestreader.go b/vendor/github.com/moby/moby/registry/resumable/resumablerequestreader.go new file mode 100644 index 000000000..5403c7684 --- /dev/null +++ b/vendor/github.com/moby/moby/registry/resumable/resumablerequestreader.go @@ -0,0 +1,96 @@ +package resumable + +import ( + "fmt" + "io" + "net/http" + "time" + + "github.com/Sirupsen/logrus" +) + +type requestReader struct { + client *http.Client + request *http.Request + lastRange int64 + totalSize 
int64
+	currentResponse *http.Response
+	failures        uint32
+	maxFailures     uint32
+	waitDuration    time.Duration
+}
+
+// NewRequestReader makes it possible to resume reading a request's body
+// transparently. maxfail is the number of times the request may be retried
+// from scratch (not resumed); totalsize is the total length of the body,
+// auto-detected when passed as 0.
+func NewRequestReader(c *http.Client, r *http.Request, maxfail uint32, totalsize int64) io.ReadCloser {
+	return &requestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, waitDuration: 5 * time.Second}
+}
+
+// NewRequestReaderWithInitialResponse makes it possible to resume
+// reading the body of an already initiated request.
+func NewRequestReaderWithInitialResponse(c *http.Client, r *http.Request, maxfail uint32, totalsize int64, initialResponse *http.Response) io.ReadCloser {
+	return &requestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, currentResponse: initialResponse, waitDuration: 5 * time.Second}
+}
+
+func (r *requestReader) Read(p []byte) (n int, err error) {
+	if r.client == nil || r.request == nil {
+		return 0, fmt.Errorf("client and request can't be nil")
+	}
+	isFreshRequest := false
+	if r.lastRange != 0 && r.currentResponse == nil {
+		readRange := fmt.Sprintf("bytes=%d-%d", r.lastRange, r.totalSize)
+		r.request.Header.Set("Range", readRange)
+		time.Sleep(r.waitDuration)
+	}
+	if r.currentResponse == nil {
+		r.currentResponse, err = r.client.Do(r.request)
+		isFreshRequest = true
+	}
+	if err != nil && r.failures+1 != r.maxFailures {
+		r.cleanUpResponse()
+		r.failures++
+		time.Sleep(time.Duration(r.failures) * r.waitDuration)
+		return 0, nil
+	} else if err != nil {
+		r.cleanUpResponse()
+		return 0, err
+	}
+	if r.currentResponse.StatusCode == 416 && r.lastRange == r.totalSize && r.currentResponse.ContentLength == 0 {
+		r.cleanUpResponse()
+		return 0, io.EOF
+	} else if r.currentResponse.StatusCode != 206 && r.lastRange != 0 && isFreshRequest {
+		r.cleanUpResponse()
+		return 0, fmt.Errorf("the server doesn't support byte ranges")
+	}
+	if r.totalSize == 0 {
+		r.totalSize = r.currentResponse.ContentLength
+	} else if r.totalSize <= 0 {
+		r.cleanUpResponse()
+		return 0, fmt.Errorf("failed to auto detect content length")
+	}
+	n, err = r.currentResponse.Body.Read(p)
+	r.lastRange += int64(n)
+	if err != nil {
+		r.cleanUpResponse()
+	}
+	if err != nil && err != io.EOF {
+		logrus.Infof("encountered error during pull and clearing it before resume: %s", err)
+		err = nil
+	}
+	return n, err
+}
+
+func (r *requestReader) Close() error {
+	r.cleanUpResponse()
+	r.client = nil
+	r.request = nil
+	return nil
+}
+
+func (r *requestReader) cleanUpResponse() {
+	if r.currentResponse != nil {
+		r.currentResponse.Body.Close()
+		r.currentResponse = nil
+	}
+}
diff --git a/vendor/github.com/moby/moby/registry/resumable/resumablerequestreader_test.go b/vendor/github.com/moby/moby/registry/resumable/resumablerequestreader_test.go
new file mode 100644
index 000000000..a632bc673
--- /dev/null
+++ b/vendor/github.com/moby/moby/registry/resumable/resumablerequestreader_test.go
@@ -0,0 +1,256 @@
+package resumable
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/http/httptest"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestResumableRequestHeaderSimpleErrors(t *testing.T) {
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		fmt.Fprintln(w, "Hello, world !")
+	}))
+	defer ts.Close()
+
+	client := &http.Client{}
+
+	var req *http.Request
+	req, err := http.NewRequest("GET", ts.URL, nil)
+	require.NoError(t, err)
+
+	resreq := &requestReader{}
+	_, err = resreq.Read([]byte{})
+	assert.EqualError(t, err, "client and request can't be nil")
+
+	resreq = &requestReader{
+		client:    client,
+		request:   req,
+		totalSize: -1,
+	}
+	_, err = resreq.Read([]byte{})
+	assert.EqualError(t, err, "failed to auto detect content length")
+}
+
+// Not too many failures: Read waits and returns (0, nil) so the caller can retry.
+func TestResumableRequestHeaderNotTooMuchFailures(t *testing.T) {
+	client := &http.Client{}
+
+	var badReq *http.Request
+	badReq, err := http.NewRequest("GET", "I'm not an url", nil)
+	require.NoError(t, err)
+
+	resreq := &requestReader{
+		client:       client,
+		request:      badReq,
+		failures:     0,
+		maxFailures:  2,
+		waitDuration: 10 * time.Millisecond,
+	}
+	read, err := resreq.Read([]byte{})
+	require.NoError(t, err)
+	assert.Equal(t, 0, read)
+}
+
+// Too many failures: the underlying error is returned to the caller.
+func TestResumableRequestHeaderTooMuchFailures(t *testing.T) {
+	client := &http.Client{}
+
+	var badReq *http.Request
+	badReq, err := http.NewRequest("GET", "I'm not an url", nil)
+	require.NoError(t, err)
+
+	resreq := &requestReader{
+		client:      client,
+		request:     badReq,
+		failures:    0,
+		maxFailures: 1,
+	}
+	defer resreq.Close()
+
+	expectedError := `Get I%27m%20not%20an%20url: unsupported protocol scheme ""`
+	read, err := resreq.Read([]byte{})
+	assert.EqualError(t, err, expectedError)
+	assert.Equal(t, 0, read)
+}
+
+type errorReaderCloser struct{}
+
+func (errorReaderCloser) Close() error { return nil }
+
+func (errorReaderCloser) Read(p []byte) (n int, err error) {
+	return 0, fmt.Errorf("An error occurred")
+}
+
+// If an unknown error is encountered, Read returns (0, nil) and logs it.
+func TestResumableRequestReaderWithReadError(t *testing.T) {
+	var req *http.Request
+	req, err := http.NewRequest("GET", "", nil)
+	require.NoError(t, err)
+
+	client := &http.Client{}
+
+	response := &http.Response{
+		Status:        "500 Internal Server",
+		StatusCode:    500,
+		ContentLength: 0,
+		Close:         true,
+		Body:          errorReaderCloser{},
+	}
+
+	resreq := &requestReader{
+		client:          client,
+		request:         req,
+		currentResponse: response,
+		lastRange:       1,
+		totalSize:       1,
+	}
+	defer resreq.Close()
+
+	buf := make([]byte, 1)
+	read, err := resreq.Read(buf)
+	require.NoError(t, err)
+
+	assert.Equal(t, 0, read)
+}
+
+func TestResumableRequestReaderWithEOFWith416Response(t *testing.T) {
+	var req *http.Request
+	req, err := http.NewRequest("GET", "", nil)
+	require.NoError(t, err)
+
+	client := &http.Client{}
+
+	response := &http.Response{
+		Status:        "416 Requested Range Not Satisfiable",
+		StatusCode:    416,
+		ContentLength: 0,
+		Close:         true,
+		Body:          ioutil.NopCloser(strings.NewReader("")),
+	}
+
+	resreq := &requestReader{
+		client:          client,
+		request:         req,
+		currentResponse: response,
+		lastRange:       1,
+		totalSize:       1,
+	}
+	defer resreq.Close()
+
+	buf := make([]byte, 1)
+	_, err = resreq.Read(buf)
+	assert.EqualError(t, err, io.EOF.Error())
+}
+
+func TestResumableRequestReaderWithServerDoesntSupportByteRanges(t *testing.T) {
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if r.Header.Get("Range") == "" {
+			t.Fatalf("Expected a Range HTTP header, got nothing")
+		}
+	}))
+	defer ts.Close()
+
+	var req *http.Request
+	req, err := http.NewRequest("GET", ts.URL, nil)
+	require.NoError(t, err)
+
+	client := &http.Client{}
+
+	resreq := &requestReader{
+		client:  client,
+		request: req,
+		
lastRange: 1, + } + defer resreq.Close() + + buf := make([]byte, 2) + _, err = resreq.Read(buf) + assert.EqualError(t, err, "the server doesn't support byte ranges") +} + +func TestResumableRequestReaderWithZeroTotalSize(t *testing.T) { + srvtxt := "some response text data" + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, srvtxt) + })) + defer ts.Close() + + var req *http.Request + req, err := http.NewRequest("GET", ts.URL, nil) + require.NoError(t, err) + + client := &http.Client{} + retries := uint32(5) + + resreq := NewRequestReader(client, req, retries, 0) + defer resreq.Close() + + data, err := ioutil.ReadAll(resreq) + require.NoError(t, err) + + resstr := strings.TrimSuffix(string(data), "\n") + assert.Equal(t, srvtxt, resstr) +} + +func TestResumableRequestReader(t *testing.T) { + srvtxt := "some response text data" + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, srvtxt) + })) + defer ts.Close() + + var req *http.Request + req, err := http.NewRequest("GET", ts.URL, nil) + require.NoError(t, err) + + client := &http.Client{} + retries := uint32(5) + imgSize := int64(len(srvtxt)) + + resreq := NewRequestReader(client, req, retries, imgSize) + defer resreq.Close() + + data, err := ioutil.ReadAll(resreq) + require.NoError(t, err) + + resstr := strings.TrimSuffix(string(data), "\n") + assert.Equal(t, srvtxt, resstr) +} + +func TestResumableRequestReaderWithInitialResponse(t *testing.T) { + srvtxt := "some response text data" + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, srvtxt) + })) + defer ts.Close() + + var req *http.Request + req, err := http.NewRequest("GET", ts.URL, nil) + require.NoError(t, err) + + client := &http.Client{} + retries := uint32(5) + imgSize := int64(len(srvtxt)) + + res, err := client.Do(req) + require.NoError(t, err) + + resreq := NewRequestReaderWithInitialResponse(client, req, retries, imgSize, res) + defer resreq.Close() + + data, err := ioutil.ReadAll(resreq) + require.NoError(t, err) + + resstr := strings.TrimSuffix(string(data), "\n") + assert.Equal(t, srvtxt, resstr) +} diff --git a/vendor/github.com/moby/moby/registry/service.go b/vendor/github.com/moby/moby/registry/service.go new file mode 100644 index 000000000..34e8a13f9 --- /dev/null +++ b/vendor/github.com/moby/moby/registry/service.go @@ -0,0 +1,327 @@ +package registry + +import ( + "crypto/tls" + "fmt" + "net/http" + "net/url" + "strings" + "sync" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" +) + +const ( + // DefaultSearchLimit is the default value for maximum number of returned search results. + DefaultSearchLimit = 25 +) + +// Service is the interface defining what a registry service should implement. 
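+//
+// A minimal usage sketch (error handling elided; assumes the default
+// service options):
+//
+//	svc := NewService(ServiceOptions{})
+//	named, _ := reference.ParseNormalizedNamed("ubuntu")
+//	repoInfo, _ := svc.ResolveRepository(named)
+//	endpoints, _ := svc.LookupPullEndpoints(reference.Domain(repoInfo.Name))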
+type Service interface { + Auth(ctx context.Context, authConfig *types.AuthConfig, userAgent string) (status, token string, err error) + LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) + LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error) + ResolveRepository(name reference.Named) (*RepositoryInfo, error) + Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) + ServiceConfig() *registrytypes.ServiceConfig + TLSConfig(hostname string) (*tls.Config, error) + LoadAllowNondistributableArtifacts([]string) error + LoadMirrors([]string) error + LoadInsecureRegistries([]string) error +} + +// DefaultService is a registry service. It tracks configuration data such as a list +// of mirrors. +type DefaultService struct { + config *serviceConfig + mu sync.Mutex +} + +// NewService returns a new instance of DefaultService ready to be +// installed into an engine. +func NewService(options ServiceOptions) *DefaultService { + return &DefaultService{ + config: newServiceConfig(options), + } +} + +// ServiceConfig returns the public registry service configuration. +func (s *DefaultService) ServiceConfig() *registrytypes.ServiceConfig { + s.mu.Lock() + defer s.mu.Unlock() + + servConfig := registrytypes.ServiceConfig{ + AllowNondistributableArtifactsCIDRs: make([]*(registrytypes.NetIPNet), 0), + AllowNondistributableArtifactsHostnames: make([]string, 0), + InsecureRegistryCIDRs: make([]*(registrytypes.NetIPNet), 0), + IndexConfigs: make(map[string]*(registrytypes.IndexInfo)), + Mirrors: make([]string, 0), + } + + // construct a new ServiceConfig which will not retrieve s.Config directly, + // and look up items in s.config with mu locked + servConfig.AllowNondistributableArtifactsCIDRs = append(servConfig.AllowNondistributableArtifactsCIDRs, s.config.ServiceConfig.AllowNondistributableArtifactsCIDRs...) + servConfig.AllowNondistributableArtifactsHostnames = append(servConfig.AllowNondistributableArtifactsHostnames, s.config.ServiceConfig.AllowNondistributableArtifactsHostnames...) + servConfig.InsecureRegistryCIDRs = append(servConfig.InsecureRegistryCIDRs, s.config.ServiceConfig.InsecureRegistryCIDRs...) + + for key, value := range s.config.ServiceConfig.IndexConfigs { + servConfig.IndexConfigs[key] = value + } + + servConfig.Mirrors = append(servConfig.Mirrors, s.config.ServiceConfig.Mirrors...) + + return &servConfig +} + +// LoadAllowNondistributableArtifacts loads allow-nondistributable-artifacts registries for Service. +func (s *DefaultService) LoadAllowNondistributableArtifacts(registries []string) error { + s.mu.Lock() + defer s.mu.Unlock() + + return s.config.LoadAllowNondistributableArtifacts(registries) +} + +// LoadMirrors loads registry mirrors for Service +func (s *DefaultService) LoadMirrors(mirrors []string) error { + s.mu.Lock() + defer s.mu.Unlock() + + return s.config.LoadMirrors(mirrors) +} + +// LoadInsecureRegistries loads insecure registries for Service +func (s *DefaultService) LoadInsecureRegistries(registries []string) error { + s.mu.Lock() + defer s.mu.Unlock() + + return s.config.LoadInsecureRegistries(registries) +} + +// Auth contacts the public registry with the provided credentials, +// and returns OK if authentication was successful. +// It can be used to verify the validity of a client's credentials. 
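+//
+// A sketch of a typical call (placeholder credentials):
+//
+//	status, token, err := svc.Auth(ctx, &types.AuthConfig{
+//		Username:      "user",
+//		Password:      "secret",
+//		ServerAddress: "registry.example.com",
+//	}, "engine/17.06")
+//
+// An empty ServerAddress falls back to IndexServer, and a bare host is
+// given an https:// prefix before endpoints are looked up.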
+func (s *DefaultService) Auth(ctx context.Context, authConfig *types.AuthConfig, userAgent string) (status, token string, err error) {
+	// TODO Use ctx when logging in
+	serverAddress := authConfig.ServerAddress
+	if serverAddress == "" {
+		serverAddress = IndexServer
+	}
+	if !strings.HasPrefix(serverAddress, "https://") && !strings.HasPrefix(serverAddress, "http://") {
+		serverAddress = "https://" + serverAddress
+	}
+	u, err := url.Parse(serverAddress)
+	if err != nil {
+		return "", "", fmt.Errorf("unable to parse server address: %v", err)
+	}
+
+	endpoints, err := s.LookupPushEndpoints(u.Host)
+	if err != nil {
+		return "", "", err
+	}
+
+	for _, endpoint := range endpoints {
+		login := loginV2
+		if endpoint.Version == APIVersion1 {
+			login = loginV1
+		}
+
+		status, token, err = login(authConfig, endpoint, userAgent)
+		if err == nil {
+			return
+		}
+		if fErr, ok := err.(fallbackError); ok {
+			err = fErr.err
+			logrus.Infof("Error logging in to %s endpoint, trying next endpoint: %v", endpoint.Version, err)
+			continue
+		}
+		return "", "", err
+	}
+
+	return "", "", err
+}
+
+// splitReposSearchTerm breaks a search term into an index name and remote name,
+// e.g. "ubuntu" -> ("docker.io", "ubuntu") and
+// "myregistry:5000/foo" -> ("myregistry:5000", "foo").
+func splitReposSearchTerm(reposName string) (string, string) {
+	nameParts := strings.SplitN(reposName, "/", 2)
+	var indexName, remoteName string
+	if len(nameParts) == 1 || (!strings.Contains(nameParts[0], ".") &&
+		!strings.Contains(nameParts[0], ":") && nameParts[0] != "localhost") {
+		// This is a Docker Hub repository (e.g. samalba/hipache or ubuntu),
+		// so the index is 'docker.io'.
+		indexName = IndexName
+		remoteName = reposName
+	} else {
+		indexName = nameParts[0]
+		remoteName = nameParts[1]
+	}
+	return indexName, remoteName
+}
+
+// Search queries the public registry for images matching the specified
+// search terms, and returns the results.
+func (s *DefaultService) Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) {
+	// TODO Use ctx when searching for repositories
+	if err := validateNoScheme(term); err != nil {
+		return nil, err
+	}
+
+	indexName, remoteName := splitReposSearchTerm(term)
+
+	// Search is a long-running operation; hold the lock only while reading
+	// s.config, to avoid blocking other callers.
+	s.mu.Lock()
+	index, err := newIndexInfo(s.config, indexName)
+	s.mu.Unlock()
+
+	if err != nil {
+		return nil, err
+	}
+
+	// TODO: Search multiple indexes.
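+	// The remainder of Search picks the client: when the caller supplied an
+	// identity token, a v2 client authorized for the "catalog:search" scope
+	// is attempted first; otherwise (or on fallback) the v1 endpoint client
+	// is decorated via authorizeClient and used directly.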
+ endpoint, err := NewV1Endpoint(index, userAgent, http.Header(headers)) + if err != nil { + return nil, err + } + + var client *http.Client + if authConfig != nil && authConfig.IdentityToken != "" && authConfig.Username != "" { + creds := NewStaticCredentialStore(authConfig) + scopes := []auth.Scope{ + auth.RegistryScope{ + Name: "catalog", + Actions: []string{"search"}, + }, + } + + modifiers := DockerHeaders(userAgent, nil) + v2Client, foundV2, err := v2AuthHTTPClient(endpoint.URL, endpoint.client.Transport, modifiers, creds, scopes) + if err != nil { + if fErr, ok := err.(fallbackError); ok { + logrus.Errorf("Cannot use identity token for search, v2 auth not supported: %v", fErr.err) + } else { + return nil, err + } + } else if foundV2 { + // Copy non transport http client features + v2Client.Timeout = endpoint.client.Timeout + v2Client.CheckRedirect = endpoint.client.CheckRedirect + v2Client.Jar = endpoint.client.Jar + + logrus.Debugf("using v2 client for search to %s", endpoint.URL) + client = v2Client + } + } + + if client == nil { + client = endpoint.client + if err := authorizeClient(client, authConfig, endpoint); err != nil { + return nil, err + } + } + + r := newSession(client, authConfig, endpoint) + + if index.Official { + localName := remoteName + if strings.HasPrefix(localName, "library/") { + // If pull "library/foo", it's stored locally under "foo" + localName = strings.SplitN(localName, "/", 2)[1] + } + + return r.SearchRepositories(localName, limit) + } + return r.SearchRepositories(remoteName, limit) +} + +// ResolveRepository splits a repository name into its components +// and configuration of the associated registry. +func (s *DefaultService) ResolveRepository(name reference.Named) (*RepositoryInfo, error) { + s.mu.Lock() + defer s.mu.Unlock() + return newRepositoryInfo(s.config, name) +} + +// APIEndpoint represents a remote API endpoint +type APIEndpoint struct { + Mirror bool + URL *url.URL + Version APIVersion + AllowNondistributableArtifacts bool + Official bool + TrimHostname bool + TLSConfig *tls.Config +} + +// ToV1Endpoint returns a V1 API endpoint based on the APIEndpoint +func (e APIEndpoint) ToV1Endpoint(userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { + return newV1Endpoint(*e.URL, e.TLSConfig, userAgent, metaHeaders) +} + +// TLSConfig constructs a client TLS configuration based on server defaults +func (s *DefaultService) TLSConfig(hostname string) (*tls.Config, error) { + s.mu.Lock() + defer s.mu.Unlock() + + return newTLSConfig(hostname, isSecureIndex(s.config, hostname)) +} + +// tlsConfig constructs a client TLS configuration based on server defaults +func (s *DefaultService) tlsConfig(hostname string) (*tls.Config, error) { + return newTLSConfig(hostname, isSecureIndex(s.config, hostname)) +} + +func (s *DefaultService) tlsConfigForMirror(mirrorURL *url.URL) (*tls.Config, error) { + return s.tlsConfig(mirrorURL.Host) +} + +// LookupPullEndpoints creates a list of endpoints to try to pull from, in order of preference. +// It gives preference to v2 endpoints over v1, mirrors over the actual +// registry, and HTTPS over plain HTTP. +func (s *DefaultService) LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) { + s.mu.Lock() + defer s.mu.Unlock() + + return s.lookupEndpoints(hostname) +} + +// LookupPushEndpoints creates a list of endpoints to try to push to, in order of preference. +// It gives preference to v2 endpoints over v1, and HTTPS over plain HTTP. +// Mirrors are not included. 
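+//
+// A sketch of the pull/push asymmetry (assuming one configured mirror):
+//
+//	svc := NewService(ServiceOptions{Mirrors: []string{"https://my.mirror"}})
+//	pull, _ := svc.LookupPullEndpoints("docker.io") // mirror first, official registry last
+//	push, _ := svc.LookupPushEndpoints("docker.io") // official registry only, no mirror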
+func (s *DefaultService) LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error) { + s.mu.Lock() + defer s.mu.Unlock() + + allEndpoints, err := s.lookupEndpoints(hostname) + if err == nil { + for _, endpoint := range allEndpoints { + if !endpoint.Mirror { + endpoints = append(endpoints, endpoint) + } + } + } + return endpoints, err +} + +func (s *DefaultService) lookupEndpoints(hostname string) (endpoints []APIEndpoint, err error) { + endpoints, err = s.lookupV2Endpoints(hostname) + if err != nil { + return nil, err + } + + if s.config.V2Only { + return endpoints, nil + } + + legacyEndpoints, err := s.lookupV1Endpoints(hostname) + if err != nil { + return nil, err + } + endpoints = append(endpoints, legacyEndpoints...) + + return endpoints, nil +} diff --git a/vendor/github.com/moby/moby/registry/service_v1.go b/vendor/github.com/moby/moby/registry/service_v1.go new file mode 100644 index 000000000..1d251aec6 --- /dev/null +++ b/vendor/github.com/moby/moby/registry/service_v1.go @@ -0,0 +1,40 @@ +package registry + +import "net/url" + +func (s *DefaultService) lookupV1Endpoints(hostname string) (endpoints []APIEndpoint, err error) { + if hostname == DefaultNamespace || hostname == DefaultV2Registry.Host || hostname == IndexHostname { + return []APIEndpoint{}, nil + } + + tlsConfig, err := s.tlsConfig(hostname) + if err != nil { + return nil, err + } + + endpoints = []APIEndpoint{ + { + URL: &url.URL{ + Scheme: "https", + Host: hostname, + }, + Version: APIVersion1, + TrimHostname: true, + TLSConfig: tlsConfig, + }, + } + + if tlsConfig.InsecureSkipVerify { + endpoints = append(endpoints, APIEndpoint{ // or this + URL: &url.URL{ + Scheme: "http", + Host: hostname, + }, + Version: APIVersion1, + TrimHostname: true, + // used to check if supposed to be secure via InsecureSkipVerify + TLSConfig: tlsConfig, + }) + } + return endpoints, nil +} diff --git a/vendor/github.com/moby/moby/registry/service_v1_test.go b/vendor/github.com/moby/moby/registry/service_v1_test.go new file mode 100644 index 000000000..bd15dfffb --- /dev/null +++ b/vendor/github.com/moby/moby/registry/service_v1_test.go @@ -0,0 +1,23 @@ +package registry + +import "testing" + +func TestLookupV1Endpoints(t *testing.T) { + s := NewService(ServiceOptions{}) + + cases := []struct { + hostname string + expectedLen int + }{ + {"example.com", 1}, + {DefaultNamespace, 0}, + {DefaultV2Registry.Host, 0}, + {IndexHostname, 0}, + } + + for _, c := range cases { + if ret, err := s.lookupV1Endpoints(c.hostname); err != nil || len(ret) != c.expectedLen { + t.Errorf("lookupV1Endpoints(`"+c.hostname+"`) returned %+v and %+v", ret, err) + } + } +} diff --git a/vendor/github.com/moby/moby/registry/service_v2.go b/vendor/github.com/moby/moby/registry/service_v2.go new file mode 100644 index 000000000..68466f823 --- /dev/null +++ b/vendor/github.com/moby/moby/registry/service_v2.go @@ -0,0 +1,82 @@ +package registry + +import ( + "net/url" + "strings" + + "github.com/docker/go-connections/tlsconfig" +) + +func (s *DefaultService) lookupV2Endpoints(hostname string) (endpoints []APIEndpoint, err error) { + tlsConfig := tlsconfig.ServerDefault() + if hostname == DefaultNamespace || hostname == IndexHostname { + // v2 mirrors + for _, mirror := range s.config.Mirrors { + if !strings.HasPrefix(mirror, "http://") && !strings.HasPrefix(mirror, "https://") { + mirror = "https://" + mirror + } + mirrorURL, err := url.Parse(mirror) + if err != nil { + return nil, err + } + mirrorTLSConfig, err := s.tlsConfigForMirror(mirrorURL) + if err 
!= nil { + return nil, err + } + endpoints = append(endpoints, APIEndpoint{ + URL: mirrorURL, + // guess mirrors are v2 + Version: APIVersion2, + Mirror: true, + TrimHostname: true, + TLSConfig: mirrorTLSConfig, + }) + } + // v2 registry + endpoints = append(endpoints, APIEndpoint{ + URL: DefaultV2Registry, + Version: APIVersion2, + Official: true, + TrimHostname: true, + TLSConfig: tlsConfig, + }) + + return endpoints, nil + } + + ana := allowNondistributableArtifacts(s.config, hostname) + + tlsConfig, err = s.tlsConfig(hostname) + if err != nil { + return nil, err + } + + endpoints = []APIEndpoint{ + { + URL: &url.URL{ + Scheme: "https", + Host: hostname, + }, + Version: APIVersion2, + AllowNondistributableArtifacts: ana, + TrimHostname: true, + TLSConfig: tlsConfig, + }, + } + + if tlsConfig.InsecureSkipVerify { + endpoints = append(endpoints, APIEndpoint{ + URL: &url.URL{ + Scheme: "http", + Host: hostname, + }, + Version: APIVersion2, + AllowNondistributableArtifacts: ana, + TrimHostname: true, + // used to check if supposed to be secure via InsecureSkipVerify + TLSConfig: tlsConfig, + }) + } + + return endpoints, nil +} diff --git a/vendor/github.com/moby/moby/registry/session.go b/vendor/github.com/moby/moby/registry/session.go new file mode 100644 index 000000000..9d7f32193 --- /dev/null +++ b/vendor/github.com/moby/moby/registry/session.go @@ -0,0 +1,778 @@ +package registry + +import ( + "bytes" + "crypto/sha256" + "errors" + "sync" + // this is required for some certificates + _ "crypto/sha512" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/cookiejar" + "net/url" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/tarsum" + "github.com/docker/docker/registry/resumable" +) + +var ( + // ErrRepoNotFound is returned if the repository didn't exist on the + // remote side + ErrRepoNotFound = errors.New("Repository not found") +) + +// A Session is used to communicate with a V1 registry +type Session struct { + indexEndpoint *V1Endpoint + client *http.Client + // TODO(tiborvass): remove authConfig + authConfig *types.AuthConfig + id string +} + +type authTransport struct { + http.RoundTripper + *types.AuthConfig + + alwaysSetBasicAuth bool + token []string + + mu sync.Mutex // guards modReq + modReq map[*http.Request]*http.Request // original -> modified +} + +// AuthTransport handles the auth layer when communicating with a v1 registry (private or official) +// +// For private v1 registries, set alwaysSetBasicAuth to true. +// +// For the official v1 registry, if there isn't already an Authorization header in the request, +// but there is an X-Docker-Token header set to true, then Basic Auth will be used to set the Authorization header. +// After sending the request with the provided base http.RoundTripper, if an X-Docker-Token header, representing +// a token, is present in the response, then it gets cached and sent in the Authorization header of all subsequent +// requests. +// +// If the server sends a token without the client having requested it, it is ignored. +// +// This RoundTripper also has a CancelRequest method important for correct timeout handling. 
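+//
+// A minimal construction sketch, mirroring how the tests wire it up
+// (placeholder credentials; a nil base selects http.DefaultTransport):
+//
+//	var rt http.RoundTripper // nil -> http.DefaultTransport
+//	rt = AuthTransport(rt, &types.AuthConfig{Username: "user", Password: "secret"}, true)
+//	client := HTTPClient(rt)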
+func AuthTransport(base http.RoundTripper, authConfig *types.AuthConfig, alwaysSetBasicAuth bool) http.RoundTripper { + if base == nil { + base = http.DefaultTransport + } + return &authTransport{ + RoundTripper: base, + AuthConfig: authConfig, + alwaysSetBasicAuth: alwaysSetBasicAuth, + modReq: make(map[*http.Request]*http.Request), + } +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header, len(r.Header)) + for k, s := range r.Header { + r2.Header[k] = append([]string(nil), s...) + } + + return r2 +} + +// RoundTrip changes an HTTP request's headers to add the necessary +// authentication-related headers +func (tr *authTransport) RoundTrip(orig *http.Request) (*http.Response, error) { + // Authorization should not be set on 302 redirect for untrusted locations. + // This logic mirrors the behavior in addRequiredHeadersToRedirectedRequests. + // As the authorization logic is currently implemented in RoundTrip, + // a 302 redirect is detected by looking at the Referrer header as go http package adds said header. + // This is safe as Docker doesn't set Referrer in other scenarios. + if orig.Header.Get("Referer") != "" && !trustedLocation(orig) { + return tr.RoundTripper.RoundTrip(orig) + } + + req := cloneRequest(orig) + tr.mu.Lock() + tr.modReq[orig] = req + tr.mu.Unlock() + + if tr.alwaysSetBasicAuth { + if tr.AuthConfig == nil { + return nil, errors.New("unexpected error: empty auth config") + } + req.SetBasicAuth(tr.Username, tr.Password) + return tr.RoundTripper.RoundTrip(req) + } + + // Don't override + if req.Header.Get("Authorization") == "" { + if req.Header.Get("X-Docker-Token") == "true" && tr.AuthConfig != nil && len(tr.Username) > 0 { + req.SetBasicAuth(tr.Username, tr.Password) + } else if len(tr.token) > 0 { + req.Header.Set("Authorization", "Token "+strings.Join(tr.token, ",")) + } + } + resp, err := tr.RoundTripper.RoundTrip(req) + if err != nil { + delete(tr.modReq, orig) + return nil, err + } + if len(resp.Header["X-Docker-Token"]) > 0 { + tr.token = resp.Header["X-Docker-Token"] + } + resp.Body = &ioutils.OnEOFReader{ + Rc: resp.Body, + Fn: func() { + tr.mu.Lock() + delete(tr.modReq, orig) + tr.mu.Unlock() + }, + } + return resp, nil +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (tr *authTransport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := tr.RoundTripper.(canceler); ok { + tr.mu.Lock() + modReq := tr.modReq[req] + delete(tr.modReq, req) + tr.mu.Unlock() + cr.CancelRequest(modReq) + } +} + +func authorizeClient(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) error { + var alwaysSetBasicAuth bool + + // If we're working with a standalone private registry over HTTPS, send Basic Auth headers + // alongside all our requests. + if endpoint.String() != IndexServer && endpoint.URL.Scheme == "https" { + info, err := endpoint.Ping() + if err != nil { + return err + } + if info.Standalone && authConfig != nil { + logrus.Debugf("Endpoint %s is eligible for private registry. Enabling decorator.", endpoint.String()) + alwaysSetBasicAuth = true + } + } + + // Annotate the transport unconditionally so that v2 can + // properly fallback on v1 when an image is not found. 
+ client.Transport = AuthTransport(client.Transport, authConfig, alwaysSetBasicAuth) + + jar, err := cookiejar.New(nil) + if err != nil { + return errors.New("cookiejar.New is not supposed to return an error") + } + client.Jar = jar + + return nil +} + +func newSession(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) *Session { + return &Session{ + authConfig: authConfig, + client: client, + indexEndpoint: endpoint, + id: stringid.GenerateRandomID(), + } +} + +// NewSession creates a new session +// TODO(tiborvass): remove authConfig param once registry client v2 is vendored +func NewSession(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) (*Session, error) { + if err := authorizeClient(client, authConfig, endpoint); err != nil { + return nil, err + } + + return newSession(client, authConfig, endpoint), nil +} + +// ID returns this registry session's ID. +func (r *Session) ID() string { + return r.id +} + +// GetRemoteHistory retrieves the history of a given image from the registry. +// It returns a list of the parent's JSON files (including the requested image). +func (r *Session) GetRemoteHistory(imgID, registry string) ([]string, error) { + res, err := r.client.Get(registry + "images/" + imgID + "/ancestry") + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != 200 { + if res.StatusCode == 401 { + return nil, errcode.ErrorCodeUnauthorized.WithArgs() + } + return nil, newJSONError(fmt.Sprintf("Server error: %d trying to fetch remote history for %s", res.StatusCode, imgID), res) + } + + var history []string + if err := json.NewDecoder(res.Body).Decode(&history); err != nil { + return nil, fmt.Errorf("Error while reading the http response: %v", err) + } + + logrus.Debugf("Ancestry: %v", history) + return history, nil +} + +// LookupRemoteImage checks if an image exists in the registry +func (r *Session) LookupRemoteImage(imgID, registry string) error { + res, err := r.client.Get(registry + "images/" + imgID + "/json") + if err != nil { + return err + } + res.Body.Close() + if res.StatusCode != 200 { + return newJSONError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) + } + return nil +} + +// GetRemoteImageJSON retrieves an image's JSON metadata from the registry. 
+func (r *Session) GetRemoteImageJSON(imgID, registry string) ([]byte, int64, error) { + res, err := r.client.Get(registry + "images/" + imgID + "/json") + if err != nil { + return nil, -1, fmt.Errorf("Failed to download json: %s", err) + } + defer res.Body.Close() + if res.StatusCode != 200 { + return nil, -1, newJSONError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) + } + // if the size header is not present, then set it to '-1' + imageSize := int64(-1) + if hdr := res.Header.Get("X-Docker-Size"); hdr != "" { + imageSize, err = strconv.ParseInt(hdr, 10, 64) + if err != nil { + return nil, -1, err + } + } + + jsonString, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, -1, fmt.Errorf("Failed to parse downloaded json: %v (%s)", err, jsonString) + } + return jsonString, imageSize, nil +} + +// GetRemoteImageLayer retrieves an image layer from the registry +func (r *Session) GetRemoteImageLayer(imgID, registry string, imgSize int64) (io.ReadCloser, error) { + var ( + statusCode = 0 + res *http.Response + err error + imageURL = fmt.Sprintf("%simages/%s/layer", registry, imgID) + ) + + req, err := http.NewRequest("GET", imageURL, nil) + if err != nil { + return nil, fmt.Errorf("Error while getting from the server: %v", err) + } + + res, err = r.client.Do(req) + if err != nil { + logrus.Debugf("Error contacting registry %s: %v", registry, err) + // the only case err != nil && res != nil is https://golang.org/src/net/http/client.go#L515 + if res != nil { + if res.Body != nil { + res.Body.Close() + } + statusCode = res.StatusCode + } + return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", + statusCode, imgID) + } + + if res.StatusCode != 200 { + res.Body.Close() + return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", + res.StatusCode, imgID) + } + + if res.Header.Get("Accept-Ranges") == "bytes" && imgSize > 0 { + logrus.Debug("server supports resume") + return resumable.NewRequestReaderWithInitialResponse(r.client, req, 5, imgSize, res), nil + } + logrus.Debug("server doesn't support resume") + return res.Body, nil +} + +// GetRemoteTag retrieves the tag named in the askedTag argument from the given +// repository. It queries each of the registries supplied in the registries +// argument, and returns data from the first one that answers the query +// successfully. +func (r *Session) GetRemoteTag(registries []string, repositoryRef reference.Named, askedTag string) (string, error) { + repository := reference.Path(repositoryRef) + + if strings.Count(repository, "/") == 0 { + // This will be removed once the registry supports auto-resolution on + // the "library" namespace + repository = "library/" + repository + } + for _, host := range registries { + endpoint := fmt.Sprintf("%srepositories/%s/tags/%s", host, repository, askedTag) + res, err := r.client.Get(endpoint) + if err != nil { + return "", err + } + + logrus.Debugf("Got status code %d from %s", res.StatusCode, endpoint) + defer res.Body.Close() + + if res.StatusCode == 404 { + return "", ErrRepoNotFound + } + if res.StatusCode != 200 { + continue + } + + var tagID string + if err := json.NewDecoder(res.Body).Decode(&tagID); err != nil { + return "", err + } + return tagID, nil + } + return "", fmt.Errorf("Could not reach any registry endpoint") +} + +// GetRemoteTags retrieves all tags from the given repository. It queries each +// of the registries supplied in the registries argument, and returns data from +// the first one that answers the query successfully. 
It returns a map with +// tag names as the keys and image IDs as the values. +func (r *Session) GetRemoteTags(registries []string, repositoryRef reference.Named) (map[string]string, error) { + repository := reference.Path(repositoryRef) + + if strings.Count(repository, "/") == 0 { + // This will be removed once the registry supports auto-resolution on + // the "library" namespace + repository = "library/" + repository + } + for _, host := range registries { + endpoint := fmt.Sprintf("%srepositories/%s/tags", host, repository) + res, err := r.client.Get(endpoint) + if err != nil { + return nil, err + } + + logrus.Debugf("Got status code %d from %s", res.StatusCode, endpoint) + defer res.Body.Close() + + if res.StatusCode == 404 { + return nil, ErrRepoNotFound + } + if res.StatusCode != 200 { + continue + } + + result := make(map[string]string) + if err := json.NewDecoder(res.Body).Decode(&result); err != nil { + return nil, err + } + return result, nil + } + return nil, fmt.Errorf("Could not reach any registry endpoint") +} + +func buildEndpointsList(headers []string, indexEp string) ([]string, error) { + var endpoints []string + parsedURL, err := url.Parse(indexEp) + if err != nil { + return nil, err + } + var urlScheme = parsedURL.Scheme + // The registry's URL scheme has to match the Index' + for _, ep := range headers { + epList := strings.Split(ep, ",") + for _, epListElement := range epList { + endpoints = append( + endpoints, + fmt.Sprintf("%s://%s/v1/", urlScheme, strings.TrimSpace(epListElement))) + } + } + return endpoints, nil +} + +// GetRepositoryData returns lists of images and endpoints for the repository +func (r *Session) GetRepositoryData(name reference.Named) (*RepositoryData, error) { + repositoryTarget := fmt.Sprintf("%srepositories/%s/images", r.indexEndpoint.String(), reference.Path(name)) + + logrus.Debugf("[registry] Calling GET %s", repositoryTarget) + + req, err := http.NewRequest("GET", repositoryTarget, nil) + if err != nil { + return nil, err + } + // this will set basic auth in r.client.Transport and send cached X-Docker-Token headers for all subsequent requests + req.Header.Set("X-Docker-Token", "true") + res, err := r.client.Do(req) + if err != nil { + // check if the error is because of i/o timeout + // and return a non-obtuse error message for users + // "Get https://index.docker.io/v1/repositories/library/busybox/images: i/o timeout" + // was a top search on the docker user forum + if isTimeout(err) { + return nil, fmt.Errorf("Network timed out while trying to connect to %s. You may want to check your internet connection or if you are behind a proxy.", repositoryTarget) + } + return nil, fmt.Errorf("Error while pulling image: %v", err) + } + defer res.Body.Close() + if res.StatusCode == 401 { + return nil, errcode.ErrorCodeUnauthorized.WithArgs() + } + // TODO: Right now we're ignoring checksums in the response body. + // In the future, we need to use them to check image validity. 
+ if res.StatusCode == 404 { + return nil, newJSONError(fmt.Sprintf("HTTP code: %d", res.StatusCode), res) + } else if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + logrus.Debugf("Error reading response body: %s", err) + } + return nil, newJSONError(fmt.Sprintf("Error: Status %d trying to pull repository %s: %q", res.StatusCode, reference.Path(name), errBody), res) + } + + var endpoints []string + if res.Header.Get("X-Docker-Endpoints") != "" { + endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.String()) + if err != nil { + return nil, err + } + } else { + // Assume the endpoint is on the same host + endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", r.indexEndpoint.URL.Scheme, req.URL.Host)) + } + + remoteChecksums := []*ImgData{} + if err := json.NewDecoder(res.Body).Decode(&remoteChecksums); err != nil { + return nil, err + } + + // Forge a better object from the retrieved data + imgsData := make(map[string]*ImgData, len(remoteChecksums)) + for _, elem := range remoteChecksums { + imgsData[elem.ID] = elem + } + + return &RepositoryData{ + ImgList: imgsData, + Endpoints: endpoints, + }, nil +} + +// PushImageChecksumRegistry uploads checksums for an image +func (r *Session) PushImageChecksumRegistry(imgData *ImgData, registry string) error { + u := registry + "images/" + imgData.ID + "/checksum" + + logrus.Debugf("[registry] Calling PUT %s", u) + + req, err := http.NewRequest("PUT", u, nil) + if err != nil { + return err + } + req.Header.Set("X-Docker-Checksum", imgData.Checksum) + req.Header.Set("X-Docker-Checksum-Payload", imgData.ChecksumPayload) + + res, err := r.client.Do(req) + if err != nil { + return fmt.Errorf("Failed to upload metadata: %v", err) + } + defer res.Body.Close() + if len(res.Cookies()) > 0 { + r.client.Jar.SetCookies(req.URL, res.Cookies()) + } + if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err) + } + var jsonBody map[string]string + if err := json.Unmarshal(errBody, &jsonBody); err != nil { + errBody = []byte(err.Error()) + } else if jsonBody["error"] == "Image already exists" { + return ErrAlreadyExists + } + return fmt.Errorf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody) + } + return nil +} + +// PushImageJSONRegistry pushes JSON metadata for a local image to the registry +func (r *Session) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, registry string) error { + + u := registry + "images/" + imgData.ID + "/json" + + logrus.Debugf("[registry] Calling PUT %s", u) + + req, err := http.NewRequest("PUT", u, bytes.NewReader(jsonRaw)) + if err != nil { + return err + } + req.Header.Add("Content-type", "application/json") + + res, err := r.client.Do(req) + if err != nil { + return fmt.Errorf("Failed to upload metadata: %s", err) + } + defer res.Body.Close() + if res.StatusCode == 401 && strings.HasPrefix(registry, "http://") { + return newJSONError("HTTP code 401, Docker will not send auth headers over HTTP.", res) + } + if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + return newJSONError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) + } + var jsonBody map[string]string + if err := json.Unmarshal(errBody, &jsonBody); err != nil { + errBody = []byte(err.Error()) + } 
else if jsonBody["error"] == "Image already exists" { + return ErrAlreadyExists + } + return newJSONError(fmt.Sprintf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody), res) + } + return nil +} + +// PushImageLayerRegistry sends the checksum of an image layer to the registry +func (r *Session) PushImageLayerRegistry(imgID string, layer io.Reader, registry string, jsonRaw []byte) (checksum string, checksumPayload string, err error) { + u := registry + "images/" + imgID + "/layer" + + logrus.Debugf("[registry] Calling PUT %s", u) + + tarsumLayer, err := tarsum.NewTarSum(layer, false, tarsum.Version0) + if err != nil { + return "", "", err + } + h := sha256.New() + h.Write(jsonRaw) + h.Write([]byte{'\n'}) + checksumLayer := io.TeeReader(tarsumLayer, h) + + req, err := http.NewRequest("PUT", u, checksumLayer) + if err != nil { + return "", "", err + } + req.Header.Add("Content-Type", "application/octet-stream") + req.ContentLength = -1 + req.TransferEncoding = []string{"chunked"} + res, err := r.client.Do(req) + if err != nil { + return "", "", fmt.Errorf("Failed to upload layer: %v", err) + } + if rc, ok := layer.(io.Closer); ok { + if err := rc.Close(); err != nil { + return "", "", err + } + } + defer res.Body.Close() + + if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", newJSONError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) + } + return "", "", newJSONError(fmt.Sprintf("Received HTTP code %d while uploading layer: %q", res.StatusCode, errBody), res) + } + + checksumPayload = "sha256:" + hex.EncodeToString(h.Sum(nil)) + return tarsumLayer.Sum(jsonRaw), checksumPayload, nil +} + +// PushRegistryTag pushes a tag on the registry. 
+// Remote has the format '<user>/<repo>' +func (r *Session) PushRegistryTag(remote reference.Named, revision, tag, registry string) error { + // "jsonify" the string + revision = "\"" + revision + "\"" + path := fmt.Sprintf("repositories/%s/tags/%s", reference.Path(remote), tag) + + req, err := http.NewRequest("PUT", registry+path, strings.NewReader(revision)) + if err != nil { + return err + } + req.Header.Add("Content-type", "application/json") + req.ContentLength = int64(len(revision)) + res, err := r.client.Do(req) + if err != nil { + return err + } + res.Body.Close() + if res.StatusCode != 200 && res.StatusCode != 201 { + return newJSONError(fmt.Sprintf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, reference.Path(remote)), res) + } + return nil +} + +// PushImageJSONIndex uploads an image list to the repository +func (r *Session) PushImageJSONIndex(remote reference.Named, imgList []*ImgData, validate bool, regs []string) (*RepositoryData, error) { + cleanImgList := []*ImgData{} + if validate { + for _, elem := range imgList { + if elem.Checksum != "" { + cleanImgList = append(cleanImgList, elem) + } + } + } else { + cleanImgList = imgList + } + + imgListJSON, err := json.Marshal(cleanImgList) + if err != nil { + return nil, err + } + var suffix string + if validate { + suffix = "images" + } + u := fmt.Sprintf("%srepositories/%s/%s", r.indexEndpoint.String(), reference.Path(remote), suffix) + logrus.Debugf("[registry] PUT %s", u) + logrus.Debugf("Image list pushed to index:\n%s", imgListJSON) + headers := map[string][]string{ + "Content-type": {"application/json"}, + // this will set basic auth in r.client.Transport and send cached X-Docker-Token headers for all subsequent requests + "X-Docker-Token": {"true"}, + } + if validate { + headers["X-Docker-Endpoints"] = regs + } + + // Redirect if necessary + var res *http.Response + for { + if res, err = r.putImageRequest(u, headers, imgListJSON); err != nil { + return nil, err + } + if !shouldRedirect(res) { + break + } + res.Body.Close() + u = res.Header.Get("Location") + logrus.Debugf("Redirected to %s", u) + } + defer res.Body.Close() + + if res.StatusCode == 401 { + return nil, errcode.ErrorCodeUnauthorized.WithArgs() + } + + var tokens, endpoints []string + if !validate { + if res.StatusCode != 200 && res.StatusCode != 201 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + logrus.Debugf("Error reading response body: %s", err) + } + return nil, newJSONError(fmt.Sprintf("Error: Status %d trying to push repository %s: %q", res.StatusCode, reference.Path(remote), errBody), res) + } + tokens = res.Header["X-Docker-Token"] + logrus.Debugf("Auth token: %v", tokens) + + if res.Header.Get("X-Docker-Endpoints") == "" { + return nil, fmt.Errorf("Index response didn't contain any endpoints") + } + endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.String()) + if err != nil { + return nil, err + } + } else { + if res.StatusCode != 204 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + logrus.Debugf("Error reading response body: %s", err) + } + return nil, newJSONError(fmt.Sprintf("Error: Status %d trying to push checksums %s: %q", res.StatusCode, reference.Path(remote), errBody), res) + } + } + + return &RepositoryData{ + Endpoints: endpoints, + }, nil +} + +func (r *Session) putImageRequest(u string, headers map[string][]string, body []byte) (*http.Response, error) { + req, err := http.NewRequest("PUT", u, bytes.NewReader(body)) + if err != nil { + return nil, err
+ } + req.ContentLength = int64(len(body)) + for k, v := range headers { + req.Header[k] = v + } + response, err := r.client.Do(req) + if err != nil { + return nil, err + } + return response, nil +} + +func shouldRedirect(response *http.Response) bool { + return response.StatusCode >= 300 && response.StatusCode < 400 +} + +// SearchRepositories performs a search against the remote repository +func (r *Session) SearchRepositories(term string, limit int) (*registrytypes.SearchResults, error) { + if limit < 1 || limit > 100 { + return nil, fmt.Errorf("Limit %d is outside the range of [1, 100]", limit) + } + logrus.Debugf("Index server: %s", r.indexEndpoint) + u := r.indexEndpoint.String() + "search?q=" + url.QueryEscape(term) + "&n=" + url.QueryEscape(fmt.Sprintf("%d", limit)) + + req, err := http.NewRequest("GET", u, nil) + if err != nil { + return nil, fmt.Errorf("Error while getting from the server: %v", err) + } + // Have the AuthTransport send authentication, when logged in. + req.Header.Set("X-Docker-Token", "true") + res, err := r.client.Do(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != 200 { + return nil, newJSONError(fmt.Sprintf("Unexpected status code %d", res.StatusCode), res) + } + result := new(registrytypes.SearchResults) + return result, json.NewDecoder(res.Body).Decode(result) +} + +func isTimeout(err error) bool { + type timeout interface { + Timeout() bool + } + e := err + switch urlErr := err.(type) { + case *url.Error: + e = urlErr.Err + } + t, ok := e.(timeout) + return ok && t.Timeout() +} + +func newJSONError(msg string, res *http.Response) error { + return &jsonmessage.JSONError{ + Message: msg, + Code: res.StatusCode, + } +} diff --git a/vendor/github.com/moby/moby/registry/types.go b/vendor/github.com/moby/moby/registry/types.go new file mode 100644 index 000000000..0c3cbd691 --- /dev/null +++ b/vendor/github.com/moby/moby/registry/types.go @@ -0,0 +1,70 @@ +package registry + +import ( + "github.com/docker/distribution/reference" + registrytypes "github.com/docker/docker/api/types/registry" +) + +// RepositoryData tracks the image list, list of endpoints for a repository +type RepositoryData struct { + // ImgList is a list of images in the repository + ImgList map[string]*ImgData + // Endpoints is a list of endpoints returned in X-Docker-Endpoints + Endpoints []string +} + +// ImgData is used to transfer image checksums to and from the registry +type ImgData struct { + // ID is an opaque string that identifies the image + ID string `json:"id"` + Checksum string `json:"checksum,omitempty"` + ChecksumPayload string `json:"-"` + Tag string `json:",omitempty"` +} + +// PingResult contains the information returned when pinging a registry. It +// indicates the registry's version and whether the registry claims to be a +// standalone registry. +type PingResult struct { + // Version is the registry version supplied by the registry in an HTTP + // header + Version string `json:"version"` + // Standalone is set to true if the registry indicates it is a + // standalone registry in the X-Docker-Registry-Standalone + // header + Standalone bool `json:"standalone"` +} + +// APIVersion is an integral representation of an API version (presently +// either 1 or 2) +type APIVersion int + +func (av APIVersion) String() string { + return apiVersions[av] +} + +// API Version identifiers. 
+const ( + _ = iota + APIVersion1 APIVersion = iota + APIVersion2 +) + +var apiVersions = map[APIVersion]string{ + APIVersion1: "v1", + APIVersion2: "v2", +} + +// RepositoryInfo describes a repository +type RepositoryInfo struct { + Name reference.Named + // Index points to registry information + Index *registrytypes.IndexInfo + // Official indicates whether the repository is considered official. + // If the registry is official, and the normalized name does not + // contain a '/' (e.g. "foo"), then it is considered an official repo. + Official bool + // Class represents the class of the repository, such as "plugin" + // or "image". + Class string +} diff --git a/vendor/github.com/moby/moby/reports/2017-05-01.md b/vendor/github.com/moby/moby/reports/2017-05-01.md new file mode 100644 index 000000000..366f4fce7 --- /dev/null +++ b/vendor/github.com/moby/moby/reports/2017-05-01.md @@ -0,0 +1,35 @@ +# Development Report for May 01, 2017 + +This is the first report since the Moby project was announced at DockerCon. Thank you to everyone who stayed an extra day to attend the summit on Thursday. + +## Daily Meeting + +A daily meeting is hosted on [slack](https://dockercommunity.slack.com/) every business day at 9am PST on the channel `#moby-project`. +During this meeting, we talk about the [tasks](https://github.com/moby/moby/issues/32867) that need to be done to split moby and docker. + +## Topics discussed last week + +### The moby tool + +The moby tool currently lives at [https://github.com/moby/tool](https://github.com/moby/tool); this is only a temporary home, and it will soon be merged into [https://github.com/moby/moby](https://github.com/moby/moby). + +### The CLI split + +Ongoing work to split the Docker CLI into [https://github.com/docker/cli](https://github.com/docker/cli) is happening [here](https://github.com/moby/moby/pull/32694). +We are almost done; it should be merged soon. + +### Mailing list + +Slack works great for synchronous communication, but we need a place for async discussion. A mailing list is currently being set up. + +### Find a good and non-confusing home for the remaining monolith + +Lots of discussion and progress made on this topic, see [here](https://github.com/moby/moby/issues/32871). The work will start this week. + +## Componentization + +So far, only work on the builder has happened as part of the componentization effort. + +### builder + +The builder dev report can be found [here](builder/2017-05-01.md) diff --git a/vendor/github.com/moby/moby/reports/2017-05-08.md b/vendor/github.com/moby/moby/reports/2017-05-08.md new file mode 100644 index 000000000..7f0333541 --- /dev/null +++ b/vendor/github.com/moby/moby/reports/2017-05-08.md @@ -0,0 +1,34 @@ +# Development Report for May 08, 2017 + +## Daily Meeting + +A daily meeting is hosted on [slack](https://dockercommunity.slack.com) every business day at 9am PST on the channel `#moby-project`. +During this meeting, we talk about the [tasks](https://github.com/moby/moby/issues/32867) that need to be done to split moby and docker. + +## Topics discussed last week + +### The CLI split + +The Docker CLI was successfully moved to [https://github.com/docker/cli](https://github.com/docker/cli) last week, thanks to @tiborvass. +The Docker CLI is now compiled from the [Dockerfile](https://github.com/moby/moby/blob/a762ceace4e8c1c7ce4fb582789af9d8074be3e1/Dockerfile#L248). + +### Mailing list + +Discourse is available at [forums.mobyproject.org](https://forums.mobyproject.org/) thanks to @thaJeztah.
Mailing-list mode is enabled, so once you register there, you will receive every new thread / message via email. So far, 3 categories were created: Architecture, Meta & Support. The last missing step is to set up an email address to be able to start a new thread via email. + +### Find a place for `/pkg` + +Lots of discussion and progress made on this [topic](https://github.com/moby/moby/issues/32989) thanks to @dnephin. [Here is the list](https://gist.github.com/dnephin/35dc10f6b6b7017f058a71908b301d38) proposed for splitting/reorganizing the pkgs. + +### Find a good and non-confusing home for the remaining monolith + +@cpuguy83 is leading the effort [here](https://github.com/moby/moby/pull/33022). It's still WIP, but the approach we are experimenting with is to reorganise directories within moby/moby. + +## Componentization + +So far, only work on the builder, by @tonistiigi, has happened as part of the componentization effort. + +### builder + +The builder dev report can be found [here](builder/2017-05-08.md) + diff --git a/vendor/github.com/moby/moby/reports/2017-05-15.md b/vendor/github.com/moby/moby/reports/2017-05-15.md new file mode 100644 index 000000000..7556f9cc4 --- /dev/null +++ b/vendor/github.com/moby/moby/reports/2017-05-15.md @@ -0,0 +1,52 @@ +# Development Report for May 15, 2017 + +## Daily Meeting + +A daily meeting is hosted on [slack](https://dockercommunity.slack.com) every business day at 9am PST on the channel `#moby-project`. +During this meeting, we talk about the [tasks](https://github.com/moby/moby/issues/32867) that need to be done to split moby and docker. + +## Topics discussed last week + +### The CLI split + +Work is in progress to move the "opts" package to the docker/cli repository. The package was merged into the docker/cli +repository through [docker/cli#82](https://github.com/docker/cli/pull/82), preserving Git history, and parts that are not +used in Moby have been removed through [moby/moby#33198](https://github.com/moby/moby/pull/33198). + +### Find a good and non-confusing home for the remaining monolith + +Discussion on this topic is still ongoing, and possible approaches are being looked into. The active discussion has moved +from GitHub to [https://forums.mobyproject.org/](https://forums.mobyproject.org/t/topic-find-a-good-an-non-confusing-home-for-the-remaining-monolith/37) + +### Find a place for `/pkg` + +Concerns were raised about moving packages to separate repositories, and it was decided to put some extra effort into +breaking up / removing existing packages that likely are not good candidates to become a standalone project. + +### Update integration-cli tests + +With the removal of the CLI from the moby repository, new pull requests will have to be tested using API tests instead +of using the CLI. Discussion took place about whether these tests should use the API `client` package or be completely +independent and make raw HTTP calls (a rough sketch of both styles appears below). + +A topic was created on the forum to discuss options: [evolution of testing](https://forums.mobyproject.org/t/evolution-of-testing-moby/38) + + +### Proposal: split & containerize hack/validate + +[@AkihiroSuda](https://github.com/AkihiroSuda) is proposing to split and containerize the `hack/validate` script and +[started a topic on the forum](https://forums.mobyproject.org/t/proposal-split-containerize-hack-validate/32). An initial +proposal to add validation functionality to `vndr` (the vendoring tool in use) was rejected upstream, so alternative +approaches were discussed.
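
Returning to the integration-test discussion above, here is a rough sketch of the two styles under consideration. It is illustrative only, not taken from the moby test suite, and assumes a daemon reachable over TCP at `localhost:2375`:

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	// Style 1: go through the API `client` package.
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("client package: %d containers\n", len(containers))

	// Style 2: completely independent, raw HTTP against the same API.
	resp, err := http.Get("http://localhost:2375/containers/json")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("raw HTTP:", resp.Status)
}
```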
+ + +### Special Interest Groups + +A "SIG" category was created on the forums to provide a home for Special Interest Groups. The first SIG, [LinuxKit +Security](https://forums.mobyproject.org/t/about-the-linuxkit-security-category/44), was started (thanks +[@riyazdf](https://github.com/riyazdf)). + + +### Builder + +The builder dev report can be found [here](builder/2017-05-15.md) diff --git a/vendor/github.com/moby/moby/reports/2017-06-05.md b/vendor/github.com/moby/moby/reports/2017-06-05.md new file mode 100644 index 000000000..8e2cc3c45 --- /dev/null +++ b/vendor/github.com/moby/moby/reports/2017-06-05.md @@ -0,0 +1,36 @@ +# Development Report for June 5, 2017 + +## Daily Meeting + +A daily meeting is hosted on [slack](https://dockercommunity.slack.com) every business day at 9am PST on the channel `#moby-project`. +Lots of discussion happened during this meeting to kickstart the project, but now that we have the forums, we see less activity there. +We are discussing the future of this meeting [here](https://forums.mobyproject.org/t/of-standups-future); we will possibly move the meeting +to weekly. + +## Topics discussed last week + +### The CLI split + +Thanks to @tiborvass, the man pages, docs and completion scripts were imported to `github.com/docker/cli` [last week](https://github.com/docker/cli/pull/147). +Once everything is finalised, we will remove them from `github.com/moby/moby`. + +### Find a good and non-confusing home for the remaining monolith + +Discussion on this topic is still ongoing, and possible approaches are being looked into. The active discussion has moved +from GitHub to [https://forums.mobyproject.org/](https://forums.mobyproject.org/t/topic-find-a-good-an-non-confusing-home-for-the-remaining-monolith) + + +### Find a place for `/pkg` + +Thanks to @dnephin this topic is ongoing; you can follow progress [here](https://github.com/moby/moby/issues/32989). +Many pkgs were reorganised last week, with more to come this week. + + +### Builder + +The builder dev report can be found [here](builder/2017-06-05.md) + + +### LinuxKit + +The LinuxKit dev report can be found [here](https://github.com/linuxkit/linuxkit/blob/master/reports/2017-06-03.md) \ No newline at end of file diff --git a/vendor/github.com/moby/moby/reports/2017-06-12.md b/vendor/github.com/moby/moby/reports/2017-06-12.md new file mode 100644 index 000000000..8aef38c6b --- /dev/null +++ b/vendor/github.com/moby/moby/reports/2017-06-12.md @@ -0,0 +1,78 @@ +# Development Report for June 12, 2017 + +## Moby Summit + +The next Moby Summit will be at Docker HQ on June 19th, register [here](https://www.eventbrite.com/e/moby-summit-tickets-34483396768) + +## Daily Meeting + +### The CLI split + +Manpages and docs yaml files can now be generated on [docker/cli](https://github.com/docker/cli). +Man pages, docs and completion scripts will be removed next week, thanks to @tiborvass. + +### Find a good and non-confusing home for the remaining monolith + +Lots of discussion happened on the [forums](https://forums.mobyproject.org/t/topic-find-a-good-an-non-confusing-home-for-the-remaining-monolith). +We should expect to do those changes after the moby summit. We contacted GitHub to work with them so we have a smooth move.
+ +### Moby tool + +`moby` tool docs were moved from [LinuxKit](https://github.com/linuxkit/linuxkit) to the [moby tool repo](https://github.com/moby/tool), thanks to @justincormack. + +### Custom golang URLs + +More discussions on the [forums](https://forums.mobyproject.org/t/cutoms-golang-urls); no agreement for now. + +### Buildkit + +[Proposal](https://github.com/moby/moby/issues/32925) + +More updates to the [POC repo](https://github.com/tonistiigi/buildkit_poc). It now contains binaries for the daemon and client. The examples directory shows a way of invoking a build job by generating the internal low-level build graph definition with a helper binary (as there is no support for frontends yet). The grpc control server binary can be built in two versions: one that connects to the containerd socket, and another that doesn't have any external dependencies. + +If you have questions or want to help, stop by the issues section of that repo or the proposal in moby/moby. + +#### Typed Dockerfile parsing + +[PR](https://github.com/moby/moby/pull/33492) + +A new PR enables parsing Dockerfiles into typed structures so they can be preprocessed to eliminate unnecessary build stages and reused with different kinds of dispatchers (a rough sketch of such structures appears at the end of this report). + +#### Long running session & incremental file sending + +[PR ](https://github.com/moby/moby/pull/32677) + +Same status as last week. The PR went through one pass of review from @dnephin and has been rebased again. Maintainers are encouraged to give this one a review so it can be included in the `v17.07` release. + + +#### Quality: Dependency interface switch + +[Move file copying from the daemon to the builder](https://github.com/moby/moby/pull/33454) PR is waiting for a second review. + +#### Proposals for new Dockerfile features that need design feedback: + +[Add IMPORT/EXPORT commands to Dockerfile](https://github.com/moby/moby/issues/32100) + +[Add `DOCKEROS/DOCKERARCH` default ARG to Dockerfile](https://github.com/moby/moby/issues/32487) + +[Add support for `RUN --mount`](https://github.com/moby/moby/issues/32507) + +[DAG image builder](https://github.com/moby/moby/issues/32550) + +[Option to export the hash of the build context](https://github.com/moby/moby/issues/32963) (new) + +[Allow --cache-from=*](https://github.com/moby/moby/issues/33002#issuecomment-299041162) (new) + +[Provide advanced .dockerignore use-cases](https://github.com/moby/moby/issues/12886) [2](https://github.com/moby/moby/issues/12886#issuecomment-306247989) + +If you are interested in implementing any of them, leave a comment on the specific issues. + +#### Builder features currently in code-review: + +[Warn/deprecate continuing on empty lines in `Dockerfile`](https://github.com/moby/moby/pull/29161) + +[Fix behavior of absolute paths in .dockerignore](https://github.com/moby/moby/pull/32088) + +#### Backlog + +[Build secrets](https://github.com/moby/moby/issues/33343) has not got much traction. If you want this feature to become a reality, please make yourself heard.
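
As a rough illustration of the typed Dockerfile parsing mentioned above, the idea is to turn the flat AST into structures like the following. The names here are hypothetical, not the ones used in the PR:

```go
package sketch

// Stage corresponds to a single FROM block in a (multi-stage) Dockerfile.
type Stage struct {
	BaseName string    // image (or earlier stage) referenced by FROM
	Name     string    // optional "AS name" alias
	Commands []Command // the typed instructions of this stage
}

// Command is implemented by every typed instruction.
type Command interface {
	Name() string
}

// RunCommand is a typed representation of a RUN instruction.
type RunCommand struct {
	PrependShell bool     // shell form vs. exec form
	CmdLine      []string // the command and its arguments
}

func (c *RunCommand) Name() string { return "run" }
```

With structures like these, a dispatcher can inspect which stages are actually referenced before running anything, which is what makes eliminating unnecessary build stages possible.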
diff --git a/vendor/github.com/moby/moby/reports/2017-06-26.md b/vendor/github.com/moby/moby/reports/2017-06-26.md new file mode 100644 index 000000000..e12533ae4 --- /dev/null +++ b/vendor/github.com/moby/moby/reports/2017-06-26.md @@ -0,0 +1,120 @@ +# Development Report for June 26, 2017 + +## Moby Summit + +The Moby Summit held in San Francisco was very active and well attended ([blog](http://mobyproject.org/blog/2017/06/26/moby-summit-recap/) / [linuxkit table notes](https://github.com/linuxkit/linuxkit/blob/master/reports/2017-06-19-summit.md) [#2090](https://github.com/linuxkit/linuxkit/pull/2090) [#2033](https://github.com/linuxkit/linuxkit/pull/2033) [@mgoelzer] [@justincormack]). + +## Container Engine + +Thanks to @fabiokung, there are no container locks anymore on `docker ps` [#31273](https://github.com/moby/moby/pull/31273) + +## BuildKit + +[Repo](https://github.com/moby/buildkit) +[Proposal](https://github.com/moby/moby/issues/32925) + +The new development repo is open at https://github.com/moby/buildkit + +The readme file provides examples of how to get started. You can see an example of building BuildKit with BuildKit. + +There are lots of new issues opened as well to track the missing functionality. You are welcome to help on any of them or discuss the design there. + +Last week most of the work was done on improving the `llb` client library for more complicated use cases and providing traces and interactive progress of executed build jobs. + +The `llb` client package is a Go library that helps you generate the build definition graph. It uses chained methods to make it easy to describe what steps need to be run (see the sketch below). Mounts can be added to the execution steps for defining multiple inputs or outputs. To prepare the graph, you just have to call `Marshal()` on a leaf node that will generate the protobuf definition for everything required to build that node. + +### Typed Dockerfile parsing + +[PR](https://github.com/moby/moby/pull/33492) + +This PR enables parsing Dockerfiles into typed structures so they can be preprocessed to eliminate unnecessary build stages and reused with different kinds of dispatchers (e.g. BuildKit). + +The PR had some review and updates last week, and should be ready for code review soon. + +### Merged: Long running session & incremental file sending + +[PR](https://github.com/moby/moby/pull/32677) + +The incremental context sending PR was merged and is expected to land in `v17.07`. + +This experimental feature lets you skip sending the build context to the daemon on repeated builder invocations during development. Currently, this feature requires a CLI flag `--stream=true`. If this flag is used, on the first builder invocation the full build context is sent to the daemon; on subsequent invocations, only the changed files are transferred. + +The previous build context is saved in the build cache, and you can see how much space it takes from `docker system df`. Build cache will be automatically garbage collected and can also be manually cleared with `docker prune`. + +### Quality: Dependency interface switch + +The [Move file copying from the daemon to the builder](https://github.com/moby/moby/pull/33454) PR was merged.
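
To illustrate the chained `llb` style described above, here is a minimal sketch. It is based on the `llb` package as it later stabilized upstream; the exact API at the time of this report may have differed:

```go
package main

import (
	"context"
	"log"

	"github.com/moby/buildkit/client/llb"
)

func main() {
	// Each Run chains a new execution step onto the previous state.
	st := llb.Image("docker.io/library/alpine:latest").
		Run(llb.Shlex("apk add --no-cache git")).
		Root()

	// Marshal the leaf node to get the protobuf definition of the
	// whole graph needed to build it.
	def, err := st.Marshal(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("marshaled %d ops", len(def.Def))
}
```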
+ + +### Proposals for new Dockerfile features that need design feedback: + +[Add IMPORT/EXPORT commands to Dockerfile](https://github.com/moby/moby/issues/32100) + +[Add `DOCKEROS/DOCKERARCH` default ARG to Dockerfile](https://github.com/moby/moby/issues/32487) + +[Add support for `RUN --mount`](https://github.com/moby/moby/issues/32507) + +[DAG image builder](https://github.com/moby/moby/issues/32550) + +[Option to export the hash of the build context](https://github.com/moby/moby/issues/32963) (new) + +[Allow --cache-from=*](https://github.com/moby/moby/issues/33002#issuecomment-299041162) (new) + +[Provide advanced .dockerignore use-cases](https://github.com/moby/moby/issues/12886) [2](https://github.com/moby/moby/issues/12886#issuecomment-306247989) + +If you are interested in implementing any of them, leave a comment on the specific issues. + +### Other builder PRs merged last week + +[Warn/deprecate continuing on empty lines in `Dockerfile`](https://github.com/moby/moby/pull/29161) + +[Fix behavior of absolute paths in .dockerignore](https://github.com/moby/moby/pull/32088) + +[fix copy --from conflict with force pull](https://github.com/moby/moby/pull/33735) + +### Builder features currently in code-review: + +[Fix handling of remote "git@" notation](https://github.com/moby/moby/pull/33696) + +[builder: Emit a BuildResult after squashing.](https://github.com/moby/moby/pull/33824) + +[Fix shallow git clone in docker-build](https://github.com/moby/moby/pull/33704) + +### Backlog + +[Build secrets](https://github.com/moby/moby/issues/33343) has not got much traction. If you want this feature to become a reality, please make yourself heard. + +## LinuxKit + +- **Kernel GPG verification:** The kernel compilation containers now verify the GPG and SHA256 + checksums before building the binaries. ([#2062](https://github.com/linuxkit/linuxkit/issues/2062) [#2083](https://github.com/linuxkit/linuxkit/issues/2083) [@mscribe] [@justincormack] [@rn] [@riyazdf]). + The base Alpine build image now includes `gnupg` to support this feature ([#2091](https://github.com/linuxkit/linuxkit/issues/2091) [@riyazdf] [@rn]). + +- **Security SIG on Landlock:** The third Moby Security SIG focussed on the [Landlock](https://github.com/landlock-lsm) security module that provides unprivileged fine-grained sandboxing to applications. There are videos and forum links ([#2087](https://github.com/linuxkit/linuxkit/issues/2087) [#2089](https://github.com/linuxkit/linuxkit/issues/2089) [#2073](https://github.com/linuxkit/linuxkit/issues/2073) [@riyazdf]). + +- **Networking drivers now modules:** The kernels have been updated to 4.11.6/4.9.33/4.4.73, and many drivers are now loaded as modules to speed up boot time ([#2095](https://github.com/linuxkit/linuxkit/issues/2095) [#2061](https://github.com/linuxkit/linuxkit/issues/2061) [@rn] [@justincormack] [@tych0]) + +- **Whaley important update:** The ASCII logo was updated and we fondly wave goodbye to the waves. ([#2084](https://github.com/linuxkit/linuxkit/issues/2084) [@thaJeztah] [@rn]) + +- **Containerised getty and sshd:** The login services now run in their own mount namespace, which was confusing people since they were expecting it to be on the host filesystem.
This is now being addressed via a reminder in the `motd` upon login ([#2078](https://github.com/linuxkit/linuxkit/issues/2078) [#2097](https://github.com/linuxkit/linuxkit/issues/2097) [@deitch] [@ijc] [@justincormack] [@riyazdf] [@rn]) + +- **Hardened user copying:** The RFC on ensuring that we use a hardened kernel/userspace copying system was closed, as it is enabled by default on all our modern kernels and a regression test is included by default ([#2086](https://github.com/linuxkit/linuxkit/issues/2086) [@fntlnz] [@riyazdf]). + +- **Vultr provider:** There is an ongoing effort to add a metadata provider for [Vultr](http://vultr.com) ([#2101](https://github.com/linuxkit/linuxkit/issues/2101) [@furious-luke] [@justincormack]). + +### Packages and Projects + +- Simplified Makefiles for packages ([#2080](https://github.com/linuxkit/linuxkit/issues/2080) [@justincormack] [@rn]) +- The MirageOS SDK is integrating many upstream changes from dependent libraries, for the DHCP client ([#2070](https://github.com/linuxkit/linuxkit/issues/2070) [#2072](https://github.com/linuxkit/linuxkit/issues/2072) [@samoht] [@talex5] [@avsm]). + +### Documentation and Tests + +- A comprehensive test suite for containerd is now integrated into LinuxKit tests ([#2062](https://github.com/linuxkit/linuxkit/issues/2062) [@AkihiroSuda] [@justincormack] [@rn]) +- Fix documentation links ([#2074](https://github.com/linuxkit/linuxkit/issues/2074) [@ndauten] [@justincormack]) +- Update RTF version ([#2077](https://github.com/linuxkit/linuxkit/issues/2077) [@justincormack]) +- tests: add build test for Docker for Mac blueprint ([#2093](https://github.com/linuxkit/linuxkit/issues/2093) [@riyazdf] [@MagnusS]) +- Disable Qemu EFI ISO test for now ([#2100](https://github.com/linuxkit/linuxkit/issues/2100) [@justincormack]) +- The CI whitelists and ACLs were updated ([linuxkit-ci#11](https://github.com/linuxkit/linuxkit-ce/issues/11) [linuxkit-ci#15](https://github.com/linuxkit/linuxkit-ce/issues/15) [linuxkit/linuxkit-ci#10](https://github.com/linuxkit/linuxkit-ce/issues/10) [@rn] [@justincormack]) +- Fix spelling errors ([#2079](https://github.com/linuxkit/linuxkit/issues/2079) [@ndauten]) +- Fix typo in dev report ([#2094](https://github.com/linuxkit/linuxkit/issues/2094) [@justincormack]) +- Fix dead Link to VMWare File ([#2082](https://github.com/linuxkit/linuxkit/issues/2082) [@davefreitag]) \ No newline at end of file diff --git a/vendor/github.com/moby/moby/reports/builder/2017-05-01.md b/vendor/github.com/moby/moby/reports/builder/2017-05-01.md new file mode 100644 index 000000000..73d1c4930 --- /dev/null +++ b/vendor/github.com/moby/moby/reports/builder/2017-05-01.md @@ -0,0 +1,47 @@ +# Development Report for May 01, 2017 + +### buildkit + +As part of the goals of [Moby](https://github.com/moby/moby#transitioning-to-moby) to split the current platform into reusable components and to provide a future vision for the builder component, a new [buildkit proposal](https://github.com/moby/moby/issues/32925) was opened with an early design draft. + +Buildkit is a library providing the core essentials of running a build process using isolated sandboxed commands. It is designed for extensibility and customization. Buildkit supports multiple build declaration formats (frontends) and multiple ways of outputting build results (not just Docker images). It doesn't make decisions for specific worker, snapshot, or exporter implementations.
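
That separation can be pictured with a couple of interfaces. These are purely illustrative, not Buildkit's actual API:

```go
package sketch

import (
	"context"
	"io"
)

// Task is a placeholder for one isolated, sandboxed build step.
type Task struct {
	Args []string
}

// Result is a placeholder for the output of a finished build.
type Result struct {
	Ref string
}

// Frontend translates a build declaration format (for example a
// Dockerfile) into build tasks.
type Frontend interface {
	Parse(src io.Reader) ([]Task, error)
}

// Exporter outputs a build result in some form: a Docker image, a
// tarball, a local directory, and so on.
type Exporter interface {
	Export(ctx context.Context, res Result) error
}
```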
+ +Buildkit is designed to help find the most efficient way to process build tasks and intelligently cache them for repeated invocations. + +### Quality: Dependency interface switch + +To improve quality and performance, a new [proposal was made for switching the dependency interface](https://github.com/moby/moby/issues/32904) for the current builder package. That should fix the current problems with data leakage and conflicts caused by daemon state cleanup scripts. + +@dnephin is in the process of refactoring the current builder code into logical areas as preparation for updating this interface. + +Merged as part of this effort: + +- [Refactor Dockerfile.parser and directive](https://github.com/moby/moby/pull/32580) +- [Refactor builder dispatch state](https://github.com/moby/moby/pull/32600) +- [Use a bytes.Buffer for shell_words string concat](https://github.com/moby/moby/pull/32601) +- [Refactor `Builder.commit()`](https://github.com/moby/moby/pull/32772) +- [Remove b.escapeToken, create ShellLex](https://github.com/moby/moby/pull/32858) + +### New feature: Long running session + +The PR for [adding a long-running session between daemon and cli](https://github.com/moby/moby/pull/32677), which enables advanced features like incremental context sending, build credentials from the client, ssh forwarding, etc., is looking for an initial design review. It is currently an open question whether features implemented on top of it will use a specific transport implementation on the wire or a generic interface (the current implementation). @tonistiigi is working on adding persistent cache capabilities that are currently missing from that PR. It also needs to be figured out how the [cli split](https://github.com/moby/moby/pull/32694) will affect features like this. + +### Proposals for new Dockerfile features that need design feedback: + +[Add IMPORT/EXPORT commands to Dockerfile](https://github.com/moby/moby/issues/32100) + +[Add `DOCKEROS/DOCKERARCH` default ARG to Dockerfile](https://github.com/moby/moby/issues/32487) + +[Add support for `RUN --mount`](https://github.com/moby/moby/issues/32507) + +These proposals have gotten mostly positive feedback for now. We will leave them open for a couple more weeks and then decide what actions to take in a maintainers meeting. Also, if you are interested in implementing any of them, leave a comment on the specific issues. + +### Other new builder features currently in code-review: + +[`docker build --iidfile` to capture the ID of the build result](https://github.com/moby/moby/pull/32406) + +[Allow builds from any git remote ref](https://github.com/moby/moby/pull/32502) + +### Backlog: + +[Build secrets](https://github.com/moby/moby/pull/30637) will be brought up again in the next maintainers meeting to evaluate how to move on with this, if any other proposals have changed the objective and if we should wait for swarm secrets to be available first. diff --git a/vendor/github.com/moby/moby/reports/builder/2017-05-08.md b/vendor/github.com/moby/moby/reports/builder/2017-05-08.md new file mode 100644 index 000000000..d9396ab76 --- /dev/null +++ b/vendor/github.com/moby/moby/reports/builder/2017-05-08.md @@ -0,0 +1,57 @@ +# Development Report for May 08, 2017 + + +### Quality: Dependency interface switch + +A proposal was made for [switching the dependency interface](https://github.com/moby/moby/issues/32904) for the current builder package. That should fix the current problems with data leakage and conflicts caused by daemon state cleanup scripts (a rough sketch of the idea follows).
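
A minimal sketch of that idea, with hypothetical names rather than the actual interface from the proposal: the backend hands the builder releasable image references instead of letting the builder touch daemon-managed state directly, so cleanup scripts cannot pull data out from under a running build.

```go
package sketch

// ImageRef pins an image and its layers for the duration of a build job.
type ImageRef interface {
	ImageID() string
	Release() error // called by the builder once the build job completes
}

// Backend is what the builder would depend on; it never exposes raw
// daemon state, which is what caused the leakage and cleanup conflicts.
type Backend interface {
	GetImage(name string) (ImageRef, error)
}
```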
+ +Merged as part of this effort: + +- [Move dispatch state to a new struct](https://github.com/moby/moby/pull/32952) +- [Cleanup unnecessary mutate then revert of b.runConfig](https://github.com/moby/moby/pull/32773) + +In review: +- [Refactor builder probe cache and container backend](https://github.com/moby/moby/pull/33061) +- [Expose GetImage interface for builder](https://github.com/moby/moby/pull/33054) + +### Merged: docker build --iidfile + +[`docker build --iidfile` to capture the ID of the build result](https://github.com/moby/moby/pull/32406). The new option can be used by CLI applications to get back the image ID of the build result. API users can use the `Aux` messages in the progress stream to also get the IDs of intermediate build stages, for example to share them for the build cache. + +### New feature: Long running session + +The PR for [adding a long-running session between daemon and cli](https://github.com/moby/moby/pull/32677) enables advanced features like incremental context sending, build credentials from the client, ssh forwarding, etc. + +@simonferquel proposed a [grpc-only version of that interface](https://github.com/moby/moby/pull/33047) that should simplify the setup needed for describing new features for the session. It is looking for design reviews. + +The feature also needs to be reworked after the CLI split. + +### buildkit + +Not much progress [apart from some design discussion](https://github.com/moby/moby/issues/32925). The next step would be to open up a repo. + +### Proposals for new Dockerfile features that need design feedback: + +[Add IMPORT/EXPORT commands to Dockerfile](https://github.com/moby/moby/issues/32100) + +[Add `DOCKEROS/DOCKERARCH` default ARG to Dockerfile](https://github.com/moby/moby/issues/32487) + +[Add support for `RUN --mount`](https://github.com/moby/moby/issues/32507) + +[DAG image builder](https://github.com/moby/moby/issues/32550) + +[Option to export the hash of the build context](https://github.com/moby/moby/issues/32963) (new) + +[Allow --cache-from=*](https://github.com/moby/moby/issues/33002#issuecomment-299041162) (new) + +If you are interested in implementing any of them, leave a comment on the specific issues. + +### Other new builder features currently in code-review: + +[Allow builds from any git remote ref](https://github.com/moby/moby/pull/32502) + +[Fix a case where using FROM scratch as NAME would fail](https://github.com/moby/moby/pull/32997) + +### Backlog: + +[Build secrets](https://github.com/moby/moby/pull/30637) will be brought up again in the next maintainers meeting to evaluate how to move on with this, if any other proposals have changed the objective and if we should wait for swarm secrets to be available first. diff --git a/vendor/github.com/moby/moby/reports/builder/2017-05-15.md b/vendor/github.com/moby/moby/reports/builder/2017-05-15.md new file mode 100644 index 000000000..cfc742f3a --- /dev/null +++ b/vendor/github.com/moby/moby/reports/builder/2017-05-15.md @@ -0,0 +1,64 @@ +# Development Report for May 15, 2017 + +### Multi-stage build fixes coming in 17.06-rc1 + +Some bugs were discovered in the new multi-stage build feature, released in 17.05. + +When using an image name directly in `COPY --from` without defining a build stage, the data associated with that image was not properly cleaned up. + +If a second stage was based on the `scratch` image, the metadata from the previous stage didn't get reset, forcing the user to clear it manually with extra commands.
+ +Fixes for these are merged for the next release; everyone is welcome to test once `17.06-rc1` is out. + +- [Fix resetting image metadata between stages for scratch case](https://github.com/moby/moby/pull/33179) +- [Fix releasing implicit mounts](https://github.com/moby/moby/pull/33090) +- [Fix a case where using FROM scratch as NAME would fail](https://github.com/moby/moby/pull/32997) + + +### Quality: Dependency interface switch + +Work continues on making the builder dependency interface more stable. This week, the methods for getting access to the source image were swapped out for a new version that keeps a reference to the image data until the build job has completed. + +Merged as part of this effort: + +- [Expose GetImage interface for builder](https://github.com/moby/moby/pull/33054) + +In review: +- [Refactor builder probe cache and container backend](https://github.com/moby/moby/pull/33061) +- [Refactor COPY/ADD dispatchers](https://github.com/moby/moby/pull/33116) + + +### New feature: Long running session + +The PR for [adding a long-running session between daemon and cli](https://github.com/moby/moby/pull/32677) enables advanced features like incremental context sending, build credentials from the client, ssh forwarding, etc. + +@simonferquel updated a [grpc-only version of that interface](https://github.com/moby/moby/pull/33047), and it mostly seems that consensus was achieved on using only the grpc transport. @tonistiigi finished up the persistent cache layer and garbage collection for file transfers. The PR now needs to be split up because the CLI has moved. Once that is done, the main PR should be ready for review early this week. + +### Merged: Specifying any remote ref in git checkout URLs + +Building from git sources now allows [specifying any remote ref](https://github.com/moby/moby/pull/32502). For example, to build a pull request from GitHub you can use: `docker build git://github.com/moby/moby#pull/32502/head`. + + +### Proposals for new Dockerfile features that need design feedback: + +[Add IMPORT/EXPORT commands to Dockerfile](https://github.com/moby/moby/issues/32100) + +[Add `DOCKEROS/DOCKERARCH` default ARG to Dockerfile](https://github.com/moby/moby/issues/32487) + +[Add support for `RUN --mount`](https://github.com/moby/moby/issues/32507) + +[DAG image builder](https://github.com/moby/moby/issues/32550) + +[Option to export the hash of the build context](https://github.com/moby/moby/issues/32963) (new) + +[Allow --cache-from=*](https://github.com/moby/moby/issues/33002#issuecomment-299041162) (new) + +If you are interested in implementing any of them, leave a comment on the specific issues. + +### Other new builder features currently in code-review: + +- + +### Backlog: + +[Build secrets](https://github.com/moby/moby/pull/30637) will be brought up again in the next maintainers meeting to evaluate how to move on with this, if any other proposals have changed the objective and if we should wait for swarm secrets to be available first.
diff --git a/vendor/github.com/moby/moby/reports/builder/2017-05-22.md b/vendor/github.com/moby/moby/reports/builder/2017-05-22.md new file mode 100644 index 000000000..29ecc6bb9 --- /dev/null +++ b/vendor/github.com/moby/moby/reports/builder/2017-05-22.md @@ -0,0 +1,47 @@ +# Development Report for May 22, 2017 + +### New feature: Long running session + +The PR for [adding a long-running session between daemon and cli](https://github.com/moby/moby/pull/32677), which enables advanced features like incremental context sending, build credentials from the client, ssh forwarding, etc., is ready for reviews. This is blocking many new features like token signing, not pulling unnecessary context files, exposing sources outside the working directory, etc. + + +### Quality: Dependency interface switch + +Work continues on making the builder dependency interface more stable. + +Merged as part of this effort this week: + +- [Refactor COPY/ADD dispatchers](https://github.com/moby/moby/pull/33116) + +In review: +- [Refactor builder probe cache and container backend](https://github.com/moby/moby/pull/33061) + +### Buildkit + +[Diff and snapshot services](https://github.com/containerd/containerd/pull/849) were added to containerd. This is a required dependency for [buildkit](https://github.com/moby/moby/issues/32925). + +### Proposals discussed in maintainers meeting + +New builder proposals were discussed in the maintainers meeting. The decision was to give 2 more weeks for anyone to post feedback to [IMPORT/EXPORT commands](https://github.com/moby/moby/issues/32100) and [`RUN --mount`](https://github.com/moby/moby/issues/32507) and accept them for development if nothing significant comes up. + +Build secrets and its possible overlap with [--mount](https://github.com/moby/moby/issues/32507) was discussed as well. The decision was to create a [new issue](https://github.com/moby/moby/issues/33343) (as the [old PR](https://github.com/moby/moby/pull/30637) is closed) to track this and avoid it blocking the `--mount` implementation. + +### Proposals for new Dockerfile features that need design feedback: + +[Add IMPORT/EXPORT commands to Dockerfile](https://github.com/moby/moby/issues/32100) + +[Add `DOCKEROS/DOCKERARCH` default ARG to Dockerfile](https://github.com/moby/moby/issues/32487) + +[Add support for `RUN --mount`](https://github.com/moby/moby/issues/32507) + +[DAG image builder](https://github.com/moby/moby/issues/32550) + +[Option to export the hash of the build context](https://github.com/moby/moby/issues/32963) (new) + +[Allow --cache-from=*](https://github.com/moby/moby/issues/33002#issuecomment-299041162) (new) + +If you are interested in implementing any of them, leave a comment on the specific issues. + +### Other new builder features currently in code-review: + +- diff --git a/vendor/github.com/moby/moby/reports/builder/2017-05-29.md b/vendor/github.com/moby/moby/reports/builder/2017-05-29.md new file mode 100644 index 000000000..33043d9f3 --- /dev/null +++ b/vendor/github.com/moby/moby/reports/builder/2017-05-29.md @@ -0,0 +1,52 @@ +# Development Report for May 29, 2017 + +### New feature: Long running session + +The PR for [adding a long-running session between daemon and cli](https://github.com/moby/moby/pull/32677), which enables advanced features like incremental context sending, build credentials from the client, ssh forwarding, etc., is ready for reviews. It is blocking many new features like token signing, not pulling unnecessary context files, exposing sources outside the working directory, etc.
Maintainers are encouraged to give this one a review! + + +### Quality: Dependency interface switch + +Work continues on making the builder dependency interface more stable. + +Merged as part of this effort this week: + +- [Refactor builder probe cache and container backend](https://github.com/moby/moby/pull/33061) + +@dnephin continues working on the copy/export aspects of the interface. + +### Buildkit + +Some initial proof-of-concept code for [buildkit](https://github.com/moby/moby/issues/32925) has been pushed to https://github.com/tonistiigi/buildkit_poc . It is in a very early exploratory stage. Current development has focused on providing concurrent, reference-based access to the snapshot data that is backed by containerd. More info should follow in the coming weeks, hopefully including the opening of an official repo. If you have questions or want to help, stop by the issues section of that repo or the proposal in moby/moby. + +### Proposals discussed in maintainers meeting + +Reminder from last week: New builder proposals were discussed in the maintainers meeting. The decision was to give two more weeks for anyone to post feedback on [IMPORT/EXPORT commands](https://github.com/moby/moby/issues/32100) and [`RUN --mount`](https://github.com/moby/moby/issues/32507), and to accept them for development if nothing significant comes up. + +The new issue about [build secrets](https://github.com/moby/moby/issues/33343) has not gotten much traction. If you want this feature to become a reality, please make yourself heard. + +### Proposals for new Dockerfile features that need design feedback: + +[Add IMPORT/EXPORT commands to Dockerfile](https://github.com/moby/moby/issues/32100) + +[Add `DOCKEROS/DOCKERARCH` default ARG to Dockerfile](https://github.com/moby/moby/issues/32487) + +[Add support for `RUN --mount`](https://github.com/moby/moby/issues/32507) + +[DAG image builder](https://github.com/moby/moby/issues/32550) + +[Option to export the hash of the build context](https://github.com/moby/moby/issues/32963) (new) + +[Allow --cache-from=*](https://github.com/moby/moby/issues/33002#issuecomment-299041162) (new) + +If you are interested in implementing any of them, leave a comment on the specific issues. + +### Other new builder features currently in code-review: + +[Fix canceling builder on chunked requests](https://github.com/moby/moby/pull/33363) + +[Fix parser directive refactoring](https://github.com/moby/moby/pull/33436) + +[Warn/deprecate continuing on empty lines in `Dockerfile`](https://github.com/moby/moby/pull/29161) + +[Fix behavior of absolute paths in .dockerignore](https://github.com/moby/moby/pull/32088) \ No newline at end of file diff --git a/vendor/github.com/moby/moby/reports/builder/2017-06-05.md b/vendor/github.com/moby/moby/reports/builder/2017-06-05.md new file mode 100644 index 000000000..3746c2639 --- /dev/null +++ b/vendor/github.com/moby/moby/reports/builder/2017-06-05.md @@ -0,0 +1,58 @@ +# Development Report for June 5, 2017 + +### New feature: Long running session + +As last week, the PR for [adding a long-running session between daemon and cli](https://github.com/moby/moby/pull/32677) is waiting for review. It is blocking many new features like token signing, not pulling unnecessary context files, and exposing sources outside the working directory. Maintainers are encouraged to give this one a review so it can be included in the `v17.07` release. + + +### Quality: Dependency interface switch + +Work continues on making the builder dependency interface more stable.
+ +PRs currently in review as part of this effort: + +- [Move file copying from the daemon to the builder](https://github.com/moby/moby/pull/33454) + +This PR is the core of the update: it removes the need to track active containers and instead lets the builder hold references to layers while it is running. + +Related to this, @simonferquel opened a [WIP PR](https://github.com/moby/moby/pull/33492) that introduces typed Dockerfile parsing. This enables making [decisions about dependencies](https://github.com/moby/moby/issues/32550#issuecomment-297867334) between build stages and reusing Dockerfile parsing as a buildkit frontend. (A rough sketch of what such typed structures could look like appears after the proposals list below.) + +### Buildkit + +Some initial proof-of-concept code for [buildkit](https://github.com/moby/moby/issues/32925) has been pushed to https://github.com/tonistiigi/buildkit_poc . It is in a very early exploratory stage. The current codebase includes libraries for getting concurrency-safe references to containerd snapshots using a centralized cache management instance. There is a sample source implementation for pulling images to these snapshots and executing jobs with runc on top of them. There is also some utility code for concurrent execution and progress stream handling. More info should follow in the coming weeks, hopefully including the opening of an official repo. If you have questions or want to help, stop by the issues section of that repo or the proposal in moby/moby. + +### Proposals discussed in maintainers meeting + +Reminder from last week: New builder proposals were discussed in the maintainers meeting. The decision was to give two more weeks for anyone to post feedback on [IMPORT/EXPORT commands](https://github.com/moby/moby/issues/32100) and [`RUN --mount`](https://github.com/moby/moby/issues/32507), and to accept them for development if nothing significant comes up. This is the last week to post your feedback on these proposals or on the comments in them. You can also volunteer to implement them. + +A new issue about [build secrets](https://github.com/moby/moby/issues/33343) has not gotten much traction. If you want this feature to become a reality, please make yourself heard. + +### Proposals for new Dockerfile features that need design feedback: + +[Add IMPORT/EXPORT commands to Dockerfile](https://github.com/moby/moby/issues/32100) + +[Add `DOCKEROS/DOCKERARCH` default ARG to Dockerfile](https://github.com/moby/moby/issues/32487) + +[Add support for `RUN --mount`](https://github.com/moby/moby/issues/32507) + +[DAG image builder](https://github.com/moby/moby/issues/32550) + +[Option to export the hash of the build context](https://github.com/moby/moby/issues/32963) (new) + +[Allow --cache-from=*](https://github.com/moby/moby/issues/33002#issuecomment-299041162) (new) + +[Provide advanced .dockerignore use-cases](https://github.com/moby/moby/issues/12886) [2](https://github.com/moby/moby/issues/12886#issuecomment-306247989) + +If you are interested in implementing any of them, leave a comment on the specific issues.
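To make the typed-parsing idea above concrete, here is a rough, hypothetical sketch of what typed Dockerfile structures could look like; the actual types in the WIP PR may well differ.

```go
package instructions

// Stage is one build stage: a FROM line plus the commands that follow it.
type Stage struct {
	Name     string // the AS name, if any
	BaseName string // the FROM argument
	Commands []Command
}

// Command is a single typed Dockerfile instruction.
type Command interface {
	Name() string
}

// CopyCommand is a typed COPY instruction.
type CopyCommand struct {
	From    string // the --from stage reference, if any
	Sources []string
	Dest    string
}

func (c *CopyCommand) Name() string { return "COPY" }

// directDeps walks a stage's typed commands to find which other stages
// it references via COPY --from, so unreferenced stages can be skipped.
func directDeps(s Stage) map[string]bool {
	deps := map[string]bool{}
	for _, cmd := range s.Commands {
		if c, ok := cmd.(*CopyCommand); ok && c.From != "" {
			deps[c.From] = true
		}
	}
	return deps
}
```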
+ +### Other builder PRs merged last week + +[Fix canceling builder on chunked requests](https://github.com/moby/moby/pull/33363) + +[Fix parser directive refactoring](https://github.com/moby/moby/pull/33436) + +### Builder features currently in code-review: + +[Warn/deprecate continuing on empty lines in `Dockerfile`](https://github.com/moby/moby/pull/29161) + +[Fix behavior of absolute paths in .dockerignore](https://github.com/moby/moby/pull/32088) \ No newline at end of file diff --git a/vendor/github.com/moby/moby/reports/builder/2017-06-12.md b/vendor/github.com/moby/moby/reports/builder/2017-06-12.md new file mode 100644 index 000000000..df5d801e7 --- /dev/null +++ b/vendor/github.com/moby/moby/reports/builder/2017-06-12.md @@ -0,0 +1,58 @@ +# Development Report for June 12, 2017 + + +### Buildkit + +[Proposal](https://github.com/moby/moby/issues/32925) + +More updates to the [POC repo](https://github.com/tonistiigi/buildkit_poc). It now contains binaries for the daemon and client. The examples directory shows a way of invoking a build job by generating the internal low-level build graph definition with a helper binary (as there is no support for frontends yet). The grpc control server binary can be built in two versions: one that connects to the containerd socket, and another that has no external dependencies. + +If you have questions or want to help, stop by the issues section of that repo or the proposal in moby/moby. + +### Typed Dockerfile parsing + +[PR](https://github.com/moby/moby/pull/33492) + +A new PR that enables parsing Dockerfiles into typed structures so they can be preprocessed to eliminate unnecessary build stages and reused with different kinds of dispatchers. + +### Long running session & incremental file sending + +[PR](https://github.com/moby/moby/pull/32677) + +Same status as last week. The PR went through one pass of review from @dnephin and has been rebased again. Maintainers are encouraged to give this one a review so it can be included in the `v17.07` release. + + +### Quality: Dependency interface switch + +The [Move file copying from the daemon to the builder](https://github.com/moby/moby/pull/33454) PR is waiting for a second review. + +### Proposals for new Dockerfile features that need design feedback: + +[Add IMPORT/EXPORT commands to Dockerfile](https://github.com/moby/moby/issues/32100) + +[Add `DOCKEROS/DOCKERARCH` default ARG to Dockerfile](https://github.com/moby/moby/issues/32487) + +[Add support for `RUN --mount`](https://github.com/moby/moby/issues/32507) + +[DAG image builder](https://github.com/moby/moby/issues/32550) + +[Option to export the hash of the build context](https://github.com/moby/moby/issues/32963) (new) + +[Allow --cache-from=*](https://github.com/moby/moby/issues/33002#issuecomment-299041162) (new) + +[Provide advanced .dockerignore use-cases](https://github.com/moby/moby/issues/12886) [2](https://github.com/moby/moby/issues/12886#issuecomment-306247989) + +If you are interested in implementing any of them, leave a comment on the specific issues. + +### Other builder PRs merged last week + + +### Builder features currently in code-review: + +[Warn/deprecate continuing on empty lines in `Dockerfile`](https://github.com/moby/moby/pull/29161) + +[Fix behavior of absolute paths in .dockerignore](https://github.com/moby/moby/pull/32088) + +### Backlog + +[Build secrets](https://github.com/moby/moby/issues/33343) has not gotten much traction. If you want this feature to become a reality, please make yourself heard.
\ No newline at end of file diff --git a/vendor/github.com/moby/moby/reports/builder/2017-06-26.md b/vendor/github.com/moby/moby/reports/builder/2017-06-26.md new file mode 100644 index 000000000..e0ba95a7a --- /dev/null +++ b/vendor/github.com/moby/moby/reports/builder/2017-06-26.md @@ -0,0 +1,78 @@ +# Development Report for June 26, 2017 + + +### BuildKit + +[Repo](https://github.com/moby/buildkit) +[Proposal](https://github.com/moby/moby/issues/32925) + +The new development repo is open at https://github.com/moby/buildkit + +The readme file provides examples of how to get started. You can see an example of building BuildKit with BuildKit. + +Lots of new issues have been opened as well to track the missing functionality. You are welcome to help with any of them or to discuss the design there. + +Last week most of the work was done on improving the `llb` client library for more complicated use cases and on providing traces and interactive progress for executed build jobs. + +The `llb` client package is a Go library that helps you generate the build definition graph. It uses chained methods to make it easy to describe what steps need to run. Mounts can be added to the execution steps to define multiple inputs or outputs. To prepare the graph, you just call `Marshal()` on a leaf node; that generates the protobuf definition for everything required to build that node. (A toy sketch of this chained style appears after the proposals list below.) + +### Typed Dockerfile parsing + +[PR](https://github.com/moby/moby/pull/33492) + +This PR enables parsing Dockerfiles into typed structures so they can be preprocessed to eliminate unnecessary build stages and reused with different kinds of dispatchers (e.g. BuildKit). + +The PR received some review and updates last week and should be ready for code review soon. + +### Merged: Long running session & incremental file sending + +[PR](https://github.com/moby/moby/pull/32677) + +The incremental context sending PR was merged and is expected to land in `v17.07`. + +This experimental feature lets you skip sending the build context to the daemon on repeated builder invocations during development. Currently, this feature requires the CLI flag `--stream=true`. If this flag is used, the full build context is sent to the daemon on the first builder invocation; on subsequent attempts, only the changed files are transferred. + +The previous build context is saved in the build cache, and you can see how much space it takes with `docker system df`. The build cache is automatically garbage collected and can also be manually cleared with `docker prune`. + +### Quality: Dependency interface switch + +The [Move file copying from the daemon to the builder](https://github.com/moby/moby/pull/33454) PR was merged. + + +### Proposals for new Dockerfile features that need design feedback: + +[Add IMPORT/EXPORT commands to Dockerfile](https://github.com/moby/moby/issues/32100) + +[Add `DOCKEROS/DOCKERARCH` default ARG to Dockerfile](https://github.com/moby/moby/issues/32487) + +[Add support for `RUN --mount`](https://github.com/moby/moby/issues/32507) + +[DAG image builder](https://github.com/moby/moby/issues/32550) + +[Option to export the hash of the build context](https://github.com/moby/moby/issues/32963) (new) + +[Allow --cache-from=*](https://github.com/moby/moby/issues/33002#issuecomment-299041162) (new) + +[Provide advanced .dockerignore use-cases](https://github.com/moby/moby/issues/12886) [2](https://github.com/moby/moby/issues/12886#issuecomment-306247989) + +If you are interested in implementing any of them, leave a comment on the specific issues.
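As an illustration of the chained-method style described in the BuildKit section above, here is a self-contained toy version of the pattern. It deliberately does not use the real `llb` package (whose API may differ); it only shows how chaining builds a graph and how marshaling a leaf node captures everything needed to build it.

```go
package main

import "fmt"

// State is a node in the build graph. Each chained call returns a new
// State that references its inputs, so a leaf node transitively holds
// the whole subgraph required to build it.
type State struct {
	op     string
	inputs []*State
}

// Image creates a source node (toy stand-in for an image source).
func Image(ref string) *State {
	return &State{op: "image " + ref}
}

// Run creates an exec node on top of the current state.
func (s *State) Run(args string) *State {
	return &State{op: "exec " + args, inputs: []*State{s}}
}

// Marshal walks the graph from a leaf node and emits a definition of
// every vertex needed to build it (protobuf in the real library).
func (s *State) Marshal() []string {
	var def []string
	for _, in := range s.inputs {
		def = append(def, in.Marshal()...)
	}
	return append(def, s.op)
}

func main() {
	st := Image("docker.io/library/alpine:latest").
		Run("apk add --no-cache git").
		Run("git --version")
	for _, v := range st.Marshal() {
		fmt.Println(v)
	}
}
```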
+ +### Other builder PRs merged last week + +[Warn/deprecate continuing on empty lines in `Dockerfile`](https://github.com/moby/moby/pull/29161) + +[Fix behavior of absolute paths in .dockerignore](https://github.com/moby/moby/pull/32088) + +[fix copy —from conflict with force pull](https://github.com/moby/moby/pull/33735) + +### Builder features currently in code-review: + +[Fix handling of remote "git@" notation](https://github.com/moby/moby/pull/33696) + +[builder: Emit a BuildResult after squashing.](https://github.com/moby/moby/pull/33824) + +[Fix shallow git clone in docker-build](https://github.com/moby/moby/pull/33704) + +### Backlog + +[Build secrets](https://github.com/moby/moby/issues/33343) has not gotten much traction. If you want this feature to become a reality, please make yourself heard. \ No newline at end of file diff --git a/vendor/github.com/moby/moby/reports/builder/2017-07-10.md b/vendor/github.com/moby/moby/reports/builder/2017-07-10.md new file mode 100644 index 000000000..76aeee0f1 --- /dev/null +++ b/vendor/github.com/moby/moby/reports/builder/2017-07-10.md @@ -0,0 +1,65 @@ +# Development Report for July 10, 2017 + + +### BuildKit + +[Repo](https://github.com/moby/buildkit) +[Proposal](https://github.com/moby/moby/issues/32925) + +Many new features have been added since the last report. + +The build definition solver was updated to detect identical parts of the graph sent by different clients and synchronize their processing. This is important when multiple targets of the same project are built at the same time, and it removes any duplication of work. + +Running build jobs now have support for graceful canceling and clear error reporting in case some build steps fail or are canceled. Bugs that may have left the state directory in an inconsistent state on server shutdown were fixed. + +The `buildctl du` command now shows all the information about allocated and in-use snapshots. It also shows the total space used and the total reclaimable space. All snapshots are now persistent, and state is not lost across server restarts. + +A new metadata package was implemented that other packages can use to add persistent, searchable metadata to individual snapshots. The first users of that feature are the content blob mapping on pull, the size cache for `du`, and the instruction cache. There is also a new debug command, `buildctl debug dump-metadata`, to inspect what data is being stored. + +The first version of the instruction cache was implemented. This caching scheme has many benefits over the current `docker build` caching, as it doesn't require all data to be locally available to determine a cache match. The interface for the cache implementation is much simpler and could be implemented remotely, as it only needs to store the cache keys and doesn't need to understand or compare their values. Content-based caching will be implemented on top of this work later. (See the sketch at the end of this section.) + +A separate source implementation for git repositories is currently in review. Using this source for accessing source code in git repositories has many performance and caching advantages. All the build jobs using the same git remote will use a shared local repository into which updates are pulled. All the nodes based on a git source will be cached using the commit ID of the current checkout. + +The next areas to be worked on will be implementing the first exporters for getting access to the build artifacts and porting over the client session/incremental-send feature from `17.07-ce`.
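One way to see why the instruction cache described above does not need local data: if each step's cache key is derived only from the instruction itself and the parent step's key, a backend (even a remote one) just stores keys. A minimal sketch of that chaining, assuming simple SHA-256 key derivation (the real implementation may differ):

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// cacheKey derives the key for one build step from its instruction and
// the key of its parent step. Deciding whether a step is a cache hit
// only requires comparing keys, not inspecting snapshot contents.
func cacheKey(parent, instruction string) string {
	sum := sha256.Sum256([]byte(parent + "\x00" + instruction))
	return fmt.Sprintf("%x", sum)
}

func main() {
	key := "" // the root step has no parent
	for _, ins := range []string{
		"FROM alpine:3.6",
		"RUN apk add --no-cache git",
		"COPY . /src",
	} {
		key = cacheKey(key, ins)
		fmt.Printf("%-28s -> %s\n", ins, key[:12])
	}
}
```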
+ +### Typed Dockerfile parsing + +[PR](https://github.com/moby/moby/pull/33492) + +The PR is in code review and waiting for feedback. Hopefully it will be ready to merge this week. + +### Quality: Dependency interface switch + +No updates this week. The metadata commands need to be updated, but it is probably easier to do that after https://github.com/moby/moby/pull/33492 has been merged. + +### Proposals for new Dockerfile features that need design feedback: + +[Add IMPORT/EXPORT commands to Dockerfile](https://github.com/moby/moby/issues/32100) + +[Add `DOCKEROS/DOCKERARCH` default ARG to Dockerfile](https://github.com/moby/moby/issues/32487) + +[Add support for `RUN --mount`](https://github.com/moby/moby/issues/32507) + +[DAG image builder](https://github.com/moby/moby/issues/32550) + +[Option to export the hash of the build context](https://github.com/moby/moby/issues/32963) (new) + +[Allow --cache-from=*](https://github.com/moby/moby/issues/33002#issuecomment-299041162) (new) + +[Provide advanced .dockerignore use-cases](https://github.com/moby/moby/issues/12886) [2](https://github.com/moby/moby/issues/12886#issuecomment-306247989) + +New: [RFC: Distributed BuildKit](https://github.com/moby/buildkit/issues/62) + +If you are interested in implementing any of them, leave a comment on the specific issues. + +### Other builder PRs merged last week + +[build: fix add from remote url](https://github.com/moby/moby/pull/33851) + +### Builder features currently in code-review: + +[Fix shallow git clone in docker-build](https://github.com/moby/moby/pull/33704) + +### Backlog + +[Build secrets](https://github.com/moby/moby/issues/33343) has not gotten much traction. If you want this feature to become a reality, please make yourself heard. \ No newline at end of file diff --git a/vendor/github.com/moby/moby/restartmanager/restartmanager.go b/vendor/github.com/moby/moby/restartmanager/restartmanager.go new file mode 100644 index 000000000..ec3b1cc24 --- /dev/null +++ b/vendor/github.com/moby/moby/restartmanager/restartmanager.go @@ -0,0 +1,133 @@ +package restartmanager + +import ( + "errors" + "fmt" + "sync" + "time" + + "github.com/docker/docker/api/types/container" +) + +const ( + backoffMultiplier = 2 + defaultTimeout = 100 * time.Millisecond + maxRestartTimeout = 1 * time.Minute +) + +// ErrRestartCanceled is returned when the restart manager has been +// canceled and will no longer restart the container. +var ErrRestartCanceled = errors.New("restart canceled") + +// RestartManager defines an object that controls container restarting rules. +type RestartManager interface { + Cancel() error + ShouldRestart(exitCode uint32, hasBeenManuallyStopped bool, executionDuration time.Duration) (bool, chan error, error) +} + +type restartManager struct { + sync.Mutex + sync.Once + policy container.RestartPolicy + restartCount int + timeout time.Duration + active bool + cancel chan struct{} + canceled bool +} + +// New returns a new restartManager based on a policy.
+func New(policy container.RestartPolicy, restartCount int) RestartManager { + return &restartManager{policy: policy, restartCount: restartCount, cancel: make(chan struct{})} +} + +func (rm *restartManager) SetPolicy(policy container.RestartPolicy) { + rm.Lock() + rm.policy = policy + rm.Unlock() +} + +func (rm *restartManager) ShouldRestart(exitCode uint32, hasBeenManuallyStopped bool, executionDuration time.Duration) (bool, chan error, error) { + if rm.policy.IsNone() { + return false, nil, nil + } + rm.Lock() + unlockOnExit := true + defer func() { + if unlockOnExit { + rm.Unlock() + } + }() + + if rm.canceled { + return false, nil, ErrRestartCanceled + } + + if rm.active { + return false, nil, fmt.Errorf("invalid call on an active restart manager") + } + // if the container ran for more than 10s, regardless of status and policy, reset the + // timeout back to the default. + if executionDuration.Seconds() >= 10 { + rm.timeout = 0 + } + switch { + case rm.timeout == 0: + rm.timeout = defaultTimeout + case rm.timeout < maxRestartTimeout: + rm.timeout *= backoffMultiplier + } + if rm.timeout > maxRestartTimeout { + rm.timeout = maxRestartTimeout + } + + var restart bool + switch { + case rm.policy.IsAlways(): + restart = true + case rm.policy.IsUnlessStopped() && !hasBeenManuallyStopped: + restart = true + case rm.policy.IsOnFailure(): + // the default value of 0 for MaximumRetryCount means that we will not enforce a maximum count + if max := rm.policy.MaximumRetryCount; max == 0 || rm.restartCount < max { + restart = exitCode != 0 + } + } + + if !restart { + rm.active = false + return false, nil, nil + } + + rm.restartCount++ + + unlockOnExit = false + rm.active = true + rm.Unlock() + + ch := make(chan error) + go func() { + select { + case <-rm.cancel: + ch <- ErrRestartCanceled + close(ch) + case <-time.After(rm.timeout): + rm.Lock() + close(ch) + rm.active = false + rm.Unlock() + } + }() + + return true, ch, nil +} + +func (rm *restartManager) Cancel() error { + rm.Do(func() { + rm.Lock() + rm.canceled = true + close(rm.cancel) + rm.Unlock() + }) + return nil +} diff --git a/vendor/github.com/moby/moby/restartmanager/restartmanager_test.go b/vendor/github.com/moby/moby/restartmanager/restartmanager_test.go new file mode 100644 index 000000000..0c91235d7 --- /dev/null +++ b/vendor/github.com/moby/moby/restartmanager/restartmanager_test.go @@ -0,0 +1,36 @@ +package restartmanager + +import ( + "testing" + "time" + + "github.com/docker/docker/api/types/container" +) + +func TestRestartManagerTimeout(t *testing.T) { + rm := New(container.RestartPolicy{Name: "always"}, 0).(*restartManager) + var duration = time.Duration(1 * time.Second) + should, _, err := rm.ShouldRestart(0, false, duration) + if err != nil { + t.Fatal(err) + } + if !should { + t.Fatal("container should be restarted") + } + if rm.timeout != defaultTimeout { + t.Fatalf("restart manager should have a timeout of 100 ms but has %s", rm.timeout) + } +} + +func TestRestartManagerTimeoutReset(t *testing.T) { + rm := New(container.RestartPolicy{Name: "always"}, 0).(*restartManager) + rm.timeout = 5 * time.Second + var duration = time.Duration(10 * time.Second) + _, _, err := rm.ShouldRestart(0, false, duration) + if err != nil { + t.Fatal(err) + } + if rm.timeout != defaultTimeout { + t.Fatalf("restart manager should have a timeout of 100 ms but has %s", rm.timeout) + } +} diff --git a/vendor/github.com/moby/moby/runconfig/config.go b/vendor/github.com/moby/moby/runconfig/config.go new file mode 100644 index
000000000..c9dc6e96e --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/config.go @@ -0,0 +1,108 @@ +package runconfig + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/volume" +) + +// ContainerDecoder implements httputils.ContainerDecoder +// calling DecodeContainerConfig. +type ContainerDecoder struct{} + +// DecodeConfig makes ContainerDecoder implement httputils.ContainerDecoder +func (r ContainerDecoder) DecodeConfig(src io.Reader) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { + return DecodeContainerConfig(src) +} + +// DecodeHostConfig makes ContainerDecoder implement httputils.ContainerDecoder +func (r ContainerDecoder) DecodeHostConfig(src io.Reader) (*container.HostConfig, error) { + return DecodeHostConfig(src) +} + +// DecodeContainerConfig decodes a JSON encoded config into a ContainerConfigWrapper +// struct and returns both a Config and a HostConfig struct. +// Be aware that this function does not check whether the resulting structs are nil; +// that is the caller's responsibility. +func DecodeContainerConfig(src io.Reader) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { + var w ContainerConfigWrapper + + decoder := json.NewDecoder(src) + if err := decoder.Decode(&w); err != nil { + return nil, nil, nil, err + } + + hc := w.getHostConfig() + + // Perform platform-specific processing of Volumes and Binds. + if w.Config != nil && hc != nil { + + // Initialize the volumes map if currently nil + if w.Config.Volumes == nil { + w.Config.Volumes = make(map[string]struct{}) + } + + // Now validate all the volumes and binds + if err := validateMountSettings(w.Config, hc); err != nil { + return nil, nil, nil, err + } + } + + // Certain parameters need daemon-side validation that cannot be done + // on the client, as only the daemon knows what is valid for the platform. + if err := validateNetMode(w.Config, hc); err != nil { + return nil, nil, nil, err + } + + // Validate isolation + if err := validateIsolation(hc); err != nil { + return nil, nil, nil, err + } + + // Validate QoS + if err := validateQoS(hc); err != nil { + return nil, nil, nil, err + } + + // Validate Resources + if err := validateResources(hc, sysinfo.New(true)); err != nil { + return nil, nil, nil, err + } + + // Validate Privileged + if err := validatePrivileged(hc); err != nil { + return nil, nil, nil, err + } + + // Validate ReadonlyRootfs + if err := validateReadonlyRootfs(hc); err != nil { + return nil, nil, nil, err + } + + return w.Config, hc, w.NetworkingConfig, nil +} + +// validateMountSettings validates each of the volumes and bind settings +// passed by the caller to ensure they are valid. +func validateMountSettings(c *container.Config, hc *container.HostConfig) error { + // it is ok to have len(hc.Mounts) > 0 && (len(hc.Binds) > 0 || len(c.Volumes) > 0 || len(hc.Tmpfs) > 0) + + // Ensure all volumes and binds are valid.
+ for spec := range c.Volumes { + if _, err := volume.ParseMountRaw(spec, hc.VolumeDriver); err != nil { + return fmt.Errorf("invalid volume spec %q: %v", spec, err) + } + } + for _, spec := range hc.Binds { + if _, err := volume.ParseMountRaw(spec, hc.VolumeDriver); err != nil { + return fmt.Errorf("invalid bind mount spec %q: %v", spec, err) + } + } + + return nil +} diff --git a/vendor/github.com/moby/moby/runconfig/config_test.go b/vendor/github.com/moby/moby/runconfig/config_test.go new file mode 100644 index 000000000..83ec363a0 --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/config_test.go @@ -0,0 +1,139 @@ +package runconfig + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/strslice" +) + +type f struct { + file string + entrypoint strslice.StrSlice +} + +func TestDecodeContainerConfig(t *testing.T) { + + var ( + fixtures []f + image string + ) + + //TODO: Should run for Solaris + if runtime.GOOS == "solaris" { + t.Skip() + } + + if runtime.GOOS != "windows" { + image = "ubuntu" + fixtures = []f{ + {"fixtures/unix/container_config_1_14.json", strslice.StrSlice{}}, + {"fixtures/unix/container_config_1_17.json", strslice.StrSlice{"bash"}}, + {"fixtures/unix/container_config_1_19.json", strslice.StrSlice{"bash"}}, + } + } else { + image = "windows" + fixtures = []f{ + {"fixtures/windows/container_config_1_19.json", strslice.StrSlice{"cmd"}}, + } + } + + for _, f := range fixtures { + b, err := ioutil.ReadFile(f.file) + if err != nil { + t.Fatal(err) + } + + c, h, _, err := DecodeContainerConfig(bytes.NewReader(b)) + if err != nil { + t.Fatal(fmt.Errorf("Error parsing %s: %v", f, err)) + } + + if c.Image != image { + t.Fatalf("Expected %s image, found %s\n", image, c.Image) + } + + if len(c.Entrypoint) != len(f.entrypoint) { + t.Fatalf("Expected %v, found %v\n", f.entrypoint, c.Entrypoint) + } + + if h != nil && h.Memory != 1000 { + t.Fatalf("Expected memory to be 1000, found %d\n", h.Memory) + } + } +} + +// TestDecodeContainerConfigIsolation validates isolation passed +// to the daemon in the hostConfig structure. Note this is platform specific +// as to what level of container isolation is supported. 
+func TestDecodeContainerConfigIsolation(t *testing.T) { + + // An Invalid isolation level + if _, _, _, err := callDecodeContainerConfigIsolation("invalid"); err != nil { + if !strings.Contains(err.Error(), `Invalid isolation: "invalid"`) { + t.Fatal(err) + } + } + + // Blank isolation (== default) + if _, _, _, err := callDecodeContainerConfigIsolation(""); err != nil { + t.Fatal("Blank isolation should have succeeded") + } + + // Default isolation + if _, _, _, err := callDecodeContainerConfigIsolation("default"); err != nil { + t.Fatal("default isolation should have succeeded") + } + + // Process isolation (Valid on Windows only) + if runtime.GOOS == "windows" { + if _, _, _, err := callDecodeContainerConfigIsolation("process"); err != nil { + t.Fatal("process isolation should have succeeded") + } + } else { + if _, _, _, err := callDecodeContainerConfigIsolation("process"); err != nil { + if !strings.Contains(err.Error(), `Invalid isolation: "process"`) { + t.Fatal(err) + } + } + } + + // Hyper-V Containers isolation (Valid on Windows only) + if runtime.GOOS == "windows" { + if _, _, _, err := callDecodeContainerConfigIsolation("hyperv"); err != nil { + t.Fatal("hyperv isolation should have succeeded") + } + } else { + if _, _, _, err := callDecodeContainerConfigIsolation("hyperv"); err != nil { + if !strings.Contains(err.Error(), `Invalid isolation: "hyperv"`) { + t.Fatal(err) + } + } + } +} + +// callDecodeContainerConfigIsolation is a utility function to call +// DecodeContainerConfig for validating isolation +func callDecodeContainerConfigIsolation(isolation string) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { + var ( + b []byte + err error + ) + w := ContainerConfigWrapper{ + Config: &container.Config{}, + HostConfig: &container.HostConfig{ + NetworkMode: "none", + Isolation: container.Isolation(isolation)}, + } + if b, err = json.Marshal(w); err != nil { + return nil, nil, nil, fmt.Errorf("Error on marshal %s", err.Error()) + } + return DecodeContainerConfig(bytes.NewReader(b)) +} diff --git a/vendor/github.com/moby/moby/runconfig/config_unix.go b/vendor/github.com/moby/moby/runconfig/config_unix.go new file mode 100644 index 000000000..b4fbfb279 --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/config_unix.go @@ -0,0 +1,59 @@ +// +build !windows + +package runconfig + +import ( + "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" +) + +// ContainerConfigWrapper is a Config wrapper that holds the container Config (portable) +// and the corresponding HostConfig (non-portable). +type ContainerConfigWrapper struct { + *container.Config + InnerHostConfig *container.HostConfig `json:"HostConfig,omitempty"` + Cpuset string `json:",omitempty"` // Deprecated. Exported for backwards compatibility. + NetworkingConfig *networktypes.NetworkingConfig `json:"NetworkingConfig,omitempty"` + *container.HostConfig // Deprecated. Exported to read attributes from json that are not in the inner host config structure. +} + +// getHostConfig gets the HostConfig of the Config. 
+// It's mostly there to handle Deprecated fields of the ContainerConfigWrapper +func (w *ContainerConfigWrapper) getHostConfig() *container.HostConfig { + hc := w.HostConfig + + if hc == nil && w.InnerHostConfig != nil { + hc = w.InnerHostConfig + } else if w.InnerHostConfig != nil { + if hc.Memory != 0 && w.InnerHostConfig.Memory == 0 { + w.InnerHostConfig.Memory = hc.Memory + } + if hc.MemorySwap != 0 && w.InnerHostConfig.MemorySwap == 0 { + w.InnerHostConfig.MemorySwap = hc.MemorySwap + } + if hc.CPUShares != 0 && w.InnerHostConfig.CPUShares == 0 { + w.InnerHostConfig.CPUShares = hc.CPUShares + } + if hc.CpusetCpus != "" && w.InnerHostConfig.CpusetCpus == "" { + w.InnerHostConfig.CpusetCpus = hc.CpusetCpus + } + + if hc.VolumeDriver != "" && w.InnerHostConfig.VolumeDriver == "" { + w.InnerHostConfig.VolumeDriver = hc.VolumeDriver + } + + hc = w.InnerHostConfig + } + + if hc != nil { + if w.Cpuset != "" && hc.CpusetCpus == "" { + hc.CpusetCpus = w.Cpuset + } + } + + // Make sure NetworkMode has an acceptable value. We do this to ensure + // backwards compatible API behavior. + SetDefaultNetModeIfBlank(hc) + + return hc +} diff --git a/vendor/github.com/moby/moby/runconfig/config_windows.go b/vendor/github.com/moby/moby/runconfig/config_windows.go new file mode 100644 index 000000000..f2361b554 --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/config_windows.go @@ -0,0 +1,19 @@ +package runconfig + +import ( + "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" +) + +// ContainerConfigWrapper is a Config wrapper that holds the container Config (portable) +// and the corresponding HostConfig (non-portable). +type ContainerConfigWrapper struct { + *container.Config + HostConfig *container.HostConfig `json:"HostConfig,omitempty"` + NetworkingConfig *networktypes.NetworkingConfig `json:"NetworkingConfig,omitempty"` +} + +// getHostConfig gets the HostConfig of the Config. +func (w *ContainerConfigWrapper) getHostConfig() *container.HostConfig { + return w.HostConfig +} diff --git a/vendor/github.com/moby/moby/runconfig/errors.go b/vendor/github.com/moby/moby/runconfig/errors.go new file mode 100644 index 000000000..c95a2919e --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/errors.go @@ -0,0 +1,38 @@ +package runconfig + +import ( + "fmt" +) + +var ( + // ErrConflictContainerNetworkAndLinks conflict between --net=container and links + ErrConflictContainerNetworkAndLinks = fmt.Errorf("conflicting options: container type network can't be used with links. This would result in undefined behavior") + // ErrConflictSharedNetwork conflict between private and other networks + ErrConflictSharedNetwork = fmt.Errorf("container sharing network namespace with another container or host cannot be connected to any other network") + // ErrConflictHostNetwork conflict from being disconnected from host network or connected to host network. 
+ ErrConflictHostNetwork = fmt.Errorf("container cannot be disconnected from host network or connected to host network") + // ErrConflictNoNetwork conflict between private and other networks + ErrConflictNoNetwork = fmt.Errorf("container cannot be connected to multiple networks with one of the networks in private (none) mode") + // ErrConflictNetworkAndDNS conflict between --dns and the network mode + ErrConflictNetworkAndDNS = fmt.Errorf("conflicting options: dns and the network mode") + // ErrConflictNetworkHostname conflict between the hostname and the network mode + ErrConflictNetworkHostname = fmt.Errorf("conflicting options: hostname and the network mode") + // ErrConflictHostNetworkAndLinks conflict between --net=host and links + ErrConflictHostNetworkAndLinks = fmt.Errorf("conflicting options: host type networking can't be used with links. This would result in undefined behavior") + // ErrConflictContainerNetworkAndMac conflict between the mac address and the network mode + ErrConflictContainerNetworkAndMac = fmt.Errorf("conflicting options: mac-address and the network mode") + // ErrConflictNetworkHosts conflict between add-host and the network mode + ErrConflictNetworkHosts = fmt.Errorf("conflicting options: custom host-to-IP mapping and the network mode") + // ErrConflictNetworkPublishPorts conflict between the publish options and the network mode + ErrConflictNetworkPublishPorts = fmt.Errorf("conflicting options: port publishing and the container type network mode") + // ErrConflictNetworkExposePorts conflict between the expose option and the network mode + ErrConflictNetworkExposePorts = fmt.Errorf("conflicting options: port exposing and the container type network mode") + // ErrUnsupportedNetworkAndIP conflict between network mode and requested ip address + ErrUnsupportedNetworkAndIP = fmt.Errorf("user specified IP address is supported on user defined networks only") + // ErrUnsupportedNetworkNoSubnetAndIP conflict between network with no configured subnet and requested ip address + ErrUnsupportedNetworkNoSubnetAndIP = fmt.Errorf("user specified IP address is supported only when connecting to networks with user configured subnets") + // ErrUnsupportedNetworkAndAlias conflict between network mode and alias + ErrUnsupportedNetworkAndAlias = fmt.Errorf("network-scoped alias is supported only for containers in user defined networks") + // ErrConflictUTSHostname conflict between the hostname and the UTS mode + ErrConflictUTSHostname = fmt.Errorf("conflicting options: hostname and the UTS mode") +) diff --git a/vendor/github.com/moby/moby/runconfig/fixtures/unix/container_config_1_14.json b/vendor/github.com/moby/moby/runconfig/fixtures/unix/container_config_1_14.json new file mode 100644 index 000000000..b08334c09 --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/fixtures/unix/container_config_1_14.json @@ -0,0 +1,30 @@ +{ + "Hostname":"", + "Domainname": "", + "User":"", + "Memory": 1000, + "MemorySwap":0, + "CpuShares": 512, + "Cpuset": "0,1", + "AttachStdin":false, + "AttachStdout":true, + "AttachStderr":true, + "PortSpecs":null, + "Tty":false, + "OpenStdin":false, + "StdinOnce":false, + "Env":null, + "Cmd":[ + "bash" + ], + "Image":"ubuntu", + "Volumes":{ + "/tmp": {} + }, + "WorkingDir":"", + "NetworkDisabled": false, + "ExposedPorts":{ + "22/tcp": {} + }, + "RestartPolicy": { "Name": "always" } +} diff --git a/vendor/github.com/moby/moby/runconfig/fixtures/unix/container_config_1_17.json b/vendor/github.com/moby/moby/runconfig/fixtures/unix/container_config_1_17.json 
new file mode 100644 index 000000000..0d780877b --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/fixtures/unix/container_config_1_17.json @@ -0,0 +1,50 @@ +{ + "Hostname": "", + "Domainname": "", + "User": "", + "Memory": 1000, + "MemorySwap": 0, + "CpuShares": 512, + "Cpuset": "0,1", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Entrypoint": "bash", + "Image": "ubuntu", + "Volumes": { + "/tmp": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "SecurityOpt": [""], + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "DnsOptions": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [] + } +} diff --git a/vendor/github.com/moby/moby/runconfig/fixtures/unix/container_config_1_19.json b/vendor/github.com/moby/moby/runconfig/fixtures/unix/container_config_1_19.json new file mode 100644 index 000000000..de49cf324 --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/fixtures/unix/container_config_1_19.json @@ -0,0 +1,58 @@ +{ + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Entrypoint": "bash", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "/tmp": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "Memory": 1000, + "MemorySwap": 0, + "CpuShares": 512, + "CpusetCpus": "0,1", + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "DnsOptions": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [""], + "CgroupParent": "" + } +} diff --git a/vendor/github.com/moby/moby/runconfig/fixtures/unix/container_hostconfig_1_14.json b/vendor/github.com/moby/moby/runconfig/fixtures/unix/container_hostconfig_1_14.json new file mode 100644 index 000000000..c72ac91ca --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/fixtures/unix/container_hostconfig_1_14.json @@ -0,0 +1,18 @@ +{ + "Binds": ["/tmp:/tmp"], + "ContainerIDFile": "", + "LxcConf": [], + "Privileged": false, + "PortBindings": { + "80/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "49153" + } + ] + }, + "Links": ["/name:alias"], + "PublishAllPorts": false, + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"] +} diff --git 
a/vendor/github.com/moby/moby/runconfig/fixtures/unix/container_hostconfig_1_19.json b/vendor/github.com/moby/moby/runconfig/fixtures/unix/container_hostconfig_1_19.json new file mode 100644 index 000000000..5ca8aa7e1 --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/fixtures/unix/container_hostconfig_1_19.json @@ -0,0 +1,30 @@ +{ + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpusetCpus": "0,1", + "CpusetMems": "0,1", + "BlkioWeight": 300, + "OomKillDisable": false, + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [""], + "CgroupParent": "" +} diff --git a/vendor/github.com/moby/moby/runconfig/fixtures/windows/container_config_1_19.json b/vendor/github.com/moby/moby/runconfig/fixtures/windows/container_config_1_19.json new file mode 100644 index 000000000..724320c76 --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/fixtures/windows/container_config_1_19.json @@ -0,0 +1,58 @@ +{ + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Entrypoint": "cmd", + "Image": "windows", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "c:/windows": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "HostConfig": { + "Binds": ["c:/windows:d:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "Memory": 1000, + "MemorySwap": 0, + "CpuShares": 512, + "CpusetCpus": "0,1", + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "DnsOptions": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "default", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [""], + "CgroupParent": "" + } +} diff --git a/vendor/github.com/moby/moby/runconfig/hostconfig.go b/vendor/github.com/moby/moby/runconfig/hostconfig.go new file mode 100644 index 000000000..24aed1935 --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/hostconfig.go @@ -0,0 +1,80 @@ +package runconfig + +import ( + "encoding/json" + "fmt" + "io" + "strings" + + "github.com/docker/docker/api/types/container" +) + +// DecodeHostConfig creates a HostConfig based on the specified Reader. +// It assumes the content of the reader will be JSON, and decodes it. 
+func DecodeHostConfig(src io.Reader) (*container.HostConfig, error) { + decoder := json.NewDecoder(src) + + var w ContainerConfigWrapper + if err := decoder.Decode(&w); err != nil { + return nil, err + } + + hc := w.getHostConfig() + return hc, nil +} + +// SetDefaultNetModeIfBlank changes the NetworkMode in a HostConfig structure +// to default if it is not populated. This ensures backwards compatibility after +// the validation of the network mode was moved from the docker CLI to the +// docker daemon. +func SetDefaultNetModeIfBlank(hc *container.HostConfig) { + if hc != nil { + if hc.NetworkMode == container.NetworkMode("") { + hc.NetworkMode = container.NetworkMode("default") + } + } +} + +// validateNetContainerMode ensures that the various combinations of requested +// network settings with respect to container mode are valid. +func validateNetContainerMode(c *container.Config, hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + parts := strings.Split(string(hc.NetworkMode), ":") + if parts[0] == "container" { + if len(parts) < 2 || parts[1] == "" { + return fmt.Errorf("Invalid network mode: invalid container format container:<name|id>") + } + } + + if hc.NetworkMode.IsContainer() && c.Hostname != "" { + return ErrConflictNetworkHostname + } + + if hc.NetworkMode.IsContainer() && len(hc.Links) > 0 { + return ErrConflictContainerNetworkAndLinks + } + + if hc.NetworkMode.IsContainer() && len(hc.DNS) > 0 { + return ErrConflictNetworkAndDNS + } + + if hc.NetworkMode.IsContainer() && len(hc.ExtraHosts) > 0 { + return ErrConflictNetworkHosts + } + + if (hc.NetworkMode.IsContainer() || hc.NetworkMode.IsHost()) && c.MacAddress != "" { + return ErrConflictContainerNetworkAndMac + } + + if hc.NetworkMode.IsContainer() && (len(hc.PortBindings) > 0 || hc.PublishAllPorts) { + return ErrConflictNetworkPublishPorts + } + + if hc.NetworkMode.IsContainer() && len(c.ExposedPorts) > 0 { + return ErrConflictNetworkExposePorts + } + return nil +} diff --git a/vendor/github.com/moby/moby/runconfig/hostconfig_solaris.go b/vendor/github.com/moby/moby/runconfig/hostconfig_solaris.go new file mode 100644 index 000000000..5b6e13dc9 --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/hostconfig_solaris.go @@ -0,0 +1,46 @@ +package runconfig + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/sysinfo" +) + +// DefaultDaemonNetworkMode returns the default network stack the daemon should +// use. +func DefaultDaemonNetworkMode() container.NetworkMode { + return container.NetworkMode("bridge") +} + +// IsPreDefinedNetwork indicates if a network is predefined by the daemon +func IsPreDefinedNetwork(network string) bool { + return false +} + +// validateNetMode ensures that the various combinations of requested +// network settings are valid. +func validateNetMode(c *container.Config, hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + return nil +} + +// validateIsolation performs platform specific validation of the +// isolation level in the hostconfig structure. +// This setting is currently discarded for Solaris so this is a no-op.
+func validateIsolation(hc *container.HostConfig) error { + return nil +} + +// validateQoS performs platform specific validation of the QoS settings +func validateQoS(hc *container.HostConfig) error { + return nil +} + +// validateResources performs platform specific validation of the resource settings +func validateResources(hc *container.HostConfig, si *sysinfo.SysInfo) error { + return nil +} + +// validatePrivileged performs platform specific validation of the Privileged setting +func validatePrivileged(hc *container.HostConfig) error { + return nil +} diff --git a/vendor/github.com/moby/moby/runconfig/hostconfig_test.go b/vendor/github.com/moby/moby/runconfig/hostconfig_test.go new file mode 100644 index 000000000..a6a3eef7c --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/hostconfig_test.go @@ -0,0 +1,283 @@ +// +build !windows + +package runconfig + +import ( + "bytes" + "fmt" + "io/ioutil" + "testing" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/sysinfo" +) + +// TODO Windows: This will need addressing for a Windows daemon. +func TestNetworkModeTest(t *testing.T) { + networkModes := map[container.NetworkMode][]bool{ + // private, bridge, host, container, none, default + "": {true, false, false, false, false, false}, + "something:weird": {true, false, false, false, false, false}, + "bridge": {true, true, false, false, false, false}, + DefaultDaemonNetworkMode(): {true, true, false, false, false, false}, + "host": {false, false, true, false, false, false}, + "container:name": {false, false, false, true, false, false}, + "none": {true, false, false, false, true, false}, + "default": {true, false, false, false, false, true}, + } + networkModeNames := map[container.NetworkMode]string{ + "": "", + "something:weird": "something:weird", + "bridge": "bridge", + DefaultDaemonNetworkMode(): "bridge", + "host": "host", + "container:name": "container", + "none": "none", + "default": "default", + } + for networkMode, state := range networkModes { + if networkMode.IsPrivate() != state[0] { + t.Fatalf("NetworkMode.IsPrivate for %v should have been %v but was %v", networkMode, state[0], networkMode.IsPrivate()) + } + if networkMode.IsBridge() != state[1] { + t.Fatalf("NetworkMode.IsBridge for %v should have been %v but was %v", networkMode, state[1], networkMode.IsBridge()) + } + if networkMode.IsHost() != state[2] { + t.Fatalf("NetworkMode.IsHost for %v should have been %v but was %v", networkMode, state[2], networkMode.IsHost()) + } + if networkMode.IsContainer() != state[3] { + t.Fatalf("NetworkMode.IsContainer for %v should have been %v but was %v", networkMode, state[3], networkMode.IsContainer()) + } + if networkMode.IsNone() != state[4] { + t.Fatalf("NetworkMode.IsNone for %v should have been %v but was %v", networkMode, state[4], networkMode.IsNone()) + } + if networkMode.IsDefault() != state[5] { + t.Fatalf("NetworkMode.IsDefault for %v should have been %v but was %v", networkMode, state[5], networkMode.IsDefault()) + } + if networkMode.NetworkName() != networkModeNames[networkMode] { + t.Fatalf("Expected name %v, got %v", networkModeNames[networkMode], networkMode.NetworkName()) + } + } +} + +func TestIpcModeTest(t *testing.T) { + ipcModes := map[container.IpcMode][]bool{ + // private, host, container, valid + "": {true, false, false, true}, + "something:weird": {true, false, false, false}, + ":weird": {true, false, false, true}, + "host": {false, true, false, true}, + "container:name": {false, false, true, true}, + 
"container:name:something": {false, false, true, false}, + "container:": {false, false, true, false}, + } + for ipcMode, state := range ipcModes { + if ipcMode.IsPrivate() != state[0] { + t.Fatalf("IpcMode.IsPrivate for %v should have been %v but was %v", ipcMode, state[0], ipcMode.IsPrivate()) + } + if ipcMode.IsHost() != state[1] { + t.Fatalf("IpcMode.IsHost for %v should have been %v but was %v", ipcMode, state[1], ipcMode.IsHost()) + } + if ipcMode.IsContainer() != state[2] { + t.Fatalf("IpcMode.IsContainer for %v should have been %v but was %v", ipcMode, state[2], ipcMode.IsContainer()) + } + if ipcMode.Valid() != state[3] { + t.Fatalf("IpcMode.Valid for %v should have been %v but was %v", ipcMode, state[3], ipcMode.Valid()) + } + } + containerIpcModes := map[container.IpcMode]string{ + "": "", + "something": "", + "something:weird": "weird", + "container": "", + "container:": "", + "container:name": "name", + "container:name1:name2": "name1:name2", + } + for ipcMode, container := range containerIpcModes { + if ipcMode.Container() != container { + t.Fatalf("Expected %v for %v but was %v", container, ipcMode, ipcMode.Container()) + } + } +} + +func TestUTSModeTest(t *testing.T) { + utsModes := map[container.UTSMode][]bool{ + // private, host, valid + "": {true, false, true}, + "something:weird": {true, false, false}, + "host": {false, true, true}, + "host:name": {true, false, true}, + } + for utsMode, state := range utsModes { + if utsMode.IsPrivate() != state[0] { + t.Fatalf("UtsMode.IsPrivate for %v should have been %v but was %v", utsMode, state[0], utsMode.IsPrivate()) + } + if utsMode.IsHost() != state[1] { + t.Fatalf("UtsMode.IsHost for %v should have been %v but was %v", utsMode, state[1], utsMode.IsHost()) + } + if utsMode.Valid() != state[2] { + t.Fatalf("UtsMode.Valid for %v should have been %v but was %v", utsMode, state[2], utsMode.Valid()) + } + } +} + +func TestUsernsModeTest(t *testing.T) { + usrensMode := map[container.UsernsMode][]bool{ + // private, host, valid + "": {true, false, true}, + "something:weird": {true, false, false}, + "host": {false, true, true}, + "host:name": {true, false, true}, + } + for usernsMode, state := range usrensMode { + if usernsMode.IsPrivate() != state[0] { + t.Fatalf("UsernsMode.IsPrivate for %v should have been %v but was %v", usernsMode, state[0], usernsMode.IsPrivate()) + } + if usernsMode.IsHost() != state[1] { + t.Fatalf("UsernsMode.IsHost for %v should have been %v but was %v", usernsMode, state[1], usernsMode.IsHost()) + } + if usernsMode.Valid() != state[2] { + t.Fatalf("UsernsMode.Valid for %v should have been %v but was %v", usernsMode, state[2], usernsMode.Valid()) + } + } +} + +func TestPidModeTest(t *testing.T) { + pidModes := map[container.PidMode][]bool{ + // private, host, valid + "": {true, false, true}, + "something:weird": {true, false, false}, + "host": {false, true, true}, + "host:name": {true, false, true}, + } + for pidMode, state := range pidModes { + if pidMode.IsPrivate() != state[0] { + t.Fatalf("PidMode.IsPrivate for %v should have been %v but was %v", pidMode, state[0], pidMode.IsPrivate()) + } + if pidMode.IsHost() != state[1] { + t.Fatalf("PidMode.IsHost for %v should have been %v but was %v", pidMode, state[1], pidMode.IsHost()) + } + if pidMode.Valid() != state[2] { + t.Fatalf("PidMode.Valid for %v should have been %v but was %v", pidMode, state[2], pidMode.Valid()) + } + } +} + +func TestRestartPolicy(t *testing.T) { + restartPolicies := map[container.RestartPolicy][]bool{ + // none, always, failure + {}: 
{true, false, false}, + {Name: "something", MaximumRetryCount: 0}: {false, false, false}, + {Name: "no", MaximumRetryCount: 0}: {true, false, false}, + {Name: "always", MaximumRetryCount: 0}: {false, true, false}, + {Name: "on-failure", MaximumRetryCount: 0}: {false, false, true}, + } + for restartPolicy, state := range restartPolicies { + if restartPolicy.IsNone() != state[0] { + t.Fatalf("RestartPolicy.IsNone for %v should have been %v but was %v", restartPolicy, state[0], restartPolicy.IsNone()) + } + if restartPolicy.IsAlways() != state[1] { + t.Fatalf("RestartPolicy.IsAlways for %v should have been %v but was %v", restartPolicy, state[1], restartPolicy.IsAlways()) + } + if restartPolicy.IsOnFailure() != state[2] { + t.Fatalf("RestartPolicy.IsOnFailure for %v should have been %v but was %v", restartPolicy, state[2], restartPolicy.IsOnFailure()) + } + } +} +func TestDecodeHostConfig(t *testing.T) { + fixtures := []struct { + file string + }{ + {"fixtures/unix/container_hostconfig_1_14.json"}, + {"fixtures/unix/container_hostconfig_1_19.json"}, + } + + for _, f := range fixtures { + b, err := ioutil.ReadFile(f.file) + if err != nil { + t.Fatal(err) + } + + c, err := DecodeHostConfig(bytes.NewReader(b)) + if err != nil { + t.Fatal(fmt.Errorf("Error parsing %s: %v", f, err)) + } + + if c.Privileged != false { + t.Fatalf("Expected privileged false, found %v\n", c.Privileged) + } + + if l := len(c.Binds); l != 1 { + t.Fatalf("Expected 1 bind, found %d\n", l) + } + + if len(c.CapAdd) != 1 || c.CapAdd[0] != "NET_ADMIN" { + t.Fatalf("Expected CapAdd NET_ADMIN, got %v", c.CapAdd) + } + + if len(c.CapDrop) != 1 || c.CapDrop[0] != "MKNOD" { + t.Fatalf("Expected CapDrop MKNOD, got %v", c.CapDrop) + } + } +} + +func TestValidateResources(t *testing.T) { + type resourceTest struct { + ConfigCPURealtimePeriod int64 + ConfigCPURealtimeRuntime int64 + SysInfoCPURealtimePeriod bool + SysInfoCPURealtimeRuntime bool + ErrorExpected bool + FailureMsg string + } + + tests := []resourceTest{ + { + ConfigCPURealtimePeriod: 1000, + ConfigCPURealtimeRuntime: 1000, + SysInfoCPURealtimePeriod: true, + SysInfoCPURealtimeRuntime: true, + ErrorExpected: false, + FailureMsg: "Expected valid configuration", + }, + { + ConfigCPURealtimePeriod: 5000, + ConfigCPURealtimeRuntime: 5000, + SysInfoCPURealtimePeriod: false, + SysInfoCPURealtimeRuntime: true, + ErrorExpected: true, + FailureMsg: "Expected failure when cpu-rt-period is set but kernel doesn't support it", + }, + { + ConfigCPURealtimePeriod: 5000, + ConfigCPURealtimeRuntime: 5000, + SysInfoCPURealtimePeriod: true, + SysInfoCPURealtimeRuntime: false, + ErrorExpected: true, + FailureMsg: "Expected failure when cpu-rt-runtime is set but kernel doesn't support it", + }, + { + ConfigCPURealtimePeriod: 5000, + ConfigCPURealtimeRuntime: 10000, + SysInfoCPURealtimePeriod: true, + SysInfoCPURealtimeRuntime: false, + ErrorExpected: true, + FailureMsg: "Expected failure when cpu-rt-runtime is greater than cpu-rt-period", + }, + } + + for _, rt := range tests { + var hc container.HostConfig + hc.Resources.CPURealtimePeriod = rt.ConfigCPURealtimePeriod + hc.Resources.CPURealtimeRuntime = rt.ConfigCPURealtimeRuntime + + var si sysinfo.SysInfo + si.CPURealtimePeriod = rt.SysInfoCPURealtimePeriod + si.CPURealtimeRuntime = rt.SysInfoCPURealtimeRuntime + + if err := validateResources(&hc, &si); (err != nil) != rt.ErrorExpected { + t.Fatal(rt.FailureMsg, err) + } + } +} diff --git a/vendor/github.com/moby/moby/runconfig/hostconfig_unix.go
b/vendor/github.com/moby/moby/runconfig/hostconfig_unix.go new file mode 100644 index 000000000..55df5da3f --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/hostconfig_unix.go @@ -0,0 +1,110 @@ +// +build !windows,!solaris + +package runconfig + +import ( + "fmt" + "runtime" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/sysinfo" +) + +// DefaultDaemonNetworkMode returns the default network stack the daemon should +// use. +func DefaultDaemonNetworkMode() container.NetworkMode { + return container.NetworkMode("bridge") +} + +// IsPreDefinedNetwork indicates if a network is predefined by the daemon +func IsPreDefinedNetwork(network string) bool { + n := container.NetworkMode(network) + return n.IsBridge() || n.IsHost() || n.IsNone() || n.IsDefault() +} + +// validateNetMode ensures that the various combinations of requested +// network settings are valid. +func validateNetMode(c *container.Config, hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + + err := validateNetContainerMode(c, hc) + if err != nil { + return err + } + + if hc.UTSMode.IsHost() && c.Hostname != "" { + return ErrConflictUTSHostname + } + + if hc.NetworkMode.IsHost() && len(hc.Links) > 0 { + return ErrConflictHostNetworkAndLinks + } + + return nil +} + +// validateIsolation performs platform specific validation of +// isolation in the hostconfig structure. Linux only supports "default" +// which is LXC container isolation +func validateIsolation(hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + if !hc.Isolation.IsValid() { + return fmt.Errorf("Invalid isolation: %q - %s only supports 'default'", hc.Isolation, runtime.GOOS) + } + return nil +} + +// validateQoS performs platform specific validation of the QoS settings +func validateQoS(hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + + if hc.IOMaximumBandwidth != 0 { + return fmt.Errorf("Invalid QoS settings: %s does not support configuration of maximum bandwidth", runtime.GOOS) + } + + if hc.IOMaximumIOps != 0 { + return fmt.Errorf("Invalid QoS settings: %s does not support configuration of maximum IOPs", runtime.GOOS) + } + return nil +} + +// validateResources performs platform specific validation of the resource settings +// cpu-rt-runtime and cpu-rt-period can not be greater than their parent, cpu-rt-runtime requires sys_nice +func validateResources(hc *container.HostConfig, si *sysinfo.SysInfo) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + + if hc.Resources.CPURealtimePeriod > 0 && !si.CPURealtimePeriod { + return fmt.Errorf("Your kernel does not support cgroup cpu real-time period") + } + + if hc.Resources.CPURealtimeRuntime > 0 && !si.CPURealtimeRuntime { + return fmt.Errorf("Your kernel does not support cgroup cpu real-time runtime") + } + + if hc.Resources.CPURealtimePeriod != 0 && hc.Resources.CPURealtimeRuntime != 0 && hc.Resources.CPURealtimeRuntime > hc.Resources.CPURealtimePeriod { + return fmt.Errorf("cpu real-time runtime cannot be higher than cpu real-time period") + } + return nil +} + +// validatePrivileged performs platform specific validation of the Privileged setting +func validatePrivileged(hc *container.HostConfig) error { + return nil +} + 
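+// Illustration of validateResources above (hypothetical values): a HostConfig +// with CPURealtimePeriod=1000000 and CPURealtimeRuntime=950000 is accepted +// when sysinfo reports kernel support for both settings, while +// CPURealtimeRuntime=1100000 would be rejected because the runtime may not +// exceed the period. +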
+// validateReadonlyRootfs performs platform specific validation of the ReadonlyRootfs setting +func validateReadonlyRootfs(hc *container.HostConfig) error { + return nil +} diff --git a/vendor/github.com/moby/moby/runconfig/hostconfig_windows.go b/vendor/github.com/moby/moby/runconfig/hostconfig_windows.go new file mode 100644 index 000000000..5eb956d1b --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/hostconfig_windows.go @@ -0,0 +1,96 @@ +package runconfig + +import ( + "fmt" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/sysinfo" +) + +// DefaultDaemonNetworkMode returns the default network stack the daemon should +// use. +func DefaultDaemonNetworkMode() container.NetworkMode { + return container.NetworkMode("nat") +} + +// IsPreDefinedNetwork indicates if a network is predefined by the daemon +func IsPreDefinedNetwork(network string) bool { + return !container.NetworkMode(network).IsUserDefined() +} + +// validateNetMode ensures that the various combinations of requested +// network settings are valid. +func validateNetMode(c *container.Config, hc *container.HostConfig) error { + if hc == nil { + return nil + } + + err := validateNetContainerMode(c, hc) + if err != nil { + return err + } + + if hc.NetworkMode.IsContainer() && hc.Isolation.IsHyperV() { + return fmt.Errorf("Using the network stack of another container is not supported while using Hyper-V Containers") + } + + return nil +} + +// validateIsolation performs platform specific validation of the +// isolation in the hostconfig structure. Windows supports 'default' (or +// blank), 'process', or 'hyperv'. +func validateIsolation(hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + if !hc.Isolation.IsValid() { + return fmt.Errorf("Invalid isolation: %q. 
Windows supports 'default', 'process', or 'hyperv'", hc.Isolation) + } + return nil +} + +// validateQoS performs platform specific validation of the Qos settings +func validateQoS(hc *container.HostConfig) error { + return nil +} + +// validateResources performs platform specific validation of the resource settings +func validateResources(hc *container.HostConfig, si *sysinfo.SysInfo) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + if hc.Resources.CPURealtimePeriod != 0 { + return fmt.Errorf("Windows does not support CPU real-time period") + } + if hc.Resources.CPURealtimeRuntime != 0 { + return fmt.Errorf("Windows does not support CPU real-time runtime") + } + return nil +} + +// validatePrivileged performs platform specific validation of the Privileged setting +func validatePrivileged(hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + if hc.Privileged { + return fmt.Errorf("Windows does not support privileged mode") + } + return nil +} + +// validateReadonlyRootfs performs platform specific validation of the ReadonlyRootfs setting +func validateReadonlyRootfs(hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + if hc.ReadonlyRootfs { + return fmt.Errorf("Windows does not support root filesystem in read-only mode") + } + return nil +} diff --git a/vendor/github.com/moby/moby/runconfig/hostconfig_windows_test.go b/vendor/github.com/moby/moby/runconfig/hostconfig_windows_test.go new file mode 100644 index 000000000..b780dc05d --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/hostconfig_windows_test.go @@ -0,0 +1,17 @@ +// +build windows + +package runconfig + +import ( + "testing" + + "github.com/docker/docker/api/types/container" +) + +func TestValidatePrivileged(t *testing.T) { + expected := "Windows does not support privileged mode" + err := validatePrivileged(&container.HostConfig{Privileged: true}) + if err == nil || err.Error() != expected { + t.Fatalf("Expected %s", expected) + } +} diff --git a/vendor/github.com/moby/moby/runconfig/opts/parse.go b/vendor/github.com/moby/moby/runconfig/opts/parse.go new file mode 100644 index 000000000..a7f1b79f1 --- /dev/null +++ b/vendor/github.com/moby/moby/runconfig/opts/parse.go @@ -0,0 +1,20 @@ +package opts + +import ( + "strings" +) + +// ConvertKVStringsToMap converts ["key=value"] to {"key":"value"} +func ConvertKVStringsToMap(values []string) map[string]string { + result := make(map[string]string, len(values)) + for _, value := range values { + kv := strings.SplitN(value, "=", 2) + if len(kv) == 1 { + result[kv[0]] = "" + } else { + result[kv[0]] = kv[1] + } + } + + return result +} diff --git a/vendor/github.com/moby/moby/vendor.conf b/vendor/github.com/moby/moby/vendor.conf new file mode 100644 index 000000000..46934bae2 --- /dev/null +++ b/vendor/github.com/moby/moby/vendor.conf @@ -0,0 +1,146 @@ +# the following lines are in sorted order, FYI +github.com/Azure/go-ansiterm 388960b655244e76e24c75f48631564eaefade62 +github.com/Microsoft/hcsshim v0.5.25 +github.com/Microsoft/go-winio v0.4.2 +github.com/Sirupsen/logrus v0.11.0 +github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76 +github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a +github.com/go-check/check 4ed411733c5785b40214c70bce814c3a3a689609 https://github.com/cpuguy83/check.git 
+github.com/gorilla/context v1.1 +github.com/gorilla/mux v1.1 +github.com/jhowardmsft/opengcs v0.0.9 +github.com/kr/pty 5cf931ef8f +github.com/mattn/go-shellwords v1.0.3 +github.com/tchap/go-patricia v2.2.6 +github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3 +golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6 +golang.org/x/sys 739734461d1c916b6c72a63d7efda2b27edb369f +github.com/docker/go-units 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1 +github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d +golang.org/x/text f72d8390a633d5dfb0cc84043294db9f6c935756 +github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987 +github.com/pmezard/go-difflib v1.0.0 + +github.com/RackSec/srslog 456df3a81436d29ba874f3590eeeee25d666f8a5 +github.com/imdario/mergo 0.2.1 +golang.org/x/sync de49d9dcd27d4f764488181bea099dfe6179bcf0 + +#get libnetwork packages +github.com/docker/libnetwork 6426d1e66f33c0b0c8bb135b7ee547447f54d043 +github.com/docker/go-events 18b43f1bc85d9cdd42c05a6cd2d444c7a200a894 +github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80 +github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec +github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b +github.com/hashicorp/memberlist v0.1.0 +github.com/sean-/seed e2103e2c35297fb7e17febb81e49b312087a2372 +github.com/hashicorp/go-sockaddr acd314c5781ea706c710d9ea70069fd2e110d61d +github.com/hashicorp/go-multierror fcdddc395df1ddf4247c69bd436e84cfa0733f7e +github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870 +github.com/docker/libkv 1d8431073ae03cdaedb198a89722f3aab6d418ef +github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25 +github.com/vishvananda/netlink bd6d5de5ccef2d66b0a26177928d0d8895d7f969 +github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060 +github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374 +github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d +github.com/coreos/etcd v3.2.1 +github.com/coreos/go-semver v0.2.0 +github.com/ugorji/go f1f1a805ed361a0e078bb537e4ea78cd37dcf065 +github.com/hashicorp/consul v0.5.2 +github.com/boltdb/bolt fff57c100f4dea1905678da7e90d92429dff2904 +github.com/miekg/dns 75e6e86cc601825c5dbcd4e0c209eab180997cd7 + +# get graph and distribution packages +github.com/docker/distribution b38e5838b7b2f2ad48e06ec4b500011976080621 +github.com/vbatts/tar-split v0.10.1 +github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb + +# get go-zfs packages +github.com/mistifyio/go-zfs 22c9b32c84eb0d0c6f4043b6e90fc94073de92fa +github.com/pborman/uuid v1.0 + +google.golang.org/grpc v1.3.0 + +# When updating, also update RUNC_COMMIT in hack/dockerfile/binaries-commits accordingly +github.com/opencontainers/runc 2d41c047c83e09a6d61d464906feb2a2f3c52aa4 https://github.com/docker/runc +github.com/opencontainers/image-spec 372ad780f63454fbbbbcc7cf80e5b90245c13e13 +github.com/opencontainers/runtime-spec d42f1eb741e6361e858d83fc75aa6893b66292c4 # specs + +github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0 + +# libcontainer deps (see src/github.com/opencontainers/runc/Godeps/Godeps.json) +github.com/coreos/go-systemd v4 +github.com/godbus/dbus v4.0.0 +github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852 +github.com/golang/protobuf 7a211bcf3bce0e3f1d74f9894916e6f116ae83b4 + +# gelf logging driver deps +github.com/Graylog2/go-gelf 7029da823dad4ef3a876df61065156acb703b2ea + 
+github.com/fluent/fluent-logger-golang v1.2.1 +# fluent-logger-golang deps +github.com/philhofer/fwd 98c11a7a6ec829d672b03833c3d69a7fae1ca972 +github.com/tinylib/msgp 75ee40d2601edf122ef667e2a07d600d4c44490c + +# fsnotify +github.com/fsnotify/fsnotify v1.2.11 + +# awslogs deps +github.com/aws/aws-sdk-go v1.4.22 +github.com/go-ini/ini 060d7da055ba6ec5ea7a31f116332fe5efa04ce0 +github.com/jmespath/go-jmespath 0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74 + +# logentries +github.com/bsphere/le_go 7a984a84b5492ae539b79b62fb4a10afc63c7bcf + +# gcplogs deps +golang.org/x/oauth2 96382aa079b72d8c014eb0c50f6c223d1e6a2de0 +google.golang.org/api 3cc2e591b550923a2c5f0ab5a803feda924d5823 +cloud.google.com/go 9d965e63e8cceb1b5d7977a202f0fcb8866d6525 +github.com/googleapis/gax-go da06d194a00e19ce00d9011a13931c3f6f6887c7 +google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944 + +# containerd +github.com/containerd/containerd 3addd840653146c90a254301d6c3a663c7fd6429 +github.com/tonistiigi/fifo 1405643975692217d6720f8b54aeee1bf2cd5cf4 +github.com/stevvooe/continuity cd7a8e21e2b6f84799f5dd4b65faf49c8d3ee02d +github.com/tonistiigi/fsutil 0ac4c11b053b9c5c7c47558f81f96c7100ce50fb + +# cluster +github.com/docker/swarmkit 3e2dd3c0a76149b1620b42d28dd6ff48270404e5 +github.com/gogo/protobuf v0.4 +github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a +github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e +golang.org/x/crypto 3fbbcd23f1cb824e69491a5930cfeff09b12f4d2 +golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb +github.com/hashicorp/go-memdb cb9a474f84cc5e41b273b20c6927680b2a8776ad +github.com/hashicorp/go-immutable-radix 8e8ed81f8f0bf1bdd829593fdd5c29922c1ea990 +github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4 +github.com/coreos/pkg fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8 +github.com/pivotal-golang/clock 3fd3c1944c59d9742e1cd333672181cd1a6f9fa0 +github.com/prometheus/client_golang 52437c81da6b127a9925d17eb3a382a2e5fd395e +github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 +github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6 +github.com/prometheus/common ebdfc6da46522d58825777cf1f90490a5b1ef1d8 +github.com/prometheus/procfs abf152e5f3e97f2fafac028d2cc06c1feb87ffa5 +github.com/matttproud/golang_protobuf_extensions v1.0.0 +github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9 +github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0 + +# cli +github.com/spf13/cobra v1.5.1 https://github.com/dnephin/cobra.git +github.com/spf13/pflag 9ff6c6923cfffbcd502984b8e0c80539a94968b7 +github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 +github.com/Nvveen/Gotty a8b993ba6abdb0e0c12b0125c603323a71c7790c https://github.com/ijc25/Gotty + +# metrics +github.com/docker/go-metrics d466d4f6fd960e01820085bd7e1a24426ee7ef18 + +github.com/opencontainers/selinux v1.0.0-rc1 + +# archive/tar +# mkdir -p ./vendor/archive +# git clone git://github.com/tonistiigi/go-1.git ./go +# git --git-dir ./go/.git --work-tree ./go checkout revert-prefix-ignore +# cp -a go/src/archive/tar ./vendor/archive/tar +# rm -rf ./go +# vndr \ No newline at end of file diff --git a/vendor/github.com/moby/moby/volume/drivers/adapter.go b/vendor/github.com/moby/moby/volume/drivers/adapter.go new file mode 100644 index 000000000..304c81bc0 --- /dev/null +++ b/vendor/github.com/moby/moby/volume/drivers/adapter.go @@ -0,0 +1,184 @@ +package volumedrivers + +import ( + "errors" + 
"path/filepath" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/volume" +) + +var ( + errNoSuchVolume = errors.New("no such volume") +) + +type volumeDriverAdapter struct { + name string + baseHostPath string + capabilities *volume.Capability + proxy *volumeDriverProxy +} + +func (a *volumeDriverAdapter) Name() string { + return a.name +} + +func (a *volumeDriverAdapter) Create(name string, opts map[string]string) (volume.Volume, error) { + if err := a.proxy.Create(name, opts); err != nil { + return nil, err + } + return &volumeAdapter{ + proxy: a.proxy, + name: name, + driverName: a.name, + baseHostPath: a.baseHostPath, + }, nil +} + +func (a *volumeDriverAdapter) Remove(v volume.Volume) error { + return a.proxy.Remove(v.Name()) +} + +func hostPath(baseHostPath, path string) string { + if baseHostPath != "" { + path = filepath.Join(baseHostPath, path) + } + return path +} + +func (a *volumeDriverAdapter) List() ([]volume.Volume, error) { + ls, err := a.proxy.List() + if err != nil { + return nil, err + } + + var out []volume.Volume + for _, vp := range ls { + out = append(out, &volumeAdapter{ + proxy: a.proxy, + name: vp.Name, + baseHostPath: a.baseHostPath, + driverName: a.name, + eMount: hostPath(a.baseHostPath, vp.Mountpoint), + }) + } + return out, nil +} + +func (a *volumeDriverAdapter) Get(name string) (volume.Volume, error) { + v, err := a.proxy.Get(name) + if err != nil { + return nil, err + } + + // plugin may have returned no volume and no error + if v == nil { + return nil, errNoSuchVolume + } + + return &volumeAdapter{ + proxy: a.proxy, + name: v.Name, + driverName: a.Name(), + eMount: v.Mountpoint, + createdAt: v.CreatedAt, + status: v.Status, + baseHostPath: a.baseHostPath, + }, nil +} + +func (a *volumeDriverAdapter) Scope() string { + cap := a.getCapabilities() + return cap.Scope +} + +func (a *volumeDriverAdapter) getCapabilities() volume.Capability { + if a.capabilities != nil { + return *a.capabilities + } + cap, err := a.proxy.Capabilities() + if err != nil { + // `GetCapabilities` is a not a required endpoint. 
+ // On error assume it's a local-only driver + logrus.Warnf("Volume driver %s returned an error while trying to query its capabilities, using default capabilities: %v", a.name, err) + return volume.Capability{Scope: volume.LocalScope} + } + + // don't spam the warn log below just because the plugin didn't provide a scope + if len(cap.Scope) == 0 { + cap.Scope = volume.LocalScope + } + + cap.Scope = strings.ToLower(cap.Scope) + if cap.Scope != volume.LocalScope && cap.Scope != volume.GlobalScope { + logrus.Warnf("Volume driver %q returned an invalid scope: %q", a.Name(), cap.Scope) + cap.Scope = volume.LocalScope + } + + a.capabilities = &cap + return cap +} + +type volumeAdapter struct { + proxy *volumeDriverProxy + name string + baseHostPath string + driverName string + eMount string // ephemeral host volume path + createdAt time.Time // time the directory was created + status map[string]interface{} +} + +type proxyVolume struct { + Name string + Mountpoint string + CreatedAt time.Time + Status map[string]interface{} +} + +func (a *volumeAdapter) Name() string { + return a.name +} + +func (a *volumeAdapter) DriverName() string { + return a.driverName +} + +func (a *volumeAdapter) Path() string { + if len(a.eMount) == 0 { + mountpoint, _ := a.proxy.Path(a.name) + a.eMount = hostPath(a.baseHostPath, mountpoint) + } + return a.eMount +} + +func (a *volumeAdapter) CachedPath() string { + return a.eMount +} + +func (a *volumeAdapter) Mount(id string) (string, error) { + mountpoint, err := a.proxy.Mount(a.name, id) + a.eMount = hostPath(a.baseHostPath, mountpoint) + return a.eMount, err +} + +func (a *volumeAdapter) Unmount(id string) error { + err := a.proxy.Unmount(a.name, id) + if err == nil { + a.eMount = "" + } + return err +} + +func (a *volumeAdapter) CreatedAt() (time.Time, error) { + return a.createdAt, nil +} +func (a *volumeAdapter) Status() map[string]interface{} { + out := make(map[string]interface{}, len(a.status)) + for k, v := range a.status { + out[k] = v + } + return out +} diff --git a/vendor/github.com/moby/moby/volume/drivers/extpoint.go b/vendor/github.com/moby/moby/volume/drivers/extpoint.go new file mode 100644 index 000000000..da230dcc7 --- /dev/null +++ b/vendor/github.com/moby/moby/volume/drivers/extpoint.go @@ -0,0 +1,217 @@ +//go:generate pluginrpc-gen -i $GOFILE -o proxy.go -type volumeDriver -name VolumeDriver + +package volumedrivers + +import ( + "fmt" + "sort" + "sync" + + "github.com/docker/docker/pkg/locker" + getter "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/volume" +) + +// currently created by hand. generation tool would generate this like: +// $ extpoint-gen Driver > volume/extpoint.go + +var drivers = &driverExtpoint{ + extensions: make(map[string]volume.Driver), + driverLock: &locker.Locker{}, +} + +const extName = "VolumeDriver" + +// NewVolumeDriver returns a driver that has the given name mapped on the given client. +func NewVolumeDriver(name string, baseHostPath string, c client) volume.Driver { + proxy := &volumeDriverProxy{c} + return &volumeDriverAdapter{name: name, baseHostPath: baseHostPath, proxy: proxy} +} + +// volumeDriver defines the available functions that volume plugins must implement. +// This interface is only defined to generate the proxy objects. +// It's not intended to be public or reused.
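+// Each method below maps onto a VolumeDriver.<Method> call: the generated +// proxy in proxy.go issues pp.Call("VolumeDriver.Create", ...) and so on, +// which plugins serve over HTTP (see the /VolumeDriver.Create handlers in +// proxy_test.go).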
+type volumeDriver interface { + // Create a volume with the given name + Create(name string, opts map[string]string) (err error) + // Remove the volume with the given name + Remove(name string) (err error) + // Get the mountpoint of the given volume + Path(name string) (mountpoint string, err error) + // Mount the given volume and return the mountpoint + Mount(name, id string) (mountpoint string, err error) + // Unmount the given volume + Unmount(name, id string) (err error) + // List lists all the volumes known to the driver + List() (volumes []*proxyVolume, err error) + // Get retrieves the volume with the requested name + Get(name string) (volume *proxyVolume, err error) + // Capabilities gets the list of capabilities of the driver + Capabilities() (capabilities volume.Capability, err error) +} + +type driverExtpoint struct { + extensions map[string]volume.Driver + sync.Mutex + driverLock *locker.Locker + plugingetter getter.PluginGetter +} + +// RegisterPluginGetter sets the plugingetter +func RegisterPluginGetter(plugingetter getter.PluginGetter) { + drivers.plugingetter = plugingetter +} + +// Register associates the given driver to the given name, checking if +// the name is already associated +func Register(extension volume.Driver, name string) bool { + if name == "" { + return false + } + + drivers.Lock() + defer drivers.Unlock() + + _, exists := drivers.extensions[name] + if exists { + return false + } + + if err := validateDriver(extension); err != nil { + return false + } + + drivers.extensions[name] = extension + + return true +} + +// Unregister dissociates the name from its driver, if the association exists. +func Unregister(name string) bool { + drivers.Lock() + defer drivers.Unlock() + + _, exists := drivers.extensions[name] + if !exists { + return false + } + delete(drivers.extensions, name) + return true +} + +// lookup returns the driver associated with the given name. If a +// driver with the given name has not been registered it checks if +// there is a VolumeDriver plugin available with the given name. +func lookup(name string, mode int) (volume.Driver, error) { + drivers.driverLock.Lock(name) + defer drivers.driverLock.Unlock(name) + + drivers.Lock() + ext, ok := drivers.extensions[name] + drivers.Unlock() + if ok { + return ext, nil + } + if drivers.plugingetter != nil { + p, err := drivers.plugingetter.Get(name, extName, mode) + if err != nil { + return nil, fmt.Errorf("Error looking up volume plugin %s: %v", name, err) + } + + d := NewVolumeDriver(p.Name(), p.BasePath(), p.Client()) + if err := validateDriver(d); err != nil { + return nil, err + } + + if p.IsV1() { + drivers.Lock() + drivers.extensions[name] = d + drivers.Unlock() + } + return d, nil + } + return nil, fmt.Errorf("Error looking up volume plugin %s", name) +} + +func validateDriver(vd volume.Driver) error { + scope := vd.Scope() + if scope != volume.LocalScope && scope != volume.GlobalScope { + return fmt.Errorf("Driver %q provided an invalid capability scope: %s", vd.Name(), scope) + } + return nil +} + +// GetDriver returns a volume driver by its name. +// If the driver is empty, it looks for the local driver. +func GetDriver(name string) (volume.Driver, error) { + if name == "" { + name = volume.DefaultDriverName + } + return lookup(name, getter.Lookup) +} + +// CreateDriver returns a volume driver by its name and increments RefCount. +// If the driver is empty, it looks for the local driver. 
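+// GetDriver, CreateDriver and RemoveDriver below share this lookup path and +// differ only in the plugingetter mode they pass (getter.Lookup, +// getter.Acquire and getter.Release respectively), which drives plugin +// reference counting.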
+func CreateDriver(name string) (volume.Driver, error) { + if name == "" { + name = volume.DefaultDriverName + } + return lookup(name, getter.Acquire) +} + +// RemoveDriver returns a volume driver by its name and decrements RefCount. +// If the driver is empty, it looks for the local driver. +func RemoveDriver(name string) (volume.Driver, error) { + if name == "" { + name = volume.DefaultDriverName + } + return lookup(name, getter.Release) +} + +// GetDriverList returns the list of registered volume drivers. +// If no driver is registered, an empty list is returned. +func GetDriverList() []string { + var driverList []string + drivers.Lock() + for driverName := range drivers.extensions { + driverList = append(driverList, driverName) + } + drivers.Unlock() + sort.Strings(driverList) + return driverList +} + +// GetAllDrivers lists all the registered drivers +func GetAllDrivers() ([]volume.Driver, error) { + var plugins []getter.CompatPlugin + if drivers.plugingetter != nil { + var err error + plugins, err = drivers.plugingetter.GetAllByCap(extName) + if err != nil { + return nil, fmt.Errorf("error listing plugins: %v", err) + } + } + var ds []volume.Driver + + drivers.Lock() + defer drivers.Unlock() + + for _, d := range drivers.extensions { + ds = append(ds, d) + } + + for _, p := range plugins { + name := p.Name() + + if _, ok := drivers.extensions[name]; ok { + continue + } + + ext := NewVolumeDriver(name, p.BasePath(), p.Client()) + if p.IsV1() { + drivers.extensions[name] = ext + } + ds = append(ds, ext) + } + return ds, nil +} diff --git a/vendor/github.com/moby/moby/volume/drivers/extpoint_test.go b/vendor/github.com/moby/moby/volume/drivers/extpoint_test.go new file mode 100644 index 000000000..428b0752f --- /dev/null +++ b/vendor/github.com/moby/moby/volume/drivers/extpoint_test.go @@ -0,0 +1,23 @@ +package volumedrivers + +import ( + "testing" + + volumetestutils "github.com/docker/docker/volume/testutils" +) + +func TestGetDriver(t *testing.T) { + _, err := GetDriver("missing") + if err == nil { + t.Fatal("Expected error, was nil") + } + Register(volumetestutils.NewFakeDriver("fake"), "fake") + + d, err := GetDriver("fake") + if err != nil { + t.Fatal(err) + } + if d.Name() != "fake" { + t.Fatalf("Expected fake driver, got %s\n", d.Name()) + } +} diff --git a/vendor/github.com/moby/moby/volume/drivers/proxy.go b/vendor/github.com/moby/moby/volume/drivers/proxy.go new file mode 100644 index 000000000..b23db6258 --- /dev/null +++ b/vendor/github.com/moby/moby/volume/drivers/proxy.go @@ -0,0 +1,242 @@ +// generated code - DO NOT EDIT + +package volumedrivers + +import ( + "errors" + + "github.com/docker/docker/volume" +) + +type client interface { + Call(string, interface{}, interface{}) error +} + +type volumeDriverProxy struct { + client +} + +type volumeDriverProxyCreateRequest struct { + Name string + Opts map[string]string +} + +type volumeDriverProxyCreateResponse struct { + Err string +} + +func (pp *volumeDriverProxy) Create(name string, opts map[string]string) (err error) { + var ( + req volumeDriverProxyCreateRequest + ret volumeDriverProxyCreateResponse + ) + + req.Name = name + req.Opts = opts + if err = pp.Call("VolumeDriver.Create", req, &ret); err != nil { + return + } + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyRemoveRequest struct { + Name string +} + +type volumeDriverProxyRemoveResponse struct { + Err string +} + +func (pp *volumeDriverProxy) Remove(name string) (err error) { + var ( + req
volumeDriverProxyRemoveRequest + ret volumeDriverProxyRemoveResponse + ) + + req.Name = name + if err = pp.Call("VolumeDriver.Remove", req, &ret); err != nil { + return + } + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyPathRequest struct { + Name string +} + +type volumeDriverProxyPathResponse struct { + Mountpoint string + Err string +} + +func (pp *volumeDriverProxy) Path(name string) (mountpoint string, err error) { + var ( + req volumeDriverProxyPathRequest + ret volumeDriverProxyPathResponse + ) + + req.Name = name + if err = pp.Call("VolumeDriver.Path", req, &ret); err != nil { + return + } + + mountpoint = ret.Mountpoint + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyMountRequest struct { + Name string + ID string +} + +type volumeDriverProxyMountResponse struct { + Mountpoint string + Err string +} + +func (pp *volumeDriverProxy) Mount(name string, id string) (mountpoint string, err error) { + var ( + req volumeDriverProxyMountRequest + ret volumeDriverProxyMountResponse + ) + + req.Name = name + req.ID = id + if err = pp.Call("VolumeDriver.Mount", req, &ret); err != nil { + return + } + + mountpoint = ret.Mountpoint + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyUnmountRequest struct { + Name string + ID string +} + +type volumeDriverProxyUnmountResponse struct { + Err string +} + +func (pp *volumeDriverProxy) Unmount(name string, id string) (err error) { + var ( + req volumeDriverProxyUnmountRequest + ret volumeDriverProxyUnmountResponse + ) + + req.Name = name + req.ID = id + if err = pp.Call("VolumeDriver.Unmount", req, &ret); err != nil { + return + } + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyListRequest struct { +} + +type volumeDriverProxyListResponse struct { + Volumes []*proxyVolume + Err string +} + +func (pp *volumeDriverProxy) List() (volumes []*proxyVolume, err error) { + var ( + req volumeDriverProxyListRequest + ret volumeDriverProxyListResponse + ) + + if err = pp.Call("VolumeDriver.List", req, &ret); err != nil { + return + } + + volumes = ret.Volumes + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyGetRequest struct { + Name string +} + +type volumeDriverProxyGetResponse struct { + Volume *proxyVolume + Err string +} + +func (pp *volumeDriverProxy) Get(name string) (volume *proxyVolume, err error) { + var ( + req volumeDriverProxyGetRequest + ret volumeDriverProxyGetResponse + ) + + req.Name = name + if err = pp.Call("VolumeDriver.Get", req, &ret); err != nil { + return + } + + volume = ret.Volume + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyCapabilitiesRequest struct { +} + +type volumeDriverProxyCapabilitiesResponse struct { + Capabilities volume.Capability + Err string +} + +func (pp *volumeDriverProxy) Capabilities() (capabilities volume.Capability, err error) { + var ( + req volumeDriverProxyCapabilitiesRequest + ret volumeDriverProxyCapabilitiesResponse + ) + + if err = pp.Call("VolumeDriver.Capabilities", req, &ret); err != nil { + return + } + + capabilities = ret.Capabilities + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} diff --git a/vendor/github.com/moby/moby/volume/drivers/proxy_test.go b/vendor/github.com/moby/moby/volume/drivers/proxy_test.go new file mode 100644 index 000000000..b78c46a03 --- /dev/null +++ 
b/vendor/github.com/moby/moby/volume/drivers/proxy_test.go @@ -0,0 +1,132 @@ +package volumedrivers + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "strings" + "testing" + + "github.com/docker/docker/pkg/plugins" + "github.com/docker/go-connections/tlsconfig" +) + +func TestVolumeRequestError(t *testing.T) { + mux := http.NewServeMux() + server := httptest.NewServer(mux) + defer server.Close() + + mux.HandleFunc("/VolumeDriver.Create", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{"Err": "Cannot create volume"}`) + }) + + mux.HandleFunc("/VolumeDriver.Remove", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{"Err": "Cannot remove volume"}`) + }) + + mux.HandleFunc("/VolumeDriver.Mount", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{"Err": "Cannot mount volume"}`) + }) + + mux.HandleFunc("/VolumeDriver.Unmount", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{"Err": "Cannot unmount volume"}`) + }) + + mux.HandleFunc("/VolumeDriver.Path", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{"Err": "Unknown volume"}`) + }) + + mux.HandleFunc("/VolumeDriver.List", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{"Err": "Cannot list volumes"}`) + }) + + mux.HandleFunc("/VolumeDriver.Get", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{"Err": "Cannot get volume"}`) + }) + + mux.HandleFunc("/VolumeDriver.Capabilities", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + http.Error(w, "error", 500) + }) + + u, _ := url.Parse(server.URL) + client, err := plugins.NewClient("tcp://"+u.Host, &tlsconfig.Options{InsecureSkipVerify: true}) + if err != nil { + t.Fatal(err) + } + + driver := volumeDriverProxy{client} + + if err = driver.Create("volume", nil); err == nil { + t.Fatal("Expected error, was nil") + } + + if !strings.Contains(err.Error(), "Cannot create volume") { + t.Fatalf("Unexpected error: %v\n", err) + } + + _, err = driver.Mount("volume", "123") + if err == nil { + t.Fatal("Expected error, was nil") + } + + if !strings.Contains(err.Error(), "Cannot mount volume") { + t.Fatalf("Unexpected error: %v\n", err) + } + + err = driver.Unmount("volume", "123") + if err == nil { + t.Fatal("Expected error, was nil") + } + + if !strings.Contains(err.Error(), "Cannot unmount volume") { + t.Fatalf("Unexpected error: %v\n", err) + } + + err = driver.Remove("volume") + if err == nil { + t.Fatal("Expected error, was nil") + } + + if !strings.Contains(err.Error(), "Cannot remove volume") { + t.Fatalf("Unexpected error: %v\n", err) + } + + _, err = driver.Path("volume") + if err == nil { + t.Fatal("Expected error, was nil") + } + + if !strings.Contains(err.Error(), "Unknown volume") { + t.Fatalf("Unexpected error: %v\n", err) + } + + _, err = driver.List() + if err == nil { + t.Fatal("Expected error, was nil") + } + if !strings.Contains(err.Error(), "Cannot list volumes") { 
+ t.Fatalf("Unexpected error: %v\n", err) + } + + _, err = driver.Get("volume") + if err == nil { + t.Fatal("Expected error, was nil") + } + if !strings.Contains(err.Error(), "Cannot get volume") { + t.Fatalf("Unexpected error: %v\n", err) + } + + _, err = driver.Capabilities() + if err == nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/volume/local/local.go b/vendor/github.com/moby/moby/volume/local/local.go new file mode 100644 index 000000000..43ba1e1db --- /dev/null +++ b/vendor/github.com/moby/moby/volume/local/local.go @@ -0,0 +1,387 @@ +// Package local provides the default implementation for volumes. It +// is used to mount data volume containers and directories local to +// the host server. +package local + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "strings" + "sync" + + "github.com/pkg/errors" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/volume" +) + +// VolumeDataPathName is the name of the directory where the volume data is stored. +// It uses a very distinctive name to avoid collisions migrating data between +// Docker versions. +const ( + VolumeDataPathName = "_data" + volumesPathName = "volumes" +) + +var ( + // ErrNotFound is the typed error returned when the requested volume name can't be found + ErrNotFound = fmt.Errorf("volume not found") + // volumeNameRegex ensures the name assigned for the volume is valid. + // This name is used to create the bind directory, so we need to avoid characters that + // would make the path to escape the root directory. + volumeNameRegex = api.RestrictedNamePattern +) + +type validationError struct { + error +} + +func (validationError) IsValidationError() bool { + return true +} + +type activeMount struct { + count uint64 + mounted bool +} + +// New instantiates a new Root instance with the provided scope. Scope +// is the base path that the Root instance uses to store its +// volumes. The base path is created here if it does not exist. +func New(scope string, rootIDs idtools.IDPair) (*Root, error) { + rootDirectory := filepath.Join(scope, volumesPathName) + + if err := idtools.MkdirAllAndChown(rootDirectory, 0700, rootIDs); err != nil { + return nil, err + } + + r := &Root{ + scope: scope, + path: rootDirectory, + volumes: make(map[string]*localVolume), + rootIDs: rootIDs, + } + + dirs, err := ioutil.ReadDir(rootDirectory) + if err != nil { + return nil, err + } + + mountInfos, err := mount.GetMounts() + if err != nil { + logrus.Debugf("error looking up mounts for local volume cleanup: %v", err) + } + + for _, d := range dirs { + if !d.IsDir() { + continue + } + + name := filepath.Base(d.Name()) + v := &localVolume{ + driverName: r.Name(), + name: name, + path: r.DataPath(name), + } + r.volumes[name] = v + optsFilePath := filepath.Join(rootDirectory, name, "opts.json") + if b, err := ioutil.ReadFile(optsFilePath); err == nil { + opts := optsConfig{} + if err := json.Unmarshal(b, &opts); err != nil { + return nil, errors.Wrapf(err, "error while unmarshaling volume options for volume: %s", name) + } + // Make sure this isn't an empty optsConfig. + // This could be empty due to buggy behavior in older versions of Docker. 
+ if !reflect.DeepEqual(opts, optsConfig{}) { + v.opts = &opts + } + + // unmount anything that may still be mounted (for example, from an unclean shutdown) + for _, info := range mountInfos { + if info.Mountpoint == v.path { + mount.Unmount(v.path) + break + } + } + } + } + + return r, nil +} + +// Root implements the Driver interface for the volume package and +// manages the creation/removal of volumes. It uses only standard vfs +// commands to create/remove dirs within its provided scope. +type Root struct { + m sync.Mutex + scope string + path string + volumes map[string]*localVolume + rootIDs idtools.IDPair +} + +// List lists all the volumes +func (r *Root) List() ([]volume.Volume, error) { + var ls []volume.Volume + r.m.Lock() + for _, v := range r.volumes { + ls = append(ls, v) + } + r.m.Unlock() + return ls, nil +} + +// DataPath returns the constructed path of this volume. +func (r *Root) DataPath(volumeName string) string { + return filepath.Join(r.path, volumeName, VolumeDataPathName) +} + +// Name returns the name of Root, defined in the volume package in the DefaultDriverName constant. +func (r *Root) Name() string { + return volume.DefaultDriverName +} + +// Create creates a new volume.Volume with the provided name, creating +// the underlying directory tree required for this volume in the +// process. +func (r *Root) Create(name string, opts map[string]string) (volume.Volume, error) { + if err := r.validateName(name); err != nil { + return nil, err + } + + r.m.Lock() + defer r.m.Unlock() + + v, exists := r.volumes[name] + if exists { + return v, nil + } + + path := r.DataPath(name) + if err := idtools.MkdirAllAndChown(path, 0755, r.rootIDs); err != nil { + if os.IsExist(err) { + return nil, fmt.Errorf("volume already exists under %s", filepath.Dir(path)) + } + return nil, errors.Wrapf(err, "error while creating volume path '%s'", path) + } + + var err error + defer func() { + if err != nil { + os.RemoveAll(filepath.Dir(path)) + } + }() + + v = &localVolume{ + driverName: r.Name(), + name: name, + path: path, + } + + if len(opts) != 0 { + if err = setOpts(v, opts); err != nil { + return nil, err + } + var b []byte + b, err = json.Marshal(v.opts) + if err != nil { + return nil, err + } + if err = ioutil.WriteFile(filepath.Join(filepath.Dir(path), "opts.json"), b, 0600); err != nil { + return nil, errors.Wrap(err, "error while persisting volume options") + } + } + + r.volumes[name] = v + return v, nil +} + +// Remove removes the specified volume and all underlying data. If the +// given volume does not belong to this driver, an error is +// returned. The volume is reference counted; if all references are +// not released then the volume is not removed.
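+// Remove also refuses to delete anything outside of Docker's root: see the +// scopedPath check below and its platform implementations in local_unix.go +// and local_windows.go.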
+func (r *Root) Remove(v volume.Volume) error { + r.m.Lock() + defer r.m.Unlock() + + lv, ok := v.(*localVolume) + if !ok { + return fmt.Errorf("unknown volume type %T", v) + } + + if lv.active.count > 0 { + return fmt.Errorf("volume has active mounts") + } + + if err := lv.unmount(); err != nil { + return err + } + + realPath, err := filepath.EvalSymlinks(lv.path) + if err != nil { + if !os.IsNotExist(err) { + return err + } + realPath = filepath.Dir(lv.path) + } + + if !r.scopedPath(realPath) { + return fmt.Errorf("Unable to remove a directory outside of the Docker root %s: %s", r.scope, realPath) + } + + if err := removePath(realPath); err != nil { + return err + } + + delete(r.volumes, lv.name) + return removePath(filepath.Dir(lv.path)) +} + +func removePath(path string) error { + if err := os.RemoveAll(path); err != nil { + if os.IsNotExist(err) { + return nil + } + return errors.Wrapf(err, "error removing volume path '%s'", path) + } + return nil +} + +// Get looks up the volume for the given name and returns it if found +func (r *Root) Get(name string) (volume.Volume, error) { + r.m.Lock() + v, exists := r.volumes[name] + r.m.Unlock() + if !exists { + return nil, ErrNotFound + } + return v, nil +} + +// Scope returns the local volume scope +func (r *Root) Scope() string { + return volume.LocalScope +} + +func (r *Root) validateName(name string) error { + if len(name) == 1 { + return validationError{fmt.Errorf("volume name is too short, names should be at least two alphanumeric characters")} + } + if !volumeNameRegex.MatchString(name) { + return validationError{fmt.Errorf("%q includes invalid characters for a local volume name, only %q are allowed. If you intended to pass a host directory, use absolute path", name, api.RestrictedNameChars)} + } + return nil +} + +// localVolume implements the Volume interface from the volume package and +// represents the volumes created by Root. +type localVolume struct { + m sync.Mutex + // unique name of the volume + name string + // path is the path on the host where the data lives + path string + // driverName is the name of the driver that created the volume. + driverName string + // opts is the parsed list of options used to create the volume + opts *optsConfig + // active refcounts the active mounts + active activeMount +} + +// Name returns the name of the given Volume. +func (v *localVolume) Name() string { + return v.name +} + +// DriverName returns the driver that created the given Volume. +func (v *localVolume) DriverName() string { + return v.driverName +} + +// Path returns the data location. +func (v *localVolume) Path() string { + return v.path +} + +// Mount implements the Volume interface, returning the data location. +// If there are any provided mount options, the resources will be mounted at this point +func (v *localVolume) Mount(id string) (string, error) { + v.m.Lock() + defer v.m.Unlock() + if v.opts != nil { + if !v.active.mounted { + if err := v.mount(); err != nil { + return "", err + } + v.active.mounted = true + } + v.active.count++ + } + return v.path, nil +} + +// Unmount dereferences the id, and if it is the last reference will unmount any resources +// that were previously mounted. +func (v *localVolume) Unmount(id string) error { + v.m.Lock() + defer v.m.Unlock() + + // Always decrement the count, even if the unmount fails + // Essentially docker doesn't care if this fails, it will send an error, but + // ultimately there's nothing that can be done.
If we don't decrement the count + // this volume can never be removed until a daemon restart occurs. + if v.opts != nil { + v.active.count-- + } + + if v.active.count > 0 { + return nil + } + + return v.unmount() +} + +func (v *localVolume) unmount() error { + if v.opts != nil { + if err := mount.Unmount(v.path); err != nil { + if mounted, mErr := mount.Mounted(v.path); mounted || mErr != nil { + return errors.Wrapf(err, "error while unmounting volume path '%s'", v.path) + } + } + v.active.mounted = false + } + return nil +} + +func validateOpts(opts map[string]string) error { + for opt := range opts { + if !validOpts[opt] { + return validationError{fmt.Errorf("invalid option key: %q", opt)} + } + } + return nil +} + +func (v *localVolume) Status() map[string]interface{} { + return nil +} + +// getAddress finds out address/hostname from options +func getAddress(opts string) string { + optsList := strings.Split(opts, ",") + for i := 0; i < len(optsList); i++ { + if strings.HasPrefix(optsList[i], "addr=") { + addr := (strings.SplitN(optsList[i], "=", 2)[1]) + return addr + } + } + return "" +} diff --git a/vendor/github.com/moby/moby/volume/local/local_test.go b/vendor/github.com/moby/moby/volume/local/local_test.go new file mode 100644 index 000000000..2353391aa --- /dev/null +++ b/vendor/github.com/moby/moby/volume/local/local_test.go @@ -0,0 +1,345 @@ +package local + +import ( + "io/ioutil" + "os" + "path/filepath" + "reflect" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" +) + +func TestGetAddress(t *testing.T) { + cases := map[string]string{ + "addr=11.11.11.1": "11.11.11.1", + " ": "", + "addr=": "", + "addr=2001:db8::68": "2001:db8::68", + } + for name, success := range cases { + v := getAddress(name) + if v != success { + t.Errorf("Test case failed for %s actual: %s expected : %s", name, v, success) + } + } + +} + +func TestRemove(t *testing.T) { + // TODO Windows: Investigate why this test fails on Windows under CI + // but passes locally. 
+ if runtime.GOOS == "windows" { + t.Skip("Test failing on Windows CI") + } + rootDir, err := ioutil.TempDir("", "local-volume-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rootDir) + + r, err := New(rootDir, idtools.IDPair{UID: 0, GID: 0}) + if err != nil { + t.Fatal(err) + } + + vol, err := r.Create("testing", nil) + if err != nil { + t.Fatal(err) + } + + if err := r.Remove(vol); err != nil { + t.Fatal(err) + } + + vol, err = r.Create("testing2", nil) + if err != nil { + t.Fatal(err) + } + if err := os.RemoveAll(vol.Path()); err != nil { + t.Fatal(err) + } + + if err := r.Remove(vol); err != nil { + t.Fatal(err) + } + + if _, err := os.Stat(vol.Path()); err != nil && !os.IsNotExist(err) { + t.Fatal("volume dir not removed") + } + + if l, _ := r.List(); len(l) != 0 { + t.Fatal("expected there to be no volumes") + } +} + +func TestInitializeWithVolumes(t *testing.T) { + rootDir, err := ioutil.TempDir("", "local-volume-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rootDir) + + r, err := New(rootDir, idtools.IDPair{UID: 0, GID: 0}) + if err != nil { + t.Fatal(err) + } + + vol, err := r.Create("testing", nil) + if err != nil { + t.Fatal(err) + } + + r, err = New(rootDir, idtools.IDPair{UID: 0, GID: 0}) + if err != nil { + t.Fatal(err) + } + + v, err := r.Get(vol.Name()) + if err != nil { + t.Fatal(err) + } + + if v.Path() != vol.Path() { + t.Fatal("expected to re-initialize root with existing volumes") + } +} + +func TestCreate(t *testing.T) { + rootDir, err := ioutil.TempDir("", "local-volume-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rootDir) + + r, err := New(rootDir, idtools.IDPair{UID: 0, GID: 0}) + if err != nil { + t.Fatal(err) + } + + cases := map[string]bool{ + "name": true, + "name-with-dash": true, + "name_with_underscore": true, + "name/with/slash": false, + "name/with/../../slash": false, + "./name": false, + "../name": false, + "./": false, + "../": false, + "~": false, + ".": false, + "..": false, + "...": false, + } + + for name, success := range cases { + v, err := r.Create(name, nil) + if success { + if err != nil { + t.Fatal(err) + } + if v.Name() != name { + t.Fatalf("Expected volume with name %s, got %s", name, v.Name()) + } + } else { + if err == nil { + t.Fatalf("Expected error creating volume with name %s, got nil", name) + } + } + } + + r, err = New(rootDir, idtools.IDPair{UID: 0, GID: 0}) + if err != nil { + t.Fatal(err) + } +} + +func TestValidateName(t *testing.T) { + r := &Root{} + names := map[string]bool{ + "x": false, + "/testvol": false, + "thing.d": true, + "hello-world": true, + "./hello": false, + ".hello": false, + } + + for vol, expected := range names { + err := r.validateName(vol) + if expected && err != nil { + t.Fatalf("expected %s to be valid got %v", vol, err) + } + if !expected && err == nil { + t.Fatalf("expected %s to be invalid", vol) + } + } +} + +func TestCreateWithOpts(t *testing.T) { + if runtime.GOOS == "windows" || runtime.GOOS == "solaris" { + t.Skip() + } + rootDir, err := ioutil.TempDir("", "local-volume-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rootDir) + + r, err := New(rootDir, idtools.IDPair{UID: 0, GID: 0}) + if err != nil { + t.Fatal(err) + } + + if _, err := r.Create("test", map[string]string{"invalidopt": "notsupported"}); err == nil { + t.Fatal("expected invalid opt to cause error") + } + + vol, err := r.Create("test", map[string]string{"device": "tmpfs", "type": "tmpfs", "o": "size=1m,uid=1000"}) + if err != nil { + t.Fatal(err) + } + v := 
vol.(*localVolume) + + dir, err := v.Mount("1234") + if err != nil { + t.Fatal(err) + } + defer func() { + if err := v.Unmount("1234"); err != nil { + t.Fatal(err) + } + }() + + mountInfos, err := mount.GetMounts() + if err != nil { + t.Fatal(err) + } + + var found bool + for _, info := range mountInfos { + if info.Mountpoint == dir { + found = true + if info.Fstype != "tmpfs" { + t.Fatalf("expected tmpfs mount, got %q", info.Fstype) + } + if info.Source != "tmpfs" { + t.Fatalf("expected tmpfs mount, got %q", info.Source) + } + if !strings.Contains(info.VfsOpts, "uid=1000") { + t.Fatalf("expected mount info to have uid=1000: %q", info.VfsOpts) + } + if !strings.Contains(info.VfsOpts, "size=1024k") { + t.Fatalf("expected mount info to have size=1024k: %q", info.VfsOpts) + } + break + } + } + + if !found { + t.Fatal("mount not found") + } + + if v.active.count != 1 { + t.Fatalf("Expected active mount count to be 1, got %d", v.active.count) + } + + // test double mount + if _, err := v.Mount("1234"); err != nil { + t.Fatal(err) + } + if v.active.count != 2 { + t.Fatalf("Expected active mount count to be 2, got %d", v.active.count) + } + + if err := v.Unmount("1234"); err != nil { + t.Fatal(err) + } + if v.active.count != 1 { + t.Fatalf("Expected active mount count to be 1, got %d", v.active.count) + } + + mounted, err := mount.Mounted(v.path) + if err != nil { + t.Fatal(err) + } + if !mounted { + t.Fatal("expected mount to still be active") + } + + r, err = New(rootDir, idtools.IDPair{UID: 0, GID: 0}) + if err != nil { + t.Fatal(err) + } + + v2, exists := r.volumes["test"] + if !exists { + t.Fatal("missing volume on restart") + } + + if !reflect.DeepEqual(v.opts, v2.opts) { + t.Fatal("missing volume options on restart") + } +} + +func TestReloadNoOpts(t *testing.T) { + rootDir, err := ioutil.TempDir("", "volume-test-reload-no-opts") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rootDir) + + r, err := New(rootDir, idtools.IDPair{UID: 0, GID: 0}) + if err != nil { + t.Fatal(err) + } + + if _, err := r.Create("test1", nil); err != nil { + t.Fatal(err) + } + if _, err := r.Create("test2", nil); err != nil { + t.Fatal(err) + } + // make sure a file with `null` (e.g. an empty opts map from an older daemon) is ok + if err := ioutil.WriteFile(filepath.Join(rootDir, "test2"), []byte("null"), 0600); err != nil { + t.Fatal(err) + } + + if _, err := r.Create("test3", nil); err != nil { + t.Fatal(err) + } + // make sure an empty opts file doesn't break us too + if err := ioutil.WriteFile(filepath.Join(rootDir, "test3"), nil, 0600); err != nil { + t.Fatal(err) + } + + if _, err := r.Create("test4", map[string]string{}); err != nil { + t.Fatal(err) + } + + r, err = New(rootDir, idtools.IDPair{UID: 0, GID: 0}) + if err != nil { + t.Fatal(err) + } + + for _, name := range []string{"test1", "test2", "test3", "test4"} { + v, err := r.Get(name) + if err != nil { + t.Fatal(err) + } + lv, ok := v.(*localVolume) + if !ok { + t.Fatalf("expected *localVolume got: %v", reflect.TypeOf(v)) + } + if lv.opts != nil { + t.Fatalf("expected opts to be nil, got: %v", lv.opts) + } + if _, err := lv.Mount("1234"); err != nil { + t.Fatal(err) + } + } +} diff --git a/vendor/github.com/moby/moby/volume/local/local_unix.go b/vendor/github.com/moby/moby/volume/local/local_unix.go new file mode 100644 index 000000000..5bba5b706 --- /dev/null +++ b/vendor/github.com/moby/moby/volume/local/local_unix.go @@ -0,0 +1,99 @@ +// +build linux freebsd solaris + +// Package local provides the default implementation for volumes.
It +// is used to mount data volume containers and directories local to +// the host server. +package local + +import ( + "fmt" + "net" + "os" + "path/filepath" + "strings" + "syscall" + "time" + + "github.com/pkg/errors" + + "github.com/docker/docker/pkg/mount" +) + +var ( + oldVfsDir = filepath.Join("vfs", "dir") + + validOpts = map[string]bool{ + "type": true, // specify the filesystem type for mount, e.g. nfs + "o": true, // generic mount options + "device": true, // device to mount from + } +) + +type optsConfig struct { + MountType string + MountOpts string + MountDevice string +} + +func (o *optsConfig) String() string { + return fmt.Sprintf("type='%s' device='%s' o='%s'", o.MountType, o.MountDevice, o.MountOpts) +} + +// scopedPath verifies that the path where the volume is located +// is under Docker's root and the valid local paths. +func (r *Root) scopedPath(realPath string) bool { + // Volumes path for Docker version >= 1.7 + if strings.HasPrefix(realPath, filepath.Join(r.scope, volumesPathName)) && realPath != filepath.Join(r.scope, volumesPathName) { + return true + } + + // Volumes path for Docker version < 1.7 + if strings.HasPrefix(realPath, filepath.Join(r.scope, oldVfsDir)) { + return true + } + + return false +} + +func setOpts(v *localVolume, opts map[string]string) error { + if len(opts) == 0 { + return nil + } + if err := validateOpts(opts); err != nil { + return err + } + + v.opts = &optsConfig{ + MountType: opts["type"], + MountOpts: opts["o"], + MountDevice: opts["device"], + } + return nil +} + +func (v *localVolume) mount() error { + if v.opts.MountDevice == "" { + return fmt.Errorf("missing device in volume options") + } + mountOpts := v.opts.MountOpts + if v.opts.MountType == "nfs" { + if addrValue := getAddress(v.opts.MountOpts); addrValue != "" && net.ParseIP(addrValue).To4() == nil { + ipAddr, err := net.ResolveIPAddr("ip", addrValue) + if err != nil { + return errors.Wrapf(err, "error resolving passed in nfs address") + } + mountOpts = strings.Replace(mountOpts, "addr="+addrValue, "addr="+ipAddr.String(), 1) + } + } + err := mount.Mount(v.opts.MountDevice, v.path, v.opts.MountType, mountOpts) + return errors.Wrapf(err, "error while mounting volume with options: %s", v.opts) +} + +func (v *localVolume) CreatedAt() (time.Time, error) { + fileInfo, err := os.Stat(v.path) + if err != nil { + return time.Time{}, err + } + sec, nsec := fileInfo.Sys().(*syscall.Stat_t).Ctim.Unix() + return time.Unix(sec, nsec), nil +} diff --git a/vendor/github.com/moby/moby/volume/local/local_windows.go b/vendor/github.com/moby/moby/volume/local/local_windows.go new file mode 100644 index 000000000..6f5d2223a --- /dev/null +++ b/vendor/github.com/moby/moby/volume/local/local_windows.go @@ -0,0 +1,46 @@ +// Package local provides the default implementation for volumes. It +// is used to mount data volume containers and directories local to +// the host server. +package local + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "syscall" + "time" +) + +type optsConfig struct{} + +var validOpts map[string]bool + +// scopedPath verifies that the path where the volume is located +// is under Docker's root and the valid local paths. 
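+// For example, with scope "/var/lib/docker" (a typical Docker root), +// "/var/lib/docker/volumes/foo/_data" and the pre-1.7 layout +// "/var/lib/docker/vfs/dir/foo" are in scope, while "/var/lib/docker/volumes" +// itself and "/var/lib/docker/tmp/x" are not.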
+func (r *Root) scopedPath(realPath string) bool { + if strings.HasPrefix(realPath, filepath.Join(r.scope, volumesPathName)) && realPath != filepath.Join(r.scope, volumesPathName) { + return true + } + return false +} + +func setOpts(v *localVolume, opts map[string]string) error { + if len(opts) > 0 { + return fmt.Errorf("options are not supported on this platform") + } + return nil +} + +func (v *localVolume) mount() error { + return nil +} + +func (v *localVolume) CreatedAt() (time.Time, error) { + fileInfo, err := os.Stat(v.path) + if err != nil { + return time.Time{}, err + } + ft := fileInfo.Sys().(*syscall.Win32FileAttributeData).CreationTime + return time.Unix(0, ft.Nanoseconds()), nil +} diff --git a/vendor/github.com/moby/moby/volume/store/db.go b/vendor/github.com/moby/moby/volume/store/db.go new file mode 100644 index 000000000..c5fd1643f --- /dev/null +++ b/vendor/github.com/moby/moby/volume/store/db.go @@ -0,0 +1,88 @@ +package store + +import ( + "encoding/json" + + "github.com/Sirupsen/logrus" + "github.com/boltdb/bolt" + "github.com/pkg/errors" +) + +var volumeBucketName = []byte("volumes") + +type volumeMetadata struct { + Name string + Driver string + Labels map[string]string + Options map[string]string +} + +func (s *VolumeStore) setMeta(name string, meta volumeMetadata) error { + return s.db.Update(func(tx *bolt.Tx) error { + return setMeta(tx, name, meta) + }) +} + +func setMeta(tx *bolt.Tx, name string, meta volumeMetadata) error { + metaJSON, err := json.Marshal(meta) + if err != nil { + return err + } + b := tx.Bucket(volumeBucketName) + return errors.Wrap(b.Put([]byte(name), metaJSON), "error setting volume metadata") +} + +func (s *VolumeStore) getMeta(name string) (volumeMetadata, error) { + var meta volumeMetadata + err := s.db.View(func(tx *bolt.Tx) error { + return getMeta(tx, name, &meta) + }) + return meta, err +} + +func getMeta(tx *bolt.Tx, name string, meta *volumeMetadata) error { + b := tx.Bucket(volumeBucketName) + val := b.Get([]byte(name)) + if string(val) == "" { + return nil + } + if err := json.Unmarshal(val, meta); err != nil { + return errors.Wrap(err, "error unmarshaling volume metadata") + } + return nil +} + +func (s *VolumeStore) removeMeta(name string) error { + return s.db.Update(func(tx *bolt.Tx) error { + return removeMeta(tx, name) + }) +} + +func removeMeta(tx *bolt.Tx, name string) error { + b := tx.Bucket(volumeBucketName) + return errors.Wrap(b.Delete([]byte(name)), "error removing volume metadata") +} + +// listMeta is used during restore to get the list of volume metadata +// from the on-disk database. +// Any errors that occur are only logged. 
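+//
+// A minimal sketch of the intended call pattern, assuming an open *bolt.DB in
+// which the volumes bucket already exists (this mirrors how restore uses it):
+//
+//	var ls []volumeMetadata
+//	db.View(func(tx *bolt.Tx) error {
+//		ls = listMeta(tx)
+//		return nil
+//	})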
+func listMeta(tx *bolt.Tx) []volumeMetadata { + var ls []volumeMetadata + b := tx.Bucket(volumeBucketName) + b.ForEach(func(k, v []byte) error { + if len(v) == 0 { + // don't try to unmarshal an empty value + return nil + } + + var m volumeMetadata + if err := json.Unmarshal(v, &m); err != nil { + // Just log the error + logrus.Errorf("Error while reading volume metadata for volume %q: %v", string(k), err) + return nil + } + ls = append(ls, m) + return nil + }) + return ls +} diff --git a/vendor/github.com/moby/moby/volume/store/errors.go b/vendor/github.com/moby/moby/volume/store/errors.go new file mode 100644 index 000000000..980175f29 --- /dev/null +++ b/vendor/github.com/moby/moby/volume/store/errors.go @@ -0,0 +1,76 @@ +package store + +import ( + "strings" + + "github.com/pkg/errors" +) + +var ( + // errVolumeInUse is a typed error returned when trying to remove a volume that is currently in use by a container + errVolumeInUse = errors.New("volume is in use") + // errNoSuchVolume is a typed error returned if the requested volume doesn't exist in the volume store + errNoSuchVolume = errors.New("no such volume") + // errInvalidName is a typed error returned when creating a volume with a name that is not valid on the platform + errInvalidName = errors.New("volume name is not valid on this platform") + // errNameConflict is a typed error returned on create when a volume exists with the given name, but for a different driver + errNameConflict = errors.New("volume name must be unique") +) + +// OpErr is the error type returned by functions in the store package. It describes +// the operation, volume name, and error. +type OpErr struct { + // Err is the error that occurred during the operation. + Err error + // Op is the operation which caused the error, such as "create", or "list". + Op string + // Name is the name of the resource being requested for this op, typically the volume name or the driver name. + Name string + // Refs is the list of references associated with the resource. + Refs []string +} + +// Error satisfies the built-in error interface type. 
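+//
+// The resulting message has the shape "<op> <name>: <err> - [<refs>]", for
+// example (with hypothetical values):
+//
+//	remove myvolume: volume is in use - [abc123, def456]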
+func (e *OpErr) Error() string {
+	if e == nil {
+		return ""
+	}
+	s := e.Op
+	if e.Name != "" {
+		s = s + " " + e.Name
+	}
+
+	s = s + ": " + e.Err.Error()
+	if len(e.Refs) > 0 {
+		s = s + " - " + "[" + strings.Join(e.Refs, ", ") + "]"
+	}
+	return s
+}
+
+// IsInUse returns a boolean indicating whether the error indicates that a
+// volume is in use
+func IsInUse(err error) bool {
+	return isErr(err, errVolumeInUse)
+}
+
+// IsNotExist returns a boolean indicating whether the error indicates that the volume does not exist
+func IsNotExist(err error) bool {
+	return isErr(err, errNoSuchVolume)
+}
+
+// IsNameConflict returns a boolean indicating whether the error indicates that a
+// volume name is already taken
+func IsNameConflict(err error) bool {
+	return isErr(err, errNameConflict)
+}
+
+func isErr(err error, expected error) bool {
+	err = errors.Cause(err)
+	switch pe := err.(type) {
+	case nil:
+		return false
+	case *OpErr:
+		err = errors.Cause(pe.Err)
+	}
+	return err == expected
+}
diff --git a/vendor/github.com/moby/moby/volume/store/restore.go b/vendor/github.com/moby/moby/volume/store/restore.go
new file mode 100644
index 000000000..c0c5b519b
--- /dev/null
+++ b/vendor/github.com/moby/moby/volume/store/restore.go
@@ -0,0 +1,83 @@
+package store
+
+import (
+	"sync"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/boltdb/bolt"
+	"github.com/docker/docker/volume"
+	"github.com/docker/docker/volume/drivers"
+)
+
+// restore is called when a new volume store is created.
+// Its primary purpose is to ensure that all drivers' refcounts are set based
+// on known volumes after a restart.
+// This only attempts to track volumes that are actually stored in the on-disk db.
+// It does not probe the available drivers to find anything that may have been added
+// out of band.
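+//
+// In outline: read all metadata entries with listMeta, look each volume up in
+// its driver concurrently (driver calls can be slow), queue entries whose
+// volumes no longer exist for removal from the db, and cache the rest in the
+// store's in-memory maps.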
+func (s *VolumeStore) restore() { + var ls []volumeMetadata + s.db.View(func(tx *bolt.Tx) error { + ls = listMeta(tx) + return nil + }) + + chRemove := make(chan *volumeMetadata, len(ls)) + var wg sync.WaitGroup + for _, meta := range ls { + wg.Add(1) + // this is potentially a very slow operation, so do it in a goroutine + go func(meta volumeMetadata) { + defer wg.Done() + + var v volume.Volume + var err error + if meta.Driver != "" { + v, err = lookupVolume(meta.Driver, meta.Name) + if err != nil && err != errNoSuchVolume { + logrus.WithError(err).WithField("driver", meta.Driver).WithField("volume", meta.Name).Warn("Error restoring volume") + return + } + if v == nil { + // doesn't exist in the driver, remove it from the db + chRemove <- &meta + return + } + } else { + v, err = s.getVolume(meta.Name) + if err != nil { + if err == errNoSuchVolume { + chRemove <- &meta + } + return + } + + meta.Driver = v.DriverName() + if err := s.setMeta(v.Name(), meta); err != nil { + logrus.WithError(err).WithField("driver", meta.Driver).WithField("volume", v.Name()).Warn("Error updating volume metadata on restore") + } + } + + // increment driver refcount + volumedrivers.CreateDriver(meta.Driver) + + // cache the volume + s.globalLock.Lock() + s.options[v.Name()] = meta.Options + s.labels[v.Name()] = meta.Labels + s.names[v.Name()] = v + s.globalLock.Unlock() + }(meta) + } + + wg.Wait() + close(chRemove) + s.db.Update(func(tx *bolt.Tx) error { + for meta := range chRemove { + if err := removeMeta(tx, meta.Name); err != nil { + logrus.WithField("volume", meta.Name).Warnf("Error removing stale entry from volume db: %v", err) + } + } + return nil + }) +} diff --git a/vendor/github.com/moby/moby/volume/store/store.go b/vendor/github.com/moby/moby/volume/store/store.go new file mode 100644 index 000000000..cded883e6 --- /dev/null +++ b/vendor/github.com/moby/moby/volume/store/store.go @@ -0,0 +1,669 @@ +package store + +import ( + "net" + "os" + "path/filepath" + "sync" + "time" + + "github.com/pkg/errors" + + "github.com/Sirupsen/logrus" + "github.com/boltdb/bolt" + "github.com/docker/docker/pkg/locker" + "github.com/docker/docker/volume" + "github.com/docker/docker/volume/drivers" +) + +const ( + volumeDataDir = "volumes" +) + +type volumeWrapper struct { + volume.Volume + labels map[string]string + scope string + options map[string]string +} + +func (v volumeWrapper) Options() map[string]string { + options := map[string]string{} + for key, value := range v.options { + options[key] = value + } + return options +} + +func (v volumeWrapper) Labels() map[string]string { + return v.labels +} + +func (v volumeWrapper) Scope() string { + return v.scope +} + +func (v volumeWrapper) CachedPath() string { + if vv, ok := v.Volume.(interface { + CachedPath() string + }); ok { + return vv.CachedPath() + } + return v.Volume.Path() +} + +// New initializes a VolumeStore to keep +// reference counting of volumes in the system. 
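+//
+// A minimal usage sketch (hypothetical root path). Note that Shutdown closes
+// the bolt database opened here, so it is only meaningful when rootPath was
+// non-empty:
+//
+//	s, err := New("/var/lib/docker")
+//	if err != nil {
+//		// handle error
+//	}
+//	defer s.Shutdown()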
+func New(rootPath string) (*VolumeStore, error) {
+	vs := &VolumeStore{
+		locks:   &locker.Locker{},
+		names:   make(map[string]volume.Volume),
+		refs:    make(map[string]map[string]struct{}),
+		labels:  make(map[string]map[string]string),
+		options: make(map[string]map[string]string),
+	}
+
+	if rootPath != "" {
+		// initialize metadata store
+		volPath := filepath.Join(rootPath, volumeDataDir)
+		if err := os.MkdirAll(volPath, 0750); err != nil {
+			return nil, err
+		}
+
+		dbPath := filepath.Join(volPath, "metadata.db")
+
+		var err error
+		vs.db, err = bolt.Open(dbPath, 0600, &bolt.Options{Timeout: 1 * time.Second})
+		if err != nil {
+			return nil, errors.Wrap(err, "error while opening volume store metadata database")
+		}
+
+		// initialize volumes bucket
+		if err := vs.db.Update(func(tx *bolt.Tx) error {
+			if _, err := tx.CreateBucketIfNotExists(volumeBucketName); err != nil {
+				return errors.Wrap(err, "error while setting up volume store metadata database")
+			}
+			return nil
+		}); err != nil {
+			return nil, err
+		}
+	}
+
+	vs.restore()
+
+	return vs, nil
+}
+
+func (s *VolumeStore) getNamed(name string) (volume.Volume, bool) {
+	s.globalLock.RLock()
+	v, exists := s.names[name]
+	s.globalLock.RUnlock()
+	return v, exists
+}
+
+func (s *VolumeStore) setNamed(v volume.Volume, ref string) {
+	name := v.Name()
+
+	s.globalLock.Lock()
+	s.names[name] = v
+	if len(ref) > 0 {
+		if s.refs[name] == nil {
+			s.refs[name] = make(map[string]struct{})
+		}
+		s.refs[name][ref] = struct{}{}
+	}
+	s.globalLock.Unlock()
+}
+
+// hasRef returns true if the given name has at least one ref.
+// Callers of this function are expected to hold the name lock.
+func (s *VolumeStore) hasRef(name string) bool {
+	s.globalLock.RLock()
+	l := len(s.refs[name])
+	s.globalLock.RUnlock()
+	return l > 0
+}
+
+// getRefs gets the list of refs for a given name
+// Callers of this function are expected to hold the name lock.
+func (s *VolumeStore) getRefs(name string) []string {
+	s.globalLock.RLock()
+	defer s.globalLock.RUnlock()
+
+	refs := make([]string, 0, len(s.refs[name]))
+	for r := range s.refs[name] {
+		refs = append(refs, r)
+	}
+
+	return refs
+}
+
+// Purge allows the cleanup of internal data on docker in case
+// the internal data is out of sync with volume driver plugins.
+func (s *VolumeStore) Purge(name string) {
+	s.globalLock.Lock()
+	v, exists := s.names[name]
+	if exists {
+		if _, err := volumedrivers.RemoveDriver(v.DriverName()); err != nil {
+			logrus.Errorf("Error dereferencing volume driver: %v", err)
+		}
+	}
+	if err := s.removeMeta(name); err != nil {
+		logrus.Errorf("Error removing volume metadata for volume %q: %v", name, err)
+	}
+	delete(s.names, name)
+	delete(s.refs, name)
+	delete(s.labels, name)
+	delete(s.options, name)
+	s.globalLock.Unlock()
+}
+
+// VolumeStore is a struct that stores the list of volumes available and keeps track of their usage counts
+type VolumeStore struct {
+	// locks ensures that only one action is being performed on a particular volume at a time without locking the entire store
+	// since actions on volumes can be quite slow, this ensures the store is free to handle requests for other volumes.
+	locks *locker.Locker
+	// globalLock is used to protect access to mutable structures used by the store object
+	globalLock sync.RWMutex
+	// names stores the volume name -> volume relationship.
+ // This is used for making lookups faster so we don't have to probe all drivers + names map[string]volume.Volume + // refs stores the volume name and the list of things referencing it + refs map[string]map[string]struct{} + // labels stores volume labels for each volume + labels map[string]map[string]string + // options stores volume options for each volume + options map[string]map[string]string + db *bolt.DB +} + +// List proxies to all registered volume drivers to get the full list of volumes +// If a driver returns a volume that has name which conflicts with another volume from a different driver, +// the first volume is chosen and the conflicting volume is dropped. +func (s *VolumeStore) List() ([]volume.Volume, []string, error) { + vols, warnings, err := s.list() + if err != nil { + return nil, nil, &OpErr{Err: err, Op: "list"} + } + var out []volume.Volume + + for _, v := range vols { + name := normaliseVolumeName(v.Name()) + + s.locks.Lock(name) + storedV, exists := s.getNamed(name) + // Note: it's not safe to populate the cache here because the volume may have been + // deleted before we acquire a lock on its name + if exists && storedV.DriverName() != v.DriverName() { + logrus.Warnf("Volume name %s already exists for driver %s, not including volume returned by %s", v.Name(), storedV.DriverName(), v.DriverName()) + s.locks.Unlock(v.Name()) + continue + } + + out = append(out, v) + s.locks.Unlock(v.Name()) + } + return out, warnings, nil +} + +// list goes through each volume driver and asks for its list of volumes. +func (s *VolumeStore) list() ([]volume.Volume, []string, error) { + var ( + ls []volume.Volume + warnings []string + ) + + drivers, err := volumedrivers.GetAllDrivers() + if err != nil { + return nil, nil, err + } + + type vols struct { + vols []volume.Volume + err error + driverName string + } + chVols := make(chan vols, len(drivers)) + + for _, vd := range drivers { + go func(d volume.Driver) { + vs, err := d.List() + if err != nil { + chVols <- vols{driverName: d.Name(), err: &OpErr{Err: err, Name: d.Name(), Op: "list"}} + return + } + for i, v := range vs { + s.globalLock.RLock() + vs[i] = volumeWrapper{v, s.labels[v.Name()], d.Scope(), s.options[v.Name()]} + s.globalLock.RUnlock() + } + + chVols <- vols{vols: vs} + }(vd) + } + + badDrivers := make(map[string]struct{}) + for i := 0; i < len(drivers); i++ { + vs := <-chVols + + if vs.err != nil { + warnings = append(warnings, vs.err.Error()) + badDrivers[vs.driverName] = struct{}{} + logrus.Warn(vs.err) + } + ls = append(ls, vs.vols...) + } + + if len(badDrivers) > 0 { + s.globalLock.RLock() + for _, v := range s.names { + if _, exists := badDrivers[v.DriverName()]; exists { + ls = append(ls, v) + } + } + s.globalLock.RUnlock() + } + return ls, warnings, nil +} + +// CreateWithRef creates a volume with the given name and driver and stores the ref +// This ensures there's no race between creating a volume and then storing a reference. +func (s *VolumeStore) CreateWithRef(name, driverName, ref string, opts, labels map[string]string) (volume.Volume, error) { + name = normaliseVolumeName(name) + s.locks.Lock(name) + defer s.locks.Unlock(name) + + v, err := s.create(name, driverName, opts, labels) + if err != nil { + if _, ok := err.(*OpErr); ok { + return nil, err + } + return nil, &OpErr{Err: err, Name: name, Op: "create"} + } + + s.setNamed(v, ref) + return v, nil +} + +// Create creates a volume with the given name and driver. 
+// This is just like CreateWithRef() except we don't store a reference while holding the lock. +func (s *VolumeStore) Create(name, driverName string, opts, labels map[string]string) (volume.Volume, error) { + return s.CreateWithRef(name, driverName, "", opts, labels) +} + +// checkConflict checks the local cache for name collisions with the passed in name, +// for existing volumes with the same name but in a different driver. +// This is used by `Create` as a best effort to prevent name collisions for volumes. +// If a matching volume is found that is not a conflict that is returned so the caller +// does not need to perform an additional lookup. +// When no matching volume is found, both returns will be nil +// +// Note: This does not probe all the drivers for name collisions because v1 plugins +// are very slow, particularly if the plugin is down, and cause other issues, +// particularly around locking the store. +// TODO(cpuguy83): With v2 plugins this shouldn't be a problem. Could also potentially +// use a connect timeout for this kind of check to ensure we aren't blocking for a +// long time. +func (s *VolumeStore) checkConflict(name, driverName string) (volume.Volume, error) { + // check the local cache + v, _ := s.getNamed(name) + if v == nil { + return nil, nil + } + + vDriverName := v.DriverName() + var conflict bool + if driverName != "" { + // Retrieve canonical driver name to avoid inconsistencies (for example + // "plugin" vs. "plugin:latest") + vd, err := volumedrivers.GetDriver(driverName) + if err != nil { + return nil, err + } + + if vDriverName != vd.Name() { + conflict = true + } + } + + // let's check if the found volume ref + // is stale by checking with the driver if it still exists + exists, err := volumeExists(v) + if err != nil { + return nil, errors.Wrapf(errNameConflict, "found reference to volume '%s' in driver '%s', but got an error while checking the driver: %v", name, vDriverName, err) + } + + if exists { + if conflict { + return nil, errors.Wrapf(errNameConflict, "driver '%s' already has volume '%s'", vDriverName, name) + } + return v, nil + } + + if s.hasRef(v.Name()) { + // Containers are referencing this volume but it doesn't seem to exist anywhere. + // Return a conflict error here, the user can fix this with `docker volume rm -f` + return nil, errors.Wrapf(errNameConflict, "found references to volume '%s' in driver '%s' but the volume was not found in the driver -- you may need to remove containers referencing this volume or force remove the volume to re-create it", name, vDriverName) + } + + // doesn't exist, so purge it from the cache + s.Purge(name) + return nil, nil +} + +// volumeExists returns if the volume is still present in the driver. +// An error is returned if there was an issue communicating with the driver. +func volumeExists(v volume.Volume) (bool, error) { + exists, err := lookupVolume(v.DriverName(), v.Name()) + if err != nil { + return false, err + } + return exists != nil, nil +} + +// create asks the given driver to create a volume with the name/opts. +// If a volume with the name is already known, it will ask the stored driver for the volume. +// If the passed in driver name does not match the driver name which is stored +// for the given volume name, an error is returned after checking if the reference is stale. +// If the reference is stale, it will be purged and this create can continue. +// It is expected that callers of this function hold any necessary locks. 
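+//
+// A hypothetical call, with the caller already holding the name lock; the
+// opts shown are the local driver's options from local_unix.go:
+//
+//	v, err := s.create("data", "local", map[string]string{
+//		"type":   "tmpfs",
+//		"device": "tmpfs",
+//		"o":      "size=100m,uid=1000",
+//	}, nil)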
+func (s *VolumeStore) create(name, driverName string, opts, labels map[string]string) (volume.Volume, error) { + // Validate the name in a platform-specific manner + valid, err := volume.IsVolumeNameValid(name) + if err != nil { + return nil, err + } + if !valid { + return nil, &OpErr{Err: errInvalidName, Name: name, Op: "create"} + } + + v, err := s.checkConflict(name, driverName) + if err != nil { + return nil, err + } + + if v != nil { + return v, nil + } + + // Since there isn't a specified driver name, let's see if any of the existing drivers have this volume name + if driverName == "" { + v, _ := s.getVolume(name) + if v != nil { + return v, nil + } + } + + vd, err := volumedrivers.CreateDriver(driverName) + + if err != nil { + return nil, &OpErr{Op: "create", Name: name, Err: err} + } + + logrus.Debugf("Registering new volume reference: driver %q, name %q", vd.Name(), name) + + if v, _ := vd.Get(name); v != nil { + return v, nil + } + v, err = vd.Create(name, opts) + if err != nil { + return nil, err + } + s.globalLock.Lock() + s.labels[name] = labels + s.options[name] = opts + s.refs[name] = make(map[string]struct{}) + s.globalLock.Unlock() + + metadata := volumeMetadata{ + Name: name, + Driver: vd.Name(), + Labels: labels, + Options: opts, + } + + if err := s.setMeta(name, metadata); err != nil { + return nil, err + } + return volumeWrapper{v, labels, vd.Scope(), opts}, nil +} + +// GetWithRef gets a volume with the given name from the passed in driver and stores the ref +// This is just like Get(), but we store the reference while holding the lock. +// This makes sure there are no races between checking for the existence of a volume and adding a reference for it +func (s *VolumeStore) GetWithRef(name, driverName, ref string) (volume.Volume, error) { + name = normaliseVolumeName(name) + s.locks.Lock(name) + defer s.locks.Unlock(name) + + vd, err := volumedrivers.GetDriver(driverName) + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "get"} + } + + v, err := vd.Get(name) + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "get"} + } + + s.setNamed(v, ref) + + s.globalLock.RLock() + defer s.globalLock.RUnlock() + return volumeWrapper{v, s.labels[name], vd.Scope(), s.options[name]}, nil +} + +// Get looks if a volume with the given name exists and returns it if so +func (s *VolumeStore) Get(name string) (volume.Volume, error) { + name = normaliseVolumeName(name) + s.locks.Lock(name) + defer s.locks.Unlock(name) + + v, err := s.getVolume(name) + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "get"} + } + s.setNamed(v, "") + return v, nil +} + +// getVolume requests the volume, if the driver info is stored it just accesses that driver, +// if the driver is unknown it probes all drivers until it finds the first volume with that name. 
+// it is expected that callers of this function hold any necessary locks +func (s *VolumeStore) getVolume(name string) (volume.Volume, error) { + var meta volumeMetadata + meta, err := s.getMeta(name) + if err != nil { + return nil, err + } + + driverName := meta.Driver + if driverName == "" { + s.globalLock.RLock() + v, exists := s.names[name] + s.globalLock.RUnlock() + if exists { + meta.Driver = v.DriverName() + if err := s.setMeta(name, meta); err != nil { + return nil, err + } + } + } + + if meta.Driver != "" { + vol, err := lookupVolume(meta.Driver, name) + if err != nil { + return nil, err + } + if vol == nil { + s.Purge(name) + return nil, errNoSuchVolume + } + + var scope string + vd, err := volumedrivers.GetDriver(meta.Driver) + if err == nil { + scope = vd.Scope() + } + return volumeWrapper{vol, meta.Labels, scope, meta.Options}, nil + } + + logrus.Debugf("Probing all drivers for volume with name: %s", name) + drivers, err := volumedrivers.GetAllDrivers() + if err != nil { + return nil, err + } + + for _, d := range drivers { + v, err := d.Get(name) + if err != nil || v == nil { + continue + } + meta.Driver = v.DriverName() + if err := s.setMeta(name, meta); err != nil { + return nil, err + } + return volumeWrapper{v, meta.Labels, d.Scope(), meta.Options}, nil + } + return nil, errNoSuchVolume +} + +// lookupVolume gets the specified volume from the specified driver. +// This will only return errors related to communications with the driver. +// If the driver returns an error that is not communication related the +// error is logged but not returned. +// If the volume is not found it will return `nil, nil`` +func lookupVolume(driverName, volumeName string) (volume.Volume, error) { + vd, err := volumedrivers.GetDriver(driverName) + if err != nil { + return nil, errors.Wrapf(err, "error while checking if volume %q exists in driver %q", volumeName, driverName) + } + v, err := vd.Get(volumeName) + if err != nil { + err = errors.Cause(err) + if _, ok := err.(net.Error); ok { + if v != nil { + volumeName = v.Name() + driverName = v.DriverName() + } + return nil, errors.Wrapf(err, "error while checking if volume %q exists in driver %q", volumeName, driverName) + } + + // At this point, the error could be anything from the driver, such as "no such volume" + // Let's not check an error here, and instead check if the driver returned a volume + logrus.WithError(err).WithField("driver", driverName).WithField("volume", volumeName).Warnf("Error while looking up volume") + } + return v, nil +} + +// Remove removes the requested volume. 
A volume is not removed if it has any refs +func (s *VolumeStore) Remove(v volume.Volume) error { + name := normaliseVolumeName(v.Name()) + s.locks.Lock(name) + defer s.locks.Unlock(name) + + if s.hasRef(name) { + return &OpErr{Err: errVolumeInUse, Name: v.Name(), Op: "remove", Refs: s.getRefs(name)} + } + + vd, err := volumedrivers.GetDriver(v.DriverName()) + if err != nil { + return &OpErr{Err: err, Name: v.DriverName(), Op: "remove"} + } + + logrus.Debugf("Removing volume reference: driver %s, name %s", v.DriverName(), name) + vol := unwrapVolume(v) + if err := vd.Remove(vol); err != nil { + return &OpErr{Err: err, Name: name, Op: "remove"} + } + + s.Purge(name) + return nil +} + +// Dereference removes the specified reference to the volume +func (s *VolumeStore) Dereference(v volume.Volume, ref string) { + name := v.Name() + + s.locks.Lock(name) + defer s.locks.Unlock(name) + + s.globalLock.Lock() + defer s.globalLock.Unlock() + + if s.refs[name] != nil { + delete(s.refs[name], ref) + } +} + +// Refs gets the current list of refs for the given volume +func (s *VolumeStore) Refs(v volume.Volume) []string { + name := v.Name() + + s.locks.Lock(name) + defer s.locks.Unlock(name) + + return s.getRefs(name) +} + +// FilterByDriver returns the available volumes filtered by driver name +func (s *VolumeStore) FilterByDriver(name string) ([]volume.Volume, error) { + vd, err := volumedrivers.GetDriver(name) + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "list"} + } + ls, err := vd.List() + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "list"} + } + for i, v := range ls { + options := map[string]string{} + s.globalLock.RLock() + for key, value := range s.options[v.Name()] { + options[key] = value + } + ls[i] = volumeWrapper{v, s.labels[v.Name()], vd.Scope(), options} + s.globalLock.RUnlock() + } + return ls, nil +} + +// FilterByUsed returns the available volumes filtered by if they are in use or not. +// `used=true` returns only volumes that are being used, while `used=false` returns +// only volumes that are not being used. +func (s *VolumeStore) FilterByUsed(vols []volume.Volume, used bool) []volume.Volume { + return s.filter(vols, func(v volume.Volume) bool { + s.locks.Lock(v.Name()) + hasRef := s.hasRef(v.Name()) + s.locks.Unlock(v.Name()) + return used == hasRef + }) +} + +// filterFunc defines a function to allow filter volumes in the store +type filterFunc func(vol volume.Volume) bool + +// filter returns the available volumes filtered by a filterFunc function +func (s *VolumeStore) filter(vols []volume.Volume, f filterFunc) []volume.Volume { + var ls []volume.Volume + for _, v := range vols { + if f(v) { + ls = append(ls, v) + } + } + return ls +} + +func unwrapVolume(v volume.Volume) volume.Volume { + if vol, ok := v.(volumeWrapper); ok { + return vol.Volume + } + + return v +} + +// Shutdown releases all resources used by the volume store +// It does not make any changes to volumes, drivers, etc. 
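+//
+// Note: this closes the bolt database opened by New, so it assumes the store
+// was created with a non-empty root path.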
+func (s *VolumeStore) Shutdown() error { + return s.db.Close() +} diff --git a/vendor/github.com/moby/moby/volume/store/store_test.go b/vendor/github.com/moby/moby/volume/store/store_test.go new file mode 100644 index 000000000..f5f00255a --- /dev/null +++ b/vendor/github.com/moby/moby/volume/store/store_test.go @@ -0,0 +1,234 @@ +package store + +import ( + "errors" + "io/ioutil" + "os" + "strings" + "testing" + + "github.com/docker/docker/volume/drivers" + volumetestutils "github.com/docker/docker/volume/testutils" +) + +func TestCreate(t *testing.T) { + volumedrivers.Register(volumetestutils.NewFakeDriver("fake"), "fake") + defer volumedrivers.Unregister("fake") + dir, err := ioutil.TempDir("", "test-create") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + s, err := New(dir) + if err != nil { + t.Fatal(err) + } + v, err := s.Create("fake1", "fake", nil, nil) + if err != nil { + t.Fatal(err) + } + if v.Name() != "fake1" { + t.Fatalf("Expected fake1 volume, got %v", v) + } + if l, _, _ := s.List(); len(l) != 1 { + t.Fatalf("Expected 1 volume in the store, got %v: %v", len(l), l) + } + + if _, err := s.Create("none", "none", nil, nil); err == nil { + t.Fatalf("Expected unknown driver error, got nil") + } + + _, err = s.Create("fakeerror", "fake", map[string]string{"error": "create error"}, nil) + expected := &OpErr{Op: "create", Name: "fakeerror", Err: errors.New("create error")} + if err != nil && err.Error() != expected.Error() { + t.Fatalf("Expected create fakeError: create error, got %v", err) + } +} + +func TestRemove(t *testing.T) { + volumedrivers.Register(volumetestutils.NewFakeDriver("fake"), "fake") + volumedrivers.Register(volumetestutils.NewFakeDriver("noop"), "noop") + defer volumedrivers.Unregister("fake") + defer volumedrivers.Unregister("noop") + dir, err := ioutil.TempDir("", "test-remove") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + s, err := New(dir) + if err != nil { + t.Fatal(err) + } + + // doing string compare here since this error comes directly from the driver + expected := "no such volume" + if err := s.Remove(volumetestutils.NoopVolume{}); err == nil || !strings.Contains(err.Error(), expected) { + t.Fatalf("Expected error %q, got %v", expected, err) + } + + v, err := s.CreateWithRef("fake1", "fake", "fake", nil, nil) + if err != nil { + t.Fatal(err) + } + + if err := s.Remove(v); !IsInUse(err) { + t.Fatalf("Expected ErrVolumeInUse error, got %v", err) + } + s.Dereference(v, "fake") + if err := s.Remove(v); err != nil { + t.Fatal(err) + } + if l, _, _ := s.List(); len(l) != 0 { + t.Fatalf("Expected 0 volumes in the store, got %v, %v", len(l), l) + } +} + +func TestList(t *testing.T) { + volumedrivers.Register(volumetestutils.NewFakeDriver("fake"), "fake") + volumedrivers.Register(volumetestutils.NewFakeDriver("fake2"), "fake2") + defer volumedrivers.Unregister("fake") + defer volumedrivers.Unregister("fake2") + dir, err := ioutil.TempDir("", "test-list") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + s, err := New(dir) + if err != nil { + t.Fatal(err) + } + if _, err := s.Create("test", "fake", nil, nil); err != nil { + t.Fatal(err) + } + if _, err := s.Create("test2", "fake2", nil, nil); err != nil { + t.Fatal(err) + } + + ls, _, err := s.List() + if err != nil { + t.Fatal(err) + } + if len(ls) != 2 { + t.Fatalf("expected 2 volumes, got: %d", len(ls)) + } + if err := s.Shutdown(); err != nil { + t.Fatal(err) + } + + // and again with a new store + s, err = New(dir) + if err != nil { + t.Fatal(err) + } + 
ls, _, err = s.List() + if err != nil { + t.Fatal(err) + } + if len(ls) != 2 { + t.Fatalf("expected 2 volumes, got: %d", len(ls)) + } +} + +func TestFilterByDriver(t *testing.T) { + volumedrivers.Register(volumetestutils.NewFakeDriver("fake"), "fake") + volumedrivers.Register(volumetestutils.NewFakeDriver("noop"), "noop") + defer volumedrivers.Unregister("fake") + defer volumedrivers.Unregister("noop") + dir, err := ioutil.TempDir("", "test-filter-driver") + if err != nil { + t.Fatal(err) + } + s, err := New(dir) + if err != nil { + t.Fatal(err) + } + + if _, err := s.Create("fake1", "fake", nil, nil); err != nil { + t.Fatal(err) + } + if _, err := s.Create("fake2", "fake", nil, nil); err != nil { + t.Fatal(err) + } + if _, err := s.Create("fake3", "noop", nil, nil); err != nil { + t.Fatal(err) + } + + if l, _ := s.FilterByDriver("fake"); len(l) != 2 { + t.Fatalf("Expected 2 volumes, got %v, %v", len(l), l) + } + + if l, _ := s.FilterByDriver("noop"); len(l) != 1 { + t.Fatalf("Expected 1 volume, got %v, %v", len(l), l) + } +} + +func TestFilterByUsed(t *testing.T) { + volumedrivers.Register(volumetestutils.NewFakeDriver("fake"), "fake") + volumedrivers.Register(volumetestutils.NewFakeDriver("noop"), "noop") + dir, err := ioutil.TempDir("", "test-filter-used") + if err != nil { + t.Fatal(err) + } + + s, err := New(dir) + if err != nil { + t.Fatal(err) + } + + if _, err := s.CreateWithRef("fake1", "fake", "volReference", nil, nil); err != nil { + t.Fatal(err) + } + if _, err := s.Create("fake2", "fake", nil, nil); err != nil { + t.Fatal(err) + } + + vols, _, err := s.List() + if err != nil { + t.Fatal(err) + } + + dangling := s.FilterByUsed(vols, false) + if len(dangling) != 1 { + t.Fatalf("expected 1 dangling volume, got %v", len(dangling)) + } + if dangling[0].Name() != "fake2" { + t.Fatalf("expected dangling volume fake2, got %s", dangling[0].Name()) + } + + used := s.FilterByUsed(vols, true) + if len(used) != 1 { + t.Fatalf("expected 1 used volume, got %v", len(used)) + } + if used[0].Name() != "fake1" { + t.Fatalf("expected used volume fake1, got %s", used[0].Name()) + } +} + +func TestDerefMultipleOfSameRef(t *testing.T) { + volumedrivers.Register(volumetestutils.NewFakeDriver("fake"), "fake") + dir, err := ioutil.TempDir("", "test-same-deref") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + s, err := New(dir) + if err != nil { + t.Fatal(err) + } + + v, err := s.CreateWithRef("fake1", "fake", "volReference", nil, nil) + if err != nil { + t.Fatal(err) + } + + if _, err := s.GetWithRef("fake1", "fake", "volReference"); err != nil { + t.Fatal(err) + } + + s.Dereference(v, "volReference") + if err := s.Remove(v); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/volume/store/store_unix.go b/vendor/github.com/moby/moby/volume/store/store_unix.go new file mode 100644 index 000000000..8ebc1f20c --- /dev/null +++ b/vendor/github.com/moby/moby/volume/store/store_unix.go @@ -0,0 +1,9 @@ +// +build linux freebsd solaris + +package store + +// normaliseVolumeName is a platform specific function to normalise the name +// of a volume. 
This is a no-op on Unix-like platforms +func normaliseVolumeName(name string) string { + return name +} diff --git a/vendor/github.com/moby/moby/volume/store/store_windows.go b/vendor/github.com/moby/moby/volume/store/store_windows.go new file mode 100644 index 000000000..8601cdd5c --- /dev/null +++ b/vendor/github.com/moby/moby/volume/store/store_windows.go @@ -0,0 +1,12 @@ +package store + +import "strings" + +// normaliseVolumeName is a platform specific function to normalise the name +// of a volume. On Windows, as NTFS is case insensitive, under +// c:\ProgramData\Docker\Volumes\, the folders John and john would be synonymous. +// Hence we can't allow the volume "John" and "john" to be created as separate +// volumes. +func normaliseVolumeName(name string) string { + return strings.ToLower(name) +} diff --git a/vendor/github.com/moby/moby/volume/testutils/testutils.go b/vendor/github.com/moby/moby/volume/testutils/testutils.go new file mode 100644 index 000000000..359d92382 --- /dev/null +++ b/vendor/github.com/moby/moby/volume/testutils/testutils.go @@ -0,0 +1,123 @@ +package testutils + +import ( + "fmt" + "time" + + "github.com/docker/docker/volume" +) + +// NoopVolume is a volume that doesn't perform any operation +type NoopVolume struct{} + +// Name is the name of the volume +func (NoopVolume) Name() string { return "noop" } + +// DriverName is the name of the driver +func (NoopVolume) DriverName() string { return "noop" } + +// Path is the filesystem path to the volume +func (NoopVolume) Path() string { return "noop" } + +// Mount mounts the volume in the container +func (NoopVolume) Mount(_ string) (string, error) { return "noop", nil } + +// Unmount unmounts the volume from the container +func (NoopVolume) Unmount(_ string) error { return nil } + +// Status provides low-level details about the volume +func (NoopVolume) Status() map[string]interface{} { return nil } + +// CreatedAt provides the time the volume (directory) was created at +func (NoopVolume) CreatedAt() (time.Time, error) { return time.Now(), nil } + +// FakeVolume is a fake volume with a random name +type FakeVolume struct { + name string + driverName string +} + +// NewFakeVolume creates a new fake volume for testing +func NewFakeVolume(name string, driverName string) volume.Volume { + return FakeVolume{name: name, driverName: driverName} +} + +// Name is the name of the volume +func (f FakeVolume) Name() string { return f.name } + +// DriverName is the name of the driver +func (f FakeVolume) DriverName() string { return f.driverName } + +// Path is the filesystem path to the volume +func (FakeVolume) Path() string { return "fake" } + +// Mount mounts the volume in the container +func (FakeVolume) Mount(_ string) (string, error) { return "fake", nil } + +// Unmount unmounts the volume from the container +func (FakeVolume) Unmount(_ string) error { return nil } + +// Status provides low-level details about the volume +func (FakeVolume) Status() map[string]interface{} { return nil } + +// CreatedAt provides the time the volume (directory) was created at +func (FakeVolume) CreatedAt() (time.Time, error) { return time.Now(), nil } + +// FakeDriver is a driver that generates fake volumes +type FakeDriver struct { + name string + vols map[string]volume.Volume +} + +// NewFakeDriver creates a new FakeDriver with the specified name +func NewFakeDriver(name string) volume.Driver { + return &FakeDriver{ + name: name, + vols: make(map[string]volume.Volume), + } +} + +// Name is the name of the driver +func (d *FakeDriver) 
Name() string { return d.name } + +// Create initializes a fake volume. +// It returns an error if the options include an "error" key with a message +func (d *FakeDriver) Create(name string, opts map[string]string) (volume.Volume, error) { + if opts != nil && opts["error"] != "" { + return nil, fmt.Errorf(opts["error"]) + } + v := NewFakeVolume(name, d.name) + d.vols[name] = v + return v, nil +} + +// Remove deletes a volume. +func (d *FakeDriver) Remove(v volume.Volume) error { + if _, exists := d.vols[v.Name()]; !exists { + return fmt.Errorf("no such volume") + } + delete(d.vols, v.Name()) + return nil +} + +// List lists the volumes +func (d *FakeDriver) List() ([]volume.Volume, error) { + var vols []volume.Volume + for _, v := range d.vols { + vols = append(vols, v) + } + return vols, nil +} + +// Get gets the volume +func (d *FakeDriver) Get(name string) (volume.Volume, error) { + if v, exists := d.vols[name]; exists { + return v, nil + } + return nil, fmt.Errorf("no such volume") +} + +// Scope returns the local scope +func (*FakeDriver) Scope() string { + return "local" +} diff --git a/vendor/github.com/moby/moby/volume/validate.go b/vendor/github.com/moby/moby/volume/validate.go new file mode 100644 index 000000000..42396a0da --- /dev/null +++ b/vendor/github.com/moby/moby/volume/validate.go @@ -0,0 +1,140 @@ +package volume + +import ( + "errors" + "fmt" + "os" + "path/filepath" + + "github.com/docker/docker/api/types/mount" +) + +var errBindNotExist = errors.New("bind source path does not exist") + +type validateOpts struct { + skipBindSourceCheck bool + skipAbsolutePathCheck bool +} + +func validateMountConfig(mnt *mount.Mount, options ...func(*validateOpts)) error { + opts := validateOpts{} + for _, o := range options { + o(&opts) + } + + if len(mnt.Target) == 0 { + return &errMountConfig{mnt, errMissingField("Target")} + } + + if err := validateNotRoot(mnt.Target); err != nil { + return &errMountConfig{mnt, err} + } + + if !opts.skipAbsolutePathCheck { + if err := validateAbsolute(mnt.Target); err != nil { + return &errMountConfig{mnt, err} + } + } + + switch mnt.Type { + case mount.TypeBind: + if len(mnt.Source) == 0 { + return &errMountConfig{mnt, errMissingField("Source")} + } + // Don't error out just because the propagation mode is not supported on the platform + if opts := mnt.BindOptions; opts != nil { + if len(opts.Propagation) > 0 && len(propagationModes) > 0 { + if _, ok := propagationModes[opts.Propagation]; !ok { + return &errMountConfig{mnt, fmt.Errorf("invalid propagation mode: %s", opts.Propagation)} + } + } + } + if mnt.VolumeOptions != nil { + return &errMountConfig{mnt, errExtraField("VolumeOptions")} + } + + if err := validateAbsolute(mnt.Source); err != nil { + return &errMountConfig{mnt, err} + } + + // Do not allow binding to non-existent path + if !opts.skipBindSourceCheck { + fi, err := os.Stat(mnt.Source) + if err != nil { + if !os.IsNotExist(err) { + return &errMountConfig{mnt, err} + } + return &errMountConfig{mnt, errBindNotExist} + } + if err := validateStat(fi); err != nil { + return &errMountConfig{mnt, err} + } + } + case mount.TypeVolume: + if mnt.BindOptions != nil { + return &errMountConfig{mnt, errExtraField("BindOptions")} + } + + if len(mnt.Source) == 0 && mnt.ReadOnly { + return &errMountConfig{mnt, fmt.Errorf("must not set ReadOnly mode when using anonymous volumes")} + } + + if len(mnt.Source) != 0 { + if valid, err := IsVolumeNameValid(mnt.Source); !valid { + if err == nil { + err = errors.New("invalid volume name") + } + return 
&errMountConfig{mnt, err} + } + } + case mount.TypeTmpfs: + if len(mnt.Source) != 0 { + return &errMountConfig{mnt, errExtraField("Source")} + } + if err := ValidateTmpfsMountDestination(mnt.Target); err != nil { + return &errMountConfig{mnt, err} + } + if _, err := ConvertTmpfsOptions(mnt.TmpfsOptions, mnt.ReadOnly); err != nil { + return &errMountConfig{mnt, err} + } + default: + return &errMountConfig{mnt, errors.New("mount type unknown")} + } + return nil +} + +type errMountConfig struct { + mount *mount.Mount + err error +} + +func (e *errMountConfig) Error() string { + return fmt.Sprintf("invalid mount config for type %q: %v", e.mount.Type, e.err.Error()) +} + +func errExtraField(name string) error { + return fmt.Errorf("field %s must not be specified", name) +} +func errMissingField(name string) error { + return fmt.Errorf("field %s must not be empty", name) +} + +func validateAbsolute(p string) error { + p = convertSlash(p) + if filepath.IsAbs(p) { + return nil + } + return fmt.Errorf("invalid mount path: '%s' mount path must be absolute", p) +} + +// ValidateTmpfsMountDestination validates the destination of tmpfs mount. +// Currently, we have only two obvious rule for validation: +// - path must not be "/" +// - path must be absolute +// We should add more rules carefully (#30166) +func ValidateTmpfsMountDestination(dest string) error { + if err := validateNotRoot(dest); err != nil { + return err + } + return validateAbsolute(dest) +} diff --git a/vendor/github.com/moby/moby/volume/validate_test.go b/vendor/github.com/moby/moby/volume/validate_test.go new file mode 100644 index 000000000..8732500fc --- /dev/null +++ b/vendor/github.com/moby/moby/volume/validate_test.go @@ -0,0 +1,43 @@ +package volume + +import ( + "errors" + "io/ioutil" + "os" + "strings" + "testing" + + "github.com/docker/docker/api/types/mount" +) + +func TestValidateMount(t *testing.T) { + testDir, err := ioutil.TempDir("", "test-validate-mount") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(testDir) + + cases := []struct { + input mount.Mount + expected error + }{ + {mount.Mount{Type: mount.TypeVolume}, errMissingField("Target")}, + {mount.Mount{Type: mount.TypeVolume, Target: testDestinationPath, Source: "hello"}, nil}, + {mount.Mount{Type: mount.TypeVolume, Target: testDestinationPath}, nil}, + {mount.Mount{Type: mount.TypeBind}, errMissingField("Target")}, + {mount.Mount{Type: mount.TypeBind, Target: testDestinationPath}, errMissingField("Source")}, + {mount.Mount{Type: mount.TypeBind, Target: testDestinationPath, Source: testSourcePath, VolumeOptions: &mount.VolumeOptions{}}, errExtraField("VolumeOptions")}, + {mount.Mount{Type: mount.TypeBind, Source: testSourcePath, Target: testDestinationPath}, errBindNotExist}, + {mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath}, nil}, + {mount.Mount{Type: "invalid", Target: testDestinationPath}, errors.New("mount type unknown")}, + } + for i, x := range cases { + err := validateMountConfig(&x.input) + if err == nil && x.expected == nil { + continue + } + if (err == nil && x.expected != nil) || (x.expected == nil && err != nil) || !strings.Contains(err.Error(), x.expected.Error()) { + t.Fatalf("expected %q, got %q, case: %d", x.expected, err, i) + } + } +} diff --git a/vendor/github.com/moby/moby/volume/validate_test_unix.go b/vendor/github.com/moby/moby/volume/validate_test_unix.go new file mode 100644 index 000000000..dd1de2f64 --- /dev/null +++ b/vendor/github.com/moby/moby/volume/validate_test_unix.go @@ -0,0 +1,8 @@ +// 
+build !windows
+
+package volume
+
+var (
+	testDestinationPath = "/foo"
+	testSourcePath      = "/foo"
+)
diff --git a/vendor/github.com/moby/moby/volume/validate_test_windows.go b/vendor/github.com/moby/moby/volume/validate_test_windows.go
new file mode 100644
index 000000000..d5f86ac85
--- /dev/null
+++ b/vendor/github.com/moby/moby/volume/validate_test_windows.go
@@ -0,0 +1,6 @@
+package volume
+
+var (
+	testDestinationPath = `c:\foo`
+	testSourcePath      = `c:\foo`
+)
diff --git a/vendor/github.com/moby/moby/volume/volume.go b/vendor/github.com/moby/moby/volume/volume.go
new file mode 100644
index 000000000..8598d4cb8
--- /dev/null
+++ b/vendor/github.com/moby/moby/volume/volume.go
@@ -0,0 +1,374 @@
+package volume
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+	"syscall"
+	"time"
+
+	mounttypes "github.com/docker/docker/api/types/mount"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/docker/docker/pkg/stringid"
+	"github.com/opencontainers/selinux/go-selinux/label"
+	"github.com/pkg/errors"
+)
+
+// DefaultDriverName is the driver name used for the driver
+// implemented in the local package.
+const DefaultDriverName = "local"
+
+// Scopes define whether a volume is cluster-wide (global) or local only.
+// Scopes are returned by the volume driver when it is queried for capabilities and then set on a volume
+const (
+	LocalScope  = "local"
+	GlobalScope = "global"
+)
+
+// Driver is for creating and removing volumes.
+type Driver interface {
+	// Name returns the name of the volume driver.
+	Name() string
+	// Create makes a new volume with the given name.
+	Create(name string, opts map[string]string) (Volume, error)
+	// Remove deletes the volume.
+	Remove(vol Volume) (err error)
+	// List lists all the volumes the driver has
+	List() ([]Volume, error)
+	// Get retrieves the volume with the requested name
+	Get(name string) (Volume, error)
+	// Scope returns the scope of the driver (e.g. `global` or `local`).
+	// Scope determines how the driver is handled at a cluster level
+	Scope() string
+}
+
+// Capability defines a set of capabilities that a driver is able to handle.
+type Capability struct {
+	// Scope is the scope of the driver, `global` or `local`
+	// A `global` scope indicates that the driver manages volumes across the cluster
+	// A `local` scope indicates that the driver only manages volume resources local to the host
+	// Scope is declared by the driver
+	Scope string
+}
+
+// Volume is a place to store data. It is backed by a specific driver, and can be mounted.
+type Volume interface {
+	// Name returns the name of the volume
+	Name() string
+	// DriverName returns the name of the driver which owns this volume.
+	DriverName() string
+	// Path returns the absolute path to the volume.
+	Path() string
+	// Mount mounts the volume and returns the absolute path to
+	// where it can be consumed.
+	Mount(id string) (string, error)
+	// Unmount unmounts the volume when it is no longer in use.
+	Unmount(id string) error
+	// CreatedAt returns Volume Creation time
+	CreatedAt() (time.Time, error)
+	// Status returns low-level status information about a volume
+	Status() map[string]interface{}
+}
+
+// DetailedVolume wraps a Volume with user-defined labels, options, and cluster scope (e.g., `local` or `global`)
+type DetailedVolume interface {
+	Labels() map[string]string
+	Options() map[string]string
+	Scope() string
+	Volume
+}
+
+// MountPoint is the intersection point between a volume and a container. It
+// specifies which volume is to be used and where inside a container it should
+// be mounted.
+type MountPoint struct {
+	// Source is the source path of the mount.
+	// E.g. `mount --bind /foo /bar`, `/foo` is the `Source`.
+	Source string
+	// Destination is the path relative to the container root (`/`) to the mount point
+	// It is where the `Source` is mounted to
+	Destination string
+	// RW is set to true when the mountpoint should be mounted as read-write
+	RW bool
+	// Name is the name reference to the underlying data defined by `Source`
+	// e.g., the volume name
+	Name string
+	// Driver is the volume driver used to create the volume (if it is a volume)
+	Driver string
+	// Type of mount to use, see `Type` definitions in github.com/docker/docker/api/types/mount
+	Type mounttypes.Type `json:",omitempty"`
+	// Volume is the volume providing data to this mountpoint.
+	// This is nil unless `Type` is set to `TypeVolume`
+	Volume Volume `json:"-"`
+
+	// Mode is the comma separated list of options supplied by the user when creating
+	// the bind/volume mount.
+	// Note Mode is not used on Windows
+	Mode string `json:"Relabel,omitempty"` // Originally this field was named `Relabel`
+
+	// Propagation describes how the mounts are propagated from the host into the
+	// mount point, and vice-versa.
+	// See https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt
+	// Note Propagation is not used on Windows
+	Propagation mounttypes.Propagation `json:",omitempty"` // Mount propagation string
+
+	// CopyData specifies whether data should be copied from the container before the first mount
+	// Use a pointer here so we can tell if the user set this value explicitly
+	// This allows us to error out when the user explicitly enabled copy but we can't copy due to the volume being populated
+	CopyData bool `json:"-"`
+	// ID is the opaque ID used to pass to the volume driver.
+	// This should be set by calls to `Mount` and unset by calls to `Unmount`
+	ID string `json:",omitempty"`
+
+	// Spec is a copy of the API request that created this mount.
+	Spec mounttypes.Mount
+
+	// active tracks usage of this mountpoint.
+	// Specifically needed for containers which are running and calls to `docker cp`
+	// because both these actions require mounting the volumes.
+	active int
+}
+
+// Cleanup frees resources used by the mountpoint
+func (m *MountPoint) Cleanup() error {
+	if m.Volume == nil || m.ID == "" {
+		return nil
+	}
+
+	if err := m.Volume.Unmount(m.ID); err != nil {
+		return errors.Wrapf(err, "error unmounting volume %s", m.Volume.Name())
+	}
+
+	m.active--
+	if m.active == 0 {
+		m.ID = ""
+	}
+	return nil
+}
+
+// Setup sets up a mount point by either mounting the volume if it is
+// configured, or creating the source directory if supplied.
+// The optional checkFun parameter allows doing additional checking
+// before creating the source directory on the host.
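+//
+// A minimal sketch (hypothetical arguments: no mount label, root-owned IDs,
+// and no extra check):
+//
+//	path, err := mp.Setup("", idtools.IDPair{UID: 0, GID: 0}, nil)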
+func (m *MountPoint) Setup(mountLabel string, rootIDs idtools.IDPair, checkFun func(m *MountPoint) error) (path string, err error) { + defer func() { + if err != nil || !label.RelabelNeeded(m.Mode) { + return + } + + err = label.Relabel(m.Source, mountLabel, label.IsShared(m.Mode)) + if err == syscall.ENOTSUP { + err = nil + } + if err != nil { + path = "" + err = errors.Wrapf(err, "error setting label on mount source '%s'", m.Source) + } + }() + + if m.Volume != nil { + id := m.ID + if id == "" { + id = stringid.GenerateNonCryptoID() + } + path, err := m.Volume.Mount(id) + if err != nil { + return "", errors.Wrapf(err, "error while mounting volume '%s'", m.Source) + } + + m.ID = id + m.active++ + return path, nil + } + + if len(m.Source) == 0 { + return "", fmt.Errorf("Unable to setup mount point, neither source nor volume defined") + } + + // system.MkdirAll() produces an error if m.Source exists and is a file (not a directory), + if m.Type == mounttypes.TypeBind { + // Before creating the source directory on the host, invoke checkFun if it's not nil. One of + // the use case is to forbid creating the daemon socket as a directory if the daemon is in + // the process of shutting down. + if checkFun != nil { + if err := checkFun(m); err != nil { + return "", err + } + } + // idtools.MkdirAllNewAs() produces an error if m.Source exists and is a file (not a directory) + // also, makes sure that if the directory is created, the correct remapped rootUID/rootGID will own it + if err := idtools.MkdirAllAndChownNew(m.Source, 0755, rootIDs); err != nil { + if perr, ok := err.(*os.PathError); ok { + if perr.Err != syscall.ENOTDIR { + return "", errors.Wrapf(err, "error while creating mount source path '%s'", m.Source) + } + } + } + } + return m.Source, nil +} + +// Path returns the path of a volume in a mount point. +func (m *MountPoint) Path() string { + if m.Volume != nil { + return m.Volume.Path() + } + return m.Source +} + +// ParseVolumesFrom ensures that the supplied volumes-from is valid. +func ParseVolumesFrom(spec string) (string, string, error) { + if len(spec) == 0 { + return "", "", fmt.Errorf("volumes-from specification cannot be an empty string") + } + + specParts := strings.SplitN(spec, ":", 2) + id := specParts[0] + mode := "rw" + + if len(specParts) == 2 { + mode = specParts[1] + if !ValidMountMode(mode) { + return "", "", errInvalidMode(mode) + } + // For now don't allow propagation properties while importing + // volumes from data container. These volumes will inherit + // the same propagation property as of the original volume + // in data container. This probably can be relaxed in future. + if HasPropagation(mode) { + return "", "", errInvalidMode(mode) + } + // Do not allow copy modes on volumes-from + if _, isSet := getCopyMode(mode); isSet { + return "", "", errInvalidMode(mode) + } + } + return id, mode, nil +} + +// ParseMountRaw parses a raw volume spec (e.g. `-v /foo:/bar:shared`) into a +// structured spec. Once the raw spec is parsed it relies on `ParseMountSpec` to +// validate the spec and create a MountPoint +func ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) { + arr, err := splitRawSpec(convertSlash(raw)) + if err != nil { + return nil, err + } + + var spec mounttypes.Mount + var mode string + switch len(arr) { + case 1: + // Just a destination path in the container + spec.Target = arr[0] + case 2: + if ValidMountMode(arr[1]) { + // Destination + Mode is not a valid volume - volumes + // cannot include a mode. e.g. 
/foo:rw + return nil, errInvalidSpec(raw) + } + // Host Source Path or Name + Destination + spec.Source = arr[0] + spec.Target = arr[1] + case 3: + // HostSourcePath+DestinationPath+Mode + spec.Source = arr[0] + spec.Target = arr[1] + mode = arr[2] + default: + return nil, errInvalidSpec(raw) + } + + if !ValidMountMode(mode) { + return nil, errInvalidMode(mode) + } + + if filepath.IsAbs(spec.Source) { + spec.Type = mounttypes.TypeBind + } else { + spec.Type = mounttypes.TypeVolume + } + + spec.ReadOnly = !ReadWrite(mode) + + // cannot assume that if a volume driver is passed in that we should set it + if volumeDriver != "" && spec.Type == mounttypes.TypeVolume { + spec.VolumeOptions = &mounttypes.VolumeOptions{ + DriverConfig: &mounttypes.Driver{Name: volumeDriver}, + } + } + + if copyData, isSet := getCopyMode(mode); isSet { + if spec.VolumeOptions == nil { + spec.VolumeOptions = &mounttypes.VolumeOptions{} + } + spec.VolumeOptions.NoCopy = !copyData + } + if HasPropagation(mode) { + spec.BindOptions = &mounttypes.BindOptions{ + Propagation: GetPropagation(mode), + } + } + + mp, err := ParseMountSpec(spec, platformRawValidationOpts...) + if mp != nil { + mp.Mode = mode + } + if err != nil { + err = fmt.Errorf("%v: %v", errInvalidSpec(raw), err) + } + return mp, err +} + +// ParseMountSpec reads a mount config, validates it, and configures a mountpoint from it. +func ParseMountSpec(cfg mounttypes.Mount, options ...func(*validateOpts)) (*MountPoint, error) { + if err := validateMountConfig(&cfg, options...); err != nil { + return nil, err + } + mp := &MountPoint{ + RW: !cfg.ReadOnly, + Destination: clean(convertSlash(cfg.Target)), + Type: cfg.Type, + Spec: cfg, + } + + switch cfg.Type { + case mounttypes.TypeVolume: + if cfg.Source == "" { + mp.Name = stringid.GenerateNonCryptoID() + } else { + mp.Name = cfg.Source + } + mp.CopyData = DefaultCopyMode + + if cfg.VolumeOptions != nil { + if cfg.VolumeOptions.DriverConfig != nil { + mp.Driver = cfg.VolumeOptions.DriverConfig.Name + } + if cfg.VolumeOptions.NoCopy { + mp.CopyData = false + } + } + case mounttypes.TypeBind: + mp.Source = clean(convertSlash(cfg.Source)) + if cfg.BindOptions != nil && len(cfg.BindOptions.Propagation) > 0 { + mp.Propagation = cfg.BindOptions.Propagation + } else { + // If user did not specify a propagation mode, get + // default propagation mode. 
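+			// (rprivate on Linux; see DefaultPropagationMode in
+			// volume_propagation_linux.go)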
+ mp.Propagation = DefaultPropagationMode
+ }
+ case mounttypes.TypeTmpfs:
+ // NOP
+ }
+ return mp, nil
+}
+
+func errInvalidMode(mode string) error {
+ return fmt.Errorf("invalid mode: %v", mode)
+}
+
+func errInvalidSpec(spec string) error {
+ return fmt.Errorf("invalid volume specification: '%s'", spec)
+}
diff --git a/vendor/github.com/moby/moby/volume/volume_copy.go b/vendor/github.com/moby/moby/volume/volume_copy.go
new file mode 100644
index 000000000..77f06a0d1
--- /dev/null
+++ b/vendor/github.com/moby/moby/volume/volume_copy.go
@@ -0,0 +1,23 @@
+package volume
+
+import "strings"
+
+// {<copy mode>=isEnabled}
+var copyModes = map[string]bool{
+ "nocopy": false,
+}
+
+func copyModeExists(mode string) bool {
+ _, exists := copyModes[mode]
+ return exists
+}
+
+// getCopyMode gets the copy mode from the mode string for mounts
+func getCopyMode(mode string) (bool, bool) {
+ for _, o := range strings.Split(mode, ",") {
+ if isEnabled, exists := copyModes[o]; exists {
+ return isEnabled, true
+ }
+ }
+ return DefaultCopyMode, false
+}
diff --git a/vendor/github.com/moby/moby/volume/volume_copy_unix.go b/vendor/github.com/moby/moby/volume/volume_copy_unix.go
new file mode 100644
index 000000000..ad66e1763
--- /dev/null
+++ b/vendor/github.com/moby/moby/volume/volume_copy_unix.go
@@ -0,0 +1,8 @@
+// +build !windows
+
+package volume
+
+const (
+ // DefaultCopyMode is the copy mode used by default for normal/named volumes
+ DefaultCopyMode = true
+)
diff --git a/vendor/github.com/moby/moby/volume/volume_copy_windows.go b/vendor/github.com/moby/moby/volume/volume_copy_windows.go
new file mode 100644
index 000000000..798638c87
--- /dev/null
+++ b/vendor/github.com/moby/moby/volume/volume_copy_windows.go
@@ -0,0 +1,6 @@
+package volume
+
+const (
+ // DefaultCopyMode is the copy mode used by default for normal/named volumes
+ DefaultCopyMode = false
+)
diff --git a/vendor/github.com/moby/moby/volume/volume_linux.go b/vendor/github.com/moby/moby/volume/volume_linux.go
new file mode 100644
index 000000000..fdf7b63e4
--- /dev/null
+++ b/vendor/github.com/moby/moby/volume/volume_linux.go
@@ -0,0 +1,56 @@
+// +build linux
+
+package volume
+
+import (
+ "fmt"
+ "strings"
+
+ mounttypes "github.com/docker/docker/api/types/mount"
+)
+
+// ConvertTmpfsOptions converts *mounttypes.TmpfsOptions to the raw option string
+// for mount(2).
+func ConvertTmpfsOptions(opt *mounttypes.TmpfsOptions, readOnly bool) (string, error) {
+ var rawOpts []string
+ if readOnly {
+ rawOpts = append(rawOpts, "ro")
+ }
+
+ if opt != nil && opt.Mode != 0 {
+ rawOpts = append(rawOpts, fmt.Sprintf("mode=%o", opt.Mode))
+ }
+
+ if opt != nil && opt.SizeBytes != 0 {
+ // calculate the suffix here, making this Linux-specific, but that is
+ // okay, since the API is that way anyway.
+
+ // we do this by finding the suffix that divides evenly into the
+ // value, returning the value itself, with no suffix, if it fails.
+ //
+ // For the most part, we don't enforce any semantics on these values.
+ // The operating system will usually align this and enforce minimum
+ // and maximums.
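+ //
+ // Illustrative examples (editor's addition, not from the original source):
+ // a SizeBytes of 2*1024*1024 renders as "size=2m", while a value such as
+ // 1500, which no suffix divides evenly, renders as "size=1500".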
+ var (
+ size = opt.SizeBytes
+ suffix string
+ )
+ for _, r := range []struct {
+ suffix string
+ divisor int64
+ }{
+ {"g", 1 << 30},
+ {"m", 1 << 20},
+ {"k", 1 << 10},
+ } {
+ if size%r.divisor == 0 {
+ size = size / r.divisor
+ suffix = r.suffix
+ break
+ }
+ }
+
+ rawOpts = append(rawOpts, fmt.Sprintf("size=%d%s", size, suffix))
+ }
+ return strings.Join(rawOpts, ","), nil
+}
diff --git a/vendor/github.com/moby/moby/volume/volume_linux_test.go b/vendor/github.com/moby/moby/volume/volume_linux_test.go
new file mode 100644
index 000000000..40ce5525a
--- /dev/null
+++ b/vendor/github.com/moby/moby/volume/volume_linux_test.go
@@ -0,0 +1,51 @@
+// +build linux
+
+package volume
+
+import (
+ "strings"
+ "testing"
+
+ mounttypes "github.com/docker/docker/api/types/mount"
+)
+
+func TestConvertTmpfsOptions(t *testing.T) {
+ type testCase struct {
+ opt mounttypes.TmpfsOptions
+ readOnly bool
+ expectedSubstrings []string
+ unexpectedSubstrings []string
+ }
+ cases := []testCase{
+ {
+ opt: mounttypes.TmpfsOptions{SizeBytes: 1024 * 1024, Mode: 0700},
+ readOnly: false,
+ expectedSubstrings: []string{"size=1m", "mode=700"},
+ unexpectedSubstrings: []string{"ro"},
+ },
+ {
+ opt: mounttypes.TmpfsOptions{},
+ readOnly: true,
+ expectedSubstrings: []string{"ro"},
+ unexpectedSubstrings: []string{},
+ },
+ }
+ for _, c := range cases {
+ data, err := ConvertTmpfsOptions(&c.opt, c.readOnly)
+ if err != nil {
+ t.Fatalf("could not convert %+v (readOnly: %v) to string: %v",
+ c.opt, c.readOnly, err)
+ }
+ t.Logf("data=%q", data)
+ for _, s := range c.expectedSubstrings {
+ if !strings.Contains(data, s) {
+ t.Fatalf("expected substring: %s, got %v (case=%+v)", s, data, c)
+ }
+ }
+ for _, s := range c.unexpectedSubstrings {
+ if strings.Contains(data, s) {
+ t.Fatalf("unexpected substring: %s, got %v (case=%+v)", s, data, c)
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/moby/moby/volume/volume_propagation_linux.go b/vendor/github.com/moby/moby/volume/volume_propagation_linux.go
new file mode 100644
index 000000000..1de57ab52
--- /dev/null
+++ b/vendor/github.com/moby/moby/volume/volume_propagation_linux.go
@@ -0,0 +1,47 @@
+// +build linux
+
+package volume
+
+import (
+ "strings"
+
+ mounttypes "github.com/docker/docker/api/types/mount"
+)
+
+// DefaultPropagationMode defines what propagation mode should be used by
+// default if the user has not specified one explicitly.
+const DefaultPropagationMode = mounttypes.PropagationRPrivate
+
+// propagation modes
+var propagationModes = map[mounttypes.Propagation]bool{
+ mounttypes.PropagationPrivate: true,
+ mounttypes.PropagationRPrivate: true,
+ mounttypes.PropagationSlave: true,
+ mounttypes.PropagationRSlave: true,
+ mounttypes.PropagationShared: true,
+ mounttypes.PropagationRShared: true,
+}
+
+// GetPropagation extracts and returns the mount propagation mode. If there
+// are no specifications, then by default it is "rprivate".
+func GetPropagation(mode string) mounttypes.Propagation {
+ for _, o := range strings.Split(mode, ",") {
+ prop := mounttypes.Propagation(o)
+ if propagationModes[prop] {
+ return prop
+ }
+ }
+ return DefaultPropagationMode
+}
+
+// HasPropagation checks if there is a valid propagation mode present in the
+// passed string. It returns true if a valid propagation mode specifier is
+// present, false otherwise.
+func HasPropagation(mode string) bool { + for _, o := range strings.Split(mode, ",") { + if propagationModes[mounttypes.Propagation(o)] { + return true + } + } + return false +} diff --git a/vendor/github.com/moby/moby/volume/volume_propagation_linux_test.go b/vendor/github.com/moby/moby/volume/volume_propagation_linux_test.go new file mode 100644 index 000000000..46d026506 --- /dev/null +++ b/vendor/github.com/moby/moby/volume/volume_propagation_linux_test.go @@ -0,0 +1,65 @@ +// +build linux + +package volume + +import ( + "strings" + "testing" +) + +func TestParseMountRawPropagation(t *testing.T) { + var ( + valid []string + invalid map[string]string + ) + + valid = []string{ + "/hostPath:/containerPath:shared", + "/hostPath:/containerPath:rshared", + "/hostPath:/containerPath:slave", + "/hostPath:/containerPath:rslave", + "/hostPath:/containerPath:private", + "/hostPath:/containerPath:rprivate", + "/hostPath:/containerPath:ro,shared", + "/hostPath:/containerPath:ro,slave", + "/hostPath:/containerPath:ro,private", + "/hostPath:/containerPath:ro,z,shared", + "/hostPath:/containerPath:ro,Z,slave", + "/hostPath:/containerPath:Z,ro,slave", + "/hostPath:/containerPath:slave,Z,ro", + "/hostPath:/containerPath:Z,slave,ro", + "/hostPath:/containerPath:slave,ro,Z", + "/hostPath:/containerPath:rslave,ro,Z", + "/hostPath:/containerPath:ro,rshared,Z", + "/hostPath:/containerPath:ro,Z,rprivate", + } + invalid = map[string]string{ + "/path:/path:ro,rshared,rslave": `invalid mode`, + "/path:/path:ro,z,rshared,rslave": `invalid mode`, + "/path:shared": "invalid volume specification", + "/path:slave": "invalid volume specification", + "/path:private": "invalid volume specification", + "name:/absolute-path:shared": "invalid volume specification", + "name:/absolute-path:rshared": "invalid volume specification", + "name:/absolute-path:slave": "invalid volume specification", + "name:/absolute-path:rslave": "invalid volume specification", + "name:/absolute-path:private": "invalid volume specification", + "name:/absolute-path:rprivate": "invalid volume specification", + } + + for _, path := range valid { + if _, err := ParseMountRaw(path, "local"); err != nil { + t.Fatalf("ParseMountRaw(`%q`) should succeed: error %q", path, err) + } + } + + for path, expectedError := range invalid { + if _, err := ParseMountRaw(path, "local"); err == nil { + t.Fatalf("ParseMountRaw(`%q`) should have failed validation. Err %v", path, err) + } else { + if !strings.Contains(err.Error(), expectedError) { + t.Fatalf("ParseMountRaw(`%q`) error should contain %q, got %v", path, expectedError, err.Error()) + } + } + } +} diff --git a/vendor/github.com/moby/moby/volume/volume_propagation_unsupported.go b/vendor/github.com/moby/moby/volume/volume_propagation_unsupported.go new file mode 100644 index 000000000..7311ffc2e --- /dev/null +++ b/vendor/github.com/moby/moby/volume/volume_propagation_unsupported.go @@ -0,0 +1,24 @@ +// +build !linux + +package volume + +import mounttypes "github.com/docker/docker/api/types/mount" + +// DefaultPropagationMode is used only in linux. In other cases it returns +// empty string. +const DefaultPropagationMode mounttypes.Propagation = "" + +// propagation modes not supported on this platform. +var propagationModes = map[mounttypes.Propagation]bool{} + +// GetPropagation is not supported. Return empty string. +func GetPropagation(mode string) mounttypes.Propagation { + return DefaultPropagationMode +} + +// HasPropagation checks if there is a valid propagation mode present in +// passed string. 
Returns true if a valid propagation mode specifier is +// present, false otherwise. +func HasPropagation(mode string) bool { + return false +} diff --git a/vendor/github.com/moby/moby/volume/volume_test.go b/vendor/github.com/moby/moby/volume/volume_test.go new file mode 100644 index 000000000..5c3e0e381 --- /dev/null +++ b/vendor/github.com/moby/moby/volume/volume_test.go @@ -0,0 +1,269 @@ +package volume + +import ( + "io/ioutil" + "os" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/api/types/mount" +) + +func TestParseMountRaw(t *testing.T) { + var ( + valid []string + invalid map[string]string + ) + + if runtime.GOOS == "windows" { + valid = []string{ + `d:\`, + `d:`, + `d:\path`, + `d:\path with space`, + `c:\:d:\`, + `c:\windows\:d:`, + `c:\windows:d:\s p a c e`, + `c:\windows:d:\s p a c e:RW`, + `c:\program files:d:\s p a c e i n h o s t d i r`, + `0123456789name:d:`, + `MiXeDcAsEnAmE:d:`, + `name:D:`, + `name:D::rW`, + `name:D::RW`, + `name:D::RO`, + `c:/:d:/forward/slashes/are/good/too`, + `c:/:d:/including with/spaces:ro`, + `c:\Windows`, // With capital + `c:\Program Files (x86)`, // With capitals and brackets + } + invalid = map[string]string{ + ``: "invalid volume specification: ", + `.`: "invalid volume specification: ", + `..\`: "invalid volume specification: ", + `c:\:..\`: "invalid volume specification: ", + `c:\:d:\:xyzzy`: "invalid volume specification: ", + `c:`: "cannot be `c:`", + `c:\`: "cannot be `c:`", + `c:\notexist:d:`: `source path does not exist`, + `c:\windows\system32\ntdll.dll:d:`: `source path must be a directory`, + `name<:d:`: `invalid volume specification`, + `name>:d:`: `invalid volume specification`, + `name::d:`: `invalid volume specification`, + `name":d:`: `invalid volume specification`, + `name\:d:`: `invalid volume specification`, + `name*:d:`: `invalid volume specification`, + `name|:d:`: `invalid volume specification`, + `name?:d:`: `invalid volume specification`, + `name/:d:`: `invalid volume specification`, + `d:\pathandmode:rw`: `invalid volume specification`, + `d:\pathandmode:ro`: `invalid volume specification`, + `con:d:`: `cannot be a reserved word for Windows filenames`, + `PRN:d:`: `cannot be a reserved word for Windows filenames`, + `aUx:d:`: `cannot be a reserved word for Windows filenames`, + `nul:d:`: `cannot be a reserved word for Windows filenames`, + `com1:d:`: `cannot be a reserved word for Windows filenames`, + `com2:d:`: `cannot be a reserved word for Windows filenames`, + `com3:d:`: `cannot be a reserved word for Windows filenames`, + `com4:d:`: `cannot be a reserved word for Windows filenames`, + `com5:d:`: `cannot be a reserved word for Windows filenames`, + `com6:d:`: `cannot be a reserved word for Windows filenames`, + `com7:d:`: `cannot be a reserved word for Windows filenames`, + `com8:d:`: `cannot be a reserved word for Windows filenames`, + `com9:d:`: `cannot be a reserved word for Windows filenames`, + `lpt1:d:`: `cannot be a reserved word for Windows filenames`, + `lpt2:d:`: `cannot be a reserved word for Windows filenames`, + `lpt3:d:`: `cannot be a reserved word for Windows filenames`, + `lpt4:d:`: `cannot be a reserved word for Windows filenames`, + `lpt5:d:`: `cannot be a reserved word for Windows filenames`, + `lpt6:d:`: `cannot be a reserved word for Windows filenames`, + `lpt7:d:`: `cannot be a reserved word for Windows filenames`, + `lpt8:d:`: `cannot be a reserved word for Windows filenames`, + `lpt9:d:`: `cannot be a reserved word for Windows filenames`, + 
`c:\windows\system32\ntdll.dll`: `Only directories can be mapped on this platform`, + } + + } else { + valid = []string{ + "/home", + "/home:/home", + "/home:/something/else", + "/with space", + "/home:/with space", + "relative:/absolute-path", + "hostPath:/containerPath:ro", + "/hostPath:/containerPath:rw", + "/rw:/ro", + } + invalid = map[string]string{ + "": "invalid volume specification", + "./": "mount path must be absolute", + "../": "mount path must be absolute", + "/:../": "mount path must be absolute", + "/:path": "mount path must be absolute", + ":": "invalid volume specification", + "/tmp:": "invalid volume specification", + ":test": "invalid volume specification", + ":/test": "invalid volume specification", + "tmp:": "invalid volume specification", + ":test:": "invalid volume specification", + "::": "invalid volume specification", + ":::": "invalid volume specification", + "/tmp:::": "invalid volume specification", + ":/tmp::": "invalid volume specification", + "/path:rw": "invalid volume specification", + "/path:ro": "invalid volume specification", + "/rw:rw": "invalid volume specification", + "path:ro": "invalid volume specification", + "/path:/path:sw": `invalid mode`, + "/path:/path:rwz": `invalid mode`, + } + } + + for _, path := range valid { + if _, err := ParseMountRaw(path, "local"); err != nil { + t.Fatalf("ParseMountRaw(`%q`) should succeed: error %q", path, err) + } + } + + for path, expectedError := range invalid { + if mp, err := ParseMountRaw(path, "local"); err == nil { + t.Fatalf("ParseMountRaw(`%q`) should have failed validation. Err '%v' - MP: %v", path, err, mp) + } else { + if !strings.Contains(err.Error(), expectedError) { + t.Fatalf("ParseMountRaw(`%q`) error should contain %q, got %v", path, expectedError, err.Error()) + } + } + } +} + +// testParseMountRaw is a structure used by TestParseMountRawSplit for +// specifying test cases for the ParseMountRaw() function. 
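+// Each case (editor's note) pairs a raw bind spec and volume driver with the
+// expected destination, source, name, driver, and read-write flag, or marks
+// the spec as one that should fail to parse.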
+type testParseMountRaw struct { + bind string + driver string + expDest string + expSource string + expName string + expDriver string + expRW bool + fail bool +} + +func TestParseMountRawSplit(t *testing.T) { + var cases []testParseMountRaw + if runtime.GOOS == "windows" { + cases = []testParseMountRaw{ + {`c:\:d:`, "local", `d:`, `c:\`, ``, "", true, false}, + {`c:\:d:\`, "local", `d:\`, `c:\`, ``, "", true, false}, + {`c:\:d:\:ro`, "local", `d:\`, `c:\`, ``, "", false, false}, + {`c:\:d:\:rw`, "local", `d:\`, `c:\`, ``, "", true, false}, + {`c:\:d:\:foo`, "local", `d:\`, `c:\`, ``, "", false, true}, + {`name:d::rw`, "local", `d:`, ``, `name`, "local", true, false}, + {`name:d:`, "local", `d:`, ``, `name`, "local", true, false}, + {`name:d::ro`, "local", `d:`, ``, `name`, "local", false, false}, + {`name:c:`, "", ``, ``, ``, "", true, true}, + {`driver/name:c:`, "", ``, ``, ``, "", true, true}, + } + } else { + cases = []testParseMountRaw{ + {"/tmp:/tmp1", "", "/tmp1", "/tmp", "", "", true, false}, + {"/tmp:/tmp2:ro", "", "/tmp2", "/tmp", "", "", false, false}, + {"/tmp:/tmp3:rw", "", "/tmp3", "/tmp", "", "", true, false}, + {"/tmp:/tmp4:foo", "", "", "", "", "", false, true}, + {"name:/named1", "", "/named1", "", "name", "", true, false}, + {"name:/named2", "external", "/named2", "", "name", "external", true, false}, + {"name:/named3:ro", "local", "/named3", "", "name", "local", false, false}, + {"local/name:/tmp:rw", "", "/tmp", "", "local/name", "", true, false}, + {"/tmp:tmp", "", "", "", "", "", true, true}, + } + } + + for i, c := range cases { + t.Logf("case %d", i) + m, err := ParseMountRaw(c.bind, c.driver) + if c.fail { + if err == nil { + t.Fatalf("Expected error, was nil, for spec %s\n", c.bind) + } + continue + } + + if m == nil || err != nil { + t.Fatalf("ParseMountRaw failed for spec '%s', driver '%s', error '%v'", c.bind, c.driver, err.Error()) + continue + } + + if m.Destination != c.expDest { + t.Fatalf("Expected destination '%s, was %s', for spec '%s'", c.expDest, m.Destination, c.bind) + } + + if m.Source != c.expSource { + t.Fatalf("Expected source '%s', was '%s', for spec '%s'", c.expSource, m.Source, c.bind) + } + + if m.Name != c.expName { + t.Fatalf("Expected name '%s', was '%s' for spec '%s'", c.expName, m.Name, c.bind) + } + + if m.Driver != c.expDriver { + t.Fatalf("Expected driver '%s', was '%s', for spec '%s'", c.expDriver, m.Driver, c.bind) + } + + if m.RW != c.expRW { + t.Fatalf("Expected RW '%v', was '%v' for spec '%s'", c.expRW, m.RW, c.bind) + } + } +} + +func TestParseMountSpec(t *testing.T) { + type c struct { + input mount.Mount + expected MountPoint + } + testDir, err := ioutil.TempDir("", "test-mount-config") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(testDir) + + cases := []c{ + {mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath, ReadOnly: true}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath, Propagation: DefaultPropagationMode}}, + {mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath, RW: true, Propagation: DefaultPropagationMode}}, + {mount.Mount{Type: mount.TypeBind, Source: testDir + string(os.PathSeparator), Target: testDestinationPath, ReadOnly: true}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath, Propagation: DefaultPropagationMode}}, + {mount.Mount{Type: mount.TypeBind, Source: testDir, Target: 
testDestinationPath + string(os.PathSeparator), ReadOnly: true}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath, Propagation: DefaultPropagationMode}}, + {mount.Mount{Type: mount.TypeVolume, Target: testDestinationPath}, MountPoint{Type: mount.TypeVolume, Destination: testDestinationPath, RW: true, CopyData: DefaultCopyMode}}, + {mount.Mount{Type: mount.TypeVolume, Target: testDestinationPath + string(os.PathSeparator)}, MountPoint{Type: mount.TypeVolume, Destination: testDestinationPath, RW: true, CopyData: DefaultCopyMode}}, + } + + for i, c := range cases { + t.Logf("case %d", i) + mp, err := ParseMountSpec(c.input) + if err != nil { + t.Fatal(err) + } + + if c.expected.Type != mp.Type { + t.Fatalf("Expected mount types to match. Expected: '%s', Actual: '%s'", c.expected.Type, mp.Type) + } + if c.expected.Destination != mp.Destination { + t.Fatalf("Expected mount destination to match. Expected: '%s', Actual: '%s'", c.expected.Destination, mp.Destination) + } + if c.expected.Source != mp.Source { + t.Fatalf("Expected mount source to match. Expected: '%s', Actual: '%s'", c.expected.Source, mp.Source) + } + if c.expected.RW != mp.RW { + t.Fatalf("Expected mount writable to match. Expected: '%v', Actual: '%v'", c.expected.RW, mp.RW) + } + if c.expected.Propagation != mp.Propagation { + t.Fatalf("Expected mount propagation to match. Expected: '%v', Actual: '%s'", c.expected.Propagation, mp.Propagation) + } + if c.expected.Driver != mp.Driver { + t.Fatalf("Expected mount driver to match. Expected: '%v', Actual: '%s'", c.expected.Driver, mp.Driver) + } + if c.expected.CopyData != mp.CopyData { + t.Fatalf("Expected mount copy data to match. Expected: '%v', Actual: '%v'", c.expected.CopyData, mp.CopyData) + } + } +} diff --git a/vendor/github.com/moby/moby/volume/volume_unix.go b/vendor/github.com/moby/moby/volume/volume_unix.go new file mode 100644 index 000000000..e35b70c03 --- /dev/null +++ b/vendor/github.com/moby/moby/volume/volume_unix.go @@ -0,0 +1,148 @@ +// +build linux freebsd darwin solaris + +package volume + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + mounttypes "github.com/docker/docker/api/types/mount" +) + +var platformRawValidationOpts = []func(o *validateOpts){ + // need to make sure to not error out if the bind source does not exist on unix + // this is supported for historical reasons, the path will be automatically + // created later. + func(o *validateOpts) { o.skipBindSourceCheck = true }, +} + +// read-write modes +var rwModes = map[string]bool{ + "rw": true, + "ro": true, +} + +// label modes +var labelModes = map[string]bool{ + "Z": true, + "z": true, +} + +// consistency modes +var consistencyModes = map[mounttypes.Consistency]bool{ + mounttypes.ConsistencyFull: true, + mounttypes.ConsistencyCached: true, + mounttypes.ConsistencyDelegated: true, +} + +// BackwardsCompatible decides whether this mount point can be +// used in old versions of Docker or not. +// Only bind mounts and local volumes can be used in old versions of Docker. +func (m *MountPoint) BackwardsCompatible() bool { + return len(m.Source) > 0 || m.Driver == DefaultDriverName +} + +// HasResource checks whether the given absolute path for a container is in +// this mount point. If the relative path starts with `../` then the resource +// is outside of this mount point, but we can't simply check for this prefix +// because it misses `..` which is also outside of the mount, so check both. 
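+// For example (illustrative, editor's addition): with Destination "/foo",
+// "/foo/bar" is a resource of the mount, while "/foobar" and "/" are not.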
+func (m *MountPoint) HasResource(absolutePath string) bool {
+ relPath, err := filepath.Rel(m.Destination, absolutePath)
+ return err == nil && relPath != ".." && !strings.HasPrefix(relPath, fmt.Sprintf("..%c", filepath.Separator))
+}
+
+// IsVolumeNameValid checks a volume name in a platform specific manner.
+func IsVolumeNameValid(name string) (bool, error) {
+ return true, nil
+}
+
+// ValidMountMode will make sure the mount mode is valid.
+// It returns true if the mode is a valid mount mode, false otherwise.
+func ValidMountMode(mode string) bool {
+ if mode == "" {
+ return true
+ }
+
+ rwModeCount := 0
+ labelModeCount := 0
+ propagationModeCount := 0
+ copyModeCount := 0
+ consistencyModeCount := 0
+
+ for _, o := range strings.Split(mode, ",") {
+ switch {
+ case rwModes[o]:
+ rwModeCount++
+ case labelModes[o]:
+ labelModeCount++
+ case propagationModes[mounttypes.Propagation(o)]:
+ propagationModeCount++
+ case copyModeExists(o):
+ copyModeCount++
+ case consistencyModes[mounttypes.Consistency(o)]:
+ consistencyModeCount++
+ default:
+ return false
+ }
+ }
+
+ // Only one string for each mode is allowed.
+ if rwModeCount > 1 || labelModeCount > 1 || propagationModeCount > 1 || copyModeCount > 1 || consistencyModeCount > 1 {
+ return false
+ }
+ return true
+}
+
+// ReadWrite tells you if a mode string is a valid read-write mode or not.
+// If there are no specifications w.r.t. read-write mode, then by default
+// it returns true.
+func ReadWrite(mode string) bool {
+ if !ValidMountMode(mode) {
+ return false
+ }
+
+ for _, o := range strings.Split(mode, ",") {
+ if o == "ro" {
+ return false
+ }
+ }
+ return true
+}
+
+func validateNotRoot(p string) error {
+ p = filepath.Clean(convertSlash(p))
+ if p == "/" {
+ return fmt.Errorf("invalid specification: destination can't be '/'")
+ }
+ return nil
+}
+
+func validateCopyMode(mode bool) error {
+ return nil
+}
+
+func convertSlash(p string) string {
+ return filepath.ToSlash(p)
+}
+
+func splitRawSpec(raw string) ([]string, error) {
+ if strings.Count(raw, ":") > 2 {
+ return nil, errInvalidSpec(raw)
+ }
+
+ arr := strings.SplitN(raw, ":", 3)
+ if arr[0] == "" {
+ return nil, errInvalidSpec(raw)
+ }
+ return arr, nil
+}
+
+func clean(p string) string {
+ return filepath.Clean(p)
+}
+
+func validateStat(fi os.FileInfo) error {
+ return nil
+}
diff --git a/vendor/github.com/moby/moby/volume/volume_unsupported.go b/vendor/github.com/moby/moby/volume/volume_unsupported.go
new file mode 100644
index 000000000..ff9d6afa2
--- /dev/null
+++ b/vendor/github.com/moby/moby/volume/volume_unsupported.go
@@ -0,0 +1,16 @@
+// +build !linux
+
+package volume
+
+import (
+ "fmt"
+ "runtime"
+
+ mounttypes "github.com/docker/docker/api/types/mount"
+)
+
+// ConvertTmpfsOptions converts *mounttypes.TmpfsOptions to the raw option string
+// for mount(2).
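+// On platforms other than Linux this stub always returns an error, since
+// tmpfs mounts are only supported on Linux (editor's note).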
+func ConvertTmpfsOptions(opt *mounttypes.TmpfsOptions, readOnly bool) (string, error) { + return "", fmt.Errorf("%s does not support tmpfs", runtime.GOOS) +} diff --git a/vendor/github.com/moby/moby/volume/volume_windows.go b/vendor/github.com/moby/moby/volume/volume_windows.go new file mode 100644 index 000000000..22f6fc7a1 --- /dev/null +++ b/vendor/github.com/moby/moby/volume/volume_windows.go @@ -0,0 +1,201 @@ +package volume + +import ( + "fmt" + "os" + "path/filepath" + "regexp" + "strings" +) + +// read-write modes +var rwModes = map[string]bool{ + "rw": true, +} + +// read-only modes +var roModes = map[string]bool{ + "ro": true, +} + +var platformRawValidationOpts = []func(*validateOpts){ + // filepath.IsAbs is weird on Windows: + // `c:` is not considered an absolute path + // `c:\` is considered an absolute path + // In any case, the regex matching below ensures absolute paths + // TODO: consider this a bug with filepath.IsAbs (?) + func(o *validateOpts) { o.skipAbsolutePathCheck = true }, +} + +const ( + // Spec should be in the format [source:]destination[:mode] + // + // Examples: c:\foo bar:d:rw + // c:\foo:d:\bar + // myname:d: + // d:\ + // + // Explanation of this regex! Thanks @thaJeztah on IRC and gist for help. See + // https://gist.github.com/thaJeztah/6185659e4978789fb2b2. A good place to + // test is https://regex-golang.appspot.com/assets/html/index.html + // + // Useful link for referencing named capturing groups: + // http://stackoverflow.com/questions/20750843/using-named-matches-from-go-regex + // + // There are three match groups: source, destination and mode. + // + + // RXHostDir is the first option of a source + RXHostDir = `[a-z]:\\(?:[^\\/:*?"<>|\r\n]+\\?)*` + // RXName is the second option of a source + RXName = `[^\\/:*?"<>|\r\n]+` + // RXReservedNames are reserved names not possible on Windows + RXReservedNames = `(con)|(prn)|(nul)|(aux)|(com[1-9])|(lpt[1-9])` + + // RXSource is the combined possibilities for a source + RXSource = `((?P((` + RXHostDir + `)|(` + RXName + `))):)?` + + // Source. Can be either a host directory, a name, or omitted: + // HostDir: + // - Essentially using the folder solution from + // https://www.safaribooksonline.com/library/view/regular-expressions-cookbook/9781449327453/ch08s18.html + // but adding case insensitivity. + // - Must be an absolute path such as c:\path + // - Can include spaces such as `c:\program files` + // - And then followed by a colon which is not in the capture group + // - And can be optional + // Name: + // - Must not contain invalid NTFS filename characters (https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx) + // - And then followed by a colon which is not in the capture group + // - And can be optional + + // RXDestination is the regex expression for the mount destination + RXDestination = `(?P([a-z]):((?:\\[^\\/:*?"<>\r\n]+)*\\?))` + // Destination (aka container path): + // - Variation on hostdir but can be a drive followed by colon as well + // - If a path, must be absolute. Can include spaces + // - Drive cannot be c: (explicitly checked in code, not RegEx) + + // RXMode is the regex expression for the mode of the mount + // Mode (optional): + // - Hopefully self explanatory in comparison to above regex's. + // - Colon is not in the capture group + RXMode = `(:(?P(?i)ro|rw))?` +) + +// BackwardsCompatible decides whether this mount point can be +// used in old versions of Docker or not. +// Windows volumes are never backwards compatible. 
+func (m *MountPoint) BackwardsCompatible() bool { + return false +} + +func splitRawSpec(raw string) ([]string, error) { + specExp := regexp.MustCompile(`^` + RXSource + RXDestination + RXMode + `$`) + match := specExp.FindStringSubmatch(strings.ToLower(raw)) + + // Must have something back + if len(match) == 0 { + return nil, errInvalidSpec(raw) + } + + var split []string + matchgroups := make(map[string]string) + // Pull out the sub expressions from the named capture groups + for i, name := range specExp.SubexpNames() { + matchgroups[name] = strings.ToLower(match[i]) + } + if source, exists := matchgroups["source"]; exists { + if source != "" { + split = append(split, source) + } + } + if destination, exists := matchgroups["destination"]; exists { + if destination != "" { + split = append(split, destination) + } + } + if mode, exists := matchgroups["mode"]; exists { + if mode != "" { + split = append(split, mode) + } + } + // Fix #26329. If the destination appears to be a file, and the source is null, + // it may be because we've fallen through the possible naming regex and hit a + // situation where the user intention was to map a file into a container through + // a local volume, but this is not supported by the platform. + if matchgroups["source"] == "" && matchgroups["destination"] != "" { + validName, err := IsVolumeNameValid(matchgroups["destination"]) + if err != nil { + return nil, err + } + if !validName { + if fi, err := os.Stat(matchgroups["destination"]); err == nil { + if !fi.IsDir() { + return nil, fmt.Errorf("file '%s' cannot be mapped. Only directories can be mapped on this platform", matchgroups["destination"]) + } + } + } + } + return split, nil +} + +// IsVolumeNameValid checks a volume name in a platform specific manner. +func IsVolumeNameValid(name string) (bool, error) { + nameExp := regexp.MustCompile(`^` + RXName + `$`) + if !nameExp.MatchString(name) { + return false, nil + } + nameExp = regexp.MustCompile(`^` + RXReservedNames + `$`) + if nameExp.MatchString(name) { + return false, fmt.Errorf("volume name %q cannot be a reserved word for Windows filenames", name) + } + return true, nil +} + +// ValidMountMode will make sure the mount mode is valid. +// returns if it's a valid mount mode or not. +func ValidMountMode(mode string) bool { + if mode == "" { + return true + } + return roModes[strings.ToLower(mode)] || rwModes[strings.ToLower(mode)] +} + +// ReadWrite tells you if a mode string is a valid read-write mode or not. +func ReadWrite(mode string) bool { + return rwModes[strings.ToLower(mode)] || mode == "" +} + +func validateNotRoot(p string) error { + p = strings.ToLower(convertSlash(p)) + if p == "c:" || p == `c:\` { + return fmt.Errorf("destination path cannot be `c:` or `c:\\`: %v", p) + } + return nil +} + +func validateCopyMode(mode bool) error { + if mode { + return fmt.Errorf("Windows does not support copying image path content") + } + return nil +} + +func convertSlash(p string) string { + return filepath.FromSlash(p) +} + +func clean(p string) string { + if match, _ := regexp.MatchString("^[a-z]:$", p); match { + return p + } + return filepath.Clean(p) +} + +func validateStat(fi os.FileInfo) error { + if !fi.IsDir() { + return fmt.Errorf("source path must be a directory") + } + return nil +} diff --git a/vendor/github.com/onsi/gomega/.gitignore b/vendor/github.com/onsi/gomega/.gitignore new file mode 100644 index 000000000..720c13cba --- /dev/null +++ b/vendor/github.com/onsi/gomega/.gitignore @@ -0,0 +1,5 @@ +.DS_Store +*.test +. 
+.idea
+gomega.iml
diff --git a/vendor/github.com/onsi/gomega/.travis.yml b/vendor/github.com/onsi/gomega/.travis.yml
new file mode 100644
index 000000000..61d0f41fa
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/.travis.yml
@@ -0,0 +1,12 @@
+language: go
+go:
+ - 1.6
+ - 1.7
+ - 1.8
+
+install:
+ - go get -v ./...
+ - go get github.com/onsi/ginkgo
+ - go install github.com/onsi/ginkgo/ginkgo
+
+script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --failOnPending --randomizeSuites --race
diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md
new file mode 100644
index 000000000..a3e8ee444
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/CHANGELOG.md
@@ -0,0 +1,74 @@
+## HEAD
+
+## 1.2.0
+
+Improvements:
+
+- Added `BeSent` which attempts to send a value down a channel and fails if the attempt blocks. Can be paired with `Eventually` to safely send a value down a channel with a timeout.
+- `Ω`, `Expect`, `Eventually`, and `Consistently` now immediately `panic` if there is no registered fail handler. This is always a mistake that can hide failing tests.
+- `Receive()` no longer errors when passed a closed channel; it's perfectly fine to attempt to read from a closed channel, so Ω(c).Should(Receive()) always fails and Ω(c).ShouldNot(Receive()) always passes with a closed channel.
+- Added `HavePrefix` and `HaveSuffix` matchers.
+- `ghttp` can now handle concurrent requests.
+- Added `Succeed` which allows one to write `Ω(MyFunction()).Should(Succeed())`.
+- Improved `ghttp`'s behavior around failing assertions and panics:
+ - If a registered handler makes a failing assertion, `ghttp` will return `500`.
+ - If a registered handler panics, `ghttp` will return `500` *and* fail the test. This is new behavior that may cause existing code to break. This code is almost certainly incorrect and creating a false positive.
+- `ghttp` servers can take an `io.Writer`. `ghttp` will write a line to the writer when each request arrives.
+- Added `WithTransform` matcher to allow munging input data before feeding it into the relevant matcher.
+- Added boolean `And`, `Or`, and `Not` matchers to allow creating composite matchers.
+- Added `gbytes.TimeoutCloser`, `gbytes.TimeoutReader`, and `gbytes.TimeoutWriter` - these are convenience wrappers that time out if the underlying Closer/Reader/Writer does not return within the allotted time.
+- Added `gbytes.BufferReader` - this constructs a `gbytes.Buffer` that asynchronously reads the passed-in `io.Reader` into its buffer.
+
+Bug Fixes:
+- gexec: `session.Wait` now uses `EventuallyWithOffset` to get the right line number in the failure.
+- `ContainElement` no longer bails if a passed-in matcher errors.
+
+## 1.0 (8/2/2014)
+
+No changes. Dropping "beta" from the version number.
+
+## 1.0.0-beta (7/8/2014)
+Breaking Changes:
+
+- Changed OmegaMatcher interface. Instead of having `Match` return failure messages, two new methods `FailureMessage` and `NegatedFailureMessage` are called instead.
+- Moved and renamed OmegaFailHandler to types.GomegaFailHandler and OmegaMatcher to types.GomegaMatcher. Any references to OmegaMatcher in any custom matchers will need to be changed to point to types.GomegaMatcher.
+
+New Test-Support Features:
+
+- `ghttp`: supports testing http clients
+ - Provides a flexible fake http server
+ - Provides a collection of chainable http handlers that perform assertions.
+- `gbytes`: supports making ordered assertions against streams of data
+ - Provides a `gbytes.Buffer`
+ - Provides a `Say` matcher to perform ordered assertions against output data
+- `gexec`: supports testing external processes
+ - Provides support for building Go binaries
+ - Wraps and starts `exec.Cmd` commands
+ - Makes it easy to assert against stdout and stderr
+ - Makes it easy to send signals and wait for processes to exit
+ - Provides an `Exit` matcher to assert against exit code.
+
+DSL Changes:
+
+- `Eventually` and `Consistently` can accept `time.Duration` interval and polling inputs.
+- The default timeouts for `Eventually` and `Consistently` are now configurable.
+
+New Matchers:
+
+- `ConsistOf`: order-independent assertion against the elements of an array/slice or keys of a map.
+- `BeTemporally`: like `BeNumerically` but for `time.Time`.
+- `HaveKeyWithValue`: asserts a map has a given key with the given value.
+
+Updated Matchers:
+
+- `Receive` matcher can take a matcher as an argument and passes only if the channel under test receives an object that satisfies the passed-in matcher.
+- Matchers that implement `MatchMayChangeInTheFuture(actual interface{}) bool` can inform `Eventually` and/or `Consistently` when a match has no chance of changing status in the future. For example, `Receive` returns `false` when a channel is closed.
+
+Misc:
+
+- Start using semantic versioning
+- Start maintaining changelog
+
+Major refactor:
+
+- Pull out Gomega's internals to `internal`
diff --git a/vendor/github.com/onsi/gomega/CONTRIBUTING.md b/vendor/github.com/onsi/gomega/CONTRIBUTING.md
new file mode 100644
index 000000000..73d4020e6
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/CONTRIBUTING.md
@@ -0,0 +1,11 @@
+# Contributing to Gomega
+
+Your contributions to Gomega are essential for its long-term maintenance and improvement. To make a contribution:
+
+- Please **open an issue first** - describe what problem you are trying to solve and give the community a forum for input and feedback ahead of investing time in writing code!
+- Ensure adequate test coverage:
+ - Make sure to add appropriate unit tests
+ - Please run all tests locally (`ginkgo -r -p`) and make sure they go green before submitting the PR
+- Update the documentation. In addition to standard `godoc` comments Gomega has extensive documentation on the `gh-pages` branch. If relevant, please submit a docs PR to that branch alongside your code PR.
+
+Thanks for supporting Gomega!
\ No newline at end of file
diff --git a/vendor/github.com/onsi/gomega/LICENSE b/vendor/github.com/onsi/gomega/LICENSE
new file mode 100644
index 000000000..9415ee72c
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2013-2014 Onsi Fakhouri
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/onsi/gomega/README.md b/vendor/github.com/onsi/gomega/README.md new file mode 100644 index 000000000..159be3590 --- /dev/null +++ b/vendor/github.com/onsi/gomega/README.md @@ -0,0 +1,21 @@ +![Gomega: Ginkgo's Preferred Matcher Library](http://onsi.github.io/gomega/images/gomega.png) + +[![Build Status](https://travis-ci.org/onsi/gomega.svg)](https://travis-ci.org/onsi/gomega) + +Jump straight to the [docs](http://onsi.github.io/gomega/) to learn about Gomega, including a list of [all available matchers](http://onsi.github.io/gomega/#provided-matchers). + +If you have a question, comment, bug report, feature request, etc. please open a GitHub issue. + +## [Ginkgo](http://github.com/onsi/ginkgo): a BDD Testing Framework for Golang + +Learn more about Ginkgo [here](http://onsi.github.io/ginkgo/) + +## Community Matchers + +A collection of community matchers is available on the [wiki](https://github.com/onsi/gomega/wiki). + +## License + +Gomega is MIT-Licensed + +The `ConsistOf` matcher uses [goraph](https://github.com/amitkgupta/goraph) which is embedded in the source to simplify distribution. goraph has an MIT license. diff --git a/vendor/github.com/onsi/gomega/format/format.go b/vendor/github.com/onsi/gomega/format/format.go new file mode 100644 index 000000000..e206ee59a --- /dev/null +++ b/vendor/github.com/onsi/gomega/format/format.go @@ -0,0 +1,379 @@ +/* +Gomega's format package pretty-prints objects. It explores input objects recursively and generates formatted, indented output with type information. +*/ +package format + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "time" +) + +// Use MaxDepth to set the maximum recursion depth when printing deeply nested objects +var MaxDepth = uint(10) + +/* +By default, all objects (even those that implement fmt.Stringer and fmt.GoStringer) are recursively inspected to generate output. + +Set UseStringerRepresentation = true to use GoString (for fmt.GoStringers) or String (for fmt.Stringer) instead. + +Note that GoString and String don't always have all the information you need to understand why a test failed! +*/ +var UseStringerRepresentation = false + +/* +Print the content of context objects. By default it will be suppressed. + +Set PrintContextObjects = true to enable printing of the context internals. 
+*/
+var PrintContextObjects = false
+
+// Ctx interface defined here to keep backwards compatibility with Go < 1.7
+// It matches the context.Context interface
+type Ctx interface {
+ Deadline() (deadline time.Time, ok bool)
+ Done() <-chan struct{}
+ Err() error
+ Value(key interface{}) interface{}
+}
+
+var contextType = reflect.TypeOf((*Ctx)(nil)).Elem()
+var timeType = reflect.TypeOf(time.Time{})
+
+// The default indentation string emitted by the format package
+var Indent = "    "
+
+var longFormThreshold = 20
+
+/*
+Generates a formatted matcher success/failure message of the form:
+
+ Expected
+  <pretty printed actual>
+ <message>
+  <pretty printed expected>
+
+If expected is omitted, then the message looks like:
+
+ Expected
+  <pretty printed actual>
+ <message>
+*/
+func Message(actual interface{}, message string, expected ...interface{}) string {
+ if len(expected) == 0 {
+ return fmt.Sprintf("Expected\n%s\n%s", Object(actual, 1), message)
+ }
+ return fmt.Sprintf("Expected\n%s\n%s\n%s", Object(actual, 1), message, Object(expected[0], 1))
+}
+
+/*
+Generates a nicely formatted matcher success / failure message.
+
+Much like Message(...), but it attempts to pretty print diffs in strings:
+
+ Expected
+  <string>: "...aaaaabaaaaa..."
+ to equal |
+  <string>: "...aaaaazaaaaa..."
+
+*/
+func MessageWithDiff(actual, message, expected string) string {
+ if len(actual) >= truncateThreshold && len(expected) >= truncateThreshold {
+ diffPoint := findFirstMismatch(actual, expected)
+ formattedActual := truncateAndFormat(actual, diffPoint)
+ formattedExpected := truncateAndFormat(expected, diffPoint)
+
+ spacesBeforeFormattedMismatch := findFirstMismatch(formattedActual, formattedExpected)
+
+ tabLength := 4
+ spaceFromMessageToActual := tabLength + len(": ") - len(message)
+ padding := strings.Repeat(" ", spaceFromMessageToActual+spacesBeforeFormattedMismatch) + "|"
+ return Message(formattedActual, message+padding, formattedExpected)
+ }
+ return Message(actual, message, expected)
+}
+
+func truncateAndFormat(str string, index int) string {
+ leftPadding := `...`
+ rightPadding := `...`
+
+ start := index - charactersAroundMismatchToInclude
+ if start < 0 {
+ start = 0
+ leftPadding = ""
+ }
+
+ // slice index must include the mismatched character
+ lengthOfMismatchedCharacter := 1
+ end := index + charactersAroundMismatchToInclude + lengthOfMismatchedCharacter
+ if end > len(str) {
+ end = len(str)
+ rightPadding = ""
+ }
+ return fmt.Sprintf("\"%s\"", leftPadding+str[start:end]+rightPadding)
+}
+
+func findFirstMismatch(a, b string) int {
+ aSlice := strings.Split(a, "")
+ bSlice := strings.Split(b, "")
+
+ for index, str := range aSlice {
+ if index > len(b)-1 {
+ return index
+ }
+ if str != bSlice[index] {
+ return index
+ }
+ }
+
+ if len(b) > len(a) {
+ return len(a) + 1
+ }
+
+ return 0
+}
+
+const (
+ truncateThreshold = 50
+ charactersAroundMismatchToInclude = 5
+)
+
+/*
+Pretty prints the passed-in object at the passed-in indentation level.
+
+Object recurses into deeply nested objects emitting pretty-printed representations of their components.
+
+Modify format.MaxDepth to control how deep the recursion is allowed to go.
+Set format.UseStringerRepresentation to true to return object.GoString() or object.String() when available instead of
+recursing into the object.
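+
+For example (illustrative, editor's addition): Object(true, 1) renders as
+"    <bool>: true" with the default four-space Indent.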
+
+Set PrintContextObjects to true to print the content of objects implementing context.Context.
+*/
+func Object(object interface{}, indentation uint) string {
+ indent := strings.Repeat(Indent, int(indentation))
+ value := reflect.ValueOf(object)
+ return fmt.Sprintf("%s<%s>: %s", indent, formatType(object), formatValue(value, indentation))
+}
+
+/*
+IndentString takes a string and indents each line by the specified amount.
+*/
+func IndentString(s string, indentation uint) string {
+ components := strings.Split(s, "\n")
+ result := ""
+ indent := strings.Repeat(Indent, int(indentation))
+ for i, component := range components {
+ result += indent + component
+ if i < len(components)-1 {
+ result += "\n"
+ }
+ }
+
+ return result
+}
+
+func formatType(object interface{}) string {
+ t := reflect.TypeOf(object)
+ if t == nil {
+ return "nil"
+ }
+ switch t.Kind() {
+ case reflect.Chan:
+ v := reflect.ValueOf(object)
+ return fmt.Sprintf("%T | len:%d, cap:%d", object, v.Len(), v.Cap())
+ case reflect.Ptr:
+ return fmt.Sprintf("%T | %p", object, object)
+ case reflect.Slice:
+ v := reflect.ValueOf(object)
+ return fmt.Sprintf("%T | len:%d, cap:%d", object, v.Len(), v.Cap())
+ case reflect.Map:
+ v := reflect.ValueOf(object)
+ return fmt.Sprintf("%T | len:%d", object, v.Len())
+ default:
+ return fmt.Sprintf("%T", object)
+ }
+}
+
+func formatValue(value reflect.Value, indentation uint) string {
+ if indentation > MaxDepth {
+ return "..."
+ }
+
+ if isNilValue(value) {
+ return "nil"
+ }
+
+ if UseStringerRepresentation {
+ if value.CanInterface() {
+ obj := value.Interface()
+ switch x := obj.(type) {
+ case fmt.GoStringer:
+ return x.GoString()
+ case fmt.Stringer:
+ return x.String()
+ }
+ }
+ }
+
+ if !PrintContextObjects {
+ if value.Type().Implements(contextType) && indentation > 1 {
+ return "<suppressed context>"
+ }
+ }
+
+ switch value.Kind() {
+ case reflect.Bool:
+ return fmt.Sprintf("%v", value.Bool())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return fmt.Sprintf("%v", value.Int())
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return fmt.Sprintf("%v", value.Uint())
+ case reflect.Uintptr:
+ return fmt.Sprintf("0x%x", value.Uint())
+ case reflect.Float32, reflect.Float64:
+ return fmt.Sprintf("%v", value.Float())
+ case reflect.Complex64, reflect.Complex128:
+ return fmt.Sprintf("%v", value.Complex())
+ case reflect.Chan:
+ return fmt.Sprintf("0x%x", value.Pointer())
+ case reflect.Func:
+ return fmt.Sprintf("0x%x", value.Pointer())
+ case reflect.Ptr:
+ return formatValue(value.Elem(), indentation)
+ case reflect.Slice:
+ return formatSlice(value, indentation)
+ case reflect.String:
+ return formatString(value.String(), indentation)
+ case reflect.Array:
+ return formatSlice(value, indentation)
+ case reflect.Map:
+ return formatMap(value, indentation)
+ case reflect.Struct:
+ if value.Type() == timeType && value.CanInterface() {
+ t, _ := value.Interface().(time.Time)
+ return t.Format(time.RFC3339Nano)
+ }
+ return formatStruct(value, indentation)
+ case reflect.Interface:
+ return formatValue(value.Elem(), indentation)
+ default:
+ if value.CanInterface() {
+ return fmt.Sprintf("%#v", value.Interface())
+ }
+ return fmt.Sprintf("%#v", value)
+ }
+}
+
+func formatString(object interface{}, indentation uint) string {
+ if indentation == 1 {
+ s := fmt.Sprintf("%s", object)
+ components := strings.Split(s, "\n")
+ result := ""
+ for i, component := range components {
+ if i == 0 {
+ result += component
+ } else {
+ result
+= Indent + component + } + if i < len(components)-1 { + result += "\n" + } + } + + return fmt.Sprintf("%s", result) + } else { + return fmt.Sprintf("%q", object) + } +} + +func formatSlice(v reflect.Value, indentation uint) string { + if v.Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 && isPrintableString(string(v.Bytes())) { + return formatString(v.Bytes(), indentation) + } + + l := v.Len() + result := make([]string, l) + longest := 0 + for i := 0; i < l; i++ { + result[i] = formatValue(v.Index(i), indentation+1) + if len(result[i]) > longest { + longest = len(result[i]) + } + } + + if longest > longFormThreshold { + indenter := strings.Repeat(Indent, int(indentation)) + return fmt.Sprintf("[\n%s%s,\n%s]", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter) + } + return fmt.Sprintf("[%s]", strings.Join(result, ", ")) +} + +func formatMap(v reflect.Value, indentation uint) string { + l := v.Len() + result := make([]string, l) + + longest := 0 + for i, key := range v.MapKeys() { + value := v.MapIndex(key) + result[i] = fmt.Sprintf("%s: %s", formatValue(key, indentation+1), formatValue(value, indentation+1)) + if len(result[i]) > longest { + longest = len(result[i]) + } + } + + if longest > longFormThreshold { + indenter := strings.Repeat(Indent, int(indentation)) + return fmt.Sprintf("{\n%s%s,\n%s}", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter) + } + return fmt.Sprintf("{%s}", strings.Join(result, ", ")) +} + +func formatStruct(v reflect.Value, indentation uint) string { + t := v.Type() + + l := v.NumField() + result := []string{} + longest := 0 + for i := 0; i < l; i++ { + structField := t.Field(i) + fieldEntry := v.Field(i) + representation := fmt.Sprintf("%s: %s", structField.Name, formatValue(fieldEntry, indentation+1)) + result = append(result, representation) + if len(representation) > longest { + longest = len(representation) + } + } + if longest > longFormThreshold { + indenter := strings.Repeat(Indent, int(indentation)) + return fmt.Sprintf("{\n%s%s,\n%s}", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter) + } + return fmt.Sprintf("{%s}", strings.Join(result, ", ")) +} + +func isNilValue(a reflect.Value) bool { + switch a.Kind() { + case reflect.Invalid: + return true + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return a.IsNil() + } + + return false +} + +/* +Returns true when the string is entirely made of printable runes, false otherwise. +*/ +func isPrintableString(str string) bool { + for _, runeValue := range str { + if !strconv.IsPrint(runeValue) { + return false + } + } + return true +} diff --git a/vendor/github.com/onsi/gomega/format/format_suite_test.go b/vendor/github.com/onsi/gomega/format/format_suite_test.go new file mode 100644 index 000000000..8e65a9529 --- /dev/null +++ b/vendor/github.com/onsi/gomega/format/format_suite_test.go @@ -0,0 +1,13 @@ +package format_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "testing" +) + +func TestFormat(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Format Suite") +} diff --git a/vendor/github.com/onsi/gomega/format/format_test.go b/vendor/github.com/onsi/gomega/format/format_test.go new file mode 100644 index 000000000..a1a903164 --- /dev/null +++ b/vendor/github.com/onsi/gomega/format/format_test.go @@ -0,0 +1,590 @@ +package format_test + +import ( + "fmt" + "strings" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/format" + "github.com/onsi/gomega/types" +) + +//recursive struct + +type StringAlias string +type ByteAlias []byte +type IntAlias int + +type AStruct struct { + Exported string +} + +type SimpleStruct struct { + Name string + Enumeration int + Veritas bool + Data []byte + secret uint32 +} + +type ComplexStruct struct { + Strings []string + SimpleThings []*SimpleStruct + DataMaps map[int]ByteAlias +} + +type SecretiveStruct struct { + boolValue bool + intValue int + uintValue uint + uintptrValue uintptr + floatValue float32 + complexValue complex64 + chanValue chan bool + funcValue func() + pointerValue *int + sliceValue []string + byteSliceValue []byte + stringValue string + arrValue [3]int + byteArrValue [3]byte + mapValue map[string]int + structValue AStruct + interfaceValue interface{} +} + +type GoStringer struct { +} + +func (g GoStringer) GoString() string { + return "go-string" +} + +func (g GoStringer) String() string { + return "string" +} + +type Stringer struct { +} + +func (g Stringer) String() string { + return "string" +} + +type ctx struct { +} + +func (c *ctx) Deadline() (deadline time.Time, ok bool) { + return time.Time{}, false +} + +func (c *ctx) Done() <-chan struct{} { + return nil +} + +func (c *ctx) Err() error { + return nil +} + +func (c *ctx) Value(key interface{}) interface{} { + return nil +} + +var _ = Describe("Format", func() { + match := func(typeRepresentation string, valueRepresentation string, args ...interface{}) types.GomegaMatcher { + if len(args) > 0 { + valueRepresentation = fmt.Sprintf(valueRepresentation, args...) + } + return Equal(fmt.Sprintf("%s<%s>: %s", Indent, typeRepresentation, valueRepresentation)) + } + + matchRegexp := func(typeRepresentation string, valueRepresentation string, args ...interface{}) types.GomegaMatcher { + if len(args) > 0 { + valueRepresentation = fmt.Sprintf(valueRepresentation, args...) 
+ } + return MatchRegexp(fmt.Sprintf("%s<%s>: %s", Indent, typeRepresentation, valueRepresentation)) + } + + hashMatchingRegexp := func(entries ...string) string { + entriesSwitch := "(" + strings.Join(entries, "|") + ")" + arr := make([]string, len(entries)) + for i := range arr { + arr[i] = entriesSwitch + } + return "{" + strings.Join(arr, ", ") + "}" + } + + Describe("Message", func() { + Context("with only an actual value", func() { + It("should print out an indented formatted representation of the value and the message", func() { + Ω(Message(3, "to be three.")).Should(Equal("Expected\n : 3\nto be three.")) + }) + }) + + Context("with an actual and an expected value", func() { + It("should print out an indented formatted representatino of both values, and the message", func() { + Ω(Message(3, "to equal", 4)).Should(Equal("Expected\n : 3\nto equal\n : 4")) + }) + }) + }) + + Describe("MessageWithDiff", func() { + It("shows the exact point where two long strings differ", func() { + stringWithB := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + stringWithZ := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaazaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + Ω(MessageWithDiff(stringWithB, "to equal", stringWithZ)).Should(Equal(expectedLongStringFailureMessage)) + }) + + It("truncates the start of long strings that differ only at their end", func() { + stringWithB := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab" + stringWithZ := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaz" + + Ω(MessageWithDiff(stringWithB, "to equal", stringWithZ)).Should(Equal(expectedTruncatedStartStringFailureMessage)) + }) + + It("truncates the start of long strings that differ only in length", func() { + smallString := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + largeString := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + Ω(MessageWithDiff(largeString, "to equal", smallString)).Should(Equal(expectedTruncatedStartSizeFailureMessage)) + Ω(MessageWithDiff(smallString, "to equal", largeString)).Should(Equal(expectedTruncatedStartSizeSwappedFailureMessage)) + }) + + It("truncates the end of long strings that differ only at their start", func() { + stringWithB := "baaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + stringWithZ := "zaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + Ω(MessageWithDiff(stringWithB, "to equal", stringWithZ)).Should(Equal(expectedTruncatedEndStringFailureMessage)) + }) + }) + + Describe("IndentString", func() { + It("should indent the string", func() { + Ω(IndentString("foo\n bar\nbaz", 2)).Should(Equal(" foo\n bar\n baz")) + }) + }) + + Describe("Object", func() { + Describe("formatting boolean values", func() { + It("should give the type and format values correctly", func() { + Ω(Object(true, 1)).Should(match("bool", "true")) + Ω(Object(false, 1)).Should(match("bool", "false")) + }) + }) + + Describe("formatting numbers", func() { + It("should give the type and format values correctly", func() { + Ω(Object(int(3), 1)).Should(match("int", "3")) + Ω(Object(int8(3), 1)).Should(match("int8", "3")) + Ω(Object(int16(3), 1)).Should(match("int16", "3")) + Ω(Object(int32(3), 1)).Should(match("int32", "3")) + Ω(Object(int64(3), 1)).Should(match("int64", "3")) + + Ω(Object(uint(3), 1)).Should(match("uint", "3")) + Ω(Object(uint8(3), 1)).Should(match("uint8", "3")) + Ω(Object(uint16(3), 1)).Should(match("uint16", "3")) + Ω(Object(uint32(3), 1)).Should(match("uint32", "3")) + 
Ω(Object(uint64(3), 1)).Should(match("uint64", "3")) + }) + + It("should handle uintptr differently", func() { + Ω(Object(uintptr(3), 1)).Should(match("uintptr", "0x3")) + }) + }) + + Describe("formatting channels", func() { + It("should give the type and format values correctly", func() { + c := make(chan<- bool, 3) + c <- true + c <- false + Ω(Object(c, 1)).Should(match("chan<- bool | len:2, cap:3", "%v", c)) + }) + }) + + Describe("formatting strings", func() { + It("should give the type and format values correctly", func() { + s := "a\nb\nc" + Ω(Object(s, 1)).Should(match("string", `a + b + c`)) + }) + }) + + Describe("formatting []byte slices", func() { + Context("when the slice is made of printable bytes", func() { + It("should present it as string", func() { + b := []byte("a b c") + Ω(Object(b, 1)).Should(matchRegexp(`\[\]uint8 \| len:5, cap:\d+`, `a b c`)) + }) + }) + Context("when the slice contains non-printable bytes", func() { + It("should present it as slice", func() { + b := []byte("a b c\n\x01\x02\x03\xff\x1bH") + Ω(Object(b, 1)).Should(matchRegexp(`\[\]uint8 \| len:12, cap:\d+`, `\[97, 32, 98, 32, 99, 10, 1, 2, 3, 255, 27, 72\]`)) + }) + }) + }) + + Describe("formatting functions", func() { + It("should give the type and format values correctly", func() { + f := func(a string, b []int) ([]byte, error) { + return []byte("abc"), nil + } + Ω(Object(f, 1)).Should(match("func(string, []int) ([]uint8, error)", "%v", f)) + }) + }) + + Describe("formatting pointers", func() { + It("should give the type and dereference the value to format it correctly", func() { + a := 3 + Ω(Object(&a, 1)).Should(match(fmt.Sprintf("*int | %p", &a), "3")) + }) + + Context("when there are pointers to pointers...", func() { + It("should recursively dereference the pointer until it gets to a value", func() { + a := 3 + var b *int + var c **int + var d ***int + b = &a + c = &b + d = &c + + Ω(Object(d, 1)).Should(match(fmt.Sprintf("***int | %p", d), "3")) + }) + }) + + Context("when the pointer points to nil", func() { + It("should say nil and not explode", func() { + var a *AStruct + Ω(Object(a, 1)).Should(match("*format_test.AStruct | 0x0", "nil")) + }) + }) + }) + + Describe("formatting arrays", func() { + It("should give the type and format values correctly", func() { + w := [3]string{"Jed Bartlet", "Toby Ziegler", "CJ Cregg"} + Ω(Object(w, 1)).Should(match("[3]string", `["Jed Bartlet", "Toby Ziegler", "CJ Cregg"]`)) + }) + + Context("with byte arrays", func() { + It("should give the type and format values correctly", func() { + w := [3]byte{17, 28, 19} + Ω(Object(w, 1)).Should(match("[3]uint8", `[17, 28, 19]`)) + }) + }) + }) + + Describe("formatting slices", func() { + It("should include the length and capacity in the type information", func() { + s := make([]bool, 3, 4) + Ω(Object(s, 1)).Should(match("[]bool | len:3, cap:4", "[false, false, false]")) + }) + + Context("when the slice contains long entries", func() { + It("should format the entries with newlines", func() { + w := []string{"Josiah Edward Bartlet", "Toby Ziegler", "CJ Cregg"} + expected := `[ + "Josiah Edward Bartlet", + "Toby Ziegler", + "CJ Cregg", + ]` + Ω(Object(w, 1)).Should(match("[]string | len:3, cap:3", expected)) + }) + }) + }) + + Describe("formatting maps", func() { + It("should include the length in the type information", func() { + m := make(map[int]bool, 5) + m[3] = true + m[4] = false + Ω(Object(m, 1)).Should(matchRegexp(`map\[int\]bool \| len:2`, hashMatchingRegexp("3: true", "4: false"))) + }) + + Context("when the 
slice contains long entries", func() { + It("should format the entries with newlines", func() { + m := map[string][]byte{} + m["Josiah Edward Bartlet"] = []byte("Martin Sheen") + m["Toby Ziegler"] = []byte("Richard Schiff") + m["CJ Cregg"] = []byte("Allison Janney") + expected := `{ + ("Josiah Edward Bartlet": "Martin Sheen"|"Toby Ziegler": "Richard Schiff"|"CJ Cregg": "Allison Janney"), + ("Josiah Edward Bartlet": "Martin Sheen"|"Toby Ziegler": "Richard Schiff"|"CJ Cregg": "Allison Janney"), + ("Josiah Edward Bartlet": "Martin Sheen"|"Toby Ziegler": "Richard Schiff"|"CJ Cregg": "Allison Janney"), + }` + Ω(Object(m, 1)).Should(matchRegexp(`map\[string\]\[\]uint8 \| len:3`, expected)) + }) + }) + }) + + Describe("formatting structs", func() { + It("should include the struct name and the field names", func() { + s := SimpleStruct{ + Name: "Oswald", + Enumeration: 17, + Veritas: true, + Data: []byte("datum"), + secret: 1983, + } + + Ω(Object(s, 1)).Should(match("format_test.SimpleStruct", `{Name: "Oswald", Enumeration: 17, Veritas: true, Data: "datum", secret: 1983}`)) + }) + + Context("when the struct contains long entries", func() { + It("should format the entries with new lines", func() { + s := &SimpleStruct{ + Name: "Mithrandir Gandalf Greyhame", + Enumeration: 2021, + Veritas: true, + Data: []byte("wizard"), + secret: 3, + } + + Ω(Object(s, 1)).Should(match(fmt.Sprintf("*format_test.SimpleStruct | %p", s), `{ + Name: "Mithrandir Gandalf Greyhame", + Enumeration: 2021, + Veritas: true, + Data: "wizard", + secret: 3, + }`)) + }) + }) + }) + + Describe("formatting nil values", func() { + It("should print out nil", func() { + Ω(Object(nil, 1)).Should(match("nil", "nil")) + var typedNil *AStruct + Ω(Object(typedNil, 1)).Should(match("*format_test.AStruct | 0x0", "nil")) + var c chan<- bool + Ω(Object(c, 1)).Should(match("chan<- bool | len:0, cap:0", "nil")) + var s []string + Ω(Object(s, 1)).Should(match("[]string | len:0, cap:0", "nil")) + var m map[string]bool + Ω(Object(m, 1)).Should(match("map[string]bool | len:0", "nil")) + }) + }) + + Describe("formatting aliased types", func() { + It("should print out the correct alias type", func() { + Ω(Object(StringAlias("alias"), 1)).Should(match("format_test.StringAlias", `alias`)) + Ω(Object(ByteAlias("alias"), 1)).Should(matchRegexp(`format_test\.ByteAlias \| len:5, cap:\d+`, `alias`)) + Ω(Object(IntAlias(3), 1)).Should(match("format_test.IntAlias", "3")) + }) + }) + + Describe("handling nested things", func() { + It("should produce a correctly nested representation", func() { + s := ComplexStruct{ + Strings: []string{"lots", "of", "short", "strings"}, + SimpleThings: []*SimpleStruct{ + {"short", 7, true, []byte("succinct"), 17}, + {"something longer", 427, true, []byte("designed to wrap around nicely"), 30}, + }, + DataMaps: map[int]ByteAlias{ + 17: ByteAlias("some substantially longer chunks of data"), + 1138: ByteAlias("that should make things wrap"), + }, + } + expected := `{ + Strings: \["lots", "of", "short", "strings"\], + SimpleThings: \[ + {Name: "short", Enumeration: 7, Veritas: true, Data: "succinct", secret: 17}, + { + Name: "something longer", + Enumeration: 427, + Veritas: true, + Data: "designed to wrap around nicely", + secret: 30, + }, + \], + DataMaps: { + (17: "some substantially longer chunks of data"|1138: "that should make things wrap"), + (17: "some substantially longer chunks of data"|1138: "that should make things wrap"), + }, + }` + Ω(Object(s, 1)).Should(matchRegexp(`format_test\.ComplexStruct`, expected)) + }) + }) + 
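+ // Editor's note: an illustrative aside, not part of the upstream suite. The format helpers exercised above are the same ones custom matchers typically use to build failure messages, e.g.: + // + // func (m *beFrobbedMatcher) FailureMessage(actual interface{}) string { + // return format.Message(actual, "to be frobbed like", m.expected) + // } + // + // format.Message renders the indented "Expected <type>: value" layout asserted throughout this file; beFrobbedMatcher and its expected field are hypothetical. + 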
+ Describe("formatting times", func() { + It("should format time as RFC3339", func() { + t := time.Date(2016, 10, 31, 9, 57, 23, 12345, time.UTC) + Ω(Object(t, 1)).Should(match("time.Time", `2016-10-31T09:57:23.000012345Z`)) + }) + }) + }) + + Describe("Handling unexported fields in structs", func() { + It("should handle all the various types correctly", func() { + a := int(5) + s := SecretiveStruct{ + boolValue: true, + intValue: 3, + uintValue: 4, + uintptrValue: 5, + floatValue: 6.0, + complexValue: complex(5.0, 3.0), + chanValue: make(chan bool, 2), + funcValue: func() {}, + pointerValue: &a, + sliceValue: []string{"string", "slice"}, + byteSliceValue: []byte("bytes"), + stringValue: "a string", + arrValue: [3]int{11, 12, 13}, + byteArrValue: [3]byte{17, 20, 32}, + mapValue: map[string]int{"a key": 20, "b key": 30}, + structValue: AStruct{"exported"}, + interfaceValue: map[string]int{"a key": 17}, + } + + expected := fmt.Sprintf(`{ + boolValue: true, + intValue: 3, + uintValue: 4, + uintptrValue: 0x5, + floatValue: 6, + complexValue: \(5\+3i\), + chanValue: %p, + funcValue: %p, + pointerValue: 5, + sliceValue: \["string", "slice"\], + byteSliceValue: "bytes", + stringValue: "a string", + arrValue: \[11, 12, 13\], + byteArrValue: \[17, 20, 32\], + mapValue: %s, + structValue: {Exported: "exported"}, + interfaceValue: {"a key": 17}, + }`, s.chanValue, s.funcValue, hashMatchingRegexp(`"a key": 20`, `"b key": 30`)) + + Ω(Object(s, 1)).Should(matchRegexp(`format_test\.SecretiveStruct`, expected)) + }) + }) + + Describe("Handling interfaces", func() { + It("should unpack the interface", func() { + outerHash := map[string]interface{}{} + innerHash := map[string]int{} + + innerHash["inner"] = 3 + outerHash["integer"] = 2 + outerHash["map"] = innerHash + + expected := hashMatchingRegexp(`"integer": 2`, `"map": {"inner": 3}`) + Ω(Object(outerHash, 1)).Should(matchRegexp(`map\[string\]interface {} \| len:2`, expected)) + }) + }) + + Describe("Handling recursive things", func() { + It("should not go crazy...", func() { + m := map[string]interface{}{} + m["integer"] = 2 + m["map"] = m + Ω(Object(m, 1)).Should(ContainSubstring("...")) + }) + + It("really should not go crazy...", func() { + type complexKey struct { + Value map[interface{}]int + } + + complexObject := complexKey{} + complexObject.Value = make(map[interface{}]int) + + complexObject.Value[&complexObject] = 2 + Ω(Object(complexObject, 1)).Should(ContainSubstring("...")) + }) + }) + + Describe("When instructed to use the Stringer representation", func() { + BeforeEach(func() { + UseStringerRepresentation = true + }) + + AfterEach(func() { + UseStringerRepresentation = false + }) + + Context("when passed a GoStringer", func() { + It("should use what GoString() returns", func() { + Ω(Object(GoStringer{}, 1)).Should(ContainSubstring(": go-string")) + }) + }) + + Context("when passed a stringer", func() { + It("should use what String() returns", func() { + Ω(Object(Stringer{}, 1)).Should(ContainSubstring(": string")) + }) + }) + }) + + Describe("Printing a context.Context field", func() { + + type structWithContext struct { + Context Ctx + Value string + } + + context := ctx{} + objWithContext := structWithContext{Value: "some-value", Context: &context} + + It("Suppresses the content by default", func() { + Ω(Object(objWithContext, 1)).Should(ContainSubstring("")) + }) + + It("Doesn't supress the context if it's the object being printed", func() { + Ω(Object(context, 1)).ShouldNot(MatchRegexp("^.*$")) + }) + + Context("PrintContextObjects is 
set", func() { + BeforeEach(func() { + PrintContextObjects = true + }) + + AfterEach(func() { + PrintContextObjects = false + }) + + It("Prints the context", func() { + Ω(Object(objWithContext, 1)).ShouldNot(ContainSubstring("")) + }) + }) + }) +}) + +var expectedLongStringFailureMessage = strings.TrimSpace(` +Expected + : "...aaaaabaaaaa..." +to equal | + : "...aaaaazaaaaa..." +`) +var expectedTruncatedEndStringFailureMessage = strings.TrimSpace(` +Expected + : "baaaaa..." +to equal | + : "zaaaaa..." +`) +var expectedTruncatedStartStringFailureMessage = strings.TrimSpace(` +Expected + : "...aaaaab" +to equal | + : "...aaaaaz" +`) +var expectedTruncatedStartSizeFailureMessage = strings.TrimSpace(` +Expected + : "...aaaaaa" +to equal | + : "...aaaaa" +`) +var expectedTruncatedStartSizeSwappedFailureMessage = strings.TrimSpace(` +Expected + : "...aaaa" +to equal | + : "...aaaaa" +`) diff --git a/vendor/github.com/onsi/gomega/gbytes/buffer.go b/vendor/github.com/onsi/gomega/gbytes/buffer.go new file mode 100644 index 000000000..336086f4a --- /dev/null +++ b/vendor/github.com/onsi/gomega/gbytes/buffer.go @@ -0,0 +1,245 @@ +/* +Package gbytes provides a buffer that supports incrementally detecting input. + +You use gbytes.Buffer with the gbytes.Say matcher. When Say finds a match, it fastforwards the buffer's read cursor to the end of that match. + +Subsequent matches against the buffer will only operate against data that appears *after* the read cursor. + +The read cursor is an opaque implementation detail that you cannot access. You should use the Say matcher to sift through the buffer. You can always +access the entire buffer's contents with Contents(). + +*/ +package gbytes + +import ( + "errors" + "fmt" + "io" + "regexp" + "sync" + "time" +) + +/* +gbytes.Buffer implements an io.Writer and can be used with the gbytes.Say matcher. + +You should only use a gbytes.Buffer in test code. It stores all writes in an in-memory buffer - behavior that is inappropriate for production code! +*/ +type Buffer struct { + contents []byte + readCursor uint64 + lock *sync.Mutex + detectCloser chan interface{} + closed bool +} + +/* +NewBuffer returns a new gbytes.Buffer +*/ +func NewBuffer() *Buffer { + return &Buffer{ + lock: &sync.Mutex{}, + } +} + +/* +BufferWithBytes returns a new gbytes.Buffer seeded with the passed in bytes +*/ +func BufferWithBytes(bytes []byte) *Buffer { + return &Buffer{ + lock: &sync.Mutex{}, + contents: bytes, + } +} + +/* +BufferReader returns a new gbytes.Buffer that wraps a reader. The reader's contents are read into +the Buffer via io.Copy +*/ +func BufferReader(reader io.Reader) *Buffer { + b := &Buffer{ + lock: &sync.Mutex{}, + } + + go func() { + io.Copy(b, reader) + b.Close() + }() + + return b +} + +/* +Write implements the io.Writer interface +*/ +func (b *Buffer) Write(p []byte) (n int, err error) { + b.lock.Lock() + defer b.lock.Unlock() + + if b.closed { + return 0, errors.New("attempt to write to closed buffer") + } + + b.contents = append(b.contents, p...) + return len(p), nil +} + +/* +Read implements the io.Reader interface. It advances the +cursor as it reads. + +Returns an error if called after Close. 
+*/ +func (b *Buffer) Read(d []byte) (int, error) { + b.lock.Lock() + defer b.lock.Unlock() + + if b.closed { + return 0, errors.New("attempt to read from closed buffer") + } + + if uint64(len(b.contents)) <= b.readCursor { + return 0, io.EOF + } + + n := copy(d, b.contents[b.readCursor:]) + b.readCursor += uint64(n) + + return n, nil +} + +/* +Close signifies that the buffer will no longer be written to +*/ +func (b *Buffer) Close() error { + b.lock.Lock() + defer b.lock.Unlock() + + b.closed = true + + return nil +} + +/* +Closed returns true if the buffer has been closed +*/ +func (b *Buffer) Closed() bool { + b.lock.Lock() + defer b.lock.Unlock() + + return b.closed +} + +/* +Contents returns all data ever written to the buffer. +*/ +func (b *Buffer) Contents() []byte { + b.lock.Lock() + defer b.lock.Unlock() + + contents := make([]byte, len(b.contents)) + copy(contents, b.contents) + return contents +} + +/* +Detect takes a regular expression and returns a channel. + +The channel will receive true the first time data matching the regular expression is written to the buffer. +The channel is subsequently closed and the buffer's read-cursor is fast-forwarded to just after the matching region. + +You typically don't need to use Detect and should use the gbytes.Say matcher instead. Detect is useful, however, in cases where your code must +branch and handle different outputs written to the buffer. + +For example, consider a buffer hooked up to the stdout of a client library. You may (or may not, depending on state outside of your control) need to authenticate the client library. + +You could do something like: + +select { +case <-buffer.Detect("You are not logged in"): + //log in +case <-buffer.Detect("Success"): + //carry on +case <-time.After(time.Second): + //welp +} +buffer.CancelDetects() + +You should always call CancelDetects after using Detect. This will close any channels that have not detected and clean up the goroutines that were spawned to support them. + +Finally, you can pass Detect a format string followed by variadic arguments. This will construct the regexp using fmt.Sprintf. +*/ +func (b *Buffer) Detect(desired string, args ...interface{}) chan bool { + formattedRegexp := desired + if len(args) > 0 { + formattedRegexp = fmt.Sprintf(desired, args...) + } + re := regexp.MustCompile(formattedRegexp) + + b.lock.Lock() + defer b.lock.Unlock() + + if b.detectCloser == nil { + b.detectCloser = make(chan interface{}) + } + + closer := b.detectCloser + response := make(chan bool) + go func() { + ticker := time.NewTicker(10 * time.Millisecond) + defer ticker.Stop() + defer close(response) + for { + select { + case <-ticker.C: + b.lock.Lock() + data, cursor := b.contents[b.readCursor:], b.readCursor + loc := re.FindIndex(data) + b.lock.Unlock() + + if loc != nil { + response <- true + b.lock.Lock() + newCursorPosition := cursor + uint64(loc[1]) + if newCursorPosition >= b.readCursor { + b.readCursor = newCursorPosition + } + b.lock.Unlock() + return + } + case <-closer: + return + } + } + }() + + return response +} + +/* +CancelDetects cancels any pending detects and cleans up their goroutines. You should always call this when you're done with a set of Detect channels. 
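+ +A common pattern (illustrative) is to pair the select block shown above with a deferred cleanup: + + defer buffer.CancelDetects() + select { + case <-buffer.Detect("ready"): + case <-time.After(time.Second): + }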
+*/ +func (b *Buffer) CancelDetects() { + b.lock.Lock() + defer b.lock.Unlock() + + close(b.detectCloser) + b.detectCloser = nil +} + +func (b *Buffer) didSay(re *regexp.Regexp) (bool, []byte) { + b.lock.Lock() + defer b.lock.Unlock() + + unreadBytes := b.contents[b.readCursor:] + copyOfUnreadBytes := make([]byte, len(unreadBytes)) + copy(copyOfUnreadBytes, unreadBytes) + + loc := re.FindIndex(unreadBytes) + + if loc != nil { + b.readCursor += uint64(loc[1]) + return true, copyOfUnreadBytes + } + return false, copyOfUnreadBytes +} diff --git a/vendor/github.com/onsi/gomega/gbytes/buffer_test.go b/vendor/github.com/onsi/gomega/gbytes/buffer_test.go new file mode 100644 index 000000000..655a3474b --- /dev/null +++ b/vendor/github.com/onsi/gomega/gbytes/buffer_test.go @@ -0,0 +1,205 @@ +package gbytes_test + +import ( + "io" + "time" + + . "github.com/onsi/gomega/gbytes" + + "bytes" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +type SlowReader struct { + R io.Reader + D time.Duration +} + +func (s SlowReader) Read(p []byte) (int, error) { + time.Sleep(s.D) + return s.R.Read(p) +} + +var _ = Describe("Buffer", func() { + var buffer *Buffer + + BeforeEach(func() { + buffer = NewBuffer() + }) + + Describe("dumping the entire contents of the buffer", func() { + It("should return everything that's been written", func() { + buffer.Write([]byte("abc")) + buffer.Write([]byte("def")) + Ω(buffer.Contents()).Should(Equal([]byte("abcdef"))) + + Ω(buffer).Should(Say("bcd")) + Ω(buffer.Contents()).Should(Equal([]byte("abcdef"))) + }) + }) + + Describe("creating a buffer with bytes", func() { + It("should create the buffer with the cursor set to the beginning", func() { + buffer := BufferWithBytes([]byte("abcdef")) + Ω(buffer.Contents()).Should(Equal([]byte("abcdef"))) + Ω(buffer).Should(Say("abc")) + Ω(buffer).ShouldNot(Say("abc")) + Ω(buffer).Should(Say("def")) + }) + }) + + Describe("creating a buffer that wraps a reader", func() { + Context("for a well-behaved reader", func() { + It("should buffer the contents of the reader", func() { + reader := bytes.NewBuffer([]byte("abcdef")) + buffer := BufferReader(reader) + Eventually(buffer).Should(Say("abc")) + Ω(buffer).ShouldNot(Say("abc")) + Eventually(buffer).Should(Say("def")) + Eventually(buffer.Closed).Should(BeTrue()) + }) + }) + + Context("for a slow reader", func() { + It("should allow Eventually to time out", func() { + slowReader := SlowReader{ + R: bytes.NewBuffer([]byte("abcdef")), + D: time.Second, + } + buffer := BufferReader(slowReader) + failures := InterceptGomegaFailures(func() { + Eventually(buffer, 100*time.Millisecond).Should(Say("abc")) + }) + Ω(failures).ShouldNot(BeEmpty()) + + fastReader := SlowReader{ + R: bytes.NewBuffer([]byte("abcdef")), + D: time.Millisecond, + } + buffer = BufferReader(fastReader) + Eventually(buffer, 100*time.Millisecond).Should(Say("abc")) + Eventually(buffer.Closed).Should(BeTrue()) + }) + }) + }) + + Describe("reading from a buffer", func() { + It("should read the current contents of the buffer", func() { + buffer := BufferWithBytes([]byte("abcde")) + + dest := make([]byte, 3) + n, err := buffer.Read(dest) + Ω(err).ShouldNot(HaveOccurred()) + Ω(n).Should(Equal(3)) + Ω(string(dest)).Should(Equal("abc")) + + dest = make([]byte, 3) + n, err = buffer.Read(dest) + Ω(err).ShouldNot(HaveOccurred()) + Ω(n).Should(Equal(2)) + Ω(string(dest[:n])).Should(Equal("de")) + + n, err = buffer.Read(dest) + Ω(err).Should(Equal(io.EOF)) + Ω(n).Should(Equal(0)) + }) + + Context("after the buffer has been 
closed", func() { + It("returns an error", func() { + buffer := BufferWithBytes([]byte("abcde")) + + buffer.Close() + + dest := make([]byte, 3) + n, err := buffer.Read(dest) + Ω(err).Should(HaveOccurred()) + Ω(n).Should(Equal(0)) + }) + }) + }) + + Describe("detecting regular expressions", func() { + It("should fire the appropriate channel when the passed in pattern matches, then close it", func(done Done) { + go func() { + time.Sleep(10 * time.Millisecond) + buffer.Write([]byte("abcde")) + }() + + A := buffer.Detect("%s", "a.c") + B := buffer.Detect("def") + + var gotIt bool + select { + case gotIt = <-A: + case <-B: + Fail("should not have gotten here") + } + + Ω(gotIt).Should(BeTrue()) + Eventually(A).Should(BeClosed()) + + buffer.Write([]byte("f")) + Eventually(B).Should(Receive()) + Eventually(B).Should(BeClosed()) + + close(done) + }) + + It("should fast-forward the buffer upon detection", func(done Done) { + buffer.Write([]byte("abcde")) + <-buffer.Detect("abc") + Ω(buffer).ShouldNot(Say("abc")) + Ω(buffer).Should(Say("de")) + close(done) + }) + + It("should only fast-forward the buffer when the channel is read, and only if doing so would not rewind it", func(done Done) { + buffer.Write([]byte("abcde")) + A := buffer.Detect("abc") + time.Sleep(20 * time.Millisecond) //give the goroutine a chance to detect and write to the channel + Ω(buffer).Should(Say("abcd")) + <-A + Ω(buffer).ShouldNot(Say("d")) + Ω(buffer).Should(Say("e")) + Eventually(A).Should(BeClosed()) + close(done) + }) + + It("should be possible to cancel a detection", func(done Done) { + A := buffer.Detect("abc") + B := buffer.Detect("def") + buffer.CancelDetects() + buffer.Write([]byte("abcdef")) + Eventually(A).Should(BeClosed()) + Eventually(B).Should(BeClosed()) + + Ω(buffer).Should(Say("bcde")) + <-buffer.Detect("f") + close(done) + }) + }) + + Describe("closing the buffer", func() { + It("should error when further write attempts are made", func() { + _, err := buffer.Write([]byte("abc")) + Ω(err).ShouldNot(HaveOccurred()) + + buffer.Close() + + _, err = buffer.Write([]byte("def")) + Ω(err).Should(HaveOccurred()) + + Ω(buffer.Contents()).Should(Equal([]byte("abc"))) + }) + + It("should be closed", func() { + Ω(buffer.Closed()).Should(BeFalse()) + + buffer.Close() + + Ω(buffer.Closed()).Should(BeTrue()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/gbytes/gbuffer_suite_test.go b/vendor/github.com/onsi/gomega/gbytes/gbuffer_suite_test.go new file mode 100644 index 000000000..3a7dc0612 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gbytes/gbuffer_suite_test.go @@ -0,0 +1,13 @@ +package gbytes_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "testing" +) + +func TestGbytes(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Gbytes Suite") +} diff --git a/vendor/github.com/onsi/gomega/gbytes/io_wrappers.go b/vendor/github.com/onsi/gomega/gbytes/io_wrappers.go new file mode 100644 index 000000000..3caed8769 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gbytes/io_wrappers.go @@ -0,0 +1,85 @@ +package gbytes + +import ( + "errors" + "io" + "time" +) + +// ErrTimeout is returned by TimeoutCloser, TimeoutReader, and TimeoutWriter when the underlying Closer/Reader/Writer does not return within the specified timeout +var ErrTimeout = errors.New("timeout occurred") + +// TimeoutCloser returns an io.Closer that wraps the passed-in io.Closer. If the underlying Closer fails to close within the alloted timeout ErrTimeout is returned. 
+func TimeoutCloser(c io.Closer, timeout time.Duration) io.Closer { + return timeoutReaderWriterCloser{c: c, d: timeout} +} + +// TimeoutReader returns an io.Reader that wraps the passed-in io.Reader. If the underlying Reader fails to read within the allotted timeout ErrTimeout is returned. +func TimeoutReader(r io.Reader, timeout time.Duration) io.Reader { + return timeoutReaderWriterCloser{r: r, d: timeout} +} + +// TimeoutWriter returns an io.Writer that wraps the passed-in io.Writer. If the underlying Writer fails to write within the allotted timeout ErrTimeout is returned. +func TimeoutWriter(w io.Writer, timeout time.Duration) io.Writer { + return timeoutReaderWriterCloser{w: w, d: timeout} +} + +type timeoutReaderWriterCloser struct { + c io.Closer + w io.Writer + r io.Reader + d time.Duration +} + +func (t timeoutReaderWriterCloser) Close() error { + done := make(chan struct{}) + var err error + + go func() { + err = t.c.Close() + close(done) + }() + + select { + case <-done: + return err + case <-time.After(t.d): + return ErrTimeout + } +} + +func (t timeoutReaderWriterCloser) Read(p []byte) (int, error) { + done := make(chan struct{}) + var n int + var err error + + go func() { + n, err = t.r.Read(p) + close(done) + }() + + select { + case <-done: + return n, err + case <-time.After(t.d): + return 0, ErrTimeout + } +} + +func (t timeoutReaderWriterCloser) Write(p []byte) (int, error) { + done := make(chan struct{}) + var n int + var err error + + go func() { + n, err = t.w.Write(p) + close(done) + }() + + select { + case <-done: + return n, err + case <-time.After(t.d): + return 0, ErrTimeout + } +} diff --git a/vendor/github.com/onsi/gomega/gbytes/io_wrappers_test.go b/vendor/github.com/onsi/gomega/gbytes/io_wrappers_test.go new file mode 100644 index 000000000..2c74545d8 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gbytes/io_wrappers_test.go @@ -0,0 +1,188 @@ +package gbytes_test + +import ( + "fmt" + "io" + "time" + + . "github.com/onsi/gomega/gbytes" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +type FakeCloser struct { + err error + duration time.Duration +} + +func (f FakeCloser) Close() error { + time.Sleep(f.duration) + return f.err +} + +type FakeReader struct { + err error + duration time.Duration +} + +func (f FakeReader) Read(p []byte) (int, error) { + time.Sleep(f.duration) + if f.err != nil { + return 0, f.err + } + + for i := 0; i < len(p); i++ { + p[i] = 'a' + } + + return len(p), nil +} + +type FakeWriter struct { + err error + duration time.Duration +} + +func (f FakeWriter) Write(p []byte) (int, error) { + time.Sleep(f.duration) + if f.err != nil { + return 0, f.err + } + + return len(p), nil +} + +var _ = Describe("Io Wrappers", func() { + Describe("TimeoutCloser", func() { + var innerCloser io.Closer + var timeoutCloser io.Closer + + JustBeforeEach(func() { + timeoutCloser = TimeoutCloser(innerCloser, 20*time.Millisecond) + }) + + Context("when the underlying Closer closes with no error", func() { + BeforeEach(func() { + innerCloser = FakeCloser{} + }) + + It("returns with no error", func() { + Ω(timeoutCloser.Close()).Should(Succeed()) + }) + }) + + Context("when the underlying Closer closes with an error", func() { + BeforeEach(func() { + innerCloser = FakeCloser{err: fmt.Errorf("boom")} + }) + + It("returns the error", func() { + Ω(timeoutCloser.Close()).Should(MatchError("boom")) + }) + }) + + Context("when the underlying Closer hangs", func() { + BeforeEach(func() { + innerCloser = FakeCloser{ + err: fmt.Errorf("boom"), + duration: time.Hour, + } + }) + + It("returns ErrTimeout", func() { + Ω(timeoutCloser.Close()).Should(MatchError(ErrTimeout)) + }) + }) + }) + + Describe("TimeoutReader", func() { + var innerReader io.Reader + var timeoutReader io.Reader + + JustBeforeEach(func() { + timeoutReader = TimeoutReader(innerReader, 20*time.Millisecond) + }) + + Context("when the underlying Reader returns no error", func() { + BeforeEach(func() { + innerReader = FakeReader{} + }) + + It("returns with no error", func() { + p := make([]byte, 5) + n, err := timeoutReader.Read(p) + Ω(n).Should(Equal(5)) + Ω(err).ShouldNot(HaveOccurred()) + Ω(p).Should(Equal([]byte("aaaaa"))) + }) + }) + + Context("when the underlying Reader returns an error", func() { + BeforeEach(func() { + innerReader = FakeReader{err: fmt.Errorf("boom")} + }) + + It("returns the error", func() { + p := make([]byte, 5) + _, err := timeoutReader.Read(p) + Ω(err).Should(MatchError("boom")) + }) + }) + + Context("when the underlying Reader hangs", func() { + BeforeEach(func() { + innerReader = FakeReader{err: fmt.Errorf("boom"), duration: time.Hour} + }) + + It("returns ErrTimeout", func() { + p := make([]byte, 5) + _, err := timeoutReader.Read(p) + Ω(err).Should(MatchError(ErrTimeout)) + }) + }) + }) + + Describe("TimeoutWriter", func() { + var innerWriter io.Writer + var timeoutWriter io.Writer + + JustBeforeEach(func() { + timeoutWriter = TimeoutWriter(innerWriter, 20*time.Millisecond) + }) + + Context("when the underlying Writer returns no error", func() { + BeforeEach(func() { + innerWriter = FakeWriter{} + }) + + It("returns with no error", func() { + n, err := timeoutWriter.Write([]byte("aaaaa")) + Ω(n).Should(Equal(5)) + Ω(err).ShouldNot(HaveOccurred()) + }) + }) + + Context("when the underlying Writer returns an error", func() { + BeforeEach(func() { + innerWriter = FakeWriter{err: fmt.Errorf("boom")} + }) + + It("returns the error", func() { + _, err := timeoutWriter.Write([]byte("aaaaa")) + Ω(err).Should(MatchError("boom")) + }) + }) + + Context("when the 
underlying Writer hangs", func() { + BeforeEach(func() { + innerWriter = FakeWriter{err: fmt.Errorf("boom"), duration: time.Hour} + }) + + It("returns ErrTimeout", func() { + _, err := timeoutWriter.Write([]byte("aaaaa")) + Ω(err).Should(MatchError(ErrTimeout)) + }) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/gbytes/say_matcher.go b/vendor/github.com/onsi/gomega/gbytes/say_matcher.go new file mode 100644 index 000000000..cbc266c56 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gbytes/say_matcher.go @@ -0,0 +1,105 @@ +package gbytes + +import ( + "fmt" + "regexp" + + "github.com/onsi/gomega/format" +) + +//Objects satisfying the BufferProvider can be used with the Say matcher. +type BufferProvider interface { + Buffer() *Buffer +} + +/* +Say is a Gomega matcher that operates on gbytes.Buffers: + + Ω(buffer).Should(Say("something")) + +will succeed if the unread portion of the buffer matches the regular expression "something". + +When Say succeeds, it fast forwards the gbytes.Buffer's read cursor to just after the successful match. +Thus, subsequent calls to Say will only match against the unread portion of the buffer. + +Say pairs very well with Eventually. To assert that a buffer eventually receives data matching "[123]-star" within 3 seconds you can: + + Eventually(buffer, 3).Should(Say("[123]-star")) + +Ditto with Consistently. To assert that a buffer does not receive data matching "never-see-this" for 1 second you can: + + Consistently(buffer, 1).ShouldNot(Say("never-see-this")) + +In addition to gbytes.Buffers, Say can operate on objects that implement the gbytes.BufferProvider interface. +In such cases, Say simply operates on the *gbytes.Buffer returned by Buffer() + +If the buffer is closed, the Say matcher will tell Eventually to abort. +*/ +func Say(expected string, args ...interface{}) *sayMatcher { + formattedRegexp := expected + if len(args) > 0 { + formattedRegexp = fmt.Sprintf(expected, args...) + } + return &sayMatcher{ + re: regexp.MustCompile(formattedRegexp), + } +} + +type sayMatcher struct { + re *regexp.Regexp + receivedSayings []byte +} + +func (m *sayMatcher) buffer(actual interface{}) (*Buffer, bool) { + var buffer *Buffer + + switch x := actual.(type) { + case *Buffer: + buffer = x + case BufferProvider: + buffer = x.Buffer() + default: + return nil, false + } + + return buffer, true +} + +func (m *sayMatcher) Match(actual interface{}) (success bool, err error) { + buffer, ok := m.buffer(actual) + if !ok { + return false, fmt.Errorf("Say must be passed a *gbytes.Buffer or BufferProvider. 
Got:\n%s", format.Object(actual, 1)) + } + + didSay, sayings := buffer.didSay(m.re) + m.receivedSayings = sayings + + return didSay, nil +} + +func (m *sayMatcher) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf( + "Got stuck at:\n%s\nWaiting for:\n%s", + format.IndentString(string(m.receivedSayings), 1), + format.IndentString(m.re.String(), 1), + ) +} + +func (m *sayMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return fmt.Sprintf( + "Saw:\n%s\nWhich matches the unexpected:\n%s", + format.IndentString(string(m.receivedSayings), 1), + format.IndentString(m.re.String(), 1), + ) +} + +func (m *sayMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + switch x := actual.(type) { + case *Buffer: + return !x.Closed() + case BufferProvider: + return !x.Buffer().Closed() + default: + return true + } +} diff --git a/vendor/github.com/onsi/gomega/gbytes/say_matcher_test.go b/vendor/github.com/onsi/gomega/gbytes/say_matcher_test.go new file mode 100644 index 000000000..63fb3b3b8 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gbytes/say_matcher_test.go @@ -0,0 +1,163 @@ +package gbytes_test + +import ( + . "github.com/onsi/gomega/gbytes" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +type speaker struct { + buffer *Buffer +} + +func (s *speaker) Buffer() *Buffer { + return s.buffer +} + +var _ = Describe("SayMatcher", func() { + var buffer *Buffer + + BeforeEach(func() { + buffer = NewBuffer() + buffer.Write([]byte("abc")) + }) + + Context("when actual is not a gexec Buffer, or a BufferProvider", func() { + It("should error", func() { + failures := InterceptGomegaFailures(func() { + Ω("foo").Should(Say("foo")) + }) + Ω(failures[0]).Should(ContainSubstring("*gbytes.Buffer")) + }) + }) + + Context("when a match is found", func() { + It("should succeed", func() { + Ω(buffer).Should(Say("abc")) + }) + + It("should support printf-like formatting", func() { + Ω(buffer).Should(Say("a%sc", "b")) + }) + + It("should use a regular expression", func() { + Ω(buffer).Should(Say("a.c")) + }) + + It("should fastforward the buffer", func() { + buffer.Write([]byte("def")) + Ω(buffer).Should(Say("abcd")) + Ω(buffer).Should(Say("ef")) + Ω(buffer).ShouldNot(Say("[a-z]")) + }) + }) + + Context("when no match is found", func() { + It("should not error", func() { + Ω(buffer).ShouldNot(Say("def")) + }) + + Context("when the buffer is closed", func() { + BeforeEach(func() { + buffer.Close() + }) + + It("should abort an eventually", func() { + t := time.Now() + failures := InterceptGomegaFailures(func() { + Eventually(buffer).Should(Say("def")) + }) + Eventually(buffer).ShouldNot(Say("def")) + Ω(time.Since(t)).Should(BeNumerically("<", 500*time.Millisecond)) + Ω(failures).Should(HaveLen(1)) + + t = time.Now() + Eventually(buffer).Should(Say("abc")) + Ω(time.Since(t)).Should(BeNumerically("<", 500*time.Millisecond)) + }) + + It("should abort a consistently", func() { + t := time.Now() + Consistently(buffer, 2.0).ShouldNot(Say("def")) + Ω(time.Since(t)).Should(BeNumerically("<", 500*time.Millisecond)) + }) + + It("should not error with a synchronous matcher", func() { + Ω(buffer).ShouldNot(Say("def")) + Ω(buffer).Should(Say("abc")) + }) + }) + }) + + Context("when a positive match fails", func() { + It("should report where it got stuck", func() { + Ω(buffer).Should(Say("abc")) + buffer.Write([]byte("def")) + failures := InterceptGomegaFailures(func() { + Ω(buffer).Should(Say("abc")) + }) + Ω(failures[0]).Should(ContainSubstring("Got 
stuck at:")) + Ω(failures[0]).Should(ContainSubstring("def")) + }) + }) + + Context("when a negative match fails", func() { + It("should report where it got stuck", func() { + failures := InterceptGomegaFailures(func() { + Ω(buffer).ShouldNot(Say("abc")) + }) + Ω(failures[0]).Should(ContainSubstring("Saw:")) + Ω(failures[0]).Should(ContainSubstring("Which matches the unexpected:")) + Ω(failures[0]).Should(ContainSubstring("abc")) + }) + }) + + Context("when a match is not found", func() { + It("should not fastforward the buffer", func() { + Ω(buffer).ShouldNot(Say("def")) + Ω(buffer).Should(Say("abc")) + }) + }) + + Context("a nice real-life example", func() { + It("should behave well", func() { + Ω(buffer).Should(Say("abc")) + go func() { + time.Sleep(10 * time.Millisecond) + buffer.Write([]byte("def")) + }() + Ω(buffer).ShouldNot(Say("def")) + Eventually(buffer).Should(Say("def")) + }) + }) + + Context("when actual is a BufferProvider", func() { + It("should use actual's buffer", func() { + s := &speaker{ + buffer: NewBuffer(), + } + + Ω(s).ShouldNot(Say("abc")) + + s.Buffer().Write([]byte("abc")) + Ω(s).Should(Say("abc")) + }) + + It("should abort an eventually", func() { + s := &speaker{ + buffer: NewBuffer(), + } + + s.buffer.Close() + + t := time.Now() + failures := InterceptGomegaFailures(func() { + Eventually(s).Should(Say("def")) + }) + Ω(failures).Should(HaveLen(1)) + Ω(time.Since(t)).Should(BeNumerically("<", 500*time.Millisecond)) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/gexec/_fixture/firefly/main.go b/vendor/github.com/onsi/gomega/gexec/_fixture/firefly/main.go new file mode 100644 index 000000000..16091c22b --- /dev/null +++ b/vendor/github.com/onsi/gomega/gexec/_fixture/firefly/main.go @@ -0,0 +1,36 @@ +package main + +import ( + "fmt" + "math/rand" + "os" + "strconv" + "time" +) + +var outQuote = "We've done the impossible, and that makes us mighty." +var errQuote = "Ah, curse your sudden but inevitable betrayal!" + +var randomQuotes = []string{ + "Can we maybe vote on the whole murdering people issue?", + "I swear by my pretty floral bonnet, I will end you.", + "My work's illegal, but at least it's honest.", +} + +func main() { + fmt.Fprintln(os.Stdout, outQuote) + fmt.Fprintln(os.Stderr, errQuote) + + randomIndex := rand.New(rand.NewSource(time.Now().UnixNano())).Intn(len(randomQuotes)) + + time.Sleep(100 * time.Millisecond) + + fmt.Fprintln(os.Stdout, randomQuotes[randomIndex]) + + if len(os.Args) == 2 { + exitCode, _ := strconv.Atoi(os.Args[1]) + os.Exit(exitCode) + } else { + os.Exit(randomIndex) + } +} diff --git a/vendor/github.com/onsi/gomega/gexec/build.go b/vendor/github.com/onsi/gomega/gexec/build.go new file mode 100644 index 000000000..d11b2fd8a --- /dev/null +++ b/vendor/github.com/onsi/gomega/gexec/build.go @@ -0,0 +1,99 @@ +package gexec + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "runtime" + "sync" +) + +var ( + mu sync.Mutex + tmpDir string +) + +/* +Build uses go build to compile the package at packagePath. The resulting binary is saved off in a temporary directory. +A path pointing to this binary is returned. + +Build uses the $GOPATH set in your environment. It passes the variadic args on to `go build`. +*/ +func Build(packagePath string, args ...string) (compiledPath string, err error) { + return doBuild(os.Getenv("GOPATH"), packagePath, nil, args...) +} + +/* +BuildWithEnvironment is identical to Build but allows you to specify env vars to be set at build time. 
+*/ +func BuildWithEnvironment(packagePath string, env []string, args ...string) (compiledPath string, err error) { + return doBuild(os.Getenv("GOPATH"), packagePath, env, args...) +} + +/* +BuildIn is identical to Build but allows you to specify a custom $GOPATH (the first argument). +*/ +func BuildIn(gopath string, packagePath string, args ...string) (compiledPath string, err error) { + return doBuild(gopath, packagePath, nil, args...) +} + +func doBuild(gopath, packagePath string, env []string, args ...string) (compiledPath string, err error) { + tmpDir, err := temporaryDirectory() + if err != nil { + return "", err + } + + if len(gopath) == 0 { + return "", errors.New("$GOPATH not provided when building " + packagePath) + } + + executable := filepath.Join(tmpDir, path.Base(packagePath)) + if runtime.GOOS == "windows" { + executable = executable + ".exe" + } + + cmdArgs := append([]string{"build"}, args...) + cmdArgs = append(cmdArgs, "-o", executable, packagePath) + + build := exec.Command("go", cmdArgs...) + build.Env = append([]string{"GOPATH=" + gopath}, os.Environ()...) + build.Env = append(build.Env, env...) + + output, err := build.CombinedOutput() + if err != nil { + return "", fmt.Errorf("Failed to build %s:\n\nError:\n%s\n\nOutput:\n%s", packagePath, err, string(output)) + } + + return executable, nil +} + +/* +You should call CleanupBuildArtifacts before your test ends to clean up any temporary artifacts generated by +gexec. In Ginkgo this is typically done in an AfterSuite callback. +*/ +func CleanupBuildArtifacts() { + mu.Lock() + defer mu.Unlock() + if tmpDir != "" { + os.RemoveAll(tmpDir) + tmpDir = "" + } +} + +func temporaryDirectory() (string, error) { + var err error + mu.Lock() + defer mu.Unlock() + if tmpDir == "" { + tmpDir, err = ioutil.TempDir("", "gexec_artifacts") + if err != nil { + return "", err + } + } + + return ioutil.TempDir(tmpDir, "g") +} diff --git a/vendor/github.com/onsi/gomega/gexec/build_test.go b/vendor/github.com/onsi/gomega/gexec/build_test.go new file mode 100644 index 000000000..8df0790cd --- /dev/null +++ b/vendor/github.com/onsi/gomega/gexec/build_test.go @@ -0,0 +1,59 @@ +package gexec_test + +import ( + "os" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/onsi/gomega/gexec" +) + +var packagePath = "./_fixture/firefly" + +var _ = Describe(".Build", func() { + Context("when there have been previous calls to Build", func() { + BeforeEach(func() { + _, err := gexec.Build(packagePath) + Ω(err).ShouldNot(HaveOccurred()) + }) + + It("compiles the specified package", func() { + compiledPath, err := gexec.Build(packagePath) + Ω(err).ShouldNot(HaveOccurred()) + Ω(compiledPath).Should(BeAnExistingFile()) + }) + + Context("and CleanupBuildArtifacts has been called", func() { + BeforeEach(func() { + gexec.CleanupBuildArtifacts() + }) + + It("compiles the specified package", func() { + var err error + fireflyPath, err = gexec.Build(packagePath) + Ω(err).ShouldNot(HaveOccurred()) + Ω(fireflyPath).Should(BeAnExistingFile()) + }) + }) + }) +}) + +var _ = Describe(".BuildWithEnvironment", func() { + var err error + env := []string{ + "GOOS=linux", + "GOARCH=amd64", + } + + It("compiles the specified package with the specified env vars", func() { + compiledPath, err := gexec.BuildWithEnvironment(packagePath, env) + Ω(err).ShouldNot(HaveOccurred()) + Ω(compiledPath).Should(BeAnExistingFile()) + }) + + It("returns the environment to a good state", func() { + _, err = gexec.BuildWithEnvironment(packagePath, env) + Ω(err).ShouldNot(HaveOccurred()) + Ω(os.Environ()).ShouldNot(ContainElement("GOOS=linux")) + }) +}) diff --git a/vendor/github.com/onsi/gomega/gexec/exit_matcher.go b/vendor/github.com/onsi/gomega/gexec/exit_matcher.go new file mode 100644 index 000000000..d872ec8fd --- /dev/null +++ b/vendor/github.com/onsi/gomega/gexec/exit_matcher.go @@ -0,0 +1,86 @@ +package gexec + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +/* +The Exit matcher operates on a session: + + Ω(session).Should(Exit()) + +Exit passes if the session has already exited. + +If no status code is provided, then Exit will succeed if the session has exited regardless of exit code. +Otherwise, Exit will only succeed if the process has exited with the provided status code. + +Note that the process must have already exited. To wait for a process to exit, use Eventually: + + Eventually(session, 3).Should(Exit(0)) +*/ +func Exit(optionalExitCode ...int) *exitMatcher { + exitCode := -1 + if len(optionalExitCode) > 0 { + exitCode = optionalExitCode[0] + } + + return &exitMatcher{ + exitCode: exitCode, + } +} + +type exitMatcher struct { + exitCode int + didExit bool + actualExitCode int +} + +type Exiter interface { + ExitCode() int +} + +func (m *exitMatcher) Match(actual interface{}) (success bool, err error) { + exiter, ok := actual.(Exiter) + if !ok { + return false, fmt.Errorf("Exit must be passed a gexec.Exiter (Missing method ExitCode() int) Got:\n%s", format.Object(actual, 1)) + } + + m.actualExitCode = exiter.ExitCode() + + if m.actualExitCode == -1 { + return false, nil + } + + if m.exitCode == -1 { + return true, nil + } + return m.exitCode == m.actualExitCode, nil +} + +func (m *exitMatcher) FailureMessage(actual interface{}) (message string) { + if m.actualExitCode == -1 { + return "Expected process to exit. It did not." + } + return format.Message(m.actualExitCode, "to match exit code:", m.exitCode) +} + +func (m *exitMatcher) NegatedFailureMessage(actual interface{}) (message string) { + if m.actualExitCode == -1 { + return "you really shouldn't be able to see this!" + } else { + if m.exitCode == -1 { + return "Expected process not to exit. It did." 
+ } + return format.Message(m.actualExitCode, "not to match exit code:", m.exitCode) + } +} + +func (m *exitMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + session, ok := actual.(*Session) + if ok { + return session.ExitCode() == -1 + } + return true +} diff --git a/vendor/github.com/onsi/gomega/gexec/exit_matcher_test.go b/vendor/github.com/onsi/gomega/gexec/exit_matcher_test.go new file mode 100644 index 000000000..79615ddf8 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gexec/exit_matcher_test.go @@ -0,0 +1,113 @@ +package gexec_test + +import ( + . "github.com/onsi/gomega/gexec" + "os/exec" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +type NeverExits struct{} + +func (e NeverExits) ExitCode() int { + return -1 +} + +var _ = Describe("ExitMatcher", func() { + var command *exec.Cmd + var session *Session + + BeforeEach(func() { + var err error + command = exec.Command(fireflyPath, "0") + session, err = Start(command, nil, nil) + Ω(err).ShouldNot(HaveOccurred()) + }) + + Describe("when passed something that is an Exiter", func() { + It("should act normally", func() { + failures := InterceptGomegaFailures(func() { + Ω(NeverExits{}).Should(Exit()) + }) + + Ω(failures[0]).Should(ContainSubstring("Expected process to exit. It did not.")) + }) + }) + + Describe("when passed something that is not an Exiter", func() { + It("should error", func() { + failures := InterceptGomegaFailures(func() { + Ω("aardvark").Should(Exit()) + }) + + Ω(failures[0]).Should(ContainSubstring("Exit must be passed a gexec.Exiter")) + }) + }) + + Context("with no exit code", func() { + It("should say the right things when it fails", func() { + Ω(session).ShouldNot(Exit()) + + failures := InterceptGomegaFailures(func() { + Ω(session).Should(Exit()) + }) + + Ω(failures[0]).Should(ContainSubstring("Expected process to exit. It did not.")) + + Eventually(session).Should(Exit()) + + Ω(session).Should(Exit()) + + failures = InterceptGomegaFailures(func() { + Ω(session).ShouldNot(Exit()) + }) + + Ω(failures[0]).Should(ContainSubstring("Expected process not to exit. It did.")) + }) + }) + + Context("with an exit code", func() { + It("should say the right things when it fails", func() { + Ω(session).ShouldNot(Exit(0)) + Ω(session).ShouldNot(Exit(1)) + + failures := InterceptGomegaFailures(func() { + Ω(session).Should(Exit(0)) + }) + + Ω(failures[0]).Should(ContainSubstring("Expected process to exit. It did not.")) + + Eventually(session).Should(Exit(0)) + + Ω(session).Should(Exit(0)) + + failures = InterceptGomegaFailures(func() { + Ω(session).Should(Exit(1)) + }) + + Ω(failures[0]).Should(ContainSubstring("to match exit code:")) + + Ω(session).ShouldNot(Exit(1)) + + failures = InterceptGomegaFailures(func() { + Ω(session).ShouldNot(Exit(0)) + }) + + Ω(failures[0]).Should(ContainSubstring("not to match exit code:")) + }) + }) + + Describe("bailing out early", func() { + It("should bail out early once the process exits", func() { + t := time.Now() + + failures := InterceptGomegaFailures(func() { + Eventually(session).Should(Exit(1)) + }) + Ω(time.Since(t)).Should(BeNumerically("<=", 500*time.Millisecond)) + Ω(failures).Should(HaveLen(1)) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/gexec/gexec_suite_test.go b/vendor/github.com/onsi/gomega/gexec/gexec_suite_test.go new file mode 100644 index 000000000..87672aafa --- /dev/null +++ b/vendor/github.com/onsi/gomega/gexec/gexec_suite_test.go @@ -0,0 +1,26 @@ +package gexec_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/onsi/gomega/gexec" + + "testing" +) + +var fireflyPath string + +func TestGexec(t *testing.T) { + BeforeSuite(func() { + var err error + fireflyPath, err = gexec.Build("./_fixture/firefly") + Ω(err).ShouldNot(HaveOccurred()) + }) + + AfterSuite(func() { + gexec.CleanupBuildArtifacts() + }) + + RegisterFailHandler(Fail) + RunSpecs(t, "Gexec Suite") +} diff --git a/vendor/github.com/onsi/gomega/gexec/prefixed_writer.go b/vendor/github.com/onsi/gomega/gexec/prefixed_writer.go new file mode 100644 index 000000000..05e695abc --- /dev/null +++ b/vendor/github.com/onsi/gomega/gexec/prefixed_writer.go @@ -0,0 +1,53 @@ +package gexec + +import ( + "io" + "sync" +) + +/* +PrefixedWriter wraps an io.Writer, emiting the passed in prefix at the beginning of each new line. +This can be useful when running multiple gexec.Sessions concurrently - you can prefix the log output of each +session by passing in a PrefixedWriter: + +gexec.Start(cmd, NewPrefixedWriter("[my-cmd] ", GinkgoWriter), NewPrefixedWriter("[my-cmd] ", GinkgoWriter)) +*/ +type PrefixedWriter struct { + prefix []byte + writer io.Writer + lock *sync.Mutex + atStartOfLine bool +} + +func NewPrefixedWriter(prefix string, writer io.Writer) *PrefixedWriter { + return &PrefixedWriter{ + prefix: []byte(prefix), + writer: writer, + lock: &sync.Mutex{}, + atStartOfLine: true, + } +} + +func (w *PrefixedWriter) Write(b []byte) (int, error) { + w.lock.Lock() + defer w.lock.Unlock() + + toWrite := []byte{} + + for _, c := range b { + if w.atStartOfLine { + toWrite = append(toWrite, w.prefix...) + } + + toWrite = append(toWrite, c) + + w.atStartOfLine = c == '\n' + } + + _, err := w.writer.Write(toWrite) + if err != nil { + return 0, err + } + + return len(b), nil +} diff --git a/vendor/github.com/onsi/gomega/gexec/prefixed_writer_test.go b/vendor/github.com/onsi/gomega/gexec/prefixed_writer_test.go new file mode 100644 index 000000000..8657d0c9d --- /dev/null +++ b/vendor/github.com/onsi/gomega/gexec/prefixed_writer_test.go @@ -0,0 +1,43 @@ +package gexec_test + +import ( + "bytes" + + . "github.com/onsi/gomega/gexec" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("PrefixedWriter", func() { + var buffer *bytes.Buffer + var writer *PrefixedWriter + BeforeEach(func() { + buffer = &bytes.Buffer{} + writer = NewPrefixedWriter("[p]", buffer) + }) + + It("should emit the prefix on newlines", func() { + writer.Write([]byte("abc")) + writer.Write([]byte("def\n")) + writer.Write([]byte("hij\n")) + writer.Write([]byte("\n\n")) + writer.Write([]byte("klm\n\nnop")) + writer.Write([]byte("")) + writer.Write([]byte("qrs")) + writer.Write([]byte("\ntuv\nwx")) + writer.Write([]byte("yz\n\n")) + + Ω(buffer.String()).Should(Equal(`[p]abcdef +[p]hij +[p] +[p] +[p]klm +[p] +[p]nopqrs +[p]tuv +[p]wxyz +[p] +`)) + }) +}) diff --git a/vendor/github.com/onsi/gomega/gexec/session.go b/vendor/github.com/onsi/gomega/gexec/session.go new file mode 100644 index 000000000..387a72cde --- /dev/null +++ b/vendor/github.com/onsi/gomega/gexec/session.go @@ -0,0 +1,305 @@ +/* +Package gexec provides support for testing external processes. +*/ +package gexec + +import ( + "io" + "os" + "os/exec" + "reflect" + "sync" + "syscall" + + . 
"github.com/onsi/gomega" + "github.com/onsi/gomega/gbytes" +) + +const INVALID_EXIT_CODE = 254 + +type Session struct { + //The wrapped command + Command *exec.Cmd + + //A *gbytes.Buffer connected to the command's stdout + Out *gbytes.Buffer + + //A *gbytes.Buffer connected to the command's stderr + Err *gbytes.Buffer + + //A channel that will close when the command exits + Exited <-chan struct{} + + lock *sync.Mutex + exitCode int +} + +/* +Start starts the passed-in *exec.Cmd command. It wraps the command in a *gexec.Session. + +The session pipes the command's stdout and stderr to two *gbytes.Buffers available as properties on the session: session.Out and session.Err. +These buffers can be used with the gbytes.Say matcher to match against unread output: + + Ω(session.Out).Should(gbytes.Say("foo-out")) + Ω(session.Err).Should(gbytes.Say("foo-err")) + +In addition, Session satisfies the gbytes.BufferProvider interface and provides the stdout *gbytes.Buffer. This allows you to replace the first line, above, with: + + Ω(session).Should(gbytes.Say("foo-out")) + +When outWriter and/or errWriter are non-nil, the session will pipe stdout and/or stderr output both into the session *gybtes.Buffers and to the passed-in outWriter/errWriter. +This is useful for capturing the process's output or logging it to screen. In particular, when using Ginkgo it can be convenient to direct output to the GinkgoWriter: + + session, err := Start(command, GinkgoWriter, GinkgoWriter) + +This will log output when running tests in verbose mode, but - otherwise - will only log output when a test fails. + +The session wrapper is responsible for waiting on the *exec.Cmd command. You *should not* call command.Wait() yourself. +Instead, to assert that the command has exited you can use the gexec.Exit matcher: + + Ω(session).Should(gexec.Exit()) + +When the session exits it closes the stdout and stderr gbytes buffers. This will short circuit any +Eventuallys waiting for the buffers to Say something. +*/ +func Start(command *exec.Cmd, outWriter io.Writer, errWriter io.Writer) (*Session, error) { + exited := make(chan struct{}) + + session := &Session{ + Command: command, + Out: gbytes.NewBuffer(), + Err: gbytes.NewBuffer(), + Exited: exited, + lock: &sync.Mutex{}, + exitCode: -1, + } + + var commandOut, commandErr io.Writer + + commandOut, commandErr = session.Out, session.Err + + if outWriter != nil && !reflect.ValueOf(outWriter).IsNil() { + commandOut = io.MultiWriter(commandOut, outWriter) + } + + if errWriter != nil && !reflect.ValueOf(errWriter).IsNil() { + commandErr = io.MultiWriter(commandErr, errWriter) + } + + command.Stdout = commandOut + command.Stderr = commandErr + + err := command.Start() + if err == nil { + go session.monitorForExit(exited) + trackedSessionsMutex.Lock() + defer trackedSessionsMutex.Unlock() + trackedSessions = append(trackedSessions, session) + } + + return session, err +} + +/* +Buffer implements the gbytes.BufferProvider interface and returns s.Out +This allows you to make gbytes.Say matcher assertions against stdout without having to reference .Out: + + Eventually(session).Should(gbytes.Say("foo")) +*/ +func (s *Session) Buffer() *gbytes.Buffer { + return s.Out +} + +/* +ExitCode returns the wrapped command's exit code. If the command hasn't exited yet, ExitCode returns -1. 
+ +To assert that the command has exited it is more convenient to use the Exit matcher: + + Eventually(s).Should(gexec.Exit()) + +When the process exits because it has received a particular signal, the exit code will be 128+signal-value +(See http://www.tldp.org/LDP/abs/html/exitcodes.html and http://man7.org/linux/man-pages/man7/signal.7.html) + +*/ +func (s *Session) ExitCode() int { + s.lock.Lock() + defer s.lock.Unlock() + return s.exitCode +} + +/* +Wait waits until the wrapped command exits. It can be passed an optional timeout. +If the command does not exit within the timeout, Wait will trigger a test failure. + +Wait returns the session, making it possible to chain: + + session.Wait().Out.Contents() + +will wait for the command to exit then return the entirety of Out's contents. + +Wait uses Eventually under the hood and accepts the same timeout/polling intervals that Eventually does. +*/ +func (s *Session) Wait(timeout ...interface{}) *Session { + EventuallyWithOffset(1, s, timeout...).Should(Exit()) + return s +} + +/* +Kill sends the running command a SIGKILL signal. It does not wait for the process to exit. + +If the command has already exited, Kill returns silently. + +The session is returned to enable chaining. +*/ +func (s *Session) Kill() *Session { + if s.ExitCode() != -1 { + return s + } + s.Command.Process.Kill() + return s +} + +/* +Interrupt sends the running command a SIGINT signal. It does not wait for the process to exit. + +If the command has already exited, Interrupt returns silently. + +The session is returned to enable chaining. +*/ +func (s *Session) Interrupt() *Session { + return s.Signal(syscall.SIGINT) +} + +/* +Terminate sends the running command a SIGTERM signal. It does not wait for the process to exit. + +If the command has already exited, Terminate returns silently. + +The session is returned to enable chaining. +*/ +func (s *Session) Terminate() *Session { + return s.Signal(syscall.SIGTERM) +} + +/* +Signal sends the running command the passed in signal. It does not wait for the process to exit. + +If the command has already exited, Signal returns silently. + +The session is returned to enable chaining. +*/ +func (s *Session) Signal(signal os.Signal) *Session { + if s.ExitCode() != -1 { + return s + } + s.Command.Process.Signal(signal) + return s +} + +func (s *Session) monitorForExit(exited chan<- struct{}) { + err := s.Command.Wait() + s.lock.Lock() + s.Out.Close() + s.Err.Close() + status := s.Command.ProcessState.Sys().(syscall.WaitStatus) + if status.Signaled() { + s.exitCode = 128 + int(status.Signal()) + } else { + exitStatus := status.ExitStatus() + if exitStatus == -1 && err != nil { + s.exitCode = INVALID_EXIT_CODE + } else { + s.exitCode = exitStatus + } + } + s.lock.Unlock() + + close(exited) +} + +var trackedSessions = []*Session{} +var trackedSessionsMutex = &sync.Mutex{} + +/* +KillAndWait sends a SIGKILL signal to all the processes started by Start, and waits for them to exit. +The timeout specified is applied to each process killed. + +If any of the processes already exited, KillAndWait returns silently. +*/ +func KillAndWait(timeout ...interface{}) { + trackedSessionsMutex.Lock() + defer trackedSessionsMutex.Unlock() + for _, session := range trackedSessions { + session.Kill().Wait(timeout...) + } + trackedSessions = []*Session{} +} + +/* +TerminateAndWait sends a SIGTERM signal to all the processes started by Start, and waits for them to exit. +The timeout specified is applied to each process killed. 
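+ +For example (illustrative), as a Ginkgo suite teardown: + + AfterEach(func() { + gexec.TerminateAndWait(2 * time.Second) + })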
+
+If any of the processes already exited, TerminateAndWait returns silently.
+*/
+func TerminateAndWait(timeout ...interface{}) {
+	trackedSessionsMutex.Lock()
+	defer trackedSessionsMutex.Unlock()
+	for _, session := range trackedSessions {
+		session.Terminate().Wait(timeout...)
+	}
+}
+
+/*
+Kill sends a SIGKILL signal to all the processes started by Start.
+It does not wait for the processes to exit.
+
+If any of the processes already exited, Kill returns silently.
+*/
+func Kill() {
+	trackedSessionsMutex.Lock()
+	defer trackedSessionsMutex.Unlock()
+	for _, session := range trackedSessions {
+		session.Kill()
+	}
+}
+
+/*
+Terminate sends a SIGTERM signal to all the processes started by Start.
+It does not wait for the processes to exit.
+
+If any of the processes already exited, Terminate returns silently.
+*/
+func Terminate() {
+	trackedSessionsMutex.Lock()
+	defer trackedSessionsMutex.Unlock()
+	for _, session := range trackedSessions {
+		session.Terminate()
+	}
+}
+
+/*
+Signal sends the passed in signal to all the processes started by Start.
+It does not wait for the processes to exit.
+
+If any of the processes already exited, Signal returns silently.
+*/
+func Signal(signal os.Signal) {
+	trackedSessionsMutex.Lock()
+	defer trackedSessionsMutex.Unlock()
+	for _, session := range trackedSessions {
+		session.Signal(signal)
+	}
+}
+
+/*
+Interrupt sends the SIGINT signal to all the processes started by Start.
+It does not wait for the processes to exit.
+
+If any of the processes already exited, Interrupt returns silently.
+*/
+func Interrupt() {
+	trackedSessionsMutex.Lock()
+	defer trackedSessionsMutex.Unlock()
+	for _, session := range trackedSessions {
+		session.Interrupt()
+	}
+}
diff --git a/vendor/github.com/onsi/gomega/gexec/session_test.go b/vendor/github.com/onsi/gomega/gexec/session_test.go
new file mode 100644
index 000000000..b7841a090
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/gexec/session_test.go
@@ -0,0 +1,351 @@
+package gexec_test
+
+import (
+	"os/exec"
+	"syscall"
+	"time"
+
+	. "github.com/onsi/gomega/gbytes"
+	. "github.com/onsi/gomega/gexec"
+
+	. "github.com/onsi/ginkgo"
+	. 
"github.com/onsi/gomega" +) + +var _ = Describe("Session", func() { + var command *exec.Cmd + var session *Session + + var outWriter, errWriter *Buffer + + BeforeEach(func() { + outWriter = nil + errWriter = nil + }) + + JustBeforeEach(func() { + command = exec.Command(fireflyPath) + var err error + session, err = Start(command, outWriter, errWriter) + Ω(err).ShouldNot(HaveOccurred()) + }) + + Context("running a command", func() { + It("should start the process", func() { + Ω(command.Process).ShouldNot(BeNil()) + }) + + It("should wrap the process's stdout and stderr with gbytes buffers", func(done Done) { + Eventually(session.Out).Should(Say("We've done the impossible, and that makes us mighty")) + Eventually(session.Err).Should(Say("Ah, curse your sudden but inevitable betrayal!")) + defer session.Out.CancelDetects() + + select { + case <-session.Out.Detect("Can we maybe vote on the whole murdering people issue"): + Eventually(session).Should(Exit(0)) + case <-session.Out.Detect("I swear by my pretty floral bonnet, I will end you."): + Eventually(session).Should(Exit(1)) + case <-session.Out.Detect("My work's illegal, but at least it's honest."): + Eventually(session).Should(Exit(2)) + } + + close(done) + }) + + It("should satisfy the gbytes.BufferProvider interface, passing Stdout", func() { + Eventually(session).Should(Say("We've done the impossible, and that makes us mighty")) + Eventually(session).Should(Exit()) + }) + }) + + Describe("providing the exit code", func() { + It("should provide the app's exit code", func() { + Ω(session.ExitCode()).Should(Equal(-1)) + + Eventually(session).Should(Exit()) + Ω(session.ExitCode()).Should(BeNumerically(">=", 0)) + Ω(session.ExitCode()).Should(BeNumerically("<", 3)) + }) + }) + + Describe("wait", func() { + It("should wait till the command exits", func() { + Ω(session.ExitCode()).Should(Equal(-1)) + Ω(session.Wait().ExitCode()).Should(BeNumerically(">=", 0)) + Ω(session.Wait().ExitCode()).Should(BeNumerically("<", 3)) + }) + }) + + Describe("exited", func() { + It("should close when the command exits", func() { + Eventually(session.Exited).Should(BeClosed()) + Ω(session.ExitCode()).ShouldNot(Equal(-1)) + }) + }) + + Describe("kill", func() { + It("should kill the command and don't wait for it to exit", func() { + session, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Ω(err).ShouldNot(HaveOccurred()) + + session.Kill() + Ω(session).ShouldNot(Exit(), "Should not exit immediately...") + Eventually(session).Should(Exit(128 + 9)) + }) + }) + + Describe("interrupt", func() { + It("should interrupt the command", func() { + session, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Ω(err).ShouldNot(HaveOccurred()) + + session.Interrupt() + Ω(session).ShouldNot(Exit(), "Should not exit immediately...") + Eventually(session).Should(Exit(128 + 2)) + }) + }) + + Describe("terminate", func() { + It("should terminate the command", func() { + session, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Ω(err).ShouldNot(HaveOccurred()) + + session.Terminate() + Ω(session).ShouldNot(Exit(), "Should not exit immediately...") + Eventually(session).Should(Exit(128 + 15)) + }) + }) + + Describe("signal", func() { + It("should send the signal to the command", func() { + session, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Ω(err).ShouldNot(HaveOccurred()) + + session.Signal(syscall.SIGABRT) + Ω(session).ShouldNot(Exit(), "Should not exit 
immediately...") + Eventually(session).Should(Exit(128 + 6)) + }) + }) + + Context("tracking sessions", func() { + BeforeEach(func() { + KillAndWait() + }) + + Describe("kill", func() { + It("should kill all the started sessions", func() { + session1, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Ω(err).ShouldNot(HaveOccurred()) + + session2, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Ω(err).ShouldNot(HaveOccurred()) + + session3, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Ω(err).ShouldNot(HaveOccurred()) + + Kill() + + Eventually(session1).Should(Exit(128 + 9)) + Eventually(session2).Should(Exit(128 + 9)) + Eventually(session3).Should(Exit(128 + 9)) + }) + + It("should not wait for exit", func() { + session1, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Ω(err).ShouldNot(HaveOccurred()) + + Kill() + Ω(session1).ShouldNot(Exit(), "Should not exit immediately...") + + Eventually(session1).Should(Exit(128 + 9)) + }) + + It("should not track unstarted sessions", func() { + _, err := Start(exec.Command("does not exist", "10000000"), GinkgoWriter, GinkgoWriter) + Ω(err).Should(HaveOccurred()) + + session2, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Ω(err).ShouldNot(HaveOccurred()) + + session3, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Ω(err).ShouldNot(HaveOccurred()) + + Kill() + + Eventually(session2).Should(Exit(128 + 9)) + Eventually(session3).Should(Exit(128 + 9)) + }) + + }) + + Describe("killAndWait", func() { + It("should kill all the started sessions and wait for them to finish", func() { + session1, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Ω(err).ShouldNot(HaveOccurred()) + + session2, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Ω(err).ShouldNot(HaveOccurred()) + + session3, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Ω(err).ShouldNot(HaveOccurred()) + + KillAndWait() + Ω(session1).Should(Exit(128+9), "Should have exited") + Ω(session2).Should(Exit(128+9), "Should have exited") + Ω(session3).Should(Exit(128+9), "Should have exited") + }) + }) + + Describe("terminate", func() { + It("should terminate all the started sessions", func() { + session1, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Ω(err).ShouldNot(HaveOccurred()) + + session2, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Ω(err).ShouldNot(HaveOccurred()) + + session3, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Ω(err).ShouldNot(HaveOccurred()) + + Terminate() + + Eventually(session1).Should(Exit(128 + 15)) + Eventually(session2).Should(Exit(128 + 15)) + Eventually(session3).Should(Exit(128 + 15)) + }) + + It("should not wait for exit", func() { + session1, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Ω(err).ShouldNot(HaveOccurred()) + + Terminate() + + Ω(session1).ShouldNot(Exit(), "Should not exit immediately...") + }) + }) + + Describe("terminateAndWait", func() { + It("should terminate all the started sessions, and wait for them to exit", func() { + session1, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Ω(err).ShouldNot(HaveOccurred()) + + session2, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + 
Ω(err).ShouldNot(HaveOccurred()) + + session3, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Ω(err).ShouldNot(HaveOccurred()) + + TerminateAndWait() + + Ω(session1).Should(Exit(128+15), "Should have exited") + Ω(session2).Should(Exit(128+15), "Should have exited") + Ω(session3).Should(Exit(128+15), "Should have exited") + }) + }) + + Describe("signal", func() { + It("should signal all the started sessions", func() { + session1, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Ω(err).ShouldNot(HaveOccurred()) + + session2, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Ω(err).ShouldNot(HaveOccurred()) + + session3, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Ω(err).ShouldNot(HaveOccurred()) + + Signal(syscall.SIGABRT) + + Eventually(session1).Should(Exit(128 + 6)) + Eventually(session2).Should(Exit(128 + 6)) + Eventually(session3).Should(Exit(128 + 6)) + }) + + It("should not wait", func() { + session1, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Ω(err).ShouldNot(HaveOccurred()) + + Signal(syscall.SIGABRT) + + Ω(session1).ShouldNot(Exit(), "Should not exit immediately...") + }) + }) + + Describe("interrupt", func() { + It("should interrupt all the started sessions, and not wait", func() { + session1, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Ω(err).ShouldNot(HaveOccurred()) + + session2, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Ω(err).ShouldNot(HaveOccurred()) + + session3, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Ω(err).ShouldNot(HaveOccurred()) + + Interrupt() + + Eventually(session1).Should(Exit(128 + 2)) + Eventually(session2).Should(Exit(128 + 2)) + Eventually(session3).Should(Exit(128 + 2)) + }) + + It("should not wait", func() { + session1, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) + Ω(err).ShouldNot(HaveOccurred()) + + Interrupt() + + Ω(session1).ShouldNot(Exit(), "Should not exit immediately...") + }) + }) + }) + + Context("when the command exits", func() { + It("should close the buffers", func() { + Eventually(session).Should(Exit()) + + Ω(session.Out.Closed()).Should(BeTrue()) + Ω(session.Err.Closed()).Should(BeTrue()) + + Ω(session.Out).Should(Say("We've done the impossible, and that makes us mighty")) + }) + + var So = It + + So("this means that eventually should short circuit", func() { + t := time.Now() + failures := InterceptGomegaFailures(func() { + Eventually(session).Should(Say("blah blah blah blah blah")) + }) + Ω(time.Since(t)).Should(BeNumerically("<=", 500*time.Millisecond)) + Ω(failures).Should(HaveLen(1)) + }) + }) + + Context("when wrapping out and err", func() { + BeforeEach(func() { + outWriter = NewBuffer() + errWriter = NewBuffer() + }) + + It("should route to both the provided writers and the gbytes buffers", func() { + Eventually(session.Out).Should(Say("We've done the impossible, and that makes us mighty")) + Eventually(session.Err).Should(Say("Ah, curse your sudden but inevitable betrayal!")) + + Ω(outWriter.Contents()).Should(ContainSubstring("We've done the impossible, and that makes us mighty")) + Ω(errWriter.Contents()).Should(ContainSubstring("Ah, curse your sudden but inevitable betrayal!")) + + Eventually(session).Should(Exit()) + + Ω(outWriter.Contents()).Should(Equal(session.Out.Contents())) + 
Ω(errWriter.Contents()).Should(Equal(session.Err.Contents()))
+		})
+	})
+
+	Describe("when the command fails to start", func() {
+		It("should return an error", func() {
+			_, err := Start(exec.Command("agklsjdfas"), nil, nil)
+			Ω(err).Should(HaveOccurred())
+		})
+	})
+})
diff --git a/vendor/github.com/onsi/gomega/ghttp/handlers.go b/vendor/github.com/onsi/gomega/ghttp/handlers.go
new file mode 100644
index 000000000..63ff6919a
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/ghttp/handlers.go
@@ -0,0 +1,313 @@
+package ghttp
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"reflect"
+
+	"github.com/golang/protobuf/proto"
+	. "github.com/onsi/gomega"
+	"github.com/onsi/gomega/types"
+)
+
+//CombineHandlers takes a variadic list of handlers and produces one handler
+//that calls each handler in order.
+func CombineHandlers(handlers ...http.HandlerFunc) http.HandlerFunc {
+	return func(w http.ResponseWriter, req *http.Request) {
+		for _, handler := range handlers {
+			handler(w, req)
+		}
+	}
+}
+
+//VerifyRequest returns a handler that verifies that a request uses the specified method to connect to the specified path.
+//You may also pass in an optional rawQuery string which is tested against the request's `req.URL.RawQuery`
+//
+//For path, you may pass in a string, in which case strict equality will be applied
+//Alternatively you can pass in a matcher (ContainSubstring("/foo") and MatchRegexp("/foo/[a-f0-9]+") for example)
+func VerifyRequest(method string, path interface{}, rawQuery ...string) http.HandlerFunc {
+	return func(w http.ResponseWriter, req *http.Request) {
+		Ω(req.Method).Should(Equal(method), "Method mismatch")
+		switch p := path.(type) {
+		case types.GomegaMatcher:
+			Ω(req.URL.Path).Should(p, "Path mismatch")
+		default:
+			Ω(req.URL.Path).Should(Equal(path), "Path mismatch")
+		}
+		if len(rawQuery) > 0 {
+			values, err := url.ParseQuery(rawQuery[0])
+			Ω(err).ShouldNot(HaveOccurred(), "Expected RawQuery is malformed")
+
+			Ω(req.URL.Query()).Should(Equal(values), "RawQuery mismatch")
+		}
+	}
+}
+
+//VerifyContentType returns a handler that verifies that a request has a Content-Type header set to the
+//specified value
+func VerifyContentType(contentType string) http.HandlerFunc {
+	return func(w http.ResponseWriter, req *http.Request) {
+		Ω(req.Header.Get("Content-Type")).Should(Equal(contentType))
+	}
+}
+
+//VerifyBasicAuth returns a handler that verifies the request contains a BasicAuth Authorization header
+//matching the passed in username and password
+func VerifyBasicAuth(username string, password string) http.HandlerFunc {
+	return func(w http.ResponseWriter, req *http.Request) {
+		auth := req.Header.Get("Authorization")
+		Ω(auth).ShouldNot(Equal(""), "Authorization header must be specified")
+
+		decoded, err := base64.StdEncoding.DecodeString(auth[6:])
+		Ω(err).ShouldNot(HaveOccurred())
+
+		Ω(string(decoded)).Should(Equal(fmt.Sprintf("%s:%s", username, password)), "Authorization mismatch")
+	}
+}
+
+//VerifyHeader returns a handler that verifies the request contains the passed in headers.
+//The passed in header keys are first canonicalized via http.CanonicalHeaderKey.
+//
+//The request must contain *all* the passed in headers, but it is allowed to have additional headers
+//beyond the passed in set.
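+//
+//For example, a minimal sketch (assuming s is a *ghttp.Server under test):
+//
+//	s.AppendHandlers(CombineHandlers(
+//		VerifyRequest("GET", "/foo"),
+//		VerifyHeader(http.Header{"Accept": []string{"application/json"}}),
+//	))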
+func VerifyHeader(header http.Header) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + for key, values := range header { + key = http.CanonicalHeaderKey(key) + Ω(req.Header[key]).Should(Equal(values), "Header mismatch for key: %s", key) + } + } +} + +//VerifyHeaderKV returns a handler that verifies the request contains a header matching the passed in key and values +//(recall that a `http.Header` is a mapping from string (key) to []string (values)) +//It is a convenience wrapper around `VerifyHeader` that allows you to avoid having to create an `http.Header` object. +func VerifyHeaderKV(key string, values ...string) http.HandlerFunc { + return VerifyHeader(http.Header{key: values}) +} + +//VerifyBody returns a handler that verifies that the body of the request matches the passed in byte array. +//It does this using Equal(). +func VerifyBody(expectedBody []byte) http.HandlerFunc { + return CombineHandlers( + func(w http.ResponseWriter, req *http.Request) { + body, err := ioutil.ReadAll(req.Body) + req.Body.Close() + Ω(err).ShouldNot(HaveOccurred()) + Ω(body).Should(Equal(expectedBody), "Body Mismatch") + }, + ) +} + +//VerifyJSON returns a handler that verifies that the body of the request is a valid JSON representation +//matching the passed in JSON string. It does this using Gomega's MatchJSON method +// +//VerifyJSON also verifies that the request's content type is application/json +func VerifyJSON(expectedJSON string) http.HandlerFunc { + return CombineHandlers( + VerifyContentType("application/json"), + func(w http.ResponseWriter, req *http.Request) { + body, err := ioutil.ReadAll(req.Body) + req.Body.Close() + Ω(err).ShouldNot(HaveOccurred()) + Ω(body).Should(MatchJSON(expectedJSON), "JSON Mismatch") + }, + ) +} + +//VerifyJSONRepresenting is similar to VerifyJSON. Instead of taking a JSON string, however, it +//takes an arbitrary JSON-encodable object and verifies that the requests's body is a JSON representation +//that matches the object +func VerifyJSONRepresenting(object interface{}) http.HandlerFunc { + data, err := json.Marshal(object) + Ω(err).ShouldNot(HaveOccurred()) + return CombineHandlers( + VerifyContentType("application/json"), + VerifyJSON(string(data)), + ) +} + +//VerifyForm returns a handler that verifies a request contains the specified form values. +// +//The request must contain *all* of the specified values, but it is allowed to have additional +//form values beyond the passed in set. +func VerifyForm(values url.Values) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + err := r.ParseForm() + Ω(err).ShouldNot(HaveOccurred()) + for key, vals := range values { + Ω(r.Form[key]).Should(Equal(vals), "Form mismatch for key: %s", key) + } + } +} + +//VerifyFormKV returns a handler that verifies a request contains a form key with the specified values. +// +//It is a convenience wrapper around `VerifyForm` that lets you avoid having to create a `url.Values` object. +func VerifyFormKV(key string, values ...string) http.HandlerFunc { + return VerifyForm(url.Values{key: values}) +} + +//VerifyProtoRepresenting returns a handler that verifies that the body of the request is a valid protobuf +//representation of the passed message. 
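+//
+//For example, a minimal sketch (assuming s is a *ghttp.Server and message is a proto.Message constructed by the test):
+//
+//	s.AppendHandlers(CombineHandlers(
+//		VerifyRequest("POST", "/proto"),
+//		VerifyProtoRepresenting(message),
+//	))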
+//
+//VerifyProtoRepresenting also verifies that the request's content type is application/x-protobuf
+func VerifyProtoRepresenting(expected proto.Message) http.HandlerFunc {
+	return CombineHandlers(
+		VerifyContentType("application/x-protobuf"),
+		func(w http.ResponseWriter, req *http.Request) {
+			body, err := ioutil.ReadAll(req.Body)
+			Ω(err).ShouldNot(HaveOccurred())
+			req.Body.Close()
+
+			expectedType := reflect.TypeOf(expected)
+			actualValuePtr := reflect.New(expectedType.Elem())
+
+			actual, ok := actualValuePtr.Interface().(proto.Message)
+			Ω(ok).Should(BeTrue(), "Message value is not a proto.Message")
+
+			err = proto.Unmarshal(body, actual)
+			Ω(err).ShouldNot(HaveOccurred(), "Failed to unmarshal protobuf")
+
+			Ω(actual).Should(Equal(expected), "ProtoBuf Mismatch")
+		},
+	)
+}
+
+func copyHeader(src http.Header, dst http.Header) {
+	for key, value := range src {
+		dst[key] = value
+	}
+}
+
+/*
+RespondWith returns a handler that responds to a request with the specified status code and body
+
+Body may be a string or []byte
+
+Also, RespondWith can be given an optional http.Header. The headers defined therein will be added to the response headers.
+*/
+func RespondWith(statusCode int, body interface{}, optionalHeader ...http.Header) http.HandlerFunc {
+	return func(w http.ResponseWriter, req *http.Request) {
+		if len(optionalHeader) == 1 {
+			copyHeader(optionalHeader[0], w.Header())
+		}
+		w.WriteHeader(statusCode)
+		switch x := body.(type) {
+		case string:
+			w.Write([]byte(x))
+		case []byte:
+			w.Write(x)
+		default:
+			Ω(body).Should(BeNil(), "Invalid type for body. Should be string or []byte.")
+		}
+	}
+}
+
+/*
+RespondWithPtr returns a handler that responds to a request with the specified status code and body
+
+Unlike RespondWith, you pass RespondWithPtr a pointer to the status code and body allowing different tests
+to share the same setup but specify different status codes and bodies.
+
+Also, RespondWithPtr can be given an optional http.Header. The headers defined therein will be added to the response headers.
+Since the http.Header can be mutated after the fact you don't need to pass in a pointer.
+*/
+func RespondWithPtr(statusCode *int, body interface{}, optionalHeader ...http.Header) http.HandlerFunc {
+	return func(w http.ResponseWriter, req *http.Request) {
+		if len(optionalHeader) == 1 {
+			copyHeader(optionalHeader[0], w.Header())
+		}
+		w.WriteHeader(*statusCode)
+		if body != nil {
+			switch x := (body).(type) {
+			case *string:
+				w.Write([]byte(*x))
+			case *[]byte:
+				w.Write(*x)
+			default:
+				Ω(body).Should(BeNil(), "Invalid type for body. Should be *string or *[]byte.")
+			}
+		}
+	}
+}
+
+/*
+RespondWithJSONEncoded returns a handler that responds to a request with the specified status code and a body
+containing the JSON-encoding of the passed in object
+
+Also, RespondWithJSONEncoded can be given an optional http.Header. The headers defined therein will be added to the response headers.
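+
+For example, a minimal sketch (assuming s is a *ghttp.Server and Sprocket is a type defined by the test):
+
+	s.AppendHandlers(CombineHandlers(
+		VerifyRequest("GET", "/sprockets"),
+		RespondWithJSONEncoded(http.StatusOK, []Sprocket{}),
+	))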
+*/ +func RespondWithJSONEncoded(statusCode int, object interface{}, optionalHeader ...http.Header) http.HandlerFunc { + data, err := json.Marshal(object) + Ω(err).ShouldNot(HaveOccurred()) + + var headers http.Header + if len(optionalHeader) == 1 { + headers = optionalHeader[0] + } else { + headers = make(http.Header) + } + if _, found := headers["Content-Type"]; !found { + headers["Content-Type"] = []string{"application/json"} + } + return RespondWith(statusCode, string(data), headers) +} + +/* +RespondWithJSONEncodedPtr behaves like RespondWithJSONEncoded but takes a pointer +to a status code and object. + +This allows different tests to share the same setup but specify different status codes and JSON-encoded +objects. + +Also, RespondWithJSONEncodedPtr can be given an optional http.Header. The headers defined therein will be added to the response headers. +Since the http.Header can be mutated after the fact you don't need to pass in a pointer. +*/ +func RespondWithJSONEncodedPtr(statusCode *int, object interface{}, optionalHeader ...http.Header) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + data, err := json.Marshal(object) + Ω(err).ShouldNot(HaveOccurred()) + var headers http.Header + if len(optionalHeader) == 1 { + headers = optionalHeader[0] + } else { + headers = make(http.Header) + } + if _, found := headers["Content-Type"]; !found { + headers["Content-Type"] = []string{"application/json"} + } + copyHeader(headers, w.Header()) + w.WriteHeader(*statusCode) + w.Write(data) + } +} + +//RespondWithProto returns a handler that responds to a request with the specified status code and a body +//containing the protobuf serialization of the provided message. +// +//Also, RespondWithProto can be given an optional http.Header. The headers defined therein will be added to the response headers. +func RespondWithProto(statusCode int, message proto.Message, optionalHeader ...http.Header) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + data, err := proto.Marshal(message) + Ω(err).ShouldNot(HaveOccurred()) + + var headers http.Header + if len(optionalHeader) == 1 { + headers = optionalHeader[0] + } else { + headers = make(http.Header) + } + if _, found := headers["Content-Type"]; !found { + headers["Content-Type"] = []string{"application/x-protobuf"} + } + copyHeader(headers, w.Header()) + + w.WriteHeader(statusCode) + w.Write(data) + } +} diff --git a/vendor/github.com/onsi/gomega/ghttp/protobuf/protobuf.go b/vendor/github.com/onsi/gomega/ghttp/protobuf/protobuf.go new file mode 100644 index 000000000..b2972bc9f --- /dev/null +++ b/vendor/github.com/onsi/gomega/ghttp/protobuf/protobuf.go @@ -0,0 +1,3 @@ +package protobuf + +//go:generate protoc --go_out=. simple_message.proto diff --git a/vendor/github.com/onsi/gomega/ghttp/protobuf/simple_message.pb.go b/vendor/github.com/onsi/gomega/ghttp/protobuf/simple_message.pb.go new file mode 100644 index 000000000..c55a48448 --- /dev/null +++ b/vendor/github.com/onsi/gomega/ghttp/protobuf/simple_message.pb.go @@ -0,0 +1,55 @@ +// Code generated by protoc-gen-go. +// source: simple_message.proto +// DO NOT EDIT! + +/* +Package protobuf is a generated protocol buffer package. + +It is generated from these files: + simple_message.proto + +It has these top-level messages: + SimpleMessage +*/ +package protobuf + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type SimpleMessage struct { + Description *string `protobuf:"bytes,1,req,name=description" json:"description,omitempty"` + Id *int32 `protobuf:"varint,2,req,name=id" json:"id,omitempty"` + Metadata *string `protobuf:"bytes,3,opt,name=metadata" json:"metadata,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SimpleMessage) Reset() { *m = SimpleMessage{} } +func (m *SimpleMessage) String() string { return proto.CompactTextString(m) } +func (*SimpleMessage) ProtoMessage() {} + +func (m *SimpleMessage) GetDescription() string { + if m != nil && m.Description != nil { + return *m.Description + } + return "" +} + +func (m *SimpleMessage) GetId() int32 { + if m != nil && m.Id != nil { + return *m.Id + } + return 0 +} + +func (m *SimpleMessage) GetMetadata() string { + if m != nil && m.Metadata != nil { + return *m.Metadata + } + return "" +} diff --git a/vendor/github.com/onsi/gomega/ghttp/protobuf/simple_message.proto b/vendor/github.com/onsi/gomega/ghttp/protobuf/simple_message.proto new file mode 100644 index 000000000..35b7145c2 --- /dev/null +++ b/vendor/github.com/onsi/gomega/ghttp/protobuf/simple_message.proto @@ -0,0 +1,9 @@ +syntax = "proto2"; + +package protobuf; + +message SimpleMessage { + required string description = 1; + required int32 id = 2; + optional string metadata = 3; +} diff --git a/vendor/github.com/onsi/gomega/ghttp/test_server.go b/vendor/github.com/onsi/gomega/ghttp/test_server.go new file mode 100644 index 000000000..40d92dea2 --- /dev/null +++ b/vendor/github.com/onsi/gomega/ghttp/test_server.go @@ -0,0 +1,381 @@ +/* +Package ghttp supports testing HTTP clients by providing a test server (simply a thin wrapper around httptest's server) that supports +registering multiple handlers. Incoming requests are not routed between the different handlers +- rather it is merely the order of the handlers that matters. The first request is handled by the first +registered handler, the second request by the second handler, etc. + +The intent here is to have each handler *verify* that the incoming request is valid. To accomplish, ghttp +also provides a collection of bite-size handlers that each perform one aspect of request verification. These can +be composed together and registered with a ghttp server. The result is an expressive language for describing +the requests generated by the client under test. + +Here's a simple example, note that the server handler is only defined in one BeforeEach and then modified, as required, by the nested BeforeEaches. 
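+
+When a test needs routing that does not depend on request order, the server's RouteToHandler method can be used instead of (or alongside) AppendHandlers. A minimal sketch:
+
+	server.RouteToHandler("GET", "/ping", ghttp.RespondWith(http.StatusOK, "pong"))
+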
+A more comprehensive example is available at https://onsi.github.io/gomega/#_testing_http_clients
+
+	var _ = Describe("A Sprockets Client", func() {
+		var server *ghttp.Server
+		var client *SprocketClient
+		BeforeEach(func() {
+			server = ghttp.NewServer()
+			client = NewSprocketClient(server.URL(), "skywalker", "tk427")
+		})
+
+		AfterEach(func() {
+			server.Close()
+		})
+
+		Describe("fetching sprockets", func() {
+			var statusCode int
+			var sprockets []Sprocket
+			BeforeEach(func() {
+				statusCode = http.StatusOK
+				sprockets = []Sprocket{}
+				server.AppendHandlers(ghttp.CombineHandlers(
+					ghttp.VerifyRequest("GET", "/sprockets"),
+					ghttp.VerifyBasicAuth("skywalker", "tk427"),
+					ghttp.RespondWithJSONEncodedPtr(&statusCode, &sprockets),
+				))
+			})
+
+			Context("when requesting all sprockets", func() {
+				Context("when the response is successful", func() {
+					BeforeEach(func() {
+						sprockets = []Sprocket{
+							NewSprocket("Alfalfa"),
+							NewSprocket("Banana"),
+						}
+					})
+
+					It("should return the returned sprockets", func() {
+						Ω(client.Sprockets()).Should(Equal(sprockets))
+					})
+				})
+
+				Context("when the response is missing", func() {
+					BeforeEach(func() {
+						statusCode = http.StatusNotFound
+					})
+
+					It("should return an empty list of sprockets", func() {
+						Ω(client.Sprockets()).Should(BeEmpty())
+					})
+				})
+
+				Context("when the response fails to authenticate", func() {
+					BeforeEach(func() {
+						statusCode = http.StatusUnauthorized
+					})
+
+					It("should return an AuthenticationError error", func() {
+						sprockets, err := client.Sprockets()
+						Ω(sprockets).Should(BeEmpty())
+						Ω(err).Should(MatchError(AuthenticationError))
+					})
+				})
+
+				Context("when the response is a server failure", func() {
+					BeforeEach(func() {
+						statusCode = http.StatusInternalServerError
+					})
+
+					It("should return an InternalError error", func() {
+						sprockets, err := client.Sprockets()
+						Ω(sprockets).Should(BeEmpty())
+						Ω(err).Should(MatchError(InternalError))
+					})
+				})
+			})
+
+			Context("when requesting some sprockets", func() {
+				BeforeEach(func() {
+					sprockets = []Sprocket{
+						NewSprocket("Alfalfa"),
+						NewSprocket("Banana"),
+					}
+
+					server.WrapHandler(0, ghttp.VerifyRequest("GET", "/sprockets", "filter=FOOD"))
+				})
+
+				It("should make the request with a filter", func() {
+					Ω(client.Sprockets("food")).Should(Equal(sprockets))
+				})
+			})
+		})
+	})
+*/
+package ghttp
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/http/httptest"
+	"reflect"
+	"regexp"
+	"strings"
+	"sync"
+
+	. "github.com/onsi/gomega"
+)
+
+func new() *Server {
+	return &Server{
+		AllowUnhandledRequests:     false,
+		UnhandledRequestStatusCode: http.StatusInternalServerError,
+		writeLock:                  &sync.Mutex{},
+	}
+}
+
+type routedHandler struct {
+	method     string
+	pathRegexp *regexp.Regexp
+	path       string
+	handler    http.HandlerFunc
+}
+
+// NewServer returns a new `*ghttp.Server` that wraps an `httptest` server. The server is started automatically.
+func NewServer() *Server {
+	s := new()
+	s.HTTPTestServer = httptest.NewServer(s)
+	return s
+}
+
+// NewUnstartedServer returns a new, unstarted, `*ghttp.Server`. Useful for specifying a custom listener on `server.HTTPTestServer`.
+func NewUnstartedServer() *Server {
+	s := new()
+	s.HTTPTestServer = httptest.NewUnstartedServer(s)
+	return s
+}
+
+// NewTLSServer returns a new `*ghttp.Server` that wraps an `httptest` TLS server. The server is started automatically.
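+//
+// For example, a minimal sketch (the httptest TLS server uses a self-signed certificate, so the test client must trust it or skip verification; crypto/tls is assumed imported and InsecureSkipVerify is acceptable only in tests):
+//
+//	s := ghttp.NewTLSServer()
+//	client := &http.Client{Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}}
+//	resp, err := client.Get(s.URL())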
+func NewTLSServer() *Server { + s := new() + s.HTTPTestServer = httptest.NewTLSServer(s) + return s +} + +type Server struct { + //The underlying httptest server + HTTPTestServer *httptest.Server + + //Defaults to false. If set to true, the Server will allow more requests than there are registered handlers. + AllowUnhandledRequests bool + + //The status code returned when receiving an unhandled request. + //Defaults to http.StatusInternalServerError. + //Only applies if AllowUnhandledRequests is true + UnhandledRequestStatusCode int + + //If provided, ghttp will log about each request received to the provided io.Writer + //Defaults to nil + //If you're using Ginkgo, set this to GinkgoWriter to get improved output during failures + Writer io.Writer + + receivedRequests []*http.Request + requestHandlers []http.HandlerFunc + routedHandlers []routedHandler + + writeLock *sync.Mutex + calls int +} + +//Start() starts an unstarted ghttp server. It is a catastrophic error to call Start more than once (thanks, httptest). +func (s *Server) Start() { + s.HTTPTestServer.Start() +} + +//URL() returns a url that will hit the server +func (s *Server) URL() string { + return s.HTTPTestServer.URL +} + +//Addr() returns the address on which the server is listening. +func (s *Server) Addr() string { + return s.HTTPTestServer.Listener.Addr().String() +} + +//Close() should be called at the end of each test. It spins down and cleans up the test server. +func (s *Server) Close() { + s.writeLock.Lock() + server := s.HTTPTestServer + s.HTTPTestServer = nil + s.writeLock.Unlock() + + if server != nil { + server.Close() + } +} + +//ServeHTTP() makes Server an http.Handler +//When the server receives a request it handles the request in the following order: +// +//1. If the request matches a handler registered with RouteToHandler, that handler is called. +//2. Otherwise, if there are handlers registered via AppendHandlers, those handlers are called in order. +//3. If all registered handlers have been called then: +// a) If AllowUnhandledRequests is true, the request will be handled with response code of UnhandledRequestStatusCode +// b) If AllowUnhandledRequests is false, the request will not be handled and the current test will be marked as failed. +func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { + s.writeLock.Lock() + defer func() { + e := recover() + if e != nil { + w.WriteHeader(http.StatusInternalServerError) + } + + //If the handler panics GHTTP will silently succeed. This is bad™. + //To catch this case we need to fail the test if the handler has panicked. + //However, if the handler is panicking because Ginkgo's causing it to panic (i.e. an assertion failed) + //then we shouldn't double-report the error as this will confuse people. + + //So: step 1, if this is a Ginkgo panic - do nothing, Ginkgo's aware of the failure + eAsString, ok := e.(string) + if ok && strings.Contains(eAsString, "defer GinkgoRecover()") { + return + } + + //If we're here, we have to do step 2: assert that the error is nil. This assertion will + //allow us to fail the test suite (note: we can't call Fail since Gomega is not allowed to import Ginkgo). + //Since a failed assertion throws a panic, and we are likely in a goroutine, we need to defer within our defer! 
+		defer func() {
+			recover()
+		}()
+		Ω(e).Should(BeNil(), "Handler Panicked")
+	}()
+
+	if s.Writer != nil {
+		s.Writer.Write([]byte(fmt.Sprintf("GHTTP Received Request: %s - %s\n", req.Method, req.URL)))
+	}
+
+	s.receivedRequests = append(s.receivedRequests, req)
+	if routedHandler, ok := s.handlerForRoute(req.Method, req.URL.Path); ok {
+		s.writeLock.Unlock()
+		routedHandler(w, req)
+	} else if s.calls < len(s.requestHandlers) {
+		h := s.requestHandlers[s.calls]
+		s.calls++
+		s.writeLock.Unlock()
+		h(w, req)
+	} else {
+		s.writeLock.Unlock()
+		if s.AllowUnhandledRequests {
+			ioutil.ReadAll(req.Body)
+			req.Body.Close()
+			w.WriteHeader(s.UnhandledRequestStatusCode)
+		} else {
+			Ω(req).Should(BeNil(), "Received Unhandled Request")
+		}
+	}
+}
+
+//ReceivedRequests returns an array containing all requests received by the server (both handled and unhandled requests)
+func (s *Server) ReceivedRequests() []*http.Request {
+	s.writeLock.Lock()
+	defer s.writeLock.Unlock()
+
+	return s.receivedRequests
+}
+
+//RouteToHandler can be used to register handlers that will always handle requests that match
+//the passed in method and path.
+//
+//The path may be either a string object or a *regexp.Regexp.
+func (s *Server) RouteToHandler(method string, path interface{}, handler http.HandlerFunc) {
+	s.writeLock.Lock()
+	defer s.writeLock.Unlock()
+
+	rh := routedHandler{
+		method:  method,
+		handler: handler,
+	}
+
+	switch p := path.(type) {
+	case *regexp.Regexp:
+		rh.pathRegexp = p
+	case string:
+		rh.path = p
+	default:
+		panic("path must be a string or a regular expression")
+	}
+
+	for i, existingRH := range s.routedHandlers {
+		if existingRH.method == method &&
+			reflect.DeepEqual(existingRH.pathRegexp, rh.pathRegexp) &&
+			existingRH.path == rh.path {
+			s.routedHandlers[i] = rh
+			return
+		}
+	}
+	s.routedHandlers = append(s.routedHandlers, rh)
+}
+
+func (s *Server) handlerForRoute(method string, path string) (http.HandlerFunc, bool) {
+	for _, rh := range s.routedHandlers {
+		if rh.method == method {
+			if rh.pathRegexp != nil {
+				if rh.pathRegexp.Match([]byte(path)) {
+					return rh.handler, true
+				}
+			} else if rh.path == path {
+				return rh.handler, true
+			}
+		}
+	}
+
+	return nil, false
+}
+
+//AppendHandlers appends http.HandlerFuncs to the server's list of registered handlers. The first incoming request is handled by the first handler, the second by the second, etc...
+func (s *Server) AppendHandlers(handlers ...http.HandlerFunc) {
+	s.writeLock.Lock()
+	defer s.writeLock.Unlock()
+
+	s.requestHandlers = append(s.requestHandlers, handlers...)
+}
+
+//SetHandler overrides the registered handler at the passed in index with the passed in handler
+//This is useful, for example, when a server has been set up in a shared context, but must be tweaked
+//for a particular test.
+func (s *Server) SetHandler(index int, handler http.HandlerFunc) {
+	s.writeLock.Lock()
+	defer s.writeLock.Unlock()
+
+	s.requestHandlers[index] = handler
+}
+
+//GetHandler returns the handler registered at the passed in index.
+func (s *Server) GetHandler(index int) http.HandlerFunc {
+	s.writeLock.Lock()
+	defer s.writeLock.Unlock()
+
+	return s.requestHandlers[index]
+}
+
+func (s *Server) Reset() {
+	s.writeLock.Lock()
+	defer s.writeLock.Unlock()
+
+	s.HTTPTestServer.CloseClientConnections()
+	s.calls = 0
+	s.receivedRequests = nil
+	s.requestHandlers = nil
+	s.routedHandlers = nil
+}
+
+//WrapHandler combines the passed in handler with the handler registered at the passed in index.
+//This is useful, for example, when a server has been set up in a shared context but must be tweaked +//for a particular test. +// +//If the currently registered handler is A, and the new passed in handler is B then +//WrapHandler will generate a new handler that first calls A, then calls B, and assign it to index +func (s *Server) WrapHandler(index int, handler http.HandlerFunc) { + existingHandler := s.GetHandler(index) + s.SetHandler(index, CombineHandlers(existingHandler, handler)) +} + +func (s *Server) CloseClientConnections() { + s.writeLock.Lock() + defer s.writeLock.Unlock() + + s.HTTPTestServer.CloseClientConnections() +} diff --git a/vendor/github.com/onsi/gomega/ghttp/test_server_suite_test.go b/vendor/github.com/onsi/gomega/ghttp/test_server_suite_test.go new file mode 100644 index 000000000..7c1236082 --- /dev/null +++ b/vendor/github.com/onsi/gomega/ghttp/test_server_suite_test.go @@ -0,0 +1,13 @@ +package ghttp_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "testing" +) + +func TestGHTTP(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "GHTTP Suite") +} diff --git a/vendor/github.com/onsi/gomega/ghttp/test_server_test.go b/vendor/github.com/onsi/gomega/ghttp/test_server_test.go new file mode 100644 index 000000000..88b324654 --- /dev/null +++ b/vendor/github.com/onsi/gomega/ghttp/test_server_test.go @@ -0,0 +1,1089 @@ +package ghttp_test + +import ( + "bytes" + "io" + "io/ioutil" + "net/http" + "net/url" + "regexp" + + "github.com/golang/protobuf/proto" + "github.com/onsi/gomega/gbytes" + "github.com/onsi/gomega/ghttp/protobuf" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/ghttp" +) + +var _ = Describe("TestServer", func() { + var ( + resp *http.Response + err error + s *Server + ) + + BeforeEach(func() { + s = NewServer() + }) + + AfterEach(func() { + s.Close() + }) + + Describe("Resetting the server", func() { + BeforeEach(func() { + s.RouteToHandler("GET", "/", func(w http.ResponseWriter, req *http.Request) {}) + s.AppendHandlers(func(w http.ResponseWriter, req *http.Request) {}) + http.Get(s.URL() + "/") + + Ω(s.ReceivedRequests()).Should(HaveLen(1)) + }) + + It("clears all handlers and call counts", func() { + s.Reset() + Ω(s.ReceivedRequests()).Should(HaveLen(0)) + Ω(func() { s.GetHandler(0) }).Should(Panic()) + }) + }) + + Describe("closing client connections", func() { + It("closes", func() { + s.RouteToHandler("GET", "/", + func(w http.ResponseWriter, req *http.Request) { + io.WriteString(w, req.RemoteAddr) + }, + ) + client := http.Client{Transport: &http.Transport{DisableKeepAlives: true}} + resp, err := client.Get(s.URL()) + Ω(err).ShouldNot(HaveOccurred()) + Ω(resp.StatusCode).Should(Equal(200)) + + body, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + Ω(err).ShouldNot(HaveOccurred()) + + s.CloseClientConnections() + + resp, err = client.Get(s.URL()) + Ω(err).ShouldNot(HaveOccurred()) + Ω(resp.StatusCode).Should(Equal(200)) + + body2, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + Ω(err).ShouldNot(HaveOccurred()) + + Ω(body2).ShouldNot(Equal(body)) + }) + }) + + Describe("closing server mulitple times", func() { + It("should not fail", func() { + s.Close() + Ω(s.Close).ShouldNot(Panic()) + }) + }) + + Describe("allowing unhandled requests", func() { + Context("when true", func() { + BeforeEach(func() { + s.AllowUnhandledRequests = true + s.UnhandledRequestStatusCode = http.StatusForbidden + resp, err = http.Get(s.URL() + "/foo") + Ω(err).ShouldNot(HaveOccurred()) 
+ }) + + It("should allow unhandled requests and respond with the passed in status code", func() { + Ω(err).ShouldNot(HaveOccurred()) + Ω(resp.StatusCode).Should(Equal(http.StatusForbidden)) + + data, err := ioutil.ReadAll(resp.Body) + Ω(err).ShouldNot(HaveOccurred()) + Ω(data).Should(BeEmpty()) + }) + + It("should record the requests", func() { + Ω(s.ReceivedRequests()).Should(HaveLen(1)) + Ω(s.ReceivedRequests()[0].URL.Path).Should(Equal("/foo")) + }) + }) + + Context("when false", func() { + It("should fail when attempting a request", func() { + failures := InterceptGomegaFailures(func() { + http.Get(s.URL() + "/foo") + }) + + Ω(failures[0]).Should(ContainSubstring("Received Unhandled Request")) + }) + }) + }) + + Describe("Managing Handlers", func() { + var called []string + BeforeEach(func() { + called = []string{} + s.RouteToHandler("GET", "/routed", func(w http.ResponseWriter, req *http.Request) { + called = append(called, "r1") + }) + s.RouteToHandler("POST", regexp.MustCompile(`/routed\d`), func(w http.ResponseWriter, req *http.Request) { + called = append(called, "r2") + }) + s.AppendHandlers(func(w http.ResponseWriter, req *http.Request) { + called = append(called, "A") + }, func(w http.ResponseWriter, req *http.Request) { + called = append(called, "B") + }) + }) + + It("should prefer routed handlers if there is a match", func() { + http.Get(s.URL() + "/routed") + http.Post(s.URL()+"/routed7", "application/json", nil) + http.Get(s.URL() + "/foo") + http.Get(s.URL() + "/routed") + http.Post(s.URL()+"/routed9", "application/json", nil) + http.Get(s.URL() + "/bar") + + failures := InterceptGomegaFailures(func() { + http.Get(s.URL() + "/foo") + http.Get(s.URL() + "/routed/not/a/match") + http.Get(s.URL() + "/routed7") + http.Post(s.URL()+"/routed", "application/json", nil) + }) + + Ω(failures[0]).Should(ContainSubstring("Received Unhandled Request")) + Ω(failures).Should(HaveLen(4)) + + http.Post(s.URL()+"/routed3", "application/json", nil) + + Ω(called).Should(Equal([]string{"r1", "r2", "A", "r1", "r2", "B", "r2"})) + }) + + It("should override routed handlers when reregistered", func() { + s.RouteToHandler("GET", "/routed", func(w http.ResponseWriter, req *http.Request) { + called = append(called, "r3") + }) + s.RouteToHandler("POST", regexp.MustCompile(`/routed\d`), func(w http.ResponseWriter, req *http.Request) { + called = append(called, "r4") + }) + + http.Get(s.URL() + "/routed") + http.Post(s.URL()+"/routed7", "application/json", nil) + + Ω(called).Should(Equal([]string{"r3", "r4"})) + }) + + It("should call the appended handlers, in order, as requests come in", func() { + http.Get(s.URL() + "/foo") + Ω(called).Should(Equal([]string{"A"})) + + http.Get(s.URL() + "/foo") + Ω(called).Should(Equal([]string{"A", "B"})) + + failures := InterceptGomegaFailures(func() { + http.Get(s.URL() + "/foo") + }) + + Ω(failures[0]).Should(ContainSubstring("Received Unhandled Request")) + }) + + Describe("Overwriting an existing handler", func() { + BeforeEach(func() { + s.SetHandler(0, func(w http.ResponseWriter, req *http.Request) { + called = append(called, "C") + }) + }) + + It("should override the specified handler", func() { + http.Get(s.URL() + "/foo") + http.Get(s.URL() + "/foo") + Ω(called).Should(Equal([]string{"C", "B"})) + }) + }) + + Describe("Getting an existing handler", func() { + It("should return the handler func", func() { + s.GetHandler(1)(nil, nil) + Ω(called).Should(Equal([]string{"B"})) + }) + }) + + Describe("Wrapping an existing handler", func() { + BeforeEach(func() 
{ + s.WrapHandler(0, func(w http.ResponseWriter, req *http.Request) { + called = append(called, "C") + }) + }) + + It("should wrap the existing handler in a new handler", func() { + http.Get(s.URL() + "/foo") + http.Get(s.URL() + "/foo") + Ω(called).Should(Equal([]string{"A", "C", "B"})) + }) + }) + }) + + Describe("When a handler fails", func() { + BeforeEach(func() { + s.UnhandledRequestStatusCode = http.StatusForbidden //just to be clear that 500s aren't coming from unhandled requests + }) + + Context("because the handler has panicked", func() { + BeforeEach(func() { + s.AppendHandlers(func(w http.ResponseWriter, req *http.Request) { + panic("bam") + }) + }) + + It("should respond with a 500 and make a failing assertion", func() { + var resp *http.Response + var err error + + failures := InterceptGomegaFailures(func() { + resp, err = http.Get(s.URL()) + }) + + Ω(err).ShouldNot(HaveOccurred()) + Ω(resp.StatusCode).Should(Equal(http.StatusInternalServerError)) + Ω(failures).Should(ConsistOf(ContainSubstring("Handler Panicked"))) + }) + }) + + Context("because an assertion has failed", func() { + BeforeEach(func() { + s.AppendHandlers(func(w http.ResponseWriter, req *http.Request) { + // Ω(true).Should(BeFalse()) <-- would be nice to do it this way, but the test just can't be written this way + + By("We're cheating a bit here -- we're throwing a GINKGO_PANIC which simulates a failed assertion") + panic(GINKGO_PANIC) + }) + }) + + It("should respond with a 500 and *not* make a failing assertion, instead relying on Ginkgo to have already been notified of the error", func() { + resp, err := http.Get(s.URL()) + + Ω(err).ShouldNot(HaveOccurred()) + Ω(resp.StatusCode).Should(Equal(http.StatusInternalServerError)) + }) + }) + }) + + Describe("Logging to the Writer", func() { + var buf *gbytes.Buffer + BeforeEach(func() { + buf = gbytes.NewBuffer() + s.Writer = buf + s.AppendHandlers(func(w http.ResponseWriter, req *http.Request) {}) + s.AppendHandlers(func(w http.ResponseWriter, req *http.Request) {}) + }) + + It("should write to the buffer when a request comes in", func() { + http.Get(s.URL() + "/foo") + Ω(buf).Should(gbytes.Say("GHTTP Received Request: GET - /foo\n")) + + http.Post(s.URL()+"/bar", "", nil) + Ω(buf).Should(gbytes.Say("GHTTP Received Request: POST - /bar\n")) + }) + }) + + Describe("Request Handlers", func() { + Describe("VerifyRequest", func() { + BeforeEach(func() { + s.AppendHandlers(VerifyRequest("GET", "/foo")) + }) + + It("should verify the method, path", func() { + resp, err = http.Get(s.URL() + "/foo?baz=bar") + Ω(err).ShouldNot(HaveOccurred()) + }) + + It("should verify the method, path", func() { + failures := InterceptGomegaFailures(func() { + http.Get(s.URL() + "/foo2") + }) + Ω(failures).Should(HaveLen(1)) + }) + + It("should verify the method, path", func() { + failures := InterceptGomegaFailures(func() { + http.Post(s.URL()+"/foo", "application/json", nil) + }) + Ω(failures).Should(HaveLen(1)) + }) + + Context("when passed a rawQuery", func() { + It("should also be possible to verify the rawQuery", func() { + s.SetHandler(0, VerifyRequest("GET", "/foo", "baz=bar")) + resp, err = http.Get(s.URL() + "/foo?baz=bar") + Ω(err).ShouldNot(HaveOccurred()) + }) + + It("should match irregardless of query parameter ordering", func() { + s.SetHandler(0, VerifyRequest("GET", "/foo", "type=get&name=money")) + u, _ := url.Parse(s.URL() + "/foo") + u.RawQuery = url.Values{ + "type": []string{"get"}, + "name": []string{"money"}, + }.Encode() + + resp, err = http.Get(u.String()) + 
Ω(err).ShouldNot(HaveOccurred()) + }) + }) + + Context("when passed a matcher for path", func() { + It("should apply the matcher", func() { + s.SetHandler(0, VerifyRequest("GET", MatchRegexp(`/foo/[a-f]*/3`))) + resp, err = http.Get(s.URL() + "/foo/abcdefa/3") + Ω(err).ShouldNot(HaveOccurred()) + }) + }) + }) + + Describe("VerifyContentType", func() { + BeforeEach(func() { + s.AppendHandlers(CombineHandlers( + VerifyRequest("GET", "/foo"), + VerifyContentType("application/octet-stream"), + )) + }) + + It("should verify the content type", func() { + req, err := http.NewRequest("GET", s.URL()+"/foo", nil) + Ω(err).ShouldNot(HaveOccurred()) + req.Header.Set("Content-Type", "application/octet-stream") + + resp, err = http.DefaultClient.Do(req) + Ω(err).ShouldNot(HaveOccurred()) + }) + + It("should verify the content type", func() { + req, err := http.NewRequest("GET", s.URL()+"/foo", nil) + Ω(err).ShouldNot(HaveOccurred()) + req.Header.Set("Content-Type", "application/json") + + failures := InterceptGomegaFailures(func() { + http.DefaultClient.Do(req) + }) + Ω(failures).Should(HaveLen(1)) + }) + }) + + Describe("Verify BasicAuth", func() { + BeforeEach(func() { + s.AppendHandlers(CombineHandlers( + VerifyRequest("GET", "/foo"), + VerifyBasicAuth("bob", "password"), + )) + }) + + It("should verify basic auth", func() { + req, err := http.NewRequest("GET", s.URL()+"/foo", nil) + Ω(err).ShouldNot(HaveOccurred()) + req.SetBasicAuth("bob", "password") + + resp, err = http.DefaultClient.Do(req) + Ω(err).ShouldNot(HaveOccurred()) + }) + + It("should verify basic auth", func() { + req, err := http.NewRequest("GET", s.URL()+"/foo", nil) + Ω(err).ShouldNot(HaveOccurred()) + req.SetBasicAuth("bob", "bassword") + + failures := InterceptGomegaFailures(func() { + http.DefaultClient.Do(req) + }) + Ω(failures).Should(HaveLen(1)) + }) + + It("should require basic auth header", func() { + req, err := http.NewRequest("GET", s.URL()+"/foo", nil) + Ω(err).ShouldNot(HaveOccurred()) + + failures := InterceptGomegaFailures(func() { + http.DefaultClient.Do(req) + }) + Ω(failures).Should(ContainElement(ContainSubstring("Authorization header must be specified"))) + }) + }) + + Describe("VerifyHeader", func() { + BeforeEach(func() { + s.AppendHandlers(CombineHandlers( + VerifyRequest("GET", "/foo"), + VerifyHeader(http.Header{ + "accept": []string{"jpeg", "png"}, + "cache-control": []string{"omicron"}, + "Return-Path": []string{"hobbiton"}, + }), + )) + }) + + It("should verify the headers", func() { + req, err := http.NewRequest("GET", s.URL()+"/foo", nil) + Ω(err).ShouldNot(HaveOccurred()) + req.Header.Add("Accept", "jpeg") + req.Header.Add("Accept", "png") + req.Header.Add("Cache-Control", "omicron") + req.Header.Add("return-path", "hobbiton") + + resp, err = http.DefaultClient.Do(req) + Ω(err).ShouldNot(HaveOccurred()) + }) + + It("should verify the headers", func() { + req, err := http.NewRequest("GET", s.URL()+"/foo", nil) + Ω(err).ShouldNot(HaveOccurred()) + req.Header.Add("Schmaccept", "jpeg") + req.Header.Add("Schmaccept", "png") + req.Header.Add("Cache-Control", "omicron") + req.Header.Add("return-path", "hobbiton") + + failures := InterceptGomegaFailures(func() { + http.DefaultClient.Do(req) + }) + Ω(failures).Should(HaveLen(1)) + }) + }) + + Describe("VerifyHeaderKV", func() { + BeforeEach(func() { + s.AppendHandlers(CombineHandlers( + VerifyRequest("GET", "/foo"), + VerifyHeaderKV("accept", "jpeg", "png"), + VerifyHeaderKV("cache-control", "omicron"), + VerifyHeaderKV("Return-Path", "hobbiton"), + )) + }) + 
+ It("should verify the headers", func() { + req, err := http.NewRequest("GET", s.URL()+"/foo", nil) + Ω(err).ShouldNot(HaveOccurred()) + req.Header.Add("Accept", "jpeg") + req.Header.Add("Accept", "png") + req.Header.Add("Cache-Control", "omicron") + req.Header.Add("return-path", "hobbiton") + + resp, err = http.DefaultClient.Do(req) + Ω(err).ShouldNot(HaveOccurred()) + }) + + It("should verify the headers", func() { + req, err := http.NewRequest("GET", s.URL()+"/foo", nil) + Ω(err).ShouldNot(HaveOccurred()) + req.Header.Add("Accept", "jpeg") + req.Header.Add("Cache-Control", "omicron") + req.Header.Add("return-path", "hobbiton") + + failures := InterceptGomegaFailures(func() { + http.DefaultClient.Do(req) + }) + Ω(failures).Should(HaveLen(1)) + }) + }) + + Describe("VerifyBody", func() { + BeforeEach(func() { + s.AppendHandlers(CombineHandlers( + VerifyRequest("POST", "/foo"), + VerifyBody([]byte("some body")), + )) + }) + + It("should verify the body", func() { + resp, err = http.Post(s.URL()+"/foo", "", bytes.NewReader([]byte("some body"))) + Ω(err).ShouldNot(HaveOccurred()) + }) + + It("should verify the body", func() { + failures := InterceptGomegaFailures(func() { + http.Post(s.URL()+"/foo", "", bytes.NewReader([]byte("wrong body"))) + }) + Ω(failures).Should(HaveLen(1)) + }) + }) + + Describe("VerifyJSON", func() { + BeforeEach(func() { + s.AppendHandlers(CombineHandlers( + VerifyRequest("POST", "/foo"), + VerifyJSON(`{"a":3, "b":2}`), + )) + }) + + It("should verify the json body and the content type", func() { + resp, err = http.Post(s.URL()+"/foo", "application/json", bytes.NewReader([]byte(`{"b":2, "a":3}`))) + Ω(err).ShouldNot(HaveOccurred()) + }) + + It("should verify the json body and the content type", func() { + failures := InterceptGomegaFailures(func() { + http.Post(s.URL()+"/foo", "application/json", bytes.NewReader([]byte(`{"b":2, "a":4}`))) + }) + Ω(failures).Should(HaveLen(1)) + }) + + It("should verify the json body and the content type", func() { + failures := InterceptGomegaFailures(func() { + http.Post(s.URL()+"/foo", "application/not-json", bytes.NewReader([]byte(`{"b":2, "a":3}`))) + }) + Ω(failures).Should(HaveLen(1)) + }) + }) + + Describe("VerifyJSONRepresenting", func() { + BeforeEach(func() { + s.AppendHandlers(CombineHandlers( + VerifyRequest("POST", "/foo"), + VerifyJSONRepresenting([]int{1, 3, 5}), + )) + }) + + It("should verify the json body and the content type", func() { + resp, err = http.Post(s.URL()+"/foo", "application/json", bytes.NewReader([]byte(`[1,3,5]`))) + Ω(err).ShouldNot(HaveOccurred()) + }) + + It("should verify the json body and the content type", func() { + failures := InterceptGomegaFailures(func() { + http.Post(s.URL()+"/foo", "application/json", bytes.NewReader([]byte(`[1,3]`))) + }) + Ω(failures).Should(HaveLen(1)) + }) + }) + + Describe("VerifyForm", func() { + var formValues url.Values + + BeforeEach(func() { + formValues = make(url.Values) + formValues.Add("users", "user1") + formValues.Add("users", "user2") + formValues.Add("group", "users") + }) + + Context("when encoded in the URL", func() { + BeforeEach(func() { + s.AppendHandlers(CombineHandlers( + VerifyRequest("GET", "/foo"), + VerifyForm(url.Values{ + "users": []string{"user1", "user2"}, + "group": []string{"users"}, + }), + )) + }) + + It("should verify form values", func() { + resp, err = http.Get(s.URL() + "/foo?" 
+ formValues.Encode()) + Ω(err).ShouldNot(HaveOccurred()) + }) + + It("should ignore extra values", func() { + formValues.Add("extra", "value") + resp, err = http.Get(s.URL() + "/foo?" + formValues.Encode()) + Ω(err).ShouldNot(HaveOccurred()) + }) + + It("fail on missing values", func() { + formValues.Del("group") + failures := InterceptGomegaFailures(func() { + resp, err = http.Get(s.URL() + "/foo?" + formValues.Encode()) + }) + Ω(failures).Should(HaveLen(1)) + }) + + It("fail on incorrect values", func() { + formValues.Set("group", "wheel") + failures := InterceptGomegaFailures(func() { + resp, err = http.Get(s.URL() + "/foo?" + formValues.Encode()) + }) + Ω(failures).Should(HaveLen(1)) + }) + }) + + Context("when present in the body", func() { + BeforeEach(func() { + s.AppendHandlers(CombineHandlers( + VerifyRequest("POST", "/foo"), + VerifyForm(url.Values{ + "users": []string{"user1", "user2"}, + "group": []string{"users"}, + }), + )) + }) + + It("should verify form values", func() { + resp, err = http.PostForm(s.URL()+"/foo", formValues) + Ω(err).ShouldNot(HaveOccurred()) + }) + + It("should ignore extra values", func() { + formValues.Add("extra", "value") + resp, err = http.PostForm(s.URL()+"/foo", formValues) + Ω(err).ShouldNot(HaveOccurred()) + }) + + It("fail on missing values", func() { + formValues.Del("group") + failures := InterceptGomegaFailures(func() { + resp, err = http.PostForm(s.URL()+"/foo", formValues) + }) + Ω(failures).Should(HaveLen(1)) + }) + + It("fail on incorrect values", func() { + formValues.Set("group", "wheel") + failures := InterceptGomegaFailures(func() { + resp, err = http.PostForm(s.URL()+"/foo", formValues) + }) + Ω(failures).Should(HaveLen(1)) + }) + }) + }) + + Describe("VerifyFormKV", func() { + Context("when encoded in the URL", func() { + BeforeEach(func() { + s.AppendHandlers(CombineHandlers( + VerifyRequest("GET", "/foo"), + VerifyFormKV("users", "user1", "user2"), + )) + }) + + It("verifies the form value", func() { + resp, err = http.Get(s.URL() + "/foo?users=user1&users=user2") + Ω(err).ShouldNot(HaveOccurred()) + }) + + It("verifies the form value", func() { + failures := InterceptGomegaFailures(func() { + resp, err = http.Get(s.URL() + "/foo?users=user1") + }) + Ω(failures).Should(HaveLen(1)) + }) + }) + + Context("when present in the body", func() { + BeforeEach(func() { + s.AppendHandlers(CombineHandlers( + VerifyRequest("POST", "/foo"), + VerifyFormKV("users", "user1", "user2"), + )) + }) + + It("verifies the form value", func() { + resp, err = http.PostForm(s.URL()+"/foo", url.Values{"users": []string{"user1", "user2"}}) + Ω(err).ShouldNot(HaveOccurred()) + }) + + It("verifies the form value", func() { + failures := InterceptGomegaFailures(func() { + resp, err = http.PostForm(s.URL()+"/foo", url.Values{"users": []string{"user1"}}) + }) + Ω(failures).Should(HaveLen(1)) + }) + }) + }) + + Describe("VerifyProtoRepresenting", func() { + var message *protobuf.SimpleMessage + + BeforeEach(func() { + message = new(protobuf.SimpleMessage) + message.Description = proto.String("A description") + message.Id = proto.Int32(0) + + s.AppendHandlers(CombineHandlers( + VerifyRequest("POST", "/proto"), + VerifyProtoRepresenting(message), + )) + }) + + It("verifies the proto body and the content type", func() { + serialized, err := proto.Marshal(message) + Ω(err).ShouldNot(HaveOccurred()) + + resp, err = http.Post(s.URL()+"/proto", "application/x-protobuf", bytes.NewReader(serialized)) + Ω(err).ShouldNot(HaveOccurred()) + }) + + It("should verify the proto 
body and the content type", func() { + serialized, err := proto.Marshal(&protobuf.SimpleMessage{ + Description: proto.String("A description"), + Id: proto.Int32(0), + Metadata: proto.String("some metadata"), + }) + Ω(err).ShouldNot(HaveOccurred()) + + failures := InterceptGomegaFailures(func() { + http.Post(s.URL()+"/proto", "application/x-protobuf", bytes.NewReader(serialized)) + }) + Ω(failures).Should(HaveLen(1)) + }) + + It("should verify the proto body and the content type", func() { + serialized, err := proto.Marshal(message) + Ω(err).ShouldNot(HaveOccurred()) + + failures := InterceptGomegaFailures(func() { + http.Post(s.URL()+"/proto", "application/not-x-protobuf", bytes.NewReader(serialized)) + }) + Ω(failures).Should(HaveLen(1)) + }) + }) + + Describe("RespondWith", func() { + Context("without headers", func() { + BeforeEach(func() { + s.AppendHandlers(CombineHandlers( + VerifyRequest("POST", "/foo"), + RespondWith(http.StatusCreated, "sweet"), + ), CombineHandlers( + VerifyRequest("POST", "/foo"), + RespondWith(http.StatusOK, []byte("sour")), + )) + }) + + It("should return the response", func() { + resp, err = http.Post(s.URL()+"/foo", "application/json", nil) + Ω(err).ShouldNot(HaveOccurred()) + + Ω(resp.StatusCode).Should(Equal(http.StatusCreated)) + + body, err := ioutil.ReadAll(resp.Body) + Ω(err).ShouldNot(HaveOccurred()) + Ω(body).Should(Equal([]byte("sweet"))) + + resp, err = http.Post(s.URL()+"/foo", "application/json", nil) + Ω(err).ShouldNot(HaveOccurred()) + + Ω(resp.StatusCode).Should(Equal(http.StatusOK)) + + body, err = ioutil.ReadAll(resp.Body) + Ω(err).ShouldNot(HaveOccurred()) + Ω(body).Should(Equal([]byte("sour"))) + }) + }) + + Context("with headers", func() { + BeforeEach(func() { + s.AppendHandlers(CombineHandlers( + VerifyRequest("POST", "/foo"), + RespondWith(http.StatusCreated, "sweet", http.Header{"X-Custom-Header": []string{"my header"}}), + )) + }) + + It("should return the headers too", func() { + resp, err = http.Post(s.URL()+"/foo", "application/json", nil) + Ω(err).ShouldNot(HaveOccurred()) + + Ω(resp.StatusCode).Should(Equal(http.StatusCreated)) + Ω(ioutil.ReadAll(resp.Body)).Should(Equal([]byte("sweet"))) + Ω(resp.Header.Get("X-Custom-Header")).Should(Equal("my header")) + }) + }) + }) + + Describe("RespondWithPtr", func() { + var code int + var byteBody []byte + var stringBody string + BeforeEach(func() { + code = http.StatusOK + byteBody = []byte("sweet") + stringBody = "sour" + + s.AppendHandlers(CombineHandlers( + VerifyRequest("POST", "/foo"), + RespondWithPtr(&code, &byteBody), + ), CombineHandlers( + VerifyRequest("POST", "/foo"), + RespondWithPtr(&code, &stringBody), + )) + }) + + It("should return the response", func() { + code = http.StatusCreated + byteBody = []byte("tasty") + stringBody = "treat" + + resp, err = http.Post(s.URL()+"/foo", "application/json", nil) + Ω(err).ShouldNot(HaveOccurred()) + + Ω(resp.StatusCode).Should(Equal(http.StatusCreated)) + + body, err := ioutil.ReadAll(resp.Body) + Ω(err).ShouldNot(HaveOccurred()) + Ω(body).Should(Equal([]byte("tasty"))) + + resp, err = http.Post(s.URL()+"/foo", "application/json", nil) + Ω(err).ShouldNot(HaveOccurred()) + + Ω(resp.StatusCode).Should(Equal(http.StatusCreated)) + + body, err = ioutil.ReadAll(resp.Body) + Ω(err).ShouldNot(HaveOccurred()) + Ω(body).Should(Equal([]byte("treat"))) + }) + + Context("when passed a nil body", func() { + BeforeEach(func() { + s.SetHandler(0, CombineHandlers( + VerifyRequest("POST", "/foo"), + RespondWithPtr(&code, nil), + )) + }) + + It("should 
return an empty body and not explode", func() { + resp, err = http.Post(s.URL()+"/foo", "application/json", nil) + + Ω(err).ShouldNot(HaveOccurred()) + Ω(resp.StatusCode).Should(Equal(http.StatusOK)) + body, err := ioutil.ReadAll(resp.Body) + Ω(err).ShouldNot(HaveOccurred()) + Ω(body).Should(BeEmpty()) + + Ω(s.ReceivedRequests()).Should(HaveLen(1)) + }) + }) + }) + + Describe("RespondWithJSON", func() { + Context("when no optional headers are set", func() { + BeforeEach(func() { + s.AppendHandlers(CombineHandlers( + VerifyRequest("POST", "/foo"), + RespondWithJSONEncoded(http.StatusCreated, []int{1, 2, 3}), + )) + }) + + It("should return the response", func() { + resp, err = http.Post(s.URL()+"/foo", "application/json", nil) + Ω(err).ShouldNot(HaveOccurred()) + + Ω(resp.StatusCode).Should(Equal(http.StatusCreated)) + + body, err := ioutil.ReadAll(resp.Body) + Ω(err).ShouldNot(HaveOccurred()) + Ω(body).Should(MatchJSON("[1,2,3]")) + }) + + It("should set the Content-Type header to application/json", func() { + resp, err = http.Post(s.URL()+"/foo", "application/json", nil) + Ω(err).ShouldNot(HaveOccurred()) + + Ω(resp.Header["Content-Type"]).Should(Equal([]string{"application/json"})) + }) + }) + + Context("when optional headers are set", func() { + var headers http.Header + BeforeEach(func() { + headers = http.Header{"Stuff": []string{"things"}} + }) + + JustBeforeEach(func() { + s.AppendHandlers(CombineHandlers( + VerifyRequest("POST", "/foo"), + RespondWithJSONEncoded(http.StatusCreated, []int{1, 2, 3}, headers), + )) + }) + + It("should preserve those headers", func() { + resp, err = http.Post(s.URL()+"/foo", "application/json", nil) + Ω(err).ShouldNot(HaveOccurred()) + + Ω(resp.Header["Stuff"]).Should(Equal([]string{"things"})) + }) + + It("should set the Content-Type header to application/json", func() { + resp, err = http.Post(s.URL()+"/foo", "application/json", nil) + Ω(err).ShouldNot(HaveOccurred()) + + Ω(resp.Header["Content-Type"]).Should(Equal([]string{"application/json"})) + }) + + Context("when setting the Content-Type explicitly", func() { + BeforeEach(func() { + headers["Content-Type"] = []string{"not-json"} + }) + + It("should use the Content-Type header that was explicitly set", func() { + resp, err = http.Post(s.URL()+"/foo", "application/json", nil) + Ω(err).ShouldNot(HaveOccurred()) + + Ω(resp.Header["Content-Type"]).Should(Equal([]string{"not-json"})) + }) + }) + }) + }) + + Describe("RespondWithJSONPtr", func() { + type testObject struct { + Key string + Value string + } + + var code int + var object testObject + + Context("when no optional headers are set", func() { + BeforeEach(func() { + code = http.StatusOK + object = testObject{} + s.AppendHandlers(CombineHandlers( + VerifyRequest("POST", "/foo"), + RespondWithJSONEncodedPtr(&code, &object), + )) + }) + + It("should return the response", func() { + code = http.StatusCreated + object = testObject{ + Key: "Jim", + Value: "Codes", + } + resp, err = http.Post(s.URL()+"/foo", "application/json", nil) + Ω(err).ShouldNot(HaveOccurred()) + + Ω(resp.StatusCode).Should(Equal(http.StatusCreated)) + + body, err := ioutil.ReadAll(resp.Body) + Ω(err).ShouldNot(HaveOccurred()) + Ω(body).Should(MatchJSON(`{"Key": "Jim", "Value": "Codes"}`)) + }) + + It("should set the Content-Type header to application/json", func() { + resp, err = http.Post(s.URL()+"/foo", "application/json", nil) + Ω(err).ShouldNot(HaveOccurred()) + + Ω(resp.Header["Content-Type"]).Should(Equal([]string{"application/json"})) + }) + }) + + Context("when 
optional headers are set", func() { + var headers http.Header + BeforeEach(func() { + headers = http.Header{"Stuff": []string{"things"}} + }) + + JustBeforeEach(func() { + code = http.StatusOK + object = testObject{} + s.AppendHandlers(CombineHandlers( + VerifyRequest("POST", "/foo"), + RespondWithJSONEncodedPtr(&code, &object, headers), + )) + }) + + It("should preserve those headers", func() { + resp, err = http.Post(s.URL()+"/foo", "application/json", nil) + Ω(err).ShouldNot(HaveOccurred()) + + Ω(resp.Header["Stuff"]).Should(Equal([]string{"things"})) + }) + + It("should set the Content-Type header to application/json", func() { + resp, err = http.Post(s.URL()+"/foo", "application/json", nil) + Ω(err).ShouldNot(HaveOccurred()) + + Ω(resp.Header["Content-Type"]).Should(Equal([]string{"application/json"})) + }) + + Context("when setting the Content-Type explicitly", func() { + BeforeEach(func() { + headers["Content-Type"] = []string{"not-json"} + }) + + It("should use the Content-Type header that was explicitly set", func() { + resp, err = http.Post(s.URL()+"/foo", "application/json", nil) + Ω(err).ShouldNot(HaveOccurred()) + + Ω(resp.Header["Content-Type"]).Should(Equal([]string{"not-json"})) + }) + }) + }) + }) + + Describe("RespondWithProto", func() { + var message *protobuf.SimpleMessage + + BeforeEach(func() { + message = new(protobuf.SimpleMessage) + message.Description = proto.String("A description") + message.Id = proto.Int32(99) + }) + + Context("when no optional headers are set", func() { + BeforeEach(func() { + s.AppendHandlers(CombineHandlers( + VerifyRequest("POST", "/proto"), + RespondWithProto(http.StatusCreated, message), + )) + }) + + It("should return the response", func() { + resp, err = http.Post(s.URL()+"/proto", "application/x-protobuf", nil) + Ω(err).ShouldNot(HaveOccurred()) + + Ω(resp.StatusCode).Should(Equal(http.StatusCreated)) + + var received protobuf.SimpleMessage + body, err := ioutil.ReadAll(resp.Body) + err = proto.Unmarshal(body, &received) + Ω(err).ShouldNot(HaveOccurred()) + }) + + It("should set the Content-Type header to application/x-protobuf", func() { + resp, err = http.Post(s.URL()+"/proto", "application/x-protobuf", nil) + Ω(err).ShouldNot(HaveOccurred()) + + Ω(resp.Header["Content-Type"]).Should(Equal([]string{"application/x-protobuf"})) + }) + }) + + Context("when optional headers are set", func() { + var headers http.Header + BeforeEach(func() { + headers = http.Header{"Stuff": []string{"things"}} + }) + + JustBeforeEach(func() { + s.AppendHandlers(CombineHandlers( + VerifyRequest("POST", "/proto"), + RespondWithProto(http.StatusCreated, message, headers), + )) + }) + + It("should preserve those headers", func() { + resp, err = http.Post(s.URL()+"/proto", "application/x-protobuf", nil) + Ω(err).ShouldNot(HaveOccurred()) + + Ω(resp.Header["Stuff"]).Should(Equal([]string{"things"})) + }) + + It("should set the Content-Type header to application/x-protobuf", func() { + resp, err = http.Post(s.URL()+"/proto", "application/x-protobuf", nil) + Ω(err).ShouldNot(HaveOccurred()) + + Ω(resp.Header["Content-Type"]).Should(Equal([]string{"application/x-protobuf"})) + }) + + Context("when setting the Content-Type explicitly", func() { + BeforeEach(func() { + headers["Content-Type"] = []string{"not-x-protobuf"} + }) + + It("should use the Content-Type header that was explicitly set", func() { + resp, err = http.Post(s.URL()+"/proto", "application/x-protobuf", nil) + Ω(err).ShouldNot(HaveOccurred()) + + 
Ω(resp.Header["Content-Type"]).Should(Equal([]string{"not-x-protobuf"}))
+					})
+				})
+			})
+		})
+	})
+})
diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go
new file mode 100644
index 000000000..0d0f563a1
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/gomega_dsl.go
@@ -0,0 +1,335 @@
+/*
+Gomega is the Ginkgo BDD-style testing framework's preferred matcher library.
+
+The godoc documentation describes Gomega's API. More comprehensive documentation (with examples!) is available at http://onsi.github.io/gomega/
+
+Gomega on Github: http://github.com/onsi/gomega
+
+Learn more about Ginkgo online: http://onsi.github.io/ginkgo
+
+Ginkgo on Github: http://github.com/onsi/ginkgo
+
+Gomega is MIT-Licensed
+*/
+package gomega
+
+import (
+	"fmt"
+	"reflect"
+	"time"
+
+	"github.com/onsi/gomega/internal/assertion"
+	"github.com/onsi/gomega/internal/asyncassertion"
+	"github.com/onsi/gomega/internal/testingtsupport"
+	"github.com/onsi/gomega/types"
+)
+
+const GOMEGA_VERSION = "1.2.0"
+
+const nilFailHandlerPanic = `You are trying to make an assertion, but Gomega's fail handler is nil.
+If you're using Ginkgo then you probably forgot to put your assertion in an It().
+Alternatively, you may have forgotten to register a fail handler with RegisterFailHandler() or RegisterTestingT().
+`
+
+var globalFailHandler types.GomegaFailHandler
+
+var defaultEventuallyTimeout = time.Second
+var defaultEventuallyPollingInterval = 10 * time.Millisecond
+var defaultConsistentlyDuration = 100 * time.Millisecond
+var defaultConsistentlyPollingInterval = 10 * time.Millisecond
+
+//RegisterFailHandler connects Ginkgo to Gomega. When a matcher fails
+//the fail handler passed into RegisterFailHandler is called.
+func RegisterFailHandler(handler types.GomegaFailHandler) {
+	globalFailHandler = handler
+}
+
+//RegisterTestingT connects Gomega to Golang's XUnit style
+//Testing.T tests. You'll need to call this at the top of each XUnit style test:
+//
+//	func TestFarmHasCow(t *testing.T) {
+//	    RegisterTestingT(t)
+//
+//	    f := farm.New([]string{"Cow", "Horse"})
+//	    Expect(f.HasCow()).To(BeTrue(), "Farm should have cow")
+//	}
+//
+// Note that this *testing.T is registered *globally* by Gomega (this is why you don't have to
+// pass `t` down to the matcher itself). This means that you cannot run the XUnit style tests
+// in parallel as the global fail handler cannot point to more than one testing.T at a time.
+//
+// (As an aside: Ginkgo gets around this limitation by running parallel tests in different *processes*).
+func RegisterTestingT(t types.GomegaTestingT) {
+	RegisterFailHandler(testingtsupport.BuildTestingTGomegaFailHandler(t))
+}
+
+//InterceptGomegaFailures runs a given callback and returns an array of
+//failure messages generated by any Gomega assertions within the callback.
+//
+//This is accomplished by temporarily replacing the *global* fail handler
+//with a fail handler that simply annotates failures. The original fail handler
+//is reset when InterceptGomegaFailures returns.
+//
+//This is most useful when testing custom matchers, but can also be used to check
+//on a value using a Gomega assertion without causing a test failure.
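A minimal usage sketch (illustrative only, not part of the vendored file): InterceptGomegaFailures, defined just below, lets a test observe a failing assertion without failing itself, which is handy when testing custom matchers.

	failures := InterceptGomegaFailures(func() {
		Ω(1).Should(Equal(2)) // fails, but the failure is only recorded
	})
	Ω(failures).Should(HaveLen(1))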
+func InterceptGomegaFailures(f func()) []string {
+	originalHandler := globalFailHandler
+	failures := []string{}
+	RegisterFailHandler(func(message string, callerSkip ...int) {
+		failures = append(failures, message)
+	})
+	f()
+	RegisterFailHandler(originalHandler)
+	return failures
+}
+
+//Ω wraps an actual value allowing assertions to be made on it:
+//	Ω("foo").Should(Equal("foo"))
+//
+//If Ω is passed more than one argument it will pass the *first* argument to the matcher.
+//All subsequent arguments will be required to be nil/zero.
+//
+//This is convenient if you want to make an assertion on a method/function that returns
+//a value and an error - a common pattern in Go.
+//
+//For example, given a function with signature:
+//	func MyAmazingThing() (int, error)
+//
+//Then:
+//	Ω(MyAmazingThing()).Should(Equal(3))
+//Will succeed only if `MyAmazingThing()` returns `(3, nil)`
+//
+//Ω and Expect are identical
+func Ω(actual interface{}, extra ...interface{}) GomegaAssertion {
+	return ExpectWithOffset(0, actual, extra...)
+}
+
+//Expect wraps an actual value allowing assertions to be made on it:
+//	Expect("foo").To(Equal("foo"))
+//
+//If Expect is passed more than one argument it will pass the *first* argument to the matcher.
+//All subsequent arguments will be required to be nil/zero.
+//
+//This is convenient if you want to make an assertion on a method/function that returns
+//a value and an error - a common pattern in Go.
+//
+//For example, given a function with signature:
+//	func MyAmazingThing() (int, error)
+//
+//Then:
+//	Expect(MyAmazingThing()).Should(Equal(3))
+//Will succeed only if `MyAmazingThing()` returns `(3, nil)`
+//
+//Expect and Ω are identical
+func Expect(actual interface{}, extra ...interface{}) GomegaAssertion {
+	return ExpectWithOffset(0, actual, extra...)
+}
+
+//ExpectWithOffset wraps an actual value allowing assertions to be made on it:
+//	ExpectWithOffset(1, "foo").To(Equal("foo"))
+//
+//Unlike `Expect` and `Ω`, `ExpectWithOffset` takes an additional integer argument:
+//this is used to modify the call-stack offset when computing line numbers.
+//
+//This is most useful in helper functions that make assertions. If you want Gomega's
+//error message to refer to the calling line in the test (as opposed to the line in the helper function)
+//set the first argument of `ExpectWithOffset` appropriately.
+func ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) GomegaAssertion {
+	if globalFailHandler == nil {
+		panic(nilFailHandlerPanic)
+	}
+	return assertion.New(actual, globalFailHandler, offset, extra...)
+}
+
+//Eventually wraps an actual value allowing assertions to be made on it.
+//The assertion is tried periodically until it passes or a timeout occurs.
+//
+//Both the timeout and polling interval are configurable as optional arguments:
+//The first optional argument is the timeout
+//The second optional argument is the polling interval
+//
+//Both intervals can either be specified as time.Duration, parsable duration strings or as floats/integers. In the
+//last case they are interpreted as seconds.
+//
+//If Eventually is passed an actual that is a function taking no arguments and returning at least one value,
+//then Eventually will call the function periodically and try the matcher against the function's first return value.
+//
+//Example:
+//
+//	Eventually(func() int {
+//		return thingImPolling.Count()
+//	}).Should(BeNumerically(">=", 17))
+//
+//Note that this example could be rewritten:
+//
+//	Eventually(thingImPolling.Count).Should(BeNumerically(">=", 17))
+//
+//If the function returns more than one value, then Eventually will pass the first value to the matcher and
+//assert that all other values are nil/zero.
+//This allows you to pass Eventually a function that returns a value and an error - a common pattern in Go.
+//
+//For example, consider a method that returns a value and an error:
+//	func FetchFromDB() (string, error)
+//
+//Then:
+//	Eventually(FetchFromDB).Should(Equal("hasselhoff"))
+//
+//Will pass only if the returned error is nil and the returned string passes the matcher.
+//
+//Eventually's default timeout is 1 second, and its default polling interval is 10ms
+func Eventually(actual interface{}, intervals ...interface{}) GomegaAsyncAssertion {
+	return EventuallyWithOffset(0, actual, intervals...)
+}
+
+//EventuallyWithOffset operates like Eventually but takes an additional
+//initial argument to indicate an offset in the call stack. This is useful when building helper
+//functions that contain matchers. To learn more, read about `ExpectWithOffset`.
+func EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) GomegaAsyncAssertion {
+	if globalFailHandler == nil {
+		panic(nilFailHandlerPanic)
+	}
+	timeoutInterval := defaultEventuallyTimeout
+	pollingInterval := defaultEventuallyPollingInterval
+	if len(intervals) > 0 {
+		timeoutInterval = toDuration(intervals[0])
+	}
+	if len(intervals) > 1 {
+		pollingInterval = toDuration(intervals[1])
+	}
+	return asyncassertion.New(asyncassertion.AsyncAssertionTypeEventually, actual, globalFailHandler, timeoutInterval, pollingInterval, offset)
+}
+
+//Consistently wraps an actual value allowing assertions to be made on it.
+//The assertion is tried periodically and is required to pass for a period of time.
+//
+//Both the total time and polling interval are configurable as optional arguments:
+//The first optional argument is the duration that Consistently will run for
+//The second optional argument is the polling interval
+//
+//Both intervals can either be specified as time.Duration, parsable duration strings or as floats/integers. In the
+//last case they are interpreted as seconds.
+//
+//If Consistently is passed an actual that is a function taking no arguments and returning at least one value,
+//then Consistently will call the function periodically and try the matcher against the function's first return value.
+//
+//If the function returns more than one value, then Consistently will pass the first value to the matcher and
+//assert that all other values are nil/zero.
+//This allows you to pass Consistently a function that returns a value and an error - a common pattern in Go.
+//
+//Consistently is useful in cases where you want to assert that something *does not happen* over a period of time.
+//For example, you want to assert that a goroutine does *not* send data down a channel. In this case, you could:
+//
+//	Consistently(channel).ShouldNot(Receive())
+//
+//Consistently's default duration is 100ms, and its default polling interval is 10ms
+func Consistently(actual interface{}, intervals ...interface{}) GomegaAsyncAssertion {
+	return ConsistentlyWithOffset(0, actual, intervals...)
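The interval arguments described above accept three equivalent forms, all converted by the toDuration helper later in this file; a brief sketch (the zero-argument fetch function is hypothetical):

	Eventually(fetch, 5*time.Second, 200*time.Millisecond).Should(Equal("done"))
	Eventually(fetch, "5s", "200ms").Should(Equal("done")) // parsable duration strings
	Eventually(fetch, 5, 0.2).Should(Equal("done"))        // bare numbers are seconds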
+}
+
+//ConsistentlyWithOffset operates like Consistently but takes an additional
+//initial argument to indicate an offset in the call stack. This is useful when building helper
+//functions that contain matchers. To learn more, read about `ExpectWithOffset`.
+func ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) GomegaAsyncAssertion {
+	if globalFailHandler == nil {
+		panic(nilFailHandlerPanic)
+	}
+	timeoutInterval := defaultConsistentlyDuration
+	pollingInterval := defaultConsistentlyPollingInterval
+	if len(intervals) > 0 {
+		timeoutInterval = toDuration(intervals[0])
+	}
+	if len(intervals) > 1 {
+		pollingInterval = toDuration(intervals[1])
+	}
+	return asyncassertion.New(asyncassertion.AsyncAssertionTypeConsistently, actual, globalFailHandler, timeoutInterval, pollingInterval, offset)
+}
+
+//Set the default timeout duration for Eventually. Eventually will repeatedly poll your condition until it succeeds, or until this timeout elapses.
+func SetDefaultEventuallyTimeout(t time.Duration) {
+	defaultEventuallyTimeout = t
+}
+
+//Set the default polling interval for Eventually.
+func SetDefaultEventuallyPollingInterval(t time.Duration) {
+	defaultEventuallyPollingInterval = t
+}
+
+//Set the default duration for Consistently. Consistently will verify that your condition is satisfied for this long.
+func SetDefaultConsistentlyDuration(t time.Duration) {
+	defaultConsistentlyDuration = t
+}
+
+//Set the default polling interval for Consistently.
+func SetDefaultConsistentlyPollingInterval(t time.Duration) {
+	defaultConsistentlyPollingInterval = t
+}
+
+//GomegaAsyncAssertion is returned by Eventually and Consistently and polls the actual value passed into Eventually against
+//the matcher passed to the Should and ShouldNot methods.
+//
+//Both Should and ShouldNot take a variadic optionalDescription argument. This is passed on to
+//fmt.Sprintf() and is used to annotate failure messages. This allows you to make your failure messages more
+//descriptive.
+//
+//Both Should and ShouldNot return a boolean that is true if the assertion passed and false if it failed.
+//
+//Example:
+//
+//	Eventually(myChannel).Should(Receive(), "Something should have come down the pipe.")
+//	Consistently(myChannel).ShouldNot(Receive(), "Nothing should have come down the pipe.")
+type GomegaAsyncAssertion interface {
+	Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
+	ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
+}
+
+//GomegaAssertion is returned by Ω and Expect and compares the actual value to the matcher
+//passed to the Should/ShouldNot and To/ToNot/NotTo methods.
+//
+//Typically Should/ShouldNot are used with Ω and To/ToNot/NotTo are used with Expect
+//though this is not enforced.
+//
+//All methods take a variadic optionalDescription argument. This is passed on to fmt.Sprintf()
+//and is used to annotate failure messages.
+//
+//All methods return a bool that is true if the assertion passed and false if it failed.
+//
+//Example:
+//
+//	Ω(farm.HasCow()).Should(BeTrue(), "Farm %v should have a cow", farm)
+type GomegaAssertion interface {
+	Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
+	ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
+
+	To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
+	ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
+	NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
+}
+
+//OmegaMatcher is deprecated in favor of the better-named and better-organized types.GomegaMatcher but sticks around to support existing code that uses it
+type OmegaMatcher types.GomegaMatcher
+
+func toDuration(input interface{}) time.Duration {
+	duration, ok := input.(time.Duration)
+	if ok {
+		return duration
+	}
+
+	value := reflect.ValueOf(input)
+	kind := reflect.TypeOf(input).Kind()
+
+	if reflect.Int <= kind && kind <= reflect.Int64 {
+		return time.Duration(value.Int()) * time.Second
+	} else if reflect.Uint <= kind && kind <= reflect.Uint64 {
+		return time.Duration(value.Uint()) * time.Second
+	} else if reflect.Float32 <= kind && kind <= reflect.Float64 {
+		return time.Duration(value.Float() * float64(time.Second))
+	} else if reflect.String == kind {
+		duration, err := time.ParseDuration(value.String())
+		if err != nil {
+			panic(fmt.Sprintf("%#v is not a valid parsable duration string.", input))
+		}
+		return duration
+	}
+
+	panic(fmt.Sprintf("%v is not a valid interval. Must be time.Duration, parsable duration string or a number.", input))
+}
diff --git a/vendor/github.com/onsi/gomega/gstruct/elements.go b/vendor/github.com/onsi/gomega/gstruct/elements.go
new file mode 100644
index 000000000..a315fa139
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/gstruct/elements.go
@@ -0,0 +1,145 @@
+package gstruct
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"runtime/debug"
+
+	"github.com/onsi/gomega/format"
+	errorsutil "github.com/onsi/gomega/gstruct/errors"
+	"github.com/onsi/gomega/types"
+)
+
+//MatchAllElements succeeds if every element of a slice matches the element matcher it maps to
+//through the id function, and every element matcher is matched.
+//	Expect([]string{"a", "b"}).To(MatchAllElements(idFn, Elements{
+//		"a": Equal("a"),
+//		"b": Equal("b"),
+//	}))
+func MatchAllElements(identifier Identifier, elements Elements) types.GomegaMatcher {
+	return &ElementsMatcher{
+		Identifier: identifier,
+		Elements:   elements,
+	}
+}
+
+//MatchElements succeeds if each element of a slice matches the element matcher it maps to
+//through the id function. It can ignore extra elements and/or missing elements.
+//	Expect([]string{"a", "c"}).To(MatchElements(idFn, IgnoreMissing|IgnoreExtras, Elements{
+//		"a": Equal("a"),
+//		"b": Equal("b"),
+//	}))
+func MatchElements(identifier Identifier, options Options, elements Elements) types.GomegaMatcher {
+	return &ElementsMatcher{
+		Identifier:      identifier,
+		Elements:        elements,
+		IgnoreExtras:    options&IgnoreExtras != 0,
+		IgnoreMissing:   options&IgnoreMissing != 0,
+		AllowDuplicates: options&AllowDuplicates != 0,
+	}
+}
+
+// ElementsMatcher is a NestingMatcher that applies custom matchers to each element of a slice mapped
+// by the Identifier function.
+// TODO: Extend this to work with arrays & maps (map the key) as well.
+type ElementsMatcher struct {
+	// Matchers for each element.
+	Elements Elements
+	// Function mapping an element to the string key identifying its matcher.
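For a concrete sense of what an Identifier looks like (illustrative; this mirrors the id helper defined in elements_test.go later in this patch):

	idFn := func(element interface{}) string {
		return element.(string) // for a []string, the element itself is the ID
	}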
+	Identifier Identifier
+
+	// Whether to ignore extra elements or consider it an error.
+	IgnoreExtras bool
+	// Whether to ignore missing elements or consider it an error.
+	IgnoreMissing bool
+	// Whether to allow elements that map to the same ID, rather than treating duplicates as an error.
+	AllowDuplicates bool
+
+	// State.
+	failures []error
+}
+
+// Element ID to matcher.
+type Elements map[string]types.GomegaMatcher
+
+// Function for identifying (mapping) elements.
+type Identifier func(element interface{}) string
+
+func (m *ElementsMatcher) Match(actual interface{}) (success bool, err error) {
+	if reflect.TypeOf(actual).Kind() != reflect.Slice {
+		return false, fmt.Errorf("%v is type %T, expected slice", actual, actual)
+	}
+
+	m.failures = m.matchElements(actual)
+	if len(m.failures) > 0 {
+		return false, nil
+	}
+	return true, nil
+}
+
+func (m *ElementsMatcher) matchElements(actual interface{}) (errs []error) {
+	// Provide more useful error messages in the case of a panic.
+	defer func() {
+		if err := recover(); err != nil {
+			errs = append(errs, fmt.Errorf("panic checking %+v: %v\n%s", actual, err, debug.Stack()))
+		}
+	}()
+
+	val := reflect.ValueOf(actual)
+	elements := map[string]bool{}
+	for i := 0; i < val.Len(); i++ {
+		element := val.Index(i).Interface()
+		id := m.Identifier(element)
+		if elements[id] {
+			if !m.AllowDuplicates {
+				errs = append(errs, fmt.Errorf("found duplicate element ID %s", id))
+				continue
+			}
+		}
+		elements[id] = true
+
+		matcher, expected := m.Elements[id]
+		if !expected {
+			if !m.IgnoreExtras {
+				errs = append(errs, fmt.Errorf("unexpected element %s", id))
+			}
+			continue
+		}
+
+		match, err := matcher.Match(element)
+		if match {
+			continue
+		}
+
+		if err == nil {
+			if nesting, ok := matcher.(errorsutil.NestingMatcher); ok {
+				err = errorsutil.AggregateError(nesting.Failures())
+			} else {
+				err = errors.New(matcher.FailureMessage(element))
+			}
+		}
+		errs = append(errs, errorsutil.Nest(fmt.Sprintf("[%s]", id), err))
+	}
+
+	for id := range m.Elements {
+		if !elements[id] && !m.IgnoreMissing {
+			errs = append(errs, fmt.Errorf("missing expected element %s", id))
+		}
+	}
+
+	return errs
+}
+
+func (m *ElementsMatcher) FailureMessage(actual interface{}) (message string) {
+	failure := errorsutil.AggregateError(m.failures)
+	return format.Message(actual, fmt.Sprintf("to match elements: %v", failure))
+}
+
+func (m *ElementsMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "not to match elements")
+}
+
+func (m *ElementsMatcher) Failures() []error {
+	return m.failures
+}
diff --git a/vendor/github.com/onsi/gomega/gstruct/elements_test.go b/vendor/github.com/onsi/gomega/gstruct/elements_test.go
new file mode 100644
index 000000000..8ba78cb91
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/gstruct/elements_test.go
@@ -0,0 +1,144 @@
+package gstruct_test
+
+import (
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	.
"github.com/onsi/gomega/gstruct" +) + +var _ = Describe("Slice", func() { + allElements := []string{"a", "b"} + missingElements := []string{"a"} + extraElements := []string{"a", "b", "c"} + duplicateElements := []string{"a", "a", "b"} + empty := []string{} + var nils []string + + It("should strictly match all elements", func() { + m := MatchAllElements(id, Elements{ + "b": Equal("b"), + "a": Equal("a"), + }) + Ω(allElements).Should(m, "should match all elements") + Ω(missingElements).ShouldNot(m, "should fail with missing elements") + Ω(extraElements).ShouldNot(m, "should fail with extra elements") + Ω(duplicateElements).ShouldNot(m, "should fail with duplicate elements") + Ω(nils).ShouldNot(m, "should fail with an uninitialized slice") + + m = MatchAllElements(id, Elements{ + "a": Equal("a"), + "b": Equal("fail"), + }) + Ω(allElements).ShouldNot(m, "should run nested matchers") + + m = MatchAllElements(id, Elements{}) + Ω(empty).Should(m, "should handle empty slices") + Ω(allElements).ShouldNot(m, "should handle only empty slices") + Ω(nils).Should(m, "should handle nil slices") + }) + + It("should ignore extra elements", func() { + m := MatchElements(id, IgnoreExtras, Elements{ + "b": Equal("b"), + "a": Equal("a"), + }) + Ω(allElements).Should(m, "should match all elements") + Ω(missingElements).ShouldNot(m, "should fail with missing elements") + Ω(extraElements).Should(m, "should ignore extra elements") + Ω(duplicateElements).ShouldNot(m, "should fail with duplicate elements") + Ω(nils).ShouldNot(m, "should fail with an uninitialized slice") + }) + + It("should ignore missing elements", func() { + m := MatchElements(id, IgnoreMissing, Elements{ + "a": Equal("a"), + "b": Equal("b"), + }) + Ω(allElements).Should(m, "should match all elements") + Ω(missingElements).Should(m, "should ignore missing elements") + Ω(extraElements).ShouldNot(m, "should fail with extra elements") + Ω(duplicateElements).ShouldNot(m, "should fail with duplicate elements") + Ω(nils).Should(m, "should ignore an uninitialized slice") + }) + + It("should ignore missing and extra elements", func() { + m := MatchElements(id, IgnoreMissing|IgnoreExtras, Elements{ + "a": Equal("a"), + "b": Equal("b"), + }) + Ω(allElements).Should(m, "should match all elements") + Ω(missingElements).Should(m, "should ignore missing elements") + Ω(extraElements).Should(m, "should ignore extra elements") + Ω(duplicateElements).ShouldNot(m, "should fail with duplicate elements") + Ω(nils).Should(m, "should ignore an uninitialized slice") + + m = MatchElements(id, IgnoreExtras|IgnoreMissing, Elements{ + "a": Equal("a"), + "b": Equal("fail"), + }) + Ω(allElements).ShouldNot(m, "should run nested matchers") + }) + + Context("with elements that share a key", func() { + nonUniqueID := func(element interface{}) string { + return element.(string)[0:1] + } + + allElements := []string{"a123", "a213", "b321"} + includingBadElements := []string{"a123", "b123", "b5555"} + extraElements := []string{"a123", "b1234", "c345"} + missingElements := []string{"b123", "b1234", "b1345"} + + It("should strictly allow multiple matches", func() { + m := MatchElements(nonUniqueID, AllowDuplicates, Elements{ + "a": ContainSubstring("1"), + "b": ContainSubstring("1"), + }) + Ω(allElements).Should(m, "should match all elements") + Ω(includingBadElements).ShouldNot(m, "should reject if a member fails the matcher") + Ω(extraElements).ShouldNot(m, "should reject with extra keys") + Ω(missingElements).ShouldNot(m, "should reject with missing keys") + Ω(nils).ShouldNot(m, 
"should fail with an uninitialized slice") + }) + + It("should ignore missing", func() { + m := MatchElements(nonUniqueID, AllowDuplicates|IgnoreMissing, Elements{ + "a": ContainSubstring("1"), + "b": ContainSubstring("1"), + }) + Ω(allElements).Should(m, "should match all elements") + Ω(includingBadElements).ShouldNot(m, "should reject if a member fails the matcher") + Ω(extraElements).ShouldNot(m, "should reject with extra keys") + Ω(missingElements).Should(m, "should allow missing keys") + Ω(nils).Should(m, "should allow an uninitialized slice") + }) + + It("should ignore extras", func() { + m := MatchElements(nonUniqueID, AllowDuplicates|IgnoreExtras, Elements{ + "a": ContainSubstring("1"), + "b": ContainSubstring("1"), + }) + Ω(allElements).Should(m, "should match all elements") + Ω(includingBadElements).ShouldNot(m, "should reject if a member fails the matcher") + Ω(extraElements).Should(m, "should allow extra keys") + Ω(missingElements).ShouldNot(m, "should reject missing keys") + Ω(nils).ShouldNot(m, "should reject an uninitialized slice") + }) + + It("should ignore missing and extras", func() { + m := MatchElements(nonUniqueID, AllowDuplicates|IgnoreExtras|IgnoreMissing, Elements{ + "a": ContainSubstring("1"), + "b": ContainSubstring("1"), + }) + Ω(allElements).Should(m, "should match all elements") + Ω(includingBadElements).ShouldNot(m, "should reject if a member fails the matcher") + Ω(extraElements).Should(m, "should allow extra keys") + Ω(missingElements).Should(m, "should allow missing keys") + Ω(nils).Should(m, "should allow an uninitialized slice") + }) + }) +}) + +func id(element interface{}) string { + return element.(string) +} diff --git a/vendor/github.com/onsi/gomega/gstruct/errors/nested_types.go b/vendor/github.com/onsi/gomega/gstruct/errors/nested_types.go new file mode 100644 index 000000000..188492b21 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gstruct/errors/nested_types.go @@ -0,0 +1,72 @@ +package errors + +import ( + "fmt" + "strings" + + "github.com/onsi/gomega/types" +) + +// A stateful matcher that nests other matchers within it and preserves the error types of the +// nested matcher failures. +type NestingMatcher interface { + types.GomegaMatcher + + // Returns the failures of nested matchers. + Failures() []error +} + +// An error type for labeling errors on deeply nested matchers. +type NestedError struct { + Path string + Err error +} + +func (e *NestedError) Error() string { + // Indent Errors. + indented := strings.Replace(e.Err.Error(), "\n", "\n\t", -1) + return fmt.Sprintf("%s:\n\t%v", e.Path, indented) +} + +// Create a NestedError with the given path. +// If err is a NestedError, prepend the path to it. +// If err is an AggregateError, recursively Nest each error. +func Nest(path string, err error) error { + if ag, ok := err.(AggregateError); ok { + var errs AggregateError + for _, e := range ag { + errs = append(errs, Nest(path, e)) + } + return errs + } + if ne, ok := err.(*NestedError); ok { + return &NestedError{ + Path: path + ne.Path, + Err: ne.Err, + } + } + return &NestedError{ + Path: path, + Err: err, + } +} + +// An error type for treating multiple errors as a single error. +type AggregateError []error + +// Error is part of the error interface. +func (err AggregateError) Error() string { + if len(err) == 0 { + // This should never happen, really. 
+ return "" + } + if len(err) == 1 { + return err[0].Error() + } + result := fmt.Sprintf("[%s", err[0].Error()) + for i := 1; i < len(err); i++ { + result += fmt.Sprintf(", %s", err[i].Error()) + } + result += "]" + return result +} diff --git a/vendor/github.com/onsi/gomega/gstruct/fields.go b/vendor/github.com/onsi/gomega/gstruct/fields.go new file mode 100644 index 000000000..0020b873d --- /dev/null +++ b/vendor/github.com/onsi/gomega/gstruct/fields.go @@ -0,0 +1,141 @@ +package gstruct + +import ( + "errors" + "fmt" + "reflect" + "runtime/debug" + "strings" + + "github.com/onsi/gomega/format" + errorsutil "github.com/onsi/gomega/gstruct/errors" + "github.com/onsi/gomega/types" +) + +//MatchAllFields succeeds if every field of a struct matches the field matcher associated with +//it, and every element matcher is matched. +// Expect([]string{"a", "b"}).To(MatchAllFields(gstruct.Fields{ +// "a": BeEqual("a"), +// "b": BeEqual("b"), +// }) +func MatchAllFields(fields Fields) types.GomegaMatcher { + return &FieldsMatcher{ + Fields: fields, + } +} + +//MatchFields succeeds if each element of a struct matches the field matcher associated with +//it. It can ignore extra fields and/or missing fields. +// Expect([]string{"a", "c"}).To(MatchFields(IgnoreMissing|IgnoreExtra, gstruct.Fields{ +// "a": BeEqual("a") +// "b": BeEqual("b"), +// }) +func MatchFields(options Options, fields Fields) types.GomegaMatcher { + return &FieldsMatcher{ + Fields: fields, + IgnoreExtras: options&IgnoreExtras != 0, + IgnoreMissing: options&IgnoreMissing != 0, + } +} + +type FieldsMatcher struct { + // Matchers for each field. + Fields Fields + + // Whether to ignore extra elements or consider it an error. + IgnoreExtras bool + // Whether to ignore missing elements or consider it an error. + IgnoreMissing bool + + // State. + failures []error +} + +// Field name to matcher. +type Fields map[string]types.GomegaMatcher + +func (m *FieldsMatcher) Match(actual interface{}) (success bool, err error) { + if reflect.TypeOf(actual).Kind() != reflect.Struct { + return false, fmt.Errorf("%v is type %T, expected struct", actual, actual) + } + + m.failures = m.matchFields(actual) + if len(m.failures) > 0 { + return false, nil + } + return true, nil +} + +func (m *FieldsMatcher) matchFields(actual interface{}) (errs []error) { + val := reflect.ValueOf(actual) + typ := val.Type() + fields := map[string]bool{} + for i := 0; i < val.NumField(); i++ { + fieldName := typ.Field(i).Name + fields[fieldName] = true + + err := func() (err error) { + // This test relies heavily on reflect, which tends to panic. + // Recover here to provide more useful error messages in that case. 
+ defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("panic checking %+v: %v\n%s", actual, r, debug.Stack()) + } + }() + + matcher, expected := m.Fields[fieldName] + if !expected { + if !m.IgnoreExtras { + return fmt.Errorf("unexpected field %s: %+v", fieldName, actual) + } + return nil + } + + var field interface{} + if val.Field(i).IsValid() { + field = val.Field(i).Interface() + } else { + field = reflect.Zero(typ.Field(i).Type) + } + + match, err := matcher.Match(field) + if err != nil { + return err + } else if !match { + if nesting, ok := matcher.(errorsutil.NestingMatcher); ok { + return errorsutil.AggregateError(nesting.Failures()) + } + return errors.New(matcher.FailureMessage(field)) + } + return nil + }() + if err != nil { + errs = append(errs, errorsutil.Nest("."+fieldName, err)) + } + } + + for field := range m.Fields { + if !fields[field] && !m.IgnoreMissing { + errs = append(errs, fmt.Errorf("missing expected field %s", field)) + } + } + + return errs +} + +func (m *FieldsMatcher) FailureMessage(actual interface{}) (message string) { + failures := make([]string, len(m.failures)) + for i := range m.failures { + failures[i] = m.failures[i].Error() + } + return format.Message(reflect.TypeOf(actual).Name(), + fmt.Sprintf("to match fields: {\n%v\n}\n", strings.Join(failures, "\n"))) +} + +func (m *FieldsMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to match fields") +} + +func (m *FieldsMatcher) Failures() []error { + return m.failures +} diff --git a/vendor/github.com/onsi/gomega/gstruct/fields_test.go b/vendor/github.com/onsi/gomega/gstruct/fields_test.go new file mode 100644 index 000000000..61f4afc40 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gstruct/fields_test.go @@ -0,0 +1,76 @@ +package gstruct_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/gstruct" +) + +var _ = Describe("Struct", func() { + allFields := struct{ A, B string }{"a", "b"} + missingFields := struct{ A string }{"a"} + extraFields := struct{ A, B, C string }{"a", "b", "c"} + emptyFields := struct{ A, B string }{} + + It("should strictly match all fields", func() { + m := MatchAllFields(Fields{ + "B": Equal("b"), + "A": Equal("a"), + }) + Ω(allFields).Should(m, "should match all fields") + Ω(missingFields).ShouldNot(m, "should fail with missing fields") + Ω(extraFields).ShouldNot(m, "should fail with extra fields") + Ω(emptyFields).ShouldNot(m, "should fail with empty fields") + + m = MatchAllFields(Fields{ + "A": Equal("a"), + "B": Equal("fail"), + }) + Ω(allFields).ShouldNot(m, "should run nested matchers") + }) + + It("should handle empty structs", func() { + m := MatchAllFields(Fields{}) + Ω(struct{}{}).Should(m, "should handle empty structs") + Ω(allFields).ShouldNot(m, "should fail with extra fields") + }) + + It("should ignore missing fields", func() { + m := MatchFields(IgnoreMissing, Fields{ + "B": Equal("b"), + "A": Equal("a"), + }) + Ω(allFields).Should(m, "should match all fields") + Ω(missingFields).Should(m, "should ignore missing fields") + Ω(extraFields).ShouldNot(m, "should fail with extra fields") + Ω(emptyFields).ShouldNot(m, "should fail with empty fields") + }) + + It("should ignore extra fields", func() { + m := MatchFields(IgnoreExtras, Fields{ + "B": Equal("b"), + "A": Equal("a"), + }) + Ω(allFields).Should(m, "should match all fields") + Ω(missingFields).ShouldNot(m, "should fail with missing fields") + Ω(extraFields).Should(m, "should ignore extra fields") + Ω(emptyFields).ShouldNot(m, "should fail with empty fields") + }) + + It("should ignore missing and extra fields", func() { + m := MatchFields(IgnoreMissing|IgnoreExtras, Fields{ + "B": Equal("b"), + "A": Equal("a"), + }) + Ω(allFields).Should(m, "should match all fields") + Ω(missingFields).Should(m, "should ignore missing fields") + Ω(extraFields).Should(m, "should ignore extra fields") + Ω(emptyFields).ShouldNot(m, "should fail with empty fields") + + m = MatchFields(IgnoreMissing|IgnoreExtras, Fields{ + "A": Equal("a"), + "B": Equal("fail"), + }) + Ω(allFields).ShouldNot(m, "should run nested matchers") + }) +}) diff --git a/vendor/github.com/onsi/gomega/gstruct/gstruct_tests_suite_test.go b/vendor/github.com/onsi/gomega/gstruct/gstruct_tests_suite_test.go new file mode 100644 index 000000000..d47566304 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gstruct/gstruct_tests_suite_test.go @@ -0,0 +1,13 @@ +package gstruct_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func Test(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Gstruct Suite") +} diff --git a/vendor/github.com/onsi/gomega/gstruct/ignore.go b/vendor/github.com/onsi/gomega/gstruct/ignore.go new file mode 100644 index 000000000..0365f32ad --- /dev/null +++ b/vendor/github.com/onsi/gomega/gstruct/ignore.go @@ -0,0 +1,37 @@ +package gstruct + +import ( + "github.com/onsi/gomega/types" +) + +//Ignore ignores the actual value and always succeeds. +// Expect(nil).To(Ignore()) +// Expect(true).To(Ignore()) +func Ignore() types.GomegaMatcher { + return &IgnoreMatcher{true} +} + +//Reject ignores the actual value and always fails. It can be used in conjunction with IgnoreMissing +//to catch problematic elements, or to verify tests are running. 
+//	Expect(nil).NotTo(Reject())
+//	Expect(true).NotTo(Reject())
+func Reject() types.GomegaMatcher {
+	return &IgnoreMatcher{false}
+}
+
+// A matcher that either always succeeds or always fails.
+type IgnoreMatcher struct {
+	Succeed bool
+}
+
+func (m *IgnoreMatcher) Match(actual interface{}) (bool, error) {
+	return m.Succeed, nil
+}
+
+func (m *IgnoreMatcher) FailureMessage(_ interface{}) (message string) {
+	return "Unconditional failure"
+}
+
+func (m *IgnoreMatcher) NegatedFailureMessage(_ interface{}) (message string) {
+	return "Unconditional success"
+}
diff --git a/vendor/github.com/onsi/gomega/gstruct/ignore_test.go b/vendor/github.com/onsi/gomega/gstruct/ignore_test.go
new file mode 100644
index 000000000..70e1d4007
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/gstruct/ignore_test.go
@@ -0,0 +1,23 @@
+package gstruct_test
+
+import (
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	. "github.com/onsi/gomega/gstruct"
+)
+
+var _ = Describe("Ignore", func() {
+	It("should always succeed", func() {
+		Ω(nil).Should(Ignore())
+		Ω(struct{}{}).Should(Ignore())
+		Ω(0).Should(Ignore())
+		Ω(false).Should(Ignore())
+	})
+
+	It("should always fail", func() {
+		Ω(nil).ShouldNot(Reject())
+		Ω(struct{}{}).ShouldNot(Reject())
+		Ω(1).ShouldNot(Reject())
+		Ω(true).ShouldNot(Reject())
+	})
+})
diff --git a/vendor/github.com/onsi/gomega/gstruct/pointer.go b/vendor/github.com/onsi/gomega/gstruct/pointer.go
new file mode 100644
index 000000000..0a2f35de3
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/gstruct/pointer.go
@@ -0,0 +1,56 @@
+package gstruct
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/onsi/gomega/format"
+	"github.com/onsi/gomega/types"
+)
+
+//PointTo applies the given matcher to the value pointed to by actual. It fails if the pointer is
+//nil.
+//	actual := 5
+//	Expect(&actual).To(PointTo(Equal(5)))
+func PointTo(matcher types.GomegaMatcher) types.GomegaMatcher {
+	return &PointerMatcher{
+		Matcher: matcher,
+	}
+}
+
+type PointerMatcher struct {
+	Matcher types.GomegaMatcher
+
+	// Failure message.
+	failure string
+}
+
+func (m *PointerMatcher) Match(actual interface{}) (bool, error) {
+	val := reflect.ValueOf(actual)
+
+	// return error if actual type is not a pointer
+	if val.Kind() != reflect.Ptr {
+		return false, fmt.Errorf("PointerMatcher expects a pointer but we have '%s'", val.Kind())
+	}
+
+	if !val.IsValid() || val.IsNil() {
+		m.failure = format.Message(actual, "not to be <nil>")
+		return false, nil
+	}
+
+	// Forward the value.
+	elem := val.Elem().Interface()
+	match, err := m.Matcher.Match(elem)
+	if !match {
+		m.failure = m.Matcher.FailureMessage(elem)
+	}
+	return match, err
+}
+
+func (m *PointerMatcher) FailureMessage(_ interface{}) (message string) {
+	return m.failure
+}
+
+func (m *PointerMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return m.Matcher.NegatedFailureMessage(actual)
+}
diff --git a/vendor/github.com/onsi/gomega/gstruct/pointer_test.go b/vendor/github.com/onsi/gomega/gstruct/pointer_test.go
new file mode 100644
index 000000000..b02081c4c
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/gstruct/pointer_test.go
@@ -0,0 +1,33 @@
+package gstruct_test
+
+import (
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	. "github.com/onsi/gomega/gstruct"
"github.com/onsi/gomega/gstruct" +) + +var _ = Describe("PointTo", func() { + It("should fail when passed nil", func() { + var p *struct{} + Ω(p).Should(BeNil()) + }) + + It("should succeed when passed non-nil pointer", func() { + var s struct{} + Ω(&s).Should(PointTo(Ignore())) + }) + + It("should unwrap the pointee value", func() { + i := 1 + Ω(&i).Should(PointTo(Equal(1))) + Ω(&i).ShouldNot(PointTo(Equal(2))) + }) + + It("should work with nested pointers", func() { + i := 1 + ip := &i + ipp := &ip + Ω(ipp).Should(PointTo(PointTo(Equal(1)))) + Ω(ipp).ShouldNot(PointTo(PointTo(Equal(2)))) + }) +}) diff --git a/vendor/github.com/onsi/gomega/gstruct/types.go b/vendor/github.com/onsi/gomega/gstruct/types.go new file mode 100644 index 000000000..48cbbe8f6 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gstruct/types.go @@ -0,0 +1,15 @@ +package gstruct + +//Options is the type for options passed to some matchers. +type Options int + +const ( + //IgnoreExtras tells the matcher to ignore extra elements or fields, rather than triggering a failure. + IgnoreExtras Options = 1 << iota + //IgnoreMissing tells the matcher to ignore missing elements or fields, rather than triggering a failure. + IgnoreMissing + //AllowDuplicates tells the matcher to permit multiple members of the slice to produce the same ID when + //considered by the indentifier function. All members that map to a given key must still match successfully + //with the matcher that is provided for that key. + AllowDuplicates +) diff --git a/vendor/github.com/onsi/gomega/internal/assertion/assertion.go b/vendor/github.com/onsi/gomega/internal/assertion/assertion.go new file mode 100644 index 000000000..b73673f21 --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/assertion/assertion.go @@ -0,0 +1,98 @@ +package assertion + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/types" +) + +type Assertion struct { + actualInput interface{} + fail types.GomegaFailHandler + offset int + extra []interface{} +} + +func New(actualInput interface{}, fail types.GomegaFailHandler, offset int, extra ...interface{}) *Assertion { + return &Assertion{ + actualInput: actualInput, + fail: fail, + offset: offset, + extra: extra, + } +} + +func (assertion *Assertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, true, optionalDescription...) +} + +func (assertion *Assertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...) +} + +func (assertion *Assertion) To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, true, optionalDescription...) +} + +func (assertion *Assertion) ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...) +} + +func (assertion *Assertion) NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...) 
+} + +func (assertion *Assertion) buildDescription(optionalDescription ...interface{}) string { + switch len(optionalDescription) { + case 0: + return "" + default: + return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n" + } +} + +func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool { + matches, err := matcher.Match(assertion.actualInput) + description := assertion.buildDescription(optionalDescription...) + if err != nil { + assertion.fail(description+err.Error(), 2+assertion.offset) + return false + } + if matches != desiredMatch { + var message string + if desiredMatch { + message = matcher.FailureMessage(assertion.actualInput) + } else { + message = matcher.NegatedFailureMessage(assertion.actualInput) + } + assertion.fail(description+message, 2+assertion.offset) + return false + } + + return true +} + +func (assertion *Assertion) vetExtras(optionalDescription ...interface{}) bool { + success, message := vetExtras(assertion.extra) + if success { + return true + } + + description := assertion.buildDescription(optionalDescription...) + assertion.fail(description+message, 2+assertion.offset) + return false +} + +func vetExtras(extras []interface{}) (bool, string) { + for i, extra := range extras { + if extra != nil { + zeroValue := reflect.Zero(reflect.TypeOf(extra)).Interface() + if !reflect.DeepEqual(zeroValue, extra) { + message := fmt.Sprintf("Unexpected non-nil/non-zero extra argument at index %d:\n\t<%T>: %#v", i+1, extra, extra) + return false, message + } + } + } + return true, "" +} diff --git a/vendor/github.com/onsi/gomega/internal/assertion/assertion_suite_test.go b/vendor/github.com/onsi/gomega/internal/assertion/assertion_suite_test.go new file mode 100644 index 000000000..dae47a48b --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/assertion/assertion_suite_test.go @@ -0,0 +1,13 @@ +package assertion_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "testing" +) + +func TestAssertion(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Assertion Suite") +} diff --git a/vendor/github.com/onsi/gomega/internal/assertion/assertion_test.go b/vendor/github.com/onsi/gomega/internal/assertion/assertion_test.go new file mode 100644 index 000000000..c03b7a320 --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/assertion/assertion_test.go @@ -0,0 +1,252 @@ +package assertion_test + +import ( + "errors" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/internal/assertion" + "github.com/onsi/gomega/internal/fakematcher" +) + +var _ = Describe("Assertion", func() { + var ( + a *Assertion + failureMessage string + failureCallerSkip int + matcher *fakematcher.FakeMatcher + ) + + input := "The thing I'm testing" + + var fakeFailHandler = func(message string, callerSkip ...int) { + failureMessage = message + if len(callerSkip) == 1 { + failureCallerSkip = callerSkip[0] + } + } + + BeforeEach(func() { + matcher = &fakematcher.FakeMatcher{} + failureMessage = "" + failureCallerSkip = 0 + a = New(input, fakeFailHandler, 1) + }) + + Context("when called", func() { + It("should pass the provided input value to the matcher", func() { + a.Should(matcher) + + Ω(matcher.ReceivedActual).Should(Equal(input)) + matcher.ReceivedActual = "" + + a.ShouldNot(matcher) + + Ω(matcher.ReceivedActual).Should(Equal(input)) + matcher.ReceivedActual = "" + + a.To(matcher) + + Ω(matcher.ReceivedActual).Should(Equal(input)) + matcher.ReceivedActual = "" + + a.ToNot(matcher) + + Ω(matcher.ReceivedActual).Should(Equal(input)) + matcher.ReceivedActual = "" + + a.NotTo(matcher) + + Ω(matcher.ReceivedActual).Should(Equal(input)) + }) + }) + + Context("when the matcher succeeds", func() { + BeforeEach(func() { + matcher.MatchesToReturn = true + matcher.ErrToReturn = nil + }) + + Context("and a positive assertion is being made", func() { + It("should not call the failure callback", func() { + a.Should(matcher) + Ω(failureMessage).Should(Equal("")) + }) + + It("should be true", func() { + Ω(a.Should(matcher)).Should(BeTrue()) + }) + }) + + Context("and a negative assertion is being made", func() { + It("should call the failure callback", func() { + a.ShouldNot(matcher) + Ω(failureMessage).Should(Equal("negative: The thing I'm testing")) + Ω(failureCallerSkip).Should(Equal(3)) + }) + + It("should be false", func() { + Ω(a.ShouldNot(matcher)).Should(BeFalse()) + }) + }) + }) + + Context("when the matcher fails", func() { + BeforeEach(func() { + matcher.MatchesToReturn = false + matcher.ErrToReturn = nil + }) + + Context("and a positive assertion is being made", func() { + It("should call the failure callback", func() { + a.Should(matcher) + Ω(failureMessage).Should(Equal("positive: The thing I'm testing")) + Ω(failureCallerSkip).Should(Equal(3)) + }) + + It("should be false", func() { + Ω(a.Should(matcher)).Should(BeFalse()) + }) + }) + + Context("and a negative assertion is being made", func() { + It("should not call the failure callback", func() { + a.ShouldNot(matcher) + Ω(failureMessage).Should(Equal("")) + }) + + It("should be true", func() { + Ω(a.ShouldNot(matcher)).Should(BeTrue()) + }) + }) + }) + + Context("When reporting a failure", func() { + BeforeEach(func() { + matcher.MatchesToReturn = false + matcher.ErrToReturn = nil + }) + + Context("and there is an optional description", func() { + It("should append the description to the failure message", func() { + a.Should(matcher, "A description") + Ω(failureMessage).Should(Equal("A description\npositive: The thing I'm testing")) + Ω(failureCallerSkip).Should(Equal(3)) + }) + }) + + Context("and there are multiple arguments to the optional description", func() { + It("should append the formatted description to the failure message", func() { + a.Should(matcher, "A description of [%d]", 3) + Ω(failureMessage).Should(Equal("A description of [3]\npositive: The thing I'm testing")) + Ω(failureCallerSkip).Should(Equal(3)) + }) + }) + }) + + Context("When the matcher returns an error", func() { + 
BeforeEach(func() { + matcher.ErrToReturn = errors.New("Kaboom!") + }) + + Context("and a positive assertion is being made", func() { + It("should call the failure callback", func() { + matcher.MatchesToReturn = true + a.Should(matcher) + Ω(failureMessage).Should(Equal("Kaboom!")) + Ω(failureCallerSkip).Should(Equal(3)) + }) + }) + + Context("and a negative assertion is being made", func() { + It("should call the failure callback", func() { + matcher.MatchesToReturn = false + a.ShouldNot(matcher) + Ω(failureMessage).Should(Equal("Kaboom!")) + Ω(failureCallerSkip).Should(Equal(3)) + }) + }) + + It("should always be false", func() { + Ω(a.Should(matcher)).Should(BeFalse()) + Ω(a.ShouldNot(matcher)).Should(BeFalse()) + }) + }) + + Context("when there are extra parameters", func() { + It("(a simple example)", func() { + Ω(func() (string, int, error) { + return "foo", 0, nil + }()).Should(Equal("foo")) + }) + + Context("when the parameters are all nil or zero", func() { + It("should invoke the matcher", func() { + matcher.MatchesToReturn = true + matcher.ErrToReturn = nil + + var typedNil []string + a = New(input, fakeFailHandler, 1, 0, nil, typedNil) + + result := a.Should(matcher) + Ω(result).Should(BeTrue()) + Ω(matcher.ReceivedActual).Should(Equal(input)) + + Ω(failureMessage).Should(BeZero()) + }) + }) + + Context("when any of the parameters are not nil or zero", func() { + It("should call the failure callback", func() { + matcher.MatchesToReturn = false + matcher.ErrToReturn = nil + + a = New(input, fakeFailHandler, 1, errors.New("foo")) + result := a.Should(matcher) + Ω(result).Should(BeFalse()) + Ω(matcher.ReceivedActual).Should(BeZero(), "The matcher doesn't even get called") + Ω(failureMessage).Should(ContainSubstring("foo")) + failureMessage = "" + + a = New(input, fakeFailHandler, 1, nil, 1) + result = a.ShouldNot(matcher) + Ω(result).Should(BeFalse()) + Ω(failureMessage).Should(ContainSubstring("1")) + failureMessage = "" + + a = New(input, fakeFailHandler, 1, nil, 0, []string{"foo"}) + result = a.To(matcher) + Ω(result).Should(BeFalse()) + Ω(failureMessage).Should(ContainSubstring("foo")) + failureMessage = "" + + a = New(input, fakeFailHandler, 1, nil, 0, []string{"foo"}) + result = a.ToNot(matcher) + Ω(result).Should(BeFalse()) + Ω(failureMessage).Should(ContainSubstring("foo")) + failureMessage = "" + + a = New(input, fakeFailHandler, 1, nil, 0, []string{"foo"}) + result = a.NotTo(matcher) + Ω(result).Should(BeFalse()) + Ω(failureMessage).Should(ContainSubstring("foo")) + Ω(failureCallerSkip).Should(Equal(3)) + }) + }) + }) + + Context("Making an assertion without a registered fail handler", func() { + It("should panic", func() { + defer func() { + e := recover() + RegisterFailHandler(Fail) + if e == nil { + Fail("expected a panic to have occurred") + } + }() + + RegisterFailHandler(nil) + Ω(true).Should(BeTrue()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go b/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go new file mode 100644 index 000000000..bce085300 --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go @@ -0,0 +1,189 @@ +package asyncassertion + +import ( + "errors" + "fmt" + "reflect" + "time" + + "github.com/onsi/gomega/internal/oraclematcher" + "github.com/onsi/gomega/types" +) + +type AsyncAssertionType uint + +const ( + AsyncAssertionTypeEventually AsyncAssertionType = iota + AsyncAssertionTypeConsistently +) + +type AsyncAssertion struct { + 
asyncType AsyncAssertionType + actualInput interface{} + timeoutInterval time.Duration + pollingInterval time.Duration + fail types.GomegaFailHandler + offset int +} + +func New(asyncType AsyncAssertionType, actualInput interface{}, fail types.GomegaFailHandler, timeoutInterval time.Duration, pollingInterval time.Duration, offset int) *AsyncAssertion { + actualType := reflect.TypeOf(actualInput) + if actualType.Kind() == reflect.Func { + if actualType.NumIn() != 0 || actualType.NumOut() == 0 { + panic("Expected a function with no arguments and one or more return values.") + } + } + + return &AsyncAssertion{ + asyncType: asyncType, + actualInput: actualInput, + fail: fail, + timeoutInterval: timeoutInterval, + pollingInterval: pollingInterval, + offset: offset, + } +} + +func (assertion *AsyncAssertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + return assertion.match(matcher, true, optionalDescription...) +} + +func (assertion *AsyncAssertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + return assertion.match(matcher, false, optionalDescription...) +} + +func (assertion *AsyncAssertion) buildDescription(optionalDescription ...interface{}) string { + switch len(optionalDescription) { + case 0: + return "" + default: + return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n" + } +} + +func (assertion *AsyncAssertion) actualInputIsAFunction() bool { + actualType := reflect.TypeOf(assertion.actualInput) + return actualType.Kind() == reflect.Func && actualType.NumIn() == 0 && actualType.NumOut() > 0 +} + +func (assertion *AsyncAssertion) pollActual() (interface{}, error) { + if assertion.actualInputIsAFunction() { + values := reflect.ValueOf(assertion.actualInput).Call([]reflect.Value{}) + + extras := []interface{}{} + for _, value := range values[1:] { + extras = append(extras, value.Interface()) + } + + success, message := vetExtras(extras) + + if !success { + return nil, errors.New(message) + } + + return values[0].Interface(), nil + } + + return assertion.actualInput, nil +} + +func (assertion *AsyncAssertion) matcherMayChange(matcher types.GomegaMatcher, value interface{}) bool { + if assertion.actualInputIsAFunction() { + return true + } + + return oraclematcher.MatchMayChangeInTheFuture(matcher, value) +} + +func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool { + timer := time.Now() + timeout := time.After(assertion.timeoutInterval) + + description := assertion.buildDescription(optionalDescription...) + + var matches bool + var err error + mayChange := true + value, err := assertion.pollActual() + if err == nil { + mayChange = assertion.matcherMayChange(matcher, value) + matches, err = matcher.Match(value) + } + + fail := func(preamble string) { + errMsg := "" + message := "" + if err != nil { + errMsg = "Error: " + err.Error() + } else { + if desiredMatch { + message = matcher.FailureMessage(value) + } else { + message = matcher.NegatedFailureMessage(value) + } + } + assertion.fail(fmt.Sprintf("%s after %.3fs.\n%s%s%s", preamble, time.Since(timer).Seconds(), description, message, errMsg), 3+assertion.offset) + } + + if assertion.asyncType == AsyncAssertionTypeEventually { + for { + if err == nil && matches == desiredMatch { + return true + } + + if !mayChange { + fail("No future change is possible. 
Bailing out early") + return false + } + + select { + case <-time.After(assertion.pollingInterval): + value, err = assertion.pollActual() + if err == nil { + mayChange = assertion.matcherMayChange(matcher, value) + matches, err = matcher.Match(value) + } + case <-timeout: + fail("Timed out") + return false + } + } + } else if assertion.asyncType == AsyncAssertionTypeConsistently { + for { + if !(err == nil && matches == desiredMatch) { + fail("Failed") + return false + } + + if !mayChange { + return true + } + + select { + case <-time.After(assertion.pollingInterval): + value, err = assertion.pollActual() + if err == nil { + mayChange = assertion.matcherMayChange(matcher, value) + matches, err = matcher.Match(value) + } + case <-timeout: + return true + } + } + } + + return false +} + +func vetExtras(extras []interface{}) (bool, string) { + for i, extra := range extras { + if extra != nil { + zeroValue := reflect.Zero(reflect.TypeOf(extra)).Interface() + if !reflect.DeepEqual(zeroValue, extra) { + message := fmt.Sprintf("Unexpected non-nil/non-zero extra argument at index %d:\n\t<%T>: %#v", i+1, extra, extra) + return false, message + } + } + } + return true, "" +} diff --git a/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion_suite_test.go b/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion_suite_test.go new file mode 100644 index 000000000..bdb0c3d22 --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion_suite_test.go @@ -0,0 +1,13 @@ +package asyncassertion_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "testing" +) + +func TestAsyncAssertion(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "AsyncAssertion Suite") +} diff --git a/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion_test.go b/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion_test.go new file mode 100644 index 000000000..3d7e3489d --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion_test.go @@ -0,0 +1,345 @@ +package asyncassertion_test + +import ( + "errors" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/internal/asyncassertion" +) + +var _ = Describe("Async Assertion", func() { + var ( + failureMessage string + callerSkip int + ) + + var fakeFailHandler = func(message string, skip ...int) { + failureMessage = message + callerSkip = skip[0] + } + + BeforeEach(func() { + failureMessage = "" + callerSkip = 0 + }) + + Describe("Eventually", func() { + Context("the positive case", func() { + It("should poll the function and matcher", func() { + counter := 0 + a := New(AsyncAssertionTypeEventually, func() int { + counter++ + return counter + }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) + + a.Should(BeNumerically("==", 5)) + Ω(failureMessage).Should(BeZero()) + }) + + It("should continue when the matcher errors", func() { + counter := 0 + a := New(AsyncAssertionTypeEventually, func() interface{} { + counter++ + if counter == 5 { + return "not-a-number" //this should cause the matcher to error + } + return counter + }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) + + a.Should(BeNumerically("==", 5), "My description %d", 2) + + Ω(failureMessage).Should(ContainSubstring("Timed out after")) + Ω(failureMessage).Should(ContainSubstring("My description 2")) + Ω(callerSkip).Should(Equal(4)) + }) + + It("should be able to timeout", func() { + counter := 0 + a := New(AsyncAssertionTypeEventually, func() int { + counter++ + return counter + }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) + + a.Should(BeNumerically(">", 100), "My description %d", 2) + + Ω(counter).Should(BeNumerically(">", 8)) + Ω(counter).Should(BeNumerically("<=", 10)) + Ω(failureMessage).Should(ContainSubstring("Timed out after")) + Ω(failureMessage).Should(MatchRegexp(`\: \d`), "Should pass the correct value to the matcher message formatter.") + Ω(failureMessage).Should(ContainSubstring("My description 2")) + Ω(callerSkip).Should(Equal(4)) + }) + }) + + Context("the negative case", func() { + It("should poll the function and matcher", func() { + counter := 0 + a := New(AsyncAssertionTypeEventually, func() int { + counter += 1 + return counter + }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) + + a.ShouldNot(BeNumerically("<", 3)) + + Ω(counter).Should(Equal(3)) + Ω(failureMessage).Should(BeZero()) + }) + + It("should timeout when the matcher errors", func() { + a := New(AsyncAssertionTypeEventually, func() interface{} { + return 0 //this should cause the matcher to error + }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) + + a.ShouldNot(HaveLen(0), "My description %d", 2) + + Ω(failureMessage).Should(ContainSubstring("Timed out after")) + Ω(failureMessage).Should(ContainSubstring("Error:")) + Ω(failureMessage).Should(ContainSubstring("My description 2")) + Ω(callerSkip).Should(Equal(4)) + }) + + It("should be able to timeout", func() { + a := New(AsyncAssertionTypeEventually, func() int { + return 0 + }, fakeFailHandler, time.Duration(0.1*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) + + a.ShouldNot(Equal(0), "My description %d", 2) + + Ω(failureMessage).Should(ContainSubstring("Timed out after")) + Ω(failureMessage).Should(ContainSubstring(": 0"), "Should pass the correct value to the matcher message formatter.") + Ω(failureMessage).Should(ContainSubstring("My description 2")) + 
Ω(callerSkip).Should(Equal(4)) + }) + }) + + Context("with a function that returns multiple values", func() { + It("should eventually succeed if the additional arguments are nil", func() { + i := 0 + Eventually(func() (int, error) { + i++ + return i, nil + }).Should(Equal(10)) + }) + + It("should eventually timeout if the additional arguments are not nil", func() { + i := 0 + a := New(AsyncAssertionTypeEventually, func() (int, error) { + i++ + return i, errors.New("bam") + }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) + a.Should(Equal(2)) + + Ω(failureMessage).Should(ContainSubstring("Timed out after")) + Ω(failureMessage).Should(ContainSubstring("Error:")) + Ω(failureMessage).Should(ContainSubstring("bam")) + Ω(callerSkip).Should(Equal(4)) + }) + }) + + Context("Making an assertion without a registered fail handler", func() { + It("should panic", func() { + defer func() { + e := recover() + RegisterFailHandler(Fail) + if e == nil { + Fail("expected a panic to have occurred") + } + }() + + RegisterFailHandler(nil) + c := make(chan bool, 1) + c <- true + Eventually(c).Should(Receive()) + }) + }) + }) + + Describe("Consistently", func() { + Describe("The positive case", func() { + Context("when the matcher consistently passes for the duration", func() { + It("should pass", func() { + calls := 0 + a := New(AsyncAssertionTypeConsistently, func() string { + calls++ + return "foo" + }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) + + a.Should(Equal("foo")) + Ω(calls).Should(BeNumerically(">", 8)) + Ω(calls).Should(BeNumerically("<=", 10)) + Ω(failureMessage).Should(BeZero()) + }) + }) + + Context("when the matcher fails at some point", func() { + It("should fail", func() { + calls := 0 + a := New(AsyncAssertionTypeConsistently, func() interface{} { + calls++ + if calls > 5 { + return "bar" + } + return "foo" + }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) + + a.Should(Equal("foo")) + Ω(failureMessage).Should(ContainSubstring("to equal")) + Ω(callerSkip).Should(Equal(4)) + }) + }) + + Context("when the matcher errors at some point", func() { + It("should fail", func() { + calls := 0 + a := New(AsyncAssertionTypeConsistently, func() interface{} { + calls++ + if calls > 5 { + return 3 + } + return []int{1, 2, 3} + }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) + + a.Should(HaveLen(3)) + Ω(failureMessage).Should(ContainSubstring("HaveLen matcher expects")) + Ω(callerSkip).Should(Equal(4)) + }) + }) + }) + + Describe("The negative case", func() { + Context("when the matcher consistently passes for the duration", func() { + It("should pass", func() { + c := make(chan bool) + a := New(AsyncAssertionTypeConsistently, c, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) + + a.ShouldNot(Receive()) + Ω(failureMessage).Should(BeZero()) + }) + }) + + Context("when the matcher fails at some point", func() { + It("should fail", func() { + c := make(chan bool) + go func() { + time.Sleep(time.Duration(100 * time.Millisecond)) + c <- true + }() + + a := New(AsyncAssertionTypeConsistently, c, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) + + a.ShouldNot(Receive()) + Ω(failureMessage).Should(ContainSubstring("not to receive anything")) + }) + }) + + Context("when the matcher 
errors at some point", func() { + It("should fail", func() { + calls := 0 + a := New(AsyncAssertionTypeConsistently, func() interface{} { + calls++ + return calls + }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) + + a.ShouldNot(BeNumerically(">", 5)) + Ω(failureMessage).Should(ContainSubstring("not to be >")) + Ω(callerSkip).Should(Equal(4)) + }) + }) + }) + + Context("with a function that returns multiple values", func() { + It("should consistently succeed if the additional arguments are nil", func() { + i := 2 + Consistently(func() (int, error) { + i++ + return i, nil + }).Should(BeNumerically(">=", 2)) + }) + + It("should eventually timeout if the additional arguments are not nil", func() { + i := 2 + a := New(AsyncAssertionTypeEventually, func() (int, error) { + i++ + return i, errors.New("bam") + }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) + a.Should(BeNumerically(">=", 2)) + + Ω(failureMessage).Should(ContainSubstring("Error:")) + Ω(failureMessage).Should(ContainSubstring("bam")) + Ω(callerSkip).Should(Equal(4)) + }) + }) + + Context("Making an assertion without a registered fail handler", func() { + It("should panic", func() { + defer func() { + e := recover() + RegisterFailHandler(Fail) + if e == nil { + Fail("expected a panic to have occurred") + } + }() + + RegisterFailHandler(nil) + c := make(chan bool) + Consistently(c).ShouldNot(Receive()) + }) + }) + }) + + Context("when passed a function with the wrong # or arguments & returns", func() { + It("should panic", func() { + Ω(func() { + New(AsyncAssertionTypeEventually, func() {}, fakeFailHandler, 0, 0, 1) + }).Should(Panic()) + + Ω(func() { + New(AsyncAssertionTypeEventually, func(a string) int { return 0 }, fakeFailHandler, 0, 0, 1) + }).Should(Panic()) + + Ω(func() { + New(AsyncAssertionTypeEventually, func() int { return 0 }, fakeFailHandler, 0, 0, 1) + }).ShouldNot(Panic()) + + Ω(func() { + New(AsyncAssertionTypeEventually, func() (int, error) { return 0, nil }, fakeFailHandler, 0, 0, 1) + }).ShouldNot(Panic()) + }) + }) + + Describe("bailing early", func() { + Context("when actual is a value", func() { + It("Eventually should bail out and fail early if the matcher says to", func() { + c := make(chan bool) + close(c) + + t := time.Now() + failures := InterceptGomegaFailures(func() { + Eventually(c, 0.1).Should(Receive()) + }) + Ω(time.Since(t)).Should(BeNumerically("<", 90*time.Millisecond)) + + Ω(failures).Should(HaveLen(1)) + }) + }) + + Context("when actual is a function", func() { + It("should never bail early", func() { + c := make(chan bool) + close(c) + + t := time.Now() + failures := InterceptGomegaFailures(func() { + Eventually(func() chan bool { + return c + }, 0.1).Should(Receive()) + }) + Ω(time.Since(t)).Should(BeNumerically(">=", 90*time.Millisecond)) + + Ω(failures).Should(HaveLen(1)) + }) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/internal/fakematcher/fake_matcher.go b/vendor/github.com/onsi/gomega/internal/fakematcher/fake_matcher.go new file mode 100644 index 000000000..6e351a7de --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/fakematcher/fake_matcher.go @@ -0,0 +1,23 @@ +package fakematcher + +import "fmt" + +type FakeMatcher struct { + ReceivedActual interface{} + MatchesToReturn bool + ErrToReturn error +} + +func (matcher *FakeMatcher) Match(actual interface{}) (bool, error) { + matcher.ReceivedActual = actual + + return matcher.MatchesToReturn, 
matcher.ErrToReturn +} + +func (matcher *FakeMatcher) FailureMessage(actual interface{}) string { + return fmt.Sprintf("positive: %v", actual) +} + +func (matcher *FakeMatcher) NegatedFailureMessage(actual interface{}) string { + return fmt.Sprintf("negative: %v", actual) +} diff --git a/vendor/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go b/vendor/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go new file mode 100644 index 000000000..66cad88a1 --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go @@ -0,0 +1,25 @@ +package oraclematcher + +import "github.com/onsi/gomega/types" + +/* +GomegaMatchers that also match the OracleMatcher interface can convey information about +whether or not their result will change upon future attempts. + +This allows `Eventually` and `Consistently` to short circuit if success becomes impossible. + +For example, a process' exit code can never change. So, gexec's Exit matcher returns `true` +for `MatchMayChangeInTheFuture` until the process exits, at which point it returns `false` forevermore. +*/ +type OracleMatcher interface { + MatchMayChangeInTheFuture(actual interface{}) bool +} + +func MatchMayChangeInTheFuture(matcher types.GomegaMatcher, value interface{}) bool { + oracleMatcher, ok := matcher.(OracleMatcher) + if !ok { + return true + } + + return oracleMatcher.MatchMayChangeInTheFuture(value) +} diff --git a/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go b/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go new file mode 100644 index 000000000..ac8912525 --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go @@ -0,0 +1,40 @@ +package testingtsupport + +import ( + "regexp" + "runtime/debug" + "strings" + + "github.com/onsi/gomega/types" +) + +type gomegaTestingT interface { + Fatalf(format string, args ...interface{}) +} + +func BuildTestingTGomegaFailHandler(t gomegaTestingT) types.GomegaFailHandler { + return func(message string, callerSkip ...int) { + skip := 1 + if len(callerSkip) > 0 { + skip = callerSkip[0] + } + stackTrace := pruneStack(string(debug.Stack()), skip) + t.Fatalf("\n%s\n%s", stackTrace, message) + } +} + +func pruneStack(fullStackTrace string, skip int) string { + stack := strings.Split(fullStackTrace, "\n") + if len(stack) > 2*(skip+1) { + stack = stack[2*(skip+1):] + } + prunedStack := []string{} + re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`) + for i := 0; i < len(stack)/2; i++ { + if !re.Match([]byte(stack[i*2])) { + prunedStack = append(prunedStack, stack[i*2]) + prunedStack = append(prunedStack, stack[i*2+1]) + } + } + return strings.Join(prunedStack, "\n") +} diff --git a/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support_test.go b/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support_test.go new file mode 100644 index 000000000..b9fbd6c64 --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support_test.go @@ -0,0 +1,12 @@ +package testingtsupport_test + +import ( + . 
"github.com/onsi/gomega" + + "testing" +) + +func TestTestingT(t *testing.T) { + RegisterTestingT(t) + Ω(true).Should(BeTrue()) +} diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go new file mode 100644 index 000000000..e6e85d070 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers.go @@ -0,0 +1,427 @@ +package gomega + +import ( + "time" + + "github.com/onsi/gomega/matchers" + "github.com/onsi/gomega/types" +) + +//Equal uses reflect.DeepEqual to compare actual with expected. Equal is strict about +//types when performing comparisons. +//It is an error for both actual and expected to be nil. Use BeNil() instead. +func Equal(expected interface{}) types.GomegaMatcher { + return &matchers.EqualMatcher{ + Expected: expected, + } +} + +//BeEquivalentTo is more lax than Equal, allowing equality between different types. +//This is done by converting actual to have the type of expected before +//attempting equality with reflect.DeepEqual. +//It is an error for actual and expected to be nil. Use BeNil() instead. +func BeEquivalentTo(expected interface{}) types.GomegaMatcher { + return &matchers.BeEquivalentToMatcher{ + Expected: expected, + } +} + +//BeIdenticalTo uses the == operator to compare actual with expected. +//BeIdenticalTo is strict about types when performing comparisons. +//It is an error for both actual and expected to be nil. Use BeNil() instead. +func BeIdenticalTo(expected interface{}) types.GomegaMatcher { + return &matchers.BeIdenticalToMatcher{ + Expected: expected, + } +} + +//BeNil succeeds if actual is nil +func BeNil() types.GomegaMatcher { + return &matchers.BeNilMatcher{} +} + +//BeTrue succeeds if actual is true +func BeTrue() types.GomegaMatcher { + return &matchers.BeTrueMatcher{} +} + +//BeFalse succeeds if actual is false +func BeFalse() types.GomegaMatcher { + return &matchers.BeFalseMatcher{} +} + +//HaveOccurred succeeds if actual is a non-nil error +//The typical Go error checking pattern looks like: +// err := SomethingThatMightFail() +// Ω(err).ShouldNot(HaveOccurred()) +func HaveOccurred() types.GomegaMatcher { + return &matchers.HaveOccurredMatcher{} +} + +//Succeed passes if actual is a nil error +//Succeed is intended to be used with functions that return a single error value. Instead of +// err := SomethingThatMightFail() +// Ω(err).ShouldNot(HaveOccurred()) +// +//You can write: +// Ω(SomethingThatMightFail()).Should(Succeed()) +// +//It is a mistake to use Succeed with a function that has multiple return values. Gomega's Ω and Expect +//functions automatically trigger failure if any return values after the first return value are non-zero/non-nil. +//This means that Ω(MultiReturnFunc()).ShouldNot(Succeed()) can never pass. +func Succeed() types.GomegaMatcher { + return &matchers.SucceedMatcher{} +} + +//MatchError succeeds if actual is a non-nil error that matches the passed in string/error. +// +//These are valid use-cases: +// Ω(err).Should(MatchError("an error")) //asserts that err.Error() == "an error" +// Ω(err).Should(MatchError(SomeError)) //asserts that err == SomeError (via reflect.DeepEqual) +// +//It is an error for err to be nil or an object that does not implement the Error interface +func MatchError(expected interface{}) types.GomegaMatcher { + return &matchers.MatchErrorMatcher{ + Expected: expected, + } +} + +//BeClosed succeeds if actual is a closed channel. 
+//It is an error to pass a non-channel to BeClosed; it is also an error to pass nil.
+//
+//In order to check whether or not the channel is closed, Gomega must try to read from the channel
+//(even in the `ShouldNot(BeClosed())` case). You should keep this in mind if you wish to make subsequent assertions about
+//values coming down the channel.
+//
+//Also, if you are testing that a *buffered* channel is closed you must first read all values out of the channel before
+//asserting that it is closed (it is not possible to detect that a buffered channel has been closed until all its buffered values are read).
+//
+//Finally, as a corollary: it is an error to check whether or not a send-only channel is closed.
+func BeClosed() types.GomegaMatcher {
+	return &matchers.BeClosedMatcher{}
+}
+
+//Receive succeeds if there is a value to be received on actual.
+//Actual must be a channel (and cannot be a send-only channel) -- anything else is an error.
+//
+//Receive returns immediately and never blocks:
+//
+//- If there is nothing on the channel `c` then Ω(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass.
+//
+//- If the channel `c` is closed then Ω(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass.
+//
+//- If there is something on the channel `c` ready to be read, then Ω(c).Should(Receive()) will pass and Ω(c).ShouldNot(Receive()) will fail.
+//
+//If you have a go-routine running in the background that will write to channel `c` you can:
+//	Eventually(c).Should(Receive())
+//
+//This will time out if nothing gets sent to `c` (you can modify the timeout interval as you normally do with `Eventually`).
+//
+//A similar use-case is to assert that no go-routine writes to a channel (for a period of time). You can do this with `Consistently`:
+//	Consistently(c).ShouldNot(Receive())
+//
+//You can pass `Receive` a matcher. If you do so, it will match the received object against the matcher. For example:
+//	Ω(c).Should(Receive(Equal("foo")))
+//
+//When given a matcher, `Receive` will always fail if there is nothing to be received on the channel.
+//
+//Passing Receive a matcher is especially useful when paired with Eventually:
+//
+//	Eventually(c).Should(Receive(ContainSubstring("bar")))
+//
+//will repeatedly attempt to pull values out of `c` until a value matching "bar" is received.
+//
+//Finally, if you want a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type:
+//	var myThing thing
+//	Eventually(thingChan).Should(Receive(&myThing))
+//	Ω(myThing.Sprocket).Should(Equal("foo"))
+//	Ω(myThing.IsValid()).Should(BeTrue())
+func Receive(args ...interface{}) types.GomegaMatcher {
+	var arg interface{}
+	if len(args) > 0 {
+		arg = args[0]
+	}
+
+	return &matchers.ReceiveMatcher{
+		Arg: arg,
+	}
+}
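Taken together, the channel matchers above support tests in the following style. This is a minimal sketch, not part of the vendored patch: the test name, channel, and producer goroutine are illustrative, and RegisterTestingT follows the testing_t_support pattern vendored earlier in this diff.

package gomega_usage_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

// Illustrative only: exercises Receive and BeClosed together.
func TestChannelMatchers(t *testing.T) {
	RegisterTestingT(t)

	results := make(chan string, 1)

	// A background goroutine sends one value, then closes the channel.
	go func() {
		results <- "done"
		close(results)
	}()

	var got string
	Eventually(results).Should(Receive(&got)) // polls until a value arrives
	Ω(got).Should(Equal("done"))

	// The buffered value was drained above, so closure is now detectable.
	Eventually(results).Should(BeClosed())
}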
+
+//BeSent succeeds if a value can be sent to actual.
+//Actual must be a channel (and cannot be a receive-only channel) that can send the type of the value passed into BeSent -- anything else is an error.
+//In addition, actual must not be closed.
+//
+//BeSent never blocks:
+//
+//- If the channel `c` is not ready to receive then Ω(c).Should(BeSent("foo")) will fail immediately.
+//- If the channel `c` is eventually ready to receive then Eventually(c).Should(BeSent("foo")) will succeed, presuming the channel becomes ready to receive before Eventually's timeout.
+//- If the channel `c` is closed then Ω(c).Should(BeSent("foo")) and Ω(c).ShouldNot(BeSent("foo")) will both fail immediately.
+//
+//Of course, the value is actually sent to the channel. The point of `BeSent` is less to make an assertion about the availability of the channel (which is typically an implementation detail that your test should not be concerned with).
+//Rather, the point of `BeSent` is to make it possible to easily and expressively write tests that can time out on blocked channel sends.
+func BeSent(arg interface{}) types.GomegaMatcher {
+	return &matchers.BeSentMatcher{
+		Arg: arg,
+	}
+}
+
+//MatchRegexp succeeds if actual is a string or stringer that matches the
+//passed-in regexp. Optional arguments can be provided to construct a regexp
+//via fmt.Sprintf().
+func MatchRegexp(regexp string, args ...interface{}) types.GomegaMatcher {
+	return &matchers.MatchRegexpMatcher{
+		Regexp: regexp,
+		Args:   args,
+	}
+}
+
+//ContainSubstring succeeds if actual is a string or stringer that contains the
+//passed-in substring. Optional arguments can be provided to construct the substring
+//via fmt.Sprintf().
+func ContainSubstring(substr string, args ...interface{}) types.GomegaMatcher {
+	return &matchers.ContainSubstringMatcher{
+		Substr: substr,
+		Args:   args,
+	}
+}
+
+//HavePrefix succeeds if actual is a string or stringer that contains the
+//passed-in string as a prefix. Optional arguments can be provided to construct the prefix
+//via fmt.Sprintf().
+func HavePrefix(prefix string, args ...interface{}) types.GomegaMatcher {
+	return &matchers.HavePrefixMatcher{
+		Prefix: prefix,
+		Args:   args,
+	}
+}
+
+//HaveSuffix succeeds if actual is a string or stringer that contains the
+//passed-in string as a suffix. Optional arguments can be provided to construct the suffix
+//via fmt.Sprintf().
+func HaveSuffix(suffix string, args ...interface{}) types.GomegaMatcher {
+	return &matchers.HaveSuffixMatcher{
+		Suffix: suffix,
+		Args:   args,
+	}
+}
+
+//MatchJSON succeeds if actual is a string or stringer of JSON that matches
+//the expected JSON. The JSONs are decoded and the resulting objects are compared via
+//reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter.
+func MatchJSON(json interface{}) types.GomegaMatcher {
+	return &matchers.MatchJSONMatcher{
+		JSONToMatch: json,
+	}
+}
+
+//MatchXML succeeds if actual is a string or stringer of XML that matches
+//the expected XML. The XMLs are decoded and the resulting objects are compared via
+//reflect.DeepEqual so things like whitespace shouldn't matter.
+func MatchXML(xml interface{}) types.GomegaMatcher {
+	return &matchers.MatchXMLMatcher{
+		XMLToMatch: xml,
+	}
+}
+
+//MatchYAML succeeds if actual is a string or stringer of YAML that matches
+//the expected YAML. The YAMLs are decoded and the resulting objects are compared via
+//reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter.
+func MatchYAML(yaml interface{}) types.GomegaMatcher {
+	return &matchers.MatchYAMLMatcher{
+		YAMLToMatch: yaml,
+	}
+}
+
+//BeEmpty succeeds if actual is empty. Actual must be of type string, array, map, chan, or slice.
+func BeEmpty() types.GomegaMatcher {
+	return &matchers.BeEmptyMatcher{}
+}
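As a quick illustration of the string and JSON matchers above, a test might read as follows (a minimal sketch with made-up values, not part of the vendored patch):

package gomega_usage_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

// Illustrative only: exercises the string, regexp, and JSON matchers.
func TestStringAndJSONMatchers(t *testing.T) {
	RegisterTestingT(t)

	greeting := "Hello, gomega!"
	Ω(greeting).Should(HavePrefix("Hello"))
	Ω(greeting).Should(HaveSuffix("gomega!"))
	Ω(greeting).Should(ContainSubstring("go"))
	Ω(greeting).Should(MatchRegexp(`^Hello, \w+!$`))

	// Key order and whitespace are irrelevant: both documents decode to the same object.
	Ω(`{"a": 1, "b": 2}`).Should(MatchJSON(`{"b": 2, "a": 1}`))

	Ω("").Should(BeEmpty())
}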
+
+//HaveLen succeeds if actual has the passed-in length. Actual must be of type string, array, map, chan, or slice.
+func HaveLen(count int) types.GomegaMatcher {
+	return &matchers.HaveLenMatcher{
+		Count: count,
+	}
+}
+
+//HaveCap succeeds if actual has the passed-in capacity. Actual must be of type array, chan, or slice.
+func HaveCap(count int) types.GomegaMatcher {
+	return &matchers.HaveCapMatcher{
+		Count: count,
+	}
+}
+
+//BeZero succeeds if actual is the zero value for its type or if actual is nil.
+func BeZero() types.GomegaMatcher {
+	return &matchers.BeZeroMatcher{}
+}
+
+//ContainElement succeeds if actual contains the passed-in element.
+//By default ContainElement() uses Equal() to perform the match, however a
+//matcher can be passed in instead:
+//	Ω([]string{"Foo", "FooBar"}).Should(ContainElement(ContainSubstring("Bar")))
+//
+//Actual must be an array, slice or map.
+//For maps, ContainElement searches through the map's values.
+func ContainElement(element interface{}) types.GomegaMatcher {
+	return &matchers.ContainElementMatcher{
+		Element: element,
+	}
+}
+
+//ConsistOf succeeds if actual contains precisely the elements passed into the matcher. The ordering of the elements does not matter.
+//By default ConsistOf() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples:
+//
+//	Ω([]string{"Foo", "FooBar"}).Should(ConsistOf("FooBar", "Foo"))
+//	Ω([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Bar"), "Foo"))
+//	Ω([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Foo"), ContainSubstring("Foo")))
+//
+//Actual must be an array, slice or map. For maps, ConsistOf matches against the map's values.
+//
+//You typically pass variadic arguments to ConsistOf (as in the examples above). However, if you need to pass in a slice you can, provided that it
+//is the only element passed in to ConsistOf:
+//
+//	Ω([]string{"Foo", "FooBar"}).Should(ConsistOf([]string{"FooBar", "Foo"}))
+//
+//Note that Go's type system does not allow you to write this as ConsistOf([]string{"FooBar", "Foo"}...) as []string and []interface{} are different types - hence the need for this special rule.
+func ConsistOf(elements ...interface{}) types.GomegaMatcher {
+	return &matchers.ConsistOfMatcher{
+		Elements: elements,
+	}
+}
+
+//HaveKey succeeds if actual is a map with the passed-in key.
+//By default HaveKey uses Equal() to perform the match, however a
+//matcher can be passed in instead:
+//	Ω(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKey(MatchRegexp(`.+Foo$`)))
+func HaveKey(key interface{}) types.GomegaMatcher {
+	return &matchers.HaveKeyMatcher{
+		Key: key,
+	}
+}
+
+//HaveKeyWithValue succeeds if actual is a map with the passed-in key and value.
+//By default HaveKeyWithValue uses Equal() to perform the match, however a
+//matcher can be passed in instead:
+//	Ω(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue("Foo", "Bar"))
+//	Ω(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue(MatchRegexp(`.+Foo$`), "Bar"))
+func HaveKeyWithValue(key interface{}, value interface{}) types.GomegaMatcher {
+	return &matchers.HaveKeyWithValueMatcher{
+		Key:   key,
+		Value: value,
+	}
+}
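The collection matchers just documented compose with sub-matchers; for instance (a minimal sketch with made-up data, not part of the vendored patch):

package gomega_usage_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

// Illustrative only: exercises the length, element, and map-key matchers.
func TestCollectionMatchers(t *testing.T) {
	RegisterTestingT(t)

	fruits := []string{"apple", "banana"}
	Ω(fruits).Should(HaveLen(2))
	Ω(fruits).Should(ContainElement("banana"))
	Ω(fruits).Should(ContainElement(ContainSubstring("nan"))) // a sub-matcher in place of Equal
	Ω(fruits).Should(ConsistOf("banana", "apple"))            // element order is ignored

	ages := map[string]int{"alice": 30, "bob": 25}
	Ω(ages).Should(HaveKey("alice"))
	Ω(ages).Should(HaveKeyWithValue("bob", 25))
	Ω(ages).ShouldNot(BeEmpty())
}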
+
+//BeNumerically performs numerical assertions in a type-agnostic way.
+//Actual and expected should be numbers, though the specific type of
+//number is irrelevant (float32, float64, uint8, etc...).
+//
+//There are six, self-explanatory, supported comparators:
+//	Ω(1.0).Should(BeNumerically("==", 1))
+//	Ω(1.0).Should(BeNumerically("~", 0.999, 0.01))
+//	Ω(1.0).Should(BeNumerically(">", 0.9))
+//	Ω(1.0).Should(BeNumerically(">=", 1.0))
+//	Ω(1.0).Should(BeNumerically("<", 3))
+//	Ω(1.0).Should(BeNumerically("<=", 1.0))
+func BeNumerically(comparator string, compareTo ...interface{}) types.GomegaMatcher {
+	return &matchers.BeNumericallyMatcher{
+		Comparator: comparator,
+		CompareTo:  compareTo,
+	}
+}
+
+//BeTemporally compares time.Time's like BeNumerically.
+//Actual and expected must be time.Time. The comparators are the same as for BeNumerically:
+//	Ω(time.Now()).Should(BeTemporally(">", time.Time{}))
+//	Ω(time.Now()).Should(BeTemporally("~", time.Now(), time.Second))
+func BeTemporally(comparator string, compareTo time.Time, threshold ...time.Duration) types.GomegaMatcher {
+	return &matchers.BeTemporallyMatcher{
+		Comparator: comparator,
+		CompareTo:  compareTo,
+		Threshold:  threshold,
+	}
+}
+
+//BeAssignableToTypeOf succeeds if actual is assignable to the type of expected.
+//It will return an error when one of the values is nil.
+//	Ω(0).Should(BeAssignableToTypeOf(0))         // same values
+//	Ω(5).Should(BeAssignableToTypeOf(-1))        // different values, same type
+//	Ω("foo").Should(BeAssignableToTypeOf("bar")) // different values, same type
+//	Ω(struct{ Foo string }{}).Should(BeAssignableToTypeOf(struct{ Foo string }{}))
+func BeAssignableToTypeOf(expected interface{}) types.GomegaMatcher {
+	return &matchers.AssignableToTypeOfMatcher{
+		Expected: expected,
+	}
+}
+
+//Panic succeeds if actual is a function that, when invoked, panics.
+//Actual must be a function that takes no arguments and returns no results.
+func Panic() types.GomegaMatcher {
+	return &matchers.PanicMatcher{}
+}
+
+//BeAnExistingFile succeeds if a file exists.
+//Actual must be a string representing the abs path to the file being checked.
+func BeAnExistingFile() types.GomegaMatcher {
+	return &matchers.BeAnExistingFileMatcher{}
+}
+
+//BeARegularFile succeeds iff a file exists and is a regular file.
+//Actual must be a string representing the abs path to the file being checked.
+func BeARegularFile() types.GomegaMatcher {
+	return &matchers.BeARegularFileMatcher{}
+}
+
+//BeADirectory succeeds iff a file exists and is a directory.
+//Actual must be a string representing the abs path to the file being checked.
+func BeADirectory() types.GomegaMatcher {
+	return &matchers.BeADirectoryMatcher{}
+}
+
+//And succeeds only if all of the given matchers succeed.
+//The matchers are tried in order, and will fail-fast if one doesn't succeed.
+//	Expect("hi").To(And(HaveLen(2), Equal("hi")))
+//
+//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
+func And(ms ...types.GomegaMatcher) types.GomegaMatcher {
+	return &matchers.AndMatcher{Matchers: ms}
+}
+
+//SatisfyAll is an alias for And().
+//	Ω("hi").Should(SatisfyAll(HaveLen(2), Equal("hi")))
+func SatisfyAll(matchers ...types.GomegaMatcher) types.GomegaMatcher {
+	return And(matchers...)
+}
+
+//Or succeeds if any of the given matchers succeed.
+//The matchers are tried in order and will return immediately upon the first successful match.
+//	Expect("hi").To(Or(HaveLen(3), HaveLen(2)))
+//
+//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
+func Or(ms ...types.GomegaMatcher) types.GomegaMatcher {
+	return &matchers.OrMatcher{Matchers: ms}
+}
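And a sketch combining the numeric, temporal, and composed matchers above (illustrative values only, not part of the vendored patch; And fails fast, Or stops at the first success):

package gomega_usage_test

import (
	"testing"
	"time"

	. "github.com/onsi/gomega"
)

// Illustrative only: exercises BeNumerically, BeTemporally, and composition.
func TestComposedMatchers(t *testing.T) {
	RegisterTestingT(t)

	Ω(1.001).Should(BeNumerically("~", 1.0, 0.01)) // within the given threshold
	Ω(7).Should(BeNumerically(">", 5))

	Ω(time.Now()).Should(BeTemporally("~", time.Now(), time.Second))

	Ω("hi").Should(And(HaveLen(2), Equal("hi")))
	Ω("hi").Should(Or(HaveLen(3), HaveLen(2)))
	Ω("hi").Should(SatisfyAll(HaveLen(2), ContainSubstring("h")))
}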
+// Expect("hi").SatisfyAny(Or(HaveLen(3), HaveLen(2)) +func SatisfyAny(matchers ...types.GomegaMatcher) types.GomegaMatcher { + return Or(matchers...) +} + +//Not negates the given matcher; it succeeds if the given matcher fails. +// Expect(1).To(Not(Equal(2)) +// +//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions. +func Not(matcher types.GomegaMatcher) types.GomegaMatcher { + return &matchers.NotMatcher{Matcher: matcher} +} + +//WithTransform applies the `transform` to the actual value and matches it against `matcher`. +//The given transform must be a function of one parameter that returns one value. +// var plus1 = func(i int) int { return i + 1 } +// Expect(1).To(WithTransform(plus1, Equal(2)) +// +//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions. +func WithTransform(transform interface{}, matcher types.GomegaMatcher) types.GomegaMatcher { + return matchers.NewWithTransformMatcher(transform, matcher) +} diff --git a/vendor/github.com/onsi/gomega/matchers/and.go b/vendor/github.com/onsi/gomega/matchers/and.go new file mode 100644 index 000000000..d83a29164 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/and.go @@ -0,0 +1,63 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" + "github.com/onsi/gomega/internal/oraclematcher" + "github.com/onsi/gomega/types" +) + +type AndMatcher struct { + Matchers []types.GomegaMatcher + + // state + firstFailedMatcher types.GomegaMatcher +} + +func (m *AndMatcher) Match(actual interface{}) (success bool, err error) { + m.firstFailedMatcher = nil + for _, matcher := range m.Matchers { + success, err := matcher.Match(actual) + if !success || err != nil { + m.firstFailedMatcher = matcher + return false, err + } + } + return true, nil +} + +func (m *AndMatcher) FailureMessage(actual interface{}) (message string) { + return m.firstFailedMatcher.FailureMessage(actual) +} + +func (m *AndMatcher) NegatedFailureMessage(actual interface{}) (message string) { + // not the most beautiful list of matchers, but not bad either... + return format.Message(actual, fmt.Sprintf("To not satisfy all of these matchers: %s", m.Matchers)) +} + +func (m *AndMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + /* + Example with 3 matchers: A, B, C + + Match evaluates them: T, F, => F + So match is currently F, what should MatchMayChangeInTheFuture() return? + Seems like it only depends on B, since currently B MUST change to allow the result to become T + + Match eval: T, T, T => T + So match is currently T, what should MatchMayChangeInTheFuture() return? + Seems to depend on ANY of them being able to change to F. + */ + + if m.firstFailedMatcher == nil { + // so all matchers succeeded.. Any one of them changing would change the result. + for _, matcher := range m.Matchers { + if oraclematcher.MatchMayChangeInTheFuture(matcher, actual) { + return true + } + } + return false // none of were going to change + } + // one of the matchers failed.. it must be able to change in order to affect the result + return oraclematcher.MatchMayChangeInTheFuture(m.firstFailedMatcher, actual) +} diff --git a/vendor/github.com/onsi/gomega/matchers/and_test.go b/vendor/github.com/onsi/gomega/matchers/and_test.go new file mode 100644 index 000000000..acf778cd6 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/and_test.go @@ -0,0 +1,103 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" + "github.com/onsi/gomega/types" +) + +// sample data +var ( + // example input + input = "hi" + // some matchers that succeed against the input + true1 = HaveLen(2) + true2 = Equal("hi") + true3 = MatchRegexp("hi") + // some matchers that fail against the input. + false1 = HaveLen(1) + false2 = Equal("hip") + false3 = MatchRegexp("hope") +) + +// verifyFailureMessage expects the matcher to fail with the given input, and verifies the failure message. +func verifyFailureMessage(m types.GomegaMatcher, input string, expectedFailureMsgFragment string) { + Expect(m.Match(input)).To(BeFalse()) + Expect(m.FailureMessage(input)).To(Equal( + "Expected\n : " + input + "\n" + expectedFailureMsgFragment)) +} + +var _ = Describe("AndMatcher", func() { + It("works with positive cases", func() { + Expect(input).To(And()) + Expect(input).To(And(true1)) + Expect(input).To(And(true1, true2)) + Expect(input).To(And(true1, true2, true3)) + + // use alias + Expect(input).To(SatisfyAll(true1, true2, true3)) + }) + + It("works with negative cases", func() { + Expect(input).ToNot(And(false1, false2)) + Expect(input).ToNot(And(true1, true2, false3)) + Expect(input).ToNot(And(true1, false2, false3)) + Expect(input).ToNot(And(false1, true1, true2)) + }) + + Context("failure messages", func() { + Context("when match fails", func() { + It("gives a descriptive message", func() { + verifyFailureMessage(And(false1, true1), input, "to have length 1") + verifyFailureMessage(And(true1, false2), input, "to equal\n : hip") + verifyFailureMessage(And(true1, true2, false3), input, "to match regular expression\n : hope") + }) + }) + + Context("when match succeeds, but expected it to fail", func() { + It("gives a descriptive message", func() { + verifyFailureMessage(Not(And(true1, true2)), input, + `To not satisfy all of these matchers: [%!s(*matchers.HaveLenMatcher=&{2}) %!s(*matchers.EqualMatcher=&{hi})]`) + }) + }) + }) + + Context("MatchMayChangeInTheFuture", func() { + Context("Match returned false", func() { + Context("returns value of the failed matcher", func() { + It("false if failed matcher not going to change", func() { + // 3 matchers: 1st returns true, 2nd returns false and is not going to change, 3rd is never called + m := And(Not(BeNil()), Or(), Equal(1)) + Expect(m.Match("hi")).To(BeFalse()) + Expect(m.(*AndMatcher).MatchMayChangeInTheFuture("hi")).To(BeFalse()) // empty Or() indicates not going to change + }) + It("true if failed matcher indicates it might change", func() { + // 3 matchers: 1st returns true, 2nd returns false and "might" change, 3rd is never called + m := And(Not(BeNil()), Equal(5), Equal(1)) + Expect(m.Match("hi")).To(BeFalse()) + Expect(m.(*AndMatcher).MatchMayChangeInTheFuture("hi")).To(BeTrue()) // Equal(5) indicates it might change + }) + }) + }) + Context("Match returned true", func() { + It("returns true if any of the matchers could change", func() { + // 3 matchers, all return true, and all could change + m := And(Not(BeNil()), Equal("hi"), HaveLen(2)) + Expect(m.Match("hi")).To(BeTrue()) + Expect(m.(*AndMatcher).MatchMayChangeInTheFuture("hi")).To(BeTrue()) // all 3 of these matchers default to 'true' + }) + It("returns false if none of the matchers could change", func() { + // empty And() has the property of always matching, and never can change since there are no sub-matchers that could change + m := And() + Expect(m.Match("anything")).To(BeTrue()) + Expect(m.(*AndMatcher).MatchMayChangeInTheFuture("anything")).To(BeFalse()) + + // And() with 3 
sub-matchers that return true, and can't change + m = And(And(), And(), And()) + Expect(m.Match("hi")).To(BeTrue()) + Expect(m.(*AndMatcher).MatchMayChangeInTheFuture("hi")).To(BeFalse()) // the 3 empty And()'s won't change + }) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go b/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go new file mode 100644 index 000000000..89a1fc211 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go @@ -0,0 +1,31 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type AssignableToTypeOfMatcher struct { + Expected interface{} +} + +func (matcher *AssignableToTypeOfMatcher) Match(actual interface{}) (success bool, err error) { + if actual == nil || matcher.Expected == nil { + return false, fmt.Errorf("Refusing to compare to .\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.") + } + + actualType := reflect.TypeOf(actual) + expectedType := reflect.TypeOf(matcher.Expected) + + return actualType.AssignableTo(expectedType), nil +} + +func (matcher *AssignableToTypeOfMatcher) FailureMessage(actual interface{}) string { + return format.Message(actual, fmt.Sprintf("to be assignable to the type: %T", matcher.Expected)) +} + +func (matcher *AssignableToTypeOfMatcher) NegatedFailureMessage(actual interface{}) string { + return format.Message(actual, fmt.Sprintf("not to be assignable to the type: %T", matcher.Expected)) +} diff --git a/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher_test.go new file mode 100644 index 000000000..d2280e050 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher_test.go @@ -0,0 +1,30 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("AssignableToTypeOf", func() { + Context("When asserting assignability between types", func() { + It("should do the right thing", func() { + Ω(0).Should(BeAssignableToTypeOf(0)) + Ω(5).Should(BeAssignableToTypeOf(-1)) + Ω("foo").Should(BeAssignableToTypeOf("bar")) + Ω(struct{ Foo string }{}).Should(BeAssignableToTypeOf(struct{ Foo string }{})) + + Ω(0).ShouldNot(BeAssignableToTypeOf("bar")) + Ω(5).ShouldNot(BeAssignableToTypeOf(struct{ Foo string }{})) + Ω("foo").ShouldNot(BeAssignableToTypeOf(42)) + }) + }) + + Context("When asserting nil values", func() { + It("should error", func() { + success, err := (&AssignableToTypeOfMatcher{Expected: nil}).Match(nil) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_a_directory.go b/vendor/github.com/onsi/gomega/matchers/be_a_directory.go new file mode 100644 index 000000000..7b6975e41 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_a_directory.go @@ -0,0 +1,54 @@ +package matchers + +import ( + "fmt" + "os" + + "github.com/onsi/gomega/format" +) + +type notADirectoryError struct { + os.FileInfo +} + +func (t notADirectoryError) Error() string { + fileInfo := os.FileInfo(t) + switch { + case fileInfo.Mode().IsRegular(): + return "file is a regular file" + default: + return fmt.Sprintf("file mode is: %s", fileInfo.Mode().String()) + } +} + +type BeADirectoryMatcher struct { + expected interface{} + err error +} + +func (matcher *BeADirectoryMatcher) Match(actual interface{}) (success bool, err error) { + actualFilename, ok := actual.(string) + if !ok { + return false, fmt.Errorf("BeADirectoryMatcher matcher expects a file path") + } + + fileInfo, err := os.Stat(actualFilename) + if err != nil { + matcher.err = err + return false, nil + } + + if !fileInfo.Mode().IsDir() { + matcher.err = notADirectoryError{fileInfo} + return false, nil + } + return true, nil +} + +func (matcher *BeADirectoryMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("to be a directory: %s", matcher.err)) +} + +func (matcher *BeADirectoryMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("not be a directory")) +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_a_directory_test.go b/vendor/github.com/onsi/gomega/matchers/be_a_directory_test.go new file mode 100644 index 000000000..e59d76990 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_a_directory_test.go @@ -0,0 +1,40 @@ +package matchers_test + +import ( + "io/ioutil" + "os" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("BeADirectoryMatcher", func() { + Context("when passed a string", func() { + It("should do the right thing", func() { + Ω("/dne/test").ShouldNot(BeADirectory()) + + tmpFile, err := ioutil.TempFile("", "gomega-test-tempfile") + Ω(err).ShouldNot(HaveOccurred()) + defer os.Remove(tmpFile.Name()) + Ω(tmpFile.Name()).ShouldNot(BeADirectory()) + + tmpDir, err := ioutil.TempDir("", "gomega-test-tempdir") + Ω(err).ShouldNot(HaveOccurred()) + defer os.Remove(tmpDir) + Ω(tmpDir).Should(BeADirectory()) + }) + }) + + Context("when passed something else", func() { + It("should error", func() { + success, err := (&BeADirectoryMatcher{}).Match(nil) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&BeADirectoryMatcher{}).Match(true) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go b/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go new file mode 100644 index 000000000..e239131fb --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go @@ -0,0 +1,54 @@ +package matchers + +import ( + "fmt" + "os" + + "github.com/onsi/gomega/format" +) + +type notARegularFileError struct { + os.FileInfo +} + +func (t notARegularFileError) Error() string { + fileInfo := os.FileInfo(t) + switch { + case fileInfo.IsDir(): + return "file is a directory" + default: + return fmt.Sprintf("file mode is: %s", fileInfo.Mode().String()) + } +} + +type BeARegularFileMatcher struct { + expected interface{} + err error +} + +func (matcher *BeARegularFileMatcher) Match(actual interface{}) (success bool, err error) { + actualFilename, ok := actual.(string) + if !ok { + return false, fmt.Errorf("BeARegularFileMatcher matcher expects a file path") + } + + fileInfo, err := os.Stat(actualFilename) + if err != nil { + matcher.err = err + return false, nil + } + + if !fileInfo.Mode().IsRegular() { + matcher.err = notARegularFileError{fileInfo} + return false, nil + } + return true, nil +} + +func (matcher *BeARegularFileMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("to be a regular file: %s", matcher.err)) +} + +func (matcher *BeARegularFileMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("not be a regular file")) +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_a_regular_file_test.go b/vendor/github.com/onsi/gomega/matchers/be_a_regular_file_test.go new file mode 100644 index 000000000..951e750d6 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_a_regular_file_test.go @@ -0,0 +1,40 @@ +package matchers_test + +import ( + "io/ioutil" + "os" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("BeARegularFileMatcher", func() { + Context("when passed a string", func() { + It("should do the right thing", func() { + Ω("/dne/test").ShouldNot(BeARegularFile()) + + tmpFile, err := ioutil.TempFile("", "gomega-test-tempfile") + Ω(err).ShouldNot(HaveOccurred()) + defer os.Remove(tmpFile.Name()) + Ω(tmpFile.Name()).Should(BeARegularFile()) + + tmpDir, err := ioutil.TempDir("", "gomega-test-tempdir") + Ω(err).ShouldNot(HaveOccurred()) + defer os.Remove(tmpDir) + Ω(tmpDir).ShouldNot(BeARegularFile()) + }) + }) + + Context("when passed something else", func() { + It("should error", func() { + success, err := (&BeARegularFileMatcher{}).Match(nil) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&BeARegularFileMatcher{}).Match(true) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go b/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go new file mode 100644 index 000000000..d42eba223 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go @@ -0,0 +1,38 @@ +package matchers + +import ( + "fmt" + "os" + + "github.com/onsi/gomega/format" +) + +type BeAnExistingFileMatcher struct { + expected interface{} +} + +func (matcher *BeAnExistingFileMatcher) Match(actual interface{}) (success bool, err error) { + actualFilename, ok := actual.(string) + if !ok { + return false, fmt.Errorf("BeAnExistingFileMatcher matcher expects a file path") + } + + if _, err = os.Stat(actualFilename); err != nil { + switch { + case os.IsNotExist(err): + return false, nil + default: + return false, err + } + } + + return true, nil +} + +func (matcher *BeAnExistingFileMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("to exist")) +} + +func (matcher *BeAnExistingFileMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("not to exist")) +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_an_existing_file_test.go b/vendor/github.com/onsi/gomega/matchers/be_an_existing_file_test.go new file mode 100644 index 000000000..775f7b6ac --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_an_existing_file_test.go @@ -0,0 +1,40 @@ +package matchers_test + +import ( + "io/ioutil" + "os" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("BeAnExistingFileMatcher", func() { + Context("when passed a string", func() { + It("should do the right thing", func() { + Ω("/dne/test").ShouldNot(BeAnExistingFile()) + + tmpFile, err := ioutil.TempFile("", "gomega-test-tempfile") + Ω(err).ShouldNot(HaveOccurred()) + defer os.Remove(tmpFile.Name()) + Ω(tmpFile.Name()).Should(BeAnExistingFile()) + + tmpDir, err := ioutil.TempDir("", "gomega-test-tempdir") + Ω(err).ShouldNot(HaveOccurred()) + defer os.Remove(tmpDir) + Ω(tmpDir).Should(BeAnExistingFile()) + }) + }) + + Context("when passed something else", func() { + It("should error", func() { + success, err := (&BeAnExistingFileMatcher{}).Match(nil) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&BeAnExistingFileMatcher{}).Match(true) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go new file mode 100644 index 000000000..c1b499597 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go @@ -0,0 +1,45 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" + "reflect" +) + +type BeClosedMatcher struct { +} + +func (matcher *BeClosedMatcher) Match(actual interface{}) (success bool, err error) { + if !isChan(actual) { + return false, fmt.Errorf("BeClosed matcher expects a channel. Got:\n%s", format.Object(actual, 1)) + } + + channelType := reflect.TypeOf(actual) + channelValue := reflect.ValueOf(actual) + + if channelType.ChanDir() == reflect.SendDir { + return false, fmt.Errorf("BeClosed matcher cannot determine if a send-only channel is closed or open. Got:\n%s", format.Object(actual, 1)) + } + + winnerIndex, _, open := reflect.Select([]reflect.SelectCase{ + reflect.SelectCase{Dir: reflect.SelectRecv, Chan: channelValue}, + reflect.SelectCase{Dir: reflect.SelectDefault}, + }) + + var closed bool + if winnerIndex == 0 { + closed = !open + } else if winnerIndex == 1 { + closed = false + } + + return closed, nil +} + +func (matcher *BeClosedMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be closed") +} + +func (matcher *BeClosedMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be open") +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_closed_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_closed_matcher_test.go new file mode 100644 index 000000000..b2c40c910 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_closed_matcher_test.go @@ -0,0 +1,70 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("BeClosedMatcher", func() { + Context("when passed a channel", func() { + It("should do the right thing", func() { + openChannel := make(chan bool) + Ω(openChannel).ShouldNot(BeClosed()) + + var openReaderChannel <-chan bool + openReaderChannel = openChannel + Ω(openReaderChannel).ShouldNot(BeClosed()) + + closedChannel := make(chan bool) + close(closedChannel) + + Ω(closedChannel).Should(BeClosed()) + + var closedReaderChannel <-chan bool + closedReaderChannel = closedChannel + Ω(closedReaderChannel).Should(BeClosed()) + }) + }) + + Context("when passed a send-only channel", func() { + It("should error", func() { + openChannel := make(chan bool) + var openWriterChannel chan<- bool + openWriterChannel = openChannel + + success, err := (&BeClosedMatcher{}).Match(openWriterChannel) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + closedChannel := make(chan bool) + close(closedChannel) + + var closedWriterChannel chan<- bool + closedWriterChannel = closedChannel + + success, err = (&BeClosedMatcher{}).Match(closedWriterChannel) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + }) + }) + + Context("when passed something else", func() { + It("should error", func() { + var nilChannel chan bool + + success, err := (&BeClosedMatcher{}).Match(nilChannel) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&BeClosedMatcher{}).Match(nil) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&BeClosedMatcher{}).Match(7) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go new file mode 100644 index 000000000..55bdd7d15 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go @@ -0,0 +1,26 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" +) + +type BeEmptyMatcher struct { +} + +func (matcher *BeEmptyMatcher) Match(actual interface{}) (success bool, err error) { + length, ok := lengthOf(actual) + if !ok { + return false, fmt.Errorf("BeEmpty matcher expects a string/array/map/channel/slice. Got:\n%s", format.Object(actual, 1)) + } + + return length == 0, nil +} + +func (matcher *BeEmptyMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be empty") +} + +func (matcher *BeEmptyMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to be empty") +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_empty_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_empty_matcher_test.go new file mode 100644 index 000000000..541c1b951 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_empty_matcher_test.go @@ -0,0 +1,52 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("BeEmpty", func() { + Context("when passed a supported type", func() { + It("should do the right thing", func() { + Ω("").Should(BeEmpty()) + Ω(" ").ShouldNot(BeEmpty()) + + Ω([0]int{}).Should(BeEmpty()) + Ω([1]int{1}).ShouldNot(BeEmpty()) + + Ω([]int{}).Should(BeEmpty()) + Ω([]int{1}).ShouldNot(BeEmpty()) + + Ω(map[string]int{}).Should(BeEmpty()) + Ω(map[string]int{"a": 1}).ShouldNot(BeEmpty()) + + c := make(chan bool, 1) + Ω(c).Should(BeEmpty()) + c <- true + Ω(c).ShouldNot(BeEmpty()) + }) + }) + + Context("when passed a correctly typed nil", func() { + It("should be true", func() { + var nilSlice []int + Ω(nilSlice).Should(BeEmpty()) + + var nilMap map[int]string + Ω(nilMap).Should(BeEmpty()) + }) + }) + + Context("when passed an unsupported type", func() { + It("should error", func() { + success, err := (&BeEmptyMatcher{}).Match(0) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&BeEmptyMatcher{}).Match(nil) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go new file mode 100644 index 000000000..32a0c3108 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go @@ -0,0 +1,33 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" + "reflect" +) + +type BeEquivalentToMatcher struct { + Expected interface{} +} + +func (matcher *BeEquivalentToMatcher) Match(actual interface{}) (success bool, err error) { + if actual == nil && matcher.Expected == nil { + return false, fmt.Errorf("Both actual and expected must not be nil.") + } + + convertedActual := actual + + if actual != nil && matcher.Expected != nil && reflect.TypeOf(actual).ConvertibleTo(reflect.TypeOf(matcher.Expected)) { + convertedActual = reflect.ValueOf(actual).Convert(reflect.TypeOf(matcher.Expected)).Interface() + } + + return reflect.DeepEqual(convertedActual, matcher.Expected), nil +} + +func (matcher *BeEquivalentToMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be equivalent to", matcher.Expected) +} + +func (matcher *BeEquivalentToMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to be equivalent to", matcher.Expected) +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher_test.go new file mode 100644 index 000000000..def5104fa --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher_test.go @@ -0,0 +1,50 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("BeEquivalentTo", func() { + Context("when asserting that nil is equivalent to nil", func() { + It("should error", func() { + success, err := (&BeEquivalentToMatcher{Expected: nil}).Match(nil) + + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) + + Context("When asserting on nil", func() { + It("should do the right thing", func() { + Ω("foo").ShouldNot(BeEquivalentTo(nil)) + Ω(nil).ShouldNot(BeEquivalentTo(3)) + Ω([]int{1, 2}).ShouldNot(BeEquivalentTo(nil)) + }) + }) + + Context("When asserting on type aliases", func() { + It("should the right thing", func() { + Ω(StringAlias("foo")).Should(BeEquivalentTo("foo")) + Ω("foo").Should(BeEquivalentTo(StringAlias("foo"))) + Ω(StringAlias("foo")).ShouldNot(BeEquivalentTo("bar")) + Ω("foo").ShouldNot(BeEquivalentTo(StringAlias("bar"))) + }) + }) + + Context("When asserting on numbers", func() { + It("should convert actual to expected and do the right thing", func() { + Ω(5).Should(BeEquivalentTo(5)) + Ω(5.0).Should(BeEquivalentTo(5.0)) + Ω(5).Should(BeEquivalentTo(5.0)) + + Ω(5).ShouldNot(BeEquivalentTo("5")) + Ω(5).ShouldNot(BeEquivalentTo(3)) + + //Here be dragons! + Ω(5.1).Should(BeEquivalentTo(5)) + Ω(5).ShouldNot(BeEquivalentTo(5.1)) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go new file mode 100644 index 000000000..0b224cbbc --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go @@ -0,0 +1,25 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" +) + +type BeFalseMatcher struct { +} + +func (matcher *BeFalseMatcher) Match(actual interface{}) (success bool, err error) { + if !isBool(actual) { + return false, fmt.Errorf("Expected a boolean. Got:\n%s", format.Object(actual, 1)) + } + + return actual == false, nil +} + +func (matcher *BeFalseMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be false") +} + +func (matcher *BeFalseMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to be false") +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_false_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_false_matcher_test.go new file mode 100644 index 000000000..3965a2c53 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_false_matcher_test.go @@ -0,0 +1,20 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("BeFalse", func() { + It("should handle true and false correctly", func() { + Ω(true).ShouldNot(BeFalse()) + Ω(false).Should(BeFalse()) + }) + + It("should only support booleans", func() { + success, err := (&BeFalseMatcher{}).Match("foo") + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_identical_to.go b/vendor/github.com/onsi/gomega/matchers/be_identical_to.go new file mode 100644 index 000000000..fdcda4d1f --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_identical_to.go @@ -0,0 +1,37 @@ +package matchers + +import ( + "fmt" + "runtime" + + "github.com/onsi/gomega/format" +) + +type BeIdenticalToMatcher struct { + Expected interface{} +} + +func (matcher *BeIdenticalToMatcher) Match(actual interface{}) (success bool, matchErr error) { + if actual == nil && matcher.Expected == nil { + return false, fmt.Errorf("Refusing to compare to .\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.") + } + + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + success = false + matchErr = nil + } + } + }() + + return actual == matcher.Expected, nil +} + +func (matcher *BeIdenticalToMatcher) FailureMessage(actual interface{}) string { + return format.Message(actual, "to be identical to", matcher.Expected) +} + +func (matcher *BeIdenticalToMatcher) NegatedFailureMessage(actual interface{}) string { + return format.Message(actual, "not to be identical to", matcher.Expected) +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_identical_to_test.go b/vendor/github.com/onsi/gomega/matchers/be_identical_to_test.go new file mode 100644 index 000000000..8b90a1a61 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_identical_to_test.go @@ -0,0 +1,61 @@ +package matchers_test + +import ( + "errors" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("BeIdenticalTo", func() { + Context("when asserting that nil equals nil", func() { + It("should error", func() { + success, err := (&BeIdenticalToMatcher{Expected: nil}).Match(nil) + + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) + + It("should treat the same pointer to a struct as identical", func() { + mySpecialStruct := myCustomType{} + Ω(&mySpecialStruct).Should(BeIdenticalTo(&mySpecialStruct)) + Ω(&myCustomType{}).ShouldNot(BeIdenticalTo(&mySpecialStruct)) + }) + + It("should be strict about types", func() { + Ω(5).ShouldNot(BeIdenticalTo("5")) + Ω(5).ShouldNot(BeIdenticalTo(5.0)) + Ω(5).ShouldNot(BeIdenticalTo(3)) + }) + + It("should treat primtives as identical", func() { + Ω("5").Should(BeIdenticalTo("5")) + Ω("5").ShouldNot(BeIdenticalTo("55")) + + Ω(5.55).Should(BeIdenticalTo(5.55)) + Ω(5.55).ShouldNot(BeIdenticalTo(6.66)) + + Ω(5).Should(BeIdenticalTo(5)) + Ω(5).ShouldNot(BeIdenticalTo(55)) + }) + + It("should treat the same pointers to a slice as identical", func() { + mySlice := []int{1, 2} + Ω(&mySlice).Should(BeIdenticalTo(&mySlice)) + Ω(&mySlice).ShouldNot(BeIdenticalTo(&[]int{1, 2})) + }) + + It("should treat the same pointers to a map as identical", func() { + myMap := map[string]string{"a": "b", "c": "d"} + Ω(&myMap).Should(BeIdenticalTo(&myMap)) + Ω(myMap).ShouldNot(BeIdenticalTo(map[string]string{"a": "b", "c": "d"})) + }) + + It("should treat the same pointers to an error as identical", func() { + myError := errors.New("foo") + Ω(&myError).Should(BeIdenticalTo(&myError)) + Ω(errors.New("foo")).ShouldNot(BeIdenticalTo(errors.New("bar"))) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go new file mode 100644 index 000000000..7ee84fe1b --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go @@ -0,0 +1,18 @@ +package matchers + +import "github.com/onsi/gomega/format" + +type BeNilMatcher struct { +} + +func (matcher *BeNilMatcher) Match(actual interface{}) (success bool, err error) { + return isNil(actual), nil +} + +func (matcher *BeNilMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be nil") +} + +func (matcher *BeNilMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to be nil") +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_nil_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_nil_matcher_test.go new file mode 100644 index 000000000..753325363 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_nil_matcher_test.go @@ -0,0 +1,28 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("BeNil", func() { + It("should succeed when passed nil", func() { + Ω(nil).Should(BeNil()) + }) + + It("should succeed when passed a typed nil", func() { + var a []int + Ω(a).Should(BeNil()) + }) + + It("should succeed when passing nil pointer", func() { + var f *struct{} + Ω(f).Should(BeNil()) + }) + + It("should not succeed when not passed nil", func() { + Ω(0).ShouldNot(BeNil()) + Ω(false).ShouldNot(BeNil()) + Ω("").ShouldNot(BeNil()) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go new file mode 100644 index 000000000..0c157f61b --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go @@ -0,0 +1,120 @@ +package matchers + +import ( + "fmt" + "math" + + "github.com/onsi/gomega/format" +) + +type BeNumericallyMatcher struct { + Comparator string + CompareTo []interface{} +} + +func (matcher *BeNumericallyMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("to be %s", matcher.Comparator), matcher.CompareTo[0]) +} + +func (matcher *BeNumericallyMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("not to be %s", matcher.Comparator), matcher.CompareTo[0]) +} + +func (matcher *BeNumericallyMatcher) Match(actual interface{}) (success bool, err error) { + if len(matcher.CompareTo) == 0 || len(matcher.CompareTo) > 2 { + return false, fmt.Errorf("BeNumerically requires 1 or 2 CompareTo arguments. Got:\n%s", format.Object(matcher.CompareTo, 1)) + } + if !isNumber(actual) { + return false, fmt.Errorf("Expected a number. Got:\n%s", format.Object(actual, 1)) + } + if !isNumber(matcher.CompareTo[0]) { + return false, fmt.Errorf("Expected a number. Got:\n%s", format.Object(matcher.CompareTo[0], 1)) + } + if len(matcher.CompareTo) == 2 && !isNumber(matcher.CompareTo[1]) { + return false, fmt.Errorf("Expected a number. 
Got:\n%s", format.Object(matcher.CompareTo[0], 1)) + } + + switch matcher.Comparator { + case "==", "~", ">", ">=", "<", "<=": + default: + return false, fmt.Errorf("Unknown comparator: %s", matcher.Comparator) + } + + if isFloat(actual) || isFloat(matcher.CompareTo[0]) { + var secondOperand float64 = 1e-8 + if len(matcher.CompareTo) == 2 { + secondOperand = toFloat(matcher.CompareTo[1]) + } + success = matcher.matchFloats(toFloat(actual), toFloat(matcher.CompareTo[0]), secondOperand) + } else if isInteger(actual) { + var secondOperand int64 = 0 + if len(matcher.CompareTo) == 2 { + secondOperand = toInteger(matcher.CompareTo[1]) + } + success = matcher.matchIntegers(toInteger(actual), toInteger(matcher.CompareTo[0]), secondOperand) + } else if isUnsignedInteger(actual) { + var secondOperand uint64 = 0 + if len(matcher.CompareTo) == 2 { + secondOperand = toUnsignedInteger(matcher.CompareTo[1]) + } + success = matcher.matchUnsignedIntegers(toUnsignedInteger(actual), toUnsignedInteger(matcher.CompareTo[0]), secondOperand) + } else { + return false, fmt.Errorf("Failed to compare:\n%s\n%s:\n%s", format.Object(actual, 1), matcher.Comparator, format.Object(matcher.CompareTo[0], 1)) + } + + return success, nil +} + +func (matcher *BeNumericallyMatcher) matchIntegers(actual, compareTo, threshold int64) (success bool) { + switch matcher.Comparator { + case "==", "~": + diff := actual - compareTo + return -threshold <= diff && diff <= threshold + case ">": + return (actual > compareTo) + case ">=": + return (actual >= compareTo) + case "<": + return (actual < compareTo) + case "<=": + return (actual <= compareTo) + } + return false +} + +func (matcher *BeNumericallyMatcher) matchUnsignedIntegers(actual, compareTo, threshold uint64) (success bool) { + switch matcher.Comparator { + case "==", "~": + if actual < compareTo { + actual, compareTo = compareTo, actual + } + return actual-compareTo <= threshold + case ">": + return (actual > compareTo) + case ">=": + return (actual >= compareTo) + case "<": + return (actual < compareTo) + case "<=": + return (actual <= compareTo) + } + return false +} + +func (matcher *BeNumericallyMatcher) matchFloats(actual, compareTo, threshold float64) (success bool) { + switch matcher.Comparator { + case "~": + return math.Abs(actual-compareTo) <= threshold + case "==": + return (actual == compareTo) + case ">": + return (actual > compareTo) + case ">=": + return (actual >= compareTo) + case "<": + return (actual < compareTo) + case "<=": + return (actual <= compareTo) + } + return false +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher_test.go new file mode 100644 index 000000000..43fdb1fe0 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher_test.go @@ -0,0 +1,148 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("BeNumerically", func() { + Context("when passed a number", func() { + It("should support ==", func() { + Ω(uint32(5)).Should(BeNumerically("==", 5)) + Ω(float64(5.0)).Should(BeNumerically("==", 5)) + Ω(int8(5)).Should(BeNumerically("==", 5)) + }) + + It("should not have false positives", func() { + Ω(5.1).ShouldNot(BeNumerically("==", 5)) + Ω(5).ShouldNot(BeNumerically("==", 5.1)) + }) + + It("should support >", func() { + Ω(uint32(5)).Should(BeNumerically(">", 4)) + Ω(float64(5.0)).Should(BeNumerically(">", 4.9)) + Ω(int8(5)).Should(BeNumerically(">", 4)) + + Ω(uint32(5)).ShouldNot(BeNumerically(">", 5)) + Ω(float64(5.0)).ShouldNot(BeNumerically(">", 5.0)) + Ω(int8(5)).ShouldNot(BeNumerically(">", 5)) + }) + + It("should support <", func() { + Ω(uint32(5)).Should(BeNumerically("<", 6)) + Ω(float64(5.0)).Should(BeNumerically("<", 5.1)) + Ω(int8(5)).Should(BeNumerically("<", 6)) + + Ω(uint32(5)).ShouldNot(BeNumerically("<", 5)) + Ω(float64(5.0)).ShouldNot(BeNumerically("<", 5.0)) + Ω(int8(5)).ShouldNot(BeNumerically("<", 5)) + }) + + It("should support >=", func() { + Ω(uint32(5)).Should(BeNumerically(">=", 4)) + Ω(float64(5.0)).Should(BeNumerically(">=", 4.9)) + Ω(int8(5)).Should(BeNumerically(">=", 4)) + + Ω(uint32(5)).Should(BeNumerically(">=", 5)) + Ω(float64(5.0)).Should(BeNumerically(">=", 5.0)) + Ω(int8(5)).Should(BeNumerically(">=", 5)) + + Ω(uint32(5)).ShouldNot(BeNumerically(">=", 6)) + Ω(float64(5.0)).ShouldNot(BeNumerically(">=", 5.1)) + Ω(int8(5)).ShouldNot(BeNumerically(">=", 6)) + }) + + It("should support <=", func() { + Ω(uint32(5)).Should(BeNumerically("<=", 6)) + Ω(float64(5.0)).Should(BeNumerically("<=", 5.1)) + Ω(int8(5)).Should(BeNumerically("<=", 6)) + + Ω(uint32(5)).Should(BeNumerically("<=", 5)) + Ω(float64(5.0)).Should(BeNumerically("<=", 5.0)) + Ω(int8(5)).Should(BeNumerically("<=", 5)) + + Ω(uint32(5)).ShouldNot(BeNumerically("<=", 4)) + Ω(float64(5.0)).ShouldNot(BeNumerically("<=", 4.9)) + Ω(int8(5)).Should(BeNumerically("<=", 5)) + }) + + Context("when passed ~", func() { + Context("when passed a float", func() { + Context("and there is no precision parameter", func() { + It("should default to 1e-8", func() { + Ω(5.00000001).Should(BeNumerically("~", 5.00000002)) + Ω(5.00000001).ShouldNot(BeNumerically("~", 5.0000001)) + }) + }) + + Context("and there is a precision parameter", func() { + It("should use the precision parameter", func() { + Ω(5.1).Should(BeNumerically("~", 5.19, 0.1)) + Ω(5.1).Should(BeNumerically("~", 5.01, 0.1)) + Ω(5.1).ShouldNot(BeNumerically("~", 5.22, 0.1)) + Ω(5.1).ShouldNot(BeNumerically("~", 4.98, 0.1)) + }) + }) + }) + + Context("when passed an int/uint", func() { + Context("and there is no precision parameter", func() { + It("should just do strict equality", func() { + Ω(5).Should(BeNumerically("~", 5)) + Ω(5).ShouldNot(BeNumerically("~", 6)) + Ω(uint(5)).ShouldNot(BeNumerically("~", 6)) + }) + }) + + Context("and there is a precision parameter", func() { + It("should use precision paramter", func() { + Ω(5).Should(BeNumerically("~", 6, 2)) + Ω(5).ShouldNot(BeNumerically("~", 8, 2)) + Ω(uint(5)).Should(BeNumerically("~", 6, 1)) + }) + }) + }) + }) + }) + + Context("when passed a non-number", func() { + It("should error", func() { + success, err := (&BeNumericallyMatcher{Comparator: "==", CompareTo: []interface{}{5}}).Match("foo") + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&BeNumericallyMatcher{Comparator: "=="}).Match(5) + 
Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&BeNumericallyMatcher{Comparator: "~", CompareTo: []interface{}{3.0, "foo"}}).Match(5.0) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&BeNumericallyMatcher{Comparator: "==", CompareTo: []interface{}{"bar"}}).Match(5) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&BeNumericallyMatcher{Comparator: "==", CompareTo: []interface{}{"bar"}}).Match("foo") + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&BeNumericallyMatcher{Comparator: "==", CompareTo: []interface{}{nil}}).Match(0) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&BeNumericallyMatcher{Comparator: "==", CompareTo: []interface{}{0}}).Match(nil) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) + + Context("when passed an unsupported comparator", func() { + It("should error", func() { + success, err := (&BeNumericallyMatcher{Comparator: "!=", CompareTo: []interface{}{5}}).Match(4) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go new file mode 100644 index 000000000..d7c32233e --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go @@ -0,0 +1,71 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type BeSentMatcher struct { + Arg interface{} + channelClosed bool +} + +func (matcher *BeSentMatcher) Match(actual interface{}) (success bool, err error) { + if !isChan(actual) { + return false, fmt.Errorf("BeSent expects a channel. Got:\n%s", format.Object(actual, 1)) + } + + channelType := reflect.TypeOf(actual) + channelValue := reflect.ValueOf(actual) + + if channelType.ChanDir() == reflect.RecvDir { + return false, fmt.Errorf("BeSent matcher cannot be passed a receive-only channel. 
Got:\n%s", format.Object(actual, 1)) + } + + argType := reflect.TypeOf(matcher.Arg) + assignable := argType.AssignableTo(channelType.Elem()) + + if !assignable { + return false, fmt.Errorf("Cannot pass:\n%s to the channel:\n%s\nThe types don't match.", format.Object(matcher.Arg, 1), format.Object(actual, 1)) + } + + argValue := reflect.ValueOf(matcher.Arg) + + defer func() { + if e := recover(); e != nil { + success = false + err = fmt.Errorf("Cannot send to a closed channel") + matcher.channelClosed = true + } + }() + + winnerIndex, _, _ := reflect.Select([]reflect.SelectCase{ + reflect.SelectCase{Dir: reflect.SelectSend, Chan: channelValue, Send: argValue}, + reflect.SelectCase{Dir: reflect.SelectDefault}, + }) + + var didSend bool + if winnerIndex == 0 { + didSend = true + } + + return didSend, nil +} + +func (matcher *BeSentMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to send:", matcher.Arg) +} + +func (matcher *BeSentMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to send:", matcher.Arg) +} + +func (matcher *BeSentMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + if !isChan(actual) { + return false + } + + return !matcher.channelClosed +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_sent_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_sent_matcher_test.go new file mode 100644 index 000000000..205d71f40 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_sent_matcher_test.go @@ -0,0 +1,106 @@ +package matchers_test + +import ( + . "github.com/onsi/gomega/matchers" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("BeSent", func() { + Context("when passed a channel and a matching type", func() { + Context("when the channel is ready to receive", func() { + It("should succeed and send the value down the channel", func() { + c := make(chan string) + d := make(chan string) + go func() { + val := <-c + d <- val + }() + + time.Sleep(10 * time.Millisecond) + + Ω(c).Should(BeSent("foo")) + Eventually(d).Should(Receive(Equal("foo"))) + }) + + It("should succeed (with a buffered channel)", func() { + c := make(chan string, 1) + Ω(c).Should(BeSent("foo")) + Ω(<-c).Should(Equal("foo")) + }) + }) + + Context("when the channel is not ready to receive", func() { + It("should fail and not send down the channel", func() { + c := make(chan string) + Ω(c).ShouldNot(BeSent("foo")) + Consistently(c).ShouldNot(Receive()) + }) + }) + + Context("when the channel is eventually ready to receive", func() { + It("should succeed", func() { + c := make(chan string) + d := make(chan string) + go func() { + time.Sleep(30 * time.Millisecond) + val := <-c + d <- val + }() + + Eventually(c).Should(BeSent("foo")) + Eventually(d).Should(Receive(Equal("foo"))) + }) + }) + + Context("when the channel is closed", func() { + It("should error", func() { + c := make(chan string) + close(c) + success, err := (&BeSentMatcher{Arg: "foo"}).Match(c) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + + It("should short-circuit Eventually", func() { + c := make(chan string) + close(c) + + t := time.Now() + failures := InterceptGomegaFailures(func() { + Eventually(c, 10.0).Should(BeSent("foo")) + }) + Ω(failures).Should(HaveLen(1)) + Ω(time.Since(t)).Should(BeNumerically("<", time.Second)) + }) + }) + }) + + Context("when passed a channel and a non-matching type", func() { + It("should error", func() { + success, err := 
(&BeSentMatcher{Arg: "foo"}).Match(make(chan int, 1)) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) + + Context("when passed a receive-only channel", func() { + It("should error", func() { + var c <-chan string + c = make(chan string, 1) + success, err := (&BeSentMatcher{Arg: "foo"}).Match(c) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) + + Context("when passed a nonchannel", func() { + It("should error", func() { + success, err := (&BeSentMatcher{Arg: "foo"}).Match("bar") + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go new file mode 100644 index 000000000..abda4eb1e --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go @@ -0,0 +1,65 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" + "time" +) + +type BeTemporallyMatcher struct { + Comparator string + CompareTo time.Time + Threshold []time.Duration +} + +func (matcher *BeTemporallyMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("to be %s", matcher.Comparator), matcher.CompareTo) +} + +func (matcher *BeTemporallyMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("not to be %s", matcher.Comparator), matcher.CompareTo) +} + +func (matcher *BeTemporallyMatcher) Match(actual interface{}) (bool, error) { + // predicate to test for time.Time type + isTime := func(t interface{}) bool { + _, ok := t.(time.Time) + return ok + } + + if !isTime(actual) { + return false, fmt.Errorf("Expected a time.Time. Got:\n%s", format.Object(actual, 1)) + } + + switch matcher.Comparator { + case "==", "~", ">", ">=", "<", "<=": + default: + return false, fmt.Errorf("Unknown comparator: %s", matcher.Comparator) + } + + var threshold = time.Millisecond + if len(matcher.Threshold) == 1 { + threshold = matcher.Threshold[0] + } + + return matcher.matchTimes(actual.(time.Time), matcher.CompareTo, threshold), nil +} + +func (matcher *BeTemporallyMatcher) matchTimes(actual, compareTo time.Time, threshold time.Duration) (success bool) { + switch matcher.Comparator { + case "==": + return actual.Equal(compareTo) + case "~": + diff := actual.Sub(compareTo) + return -threshold <= diff && diff <= threshold + case ">": + return actual.After(compareTo) + case ">=": + return !actual.Before(compareTo) + case "<": + return actual.Before(compareTo) + case "<=": + return !actual.After(compareTo) + } + return false +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher_test.go new file mode 100644 index 000000000..feb33e5dc --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher_test.go @@ -0,0 +1,98 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" + "time" +) + +var _ = Describe("BeTemporally", func() { + + var t0, t1, t2 time.Time + BeforeEach(func() { + t0 = time.Now() + t1 = t0.Add(time.Second) + t2 = t0.Add(-time.Second) + }) + + Context("When comparing times", func() { + + It("should support ==", func() { + Ω(t0).Should(BeTemporally("==", t0)) + Ω(t1).ShouldNot(BeTemporally("==", t0)) + Ω(t0).ShouldNot(BeTemporally("==", t1)) + Ω(t0).ShouldNot(BeTemporally("==", time.Time{})) + }) + + It("should support >", func() { + Ω(t0).Should(BeTemporally(">", t2)) + Ω(t0).ShouldNot(BeTemporally(">", t0)) + Ω(t2).ShouldNot(BeTemporally(">", t0)) + }) + + It("should support <", func() { + Ω(t0).Should(BeTemporally("<", t1)) + Ω(t0).ShouldNot(BeTemporally("<", t0)) + Ω(t1).ShouldNot(BeTemporally("<", t0)) + }) + + It("should support >=", func() { + Ω(t0).Should(BeTemporally(">=", t2)) + Ω(t0).Should(BeTemporally(">=", t0)) + Ω(t0).ShouldNot(BeTemporally(">=", t1)) + }) + + It("should support <=", func() { + Ω(t0).Should(BeTemporally("<=", t1)) + Ω(t0).Should(BeTemporally("<=", t0)) + Ω(t0).ShouldNot(BeTemporally("<=", t2)) + }) + + Context("when passed ~", func() { + Context("and there is no precision parameter", func() { + BeforeEach(func() { + t1 = t0.Add(time.Millisecond / 2) + t2 = t0.Add(-2 * time.Millisecond) + }) + It("should approximate", func() { + Ω(t0).Should(BeTemporally("~", t0)) + Ω(t0).Should(BeTemporally("~", t1)) + Ω(t0).ShouldNot(BeTemporally("~", t2)) + }) + }) + + Context("and there is a precision parameter", func() { + BeforeEach(func() { + t2 = t0.Add(3 * time.Second) + }) + It("should use precision paramter", func() { + d := 2 * time.Second + Ω(t0).Should(BeTemporally("~", t0, d)) + Ω(t0).Should(BeTemporally("~", t1, d)) + Ω(t0).ShouldNot(BeTemporally("~", t2, d)) + }) + }) + }) + }) + + Context("when passed a non-time", func() { + It("should error", func() { + success, err := (&BeTemporallyMatcher{Comparator: "==", CompareTo: t0}).Match("foo") + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&BeTemporallyMatcher{Comparator: "=="}).Match(nil) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) + + Context("when passed an unsupported comparator", func() { + It("should error", func() { + success, err := (&BeTemporallyMatcher{Comparator: "!=", CompareTo: t0}).Match(t2) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go new file mode 100644 index 000000000..1275e5fc9 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go @@ -0,0 +1,25 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" +) + +type BeTrueMatcher struct { +} + +func (matcher *BeTrueMatcher) Match(actual interface{}) (success bool, err error) { + if !isBool(actual) { + return false, fmt.Errorf("Expected a boolean. 
Got:\n%s", format.Object(actual, 1)) + } + + return actual.(bool), nil +} + +func (matcher *BeTrueMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be true") +} + +func (matcher *BeTrueMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to be true") +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_true_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_true_matcher_test.go new file mode 100644 index 000000000..ca32e56be --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_true_matcher_test.go @@ -0,0 +1,20 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/matchers" +) + +var _ = Describe("BeTrue", func() { + It("should handle true and false correctly", func() { + Ω(true).Should(BeTrue()) + Ω(false).ShouldNot(BeTrue()) + }) + + It("should only support booleans", func() { + success, err := (&BeTrueMatcher{}).Match("foo") + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go new file mode 100644 index 000000000..b39c9144b --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go @@ -0,0 +1,27 @@ +package matchers + +import ( + "github.com/onsi/gomega/format" + "reflect" +) + +type BeZeroMatcher struct { +} + +func (matcher *BeZeroMatcher) Match(actual interface{}) (success bool, err error) { + if actual == nil { + return true, nil + } + zeroValue := reflect.Zero(reflect.TypeOf(actual)).Interface() + + return reflect.DeepEqual(zeroValue, actual), nil + +} + +func (matcher *BeZeroMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be zero-valued") +} + +func (matcher *BeZeroMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to be zero-valued") +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_zero_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_zero_matcher_test.go new file mode 100644 index 000000000..8ec3643c2 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_zero_matcher_test.go @@ -0,0 +1,30 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("BeZero", func() { + It("should succeed if the passed in object is the zero value for its type", func() { + Ω(nil).Should(BeZero()) + + Ω("").Should(BeZero()) + Ω(" ").ShouldNot(BeZero()) + + Ω(0).Should(BeZero()) + Ω(1).ShouldNot(BeZero()) + + Ω(0.0).Should(BeZero()) + Ω(0.1).ShouldNot(BeZero()) + + // Ω([]int{}).Should(BeZero()) + Ω([]int{1}).ShouldNot(BeZero()) + + // Ω(map[string]int{}).Should(BeZero()) + Ω(map[string]int{"a": 1}).ShouldNot(BeZero()) + + Ω(myCustomType{}).Should(BeZero()) + Ω(myCustomType{s: "a"}).ShouldNot(BeZero()) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/consist_of.go b/vendor/github.com/onsi/gomega/matchers/consist_of.go new file mode 100644 index 000000000..7b0e08868 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/consist_of.go @@ -0,0 +1,80 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" + "github.com/onsi/gomega/matchers/support/goraph/bipartitegraph" +) + +type ConsistOfMatcher struct { + Elements []interface{} +} + +func (matcher *ConsistOfMatcher) Match(actual interface{}) (success bool, err error) { + if !isArrayOrSlice(actual) && !isMap(actual) { + return false, fmt.Errorf("ConsistOf matcher expects an array/slice/map. Got:\n%s", format.Object(actual, 1)) + } + + elements := matcher.Elements + if len(matcher.Elements) == 1 && isArrayOrSlice(matcher.Elements[0]) { + elements = []interface{}{} + value := reflect.ValueOf(matcher.Elements[0]) + for i := 0; i < value.Len(); i++ { + elements = append(elements, value.Index(i).Interface()) + } + } + + matchers := []interface{}{} + for _, element := range elements { + matcher, isMatcher := element.(omegaMatcher) + if !isMatcher { + matcher = &EqualMatcher{Expected: element} + } + matchers = append(matchers, matcher) + } + + values := matcher.valuesOf(actual) + + if len(values) != len(matchers) { + return false, nil + } + + neighbours := func(v, m interface{}) (bool, error) { + match, err := m.(omegaMatcher).Match(v) + return match && err == nil, nil + } + + bipartiteGraph, err := bipartitegraph.NewBipartiteGraph(values, matchers, neighbours) + if err != nil { + return false, err + } + + return len(bipartiteGraph.LargestMatching()) == len(values), nil +} + +func (matcher *ConsistOfMatcher) valuesOf(actual interface{}) []interface{} { + value := reflect.ValueOf(actual) + values := []interface{}{} + if isMap(actual) { + keys := value.MapKeys() + for i := 0; i < value.Len(); i++ { + values = append(values, value.MapIndex(keys[i]).Interface()) + } + } else { + for i := 0; i < value.Len(); i++ { + values = append(values, value.Index(i).Interface()) + } + } + + return values +} + +func (matcher *ConsistOfMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to consist of", matcher.Elements) +} + +func (matcher *ConsistOfMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to consist of", matcher.Elements) +} diff --git a/vendor/github.com/onsi/gomega/matchers/consist_of_test.go b/vendor/github.com/onsi/gomega/matchers/consist_of_test.go new file mode 100644 index 000000000..dcd1afe94 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/consist_of_test.go @@ -0,0 +1,75 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("ConsistOf", func() { + Context("with a slice", func() { + It("should do the right thing", func() { + Ω([]string{"foo", "bar", "baz"}).Should(ConsistOf("foo", "bar", "baz")) + Ω([]string{"foo", "bar", "baz"}).Should(ConsistOf("foo", "bar", "baz")) + Ω([]string{"foo", "bar", "baz"}).Should(ConsistOf("baz", "bar", "foo")) + Ω([]string{"foo", "bar", "baz"}).ShouldNot(ConsistOf("baz", "bar", "foo", "foo")) + Ω([]string{"foo", "bar", "baz"}).ShouldNot(ConsistOf("baz", "foo")) + }) + }) + + Context("with an array", func() { + It("should do the right thing", func() { + Ω([3]string{"foo", "bar", "baz"}).Should(ConsistOf("foo", "bar", "baz")) + Ω([3]string{"foo", "bar", "baz"}).Should(ConsistOf("baz", "bar", "foo")) + Ω([3]string{"foo", "bar", "baz"}).ShouldNot(ConsistOf("baz", "bar", "foo", "foo")) + Ω([3]string{"foo", "bar", "baz"}).ShouldNot(ConsistOf("baz", "foo")) + }) + }) + + Context("with a map", func() { + It("should apply to the values", func() { + Ω(map[int]string{1: "foo", 2: "bar", 3: "baz"}).Should(ConsistOf("foo", "bar", "baz")) + Ω(map[int]string{1: "foo", 2: "bar", 3: "baz"}).Should(ConsistOf("baz", "bar", "foo")) + Ω(map[int]string{1: "foo", 2: "bar", 3: "baz"}).ShouldNot(ConsistOf("baz", "bar", "foo", "foo")) + Ω(map[int]string{1: "foo", 2: "bar", 3: "baz"}).ShouldNot(ConsistOf("baz", "foo")) + }) + + }) + + Context("with anything else", func() { + It("should error", func() { + failures := InterceptGomegaFailures(func() { + Ω("foo").Should(ConsistOf("f", "o", "o")) + }) + + Ω(failures).Should(HaveLen(1)) + }) + }) + + Context("when passed matchers", func() { + It("should pass if the matchers pass", func() { + Ω([]string{"foo", "bar", "baz"}).Should(ConsistOf("foo", MatchRegexp("^ba"), "baz")) + Ω([]string{"foo", "bar", "baz"}).ShouldNot(ConsistOf("foo", MatchRegexp("^ba"))) + Ω([]string{"foo", "bar", "baz"}).ShouldNot(ConsistOf("foo", MatchRegexp("^ba"), MatchRegexp("foo"))) + Ω([]string{"foo", "bar", "baz"}).Should(ConsistOf("foo", MatchRegexp("^ba"), MatchRegexp("^ba"))) + Ω([]string{"foo", "bar", "baz"}).ShouldNot(ConsistOf("foo", MatchRegexp("^ba"), MatchRegexp("turducken"))) + }) + + It("should not depend on the order of the matchers", func() { + Ω([][]int{[]int{1, 2}, []int{2}}).Should(ConsistOf(ContainElement(1), ContainElement(2))) + Ω([][]int{[]int{1, 2}, []int{2}}).Should(ConsistOf(ContainElement(2), ContainElement(1))) + }) + + Context("when a matcher errors", func() { + It("should soldier on", func() { + Ω([]string{"foo", "bar", "baz"}).ShouldNot(ConsistOf(BeFalse(), "foo", "bar")) + Ω([]interface{}{"foo", "bar", false}).Should(ConsistOf(BeFalse(), ContainSubstring("foo"), "bar")) + }) + }) + }) + + Context("when passed exactly one argument, and that argument is a slice", func() { + It("should match against the elements of that argument", func() { + Ω([]string{"foo", "bar", "baz"}).Should(ConsistOf([]string{"foo", "bar", "baz"})) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go new file mode 100644 index 000000000..4159335d0 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go @@ -0,0 +1,56 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type ContainElementMatcher struct { + Element interface{} +} + +func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, err error) { + if !isArrayOrSlice(actual) && 
!isMap(actual) {
+		return false, fmt.Errorf("ContainElement matcher expects an array/slice/map. Got:\n%s", format.Object(actual, 1))
+	}
+
+	elemMatcher, elementIsMatcher := matcher.Element.(omegaMatcher)
+	if !elementIsMatcher {
+		elemMatcher = &EqualMatcher{Expected: matcher.Element}
+	}
+
+	value := reflect.ValueOf(actual)
+	var keys []reflect.Value
+	if isMap(actual) {
+		keys = value.MapKeys()
+	}
+	var lastError error
+	for i := 0; i < value.Len(); i++ {
+		var success bool
+		var err error
+		if isMap(actual) {
+			success, err = elemMatcher.Match(value.MapIndex(keys[i]).Interface())
+		} else {
+			success, err = elemMatcher.Match(value.Index(i).Interface())
+		}
+		if err != nil {
+			lastError = err
+			continue
+		}
+		if success {
+			return true, nil
+		}
+	}
+
+	return false, lastError
+}
+
+func (matcher *ContainElementMatcher) FailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "to contain element matching", matcher.Element)
+}
+
+func (matcher *ContainElementMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "not to contain element matching", matcher.Element)
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/contain_element_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher_test.go
new file mode 100644
index 000000000..38ee518fb
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher_test.go
@@ -0,0 +1,76 @@
+package matchers_test
+
+import (
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	. "github.com/onsi/gomega/matchers"
+)
+
+var _ = Describe("ContainElement", func() {
+	Context("when passed a supported type", func() {
+		Context("and expecting a non-matcher", func() {
+			It("should do the right thing", func() {
+				Ω([2]int{1, 2}).Should(ContainElement(2))
+				Ω([2]int{1, 2}).ShouldNot(ContainElement(3))
+
+				Ω([]int{1, 2}).Should(ContainElement(2))
+				Ω([]int{1, 2}).ShouldNot(ContainElement(3))
+
+				Ω(map[string]int{"foo": 1, "bar": 2}).Should(ContainElement(2))
+				Ω(map[int]int{3: 1, 4: 2}).ShouldNot(ContainElement(3))
+
+				arr := make([]myCustomType, 2)
+				arr[0] = myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}}
+				arr[1] = myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "c"}}
+				Ω(arr).Should(ContainElement(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}}))
+				Ω(arr).ShouldNot(ContainElement(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"b", "c"}}))
+			})
+		})
+
+		Context("and expecting a matcher", func() {
+			It("should pass each element through the matcher", func() {
+				Ω([]int{1, 2, 3}).Should(ContainElement(BeNumerically(">=", 3)))
+				Ω([]int{1, 2, 3}).ShouldNot(ContainElement(BeNumerically(">", 3)))
+				Ω(map[string]int{"foo": 1, "bar": 2}).Should(ContainElement(BeNumerically(">=", 2)))
+				Ω(map[string]int{"foo": 1, "bar": 2}).ShouldNot(ContainElement(BeNumerically(">", 2)))
+			})
+
+			It("should power through even if the matcher ever fails", func() {
+				Ω([]interface{}{1, 2, "3", 4}).Should(ContainElement(BeNumerically(">=", 3)))
+			})
+
+			It("should fail if the matcher fails", func() {
+				actual := []interface{}{1, 2, "3", "4"}
+				success, err := (&ContainElementMatcher{Element: BeNumerically(">=", 3)}).Match(actual)
+				Ω(success).Should(BeFalse())
+				Ω(err).Should(HaveOccurred())
+			})
+		})
+	})
+
+	Context("when passed a correctly typed nil", func() {
+		It("should operate successfully on the passed in value", func() {
+			var nilSlice []int
+			Ω(nilSlice).ShouldNot(ContainElement(1))
+
+			var nilMap map[int]string
+ Ω(nilMap).ShouldNot(ContainElement("foo")) + }) + }) + + Context("when passed an unsupported type", func() { + It("should error", func() { + success, err := (&ContainElementMatcher{Element: 0}).Match(0) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&ContainElementMatcher{Element: 0}).Match("abc") + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&ContainElementMatcher{Element: 0}).Match(nil) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go new file mode 100644 index 000000000..2e7608921 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go @@ -0,0 +1,37 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" + "strings" +) + +type ContainSubstringMatcher struct { + Substr string + Args []interface{} +} + +func (matcher *ContainSubstringMatcher) Match(actual interface{}) (success bool, err error) { + actualString, ok := toString(actual) + if !ok { + return false, fmt.Errorf("ContainSubstring matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1)) + } + + return strings.Contains(actualString, matcher.stringToMatch()), nil +} + +func (matcher *ContainSubstringMatcher) stringToMatch() string { + stringToMatch := matcher.Substr + if len(matcher.Args) > 0 { + stringToMatch = fmt.Sprintf(matcher.Substr, matcher.Args...) + } + return stringToMatch +} + +func (matcher *ContainSubstringMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to contain substring", matcher.stringToMatch()) +} + +func (matcher *ContainSubstringMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to contain substring", matcher.stringToMatch()) +} diff --git a/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher_test.go new file mode 100644 index 000000000..6935168e5 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher_test.go @@ -0,0 +1,36 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("ContainSubstringMatcher", func() { + Context("when actual is a string", func() { + It("should match against the string", func() { + Ω("Marvelous").Should(ContainSubstring("rve")) + Ω("Marvelous").ShouldNot(ContainSubstring("boo")) + }) + }) + + Context("when the matcher is called with multiple arguments", func() { + It("should pass the string and arguments to sprintf", func() { + Ω("Marvelous3").Should(ContainSubstring("velous%d", 3)) + }) + }) + + Context("when actual is a stringer", func() { + It("should call the stringer and match agains the returned string", func() { + Ω(&myStringer{a: "Abc3"}).Should(ContainSubstring("bc3")) + }) + }) + + Context("when actual is neither a string nor a stringer", func() { + It("should error", func() { + success, err := (&ContainSubstringMatcher{Substr: "2"}).Match(2) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/equal_matcher.go b/vendor/github.com/onsi/gomega/matchers/equal_matcher.go new file mode 100644 index 000000000..874e6a622 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/equal_matcher.go @@ -0,0 +1,33 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type EqualMatcher struct { + Expected interface{} +} + +func (matcher *EqualMatcher) Match(actual interface{}) (success bool, err error) { + if actual == nil && matcher.Expected == nil { + return false, fmt.Errorf("Refusing to compare to .\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.") + } + return reflect.DeepEqual(actual, matcher.Expected), nil +} + +func (matcher *EqualMatcher) FailureMessage(actual interface{}) (message string) { + actualString, actualOK := actual.(string) + expectedString, expectedOK := matcher.Expected.(string) + if actualOK && expectedOK { + return format.MessageWithDiff(actualString, "to equal", expectedString) + } + + return format.Message(actual, "to equal", matcher.Expected) +} + +func (matcher *EqualMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to equal", matcher.Expected) +} diff --git a/vendor/github.com/onsi/gomega/matchers/equal_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/equal_matcher_test.go new file mode 100644 index 000000000..2add0b749 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/equal_matcher_test.go @@ -0,0 +1,78 @@ +package matchers_test + +import ( + "errors" + "strings" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("Equal", func() { + Context("when asserting that nil equals nil", func() { + It("should error", func() { + success, err := (&EqualMatcher{Expected: nil}).Match(nil) + + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) + + Context("When asserting equality between objects", func() { + It("should do the right thing", func() { + Ω(5).Should(Equal(5)) + Ω(5.0).Should(Equal(5.0)) + + Ω(5).ShouldNot(Equal("5")) + Ω(5).ShouldNot(Equal(5.0)) + Ω(5).ShouldNot(Equal(3)) + + Ω("5").Should(Equal("5")) + Ω([]int{1, 2}).Should(Equal([]int{1, 2})) + Ω([]int{1, 2}).ShouldNot(Equal([]int{2, 1})) + Ω(map[string]string{"a": "b", "c": "d"}).Should(Equal(map[string]string{"a": "b", "c": "d"})) + Ω(map[string]string{"a": "b", "c": "d"}).ShouldNot(Equal(map[string]string{"a": "b", "c": "e"})) + Ω(errors.New("foo")).Should(Equal(errors.New("foo"))) + Ω(errors.New("foo")).ShouldNot(Equal(errors.New("bar"))) + + Ω(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}}).Should(Equal(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}})) + Ω(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}}).ShouldNot(Equal(myCustomType{s: "bar", n: 3, f: 2.0, arr: []string{"a", "b"}})) + Ω(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}}).ShouldNot(Equal(myCustomType{s: "foo", n: 2, f: 2.0, arr: []string{"a", "b"}})) + Ω(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}}).ShouldNot(Equal(myCustomType{s: "foo", n: 3, f: 3.0, arr: []string{"a", "b"}})) + Ω(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}}).ShouldNot(Equal(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b", "c"}})) + }) + }) + + Describe("failure messages", func() { + It("shows the two strings simply when they are short", func() { + subject := EqualMatcher{Expected: "eric"} + + failureMessage := subject.FailureMessage("tim") + Ω(failureMessage).To(BeEquivalentTo(expectedShortStringFailureMessage)) + }) + + It("shows the exact point where two long strings differ", func() { + stringWithB := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + stringWithZ := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaazaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + subject := EqualMatcher{Expected: stringWithZ} + + failureMessage := subject.FailureMessage(stringWithB) + Ω(failureMessage).To(BeEquivalentTo(expectedLongStringFailureMessage)) + }) + }) +}) + +var expectedShortStringFailureMessage = strings.TrimSpace(` +Expected + : tim +to equal + : eric +`) +var expectedLongStringFailureMessage = strings.TrimSpace(` +Expected + : "...aaaaabaaaaa..." +to equal | + : "...aaaaazaaaaa..." +`) diff --git a/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go new file mode 100644 index 000000000..7ace93dc3 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go @@ -0,0 +1,28 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +type HaveCapMatcher struct { + Count int +} + +func (matcher *HaveCapMatcher) Match(actual interface{}) (success bool, err error) { + length, ok := capOf(actual) + if !ok { + return false, fmt.Errorf("HaveCap matcher expects a array/channel/slice. 
Got:\n%s", format.Object(actual, 1)) + } + + return length == matcher.Count, nil +} + +func (matcher *HaveCapMatcher) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected\n%s\nto have capacity %d", format.Object(actual, 1), matcher.Count) +} + +func (matcher *HaveCapMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected\n%s\nnot to have capacity %d", format.Object(actual, 1), matcher.Count) +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_cap_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/have_cap_matcher_test.go new file mode 100644 index 000000000..a92a177b5 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_cap_matcher_test.go @@ -0,0 +1,50 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/matchers" +) + +var _ = Describe("HaveCap", func() { + Context("when passed a supported type", func() { + It("should do the right thing", func() { + Ω([0]int{}).Should(HaveCap(0)) + Ω([2]int{1}).Should(HaveCap(2)) + + Ω([]int{}).Should(HaveCap(0)) + Ω([]int{1, 2, 3, 4, 5}[:2]).Should(HaveCap(5)) + Ω(make([]int, 0, 5)).Should(HaveCap(5)) + + c := make(chan bool, 3) + Ω(c).Should(HaveCap(3)) + c <- true + c <- true + Ω(c).Should(HaveCap(3)) + + Ω(make(chan bool)).Should(HaveCap(0)) + }) + }) + + Context("when passed a correctly typed nil", func() { + It("should operate succesfully on the passed in value", func() { + var nilSlice []int + Ω(nilSlice).Should(HaveCap(0)) + + var nilChan chan int + Ω(nilChan).Should(HaveCap(0)) + }) + }) + + Context("when passed an unsupported type", func() { + It("should error", func() { + success, err := (&HaveCapMatcher{Count: 0}).Match(0) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&HaveCapMatcher{Count: 0}).Match(nil) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go new file mode 100644 index 000000000..5701ba6e2 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go @@ -0,0 +1,53 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" + "reflect" +) + +type HaveKeyMatcher struct { + Key interface{} +} + +func (matcher *HaveKeyMatcher) Match(actual interface{}) (success bool, err error) { + if !isMap(actual) { + return false, fmt.Errorf("HaveKey matcher expects a map. 
Got:%s", format.Object(actual, 1)) + } + + keyMatcher, keyIsMatcher := matcher.Key.(omegaMatcher) + if !keyIsMatcher { + keyMatcher = &EqualMatcher{Expected: matcher.Key} + } + + keys := reflect.ValueOf(actual).MapKeys() + for i := 0; i < len(keys); i++ { + success, err := keyMatcher.Match(keys[i].Interface()) + if err != nil { + return false, fmt.Errorf("HaveKey's key matcher failed with:\n%s%s", format.Indent, err.Error()) + } + if success { + return true, nil + } + } + + return false, nil +} + +func (matcher *HaveKeyMatcher) FailureMessage(actual interface{}) (message string) { + switch matcher.Key.(type) { + case omegaMatcher: + return format.Message(actual, "to have key matching", matcher.Key) + default: + return format.Message(actual, "to have key", matcher.Key) + } +} + +func (matcher *HaveKeyMatcher) NegatedFailureMessage(actual interface{}) (message string) { + switch matcher.Key.(type) { + case omegaMatcher: + return format.Message(actual, "not to have key matching", matcher.Key) + default: + return format.Message(actual, "not to have key", matcher.Key) + } +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/have_key_matcher_test.go new file mode 100644 index 000000000..c663e302b --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_key_matcher_test.go @@ -0,0 +1,73 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/matchers" +) + +var _ = Describe("HaveKey", func() { + var ( + stringKeys map[string]int + intKeys map[int]string + objKeys map[*myCustomType]string + + customA *myCustomType + customB *myCustomType + ) + BeforeEach(func() { + stringKeys = map[string]int{"foo": 2, "bar": 3} + intKeys = map[int]string{2: "foo", 3: "bar"} + + customA = &myCustomType{s: "a", n: 2, f: 2.3, arr: []string{"ice", "cream"}} + customB = &myCustomType{s: "b", n: 4, f: 3.1, arr: []string{"cake"}} + objKeys = map[*myCustomType]string{customA: "aardvark", customB: "kangaroo"} + }) + + Context("when passed a map", func() { + It("should do the right thing", func() { + Ω(stringKeys).Should(HaveKey("foo")) + Ω(stringKeys).ShouldNot(HaveKey("baz")) + + Ω(intKeys).Should(HaveKey(2)) + Ω(intKeys).ShouldNot(HaveKey(4)) + + Ω(objKeys).Should(HaveKey(customA)) + Ω(objKeys).Should(HaveKey(&myCustomType{s: "b", n: 4, f: 3.1, arr: []string{"cake"}})) + Ω(objKeys).ShouldNot(HaveKey(&myCustomType{s: "b", n: 4, f: 3.1, arr: []string{"apple", "pie"}})) + }) + }) + + Context("when passed a correctly typed nil", func() { + It("should operate succesfully on the passed in value", func() { + var nilMap map[int]string + Ω(nilMap).ShouldNot(HaveKey("foo")) + }) + }) + + Context("when the passed in key is actually a matcher", func() { + It("should pass each element through the matcher", func() { + Ω(stringKeys).Should(HaveKey(ContainSubstring("oo"))) + Ω(stringKeys).ShouldNot(HaveKey(ContainSubstring("foobar"))) + }) + + It("should fail if the matcher ever fails", func() { + actual := map[int]string{1: "a", 3: "b", 2: "c"} + success, err := (&HaveKeyMatcher{Key: ContainSubstring("ar")}).Match(actual) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) + + Context("when passed something that is not a map", func() { + It("should error", func() { + success, err := (&HaveKeyMatcher{Key: "foo"}).Match([]string{"foo"}) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&HaveKeyMatcher{Key: "foo"}).Match(nil) + 
Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go new file mode 100644 index 000000000..464ac187e --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go @@ -0,0 +1,73 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" + "reflect" +) + +type HaveKeyWithValueMatcher struct { + Key interface{} + Value interface{} +} + +func (matcher *HaveKeyWithValueMatcher) Match(actual interface{}) (success bool, err error) { + if !isMap(actual) { + return false, fmt.Errorf("HaveKeyWithValue matcher expects a map. Got:%s", format.Object(actual, 1)) + } + + keyMatcher, keyIsMatcher := matcher.Key.(omegaMatcher) + if !keyIsMatcher { + keyMatcher = &EqualMatcher{Expected: matcher.Key} + } + + valueMatcher, valueIsMatcher := matcher.Value.(omegaMatcher) + if !valueIsMatcher { + valueMatcher = &EqualMatcher{Expected: matcher.Value} + } + + keys := reflect.ValueOf(actual).MapKeys() + for i := 0; i < len(keys); i++ { + success, err := keyMatcher.Match(keys[i].Interface()) + if err != nil { + return false, fmt.Errorf("HaveKeyWithValue's key matcher failed with:\n%s%s", format.Indent, err.Error()) + } + if success { + actualValue := reflect.ValueOf(actual).MapIndex(keys[i]) + success, err := valueMatcher.Match(actualValue.Interface()) + if err != nil { + return false, fmt.Errorf("HaveKeyWithValue's value matcher failed with:\n%s%s", format.Indent, err.Error()) + } + return success, nil + } + } + + return false, nil +} + +func (matcher *HaveKeyWithValueMatcher) FailureMessage(actual interface{}) (message string) { + str := "to have {key: value}" + if _, ok := matcher.Key.(omegaMatcher); ok { + str += " matching" + } else if _, ok := matcher.Value.(omegaMatcher); ok { + str += " matching" + } + + expect := make(map[interface{}]interface{}, 1) + expect[matcher.Key] = matcher.Value + return format.Message(actual, str, expect) +} + +func (matcher *HaveKeyWithValueMatcher) NegatedFailureMessage(actual interface{}) (message string) { + kStr := "not to have key" + if _, ok := matcher.Key.(omegaMatcher); ok { + kStr = "not to have key matching" + } + + vStr := "or that key's value not be" + if _, ok := matcher.Value.(omegaMatcher); ok { + vStr = "or to have that key's value not matching" + } + + return format.Message(actual, kStr, matcher.Key, vStr, matcher.Value) +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher_test.go new file mode 100644 index 000000000..06a2242ae --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher_test.go @@ -0,0 +1,82 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("HaveKeyWithValue", func() { + var ( + stringKeys map[string]int + intKeys map[int]string + objKeys map[*myCustomType]*myCustomType + + customA *myCustomType + customB *myCustomType + ) + BeforeEach(func() { + stringKeys = map[string]int{"foo": 2, "bar": 3} + intKeys = map[int]string{2: "foo", 3: "bar"} + + customA = &myCustomType{s: "a", n: 2, f: 2.3, arr: []string{"ice", "cream"}} + customB = &myCustomType{s: "b", n: 4, f: 3.1, arr: []string{"cake"}} + objKeys = map[*myCustomType]*myCustomType{customA: customA, customB: customA} + }) + + Context("when passed a map", func() { + It("should do the right thing", func() { + Ω(stringKeys).Should(HaveKeyWithValue("foo", 2)) + Ω(stringKeys).ShouldNot(HaveKeyWithValue("foo", 1)) + Ω(stringKeys).ShouldNot(HaveKeyWithValue("baz", 2)) + Ω(stringKeys).ShouldNot(HaveKeyWithValue("baz", 1)) + + Ω(intKeys).Should(HaveKeyWithValue(2, "foo")) + Ω(intKeys).ShouldNot(HaveKeyWithValue(4, "foo")) + Ω(intKeys).ShouldNot(HaveKeyWithValue(2, "baz")) + + Ω(objKeys).Should(HaveKeyWithValue(customA, customA)) + Ω(objKeys).Should(HaveKeyWithValue(&myCustomType{s: "b", n: 4, f: 3.1, arr: []string{"cake"}}, &myCustomType{s: "a", n: 2, f: 2.3, arr: []string{"ice", "cream"}})) + Ω(objKeys).ShouldNot(HaveKeyWithValue(&myCustomType{s: "b", n: 4, f: 3.1, arr: []string{"apple", "pie"}}, customA)) + }) + }) + + Context("when passed a correctly typed nil", func() { + It("should operate succesfully on the passed in value", func() { + var nilMap map[int]string + Ω(nilMap).ShouldNot(HaveKeyWithValue("foo", "bar")) + }) + }) + + Context("when the passed in key or value is actually a matcher", func() { + It("should pass each element through the matcher", func() { + Ω(stringKeys).Should(HaveKeyWithValue(ContainSubstring("oo"), 2)) + Ω(intKeys).Should(HaveKeyWithValue(2, ContainSubstring("oo"))) + Ω(stringKeys).ShouldNot(HaveKeyWithValue(ContainSubstring("foobar"), 2)) + }) + + It("should fail if the matcher ever fails", func() { + actual := map[int]string{1: "a", 3: "b", 2: "c"} + success, err := (&HaveKeyWithValueMatcher{Key: ContainSubstring("ar"), Value: 2}).Match(actual) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + otherActual := map[string]int{"a": 1, "b": 2, "c": 3} + success, err = (&HaveKeyWithValueMatcher{Key: "a", Value: ContainSubstring("1")}).Match(otherActual) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) + + Context("when passed something that is not a map", func() { + It("should error", func() { + success, err := (&HaveKeyWithValueMatcher{Key: "foo", Value: "bar"}).Match([]string{"foo"}) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&HaveKeyWithValueMatcher{Key: "foo", Value: "bar"}).Match(nil) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go new file mode 100644 index 000000000..a18377557 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go @@ -0,0 +1,27 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" +) + +type HaveLenMatcher struct { + Count int +} + +func (matcher *HaveLenMatcher) Match(actual interface{}) (success bool, err error) { + length, ok := lengthOf(actual) + if !ok { + return false, fmt.Errorf("HaveLen matcher expects a string/array/map/channel/slice. 
Got:\n%s", format.Object(actual, 1)) + } + + return length == matcher.Count, nil +} + +func (matcher *HaveLenMatcher) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected\n%s\nto have length %d", format.Object(actual, 1), matcher.Count) +} + +func (matcher *HaveLenMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected\n%s\nnot to have length %d", format.Object(actual, 1), matcher.Count) +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_len_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/have_len_matcher_test.go new file mode 100644 index 000000000..1e6aa69d9 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_len_matcher_test.go @@ -0,0 +1,53 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/matchers" +) + +var _ = Describe("HaveLen", func() { + Context("when passed a supported type", func() { + It("should do the right thing", func() { + Ω("").Should(HaveLen(0)) + Ω("AA").Should(HaveLen(2)) + + Ω([0]int{}).Should(HaveLen(0)) + Ω([2]int{1, 2}).Should(HaveLen(2)) + + Ω([]int{}).Should(HaveLen(0)) + Ω([]int{1, 2, 3}).Should(HaveLen(3)) + + Ω(map[string]int{}).Should(HaveLen(0)) + Ω(map[string]int{"a": 1, "b": 2, "c": 3, "d": 4}).Should(HaveLen(4)) + + c := make(chan bool, 3) + Ω(c).Should(HaveLen(0)) + c <- true + c <- true + Ω(c).Should(HaveLen(2)) + }) + }) + + Context("when passed a correctly typed nil", func() { + It("should operate succesfully on the passed in value", func() { + var nilSlice []int + Ω(nilSlice).Should(HaveLen(0)) + + var nilMap map[int]string + Ω(nilMap).Should(HaveLen(0)) + }) + }) + + Context("when passed an unsupported type", func() { + It("should error", func() { + success, err := (&HaveLenMatcher{Count: 0}).Match(0) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&HaveLenMatcher{Count: 0}).Match(nil) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go new file mode 100644 index 000000000..ebdd71786 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go @@ -0,0 +1,33 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +type HaveOccurredMatcher struct { +} + +func (matcher *HaveOccurredMatcher) Match(actual interface{}) (success bool, err error) { + // is purely nil? + if actual == nil { + return false, nil + } + + // must be an 'error' type + if !isError(actual) { + return false, fmt.Errorf("Expected an error-type. Got:\n%s", format.Object(actual, 1)) + } + + // must be non-nil (or a pointer to a non-nil) + return !isNil(actual), nil +} + +func (matcher *HaveOccurredMatcher) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected an error to have occurred. 
Got:\n%s", format.Object(actual, 1)) +} + +func (matcher *HaveOccurredMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected error:\n%s\n%s\n%s", format.Object(actual, 1), format.IndentString(actual.(error).Error(), 1), "not to have occurred") +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher_test.go new file mode 100644 index 000000000..009e23e5f --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher_test.go @@ -0,0 +1,58 @@ +package matchers_test + +import ( + "errors" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/matchers" +) + +type CustomErr struct { + msg string +} + +func (e *CustomErr) Error() string { + return e.msg +} + +var _ = Describe("HaveOccurred", func() { + It("should succeed if matching an error", func() { + Ω(errors.New("Foo")).Should(HaveOccurred()) + }) + + It("should not succeed with nil", func() { + Ω(nil).ShouldNot(HaveOccurred()) + }) + + It("should only support errors and nil", func() { + success, err := (&HaveOccurredMatcher{}).Match("foo") + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&HaveOccurredMatcher{}).Match("") + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + + It("doesn't support non-error type", func() { + success, err := (&HaveOccurredMatcher{}).Match(AnyType{}) + Ω(success).Should(BeFalse()) + Ω(err).Should(MatchError("Expected an error-type. Got:\n : {}")) + }) + + It("doesn't support non-error pointer type", func() { + success, err := (&HaveOccurredMatcher{}).Match(&AnyType{}) + Ω(success).Should(BeFalse()) + Ω(err).Should(MatchError(MatchRegexp(`Expected an error-type. Got:\n <*matchers_test.AnyType | 0x[[:xdigit:]]+>: {}`))) + }) + + It("should succeed with pointer types that conform to error interface", func() { + err := &CustomErr{"ohai"} + Ω(err).Should(HaveOccurred()) + }) + + It("should not succeed with nil pointers to types that conform to error interface", func() { + var err *CustomErr = nil + Ω(err).ShouldNot(HaveOccurred()) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go new file mode 100644 index 000000000..8b63a8999 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go @@ -0,0 +1,35 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" +) + +type HavePrefixMatcher struct { + Prefix string + Args []interface{} +} + +func (matcher *HavePrefixMatcher) Match(actual interface{}) (success bool, err error) { + actualString, ok := toString(actual) + if !ok { + return false, fmt.Errorf("HavePrefix matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1)) + } + prefix := matcher.prefix() + return len(actualString) >= len(prefix) && actualString[0:len(prefix)] == prefix, nil +} + +func (matcher *HavePrefixMatcher) prefix() string { + if len(matcher.Args) > 0 { + return fmt.Sprintf(matcher.Prefix, matcher.Args...) 
+ } + return matcher.Prefix +} + +func (matcher *HavePrefixMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to have prefix", matcher.prefix()) +} + +func (matcher *HavePrefixMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to have prefix", matcher.prefix()) +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher_test.go new file mode 100644 index 000000000..bec3f9758 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher_test.go @@ -0,0 +1,36 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/matchers" +) + +var _ = Describe("HavePrefixMatcher", func() { + Context("when actual is a string", func() { + It("should match a string prefix", func() { + Ω("Ab").Should(HavePrefix("A")) + Ω("A").ShouldNot(HavePrefix("Ab")) + }) + }) + + Context("when the matcher is called with multiple arguments", func() { + It("should pass the string and arguments to sprintf", func() { + Ω("C3PO").Should(HavePrefix("C%dP", 3)) + }) + }) + + Context("when actual is a stringer", func() { + It("should call the stringer and match against the returned string", func() { + Ω(&myStringer{a: "Ab"}).Should(HavePrefix("A")) + }) + }) + + Context("when actual is neither a string nor a stringer", func() { + It("should error", func() { + success, err := (&HavePrefixMatcher{Prefix: "2"}).Match(2) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go new file mode 100644 index 000000000..afc78fc90 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go @@ -0,0 +1,35 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" +) + +type HaveSuffixMatcher struct { + Suffix string + Args []interface{} +} + +func (matcher *HaveSuffixMatcher) Match(actual interface{}) (success bool, err error) { + actualString, ok := toString(actual) + if !ok { + return false, fmt.Errorf("HaveSuffix matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1)) + } + suffix := matcher.suffix() + return len(actualString) >= len(suffix) && actualString[len(actualString)-len(suffix):] == suffix, nil +} + +func (matcher *HaveSuffixMatcher) suffix() string { + if len(matcher.Args) > 0 { + return fmt.Sprintf(matcher.Suffix, matcher.Args...) + } + return matcher.Suffix +} + +func (matcher *HaveSuffixMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to have suffix", matcher.suffix()) +} + +func (matcher *HaveSuffixMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to have suffix", matcher.suffix()) +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher_test.go new file mode 100644 index 000000000..72e8975ba --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher_test.go @@ -0,0 +1,36 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("HaveSuffixMatcher", func() { + Context("when actual is a string", func() { + It("should match a string suffix", func() { + Ω("Ab").Should(HaveSuffix("b")) + Ω("A").ShouldNot(HaveSuffix("Ab")) + }) + }) + + Context("when the matcher is called with multiple arguments", func() { + It("should pass the string and arguments to sprintf", func() { + Ω("C3PO").Should(HaveSuffix("%dPO", 3)) + }) + }) + + Context("when actual is a stringer", func() { + It("should call the stringer and match against the returned string", func() { + Ω(&myStringer{a: "Ab"}).Should(HaveSuffix("b")) + }) + }) + + Context("when actual is neither a string nor a stringer", func() { + It("should error", func() { + success, err := (&HaveSuffixMatcher{Suffix: "2"}).Match(2) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go new file mode 100644 index 000000000..03cdf0458 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go @@ -0,0 +1,50 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" + "reflect" +) + +type MatchErrorMatcher struct { + Expected interface{} +} + +func (matcher *MatchErrorMatcher) Match(actual interface{}) (success bool, err error) { + if isNil(actual) { + return false, fmt.Errorf("Expected an error, got nil") + } + + if !isError(actual) { + return false, fmt.Errorf("Expected an error. Got:\n%s", format.Object(actual, 1)) + } + + actualErr := actual.(error) + + if isString(matcher.Expected) { + return reflect.DeepEqual(actualErr.Error(), matcher.Expected), nil + } + + if isError(matcher.Expected) { + return reflect.DeepEqual(actualErr, matcher.Expected), nil + } + + var subMatcher omegaMatcher + var hasSubMatcher bool + if matcher.Expected != nil { + subMatcher, hasSubMatcher = (matcher.Expected).(omegaMatcher) + if hasSubMatcher { + return subMatcher.Match(actualErr.Error()) + } + } + + return false, fmt.Errorf("MatchError must be passed an error, string, or Matcher that can match on strings. Got:\n%s", format.Object(matcher.Expected, 1)) +} + +func (matcher *MatchErrorMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to match error", matcher.Expected) +} + +func (matcher *MatchErrorMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to match error", matcher.Expected) +} diff --git a/vendor/github.com/onsi/gomega/matchers/match_error_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/match_error_matcher_test.go new file mode 100644 index 000000000..338b51295 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_error_matcher_test.go @@ -0,0 +1,93 @@ +package matchers_test + +import ( + "errors" + "fmt" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +type CustomError struct { +} + +func (c CustomError) Error() string { + return "an error" +} + +var _ = Describe("MatchErrorMatcher", func() { + Context("When asserting against an error", func() { + It("should succeed when matching with an error", func() { + err := errors.New("an error") + fmtErr := fmt.Errorf("an error") + customErr := CustomError{} + + Ω(err).Should(MatchError(errors.New("an error"))) + Ω(err).ShouldNot(MatchError(errors.New("another error"))) + + Ω(fmtErr).Should(MatchError(errors.New("an error"))) + Ω(customErr).Should(MatchError(CustomError{})) + }) + + It("should succeed when matching with a string", func() { + err := errors.New("an error") + fmtErr := fmt.Errorf("an error") + customErr := CustomError{} + + Ω(err).Should(MatchError("an error")) + Ω(err).ShouldNot(MatchError("another error")) + + Ω(fmtErr).Should(MatchError("an error")) + Ω(customErr).Should(MatchError("an error")) + }) + + Context("when passed a matcher", func() { + It("should pass if the matcher passes against the error string", func() { + err := errors.New("error 123 abc") + + Ω(err).Should(MatchError(MatchRegexp(`\d{3}`))) + }) + + It("should fail if the matcher fails against the error string", func() { + err := errors.New("no digits") + Ω(err).ShouldNot(MatchError(MatchRegexp(`\d`))) + }) + }) + + It("should fail when passed anything else", func() { + actualErr := errors.New("an error") + _, err := (&MatchErrorMatcher{ + Expected: []byte("an error"), + }).Match(actualErr) + Ω(err).Should(HaveOccurred()) + + _, err = (&MatchErrorMatcher{ + Expected: 3, + }).Match(actualErr) + Ω(err).Should(HaveOccurred()) + }) + }) + + Context("when passed nil", func() { + It("should fail", func() { + _, err := (&MatchErrorMatcher{ + Expected: "an error", + }).Match(nil) + Ω(err).Should(HaveOccurred()) + }) + }) + + Context("when passed a non-error", func() { + It("should fail", func() { + _, err := (&MatchErrorMatcher{ + Expected: "an error", + }).Match("an error") + Ω(err).Should(HaveOccurred()) + + _, err = (&MatchErrorMatcher{ + Expected: "an error", + }).Match(3) + Ω(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go new file mode 100644 index 000000000..499bb5830 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go @@ -0,0 +1,135 @@ +package matchers + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "strings" + + "github.com/onsi/gomega/format" +) + +type MatchJSONMatcher struct { + JSONToMatch interface{} + firstFailurePath []interface{} +} + +func (matcher *MatchJSONMatcher) Match(actual interface{}) (success bool, err error) { + actualString, expectedString, err := matcher.prettyPrint(actual) + if err != nil { + return false, err + } + + var aval interface{} + var eval interface{} + + // this is guarded by prettyPrint + json.Unmarshal([]byte(actualString), &aval) + json.Unmarshal([]byte(expectedString), &eval) + var equal bool + equal, matcher.firstFailurePath = deepEqual(aval, eval) + return equal, nil +} + +func (matcher *MatchJSONMatcher) FailureMessage(actual interface{}) (message string) { + actualString, expectedString, _ := matcher.prettyPrint(actual) + return formattedMessage(format.Message(actualString, "to match JSON of", expectedString), matcher.firstFailurePath) +} + +func (matcher *MatchJSONMatcher) NegatedFailureMessage(actual interface{}) (message string) { + actualString, 
expectedString, _ := matcher.prettyPrint(actual) + return formattedMessage(format.Message(actualString, "not to match JSON of", expectedString), matcher.firstFailurePath) +} + +func formattedMessage(comparisonMessage string, failurePath []interface{}) string { + var diffMessage string + if len(failurePath) == 0 { + diffMessage = "" + } else { + diffMessage = fmt.Sprintf("\n\nfirst mismatched key: %s", formattedFailurePath(failurePath)) + } + return fmt.Sprintf("%s%s", comparisonMessage, diffMessage) +} + +func formattedFailurePath(failurePath []interface{}) string { + formattedPaths := []string{} + for i := len(failurePath) - 1; i >= 0; i-- { + switch p := failurePath[i].(type) { + case int: + formattedPaths = append(formattedPaths, fmt.Sprintf(`[%d]`, p)) + default: + if i != len(failurePath)-1 { + formattedPaths = append(formattedPaths, ".") + } + formattedPaths = append(formattedPaths, fmt.Sprintf(`"%s"`, p)) + } + } + return strings.Join(formattedPaths, "") +} + +func (matcher *MatchJSONMatcher) prettyPrint(actual interface{}) (actualFormatted, expectedFormatted string, err error) { + actualString, ok := toString(actual) + if !ok { + return "", "", fmt.Errorf("MatchJSONMatcher matcher requires a string, stringer, or []byte. Got actual:\n%s", format.Object(actual, 1)) + } + expectedString, ok := toString(matcher.JSONToMatch) + if !ok { + return "", "", fmt.Errorf("MatchJSONMatcher matcher requires a string, stringer, or []byte. Got expected:\n%s", format.Object(matcher.JSONToMatch, 1)) + } + + abuf := new(bytes.Buffer) + ebuf := new(bytes.Buffer) + + if err := json.Indent(abuf, []byte(actualString), "", " "); err != nil { + return "", "", fmt.Errorf("Actual '%s' should be valid JSON, but it is not.\nUnderlying error:%s", actualString, err) + } + + if err := json.Indent(ebuf, []byte(expectedString), "", " "); err != nil { + return "", "", fmt.Errorf("Expected '%s' should be valid JSON, but it is not.\nUnderlying error:%s", expectedString, err) + } + + return abuf.String(), ebuf.String(), nil +} + +func deepEqual(a interface{}, b interface{}) (bool, []interface{}) { + var errorPath []interface{} + if reflect.TypeOf(a) != reflect.TypeOf(b) { + return false, errorPath + } + + switch a.(type) { + case []interface{}: + if len(a.([]interface{})) != len(b.([]interface{})) { + return false, errorPath + } + + for i, v := range a.([]interface{}) { + elementEqual, keyPath := deepEqual(v, b.([]interface{})[i]) + if !elementEqual { + return false, append(keyPath, i) + } + } + return true, errorPath + + case map[string]interface{}: + if len(a.(map[string]interface{})) != len(b.(map[string]interface{})) { + return false, errorPath + } + + for k, v1 := range a.(map[string]interface{}) { + v2, ok := b.(map[string]interface{})[k] + if !ok { + return false, errorPath + } + elementEqual, keyPath := deepEqual(v1, v2) + if !elementEqual { + return false, append(keyPath, k) + } + } + return true, errorPath + + default: + return a == b, errorPath + } +} diff --git a/vendor/github.com/onsi/gomega/matchers/match_json_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/match_json_matcher_test.go new file mode 100644 index 000000000..459448391 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_json_matcher_test.go @@ -0,0 +1,97 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("MatchJSONMatcher", func() { + Context("When passed stringifiables", func() { + It("should succeed if the JSON matches", func() { + Ω("{}").Should(MatchJSON("{}")) + Ω(`{"a":1}`).Should(MatchJSON(`{"a":1}`)) + Ω(`{ + "a":1 + }`).Should(MatchJSON(`{"a":1}`)) + Ω(`{"a":1, "b":2}`).Should(MatchJSON(`{"b":2, "a":1}`)) + Ω(`{"a":1}`).ShouldNot(MatchJSON(`{"b":2, "a":1}`)) + + Ω(`{"a":"a", "b":"b"}`).ShouldNot(MatchJSON(`{"a":"a", "b":"b", "c":"c"}`)) + Ω(`{"a":"a", "b":"b", "c":"c"}`).ShouldNot(MatchJSON(`{"a":"a", "b":"b"}`)) + + Ω(`{"a":null, "b":null}`).ShouldNot(MatchJSON(`{"c":"c", "d":"d"}`)) + Ω(`{"a":null, "b":null, "c":null}`).ShouldNot(MatchJSON(`{"a":null, "b":null, "d":null}`)) + }) + + It("should work with byte arrays", func() { + Ω([]byte("{}")).Should(MatchJSON([]byte("{}"))) + Ω("{}").Should(MatchJSON([]byte("{}"))) + Ω([]byte("{}")).Should(MatchJSON("{}")) + }) + }) + + Context("when a key mismatch is found", func() { + It("reports the first found mismatch", func() { + subject := MatchJSONMatcher{JSONToMatch: `5`} + actual := `7` + subject.Match(actual) + + failureMessage := subject.FailureMessage(`7`) + Ω(failureMessage).ToNot(ContainSubstring("first mismatched key")) + + subject = MatchJSONMatcher{JSONToMatch: `{"a": 1, "b.g": {"c": 2, "1": ["hello", "see ya"]}}`} + actual = `{"a": 1, "b.g": {"c": 2, "1": ["hello", "goodbye"]}}` + subject.Match(actual) + + failureMessage = subject.FailureMessage(actual) + Ω(failureMessage).To(ContainSubstring(`first mismatched key: "b.g"."1"[1]`)) + }) + }) + + Context("when the expected is not valid JSON", func() { + It("should error and explain why", func() { + success, err := (&MatchJSONMatcher{JSONToMatch: `{}`}).Match(`oops`) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + Ω(err.Error()).Should(ContainSubstring("Actual 'oops' should be valid JSON")) + }) + }) + + Context("when the actual is not valid JSON", func() { + It("should error and explain why", func() { + success, err := (&MatchJSONMatcher{JSONToMatch: `oops`}).Match(`{}`) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + Ω(err.Error()).Should(ContainSubstring("Expected 'oops' should be valid JSON")) + }) + }) + + Context("when the expected is neither a string nor a stringer nor a byte array", func() { + It("should error", func() { + success, err := (&MatchJSONMatcher{JSONToMatch: 2}).Match("{}") + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + Ω(err.Error()).Should(ContainSubstring("MatchJSONMatcher matcher requires a string, stringer, or []byte. Got expected:\n : 2")) + + success, err = (&MatchJSONMatcher{JSONToMatch: nil}).Match("{}") + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + Ω(err.Error()).Should(ContainSubstring("MatchJSONMatcher matcher requires a string, stringer, or []byte. Got expected:\n : nil")) + }) + }) + + Context("when the actual is neither a string nor a stringer nor a byte array", func() { + It("should error", func() { + success, err := (&MatchJSONMatcher{JSONToMatch: "{}"}).Match(2) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + Ω(err.Error()).Should(ContainSubstring("MatchJSONMatcher matcher requires a string, stringer, or []byte. Got actual:\n : 2")) + + success, err = (&MatchJSONMatcher{JSONToMatch: "{}"}).Match(nil) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + Ω(err.Error()).Should(ContainSubstring("MatchJSONMatcher matcher requires a string, stringer, or []byte. 
Got actual:\n <nil>: nil")) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go new file mode 100644 index 000000000..7ca79a15b --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go @@ -0,0 +1,42 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" + "regexp" +) + +type MatchRegexpMatcher struct { + Regexp string + Args []interface{} +} + +func (matcher *MatchRegexpMatcher) Match(actual interface{}) (success bool, err error) { + actualString, ok := toString(actual) + if !ok { + return false, fmt.Errorf("RegExp matcher requires a string or stringer.\nGot:%s", format.Object(actual, 1)) + } + + match, err := regexp.Match(matcher.regexp(), []byte(actualString)) + if err != nil { + return false, fmt.Errorf("RegExp match failed to compile with error:\n\t%s", err.Error()) + } + + return match, nil +} + +func (matcher *MatchRegexpMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to match regular expression", matcher.regexp()) +} + +func (matcher *MatchRegexpMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to match regular expression", matcher.regexp()) +} + +func (matcher *MatchRegexpMatcher) regexp() string { + re := matcher.Regexp + if len(matcher.Args) > 0 { + re = fmt.Sprintf(matcher.Regexp, matcher.Args...) + } + return re +} diff --git a/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher_test.go new file mode 100644 index 000000000..bb521cce3 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher_test.go @@ -0,0 +1,44 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("MatchRegexp", func() { + Context("when actual is a string", func() { + It("should match against the string", func() { + Ω(" a2!bla").Should(MatchRegexp(`\d!`)) + Ω(" a2!bla").ShouldNot(MatchRegexp(`[A-Z]`)) + }) + }) + + Context("when actual is a stringer", func() { + It("should call the stringer and match agains the returned string", func() { + Ω(&myStringer{a: "Abc3"}).Should(MatchRegexp(`[A-Z][a-z]+\d`)) + }) + }) + + Context("when the matcher is called with multiple arguments", func() { + It("should pass the string and arguments to sprintf", func() { + Ω(" a23!bla").Should(MatchRegexp(`\d%d!`, 3)) + }) + }) + + Context("when actual is neither a string nor a stringer", func() { + It("should error", func() { + success, err := (&MatchRegexpMatcher{Regexp: `\d`}).Match(2) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) + + Context("when the passed in regexp fails to compile", func() { + It("should error", func() { + success, err := (&MatchRegexpMatcher{Regexp: "("}).Match("Foo") + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go new file mode 100644 index 000000000..da2656290 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go @@ -0,0 +1,131 @@ +package matchers + +import ( + "bytes" + "encoding/xml" + "errors" + "fmt" + "io" + "reflect" + "strings" + + "github.com/onsi/gomega/format" + "golang.org/x/net/html/charset" +) + +type MatchXMLMatcher struct { + XMLToMatch interface{} +} + +func (matcher *MatchXMLMatcher) Match(actual interface{}) (success bool, err error) { + actualString, expectedString, err := matcher.formattedPrint(actual) + if err != nil { + return false, err + } + + aval, err := parseXmlContent(actualString) + if err != nil { + return false, fmt.Errorf("Actual '%s' should be valid XML, but it is not.\nUnderlying error:%s", actualString, err) + } + + eval, err := parseXmlContent(expectedString) + if err != nil { + return false, fmt.Errorf("Expected '%s' should be valid XML, but it is not.\nUnderlying error:%s", expectedString, err) + } + + return reflect.DeepEqual(aval, eval), nil +} + +func (matcher *MatchXMLMatcher) FailureMessage(actual interface{}) (message string) { + actualString, expectedString, _ := matcher.formattedPrint(actual) + return fmt.Sprintf("Expected\n%s\nto match XML of\n%s", actualString, expectedString) +} + +func (matcher *MatchXMLMatcher) NegatedFailureMessage(actual interface{}) (message string) { + actualString, expectedString, _ := matcher.formattedPrint(actual) + return fmt.Sprintf("Expected\n%s\nnot to match XML of\n%s", actualString, expectedString) +} + +func (matcher *MatchXMLMatcher) formattedPrint(actual interface{}) (actualString, expectedString string, err error) { + var ok bool + actualString, ok = toString(actual) + if !ok { + return "", "", fmt.Errorf("MatchXMLMatcher matcher requires a string, stringer, or []byte. Got actual:\n%s", format.Object(actual, 1)) + } + expectedString, ok = toString(matcher.XMLToMatch) + if !ok { + return "", "", fmt.Errorf("MatchXMLMatcher matcher requires a string, stringer, or []byte. 
Got expected:\n%s", format.Object(matcher.XMLToMatch, 1)) + } + return actualString, expectedString, nil +} + +func parseXmlContent(content string) (*xmlNode, error) { + allNodes := []*xmlNode{} + + dec := newXmlDecoder(strings.NewReader(content)) + for { + tok, err := dec.Token() + if err != nil { + if err == io.EOF { + break + } + return nil, fmt.Errorf("failed to decode next token: %v", err) + } + + lastNodeIndex := len(allNodes) - 1 + var lastNode *xmlNode + if len(allNodes) > 0 { + lastNode = allNodes[lastNodeIndex] + } else { + lastNode = &xmlNode{} + } + + switch tok := tok.(type) { + case xml.StartElement: + allNodes = append(allNodes, &xmlNode{XMLName: tok.Name, XMLAttr: tok.Attr}) + case xml.EndElement: + if len(allNodes) > 1 { + allNodes[lastNodeIndex-1].Nodes = append(allNodes[lastNodeIndex-1].Nodes, lastNode) + allNodes = allNodes[:lastNodeIndex] + } + case xml.CharData: + lastNode.Content = append(lastNode.Content, tok.Copy()...) + case xml.Comment: + lastNode.Comments = append(lastNode.Comments, tok.Copy()) + case xml.ProcInst: + lastNode.ProcInsts = append(lastNode.ProcInsts, tok.Copy()) + } + } + + if len(allNodes) == 0 { + return nil, errors.New("found no nodes") + } + firstNode := allNodes[0] + trimParentNodesContentSpaces(firstNode) + + return firstNode, nil +} + +func newXmlDecoder(reader io.Reader) *xml.Decoder { + dec := xml.NewDecoder(reader) + dec.CharsetReader = charset.NewReaderLabel + return dec +} + +func trimParentNodesContentSpaces(node *xmlNode) { + if len(node.Nodes) > 0 { + node.Content = bytes.TrimSpace(node.Content) + for _, childNode := range node.Nodes { + trimParentNodesContentSpaces(childNode) + } + } +} + +type xmlNode struct { + XMLName xml.Name + Comments []xml.Comment + ProcInsts []xml.ProcInst + XMLAttr []xml.Attr + Content []byte + Nodes []*xmlNode +} diff --git a/vendor/github.com/onsi/gomega/matchers/match_xml_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/match_xml_matcher_test.go new file mode 100644 index 000000000..16c192240 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_xml_matcher_test.go @@ -0,0 +1,90 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("MatchXMLMatcher", func() { + + var ( + sample_01 = readFileContents("test_data/xml/sample_01.xml") + sample_02 = readFileContents("test_data/xml/sample_02.xml") + sample_03 = readFileContents("test_data/xml/sample_03.xml") + sample_04 = readFileContents("test_data/xml/sample_04.xml") + sample_05 = readFileContents("test_data/xml/sample_05.xml") + sample_06 = readFileContents("test_data/xml/sample_06.xml") + sample_07 = readFileContents("test_data/xml/sample_07.xml") + sample_08 = readFileContents("test_data/xml/sample_08.xml") + sample_09 = readFileContents("test_data/xml/sample_09.xml") + sample_10 = readFileContents("test_data/xml/sample_10.xml") + sample_11 = readFileContents("test_data/xml/sample_11.xml") + ) + + Context("When passed stringifiables", func() { + It("should succeed if the XML matches", func() { + Ω(sample_01).Should(MatchXML(sample_01)) // same XML + Ω(sample_01).Should(MatchXML(sample_02)) // same XML with blank lines + Ω(sample_01).Should(MatchXML(sample_03)) // same XML with different formatting + Ω(sample_01).ShouldNot(MatchXML(sample_04)) // same structures with different values + Ω(sample_01).ShouldNot(MatchXML(sample_05)) // different structures + Ω(sample_06).ShouldNot(MatchXML(sample_07)) // same xml names with different namespaces + Ω(sample_07).ShouldNot(MatchXML(sample_08)) // same structures with different values + Ω(sample_09).ShouldNot(MatchXML(sample_10)) // same structures with different attribute values + Ω(sample_11).Should(MatchXML(sample_11)) // with non UTF-8 encoding + }) + + It("should work with byte arrays", func() { + Ω([]byte(sample_01)).Should(MatchXML([]byte(sample_01))) + Ω([]byte(sample_01)).Should(MatchXML(sample_01)) + Ω(sample_01).Should(MatchXML([]byte(sample_01))) + }) + }) + + Context("when the expected is not valid XML", func() { + It("should error and explain why", func() { + success, err := (&MatchXMLMatcher{XMLToMatch: sample_01}).Match(`oops`) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + Ω(err.Error()).Should(ContainSubstring("Actual 'oops' should be valid XML")) + }) + }) + + Context("when the actual is not valid XML", func() { + It("should error and explain why", func() { + success, err := (&MatchXMLMatcher{XMLToMatch: `oops`}).Match(sample_01) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + Ω(err.Error()).Should(ContainSubstring("Expected 'oops' should be valid XML")) + }) + }) + + Context("when the expected is neither a string nor a stringer nor a byte array", func() { + It("should error", func() { + success, err := (&MatchXMLMatcher{XMLToMatch: 2}).Match(sample_01) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + Ω(err.Error()).Should(ContainSubstring("MatchXMLMatcher matcher requires a string, stringer, or []byte. Got expected:\n : 2")) + + success, err = (&MatchXMLMatcher{XMLToMatch: nil}).Match(sample_01) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + Ω(err.Error()).Should(ContainSubstring("MatchXMLMatcher matcher requires a string, stringer, or []byte. Got expected:\n : nil")) + }) + }) + + Context("when the actual is neither a string nor a stringer nor a byte array", func() { + It("should error", func() { + success, err := (&MatchXMLMatcher{XMLToMatch: sample_01}).Match(2) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + Ω(err.Error()).Should(ContainSubstring("MatchXMLMatcher matcher requires a string, stringer, or []byte. 
Got actual:\n <int>: 2")) + + success, err = (&MatchXMLMatcher{XMLToMatch: sample_01}).Match(nil) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + Ω(err.Error()).Should(ContainSubstring("MatchXMLMatcher matcher requires a string, stringer, or []byte. Got actual:\n <nil>: nil")) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go new file mode 100644 index 000000000..69fb51a85 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go @@ -0,0 +1,74 @@ +package matchers + +import ( + "fmt" + "reflect" + "strings" + + "github.com/onsi/gomega/format" + "gopkg.in/yaml.v2" +) + +type MatchYAMLMatcher struct { + YAMLToMatch interface{} +} + +func (matcher *MatchYAMLMatcher) Match(actual interface{}) (success bool, err error) { + actualString, expectedString, err := matcher.toStrings(actual) + if err != nil { + return false, err + } + + var aval interface{} + var eval interface{} + + if err := yaml.Unmarshal([]byte(actualString), &aval); err != nil { + return false, fmt.Errorf("Actual '%s' should be valid YAML, but it is not.\nUnderlying error:%s", actualString, err) + } + if err := yaml.Unmarshal([]byte(expectedString), &eval); err != nil { + return false, fmt.Errorf("Expected '%s' should be valid YAML, but it is not.\nUnderlying error:%s", expectedString, err) + } + + return reflect.DeepEqual(aval, eval), nil +} + +func (matcher *MatchYAMLMatcher) FailureMessage(actual interface{}) (message string) { + actualString, expectedString, _ := matcher.toNormalisedStrings(actual) + return format.Message(actualString, "to match YAML of", expectedString) +} + +func (matcher *MatchYAMLMatcher) NegatedFailureMessage(actual interface{}) (message string) { + actualString, expectedString, _ := matcher.toNormalisedStrings(actual) + return format.Message(actualString, "not to match YAML of", expectedString) +} + +func (matcher *MatchYAMLMatcher) toNormalisedStrings(actual interface{}) (actualFormatted, expectedFormatted string, err error) { + actualString, expectedString, err := matcher.toStrings(actual) + return normalise(actualString), normalise(expectedString), err +} + +func normalise(input string) string { + var val interface{} + err := yaml.Unmarshal([]byte(input), &val) + if err != nil { + panic(err) // guarded by Match + } + output, err := yaml.Marshal(val) + if err != nil { + panic(err) // guarded by Unmarshal + } + return strings.TrimSpace(string(output)) +} + +func (matcher *MatchYAMLMatcher) toStrings(actual interface{}) (actualFormatted, expectedFormatted string, err error) { + actualString, ok := toString(actual) + if !ok { + return "", "", fmt.Errorf("MatchYAMLMatcher matcher requires a string, stringer, or []byte. Got actual:\n%s", format.Object(actual, 1)) + } + expectedString, ok := toString(matcher.YAMLToMatch) + if !ok { + return "", "", fmt.Errorf("MatchYAMLMatcher matcher requires a string, stringer, or []byte. Got expected:\n%s", format.Object(matcher.YAMLToMatch, 1)) + } + + return actualString, expectedString, nil +} diff --git a/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher_test.go new file mode 100644 index 000000000..8e63de19e --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher_test.go @@ -0,0 +1,94 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("MatchYAMLMatcher", func() { + Context("When passed stringifiables", func() { + It("should succeed if the YAML matches", func() { + Expect("---").Should(MatchYAML("")) + Expect("a: 1").Should(MatchYAML(`{"a":1}`)) + Expect("a: 1\nb: 2").Should(MatchYAML(`{"b":2, "a":1}`)) + }) + + It("should explain if the YAML does not match when it should", func() { + message := (&MatchYAMLMatcher{YAMLToMatch: "a: 1"}).FailureMessage("b: 2") + Expect(message).To(MatchRegexp(`Expected\s+: b: 2\s+to match YAML of\s+: a: 1`)) + }) + + It("should normalise the expected and actual when explaining if the YAML does not match when it should", func() { + message := (&MatchYAMLMatcher{YAMLToMatch: "a: 'one'"}).FailureMessage("{b: two}") + Expect(message).To(MatchRegexp(`Expected\s+: b: two\s+to match YAML of\s+: a: one`)) + }) + + It("should explain if the YAML matches when it should not", func() { + message := (&MatchYAMLMatcher{YAMLToMatch: "a: 1"}).NegatedFailureMessage("a: 1") + Expect(message).To(MatchRegexp(`Expected\s+: a: 1\s+not to match YAML of\s+: a: 1`)) + }) + + It("should normalise the expected and actual when explaining if the YAML matches when it should not", func() { + message := (&MatchYAMLMatcher{YAMLToMatch: "a: 'one'"}).NegatedFailureMessage("{a: one}") + Expect(message).To(MatchRegexp(`Expected\s+: a: one\s+not to match YAML of\s+: a: one`)) + }) + + It("should fail if the YAML does not match", func() { + Expect("a: 1").ShouldNot(MatchYAML(`{"b":2, "a":1}`)) + }) + + It("should work with byte arrays", func() { + Expect([]byte("a: 1")).Should(MatchYAML([]byte("a: 1"))) + Expect("a: 1").Should(MatchYAML([]byte("a: 1"))) + Expect([]byte("a: 1")).Should(MatchYAML("a: 1")) + }) + }) + + Context("when the expected is not valid YAML", func() { + It("should error and explain why", func() { + success, err := (&MatchYAMLMatcher{YAMLToMatch: ""}).Match("good:\nbad") + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("Actual 'good:\nbad' should be valid YAML")) + }) + }) + + Context("when the actual is not valid YAML", func() { + It("should error and explain why", func() { + success, err := (&MatchYAMLMatcher{YAMLToMatch: "good:\nbad"}).Match("") + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("Expected 'good:\nbad' should be valid YAML")) + }) + }) + + Context("when the expected is neither a string nor a stringer nor a byte array", func() { + It("should error", func() { + success, err := (&MatchYAMLMatcher{YAMLToMatch: 2}).Match("") + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("MatchYAMLMatcher matcher requires a string, stringer, or []byte. Got expected:\n : 2")) + + success, err = (&MatchYAMLMatcher{YAMLToMatch: nil}).Match("") + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("MatchYAMLMatcher matcher requires a string, stringer, or []byte. Got expected:\n : nil")) + }) + }) + + Context("when the actual is neither a string nor a stringer nor a byte array", func() { + It("should error", func() { + success, err := (&MatchYAMLMatcher{YAMLToMatch: ""}).Match(2) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("MatchYAMLMatcher matcher requires a string, stringer, or []byte. 
Got actual:\n <int>: 2")) + + success, err = (&MatchYAMLMatcher{YAMLToMatch: ""}).Match(nil) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("MatchYAMLMatcher matcher requires a string, stringer, or []byte. Got actual:\n <nil>: nil")) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/matcher_tests_suite_test.go b/vendor/github.com/onsi/gomega/matchers/matcher_tests_suite_test.go new file mode 100644 index 000000000..b5f76c995 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/matcher_tests_suite_test.go @@ -0,0 +1,50 @@ +package matchers_test + +import ( + "fmt" + "io/ioutil" + "os" + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +type myStringer struct { + a string +} + +func (s *myStringer) String() string { + return s.a +} + +type StringAlias string + +type myCustomType struct { + s string + n int + f float32 + arr []string +} + +func Test(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Gomega Matchers") +} + +func readFileContents(filePath string) []byte { + f := openFile(filePath) + b, err := ioutil.ReadAll(f) + if err != nil { + panic(fmt.Errorf("failed to read file contents: %v", err)) + } + return b +} + +func openFile(filePath string) *os.File { + f, err := os.Open(filePath) + if err != nil { + panic(fmt.Errorf("failed to open file: %v", err)) + } + return f +} diff --git a/vendor/github.com/onsi/gomega/matchers/not.go b/vendor/github.com/onsi/gomega/matchers/not.go new file mode 100644 index 000000000..2c91670bd --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/not.go @@ -0,0 +1,30 @@ +package matchers + +import ( + "github.com/onsi/gomega/internal/oraclematcher" + "github.com/onsi/gomega/types" +) + +type NotMatcher struct { + Matcher types.GomegaMatcher +} + +func (m *NotMatcher) Match(actual interface{}) (bool, error) { + success, err := m.Matcher.Match(actual) + if err != nil { + return false, err + } + return !success, nil +} + +func (m *NotMatcher) FailureMessage(actual interface{}) (message string) { + return m.Matcher.NegatedFailureMessage(actual) // works beautifully +} + +func (m *NotMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return m.Matcher.FailureMessage(actual) // works beautifully +} + +func (m *NotMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + return oraclematcher.MatchMayChangeInTheFuture(m.Matcher, actual) // just return m.Matcher's value +} diff --git a/vendor/github.com/onsi/gomega/matchers/not_test.go b/vendor/github.com/onsi/gomega/matchers/not_test.go new file mode 100644 index 000000000..b3c1fdbf0 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/not_test.go @@ -0,0 +1,57 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("NotMatcher", func() { + Context("basic examples", func() { + It("works", func() { + Expect(input).To(Not(false1)) + Expect(input).To(Not(Not(true2))) + Expect(input).ToNot(Not(true3)) + Expect(input).ToNot(Not(Not(false1))) + Expect(input).To(Not(Not(Not(false2)))) + }) + }) + + Context("De Morgan's laws", func() { + It("~(A && B) == ~A || ~B", func() { + Expect(input).To(Not(And(false1, false2))) + Expect(input).To(Or(Not(false1), Not(false2))) + }) + It("~(A || B) == ~A && ~B", func() { + Expect(input).To(Not(Or(false1, false2))) + Expect(input).To(And(Not(false1), Not(false2))) + }) + }) + + Context("failure messages are opposite of original matchers' failure messages", func() { + Context("when match fails", func() { + It("gives a descriptive message", func() { + verifyFailureMessage(Not(HaveLen(2)), input, "not to have length 2") + }) + }) + + Context("when match succeeds, but expected it to fail", func() { + It("gives a descriptive message", func() { + verifyFailureMessage(Not(Not(HaveLen(3))), input, "to have length 3") + }) + }) + }) + + Context("MatchMayChangeInTheFuture()", func() { + It("Propagates value from wrapped matcher", func() { + m := Not(Or()) // an empty Or() always returns false, and indicates it cannot change + Expect(m.Match("anything")).To(BeTrue()) + Expect(m.(*NotMatcher).MatchMayChangeInTheFuture("anything")).To(BeFalse()) + }) + It("Defaults to true", func() { + m := Not(Equal(1)) // Equal does not have this method + Expect(m.Match(2)).To(BeTrue()) + Expect(m.(*NotMatcher).MatchMayChangeInTheFuture(2)).To(BeTrue()) // defaults to true + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/or.go b/vendor/github.com/onsi/gomega/matchers/or.go new file mode 100644 index 000000000..3bf799800 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/or.go @@ -0,0 +1,67 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" + "github.com/onsi/gomega/internal/oraclematcher" + "github.com/onsi/gomega/types" +) + +type OrMatcher struct { + Matchers []types.GomegaMatcher + + // state + firstSuccessfulMatcher types.GomegaMatcher +} + +func (m *OrMatcher) Match(actual interface{}) (success bool, err error) { + m.firstSuccessfulMatcher = nil + for _, matcher := range m.Matchers { + success, err := matcher.Match(actual) + if err != nil { + return false, err + } + if success { + m.firstSuccessfulMatcher = matcher + return true, nil + } + } + return false, nil +} + +func (m *OrMatcher) FailureMessage(actual interface{}) (message string) { + // not the most beautiful list of matchers, but not bad either... + return format.Message(actual, fmt.Sprintf("To satisfy at least one of these matchers: %s", m.Matchers)) +} + +func (m *OrMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return m.firstSuccessfulMatcher.NegatedFailureMessage(actual) +} + +func (m *OrMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + /* + Example with 3 matchers: A, B, C + + Match evaluates them: F, T, => T + So match is currently T, what should MatchMayChangeInTheFuture() return? + Seems like it only depends on B, since currently B MUST change to allow the result to become F + + Match eval: F, F, F => F + So match is currently F, what should MatchMayChangeInTheFuture() return? + Seems to depend on ANY of them being able to change to T. + */ + + if m.firstSuccessfulMatcher != nil { + // one of the matchers succeeded.. 
it must be able to change in order to affect the result
+		return oraclematcher.MatchMayChangeInTheFuture(m.firstSuccessfulMatcher, actual)
+	} else {
+		// so all matchers failed. Any one of them changing would change the result.
+		for _, matcher := range m.Matchers {
+			if oraclematcher.MatchMayChangeInTheFuture(matcher, actual) {
+				return true
+			}
+		}
+		return false // none of them were going to change
+	}
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/or_test.go b/vendor/github.com/onsi/gomega/matchers/or_test.go
new file mode 100644
index 000000000..9589a174d
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/or_test.go
@@ -0,0 +1,85 @@
+package matchers_test
+
+import (
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	. "github.com/onsi/gomega/matchers"
+)
+
+var _ = Describe("OrMatcher", func() {
+	It("works with positive cases", func() {
+		Expect(input).To(Or(true1))
+		Expect(input).To(Or(true1, true2))
+		Expect(input).To(Or(true1, false1))
+		Expect(input).To(Or(false1, true2))
+		Expect(input).To(Or(true1, true2, true3))
+		Expect(input).To(Or(true1, true2, false3))
+		Expect(input).To(Or(true1, false2, true3))
+		Expect(input).To(Or(false1, true2, true3))
+		Expect(input).To(Or(true1, false2, false3))
+		Expect(input).To(Or(false1, false2, true3))
+
+		// use alias
+		Expect(input).To(SatisfyAny(false1, false2, true3))
+	})
+
+	It("works with negative cases", func() {
+		Expect(input).ToNot(Or())
+		Expect(input).ToNot(Or(false1))
+		Expect(input).ToNot(Or(false1, false2))
+		Expect(input).ToNot(Or(false1, false2, false3))
+	})
+
+	Context("failure messages", func() {
+		Context("when match fails", func() {
+			It("gives a descriptive message", func() {
+				verifyFailureMessage(Or(false1, false2), input,
+					"To satisfy at least one of these matchers: [%!s(*matchers.HaveLenMatcher=&{1}) %!s(*matchers.EqualMatcher=&{hip})]")
+			})
+		})
+
+		Context("when match succeeds, but expected it to fail", func() {
+			It("gives a descriptive message", func() {
+				verifyFailureMessage(Not(Or(true1, true2)), input, `not to have length 2`)
+			})
+		})
+	})
+
+	Context("MatchMayChangeInTheFuture", func() {
+		Context("Match returned false", func() {
+			It("returns true if any of the matchers could change", func() {
+				// 3 matchers, all return false, and all could change
+				m := Or(BeNil(), Equal("hip"), HaveLen(1))
+				Expect(m.Match("hi")).To(BeFalse())
+				Expect(m.(*OrMatcher).MatchMayChangeInTheFuture("hi")).To(BeTrue()) // all 3 of these matchers default to 'true'
+			})
+			It("returns false if none of the matchers could change", func() {
+				// empty Or() has the property of never matching, and never can change since there are no sub-matchers that could change
+				m := Or()
+				Expect(m.Match("anything")).To(BeFalse())
+				Expect(m.(*OrMatcher).MatchMayChangeInTheFuture("anything")).To(BeFalse())
+
+				// Or() with 3 sub-matchers that return false, and can't change
+				m = Or(Or(), Or(), Or())
+				Expect(m.Match("hi")).To(BeFalse())
+				Expect(m.(*OrMatcher).MatchMayChangeInTheFuture("hi")).To(BeFalse()) // the 3 empty Or()'s won't change
+			})
+		})
+		Context("Match returned true", func() {
+			Context("returns value of the successful matcher", func() {
+				It("false if successful matcher not going to change", func() {
+					// 3 matchers: 1st returns false, 2nd returns true and is not going to change, 3rd is never called
+					m := Or(BeNil(), And(), Equal(1))
+					Expect(m.Match("hi")).To(BeTrue())
+					Expect(m.(*OrMatcher).MatchMayChangeInTheFuture("hi")).To(BeFalse())
+				})
+				It("true if successful matcher indicates it might change",
func() { + // 3 matchers: 1st returns false, 2nd returns true and "might" change, 3rd is never called + m := Or(Not(BeNil()), Equal("hi"), Equal(1)) + Expect(m.Match("hi")).To(BeTrue()) + Expect(m.(*OrMatcher).MatchMayChangeInTheFuture("hi")).To(BeTrue()) // Equal("hi") indicates it might change + }) + }) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/panic_matcher.go b/vendor/github.com/onsi/gomega/matchers/panic_matcher.go new file mode 100644 index 000000000..640f4db1a --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/panic_matcher.go @@ -0,0 +1,46 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type PanicMatcher struct { + object interface{} +} + +func (matcher *PanicMatcher) Match(actual interface{}) (success bool, err error) { + if actual == nil { + return false, fmt.Errorf("PanicMatcher expects a non-nil actual.") + } + + actualType := reflect.TypeOf(actual) + if actualType.Kind() != reflect.Func { + return false, fmt.Errorf("PanicMatcher expects a function. Got:\n%s", format.Object(actual, 1)) + } + if !(actualType.NumIn() == 0 && actualType.NumOut() == 0) { + return false, fmt.Errorf("PanicMatcher expects a function with no arguments and no return value. Got:\n%s", format.Object(actual, 1)) + } + + success = false + defer func() { + if e := recover(); e != nil { + matcher.object = e + success = true + } + }() + + reflect.ValueOf(actual).Call([]reflect.Value{}) + + return +} + +func (matcher *PanicMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to panic") +} + +func (matcher *PanicMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("not to panic, but panicked with\n%s", format.Object(matcher.object, 1))) +} diff --git a/vendor/github.com/onsi/gomega/matchers/panic_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/panic_matcher_test.go new file mode 100644 index 000000000..6b859a7e8 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/panic_matcher_test.go @@ -0,0 +1,45 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("Panic", func() { + Context("when passed something that's not a function that takes zero arguments and returns nothing", func() { + It("should error", func() { + success, err := (&PanicMatcher{}).Match("foo") + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&PanicMatcher{}).Match(nil) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&PanicMatcher{}).Match(func(foo string) {}) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&PanicMatcher{}).Match(func() string { return "bar" }) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) + + Context("when passed a function of the correct type", func() { + It("should call the function and pass if the function panics", func() { + Ω(func() { panic("ack!") }).Should(Panic()) + Ω(func() {}).ShouldNot(Panic()) + }) + }) + + Context("when assertion fails", func() { + It("should print the object passed to Panic", func() { + failuresMessages := InterceptGomegaFailures(func() { + Ω(func() { panic("ack!") }).ShouldNot(Panic()) + }) + Ω(failuresMessages).Should(ConsistOf(MatchRegexp("not to panic, but panicked with\\s*: ack!"))) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/receive_matcher.go b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go new file mode 100644 index 000000000..74e9e7ebe --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go @@ -0,0 +1,122 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type ReceiveMatcher struct { + Arg interface{} + receivedValue reflect.Value + channelClosed bool +} + +func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err error) { + if !isChan(actual) { + return false, fmt.Errorf("ReceiveMatcher expects a channel. Got:\n%s", format.Object(actual, 1)) + } + + channelType := reflect.TypeOf(actual) + channelValue := reflect.ValueOf(actual) + + if channelType.ChanDir() == reflect.SendDir { + return false, fmt.Errorf("ReceiveMatcher matcher cannot be passed a send-only channel. 
Got:\n%s", format.Object(actual, 1)) + } + + var subMatcher omegaMatcher + var hasSubMatcher bool + + if matcher.Arg != nil { + subMatcher, hasSubMatcher = (matcher.Arg).(omegaMatcher) + if !hasSubMatcher { + argType := reflect.TypeOf(matcher.Arg) + if argType.Kind() != reflect.Ptr { + return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nTo:\n%s\nYou need to pass a pointer!", format.Object(actual, 1), format.Object(matcher.Arg, 1)) + } + + assignable := channelType.Elem().AssignableTo(argType.Elem()) + if !assignable { + return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nTo:\n%s", format.Object(actual, 1), format.Object(matcher.Arg, 1)) + } + } + } + + winnerIndex, value, open := reflect.Select([]reflect.SelectCase{ + reflect.SelectCase{Dir: reflect.SelectRecv, Chan: channelValue}, + reflect.SelectCase{Dir: reflect.SelectDefault}, + }) + + var closed bool + var didReceive bool + if winnerIndex == 0 { + closed = !open + didReceive = open + } + matcher.channelClosed = closed + + if closed { + return false, nil + } + + if hasSubMatcher { + if didReceive { + matcher.receivedValue = value + return subMatcher.Match(matcher.receivedValue.Interface()) + } + return false, nil + } + + if didReceive { + if matcher.Arg != nil { + outValue := reflect.ValueOf(matcher.Arg) + reflect.Indirect(outValue).Set(value) + } + + return true, nil + } + return false, nil +} + +func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message string) { + subMatcher, hasSubMatcher := (matcher.Arg).(omegaMatcher) + + closedAddendum := "" + if matcher.channelClosed { + closedAddendum = " The channel is closed." + } + + if hasSubMatcher { + if matcher.receivedValue.IsValid() { + return subMatcher.FailureMessage(matcher.receivedValue.Interface()) + } + return "When passed a matcher, ReceiveMatcher's channel *must* receive something." + } + return format.Message(actual, "to receive something."+closedAddendum) +} + +func (matcher *ReceiveMatcher) NegatedFailureMessage(actual interface{}) (message string) { + subMatcher, hasSubMatcher := (matcher.Arg).(omegaMatcher) + + closedAddendum := "" + if matcher.channelClosed { + closedAddendum = " The channel is closed." + } + + if hasSubMatcher { + if matcher.receivedValue.IsValid() { + return subMatcher.NegatedFailureMessage(matcher.receivedValue.Interface()) + } + return "When passed a matcher, ReceiveMatcher's channel *must* receive something." + } + return format.Message(actual, "not to receive anything."+closedAddendum) +} + +func (matcher *ReceiveMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + if !isChan(actual) { + return false + } + + return !matcher.channelClosed +} diff --git a/vendor/github.com/onsi/gomega/matchers/receive_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/receive_matcher_test.go new file mode 100644 index 000000000..938c078e6 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/receive_matcher_test.go @@ -0,0 +1,280 @@ +package matchers_test + +import ( + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +type kungFuActor interface { + DrunkenMaster() bool +} + +type jackie struct { + name string +} + +func (j *jackie) DrunkenMaster() bool { + return true +} + +var _ = Describe("ReceiveMatcher", func() { + Context("with no argument", func() { + Context("for a buffered channel", func() { + It("should succeed", func() { + channel := make(chan bool, 1) + + Ω(channel).ShouldNot(Receive()) + + channel <- true + + Ω(channel).Should(Receive()) + }) + }) + + Context("for an unbuffered channel", func() { + It("should succeed (eventually)", func() { + channel := make(chan bool) + + Ω(channel).ShouldNot(Receive()) + + go func() { + time.Sleep(10 * time.Millisecond) + channel <- true + }() + + Eventually(channel).Should(Receive()) + }) + }) + }) + + Context("with a pointer argument", func() { + Context("of the correct type", func() { + It("should write the value received on the channel to the pointer", func() { + channel := make(chan int, 1) + + var value int + + Ω(channel).ShouldNot(Receive(&value)) + Ω(value).Should(BeZero()) + + channel <- 17 + + Ω(channel).Should(Receive(&value)) + Ω(value).Should(Equal(17)) + }) + }) + + Context("to various types of objects", func() { + It("should work", func() { + //channels of strings + stringChan := make(chan string, 1) + stringChan <- "foo" + + var s string + Ω(stringChan).Should(Receive(&s)) + Ω(s).Should(Equal("foo")) + + //channels of slices + sliceChan := make(chan []bool, 1) + sliceChan <- []bool{true, true, false} + + var sl []bool + Ω(sliceChan).Should(Receive(&sl)) + Ω(sl).Should(Equal([]bool{true, true, false})) + + //channels of channels + chanChan := make(chan chan bool, 1) + c := make(chan bool) + chanChan <- c + + var receivedC chan bool + Ω(chanChan).Should(Receive(&receivedC)) + Ω(receivedC).Should(Equal(c)) + + //channels of interfaces + jackieChan := make(chan kungFuActor, 1) + aJackie := &jackie{name: "Jackie Chan"} + jackieChan <- aJackie + + var theJackie kungFuActor + Ω(jackieChan).Should(Receive(&theJackie)) + Ω(theJackie).Should(Equal(aJackie)) + }) + }) + + Context("of the wrong type", func() { + It("should error", func() { + channel := make(chan int) + var incorrectType bool + + success, err := (&ReceiveMatcher{Arg: &incorrectType}).Match(channel) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + var notAPointer int + success, err = (&ReceiveMatcher{Arg: notAPointer}).Match(channel) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) + }) + + Context("with a matcher", func() { + It("should defer to the underlying matcher", func() { + intChannel := make(chan int, 1) + intChannel <- 3 + Ω(intChannel).Should(Receive(Equal(3))) + + intChannel <- 2 + Ω(intChannel).ShouldNot(Receive(Equal(3))) + + stringChannel := make(chan []string, 1) + stringChannel <- []string{"foo", "bar", "baz"} + Ω(stringChannel).Should(Receive(ContainElement(ContainSubstring("fo")))) + + stringChannel <- []string{"foo", "bar", "baz"} + Ω(stringChannel).ShouldNot(Receive(ContainElement(ContainSubstring("archipelago")))) + }) + + It("should defer to the underlying matcher for the message", func() { + matcher := Receive(Equal(3)) + channel := make(chan int, 1) + channel <- 2 + matcher.Match(channel) + Ω(matcher.FailureMessage(channel)).Should(MatchRegexp(`Expected\s+: 2\s+to equal\s+: 3`)) + + channel <- 3 + matcher.Match(channel) + Ω(matcher.NegatedFailureMessage(channel)).Should(MatchRegexp(`Expected\s+: 3\s+not to equal\s+: 3`)) + }) + + It("should work just fine with Eventually", func() { 
+			stringChannel := make(chan string)
+
+			go func() {
+				time.Sleep(5 * time.Millisecond)
+				stringChannel <- "A"
+				time.Sleep(5 * time.Millisecond)
+				stringChannel <- "B"
+			}()
+
+			Eventually(stringChannel).Should(Receive(Equal("B")))
+		})
+
+		Context("if the matcher errors", func() {
+			It("should error", func() {
+				channel := make(chan int, 1)
+				channel <- 3
+				success, err := (&ReceiveMatcher{Arg: ContainSubstring("three")}).Match(channel)
+				Ω(success).Should(BeFalse())
+				Ω(err).Should(HaveOccurred())
+			})
+		})
+
+		Context("if nothing is received", func() {
+			It("should fail", func() {
+				channel := make(chan int, 1)
+				success, err := (&ReceiveMatcher{Arg: Equal(1)}).Match(channel)
+				Ω(success).Should(BeFalse())
+				Ω(err).ShouldNot(HaveOccurred())
+			})
+		})
+	})
+
+	Context("When actual is a *closed* channel", func() {
+		Context("for a buffered channel", func() {
+			It("should work until it hits the end of the buffer", func() {
+				channel := make(chan bool, 1)
+				channel <- true
+
+				close(channel)
+
+				Ω(channel).Should(Receive())
+				Ω(channel).ShouldNot(Receive())
+			})
+		})
+
+		Context("for an unbuffered channel", func() {
+			It("should always fail", func() {
+				channel := make(chan bool)
+				close(channel)
+
+				Ω(channel).ShouldNot(Receive())
+			})
+		})
+	})
+
+	Context("When actual is a send-only channel", func() {
+		It("should error", func() {
+			channel := make(chan bool)
+
+			var writerChannel chan<- bool
+			writerChannel = channel
+
+			success, err := (&ReceiveMatcher{}).Match(writerChannel)
+			Ω(success).Should(BeFalse())
+			Ω(err).Should(HaveOccurred())
+		})
+	})
+
+	Context("when actual is a non-channel", func() {
+		It("should error", func() {
+			var nilChannel chan bool
+
+			success, err := (&ReceiveMatcher{}).Match(nilChannel)
+			Ω(success).Should(BeFalse())
+			Ω(err).Should(HaveOccurred())
+
+			success, err = (&ReceiveMatcher{}).Match(nil)
+			Ω(success).Should(BeFalse())
+			Ω(err).Should(HaveOccurred())
+
+			success, err = (&ReceiveMatcher{}).Match(3)
+			Ω(success).Should(BeFalse())
+			Ω(err).Should(HaveOccurred())
+		})
+	})
+
+	Describe("when used with eventually and a custom matcher", func() {
+		It("should return the matcher's error when a failing value is received on the channel, instead of the must receive something failure", func() {
+			failures := InterceptGomegaFailures(func() {
+				c := make(chan string, 0)
+				Eventually(c, 0.01).Should(Receive(Equal("hello")))
+			})
+			Ω(failures[0]).Should(ContainSubstring("When passed a matcher, ReceiveMatcher's channel *must* receive something."))
+
+			failures = InterceptGomegaFailures(func() {
+				c := make(chan string, 1)
+				c <- "hi"
+				Eventually(c, 0.01).Should(Receive(Equal("hello")))
+			})
+			Ω(failures[0]).Should(ContainSubstring(": hello"))
+		})
+	})
+
+	Describe("Bailing early", func() {
+		It("should bail early when passed a closed channel", func() {
+			c := make(chan bool)
+			close(c)
+
+			t := time.Now()
+			failures := InterceptGomegaFailures(func() {
+				Eventually(c).Should(Receive())
+			})
+			Ω(time.Since(t)).Should(BeNumerically("<", 500*time.Millisecond))
+			Ω(failures).Should(HaveLen(1))
+		})
+
+		It("should bail early when passed a non-channel", func() {
+			t := time.Now()
+			failures := InterceptGomegaFailures(func() {
+				Eventually(3).Should(Receive())
+			})
+			Ω(time.Since(t)).Should(BeNumerically("<", 500*time.Millisecond))
+			Ω(failures).Should(HaveLen(1))
+		})
+	})
+})
diff --git a/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go b/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go
new file mode 100644
index 000000000..721ed5529
--- /dev/null
+++
b/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go
@@ -0,0 +1,33 @@
+package matchers
+
+import (
+	"fmt"
+
+	"github.com/onsi/gomega/format"
+)
+
+type SucceedMatcher struct {
+}
+
+func (matcher *SucceedMatcher) Match(actual interface{}) (success bool, err error) {
+	// is purely nil?
+	if actual == nil {
+		return true, nil
+	}
+
+	// must be an 'error' type
+	if !isError(actual) {
+		return false, fmt.Errorf("Expected an error-type. Got:\n%s", format.Object(actual, 1))
+	}
+
+	// must be nil (or a pointer to a nil)
+	return isNil(actual), nil
+}
+
+func (matcher *SucceedMatcher) FailureMessage(actual interface{}) (message string) {
+	return fmt.Sprintf("Expected success, but got an error:\n%s\n%s", format.Object(actual, 1), format.IndentString(actual.(error).Error(), 1))
+}
+
+func (matcher *SucceedMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return "Expected failure, but got no error."
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/succeed_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/succeed_matcher_test.go
new file mode 100644
index 000000000..6b62c8bb2
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/succeed_matcher_test.go
@@ -0,0 +1,62 @@
+package matchers_test
+
+import (
+	"errors"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	. "github.com/onsi/gomega/matchers"
+)
+
+func Erroring() error {
+	return errors.New("bam")
+}
+
+func NotErroring() error {
+	return nil
+}
+
+type AnyType struct{}
+
+func Invalid() *AnyType {
+	return nil
+}
+
+var _ = Describe("Succeed", func() {
+	It("should succeed if the function succeeds", func() {
+		Ω(NotErroring()).Should(Succeed())
+	})
+
+	It("should succeed (in the negated) if the function errored", func() {
+		Ω(Erroring()).ShouldNot(Succeed())
+	})
+
+	It("should not succeed if passed a non-error", func() {
+		success, err := (&SucceedMatcher{}).Match(Invalid())
+		Ω(success).Should(BeFalse())
+		Ω(err).Should(MatchError("Expected an error-type. Got:\n <*matchers_test.AnyType | 0x0>: nil"))
+	})
+
+	It("doesn't support non-error type", func() {
+		success, err := (&SucceedMatcher{}).Match(AnyType{})
+		Ω(success).Should(BeFalse())
+		Ω(err).Should(MatchError("Expected an error-type. Got:\n <matchers_test.AnyType>: {}"))
+	})
+
+	It("doesn't support non-error pointer type", func() {
+		success, err := (&SucceedMatcher{}).Match(&AnyType{})
+		Ω(success).Should(BeFalse())
+		Ω(err).Should(MatchError(MatchRegexp(`Expected an error-type. 
Got:\n <*matchers_test.AnyType | 0x[[:xdigit:]]+>: {}`))) + }) + + It("should not succeed with pointer types that conform to error interface", func() { + err := &CustomErr{"ohai"} + Ω(err).ShouldNot(Succeed()) + }) + + It("should succeed with nil pointers to types that conform to error interface", func() { + var err *CustomErr = nil + Ω(err).Should(Succeed()) + }) + +}) diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/MIT.LICENSE b/vendor/github.com/onsi/gomega/matchers/support/goraph/MIT.LICENSE new file mode 100644 index 000000000..8edd8175a --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/MIT.LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2014 Amit Kumar Gupta + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go new file mode 100644 index 000000000..119d21ef3 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go @@ -0,0 +1,41 @@ +package bipartitegraph + +import "errors" +import "fmt" + +import . "github.com/onsi/gomega/matchers/support/goraph/node" +import . 
"github.com/onsi/gomega/matchers/support/goraph/edge" + +type BipartiteGraph struct { + Left NodeOrderedSet + Right NodeOrderedSet + Edges EdgeSet +} + +func NewBipartiteGraph(leftValues, rightValues []interface{}, neighbours func(interface{}, interface{}) (bool, error)) (*BipartiteGraph, error) { + left := NodeOrderedSet{} + for i, _ := range leftValues { + left = append(left, Node{i}) + } + + right := NodeOrderedSet{} + for j, _ := range rightValues { + right = append(right, Node{j + len(left)}) + } + + edges := EdgeSet{} + for i, leftValue := range leftValues { + for j, rightValue := range rightValues { + neighbours, err := neighbours(leftValue, rightValue) + if err != nil { + return nil, errors.New(fmt.Sprintf("error determining adjacency for %v and %v: %s", leftValue, rightValue, err.Error())) + } + + if neighbours { + edges = append(edges, Edge{left[i], right[j]}) + } + } + } + + return &BipartiteGraph{left, right, edges}, nil +} diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go new file mode 100644 index 000000000..8181f43a4 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go @@ -0,0 +1,159 @@ +package bipartitegraph + +import . "github.com/onsi/gomega/matchers/support/goraph/node" +import . "github.com/onsi/gomega/matchers/support/goraph/edge" +import "github.com/onsi/gomega/matchers/support/goraph/util" + +func (bg *BipartiteGraph) LargestMatching() (matching EdgeSet) { + paths := bg.maximalDisjointSLAPCollection(matching) + + for len(paths) > 0 { + for _, path := range paths { + matching = matching.SymmetricDifference(path) + } + paths = bg.maximalDisjointSLAPCollection(matching) + } + + return +} + +func (bg *BipartiteGraph) maximalDisjointSLAPCollection(matching EdgeSet) (result []EdgeSet) { + guideLayers := bg.createSLAPGuideLayers(matching) + if len(guideLayers) == 0 { + return + } + + used := make(map[Node]bool) + + for _, u := range guideLayers[len(guideLayers)-1] { + slap, found := bg.findDisjointSLAP(u, matching, guideLayers, used) + if found { + for _, edge := range slap { + used[edge.Node1] = true + used[edge.Node2] = true + } + result = append(result, slap) + } + } + + return +} + +func (bg *BipartiteGraph) findDisjointSLAP( + start Node, + matching EdgeSet, + guideLayers []NodeOrderedSet, + used map[Node]bool, +) ([]Edge, bool) { + return bg.findDisjointSLAPHelper(start, EdgeSet{}, len(guideLayers)-1, matching, guideLayers, used) +} + +func (bg *BipartiteGraph) findDisjointSLAPHelper( + currentNode Node, + currentSLAP EdgeSet, + currentLevel int, + matching EdgeSet, + guideLayers []NodeOrderedSet, + used map[Node]bool, +) (EdgeSet, bool) { + used[currentNode] = true + + if currentLevel == 0 { + return currentSLAP, true + } + + for _, nextNode := range guideLayers[currentLevel-1] { + if used[nextNode] { + continue + } + + edge, found := bg.Edges.FindByNodes(currentNode, nextNode) + if !found { + continue + } + + if matching.Contains(edge) == util.Odd(currentLevel) { + continue + } + + currentSLAP = append(currentSLAP, edge) + slap, found := bg.findDisjointSLAPHelper(nextNode, currentSLAP, currentLevel-1, matching, guideLayers, used) + if found { + return slap, true + } + currentSLAP = currentSLAP[:len(currentSLAP)-1] + } + + used[currentNode] = false + return nil, false +} + +func (bg *BipartiteGraph) createSLAPGuideLayers(matching EdgeSet) (guideLayers 
[]NodeOrderedSet) { + used := make(map[Node]bool) + currentLayer := NodeOrderedSet{} + + for _, node := range bg.Left { + if matching.Free(node) { + used[node] = true + currentLayer = append(currentLayer, node) + } + } + + if len(currentLayer) == 0 { + return []NodeOrderedSet{} + } + guideLayers = append(guideLayers, currentLayer) + + done := false + + for !done { + lastLayer := currentLayer + currentLayer = NodeOrderedSet{} + + if util.Odd(len(guideLayers)) { + for _, leftNode := range lastLayer { + for _, rightNode := range bg.Right { + if used[rightNode] { + continue + } + + edge, found := bg.Edges.FindByNodes(leftNode, rightNode) + if !found || matching.Contains(edge) { + continue + } + + currentLayer = append(currentLayer, rightNode) + used[rightNode] = true + + if matching.Free(rightNode) { + done = true + } + } + } + } else { + for _, rightNode := range lastLayer { + for _, leftNode := range bg.Left { + if used[leftNode] { + continue + } + + edge, found := bg.Edges.FindByNodes(leftNode, rightNode) + if !found || !matching.Contains(edge) { + continue + } + + currentLayer = append(currentLayer, leftNode) + used[leftNode] = true + } + } + + } + + if len(currentLayer) == 0 { + return []NodeOrderedSet{} + } + guideLayers = append(guideLayers, currentLayer) + } + + return +} diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go new file mode 100644 index 000000000..4fd15cc06 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go @@ -0,0 +1,61 @@ +package edge + +import . "github.com/onsi/gomega/matchers/support/goraph/node" + +type Edge struct { + Node1 Node + Node2 Node +} + +type EdgeSet []Edge + +func (ec EdgeSet) Free(node Node) bool { + for _, e := range ec { + if e.Node1 == node || e.Node2 == node { + return false + } + } + + return true +} + +func (ec EdgeSet) Contains(edge Edge) bool { + for _, e := range ec { + if e == edge { + return true + } + } + + return false +} + +func (ec EdgeSet) FindByNodes(node1, node2 Node) (Edge, bool) { + for _, e := range ec { + if (e.Node1 == node1 && e.Node2 == node2) || (e.Node1 == node2 && e.Node2 == node1) { + return e, true + } + } + + return Edge{}, false +} + +func (ec EdgeSet) SymmetricDifference(ec2 EdgeSet) EdgeSet { + edgesToInclude := make(map[Edge]bool) + + for _, e := range ec { + edgesToInclude[e] = true + } + + for _, e := range ec2 { + edgesToInclude[e] = !edgesToInclude[e] + } + + result := EdgeSet{} + for e, include := range edgesToInclude { + if include { + result = append(result, e) + } + } + + return result +} diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go new file mode 100644 index 000000000..800c2ea8c --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go @@ -0,0 +1,7 @@ +package node + +type Node struct { + Id int +} + +type NodeOrderedSet []Node diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/util/util.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/util/util.go new file mode 100644 index 000000000..d76a1ee00 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/util/util.go @@ -0,0 +1,7 @@ +package util + +import "math" + +func Odd(n int) bool { + return math.Mod(float64(n), 2.0) == 1.0 +} diff --git a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_01.xml 
b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_01.xml new file mode 100644 index 000000000..90f0a1b45 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_01.xml @@ -0,0 +1,6 @@ + + Tove + Jani + Reminder + Don't forget me this weekend! + \ No newline at end of file diff --git a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_02.xml b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_02.xml new file mode 100644 index 000000000..3863b83c3 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_02.xml @@ -0,0 +1,9 @@ + + + + Tove + Jani + Reminder + Don't forget me this weekend! + + diff --git a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_03.xml b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_03.xml new file mode 100644 index 000000000..a491c213c --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_03.xml @@ -0,0 +1 @@ + Tove Jani Reminder Don't forget me this weekend! diff --git a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_04.xml b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_04.xml new file mode 100644 index 000000000..dcfd3db03 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_04.xml @@ -0,0 +1,6 @@ + + Tove + John + Doe + Don't forget me this weekend! + \ No newline at end of file diff --git a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_05.xml b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_05.xml new file mode 100644 index 000000000..de15a6a55 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_05.xml @@ -0,0 +1,211 @@ + + + + Empire Burlesque + Bob Dylan + USA + Columbia + 10.90 + 1985 + + + Hide your heart + Bonnie Tyler + UK + CBS Records + 9.90 + 1988 + + + Greatest Hits + Dolly Parton + USA + RCA + 9.90 + 1982 + + + Still got the blues + Gary Moore + UK + Virgin records + 10.20 + 1990 + + + Eros + Eros Ramazzotti + EU + BMG + 9.90 + 1997 + + + One night only + Bee Gees + UK + Polydor + 10.90 + 1998 + + + Sylvias Mother + Dr.Hook + UK + CBS + 8.10 + 1973 + + + Maggie May + Rod Stewart + UK + Pickwick + 8.50 + 1990 + + + Romanza + Andrea Bocelli + EU + Polydor + 10.80 + 1996 + + + When a man loves a woman + Percy Sledge + USA + Atlantic + 8.70 + 1987 + + + Black angel + Savage Rose + EU + Mega + 10.90 + 1995 + + + 1999 Grammy Nominees + Many + USA + Grammy + 10.20 + 1999 + + + For the good times + Kenny Rogers + UK + Mucik Master + 8.70 + 1995 + + + Big Willie style + Will Smith + USA + Columbia + 9.90 + 1997 + + + Tupelo Honey + Van Morrison + UK + Polydor + 8.20 + 1971 + + + Soulsville + Jorn Hoel + Norway + WEA + 7.90 + 1996 + + + The very best of + Cat Stevens + UK + Island + 8.90 + 1990 + + + Stop + Sam Brown + UK + A and M + 8.90 + 1988 + + + Bridge of Spies + T'Pau + UK + Siren + 7.90 + 1987 + + + Private Dancer + Tina Turner + UK + Capitol + 8.90 + 1983 + + + Midt om natten + Kim Larsen + EU + Medley + 7.80 + 1983 + + + Pavarotti Gala Concert + Luciano Pavarotti + UK + DECCA + 9.90 + 1991 + + + The dock of the bay + Otis Redding + USA + Stax Records + 7.90 + 1968 + + + Picture book + Simply Red + EU + Elektra + 7.20 + 1985 + + + Red + The Communards + UK + London + 7.80 + 1987 + + + Unchain my heart + Joe Cocker + USA + EMI + 8.20 + 1987 + + diff --git a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_06.xml b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_06.xml new file mode 100644 index 
000000000..4ba90fb97 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_06.xml @@ -0,0 +1,13 @@ + + + + + + +
    ApplesBananas
    + + African Coffee Table + 80 + 120 +
    +
    \ No newline at end of file diff --git a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_07.xml b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_07.xml new file mode 100644 index 000000000..34b9e9775 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_07.xml @@ -0,0 +1,13 @@ + + + + Apples + Bananas + + + + African Coffee Table + 80 + 120 + + \ No newline at end of file diff --git a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_08.xml b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_08.xml new file mode 100644 index 000000000..ccaee4e1a --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_08.xml @@ -0,0 +1,13 @@ + + + + Apples + Oranges + + + + African Coffee Table + 80 + 120 + + \ No newline at end of file diff --git a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_09.xml b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_09.xml new file mode 100644 index 000000000..531f84d3f --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_09.xml @@ -0,0 +1,4 @@ + + Foo + Bar + \ No newline at end of file diff --git a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_10.xml b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_10.xml new file mode 100644 index 000000000..b1e1e1fbe --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_10.xml @@ -0,0 +1,4 @@ + + Foo + Bar + \ No newline at end of file diff --git a/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_11.xml b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_11.xml new file mode 100644 index 000000000..3132b0f90 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/test_data/xml/sample_11.xml @@ -0,0 +1,7 @@ + + + Tove + Jani + Reminder + Don't forget me this weekend! + diff --git a/vendor/github.com/onsi/gomega/matchers/type_support.go b/vendor/github.com/onsi/gomega/matchers/type_support.go new file mode 100644 index 000000000..b05a5e75d --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/type_support.go @@ -0,0 +1,173 @@ +/* +Gomega matchers + +This package implements the Gomega matchers and does not typically need to be imported. 
+See the docs for Gomega for documentation on the matchers + +http://onsi.github.io/gomega/ +*/ +package matchers + +import ( + "fmt" + "reflect" +) + +type omegaMatcher interface { + Match(actual interface{}) (success bool, err error) + FailureMessage(actual interface{}) (message string) + NegatedFailureMessage(actual interface{}) (message string) +} + +func isBool(a interface{}) bool { + return reflect.TypeOf(a).Kind() == reflect.Bool +} + +func isNumber(a interface{}) bool { + if a == nil { + return false + } + kind := reflect.TypeOf(a).Kind() + return reflect.Int <= kind && kind <= reflect.Float64 +} + +func isInteger(a interface{}) bool { + kind := reflect.TypeOf(a).Kind() + return reflect.Int <= kind && kind <= reflect.Int64 +} + +func isUnsignedInteger(a interface{}) bool { + kind := reflect.TypeOf(a).Kind() + return reflect.Uint <= kind && kind <= reflect.Uint64 +} + +func isFloat(a interface{}) bool { + kind := reflect.TypeOf(a).Kind() + return reflect.Float32 <= kind && kind <= reflect.Float64 +} + +func toInteger(a interface{}) int64 { + if isInteger(a) { + return reflect.ValueOf(a).Int() + } else if isUnsignedInteger(a) { + return int64(reflect.ValueOf(a).Uint()) + } else if isFloat(a) { + return int64(reflect.ValueOf(a).Float()) + } + panic(fmt.Sprintf("Expected a number! Got <%T> %#v", a, a)) +} + +func toUnsignedInteger(a interface{}) uint64 { + if isInteger(a) { + return uint64(reflect.ValueOf(a).Int()) + } else if isUnsignedInteger(a) { + return reflect.ValueOf(a).Uint() + } else if isFloat(a) { + return uint64(reflect.ValueOf(a).Float()) + } + panic(fmt.Sprintf("Expected a number! Got <%T> %#v", a, a)) +} + +func toFloat(a interface{}) float64 { + if isInteger(a) { + return float64(reflect.ValueOf(a).Int()) + } else if isUnsignedInteger(a) { + return float64(reflect.ValueOf(a).Uint()) + } else if isFloat(a) { + return reflect.ValueOf(a).Float() + } + panic(fmt.Sprintf("Expected a number! 
Got <%T> %#v", a, a)) +} + +func isError(a interface{}) bool { + _, ok := a.(error) + return ok +} + +func isChan(a interface{}) bool { + if isNil(a) { + return false + } + return reflect.TypeOf(a).Kind() == reflect.Chan +} + +func isMap(a interface{}) bool { + if a == nil { + return false + } + return reflect.TypeOf(a).Kind() == reflect.Map +} + +func isArrayOrSlice(a interface{}) bool { + if a == nil { + return false + } + switch reflect.TypeOf(a).Kind() { + case reflect.Array, reflect.Slice: + return true + default: + return false + } +} + +func isString(a interface{}) bool { + if a == nil { + return false + } + return reflect.TypeOf(a).Kind() == reflect.String +} + +func toString(a interface{}) (string, bool) { + aString, isString := a.(string) + if isString { + return aString, true + } + + aBytes, isBytes := a.([]byte) + if isBytes { + return string(aBytes), true + } + + aStringer, isStringer := a.(fmt.Stringer) + if isStringer { + return aStringer.String(), true + } + + return "", false +} + +func lengthOf(a interface{}) (int, bool) { + if a == nil { + return 0, false + } + switch reflect.TypeOf(a).Kind() { + case reflect.Map, reflect.Array, reflect.String, reflect.Chan, reflect.Slice: + return reflect.ValueOf(a).Len(), true + default: + return 0, false + } +} +func capOf(a interface{}) (int, bool) { + if a == nil { + return 0, false + } + switch reflect.TypeOf(a).Kind() { + case reflect.Array, reflect.Chan, reflect.Slice: + return reflect.ValueOf(a).Cap(), true + default: + return 0, false + } +} + +func isNil(a interface{}) bool { + if a == nil { + return true + } + + switch reflect.TypeOf(a).Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return reflect.ValueOf(a).IsNil() + } + + return false +} diff --git a/vendor/github.com/onsi/gomega/matchers/with_transform.go b/vendor/github.com/onsi/gomega/matchers/with_transform.go new file mode 100644 index 000000000..8e58d8a0f --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/with_transform.go @@ -0,0 +1,72 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/internal/oraclematcher" + "github.com/onsi/gomega/types" +) + +type WithTransformMatcher struct { + // input + Transform interface{} // must be a function of one parameter that returns one value + Matcher types.GomegaMatcher + + // cached value + transformArgType reflect.Type + + // state + transformedValue interface{} +} + +func NewWithTransformMatcher(transform interface{}, matcher types.GomegaMatcher) *WithTransformMatcher { + if transform == nil { + panic("transform function cannot be nil") + } + txType := reflect.TypeOf(transform) + if txType.NumIn() != 1 { + panic("transform function must have 1 argument") + } + if txType.NumOut() != 1 { + panic("transform function must have 1 return value") + } + + return &WithTransformMatcher{ + Transform: transform, + Matcher: matcher, + transformArgType: reflect.TypeOf(transform).In(0), + } +} + +func (m *WithTransformMatcher) Match(actual interface{}) (bool, error) { + // return error if actual's type is incompatible with Transform function's argument type + actualType := reflect.TypeOf(actual) + if !actualType.AssignableTo(m.transformArgType) { + return false, fmt.Errorf("Transform function expects '%s' but we have '%s'", m.transformArgType, actualType) + } + + // call the Transform function with `actual` + fn := reflect.ValueOf(m.Transform) + result := fn.Call([]reflect.Value{reflect.ValueOf(actual)}) + m.transformedValue = 
result[0].Interface() // expect exactly one value + + return m.Matcher.Match(m.transformedValue) +} + +func (m *WithTransformMatcher) FailureMessage(_ interface{}) (message string) { + return m.Matcher.FailureMessage(m.transformedValue) +} + +func (m *WithTransformMatcher) NegatedFailureMessage(_ interface{}) (message string) { + return m.Matcher.NegatedFailureMessage(m.transformedValue) +} + +func (m *WithTransformMatcher) MatchMayChangeInTheFuture(_ interface{}) bool { + // TODO: Maybe this should always just return true? (Only an issue for non-deterministic transformers.) + // + // Querying the next matcher is fine if the transformer always will return the same value. + // But if the transformer is non-deterministic and returns a different value each time, then there + // is no point in querying the next matcher, since it can only comment on the last transformed value. + return oraclematcher.MatchMayChangeInTheFuture(m.Matcher, m.transformedValue) +} diff --git a/vendor/github.com/onsi/gomega/matchers/with_transform_test.go b/vendor/github.com/onsi/gomega/matchers/with_transform_test.go new file mode 100644 index 000000000..e52bf8e63 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/with_transform_test.go @@ -0,0 +1,102 @@ +package matchers_test + +import ( + "errors" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/matchers" +) + +var _ = Describe("WithTransformMatcher", func() { + + var plus1 = func(i int) int { return i + 1 } + + Context("Panic if transform function invalid", func() { + panicsWithTransformer := func(transform interface{}) { + ExpectWithOffset(1, func() { WithTransform(transform, nil) }).To(Panic()) + } + It("nil", func() { + panicsWithTransformer(nil) + }) + Context("Invalid number of args, but correct return value count", func() { + It("zero", func() { + panicsWithTransformer(func() int { return 5 }) + }) + It("two", func() { + panicsWithTransformer(func(i, j int) int { return 5 }) + }) + }) + Context("Invalid number of return values, but correct number of arguments", func() { + It("zero", func() { + panicsWithTransformer(func(i int) {}) + }) + It("two", func() { + panicsWithTransformer(func(i int) (int, int) { return 5, 6 }) + }) + }) + }) + + It("works with positive cases", func() { + Expect(1).To(WithTransform(plus1, Equal(2))) + Expect(1).To(WithTransform(plus1, WithTransform(plus1, Equal(3)))) + Expect(1).To(WithTransform(plus1, And(Equal(2), BeNumerically(">", 1)))) + + // transform expects custom type + type S struct { + A int + B string + } + transformer := func(s S) string { return s.B } + Expect(S{1, "hi"}).To(WithTransform(transformer, Equal("hi"))) + + // transform expects interface + errString := func(e error) string { return e.Error() } + Expect(errors.New("abc")).To(WithTransform(errString, Equal("abc"))) + }) + + It("works with negative cases", func() { + Expect(1).ToNot(WithTransform(plus1, Equal(3))) + Expect(1).ToNot(WithTransform(plus1, WithTransform(plus1, Equal(2)))) + }) + + Context("failure messages", func() { + Context("when match fails", func() { + It("gives a descriptive message", func() { + m := WithTransform(plus1, Equal(3)) + Expect(m.Match(1)).To(BeFalse()) + Expect(m.FailureMessage(1)).To(Equal("Expected\n : 2\nto equal\n : 3")) + }) + }) + + Context("when match succeeds, but expected it to fail", func() { + It("gives a descriptive message", func() { + m := Not(WithTransform(plus1, Equal(3))) + Expect(m.Match(2)).To(BeFalse()) + Expect(m.FailureMessage(2)).To(Equal("Expected\n : 3\nnot 
to equal\n : 3")) + }) + }) + + Context("actual value is incompatible with transform function's argument type", func() { + It("gracefully fails if transform cannot be performed", func() { + m := WithTransform(plus1, Equal(3)) + result, err := m.Match("hi") // give it a string but transform expects int; doesn't panic + Expect(result).To(BeFalse()) + Expect(err).To(MatchError("Transform function expects 'int' but we have 'string'")) + }) + }) + }) + + Context("MatchMayChangeInTheFuture()", func() { + It("Propagates value from wrapped matcher on the transformed value", func() { + m := WithTransform(plus1, Or()) // empty Or() always returns false, and indicates it cannot change + Expect(m.Match(1)).To(BeFalse()) + Expect(m.(*WithTransformMatcher).MatchMayChangeInTheFuture(1)).To(BeFalse()) // empty Or() indicates cannot change + }) + It("Defaults to true", func() { + m := WithTransform(plus1, Equal(2)) // Equal does not have this method + Expect(m.Match(1)).To(BeTrue()) + Expect(m.(*WithTransformMatcher).MatchMayChangeInTheFuture(1)).To(BeTrue()) // defaults to true + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/types/types.go b/vendor/github.com/onsi/gomega/types/types.go new file mode 100644 index 000000000..a83b40110 --- /dev/null +++ b/vendor/github.com/onsi/gomega/types/types.go @@ -0,0 +1,17 @@ +package types + +type GomegaFailHandler func(message string, callerSkip ...int) + +//A simple *testing.T interface wrapper +type GomegaTestingT interface { + Fatalf(format string, args ...interface{}) +} + +//All Gomega matchers must implement the GomegaMatcher interface +// +//For details on writing custom matchers, check out: http://onsi.github.io/gomega/#adding_your_own_matchers +type GomegaMatcher interface { + Match(actual interface{}) (success bool, err error) + FailureMessage(actual interface{}) (message string) + NegatedFailureMessage(actual interface{}) (message string) +} diff --git a/vendor/github.com/urfave/cli/.flake8 b/vendor/github.com/urfave/cli/.flake8 new file mode 100644 index 000000000..6deafc261 --- /dev/null +++ b/vendor/github.com/urfave/cli/.flake8 @@ -0,0 +1,2 @@ +[flake8] +max-line-length = 120 diff --git a/vendor/github.com/urfave/cli/.gitignore b/vendor/github.com/urfave/cli/.gitignore new file mode 100644 index 000000000..faf70c4c2 --- /dev/null +++ b/vendor/github.com/urfave/cli/.gitignore @@ -0,0 +1,2 @@ +*.coverprofile +node_modules/ diff --git a/vendor/github.com/urfave/cli/.travis.yml b/vendor/github.com/urfave/cli/.travis.yml new file mode 100644 index 000000000..890e18598 --- /dev/null +++ b/vendor/github.com/urfave/cli/.travis.yml @@ -0,0 +1,38 @@ +language: go + +sudo: false + +cache: + directories: + - node_modules + +go: +- 1.6.x +- 1.7.x +- 1.8.x +- master + +matrix: + allow_failures: + - go: master + include: + - go: 1.6.x + os: osx + - go: 1.7.x + os: osx + - go: 1.8.x + os: osx + +before_script: +- go get github.com/urfave/gfmrun/... || true +- go get golang.org/x/tools/cmd/goimports +- if [ ! -f node_modules/.bin/markdown-toc ] ; then + npm install markdown-toc ; + fi + +script: +- ./runtests gen +- ./runtests vet +- ./runtests test +- ./runtests gfmrun +- ./runtests toc diff --git a/vendor/github.com/urfave/cli/CHANGELOG.md b/vendor/github.com/urfave/cli/CHANGELOG.md new file mode 100644 index 000000000..07f75464b --- /dev/null +++ b/vendor/github.com/urfave/cli/CHANGELOG.md @@ -0,0 +1,392 @@ +# Change Log + +**ATTN**: This project uses [semantic versioning](http://semver.org/). 
+
+## [Unreleased]
+
+## [1.19.1] - 2016-11-21
+
+### Fixed
+
+- Fixes regression introduced in 1.19.0 where using an `ActionFunc` as
+  the `Action` for a command would cause it to error rather than calling the
+  function. Should not have affected declarative cases using `func(c
+  *cli.Context) error`.
+- Shell completion now handles the case where the user specifies
+  `--generate-bash-completion` immediately after a flag that takes an argument.
+  Previously it called the application with `--generate-bash-completion` as the
+  flag value.
+
+## [1.19.0] - 2016-11-19
+### Added
+- `FlagsByName` was added to make it easy to sort flags (e.g.
+  `sort.Sort(cli.FlagsByName(app.Flags))`; a fuller sketch follows these notes)
+- A `Description` field was added to `App` for a more detailed description of
+  the application (similar to the existing `Description` field on `Command`)
+- Flag type code generation via `go generate`
+- Write to stderr and exit 1 if action returns non-nil error
+- Added support for TOML to the `altsrc` loader
+- `SkipArgReorder` was added to allow users to skip the argument reordering.
+  This is useful if you want to consider all "flags" after an argument as
+  arguments rather than flags (the default behavior of the stdlib `flag`
+  library). This is backported functionality from the [removal of the flag
+  reordering](https://github.com/urfave/cli/pull/398) in the unreleased version
+  2
+- For formatted errors (those implementing `ErrorFormatter`), the errors will
+  be formatted during output. Compatible with `pkg/errors`.
+
+### Changed
+- Raise minimum tested/supported Go version to 1.2+
+
+### Fixed
+- Consider empty environment variables as set (previously environment variables
+  with the equivalent of `""` would be skipped rather than their value used).
+- Return an error if the value in a given environment variable cannot be parsed
+  as the flag type. Previously these errors were silently swallowed.
+- Print full error when an invalid flag is specified (which includes the invalid flag)
+- `App.Writer` defaults to `stdout` when `nil`
+- If no action is specified on a command or app, the help is now printed instead of `panic`ing
+- `App.Metadata` is initialized automatically now (previously was `nil` unless initialized)
+- Correctly show help message if `-h` is provided to a subcommand
+- `context.(Global)IsSet` now respects environment variables. Previously it
+  would return `false` if a flag was specified in the environment rather than
+  as an argument
+- Removed deprecation warnings to STDERR to avoid them leaking to the end-user
+- `altsrc`s import paths were updated to use `gopkg.in/urfave/cli.v1`. This
+  fixes issues that occurred when `gopkg.in/urfave/cli.v1` was imported as well
+  as `altsrc` where Go would complain that the types didn't match
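+
+A minimal sketch of the new `FlagsByName` and `Description` fields (assuming
+the `gopkg.in/urfave/cli.v1` import path; the app and flag names are
+illustrative only):
+
+```go
+package main
+
+import (
+	"log"
+	"os"
+	"sort"
+
+	"gopkg.in/urfave/cli.v1"
+)
+
+func main() {
+	app := cli.NewApp()
+	app.Name = "greet"
+	// Description holds the longer, free-form text added to App in 1.19.0.
+	app.Description = "greet prints a friendly greeting"
+	app.Flags = []cli.Flag{
+		cli.StringFlag{Name: "name", Value: "world", Usage: "who to greet"},
+		cli.BoolFlag{Name: "loud", Usage: "shout the greeting"},
+	}
+	// FlagsByName implements sort.Interface, so the generated help output
+	// can list flags alphabetically.
+	sort.Sort(cli.FlagsByName(app.Flags))
+	if err := app.Run(os.Args); err != nil {
+		log.Fatal(err)
+	}
+}
+```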
+
+## [1.18.1] - 2016-08-28
+### Fixed
+- Removed deprecation warnings to STDERR to avoid them leaking to the end-user (backported)
+
+## [1.18.0] - 2016-06-27
+### Added
+- `./runtests` test runner with coverage tracking by default
+- testing on OS X
+- testing on Windows
+- `UintFlag`, `Uint64Flag`, and `Int64Flag` types and supporting code
+
+### Changed
+- Use spaces for alignment in help/usage output instead of tabs, making the
+  output alignment consistent regardless of tab width
+
+### Fixed
+- Printing of command aliases in help text
+- Printing of visible flags for both struct and struct pointer flags
+- Display the `help` subcommand when using `CommandCategories`
+- No longer swallows `panic`s that occur within the `Action`s themselves when
+  detecting the signature of the `Action` field
+
+## [1.17.1] - 2016-08-28
+### Fixed
+- Removed deprecation warnings to STDERR to avoid them leaking to the end-user
+
+## [1.17.0] - 2016-05-09
+### Added
+- Pluggable flag-level help text rendering via `cli.DefaultFlagStringFunc`
+- `context.GlobalBoolT` was added as an analogue to `context.GlobalBool`
+- Support for hiding commands by setting `Hidden: true` -- this will hide the
+  commands in help output
+
+### Changed
+- `Float64Flag`, `IntFlag`, and `DurationFlag` default values are no longer
+  quoted in help text output.
+- All flag types now include `(default: {value})` strings following usage when a
+  default value can be (reasonably) detected.
+- `IntSliceFlag` and `StringSliceFlag` usage strings are now more consistent
+  with non-slice flag types
+- Apps now exit with a code of 3 if an unknown subcommand is specified
+  (previously they printed "No help topic for...", but still exited 0). This
+  makes it easier to script around apps built using `cli` since they can trust
+  that a 0 exit code indicates a successful execution.
+- cleanups based on [Go Report Card
+  feedback](https://goreportcard.com/report/github.com/urfave/cli)
+
+## [1.16.1] - 2016-08-28
+### Fixed
+- Removed deprecation warnings to STDERR to avoid them leaking to the end-user
+
+## [1.16.0] - 2016-05-02
+### Added
+- `Hidden` field on all flag struct types to omit from generated help text
+
+### Changed
+- `BashCompletionFlag` (`--enable-bash-completion`) is now omitted from
+  generated help text via the `Hidden` field
+
+### Fixed
+- handling of error values in `HandleAction` and `HandleExitCoder`
+
+## [1.15.0] - 2016-04-30
+### Added
+- This file!
+- Support for placeholders in flag usage strings
+- `App.Metadata` map for arbitrary data/state management
+- `Set` and `GlobalSet` methods on `*cli.Context` for altering values after
+  parsing.
+- Support for nested lookup of dot-delimited keys in structures loaded from
+  YAML.
+
+### Changed
+- The `App.Action` and `Command.Action` now prefer a return signature of
+  `func(*cli.Context) error`, as defined by `cli.ActionFunc`.
If a non-nil +`error` is returned, there may be two outcomes: + - If the error fulfills `cli.ExitCoder`, then `os.Exit` will be called + automatically + - Else the error is bubbled up and returned from `App.Run` +- Specifying an `Action` with the legacy return signature of +`func(*cli.Context)` will produce a deprecation message to stderr +- Specifying an `Action` that is not a `func` type will produce a non-zero exit +from `App.Run` +- Specifying an `Action` func that has an invalid (input) signature will +produce a non-zero exit from `App.Run` + +### Deprecated +- +`cli.App.RunAndExitOnError`, which should now be done by returning an error +that fulfills `cli.ExitCoder` to `cli.App.Run`. +- the legacy signature for +`cli.App.Action` of `func(*cli.Context)`, which should now have a return +signature of `func(*cli.Context) error`, as defined by `cli.ActionFunc`. + +### Fixed +- Added missing `*cli.Context.GlobalFloat64` method + +## [1.14.0] - 2016-04-03 (backfilled 2016-04-25) +### Added +- Codebeat badge +- Support for categorization via `CategorizedHelp` and `Categories` on app. + +### Changed +- Use `filepath.Base` instead of `path.Base` in `Name` and `HelpName`. + +### Fixed +- Ensure version is not shown in help text when `HideVersion` set. + +## [1.13.0] - 2016-03-06 (backfilled 2016-04-25) +### Added +- YAML file input support. +- `NArg` method on context. + +## [1.12.0] - 2016-02-17 (backfilled 2016-04-25) +### Added +- Custom usage error handling. +- Custom text support in `USAGE` section of help output. +- Improved help messages for empty strings. +- AppVeyor CI configuration. + +### Changed +- Removed `panic` from default help printer func. +- De-duping and optimizations. + +### Fixed +- Correctly handle `Before`/`After` at command level when no subcommands. +- Case of literal `-` argument causing flag reordering. +- Environment variable hints on Windows. +- Docs updates. + +## [1.11.1] - 2015-12-21 (backfilled 2016-04-25) +### Changed +- Use `path.Base` in `Name` and `HelpName` +- Export `GetName` on flag types. + +### Fixed +- Flag parsing when skipping is enabled. +- Test output cleanup. +- Move completion check to account for empty input case. + +## [1.11.0] - 2015-11-15 (backfilled 2016-04-25) +### Added +- Destination scan support for flags. +- Testing against `tip` in Travis CI config. + +### Changed +- Go version in Travis CI config. + +### Fixed +- Removed redundant tests. +- Use correct example naming in tests. + +## [1.10.2] - 2015-10-29 (backfilled 2016-04-25) +### Fixed +- Remove unused var in bash completion. + +## [1.10.1] - 2015-10-21 (backfilled 2016-04-25) +### Added +- Coverage and reference logos in README. + +### Fixed +- Use specified values in help and version parsing. +- Only display app version and help message once. + +## [1.10.0] - 2015-10-06 (backfilled 2016-04-25) +### Added +- More tests for existing functionality. +- `ArgsUsage` at app and command level for help text flexibility. + +### Fixed +- Honor `HideHelp` and `HideVersion` in `App.Run`. +- Remove juvenile word from README. + +## [1.9.0] - 2015-09-08 (backfilled 2016-04-25) +### Added +- `FullName` on command with accompanying help output update. +- Set default `$PROG` in bash completion. + +### Changed +- Docs formatting. + +### Fixed +- Removed self-referential imports in tests. + +## [1.8.0] - 2015-06-30 (backfilled 2016-04-25) +### Added +- Support for `Copyright` at app level. +- `Parent` func at context level to walk up context lineage. 
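+
+Both additions in a minimal sketch (the `build` command and `verbose` flag
+are illustrative; `Parent()` here reads an app-level flag from inside a
+command):
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+	"os"
+
+	"gopkg.in/urfave/cli.v1"
+)
+
+func main() {
+	app := cli.NewApp()
+	app.Copyright = "(c) 2017 Example Co." // rendered in the default help template
+	app.Flags = []cli.Flag{cli.BoolFlag{Name: "verbose"}}
+	app.Commands = []cli.Command{{
+		Name: "build",
+		Action: func(c *cli.Context) error {
+			// Parent() walks one level up the context lineage,
+			// here to reach the app-level --verbose flag.
+			if p := c.Parent(); p != nil && p.Bool("verbose") {
+				fmt.Println("building (verbose)")
+			}
+			return nil
+		},
+	}}
+	if err := app.Run(os.Args); err != nil {
+		log.Fatal(err)
+	}
+}
+```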
+ +### Fixed +- Global flag processing at top level. + +## [1.7.1] - 2015-06-11 (backfilled 2016-04-25) +### Added +- Aggregate errors from `Before`/`After` funcs. +- Doc comments on flag structs. +- Include non-global flags when checking version and help. +- Travis CI config updates. + +### Fixed +- Ensure slice type flags have non-nil values. +- Collect global flags from the full command hierarchy. +- Docs prose. + +## [1.7.0] - 2015-05-03 (backfilled 2016-04-25) +### Changed +- `HelpPrinter` signature includes output writer. + +### Fixed +- Specify go 1.1+ in docs. +- Set `Writer` when running command as app. + +## [1.6.0] - 2015-03-23 (backfilled 2016-04-25) +### Added +- Multiple author support. +- `NumFlags` at context level. +- `Aliases` at command level. + +### Deprecated +- `ShortName` at command level. + +### Fixed +- Subcommand help output. +- Backward compatible support for deprecated `Author` and `Email` fields. +- Docs regarding `Names`/`Aliases`. + +## [1.5.0] - 2015-02-20 (backfilled 2016-04-25) +### Added +- `After` hook func support at app and command level. + +### Fixed +- Use parsed context when running command as subcommand. +- Docs prose. + +## [1.4.1] - 2015-01-09 (backfilled 2016-04-25) +### Added +- Support for hiding `-h / --help` flags, but not `help` subcommand. +- Stop flag parsing after `--`. + +### Fixed +- Help text for generic flags to specify single value. +- Use double quotes in output for defaults. +- Use `ParseInt` instead of `ParseUint` for int environment var values. +- Use `0` as base when parsing int environment var values. + +## [1.4.0] - 2014-12-12 (backfilled 2016-04-25) +### Added +- Support for environment variable lookup "cascade". +- Support for `Stdout` on app for output redirection. + +### Fixed +- Print command help instead of app help in `ShowCommandHelp`. + +## [1.3.1] - 2014-11-13 (backfilled 2016-04-25) +### Added +- Docs and example code updates. + +### Changed +- Default `-v / --version` flag made optional. + +## [1.3.0] - 2014-08-10 (backfilled 2016-04-25) +### Added +- `FlagNames` at context level. +- Exposed `VersionPrinter` var for more control over version output. +- Zsh completion hook. +- `AUTHOR` section in default app help template. +- Contribution guidelines. +- `DurationFlag` type. + +## [1.2.0] - 2014-08-02 +### Added +- Support for environment variable defaults on flags plus tests. + +## [1.1.0] - 2014-07-15 +### Added +- Bash completion. +- Optional hiding of built-in help command. +- Optional skipping of flag parsing at command level. +- `Author`, `Email`, and `Compiled` metadata on app. +- `Before` hook func support at app and command level. +- `CommandNotFound` func support at app level. +- Command reference available on context. +- `GenericFlag` type. +- `Float64Flag` type. +- `BoolTFlag` type. +- `IsSet` flag helper on context. +- More flag lookup funcs at context level. +- More tests & docs. + +### Changed +- Help template updates to account for presence/absence of flags. +- Separated subcommand help template. +- Exposed `HelpPrinter` var for more control over help output. + +## [1.0.0] - 2013-11-01 +### Added +- `help` flag in default app flag set and each command flag set. +- Custom handling of argument parsing errors. +- Command lookup by name at app level. +- `StringSliceFlag` type and supporting `StringSlice` type. +- `IntSliceFlag` type and supporting `IntSlice` type. +- Slice type flag lookups by name at context level. +- Export of app and command help functions. +- More tests & docs. 
+ +## 0.1.0 - 2013-07-22 +### Added +- Initial implementation. + +[Unreleased]: https://github.com/urfave/cli/compare/v1.18.0...HEAD +[1.18.0]: https://github.com/urfave/cli/compare/v1.17.0...v1.18.0 +[1.17.0]: https://github.com/urfave/cli/compare/v1.16.0...v1.17.0 +[1.16.0]: https://github.com/urfave/cli/compare/v1.15.0...v1.16.0 +[1.15.0]: https://github.com/urfave/cli/compare/v1.14.0...v1.15.0 +[1.14.0]: https://github.com/urfave/cli/compare/v1.13.0...v1.14.0 +[1.13.0]: https://github.com/urfave/cli/compare/v1.12.0...v1.13.0 +[1.12.0]: https://github.com/urfave/cli/compare/v1.11.1...v1.12.0 +[1.11.1]: https://github.com/urfave/cli/compare/v1.11.0...v1.11.1 +[1.11.0]: https://github.com/urfave/cli/compare/v1.10.2...v1.11.0 +[1.10.2]: https://github.com/urfave/cli/compare/v1.10.1...v1.10.2 +[1.10.1]: https://github.com/urfave/cli/compare/v1.10.0...v1.10.1 +[1.10.0]: https://github.com/urfave/cli/compare/v1.9.0...v1.10.0 +[1.9.0]: https://github.com/urfave/cli/compare/v1.8.0...v1.9.0 +[1.8.0]: https://github.com/urfave/cli/compare/v1.7.1...v1.8.0 +[1.7.1]: https://github.com/urfave/cli/compare/v1.7.0...v1.7.1 +[1.7.0]: https://github.com/urfave/cli/compare/v1.6.0...v1.7.0 +[1.6.0]: https://github.com/urfave/cli/compare/v1.5.0...v1.6.0 +[1.5.0]: https://github.com/urfave/cli/compare/v1.4.1...v1.5.0 +[1.4.1]: https://github.com/urfave/cli/compare/v1.4.0...v1.4.1 +[1.4.0]: https://github.com/urfave/cli/compare/v1.3.1...v1.4.0 +[1.3.1]: https://github.com/urfave/cli/compare/v1.3.0...v1.3.1 +[1.3.0]: https://github.com/urfave/cli/compare/v1.2.0...v1.3.0 +[1.2.0]: https://github.com/urfave/cli/compare/v1.1.0...v1.2.0 +[1.1.0]: https://github.com/urfave/cli/compare/v1.0.0...v1.1.0 +[1.0.0]: https://github.com/urfave/cli/compare/v0.1.0...v1.0.0 diff --git a/vendor/github.com/urfave/cli/LICENSE b/vendor/github.com/urfave/cli/LICENSE new file mode 100644 index 000000000..42a597e29 --- /dev/null +++ b/vendor/github.com/urfave/cli/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016 Jeremy Saenz & Contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/urfave/cli/README.md b/vendor/github.com/urfave/cli/README.md new file mode 100644 index 000000000..2bbbd8ea9 --- /dev/null +++ b/vendor/github.com/urfave/cli/README.md @@ -0,0 +1,1381 @@ +cli +=== + +[![Build Status](https://travis-ci.org/urfave/cli.svg?branch=master)](https://travis-ci.org/urfave/cli) +[![Windows Build Status](https://ci.appveyor.com/api/projects/status/rtgk5xufi932pb2v?svg=true)](https://ci.appveyor.com/project/urfave/cli) +[![GoDoc](https://godoc.org/github.com/urfave/cli?status.svg)](https://godoc.org/github.com/urfave/cli) +[![codebeat](https://codebeat.co/badges/0a8f30aa-f975-404b-b878-5fab3ae1cc5f)](https://codebeat.co/projects/github-com-urfave-cli) +[![Go Report Card](https://goreportcard.com/badge/urfave/cli)](https://goreportcard.com/report/urfave/cli) +[![top level coverage](https://gocover.io/_badge/github.com/urfave/cli?0 "top level coverage")](http://gocover.io/github.com/urfave/cli) / +[![altsrc coverage](https://gocover.io/_badge/github.com/urfave/cli/altsrc?0 "altsrc coverage")](http://gocover.io/github.com/urfave/cli/altsrc) + +**Notice:** This is the library formerly known as +`github.com/codegangsta/cli` -- Github will automatically redirect requests +to this repository, but we recommend updating your references for clarity. + +cli is a simple, fast, and fun package for building command line apps in Go. The +goal is to enable developers to write fast and distributable command line +applications in an expressive way. + + + +- [Overview](#overview) +- [Installation](#installation) + * [Supported platforms](#supported-platforms) + * [Using the `v2` branch](#using-the-v2-branch) + * [Pinning to the `v1` releases](#pinning-to-the-v1-releases) +- [Getting Started](#getting-started) +- [Examples](#examples) + * [Arguments](#arguments) + * [Flags](#flags) + + [Placeholder Values](#placeholder-values) + + [Alternate Names](#alternate-names) + + [Ordering](#ordering) + + [Values from the Environment](#values-from-the-environment) + + [Values from alternate input sources (YAML, TOML, and others)](#values-from-alternate-input-sources-yaml-toml-and-others) + * [Subcommands](#subcommands) + * [Subcommands categories](#subcommands-categories) + * [Exit code](#exit-code) + * [Bash Completion](#bash-completion) + + [Enabling](#enabling) + + [Distribution](#distribution) + + [Customization](#customization) + * [Generated Help Text](#generated-help-text) + + [Customization](#customization-1) + * [Version Flag](#version-flag) + + [Customization](#customization-2) + + [Full API Example](#full-api-example) +- [Contribution Guidelines](#contribution-guidelines) + + + +## Overview + +Command line apps are usually so tiny that there is absolutely no reason why +your code should *not* be self-documenting. Things like generating help text and +parsing command flags/options should not hinder productivity when writing a +command line app. + +**This is where cli comes into play.** cli makes command line programming fun, +organized, and expressive! + +## Installation + +Make sure you have a working Go environment. Go version 1.2+ is supported. [See +the install instructions for Go](http://golang.org/doc/install.html). 
+ +To install cli, simply run: +``` +$ go get github.com/urfave/cli +``` + +Make sure your `PATH` includes the `$GOPATH/bin` directory so your commands can +be easily used: +``` +export PATH=$PATH:$GOPATH/bin +``` + +### Supported platforms + +cli is tested against multiple versions of Go on Linux, and against the latest +released version of Go on OS X and Windows. For full details, see +[`./.travis.yml`](./.travis.yml) and [`./appveyor.yml`](./appveyor.yml). + +### Using the `v2` branch + +**Warning**: The `v2` branch is currently unreleased and considered unstable. + +There is currently a long-lived branch named `v2` that is intended to land as +the new `master` branch once development there has settled down. The current +`master` branch (mirrored as `v1`) is being manually merged into `v2` on +an irregular human-based schedule, but generally if one wants to "upgrade" to +`v2` *now* and accept the volatility (read: "awesomeness") that comes along with +that, please use whatever version pinning of your preference, such as via +`gopkg.in`: + +``` +$ go get gopkg.in/urfave/cli.v2 +``` + +``` go +... +import ( + "gopkg.in/urfave/cli.v2" // imports as package "cli" +) +... +``` + +### Pinning to the `v1` releases + +Similarly to the section above describing use of the `v2` branch, if one wants +to avoid any unexpected compatibility pains once `v2` becomes `master`, then +pinning to `v1` is an acceptable option, e.g.: + +``` +$ go get gopkg.in/urfave/cli.v1 +``` + +``` go +... +import ( + "gopkg.in/urfave/cli.v1" // imports as package "cli" +) +... +``` + +This will pull the latest tagged `v1` release (e.g. `v1.18.1` at the time of writing). + +## Getting Started + +One of the philosophies behind cli is that an API should be playful and full of +discovery. So a cli app can be as little as one line of code in `main()`. + + +``` go +package main + +import ( + "os" + + "github.com/urfave/cli" +) + +func main() { + cli.NewApp().Run(os.Args) +} +``` + +This app will run and show help text, but is not very useful. Let's give an +action to execute and some help documentation: + + +``` go +package main + +import ( + "fmt" + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + app.Name = "boom" + app.Usage = "make an explosive entrance" + app.Action = func(c *cli.Context) error { + fmt.Println("boom! I say!") + return nil + } + + app.Run(os.Args) +} +``` + +Running this already gives you a ton of functionality, plus support for things +like subcommands and flags, which are covered below. + +## Examples + +Being a programmer can be a lonely job. Thankfully by the power of automation +that is not the case! Let's create a greeter app to fend off our demons of +loneliness! + +Start by creating a directory named `greet`, and within it, add a file, +`greet.go` with the following code in it: + + +``` go +package main + +import ( + "fmt" + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + app.Name = "greet" + app.Usage = "fight the loneliness!" + app.Action = func(c *cli.Context) error { + fmt.Println("Hello friend!") + return nil + } + + app.Run(os.Args) +} +``` + +Install our command to the `$GOPATH/bin` directory: + +``` +$ go install +``` + +Finally run our new command: + +``` +$ greet +Hello friend! +``` + +cli also generates neat help text: + +``` +$ greet help +NAME: + greet - fight the loneliness! + +USAGE: + greet [global options] command [command options] [arguments...] 
+ +VERSION: + 0.0.0 + +COMMANDS: + help, h Shows a list of commands or help for one command + +GLOBAL OPTIONS + --version Shows version information +``` + +### Arguments + +You can lookup arguments by calling the `Args` function on `cli.Context`, e.g.: + + +``` go +package main + +import ( + "fmt" + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + + app.Action = func(c *cli.Context) error { + fmt.Printf("Hello %q", c.Args().Get(0)) + return nil + } + + app.Run(os.Args) +} +``` + +### Flags + +Setting and querying flags is simple. + + +``` go +package main + +import ( + "fmt" + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + + app.Flags = []cli.Flag { + cli.StringFlag{ + Name: "lang", + Value: "english", + Usage: "language for the greeting", + }, + } + + app.Action = func(c *cli.Context) error { + name := "Nefertiti" + if c.NArg() > 0 { + name = c.Args().Get(0) + } + if c.String("lang") == "spanish" { + fmt.Println("Hola", name) + } else { + fmt.Println("Hello", name) + } + return nil + } + + app.Run(os.Args) +} +``` + +You can also set a destination variable for a flag, to which the content will be +scanned. + + +``` go +package main + +import ( + "os" + "fmt" + + "github.com/urfave/cli" +) + +func main() { + var language string + + app := cli.NewApp() + + app.Flags = []cli.Flag { + cli.StringFlag{ + Name: "lang", + Value: "english", + Usage: "language for the greeting", + Destination: &language, + }, + } + + app.Action = func(c *cli.Context) error { + name := "someone" + if c.NArg() > 0 { + name = c.Args()[0] + } + if language == "spanish" { + fmt.Println("Hola", name) + } else { + fmt.Println("Hello", name) + } + return nil + } + + app.Run(os.Args) +} +``` + +See full list of flags at http://godoc.org/github.com/urfave/cli + +#### Placeholder Values + +Sometimes it's useful to specify a flag's value within the usage string itself. +Such placeholders are indicated with back quotes. + +For example this: + + +```go +package main + +import ( + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + + app.Flags = []cli.Flag{ + cli.StringFlag{ + Name: "config, c", + Usage: "Load configuration from `FILE`", + }, + } + + app.Run(os.Args) +} +``` + +Will result in help output like: + +``` +--config FILE, -c FILE Load configuration from FILE +``` + +Note that only the first placeholder is used. Subsequent back-quoted words will +be left as-is. + +#### Alternate Names + +You can set alternate (or short) names for flags by providing a comma-delimited +list for the `Name`. e.g. + + +``` go +package main + +import ( + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + + app.Flags = []cli.Flag { + cli.StringFlag{ + Name: "lang, l", + Value: "english", + Usage: "language for the greeting", + }, + } + + app.Run(os.Args) +} +``` + +That flag can then be set with `--lang spanish` or `-l spanish`. Note that +giving two different forms of the same flag in the same command invocation is an +error. + +#### Ordering + +Flags for the application and commands are shown in the order they are defined. +However, it's possible to sort them from outside this library by using `FlagsByName` +or `CommandsByName` with `sort`. 
+
+For example this:
+
+
+``` go
+package main
+
+import (
+  "os"
+  "sort"
+
+  "github.com/urfave/cli"
+)
+
+func main() {
+  app := cli.NewApp()
+
+  app.Flags = []cli.Flag {
+    cli.StringFlag{
+      Name:  "lang, l",
+      Value: "english",
+      Usage: "Language for the greeting",
+    },
+    cli.StringFlag{
+      Name:  "config, c",
+      Usage: "Load configuration from `FILE`",
+    },
+  }
+
+  app.Commands = []cli.Command{
+    {
+      Name:    "complete",
+      Aliases: []string{"c"},
+      Usage:   "complete a task on the list",
+      Action: func(c *cli.Context) error {
+        return nil
+      },
+    },
+    {
+      Name:    "add",
+      Aliases: []string{"a"},
+      Usage:   "add a task to the list",
+      Action: func(c *cli.Context) error {
+        return nil
+      },
+    },
+  }
+
+  sort.Sort(cli.FlagsByName(app.Flags))
+  sort.Sort(cli.CommandsByName(app.Commands))
+
+  app.Run(os.Args)
+}
+```
+
+Will result in help output like:
+
+```
+--config FILE, -c FILE  Load configuration from FILE
+--lang value, -l value  Language for the greeting (default: "english")
+```
+
+#### Values from the Environment
+
+You can also have the default value set from the environment via `EnvVar`. e.g.
+
+
+``` go
+package main
+
+import (
+  "os"
+
+  "github.com/urfave/cli"
+)
+
+func main() {
+  app := cli.NewApp()
+
+  app.Flags = []cli.Flag {
+    cli.StringFlag{
+      Name:   "lang, l",
+      Value:  "english",
+      Usage:  "language for the greeting",
+      EnvVar: "APP_LANG",
+    },
+  }
+
+  app.Run(os.Args)
+}
+```
+
+The `EnvVar` may also be given as a comma-delimited "cascade", where the first
+environment variable that resolves is used as the default.
+
+
+``` go
+package main
+
+import (
+  "os"
+
+  "github.com/urfave/cli"
+)
+
+func main() {
+  app := cli.NewApp()
+
+  app.Flags = []cli.Flag {
+    cli.StringFlag{
+      Name:   "lang, l",
+      Value:  "english",
+      Usage:  "language for the greeting",
+      EnvVar: "LEGACY_COMPAT_LANG,APP_LANG,LANG",
+    },
+  }
+
+  app.Run(os.Args)
+}
+```
+
+#### Values from alternate input sources (YAML, TOML, and others)
+
+There is a separate package altsrc that adds support for getting flag values
+from other file input sources.
+
+Currently supported input source formats:
+* YAML
+* TOML
+
+In order to get values for a flag from an alternate input source the following
+code would be added to wrap an existing cli.Flag like below:
+
+``` go
+  altsrc.NewIntFlag(cli.IntFlag{Name: "test"})
+```
+
+Initialization must also occur for these flags. Below is an example that
+initializes a yaml input source for the flags defined on a command:
+
+``` go
+  command.Before = altsrc.InitInputSourceWithContext(command.Flags, altsrc.NewYamlSourceFromFlagFunc("load"))
+```
+
+The code above uses the "load" string as a flag name to get the file name of
+a yaml file from the cli.Context, then uses that file name to initialize the
+yaml input source for any flags that are defined on that command. Note that
+the "load" flag itself must also be defined on the command's flags for this
+code snippet to work.
+
+Currently only the above-specified formats are supported, but developers can
+add support for other input sources by implementing the
+altsrc.InputSourceContext interface for their given sources.
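+
+For illustration, here is a minimal sketch of such a custom source: one backed
+by a plain `map[string]interface{}`. The `mapInputSource` name and its
+zero-value fallback behavior are inventions of this sketch; the method set is
+the one exercised by `altsrc/flag.go` later in this diff, assuming the v1
+`InputSourceContext` interface consists of exactly these nine getters.
+
+``` go
+package mapsource
+
+import (
+  "time"
+
+  "gopkg.in/urfave/cli.v1"
+  "gopkg.in/urfave/cli.v1/altsrc"
+)
+
+// mapInputSource serves flag values out of an in-memory map, falling back to
+// the type's zero value when a key is missing or has the wrong type.
+type mapInputSource struct {
+  values map[string]interface{}
+}
+
+func (m *mapInputSource) Int(name string) (int, error) {
+  v, _ := m.values[name].(int)
+  return v, nil
+}
+
+func (m *mapInputSource) Duration(name string) (time.Duration, error) {
+  v, _ := m.values[name].(time.Duration)
+  return v, nil
+}
+
+func (m *mapInputSource) Float64(name string) (float64, error) {
+  v, _ := m.values[name].(float64)
+  return v, nil
+}
+
+func (m *mapInputSource) String(name string) (string, error) {
+  v, _ := m.values[name].(string)
+  return v, nil
+}
+
+func (m *mapInputSource) StringSlice(name string) ([]string, error) {
+  v, _ := m.values[name].([]string)
+  return v, nil
+}
+
+func (m *mapInputSource) IntSlice(name string) ([]int, error) {
+  v, _ := m.values[name].([]int)
+  return v, nil
+}
+
+func (m *mapInputSource) Generic(name string) (cli.Generic, error) {
+  v, _ := m.values[name].(cli.Generic)
+  return v, nil
+}
+
+func (m *mapInputSource) Bool(name string) (bool, error) {
+  v, _ := m.values[name].(bool)
+  return v, nil
+}
+
+// BoolT defaults to true when the key is absent, matching the BoolT flag
+// convention.
+func (m *mapInputSource) BoolT(name string) (bool, error) {
+  if v, ok := m.values[name].(bool); ok {
+    return v, nil
+  }
+  return true, nil
+}
+
+// Compile-time check that the sketch satisfies the interface.
+var _ altsrc.InputSourceContext = (*mapInputSource)(nil)
+```
+
+A source like this could then be wired up with `altsrc.InitInputSource` (whose
+factory-func signature appears in `altsrc/flag.go` below), e.g.
+`app.Before = altsrc.InitInputSource(flags, func() (altsrc.InputSourceContext, error) { return &mapInputSource{values: conf}, nil })`,
+where `conf` is a hypothetical pre-populated map.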
+ +Here is a more complete sample of a command using YAML support: + + +``` go +package notmain + +import ( + "fmt" + "os" + + "github.com/urfave/cli" + "github.com/urfave/cli/altsrc" +) + +func main() { + app := cli.NewApp() + + flags := []cli.Flag{ + altsrc.NewIntFlag(cli.IntFlag{Name: "test"}), + cli.StringFlag{Name: "load"}, + } + + app.Action = func(c *cli.Context) error { + fmt.Println("yaml ist rad") + return nil + } + + app.Before = altsrc.InitInputSourceWithContext(flags, altsrc.NewYamlSourceFromFlagFunc("load")) + app.Flags = flags + + app.Run(os.Args) +} +``` + +### Subcommands + +Subcommands can be defined for a more git-like command line app. + + +```go +package main + +import ( + "fmt" + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + + app.Commands = []cli.Command{ + { + Name: "add", + Aliases: []string{"a"}, + Usage: "add a task to the list", + Action: func(c *cli.Context) error { + fmt.Println("added task: ", c.Args().First()) + return nil + }, + }, + { + Name: "complete", + Aliases: []string{"c"}, + Usage: "complete a task on the list", + Action: func(c *cli.Context) error { + fmt.Println("completed task: ", c.Args().First()) + return nil + }, + }, + { + Name: "template", + Aliases: []string{"t"}, + Usage: "options for task templates", + Subcommands: []cli.Command{ + { + Name: "add", + Usage: "add a new template", + Action: func(c *cli.Context) error { + fmt.Println("new task template: ", c.Args().First()) + return nil + }, + }, + { + Name: "remove", + Usage: "remove an existing template", + Action: func(c *cli.Context) error { + fmt.Println("removed task template: ", c.Args().First()) + return nil + }, + }, + }, + }, + } + + app.Run(os.Args) +} +``` + +### Subcommands categories + +For additional organization in apps that have many subcommands, you can +associate a category for each command to group them together in the help +output. + +E.g. + +```go +package main + +import ( + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + + app.Commands = []cli.Command{ + { + Name: "noop", + }, + { + Name: "add", + Category: "template", + }, + { + Name: "remove", + Category: "template", + }, + } + + app.Run(os.Args) +} +``` + +Will include: + +``` +COMMANDS: + noop + + Template actions: + add + remove +``` + +### Exit code + +Calling `App.Run` will not automatically call `os.Exit`, which means that by +default the exit code will "fall through" to being `0`. An explicit exit code +may be set by returning a non-nil error that fulfills `cli.ExitCoder`, *or* a +`cli.MultiError` that includes an error that fulfills `cli.ExitCoder`, e.g.: + +``` go +package main + +import ( + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + app.Flags = []cli.Flag{ + cli.BoolTFlag{ + Name: "ginger-crouton", + Usage: "is it in the soup?", + }, + } + app.Action = func(ctx *cli.Context) error { + if !ctx.Bool("ginger-crouton") { + return cli.NewExitError("it is not in the soup", 86) + } + return nil + } + + app.Run(os.Args) +} +``` + +### Bash Completion + +You can enable completion commands by setting the `EnableBashCompletion` +flag on the `App` object. By default, this setting will only auto-complete to +show an app's subcommands, but you can write your own completion methods for +the App or its subcommands. 
+ + +``` go +package main + +import ( + "fmt" + "os" + + "github.com/urfave/cli" +) + +func main() { + tasks := []string{"cook", "clean", "laundry", "eat", "sleep", "code"} + + app := cli.NewApp() + app.EnableBashCompletion = true + app.Commands = []cli.Command{ + { + Name: "complete", + Aliases: []string{"c"}, + Usage: "complete a task on the list", + Action: func(c *cli.Context) error { + fmt.Println("completed task: ", c.Args().First()) + return nil + }, + BashComplete: func(c *cli.Context) { + // This will complete if no args are passed + if c.NArg() > 0 { + return + } + for _, t := range tasks { + fmt.Println(t) + } + }, + }, + } + + app.Run(os.Args) +} +``` + +#### Enabling + +Source the `autocomplete/bash_autocomplete` file in your `.bashrc` file while +setting the `PROG` variable to the name of your program: + +`PROG=myprogram source /.../cli/autocomplete/bash_autocomplete` + +#### Distribution + +Copy `autocomplete/bash_autocomplete` into `/etc/bash_completion.d/` and rename +it to the name of the program you wish to add autocomplete support for (or +automatically install it there if you are distributing a package). Don't forget +to source the file to make it active in the current shell. + +``` +sudo cp src/bash_autocomplete /etc/bash_completion.d/ +source /etc/bash_completion.d/ +``` + +Alternatively, you can just document that users should source the generic +`autocomplete/bash_autocomplete` in their bash configuration with `$PROG` set +to the name of their program (as above). + +#### Customization + +The default bash completion flag (`--generate-bash-completion`) is defined as +`cli.BashCompletionFlag`, and may be redefined if desired, e.g.: + + +``` go +package main + +import ( + "os" + + "github.com/urfave/cli" +) + +func main() { + cli.BashCompletionFlag = cli.BoolFlag{ + Name: "compgen", + Hidden: true, + } + + app := cli.NewApp() + app.EnableBashCompletion = true + app.Commands = []cli.Command{ + { + Name: "wat", + }, + } + app.Run(os.Args) +} +``` + +### Generated Help Text + +The default help flag (`-h/--help`) is defined as `cli.HelpFlag` and is checked +by the cli internals in order to print generated help text for the app, command, +or subcommand, and break execution. + +#### Customization + +All of the help text generation may be customized, and at multiple levels. The +templates are exposed as variables `AppHelpTemplate`, `CommandHelpTemplate`, and +`SubcommandHelpTemplate` which may be reassigned or augmented, and full override +is possible by assigning a compatible func to the `cli.HelpPrinter` variable, +e.g.: + + +``` go +package main + +import ( + "fmt" + "io" + "os" + + "github.com/urfave/cli" +) + +func main() { + // EXAMPLE: Append to an existing template + cli.AppHelpTemplate = fmt.Sprintf(`%s + +WEBSITE: http://awesometown.example.com + +SUPPORT: support@awesometown.example.com + +`, cli.AppHelpTemplate) + + // EXAMPLE: Override a template + cli.AppHelpTemplate = `NAME: + {{.Name}} - {{.Usage}} +USAGE: + {{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}} + {{if len .Authors}} +AUTHOR: + {{range .Authors}}{{ . 
}}{{end}} + {{end}}{{if .Commands}} +COMMANDS: +{{range .Commands}}{{if not .HideHelp}} {{join .Names ", "}}{{ "\t"}}{{.Usage}}{{ "\n" }}{{end}}{{end}}{{end}}{{if .VisibleFlags}} +GLOBAL OPTIONS: + {{range .VisibleFlags}}{{.}} + {{end}}{{end}}{{if .Copyright }} +COPYRIGHT: + {{.Copyright}} + {{end}}{{if .Version}} +VERSION: + {{.Version}} + {{end}} +` + + // EXAMPLE: Replace the `HelpPrinter` func + cli.HelpPrinter = func(w io.Writer, templ string, data interface{}) { + fmt.Println("Ha HA. I pwnd the help!!1") + } + + cli.NewApp().Run(os.Args) +} +``` + +The default flag may be customized to something other than `-h/--help` by +setting `cli.HelpFlag`, e.g.: + + +``` go +package main + +import ( + "os" + + "github.com/urfave/cli" +) + +func main() { + cli.HelpFlag = cli.BoolFlag{ + Name: "halp, haaaaalp", + Usage: "HALP", + EnvVar: "SHOW_HALP,HALPPLZ", + } + + cli.NewApp().Run(os.Args) +} +``` + +### Version Flag + +The default version flag (`-v/--version`) is defined as `cli.VersionFlag`, which +is checked by the cli internals in order to print the `App.Version` via +`cli.VersionPrinter` and break execution. + +#### Customization + +The default flag may be customized to something other than `-v/--version` by +setting `cli.VersionFlag`, e.g.: + + +``` go +package main + +import ( + "os" + + "github.com/urfave/cli" +) + +func main() { + cli.VersionFlag = cli.BoolFlag{ + Name: "print-version, V", + Usage: "print only the version", + } + + app := cli.NewApp() + app.Name = "partay" + app.Version = "19.99.0" + app.Run(os.Args) +} +``` + +Alternatively, the version printer at `cli.VersionPrinter` may be overridden, e.g.: + + +``` go +package main + +import ( + "fmt" + "os" + + "github.com/urfave/cli" +) + +var ( + Revision = "fafafaf" +) + +func main() { + cli.VersionPrinter = func(c *cli.Context) { + fmt.Printf("version=%s revision=%s\n", c.App.Version, Revision) + } + + app := cli.NewApp() + app.Name = "partay" + app.Version = "19.99.0" + app.Run(os.Args) +} +``` + +#### Full API Example + +**Notice**: This is a contrived (functioning) example meant strictly for API +demonstration purposes. Use of one's imagination is encouraged. 
+ + +``` go +package main + +import ( + "errors" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "time" + + "github.com/urfave/cli" +) + +func init() { + cli.AppHelpTemplate += "\nCUSTOMIZED: you bet ur muffins\n" + cli.CommandHelpTemplate += "\nYMMV\n" + cli.SubcommandHelpTemplate += "\nor something\n" + + cli.HelpFlag = cli.BoolFlag{Name: "halp"} + cli.BashCompletionFlag = cli.BoolFlag{Name: "compgen", Hidden: true} + cli.VersionFlag = cli.BoolFlag{Name: "print-version, V"} + + cli.HelpPrinter = func(w io.Writer, templ string, data interface{}) { + fmt.Fprintf(w, "best of luck to you\n") + } + cli.VersionPrinter = func(c *cli.Context) { + fmt.Fprintf(c.App.Writer, "version=%s\n", c.App.Version) + } + cli.OsExiter = func(c int) { + fmt.Fprintf(cli.ErrWriter, "refusing to exit %d\n", c) + } + cli.ErrWriter = ioutil.Discard + cli.FlagStringer = func(fl cli.Flag) string { + return fmt.Sprintf("\t\t%s", fl.GetName()) + } +} + +type hexWriter struct{} + +func (w *hexWriter) Write(p []byte) (int, error) { + for _, b := range p { + fmt.Printf("%x", b) + } + fmt.Printf("\n") + + return len(p), nil +} + +type genericType struct{ + s string +} + +func (g *genericType) Set(value string) error { + g.s = value + return nil +} + +func (g *genericType) String() string { + return g.s +} + +func main() { + app := cli.NewApp() + app.Name = "kənˈtrīv" + app.Version = "19.99.0" + app.Compiled = time.Now() + app.Authors = []cli.Author{ + cli.Author{ + Name: "Example Human", + Email: "human@example.com", + }, + } + app.Copyright = "(c) 1999 Serious Enterprise" + app.HelpName = "contrive" + app.Usage = "demonstrate available API" + app.UsageText = "contrive - demonstrating the available API" + app.ArgsUsage = "[args and such]" + app.Commands = []cli.Command{ + cli.Command{ + Name: "doo", + Aliases: []string{"do"}, + Category: "motion", + Usage: "do the doo", + UsageText: "doo - does the dooing", + Description: "no really, there is a lot of dooing to be done", + ArgsUsage: "[arrgh]", + Flags: []cli.Flag{ + cli.BoolFlag{Name: "forever, forevvarr"}, + }, + Subcommands: cli.Commands{ + cli.Command{ + Name: "wop", + Action: wopAction, + }, + }, + SkipFlagParsing: false, + HideHelp: false, + Hidden: false, + HelpName: "doo!", + BashComplete: func(c *cli.Context) { + fmt.Fprintf(c.App.Writer, "--better\n") + }, + Before: func(c *cli.Context) error { + fmt.Fprintf(c.App.Writer, "brace for impact\n") + return nil + }, + After: func(c *cli.Context) error { + fmt.Fprintf(c.App.Writer, "did we lose anyone?\n") + return nil + }, + Action: func(c *cli.Context) error { + c.Command.FullName() + c.Command.HasName("wop") + c.Command.Names() + c.Command.VisibleFlags() + fmt.Fprintf(c.App.Writer, "dodododododoodododddooooododododooo\n") + if c.Bool("forever") { + c.Command.Run(c) + } + return nil + }, + OnUsageError: func(c *cli.Context, err error, isSubcommand bool) error { + fmt.Fprintf(c.App.Writer, "for shame\n") + return err + }, + }, + } + app.Flags = []cli.Flag{ + cli.BoolFlag{Name: "fancy"}, + cli.BoolTFlag{Name: "fancier"}, + cli.DurationFlag{Name: "howlong, H", Value: time.Second * 3}, + cli.Float64Flag{Name: "howmuch"}, + cli.GenericFlag{Name: "wat", Value: &genericType{}}, + cli.Int64Flag{Name: "longdistance"}, + cli.Int64SliceFlag{Name: "intervals"}, + cli.IntFlag{Name: "distance"}, + cli.IntSliceFlag{Name: "times"}, + cli.StringFlag{Name: "dance-move, d"}, + cli.StringSliceFlag{Name: "names, N"}, + cli.UintFlag{Name: "age"}, + cli.Uint64Flag{Name: "bigage"}, + } + app.EnableBashCompletion = true + app.HideHelp = false 
+ app.HideVersion = false + app.BashComplete = func(c *cli.Context) { + fmt.Fprintf(c.App.Writer, "lipstick\nkiss\nme\nlipstick\nringo\n") + } + app.Before = func(c *cli.Context) error { + fmt.Fprintf(c.App.Writer, "HEEEERE GOES\n") + return nil + } + app.After = func(c *cli.Context) error { + fmt.Fprintf(c.App.Writer, "Phew!\n") + return nil + } + app.CommandNotFound = func(c *cli.Context, command string) { + fmt.Fprintf(c.App.Writer, "Thar be no %q here.\n", command) + } + app.OnUsageError = func(c *cli.Context, err error, isSubcommand bool) error { + if isSubcommand { + return err + } + + fmt.Fprintf(c.App.Writer, "WRONG: %#v\n", err) + return nil + } + app.Action = func(c *cli.Context) error { + cli.DefaultAppComplete(c) + cli.HandleExitCoder(errors.New("not an exit coder, though")) + cli.ShowAppHelp(c) + cli.ShowCommandCompletions(c, "nope") + cli.ShowCommandHelp(c, "also-nope") + cli.ShowCompletions(c) + cli.ShowSubcommandHelp(c) + cli.ShowVersion(c) + + categories := c.App.Categories() + categories.AddCommand("sounds", cli.Command{ + Name: "bloop", + }) + + for _, category := range c.App.Categories() { + fmt.Fprintf(c.App.Writer, "%s\n", category.Name) + fmt.Fprintf(c.App.Writer, "%#v\n", category.Commands) + fmt.Fprintf(c.App.Writer, "%#v\n", category.VisibleCommands()) + } + + fmt.Printf("%#v\n", c.App.Command("doo")) + if c.Bool("infinite") { + c.App.Run([]string{"app", "doo", "wop"}) + } + + if c.Bool("forevar") { + c.App.RunAsSubcommand(c) + } + c.App.Setup() + fmt.Printf("%#v\n", c.App.VisibleCategories()) + fmt.Printf("%#v\n", c.App.VisibleCommands()) + fmt.Printf("%#v\n", c.App.VisibleFlags()) + + fmt.Printf("%#v\n", c.Args().First()) + if len(c.Args()) > 0 { + fmt.Printf("%#v\n", c.Args()[1]) + } + fmt.Printf("%#v\n", c.Args().Present()) + fmt.Printf("%#v\n", c.Args().Tail()) + + set := flag.NewFlagSet("contrive", 0) + nc := cli.NewContext(c.App, set, c) + + fmt.Printf("%#v\n", nc.Args()) + fmt.Printf("%#v\n", nc.Bool("nope")) + fmt.Printf("%#v\n", nc.BoolT("nerp")) + fmt.Printf("%#v\n", nc.Duration("howlong")) + fmt.Printf("%#v\n", nc.Float64("hay")) + fmt.Printf("%#v\n", nc.Generic("bloop")) + fmt.Printf("%#v\n", nc.Int64("bonk")) + fmt.Printf("%#v\n", nc.Int64Slice("burnks")) + fmt.Printf("%#v\n", nc.Int("bips")) + fmt.Printf("%#v\n", nc.IntSlice("blups")) + fmt.Printf("%#v\n", nc.String("snurt")) + fmt.Printf("%#v\n", nc.StringSlice("snurkles")) + fmt.Printf("%#v\n", nc.Uint("flub")) + fmt.Printf("%#v\n", nc.Uint64("florb")) + fmt.Printf("%#v\n", nc.GlobalBool("global-nope")) + fmt.Printf("%#v\n", nc.GlobalBoolT("global-nerp")) + fmt.Printf("%#v\n", nc.GlobalDuration("global-howlong")) + fmt.Printf("%#v\n", nc.GlobalFloat64("global-hay")) + fmt.Printf("%#v\n", nc.GlobalGeneric("global-bloop")) + fmt.Printf("%#v\n", nc.GlobalInt("global-bips")) + fmt.Printf("%#v\n", nc.GlobalIntSlice("global-blups")) + fmt.Printf("%#v\n", nc.GlobalString("global-snurt")) + fmt.Printf("%#v\n", nc.GlobalStringSlice("global-snurkles")) + + fmt.Printf("%#v\n", nc.FlagNames()) + fmt.Printf("%#v\n", nc.GlobalFlagNames()) + fmt.Printf("%#v\n", nc.GlobalIsSet("wat")) + fmt.Printf("%#v\n", nc.GlobalSet("wat", "nope")) + fmt.Printf("%#v\n", nc.NArg()) + fmt.Printf("%#v\n", nc.NumFlags()) + fmt.Printf("%#v\n", nc.Parent()) + + nc.Set("wat", "also-nope") + + ec := cli.NewExitError("ohwell", 86) + fmt.Fprintf(c.App.Writer, "%d", ec.ExitCode()) + fmt.Printf("made it!\n") + return ec + } + + if os.Getenv("HEXY") != "" { + app.Writer = &hexWriter{} + app.ErrWriter = &hexWriter{} + } + + app.Metadata = 
map[string]interface{}{ + "layers": "many", + "explicable": false, + "whatever-values": 19.99, + } + + app.Run(os.Args) +} + +func wopAction(c *cli.Context) error { + fmt.Fprintf(c.App.Writer, ":wave: over here, eh\n") + return nil +} +``` + +## Contribution Guidelines + +Feel free to put up a pull request to fix a bug or maybe add a feature. I will +give it a code review and make sure that it does not break backwards +compatibility. If I or any other collaborators agree that it is in line with +the vision of the project, we will work with you to get the code into +a mergeable state and merge it into the master branch. + +If you have contributed something significant to the project, we will most +likely add you as a collaborator. As a collaborator you are given the ability +to merge others pull requests. It is very important that new code does not +break existing code, so be careful about what code you do choose to merge. + +If you feel like you have contributed to the project but have not yet been +added as a collaborator, we probably forgot to add you, please open an issue. diff --git a/vendor/github.com/urfave/cli/altsrc/altsrc.go b/vendor/github.com/urfave/cli/altsrc/altsrc.go new file mode 100644 index 000000000..ac34bf633 --- /dev/null +++ b/vendor/github.com/urfave/cli/altsrc/altsrc.go @@ -0,0 +1,3 @@ +package altsrc + +//go:generate python ../generate-flag-types altsrc -i ../flag-types.json -o flag_generated.go diff --git a/vendor/github.com/urfave/cli/altsrc/flag.go b/vendor/github.com/urfave/cli/altsrc/flag.go new file mode 100644 index 000000000..84ef009a5 --- /dev/null +++ b/vendor/github.com/urfave/cli/altsrc/flag.go @@ -0,0 +1,261 @@ +package altsrc + +import ( + "fmt" + "strconv" + "strings" + "syscall" + + "gopkg.in/urfave/cli.v1" +) + +// FlagInputSourceExtension is an extension interface of cli.Flag that +// allows a value to be set on the existing parsed flags. +type FlagInputSourceExtension interface { + cli.Flag + ApplyInputSourceValue(context *cli.Context, isc InputSourceContext) error +} + +// ApplyInputSourceValues iterates over all provided flags and +// executes ApplyInputSourceValue on flags implementing the +// FlagInputSourceExtension interface to initialize these flags +// to an alternate input source. +func ApplyInputSourceValues(context *cli.Context, inputSourceContext InputSourceContext, flags []cli.Flag) error { + for _, f := range flags { + inputSourceExtendedFlag, isType := f.(FlagInputSourceExtension) + if isType { + err := inputSourceExtendedFlag.ApplyInputSourceValue(context, inputSourceContext) + if err != nil { + return err + } + } + } + + return nil +} + +// InitInputSource is used to to setup an InputSourceContext on a cli.Command Before method. It will create a new +// input source based on the func provided. If there is no error it will then apply the new input source to any flags +// that are supported by the input source +func InitInputSource(flags []cli.Flag, createInputSource func() (InputSourceContext, error)) cli.BeforeFunc { + return func(context *cli.Context) error { + inputSource, err := createInputSource() + if err != nil { + return fmt.Errorf("Unable to create input source: inner error: \n'%v'", err.Error()) + } + + return ApplyInputSourceValues(context, inputSource, flags) + } +} + +// InitInputSourceWithContext is used to to setup an InputSourceContext on a cli.Command Before method. It will create a new +// input source based on the func provided with potentially using existing cli.Context values to initialize itself. 
If there is +// no error it will then apply the new input source to any flags that are supported by the input source +func InitInputSourceWithContext(flags []cli.Flag, createInputSource func(context *cli.Context) (InputSourceContext, error)) cli.BeforeFunc { + return func(context *cli.Context) error { + inputSource, err := createInputSource(context) + if err != nil { + return fmt.Errorf("Unable to create input source with context: inner error: \n'%v'", err.Error()) + } + + return ApplyInputSourceValues(context, inputSource, flags) + } +} + +// ApplyInputSourceValue applies a generic value to the flagSet if required +func (f *GenericFlag) ApplyInputSourceValue(context *cli.Context, isc InputSourceContext) error { + if f.set != nil { + if !context.IsSet(f.Name) && !isEnvVarSet(f.EnvVar) { + value, err := isc.Generic(f.GenericFlag.Name) + if err != nil { + return err + } + if value != nil { + eachName(f.Name, func(name string) { + f.set.Set(f.Name, value.String()) + }) + } + } + } + + return nil +} + +// ApplyInputSourceValue applies a StringSlice value to the flagSet if required +func (f *StringSliceFlag) ApplyInputSourceValue(context *cli.Context, isc InputSourceContext) error { + if f.set != nil { + if !context.IsSet(f.Name) && !isEnvVarSet(f.EnvVar) { + value, err := isc.StringSlice(f.StringSliceFlag.Name) + if err != nil { + return err + } + if value != nil { + var sliceValue cli.StringSlice = value + eachName(f.Name, func(name string) { + underlyingFlag := f.set.Lookup(f.Name) + if underlyingFlag != nil { + underlyingFlag.Value = &sliceValue + } + }) + } + } + } + return nil +} + +// ApplyInputSourceValue applies a IntSlice value if required +func (f *IntSliceFlag) ApplyInputSourceValue(context *cli.Context, isc InputSourceContext) error { + if f.set != nil { + if !context.IsSet(f.Name) && !isEnvVarSet(f.EnvVar) { + value, err := isc.IntSlice(f.IntSliceFlag.Name) + if err != nil { + return err + } + if value != nil { + var sliceValue cli.IntSlice = value + eachName(f.Name, func(name string) { + underlyingFlag := f.set.Lookup(f.Name) + if underlyingFlag != nil { + underlyingFlag.Value = &sliceValue + } + }) + } + } + } + return nil +} + +// ApplyInputSourceValue applies a Bool value to the flagSet if required +func (f *BoolFlag) ApplyInputSourceValue(context *cli.Context, isc InputSourceContext) error { + if f.set != nil { + if !context.IsSet(f.Name) && !isEnvVarSet(f.EnvVar) { + value, err := isc.Bool(f.BoolFlag.Name) + if err != nil { + return err + } + if value { + eachName(f.Name, func(name string) { + f.set.Set(f.Name, strconv.FormatBool(value)) + }) + } + } + } + return nil +} + +// ApplyInputSourceValue applies a BoolT value to the flagSet if required +func (f *BoolTFlag) ApplyInputSourceValue(context *cli.Context, isc InputSourceContext) error { + if f.set != nil { + if !context.IsSet(f.Name) && !isEnvVarSet(f.EnvVar) { + value, err := isc.BoolT(f.BoolTFlag.Name) + if err != nil { + return err + } + if !value { + eachName(f.Name, func(name string) { + f.set.Set(f.Name, strconv.FormatBool(value)) + }) + } + } + } + return nil +} + +// ApplyInputSourceValue applies a String value to the flagSet if required +func (f *StringFlag) ApplyInputSourceValue(context *cli.Context, isc InputSourceContext) error { + if f.set != nil { + if !(context.IsSet(f.Name) || isEnvVarSet(f.EnvVar)) { + value, err := isc.String(f.StringFlag.Name) + if err != nil { + return err + } + if value != "" { + eachName(f.Name, func(name string) { + f.set.Set(f.Name, value) + }) + } + } + } + return nil +} + +// 
ApplyInputSourceValue applies a int value to the flagSet if required +func (f *IntFlag) ApplyInputSourceValue(context *cli.Context, isc InputSourceContext) error { + if f.set != nil { + if !(context.IsSet(f.Name) || isEnvVarSet(f.EnvVar)) { + value, err := isc.Int(f.IntFlag.Name) + if err != nil { + return err + } + if value > 0 { + eachName(f.Name, func(name string) { + f.set.Set(f.Name, strconv.FormatInt(int64(value), 10)) + }) + } + } + } + return nil +} + +// ApplyInputSourceValue applies a Duration value to the flagSet if required +func (f *DurationFlag) ApplyInputSourceValue(context *cli.Context, isc InputSourceContext) error { + if f.set != nil { + if !(context.IsSet(f.Name) || isEnvVarSet(f.EnvVar)) { + value, err := isc.Duration(f.DurationFlag.Name) + if err != nil { + return err + } + if value > 0 { + eachName(f.Name, func(name string) { + f.set.Set(f.Name, value.String()) + }) + } + } + } + return nil +} + +// ApplyInputSourceValue applies a Float64 value to the flagSet if required +func (f *Float64Flag) ApplyInputSourceValue(context *cli.Context, isc InputSourceContext) error { + if f.set != nil { + if !(context.IsSet(f.Name) || isEnvVarSet(f.EnvVar)) { + value, err := isc.Float64(f.Float64Flag.Name) + if err != nil { + return err + } + if value > 0 { + floatStr := float64ToString(value) + eachName(f.Name, func(name string) { + f.set.Set(f.Name, floatStr) + }) + } + } + } + return nil +} + +func isEnvVarSet(envVars string) bool { + for _, envVar := range strings.Split(envVars, ",") { + envVar = strings.TrimSpace(envVar) + if _, ok := syscall.Getenv(envVar); ok { + // TODO: Can't use this for bools as + // set means that it was true or false based on + // Bool flag type, should work for other types + return true + } + } + + return false +} + +func float64ToString(f float64) string { + return fmt.Sprintf("%v", f) +} + +func eachName(longName string, fn func(string)) { + parts := strings.Split(longName, ",") + for _, name := range parts { + name = strings.Trim(name, " ") + fn(name) + } +} diff --git a/vendor/github.com/urfave/cli/altsrc/flag_generated.go b/vendor/github.com/urfave/cli/altsrc/flag_generated.go new file mode 100644 index 000000000..0aeb0b041 --- /dev/null +++ b/vendor/github.com/urfave/cli/altsrc/flag_generated.go @@ -0,0 +1,347 @@ +package altsrc + +import ( + "flag" + + "gopkg.in/urfave/cli.v1" +) + +// WARNING: This file is generated! 
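+
+// Each wrapper type below embeds its cli counterpart and records the
+// *flag.FlagSet handed to Apply/ApplyWithError; the ApplyInputSourceValue
+// methods in flag.go later mutate that saved set when a value comes from an
+// alternate input source.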
+ +// BoolFlag is the flag type that wraps cli.BoolFlag to allow +// for other values to be specified +type BoolFlag struct { + cli.BoolFlag + set *flag.FlagSet +} + +// NewBoolFlag creates a new BoolFlag +func NewBoolFlag(fl cli.BoolFlag) *BoolFlag { + return &BoolFlag{BoolFlag: fl, set: nil} +} + +// Apply saves the flagSet for later usage calls, then calls the +// wrapped BoolFlag.Apply +func (f *BoolFlag) Apply(set *flag.FlagSet) { + f.set = set + f.BoolFlag.Apply(set) +} + +// ApplyWithError saves the flagSet for later usage calls, then calls the +// wrapped BoolFlag.ApplyWithError +func (f *BoolFlag) ApplyWithError(set *flag.FlagSet) error { + f.set = set + return f.BoolFlag.ApplyWithError(set) +} + +// BoolTFlag is the flag type that wraps cli.BoolTFlag to allow +// for other values to be specified +type BoolTFlag struct { + cli.BoolTFlag + set *flag.FlagSet +} + +// NewBoolTFlag creates a new BoolTFlag +func NewBoolTFlag(fl cli.BoolTFlag) *BoolTFlag { + return &BoolTFlag{BoolTFlag: fl, set: nil} +} + +// Apply saves the flagSet for later usage calls, then calls the +// wrapped BoolTFlag.Apply +func (f *BoolTFlag) Apply(set *flag.FlagSet) { + f.set = set + f.BoolTFlag.Apply(set) +} + +// ApplyWithError saves the flagSet for later usage calls, then calls the +// wrapped BoolTFlag.ApplyWithError +func (f *BoolTFlag) ApplyWithError(set *flag.FlagSet) error { + f.set = set + return f.BoolTFlag.ApplyWithError(set) +} + +// DurationFlag is the flag type that wraps cli.DurationFlag to allow +// for other values to be specified +type DurationFlag struct { + cli.DurationFlag + set *flag.FlagSet +} + +// NewDurationFlag creates a new DurationFlag +func NewDurationFlag(fl cli.DurationFlag) *DurationFlag { + return &DurationFlag{DurationFlag: fl, set: nil} +} + +// Apply saves the flagSet for later usage calls, then calls the +// wrapped DurationFlag.Apply +func (f *DurationFlag) Apply(set *flag.FlagSet) { + f.set = set + f.DurationFlag.Apply(set) +} + +// ApplyWithError saves the flagSet for later usage calls, then calls the +// wrapped DurationFlag.ApplyWithError +func (f *DurationFlag) ApplyWithError(set *flag.FlagSet) error { + f.set = set + return f.DurationFlag.ApplyWithError(set) +} + +// Float64Flag is the flag type that wraps cli.Float64Flag to allow +// for other values to be specified +type Float64Flag struct { + cli.Float64Flag + set *flag.FlagSet +} + +// NewFloat64Flag creates a new Float64Flag +func NewFloat64Flag(fl cli.Float64Flag) *Float64Flag { + return &Float64Flag{Float64Flag: fl, set: nil} +} + +// Apply saves the flagSet for later usage calls, then calls the +// wrapped Float64Flag.Apply +func (f *Float64Flag) Apply(set *flag.FlagSet) { + f.set = set + f.Float64Flag.Apply(set) +} + +// ApplyWithError saves the flagSet for later usage calls, then calls the +// wrapped Float64Flag.ApplyWithError +func (f *Float64Flag) ApplyWithError(set *flag.FlagSet) error { + f.set = set + return f.Float64Flag.ApplyWithError(set) +} + +// GenericFlag is the flag type that wraps cli.GenericFlag to allow +// for other values to be specified +type GenericFlag struct { + cli.GenericFlag + set *flag.FlagSet +} + +// NewGenericFlag creates a new GenericFlag +func NewGenericFlag(fl cli.GenericFlag) *GenericFlag { + return &GenericFlag{GenericFlag: fl, set: nil} +} + +// Apply saves the flagSet for later usage calls, then calls the +// wrapped GenericFlag.Apply +func (f *GenericFlag) Apply(set *flag.FlagSet) { + f.set = set + f.GenericFlag.Apply(set) +} + +// ApplyWithError saves the flagSet for 
later usage calls, then calls the +// wrapped GenericFlag.ApplyWithError +func (f *GenericFlag) ApplyWithError(set *flag.FlagSet) error { + f.set = set + return f.GenericFlag.ApplyWithError(set) +} + +// Int64Flag is the flag type that wraps cli.Int64Flag to allow +// for other values to be specified +type Int64Flag struct { + cli.Int64Flag + set *flag.FlagSet +} + +// NewInt64Flag creates a new Int64Flag +func NewInt64Flag(fl cli.Int64Flag) *Int64Flag { + return &Int64Flag{Int64Flag: fl, set: nil} +} + +// Apply saves the flagSet for later usage calls, then calls the +// wrapped Int64Flag.Apply +func (f *Int64Flag) Apply(set *flag.FlagSet) { + f.set = set + f.Int64Flag.Apply(set) +} + +// ApplyWithError saves the flagSet for later usage calls, then calls the +// wrapped Int64Flag.ApplyWithError +func (f *Int64Flag) ApplyWithError(set *flag.FlagSet) error { + f.set = set + return f.Int64Flag.ApplyWithError(set) +} + +// IntFlag is the flag type that wraps cli.IntFlag to allow +// for other values to be specified +type IntFlag struct { + cli.IntFlag + set *flag.FlagSet +} + +// NewIntFlag creates a new IntFlag +func NewIntFlag(fl cli.IntFlag) *IntFlag { + return &IntFlag{IntFlag: fl, set: nil} +} + +// Apply saves the flagSet for later usage calls, then calls the +// wrapped IntFlag.Apply +func (f *IntFlag) Apply(set *flag.FlagSet) { + f.set = set + f.IntFlag.Apply(set) +} + +// ApplyWithError saves the flagSet for later usage calls, then calls the +// wrapped IntFlag.ApplyWithError +func (f *IntFlag) ApplyWithError(set *flag.FlagSet) error { + f.set = set + return f.IntFlag.ApplyWithError(set) +} + +// IntSliceFlag is the flag type that wraps cli.IntSliceFlag to allow +// for other values to be specified +type IntSliceFlag struct { + cli.IntSliceFlag + set *flag.FlagSet +} + +// NewIntSliceFlag creates a new IntSliceFlag +func NewIntSliceFlag(fl cli.IntSliceFlag) *IntSliceFlag { + return &IntSliceFlag{IntSliceFlag: fl, set: nil} +} + +// Apply saves the flagSet for later usage calls, then calls the +// wrapped IntSliceFlag.Apply +func (f *IntSliceFlag) Apply(set *flag.FlagSet) { + f.set = set + f.IntSliceFlag.Apply(set) +} + +// ApplyWithError saves the flagSet for later usage calls, then calls the +// wrapped IntSliceFlag.ApplyWithError +func (f *IntSliceFlag) ApplyWithError(set *flag.FlagSet) error { + f.set = set + return f.IntSliceFlag.ApplyWithError(set) +} + +// Int64SliceFlag is the flag type that wraps cli.Int64SliceFlag to allow +// for other values to be specified +type Int64SliceFlag struct { + cli.Int64SliceFlag + set *flag.FlagSet +} + +// NewInt64SliceFlag creates a new Int64SliceFlag +func NewInt64SliceFlag(fl cli.Int64SliceFlag) *Int64SliceFlag { + return &Int64SliceFlag{Int64SliceFlag: fl, set: nil} +} + +// Apply saves the flagSet for later usage calls, then calls the +// wrapped Int64SliceFlag.Apply +func (f *Int64SliceFlag) Apply(set *flag.FlagSet) { + f.set = set + f.Int64SliceFlag.Apply(set) +} + +// ApplyWithError saves the flagSet for later usage calls, then calls the +// wrapped Int64SliceFlag.ApplyWithError +func (f *Int64SliceFlag) ApplyWithError(set *flag.FlagSet) error { + f.set = set + return f.Int64SliceFlag.ApplyWithError(set) +} + +// StringFlag is the flag type that wraps cli.StringFlag to allow +// for other values to be specified +type StringFlag struct { + cli.StringFlag + set *flag.FlagSet +} + +// NewStringFlag creates a new StringFlag +func NewStringFlag(fl cli.StringFlag) *StringFlag { + return &StringFlag{StringFlag: fl, set: nil} +} + +// Apply 
saves the flagSet for later usage calls, then calls the +// wrapped StringFlag.Apply +func (f *StringFlag) Apply(set *flag.FlagSet) { + f.set = set + f.StringFlag.Apply(set) +} + +// ApplyWithError saves the flagSet for later usage calls, then calls the +// wrapped StringFlag.ApplyWithError +func (f *StringFlag) ApplyWithError(set *flag.FlagSet) error { + f.set = set + return f.StringFlag.ApplyWithError(set) +} + +// StringSliceFlag is the flag type that wraps cli.StringSliceFlag to allow +// for other values to be specified +type StringSliceFlag struct { + cli.StringSliceFlag + set *flag.FlagSet +} + +// NewStringSliceFlag creates a new StringSliceFlag +func NewStringSliceFlag(fl cli.StringSliceFlag) *StringSliceFlag { + return &StringSliceFlag{StringSliceFlag: fl, set: nil} +} + +// Apply saves the flagSet for later usage calls, then calls the +// wrapped StringSliceFlag.Apply +func (f *StringSliceFlag) Apply(set *flag.FlagSet) { + f.set = set + f.StringSliceFlag.Apply(set) +} + +// ApplyWithError saves the flagSet for later usage calls, then calls the +// wrapped StringSliceFlag.ApplyWithError +func (f *StringSliceFlag) ApplyWithError(set *flag.FlagSet) error { + f.set = set + return f.StringSliceFlag.ApplyWithError(set) +} + +// Uint64Flag is the flag type that wraps cli.Uint64Flag to allow +// for other values to be specified +type Uint64Flag struct { + cli.Uint64Flag + set *flag.FlagSet +} + +// NewUint64Flag creates a new Uint64Flag +func NewUint64Flag(fl cli.Uint64Flag) *Uint64Flag { + return &Uint64Flag{Uint64Flag: fl, set: nil} +} + +// Apply saves the flagSet for later usage calls, then calls the +// wrapped Uint64Flag.Apply +func (f *Uint64Flag) Apply(set *flag.FlagSet) { + f.set = set + f.Uint64Flag.Apply(set) +} + +// ApplyWithError saves the flagSet for later usage calls, then calls the +// wrapped Uint64Flag.ApplyWithError +func (f *Uint64Flag) ApplyWithError(set *flag.FlagSet) error { + f.set = set + return f.Uint64Flag.ApplyWithError(set) +} + +// UintFlag is the flag type that wraps cli.UintFlag to allow +// for other values to be specified +type UintFlag struct { + cli.UintFlag + set *flag.FlagSet +} + +// NewUintFlag creates a new UintFlag +func NewUintFlag(fl cli.UintFlag) *UintFlag { + return &UintFlag{UintFlag: fl, set: nil} +} + +// Apply saves the flagSet for later usage calls, then calls the +// wrapped UintFlag.Apply +func (f *UintFlag) Apply(set *flag.FlagSet) { + f.set = set + f.UintFlag.Apply(set) +} + +// ApplyWithError saves the flagSet for later usage calls, then calls the +// wrapped UintFlag.ApplyWithError +func (f *UintFlag) ApplyWithError(set *flag.FlagSet) error { + f.set = set + return f.UintFlag.ApplyWithError(set) +} diff --git a/vendor/github.com/urfave/cli/altsrc/flag_test.go b/vendor/github.com/urfave/cli/altsrc/flag_test.go new file mode 100644 index 000000000..cd182942d --- /dev/null +++ b/vendor/github.com/urfave/cli/altsrc/flag_test.go @@ -0,0 +1,336 @@ +package altsrc + +import ( + "flag" + "fmt" + "os" + "strings" + "testing" + "time" + + "gopkg.in/urfave/cli.v1" +) + +type testApplyInputSource struct { + Flag FlagInputSourceExtension + FlagName string + FlagSetName string + Expected string + ContextValueString string + ContextValue flag.Value + EnvVarValue string + EnvVarName string + MapValue interface{} +} + +func TestGenericApplyInputSourceValue(t *testing.T) { + v := &Parser{"abc", "def"} + c := runTest(t, testApplyInputSource{ + Flag: NewGenericFlag(cli.GenericFlag{Name: "test", Value: &Parser{}}), + FlagName: "test", + MapValue: v, 
+ }) + expect(t, v, c.Generic("test")) +} + +func TestGenericApplyInputSourceMethodContextSet(t *testing.T) { + p := &Parser{"abc", "def"} + c := runTest(t, testApplyInputSource{ + Flag: NewGenericFlag(cli.GenericFlag{Name: "test", Value: &Parser{}}), + FlagName: "test", + MapValue: &Parser{"efg", "hig"}, + ContextValueString: p.String(), + }) + expect(t, p, c.Generic("test")) +} + +func TestGenericApplyInputSourceMethodEnvVarSet(t *testing.T) { + c := runTest(t, testApplyInputSource{ + Flag: NewGenericFlag(cli.GenericFlag{Name: "test", Value: &Parser{}, EnvVar: "TEST"}), + FlagName: "test", + MapValue: &Parser{"efg", "hij"}, + EnvVarName: "TEST", + EnvVarValue: "abc,def", + }) + expect(t, &Parser{"abc", "def"}, c.Generic("test")) +} + +func TestStringSliceApplyInputSourceValue(t *testing.T) { + c := runTest(t, testApplyInputSource{ + Flag: NewStringSliceFlag(cli.StringSliceFlag{Name: "test"}), + FlagName: "test", + MapValue: []interface{}{"hello", "world"}, + }) + expect(t, c.StringSlice("test"), []string{"hello", "world"}) +} + +func TestStringSliceApplyInputSourceMethodContextSet(t *testing.T) { + c := runTest(t, testApplyInputSource{ + Flag: NewStringSliceFlag(cli.StringSliceFlag{Name: "test"}), + FlagName: "test", + MapValue: []interface{}{"hello", "world"}, + ContextValueString: "ohno", + }) + expect(t, c.StringSlice("test"), []string{"ohno"}) +} + +func TestStringSliceApplyInputSourceMethodEnvVarSet(t *testing.T) { + c := runTest(t, testApplyInputSource{ + Flag: NewStringSliceFlag(cli.StringSliceFlag{Name: "test", EnvVar: "TEST"}), + FlagName: "test", + MapValue: []interface{}{"hello", "world"}, + EnvVarName: "TEST", + EnvVarValue: "oh,no", + }) + expect(t, c.StringSlice("test"), []string{"oh", "no"}) +} + +func TestIntSliceApplyInputSourceValue(t *testing.T) { + c := runTest(t, testApplyInputSource{ + Flag: NewIntSliceFlag(cli.IntSliceFlag{Name: "test"}), + FlagName: "test", + MapValue: []interface{}{1, 2}, + }) + expect(t, c.IntSlice("test"), []int{1, 2}) +} + +func TestIntSliceApplyInputSourceMethodContextSet(t *testing.T) { + c := runTest(t, testApplyInputSource{ + Flag: NewIntSliceFlag(cli.IntSliceFlag{Name: "test"}), + FlagName: "test", + MapValue: []interface{}{1, 2}, + ContextValueString: "3", + }) + expect(t, c.IntSlice("test"), []int{3}) +} + +func TestIntSliceApplyInputSourceMethodEnvVarSet(t *testing.T) { + c := runTest(t, testApplyInputSource{ + Flag: NewIntSliceFlag(cli.IntSliceFlag{Name: "test", EnvVar: "TEST"}), + FlagName: "test", + MapValue: []interface{}{1, 2}, + EnvVarName: "TEST", + EnvVarValue: "3,4", + }) + expect(t, c.IntSlice("test"), []int{3, 4}) +} + +func TestBoolApplyInputSourceMethodSet(t *testing.T) { + c := runTest(t, testApplyInputSource{ + Flag: NewBoolFlag(cli.BoolFlag{Name: "test"}), + FlagName: "test", + MapValue: true, + }) + expect(t, true, c.Bool("test")) +} + +func TestBoolApplyInputSourceMethodContextSet(t *testing.T) { + c := runTest(t, testApplyInputSource{ + Flag: NewBoolFlag(cli.BoolFlag{Name: "test"}), + FlagName: "test", + MapValue: false, + ContextValueString: "true", + }) + expect(t, true, c.Bool("test")) +} + +func TestBoolApplyInputSourceMethodEnvVarSet(t *testing.T) { + c := runTest(t, testApplyInputSource{ + Flag: NewBoolFlag(cli.BoolFlag{Name: "test", EnvVar: "TEST"}), + FlagName: "test", + MapValue: false, + EnvVarName: "TEST", + EnvVarValue: "true", + }) + expect(t, true, c.Bool("test")) +} + +func TestBoolTApplyInputSourceMethodSet(t *testing.T) { + c := runTest(t, testApplyInputSource{ + Flag: 
NewBoolTFlag(cli.BoolTFlag{Name: "test"}), + FlagName: "test", + MapValue: false, + }) + expect(t, false, c.BoolT("test")) +} + +func TestBoolTApplyInputSourceMethodContextSet(t *testing.T) { + c := runTest(t, testApplyInputSource{ + Flag: NewBoolTFlag(cli.BoolTFlag{Name: "test"}), + FlagName: "test", + MapValue: true, + ContextValueString: "false", + }) + expect(t, false, c.BoolT("test")) +} + +func TestBoolTApplyInputSourceMethodEnvVarSet(t *testing.T) { + c := runTest(t, testApplyInputSource{ + Flag: NewBoolTFlag(cli.BoolTFlag{Name: "test", EnvVar: "TEST"}), + FlagName: "test", + MapValue: true, + EnvVarName: "TEST", + EnvVarValue: "false", + }) + expect(t, false, c.BoolT("test")) +} + +func TestStringApplyInputSourceMethodSet(t *testing.T) { + c := runTest(t, testApplyInputSource{ + Flag: NewStringFlag(cli.StringFlag{Name: "test"}), + FlagName: "test", + MapValue: "hello", + }) + expect(t, "hello", c.String("test")) +} + +func TestStringApplyInputSourceMethodContextSet(t *testing.T) { + c := runTest(t, testApplyInputSource{ + Flag: NewStringFlag(cli.StringFlag{Name: "test"}), + FlagName: "test", + MapValue: "hello", + ContextValueString: "goodbye", + }) + expect(t, "goodbye", c.String("test")) +} + +func TestStringApplyInputSourceMethodEnvVarSet(t *testing.T) { + c := runTest(t, testApplyInputSource{ + Flag: NewStringFlag(cli.StringFlag{Name: "test", EnvVar: "TEST"}), + FlagName: "test", + MapValue: "hello", + EnvVarName: "TEST", + EnvVarValue: "goodbye", + }) + expect(t, "goodbye", c.String("test")) +} + +func TestIntApplyInputSourceMethodSet(t *testing.T) { + c := runTest(t, testApplyInputSource{ + Flag: NewIntFlag(cli.IntFlag{Name: "test"}), + FlagName: "test", + MapValue: 15, + }) + expect(t, 15, c.Int("test")) +} + +func TestIntApplyInputSourceMethodContextSet(t *testing.T) { + c := runTest(t, testApplyInputSource{ + Flag: NewIntFlag(cli.IntFlag{Name: "test"}), + FlagName: "test", + MapValue: 15, + ContextValueString: "7", + }) + expect(t, 7, c.Int("test")) +} + +func TestIntApplyInputSourceMethodEnvVarSet(t *testing.T) { + c := runTest(t, testApplyInputSource{ + Flag: NewIntFlag(cli.IntFlag{Name: "test", EnvVar: "TEST"}), + FlagName: "test", + MapValue: 15, + EnvVarName: "TEST", + EnvVarValue: "12", + }) + expect(t, 12, c.Int("test")) +} + +func TestDurationApplyInputSourceMethodSet(t *testing.T) { + c := runTest(t, testApplyInputSource{ + Flag: NewDurationFlag(cli.DurationFlag{Name: "test"}), + FlagName: "test", + MapValue: time.Duration(30 * time.Second), + }) + expect(t, time.Duration(30*time.Second), c.Duration("test")) +} + +func TestDurationApplyInputSourceMethodContextSet(t *testing.T) { + c := runTest(t, testApplyInputSource{ + Flag: NewDurationFlag(cli.DurationFlag{Name: "test"}), + FlagName: "test", + MapValue: time.Duration(30 * time.Second), + ContextValueString: time.Duration(15 * time.Second).String(), + }) + expect(t, time.Duration(15*time.Second), c.Duration("test")) +} + +func TestDurationApplyInputSourceMethodEnvVarSet(t *testing.T) { + c := runTest(t, testApplyInputSource{ + Flag: NewDurationFlag(cli.DurationFlag{Name: "test", EnvVar: "TEST"}), + FlagName: "test", + MapValue: time.Duration(30 * time.Second), + EnvVarName: "TEST", + EnvVarValue: time.Duration(15 * time.Second).String(), + }) + expect(t, time.Duration(15*time.Second), c.Duration("test")) +} + +func TestFloat64ApplyInputSourceMethodSet(t *testing.T) { + c := runTest(t, testApplyInputSource{ + Flag: NewFloat64Flag(cli.Float64Flag{Name: "test"}), + FlagName: "test", + MapValue: 1.3, + }) + expect(t, 
1.3, c.Float64("test")) +} + +func TestFloat64ApplyInputSourceMethodContextSet(t *testing.T) { + c := runTest(t, testApplyInputSource{ + Flag: NewFloat64Flag(cli.Float64Flag{Name: "test"}), + FlagName: "test", + MapValue: 1.3, + ContextValueString: fmt.Sprintf("%v", 1.4), + }) + expect(t, 1.4, c.Float64("test")) +} + +func TestFloat64ApplyInputSourceMethodEnvVarSet(t *testing.T) { + c := runTest(t, testApplyInputSource{ + Flag: NewFloat64Flag(cli.Float64Flag{Name: "test", EnvVar: "TEST"}), + FlagName: "test", + MapValue: 1.3, + EnvVarName: "TEST", + EnvVarValue: fmt.Sprintf("%v", 1.4), + }) + expect(t, 1.4, c.Float64("test")) +} + +func runTest(t *testing.T, test testApplyInputSource) *cli.Context { + inputSource := &MapInputSource{valueMap: map[interface{}]interface{}{test.FlagName: test.MapValue}} + set := flag.NewFlagSet(test.FlagSetName, flag.ContinueOnError) + c := cli.NewContext(nil, set, nil) + if test.EnvVarName != "" && test.EnvVarValue != "" { + os.Setenv(test.EnvVarName, test.EnvVarValue) + defer os.Setenv(test.EnvVarName, "") + } + + test.Flag.Apply(set) + if test.ContextValue != nil { + flag := set.Lookup(test.FlagName) + flag.Value = test.ContextValue + } + if test.ContextValueString != "" { + set.Set(test.FlagName, test.ContextValueString) + } + test.Flag.ApplyInputSourceValue(c, inputSource) + + return c +} + +type Parser [2]string + +func (p *Parser) Set(value string) error { + parts := strings.Split(value, ",") + if len(parts) != 2 { + return fmt.Errorf("invalid format") + } + + (*p)[0] = parts[0] + (*p)[1] = parts[1] + + return nil +} + +func (p *Parser) String() string { + return fmt.Sprintf("%s,%s", p[0], p[1]) +} diff --git a/vendor/github.com/urfave/cli/altsrc/helpers_test.go b/vendor/github.com/urfave/cli/altsrc/helpers_test.go new file mode 100644 index 000000000..3b7f7e94e --- /dev/null +++ b/vendor/github.com/urfave/cli/altsrc/helpers_test.go @@ -0,0 +1,18 @@ +package altsrc + +import ( + "reflect" + "testing" +) + +func expect(t *testing.T, a interface{}, b interface{}) { + if !reflect.DeepEqual(b, a) { + t.Errorf("Expected %#v (type %v) - Got %#v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a)) + } +} + +func refute(t *testing.T, a interface{}, b interface{}) { + if a == b { + t.Errorf("Did not expect %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a)) + } +} diff --git a/vendor/github.com/urfave/cli/altsrc/input_source_context.go b/vendor/github.com/urfave/cli/altsrc/input_source_context.go new file mode 100644 index 000000000..276dcda08 --- /dev/null +++ b/vendor/github.com/urfave/cli/altsrc/input_source_context.go @@ -0,0 +1,21 @@ +package altsrc + +import ( + "time" + + "gopkg.in/urfave/cli.v1" +) + +// InputSourceContext is an interface used to allow +// other input sources to be implemented as needed. 
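
As a quick orientation before the interface definition that follows (this aside is not part of the vendored patch): the MapInputSource type introduced later in this file set is the canonical implementation of the interface, and a package-internal sketch of reading through it looks like the code below. The map contents and function name are hypothetical, for illustration only.

```go
package altsrc

// Sketch only: MapInputSource (defined later in this patch) satisfies
// InputSourceContext; valueMap is unexported, so this must live in altsrc.
func exampleMapSourceLookup() (int, string, error) {
	src := &MapInputSource{valueMap: map[interface{}]interface{}{
		"port": 8080,
		"db":   map[interface{}]interface{}{"host": "localhost"},
	}}
	port, err := src.Int("port") // direct key lookup
	if err != nil {
		return 0, "", err
	}
	host, err := src.String("db.host") // nested key, resolved by '.' traversal
	return port, host, err
}
```
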
+type InputSourceContext interface {
+	Int(name string) (int, error)
+	Duration(name string) (time.Duration, error)
+	Float64(name string) (float64, error)
+	String(name string) (string, error)
+	StringSlice(name string) ([]string, error)
+	IntSlice(name string) ([]int, error)
+	Generic(name string) (cli.Generic, error)
+	Bool(name string) (bool, error)
+	BoolT(name string) (bool, error)
+}
diff --git a/vendor/github.com/urfave/cli/altsrc/map_input_source.go b/vendor/github.com/urfave/cli/altsrc/map_input_source.go
new file mode 100644
index 000000000..b3169e0ec
--- /dev/null
+++ b/vendor/github.com/urfave/cli/altsrc/map_input_source.go
@@ -0,0 +1,262 @@
+package altsrc
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"time"
+
+	"gopkg.in/urfave/cli.v1"
+)
+
+// MapInputSource implements InputSourceContext to return
+// data from the map that is loaded.
+type MapInputSource struct {
+	valueMap map[interface{}]interface{}
+}
+
+// nestedVal checks if the name has '.' delimiters.
+// If so, it tries to traverse the tree by the '.' delimited sections to find
+// a nested value for the key.
+func nestedVal(name string, tree map[interface{}]interface{}) (interface{}, bool) {
+	if sections := strings.Split(name, "."); len(sections) > 1 {
+		node := tree
+		for _, section := range sections[:len(sections)-1] {
+			if child, ok := node[section]; !ok {
+				return nil, false
+			} else {
+				if ctype, ok := child.(map[interface{}]interface{}); !ok {
+					return nil, false
+				} else {
+					node = ctype
+				}
+			}
+		}
+		if val, ok := node[sections[len(sections)-1]]; ok {
+			return val, true
+		}
+	}
+	return nil, false
+}
+
+// Int returns an int from the map if it exists, otherwise returns 0
+func (fsm *MapInputSource) Int(name string) (int, error) {
+	otherGenericValue, exists := fsm.valueMap[name]
+	if exists {
+		otherValue, isType := otherGenericValue.(int)
+		if !isType {
+			return 0, incorrectTypeForFlagError(name, "int", otherGenericValue)
+		}
+		return otherValue, nil
+	}
+	nestedGenericValue, exists := nestedVal(name, fsm.valueMap)
+	if exists {
+		otherValue, isType := nestedGenericValue.(int)
+		if !isType {
+			return 0, incorrectTypeForFlagError(name, "int", nestedGenericValue)
+		}
+		return otherValue, nil
+	}
+
+	return 0, nil
+}
+
+// Duration returns a duration from the map if it exists, otherwise returns 0
+func (fsm *MapInputSource) Duration(name string) (time.Duration, error) {
+	otherGenericValue, exists := fsm.valueMap[name]
+	if exists {
+		otherValue, isType := otherGenericValue.(time.Duration)
+		if !isType {
+			return 0, incorrectTypeForFlagError(name, "duration", otherGenericValue)
+		}
+		return otherValue, nil
+	}
+	nestedGenericValue, exists := nestedVal(name, fsm.valueMap)
+	if exists {
+		otherValue, isType := nestedGenericValue.(time.Duration)
+		if !isType {
+			return 0, incorrectTypeForFlagError(name, "duration", nestedGenericValue)
+		}
+		return otherValue, nil
+	}
+
+	return 0, nil
+}
+
+// Float64 returns a float64 from the map if it exists, otherwise returns 0
+func (fsm *MapInputSource) Float64(name string) (float64, error) {
+	otherGenericValue, exists := fsm.valueMap[name]
+	if exists {
+		otherValue, isType := otherGenericValue.(float64)
+		if !isType {
+			return 0, incorrectTypeForFlagError(name, "float64", otherGenericValue)
+		}
+		return otherValue, nil
+	}
+	nestedGenericValue, exists := nestedVal(name, fsm.valueMap)
+	if exists {
+		otherValue, isType := nestedGenericValue.(float64)
+		if !isType {
+			return 0, incorrectTypeForFlagError(name, "float64", nestedGenericValue)
+		}
+		return otherValue, nil
+	}
+
+	return 0, nil
+}
+
+// String returns a string from the map if it exists, otherwise returns an empty string
+func (fsm *MapInputSource) String(name string) (string, error) {
+	otherGenericValue, exists := fsm.valueMap[name]
+	if exists {
+		otherValue, isType := otherGenericValue.(string)
+		if !isType {
+			return "", incorrectTypeForFlagError(name, "string", otherGenericValue)
+		}
+		return otherValue, nil
+	}
+	nestedGenericValue, exists := nestedVal(name, fsm.valueMap)
+	if exists {
+		otherValue, isType := nestedGenericValue.(string)
+		if !isType {
+			return "", incorrectTypeForFlagError(name, "string", nestedGenericValue)
+		}
+		return otherValue, nil
+	}
+
+	return "", nil
+}
+
+// StringSlice returns a []string from the map if it exists, otherwise returns nil
+func (fsm *MapInputSource) StringSlice(name string) ([]string, error) {
+	otherGenericValue, exists := fsm.valueMap[name]
+	if !exists {
+		otherGenericValue, exists = nestedVal(name, fsm.valueMap)
+		if !exists {
+			return nil, nil
+		}
+	}
+
+	otherValue, isType := otherGenericValue.([]interface{})
+	if !isType {
+		return nil, incorrectTypeForFlagError(name, "[]interface{}", otherGenericValue)
+	}
+
+	var stringSlice = make([]string, 0, len(otherValue))
+	for i, v := range otherValue {
+		stringValue, isType := v.(string)
+
+		if !isType {
+			return nil, incorrectTypeForFlagError(fmt.Sprintf("%s[%d]", name, i), "string", v)
+		}
+
+		stringSlice = append(stringSlice, stringValue)
+	}
+
+	return stringSlice, nil
+}
+
+// IntSlice returns a []int from the map if it exists, otherwise returns nil
+func (fsm *MapInputSource) IntSlice(name string) ([]int, error) {
+	otherGenericValue, exists := fsm.valueMap[name]
+	if !exists {
+		otherGenericValue, exists = nestedVal(name, fsm.valueMap)
+		if !exists {
+			return nil, nil
+		}
+	}
+
+	otherValue, isType := otherGenericValue.([]interface{})
+	if !isType {
+		return nil, incorrectTypeForFlagError(name, "[]interface{}", otherGenericValue)
+	}
+
+	var intSlice = make([]int, 0, len(otherValue))
+	for i, v := range otherValue {
+		intValue, isType := v.(int)
+
+		if !isType {
+			return nil, incorrectTypeForFlagError(fmt.Sprintf("%s[%d]", name, i), "int", v)
+		}
+
+		intSlice = append(intSlice, intValue)
+	}
+
+	return intSlice, nil
+}
+
+// Generic returns a cli.Generic from the map if it exists, otherwise returns nil
+func (fsm *MapInputSource) Generic(name string) (cli.Generic, error) {
+	otherGenericValue, exists := fsm.valueMap[name]
+	if exists {
+		otherValue, isType := otherGenericValue.(cli.Generic)
+		if !isType {
+			return nil, incorrectTypeForFlagError(name, "cli.Generic", otherGenericValue)
+		}
+		return otherValue, nil
+	}
+	nestedGenericValue, exists := nestedVal(name, fsm.valueMap)
+	if exists {
+		otherValue, isType := nestedGenericValue.(cli.Generic)
+		if !isType {
+			return nil, incorrectTypeForFlagError(name, "cli.Generic", nestedGenericValue)
+		}
+		return otherValue, nil
+	}
+
+	return nil, nil
+}
+
+// Bool returns a bool from the map, otherwise returns false
+func (fsm *MapInputSource) Bool(name string) (bool, error) {
+	otherGenericValue, exists := fsm.valueMap[name]
+	if exists {
+		otherValue, isType := otherGenericValue.(bool)
+		if !isType {
+			return false, incorrectTypeForFlagError(name, "bool", otherGenericValue)
+		}
+		return otherValue, nil
+	}
+	nestedGenericValue, exists := nestedVal(name, fsm.valueMap)
+	if exists {
+		otherValue, isType := nestedGenericValue.(bool)
+		if !isType {
+			return false, incorrectTypeForFlagError(name, "bool", nestedGenericValue)
+		}
+		return otherValue, nil
+	}
+
+	return false, nil
+}
+
+// BoolT returns a bool from the map, otherwise returns true
+func (fsm *MapInputSource) BoolT(name string) (bool, error) {
+	otherGenericValue, exists := fsm.valueMap[name]
+	if exists {
+		otherValue, isType := otherGenericValue.(bool)
+		if !isType {
+			return true, incorrectTypeForFlagError(name, "bool", otherGenericValue)
+		}
+		return otherValue, nil
+	}
+	nestedGenericValue, exists := nestedVal(name, fsm.valueMap)
+	if exists {
+		otherValue, isType := nestedGenericValue.(bool)
+		if !isType {
+			return true, incorrectTypeForFlagError(name, "bool", nestedGenericValue)
+		}
+		return otherValue, nil
+	}
+
+	return true, nil
+}
+
+func incorrectTypeForFlagError(name, expectedTypeName string, value interface{}) error {
+	valueType := reflect.TypeOf(value)
+	valueTypeName := ""
+	if valueType != nil {
+		valueTypeName = valueType.Name()
+	}
+
+	return fmt.Errorf("Mismatched type for flag '%s'. Expected '%s' but actual is '%s'", name, expectedTypeName, valueTypeName)
+}
diff --git a/vendor/github.com/urfave/cli/altsrc/toml_command_test.go b/vendor/github.com/urfave/cli/altsrc/toml_command_test.go
new file mode 100644
index 000000000..a5053d4f7
--- /dev/null
+++ b/vendor/github.com/urfave/cli/altsrc/toml_command_test.go
@@ -0,0 +1,310 @@
+// Disabling building of toml support in cases where golang is 1.0 or 1.1
+// as the encoding library is not implemented or supported.
+
+// +build go1.2
+
+package altsrc
+
+import (
+	"flag"
+	"io/ioutil"
+	"os"
+	"testing"
+
+	"gopkg.in/urfave/cli.v1"
+)
+
+func TestCommandTomFileTest(t *testing.T) {
+	app := cli.NewApp()
+	set := flag.NewFlagSet("test", 0)
+	ioutil.WriteFile("current.toml", []byte("test = 15"), 0666)
+	defer os.Remove("current.toml")
+	test := []string{"test-cmd", "--load", "current.toml"}
+	set.Parse(test)
+
+	c := cli.NewContext(app, set, nil)
+
+	command := &cli.Command{
+		Name:        "test-cmd",
+		Aliases:     []string{"tc"},
+		Usage:       "this is for testing",
+		Description: "testing",
+		Action: func(c *cli.Context) error {
+			val := c.Int("test")
+			expect(t, val, 15)
+			return nil
+		},
+		Flags: []cli.Flag{
+			NewIntFlag(cli.IntFlag{Name: "test"}),
+			cli.StringFlag{Name: "load"}},
+	}
+	command.Before = InitInputSourceWithContext(command.Flags, NewTomlSourceFromFlagFunc("load"))
+	err := command.Run(c)
+
+	expect(t, err, nil)
+}
+
+func TestCommandTomlFileTestGlobalEnvVarWins(t *testing.T) {
+	app := cli.NewApp()
+	set := flag.NewFlagSet("test", 0)
+	ioutil.WriteFile("current.toml", []byte("test = 15"), 0666)
+	defer os.Remove("current.toml")
+
+	os.Setenv("THE_TEST", "10")
+	defer os.Setenv("THE_TEST", "")
+	test := []string{"test-cmd", "--load", "current.toml"}
+	set.Parse(test)
+
+	c := cli.NewContext(app, set, nil)
+
+	command := &cli.Command{
+		Name:        "test-cmd",
+		Aliases:     []string{"tc"},
+		Usage:       "this is for testing",
+		Description: "testing",
+		Action: func(c *cli.Context) error {
+			val := c.Int("test")
+			expect(t, val, 10)
+			return nil
+		},
+		Flags: []cli.Flag{
+			NewIntFlag(cli.IntFlag{Name: "test", EnvVar: "THE_TEST"}),
+			cli.StringFlag{Name: "load"}},
+	}
+	command.Before = InitInputSourceWithContext(command.Flags, NewTomlSourceFromFlagFunc("load"))
+
+	err := command.Run(c)
+
+	expect(t, err, nil)
+}
+
+func TestCommandTomlFileTestGlobalEnvVarWinsNested(t *testing.T) {
+	app := cli.NewApp()
+	set := flag.NewFlagSet("test", 0)
+	ioutil.WriteFile("current.toml", []byte("[top]\ntest = 15"), 0666)
+	defer os.Remove("current.toml")
+
+	os.Setenv("THE_TEST", "10")
+	defer os.Setenv("THE_TEST", "")
+	test :=
[]string{"test-cmd", "--load", "current.toml"} + set.Parse(test) + + c := cli.NewContext(app, set, nil) + + command := &cli.Command{ + Name: "test-cmd", + Aliases: []string{"tc"}, + Usage: "this is for testing", + Description: "testing", + Action: func(c *cli.Context) error { + val := c.Int("top.test") + expect(t, val, 10) + return nil + }, + Flags: []cli.Flag{ + NewIntFlag(cli.IntFlag{Name: "top.test", EnvVar: "THE_TEST"}), + cli.StringFlag{Name: "load"}}, + } + command.Before = InitInputSourceWithContext(command.Flags, NewTomlSourceFromFlagFunc("load")) + + err := command.Run(c) + + expect(t, err, nil) +} + +func TestCommandTomlFileTestSpecifiedFlagWins(t *testing.T) { + app := cli.NewApp() + set := flag.NewFlagSet("test", 0) + ioutil.WriteFile("current.toml", []byte("test = 15"), 0666) + defer os.Remove("current.toml") + + test := []string{"test-cmd", "--load", "current.toml", "--test", "7"} + set.Parse(test) + + c := cli.NewContext(app, set, nil) + + command := &cli.Command{ + Name: "test-cmd", + Aliases: []string{"tc"}, + Usage: "this is for testing", + Description: "testing", + Action: func(c *cli.Context) error { + val := c.Int("test") + expect(t, val, 7) + return nil + }, + Flags: []cli.Flag{ + NewIntFlag(cli.IntFlag{Name: "test"}), + cli.StringFlag{Name: "load"}}, + } + command.Before = InitInputSourceWithContext(command.Flags, NewTomlSourceFromFlagFunc("load")) + + err := command.Run(c) + + expect(t, err, nil) +} + +func TestCommandTomlFileTestSpecifiedFlagWinsNested(t *testing.T) { + app := cli.NewApp() + set := flag.NewFlagSet("test", 0) + ioutil.WriteFile("current.toml", []byte(`[top] + test = 15`), 0666) + defer os.Remove("current.toml") + + test := []string{"test-cmd", "--load", "current.toml", "--top.test", "7"} + set.Parse(test) + + c := cli.NewContext(app, set, nil) + + command := &cli.Command{ + Name: "test-cmd", + Aliases: []string{"tc"}, + Usage: "this is for testing", + Description: "testing", + Action: func(c *cli.Context) error { + val := c.Int("top.test") + expect(t, val, 7) + return nil + }, + Flags: []cli.Flag{ + NewIntFlag(cli.IntFlag{Name: "top.test"}), + cli.StringFlag{Name: "load"}}, + } + command.Before = InitInputSourceWithContext(command.Flags, NewTomlSourceFromFlagFunc("load")) + + err := command.Run(c) + + expect(t, err, nil) +} + +func TestCommandTomlFileTestDefaultValueFileWins(t *testing.T) { + app := cli.NewApp() + set := flag.NewFlagSet("test", 0) + ioutil.WriteFile("current.toml", []byte("test = 15"), 0666) + defer os.Remove("current.toml") + + test := []string{"test-cmd", "--load", "current.toml"} + set.Parse(test) + + c := cli.NewContext(app, set, nil) + + command := &cli.Command{ + Name: "test-cmd", + Aliases: []string{"tc"}, + Usage: "this is for testing", + Description: "testing", + Action: func(c *cli.Context) error { + val := c.Int("test") + expect(t, val, 15) + return nil + }, + Flags: []cli.Flag{ + NewIntFlag(cli.IntFlag{Name: "test", Value: 7}), + cli.StringFlag{Name: "load"}}, + } + command.Before = InitInputSourceWithContext(command.Flags, NewTomlSourceFromFlagFunc("load")) + + err := command.Run(c) + + expect(t, err, nil) +} + +func TestCommandTomlFileTestDefaultValueFileWinsNested(t *testing.T) { + app := cli.NewApp() + set := flag.NewFlagSet("test", 0) + ioutil.WriteFile("current.toml", []byte("[top]\ntest = 15"), 0666) + defer os.Remove("current.toml") + + test := []string{"test-cmd", "--load", "current.toml"} + set.Parse(test) + + c := cli.NewContext(app, set, nil) + + command := &cli.Command{ + Name: "test-cmd", + Aliases: 
[]string{"tc"}, + Usage: "this is for testing", + Description: "testing", + Action: func(c *cli.Context) error { + val := c.Int("top.test") + expect(t, val, 15) + return nil + }, + Flags: []cli.Flag{ + NewIntFlag(cli.IntFlag{Name: "top.test", Value: 7}), + cli.StringFlag{Name: "load"}}, + } + command.Before = InitInputSourceWithContext(command.Flags, NewTomlSourceFromFlagFunc("load")) + + err := command.Run(c) + + expect(t, err, nil) +} + +func TestCommandTomlFileFlagHasDefaultGlobalEnvTomlSetGlobalEnvWins(t *testing.T) { + app := cli.NewApp() + set := flag.NewFlagSet("test", 0) + ioutil.WriteFile("current.toml", []byte("test = 15"), 0666) + defer os.Remove("current.toml") + + os.Setenv("THE_TEST", "11") + defer os.Setenv("THE_TEST", "") + + test := []string{"test-cmd", "--load", "current.toml"} + set.Parse(test) + + c := cli.NewContext(app, set, nil) + + command := &cli.Command{ + Name: "test-cmd", + Aliases: []string{"tc"}, + Usage: "this is for testing", + Description: "testing", + Action: func(c *cli.Context) error { + val := c.Int("test") + expect(t, val, 11) + return nil + }, + Flags: []cli.Flag{ + NewIntFlag(cli.IntFlag{Name: "test", Value: 7, EnvVar: "THE_TEST"}), + cli.StringFlag{Name: "load"}}, + } + command.Before = InitInputSourceWithContext(command.Flags, NewTomlSourceFromFlagFunc("load")) + err := command.Run(c) + + expect(t, err, nil) +} + +func TestCommandTomlFileFlagHasDefaultGlobalEnvTomlSetGlobalEnvWinsNested(t *testing.T) { + app := cli.NewApp() + set := flag.NewFlagSet("test", 0) + ioutil.WriteFile("current.toml", []byte("[top]\ntest = 15"), 0666) + defer os.Remove("current.toml") + + os.Setenv("THE_TEST", "11") + defer os.Setenv("THE_TEST", "") + + test := []string{"test-cmd", "--load", "current.toml"} + set.Parse(test) + + c := cli.NewContext(app, set, nil) + + command := &cli.Command{ + Name: "test-cmd", + Aliases: []string{"tc"}, + Usage: "this is for testing", + Description: "testing", + Action: func(c *cli.Context) error { + val := c.Int("top.test") + expect(t, val, 11) + return nil + }, + Flags: []cli.Flag{ + NewIntFlag(cli.IntFlag{Name: "top.test", Value: 7, EnvVar: "THE_TEST"}), + cli.StringFlag{Name: "load"}}, + } + command.Before = InitInputSourceWithContext(command.Flags, NewTomlSourceFromFlagFunc("load")) + err := command.Run(c) + + expect(t, err, nil) +} diff --git a/vendor/github.com/urfave/cli/altsrc/toml_file_loader.go b/vendor/github.com/urfave/cli/altsrc/toml_file_loader.go new file mode 100644 index 000000000..37870fcbe --- /dev/null +++ b/vendor/github.com/urfave/cli/altsrc/toml_file_loader.go @@ -0,0 +1,113 @@ +// Disabling building of toml support in cases where golang is 1.0 or 1.1 +// as the encoding library is not implemented or supported. 
+ +// +build go1.2 + +package altsrc + +import ( + "fmt" + "reflect" + + "github.com/BurntSushi/toml" + "gopkg.in/urfave/cli.v1" +) + +type tomlMap struct { + Map map[interface{}]interface{} +} + +func unmarshalMap(i interface{}) (ret map[interface{}]interface{}, err error) { + ret = make(map[interface{}]interface{}) + m := i.(map[string]interface{}) + for key, val := range m { + v := reflect.ValueOf(val) + switch v.Kind() { + case reflect.Bool: + ret[key] = val.(bool) + case reflect.String: + ret[key] = val.(string) + case reflect.Int: + ret[key] = int(val.(int)) + case reflect.Int8: + ret[key] = int(val.(int8)) + case reflect.Int16: + ret[key] = int(val.(int16)) + case reflect.Int32: + ret[key] = int(val.(int32)) + case reflect.Int64: + ret[key] = int(val.(int64)) + case reflect.Uint: + ret[key] = int(val.(uint)) + case reflect.Uint8: + ret[key] = int(val.(uint8)) + case reflect.Uint16: + ret[key] = int(val.(uint16)) + case reflect.Uint32: + ret[key] = int(val.(uint32)) + case reflect.Uint64: + ret[key] = int(val.(uint64)) + case reflect.Float32: + ret[key] = float64(val.(float32)) + case reflect.Float64: + ret[key] = float64(val.(float64)) + case reflect.Map: + if tmp, err := unmarshalMap(val); err == nil { + ret[key] = tmp + } else { + return nil, err + } + case reflect.Array, reflect.Slice: + ret[key] = val.([]interface{}) + default: + return nil, fmt.Errorf("Unsupported: type = %#v", v.Kind()) + } + } + return ret, nil +} + +func (self *tomlMap) UnmarshalTOML(i interface{}) error { + if tmp, err := unmarshalMap(i); err == nil { + self.Map = tmp + } else { + return err + } + return nil +} + +type tomlSourceContext struct { + FilePath string +} + +// NewTomlSourceFromFile creates a new TOML InputSourceContext from a filepath. +func NewTomlSourceFromFile(file string) (InputSourceContext, error) { + tsc := &tomlSourceContext{FilePath: file} + var results tomlMap = tomlMap{} + if err := readCommandToml(tsc.FilePath, &results); err != nil { + return nil, fmt.Errorf("Unable to load TOML file '%s': inner error: \n'%v'", tsc.FilePath, err.Error()) + } + return &MapInputSource{valueMap: results.Map}, nil +} + +// NewTomlSourceFromFlagFunc creates a new TOML InputSourceContext from a provided flag name and source context. +func NewTomlSourceFromFlagFunc(flagFileName string) func(context *cli.Context) (InputSourceContext, error) { + return func(context *cli.Context) (InputSourceContext, error) { + filePath := context.String(flagFileName) + return NewTomlSourceFromFile(filePath) + } +} + +func readCommandToml(filePath string, container interface{}) (err error) { + b, err := loadDataFrom(filePath) + if err != nil { + return err + } + + err = toml.Unmarshal(b, container) + if err != nil { + return err + } + + err = nil + return +} diff --git a/vendor/github.com/urfave/cli/altsrc/yaml_command_test.go b/vendor/github.com/urfave/cli/altsrc/yaml_command_test.go new file mode 100644 index 000000000..9d3f43110 --- /dev/null +++ b/vendor/github.com/urfave/cli/altsrc/yaml_command_test.go @@ -0,0 +1,313 @@ +// Disabling building of yaml support in cases where golang is 1.0 or 1.1 +// as the encoding library is not implemented or supported. 
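
A note on the nested fixtures used in the TOML tests above and the YAML tests below: both loaders normalize nested tables and mappings into map[interface{}]interface{}, which is what lets a dotted flag name such as top.test reach a nested key through nestedVal. A package-internal sketch (the test name is hypothetical, added here only to illustrate the traversal):

```go
package altsrc

import "testing"

// Sketch: the "[top]\ntest = 15" TOML and "top:\n  test: 15" YAML fixtures
// in the surrounding tests both load into a nested map, so a flag named
// "top.test" is resolved by nestedVal's '.'-delimited traversal.
func TestNestedValSketch(t *testing.T) {
	tree := map[interface{}]interface{}{
		"top": map[interface{}]interface{}{"test": 15},
	}
	if v, ok := nestedVal("top.test", tree); !ok || v.(int) != 15 {
		t.Fatalf("want 15, got %v (found=%v)", v, ok)
	}
}
```
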
+ +// +build go1.2 + +package altsrc + +import ( + "flag" + "io/ioutil" + "os" + "testing" + + "gopkg.in/urfave/cli.v1" +) + +func TestCommandYamlFileTest(t *testing.T) { + app := cli.NewApp() + set := flag.NewFlagSet("test", 0) + ioutil.WriteFile("current.yaml", []byte("test: 15"), 0666) + defer os.Remove("current.yaml") + test := []string{"test-cmd", "--load", "current.yaml"} + set.Parse(test) + + c := cli.NewContext(app, set, nil) + + command := &cli.Command{ + Name: "test-cmd", + Aliases: []string{"tc"}, + Usage: "this is for testing", + Description: "testing", + Action: func(c *cli.Context) error { + val := c.Int("test") + expect(t, val, 15) + return nil + }, + Flags: []cli.Flag{ + NewIntFlag(cli.IntFlag{Name: "test"}), + cli.StringFlag{Name: "load"}}, + } + command.Before = InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load")) + err := command.Run(c) + + expect(t, err, nil) +} + +func TestCommandYamlFileTestGlobalEnvVarWins(t *testing.T) { + app := cli.NewApp() + set := flag.NewFlagSet("test", 0) + ioutil.WriteFile("current.yaml", []byte("test: 15"), 0666) + defer os.Remove("current.yaml") + + os.Setenv("THE_TEST", "10") + defer os.Setenv("THE_TEST", "") + test := []string{"test-cmd", "--load", "current.yaml"} + set.Parse(test) + + c := cli.NewContext(app, set, nil) + + command := &cli.Command{ + Name: "test-cmd", + Aliases: []string{"tc"}, + Usage: "this is for testing", + Description: "testing", + Action: func(c *cli.Context) error { + val := c.Int("test") + expect(t, val, 10) + return nil + }, + Flags: []cli.Flag{ + NewIntFlag(cli.IntFlag{Name: "test", EnvVar: "THE_TEST"}), + cli.StringFlag{Name: "load"}}, + } + command.Before = InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load")) + + err := command.Run(c) + + expect(t, err, nil) +} + +func TestCommandYamlFileTestGlobalEnvVarWinsNested(t *testing.T) { + app := cli.NewApp() + set := flag.NewFlagSet("test", 0) + ioutil.WriteFile("current.yaml", []byte(`top: + test: 15`), 0666) + defer os.Remove("current.yaml") + + os.Setenv("THE_TEST", "10") + defer os.Setenv("THE_TEST", "") + test := []string{"test-cmd", "--load", "current.yaml"} + set.Parse(test) + + c := cli.NewContext(app, set, nil) + + command := &cli.Command{ + Name: "test-cmd", + Aliases: []string{"tc"}, + Usage: "this is for testing", + Description: "testing", + Action: func(c *cli.Context) error { + val := c.Int("top.test") + expect(t, val, 10) + return nil + }, + Flags: []cli.Flag{ + NewIntFlag(cli.IntFlag{Name: "top.test", EnvVar: "THE_TEST"}), + cli.StringFlag{Name: "load"}}, + } + command.Before = InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load")) + + err := command.Run(c) + + expect(t, err, nil) +} + +func TestCommandYamlFileTestSpecifiedFlagWins(t *testing.T) { + app := cli.NewApp() + set := flag.NewFlagSet("test", 0) + ioutil.WriteFile("current.yaml", []byte("test: 15"), 0666) + defer os.Remove("current.yaml") + + test := []string{"test-cmd", "--load", "current.yaml", "--test", "7"} + set.Parse(test) + + c := cli.NewContext(app, set, nil) + + command := &cli.Command{ + Name: "test-cmd", + Aliases: []string{"tc"}, + Usage: "this is for testing", + Description: "testing", + Action: func(c *cli.Context) error { + val := c.Int("test") + expect(t, val, 7) + return nil + }, + Flags: []cli.Flag{ + NewIntFlag(cli.IntFlag{Name: "test"}), + cli.StringFlag{Name: "load"}}, + } + command.Before = InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load")) + + err := command.Run(c) + + 
expect(t, err, nil) +} + +func TestCommandYamlFileTestSpecifiedFlagWinsNested(t *testing.T) { + app := cli.NewApp() + set := flag.NewFlagSet("test", 0) + ioutil.WriteFile("current.yaml", []byte(`top: + test: 15`), 0666) + defer os.Remove("current.yaml") + + test := []string{"test-cmd", "--load", "current.yaml", "--top.test", "7"} + set.Parse(test) + + c := cli.NewContext(app, set, nil) + + command := &cli.Command{ + Name: "test-cmd", + Aliases: []string{"tc"}, + Usage: "this is for testing", + Description: "testing", + Action: func(c *cli.Context) error { + val := c.Int("top.test") + expect(t, val, 7) + return nil + }, + Flags: []cli.Flag{ + NewIntFlag(cli.IntFlag{Name: "top.test"}), + cli.StringFlag{Name: "load"}}, + } + command.Before = InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load")) + + err := command.Run(c) + + expect(t, err, nil) +} + +func TestCommandYamlFileTestDefaultValueFileWins(t *testing.T) { + app := cli.NewApp() + set := flag.NewFlagSet("test", 0) + ioutil.WriteFile("current.yaml", []byte("test: 15"), 0666) + defer os.Remove("current.yaml") + + test := []string{"test-cmd", "--load", "current.yaml"} + set.Parse(test) + + c := cli.NewContext(app, set, nil) + + command := &cli.Command{ + Name: "test-cmd", + Aliases: []string{"tc"}, + Usage: "this is for testing", + Description: "testing", + Action: func(c *cli.Context) error { + val := c.Int("test") + expect(t, val, 15) + return nil + }, + Flags: []cli.Flag{ + NewIntFlag(cli.IntFlag{Name: "test", Value: 7}), + cli.StringFlag{Name: "load"}}, + } + command.Before = InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load")) + + err := command.Run(c) + + expect(t, err, nil) +} + +func TestCommandYamlFileTestDefaultValueFileWinsNested(t *testing.T) { + app := cli.NewApp() + set := flag.NewFlagSet("test", 0) + ioutil.WriteFile("current.yaml", []byte(`top: + test: 15`), 0666) + defer os.Remove("current.yaml") + + test := []string{"test-cmd", "--load", "current.yaml"} + set.Parse(test) + + c := cli.NewContext(app, set, nil) + + command := &cli.Command{ + Name: "test-cmd", + Aliases: []string{"tc"}, + Usage: "this is for testing", + Description: "testing", + Action: func(c *cli.Context) error { + val := c.Int("top.test") + expect(t, val, 15) + return nil + }, + Flags: []cli.Flag{ + NewIntFlag(cli.IntFlag{Name: "top.test", Value: 7}), + cli.StringFlag{Name: "load"}}, + } + command.Before = InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load")) + + err := command.Run(c) + + expect(t, err, nil) +} + +func TestCommandYamlFileFlagHasDefaultGlobalEnvYamlSetGlobalEnvWins(t *testing.T) { + app := cli.NewApp() + set := flag.NewFlagSet("test", 0) + ioutil.WriteFile("current.yaml", []byte("test: 15"), 0666) + defer os.Remove("current.yaml") + + os.Setenv("THE_TEST", "11") + defer os.Setenv("THE_TEST", "") + + test := []string{"test-cmd", "--load", "current.yaml"} + set.Parse(test) + + c := cli.NewContext(app, set, nil) + + command := &cli.Command{ + Name: "test-cmd", + Aliases: []string{"tc"}, + Usage: "this is for testing", + Description: "testing", + Action: func(c *cli.Context) error { + val := c.Int("test") + expect(t, val, 11) + return nil + }, + Flags: []cli.Flag{ + NewIntFlag(cli.IntFlag{Name: "test", Value: 7, EnvVar: "THE_TEST"}), + cli.StringFlag{Name: "load"}}, + } + command.Before = InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load")) + err := command.Run(c) + + expect(t, err, nil) +} + +func 
TestCommandYamlFileFlagHasDefaultGlobalEnvYamlSetGlobalEnvWinsNested(t *testing.T) {
+	app := cli.NewApp()
+	set := flag.NewFlagSet("test", 0)
+	ioutil.WriteFile("current.yaml", []byte(`top:
+  test: 15`), 0666)
+	defer os.Remove("current.yaml")
+
+	os.Setenv("THE_TEST", "11")
+	defer os.Setenv("THE_TEST", "")
+
+	test := []string{"test-cmd", "--load", "current.yaml"}
+	set.Parse(test)
+
+	c := cli.NewContext(app, set, nil)
+
+	command := &cli.Command{
+		Name:        "test-cmd",
+		Aliases:     []string{"tc"},
+		Usage:       "this is for testing",
+		Description: "testing",
+		Action: func(c *cli.Context) error {
+			val := c.Int("top.test")
+			expect(t, val, 11)
+			return nil
+		},
+		Flags: []cli.Flag{
+			NewIntFlag(cli.IntFlag{Name: "top.test", Value: 7, EnvVar: "THE_TEST"}),
+			cli.StringFlag{Name: "load"}},
+	}
+	command.Before = InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load"))
+	err := command.Run(c)
+
+	expect(t, err, nil)
+}
diff --git a/vendor/github.com/urfave/cli/altsrc/yaml_file_loader.go b/vendor/github.com/urfave/cli/altsrc/yaml_file_loader.go
new file mode 100644
index 000000000..dd808d523
--- /dev/null
+++ b/vendor/github.com/urfave/cli/altsrc/yaml_file_loader.go
@@ -0,0 +1,92 @@
+// Disabling building of yaml support in cases where golang is 1.0 or 1.1
+// as the encoding library is not implemented or supported.
+
+// +build go1.2
+
+package altsrc
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"os"
+	"runtime"
+	"strings"
+
+	"gopkg.in/urfave/cli.v1"
+
+	"gopkg.in/yaml.v2"
+)
+
+type yamlSourceContext struct {
+	FilePath string
+}
+
+// NewYamlSourceFromFile creates a new Yaml InputSourceContext from a filepath.
+func NewYamlSourceFromFile(file string) (InputSourceContext, error) {
+	ysc := &yamlSourceContext{FilePath: file}
+	var results map[interface{}]interface{}
+	err := readCommandYaml(ysc.FilePath, &results)
+	if err != nil {
+		return nil, fmt.Errorf("Unable to load Yaml file '%s': inner error: \n'%v'", ysc.FilePath, err.Error())
+	}
+
+	return &MapInputSource{valueMap: results}, nil
+}
+
+// NewYamlSourceFromFlagFunc creates a new Yaml InputSourceContext from a provided flag name and source context.
+func NewYamlSourceFromFlagFunc(flagFileName string) func(context *cli.Context) (InputSourceContext, error) {
+	return func(context *cli.Context) (InputSourceContext, error) {
+		filePath := context.String(flagFileName)
+		return NewYamlSourceFromFile(filePath)
+	}
+}
+
+func readCommandYaml(filePath string, container interface{}) (err error) {
+	b, err := loadDataFrom(filePath)
+	if err != nil {
+		return err
+	}
+
+	err = yaml.Unmarshal(b, container)
+	if err != nil {
+		return err
+	}
+
+	err = nil
+	return
+}
+
+func loadDataFrom(filePath string) ([]byte, error) {
+	u, err := url.Parse(filePath)
+	if err != nil {
+		return nil, err
+	}
+
+	if u.Host != "" { // we have a host; check whether the scheme is supported
+		switch u.Scheme {
+		case "http", "https":
+			res, err := http.Get(filePath)
+			if err != nil {
+				return nil, err
+			}
+			return ioutil.ReadAll(res.Body)
+		default:
+			return nil, fmt.Errorf("scheme of %s is unsupported", filePath)
+		}
+	} else if u.Path != "" { // no host, but a path: treat it as a local file
+		if _, notFoundFileErr := os.Stat(filePath); notFoundFileErr != nil {
+			return nil, fmt.Errorf("Cannot read from file: '%s' because it does not exist.", filePath)
+		}
+		return ioutil.ReadFile(filePath)
+	} else if runtime.GOOS == "windows" && strings.Contains(u.String(), "\\") {
+		// on Windows systems u.Path is always empty, so we need to check the string directly.
+		if _, notFoundFileErr := os.Stat(filePath); notFoundFileErr != nil {
+			return nil, fmt.Errorf("Cannot read from file: '%s' because it does not exist.", filePath)
+		}
+		return ioutil.ReadFile(filePath)
+	} else {
+		return nil, fmt.Errorf("unable to determine how to load from path %s", filePath)
+	}
+}
diff --git a/vendor/github.com/urfave/cli/app.go b/vendor/github.com/urfave/cli/app.go
new file mode 100644
index 000000000..51fc45d87
--- /dev/null
+++ b/vendor/github.com/urfave/cli/app.go
@@ -0,0 +1,497 @@
+package cli
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sort"
+	"time"
+)
+
+var (
+	changeLogURL                    = "https://github.com/urfave/cli/blob/master/CHANGELOG.md"
+	appActionDeprecationURL         = fmt.Sprintf("%s#deprecated-cli-app-action-signature", changeLogURL)
+	runAndExitOnErrorDeprecationURL = fmt.Sprintf("%s#deprecated-cli-app-runandexitonerror", changeLogURL)
+
+	contactSysadmin = "This is an error in the application. Please contact the distributor of this application if this is not you."
+
+	errInvalidActionType = NewExitError("ERROR invalid Action type. "+
+		fmt.Sprintf("Must be `func(*Context)` or `func(*Context) error`. %s", contactSysadmin)+
+		fmt.Sprintf("See %s", appActionDeprecationURL), 2)
+)
+
+// App is the main structure of a cli application. It is recommended that
+// an app be created with the cli.NewApp() function
+type App struct {
+	// The name of the program. Defaults to path.Base(os.Args[0])
+	Name string
+	// Full name of command for help, defaults to Name
+	HelpName string
+	// Description of the program.
+	Usage string
+	// Text to override the USAGE section of help
+	UsageText string
+	// Description of the program argument format.
+	ArgsUsage string
+	// Version of the program
+	Version string
+	// Description of the program
+	Description string
+	// List of commands to execute
+	Commands []Command
+	// List of flags to parse
+	Flags []Flag
+	// Boolean to enable bash completion commands
+	EnableBashCompletion bool
+	// Boolean to hide built-in help command
+	HideHelp bool
+	// Boolean to hide built-in version flag and the VERSION section of help
+	HideVersion bool
+	// Populated on app startup; only gettable through the Categories() method
+	categories CommandCategories
+	// An action to execute when the bash-completion flag is set
+	BashComplete BashCompleteFunc
+	// An action to execute before any subcommands are run, but after the context is ready
+	// If a non-nil error is returned, no subcommands are run
+	Before BeforeFunc
+	// An action to execute after any subcommands are run and have finished
+	// It is run even if Action() panics
+	After AfterFunc
+
+	// The action to execute when no subcommands are specified
+	// Expects a `cli.ActionFunc` but will accept the *deprecated* signature of `func(*cli.Context) {}`
+	// *Note*: support for the deprecated `Action` signature will be removed in a future version
+	Action interface{}
+
+	// Execute this function if the proper command cannot be found
+	CommandNotFound CommandNotFoundFunc
+	// Execute this function if a usage error occurs
+	OnUsageError OnUsageErrorFunc
+	// Compilation date
+	Compiled time.Time
+	// List of all authors who contributed
+	Authors []Author
+	// Copyright of the binary if any
+	Copyright string
+	// Name of Author (Note: Use App.Authors, this is deprecated)
+	Author string
+	// Email of Author (Note: Use App.Authors, this is deprecated)
+	Email string
+	// Writer is the writer to write output to
+	Writer io.Writer
+	// ErrWriter writes error output
+	ErrWriter io.Writer
+	// Other custom info
+	Metadata map[string]interface{}
+	// Carries a function which returns app-specific info.
+	ExtraInfo func() map[string]string
+	// CustomAppHelpTemplate is the text template for the app help topic.
+	// cli.go uses text/template to render templates. You can
+	// render custom help text by setting this variable.
+	CustomAppHelpTemplate string
+
+	didSetup bool
+}
+
+// compileTime tries to find out when this binary was compiled.
+// Returns the current time if it fails to find it.
+func compileTime() time.Time {
+	info, err := os.Stat(os.Args[0])
+	if err != nil {
+		return time.Now()
+	}
+	return info.ModTime()
+}
+
+// NewApp creates a new cli Application with some reasonable defaults for Name,
+// Usage, Version and Action.
+func NewApp() *App {
+	return &App{
+		Name:         filepath.Base(os.Args[0]),
+		HelpName:     filepath.Base(os.Args[0]),
+		Usage:        "A new cli application",
+		UsageText:    "",
+		Version:      "0.0.0",
+		BashComplete: DefaultAppComplete,
+		Action:       helpCommand.Action,
+		Compiled:     compileTime(),
+		Writer:       os.Stdout,
+	}
+}
+
+// Setup runs initialization code to ensure all data structures are ready for
+// `Run` or inspection prior to `Run`. It is internally called by `Run`, but
+// will return early if setup has already happened.
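
An aside on why Setup (defined next) is exported: it lets callers inspect the fully-initialized command tree, for example for documentation generation, without invoking Run. A hypothetical sketch, not taken from the patch:

```go
package main

import (
	"fmt"

	"gopkg.in/urfave/cli.v1"
)

func main() {
	app := cli.NewApp()
	app.Commands = []cli.Command{{Name: "serve", Category: "daemon"}}
	app.Setup() // appends the help command and flags, populates categories
	for _, cat := range app.Categories() {
		fmt.Printf("%s: %d command(s)\n", cat.Name, len(cat.Commands))
	}
}
```
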
+func (a *App) Setup() { + if a.didSetup { + return + } + + a.didSetup = true + + if a.Author != "" || a.Email != "" { + a.Authors = append(a.Authors, Author{Name: a.Author, Email: a.Email}) + } + + newCmds := []Command{} + for _, c := range a.Commands { + if c.HelpName == "" { + c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name) + } + newCmds = append(newCmds, c) + } + a.Commands = newCmds + + if a.Command(helpCommand.Name) == nil && !a.HideHelp { + a.Commands = append(a.Commands, helpCommand) + if (HelpFlag != BoolFlag{}) { + a.appendFlag(HelpFlag) + } + } + + if !a.HideVersion { + a.appendFlag(VersionFlag) + } + + a.categories = CommandCategories{} + for _, command := range a.Commands { + a.categories = a.categories.AddCommand(command.Category, command) + } + sort.Sort(a.categories) + + if a.Metadata == nil { + a.Metadata = make(map[string]interface{}) + } + + if a.Writer == nil { + a.Writer = os.Stdout + } +} + +// Run is the entry point to the cli app. Parses the arguments slice and routes +// to the proper flag/args combination +func (a *App) Run(arguments []string) (err error) { + a.Setup() + + // handle the completion flag separately from the flagset since + // completion could be attempted after a flag, but before its value was put + // on the command line. this causes the flagset to interpret the completion + // flag name as the value of the flag before it which is undesirable + // note that we can only do this because the shell autocomplete function + // always appends the completion flag at the end of the command + shellComplete, arguments := checkShellCompleteFlag(a, arguments) + + // parse flags + set, err := flagSet(a.Name, a.Flags) + if err != nil { + return err + } + + set.SetOutput(ioutil.Discard) + err = set.Parse(arguments[1:]) + nerr := normalizeFlags(a.Flags, set) + context := NewContext(a, set, nil) + if nerr != nil { + fmt.Fprintln(a.Writer, nerr) + ShowAppHelp(context) + return nerr + } + context.shellComplete = shellComplete + + if checkCompletions(context) { + return nil + } + + if err != nil { + if a.OnUsageError != nil { + err := a.OnUsageError(context, err, false) + HandleExitCoder(err) + return err + } + fmt.Fprintf(a.Writer, "%s %s\n\n", "Incorrect Usage.", err.Error()) + ShowAppHelp(context) + return err + } + + if !a.HideHelp && checkHelp(context) { + ShowAppHelp(context) + return nil + } + + if !a.HideVersion && checkVersion(context) { + ShowVersion(context) + return nil + } + + if a.After != nil { + defer func() { + if afterErr := a.After(context); afterErr != nil { + if err != nil { + err = NewMultiError(err, afterErr) + } else { + err = afterErr + } + } + }() + } + + if a.Before != nil { + beforeErr := a.Before(context) + if beforeErr != nil { + ShowAppHelp(context) + HandleExitCoder(beforeErr) + err = beforeErr + return err + } + } + + args := context.Args() + if args.Present() { + name := args.First() + c := a.Command(name) + if c != nil { + return c.Run(context) + } + } + + if a.Action == nil { + a.Action = helpCommand.Action + } + + // Run default Action + err = HandleAction(a.Action, context) + + HandleExitCoder(err) + return err +} + +// RunAndExitOnError calls .Run() and exits non-zero if an error was returned +// +// Deprecated: instead you should return an error that fulfills cli.ExitCoder +// to cli.App.Run. 
This will cause the application to exit with the given error
+// code in the cli.ExitCoder
+func (a *App) RunAndExitOnError() {
+	if err := a.Run(os.Args); err != nil {
+		fmt.Fprintln(a.errWriter(), err)
+		OsExiter(1)
+	}
+}
+
+// RunAsSubcommand invokes the subcommand given the context, parses ctx.Args() to
+// generate command-specific flags
+func (a *App) RunAsSubcommand(ctx *Context) (err error) {
+	// append help to commands
+	if len(a.Commands) > 0 {
+		if a.Command(helpCommand.Name) == nil && !a.HideHelp {
+			a.Commands = append(a.Commands, helpCommand)
+			if (HelpFlag != BoolFlag{}) {
+				a.appendFlag(HelpFlag)
+			}
+		}
+	}
+
+	newCmds := []Command{}
+	for _, c := range a.Commands {
+		if c.HelpName == "" {
+			c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name)
+		}
+		newCmds = append(newCmds, c)
+	}
+	a.Commands = newCmds
+
+	// parse flags
+	set, err := flagSet(a.Name, a.Flags)
+	if err != nil {
+		return err
+	}
+
+	set.SetOutput(ioutil.Discard)
+	err = set.Parse(ctx.Args().Tail())
+	nerr := normalizeFlags(a.Flags, set)
+	context := NewContext(a, set, ctx)
+
+	if nerr != nil {
+		fmt.Fprintln(a.Writer, nerr)
+		fmt.Fprintln(a.Writer)
+		if len(a.Commands) > 0 {
+			ShowSubcommandHelp(context)
+		} else {
+			ShowCommandHelp(ctx, context.Args().First())
+		}
+		return nerr
+	}
+
+	if checkCompletions(context) {
+		return nil
+	}
+
+	if err != nil {
+		if a.OnUsageError != nil {
+			err = a.OnUsageError(context, err, true)
+			HandleExitCoder(err)
+			return err
+		}
+		fmt.Fprintf(a.Writer, "%s %s\n\n", "Incorrect Usage.", err.Error())
+		ShowSubcommandHelp(context)
+		return err
+	}
+
+	if len(a.Commands) > 0 {
+		if checkSubcommandHelp(context) {
+			return nil
+		}
+	} else {
+		if checkCommandHelp(ctx, context.Args().First()) {
+			return nil
+		}
+	}
+
+	if a.After != nil {
+		defer func() {
+			afterErr := a.After(context)
+			if afterErr != nil {
+				HandleExitCoder(err)
+				if err != nil {
+					err = NewMultiError(err, afterErr)
+				} else {
+					err = afterErr
+				}
+			}
+		}()
+	}
+
+	if a.Before != nil {
+		beforeErr := a.Before(context)
+		if beforeErr != nil {
+			HandleExitCoder(beforeErr)
+			err = beforeErr
+			return err
+		}
+	}
+
+	args := context.Args()
+	if args.Present() {
+		name := args.First()
+		c := a.Command(name)
+		if c != nil {
+			return c.Run(context)
+		}
+	}
+
+	// Run default Action
+	err = HandleAction(a.Action, context)
+
+	HandleExitCoder(err)
+	return err
+}
+
+// Command returns the named command on App.
Returns nil if the command does not exist
+func (a *App) Command(name string) *Command {
+	for _, c := range a.Commands {
+		if c.HasName(name) {
+			return &c
+		}
+	}
+
+	return nil
+}
+
+// Categories returns a slice containing all the categories with the commands they contain
+func (a *App) Categories() CommandCategories {
+	return a.categories
+}
+
+// VisibleCategories returns a slice of categories and commands that are
+// Hidden=false
+func (a *App) VisibleCategories() []*CommandCategory {
+	ret := []*CommandCategory{}
+	for _, category := range a.categories {
+		if visible := func() *CommandCategory {
+			for _, command := range category.Commands {
+				if !command.Hidden {
+					return category
+				}
+			}
+			return nil
+		}(); visible != nil {
+			ret = append(ret, visible)
+		}
+	}
+	return ret
+}
+
+// VisibleCommands returns a slice of the Commands with Hidden=false
+func (a *App) VisibleCommands() []Command {
+	ret := []Command{}
+	for _, command := range a.Commands {
+		if !command.Hidden {
+			ret = append(ret, command)
+		}
+	}
+	return ret
+}
+
+// VisibleFlags returns a slice of the Flags with Hidden=false
+func (a *App) VisibleFlags() []Flag {
+	return visibleFlags(a.Flags)
+}
+
+func (a *App) hasFlag(flag Flag) bool {
+	for _, f := range a.Flags {
+		if flag == f {
+			return true
+		}
+	}
+
+	return false
+}
+
+func (a *App) errWriter() io.Writer {
+	// When the app ErrWriter is nil use the package level one.
+	if a.ErrWriter == nil {
+		return ErrWriter
+	}
+
+	return a.ErrWriter
+}
+
+func (a *App) appendFlag(flag Flag) {
+	if !a.hasFlag(flag) {
+		a.Flags = append(a.Flags, flag)
+	}
+}
+
+// Author represents someone who has contributed to a cli project.
+type Author struct {
+	Name  string // The Author's name
+	Email string // The Author's email
+}
+
+// String makes Author comply with the Stringer interface, to allow easy printing in the templating process
+func (a Author) String() string {
+	e := ""
+	if a.Email != "" {
+		e = " <" + a.Email + ">"
+	}
+
+	return fmt.Sprintf("%v%v", a.Name, e)
+}
+
+// HandleAction attempts to figure out which Action signature was used. If
+// it's an ActionFunc or a func with the legacy signature for Action, the func
+// is run.
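
To make the dispatch in HandleAction (defined next) concrete, these are the Action shapes it accepts, per the type switch in its body. This is a hedged sketch, not part of the patch:

```go
package main

import (
	"fmt"
	"os"

	"gopkg.in/urfave/cli.v1"
)

func main() {
	app := cli.NewApp()
	// Preferred: matches cli.ActionFunc, i.e. func(*cli.Context) error.
	app.Action = func(c *cli.Context) error { return nil }
	// Still accepted but deprecated: func(*cli.Context) with no error return.
	app.Action = func(c *cli.Context) { fmt.Println("legacy action") }
	// Any other type causes HandleAction to return errInvalidActionType (exit code 2).
	app.Run(os.Args)
}
```
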
+func HandleAction(action interface{}, context *Context) (err error) { + if a, ok := action.(ActionFunc); ok { + return a(context) + } else if a, ok := action.(func(*Context) error); ok { + return a(context) + } else if a, ok := action.(func(*Context)); ok { // deprecated function signature + a(context) + return nil + } else { + return errInvalidActionType + } +} diff --git a/vendor/github.com/urfave/cli/app_test.go b/vendor/github.com/urfave/cli/app_test.go new file mode 100644 index 000000000..e14ddaf67 --- /dev/null +++ b/vendor/github.com/urfave/cli/app_test.go @@ -0,0 +1,1742 @@ +package cli + +import ( + "bytes" + "errors" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "reflect" + "strings" + "testing" +) + +var ( + lastExitCode = 0 + fakeOsExiter = func(rc int) { + lastExitCode = rc + } + fakeErrWriter = &bytes.Buffer{} +) + +func init() { + OsExiter = fakeOsExiter + ErrWriter = fakeErrWriter +} + +type opCounts struct { + Total, BashComplete, OnUsageError, Before, CommandNotFound, Action, After, SubCommand int +} + +func ExampleApp_Run() { + // set args for examples sake + os.Args = []string{"greet", "--name", "Jeremy"} + + app := NewApp() + app.Name = "greet" + app.Flags = []Flag{ + StringFlag{Name: "name", Value: "bob", Usage: "a name to say"}, + } + app.Action = func(c *Context) error { + fmt.Printf("Hello %v\n", c.String("name")) + return nil + } + app.UsageText = "app [first_arg] [second_arg]" + app.Author = "Harrison" + app.Email = "harrison@lolwut.com" + app.Authors = []Author{{Name: "Oliver Allen", Email: "oliver@toyshop.com"}} + app.Run(os.Args) + // Output: + // Hello Jeremy +} + +func ExampleApp_Run_subcommand() { + // set args for examples sake + os.Args = []string{"say", "hi", "english", "--name", "Jeremy"} + app := NewApp() + app.Name = "say" + app.Commands = []Command{ + { + Name: "hello", + Aliases: []string{"hi"}, + Usage: "use it to see a description", + Description: "This is how we describe hello the function", + Subcommands: []Command{ + { + Name: "english", + Aliases: []string{"en"}, + Usage: "sends a greeting in english", + Description: "greets someone in english", + Flags: []Flag{ + StringFlag{ + Name: "name", + Value: "Bob", + Usage: "Name of the person to greet", + }, + }, + Action: func(c *Context) error { + fmt.Println("Hello,", c.String("name")) + return nil + }, + }, + }, + }, + } + + app.Run(os.Args) + // Output: + // Hello, Jeremy +} + +func ExampleApp_Run_appHelp() { + // set args for examples sake + os.Args = []string{"greet", "help"} + + app := NewApp() + app.Name = "greet" + app.Version = "0.1.0" + app.Description = "This is how we describe greet the app" + app.Authors = []Author{ + {Name: "Harrison", Email: "harrison@lolwut.com"}, + {Name: "Oliver Allen", Email: "oliver@toyshop.com"}, + } + app.Flags = []Flag{ + StringFlag{Name: "name", Value: "bob", Usage: "a name to say"}, + } + app.Commands = []Command{ + { + Name: "describeit", + Aliases: []string{"d"}, + Usage: "use it to see a description", + Description: "This is how we describe describeit the function", + Action: func(c *Context) error { + fmt.Printf("i like to describe things") + return nil + }, + }, + } + app.Run(os.Args) + // Output: + // NAME: + // greet - A new cli application + // + // USAGE: + // greet [global options] command [command options] [arguments...] 
+ // + // VERSION: + // 0.1.0 + // + // DESCRIPTION: + // This is how we describe greet the app + // + // AUTHORS: + // Harrison + // Oliver Allen + // + // COMMANDS: + // describeit, d use it to see a description + // help, h Shows a list of commands or help for one command + // + // GLOBAL OPTIONS: + // --name value a name to say (default: "bob") + // --help, -h show help + // --version, -v print the version +} + +func ExampleApp_Run_commandHelp() { + // set args for examples sake + os.Args = []string{"greet", "h", "describeit"} + + app := NewApp() + app.Name = "greet" + app.Flags = []Flag{ + StringFlag{Name: "name", Value: "bob", Usage: "a name to say"}, + } + app.Commands = []Command{ + { + Name: "describeit", + Aliases: []string{"d"}, + Usage: "use it to see a description", + Description: "This is how we describe describeit the function", + Action: func(c *Context) error { + fmt.Printf("i like to describe things") + return nil + }, + }, + } + app.Run(os.Args) + // Output: + // NAME: + // greet describeit - use it to see a description + // + // USAGE: + // greet describeit [arguments...] + // + // DESCRIPTION: + // This is how we describe describeit the function +} + +func ExampleApp_Run_noAction() { + app := App{} + app.Name = "greet" + app.Run([]string{"greet"}) + // Output: + // NAME: + // greet + // + // USAGE: + // [global options] command [command options] [arguments...] + // + // COMMANDS: + // help, h Shows a list of commands or help for one command + // + // GLOBAL OPTIONS: + // --help, -h show help + // --version, -v print the version +} + +func ExampleApp_Run_subcommandNoAction() { + app := App{} + app.Name = "greet" + app.Commands = []Command{ + { + Name: "describeit", + Aliases: []string{"d"}, + Usage: "use it to see a description", + Description: "This is how we describe describeit the function", + }, + } + app.Run([]string{"greet", "describeit"}) + // Output: + // NAME: + // describeit - use it to see a description + // + // USAGE: + // describeit [arguments...] 
+ // + // DESCRIPTION: + // This is how we describe describeit the function + +} + +func ExampleApp_Run_bashComplete() { + // set args for examples sake + os.Args = []string{"greet", "--generate-bash-completion"} + + app := NewApp() + app.Name = "greet" + app.EnableBashCompletion = true + app.Commands = []Command{ + { + Name: "describeit", + Aliases: []string{"d"}, + Usage: "use it to see a description", + Description: "This is how we describe describeit the function", + Action: func(c *Context) error { + fmt.Printf("i like to describe things") + return nil + }, + }, { + Name: "next", + Usage: "next example", + Description: "more stuff to see when generating bash completion", + Action: func(c *Context) error { + fmt.Printf("the next example") + return nil + }, + }, + } + + app.Run(os.Args) + // Output: + // describeit + // d + // next + // help + // h +} + +func TestApp_Run(t *testing.T) { + s := "" + + app := NewApp() + app.Action = func(c *Context) error { + s = s + c.Args().First() + return nil + } + + err := app.Run([]string{"command", "foo"}) + expect(t, err, nil) + err = app.Run([]string{"command", "bar"}) + expect(t, err, nil) + expect(t, s, "foobar") +} + +var commandAppTests = []struct { + name string + expected bool +}{ + {"foobar", true}, + {"batbaz", true}, + {"b", true}, + {"f", true}, + {"bat", false}, + {"nothing", false}, +} + +func TestApp_Command(t *testing.T) { + app := NewApp() + fooCommand := Command{Name: "foobar", Aliases: []string{"f"}} + batCommand := Command{Name: "batbaz", Aliases: []string{"b"}} + app.Commands = []Command{ + fooCommand, + batCommand, + } + + for _, test := range commandAppTests { + expect(t, app.Command(test.name) != nil, test.expected) + } +} + +func TestApp_Setup_defaultsWriter(t *testing.T) { + app := &App{} + app.Setup() + expect(t, app.Writer, os.Stdout) +} + +func TestApp_CommandWithArgBeforeFlags(t *testing.T) { + var parsedOption, firstArg string + + app := NewApp() + command := Command{ + Name: "cmd", + Flags: []Flag{ + StringFlag{Name: "option", Value: "", Usage: "some option"}, + }, + Action: func(c *Context) error { + parsedOption = c.String("option") + firstArg = c.Args().First() + return nil + }, + } + app.Commands = []Command{command} + + app.Run([]string{"", "cmd", "my-arg", "--option", "my-option"}) + + expect(t, parsedOption, "my-option") + expect(t, firstArg, "my-arg") +} + +func TestApp_RunAsSubcommandParseFlags(t *testing.T) { + var context *Context + + a := NewApp() + a.Commands = []Command{ + { + Name: "foo", + Action: func(c *Context) error { + context = c + return nil + }, + Flags: []Flag{ + StringFlag{ + Name: "lang", + Value: "english", + Usage: "language for the greeting", + }, + }, + Before: func(_ *Context) error { return nil }, + }, + } + a.Run([]string{"", "foo", "--lang", "spanish", "abcd"}) + + expect(t, context.Args().Get(0), "abcd") + expect(t, context.String("lang"), "spanish") +} + +func TestApp_RunAsSubCommandIncorrectUsage(t *testing.T) { + a := App{ + Flags: []Flag{ + StringFlag{Name: "--foo"}, + }, + Writer: bytes.NewBufferString(""), + } + + set := flag.NewFlagSet("", flag.ContinueOnError) + set.Parse([]string{"", "---foo"}) + c := &Context{flagSet: set} + + err := a.RunAsSubcommand(c) + + expect(t, err, errors.New("bad flag syntax: ---foo")) +} + +func TestApp_CommandWithFlagBeforeTerminator(t *testing.T) { + var parsedOption string + var args []string + + app := NewApp() + command := Command{ + Name: "cmd", + Flags: []Flag{ + StringFlag{Name: "option", Value: "", Usage: "some option"}, + }, + Action: 
func(c *Context) error {
+			parsedOption = c.String("option")
+			args = c.Args()
+			return nil
+		},
+	}
+	app.Commands = []Command{command}
+
+	app.Run([]string{"", "cmd", "my-arg", "--option", "my-option", "--", "--notARealFlag"})
+
+	expect(t, parsedOption, "my-option")
+	expect(t, args[0], "my-arg")
+	expect(t, args[1], "--")
+	expect(t, args[2], "--notARealFlag")
+}
+
+func TestApp_CommandWithDash(t *testing.T) {
+	var args []string
+
+	app := NewApp()
+	command := Command{
+		Name: "cmd",
+		Action: func(c *Context) error {
+			args = c.Args()
+			return nil
+		},
+	}
+	app.Commands = []Command{command}
+
+	app.Run([]string{"", "cmd", "my-arg", "-"})
+
+	expect(t, args[0], "my-arg")
+	expect(t, args[1], "-")
+}
+
+func TestApp_CommandWithNoFlagBeforeTerminator(t *testing.T) {
+	var args []string
+
+	app := NewApp()
+	command := Command{
+		Name: "cmd",
+		Action: func(c *Context) error {
+			args = c.Args()
+			return nil
+		},
+	}
+	app.Commands = []Command{command}
+
+	app.Run([]string{"", "cmd", "my-arg", "--", "notAFlagAtAll"})
+
+	expect(t, args[0], "my-arg")
+	expect(t, args[1], "--")
+	expect(t, args[2], "notAFlagAtAll")
+}
+
+func TestApp_VisibleCommands(t *testing.T) {
+	app := NewApp()
+	app.Commands = []Command{
+		{
+			Name:     "frob",
+			HelpName: "foo frob",
+			Action:   func(_ *Context) error { return nil },
+		},
+		{
+			Name:     "frib",
+			HelpName: "foo frib",
+			Hidden:   true,
+			Action:   func(_ *Context) error { return nil },
+		},
+	}
+
+	app.Setup()
+	expected := []Command{
+		app.Commands[0],
+		app.Commands[2], // help
+	}
+	actual := app.VisibleCommands()
+	expect(t, len(expected), len(actual))
+	for i, actualCommand := range actual {
+		expectedCommand := expected[i]
+
+		if expectedCommand.Action != nil {
+			// comparing func addresses is OK!
+			expect(t, fmt.Sprintf("%p", expectedCommand.Action), fmt.Sprintf("%p", actualCommand.Action))
+		}
+
+		// nil out funcs, as they cannot be compared
+		// (https://github.com/golang/go/issues/8554)
+		expectedCommand.Action = nil
+		actualCommand.Action = nil
+
+		if !reflect.DeepEqual(expectedCommand, actualCommand) {
+			t.Errorf("expected\n%#v\n!=\n%#v", expectedCommand, actualCommand)
+		}
+	}
+}
+
+func TestApp_Float64Flag(t *testing.T) {
+	var meters float64
+
+	app := NewApp()
+	app.Flags = []Flag{
+		Float64Flag{Name: "height", Value: 1.5, Usage: "Set the height, in meters"},
+	}
+	app.Action = func(c *Context) error {
+		meters = c.Float64("height")
+		return nil
+	}
+
+	app.Run([]string{"", "--height", "1.93"})
+	expect(t, meters, 1.93)
+}
+
+func TestApp_ParseSliceFlags(t *testing.T) {
+	var parsedOption, firstArg string
+	var parsedIntSlice []int
+	var parsedStringSlice []string
+
+	app := NewApp()
+	command := Command{
+		Name: "cmd",
+		Flags: []Flag{
+			IntSliceFlag{Name: "p", Value: &IntSlice{}, Usage: "set one or more ports to open"},
+			StringSliceFlag{Name: "ip", Value: &StringSlice{}, Usage: "set one or more ip addr"},
+		},
+		Action: func(c *Context) error {
+			parsedIntSlice = c.IntSlice("p")
+			parsedStringSlice = c.StringSlice("ip")
+			parsedOption = c.String("option")
+			firstArg = c.Args().First()
+			return nil
+		},
+	}
+	app.Commands = []Command{command}
+
+	app.Run([]string{"", "cmd", "my-arg", "-p", "22", "-p", "80", "-ip", "8.8.8.8", "-ip", "8.8.4.4"})
+
+	IntsEquals := func(a, b []int) bool {
+		if len(a) != len(b) {
+			return false
+		}
+		for i, v := range a {
+			if v != b[i] {
+				return false
+			}
+		}
+		return true
+	}
+
+	StrsEquals := func(a, b []string) bool {
+		if len(a) != len(b) {
+			return false
+		}
+		for i, v := range a {
+			if v != b[i] {
+				return false
+			}
+		}
+		return true
+	}
+	var expectedIntSlice = []int{22, 80}
+	var expectedStringSlice = []string{"8.8.8.8", "8.8.4.4"}
+
+	if !IntsEquals(parsedIntSlice, expectedIntSlice) {
+		t.Errorf("%v does not match %v", parsedIntSlice, expectedIntSlice)
+	}
+
+	if !StrsEquals(parsedStringSlice, expectedStringSlice) {
+		t.Errorf("%v does not match %v", parsedStringSlice, expectedStringSlice)
+	}
+}
+
+func TestApp_ParseSliceFlagsWithMissingValue(t *testing.T) {
+	var parsedIntSlice []int
+	var parsedStringSlice []string
+
+	app := NewApp()
+	command := Command{
+		Name: "cmd",
+		Flags: []Flag{
+			IntSliceFlag{Name: "a", Usage: "set numbers"},
+			StringSliceFlag{Name: "str", Usage: "set strings"},
+		},
+		Action: func(c *Context) error {
+			parsedIntSlice = c.IntSlice("a")
+			parsedStringSlice = c.StringSlice("str")
+			return nil
+		},
+	}
+	app.Commands = []Command{command}
+
+	app.Run([]string{"", "cmd", "my-arg", "-a", "2", "-str", "A"})
+
+	var expectedIntSlice = []int{2}
+	var expectedStringSlice = []string{"A"}
+
+	if parsedIntSlice[0] != expectedIntSlice[0] {
+		t.Errorf("%v does not match %v", parsedIntSlice[0], expectedIntSlice[0])
+	}
+
+	if parsedStringSlice[0] != expectedStringSlice[0] {
+		t.Errorf("%v does not match %v", parsedStringSlice[0], expectedStringSlice[0])
+	}
+}
+
+func TestApp_DefaultStdout(t *testing.T) {
+	app := NewApp()
+
+	if app.Writer != os.Stdout {
+		t.Error("Default output writer not set.")
+	}
+}
+
+type mockWriter struct {
+	written []byte
+}
+
+func (fw *mockWriter) Write(p []byte) (n int, err error) {
+	if fw.written == nil {
+		fw.written = p
+	} else {
+		fw.written = append(fw.written, p...)
+	}
+
+	return len(p), nil
+}
+
+func (fw *mockWriter) GetWritten() (b []byte) {
+	return fw.written
+}
+
+func TestApp_SetStdout(t *testing.T) {
+	w := &mockWriter{}
+
+	app := NewApp()
+	app.Name = "test"
+	app.Writer = w
+
+	err := app.Run([]string{"help"})
+
+	if err != nil {
+		t.Fatalf("Run error: %s", err)
+	}
+
+	if len(w.written) == 0 {
+		t.Error("App did not write output to desired writer.")
+	}
+}
+
+func TestApp_BeforeFunc(t *testing.T) {
+	counts := &opCounts{}
+	beforeError := fmt.Errorf("fail")
+	var err error
+
+	app := NewApp()
+
+	app.Before = func(c *Context) error {
+		counts.Total++
+		counts.Before = counts.Total
+		s := c.String("opt")
+		if s == "fail" {
+			return beforeError
+		}
+
+		return nil
+	}
+
+	app.Commands = []Command{
+		{
+			Name: "sub",
+			Action: func(c *Context) error {
+				counts.Total++
+				counts.SubCommand = counts.Total
+				return nil
+			},
+		},
+	}
+
+	app.Flags = []Flag{
+		StringFlag{Name: "opt"},
+	}
+
+	// run with the Before() func succeeding
+	err = app.Run([]string{"command", "--opt", "succeed", "sub"})
+
+	if err != nil {
+		t.Fatalf("Run error: %s", err)
+	}
+
+	if counts.Before != 1 {
+		t.Errorf("Before() not executed when expected")
+	}
+
+	if counts.SubCommand != 2 {
+		t.Errorf("Subcommand not executed when expected")
+	}
+
+	// reset
+	counts = &opCounts{}
+
+	// run with the Before() func failing
+	err = app.Run([]string{"command", "--opt", "fail", "sub"})
+
+	// should be the same error produced by the Before func
+	if err != beforeError {
+		t.Errorf("Run error expected, but not received")
+	}
+
+	if counts.Before != 1 {
+		t.Errorf("Before() not executed when expected")
+	}
+
+	if counts.SubCommand != 0 {
+		t.Errorf("Subcommand executed when NOT expected")
+	}
+
+	// reset
+	counts = &opCounts{}
+
+	afterError := errors.New("fail again")
+	app.After = func(_ *Context) error {
+		return afterError
+	}
+
+	// run with the Before() func failing, wrapped by After()
+	err = app.Run([]string{"command", "--opt", "fail", "sub"})
+
+	// the Before error should now be wrapped with the After error in a MultiError
+	if _, ok := err.(MultiError); !ok {
+		t.Errorf("MultiError expected, but not received")
+	}
+
+	if counts.Before != 1 {
+		t.Errorf("Before() not executed when expected")
+	}
+
+	if counts.SubCommand != 0 {
+		t.Errorf("Subcommand executed when NOT expected")
+	}
+}
+
+func TestApp_AfterFunc(t *testing.T) {
+	counts := &opCounts{}
+	afterError := fmt.Errorf("fail")
+	var err error
+
+	app := NewApp()
+
+	app.After = func(c *Context) error {
+		counts.Total++
+		counts.After = counts.Total
+		s := c.String("opt")
+		if s == "fail" {
+			return afterError
+		}
+
+		return nil
+	}
+
+	app.Commands = []Command{
+		{
+			Name: "sub",
+			Action: func(c *Context) error {
+				counts.Total++
+				counts.SubCommand = counts.Total
+				return nil
+			},
+		},
+	}
+
+	app.Flags = []Flag{
+		StringFlag{Name: "opt"},
+	}
+
+	// run with the After() func succeeding
+	err = app.Run([]string{"command", "--opt", "succeed", "sub"})
+
+	if err != nil {
+		t.Fatalf("Run error: %s", err)
+	}
+
+	if counts.After != 2 {
+		t.Errorf("After() not executed when expected")
+	}
+
+	if counts.SubCommand != 1 {
+		t.Errorf("Subcommand not executed when expected")
+	}
+
+	// reset
+	counts = &opCounts{}
+
+	// run with the After() func failing
+	err = app.Run([]string{"command", "--opt", "fail", "sub"})
+
+	// should be the same error produced by the After func
+	if err != afterError {
+		t.Errorf("Run error expected, but not received")
+	}
+
+	if counts.After != 2 {
+		t.Errorf("After() not executed when expected")
+	}
+
+	if counts.SubCommand != 1 {
+		t.Errorf("Subcommand not executed when expected")
+	}
+}
+
+func TestAppNoHelpFlag(t *testing.T) {
+	oldFlag := HelpFlag
+	defer func() {
+		HelpFlag = oldFlag
+	}()
+
+	HelpFlag = BoolFlag{}
+
+	app := NewApp()
+	app.Writer = ioutil.Discard
+	err := app.Run([]string{"test", "-h"})
+
+	if err != flag.ErrHelp {
+		t.Errorf("expected error about missing help flag, but got: %s (%T)", err, err)
+	}
+}
+
+func TestAppHelpPrinter(t *testing.T) {
+	oldPrinter := HelpPrinter
+	defer func() {
+		HelpPrinter = oldPrinter
+	}()
+
+	var wasCalled = false
+	HelpPrinter = func(w io.Writer, template string, data interface{}) {
+		wasCalled = true
+	}
+
+	app := NewApp()
+	app.Run([]string{"-h"})
+
+	if wasCalled == false {
+		t.Errorf("Help printer expected to be called, but was not")
+	}
+}
+
+func TestApp_VersionPrinter(t *testing.T) {
+	oldPrinter := VersionPrinter
+	defer func() {
+		VersionPrinter = oldPrinter
+	}()
+
+	var wasCalled = false
+	VersionPrinter = func(c *Context) {
+		wasCalled = true
+	}
+
+	app := NewApp()
+	ctx := NewContext(app, nil, nil)
+	ShowVersion(ctx)
+
+	if wasCalled == false {
+		t.Errorf("Version printer expected to be called, but was not")
+	}
+}
+
+func TestApp_CommandNotFound(t *testing.T) {
+	counts := &opCounts{}
+	app := NewApp()
+
+	app.CommandNotFound = func(c *Context, command string) {
+		counts.Total++
+		counts.CommandNotFound = counts.Total
+	}
+
+	app.Commands = []Command{
+		{
+			Name: "bar",
+			Action: func(c *Context) error {
+				counts.Total++
+				counts.SubCommand = counts.Total
+				return nil
+			},
+		},
+	}
+
+	app.Run([]string{"command", "foo"})
+
+	expect(t, counts.CommandNotFound, 1)
+	expect(t, counts.SubCommand, 0)
+	expect(t, counts.Total, 1)
+}
+
+func TestApp_OrderOfOperations(t *testing.T) {
+	counts := &opCounts{}
+
+	resetCounts := func() { counts = &opCounts{} }
+
+	app := NewApp()
+	app.EnableBashCompletion = true
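+	// (The assertions below pin down the dispatch order: OnUsageError and
+	// bash completion short-circuit everything else; otherwise Before runs
+	// first, then the matched Command or the app-level Action, and After
+	// runs last, even when Before fails.)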
app.BashComplete = func(c *Context) { + counts.Total++ + counts.BashComplete = counts.Total + } + + app.OnUsageError = func(c *Context, err error, isSubcommand bool) error { + counts.Total++ + counts.OnUsageError = counts.Total + return errors.New("hay OnUsageError") + } + + beforeNoError := func(c *Context) error { + counts.Total++ + counts.Before = counts.Total + return nil + } + + beforeError := func(c *Context) error { + counts.Total++ + counts.Before = counts.Total + return errors.New("hay Before") + } + + app.Before = beforeNoError + app.CommandNotFound = func(c *Context, command string) { + counts.Total++ + counts.CommandNotFound = counts.Total + } + + afterNoError := func(c *Context) error { + counts.Total++ + counts.After = counts.Total + return nil + } + + afterError := func(c *Context) error { + counts.Total++ + counts.After = counts.Total + return errors.New("hay After") + } + + app.After = afterNoError + app.Commands = []Command{ + { + Name: "bar", + Action: func(c *Context) error { + counts.Total++ + counts.SubCommand = counts.Total + return nil + }, + }, + } + + app.Action = func(c *Context) error { + counts.Total++ + counts.Action = counts.Total + return nil + } + + _ = app.Run([]string{"command", "--nope"}) + expect(t, counts.OnUsageError, 1) + expect(t, counts.Total, 1) + + resetCounts() + + _ = app.Run([]string{"command", "--generate-bash-completion"}) + expect(t, counts.BashComplete, 1) + expect(t, counts.Total, 1) + + resetCounts() + + oldOnUsageError := app.OnUsageError + app.OnUsageError = nil + _ = app.Run([]string{"command", "--nope"}) + expect(t, counts.Total, 0) + app.OnUsageError = oldOnUsageError + + resetCounts() + + _ = app.Run([]string{"command", "foo"}) + expect(t, counts.OnUsageError, 0) + expect(t, counts.Before, 1) + expect(t, counts.CommandNotFound, 0) + expect(t, counts.Action, 2) + expect(t, counts.After, 3) + expect(t, counts.Total, 3) + + resetCounts() + + app.Before = beforeError + _ = app.Run([]string{"command", "bar"}) + expect(t, counts.OnUsageError, 0) + expect(t, counts.Before, 1) + expect(t, counts.After, 2) + expect(t, counts.Total, 2) + app.Before = beforeNoError + + resetCounts() + + app.After = nil + _ = app.Run([]string{"command", "bar"}) + expect(t, counts.OnUsageError, 0) + expect(t, counts.Before, 1) + expect(t, counts.SubCommand, 2) + expect(t, counts.Total, 2) + app.After = afterNoError + + resetCounts() + + app.After = afterError + err := app.Run([]string{"command", "bar"}) + if err == nil { + t.Fatalf("expected a non-nil error") + } + expect(t, counts.OnUsageError, 0) + expect(t, counts.Before, 1) + expect(t, counts.SubCommand, 2) + expect(t, counts.After, 3) + expect(t, counts.Total, 3) + app.After = afterNoError + + resetCounts() + + oldCommands := app.Commands + app.Commands = nil + _ = app.Run([]string{"command"}) + expect(t, counts.OnUsageError, 0) + expect(t, counts.Before, 1) + expect(t, counts.Action, 2) + expect(t, counts.After, 3) + expect(t, counts.Total, 3) + app.Commands = oldCommands +} + +func TestApp_Run_CommandWithSubcommandHasHelpTopic(t *testing.T) { + var subcommandHelpTopics = [][]string{ + {"command", "foo", "--help"}, + {"command", "foo", "-h"}, + {"command", "foo", "help"}, + } + + for _, flagSet := range subcommandHelpTopics { + t.Logf("==> checking with flags %v", flagSet) + + app := NewApp() + buf := new(bytes.Buffer) + app.Writer = buf + + subCmdBar := Command{ + Name: "bar", + Usage: "does bar things", + } + subCmdBaz := Command{ + Name: "baz", + Usage: "does baz things", + } + cmd := Command{ + Name: 
"foo", + Description: "descriptive wall of text about how it does foo things", + Subcommands: []Command{subCmdBar, subCmdBaz}, + Action: func(c *Context) error { return nil }, + } + + app.Commands = []Command{cmd} + err := app.Run(flagSet) + + if err != nil { + t.Error(err) + } + + output := buf.String() + t.Logf("output: %q\n", buf.Bytes()) + + if strings.Contains(output, "No help topic for") { + t.Errorf("expect a help topic, got none: \n%q", output) + } + + for _, shouldContain := range []string{ + cmd.Name, cmd.Description, + subCmdBar.Name, subCmdBar.Usage, + subCmdBaz.Name, subCmdBaz.Usage, + } { + if !strings.Contains(output, shouldContain) { + t.Errorf("want help to contain %q, did not: \n%q", shouldContain, output) + } + } + } +} + +func TestApp_Run_SubcommandFullPath(t *testing.T) { + app := NewApp() + buf := new(bytes.Buffer) + app.Writer = buf + app.Name = "command" + subCmd := Command{ + Name: "bar", + Usage: "does bar things", + } + cmd := Command{ + Name: "foo", + Description: "foo commands", + Subcommands: []Command{subCmd}, + } + app.Commands = []Command{cmd} + + err := app.Run([]string{"command", "foo", "bar", "--help"}) + if err != nil { + t.Error(err) + } + + output := buf.String() + if !strings.Contains(output, "command foo bar - does bar things") { + t.Errorf("expected full path to subcommand: %s", output) + } + if !strings.Contains(output, "command foo bar [arguments...]") { + t.Errorf("expected full path to subcommand: %s", output) + } +} + +func TestApp_Run_SubcommandHelpName(t *testing.T) { + app := NewApp() + buf := new(bytes.Buffer) + app.Writer = buf + app.Name = "command" + subCmd := Command{ + Name: "bar", + HelpName: "custom", + Usage: "does bar things", + } + cmd := Command{ + Name: "foo", + Description: "foo commands", + Subcommands: []Command{subCmd}, + } + app.Commands = []Command{cmd} + + err := app.Run([]string{"command", "foo", "bar", "--help"}) + if err != nil { + t.Error(err) + } + + output := buf.String() + if !strings.Contains(output, "custom - does bar things") { + t.Errorf("expected HelpName for subcommand: %s", output) + } + if !strings.Contains(output, "custom [arguments...]") { + t.Errorf("expected HelpName to subcommand: %s", output) + } +} + +func TestApp_Run_CommandHelpName(t *testing.T) { + app := NewApp() + buf := new(bytes.Buffer) + app.Writer = buf + app.Name = "command" + subCmd := Command{ + Name: "bar", + Usage: "does bar things", + } + cmd := Command{ + Name: "foo", + HelpName: "custom", + Description: "foo commands", + Subcommands: []Command{subCmd}, + } + app.Commands = []Command{cmd} + + err := app.Run([]string{"command", "foo", "bar", "--help"}) + if err != nil { + t.Error(err) + } + + output := buf.String() + if !strings.Contains(output, "command foo bar - does bar things") { + t.Errorf("expected full path to subcommand: %s", output) + } + if !strings.Contains(output, "command foo bar [arguments...]") { + t.Errorf("expected full path to subcommand: %s", output) + } +} + +func TestApp_Run_CommandSubcommandHelpName(t *testing.T) { + app := NewApp() + buf := new(bytes.Buffer) + app.Writer = buf + app.Name = "base" + subCmd := Command{ + Name: "bar", + HelpName: "custom", + Usage: "does bar things", + } + cmd := Command{ + Name: "foo", + Description: "foo commands", + Subcommands: []Command{subCmd}, + } + app.Commands = []Command{cmd} + + err := app.Run([]string{"command", "foo", "--help"}) + if err != nil { + t.Error(err) + } + + output := buf.String() + if !strings.Contains(output, "base foo - foo commands") { + 
t.Errorf("expected full path to subcommand: %s", output) + } + if !strings.Contains(output, "base foo command [command options] [arguments...]") { + t.Errorf("expected full path to subcommand: %s", output) + } +} + +func TestApp_Run_Help(t *testing.T) { + var helpArguments = [][]string{{"boom", "--help"}, {"boom", "-h"}, {"boom", "help"}} + + for _, args := range helpArguments { + buf := new(bytes.Buffer) + + t.Logf("==> checking with arguments %v", args) + + app := NewApp() + app.Name = "boom" + app.Usage = "make an explosive entrance" + app.Writer = buf + app.Action = func(c *Context) error { + buf.WriteString("boom I say!") + return nil + } + + err := app.Run(args) + if err != nil { + t.Error(err) + } + + output := buf.String() + t.Logf("output: %q\n", buf.Bytes()) + + if !strings.Contains(output, "boom - make an explosive entrance") { + t.Errorf("want help to contain %q, did not: \n%q", "boom - make an explosive entrance", output) + } + } +} + +func TestApp_Run_Version(t *testing.T) { + var versionArguments = [][]string{{"boom", "--version"}, {"boom", "-v"}} + + for _, args := range versionArguments { + buf := new(bytes.Buffer) + + t.Logf("==> checking with arguments %v", args) + + app := NewApp() + app.Name = "boom" + app.Usage = "make an explosive entrance" + app.Version = "0.1.0" + app.Writer = buf + app.Action = func(c *Context) error { + buf.WriteString("boom I say!") + return nil + } + + err := app.Run(args) + if err != nil { + t.Error(err) + } + + output := buf.String() + t.Logf("output: %q\n", buf.Bytes()) + + if !strings.Contains(output, "0.1.0") { + t.Errorf("want version to contain %q, did not: \n%q", "0.1.0", output) + } + } +} + +func TestApp_Run_Categories(t *testing.T) { + app := NewApp() + app.Name = "categories" + app.HideHelp = true + app.Commands = []Command{ + { + Name: "command1", + Category: "1", + }, + { + Name: "command2", + Category: "1", + }, + { + Name: "command3", + Category: "2", + }, + } + buf := new(bytes.Buffer) + app.Writer = buf + + app.Run([]string{"categories"}) + + expect := CommandCategories{ + &CommandCategory{ + Name: "1", + Commands: []Command{ + app.Commands[0], + app.Commands[1], + }, + }, + &CommandCategory{ + Name: "2", + Commands: []Command{ + app.Commands[2], + }, + }, + } + if !reflect.DeepEqual(app.Categories(), expect) { + t.Fatalf("expected categories %#v, to equal %#v", app.Categories(), expect) + } + + output := buf.String() + t.Logf("output: %q\n", buf.Bytes()) + + if !strings.Contains(output, "1:\n command1") { + t.Errorf("want buffer to include category %q, did not: \n%q", "1:\n command1", output) + } +} + +func TestApp_VisibleCategories(t *testing.T) { + app := NewApp() + app.Name = "visible-categories" + app.HideHelp = true + app.Commands = []Command{ + { + Name: "command1", + Category: "1", + HelpName: "foo command1", + Hidden: true, + }, + { + Name: "command2", + Category: "2", + HelpName: "foo command2", + }, + { + Name: "command3", + Category: "3", + HelpName: "foo command3", + }, + } + + expected := []*CommandCategory{ + { + Name: "2", + Commands: []Command{ + app.Commands[1], + }, + }, + { + Name: "3", + Commands: []Command{ + app.Commands[2], + }, + }, + } + + app.Setup() + expect(t, expected, app.VisibleCategories()) + + app = NewApp() + app.Name = "visible-categories" + app.HideHelp = true + app.Commands = []Command{ + { + Name: "command1", + Category: "1", + HelpName: "foo command1", + Hidden: true, + }, + { + Name: "command2", + Category: "2", + HelpName: "foo command2", + Hidden: true, + }, + { + Name: "command3", + 
Category: "3", + HelpName: "foo command3", + }, + } + + expected = []*CommandCategory{ + { + Name: "3", + Commands: []Command{ + app.Commands[2], + }, + }, + } + + app.Setup() + expect(t, expected, app.VisibleCategories()) + + app = NewApp() + app.Name = "visible-categories" + app.HideHelp = true + app.Commands = []Command{ + { + Name: "command1", + Category: "1", + HelpName: "foo command1", + Hidden: true, + }, + { + Name: "command2", + Category: "2", + HelpName: "foo command2", + Hidden: true, + }, + { + Name: "command3", + Category: "3", + HelpName: "foo command3", + Hidden: true, + }, + } + + expected = []*CommandCategory{} + + app.Setup() + expect(t, expected, app.VisibleCategories()) +} + +func TestApp_Run_DoesNotOverwriteErrorFromBefore(t *testing.T) { + app := NewApp() + app.Action = func(c *Context) error { return nil } + app.Before = func(c *Context) error { return fmt.Errorf("before error") } + app.After = func(c *Context) error { return fmt.Errorf("after error") } + + err := app.Run([]string{"foo"}) + if err == nil { + t.Fatalf("expected to receive error from Run, got none") + } + + if !strings.Contains(err.Error(), "before error") { + t.Errorf("expected text of error from Before method, but got none in \"%v\"", err) + } + if !strings.Contains(err.Error(), "after error") { + t.Errorf("expected text of error from After method, but got none in \"%v\"", err) + } +} + +func TestApp_Run_SubcommandDoesNotOverwriteErrorFromBefore(t *testing.T) { + app := NewApp() + app.Commands = []Command{ + { + Subcommands: []Command{ + { + Name: "sub", + }, + }, + Name: "bar", + Before: func(c *Context) error { return fmt.Errorf("before error") }, + After: func(c *Context) error { return fmt.Errorf("after error") }, + }, + } + + err := app.Run([]string{"foo", "bar"}) + if err == nil { + t.Fatalf("expected to receive error from Run, got none") + } + + if !strings.Contains(err.Error(), "before error") { + t.Errorf("expected text of error from Before method, but got none in \"%v\"", err) + } + if !strings.Contains(err.Error(), "after error") { + t.Errorf("expected text of error from After method, but got none in \"%v\"", err) + } +} + +func TestApp_OnUsageError_WithWrongFlagValue(t *testing.T) { + app := NewApp() + app.Flags = []Flag{ + IntFlag{Name: "flag"}, + } + app.OnUsageError = func(c *Context, err error, isSubcommand bool) error { + if isSubcommand { + t.Errorf("Expect no subcommand") + } + if !strings.HasPrefix(err.Error(), "invalid value \"wrong\"") { + t.Errorf("Expect an invalid value error, but got \"%v\"", err) + } + return errors.New("intercepted: " + err.Error()) + } + app.Commands = []Command{ + { + Name: "bar", + }, + } + + err := app.Run([]string{"foo", "--flag=wrong"}) + if err == nil { + t.Fatalf("expected to receive error from Run, got none") + } + + if !strings.HasPrefix(err.Error(), "intercepted: invalid value") { + t.Errorf("Expect an intercepted error, but got \"%v\"", err) + } +} + +func TestApp_OnUsageError_WithWrongFlagValue_ForSubcommand(t *testing.T) { + app := NewApp() + app.Flags = []Flag{ + IntFlag{Name: "flag"}, + } + app.OnUsageError = func(c *Context, err error, isSubcommand bool) error { + if isSubcommand { + t.Errorf("Expect subcommand") + } + if !strings.HasPrefix(err.Error(), "invalid value \"wrong\"") { + t.Errorf("Expect an invalid value error, but got \"%v\"", err) + } + return errors.New("intercepted: " + err.Error()) + } + app.Commands = []Command{ + { + Name: "bar", + }, + } + + err := app.Run([]string{"foo", "--flag=wrong", "bar"}) + if err == nil { + 
t.Fatalf("expected to receive error from Run, got none") + } + + if !strings.HasPrefix(err.Error(), "intercepted: invalid value") { + t.Errorf("Expect an intercepted error, but got \"%v\"", err) + } +} + +// A custom flag that conforms to the relevant interfaces, but has none of the +// fields that the other flag types do. +type customBoolFlag struct { + Nombre string +} + +// Don't use the normal FlagStringer +func (c *customBoolFlag) String() string { + return "***" + c.Nombre + "***" +} + +func (c *customBoolFlag) GetName() string { + return c.Nombre +} + +func (c *customBoolFlag) Apply(set *flag.FlagSet) { + set.String(c.Nombre, c.Nombre, "") +} + +func TestCustomFlagsUnused(t *testing.T) { + app := NewApp() + app.Flags = []Flag{&customBoolFlag{"custom"}} + + err := app.Run([]string{"foo"}) + if err != nil { + t.Errorf("Run returned unexpected error: %v", err) + } +} + +func TestCustomFlagsUsed(t *testing.T) { + app := NewApp() + app.Flags = []Flag{&customBoolFlag{"custom"}} + + err := app.Run([]string{"foo", "--custom=bar"}) + if err != nil { + t.Errorf("Run returned unexpected error: %v", err) + } +} + +func TestCustomHelpVersionFlags(t *testing.T) { + app := NewApp() + + // Be sure to reset the global flags + defer func(helpFlag Flag, versionFlag Flag) { + HelpFlag = helpFlag + VersionFlag = versionFlag + }(HelpFlag, VersionFlag) + + HelpFlag = &customBoolFlag{"help-custom"} + VersionFlag = &customBoolFlag{"version-custom"} + + err := app.Run([]string{"foo", "--help-custom=bar"}) + if err != nil { + t.Errorf("Run returned unexpected error: %v", err) + } +} + +func TestHandleAction_WithNonFuncAction(t *testing.T) { + app := NewApp() + app.Action = 42 + fs, err := flagSet(app.Name, app.Flags) + if err != nil { + t.Errorf("error creating FlagSet: %s", err) + } + err = HandleAction(app.Action, NewContext(app, fs, nil)) + + if err == nil { + t.Fatalf("expected to receive error from Run, got none") + } + + exitErr, ok := err.(*ExitError) + + if !ok { + t.Fatalf("expected to receive a *ExitError") + } + + if !strings.HasPrefix(exitErr.Error(), "ERROR invalid Action type.") { + t.Fatalf("expected an unknown Action error, but got: %v", exitErr.Error()) + } + + if exitErr.ExitCode() != 2 { + t.Fatalf("expected error exit code to be 2, but got: %v", exitErr.ExitCode()) + } +} + +func TestHandleAction_WithInvalidFuncSignature(t *testing.T) { + app := NewApp() + app.Action = func() string { return "" } + fs, err := flagSet(app.Name, app.Flags) + if err != nil { + t.Errorf("error creating FlagSet: %s", err) + } + err = HandleAction(app.Action, NewContext(app, fs, nil)) + + if err == nil { + t.Fatalf("expected to receive error from Run, got none") + } + + exitErr, ok := err.(*ExitError) + + if !ok { + t.Fatalf("expected to receive a *ExitError") + } + + if !strings.HasPrefix(exitErr.Error(), "ERROR invalid Action type") { + t.Fatalf("expected an unknown Action error, but got: %v", exitErr.Error()) + } + + if exitErr.ExitCode() != 2 { + t.Fatalf("expected error exit code to be 2, but got: %v", exitErr.ExitCode()) + } +} + +func TestHandleAction_WithInvalidFuncReturnSignature(t *testing.T) { + app := NewApp() + app.Action = func(_ *Context) (int, error) { return 0, nil } + fs, err := flagSet(app.Name, app.Flags) + if err != nil { + t.Errorf("error creating FlagSet: %s", err) + } + err = HandleAction(app.Action, NewContext(app, fs, nil)) + + if err == nil { + t.Fatalf("expected to receive error from Run, got none") + } + + exitErr, ok := err.(*ExitError) + + if !ok { + t.Fatalf("expected to receive a 
*ExitError") + } + + if !strings.HasPrefix(exitErr.Error(), "ERROR invalid Action type") { + t.Fatalf("expected an invalid Action signature error, but got: %v", exitErr.Error()) + } + + if exitErr.ExitCode() != 2 { + t.Fatalf("expected error exit code to be 2, but got: %v", exitErr.ExitCode()) + } +} + +func TestHandleAction_WithUnknownPanic(t *testing.T) { + defer func() { refute(t, recover(), nil) }() + + var fn ActionFunc + + app := NewApp() + app.Action = func(ctx *Context) error { + fn(ctx) + return nil + } + fs, err := flagSet(app.Name, app.Flags) + if err != nil { + t.Errorf("error creating FlagSet: %s", err) + } + HandleAction(app.Action, NewContext(app, fs, nil)) +} + +func TestShellCompletionForIncompleteFlags(t *testing.T) { + app := NewApp() + app.Flags = []Flag{ + IntFlag{ + Name: "test-completion", + }, + } + app.EnableBashCompletion = true + app.BashComplete = func(ctx *Context) { + for _, command := range ctx.App.Commands { + if command.Hidden { + continue + } + + for _, name := range command.Names() { + fmt.Fprintln(ctx.App.Writer, name) + } + } + + for _, flag := range ctx.App.Flags { + for _, name := range strings.Split(flag.GetName(), ",") { + if name == BashCompletionFlag.GetName() { + continue + } + + switch name = strings.TrimSpace(name); len(name) { + case 0: + case 1: + fmt.Fprintln(ctx.App.Writer, "-"+name) + default: + fmt.Fprintln(ctx.App.Writer, "--"+name) + } + } + } + } + app.Action = func(ctx *Context) error { + return fmt.Errorf("should not get here") + } + err := app.Run([]string{"", "--test-completion", "--" + BashCompletionFlag.GetName()}) + if err != nil { + t.Errorf("app should not return an error: %s", err) + } +} + +func TestHandleActionActuallyWorksWithActions(t *testing.T) { + var f ActionFunc + called := false + f = func(c *Context) error { + called = true + return nil + } + + err := HandleAction(f, nil) + + if err != nil { + t.Errorf("Should not have errored: %v", err) + } + + if !called { + t.Errorf("Function was not called") + } +} diff --git a/vendor/github.com/urfave/cli/appveyor.yml b/vendor/github.com/urfave/cli/appveyor.yml new file mode 100644 index 000000000..698b188e1 --- /dev/null +++ b/vendor/github.com/urfave/cli/appveyor.yml @@ -0,0 +1,24 @@ +version: "{build}" + +os: Windows Server 2012 R2 + +clone_folder: c:\gopath\src\github.com\urfave\cli + +environment: + GOPATH: C:\gopath + GOVERSION: 1.6 + PYTHON: C:\Python27-x64 + PYTHON_VERSION: 2.7.x + PYTHON_ARCH: 64 + +install: +- set PATH=%GOPATH%\bin;C:\go\bin;%PATH% +- go version +- go env +- go get github.com/urfave/gfmrun/... +- go get -v -t ./... + +build_script: +- python runtests vet +- python runtests test +- python runtests gfmrun diff --git a/vendor/github.com/urfave/cli/autocomplete/bash_autocomplete b/vendor/github.com/urfave/cli/autocomplete/bash_autocomplete new file mode 100755 index 000000000..37d9c1451 --- /dev/null +++ b/vendor/github.com/urfave/cli/autocomplete/bash_autocomplete @@ -0,0 +1,16 @@ +#! 
/bin/bash
+
+: ${PROG:=$(basename ${BASH_SOURCE})}
+
+_cli_bash_autocomplete() {
+  local cur opts base
+  COMPREPLY=()
+  cur="${COMP_WORDS[COMP_CWORD]}"
+  opts=$( ${COMP_WORDS[@]:0:$COMP_CWORD} --generate-bash-completion )
+  COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
+  return 0
+}
+
+complete -F _cli_bash_autocomplete $PROG
+
+unset PROG
diff --git a/vendor/github.com/urfave/cli/autocomplete/zsh_autocomplete b/vendor/github.com/urfave/cli/autocomplete/zsh_autocomplete
new file mode 100644
index 000000000..5430a18f9
--- /dev/null
+++ b/vendor/github.com/urfave/cli/autocomplete/zsh_autocomplete
@@ -0,0 +1,5 @@
+autoload -U compinit && compinit
+autoload -U bashcompinit && bashcompinit
+
+script_dir=$(dirname $0)
+source ${script_dir}/bash_autocomplete
diff --git a/vendor/github.com/urfave/cli/category.go b/vendor/github.com/urfave/cli/category.go
new file mode 100644
index 000000000..1a6055023
--- /dev/null
+++ b/vendor/github.com/urfave/cli/category.go
@@ -0,0 +1,44 @@
+package cli
+
+// CommandCategories is a slice of *CommandCategory.
+type CommandCategories []*CommandCategory
+
+// CommandCategory is a category containing commands.
+type CommandCategory struct {
+	Name     string
+	Commands Commands
+}
+
+func (c CommandCategories) Less(i, j int) bool {
+	return c[i].Name < c[j].Name
+}
+
+func (c CommandCategories) Len() int {
+	return len(c)
+}
+
+func (c CommandCategories) Swap(i, j int) {
+	c[i], c[j] = c[j], c[i]
+}
+
+// AddCommand adds a command to a category.
+func (c CommandCategories) AddCommand(category string, command Command) CommandCategories {
+	for _, commandCategory := range c {
+		if commandCategory.Name == category {
+			commandCategory.Commands = append(commandCategory.Commands, command)
+			return c
+		}
+	}
+	return append(c, &CommandCategory{Name: category, Commands: []Command{command}})
+}
+
+// VisibleCommands returns a slice of the Commands with Hidden=false
+func (c *CommandCategory) VisibleCommands() []Command {
+	ret := []Command{}
+	for _, command := range c.Commands {
+		if !command.Hidden {
+			ret = append(ret, command)
+		}
+	}
+	return ret
+}
diff --git a/vendor/github.com/urfave/cli/cli.go b/vendor/github.com/urfave/cli/cli.go
new file mode 100644
index 000000000..90c07eb8e
--- /dev/null
+++ b/vendor/github.com/urfave/cli/cli.go
@@ -0,0 +1,22 @@
+// Package cli provides a minimal framework for creating and organizing command line
+// Go applications. cli is designed to be easy to understand and write; the simplest
+// cli application can be written as follows:
+//   func main() {
+//     cli.NewApp().Run(os.Args)
+//   }
+//
+// Of course this application does not do much, so let's make this an actual application:
+//   func main() {
+//     app := cli.NewApp()
+//     app.Name = "greet"
+//     app.Usage = "say a greeting"
+//     app.Action = func(c *cli.Context) error {
+//       println("Greetings")
+//       return nil
+//     }
+//
+//     app.Run(os.Args)
+//   }
+package cli
+
+//go:generate python ./generate-flag-types cli -i flag-types.json -o flag_generated.go
diff --git a/vendor/github.com/urfave/cli/command.go b/vendor/github.com/urfave/cli/command.go
new file mode 100644
index 000000000..23de2944b
--- /dev/null
+++ b/vendor/github.com/urfave/cli/command.go
@@ -0,0 +1,304 @@
+package cli
+
+import (
+	"fmt"
+	"io/ioutil"
+	"sort"
+	"strings"
+)
+
+// Command is a subcommand for a cli.App.
+type Command struct {
+	// The name of the command
+	Name string
+	// short name of the command. Typically one character (deprecated, use `Aliases`)
+	ShortName string
+	// A list of aliases for the command
+	Aliases []string
+	// A short description of the usage of this command
+	Usage string
+	// Custom text to show on USAGE section of help
+	UsageText string
+	// A longer explanation of how the command works
+	Description string
+	// A short description of the arguments of this command
+	ArgsUsage string
+	// The category the command is part of
+	Category string
+	// The function to call when checking for bash command completions
+	BashComplete BashCompleteFunc
+	// An action to execute before any sub-subcommands are run, but after the context is ready
+	// If a non-nil error is returned, no sub-subcommands are run
+	Before BeforeFunc
+	// An action to execute after any subcommands are run, but after the subcommand has finished
+	// It is run even if Action() panics
+	After AfterFunc
+	// The function to call when this command is invoked
+	Action interface{}
+	// TODO: replace `Action: interface{}` with `Action: ActionFunc` once some kind
+	// of deprecation period has passed, maybe?
+
+	// Execute this function if a usage error occurs.
+	OnUsageError OnUsageErrorFunc
+	// List of child commands
+	Subcommands Commands
+	// List of flags to parse
+	Flags []Flag
+	// Treat all flags as normal arguments if true
+	SkipFlagParsing bool
+	// Skip argument reordering which attempts to move flags before arguments,
+	// but only works if all flags appear after all arguments. This behavior was
+	// removed in version 2 since it only works under specific conditions so we
+	// backport here by exposing it as an option for compatibility.
+	SkipArgReorder bool
+	// Boolean to hide built-in help command
+	HideHelp bool
+	// Boolean to hide this command from help or completion
+	Hidden bool
+
+	// Full name of command for help, defaults to full command name, including parent commands.
+	HelpName        string
+	commandNamePath []string
+
+	// CustomHelpTemplate the text template for the command help topic.
+	// cli.go uses text/template to render templates. You can
+	// render custom help text by setting this variable.
+	CustomHelpTemplate string
+}
+
+type CommandsByName []Command
+
+func (c CommandsByName) Len() int {
+	return len(c)
+}
+
+func (c CommandsByName) Less(i, j int) bool {
+	return c[i].Name < c[j].Name
+}
+
+func (c CommandsByName) Swap(i, j int) {
+	c[i], c[j] = c[j], c[i]
+}
+
+// FullName returns the full name of the command.
+// For subcommands this ensures that parent commands are part of the command path
+func (c Command) FullName() string {
+	if c.commandNamePath == nil {
+		return c.Name
+	}
+	return strings.Join(c.commandNamePath, " ")
+}
+
+// Commands is a slice of Command
+type Commands []Command
+
+// Run invokes the command given the context, parses ctx.Args() to generate command-specific flags
+func (c Command) Run(ctx *Context) (err error) {
+	if len(c.Subcommands) > 0 {
+		return c.startApp(ctx)
+	}
+
+	if !c.HideHelp && (HelpFlag != BoolFlag{}) {
+		// append help to flags
+		c.Flags = append(
+			c.Flags,
+			HelpFlag,
+		)
+	}
+
+	set, err := flagSet(c.Name, c.Flags)
+	if err != nil {
+		return err
+	}
+	set.SetOutput(ioutil.Discard)
+
+	if c.SkipFlagParsing {
+		err = set.Parse(append([]string{"--"}, ctx.Args().Tail()...))
+	} else if !c.SkipArgReorder {
+		firstFlagIndex := -1
+		terminatorIndex := -1
+		for index, arg := range ctx.Args() {
+			if arg == "--" {
+				terminatorIndex = index
+				break
+			} else if arg == "-" {
+				// Do nothing. A dash alone is not really a flag.
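+				// (A worked example for the reordering further below: with
+				// ctx.Args() of ["cmd", "a", "-b", "--", "-c"], firstFlagIndex
+				// is 2 and terminatorIndex is 3, so the flag set parses
+				// ["-b", "a", "--", "-c"]: flags first, with the terminator
+				// and everything after it left in place.)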
+ continue + } else if strings.HasPrefix(arg, "-") && firstFlagIndex == -1 { + firstFlagIndex = index + } + } + + if firstFlagIndex > -1 { + args := ctx.Args() + regularArgs := make([]string, len(args[1:firstFlagIndex])) + copy(regularArgs, args[1:firstFlagIndex]) + + var flagArgs []string + if terminatorIndex > -1 { + flagArgs = args[firstFlagIndex:terminatorIndex] + regularArgs = append(regularArgs, args[terminatorIndex:]...) + } else { + flagArgs = args[firstFlagIndex:] + } + + err = set.Parse(append(flagArgs, regularArgs...)) + } else { + err = set.Parse(ctx.Args().Tail()) + } + } else { + err = set.Parse(ctx.Args().Tail()) + } + + nerr := normalizeFlags(c.Flags, set) + if nerr != nil { + fmt.Fprintln(ctx.App.Writer, nerr) + fmt.Fprintln(ctx.App.Writer) + ShowCommandHelp(ctx, c.Name) + return nerr + } + + context := NewContext(ctx.App, set, ctx) + context.Command = c + if checkCommandCompletions(context, c.Name) { + return nil + } + + if err != nil { + if c.OnUsageError != nil { + err := c.OnUsageError(context, err, false) + HandleExitCoder(err) + return err + } + fmt.Fprintln(context.App.Writer, "Incorrect Usage:", err.Error()) + fmt.Fprintln(context.App.Writer) + ShowCommandHelp(context, c.Name) + return err + } + + if checkCommandHelp(context, c.Name) { + return nil + } + + if c.After != nil { + defer func() { + afterErr := c.After(context) + if afterErr != nil { + HandleExitCoder(err) + if err != nil { + err = NewMultiError(err, afterErr) + } else { + err = afterErr + } + } + }() + } + + if c.Before != nil { + err = c.Before(context) + if err != nil { + ShowCommandHelp(context, c.Name) + HandleExitCoder(err) + return err + } + } + + if c.Action == nil { + c.Action = helpSubcommand.Action + } + + err = HandleAction(c.Action, context) + + if err != nil { + HandleExitCoder(err) + } + return err +} + +// Names returns the names including short names and aliases. +func (c Command) Names() []string { + names := []string{c.Name} + + if c.ShortName != "" { + names = append(names, c.ShortName) + } + + return append(names, c.Aliases...) 
+}
+
+// HasName returns true if Command.Name, Command.ShortName or an alias matches the given name
+func (c Command) HasName(name string) bool {
+	for _, n := range c.Names() {
+		if n == name {
+			return true
+		}
+	}
+	return false
+}
+
+func (c Command) startApp(ctx *Context) error {
+	app := NewApp()
+	app.Metadata = ctx.App.Metadata
+	// set the name and usage
+	app.Name = fmt.Sprintf("%s %s", ctx.App.Name, c.Name)
+	if c.HelpName == "" {
+		app.HelpName = app.Name
+	} else {
+		app.HelpName = c.HelpName
+	}
+
+	app.Usage = c.Usage
+	app.Description = c.Description
+	app.ArgsUsage = c.ArgsUsage
+
+	// set CommandNotFound
+	app.CommandNotFound = ctx.App.CommandNotFound
+	app.CustomAppHelpTemplate = c.CustomHelpTemplate
+
+	// set the flags and commands
+	app.Commands = c.Subcommands
+	app.Flags = c.Flags
+	app.HideHelp = c.HideHelp
+
+	app.Version = ctx.App.Version
+	app.HideVersion = ctx.App.HideVersion
+	app.Compiled = ctx.App.Compiled
+	app.Author = ctx.App.Author
+	app.Email = ctx.App.Email
+	app.Writer = ctx.App.Writer
+	app.ErrWriter = ctx.App.ErrWriter
+
+	app.categories = CommandCategories{}
+	for _, command := range c.Subcommands {
+		app.categories = app.categories.AddCommand(command.Category, command)
+	}
+
+	sort.Sort(app.categories)
+
+	// bash completion
+	app.EnableBashCompletion = ctx.App.EnableBashCompletion
+	if c.BashComplete != nil {
+		app.BashComplete = c.BashComplete
+	}
+
+	// set the actions
+	app.Before = c.Before
+	app.After = c.After
+	if c.Action != nil {
+		app.Action = c.Action
+	} else {
+		app.Action = helpSubcommand.Action
+	}
+	app.OnUsageError = c.OnUsageError
+
+	for index, cc := range app.Commands {
+		app.Commands[index].commandNamePath = []string{c.Name, cc.Name}
+	}
+
+	return app.RunAsSubcommand(ctx)
+}
+
+// VisibleFlags returns a slice of the Flags with Hidden=false
+func (c Command) VisibleFlags() []Flag {
+	return visibleFlags(c.Flags)
+}
diff --git a/vendor/github.com/urfave/cli/command_test.go b/vendor/github.com/urfave/cli/command_test.go
new file mode 100644
index 000000000..4ad994c9d
--- /dev/null
+++ b/vendor/github.com/urfave/cli/command_test.go
@@ -0,0 +1,240 @@
+package cli
+
+import (
+	"errors"
+	"flag"
+	"fmt"
+	"io/ioutil"
+	"strings"
+	"testing"
+)
+
+func TestCommandFlagParsing(t *testing.T) {
+	cases := []struct {
+		testArgs        []string
+		skipFlagParsing bool
+		skipArgReorder  bool
+		expectedErr     error
+	}{
+		// Test normal "not ignoring flags" flow
+		{[]string{"test-cmd", "blah", "blah", "-break"}, false, false, errors.New("flag provided but not defined: -break")},
+
+		// Test no arg reorder
+		{[]string{"test-cmd", "blah", "blah", "-break"}, false, true, nil},
+
+		{[]string{"test-cmd", "blah", "blah"}, true, false, nil},   // Test SkipFlagParsing without any args that look like flags
+		{[]string{"test-cmd", "blah", "-break"}, true, false, nil}, // Test SkipFlagParsing with random flag arg
+		{[]string{"test-cmd", "blah", "-help"}, true, false, nil},  // Test SkipFlagParsing with "special" help flag arg
+	}
+
+	for _, c := range cases {
+		app := NewApp()
+		app.Writer = ioutil.Discard
+		set := flag.NewFlagSet("test", 0)
+		set.Parse(c.testArgs)
+
+		context := NewContext(app, set, nil)
+
+		command := Command{
+			Name:            "test-cmd",
+			Aliases:         []string{"tc"},
+			Usage:           "this is for testing",
+			Description:     "testing",
+			Action:          func(_ *Context) error { return nil },
+			SkipFlagParsing: c.skipFlagParsing,
+			SkipArgReorder:  c.skipArgReorder,
+		}
+
+		err := command.Run(context)
+
+		expect(t, err, c.expectedErr)
+		expect(t, []string(context.Args()), c.testArgs)
+	}
+}
+
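+// A minimal sketch (not part of the vendored tests; the "cp" command is
+// hypothetical) of the SkipArgReorder behavior exercised above: by default
+// cli moves flags in front of positional arguments before parsing, while
+// SkipArgReorder parses the arguments as given, so a flag that follows a
+// positional argument stays positional:
+//
+//	app := NewApp()
+//	app.Commands = []Command{{
+//		Name:           "cp",
+//		SkipArgReorder: true,
+//		Flags:          []Flag{BoolFlag{Name: "r"}},
+//		Action: func(c *Context) error {
+//			// parsing stops at the first positional, so
+//			// c.Args() == ["src", "-r", "dst"] and c.Bool("r") == false
+//			return nil
+//		},
+//	}}
+//	app.Run([]string{"prog", "cp", "src", "-r", "dst"})
+//
+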
+func TestCommand_Run_DoesNotOverwriteErrorFromBefore(t *testing.T) {
+	app := NewApp()
+	app.Commands = []Command{
+		{
+			Name: "bar",
+			Before: func(c *Context) error {
+				return fmt.Errorf("before error")
+			},
+			After: func(c *Context) error {
+				return fmt.Errorf("after error")
+			},
+		},
+	}
+
+	err := app.Run([]string{"foo", "bar"})
+	if err == nil {
+		t.Fatalf("expected to receive error from Run, got none")
+	}
+
+	if !strings.Contains(err.Error(), "before error") {
+		t.Errorf("expected text of error from Before method, but got none in \"%v\"", err)
+	}
+	if !strings.Contains(err.Error(), "after error") {
+		t.Errorf("expected text of error from After method, but got none in \"%v\"", err)
+	}
+}
+
+func TestCommand_Run_BeforeSavesMetadata(t *testing.T) {
+	var receivedMsgFromAction string
+	var receivedMsgFromAfter string
+
+	app := NewApp()
+	app.Commands = []Command{
+		{
+			Name: "bar",
+			Before: func(c *Context) error {
+				c.App.Metadata["msg"] = "hello world"
+				return nil
+			},
+			Action: func(c *Context) error {
+				msg, ok := c.App.Metadata["msg"]
+				if !ok {
+					return errors.New("msg not found")
+				}
+				receivedMsgFromAction = msg.(string)
+				return nil
+			},
+			After: func(c *Context) error {
+				msg, ok := c.App.Metadata["msg"]
+				if !ok {
+					return errors.New("msg not found")
+				}
+				receivedMsgFromAfter = msg.(string)
+				return nil
+			},
+		},
+	}
+
+	err := app.Run([]string{"foo", "bar"})
+	if err != nil {
+		t.Fatalf("expected no error from Run, got %s", err)
+	}
+
+	expectedMsg := "hello world"
+
+	if receivedMsgFromAction != expectedMsg {
+		t.Fatalf("expected msg from Action to match. Given: %q\nExpected: %q",
+			receivedMsgFromAction, expectedMsg)
+	}
+	if receivedMsgFromAfter != expectedMsg {
+		t.Fatalf("expected msg from After to match. Given: %q\nExpected: %q",
+			receivedMsgFromAfter, expectedMsg)
+	}
+}
+
+func TestCommand_OnUsageError_hasCommandContext(t *testing.T) {
+	app := NewApp()
+	app.Commands = []Command{
+		{
+			Name: "bar",
+			Flags: []Flag{
+				IntFlag{Name: "flag"},
+			},
+			OnUsageError: func(c *Context, err error, _ bool) error {
+				return fmt.Errorf("intercepted in %s: %s", c.Command.Name, err.Error())
+			},
+		},
+	}
+
+	err := app.Run([]string{"foo", "bar", "--flag=wrong"})
+	if err == nil {
+		t.Fatalf("expected to receive error from Run, got none")
+	}
+
+	if !strings.HasPrefix(err.Error(), "intercepted in bar") {
+		t.Errorf("Expect an intercepted error, but got \"%v\"", err)
+	}
+}
+
+func TestCommand_OnUsageError_WithWrongFlagValue(t *testing.T) {
+	app := NewApp()
+	app.Commands = []Command{
+		{
+			Name: "bar",
+			Flags: []Flag{
+				IntFlag{Name: "flag"},
+			},
+			OnUsageError: func(c *Context, err error, _ bool) error {
+				if !strings.HasPrefix(err.Error(), "invalid value \"wrong\"") {
+					t.Errorf("Expect an invalid value error, but got \"%v\"", err)
+				}
+				return errors.New("intercepted: " + err.Error())
+			},
+		},
+	}
+
+	err := app.Run([]string{"foo", "bar", "--flag=wrong"})
+	if err == nil {
+		t.Fatalf("expected to receive error from Run, got none")
+	}
+
+	if !strings.HasPrefix(err.Error(), "intercepted: invalid value") {
+		t.Errorf("Expect an intercepted error, but got \"%v\"", err)
+	}
+}
+
+func TestCommand_OnUsageError_WithSubcommand(t *testing.T) {
+	app := NewApp()
+	app.Commands = []Command{
+		{
+			Name: "bar",
+			Subcommands: []Command{
+				{
+					Name: "baz",
+				},
+			},
+			Flags: []Flag{
+				IntFlag{Name: "flag"},
+			},
+			OnUsageError: func(c *Context, err error, _ bool) error {
+				if !strings.HasPrefix(err.Error(), "invalid value \"wrong\"") {
+					t.Errorf("Expect an invalid value error, but got \"%v\"", err)
+				}
+				return errors.New("intercepted: " + err.Error())
+			},
+		},
+	}
+
+	err := app.Run([]string{"foo", "bar", "--flag=wrong"})
+	if err == nil {
+		t.Fatalf("expected to receive error from Run, got none")
+	}
+
+	if !strings.HasPrefix(err.Error(), "intercepted: invalid value") {
+		t.Errorf("Expect an intercepted error, but got \"%v\"", err)
+	}
+}
+
+func TestCommand_Run_SubcommandsCanUseErrWriter(t *testing.T) {
+	app := NewApp()
+	app.ErrWriter = ioutil.Discard
+	app.Commands = []Command{
+		{
+			Name:  "bar",
+			Usage: "this is for testing",
+			Subcommands: []Command{
+				{
+					Name:  "baz",
+					Usage: "this is for testing",
+					Action: func(c *Context) error {
+						if c.App.ErrWriter != ioutil.Discard {
+							return fmt.Errorf("ErrWriter not passed")
+						}
+
+						return nil
+					},
+				},
+			},
+		},
+	}
+
+	err := app.Run([]string{"foo", "bar", "baz"})
+	if err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/vendor/github.com/urfave/cli/context.go b/vendor/github.com/urfave/cli/context.go
new file mode 100644
index 000000000..db94191e2
--- /dev/null
+++ b/vendor/github.com/urfave/cli/context.go
@@ -0,0 +1,278 @@
+package cli
+
+import (
+	"errors"
+	"flag"
+	"reflect"
+	"strings"
+	"syscall"
+)
+
+// Context is a type that is passed through to
+// each Handler action in a cli application. Context
+// can be used to retrieve context-specific Args and
+// parsed command-line options.
+type Context struct {
+	App           *App
+	Command       Command
+	shellComplete bool
+	flagSet       *flag.FlagSet
+	setFlags      map[string]bool
+	parentContext *Context
+}
+
+// NewContext creates a new context. For use when invoking an App or Command action.
+func NewContext(app *App, set *flag.FlagSet, parentCtx *Context) *Context {
+	c := &Context{App: app, flagSet: set, parentContext: parentCtx}
+
+	if parentCtx != nil {
+		c.shellComplete = parentCtx.shellComplete
+	}
+
+	return c
+}
+
+// NumFlags returns the number of flags set
+func (c *Context) NumFlags() int {
+	return c.flagSet.NFlag()
+}
+
+// Set sets a context flag to a value.
+func (c *Context) Set(name, value string) error {
+	c.setFlags = nil
+	return c.flagSet.Set(name, value)
+}
+
+// GlobalSet sets a context flag to a value on the global flagset
+func (c *Context) GlobalSet(name, value string) error {
+	globalContext(c).setFlags = nil
+	return globalContext(c).flagSet.Set(name, value)
+}
+
+// IsSet determines if the flag was actually set
+func (c *Context) IsSet(name string) bool {
+	if c.setFlags == nil {
+		c.setFlags = make(map[string]bool)
+
+		c.flagSet.Visit(func(f *flag.Flag) {
+			c.setFlags[f.Name] = true
+		})
+
+		c.flagSet.VisitAll(func(f *flag.Flag) {
+			if _, ok := c.setFlags[f.Name]; ok {
+				return
+			}
+			c.setFlags[f.Name] = false
+		})
+
+		// XXX hack to support IsSet for flags with EnvVar
+		//
+		// There isn't an easy way to do this with the current implementation since
+		// whether a flag was set via an environment variable is very difficult to
+		// determine here. Instead, we intend to introduce a backwards incompatible
+		// change in version 2 to add `IsSet` to the Flag interface to push the
+		// responsibility closer to where the information required to determine
+		// whether a flag is set by non-standard means such as environment
+		// variables is available.
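+		// (Concretely: given StringFlag{Name: "password", EnvVar: "APP_PASSWORD"}
+		// with APP_PASSWORD exported, IsSet("password") reports true below even
+		// though the flag never appeared on the command line; the fallback scans
+		// each flag's EnvVar field via reflection.)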
+		//
+		// See https://github.com/urfave/cli/issues/294 for additional discussion
+		flags := c.Command.Flags
+		if c.Command.Name == "" { // cannot == Command{} since it contains slice types
+			if c.App != nil {
+				flags = c.App.Flags
+			}
+		}
+		for _, f := range flags {
+			eachName(f.GetName(), func(name string) {
+				if isSet, ok := c.setFlags[name]; isSet || !ok {
+					return
+				}
+
+				val := reflect.ValueOf(f)
+				if val.Kind() == reflect.Ptr {
+					val = val.Elem()
+				}
+
+				envVarValue := val.FieldByName("EnvVar")
+				if !envVarValue.IsValid() {
+					return
+				}
+
+				eachName(envVarValue.String(), func(envVar string) {
+					envVar = strings.TrimSpace(envVar)
+					if _, ok := syscall.Getenv(envVar); ok {
+						c.setFlags[name] = true
+						return
+					}
+				})
+			})
+		}
+	}
+
+	return c.setFlags[name]
+}
+
+// GlobalIsSet determines if the global flag was actually set
+func (c *Context) GlobalIsSet(name string) bool {
+	ctx := c
+	if ctx.parentContext != nil {
+		ctx = ctx.parentContext
+	}
+
+	for ; ctx != nil; ctx = ctx.parentContext {
+		if ctx.IsSet(name) {
+			return true
+		}
+	}
+	return false
+}
+
+// FlagNames returns a slice of flag names used in this context.
+func (c *Context) FlagNames() (names []string) {
+	for _, flag := range c.Command.Flags {
+		name := strings.Split(flag.GetName(), ",")[0]
+		if name == "help" {
+			continue
+		}
+		names = append(names, name)
+	}
+	return
+}
+
+// GlobalFlagNames returns a slice of global flag names used by the app.
+func (c *Context) GlobalFlagNames() (names []string) {
+	for _, flag := range c.App.Flags {
+		name := strings.Split(flag.GetName(), ",")[0]
+		if name == "help" || name == "version" {
+			continue
+		}
+		names = append(names, name)
+	}
+	return
+}
+
+// Parent returns the parent context, if any
+func (c *Context) Parent() *Context {
+	return c.parentContext
+}
+
+// value returns the value of the flag corresponding to `name`
+func (c *Context) value(name string) interface{} {
+	return c.flagSet.Lookup(name).Value.(flag.Getter).Get()
+}
+
+// Args contains the app's console arguments
+type Args []string
+
+// Args returns the command line arguments associated with the context.
+func (c *Context) Args() Args {
+	args := Args(c.flagSet.Args())
+	return args
+}
+
+// NArg returns the number of the command line arguments.
+func (c *Context) NArg() int { + return len(c.Args()) +} + +// Get returns the nth argument, or else a blank string +func (a Args) Get(n int) string { + if len(a) > n { + return a[n] + } + return "" +} + +// First returns the first argument, or else a blank string +func (a Args) First() string { + return a.Get(0) +} + +// Tail returns the rest of the arguments (not the first one) +// or else an empty string slice +func (a Args) Tail() []string { + if len(a) >= 2 { + return []string(a)[1:] + } + return []string{} +} + +// Present checks if there are any arguments present +func (a Args) Present() bool { + return len(a) != 0 +} + +// Swap swaps arguments at the given indexes +func (a Args) Swap(from, to int) error { + if from >= len(a) || to >= len(a) { + return errors.New("index out of range") + } + a[from], a[to] = a[to], a[from] + return nil +} + +func globalContext(ctx *Context) *Context { + if ctx == nil { + return nil + } + + for { + if ctx.parentContext == nil { + return ctx + } + ctx = ctx.parentContext + } +} + +func lookupGlobalFlagSet(name string, ctx *Context) *flag.FlagSet { + if ctx.parentContext != nil { + ctx = ctx.parentContext + } + for ; ctx != nil; ctx = ctx.parentContext { + if f := ctx.flagSet.Lookup(name); f != nil { + return ctx.flagSet + } + } + return nil +} + +func copyFlag(name string, ff *flag.Flag, set *flag.FlagSet) { + switch ff.Value.(type) { + case *StringSlice: + default: + set.Set(name, ff.Value.String()) + } +} + +func normalizeFlags(flags []Flag, set *flag.FlagSet) error { + visited := make(map[string]bool) + set.Visit(func(f *flag.Flag) { + visited[f.Name] = true + }) + for _, f := range flags { + parts := strings.Split(f.GetName(), ",") + if len(parts) == 1 { + continue + } + var ff *flag.Flag + for _, name := range parts { + name = strings.Trim(name, " ") + if visited[name] { + if ff != nil { + return errors.New("Cannot use two forms of the same flag: " + name + " " + ff.Name) + } + ff = set.Lookup(name) + } + } + if ff == nil { + continue + } + for _, name := range parts { + name = strings.Trim(name, " ") + if !visited[name] { + copyFlag(name, ff, set) + } + } + } + return nil +} diff --git a/vendor/github.com/urfave/cli/context_test.go b/vendor/github.com/urfave/cli/context_test.go new file mode 100644 index 000000000..7acca1005 --- /dev/null +++ b/vendor/github.com/urfave/cli/context_test.go @@ -0,0 +1,403 @@ +package cli + +import ( + "flag" + "os" + "testing" + "time" +) + +func TestNewContext(t *testing.T) { + set := flag.NewFlagSet("test", 0) + set.Int("myflag", 12, "doc") + set.Int64("myflagInt64", int64(12), "doc") + set.Uint("myflagUint", uint(93), "doc") + set.Uint64("myflagUint64", uint64(93), "doc") + set.Float64("myflag64", float64(17), "doc") + globalSet := flag.NewFlagSet("test", 0) + globalSet.Int("myflag", 42, "doc") + globalSet.Int64("myflagInt64", int64(42), "doc") + globalSet.Uint("myflagUint", uint(33), "doc") + globalSet.Uint64("myflagUint64", uint64(33), "doc") + globalSet.Float64("myflag64", float64(47), "doc") + globalCtx := NewContext(nil, globalSet, nil) + command := Command{Name: "mycommand"} + c := NewContext(nil, set, globalCtx) + c.Command = command + expect(t, c.Int("myflag"), 12) + expect(t, c.Int64("myflagInt64"), int64(12)) + expect(t, c.Uint("myflagUint"), uint(93)) + expect(t, c.Uint64("myflagUint64"), uint64(93)) + expect(t, c.Float64("myflag64"), float64(17)) + expect(t, c.GlobalInt("myflag"), 42) + expect(t, c.GlobalInt64("myflagInt64"), int64(42)) + expect(t, c.GlobalUint("myflagUint"), uint(33)) + expect(t, 
c.GlobalUint64("myflagUint64"), uint64(33)) + expect(t, c.GlobalFloat64("myflag64"), float64(47)) + expect(t, c.Command.Name, "mycommand") +} + +func TestContext_Int(t *testing.T) { + set := flag.NewFlagSet("test", 0) + set.Int("myflag", 12, "doc") + c := NewContext(nil, set, nil) + expect(t, c.Int("myflag"), 12) +} + +func TestContext_Int64(t *testing.T) { + set := flag.NewFlagSet("test", 0) + set.Int64("myflagInt64", 12, "doc") + c := NewContext(nil, set, nil) + expect(t, c.Int64("myflagInt64"), int64(12)) +} + +func TestContext_Uint(t *testing.T) { + set := flag.NewFlagSet("test", 0) + set.Uint("myflagUint", uint(13), "doc") + c := NewContext(nil, set, nil) + expect(t, c.Uint("myflagUint"), uint(13)) +} + +func TestContext_Uint64(t *testing.T) { + set := flag.NewFlagSet("test", 0) + set.Uint64("myflagUint64", uint64(9), "doc") + c := NewContext(nil, set, nil) + expect(t, c.Uint64("myflagUint64"), uint64(9)) +} + +func TestContext_GlobalInt(t *testing.T) { + set := flag.NewFlagSet("test", 0) + set.Int("myflag", 12, "doc") + c := NewContext(nil, set, nil) + expect(t, c.GlobalInt("myflag"), 12) + expect(t, c.GlobalInt("nope"), 0) +} + +func TestContext_GlobalInt64(t *testing.T) { + set := flag.NewFlagSet("test", 0) + set.Int64("myflagInt64", 12, "doc") + c := NewContext(nil, set, nil) + expect(t, c.GlobalInt64("myflagInt64"), int64(12)) + expect(t, c.GlobalInt64("nope"), int64(0)) +} + +func TestContext_Float64(t *testing.T) { + set := flag.NewFlagSet("test", 0) + set.Float64("myflag", float64(17), "doc") + c := NewContext(nil, set, nil) + expect(t, c.Float64("myflag"), float64(17)) +} + +func TestContext_GlobalFloat64(t *testing.T) { + set := flag.NewFlagSet("test", 0) + set.Float64("myflag", float64(17), "doc") + c := NewContext(nil, set, nil) + expect(t, c.GlobalFloat64("myflag"), float64(17)) + expect(t, c.GlobalFloat64("nope"), float64(0)) +} + +func TestContext_Duration(t *testing.T) { + set := flag.NewFlagSet("test", 0) + set.Duration("myflag", time.Duration(12*time.Second), "doc") + c := NewContext(nil, set, nil) + expect(t, c.Duration("myflag"), time.Duration(12*time.Second)) +} + +func TestContext_String(t *testing.T) { + set := flag.NewFlagSet("test", 0) + set.String("myflag", "hello world", "doc") + c := NewContext(nil, set, nil) + expect(t, c.String("myflag"), "hello world") +} + +func TestContext_Bool(t *testing.T) { + set := flag.NewFlagSet("test", 0) + set.Bool("myflag", false, "doc") + c := NewContext(nil, set, nil) + expect(t, c.Bool("myflag"), false) +} + +func TestContext_BoolT(t *testing.T) { + set := flag.NewFlagSet("test", 0) + set.Bool("myflag", true, "doc") + c := NewContext(nil, set, nil) + expect(t, c.BoolT("myflag"), true) +} + +func TestContext_GlobalBool(t *testing.T) { + set := flag.NewFlagSet("test", 0) + + globalSet := flag.NewFlagSet("test-global", 0) + globalSet.Bool("myflag", false, "doc") + globalCtx := NewContext(nil, globalSet, nil) + + c := NewContext(nil, set, globalCtx) + expect(t, c.GlobalBool("myflag"), false) + expect(t, c.GlobalBool("nope"), false) +} + +func TestContext_GlobalBoolT(t *testing.T) { + set := flag.NewFlagSet("test", 0) + + globalSet := flag.NewFlagSet("test-global", 0) + globalSet.Bool("myflag", true, "doc") + globalCtx := NewContext(nil, globalSet, nil) + + c := NewContext(nil, set, globalCtx) + expect(t, c.GlobalBoolT("myflag"), true) + expect(t, c.GlobalBoolT("nope"), false) +} + +func TestContext_Args(t *testing.T) { + set := flag.NewFlagSet("test", 0) + set.Bool("myflag", false, "doc") + c := NewContext(nil, set, nil) + 
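+	// Parse consumes --myflag and leaves the two trailing positionals for Args().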
set.Parse([]string{"--myflag", "bat", "baz"}) + expect(t, len(c.Args()), 2) + expect(t, c.Bool("myflag"), true) +} + +func TestContext_NArg(t *testing.T) { + set := flag.NewFlagSet("test", 0) + set.Bool("myflag", false, "doc") + c := NewContext(nil, set, nil) + set.Parse([]string{"--myflag", "bat", "baz"}) + expect(t, c.NArg(), 2) +} + +func TestContext_IsSet(t *testing.T) { + set := flag.NewFlagSet("test", 0) + set.Bool("myflag", false, "doc") + set.String("otherflag", "hello world", "doc") + globalSet := flag.NewFlagSet("test", 0) + globalSet.Bool("myflagGlobal", true, "doc") + globalCtx := NewContext(nil, globalSet, nil) + c := NewContext(nil, set, globalCtx) + set.Parse([]string{"--myflag", "bat", "baz"}) + globalSet.Parse([]string{"--myflagGlobal", "bat", "baz"}) + expect(t, c.IsSet("myflag"), true) + expect(t, c.IsSet("otherflag"), false) + expect(t, c.IsSet("bogusflag"), false) + expect(t, c.IsSet("myflagGlobal"), false) +} + +// XXX Corresponds to hack in context.IsSet for flags with EnvVar field +// Should be moved to `flag_test` in v2 +func TestContext_IsSet_fromEnv(t *testing.T) { + var ( + timeoutIsSet, tIsSet bool + noEnvVarIsSet, nIsSet bool + passwordIsSet, pIsSet bool + unparsableIsSet, uIsSet bool + ) + + clearenv() + os.Setenv("APP_TIMEOUT_SECONDS", "15.5") + os.Setenv("APP_PASSWORD", "") + a := App{ + Flags: []Flag{ + Float64Flag{Name: "timeout, t", EnvVar: "APP_TIMEOUT_SECONDS"}, + StringFlag{Name: "password, p", EnvVar: "APP_PASSWORD"}, + Float64Flag{Name: "unparsable, u", EnvVar: "APP_UNPARSABLE"}, + Float64Flag{Name: "no-env-var, n"}, + }, + Action: func(ctx *Context) error { + timeoutIsSet = ctx.IsSet("timeout") + tIsSet = ctx.IsSet("t") + passwordIsSet = ctx.IsSet("password") + pIsSet = ctx.IsSet("p") + unparsableIsSet = ctx.IsSet("unparsable") + uIsSet = ctx.IsSet("u") + noEnvVarIsSet = ctx.IsSet("no-env-var") + nIsSet = ctx.IsSet("n") + return nil + }, + } + a.Run([]string{"run"}) + expect(t, timeoutIsSet, true) + expect(t, tIsSet, true) + expect(t, passwordIsSet, true) + expect(t, pIsSet, true) + expect(t, noEnvVarIsSet, false) + expect(t, nIsSet, false) + + os.Setenv("APP_UNPARSABLE", "foobar") + a.Run([]string{"run"}) + expect(t, unparsableIsSet, false) + expect(t, uIsSet, false) +} + +func TestContext_GlobalIsSet(t *testing.T) { + set := flag.NewFlagSet("test", 0) + set.Bool("myflag", false, "doc") + set.String("otherflag", "hello world", "doc") + globalSet := flag.NewFlagSet("test", 0) + globalSet.Bool("myflagGlobal", true, "doc") + globalSet.Bool("myflagGlobalUnset", true, "doc") + globalCtx := NewContext(nil, globalSet, nil) + c := NewContext(nil, set, globalCtx) + set.Parse([]string{"--myflag", "bat", "baz"}) + globalSet.Parse([]string{"--myflagGlobal", "bat", "baz"}) + expect(t, c.GlobalIsSet("myflag"), false) + expect(t, c.GlobalIsSet("otherflag"), false) + expect(t, c.GlobalIsSet("bogusflag"), false) + expect(t, c.GlobalIsSet("myflagGlobal"), true) + expect(t, c.GlobalIsSet("myflagGlobalUnset"), false) + expect(t, c.GlobalIsSet("bogusGlobal"), false) +} + +// XXX Corresponds to hack in context.IsSet for flags with EnvVar field +// Should be moved to `flag_test` in v2 +func TestContext_GlobalIsSet_fromEnv(t *testing.T) { + var ( + timeoutIsSet, tIsSet bool + noEnvVarIsSet, nIsSet bool + passwordIsSet, pIsSet bool + unparsableIsSet, uIsSet bool + ) + + clearenv() + os.Setenv("APP_TIMEOUT_SECONDS", "15.5") + os.Setenv("APP_PASSWORD", "") + a := App{ + Flags: []Flag{ + Float64Flag{Name: "timeout, t", EnvVar: "APP_TIMEOUT_SECONDS"}, + StringFlag{Name: 
"password, p", EnvVar: "APP_PASSWORD"}, + Float64Flag{Name: "no-env-var, n"}, + Float64Flag{Name: "unparsable, u", EnvVar: "APP_UNPARSABLE"}, + }, + Commands: []Command{ + { + Name: "hello", + Action: func(ctx *Context) error { + timeoutIsSet = ctx.GlobalIsSet("timeout") + tIsSet = ctx.GlobalIsSet("t") + passwordIsSet = ctx.GlobalIsSet("password") + pIsSet = ctx.GlobalIsSet("p") + unparsableIsSet = ctx.GlobalIsSet("unparsable") + uIsSet = ctx.GlobalIsSet("u") + noEnvVarIsSet = ctx.GlobalIsSet("no-env-var") + nIsSet = ctx.GlobalIsSet("n") + return nil + }, + }, + }, + } + if err := a.Run([]string{"run", "hello"}); err != nil { + t.Logf("error running Run(): %+v", err) + } + expect(t, timeoutIsSet, true) + expect(t, tIsSet, true) + expect(t, passwordIsSet, true) + expect(t, pIsSet, true) + expect(t, noEnvVarIsSet, false) + expect(t, nIsSet, false) + + os.Setenv("APP_UNPARSABLE", "foobar") + if err := a.Run([]string{"run"}); err != nil { + t.Logf("error running Run(): %+v", err) + } + expect(t, unparsableIsSet, false) + expect(t, uIsSet, false) +} + +func TestContext_NumFlags(t *testing.T) { + set := flag.NewFlagSet("test", 0) + set.Bool("myflag", false, "doc") + set.String("otherflag", "hello world", "doc") + globalSet := flag.NewFlagSet("test", 0) + globalSet.Bool("myflagGlobal", true, "doc") + globalCtx := NewContext(nil, globalSet, nil) + c := NewContext(nil, set, globalCtx) + set.Parse([]string{"--myflag", "--otherflag=foo"}) + globalSet.Parse([]string{"--myflagGlobal"}) + expect(t, c.NumFlags(), 2) +} + +func TestContext_GlobalFlag(t *testing.T) { + var globalFlag string + var globalFlagSet bool + app := NewApp() + app.Flags = []Flag{ + StringFlag{Name: "global, g", Usage: "global"}, + } + app.Action = func(c *Context) error { + globalFlag = c.GlobalString("global") + globalFlagSet = c.GlobalIsSet("global") + return nil + } + app.Run([]string{"command", "-g", "foo"}) + expect(t, globalFlag, "foo") + expect(t, globalFlagSet, true) + +} + +func TestContext_GlobalFlagsInSubcommands(t *testing.T) { + subcommandRun := false + parentFlag := false + app := NewApp() + + app.Flags = []Flag{ + BoolFlag{Name: "debug, d", Usage: "Enable debugging"}, + } + + app.Commands = []Command{ + { + Name: "foo", + Flags: []Flag{ + BoolFlag{Name: "parent, p", Usage: "Parent flag"}, + }, + Subcommands: []Command{ + { + Name: "bar", + Action: func(c *Context) error { + if c.GlobalBool("debug") { + subcommandRun = true + } + if c.GlobalBool("parent") { + parentFlag = true + } + return nil + }, + }, + }, + }, + } + + app.Run([]string{"command", "-d", "foo", "-p", "bar"}) + + expect(t, subcommandRun, true) + expect(t, parentFlag, true) +} + +func TestContext_Set(t *testing.T) { + set := flag.NewFlagSet("test", 0) + set.Int("int", 5, "an int") + c := NewContext(nil, set, nil) + + expect(t, c.IsSet("int"), false) + c.Set("int", "1") + expect(t, c.Int("int"), 1) + expect(t, c.IsSet("int"), true) +} + +func TestContext_GlobalSet(t *testing.T) { + gSet := flag.NewFlagSet("test", 0) + gSet.Int("int", 5, "an int") + + set := flag.NewFlagSet("sub", 0) + set.Int("int", 3, "an int") + + pc := NewContext(nil, gSet, nil) + c := NewContext(nil, set, pc) + + c.Set("int", "1") + expect(t, c.Int("int"), 1) + expect(t, c.GlobalInt("int"), 5) + + expect(t, c.GlobalIsSet("int"), false) + c.GlobalSet("int", "1") + expect(t, c.Int("int"), 1) + expect(t, c.GlobalInt("int"), 1) + expect(t, c.GlobalIsSet("int"), true) +} diff --git a/vendor/github.com/urfave/cli/errors.go b/vendor/github.com/urfave/cli/errors.go new file mode 100644 index 
000000000..562b2953c --- /dev/null +++ b/vendor/github.com/urfave/cli/errors.go @@ -0,0 +1,115 @@ +package cli + +import ( + "fmt" + "io" + "os" + "strings" +) + +// OsExiter is the function called when the app exits; it defaults to os.Exit. +var OsExiter = os.Exit + +// ErrWriter is used to write errors to the user. This can be anything +// implementing the io.Writer interface and defaults to os.Stderr. +var ErrWriter io.Writer = os.Stderr + +// MultiError is an error that wraps multiple errors. +type MultiError struct { + Errors []error +} + +// NewMultiError creates a new MultiError. Pass in one or more errors. +func NewMultiError(err ...error) MultiError { + return MultiError{Errors: err} +} + +// Error implements the error interface. +func (m MultiError) Error() string { + errs := make([]string, len(m.Errors)) + for i, err := range m.Errors { + errs[i] = err.Error() + } + + return strings.Join(errs, "\n") +} + +// ErrorFormatter is the interface implemented by errors that format +// themselves (it mirrors fmt.Formatter). +type ErrorFormatter interface { + Format(s fmt.State, verb rune) +} + +// ExitCoder is the interface checked by `App` and `Command` for a custom exit +// code +type ExitCoder interface { + error + ExitCode() int +} + +// ExitError fulfills both the builtin `error` interface and `ExitCoder` +type ExitError struct { + exitCode int + message interface{} +} + +// NewExitError makes a new *ExitError +func NewExitError(message interface{}, exitCode int) *ExitError { + return &ExitError{ + exitCode: exitCode, + message: message, + } +} + +// Error returns the string message, fulfilling the interface required by +// `error` +func (ee *ExitError) Error() string { + return fmt.Sprintf("%v", ee.message) +} + +// ExitCode returns the exit code, fulfilling the interface required by +// `ExitCoder` +func (ee *ExitError) ExitCode() int { + return ee.exitCode +} + +// HandleExitCoder checks if the error fulfills the ExitCoder interface, and if +// so prints the error to ErrWriter (if it is non-empty) and calls OsExiter with the +// given exit code. If the given error is a MultiError, each member error is printed +// and OsExiter is called with the exit code of the last ExitCoder found, defaulting to 1.
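[Reviewer note, not part of the vendored patch.] For anyone unfamiliar with this API: App.Run passes the Action's error through HandleExitCoder, so returning an ExitError prints the message to ErrWriter and exits with the carried code. A minimal sketch of that flow; the app name "greet" and the message are illustrative only:

    package main

    import (
        "fmt"
        "os"

        "github.com/urfave/cli"
    )

    func main() {
        app := cli.NewApp()
        app.Name = "greet" // illustrative
        app.Action = func(c *cli.Context) error {
            if !c.Args().Present() {
                // App.Run routes this through HandleExitCoder, which writes
                // "no name given" to ErrWriter and calls OsExiter(2).
                return cli.NewExitError("no name given", 2)
            }
            fmt.Println("Hello,", c.Args().First())
            return nil
        }
        app.Run(os.Args)
    }

The implementation follows.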
+func HandleExitCoder(err error) { + if err == nil { + return + } + + if exitErr, ok := err.(ExitCoder); ok { + if err.Error() != "" { + if _, ok := exitErr.(ErrorFormatter); ok { + fmt.Fprintf(ErrWriter, "%+v\n", err) + } else { + fmt.Fprintln(ErrWriter, err) + } + } + OsExiter(exitErr.ExitCode()) + return + } + + if multiErr, ok := err.(MultiError); ok { + code := handleMultiError(multiErr) + OsExiter(code) + return + } +} + +func handleMultiError(multiErr MultiError) int { + code := 1 + for _, merr := range multiErr.Errors { + if multiErr2, ok := merr.(MultiError); ok { + code = handleMultiError(multiErr2) + } else { + fmt.Fprintln(ErrWriter, merr) + if exitErr, ok := merr.(ExitCoder); ok { + code = exitErr.ExitCode() + } + } + } + return code +} diff --git a/vendor/github.com/urfave/cli/errors_test.go b/vendor/github.com/urfave/cli/errors_test.go new file mode 100644 index 000000000..9b609c596 --- /dev/null +++ b/vendor/github.com/urfave/cli/errors_test.go @@ -0,0 +1,122 @@ +package cli + +import ( + "bytes" + "errors" + "fmt" + "testing" +) + +func TestHandleExitCoder_nil(t *testing.T) { + exitCode := 0 + called := false + + OsExiter = func(rc int) { + if !called { + exitCode = rc + called = true + } + } + + defer func() { OsExiter = fakeOsExiter }() + + HandleExitCoder(nil) + + expect(t, exitCode, 0) + expect(t, called, false) +} + +func TestHandleExitCoder_ExitCoder(t *testing.T) { + exitCode := 0 + called := false + + OsExiter = func(rc int) { + if !called { + exitCode = rc + called = true + } + } + + defer func() { OsExiter = fakeOsExiter }() + + HandleExitCoder(NewExitError("galactic perimeter breach", 9)) + + expect(t, exitCode, 9) + expect(t, called, true) +} + +func TestHandleExitCoder_MultiErrorWithExitCoder(t *testing.T) { + exitCode := 0 + called := false + + OsExiter = func(rc int) { + if !called { + exitCode = rc + called = true + } + } + + defer func() { OsExiter = fakeOsExiter }() + + exitErr := NewExitError("galactic perimeter breach", 9) + exitErr2 := NewExitError("last ExitCoder", 11) + err := NewMultiError(errors.New("wowsa"), errors.New("egad"), exitErr, exitErr2) + HandleExitCoder(err) + + expect(t, exitCode, 11) + expect(t, called, true) +} + +// make a stub to not import pkg/errors +type ErrorWithFormat struct { + error +} + +func NewErrorWithFormat(m string) *ErrorWithFormat { + return &ErrorWithFormat{error: errors.New(m)} +} + +func (f *ErrorWithFormat) Format(s fmt.State, verb rune) { + fmt.Fprintf(s, "This the format: %v", f.error) +} + +func TestHandleExitCoder_ErrorWithFormat(t *testing.T) { + called := false + + OsExiter = func(rc int) { + if !called { + called = true + } + } + ErrWriter = &bytes.Buffer{} + + defer func() { + OsExiter = fakeOsExiter + ErrWriter = fakeErrWriter + }() + + err := NewExitError(NewErrorWithFormat("I am formatted"), 1) + HandleExitCoder(err) + + expect(t, called, true) + expect(t, ErrWriter.(*bytes.Buffer).String(), "This the format: I am formatted\n") +} + +func TestHandleExitCoder_MultiErrorWithFormat(t *testing.T) { + called := false + + OsExiter = func(rc int) { + if !called { + called = true + } + } + ErrWriter = &bytes.Buffer{} + + defer func() { OsExiter = fakeOsExiter }() + + err := NewMultiError(NewErrorWithFormat("err1"), NewErrorWithFormat("err2")) + HandleExitCoder(err) + + expect(t, called, true) + expect(t, ErrWriter.(*bytes.Buffer).String(), "This the format: err1\nThis the format: err2\n") +} diff --git a/vendor/github.com/urfave/cli/flag-types.json b/vendor/github.com/urfave/cli/flag-types.json new file mode 
100644 index 000000000..122310785 --- /dev/null +++ b/vendor/github.com/urfave/cli/flag-types.json @@ -0,0 +1,93 @@ +[ + { + "name": "Bool", + "type": "bool", + "value": false, + "context_default": "false", + "parser": "strconv.ParseBool(f.Value.String())" + }, + { + "name": "BoolT", + "type": "bool", + "value": false, + "doctail": " that is true by default", + "context_default": "false", + "parser": "strconv.ParseBool(f.Value.String())" + }, + { + "name": "Duration", + "type": "time.Duration", + "doctail": " (see https://golang.org/pkg/time/#ParseDuration)", + "context_default": "0", + "parser": "time.ParseDuration(f.Value.String())" + }, + { + "name": "Float64", + "type": "float64", + "context_default": "0", + "parser": "strconv.ParseFloat(f.Value.String(), 64)" + }, + { + "name": "Generic", + "type": "Generic", + "dest": false, + "context_default": "nil", + "context_type": "interface{}" + }, + { + "name": "Int64", + "type": "int64", + "context_default": "0", + "parser": "strconv.ParseInt(f.Value.String(), 0, 64)" + }, + { + "name": "Int", + "type": "int", + "context_default": "0", + "parser": "strconv.ParseInt(f.Value.String(), 0, 64)", + "parser_cast": "int(parsed)" + }, + { + "name": "IntSlice", + "type": "*IntSlice", + "dest": false, + "context_default": "nil", + "context_type": "[]int", + "parser": "(f.Value.(*IntSlice)).Value(), error(nil)" + }, + { + "name": "Int64Slice", + "type": "*Int64Slice", + "dest": false, + "context_default": "nil", + "context_type": "[]int64", + "parser": "(f.Value.(*Int64Slice)).Value(), error(nil)" + }, + { + "name": "String", + "type": "string", + "context_default": "\"\"", + "parser": "f.Value.String(), error(nil)" + }, + { + "name": "StringSlice", + "type": "*StringSlice", + "dest": false, + "context_default": "nil", + "context_type": "[]string", + "parser": "(f.Value.(*StringSlice)).Value(), error(nil)" + }, + { + "name": "Uint64", + "type": "uint64", + "context_default": "0", + "parser": "strconv.ParseUint(f.Value.String(), 0, 64)" + }, + { + "name": "Uint", + "type": "uint", + "context_default": "0", + "parser": "strconv.ParseUint(f.Value.String(), 0, 64)", + "parser_cast": "uint(parsed)" + } +] diff --git a/vendor/github.com/urfave/cli/flag.go b/vendor/github.com/urfave/cli/flag.go new file mode 100644 index 000000000..877ff3523 --- /dev/null +++ b/vendor/github.com/urfave/cli/flag.go @@ -0,0 +1,799 @@ +package cli + +import ( + "flag" + "fmt" + "reflect" + "runtime" + "strconv" + "strings" + "syscall" + "time" +) + +const defaultPlaceholder = "value" + +// BashCompletionFlag enables bash-completion for all commands and subcommands +var BashCompletionFlag Flag = BoolFlag{ + Name: "generate-bash-completion", + Hidden: true, +} + +// VersionFlag prints the version for the application +var VersionFlag Flag = BoolFlag{ + Name: "version, v", + Usage: "print the version", +} + +// HelpFlag prints the help for all commands and subcommands. +// Set to the zero value (BoolFlag{}) to disable the flag -- the help +// subcommand is kept unless HideHelp is set to true. +var HelpFlag Flag = BoolFlag{ + Name: "help, h", + Usage: "show help", +} + +// FlagStringer converts a flag definition to a string. This is used by help +// to display a flag. +var FlagStringer FlagStringFunc = stringifyFlag + +// FlagsByName is a slice of Flag.
+type FlagsByName []Flag + +func (f FlagsByName) Len() int { + return len(f) +} + +func (f FlagsByName) Less(i, j int) bool { + return f[i].GetName() < f[j].GetName() +} + +func (f FlagsByName) Swap(i, j int) { + f[i], f[j] = f[j], f[i] +} + +// Flag is a common interface related to parsing flags in cli. +// For more advanced flag parsing techniques, it is recommended that +// this interface be implemented. +type Flag interface { + fmt.Stringer + // Apply Flag settings to the given flag set + Apply(*flag.FlagSet) + GetName() string +} + +// errorableFlag is an interface that allows us to return errors during apply. +// It allows flags defined in this library to return errors in a backwards-compatible fashion. +// TODO remove in v2 and modify the existing Flag interface to return errors +type errorableFlag interface { + Flag + + ApplyWithError(*flag.FlagSet) error +} + +func flagSet(name string, flags []Flag) (*flag.FlagSet, error) { + set := flag.NewFlagSet(name, flag.ContinueOnError) + + for _, f := range flags { + //TODO remove in v2 when errorableFlag is removed + if ef, ok := f.(errorableFlag); ok { + if err := ef.ApplyWithError(set); err != nil { + return nil, err + } + } else { + f.Apply(set) + } + } + return set, nil +} + +func eachName(longName string, fn func(string)) { + parts := strings.Split(longName, ",") + for _, name := range parts { + name = strings.Trim(name, " ") + fn(name) + } +} + +// Generic is a generic parseable type identified by a specific flag +type Generic interface { + Set(value string) error + String() string +} + +// Apply takes the flagset and calls Set on the generic flag with the value +// provided by the user for parsing by the flag. +// Ignores parsing errors +func (f GenericFlag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError takes the flagset and calls Set on the generic flag with the value +// provided by the user for parsing by the flag +func (f GenericFlag) ApplyWithError(set *flag.FlagSet) error { + val := f.Value + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + if err := val.Set(envVal); err != nil { + return fmt.Errorf("could not parse %s as value for flag %s: %s", envVal, f.Name, err) + } + break + } + } + } + + eachName(f.Name, func(name string) { + set.Var(f.Value, name, f.Usage) + }) + + return nil +} + +// StringSlice is an opaque type for []string to satisfy flag.Value and flag.Getter +type StringSlice []string + +// Set appends the string value to the list of values +func (f *StringSlice) Set(value string) error { + *f = append(*f, value) + return nil +} + +// String returns a readable representation of this value (for usage defaults) +func (f *StringSlice) String() string { + return fmt.Sprintf("%s", *f) +} + +// Value returns the slice of strings set by this flag +func (f *StringSlice) Value() []string { + return *f +} + +// Get returns the slice of strings set by this flag +func (f *StringSlice) Get() interface{} { + return *f +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f StringSliceFlag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f StringSliceFlag) ApplyWithError(set *flag.FlagSet) error { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { +
newVal := &StringSlice{} + for _, s := range strings.Split(envVal, ",") { + s = strings.TrimSpace(s) + if err := newVal.Set(s); err != nil { + return fmt.Errorf("could not parse %s as string value for flag %s: %s", envVal, f.Name, err) + } + } + f.Value = newVal + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Value == nil { + f.Value = &StringSlice{} + } + set.Var(f.Value, name, f.Usage) + }) + + return nil +} + +// IntSlice is an opaque type for []int to satisfy flag.Value and flag.Getter +type IntSlice []int + +// Set parses the value into an integer and appends it to the list of values +func (f *IntSlice) Set(value string) error { + tmp, err := strconv.Atoi(value) + if err != nil { + return err + } + *f = append(*f, tmp) + return nil +} + +// String returns a readable representation of this value (for usage defaults) +func (f *IntSlice) String() string { + return fmt.Sprintf("%#v", *f) +} + +// Value returns the slice of ints set by this flag +func (f *IntSlice) Value() []int { + return *f +} + +// Get returns the slice of ints set by this flag +func (f *IntSlice) Get() interface{} { + return *f +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f IntSliceFlag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f IntSliceFlag) ApplyWithError(set *flag.FlagSet) error { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + newVal := &IntSlice{} + for _, s := range strings.Split(envVal, ",") { + s = strings.TrimSpace(s) + if err := newVal.Set(s); err != nil { + return fmt.Errorf("could not parse %s as int slice value for flag %s: %s", envVal, f.Name, err) + } + } + f.Value = newVal + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Value == nil { + f.Value = &IntSlice{} + } + set.Var(f.Value, name, f.Usage) + }) + + return nil +} + +// Int64Slice is an opaque type for []int64 to satisfy flag.Value and flag.Getter +type Int64Slice []int64 + +// Set parses the value into an integer and appends it to the list of values +func (f *Int64Slice) Set(value string) error { + tmp, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return err + } + *f = append(*f, tmp) + return nil +} + +// String returns a readable representation of this value (for usage defaults) +func (f *Int64Slice) String() string { + return fmt.Sprintf("%#v", *f) +} + +// Value returns the slice of int64s set by this flag +func (f *Int64Slice) Value() []int64 { + return *f +} + +// Get returns the slice of int64s set by this flag +func (f *Int64Slice) Get() interface{} { + return *f +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f Int64SliceFlag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f Int64SliceFlag) ApplyWithError(set *flag.FlagSet) error { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + newVal := &Int64Slice{} + for _, s := range strings.Split(envVal, ",") { + s = strings.TrimSpace(s) + if err := newVal.Set(s); err != nil { + return fmt.Errorf("could not parse %s as int64 slice value for flag %s: %s", envVal, f.Name, err) + } + } + f.Value = newVal + break + } + } + } + + eachName(f.Name,
func(name string) { + if f.Value == nil { + f.Value = &Int64Slice{} + } + set.Var(f.Value, name, f.Usage) + }) + return nil +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f BoolFlag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f BoolFlag) ApplyWithError(set *flag.FlagSet) error { + val := false + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + if envVal == "" { + val = false + break + } + + envValBool, err := strconv.ParseBool(envVal) + if err != nil { + return fmt.Errorf("could not parse %s as bool value for flag %s: %s", envVal, f.Name, err) + } + + val = envValBool + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.BoolVar(f.Destination, name, val, f.Usage) + return + } + set.Bool(name, val, f.Usage) + }) + + return nil +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f BoolTFlag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f BoolTFlag) ApplyWithError(set *flag.FlagSet) error { + val := true + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + if envVal == "" { + val = false + break + } + + envValBool, err := strconv.ParseBool(envVal) + if err != nil { + return fmt.Errorf("could not parse %s as bool value for flag %s: %s", envVal, f.Name, err) + } + + val = envValBool + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.BoolVar(f.Destination, name, val, f.Usage) + return + } + set.Bool(name, val, f.Usage) + }) + + return nil +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f StringFlag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f StringFlag) ApplyWithError(set *flag.FlagSet) error { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + f.Value = envVal + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.StringVar(f.Destination, name, f.Value, f.Usage) + return + } + set.String(name, f.Value, f.Usage) + }) + + return nil +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f IntFlag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f IntFlag) ApplyWithError(set *flag.FlagSet) error { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + envValInt, err := strconv.ParseInt(envVal, 0, 64) + if err != nil { + return fmt.Errorf("could not parse %s as int value for flag %s: %s", envVal, f.Name, err) + } + f.Value = int(envValInt) + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.IntVar(f.Destination, name, f.Value, f.Usage) + return + } + set.Int(name, f.Value, f.Usage) + }) + + return nil +} + +// Apply populates the flag given the flag set and 
environment +// Ignores errors +func (f Int64Flag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f Int64Flag) ApplyWithError(set *flag.FlagSet) error { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + envValInt, err := strconv.ParseInt(envVal, 0, 64) + if err != nil { + return fmt.Errorf("could not parse %s as int value for flag %s: %s", envVal, f.Name, err) + } + + f.Value = envValInt + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.Int64Var(f.Destination, name, f.Value, f.Usage) + return + } + set.Int64(name, f.Value, f.Usage) + }) + + return nil +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f UintFlag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f UintFlag) ApplyWithError(set *flag.FlagSet) error { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + envValInt, err := strconv.ParseUint(envVal, 0, 64) + if err != nil { + return fmt.Errorf("could not parse %s as uint value for flag %s: %s", envVal, f.Name, err) + } + + f.Value = uint(envValInt) + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.UintVar(f.Destination, name, f.Value, f.Usage) + return + } + set.Uint(name, f.Value, f.Usage) + }) + + return nil +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f Uint64Flag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f Uint64Flag) ApplyWithError(set *flag.FlagSet) error { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + envValInt, err := strconv.ParseUint(envVal, 0, 64) + if err != nil { + return fmt.Errorf("could not parse %s as uint64 value for flag %s: %s", envVal, f.Name, err) + } + + f.Value = uint64(envValInt) + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.Uint64Var(f.Destination, name, f.Value, f.Usage) + return + } + set.Uint64(name, f.Value, f.Usage) + }) + + return nil +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f DurationFlag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f DurationFlag) ApplyWithError(set *flag.FlagSet) error { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + envValDuration, err := time.ParseDuration(envVal) + if err != nil { + return fmt.Errorf("could not parse %s as duration for flag %s: %s", envVal, f.Name, err) + } + + f.Value = envValDuration + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.DurationVar(f.Destination, name, f.Value, f.Usage) + return + } + set.Duration(name, f.Value, f.Usage) + }) + + return nil +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f Float64Flag) Apply(set 
*flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f Float64Flag) ApplyWithError(set *flag.FlagSet) error { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + envValFloat, err := strconv.ParseFloat(envVal, 64) + if err != nil { + return fmt.Errorf("could not parse %s as float64 value for flag %s: %s", envVal, f.Name, err) + } + + f.Value = envValFloat + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.Float64Var(f.Destination, name, f.Value, f.Usage) + return + } + set.Float64(name, f.Value, f.Usage) + }) + + return nil +} + +func visibleFlags(fl []Flag) []Flag { + visible := []Flag{} + for _, flag := range fl { + field := flagValue(flag).FieldByName("Hidden") + if !field.IsValid() || !field.Bool() { + visible = append(visible, flag) + } + } + return visible +} + +func prefixFor(name string) (prefix string) { + if len(name) == 1 { + prefix = "-" + } else { + prefix = "--" + } + + return +} + +// Returns the placeholder, if any, and the unquoted usage string. +func unquoteUsage(usage string) (string, string) { + for i := 0; i < len(usage); i++ { + if usage[i] == '`' { + for j := i + 1; j < len(usage); j++ { + if usage[j] == '`' { + name := usage[i+1 : j] + usage = usage[:i] + name + usage[j+1:] + return name, usage + } + } + break + } + } + return "", usage +} + +func prefixedNames(fullName, placeholder string) string { + var prefixed string + parts := strings.Split(fullName, ",") + for i, name := range parts { + name = strings.Trim(name, " ") + prefixed += prefixFor(name) + name + if placeholder != "" { + prefixed += " " + placeholder + } + if i < len(parts)-1 { + prefixed += ", " + } + } + return prefixed +} + +func withEnvHint(envVar, str string) string { + envText := "" + if envVar != "" { + prefix := "$" + suffix := "" + sep := ", $" + if runtime.GOOS == "windows" { + prefix = "%" + suffix = "%" + sep = "%, %" + } + envText = fmt.Sprintf(" [%s%s%s]", prefix, strings.Join(strings.Split(envVar, ","), sep), suffix) + } + return str + envText +} + +func flagValue(f Flag) reflect.Value { + fv := reflect.ValueOf(f) + for fv.Kind() == reflect.Ptr { + fv = reflect.Indirect(fv) + } + return fv +} + +func stringifyFlag(f Flag) string { + fv := flagValue(f) + + switch f.(type) { + case IntSliceFlag: + return withEnvHint(fv.FieldByName("EnvVar").String(), + stringifyIntSliceFlag(f.(IntSliceFlag))) + case Int64SliceFlag: + return withEnvHint(fv.FieldByName("EnvVar").String(), + stringifyInt64SliceFlag(f.(Int64SliceFlag))) + case StringSliceFlag: + return withEnvHint(fv.FieldByName("EnvVar").String(), + stringifyStringSliceFlag(f.(StringSliceFlag))) + } + + placeholder, usage := unquoteUsage(fv.FieldByName("Usage").String()) + + needsPlaceholder := false + defaultValueString := "" + + if val := fv.FieldByName("Value"); val.IsValid() { + needsPlaceholder = true + defaultValueString = fmt.Sprintf(" (default: %v)", val.Interface()) + + if val.Kind() == reflect.String && val.String() != "" { + defaultValueString = fmt.Sprintf(" (default: %q)", val.String()) + } + } + + if defaultValueString == " (default: )" { + defaultValueString = "" + } + + if needsPlaceholder && placeholder == "" { + placeholder = defaultPlaceholder + } + + usageWithDefault := strings.TrimSpace(fmt.Sprintf("%s%s", usage, defaultValueString)) + + return
withEnvHint(fv.FieldByName("EnvVar").String(), + fmt.Sprintf("%s\t%s", prefixedNames(fv.FieldByName("Name").String(), placeholder), usageWithDefault)) +} + +func stringifyIntSliceFlag(f IntSliceFlag) string { + defaultVals := []string{} + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, i := range f.Value.Value() { + defaultVals = append(defaultVals, fmt.Sprintf("%d", i)) + } + } + + return stringifySliceFlag(f.Usage, f.Name, defaultVals) +} + +func stringifyInt64SliceFlag(f Int64SliceFlag) string { + defaultVals := []string{} + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, i := range f.Value.Value() { + defaultVals = append(defaultVals, fmt.Sprintf("%d", i)) + } + } + + return stringifySliceFlag(f.Usage, f.Name, defaultVals) +} + +func stringifyStringSliceFlag(f StringSliceFlag) string { + defaultVals := []string{} + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, s := range f.Value.Value() { + if len(s) > 0 { + defaultVals = append(defaultVals, fmt.Sprintf("%q", s)) + } + } + } + + return stringifySliceFlag(f.Usage, f.Name, defaultVals) +} + +func stringifySliceFlag(usage, name string, defaultVals []string) string { + placeholder, usage := unquoteUsage(usage) + if placeholder == "" { + placeholder = defaultPlaceholder + } + + defaultVal := "" + if len(defaultVals) > 0 { + defaultVal = fmt.Sprintf(" (default: %s)", strings.Join(defaultVals, ", ")) + } + + usageWithDefault := strings.TrimSpace(fmt.Sprintf("%s%s", usage, defaultVal)) + return fmt.Sprintf("%s\t%s", prefixedNames(name, placeholder), usageWithDefault) +} diff --git a/vendor/github.com/urfave/cli/flag_generated.go b/vendor/github.com/urfave/cli/flag_generated.go new file mode 100644 index 000000000..491b61956 --- /dev/null +++ b/vendor/github.com/urfave/cli/flag_generated.go @@ -0,0 +1,627 @@ +package cli + +import ( + "flag" + "strconv" + "time" +) + +// WARNING: This file is generated! 
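[Reviewer note, not part of the vendored patch.] The generated accessors below all share one lookup shape: find the flag in the local flag set (or, for the Global* variants, walk parent contexts via lookupGlobalFlagSet), re-parse its textual value into the target type, and silently fall back to the zero value on a miss or parse failure. A minimal sketch of that behavior from the caller's side; the "port" flag is illustrative only:

    package main

    import (
        "fmt"
        "os"

        "github.com/urfave/cli"
    )

    func main() {
        app := cli.NewApp()
        app.Flags = []cli.Flag{
            cli.IntFlag{Name: "port", Value: 8080, Usage: "port to listen on"},
        }
        app.Action = func(c *cli.Context) error {
            // Generated accessor: looks up "port" and re-parses its
            // string form with strconv.ParseInt.
            fmt.Println("port:", c.Int("port"))
            // Lookup misses are swallowed: this prints 0, no error.
            fmt.Println("unknown:", c.Int("no-such-flag"))
            return nil
        }
        app.Run(os.Args)
    }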
+ +// BoolFlag is a flag with type bool +type BoolFlag struct { + Name string + Usage string + EnvVar string + Hidden bool + Destination *bool +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f BoolFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f BoolFlag) GetName() string { + return f.Name +} + +// Bool looks up the value of a local BoolFlag, returns +// false if not found +func (c *Context) Bool(name string) bool { + return lookupBool(name, c.flagSet) +} + +// GlobalBool looks up the value of a global BoolFlag, returns +// false if not found +func (c *Context) GlobalBool(name string) bool { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupBool(name, fs) + } + return false +} + +func lookupBool(name string, set *flag.FlagSet) bool { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseBool(f.Value.String()) + if err != nil { + return false + } + return parsed + } + return false +} + +// BoolTFlag is a flag with type bool that is true by default +type BoolTFlag struct { + Name string + Usage string + EnvVar string + Hidden bool + Destination *bool +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f BoolTFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f BoolTFlag) GetName() string { + return f.Name +} + +// BoolT looks up the value of a local BoolTFlag, returns +// false if not found +func (c *Context) BoolT(name string) bool { + return lookupBoolT(name, c.flagSet) +} + +// GlobalBoolT looks up the value of a global BoolTFlag, returns +// false if not found +func (c *Context) GlobalBoolT(name string) bool { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupBoolT(name, fs) + } + return false +} + +func lookupBoolT(name string, set *flag.FlagSet) bool { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseBool(f.Value.String()) + if err != nil { + return false + } + return parsed + } + return false +} + +// DurationFlag is a flag with type time.Duration (see https://golang.org/pkg/time/#ParseDuration) +type DurationFlag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value time.Duration + Destination *time.Duration +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f DurationFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f DurationFlag) GetName() string { + return f.Name +} + +// Duration looks up the value of a local DurationFlag, returns +// 0 if not found +func (c *Context) Duration(name string) time.Duration { + return lookupDuration(name, c.flagSet) +} + +// GlobalDuration looks up the value of a global DurationFlag, returns +// 0 if not found +func (c *Context) GlobalDuration(name string) time.Duration { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupDuration(name, fs) + } + return 0 +} + +func lookupDuration(name string, set *flag.FlagSet) time.Duration { + f := set.Lookup(name) + if f != nil { + parsed, err := time.ParseDuration(f.Value.String()) + if err != nil { + return 0 + } + return parsed + } + return 0 +} + +// Float64Flag is a flag with type float64 +type Float64Flag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value float64 + Destination *float64 +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f 
Float64Flag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f Float64Flag) GetName() string { + return f.Name +} + +// Float64 looks up the value of a local Float64Flag, returns +// 0 if not found +func (c *Context) Float64(name string) float64 { + return lookupFloat64(name, c.flagSet) +} + +// GlobalFloat64 looks up the value of a global Float64Flag, returns +// 0 if not found +func (c *Context) GlobalFloat64(name string) float64 { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupFloat64(name, fs) + } + return 0 +} + +func lookupFloat64(name string, set *flag.FlagSet) float64 { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseFloat(f.Value.String(), 64) + if err != nil { + return 0 + } + return parsed + } + return 0 +} + +// GenericFlag is a flag with type Generic +type GenericFlag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value Generic +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f GenericFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f GenericFlag) GetName() string { + return f.Name +} + +// Generic looks up the value of a local GenericFlag, returns +// nil if not found +func (c *Context) Generic(name string) interface{} { + return lookupGeneric(name, c.flagSet) +} + +// GlobalGeneric looks up the value of a global GenericFlag, returns +// nil if not found +func (c *Context) GlobalGeneric(name string) interface{} { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupGeneric(name, fs) + } + return nil +} + +func lookupGeneric(name string, set *flag.FlagSet) interface{} { + f := set.Lookup(name) + if f != nil { + parsed, err := f.Value, error(nil) + if err != nil { + return nil + } + return parsed + } + return nil +} + +// Int64Flag is a flag with type int64 +type Int64Flag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value int64 + Destination *int64 +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f Int64Flag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f Int64Flag) GetName() string { + return f.Name +} + +// Int64 looks up the value of a local Int64Flag, returns +// 0 if not found +func (c *Context) Int64(name string) int64 { + return lookupInt64(name, c.flagSet) +} + +// GlobalInt64 looks up the value of a global Int64Flag, returns +// 0 if not found +func (c *Context) GlobalInt64(name string) int64 { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupInt64(name, fs) + } + return 0 +} + +func lookupInt64(name string, set *flag.FlagSet) int64 { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseInt(f.Value.String(), 0, 64) + if err != nil { + return 0 + } + return parsed + } + return 0 +} + +// IntFlag is a flag with type int +type IntFlag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value int + Destination *int +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f IntFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f IntFlag) GetName() string { + return f.Name +} + +// Int looks up the value of a local IntFlag, returns +// 0 if not found +func (c *Context) Int(name string) int { + return lookupInt(name, c.flagSet) +} + +// GlobalInt looks up the value of a global IntFlag, 
returns +// 0 if not found +func (c *Context) GlobalInt(name string) int { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupInt(name, fs) + } + return 0 +} + +func lookupInt(name string, set *flag.FlagSet) int { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseInt(f.Value.String(), 0, 64) + if err != nil { + return 0 + } + return int(parsed) + } + return 0 +} + +// IntSliceFlag is a flag with type *IntSlice +type IntSliceFlag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value *IntSlice +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f IntSliceFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f IntSliceFlag) GetName() string { + return f.Name +} + +// IntSlice looks up the value of a local IntSliceFlag, returns +// nil if not found +func (c *Context) IntSlice(name string) []int { + return lookupIntSlice(name, c.flagSet) +} + +// GlobalIntSlice looks up the value of a global IntSliceFlag, returns +// nil if not found +func (c *Context) GlobalIntSlice(name string) []int { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupIntSlice(name, fs) + } + return nil +} + +func lookupIntSlice(name string, set *flag.FlagSet) []int { + f := set.Lookup(name) + if f != nil { + parsed, err := (f.Value.(*IntSlice)).Value(), error(nil) + if err != nil { + return nil + } + return parsed + } + return nil +} + +// Int64SliceFlag is a flag with type *Int64Slice +type Int64SliceFlag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value *Int64Slice +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f Int64SliceFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f Int64SliceFlag) GetName() string { + return f.Name +} + +// Int64Slice looks up the value of a local Int64SliceFlag, returns +// nil if not found +func (c *Context) Int64Slice(name string) []int64 { + return lookupInt64Slice(name, c.flagSet) +} + +// GlobalInt64Slice looks up the value of a global Int64SliceFlag, returns +// nil if not found +func (c *Context) GlobalInt64Slice(name string) []int64 { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupInt64Slice(name, fs) + } + return nil +} + +func lookupInt64Slice(name string, set *flag.FlagSet) []int64 { + f := set.Lookup(name) + if f != nil { + parsed, err := (f.Value.(*Int64Slice)).Value(), error(nil) + if err != nil { + return nil + } + return parsed + } + return nil +} + +// StringFlag is a flag with type string +type StringFlag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value string + Destination *string +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f StringFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f StringFlag) GetName() string { + return f.Name +} + +// String looks up the value of a local StringFlag, returns +// "" if not found +func (c *Context) String(name string) string { + return lookupString(name, c.flagSet) +} + +// GlobalString looks up the value of a global StringFlag, returns +// "" if not found +func (c *Context) GlobalString(name string) string { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupString(name, fs) + } + return "" +} + +func lookupString(name string, set *flag.FlagSet) string { + f := set.Lookup(name) + if f != 
nil { + parsed, err := f.Value.String(), error(nil) + if err != nil { + return "" + } + return parsed + } + return "" +} + +// StringSliceFlag is a flag with type *StringSlice +type StringSliceFlag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value *StringSlice +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f StringSliceFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f StringSliceFlag) GetName() string { + return f.Name +} + +// StringSlice looks up the value of a local StringSliceFlag, returns +// nil if not found +func (c *Context) StringSlice(name string) []string { + return lookupStringSlice(name, c.flagSet) +} + +// GlobalStringSlice looks up the value of a global StringSliceFlag, returns +// nil if not found +func (c *Context) GlobalStringSlice(name string) []string { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupStringSlice(name, fs) + } + return nil +} + +func lookupStringSlice(name string, set *flag.FlagSet) []string { + f := set.Lookup(name) + if f != nil { + parsed, err := (f.Value.(*StringSlice)).Value(), error(nil) + if err != nil { + return nil + } + return parsed + } + return nil +} + +// Uint64Flag is a flag with type uint64 +type Uint64Flag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value uint64 + Destination *uint64 +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f Uint64Flag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f Uint64Flag) GetName() string { + return f.Name +} + +// Uint64 looks up the value of a local Uint64Flag, returns +// 0 if not found +func (c *Context) Uint64(name string) uint64 { + return lookupUint64(name, c.flagSet) +} + +// GlobalUint64 looks up the value of a global Uint64Flag, returns +// 0 if not found +func (c *Context) GlobalUint64(name string) uint64 { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupUint64(name, fs) + } + return 0 +} + +func lookupUint64(name string, set *flag.FlagSet) uint64 { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseUint(f.Value.String(), 0, 64) + if err != nil { + return 0 + } + return parsed + } + return 0 +} + +// UintFlag is a flag with type uint +type UintFlag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value uint + Destination *uint +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f UintFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f UintFlag) GetName() string { + return f.Name +} + +// Uint looks up the value of a local UintFlag, returns +// 0 if not found +func (c *Context) Uint(name string) uint { + return lookupUint(name, c.flagSet) +} + +// GlobalUint looks up the value of a global UintFlag, returns +// 0 if not found +func (c *Context) GlobalUint(name string) uint { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupUint(name, fs) + } + return 0 +} + +func lookupUint(name string, set *flag.FlagSet) uint { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseUint(f.Value.String(), 0, 64) + if err != nil { + return 0 + } + return uint(parsed) + } + return 0 +} diff --git a/vendor/github.com/urfave/cli/flag_test.go b/vendor/github.com/urfave/cli/flag_test.go new file mode 100644 index 000000000..1ccb6399b --- /dev/null +++ 
b/vendor/github.com/urfave/cli/flag_test.go @@ -0,0 +1,1215 @@ +package cli + +import ( + "fmt" + "os" + "reflect" + "regexp" + "runtime" + "strings" + "testing" + "time" +) + +var boolFlagTests = []struct { + name string + expected string +}{ + {"help", "--help\t"}, + {"h", "-h\t"}, +} + +func TestBoolFlagHelpOutput(t *testing.T) { + for _, test := range boolFlagTests { + flag := BoolFlag{Name: test.name} + output := flag.String() + + if output != test.expected { + t.Errorf("%q does not match %q", output, test.expected) + } + } +} + +func TestFlagsFromEnv(t *testing.T) { + var flagTests = []struct { + input string + output interface{} + flag Flag + errRegexp string + }{ + {"", false, BoolFlag{Name: "debug", EnvVar: "DEBUG"}, ""}, + {"1", true, BoolFlag{Name: "debug", EnvVar: "DEBUG"}, ""}, + {"false", false, BoolFlag{Name: "debug", EnvVar: "DEBUG"}, ""}, + {"foobar", true, BoolFlag{Name: "debug", EnvVar: "DEBUG"}, fmt.Sprintf(`could not parse foobar as bool value for flag debug: .*`)}, + + {"", false, BoolTFlag{Name: "debug", EnvVar: "DEBUG"}, ""}, + {"1", true, BoolTFlag{Name: "debug", EnvVar: "DEBUG"}, ""}, + {"false", false, BoolTFlag{Name: "debug", EnvVar: "DEBUG"}, ""}, + {"foobar", true, BoolTFlag{Name: "debug", EnvVar: "DEBUG"}, fmt.Sprintf(`could not parse foobar as bool value for flag debug: .*`)}, + + {"1s", 1 * time.Second, DurationFlag{Name: "time", EnvVar: "TIME"}, ""}, + {"foobar", false, DurationFlag{Name: "time", EnvVar: "TIME"}, fmt.Sprintf(`could not parse foobar as duration for flag time: .*`)}, + + {"1.2", 1.2, Float64Flag{Name: "seconds", EnvVar: "SECONDS"}, ""}, + {"1", 1.0, Float64Flag{Name: "seconds", EnvVar: "SECONDS"}, ""}, + {"foobar", 0, Float64Flag{Name: "seconds", EnvVar: "SECONDS"}, fmt.Sprintf(`could not parse foobar as float64 value for flag seconds: .*`)}, + + {"1", int64(1), Int64Flag{Name: "seconds", EnvVar: "SECONDS"}, ""}, + {"1.2", 0, Int64Flag{Name: "seconds", EnvVar: "SECONDS"}, fmt.Sprintf(`could not parse 1.2 as int value for flag seconds: .*`)}, + {"foobar", 0, Int64Flag{Name: "seconds", EnvVar: "SECONDS"}, fmt.Sprintf(`could not parse foobar as int value for flag seconds: .*`)}, + + {"1", 1, IntFlag{Name: "seconds", EnvVar: "SECONDS"}, ""}, + {"1.2", 0, IntFlag{Name: "seconds", EnvVar: "SECONDS"}, fmt.Sprintf(`could not parse 1.2 as int value for flag seconds: .*`)}, + {"foobar", 0, IntFlag{Name: "seconds", EnvVar: "SECONDS"}, fmt.Sprintf(`could not parse foobar as int value for flag seconds: .*`)}, + + {"1,2", IntSlice{1, 2}, IntSliceFlag{Name: "seconds", EnvVar: "SECONDS"}, ""}, + {"1.2,2", IntSlice{}, IntSliceFlag{Name: "seconds", EnvVar: "SECONDS"}, fmt.Sprintf(`could not parse 1.2,2 as int slice value for flag seconds: .*`)}, + {"foobar", IntSlice{}, IntSliceFlag{Name: "seconds", EnvVar: "SECONDS"}, fmt.Sprintf(`could not parse foobar as int slice value for flag seconds: .*`)}, + + {"1,2", Int64Slice{1, 2}, Int64SliceFlag{Name: "seconds", EnvVar: "SECONDS"}, ""}, + {"1.2,2", Int64Slice{}, Int64SliceFlag{Name: "seconds", EnvVar: "SECONDS"}, fmt.Sprintf(`could not parse 1.2,2 as int64 slice value for flag seconds: .*`)}, + {"foobar", Int64Slice{}, Int64SliceFlag{Name: "seconds", EnvVar: "SECONDS"}, fmt.Sprintf(`could not parse foobar as int64 slice value for flag seconds: .*`)}, + + {"foo", "foo", StringFlag{Name: "name", EnvVar: "NAME"}, ""}, + + {"foo,bar", StringSlice{"foo", "bar"}, StringSliceFlag{Name: "names", EnvVar: "NAMES"}, ""}, + + {"1", uint(1), UintFlag{Name: "seconds", EnvVar: "SECONDS"}, ""}, + {"1.2", 0, UintFlag{Name: 
"seconds", EnvVar: "SECONDS"}, fmt.Sprintf(`could not parse 1.2 as uint value for flag seconds: .*`)}, + {"foobar", 0, UintFlag{Name: "seconds", EnvVar: "SECONDS"}, fmt.Sprintf(`could not parse foobar as uint value for flag seconds: .*`)}, + + {"1", uint64(1), Uint64Flag{Name: "seconds", EnvVar: "SECONDS"}, ""}, + {"1.2", 0, Uint64Flag{Name: "seconds", EnvVar: "SECONDS"}, fmt.Sprintf(`could not parse 1.2 as uint64 value for flag seconds: .*`)}, + {"foobar", 0, Uint64Flag{Name: "seconds", EnvVar: "SECONDS"}, fmt.Sprintf(`could not parse foobar as uint64 value for flag seconds: .*`)}, + + {"foo,bar", &Parser{"foo", "bar"}, GenericFlag{Name: "names", Value: &Parser{}, EnvVar: "NAMES"}, ""}, + } + + for _, test := range flagTests { + os.Clearenv() + os.Setenv(reflect.ValueOf(test.flag).FieldByName("EnvVar").String(), test.input) + a := App{ + Flags: []Flag{test.flag}, + Action: func(ctx *Context) error { + if !reflect.DeepEqual(ctx.value(test.flag.GetName()), test.output) { + t.Errorf("expected %+v to be parsed as %+v, instead was %+v", test.input, test.output, ctx.value(test.flag.GetName())) + } + return nil + }, + } + + err := a.Run([]string{"run"}) + + if test.errRegexp != "" { + if err == nil { + t.Errorf("expected error to match %s, got none", test.errRegexp) + } else { + if matched, _ := regexp.MatchString(test.errRegexp, err.Error()); !matched { + t.Errorf("expected error to match %s, got error %s", test.errRegexp, err) + } + } + } else { + if err != nil && test.errRegexp == "" { + t.Errorf("expected no error got %s", err) + } + } + } +} + +var stringFlagTests = []struct { + name string + usage string + value string + expected string +}{ + {"foo", "", "", "--foo value\t"}, + {"f", "", "", "-f value\t"}, + {"f", "The total `foo` desired", "all", "-f foo\tThe total foo desired (default: \"all\")"}, + {"test", "", "Something", "--test value\t(default: \"Something\")"}, + {"config,c", "Load configuration from `FILE`", "", "--config FILE, -c FILE\tLoad configuration from FILE"}, + {"config,c", "Load configuration from `CONFIG`", "config.json", "--config CONFIG, -c CONFIG\tLoad configuration from CONFIG (default: \"config.json\")"}, +} + +func TestStringFlagHelpOutput(t *testing.T) { + for _, test := range stringFlagTests { + flag := StringFlag{Name: test.name, Usage: test.usage, Value: test.value} + output := flag.String() + + if output != test.expected { + t.Errorf("%q does not match %q", output, test.expected) + } + } +} + +func TestStringFlagWithEnvVarHelpOutput(t *testing.T) { + os.Clearenv() + os.Setenv("APP_FOO", "derp") + for _, test := range stringFlagTests { + flag := StringFlag{Name: test.name, Value: test.value, EnvVar: "APP_FOO"} + output := flag.String() + + expectedSuffix := " [$APP_FOO]" + if runtime.GOOS == "windows" { + expectedSuffix = " [%APP_FOO%]" + } + if !strings.HasSuffix(output, expectedSuffix) { + t.Errorf("%s does not end with"+expectedSuffix, output) + } + } +} + +var stringSliceFlagTests = []struct { + name string + value *StringSlice + expected string +}{ + {"foo", func() *StringSlice { + s := &StringSlice{} + s.Set("") + return s + }(), "--foo value\t"}, + {"f", func() *StringSlice { + s := &StringSlice{} + s.Set("") + return s + }(), "-f value\t"}, + {"f", func() *StringSlice { + s := &StringSlice{} + s.Set("Lipstick") + return s + }(), "-f value\t(default: \"Lipstick\")"}, + {"test", func() *StringSlice { + s := &StringSlice{} + s.Set("Something") + return s + }(), "--test value\t(default: \"Something\")"}, +} + +func TestStringSliceFlagHelpOutput(t 
*testing.T) { + for _, test := range stringSliceFlagTests { + flag := StringSliceFlag{Name: test.name, Value: test.value} + output := flag.String() + + if output != test.expected { + t.Errorf("%q does not match %q", output, test.expected) + } + } +} + +func TestStringSliceFlagWithEnvVarHelpOutput(t *testing.T) { + os.Clearenv() + os.Setenv("APP_QWWX", "11,4") + for _, test := range stringSliceFlagTests { + flag := StringSliceFlag{Name: test.name, Value: test.value, EnvVar: "APP_QWWX"} + output := flag.String() + + expectedSuffix := " [$APP_QWWX]" + if runtime.GOOS == "windows" { + expectedSuffix = " [%APP_QWWX%]" + } + if !strings.HasSuffix(output, expectedSuffix) { + t.Errorf("%q does not end with"+expectedSuffix, output) + } + } +} + +var intFlagTests = []struct { + name string + expected string +}{ + {"hats", "--hats value\t(default: 9)"}, + {"H", "-H value\t(default: 9)"}, +} + +func TestIntFlagHelpOutput(t *testing.T) { + for _, test := range intFlagTests { + flag := IntFlag{Name: test.name, Value: 9} + output := flag.String() + + if output != test.expected { + t.Errorf("%s does not match %s", output, test.expected) + } + } +} + +func TestIntFlagWithEnvVarHelpOutput(t *testing.T) { + os.Clearenv() + os.Setenv("APP_BAR", "2") + for _, test := range intFlagTests { + flag := IntFlag{Name: test.name, EnvVar: "APP_BAR"} + output := flag.String() + + expectedSuffix := " [$APP_BAR]" + if runtime.GOOS == "windows" { + expectedSuffix = " [%APP_BAR%]" + } + if !strings.HasSuffix(output, expectedSuffix) { + t.Errorf("%s does not end with"+expectedSuffix, output) + } + } +} + +var int64FlagTests = []struct { + name string + expected string +}{ + {"hats", "--hats value\t(default: 8589934592)"}, + {"H", "-H value\t(default: 8589934592)"}, +} + +func TestInt64FlagHelpOutput(t *testing.T) { + for _, test := range int64FlagTests { + flag := Int64Flag{Name: test.name, Value: 8589934592} + output := flag.String() + + if output != test.expected { + t.Errorf("%s does not match %s", output, test.expected) + } + } +} + +func TestInt64FlagWithEnvVarHelpOutput(t *testing.T) { + os.Clearenv() + os.Setenv("APP_BAR", "2") + for _, test := range int64FlagTests { + flag := Int64Flag{Name: test.name, EnvVar: "APP_BAR"} + output := flag.String() + + expectedSuffix := " [$APP_BAR]" + if runtime.GOOS == "windows" { + expectedSuffix = " [%APP_BAR%]" + } + if !strings.HasSuffix(output, expectedSuffix) { + t.Errorf("%s does not end with"+expectedSuffix, output) + } + } +} + +var uintFlagTests = []struct { + name string + expected string +}{ + {"nerfs", "--nerfs value\t(default: 41)"}, + {"N", "-N value\t(default: 41)"}, +} + +func TestUintFlagHelpOutput(t *testing.T) { + for _, test := range uintFlagTests { + flag := UintFlag{Name: test.name, Value: 41} + output := flag.String() + + if output != test.expected { + t.Errorf("%s does not match %s", output, test.expected) + } + } +} + +func TestUintFlagWithEnvVarHelpOutput(t *testing.T) { + os.Clearenv() + os.Setenv("APP_BAR", "2") + for _, test := range uintFlagTests { + flag := UintFlag{Name: test.name, EnvVar: "APP_BAR"} + output := flag.String() + + expectedSuffix := " [$APP_BAR]" + if runtime.GOOS == "windows" { + expectedSuffix = " [%APP_BAR%]" + } + if !strings.HasSuffix(output, expectedSuffix) { + t.Errorf("%s does not end with"+expectedSuffix, output) + } + } +} + +var uint64FlagTests = []struct { + name string + expected string +}{ + {"gerfs", "--gerfs value\t(default: 8589934582)"}, + {"G", "-G value\t(default: 8589934582)"}, +} + +func TestUint64FlagHelpOutput(t 
*testing.T) { + for _, test := range uint64FlagTests { + flag := Uint64Flag{Name: test.name, Value: 8589934582} + output := flag.String() + + if output != test.expected { + t.Errorf("%s does not match %s", output, test.expected) + } + } +} + +func TestUint64FlagWithEnvVarHelpOutput(t *testing.T) { + os.Clearenv() + os.Setenv("APP_BAR", "2") + for _, test := range uint64FlagTests { + flag := Uint64Flag{Name: test.name, EnvVar: "APP_BAR"} + output := flag.String() + + expectedSuffix := " [$APP_BAR]" + if runtime.GOOS == "windows" { + expectedSuffix = " [%APP_BAR%]" + } + if !strings.HasSuffix(output, expectedSuffix) { + t.Errorf("%s does not end with"+expectedSuffix, output) + } + } +} + +var durationFlagTests = []struct { + name string + expected string +}{ + {"hooting", "--hooting value\t(default: 1s)"}, + {"H", "-H value\t(default: 1s)"}, +} + +func TestDurationFlagHelpOutput(t *testing.T) { + for _, test := range durationFlagTests { + flag := DurationFlag{Name: test.name, Value: 1 * time.Second} + output := flag.String() + + if output != test.expected { + t.Errorf("%q does not match %q", output, test.expected) + } + } +} + +func TestDurationFlagWithEnvVarHelpOutput(t *testing.T) { + os.Clearenv() + os.Setenv("APP_BAR", "2h3m6s") + for _, test := range durationFlagTests { + flag := DurationFlag{Name: test.name, EnvVar: "APP_BAR"} + output := flag.String() + + expectedSuffix := " [$APP_BAR]" + if runtime.GOOS == "windows" { + expectedSuffix = " [%APP_BAR%]" + } + if !strings.HasSuffix(output, expectedSuffix) { + t.Errorf("%s does not end with"+expectedSuffix, output) + } + } +} + +var intSliceFlagTests = []struct { + name string + value *IntSlice + expected string +}{ + {"heads", &IntSlice{}, "--heads value\t"}, + {"H", &IntSlice{}, "-H value\t"}, + {"H, heads", func() *IntSlice { + i := &IntSlice{} + i.Set("9") + i.Set("3") + return i + }(), "-H value, --heads value\t(default: 9, 3)"}, +} + +func TestIntSliceFlagHelpOutput(t *testing.T) { + for _, test := range intSliceFlagTests { + flag := IntSliceFlag{Name: test.name, Value: test.value} + output := flag.String() + + if output != test.expected { + t.Errorf("%q does not match %q", output, test.expected) + } + } +} + +func TestIntSliceFlagWithEnvVarHelpOutput(t *testing.T) { + os.Clearenv() + os.Setenv("APP_SMURF", "42,3") + for _, test := range intSliceFlagTests { + flag := IntSliceFlag{Name: test.name, Value: test.value, EnvVar: "APP_SMURF"} + output := flag.String() + + expectedSuffix := " [$APP_SMURF]" + if runtime.GOOS == "windows" { + expectedSuffix = " [%APP_SMURF%]" + } + if !strings.HasSuffix(output, expectedSuffix) { + t.Errorf("%q does not end with"+expectedSuffix, output) + } + } +} + +var int64SliceFlagTests = []struct { + name string + value *Int64Slice + expected string +}{ + {"heads", &Int64Slice{}, "--heads value\t"}, + {"H", &Int64Slice{}, "-H value\t"}, + {"H, heads", func() *Int64Slice { + i := &Int64Slice{} + i.Set("2") + i.Set("17179869184") + return i + }(), "-H value, --heads value\t(default: 2, 17179869184)"}, +} + +func TestInt64SliceFlagHelpOutput(t *testing.T) { + for _, test := range int64SliceFlagTests { + flag := Int64SliceFlag{Name: test.name, Value: test.value} + output := flag.String() + + if output != test.expected { + t.Errorf("%q does not match %q", output, test.expected) + } + } +} + +func TestInt64SliceFlagWithEnvVarHelpOutput(t *testing.T) { + os.Clearenv() + os.Setenv("APP_SMURF", "42,17179869184") + for _, test := range int64SliceFlagTests { + flag := Int64SliceFlag{Name: test.name, Value: 
test.value, EnvVar: "APP_SMURF"} + output := flag.String() + + expectedSuffix := " [$APP_SMURF]" + if runtime.GOOS == "windows" { + expectedSuffix = " [%APP_SMURF%]" + } + if !strings.HasSuffix(output, expectedSuffix) { + t.Errorf("%q does not end with"+expectedSuffix, output) + } + } +} + +var float64FlagTests = []struct { + name string + expected string +}{ + {"hooting", "--hooting value\t(default: 0.1)"}, + {"H", "-H value\t(default: 0.1)"}, +} + +func TestFloat64FlagHelpOutput(t *testing.T) { + for _, test := range float64FlagTests { + flag := Float64Flag{Name: test.name, Value: float64(0.1)} + output := flag.String() + + if output != test.expected { + t.Errorf("%q does not match %q", output, test.expected) + } + } +} + +func TestFloat64FlagWithEnvVarHelpOutput(t *testing.T) { + os.Clearenv() + os.Setenv("APP_BAZ", "99.4") + for _, test := range float64FlagTests { + flag := Float64Flag{Name: test.name, EnvVar: "APP_BAZ"} + output := flag.String() + + expectedSuffix := " [$APP_BAZ]" + if runtime.GOOS == "windows" { + expectedSuffix = " [%APP_BAZ%]" + } + if !strings.HasSuffix(output, expectedSuffix) { + t.Errorf("%s does not end with"+expectedSuffix, output) + } + } +} + +var genericFlagTests = []struct { + name string + value Generic + expected string +}{ + {"toads", &Parser{"abc", "def"}, "--toads value\ttest flag (default: abc,def)"}, + {"t", &Parser{"abc", "def"}, "-t value\ttest flag (default: abc,def)"}, +} + +func TestGenericFlagHelpOutput(t *testing.T) { + for _, test := range genericFlagTests { + flag := GenericFlag{Name: test.name, Value: test.value, Usage: "test flag"} + output := flag.String() + + if output != test.expected { + t.Errorf("%q does not match %q", output, test.expected) + } + } +} + +func TestGenericFlagWithEnvVarHelpOutput(t *testing.T) { + os.Clearenv() + os.Setenv("APP_ZAP", "3") + for _, test := range genericFlagTests { + flag := GenericFlag{Name: test.name, EnvVar: "APP_ZAP"} + output := flag.String() + + expectedSuffix := " [$APP_ZAP]" + if runtime.GOOS == "windows" { + expectedSuffix = " [%APP_ZAP%]" + } + if !strings.HasSuffix(output, expectedSuffix) { + t.Errorf("%s does not end with"+expectedSuffix, output) + } + } +} + +func TestParseMultiString(t *testing.T) { + (&App{ + Flags: []Flag{ + StringFlag{Name: "serve, s"}, + }, + Action: func(ctx *Context) error { + if ctx.String("serve") != "10" { + t.Errorf("main name not set") + } + if ctx.String("s") != "10" { + t.Errorf("short name not set") + } + return nil + }, + }).Run([]string{"run", "-s", "10"}) +} + +func TestParseDestinationString(t *testing.T) { + var dest string + a := App{ + Flags: []Flag{ + StringFlag{ + Name: "dest", + Destination: &dest, + }, + }, + Action: func(ctx *Context) error { + if dest != "10" { + t.Errorf("expected destination String 10") + } + return nil + }, + } + a.Run([]string{"run", "--dest", "10"}) +} + +func TestParseMultiStringFromEnv(t *testing.T) { + os.Clearenv() + os.Setenv("APP_COUNT", "20") + (&App{ + Flags: []Flag{ + StringFlag{Name: "count, c", EnvVar: "APP_COUNT"}, + }, + Action: func(ctx *Context) error { + if ctx.String("count") != "20" { + t.Errorf("main name not set") + } + if ctx.String("c") != "20" { + t.Errorf("short name not set") + } + return nil + }, + }).Run([]string{"run"}) +} + +func TestParseMultiStringFromEnvCascade(t *testing.T) { + os.Clearenv() + os.Setenv("APP_COUNT", "20") + (&App{ + Flags: []Flag{ + StringFlag{Name: "count, c", EnvVar: "COMPAT_COUNT,APP_COUNT"}, + }, + Action: func(ctx *Context) error { + if ctx.String("count") != "20" { + 
t.Errorf("main name not set") + } + if ctx.String("c") != "20" { + t.Errorf("short name not set") + } + return nil + }, + }).Run([]string{"run"}) +} + +func TestParseMultiStringSlice(t *testing.T) { + (&App{ + Flags: []Flag{ + StringSliceFlag{Name: "serve, s", Value: &StringSlice{}}, + }, + Action: func(ctx *Context) error { + if !reflect.DeepEqual(ctx.StringSlice("serve"), []string{"10", "20"}) { + t.Errorf("main name not set") + } + if !reflect.DeepEqual(ctx.StringSlice("s"), []string{"10", "20"}) { + t.Errorf("short name not set") + } + return nil + }, + }).Run([]string{"run", "-s", "10", "-s", "20"}) +} + +func TestParseMultiStringSliceFromEnv(t *testing.T) { + os.Clearenv() + os.Setenv("APP_INTERVALS", "20,30,40") + + (&App{ + Flags: []Flag{ + StringSliceFlag{Name: "intervals, i", Value: &StringSlice{}, EnvVar: "APP_INTERVALS"}, + }, + Action: func(ctx *Context) error { + if !reflect.DeepEqual(ctx.StringSlice("intervals"), []string{"20", "30", "40"}) { + t.Errorf("main name not set from env") + } + if !reflect.DeepEqual(ctx.StringSlice("i"), []string{"20", "30", "40"}) { + t.Errorf("short name not set from env") + } + return nil + }, + }).Run([]string{"run"}) +} + +func TestParseMultiStringSliceFromEnvCascade(t *testing.T) { + os.Clearenv() + os.Setenv("APP_INTERVALS", "20,30,40") + + (&App{ + Flags: []Flag{ + StringSliceFlag{Name: "intervals, i", Value: &StringSlice{}, EnvVar: "COMPAT_INTERVALS,APP_INTERVALS"}, + }, + Action: func(ctx *Context) error { + if !reflect.DeepEqual(ctx.StringSlice("intervals"), []string{"20", "30", "40"}) { + t.Errorf("main name not set from env") + } + if !reflect.DeepEqual(ctx.StringSlice("i"), []string{"20", "30", "40"}) { + t.Errorf("short name not set from env") + } + return nil + }, + }).Run([]string{"run"}) +} + +func TestParseMultiInt(t *testing.T) { + a := App{ + Flags: []Flag{ + IntFlag{Name: "serve, s"}, + }, + Action: func(ctx *Context) error { + if ctx.Int("serve") != 10 { + t.Errorf("main name not set") + } + if ctx.Int("s") != 10 { + t.Errorf("short name not set") + } + return nil + }, + } + a.Run([]string{"run", "-s", "10"}) +} + +func TestParseDestinationInt(t *testing.T) { + var dest int + a := App{ + Flags: []Flag{ + IntFlag{ + Name: "dest", + Destination: &dest, + }, + }, + Action: func(ctx *Context) error { + if dest != 10 { + t.Errorf("expected destination Int 10") + } + return nil + }, + } + a.Run([]string{"run", "--dest", "10"}) +} + +func TestParseMultiIntFromEnv(t *testing.T) { + os.Clearenv() + os.Setenv("APP_TIMEOUT_SECONDS", "10") + a := App{ + Flags: []Flag{ + IntFlag{Name: "timeout, t", EnvVar: "APP_TIMEOUT_SECONDS"}, + }, + Action: func(ctx *Context) error { + if ctx.Int("timeout") != 10 { + t.Errorf("main name not set") + } + if ctx.Int("t") != 10 { + t.Errorf("short name not set") + } + return nil + }, + } + a.Run([]string{"run"}) +} + +func TestParseMultiIntFromEnvCascade(t *testing.T) { + os.Clearenv() + os.Setenv("APP_TIMEOUT_SECONDS", "10") + a := App{ + Flags: []Flag{ + IntFlag{Name: "timeout, t", EnvVar: "COMPAT_TIMEOUT_SECONDS,APP_TIMEOUT_SECONDS"}, + }, + Action: func(ctx *Context) error { + if ctx.Int("timeout") != 10 { + t.Errorf("main name not set") + } + if ctx.Int("t") != 10 { + t.Errorf("short name not set") + } + return nil + }, + } + a.Run([]string{"run"}) +} + +func TestParseMultiIntSlice(t *testing.T) { + (&App{ + Flags: []Flag{ + IntSliceFlag{Name: "serve, s", Value: &IntSlice{}}, + }, + Action: func(ctx *Context) error { + if !reflect.DeepEqual(ctx.IntSlice("serve"), []int{10, 20}) { + t.Errorf("main 
name not set") + } + if !reflect.DeepEqual(ctx.IntSlice("s"), []int{10, 20}) { + t.Errorf("short name not set") + } + return nil + }, + }).Run([]string{"run", "-s", "10", "-s", "20"}) +} + +func TestParseMultiIntSliceFromEnv(t *testing.T) { + os.Clearenv() + os.Setenv("APP_INTERVALS", "20,30,40") + + (&App{ + Flags: []Flag{ + IntSliceFlag{Name: "intervals, i", Value: &IntSlice{}, EnvVar: "APP_INTERVALS"}, + }, + Action: func(ctx *Context) error { + if !reflect.DeepEqual(ctx.IntSlice("intervals"), []int{20, 30, 40}) { + t.Errorf("main name not set from env") + } + if !reflect.DeepEqual(ctx.IntSlice("i"), []int{20, 30, 40}) { + t.Errorf("short name not set from env") + } + return nil + }, + }).Run([]string{"run"}) +} + +func TestParseMultiIntSliceFromEnvCascade(t *testing.T) { + os.Clearenv() + os.Setenv("APP_INTERVALS", "20,30,40") + + (&App{ + Flags: []Flag{ + IntSliceFlag{Name: "intervals, i", Value: &IntSlice{}, EnvVar: "COMPAT_INTERVALS,APP_INTERVALS"}, + }, + Action: func(ctx *Context) error { + if !reflect.DeepEqual(ctx.IntSlice("intervals"), []int{20, 30, 40}) { + t.Errorf("main name not set from env") + } + if !reflect.DeepEqual(ctx.IntSlice("i"), []int{20, 30, 40}) { + t.Errorf("short name not set from env") + } + return nil + }, + }).Run([]string{"run"}) +} + +func TestParseMultiInt64Slice(t *testing.T) { + (&App{ + Flags: []Flag{ + Int64SliceFlag{Name: "serve, s", Value: &Int64Slice{}}, + }, + Action: func(ctx *Context) error { + if !reflect.DeepEqual(ctx.Int64Slice("serve"), []int64{10, 17179869184}) { + t.Errorf("main name not set") + } + if !reflect.DeepEqual(ctx.Int64Slice("s"), []int64{10, 17179869184}) { + t.Errorf("short name not set") + } + return nil + }, + }).Run([]string{"run", "-s", "10", "-s", "17179869184"}) +} + +func TestParseMultiInt64SliceFromEnv(t *testing.T) { + os.Clearenv() + os.Setenv("APP_INTERVALS", "20,30,17179869184") + + (&App{ + Flags: []Flag{ + Int64SliceFlag{Name: "intervals, i", Value: &Int64Slice{}, EnvVar: "APP_INTERVALS"}, + }, + Action: func(ctx *Context) error { + if !reflect.DeepEqual(ctx.Int64Slice("intervals"), []int64{20, 30, 17179869184}) { + t.Errorf("main name not set from env") + } + if !reflect.DeepEqual(ctx.Int64Slice("i"), []int64{20, 30, 17179869184}) { + t.Errorf("short name not set from env") + } + return nil + }, + }).Run([]string{"run"}) +} + +func TestParseMultiInt64SliceFromEnvCascade(t *testing.T) { + os.Clearenv() + os.Setenv("APP_INTERVALS", "20,30,17179869184") + + (&App{ + Flags: []Flag{ + Int64SliceFlag{Name: "intervals, i", Value: &Int64Slice{}, EnvVar: "COMPAT_INTERVALS,APP_INTERVALS"}, + }, + Action: func(ctx *Context) error { + if !reflect.DeepEqual(ctx.Int64Slice("intervals"), []int64{20, 30, 17179869184}) { + t.Errorf("main name not set from env") + } + if !reflect.DeepEqual(ctx.Int64Slice("i"), []int64{20, 30, 17179869184}) { + t.Errorf("short name not set from env") + } + return nil + }, + }).Run([]string{"run"}) +} + +func TestParseMultiFloat64(t *testing.T) { + a := App{ + Flags: []Flag{ + Float64Flag{Name: "serve, s"}, + }, + Action: func(ctx *Context) error { + if ctx.Float64("serve") != 10.2 { + t.Errorf("main name not set") + } + if ctx.Float64("s") != 10.2 { + t.Errorf("short name not set") + } + return nil + }, + } + a.Run([]string{"run", "-s", "10.2"}) +} + +func TestParseDestinationFloat64(t *testing.T) { + var dest float64 + a := App{ + Flags: []Flag{ + Float64Flag{ + Name: "dest", + Destination: &dest, + }, + }, + Action: func(ctx *Context) error { + if dest != 10.2 { + t.Errorf("expected destination 
Float64 10.2") + } + return nil + }, + } + a.Run([]string{"run", "--dest", "10.2"}) +} + +func TestParseMultiFloat64FromEnv(t *testing.T) { + os.Clearenv() + os.Setenv("APP_TIMEOUT_SECONDS", "15.5") + a := App{ + Flags: []Flag{ + Float64Flag{Name: "timeout, t", EnvVar: "APP_TIMEOUT_SECONDS"}, + }, + Action: func(ctx *Context) error { + if ctx.Float64("timeout") != 15.5 { + t.Errorf("main name not set") + } + if ctx.Float64("t") != 15.5 { + t.Errorf("short name not set") + } + return nil + }, + } + a.Run([]string{"run"}) +} + +func TestParseMultiFloat64FromEnvCascade(t *testing.T) { + os.Clearenv() + os.Setenv("APP_TIMEOUT_SECONDS", "15.5") + a := App{ + Flags: []Flag{ + Float64Flag{Name: "timeout, t", EnvVar: "COMPAT_TIMEOUT_SECONDS,APP_TIMEOUT_SECONDS"}, + }, + Action: func(ctx *Context) error { + if ctx.Float64("timeout") != 15.5 { + t.Errorf("main name not set") + } + if ctx.Float64("t") != 15.5 { + t.Errorf("short name not set") + } + return nil + }, + } + a.Run([]string{"run"}) +} + +func TestParseMultiBool(t *testing.T) { + a := App{ + Flags: []Flag{ + BoolFlag{Name: "serve, s"}, + }, + Action: func(ctx *Context) error { + if ctx.Bool("serve") != true { + t.Errorf("main name not set") + } + if ctx.Bool("s") != true { + t.Errorf("short name not set") + } + return nil + }, + } + a.Run([]string{"run", "--serve"}) +} + +func TestParseDestinationBool(t *testing.T) { + var dest bool + a := App{ + Flags: []Flag{ + BoolFlag{ + Name: "dest", + Destination: &dest, + }, + }, + Action: func(ctx *Context) error { + if dest != true { + t.Errorf("expected destination Bool true") + } + return nil + }, + } + a.Run([]string{"run", "--dest"}) +} + +func TestParseMultiBoolFromEnv(t *testing.T) { + os.Clearenv() + os.Setenv("APP_DEBUG", "1") + a := App{ + Flags: []Flag{ + BoolFlag{Name: "debug, d", EnvVar: "APP_DEBUG"}, + }, + Action: func(ctx *Context) error { + if ctx.Bool("debug") != true { + t.Errorf("main name not set from env") + } + if ctx.Bool("d") != true { + t.Errorf("short name not set from env") + } + return nil + }, + } + a.Run([]string{"run"}) +} + +func TestParseMultiBoolFromEnvCascade(t *testing.T) { + os.Clearenv() + os.Setenv("APP_DEBUG", "1") + a := App{ + Flags: []Flag{ + BoolFlag{Name: "debug, d", EnvVar: "COMPAT_DEBUG,APP_DEBUG"}, + }, + Action: func(ctx *Context) error { + if ctx.Bool("debug") != true { + t.Errorf("main name not set from env") + } + if ctx.Bool("d") != true { + t.Errorf("short name not set from env") + } + return nil + }, + } + a.Run([]string{"run"}) +} + +func TestParseBoolTFromEnv(t *testing.T) { + var boolTFlagTests = []struct { + input string + output bool + }{ + {"", false}, + {"1", true}, + {"false", false}, + {"true", true}, + } + + for _, test := range boolTFlagTests { + os.Clearenv() + os.Setenv("DEBUG", test.input) + a := App{ + Flags: []Flag{ + BoolTFlag{Name: "debug, d", EnvVar: "DEBUG"}, + }, + Action: func(ctx *Context) error { + if ctx.Bool("debug") != test.output { + t.Errorf("expected %+v to be parsed as %+v, instead was %+v", test.input, test.output, ctx.Bool("debug")) + } + if ctx.Bool("d") != test.output { + t.Errorf("expected %+v to be parsed as %+v, instead was %+v", test.input, test.output, ctx.Bool("d")) + } + return nil + }, + } + a.Run([]string{"run"}) + } +} + +func TestParseMultiBoolT(t *testing.T) { + a := App{ + Flags: []Flag{ + BoolTFlag{Name: "serve, s"}, + }, + Action: func(ctx *Context) error { + if ctx.BoolT("serve") != true { + t.Errorf("main name not set") + } + if ctx.BoolT("s") != true { + t.Errorf("short name not set") + } + 
return nil + }, + } + a.Run([]string{"run", "--serve"}) +} + +func TestParseDestinationBoolT(t *testing.T) { + var dest bool + a := App{ + Flags: []Flag{ + BoolTFlag{ + Name: "dest", + Destination: &dest, + }, + }, + Action: func(ctx *Context) error { + if dest != true { + t.Errorf("expected destination BoolT true") + } + return nil + }, + } + a.Run([]string{"run", "--dest"}) +} + +func TestParseMultiBoolTFromEnv(t *testing.T) { + os.Clearenv() + os.Setenv("APP_DEBUG", "0") + a := App{ + Flags: []Flag{ + BoolTFlag{Name: "debug, d", EnvVar: "APP_DEBUG"}, + }, + Action: func(ctx *Context) error { + if ctx.BoolT("debug") != false { + t.Errorf("main name not set from env") + } + if ctx.BoolT("d") != false { + t.Errorf("short name not set from env") + } + return nil + }, + } + a.Run([]string{"run"}) +} + +func TestParseMultiBoolTFromEnvCascade(t *testing.T) { + os.Clearenv() + os.Setenv("APP_DEBUG", "0") + a := App{ + Flags: []Flag{ + BoolTFlag{Name: "debug, d", EnvVar: "COMPAT_DEBUG,APP_DEBUG"}, + }, + Action: func(ctx *Context) error { + if ctx.BoolT("debug") != false { + t.Errorf("main name not set from env") + } + if ctx.BoolT("d") != false { + t.Errorf("short name not set from env") + } + return nil + }, + } + a.Run([]string{"run"}) +} + +type Parser [2]string + +func (p *Parser) Set(value string) error { + parts := strings.Split(value, ",") + if len(parts) != 2 { + return fmt.Errorf("invalid format") + } + + (*p)[0] = parts[0] + (*p)[1] = parts[1] + + return nil +} + +func (p *Parser) String() string { + return fmt.Sprintf("%s,%s", p[0], p[1]) +} + +func (p *Parser) Get() interface{} { + return p +} + +func TestParseGeneric(t *testing.T) { + a := App{ + Flags: []Flag{ + GenericFlag{Name: "serve, s", Value: &Parser{}}, + }, + Action: func(ctx *Context) error { + if !reflect.DeepEqual(ctx.Generic("serve"), &Parser{"10", "20"}) { + t.Errorf("main name not set") + } + if !reflect.DeepEqual(ctx.Generic("s"), &Parser{"10", "20"}) { + t.Errorf("short name not set") + } + return nil + }, + } + a.Run([]string{"run", "-s", "10,20"}) +} + +func TestParseGenericFromEnv(t *testing.T) { + os.Clearenv() + os.Setenv("APP_SERVE", "20,30") + a := App{ + Flags: []Flag{ + GenericFlag{Name: "serve, s", Value: &Parser{}, EnvVar: "APP_SERVE"}, + }, + Action: func(ctx *Context) error { + if !reflect.DeepEqual(ctx.Generic("serve"), &Parser{"20", "30"}) { + t.Errorf("main name not set from env") + } + if !reflect.DeepEqual(ctx.Generic("s"), &Parser{"20", "30"}) { + t.Errorf("short name not set from env") + } + return nil + }, + } + a.Run([]string{"run"}) +} + +func TestParseGenericFromEnvCascade(t *testing.T) { + os.Clearenv() + os.Setenv("APP_FOO", "99,2000") + a := App{ + Flags: []Flag{ + GenericFlag{Name: "foos", Value: &Parser{}, EnvVar: "COMPAT_FOO,APP_FOO"}, + }, + Action: func(ctx *Context) error { + if !reflect.DeepEqual(ctx.Generic("foos"), &Parser{"99", "2000"}) { + t.Errorf("value not set from env") + } + return nil + }, + } + a.Run([]string{"run"}) +} diff --git a/vendor/github.com/urfave/cli/funcs.go b/vendor/github.com/urfave/cli/funcs.go new file mode 100644 index 000000000..cba5e6cb0 --- /dev/null +++ b/vendor/github.com/urfave/cli/funcs.go @@ -0,0 +1,28 @@ +package cli + +// BashCompleteFunc is an action to execute when the bash-completion flag is set +type BashCompleteFunc func(*Context) + +// BeforeFunc is an action to execute before any subcommands are run, but after +// the context is ready. If a non-nil error is returned, no subcommands are run +type BeforeFunc func(*Context) error + +// 
AfterFunc is an action to execute after any subcommands are run, but after the +// subcommand has finished. It is run even if Action() panics +type AfterFunc func(*Context) error + +// ActionFunc is the action to execute when no subcommands are specified +type ActionFunc func(*Context) error + +// CommandNotFoundFunc is executed if the proper command cannot be found +type CommandNotFoundFunc func(*Context, string) + +// OnUsageErrorFunc is executed if a usage error occurs. This is useful for displaying +// customized usage error messages. This function is able to replace the +// original error messages. If this function is not set, the "Incorrect usage" +// message is displayed and the execution is interrupted. +type OnUsageErrorFunc func(context *Context, err error, isSubcommand bool) error + +// FlagStringFunc is used by the help generation to display a flag, which is +// expected to be a single line. +type FlagStringFunc func(Flag) string diff --git a/vendor/github.com/urfave/cli/generate-flag-types b/vendor/github.com/urfave/cli/generate-flag-types new file mode 100755 index 000000000..7147381ce --- /dev/null +++ b/vendor/github.com/urfave/cli/generate-flag-types @@ -0,0 +1,255 @@ +#!/usr/bin/env python +""" +The flag types that ship with the cli library have many things in common, and +so we can take advantage of the `go generate` command to create much of the +source code from a list of definitions. These definitions attempt to cover +the parts that vary between flag types, and should evolve as needed. + +An example of the minimum definition needed is: + + { + "name": "SomeType", + "type": "sometype", + "context_default": "nil" + } + +In this example, the code generated for the `cli` package will include a type +named `SomeTypeFlag` that is expected to wrap a value of type `sometype`. +Fetching values by name via `*cli.Context` will default to a value of `nil`. + +A more complete, albeit somewhat redundant, example showing all available +definition keys is: + + { + "name": "VeryMuchType", + "type": "*VeryMuchType", + "value": true, + "dest": false, + "doctail": " which really only wraps a []float64, oh well!", + "context_type": "[]float64", + "context_default": "nil", + "parser": "parseVeryMuchType(f.Value.String())", + "parser_cast": "[]float64(parsed)" + } + +The meaning of each field is as follows: + + name (string) - The type "name", which will be suffixed with + `Flag` when generating the type definition + for `cli` and the wrapper type for `altsrc` + type (string) - The type that the generated `Flag` type for `cli` + is expected to "contain" as its `.Value` member + value (bool) - Should the generated `cli` type have a `Value` + member? + dest (bool) - Should the generated `cli` type support a + destination pointer? 
+ doctail (string) - Additional docs for the `cli` flag type comment + context_type (string) - The literal type used in the `*cli.Context` + reader func signature + context_default (string) - The literal value used as the default by the + `*cli.Context` reader funcs when no value is + present + parser (string) - Literal code used to parse the flag `f`, + expected to have a return signature of + (value, error) + parser_cast (string) - Literal code used to cast the `parsed` value + returned from the `parser` code +""" + +from __future__ import print_function, unicode_literals + +import argparse +import json +import os +import subprocess +import sys +import tempfile +import textwrap + + +class _FancyFormatter(argparse.ArgumentDefaultsHelpFormatter, + argparse.RawDescriptionHelpFormatter): + pass + + +def main(sysargs=sys.argv[:]): + parser = argparse.ArgumentParser( + description='Generate flag type code!', + formatter_class=_FancyFormatter) + parser.add_argument( + 'package', + type=str, default='cli', choices=_WRITEFUNCS.keys(), + help='Package for which flag types will be generated' + ) + parser.add_argument( + '-i', '--in-json', + type=argparse.FileType('r'), + default=sys.stdin, + help='Input JSON file which defines each type to be generated' + ) + parser.add_argument( + '-o', '--out-go', + type=argparse.FileType('w'), + default=sys.stdout, + help='Output file/stream to which generated source will be written' + ) + parser.epilog = __doc__ + + args = parser.parse_args(sysargs[1:]) + _generate_flag_types(_WRITEFUNCS[args.package], args.out_go, args.in_json) + return 0 + + +def _generate_flag_types(writefunc, output_go, input_json): + types = json.load(input_json) + + tmp = tempfile.NamedTemporaryFile(suffix='.go', delete=False) + writefunc(tmp, types) + tmp.close() + + new_content = subprocess.check_output( + ['goimports', tmp.name] + ).decode('utf-8') + + print(new_content, file=output_go, end='') + output_go.flush() + os.remove(tmp.name) + + +def _set_typedef_defaults(typedef): + typedef.setdefault('doctail', '') + typedef.setdefault('context_type', typedef['type']) + typedef.setdefault('dest', True) + typedef.setdefault('value', True) + typedef.setdefault('parser', 'f.Value, error(nil)') + typedef.setdefault('parser_cast', 'parsed') + + +def _write_cli_flag_types(outfile, types): + _fwrite(outfile, """\ + package cli + + // WARNING: This file is generated! 
+ + """) + + for typedef in types: + _set_typedef_defaults(typedef) + + _fwrite(outfile, """\ + // {name}Flag is a flag with type {type}{doctail} + type {name}Flag struct {{ + Name string + Usage string + EnvVar string + Hidden bool + """.format(**typedef)) + + if typedef['value']: + _fwrite(outfile, """\ + Value {type} + """.format(**typedef)) + + if typedef['dest']: + _fwrite(outfile, """\ + Destination *{type} + """.format(**typedef)) + + _fwrite(outfile, "\n}\n\n") + + _fwrite(outfile, """\ + // String returns a readable representation of this value + // (for usage defaults) + func (f {name}Flag) String() string {{ + return FlagStringer(f) + }} + + // GetName returns the name of the flag + func (f {name}Flag) GetName() string {{ + return f.Name + }} + + // {name} looks up the value of a local {name}Flag, returns + // {context_default} if not found + func (c *Context) {name}(name string) {context_type} {{ + return lookup{name}(name, c.flagSet) + }} + + // Global{name} looks up the value of a global {name}Flag, returns + // {context_default} if not found + func (c *Context) Global{name}(name string) {context_type} {{ + if fs := lookupGlobalFlagSet(name, c); fs != nil {{ + return lookup{name}(name, fs) + }} + return {context_default} + }} + + func lookup{name}(name string, set *flag.FlagSet) {context_type} {{ + f := set.Lookup(name) + if f != nil {{ + parsed, err := {parser} + if err != nil {{ + return {context_default} + }} + return {parser_cast} + }} + return {context_default} + }} + """.format(**typedef)) + + +def _write_altsrc_flag_types(outfile, types): + _fwrite(outfile, """\ + package altsrc + + import ( + "gopkg.in/urfave/cli.v1" + ) + + // WARNING: This file is generated! + + """) + + for typedef in types: + _set_typedef_defaults(typedef) + + _fwrite(outfile, """\ + // {name}Flag is the flag type that wraps cli.{name}Flag to allow + // for other values to be specified + type {name}Flag struct {{ + cli.{name}Flag + set *flag.FlagSet + }} + + // New{name}Flag creates a new {name}Flag + func New{name}Flag(fl cli.{name}Flag) *{name}Flag {{ + return &{name}Flag{{{name}Flag: fl, set: nil}} + }} + + // Apply saves the flagSet for later usage calls, then calls the + // wrapped {name}Flag.Apply + func (f *{name}Flag) Apply(set *flag.FlagSet) {{ + f.set = set + f.{name}Flag.Apply(set) + }} + + // ApplyWithError saves the flagSet for later usage calls, then calls the + // wrapped {name}Flag.ApplyWithError + func (f *{name}Flag) ApplyWithError(set *flag.FlagSet) error {{ + f.set = set + return f.{name}Flag.ApplyWithError(set) + }} + """.format(**typedef)) + + +def _fwrite(outfile, text): + print(textwrap.dedent(text), end='', file=outfile) + + +_WRITEFUNCS = { + 'cli': _write_cli_flag_types, + 'altsrc': _write_altsrc_flag_types +} + +if __name__ == '__main__': + sys.exit(main()) diff --git a/vendor/github.com/urfave/cli/help.go b/vendor/github.com/urfave/cli/help.go new file mode 100644 index 000000000..57ec98d58 --- /dev/null +++ b/vendor/github.com/urfave/cli/help.go @@ -0,0 +1,338 @@ +package cli + +import ( + "fmt" + "io" + "os" + "strings" + "text/tabwriter" + "text/template" +) + +// AppHelpTemplate is the text template for the Default help topic. +// cli.go uses text/template to render templates. You can +// render custom help text by setting this variable. 
+var AppHelpTemplate = `NAME: + {{.Name}}{{if .Usage}} - {{.Usage}}{{end}} + +USAGE: + {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Version}}{{if not .HideVersion}} + +VERSION: + {{.Version}}{{end}}{{end}}{{if .Description}} + +DESCRIPTION: + {{.Description}}{{end}}{{if len .Authors}} + +AUTHOR{{with $length := len .Authors}}{{if ne 1 $length}}S{{end}}{{end}}: + {{range $index, $author := .Authors}}{{if $index}} + {{end}}{{$author}}{{end}}{{end}}{{if .VisibleCommands}} + +COMMANDS:{{range .VisibleCategories}}{{if .Name}} + {{.Name}}:{{end}}{{range .VisibleCommands}} + {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{end}}{{end}}{{if .VisibleFlags}} + +GLOBAL OPTIONS: + {{range $index, $option := .VisibleFlags}}{{if $index}} + {{end}}{{$option}}{{end}}{{end}}{{if .Copyright}} + +COPYRIGHT: + {{.Copyright}}{{end}} +` + +// CommandHelpTemplate is the text template for the command help topic. +// cli.go uses text/template to render templates. You can +// render custom help text by setting this variable. +var CommandHelpTemplate = `NAME: + {{.HelpName}} - {{.Usage}} + +USAGE: + {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Category}} + +CATEGORY: + {{.Category}}{{end}}{{if .Description}} + +DESCRIPTION: + {{.Description}}{{end}}{{if .VisibleFlags}} + +OPTIONS: + {{range .VisibleFlags}}{{.}} + {{end}}{{end}} +` + +// SubcommandHelpTemplate is the text template for the subcommand help topic. +// cli.go uses text/template to render templates. You can +// render custom help text by setting this variable. +var SubcommandHelpTemplate = `NAME: + {{.HelpName}} - {{if .Description}}{{.Description}}{{else}}{{.Usage}}{{end}} + +USAGE: + {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} command{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}} + +COMMANDS:{{range .VisibleCategories}}{{if .Name}} + {{.Name}}:{{end}}{{range .VisibleCommands}} + {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}} +{{end}}{{if .VisibleFlags}} +OPTIONS: + {{range .VisibleFlags}}{{.}} + {{end}}{{end}} +` + +var helpCommand = Command{ + Name: "help", + Aliases: []string{"h"}, + Usage: "Shows a list of commands or help for one command", + ArgsUsage: "[command]", + Action: func(c *Context) error { + args := c.Args() + if args.Present() { + return ShowCommandHelp(c, args.First()) + } + + ShowAppHelp(c) + return nil + }, +} + +var helpSubcommand = Command{ + Name: "help", + Aliases: []string{"h"}, + Usage: "Shows a list of commands or help for one command", + ArgsUsage: "[command]", + Action: func(c *Context) error { + args := c.Args() + if args.Present() { + return ShowCommandHelp(c, args.First()) + } + + return ShowSubcommandHelp(c) + }, +} + +// Prints help for the App or Command +type helpPrinter func(w io.Writer, templ string, data interface{}) + +// Prints help for the App or Command with custom template function. +type helpPrinterCustom func(w io.Writer, templ string, data interface{}, customFunc map[string]interface{}) + +// HelpPrinter is a function that writes the help output. If not set a default +// is used. 
The function signature is: +// func(w io.Writer, templ string, data interface{}) +var HelpPrinter helpPrinter = printHelp + +// HelpPrinterCustom is same as HelpPrinter but +// takes a custom function for template function map. +var HelpPrinterCustom helpPrinterCustom = printHelpCustom + +// VersionPrinter prints the version for the App +var VersionPrinter = printVersion + +// ShowAppHelpAndExit - Prints the list of subcommands for the app and exits with exit code. +func ShowAppHelpAndExit(c *Context, exitCode int) { + ShowAppHelp(c) + os.Exit(exitCode) +} + +// ShowAppHelp is an action that displays the help. +func ShowAppHelp(c *Context) (err error) { + if c.App.CustomAppHelpTemplate == "" { + HelpPrinter(c.App.Writer, AppHelpTemplate, c.App) + return + } + customAppData := func() map[string]interface{} { + if c.App.ExtraInfo == nil { + return nil + } + return map[string]interface{}{ + "ExtraInfo": c.App.ExtraInfo, + } + } + HelpPrinterCustom(c.App.Writer, c.App.CustomAppHelpTemplate, c.App, customAppData()) + return nil +} + +// DefaultAppComplete prints the list of subcommands as the default app completion method +func DefaultAppComplete(c *Context) { + for _, command := range c.App.Commands { + if command.Hidden { + continue + } + for _, name := range command.Names() { + fmt.Fprintln(c.App.Writer, name) + } + } +} + +// ShowCommandHelpAndExit - exits with code after showing help +func ShowCommandHelpAndExit(c *Context, command string, code int) { + ShowCommandHelp(c, command) + os.Exit(code) +} + +// ShowCommandHelp prints help for the given command +func ShowCommandHelp(ctx *Context, command string) error { + // show the subcommand help for a command with subcommands + if command == "" { + HelpPrinter(ctx.App.Writer, SubcommandHelpTemplate, ctx.App) + return nil + } + + for _, c := range ctx.App.Commands { + if c.HasName(command) { + if c.CustomHelpTemplate != "" { + HelpPrinterCustom(ctx.App.Writer, c.CustomHelpTemplate, c, nil) + } else { + HelpPrinter(ctx.App.Writer, CommandHelpTemplate, c) + } + return nil + } + } + + if ctx.App.CommandNotFound == nil { + return NewExitError(fmt.Sprintf("No help topic for '%v'", command), 3) + } + + ctx.App.CommandNotFound(ctx, command) + return nil +} + +// ShowSubcommandHelp prints help for the given subcommand +func ShowSubcommandHelp(c *Context) error { + return ShowCommandHelp(c, c.Command.Name) +} + +// ShowVersion prints the version number of the App +func ShowVersion(c *Context) { + VersionPrinter(c) +} + +func printVersion(c *Context) { + fmt.Fprintf(c.App.Writer, "%v version %v\n", c.App.Name, c.App.Version) +} + +// ShowCompletions prints the lists of commands within a given context +func ShowCompletions(c *Context) { + a := c.App + if a != nil && a.BashComplete != nil { + a.BashComplete(c) + } +} + +// ShowCommandCompletions prints the custom completions for a given command +func ShowCommandCompletions(ctx *Context, command string) { + c := ctx.App.Command(command) + if c != nil && c.BashComplete != nil { + c.BashComplete(ctx) + } +} + +func printHelpCustom(out io.Writer, templ string, data interface{}, customFunc map[string]interface{}) { + funcMap := template.FuncMap{ + "join": strings.Join, + } + if customFunc != nil { + for key, value := range customFunc { + funcMap[key] = value + } + } + + w := tabwriter.NewWriter(out, 1, 8, 2, ' ', 0) + t := template.Must(template.New("help").Funcs(funcMap).Parse(templ)) + err := t.Execute(w, data) + if err != nil { + // If the writer is closed, t.Execute will fail, and there's nothing + // we 
can do to recover. + if os.Getenv("CLI_TEMPLATE_ERROR_DEBUG") != "" { + fmt.Fprintf(ErrWriter, "CLI TEMPLATE ERROR: %#v\n", err) + } + return + } + w.Flush() +} + +func printHelp(out io.Writer, templ string, data interface{}) { + printHelpCustom(out, templ, data, nil) +} + +func checkVersion(c *Context) bool { + found := false + if VersionFlag.GetName() != "" { + eachName(VersionFlag.GetName(), func(name string) { + if c.GlobalBool(name) || c.Bool(name) { + found = true + } + }) + } + return found +} + +func checkHelp(c *Context) bool { + found := false + if HelpFlag.GetName() != "" { + eachName(HelpFlag.GetName(), func(name string) { + if c.GlobalBool(name) || c.Bool(name) { + found = true + } + }) + } + return found +} + +func checkCommandHelp(c *Context, name string) bool { + if c.Bool("h") || c.Bool("help") { + ShowCommandHelp(c, name) + return true + } + + return false +} + +func checkSubcommandHelp(c *Context) bool { + if c.Bool("h") || c.Bool("help") { + ShowSubcommandHelp(c) + return true + } + + return false +} + +func checkShellCompleteFlag(a *App, arguments []string) (bool, []string) { + if !a.EnableBashCompletion { + return false, arguments + } + + pos := len(arguments) - 1 + lastArg := arguments[pos] + + if lastArg != "--"+BashCompletionFlag.GetName() { + return false, arguments + } + + return true, arguments[:pos] +} + +func checkCompletions(c *Context) bool { + if !c.shellComplete { + return false + } + + if args := c.Args(); args.Present() { + name := args.First() + if cmd := c.App.Command(name); cmd != nil { + // let the command handle the completion + return false + } + } + + ShowCompletions(c) + return true +} + +func checkCommandCompletions(c *Context, name string) bool { + if !c.shellComplete { + return false + } + + ShowCommandCompletions(c, name) + return true +} diff --git a/vendor/github.com/urfave/cli/help_test.go b/vendor/github.com/urfave/cli/help_test.go new file mode 100644 index 000000000..70b6300f7 --- /dev/null +++ b/vendor/github.com/urfave/cli/help_test.go @@ -0,0 +1,452 @@ +package cli + +import ( + "bytes" + "flag" + "fmt" + "runtime" + "strings" + "testing" +) + +func Test_ShowAppHelp_NoAuthor(t *testing.T) { + output := new(bytes.Buffer) + app := NewApp() + app.Writer = output + + c := NewContext(app, nil, nil) + + ShowAppHelp(c) + + if bytes.Index(output.Bytes(), []byte("AUTHOR(S):")) != -1 { + t.Errorf("expected\n%snot to include %s", output.String(), "AUTHOR(S):") + } +} + +func Test_ShowAppHelp_NoVersion(t *testing.T) { + output := new(bytes.Buffer) + app := NewApp() + app.Writer = output + + app.Version = "" + + c := NewContext(app, nil, nil) + + ShowAppHelp(c) + + if bytes.Index(output.Bytes(), []byte("VERSION:")) != -1 { + t.Errorf("expected\n%snot to include %s", output.String(), "VERSION:") + } +} + +func Test_ShowAppHelp_HideVersion(t *testing.T) { + output := new(bytes.Buffer) + app := NewApp() + app.Writer = output + + app.HideVersion = true + + c := NewContext(app, nil, nil) + + ShowAppHelp(c) + + if bytes.Index(output.Bytes(), []byte("VERSION:")) != -1 { + t.Errorf("expected\n%snot to include %s", output.String(), "VERSION:") + } +} + +func Test_Help_Custom_Flags(t *testing.T) { + oldFlag := HelpFlag + defer func() { + HelpFlag = oldFlag + }() + + HelpFlag = BoolFlag{ + Name: "help, x", + Usage: "show help", + } + + app := App{ + Flags: []Flag{ + BoolFlag{Name: "foo, h"}, + }, + Action: func(ctx *Context) error { + if ctx.Bool("h") != true { + t.Errorf("custom help flag not set") + } + return nil + }, + } + output := new(bytes.Buffer) + 
app.Writer = output + app.Run([]string{"test", "-h"}) + if output.Len() > 0 { + t.Errorf("unexpected output: %s", output.String()) + } +} + +func Test_Version_Custom_Flags(t *testing.T) { + oldFlag := VersionFlag + defer func() { + VersionFlag = oldFlag + }() + + VersionFlag = BoolFlag{ + Name: "version, V", + Usage: "show version", + } + + app := App{ + Flags: []Flag{ + BoolFlag{Name: "foo, v"}, + }, + Action: func(ctx *Context) error { + if ctx.Bool("v") != true { + t.Errorf("custom version flag not set") + } + return nil + }, + } + output := new(bytes.Buffer) + app.Writer = output + app.Run([]string{"test", "-v"}) + if output.Len() > 0 { + t.Errorf("unexpected output: %s", output.String()) + } +} + +func Test_helpCommand_Action_ErrorIfNoTopic(t *testing.T) { + app := NewApp() + + set := flag.NewFlagSet("test", 0) + set.Parse([]string{"foo"}) + + c := NewContext(app, set, nil) + + err := helpCommand.Action.(func(*Context) error)(c) + + if err == nil { + t.Fatalf("expected error from helpCommand.Action(), but got nil") + } + + exitErr, ok := err.(*ExitError) + if !ok { + t.Fatalf("expected ExitError from helpCommand.Action(), but instead got: %v", err.Error()) + } + + if !strings.HasPrefix(exitErr.Error(), "No help topic for") { + t.Fatalf("expected an unknown help topic error, but got: %v", exitErr.Error()) + } + + if exitErr.exitCode != 3 { + t.Fatalf("expected exit value = 3, got %d instead", exitErr.exitCode) + } +} + +func Test_helpCommand_InHelpOutput(t *testing.T) { + app := NewApp() + output := &bytes.Buffer{} + app.Writer = output + app.Run([]string{"test", "--help"}) + + s := output.String() + + if strings.Contains(s, "\nCOMMANDS:\nGLOBAL OPTIONS:\n") { + t.Fatalf("empty COMMANDS section detected: %q", s) + } + + if !strings.Contains(s, "help, h") { + t.Fatalf("missing \"help, h\": %q", s) + } +} + +func Test_helpSubcommand_Action_ErrorIfNoTopic(t *testing.T) { + app := NewApp() + + set := flag.NewFlagSet("test", 0) + set.Parse([]string{"foo"}) + + c := NewContext(app, set, nil) + + err := helpSubcommand.Action.(func(*Context) error)(c) + + if err == nil { + t.Fatalf("expected error from helpCommand.Action(), but got nil") + } + + exitErr, ok := err.(*ExitError) + if !ok { + t.Fatalf("expected ExitError from helpCommand.Action(), but instead got: %v", err.Error()) + } + + if !strings.HasPrefix(exitErr.Error(), "No help topic for") { + t.Fatalf("expected an unknown help topic error, but got: %v", exitErr.Error()) + } + + if exitErr.exitCode != 3 { + t.Fatalf("expected exit value = 3, got %d instead", exitErr.exitCode) + } +} + +func TestShowAppHelp_CommandAliases(t *testing.T) { + app := &App{ + Commands: []Command{ + { + Name: "frobbly", + Aliases: []string{"fr", "frob"}, + Action: func(ctx *Context) error { + return nil + }, + }, + }, + } + + output := &bytes.Buffer{} + app.Writer = output + app.Run([]string{"foo", "--help"}) + + if !strings.Contains(output.String(), "frobbly, fr, frob") { + t.Errorf("expected output to include all command aliases; got: %q", output.String()) + } +} + +func TestShowCommandHelp_CommandAliases(t *testing.T) { + app := &App{ + Commands: []Command{ + { + Name: "frobbly", + Aliases: []string{"fr", "frob", "bork"}, + Action: func(ctx *Context) error { + return nil + }, + }, + }, + } + + output := &bytes.Buffer{} + app.Writer = output + app.Run([]string{"foo", "help", "fr"}) + + if !strings.Contains(output.String(), "frobbly") { + t.Errorf("expected output to include command name; got: %q", output.String()) + } + + if strings.Contains(output.String(), 
"bork") { + t.Errorf("expected output to exclude command aliases; got: %q", output.String()) + } +} + +func TestShowSubcommandHelp_CommandAliases(t *testing.T) { + app := &App{ + Commands: []Command{ + { + Name: "frobbly", + Aliases: []string{"fr", "frob", "bork"}, + Action: func(ctx *Context) error { + return nil + }, + }, + }, + } + + output := &bytes.Buffer{} + app.Writer = output + app.Run([]string{"foo", "help"}) + + if !strings.Contains(output.String(), "frobbly, fr, frob, bork") { + t.Errorf("expected output to include all command aliases; got: %q", output.String()) + } +} + +func TestShowCommandHelp_Customtemplate(t *testing.T) { + app := &App{ + Commands: []Command{ + { + Name: "frobbly", + Action: func(ctx *Context) error { + return nil + }, + HelpName: "foo frobbly", + CustomHelpTemplate: `NAME: + {{.HelpName}} - {{.Usage}} + +USAGE: + {{.HelpName}} [FLAGS] TARGET [TARGET ...] + +FLAGS: + {{range .VisibleFlags}}{{.}} + {{end}} +EXAMPLES: + 1. Frobbly runs with this param locally. + $ {{.HelpName}} wobbly +`, + }, + }, + } + output := &bytes.Buffer{} + app.Writer = output + app.Run([]string{"foo", "help", "frobbly"}) + + if strings.Contains(output.String(), "2. Frobbly runs without this param locally.") { + t.Errorf("expected output to exclude \"2. Frobbly runs without this param locally.\"; got: %q", output.String()) + } + + if !strings.Contains(output.String(), "1. Frobbly runs with this param locally.") { + t.Errorf("expected output to include \"1. Frobbly runs with this param locally.\"; got: %q", output.String()) + } + + if !strings.Contains(output.String(), "$ foo frobbly wobbly") { + t.Errorf("expected output to include \"$ foo frobbly wobbly\"; got: %q", output.String()) + } +} + +func TestShowSubcommandHelp_CommandUsageText(t *testing.T) { + app := &App{ + Commands: []Command{ + { + Name: "frobbly", + UsageText: "this is usage text", + }, + }, + } + + output := &bytes.Buffer{} + app.Writer = output + + app.Run([]string{"foo", "frobbly", "--help"}) + + if !strings.Contains(output.String(), "this is usage text") { + t.Errorf("expected output to include usage text; got: %q", output.String()) + } +} + +func TestShowSubcommandHelp_SubcommandUsageText(t *testing.T) { + app := &App{ + Commands: []Command{ + { + Name: "frobbly", + Subcommands: []Command{ + { + Name: "bobbly", + UsageText: "this is usage text", + }, + }, + }, + }, + } + + output := &bytes.Buffer{} + app.Writer = output + app.Run([]string{"foo", "frobbly", "bobbly", "--help"}) + + if !strings.Contains(output.String(), "this is usage text") { + t.Errorf("expected output to include usage text; got: %q", output.String()) + } +} + +func TestShowAppHelp_HiddenCommand(t *testing.T) { + app := &App{ + Commands: []Command{ + { + Name: "frobbly", + Action: func(ctx *Context) error { + return nil + }, + }, + { + Name: "secretfrob", + Hidden: true, + Action: func(ctx *Context) error { + return nil + }, + }, + }, + } + + output := &bytes.Buffer{} + app.Writer = output + app.Run([]string{"app", "--help"}) + + if strings.Contains(output.String(), "secretfrob") { + t.Errorf("expected output to exclude \"secretfrob\"; got: %q", output.String()) + } + + if !strings.Contains(output.String(), "frobbly") { + t.Errorf("expected output to include \"frobbly\"; got: %q", output.String()) + } +} + +func TestShowAppHelp_CustomAppTemplate(t *testing.T) { + app := &App{ + Commands: []Command{ + { + Name: "frobbly", + Action: func(ctx *Context) error { + return nil + }, + }, + { + Name: "secretfrob", + Hidden: true, + Action: func(ctx 
*Context) error { + return nil + }, + }, + }, + ExtraInfo: func() map[string]string { + platform := fmt.Sprintf("OS: %s | Arch: %s", runtime.GOOS, runtime.GOARCH) + goruntime := fmt.Sprintf("Version: %s | CPUs: %d", runtime.Version(), runtime.NumCPU()) + return map[string]string{ + "PLATFORM": platform, + "RUNTIME": goruntime, + } + }, + CustomAppHelpTemplate: `NAME: + {{.Name}} - {{.Usage}} + +USAGE: + {{.Name}} {{if .VisibleFlags}}[FLAGS] {{end}}COMMAND{{if .VisibleFlags}} [COMMAND FLAGS | -h]{{end}} [ARGUMENTS...] + +COMMANDS: + {{range .VisibleCommands}}{{join .Names ", "}}{{ "\t" }}{{.Usage}} + {{end}}{{if .VisibleFlags}} +GLOBAL FLAGS: + {{range .VisibleFlags}}{{.}} + {{end}}{{end}} +VERSION: + 2.0.0 +{{"\n"}}{{range $key, $value := ExtraInfo}} +{{$key}}: + {{$value}} +{{end}}`, + } + + output := &bytes.Buffer{} + app.Writer = output + app.Run([]string{"app", "--help"}) + + if strings.Contains(output.String(), "secretfrob") { + t.Errorf("expected output to exclude \"secretfrob\"; got: %q", output.String()) + } + + if !strings.Contains(output.String(), "frobbly") { + t.Errorf("expected output to include \"frobbly\"; got: %q", output.String()) + } + + if !strings.Contains(output.String(), "PLATFORM:") || + !strings.Contains(output.String(), "OS:") || + !strings.Contains(output.String(), "Arch:") { + t.Errorf("expected output to include \"PLATFORM:, OS: and Arch:\"; got: %q", output.String()) + } + + if !strings.Contains(output.String(), "RUNTIME:") || + !strings.Contains(output.String(), "Version:") || + !strings.Contains(output.String(), "CPUs:") { + t.Errorf("expected output to include \"RUNTIME:, Version: and CPUs:\"; got: %q", output.String()) + } + + if !strings.Contains(output.String(), "VERSION:") || + !strings.Contains(output.String(), "2.0.0") { + t.Errorf("expected output to include \"VERSION:, 2.0.0\"; got: %q", output.String()) + } +} diff --git a/vendor/github.com/urfave/cli/helpers_test.go b/vendor/github.com/urfave/cli/helpers_test.go new file mode 100644 index 000000000..109ea7ad9 --- /dev/null +++ b/vendor/github.com/urfave/cli/helpers_test.go @@ -0,0 +1,28 @@ +package cli + +import ( + "os" + "reflect" + "runtime" + "strings" + "testing" +) + +var ( + wd, _ = os.Getwd() +) + +func expect(t *testing.T, a interface{}, b interface{}) { + _, fn, line, _ := runtime.Caller(1) + fn = strings.Replace(fn, wd+"/", "", -1) + + if !reflect.DeepEqual(a, b) { + t.Errorf("(%s:%d) Expected %v (type %v) - Got %v (type %v)", fn, line, b, reflect.TypeOf(b), a, reflect.TypeOf(a)) + } +} + +func refute(t *testing.T, a interface{}, b interface{}) { + if reflect.DeepEqual(a, b) { + t.Errorf("Did not expect %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a)) + } +} diff --git a/vendor/github.com/urfave/cli/helpers_unix_test.go b/vendor/github.com/urfave/cli/helpers_unix_test.go new file mode 100644 index 000000000..ae27fc5c8 --- /dev/null +++ b/vendor/github.com/urfave/cli/helpers_unix_test.go @@ -0,0 +1,9 @@ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package cli + +import "os" + +func clearenv() { + os.Clearenv() +} diff --git a/vendor/github.com/urfave/cli/helpers_windows_test.go b/vendor/github.com/urfave/cli/helpers_windows_test.go new file mode 100644 index 000000000..4eb84f9b9 --- /dev/null +++ b/vendor/github.com/urfave/cli/helpers_windows_test.go @@ -0,0 +1,20 @@ +package cli + +import ( + "os" + "syscall" +) + +// os.Clearenv() doesn't actually unset variables on Windows +// See: https://github.com/golang/go/issues/17902 +func 
clearenv() { + for _, s := range os.Environ() { + for j := 1; j < len(s); j++ { + if s[j] == '=' { + keyp, _ := syscall.UTF16PtrFromString(s[0:j]) + syscall.SetEnvironmentVariable(keyp, nil) + break + } + } + } +} diff --git a/vendor/github.com/urfave/cli/runtests b/vendor/github.com/urfave/cli/runtests new file mode 100755 index 000000000..ee22bdeed --- /dev/null +++ b/vendor/github.com/urfave/cli/runtests @@ -0,0 +1,122 @@ +#!/usr/bin/env python +from __future__ import print_function + +import argparse +import os +import sys +import tempfile + +from subprocess import check_call, check_output + + +PACKAGE_NAME = os.environ.get( + 'CLI_PACKAGE_NAME', 'github.com/urfave/cli' +) + + +def main(sysargs=sys.argv[:]): + targets = { + 'vet': _vet, + 'test': _test, + 'gfmrun': _gfmrun, + 'toc': _toc, + 'gen': _gen, + } + + parser = argparse.ArgumentParser() + parser.add_argument( + 'target', nargs='?', choices=tuple(targets.keys()), default='test' + ) + args = parser.parse_args(sysargs[1:]) + + targets[args.target]() + return 0 + + +def _test(): + if check_output('go version'.split()).split()[2] < 'go1.2': + _run('go test -v .') + return + + coverprofiles = [] + for subpackage in ['', 'altsrc']: + coverprofile = 'cli.coverprofile' + if subpackage != '': + coverprofile = '{}.coverprofile'.format(subpackage) + + coverprofiles.append(coverprofile) + + _run('go test -v'.split() + [ + '-coverprofile={}'.format(coverprofile), + ('{}/{}'.format(PACKAGE_NAME, subpackage)).rstrip('/') + ]) + + combined_name = _combine_coverprofiles(coverprofiles) + _run('go tool cover -func={}'.format(combined_name)) + os.remove(combined_name) + + +def _gfmrun(): + go_version = check_output('go version'.split()).split()[2] + if go_version < 'go1.3': + print('runtests: skip on {}'.format(go_version), file=sys.stderr) + return + _run(['gfmrun', '-c', str(_gfmrun_count()), '-s', 'README.md']) + + +def _vet(): + _run('go vet ./...') + + +def _toc(): + _run('node_modules/.bin/markdown-toc -i README.md') + _run('git diff --exit-code') + + +def _gen(): + go_version = check_output('go version'.split()).split()[2] + if go_version < 'go1.5': + print('runtests: skip on {}'.format(go_version), file=sys.stderr) + return + + _run('go generate ./...') + _run('git diff --exit-code') + + +def _run(command): + if hasattr(command, 'split'): + command = command.split() + print('runtests: {}'.format(' '.join(command)), file=sys.stderr) + check_call(command) + + +def _gfmrun_count(): + with open('README.md') as infile: + lines = infile.read().splitlines() + return len(filter(_is_go_runnable, lines)) + + +def _is_go_runnable(line): + return line.startswith('package main') + + +def _combine_coverprofiles(coverprofiles): + combined = tempfile.NamedTemporaryFile( + suffix='.coverprofile', delete=False + ) + combined.write('mode: set\n') + + for coverprofile in coverprofiles: + with open(coverprofile, 'r') as infile: + for line in infile.readlines(): + if not line.startswith('mode: '): + combined.write(line) + + combined.flush() + name = combined.name + combined.close() + return name + + +if __name__ == '__main__': + sys.exit(main())
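
The flag tests vendored above pin down urfave/cli's environment-variable fallback: a flag's EnvVar field may name several comma-separated variables, the first one set in the environment supplies the value, and an explicit command-line argument still takes precedence. A minimal sketch of that behavior from an application's point of view, assuming the gopkg.in/urfave/cli.v1 import path; the count flag and the COMPAT_COUNT/APP_COUNT variables are illustrative names borrowed from the tests:

    package main

    import (
        "fmt"
        "os"

        cli "gopkg.in/urfave/cli.v1"
    )

    func main() {
        app := cli.NewApp()
        app.Flags = []cli.Flag{
            // EnvVar may list several variables; the first one that is
            // set in the environment wins (the cascade asserted by the
            // *FromEnvCascade tests above).
            cli.StringFlag{
                Name:   "count, c",
                Value:  "1", // used only if neither the flag nor the environment is set
                EnvVar: "COMPAT_COUNT,APP_COUNT",
            },
        }
        app.Action = func(ctx *cli.Context) error {
            // The long and the short name resolve to the same value.
            fmt.Println(ctx.String("count"), ctx.String("c"))
            return nil
        }
        if err := app.Run(os.Args); err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
    }

With APP_COUNT=20 exported and no --count argument, both lookups print 20; passing --count 5 overrides the environment, and the literal default "1" applies only when neither source provides a value.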

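[annotation, not part of the upstream patch] The helpers_* files above give the test suite a cross-platform clearenv (plain os.Clearenv on Unix; a syscall-based workaround on Windows, where os.Clearenv does not actually unset variables, per golang/go#17902) plus expect/refute assertions. A hypothetical test, assuming those helpers, showing how they combine inside package cli:

    package cli

    import (
    	"os"
    	"testing"
    )

    // TestClearenvSketch is illustrative only: clearenv wipes the environment
    // on both Unix and Windows, and on failure expect reports the calling
    // file:line via runtime.Caller(1).
    func TestClearenvSketch(t *testing.T) {
    	clearenv()
    	expect(t, os.Getenv("APP_NAME"), "")
    	os.Setenv("APP_NAME", "demo")
    	refute(t, os.Getenv("APP_NAME"), "")
    }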
z>GVI8RrAoAG@u?oNY$}+91X!Tiy|A`IEGAYzpTxah1~Uxp;a62^}~2QY2RJrd$R0@ zerazQZ$2B~Lt_~0()d>O9D6Y`n34zf(qX}^bW@ww+uYp8{1}h+g`PGaC9I?GpfN_n zCNmDR>N9#ppa<~Y|05GzX7D?2zB$`dRh;!a{`v6^;8hLsaCYXxg%QNs-Df|k(qiwS zBeU1v`Bp)z<7MQX92I)+zV_;n6VJP${*?|i+g#<=`r0`y272FIyjix=g^XYU8em3F zLx!m_ZY-;WoMs0k*_*|Y;w!Gk)<3BIv6e3OBz{E_te<{7s*LHfS%b5 zw^Rf+t`RUzK8}58|DJ!N*E~Z|wEqV^;wQkRKEs?cvg-k7C7I5a8nkOKo0n`fPPcIp zmTW%`y}dBT;WVX#HV3*Imh(cC({jXb-(H;k^rwHF{Or)^4DG{5-W+zYy?d5?xs!9I zhpGyrr`ZCl(%HifskiF2_{`iNXBWtH*4|`gEpg~L{j4890giX3LNWsF0bs1l#_Tzn zdUXCcxSU}jcC2MQfk$f4tJ}~!e zcC1!|YgaGr0MVlbgZGYBX*a4eKq~?%S`PrUWm4J$05TaOlLy_%@Ld*QZ9labm$+)m1xg!$)v(&FY-(_Fb|^l45pnZ*8$YuQg)&L_c?qvCr0JGmHQ{UEA}T#^B#> zJ?I>~ zh}2^7+?i8D2W$X^KzhH9mJXEB1w_&IMC*c*TTi^mp8{g5LTc(m zRt+E`*(Y=Kh0G!I9|uTVBWDw+E84YZ|L_rh`S*V}f;(ETT}n2nYLT3Bv;Cu&HfAg7 zIj$eD48`4hJ18y?&1d@9qM6@*_}kgms^8|5QFJ|9?Ao=m7L)g5F3}obTd!vzj-bW6 zwY>vk|KI=q{}|OWTKtg#$I8H6@iaT5^}%;z%5HR|cHc5daRL78=l=;Z}V3hu{6fXhXnuzaH;N z93r9EiZ3!5=^%ay`qQH1lVAT`i3QhYy9;o!ZPz6emd3{_sO`(}of-iDVA#F=EW2^i z_rRZoN%L@jvWHIPqoOx-3~X!=-DgUM(P}_|!uZXL-*%zBbB>TK&4*7UBuze(`H@fV z%N8dK(Xc}i{VveWw&y?T^~|oDK$b~glY;;(_ON@6YOL&HkF5DBS_?#yJE~ZLH!il8_FwzJd$*Gz z?IFAM@=wpqKKj+)WiJDG;*of9v;<5K+)6&7AA`Pn@$Bq+J+b*c0=DCI)~l6PqO>9@krbzQPVc?s{ko3m9BOpm_c_1w`+dt* zKUetJKL3rXL9}a?prq>P=8f80_I(wqs-@|Fez0EEvo`Uj1jN`m+YR=x++#jVi;_cH zfV-c!UxaZ*^UkQ6^Y9vBKXV4?Mr6WDxt^Fjg9w&#%C%#3O4{i0^{H?9ZYsqW(BFiO~O2`6SKP^B)&%T^- zY0>j=)?xeh*Q34c_WTD`gQE*tZwKb`<#Y`|rz%Kb0V`Hy17z=+tHA%gf|C3&zT=s* zr^jB|Rb|suQ^i|o)VL?vshJ3buk0)o;Q9L3!p`znE@wNPJ9{d*v?saM9^}uU6*?2f zKnL@Q?3KjfFTVJ+;NgL)yJ3#bHMrGxchzd?_To@&bkCfr4fU@+n>AoNe)g+h_T<_D z2cEyN@9+c&)%9N< z{Opxv=IgI6`gC|t)yS4?XnF{~!lt#TD#q4^{(R-^BLLIW!yhY~_1A=|~$dSd-Ln+&y!^s5hUQHp6L&CvK2oKnj@D*I3 zaOQA?p0V82-f2F169FubCdj=n;yuiZ+53IP&RsEAtz=pwWetaq9F8z5S3Y@bHe4Bi zeE|wSOki(J7*S%Jdxx7|OA#`NfEo;F8;<$btjl0Yh+=34mr@x*~{$f zVa)Ey&;i;YAO`abgZ1iu6qI3nEfDNCk2KJt+0xn$mX`n!~$6+n2fnmZ}^2~qt@BVYy(11|jO9~V( zcb|LFwFPG2%hn{c)+XG}XK3y%V@wr-0Gm_DSCo14Whcu2-tYbX^n2!XITY0A`iwjL zis094bpQVS8D(XsMNoU9pDD|VF9|iW2CdBA^}PcJ4lKU=o$qCI{c^I1IR=35qeqWL z*BuyizTRFr9l$BTzEUsFpB6|@%5u9~#t3kD_m0=2&$3LKllc(1JIcreT3^lBc>A5V zN0~B=@qu2{lL?j#8E312h7AMy15T|Ipw?PhhYkJCXc92e{z6|u%8GosSQhf09O(%#sB%I|9)C;`HoC2M)3B| z8TjKL{&0W@4oHMM5#~ENHssb9C%#t4XfR4w_6V2e}dx80f+N&!$PBKDd zGNM1qPOpBHKcIQEedPGTsbCPa`Ks#y&X+E9?!<|c$wIBP3f!g8eDC=2Bgq4x*P|+s zHZ9(M>&^N*-kf#(_~Ty=$lwsm^?~%$@}_I9wV$d1lAsx{Wu6DFD32rmfHml2b0Flt zgZttG*X&Q`1!QFeIn?Cvk+&vG>-6dKW0c$oi1GqZ?F@7xOZU~v>SA*CQTrDw4!?0+ zfY|uY?|{p3b{Yveihl5{gOOx$$;_kF1=YxYdq*w|K;kD6(R+7ufbZY0jCA{h`kEh1 zUYE^S-~pY=47rhGDf>zQhpxT(RX|7v5W_>JIC()f(OX{R)5zRtfCIJiW%?jp<2r`% z^l|PwG-5LIv+t#`0cR8@!JGT=M;|z#5j~8f(f0rhvYgvMVq{);d_~{$ML~Ja z)?|tsUH1TL%+JO$hFXij$#+kIIb)%}@IoW++1;ysMrZocu`>ki@g75egvxA4Jl5xv zCDtI=H|8_{wf?JLBd3>uIbG{zURr^;7uw&lnKLF3ErUNtIKb8cQ5?Gi*#+q0RDu4_ zYfG>rr}>Zn;0FUfojCcM&O7+_;_v?OZ!T_EadzSCxy8$ZnrqT2|KK0}{q9~b=YR<3 zAKYD#R{-buA!lS-qud=1KEI%Mb1*UtT>LXF#^x^JIp@ezd7sW@w~CdtWJR z6uDfRXl!46{PT~08r?n5V8683{cc%98P9hLr~og{om9FfE(n6DP>8FUa=6 zH{PFs;*|nxGKT~+$UrvB0H4vA7ag_CuF0Wp-y~PG+e4pIjnUo?*dA@zZ+o;1e%rV9 z#`MhCpGFQ5X`)hTvlwY+O zP^Ov#-3{jc7cQKdb5j6vTFMAw?#sb=9Bnp2+5iAR07*naR3Jmvfx!V%aWcfvJm(G^uA3(!4kRY)C;NkY>!~j)~R&c&JuC@E<+4|uB zx;9hR3%D$kiG6_XHbi4|;8Z}SCse4Bk+Qm#$jTi3C@|;Zg_DcF_jmr@;xB&m7emwf zvFh)BtM%g80ASe}Dl)YKdRQBh&p-KT0RQ!|F<#XMiu0>N2iV5$0BWs$wXE#e8(lh_ zT>Ck%b~#@nz_>s=8fcYAHc0|Ou1)*H z-qAja15e64%iceqAK+}0y#d@)^^xCms4RWerl(Gx2<#4IDv&_0@97Xy*@08(*Zp<| z;B4ErYYsR{B>{09X=;FnK9_+ETGqzg=A5NV< znM~R^!5!@+07YBd8#dLYc;)QrGvh~ahS}cuh)zL=>o-(=kaI3GR}bX1=@~ThxO+J8 
znl1msKl!iXxq2m+!TqDZ_zwdB96b0|Hgz(wz?QY<=r^sQUwTb|gWtpl6Ua4QjL4rd zKdptPx}=_FVHdaEWPu!FEWTo>qcMNE5Ta!6xzd& z=y&OxRT&qZ4p`b-K`r;_!KLcq!wS zEizSrpE!A@^QpcXAWg->1Oa-2YH;HcXl9=S)E+e!No;!g3Vxak@MMy#qDOYJ0Ey~N z_hN7RCo}$4>v8+Cb8v1iKKuCRC9st#dFyKOF`u>S-W}1zs(4>Q)%Ez#xUGRqYDr`} zyAI*q4^&dU;rTajRykg>)xLf8*Uw+?2E&IB8prNrz_uL$UadL3s|s_fvZ8H%hL$U6 znQyK#<+?URmoLm%Nh{g?o64BRgJ1g%f~u}(lfheVKke}2fKswwa^m&;s2}~Of44Yv z`TQJA3lCURP=lO&`YPYMpZEw5NDk4Sfv-glJx!Lg4sv)f=*_5;X8aPVPeM)2m1A1nar}A@AnL{SY?+gu4NEOMMT(97?;d?m96O9 z-p--O-IIOnTJm>I^Mc>7)9GhpX3O9!KpR^VUckOr4J4o_(UZR`>B&Dp^^J51yG<31 z{@`e1SH2+MUPX?vukD#y7kK&n<(9=*!QEHT;#P<1s(=Nw!_ovx z@h>^OG2IATRQ*dQ7@r_0tJ+IOH<^k4yhY!spda1l{(WXm1vJTISO8h$)!Xr)!2i-` z;wNZFC;(!f0{YfxvWnYF^PPkV@@sW;(Y@g!>gVA1}AwNB~Y z_Vr8^V7IEGfCF5;x@GZte0&G*wMTel>*j;A?&Ry< z<^b4A9=I>Ce3F~`YpM{MTZUra%jfGd0cJr`3ANUd|3vSSBV^hWHhQr2rGOE^eW7AaLu7?>WpD0f!mENuV{90veoanm|!_Exx^?}xuy+c#B zM^qf$>(|OOiXs2T-}u9_EiX-<^FR3A_cPoho&=s=gxhw$(P`A%a=JvJ^y(@5EkmT0 z4A^J_Er2Mx6H_;BSv4gKFLJO%38pu5Q307NqDA`y*Y7dF6ObGe{mVCPO*psNugbc8 znt=NFSD!9U*6a6tfSgE~%gctZbm_K0e0`$wPEqW2j3ncp_Iq{ z)@5iZ6D366tS=Vj@hET+4Iuur=a&E;BhZ0%+UZdGm}2*?t%0B$0v`$gt34~{G68ID zIaV@(hfyL_-~@pNu)!=CiQ|WnIV3&VUBh6L`8vw4=Z>M)ytR+8$(($AA&STNP5R6;9r7wVEV}!=h%yDCs$hN2 zooKM^Eehs#jOy?gpb~*FC19e_<}Aa$b)`IJ4n5Nx2b>upE)!x22xI6c-H-Ecj-qaD zTaJ7EqnbPB{x4bN=50nX{%i5C^>hMvo7Z=md$jnp)-0mo6g{515kSpoIri3(QRLR> zWq=Gpm8DpZyy&Lvhh*6pmSd!q8BZCK69AjlBj^NC)&`~=Ztpi19H{-r3cakaO;!RL z`r+UDyJOr9sL;3>W%^@Mg64$ol=5G=m|>VxJzz=W@bhMTHUK>#+dmHe!F~HCSi!h_ z=lHt`^M{M~zVXfBJDF@US{OTmK{o;o7>qJV1%VhGoDUT>O22nzoY?Cr{cbPych*{h zAq2pu3>m7FvrNTp6l(i7Kw`ASz|i*}2yA_w&!9)2h;}ob^}3p)wIVunPs(9;M&JY_ zqYF+GP_|pnz5)vv(CAC>B4}eAJ^dQDS4!!0ae*5AMjq%-e&)it_B&&`_2HljGI`l6 zC9X0T@H4q_TLR?nv%Woc68XPp-32`yuDP+`&#H*+W)9fXCuIZnHzm9(#|!{VE}T7mIyNW>b8!FSpZ&A{ ze#%<^>_7dd#ZP|xUkQqwY{7RR3&1^kOPBkuRy2OTmQHGwJiPnlq(vj)hQ zUpd$D(f2-lfAQ&OzghhFXFpl|;UE5C;OU{ozx>z#div1+)BoXrTzuy{-x%?$yjlj6 zOk)n#w)83RMpf9CU!0nj4|?#+@(19erL_UB=woHFPWh)`0N!IzlW&~j6@mU}ec6^P zr=#kb9aWzI)CAgnpY8E_l{e?(UqR3lXHR8MeK9goDfj;U2dDC4OTj8__+%7w&d~*V zqjivMPYnA;va0198hw`G{c7|nTH!Xl3h+8JYidt87PAXO2km>Zlqv#vO{GfL``sRE zN%15*!v*$d*(xPj)V|Vz_UUC{^26u^O+I;A|IlnoeQ^a1fEIcn&RXm&86%l~^sRS( zEAv;k7FRpq>)a%F3;>_4`^6W(>VWHCx0lOcmdvnM$zaa9efC^|2*8uTtZ|`DYr!Gy z&%n0_-8LHwrJr6T#_C1u;Ba1 zksPaGx5hCg`PnXl3`&hTv{&jw{_)R$GGNA!e*B~CpPNHp$KHCovxvU2_&0z0e+691 zzCX6u+&+DE=c~m}KK#A9@w`yec@8;%pr@=?_+0O7J>7wj4lG~VkJ)0)N-*-#%6cX9InZ4?%Gnvt zU*SLkP~JRzEW1k*L~VSM$NRU`0%V=c)DBk!dZahV$;t34h{8^gwMaH>$d@>B^zACz z=!28(weyfJRhf5T^tI>PA9f1caD4&T*Y~|SS=0QF?SVM2S_8WED@g^}9`uEF5tBI3 zzRPk2Tye~|l?lhvxRCB)hs%WD`ObTjS$*>47Xu1tW5W)Y`Aybo$;Ga@c;1*Q>PcVf2=Y0T& zJ$0{5jUPB9uwsAdPBiN$Tj!H{kl!uS7%c(8v;Y7Ey%wFjue}G50O|yi_wPNB54Isb z|MjRIw0QQy#qMQt0Ht=c<$GQ})f4Xxh0|BhLh%NzK&fiE1 zT>u|4!tW>7ruw|=rs5*MA0Mp`Xi}lXR}u7Ax(_t89`u92I6MN7hrca6xn>*#%r-`V zIzMECdnOM*xl)osfjWV?v_V+Iw&?ol;>`eC{AgeC%I3i7ui`m-BY=h`dQRCkWfnHY^yZ0SvYSvrEc6zH9;C^&PYbpED?}y+0aIxpWk@)IvZN4stuUzRo zq+?uI$S;ly`v5`?KU5s%m>{@?d z)r-!1y5E|N9gCOJ|7hb*_JssQ@ zc?WNjKPr$XTf8;pKS*w3bI>*91%Q}55F{iUwP4#`!lcTS$N6X*qqS$7vj3Ojtv#*f zUNV49B#49_`CuyG(Tz-ASd4qZ8&v{0%Wo<;8!6c$dswjgMY3h8rh3ob`;L1Hg81Dz z88FNdzAZam0Las8f}_STGPeE|n1J4R%-??+GYf5NXFF`H;x^=r>5-y;ge6#lOnF%oVJX8YQHF9hH zEW30%BE)}e{HrC9p1%K^=`+se6VO!&zd^gQ^xCGd)3s{~a!}meguleLV6$#c_T9Q% zThANOcX-S3W5*Xi`hU}x=|*!x!K(HoeOfO%ls~#7Ii?b5dzcoRX4-!BeJ#((PP8Mr zz+7kwJnr}LoBB&&b+W-5)3OC{Z#EwZvSg=>eW0~gthxnA$bFd2jWz@`?AN303EBC4 zqrp;=&v4AI>Lg-K(ImWtu16oL83xP_Lu8jdDUrd*R1c^M6o8uK)>uMtO;30UBPwIq zy?6!BPEaex5Ulc^WFW6qT77)-+2>`4bqGQRH3KPL8^s_C^Q(mX!=h~*2W6m)B1C7S zw+8?#Yus6+Qi;EQiDZKr~)} 
zVBnTb*_V?i5{U6-X(RsEBaFA-2~cY+XHTCjOR>y|K0A=pw=Ca#wfN-IPsRWSu5qYO zh7jX@)#z68uORr(IYk0PMZInJ0t(1Ig4 z0C&#B^Axl`Npl)J;I6$rSk&swk4{eJ;dj3KgBWqsWZz6Hos^HPsuQ1oIqL|F6xG`m zz+&7R6Id$r#G(+lQPomH~Ph7_86UX$HtID z9SvsM{+6*hUA^1dkAu>cx=xiw)3`55HF-Wwf zP+r18kUhX@VtD$m8$~CLwzXWWNC@@b<^+Unp@;Vav>Pu2W(?29W4$hvRebx4lL@b< zwbeM+^Izn2?k`*LLQcKNa0)u%$r#c9mT;ulJewm$*%+68i84t-paW?)=LiOHCiDnC zJxXO&zVq&TQ_72u$&@vXq0O0n1`wm|?|k^(2_Tu%t`09$g7i4KL6GL3wD+R0l+QA_ za_&T#;>jbxz5Uci<8kYCDW_7eJ5JG~%7h*!=hn%12po}h{4g1@DFMpyy!I>*pTm|x zI)+{A#_R%6P*QKa@%r$+H39$|KRL>v7aeXH6Z$^O320Zzv}gA^{37Aaz+vQjJ!)LC z)URA9U>1GLhR{Ol*T4ESnbaXs8DL7DfXxo!bbrR$vQkGho)UjDP8|O6tTibGVEy?s zr!%wyXp(~z9Y+rz2$oUmGACq5Z0Z?1-#s=)lI#}kQ>Mf_=LTqbX#c*&jpOgc=l8l# zrIk62Kls7#kD?=+9o9D41O5KSPKWob?<|M!#fw$xqz`1=S$9Ud3>}$KYvQSc`#Whp z+4wTRLADopV!eQ~XU<%V7IOZhH_B*3hN>1^H?E5YuU?!yRoQxD+kfC-#@6P^aK-bR z+B*lu%4|dzccO8Ji*hvZDxMHLxp%98W3;9RW=-Z+&+?q>*E1@P9}no!YN7+z z0xcXss;BFwu9ew$>HPV{x88q$_E8Ua^7&u=tA9BeNBS1Z=$Gw#^ziY~U4l;b=W(U( z6#K}P_H42f7?$k~zE42Mmy2BP;ryfwH>qsg}VzugdZk z)hByj#`DWkaq_KuW!&IbG7*n<`C>yL1+Wz#kVgPhKq(pd^#R1382W?^oy;0nChGw! zXrCU}vdO#{I}B&N**U`uErvS-Y|2&9Y0p+t_$0dm6LVH(fPP?}q2(N9&+9n?3+Q9k zC;Fft%-5Jy`#7l`m`*3occMjl5NPIoPM+_hEiccu7sI!G7f1_au_v7Lc~)1<8Z}qf zCOlhT|2HPz1p57MxZ1c4kd&_8y?syM*4D*4@03M-;#A+OujYX_XC2rGyP~O2KC9P% zP6-Fbp{(qv>$mQfad%~L?Cp0KXA1)Tc@=L*y2n5N=l^u^$KU@}8MyT@O^&b;*cdO8 zgDNjLAZ!a`1^(SGUf*(%X#mdv`ietb=gPV{L<$`Af$DYU!Y6G6pJR9Y{IV zKCQFhq*wrx+cF3h*XyDuAK17oTMM&Y3W5N^7CL(PW}` z-}O1T_GF&f-?x%sTXJHQ`pbg5bpBK{Q3W7dsBvv5DS}PHZaDe*ucr5;Ue)-1@7{xx z3H`I5{`mm?0>gkTS*_Qy+gF^sFo8sN!H#U3CKg`--WpZ((4r6jk_qHLUG}UsyjI&0 z?XNChINQC-bc+r-l(NOj@mhTN!M7Ga&E|EOn}fPH_Wmb-{o^vQe=~uko#~D}_2xg@ z^WAr44z2)*iVZ=^8AIzyu1~-FvyIBM|Ma7(Ku%`k z-AZR}ZOuAADSOmD2^s)3_LS|;wzr=$kp!FZH9pK>_PpwzxD0k8%OXz{`-HM z>?pgk_51Gk|9-)di++uEWhb7_pGYr6W6p%4BiK$`cT@$_&#M7)x3g_FC1==E{F(FV zJ{h0rMn)r@uyR#FiTFXwu6xM`x_DiC!VY4`I$-`rV35Ap0J$ys9|Gc@MTdQrL;rem za%VQ<$A9^syJyv9JwvsHYLbyrEuc;=zy*S5bi&(#b@Ke!`249<898jex-ME(CIQC4mjSfvbCo@PJ8R#%X2IW z?k0>T)2(@#oBQEiGJsqnt5s+6K>-hBx{PJl5L4tb`QhI8TW@k|xol3t0$bs(U zn`e@>=HF!IyJ*F;wI%SfuXH}pf~KMKwTqw&&;%RJJXQ3vq4AgMq%8qI>=)mEl%KnH zeKNZ1-M>){xb6_)W%3nZxix68fBB<7n>`{owW8S;wIahbJ?$ZQ1wck@;*ux84WZA4|?? zOZzqb3BXDA(Ozjsy4?9~vRbzmJg-)2FqX^bPDD$cdD7VT)$Ybxz%Bs1GCH4Eksuh- zwhtKV5K~~5m$4uSRkf>oHo%ju!3N*BwebQE>8SW^+qTYVY8}74{ENl0_r5dNK8;@a z9ug9*6*V3~^!);Y$t3}HEh(l2TKmU0CWp!W2hoJ8`HiigX9~V)Z6TQFHf%uSX9HR1 zm(TLm8f%tiL(b>#y&jbvnBSP~4g6#O(Qj$9k_z%QqS<`~4q#DykSobN=d_S1<~}sg zO=O+Y6%+XC>GUa355LE~%|AOid4Y*vB=7LZGGX+>{QlDP7-enrbFJR}bJ9)7VmuZJ5=^=rSorqeF_eo1GIkL>b%GLl`! zueFYNR*=AYq4lKMYivJ&vI0sY9LdbqTmrGc+=EB8+3DH3VQ9sAC#+|5Lv{@8&HB(eS6gHB$LGBs z7Di^`Q`z-TvK?N&Y|TpoktB5KBD%>2zi?r3_~=_@mLD5?fz8&V+VABVAZrSQI{bZ! 
z`jZ6_~u9K7^Mz%=Wbv*-5Q2Sz+yToH%#p#MnuXlNI<8olXUF z`)E}>m;Ijho3YcsJ<#%veUNZ#9qngpiZAjbC)u=bXn{;8kgJ=sT_0s<(XZWLTFXA^ zOtu&ImVuMe85!Z|PLNB*jO*wMcJ4AsNsyw1;phmT`V1DT>L0!&utH}~HCKFKj-IOp zG^|&U7O``&E6#obUD z@)%q(_sxig(R24!4nx56rVK`kdH?=5o8(gQD(yhX074Oak&0#dKH`2=i;^!Fpa0^g z_1vxRZBF4Ru*OVb0#WYO!&aH8Ecf%T&MZFs<~Ii*#!R;Zi4Pqh|JF?O*wHFK>SzrSI zq;;0UBe1%APn180W=~mbml{8|2VPAEVtb>EN%kk9H&NA|e*WV9*5U0kadu1;l`s*h zw>L^nFPvl0*F!Nvdm14Erh#xWHJteF!I#&4HH7!tYwH&8f8*VNwFG_O-sKEnbLONd z_q^EOl*bn*&iqzjF_(Z7uZ>}Y=Qx^v0{k{?3^1*H1}kpK(HE6+NGE}-RSt(nZ%6`h zGA?|v&nUqbW+GZ`$uOI1>?@{X{Ed^Hurq#ZI85Ia+MO_7(K-+r)4%AR9Je|Aq5TC= z-_LM(+_ePXnGQeU91%SC?yYRzXA(eJKz&~(3<1Gmp5ajr9vX9wF!%gu7ELjPy?U$v z>;JoUz>(@U1VGWQu9v|-&Pi*c9fn?DS96E~M8H@;+~rzaYzwHr6u`r=TbWU(r2lEz z@j!RZ@wyI`Q1b zr~kKXrvX#$mua>|hMaw_tTsdJ%&F4>O0^z&^FVW3H)Y#>7>IUrN}cSt9`k}iZ@qOS zpsP~B{(BzpEz2~Q<>d}WrBE3z6dU<)zf6UD^`qug0SKRTAM_|A-5C;;>W-8(z)b6? zQat9ndeE*1&^PXHynQSgv1NiMr_P*SeCva6OsO%t)e>juCr4#xea|?^l+^Z}+sDCj zD3%C1fPZ81i@Z}7$H?Aa^$I6~teXCM(XDcMS$ONyKeF&CSTe%_Yud6HKLdor%*p)b zhu`d(l@|8QJ-yFpKws1xgXWQp+h`aKIA21!y}+neFPjS5p$!KvP59da8#!qkmc=3y0{Hy@#f#`A9;yZzZw{Kos{D=SN&kD9Qq1sv)Thr+v z#kYok0WowIdBr$&Se5^N%vw%K>PA* zzCmvE@a8eHZp1)_W<0tC0D$O8z~rIpfRxr#X4?97Wrt+MZ!I&!S~(od85NIK2}D)` zm;uk36X>E>lc;@PxpOTI;tl%iUNS_m^GO--M!TiG$5*PwJa<~TH8#5W#fxP?Y2;OZ zlcCFiCI_-P^uIB>{nFG%wnraNI*f*_w#I0fUh^M1;3$zHz*fM;>Uhc;(ZP5QXtl8l zIT?B!rIu%&L7uTG8sv=A!^hBz2GP}vGF1Q>?oLKW=<_Ul;FDEs%meQz9sGhY>ev!(|R9$x%6|M;IQe(w){u=xIW z-w8x5LpZ>|`kD7vorS_-eZ9kJ_wL<2^U|*B^y!lUVAl%9@2PZnXY&s*h-dD$Cl2j( zc(0(rnw%thNDxa|r4}nd3UlOi0zMs1_9WS5FVV<>{jaCn?k;}zi=QMvI&~_ za;|*8?>6>=Gnwq{&C~zNJ2uw&b0>$C0d=Y?{2|lv@e_xjUd>re zS2T86&yzH<0!@?@k)5%v%tGfo-R$?LY}xB7j?-oR{HlMz8=$S~fE5=n%)XPyk~Rbj z)|J8c#YcY`eQBRqWnl8=R`k8>y}tByw2QMwZ@h7!b*Y6;>#6s49N)Tk7EQLCphg3a z_G)?29*!)E#(}9`fLcNO(`V}KoqV%bUS9`-$LWq<(6+$trS<^mfY;dzOG@H%E$M^d zgch9>v##o6l|jCDFBw74uwl@G`z>`x_DS+`K-J^{(3$?#J_6qG5iF2MoZNZ7_P1+)SRmD8<~Iyv9f9Iwk4&a{9LR(O4hx)w2I@ zeAc-3?R#^;lq6#|c^To2Q!ozLKk_p$XLzJN9>B27Pl-2tV*4ROksd^^L)Y=kfNkx! zHcAf$h+WE1S+)3ErL%zEGGIaWyHzqVtbm-_k!?CXyxR{rf%SQm4LF(4Ey08Kb8Y4Y8C`0*fyia#Z`r8`Tz^8UTinq|$gXB10A2<91enPH0L`=7z-iOj;6~N~ zV#o<#E#Q5kx8TJ5d+2dQI z6=!9gt8K&H0|$rq$Z;9n8{=pCPpEv>G~UNU^dUQsefm7zO@6puzj$ln@XM{q%?2@l z1M&q)9_1^lK!Z>C?zNJD4jg@ZTD3e1eB-;IJvKU7XD(}=t{V^!Ppqn?kh2U>?6vHS zNv;V8(9#BQEm2W0%-UEh?H9=Y;gf!2>#T|1ft%zrKWQ?&qa|6SUSz;mmph~5=GDcA z-}ymnosE|7&fk;R!;avO(W4UKZk9do?-d0T=s?$59~ekmF#OI2wQt5EBiVU-f)Ngh zebV|$XcDYZ2{86@W57$=M^1+UGNHXif3nR5U}d$#KqN=fML=)18u=!w@ARvj>_%mdxd$Z%9`%=_;z;GFDCKJ zGi38WZrnHNeY6_yS-&UgoGZy82Ts3T<=CzB^)_dV`IJ^KB0f; zcL&W&LLws(2HKj0({zf&ErAO@8v2`QnOgtMi_BlyUad&h`I}wWtS3nB8i^tHQoE*E zi$0S;s(rCFWz#%MhDd;-PuTn`de-ga*8M8V8ba^Ie>0BcsS3U18?0_^$$$Z%He3sn0ce}uJ`$s;}`7b1pHsi?Rjo zPqyGVyb+wq({HlO@Bji5W%I4jez0fp9-C;pc&w zcNYKn@BV%uPvB|-Yh&P*1tz%k5tp^`#@0iD5S8@?x@_M5`r`1BqcN#cl_letki!d> z3)xD}g0_H7hQL@6!N1udFfUf>KNy%CK~Wxn76jG>zwM*;bfQ>J(j}aG#Izp4uuog~ zci(%z@jR@@rxTL5Eq?lwpN{gKen&Csj^5)m0N7*>zDUW+oH7?>Rer{1qu`rl>q^7x zPb$TKOU%=~SgZQDE1HZbqRh58aD6dn4= z7{h#tuCfOEF5=;wMKChj9md7@qx1lcdZ7|@7>mOGs#1$5KYv!X$+yO7XGkm6P|l?H z+lB~)QeIgf0s@2()K2H-?aGI`zszYFA&d`##aIs>*f(W}W8~y`ymjnY2f>~lL4h`K z(?k?oYduamB{H_#=8T>CIOYf(J$huAR{!C9Ww0Et-|B(=ZwwKC5x@X!QI^W+N8sz* zN6~97EcAcYE<-k>b@05ejXyLq!Is`HidYA6I@e)}ueqjYb!zl>}K%z&IJ!`%hR z^pImb64K~jPyMYO1owK8&NnM{dtT-dXJ~Cd2^hlj;K9msqDk3#fIF}I8AxxG`DFDe z9N>1MX!gzFU(br(?aJ`@?zg{JhQ-FkM<4xo1fK&$v{s>57!+#)uJ5Lt#C!Dw#3%Y! 
zZf@)!fBNxcJDxgma^%IfjJ3P@8?B_oQW{P|5n(d2gPbwpd zrj^F;$xx9A|MG>PMs{EH`1xy$l@^0>Br^md!>fI%rZ^-~HC#v%k(Jpp+a2>u|U!>uVbsB5MMg zul8N?O}YH-3={h-*XCJ{nM@K&-ulx#z?|vFRZm8aqilk!RSZO`Wqsvj#=A1Eowb0c zckjvx=FrBY3@MT7Z@hCP-aI(645%$Is2?Xvq?}(xdmD0a)-_&6@9OwNx z0PTsOgiJ(qPqD2iL4pA}{TAB?W~kxIwa=C%_DbcnWLei? zdYsl@fAPg+vpJ-XG5e?g_Wz#N3fFISNNbsHYn$)C_@Dnpd%g??IP^o%Q@Pk=?WeSN zwBLK`b^hyLf1L3VXg_P0qtMkJv~JB)29w;O0{!0@#;FB6JDM|y^_Nzq+yb_^&@HAo*2t_w@n(On}T1q~kPhsC9;( z(@G#^G~W#PBA3z0*d8eo@*6$MYyr9e53Jh$eFxh^BYqjU-JyM#ml-H*3IMwnQGhD? zQN>gnQU-c`$}`ku4c<#8I$-V6rSm2+_mTk%oVHK&wIl{k89i#BUB`xVFT6lE0lpJK z>_#Pxe@XADAOz6*7Z~Z9wr}WcvO^jNpkl_HLnXU>UW6(R0?Z)+sfqa(&&fhAl=iFp|+Y7YtD)~6$^NhyH4imJ;!&-t2s5T5* zmQgZCVu|d3G|1@{s9oBLrjtDz&ps@;Cm6*EU)#EN+xC=xxW9Pky>A7mt_vK!w73<3 zpogx&o4|t}@%uZBbJ_c}s;7F=0wvMB)>=o8zB?e!xw9wdJ9IxVO#qdSdj4qH_UlFt z*zV})PP%%3J)>VlM~@5WupI_4Y2JWf8GR0VoUEz#5J78WXQVvy}mp3W`g>cE}k3Uimi0|y_ z@ASEZ6FQsis0z4&j_pN$kn^Lnj5)n)T_io|U%#t!YP7zvHg_7&&;H^s=DF_AZj`+z zK&fZ+%>q@xbWXcg1ay-G6}H#Y=!QLx_q_nn>_`C>AUZjM_rfo(qPN-Y?2Ya^G9%+SK7BEE#jb!j`}^$q_5LmO^sGVqKxWW! zo+A(qNErD$gsshiHdw7wKCOL|gt4bUsjUiVs=na!8&sY-cUAR&L$JiUJ@ zJwP9u58gr_lhl<>)Z>z&O~ahX&HfQj*2dl^3C2d+QpPbb+ML)jb8UMpzzXOCl&q&a z`j7mRXd>XmH}*G>MUegIp~LyO`rKA^f`3~x=g*ifpOAjGp8n&@nWL=k;iu;0dzT9^ zt7MXB!Dit@%NAGFpbCTDY2f(?(Ru-&55M*OR-e70qDZk*Pe&Au`4R=^B~Lf6dMceV8ax~h0z;snnJ%x#goj^2!( zqg%mt1jWWg3-0--L<+VAA0F^V7O0N1MrbX?TeaOJg(n7nP<=tg&qd+jY5&z>WTto!Kw#>6MkbpYNd(|OPCgXxIJa~^^~xHdjdt6or9 zy0X9zJz_2sTum>S4=j+36|mxO63rX(Bl;^E%?9po^n5M45qPEZ`9lKE4w4_ekRIVn z&A6gr73d#*{FepEKhKu$5ZLCd%I)^6s|8!Ob#~;Q(br#o{%P;8pI-i|)qwW+e@C== zEnCO{(Z_PWv;CvHw5g{5&F8h&uE8%q?AZd*FXI*S7(Vk$@+{of#_{bKQu(mR$2e0>1XJwkG~XB7DX%uE>&NIJ zI9hNK9iYLQGIF-0XkM+z0BOFIqxbLB7Jy^1|G@qk4*-h*1|*5f08o^1+5}9A!FSYG zYFpXxC+cs`z+0UR1imoNlq>*KuBTWidPccE!5q}dMr*tbGkS;eX%${xOLgniXaDgErqQ`9|JoB-Tjx*x8~EcXD-Gt z&#pC2*+{a6Z`NDNxG~v=T5C|KDn}Tmo~^Rsje61g{6*QE``bl`jlM*Ulp1+u$5 z1JJ-Rcfh1RW_l_sGh{#lKsZ;xg_~twQSN$TGP+u-#UK9BA9Q%v-o;OUrXT4=3pn&m zE_iXWx5Vq`sxCQnmfwRLlc zK*gi>aeIa?zJ9Siqp-Db56WhJ;DvEoQ;~V4_ngwJ87)@>hc*?dzF5Zgp#uk(hE1~1 z0$Vo@+@`Wh?%c|`Nf66oK7aQ77%2ObsRZ>LhEY(3QMs=JNL7r;+EvcGG;;LX+aEpa zxyD)75`bb+*7-Dt)Yq zIp+R*qD!qP-g)Qk;V&{mKkXeET7qOUKCSbrbjZENbKzVO^otjU=gjeVK?_cU(sx-X zC3&V%o9~~&AJKVkZa^#GNOGz_wG%f?@niu!DK~3aQUo0=frM$ zv1R0m{gsh2J;KWzTiJ7fYBFW6UA@}#a+Io&;0ORp?5_X~hxzj5OO|kGWisEJGrdLr zDIZ-{#PpTMPf~HNMkY580FirHpRdd0kYzUnQs5bSoI&8^6WVP8d3IQTxk!* z(6mm1RjcYvKH1LAdjgL=$IZy%OzXLhUEzNI0gM5+GWd3F-<{19u+#61JK*!xqKyCm z2dzm&K~z3N>-93Z*fZJ#;6GUndjqKO_=SrX7mpjq_kZwv9X9^M#U~$sJRr`oqldD$ z>LD19y;l(GadrmhXm#tnGr)Xnhv(ksfVQJ6tLV8M?Fb&f@kV9KUAraFz@74r8C#aCr$V-!Av*0hzkSsn9 z*ErAkx(kitRT<3qXc^#ii=6dzmY#)216O6y0{8LJwHsHW_53UE#~tgphafG0;*(DDBVgY_JZX0thG zoqC2AP#=Eyz2+V$knMV<7Ap_aCGInAl%mfgWr@!rMae|8z!v>_?RJ+ntn%ffzxwl8 z8$dk}et6d5RnaH8r>y`!G5%fI`Ul@Uyf}aQR9Q|XTBMK3YJJeRmYCqXl3WB5Wa??V zqo-<%8VzvLSBzgU!H_b3PIsS~WA~R8%C6K3jGh(&R&3X6K<^WZVo>hlO?&#rUR^2N!Fe}%>Evq zpf79lp@quM=A+FC$5KUs>M_~-fKPyfex&S3m8+_Su2iY!Z+7hJ)g6kSZuJj6BI^~6 z8vrc(jGgho+*@N{m4okq@YbFDkhvJ?<#z=7C-aPxd||^34|GGofsAwqY}qqoQq6Yy z%TJR(YZu@By+0Zq>buy79-7}<3;l6lubK-WgICUv<^zn;#cxh z1=J+PbPc}>IKk&JClkDI@%&ti$2`Y6t;HkFNgF%1nD+qUXpP!G>i6iHI z0Jt_QvTS=}#xI+aZG;}CYOQP0I~gZ1;h(wiApt=fUkWhEHU>n{J@h4VU>81_HUp|a z0Omjnc9wvW&&hXdIzC)_m(1&{BTMkKKkVzg@P6O%ruDmWG<)@?Fhonp_%D=@}Cc-$`3H^c}x0W zt5r3Xo*(cHD@hfZWOzge)$xn>7yWx?k7;6s&MM`j#&jBjX7(ej-FGxk`k zgRKL0qlK4?Z+-9k1BjasT!W0mo42#q*wyT&`}e9a+;?EUfA-XuV^;&hBA)5vd*$-^ z#gU`O$HyJ8rTu1Bu)jSMkhY2}Skxo@n&chHanVI|V;}fmeBc++^Qscl?i4h`FV zZZ3fA1qI`QT`%`0i=5FFo%H+(g0wFDPOUH=w|;_PY{t_kKB@ZUTCJPj%m;XL)@hkB 
diff --git a/vendor/github.com/jmespath/go-jmespath/Makefile b/vendor/github.com/jmespath/go-jmespath/Makefile
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/Makefile
@@ -0,0 +1,44 @@
+
+CMD = jpgo
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  test        to run all the tests"
+	@echo "  build       to build the library and jp executable"
+	@echo "  generate    to run codegen"
+
+
+generate:
+	go generate ./...
+
+build:
+	rm -f $(CMD)
+	go build ./...
+	rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./...
+	mv cmd/$(CMD)/$(CMD) .
+
+test:
+	go test -v ./...
+
+check:
+	go vet ./...
+	@echo "golint ./..."
+	@lint=`golint ./...`; \
+	lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \
+	echo "$$lint"; \
+	if [ "$$lint" != "" ]; then exit 1; fi
+
+htmlc:
+	go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov
+
+buildfuzz:
+	go-fuzz-build github.com/jmespath/go-jmespath/fuzz
+
+fuzz: buildfuzz
+	go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata
+
+bench:
+	go test -bench . -cpuprofile cpu.out
+
+pprof-cpu:
+	go tool pprof ./go-jmespath.test ./cpu.out
diff --git a/vendor/github.com/jmespath/go-jmespath/README.md b/vendor/github.com/jmespath/go-jmespath/README.md
new file mode 100644
index 000000000..187ef676d
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/README.md
@@ -0,0 +1,7 @@
+# go-jmespath - A JMESPath implementation in Go
+
+[![Build Status](https://img.shields.io/travis/jmespath/go-jmespath.svg)](https://travis-ci.org/jmespath/go-jmespath)
+
+
+
+See http://jmespath.org for more info.
diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go
new file mode 100644
index 000000000..9cfa988bc
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/api.go
@@ -0,0 +1,49 @@
+package jmespath
+
+import "strconv"
+
+// JMESPath is the representation of a compiled JMES path query. A JMESPath is
+// safe for concurrent use by multiple goroutines.
+type JMESPath struct {
+	ast  ASTNode
+	intr *treeInterpreter
+}
+
+// Compile parses a JMESPath expression and returns, if successful, a JMESPath
+// object that can be used to match against data.
+func Compile(expression string) (*JMESPath, error) {
+	parser := NewParser()
+	ast, err := parser.Parse(expression)
+	if err != nil {
+		return nil, err
+	}
+	jmespath := &JMESPath{ast: ast, intr: newInterpreter()}
+	return jmespath, nil
+}
+
+// MustCompile is like Compile but panics if the expression cannot be parsed.
+// It simplifies safe initialization of global variables holding compiled
+// JMESPaths.
+func MustCompile(expression string) *JMESPath {
+	jmespath, err := Compile(expression)
+	if err != nil {
+		panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error())
+	}
+	return jmespath
+}
+
+// Search evaluates a JMESPath expression against input data and returns the result.
+func (jp *JMESPath) Search(data interface{}) (interface{}, error) {
+	return jp.intr.Execute(jp.ast, data)
+}
+
+// Search evaluates a JMESPath expression against input data and returns the result.
+func Search(expression string, data interface{}) (interface{}, error) {
+	intr := newInterpreter()
+	parser := NewParser()
+	ast, err := parser.Parse(expression)
+	if err != nil {
+		return nil, err
+	}
+	return intr.Execute(ast, data)
+}
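For orientation, the api.go surface vendored above is used as follows: compile once and reuse, since a *JMESPath is safe for concurrent use. This is a minimal sketch, not part of the patch; the expression and document are illustrative.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/jmespath/go-jmespath"
)

// Compiled once at package init; MustCompile panics on a malformed
// expression, surfacing typos at startup rather than per request.
var barQuery = jmespath.MustCompile("foo.bar")

func main() {
	var doc interface{}
	if err := json.Unmarshal([]byte(`{"foo": {"bar": "baz"}}`), &doc); err != nil {
		panic(err)
	}
	// Search evaluates the compiled expression against any
	// JSON-decoded value.
	result, err := barQuery.Search(doc)
	if err != nil {
		panic(err)
	}
	fmt.Println(result) // baz
}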
diff --git a/vendor/github.com/jmespath/go-jmespath/api_test.go b/vendor/github.com/jmespath/go-jmespath/api_test.go
new file mode 100644
index 000000000..b0b106d3d
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/api_test.go
@@ -0,0 +1,32 @@
+package jmespath
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestValidPrecompiledExpressionSearches(t *testing.T) {
+	assert := assert.New(t)
+	data := make(map[string]interface{})
+	data["foo"] = "bar"
+	precompiled, err := Compile("foo")
+	assert.Nil(err)
+	result, err := precompiled.Search(data)
+	assert.Nil(err)
+	assert.Equal("bar", result)
+}
+
+func TestInvalidPrecompileErrors(t *testing.T) {
+	assert := assert.New(t)
+	_, err := Compile("not a valid expression")
+	assert.NotNil(err)
+}
+
+func TestInvalidMustCompilePanics(t *testing.T) {
+	defer func() {
+		r := recover()
+		assert.NotNil(t, r)
+	}()
+	MustCompile("not a valid expression")
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go
new file mode 100644
index 000000000..1cd2d239c
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go
@@ -0,0 +1,16 @@
+// generated by stringer -type astNodeType; DO NOT EDIT
+
+package jmespath
+
+import "fmt"
+
+const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection"
+
+var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307}
+
+func (i astNodeType) String() string {
+	if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) {
+		return fmt.Sprintf("astNodeType(%d)", i)
+	}
+	return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]]
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/cmd/jpgo/main.go b/vendor/github.com/jmespath/go-jmespath/cmd/jpgo/main.go
new file mode 100644
index 000000000..1c53cfc86
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/cmd/jpgo/main.go
@@ -0,0 +1,98 @@
+/*Basic command line interface for debug and testing purposes.
+
+Examples:
+
+Only print the AST for the expression:
+
+    jp.go -ast "foo.bar.baz"
+
+Evaluate the JMESPath expression against JSON data from a file:
+
+    jp.go -input /tmp/data.json "foo.bar.baz"
+
+This program can also be used as an executable for the jp-compliance
+runner (github.com/jmespath/jmespath.test).
+
+*/
+package main
+
+import (
+	"flag"
+	"fmt"
+	"io/ioutil"
+	"os"
+)
+
+import (
+	"encoding/json"
+
+	"github.com/jmespath/go-jmespath"
+)
+
+func errMsg(msg string, a ...interface{}) int {
+	fmt.Fprintf(os.Stderr, msg, a...)
+	fmt.Fprintln(os.Stderr)
+	return 1
+}
+
+func run() int {
+
+	astOnly := flag.Bool("ast", false, "Print the AST for the input expression and exit.")
+	inputFile := flag.String("input", "", "Filename containing JSON data to search. If not provided, data is read from stdin.")
+
+	flag.Parse()
+	args := flag.Args()
+	if len(args) != 1 {
+		fmt.Fprintf(os.Stderr, "Usage:\n\n")
+		flag.PrintDefaults()
+		return errMsg("\nError: expected a single argument (the JMESPath expression).")
+	}
+
+	expression := args[0]
+	parser := jmespath.NewParser()
+	parsed, err := parser.Parse(expression)
+	if err != nil {
+		if syntaxError, ok := err.(jmespath.SyntaxError); ok {
+			return errMsg("%s\n%s\n", syntaxError, syntaxError.HighlightLocation())
+		}
+		return errMsg("%s", err)
+	}
+	if *astOnly {
+		fmt.Println("")
+		fmt.Printf("%s\n", parsed)
+		return 0
+	}
+
+	var inputData []byte
+	if *inputFile != "" {
+		inputData, err = ioutil.ReadFile(*inputFile)
+		if err != nil {
+			return errMsg("Error loading file %s: %s", *inputFile, err)
+		}
+	} else {
+		// If an input data file is not provided then we read the
+		// data from stdin.
+		inputData, err = ioutil.ReadAll(os.Stdin)
+		if err != nil {
+			return errMsg("Error reading from stdin: %s", err)
+		}
+	}
+	var data interface{}
+	if err := json.Unmarshal(inputData, &data); err != nil {
+		return errMsg("Error parsing input JSON: %s", err)
+	}
+	result, err := jmespath.Search(expression, data)
+	if err != nil {
+		return errMsg("Error executing expression: %s", err)
+	}
+	toJSON, err := json.MarshalIndent(result, "", "  ")
+	if err != nil {
+		return errMsg("Error serializing result to JSON: %s", err)
+	}
+	fmt.Println(string(toJSON))
+	return 0
+}
+
+func main() {
+	os.Exit(run())
+}
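The compliance/*.json files vendored below are the cross-implementation JMESPath test fixtures: each case pairs an expression with the expected result (or an expected error class such as "invalid-type") for a given input document. A sketch of how one such case can be checked against this library's API; runCase is a hypothetical helper for illustration, not part of this patch.

package jmespath_test

import (
	"reflect"
	"testing"

	"github.com/jmespath/go-jmespath"
)

// runCase evaluates a single compliance case: search the "given"
// document with "expression" and compare against the expected "result".
func runCase(t *testing.T, given interface{}, expression string, want interface{}) {
	got, err := jmespath.Search(expression, given)
	if err != nil {
		t.Fatalf("%s: unexpected error: %v", expression, err)
	}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("%s: got %#v, want %#v", expression, got, want)
	}
}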
diff --git a/vendor/github.com/jmespath/go-jmespath/compliance/basic.json b/vendor/github.com/jmespath/go-jmespath/compliance/basic.json
new file mode 100644
index 000000000..d550e9695
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/compliance/basic.json
@@ -0,0 +1,96 @@
+[{
+  "given":
+    {"foo": {"bar": {"baz": "correct"}}},
+  "cases": [
+    {
+      "expression": "foo",
+      "result": {"bar": {"baz": "correct"}}
+    },
+    {
+      "expression": "foo.bar",
+      "result": {"baz": "correct"}
+    },
+    {
+      "expression": "foo.bar.baz",
+      "result": "correct"
+    },
+    {
+      "expression": "foo\n.\nbar\n.baz",
+      "result": "correct"
+    },
+    {
+      "expression": "foo.bar.baz.bad",
+      "result": null
+    },
+    {
+      "expression": "foo.bar.bad",
+      "result": null
+    },
+    {
+      "expression": "foo.bad",
+      "result": null
+    },
+    {
+      "expression": "bad",
+      "result": null
+    },
+    {
+      "expression": "bad.morebad.morebad",
+      "result": null
+    }
+  ]
+},
+{
+  "given":
+    {"foo": {"bar": ["one", "two", "three"]}},
+  "cases": [
+    {
+      "expression": "foo",
+      "result": {"bar": ["one", "two", "three"]}
+    },
+    {
+      "expression": "foo.bar",
+      "result": ["one", "two", "three"]
+    }
+  ]
+},
+{
+  "given": ["one", "two", "three"],
+  "cases": [
+    {
+      "expression": "one",
+      "result": null
+    },
+    {
+      "expression": "two",
+      "result": null
+    },
+    {
+      "expression": "three",
+      "result": null
+    },
+    {
+      "expression": "one.two",
+      "result": null
+    }
+  ]
+},
+{
+  "given":
+    {"foo": {"1": ["one", "two", "three"], "-1": "bar"}},
+  "cases": [
+    {
+      "expression": "foo.\"1\"",
+      "result": ["one", "two", "three"]
+    },
+    {
+      "expression": "foo.\"1\"[0]",
+      "result": "one"
+    },
+    {
+      "expression": "foo.\"-1\"",
+      "result": "bar"
+    }
+  ]
+}
+]
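Note what basic.json pins down: lookups that fall off the document are not errors, they evaluate to null. In Go terms that means a nil result with a nil error, as this minimal sketch with illustrative data shows.

package main

import (
	"fmt"

	"github.com/jmespath/go-jmespath"
)

func main() {
	data := map[string]interface{}{
		"foo": map[string]interface{}{"bar": "baz"},
	}

	// A path through a missing key returns nil, not an error,
	// mirroring the "result": null cases in basic.json.
	result, err := jmespath.Search("foo.bar.bad", data)
	fmt.Println(result, err) // <nil> <nil>
}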
diff --git a/vendor/github.com/jmespath/go-jmespath/compliance/boolean.json b/vendor/github.com/jmespath/go-jmespath/compliance/boolean.json
new file mode 100644
index 000000000..e3fa196b1
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/compliance/boolean.json
@@ -0,0 +1,257 @@
+[
+  {
+    "given": {
+      "outer": {
+        "foo": "foo",
+        "bar": "bar",
+        "baz": "baz"
+      }
+    },
+    "cases": [
+      {
+        "expression": "outer.foo || outer.bar",
+        "result": "foo"
+      },
+      {
+        "expression": "outer.foo||outer.bar",
+        "result": "foo"
+      },
+      {
+        "expression": "outer.bar || outer.baz",
+        "result": "bar"
+      },
+      {
+        "expression": "outer.bar||outer.baz",
+        "result": "bar"
+      },
+      {
+        "expression": "outer.bad || outer.foo",
+        "result": "foo"
+      },
+      {
+        "expression": "outer.bad||outer.foo",
+        "result": "foo"
+      },
+      {
+        "expression": "outer.foo || outer.bad",
+        "result": "foo"
+      },
+      {
+        "expression": "outer.foo||outer.bad",
+        "result": "foo"
+      },
+      {
+        "expression": "outer.bad || outer.alsobad",
+        "result": null
+      },
+      {
+        "expression": "outer.bad||outer.alsobad",
+        "result": null
+      }
+    ]
+  },
+  {
+    "given": {
+      "outer": {
+        "foo": "foo",
+        "bool": false,
+        "empty_list": [],
+        "empty_string": ""
+      }
+    },
+    "cases": [
+      {
+        "expression": "outer.empty_string || outer.foo",
+        "result": "foo"
+      },
+      {
+        "expression": "outer.nokey || outer.bool || outer.empty_list || outer.empty_string || outer.foo",
+        "result": "foo"
+      }
+    ]
+  },
+  {
+    "given": {
+      "True": true,
+      "False": false,
+      "Number": 5,
+      "EmptyList": [],
+      "Zero": 0
+    },
+    "cases": [
+      {
+        "expression": "True && False",
+        "result": false
+      },
+      {
+        "expression": "False && True",
+        "result": false
+      },
+      {
+        "expression": "True && True",
+        "result": true
+      },
+      {
+        "expression": "False && False",
+        "result": false
+      },
+      {
+        "expression": "True && Number",
+        "result": 5
+      },
+      {
+        "expression": "Number && True",
+        "result": true
+      },
+      {
+        "expression": "Number && False",
+        "result": false
+      },
+      {
+        "expression": "Number && EmptyList",
+        "result": []
+      },
+      {
+        "expression": "Number && True",
+        "result": true
+      },
+      {
+        "expression": "EmptyList && True",
+        "result": []
+      },
+      {
+        "expression": "EmptyList && False",
+        "result": []
+      },
+      {
+        "expression": "True || False",
+        "result": true
+      },
+      {
+        "expression": "True || True",
+        "result": true
+      },
+      {
+        "expression": "False || True",
+        "result": true
+      },
+      {
+        "expression": "False || False",
+        "result": false
+      },
+      {
+        "expression": "Number || EmptyList",
+        "result": 5
+      },
+      {
+        "expression": "Number || True",
+        "result": 5
+      },
+      {
+        "expression": "Number || True && False",
+        "result": 5
+      },
+      {
+        "expression": "(Number || True) && False",
+        "result": false
+      },
+      {
+        "expression": "Number || (True && False)",
+        "result": 5
+      },
+      {
+        "expression": "!True",
+        "result": false
+      },
+      {
+        "expression": "!False",
+        "result": true
+      },
+      {
+        "expression": "!Number",
+        "result": false
+      },
+      {
+        "expression": "!EmptyList",
+        "result": true
+      },
+      {
+        "expression": "True && !False",
+        "result": true
+      },
+      {
+        "expression": "True && !EmptyList",
+        "result": true
+      },
+      {
+        "expression": "!False && !EmptyList",
+        "result": true
+      },
+      {
+        "expression": "!(True && False)",
+        "result": true
+      },
+      {
+        "expression": "!Zero",
+        "result": false
+      },
+      {
+        "expression": "!!Zero",
+        "result": true
+      }
+    ]
+  },
+  {
+    "given": {
+      "one": 1,
+      "two": 2,
+      "three": 3
+    },
+    "cases": [
+      {
+        "expression": "one < two",
+        "result": true
+      },
+      {
+        "expression": "one <= two",
+        "result": true
+      },
+      {
+        "expression": 
"one == one", + "result": true + }, + { + "expression": "one == two", + "result": false + }, + { + "expression": "one > two", + "result": false + }, + { + "expression": "one >= two", + "result": false + }, + { + "expression": "one != two", + "result": true + }, + { + "expression": "one < two && three > one", + "result": true + }, + { + "expression": "one < two || three > one", + "result": true + }, + { + "expression": "one < two || three < one", + "result": true + }, + { + "expression": "two < one || three < one", + "result": false + } + ] + } +] diff --git a/vendor/github.com/jmespath/go-jmespath/compliance/current.json b/vendor/github.com/jmespath/go-jmespath/compliance/current.json new file mode 100644 index 000000000..0c26248d0 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/compliance/current.json @@ -0,0 +1,25 @@ +[ + { + "given": { + "foo": [{"name": "a"}, {"name": "b"}], + "bar": {"baz": "qux"} + }, + "cases": [ + { + "expression": "@", + "result": { + "foo": [{"name": "a"}, {"name": "b"}], + "bar": {"baz": "qux"} + } + }, + { + "expression": "@.bar", + "result": {"baz": "qux"} + }, + { + "expression": "@.foo[0]", + "result": {"name": "a"} + } + ] + } +] diff --git a/vendor/github.com/jmespath/go-jmespath/compliance/escape.json b/vendor/github.com/jmespath/go-jmespath/compliance/escape.json new file mode 100644 index 000000000..4a62d951a --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/compliance/escape.json @@ -0,0 +1,46 @@ +[{ + "given": { + "foo.bar": "dot", + "foo bar": "space", + "foo\nbar": "newline", + "foo\"bar": "doublequote", + "c:\\\\windows\\path": "windows", + "/unix/path": "unix", + "\"\"\"": "threequotes", + "bar": {"baz": "qux"} + }, + "cases": [ + { + "expression": "\"foo.bar\"", + "result": "dot" + }, + { + "expression": "\"foo bar\"", + "result": "space" + }, + { + "expression": "\"foo\\nbar\"", + "result": "newline" + }, + { + "expression": "\"foo\\\"bar\"", + "result": "doublequote" + }, + { + "expression": "\"c:\\\\\\\\windows\\\\path\"", + "result": "windows" + }, + { + "expression": "\"/unix/path\"", + "result": "unix" + }, + { + "expression": "\"\\\"\\\"\\\"\"", + "result": "threequotes" + }, + { + "expression": "\"bar\".\"baz\"", + "result": "qux" + } + ] +}] diff --git a/vendor/github.com/jmespath/go-jmespath/compliance/filters.json b/vendor/github.com/jmespath/go-jmespath/compliance/filters.json new file mode 100644 index 000000000..5b9f52b11 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/compliance/filters.json @@ -0,0 +1,468 @@ +[ + { + "given": {"foo": [{"name": "a"}, {"name": "b"}]}, + "cases": [ + { + "comment": "Matching a literal", + "expression": "foo[?name == 'a']", + "result": [{"name": "a"}] + } + ] + }, + { + "given": {"foo": [0, 1], "bar": [2, 3]}, + "cases": [ + { + "comment": "Matching a literal", + "expression": "*[?[0] == `0`]", + "result": [[], []] + } + ] + }, + { + "given": {"foo": [{"first": "foo", "last": "bar"}, + {"first": "foo", "last": "foo"}, + {"first": "foo", "last": "baz"}]}, + "cases": [ + { + "comment": "Matching an expression", + "expression": "foo[?first == last]", + "result": [{"first": "foo", "last": "foo"}] + }, + { + "comment": "Verify projection created from filter", + "expression": "foo[?first == last].first", + "result": ["foo"] + } + ] + }, + { + "given": {"foo": [{"age": 20}, + {"age": 25}, + {"age": 30}]}, + "cases": [ + { + "comment": "Greater than with a number", + "expression": "foo[?age > `25`]", + "result": [{"age": 30}] + }, + { + "expression": "foo[?age >= `25`]", + 
"result": [{"age": 25}, {"age": 30}] + }, + { + "comment": "Greater than with a number", + "expression": "foo[?age > `30`]", + "result": [] + }, + { + "comment": "Greater than with a number", + "expression": "foo[?age < `25`]", + "result": [{"age": 20}] + }, + { + "comment": "Greater than with a number", + "expression": "foo[?age <= `25`]", + "result": [{"age": 20}, {"age": 25}] + }, + { + "comment": "Greater than with a number", + "expression": "foo[?age < `20`]", + "result": [] + }, + { + "expression": "foo[?age == `20`]", + "result": [{"age": 20}] + }, + { + "expression": "foo[?age != `20`]", + "result": [{"age": 25}, {"age": 30}] + } + ] + }, + { + "given": {"foo": [{"top": {"name": "a"}}, + {"top": {"name": "b"}}]}, + "cases": [ + { + "comment": "Filter with subexpression", + "expression": "foo[?top.name == 'a']", + "result": [{"top": {"name": "a"}}] + } + ] + }, + { + "given": {"foo": [{"top": {"first": "foo", "last": "bar"}}, + {"top": {"first": "foo", "last": "foo"}}, + {"top": {"first": "foo", "last": "baz"}}]}, + "cases": [ + { + "comment": "Matching an expression", + "expression": "foo[?top.first == top.last]", + "result": [{"top": {"first": "foo", "last": "foo"}}] + }, + { + "comment": "Matching a JSON array", + "expression": "foo[?top == `{\"first\": \"foo\", \"last\": \"bar\"}`]", + "result": [{"top": {"first": "foo", "last": "bar"}}] + } + ] + }, + { + "given": {"foo": [ + {"key": true}, + {"key": false}, + {"key": 0}, + {"key": 1}, + {"key": [0]}, + {"key": {"bar": [0]}}, + {"key": null}, + {"key": [1]}, + {"key": {"a":2}} + ]}, + "cases": [ + { + "expression": "foo[?key == `true`]", + "result": [{"key": true}] + }, + { + "expression": "foo[?key == `false`]", + "result": [{"key": false}] + }, + { + "expression": "foo[?key == `0`]", + "result": [{"key": 0}] + }, + { + "expression": "foo[?key == `1`]", + "result": [{"key": 1}] + }, + { + "expression": "foo[?key == `[0]`]", + "result": [{"key": [0]}] + }, + { + "expression": "foo[?key == `{\"bar\": [0]}`]", + "result": [{"key": {"bar": [0]}}] + }, + { + "expression": "foo[?key == `null`]", + "result": [{"key": null}] + }, + { + "expression": "foo[?key == `[1]`]", + "result": [{"key": [1]}] + }, + { + "expression": "foo[?key == `{\"a\":2}`]", + "result": [{"key": {"a":2}}] + }, + { + "expression": "foo[?`true` == key]", + "result": [{"key": true}] + }, + { + "expression": "foo[?`false` == key]", + "result": [{"key": false}] + }, + { + "expression": "foo[?`0` == key]", + "result": [{"key": 0}] + }, + { + "expression": "foo[?`1` == key]", + "result": [{"key": 1}] + }, + { + "expression": "foo[?`[0]` == key]", + "result": [{"key": [0]}] + }, + { + "expression": "foo[?`{\"bar\": [0]}` == key]", + "result": [{"key": {"bar": [0]}}] + }, + { + "expression": "foo[?`null` == key]", + "result": [{"key": null}] + }, + { + "expression": "foo[?`[1]` == key]", + "result": [{"key": [1]}] + }, + { + "expression": "foo[?`{\"a\":2}` == key]", + "result": [{"key": {"a":2}}] + }, + { + "expression": "foo[?key != `true`]", + "result": [{"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?key != `false`]", + "result": [{"key": true}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?key != `0`]", + "result": [{"key": true}, {"key": false}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + 
"expression": "foo[?key != `1`]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?key != `null`]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?key != `[1]`]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": {"a":2}}] + }, + { + "expression": "foo[?key != `{\"a\":2}`]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}] + }, + { + "expression": "foo[?`true` != key]", + "result": [{"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?`false` != key]", + "result": [{"key": true}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?`0` != key]", + "result": [{"key": true}, {"key": false}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?`1` != key]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?`null` != key]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?`[1]` != key]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": {"a":2}}] + }, + { + "expression": "foo[?`{\"a\":2}` != key]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}] + } + ] + }, + { + "given": {"reservations": [ + {"instances": [ + {"foo": 1, "bar": 2}, {"foo": 1, "bar": 3}, + {"foo": 1, "bar": 2}, {"foo": 2, "bar": 1}]}]}, + "cases": [ + { + "expression": "reservations[].instances[?bar==`1`]", + "result": [[{"foo": 2, "bar": 1}]] + }, + { + "expression": "reservations[*].instances[?bar==`1`]", + "result": [[{"foo": 2, "bar": 1}]] + }, + { + "expression": "reservations[].instances[?bar==`1`][]", + "result": [{"foo": 2, "bar": 1}] + } + ] + }, + { + "given": { + "baz": "other", + "foo": [ + {"bar": 1}, {"bar": 2}, {"bar": 3}, {"bar": 4}, {"bar": 1, "baz": 2} + ] + }, + "cases": [ + { + "expression": "foo[?bar==`1`].bar[0]", + "result": [] + } + ] + }, + { + "given": { + "foo": [ + {"a": 1, "b": {"c": "x"}}, + {"a": 1, "b": {"c": "y"}}, + {"a": 1, "b": {"c": "z"}}, + {"a": 2, "b": {"c": "z"}}, + {"a": 1, "baz": 2} + ] + }, + "cases": [ + { + "expression": "foo[?a==`1`].b.c", + "result": ["x", "y", "z"] + } + ] + }, + { + "given": {"foo": [{"name": "a"}, {"name": "b"}, {"name": "c"}]}, + "cases": [ + { + "comment": "Filter with or expression", + "expression": "foo[?name == 'a' || name == 'b']", + "result": [{"name": "a"}, {"name": "b"}] + }, + { + "expression": "foo[?name == 'a' || name == 'e']", + "result": [{"name": "a"}] + }, + { + "expression": "foo[?name == 'a' || name == 'b' || name == 'c']", + "result": [{"name": "a"}, {"name": "b"}, {"name": "c"}] + } + ] + }, + { + "given": {"foo": [{"a": 1, "b": 2}, {"a": 1, "b": 3}]}, + "cases": [ + 
{ + "comment": "Filter with and expression", + "expression": "foo[?a == `1` && b == `2`]", + "result": [{"a": 1, "b": 2}] + }, + { + "expression": "foo[?a == `1` && b == `4`]", + "result": [] + } + ] + }, + { + "given": {"foo": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}]}, + "cases": [ + { + "comment": "Filter with Or and And expressions", + "expression": "foo[?c == `3` || a == `1` && b == `4`]", + "result": [{"a": 1, "b": 2, "c": 3}] + }, + { + "expression": "foo[?b == `2` || a == `3` && b == `4`]", + "result": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}] + }, + { + "expression": "foo[?a == `3` && b == `4` || b == `2`]", + "result": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}] + }, + { + "expression": "foo[?(a == `3` && b == `4`) || b == `2`]", + "result": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}] + }, + { + "expression": "foo[?((a == `3` && b == `4`)) || b == `2`]", + "result": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}] + }, + { + "expression": "foo[?a == `3` && (b == `4` || b == `2`)]", + "result": [{"a": 3, "b": 4}] + }, + { + "expression": "foo[?a == `3` && ((b == `4` || b == `2`))]", + "result": [{"a": 3, "b": 4}] + } + ] + }, + { + "given": {"foo": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}]}, + "cases": [ + { + "comment": "Verify precedence of or/and expressions", + "expression": "foo[?a == `1` || b ==`2` && c == `5`]", + "result": [{"a": 1, "b": 2, "c": 3}] + }, + { + "comment": "Parentheses can alter precedence", + "expression": "foo[?(a == `1` || b ==`2`) && c == `5`]", + "result": [] + }, + { + "comment": "Not expressions combined with and/or", + "expression": "foo[?!(a == `1` || b ==`2`)]", + "result": [{"a": 3, "b": 4}] + } + ] + }, + { + "given": { + "foo": [ + {"key": true}, + {"key": false}, + {"key": []}, + {"key": {}}, + {"key": [0]}, + {"key": {"a": "b"}}, + {"key": 0}, + {"key": 1}, + {"key": null}, + {"notkey": true} + ] + }, + "cases": [ + { + "comment": "Unary filter expression", + "expression": "foo[?key]", + "result": [ + {"key": true}, {"key": [0]}, {"key": {"a": "b"}}, + {"key": 0}, {"key": 1} + ] + }, + { + "comment": "Unary not filter expression", + "expression": "foo[?!key]", + "result": [ + {"key": false}, {"key": []}, {"key": {}}, + {"key": null}, {"notkey": true} + ] + }, + { + "comment": "Equality with null RHS", + "expression": "foo[?key == `null`]", + "result": [ + {"key": null}, {"notkey": true} + ] + } + ] + }, + { + "given": { + "foo": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + "cases": [ + { + "comment": "Using @ in a filter expression", + "expression": "foo[?@ < `5`]", + "result": [0, 1, 2, 3, 4] + }, + { + "comment": "Using @ in a filter expression", + "expression": "foo[?`5` > @]", + "result": [0, 1, 2, 3, 4] + }, + { + "comment": "Using @ in a filter expression", + "expression": "foo[?@ == @]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + } + ] + } +] diff --git a/vendor/github.com/jmespath/go-jmespath/compliance/functions.json b/vendor/github.com/jmespath/go-jmespath/compliance/functions.json new file mode 100644 index 000000000..8b8db363a --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/compliance/functions.json @@ -0,0 +1,825 @@ +[{ + "given": + { + "foo": -1, + "zero": 0, + "numbers": [-1, 3, 4, 5], + "array": [-1, 3, 4, 5, "a", "100"], + "strings": ["a", "b", "c"], + "decimals": [1.01, 1.2, -1.5], + "str": "Str", + "false": false, + "empty_list": [], + "empty_hash": {}, + "objects": {"foo": "bar", "bar": "baz"}, + "null_key": null + }, + "cases": [ + { + "expression": "abs(foo)", + "result": 1 + }, + { + "expression": 
"abs(foo)", + "result": 1 + }, + { + "expression": "abs(str)", + "error": "invalid-type" + }, + { + "expression": "abs(array[1])", + "result": 3 + }, + { + "expression": "abs(array[1])", + "result": 3 + }, + { + "expression": "abs(`false`)", + "error": "invalid-type" + }, + { + "expression": "abs(`-24`)", + "result": 24 + }, + { + "expression": "abs(`-24`)", + "result": 24 + }, + { + "expression": "abs(`1`, `2`)", + "error": "invalid-arity" + }, + { + "expression": "abs()", + "error": "invalid-arity" + }, + { + "expression": "unknown_function(`1`, `2`)", + "error": "unknown-function" + }, + { + "expression": "avg(numbers)", + "result": 2.75 + }, + { + "expression": "avg(array)", + "error": "invalid-type" + }, + { + "expression": "avg('abc')", + "error": "invalid-type" + }, + { + "expression": "avg(foo)", + "error": "invalid-type" + }, + { + "expression": "avg(@)", + "error": "invalid-type" + }, + { + "expression": "avg(strings)", + "error": "invalid-type" + }, + { + "expression": "ceil(`1.2`)", + "result": 2 + }, + { + "expression": "ceil(decimals[0])", + "result": 2 + }, + { + "expression": "ceil(decimals[1])", + "result": 2 + }, + { + "expression": "ceil(decimals[2])", + "result": -1 + }, + { + "expression": "ceil('string')", + "error": "invalid-type" + }, + { + "expression": "contains('abc', 'a')", + "result": true + }, + { + "expression": "contains('abc', 'd')", + "result": false + }, + { + "expression": "contains(`false`, 'd')", + "error": "invalid-type" + }, + { + "expression": "contains(strings, 'a')", + "result": true + }, + { + "expression": "contains(decimals, `1.2`)", + "result": true + }, + { + "expression": "contains(decimals, `false`)", + "result": false + }, + { + "expression": "ends_with(str, 'r')", + "result": true + }, + { + "expression": "ends_with(str, 'tr')", + "result": true + }, + { + "expression": "ends_with(str, 'Str')", + "result": true + }, + { + "expression": "ends_with(str, 'SStr')", + "result": false + }, + { + "expression": "ends_with(str, 'foo')", + "result": false + }, + { + "expression": "ends_with(str, `0`)", + "error": "invalid-type" + }, + { + "expression": "floor(`1.2`)", + "result": 1 + }, + { + "expression": "floor('string')", + "error": "invalid-type" + }, + { + "expression": "floor(decimals[0])", + "result": 1 + }, + { + "expression": "floor(foo)", + "result": -1 + }, + { + "expression": "floor(str)", + "error": "invalid-type" + }, + { + "expression": "length('abc')", + "result": 3 + }, + { + "expression": "length('✓foo')", + "result": 4 + }, + { + "expression": "length('')", + "result": 0 + }, + { + "expression": "length(@)", + "result": 12 + }, + { + "expression": "length(strings[0])", + "result": 1 + }, + { + "expression": "length(str)", + "result": 3 + }, + { + "expression": "length(array)", + "result": 6 + }, + { + "expression": "length(objects)", + "result": 2 + }, + { + "expression": "length(`false`)", + "error": "invalid-type" + }, + { + "expression": "length(foo)", + "error": "invalid-type" + }, + { + "expression": "length(strings[0])", + "result": 1 + }, + { + "expression": "max(numbers)", + "result": 5 + }, + { + "expression": "max(decimals)", + "result": 1.2 + }, + { + "expression": "max(strings)", + "result": "c" + }, + { + "expression": "max(abc)", + "error": "invalid-type" + }, + { + "expression": "max(array)", + "error": "invalid-type" + }, + { + "expression": "max(decimals)", + "result": 1.2 + }, + { + "expression": "max(empty_list)", + "result": null + }, + { + "expression": "merge(`{}`)", + "result": {} + }, + { + "expression": 
"merge(`{}`, `{}`)", + "result": {} + }, + { + "expression": "merge(`{\"a\": 1}`, `{\"b\": 2}`)", + "result": {"a": 1, "b": 2} + }, + { + "expression": "merge(`{\"a\": 1}`, `{\"a\": 2}`)", + "result": {"a": 2} + }, + { + "expression": "merge(`{\"a\": 1, \"b\": 2}`, `{\"a\": 2, \"c\": 3}`, `{\"d\": 4}`)", + "result": {"a": 2, "b": 2, "c": 3, "d": 4} + }, + { + "expression": "min(numbers)", + "result": -1 + }, + { + "expression": "min(decimals)", + "result": -1.5 + }, + { + "expression": "min(abc)", + "error": "invalid-type" + }, + { + "expression": "min(array)", + "error": "invalid-type" + }, + { + "expression": "min(empty_list)", + "result": null + }, + { + "expression": "min(decimals)", + "result": -1.5 + }, + { + "expression": "min(strings)", + "result": "a" + }, + { + "expression": "type('abc')", + "result": "string" + }, + { + "expression": "type(`1.0`)", + "result": "number" + }, + { + "expression": "type(`2`)", + "result": "number" + }, + { + "expression": "type(`true`)", + "result": "boolean" + }, + { + "expression": "type(`false`)", + "result": "boolean" + }, + { + "expression": "type(`null`)", + "result": "null" + }, + { + "expression": "type(`[0]`)", + "result": "array" + }, + { + "expression": "type(`{\"a\": \"b\"}`)", + "result": "object" + }, + { + "expression": "type(@)", + "result": "object" + }, + { + "expression": "sort(keys(objects))", + "result": ["bar", "foo"] + }, + { + "expression": "keys(foo)", + "error": "invalid-type" + }, + { + "expression": "keys(strings)", + "error": "invalid-type" + }, + { + "expression": "keys(`false`)", + "error": "invalid-type" + }, + { + "expression": "sort(values(objects))", + "result": ["bar", "baz"] + }, + { + "expression": "keys(empty_hash)", + "result": [] + }, + { + "expression": "values(foo)", + "error": "invalid-type" + }, + { + "expression": "join(', ', strings)", + "result": "a, b, c" + }, + { + "expression": "join(', ', strings)", + "result": "a, b, c" + }, + { + "expression": "join(',', `[\"a\", \"b\"]`)", + "result": "a,b" + }, + { + "expression": "join(',', `[\"a\", 0]`)", + "error": "invalid-type" + }, + { + "expression": "join(', ', str)", + "error": "invalid-type" + }, + { + "expression": "join('|', strings)", + "result": "a|b|c" + }, + { + "expression": "join(`2`, strings)", + "error": "invalid-type" + }, + { + "expression": "join('|', decimals)", + "error": "invalid-type" + }, + { + "expression": "join('|', decimals[].to_string(@))", + "result": "1.01|1.2|-1.5" + }, + { + "expression": "join('|', empty_list)", + "result": "" + }, + { + "expression": "reverse(numbers)", + "result": [5, 4, 3, -1] + }, + { + "expression": "reverse(array)", + "result": ["100", "a", 5, 4, 3, -1] + }, + { + "expression": "reverse(`[]`)", + "result": [] + }, + { + "expression": "reverse('')", + "result": "" + }, + { + "expression": "reverse('hello world')", + "result": "dlrow olleh" + }, + { + "expression": "starts_with(str, 'S')", + "result": true + }, + { + "expression": "starts_with(str, 'St')", + "result": true + }, + { + "expression": "starts_with(str, 'Str')", + "result": true + }, + { + "expression": "starts_with(str, 'String')", + "result": false + }, + { + "expression": "starts_with(str, `0`)", + "error": "invalid-type" + }, + { + "expression": "sum(numbers)", + "result": 11 + }, + { + "expression": "sum(decimals)", + "result": 0.71 + }, + { + "expression": "sum(array)", + "error": "invalid-type" + }, + { + "expression": "sum(array[].to_number(@))", + "result": 111 + }, + { + "expression": "sum(`[]`)", + "result": 0 + }, + { + 
"expression": "to_array('foo')", + "result": ["foo"] + }, + { + "expression": "to_array(`0`)", + "result": [0] + }, + { + "expression": "to_array(objects)", + "result": [{"foo": "bar", "bar": "baz"}] + }, + { + "expression": "to_array(`[1, 2, 3]`)", + "result": [1, 2, 3] + }, + { + "expression": "to_array(false)", + "result": [false] + }, + { + "expression": "to_string('foo')", + "result": "foo" + }, + { + "expression": "to_string(`1.2`)", + "result": "1.2" + }, + { + "expression": "to_string(`[0, 1]`)", + "result": "[0,1]" + }, + { + "expression": "to_number('1.0')", + "result": 1.0 + }, + { + "expression": "to_number('1.1')", + "result": 1.1 + }, + { + "expression": "to_number('4')", + "result": 4 + }, + { + "expression": "to_number('notanumber')", + "result": null + }, + { + "expression": "to_number(`false`)", + "result": null + }, + { + "expression": "to_number(`null`)", + "result": null + }, + { + "expression": "to_number(`[0]`)", + "result": null + }, + { + "expression": "to_number(`{\"foo\": 0}`)", + "result": null + }, + { + "expression": "\"to_string\"(`1.0`)", + "error": "syntax" + }, + { + "expression": "sort(numbers)", + "result": [-1, 3, 4, 5] + }, + { + "expression": "sort(strings)", + "result": ["a", "b", "c"] + }, + { + "expression": "sort(decimals)", + "result": [-1.5, 1.01, 1.2] + }, + { + "expression": "sort(array)", + "error": "invalid-type" + }, + { + "expression": "sort(abc)", + "error": "invalid-type" + }, + { + "expression": "sort(empty_list)", + "result": [] + }, + { + "expression": "sort(@)", + "error": "invalid-type" + }, + { + "expression": "not_null(unknown_key, str)", + "result": "Str" + }, + { + "expression": "not_null(unknown_key, foo.bar, empty_list, str)", + "result": [] + }, + { + "expression": "not_null(unknown_key, null_key, empty_list, str)", + "result": [] + }, + { + "expression": "not_null(all, expressions, are_null)", + "result": null + }, + { + "expression": "not_null()", + "error": "invalid-arity" + }, + { + "description": "function projection on single arg function", + "expression": "numbers[].to_string(@)", + "result": ["-1", "3", "4", "5"] + }, + { + "description": "function projection on single arg function", + "expression": "array[].to_number(@)", + "result": [-1, 3, 4, 5, 100] + } + ] +}, { + "given": + { + "foo": [ + {"b": "b", "a": "a"}, + {"c": "c", "b": "b"}, + {"d": "d", "c": "c"}, + {"e": "e", "d": "d"}, + {"f": "f", "e": "e"} + ] + }, + "cases": [ + { + "description": "function projection on variadic function", + "expression": "foo[].not_null(f, e, d, c, b, a)", + "result": ["b", "c", "d", "e", "f"] + } + ] +}, { + "given": + { + "people": [ + {"age": 20, "age_str": "20", "bool": true, "name": "a", "extra": "foo"}, + {"age": 40, "age_str": "40", "bool": false, "name": "b", "extra": "bar"}, + {"age": 30, "age_str": "30", "bool": true, "name": "c"}, + {"age": 50, "age_str": "50", "bool": false, "name": "d"}, + {"age": 10, "age_str": "10", "bool": true, "name": 3} + ] + }, + "cases": [ + { + "description": "sort by field expression", + "expression": "sort_by(people, &age)", + "result": [ + {"age": 10, "age_str": "10", "bool": true, "name": 3}, + {"age": 20, "age_str": "20", "bool": true, "name": "a", "extra": "foo"}, + {"age": 30, "age_str": "30", "bool": true, "name": "c"}, + {"age": 40, "age_str": "40", "bool": false, "name": "b", "extra": "bar"}, + {"age": 50, "age_str": "50", "bool": false, "name": "d"} + ] + }, + { + "expression": "sort_by(people, &age_str)", + "result": [ + {"age": 10, "age_str": "10", "bool": true, "name": 3}, + 
{"age": 20, "age_str": "20", "bool": true, "name": "a", "extra": "foo"}, + {"age": 30, "age_str": "30", "bool": true, "name": "c"}, + {"age": 40, "age_str": "40", "bool": false, "name": "b", "extra": "bar"}, + {"age": 50, "age_str": "50", "bool": false, "name": "d"} + ] + }, + { + "description": "sort by function expression", + "expression": "sort_by(people, &to_number(age_str))", + "result": [ + {"age": 10, "age_str": "10", "bool": true, "name": 3}, + {"age": 20, "age_str": "20", "bool": true, "name": "a", "extra": "foo"}, + {"age": 30, "age_str": "30", "bool": true, "name": "c"}, + {"age": 40, "age_str": "40", "bool": false, "name": "b", "extra": "bar"}, + {"age": 50, "age_str": "50", "bool": false, "name": "d"} + ] + }, + { + "description": "function projection on sort_by function", + "expression": "sort_by(people, &age)[].name", + "result": [3, "a", "c", "b", "d"] + }, + { + "expression": "sort_by(people, &extra)", + "error": "invalid-type" + }, + { + "expression": "sort_by(people, &bool)", + "error": "invalid-type" + }, + { + "expression": "sort_by(people, &name)", + "error": "invalid-type" + }, + { + "expression": "sort_by(people, name)", + "error": "invalid-type" + }, + { + "expression": "sort_by(people, &age)[].extra", + "result": ["foo", "bar"] + }, + { + "expression": "sort_by(`[]`, &age)", + "result": [] + }, + { + "expression": "max_by(people, &age)", + "result": {"age": 50, "age_str": "50", "bool": false, "name": "d"} + }, + { + "expression": "max_by(people, &age_str)", + "result": {"age": 50, "age_str": "50", "bool": false, "name": "d"} + }, + { + "expression": "max_by(people, &bool)", + "error": "invalid-type" + }, + { + "expression": "max_by(people, &extra)", + "error": "invalid-type" + }, + { + "expression": "max_by(people, &to_number(age_str))", + "result": {"age": 50, "age_str": "50", "bool": false, "name": "d"} + }, + { + "expression": "min_by(people, &age)", + "result": {"age": 10, "age_str": "10", "bool": true, "name": 3} + }, + { + "expression": "min_by(people, &age_str)", + "result": {"age": 10, "age_str": "10", "bool": true, "name": 3} + }, + { + "expression": "min_by(people, &bool)", + "error": "invalid-type" + }, + { + "expression": "min_by(people, &extra)", + "error": "invalid-type" + }, + { + "expression": "min_by(people, &to_number(age_str))", + "result": {"age": 10, "age_str": "10", "bool": true, "name": 3} + } + ] +}, { + "given": + { + "people": [ + {"age": 10, "order": "1"}, + {"age": 10, "order": "2"}, + {"age": 10, "order": "3"}, + {"age": 10, "order": "4"}, + {"age": 10, "order": "5"}, + {"age": 10, "order": "6"}, + {"age": 10, "order": "7"}, + {"age": 10, "order": "8"}, + {"age": 10, "order": "9"}, + {"age": 10, "order": "10"}, + {"age": 10, "order": "11"} + ] + }, + "cases": [ + { + "description": "stable sort order", + "expression": "sort_by(people, &age)", + "result": [ + {"age": 10, "order": "1"}, + {"age": 10, "order": "2"}, + {"age": 10, "order": "3"}, + {"age": 10, "order": "4"}, + {"age": 10, "order": "5"}, + {"age": 10, "order": "6"}, + {"age": 10, "order": "7"}, + {"age": 10, "order": "8"}, + {"age": 10, "order": "9"}, + {"age": 10, "order": "10"}, + {"age": 10, "order": "11"} + ] + } + ] +}, { + "given": + { + "people": [ + {"a": 10, "b": 1, "c": "z"}, + {"a": 10, "b": 2, "c": null}, + {"a": 10, "b": 3}, + {"a": 10, "b": 4, "c": "z"}, + {"a": 10, "b": 5, "c": null}, + {"a": 10, "b": 6}, + {"a": 10, "b": 7, "c": "z"}, + {"a": 10, "b": 8, "c": null}, + {"a": 10, "b": 9} + ], + "empty": [] + }, + "cases": [ + { + "expression": "map(&a, 
people)", + "result": [10, 10, 10, 10, 10, 10, 10, 10, 10] + }, + { + "expression": "map(&c, people)", + "result": ["z", null, null, "z", null, null, "z", null, null] + }, + { + "expression": "map(&a, badkey)", + "error": "invalid-type" + }, + { + "expression": "map(&foo, empty)", + "result": [] + } + ] +}, { + "given": { + "array": [ + { + "foo": {"bar": "yes1"} + }, + { + "foo": {"bar": "yes2"} + }, + { + "foo1": {"bar": "no"} + } + ]}, + "cases": [ + { + "expression": "map(&foo.bar, array)", + "result": ["yes1", "yes2", null] + }, + { + "expression": "map(&foo1.bar, array)", + "result": [null, null, "no"] + }, + { + "expression": "map(&foo.bar.baz, array)", + "result": [null, null, null] + } + ] +}, { + "given": { + "array": [[1, 2, 3, [4]], [5, 6, 7, [8, 9]]] + }, + "cases": [ + { + "expression": "map(&[], array)", + "result": [[1, 2, 3, 4], [5, 6, 7, 8, 9]] + } + ] +} +] diff --git a/vendor/github.com/jmespath/go-jmespath/compliance/identifiers.json b/vendor/github.com/jmespath/go-jmespath/compliance/identifiers.json new file mode 100644 index 000000000..7998a41ac --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/compliance/identifiers.json @@ -0,0 +1,1377 @@ +[ + { + "given": { + "__L": true + }, + "cases": [ + { + "expression": "__L", + "result": true + } + ] + }, + { + "given": { + "!\r": true + }, + "cases": [ + { + "expression": "\"!\\r\"", + "result": true + } + ] + }, + { + "given": { + "Y_1623": true + }, + "cases": [ + { + "expression": "Y_1623", + "result": true + } + ] + }, + { + "given": { + "x": true + }, + "cases": [ + { + "expression": "x", + "result": true + } + ] + }, + { + "given": { + "\tF\uCebb": true + }, + "cases": [ + { + "expression": "\"\\tF\\uCebb\"", + "result": true + } + ] + }, + { + "given": { + " \t": true + }, + "cases": [ + { + "expression": "\" \\t\"", + "result": true + } + ] + }, + { + "given": { + " ": true + }, + "cases": [ + { + "expression": "\" \"", + "result": true + } + ] + }, + { + "given": { + "v2": true + }, + "cases": [ + { + "expression": "v2", + "result": true + } + ] + }, + { + "given": { + "\t": true + }, + "cases": [ + { + "expression": "\"\\t\"", + "result": true + } + ] + }, + { + "given": { + "_X": true + }, + "cases": [ + { + "expression": "_X", + "result": true + } + ] + }, + { + "given": { + "\t4\ud9da\udd15": true + }, + "cases": [ + { + "expression": "\"\\t4\\ud9da\\udd15\"", + "result": true + } + ] + }, + { + "given": { + "v24_W": true + }, + "cases": [ + { + "expression": "v24_W", + "result": true + } + ] + }, + { + "given": { + "H": true + }, + "cases": [ + { + "expression": "\"H\"", + "result": true + } + ] + }, + { + "given": { + "\f": true + }, + "cases": [ + { + "expression": "\"\\f\"", + "result": true + } + ] + }, + { + "given": { + "E4": true + }, + "cases": [ + { + "expression": "\"E4\"", + "result": true + } + ] + }, + { + "given": { + "!": true + }, + "cases": [ + { + "expression": "\"!\"", + "result": true + } + ] + }, + { + "given": { + "tM": true + }, + "cases": [ + { + "expression": "tM", + "result": true + } + ] + }, + { + "given": { + " [": true + }, + "cases": [ + { + "expression": "\" [\"", + "result": true + } + ] + }, + { + "given": { + "R!": true + }, + "cases": [ + { + "expression": "\"R!\"", + "result": true + } + ] + }, + { + "given": { + "_6W": true + }, + "cases": [ + { + "expression": "_6W", + "result": true + } + ] + }, + { + "given": { + "\uaBA1\r": true + }, + "cases": [ + { + "expression": "\"\\uaBA1\\r\"", + "result": true + } + ] + }, + { + "given": { + "tL7": true + }, + "cases": 
[ + { + "expression": "tL7", + "result": true + } + ] + }, + { + "given": { + "<": true + }, + "cases": [ + { + "expression": "\">\"", + "result": true + } + ] + }, + { + "given": { + "hvu": true + }, + "cases": [ + { + "expression": "hvu", + "result": true + } + ] + }, + { + "given": { + "; !": true + }, + "cases": [ + { + "expression": "\"; !\"", + "result": true + } + ] + }, + { + "given": { + "hU": true + }, + "cases": [ + { + "expression": "hU", + "result": true + } + ] + }, + { + "given": { + "!I\n\/": true + }, + "cases": [ + { + "expression": "\"!I\\n\\/\"", + "result": true + } + ] + }, + { + "given": { + "\uEEbF": true + }, + "cases": [ + { + "expression": "\"\\uEEbF\"", + "result": true + } + ] + }, + { + "given": { + "U)\t": true + }, + "cases": [ + { + "expression": "\"U)\\t\"", + "result": true + } + ] + }, + { + "given": { + "fa0_9": true + }, + "cases": [ + { + "expression": "fa0_9", + "result": true + } + ] + }, + { + "given": { + "/": true + }, + "cases": [ + { + "expression": "\"/\"", + "result": true + } + ] + }, + { + "given": { + "Gy": true + }, + "cases": [ + { + "expression": "Gy", + "result": true + } + ] + }, + { + "given": { + "\b": true + }, + "cases": [ + { + "expression": "\"\\b\"", + "result": true + } + ] + }, + { + "given": { + "<": true + }, + "cases": [ + { + "expression": "\"<\"", + "result": true + } + ] + }, + { + "given": { + "\t": true + }, + "cases": [ + { + "expression": "\"\\t\"", + "result": true + } + ] + }, + { + "given": { + "\t&\\\r": true + }, + "cases": [ + { + "expression": "\"\\t&\\\\\\r\"", + "result": true + } + ] + }, + { + "given": { + "#": true + }, + "cases": [ + { + "expression": "\"#\"", + "result": true + } + ] + }, + { + "given": { + "B__": true + }, + "cases": [ + { + "expression": "B__", + "result": true + } + ] + }, + { + "given": { + "\nS \n": true + }, + "cases": [ + { + "expression": "\"\\nS \\n\"", + "result": true + } + ] + }, + { + "given": { + "Bp": true + }, + "cases": [ + { + "expression": "Bp", + "result": true + } + ] + }, + { + "given": { + ",\t;": true + }, + "cases": [ + { + "expression": "\",\\t;\"", + "result": true + } + ] + }, + { + "given": { + "B_q": true + }, + "cases": [ + { + "expression": "B_q", + "result": true + } + ] + }, + { + "given": { + "\/+\t\n\b!Z": true + }, + "cases": [ + { + "expression": "\"\\/+\\t\\n\\b!Z\"", + "result": true + } + ] + }, + { + "given": { + "\udadd\udfc7\\ueFAc": true + }, + "cases": [ + { + "expression": "\"\udadd\udfc7\\\\ueFAc\"", + "result": true + } + ] + }, + { + "given": { + ":\f": true + }, + "cases": [ + { + "expression": "\":\\f\"", + "result": true + } + ] + }, + { + "given": { + "\/": true + }, + "cases": [ + { + "expression": "\"\\/\"", + "result": true + } + ] + }, + { + "given": { + "_BW_6Hg_Gl": true + }, + "cases": [ + { + "expression": "_BW_6Hg_Gl", + "result": true + } + ] + }, + { + "given": { + "\udbcf\udc02": true + }, + "cases": [ + { + "expression": "\"\udbcf\udc02\"", + "result": true + } + ] + }, + { + "given": { + "zs1DC": true + }, + "cases": [ + { + "expression": "zs1DC", + "result": true + } + ] + }, + { + "given": { + "__434": true + }, + "cases": [ + { + "expression": "__434", + "result": true + } + ] + }, + { + "given": { + "\udb94\udd41": true + }, + "cases": [ + { + "expression": "\"\udb94\udd41\"", + "result": true + } + ] + }, + { + "given": { + "Z_5": true + }, + "cases": [ + { + "expression": "Z_5", + "result": true + } + ] + }, + { + "given": { + "z_M_": true + }, + "cases": [ + { + "expression": "z_M_", + "result": true + } + ] + 
}, + { + "given": { + "YU_2": true + }, + "cases": [ + { + "expression": "YU_2", + "result": true + } + ] + }, + { + "given": { + "_0": true + }, + "cases": [ + { + "expression": "_0", + "result": true + } + ] + }, + { + "given": { + "\b+": true + }, + "cases": [ + { + "expression": "\"\\b+\"", + "result": true + } + ] + }, + { + "given": { + "\"": true + }, + "cases": [ + { + "expression": "\"\\\"\"", + "result": true + } + ] + }, + { + "given": { + "D7": true + }, + "cases": [ + { + "expression": "D7", + "result": true + } + ] + }, + { + "given": { + "_62L": true + }, + "cases": [ + { + "expression": "_62L", + "result": true + } + ] + }, + { + "given": { + "\tK\t": true + }, + "cases": [ + { + "expression": "\"\\tK\\t\"", + "result": true + } + ] + }, + { + "given": { + "\n\\\f": true + }, + "cases": [ + { + "expression": "\"\\n\\\\\\f\"", + "result": true + } + ] + }, + { + "given": { + "I_": true + }, + "cases": [ + { + "expression": "I_", + "result": true + } + ] + }, + { + "given": { + "W_a0_": true + }, + "cases": [ + { + "expression": "W_a0_", + "result": true + } + ] + }, + { + "given": { + "BQ": true + }, + "cases": [ + { + "expression": "BQ", + "result": true + } + ] + }, + { + "given": { + "\tX$\uABBb": true + }, + "cases": [ + { + "expression": "\"\\tX$\\uABBb\"", + "result": true + } + ] + }, + { + "given": { + "Z9": true + }, + "cases": [ + { + "expression": "Z9", + "result": true + } + ] + }, + { + "given": { + "\b%\"\uda38\udd0f": true + }, + "cases": [ + { + "expression": "\"\\b%\\\"\uda38\udd0f\"", + "result": true + } + ] + }, + { + "given": { + "_F": true + }, + "cases": [ + { + "expression": "_F", + "result": true + } + ] + }, + { + "given": { + "!,": true + }, + "cases": [ + { + "expression": "\"!,\"", + "result": true + } + ] + }, + { + "given": { + "\"!": true + }, + "cases": [ + { + "expression": "\"\\\"!\"", + "result": true + } + ] + }, + { + "given": { + "Hh": true + }, + "cases": [ + { + "expression": "Hh", + "result": true + } + ] + }, + { + "given": { + "&": true + }, + "cases": [ + { + "expression": "\"&\"", + "result": true + } + ] + }, + { + "given": { + "9\r\\R": true + }, + "cases": [ + { + "expression": "\"9\\r\\\\R\"", + "result": true + } + ] + }, + { + "given": { + "M_k": true + }, + "cases": [ + { + "expression": "M_k", + "result": true + } + ] + }, + { + "given": { + "!\b\n\udb06\ude52\"\"": true + }, + "cases": [ + { + "expression": "\"!\\b\\n\udb06\ude52\\\"\\\"\"", + "result": true + } + ] + }, + { + "given": { + "6": true + }, + "cases": [ + { + "expression": "\"6\"", + "result": true + } + ] + }, + { + "given": { + "_7": true + }, + "cases": [ + { + "expression": "_7", + "result": true + } + ] + }, + { + "given": { + "0": true + }, + "cases": [ + { + "expression": "\"0\"", + "result": true + } + ] + }, + { + "given": { + "\\8\\": true + }, + "cases": [ + { + "expression": "\"\\\\8\\\\\"", + "result": true + } + ] + }, + { + "given": { + "b7eo": true + }, + "cases": [ + { + "expression": "b7eo", + "result": true + } + ] + }, + { + "given": { + "xIUo9": true + }, + "cases": [ + { + "expression": "xIUo9", + "result": true + } + ] + }, + { + "given": { + "5": true + }, + "cases": [ + { + "expression": "\"5\"", + "result": true + } + ] + }, + { + "given": { + "?": true + }, + "cases": [ + { + "expression": "\"?\"", + "result": true + } + ] + }, + { + "given": { + "sU": true + }, + "cases": [ + { + "expression": "sU", + "result": true + } + ] + }, + { + "given": { + "VH2&H\\\/": true + }, + "cases": [ + { + "expression": "\"VH2&H\\\\\\/\"", + 
"result": true + } + ] + }, + { + "given": { + "_C": true + }, + "cases": [ + { + "expression": "_C", + "result": true + } + ] + }, + { + "given": { + "_": true + }, + "cases": [ + { + "expression": "_", + "result": true + } + ] + }, + { + "given": { + "<\t": true + }, + "cases": [ + { + "expression": "\"<\\t\"", + "result": true + } + ] + }, + { + "given": { + "\uD834\uDD1E": true + }, + "cases": [ + { + "expression": "\"\\uD834\\uDD1E\"", + "result": true + } + ] + } +] diff --git a/vendor/github.com/jmespath/go-jmespath/compliance/indices.json b/vendor/github.com/jmespath/go-jmespath/compliance/indices.json new file mode 100644 index 000000000..aa03b35dd --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/compliance/indices.json @@ -0,0 +1,346 @@ +[{ + "given": + {"foo": {"bar": ["zero", "one", "two"]}}, + "cases": [ + { + "expression": "foo.bar[0]", + "result": "zero" + }, + { + "expression": "foo.bar[1]", + "result": "one" + }, + { + "expression": "foo.bar[2]", + "result": "two" + }, + { + "expression": "foo.bar[3]", + "result": null + }, + { + "expression": "foo.bar[-1]", + "result": "two" + }, + { + "expression": "foo.bar[-2]", + "result": "one" + }, + { + "expression": "foo.bar[-3]", + "result": "zero" + }, + { + "expression": "foo.bar[-4]", + "result": null + } + ] +}, +{ + "given": + {"foo": [{"bar": "one"}, {"bar": "two"}, {"bar": "three"}, {"notbar": "four"}]}, + "cases": [ + { + "expression": "foo.bar", + "result": null + }, + { + "expression": "foo[0].bar", + "result": "one" + }, + { + "expression": "foo[1].bar", + "result": "two" + }, + { + "expression": "foo[2].bar", + "result": "three" + }, + { + "expression": "foo[3].notbar", + "result": "four" + }, + { + "expression": "foo[3].bar", + "result": null + }, + { + "expression": "foo[0]", + "result": {"bar": "one"} + }, + { + "expression": "foo[1]", + "result": {"bar": "two"} + }, + { + "expression": "foo[2]", + "result": {"bar": "three"} + }, + { + "expression": "foo[3]", + "result": {"notbar": "four"} + }, + { + "expression": "foo[4]", + "result": null + } + ] +}, +{ + "given": [ + "one", "two", "three" + ], + "cases": [ + { + "expression": "[0]", + "result": "one" + }, + { + "expression": "[1]", + "result": "two" + }, + { + "expression": "[2]", + "result": "three" + }, + { + "expression": "[-1]", + "result": "three" + }, + { + "expression": "[-2]", + "result": "two" + }, + { + "expression": "[-3]", + "result": "one" + } + ] +}, +{ + "given": {"reservations": [ + {"instances": [{"foo": 1}, {"foo": 2}]} + ]}, + "cases": [ + { + "expression": "reservations[].instances[].foo", + "result": [1, 2] + }, + { + "expression": "reservations[].instances[].bar", + "result": [] + }, + { + "expression": "reservations[].notinstances[].foo", + "result": [] + }, + { + "expression": "reservations[].notinstances[].foo", + "result": [] + } + ] +}, +{ + "given": {"reservations": [{ + "instances": [ + {"foo": [{"bar": 1}, {"bar": 2}, {"notbar": 3}, {"bar": 4}]}, + {"foo": [{"bar": 5}, {"bar": 6}, {"notbar": [7]}, {"bar": 8}]}, + {"foo": "bar"}, + {"notfoo": [{"bar": 20}, {"bar": 21}, {"notbar": [7]}, {"bar": 22}]}, + {"bar": [{"baz": [1]}, {"baz": [2]}, {"baz": [3]}, {"baz": [4]}]}, + {"baz": [{"baz": [1, 2]}, {"baz": []}, {"baz": []}, {"baz": [3, 4]}]}, + {"qux": [{"baz": []}, {"baz": [1, 2, 3]}, {"baz": [4]}, {"baz": []}]} + ], + "otherkey": {"foo": [{"bar": 1}, {"bar": 2}, {"notbar": 3}, {"bar": 4}]} + }, { + "instances": [ + {"a": [{"bar": 1}, {"bar": 2}, {"notbar": 3}, {"bar": 4}]}, + {"b": [{"bar": 5}, {"bar": 6}, {"notbar": [7]}, 
{"bar": 8}]}, + {"c": "bar"}, + {"notfoo": [{"bar": 23}, {"bar": 24}, {"notbar": [7]}, {"bar": 25}]}, + {"qux": [{"baz": []}, {"baz": [1, 2, 3]}, {"baz": [4]}, {"baz": []}]} + ], + "otherkey": {"foo": [{"bar": 1}, {"bar": 2}, {"notbar": 3}, {"bar": 4}]} + } + ]}, + "cases": [ + { + "expression": "reservations[].instances[].foo[].bar", + "result": [1, 2, 4, 5, 6, 8] + }, + { + "expression": "reservations[].instances[].foo[].baz", + "result": [] + }, + { + "expression": "reservations[].instances[].notfoo[].bar", + "result": [20, 21, 22, 23, 24, 25] + }, + { + "expression": "reservations[].instances[].notfoo[].notbar", + "result": [[7], [7]] + }, + { + "expression": "reservations[].notinstances[].foo", + "result": [] + }, + { + "expression": "reservations[].instances[].foo[].notbar", + "result": [3, [7]] + }, + { + "expression": "reservations[].instances[].bar[].baz", + "result": [[1], [2], [3], [4]] + }, + { + "expression": "reservations[].instances[].baz[].baz", + "result": [[1, 2], [], [], [3, 4]] + }, + { + "expression": "reservations[].instances[].qux[].baz", + "result": [[], [1, 2, 3], [4], [], [], [1, 2, 3], [4], []] + }, + { + "expression": "reservations[].instances[].qux[].baz[]", + "result": [1, 2, 3, 4, 1, 2, 3, 4] + } + ] +}, +{ + "given": { + "foo": [ + [["one", "two"], ["three", "four"]], + [["five", "six"], ["seven", "eight"]], + [["nine"], ["ten"]] + ] + }, + "cases": [ + { + "expression": "foo[]", + "result": [["one", "two"], ["three", "four"], ["five", "six"], + ["seven", "eight"], ["nine"], ["ten"]] + }, + { + "expression": "foo[][0]", + "result": ["one", "three", "five", "seven", "nine", "ten"] + }, + { + "expression": "foo[][1]", + "result": ["two", "four", "six", "eight"] + }, + { + "expression": "foo[][0][0]", + "result": [] + }, + { + "expression": "foo[][2][2]", + "result": [] + }, + { + "expression": "foo[][0][0][100]", + "result": [] + } + ] +}, +{ + "given": { + "foo": [{ + "bar": [ + { + "qux": 2, + "baz": 1 + }, + { + "qux": 4, + "baz": 3 + } + ] + }, + { + "bar": [ + { + "qux": 6, + "baz": 5 + }, + { + "qux": 8, + "baz": 7 + } + ] + } + ] + }, + "cases": [ + { + "expression": "foo", + "result": [{"bar": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}]}, + {"bar": [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]}] + }, + { + "expression": "foo[]", + "result": [{"bar": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}]}, + {"bar": [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]}] + }, + { + "expression": "foo[].bar", + "result": [[{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}], + [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]] + }, + { + "expression": "foo[].bar[]", + "result": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}, + {"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}] + }, + { + "expression": "foo[].bar[].baz", + "result": [1, 3, 5, 7] + } + ] +}, +{ + "given": { + "string": "string", + "hash": {"foo": "bar", "bar": "baz"}, + "number": 23, + "nullvalue": null + }, + "cases": [ + { + "expression": "string[]", + "result": null + }, + { + "expression": "hash[]", + "result": null + }, + { + "expression": "number[]", + "result": null + }, + { + "expression": "nullvalue[]", + "result": null + }, + { + "expression": "string[].foo", + "result": null + }, + { + "expression": "hash[].foo", + "result": null + }, + { + "expression": "number[].foo", + "result": null + }, + { + "expression": "nullvalue[].foo", + "result": null + }, + { + "expression": "nullvalue[].foo[].bar", + "result": null + } + ] +} +] diff --git a/vendor/github.com/jmespath/go-jmespath/compliance/literal.json 
b/vendor/github.com/jmespath/go-jmespath/compliance/literal.json new file mode 100644 index 000000000..c6706b971 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/compliance/literal.json @@ -0,0 +1,185 @@ +[ + { + "given": { + "foo": [{"name": "a"}, {"name": "b"}], + "bar": {"baz": "qux"} + }, + "cases": [ + { + "expression": "`\"foo\"`", + "result": "foo" + }, + { + "comment": "Interpret escaped unicode.", + "expression": "`\"\\u03a6\"`", + "result": "Φ" + }, + { + "expression": "`\"✓\"`", + "result": "✓" + }, + { + "expression": "`[1, 2, 3]`", + "result": [1, 2, 3] + }, + { + "expression": "`{\"a\": \"b\"}`", + "result": {"a": "b"} + }, + { + "expression": "`true`", + "result": true + }, + { + "expression": "`false`", + "result": false + }, + { + "expression": "`null`", + "result": null + }, + { + "expression": "`0`", + "result": 0 + }, + { + "expression": "`1`", + "result": 1 + }, + { + "expression": "`2`", + "result": 2 + }, + { + "expression": "`3`", + "result": 3 + }, + { + "expression": "`4`", + "result": 4 + }, + { + "expression": "`5`", + "result": 5 + }, + { + "expression": "`6`", + "result": 6 + }, + { + "expression": "`7`", + "result": 7 + }, + { + "expression": "`8`", + "result": 8 + }, + { + "expression": "`9`", + "result": 9 + }, + { + "comment": "Escaping a backtick in quotes", + "expression": "`\"foo\\`bar\"`", + "result": "foo`bar" + }, + { + "comment": "Double quote in literal", + "expression": "`\"foo\\\"bar\"`", + "result": "foo\"bar" + }, + { + "expression": "`\"1\\`\"`", + "result": "1`" + }, + { + "comment": "Multiple literal expressions with escapes", + "expression": "`\"\\\\\"`.{a:`\"b\"`}", + "result": {"a": "b"} + }, + { + "comment": "literal . identifier", + "expression": "`{\"a\": \"b\"}`.a", + "result": "b" + }, + { + "comment": "literal . identifier . identifier", + "expression": "`{\"a\": {\"b\": \"c\"}}`.a.b", + "result": "c" + }, + { + "comment": "literal . 
identifier bracket-expr", + "expression": "`[0, 1, 2]`[1]", + "result": 1 + } + ] + }, + { + "comment": "Literals", + "given": {"type": "object"}, + "cases": [ + { + "comment": "Literal with leading whitespace", + "expression": "` {\"foo\": true}`", + "result": {"foo": true} + }, + { + "comment": "Literal with trailing whitespace", + "expression": "`{\"foo\": true} `", + "result": {"foo": true} + }, + { + "comment": "Literal on RHS of subexpr not allowed", + "expression": "foo.`\"bar\"`", + "error": "syntax" + } + ] + }, + { + "comment": "Raw String Literals", + "given": {}, + "cases": [ + { + "expression": "'foo'", + "result": "foo" + }, + { + "expression": "' foo '", + "result": " foo " + }, + { + "expression": "'0'", + "result": "0" + }, + { + "expression": "'newline\n'", + "result": "newline\n" + }, + { + "expression": "'\n'", + "result": "\n" + }, + { + "expression": "'✓'", + "result": "✓" + }, + { + "expression": "'𝄞'", + "result": "𝄞" + }, + { + "expression": "' [foo] '", + "result": " [foo] " + }, + { + "expression": "'[foo]'", + "result": "[foo]" + }, + { + "comment": "Do not interpret escaped unicode.", + "expression": "'\\u03a6'", + "result": "\\u03a6" + } + ] + } +] diff --git a/vendor/github.com/jmespath/go-jmespath/compliance/multiselect.json b/vendor/github.com/jmespath/go-jmespath/compliance/multiselect.json new file mode 100644 index 000000000..8f2a481ed --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/compliance/multiselect.json @@ -0,0 +1,393 @@ +[{ + "given": { + "foo": { + "bar": "bar", + "baz": "baz", + "qux": "qux", + "nested": { + "one": { + "a": "first", + "b": "second", + "c": "third" + }, + "two": { + "a": "first", + "b": "second", + "c": "third" + }, + "three": { + "a": "first", + "b": "second", + "c": {"inner": "third"} + } + } + }, + "bar": 1, + "baz": 2, + "qux\"": 3 + }, + "cases": [ + { + "expression": "foo.{bar: bar}", + "result": {"bar": "bar"} + }, + { + "expression": "foo.{\"bar\": bar}", + "result": {"bar": "bar"} + }, + { + "expression": "foo.{\"foo.bar\": bar}", + "result": {"foo.bar": "bar"} + }, + { + "expression": "foo.{bar: bar, baz: baz}", + "result": {"bar": "bar", "baz": "baz"} + }, + { + "expression": "foo.{\"bar\": bar, \"baz\": baz}", + "result": {"bar": "bar", "baz": "baz"} + }, + { + "expression": "{\"baz\": baz, \"qux\\\"\": \"qux\\\"\"}", + "result": {"baz": 2, "qux\"": 3} + }, + { + "expression": "foo.{bar:bar,baz:baz}", + "result": {"bar": "bar", "baz": "baz"} + }, + { + "expression": "foo.{bar: bar,qux: qux}", + "result": {"bar": "bar", "qux": "qux"} + }, + { + "expression": "foo.{bar: bar, noexist: noexist}", + "result": {"bar": "bar", "noexist": null} + }, + { + "expression": "foo.{noexist: noexist, alsonoexist: alsonoexist}", + "result": {"noexist": null, "alsonoexist": null} + }, + { + "expression": "foo.badkey.{nokey: nokey, alsonokey: alsonokey}", + "result": null + }, + { + "expression": "foo.nested.*.{a: a,b: b}", + "result": [{"a": "first", "b": "second"}, + {"a": "first", "b": "second"}, + {"a": "first", "b": "second"}] + }, + { + "expression": "foo.nested.three.{a: a, cinner: c.inner}", + "result": {"a": "first", "cinner": "third"} + }, + { + "expression": "foo.nested.three.{a: a, c: c.inner.bad.key}", + "result": {"a": "first", "c": null} + }, + { + "expression": "foo.{a: nested.one.a, b: nested.two.b}", + "result": {"a": "first", "b": "second"} + }, + { + "expression": "{bar: bar, baz: baz}", + "result": {"bar": 1, "baz": 2} + }, + { + "expression": "{bar: bar}", + "result": {"bar": 1} + }, + { + "expression": 
"{otherkey: bar}", + "result": {"otherkey": 1} + }, + { + "expression": "{no: no, exist: exist}", + "result": {"no": null, "exist": null} + }, + { + "expression": "foo.[bar]", + "result": ["bar"] + }, + { + "expression": "foo.[bar,baz]", + "result": ["bar", "baz"] + }, + { + "expression": "foo.[bar,qux]", + "result": ["bar", "qux"] + }, + { + "expression": "foo.[bar,noexist]", + "result": ["bar", null] + }, + { + "expression": "foo.[noexist,alsonoexist]", + "result": [null, null] + } + ] +}, { + "given": { + "foo": {"bar": 1, "baz": [2, 3, 4]} + }, + "cases": [ + { + "expression": "foo.{bar:bar,baz:baz}", + "result": {"bar": 1, "baz": [2, 3, 4]} + }, + { + "expression": "foo.[bar,baz[0]]", + "result": [1, 2] + }, + { + "expression": "foo.[bar,baz[1]]", + "result": [1, 3] + }, + { + "expression": "foo.[bar,baz[2]]", + "result": [1, 4] + }, + { + "expression": "foo.[bar,baz[3]]", + "result": [1, null] + }, + { + "expression": "foo.[bar[0],baz[3]]", + "result": [null, null] + } + ] +}, { + "given": { + "foo": {"bar": 1, "baz": 2} + }, + "cases": [ + { + "expression": "foo.{bar: bar, baz: baz}", + "result": {"bar": 1, "baz": 2} + }, + { + "expression": "foo.[bar,baz]", + "result": [1, 2] + } + ] +}, { + "given": { + "foo": { + "bar": {"baz": [{"common": "first", "one": 1}, + {"common": "second", "two": 2}]}, + "ignoreme": 1, + "includeme": true + } + }, + "cases": [ + { + "expression": "foo.{bar: bar.baz[1],includeme: includeme}", + "result": {"bar": {"common": "second", "two": 2}, "includeme": true} + }, + { + "expression": "foo.{\"bar.baz.two\": bar.baz[1].two, includeme: includeme}", + "result": {"bar.baz.two": 2, "includeme": true} + }, + { + "expression": "foo.[includeme, bar.baz[*].common]", + "result": [true, ["first", "second"]] + }, + { + "expression": "foo.[includeme, bar.baz[*].none]", + "result": [true, []] + }, + { + "expression": "foo.[includeme, bar.baz[].common]", + "result": [true, ["first", "second"]] + } + ] +}, { + "given": { + "reservations": [{ + "instances": [ + {"id": "id1", + "name": "first"}, + {"id": "id2", + "name": "second"} + ]}, { + "instances": [ + {"id": "id3", + "name": "third"}, + {"id": "id4", + "name": "fourth"} + ]} + ]}, + "cases": [ + { + "expression": "reservations[*].instances[*].{id: id, name: name}", + "result": [[{"id": "id1", "name": "first"}, {"id": "id2", "name": "second"}], + [{"id": "id3", "name": "third"}, {"id": "id4", "name": "fourth"}]] + }, + { + "expression": "reservations[].instances[].{id: id, name: name}", + "result": [{"id": "id1", "name": "first"}, + {"id": "id2", "name": "second"}, + {"id": "id3", "name": "third"}, + {"id": "id4", "name": "fourth"}] + }, + { + "expression": "reservations[].instances[].[id, name]", + "result": [["id1", "first"], + ["id2", "second"], + ["id3", "third"], + ["id4", "fourth"]] + } + ] +}, +{ + "given": { + "foo": [{ + "bar": [ + { + "qux": 2, + "baz": 1 + }, + { + "qux": 4, + "baz": 3 + } + ] + }, + { + "bar": [ + { + "qux": 6, + "baz": 5 + }, + { + "qux": 8, + "baz": 7 + } + ] + } + ] + }, + "cases": [ + { + "expression": "foo", + "result": [{"bar": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}]}, + {"bar": [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]}] + }, + { + "expression": "foo[]", + "result": [{"bar": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}]}, + {"bar": [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]}] + }, + { + "expression": "foo[].bar", + "result": [[{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}], + [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]] + }, + { + "expression": "foo[].bar[]", + "result": 
[{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}, + {"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}] + }, + { + "expression": "foo[].bar[].[baz, qux]", + "result": [[1, 2], [3, 4], [5, 6], [7, 8]] + }, + { + "expression": "foo[].bar[].[baz]", + "result": [[1], [3], [5], [7]] + }, + { + "expression": "foo[].bar[].[baz, qux][]", + "result": [1, 2, 3, 4, 5, 6, 7, 8] + } + ] +}, +{ + "given": { + "foo": { + "baz": [ + { + "bar": "abc" + }, { + "bar": "def" + } + ], + "qux": ["zero"] + } + }, + "cases": [ + { + "expression": "foo.[baz[*].bar, qux[0]]", + "result": [["abc", "def"], "zero"] + } + ] +}, +{ + "given": { + "foo": { + "baz": [ + { + "bar": "a", + "bam": "b", + "boo": "c" + }, { + "bar": "d", + "bam": "e", + "boo": "f" + } + ], + "qux": ["zero"] + } + }, + "cases": [ + { + "expression": "foo.[baz[*].[bar, boo], qux[0]]", + "result": [[["a", "c" ], ["d", "f" ]], "zero"] + } + ] +}, +{ + "given": { + "foo": { + "baz": [ + { + "bar": "a", + "bam": "b", + "boo": "c" + }, { + "bar": "d", + "bam": "e", + "boo": "f" + } + ], + "qux": ["zero"] + } + }, + "cases": [ + { + "expression": "foo.[baz[*].not_there || baz[*].bar, qux[0]]", + "result": [["a", "d"], "zero"] + } + ] +}, +{ + "given": {"type": "object"}, + "cases": [ + { + "comment": "Nested multiselect", + "expression": "[[*],*]", + "result": [null, ["object"]] + } + ] +}, +{ + "given": [], + "cases": [ + { + "comment": "Nested multiselect", + "expression": "[[*]]", + "result": [[]] + } + ] +} +] diff --git a/vendor/github.com/jmespath/go-jmespath/compliance/ormatch.json b/vendor/github.com/jmespath/go-jmespath/compliance/ormatch.json new file mode 100644 index 000000000..2127cf441 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/compliance/ormatch.json @@ -0,0 +1,59 @@ +[{ + "given": + {"outer": {"foo": "foo", "bar": "bar", "baz": "baz"}}, + "cases": [ + { + "expression": "outer.foo || outer.bar", + "result": "foo" + }, + { + "expression": "outer.foo||outer.bar", + "result": "foo" + }, + { + "expression": "outer.bar || outer.baz", + "result": "bar" + }, + { + "expression": "outer.bar||outer.baz", + "result": "bar" + }, + { + "expression": "outer.bad || outer.foo", + "result": "foo" + }, + { + "expression": "outer.bad||outer.foo", + "result": "foo" + }, + { + "expression": "outer.foo || outer.bad", + "result": "foo" + }, + { + "expression": "outer.foo||outer.bad", + "result": "foo" + }, + { + "expression": "outer.bad || outer.alsobad", + "result": null + }, + { + "expression": "outer.bad||outer.alsobad", + "result": null + } + ] +}, { + "given": + {"outer": {"foo": "foo", "bool": false, "empty_list": [], "empty_string": ""}}, + "cases": [ + { + "expression": "outer.empty_string || outer.foo", + "result": "foo" + }, + { + "expression": "outer.nokey || outer.bool || outer.empty_list || outer.empty_string || outer.foo", + "result": "foo" + } + ] +}] diff --git a/vendor/github.com/jmespath/go-jmespath/compliance/pipe.json b/vendor/github.com/jmespath/go-jmespath/compliance/pipe.json new file mode 100644 index 000000000..b10c0a496 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/compliance/pipe.json @@ -0,0 +1,131 @@ +[{ + "given": { + "foo": { + "bar": { + "baz": "subkey" + }, + "other": { + "baz": "subkey" + }, + "other2": { + "baz": "subkey" + }, + "other3": { + "notbaz": ["a", "b", "c"] + }, + "other4": { + "notbaz": ["a", "b", "c"] + } + } + }, + "cases": [ + { + "expression": "foo.*.baz | [0]", + "result": "subkey" + }, + { + "expression": "foo.*.baz | [1]", + "result": "subkey" + }, + { + "expression": "foo.*.baz | [2]", + 
"result": "subkey" + }, + { + "expression": "foo.bar.* | [0]", + "result": "subkey" + }, + { + "expression": "foo.*.notbaz | [*]", + "result": [["a", "b", "c"], ["a", "b", "c"]] + }, + { + "expression": "{\"a\": foo.bar, \"b\": foo.other} | *.baz", + "result": ["subkey", "subkey"] + } + ] +}, { + "given": { + "foo": { + "bar": { + "baz": "one" + }, + "other": { + "baz": "two" + }, + "other2": { + "baz": "three" + }, + "other3": { + "notbaz": ["a", "b", "c"] + }, + "other4": { + "notbaz": ["d", "e", "f"] + } + } + }, + "cases": [ + { + "expression": "foo | bar", + "result": {"baz": "one"} + }, + { + "expression": "foo | bar | baz", + "result": "one" + }, + { + "expression": "foo|bar| baz", + "result": "one" + }, + { + "expression": "not_there | [0]", + "result": null + }, + { + "expression": "not_there | [0]", + "result": null + }, + { + "expression": "[foo.bar, foo.other] | [0]", + "result": {"baz": "one"} + }, + { + "expression": "{\"a\": foo.bar, \"b\": foo.other} | a", + "result": {"baz": "one"} + }, + { + "expression": "{\"a\": foo.bar, \"b\": foo.other} | b", + "result": {"baz": "two"} + }, + { + "expression": "foo.bam || foo.bar | baz", + "result": "one" + }, + { + "expression": "foo | not_there || bar", + "result": {"baz": "one"} + } + ] +}, { + "given": { + "foo": [{ + "bar": [{ + "baz": "one" + }, { + "baz": "two" + }] + }, { + "bar": [{ + "baz": "three" + }, { + "baz": "four" + }] + }] + }, + "cases": [ + { + "expression": "foo[*].bar[*] | [0][0]", + "result": {"baz": "one"} + } + ] +}] diff --git a/vendor/github.com/jmespath/go-jmespath/compliance/slice.json b/vendor/github.com/jmespath/go-jmespath/compliance/slice.json new file mode 100644 index 000000000..359477278 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/compliance/slice.json @@ -0,0 +1,187 @@ +[{ + "given": { + "foo": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], + "bar": { + "baz": 1 + } + }, + "cases": [ + { + "expression": "bar[0:10]", + "result": null + }, + { + "expression": "foo[0:10:1]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[0:10]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[0:10:]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[0::1]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[0::]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[0:]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[:10:1]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[::1]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[:10:]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[::]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[:]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[1:9]", + "result": [1, 2, 3, 4, 5, 6, 7, 8] + }, + { + "expression": "foo[0:10:2]", + "result": [0, 2, 4, 6, 8] + }, + { + "expression": "foo[5:]", + "result": [5, 6, 7, 8, 9] + }, + { + "expression": "foo[5::2]", + "result": [5, 7, 9] + }, + { + "expression": "foo[::2]", + "result": [0, 2, 4, 6, 8] + }, + { + "expression": "foo[::-1]", + "result": [9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + }, + { + "expression": "foo[1::2]", + "result": [1, 3, 5, 7, 9] + }, + { + "expression": "foo[10:0:-1]", + "result": [9, 8, 7, 6, 5, 4, 3, 2, 1] + }, + { + "expression": "foo[10:5:-1]", + "result": [9, 8, 7, 6] + }, + { + "expression": "foo[8:2:-2]", + "result": [8, 6, 4] + }, + { + 
"expression": "foo[0:20]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[10:-20:-1]", + "result": [9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + }, + { + "expression": "foo[10:-20]", + "result": [] + }, + { + "expression": "foo[-4:-1]", + "result": [6, 7, 8] + }, + { + "expression": "foo[:-5:-1]", + "result": [9, 8, 7, 6] + }, + { + "expression": "foo[8:2:0]", + "error": "invalid-value" + }, + { + "expression": "foo[8:2:0:1]", + "error": "syntax" + }, + { + "expression": "foo[8:2&]", + "error": "syntax" + }, + { + "expression": "foo[2:a:3]", + "error": "syntax" + } + ] +}, { + "given": { + "foo": [{"a": 1}, {"a": 2}, {"a": 3}], + "bar": [{"a": {"b": 1}}, {"a": {"b": 2}}, + {"a": {"b": 3}}], + "baz": 50 + }, + "cases": [ + { + "expression": "foo[:2].a", + "result": [1, 2] + }, + { + "expression": "foo[:2].b", + "result": [] + }, + { + "expression": "foo[:2].a.b", + "result": [] + }, + { + "expression": "bar[::-1].a.b", + "result": [3, 2, 1] + }, + { + "expression": "bar[:2].a.b", + "result": [1, 2] + }, + { + "expression": "baz[:2].a", + "result": null + } + ] +}, { + "given": [{"a": 1}, {"a": 2}, {"a": 3}], + "cases": [ + { + "expression": "[:]", + "result": [{"a": 1}, {"a": 2}, {"a": 3}] + }, + { + "expression": "[:2].a", + "result": [1, 2] + }, + { + "expression": "[::-1].a", + "result": [3, 2, 1] + }, + { + "expression": "[:2].b", + "result": [] + } + ] +}] diff --git a/vendor/github.com/jmespath/go-jmespath/compliance/syntax.json b/vendor/github.com/jmespath/go-jmespath/compliance/syntax.json new file mode 100644 index 000000000..003c29458 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/compliance/syntax.json @@ -0,0 +1,616 @@ +[{ + "comment": "Dot syntax", + "given": {"type": "object"}, + "cases": [ + { + "expression": "foo.bar", + "result": null + }, + { + "expression": "foo.1", + "error": "syntax" + }, + { + "expression": "foo.-11", + "error": "syntax" + }, + { + "expression": "foo", + "result": null + }, + { + "expression": "foo.", + "error": "syntax" + }, + { + "expression": "foo.", + "error": "syntax" + }, + { + "expression": ".foo", + "error": "syntax" + }, + { + "expression": "foo..bar", + "error": "syntax" + }, + { + "expression": "foo.bar.", + "error": "syntax" + }, + { + "expression": "foo[.]", + "error": "syntax" + } + ] +}, + { + "comment": "Simple token errors", + "given": {"type": "object"}, + "cases": [ + { + "expression": ".", + "error": "syntax" + }, + { + "expression": ":", + "error": "syntax" + }, + { + "expression": ",", + "error": "syntax" + }, + { + "expression": "]", + "error": "syntax" + }, + { + "expression": "[", + "error": "syntax" + }, + { + "expression": "}", + "error": "syntax" + }, + { + "expression": "{", + "error": "syntax" + }, + { + "expression": ")", + "error": "syntax" + }, + { + "expression": "(", + "error": "syntax" + }, + { + "expression": "((&", + "error": "syntax" + }, + { + "expression": "a[", + "error": "syntax" + }, + { + "expression": "a]", + "error": "syntax" + }, + { + "expression": "a][", + "error": "syntax" + }, + { + "expression": "!", + "error": "syntax" + } + ] + }, + { + "comment": "Boolean syntax errors", + "given": {"type": "object"}, + "cases": [ + { + "expression": "![!(!", + "error": "syntax" + } + ] + }, + { + "comment": "Wildcard syntax", + "given": {"type": "object"}, + "cases": [ + { + "expression": "*", + "result": ["object"] + }, + { + "expression": "*.*", + "result": [] + }, + { + "expression": "*.foo", + "result": [] + }, + { + "expression": "*[0]", + "result": [] + }, + { + "expression": ".*", 
+ "error": "syntax" + }, + { + "expression": "*foo", + "error": "syntax" + }, + { + "expression": "*0", + "error": "syntax" + }, + { + "expression": "foo[*]bar", + "error": "syntax" + }, + { + "expression": "foo[*]*", + "error": "syntax" + } + ] + }, + { + "comment": "Flatten syntax", + "given": {"type": "object"}, + "cases": [ + { + "expression": "[]", + "result": null + } + ] + }, + { + "comment": "Simple bracket syntax", + "given": {"type": "object"}, + "cases": [ + { + "expression": "[0]", + "result": null + }, + { + "expression": "[*]", + "result": null + }, + { + "expression": "*.[0]", + "error": "syntax" + }, + { + "expression": "*.[\"0\"]", + "result": [[null]] + }, + { + "expression": "[*].bar", + "result": null + }, + { + "expression": "[*][0]", + "result": null + }, + { + "expression": "foo[#]", + "error": "syntax" + } + ] + }, + { + "comment": "Multi-select list syntax", + "given": {"type": "object"}, + "cases": [ + { + "expression": "foo[0]", + "result": null + }, + { + "comment": "Valid multi-select of a list", + "expression": "foo[0, 1]", + "error": "syntax" + }, + { + "expression": "foo.[0]", + "error": "syntax" + }, + { + "expression": "foo.[*]", + "result": null + }, + { + "comment": "Multi-select of a list with trailing comma", + "expression": "foo[0, ]", + "error": "syntax" + }, + { + "comment": "Multi-select of a list with trailing comma and no close", + "expression": "foo[0,", + "error": "syntax" + }, + { + "comment": "Multi-select of a list with trailing comma and no close", + "expression": "foo.[a", + "error": "syntax" + }, + { + "comment": "Multi-select of a list with extra comma", + "expression": "foo[0,, 1]", + "error": "syntax" + }, + { + "comment": "Multi-select of a list using an identifier index", + "expression": "foo[abc]", + "error": "syntax" + }, + { + "comment": "Multi-select of a list using identifier indices", + "expression": "foo[abc, def]", + "error": "syntax" + }, + { + "comment": "Multi-select of a list using an identifier index", + "expression": "foo[abc, 1]", + "error": "syntax" + }, + { + "comment": "Multi-select of a list using an identifier index with trailing comma", + "expression": "foo[abc, ]", + "error": "syntax" + }, + { + "comment": "Valid multi-select of a hash using an identifier index", + "expression": "foo.[abc]", + "result": null + }, + { + "comment": "Valid multi-select of a hash", + "expression": "foo.[abc, def]", + "result": null + }, + { + "comment": "Multi-select of a hash using a numeric index", + "expression": "foo.[abc, 1]", + "error": "syntax" + }, + { + "comment": "Multi-select of a hash with a trailing comma", + "expression": "foo.[abc, ]", + "error": "syntax" + }, + { + "comment": "Multi-select of a hash with extra commas", + "expression": "foo.[abc,, def]", + "error": "syntax" + }, + { + "comment": "Multi-select of a hash using number indices", + "expression": "foo.[0, 1]", + "error": "syntax" + } + ] + }, + { + "comment": "Multi-select hash syntax", + "given": {"type": "object"}, + "cases": [ + { + "comment": "No key or value", + "expression": "a{}", + "error": "syntax" + }, + { + "comment": "No closing token", + "expression": "a{", + "error": "syntax" + }, + { + "comment": "Not a key value pair", + "expression": "a{foo}", + "error": "syntax" + }, + { + "comment": "Missing value and closing character", + "expression": "a{foo:", + "error": "syntax" + }, + { + "comment": "Missing closing character", + "expression": "a{foo: 0", + "error": "syntax" + }, + { + "comment": "Missing value", + "expression": "a{foo:}", + "error": 
"syntax" + }, + { + "comment": "Trailing comma and no closing character", + "expression": "a{foo: 0, ", + "error": "syntax" + }, + { + "comment": "Missing value with trailing comma", + "expression": "a{foo: ,}", + "error": "syntax" + }, + { + "comment": "Accessing Array using an identifier", + "expression": "a{foo: bar}", + "error": "syntax" + }, + { + "expression": "a{foo: 0}", + "error": "syntax" + }, + { + "comment": "Missing key-value pair", + "expression": "a.{}", + "error": "syntax" + }, + { + "comment": "Not a key-value pair", + "expression": "a.{foo}", + "error": "syntax" + }, + { + "comment": "Missing value", + "expression": "a.{foo:}", + "error": "syntax" + }, + { + "comment": "Missing value with trailing comma", + "expression": "a.{foo: ,}", + "error": "syntax" + }, + { + "comment": "Valid multi-select hash extraction", + "expression": "a.{foo: bar}", + "result": null + }, + { + "comment": "Valid multi-select hash extraction", + "expression": "a.{foo: bar, baz: bam}", + "result": null + }, + { + "comment": "Trailing comma", + "expression": "a.{foo: bar, }", + "error": "syntax" + }, + { + "comment": "Missing key in second key-value pair", + "expression": "a.{foo: bar, baz}", + "error": "syntax" + }, + { + "comment": "Missing value in second key-value pair", + "expression": "a.{foo: bar, baz:}", + "error": "syntax" + }, + { + "comment": "Trailing comma", + "expression": "a.{foo: bar, baz: bam, }", + "error": "syntax" + }, + { + "comment": "Nested multi select", + "expression": "{\"\\\\\":{\" \":*}}", + "result": {"\\": {" ": ["object"]}} + } + ] + }, + { + "comment": "Or expressions", + "given": {"type": "object"}, + "cases": [ + { + "expression": "foo || bar", + "result": null + }, + { + "expression": "foo ||", + "error": "syntax" + }, + { + "expression": "foo.|| bar", + "error": "syntax" + }, + { + "expression": " || foo", + "error": "syntax" + }, + { + "expression": "foo || || foo", + "error": "syntax" + }, + { + "expression": "foo.[a || b]", + "result": null + }, + { + "expression": "foo.[a ||]", + "error": "syntax" + }, + { + "expression": "\"foo", + "error": "syntax" + } + ] + }, + { + "comment": "Filter expressions", + "given": {"type": "object"}, + "cases": [ + { + "expression": "foo[?bar==`\"baz\"`]", + "result": null + }, + { + "expression": "foo[? 
bar == `\"baz\"` ]", + "result": null + }, + { + "expression": "foo[ ?bar==`\"baz\"`]", + "error": "syntax" + }, + { + "expression": "foo[?bar==]", + "error": "syntax" + }, + { + "expression": "foo[?==]", + "error": "syntax" + }, + { + "expression": "foo[?==bar]", + "error": "syntax" + }, + { + "expression": "foo[?bar==baz?]", + "error": "syntax" + }, + { + "expression": "foo[?a.b.c==d.e.f]", + "result": null + }, + { + "expression": "foo[?bar==`[0, 1, 2]`]", + "result": null + }, + { + "expression": "foo[?bar==`[\"a\", \"b\", \"c\"]`]", + "result": null + }, + { + "comment": "Literal char not escaped", + "expression": "foo[?bar==`[\"foo`bar\"]`]", + "error": "syntax" + }, + { + "comment": "Literal char escaped", + "expression": "foo[?bar==`[\"foo\\`bar\"]`]", + "result": null + }, + { + "comment": "Unknown comparator", + "expression": "foo[?bar<>baz]", + "error": "syntax" + }, + { + "comment": "Unknown comparator", + "expression": "foo[?bar^baz]", + "error": "syntax" + }, + { + "expression": "foo[bar==baz]", + "error": "syntax" + }, + { + "comment": "Quoted identifier in filter expression no spaces", + "expression": "[?\"\\\\\">`\"foo\"`]", + "result": null + }, + { + "comment": "Quoted identifier in filter expression with spaces", + "expression": "[?\"\\\\\" > `\"foo\"`]", + "result": null + } + ] + }, + { + "comment": "Filter expression errors", + "given": {"type": "object"}, + "cases": [ + { + "expression": "bar.`\"anything\"`", + "error": "syntax" + }, + { + "expression": "bar.baz.noexists.`\"literal\"`", + "error": "syntax" + }, + { + "comment": "Literal wildcard projection", + "expression": "foo[*].`\"literal\"`", + "error": "syntax" + }, + { + "expression": "foo[*].name.`\"literal\"`", + "error": "syntax" + }, + { + "expression": "foo[].name.`\"literal\"`", + "error": "syntax" + }, + { + "expression": "foo[].name.`\"literal\"`.`\"subliteral\"`", + "error": "syntax" + }, + { + "comment": "Projecting a literal onto an empty list", + "expression": "foo[*].name.noexist.`\"literal\"`", + "error": "syntax" + }, + { + "expression": "foo[].name.noexist.`\"literal\"`", + "error": "syntax" + }, + { + "expression": "twolen[*].`\"foo\"`", + "error": "syntax" + }, + { + "comment": "Two level projection of a literal", + "expression": "twolen[*].threelen[*].`\"bar\"`", + "error": "syntax" + }, + { + "comment": "Two level flattened projection of a literal", + "expression": "twolen[].threelen[].`\"bar\"`", + "error": "syntax" + } + ] + }, + { + "comment": "Identifiers", + "given": {"type": "object"}, + "cases": [ + { + "expression": "foo", + "result": null + }, + { + "expression": "\"foo\"", + "result": null + }, + { + "expression": "\"\\\\\"", + "result": null + } + ] + }, + { + "comment": "Combined syntax", + "given": [], + "cases": [ + { + "expression": "*||*|*|*", + "result": null + }, + { + "expression": "*[]||[*]", + "result": [] + }, + { + "expression": "[*.*]", + "result": [null] + } + ] + } +] diff --git a/vendor/github.com/jmespath/go-jmespath/compliance/unicode.json b/vendor/github.com/jmespath/go-jmespath/compliance/unicode.json new file mode 100644 index 000000000..6b07b0b6d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/compliance/unicode.json @@ -0,0 +1,38 @@ +[ + { + "given": {"foo": [{"✓": "✓"}, {"✓": "✗"}]}, + "cases": [ + { + "expression": "foo[].\"✓\"", + "result": ["✓", "✗"] + } + ] + }, + { + "given": {"☯": true}, + "cases": [ + { + "expression": "\"☯\"", + "result": true + } + ] + }, + { + "given": {"♪♫•*¨*•.¸¸❤¸¸.•*¨*•♫♪": true}, + "cases": [ + { + "expression": 
"\"♪♫•*¨*•.¸¸❤¸¸.•*¨*•♫♪\"", + "result": true + } + ] + }, + { + "given": {"☃": true}, + "cases": [ + { + "expression": "\"☃\"", + "result": true + } + ] + } +] diff --git a/vendor/github.com/jmespath/go-jmespath/compliance/wildcard.json b/vendor/github.com/jmespath/go-jmespath/compliance/wildcard.json new file mode 100644 index 000000000..3bcec3028 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/compliance/wildcard.json @@ -0,0 +1,460 @@ +[{ + "given": { + "foo": { + "bar": { + "baz": "val" + }, + "other": { + "baz": "val" + }, + "other2": { + "baz": "val" + }, + "other3": { + "notbaz": ["a", "b", "c"] + }, + "other4": { + "notbaz": ["a", "b", "c"] + }, + "other5": { + "other": { + "a": 1, + "b": 1, + "c": 1 + } + } + } + }, + "cases": [ + { + "expression": "foo.*.baz", + "result": ["val", "val", "val"] + }, + { + "expression": "foo.bar.*", + "result": ["val"] + }, + { + "expression": "foo.*.notbaz", + "result": [["a", "b", "c"], ["a", "b", "c"]] + }, + { + "expression": "foo.*.notbaz[0]", + "result": ["a", "a"] + }, + { + "expression": "foo.*.notbaz[-1]", + "result": ["c", "c"] + } + ] +}, { + "given": { + "foo": { + "first-1": { + "second-1": "val" + }, + "first-2": { + "second-1": "val" + }, + "first-3": { + "second-1": "val" + } + } + }, + "cases": [ + { + "expression": "foo.*", + "result": [{"second-1": "val"}, {"second-1": "val"}, + {"second-1": "val"}] + }, + { + "expression": "foo.*.*", + "result": [["val"], ["val"], ["val"]] + }, + { + "expression": "foo.*.*.*", + "result": [[], [], []] + }, + { + "expression": "foo.*.*.*.*", + "result": [[], [], []] + } + ] +}, { + "given": { + "foo": { + "bar": "one" + }, + "other": { + "bar": "one" + }, + "nomatch": { + "notbar": "three" + } + }, + "cases": [ + { + "expression": "*.bar", + "result": ["one", "one"] + } + ] +}, { + "given": { + "top1": { + "sub1": {"foo": "one"} + }, + "top2": { + "sub1": {"foo": "one"} + } + }, + "cases": [ + { + "expression": "*", + "result": [{"sub1": {"foo": "one"}}, + {"sub1": {"foo": "one"}}] + }, + { + "expression": "*.sub1", + "result": [{"foo": "one"}, + {"foo": "one"}] + }, + { + "expression": "*.*", + "result": [[{"foo": "one"}], + [{"foo": "one"}]] + }, + { + "expression": "*.*.foo[]", + "result": ["one", "one"] + }, + { + "expression": "*.sub1.foo", + "result": ["one", "one"] + } + ] +}, +{ + "given": + {"foo": [{"bar": "one"}, {"bar": "two"}, {"bar": "three"}, {"notbar": "four"}]}, + "cases": [ + { + "expression": "foo[*].bar", + "result": ["one", "two", "three"] + }, + { + "expression": "foo[*].notbar", + "result": ["four"] + } + ] +}, +{ + "given": + [{"bar": "one"}, {"bar": "two"}, {"bar": "three"}, {"notbar": "four"}], + "cases": [ + { + "expression": "[*]", + "result": [{"bar": "one"}, {"bar": "two"}, {"bar": "three"}, {"notbar": "four"}] + }, + { + "expression": "[*].bar", + "result": ["one", "two", "three"] + }, + { + "expression": "[*].notbar", + "result": ["four"] + } + ] +}, +{ + "given": { + "foo": { + "bar": [ + {"baz": ["one", "two", "three"]}, + {"baz": ["four", "five", "six"]}, + {"baz": ["seven", "eight", "nine"]} + ] + } + }, + "cases": [ + { + "expression": "foo.bar[*].baz", + "result": [["one", "two", "three"], ["four", "five", "six"], ["seven", "eight", "nine"]] + }, + { + "expression": "foo.bar[*].baz[0]", + "result": ["one", "four", "seven"] + }, + { + "expression": "foo.bar[*].baz[1]", + "result": ["two", "five", "eight"] + }, + { + "expression": "foo.bar[*].baz[2]", + "result": ["three", "six", "nine"] + }, + { + "expression": "foo.bar[*].baz[3]", + "result": [] + 
} + ] +}, +{ + "given": { + "foo": { + "bar": [["one", "two"], ["three", "four"]] + } + }, + "cases": [ + { + "expression": "foo.bar[*]", + "result": [["one", "two"], ["three", "four"]] + }, + { + "expression": "foo.bar[0]", + "result": ["one", "two"] + }, + { + "expression": "foo.bar[0][0]", + "result": "one" + }, + { + "expression": "foo.bar[0][0][0]", + "result": null + }, + { + "expression": "foo.bar[0][0][0][0]", + "result": null + }, + { + "expression": "foo[0][0]", + "result": null + } + ] +}, +{ + "given": { + "foo": [ + {"bar": [{"kind": "basic"}, {"kind": "intermediate"}]}, + {"bar": [{"kind": "advanced"}, {"kind": "expert"}]}, + {"bar": "string"} + ] + + }, + "cases": [ + { + "expression": "foo[*].bar[*].kind", + "result": [["basic", "intermediate"], ["advanced", "expert"]] + }, + { + "expression": "foo[*].bar[0].kind", + "result": ["basic", "advanced"] + } + ] +}, +{ + "given": { + "foo": [ + {"bar": {"kind": "basic"}}, + {"bar": {"kind": "intermediate"}}, + {"bar": {"kind": "advanced"}}, + {"bar": {"kind": "expert"}}, + {"bar": "string"} + ] + }, + "cases": [ + { + "expression": "foo[*].bar.kind", + "result": ["basic", "intermediate", "advanced", "expert"] + } + ] +}, +{ + "given": { + "foo": [{"bar": ["one", "two"]}, {"bar": ["three", "four"]}, {"bar": ["five"]}] + }, + "cases": [ + { + "expression": "foo[*].bar[0]", + "result": ["one", "three", "five"] + }, + { + "expression": "foo[*].bar[1]", + "result": ["two", "four"] + }, + { + "expression": "foo[*].bar[2]", + "result": [] + } + ] +}, +{ + "given": { + "foo": [{"bar": []}, {"bar": []}, {"bar": []}] + }, + "cases": [ + { + "expression": "foo[*].bar[0]", + "result": [] + } + ] +}, +{ + "given": { + "foo": [["one", "two"], ["three", "four"], ["five"]] + }, + "cases": [ + { + "expression": "foo[*][0]", + "result": ["one", "three", "five"] + }, + { + "expression": "foo[*][1]", + "result": ["two", "four"] + } + ] +}, +{ + "given": { + "foo": [ + [ + ["one", "two"], ["three", "four"] + ], [ + ["five", "six"], ["seven", "eight"] + ], [ + ["nine"], ["ten"] + ] + ] + }, + "cases": [ + { + "expression": "foo[*][0]", + "result": [["one", "two"], ["five", "six"], ["nine"]] + }, + { + "expression": "foo[*][1]", + "result": [["three", "four"], ["seven", "eight"], ["ten"]] + }, + { + "expression": "foo[*][0][0]", + "result": ["one", "five", "nine"] + }, + { + "expression": "foo[*][1][0]", + "result": ["three", "seven", "ten"] + }, + { + "expression": "foo[*][0][1]", + "result": ["two", "six"] + }, + { + "expression": "foo[*][1][1]", + "result": ["four", "eight"] + }, + { + "expression": "foo[*][2]", + "result": [] + }, + { + "expression": "foo[*][2][2]", + "result": [] + }, + { + "expression": "bar[*]", + "result": null + }, + { + "expression": "bar[*].baz[*]", + "result": null + } + ] +}, +{ + "given": { + "string": "string", + "hash": {"foo": "bar", "bar": "baz"}, + "number": 23, + "nullvalue": null + }, + "cases": [ + { + "expression": "string[*]", + "result": null + }, + { + "expression": "hash[*]", + "result": null + }, + { + "expression": "number[*]", + "result": null + }, + { + "expression": "nullvalue[*]", + "result": null + }, + { + "expression": "string[*].foo", + "result": null + }, + { + "expression": "hash[*].foo", + "result": null + }, + { + "expression": "number[*].foo", + "result": null + }, + { + "expression": "nullvalue[*].foo", + "result": null + }, + { + "expression": "nullvalue[*].foo[*].bar", + "result": null + } + ] +}, +{ + "given": { + "string": "string", + "hash": {"foo": "val", "bar": "val"}, + "number": 23, + 
"array": [1, 2, 3], + "nullvalue": null + }, + "cases": [ + { + "expression": "string.*", + "result": null + }, + { + "expression": "hash.*", + "result": ["val", "val"] + }, + { + "expression": "number.*", + "result": null + }, + { + "expression": "array.*", + "result": null + }, + { + "expression": "nullvalue.*", + "result": null + } + ] +}, +{ + "given": { + "a": [0, 1, 2], + "b": [0, 1, 2] + }, + "cases": [ + { + "expression": "*[0]", + "result": [0, 0] + } + ] +} +] diff --git a/vendor/github.com/jmespath/go-jmespath/compliance_test.go b/vendor/github.com/jmespath/go-jmespath/compliance_test.go new file mode 100644 index 000000000..4ee9c959d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/compliance_test.go @@ -0,0 +1,123 @@ +package jmespath + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" +) + +type TestSuite struct { + Given interface{} + TestCases []TestCase `json:"cases"` + Comment string +} +type TestCase struct { + Comment string + Expression string + Result interface{} + Error string +} + +var whiteListed = []string{ + "compliance/basic.json", + "compliance/current.json", + "compliance/escape.json", + "compliance/filters.json", + "compliance/functions.json", + "compliance/identifiers.json", + "compliance/indices.json", + "compliance/literal.json", + "compliance/multiselect.json", + "compliance/ormatch.json", + "compliance/pipe.json", + "compliance/slice.json", + "compliance/syntax.json", + "compliance/unicode.json", + "compliance/wildcard.json", + "compliance/boolean.json", +} + +func allowed(path string) bool { + for _, el := range whiteListed { + if el == path { + return true + } + } + return false +} + +func TestCompliance(t *testing.T) { + assert := assert.New(t) + + var complianceFiles []string + err := filepath.Walk("compliance", func(path string, _ os.FileInfo, _ error) error { + //if strings.HasSuffix(path, ".json") { + if allowed(path) { + complianceFiles = append(complianceFiles, path) + } + return nil + }) + if assert.Nil(err) { + for _, filename := range complianceFiles { + runComplianceTest(assert, filename) + } + } +} + +func runComplianceTest(assert *assert.Assertions, filename string) { + var testSuites []TestSuite + data, err := ioutil.ReadFile(filename) + if assert.Nil(err) { + err := json.Unmarshal(data, &testSuites) + if assert.Nil(err) { + for _, testsuite := range testSuites { + runTestSuite(assert, testsuite, filename) + } + } + } +} + +func runTestSuite(assert *assert.Assertions, testsuite TestSuite, filename string) { + for _, testcase := range testsuite.TestCases { + if testcase.Error != "" { + // This is a test case that verifies we error out properly. + runSyntaxTestCase(assert, testsuite.Given, testcase, filename) + } else { + runTestCase(assert, testsuite.Given, testcase, filename) + } + } +} + +func runSyntaxTestCase(assert *assert.Assertions, given interface{}, testcase TestCase, filename string) { + // Anything with an .Error means that we expect that JMESPath should return + // an error when we try to evaluate the expression. 
+ _, err := Search(testcase.Expression, given) + assert.NotNil(err, fmt.Sprintf("Expression: %s", testcase.Expression)) +} + +func runTestCase(assert *assert.Assertions, given interface{}, testcase TestCase, filename string) { + lexer := NewLexer() + var err error + _, err = lexer.tokenize(testcase.Expression) + if err != nil { + errMsg := fmt.Sprintf("(%s) Could not lex expression: %s -- %s", filename, testcase.Expression, err.Error()) + assert.Fail(errMsg) + return + } + parser := NewParser() + _, err = parser.Parse(testcase.Expression) + if err != nil { + errMsg := fmt.Sprintf("(%s) Could not parse expression: %s -- %s", filename, testcase.Expression, err.Error()) + assert.Fail(errMsg) + return + } + actual, err := Search(testcase.Expression, given) + if assert.Nil(err, fmt.Sprintf("Expression: %s", testcase.Expression)) { + assert.Equal(testcase.Result, actual, fmt.Sprintf("Expression: %s", testcase.Expression)) + } +} diff --git a/vendor/github.com/jmespath/go-jmespath/functions.go b/vendor/github.com/jmespath/go-jmespath/functions.go new file mode 100644 index 000000000..9b7cd89b4 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/functions.go @@ -0,0 +1,842 @@ +package jmespath + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "unicode/utf8" +) + +type jpFunction func(arguments []interface{}) (interface{}, error) + +type jpType string + +const ( + jpUnknown jpType = "unknown" + jpNumber jpType = "number" + jpString jpType = "string" + jpArray jpType = "array" + jpObject jpType = "object" + jpArrayNumber jpType = "array[number]" + jpArrayString jpType = "array[string]" + jpExpref jpType = "expref" + jpAny jpType = "any" +) + +type functionEntry struct { + name string + arguments []argSpec + handler jpFunction + hasExpRef bool +} + +type argSpec struct { + types []jpType + variadic bool +} + +type byExprString struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprString) Len() int { + return len(a.items) +} +func (a *byExprString) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprString) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + ith, ok := first.(string) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + jth, ok := second.(string) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type byExprFloat struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprFloat) Len() int { + return len(a.items) +} +func (a *byExprFloat) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprFloat) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + ith, ok := first.(float64) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. 
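+ // hasError is inspected by the caller (jpfSortBy) once sort.Stable
+ // returns, and is surfaced there as a sort_by comparison error.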
+ return true + } + jth, ok := second.(float64) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type functionCaller struct { + functionTable map[string]functionEntry +} + +func newFunctionCaller() *functionCaller { + caller := &functionCaller{} + caller.functionTable = map[string]functionEntry{ + "length": { + name: "length", + arguments: []argSpec{ + {types: []jpType{jpString, jpArray, jpObject}}, + }, + handler: jpfLength, + }, + "starts_with": { + name: "starts_with", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpString}}, + }, + handler: jpfStartsWith, + }, + "abs": { + name: "abs", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfAbs, + }, + "avg": { + name: "avg", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber}}, + }, + handler: jpfAvg, + }, + "ceil": { + name: "ceil", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfCeil, + }, + "contains": { + name: "contains", + arguments: []argSpec{ + {types: []jpType{jpArray, jpString}}, + {types: []jpType{jpAny}}, + }, + handler: jpfContains, + }, + "ends_with": { + name: "ends_with", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpString}}, + }, + handler: jpfEndsWith, + }, + "floor": { + name: "floor", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfFloor, + }, + "map": { + name: "map", + arguments: []argSpec{ + {types: []jpType{jpExpref}}, + {types: []jpType{jpArray}}, + }, + handler: jpfMap, + hasExpRef: true, + }, + "max": { + name: "max", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber, jpArrayString}}, + }, + handler: jpfMax, + }, + "merge": { + name: "merge", + arguments: []argSpec{ + {types: []jpType{jpObject}, variadic: true}, + }, + handler: jpfMerge, + }, + "max_by": { + name: "max_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfMaxBy, + hasExpRef: true, + }, + "sum": { + name: "sum", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber}}, + }, + handler: jpfSum, + }, + "min": { + name: "min", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber, jpArrayString}}, + }, + handler: jpfMin, + }, + "min_by": { + name: "min_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfMinBy, + hasExpRef: true, + }, + "type": { + name: "type", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfType, + }, + "keys": { + name: "keys", + arguments: []argSpec{ + {types: []jpType{jpObject}}, + }, + handler: jpfKeys, + }, + "values": { + name: "values", + arguments: []argSpec{ + {types: []jpType{jpObject}}, + }, + handler: jpfValues, + }, + "sort": { + name: "sort", + arguments: []argSpec{ + {types: []jpType{jpArrayString, jpArrayNumber}}, + }, + handler: jpfSort, + }, + "sort_by": { + name: "sort_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfSortBy, + hasExpRef: true, + }, + "join": { + name: "join", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpArrayString}}, + }, + handler: jpfJoin, + }, + "reverse": { + name: "reverse", + arguments: []argSpec{ + {types: []jpType{jpArray, jpString}}, + }, + handler: jpfReverse, + }, + "to_array": { + name: "to_array", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToArray, + }, + "to_string": { + name: "to_string", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler:
jpfToString, + }, + "to_number": { + name: "to_number", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToNumber, + }, + "not_null": { + name: "not_null", + arguments: []argSpec{ + {types: []jpType{jpAny}, variadic: true}, + }, + handler: jpfNotNull, + }, + } + return caller +} + +func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) { + if len(e.arguments) == 0 { + return arguments, nil + } + if !e.arguments[len(e.arguments)-1].variadic { + if len(e.arguments) != len(arguments) { + return nil, errors.New("incorrect number of args") + } + for i, spec := range e.arguments { + userArg := arguments[i] + err := spec.typeCheck(userArg) + if err != nil { + return nil, err + } + } + return arguments, nil + } + if len(arguments) < len(e.arguments) { + return nil, errors.New("Invalid arity.") + } + return arguments, nil +} + +func (a *argSpec) typeCheck(arg interface{}) error { + for _, t := range a.types { + switch t { + case jpNumber: + if _, ok := arg.(float64); ok { + return nil + } + case jpString: + if _, ok := arg.(string); ok { + return nil + } + case jpArray: + if isSliceType(arg) { + return nil + } + case jpObject: + if _, ok := arg.(map[string]interface{}); ok { + return nil + } + case jpArrayNumber: + if _, ok := toArrayNum(arg); ok { + return nil + } + case jpArrayString: + if _, ok := toArrayStr(arg); ok { + return nil + } + case jpAny: + return nil + case jpExpref: + if _, ok := arg.(expRef); ok { + return nil + } + } + } + return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types) +} + +func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) { + entry, ok := f.functionTable[name] + if !ok { + return nil, errors.New("unknown function: " + name) + } + resolvedArgs, err := entry.resolveArgs(arguments) + if err != nil { + return nil, err + } + if entry.hasExpRef { + var extra []interface{} + extra = append(extra, intr) + resolvedArgs = append(extra, resolvedArgs...) + } + return entry.handler(resolvedArgs) +} + +func jpfAbs(arguments []interface{}) (interface{}, error) { + num := arguments[0].(float64) + return math.Abs(num), nil +} + +func jpfLength(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if c, ok := arg.(string); ok { + return float64(utf8.RuneCountInString(c)), nil + } else if isSliceType(arg) { + v := reflect.ValueOf(arg) + return float64(v.Len()), nil + } else if c, ok := arg.(map[string]interface{}); ok { + return float64(len(c)), nil + } + return nil, errors.New("could not compute length()") +} + +func jpfStartsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + prefix := arguments[1].(string) + return strings.HasPrefix(search, prefix), nil +} + +func jpfAvg(arguments []interface{}) (interface{}, error) { + // We've already type checked the value so we can safely use + // type assertions. 
+ args := arguments[0].([]interface{}) + length := float64(len(args)) + numerator := 0.0 + for _, n := range args { + numerator += n.(float64) + } + return numerator / length, nil +} +func jpfCeil(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Ceil(val), nil +} +func jpfContains(arguments []interface{}) (interface{}, error) { + search := arguments[0] + el := arguments[1] + if searchStr, ok := search.(string); ok { + if elStr, ok := el.(string); ok { + return strings.Index(searchStr, elStr) != -1, nil + } + return false, nil + } + // Otherwise this is a generic contains for []interface{} + general := search.([]interface{}) + for _, item := range general { + if item == el { + return true, nil + } + } + return false, nil +} +func jpfEndsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + suffix := arguments[1].(string) + return strings.HasSuffix(search, suffix), nil +} +func jpfFloor(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Floor(val), nil +} +func jpfMap(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + exp := arguments[1].(expRef) + node := exp.ref + arr := arguments[2].([]interface{}) + mapped := make([]interface{}, 0, len(arr)) + for _, value := range arr { + current, err := intr.Execute(node, value) + if err != nil { + return nil, err + } + mapped = append(mapped, current) + } + return mapped, nil +} +func jpfMax(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil + } + // Otherwise we're dealing with a max() of strings. 
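+ // typeCheck has already accepted this argument as array[number] or
+ // array[string], so toArrayStr below cannot fail and its ok flag is ignored.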
+ items, _ := toArrayStr(arguments[0]) + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil +} +func jpfMerge(arguments []interface{}) (interface{}, error) { + final := make(map[string]interface{}) + for _, m := range arguments { + mapped := m.(map[string]interface{}) + for key, value := range mapped { + final[key] = value + } + } + return final, nil +} +func jpfMaxBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return nil, nil + } else if len(arr) == 1 { + return arr[0], nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + switch t := start.(type) { + case float64: + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(float64) + if !ok { + return nil, errors.New("invalid type, must be number") + } + if current > bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + case string: + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(string) + if !ok { + return nil, errors.New("invalid type, must be string") + } + if current > bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + default: + return nil, errors.New("invalid type, must be number or string") + } +} +func jpfSum(arguments []interface{}) (interface{}, error) { + items, _ := toArrayNum(arguments[0]) + sum := 0.0 + for _, item := range items { + sum += item + } + return sum, nil +} + +func jpfMin(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item < best { + best = item + } + } + return best, nil + } + items, _ := toArrayStr(arguments[0]) + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item < best { + best = item + } + } + return best, nil +} + +func jpfMinBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return nil, nil + } else if len(arr) == 1 { + return arr[0], nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + if t, ok := start.(float64); ok { + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(float64) + if !ok { + return nil, errors.New("invalid type, must be number") + } + if current < bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + } else if t, ok := start.(string); ok { + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(string) + if !ok { + return nil, errors.New("invalid type, must be string") + } + if current <
bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + } else { + return nil, errors.New("invalid type, must be number or string") + } +} +func jpfType(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if _, ok := arg.(float64); ok { + return "number", nil + } + if _, ok := arg.(string); ok { + return "string", nil + } + if _, ok := arg.([]interface{}); ok { + return "array", nil + } + if _, ok := arg.(map[string]interface{}); ok { + return "object", nil + } + if arg == nil { + return "null", nil + } + if arg == true || arg == false { + return "boolean", nil + } + return nil, errors.New("unknown type") +} +func jpfKeys(arguments []interface{}) (interface{}, error) { + arg := arguments[0].(map[string]interface{}) + collected := make([]interface{}, 0, len(arg)) + for key := range arg { + collected = append(collected, key) + } + return collected, nil +} +func jpfValues(arguments []interface{}) (interface{}, error) { + arg := arguments[0].(map[string]interface{}) + collected := make([]interface{}, 0, len(arg)) + for _, value := range arg { + collected = append(collected, value) + } + return collected, nil +} +func jpfSort(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + d := sort.Float64Slice(items) + sort.Stable(d) + final := make([]interface{}, len(d)) + for i, val := range d { + final[i] = val + } + return final, nil + } + // Otherwise we're dealing with sort()'ing strings. + items, _ := toArrayStr(arguments[0]) + d := sort.StringSlice(items) + sort.Stable(d) + final := make([]interface{}, len(d)) + for i, val := range d { + final[i] = val + } + return final, nil +} +func jpfSortBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return arr, nil + } else if len(arr) == 1 { + return arr, nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + if _, ok := start.(float64); ok { + sortable := &byExprFloat{intr, node, arr, false} + sort.Stable(sortable) + if sortable.hasError { + return nil, errors.New("error in sort_by comparison") + } + return arr, nil + } else if _, ok := start.(string); ok { + sortable := &byExprString{intr, node, arr, false} + sort.Stable(sortable) + if sortable.hasError { + return nil, errors.New("error in sort_by comparison") + } + return arr, nil + } else { + return nil, errors.New("invalid type, must be number or string") + } +} +func jpfJoin(arguments []interface{}) (interface{}, error) { + sep := arguments[0].(string) + // We can't just do arguments[1].([]string), we have to + // manually convert each item to a string.
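+ // For example, join(', ', `["a", "b"]`) evaluates to "a, b".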
+ arrayStr := []string{} + for _, item := range arguments[1].([]interface{}) { + arrayStr = append(arrayStr, item.(string)) + } + return strings.Join(arrayStr, sep), nil +} +func jpfReverse(arguments []interface{}) (interface{}, error) { + if s, ok := arguments[0].(string); ok { + r := []rune(s) + for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 { + r[i], r[j] = r[j], r[i] + } + return string(r), nil + } + items := arguments[0].([]interface{}) + length := len(items) + reversed := make([]interface{}, length) + for i, item := range items { + reversed[length-(i+1)] = item + } + return reversed, nil +} +func jpfToArray(arguments []interface{}) (interface{}, error) { + if _, ok := arguments[0].([]interface{}); ok { + return arguments[0], nil + } + return arguments[:1:1], nil +} +func jpfToString(arguments []interface{}) (interface{}, error) { + if v, ok := arguments[0].(string); ok { + return v, nil + } + result, err := json.Marshal(arguments[0]) + if err != nil { + return nil, err + } + return string(result), nil +} +func jpfToNumber(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if v, ok := arg.(float64); ok { + return v, nil + } + if v, ok := arg.(string); ok { + conv, err := strconv.ParseFloat(v, 64) + if err != nil { + return nil, nil + } + return conv, nil + } + if _, ok := arg.([]interface{}); ok { + return nil, nil + } + if _, ok := arg.(map[string]interface{}); ok { + return nil, nil + } + if arg == nil { + return nil, nil + } + if arg == true || arg == false { + return nil, nil + } + return nil, errors.New("unknown type") +} +func jpfNotNull(arguments []interface{}) (interface{}, error) { + for _, arg := range arguments { + if arg != nil { + return arg, nil + } + } + return nil, nil +} diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/jmespath.go b/vendor/github.com/jmespath/go-jmespath/fuzz/jmespath.go new file mode 100644 index 000000000..c7df08782 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/jmespath.go @@ -0,0 +1,13 @@ +package jmespath + +import "github.com/jmespath/go-jmespath" + +// Fuzz will fuzz test the JMESPath parser. 
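+// It is intended to be driven by a fuzzing harness such as go-fuzz: inputs
+// that fail to parse return 1, and inputs that parse cleanly return 0.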
+func Fuzz(data []byte) int { + p := jmespath.NewParser() + _, err := p.Parse(string(data)) + if err != nil { + return 1 + } + return 0 +} diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-1 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-1 new file mode 100644 index 000000000..191028156 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-1 @@ -0,0 +1 @@ +foo \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-10 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-10 new file mode 100644 index 000000000..4d5f9756e --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-10 @@ -0,0 +1 @@ +foo.bar \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-100 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-100 new file mode 100644 index 000000000..bc4f6a3f4 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-100 @@ -0,0 +1 @@ +ends_with(str, 'SStr') \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-101 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-101 new file mode 100644 index 000000000..81bf07a7a --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-101 @@ -0,0 +1 @@ +ends_with(str, 'foo') \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-102 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-102 new file mode 100644 index 000000000..3225de913 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-102 @@ -0,0 +1 @@ +floor(`1.2`) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-103 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-103 new file mode 100644 index 000000000..8cac95958 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-103 @@ -0,0 +1 @@ +floor(decimals[0]) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-104 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-104 new file mode 100644 index 000000000..bd76f47e2 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-104 @@ -0,0 +1 @@ +floor(foo) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-105 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-105 new file mode 100644 index 000000000..c719add3d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-105 @@ -0,0 +1 @@ +length('abc') \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-106 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-106 new file mode 100644 index 000000000..ff12f04f1 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-106 @@ -0,0 +1 @@ +length('') \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-107 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-107 new file mode 100644 index 000000000..0eccba1d3 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-107 @@ -0,0 +1 @@ +length(@) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-108 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-108 new file mode 100644 index 
000000000..ab14b0fa8 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-108 @@ -0,0 +1 @@ +length(strings[0]) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-109 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-109 new file mode 100644 index 000000000..f1514bb74 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-109 @@ -0,0 +1 @@ +length(str) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-110 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-110 new file mode 100644 index 000000000..09276059a --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-110 @@ -0,0 +1 @@ +length(array) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-112 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-112 new file mode 100644 index 000000000..ab14b0fa8 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-112 @@ -0,0 +1 @@ +length(strings[0]) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-115 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-115 new file mode 100644 index 000000000..bfb41ae98 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-115 @@ -0,0 +1 @@ +max(strings) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-118 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-118 new file mode 100644 index 000000000..915ec172a --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-118 @@ -0,0 +1 @@ +merge(`{}`) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-119 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-119 new file mode 100644 index 000000000..5b74e9b59 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-119 @@ -0,0 +1 @@ +merge(`{}`, `{}`) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-12 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-12 new file mode 100644 index 000000000..64c5e5885 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-12 @@ -0,0 +1 @@ +two \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-120 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-120 new file mode 100644 index 000000000..f34dcd8fa --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-120 @@ -0,0 +1 @@ +merge(`{"a": 1}`, `{"b": 2}`) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-121 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-121 new file mode 100644 index 000000000..e335dc96f --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-121 @@ -0,0 +1 @@ +merge(`{"a": 1}`, `{"a": 2}`) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-122 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-122 new file mode 100644 index 000000000..aac28fffe --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-122 @@ -0,0 +1 @@ +merge(`{"a": 1, "b": 2}`, `{"a": 2, "c": 3}`, `{"d": 4}`) \ No newline at end of file diff --git 
a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-123 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-123 new file mode 100644 index 000000000..1c6fd6719 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-123 @@ -0,0 +1 @@ +min(numbers) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-126 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-126 new file mode 100644 index 000000000..93e68db77 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-126 @@ -0,0 +1 @@ +min(decimals) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-128 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-128 new file mode 100644 index 000000000..554601ea4 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-128 @@ -0,0 +1 @@ +type('abc') \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-129 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-129 new file mode 100644 index 000000000..1ab2d9834 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-129 @@ -0,0 +1 @@ +type(`1.0`) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-13 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-13 new file mode 100644 index 000000000..1d19714ff --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-13 @@ -0,0 +1 @@ +three \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-130 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-130 new file mode 100644 index 000000000..3cee2f56f --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-130 @@ -0,0 +1 @@ +type(`2`) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-131 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-131 new file mode 100644 index 000000000..4821f9aef --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-131 @@ -0,0 +1 @@ +type(`true`) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-132 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-132 new file mode 100644 index 000000000..40b6913a6 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-132 @@ -0,0 +1 @@ +type(`false`) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-133 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-133 new file mode 100644 index 000000000..c711252be --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-133 @@ -0,0 +1 @@ +type(`null`) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-134 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-134 new file mode 100644 index 000000000..ec5d07e95 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-134 @@ -0,0 +1 @@ +type(`[0]`) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-135 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-135 new file mode 100644 index 000000000..2080401e1 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-135 @@ -0,0 +1 @@ +type(`{"a": "b"}`) \ No newline at end of 
file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-136 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-136 new file mode 100644 index 000000000..c5ee2ba5c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-136 @@ -0,0 +1 @@ +type(@) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-137 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-137 new file mode 100644 index 000000000..1814ca17b --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-137 @@ -0,0 +1 @@ +keys(objects) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-138 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-138 new file mode 100644 index 000000000..e03cdb0d6 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-138 @@ -0,0 +1 @@ +values(objects) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-139 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-139 new file mode 100644 index 000000000..7fea8d2ce --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-139 @@ -0,0 +1 @@ +keys(empty_hash) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-14 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-14 new file mode 100644 index 000000000..a17c92f59 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-14 @@ -0,0 +1 @@ +one.two \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-140 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-140 new file mode 100644 index 000000000..4f1d882a4 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-140 @@ -0,0 +1 @@ +join(', ', strings) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-141 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-141 new file mode 100644 index 000000000..4f1d882a4 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-141 @@ -0,0 +1 @@ +join(', ', strings) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-142 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-142 new file mode 100644 index 000000000..19ec1fe09 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-142 @@ -0,0 +1 @@ +join(',', `["a", "b"]`) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-143 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-143 new file mode 100644 index 000000000..761c68a6b --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-143 @@ -0,0 +1 @@ +join('|', strings) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-144 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-144 new file mode 100644 index 000000000..a0dd68eaa --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-144 @@ -0,0 +1 @@ +join('|', decimals[].to_string(@)) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-145 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-145 new file mode 100644 index 000000000..a4190b2ba --- /dev/null +++ 
b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-145 @@ -0,0 +1 @@ +join('|', empty_list) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-146 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-146 new file mode 100644 index 000000000..f5033c302 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-146 @@ -0,0 +1 @@ +reverse(numbers) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-147 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-147 new file mode 100644 index 000000000..822f054d5 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-147 @@ -0,0 +1 @@ +reverse(array) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-148 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-148 new file mode 100644 index 000000000..a584adcc0 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-148 @@ -0,0 +1 @@ +reverse(`[]`) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-149 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-149 new file mode 100644 index 000000000..fb4cc5dc4 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-149 @@ -0,0 +1 @@ +reverse('') \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-15 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-15 new file mode 100644 index 000000000..693f95496 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-15 @@ -0,0 +1 @@ +foo."1" \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-150 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-150 new file mode 100644 index 000000000..aa260fabc --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-150 @@ -0,0 +1 @@ +reverse('hello world') \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-151 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-151 new file mode 100644 index 000000000..d8c58826a --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-151 @@ -0,0 +1 @@ +starts_with(str, 'S') \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-152 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-152 new file mode 100644 index 000000000..32e16b7bb --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-152 @@ -0,0 +1 @@ +starts_with(str, 'St') \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-153 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-153 new file mode 100644 index 000000000..5f575ae7f --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-153 @@ -0,0 +1 @@ +starts_with(str, 'Str') \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-155 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-155 new file mode 100644 index 000000000..f31551c62 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-155 @@ -0,0 +1 @@ +sum(numbers) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-156 
b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-156 new file mode 100644 index 000000000..18b90446c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-156 @@ -0,0 +1 @@ +sum(decimals) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-157 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-157 new file mode 100644 index 000000000..def4d0bc1 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-157 @@ -0,0 +1 @@ +sum(array[].to_number(@)) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-158 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-158 new file mode 100644 index 000000000..48e4a7707 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-158 @@ -0,0 +1 @@ +sum(`[]`) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-159 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-159 new file mode 100644 index 000000000..9fb939a0b --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-159 @@ -0,0 +1 @@ +to_array('foo') \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-16 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-16 new file mode 100644 index 000000000..86155ed75 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-16 @@ -0,0 +1 @@ +foo."1"[0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-160 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-160 new file mode 100644 index 000000000..74ba7cc67 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-160 @@ -0,0 +1 @@ +to_array(`0`) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-161 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-161 new file mode 100644 index 000000000..57f8b983f --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-161 @@ -0,0 +1 @@ +to_array(objects) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-162 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-162 new file mode 100644 index 000000000..d17c7345f --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-162 @@ -0,0 +1 @@ +to_array(`[1, 2, 3]`) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-163 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-163 new file mode 100644 index 000000000..15f70f783 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-163 @@ -0,0 +1 @@ +to_array(false) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-164 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-164 new file mode 100644 index 000000000..9b227529b --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-164 @@ -0,0 +1 @@ +to_string('foo') \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-165 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-165 new file mode 100644 index 000000000..489a42935 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-165 @@ -0,0 +1 @@ +to_string(`1.2`) \ No newline at end of file diff --git 
a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-166 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-166 new file mode 100644 index 000000000..d17106a00 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-166 @@ -0,0 +1 @@ +to_string(`[0, 1]`) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-167 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-167 new file mode 100644 index 000000000..4f4ae9e68 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-167 @@ -0,0 +1 @@ +to_number('1.0') \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-168 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-168 new file mode 100644 index 000000000..ce932e2e6 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-168 @@ -0,0 +1 @@ +to_number('1.1') \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-169 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-169 new file mode 100644 index 000000000..e246fa4db --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-169 @@ -0,0 +1 @@ +to_number('4') \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-17 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-17 new file mode 100644 index 000000000..de0b4c39d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-17 @@ -0,0 +1 @@ +foo."-1" \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-170 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-170 new file mode 100644 index 000000000..f8c264747 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-170 @@ -0,0 +1 @@ +to_number('notanumber') \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-171 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-171 new file mode 100644 index 000000000..7d423b1cd --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-171 @@ -0,0 +1 @@ +to_number(`false`) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-172 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-172 new file mode 100644 index 000000000..503716b68 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-172 @@ -0,0 +1 @@ +to_number(`null`) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-173 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-173 new file mode 100644 index 000000000..7f61dfa15 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-173 @@ -0,0 +1 @@ +to_number(`[0]`) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-174 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-174 new file mode 100644 index 000000000..ee72a8c01 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-174 @@ -0,0 +1 @@ +to_number(`{"foo": 0}`) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-175 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-175 new file mode 100644 index 000000000..8d8f1f759 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-175 
@@ -0,0 +1 @@
+sort(numbers)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-178 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-178
new file mode 100644
index 000000000..8cb54ba47
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-178
@@ -0,0 +1 @@
+sort(empty_list)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-179 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-179
new file mode 100644
index 000000000..cf2c9b1db
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-179
@@ -0,0 +1 @@
+not_null(unknown_key, str)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-18 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-18
new file mode 100644
index 000000000..b516b2c48
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-18
@@ -0,0 +1 @@
+@
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-180 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-180
new file mode 100644
index 000000000..e047d4866
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-180
@@ -0,0 +1 @@
+not_null(unknown_key, foo.bar, empty_list, str)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-181 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-181
new file mode 100644
index 000000000..c4cc87b9c
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-181
@@ -0,0 +1 @@
+not_null(unknown_key, null_key, empty_list, str)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-182 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-182
new file mode 100644
index 000000000..2c7fa0a9c
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-182
@@ -0,0 +1 @@
+not_null(all, expressions, are_null)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-183 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-183
new file mode 100644
index 000000000..eb096e61c
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-183
@@ -0,0 +1 @@
+numbers[].to_string(@)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-184 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-184
new file mode 100644
index 000000000..4958abaec
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-184
@@ -0,0 +1 @@
+array[].to_number(@)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-185 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-185
new file mode 100644
index 000000000..102708472
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-185
@@ -0,0 +1 @@
+foo[].not_null(f, e, d, c, b, a)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-186 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-186
new file mode 100644
index 000000000..83cb91612
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-186
@@ -0,0 +1 @@
+sort_by(people, &age)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-187 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-187
new file mode 100644
index 000000000..a494d6c4b
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-187
@@ -0,0 +1 @@
+sort_by(people, &to_number(age_str))
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-188 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-188
new file mode 100644
index 000000000..2294fc54d
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-188
@@ -0,0 +1 @@
+sort_by(people, &age)[].name
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-189 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-189
new file mode 100644
index 000000000..bb8c2b46d
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-189
@@ -0,0 +1 @@
+sort_by(people, &age)[].extra
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-19 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-19
new file mode 100644
index 000000000..e3ed49ac6
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-19
@@ -0,0 +1 @@
+@.bar
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-190 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-190
new file mode 100644
index 000000000..3ab029034
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-190
@@ -0,0 +1 @@
+sort_by(`[]`, &age)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-191 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-191
new file mode 100644
index 000000000..97db56f7b
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-191
@@ -0,0 +1 @@
+max_by(people, &age)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-192 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-192
new file mode 100644
index 000000000..a7e648de9
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-192
@@ -0,0 +1 @@
+max_by(people, &age_str)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-193 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-193
new file mode 100644
index 000000000..be4348d0c
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-193
@@ -0,0 +1 @@
+max_by(people, &to_number(age_str))
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-194 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-194
new file mode 100644
index 000000000..a707283d4
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-194
@@ -0,0 +1 @@
+min_by(people, &age)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-195 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-195
new file mode 100644
index 000000000..2cd6618d8
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-195
@@ -0,0 +1 @@
+min_by(people, &age_str)
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-196 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-196
new file mode 100644
index 000000000..833e68373
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-196
@@ -0,0 +1 @@
+min_by(people, &to_number(age_str))
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-198 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-198
new file mode 100644
index 000000000..706dbda89
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-198
@@ -0,0 +1 @@
+__L
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-199 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-199
new file mode 100644
index 000000000..ca593ca93
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-199
@@ -0,0 +1 @@
+"!\r"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-2 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-2
new file mode 100644
index 000000000..4d5f9756e
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-2
@@ -0,0 +1 @@
+foo.bar
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-20 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-20
new file mode 100644
index 000000000..f300ab917
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-20
@@ -0,0 +1 @@
+@.foo[0]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-200 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-200
new file mode 100644
index 000000000..9c9384354
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-200
@@ -0,0 +1 @@
+Y_1623
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-201 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-201
new file mode 100644
index 000000000..c1b0730e0
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-201
@@ -0,0 +1 @@
+x
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-202 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-202
new file mode 100644
index 000000000..1552ec63a
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-202
@@ -0,0 +1 @@
+"\tF\uCebb"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-203 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-203
new file mode 100644
index 000000000..047041273
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-203
@@ -0,0 +1 @@
+" \t"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-204 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-204
new file mode 100644
index 000000000..efd782cc3
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-204
@@ -0,0 +1 @@
+" "
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-205 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-205
new file mode 100644
index 000000000..8494ac270
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-205
@@ -0,0 +1 @@
+v2
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-206 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-206
new file mode 100644
index 000000000..c61f7f7eb
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-206
@@ -0,0 +1 @@
+"\t"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-207 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-207
new file mode 100644
index 000000000..f6055f189
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-207
@@ -0,0 +1 @@
+_X
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-208 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-208
new file mode 100644
index 000000000..4f58e0e7b
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-208
@@ -0,0 +1 @@
+"\t4\ud9da\udd15"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-209 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-209
new file mode 100644
index 000000000..f536bfbf6
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-209
@@ -0,0 +1 @@
+v24_W
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-21 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-21
new file mode 100644
index 000000000..ef47ff2c0
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-21
@@ -0,0 +1 @@
+"foo.bar"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-210 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-210
new file mode 100644
index 000000000..69759281c
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-210
@@ -0,0 +1 @@
+"H"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-211 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-211
new file mode 100644
index 000000000..c3e8b5927
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-211
@@ -0,0 +1 @@
+"\f"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-212 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-212
new file mode 100644
index 000000000..24ecc222c
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-212
@@ -0,0 +1 @@
+"E4"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-213 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-213
new file mode 100644
index 000000000..5693009d2
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-213
@@ -0,0 +1 @@
+"!"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-214 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-214
new file mode 100644
index 000000000..62dd220e7
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-214
@@ -0,0 +1 @@
+tM
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-215 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-215
new file mode 100644
index 000000000..3c1e81f55
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-215
@@ -0,0 +1 @@
+" ["
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-216 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-216
new file mode 100644
index 000000000..493daa673
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-216
@@ -0,0 +1 @@
+"R!"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-217 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-217
new file mode 100644
index 000000000..116b50ab3
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-217
@@ -0,0 +1 @@
+_6W
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-218 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-218
new file mode 100644
index 000000000..0073fac45
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-218
@@ -0,0 +1 @@
+"\uaBA1\r"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-219 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-219
new file mode 100644
index 000000000..00d8fa37e
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-219
@@ -0,0 +1 @@
+tL7
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-22 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-22
new file mode 100644
index 000000000..661ebcfa3
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-22
@@ -0,0 +1 @@
+"foo bar"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-220 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-220
new file mode 100644
index 000000000..c14f16e02
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-220
@@ -0,0 +1 @@
+"<"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-257 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-257
new file mode 100644
index 000000000..8a2443e6e
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-257
@@ -0,0 +1 @@
+hvu
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-258 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-258
new file mode 100644
index 000000000..c9ddacbb6
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-258
@@ -0,0 +1 @@
+"; !"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-259 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-259
new file mode 100644
index 000000000..d0209c6df
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-259
@@ -0,0 +1 @@
+hU
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-26 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-26
new file mode 100644
index 000000000..82649bd24
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-26
@@ -0,0 +1 @@
+"/unix/path"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-260 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-260
new file mode 100644
index 000000000..c07242aa4
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-260
@@ -0,0 +1 @@
+"!I\n\/"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-261 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-261
new file mode 100644
index 000000000..7aae4effc
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-261
@@ -0,0 +1 @@
+"\uEEbF"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-262 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-262
new file mode 100644
index 000000000..c1574f35f
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-262
@@ -0,0 +1 @@
+"U)\t"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-263 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-263
new file mode 100644
index 000000000..5197e3a2b
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-263
@@ -0,0 +1 @@
+fa0_9
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-264 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-264
new file mode 100644
index 000000000..320558b00
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-264
@@ -0,0 +1 @@
+"/"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-265 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-265
new file mode 100644
index 000000000..4a2cb0865
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-265
@@ -0,0 +1 @@
+Gy
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-266 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-266
new file mode 100644
index 000000000..9524c8381
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-266
@@ -0,0 +1 @@
+"\b"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-267 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-267
new file mode 100644
index 000000000..066b8d98b
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-267
@@ -0,0 +1 @@
+"<"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-268 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-268
new file mode 100644
index 000000000..c61f7f7eb
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-268
@@ -0,0 +1 @@
+"\t"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-269 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-269
new file mode 100644
index 000000000..a582f62d2
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-269
@@ -0,0 +1 @@
+"\t&\\\r"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-27 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-27
new file mode 100644
index 000000000..a1d50731c
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-27
@@ -0,0 +1 @@
+"\"\"\""
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-270 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-270
new file mode 100644
index 000000000..e3c5eedeb
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-270
@@ -0,0 +1 @@
+"#"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-271 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-271
new file mode 100644
index 000000000..e75309a52
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-271
@@ -0,0 +1 @@
+B__
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-272 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-272
new file mode 100644
index 000000000..027177272
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-272
@@ -0,0 +1 @@
+"\nS \n"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-273 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-273
new file mode 100644
index 000000000..99432276e
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-273
@@ -0,0 +1 @@
+Bp
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-274 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-274
new file mode 100644
index 000000000..d4f8a788b
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-274
@@ -0,0 +1 @@
+",\t;"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-275 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-275
new file mode 100644
index 000000000..56c384f75
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-275
@@ -0,0 +1 @@
+B_q
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-276 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-276
new file mode 100644
index 000000000..f093d2aa3
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-276
@@ -0,0 +1 @@
+"\/+\t\n\b!Z"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-277 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-277
new file mode 100644
index 000000000..11e1229d9
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-277
@@ -0,0 +1 @@
+"󇟇\\ueFAc"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-278 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-278
new file mode 100644
index 000000000..90dbfcfcd
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-278
@@ -0,0 +1 @@
+":\f"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-279 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-279
new file mode 100644
index 000000000..b06b83025
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-279
@@ -0,0 +1 @@
+"\/"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-28 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-28
new file mode 100644
index 000000000..5f55d73af
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-28
@@ -0,0 +1 @@
+"bar"."baz"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-280 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-280
new file mode 100644
index 000000000..0e4bf7c11
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-280
@@ -0,0 +1 @@
+_BW_6Hg_Gl
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-281 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-281
new file mode 100644
index 000000000..81bb45f80
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-281
@@ -0,0 +1 @@
+"􃰂"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-282 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-282
new file mode 100644
index 000000000..d0b4de146
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-282
@@ -0,0 +1 @@
+zs1DC
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-283 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-283
new file mode 100644
index 000000000..68797580c
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-283
@@ -0,0 +1 @@
+__434
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-284 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-284
new file mode 100644
index 000000000..e61be91c4
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-284
@@ -0,0 +1 @@
+"󵅁"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-285 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-285
new file mode 100644
index 000000000..026cb9cbb
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-285
@@ -0,0 +1 @@
+Z_5
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-286 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-286
new file mode 100644
index 000000000..ca9587d06
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-286
@@ -0,0 +1 @@
+z_M_
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-287 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-287
new file mode 100644
index 000000000..67f6d9c42
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-287
@@ -0,0 +1 @@
+YU_2
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-288 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-288
new file mode 100644
index 000000000..927ab653a
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-288
@@ -0,0 +1 @@
+_0
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-289 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-289
new file mode 100644
index 000000000..39307ab93
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-289
@@ -0,0 +1 @@
+"\b+"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-29 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-29
new file mode 100644
index 000000000..8b0c5b41b
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-29
@@ -0,0 +1 @@
+foo[?name == 'a']
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-290 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-290
new file mode 100644
index 000000000..a3ec2ed7a
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-290
@@ -0,0 +1 @@
+"\""
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-291 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-291
new file mode 100644
index 000000000..26bf7e122
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-291
@@ -0,0 +1 @@
+D7
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-292 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-292
new file mode 100644
index 000000000..d595c9f43
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-292
@@ -0,0 +1 @@
+_62L
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-293 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-293
new file mode 100644
index 000000000..f68696949
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-293
@@ -0,0 +1 @@
+"\tK\t"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-294 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-294
new file mode 100644
index 000000000..f3a9b7edb
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-294
@@ -0,0 +1 @@
+"\n\\\f"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-295 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-295
new file mode 100644
index 000000000..455f00ffc
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-295
@@ -0,0 +1 @@
+I_
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-296 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-296
new file mode 100644
index 000000000..ccd5968f9
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-296
@@ -0,0 +1 @@
+W_a0_
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-297 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-297
new file mode 100644
index 000000000..ee55c16fc
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-297
@@ -0,0 +1 @@
+BQ
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-298 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-298
new file mode 100644
index 000000000..0d1a169a6
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-298
@@ -0,0 +1 @@
+"\tX$\uABBb"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-299 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-299
new file mode 100644
index 000000000..0573cfd73
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-299
@@ -0,0 +1 @@
+Z9
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-3 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-3
new file mode 100644
index 000000000..f0fcbd8ea
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-3
@@ -0,0 +1 @@
+foo.bar.baz
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-30 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-30
new file mode 100644
index 000000000..4f8e6a17a
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-30
@@ -0,0 +1 @@
+*[?[0] == `0`]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-300 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-300
new file mode 100644
index 000000000..a0db02beb
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-300
@@ -0,0 +1 @@
+"\b%\"򞄏"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-301 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-301
new file mode 100644
index 000000000..56032f7a2
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-301
@@ -0,0 +1 @@
+_F
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-302 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-302
new file mode 100644
index 000000000..4a8a3cff3
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-302
@@ -0,0 +1 @@
+"!,"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-303 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-303
new file mode 100644
index 000000000..7c1efac00
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-303
@@ -0,0 +1 @@
+"\"!"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-304 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-304
new file mode 100644
index 000000000..a0f489d53
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-304
@@ -0,0 +1 @@
+Hh
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-305 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-305
new file mode 100644
index 000000000..c64e8d5ac
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-305
@@ -0,0 +1 @@
+"&"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-306 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-306
new file mode 100644
index 000000000..0567e992f
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-306
@@ -0,0 +1 @@
+"9\r\\R"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-307 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-307
new file mode 100644
index 000000000..ce8245c5b
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-307
@@ -0,0 +1 @@
+M_k
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-308 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-308
new file mode 100644
index 000000000..8f16a5ac0
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-308
@@ -0,0 +1 @@
+"!\b\n󑩒\"\""
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-309 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-309
new file mode 100644
index 000000000..504ff5ae3
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-309
@@ -0,0 +1 @@
+"6"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-31 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-31
new file mode 100644
index 000000000..07fb57234
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-31
@@ -0,0 +1 @@
+foo[?first == last]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-310 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-310
new file mode 100644
index 000000000..533dd8e54
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-310
@@ -0,0 +1 @@
+_7
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-311 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-311
new file mode 100644
index 000000000..1e4a3a341
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-311
@@ -0,0 +1 @@
+"0"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-312 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-312
new file mode 100644
index 000000000..37961f6ca
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-312
@@ -0,0 +1 @@
+"\\8\\"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-313 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-313
new file mode 100644
index 000000000..23480cff1
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-313
@@ -0,0 +1 @@
+b7eo
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-314 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-314
new file mode 100644
index 000000000..e609f81a3
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-314
@@ -0,0 +1 @@
+xIUo9
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-315 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-315
new file mode 100644
index 000000000..d89a25f0b
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-315
@@ -0,0 +1 @@
+"5"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-316 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-316
new file mode 100644
index 000000000..5adcf5e7d
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-316
@@ -0,0 +1 @@
+"?"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-317 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-317
new file mode 100644
index 000000000..ace4a897d
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-317
@@ -0,0 +1 @@
+sU
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-318 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-318
new file mode 100644
index 000000000..feffb7061
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-318
@@ -0,0 +1 @@
+"VH2&H\\\/"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-319 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-319
new file mode 100644
index 000000000..8223f1e51
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-319
@@ -0,0 +1 @@
+_C
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-32 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-32
new file mode 100644
index 000000000..7e85c4bdf
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-32
@@ -0,0 +1 @@
+foo[?first == last].first
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-320 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-320
new file mode 100644
index 000000000..c9cdc63b0
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-320
@@ -0,0 +1 @@
+_
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-321 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-321
new file mode 100644
index 000000000..c82f7982e
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-321
@@ -0,0 +1 @@
+"<\t"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-322 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-322
new file mode 100644
index 000000000..dae65c515
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-322
@@ -0,0 +1 @@
+"\uD834\uDD1E"
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-323 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-323
new file mode 100644
index 000000000..b6b369543
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-323
@@ -0,0 +1 @@
+foo.bar[0]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-324 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-324
new file mode 100644
index 000000000..bf06e678c
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-324
@@ -0,0 +1 @@
+foo.bar[1]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-325 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-325
new file mode 100644
index 000000000..5d48e0205
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-325
@@ -0,0 +1 @@
+foo.bar[2]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-326 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-326
new file mode 100644
index 000000000..de3af7230
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-326
@@ -0,0 +1 @@
+foo.bar[3]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-327 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-327
new file mode 100644
index 000000000..a1c333508
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-327
@@ -0,0 +1 @@
+foo.bar[-1]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-328 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-328
new file mode 100644
index 000000000..ad0fef91c
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-328
@@ -0,0 +1 @@
+foo.bar[-2]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-329 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-329
new file mode 100644
index 000000000..3e83c6f73
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-329
@@ -0,0 +1 @@
+foo.bar[-3]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-33 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-33
new file mode 100644
index 000000000..72fc0a53e
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-33
@@ -0,0 +1 @@
+foo[?age > `25`]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-330 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-330
new file mode 100644
index 000000000..433a737d6
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-330
@@ -0,0 +1 @@
+foo.bar[-4]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-331 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-331
new file mode 100644
index 000000000..4d5f9756e
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-331
@@ -0,0 +1 @@
+foo.bar
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-332 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-332
new file mode 100644
index 000000000..5e0d9b717
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-332
@@ -0,0 +1 @@
+foo[0].bar
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-333 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-333
new file mode 100644
index 000000000..3cd7e9460
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-333
@@ -0,0 +1 @@
+foo[1].bar
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-334 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-334
new file mode 100644
index 000000000..74cb17655
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-334
@@ -0,0 +1 @@
+foo[2].bar
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-335 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-335
new file mode 100644
index 000000000..3cf2007f7
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-335
@@ -0,0 +1 @@
+foo[3].notbar
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-336 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-336
new file mode 100644
index 000000000..9674d8803
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-336
@@ -0,0 +1 @@
+foo[3].bar
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-337 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-337
new file mode 100644
index 000000000..9b0b2f818
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-337
@@ -0,0 +1 @@
+foo[0]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-338 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-338
new file mode 100644
index 000000000..83c639a18
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-338
@@ -0,0 +1 @@
+foo[1]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-339 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-339
new file mode 100644
index 000000000..3b76c9f64
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-339
@@ -0,0 +1 @@
+foo[2]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-34 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-34
new file mode 100644
index 000000000..9a2b0184e
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-34
@@ -0,0 +1 @@
+foo[?age >= `25`]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-340 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-340
new file mode 100644
index 000000000..ff99e045d
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-340
@@ -0,0 +1 @@
+foo[3]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-341 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-341
new file mode 100644
index 000000000..040ecb240
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-341
@@ -0,0 +1 @@
+foo[4]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-342 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-342
new file mode 100644
index 000000000..6e7ea636e
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-342
@@ -0,0 +1 @@
+[0]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-343 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-343
new file mode 100644
index 000000000..bace2a0be
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-343
@@ -0,0 +1 @@
+[1]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-344 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-344
new file mode 100644
index 000000000..5d50c80c0
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-344
@@ -0,0 +1 @@
+[2]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-345 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-345
new file mode 100644
index 000000000..99d21a2a0
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-345
@@ -0,0 +1 @@
+[-1]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-346 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-346
new file mode 100644
index 000000000..133a9c627
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-346
@@ -0,0 +1 @@
+[-2]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-347 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-347
new file mode 100644
index 000000000..b7f78c5dc
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-347
@@ -0,0 +1 @@
+[-3]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-348 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-348
new file mode 100644
index 000000000..bd9de815f
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-348
@@ -0,0 +1 @@
+reservations[].instances[].foo
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-349 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-349
new file mode 100644
index 000000000..55e625735
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-349
@@ -0,0 +1 @@
+reservations[].instances[].bar
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-35 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-35
new file mode 100644
index 000000000..fa83f1da3
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-35
@@ -0,0 +1 @@
+foo[?age > `30`]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-350 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-350
new file mode 100644
index 000000000..1661747c0
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-350
@@ -0,0 +1 @@
+reservations[].notinstances[].foo
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-351 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-351
new file mode 100644
index 000000000..1661747c0
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-351
@@ -0,0 +1 @@
+reservations[].notinstances[].foo
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-352 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-352
new file mode 100644
index 000000000..3debc70f8
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-352
@@ -0,0 +1 @@
+reservations[].instances[].foo[].bar
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-353 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-353
new file mode 100644
index 000000000..75af2fda0
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-353
@@ -0,0 +1 @@
+reservations[].instances[].foo[].baz
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-354 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-354
new file mode 100644
index 000000000..4a70cd8a0
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-354
@@ -0,0 +1 @@
+reservations[].instances[].notfoo[].bar
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-355 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-355
new file mode 100644
index 000000000..987985b00
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-355
@@ -0,0 +1 @@
+reservations[].instances[].notfoo[].notbar
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-356 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-356
new file mode 100644
index 000000000..1661747c0
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-356
@@ -0,0 +1 @@
+reservations[].notinstances[].foo
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-357 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-357
new file mode 100644
index 000000000..634f937e5
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-357
@@ -0,0 +1 @@
+reservations[].instances[].foo[].notbar
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-358 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-358
new file mode 100644
index 000000000..09cb7b8bb
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-358
@@ -0,0 +1 @@
+reservations[].instances[].bar[].baz
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-359 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-359
new file mode 100644
index 000000000..f5d9ac5b7
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-359
@@ -0,0 +1 @@
+reservations[].instances[].baz[].baz
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-36 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-36
new file mode 100644
index 000000000..463a2a542
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-36
@@ -0,0 +1 @@
+foo[?age < `25`]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-360 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-360
new file mode 100644
index 000000000..d1016d6e7
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-360
@@ -0,0 +1 @@
+reservations[].instances[].qux[].baz
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-361 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-361
new file mode 100644
index 000000000..ef54cf52d
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-361
@@ -0,0 +1 @@
+reservations[].instances[].qux[].baz[]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-362 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-362
new file mode 100644
index 000000000..bea506ff2
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-362
@@ -0,0 +1 @@
+foo[]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-363 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-363
new file mode 100644
index 000000000..20dd081e0
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-363
@@ -0,0 +1 @@
+foo[][0]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-364 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-364
new file mode 100644
index 000000000..4803734b0
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-364
@@ -0,0 +1 @@
+foo[][1]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-365 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-365
new file mode 100644
index 000000000..1be565985
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-365
@@ -0,0 +1 @@
+foo[][0][0]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-366 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-366
new file mode 100644
index 000000000..d2cf6da59
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-366
@@ -0,0 +1 @@
+foo[][2][2]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-367 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-367
new file mode 100644
index 000000000..c609ca64b
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-367
@@ -0,0 +1 @@
+foo[][0][0][100]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-368 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-368
new file mode 100644
index 000000000..191028156
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-368
@@ -0,0 +1 @@
+foo
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-369 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-369
new file mode 100644
index 000000000..bea506ff2
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-369
@@ -0,0 +1 @@
+foo[]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-37 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-37
new file mode 100644
index 000000000..10ed5d3f6
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-37
@@ -0,0 +1 @@
+foo[?age <= `25`]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-370 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-370
new file mode 100644
index 000000000..13f2c4a0b
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-370
@@ -0,0 +1 @@
+foo[].bar
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-371 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-371
new file mode 100644
index 000000000..edf3d9277
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-371
@@ -0,0 +1 @@
+foo[].bar[]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-372 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-372
new file mode 100644
index 000000000..2a3b993af
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-372
@@ -0,0 +1 @@
+foo[].bar[].baz
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-373 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-373
new file mode 100644
index 000000000..d5ca878a1
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-373
@@ -0,0 +1 @@
+string[]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-374 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-374
new file mode 100644
index 000000000..fcd255f5d
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-374
@@ -0,0 +1 @@
+hash[]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-375 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-375
new file mode 100644
index 000000000..2d53bd7cd
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-375
@@ -0,0 +1 @@
+number[]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-376 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-376
new file mode 100644
index 000000000..cb10d2497
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-376
@@ -0,0 +1 @@
+nullvalue[]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-377 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-377
new file mode 100644
index 000000000..f6c79ca84
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-377
@@ -0,0 +1 @@
+string[].foo
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-378 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-378
new file mode 100644
index 000000000..09bf36e8a
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-378
@@ -0,0 +1 @@
+hash[].foo
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-379 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-379
new file mode 100644
index 000000000..4c3578189
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-379
@@ -0,0 +1 @@
+number[].foo
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-38 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-38
new file mode 100644
index 000000000..16a4c36ac
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-38
@@ -0,0 +1 @@
+foo[?age < `20`]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-380 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-380
new file mode 100644
index 000000000..2dd8ae218
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-380
@@ -0,0 +1 @@
+nullvalue[].foo
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-381 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-381
new file mode 100644
index 000000000..dfed81603
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-381
@@ -0,0 +1 @@
+nullvalue[].foo[].bar
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-382 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-382
new file mode 100644
index 000000000..d7628e646
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-382
@@ -0,0 +1 @@
+`"foo"`
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-383 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-383
new file mode 100644
index 000000000..49c5269b1
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-383
@@ -0,0 +1 @@
+`"\u03a6"`
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-384 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-384
new file mode 100644
index 000000000..d5db721d0
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-384
@@ -0,0 +1 @@
+`"✓"`
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-385 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-385
new file mode 100644
index 000000000..a2b6e4ec8
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-385
@@ -0,0 +1 @@
+`[1, 2, 3]`
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-386 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-386
new file mode 100644
index 000000000..f5801bdd6
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-386
@@ -0,0 +1 @@
+`{"a": "b"}`
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-387 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-387
new file mode 100644
index 000000000..f87db59a8
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-387
@@ -0,0 +1 @@
+`true`
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-388 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-388
new file mode 100644
index 000000000..3b20d905f
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-388
@@ -0,0 +1 @@
+`false`
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-389 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-389
new file mode 100644
index 000000000..70bcd29a7
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-389
@@ -0,0 +1 @@
+`null`
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-39 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-39
new file mode 100644
index 000000000..351054d3e
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-39
@@ -0,0 +1 @@
+foo[?age == `20`]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-390 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-390
new file mode 100644
index 000000000..0918d4155
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-390
@@ -0,0 +1 @@
+`0`
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-391 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-391
new file mode 100644
index 000000000..ef70c4c11
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-391
@@ -0,0 +1 @@
+`1`
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-392 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-392
new file mode 100644
index 000000000..b39a922f4
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-392
@@ -0,0 +1 @@
+`2`
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-393 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-393
new file mode 100644
index 000000000..7e65687db
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-393
@@ -0,0 +1 @@
+`3`
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-394 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-394
new file mode 100644
index 000000000..770d1ece7
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-394
@@ -0,0 +1 @@
+`4`
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-395 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-395
new file mode 100644
index 000000000..a8b81985c
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-395
@@ -0,0 +1 @@
+`5`
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-396 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-396
new file mode 100644
index 000000000..7f0861065
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-396
@@ -0,0 +1 @@
+`6`
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-397 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-397
new file mode 100644
index 000000000..495114d91
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-397
@@ -0,0 +1 @@
+`7`
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-398 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-398
new file mode 100644
index 000000000..94f355c46
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-398
@@ -0,0 +1 @@
+`8`
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-399 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-399
new file mode 100644
index 000000000..600d2aa3f
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-399
@@ -0,0 +1 @@
+`9`
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-4 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-4
new file mode 100644
index 000000000..314852235
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-4
@@ -0,0 +1 @@
+foo.bar.baz.bad
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-40 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-40
new file mode 100644
index 000000000..99d9258a6
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-40
@@ -0,0 +1 @@
+foo[?age != `20`]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-400 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-400
new file mode 100644
index 000000000..637015b5f
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-400
@@ -0,0 +1 @@
+`"foo\`bar"`
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-401 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-401
new file mode 100644
index 000000000..6fa7557b8
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-401
@@ -0,0 +1 @@
+`"foo\"bar"`
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-402 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-402
new file mode 100644
index 000000000..5aabeec34
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-402
@@ -0,0 +1 @@
+`"1\`"`
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-403 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-403
new file mode 100644
index 000000000..8302ea198
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-403
@@ -0,0 +1 @@
+`"\\"`.{a:`"b"`}
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-404 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-404
new file mode 100644
index 000000000..d88d014a9
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-404
@@ -0,0 +1 @@
+`{"a": "b"}`.a
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-405 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-405
new file mode 100644
index 000000000..47152dddb
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-405
@@ -0,0 +1 @@
+`{"a": {"b": "c"}}`.a.b
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-406 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-406
new file mode 100644
index 000000000..895d42938
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-406
@@ -0,0 +1 @@
+`[0, 1, 2]`[1]
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-407 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-407
new file mode 100644
index 000000000..42500a368
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-407
@@ -0,0 +1 @@
+` {"foo": true}`
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-408 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-408
new file mode 100644
index 000000000..08b944dad
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-408
@@ -0,0 +1 @@
+`{"foo": true} `
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-409 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-409
new file mode 100644
index 000000000..6de163f80
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-409
@@ -0,0 +1 @@
+'foo'
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-41 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-41
new file mode 100644
index 000000000..5bc357d9f
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-41
@@ -0,0 +1 @@
+foo[?top.name == 'a']
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-410 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-410
new file mode 100644
index 000000000..b84bbdb29
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-410
@@ -0,0 +1 @@
+' foo '
\ No newline at end of file
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-411 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-411
new file mode 100644
index
000000000..bf6a07ace --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-411 @@ -0,0 +1 @@ +'0' \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-412 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-412 new file mode 100644 index 000000000..c742f5b0c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-412 @@ -0,0 +1,2 @@ +'newline +' \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-413 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-413 new file mode 100644 index 000000000..04e9b3ade --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-413 @@ -0,0 +1,2 @@ +' +' \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-414 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-414 new file mode 100644 index 000000000..ebdaf120d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-414 @@ -0,0 +1 @@ +'✓' \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-415 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-415 new file mode 100644 index 000000000..d0ba5d7fa --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-415 @@ -0,0 +1 @@ +'𝄞' \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-416 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-416 new file mode 100644 index 000000000..19c2e2ef4 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-416 @@ -0,0 +1 @@ +' [foo] ' \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-417 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-417 new file mode 100644 index 000000000..5faa483b1 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-417 @@ -0,0 +1 @@ +'[foo]' \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-418 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-418 new file mode 100644 index 000000000..e3c05c163 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-418 @@ -0,0 +1 @@ +'\u03a6' \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-419 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-419 new file mode 100644 index 000000000..7c13861ac --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-419 @@ -0,0 +1 @@ +foo.{bar: bar} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-42 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-42 new file mode 100644 index 000000000..d037a0a4d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-42 @@ -0,0 +1 @@ +foo[?top.first == top.last] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-420 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-420 new file mode 100644 index 000000000..f795c2552 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-420 @@ -0,0 +1 @@ +foo.{"bar": bar} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-421 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-421 new file mode 100644 index 
000000000..772c45639 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-421 @@ -0,0 +1 @@ +foo.{"foo.bar": bar} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-422 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-422 new file mode 100644 index 000000000..8808e92bf --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-422 @@ -0,0 +1 @@ +foo.{bar: bar, baz: baz} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-423 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-423 new file mode 100644 index 000000000..3f13757a1 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-423 @@ -0,0 +1 @@ +foo.{"bar": bar, "baz": baz} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-424 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-424 new file mode 100644 index 000000000..23cd8903e --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-424 @@ -0,0 +1 @@ +{"baz": baz, "qux\"": "qux\""} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-425 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-425 new file mode 100644 index 000000000..fabb6da4f --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-425 @@ -0,0 +1 @@ +foo.{bar:bar,baz:baz} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-426 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-426 new file mode 100644 index 000000000..4c3f615b1 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-426 @@ -0,0 +1 @@ +foo.{bar: bar,qux: qux} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-427 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-427 new file mode 100644 index 000000000..8bc46535a --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-427 @@ -0,0 +1 @@ +foo.{bar: bar, noexist: noexist} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-428 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-428 new file mode 100644 index 000000000..2024b6f11 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-428 @@ -0,0 +1 @@ +foo.{noexist: noexist, alsonoexist: alsonoexist} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-429 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-429 new file mode 100644 index 000000000..b52191d10 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-429 @@ -0,0 +1 @@ +foo.badkey.{nokey: nokey, alsonokey: alsonokey} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-43 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-43 new file mode 100644 index 000000000..8534a5cae --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-43 @@ -0,0 +1 @@ +foo[?top == `{"first": "foo", "last": "bar"}`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-430 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-430 new file mode 100644 index 000000000..5cd310b6d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-430 @@ 
-0,0 +1 @@ +foo.nested.*.{a: a,b: b} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-431 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-431 new file mode 100644 index 000000000..0b24ef535 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-431 @@ -0,0 +1 @@ +foo.nested.three.{a: a, cinner: c.inner} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-432 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-432 new file mode 100644 index 000000000..473c1c351 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-432 @@ -0,0 +1 @@ +foo.nested.three.{a: a, c: c.inner.bad.key} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-433 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-433 new file mode 100644 index 000000000..44ba735ab --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-433 @@ -0,0 +1 @@ +foo.{a: nested.one.a, b: nested.two.b} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-434 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-434 new file mode 100644 index 000000000..f5f89b12b --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-434 @@ -0,0 +1 @@ +{bar: bar, baz: baz} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-435 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-435 new file mode 100644 index 000000000..697764cb3 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-435 @@ -0,0 +1 @@ +{bar: bar} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-436 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-436 new file mode 100644 index 000000000..20447fb10 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-436 @@ -0,0 +1 @@ +{otherkey: bar} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-437 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-437 new file mode 100644 index 000000000..310b9b1dd --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-437 @@ -0,0 +1 @@ +{no: no, exist: exist} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-438 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-438 new file mode 100644 index 000000000..c79b2e240 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-438 @@ -0,0 +1 @@ +foo.[bar] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-439 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-439 new file mode 100644 index 000000000..ab498ef65 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-439 @@ -0,0 +1 @@ +foo.[bar,baz] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-44 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-44 new file mode 100644 index 000000000..71307c409 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-44 @@ -0,0 +1 @@ +foo[?key == `true`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-440 
b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-440 new file mode 100644 index 000000000..4b8f39a46 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-440 @@ -0,0 +1 @@ +foo.[bar,qux] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-441 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-441 new file mode 100644 index 000000000..b8f9020f8 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-441 @@ -0,0 +1 @@ +foo.[bar,noexist] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-442 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-442 new file mode 100644 index 000000000..b7c7b3f65 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-442 @@ -0,0 +1 @@ +foo.[noexist,alsonoexist] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-443 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-443 new file mode 100644 index 000000000..fabb6da4f --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-443 @@ -0,0 +1 @@ +foo.{bar:bar,baz:baz} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-444 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-444 new file mode 100644 index 000000000..c15c39f82 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-444 @@ -0,0 +1 @@ +foo.[bar,baz[0]] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-445 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-445 new file mode 100644 index 000000000..9cebd8984 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-445 @@ -0,0 +1 @@ +foo.[bar,baz[1]] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-446 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-446 new file mode 100644 index 000000000..c5bbfbf84 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-446 @@ -0,0 +1 @@ +foo.[bar,baz[2]] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-447 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-447 new file mode 100644 index 000000000..d81cb2b90 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-447 @@ -0,0 +1 @@ +foo.[bar,baz[3]] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-448 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-448 new file mode 100644 index 000000000..3a65aa7d6 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-448 @@ -0,0 +1 @@ +foo.[bar[0],baz[3]] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-449 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-449 new file mode 100644 index 000000000..8808e92bf --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-449 @@ -0,0 +1 @@ +foo.{bar: bar, baz: baz} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-45 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-45 new file mode 100644 index 000000000..e142b22a2 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-45 @@ -0,0 +1 @@ +foo[?key == `false`] \ No newline 
at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-450 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-450 new file mode 100644 index 000000000..ab498ef65 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-450 @@ -0,0 +1 @@ +foo.[bar,baz] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-451 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-451 new file mode 100644 index 000000000..8e3d22dc5 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-451 @@ -0,0 +1 @@ +foo.{bar: bar.baz[1],includeme: includeme} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-452 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-452 new file mode 100644 index 000000000..398c7f8b0 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-452 @@ -0,0 +1 @@ +foo.{"bar.baz.two": bar.baz[1].two, includeme: includeme} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-453 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-453 new file mode 100644 index 000000000..a17644487 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-453 @@ -0,0 +1 @@ +foo.[includeme, bar.baz[*].common] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-454 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-454 new file mode 100644 index 000000000..da5225ddc --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-454 @@ -0,0 +1 @@ +foo.[includeme, bar.baz[*].none] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-455 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-455 new file mode 100644 index 000000000..a8870b22b --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-455 @@ -0,0 +1 @@ +foo.[includeme, bar.baz[].common] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-456 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-456 new file mode 100644 index 000000000..420b1a57c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-456 @@ -0,0 +1 @@ +reservations[*].instances[*].{id: id, name: name} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-457 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-457 new file mode 100644 index 000000000..0761ee16d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-457 @@ -0,0 +1 @@ +reservations[].instances[].{id: id, name: name} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-458 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-458 new file mode 100644 index 000000000..aa1191a48 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-458 @@ -0,0 +1 @@ +reservations[].instances[].[id, name] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-459 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-459 new file mode 100644 index 000000000..191028156 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-459 @@ -0,0 +1 @@ +foo \ No newline at end of file diff --git 
a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-46 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-46 new file mode 100644 index 000000000..9a24a464e --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-46 @@ -0,0 +1 @@ +foo[?key == `0`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-460 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-460 new file mode 100644 index 000000000..bea506ff2 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-460 @@ -0,0 +1 @@ +foo[] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-461 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-461 new file mode 100644 index 000000000..13f2c4a0b --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-461 @@ -0,0 +1 @@ +foo[].bar \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-462 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-462 new file mode 100644 index 000000000..edf3d9277 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-462 @@ -0,0 +1 @@ +foo[].bar[] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-463 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-463 new file mode 100644 index 000000000..d965466e9 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-463 @@ -0,0 +1 @@ +foo[].bar[].[baz, qux] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-464 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-464 new file mode 100644 index 000000000..f1822a174 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-464 @@ -0,0 +1 @@ +foo[].bar[].[baz] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-465 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-465 new file mode 100644 index 000000000..c6f77b80c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-465 @@ -0,0 +1 @@ +foo[].bar[].[baz, qux][] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-466 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-466 new file mode 100644 index 000000000..db56262a4 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-466 @@ -0,0 +1 @@ +foo.[baz[*].bar, qux[0]] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-467 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-467 new file mode 100644 index 000000000..b901067d2 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-467 @@ -0,0 +1 @@ +foo.[baz[*].[bar, boo], qux[0]] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-468 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-468 new file mode 100644 index 000000000..738479fa6 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-468 @@ -0,0 +1 @@ +foo.[baz[*].not_there || baz[*].bar, qux[0]] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-469 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-469 new file mode 100644 index 000000000..6926996a7 --- /dev/null +++ 
b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-469 @@ -0,0 +1 @@ +[[*],*] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-47 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-47 new file mode 100644 index 000000000..6d33cc72c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-47 @@ -0,0 +1 @@ +foo[?key == `1`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-470 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-470 new file mode 100644 index 000000000..736be0a31 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-470 @@ -0,0 +1 @@ +[[*]] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-471 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-471 new file mode 100644 index 000000000..29e1fb20a --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-471 @@ -0,0 +1 @@ +outer.foo || outer.bar \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-472 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-472 new file mode 100644 index 000000000..c0070ba78 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-472 @@ -0,0 +1 @@ +outer.foo||outer.bar \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-473 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-473 new file mode 100644 index 000000000..661b0bec5 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-473 @@ -0,0 +1 @@ +outer.bar || outer.baz \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-474 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-474 new file mode 100644 index 000000000..296d5aeee --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-474 @@ -0,0 +1 @@ +outer.bar||outer.baz \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-475 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-475 new file mode 100644 index 000000000..ca140f8aa --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-475 @@ -0,0 +1 @@ +outer.bad || outer.foo \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-476 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-476 new file mode 100644 index 000000000..15d309242 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-476 @@ -0,0 +1 @@ +outer.bad||outer.foo \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-477 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-477 new file mode 100644 index 000000000..56148d957 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-477 @@ -0,0 +1 @@ +outer.foo || outer.bad \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-478 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-478 new file mode 100644 index 000000000..6d3cf6d90 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-478 @@ -0,0 +1 @@ +outer.foo||outer.bad \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-479 
b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-479 new file mode 100644 index 000000000..100fa8339 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-479 @@ -0,0 +1 @@ +outer.bad || outer.alsobad \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-48 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-48 new file mode 100644 index 000000000..de56fc042 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-48 @@ -0,0 +1 @@ +foo[?key == `[0]`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-480 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-480 new file mode 100644 index 000000000..64490352b --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-480 @@ -0,0 +1 @@ +outer.bad||outer.alsobad \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-481 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-481 new file mode 100644 index 000000000..af901bde1 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-481 @@ -0,0 +1 @@ +outer.empty_string || outer.foo \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-482 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-482 new file mode 100644 index 000000000..36b63e462 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-482 @@ -0,0 +1 @@ +outer.nokey || outer.bool || outer.empty_list || outer.empty_string || outer.foo \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-483 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-483 new file mode 100644 index 000000000..aba584f99 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-483 @@ -0,0 +1 @@ +foo.*.baz | [0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-484 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-484 new file mode 100644 index 000000000..4234ac019 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-484 @@ -0,0 +1 @@ +foo.*.baz | [1] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-485 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-485 new file mode 100644 index 000000000..12330d990 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-485 @@ -0,0 +1 @@ +foo.*.baz | [2] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-486 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-486 new file mode 100644 index 000000000..1b2d93e19 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-486 @@ -0,0 +1 @@ +foo.bar.* | [0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-487 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-487 new file mode 100644 index 000000000..c371fc645 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-487 @@ -0,0 +1 @@ +foo.*.notbaz | [*] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-488 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-488 new file mode 100644 index 000000000..3c835642e --- /dev/null +++ 
b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-488 @@ -0,0 +1 @@ +foo | bar \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-489 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-489 new file mode 100644 index 000000000..decaa0421 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-489 @@ -0,0 +1 @@ +foo | bar | baz \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-49 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-49 new file mode 100644 index 000000000..49d9c63a3 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-49 @@ -0,0 +1 @@ +foo[?key == `{"bar": [0]}`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-490 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-490 new file mode 100644 index 000000000..b91068037 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-490 @@ -0,0 +1 @@ +foo|bar| baz \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-491 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-491 new file mode 100644 index 000000000..11df74d8b --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-491 @@ -0,0 +1 @@ +not_there | [0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-492 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-492 new file mode 100644 index 000000000..11df74d8b --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-492 @@ -0,0 +1 @@ +not_there | [0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-493 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-493 new file mode 100644 index 000000000..37da9fc0b --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-493 @@ -0,0 +1 @@ +[foo.bar, foo.other] | [0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-494 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-494 new file mode 100644 index 000000000..1f4fc943d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-494 @@ -0,0 +1 @@ +{"a": foo.bar, "b": foo.other} | a \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-495 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-495 new file mode 100644 index 000000000..67c7ea9cf --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-495 @@ -0,0 +1 @@ +{"a": foo.bar, "b": foo.other} | b \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-496 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-496 new file mode 100644 index 000000000..d87f9bba4 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-496 @@ -0,0 +1 @@ +{"a": foo.bar, "b": foo.other} | *.baz \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-497 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-497 new file mode 100644 index 000000000..ebf8e2711 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-497 @@ -0,0 +1 @@ +foo.bam || foo.bar | baz \ No newline at end of file diff --git 
a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-498 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-498 new file mode 100644 index 000000000..f32bc6db5 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-498 @@ -0,0 +1 @@ +foo | not_there || bar \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-499 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-499 new file mode 100644 index 000000000..d04459d90 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-499 @@ -0,0 +1 @@ +foo[*].bar[*] | [0][0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-5 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-5 new file mode 100644 index 000000000..b537264a1 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-5 @@ -0,0 +1 @@ +foo.bar.bad \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-50 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-50 new file mode 100644 index 000000000..c17c1df17 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-50 @@ -0,0 +1 @@ +foo[?key == `null`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-500 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-500 new file mode 100644 index 000000000..3eb869f43 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-500 @@ -0,0 +1 @@ +bar[0:10] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-501 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-501 new file mode 100644 index 000000000..aa5d6be52 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-501 @@ -0,0 +1 @@ +foo[0:10:1] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-502 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-502 new file mode 100644 index 000000000..1a4d1682d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-502 @@ -0,0 +1 @@ +foo[0:10] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-503 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-503 new file mode 100644 index 000000000..5925a578b --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-503 @@ -0,0 +1 @@ +foo[0:10:] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-504 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-504 new file mode 100644 index 000000000..081e93abd --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-504 @@ -0,0 +1 @@ +foo[0::1] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-505 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-505 new file mode 100644 index 000000000..922700149 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-505 @@ -0,0 +1 @@ +foo[0::] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-506 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-506 new file mode 100644 index 000000000..fd2294d66 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-506 @@ -0,0 +1 @@ +foo[0:] \ No newline at end of 
file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-507 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-507 new file mode 100644 index 000000000..c6b551d5e --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-507 @@ -0,0 +1 @@ +foo[:10:1] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-508 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-508 new file mode 100644 index 000000000..503f58da6 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-508 @@ -0,0 +1 @@ +foo[::1] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-509 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-509 new file mode 100644 index 000000000..f78bb770c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-509 @@ -0,0 +1 @@ +foo[:10:] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-51 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-51 new file mode 100644 index 000000000..589a214f4 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-51 @@ -0,0 +1 @@ +foo[?key == `[1]`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-510 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-510 new file mode 100644 index 000000000..eb9d2ba88 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-510 @@ -0,0 +1 @@ +foo[::] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-511 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-511 new file mode 100644 index 000000000..1921a3d98 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-511 @@ -0,0 +1 @@ +foo[:] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-512 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-512 new file mode 100644 index 000000000..a87afcb1b --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-512 @@ -0,0 +1 @@ +foo[1:9] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-513 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-513 new file mode 100644 index 000000000..dbf51d8cd --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-513 @@ -0,0 +1 @@ +foo[0:10:2] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-514 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-514 new file mode 100644 index 000000000..f7288763a --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-514 @@ -0,0 +1 @@ +foo[5:] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-515 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-515 new file mode 100644 index 000000000..64395761d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-515 @@ -0,0 +1 @@ +foo[5::2] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-516 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-516 new file mode 100644 index 000000000..706bb14dd --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-516 @@ -0,0 +1 @@ +foo[::2] \ No newline at end of file diff 
--git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-517 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-517 new file mode 100644 index 000000000..8fcfaee95 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-517 @@ -0,0 +1 @@ +foo[::-1] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-518 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-518 new file mode 100644 index 000000000..f6a00bf9b --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-518 @@ -0,0 +1 @@ +foo[1::2] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-519 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-519 new file mode 100644 index 000000000..ea068ee06 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-519 @@ -0,0 +1 @@ +foo[10:0:-1] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-52 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-52 new file mode 100644 index 000000000..214917ac0 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-52 @@ -0,0 +1 @@ +foo[?key == `{"a":2}`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-520 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-520 new file mode 100644 index 000000000..1fe14258e --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-520 @@ -0,0 +1 @@ +foo[10:5:-1] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-521 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-521 new file mode 100644 index 000000000..4ba0e1302 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-521 @@ -0,0 +1 @@ +foo[8:2:-2] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-522 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-522 new file mode 100644 index 000000000..25db439ff --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-522 @@ -0,0 +1 @@ +foo[0:20] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-523 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-523 new file mode 100644 index 000000000..8a965920a --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-523 @@ -0,0 +1 @@ +foo[10:-20:-1] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-524 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-524 new file mode 100644 index 000000000..b1e5ba373 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-524 @@ -0,0 +1 @@ +foo[10:-20] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-525 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-525 new file mode 100644 index 000000000..06253112e --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-525 @@ -0,0 +1 @@ +foo[-4:-1] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-526 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-526 new file mode 100644 index 000000000..1e14a6a4c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-526 @@ -0,0 +1 @@ +foo[:-5:-1] \ No newline at 
end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-527 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-527 new file mode 100644 index 000000000..aef5c2747 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-527 @@ -0,0 +1 @@ +foo[:2].a \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-528 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-528 new file mode 100644 index 000000000..93c95fcf6 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-528 @@ -0,0 +1 @@ +foo[:2].b \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-529 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-529 new file mode 100644 index 000000000..7e0733e59 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-529 @@ -0,0 +1 @@ +foo[:2].a.b \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-53 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-53 new file mode 100644 index 000000000..4c002ed80 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-53 @@ -0,0 +1 @@ +foo[?`true` == key] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-530 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-530 new file mode 100644 index 000000000..2438b2576 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-530 @@ -0,0 +1 @@ +bar[::-1].a.b \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-531 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-531 new file mode 100644 index 000000000..549994b6b --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-531 @@ -0,0 +1 @@ +bar[:2].a.b \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-532 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-532 new file mode 100644 index 000000000..ab98292b4 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-532 @@ -0,0 +1 @@ +baz[:2].a \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-533 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-533 new file mode 100644 index 000000000..65fca9687 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-533 @@ -0,0 +1 @@ +[:] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-534 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-534 new file mode 100644 index 000000000..18c5daf7b --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-534 @@ -0,0 +1 @@ +[:2].a \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-535 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-535 new file mode 100644 index 000000000..1bb84f7d4 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-535 @@ -0,0 +1 @@ +[::-1].a \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-536 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-536 new file mode 100644 index 000000000..7a0416f05 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-536 @@ -0,0 +1 @@ +[:2].b \ No newline at end of 
file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-537 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-537 new file mode 100644 index 000000000..4d5f9756e --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-537 @@ -0,0 +1 @@ +foo.bar \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-538 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-538 new file mode 100644 index 000000000..191028156 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-538 @@ -0,0 +1 @@ +foo \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-539 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-539 new file mode 100644 index 000000000..f59ec20aa --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-539 @@ -0,0 +1 @@ +* \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-54 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-54 new file mode 100644 index 000000000..23d27073e --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-54 @@ -0,0 +1 @@ +foo[?`false` == key] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-540 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-540 new file mode 100644 index 000000000..dee569574 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-540 @@ -0,0 +1 @@ +*.* \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-541 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-541 new file mode 100644 index 000000000..1a16f7418 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-541 @@ -0,0 +1 @@ +*.foo \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-542 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-542 new file mode 100644 index 000000000..7e8066d39 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-542 @@ -0,0 +1 @@ +*[0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-543 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-543 new file mode 100644 index 000000000..0637a088a --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-543 @@ -0,0 +1 @@ +[] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-544 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-544 new file mode 100644 index 000000000..6e7ea636e --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-544 @@ -0,0 +1 @@ +[0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-545 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-545 new file mode 100644 index 000000000..5a5194647 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-545 @@ -0,0 +1 @@ +[*] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-546 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-546 new file mode 100644 index 000000000..416127425 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-546 @@ -0,0 +1 @@ +*.["0"] \ No newline at end of file diff --git 
a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-547 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-547 new file mode 100644 index 000000000..cd9fb6ba7 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-547 @@ -0,0 +1 @@ +[*].bar \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-548 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-548 new file mode 100644 index 000000000..9f3ada480 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-548 @@ -0,0 +1 @@ +[*][0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-549 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-549 new file mode 100644 index 000000000..9b0b2f818 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-549 @@ -0,0 +1 @@ +foo[0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-55 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-55 new file mode 100644 index 000000000..6d840ee56 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-55 @@ -0,0 +1 @@ +foo[?`0` == key] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-550 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-550 new file mode 100644 index 000000000..b23413b92 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-550 @@ -0,0 +1 @@ +foo.[*] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-551 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-551 new file mode 100644 index 000000000..08ab2e1c4 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-551 @@ -0,0 +1 @@ +foo.[abc] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-552 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-552 new file mode 100644 index 000000000..78b05a5c6 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-552 @@ -0,0 +1 @@ +foo.[abc, def] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-553 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-553 new file mode 100644 index 000000000..1e7b886e7 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-553 @@ -0,0 +1 @@ +a.{foo: bar} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-554 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-554 new file mode 100644 index 000000000..91b4c9896 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-554 @@ -0,0 +1 @@ +a.{foo: bar, baz: bam} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-555 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-555 new file mode 100644 index 000000000..8301ef981 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-555 @@ -0,0 +1 @@ +{"\\":{" ":*}} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-556 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-556 new file mode 100644 index 000000000..8f75cc913 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-556 @@ -0,0 +1 @@ +foo || bar \ No newline at end of file 
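Note: each expr-* file above holds a single JMESPath expression; together they form the fuzz corpus vendored with github.com/jmespath/go-jmespath. The entries walk the grammar: flatten projections (foo[].bar), backtick JSON literals (`[1, 2, 3]`), raw string literals ('foo'), multiselect hashes and lists (foo.{bar: bar}, foo.[bar, baz]), or-expressions and pipes (outer.foo || outer.bar, foo | bar), slices (foo[0:10:2]), and filters (foo[?bar == `"baz"`]). As a rough sketch of how entries like these exercise the library, the snippet below runs a few expressions of the same shapes through the package's public Compile and Search functions; the sample input document is invented for illustration and is not part of the corpus.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/jmespath/go-jmespath"
)

func main() {
	// Expressions in the same shapes the expr-* corpus files exercise:
	// a flatten projection, a slice, a filter, and a pipe.
	exprs := []string{
		"foo[].bar",
		"foo[:1].bar",
		"foo[?baz > `1`].bar",
		"foo[].bar | [0]",
	}

	// Invented sample input; json.Unmarshal yields the generic
	// map[string]interface{} / []interface{} shapes Search expects.
	var data interface{}
	raw := `{"foo": [{"bar": "x", "baz": 1}, {"bar": "y", "baz": 2}]}`
	if err := json.Unmarshal([]byte(raw), &data); err != nil {
		panic(err)
	}

	for _, e := range exprs {
		// Compile surfaces syntax errors, which is the behavior the
		// fuzz corpus targets; Search additionally evaluates the
		// expression against the sample document.
		if _, err := jmespath.Compile(e); err != nil {
			fmt.Printf("%-22s parse error: %v\n", e, err)
			continue
		}
		result, err := jmespath.Search(e, data)
		fmt.Printf("%-22s => %v (err: %v)\n", e, result, err)
	}
}

These seeds exist to drive the lexer and parser through every grammar production, so even expressions that evaluate to null against any input document (or that fail to parse at all) are useful corpus entries.
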
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-557 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-557 new file mode 100644 index 000000000..e5f122c56 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-557 @@ -0,0 +1 @@ +foo.[a || b] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-558 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-558 new file mode 100644 index 000000000..39d191432 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-558 @@ -0,0 +1 @@ +foo[?bar==`"baz"`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-559 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-559 new file mode 100644 index 000000000..d08bbe250 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-559 @@ -0,0 +1 @@ +foo[? bar == `"baz"` ] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-56 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-56 new file mode 100644 index 000000000..addaf204c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-56 @@ -0,0 +1 @@ +foo[?`1` == key] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-560 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-560 new file mode 100644 index 000000000..a77f35581 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-560 @@ -0,0 +1 @@ +foo[?a.b.c==d.e.f] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-561 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-561 new file mode 100644 index 000000000..c9697aa48 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-561 @@ -0,0 +1 @@ +foo[?bar==`[0, 1, 2]`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-562 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-562 new file mode 100644 index 000000000..fd7064a08 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-562 @@ -0,0 +1 @@ +foo[?bar==`["a", "b", "c"]`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-563 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-563 new file mode 100644 index 000000000..61e5e1b8f --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-563 @@ -0,0 +1 @@ +foo[?bar==`["foo\`bar"]`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-564 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-564 new file mode 100644 index 000000000..bc9d8af1d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-564 @@ -0,0 +1 @@ +[?"\\">`"foo"`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-565 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-565 new file mode 100644 index 000000000..2dd54dc39 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-565 @@ -0,0 +1 @@ +[?"\\" > `"foo"`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-566 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-566 new file mode 100644 index 000000000..191028156 --- /dev/null +++ 
b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-566 @@ -0,0 +1 @@ +foo \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-567 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-567 new file mode 100644 index 000000000..7e9668e78 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-567 @@ -0,0 +1 @@ +"foo" \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-568 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-568 new file mode 100644 index 000000000..d58ac16bf --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-568 @@ -0,0 +1 @@ +"\\" \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-569 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-569 new file mode 100644 index 000000000..33ac9fba6 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-569 @@ -0,0 +1 @@ +*||*|*|* \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-57 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-57 new file mode 100644 index 000000000..acf2435c7 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-57 @@ -0,0 +1 @@ +foo[?`[0]` == key] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-570 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-570 new file mode 100644 index 000000000..99e19638c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-570 @@ -0,0 +1 @@ +*[]||[*] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-571 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-571 new file mode 100644 index 000000000..be0845011 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-571 @@ -0,0 +1 @@ +[*.*] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-572 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-572 new file mode 100644 index 000000000..a84b51e1c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-572 @@ -0,0 +1 @@ +foo[]."✓" \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-573 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-573 new file mode 100644 index 000000000..c2de55815 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-573 @@ -0,0 +1 @@ +"☯" \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-574 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-574 new file mode 100644 index 000000000..dc2dda0bb --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-574 @@ -0,0 +1 @@ +"♪♫•*¨*•.¸¸❤¸¸.•*¨*•♫♪" \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-575 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-575 new file mode 100644 index 000000000..a2d3d5f6a --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-575 @@ -0,0 +1 @@ +"☃" \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-576 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-576 new file mode 100644 index 000000000..0971c37ea --- /dev/null +++ 
b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-576 @@ -0,0 +1 @@ +foo.*.baz \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-577 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-577 new file mode 100644 index 000000000..0e39dfd69 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-577 @@ -0,0 +1 @@ +foo.bar.* \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-578 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-578 new file mode 100644 index 000000000..89c1ce22d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-578 @@ -0,0 +1 @@ +foo.*.notbaz \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-579 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-579 new file mode 100644 index 000000000..5199b9f95 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-579 @@ -0,0 +1 @@ +foo.*.notbaz[0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-58 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-58 new file mode 100644 index 000000000..99fe382c6 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-58 @@ -0,0 +1 @@ +foo[?`{"bar": [0]}` == key] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-580 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-580 new file mode 100644 index 000000000..5bb6d4ae7 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-580 @@ -0,0 +1 @@ +foo.*.notbaz[-1] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-581 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-581 new file mode 100644 index 000000000..edac73189 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-581 @@ -0,0 +1 @@ +foo.* \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-582 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-582 new file mode 100644 index 000000000..458d0a6dd --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-582 @@ -0,0 +1 @@ +foo.*.* \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-583 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-583 new file mode 100644 index 000000000..f757fd534 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-583 @@ -0,0 +1 @@ +foo.*.*.* \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-584 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-584 new file mode 100644 index 000000000..670049d96 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-584 @@ -0,0 +1 @@ +foo.*.*.*.* \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-585 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-585 new file mode 100644 index 000000000..3c88caafe --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-585 @@ -0,0 +1 @@ +*.bar \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-586 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-586 new file mode 100644 index 000000000..f59ec20aa --- 
/dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-586 @@ -0,0 +1 @@ +* \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-587 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-587 new file mode 100644 index 000000000..0852fcc78 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-587 @@ -0,0 +1 @@ +*.sub1 \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-588 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-588 new file mode 100644 index 000000000..dee569574 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-588 @@ -0,0 +1 @@ +*.* \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-589 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-589 new file mode 100644 index 000000000..66781bba4 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-589 @@ -0,0 +1 @@ +*.*.foo \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-59 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-59 new file mode 100644 index 000000000..4aad20ae6 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-59 @@ -0,0 +1 @@ +foo[?`null` == key] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-590 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-590 new file mode 100644 index 000000000..0db15d97e --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-590 @@ -0,0 +1 @@ +*.sub1.foo \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-591 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-591 new file mode 100644 index 000000000..b24be9d7d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-591 @@ -0,0 +1 @@ +foo[*].bar \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-592 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-592 new file mode 100644 index 000000000..e6efe133f --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-592 @@ -0,0 +1 @@ +foo[*].notbar \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-593 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-593 new file mode 100644 index 000000000..5a5194647 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-593 @@ -0,0 +1 @@ +[*] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-594 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-594 new file mode 100644 index 000000000..cd9fb6ba7 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-594 @@ -0,0 +1 @@ +[*].bar \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-595 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-595 new file mode 100644 index 000000000..cbf1a5d59 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-595 @@ -0,0 +1 @@ +[*].notbar \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-596 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-596 new file mode 100644 index 000000000..8bd13b7eb --- /dev/null +++ 
b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-596 @@ -0,0 +1 @@ +foo.bar[*].baz \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-597 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-597 new file mode 100644 index 000000000..7239f3e88 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-597 @@ -0,0 +1 @@ +foo.bar[*].baz[0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-598 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-598 new file mode 100644 index 000000000..f5e431d9e --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-598 @@ -0,0 +1 @@ +foo.bar[*].baz[1] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-599 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-599 new file mode 100644 index 000000000..d0c259539 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-599 @@ -0,0 +1 @@ +foo.bar[*].baz[2] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-6 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-6 new file mode 100644 index 000000000..b9749b748 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-6 @@ -0,0 +1 @@ +foo.bad \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-60 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-60 new file mode 100644 index 000000000..dac67509b --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-60 @@ -0,0 +1 @@ +foo[?`[1]` == key] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-600 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-600 new file mode 100644 index 000000000..a6388271e --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-600 @@ -0,0 +1 @@ +foo.bar[*].baz[3] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-601 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-601 new file mode 100644 index 000000000..2a66ffe93 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-601 @@ -0,0 +1 @@ +foo.bar[*] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-602 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-602 new file mode 100644 index 000000000..b6b369543 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-602 @@ -0,0 +1 @@ +foo.bar[0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-603 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-603 new file mode 100644 index 000000000..7e57f9e74 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-603 @@ -0,0 +1 @@ +foo.bar[0][0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-604 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-604 new file mode 100644 index 000000000..c5f8bef0b --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-604 @@ -0,0 +1 @@ +foo.bar[0][0][0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-605 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-605 new file mode 100644 index 
000000000..3decf0803 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-605 @@ -0,0 +1 @@ +foo.bar[0][0][0][0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-606 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-606 new file mode 100644 index 000000000..655e2959b --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-606 @@ -0,0 +1 @@ +foo[0][0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-607 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-607 new file mode 100644 index 000000000..2aa159718 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-607 @@ -0,0 +1 @@ +foo[*].bar[*].kind \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-608 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-608 new file mode 100644 index 000000000..556b380ba --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-608 @@ -0,0 +1 @@ +foo[*].bar[0].kind \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-609 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-609 new file mode 100644 index 000000000..0de3229b8 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-609 @@ -0,0 +1 @@ +foo[*].bar.kind \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-61 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-61 new file mode 100644 index 000000000..130ed3b37 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-61 @@ -0,0 +1 @@ +foo[?`{"a":2}` == key] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-610 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-610 new file mode 100644 index 000000000..3b511f133 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-610 @@ -0,0 +1 @@ +foo[*].bar[0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-611 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-611 new file mode 100644 index 000000000..c8dfa16e6 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-611 @@ -0,0 +1 @@ +foo[*].bar[1] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-612 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-612 new file mode 100644 index 000000000..69f04ee23 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-612 @@ -0,0 +1 @@ +foo[*].bar[2] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-613 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-613 new file mode 100644 index 000000000..3b511f133 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-613 @@ -0,0 +1 @@ +foo[*].bar[0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-614 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-614 new file mode 100644 index 000000000..03e0c0cb9 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-614 @@ -0,0 +1 @@ +foo[*][0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-615 
b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-615 new file mode 100644 index 000000000..ac1c89668 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-615 @@ -0,0 +1 @@ +foo[*][1] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-616 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-616 new file mode 100644 index 000000000..03e0c0cb9 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-616 @@ -0,0 +1 @@ +foo[*][0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-617 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-617 new file mode 100644 index 000000000..ac1c89668 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-617 @@ -0,0 +1 @@ +foo[*][1] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-618 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-618 new file mode 100644 index 000000000..6494cf1c6 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-618 @@ -0,0 +1 @@ +foo[*][0][0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-619 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-619 new file mode 100644 index 000000000..1406be572 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-619 @@ -0,0 +1 @@ +foo[*][1][0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-62 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-62 new file mode 100644 index 000000000..3d15fcc16 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-62 @@ -0,0 +1 @@ +foo[?key != `true`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-620 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-620 new file mode 100644 index 000000000..72b5aa281 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-620 @@ -0,0 +1 @@ +foo[*][0][1] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-621 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-621 new file mode 100644 index 000000000..02a26491a --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-621 @@ -0,0 +1 @@ +foo[*][1][1] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-622 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-622 new file mode 100644 index 000000000..cb08037e2 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-622 @@ -0,0 +1 @@ +foo[*][2] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-623 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-623 new file mode 100644 index 000000000..91d695995 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-623 @@ -0,0 +1 @@ +foo[*][2][2] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-624 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-624 new file mode 100644 index 000000000..f40f261ad --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-624 @@ -0,0 +1 @@ +bar[*] \ No newline at end of file diff --git 
a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-625 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-625 new file mode 100644 index 000000000..03904b1de --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-625 @@ -0,0 +1 @@ +bar[*].baz[*] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-626 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-626 new file mode 100644 index 000000000..fd7c21c34 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-626 @@ -0,0 +1 @@ +string[*] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-627 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-627 new file mode 100644 index 000000000..d7ca4719a --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-627 @@ -0,0 +1 @@ +hash[*] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-628 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-628 new file mode 100644 index 000000000..b3ddffe3c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-628 @@ -0,0 +1 @@ +number[*] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-629 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-629 new file mode 100644 index 000000000..c03cd39eb --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-629 @@ -0,0 +1 @@ +nullvalue[*] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-63 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-63 new file mode 100644 index 000000000..08731af69 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-63 @@ -0,0 +1 @@ +foo[?key != `false`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-630 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-630 new file mode 100644 index 000000000..b3c40cd53 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-630 @@ -0,0 +1 @@ +string[*].foo \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-631 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-631 new file mode 100644 index 000000000..c5930d543 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-631 @@ -0,0 +1 @@ +hash[*].foo \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-632 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-632 new file mode 100644 index 000000000..cc0b1a489 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-632 @@ -0,0 +1 @@ +number[*].foo \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-633 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-633 new file mode 100644 index 000000000..d677b9658 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-633 @@ -0,0 +1 @@ +nullvalue[*].foo \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-634 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-634 new file mode 100644 index 000000000..c11666401 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-634 @@ -0,0 +1 @@ +nullvalue[*].foo[*].bar \ No 
newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-635 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-635 new file mode 100644 index 000000000..e33997710 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-635 @@ -0,0 +1 @@ +string.* \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-636 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-636 new file mode 100644 index 000000000..76f53453a --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-636 @@ -0,0 +1 @@ +hash.* \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-637 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-637 new file mode 100644 index 000000000..dd485072f --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-637 @@ -0,0 +1 @@ +number.* \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-638 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-638 new file mode 100644 index 000000000..16000c003 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-638 @@ -0,0 +1 @@ +array.* \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-639 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-639 new file mode 100644 index 000000000..1d0d03ed3 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-639 @@ -0,0 +1 @@ +nullvalue.* \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-64 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-64 new file mode 100644 index 000000000..b67aebe98 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-64 @@ -0,0 +1 @@ +foo[?key != `0`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-640 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-640 new file mode 100644 index 000000000..7e8066d39 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-640 @@ -0,0 +1 @@ +*[0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-641 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-641 new file mode 100644 index 000000000..41ebe5ba9 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-641 @@ -0,0 +1 @@ +`foo` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-642 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-642 new file mode 100644 index 000000000..fe0397993 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-642 @@ -0,0 +1 @@ +`foo\"quote` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-643 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-643 new file mode 100644 index 000000000..1a27fd80c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-643 @@ -0,0 +1 @@ +`✓` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-644 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-644 new file mode 100644 index 000000000..559a13456 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-644 @@ -0,0 +1 @@ +`foo\"bar` \ No newline at end of file 
diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-645 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-645 new file mode 100644 index 000000000..e31621b43 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-645 @@ -0,0 +1 @@ +`1\`` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-646 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-646 new file mode 100644 index 000000000..6bf7a1036 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-646 @@ -0,0 +1 @@ +`\\`.{a:`b`} \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-647 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-647 new file mode 100644 index 000000000..41ebe5ba9 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-647 @@ -0,0 +1 @@ +`foo` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-648 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-648 new file mode 100644 index 000000000..28b9bcbbb --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-648 @@ -0,0 +1 @@ +` foo` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-649 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-649 new file mode 100644 index 000000000..41ebe5ba9 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-649 @@ -0,0 +1 @@ +`foo` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-65 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-65 new file mode 100644 index 000000000..d3ac793bb --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-65 @@ -0,0 +1 @@ +foo[?key != `1`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-650 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-650 new file mode 100644 index 000000000..fe0397993 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-650 @@ -0,0 +1 @@ +`foo\"quote` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-651 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-651 new file mode 100644 index 000000000..1a27fd80c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-651 @@ -0,0 +1 @@ +`✓` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-652 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-652 new file mode 100644 index 000000000..559a13456 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-652 @@ -0,0 +1 @@ +`foo\"bar` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-653 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-653 new file mode 100644 index 000000000..e31621b43 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-653 @@ -0,0 +1 @@ +`1\`` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-654 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-654 new file mode 100644 index 000000000..6bf7a1036 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-654 @@ -0,0 +1 @@ +`\\`.{a:`b`} \ No newline at end of file diff --git 
a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-655 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-655 new file mode 100644 index 000000000..41ebe5ba9 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-655 @@ -0,0 +1 @@ +`foo` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-656 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-656 new file mode 100644 index 000000000..28b9bcbbb --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-656 @@ -0,0 +1 @@ +` foo` \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-66 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-66 new file mode 100644 index 000000000..065295bc1 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-66 @@ -0,0 +1 @@ +foo[?key != `null`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-67 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-67 new file mode 100644 index 000000000..43d164927 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-67 @@ -0,0 +1 @@ +foo[?key != `[1]`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-68 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-68 new file mode 100644 index 000000000..6b884fa86 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-68 @@ -0,0 +1 @@ +foo[?key != `{"a":2}`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-69 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-69 new file mode 100644 index 000000000..d85c779d0 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-69 @@ -0,0 +1 @@ +foo[?`true` != key] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-7 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-7 new file mode 100644 index 000000000..44d6628cd --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-7 @@ -0,0 +1 @@ +bad \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-70 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-70 new file mode 100644 index 000000000..3e6dcf304 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-70 @@ -0,0 +1 @@ +foo[?`false` != key] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-71 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-71 new file mode 100644 index 000000000..bdb820b30 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-71 @@ -0,0 +1 @@ +foo[?`0` != key] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-72 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-72 new file mode 100644 index 000000000..3f3048a00 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-72 @@ -0,0 +1 @@ +foo[?`1` != key] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-73 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-73 new file mode 100644 index 000000000..dacc25724 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-73 @@ -0,0 +1 @@ +foo[?`null` != key] \ No newline at 
end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-74 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-74 new file mode 100644 index 000000000..32ebae880 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-74 @@ -0,0 +1 @@ +foo[?`[1]` != key] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-75 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-75 new file mode 100644 index 000000000..dcd023e0f --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-75 @@ -0,0 +1 @@ +foo[?`{"a":2}` != key] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-76 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-76 new file mode 100644 index 000000000..e08cc13cb --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-76 @@ -0,0 +1 @@ +reservations[].instances[?bar==`1`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-77 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-77 new file mode 100644 index 000000000..1ec43f45f --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-77 @@ -0,0 +1 @@ +reservations[*].instances[?bar==`1`] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-78 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-78 new file mode 100644 index 000000000..303871163 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-78 @@ -0,0 +1 @@ +reservations[].instances[?bar==`1`][] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-79 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-79 new file mode 100644 index 000000000..e3875746b --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-79 @@ -0,0 +1 @@ +foo[?bar==`1`].bar[0] \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-8 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-8 new file mode 100644 index 000000000..da7bc1ccf --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-8 @@ -0,0 +1 @@ +bad.morebad.morebad \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-80 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-80 new file mode 100644 index 000000000..5c3d68356 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-80 @@ -0,0 +1 @@ +foo[?a==`1`].b.c \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-81 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-81 new file mode 100644 index 000000000..6232808f0 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-81 @@ -0,0 +1 @@ +abs(foo) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-82 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-82 new file mode 100644 index 000000000..6232808f0 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-82 @@ -0,0 +1 @@ +abs(foo) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-83 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-83 new file mode 100644 index 000000000..29497f4ff --- /dev/null +++ 
b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-83 @@ -0,0 +1 @@ +abs(array[1]) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-84 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-84 new file mode 100644 index 000000000..29497f4ff --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-84 @@ -0,0 +1 @@ +abs(array[1]) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-85 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-85 new file mode 100644 index 000000000..346696563 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-85 @@ -0,0 +1 @@ +abs(`-24`) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-86 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-86 new file mode 100644 index 000000000..346696563 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-86 @@ -0,0 +1 @@ +abs(`-24`) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-87 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-87 new file mode 100644 index 000000000..c6268f847 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-87 @@ -0,0 +1 @@ +avg(numbers) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-88 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-88 new file mode 100644 index 000000000..7ce703695 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-88 @@ -0,0 +1 @@ +ceil(`1.2`) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-89 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-89 new file mode 100644 index 000000000..0561bc26d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-89 @@ -0,0 +1 @@ +ceil(decimals[0]) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-9 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-9 new file mode 100644 index 000000000..191028156 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-9 @@ -0,0 +1 @@ +foo \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-90 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-90 new file mode 100644 index 000000000..c78c1fc30 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-90 @@ -0,0 +1 @@ +ceil(decimals[1]) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-91 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-91 new file mode 100644 index 000000000..ebcb4bbdb --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-91 @@ -0,0 +1 @@ +ceil(decimals[2]) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-92 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-92 new file mode 100644 index 000000000..6edbf1afe --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-92 @@ -0,0 +1 @@ +contains('abc', 'a') \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-93 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-93 new file mode 100644 index 000000000..d2b2f070d --- /dev/null +++ 
b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-93 @@ -0,0 +1 @@ +contains('abc', 'd') \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-94 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-94 new file mode 100644 index 000000000..3535da2ec --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-94 @@ -0,0 +1 @@ +contains(strings, 'a') \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-95 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-95 new file mode 100644 index 000000000..ba839fe60 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-95 @@ -0,0 +1 @@ +contains(decimals, `1.2`) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-96 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-96 new file mode 100644 index 000000000..f43581869 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-96 @@ -0,0 +1 @@ +contains(decimals, `false`) \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-97 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-97 new file mode 100644 index 000000000..adb65fc01 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-97 @@ -0,0 +1 @@ +ends_with(str, 'r') \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-98 b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-98 new file mode 100644 index 000000000..93d6901be --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/fuzz/testdata/expr-98 @@ -0,0 +1 @@ +ends_with(str, 'tr') \ No newline at end of file diff --git a/vendor/github.com/jmespath/go-jmespath/interpreter.go b/vendor/github.com/jmespath/go-jmespath/interpreter.go new file mode 100644 index 000000000..13c74604c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/interpreter.go @@ -0,0 +1,418 @@ +package jmespath + +import ( + "errors" + "reflect" + "unicode" + "unicode/utf8" +) + +/* This is a tree based interpreter. It walks the AST and directly + interprets the AST to search through a JSON document. +*/ + +type treeInterpreter struct { + fCall *functionCaller +} + +func newInterpreter() *treeInterpreter { + interpreter := treeInterpreter{} + interpreter.fCall = newFunctionCaller() + return &interpreter +} + +type expRef struct { + ref ASTNode +} + +// Execute takes an ASTNode and input data and interprets the AST directly. +// It will produce the result of applying the JMESPath expression associated +// with the ASTNode to the input data "value". 
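+// +// A minimal usage sketch (hypothetical expression and data; NewParser and +// newInterpreter are defined elsewhere in this package, and encoding/json is +// only needed by the example itself): +// +// ast, _ := NewParser().Parse("a.b") +// var data interface{} +// _ = json.Unmarshal([]byte(`{"a": {"b": 2}}`), &data) +// result, _ := newInterpreter().Execute(ast, data) // result == float64(2)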
+func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) { + switch node.nodeType { + case ASTComparator: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + right, err := intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + switch node.value { + case tEQ: + return objsEqual(left, right), nil + case tNE: + return !objsEqual(left, right), nil + } + leftNum, ok := left.(float64) + if !ok { + return nil, nil + } + rightNum, ok := right.(float64) + if !ok { + return nil, nil + } + switch node.value { + case tGT: + return leftNum > rightNum, nil + case tGTE: + return leftNum >= rightNum, nil + case tLT: + return leftNum < rightNum, nil + case tLTE: + return leftNum <= rightNum, nil + } + case ASTExpRef: + return expRef{ref: node.children[0]}, nil + case ASTFunctionExpression: + resolvedArgs := []interface{}{} + for _, arg := range node.children { + current, err := intr.Execute(arg, value) + if err != nil { + return nil, err + } + resolvedArgs = append(resolvedArgs, current) + } + return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr) + case ASTField: + if m, ok := value.(map[string]interface{}); ok { + key := node.value.(string) + return m[key], nil + } + return intr.fieldFromStruct(node.value.(string), value) + case ASTFilterProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.filterProjectionWithReflection(node, left) + } + return nil, nil + } + compareNode := node.children[2] + collected := []interface{}{} + for _, element := range sliceType { + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil + case ASTFlatten: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + // If we can't type convert to []interface{}, there's + // a chance this could still work via reflection if we're + // dealing with user provided types. + if isSliceType(left) { + return intr.flattenWithReflection(left) + } + return nil, nil + } + flattened := []interface{}{} + for _, element := range sliceType { + if elementSlice, ok := element.([]interface{}); ok { + flattened = append(flattened, elementSlice...) + } else if isSliceType(element) { + reflectFlat := []interface{}{} + v := reflect.ValueOf(element) + for i := 0; i < v.Len(); i++ { + reflectFlat = append(reflectFlat, v.Index(i).Interface()) + } + flattened = append(flattened, reflectFlat...) + } else { + flattened = append(flattened, element) + } + } + return flattened, nil + case ASTIdentity, ASTCurrentNode: + return value, nil + case ASTIndex: + if sliceType, ok := value.([]interface{}); ok { + index := node.value.(int) + if index < 0 { + index += len(sliceType) + } + if index < len(sliceType) && index >= 0 { + return sliceType[index], nil + } + return nil, nil + } + // Otherwise try via reflection. 
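+ // The reflection fallback covers user-provided Go types (for example a + // []MyStruct value, where MyStruct is any caller-defined struct) that do + // not type-assert to []interface{}; the *WithReflection helpers later in + // this file follow the same pattern.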
+ rv := reflect.ValueOf(value) + if rv.Kind() == reflect.Slice { + index := node.value.(int) + if index < 0 { + index += rv.Len() + } + if index < rv.Len() && index >= 0 { + v := rv.Index(index) + return v.Interface(), nil + } + } + return nil, nil + case ASTKeyValPair: + return intr.Execute(node.children[0], value) + case ASTLiteral: + return node.value, nil + case ASTMultiSelectHash: + if value == nil { + return nil, nil + } + collected := make(map[string]interface{}) + for _, child := range node.children { + current, err := intr.Execute(child, value) + if err != nil { + return nil, err + } + key := child.value.(string) + collected[key] = current + } + return collected, nil + case ASTMultiSelectList: + if value == nil { + return nil, nil + } + collected := []interface{}{} + for _, child := range node.children { + current, err := intr.Execute(child, value) + if err != nil { + return nil, err + } + collected = append(collected, current) + } + return collected, nil + case ASTOrExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + matched, err = intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + } + return matched, nil + case ASTAndExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + return matched, nil + } + return intr.Execute(node.children[1], value) + case ASTNotExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + return true, nil + } + return false, nil + case ASTPipe: + result := value + var err error + for _, child := range node.children { + result, err = intr.Execute(child, result) + if err != nil { + return nil, err + } + } + return result, nil + case ASTProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.projectWithReflection(node, left) + } + return nil, nil + } + collected := []interface{}{} + var current interface{} + for _, element := range sliceType { + current, err = intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + return collected, nil + case ASTSubexpression, ASTIndexExpression: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + return intr.Execute(node.children[1], left) + case ASTSlice: + sliceType, ok := value.([]interface{}) + if !ok { + if isSliceType(value) { + return intr.sliceWithReflection(node, value) + } + return nil, nil + } + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + return slice(sliceType, sliceParams) + case ASTValueProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + mapType, ok := left.(map[string]interface{}) + if !ok { + return nil, nil + } + values := make([]interface{}, 0, len(mapType)) + for _, value := range mapType { + values = append(values, value) + } + collected := []interface{}{} + for _, element := range values { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + return collected, 
nil + } + return nil, errors.New("Unknown AST node: " + node.nodeType.String()) +} + +func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) { + rv := reflect.ValueOf(value) + first, n := utf8.DecodeRuneInString(key) + fieldName := string(unicode.ToUpper(first)) + key[n:] + if rv.Kind() == reflect.Struct { + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } else if rv.Kind() == reflect.Ptr { + // Only a single level of pointer indirection is handled here. + if rv.IsNil() { + return nil, nil + } + rv = rv.Elem() + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } + return nil, nil +} + +func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + flattened := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + if reflect.TypeOf(element).Kind() == reflect.Slice { + // Then insert the contents of the element + // slice into the flattened slice, + // i.e. flattened = append(flattened, mySlice...) + elementV := reflect.ValueOf(element) + for j := 0; j < elementV.Len(); j++ { + flattened = append( + flattened, elementV.Index(j).Interface()) + } + } else { + flattened = append(flattened, element) + } + } + return flattened, nil +} + +func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + final := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + final = append(final, element) + } + return slice(final, sliceParams) +} + +func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) { + compareNode := node.children[2] + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil +} + +func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) { + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if result != nil { + collected = append(collected, result) + } + } + return collected, nil +} diff --git a/vendor/github.com/jmespath/go-jmespath/interpreter_test.go b/vendor/github.com/jmespath/go-jmespath/interpreter_test.go new file mode 100644 index 000000000..11c6d0aa0 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/interpreter_test.go @@ -0,0 +1,221 @@ +package jmespath + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" +) + +type scalars struct { + Foo string + Bar string +} + +type sliceType struct { + A string + B []scalars + C []*scalars +} + +type benchmarkStruct struct { + Fooasdfasdfasdfasdf string +} + +type benchmarkNested struct { + Fooasdfasdfasdfasdf nestedA +} + +type nestedA
struct { + Fooasdfasdfasdfasdf nestedB +} + +type nestedB struct { + Fooasdfasdfasdfasdf nestedC +} + +type nestedC struct { + Fooasdfasdfasdfasdf string +} + +type nestedSlice struct { + A []sliceType +} + +func TestCanSupportEmptyInterface(t *testing.T) { + assert := assert.New(t) + data := make(map[string]interface{}) + data["foo"] = "bar" + result, err := Search("foo", data) + assert.Nil(err) + assert.Equal("bar", result) +} + +func TestCanSupportUserDefinedStructsValue(t *testing.T) { + assert := assert.New(t) + s := scalars{Foo: "one", Bar: "bar"} + result, err := Search("Foo", s) + assert.Nil(err) + assert.Equal("one", result) +} + +func TestCanSupportUserDefinedStructsRef(t *testing.T) { + assert := assert.New(t) + s := scalars{Foo: "one", Bar: "bar"} + result, err := Search("Foo", &s) + assert.Nil(err) + assert.Equal("one", result) +} + +func TestCanSupportStructWithSliceAll(t *testing.T) { + assert := assert.New(t) + data := sliceType{A: "foo", B: []scalars{{"f1", "b1"}, {"correct", "b2"}}} + result, err := Search("B[].Foo", data) + assert.Nil(err) + assert.Equal([]interface{}{"f1", "correct"}, result) +} + +func TestCanSupportStructWithSlicingExpression(t *testing.T) { + assert := assert.New(t) + data := sliceType{A: "foo", B: []scalars{{"f1", "b1"}, {"correct", "b2"}}} + result, err := Search("B[:].Foo", data) + assert.Nil(err) + assert.Equal([]interface{}{"f1", "correct"}, result) +} + +func TestCanSupportStructWithFilterProjection(t *testing.T) { + assert := assert.New(t) + data := sliceType{A: "foo", B: []scalars{{"f1", "b1"}, {"correct", "b2"}}} + result, err := Search("B[? `true` ].Foo", data) + assert.Nil(err) + assert.Equal([]interface{}{"f1", "correct"}, result) +} + +func TestCanSupportStructWithSlice(t *testing.T) { + assert := assert.New(t) + data := sliceType{A: "foo", B: []scalars{{"f1", "b1"}, {"correct", "b2"}}} + result, err := Search("B[-1].Foo", data) + assert.Nil(err) + assert.Equal("correct", result) +} + +func TestCanSupportStructWithOrExpressions(t *testing.T) { + assert := assert.New(t) + data := sliceType{A: "foo", C: nil} + result, err := Search("C || A", data) + assert.Nil(err) + assert.Equal("foo", result) +} + +func TestCanSupportStructWithSlicePointer(t *testing.T) { + assert := assert.New(t) + data := sliceType{A: "foo", C: []*scalars{{"f1", "b1"}, {"correct", "b2"}}} + result, err := Search("C[-1].Foo", data) + assert.Nil(err) + assert.Equal("correct", result) +} + +func TestWillAutomaticallyCapitalizeFieldNames(t *testing.T) { + assert := assert.New(t) + s := scalars{Foo: "one", Bar: "bar"} + // Note that there's a lower cased "foo" instead of "Foo", + // but it should still correspond to the Foo field in the + // scalars struct + result, err := Search("foo", &s) + assert.Nil(err) + assert.Equal("one", result) +} + +func TestCanSupportStructWithSliceLowerCased(t *testing.T) { + assert := assert.New(t) + data := sliceType{A: "foo", B: []scalars{{"f1", "b1"}, {"correct", "b2"}}} + result, err := Search("b[-1].foo", data) + assert.Nil(err) + assert.Equal("correct", result) +} + +func TestCanSupportStructWithNestedPointers(t *testing.T) { + assert := assert.New(t) + data := struct{ A *struct{ B int } }{} + result, err := Search("A.B", data) + assert.Nil(err) + assert.Nil(result) +} + +func TestCanSupportFlattenNestedSlice(t *testing.T) { + assert := assert.New(t) + data := nestedSlice{A: []sliceType{ + {B: []scalars{{Foo: "f1a"}, {Foo: "f1b"}}}, + {B: []scalars{{Foo: "f2a"}, {Foo: "f2b"}}}, + }} + result, err := Search("A[].B[].Foo", data) + 
assert.Nil(err) + assert.Equal([]interface{}{"f1a", "f1b", "f2a", "f2b"}, result) +} + +func TestCanSupportFlattenNestedEmptySlice(t *testing.T) { + assert := assert.New(t) + data := nestedSlice{A: []sliceType{ + {}, {B: []scalars{{Foo: "a"}}}, + }} + result, err := Search("A[].B[].Foo", data) + assert.Nil(err) + assert.Equal([]interface{}{"a"}, result) +} + +func TestCanSupportProjectionsWithStructs(t *testing.T) { + assert := assert.New(t) + data := nestedSlice{A: []sliceType{ + {A: "first"}, {A: "second"}, {A: "third"}, + }} + result, err := Search("A[*].A", data) + assert.Nil(err) + assert.Equal([]interface{}{"first", "second", "third"}, result) +} + +func TestCanSupportSliceOfStructsWithFunctions(t *testing.T) { + assert := assert.New(t) + data := []scalars{scalars{"a1", "b1"}, scalars{"a2", "b2"}} + result, err := Search("length(@)", data) + assert.Nil(err) + assert.Equal(result.(float64), 2.0) +} + +func BenchmarkInterpretSingleFieldStruct(b *testing.B) { + intr := newInterpreter() + parser := NewParser() + ast, _ := parser.Parse("fooasdfasdfasdfasdf") + data := benchmarkStruct{"foobarbazqux"} + for i := 0; i < b.N; i++ { + intr.Execute(ast, &data) + } +} + +func BenchmarkInterpretNestedStruct(b *testing.B) { + intr := newInterpreter() + parser := NewParser() + ast, _ := parser.Parse("fooasdfasdfasdfasdf.fooasdfasdfasdfasdf.fooasdfasdfasdfasdf.fooasdfasdfasdfasdf") + data := benchmarkNested{ + nestedA{ + nestedB{ + nestedC{"foobarbazqux"}, + }, + }, + } + for i := 0; i < b.N; i++ { + intr.Execute(ast, &data) + } +} + +func BenchmarkInterpretNestedMaps(b *testing.B) { + jsonData := []byte(`{"fooasdfasdfasdfasdf": {"fooasdfasdfasdfasdf": {"fooasdfasdfasdfasdf": {"fooasdfasdfasdfasdf": "foobarbazqux"}}}}`) + var data interface{} + json.Unmarshal(jsonData, &data) + + intr := newInterpreter() + parser := NewParser() + ast, _ := parser.Parse("fooasdfasdfasdfasdf.fooasdfasdfasdfasdf.fooasdfasdfasdfasdf.fooasdfasdfasdfasdf") + for i := 0; i < b.N; i++ { + intr.Execute(ast, data) + } +} diff --git a/vendor/github.com/jmespath/go-jmespath/lexer.go b/vendor/github.com/jmespath/go-jmespath/lexer.go new file mode 100644 index 000000000..817900c8f --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/lexer.go @@ -0,0 +1,420 @@ +package jmespath + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "strings" + "unicode/utf8" +) + +type token struct { + tokenType tokType + value string + position int + length int +} + +type tokType int + +const eof = -1 + +// Lexer contains information about the expression being tokenized. +type Lexer struct { + expression string // The expression provided by the user. + currentPos int // The current position in the string. + lastWidth int // The width of the current rune. This + buf bytes.Buffer // Internal buffer used for building up values. +} + +// SyntaxError is the main error used whenever a lexing or parsing error occurs. +type SyntaxError struct { + msg string // Error message displayed to user + Expression string // Expression that generated a SyntaxError + Offset int // The location in the string where the error occurred +} + +func (e SyntaxError) Error() string { + // In the future, it would be good to underline the specific + // location where the error occurred. + return "SyntaxError: " + e.msg +} + +// HighlightLocation will show where the syntax error occurred. +// It will place a "^" character on a line below the expression +// at the point where the syntax error occurred. 
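The interplay between `SyntaxError` and `HighlightLocation` is easiest to see through the exported API. A minimal sketch (the caret output in the comments is approximate):

```go
package main

import (
	"fmt"

	"github.com/jmespath/go-jmespath"
)

func main() {
	data := map[string]interface{}{"foo": "bar"}
	// "foo." is an incomplete expression, so Search fails with a SyntaxError.
	_, err := jmespath.Search("foo.", data)
	if syntaxErr, ok := err.(jmespath.SyntaxError); ok {
		fmt.Println(syntaxErr.Error())
		// HighlightLocation places a caret under the offending
		// position, roughly:
		//   foo.
		//       ^
		fmt.Println(syntaxErr.HighlightLocation())
	}
}
```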
+func (e SyntaxError) HighlightLocation() string { + return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^" +} + +//go:generate stringer -type=tokType +const ( + tUnknown tokType = iota + tStar + tDot + tFilter + tFlatten + tLparen + tRparen + tLbracket + tRbracket + tLbrace + tRbrace + tOr + tPipe + tNumber + tUnquotedIdentifier + tQuotedIdentifier + tComma + tColon + tLT + tLTE + tGT + tGTE + tEQ + tNE + tJSONLiteral + tStringLiteral + tCurrent + tExpref + tAnd + tNot + tEOF +) + +var basicTokens = map[rune]tokType{ + '.': tDot, + '*': tStar, + ',': tComma, + ':': tColon, + '{': tLbrace, + '}': tRbrace, + ']': tRbracket, // tLbracket not included because it could be "[]" + '(': tLparen, + ')': tRparen, + '@': tCurrent, +} + +// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64. +// When using this bitmask just be sure to shift the rune down 64 bits +// before checking against identifierStartBits. +const identifierStartBits uint64 = 576460745995190270 + +// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s. +var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270} + +var whiteSpace = map[rune]bool{ + ' ': true, '\t': true, '\n': true, '\r': true, +} + +func (t token) String() string { + return fmt.Sprintf("Token{%+v, %s, %d, %d}", + t.tokenType, t.value, t.position, t.length) +} + +// NewLexer creates a new JMESPath lexer. +func NewLexer() *Lexer { + lexer := Lexer{} + return &lexer +} + +func (lexer *Lexer) next() rune { + if lexer.currentPos >= len(lexer.expression) { + lexer.lastWidth = 0 + return eof + } + r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:]) + lexer.lastWidth = w + lexer.currentPos += w + return r +} + +func (lexer *Lexer) back() { + lexer.currentPos -= lexer.lastWidth +} + +func (lexer *Lexer) peek() rune { + t := lexer.next() + lexer.back() + return t +} + +// tokenize takes an expression and returns corresponding tokens. +func (lexer *Lexer) tokenize(expression string) ([]token, error) { + var tokens []token + lexer.expression = expression + lexer.currentPos = 0 + lexer.lastWidth = 0 +loop: + for { + r := lexer.next() + if identifierStartBits&(1<<(uint64(r)-64)) > 0 { + t := lexer.consumeUnquotedIdentifier() + tokens = append(tokens, t) + } else if val, ok := basicTokens[r]; ok { + // Basic single char token. + t := token{ + tokenType: val, + value: string(r), + position: lexer.currentPos - lexer.lastWidth, + length: 1, + } + tokens = append(tokens, t) + } else if r == '-' || (r >= '0' && r <= '9') { + t := lexer.consumeNumber() + tokens = append(tokens, t) + } else if r == '[' { + t := lexer.consumeLBracket() + tokens = append(tokens, t) + } else if r == '"' { + t, err := lexer.consumeQuotedIdentifier() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '\'' { + t, err := lexer.consumeRawStringLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '`' { + t, err := lexer.consumeLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '|' { + t := lexer.matchOrElse(r, '|', tOr, tPipe) + tokens = append(tokens, t) + } else if r == '<' { + t := lexer.matchOrElse(r, '=', tLTE, tLT) + tokens = append(tokens, t) + } else if r == '>' { + t := lexer.matchOrElse(r, '=', tGTE, tGT) + tokens = append(tokens, t) + } else if r == '!' 
{
+			t := lexer.matchOrElse(r, '=', tNE, tNot)
+			tokens = append(tokens, t)
+		} else if r == '=' {
+			t := lexer.matchOrElse(r, '=', tEQ, tUnknown)
+			tokens = append(tokens, t)
+		} else if r == '&' {
+			t := lexer.matchOrElse(r, '&', tAnd, tExpref)
+			tokens = append(tokens, t)
+		} else if r == eof {
+			break loop
+		} else if _, ok := whiteSpace[r]; ok {
+			// Ignore whitespace
+		} else {
+			return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r)))
+		}
+	}
+	tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0})
+	return tokens, nil
+}
+
+// Consume characters until the ending rune "end" is reached.
+// If the end of the expression is reached before seeing the
+// terminating rune "end", then an error is returned.
+// If no error occurs then the matching substring is returned.
+// The returned string will not include the ending rune.
+func (lexer *Lexer) consumeUntil(end rune) (string, error) {
+	start := lexer.currentPos
+	current := lexer.next()
+	for current != end && current != eof {
+		if current == '\\' && lexer.peek() != eof {
+			lexer.next()
+		}
+		current = lexer.next()
+	}
+	if lexer.lastWidth == 0 {
+		// Then we hit an EOF so we never reached the closing
+		// delimiter.
+		return "", SyntaxError{
+			msg:        "Unclosed delimiter: " + string(end),
+			Expression: lexer.expression,
+			Offset:     len(lexer.expression),
+		}
+	}
+	return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil
+}
+
+func (lexer *Lexer) consumeLiteral() (token, error) {
+	start := lexer.currentPos
+	value, err := lexer.consumeUntil('`')
+	if err != nil {
+		return token{}, err
+	}
+	value = strings.Replace(value, "\\`", "`", -1)
+	return token{
+		tokenType: tJSONLiteral,
+		value:     value,
+		position:  start,
+		length:    len(value),
+	}, nil
+}
+
+func (lexer *Lexer) consumeRawStringLiteral() (token, error) {
+	start := lexer.currentPos
+	currentIndex := start
+	current := lexer.next()
+	for current != '\'' && lexer.peek() != eof {
+		if current == '\\' && lexer.peek() == '\'' {
+			chunk := lexer.expression[currentIndex : lexer.currentPos-1]
+			lexer.buf.WriteString(chunk)
+			lexer.buf.WriteString("'")
+			lexer.next()
+			currentIndex = lexer.currentPos
+		}
+		current = lexer.next()
+	}
+	if lexer.lastWidth == 0 {
+		// Then we hit an EOF so we never reached the closing
+		// delimiter.
+		return token{}, SyntaxError{
+			msg:        "Unclosed delimiter: '",
+			Expression: lexer.expression,
+			Offset:     len(lexer.expression),
+		}
+	}
+	if currentIndex < lexer.currentPos {
+		lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1])
+	}
+	value := lexer.buf.String()
+	// Reset the buffer so it can be reused again.
+	lexer.buf.Reset()
+	return token{
+		tokenType: tStringLiteral,
+		value:     value,
+		position:  start,
+		length:    len(value),
+	}, nil
+}
+
+func (lexer *Lexer) syntaxError(msg string) SyntaxError {
+	return SyntaxError{
+		msg:        msg,
+		Expression: lexer.expression,
+		Offset:     lexer.currentPos - 1,
+	}
+}
+
+// Checks for a two char token, otherwise matches a single character
+// token. This is used whenever a two char token overlaps a single
+// char token, e.g. "||" -> tOr, "|" -> tPipe.
+func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token {
+	start := lexer.currentPos - lexer.lastWidth
+	nextRune := lexer.next()
+	var t token
+	if nextRune == second {
+		t = token{
+			tokenType: matchedType,
+			value:     string(first) + string(second),
+			position:  start,
+			length:    2,
+		}
+	} else {
+		lexer.back()
+		t = token{
+			tokenType: singleCharType,
+			value:     string(first),
+			position:  start,
+			length:    1,
+		}
+	}
+	return t
+}
+
+func (lexer *Lexer) consumeLBracket() token {
+	// There are three options here:
+	// 1. A filter expression "[?"
+	// 2. A flatten operator "[]"
+	// 3. A bare lbracket "["
+	start := lexer.currentPos - lexer.lastWidth
+	nextRune := lexer.next()
+	var t token
+	if nextRune == '?' {
+		t = token{
+			tokenType: tFilter,
+			value:     "[?",
+			position:  start,
+			length:    2,
+		}
+	} else if nextRune == ']' {
+		t = token{
+			tokenType: tFlatten,
+			value:     "[]",
+			position:  start,
+			length:    2,
+		}
+	} else {
+		t = token{
+			tokenType: tLbracket,
+			value:     "[",
+			position:  start,
+			length:    1,
+		}
+		lexer.back()
+	}
+	return t
+}
+
+func (lexer *Lexer) consumeQuotedIdentifier() (token, error) {
+	start := lexer.currentPos
+	value, err := lexer.consumeUntil('"')
+	if err != nil {
+		return token{}, err
+	}
+	var decoded string
+	asJSON := []byte("\"" + value + "\"")
+	if err := json.Unmarshal([]byte(asJSON), &decoded); err != nil {
+		return token{}, err
+	}
+	return token{
+		tokenType: tQuotedIdentifier,
+		value:     decoded,
+		position:  start - 1,
+		length:    len(decoded),
+	}, nil
+}
+
+func (lexer *Lexer) consumeUnquotedIdentifier() token {
+	// Consume runes until we reach the end of an unquoted
+	// identifier.
+	start := lexer.currentPos - lexer.lastWidth
+	for {
+		r := lexer.next()
+		if r < 0 || r > 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 {
+			lexer.back()
+			break
+		}
+	}
+	value := lexer.expression[start:lexer.currentPos]
+	return token{
+		tokenType: tUnquotedIdentifier,
+		value:     value,
+		position:  start,
+		length:    lexer.currentPos - start,
+	}
+}
+
+func (lexer *Lexer) consumeNumber() token {
+	// Consume runes until we reach something that's not a number.
+ start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < '0' || r > '9' { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tNumber, + value: value, + position: start, + length: lexer.currentPos - start, + } +} diff --git a/vendor/github.com/jmespath/go-jmespath/lexer_test.go b/vendor/github.com/jmespath/go-jmespath/lexer_test.go new file mode 100644 index 000000000..d13a042da --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/lexer_test.go @@ -0,0 +1,161 @@ +package jmespath + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +var lexingTests = []struct { + expression string + expected []token +}{ + {"*", []token{{tStar, "*", 0, 1}}}, + {".", []token{{tDot, ".", 0, 1}}}, + {"[?", []token{{tFilter, "[?", 0, 2}}}, + {"[]", []token{{tFlatten, "[]", 0, 2}}}, + {"(", []token{{tLparen, "(", 0, 1}}}, + {")", []token{{tRparen, ")", 0, 1}}}, + {"[", []token{{tLbracket, "[", 0, 1}}}, + {"]", []token{{tRbracket, "]", 0, 1}}}, + {"{", []token{{tLbrace, "{", 0, 1}}}, + {"}", []token{{tRbrace, "}", 0, 1}}}, + {"||", []token{{tOr, "||", 0, 2}}}, + {"|", []token{{tPipe, "|", 0, 1}}}, + {"29", []token{{tNumber, "29", 0, 2}}}, + {"2", []token{{tNumber, "2", 0, 1}}}, + {"0", []token{{tNumber, "0", 0, 1}}}, + {"-20", []token{{tNumber, "-20", 0, 3}}}, + {"foo", []token{{tUnquotedIdentifier, "foo", 0, 3}}}, + {`"bar"`, []token{{tQuotedIdentifier, "bar", 0, 3}}}, + // Escaping the delimiter + {`"bar\"baz"`, []token{{tQuotedIdentifier, `bar"baz`, 0, 7}}}, + {",", []token{{tComma, ",", 0, 1}}}, + {":", []token{{tColon, ":", 0, 1}}}, + {"<", []token{{tLT, "<", 0, 1}}}, + {"<=", []token{{tLTE, "<=", 0, 2}}}, + {">", []token{{tGT, ">", 0, 1}}}, + {">=", []token{{tGTE, ">=", 0, 2}}}, + {"==", []token{{tEQ, "==", 0, 2}}}, + {"!=", []token{{tNE, "!=", 0, 2}}}, + {"`[0, 1, 2]`", []token{{tJSONLiteral, "[0, 1, 2]", 1, 9}}}, + {"'foo'", []token{{tStringLiteral, "foo", 1, 3}}}, + {"'a'", []token{{tStringLiteral, "a", 1, 1}}}, + {`'foo\'bar'`, []token{{tStringLiteral, "foo'bar", 1, 7}}}, + {"@", []token{{tCurrent, "@", 0, 1}}}, + {"&", []token{{tExpref, "&", 0, 1}}}, + // Quoted identifier unicode escape sequences + {`"\u2713"`, []token{{tQuotedIdentifier, "✓", 0, 3}}}, + {`"\\"`, []token{{tQuotedIdentifier, `\`, 0, 1}}}, + {"`\"foo\"`", []token{{tJSONLiteral, "\"foo\"", 1, 5}}}, + // Combinations of tokens. + {"foo.bar", []token{ + {tUnquotedIdentifier, "foo", 0, 3}, + {tDot, ".", 3, 1}, + {tUnquotedIdentifier, "bar", 4, 3}, + }}, + {"foo[0]", []token{ + {tUnquotedIdentifier, "foo", 0, 3}, + {tLbracket, "[", 3, 1}, + {tNumber, "0", 4, 1}, + {tRbracket, "]", 5, 1}, + }}, + {"foo[?a 0 { + output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent)) + childIndent := nextIndent + 2 + for _, elem := range node.children { + output += elem.PrettyPrint(childIndent) + } + } + output += fmt.Sprintf("%s}\n", spaces) + return output +} + +var bindingPowers = map[tokType]int{ + tEOF: 0, + tUnquotedIdentifier: 0, + tQuotedIdentifier: 0, + tRbracket: 0, + tRparen: 0, + tComma: 0, + tRbrace: 0, + tNumber: 0, + tCurrent: 0, + tExpref: 0, + tColon: 0, + tPipe: 1, + tOr: 2, + tAnd: 3, + tEQ: 5, + tLT: 5, + tLTE: 5, + tGT: 5, + tGTE: 5, + tNE: 5, + tFlatten: 9, + tStar: 20, + tFilter: 21, + tDot: 40, + tNot: 45, + tLbrace: 50, + tLbracket: 55, + tLparen: 60, +} + +// Parser holds state about the current expression being parsed. 
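The bindingPowers table above drives the Pratt (top-down operator-precedence) parser that follows: parseExpression keeps folding infix tokens into the left-hand node for as long as their binding power exceeds the caller's. A small sketch of how precedence shows up in the resulting AST, using only exported API (NewParser, Parse, PrettyPrint):

```go
package main

import (
	"fmt"

	"github.com/jmespath/go-jmespath"
)

func main() {
	parser := jmespath.NewParser()
	// tDot (40) binds much tighter than tPipe (1), so this parses as
	// (foo.bar) | (baz.qux), not foo.(bar | baz).qux.
	ast, err := parser.Parse("foo.bar | baz.qux")
	if err != nil {
		panic(err)
	}
	fmt.Print(ast.PrettyPrint(0))
}
```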
+type Parser struct {
+	expression string
+	tokens     []token
+	index      int
+}
+
+// NewParser creates a new JMESPath parser.
+func NewParser() *Parser {
+	p := Parser{}
+	return &p
+}
+
+// Parse will compile a JMESPath expression.
+func (p *Parser) Parse(expression string) (ASTNode, error) {
+	lexer := NewLexer()
+	p.expression = expression
+	p.index = 0
+	tokens, err := lexer.tokenize(expression)
+	if err != nil {
+		return ASTNode{}, err
+	}
+	p.tokens = tokens
+	parsed, err := p.parseExpression(0)
+	if err != nil {
+		return ASTNode{}, err
+	}
+	if p.current() != tEOF {
+		return ASTNode{}, p.syntaxError(fmt.Sprintf(
+			"Unexpected token at the end of the expression: %s", p.current()))
+	}
+	return parsed, nil
+}
+
+func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) {
+	var err error
+	leftToken := p.lookaheadToken(0)
+	p.advance()
+	leftNode, err := p.nud(leftToken)
+	if err != nil {
+		return ASTNode{}, err
+	}
+	currentToken := p.current()
+	for bindingPower < bindingPowers[currentToken] {
+		p.advance()
+		leftNode, err = p.led(currentToken, leftNode)
+		if err != nil {
+			return ASTNode{}, err
+		}
+		currentToken = p.current()
+	}
+	return leftNode, nil
+}
+
+func (p *Parser) parseIndexExpression() (ASTNode, error) {
+	if p.lookahead(0) == tColon || p.lookahead(1) == tColon {
+		return p.parseSliceExpression()
+	}
+	indexStr := p.lookaheadToken(0).value
+	parsedInt, err := strconv.Atoi(indexStr)
+	if err != nil {
+		return ASTNode{}, err
+	}
+	indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt}
+	p.advance()
+	if err := p.match(tRbracket); err != nil {
+		return ASTNode{}, err
+	}
+	return indexNode, nil
+}
+
+func (p *Parser) parseSliceExpression() (ASTNode, error) {
+	parts := []*int{nil, nil, nil}
+	index := 0
+	current := p.current()
+	for current != tRbracket && index < 3 {
+		if current == tColon {
+			index++
+			p.advance()
+		} else if current == tNumber {
+			parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value)
+			if err != nil {
+				return ASTNode{}, err
+			}
+			parts[index] = &parsedInt
+			p.advance()
+		} else {
+			return ASTNode{}, p.syntaxError(
+				"Expected tColon or tNumber" + ", received: " + p.current().String())
+		}
+		current = p.current()
+	}
+	if err := p.match(tRbracket); err != nil {
+		return ASTNode{}, err
+	}
+	return ASTNode{
+		nodeType: ASTSlice,
+		value:    parts,
+	}, nil
+}
+
+func (p *Parser) match(tokenType tokType) error {
+	if p.current() == tokenType {
+		p.advance()
+		return nil
+	}
+	return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String())
+}
+
+func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) {
+	switch tokenType {
+	case tDot:
+		if p.current() != tStar {
+			right, err := p.parseDotRHS(bindingPowers[tDot])
+			return ASTNode{
+				nodeType: ASTSubexpression,
+				children: []ASTNode{node, right},
+			}, err
+		}
+		p.advance()
+		right, err := p.parseProjectionRHS(bindingPowers[tDot])
+		return ASTNode{
+			nodeType: ASTValueProjection,
+			children: []ASTNode{node, right},
+		}, err
+	case tPipe:
+		right, err := p.parseExpression(bindingPowers[tPipe])
+		return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err
+	case tOr:
+		right, err := p.parseExpression(bindingPowers[tOr])
+		return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err
+	case tAnd:
+		right, err := p.parseExpression(bindingPowers[tAnd])
+		return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err
+	case tLparen:
+		name := node.value
+		var args []ASTNode
+		for p.current() != tRparen {
expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if p.current() == tComma { + if err := p.match(tComma); err != nil { + return ASTNode{}, err + } + } + args = append(args, expression) + } + if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTFunctionExpression, + value: name, + children: args, + }, nil + case tFilter: + return p.parseFilter(node) + case tFlatten: + left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}} + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{left, right}, + }, err + case tEQ, tNE, tGT, tGTE, tLT, tLTE: + right, err := p.parseExpression(bindingPowers[tokenType]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTComparator, + value: tokenType, + children: []ASTNode{node, right}, + }, nil + case tLbracket: + tokenType := p.current() + var right ASTNode + var err error + if tokenType == tNumber || tokenType == tColon { + right, err = p.parseIndexExpression() + if err != nil { + return ASTNode{}, err + } + return p.projectIfSlice(node, right) + } + // Otherwise this is a projection. + if err := p.match(tStar); err != nil { + return ASTNode{}, err + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{node, right}, + }, nil + } + return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String()) +} + +func (p *Parser) nud(token token) (ASTNode, error) { + switch token.tokenType { + case tJSONLiteral: + var parsed interface{} + err := json.Unmarshal([]byte(token.value), &parsed) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTLiteral, value: parsed}, nil + case tStringLiteral: + return ASTNode{nodeType: ASTLiteral, value: token.value}, nil + case tUnquotedIdentifier: + return ASTNode{ + nodeType: ASTField, + value: token.value, + }, nil + case tQuotedIdentifier: + node := ASTNode{nodeType: ASTField, value: token.value} + if p.current() == tLparen { + return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token) + } + return node, nil + case tStar: + left := ASTNode{nodeType: ASTIdentity} + var right ASTNode + var err error + if p.current() == tRbracket { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + } + return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err + case tFilter: + return p.parseFilter(ASTNode{nodeType: ASTIdentity}) + case tLbrace: + return p.parseMultiSelectHash() + case tFlatten: + left := ASTNode{ + nodeType: ASTFlatten, + children: []ASTNode{{nodeType: ASTIdentity}}, + } + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil + case tLbracket: + tokenType := p.current() + //var right ASTNode + if tokenType == tNumber || tokenType == tColon { + right, err := p.parseIndexExpression() + if err != nil { + return ASTNode{}, nil + } + return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right) + } else if tokenType == tStar && p.lookahead(1) == tRbracket { + p.advance() + p.advance() + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return 
ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{{nodeType: ASTIdentity}, right}, + }, nil + } else { + return p.parseMultiSelectList() + } + case tCurrent: + return ASTNode{nodeType: ASTCurrentNode}, nil + case tExpref: + expression, err := p.parseExpression(bindingPowers[tExpref]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil + case tNot: + expression, err := p.parseExpression(bindingPowers[tNot]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil + case tLparen: + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return expression, nil + case tEOF: + return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token) + } + + return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token) +} + +func (p *Parser) parseMultiSelectList() (ASTNode, error) { + var expressions []ASTNode + for { + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + expressions = append(expressions, expression) + if p.current() == tRbracket { + break + } + err = p.match(tComma) + if err != nil { + return ASTNode{}, err + } + } + err := p.match(tRbracket) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTMultiSelectList, + children: expressions, + }, nil +} + +func (p *Parser) parseMultiSelectHash() (ASTNode, error) { + var children []ASTNode + for { + keyToken := p.lookaheadToken(0) + if err := p.match(tUnquotedIdentifier); err != nil { + if err := p.match(tQuotedIdentifier); err != nil { + return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier") + } + } + keyName := keyToken.value + err := p.match(tColon) + if err != nil { + return ASTNode{}, err + } + value, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + node := ASTNode{ + nodeType: ASTKeyValPair, + value: keyName, + children: []ASTNode{value}, + } + children = append(children, node) + if p.current() == tComma { + err := p.match(tComma) + if err != nil { + return ASTNode{}, nil + } + } else if p.current() == tRbrace { + err := p.match(tRbrace) + if err != nil { + return ASTNode{}, nil + } + break + } + } + return ASTNode{ + nodeType: ASTMultiSelectHash, + children: children, + }, nil +} + +func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) { + indexExpr := ASTNode{ + nodeType: ASTIndexExpression, + children: []ASTNode{left, right}, + } + if right.nodeType == ASTSlice { + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{indexExpr, right}, + }, err + } + return indexExpr, nil +} +func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) { + var right, condition ASTNode + var err error + condition, err = p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + if p.current() == tFlatten { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tFilter]) + if err != nil { + return ASTNode{}, err + } + } + + return ASTNode{ + nodeType: ASTFilterProjection, + children: []ASTNode{node, right, condition}, + }, nil +} + +func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) { + lookahead := 
p.current() + if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) { + return p.parseExpression(bindingPower) + } else if lookahead == tLbracket { + if err := p.match(tLbracket); err != nil { + return ASTNode{}, err + } + return p.parseMultiSelectList() + } else if lookahead == tLbrace { + if err := p.match(tLbrace); err != nil { + return ASTNode{}, err + } + return p.parseMultiSelectHash() + } + return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace") +} + +func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) { + current := p.current() + if bindingPowers[current] < 10 { + return ASTNode{nodeType: ASTIdentity}, nil + } else if current == tLbracket { + return p.parseExpression(bindingPower) + } else if current == tFilter { + return p.parseExpression(bindingPower) + } else if current == tDot { + err := p.match(tDot) + if err != nil { + return ASTNode{}, err + } + return p.parseDotRHS(bindingPower) + } else { + return ASTNode{}, p.syntaxError("Error") + } +} + +func (p *Parser) lookahead(number int) tokType { + return p.lookaheadToken(number).tokenType +} + +func (p *Parser) current() tokType { + return p.lookahead(0) +} + +func (p *Parser) lookaheadToken(number int) token { + return p.tokens[p.index+number] +} + +func (p *Parser) advance() { + p.index++ +} + +func tokensOneOf(elements []tokType, token tokType) bool { + for _, elem := range elements { + if elem == token { + return true + } + } + return false +} + +func (p *Parser) syntaxError(msg string) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: p.lookaheadToken(0).position, + } +} + +// Create a SyntaxError based on the provided token. +// This differs from syntaxError() which creates a SyntaxError +// based on the current lookahead token. 
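Note the `< 10` threshold in parseProjectionRHS above: a projection only keeps absorbing its right-hand side while the next token binds at 10 or higher, which is why a pipe (binding power 1) terminates a projection and operates on the collected result as a whole. A hedged sketch of that behavior through the public API:

```go
package main

import (
	"fmt"

	"github.com/jmespath/go-jmespath"
)

func main() {
	data := map[string]interface{}{
		"a": []interface{}{
			map[string]interface{}{"b": "one"},
			map[string]interface{}{"b": "two"},
		},
	}
	// a[*].b collects ["one", "two"]; the pipe stops the projection,
	// so [0] indexes the collected list rather than each element.
	result, err := jmespath.Search("a[*].b | [0]", data)
	fmt.Println(result, err) // one <nil>
}
```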
+func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError {
+	return SyntaxError{
+		msg:        msg,
+		Expression: p.expression,
+		Offset:     t.position,
+	}
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/parser_test.go b/vendor/github.com/jmespath/go-jmespath/parser_test.go
new file mode 100644
index 000000000..997a0f4d7
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/parser_test.go
@@ -0,0 +1,136 @@
+package jmespath
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+var parsingErrorTests = []struct {
+	expression string
+	msg        string
+}{
+	{"foo.", "Incomplete expression"},
+	{"[foo", "Incomplete expression"},
+	{"]", "Invalid"},
+	{")", "Invalid"},
+	{"}", "Invalid"},
+	{"foo..bar", "Invalid"},
+	{`foo."bar`, "Forwards lexer errors"},
+	{`{foo: bar`, "Incomplete expression"},
+	{`{foo bar}`, "Invalid"},
+	{`[foo bar]`, "Invalid"},
+	{`foo@`, "Invalid"},
+	{`&&&&&&&&&&&&t(`, "Invalid"},
+	{`[*][`, "Invalid"},
+}
+
+func TestParsingErrors(t *testing.T) {
+	assert := assert.New(t)
+	parser := NewParser()
+	for _, tt := range parsingErrorTests {
+		_, err := parser.Parse(tt.expression)
+		assert.NotNil(err, fmt.Sprintf("Expected parsing error: %s, for expression: %s", tt.msg, tt.expression))
+	}
+}
+
+var prettyPrinted = `ASTProjection {
+  children: {
+    ASTField {
+      value: "foo"
+    }
+    ASTSubexpression {
+      children: {
+        ASTSubexpression {
+          children: {
+            ASTField {
+              value: "bar"
+            }
+            ASTField {
+              value: "baz"
+            }
+          }
+          ASTField {
+            value: "qux"
+          }
+        }
+}
+`
+
+var prettyPrintedCompNode = `ASTFilterProjection {
+  children: {
+    ASTField {
+      value: "a"
+    }
+    ASTIdentity {
+    }
+    ASTComparator {
+      value: tLTE
+      children: {
+        ASTField {
+          value: "b"
+        }
+        ASTField {
+          value: "c"
+        }
+      }
+}
+`
+
+func TestPrettyPrintedAST(t *testing.T) {
+	assert := assert.New(t)
+	parser := NewParser()
+	parsed, _ := parser.Parse("foo[*].bar.baz.qux")
+	assert.Equal(parsed.PrettyPrint(0), prettyPrinted)
+}
+
+func TestPrettyPrintedCompNode(t *testing.T) {
+	assert := assert.New(t)
+	parser := NewParser()
+	parsed, _ := parser.Parse("a[?b<=c]")
+	assert.Equal(parsed.PrettyPrint(0), prettyPrintedCompNode)
+}
+
+func BenchmarkParseIdentifier(b *testing.B) {
+	runParseBenchmark(b, exprIdentifier)
+}
+
+func BenchmarkParseSubexpression(b *testing.B) {
+	runParseBenchmark(b, exprSubexpr)
+}
+
+func BenchmarkParseDeeplyNested50(b *testing.B) {
+	runParseBenchmark(b, deeplyNested50)
+}
+
+func BenchmarkParseDeepNested50Pipe(b *testing.B) {
+	runParseBenchmark(b, deeplyNested50Pipe)
+}
+
+func BenchmarkParseDeepNested50Index(b *testing.B) {
+	runParseBenchmark(b, deeplyNested50Index)
+}
+
+func BenchmarkParseQuotedIdentifier(b *testing.B) {
+	runParseBenchmark(b, exprQuotedIdentifier)
+}
+
+func BenchmarkParseQuotedIdentifierEscapes(b *testing.B) {
+	runParseBenchmark(b, quotedIdentifierEscapes)
+}
+
+func BenchmarkParseRawStringLiteral(b *testing.B) {
+	runParseBenchmark(b, rawStringLiteral)
+}
+
+func BenchmarkParseDeepProjection104(b *testing.B) {
+	runParseBenchmark(b, deepProjection104)
+}
+
+func runParseBenchmark(b *testing.B, expression string) {
+	parser := NewParser()
+	for i := 0; i < b.N; i++ {
+		parser.Parse(expression)
+	}
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/toktype_string.go b/vendor/github.com/jmespath/go-jmespath/toktype_string.go
new file mode 100644
index 000000000..dae79cbdf
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/toktype_string.go
@@ -0,0 +1,16 @@
+// generated by stringer -type=tokType; DO NOT EDIT
+
+package jmespath
+
+import "fmt"
+
+const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF"
+
+var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214}
+
+func (i tokType) String() string {
+	if i < 0 || i >= tokType(len(_tokType_index)-1) {
+		return fmt.Sprintf("tokType(%d)", i)
+	}
+	return _tokType_name[_tokType_index[i]:_tokType_index[i+1]]
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/util.go b/vendor/github.com/jmespath/go-jmespath/util.go
new file mode 100644
index 000000000..ddc1b7d7d
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/util.go
@@ -0,0 +1,185 @@
+package jmespath
+
+import (
+	"errors"
+	"reflect"
+)
+
+// IsFalse determines if an object is false based on the JMESPath spec.
+// JMESPath defines false values to be any of:
+// - An empty string, array, or hash.
+// - The boolean value false.
+// - nil
+func isFalse(value interface{}) bool {
+	switch v := value.(type) {
+	case bool:
+		return !v
+	case []interface{}:
+		return len(v) == 0
+	case map[string]interface{}:
+		return len(v) == 0
+	case string:
+		return len(v) == 0
+	case nil:
+		return true
+	}
+	// Try the reflection cases before returning false.
+	rv := reflect.ValueOf(value)
+	switch rv.Kind() {
+	case reflect.Struct:
+		// A struct type will never be false, even if
+		// all of its values are the zero type.
+		return false
+	case reflect.Slice, reflect.Map:
+		return rv.Len() == 0
+	case reflect.Ptr:
+		if rv.IsNil() {
+			return true
+		}
+		// If it's a pointer type, we'll try to deref the pointer
+		// and evaluate the pointer value for isFalse.
+		element := rv.Elem()
+		return isFalse(element.Interface())
+	}
+	return false
+}
+
+// ObjsEqual is a generic object equality check.
+// It will take two arbitrary objects and recursively determine
+// if they are equal.
+func objsEqual(left interface{}, right interface{}) bool {
+	return reflect.DeepEqual(left, right)
+}
+
+// SliceParam refers to a single part of a slice.
+// A slice consists of a start, a stop, and a step, similar to
+// Python slices.
+type sliceParam struct {
+	N         int
+	Specified bool
+}
+
+// Slice supports [start:stop:step] style slicing that's supported in JMESPath.
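Since computeSliceParams below mirrors Python's slice-defaulting rules (negative indices count from the end; an omitted start or stop defaults according to the sign of the step), the behavior is easiest to see through the public API. A minimal sketch:

```go
package main

import (
	"fmt"

	"github.com/jmespath/go-jmespath"
)

func main() {
	data := []interface{}{"a", "b", "c", "d", "e"}

	// With step -1 and start/stop omitted, start defaults to len-1 and
	// stop to -1, reversing the list just like Python's [::-1].
	reversed, _ := jmespath.Search("[::-1]", data)
	fmt.Println(reversed) // [e d c b a]

	// Negative indices count from the end: [-2:] keeps the last two.
	tail, _ := jmespath.Search("[-2:]", data)
	fmt.Println(tail) // [d e]
}
```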
+func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) { + computed, err := computeSliceParams(len(slice), parts) + if err != nil { + return nil, err + } + start, stop, step := computed[0], computed[1], computed[2] + result := []interface{}{} + if step > 0 { + for i := start; i < stop; i += step { + result = append(result, slice[i]) + } + } else { + for i := start; i > stop; i += step { + result = append(result, slice[i]) + } + } + return result, nil +} + +func computeSliceParams(length int, parts []sliceParam) ([]int, error) { + var start, stop, step int + if !parts[2].Specified { + step = 1 + } else if parts[2].N == 0 { + return nil, errors.New("Invalid slice, step cannot be 0") + } else { + step = parts[2].N + } + var stepValueNegative bool + if step < 0 { + stepValueNegative = true + } else { + stepValueNegative = false + } + + if !parts[0].Specified { + if stepValueNegative { + start = length - 1 + } else { + start = 0 + } + } else { + start = capSlice(length, parts[0].N, step) + } + + if !parts[1].Specified { + if stepValueNegative { + stop = -1 + } else { + stop = length + } + } else { + stop = capSlice(length, parts[1].N, step) + } + return []int{start, stop, step}, nil +} + +func capSlice(length int, actual int, step int) int { + if actual < 0 { + actual += length + if actual < 0 { + if step < 0 { + actual = -1 + } else { + actual = 0 + } + } + } else if actual >= length { + if step < 0 { + actual = length - 1 + } else { + actual = length + } + } + return actual +} + +// ToArrayNum converts an empty interface type to a slice of float64. +// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. +func toArrayNum(data interface{}) ([]float64, bool) { + // Is there a better way to do this with reflect? + if d, ok := data.([]interface{}); ok { + result := make([]float64, len(d)) + for i, el := range d { + item, ok := el.(float64) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +// ToArrayStr converts an empty interface type to a slice of strings. +// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. If the input data could be entirely +// converted, then the converted data, along with a second value of true, +// will be returned. +func toArrayStr(data interface{}) ([]string, bool) { + // Is there a better way to do this with reflect? 
+	if d, ok := data.([]interface{}); ok {
+		result := make([]string, len(d))
+		for i, el := range d {
+			item, ok := el.(string)
+			if !ok {
+				return nil, false
+			}
+			result[i] = item
+		}
+		return result, true
+	}
+	return nil, false
+}
+
+func isSliceType(v interface{}) bool {
+	if v == nil {
+		return false
+	}
+	return reflect.TypeOf(v).Kind() == reflect.Slice
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/util_test.go b/vendor/github.com/jmespath/go-jmespath/util_test.go
new file mode 100644
index 000000000..1754b5d3f
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/util_test.go
@@ -0,0 +1,73 @@
+package jmespath
+
+import (
+	"github.com/stretchr/testify/assert"
+	"testing"
+)
+
+func TestSlicePositiveStep(t *testing.T) {
+	assert := assert.New(t)
+	input := make([]interface{}, 5)
+	input[0] = 0
+	input[1] = 1
+	input[2] = 2
+	input[3] = 3
+	input[4] = 4
+	result, err := slice(input, []sliceParam{{0, true}, {3, true}, {1, true}})
+	assert.Nil(err)
+	assert.Equal(input[:3], result)
+}
+
+func TestIsFalseJSONTypes(t *testing.T) {
+	assert := assert.New(t)
+	assert.True(isFalse(false))
+	assert.True(isFalse(""))
+	var empty []interface{}
+	assert.True(isFalse(empty))
+	m := make(map[string]interface{})
+	assert.True(isFalse(m))
+	assert.True(isFalse(nil))
+}
+
+func TestIsFalseWithUserDefinedStructs(t *testing.T) {
+	assert := assert.New(t)
+	type nilStructType struct {
+		SliceOfPointers []*string
+	}
+	nilStruct := nilStructType{SliceOfPointers: nil}
+	assert.True(isFalse(nilStruct.SliceOfPointers))
+
+	// A user-defined struct will never be false though,
+	// even if its fields are the zero type.
+	assert.False(isFalse(nilStruct))
+}
+
+func TestIsFalseWithNilInterface(t *testing.T) {
+	assert := assert.New(t)
+	var a *int = nil
+	var nilInterface interface{}
+	nilInterface = a
+	assert.True(isFalse(nilInterface))
+}
+
+func TestIsFalseWithMapOfUserStructs(t *testing.T) {
+	assert := assert.New(t)
+	type foo struct {
+		Bar string
+		Baz string
+	}
+	m := make(map[int]foo)
+	assert.True(isFalse(m))
+}
+
+func TestObjsEqual(t *testing.T) {
+	assert := assert.New(t)
+	assert.True(objsEqual("foo", "foo"))
+	assert.True(objsEqual(20, 20))
+	assert.True(objsEqual([]int{1, 2, 3}, []int{1, 2, 3}))
+	assert.True(objsEqual(nil, nil))
+	assert.True(!objsEqual(nil, "foo"))
+	assert.True(objsEqual([]int{}, []int{}))
+	assert.True(!objsEqual([]int{}, nil))
+}
diff --git a/vendor/github.com/juju/errgo/LICENSE b/vendor/github.com/juju/errgo/LICENSE
new file mode 100644
index 000000000..53320c352
--- /dev/null
+++ b/vendor/github.com/juju/errgo/LICENSE
@@ -0,0 +1,185 @@
+This software is licensed under the LGPLv3, included below.
+
+As a special exception to the GNU Lesser General Public License version 3
+("LGPL3"), the copyright holders of this Library give you permission to
+convey to a third party a Combined Work that links statically or dynamically
+to this Library without providing any Minimal Corresponding Source or
+Minimal Application Code as set out in 4d or providing the installation
+information set out in section 4e, provided that you comply with the other
+provisions of LGPL3 and provided that you meet, for the Application the
+terms and conditions of the license(s) which apply to the Application.
+
+Except as stated in this special exception, the provisions of LGPL3 will
+continue to comply in full to this Library. If you modify this Library, you
+may apply this exception to your version of this Library, but you are not
+obliged to do so.
If you do not wish to do so, delete this exception +statement from your version. This exception does not (and cannot) modify any +license terms which apply to the Application, with which you must still +comply. + + + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. 
+ + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. 
If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. diff --git a/vendor/github.com/juju/errgo/README.markdown b/vendor/github.com/juju/errgo/README.markdown new file mode 100644 index 000000000..09dd6b2e9 --- /dev/null +++ b/vendor/github.com/juju/errgo/README.markdown @@ -0,0 +1,281 @@ +# errors +-- + import "github.com/juju/errgo" + +The errors package provides a way to create and diagnose errors. It is +compatible with the usual Go error idioms but adds a way to wrap errors so that +they record source location information while retaining a consistent way for +code to inspect errors to find out particular problems. + +## Usage + +#### func Any + +```go +func Any(error) bool +``` +Any returns true. It can be used as an argument to Mask to allow any diagnosis +to pass through to the wrapped error. + +#### func Cause + +```go +func Cause(err error) error +``` +Cause returns the cause of the given error. If err does not implement Causer or +its Cause method returns nil, it returns err itself. + +Cause is the usual way to diagnose errors that may have been wrapped by Mask or +NoteMask. + +#### func Details + +```go +func Details(err error) string +``` +Details returns information about the stack of underlying errors wrapped by err, +in the format: + + [{filename:99: error one} {otherfile:55: cause of error one}] + +The details are found by type-asserting the error to the Locationer, Causer and +Wrapper interfaces. Details of the underlying stack are found by recursively +calling Underlying when the underlying error implements Wrapper. + +#### func Is + +```go +func Is(err error) func(error) bool +``` +Is returns a function that returns whether the an error is equal to the given +error. It is intended to be used as a "pass" argument to Mask and friends; for +example: + + return errors.Mask(err, errors.Is(http.ErrNoCookie)) + +would return an error with an http.ErrNoCookie cause only if that was err's +diagnosis; otherwise the diagnosis would be itself. + +#### func Mask + +```go +func Mask(underlying error, pass ...func(error) bool) error +``` +Mask returns an Err that wraps the given underyling error. The error message is +unchanged, but the error location records the caller of Mask. + +If err is nil, Mask returns nil. + +By default Mask conceals the cause of the wrapped error, but if pass(Cause(err)) +returns true for any of the provided pass functions, the cause of the returned +error will be Cause(err). + +For example, the following code will return an error whose cause is the error +from the os.Open call when (and only when) the file does not exist. 
+ + f, err := os.Open("non-existent-file") + if err != nil { + return errors.Mask(err, os.IsNotExist) + } + +In order to add context to returned errors, it is conventional to call Mask when +returning any error received from elsewhere. + +#### func MaskFunc + +```go +func MaskFunc(allow ...func(error) bool) func(error, ...func(error) bool) error +``` +MaskFunc returns an equivalent of Mask that always allows the specified causes +in addition to any causes specified when the returned function is called. + +It is defined for convenience, for example when all calls to Mask in a given +package wish to allow the same set of causes to be returned. + +#### func New + +```go +func New(s string) error +``` +New returns a new error with the given error message and no cause. It is a +drop-in replacement for errors.New from the standard library. + +#### func Newf + +```go +func Newf(f string, a ...interface{}) error +``` +Newf returns a new error with the given printf-formatted error message and no +cause. + +#### func NoteMask + +```go +func NoteMask(underlying error, msg string, pass ...func(error) bool) error +``` +NoteMask returns an Err that has the given underlying error, with the given +message added as context, and allowing the cause of the underlying error to pass +through into the result if allowed by the specific pass functions (see Mask for +an explanation of the pass parameter). + +#### func Notef + +```go +func Notef(underlying error, f string, a ...interface{}) error +``` +Notef returns an Error that wraps the given underlying error and adds the given +formatted context message. The returned error has no cause (use NoteMask or +WithCausef to add a message while retaining a cause). + +#### func WithCausef + +```go +func WithCausef(underlying, cause error, f string, a ...interface{}) error +``` +WithCausef returns a new Error that wraps the given (possibly nil) underlying +error and associates it with the given cause. The given formatted message +context will also be added. + +#### type Causer + +```go +type Causer interface { + Cause() error +} +``` + +Causer is the type of an error that may provide an error cause for error +diagnosis. Cause may return nil if there is no cause (for example because the +cause has been masked). + +#### type Err + +```go +type Err struct { + // Message_ holds the text of the error message. It may be empty + // if Underlying is set. + Message_ string + + // Cause_ holds the cause of the error as returned + // by the Cause method. + Cause_ error + + // Underlying holds the underlying error, if any. + Underlying_ error + + // Location holds the source code location where the error was + // created. + Location_ Location +} +``` + +Err holds a description of an error along with information about where the error +was created. + +It may be embedded in custom error types to add extra information that this +errors package can understand. + +#### func (*Err) Cause + +```go +func (e *Err) Cause() error +``` +Cause implements Causer. + +#### func (*Err) Error + +```go +func (e *Err) Error() string +``` +Error implements error.Error. + +#### func (*Err) GoString + +```go +func (e *Err) GoString() string +``` +GoString returns the details of the receiving error message, so that printing an +error with %#v will produce useful information. + +#### func (*Err) Location + +```go +func (e *Err) Location() Location +``` +Location implements Locationer. + +#### func (*Err) Message + +```go +func (e *Err) Message() string +``` +Message returns the top level error message. 
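The pieces above compose naturally: New records a location, Notef adds context while hiding the cause, and Details walks the wrapped stack. A minimal sketch (the file names and line numbers in the comments are illustrative):

```go
package main

import (
	"fmt"

	"github.com/juju/errgo"
)

func loadConfig() error {
	return errgo.New("config file is corrupt")
}

func main() {
	if err := loadConfig(); err != nil {
		wrapped := errgo.Notef(err, "cannot start server")
		fmt.Println(wrapped)
		// cannot start server: config file is corrupt
		fmt.Println(errgo.Details(wrapped))
		// [{main.go:16: cannot start server} {main.go:11: config file is corrupt}]
	}
}
```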
+
+#### func (*Err) SetLocation
+
+```go
+func (e *Err) SetLocation(callDepth int)
+```
+SetLocation records the source location of the error by setting e.Location_, at
+callDepth stack frames above the call.
+
+#### func (*Err) Underlying
+
+```go
+func (e *Err) Underlying() error
+```
+Underlying returns the underlying error if any.
+
+#### type Location
+
+```go
+type Location struct {
+	File string
+	Line int
+}
+```
+
+Location describes a source code location.
+
+#### func (Location) IsSet
+
+```go
+func (loc Location) IsSet() bool
+```
+IsSet reports whether the location has been set.
+
+#### func (Location) String
+
+```go
+func (loc Location) String() string
+```
+String returns a location in filename.go:99 format.
+
+#### type Locationer
+
+```go
+type Locationer interface {
+	Location() Location
+}
+```
+
+Locationer can be implemented by any error type that wants to expose the source
+location of an error.
+
+#### type Wrapper
+
+```go
+type Wrapper interface {
+	// Message returns the top level error message,
+	// not including the message from the underlying
+	// error.
+	Message() string
+
+	// Underlying returns the underlying error, or nil
+	// if there is none.
+	Underlying() error
+}
+```
+
+Wrapper is the type of an error that wraps another error. It is exposed so that
+external types may implement it, but should in general not be used otherwise.
diff --git a/vendor/github.com/juju/errgo/errors.go b/vendor/github.com/juju/errgo/errors.go
new file mode 100644
index 000000000..fbfc95629
--- /dev/null
+++ b/vendor/github.com/juju/errgo/errors.go
@@ -0,0 +1,385 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+// The errgo package provides a way to create
+// and diagnose errors. It is compatible with
+// the usual Go error idioms but adds a way to wrap errors
+// so that they record source location information
+// while retaining a consistent way for code to
+// inspect errors to find out particular problems.
+//
+package errgo
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+	"runtime"
+)
+
+const debug = false
+
+// Location describes a source code location.
+type Location struct {
+	File string
+	Line int
+}
+
+// String returns a location in filename.go:99 format.
+func (loc Location) String() string {
+	return fmt.Sprintf("%s:%d", loc.File, loc.Line)
+}
+
+// IsSet reports whether the location has been set.
+func (loc Location) IsSet() bool {
+	return loc.File != ""
+}
+
+// Err holds a description of an error along with information about
+// where the error was created.
+//
+// It may be embedded in custom error types to add
+// extra information that this errors package can
+// understand.
+type Err struct {
+	// Message_ holds the text of the error message. It may be empty
+	// if Underlying is set.
+	Message_ string
+
+	// Cause_ holds the cause of the error as returned
+	// by the Cause method.
+	Cause_ error
+
+	// Underlying holds the underlying error, if any.
+	Underlying_ error
+
+	// Location holds the source code location where the error was
+	// created.
+	Location_ Location
+}
+
+// Location implements Locationer.
+func (e *Err) Location() Location {
+	return e.Location_
+}
+
+// Underlying returns the underlying error if any.
+func (e *Err) Underlying() error {
+	return e.Underlying_
+}
+
+// Cause implements Causer.
+func (e *Err) Cause() error {
+	return e.Cause_
+}
+
+// Message returns the top level error message.
+func (e *Err) Message() string {
+	return e.Message_
+}
+
+// Error implements error.Error.
+func (e *Err) Error() string {
+	switch {
+	case e.Message_ == "" && e.Underlying_ == nil:
+		return ""
+	case e.Message_ == "":
+		return e.Underlying_.Error()
+	case e.Underlying_ == nil:
+		return e.Message_
+	}
+	return fmt.Sprintf("%s: %v", e.Message_, e.Underlying_)
+}
+
+// GoString returns the details of the receiving error
+// message, so that printing an error with %#v will
+// produce useful information.
+func (e *Err) GoString() string {
+	return Details(e)
+}
+
+// Causer is the type of an error that may provide
+// an error cause for error diagnosis. Cause may return
+// nil if there is no cause (for example because the
+// cause has been masked).
+type Causer interface {
+	Cause() error
+}
+
+// Wrapper is the type of an error that wraps another error. It is
+// exposed so that external types may implement it, but should in
+// general not be used otherwise.
+type Wrapper interface {
+	// Message returns the top level error message,
+	// not including the message from the underlying
+	// error.
+	Message() string
+
+	// Underlying returns the underlying error, or nil
+	// if there is none.
+	Underlying() error
+}
+
+// Locationer can be implemented by any error type
+// that wants to expose the source location of an error.
+type Locationer interface {
+	Location() Location
+}
+
+// Details returns information about the stack of
+// underlying errors wrapped by err, in the format:
+//
+//	[{filename:99: error one} {otherfile:55: cause of error one}]
+//
+// The details are found by type-asserting the error to
+// the Locationer, Causer and Wrapper interfaces.
+// Details of the underlying stack are found by
+// recursively calling Underlying when the
+// underlying error implements Wrapper.
+func Details(err error) string {
+	if err == nil {
+		return "[]"
+	}
+	var s []byte
+	s = append(s, '[')
+	for {
+		s = append(s, '{')
+		if err, ok := err.(Locationer); ok {
+			loc := err.Location()
+			if loc.IsSet() {
+				s = append(s, loc.String()...)
+				s = append(s, ": "...)
+			}
+		}
+		if cerr, ok := err.(Wrapper); ok {
+			s = append(s, cerr.Message()...)
+			err = cerr.Underlying()
+		} else {
+			s = append(s, err.Error()...)
+			err = nil
+		}
+		if debug {
+			if err, ok := err.(Causer); ok {
+				if cause := err.Cause(); cause != nil {
+					s = append(s, fmt.Sprintf("=%T", cause)...)
+					s = append(s, Details(cause)...)
+				}
+			}
+		}
+		s = append(s, '}')
+		if err == nil {
+			break
+		}
+		s = append(s, ' ')
+	}
+	s = append(s, ']')
+	return string(s)
+}
+
+// SetLocation records the source location of the error by setting
+// e.Location_, at callDepth stack frames above the call.
+func (e *Err) SetLocation(callDepth int) {
+	_, file, line, _ := runtime.Caller(callDepth + 1)
+	e.Location_ = Location{file, line}
+}
+
+func setLocation(err error, callDepth int) {
+	if e, _ := err.(*Err); e != nil {
+		e.SetLocation(callDepth + 1)
+	}
+}
+
+// New returns a new error with the given error message and no cause. It
+// is a drop-in replacement for errors.New from the standard library.
+func New(s string) error {
+	err := &Err{Message_: s}
+	err.SetLocation(1)
+	return err
+}
+
+// Newf returns a new error with the given printf-formatted error
+// message and no cause.
+func Newf(f string, a ...interface{}) error {
+	err := &Err{Message_: fmt.Sprintf(f, a...)}
+	err.SetLocation(1)
+	return err
+}
+
+// match returns whether any of the given
+// functions returns true when called with err as an
+// argument.
+func match(err error, pass ...func(error) bool) bool {
+	for _, f := range pass {
+		if f(err) {
+			return true
+		}
+	}
+	return false
+}
+
+// Is returns a function that returns whether
+// an error is equal to the given error.
+// It is intended to be used as a "pass" argument
+// to Mask and friends; for example:
+//
+//	return errors.Mask(err, errors.Is(http.ErrNoCookie))
+//
+// would return an error with an http.ErrNoCookie cause
+// only if that was err's diagnosis; otherwise the diagnosis
+// would be itself.
+func Is(err error) func(error) bool {
+	return func(err1 error) bool {
+		return err == err1
+	}
+}
+
+// Any returns true. It can be used as an argument to Mask
+// to allow any diagnosis to pass through to the wrapped
+// error.
+func Any(error) bool {
+	return true
+}
+
+// NoteMask returns an Err that has the given underlying error,
+// with the given message added as context, and allowing
+// the cause of the underlying error to pass through into
+// the result if allowed by the specific pass functions
+// (see Mask for an explanation of the pass parameter).
+func NoteMask(underlying error, msg string, pass ...func(error) bool) error {
+	newErr := &Err{
+		Underlying_: underlying,
+		Message_:    msg,
+	}
+	if len(pass) > 0 {
+		if cause := Cause(underlying); match(cause, pass...) {
+			newErr.Cause_ = cause
+		}
+	}
+	if debug {
+		if newd, oldd := newErr.Cause_, Cause(underlying); newd != oldd {
+			log.Printf("Mask cause %[1]T(%[1]v)->%[2]T(%[2]v)", oldd, newd)
+			log.Printf("call stack: %s", callers(0, 20))
+			log.Printf("len(allow) == %d", len(pass))
+			log.Printf("old error %#v", underlying)
+			log.Printf("new error %#v", newErr)
+		}
+	}
+	return newErr
+}
+
+// Mask returns an Err that wraps the given underlying error. The error
+// message is unchanged, but the error location records the caller of
+// Mask.
+//
+// If err is nil, Mask returns nil.
+//
+// By default Mask conceals the cause of the wrapped error, but if
+// pass(Cause(err)) returns true for any of the provided pass functions,
+// the cause of the returned error will be Cause(err).
+//
+// For example, the following code will return an error whose cause is
+// the error from the os.Open call when (and only when) the file does
+// not exist.
+//
+//	f, err := os.Open("non-existent-file")
+//	if err != nil {
+//		return errors.Mask(err, os.IsNotExist)
+//	}
+//
+// In order to add context to returned errors, it
+// is conventional to call Mask when returning any
+// error received from elsewhere.
+//
+func Mask(underlying error, pass ...func(error) bool) error {
+	if underlying == nil {
+		return nil
+	}
+	err := NoteMask(underlying, "", pass...)
+	setLocation(err, 1)
+	return err
+}
+
+// Notef returns an Error that wraps the given underlying
+// error and adds the given formatted context message.
+// The returned error has no cause (use NoteMask
+// or WithCausef to add a message while retaining a cause).
+func Notef(underlying error, f string, a ...interface{}) error {
+	err := NoteMask(underlying, fmt.Sprintf(f, a...))
+	setLocation(err, 1)
+	return err
+}
+
+// MaskFunc returns an equivalent of Mask that always allows the
+// specified causes in addition to any causes specified when the
+// returned function is called.
+//
+// It is defined for convenience, for example when all calls to Mask in
+// a given package wish to allow the same set of causes to be returned.
+func MaskFunc(allow ...func(error) bool) func(error, ...func(error) bool) error { + return func(err error, allow1 ...func(error) bool) error { + var allowEither []func(error) bool + if len(allow1) > 0 { + // This is more efficient than using a function literal, + // because the compiler knows that it doesn't escape. + allowEither = make([]func(error) bool, len(allow)+len(allow1)) + copy(allowEither, allow) + copy(allowEither[len(allow):], allow1) + } else { + allowEither = allow + } + err = Mask(err, allowEither...) + setLocation(err, 1) + return err + } +} + +// WithCausef returns a new Error that wraps the given +// (possibly nil) underlying error and associates it with +// the given cause. The given formatted message context +// will also be added. +func WithCausef(underlying, cause error, f string, a ...interface{}) error { + err := &Err{ + Underlying_: underlying, + Cause_: cause, + Message_: fmt.Sprintf(f, a...), + } + err.SetLocation(1) + return err +} + +// Cause returns the cause of the given error. If err does not +// implement Causer or its Cause method returns nil, it returns err itself. +// +// Cause is the usual way to diagnose errors that may have +// been wrapped by Mask or NoteMask. +func Cause(err error) error { + var diag error + if err, ok := err.(Causer); ok { + diag = err.Cause() + } + if diag != nil { + return diag + } + return err +} + +// callers returns the stack trace of the goroutine that called it, +// starting n entries above the caller of callers, as a space-separated list +// of filename:line-number pairs with no new lines. +func callers(n, max int) []byte { + var b bytes.Buffer + prev := false + for i := 0; i < max; i++ { + _, file, line, ok := runtime.Caller(n + 1) + if !ok { + return b.Bytes() + } + if prev { + fmt.Fprintf(&b, " ") + } + fmt.Fprintf(&b, "%s:%d", file, line) + n++ + prev = true + } + return b.Bytes() +} diff --git a/vendor/github.com/juju/errgo/errors/errors.go b/vendor/github.com/juju/errgo/errors/errors.go new file mode 100644 index 000000000..bac52cc3c --- /dev/null +++ b/vendor/github.com/juju/errgo/errors/errors.go @@ -0,0 +1,389 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +// The errors package provides a way to create +// and diagnose errors. It is compatible with +// the usual Go error idioms but adds a way to wrap errors +// so that they record source location information +// while retaining a consistent way for code to +// inspect errors to find out particular problems. +// +// IMPORTANT NOTE: this import path is deprecated, +// and will be removed at some point in the future. +// Please use github.com/juju/errgo instead. +// +package errors + +import ( + "bytes" + "fmt" + "log" + "runtime" +) + +const debug = false + +// Location describes a source code location. +type Location struct { + File string + Line int +} + +// String returns a location in filename.go:99 format. +func (loc Location) String() string { + return fmt.Sprintf("%s:%d", loc.File, loc.Line) +} + +// IsSet reports whether the location has been set. +func (loc Location) IsSet() bool { + return loc.File != "" +} + +// Err holds a description of an error along with information about +// where the error was created. +// +// It may be embedded in custom error types to add +// extra information that this errors package can +// understand. +type Err struct { + // Message_ holds the text of the error message. It may be empty + // if Underlying is set. 
+	Message_ string
+
+	// Cause_ holds the cause of the error as returned
+	// by the Cause method.
+	Cause_ error
+
+	// Underlying holds the underlying error, if any.
+	Underlying_ error
+
+	// Location holds the source code location where the error was
+	// created.
+	Location_ Location
+}
+
+// Location implements Locationer.
+func (e *Err) Location() Location {
+	return e.Location_
+}
+
+// Underlying returns the underlying error if any.
+func (e *Err) Underlying() error {
+	return e.Underlying_
+}
+
+// Cause implements Causer.
+func (e *Err) Cause() error {
+	return e.Cause_
+}
+
+// Message returns the top level error message.
+func (e *Err) Message() string {
+	return e.Message_
+}
+
+// Error implements error.Error.
+func (e *Err) Error() string {
+	switch {
+	case e.Message_ == "" && e.Underlying_ == nil:
+		return ""
+	case e.Message_ == "":
+		return e.Underlying_.Error()
+	case e.Underlying_ == nil:
+		return e.Message_
+	}
+	return fmt.Sprintf("%s: %v", e.Message_, e.Underlying_)
+}
+
+// GoString returns the details of the receiving error
+// message, so that printing an error with %#v will
+// produce useful information.
+func (e *Err) GoString() string {
+	return Details(e)
+}
+
+// Causer is the type of an error that may provide
+// an error cause for error diagnosis. Cause may return
+// nil if there is no cause (for example because the
+// cause has been masked).
+type Causer interface {
+	Cause() error
+}
+
+// Wrapper is the type of an error that wraps another error. It is
+// exposed so that external types may implement it, but should in
+// general not be used otherwise.
+type Wrapper interface {
+	// Message returns the top level error message,
+	// not including the message from the underlying
+	// error.
+	Message() string
+
+	// Underlying returns the underlying error, or nil
+	// if there is none.
+	Underlying() error
+}
+
+// Locationer can be implemented by any error type
+// that wants to expose the source location of an error.
+type Locationer interface {
+	Location() Location
+}
+
+// Details returns information about the stack of
+// underlying errors wrapped by err, in the format:
+//
+//	[{filename:99: error one} {otherfile:55: cause of error one}]
+//
+// The details are found by type-asserting the error to
+// the Locationer, Causer and Wrapper interfaces.
+// Details of the underlying stack are found by
+// recursively calling Underlying when the
+// underlying error implements Wrapper.
+func Details(err error) string {
+	if err == nil {
+		return "[]"
+	}
+	var s []byte
+	s = append(s, '[')
+	for {
+		s = append(s, '{')
+		if err, ok := err.(Locationer); ok {
+			loc := err.Location()
+			if loc.IsSet() {
+				s = append(s, loc.String()...)
+				s = append(s, ": "...)
+			}
+		}
+		if cerr, ok := err.(Wrapper); ok {
+			s = append(s, cerr.Message()...)
+			err = cerr.Underlying()
+		} else {
+			s = append(s, err.Error()...)
+			err = nil
+		}
+		if debug {
+			if err, ok := err.(Causer); ok {
+				if cause := err.Cause(); cause != nil {
+					s = append(s, fmt.Sprintf("=%T", cause)...)
+					s = append(s, Details(cause)...)
+				}
+			}
+		}
+		s = append(s, '}')
+		if err == nil {
+			break
+		}
+		s = append(s, ' ')
+	}
+	s = append(s, ']')
+	return string(s)
+}
+
+// SetLocation records the source location of the error by setting
+// e.Location_, at callDepth stack frames above the call.
+func (e *Err) SetLocation(callDepth int) {
+	_, file, line, _ := runtime.Caller(callDepth + 1)
+	e.Location_ = Location{file, line}
+}
+
+func setLocation(err error, callDepth int) {
+	if e, _ := err.(*Err); e != nil {
+		e.SetLocation(callDepth + 1)
+	}
+}
+
+// New returns a new error with the given error message and no cause. It
+// is a drop-in replacement for errors.New from the standard library.
+func New(s string) error {
+	err := &Err{Message_: s}
+	err.SetLocation(1)
+	return err
+}
+
+// Newf returns a new error with the given printf-formatted error
+// message and no cause.
+func Newf(f string, a ...interface{}) error {
+	err := &Err{Message_: fmt.Sprintf(f, a...)}
+	err.SetLocation(1)
+	return err
+}
+
+// match returns whether any of the given
+// functions returns true when called with err as an
+// argument.
+func match(err error, pass ...func(error) bool) bool {
+	for _, f := range pass {
+		if f(err) {
+			return true
+		}
+	}
+	return false
+}
+
+// Is returns a function that returns whether
+// an error is equal to the given error.
+// It is intended to be used as a "pass" argument
+// to Mask and friends; for example:
+//
+//	return errors.Mask(err, errors.Is(http.ErrNoCookie))
+//
+// would return an error with an http.ErrNoCookie cause
+// only if that was err's diagnosis; otherwise the diagnosis
+// would be itself.
+func Is(err error) func(error) bool {
+	return func(err1 error) bool {
+		return err == err1
+	}
+}
+
+// Any returns true. It can be used as an argument to Mask
+// to allow any diagnosis to pass through to the wrapped
+// error.
+func Any(error) bool {
+	return true
+}
+
+// NoteMask returns an Err that has the given underlying error,
+// with the given message added as context, and allowing
+// the cause of the underlying error to pass through into
+// the result if allowed by the specific pass functions
+// (see Mask for an explanation of the pass parameter).
+func NoteMask(underlying error, msg string, pass ...func(error) bool) error {
+	newErr := &Err{
+		Underlying_: underlying,
+		Message_:    msg,
+	}
+	if len(pass) > 0 {
+		if cause := Cause(underlying); match(cause, pass...) {
+			newErr.Cause_ = cause
+		}
+	}
+	if debug {
+		if newd, oldd := newErr.Cause_, Cause(underlying); newd != oldd {
+			log.Printf("Mask cause %[1]T(%[1]v)->%[2]T(%[2]v)", oldd, newd)
+			log.Printf("call stack: %s", callers(0, 20))
+			log.Printf("len(allow) == %d", len(pass))
+			log.Printf("old error %#v", underlying)
+			log.Printf("new error %#v", newErr)
+		}
+	}
+	return newErr
+}
+
+// Mask returns an Err that wraps the given underlying error. The error
+// message is unchanged, but the error location records the caller of
+// Mask.
+//
+// If err is nil, Mask returns nil.
+//
+// By default Mask conceals the cause of the wrapped error, but if
+// pass(Cause(err)) returns true for any of the provided pass functions,
+// the cause of the returned error will be Cause(err).
+//
+// For example, the following code will return an error whose cause is
+// the error from the os.Open call when (and only when) the file does
+// not exist.
+//
+//	f, err := os.Open("non-existent-file")
+//	if err != nil {
+//		return errors.Mask(err, os.IsNotExist)
+//	}
+//
+// In order to add context to returned errors, it
+// is conventional to call Mask when returning any
+// error received from elsewhere.
+//
+func Mask(underlying error, pass ...func(error) bool) error {
+	if underlying == nil {
+		return nil
+	}
+	err := NoteMask(underlying, "", pass...)
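+	// With an empty message, Err.Error() falls through to the underlying
+	// error's text; setLocation below then stamps the Err with the source
+	// location of Mask's caller.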
+ setLocation(err, 1) + return err +} + +// Notef returns an Error that wraps the given underlying +// error and adds the given formatted context message. +// The returned error has no cause (use NoteMask +// or WithCausef to add a message while retaining a cause). +func Notef(underlying error, f string, a ...interface{}) error { + err := NoteMask(underlying, fmt.Sprintf(f, a...)) + setLocation(err, 1) + return err +} + +// MaskFunc returns an equivalent of Mask that always allows the +// specified causes in addition to any causes specified when the +// returned function is called. +// +// It is defined for convenience, for example when all calls to Mask in +// a given package wish to allow the same set of causes to be returned. +func MaskFunc(allow ...func(error) bool) func(error, ...func(error) bool) error { + return func(err error, allow1 ...func(error) bool) error { + var allowEither []func(error) bool + if len(allow1) > 0 { + // This is more efficient than using a function literal, + // because the compiler knows that it doesn't escape. + allowEither = make([]func(error) bool, len(allow)+len(allow1)) + copy(allowEither, allow) + copy(allowEither[len(allow):], allow1) + } else { + allowEither = allow + } + err = Mask(err, allowEither...) + setLocation(err, 1) + return err + } +} + +// WithCausef returns a new Error that wraps the given +// (possibly nil) underlying error and associates it with +// the given cause. The given formatted message context +// will also be added. +func WithCausef(underlying, cause error, f string, a ...interface{}) error { + err := &Err{ + Underlying_: underlying, + Cause_: cause, + Message_: fmt.Sprintf(f, a...), + } + err.SetLocation(1) + return err +} + +// Cause returns the cause of the given error. If err does not +// implement Causer or its Cause method returns nil, it returns err itself. +// +// Cause is the usual way to diagnose errors that may have +// been wrapped by Mask or NoteMask. +func Cause(err error) error { + var diag error + if err, ok := err.(Causer); ok { + diag = err.Cause() + } + if diag != nil { + return diag + } + return err +} + +// callers returns the stack trace of the goroutine that called it, +// starting n entries above the caller of callers, as a space-separated list +// of filename:line-number pairs with no new lines. +func callers(n, max int) []byte { + var b bytes.Buffer + prev := false + for i := 0; i < max; i++ { + _, file, line, ok := runtime.Caller(n + 1) + if !ok { + return b.Bytes() + } + if prev { + fmt.Fprintf(&b, " ") + } + fmt.Fprintf(&b, "%s:%d", file, line) + n++ + prev = true + } + return b.Bytes() +} diff --git a/vendor/github.com/juju/errgo/errors/errors_test.go b/vendor/github.com/juju/errgo/errors/errors_test.go new file mode 100644 index 000000000..aae790d3b --- /dev/null +++ b/vendor/github.com/juju/errgo/errors/errors_test.go @@ -0,0 +1,277 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. 
+ +package errors_test + +import ( + "fmt" + "github.com/juju/errgo/errors" + "io/ioutil" + "runtime" + "strings" + "testing" +) + +var ( + _ errors.Wrapper = (*errors.Err)(nil) + _ errors.Locationer = (*errors.Err)(nil) + _ errors.Causer = (*errors.Err)(nil) +) + +func TestNew(t *testing.T) { + err := errors.New("foo") //err TestNew + checkErr(t, err, nil, "foo", "[{$TestNew$: foo}]", err) +} + +func TestNewf(t *testing.T) { + err := errors.Newf("foo %d", 5) //err TestNewf + checkErr(t, err, nil, "foo 5", "[{$TestNewf$: foo 5}]", err) +} + +var someErr = errors.New("some error") + +func TestMask(t *testing.T) { + err0 := errors.WithCausef(nil, someErr, "foo") //err TestMask#0 + err := errors.Mask(err0) //err TestMask#1 + checkErr(t, err, err0, "foo", "[{$TestMask#1$: } {$TestMask#0$: foo}]", err) + + err = errors.Mask(nil) + if err != nil { + t.Fatalf("expected nil got %#v", err) + } +} + +func TestNotef(t *testing.T) { + err0 := errors.WithCausef(nil, someErr, "foo") //err TestNotef#0 + err := errors.Notef(err0, "bar") //err TestNotef#1 + checkErr(t, err, err0, "bar: foo", "[{$TestNotef#1$: bar} {$TestNotef#0$: foo}]", err) + + err = errors.Notef(nil, "bar") //err TestNotef#2 + checkErr(t, err, nil, "bar", "[{$TestNotef#2$: bar}]", err) +} + +func TestMaskFunc(t *testing.T) { + err0 := errors.New("zero") + err1 := errors.New("one") + + allowVals := func(vals ...error) (r []func(error) bool) { + for _, val := range vals { + r = append(r, errors.Is(val)) + } + return + } + tests := []struct { + err error + allow0 []func(error) bool + allow1 []func(error) bool + cause error + }{{ + err: err0, + allow0: allowVals(err0), + cause: err0, + }, { + err: err1, + allow0: allowVals(err0), + cause: nil, + }, { + err: err0, + allow1: allowVals(err0), + cause: err0, + }, { + err: err0, + allow0: allowVals(err1), + allow1: allowVals(err0), + cause: err0, + }, { + err: err0, + allow0: allowVals(err0, err1), + cause: err0, + }, { + err: err1, + allow0: allowVals(err0, err1), + cause: err1, + }, { + err: err0, + allow1: allowVals(err0, err1), + cause: err0, + }, { + err: err1, + allow1: allowVals(err0, err1), + cause: err1, + }} + for i, test := range tests { + wrap := errors.MaskFunc(test.allow0...) + err := wrap(test.err, test.allow1...) + cause := errors.Cause(err) + wantCause := test.cause + if wantCause == nil { + wantCause = err + } + if cause != wantCause { + t.Errorf("test %d. 
got %#v want %#v", i, cause, wantCause)
+		}
+	}
+}
+
+type embed struct {
+	*errors.Err
+}
+
+func TestCause(t *testing.T) {
+	if cause := errors.Cause(someErr); cause != someErr {
+		t.Fatalf("expected %q kind; got %#v", someErr, cause)
+	}
+	causeErr := errors.New("cause error")
+	underlyingErr := errors.New("underlying error")                 //err TestCause#1
+	err := errors.WithCausef(underlyingErr, causeErr, "foo %d", 99) //err TestCause#2
+	if errors.Cause(err) != causeErr {
+		t.Fatalf("expected %q; got %#v", causeErr, errors.Cause(err))
+	}
+	checkErr(t, err, underlyingErr, "foo 99: underlying error", "[{$TestCause#2$: foo 99} {$TestCause#1$: underlying error}]", causeErr)
+	err = &embed{err.(*errors.Err)}
+	if errors.Cause(err) != causeErr {
+		t.Fatalf("expected %q; got %#v", causeErr, errors.Cause(err))
+	}
+}
+
+func TestDetails(t *testing.T) {
+	if details := errors.Details(nil); details != "[]" {
+		t.Fatalf("errors.Details(nil) got %q want %q", details, "[]")
+	}
+
+	otherErr := fmt.Errorf("other")
+	checkErr(t, otherErr, nil, "other", "[{other}]", otherErr)
+
+	err0 := &embed{errors.New("foo").(*errors.Err)} //err TestStack#0
+	checkErr(t, err0, nil, "foo", "[{$TestStack#0$: foo}]", err0)
+
+	err1 := &embed{errors.Notef(err0, "bar").(*errors.Err)} //err TestStack#1
+	checkErr(t, err1, err0, "bar: foo", "[{$TestStack#1$: bar} {$TestStack#0$: foo}]", err1)
+
+	err2 := errors.Mask(err1) //err TestStack#2
+	checkErr(t, err2, err1, "bar: foo", "[{$TestStack#2$: } {$TestStack#1$: bar} {$TestStack#0$: foo}]", err2)
+}
+
+func TestMatch(t *testing.T) {
+	type errTest func(error) bool
+	allow := func(ss ...string) []func(error) bool {
+		fns := make([]func(error) bool, len(ss))
+		for i, s := range ss {
+			s := s
+			fns[i] = func(err error) bool {
+				return err != nil && err.Error() == s
+			}
+		}
+		return fns
+	}
+	tests := []struct {
+		err error
+		fns []func(error) bool
+		ok  bool
+	}{{
+		err: errors.New("foo"),
+		fns: allow("foo"),
+		ok:  true,
+	}, {
+		err: errors.New("foo"),
+		fns: allow("bar"),
+		ok:  false,
+	}, {
+		err: errors.New("foo"),
+		fns: allow("bar", "foo"),
+		ok:  true,
+	}, {
+		err: errors.New("foo"),
+		fns: nil,
+		ok:  false,
+	}, {
+		err: nil,
+		fns: nil,
+		ok:  false,
+	}}
+
+	for i, test := range tests {
+		ok := errors.Match(test.err, test.fns...)
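+		// Match is the unexported match helper, aliased for tests
+		// via export_test.go (var Match = match).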
+		if ok != test.ok {
+			t.Fatalf("test %d: expected %v got %v", i, test.ok, ok)
+		}
+	}
+}
+
+func TestLocation(t *testing.T) {
+	loc := errors.Location{File: "foo", Line: 35}
+	if loc.String() != "foo:35" {
+		t.Fatalf("expected \"foo:35\" got %q", loc.String())
+	}
+}
+
+func checkErr(t *testing.T, err, underlying error, msg string, details string, cause error) {
+	if err == nil {
+		t.Fatalf("err is nil; want %q", msg)
+	}
+	if err.Error() != msg {
+		t.Fatalf("unexpected message: want %q; got %q", msg, err.Error())
+	}
+	if err, ok := err.(errors.Wrapper); ok {
+		if err.Underlying() != underlying {
+			t.Fatalf("unexpected underlying error: want %q; got %v", underlying, err.Underlying())
+		}
+	} else if underlying != nil {
+		t.Fatalf("no underlying error found; want %q", underlying)
+	}
+	if errors.Cause(err) != cause {
+		t.Fatalf("unexpected cause: want %#v; got %#v", cause, errors.Cause(err))
+	}
+	wantDetails := replaceLocations(details)
+	if gotDetails := errors.Details(err); gotDetails != wantDetails {
+		t.Fatalf("unexpected details: want %q; got %q", wantDetails, gotDetails)
+	}
+}
+
+func replaceLocations(s string) string {
+	t := ""
+	for {
+		i := strings.Index(s, "$")
+		if i == -1 {
+			break
+		}
+		t += s[0:i]
+		s = s[i+1:]
+		i = strings.Index(s, "$")
+		if i == -1 {
+			panic("no second $")
+		}
+		t += location(s[0:i]).String()
+		s = s[i+1:]
+	}
+	t += s
+	return t
+}
+
+func location(tag string) errors.Location {
+	line, ok := tagToLine[tag]
+	if !ok {
+		panic(fmt.Errorf("tag %q not found", tag))
+	}
+	return errors.Location{
+		File: filename,
+		Line: line,
+	}
+}
+
+var tagToLine = make(map[string]int)
+var filename string
+
+func init() {
+	data, err := ioutil.ReadFile("errors_test.go")
+	if err != nil {
+		panic(err)
+	}
+	lines := strings.Split(string(data), "\n")
+	for i, line := range lines {
+		if j := strings.Index(line, "//err "); j >= 0 {
+			tagToLine[line[j+len("//err "):]] = i + 1
+		}
+	}
+	_, filename, _, _ = runtime.Caller(0)
+}
diff --git a/vendor/github.com/juju/errgo/errors/export_test.go b/vendor/github.com/juju/errgo/errors/export_test.go
new file mode 100644
index 000000000..3a4b89fb4
--- /dev/null
+++ b/vendor/github.com/juju/errgo/errors/export_test.go
@@ -0,0 +1,6 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package errors
+
+var Match = match
diff --git a/vendor/github.com/juju/errgo/errors_test.go b/vendor/github.com/juju/errgo/errors_test.go
new file mode 100644
index 000000000..47bfc42d2
--- /dev/null
+++ b/vendor/github.com/juju/errgo/errors_test.go
@@ -0,0 +1,289 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+ +package errgo_test + +import ( + "fmt" + "io/ioutil" + "runtime" + "strings" + "testing" + + gc "gopkg.in/check.v1" + + "github.com/juju/errgo" +) + +var ( + _ errgo.Wrapper = (*errgo.Err)(nil) + _ errgo.Locationer = (*errgo.Err)(nil) + _ errgo.Causer = (*errgo.Err)(nil) +) + +func Test(t *testing.T) { + gc.TestingT(t) +} + +type errorsSuite struct{} + +var _ = gc.Suite(&errorsSuite{}) + +func (*errorsSuite) TestNew(c *gc.C) { + err := errgo.New("foo") //err TestNew + checkErr(c, err, nil, "foo", "[{$TestNew$: foo}]", err) +} + +func (*errorsSuite) TestNewf(c *gc.C) { + err := errgo.Newf("foo %d", 5) //err TestNewf + checkErr(c, err, nil, "foo 5", "[{$TestNewf$: foo 5}]", err) +} + +var someErr = errgo.New("some error") //err varSomeErr + +func annotate1() error { + err := errgo.Notef(someErr, "annotate1") //err annotate1 + return err +} + +func annotate2() error { + err := annotate1() + err = errgo.Notef(err, "annotate2") //err annotate2 + return err +} + +func (*errorsSuite) TestNoteUsage(c *gc.C) { + err0 := annotate2() + err, ok := err0.(errgo.Wrapper) + c.Assert(ok, gc.Equals, true) + underlying := err.Underlying() + checkErr( + c, err0, underlying, + "annotate2: annotate1: some error", + "[{$annotate2$: annotate2} {$annotate1$: annotate1} {$varSomeErr$: some error}]", + err0) +} + +func (*errorsSuite) TestMask(c *gc.C) { + err0 := errgo.WithCausef(nil, someErr, "foo") //err TestMask#0 + err := errgo.Mask(err0) //err TestMask#1 + checkErr(c, err, err0, "foo", "[{$TestMask#1$: } {$TestMask#0$: foo}]", err) + + err = errgo.Mask(nil) + c.Assert(err, gc.IsNil) +} + +func (*errorsSuite) TestNotef(c *gc.C) { + err0 := errgo.WithCausef(nil, someErr, "foo") //err TestNotef#0 + err := errgo.Notef(err0, "bar") //err TestNotef#1 + checkErr(c, err, err0, "bar: foo", "[{$TestNotef#1$: bar} {$TestNotef#0$: foo}]", err) + + err = errgo.Notef(nil, "bar") //err TestNotef#2 + checkErr(c, err, nil, "bar", "[{$TestNotef#2$: bar}]", err) +} + +func (*errorsSuite) TestMaskFunc(c *gc.C) { + err0 := errgo.New("zero") + err1 := errgo.New("one") + + allowVals := func(vals ...error) (r []func(error) bool) { + for _, val := range vals { + r = append(r, errgo.Is(val)) + } + return + } + tests := []struct { + err error + allow0 []func(error) bool + allow1 []func(error) bool + cause error + }{{ + err: err0, + allow0: allowVals(err0), + cause: err0, + }, { + err: err1, + allow0: allowVals(err0), + cause: nil, + }, { + err: err0, + allow1: allowVals(err0), + cause: err0, + }, { + err: err0, + allow0: allowVals(err1), + allow1: allowVals(err0), + cause: err0, + }, { + err: err0, + allow0: allowVals(err0, err1), + cause: err0, + }, { + err: err1, + allow0: allowVals(err0, err1), + cause: err1, + }, { + err: err0, + allow1: allowVals(err0, err1), + cause: err0, + }, { + err: err1, + allow1: allowVals(err0, err1), + cause: err1, + }} + for i, test := range tests { + c.Logf("test %d", i) + wrap := errgo.MaskFunc(test.allow0...) + err := wrap(test.err, test.allow1...) 
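+		// wrap allows a cause through if it matches either MaskFunc's
+		// allow0 set or this call's allow1 set.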
+ cause := errgo.Cause(err) + wantCause := test.cause + if wantCause == nil { + wantCause = err + } + c.Check(cause, gc.Equals, wantCause) + } +} + +type embed struct { + *errgo.Err +} + +func (*errorsSuite) TestCause(c *gc.C) { + c.Assert(errgo.Cause(someErr), gc.Equals, someErr) + + causeErr := errgo.New("cause error") + underlyingErr := errgo.New("underlying error") //err TestCause#1 + err := errgo.WithCausef(underlyingErr, causeErr, "foo %d", 99) //err TestCause#2 + c.Assert(errgo.Cause(err), gc.Equals, causeErr) + + checkErr(c, err, underlyingErr, "foo 99: underlying error", "[{$TestCause#2$: foo 99} {$TestCause#1$: underlying error}]", causeErr) + + err = &embed{err.(*errgo.Err)} + c.Assert(errgo.Cause(err), gc.Equals, causeErr) +} + +func (*errorsSuite) TestDetails(c *gc.C) { + c.Assert(errgo.Details(nil), gc.Equals, "[]") + + otherErr := fmt.Errorf("other") + checkErr(c, otherErr, nil, "other", "[{other}]", otherErr) + + err0 := &embed{errgo.New("foo").(*errgo.Err)} //err TestStack#0 + checkErr(c, err0, nil, "foo", "[{$TestStack#0$: foo}]", err0) + + err1 := &embed{errgo.Notef(err0, "bar").(*errgo.Err)} //err TestStack#1 + checkErr(c, err1, err0, "bar: foo", "[{$TestStack#1$: bar} {$TestStack#0$: foo}]", err1) + + err2 := errgo.Mask(err1) //err TestStack#2 + checkErr(c, err2, err1, "bar: foo", "[{$TestStack#2$: } {$TestStack#1$: bar} {$TestStack#0$: foo}]", err2) +} + +func (*errorsSuite) TestMatch(c *gc.C) { + type errTest func(error) bool + allow := func(ss ...string) []func(error) bool { + fns := make([]func(error) bool, len(ss)) + for i, s := range ss { + s := s + fns[i] = func(err error) bool { + return err != nil && err.Error() == s + } + } + return fns + } + tests := []struct { + err error + fns []func(error) bool + ok bool + }{{ + err: errgo.New("foo"), + fns: allow("foo"), + ok: true, + }, { + err: errgo.New("foo"), + fns: allow("bar"), + ok: false, + }, { + err: errgo.New("foo"), + fns: allow("bar", "foo"), + ok: true, + }, { + err: errgo.New("foo"), + fns: nil, + ok: false, + }, { + err: nil, + fns: nil, + ok: false, + }} + + for i, test := range tests { + c.Logf("test %d", i) + c.Assert(errgo.Match(test.err, test.fns...), gc.Equals, test.ok) + } +} + +func (*errorsSuite) TestLocation(c *gc.C) { + loc := errgo.Location{File: "foo", Line: 35} + c.Assert(loc.String(), gc.Equals, "foo:35") +} + +func checkErr(c *gc.C, err, underlying error, msg string, details string, cause error) { + c.Assert(err, gc.NotNil) + c.Assert(err.Error(), gc.Equals, msg) + if err, ok := err.(errgo.Wrapper); ok { + c.Assert(err.Underlying(), gc.Equals, underlying) + } else { + c.Assert(underlying, gc.IsNil) + } + c.Assert(errgo.Cause(err), gc.Equals, cause) + wantDetails := replaceLocations(details) + c.Assert(errgo.Details(err), gc.Equals, wantDetails) +} + +func replaceLocations(s string) string { + t := "" + for { + i := strings.Index(s, "$") + if i == -1 { + break + } + t += s[0:i] + s = s[i+1:] + i = strings.Index(s, "$") + if i == -1 { + panic("no second $") + } + t += location(s[0:i]).String() + s = s[i+1:] + } + t += s + return t +} + +func location(tag string) errgo.Location { + line, ok := tagToLine[tag] + if !ok { + panic(fmt.Errorf("tag %q not found", tag)) + } + return errgo.Location{ + File: filename, + Line: line, + } +} + +var tagToLine = make(map[string]int) +var filename string + +func init() { + data, err := ioutil.ReadFile("errors_test.go") + if err != nil { + panic(err) + } + lines := strings.Split(string(data), "\n") + for i, line := range lines { + if j := 
strings.Index(line, "//err "); j >= 0 { + tagToLine[line[j+len("//err "):]] = i + 1 + } + } + _, filename, _, _ = runtime.Caller(0) +} diff --git a/vendor/github.com/juju/errgo/export_test.go b/vendor/github.com/juju/errgo/export_test.go new file mode 100644 index 000000000..a3a15e77f --- /dev/null +++ b/vendor/github.com/juju/errgo/export_test.go @@ -0,0 +1,6 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package errgo + +var Match = match diff --git a/vendor/github.com/moby/moby/.dockerignore b/vendor/github.com/moby/moby/.dockerignore new file mode 100644 index 000000000..4a56f2e00 --- /dev/null +++ b/vendor/github.com/moby/moby/.dockerignore @@ -0,0 +1,7 @@ +bundles +.gopath +vendor/pkg +.go-pkg-cache +.git +hack/integration-cli-on-swarm/integration-cli-on-swarm + diff --git a/vendor/github.com/moby/moby/.github/ISSUE_TEMPLATE.md b/vendor/github.com/moby/moby/.github/ISSUE_TEMPLATE.md new file mode 100644 index 000000000..7362480a4 --- /dev/null +++ b/vendor/github.com/moby/moby/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,64 @@ + + +**Description** + + + +**Steps to reproduce the issue:** +1. +2. +3. + +**Describe the results you received:** + + +**Describe the results you expected:** + + +**Additional information you deem important (e.g. issue happens only occasionally):** + +**Output of `docker version`:** + +``` +(paste your output here) +``` + +**Output of `docker info`:** + +``` +(paste your output here) +``` + +**Additional environment details (AWS, VirtualBox, physical, etc.):** diff --git a/vendor/github.com/moby/moby/.github/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/moby/moby/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 000000000..426981828 --- /dev/null +++ b/vendor/github.com/moby/moby/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,30 @@ + + +**- What I did** + +**- How I did it** + +**- How to verify it** + +**- Description for the changelog** + + + +**- A picture of a cute animal (not mandatory but encouraged)** + diff --git a/vendor/github.com/moby/moby/.gitignore b/vendor/github.com/moby/moby/.gitignore new file mode 100644 index 000000000..943e7f3f6 --- /dev/null +++ b/vendor/github.com/moby/moby/.gitignore @@ -0,0 +1,22 @@ +# Docker project generated files to ignore +# if you want to ignore files created by your editor/tools, +# please consider a global .gitignore https://help.github.com/articles/ignoring-files +*.exe +*.exe~ +*.orig +*.test +.*.swp +.DS_Store +# a .bashrc may be added to customize the build environment +.bashrc +.editorconfig +.gopath/ +.go-pkg-cache/ +autogen/ +bundles/ +cmd/dockerd/dockerd +contrib/builder/rpm/*/changelog +dockerversion/version_autogen.go +dockerversion/version_autogen_unix.go +vendor/pkg/ +hack/integration-cli-on-swarm/integration-cli-on-swarm diff --git a/vendor/github.com/moby/moby/.mailmap b/vendor/github.com/moby/moby/.mailmap new file mode 100644 index 000000000..c48f29041 --- /dev/null +++ b/vendor/github.com/moby/moby/.mailmap @@ -0,0 +1,386 @@ +# Generate AUTHORS: hack/generate-authors.sh + +# Tip for finding duplicates (besides scanning the output of AUTHORS for name +# duplicates that aren't also email duplicates): scan the output of: +# git log --format='%aE - %aN' | sort -uf +# +# For explanation on this file format: man git-shortlog + +Patrick Stapleton +Shishir Mahajan +Erwin van der Koogh +Ahmed Kamal +Tejesh Mehta +Cristian Staretu +Cristian Staretu +Cristian Staretu +Marcus Linke +Aleksandrs Fadins +Christopher Latham +Hu Keping +Wayne Chang +Chen Chao 
+Daehyeok Mun + + + + + + +Guillaume J. Charmes + + + + + +Thatcher Peskens +Thatcher Peskens +Thatcher Peskens dhrp +Jérôme Petazzoni +Jérôme Petazzoni +Joffrey F +Joffrey F +Joffrey F +Tim Terhorst +Andy Smith + + + + + + + + + +Walter Stanish + +Roberto Hashioka +Konstantin Pelykh +David Sissitka +Nolan Darilek + +Benoit Chesneau +Jordan Arentsen +Daniel Garcia +Miguel Angel Fernández +Bhiraj Butala +Faiz Khan +Victor Lyuboslavsky +Jean-Baptiste Barth +Matthew Mueller + +Shih-Yuan Lee +Daniel Mizyrycki root +Jean-Baptiste Dalido + + + + + + + + + + + + + + +Sven Dowideit +Sven Dowideit +Sven Dowideit +Sven Dowideit <¨SvenDowideit@home.org.au¨> +Sven Dowideit +Sven Dowideit +Sven Dowideit +Akihiro Matsushima + +Alexander Morozov +Alexander Morozov + +O.S. Tezer + +Roberto G. Hashioka + + + + + +Sridhar Ratnakumar +Sridhar Ratnakumar +Liang-Chi Hsieh +Aaron L. Xu +Aleksa Sarai +Aleksa Sarai +Aleksa Sarai +Will Weaver +Timothy Hobbs +Nathan LeClaire +Nathan LeClaire + + + + +Matthew Heon + + + + +Francisco Carriedo + + + + +Brian Goff + +Erica Windisch +Erica Windisch + +Hollie Teal + + + +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle + + + +Sebastiaan van Stijn +Sebastiaan van Stijn +Thomas LEVEIL Thomas LÉVEIL + + +Antonio Murdaca +Antonio Murdaca +Antonio Murdaca +Antonio Murdaca +Antonio Murdaca +Darren Shepherd +Deshi Xiao +Deshi Xiao +Doug Davis +Giampaolo Mancini +K. Heller +Jacob Atzen +Jeff Nickoloff +Jérôme Petazzoni +John Harris +John Howard (VM) +John Howard (VM) +John Howard (VM) +John Howard (VM) +John Howard (VM) +Kevin Feyrer +Liao Qingwei +Luke Marsden +Madhan Raj Mookkandy +Madhu Venugopal +Mageee <21521230.zju.edu.cn> +Mansi Nahar +Mansi Nahar +Mary Anthony +Mary Anthony moxiegirl +Mary Anthony +mattyw +Michael Spetsiotis +Nik Nyby +Peter Jaffe +resouer +AJ Bowen soulshake +AJ Bowen soulshake +Tibor Vass +Tibor Vass +Vincent Bernat +Yestin Sun +bin liu +John Howard (VM) jhowardmsft +Ankush Agarwal +Tangi COLIN tangicolin +Allen Sun +Adrien Gallouët + +Anuj Bahuguna +Anusha Ragunathan +Avi Miller +Brent Salisbury +Chander G +Chun Chen +Ying Li +Daehyeok Mun + +Daniel, Dao Quang Minh +Daniel Nephin +Dave Tucker +Doug Tangren +Frederick F. Kautz IV +Ben Golub +Harold Cooper +hsinko <21551195@zju.edu.cn> +Josh Hawn +Justin Cormack + + +Kamil Domański +Lei Jitang + +Linus Heckemann + +Lynda O'Leary + +Marianna Tessel +Michael Huettermann +Moysés Borges + +Nigel Poulton +Qiang Huang + +Boaz Shuster +Shuwei Hao + +Soshi Katsuta + +Stefan Berger + +Stephen Day + +Toli Kuznets +Tristan Carel + + + +Vincent Demeester +Vishnu Kannan +xlgao-zju xlgao +Yu Changchun y00277921 +Yu Changchun + + + + +Hao Shu Wei + + + + + + + +Shengbo Song mYmNeo +Shengbo Song + +Sylvain Bellemare + + + +Arnaud Porterie + +David M. Karr + + + +Kenfe-Mickaël Laventure + + + + + +Runshen Zhu +Tom Barlow +Xianlu Bird +Dan Feldman +Harry Zhang +Alex Chen alexchen +Alex Ellis +Alicia Lauerman +Ben Bonnefoy +Bhumika Bayani +Bingshen Wang +Chen Chuanliang +Chen Mingjie +Chen Qiu +Chen Qiu <21321229@zju.edu.cn> +Chris Dias +Chris McKinnel +CUI Wei cuiwei13 +Daniel Grunwell +Daniel J Walsh +Dattatraya Kumbhar +David Sheets +Diego Siqueira +Elan Ruusamäe +Elan Ruusamäe +Eric G. 
Noriega +Evelyn Xu +Felix Ruess +Gabriel Nicolas Avellaneda +Gang Qiao <1373319223@qq.com> +George Kontridze +Gopikannan Venugopalsamy +Gou Rao +Gustav Sinder +Harshal Patil +Helen Xie +Hyzhou Zhy <1187766782@qq.com> +Hyzhou Zhy +Jacob Tomlinson +Jiuyue Ma +John Stephens +Jose Diaz-Gonzalez +Josh Eveleth +Josh Soref +Josh Wilson +Jim Galasyn +Kevin Kern +Konstantin Gribov +Kunal Kushwaha +Lajos Papp +Lyn +Markan Patel +Michael Käufl +Michal Minář +Michael Hudson-Doyle +Mike Casas +Milind Chawre +Ma Müller +Moorthy RS +Neil Horman +Pavel Tikhomirov +Peter Choi +Peter Dave Hello +Philipp Gillé +Robert Terhaar +Roberto Muñoz Fernández +Roman Dudin +Sandeep Bansal +Sean Lee +Shukui Yang +Srinivasan Srivatsan +Stefan S. +Steve Desmond +Sun Gengze <690388648@qq.com> +Tim Bart +Tim Zju <21651152@zju.edu.cn> +Tõnis Tiigi +Wayne Song +Wang Jie +Wang Ping +Wang Yuexiao +Wewang Xiaorenfine +Wei Wu cizixs +Xiaoyu Zhang +Yamasaki Masahide +Yassine Tijani +Ying Li +Yong Tang +Yu Chengxia +Yu Peng +Yu Peng +Yao Zaiyong +Zhenkun Bi +Zhu Kunjia diff --git a/vendor/github.com/moby/moby/AUTHORS b/vendor/github.com/moby/moby/AUTHORS new file mode 100644 index 000000000..e091ed7dc --- /dev/null +++ b/vendor/github.com/moby/moby/AUTHORS @@ -0,0 +1,1885 @@ +# This file lists all individuals having contributed content to the repository. +# For how it is generated, see `hack/generate-authors.sh`. + +Aanand Prasad +Aaron Davidson +Aaron Feng +Aaron Huslage +Aaron L. Xu +Aaron Lehmann +Aaron Welch +Aaron.L.Xu +Abel Muiño +Abhijeet Kasurde +Abhinandan Prativadi +Abhinav Ajgaonkar +Abhishek Chanda +Abhishek Sharma +Abin Shahab +Adam Avilla +Adam Eijdenberg +Adam Kunk +Adam Miller +Adam Mills +Adam Singer +Adam Walz +Addam Hardy +Aditi Rajagopal +Aditya +Adolfo Ochagavía +Adria Casas +Adrian Moisey +Adrian Mouat +Adrian Oprea +Adrien Folie +Adrien Gallouët +Ahmed Kamal +Ahmet Alp Balkan +Aidan Feldman +Aidan Hobson Sayers +AJ Bowen +Ajey Charantimath +ajneu +Akihiro Matsushima +Akihiro Suda +Akira Koyasu +Akshay Karle +Al Tobey +alambike +Alan Scherger +Alan Thompson +Albert Callarisa +Albert Zhang +Aleksa Sarai +Aleksandrs Fadins +Alena Prokharchyk +Alessandro Boch +Alessio Biancalana +Alex Chan +Alex Chen +Alex Coventry +Alex Crawford +Alex Ellis +Alex Gaynor +Alex Olshansky +Alex Samorukov +Alex Warhawk +Alexander Artemenko +Alexander Boyd +Alexander Larsson +Alexander Midlash +Alexander Morozov +Alexander Shopov +Alexandre Beslic +Alexandre Garnier +Alexandre González +Alexandru Sfirlogea +Alexey Guskov +Alexey Kotlyarov +Alexey Shamrin +Alexis THOMAS +Alfred Landrum +Ali Dehghani +Alicia Lauerman +Alihan Demir +Allen Madsen +Allen Sun +almoehi +Alvaro Saurin +Alvin Deng +Alvin Richards +amangoel +Amen Belayneh +Amir Goldstein +Amit Bakshi +Amit Krishnan +Amit Shukla +Amy Lindburg +Anand Patil +AnandkumarPatel +Anatoly Borodin +Anchal Agrawal +Anders Janmyr +Andre Dublin <81dublin@gmail.com> +Andre Granovsky +Andrea Luzzardi +Andrea Turli +Andreas Köhler +Andreas Savvides +Andreas Tiefenthaler +Andrei Gherzan +Andrew C. 
Bodine +Andrew Clay Shafer +Andrew Duckworth +Andrew France +Andrew Gerrand +Andrew Guenther +Andrew Hsu +Andrew Kuklewicz +Andrew Macgregor +Andrew Macpherson +Andrew Martin +Andrew McDonnell +Andrew Munsell +Andrew Po +Andrew Weiss +Andrew Williams +Andrews Medina +Andrey Petrov +Andrey Stolbovsky +André Martins +andy +Andy Chambers +andy diller +Andy Goldstein +Andy Kipp +Andy Rothfusz +Andy Smith +Andy Wilson +Anes Hasicic +Anil Belur +Anil Madhavapeddy +Ankush Agarwal +Anonmily +Anran Qiao +Anthon van der Neut +Anthony Baire +Anthony Bishopric +Anthony Dahanne +Anthony Sottile +Anton Löfgren +Anton Nikitin +Anton Polonskiy +Anton Tiurin +Antonio Murdaca +Antonis Kalipetis +Antony Messerli +Anuj Bahuguna +Anusha Ragunathan +apocas +Arash Deshmeh +ArikaChen +Arnaud Lefebvre +Arnaud Porterie +Arthur Barr +Arthur Gautier +Artur Meyster +Arun Gupta +Asbjørn Enge +averagehuman +Avi Das +Avi Miller +Avi Vaid +ayoshitake +Azat Khuyiyakhmetov +Bardia Keyoumarsi +Barnaby Gray +Barry Allard +Bartłomiej Piotrowski +Bastiaan Bakker +bdevloed +Ben Bonnefoy +Ben Firshman +Ben Golub +Ben Hall +Ben Sargent +Ben Severson +Ben Toews +Ben Wiklund +Benjamin Atkin +Benjamin Boudreau +Benoit Chesneau +Bernerd Schaefer +Bert Goethals +Bharath Thiruveedula +Bhiraj Butala +Bhumika Bayani +Bilal Amarni +Bill W +bin liu +Bingshen Wang +Blake Geno +Boaz Shuster +bobby abbott +Boris Pruessmann +Boshi Lian +boucher +Bouke Haarsma +Boyd Hemphill +boynux +Bradley Cicenas +Bradley Wright +Brandon Liu +Brandon Philips +Brandon Rhodes +Brendan Dixon +Brent Salisbury +Brett Higgins +Brett Kochendorfer +Brian (bex) Exelbierd +Brian Bland +Brian DeHamer +Brian Dorsey +Brian Flad +Brian Goff +Brian McCallister +Brian Olsen +Brian Schwind +Brian Shumate +Brian Torres-Gil +Brian Trump +Brice Jaglin +Briehan Lombaard +Bruno Bigras +Bruno Binet +Bruno Gazzera +Bruno Renié +Bruno Tavares +Bryan Bess +Bryan Boreham +Bryan Matsuo +Bryan Murphy +buddhamagnet +Burke Libbey +Byung Kang +Caleb Spare +Calen Pennington +Cameron Boehmer +Cameron Spear +Campbell Allen +Candid Dauth +Cao Weiwei +Carl Henrik Lunde +Carl Loa Odin +Carl X. Su +Carlos Alexandro Becker +Carlos Sanchez +Carol Fager-Higgins +Cary +Casey Bisson +Ce Gao +Cedric Davies +Cezar Sa Espinola +Chad Swenson +Chance Zibolski +Chander G +Charles Chan +Charles Hooper +Charles Law +Charles Lindsay +Charles Merriam +Charles Sarrazin +Charles Smith +Charlie Drage +Charlie Lewis +Chase Bolt +ChaYoung You +Chen Chao +Chen Chuanliang +Chen Hanxiao +Chen Min +Chen Mingjie +Chen Qiu +Chewey +Chia-liang Kao +chli +Cholerae Hu +Chris Alfonso +Chris Armstrong +Chris Dias +Chris Dituri +Chris Fordham +Chris Gavin +Chris Gibson +Chris Khoo +Chris McKinnel +Chris McKinnel +Chris Seto +Chris Snow +Chris St. Pierre +Chris Stivers +Chris Swan +Chris Wahl +Chris Weyl +Christian Berendt +Christian Böhme +Christian Persson +Christian Rotzoll +Christian Simon +Christian Stefanescu +ChristoperBiscardi +Christophe Mehay +Christophe Troestler +Christopher Currie +Christopher Jones +Christopher Latham +Christopher Rigor +Christy Perez +Chun Chen +Ciro S. 
Costa +Clayton Coleman +Clinton Kitson +Coenraad Loubser +Colin Dunklau +Colin Hebert +Colin Rice +Colin Walters +Collin Guarino +Colm Hally +companycy +Corey Farrell +Cory Forsyth +cressie176 +CrimsonGlory +Cristian Staretu +cristiano balducci +Cruceru Calin-Cristian +CUI Wei +Cyprian Gracz +Cyril F +Daan van Berkel +Daehyeok Mun +Dafydd Crosby +dalanlan +Damian Smyth +Damien Nadé +Damien Nozay +Damjan Georgievski +Dan Anolik +Dan Buch +Dan Cotora +Dan Feldman +Dan Griffin +Dan Hirsch +Dan Keder +Dan Levy +Dan McPherson +Dan Stine +Dan Williams +Daniel Antlinger +Daniel Exner +Daniel Farrell +Daniel Garcia +Daniel Gasienica +Daniel Grunwell +Daniel Hiltgen +Daniel J Walsh +Daniel Menet +Daniel Mizyrycki +Daniel Nephin +Daniel Norberg +Daniel Nordberg +Daniel Robinson +Daniel S +Daniel Von Fange +Daniel X Moore +Daniel YC Lin +Daniel Zhang +Daniel, Dao Quang Minh +Danny Berger +Danny Yates +Darren Coxall +Darren Shepherd +Darren Stahl +Dattatraya Kumbhar +Davanum Srinivas +Dave Barboza +Dave Henderson +Dave MacDonald +Dave Tucker +David Anderson +David Calavera +David Corking +David Cramer +David Currie +David Davis +David Dooling +David Gageot +David Gebler +David Glasser +David Lawrence +David Lechner +David M. Karr +David Mackey +David Mat +David Mcanulty +David McKay +David Pelaez +David R. Jenni +David Röthlisberger +David Sheets +David Sissitka +David Trott +David Williamson +David Xia +David Young +Davide Ceretti +Dawn Chen +dbdd +dcylabs +Deborah Gertrude Digges +deed02392 +Deng Guangxing +Deni Bertovic +Denis Defreyne +Denis Gladkikh +Denis Ollier +Dennis Chen +Dennis Docter +Derek +Derek +Derek Ch +Derek McGowan +Deric Crago +Deshi Xiao +devmeyster +Devvyn Murphy +Dharmit Shah +Dhawal Yogesh Bhanushali +Diego Romero +Diego Siqueira +Dieter Reuter +Dillon Dixon +Dima Stopel +Dimitri John Ledkov +Dimitris Rozakis +Dimitry Andric +Dinesh Subhraveti +Ding Fei +Diogo Monica +DiuDiugirl +Djibril Koné +dkumor +Dmitri Logvinenko +Dmitri Shuralyov +Dmitry Demeshchuk +Dmitry Gusev +Dmitry Kononenko +Dmitry Shyshkin +Dmitry Smirnov +Dmitry V. Krivenok +Dmitry Vorobev +Dolph Mathews +Dominik Dingel +Dominik Finkbeiner +Dominik Honnef +Don Kirkby +Don Kjer +Don Spaulding +Donald Huang +Dong Chen +Donovan Jones +Doron Podoleanu +Doug Davis +Doug MacEachern +Doug Tangren +Dr Nic Williams +dragon788 +Dražen Lučanin +Drew Erny +Dustin Sallings +Ed Costello +Edmund Wagner +Eiichi Tsukata +Eike Herzbach +Eivin Giske Skaaren +Eivind Uggedal +Elan Ruusamäe +Elena Morozova +Elias Faxö +Elias Probst +Elijah Zupancic +eluck +Elvir Kuric +Emil Hernvall +Emily Maier +Emily Rose +Emir Ozer +Enguerran +Eohyung Lee +epeterso +Eric Barch +Eric Curtin +Eric G. Noriega +Eric Hanchrow +Eric Lee +Eric Myhre +Eric Paris +Eric Rafaloff +Eric Rosenberg +Eric Sage +Eric Yang +Eric-Olivier Lamey +Erica Windisch +Erik Bray +Erik Dubbelboer +Erik Hollensbe +Erik Inge Bolsø +Erik Kristensen +Erik St. 
Martin +Erik Weathers +Erno Hopearuoho +Erwin van der Koogh +Euan +Eugene Yakubovich +eugenkrizo +evalle +Evan Allrich +Evan Carmi +Evan Hazlett +Evan Hazlett +Evan Krall +Evan Phoenix +Evan Wies +Evelyn Xu +Everett Toews +Evgeny Vereshchagin +Ewa Czechowska +Eystein Måløy Stenberg +ezbercih +Ezra Silvera +Fabian Lauer +Fabiano Rosas +Fabio Falci +Fabio Kung +Fabio Rapposelli +Fabio Rehm +Fabrizio Regini +Fabrizio Soppelsa +Faiz Khan +falmp +Fangyuan Gao <21551127@zju.edu.cn> +Fareed Dudhia +Fathi Boudra +Federico Gimenez +Felipe Oliveira +Felix Abecassis +Felix Geisendörfer +Felix Hupfeld +Felix Rabe +Felix Ruess +Felix Schindler +Fengtu Wang +Ferenc Szabo +Fernando +Fero Volar +Ferran Rodenas +Filipe Brandenburger +Filipe Oliveira +fl0yd +Flavio Castelli +Flavio Crisciani +FLGMwt +Florian +Florian Klein +Florian Maier +Florian Weingarten +Florin Asavoaie +fonglh +fortinux +Foysal Iqbal +Francesc Campoy +Francis Chuang +Francisco Carriedo +Francisco Souza +Frank Groeneveld +Frank Herrmann +Frank Macreery +Frank Rosquin +Fred Lifton +Frederick F. Kautz IV +Frederik Loeffert +Frederik Nordahl Jul Sabroe +Freek Kalter +frosforever +fy2462 +Félix Baylac-Jacqué +Félix Cantournet +Gabe Rosenhouse +Gabor Nagy +Gabriel Linder +Gabriel Monroy +Gabriel Nicolas Avellaneda +Gaetan de Villele +Galen Sampson +Gang Qiao +Gareth Rushgrove +Garrett Barboza +Gary Schaetz +Gaurav +gautam, prasanna +Gaël PORTAY +Genki Takiuchi +GennadySpb +Geoffrey Bachelet +George Kontridze +George MacRorie +George Xie +Georgi Hristozov +Gereon Frey +German DZ +Gert van Valkenhoef +Gerwim +Giampaolo Mancini +Gianluca Borello +Gildas Cuisinier +gissehel +Giuseppe Mazzotta +Gleb Fotengauer-Malinovskiy +Gleb M Borisov +Glyn Normington +GoBella +Goffert van Gool +Gopikannan Venugopalsamy +Gosuke Miyashita +Gou Rao +Govinda Fichtner +Grant Reaber +Graydon Hoare +Greg Fausak +Greg Pflaum +Greg Thornton +Grzegorz Jaśkiewicz +Guilhem Lettron +Guilherme Salgado +Guillaume Dufour +Guillaume J. Charmes +guoxiuyan +Gurjeet Singh +Guruprasad +Gustav Sinder +gwx296173 +Günter Zöchbauer +Hans Kristian Flaatten +Hans Rødtang +Hao Shu Wei +Hao Zhang <21521210@zju.edu.cn> +Harald Albers +Harley Laue +Harold Cooper +Harry Zhang +Harshal Patil +Harshal Patil +He Simei +He Xin +heartlock <21521209@zju.edu.cn> +Hector Castro +Helen Xie +Henning Sprang +Hobofan +Hollie Teal +Hong Xu +Hongbin Lu +hsinko <21551195@zju.edu.cn> +Hu Keping +Hu Tao +Huanzhong Zhang +Huayi Zhang +Hugo Duncan +Hugo Marisco <0x6875676f@gmail.com> +Hunter Blanks +huqun +Huu Nguyen +hyeongkyu.lee +hyp3rdino +Hyzhou Zhy +Ian Babrou +Ian Bishop +Ian Bull +Ian Calvert +Ian Campbell +Ian Lee +Ian Main +Ian Philpot +Ian Truslove +Iavael +Icaro Seara +Ignacio Capurro +Igor Dolzhikov +Iliana Weller +Ilkka Laukkanen +Ilya Dmitrichenko +Ilya Gusev +ILYA Khlopotov +imre Fitos +inglesp +Ingo Gottwald +Isaac Dupree +Isabel Jimenez +Isao Jonas +Ivan Babrou +Ivan Fraixedes +Ivan Grcic +Ivan Markin +J Bruni +J. 
Nunn +Jack Danger Canty +Jacob Atzen +Jacob Edelman +Jacob Tomlinson +Jacob Wen +Jake Champlin +Jake Moshenko +Jake Sanders +jakedt +James Allen +James Carey +James Carr +James DeFelice +James Harrison Fisher +James Kyburz +James Kyle +James Lal +James Mills +James Nesbitt +James Nugent +James Turnbull +Jamie Hannaford +Jamshid Afshar +Jan Keromnes +Jan Koprowski +Jan Pazdziora +Jan Toebes +Jan-Gerd Tenberge +Jan-Jaap Driessen +Jana Radhakrishnan +Jannick Fahlbusch +Janonymous +Januar Wayong +Jared Biel +Jared Hocutt +Jaroslaw Zabiello +jaseg +Jasmine Hegman +Jason Divock +Jason Giedymin +Jason Green +Jason Hall +Jason Heiss +Jason Livesay +Jason McVetta +Jason Plum +Jason Shepherd +Jason Smith +Jason Sommer +Jason Stangroome +jaxgeller +Jay +Jay +Jay Kamat +Jean-Baptiste Barth +Jean-Baptiste Dalido +Jean-Christophe Berthon +Jean-Paul Calderone +Jean-Pierre Huynh +Jean-Tiare Le Bigot +Jeff Anderson +Jeff Johnston +Jeff Lindsay +Jeff Mickey +Jeff Minard +Jeff Nickoloff +Jeff Silberman +Jeff Welch +Jeffrey Bolle +Jeffrey Morgan +Jeffrey van Gogh +Jenny Gebske +Jeremy Chambers +Jeremy Grosser +Jeremy Price +Jeremy Qian +Jeremy Unruh +Jeremy Yallop +Jeroen Jacobs +Jesse Dearing +Jesse Dubay +Jessica Frazelle +Jezeniel Zapanta +jgeiger +Jhon Honce +Ji.Zhilong +Jian Zhang +jianbosun +Jie Luo +Jilles Oldenbeuving +Jim Alateras +Jim Galasyn +Jim Minter +Jim Perrin +Jimmy Cuadra +Jimmy Puckett +Jimmy Song +jimmyxian +Jinsoo Park +Jiri Popelka +Jiuyue Ma +Jiří Župka +jjy +jmzwcn +Joao Fernandes +Joe Beda +Joe Doliner +Joe Ferguson +Joe Gordon +Joe Shaw +Joe Van Dyk +Joel Friedly +Joel Handwell +Joel Hansson +Joel Wurtz +Joey Geiger +Joey Gibson +Joffrey F +Johan Euphrosine +Johan Rydberg +Johanan Lieberman +Johannes 'fish' Ziemke +John Costa +John Feminella +John Gardiner Myers +John Gossman +John Harris +John Howard (VM) +John Laswell +John Maguire +John Mulhausen +John OBrien III +John Starks +John Stephens +John Tims +John V. Martinez +John Warwick +John Willis +Jon Johnson +Jon Wedaman +Jonas Pfenniger +Jonathan A. Sternberg +Jonathan Boulle +Jonathan Camp +Jonathan Dowland +Jonathan Lebon +Jonathan Lomas +Jonathan McCrohan +Jonathan Mueller +Jonathan Pares +Jonathan Rudenberg +Jonathan Stoppani +Jonh Wendell +Joni Sar +Joost Cassee +Jordan +Jordan Arentsen +Jordan Sissel +Jorge Marin +Jose Diaz-Gonzalez +Joseph Anthony Pasquale Holsten +Joseph Hager +Joseph Kern +Joseph Rothrock +Josh +Josh Bodah +Josh Chorlton +Josh Eveleth +Josh Hawn +Josh Horwitz +Josh Poimboeuf +Josh Soref +Josh Wilson +Josiah Kiehl +José Tomás Albornoz +JP +jrabbit +jroenf +Julian Taylor +Julien Barbier +Julien Bisconti +Julien Bordellier +Julien Dubois +Julien Kassar +Julien Maitrehenry +Julien Pervillé +Julio Montes +Jun-Ru Chang +Jussi Nummelin +Justas Brazauskas +Justin Cormack +Justin Force +Justin Menga +Justin Plock +Justin Simonelis +Justin Terry +Justyn Temme +Jyrki Puttonen +Jérôme Petazzoni +Jörg Thalheim +K. Heller +Kai Blin +Kai Qiang Wu(Kennan) +Kamil Domański +kamjar gerami +Kanstantsin Shautsou +Kara Alexandra +Karan Lyons +Kareem Khazem +kargakis +Karl Grzeszczak +Karol Duleba +Karthik Nayak +Katie McLaughlin +Kato Kazuyoshi +Katrina Owen +Kawsar Saiyeed +Kay Yan +kayrus +Ke Li +Ke Xu +Kei Ohmura +Keith Hudgins +Keli Hu +Ken Cochrane +Ken Herner +Ken ICHIKAWA +Kenfe-Mickaël Laventure +Kenjiro Nakayama +Kent Johnson +Kevin "qwazerty" Houdebert +Kevin Burke +Kevin Clark +Kevin Feyrer +Kevin J. Lynagh +Kevin Jing Qiu +Kevin Kern +Kevin Menard +Kevin P. 
Kucharczyk +Kevin Richardson +Kevin Shi +Kevin Wallace +Kevin Yap +kevinmeredith +Keyvan Fatehi +kies +Kim BKC Carlbacker +Kim Eik +Kimbro Staken +Kir Kolyshkin +Kiran Gangadharan +Kirill Kolyshkin +Kirill SIbirev +knappe +Kohei Tsuruta +Koichi Shiraishi +Konrad Kleine +Konstantin Gribov +Konstantin L +Konstantin Pelykh +Krasi Georgiev +Krasimir Georgiev +Kris-Mikael Krister +Kristian Haugene +Kristina Zabunova +krrg +Kun Zhang +Kunal Kushwaha +Kyle Conroy +Kyle Linden +kyu +Lachlan Coote +Lai Jiangshan +Lajos Papp +Lakshan Perera +Lalatendu Mohanty +Lance Chen +Lance Kinley +Lars Butler +Lars Kellogg-Stedman +Lars R. Damerow +Lars-Magnus Skog +Laszlo Meszaros +Laura Frank +Laurent Erignoux +Laurie Voss +Leandro Siqueira +Lee Chao <932819864@qq.com> +Lee, Meng-Han +leeplay +Lei Jitang +Len Weincier +Lennie +Leo Gallucci +Leszek Kowalski +Levi Blackstone +Levi Gross +Lewis Daly +Lewis Marshall +Lewis Peckover +Liam Macgillavry +Liana Lo +Liang Mingqiang +Liang-Chi Hsieh +Liao Qingwei +Lily Guo +limsy +Lin Lu +LingFaKe +Linus Heckemann +Liran Tal +Liron Levin +Liu Bo +Liu Hua +liwenqi +lixiaobing10051267 +Liz Zhang +LIZAO LI +Lizzie Dixon <_@lizzie.io> +Lloyd Dewolf +Lokesh Mandvekar +longliqiang88 <394564827@qq.com> +Lorenz Leutgeb +Lorenzo Fontana +Louis Opter +Luca Favatella +Luca Marturana +Luca Orlandi +Luca-Bogdan Grigorescu +Lucas Chan +Lucas Chi +Luciano Mores +Luis Martínez de Bartolomé Izquierdo +Luiz Svoboda +Lukas Waslowski +lukaspustina +Lukasz Zajaczkowski +Luke Marsden +Lyn +Lynda O'Leary +Lénaïc Huard +Ma Müller +Ma Shimiao +Mabin +Madhan Raj Mookkandy +Madhav Puri +Madhu Venugopal +Mageee +Mahesh Tiyyagura +malnick +Malte Janduda +Manfred Touron +Manfred Zabarauskas +Manjunath A Kumatagi +Mansi Nahar +Manuel Meurer +Manuel Woelker +mapk0y +Marc Abramowitz +Marc Kuo +Marc Tamsky +Marcelo Salazar +Marco Hennings +Marcus Cobden +Marcus Farkas +Marcus Linke +Marcus Ramberg +Marek Goldmann +Marian Marinov +Marianna Tessel +Mario Loriedo +Marius Gundersen +Marius Sturm +Marius Voila +Mark Allen +Mark McGranaghan +Mark McKinstry +Mark Milstein +Mark Parker +Mark West +Markan Patel +Marko Mikulicic +Marko Tibold +Markus Fix +Martijn Dwars +Martijn van Oosterhout +Martin Honermeyer +Martin Kelly +Martin Mosegaard Amdisen +Martin Redmond +Mary Anthony +Masahito Zembutsu +Masayuki Morita +Mason Malone +Mateusz Sulima +Mathias Monnerville +Mathieu Le Marec - Pasquet +Mathieu Parent +Matt Apperson +Matt Bachmann +Matt Bentley +Matt Haggard +Matt Hoyle +Matt McCormick +Matt Moore +Matt Richardson +Matt Robenolt +Matthew Heon +Matthew Lapworth +Matthew Mayer +Matthew Mueller +Matthew Riley +Matthias Klumpp +Matthias Kühnle +Matthias Rampke +Matthieu Hauglustaine +mattymo +mattyw +Mauricio Garavaglia +mauriyouth +Max Shytikov +Maxim Fedchyshyn +Maxim Ivanov +Maxim Kulkin +Maxim Treskin +Maxime Petazzoni +Meaglith Ma +meejah +Megan Kostick +Mehul Kar +Mei ChunTao +Mengdi Gao +Mert Yazıcıoğlu +mgniu +Micah Zoltu +Michael A. 
Smith +Michael Bridgen +Michael Brown +Michael Chiang +Michael Crosby +Michael Currie +Michael Friis +Michael Gorsuch +Michael Grauer +Michael Holzheu +Michael Hudson-Doyle +Michael Huettermann +Michael Irwin +Michael Käufl +Michael Neale +Michael Prokop +Michael Scharf +Michael Spetsiotis +Michael Stapelberg +Michael Steinert +Michael Thies +Michael West +Michal Fojtik +Michal Gebauer +Michal Jemala +Michal Minář +Michal Wieczorek +Michaël Pailloncy +Michał Czeraszkiewicz +Michiel@unhosted +Mickaël FORTUNATO +Miguel Angel Fernández +Miguel Morales +Mihai Borobocea +Mihuleacc Sergiu +Mike Brown +Mike Casas +Mike Chelen +Mike Danese +Mike Dillon +Mike Dougherty +Mike Gaffney +Mike Goelzer +Mike Leone +Mike MacCana +Mike Naberezny +Mike Snitzer +mikelinjie <294893458@qq.com> +Mikhail Sobolev +Miklos Szegedi +Milind Chawre +Miloslav Trmač +mingqing +Mingzhen Feng +Misty Stanley-Jones +Mitch Capper +mlarcher +Mohammad Banikazemi +Mohammed Aaqib Ansari +Mohit Soni +Moorthy RS +Morgan Bauer +Morgante Pell +Morgy93 +Morten Siebuhr +Morton Fox +Moysés Borges +mqliang +Mrunal Patel +msabansal +mschurenko +Muayyad Alsadi +muge +Mustafa Akın +Muthukumar R +Máximo Cuadros +Médi-Rémi Hashim +Nahum Shalman +Nakul Pathak +Nalin Dahyabhai +Nan Monnand Deng +Naoki Orii +Natalie Parker +Natanael Copa +Nate Brennand +Nate Eagleson +Nate Jones +Nathan Hsieh +Nathan Kleyn +Nathan LeClaire +Nathan McCauley +Nathan Williams +Naveed Jamil +Neal McBurnett +Neil Horman +Neil Peterson +Nelson Chen +Neyazul Haque +Nghia Tran +Niall O'Higgins +Nicholas E. Rabenau +nick +Nick DeCoursin +Nick Irvine +Nick Parker +Nick Payne +Nick Stenning +Nick Stinemates +NickrenREN +Nicola Kabar +Nicolas Borboën +Nicolas De loof +Nicolas Dudebout +Nicolas Goy +Nicolas Kaiser +Nicolás Hock Isaza +Nigel Poulton +Nik Nyby +Nikhil Chawla +NikolaMandic +Nikolas Garofil +Nikolay Milovanov +Nirmal Mehta +Nishant Totla +NIWA Hideyuki +Noah Treuhaft +noducks +Nolan Darilek +nponeccop +Nuutti Kotivuori +nzwsch +O.S. Tezer +objectified +OddBloke +odk- +Oguz Bilgic +Oh Jinkyun +Ohad Schneider +ohmystack +Ole Reifschneider +Oliver Neal +Olivier Gambier +Olle Jonsson +Oriol Francès +orkaa +Oskar Niburski +Otto Kekäläinen +Ovidio Mallo +oyld +ozlerhakan +paetling +pandrew +panticz +Paolo G. 
Giarrusso +Pascal Borreli +Pascal Hartig +Patrick Böänziger +Patrick Devine +Patrick Hemmer +Patrick Stapleton +pattichen +Paul +paul +Paul Annesley +Paul Bellamy +Paul Bowsher +Paul Furtado +Paul Hammond +Paul Jimenez +Paul Kehrer +Paul Lietar +Paul Liljenberg +Paul Morie +Paul Nasrat +Paul Weaver +Paulo Ribeiro +Pavel Lobashov +Pavel Pospisil +Pavel Sutyrin +Pavel Tikhomirov +Pavlos Ratis +Pavol Vargovcik +Peeyush Gupta +Peggy Li +Pei Su +Peng Tao +Penghan Wang +Per Weijnitz +perhapszzy@sina.com +Peter Bourgon +Peter Braden +Peter Bücker +Peter Choi +Peter Dave Hello +Peter Edge +Peter Ericson +Peter Esbensen +Peter Jaffe +Peter Malmgren +Peter Salvatore +Peter Volpe +Peter Waller +Petr Švihlík +Phil +Phil Estes +Phil Spitler +Philip Monroe +Philipp Gillé +Philipp Wahala +Philipp Weissensteiner +Phillip Alexander +phineas +pidster +Piergiuliano Bossi +Pierre +Pierre Carrier +Pierre Dal-Pra +Pierre Wacrenier +Pierre-Alain RIVIERE +Piotr Bogdan +pixelistik +Porjo +Poul Kjeldager Sørensen +Pradeep Chhetri +Prasanna Gautam +Pratik Karki +Prayag Verma +Przemek Hejman +Pure White +pysqz +qhuang +Qiang Huang +Qinglan Peng +qudongfang +Quentin Brossard +Quentin Perez +Quentin Tayssier +r0n22 +Rafal Jeczalik +Rafe Colton +Raghavendra K T +Raghuram Devarakonda +Raja Sami +Rajat Pandit +Rajdeep Dua +Ralf Sippl +Ralle +Ralph Bean +Ramkumar Ramachandra +Ramon Brooker +Ramon van Alteren +Ray Tsang +ReadmeCritic +Recursive Madman +Reficul +Regan McCooey +Remi Rampin +Remy Suen +Renato Riccieri Santos Zannon +resouer +rgstephens +Rhys Hiltner +Ricardo N Feliciano +Rich Moyse +Rich Seymour +Richard +Richard Burnison +Richard Harvey +Richard Mathie +Richard Metzler +Richard Scothern +Richo Healey +Rick Bradley +Rick van de Loo +Rick Wieman +Rik Nijessen +Riku Voipio +Riley Guerin +Ritesh H Shukla +Riyaz Faizullabhoy +Rob Vesse +Robert Bachmann +Robert Bittle +Robert Obryk +Robert Schneider +Robert Stern +Robert Terhaar +Robert Wallis +Roberto G. Hashioka +Roberto Muñoz Fernández +Robin Naundorf +Robin Schneider +Robin Speekenbrink +robpc +Rodolfo Carvalho +Rodrigo Vaz +Roel Van Nyen +Roger Peppe +Rohit Jnagal +Rohit Kadam +Rojin George +Roland Huß +Roland Kammerer +Roland Moriz +Roma Sokolov +Roman Dudin +Roman Strashkin +Ron Smits +Ron Williams +root +root +root +root +root +Rory Hunter +Rory McCune +Ross Boucher +Rovanion Luckey +Royce Remer +Rozhnov Alexandr +Rudolph Gottesheim +Rui Lopes +Runshen Zhu +Ryan Abrams +Ryan Anderson +Ryan Aslett +Ryan Belgrave +Ryan Detzel +Ryan Fowler +Ryan Liu +Ryan McLaughlin +Ryan O'Donnell +Ryan Seto +Ryan Thomas +Ryan Trauntvein +Ryan Wallner +Ryan Zhang +ryancooper7 +RyanDeng +Rémy Greinhofer +s. rannou +s00318865 +Sabin Basyal +Sachin Joshi +Sagar Hani +Sainath Grandhi +sakeven +Sally O'Malley +Sam Abed +Sam Alba +Sam Bailey +Sam J Sharpe +Sam Neirinck +Sam Reis +Sam Rijs +Sambuddha Basu +Sami Wagiaalla +Samuel Andaya +Samuel Dion-Girardeau +Samuel Karp +Samuel PHAN +Sandeep Bansal +Sankar சங்கர் +Sanket Saurav +Santhosh Manohar +sapphiredev +Satnam Singh +satoru +Satoshi Amemiya +Satoshi Tagomori +Scott Bessler +Scott Collier +Scott Johnston +Scott Stamp +Scott Walls +sdreyesg +Sean Christopherson +Sean Cronin +Sean Lee +Sean McIntyre +Sean OMeara +Sean P. 
Kane +Sean Rodman +Sebastiaan van Steenis +Sebastiaan van Stijn +Senthil Kumar Selvaraj +Senthil Kumaran +SeongJae Park +Seongyeol Lim +Serge Hallyn +Sergey Alekseev +Sergey Evstifeev +Sergii Kabashniuk +Serhat Gülçiçek +Sevki Hasirci +Shane Canon +Shane da Silva +shaunol +Shawn Landden +Shawn Siefkas +shawnhe +Shayne Wang +Shekhar Gulati +Sheng Yang +Shengbo Song +Shev Yan +Shih-Yuan Lee +Shijiang Wei +Shishir Mahajan +Shoubhik Bose +Shourya Sarcar +shuai-z +Shukui Yang +Shuwei Hao +Sian Lerk Lau +sidharthamani +Silas Sewell +Silvan Jegen +Simei He +Simon Eskildsen +Simon Ferquel +Simon Leinen +Simon Menke +Simon Taranto +Sindhu S +Sjoerd Langkemper +skaasten +Solganik Alexander +Solomon Hykes +Song Gao +Soshi Katsuta +Soulou +Spencer Brown +Spencer Smith +Sridatta Thatipamala +Sridhar Ratnakumar +Srini Brahmaroutu +Srinivasan Srivatsan +Stanislav Bondarenko +Steeve Morin +Stefan Berger +Stefan J. Wernli +Stefan Praszalowicz +Stefan S. +Stefan Scherer +Stefan Staudenmeyer +Stefan Weil +Stephen Crosby +Stephen Day +Stephen Drake +Stephen Rust +Steve Desmond +Steve Dougherty +Steve Durrheimer +Steve Francia +Steve Koch +Steven Burgess +Steven Erenst +Steven Hartland +Steven Iveson +Steven Merrill +Steven Richards +Steven Taylor +Subhajit Ghosh +Sujith Haridasan +Sun Gengze <690388648@qq.com> +Sunny Gogoi +Suryakumar Sudar +Sven Dowideit +Swapnil Daingade +Sylvain Baubeau +Sylvain Bellemare +Sébastien +Sébastien Luttringer +Sébastien Stormacq +Tabakhase +Tadej Janež +TAGOMORI Satoshi +tang0th +Tangi COLIN +Tatsuki Sugiura +Tatsushi Inagaki +Taylor Jones +tbonza +Ted M. Young +Tehmasp Chaudhri +Tejesh Mehta +terryding77 <550147740@qq.com> +tgic +Thatcher Peskens +theadactyl +Thell 'Bo' Fowler +Thermionix +Thijs Terlouw +Thomas Bikeev +Thomas Frössman +Thomas Gazagnaire +Thomas Grainger +Thomas Hansen +Thomas Leonard +Thomas LEVEIL +Thomas Orozco +Thomas Riccardi +Thomas Schroeter +Thomas Sjögren +Thomas Swift +Thomas Tanaka +Thomas Texier +Tianon Gravi +Tianyi Wang +Tibor Vass +Tiffany Jernigan +Tiffany Low +Tim Bart +Tim Bosse +Tim Dettrick +Tim Düsterhus +Tim Hockin +Tim Potter +Tim Ruffles +Tim Smith +Tim Terhorst +Tim Wang +Tim Waugh +Tim Wraight +Tim Zju <21651152@zju.edu.cn> +timfeirg +Timothy Hobbs +tjwebb123 +tobe +Tobias Bieniek +Tobias Bradtke +Tobias Gesellchen +Tobias Klauser +Tobias Munk +Tobias Schmidt +Tobias Schwab +Todd Crane +Todd Lunter +Todd Whiteman +Toli Kuznets +Tom Barlow +Tom Booth +Tom Denham +Tom Fotherby +Tom Howe +Tom Hulihan +Tom Maaswinkel +Tom Wilkie +Tom X. Tobin +Tomas Tomecek +Tomasz Kopczynski +Tomasz Lipinski +Tomasz Nurkiewicz +Tommaso Visconti +Tomáš Hrčka +Tonny Xu +Tony Abboud +Tony Daws +Tony Miller +toogley +Torstein Husebø +Tõnis Tiigi +tpng +tracylihui <793912329@qq.com> +Trapier Marshall +Travis Cline +Travis Thieman +Trent Ogren +Trevor +Trevor Pounds +Trevor Sullivan +trishnaguha +Tristan Carel +Troy Denton +Tyler Brock +Tzu-Jung Lee +uhayate +Ulysse Carion +Utz Bacher +vagrant +Vaidas Jablonskis +vanderliang +Veres Lajos +Victor Algaze +Victor Coisne +Victor Costan +Victor I. 
Wood +Victor Lyuboslavsky +Victor Marmol +Victor Palma +Victor Vieux +Victoria Bialas +Vijaya Kumar K +Viktor Stanchev +Viktor Vojnovski +VinayRaghavanKS +Vincent Batts +Vincent Bernat +Vincent Bernat +Vincent Demeester +Vincent Giersch +Vincent Mayers +Vincent Woo +Vinod Kulkarni +Vishal Doshi +Vishnu Kannan +Vitaly Ostrosablin +Vitor Monteiro +Vivek Agarwal +Vivek Dasgupta +Vivek Goyal +Vladimir Bulyga +Vladimir Kirillov +Vladimir Pouzanov +Vladimir Rutsky +Vladimir Varankin +VladimirAus +Vojtech Vitek (V-Teq) +waitingkuo +Walter Leibbrandt +Walter Stanish +WANG Chao +Wang Jie +Wang Long +Wang Ping +Wang Xing +Wang Yuexiao +Ward Vandewege +WarheadsSE +Wayne Chang +Wayne Song +Wei Wu +Wei-Ting Kuo +weiyan +Weiyang Zhu +Wen Cheng Ma +Wendel Fleming +Wenkai Yin +Wentao Zhang +Wenxuan Zhao +Wenyu You <21551128@zju.edu.cn> +Wenzhi Liang +Wes Morgan +Wewang Xiaorenfine +Will Dietz +Will Rouesnel +Will Weaver +willhf +William Delanoue +William Henry +William Hubbs +William Martin +William Riancho +William Thurston +WiseTrem +wlan0 +Wolfgang Powisch +wonderflow +Wonjun Kim +xamyzhao +Xianglin Gao +Xianlu Bird +XiaoBing Jiang +Xiaoxu Chen +Xiaoyu Zhang +xiekeyang +Xinbo Weng +Xinzi Zhou +Xiuming Chen +xlgao-zju +xuzhaokui +Yahya +YAMADA Tsuyoshi +Yamasaki Masahide +Yan Feng +Yang Bai +Yang Pengfei +Yanqiang Miao +Yao Zaiyong +Yassine Tijani +Yasunori Mahata +Yestin Sun +Yi EungJun +Yibai Zhang +Yihang Ho +Ying Li +Yohei Ueda +Yong Tang +Yongzhi Pan +yorkie +You-Sheng Yang (楊有勝) +Youcef YEKHLEF +Yu Changchun +Yu Chengxia +Yu Peng +Yuan Sun +Yuanhong Peng +Yunxiang Huang +Yurii Rashkovskii +yuzou +Zac Dover +Zach Borboa +Zachary Jaffee +Zain Memon +Zaiste! +Zane DeGraffenried +Zefan Li +Zen Lin(Zhinan Lin) +Zhang Kun +Zhang Wei +Zhang Wentao +zhangxianwei +Zhenan Ye <21551168@zju.edu.cn> +zhenghenghuo +Zhenkun Bi +zhouhao +Zhu Guihua +Zhu Kunjia +Zhuoyun Wei +Zilin Du +zimbatm +Ziming Dong +ZJUshuaizhou <21551191@zju.edu.cn> +zmarouf +Zoltan Tombol +zqh +Zuhayr Elahi +Zunayed Ali +Álex González +Álvaro Lázaro +Átila Camurça Alves +尹吉峰 +徐俊杰 +搏通 diff --git a/vendor/github.com/moby/moby/CHANGELOG.md b/vendor/github.com/moby/moby/CHANGELOG.md new file mode 100644 index 000000000..bbedc491e --- /dev/null +++ b/vendor/github.com/moby/moby/CHANGELOG.md @@ -0,0 +1,3587 @@ +# Changelog + +Items starting with `DEPRECATE` are important deprecation notices. For more +information on the list of deprecated flags and APIs please have a look at +https://docs.docker.com/engine/deprecated/ where target removal dates can also +be found. + +## 17.05.0-ce (2017-05-04) + +### Builder + ++ Add multi-stage build support [#31257](https://github.com/docker/docker/pull/31257) [#32063](https://github.com/docker/docker/pull/32063) ++ Allow using build-time args (`ARG`) in `FROM` [#31352](https://github.com/docker/docker/pull/31352) ++ Add an option for specifying build target [#32496](https://github.com/docker/docker/pull/32496) +* Accept `-f -` to read Dockerfile from `stdin`, but use local context for building [#31236](https://github.com/docker/docker/pull/31236) +* The values of default build time arguments (e.g `HTTP_PROXY`) are no longer displayed in docker image history unless a corresponding `ARG` instruction is written in the Dockerfile. 
[#31584](https://github.com/docker/docker/pull/31584)
+- Fix setting command if a custom shell is used in a parent image [#32236](https://github.com/docker/docker/pull/32236)
+- Fix `docker build --label` when the label includes single quotes and a space [#31750](https://github.com/docker/docker/pull/31750)
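+
+A minimal sketch of how the multi-stage build, `ARG`-in-`FROM`, `--target`, and `-f -` builder changes above fit together. The image names, stage name, and Go toolchain are illustrative assumptions, not part of this release:
+
+```bash
+# A two-stage Dockerfile: the final image keeps only the compiled binary.
+cat > Dockerfile <<'EOF'
+# Build-time ARG usable in FROM (new in 17.05)
+ARG GO_VERSION=1.8
+FROM golang:${GO_VERSION} AS builder
+WORKDIR /go/src/app
+COPY . .
+# Static binary, so it can run on the empty scratch base
+RUN CGO_ENABLED=0 go build -o /server .
+
+FROM scratch
+COPY --from=builder /server /server
+ENTRYPOINT ["/server"]
+EOF
+
+# Build only the intermediate stage with the new --target option
+docker build --target builder -t myapp:build .
+
+# Or read the Dockerfile from stdin while keeping the local directory as context
+docker build -t myapp:latest -f - . < Dockerfile
+```
+
+Building with `--target` stops at the named stage, which makes it possible to produce a test or debug image from the same Dockerfile.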
+
+### Client
+
+* Add `--mount` flag to `docker run` and `docker create` [#32251](https://github.com/docker/docker/pull/32251)
+* Add `--type=secret` to `docker inspect` [#32124](https://github.com/docker/docker/pull/32124)
+* Add `--format` option to `docker secret ls` [#31552](https://github.com/docker/docker/pull/31552)
+* Add `--filter` option to `docker secret ls` [#30810](https://github.com/docker/docker/pull/30810)
+* Add `--filter scope=` to `docker network ls` [#31529](https://github.com/docker/docker/pull/31529)
+* Add `--cpus` support to `docker update` [#31148](https://github.com/docker/docker/pull/31148)
+* Add label filter to `docker system prune` and other `prune` commands [#30740](https://github.com/docker/docker/pull/30740)
+* `docker stack rm` now accepts multiple stacks as input [#32110](https://github.com/docker/docker/pull/32110)
+* Improve `docker version --format` option when the client has downgraded the API version [#31022](https://github.com/docker/docker/pull/31022)
+* Prompt when using an encrypted client certificate to connect to a docker daemon [#31364](https://github.com/docker/docker/pull/31364)
+* Display created tags on successful `docker build` [#32077](https://github.com/docker/docker/pull/32077)
+* Clean up compose convert error messages [#32087](https://github.com/moby/moby/pull/32087)
+
+### Contrib
+
++ Add support for building docker debs for Ubuntu 17.04 Zesty on amd64 [#32435](https://github.com/docker/docker/pull/32435)
+
+### Daemon
+
+- Fix `--api-cors-header` being ignored if `--api-enable-cors` is not set [#32174](https://github.com/docker/docker/pull/32174)
+- Clean up the docker tmp dir on start [#31741](https://github.com/docker/docker/pull/31741)
+- Deprecate the `--graph` flag in favor of `--data-root` [#28696](https://github.com/docker/docker/pull/28696)
+
+### Logging
+
++ Add support for logging driver plugins [#28403](https://github.com/docker/docker/pull/28403)
+* Add support for showing logs of individual tasks to `docker service logs`, and add `/task/{id}/logs` REST endpoint [#32015](https://github.com/docker/docker/pull/32015)
+* Add `--log-opt env-regex` option to match environment variables using a regular expression [#27565](https://github.com/docker/docker/pull/27565)
+
+### Networking
+
++ Allow users to replace and customize the ingress network [#31714](https://github.com/docker/docker/pull/31714)
+- Fix UDP traffic in containers not working after the container is restarted [#32505](https://github.com/docker/docker/pull/32505)
+- Fix files being written to `/var/lib/docker` if a different data-root is set [#32505](https://github.com/docker/docker/pull/32505)
+
+### Runtime
+
+- Ensure health probe is stopped when a container exits [#32274](https://github.com/docker/docker/pull/32274)
+
+### Swarm Mode
+
++ Add update/rollback order for services (`--update-order` / `--rollback-order`) [#30261](https://github.com/docker/docker/pull/30261)
++ Add support for synchronous `service create` and `service update` [#31144](https://github.com/docker/docker/pull/31144)
++ Add support for "grace periods" on healthchecks through the `HEALTHCHECK --start-period` and `--health-start-period` flag to
+  `docker service create`, `docker service update`, `docker create`, and `docker run` to support containers with an initial startup
+  time [#28938](https://github.com/docker/docker/pull/28938)
+* `docker service create` now omits fields that are not specified by the user, when possible. This allows defaults to be applied inside the manager [#32284](https://github.com/docker/docker/pull/32284)
+* `docker service inspect` now shows default values for fields that are not specified by the user [#32284](https://github.com/docker/docker/pull/32284)
+* Move `docker service logs` out of experimental [#32462](https://github.com/docker/docker/pull/32462)
+* Add support for Credential Spec and SELinux to services in the API [#32339](https://github.com/docker/docker/pull/32339)
+* Add `--entrypoint` flag to `docker service create` and `docker service update` [#29228](https://github.com/docker/docker/pull/29228)
+* Add `--network-add` and `--network-rm` to `docker service update` [#32062](https://github.com/docker/docker/pull/32062)
+* Add `--credential-spec` flag to `docker service create` and `docker service update` [#32339](https://github.com/docker/docker/pull/32339)
+* Add `--filter mode=` to `docker service ls` [#31538](https://github.com/docker/docker/pull/31538)
+* Resolve network IDs on the client side, instead of in the daemon, when creating services [#32062](https://github.com/docker/docker/pull/32062)
+* Add `--format` option to `docker node ls` [#30424](https://github.com/docker/docker/pull/30424)
+* Add `--prune` option to `docker stack deploy` to remove services that are no longer defined in the docker-compose file [#31302](https://github.com/docker/docker/pull/31302)
+* Add `PORTS` column for `docker service ls` when using `ingress` mode [#30813](https://github.com/docker/docker/pull/30813)
+- Fix unnecessary re-deploying of tasks when environment variables are used [#32364](https://github.com/docker/docker/pull/32364)
+- Fix `docker stack deploy` not supporting `endpoint_mode` when deploying from a docker compose file [#32333](https://github.com/docker/docker/pull/32333)
+- Proceed with startup if the cluster component cannot be created, to allow recovering from a broken swarm setup [#31631](https://github.com/docker/docker/pull/31631)
+
+### Security
+
+* Allow setting SELinux type or MCS labels when using `--ipc=container:` or `--ipc=host` [#30652](https://github.com/docker/docker/pull/30652)
+
+
+### Deprecation
+
+- Deprecate `--api-enable-cors` daemon flag. This flag was marked deprecated in Docker 1.6.0 but not listed in deprecated features [#32352](https://github.com/docker/docker/pull/32352)
+- Remove Ubuntu 12.04 (Precise Pangolin) as supported platform. Ubuntu 12.04 is EOL, and no longer receives updates [#32520](https://github.com/docker/docker/pull/32520)
+
+## 17.04.0-ce (2017-04-05)
+
+### Builder
+
+* Disable container logging for build containers [#29552](https://github.com/docker/docker/pull/29552)
+* Fix use of `**/` in `.dockerignore` [#29043](https://github.com/docker/docker/pull/29043)
+
+### Client
+
++ Sort `docker stack ls` by name [#31085](https://github.com/docker/docker/pull/31085)
++ Flags for specifying bind mount consistency [#31047](https://github.com/docker/docker/pull/31047)
+* Output of the docker CLI `--help` is now wrapped to the terminal width [#28751](https://github.com/docker/docker/pull/28751)
+* Suppress image digest in `docker ps` [#30848](https://github.com/docker/docker/pull/30848)
+* Hide command options that are related to Windows [#30788](https://github.com/docker/docker/pull/30788)
+* Fix `docker plugin install` prompt to accept "enter" for the "N" default [#30769](https://github.com/docker/docker/pull/30769)
++ Add `truncate` function for Go templates [#30484](https://github.com/docker/docker/pull/30484)
+* Support expanded syntax of ports in `stack deploy` [#30476](https://github.com/docker/docker/pull/30476)
+* Support expanded syntax of mounts in `stack deploy` [#30597](https://github.com/docker/docker/pull/30597) [#31795](https://github.com/docker/docker/pull/31795)
++ Add `--add-host` for `docker build` [#30383](https://github.com/docker/docker/pull/30383)
++ Add `.CreatedAt` placeholder for `docker network ls --format` [#29900](https://github.com/docker/docker/pull/29900)
+* Update order of `--secret-rm` and `--secret-add` [#29802](https://github.com/docker/docker/pull/29802)
++ Add `--filter enabled=true` for `docker plugin ls` [#28627](https://github.com/docker/docker/pull/28627)
++ Add `--format` to `docker service ls` [#28199](https://github.com/docker/docker/pull/28199)
++ Add `publish` and `expose` filter for `docker ps --filter` [#27557](https://github.com/docker/docker/pull/27557)
+* Support multiple service IDs on `docker service ps` [#25234](https://github.com/docker/docker/pull/25234)
++ Allow swarm join with `--availability=drain` [#24993](https://github.com/docker/docker/pull/24993)
+* `docker inspect` now shows "docker-default" when AppArmor is enabled and no other profile was defined [#27083](https://github.com/docker/docker/pull/27083)
+
+### Logging
+
++ Implement optional ring buffer for container logs [#28762](https://github.com/docker/docker/pull/28762)
++ Add `--log-opt awslogs-create-group=` for awslogs (CloudWatch) to support creation of log groups as needed [#29504](https://github.com/docker/docker/pull/29504)
+- Fix segfault when using the gcplogs logging driver with a "static" binary [#29478](https://github.com/docker/docker/pull/29478)
+
+
+### Networking
+
+* Check parameters `--ip`, `--ip6` and `--link-local-ip` in `docker network connect` [#30807](https://github.com/docker/docker/pull/30807)
++ Add support for `dns-search` [#30117](https://github.com/docker/docker/pull/30117)
++ Add `--verbose` option for `docker network inspect` to show task details from all swarm nodes [#31710](https://github.com/docker/docker/pull/31710)
+* Clear stale datapath encryption states when joining the cluster [docker/libnetwork#1354](https://github.com/docker/libnetwork/pull/1354)
++ Ensure iptables initialization only happens once [docker/libnetwork#1676](https://github.com/docker/libnetwork/pull/1676)
+* Fix bad order of iptables filter rules [docker/libnetwork#961](https://github.com/docker/libnetwork/pull/961)
++ Add anonymous container alias to service record on attachable network [docker/libnetwork#1651](https://github.com/docker/libnetwork/pull/1651)
++ Support for `com.docker.network.container_interface_prefix` driver label [docker/libnetwork#1667](https://github.com/docker/libnetwork/pull/1667)
++ Improve network list performance by omitting network details that are not used [#30673](https://github.com/docker/docker/pull/30673)
+
+### Runtime
+
+* Handle paused container when restoring without live-restore set [#31704](https://github.com/docker/docker/pull/31704)
+- Do not allow sub-second durations in healthcheck options in Dockerfile [#31177](https://github.com/docker/docker/pull/31177)
+* Support name and ID prefix in `secret update` [#30856](https://github.com/docker/docker/pull/30856)
+* Use binary frame for websocket attach endpoint [#30460](https://github.com/docker/docker/pull/30460)
+* Fix Linux mount calls not applying propagation type changes [#30416](https://github.com/docker/docker/pull/30416)
+* Fix ExecIds leak on failed `exec -i` [#30340](https://github.com/docker/docker/pull/30340)
+* Prune named but untagged images if `danglingOnly=true` [#30330](https://github.com/docker/docker/pull/30330)
++ Add daemon flag to set `no_new_priv` as default for unprivileged containers [#29984](https://github.com/docker/docker/pull/29984)
++ Add daemon option `--default-shm-size` [#29692](https://github.com/docker/docker/pull/29692)
++ Support registry mirror config reload [#29650](https://github.com/docker/docker/pull/29650)
+- Ignore the daemon log config when building images [#29552](https://github.com/docker/docker/pull/29552)
+* Move secret name or ID prefix resolving from client to daemon [#29218](https://github.com/docker/docker/pull/29218)
++ Allow adding rules to `cgroup devices.allow` on container create/run [#22563](https://github.com/docker/docker/pull/22563)
+- Fix `cpu.cfs_quota_us` being reset when running `systemd daemon-reload` [#31736](https://github.com/docker/docker/pull/31736)
+
+### Swarm Mode
+
++ Topology-aware scheduling [#30725](https://github.com/docker/docker/pull/30725)
++ Automatic service rollback on failure [#31108](https://github.com/docker/docker/pull/31108)
++ Worker and manager on the same node are now connected through a UNIX socket [docker/swarmkit#1828](https://github.com/docker/swarmkit/pull/1828), [docker/swarmkit#1850](https://github.com/docker/swarmkit/pull/1850), [docker/swarmkit#1851](https://github.com/docker/swarmkit/pull/1851)
+* Improve raft transport package [docker/swarmkit#1748](https://github.com/docker/swarmkit/pull/1748)
+* No automatic manager shutdown on demotion/removal [docker/swarmkit#1829](https://github.com/docker/swarmkit/pull/1829)
+* Use TransferLeadership to make leader demotion safer [docker/swarmkit#1939](https://github.com/docker/swarmkit/pull/1939)
+* Decrease default monitoring period [docker/swarmkit#1967](https://github.com/docker/swarmkit/pull/1967)
++ Add service logs formatting [#31672](https://github.com/docker/docker/pull/31672)
+* Fix service logs API to be able to specify stream [#31313](https://github.com/docker/docker/pull/31313)
++ Add `--stop-signal` for `service create` and `service update` [#30754](https://github.com/docker/docker/pull/30754)
++ Add `--read-only` for `service create` and `service update` [#30162](https://github.com/docker/docker/pull/30162)
++ Renew the context after communicating with the registry [#31586](https://github.com/docker/docker/pull/31586)
++ (experimental) Add `--tail` and `--since` options to `docker service logs` [#31500](https://github.com/docker/docker/pull/31500)
++ (experimental) Add `--no-task-ids` and `--no-trunc` options to `docker service logs` [#31672](https://github.com/docker/docker/pull/31672)
+
+### Windows
+
+* Block pulling Windows images on non-Windows daemons [#29001](https://github.com/docker/docker/pull/29001)
+
+## 17.03.1-ce (2017-03-27)
+
+### Remote API (v1.27) & Client
+
+* Fix autoremove on older API versions [#31692](https://github.com/docker/docker/pull/31692)
+* Fix default network customization for a stack [#31258](https://github.com/docker/docker/pull/31258/)
+* Correct CPU usage calculation in the presence of offline CPUs and newer Linux kernels [#31802](https://github.com/docker/docker/pull/31802)
+* Fix issue where service healthcheck is `{}` in remote API [#30197](https://github.com/docker/docker/pull/30197)
+
+### Runtime
+
+* Update runc to 54296cf40ad8143b62dbcaa1d90e520a2136ddfe [#31666](https://github.com/docker/docker/pull/31666)
+ * Ignore cgroup2 mountpoints [opencontainers/runc#1266](https://github.com/opencontainers/runc/pull/1266)
+* Update containerd to 4ab9917febca54791c5f071a9d1f404867857fcc [#31662](https://github.com/docker/docker/pull/31662) [#31852](https://github.com/docker/docker/pull/31852)
+ * Register healthcheck service before calling restore() [docker/containerd#609](https://github.com/docker/containerd/pull/609)
+* Fix `docker exec` not working after unattended upgrades that reload apparmor profiles [#31773](https://github.com/docker/docker/pull/31773)
+* Fix unmounting layer without merge dir with Overlay2 [#31069](https://github.com/docker/docker/pull/31069)
+* Do not ignore "volume in use" errors when force-deleting [#31450](https://github.com/docker/docker/pull/31450)
+
+### Swarm Mode
+
+* Update swarmkit to 17756457ad6dc4d8a639a1f0b7a85d1b65a617bb [#31807](https://github.com/docker/docker/pull/31807)
+ * Scheduler now correctly considers tasks which have been assigned to a node but aren't yet running [docker/swarmkit#1980](https://github.com/docker/swarmkit/pull/1980)
+ * Allow removal of a network when only dead tasks reference it [docker/swarmkit#2018](https://github.com/docker/swarmkit/pull/2018)
+ * Retry failed network allocations less aggressively [docker/swarmkit#2021](https://github.com/docker/swarmkit/pull/2021)
+ * Avoid network allocation for tasks that are no longer running [docker/swarmkit#2017](https://github.com/docker/swarmkit/pull/2017)
+ * Bookkeeping fixes inside the network allocator [docker/swarmkit#2019](https://github.com/docker/swarmkit/pull/2019) [docker/swarmkit#2020](https://github.com/docker/swarmkit/pull/2020)
+
+### Windows
+
+* Clean up HCS on restore [#31503](https://github.com/docker/docker/pull/31503)
+
+## 17.03.0-ce (2017-03-01)
+
+**IMPORTANT**: Starting with this release, Docker is on a monthly release cycle and uses a
+new YY.MM versioning scheme to reflect this. Two channels are available: monthly and quarterly.
+Any given monthly release will only receive security and bugfixes until the next monthly
+release is available. Quarterly releases receive security and bugfixes for 4 months after
+initial release. This release includes bugfixes for 1.13.1, but
+there are no major feature additions and the API version stays the same.
+Upgrading from Docker 1.13.1 to 17.03.0 is expected to be simple and low-risk.
+
+### Client
+
+* Fix panic in `docker stats --format` [#30776](https://github.com/docker/docker/pull/30776)
+
+### Contrib
+
+* Update various `bash` and `zsh` completion scripts [#30823](https://github.com/docker/docker/pull/30823), [#30945](https://github.com/docker/docker/pull/30945) and more...
+* Block obsolete socket families in default seccomp profile - mitigates unpatched kernels' CVE-2017-6074 [#29076](https://github.com/docker/docker/pull/29076)
+
+### Networking
+
+* Fix bug on overlay encryption key rotation in cross-datacenter swarm [#30727](https://github.com/docker/docker/pull/30727)
+* Fix side effect panic in overlay encryption and network control plane communication failure ("No installed keys could decrypt the message") on frequent swarm leader re-election [#25608](https://github.com/docker/docker/pull/25608)
+* Several fixes around system responsiveness and datapath programming when using overlay network with external kv-store [docker/libnetwork#1639](https://github.com/docker/libnetwork/pull/1639), [docker/libnetwork#1632](https://github.com/docker/libnetwork/pull/1632) and more...
+* Discard incoming plain vxlan packets for encrypted overlay network [#31170](https://github.com/docker/docker/pull/31170)
+* Release the network attachment on allocation failure [#31073](https://github.com/docker/docker/pull/31073)
+* Fix port allocation when multiple published ports map to the same target port [docker/swarmkit#1835](https://github.com/docker/swarmkit/pull/1835)
+
+### Runtime
+
+* Fix a deadlock in docker logs [#30223](https://github.com/docker/docker/pull/30223)
+* Fix CPU spin waiting for log write events [#31070](https://github.com/docker/docker/pull/31070)
+* Fix a possible crash when using journald [#31231](https://github.com/docker/docker/pull/31231) [#31263](https://github.com/docker/docker/pull/31263)
+* Fix a panic on close of nil channel [#31274](https://github.com/docker/docker/pull/31274)
+* Fix duplicate mount point for `--volumes-from` in `docker run` [#29563](https://github.com/docker/docker/pull/29563)
+* Fix `--cache-from` not caching the last step [#31189](https://github.com/docker/docker/pull/31189)
+
+### Swarm Mode
+
+* Shutdown leaks an error when the container was never started [#31279](https://github.com/docker/docker/pull/31279)
+* Fix possibility of tasks getting stuck in the "NEW" state during a leader failover [docker/swarmkit#1938](https://github.com/docker/swarmkit/pull/1938)
+* Fix extraneous task creations for global services that led to confusing replica counts in `docker service ls` [docker/swarmkit#1957](https://github.com/docker/swarmkit/pull/1957)
+* Fix problem that made rolling updates slow when `task-history-limit` was set to 1 [docker/swarmkit#1948](https://github.com/docker/swarmkit/pull/1948)
+* Restart tasks elsewhere, if appropriate, when they are shut down as a result of nodes no longer satisfying constraints [docker/swarmkit#1958](https://github.com/docker/swarmkit/pull/1958)
+
+## 1.13.1 (2017-02-08)
+
+**IMPORTANT**: On Linux distributions where `devicemapper` was the default storage driver,
+`overlay2` or `overlay` is now used by default (if the kernel supports it).
+To keep using devicemapper, manually select the storage driver through
+the `--storage-driver` daemon option, or by setting "storage-driver" in the `daemon.json`
+configuration file.
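+
+For example, either of the following pins the daemon to `devicemapper` (a minimal sketch: `/etc/docker/daemon.json` is the default configuration path on Linux, and the restart command assumes a systemd-managed host):
+
+```bash
+# One-off: pass the flag directly to the daemon
+dockerd --storage-driver=devicemapper
+
+# Persistent: set it in daemon.json, then restart the daemon
+# (this overwrites any existing daemon.json)
+echo '{ "storage-driver": "devicemapper" }' | sudo tee /etc/docker/daemon.json
+sudo systemctl restart docker
+```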
+
+**IMPORTANT**: In Docker 1.13, the managed plugin API changed, as compared to the experimental
+version introduced in Docker 1.12. You must **uninstall** plugins which you installed with Docker 1.12
+_before_ upgrading to Docker 1.13. You can uninstall plugins using the `docker plugin rm` command.
+
+If you have already upgraded to Docker 1.13 without uninstalling
+previously-installed plugins, you may see this message when the Docker daemon
+starts:
+
+    Error starting daemon: json: cannot unmarshal string into Go value of type types.PluginEnv
+
+To manually remove all plugins and resolve this problem, take the following steps:
+
+1. Remove `plugins.json` from `/var/lib/docker/plugins/`.
+2. Restart Docker. Verify that the Docker daemon starts with no errors.
+3. Reinstall your plugins.
+
+### Contrib
+
+* Do not require a custom build of tini [#28454](https://github.com/docker/docker/pull/28454)
+* Upgrade to Go 1.7.5 [#30489](https://github.com/docker/docker/pull/30489)
+
+### Remote API (v1.26) & Client
+
++ Support secrets in docker stack deploy with a compose file [#30144](https://github.com/docker/docker/pull/30144)
+
+### Runtime
+
+* Fix size issue in `docker system df` [#30378](https://github.com/docker/docker/pull/30378)
+* Fix error on `docker inspect` when Swarm certificates were expired [#29246](https://github.com/docker/docker/pull/29246)
+* Fix deadlock on v1 plugin with activate error [#30408](https://github.com/docker/docker/pull/30408)
+* Fix SELinux regression [#30649](https://github.com/docker/docker/pull/30649)
+
+### Plugins
+
+* Support global-scoped network plugins (v2) in swarm mode [#30332](https://github.com/docker/docker/pull/30332)
++ Add `docker plugin upgrade` [#29414](https://github.com/docker/docker/pull/29414)
+
+### Windows
+
+* Fix small regression with old plugins in Windows [#30150](https://github.com/docker/docker/pull/30150)
+* Fix warning on Windows [#30730](https://github.com/docker/docker/pull/30730)
+
+## 1.13.0 (2017-01-18)
+
+**IMPORTANT**: On Linux distributions where `devicemapper` was the default storage driver,
+`overlay2` or `overlay` is now used by default (if the kernel supports it).
+To keep using devicemapper, manually select the storage driver through
+the `--storage-driver` daemon option, or by setting "storage-driver" in the `daemon.json`
+configuration file.
+
+**IMPORTANT**: In Docker 1.13, the managed plugin API changed, as compared to the experimental
+version introduced in Docker 1.12. You must **uninstall** plugins which you installed with Docker 1.12
+_before_ upgrading to Docker 1.13. You can uninstall plugins using the `docker plugin rm` command.
+
+If you have already upgraded to Docker 1.13 without uninstalling
+previously-installed plugins, you may see this message when the Docker daemon
+starts:
+
+    Error starting daemon: json: cannot unmarshal string into Go value of type types.PluginEnv
+
+To manually remove all plugins and resolve this problem, take the following steps:
+
+1. Remove `plugins.json` from `/var/lib/docker/plugins/`.
+2. Restart Docker. Verify that the Docker daemon starts with no errors.
+3. Reinstall your plugins.
+
+### Builder
+
++ Add capability to specify images used as a cache source on build. These images do not need to have a local parent chain and can be pulled from other registries [#26839](https://github.com/docker/docker/pull/26839)
++ (experimental) Add option to squash image layers to the FROM image after successful builds [#22641](https://github.com/docker/docker/pull/22641)
+* Fix Dockerfile parser with empty line after escape [#24725](https://github.com/docker/docker/pull/24725)
+- Add step number on `docker build` [#24978](https://github.com/docker/docker/pull/24978)
++ Add support for compressing build context during image build [#25837](https://github.com/docker/docker/pull/25837)
++ Add `--network` to `docker build` [#27702](https://github.com/docker/docker/pull/27702)
+- Fix inconsistent behavior between `--label` flag on `docker build` and `docker run` [#26027](https://github.com/docker/docker/issues/26027)
+- Fix image layer inconsistencies when using the overlay storage driver [#27209](https://github.com/docker/docker/pull/27209)
+* Unused build-args are now allowed. A warning is presented instead of an error and a failed build [#27412](https://github.com/docker/docker/pull/27412)
+- Fix builder cache issue on Windows [#27805](https://github.com/docker/docker/pull/27805)
++ Allow `USER` in builder on Windows [#28415](https://github.com/docker/docker/pull/28415)
++ Handle env case-insensitively on Windows [#28725](https://github.com/docker/docker/pull/28725)
+
+### Contrib
+
++ Add support for building docker debs for Ubuntu 16.04 Xenial on PPC64LE [#23438](https://github.com/docker/docker/pull/23438)
++ Add support for building docker debs for Ubuntu 16.04 Xenial on s390x [#26104](https://github.com/docker/docker/pull/26104)
++ Add support for building docker debs for Ubuntu 16.10 Yakkety Yak on PPC64LE [#28046](https://github.com/docker/docker/pull/28046)
+- Add RPM builder for VMWare Photon OS [#24116](https://github.com/docker/docker/pull/24116)
++ Add shell completions to tgz [#27735](https://github.com/docker/docker/pull/27735)
+* Update the install script to allow using the mirror in China [#27005](https://github.com/docker/docker/pull/27005)
++ Add DEB builder for Ubuntu 16.10 Yakkety Yak [#27993](https://github.com/docker/docker/pull/27993)
++ Add RPM builder for Fedora 25 [#28222](https://github.com/docker/docker/pull/28222)
++ Add `make deb` support for aarch64 [#27625](https://github.com/docker/docker/pull/27625)
+
+### Distribution
+
+* Update notary dependency to 0.4.2 (full changelogs [here](https://github.com/docker/notary/releases/tag/v0.4.2)) [#27074](https://github.com/docker/docker/pull/27074)
+ - Support for compilation on Windows [docker/notary#970](https://github.com/docker/notary/pull/970)
+ - Improved error messages for client authentication errors [docker/notary#972](https://github.com/docker/notary/pull/972)
+ - Support for finding keys that are anywhere in the `~/.docker/trust/private` directory, not just under `~/.docker/trust/private/root_keys` or `~/.docker/trust/private/tuf_keys` [docker/notary#981](https://github.com/docker/notary/pull/981)
+ - Previously, on any update error, the client would fall back on the cache. Now we only do so if there is a network error or if the server is unavailable or missing the TUF data. Invalid TUF data will cause the update to fail - for example if there was an invalid root rotation [docker/notary#982](https://github.com/docker/notary/pull/982)
+ - Improve root validation and yubikey debug logging [docker/notary#858](https://github.com/docker/notary/pull/858) [docker/notary#891](https://github.com/docker/notary/pull/891)
+ - Warn if certificates for root or delegations are near expiry [docker/notary#802](https://github.com/docker/notary/pull/802)
+ - Warn if role metadata is near expiry [docker/notary#786](https://github.com/docker/notary/pull/786)
+ - Fix passphrase retrieval attempt counting and terminal detection [docker/notary#906](https://github.com/docker/notary/pull/906)
+- Avoid unnecessary blob uploads when different users push the same layers to an authenticated registry [#26564](https://github.com/docker/docker/pull/26564)
+* Allow external storage for registry credentials [#26354](https://github.com/docker/docker/pull/26354)
+
+### Logging
+
+* Standardize the default logging tag value in all logging drivers [#22911](https://github.com/docker/docker/pull/22911)
+- Improve performance and memory use when logging long log lines [#22982](https://github.com/docker/docker/pull/22982)
++ Enable syslog driver for Windows [#25736](https://github.com/docker/docker/pull/25736)
++ Add Logentries driver [#27471](https://github.com/docker/docker/pull/27471)
++ Update the AWS log driver to support tags [#27707](https://github.com/docker/docker/pull/27707)
++ Unix socket support for fluentd [#26088](https://github.com/docker/docker/pull/26088)
+* Enable fluentd logging driver on Windows [#28189](https://github.com/docker/docker/pull/28189)
+- Sanitize docker labels when used as journald field names [#23725](https://github.com/docker/docker/pull/23725)
+- Fix an issue where `docker logs --tail` returned fewer lines than expected [#28203](https://github.com/docker/docker/pull/28203)
+- Splunk Logging Driver: performance and reliability improvements [#26207](https://github.com/docker/docker/pull/26207)
+- Splunk Logging Driver: configurable formats and an option to skip connection verification [#25786](https://github.com/docker/docker/pull/25786)
+
+### Networking
+
++ Add `--attachable` network support to enable `docker run` to work in swarm-mode overlay network [#25962](https://github.com/docker/docker/pull/25962)
++ Add support for host port PublishMode in services using the `--publish` option in `docker service create` [#27917](https://github.com/docker/docker/pull/27917) and [#28943](https://github.com/docker/docker/pull/28943)
++ Add support for Windows Server 2016 overlay network driver (requires upcoming ws2016 update) [#28182](https://github.com/docker/docker/pull/28182)
+* Change the default `FORWARD` policy to `DROP` [#28257](https://github.com/docker/docker/pull/28257)
++ Add support for specifying static IP addresses for predefined networks on Windows [#22208](https://github.com/docker/docker/pull/22208)
+- Fix `--publish` flag on `docker run` not working with IPv6 addresses [#27860](https://github.com/docker/docker/pull/27860)
+- Fix `network inspect` to show the gateway with its mask [#25564](https://github.com/docker/docker/pull/25564)
+- Fix an issue where multiple addresses in a bridge may cause `--fixed-cidr` to not have the correct addresses [#26659](https://github.com/docker/docker/pull/26659)
++ Add creation timestamp to `docker network inspect` [#26130](https://github.com/docker/docker/pull/26130)
+- Show peer nodes in `docker network inspect` for swarm overlay networks [#28078](https://github.com/docker/docker/pull/28078)
+- Enable ping for service VIP address [#28019](https://github.com/docker/docker/pull/28019)
+
+### Plugins
+
+- Move plugins out of experimental [#28226](https://github.com/docker/docker/pull/28226)
+- Add `--force` on `docker plugin remove` [#25096](https://github.com/docker/docker/pull/25096)
+* Add support for dynamically reloading authorization plugins [#22770](https://github.com/docker/docker/pull/22770)
++ Add description in `docker plugin ls` [#25556](https://github.com/docker/docker/pull/25556)
++ Add `-f`/`--format` to `docker plugin inspect` [#25990](https://github.com/docker/docker/pull/25990)
++ Add `docker plugin create` command [#28164](https://github.com/docker/docker/pull/28164)
+* Send request's TLS peer certificates to authorization plugins [#27383](https://github.com/docker/docker/pull/27383)
+* Support for global-scoped network and ipam plugins in swarm-mode [#27287](https://github.com/docker/docker/pull/27287)
+* Split `docker plugin install` into two API calls, `/privileges` and `/pull` [#28963](https://github.com/docker/docker/pull/28963)
+
+### Remote API (v1.25) & Client
+
++ Support `docker stack deploy` from a Compose file [#27998](https://github.com/docker/docker/pull/27998)
++ (experimental) Implement checkpoint and restore [#22049](https://github.com/docker/docker/pull/22049)
++ Add `--format` flag to `docker info` [#23808](https://github.com/docker/docker/pull/23808)
+* Remove `--name` from `docker volume create` [#23830](https://github.com/docker/docker/pull/23830)
++ Add `docker stack ls` [#23886](https://github.com/docker/docker/pull/23886)
++ Add a new `is-task` ps filter [#24411](https://github.com/docker/docker/pull/24411)
++ Add `--env-file` flag to `docker service create` [#24844](https://github.com/docker/docker/pull/24844)
++ Add `--format` on `docker stats` [#24987](https://github.com/docker/docker/pull/24987)
++ Make `docker node ps` default to `self` in swarm node [#25214](https://github.com/docker/docker/pull/25214)
++ Add `--group` in `docker service create` [#25317](https://github.com/docker/docker/pull/25317)
++ Add `--no-trunc` to service/node/stack ps output [#25337](https://github.com/docker/docker/pull/25337)
++ Add Logs to `ContainerAttachOptions` so Go clients can request to retrieve container logs as part of the attach process [#26718](https://github.com/docker/docker/pull/26718)
++ Allow client to talk to an older server [#27745](https://github.com/docker/docker/pull/27745)
+* Inform user client-side that a container removal is in progress [#26074](https://github.com/docker/docker/pull/26074)
++ Add `Isolation` to the /info endpoint [#26255](https://github.com/docker/docker/pull/26255)
++ Add `userns` to the /info endpoint [#27840](https://github.com/docker/docker/pull/27840)
+- Do not allow more than one mode to be requested at once in the services endpoint [#26643](https://github.com/docker/docker/pull/26643)
++ Add capability to /containers/create API to specify mounts in a more granular and safer way [#22373](https://github.com/docker/docker/pull/22373)
++ Add `--format` flag to `network ls` and `volume ls` [#23475](https://github.com/docker/docker/pull/23475)
+* Allow the top-level `docker inspect` command to inspect any kind of resource [#23614](https://github.com/docker/docker/pull/23614)
++ Add `--cpus` flag to control CPU resources for `docker run` and `docker create`, and add `NanoCPUs` to `HostConfig` [#27958](https://github.com/docker/docker/pull/27958)
+- Allow unsetting the `--entrypoint` in `docker run` or `docker create` [#23718](https://github.com/docker/docker/pull/23718)
+* Restructure CLI commands by adding `docker image` and `docker container` commands for more consistency [#26025](https://github.com/docker/docker/pull/26025)
+- Remove `COMMAND` column from `service ls` output [#28029](https://github.com/docker/docker/pull/28029)
++ Add `--format` to `docker events` [#26268](https://github.com/docker/docker/pull/26268)
+* Allow specifying multiple nodes on `docker node ps` [#26299](https://github.com/docker/docker/pull/26299)
+* Restrict fractional digits to 2 decimals in `docker images` output [#26303](https://github.com/docker/docker/pull/26303)
++ Add `--dns-option` to `docker run` [#28186](https://github.com/docker/docker/pull/28186)
++ Add Image ID to container commit event [#28128](https://github.com/docker/docker/pull/28128)
++ Add external binaries version to `docker info` [#27955](https://github.com/docker/docker/pull/27955)
++ Add information for `Manager Addresses` in the output of `docker info` [#28042](https://github.com/docker/docker/pull/28042)
++ Add a new reference filter for `docker images` [#27872](https://github.com/docker/docker/pull/27872)
+
+### Runtime
+
++ Add `--experimental` daemon flag to enable experimental features, instead of shipping them in a separate build [#27223](https://github.com/docker/docker/pull/27223)
++ Add a `--shutdown-timeout` daemon flag to specify the default timeout (in seconds) to stop containers gracefully before daemon exit [#23036](https://github.com/docker/docker/pull/23036)
++ Add `--stop-timeout` to specify the timeout value (in seconds) for individual containers to stop [#22566](https://github.com/docker/docker/pull/22566)
++ Add a new daemon flag `--userland-proxy-path` to allow configuring the userland proxy instead of using the hardcoded `docker-proxy` from `$PATH` [#26882](https://github.com/docker/docker/pull/26882)
++ Add boolean flag `--init` on `dockerd` and on `docker run` to use [tini](https://github.com/krallin/tini), a zombie-reaping init process, as PID 1 [#26061](https://github.com/docker/docker/pull/26061) [#28037](https://github.com/docker/docker/pull/28037)
++ Add a new daemon flag `--init-path` to allow configuring the path to the `docker-init` binary [#26941](https://github.com/docker/docker/pull/26941)
++ Add support for live reloading insecure registry in configuration [#22337](https://github.com/docker/docker/pull/22337)
++ Add support for storage-opt size on Windows daemons [#23391](https://github.com/docker/docker/pull/23391)
+* Improve reliability of `docker run --rm` by moving it from the client to the daemon [#20848](https://github.com/docker/docker/pull/20848)
++ Add support for `--cpu-rt-period` and `--cpu-rt-runtime` flags, allowing containers to run real-time threads when `CONFIG_RT_GROUP_SCHED` is enabled in the kernel [#23430](https://github.com/docker/docker/pull/23430)
+* Allow parallel stop, pause, unpause [#24761](https://github.com/docker/docker/pull/24761) / [#26778](https://github.com/docker/docker/pull/26778)
+* Implement XFS quota for overlay2 [#24771](https://github.com/docker/docker/pull/24771)
+- Fix partial/full filter issue in `service tasks --filter` [#24850](https://github.com/docker/docker/pull/24850)
+- Allow engine to run inside a user namespace [#25672](https://github.com/docker/docker/pull/25672)
+- Fix a race condition between device deferred removal and resume device, when using the devicemapper graphdriver [#23497](https://github.com/docker/docker/pull/23497)
+- Add `docker stats` support in Windows [#25737](https://github.com/docker/docker/pull/25737)
+- Allow using `--pid=host` and `--net=host` when `--userns=host` [#25771](https://github.com/docker/docker/pull/25771)
++ (experimental) Add metrics (Prometheus) output for basic `container`, `image`, and `daemon` operations [#25820](https://github.com/docker/docker/pull/25820)
+- Fix issue in `docker stats` with `NetworkDisabled=true` [#25905](https://github.com/docker/docker/pull/25905)
++ Add `docker top` support in Windows [#25891](https://github.com/docker/docker/pull/25891)
++ Record PID of exec'd process [#27470](https://github.com/docker/docker/pull/27470)
++ Add support for looking up user/groups via `getent` [#27599](https://github.com/docker/docker/pull/27599)
++ Add new `docker system` command with `df` and `prune` subcommands for system resource management, as well as `docker {container,image,volume,network} prune` subcommands [#26108](https://github.com/docker/docker/pull/26108) [#27525](https://github.com/docker/docker/pull/27525)
+- Fix an issue where containers could not be stopped or killed by setting xfs max_retries to 0 upon ENOSPC with devicemapper [#26212](https://github.com/docker/docker/pull/26212)
+- Fix `docker cp` failing to copy to a container's volume dir on CentOS with devicemapper [#28047](https://github.com/docker/docker/pull/28047)
+* Promote overlay(2) graphdriver [#27932](https://github.com/docker/docker/pull/27932)
++ Add `--seccomp-profile` daemon flag to specify a path to a seccomp profile that overrides the default [#26276](https://github.com/docker/docker/pull/26276)
+- Fix ulimits in `docker inspect` when `--default-ulimit` is set on daemon [#26405](https://github.com/docker/docker/pull/26405)
+- Add workaround for overlay issues during build in older kernels [#28138](https://github.com/docker/docker/pull/28138)
++ Add `TERM` environment variable on `docker exec -t` [#26461](https://github.com/docker/docker/pull/26461)
+* Honor a container's `--stop-signal` setting upon `docker kill` [#26464](https://github.com/docker/docker/pull/26464)
+
+### Swarm Mode
+
++ Add secret management; see the sketch at the end of this section [#27794](https://github.com/docker/docker/pull/27794)
++ Add support for templating service options (hostname, mounts, and environment variables) [#28025](https://github.com/docker/docker/pull/28025)
+* Display the endpoint mode in the output of `docker service inspect --pretty` [#26906](https://github.com/docker/docker/pull/26906)
+* Make `docker service ps` output more bearable by shortening service IDs in task names [#28088](https://github.com/docker/docker/pull/28088)
+* Make `docker node ps` default to the current node [#25214](https://github.com/docker/docker/pull/25214)
++ Add `--dns`, `--dns-opt`, and `--dns-search` to service create [#27567](https://github.com/docker/docker/pull/27567)
++ Add `--force` to `docker service update` [#27596](https://github.com/docker/docker/pull/27596)
++ Add `--health-*` and `--no-healthcheck` flags to `docker service create` and `docker service update` [#27369](https://github.com/docker/docker/pull/27369)
++ Add `-q` to `docker service ps` [#27654](https://github.com/docker/docker/pull/27654)
+* Display number of global services in `docker service ls` [#27710](https://github.com/docker/docker/pull/27710)
+- Remove `--name` flag from `docker service update`. This flag is only functional on `docker service create`, so it was removed from the `update` command [#26988](https://github.com/docker/docker/pull/26988)
+- Fix worker nodes failing to recover because of transient networking issues [#26646](https://github.com/docker/docker/issues/26646)
+* Add support for health-aware load balancing and DNS records [#27279](https://github.com/docker/docker/pull/27279)
++ Add `--hostname` to `docker service create` [#27857](https://github.com/docker/docker/pull/27857)
++ Add `--host` to `docker service create`, and `--host-add`, `--host-rm` to `docker service update` [#28031](https://github.com/docker/docker/pull/28031)
++ Add `--tty` flag to `docker service create`/`update` [#28076](https://github.com/docker/docker/pull/28076)
+* Autodetect, store, and expose node IP address as seen by the manager [#27910](https://github.com/docker/docker/pull/27910)
+* Encryption at rest of manager keys and raft data [#27967](https://github.com/docker/docker/pull/27967)
++ Add `--update-max-failure-ratio`, `--update-monitor` and `--rollback` flags to `docker service update` [#26421](https://github.com/docker/docker/pull/26421)
+- Fix an issue with address autodiscovery on `docker swarm init` running inside a container [#26457](https://github.com/docker/docker/pull/26457)
++ (experimental) Add `docker service logs` command to view logs for a service [#28089](https://github.com/docker/docker/pull/28089)
++ Pin images by digest for `docker service create` and `update` [#28173](https://github.com/docker/docker/pull/28173)
+* Add short (`-f`) flag for `docker node rm --force` and `docker swarm leave --force` [#28196](https://github.com/docker/docker/pull/28196)
++ Add options to customize Raft snapshots (`--max-snapshots`, `--snapshot-interval`) [#27997](https://github.com/docker/docker/pull/27997)
+- Don't repull image if pinned by digest [#28265](https://github.com/docker/docker/pull/28265)
++ Swarm-mode support for Windows [#27838](https://github.com/docker/docker/pull/27838)
++ Allow hostname to be updated on service [#28771](https://github.com/docker/docker/pull/28771)
++ Support v2 plugins [#29433](https://github.com/docker/docker/pull/29433)
++ Add content trust for services [#29469](https://github.com/docker/docker/pull/29469)
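+
+A minimal sketch of the secret management workflow introduced above; the secret value, secret name, and service image are illustrative, and swarm mode must already be initialized:
+
+```bash
+# Create a secret from stdin
+echo "s3cr3t-value" | docker secret create db_password -
+
+# Grant a service access to it; the secret appears inside the
+# container as a file under /run/secrets/db_password
+docker service create --name web --secret db_password nginx:alpine
+
+# List and inspect secrets; the values themselves are never displayed
+docker secret ls
+docker secret inspect db_password
+```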
+
+### Volume
+
++ Add support for labels on volumes [#21270](https://github.com/docker/docker/pull/21270)
++ Add support for filtering volumes by label [#25628](https://github.com/docker/docker/pull/25628)
+* Add a `--force` flag in `docker volume rm` to forcefully purge the data of a volume that has already been deleted [#23436](https://github.com/docker/docker/pull/23436)
+* Enhance `docker volume inspect` to show all options used when creating the volume [#26671](https://github.com/docker/docker/pull/26671)
+* Add support for local NFS volumes to resolve hostnames [#27329](https://github.com/docker/docker/pull/27329)
+
+### Security
+
+- Fix SELinux labeling of volumes shared in a container [#23024](https://github.com/docker/docker/pull/23024)
+- Prohibit `/sys/firmware/**` from being accessed with apparmor [#26618](https://github.com/docker/docker/pull/26618)
+
+### Deprecation
+
+- Mark the `docker daemon` command as deprecated. The daemon is moved to a separate binary (`dockerd`), which should be used instead [#26834](https://github.com/docker/docker/pull/26834)
+- Deprecate unversioned API endpoints [#28208](https://github.com/docker/docker/pull/28208)
+- Remove Ubuntu 15.10 (Wily Werewolf) as supported platform. Ubuntu 15.10 is EOL, and no longer receives updates [#27042](https://github.com/docker/docker/pull/27042)
+
+### Volume
+
++ Add support for labels on volumes [#21270](https://github.com/docker/docker/pull/21270)
++ Add support for filtering volumes by label [#25628](https://github.com/docker/docker/pull/25628)
+* Add a `--force` flag in `docker volume rm` to forcefully purge the data of a volume that has already been deleted [#23436](https://github.com/docker/docker/pull/23436)
+* Enhance `docker volume inspect` to show all options used when creating the volume [#26671](https://github.com/docker/docker/pull/26671)
+* Add support for local NFS volumes to resolve hostnames [#27329](https://github.com/docker/docker/pull/27329)
+
+### Security
+
+- Fix SELinux labeling of volumes shared in a container [#23024](https://github.com/docker/docker/pull/23024)
+- Prohibit `/sys/firmware/**` from being accessed with AppArmor [#26618](https://github.com/docker/docker/pull/26618)
+
+### Deprecation
+
+- Mark the `docker daemon` command as deprecated. The daemon has moved to a separate binary (`dockerd`), which should be used instead [#26834](https://github.com/docker/docker/pull/26834)
+- Deprecate unversioned API endpoints [#28208](https://github.com/docker/docker/pull/28208)
+- Remove Ubuntu 15.10 (Wily Werewolf) as a supported platform. Ubuntu 15.10 is EOL, and no longer receives updates [#27042](https://github.com/docker/docker/pull/27042)
+- Remove Fedora 22 as a supported platform. Fedora 22 is EOL, and no longer receives updates [#27432](https://github.com/docker/docker/pull/27432)
+- Remove Fedora 23 as a supported platform. Fedora 23 is EOL, and no longer receives updates [#29455](https://github.com/docker/docker/pull/29455)
+- Deprecate the `repo:shortid` syntax on `docker pull` [#27207](https://github.com/docker/docker/pull/27207)
+- Deprecate backing filesystems without `d_type` support for the overlay and overlay2 storage drivers [#27433](https://github.com/docker/docker/pull/27433)
+- Deprecate `MAINTAINER` in Dockerfile [#25466](https://github.com/docker/docker/pull/25466)
+- Deprecate the `filter` param for the `/images/json` endpoint [#27872](https://github.com/docker/docker/pull/27872)
+- Deprecate setting duplicate engine labels [#24533](https://github.com/docker/docker/pull/24533)
+- Deprecate "top-level" network information in `NetworkSettings` [#28437](https://github.com/docker/docker/pull/28437)
+
+## 1.12.6 (2017-01-10)
+
+**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm
+based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When
+upgrading from an older version of docker, the upgrade process may not
+automatically install the updated version of the unit file, or may fail to start
+the docker service if:
+
+- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or
+- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive
+
+Starting the docker service will produce an error:
+
+    Failed to start docker.service: Unit docker.socket failed to load: No such file or directory.
+
+or
+
+    no sockets found via socket activation: make sure the service was started by systemd.
+
+To resolve this:
+
+- Back up the current version of the unit file, and replace the file with the
+  [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm)
+- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present
+- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present).
+
+After making those changes, run `sudo systemctl daemon-reload`, and `sudo
+systemctl restart docker` to reload changes and (re)start the docker daemon.
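+
+On an affected host, the steps above might look like this (a sketch; paths assume the default rpm layout, and the drop-in location is an example):
+
+```bash
+# Back up the current unit file and install the one shipped with 1.12
+sudo cp /usr/lib/systemd/system/docker.service{,.bak}
+sudo curl -fsSL -o /usr/lib/systemd/system/docker.service \
+  https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm
+
+# Find drop-in files that still pass `-H fd://`, then edit them by hand
+grep -rl 'fd://' /etc/systemd/system/docker.service.d/ 2>/dev/null
+
+# Reload systemd and (re)start the daemon
+sudo systemctl daemon-reload
+sudo systemctl restart docker
+```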
+
+**NOTE**: Docker 1.12.5 will correctly validate that either an IPv6 subnet is provided or
+that the IPAM driver can provide one when you specify the `--ipv6` option.
+
+If you are currently using the `--ipv6` option _without_ specifying the
+`--fixed-cidr-v6` option, the Docker daemon will refuse to start with the
+following message:
+
+```none
+Error starting daemon: Error initializing network controller: Error creating
+                       default "bridge" network: failed to parse pool request
+                       for address space "LocalDefault" pool "" subpool "":
+                       could not find an available, non-overlapping IPv6 address
+                       pool among the defaults to assign to the network
+```
+
+To resolve this error, either remove the `--ipv6` flag (to preserve the same
+behavior as in Docker 1.12.3 and earlier), or provide an IPv6 subnet as the
+value of the `--fixed-cidr-v6` flag.
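+
+For example, a daemon invocation that satisfies the validation might look like this (a sketch; the subnet uses the IPv6 documentation prefix and is illustrative):
+
+```bash
+# Either drop --ipv6 entirely, or pair it with an explicit subnet
+dockerd --ipv6 --fixed-cidr-v6 2001:db8:1::/64
+```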
+
+In a similar way, if you specify the `--ipv6` flag when creating a network
+with the default IPAM driver, without providing an IPv6 `--subnet`, network
+creation will fail with the following message:
+
+```none
+Error response from daemon: failed to parse pool request for address space
+                            "LocalDefault" pool "" subpool "": could not find an
+                            available, non-overlapping IPv6 address pool among
+                            the defaults to assign to the network
+```
+
+To resolve this, either remove the `--ipv6` flag (to preserve the same behavior
+as in Docker 1.12.3 and earlier), or provide an IPv6 subnet as the value of the
+`--subnet` flag.
+
+The network creation will instead succeed if you use an external IPAM driver
+which supports automatic allocation of IPv6 subnets.
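+
+For example (a sketch; the subnet and network name are illustrative):
+
+```bash
+# Supply the IPv6 subnet explicitly when using the default IPAM driver
+docker network create --ipv6 --subnet 2001:db8:2::/64 my-ipv6-net
+```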
+
+### Runtime
+
+- Fix runC privilege escalation (CVE-2016-9962)
+
+## 1.12.5 (2016-12-15)
+
+**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm
+based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When
+upgrading from an older version of docker, the upgrade process may not
+automatically install the updated version of the unit file, or may fail to start
+the docker service if:
+
+- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or
+- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive
+
+Starting the docker service will produce an error:
+
+    Failed to start docker.service: Unit docker.socket failed to load: No such file or directory.
+
+or
+
+    no sockets found via socket activation: make sure the service was started by systemd.
+
+To resolve this:
+
+- Back up the current version of the unit file, and replace the file with the
+  [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm)
+- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present
+- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present).
+
+After making those changes, run `sudo systemctl daemon-reload`, and `sudo
+systemctl restart docker` to reload changes and (re)start the docker daemon.
+
+**NOTE**: Docker 1.12.5 will correctly validate that either an IPv6 subnet is provided or
+that the IPAM driver can provide one when you specify the `--ipv6` option.
+
+If you are currently using the `--ipv6` option _without_ specifying the
+`--fixed-cidr-v6` option, the Docker daemon will refuse to start with the
+following message:
+
+```none
+Error starting daemon: Error initializing network controller: Error creating
+                       default "bridge" network: failed to parse pool request
+                       for address space "LocalDefault" pool "" subpool "":
+                       could not find an available, non-overlapping IPv6 address
+                       pool among the defaults to assign to the network
+```
+
+To resolve this error, either remove the `--ipv6` flag (to preserve the same
+behavior as in Docker 1.12.3 and earlier), or provide an IPv6 subnet as the
+value of the `--fixed-cidr-v6` flag.
+
+In a similar way, if you specify the `--ipv6` flag when creating a network
+with the default IPAM driver, without providing an IPv6 `--subnet`, network
+creation will fail with the following message:
+
+```none
+Error response from daemon: failed to parse pool request for address space
+                            "LocalDefault" pool "" subpool "": could not find an
+                            available, non-overlapping IPv6 address pool among
+                            the defaults to assign to the network
+```
+
+To resolve this, either remove the `--ipv6` flag (to preserve the same behavior
+as in Docker 1.12.3 and earlier), or provide an IPv6 subnet as the value of the
+`--subnet` flag.
+
+The network creation will instead succeed if you use an external IPAM driver
+which supports automatic allocation of IPv6 subnets.
+
+### Runtime
+
+- Fix race on sending stdin close event [#29424](https://github.com/docker/docker/pull/29424)
+
+### Networking
+
+- Fix panic in `docker network ls` when a network was created with `--ipv6` and no IPv6 `--subnet` in older docker versions [#29416](https://github.com/docker/docker/pull/29416)
+
+### Contrib
+
+- Fix compilation on Darwin [#29370](https://github.com/docker/docker/pull/29370)
+
+## 1.12.4 (2016-12-12)
+
+**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm
+based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When
+upgrading from an older version of docker, the upgrade process may not
+automatically install the updated version of the unit file, or may fail to start
+the docker service if:
+
+- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or
+- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive
+
+Starting the docker service will produce an error:
+
+    Failed to start docker.service: Unit docker.socket failed to load: No such file or directory.
+
+or
+
+    no sockets found via socket activation: make sure the service was started by systemd.
+
+To resolve this:
+
+- Back up the current version of the unit file, and replace the file with the
+  [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm)
+- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present
+- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present).
+
+After making those changes, run `sudo systemctl daemon-reload`, and `sudo
+systemctl restart docker` to reload changes and (re)start the docker daemon.
+
+### Runtime
+
+- Fix issue where volume metadata was not removed [#29083](https://github.com/docker/docker/pull/29083)
+- Asynchronously close streams to prevent holding the container lock [#29050](https://github.com/docker/docker/pull/29050)
+- Fix SELinux labels for newly created container volumes [#29050](https://github.com/docker/docker/pull/29050)
+- Remove hostname validation [#28990](https://github.com/docker/docker/pull/28990)
+- Fix deadlocks caused by IO races [#29095](https://github.com/docker/docker/pull/29095) [#29141](https://github.com/docker/docker/pull/29141)
+- Return empty stats if the container is restarting [#29150](https://github.com/docker/docker/pull/29150)
+- Fix volume store locking [#29151](https://github.com/docker/docker/pull/29151)
+- Ensure consistent status codes in the API [#29150](https://github.com/docker/docker/pull/29150)
+- Fix incorrect opaque directory permissions in overlay2 [#29093](https://github.com/docker/docker/pull/29093)
+- Detect plugin content and error out on `docker pull` [#29297](https://github.com/docker/docker/pull/29297)
+
+### Swarm Mode
+
+* Update Swarmkit [#29047](https://github.com/docker/docker/pull/29047)
+  - orchestrator/global: Fix deadlock on updates [docker/swarmkit#1760](https://github.com/docker/swarmkit/pull/1760)
+  - On leader switchover, preserve the vxlan id for existing networks [docker/swarmkit#1773](https://github.com/docker/swarmkit/pull/1773)
+- Refuse a swarm spec not named "default" [#29152](https://github.com/docker/docker/pull/29152)
+
+### Networking
+
+* Update libnetwork [#29004](https://github.com/docker/docker/pull/29004) [#29146](https://github.com/docker/docker/pull/29146)
+  - Fix panic in embedded DNS [docker/libnetwork#1561](https://github.com/docker/libnetwork/pull/1561)
+  - Fix unmarshalling panic when passing `--link-local-ip` on a global scope network [docker/libnetwork#1564](https://github.com/docker/libnetwork/pull/1564)
+  - Fix panic when a network plugin returns nil StaticRoutes [docker/libnetwork#1563](https://github.com/docker/libnetwork/pull/1563)
+  - Fix panic in osl.(*networkNamespace).DeleteNeighbor [docker/libnetwork#1555](https://github.com/docker/libnetwork/pull/1555)
+  - Fix panic in swarm networking concurrent map read/write [docker/libnetwork#1570](https://github.com/docker/libnetwork/pull/1570)
+  * Allow encrypted networks when running docker inside a container [docker/libnetwork#1502](https://github.com/docker/libnetwork/pull/1502)
+  - Do not block autoallocation of the IPv6 pool [docker/libnetwork#1538](https://github.com/docker/libnetwork/pull/1538)
+  - Set a timeout for netlink calls [docker/libnetwork#1557](https://github.com/docker/libnetwork/pull/1557)
+  - Increase the networking local store timeout to one minute [docker/libkv#140](https://github.com/docker/libkv/pull/140)
+  - Fix a panic in libnetwork.(*sandbox).execFunc [docker/libnetwork#1556](https://github.com/docker/libnetwork/pull/1556)
+  - Honor icc=false for internal networks [docker/libnetwork#1525](https://github.com/docker/libnetwork/pull/1525)
+
+### Logging
+
+* Update syslog log driver [#29150](https://github.com/docker/docker/pull/29150)
+
+### Contrib
+
+- Run "dnf upgrade" before installing in Fedora [#29150](https://github.com/docker/docker/pull/29150)
+- Add build-date back to RPM packages [#29150](https://github.com/docker/docker/pull/29150)
+- Change the deb package filename to include the distro, to distinguish between distro code names [#27829](https://github.com/docker/docker/pull/27829)
+
+## 1.12.3 (2016-10-26)
+
+**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm
+based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When
+upgrading from an older version of docker, the upgrade process may not
+automatically install the updated version of the unit file, or may fail to start
+the docker service if:
+
+- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or
+- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive
+
+Starting the docker service will produce an error:
+
+    Failed to start docker.service: Unit docker.socket failed to load: No such file or directory.
+
+or
+
+    no sockets found via socket activation: make sure the service was started by systemd.
+
+To resolve this:
+
+- Back up the current version of the unit file, and replace the file with the
+  [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm)
+- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present
+- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present).
+
+After making those changes, run `sudo systemctl daemon-reload`, and `sudo
+systemctl restart docker` to reload changes and (re)start the docker daemon.
+
+### Runtime
+
+- Fix ambient capability usage in containers (CVE-2016-8867) [#27610](https://github.com/docker/docker/pull/27610)
+- Prevent a deadlock in libcontainerd for Windows [#27136](https://github.com/docker/docker/pull/27136)
+- Fix error reporting in CopyFileWithTar [#27075](https://github.com/docker/docker/pull/27075)
+* Reset health status to starting when a container is restarted [#27387](https://github.com/docker/docker/pull/27387)
+* Properly handle shared mount propagation in the storage directory [#27609](https://github.com/docker/docker/pull/27609)
+- Fix docker exec [#27610](https://github.com/docker/docker/pull/27610)
+- Fix backward compatibility with containerd's events log [#27693](https://github.com/docker/docker/pull/27693)
+
+### Swarm Mode
+
+- Fix conversion of restart-policy [#27062](https://github.com/docker/docker/pull/27062)
+* Update Swarmkit [#27554](https://github.com/docker/docker/pull/27554)
+  * Avoid restarting a task that has already been restarted [docker/swarmkit#1305](https://github.com/docker/swarmkit/pull/1305)
+  * Allow duplicate published ports when they use different protocols [docker/swarmkit#1632](https://github.com/docker/swarmkit/pull/1632)
+  * Allow multiple randomly assigned published ports on a service [docker/swarmkit#1657](https://github.com/docker/swarmkit/pull/1657)
+  - Fix panic when allocations happen at init time [docker/swarmkit#1651](https://github.com/docker/swarmkit/pull/1651)
+
+### Networking
+
+* Update libnetwork [#27559](https://github.com/docker/docker/pull/27559)
+  - Fix race in serializing sandbox to string [docker/libnetwork#1495](https://github.com/docker/libnetwork/pull/1495)
+  - Fix race during deletion [docker/libnetwork#1503](https://github.com/docker/libnetwork/pull/1503)
+  * Reset endpoint port info on connectivity revoke in the bridge driver [docker/libnetwork#1504](https://github.com/docker/libnetwork/pull/1504)
+  - Fix a deadlock in networking code [docker/libnetwork#1507](https://github.com/docker/libnetwork/pull/1507)
+  - Fix a race in load balancer state [docker/libnetwork#1512](https://github.com/docker/libnetwork/pull/1512)
+
+### Logging
+
+* Update fluent-logger-golang to v1.2.1 [#27474](https://github.com/docker/docker/pull/27474)
+
+### Contrib
+
+* Update buildtags for armhf ubuntu-trusty [#27327](https://github.com/docker/docker/pull/27327)
+* Add AppArmor to runc buildtags for armhf [#27421](https://github.com/docker/docker/pull/27421)
+
+## 1.12.2 (2016-10-11)
+
+**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm
+based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When
+upgrading from an older version of docker, the upgrade process may not
+automatically install the updated version of the unit file, or may fail to start
+the docker service if:
+
+- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or
+- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive
+
+Starting the docker service will produce an error:
+
+    Failed to start docker.service: Unit docker.socket failed to load: No such file or directory.
+
+or
+
+    no sockets found via socket activation: make sure the service was started by systemd.
+
+To resolve this:
+
+- Back up the current version of the unit file, and replace the file with the
+  [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm)
+- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present
+- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present).
+
+After making those changes, run `sudo systemctl daemon-reload`, and `sudo
+systemctl restart docker` to reload changes and (re)start the docker daemon.
+
+### Runtime
+
+- Fix a panic due to a race condition filtering `docker ps` [#26049](https://github.com/docker/docker/pull/26049)
+* Implement retry logic to prevent "Unable to remove filesystem" errors when using the aufs storage driver [#26536](https://github.com/docker/docker/pull/26536)
+* Prevent devicemapper from removing device symlinks if `dm.use_deferred_removal` is enabled [#24740](https://github.com/docker/docker/pull/24740)
+- Fix an issue where the CLI did not return correct exit codes if a command was run with invalid options [#26777](https://github.com/docker/docker/pull/26777)
+- Fix a panic due to a bug in stdout/stderr processing in health checks [#26507](https://github.com/docker/docker/pull/26507)
+- Fix handling of the children of exec'd processes [#26874](https://github.com/docker/docker/pull/26874)
+- Fix the exec form of HEALTHCHECK CMD [#26208](https://github.com/docker/docker/pull/26208)
+
+### Networking
+
+- Fix a daemon start panic on armv5 [#24315](https://github.com/docker/docker/issues/24315)
+* Vendor libnetwork [#26879](https://github.com/docker/docker/pull/26879) [#26953](https://github.com/docker/docker/pull/26953)
+  * Avoid returning early on agent join failures [docker/libnetwork#1473](https://github.com/docker/libnetwork/pull/1473)
+  - Fix service published port cleanup issues [docker/libnetwork#1432](https://github.com/docker/libnetwork/pull/1432) [docker/libnetwork#1433](https://github.com/docker/libnetwork/pull/1433)
+  * Recover properly from transient gossip failures [docker/libnetwork#1446](https://github.com/docker/libnetwork/pull/1446)
+  * Disambiguate node names known to the gossip cluster to avoid node name collisions [docker/libnetwork#1451](https://github.com/docker/libnetwork/pull/1451)
+  * Honor the user-provided listen address for gossip [docker/libnetwork#1460](https://github.com/docker/libnetwork/pull/1460)
+  * Allow reachability via published ports across services on the same host [docker/libnetwork#1398](https://github.com/docker/libnetwork/pull/1398)
+  * Change the ingress sandbox name from a random id to just `ingress_sbox` [docker/libnetwork#1449](https://github.com/docker/libnetwork/pull/1449)
+  - Disable service discovery in the ingress network [docker/libnetwork#1489](https://github.com/docker/libnetwork/pull/1489)
+
+### Swarm Mode
+
+* Fix remote detection of a node's address when it joins the cluster [#26211](https://github.com/docker/docker/pull/26211)
+* Vendor SwarmKit [#26765](https://github.com/docker/docker/pull/26765)
+  * Bounce the session after a failed status update [docker/swarmkit#1539](https://github.com/docker/swarmkit/pull/1539)
+  - Fix possible raft deadlocks [docker/swarmkit#1537](https://github.com/docker/swarmkit/pull/1537)
+  - Fix panic and endpoint leak when a service is updated with no endpoints [docker/swarmkit#1481](https://github.com/docker/swarmkit/pull/1481)
+  * Produce an error if the same port is published twice on `service create` or `service update` [docker/swarmkit#1495](https://github.com/docker/swarmkit/pull/1495)
+  - Fix an issue where changes to a service were not detected, resulting in the service not being updated [docker/swarmkit#1497](https://github.com/docker/swarmkit/pull/1497)
+  - Do not allow service creation on the ingress network [docker/swarmkit#1600](https://github.com/docker/swarmkit/pull/1600)
+
+### Contrib
+
+* Update the debian sysv-init script to use `dockerd` instead of `docker daemon` [#25869](https://github.com/docker/docker/pull/25869)
+* Improve stability when running the docker client on macOS Sierra [#26875](https://github.com/docker/docker/pull/26875)
+- Fix installation on Debian Stretch [#27184](https://github.com/docker/docker/pull/27184)
+
+### Windows
+
+- Fix an issue where arrow-key navigation did not work when running the docker client in ConEmu [#25578](https://github.com/docker/docker/pull/25578)
+
+## 1.12.1 (2016-08-18)
+
+**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm
+based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When
+upgrading from an older version of docker, the upgrade process may not
+automatically install the updated version of the unit file, or may fail to start
+the docker service if:
+
+- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or
+- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive
+
+Starting the docker service will produce an error:
+
+    Failed to start docker.service: Unit docker.socket failed to load: No such file or directory.
+
+or
+
+    no sockets found via socket activation: make sure the service was started by systemd.
+
+To resolve this:
+
+- Back up the current version of the unit file, and replace the file with the
+  [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm)
+- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present
+- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present).
+
+After making those changes, run `sudo systemctl daemon-reload`, and `sudo
+systemctl restart docker` to reload changes and (re)start the docker daemon.
+
+### Client
+
+* Add `Joined at` information in `node inspect --pretty` [#25512](https://github.com/docker/docker/pull/25512)
+- Fix a crash on `service inspect` [#25454](https://github.com/docker/docker/pull/25454)
+- Fix an issue preventing `service update --env-add` from working as intended [#25427](https://github.com/docker/docker/pull/25427)
+- Fix an issue preventing `service update --publish-add` from working as intended [#25428](https://github.com/docker/docker/pull/25428)
+- Remove the `service update --network-add` and `service update --network-rm` flags
+  because this feature is not yet implemented in 1.12, but was inadvertently added
+  to the client in 1.12.0 [#25646](https://github.com/docker/docker/pull/25646)
+
+### Contrib
+
++ Official ARM installation for Debian Jessie, Ubuntu Trusty, and Raspbian Jessie [#24815](https://github.com/docker/docker/pull/24815) [#25637](https://github.com/docker/docker/pull/25637)
+- Add a selinux policy per distro/version, fixing an issue that prevented successful installation on Fedora 24 and Oracle Linux [#25334](https://github.com/docker/docker/pull/25334) [#25593](https://github.com/docker/docker/pull/25593)
+
+### Networking
+
+- Fix an issue that prevented containers from being accessed by hostname with the Docker overlay driver in swarm mode [#25603](https://github.com/docker/docker/pull/25603) [#25648](https://github.com/docker/docker/pull/25648)
+- Fix random network issues on services with published ports [#25603](https://github.com/docker/docker/pull/25603)
+- Fix unreliable inter-service communication after scaling down and up [#25603](https://github.com/docker/docker/pull/25603)
+- Fix an issue where removing all tasks on a node and adding them back breaks connectivity with other services [#25603](https://github.com/docker/docker/pull/25603)
+- Fix an issue where a task that fails to start results in a race, causing a `network xxx not found` error that masks the actual error [#25550](https://github.com/docker/docker/pull/25550)
+- Relax validation of SRV records for external services that use SRV records not formatted according to RFC 2782 [#25739](https://github.com/docker/docker/pull/25739)
+
+### Plugins (experimental)
+
+* Make daemon events listen for plugin lifecycle events [#24760](https://github.com/docker/docker/pull/24760)
+* Check plugin state before enabling a plugin [#25033](https://github.com/docker/docker/pull/25033)
+- Remove the plugin root from the filesystem on `plugin rm` [#25187](https://github.com/docker/docker/pull/25187)
+- Prevent a deadlock when more than one plugin is installed [#25384](https://github.com/docker/docker/pull/25384)
+
+### Runtime
+
+* Mask join tokens in daemon logs [#25346](https://github.com/docker/docker/pull/25346)
+- Fix `docker ps --filter` causing the results to no longer be sorted by creation time [#25387](https://github.com/docker/docker/pull/25387)
+- Fix various crashes [#25053](https://github.com/docker/docker/pull/25053)
+
+### Security
+
+* Add `/proc/timer_list` to the masked paths list to prevent information leaks from the host [#25630](https://github.com/docker/docker/pull/25630)
+* Allow systemd to run with only `--cap-add SYS_ADMIN` rather than having to also add `--cap-add DAC_READ_SEARCH` or disable seccomp filtering [#25567](https://github.com/docker/docker/pull/25567)
+
+### Swarm
+
+- Fix an issue where the swarm can get stuck electing a new leader after quorum is lost [#25055](https://github.com/docker/docker/issues/25055)
+- Fix unwanted rescheduling of containers after a leader failover [#25017](https://github.com/docker/docker/issues/25017)
+- Change the swarm root CA key to the P256 curve [swarmkit#1376](https://github.com/docker/swarmkit/pull/1376)
+- Allow forced removal of a node from a swarm [#25159](https://github.com/docker/docker/pull/25159)
+- Fix a connection leak when a node leaves a swarm [swarmkit/#1277](https://github.com/docker/swarmkit/pull/1277)
+- Backdate swarm certificates by one hour to tolerate more clock skew [swarmkit/#1243](https://github.com/docker/swarmkit/pull/1243)
+- Avoid high CPU use with many unschedulable tasks [swarmkit/#1287](https://github.com/docker/swarmkit/pull/1287)
+- Fix an issue with global tasks not starting up [swarmkit/#1295](https://github.com/docker/swarmkit/pull/1295)
+- Garbage collect raft logs [swarmkit/#1327](https://github.com/docker/swarmkit/pull/1327)
+
+### Volume
+
+- Persist local volume options after a daemon restart [#25316](https://github.com/docker/docker/pull/25316)
+- Fix an issue where the mount ID was not returned on volume unmount [#25333](https://github.com/docker/docker/pull/25333)
+- Fix an issue where a volume mount could inadvertently create a bind mount [#25309](https://github.com/docker/docker/pull/25309)
+- `docker service create --mount type=bind,...` now correctly validates that the source path exists, instead of creating it [#25494](https://github.com/docker/docker/pull/25494)
+
+## 1.12.0 (2016-07-28)
+
+**IMPORTANT**: Docker 1.12.0 ships with an updated systemd unit file for rpm
+based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When
+upgrading from an older version of docker, the upgrade process may not
+automatically install the updated version of the unit file, or may fail to start
+the docker service if:
+
+- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or
+- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive
+
+Starting the docker service will produce an error:
+
+    Failed to start docker.service: Unit docker.socket failed to load: No such file or directory.
+
+or
+
+    no sockets found via socket activation: make sure the service was started by systemd.
+
+To resolve this:
+
+- Back up the current version of the unit file, and replace the file with the
+  [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm)
+- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present
+- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present).
+
+After making those changes, run `sudo systemctl daemon-reload`, and `sudo
+systemctl restart docker` to reload changes and (re)start the docker daemon.
+
+**IMPORTANT**: With Docker 1.12, a Linux docker installation now has two
+additional binaries: `dockerd` and `docker-proxy`. If you have scripts for
+installing docker, please make sure to update them accordingly.
+
+### Builder
+
++ New `HEALTHCHECK` Dockerfile instruction to support user-defined healthchecks (see the sketch after this list) [#23218](https://github.com/docker/docker/pull/23218)
++ New `SHELL` Dockerfile instruction to specify the default shell when using the shell form for commands in a Dockerfile [#22489](https://github.com/docker/docker/pull/22489)
++ Add `#escape=` Dockerfile directive to support platform-specific parsing of file paths in a Dockerfile [#22268](https://github.com/docker/docker/pull/22268)
++ Add support for comments in `.dockerignore` [#23111](https://github.com/docker/docker/pull/23111)
+* Support for UTF-8 in Dockerfiles [#23372](https://github.com/docker/docker/pull/23372)
+* Skip UTF-8 BOM bytes from `Dockerfile` and `.dockerignore` if they exist [#23234](https://github.com/docker/docker/pull/23234)
+* Windows: support for `ARG` to match Linux [#22508](https://github.com/docker/docker/pull/22508)
+- Fix error message when building using a daemon with the bridge network disabled [#22932](https://github.com/docker/docker/pull/22932)
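+
+As an illustration of the new `HEALTHCHECK` instruction (a sketch; the base image and probe command are examples, and assume `curl` is available in the image):
+
+```bash
+cat > Dockerfile <<'EOF'
+FROM nginx
+HEALTHCHECK --interval=30s --timeout=3s --retries=3 \
+  CMD curl -f http://localhost/ || exit 1
+EOF
+docker build -t web-healthy .
+# The container's health state then appears in `docker ps` output
+```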
+
+### Contrib
+
+* Enable seccomp for CentOS 7 and Oracle Linux 7 [#22344](https://github.com/docker/docker/pull/22344)
+- Remove MountFlags in the systemd unit to allow shared mount propagation [#22806](https://github.com/docker/docker/pull/22806)
+
+### Distribution
+
++ Add `--max-concurrent-downloads` and `--max-concurrent-uploads` daemon flags, useful for situations where network connections don't support multiple downloads/uploads [#22445](https://github.com/docker/docker/pull/22445)
+* Registry operations now honor the `ALL_PROXY` environment variable [#22316](https://github.com/docker/docker/pull/22316)
+* Provide more information to the user on `docker load` [#23377](https://github.com/docker/docker/pull/23377)
+* Always save registry digest metadata about images pushed and pulled [#23996](https://github.com/docker/docker/pull/23996)
+
+### Logging
+
++ The syslog logging driver now supports DGRAM sockets [#21613](https://github.com/docker/docker/pull/21613)
++ Add `--details` option to `docker logs` to also display log tags [#21889](https://github.com/docker/docker/pull/21889)
++ Enable the syslog logger to have access to env and labels [#21724](https://github.com/docker/docker/pull/21724)
++ Add a syslog-format option, `rfc5424micro`, to allow microsecond resolution in syslog timestamps (see the sketch after this list) [#21844](https://github.com/docker/docker/pull/21844)
+* Inherit the daemon log options when creating containers [#21153](https://github.com/docker/docker/pull/21153)
+* Remove the `docker/` prefix from log message tags and replace it with `{{.DaemonName}}`, so that users have the option of changing the prefix [#22384](https://github.com/docker/docker/pull/22384)
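+
+For instance, the format and tag options combine per container like this (a sketch; the syslog address is an example):
+
+```bash
+docker run -d \
+  --log-driver=syslog \
+  --log-opt syslog-address=udp://192.168.0.42:514 \
+  --log-opt syslog-format=rfc5424micro \
+  --log-opt tag="{{.DaemonName}}/{{.Name}}" \
+  nginx
+```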
+
+### Networking
+
++ Built-in virtual-IP-based internal and ingress load-balancing using IPVS [#23361](https://github.com/docker/docker/pull/23361)
++ Routing mesh using the ingress overlay network [#23361](https://github.com/docker/docker/pull/23361)
++ Secured multi-host overlay networking using encrypted control-plane and data-plane [#23361](https://github.com/docker/docker/pull/23361)
++ The MacVlan driver is out of experimental [#23524](https://github.com/docker/docker/pull/23524)
++ Add `driver` filter to `network ls` [#22319](https://github.com/docker/docker/pull/22319)
++ Add `network` filter to `docker ps --filter` [#23300](https://github.com/docker/docker/pull/23300)
++ Add `--link-local-ip` flag to `create`, `run` and `network connect` to specify a container's link-local address [#23415](https://github.com/docker/docker/pull/23415)
++ Add network label filter support [#21495](https://github.com/docker/docker/pull/21495)
+* Remove the dependency on an external KV store for overlay networking in swarm mode [#23361](https://github.com/docker/docker/pull/23361)
+* Add the container's short-id as a default network alias [#21901](https://github.com/docker/docker/pull/21901)
+* `run` options `--dns` and `--net=host` are no longer mutually exclusive [#22408](https://github.com/docker/docker/pull/22408)
+- Fix DNS issue when renaming containers with generated names [#22716](https://github.com/docker/docker/pull/22716)
+- Allow both `network inspect -f {{.Id}}` and `network inspect -f {{.ID}}` to address inconsistency with inspect output [#23226](https://github.com/docker/docker/pull/23226)
+
+### Plugins (experimental)
+
++ New `plugin` command to manage plugins with `install`, `enable`, `disable`, `rm`, `inspect`, `set` subcommands [#23446](https://github.com/docker/docker/pull/23446)
+
+### Remote API (v1.24) & Client
+
++ Split the binary into two: `docker` (client) and `dockerd` (daemon) [#20639](https://github.com/docker/docker/pull/20639)
++ Add `before` and `since` filters to `docker images --filter` [#22908](https://github.com/docker/docker/pull/22908)
++ Add `--limit` option to `docker search` [#23107](https://github.com/docker/docker/pull/23107)
++ Add `--filter` option to `docker search` [#22369](https://github.com/docker/docker/pull/22369)
++ Add security options to `docker info` output [#21172](https://github.com/docker/docker/pull/21172) [#23520](https://github.com/docker/docker/pull/23520)
++ Add insecure registries to `docker info` output [#20410](https://github.com/docker/docker/pull/20410)
++ Extend Docker authorization with TLS user information [#21556](https://github.com/docker/docker/pull/21556)
++ devicemapper: expose Minimum Thin Pool Free Space through `docker info` [#21945](https://github.com/docker/docker/pull/21945)
+* The API now returns a JSON object when an error occurs, making it more consistent [#22880](https://github.com/docker/docker/pull/22880)
+- Prevent `docker run -i --restart` from hanging on exit [#22777](https://github.com/docker/docker/pull/22777)
+- Fix API/CLI discrepancy on hostname validation [#21641](https://github.com/docker/docker/pull/21641)
+- Fix a discrepancy in the format of sizes in `stats`, from HumanSize to BytesSize [#21773](https://github.com/docker/docker/pull/21773)
+- authz: when a request is denied, return a forbidden status code (403) [#22448](https://github.com/docker/docker/pull/22448)
+- Windows: fix tty-related display issues [#23878](https://github.com/docker/docker/pull/23878)
+
+### Runtime
+
++ Split the userland proxy into a separate binary (`docker-proxy`) [#23312](https://github.com/docker/docker/pull/23312)
++ Add `--live-restore` daemon flag to keep containers running when the daemon shuts down, and regain control on startup [#23213](https://github.com/docker/docker/pull/23213)
++ Ability to add OCI-compatible runtimes (via the `--add-runtime` daemon flag) and select one with `--runtime` on `create` and `run` (see the sketch after this list) [#22983](https://github.com/docker/docker/pull/22983)
++ New `overlay2` graphdriver for Linux 4.0+ with multiple lower directory support [#22126](https://github.com/docker/docker/pull/22126)
++ New load/save image events [#22137](https://github.com/docker/docker/pull/22137)
++ Add support for reloading the daemon configuration through systemd [#22446](https://github.com/docker/docker/pull/22446)
++ Add disk quota support for btrfs [#19651](https://github.com/docker/docker/pull/19651)
++ Add disk quota support for zfs [#21946](https://github.com/docker/docker/pull/21946)
++ Add support for `docker run --pid=container:<id>` [#22481](https://github.com/docker/docker/pull/22481)
++ Align the default seccomp profile with the selected capabilities [#22554](https://github.com/docker/docker/pull/22554)
++ Add a `daemon reload` event when the daemon reloads its configuration [#22590](https://github.com/docker/docker/pull/22590)
++ Add `trace` capability in the pprof profiler to show execution traces in binary form [#22715](https://github.com/docker/docker/pull/22715)
++ Add a `detach` event [#22898](https://github.com/docker/docker/pull/22898)
++ Add support for setting sysctls with `--sysctl` [#19265](https://github.com/docker/docker/pull/19265)
++ Add `--storage-opt` flag to `create` and `run`, allowing `size` to be set on devicemapper [#19367](https://github.com/docker/docker/pull/19367)
++ Add `--oom-score-adjust` daemon flag with a default value of `-500`, making the daemon less likely to be killed before containers [#24516](https://github.com/docker/docker/pull/24516)
+* Undeprecate the `-c` short alias of `--cpu-shares` on `run`, `build`, `create`, `update` [#22621](https://github.com/docker/docker/pull/22621)
+* Prevent the aufs and overlay graphdrivers from being used on an eCryptfs mount [#23121](https://github.com/docker/docker/pull/23121)
+- Fix issues with tmpfs mount ordering [#22329](https://github.com/docker/docker/pull/22329)
+- Created containers are no longer listed on `docker ps -a -f exited=0` [#21947](https://github.com/docker/docker/pull/21947)
+- Fix an issue where containers are stuck in a "Removal In Progress" state [#22423](https://github.com/docker/docker/pull/22423)
+- Fix a bug that returned an HTTP 500 instead of a 400 when not specifying a command on run/create [#22762](https://github.com/docker/docker/pull/22762)
+- Fix a bug with `--detach-keys` whereby input matching a prefix of the detach key was not preserved [#22943](https://github.com/docker/docker/pull/22943)
+- SELinux labeling is now disabled when using `--privileged` mode [#22993](https://github.com/docker/docker/pull/22993)
+- If volume-mounted into a container, `/etc/hosts`, `/etc/resolv.conf`, and `/etc/hostname` are no longer SELinux-relabeled [#22993](https://github.com/docker/docker/pull/22993)
+- Fix inconsistency in `--tmpfs` behavior regarding mount options [#22438](https://github.com/docker/docker/pull/22438)
+- Fix an issue where the daemon hangs at startup [#23148](https://github.com/docker/docker/pull/23148)
+- Ignore SIGPIPE events to prevent journald restarts from crashing docker in some cases [#22460](https://github.com/docker/docker/pull/22460)
+- Containers are no longer removed from the stats list on error [#20835](https://github.com/docker/docker/pull/20835)
+- Fix the `on-failure` restart policy when the daemon restarts [#20853](https://github.com/docker/docker/pull/20853)
+- Fix an issue with `stats` when a container is using another container's network [#21904](https://github.com/docker/docker/pull/21904)
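+
+For example, registering and selecting an alternative OCI runtime looks like this (a sketch; the runtime name and path are illustrative):
+
+```bash
+# Daemon side: register a runtime under a name
+dockerd --add-runtime my-runc=/usr/local/bin/my-runc
+
+# Client side: select it for a single container
+docker run --rm --runtime=my-runc alpine true
+```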
+
+### Swarm Mode
+
++ New `swarm` command to manage swarms with `init`, `join`, `join-token`, `leave`, `update` subcommands (see the sketch after this list) [#23361](https://github.com/docker/docker/pull/23361) [#24823](https://github.com/docker/docker/pull/24823)
++ New `service` command to manage swarm-wide services with `create`, `inspect`, `update`, `rm`, `ps` subcommands [#23361](https://github.com/docker/docker/pull/23361) [#25140](https://github.com/docker/docker/pull/25140)
++ New `node` command to manage nodes with `accept`, `promote`, `demote`, `inspect`, `update`, `ps`, `ls` and `rm` subcommands [#23361](https://github.com/docker/docker/pull/23361) [#25140](https://github.com/docker/docker/pull/25140)
++ (experimental) New `stack` and `deploy` commands to manage and deploy multi-service applications [#23522](https://github.com/docker/docker/pull/23522) [#25140](https://github.com/docker/docker/pull/25140)
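+
+A minimal end-to-end sketch of the new commands (service name, replica count, and image are illustrative):
+
+```bash
+# On the first node: create a swarm and print the worker join command
+docker swarm init
+docker swarm join-token worker
+
+# Create a replicated service, then inspect its tasks and the nodes
+docker service create --name web --replicas 3 --publish 80:80 nginx
+docker service ps web
+docker node ls
+```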
+
+### Volume
+
++ Add support for local and global volume scopes (analogous to network scopes) [#22077](https://github.com/docker/docker/pull/22077)
++ Allow volume drivers to provide a `Status` field [#21006](https://github.com/docker/docker/pull/21006)
++ Add name/driver filter support for volumes [#21361](https://github.com/docker/docker/pull/21361)
+* Mount/Unmount operations now receive an opaque ID to allow volume drivers to differentiate between two callers [#21015](https://github.com/docker/docker/pull/21015)
+- Fix an issue preventing a volume from being removed in a corner case [#22103](https://github.com/docker/docker/pull/22103)
+- Windows: Enable auto-creation of the host path to match Linux [#22094](https://github.com/docker/docker/pull/22094)
+
+### Deprecation
+
+* The environment variables `DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE` and `DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE` have been renamed
+  to `DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE` and `DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE` respectively [#22574](https://github.com/docker/docker/pull/22574)
+* Remove the deprecated `syslog-tag`, `gelf-tag`, and `fluentd-tag` log options in favor of the more generic `tag` option [#22620](https://github.com/docker/docker/pull/22620)
+* Remove the deprecated feature of passing HostConfig at API container start [#22570](https://github.com/docker/docker/pull/22570)
+* Remove the deprecated `-f`/`--force` flag on docker tag [#23090](https://github.com/docker/docker/pull/23090)
+* Remove the deprecated `/containers/<id>/copy` endpoint [#22149](https://github.com/docker/docker/pull/22149)
+* Remove the deprecated `docker ps` flags `--since` and `--before` [#22138](https://github.com/docker/docker/pull/22138)
+* Deprecate the old 3-args form of `docker import` [#23273](https://github.com/docker/docker/pull/23273)
+
+## 1.11.2 (2016-05-31)
+
+### Networking
+
+- Fix a stale endpoint issue on overlay networks during ungraceful restart ([#23015](https://github.com/docker/docker/pull/23015))
+- Fix an issue where the wrong port could be reported by `docker inspect/ps/port` ([#22997](https://github.com/docker/docker/pull/22997))
+
+### Runtime
+
+- Fix a potential panic when running `docker build` ([#23032](https://github.com/docker/docker/pull/23032))
+- Fix interpretation of the `--user` parameter ([#22998](https://github.com/docker/docker/pull/22998))
+- Fix a bug preventing container statistics from being correctly reported ([#22955](https://github.com/docker/docker/pull/22955))
+- Fix an issue preventing containers from being restarted after a daemon restart ([#22947](https://github.com/docker/docker/pull/22947))
+- Fix issues when running 32-bit binaries on Ubuntu 16.04 ([#22922](https://github.com/docker/docker/pull/22922))
+- Fix a possible deadlock on image deletion and container attach ([#22918](https://github.com/docker/docker/pull/22918))
+- Fix an issue where containers fail to start after a daemon restart if they depend on a containerized cluster store ([#22561](https://github.com/docker/docker/pull/22561))
+- Fix an issue causing `docker ps` to hang on CentOS when using devicemapper ([#22168](https://github.com/docker/docker/pull/22168), [#23067](https://github.com/docker/docker/pull/23067))
+- Fix a bug preventing `docker exec` into a container when using devicemapper ([#22168](https://github.com/docker/docker/pull/22168), [#23067](https://github.com/docker/docker/pull/23067))
+
+## 1.11.1 (2016-04-26)
+
+### Distribution
+
+- Fix the schema2 manifest media type to be of type `application/vnd.docker.container.image.v1+json` ([#21949](https://github.com/docker/docker/pull/21949))
+
+### Documentation
+
++ Add missing API documentation for changes introduced with 1.11.0 ([#22048](https://github.com/docker/docker/pull/22048))
+
+### Builder
+
+* Append labels passed to `docker build` as arguments as an implicit `LABEL` command at the end of the processed `Dockerfile` ([#22184](https://github.com/docker/docker/pull/22184))
+
+### Networking
+
+- Fix a panic that would occur when forwarding DNS queries ([#22261](https://github.com/docker/docker/pull/22261))
+- Fix an issue where OS threads could end up within an incorrect network namespace when using user-defined networks ([#22261](https://github.com/docker/docker/pull/22261))
+
+### Runtime
+
+- Fix a bug preventing labels configuration from being reloaded via the config file ([#22299](https://github.com/docker/docker/pull/22299))
+- Fix a regression where containers mounting `/var/run` would prevent other containers from being removed ([#22256](https://github.com/docker/docker/pull/22256))
+- Fix an issue where it would be impossible to update both `memory-swap` and `memory` values together ([#22255](https://github.com/docker/docker/pull/22255))
+- Fix a regression from 1.11.0 where the `/auth` endpoint would not initialize `serveraddress` if it is not provided ([#22254](https://github.com/docker/docker/pull/22254))
+- Add missing cleanup of container temporary files when cancelling a scheduled restart ([#22237](https://github.com/docker/docker/pull/22237))
+- Remove scary error message when no restart policy is specified ([#21993](https://github.com/docker/docker/pull/21993))
+- Fix a panic that would occur when plugins were activated via the json spec ([#22191](https://github.com/docker/docker/pull/22191))
+- Fix restart backoff logic to correctly reset the delay if the container ran for at least 10 seconds ([#22125](https://github.com/docker/docker/pull/22125))
+- Remove error message when a container restart gets cancelled ([#22123](https://github.com/docker/docker/pull/22123))
+- Fix an issue where `docker` would not correctly clean up after `docker exec` ([#22121](https://github.com/docker/docker/pull/22121))
+- Fix a panic that could occur when serving concurrent `docker stats` commands ([#22120](https://github.com/docker/docker/pull/22120))
+- Revert deprecation of non-existent host directories auto-creation ([#22065](https://github.com/docker/docker/pull/22065))
+- Hide misleading rpc error on daemon shutdown ([#22058](https://github.com/docker/docker/pull/22058))
+
+## 1.11.0 (2016-04-13)
+
+**IMPORTANT**: With Docker 1.11, a Linux docker installation is now made of 4 binaries (`docker`, [`docker-containerd`](https://github.com/docker/containerd), [`docker-containerd-shim`](https://github.com/docker/containerd) and [`docker-runc`](https://github.com/opencontainers/runc)). If you have scripts relying on docker being a single static binary, please make sure to update them. Interaction with the daemon stays the same otherwise; the usage of the other binaries should be transparent. A Windows docker installation remains a single binary, `docker.exe`.
+
+### Builder
+
+- Fix a bug where Docker would not use the correct uid/gid when processing the `WORKDIR` command ([#21033](https://github.com/docker/docker/pull/21033))
+- Fix a bug where copy operations with userns would not use the proper uid/gid ([#20782](https://github.com/docker/docker/pull/20782), [#21162](https://github.com/docker/docker/pull/21162))
+
+### Client
+
+* Usage of the `:` separator for security options has been deprecated. `=` should be used instead ([#21232](https://github.com/docker/docker/pull/21232))
++ The client user agent is now passed to the registry on `pull`, `build`, `push`, `login` and `search` operations ([#21306](https://github.com/docker/docker/pull/21306), [#21373](https://github.com/docker/docker/pull/21373))
+* Allow setting the Domainname and Hostname separately through the API ([#20200](https://github.com/docker/docker/pull/20200))
+* Docker info will now warn users if it cannot detect the kernel version or the operating system ([#21128](https://github.com/docker/docker/pull/21128))
+- Fix an issue where `docker stats --no-stream` output could be all 0s ([#20803](https://github.com/docker/docker/pull/20803))
+- Fix a bug where some newly started containers would not appear in a running `docker stats` command ([#20792](https://github.com/docker/docker/pull/20792))
+* Post-processing is no longer enabled for linux-cgo terminals ([#20587](https://github.com/docker/docker/pull/20587))
+- Values to `--hostname` are now refused if they do not comply with [RFC1123](https://tools.ietf.org/html/rfc1123) ([#20566](https://github.com/docker/docker/pull/20566))
++ Docker learned how to use a SOCKS proxy ([#20366](https://github.com/docker/docker/pull/20366), [#18373](https://github.com/docker/docker/pull/18373))
++ Docker now supports external credential stores ([#20107](https://github.com/docker/docker/pull/20107))
+* `docker ps` now supports displaying the list of volumes mounted inside a container ([#20017](https://github.com/docker/docker/pull/20017))
+* `docker info` now also reports Docker's root directory location ([#19986](https://github.com/docker/docker/pull/19986))
+- Docker now prohibits logging in with an empty username (spaces are trimmed) ([#19806](https://github.com/docker/docker/pull/19806))
+* Docker events attributes are now sorted by key ([#19761](https://github.com/docker/docker/pull/19761))
+* `docker ps` no longer shows exported ports for stopped containers ([#19483](https://github.com/docker/docker/pull/19483))
+- Docker now cleans up after itself if a save/export command fails ([#17849](https://github.com/docker/docker/pull/17849))
+* Docker load learned how to display a progress bar ([#17329](https://github.com/docker/docker/pull/17329), [#20078](https://github.com/docker/docker/pull/20078))
+
+### Distribution
+
+- Fix a panic that occurred when pulling an image with 0 layers ([#21222](https://github.com/docker/docker/pull/21222))
+- Fix a panic that could occur on error while pushing to a registry with a misconfigured token service ([#21212](https://github.com/docker/docker/pull/21212))
++ All first-level delegation roles are now signed when doing a trusted push ([#21046](https://github.com/docker/docker/pull/21046))
++ OAuth support for registries was added ([#20970](https://github.com/docker/docker/pull/20970))
+* `docker login` now handles tokens using the implementation found in [docker/distribution](https://github.com/docker/distribution) ([#20832](https://github.com/docker/docker/pull/20832))
+* `docker login` will no longer prompt for an email ([#20565](https://github.com/docker/docker/pull/20565))
+* Docker will now fall back to registry V1 if no basic auth credentials are available ([#20241](https://github.com/docker/docker/pull/20241))
+* Docker will now try to resume layer downloads where it left off after a network error/timeout ([#19840](https://github.com/docker/docker/pull/19840))
+- Fix the generated manifest mediaType when pushing cross-repository ([#19509](https://github.com/docker/docker/pull/19509))
+- Fix docker requesting additional push credentials when pulling an image if Content Trust is enabled ([#20382](https://github.com/docker/docker/pull/20382))
+
+### Logging
+
+- Fix a race in the journald log driver ([#21311](https://github.com/docker/docker/pull/21311))
+* The Docker syslog driver now uses the RFC-5424 format when emitting logs ([#20121](https://github.com/docker/docker/pull/20121))
+* The Docker GELF log driver now allows specifying the compression algorithm and level via the `gelf-compression-type` and `gelf-compression-level` options; see the sketch after this list ([#19831](https://github.com/docker/docker/pull/19831))
+* The Docker daemon learned to output uncolorized logs via the `--raw-logs` option ([#19794](https://github.com/docker/docker/pull/19794))
++ Docker, on the Windows platform, now includes an ETW (Event Tracing for Windows) logging driver named `etwlogs` ([#19689](https://github.com/docker/docker/pull/19689))
+* The journald log driver learned how to handle tags ([#19564](https://github.com/docker/docker/pull/19564))
++ The fluentd log driver learned the following options: `fluentd-address`, `fluentd-buffer-limit`, `fluentd-retry-wait`, `fluentd-max-retries` and `fluentd-async-connect` ([#19439](https://github.com/docker/docker/pull/19439))
++ Docker learned to send logs to Google Cloud via the new `gcplogs` logging driver ([#18766](https://github.com/docker/docker/pull/18766))
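+
+For example, the GELF compression options are set per container (a sketch; the endpoint address is an example):
+
+```bash
+docker run -d \
+  --log-driver=gelf \
+  --log-opt gelf-address=udp://192.168.0.42:12201 \
+  --log-opt gelf-compression-type=gzip \
+  --log-opt gelf-compression-level=6 \
+  nginx
+```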
([#18766](https://github.com/docker/docker/pull/18766)) + + +### Misc + ++ When saving linked images together with `docker save` a subsequent `docker load` will correctly restore their parent/child relationship ([#21385](https://github.com/docker/docker/pull/21385)) ++ Support for building the Docker cli for OpenBSD was added ([#21325](https://github.com/docker/docker/pull/21325)) ++ Labels can now be applied at network, volume and image creation ([#21270](https://github.com/docker/docker/pull/21270)) +* The `dockremap` is now created as a system user ([#21266](https://github.com/docker/docker/pull/21266)) +- Fix a few response body leaks ([#21258](https://github.com/docker/docker/pull/21258)) +- Docker, when run as a service with systemd, will now properly manage its processes cgroups ([#20633](https://github.com/docker/docker/pull/20633)) +* `docker info` now reports the value of cgroup KernelMemory or emits a warning if it is not supported ([#20863](https://github.com/docker/docker/pull/20863)) +* `docker info` now also reports the cgroup driver in use ([#20388](https://github.com/docker/docker/pull/20388)) +* Docker completion is now available on PowerShell ([#19894](https://github.com/docker/docker/pull/19894)) +* `dockerinit` is no more ([#19490](https://github.com/docker/docker/pull/19490),[#19851](https://github.com/docker/docker/pull/19851)) ++ Support for building Docker on arm64 was added ([#19013](https://github.com/docker/docker/pull/19013)) ++ Experimental support for building docker.exe in a native Windows Docker installation ([#18348](https://github.com/docker/docker/pull/18348)) + +### Networking + +- Fix panic if a node is forcibly removed from the cluster ([#21671](https://github.com/docker/docker/pull/21671)) +- Fix "error creating vxlan interface" when starting a container in a Swarm cluster ([#21671](https://github.com/docker/docker/pull/21671)) +* `docker network inspect` will now report all endpoints whether they have an active container or not ([#21160](https://github.com/docker/docker/pull/21160)) ++ Experimental support for the MacVlan and IPVlan network drivers has been added ([#21122](https://github.com/docker/docker/pull/21122)) +* Output of `docker network ls` is now sorted by network name ([#20383](https://github.com/docker/docker/pull/20383)) +- Fix a bug where Docker would allow a network to be created with the reserved `default` name ([#19431](https://github.com/docker/docker/pull/19431)) +* `docker network inspect` returns whether a network is internal or not ([#19357](https://github.com/docker/docker/pull/19357)) ++ Control IPv6 via explicit option when creating a network (`docker network create --ipv6`). 
This shows up as a new `EnableIPv6` field in `docker network inspect` ([#17513](https://github.com/docker/docker/pull/17513)) +* Support for AAAA Records (aka IPv6 Service Discovery) in embedded DNS Server ([#21396](https://github.com/docker/docker/pull/21396)) +- Fix to not forward docker domain IPv6 queries to external servers ([#21396](https://github.com/docker/docker/pull/21396)) +* Multiple A/AAAA records from embedded DNS Server for DNS Round robin ([#21019](https://github.com/docker/docker/pull/21019)) +- Fix endpoint count inconsistency after an ungraceful dameon restart ([#21261](https://github.com/docker/docker/pull/21261)) +- Move the ownership of exposed ports and port-mapping options from Endpoint to Sandbox ([#21019](https://github.com/docker/docker/pull/21019)) +- Fixed a bug which prevents docker reload when host is configured with ipv6.disable=1 ([#21019](https://github.com/docker/docker/pull/21019)) +- Added inbuilt nil IPAM driver ([#21019](https://github.com/docker/docker/pull/21019)) +- Fixed bug in iptables.Exists() logic [#21019](https://github.com/docker/docker/pull/21019) +- Fixed a Veth interface leak when using overlay network ([#21019](https://github.com/docker/docker/pull/21019)) +- Fixed a bug which prevents docker reload after a network delete during shutdown ([#20214](https://github.com/docker/docker/pull/20214)) +- Make sure iptables chains are recreated on firewalld reload ([#20419](https://github.com/docker/docker/pull/20419)) +- Allow to pass global datastore during config reload ([#20419](https://github.com/docker/docker/pull/20419)) +- For anonymous containers use the alias name for IP to name mapping, ie:DNS PTR record ([#21019](https://github.com/docker/docker/pull/21019)) +- Fix a panic when deleting an entry from /etc/hosts file ([#21019](https://github.com/docker/docker/pull/21019)) +- Source the forwarded DNS queries from the container net namespace ([#21019](https://github.com/docker/docker/pull/21019)) +- Fix to retain the network internal mode config for bridge networks on daemon reload ([#21780] (https://github.com/docker/docker/pull/21780)) +- Fix to retain IPAM driver option configs on daemon reload ([#21914] (https://github.com/docker/docker/pull/21914)) + +### Plugins + +- Fix a file descriptor leak that would occur every time plugins were enumerated ([#20686](https://github.com/docker/docker/pull/20686)) +- Fix an issue where Authz plugin would corrupt the payload body when faced with a large amount of data ([#20602](https://github.com/docker/docker/pull/20602)) + +### Runtime + +- Fix a panic that could occur when cleanup after a container started with invalid parameters ([#21716](https://github.com/docker/docker/pull/21716)) +- Fix a race with event timers stopping early ([#21692](https://github.com/docker/docker/pull/21692)) +- Fix race conditions in the layer store, potentially corrupting the map and crashing the process ([#21677](https://github.com/docker/docker/pull/21677)) +- Un-deprecate auto-creation of host directories for mounts. This feature was marked deprecated in ([#21666](https://github.com/docker/docker/pull/21666)) + Docker 1.9, but was decided to be too much of a backward-incompatible change, so it was decided to keep the feature. 
++ It is now possible for containers to share the NET and IPC namespaces when `userns` is enabled ([#21383](https://github.com/docker/docker/pull/21383))
++ `docker inspect` will now expose the rootfs layers ([#21370](https://github.com/docker/docker/pull/21370))
++ Docker Windows gained a minimal `top` implementation ([#21354](https://github.com/docker/docker/pull/21354))
+* Docker learned to report the faulty exe when a container cannot be started due to its condition ([#21345](https://github.com/docker/docker/pull/21345))
+* Docker with device mapper will now refuse to run if `udev sync` is not available ([#21097](https://github.com/docker/docker/pull/21097))
+- Fix a bug where Docker would not validate the config file upon configuration reload ([#21089](https://github.com/docker/docker/pull/21089))
+- Fix a hang that would happen on attach if the initial start failed ([#21048](https://github.com/docker/docker/pull/21048))
+- Fix an issue where registry service options in the daemon configuration file were not properly taken into account ([#21045](https://github.com/docker/docker/pull/21045))
+- Fix a race between the exec and resize operations ([#21022](https://github.com/docker/docker/pull/21022))
+- Fix an issue where nanoseconds were not correctly taken into account when filtering Docker events ([#21013](https://github.com/docker/docker/pull/21013))
+- Fix the handling of the Docker command when passed a 64-byte id ([#21002](https://github.com/docker/docker/pull/21002))
+* Docker will now return a `204` (i.e. `http.StatusNoContent`) code when it successfully deleted a network ([#20977](https://github.com/docker/docker/pull/20977))
+- Fix a bug where the daemon would wait indefinitely in case the process it was about to kill had already exited on its own ([#20967](https://github.com/docker/docker/pull/20967))
+* The devmapper driver learned the `dm.min_free_space` option. If the mapped device's free space reaches the passed value, new device creation will be prohibited. ([#20786](https://github.com/docker/docker/pull/20786))
++ Docker can now prevent processes in containers from gaining new privileges via the `--security-opt=no-new-privileges` flag ([#20727](https://github.com/docker/docker/pull/20727))
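+
+A minimal sketch of the flag above (image and command are arbitrary): with the option set, processes in the container cannot acquire privileges they do not already have, for example through setuid binaries.
+
+```console
+$ docker run --rm --security-opt=no-new-privileges busybox id
+```
+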
+- Starting a container with the `--device` option will now correctly resolve symlinks ([#20684](https://github.com/docker/docker/pull/20684))
++ Docker now relies on [`containerd`](https://github.com/docker/containerd) and [`runc`](https://github.com/opencontainers/runc) to spawn containers. ([#20662](https://github.com/docker/docker/pull/20662))
+- Fix docker configuration reloading to only alter values present in the given config file ([#20604](https://github.com/docker/docker/pull/20604))
++ Docker now allows setting a container hostname via the `--hostname` flag when `--net=host` ([#20177](https://github.com/docker/docker/pull/20177))
++ Docker now allows executing privileged containers while running with `--userns-remap` if both `--privileged` and the new `--userns=host` flag are specified ([#20111](https://github.com/docker/docker/pull/20111))
+- Fix Docker not correctly cleaning up old containers upon restarting after a crash ([#19679](https://github.com/docker/docker/pull/19679))
+* Docker will now error out if it doesn't recognize a configuration key within the config file ([#19517](https://github.com/docker/docker/pull/19517))
+- Fix container loading, on daemon startup, when the containers depend on a plugin running within a container ([#19500](https://github.com/docker/docker/pull/19500))
+* `docker update` learned how to change a container's restart policy ([#19116](https://github.com/docker/docker/pull/19116))
+* `docker inspect` now also returns a new `State` field containing the container state in a human-readable way (i.e. one of `created`, `restarting`, `running`, `paused`, `exited` or `dead`) ([#18966](https://github.com/docker/docker/pull/18966))
++ Docker learned to limit the number of active pids (i.e. processes) within the container via the `pids-limit` flag. NOTE: This requires `CGROUP_PIDS=y` to be in the kernel configuration. ([#18697](https://github.com/docker/docker/pull/18697))
+- `docker load` now has a `--quiet` option to suppress the load output ([#20078](https://github.com/docker/docker/pull/20078))
+- Fix a bug in neighbor discovery for IPv6 peers ([#20842](https://github.com/docker/docker/pull/20842))
+- Fix a panic during cleanup if a container was started with invalid options ([#21802](https://github.com/docker/docker/pull/21802))
+- Fix a situation where a container could not be stopped if the terminal was closed ([#21840](https://github.com/docker/docker/pull/21840))
+
+### Security
+
+* Objects with the `pcp_pmcd_t` selinux type were given management access to `/var/lib/docker(/.*)?` ([#21370](https://github.com/docker/docker/pull/21370))
+* `restart_syscall`, `copy_file_range` and `mlock2` joined the list of allowed calls in the default seccomp profile ([#21117](https://github.com/docker/docker/pull/21117), [#21262](https://github.com/docker/docker/pull/21262))
+* `send`, `recv` and `x32` were added to the list of allowed syscalls and arches in the default seccomp profile ([#19432](https://github.com/docker/docker/pull/19432))
+* Docker Content Trust now requests the server to perform snapshot signing ([#21046](https://github.com/docker/docker/pull/21046))
+* Support for using YubiKeys for Content Trust signing has been moved out of experimental ([#21591](https://github.com/docker/docker/pull/21591))
+
+### Volumes
+
+* Output of `docker volume ls` is now sorted by volume name ([#20389](https://github.com/docker/docker/pull/20389))
+* Local volumes can now accept options similar to the unix `mount` tool ([#20262](https://github.com/docker/docker/pull/20262))
+- Fix an issue where a one-letter directory name could not be used as the source for volumes ([#21106](https://github.com/docker/docker/pull/21106))
++ `docker run -v` now accepts a new flag `nocopy`.
+  This tells the runtime not to copy the container path's content into the volume (which is the default behavior) ([#21223](https://github.com/docker/docker/pull/21223))
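+
+A hedged sketch of `nocopy` (volume and image names are arbitrary; `--name` was the volume-create syntax of this era): without the flag, pre-existing content at the mount point inside the image is copied into an empty named volume on first use; `nocopy` skips that copy.
+
+```console
+$ docker volume create --name webdata
+$ # The volume stays empty instead of receiving the image's /usr/share/nginx/html content
+$ docker run -d -v webdata:/usr/share/nginx/html:nocopy nginx
+```
+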
+
+## 1.10.3 (2016-03-10)
+
+### Runtime
+
+- Fix the Docker client exiting with an "Unrecognized input header" error [#20706](https://github.com/docker/docker/pull/20706)
+- Fix Docker exiting if Exec is started with both `AttachStdin` and `Detach` [#20647](https://github.com/docker/docker/pull/20647)
+
+### Distribution
+
+- Fix a crash when pushing multiple images sharing the same layers to the same repository in parallel [#20831](https://github.com/docker/docker/pull/20831)
+- Fix a panic when pushing images to a registry which uses a misconfigured token service [#21030](https://github.com/docker/docker/pull/21030)
+
+### Plugin system
+
+- Fix an issue preventing volume plugins from starting when SELinux is enabled [#20834](https://github.com/docker/docker/pull/20834)
+- Prevent Docker from exiting if a volume plugin returns a null response for Get requests [#20682](https://github.com/docker/docker/pull/20682)
+- Fix the plugin system leaking file descriptors if a plugin has an error [#20680](https://github.com/docker/docker/pull/20680)
+
+### Security
+
+- Fix linux32 emulation failing during docker build [#20672](https://github.com/docker/docker/pull/20672)
+  It was due to the `personality` syscall being blocked by the default seccomp profile.
+- Fix Oracle XE 10g failing to start in a container [#20981](https://github.com/docker/docker/pull/20981)
+  It was due to the `ipc` syscall being blocked by the default seccomp profile.
+- Fix user namespaces not working on Linux From Scratch [#20685](https://github.com/docker/docker/pull/20685)
+- Fix an issue preventing the daemon from starting if userns is enabled and the `subuid` or `subgid` files contain comments [#20725](https://github.com/docker/docker/pull/20725)
+
+## 1.10.2 (2016-02-22)
+
+### Runtime
+
+- Prevent systemd from deleting containers' cgroups when its configuration is reloaded [#20518](https://github.com/docker/docker/pull/20518)
+- Fix SELinux issues by disregarding `--read-only` when mounting `/dev/mqueue` [#20333](https://github.com/docker/docker/pull/20333)
+- Fix chown permissions used during `docker cp` when userns is used [#20446](https://github.com/docker/docker/pull/20446)
+- Fix a configuration loading issue with all booleans defaulting to `true` [#20471](https://github.com/docker/docker/pull/20471)
+- Fix an occasional panic with `docker logs -f` [#20522](https://github.com/docker/docker/pull/20522)
+
+### Distribution
+
+- Keep the layer reference if deletion failed, to avoid a badly inconsistent state [#20513](https://github.com/docker/docker/pull/20513)
+- Gracefully handle a corner case when canceling migration [#20372](https://github.com/docker/docker/pull/20372)
+- Fix docker import on compressed data [#20367](https://github.com/docker/docker/pull/20367)
+- Fix tar-split file corruption during migration that later caused docker push and docker save to fail [#20458](https://github.com/docker/docker/pull/20458)
+
+### Networking
+
+- Fix a daemon crash if embedded DNS is sent garbage [#20510](https://github.com/docker/docker/pull/20510)
+
+### Volumes
+
+- Fix an issue with multiple volume references with the same name [#20381](https://github.com/docker/docker/pull/20381)
+
+### Security
+
+- Fix potential cache corruption and delegation conflict issues [#20523](https://github.com/docker/docker/pull/20523)
+
+## 1.10.1 (2016-02-11)
+
+### Runtime
+
+* Do not stop the daemon on migration hard failure [#20156](https://github.com/docker/docker/pull/20156)
+- Fix various issues with migration to content-addressable images [#20058](https://github.com/docker/docker/pull/20058)
+- Fix a ZFS permission bug with user namespaces [#20045](https://github.com/docker/docker/pull/20045)
+- Do not leak /dev/mqueue from the host to all containers, keep it container-specific [#19876](https://github.com/docker/docker/pull/19876) [#20133](https://github.com/docker/docker/pull/20133)
+- Fix `docker ps --filter before=...` to not show stopped containers without the `-a` flag [#20135](https://github.com/docker/docker/pull/20135)
+
+### Security
+
+- Fix an issue preventing docker events from working properly with the authorization plugin [#20002](https://github.com/docker/docker/pull/20002)
+
+### Distribution
+
+* Add additional verifications and prevent uploading invalid data to registries [#20164](https://github.com/docker/docker/pull/20164)
+- Fix a regression preventing uppercase characters in the image reference hostname [#20175](https://github.com/docker/docker/pull/20175)
+
+### Networking
+
+- Fix embedded DNS for user-defined networks in the presence of firewalld [#20060](https://github.com/docker/docker/pull/20060)
+- Fix an issue where removing a network during shutdown left Docker inoperable [#20181](https://github.com/docker/docker/issues/20181) [#20235](https://github.com/docker/docker/issues/20235)
+- Embedded DNS is now able to return compressed results [#20181](https://github.com/docker/docker/issues/20181)
+- Fix a port-mapping issue with `userland-proxy=false` [#20181](https://github.com/docker/docker/issues/20181)
+
+### Logging
+
+- Fix a bug where the tcp+tls protocol would be rejected [#20109](https://github.com/docker/docker/pull/20109)
+
+### Volumes
+
+- Fix an issue whereby older volume drivers would not receive volume options [#19983](https://github.com/docker/docker/pull/19983)
+
+### Misc
+
+- Remove TasksMax from the Docker systemd service [#20167](https://github.com/docker/docker/pull/20167)
+
+## 1.10.0 (2016-02-04)
+
+**IMPORTANT**: Docker 1.10 uses a new content-addressable storage for images and layers.
+A migration is performed the first time docker is run, and can take a significant amount of time depending on the number of images present.
+Refer to this page on the wiki for more information: https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration
+We also released a cool migration utility that enables you to perform the migration before updating to reduce downtime.
+Engine 1.10 migrator can be found on Docker Hub: https://hub.docker.com/r/docker/v1.10-migrator/
+
+### Runtime
+
++ New `docker update` command that allows updating resource constraints on running containers [#15078](https://github.com/docker/docker/pull/15078)
++ Add `--tmpfs` flag to `docker run` to create a tmpfs mount in a container [#13587](https://github.com/docker/docker/pull/13587)
++ Add `--format` flag to the `docker images` command [#17692](https://github.com/docker/docker/pull/17692)
++ Allow daemon configuration to be set in a file and hot-reloaded with the `SIGHUP` signal [#18587](https://github.com/docker/docker/pull/18587)
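+
+As a hedged sketch of the config hot-reload above (the config key shown is one example of a reloadable option, and the process name is an assumption; in this era the daemon still ran as `docker daemon`):
+
+```console
+$ cat /etc/docker/daemon.json
+{
+  "debug": true
+}
+$ # Ask the running daemon to re-read the reloadable parts of its configuration
+$ kill -SIGHUP $(pidof docker)
+```
+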
++ Updated docker events to include more meta-data and event types [#18888](https://github.com/docker/docker/pull/18888)
+  This change is backward compatible in the API, but not on the CLI.
++ Add `--blkio-weight-device` flag to `docker run` [#13959](https://github.com/docker/docker/pull/13959)
++ Add `--device-read-bps` and `--device-write-bps` flags to `docker run` [#14466](https://github.com/docker/docker/pull/14466)
++ Add `--device-read-iops` and `--device-write-iops` flags to `docker run` [#15879](https://github.com/docker/docker/pull/15879)
++ Add `--oom-score-adj` flag to `docker run` [#16277](https://github.com/docker/docker/pull/16277)
++ Add `--detach-keys` flag to the `attach`, `run`, `start` and `exec` commands to override the default key sequence that detaches from a container [#15666](https://github.com/docker/docker/pull/15666)
++ Add `--shm-size` flag to `run`, `create` and `build` to set the size of `/dev/shm` [#16168](https://github.com/docker/docker/pull/16168)
++ Show the number of running, stopped, and paused containers in `docker info` [#19249](https://github.com/docker/docker/pull/19249)
++ Show the `OSType` and `Architecture` in `docker info` [#17478](https://github.com/docker/docker/pull/17478)
++ Add `--cgroup-parent` flag on `daemon` to set the cgroup parent for all containers [#19062](https://github.com/docker/docker/pull/19062)
++ Add `-L` flag to `docker cp` to follow symlinks [#16613](https://github.com/docker/docker/pull/16613)
++ New `status=dead` filter for `docker ps` [#17908](https://github.com/docker/docker/pull/17908)
+* Change `docker run` exit codes to distinguish between runtime and application errors [#14012](https://github.com/docker/docker/pull/14012)
+* Enhance `docker events --since` and `--until` to support nanoseconds and timezones [#17495](https://github.com/docker/docker/pull/17495)
+* Add `--all`/`-a` flag to `stats` to include both running and stopped containers [#16742](https://github.com/docker/docker/pull/16742)
+* Change the default cgroup-driver to `cgroupfs` [#17704](https://github.com/docker/docker/pull/17704)
+* Emit a "tag" event when tagging an image with `build -t` [#17115](https://github.com/docker/docker/pull/17115)
+* Best effort for linked containers' start order when starting the daemon [#18208](https://github.com/docker/docker/pull/18208)
+* Add the ability to add multiple tags on `build` [#15780](https://github.com/docker/docker/pull/15780)
+* Permit `OPTIONS` requests against any URL, thus fixing an issue with CORS [#19569](https://github.com/docker/docker/pull/19569)
+- Fix the `--quiet` flag on `docker build` to actually be quiet [#17428](https://github.com/docker/docker/pull/17428)
+- Fix `docker images --filter dangling=false` to now show all non-dangling images [#19326](https://github.com/docker/docker/pull/19326)
+- Fix a race condition causing autorestart to turn off on restart [#17629](https://github.com/docker/docker/pull/17629)
+- Recognize GPFS filesystems [#19216](https://github.com/docker/docker/pull/19216)
+- Fix an obscure bug preventing containers from starting [#19751](https://github.com/docker/docker/pull/19751)
+- Forbid `exec` during container restart [#19722](https://github.com/docker/docker/pull/19722)
+- devicemapper: Increasing `--storage-opt dm.basesize` will now increase the base device size on daemon restart [#19123](https://github.com/docker/docker/pull/19123)
+
+### Security
+
++ Add `--userns-remap` flag to `daemon` to support user namespaces (previously in experimental) [#19187](https://github.com/docker/docker/pull/19187)
++ Add support for custom seccomp profiles in `--security-opt` [#17989](https://github.com/docker/docker/pull/17989)
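+
+A hedged sketch of the custom seccomp support above (the profile path is a placeholder; this release used the colon syntax, while later releases also accept `seccomp=`):
+
+```console
+$ docker run --rm --security-opt seccomp:/path/to/profile.json busybox true
+$ # Or disable the default profile entirely
+$ docker run --rm --security-opt seccomp:unconfined busybox true
+```
+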
++ Add a default seccomp profile [#18780](https://github.com/docker/docker/pull/18780)
++ Add `--authorization-plugin` flag to `daemon` to customize ACLs [#15365](https://github.com/docker/docker/pull/15365)
++ Docker Content Trust now supports the ability to read and write user delegations [#18887](https://github.com/docker/docker/pull/18887)
+  This is an optional, opt-in feature that requires the explicit use of the Notary command-line utility in order to be enabled.
+  Enabling delegation support in a specific repository will break the ability of Docker 1.9 and 1.8 to pull from that repository, if content trust is enabled.
+* Allow SELinux to run in a container when using the BTRFS storage driver [#16452](https://github.com/docker/docker/pull/16452)
+
+### Distribution
+
+* Use content-addressable storage for images and layers [#17924](https://github.com/docker/docker/pull/17924)
+  Note that a migration is performed the first time docker is run; it can take a significant amount of time depending on the number of images and containers present.
+  Images no longer depend on the parent chain but contain a list of layer references.
+  `docker load`/`docker save` tarballs now also contain content-addressable image configurations.
+  For more information: https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration
+* Add support for the new [manifest format ("schema2")](https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-2.md) [#18785](https://github.com/docker/docker/pull/18785)
+* Lots of improvements for push and pull: performance++, retries on failed downloads, cancelling on client disconnect [#18353](https://github.com/docker/docker/pull/18353), [#18418](https://github.com/docker/docker/pull/18418), [#19109](https://github.com/docker/docker/pull/19109)
+* Limit v1 protocol fallbacks [#18590](https://github.com/docker/docker/pull/18590)
+- Fix an issue where docker could hang indefinitely waiting for a nonexistent process to pull an image [#19743](https://github.com/docker/docker/pull/19743)
+
+### Networking
+
++ Use DNS-based discovery instead of `/etc/hosts` [#19198](https://github.com/docker/docker/pull/19198)
++ Support for network-scoped aliases using `--net-alias` on `run` and `--alias` on `network connect` [#19242](https://github.com/docker/docker/pull/19242)
++ Add `--ip` and `--ip6` on `run` and `network connect` to support custom IP addresses for a container in a network [#19001](https://github.com/docker/docker/pull/19001)
++ Add `--ipam-opt` to `network create` for passing custom IPAM options [#17316](https://github.com/docker/docker/pull/17316)
++ Add `--internal` flag to `network create` to restrict external access to and from the network [#19276](https://github.com/docker/docker/pull/19276)
++ Add `kv.path` option to `--cluster-store-opt` [#19167](https://github.com/docker/docker/pull/19167)
++ Add `discovery.heartbeat` and `discovery.ttl` options to `--cluster-store-opt` to configure the discovery TTL and heartbeat timer [#18204](https://github.com/docker/docker/pull/18204)
++ Add `--format` flag to `network inspect` [#17481](https://github.com/docker/docker/pull/17481)
++ Add `--link` to `network connect` to provide a container-local alias [#19229](https://github.com/docker/docker/pull/19229)
++ Support for capability exchange with remote IPAM plugins [#18775](https://github.com/docker/docker/pull/18775)
++ Add `--force` to `network disconnect` to force a container to be disconnected from a network [#19317](https://github.com/docker/docker/pull/19317)
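+
+A hedged sketch combining several of the `network` options above (names, addresses, subnet and image are arbitrary examples):
+
+```console
+$ # An internal network gets no external connectivity
+$ docker network create --internal backend
+$ # --ip requires a user-defined network with an explicit subnet
+$ docker network create --subnet 172.25.0.0/16 frontend
+$ docker run -d --name web --net frontend --ip 172.25.0.10 --net-alias www nginx
+$ # Containers on "frontend" can now resolve both "web" and "www"
+```
+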
+* Support for multi-host networking using the built-in overlay driver for all engine-supported kernels: 3.10+ [#18775](https://github.com/docker/docker/pull/18775)
+* `--link` is now supported on `docker run` for containers in user-defined networks [#19229](https://github.com/docker/docker/pull/19229)
+* Enhance `docker network rm` to allow removing multiple networks [#17489](https://github.com/docker/docker/pull/17489)
+* Include container names in `network inspect` [#17615](https://github.com/docker/docker/pull/17615)
+* Include auto-generated subnets for user-defined networks in `network inspect` [#17316](https://github.com/docker/docker/pull/17316)
+* Add `--filter` flag to `network ls` to hide predefined networks [#17782](https://github.com/docker/docker/pull/17782)
+* Add support for network connect/disconnect to stopped containers [#18906](https://github.com/docker/docker/pull/18906)
+* Add the network ID to container inspect [#19323](https://github.com/docker/docker/pull/19323)
+- Fix an MTU issue where Docker would not start with two or more default routes [#18108](https://github.com/docker/docker/pull/18108)
+- Fix duplicate IP addresses for containers [#18106](https://github.com/docker/docker/pull/18106)
+- Fix an issue that sometimes prevented docker from creating the bridge network [#19338](https://github.com/docker/docker/pull/19338)
+- Do not substitute the 127.0.0.1 name server when using `--net=host` [#19573](https://github.com/docker/docker/pull/19573)
+
+### Logging
+
++ New logging driver for Splunk [#16488](https://github.com/docker/docker/pull/16488)
++ Add support for syslog over TCP+TLS [#18998](https://github.com/docker/docker/pull/18998)
+* Enhance `docker logs --since` and `--until` to support nanoseconds and timezones [#17495](https://github.com/docker/docker/pull/17495)
+* Enhance AWS logs to auto-detect the region [#16640](https://github.com/docker/docker/pull/16640)
+
+### Volumes
+
++ Add support to set the mount propagation mode for a volume [#17034](https://github.com/docker/docker/pull/17034)
+* Add `ls` and `inspect` endpoints to the volume plugin API [#16534](https://github.com/docker/docker/pull/16534)
+  Existing plugins need to make use of these new APIs to satisfy users' expectations.
+  For that, please use the new MIME type `application/vnd.docker.plugins.v1.2+json` [#19549](https://github.com/docker/docker/pull/19549)
+- Fix data not being copied to named volumes [#19175](https://github.com/docker/docker/pull/19175)
+- Fix issues preventing volume drivers from being containerized [#19500](https://github.com/docker/docker/pull/19500)
+- Fix `docker volumes ls --dangling=false` to now show all non-dangling volumes [#19671](https://github.com/docker/docker/pull/19671)
+- Do not remove named volumes on container removal [#19568](https://github.com/docker/docker/pull/19568)
+- Allow external volume drivers to host anonymous volumes [#19190](https://github.com/docker/docker/pull/19190)
+
+### Builder
+
++ Add support for `**` in `.dockerignore` to wildcard multiple levels of directories [#17090](https://github.com/docker/docker/pull/17090)
+- Fix handling of UTF-8 characters in Dockerfiles [#17055](https://github.com/docker/docker/pull/17055)
+- Fix a permissions problem when reading from STDIN [#19283](https://github.com/docker/docker/pull/19283)
+
+### Client
+
++ Add support for overriding the API version to use via a `DOCKER_API_VERSION` environment variable [#15964](https://github.com/docker/docker/pull/15964)
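+
+For illustration of the entry above (the version number is an example): the environment variable pins the client's API version, for instance when talking to an older daemon.
+
+```console
+$ DOCKER_API_VERSION=1.21 docker version
+```
+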
+- Fix a bug preventing Windows clients from logging in to Docker Hub [#19891](https://github.com/docker/docker/pull/19891)
+
+### Misc
+
+* systemd: Set TasksMax in addition to LimitNPROC in the systemd service file [#19391](https://github.com/docker/docker/pull/19391)
+
+### Deprecations
+
+* Remove LXC support. The LXC driver was deprecated in Docker 1.8, and has now been removed [#17700](https://github.com/docker/docker/pull/17700)
+* Remove the `--exec-driver` daemon flag, because it is no longer in use [#17700](https://github.com/docker/docker/pull/17700)
+* Remove old deprecated single-dashed long CLI flags (such as `-rm`; use `--rm` instead) [#17724](https://github.com/docker/docker/pull/17724)
+* Deprecate HostConfig at API container start [#17799](https://github.com/docker/docker/pull/17799)
+* Deprecate docker packages for newly EOL'd Linux distributions: Fedora 21 and Ubuntu 15.04 (Vivid) [#18794](https://github.com/docker/docker/pull/18794), [#18809](https://github.com/docker/docker/pull/18809)
+* Deprecate the `-f` flag for docker tag [#18350](https://github.com/docker/docker/pull/18350)
+
+## 1.9.1 (2015-11-21)
+
+### Runtime
+
+- Do not prevent the daemon from booting if images could not be restored (#17695)
+- Force the IPC mount to unmount on daemon shutdown/init (#17539)
+- Turn IPC unmount errors into warnings (#17554)
+- Fix a `docker stats` performance regression (#17638)
+- Clarify a cryptic error message upon `docker logs` if `--log-driver=none` (#17767)
+- Fix rare panics (#17639, #17634, #17703)
+- Fix opaque whiteout problems for files with a dot prefix (#17819)
+- devicemapper: try defaulting to xfs instead of ext4 for performance reasons (#17903, #17918)
+- devicemapper: fix the displayed fs in docker info (#17974)
+- selinux: only relabel if the user requested it with the `z` option (#17450, #17834)
+- Do not make network calls when normalizing names (#18014)
+
+### Client
+
+- Fix `docker login` on Windows (#17738)
+- Fix a bug with `docker inspect` output when not connected to the daemon (#17715)
+- Fix `docker inspect -f {{.HostConfig.Dns}} somecontainer` (#17680)
+
+### Builder
+
+- Fix a regression with symlink behavior in ADD/COPY (#17710)
+
+### Networking
+
+- Allow passing a network ID as an argument for `--net` (#17558)
+- Fix connect to host and prevent disconnect from host for the `host` network (#17476)
+- Fix a `--fixed-cidr` issue when the gateway ip falls in the ip-range and the ip-range is
+  not the first block in the network (#17853)
+- Restore deterministic `IPv6` generation from the `MAC` address on the default `bridge` network (#17890)
+- Allow port-mapping only for endpoints created on docker run (#17858)
+- Fixed an endpoint delete issue with a possible stale sbox (#18102)
+
+### Distribution
+
+- Correct the parent chain in v2 push when v1Compatibility files on the disk are inconsistent (#18047)
+
+## 1.9.0 (2015-11-03)
+
+### Runtime
+
++ `docker stats` now returns block IO metrics (#15005)
++ `docker stats` now details network stats per interface (#15786)
++ Add `ancestor=<image>` filter to the `docker ps --filter` flag to filter containers based on their ancestor images (#14570)
++ Add `label=<key>` filter to `docker ps --filter` to filter containers based on label (#16530)
++ Add `--kernel-memory` flag to `docker run` (#14006)
++ Add `--message` flag to `docker import`, allowing an optional message to be specified (#15711)
++ Add `--privileged` flag to `docker exec` (#14113)
++ Add `--stop-signal` flag to `docker run`, allowing the signal used to stop the container process to be replaced (#15307)
++ Add a new `unless-stopped` restart policy (#15348)
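+
+For illustration of the new restart policy above (image and name are arbitrary): the container is restarted along with the daemon unless it was explicitly stopped.
+
+```console
+$ docker run -d --name cache --restart unless-stopped redis
+$ docker stop cache   # stays stopped, even across daemon restarts
+```
+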
++ Inspecting an image now returns tags (#13185)
++ Add container size information to `docker inspect` (#15796)
++ Add `RepoTags` and `RepoDigests` fields to `/images/{name:.*}/json` (#17275)
+- Remove the deprecated `/container/ps` endpoint from the API (#15972)
+- Send and document correct HTTP codes for `/exec/<id>/start` (#16250)
+- Share shm and mqueue between containers sharing the IPC namespace (#15862)
+- The event stream now shows OOM status when `--oom-kill-disable` is set (#16235)
+- Ensure special network files (/etc/hosts etc.) are read-only if bind-mounted with the `ro` option (#14965)
+- Improve `rmi` performance (#16890)
+- Do not update /etc/hosts for the default bridge network, except for links (#17325)
+- Fix a conflict with duplicate container names (#17389)
+- Fix an issue with incorrect template execution in `docker inspect` (#17284)
+- DEPRECATE the `-c` short flag variant for `--cpu-shares` in docker run (#16271)
+
+### Client
+
++ Allow `docker import` to import from local files (#11907)
+
+### Builder
+
++ Add a `STOPSIGNAL` Dockerfile instruction allowing a different stop signal to be set for the container process (#15307)
++ Add an `ARG` Dockerfile instruction and a `--build-arg` flag to `docker build` that allow build-time environment variables to be added (#15182)
+- Improve cache miss performance (#16890)
+
+### Storage
+
+- devicemapper: Implement deferred deletion capability (#16381)
+
+### Networking
+
++ `docker network` exits experimental and is part of the standard release (#16645)
++ New network top-level concept, with associated subcommands and API (#16645)
+  WARNING: the API is different from the experimental API
++ Support for multiple isolated/micro-segmented networks (#16645)
++ Built-in multihost networking using a VXLAN-based overlay driver (#14071)
++ Support for third-party network plugins (#13424)
++ Ability to dynamically connect containers to multiple networks (#16645)
++ Support for user-defined IP address management via pluggable IPAM drivers (#16910)
++ Add daemon flags `--cluster-store` and `--cluster-advertise` for built-in node discovery (#16229)
++ Add `--cluster-store-opt` for setting up TLS settings (#16644)
++ Add `--dns-opt` to the daemon (#16031)
+- DEPRECATE the following container `NetworkSettings` fields in API v1.21: `EndpointID`, `Gateway`,
+  `GlobalIPv6Address`, `GlobalIPv6PrefixLen`, `IPAddress`, `IPPrefixLen`, `IPv6Gateway` and `MacAddress`.
+  Those are now specific to the `bridge` network. Use `NetworkSettings.Networks` to inspect
+  the networking settings of a container per network.
+
+### Volumes
+
++ New top-level `volume` subcommand and API (#14242)
+- Move API volume driver settings to host-specific config (#15798)
+- Print an error message if the volume name is not unique (#16009)
+- Ensure volumes created from Dockerfiles always use the local volume driver (#15507)
+- DEPRECATE auto-creating missing host paths for bind mounts (#16349)
+
+### Logging
+
++ Add `awslogs` logging driver for Amazon CloudWatch (#15495)
++ Add a generic `tag` log option to allow customizing the container/image information passed to the driver (e.g. show container names) (#15384)
+- Implement the `docker logs` endpoint for the journald driver (#13707)
+- DEPRECATE driver-specific log tags (e.g. `syslog-tag`, etc.) (#15384)
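+
+A hedged sketch of the generic `tag` log option above, which replaces the driver-specific tags being deprecated (the `--log-opt` spelling and template follow the logging docs of later releases; driver choice is an example):
+
+```console
+$ # Tag forwarded log lines with the container name instead of the default ID prefix
+$ docker run -d --log-driver syslog --log-opt tag="{{.Name}}" busybox ping 127.0.0.1
+```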
+
+### Distribution
+
++ `docker search` now works with partial names (#16509)
+- Push optimization: avoid buffering to file (#15493)
+- The daemon will display progress for images that were already being pulled by another client (#15489)
+- Only the permissions required for the current action being performed are requested (#)
++ Rename trust keys (and respective environment variables) from `offline` to
+`root` and `tagging` to `repository` (#16894)
+- DEPRECATE trust key environment variables
+`DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE` and
+`DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE` (#16894)
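+
+For illustration of the renamed passphrase variables above (registry and image are placeholders; the new variable names are inferred from the `root`/`repository` key names):
+
+```console
+$ export DOCKER_CONTENT_TRUST=1
+$ export DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE='...'
+$ export DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE='...'
+$ docker push example.com/myorg/myimage:signed
+```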
+
+### Security
+
++ Add SELinux profiles to the rpm package (#15832)
+- Fix various issues with the AppArmor profiles provided in the deb package (#14609)
+- Add an AppArmor policy that prevents writing to /proc (#15571)
+
+## 1.8.3 (2015-10-12)
+
+### Distribution
+
+- Fix layer IDs leading to local graph poisoning (CVE-2014-8178)
+- Fix manifest validation and parsing logic errors that allowed a pull-by-digest validation bypass (CVE-2014-8179)
++ Add `--disable-legacy-registry` to prevent a daemon from using a v1 registry
+
+## 1.8.2 (2015-09-10)
+
+### Distribution
+
+- Fixes a rare edge case of handling GNU LongLink and LongName entries.
+- Fix ^C on docker pull.
+- Fix docker pull issues on client disconnection.
+- Fix an issue that caused the daemon to panic when loggers weren't configured properly.
+- Fix a goroutine leak pulling images from registry V2.
+
+### Runtime
+
+- Fix a bug mounting cgroups for docker daemons running inside docker containers.
+- Initialize log configuration properly.
+
+### Client
+
+- Handle the `-q` flag in `docker ps` properly when there is a default format.
+
+### Networking
+
+- Fix several corner cases with netlink.
+
+### Contrib
+
+- Fix several issues with bash completion.
+
+## 1.8.1 (2015-08-12)
+
+### Distribution
+
+* Fix a bug where pushing multiple tags would result in invalid images
+
+## 1.8.0 (2015-08-11)
+
+### Distribution
+
++ Trusted pull, push and build, disabled by default
+* Make tar layers deterministic between registries
+* Don't allow deleting the image of running containers
+* Check if a tag name to load is a valid digest
+* Allow one-character repository names
+* Add a more accurate error description for invalid tag names
+* Make the build cache ignore mtime
+
+### Cli
+
++ Add support for DOCKER_CONFIG/--config to specify the config file dir
++ Add `--type` flag for the docker inspect command
++ Add formatting options to `docker ps` with `--format`
++ Replace `docker -d` with the new subcommand `docker daemon`
+* Zsh completion updates and improvements
+* Add some missing events to bash completion
+* Support daemon urls with base paths in `docker -H`
+* Validate the status= filter for docker ps
+* Display when a container is in --net=host in docker ps
+* Extend docker inspect to export image metadata related to the graph driver
+* Restore the --default-gateway{,-v6} daemon options
+* Add missing unpublished ports in docker ps
+* Allow duration strings in `docker events` as --since/--until
+* Expose more mounts information in `docker inspect`
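+
+A hedged sketch of the new `--format` and `--type` options above (template fields follow Go template conventions; output depends on your containers):
+
+```console
+$ docker ps --format '{{.ID}}: {{.Names}} ({{.Status}})'
+$ # Fail unless the argument resolves to an image
+$ docker inspect --type image busybox
+```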
+
+### Runtime
+
++ Add a new Fluentd logging driver
++ Allow `docker import` to load from local files
++ Add a logging driver for GELF via UDP
++ Allow copying files from host to containers with `docker cp`
++ Promote volume drivers from experimental to master
++ Add rollover options to the json-file log driver, and the --log-driver-opts flag
++ Add memory swappiness tuning options
+* Remove the cgroup read-only flag when privileged
+* Make /proc, /sys, & /dev read-only for read-only containers
+* Add a cgroup bind mount by default
+* Overlay: Export metadata for container and image in `docker inspect`
+* Devicemapper: external device activation
+* Devicemapper: Compare the uuid of the base device on startup
+* Remove RC4 from the list of registry cipher suites
+* Add the syslog-facility option
+* LXC execdriver compatibility with recent LXC versions
+* Mark the LXC execdriver as deprecated (to be removed with the migration to runc)
+
+### Plugins
+
+* Separate plugin sockets and specs locations
+* Allow TLS connections to plugins
+
+### Bug fixes
+
+- Add missing 'Names' field to /containers/json API output
+- Make `docker rmi` of dangling images safe while pulling
+- Devicemapper: Change default basesize to 100G
+- Go Scheduler issue with sync.Mutex and gcc
+- Fix issue where the Search API endpoint would panic due to empty AuthConfig
+- Set image canonical names correctly
+- Check dockerinit only if the lxc driver is used
+- Fix ulimit usage of nproc
+- Always attach STDIN if -i,--interactive is specified
+- Show error messages when saving container state fails
+- Fixed an incorrect assumption of --bridge=none being treated as disable network
+- Check for invalid port specifications in host configuration
+- Fix endpoint leave failure for --net=host mode
+- Fix a goroutine leak in the stats API if the container is not running
+- Check for the apparmor file before reading it
+- Fix DOCKER_TLS_VERIFY being ignored
+- Set umask to the default on startup
+- Correct the message for pausing and unpausing a non-running container
+- Adjust disallowed CpuShares in container creation
+- ZFS: correctly apply the selinux context
+- Display an empty string instead of `<nil>` when the IP opt is nil
+- `docker kill` returns an error when the container is not running
+- Fix the COPY/ADD quoted/json form
+- Fix a goroutine leak on logs -f with no output
+- Remove a panic in the nat package on invalid hostport
+- Fix container linking in Fedora 22
+- Fix an error caused by using default gateways outside of the allocated range
+- Format times in the inspect command with a template as RFC3339Nano
+- Make the registry client accept 2xx and 3xx http status responses as successful
+- Fix a race issue that caused the daemon to crash when certain layer downloads failed in a specific order
+- Fix an error when the docker ps format was not valid
+- Remove a redundant ip forward check
+- Fix an issue trying to push images to repository mirrors
+- Fix error cleanup of network entrypoints when there is an initialization issue
+
+## 1.7.1 (2015-07-14)
+
+#### Runtime
+
+- Fix the default user when spawning an exec process with `docker exec`
+- Make `--bridge=none` not configure the network bridge
+- Publish networking stats properly
+- Fix implicit devicemapper selection with static binaries
+- Fix socket connections that hung intermittently
+- Fix bridge interface creation on CentOS/RHEL 6.6
+- Fix local dns lookups added to resolv.conf
+- Fix the copy command mounting volumes
+- Fix read/write privileges in volumes mounted with --volumes-from
+
+#### Remote API
+
+- Fix unmarshaling of Command and Entrypoint
+- Set a limit for the minimum client version supported
+- Validate port specifications
+- Return proper errors when attach/reattach fail
+
+#### Distribution
+
+- Fix pulling private images
+- Fix fallback between registry V2 and V1
+
+## 1.7.0 (2015-06-16)
+
+#### Runtime
++ Experimental feature: support for out-of-process volume plugins
+* The userland proxy can be disabled in favor of hairpin NAT using the daemon's `--userland-proxy=false` flag
+* The `exec` command supports the `-u|--user` flag to specify the new process owner
++ Default gateways for containers can be specified daemon-wide using the `--default-gateway` and `--default-gateway-v6` flags
++ The CPU CFS (Completely Fair Scheduler) quota can be set in `docker run` using `--cpu-quota`
++ Container block IO can be controlled in `docker run` using `--blkio-weight`
++ ZFS support
++ The `docker logs` command supports a `--since` argument
++ The UTS namespace can be shared with the host with `docker run --uts=host`
+
+#### Quality
+* The networking stack was entirely rewritten as part of the libnetwork effort
+* Engine internals refactoring
+* The volumes code was entirely rewritten to support the plugins effort
++ Sending SIGUSR1 to a daemon will dump all goroutine stacks without exiting
+
+#### Build
++ Support ${variable:-value} and ${variable:+value} syntax for environment variables
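+
+A hedged sketch of the new variable syntax above (the Dockerfile contents are arbitrary; `VOLUME` is among the instructions that expand environment variables, per the 1.3.1 notes further down):
+
+```console
+$ cat Dockerfile
+FROM busybox
+ENV DATA_DIR=""
+# Falls back to /srv/data when DATA_DIR is unset or empty
+VOLUME ${DATA_DIR:-/srv/data}
+```
+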
++ Support resource management flags `--cgroup-parent`, `--cpu-period`, `--cpu-quota`, `--cpuset-cpus`, `--cpuset-mems`
++ git context changes with branches and directories
+* The .dockerignore file supports exclusion rules
+
+#### Distribution
++ Client support for v2 mirroring support for the official registry
+
+#### Bugfixes
+* Firewalld is now supported and will automatically be used when available
+* Mount --device recursively
+
+## 1.6.2 (2015-05-13)
+
+#### Runtime
+- Revert the change prohibiting mounting into /sys
+
+## 1.6.1 (2015-05-07)
+
+#### Security
+- Fix read/write /proc paths (CVE-2015-3630)
+- Prohibit VOLUME /proc and VOLUME / (CVE-2015-3631)
+- Fix opening of file-descriptor 1 (CVE-2015-3627)
+- Fix symlink traversal on container respawn allowing local privilege escalation (CVE-2015-3629)
+- Prohibit mount of /sys
+
+#### Runtime
+- Update the AppArmor policy to not allow mounts
+
+## 1.6.0 (2015-04-07)
+
+#### Builder
++ Building images from an image ID
++ Build containers with resource constraints, i.e. `docker build --cpu-shares=100 --memory=1024m...`
++ `commit --change` to apply specified Dockerfile instructions while committing the image
++ `import --change` to apply specified Dockerfile instructions while importing the image
++ Builds no longer continue in the background when canceled with CTRL-C
+
+#### Client
++ Windows Support
+
+#### Runtime
++ Container and image Labels
++ `--cgroup-parent` for specifying a parent cgroup to place the container cgroup within
++ Logging drivers, `json-file`, `syslog`, or `none`
++ Pulling images by ID
++ `--ulimit` to set the ulimit on a container
++ `--default-ulimit` option on the daemon which applies to all created containers (and is overwritten by `--ulimit` on run)
+
+## 1.5.0 (2015-02-10)
+
+#### Builder
++ The Dockerfile to use for a given `docker build` can be specified with the `-f` flag
+* Dockerfile and .dockerignore files can themselves be excluded as part of the .dockerignore file, thus preventing modifications to these files from invalidating the ADD or COPY instruction cache
+* ADD and COPY instructions accept relative paths
+* The Dockerfile `FROM scratch` instruction is now interpreted as a no-base specifier
+* Improve performance when exposing a large number of ports
+
+#### Hack
++ Allow client-side only integration tests for Windows
+* Include docker-py integration tests against the Docker daemon as part of our test suites
+
+#### Packaging
++ Support for the new version of the registry HTTP API
+* Speed up `docker push` for images with a majority of already existing layers
+- Fixed contacting a private registry through a proxy
+
+#### Remote API
++ A new endpoint will stream live container resource metrics and can be accessed with the `docker stats` command
++ Containers can be renamed using the new `rename` endpoint and the associated `docker rename` command
+* The container `inspect` endpoint shows the IDs of `exec` commands running in this container
+* The container `inspect` endpoint shows the number of times Docker auto-restarted the container
+* New types of event can be streamed by the `events` endpoint: 'OOM' (container died with out of memory), 'exec_create', and 'exec_start'
+- Fixed returned string fields which hold numeric characters incorrectly omitting surrounding double quotes
+
+#### Runtime
++ The Docker daemon has full IPv6 support
++ The `docker run` command can take the `--pid=host` flag to use the host PID namespace, which makes it possible for example to debug host processes using containerized debugging tools
++ The `docker run` command can take the `--read-only` flag to mount the container's root filesystem as read-only, which can be used in combination with volumes to force a container's processes to only write to locations that will be persisted
++ Container total memory usage can be limited for `docker run` using the `--memory-swap` flag
+* Major stability improvements for the devicemapper storage driver
+* Better integration with the host system: containers will reflect changes to the host's `/etc/resolv.conf` file when restarted
+* Better integration with the host system: per-container iptable rules are moved to the DOCKER chain
+- Fixed containers exiting on out of memory returning an invalid exit code
+
+#### Other
+* The HTTP_PROXY, HTTPS_PROXY, and NO_PROXY environment variables are properly taken into account by the client when connecting to the Docker daemon
+
+## 1.4.1 (2014-12-15)
+
+#### Runtime
+- Fix an issue with volumes-from and bind mounts not being honored after create
+
+## 1.4.0 (2014-12-11)
+
+#### Notable Features since 1.3.0
++ Set key=value labels to the daemon (displayed in `docker info`), applied with the new `-label` daemon flag
++ Add support for `ENV` in Dockerfile of the form:
+  `ENV name=value name2=value2...`
++ New Overlayfs Storage Driver
++ `docker info` now returns an `ID` and `Name` field
++ Filter events by event name, container, or image
++ `docker cp` now supports copying from container volumes
+- Fixed `docker tag`, so it honors `--force` when overriding a tag for an existing image
+
+## 1.3.3 (2014-12-11)
+
+#### Security
+- Fix a path traversal vulnerability in the processing of absolute symbolic links (CVE-2014-9356)
+- Fix decompression of xz image archives, preventing privilege escalation (CVE-2014-9357)
+- Validate image IDs (CVE-2014-9358)
+
+#### Runtime
+- Fix an issue when image archives are being read slowly
+
+#### Client
+- Fix a regression related to stdin redirection
+- Fix a regression with `docker cp` when the destination is the current directory
+
+## 1.3.2 (2014-11-20)
+
+#### Security
+- Fix tar breakout vulnerability
+* Extractions are now sandboxed with chroot
+- Security options are no longer committed to images
+
+#### Runtime
+- Fix deadlock in `docker ps -f exited=1`
+- Fix a bug when `--volumes-from` references a container that failed to start
+
+#### Registry
++ `--insecure-registry` now accepts CIDR notation such as 10.1.0.0/16
+* Private registries whose IPs fall in the 127.0.0.0/8 range do not need the `--insecure-registry` flag
+- Skip the experimental registry v2 API when mirroring is enabled
+
+## 1.3.1 (2014-10-28)
+
+#### Security
+* Prevent fallback to SSL protocols < TLS 1.0 for client, daemon and registry
++ Secure HTTPS connection to registries with certificate verification and without HTTP fallback unless `--insecure-registry` is specified
+
+#### Runtime
+- Fix an issue where volumes would not be shared
+
+#### Client
+- Fix an issue with `--iptables=false` not automatically setting `--ip-masq=false`
+- Fix docker run output to non-TTY stdout
+
+#### Builder
+- Fix escaping `$` for environment variables
+- Fix an issue with the lowercase `onbuild` Dockerfile instruction
+- Restrict environment variable expansion to `ENV`, `ADD`, `COPY`, `WORKDIR`, `EXPOSE`, `VOLUME` and `USER`
+
+## 1.3.0 (2014-10-14)
+
+#### Notable features since 1.2.0
++ Docker `exec` allows you to run additional processes inside existing containers
++ Docker `create` gives you the ability to create a container via the CLI without executing a process
++ `--security-opts` options to allow users to customize container labels and apparmor profiles
++ Docker `ps` filters
+- Wildcard support for COPY/ADD
++ Move production URLs to get.docker.com from get.docker.io
++ Allocate IP addresses on the bridge inside a valid CIDR
++ Use drone.io for PR and CI testing
++ Ability to set up an official registry mirror
++ Ability to save multiple images with docker `save`
+
+## 1.2.0 (2014-08-20)
+
+#### Runtime
++ Make /etc/hosts, /etc/resolv.conf and /etc/hostname editable at runtime
++ Auto-restart containers using policies
++ Use /var/lib/docker/tmp for large temporary files
++ `--cap-add` and `--cap-drop` to tweak which linux capabilities you want
++ `--device` to use devices in containers
+
+#### Client
++ `docker search` on private registries
++ Add an `exited` filter to `docker ps --filter`
+* `docker rm -f` now kills instead of stopping
++ Support for IPv6 addresses in the `--dns` flag
+
+#### Proxy
++ Proxy instances in separate processes
+* Small bug fix on UDP proxy
+
+## 1.1.2 (2014-07-23)
+
+#### Runtime
++ Fix port allocation for existing containers
++ Fix container restarts on daemon restart
+
+#### Packaging
++ Fix an /etc/init.d/docker issue on Debian
+
+## 1.1.1 (2014-07-09)
+
+#### Builder
+* Fix an issue with ADD
+
+## 1.1.0 (2014-07-03)
+
+#### Notable features since 1.0.1
++ Add `.dockerignore` support
++ Pause containers during `docker commit`
++ Add `--tail` to `docker logs`
+
+#### Builder
++ Allow a tar file as context for `docker build`
+* Fix an issue with whitespace and multi-line instructions in `Dockerfiles`
+
+#### Runtime
+* Overall performance improvements
+* Allow `/` as the source of `docker run -v`
+* Fix port allocation
+* Fix a bug in `docker save`
+* Add links information to `docker inspect`
+
+#### Client
+* Improve command line parsing for `docker commit`
+
+#### Remote API
+* Improve status codes for the `start` and `stop` endpoints
+
+## 1.0.1 (2014-06-19)
+
+#### Notable features since 1.0.0
+* Enhance security for the LXC driver
+
+#### Builder
+* Fix the `ONBUILD` instruction passed to grandchildren
+
+#### Runtime
+* Fix events subscription
+* Fix the /etc/hostname file with host networking
+* Allow `-h` and `--net=none`
+* Fix an issue with hotplug devices in `--privileged`
+
+#### Client
+* Fix artifacts with events
+* Fix a panic with empty flags
+* Fix `docker cp` on Mac OS X
+
+#### Miscellaneous
+* Fix compilation on Mac OS X
+* Fix several races
+
+## 1.0.0 (2014-06-09)
+
+#### Notable features since 0.12.0
+* Production support
+
+## 0.12.0 (2014-06-05)
+
+#### Notable features since 0.11.0
+* 40+ various improvements to stability, performance and usability
+* New `COPY` Dockerfile instruction to allow copying a local file from the context into the container without ever extracting if the file is a tar file
+* Inherit file permissions from the host on `ADD`
+* New `pause` and `unpause` commands to allow pausing and unpausing of containers using the cgroup freezer
+* The `images` command has a `-f`/`--filter` option to filter the list of images
+* Add `--force-rm` to clean up after a failed build
+* Standardize JSON keys in the Remote API to CamelCase
+* Pulls triggered by `docker run` now assume the `latest` tag if not specified
+* Enhance security on Linux capabilities and device nodes
+
+## 0.11.1 (2014-05-07)
+
+#### Registry
+- Fix push and pull to private registries
+
+## 0.11.0 (2014-05-07)
+
+#### Notable features since 0.10.0
+
+* SELinux support for mount and process labels
+* Linked containers can be accessed by hostname
+* Use the new `--net` flag to allow advanced network configuration such as host networking so that containers can use the host's network interfaces
+* Add a ping endpoint to the Remote API to do healthchecks of your docker daemon
+* Logs can now be returned with an optional timestamp
+* Docker now works with registries that support SHA-512
+* Multiple registry endpoints are supported to allow registry mirrors
+
+## 0.10.0 (2014-04-08)
+
+#### Builder
+- Fix printing multiple messages on a single line. Fixes broken output during builds.
+- Follow symlinks inside the container's root for ADD build instructions.
+- Fix EXPOSE caching.
+
+#### Documentation
+- Add the new options of `docker ps` to the documentation.
+- Add the options of `docker restart` to the documentation.
+- Update daemon docs and help messages for --iptables and --ip-forward.
+- Updated apt-cacher-ng docs example.
+- Remove the duplicate description of --mtu from the docs.
+- Add the missing -t and -v for `docker images` to the docs.
+- Add fixes to the cli docs.
+- Update libcontainer docs.
+- Update images in docs to remove references to AUFS and LXC.
+- Update the nodejs_web_app in the docs to use the new epel RPM address.
+- Fix an external link on security of containers.
+- Update remote API docs.
+- Add image size to history docs.
+- Be explicit about binding to all interfaces in the redis example.
+- Document the DisableNetwork flag in the 1.10 remote api.
+- Document that `--lxc-conf` is lxc only.
+- Add chef usage documentation.
+- Add an example of an image with multiple tags for `docker load`.
+- Explain what `docker run -a` does in the docs.
+
+#### Contrib
+- Add a variable for DOCKER_LOGFILE to sysvinit and use append instead of overwrite in opening the logfile.
+- Fix init script cgroup mounting workarounds to be more similar to cgroupfs-mount and thus work properly.
+- Remove the inotifywait hack from the upstart host-integration example because it's not necessary any more.
+- Add the check-config script to contrib.
+- Fix fish shell completion.
+
+#### Hack
+* Clean up "go test" output from "make test" to be much more readable/scannable.
+* Exclude more "definitely not unit tested Go source code" directories from hack/make/test.
++ Generate md5 and sha256 hashes when building, and upload them via hack/release.sh.
+- Include contributed completions in the Ubuntu PPA.
++ Add cli integration tests.
+* Add tweaks to the hack scripts to make them simpler.
+
+#### Remote API
++ Add TLS auth support for the API.
+* Move git clone from the daemon to the client.
+- Fix content-type detection in docker cp.
+* Split the API into 2 go packages.
+
+#### Runtime
+* Support hairpin NAT without going through the Docker server.
+- devicemapper: succeed immediately when removing non-existent devices.
+- devicemapper: improve handling of devicemapper devices (add per-device lock, increase sleep time and unlock while sleeping).
+- devicemapper: increase the timeout in waitClose to 10 seconds.
+- devicemapper: ensure we shut down the thin pool cleanly.
+- devicemapper: pass info, rather than hash, to activateDeviceIfNeeded, deactivateDevice, setInitialized, deleteDevice.
+- devicemapper: avoid AB-BA deadlock.
+- devicemapper: make shutdown better/faster.
+- Improve alpha sorting in mflag.
+- Remove manual http cookie management because the cookiejar is being used.
+- Use BSD raw mode on Darwin. Fixes nano, tmux and others.
+- Add FreeBSD support for the client.
+- Merge the auth package into registry.
+- Add a deprecation warning for -t on `docker pull`.
+- Remove a goroutine leak on error.
+- Update parseLxcInfo to comply with the new lxc1.0 format.
+- Fix attach exit on darwin.
+- Improve the deprecation message.
+- Retry to retrieve the layer metadata up to 5 times for `docker pull`.
+- Only unshare the mount namespace for execin.
+- Merge existing config when committing.
+- Disable the daemon startup timeout.
+- Fix issue #4681: add a loopback interface when networking is disabled.
+- Add a failing test case for issue #4681.
+- Send SIGTERM to the child, instead of SIGKILL.
+- Show the driver and the kernel version in `docker info` even when not in debug mode.
+- Always symlink /dev/ptmx for libcontainer. This fixes console related problems.
+- Fix an issue caused by the absence of /etc/apparmor.d.
+- Don't leave an empty cidFile behind when failing to create the container.
+- Mount cgroups automatically if they're not mounted already.
+- Use a mock for search tests.
+- Update to double-dash everywhere.
+- Move .dockerenv parsing to the lxc driver.
+- Move all bind-mounts in the container inside the namespace.
+- Don't use a separate bind mount for the container.
+- Always symlink /dev/ptmx for libcontainer.
+- Don't kill by pid for other drivers.
+- Add initial logging to libcontainer.
+* Sort by port in `docker ps`.
+- Move networking drivers into the runtime top level package.
++ Add --no-prune to `docker rmi`.
++ Add time since exit in `docker ps`.
+- graphdriver: add build tags.
+- Prevent allocation of previously allocated ports & improve port allocation.
+* Add support for --since/--before in `docker ps`.
+- Clean up container stop.
++ Add support for configurable dns search domains.
+- Add support for relative WORKDIR instructions.
+- Add an --output flag for docker save.
+- Remove duplication of DNS entries in config merging.
+- Add cpuset.cpus to cgroups and native driver options.
+- Remove docker-ci.
+- Promote btrfs. btrfs is no longer considered experimental.
+- Add an --input flag to `docker load`.
+- Return an error when an existing bridge doesn't match the IP address.
+- Strip comments before parsing line continuations to avoid interpreting instructions as comments.
+- Fix TestOnlyLoopbackExistsWhenUsingDisableNetworkOption to ignore "DOWN" interfaces.
+- Add a systemd implementation of cgroups and make containers show up as systemd units.
+- Fix commit and import when no repository is specified.
+- Remount /var/lib/docker as --private to fix a scaling issue.
+- Use the environment's proxy when pinging the remote registry.
+- Reduce the error level of harmless errors.
+* Allow --volumes-from to be individual files.
+- Fix expanding the buffer in StdCopy.
+- Set error regardless of attach or stdin. This fixes #3364.
+- Add support for --env-file to load environment variables from files.
+- Symlink /etc/mtab and /proc/mounts.
+- Allow pushing a single tag.
+- Shut down containers cleanly at shutdown and wait forever for the containers to shut down. This makes container shutdown on daemon shutdown work properly via SIGTERM.
+- Don't throw an error when starting an already running container.
+- Fix the dynamic port allocation limit.
+- Remove setupDev from libcontainer.
+- Add the API version to `docker version`.
+- Return the correct exit code when receiving a signal and make SIGQUIT quit without cleanup.
+- Fix a --volumes-from mount failure.
+- Allow non-privileged containers to create device nodes.
+- Skip login tests because of an external dependency on a hosted service.
+- Deprecate `docker images --tree` and `docker images --viz`.
+- Deprecate `docker insert`.
+- Include a base abstraction for apparmor. This fixes some apparmor related problems on Ubuntu 14.04.
+- Add a specific error message when hitting 401 over HTTP on push.
+- Fix the absolute volume check.
+- Remove volumes-from from the config.
+- Move DNS options to hostconfig.
+- Update the apparmor profile for libcontainer.
+- Add a deprecation notice for `docker commit -run`.
+
+## 0.9.1 (2014-03-24)
+
+#### Builder
+- Fix printing multiple messages on a single line. Fixes broken output during builds.
+
+#### Documentation
+- Fix an external link on security of containers.
+
+#### Contrib
+- Fix init script cgroup mounting workarounds to be more similar to cgroupfs-mount and thus work properly.
+- Add a variable for DOCKER_LOGFILE to sysvinit and use append instead of overwrite in opening the logfile.
+
+#### Hack
+- Generate md5 and sha256 hashes when building, and upload them via hack/release.sh.
+
+#### Remote API
+- Fix content-type detection in `docker cp`.
+
+#### Runtime
+- Use BSD raw mode on Darwin. Fixes nano, tmux and others.
+- Only unshare the mount namespace for execin. +- Retry to retrieve the layer metadata up to 5 times for `docker pull`. +- Merge existing config when committing. +- Fix panic in monitor. +- Disable daemon startup timeout. +- Fix issue #4681: add loopback interface when networking is disabled. +- Add failing test case for issue #4681. +- Send SIGTERM to child, instead of SIGKILL. +- Show the driver and the kernel version in `docker info` even when not in debug mode. +- Always symlink /dev/ptmx for libcontainer. This fixes console related problems. +- Fix issue caused by the absence of /etc/apparmor.d. +- Don't leave empty cidFile behind when failing to create the container. +- Improve deprecation message. +- Fix attach exit on darwin. +- devicemapper: improve handling of devicemapper devices (add per device lock, increase sleep time, unlock while sleeping). +- devicemapper: succeed immediately when removing non-existent devices. +- devicemapper: increase timeout in waitClose to 10 seconds. +- Remove goroutine leak on error. +- Update parseLxcInfo to comply with new lxc1.0 format. + +## 0.9.0 (2014-03-10) + +#### Builder +- Avoid extra mount/unmount during build. This fixes mount/unmount related errors during build. +- Add error to docker build --rm. This adds missing error handling. +- Forbid chained onbuild, `onbuild from` and `onbuild maintainer` triggers. +- Make `--rm` the default for `docker build`. + +#### Documentation +- Download the docker client binary for Mac over https. +- Update the titles of the install instructions & descriptions. +* Add instructions for upgrading boot2docker. +* Add port forwarding example in OS X install docs. +- Attempt to disentangle repository and registry. +- Update docs to explain more about `docker ps`. +- Update sshd example to use a Dockerfile. +- Rework some examples, including the Python examples. +- Update docs to include instructions for a container's lifecycle. +- Update docs documentation to discuss the docs branch. +- Don't skip cert check for an example & use HTTPS. +- Bring back the memory and swap accounting section which was lost when the kernel page was removed. +- Explain DNS warnings and how to fix them on systems running and using a local nameserver. + +#### Contrib +- Add Tanglu support for mkimage-debootstrap. +- Add SteamOS support for mkimage-debootstrap. + +#### Hack +- Get package coverage when running integration tests. +- Remove the Vagrantfile. This is being replaced with boot2docker. +- Fix tests on systems where aufs isn't available. +- Update packaging instructions and remove the dependency on lxc. + +#### Remote API +* Move code specific to the API to the api package. +- Fix header content type for the API. Makes all endpoints use proper content type. +- Fix registry auth & remove ping calls from CmdPush and CmdPull. +- Add newlines to the JSON stream functions. + +#### Runtime +* Do not ping the registry from the CLI. All requests to registries flow through the daemon. +- Check for nil information return in the lxc driver. This fixes panics with older lxc versions. +- Devicemapper: cleanups and fix for unmount. Fixes two problems which were causing unmount to fail intermittently. +- Devicemapper: remove directory when removing device. Directories don't get left behind when removing the device. +* Devicemapper: enable skip_block_zeroing. Improves performance by not zeroing blocks. +- Devicemapper: fix shutdown warnings. Fixes shutdown warnings concerning pool device removal. +- Ensure docker cp stream is closed properly. 
Fixes problems with files not being copied by `docker cp`. +- Stop making `tcp://` default to `127.0.0.1:4243` and remove the default port for tcp. +- Fix `--run` in `docker commit`. This makes `docker commit --run` work again. +- Fix custom bridge related options. This makes custom bridges work again. ++ Mount-bind the PTY as container console. This allows tmux/screen to run. ++ Add the pure Go libcontainer library to make it possible to run containers using only features of the Linux kernel. ++ Add native exec driver which uses libcontainer and make it the default exec driver. +- Add support for handling extended attributes in archives. +* Set the container MTU to be the same as the host MTU. ++ Add simple sha256 checksums for layers to speed up `docker push`. +* Improve kernel version parsing. +* Allow flag grouping (`docker run -it`). +- Remove chroot exec driver. +- Fix divide by zero to fix panic. +- Rewrite `docker rmi`. +- Fix docker info with lxc 1.0.0. +- Fix fedora tty with apparmor. +* Don't always append env vars, replace defaults with vars from config. +* Fix a goroutine leak. +* Switch to Go 1.2.1. +- Fix unique constraint error checks. +* Handle symlinks for Docker's data directory and for TMPDIR. +- Add deprecation warnings for flags (-flag is deprecated in favor of --flag) +- Add apparmor profile for the native execution driver. +* Move system specific code from archive to pkg/system. +- Fix duplicate signal for `docker run -i -t` (issue #3336). +- Return correct process pid for lxc. +- Add a -G option to specify the group which unix sockets belong to. ++ Add `-f` flag to `docker rm` to force removal of running containers. ++ Kill ghost containers and restart all ghost containers when the docker daemon restarts. ++ Add `DOCKER_RAMDISK` environment variable to make Docker work when the root is on a ramdisk. + +## 0.8.1 (2014-02-18) + +#### Builder + +- Avoid extra mount/unmount during build. This removes an unneeded mount/unmount operation which was causing problems with devicemapper +- Fix regression with ADD of tar files. This stops Docker from decompressing tarballs added via ADD from the local file system +- Add error to `docker build --rm`. This adds a missing error check to ensure failures to remove containers are detected and reported + +#### Documentation + +* Update issue filing instructions +* Warn against the use of symlinks for Docker's storage folder +* Replace the Firefox example with an IceWeasel example +* Rewrite the PostgreSQL example using a Dockerfile and add more details to it +* Improve the OS X documentation + +#### Remote API + +- Fix broken images API for version less than 1.7 +- Use the right encoding for all API endpoints which return JSON +- Move remote api client to api/ +- Queue calls to the API using generic socket wait + +#### Runtime + +- Fix the use of custom settings for bridges and custom bridges +- Refactor the devicemapper code to avoid many mount/unmount race conditions and failures +- Remove two panics which could make Docker crash in some situations +- Don't ping registry from the CLI client +- Enable skip_block_zeroing for devicemapper. This stops devicemapper from always zeroing entire blocks +- Fix --run in `docker commit`. This makes docker commit store `--run` in the image configuration +- Remove directory when removing devicemapper device. This cleans up leftover mount directories +- Drop NET_ADMIN capability for non-privileged containers. 
Unprivileged containers can't change their network configuration
+- Ensure `docker cp` stream is closed properly
+- Avoid extra mount/unmount during container registration. This removes an unneeded mount/unmount operation which was causing problems with devicemapper
+- Stop allowing tcp:// as a default tcp bind address which binds to 127.0.0.1:4243 and remove the default port
++ Mount-bind the PTY as container console. This allows tmux and screen to run in a container
+- Clean up archive closing. This fixes and improves archive handling
+- Fix engine tests on systems where temp directories are symlinked
+- Add test methods for save and load
+- Avoid temporarily unmounting the container when restarting it. This fixes a race for devicemapper during restart
+- Support submodules when building from a GitHub repository
+- Quote volume path to allow spaces
+- Fix remote tar ADD behavior. This fixes a regression which was causing Docker to extract tarballs
+
+## 0.8.0 (2014-02-04)
+
+#### Notable features since 0.7.0
+
+* Images and containers can be removed much faster
+* Building an image from source with docker build is now much faster
+* The Docker daemon starts and stops much faster
+* The memory footprint of many common operations has been reduced, by streaming files instead of buffering them in memory, fixing memory leaks, and fixing various suboptimal memory allocations
+* Several race conditions were fixed, making Docker more stable under very high concurrency load. This makes Docker more stable and less likely to crash
+* All packaging operations are now built on the Go language’s standard tar implementation, which is bundled with Docker itself. This makes packaging more portable across host distributions, and solves several issues caused by quirks and incompatibilities between different distributions of tar
+* Docker can now create, remove and modify larger numbers of containers and images gracefully thanks to more aggressive releasing of system resources. For example, the storage driver API now allows Docker to do reference counting on mounts created by the drivers.
+With the ongoing changes to the networking and execution subsystems of docker, testing these areas has been a focus of the refactoring. By moving these subsystems into separate packages we can test, analyze, and monitor coverage and quality of these packages
+* Many components have been separated into smaller sub-packages, each with a dedicated test suite. As a result the code is better-tested, more readable and easier to change
+
+* The ADD instruction now supports caching, which avoids unnecessarily re-uploading the same source content again and again when it hasn’t changed
+* The new ONBUILD instruction adds to your image a “trigger” instruction to be executed at a later time, when the image is used as the base for another build
+* Docker now ships with an experimental storage driver which uses the BTRFS filesystem for copy-on-write
+* Docker is officially supported on Mac OS X
+* The Docker daemon supports systemd socket activation
+
+## 0.7.6 (2014-01-14)
+
+#### Builder
+
+* Do not follow symlink outside of build context
+
+#### Runtime
+
+- Remount bind mounts when ro is specified
+* Use https for fetching docker version
+
+#### Other
+
+* Inline the test.docker.io fingerprint
+* Add ca-certificates to packaging documentation
+
+## 0.7.5 (2014-01-09)
+
+#### Builder
+
+* Disable compression for build.
More space usage but a much faster upload +- Fix ADD caching for certain paths +- Do not compress archive from git build + +#### Documentation + +- Fix error in GROUP add example +* Make sure the GPG fingerprint is inline in the documentation +* Give more specific advice on setting up signing of commits for DCO + +#### Runtime + +- Fix misspelled container names +- Do not add hostname when networking is disabled +* Return most recent image from the cache by date +- Return all errors from docker wait +* Add Content-Type Header "application/json" to GET /version and /info responses + +#### Other + +* Update DCO to version 1.1 ++ Update Makefile to use "docker:GIT_BRANCH" as the generated image name +* Update Travis to check for new 1.1 DCO version + +## 0.7.4 (2014-01-07) + +#### Builder + +- Fix ADD caching issue with . prefixed path +- Fix docker build on devicemapper by reverting sparse file tar option +- Fix issue with file caching and prevent wrong cache hit +* Use same error handling while unmarshaling CMD and ENTRYPOINT + +#### Documentation + +* Simplify and streamline Amazon Quickstart +* Install instructions use unprefixed Fedora image +* Update instructions for mtu flag for Docker on GCE ++ Add Ubuntu Saucy to installation +- Fix for wrong version warning on master instead of latest + +#### Runtime + +- Only get the image's rootfs when we need to calculate the image size +- Correctly handle unmapping UDP ports +* Make CopyFileWithTar use a pipe instead of a buffer to save memory on docker build +- Fix login message to say pull instead of push +- Fix "docker load" help by removing "SOURCE" prompt and mentioning STDIN +* Make blank -H option default to the same as no -H was sent +* Extract cgroups utilities to own submodule + +#### Other + ++ Add Travis CI configuration to validate DCO and gofmt requirements ++ Add Developer Certificate of Origin Text +* Upgrade VBox Guest Additions +* Check standalone header when pinging a registry server + +## 0.7.3 (2014-01-02) + +#### Builder + ++ Update ADD to use the image cache, based on a hash of the added content +* Add error message for empty Dockerfile + +#### Documentation + +- Fix outdated link to the "Introduction" on www.docker.io ++ Update the docs to get wider when the screen does +- Add information about needing to install LXC when using raw binaries +* Update Fedora documentation to disentangle the docker and docker.io conflict +* Add a note about using the new `-mtu` flag in several GCE zones ++ Add FrugalWare installation instructions ++ Add a more complete example of `docker run` +- Fix API documentation for creating and starting Privileged containers +- Add missing "name" parameter documentation on "/containers/create" +* Add a mention of `lxc-checkconfig` as a way to check for some of the necessary kernel configuration +- Update the 1.8 API documentation with some additions that were added to the docs for 1.7 + +#### Hack + +- Add missing libdevmapper dependency to the packagers documentation +* Update minimum Go requirement to a hard line at Go 1.2+ +* Many minor improvements to the Vagrantfile ++ Add ability to customize dockerinit search locations when compiling (to be used very sparingly only by packagers of platforms who require a nonstandard location) ++ Add coverprofile generation reporting +- Add `-a` to our Go build flags, removing the need for recompiling the stdlib manually +* Update Dockerfile to be more canonical and have less spurious warnings during build +- Fix some miscellaneous `docker pull` progress bar 
display issues
+* Migrate more miscellaneous packages under the "pkg" folder
+* Update TextMate highlighting to automatically be enabled for files named "Dockerfile"
+* Reorganize syntax highlighting files under a common "contrib/syntax" directory
+* Update install.sh script (https://get.docker.io/) to not fail if busybox fails to download or run at the end of the Ubuntu/Debian installation
+* Add support for container names in bash completion
+
+#### Packaging
+
++ Add an official Docker client binary for Darwin (Mac OS X)
+* Remove empty "Vendor" string and add "License" on deb package
++ Add a stubbed version of "/etc/default/docker" in the deb package
+
+#### Runtime
+
+* Update layer application to extract tars in place, avoiding file churn while handling whiteouts
+- Fix permissiveness of mtime comparisons in tar handling (since GNU tar and Go tar do not yet support sub-second mtime precision)
+* Reimplement `docker top` in pure Go to work more consistently, and even inside Docker-in-Docker (thus removing the shell injection vulnerability present in some versions of `lxc-ps`)
++ Update `-H unix://` to work similarly to `-H tcp://` by inserting the default values for missing portions
+- Fix more edge cases regarding dockerinit and deleted or replaced docker or dockerinit files
+* Update container name validation to include '.'
+- Fix use of a symlink or non-absolute path as the argument to `-g` to work as expected
+* Update to handle external mounts outside of LXC, fixing many small mounting quirks and making future execution backends and other features simpler
+* Update to use proper box-drawing characters everywhere in `docker images -tree`
+* Move MTU setting from LXC configuration to directly use netlink
+* Add `-S` option to external tar invocation for more efficient sparse file handling
++ Add arch/os info to User-Agent string, especially for registry requests
++ Add `-mtu` option to Docker daemon for configuring MTU
+- Fix `docker build` to exit with a non-zero exit code on error
++ Add `DOCKER_HOST` environment variable to configure the client `-H` flag without specifying it manually for every invocation
+
+## 0.7.2 (2013-12-16)
+
+#### Runtime
+
++ Validate container names on creation with standard regex
+* Increase maximum image depth to 127 from 42
+* Continue to move api endpoints to the job api
++ Add -bip flag to allow specification of dynamic bridge IP via CIDR
+- Allow bridge creation when ipv6 is not enabled on certain systems
+* Set hostname and IP address from within dockerinit
+* Drop capabilities from within dockerinit
+- Fix volumes on host when a symlink is present in the image
+- Prevent deletion of image if ANY container is depending on it even if the container is not running
+* Update docker push to use new progress display
+* Use os.Lstat to allow mounting unix sockets when inspecting volumes
+- Adjust handling of inactive user login
+- Add missing defines in devicemapper for older kernels
+- Allow untag operations with no container validation
+- Add auth config to docker build
+
+#### Documentation
+
+* Add more information about Docker logging
++ Add RHEL documentation
+* Add a direct example for changing the CMD that is run in a container
+* Update Arch installation documentation
++ Add section on Trusted Builds
++ Add Network documentation page
+
+#### Other
+
++ Add new cover bundle for providing code coverage reporting
+* Separate integration tests in bundles
+* Make Tianon the hack maintainer
+* Update mkimage-debootstrap with more tweaks for keeping
images small +* Use https to get the install script +* Remove vendored dotcloud/tar now that Go 1.2 has been released + +## 0.7.1 (2013-12-05) + +#### Documentation + ++ Add @SvenDowideit as documentation maintainer ++ Add links example ++ Add documentation regarding ambassador pattern ++ Add Google Cloud Platform docs ++ Add dockerfile best practices +* Update doc for RHEL +* Update doc for registry +* Update Postgres examples +* Update doc for Ubuntu install +* Improve remote api doc + +#### Runtime + ++ Add hostconfig to docker inspect ++ Implement `docker log -f` to stream logs ++ Add env variable to disable kernel version warning ++ Add -format to `docker inspect` ++ Support bind-mount for files +- Fix bridge creation on RHEL +- Fix image size calculation +- Make sure iptables are called even if the bridge already exists +- Fix issue with stderr only attach +- Remove init layer when destroying a container +- Fix same port binding on different interfaces +- `docker build` now returns the correct exit code +- Fix `docker port` to display correct port +- `docker build` now check that the dockerfile exists client side +- `docker attach` now returns the correct exit code +- Remove the name entry when the container does not exist + +#### Registry + +* Improve progress bars, add ETA for downloads +* Simultaneous pulls now waits for the first to finish instead of failing +- Tag only the top-layer image when pushing to registry +- Fix issue with offline image transfer +- Fix issue preventing using ':' in password for registry + +#### Other + ++ Add pprof handler for debug ++ Create a Makefile +* Use stdlib tar that now includes fix +* Improve make.sh test script +* Handle SIGQUIT on the daemon +* Disable verbose during tests +* Upgrade to go1.2 for official build +* Improve unit tests +* The test suite now runs all tests even if one fails +* Refactor C in Go (Devmapper) +- Fix OS X compilation + +## 0.7.0 (2013-11-25) + +#### Notable features since 0.6.0 + +* Storage drivers: choose from aufs, device-mapper, or vfs. +* Standard Linux support: docker now runs on unmodified Linux kernels and all major distributions. +* Links: compose complex software stacks by connecting containers to each other. +* Container naming: organize your containers by giving them memorable names. +* Advanced port redirects: specify port redirects per interface, or keep sensitive ports private. +* Offline transfer: push and pull images to the filesystem without losing information. +* Quality: numerous bugfixes and small usability improvements. Significant increase in test coverage. + +## 0.6.7 (2013-11-21) + +#### Runtime + +* Improve stability, fixes some race conditions +* Skip the volumes mounted when deleting the volumes of container. 
+* Fix layer size computation: handle hard links correctly +* Use the work Path for docker cp CONTAINER:PATH +* Fix tmp dir never cleanup +* Speedup docker ps +* More informative error message on name collisions +* Fix nameserver regex +* Always return long id's +* Fix container restart race condition +* Keep published ports on docker stop;docker start +* Fix container networking on Fedora +* Correctly express "any address" to iptables +* Fix network setup when reconnecting to ghost container +* Prevent deletion if image is used by a running container +* Lock around read operations in graph + +#### RemoteAPI + +* Return full ID on docker rmi + +#### Client + ++ Add -tree option to images ++ Offline image transfer +* Exit with status 2 on usage error and display usage on stderr +* Do not forward SIGCHLD to container +* Use string timestamp for docker events -since + +#### Other + +* Update to go 1.2rc5 ++ Add /etc/default/docker support to upstart + +## 0.6.6 (2013-11-06) + +#### Runtime + +* Ensure container name on register +* Fix regression in /etc/hosts ++ Add lock around write operations in graph +* Check if port is valid +* Fix restart runtime error with ghost container networking ++ Add some more colors and animals to increase the pool of generated names +* Fix issues in docker inspect ++ Escape apparmor confinement ++ Set environment variables using a file. +* Prevent docker insert to erase something ++ Prevent DNS server conflicts in CreateBridgeIface ++ Validate bind mounts on the server side ++ Use parent image config in docker build +* Fix regression in /etc/hosts + +#### Client + ++ Add -P flag to publish all exposed ports ++ Add -notrunc and -q flags to docker history +* Fix docker commit, tag and import usage ++ Add stars, trusted builds and library flags in docker search +* Fix docker logs with tty + +#### RemoteAPI + +* Make /events API send headers immediately +* Do not split last column docker top ++ Add size to history + +#### Other + ++ Contrib: Desktop integration. Firefox usecase. 
++ Dockerfile: bump to go1.2rc3 + +## 0.6.5 (2013-10-29) + +#### Runtime + ++ Containers can now be named ++ Containers can now be linked together for service discovery ++ 'run -a', 'start -a' and 'attach' can forward signals to the container for better integration with process supervisors ++ Automatically start crashed containers after a reboot ++ Expose IP, port, and proto as separate environment vars for container links +* Allow ports to be published to specific ips +* Prohibit inter-container communication by default +- Ignore ErrClosedPipe for stdin in Container.Attach +- Remove unused field kernelVersion +* Fix issue when mounting subdirectories of /mnt in container +- Fix untag during removal of images +* Check return value of syscall.Chdir when changing working directory inside dockerinit + +#### Client + +- Only pass stdin to hijack when needed to avoid closed pipe errors +* Use less reflection in command-line method invocation +- Monitor the tty size after starting the container, not prior +- Remove useless os.Exit() calls after log.Fatal + +#### Hack + ++ Add initial init scripts library and a safer Ubuntu packaging script that works for Debian +* Add -p option to invoke debootstrap with http_proxy +- Update install.sh with $sh_c to get sudo/su for modprobe +* Update all the mkimage scripts to use --numeric-owner as a tar argument +* Update hack/release.sh process to automatically invoke hack/make.sh and bail on build and test issues + +#### Other + +* Documentation: Fix the flags for nc in example +* Testing: Remove warnings and prevent mount issues +- Testing: Change logic for tty resize to avoid warning in tests +- Builder: Fix race condition in docker build with verbose output +- Registry: Fix content-type for PushImageJSONIndex method +* Contrib: Improve helper tools to generate debian and Arch linux server images + +## 0.6.4 (2013-10-16) + +#### Runtime + +- Add cleanup of container when Start() fails +* Add better comments to utils/stdcopy.go +* Add utils.Errorf for error logging ++ Add -rm to docker run for removing a container on exit +- Remove error messages which are not actually errors +- Fix `docker rm` with volumes +- Fix some error cases where an HTTP body might not be closed +- Fix panic with wrong dockercfg file +- Fix the attach behavior with -i +* Record termination time in state. 
+- Use empty string so TempDir uses the OS's temp dir automatically +- Make sure to close the network allocators ++ Autorestart containers by default +* Bump vendor kr/pty to commit 3b1f6487b `(syscall.O_NOCTTY)` +* lxc: Allow set_file_cap capability in container +- Move run -rm to the cli only +* Split stdout stderr +* Always create a new session for the container + +#### Testing + +- Add aggregated docker-ci email report +- Add cleanup to remove leftover containers +* Add nightly release to docker-ci +* Add more tests around auth.ResolveAuthConfig +- Remove a few errors in tests +- Catch errClosing error when TCP and UDP proxies are terminated +* Only run certain tests with TESTFLAGS='-run TestName' make.sh +* Prevent docker-ci to test closing PRs +* Replace panic by log.Fatal in tests +- Increase TestRunDetach timeout + +#### Documentation + +* Add initial draft of the Docker infrastructure doc +* Add devenvironment link to CONTRIBUTING.md +* Add `apt-get install curl` to Ubuntu docs +* Add explanation for export restrictions +* Add .dockercfg doc +* Remove Gentoo install notes about #1422 workaround +* Fix help text for -v option +* Fix Ping endpoint documentation +- Fix parameter names in docs for ADD command +- Fix ironic typo in changelog +* Various command fixes in postgres example +* Document how to edit and release docs +- Minor updates to `postgresql_service.rst` +* Clarify LGTM process to contributors +- Corrected error in the package name +* Document what `vagrant up` is actually doing ++ improve doc search results +* Cleanup whitespace in API 1.5 docs +* use angle brackets in MAINTAINER example email +* Update archlinux.rst ++ Changes to a new style for the docs. Includes version switcher. +* Formatting, add information about multiline json +* Improve registry and index REST API documentation +- Replace deprecated upgrading reference to docker-latest.tgz, which hasn't been updated since 0.5.3 +* Update Gentoo installation documentation now that we're in the portage tree proper +* Cleanup and reorganize docs and tooling for contributors and maintainers +- Minor spelling correction of protocoll -> protocol + +#### Contrib + +* Add vim syntax highlighting for Dockerfiles from @honza +* Add mkimage-arch.sh +* Reorganize contributed completion scripts to add zsh completion + +#### Hack + +* Add vagrant user to the docker group +* Add proper bash completion for "docker push" +* Add xz utils as a runtime dep +* Add cleanup/refactor portion of #2010 for hack and Dockerfile updates ++ Add contrib/mkimage-centos.sh back (from #1621), and associated documentation link +* Add several of the small make.sh fixes from #1920, and make the output more consistent and contributor-friendly ++ Add @tianon to hack/MAINTAINERS +* Improve network performance for VirtualBox +* Revamp install.sh to be usable by more people, and to use official install methods whenever possible (apt repo, portage tree, etc.) +- Fix contrib/mkimage-debian.sh apt caching prevention ++ Add Dockerfile.tmLanguage to contrib +* Configured FPM to make /etc/init/docker.conf a config file +* Enable SSH Agent forwarding in Vagrant VM +* Several small tweaks/fixes for contrib/mkimage-debian.sh + +#### Other + +- Builder: Abort build if mergeConfig returns an error and fix duplicate error message +- Packaging: Remove deprecated packaging directory +- Registry: Use correct auth config when logging in. 
+- Registry: Fix the error message so it is the same as the regex
+
+## 0.6.3 (2013-09-23)
+
+#### Packaging
+
+* Add 'docker' group on install for ubuntu package
+* Update tar vendor dependency
+* Download apt key over HTTPS
+
+#### Runtime
+
+- Only copy and change permissions on non-bindmount volumes
+* Allow multiple volumes-from
+- Fix HTTP imports from STDIN
+
+#### Documentation
+
+* Update section on extracting the docker binary after build
+* Update development environment docs for new build process
+* Remove 'base' image from documentation
+
+#### Other
+
+- Client: Fix detach issue
+- Registry: Update regular expression to match index
+
+## 0.6.2 (2013-09-17)
+
+#### Runtime
+
++ Add domainname support
++ Implement image filtering with path.Match
+* Remove unnecessary warnings
+* Remove os/user dependency
+* Only mount the hostname file when the config exists
+* Handle signals within the `docker login` command
+- UID and GID are now also applied to volumes
+- `docker start` sets error code upon error
+- `docker run` sets the same error code as the started process
+
+#### Builder
+
++ Add -rm option in order to remove intermediate containers
+* Allow multiline for the RUN instruction
+
+#### Registry
+
+* Implement login with private registry
+- Fix push issues
+
+#### Other
+
++ Hack: Vendor all dependencies
+* Remote API: Bump to v1.5
+* Packaging: Break down hack/make.sh into small scripts, one per 'bundle': test, binary, ubuntu etc.
+* Documentation: General improvements
+
+## 0.6.1 (2013-08-23)
+
+#### Registry
+
+* Pass "meta" headers in API calls to the registry
+
+#### Packaging
+
+- Use correct upstart script with new build tool
+- Use libffi-dev, don't build it from sources
+- Remove duplicate mercurial install command
+
+## 0.6.0 (2013-08-22)
+
+#### Runtime
+
++ Add lxc-conf flag to allow custom lxc options
++ Add an option to set the working directory
+* Add Image name to LogEvent tests
++ Add -privileged flag and relevant tests, docs, and examples
+* Add websocket support to /container/<id>/attach/ws
+* Add warning when net.ipv4.ip_forwarding = 0
+* Add hostname to environment
+* Add last stable version in `docker version`
+- Fix race conditions in parallel pull
+- Fix Graph ByParent() to generate list of child images per parent image.
+- Fix typo: fmt.Sprint -> fmt.Sprintf
+- Fix small \n error in docker build
+* Fix to "Inject dockerinit at /.dockerinit"
+* Fix #910: print user name to docker info output
+* Use Go 1.1.2 for dockerbuilder
+* Use ranged for loop on channels
+- Use utils.ParseRepositoryTag instead of strings.Split(name, ":") in server.ImageDelete
+- Improve CMD, ENTRYPOINT, and attach docs.
+- Improve connect message with socket error
+- Load authConfig only when needed and fix useless WARNING
+- Show tag used when image is missing
+* Apply volumes-from before creating volumes
+- Make docker run handle SIGINT/SIGTERM
+- Prevent crash when .dockercfg not readable
+- Install script should be fetched over https, not http.
+* API, issue 1471: Use groups for socket permissions
+- Correctly detect IPv4 forwarding
+* Mount /dev/shm as a tmpfs
+- Switch from http to https for get.docker.io
+* Let userland proxy handle container-bound traffic
+* Update the Docker CLI to specify a value for the "Host" header.
+- Change network range to avoid conflict with EC2 DNS
+- Reduce connect and read timeout when pinging the registry
+* Parallel pull
+- Handle ip route showing mask-less IP addresses
+* Allow ENTRYPOINT without CMD
+- Always consider localhost as a domain name when parsing the FQN repos name
+* Refactor checksum
+
+#### Documentation
+
+* Add MongoDB image example
+* Add instructions for creating and using the docker group
+* Add sudo to examples and installation to documentation
+* Add ufw doc
+* Add a reference to ps -a
+* Add information about Docker's high-level tools over LXC.
+* Fix typo in docs for docker run -dns
+* Fix a typo in the ubuntu installation guide
+* Fix to docs regarding adding docker groups
+* Update default -H docs
+* Update readme with dependencies for building
+* Update amazon.rst to explain that Vagrant is not necessary for running Docker on ec2
+* PostgreSQL service example in documentation
+* Suggest installing linux-headers by default.
+* Change the twitter handle
+* Clarify Amazon EC2 installation
+* 'Base' image is deprecated and should no longer be referenced in the docs.
+* Move note about officially supported kernel
+- Solved the logo being squished in Safari
+
+#### Builder
+
++ Add USER instruction to Dockerfile
++ Add workdir support for the Buildfile
+* Add no cache for docker build
+- Fix docker build and docker events output
+- Only count known instructions as build steps
+- Make sure ENV instruction within build performs a commit each time
+- Forbid certain paths within docker build ADD
+- Repository name (and optionally a tag) in build usage
+- Make sure ADD will create everything in 0755
+
+#### Remote API
+
+* Sort Images by most recent creation date.
+* Reworking opaque requests in registry module
+* Add image name in /events
+* Use mime pkg to parse Content-Type
+* 650 http utils and user agent field
+
+#### Hack
+
++ Bash Completion: Limit commands to containers of a relevant state
+* Add docker dependencies coverage testing into docker-ci
+
+#### Packaging
+
++ Docker-brew 0.5.2 support and memory footprint reduction
+* Add new docker dependencies into docker-ci
+- Revert "docker.upstart: avoid spawning a `sh` process"
++ Docker-brew and Docker standard library
++ Release docker with docker
+* Fix the upstart script generated by get.docker.io
+* Enabled the docs to generate manpages.
+* Revert Bind daemon to 0.0.0.0 in Vagrant.
+
+#### Registry
+
+* Improve auth push
+* Registry unit tests + mock registry
+
+#### Tests
+
+* Improve TestKillDifferentUser to prevent timeout on buildbot
+- Fix typo in TestBindMounts (runContainer called without image)
+* Improve TestGetContainersTop so it does not rely on sleep
+* Relax the lo interface test to allow iface index != 1
+* Add registry functional test to docker-ci
+* Add some tests in server and utils
+
+#### Other
+
+* Contrib: bash completion script
+* Client: Add docker cp command and copy api endpoint to copy container files/folders to the host
+* Don't read from stdout when only attached to stdin
+
+## 0.5.3 (2013-08-13)
+
+#### Runtime
+
+* Use docker group for socket permissions
+- Spawn shell within upstart script
+- Handle ip route showing mask-less IP addresses
+- Add hostname to environment
+
+#### Builder
+
+- Make sure ENV instruction within build performs a commit each time
+
+## 0.5.2 (2013-08-08)
+
+* Builder: Forbid certain paths within docker build ADD
+- Runtime: Change network range to avoid conflict with EC2 DNS
+* API: Change daemon to listen on unix socket by default
+
+## 0.5.1 (2013-07-30)
+
+#### Runtime
+
++ Add `ps` args to `docker top`
++ Add support for container ID files (pidfile like)
++ Add container=lxc in default env
++ Support networkless containers with `docker run -n` and `docker -d -b=none`
+* Stdout/stderr logs are now stored in the same file as JSON
+* Allocate a /16 IP range by default, with fallback to /24. Try 12 ranges instead of 3.
+* Change .dockercfg format to json and support multiple auth remote
+- Do not override volumes from config
+- Fix issue with EXPOSE override
+
+#### API
+
++ Docker client now sets useragent (RFC 2616)
++ Add /events endpoint
+
+#### Builder
+
++ ADD command now understands URLs
++ CmdAdd and CmdEnv now respect Dockerfile-set ENV variables
+- Create directories with 755 instead of 700 within ADD instruction
+
+#### Hack
+
+* Simplify unit tests with helpers
+* Improve docker.upstart event
+* Add coverage testing into docker-ci
+
+## 0.5.0 (2013-07-17)
+
+#### Runtime
+
++ List all processes running inside a container with 'docker top'
++ Host directories can be mounted as volumes with 'docker run -v'
++ Containers can expose public UDP ports (eg, '-p 123/udp')
++ Optionally specify an exact public port (eg. '-p 80:4500')
+* 'docker login' supports additional options
+- Don't save a container's hostname when committing an image.
+
+#### Registry
+
++ New image naming scheme inspired by Go packaging convention allows arbitrary combinations of registries
+- Fix issues when uploading images to a private registry
+
+#### Builder
+
++ ENTRYPOINT instruction sets a default binary entry point to a container
++ VOLUME instruction marks a part of the container as persistent data
+* 'docker build' displays the full output of a build by default
+
+## 0.4.8 (2013-07-01)
+
++ Builder: New build operation ENTRYPOINT adds an executable entry point to the container.
+- Runtime: Fix a bug which caused 'docker run -d' to no longer print the container ID.
+- Tests: Fix issues in the test suite + +## 0.4.7 (2013-06-28) + +#### Remote API + +* The progress bar updates faster when downloading and uploading large files +- Fix a bug in the optional unix socket transport + +#### Runtime + +* Improve detection of kernel version ++ Host directories can be mounted as volumes with 'docker run -b' +- fix an issue when only attaching to stdin +* Use 'tar --numeric-owner' to avoid uid mismatch across multiple hosts + +#### Hack + +* Improve test suite and dev environment +* Remove dependency on unit tests on 'os/user' + +#### Other + +* Registry: easier push/pull to a custom registry ++ Documentation: add terminology section + +## 0.4.6 (2013-06-22) + +- Runtime: fix a bug which caused creation of empty images (and volumes) to crash. + +## 0.4.5 (2013-06-21) + ++ Builder: 'docker build git://URL' fetches and builds a remote git repository +* Runtime: 'docker ps -s' optionally prints container size +* Tests: improved and simplified +- Runtime: fix a regression introduced in 0.4.3 which caused the logs command to fail. +- Builder: fix a regression when using ADD with single regular file. + +## 0.4.4 (2013-06-19) + +- Builder: fix a regression introduced in 0.4.3 which caused builds to fail on new clients. + +## 0.4.3 (2013-06-19) + +#### Builder + ++ ADD of a local file will detect tar archives and unpack them +* ADD improvements: use tar for copy + automatically unpack local archives +* ADD uses tar/untar for copies instead of calling 'cp -ar' +* Fix the behavior of ADD to be (mostly) reverse-compatible, predictable and well-documented. +- Fix a bug which caused builds to fail if ADD was the first command +* Nicer output for 'docker build' + +#### Runtime + +* Remove bsdtar dependency +* Add unix socket and multiple -H support +* Prevent rm of running containers +* Use go1.1 cookiejar +- Fix issue detaching from running TTY container +- Forbid parallel push/pull for a single image/repo. Fixes #311 +- Fix race condition within Run command when attaching. + +#### Client + +* HumanReadable ProgressBar sizes in pull +* Fix docker version`s git commit output + +#### API + +* Send all tags on History API call +* Add tag lookup to history command. 
Fixes #882
+
+#### Documentation
+
+- Fix missing command in irc bouncer example
+
+## 0.4.2 (2013-06-17)
+
+- Packaging: Bumped version to work around an Ubuntu bug
+
+## 0.4.1 (2013-06-17)
+
+#### Remote API
+
++ Add flag to enable cross domain requests
++ Add images and containers sizes in docker ps and docker images
+
+#### Runtime
+
++ Configure dns configuration host-wide with 'docker -d -dns'
++ Detect faulty DNS configuration and replace it with a public default
++ Allow docker run <image>:<tag>
++ You can now specify public port (ex: -p 80:4500)
+* Improve image removal to garbage-collect unreferenced parents
+
+#### Client
+
+* Allow multiple params in inspect
+* Print the container id before the hijack in `docker run`
+
+#### Registry
+
+* Add regexp check on repo's name
+* Move auth to the client
+- Remove login check on pull
+
+#### Other
+
+* Vagrantfile: Add the rest api port to vagrantfile's port_forward
+* Upgrade to Go 1.1
+- Builder: don't ignore last line in Dockerfile when it doesn't end with \n
+
+## 0.4.0 (2013-06-03)
+
+#### Builder
+
++ Introducing Builder
++ 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile
+
+#### Remote API
+
++ Introducing Remote API
++ Control Docker programmatically using a simple HTTP/json API
+
+#### Runtime
+
+* Various reliability and usability improvements
+
+## 0.3.4 (2013-05-30)
+
+#### Builder
+
++ 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile
++ 'docker build -t FOO' applies the tag FOO to the newly built container.
+
+#### Runtime
+
++ Interactive TTYs correctly handle window resize
+* Fix how configuration is merged between layers
+
+#### Remote API
+
++ Split stdout and stderr on 'docker run'
++ Optionally listen on a different IP and port (use at your own risk)
+
+#### Documentation
+
+* Improve install instructions.
+
+## 0.3.3 (2013-05-23)
+
+- Registry: Fix push regression
+- Various bugfixes
+
+## 0.3.2 (2013-05-09)
+
+#### Registry
+
+* Improve the checksum process
+* Use the size to have a good progress bar while pushing
+* Use the actual archive if it exists in order to speed up the push
+- Fix error 400 on push
+
+#### Runtime
+
+* Store the actual archive on commit
+
+## 0.3.1 (2013-05-08)
+
+#### Builder
+
++ Implement the autorun capability within docker builder
++ Add caching to docker builder
++ Add support for docker builder with native API as top level command
++ Implement ENV within docker builder
+- Check the command existence prior to create and add unit tests for the case
+* Use any whitespace instead of tabs
+
+#### Runtime
+
++ Add go version to debug info
+* Kernel version - don't show the dash if flavor is empty
+
+#### Registry
+
++ Add docker search top level command in order to search a repository
+- Fix pull for official images with a specific tag
+- Fix issue when logging in with a different user and trying to push
+* Improve checksum - async calculation
+
+#### Images
+
++ Output graph of images to dot (graphviz)
+- Fix ByParent function
+
+#### Documentation
+
++ New introduction and high-level overview
++ Add the documentation for docker builder
+- CSS fix for docker documentation to make REST API docs look better.
+- Fix CouchDB example page header mistake
+- Fix README formatting
+* Update www.docker.io website.
+ +#### Other + ++ Website: new high-level overview +- Makefile: Swap "go get" for "go get -d", especially to compile on go1.1rc +* Packaging: packaging ubuntu; issue #510: Use goland-stable PPA package to build docker + +## 0.3.0 (2013-05-06) + +#### Runtime + +- Fix the command existence check +- strings.Split may return an empty string on no match +- Fix an index out of range crash if cgroup memory is not + +#### Documentation + +* Various improvements ++ New example: sharing data between 2 couchdb databases + +#### Other + +* Vagrant: Use only one deb line in /etc/apt ++ Registry: Implement the new registry + +## 0.2.2 (2013-05-03) + ++ Support for data volumes ('docker run -v=PATH') ++ Share data volumes between containers ('docker run -volumes-from') ++ Improve documentation +* Upgrade to Go 1.0.3 +* Various upgrades to the dev environment for contributors + +## 0.2.1 (2013-05-01) + ++ 'docker commit -run' bundles a layer with default runtime options: command, ports etc. +* Improve install process on Vagrant ++ New Dockerfile operation: "maintainer" ++ New Dockerfile operation: "expose" ++ New Dockerfile operation: "cmd" ++ Contrib script to build a Debian base layer ++ 'docker -d -r': restart crashed containers at daemon startup +* Runtime: improve test coverage + +## 0.2.0 (2013-04-23) + +- Runtime: ghost containers can be killed and waited for +* Documentation: update install instructions +- Packaging: fix Vagrantfile +- Development: automate releasing binaries and ubuntu packages ++ Add a changelog +- Various bugfixes + +## 0.1.8 (2013-04-22) + +- Dynamically detect cgroup capabilities +- Issue stability warning on kernels <3.8 +- 'docker push' buffers on disk instead of memory +- Fix 'docker diff' for removed files +- Fix 'docker stop' for ghost containers +- Fix handling of pidfile +- Various bugfixes and stability improvements + +## 0.1.7 (2013-04-18) + +- Container ports are available on localhost +- 'docker ps' shows allocated TCP ports +- Contributors can run 'make hack' to start a continuous integration VM +- Streamline ubuntu packaging & uploading +- Various bugfixes and stability improvements + +## 0.1.6 (2013-04-17) + +- Record the author an image with 'docker commit -author' + +## 0.1.5 (2013-04-17) + +- Disable standalone mode +- Use a custom DNS resolver with 'docker -d -dns' +- Detect ghost containers +- Improve diagnosis of missing system capabilities +- Allow disabling memory limits at compile time +- Add debian packaging +- Documentation: installing on Arch Linux +- Documentation: running Redis on docker +- Fix lxc 0.9 compatibility +- Automatically load aufs module +- Various bugfixes and stability improvements + +## 0.1.4 (2013-04-09) + +- Full support for TTY emulation +- Detach from a TTY session with the escape sequence `C-p C-q` +- Various bugfixes and stability improvements +- Minor UI improvements +- Automatically create our own bridge interface 'docker0' + +## 0.1.3 (2013-04-04) + +- Choose TCP frontend port with '-p :PORT' +- Layer format is versioned +- Major reliability improvements to the process manager +- Various bugfixes and stability improvements + +## 0.1.2 (2013-04-03) + +- Set container hostname with 'docker run -h' +- Selective attach at run with 'docker run -a [stdin[,stdout[,stderr]]]' +- Various bugfixes and stability improvements +- UI polish +- Progress bar on push/pull +- Use XZ compression by default +- Make IP allocator lazy + +## 0.1.1 (2013-03-31) + +- Display shorthand IDs for convenience +- Stabilize process management +- Layers 
can include a commit message +- Simplified 'docker attach' +- Fix support for re-attaching +- Various bugfixes and stability improvements +- Auto-download at run +- Auto-login on push +- Beefed up documentation + +## 0.1.0 (2013-03-23) + +Initial public release + +- Implement registry in order to push/pull images +- TCP port allocation +- Fix termcaps on Linux +- Add documentation +- Add Vagrant support with Vagrantfile +- Add unit tests +- Add repository/tags to ease image management +- Improve the layer implementation diff --git a/vendor/github.com/moby/moby/CONTRIBUTING.md b/vendor/github.com/moby/moby/CONTRIBUTING.md new file mode 100644 index 000000000..917214cd1 --- /dev/null +++ b/vendor/github.com/moby/moby/CONTRIBUTING.md @@ -0,0 +1,455 @@ +# Contributing to Docker + +Want to hack on Docker? Awesome! We have a contributor's guide that explains +[setting up a Docker development environment and the contribution +process](https://docs.docker.com/opensource/project/who-written-for/). + +[![Contributors guide](docs/static_files/contributors.png)](https://docs.docker.com/opensource/project/who-written-for/) + +This page contains information about reporting issues as well as some tips and +guidelines useful to experienced open source contributors. Finally, make sure +you read our [community guidelines](#docker-community-guidelines) before you +start participating. + +## Topics + +* [Reporting Security Issues](#reporting-security-issues) +* [Design and Cleanup Proposals](#design-and-cleanup-proposals) +* [Reporting Issues](#reporting-other-issues) +* [Quick Contribution Tips and Guidelines](#quick-contribution-tips-and-guidelines) +* [Community Guidelines](#docker-community-guidelines) + +## Reporting security issues + +The Docker maintainers take security seriously. If you discover a security +issue, please bring it to their attention right away! + +Please **DO NOT** file a public issue, instead send your report privately to +[security@docker.com](mailto:security@docker.com). + +Security reports are greatly appreciated and we will publicly thank you for it. +We also like to send gifts—if you're into Docker schwag, make sure to let +us know. We currently do not offer a paid security bounty program, but are not +ruling it out in the future. + + +## Reporting other issues + +A great way to contribute to the project is to send a detailed report when you +encounter an issue. We always appreciate a well-written, thorough bug report, +and will thank you for it! + +Check that [our issue database](https://github.com/moby/moby/issues) +doesn't already include that problem or suggestion before submitting an issue. +If you find a match, you can use the "subscribe" button to get notified on +updates. Do *not* leave random "+1" or "I have this too" comments, as they +only clutter the discussion, and don't help resolving it. However, if you +have ways to reproduce the issue or have additional information that may help +resolving the issue, please leave a comment. + +When reporting issues, always include: + +* The output of `docker version`. +* The output of `docker info`. + +Also include the steps required to reproduce the problem if possible and +applicable. This information will help us review and fix your issue faster. +When sending lengthy log-files, consider posting them as a gist (https://gist.github.com). +Don't forget to remove sensitive data from your logfiles before posting (you can +replace those parts with "REDACTED"). 
+
+## Quick contribution tips and guidelines
+
+This section gives the experienced contributor some tips and guidelines.
+
+### Pull requests are always welcome
+
+Not sure if that typo is worth a pull request? Found a bug and know how to fix
+it? Do it! We will appreciate it. Any significant improvement should be
+documented as [a GitHub issue](https://github.com/moby/moby/issues) before
+anybody starts working on it.
+
+We are always thrilled to receive pull requests. We do our best to process them
+quickly. If your pull request is not accepted on the first try,
+don't get discouraged! Our contributor's guide explains [the review process we
+use for simple changes](https://docs.docker.com/opensource/workflow/make-a-contribution/).
+
+### Design and cleanup proposals
+
+You can propose new designs for existing Docker features. You can also design
+entirely new features. We really appreciate contributors who want to refactor or
+otherwise clean up our project. For information on making these types of
+contributions, see [the advanced contribution
+section](https://docs.docker.com/opensource/workflow/advanced-contributing/) in
+the contributors guide.
+
+We try hard to keep Docker lean and focused. Docker can't do everything for
+everybody. This means that we might decide against incorporating a new feature.
+However, there might be a way to implement that feature *on top of* Docker.
+
+### Talking to other Docker users and contributors
+
+**Forums**: A public forum for users to discuss questions and explore current
+design patterns and best practices about Docker and related projects in the
+Docker Ecosystem. To participate, just log in with your Docker Hub account on
+https://forums.docker.com.
+
+**Internet Relay Chat (IRC)**: IRC is a direct line to our most knowledgeable
+Docker users; we have both the #docker and #docker-dev group on
+irc.freenode.net. IRC is a rich chat protocol but it can overwhelm new users.
+You can search our chat archives. Read our IRC quickstart guide for an easy
+way to get started.
+
+**Google Group**: The docker-dev group is for contributors and other people
+contributing to the Docker project. You can join them without a Google account
+by sending an email to docker-dev+subscribe@googlegroups.com. After receiving
+the join-request message, you can simply reply to that to confirm the
+subscription.
+
+**Twitter**: You can follow Docker's Twitter feed to get updates on our
+products. You can also tweet us questions or just share blogs or stories.
+
+**Stack Overflow**: Stack Overflow has thousands of Docker questions listed.
+We regularly monitor Docker questions and so do many other knowledgeable
+Docker users.
+
+### Conventions
+
+Fork the repository and make changes on your fork in a feature branch:
+
+- If it's a bug fix branch, name it XXXX-something where XXXX is the number of
+  the issue.
+- If it's a feature branch, create an enhancement issue to announce
+  your intentions, and name it XXXX-something where XXXX is the number of the
+  issue.
+
+Submit unit tests for your changes. Go has a great test framework built in; use
+it! Take a look at existing tests for inspiration. [Run the full test
+suite](https://docs.docker.com/opensource/project/test-and-docs/) on your branch before
+submitting a pull request.
+
+Update the documentation when creating or modifying features. Test your
+documentation changes for clarity, concision, and correctness, as well as a
+clean documentation build. See our contributors guide for [our style
+guide](https://docs.docker.com/opensource/doc-style) and instructions on [building
+the documentation](https://docs.docker.com/opensource/project/test-and-docs/#build-and-test-the-documentation).
+
+Write clean code. Universally formatted code promotes ease of writing, reading,
+and maintenance. Always run `gofmt -s -w file.go` on each changed file before
+committing your changes. Most editors have plug-ins that do this automatically.
+
+Pull request descriptions should be as clear as possible and include a reference
+to all the issues that they address.
+
+### Successful Changes
+
+Before contributing large or high impact changes, make the effort to coordinate
+with the maintainers of the project before submitting a pull request. This
+prevents you from doing extra work that may or may not be merged.
+
+Large PRs that are just submitted without any prior communication are unlikely
+to be successful.
+
+While pull requests are the methodology for submitting changes to code, changes
+are much more likely to be accepted if they are accompanied by additional
+engineering work. While we don't define this explicitly, most of these goals
+are accomplished through communication of the design goals and subsequent
+solutions. Often, it helps to first state the problem before presenting
+solutions.
+
+Typically, the best methods of accomplishing this are to submit an issue,
+stating the problem. This issue can include a problem statement and a
+checklist with requirements. If solutions are proposed, alternatives should be
+listed and eliminated. Even if the criteria for elimination of a solution is
+frivolous, say so.
+
+Larger changes typically work best with design documents. These are focused on
+providing context to the design at the time the feature was conceived and can
+inform future documentation contributions.
+
+### Commit Messages
+
+Commit messages must start with a capitalized and short summary (max. 50 chars)
+written in the imperative, followed by an optional, more detailed explanatory
+text which is separated from the summary by an empty line.
+
+Commit messages should follow best practices, including explaining the context
+of the problem and how it was solved, including any caveats or follow-up changes
+required. They should tell the story of the change and provide readers with an
+understanding of what led to it.
+
+If you're lost about what this even means, please see [How to Write a Git
+Commit Message](http://chris.beams.io/posts/git-commit/) for a start.
+
+In practice, the best approach to maintaining a nice commit message is to
+leverage a `git add -p` and `git commit --amend` to formulate a solid
+changeset.
This allows one to piece together
+a change, as information becomes
+available.
+
+If you squash a series of commits, don't just submit that. Re-write the commit
+message, as if the series of commits was a single stroke of brilliance.
+
+That said, there is no requirement to have a single commit for a PR, as long as
+each commit tells the story. For example, if there is a feature that requires a
+package, it might make sense to have the package in a separate commit and then
+have a subsequent commit that uses it.
+
+Remember, you're telling part of the story with the commit message. Don't make
+your chapter weird.
+
+### Review
+
+Code review comments may be added to your pull request. Discuss, then make the
+suggested modifications and push additional commits to your feature branch. Post
+a comment after pushing. New commits show up in the pull request automatically,
+but the reviewers are notified only when you comment.
+
+Pull requests must be cleanly rebased on top of master without multiple branches
+mixed into the PR.
+
+**Git tip**: If your PR no longer merges cleanly, use `git rebase master` in your
+feature branch to update your pull request rather than `git merge master`.
+
+Before you make a pull request, squash your commits into logical units of work
+using `git rebase -i` and `git push -f`. A logical unit of work is a consistent
+set of patches that should be reviewed together: for example, upgrading the
+version of a vendored dependency and taking advantage of its now available new
+feature constitute two separate units of work. Implementing a new function and
+calling it in another file constitute a single logical unit of work. The vast
+majority of submissions should have a single commit, so if in doubt: squash
+down to one.
+
+After every commit, [make sure the test suite passes](https://docs.docker.com/opensource/project/test-and-docs/). Include documentation
+changes in the same pull request so that a revert would remove all traces of
+the feature or fix.
+
+Include an issue reference like `Closes #XXXX` or `Fixes #XXXX` in commits that
+close an issue. Including references automatically closes the issue on a merge.
+
+Please do not add yourself to the `AUTHORS` file, as it is regenerated regularly
+from the Git history.
+
+Please see the [Coding Style](#coding-style) for further guidelines.
+
+### Merge approval
+
+Docker maintainers use LGTM (Looks Good To Me) in comments on the code review to
+indicate acceptance.
+
+A change requires LGTMs from an absolute majority of the maintainers of each
+component affected. For example, if a change affects `docs/` and `registry/`, it
+needs an absolute majority from the maintainers of `docs/` AND, separately, an
+absolute majority of the maintainers of `registry/`.
+
+For more details, see the [MAINTAINERS](MAINTAINERS) page.
+
+### Sign your work
+
+The sign-off is a simple line at the end of the explanation for the patch. Your
+signature certifies that you wrote the patch or otherwise have the right to pass
+it on as an open-source patch. The rules are pretty simple: if you can certify
+the below (from [developercertificate.org](http://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+1 Letterman Drive
+Suite D4700
+San Francisco, CA, 94129
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+After every commit, [make sure the test suite passes](https://docs.docker.com/opensource/project/test-and-docs/).
+Include documentation changes in the same pull request so that a revert would
+remove all traces of the feature or fix.
+
+Include an issue reference like `Closes #XXXX` or `Fixes #XXXX` in commits that
+close an issue. Including references automatically closes the issue on a merge.
+
+Please do not add yourself to the `AUTHORS` file, as it is regenerated regularly
+from the Git history.
+
+Please see the [Coding Style](#coding-style) for further guidelines.
+
+### Merge approval
+
+Docker maintainers use LGTM (Looks Good To Me) in comments on the code review to
+indicate acceptance.
+
+A change requires LGTMs from an absolute majority of the maintainers of each
+component affected. For example, if a change affects `docs/` and `registry/`, it
+needs an absolute majority from the maintainers of `docs/` AND, separately, an
+absolute majority of the maintainers of `registry/`.
+
+For more details, see the [MAINTAINERS](MAINTAINERS) page.
+
+### Sign your work
+
+The sign-off is a simple line at the end of the explanation for the patch. Your
+signature certifies that you wrote the patch or otherwise have the right to pass
+it on as an open-source patch. The rules are pretty simple: if you can certify
+the below (from [developercertificate.org](http://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+1 Letterman Drive
+Suite D4700
+San Francisco, CA, 94129
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+    have the right to submit it under the open source license
+    indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+    of my knowledge, is covered under an appropriate open source
+    license and I have the right under that license to submit that
+    work with modifications, whether created in whole or in part
+    by me, under the same open source license (unless I am
+    permitted to submit under a different license), as indicated
+    in the file; or
+
+(c) The contribution was provided directly to me by some other
+    person who certified (a), (b) or (c) and I have not modified
+    it.
+
+(d) I understand and agree that this project and the contribution
+    are public and that a record of the contribution (including all
+    personal information I submit with it, including my sign-off) is
+    maintained indefinitely and may be redistributed consistent with
+    this project or the open source license(s) involved.
+```
+
+Then you just add a line to every git commit message:
+
+    Signed-off-by: Joe Smith <joe.smith@email.com>
+
+Use your real name (sorry, no pseudonyms or anonymous contributions).
+
+If you set your `user.name` and `user.email` git configs, you can sign your
+commit automatically with `git commit -s`.
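+
+For example (the name and email below are placeholders), you can configure git
+once and have every commit signed off automatically:
+
+```
+git config --global user.name "Joe Smith"
+git config --global user.email "joe.smith@email.com"
+
+# -s appends the Signed-off-by trailer using the configured identity
+git commit -s -m "Fix broken link in docs"
+```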
+
+### How can I become a maintainer?
+
+The procedures for adding new maintainers are explained in the
+global [MAINTAINERS](https://github.com/docker/opensource/blob/master/MAINTAINERS)
+file in the [https://github.com/docker/opensource/](https://github.com/docker/opensource/)
+repository.
+
+Don't forget: being a maintainer is a time investment. Make sure you
+will have time to make yourself available. You don't have to be a
+maintainer to make a difference on the project!
+
+## Docker community guidelines
+
+We want to keep the Docker community awesome, growing and collaborative. We need
+your help to keep it that way. To help with this we've come up with some general
+guidelines for the community as a whole:
+
+* Be nice: Be courteous, respectful and polite to fellow community members:
+  no regional, racial, gender, or other abuse will be tolerated. We like
+  nice people way better than mean ones!
+
+* Encourage diversity and participation: Make everyone in our community feel
+  welcome, regardless of their background and the extent of their
+  contributions, and do everything possible to encourage participation in
+  our community.
+
+* Keep it legal: Basically, don't get us in trouble. Share only content that
+  you own, do not share private or sensitive information, and don't break
+  the law.
+
+* Stay on topic: Make sure that you are posting to the correct channel and
+  avoid off-topic discussions. Remember when you update an issue or respond
+  to an email you are potentially sending to a large number of people. Please
+  consider this before you update. Also remember that nobody likes spam.
+
+* Don't send email to the maintainers: There's no need to send email to the
+  maintainers to ask them to investigate an issue or to take a look at a
+  pull request. Instead of sending an email, GitHub mentions should be
+  used to ping maintainers to review a pull request, a proposal or an
+  issue.
+
+### Guideline violations — 3 strikes method
+
+The point of this section is not to find opportunities to punish people, but we
+do need a fair way to deal with people who are making our community suck.
+
+1. First occurrence: We'll give you a friendly, but public reminder that the
+   behavior is inappropriate according to our guidelines.
+
+2. Second occurrence: We will send you a private message with a warning that
+   any additional violations will result in removal from the community.
+
+3. Third occurrence: Depending on the violation, we may need to delete or ban
+   your account.
+
+**Notes:**
+
+* Obvious spammers are banned on first occurrence. If we don't do this, we'll
+  have spam all over the place.
+
+* Violations are forgiven after 6 months of good behavior, and we won't hold a
+  grudge.
+
+* People who commit minor infractions will get some education rather than
+  being hammered by the 3 strikes process.
+
+* The rules apply equally to everyone in the community, no matter how much
+  you've contributed.
+
+* Extreme violations of a threatening, abusive, destructive or illegal nature
+  will be addressed immediately and are not subject to 3 strikes or forgiveness.
+
+* Contact abuse@docker.com to report abuse or appeal violations. In the case of
+  appeals, we know that mistakes happen, and we'll work with you to come up with a
+  fair solution if there has been a misunderstanding.
+
+## Coding Style
+
+Unless explicitly stated, we follow all coding guidelines from the Go
+community. While some of these standards may seem arbitrary, they somehow seem
+to result in a solid, consistent codebase.
+
+It is possible that the code base does not currently comply with these
+guidelines. We are not looking for a massive PR that fixes this, since that
+goes against the spirit of the guidelines. All new contributions should make a
+best effort to clean up and leave the code base better than they found it.
+Obviously, apply your best judgement. Remember, the goal here is to make the
+code base easier for humans to navigate and understand. Always keep that in
+mind when nudging others to comply.
+
+The rules:
+
+1. All code should be formatted with `gofmt -s`.
+2. All code should pass the default levels of
+   [`golint`](https://github.com/golang/lint).
+3. All code should follow the guidelines covered in [Effective
+   Go](http://golang.org/doc/effective_go.html) and [Go Code Review
+   Comments](https://github.com/golang/go/wiki/CodeReviewComments).
+4. Comment the code. Tell us the why, the history and the context.
+5. Document _all_ declarations and methods, even private ones. Declare
+   expectations, caveats and anything else that may be important. If a type
+   gets exported, having the comments already there will ensure it's ready.
+6. Variable name length should be proportional to its context and no longer.
+   `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`.
+   In practice, short methods will have short variable names and globals will
+   have longer names.
+7. No underscores in package names. If you need a compound name, step back,
+   and re-examine why you need a compound name. If you still think you need a
+   compound name, lose the underscore.
+8. No utils or helpers packages. If a function is not general enough to
+   warrant its own package, it has not been written generally enough to be a
+   part of a util package. Just leave it unexported and well-documented.
+9. All tests should run with `go test` and outside tooling should not be
+   required. No, we don't need another unit testing framework. Assertion
+   packages are acceptable if they provide _real_ incremental value.
+10. Even though we call these "rules" above, they are actually just
+    guidelines. Since you've read all the rules, you now know that.
+
+If you are having trouble getting into the mood of idiomatic Go, we recommend
+reading through [Effective Go](https://golang.org/doc/effective_go.html). The
+[Go Blog](https://blog.golang.org) is also a great resource. Drinking the
+kool-aid is a lot easier than going thirsty.
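+
+As a minimal sketch of rules 4 and 5 above (the package and names are invented
+for the example), even an unexported declaration carries a doc comment that
+explains the why, not just the what:
+
+```
+// Package retry provides a tiny helper for bounded retries.
+package retry
+
+import "time"
+
+// defaultBackoff is deliberately short: callers wrap operations that
+// are already cheap to retry, and long waits hide real failures.
+const defaultBackoff = 100 * time.Millisecond
+
+// Do runs fn up to attempts times, sleeping defaultBackoff between
+// tries. It returns nil on the first success, or the last error.
+func Do(attempts int, fn func() error) error {
+	var err error
+	for i := 0; i < attempts; i++ {
+		if err = fn(); err == nil {
+			return nil
+		}
+		time.Sleep(defaultBackoff)
+	}
+	return err
+}
+```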
diff --git a/vendor/github.com/moby/moby/Dockerfile b/vendor/github.com/moby/moby/Dockerfile
new file mode 100644
index 000000000..33e88dce2
--- /dev/null
+++ b/vendor/github.com/moby/moby/Dockerfile
@@ -0,0 +1,229 @@
+# This file describes the standard way to build Docker, using docker
+#
+# Usage:
+#
+# # Assemble the full dev environment. This is slow the first time.
+# docker build -t docker .
+#
+# # Mount your source in an interactive container for quick testing:
+# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash
+#
+# # Run the test suite:
+# docker run -e DOCKER_GITCOMMIT=foo --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py
+#
+# # Publish a release:
+# docker run --privileged \
+#	-e AWS_S3_BUCKET=baz \
+#	-e AWS_ACCESS_KEY=foo \
+#	-e AWS_SECRET_KEY=bar \
+#	-e GPG_PASSPHRASE=gloubiboulga \
+#	docker hack/release.sh
+#
+# Note: AppArmor used to mess with privileged mode, but this is no longer
+# the case. Therefore, you don't have to disable it anymore.
+#
+
+FROM debian:jessie
+
+# allow replacing httpredir or deb mirror
+ARG APT_MIRROR=deb.debian.org
+RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list
+
+# Packaged dependencies
+RUN apt-get update && apt-get install -y \
+	apparmor \
+	apt-utils \
+	aufs-tools \
+	automake \
+	bash-completion \
+	binutils-mingw-w64 \
+	bsdmainutils \
+	btrfs-tools \
+	build-essential \
+	cmake \
+	createrepo \
+	curl \
+	dpkg-sig \
+	gcc-mingw-w64 \
+	git \
+	iptables \
+	jq \
+	less \
+	libapparmor-dev \
+	libcap-dev \
+	libnl-3-dev \
+	libprotobuf-c0-dev \
+	libprotobuf-dev \
+	libsystemd-journal-dev \
+	libtool \
+	mercurial \
+	net-tools \
+	pkg-config \
+	protobuf-compiler \
+	protobuf-c-compiler \
+	python-dev \
+	python-mock \
+	python-pip \
+	python-websocket \
+	tar \
+	vim \
+	vim-common \
+	xfsprogs \
+	zip \
+	--no-install-recommends \
+	&& pip install awscli==1.10.15
+# Get lvm2 source for compiling statically
+ENV LVM2_VERSION 2.02.103
+RUN mkdir -p /usr/local/lvm2 \
+	&& curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \
+		| tar -xzC /usr/local/lvm2 --strip-components=1
+# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags
+
+# Compile and install lvm2
+RUN cd /usr/local/lvm2 \
+	&& ./configure \
+		--build="$(gcc -print-multiarch)" \
+		--enable-static_link \
+	&& make device-mapper \
+	&& make install_device-mapper
+# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
+
+# Install seccomp: the version shipped upstream is too old
+ENV SECCOMP_VERSION 2.3.2
+RUN set -x \
+	&& export SECCOMP_PATH="$(mktemp -d)" \
+	&& curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \
+		| tar -xzC "$SECCOMP_PATH" --strip-components=1 \
+	&& ( \
+		cd "$SECCOMP_PATH" \
+		&& ./configure --prefix=/usr/local \
+		&& make \
+		&& make install \
+		&& ldconfig \
+	) \
+	&& rm -rf "$SECCOMP_PATH"
+
+# Install Go
+# IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines
+# will need updating, to avoid errors. Ping #docker-maintainers on IRC
+# with a heads-up.
+# IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored
+ENV GO_VERSION 1.8.3
+RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" \
+	| tar -xzC /usr/local
+
+ENV PATH /go/bin:/usr/local/go/bin:$PATH
+ENV GOPATH /go
+
+# Dependency for golint
+ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3
+RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \
+	&& (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT)
+
+# Grab Go's lint tool
+ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456
+RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \
+	&& (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \
+	&& go install -v github.com/golang/lint/golint
+
+# Install CRIU for checkpoint/restore support
+ENV CRIU_VERSION 2.12.1
+# Install dependency packages specific to criu
+RUN apt-get install libnet-dev -y && \
+	mkdir -p /usr/src/criu \
+	&& curl -sSL https://github.com/xemul/criu/archive/v${CRIU_VERSION}.tar.gz | tar -v -C /usr/src/criu/ -xz --strip-components=1 \
+	&& cd /usr/src/criu \
+	&& make \
+	&& make install-criu
+
+# Install two versions of the registry. The first is an older version that
+# only supports schema1 manifests. The second is a newer version that supports
+# both. This allows integration-cli tests to cover push/pull with both schema1
+# and schema2 manifests.
+ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd
+ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827
+RUN set -x \
+	&& export GOPATH="$(mktemp -d)" \
+	&& git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \
+	&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \
+	&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
+		go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \
+	&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \
+	&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
+		go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \
+	&& rm -rf "$GOPATH"
+
+# Install notary and notary-server
+ENV NOTARY_VERSION v0.5.0
+RUN set -x \
+	&& export GOPATH="$(mktemp -d)" \
+	&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \
+	&& (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \
+	&& GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \
+		go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \
+	&& GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \
+		go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \
+	&& rm -rf "$GOPATH"
+
+# Get the "docker-py" source so we can run their integration tests
+ENV DOCKER_PY_COMMIT a962578e515185cf06506050b2200c0b81aa84ef
+# To run the integration tests, docker-pycreds is required.
+# Before the integration tests run, conftest.py is loaded,
+# which in turn loads auth.py, which imports the
+# docker-pycreds module.
+RUN git clone https://github.com/docker/docker-py.git /docker-py \
+	&& cd /docker-py \
+	&& git checkout -q $DOCKER_PY_COMMIT \
+	&& pip install docker-pycreds==0.2.1 \
+	&& pip install -r test-requirements.txt
+
+# Install yamllint for validating swagger.yaml
+RUN pip install yamllint==1.5.0
+
+# Install go-swagger for validating swagger.yaml
+ENV GO_SWAGGER_COMMIT c28258affb0b6251755d92489ef685af8d4ff3eb
+RUN git clone https://github.com/go-swagger/go-swagger.git /go/src/github.com/go-swagger/go-swagger \
+	&& (cd /go/src/github.com/go-swagger/go-swagger && git checkout -q $GO_SWAGGER_COMMIT) \
+	&& go install -v github.com/go-swagger/go-swagger/cmd/swagger
+
+# Set user.email so crosbymichael's in-container merge commits go smoothly
+RUN git config --global user.email 'docker-dummy@example.com'
+
+# Add an unprivileged user to be used for tests which need it
+RUN groupadd -r docker
+RUN useradd --create-home --gid docker unprivilegeduser
+
+VOLUME /var/lib/docker
+WORKDIR /go/src/github.com/docker/docker
+ENV DOCKER_BUILDTAGS apparmor seccomp selinux
+
+# Let us use a .bashrc file
+RUN ln -sfv $PWD/.bashrc ~/.bashrc
+# Add integration helpers to bashrc
+RUN echo "source $PWD/hack/make/.integration-test-helpers" >> /etc/bash.bashrc
+
+# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
+COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/
+RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \
+	buildpack-deps:jessie@sha256:85b379ec16065e4fe4127eb1c5fb1bcc03c559bd36dbb2e22ff496de55925fa6 \
+	busybox:latest@sha256:32f093055929dbc23dec4d03e09dfe971f5973a9ca5cf059cbfb644c206aa83f \
+	debian:jessie@sha256:72f784399fd2719b4cb4e16ef8e369a39dc67f53d978cd3e2e7bf4e502c7b793 \
+	hello-world:latest@sha256:c5515758d4c5e1e838e9cd307f6c6a0d620b5e07e6f927b07d05f6d12a1ac8d7
+# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list)
+
+# Install tomlv, vndr, runc, containerd, tini, docker-proxy, dockercli
+# Please edit hack/dockerfile/install-binaries.sh to update them.
+COPY hack/dockerfile/binaries-commits /tmp/binaries-commits
+COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
+RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli
+ENV PATH=/usr/local/cli:$PATH
+
+# Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH
+RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc
+RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker
+
+# Wrap all commands in the "docker-in-docker" script to allow nested containers
+ENTRYPOINT ["hack/dind"]
+
+# Upload docker source
+COPY . /go/src/github.com/docker/docker
diff --git a/vendor/github.com/moby/moby/Dockerfile.aarch64 b/vendor/github.com/moby/moby/Dockerfile.aarch64
new file mode 100644
index 000000000..cabcda28b
--- /dev/null
+++ b/vendor/github.com/moby/moby/Dockerfile.aarch64
@@ -0,0 +1,202 @@
+# This file describes the standard way to build Docker on aarch64, using docker
+#
+# Usage:
+#
+# # Assemble the full dev environment. This is slow the first time.
+# docker build -t docker -f Dockerfile.aarch64 .
+# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py +# +# Note: AppArmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. +# + +FROM aarch64/ubuntu:xenial + +# Packaged dependencies +RUN apt-get update && apt-get install -y \ + apparmor \ + aufs-tools \ + automake \ + bash-completion \ + btrfs-tools \ + build-essential \ + cmake \ + createrepo \ + curl \ + dpkg-sig \ + g++ \ + gcc \ + git \ + iptables \ + jq \ + libapparmor-dev \ + libc6-dev \ + libcap-dev \ + libsystemd-dev \ + libyaml-dev \ + mercurial \ + net-tools \ + parallel \ + pkg-config \ + python-dev \ + python-mock \ + python-pip \ + python-setuptools \ + python-websocket \ + golang-go \ + iproute2 \ + iputils-ping \ + vim-common \ + --no-install-recommends + +# Get lvm2 source for compiling statically +ENV LVM2_VERSION 2.02.103 +RUN mkdir -p /usr/local/lvm2 \ + && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ + | tar -xzC /usr/local/lvm2 --strip-components=1 +# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags + +# Fix platform enablement in lvm2 to support aarch64 properly +RUN set -e \ + && for f in config.guess config.sub; do \ + curl -fsSL -o "/usr/local/lvm2/autoconf/$f" "http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=$f;hb=HEAD"; \ + done +# "arch.c:78:2: error: #error the arch code needs to know about your machine type" + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 \ + && ./configure \ + --build="$(gcc -print-multiarch)" \ + --enable-static_link \ + && make device-mapper \ + && make install_device-mapper +# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +# Install seccomp: the version shipped upstream is too old +ENV SECCOMP_VERSION 2.3.2 +RUN set -x \ + && export SECCOMP_PATH="$(mktemp -d)" \ + && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ + | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ + && ( \ + cd "$SECCOMP_PATH" \ + && ./configure --prefix=/usr/local \ + && make \ + && make install \ + && ldconfig \ + ) \ + && rm -rf "$SECCOMP_PATH" + +# Install Go +# We don't have official binary golang 1.7.5 tarballs for ARM64, either for Go or +# bootstrap, so we use golang-go (1.6) as bootstrap to build Go from source code. +# We don't use the official ARMv6 released binaries as a GOROOT_BOOTSTRAP, because +# not all ARM64 platforms support 32-bit mode. 32-bit mode is optional for ARMv8. 
+# IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored
+ENV GO_VERSION 1.8.3
+RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \
+	&& cd /usr/src/go/src \
+	&& GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash
+
+ENV PATH /go/bin:/usr/src/go/bin:$PATH
+ENV GOPATH /go
+
+# Dependency for golint
+ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3
+RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \
+	&& (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT)
+
+# Grab Go's lint tool
+ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456
+RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \
+	&& (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \
+	&& go install -v github.com/golang/lint/golint
+
+# Only install one version of the registry: the old version, which supports
+# schema1 manifests, does not work on ARM64, so the integration-cli tests
+# for schema1 manifests are skipped on ARM64.
+ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827
+RUN set -x \
+	&& export GOPATH="$(mktemp -d)" \
+	&& git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \
+	&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \
+	&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
+		go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \
+	&& rm -rf "$GOPATH"
+
+# Install notary and notary-server
+ENV NOTARY_VERSION v0.5.0
+RUN set -x \
+	&& export GOPATH="$(mktemp -d)" \
+	&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \
+	&& (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \
+	&& GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \
+		go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \
+	&& GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \
+		go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \
+	&& rm -rf "$GOPATH"
+
+# Get the "docker-py" source so we can run their integration tests
+ENV DOCKER_PY_COMMIT a962578e515185cf06506050b2200c0b81aa84ef
+# Before the integration tests run, conftest.py is loaded,
+# which in turn loads auth.py, which imports the
+# docker-pycreds module.
+RUN git clone https://github.com/docker/docker-py.git /docker-py \ + && cd /docker-py \ + && git checkout -q $DOCKER_PY_COMMIT \ + && pip install wheel \ + && pip install docker-pycreds==0.2.1 \ + && pip install -r test-requirements.txt + +# Install yamllint for validating swagger.yaml +RUN pip install yamllint==1.5.0 + +# Install go-swagger for validating swagger.yaml +ENV GO_SWAGGER_COMMIT c28258affb0b6251755d92489ef685af8d4ff3eb +RUN git clone https://github.com/go-swagger/go-swagger.git /go/src/github.com/go-swagger/go-swagger \ + && (cd /go/src/github.com/go-swagger/go-swagger && git checkout -q $GO_SWAGGER_COMMIT) \ + && go install -v github.com/go-swagger/go-swagger/cmd/swagger + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor seccomp selinux + +# Let us use a .bashrc file +RUN ln -sfv $PWD/.bashrc ~/.bashrc + +# Register Docker's bash completion. +RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker + +# Get useful and necessary Hub images so we can "docker load" locally instead of pulling +COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ +RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ + aarch64/buildpack-deps:jessie@sha256:107f4a96837ed89c493fc205cd28508ed0b6b680b4bf3e514e9f0fa0f6667b77 \ + aarch64/busybox:latest@sha256:5a06b8b2fdf22dd1f4085c6c3efd23ee99af01b2d668d286bc4be6d8baa10efb \ + aarch64/debian:jessie@sha256:e6f90b568631705bd5cb27490977378ba762792b38d47c91c4da7a539f63079a \ + aarch64/hello-world:latest@sha256:bd1722550b97668b23ede297abf824d4855f4d9f600dab7b4db1a963dae7ec9e +# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list) + +# Install tomlv, vndr, runc, containerd, tini, docker-proxy +# Please edit hack/dockerfile/install-binaries.sh to update them. +COPY hack/dockerfile/binaries-commits /tmp/binaries-commits +COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli +ENV PATH=/usr/local/cli:$PATH + +# Wrap all commands in the "docker-in-docker" script to allow nested containers +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . /go/src/github.com/docker/docker diff --git a/vendor/github.com/moby/moby/Dockerfile.armhf b/vendor/github.com/moby/moby/Dockerfile.armhf new file mode 100644 index 000000000..dd1f53619 --- /dev/null +++ b/vendor/github.com/moby/moby/Dockerfile.armhf @@ -0,0 +1,182 @@ +# This file describes the standard way to build Docker on ARMv7, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker -f Dockerfile.armhf . +# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py +# +# Note: AppArmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. 
+# + +FROM armhf/debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +# Packaged dependencies +RUN apt-get update && apt-get install -y \ + apparmor \ + aufs-tools \ + automake \ + bash-completion \ + btrfs-tools \ + build-essential \ + createrepo \ + curl \ + cmake \ + dpkg-sig \ + git \ + iptables \ + jq \ + net-tools \ + libapparmor-dev \ + libcap-dev \ + libsystemd-journal-dev \ + libtool \ + mercurial \ + pkg-config \ + python-dev \ + python-mock \ + python-pip \ + python-websocket \ + xfsprogs \ + tar \ + vim-common \ + --no-install-recommends \ + && pip install awscli==1.10.15 + +# Get lvm2 source for compiling statically +ENV LVM2_VERSION 2.02.103 +RUN mkdir -p /usr/local/lvm2 \ + && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ + | tar -xzC /usr/local/lvm2 --strip-components=1 +# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 \ + && ./configure \ + --build="$(gcc -print-multiarch)" \ + --enable-static_link \ + && make device-mapper \ + && make install_device-mapper +# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +# Install Go +# IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored +ENV GO_VERSION 1.8.3 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" \ + | tar -xzC /usr/local +ENV PATH /go/bin:/usr/local/go/bin:$PATH +ENV GOPATH /go + +# We're building for armhf, which is ARMv7, so let's be explicit about that +ENV GOARCH arm +ENV GOARM 7 + +# Dependency for golint +ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 +RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ + && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) + +# Grab Go's lint tool +ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456 +RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ + && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ + && go install -v github.com/golang/lint/golint + +# Install seccomp: the version shipped upstream is too old +ENV SECCOMP_VERSION 2.3.2 +RUN set -x \ + && export SECCOMP_PATH="$(mktemp -d)" \ + && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ + | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ + && ( \ + cd "$SECCOMP_PATH" \ + && ./configure --prefix=/usr/local \ + && make \ + && make install \ + && ldconfig \ + ) \ + && rm -rf "$SECCOMP_PATH" + +# Install two versions of the registry. The first is an older version that +# only supports schema1 manifests. The second is a newer version that supports +# both. This allows integration-cli tests to cover push/pull with both schema1 +# and schema2 manifests. 
+ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd +ENV REGISTRY_COMMIT cb08de17d74bef86ce6c5abe8b240e282f5750be +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \ + && rm -rf "$GOPATH" + +# Install notary and notary-server +ENV NOTARY_VERSION v0.5.0 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ + && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ + && rm -rf "$GOPATH" + +# Get the "docker-py" source so we can run their integration tests +ENV DOCKER_PY_COMMIT a962578e515185cf06506050b2200c0b81aa84ef +RUN git clone https://github.com/docker/docker-py.git /docker-py \ + && cd /docker-py \ + && git checkout -q $DOCKER_PY_COMMIT \ + && pip install -r test-requirements.txt + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor seccomp selinux + +# Let us use a .bashrc file +RUN ln -sfv $PWD/.bashrc ~/.bashrc + +# Register Docker's bash completion. +RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker + +# Get useful and necessary Hub images so we can "docker load" locally instead of pulling +COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ +RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ + armhf/buildpack-deps:jessie@sha256:eb2dad77ef53e88d94c3c83862d315c806ea1ca49b6e74f4db362381365ce489 \ + armhf/busybox:latest@sha256:016a1e149d2acc2a3789a160dfa60ce870794eea27ad5e96f7a101970e5e1689 \ + armhf/debian:jessie@sha256:ac59fa18b28d0ef751eabb5ba4c4b5a9063f99398bae2f70495aa8ed6139b577 \ + armhf/hello-world:latest@sha256:9701edc932223a66e49dd6c894a11db8c2cf4eccd1414f1ec105a623bf16b426 +# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list) + +# Install tomlv, vndr, runc, containerd, tini, docker-proxy +# Please edit hack/dockerfile/install-binaries.sh to update them. 
+COPY hack/dockerfile/binaries-commits /tmp/binaries-commits +COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli +ENV PATH=/usr/local/cli:$PATH + +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . /go/src/github.com/docker/docker diff --git a/vendor/github.com/moby/moby/Dockerfile.ppc64le b/vendor/github.com/moby/moby/Dockerfile.ppc64le new file mode 100644 index 000000000..43b84e450 --- /dev/null +++ b/vendor/github.com/moby/moby/Dockerfile.ppc64le @@ -0,0 +1,189 @@ +# This file describes the standard way to build Docker on ppc64le, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker -f Dockerfile.ppc64le . +# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py +# +# Note: AppArmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. +# + +FROM ppc64le/debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +# Packaged dependencies +RUN apt-get update && apt-get install -y \ + apparmor \ + apt-utils \ + aufs-tools \ + automake \ + bash-completion \ + btrfs-tools \ + build-essential \ + cmake \ + createrepo \ + curl \ + dpkg-sig \ + git \ + iptables \ + jq \ + net-tools \ + libapparmor-dev \ + libcap-dev \ + libsystemd-journal-dev \ + libtool \ + mercurial \ + pkg-config \ + python-dev \ + python-mock \ + python-pip \ + python-websocket \ + xfsprogs \ + tar \ + vim-common \ + --no-install-recommends + +# Get lvm2 source for compiling statically +ENV LVM2_VERSION 2.02.103 +RUN mkdir -p /usr/local/lvm2 \ + && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ + | tar -xzC /usr/local/lvm2 --strip-components=1 +# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags + +# Fix platform enablement in lvm2 to support ppc64le properly +RUN set -e \ + && for f in config.guess config.sub; do \ + curl -fsSL -o "/usr/local/lvm2/autoconf/$f" "http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=$f;hb=HEAD"; \ + done +# "arch.c:78:2: error: #error the arch code needs to know about your machine type" + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 \ + && ./configure \ + --build="$(gcc -print-multiarch)" \ + --enable-static_link \ + && make device-mapper \ + && make install_device-mapper +# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +# Install seccomp: the version shipped upstream is too old +ENV SECCOMP_VERSION 2.3.2 +RUN set -x \ + && export SECCOMP_PATH="$(mktemp -d)" \ + && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ + | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ + && ( \ + cd "$SECCOMP_PATH" \ + && ./configure --prefix=/usr/local \ + && make \ + && make install \ + && ldconfig \ + ) \ + && rm -rf "$SECCOMP_PATH" + + +# Install Go +# NOTE: official ppc64le go binaries weren't available until go 1.6.4 and 1.7.4 +# IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored +ENV GO_VERSION 1.8.3 +RUN curl -fsSL 
"https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" \ + | tar -xzC /usr/local + +ENV PATH /go/bin:/usr/local/go/bin:$PATH +ENV GOPATH /go + +# Dependency for golint +ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 +RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ + && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) + +# Grab Go's lint tool +ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456 +RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ + && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ + && go install -v github.com/golang/lint/golint + +# Install two versions of the registry. The first is an older version that +# only supports schema1 manifests. The second is a newer version that supports +# both. This allows integration-cli tests to cover push/pull with both schema1 +# and schema2 manifests. +ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd +ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \ + && rm -rf "$GOPATH" + +# Install notary and notary-server +ENV NOTARY_VERSION v0.5.0 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ + && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ + && rm -rf "$GOPATH" + +# Get the "docker-py" source so we can run their integration tests +ENV DOCKER_PY_COMMIT a962578e515185cf06506050b2200c0b81aa84ef +RUN git clone https://github.com/docker/docker-py.git /docker-py \ + && cd /docker-py \ + && git checkout -q $DOCKER_PY_COMMIT \ + && pip install -r test-requirements.txt + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor seccomp selinux + +# Let us use a .bashrc file +RUN ln -sfv $PWD/.bashrc ~/.bashrc + +# Register Docker's bash completion. 
+RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker + +# Get useful and necessary Hub images so we can "docker load" locally instead of pulling +COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ +RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ + ppc64le/buildpack-deps:jessie@sha256:1a2f2d2cc8738f14b336aeffc3503b5c9dedf9e1f26c7313cb4999534ad4716f \ + ppc64le/busybox:latest@sha256:54f34c83adfab20cf0e630d879e210f07b0062cd6caaf16346a61396d50e7584 \ + ppc64le/debian:jessie@sha256:ea8c5b105e3790f075145b40e4be1e4488c9f33f55e6cc45182047b80a68f892 \ + ppc64le/hello-world:latest@sha256:7d57adf137665f748956c86089320710b66d08584db3500ed98f4bb3da637c2d +# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list) + +# Install tomlv, vndr, runc, containerd, tini, docker-proxy +# Please edit hack/dockerfile/install-binaries.sh to update them. +COPY hack/dockerfile/binaries-commits /tmp/binaries-commits +COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli +ENV PATH=/usr/local/cli:$PATH + +# Wrap all commands in the "docker-in-docker" script to allow nested containers +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . /go/src/github.com/docker/docker diff --git a/vendor/github.com/moby/moby/Dockerfile.s390x b/vendor/github.com/moby/moby/Dockerfile.s390x new file mode 100644 index 000000000..35ec68373 --- /dev/null +++ b/vendor/github.com/moby/moby/Dockerfile.s390x @@ -0,0 +1,182 @@ +# This file describes the standard way to build Docker on s390x, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker -f Dockerfile.s390x . +# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py +# +# Note: AppArmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. 
+# + +FROM s390x/debian:jessie + +# Packaged dependencies +RUN apt-get update && apt-get install -y \ + apparmor \ + apt-utils \ + aufs-tools \ + automake \ + bash-completion \ + btrfs-tools \ + build-essential \ + cmake \ + createrepo \ + curl \ + dpkg-sig \ + git \ + iptables \ + jq \ + net-tools \ + libapparmor-dev \ + libcap-dev \ + libsystemd-journal-dev \ + libtool \ + mercurial \ + pkg-config \ + python-dev \ + python-mock \ + python-pip \ + python-websocket \ + xfsprogs \ + tar \ + vim-common \ + --no-install-recommends + +# Install seccomp: the version shipped upstream is too old +ENV SECCOMP_VERSION 2.3.2 +RUN set -x \ + && export SECCOMP_PATH="$(mktemp -d)" \ + && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ + | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ + && ( \ + cd "$SECCOMP_PATH" \ + && ./configure --prefix=/usr/local \ + && make \ + && make install \ + && ldconfig \ + ) \ + && rm -rf "$SECCOMP_PATH" + +# Get lvm2 source for compiling statically +ENV LVM2_VERSION 2.02.103 +RUN mkdir -p /usr/local/lvm2 \ + && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ + | tar -xzC /usr/local/lvm2 --strip-components=1 +# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags + +# Fix platform enablement in lvm2 to support s390x properly +RUN set -e \ + && for f in config.guess config.sub; do \ + curl -fsSL -o "/usr/local/lvm2/autoconf/$f" "http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=$f;hb=HEAD"; \ + done +# "arch.c:78:2: error: #error the arch code needs to know about your machine type" + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 \ + && ./configure \ + --build="$(gcc -print-multiarch)" \ + --enable-static_link \ + && make device-mapper \ + && make install_device-mapper +# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +# IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored +ENV GO_VERSION 1.8.3 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" \ + | tar -xzC /usr/local + +ENV PATH /go/bin:/usr/local/go/bin:$PATH +ENV GOPATH /go + +# Dependency for golint +ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 +RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ + && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) + +# Grab Go's lint tool +ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456 +RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ + && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ + && go install -v github.com/golang/lint/golint + +# Install two versions of the registry. The first is an older version that +# only supports schema1 manifests. The second is a newer version that supports +# both. This allows integration-cli tests to cover push/pull with both schema1 +# and schema2 manifests. 
+ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd +ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \ + && rm -rf "$GOPATH" + +# Install notary and notary-server +ENV NOTARY_VERSION v0.5.0 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ + && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ + && rm -rf "$GOPATH" + +# Get the "docker-py" source so we can run their integration tests +ENV DOCKER_PY_COMMIT a962578e515185cf06506050b2200c0b81aa84ef +RUN git clone https://github.com/docker/docker-py.git /docker-py \ + && cd /docker-py \ + && git checkout -q $DOCKER_PY_COMMIT \ + && pip install -r test-requirements.txt + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor selinux seccomp + +# Let us use a .bashrc file +RUN ln -sfv $PWD/.bashrc ~/.bashrc + +# Register Docker's bash completion. +RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker + +# Get useful and necessary Hub images so we can "docker load" locally instead of pulling +COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ +RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ + s390x/buildpack-deps:jessie@sha256:552dec28146e4d2591fc0309aebdbac9e4fb1f335d90c70a14bbf72fb8bb1be5 \ + s390x/busybox:latest@sha256:e32f40c39ca596a4317392bd32809bb188c4ae5864ea827c3219c75c50069964 \ + s390x/debian:jessie@sha256:6994e3ffa5a1dabea09d536f350b3ed2715292cb469417c42a82b70fcbff7d32 \ + s390x/hello-world:latest@sha256:602db500fee63934292260e65c0c528128ad1c1c7c6497f95bbbac7d4d5312f1 +# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list) + +# Install tomlv, vndr, runc, containerd, tini, docker-proxy +# Please edit hack/dockerfile/install-binaries.sh to update them. 
+COPY hack/dockerfile/binaries-commits /tmp/binaries-commits +COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli +ENV PATH=/usr/local/cli:$PATH + +# Wrap all commands in the "docker-in-docker" script to allow nested containers +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . /go/src/github.com/docker/docker diff --git a/vendor/github.com/moby/moby/Dockerfile.simple b/vendor/github.com/moby/moby/Dockerfile.simple new file mode 100644 index 000000000..b4682d4cb --- /dev/null +++ b/vendor/github.com/moby/moby/Dockerfile.simple @@ -0,0 +1,73 @@ +# docker build -t docker:simple -f Dockerfile.simple . +# docker run --rm docker:simple hack/make.sh dynbinary +# docker run --rm --privileged docker:simple hack/dind hack/make.sh test-unit +# docker run --rm --privileged -v /var/lib/docker docker:simple hack/dind hack/make.sh dynbinary test-integration-cli + +# This represents the bare minimum required to build and test Docker. + +FROM debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +# Compile and runtime deps +# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#build-dependencies +# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#runtime-dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + btrfs-tools \ + build-essential \ + curl \ + cmake \ + gcc \ + git \ + libapparmor-dev \ + libdevmapper-dev \ + ca-certificates \ + e2fsprogs \ + iptables \ + procps \ + xfsprogs \ + xz-utils \ + \ + aufs-tools \ + vim-common \ + && rm -rf /var/lib/apt/lists/* + +# Install seccomp: the version shipped upstream is too old +ENV SECCOMP_VERSION 2.3.2 +RUN set -x \ + && export SECCOMP_PATH="$(mktemp -d)" \ + && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ + | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ + && ( \ + cd "$SECCOMP_PATH" \ + && ./configure --prefix=/usr/local \ + && make \ + && make install \ + && ldconfig \ + ) \ + && rm -rf "$SECCOMP_PATH" + +# Install Go +# IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines +# will need updating, to avoid errors. Ping #docker-maintainers on IRC +# with a heads-up. +# IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored +ENV GO_VERSION 1.8.3 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" \ + | tar -xzC /usr/local +ENV PATH /go/bin:/usr/local/go/bin:$PATH +ENV GOPATH /go +ENV CGO_LDFLAGS -L/lib + +# Install runc, containerd, tini and docker-proxy +# Please edit hack/dockerfile/install-binaries.sh to update them. +COPY hack/dockerfile/binaries-commits /tmp/binaries-commits +COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh +RUN /tmp/install-binaries.sh runc containerd tini proxy dockercli +ENV PATH=/usr/local/cli:$PATH + +ENV AUTO_GOPATH 1 +WORKDIR /usr/src/docker +COPY . 
/usr/src/docker diff --git a/vendor/github.com/moby/moby/Dockerfile.solaris b/vendor/github.com/moby/moby/Dockerfile.solaris new file mode 100644 index 000000000..4198b138b --- /dev/null +++ b/vendor/github.com/moby/moby/Dockerfile.solaris @@ -0,0 +1,19 @@ +# Defines an image that hosts a native Docker build environment for Solaris +# TODO: Improve stub + +FROM solaris:latest + +# compile and runtime deps +RUN pkg install --accept \ + git \ + gnu-coreutils \ + gnu-make \ + gnu-tar \ + diagnostic/top \ + golang \ + library/golang/* \ + developer/gcc-* + +ENV GOPATH /go/:/usr/lib/gocode/1.5/ +WORKDIR /go/src/github.com/docker/docker +COPY . /go/src/github.com/docker/docker diff --git a/vendor/github.com/moby/moby/Dockerfile.windows b/vendor/github.com/moby/moby/Dockerfile.windows new file mode 100644 index 000000000..8f8ee609c --- /dev/null +++ b/vendor/github.com/moby/moby/Dockerfile.windows @@ -0,0 +1,256 @@ +# escape=` + +# ----------------------------------------------------------------------------------------- +# This file describes the standard way to build Docker in a container on Windows +# Server 2016 or Windows 10. +# +# Maintainer: @jhowardmsft +# ----------------------------------------------------------------------------------------- + + +# Prerequisites: +# -------------- +# +# 1. Windows Server 2016 or Windows 10 with all Windows updates applied. The major +# build number must be at least 14393. This can be confirmed, for example, by +# running the following from an elevated PowerShell prompt - this sample output +# is from a fully up to date machine as at mid-November 2016: +# +# >> PS C:\> $(gin).WindowsBuildLabEx +# >> 14393.447.amd64fre.rs1_release_inmarket.161102-0100 +# +# 2. Git for Windows (or another git client) must be installed. https://git-scm.com/download/win. +# +# 3. The machine must be configured to run containers. For example, by following +# the quick start guidance at https://msdn.microsoft.com/en-us/virtualization/windowscontainers/quick_start/quick_start or +# https://github.com/docker/labs/blob/master/windows/windows-containers/Setup.md +# +# 4. If building in a Hyper-V VM: For Windows Server 2016 using Windows Server +# containers as the default option, it is recommended you have at least 1GB +# of memory assigned; For Windows 10 where Hyper-V Containers are employed, you +# should have at least 4GB of memory assigned. Note also, to run Hyper-V +# containers in a VM, it is necessary to configure the VM for nested virtualization. + +# ----------------------------------------------------------------------------------------- + + +# Usage: +# ----- +# +# The following steps should be run from an (elevated*) Windows PowerShell prompt. +# +# (*In a default installation of containers on Windows following the quick-start guidance at +# https://msdn.microsoft.com/en-us/virtualization/windowscontainers/quick_start/quick_start, +# the docker.exe client must run elevated to be able to connect to the daemon). +# +# 1. Clone the sources from github.com: +# +# >> git clone https://github.com/docker/docker.git C:\go\src\github.com\docker\docker +# >> Cloning into 'C:\go\src\github.com\docker\docker'... +# >> remote: Counting objects: 186216, done. +# >> remote: Compressing objects: 100% (21/21), done. +# >> remote: Total 186216 (delta 5), reused 0 (delta 0), pack-reused 186195 +# >> Receiving objects: 100% (186216/186216), 104.32 MiB | 8.18 MiB/s, done. +# >> Resolving deltas: 100% (123139/123139), done. +# >> Checking connectivity... done. 
+# >> Checking out files: 100% (3912/3912), done.
+# >> PS C:\>
+#
+#
+# 2. Change directory to the cloned docker sources:
+#
+# >> cd C:\go\src\github.com\docker\docker
+#
+#
+# 3. Build a docker image with the components required to build the docker binaries from source
+# by running one of the following:
+#
+# >> docker build -t nativebuildimage -f Dockerfile.windows .
+# >> docker build -t nativebuildimage -f Dockerfile.windows -m 2GB . (if using Hyper-V containers)
+#
+#
+# 4. Build the docker executable binaries by running one of the following:
+#
+# >> $DOCKER_GITCOMMIT=(git rev-parse --short HEAD)
+# >> docker run --name binaries -e DOCKER_GITCOMMIT=$DOCKER_GITCOMMIT nativebuildimage hack\make.ps1 -Binary
+# >> docker run --name binaries -e DOCKER_GITCOMMIT=$DOCKER_GITCOMMIT -m 2GB nativebuildimage hack\make.ps1 -Binary (if using Hyper-V containers)
+#
+#
+# 5. Copy the binaries out of the container, replacing HostPath with an appropriate destination
+# folder on the host system where you want the binaries to be located.
+#
+# >> docker cp binaries:C:\go\src\github.com\docker\docker\bundles\docker.exe C:\HostPath\docker.exe
+# >> docker cp binaries:C:\go\src\github.com\docker\docker\bundles\dockerd.exe C:\HostPath\dockerd.exe
+#
+#
+# 6. (Optional) Remove the interim container holding the built executable binaries:
+#
+# >> docker rm binaries
+#
+#
+# 7. (Optional) Remove the image used for the container in which the executable
+# binaries are built. Tip - it may be useful to keep this image around if you need to
+# build multiple times. Then you can take advantage of the builder cache to have an
+# image which has all the components required to build the binaries already installed.
+#
+# >> docker rmi nativebuildimage
+#
+
+# -----------------------------------------------------------------------------------------
+
+
+# The validation tests can only run directly on the host. This is because they calculate
+# information from the git repo, but the .git directory is not passed into the image as
+# it is excluded via .dockerignore. Run the following from a Windows PowerShell prompt
+# (elevation is not required): (Note Go must be installed to run these tests)
+#
+# >> hack\make.ps1 -DCO -PkgImports -GoFormat
+
+
+# -----------------------------------------------------------------------------------------
+
+
+# To run unit tests, ensure you have created the nativebuildimage above. Then run one of
+# the following from an (elevated) Windows PowerShell prompt:
+#
+# >> docker run --rm nativebuildimage hack\make.ps1 -TestUnit
+# >> docker run --rm -m 2GB nativebuildimage hack\make.ps1 -TestUnit (if using Hyper-V containers)
+
+
+# -----------------------------------------------------------------------------------------
+
+
+# To run unit tests and binary build, ensure you have created the nativebuildimage above. Then
+# run one of the following from an (elevated) Windows PowerShell prompt:
+#
+# >> docker run nativebuildimage hack\make.ps1 -All
+# >> docker run -m 2GB nativebuildimage hack\make.ps1 -All (if using Hyper-V containers)
+
+# -----------------------------------------------------------------------------------------
+
+
+# Important notes:
+# ---------------
+#
+# Don't attempt to use a bind-mount to pass a local directory as the bundles target
+# directory. It does not work (golang attempts to follow a mapped folder incorrectly).
+# Instead, use docker cp as per the example.
+# +# go.zip is not removed from the image as it is used by the Windows CI servers +# to ensure the host and image are running consistent versions of go. +# +# Nanoserver support is a work in progress. Although the image will build if the +# FROM statement is updated, it will not work when running autogen through hack\make.ps1. +# It is suspected that the required GCC utilities (eg gcc, windres, windmc) silently +# quit due to the use of console hooks which are not available. +# +# The docker integration tests do not currently run in a container on Windows, predominantly +# due to Windows not supporting privileged mode, so anything using a volume would fail. +# They (along with the rest of the docker CI suite) can be run using +# https://github.com/jhowardmsft/docker-w2wCIScripts/blob/master/runCI/Invoke-DockerCI.ps1. +# +# ----------------------------------------------------------------------------------------- + + +# The number of build steps below are explicitly minimised to improve performance. +FROM microsoft/windowsservercore + +# Use PowerShell as the default shell +SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPreference = 'SilentlyContinue';"] + +# Environment variable notes: +# - GO_VERSION must be consistent with 'Dockerfile' used by Linux. +# - FROM_DOCKERFILE is used for detection of building within a container. +ENV GO_VERSION=1.8.3 ` + GIT_VERSION=2.11.1 ` + GOPATH=C:\go ` + FROM_DOCKERFILE=1 + +RUN ` + Function Test-Nano() { ` + $EditionId = (Get-ItemProperty -Path 'HKLM:\SOFTWARE\Microsoft\Windows NT\CurrentVersion' -Name 'EditionID').EditionId; ` + return (($EditionId -eq 'ServerStandardNano') -or ($EditionId -eq 'ServerDataCenterNano') -or ($EditionId -eq 'NanoServer')); ` + }` + ` + Function Download-File([string] $source, [string] $target) { ` + if (Test-Nano) { ` + $handler = New-Object System.Net.Http.HttpClientHandler; ` + $client = New-Object System.Net.Http.HttpClient($handler); ` + $client.Timeout = New-Object System.TimeSpan(0, 30, 0); ` + $cancelTokenSource = [System.Threading.CancellationTokenSource]::new(); ` + $responseMsg = $client.GetAsync([System.Uri]::new($source), $cancelTokenSource.Token); ` + $responseMsg.Wait(); ` + if (!$responseMsg.IsCanceled) { ` + $response = $responseMsg.Result; ` + if ($response.IsSuccessStatusCode) { ` + $downloadedFileStream = [System.IO.FileStream]::new($target, [System.IO.FileMode]::Create, [System.IO.FileAccess]::Write); ` + $copyStreamOp = $response.Content.CopyToAsync($downloadedFileStream); ` + $copyStreamOp.Wait(); ` + $downloadedFileStream.Close(); ` + if ($copyStreamOp.Exception -ne $null) { throw $copyStreamOp.Exception } ` + } ` + } else { ` + Throw ("Failed to download " + $source) ` + }` + } else { ` + $webClient = New-Object System.Net.WebClient; ` + $webClient.DownloadFile($source, $target); ` + } ` + } ` + ` + setx /M PATH $('C:\git\cmd;C:\git\usr\bin;'+$Env:PATH+';C:\gcc\bin;C:\go\bin'); ` + ` + Write-Host INFO: Downloading git...; ` + $location='https://www.nuget.org/api/v2/package/GitForWindows/'+$Env:GIT_VERSION; ` + Download-File $location C:\gitsetup.zip; ` + ` + Write-Host INFO: Downloading go...; ` + Download-File $('https://golang.org/dl/go'+$Env:GO_VERSION+'.windows-amd64.zip') C:\go.zip; ` + ` + Write-Host INFO: Downloading compiler 1 of 3...; ` + Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/gcc.zip C:\gcc.zip; ` + ` + Write-Host INFO: Downloading compiler 2 of 3...; ` + Download-File 
https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/runtime.zip C:\runtime.zip; ` + ` + Write-Host INFO: Downloading compiler 3 of 3...; ` + Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/binutils.zip C:\binutils.zip; ` + ` + Write-Host INFO: Extracting git...; ` + Expand-Archive C:\gitsetup.zip C:\git-tmp; ` + New-Item -Type Directory C:\git | Out-Null; ` + Move-Item C:\git-tmp\tools\* C:\git\.; ` + Remove-Item -Recurse -Force C:\git-tmp; ` + ` + Write-Host INFO: Expanding go...; ` + Expand-Archive C:\go.zip -DestinationPath C:\; ` + ` + Write-Host INFO: Expanding compiler 1 of 3...; ` + Expand-Archive C:\gcc.zip -DestinationPath C:\gcc -Force; ` + Write-Host INFO: Expanding compiler 2 of 3...; ` + Expand-Archive C:\runtime.zip -DestinationPath C:\gcc -Force; ` + Write-Host INFO: Expanding compiler 3 of 3...; ` + Expand-Archive C:\binutils.zip -DestinationPath C:\gcc -Force; ` + ` + Write-Host INFO: Removing downloaded files...; ` + Remove-Item C:\gcc.zip; ` + Remove-Item C:\runtime.zip; ` + Remove-Item C:\binutils.zip; ` + Remove-Item C:\gitsetup.zip; ` + ` + Write-Host INFO: Creating source directory...; ` + New-Item -ItemType Directory -Path C:\go\src\github.com\docker\docker | Out-Null; ` + ` + Write-Host INFO: Configuring git core.autocrlf...; ` + C:\git\cmd\git config --global core.autocrlf true; ` + ` + Write-Host INFO: Completed + +# Make PowerShell the default entrypoint +ENTRYPOINT ["powershell.exe"] + +# Set the working directory to the location of the sources +WORKDIR C:\go\src\github.com\docker\docker + +# Copy the sources into the container +COPY . . diff --git a/vendor/github.com/moby/moby/LICENSE b/vendor/github.com/moby/moby/LICENSE new file mode 100644 index 000000000..9c8e20ab8 --- /dev/null +++ b/vendor/github.com/moby/moby/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2017 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/moby/moby/MAINTAINERS b/vendor/github.com/moby/moby/MAINTAINERS new file mode 100644 index 000000000..dc4485da1 --- /dev/null +++ b/vendor/github.com/moby/moby/MAINTAINERS @@ -0,0 +1,462 @@ +# Docker maintainers file +# +# This file describes who runs the docker/docker project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant +# parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + + [Org."Core maintainers"] + + # The Core maintainers are the ghostbusters of the project: when there's a problem others + # can't solve, they show up and fix it with bizarre devices and weaponry. + # They have final say on technical implementation and coding style. + # They are ultimately responsible for quality in all its forms: usability polish, + # bugfixes, performance, stability, etc. When ownership can cleanly be passed to + # a subsystem, they are responsible for doing so and holding the + # subsystem maintainers accountable. If ownership is unclear, they are the de facto owners. + + # For each release (including minor releases), a "release captain" is assigned from the + # pool of core maintainers. Rotation is encouraged across all maintainers, to ensure + # the release process is clear and up-to-date. 
+ + people = [ + "aaronlehmann", + "akihirosuda", + "albers", + "aluzzardi", + "anusha", + "coolljt0725", + "cpuguy83", + "crosbymichael", + "dnephin", + "duglin", + "ehazlett", + "estesp", + "icecrime", + "jhowardmsft", + "johnstep", + "justincormack", + "lk4d4", + "mavenugo", + "mhbauer", + "mlaventure", + "runcom", + "stevvooe", + "tianon", + "tibor", + "tonistiigi", + "unclejack", + "vdemeester", + "vieux", + "yongtang" + ] + + [Org."Docs maintainers"] + + # TODO Describe the docs maintainers role. + + people = [ + "misty", + "thajeztah" + ] + + [Org.Curators] + + # The curators help ensure that incoming issues and pull requests are properly triaged and + # that our various contribution and reviewing processes are respected. With their knowledge of + # the repository activity, they can also guide contributors to relevant material or + # discussions. + # + # They are neither code nor docs reviewers, so they are never expected to merge. They can + # however: + # - close an issue or pull request when it's an exact duplicate + # - close an issue or pull request when it's inappropriate or off-topic + + people = [ + "aboch", + "alexellis", + "andrewhsu", + "anonymuse", + "chanwit", + "ehazlett", + "fntlnz", + "gianarb", + "mgoelzer", + "programmerq", + "rheinwein", + "thajeztah" + ] + + [Org.Alumni] + + # This list contains maintainers that are no longer active on the project. + # It is thanks to these people that the project has become what it is today. + # Thank you! + + people = [ + # David Calavera contributed many features to Docker, such as an improved + # event system, dynamic configuration reloading, volume plugins, fancy + # new templating options, and an external client credential store. As a + # maintainer, David was release captain for Docker 1.8, and competing + # with Jess Frazelle to be "top dream killer". + # David is now doing amazing stuff as CTO for https://www.netlify.com, + # and tweets as @calavera. + "calavera", + + # As a maintainer, Erik was responsible for the "builder", and + # started the first designs for the new networking model in + # Docker. Erik is now working on all kinds of plugins for Docker + # (https://github.com/contiv) and various open source projects + # in his own repository https://github.com/erikh. You may + # still stumble into him in our issue tracker, or on IRC. + "erikh", + + # After a false start with his first PR being rejected, James Turnbull became a frequent + # contributor to the documentation, and became a docs maintainer on December 5, 2013. As + # a maintainer, James lifted the docs to a higher standard, and introduced the community + # guidelines ("three strikes"). James is currently changing the world as CTO of https://www.empatico.org, + # meanwhile authoring various books that are worth checking out. You can find him on Twitter, + # rambling as @kartar, and although no longer active as a maintainer, he's always "game" to + # help out reviewing docs PRs, so you may still see him around in the repository. + "jamtur01", + + # Jessica Frazelle, also known as the "Keyser Söze of containers", + # runs *everything* in containers. She started contributing to + # Docker with a (fun fun) change involving both iptables and regular + # expressions (coz, YOLO!) on July 10, 2014 + # https://github.com/docker/docker/pull/6950/commits/f3a68ffa390fb851115c77783fa4031f1d3b2995. 
+ # Jess was Release Captain for Docker 1.4, 1.6 and 1.7, and contributed + # many features and improvements, among which "seccomp profiles" (making + # containers a lot more secure). Besides being a maintainer, she + # set up the CI infrastructure for the project, giving everyone + # something to shout at if a PR failed ("noooo Janky!"). + # Jess is currently working on the DCOS security team at Mesosphere, + # and contributing to various open source projects. + # Be sure you don't miss her talks at a conference near you (a must-see), + # read her blog at https://blog.jessfraz.com (a must-read), and + # check out her open source projects on GitHub https://github.com/jessfraz (a must-try). + "jessfraz", + + # As a docs maintainer, Mary Anthony contributed greatly to the Docker + # docs. She wrote the Docker Contributor Guide and Getting Started + # Guides. She helped create a doc build system independent of the + # docker/docker project, and implemented a new docs.docker.com theme and + # nav for 2015 Dockercon. Fun fact: the most inherited layer in DockerHub + # public repositories was originally referenced in + # maryatdocker/docker-whale back in May 2015. + "moxiegirl", + + # Jana Radhakrishnan was part of the SocketPlane team that joined Docker. + # As a maintainer, he was the lead architect for the Container Network + # Model (CNM) implemented through libnetwork, and the "routing mesh" powering + # Swarm mode networking. + # + # Jana started new adventures in networking, but you can find him tweeting as @mrjana, + # coding on GitHub https://github.com/mrjana, and he may be hiding on the Docker Community + # slack channel :-) + "mrjana", + + # Sven Dowideit became a well-known person in the Docker ecosphere, building + # boot2docker, and became a regular contributor to the project, starting as + # early as October 2013 (https://github.com/docker/docker/pull/2119), to become + # a maintainer less than two months later (https://github.com/docker/docker/pull/3061). + # + # As a maintainer, Sven took on the task of converting the documentation from + # ReStructuredText to Markdown, migrating to Hugo for generating the docs, and + # writing tooling for building, testing, and publishing them. + # + # If you don't have the occasion to visit "the Australian office", you + # can keep up with Sven on Twitter (@SvenDowideit), his blog http://fosiki.com, + # and of course on GitHub. + "sven", + + # Vincent "vbatts!" Batts made his first contribution to the project + # in November 2013, to become a maintainer a few months later, on + # May 10, 2014 (https://github.com/docker/docker/commit/d6e666a87a01a5634c250358a94c814bf26cb778). + # As a maintainer, Vincent made important contributions to core elements + # of Docker, such as "distribution" (tarsum) and graphdrivers (btrfs, devicemapper). + # He also contributed the "tar-split" library, an important element + # for the content-addressable store. + # Vincent is currently a member of the Open Containers Initiative + # Technical Oversight Board (TOB), besides his work at Red Hat and + # Project Atomic. You can still find him regularly hanging out in + # our repository and the #docker-dev and #docker-maintainers IRC channels + # for a chat, as he's always a lot of fun. + "vbatts", + + # Vishnu became a maintainer to help out on the daemon codebase and + # libcontainer integration. He's currently involved in the + # Open Containers Initiative, working on the specifications, + # besides his work on cAdvisor and Kubernetes for Google.
+ "vishh" + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. + + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.aaronlehmann] + Name = "Aaron Lehmann" + Email = "aaron.lehmann@docker.com" + GitHub = "aaronlehmann" + + [people.aboch] + Name = "Alessandro Boch" + Email = "aboch@docker.com" + GitHub = "aboch" + + [people.alexellis] + Name = "Alex Ellis" + Email = "alexellis2@gmail.com" + GitHub = "alexellis" + + [people.akihirosuda] + Name = "Akihiro Suda" + Email = "suda.akihiro@lab.ntt.co.jp" + GitHub = "AkihiroSuda" + + [people.aluzzardi] + Name = "Andrea Luzzardi" + Email = "al@docker.com" + GitHub = "aluzzardi" + + [people.albers] + Name = "Harald Albers" + Email = "github@albersweb.de" + GitHub = "albers" + + [people.andrewhsu] + Name = "Andrew Hsu" + Email = "andrewhsu@docker.com" + GitHub = "andrewhsu" + + [people.anonymuse] + Name = "Jesse White" + Email = "anonymuse@gmail.com" + GitHub = "anonymuse" + + [people.anusha] + Name = "Anusha Ragunathan" + Email = "anusha@docker.com" + GitHub = "anusha-ragunathan" + + [people.calavera] + Name = "David Calavera" + Email = "david.calavera@gmail.com" + GitHub = "calavera" + + [people.coolljt0725] + Name = "Lei Jitang" + Email = "leijitang@huawei.com" + GitHub = "coolljt0725" + + [people.cpuguy83] + Name = "Brian Goff" + Email = "cpuguy83@gmail.com" + GitHub = "cpuguy83" + + [people.chanwit] + Name = "Chanwit Kaewkasi" + Email = "chanwit@gmail.com" + GitHub = "chanwit" + + [people.crosbymichael] + Name = "Michael Crosby" + Email = "crosbymichael@gmail.com" + GitHub = "crosbymichael" + + [people.dnephin] + Name = "Daniel Nephin" + Email = "dnephin@gmail.com" + GitHub = "dnephin" + + [people.duglin] + Name = "Doug Davis" + Email = "dug@us.ibm.com" + GitHub = "duglin" + + [people.ehazlett] + Name = "Evan Hazlett" + Email = "ejhazlett@gmail.com" + GitHub = "ehazlett" + + [people.erikh] + Name = "Erik Hollensbe" + Email = "erik@docker.com" + GitHub = "erikh" + + [people.estesp] + Name = "Phil Estes" + Email = "estesp@linux.vnet.ibm.com" + GitHub = "estesp" + + [people.fntlnz] + Name = "Lorenzo Fontana" + Email = "fontanalorenz@gmail.com" + GitHub = "fntlnz" + + [people.gianarb] + Name = "Gianluca Arbezzano" + Email = "ga@thumpflow.com" + GitHub = "gianarb" + + [people.icecrime] + Name = "Arnaud Porterie" + Email = "icecrime@gmail.com" + GitHub = "icecrime" + + [people.jamtur01] + Name = "James Turnbull" + Email = "james@lovedthanlost.net" + GitHub = "jamtur01" + + [people.jhowardmsft] + Name = "John Howard" + Email = "jhoward@microsoft.com" + GitHub = "jhowardmsft" + + [people.jessfraz] + Name = "Jessie Frazelle" + Email = "jess@linux.com" + GitHub = "jessfraz" + + [people.johnstep] + Name = "John Stephens" + Email = "johnstep@docker.com" + GitHub = "johnstep" + + [people.justincormack] + Name = "Justin Cormack" + Email = "justin.cormack@docker.com" + GitHub = "justincormack" + + [people.lk4d4] + Name = "Alexander Morozov" + Email = "lk4d4@docker.com" + GitHub = "lk4d4" + + [people.mavenugo] + Name = "Madhu Venugopal" + Email = "madhu@docker.com" + GitHub = "mavenugo" + + [people.mgoelzer] + Name = "Mike Goelzer" + Email = "mike.goelzer@docker.com" + GitHub = "mgoelzer" + + [people.mhbauer] + Name = "Morgan Bauer" + Email = "mbauer@us.ibm.com" + GitHub = "mhbauer" + + [people.misty] + Name = "Misty Stanley-Jones" + Email = "misty@docker.com" + GitHub = "mstanleyjones" + + [people.mlaventure] + Name = "Kenfe-Mickaël
Laventure" + Email = "mickael.laventure@docker.com" + GitHub = "mlaventure" + + [people.moxiegirl] + Name = "Mary Anthony" + Email = "mary.anthony@docker.com" + GitHub = "moxiegirl" + + [people.mrjana] + Name = "Jana Radhakrishnan" + Email = "mrjana@docker.com" + GitHub = "mrjana" + + [people.programmerq] + Name = "Jeff Anderson" + Email = "jeff@docker.com" + GitHub = "programmerq" + + [people.rheinwein] + Name = "Laura Frank" + Email = "laura@codeship.com" + GitHub = "rheinwein" + + [people.runcom] + Name = "Antonio Murdaca" + Email = "runcom@redhat.com" + GitHub = "runcom" + + [people.shykes] + Name = "Solomon Hykes" + Email = "solomon@docker.com" + GitHub = "shykes" + + [people.stevvooe] + Name = "Stephen Day" + Email = "stephen.day@docker.com" + GitHub = "stevvooe" + + [people.sven] + Name = "Sven Dowideit" + Email = "SvenDowideit@home.org.au" + GitHub = "SvenDowideit" + + [people.thajeztah] + Name = "Sebastiaan van Stijn" + Email = "github@gone.nl" + GitHub = "thaJeztah" + + [people.tianon] + Name = "Tianon Gravi" + Email = "admwiggin@gmail.com" + GitHub = "tianon" + + [people.tibor] + Name = "Tibor Vass" + Email = "tibor@docker.com" + GitHub = "tiborvass" + + [people.tonistiigi] + Name = "Tõnis Tiigi" + Email = "tonis@docker.com" + GitHub = "tonistiigi" + + [people.unclejack] + Name = "Cristian Staretu" + Email = "cristian.staretu@gmail.com" + GitHub = "unclejack" + + [people.vbatts] + Name = "Vincent Batts" + Email = "vbatts@redhat.com" + GitHub = "vbatts" + + [people.vdemeester] + Name = "Vincent Demeester" + Email = "vincent@sbr.pm" + GitHub = "vdemeester" + + [people.vieux] + Name = "Victor Vieux" + Email = "vieux@docker.com" + GitHub = "vieux" + + [people.vishh] + Name = "Vishnu Kannan" + Email = "vishnuk@google.com" + GitHub = "vishh" + + [people.yongtang] + Name = "Yong Tang" + Email = "yong.tang.github@outlook.com" + GitHub = "yongtang" + diff --git a/vendor/github.com/moby/moby/Makefile b/vendor/github.com/moby/moby/Makefile new file mode 100644 index 000000000..0d99606cc --- /dev/null +++ b/vendor/github.com/moby/moby/Makefile @@ -0,0 +1,202 @@ +.PHONY: all binary dynbinary build cross deb help init-go-pkg-cache install manpages rpm run shell test test-docker-py test-integration-cli test-unit tgz validate win + +# set the graph driver as the current graphdriver if not set +DOCKER_GRAPHDRIVER := $(if $(DOCKER_GRAPHDRIVER),$(DOCKER_GRAPHDRIVER),$(shell docker info 2>&1 | grep "Storage Driver" | sed 's/.*: //')) +export DOCKER_GRAPHDRIVER +DOCKER_INCREMENTAL_BINARY := $(if $(DOCKER_INCREMENTAL_BINARY),$(DOCKER_INCREMENTAL_BINARY),1) +export DOCKER_INCREMENTAL_BINARY + +# get OS/Arch of docker engine +DOCKER_OSARCH := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKER_ENGINE_OSARCH}') +DOCKERFILE := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKERFILE}') + +DOCKER_GITCOMMIT := $(shell git rev-parse --short HEAD || echo unsupported) +export DOCKER_GITCOMMIT + +# env vars passed through directly to Docker's build scripts +# to allow things like `make KEEPBUNDLE=1 binary` easily +# `project/PACKAGERS.md` have some limited documentation of some of these +DOCKER_ENVS := \ + -e DOCKER_CROSSPLATFORMS \ + -e BUILD_APT_MIRROR \ + -e BUILDFLAGS \ + -e KEEPBUNDLE \ + -e DOCKER_BUILD_ARGS \ + -e DOCKER_BUILD_GOGC \ + -e DOCKER_BUILD_PKGS \ + -e DOCKER_BASH_COMPLETION_PATH \ + -e DOCKER_CLI_PATH \ + -e DOCKER_DEBUG \ + -e DOCKER_EXPERIMENTAL \ + -e DOCKER_GITCOMMIT \ + -e DOCKER_GRAPHDRIVER \ + -e DOCKER_INCREMENTAL_BINARY \ + -e DOCKER_PORT \ 
+ -e DOCKER_REMAP_ROOT \ + -e DOCKER_STORAGE_OPTS \ + -e DOCKER_USERLANDPROXY \ + -e TESTDIRS \ + -e TESTFLAGS \ + -e TIMEOUT \ + -e HTTP_PROXY \ + -e HTTPS_PROXY \ + -e NO_PROXY \ + -e http_proxy \ + -e https_proxy \ + -e no_proxy +# note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds + +# to allow `make BIND_DIR=. shell` or `make BIND_DIR= test` +# (default to no bind mount if DOCKER_HOST is set) +# note: BINDDIR is supported for backwards-compatibility here +BIND_DIR := $(if $(BINDDIR),$(BINDDIR),$(if $(DOCKER_HOST),,bundles)) +DOCKER_MOUNT := $(if $(BIND_DIR),-v "$(CURDIR)/$(BIND_DIR):/go/src/github.com/docker/docker/$(BIND_DIR)") + +# This allows the test suite to be able to run without worrying about the underlying fs used by the container running the daemon (e.g. aufs-on-aufs), so long as the host running the container is running a supported fs. +# The volume will be cleaned up when the container is removed due to `--rm`. +# Note that `BIND_DIR` will already be set to `bundles` if `DOCKER_HOST` is not set (see above BIND_DIR line), in which case this will do nothing since `DOCKER_MOUNT` will already be set. +DOCKER_MOUNT := $(if $(DOCKER_MOUNT),$(DOCKER_MOUNT),-v /go/src/github.com/docker/docker/bundles) -v $(CURDIR)/.git:/go/src/github.com/docker/docker/.git + +# This allows setting the docker-dev container name +DOCKER_CONTAINER_NAME := $(if $(CONTAINER_NAME),--name $(CONTAINER_NAME),) + +# enable package cache if DOCKER_INCREMENTAL_BINARY and DOCKER_MOUNT (i.e. DOCKER_HOST) are set +PKGCACHE_MAP := gopath:/go/pkg goroot-linux_amd64:/usr/local/go/pkg/linux_amd64 goroot-linux_amd64_netgo:/usr/local/go/pkg/linux_amd64_netgo +PKGCACHE_VOLROOT := dockerdev-go-pkg-cache +PKGCACHE_VOL := $(if $(PKGCACHE_DIR),$(CURDIR)/$(PKGCACHE_DIR)/,$(PKGCACHE_VOLROOT)-) +DOCKER_MOUNT_PKGCACHE := $(if $(DOCKER_INCREMENTAL_BINARY),$(shell echo $(PKGCACHE_MAP) | sed -E 's@([^ ]*)@-v "$(PKGCACHE_VOL)\1"@g'),) +DOCKER_MOUNT_CLI := $(if $(DOCKER_CLI_PATH),-v $(shell dirname $(DOCKER_CLI_PATH)):/usr/local/cli,) +DOCKER_MOUNT_BASH_COMPLETION := $(if $(DOCKER_BASH_COMPLETION_PATH),-v $(shell dirname $(DOCKER_BASH_COMPLETION_PATH)):/usr/local/completion/bash,) +DOCKER_MOUNT := $(DOCKER_MOUNT) $(DOCKER_MOUNT_PKGCACHE) $(DOCKER_MOUNT_CLI) $(DOCKER_MOUNT_BASH_COMPLETION) + +GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) +GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g") +DOCKER_IMAGE := docker-dev$(if $(GIT_BRANCH_CLEAN),:$(GIT_BRANCH_CLEAN)) +DOCKER_PORT_FORWARD := $(if $(DOCKER_PORT),-p "$(DOCKER_PORT)",) + +DOCKER_FLAGS := docker run --rm -i --privileged $(DOCKER_CONTAINER_NAME) $(DOCKER_ENVS) $(DOCKER_MOUNT) $(DOCKER_PORT_FORWARD) +BUILD_APT_MIRROR := $(if $(DOCKER_BUILD_APT_MIRROR),--build-arg APT_MIRROR=$(DOCKER_BUILD_APT_MIRROR)) +export BUILD_APT_MIRROR + +SWAGGER_DOCS_PORT ?= 9000 + +INTEGRATION_CLI_MASTER_IMAGE := $(if $(INTEGRATION_CLI_MASTER_IMAGE), $(INTEGRATION_CLI_MASTER_IMAGE), integration-cli-master) +INTEGRATION_CLI_WORKER_IMAGE := $(if $(INTEGRATION_CLI_WORKER_IMAGE), $(INTEGRATION_CLI_WORKER_IMAGE), integration-cli-worker) + +define \n + + +endef + +# if this session isn't interactive, then we don't want to allocate a +# TTY, which would fail, but if it is interactive, we do want to attach +# so that the user can send e.g. ^C through.
+INTERACTIVE := $(shell [ -t 0 ] && echo 1 || echo 0) +ifeq ($(INTERACTIVE), 1) + DOCKER_FLAGS += -t +endif + +DOCKER_RUN_DOCKER := $(DOCKER_FLAGS) "$(DOCKER_IMAGE)" + +default: binary + +all: build ## validate all checks, build linux binaries, run all tests\ncross build non-linux binaries and generate archives + $(DOCKER_RUN_DOCKER) bash -c 'hack/validate/default && hack/make.sh' + +binary: build ## build the linux binaries + $(DOCKER_RUN_DOCKER) hack/make.sh binary + +dynbinary: build ## build the linux dynbinaries + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary + +build: bundles init-go-pkg-cache + $(warning The docker client CLI has moved to github.com/docker/cli. By default, it is built from the git sha specified in hack/dockerfile/binaries-commits. For a dev-test cycle involving the CLI, run:${\n} DOCKER_CLI_PATH=/host/path/to/cli/binary make shell ${\n} then change the cli and compile into a binary at the same location.${\n}) + docker build ${BUILD_APT_MIRROR} ${DOCKER_BUILD_ARGS} -t "$(DOCKER_IMAGE)" -f "$(DOCKERFILE)" . + +bundles: + mkdir bundles + +clean: clean-pkg-cache-vol ## clean up cached resources + +clean-pkg-cache-vol: + @- $(foreach mapping,$(PKGCACHE_MAP), \ + $(shell docker volume rm $(PKGCACHE_VOLROOT)-$(shell echo $(mapping) | awk -F':/' '{ print $$1 }') > /dev/null 2>&1) \ + ) + +cross: build ## cross build the binaries for darwin, freebsd and\nwindows + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary binary cross + +deb: build ## build the deb packages + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary build-deb + + +help: ## this help + @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) + +init-go-pkg-cache: + $(if $(PKGCACHE_DIR), mkdir -p $(shell echo $(PKGCACHE_MAP) | sed -E 's@([^: ]*):[^ ]*@$(PKGCACHE_DIR)/\1@g')) + +install: ## install the linux binaries + KEEPBUNDLE=1 hack/make.sh install-binary + +rpm: build ## build the rpm packages + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary build-rpm + +run: build ## run the docker daemon in a container + $(DOCKER_RUN_DOCKER) sh -c "KEEPBUNDLE=1 hack/make.sh install-binary run" + +shell: build ## start a shell inside the build env + $(DOCKER_RUN_DOCKER) bash + +test: build ## run the unit, integration and docker-py tests + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary cross test-unit test-integration-cli test-docker-py + +test-docker-py: build ## run the docker-py tests + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-docker-py + +test-integration-cli: build ## run the integration tests + $(DOCKER_RUN_DOCKER) hack/make.sh build-integration-test-binary dynbinary test-integration-cli + +test-unit: build ## run the unit tests + $(DOCKER_RUN_DOCKER) hack/make.sh test-unit + +tgz: build ## build the archives (.zip on windows and .tgz\notherwise) containing the binaries + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary binary cross tgz + +validate: build ## validate DCO, Seccomp profile generation, gofmt,\n./pkg/ isolation, golint, tests, tomls, go vet and vendor + $(DOCKER_RUN_DOCKER) hack/validate/all + +win: build ## cross build the binary for windows + $(DOCKER_RUN_DOCKER) hack/make.sh win + +.PHONY: swagger-gen +swagger-gen: + docker run --rm -v $(PWD):/go/src/github.com/docker/docker \ + -w /go/src/github.com/docker/docker \ + --entrypoint hack/generate-swagger-api.sh \ + -e GOPATH=/go \ + quay.io/goswagger/swagger:0.7.4 + +.PHONY: swagger-docs +swagger-docs: ## preview the API documentation + @echo "API docs preview will be 
running at http://localhost:$(SWAGGER_DOCS_PORT)" + @docker run --rm -v $(PWD)/api/swagger.yaml:/usr/share/nginx/html/swagger.yaml \ + -e 'REDOC_OPTIONS=hide-hostname="true" lazy-rendering' \ + -p $(SWAGGER_DOCS_PORT):80 \ + bfirsh/redoc:1.6.2 + +build-integration-cli-on-swarm: build ## build images and binary for running integration-cli on Swarm in parallel + @echo "Building hack/integration-cli-on-swarm (if build fails, please refer to hack/integration-cli-on-swarm/README.md)" + go build -o ./hack/integration-cli-on-swarm/integration-cli-on-swarm ./hack/integration-cli-on-swarm/host + @echo "Building $(INTEGRATION_CLI_MASTER_IMAGE)" + docker build -t $(INTEGRATION_CLI_MASTER_IMAGE) hack/integration-cli-on-swarm/agent +# For worker, we don't use `docker build` so as to enable DOCKER_INCREMENTAL_BINARY and so on + @echo "Building $(INTEGRATION_CLI_WORKER_IMAGE) from $(DOCKER_IMAGE)" + $(eval tmp := integration-cli-worker-tmp) +# We mount pkgcache, but not bundle (bundle needs to be baked into the image) +# To avoid baking DOCKER_GRAPHDRIVER and so on into the image, we cannot use $(DOCKER_ENVS) here + docker run -t -d --name $(tmp) -e DOCKER_GITCOMMIT -e BUILDFLAGS -e DOCKER_INCREMENTAL_BINARY --privileged $(DOCKER_MOUNT_PKGCACHE) $(DOCKER_IMAGE) top + docker exec $(tmp) hack/make.sh build-integration-test-binary dynbinary + docker exec $(tmp) go build -o /worker github.com/docker/docker/hack/integration-cli-on-swarm/agent/worker + docker commit -c 'ENTRYPOINT ["/worker"]' $(tmp) $(INTEGRATION_CLI_WORKER_IMAGE) + docker rm -f $(tmp) diff --git a/vendor/github.com/moby/moby/NOTICE b/vendor/github.com/moby/moby/NOTICE new file mode 100644 index 000000000..0c74e15b0 --- /dev/null +++ b/vendor/github.com/moby/moby/NOTICE @@ -0,0 +1,19 @@ +Docker +Copyright 2012-2017 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +This product contains software (https://github.com/kr/pty) developed +by Keith Rarick, licensed under the MIT License. + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/moby/moby/README.md b/vendor/github.com/moby/moby/README.md new file mode 100644 index 000000000..533d7717d --- /dev/null +++ b/vendor/github.com/moby/moby/README.md @@ -0,0 +1,90 @@ +### Docker users, see [Moby and Docker](https://mobyproject.org/#moby-and-docker) to clarify the relationship between the projects + +### Docker maintainers and contributors, see [Transitioning to Moby](#transitioning-to-moby) for more details + +The Moby Project +================ + +![Moby Project logo](docs/static_files/moby-project-logo.png "The Moby Project") + +Moby is an open-source project created by Docker to advance the software containerization movement. +It provides a “Lego set” of dozens of components, the framework for assembling them into custom container-based systems, and a place for all container enthusiasts to experiment and exchange ideas. + +# Moby + +## Overview + +At the core of Moby is a framework to assemble specialized container systems.
+It provides: + +- A library of containerized components for all vital aspects of a container system: OS, container runtime, orchestration, infrastructure management, networking, storage, security, build, image distribution, etc. +- Tools to assemble the components into runnable artifacts for a variety of platforms and architectures: bare metal (both x86 and Arm); executables for Linux, Mac and Windows; VM images for popular cloud and virtualization providers. +- A set of reference assemblies which can be used as-is, modified, or used as inspiration to create your own. + +All Moby components are containers, so creating new components is as easy as building a new OCI-compatible container. + +## Principles + +Moby is an open project guided by strong principles, but it is modular, flexible and without too strong an opinion on user experience, so it is open to the community to help set its direction. +The guiding principles are: + +- Batteries included but swappable: Moby includes enough components to build a fully featured container system, but its modular architecture ensures that most of the components can be swapped out for different implementations. +- Usable security: Moby will provide secure defaults without compromising usability. +- Container centric: Moby is built with containers, for running containers. + +With Moby, you should be able to describe all the components of your distributed application, from the high-level configuration files down to the kernel you would like to use, and build and deploy it easily. + +Moby uses [containerd](https://github.com/containerd/containerd) as the default container runtime. + +## Audience + +Moby is recommended for anyone who wants to assemble a container-based system. This includes: + +- Hackers who want to customize or patch their Docker build +- System engineers or integrators building a container system +- Infrastructure providers looking to adapt existing container systems to their environment +- Container enthusiasts who want to experiment with the latest container tech +- Open-source developers looking to test their project in a variety of different systems +- Anyone curious about Docker internals and how it’s built + +Moby is NOT recommended for: + +- Application developers looking for an easy way to run their applications in containers. We recommend Docker CE instead. +- Enterprise IT and development teams looking for a ready-to-use, commercially supported container platform. We recommend Docker EE instead. +- Anyone curious about containers and looking for an easy way to learn. We recommend the [docker.com](https://www.docker.com/) website instead. + +# Transitioning to Moby + +Docker is transitioning all of its open source collaborations to the Moby project going forward. +During the transition, all open source activity should continue as usual. + +We are proposing the following list of changes: + +- splitting up the engine into more open components +- removing the docker UI, SDK, etc. to keep them in the Docker org +- clarifying that the project is not limited to the engine, but extends to the assembly of all the individual components of the Docker platform +- open-sourcing new tools & components which we currently use to assemble the Docker product, but could benefit the community +- defining an open, community-centric governance inspired by the Fedora project (a very successful example of balancing the needs of the community with the constraints of the primary corporate sponsor) + +----- + +Legal +===== + +*Brought to you courtesy of our legal counsel.
For more context, +please see the [NOTICE](https://github.com/moby/moby/blob/master/NOTICE) document in this repo.* + +Use and transfer of Moby may be subject to certain restrictions by the +United States and other governments. + +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + + +Licensing +========= +Moby is licensed under the Apache License, Version 2.0. See +[LICENSE](https://github.com/moby/moby/blob/master/LICENSE) for the full +license text. diff --git a/vendor/github.com/moby/moby/ROADMAP.md b/vendor/github.com/moby/moby/ROADMAP.md new file mode 100644 index 000000000..05a8695ad --- /dev/null +++ b/vendor/github.com/moby/moby/ROADMAP.md @@ -0,0 +1,118 @@ +Docker Engine Roadmap +===================== + +### How should I use this document? + +This document provides a description of items that the project decided to prioritize. This should +serve as a reference point for Docker contributors to understand where the project is going, and +help determine if a contribution conflicts with longer-term plans. + +The fact that a feature isn't listed here doesn't mean that a patch for it will automatically be +refused (except for those mentioned as "frozen features" below)! We are always happy to receive +patches for new cool features we haven't thought about, or didn't judge a priority. Please however +understand that such patches might take longer for us to review. + +### How can I help? + +Short-term objectives are listed in the [wiki](https://github.com/docker/docker/wiki) and described +in [Issues](https://github.com/docker/docker/issues?q=is%3Aopen+is%3Aissue+label%3Aroadmap). Our +goal is to split up the workload in such a way that anybody can jump in and help. Please comment on +an issue if you want to take it, to avoid duplicating effort! Similarly, if a maintainer is already +assigned to an issue you'd like to participate in, pinging them on IRC or GitHub to offer your help is +the best way to go. + +### How can I add something to the roadmap? + +The roadmap process is new to the Docker Engine: we are only beginning to structure and document the +project objectives. Our immediate goal is to be more transparent, and work with our community to +focus our efforts on fewer prioritized topics. + +We hope to offer in the near future a process allowing anyone to propose a topic to the roadmap, but +we are not quite there yet. For the time being, the BDFL remains the keeper of the roadmap, and we +won't be accepting pull requests adding or removing items from this file. + +# 1. Features and refactoring + +## 1.1 Runtime improvements + +We recently introduced [`runC`](https://runc.io) as a standalone low-level tool for container +execution. The initial goal was to integrate runC as a replacement in the Engine for the traditional +default libcontainer `execdriver`, but the Engine internals were not ready for this. + +As runC continued evolving, and the OCI specification along with it, we created +[`containerd`](https://containerd.tools/), a daemon to control and monitor multiple `runC` instances. This is +the new target for Engine integration, as it can entirely replace the whole `execdriver` +architecture, and container monitoring along with it. + +Docker Engine will rely on a long-running `containerd` companion daemon for all container execution +related operations. This could open the door in the future for Engine restarts without interrupting +running containers.
+ +## 1.2 Plugins improvements + +Docker Engine 1.7.0 introduced plugin support, initially for the use cases of volumes and networks +extensions. The plugin infrastructure was kept minimal as we were collecting use cases and real +world feedback before optimizing for any particular workflow. + +In the future, we'd like plugins to become first class citizens, and encourage an ecosystem of +plugins. This implies in particular making it trivially easy to distribute plugins as containers +through any Registry instance, as well as solving the commonly heard pain points of plugins needing +to be treated as somewhat special (being active at all times, started before any other user +containers, and not as easily dismissed). + +## 1.3 Internal decoupling + +A lot of work has been done in trying to decouple the Docker Engine's internals. In particular, the +API implementation has been refactored, and the Builder side of the daemon is now +[fully independent](https://github.com/docker/docker/tree/master/builder) while still residing in +the same repository. + +We are exploring ways to go further with that decoupling, capitalizing on the work introduced by the +runtime renovation and plugins improvement efforts. Indeed, the combination of `containerd` support +with the concept of "special" containers opens the door for bootstrapping more Engine internals +using the same facilities. + +## 1.4 Cluster capable Engine + +The community has been pushing for a more cluster capable Docker Engine, and a huge effort was spent +adding features such as multihost networking and node discovery down at the Engine level. Yet, the +Engine is currently incapable of making scheduling decisions alone, and continues relying on Swarm +for that. + +We plan to complete this effort and make the Engine fully cluster capable. Multiple instances of the +Docker Engine are already capable of discovering each other and establishing overlay networking for +their containers to communicate; the next step is for a given Engine to gain the ability to dispatch work +to another node in the cluster. This will be introduced in a backward-compatible way, such that a +`docker run` invocation on a particular node remains fully deterministic. + +# 2. Frozen features + +## 2.1 Docker exec + +We won't accept patches expanding the surface of `docker exec`, which we intend to keep as a +*debugging* feature, as well as being strongly dependent on the Runtime ingredient effort. + +## 2.2 Remote Registry Operations + +A large amount of work is ongoing in the area of image distribution and provenance. This includes +moving to the V2 Registry API and heavily refactoring the code that powers these features. The +desired result is more secure, reliable and easier to use image distribution. + +Part of the problem with this area of the code base is the lack of a stable and flexible interface. +If new features are added that access the registry without solidifying these interfaces, achieving +feature parity will continue to be elusive. While we get a handle on this situation, we are imposing +a moratorium on new code that accesses the Registry API in commands that don't already make remote +calls. + +Currently, only the following commands cause interaction with a remote registry: + + - push + - pull + - run + - build + - search + - login + +In the interest of stabilizing the registry access model during this ongoing work, we are not +accepting additions to other commands that will cause remote interaction with the Registry API.
This +moratorium will lift when the goals of the distribution project have been met. diff --git a/vendor/github.com/moby/moby/VENDORING.md b/vendor/github.com/moby/moby/VENDORING.md new file mode 100644 index 000000000..8884f885a --- /dev/null +++ b/vendor/github.com/moby/moby/VENDORING.md @@ -0,0 +1,46 @@ +# Vendoring policies + +This document outlines recommended vendoring policies for Docker repositories. +(For example, libnetwork is a Docker repo and logrus is not.) + +## Vendoring using tags + +Commit-ID-based vendoring provides little to no information about the updates +vendored. To fix this, vendors will now require that repositories use annotated +tags along with commit IDs to snapshot commits. Annotated tags by themselves +are not sufficient, since the same tag can be force updated to reference +different commits. + +Each tag should: +- Follow Semantic Versioning rules (refer to section on "Semantic Versioning") +- Have a corresponding entry in the change tracking document. + +Each repo should: +- Have a change tracking document between tags/releases, e.g. CHANGELOG.md or a +GitHub releases page. + +The goal here is for consuming repos to be able to use the tag version and +changelog updates to determine whether the vendoring will cause any breaking or +backward incompatible changes. This also means that repos can declare a +dependency on a package of a specific version or greater, up to the next major +release, without encountering breaking changes. + +## Semantic Versioning +Annotated version tags should follow [Semantic Versioning](http://semver.org) policies: + +"Given a version number MAJOR.MINOR.PATCH, increment the: + + 1. MAJOR version when you make incompatible API changes, + 2. MINOR version when you add functionality in a backwards-compatible manner, and + 3. PATCH version when you make backwards-compatible bug fixes. + +Additional labels for pre-release and build metadata are available as extensions +to the MAJOR.MINOR.PATCH format." + +## Vendoring cadence +In order to avoid huge vendoring changes, it is recommended to have a regular +cadence for vendoring updates, e.g. monthly. + +## Pre-merge vendoring tests +All related repos will be vendored into docker/docker. +CI on docker/docker should catch any breaking changes involving multiple repos. diff --git a/vendor/github.com/moby/moby/VERSION b/vendor/github.com/moby/moby/VERSION new file mode 100644 index 000000000..2d736aaa1 --- /dev/null +++ b/vendor/github.com/moby/moby/VERSION @@ -0,0 +1 @@ +17.06.0-dev diff --git a/vendor/github.com/moby/moby/api/README.md b/vendor/github.com/moby/moby/api/README.md new file mode 100644 index 000000000..bb8813252 --- /dev/null +++ b/vendor/github.com/moby/moby/api/README.md @@ -0,0 +1,42 @@ +# Working on the Engine API + +The Engine API is an HTTP API used by the command-line client to communicate with the daemon. It can also be used by third-party software to control the daemon. + +It consists of various components in this repository: + +- `api/swagger.yaml` A Swagger definition of the API. +- `api/types/` Types shared by both the client and server, representing various objects, options, responses, etc. Most are written manually, but some are automatically generated from the Swagger definition. See [#27919](https://github.com/docker/docker/issues/27919) for progress on this. +- `cli/` The command-line client. +- `client/` The Go client used by the command-line client. It can also be used by third-party Go programs (a sketch follows this list). +- `daemon/` The daemon, which serves the API.
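As an illustration of that last point about `client/`, here is a minimal, hypothetical sketch of a third-party Go program driving the daemon. It assumes the client vintage vendored in this tree, where `client.NewEnvClient` is the constructor (later releases replace it with `NewClientWithOpts`); the client is configured from environment variables such as `DOCKER_HOST` and `DOCKER_API_VERSION`.

```go
// Hypothetical sketch: a third-party Go program using the client/ package.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	// NewEnvClient configures the client from the environment, falling
	// back to the platform's default daemon socket.
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}

	// ContainerList issues GET /containers/json, one of the endpoints
	// described by api/swagger.yaml below.
	containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{})
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range containers {
		fmt.Println(c.ID[:12], c.Image)
	}
}
```

The same endpoints back the `cli/` commands, so the Swagger definition described next covers both consumers.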
+ +## Swagger definition + +The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to: + +1. Automatically generate documentation. +2. Automatically generate the Go server and client. (A work-in-progress.) +3. Provide a machine-readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc. + +## Updating the API documentation + +The API documentation is generated entirely from `api/swagger.yaml`. If you make updates to the API, you'll need to edit this file to represent the change in the documentation. + +The file is split into two main sections: + +- `definitions`, which defines re-usable objects used in requests and responses +- `paths`, which defines the API endpoints (and some inline objects which don't need to be reusable) + +To make an edit, first look for the endpoint you want to edit under `paths`, then make the required edits. Endpoints may reference reusable objects with `$ref`, which can be found in the `definitions` section. + +There is hopefully enough example material in the file for you to copy a similar pattern from elsewhere in the file (e.g. adding new fields or endpoints), but for the full reference, see the [Swagger specification](http://swagger.io/specification/). + +`swagger.yaml` is validated by `hack/validate/swagger` to ensure it is a valid Swagger definition. This is useful when you are making edits, to ensure you are doing the right thing. + +## Viewing the API documentation + +When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly. + +Run `make swagger-docs` and a preview will be running at `http://localhost:9000` (the port is set by `SWAGGER_DOCS_PORT` in the Makefile). Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation. + +The production documentation is generated by vendoring `swagger.yaml` into [docker/docker.github.io](https://github.com/docker/docker.github.io). diff --git a/vendor/github.com/moby/moby/api/common.go b/vendor/github.com/moby/moby/api/common.go new file mode 100644 index 000000000..859daf602 --- /dev/null +++ b/vendor/github.com/moby/moby/api/common.go @@ -0,0 +1,65 @@ +package api + +import ( + "encoding/json" + "encoding/pem" + "fmt" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/system" + "github.com/docker/libtrust" +) + +// Common constants for daemon and client. +const ( + // DefaultVersion of Current REST API + DefaultVersion string = "1.31" + + // NoBaseImageSpecifier is the symbol used by the FROM + // command to specify that no base image is to be used.
+ NoBaseImageSpecifier string = "scratch" +) + +// LoadOrCreateTrustKey attempts to load the libtrust key at the given path, +// otherwise generates a new one +func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) { + err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700, "") + if err != nil { + return nil, err + } + trustKey, err := libtrust.LoadKeyFile(trustKeyPath) + if err == libtrust.ErrKeyFileDoesNotExist { + trustKey, err = libtrust.GenerateECP256PrivateKey() + if err != nil { + return nil, fmt.Errorf("Error generating key: %s", err) + } + encodedKey, err := serializePrivateKey(trustKey, filepath.Ext(trustKeyPath)) + if err != nil { + return nil, fmt.Errorf("Error serializing key: %s", err) + } + if err := ioutils.AtomicWriteFile(trustKeyPath, encodedKey, os.FileMode(0600)); err != nil { + return nil, fmt.Errorf("Error saving key file: %s", err) + } + } else if err != nil { + return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err) + } + return trustKey, nil +} + +func serializePrivateKey(key libtrust.PrivateKey, ext string) (encoded []byte, err error) { + if ext == ".json" || ext == ".jwk" { + encoded, err = json.Marshal(key) + if err != nil { + return nil, fmt.Errorf("unable to encode private key JWK: %s", err) + } + } else { + pemBlock, err := key.PEMBlock() + if err != nil { + return nil, fmt.Errorf("unable to encode private key PEM: %s", err) + } + encoded = pem.EncodeToMemory(pemBlock) + } + return +} diff --git a/vendor/github.com/moby/moby/api/common_test.go b/vendor/github.com/moby/moby/api/common_test.go new file mode 100644 index 000000000..f466616b0 --- /dev/null +++ b/vendor/github.com/moby/moby/api/common_test.go @@ -0,0 +1,77 @@ +package api + +import ( + "io/ioutil" + "path/filepath" + "testing" + + "os" +) + +// LoadOrCreateTrustKey +func TestLoadOrCreateTrustKeyInvalidKeyFile(t *testing.T) { + tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpKeyFolderPath) + + tmpKeyFile, err := ioutil.TempFile(tmpKeyFolderPath, "keyfile") + if err != nil { + t.Fatal(err) + } + + if _, err := LoadOrCreateTrustKey(tmpKeyFile.Name()); err == nil { + t.Fatal("expected an error, got nothing.") + } + +} + +func TestLoadOrCreateTrustKeyCreateKey(t *testing.T) { + tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpKeyFolderPath) + + // Without the need to create the folder hierarchy + tmpKeyFile := filepath.Join(tmpKeyFolderPath, "keyfile") + + if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil { + t.Fatalf("expected a new key file, got : %v and %v", err, key) + } + + if _, err := os.Stat(tmpKeyFile); err != nil { + t.Fatalf("Expected to find a file %s, got %v", tmpKeyFile, err) + } + + // With the need to create the folder hierarchy as tmpKeyFie is in a path + // where some folders do not exist. 
+ tmpKeyFile = filepath.Join(tmpKeyFolderPath, "folder/hierarchy/keyfile") + + if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil { + t.Fatalf("expected a new key file, got : %v and %v", err, key) + } + + if _, err := os.Stat(tmpKeyFile); err != nil { + t.Fatalf("Expected to find a file %s, got %v", tmpKeyFile, err) + } + + // With no path at all + defer os.Remove("keyfile") + if key, err := LoadOrCreateTrustKey("keyfile"); err != nil || key == nil { + t.Fatalf("expected a new key file, got : %v and %v", err, key) + } + + if _, err := os.Stat("keyfile"); err != nil { + t.Fatalf("Expected to find a file keyfile, got %v", err) + } +} + +func TestLoadOrCreateTrustKeyLoadValidKey(t *testing.T) { + tmpKeyFile := filepath.Join("fixtures", "keyfile") + + if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil { + t.Fatalf("expected a key file, got : %v and %v", err, key) + } +} diff --git a/vendor/github.com/moby/moby/api/common_unix.go b/vendor/github.com/moby/moby/api/common_unix.go new file mode 100644 index 000000000..081e61c45 --- /dev/null +++ b/vendor/github.com/moby/moby/api/common_unix.go @@ -0,0 +1,6 @@ +// +build !windows + +package api + +// MinVersion represents Minimum REST API version supported +const MinVersion string = "1.12" diff --git a/vendor/github.com/moby/moby/api/common_windows.go b/vendor/github.com/moby/moby/api/common_windows.go new file mode 100644 index 000000000..a6268a4ff --- /dev/null +++ b/vendor/github.com/moby/moby/api/common_windows.go @@ -0,0 +1,8 @@ +package api + +// MinVersion represents Minimum REST API version supported +// Technically the first daemon API version released on Windows is v1.25 in +// engine version 1.13. However, some clients are explicitly using downlevel +// APIs (e.g. docker-compose v2.1 file format) and that is just too restrictive. +// Hence also allowing 1.24 on Windows. +const MinVersion string = "1.24" diff --git a/vendor/github.com/moby/moby/api/errors/errors.go b/vendor/github.com/moby/moby/api/errors/errors.go new file mode 100644 index 000000000..39d52e1a0 --- /dev/null +++ b/vendor/github.com/moby/moby/api/errors/errors.go @@ -0,0 +1,47 @@ +package errors + +import "net/http" + +// apiError is an error wrapper that also +// holds information about response status codes. +type apiError struct { + error + statusCode int +} + +// HTTPErrorStatusCode returns a status code. +func (e apiError) HTTPErrorStatusCode() int { + return e.statusCode +} + +// NewErrorWithStatusCode allows you to associate +// a specific HTTP Status Code to an error. +// The server will take that code and set +// it as the response status. +func NewErrorWithStatusCode(err error, code int) error { + return apiError{err, code} +} + +// NewBadRequestError creates a new API error +// that has the 400 HTTP status code associated to it. +func NewBadRequestError(err error) error { + return NewErrorWithStatusCode(err, http.StatusBadRequest) +} + +// NewRequestForbiddenError creates a new API error +// that has the 403 HTTP status code associated to it. +func NewRequestForbiddenError(err error) error { + return NewErrorWithStatusCode(err, http.StatusForbidden) +} + +// NewRequestNotFoundError creates a new API error +// that has the 404 HTTP status code associated to it. +func NewRequestNotFoundError(err error) error { + return NewErrorWithStatusCode(err, http.StatusNotFound) +} + +// NewRequestConflictError creates a new API error +// that has the 409 HTTP status code associated to it. 
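+//
+// A usage sketch (illustration only; the call site below is hypothetical):
+//
+//	if nameTaken {
+//		return errors.NewRequestConflictError(
+//			fmt.Errorf("container name %q is already in use", name))
+//	}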
+func NewRequestConflictError(err error) error { + return NewErrorWithStatusCode(err, http.StatusConflict) +} diff --git a/vendor/github.com/moby/moby/api/errors/errors_test.go b/vendor/github.com/moby/moby/api/errors/errors_test.go new file mode 100644 index 000000000..1d6a596ac --- /dev/null +++ b/vendor/github.com/moby/moby/api/errors/errors_test.go @@ -0,0 +1,64 @@ +package errors + +import ( + "fmt" + "github.com/stretchr/testify/assert" + "net/http" + "testing" +) + +func newError(errorname string) error { + + return fmt.Errorf("test%v", errorname) +} + +func TestErrors(t *testing.T) { + errmsg := newError("apiError") + err := apiError{ + error: errmsg, + statusCode: 0, + } + assert.Equal(t, err.HTTPErrorStatusCode(), err.statusCode) + + errmsg = newError("ErrorWithStatusCode") + errcode := 1 + serr := NewErrorWithStatusCode(errmsg, errcode) + apierr, ok := serr.(apiError) + if !ok { + t.Fatal("excepted err is apiError type") + } + assert.Equal(t, errcode, apierr.statusCode) + + errmsg = newError("NewBadRequestError") + baderr := NewBadRequestError(errmsg) + apierr, ok = baderr.(apiError) + if !ok { + t.Fatal("excepted err is apiError type") + } + assert.Equal(t, http.StatusBadRequest, apierr.statusCode) + + errmsg = newError("RequestForbiddenError") + ferr := NewRequestForbiddenError(errmsg) + apierr, ok = ferr.(apiError) + if !ok { + t.Fatal("excepted err is apiError type") + } + assert.Equal(t, http.StatusForbidden, apierr.statusCode) + + errmsg = newError("RequestNotFoundError") + nerr := NewRequestNotFoundError(errmsg) + apierr, ok = nerr.(apiError) + if !ok { + t.Fatal("excepted err is apiError type") + } + assert.Equal(t, http.StatusNotFound, apierr.statusCode) + + errmsg = newError("RequestConflictError") + cerr := NewRequestConflictError(errmsg) + apierr, ok = cerr.(apiError) + if !ok { + t.Fatal("excepted err is apiError type") + } + assert.Equal(t, http.StatusConflict, apierr.statusCode) + +} diff --git a/vendor/github.com/moby/moby/api/fixtures/keyfile b/vendor/github.com/moby/moby/api/fixtures/keyfile new file mode 100644 index 000000000..322f25440 --- /dev/null +++ b/vendor/github.com/moby/moby/api/fixtures/keyfile @@ -0,0 +1,7 @@ +-----BEGIN EC PRIVATE KEY----- +keyID: AWX2:I27X:WQFX:IOMK:CNAK:O7PW:VYNB:ZLKC:CVAE:YJP2:SI4A:XXAY + +MHcCAQEEILHTRWdcpKWsnORxSFyBnndJ4ROU41hMtr/GCiLVvwBQoAoGCCqGSM49 +AwEHoUQDQgAElpVFbQ2V2UQKajqdE3fVxJ+/pE/YuEFOxWbOxF2be19BY209/iky +NzeFFK7SLpQ4CBJ7zDVXOHsMzrkY/GquGA== +-----END EC PRIVATE KEY----- diff --git a/vendor/github.com/moby/moby/api/names.go b/vendor/github.com/moby/moby/api/names.go new file mode 100644 index 000000000..f147d1f4c --- /dev/null +++ b/vendor/github.com/moby/moby/api/names.go @@ -0,0 +1,9 @@ +package api + +import "regexp" + +// RestrictedNameChars collects the characters allowed to represent a name, normally used to validate container and volume names. +const RestrictedNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.-]` + +// RestrictedNamePattern is a regular expression to validate names against the collection of restricted characters. 
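+//
+// For illustration: "web.1" and "my_container-2" match, while "-redis" does
+// not, since the first character must be alphanumeric. The trailing `+` also
+// implies a minimum length of two characters.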
+var RestrictedNamePattern = regexp.MustCompile(`^` + RestrictedNameChars + `+$`) diff --git a/vendor/github.com/moby/moby/api/server/backend/build/backend.go b/vendor/github.com/moby/moby/api/server/backend/build/backend.go new file mode 100644 index 000000000..f93fba93b --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/backend/build/backend.go @@ -0,0 +1,90 @@ +package build + +import ( + "fmt" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/fscache" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/stringid" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// ImageComponent provides an interface for working with images +type ImageComponent interface { + SquashImage(from string, to string) (string, error) + TagImageWithReference(image.ID, string, reference.Named) error +} + +// Builder defines interface for running a build +type Builder interface { + Build(context.Context, backend.BuildConfig) (*builder.Result, error) +} + +// Backend provides build functionality to the API router +type Backend struct { + builder Builder + fsCache *fscache.FSCache + imageComponent ImageComponent +} + +// NewBackend creates a new build backend from components +func NewBackend(components ImageComponent, builder Builder, fsCache *fscache.FSCache) (*Backend, error) { + return &Backend{imageComponent: components, builder: builder, fsCache: fsCache}, nil +} + +// Build builds an image from a Source +func (b *Backend) Build(ctx context.Context, config backend.BuildConfig) (string, error) { + options := config.Options + tagger, err := NewTagger(b.imageComponent, config.ProgressWriter.StdoutFormatter, options.Tags) + if err != nil { + return "", err + } + + build, err := b.builder.Build(ctx, config) + if err != nil { + return "", err + } + + var imageID = build.ImageID + if options.Squash { + if imageID, err = squashBuild(build, b.imageComponent); err != nil { + return "", err + } + if config.ProgressWriter.AuxFormatter != nil { + if err = config.ProgressWriter.AuxFormatter.Emit(types.BuildResult{ID: imageID}); err != nil { + return "", err + } + } + } + + stdout := config.ProgressWriter.StdoutFormatter + fmt.Fprintf(stdout, "Successfully built %s\n", stringid.TruncateID(imageID)) + err = tagger.TagImages(image.ID(imageID)) + return imageID, err +} + +// PruneCache removes all cached build sources +func (b *Backend) PruneCache(ctx context.Context) (*types.BuildCachePruneReport, error) { + size, err := b.fsCache.Prune(ctx) + if err != nil { + return nil, errors.Wrap(err, "failed to prune build cache") + } + return &types.BuildCachePruneReport{SpaceReclaimed: size}, nil +} + +func squashBuild(build *builder.Result, imageComponent ImageComponent) (string, error) { + var fromID string + if build.FromImage != nil { + fromID = build.FromImage.ImageID() + } + imageID, err := imageComponent.SquashImage(build.ImageID, fromID) + if err != nil { + return "", errors.Wrap(err, "error squashing image") + } + return imageID, nil +} diff --git a/vendor/github.com/moby/moby/api/server/backend/build/tag.go b/vendor/github.com/moby/moby/api/server/backend/build/tag.go new file mode 100644 index 000000000..7bd5dcdeb --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/backend/build/tag.go @@ -0,0 +1,84 @@ +package build + +import ( + "fmt" + "io" + "runtime" + + "github.com/docker/distribution/reference" + 
"github.com/docker/docker/image" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" +) + +// Tagger is responsible for tagging an image created by a builder +type Tagger struct { + imageComponent ImageComponent + stdout io.Writer + repoAndTags []reference.Named +} + +// NewTagger returns a new Tagger for tagging the images of a build. +// If any of the names are invalid tags an error is returned. +func NewTagger(backend ImageComponent, stdout io.Writer, names []string) (*Tagger, error) { + reposAndTags, err := sanitizeRepoAndTags(names) + if err != nil { + return nil, err + } + return &Tagger{ + imageComponent: backend, + stdout: stdout, + repoAndTags: reposAndTags, + }, nil +} + +// TagImages creates image tags for the imageID +func (bt *Tagger) TagImages(imageID image.ID) error { + for _, rt := range bt.repoAndTags { + // TODO @jhowardmsft LCOW support. Will need revisiting. + platform := runtime.GOOS + if system.LCOWSupported() { + platform = "linux" + } + if err := bt.imageComponent.TagImageWithReference(imageID, platform, rt); err != nil { + return err + } + fmt.Fprintf(bt.stdout, "Successfully tagged %s\n", reference.FamiliarString(rt)) + } + return nil +} + +// sanitizeRepoAndTags parses the raw "t" parameter received from the client +// to a slice of repoAndTag. +// It also validates each repoName and tag. +func sanitizeRepoAndTags(names []string) ([]reference.Named, error) { + var ( + repoAndTags []reference.Named + // This map is used for deduplicating the "-t" parameter. + uniqNames = make(map[string]struct{}) + ) + for _, repo := range names { + if repo == "" { + continue + } + + ref, err := reference.ParseNormalizedNamed(repo) + if err != nil { + return nil, err + } + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return nil, errors.New("build tag cannot contain a digest") + } + + ref = reference.TagNameOnly(ref) + + nameWithTag := ref.String() + + if _, exists := uniqNames[nameWithTag]; !exists { + uniqNames[nameWithTag] = struct{}{} + repoAndTags = append(repoAndTags, ref) + } + } + return repoAndTags, nil +} diff --git a/vendor/github.com/moby/moby/api/server/httputils/decoder.go b/vendor/github.com/moby/moby/api/server/httputils/decoder.go new file mode 100644 index 000000000..458eac560 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/httputils/decoder.go @@ -0,0 +1,16 @@ +package httputils + +import ( + "io" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" +) + +// ContainerDecoder specifies how +// to translate an io.Reader into +// container configuration. +type ContainerDecoder interface { + DecodeConfig(src io.Reader) (*container.Config, *container.HostConfig, *network.NetworkingConfig, error) + DecodeHostConfig(src io.Reader) (*container.HostConfig, error) +} diff --git a/vendor/github.com/moby/moby/api/server/httputils/errors.go b/vendor/github.com/moby/moby/api/server/httputils/errors.go new file mode 100644 index 000000000..82da21c2a --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/httputils/errors.go @@ -0,0 +1,145 @@ +package httputils + +import ( + "net/http" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/gorilla/mux" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +// httpStatusError is an interface +// that errors with custom status codes +// implement to tell the api layer +// which response status to set. 
+type httpStatusError interface { + HTTPErrorStatusCode() int +} + +// inputValidationError is an interface +// that errors generated by invalid +// inputs can implement to tell the +// api layer to set a 400 status code +// in the response. +type inputValidationError interface { + IsValidationError() bool +} + +// GetHTTPErrorStatusCode retrieves status code from error message. +func GetHTTPErrorStatusCode(err error) int { + if err == nil { + logrus.WithFields(logrus.Fields{"error": err}).Error("unexpected HTTP error handling") + return http.StatusInternalServerError + } + + var statusCode int + errMsg := err.Error() + + switch e := err.(type) { + case httpStatusError: + statusCode = e.HTTPErrorStatusCode() + case inputValidationError: + statusCode = http.StatusBadRequest + default: + statusCode = statusCodeFromGRPCError(err) + if statusCode != http.StatusInternalServerError { + return statusCode + } + + // FIXME: this is brittle and should not be necessary, but we still need to identify if + // there are errors falling back into this logic. + // If we need to differentiate between different possible error types, + // we should create appropriate error types that implement the httpStatusError interface. + errStr := strings.ToLower(errMsg) + + for _, status := range []struct { + keyword string + code int + }{ + {"not found", http.StatusNotFound}, + {"cannot find", http.StatusNotFound}, + {"no such", http.StatusNotFound}, + {"bad parameter", http.StatusBadRequest}, + {"no command", http.StatusBadRequest}, + {"conflict", http.StatusConflict}, + {"impossible", http.StatusNotAcceptable}, + {"wrong login/password", http.StatusUnauthorized}, + {"unauthorized", http.StatusUnauthorized}, + {"hasn't been activated", http.StatusForbidden}, + {"this node", http.StatusServiceUnavailable}, + {"needs to be unlocked", http.StatusServiceUnavailable}, + {"certificates have expired", http.StatusServiceUnavailable}, + {"repository does not exist", http.StatusNotFound}, + } { + if strings.Contains(errStr, status.keyword) { + statusCode = status.code + break + } + } + } + + if statusCode == 0 { + statusCode = http.StatusInternalServerError + } + + return statusCode +} + +func apiVersionSupportsJSONErrors(version string) bool { + const firstAPIVersionWithJSONErrors = "1.23" + return version == "" || versions.GreaterThan(version, firstAPIVersionWithJSONErrors) +} + +// MakeErrorHandler makes an HTTP handler that decodes a Docker error and +// returns it in the response. 
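+//
+// Hypothetical wiring (illustration): a gorilla/mux router could reply to
+// unknown routes through the same error path:
+//
+//	m := mux.NewRouter()
+//	m.NotFoundHandler = MakeErrorHandler(errors.New("page not found"))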
+func MakeErrorHandler(err error) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + statusCode := GetHTTPErrorStatusCode(err) + vars := mux.Vars(r) + if apiVersionSupportsJSONErrors(vars["version"]) { + response := &types.ErrorResponse{ + Message: err.Error(), + } + WriteJSON(w, statusCode, response) + } else { + http.Error(w, grpc.ErrorDesc(err), statusCode) + } + } +} + +// statusCodeFromGRPCError returns status code according to gRPC error +func statusCodeFromGRPCError(err error) int { + switch grpc.Code(err) { + case codes.InvalidArgument: // code 3 + return http.StatusBadRequest + case codes.NotFound: // code 5 + return http.StatusNotFound + case codes.AlreadyExists: // code 6 + return http.StatusConflict + case codes.PermissionDenied: // code 7 + return http.StatusForbidden + case codes.FailedPrecondition: // code 9 + return http.StatusBadRequest + case codes.Unauthenticated: // code 16 + return http.StatusUnauthorized + case codes.OutOfRange: // code 11 + return http.StatusBadRequest + case codes.Unimplemented: // code 12 + return http.StatusNotImplemented + case codes.Unavailable: // code 14 + return http.StatusServiceUnavailable + default: + // codes.Canceled(1) + // codes.Unknown(2) + // codes.DeadlineExceeded(4) + // codes.ResourceExhausted(8) + // codes.Aborted(10) + // codes.Internal(13) + // codes.DataLoss(15) + return http.StatusInternalServerError + } +} diff --git a/vendor/github.com/moby/moby/api/server/httputils/form.go b/vendor/github.com/moby/moby/api/server/httputils/form.go new file mode 100644 index 000000000..78bd379c7 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/httputils/form.go @@ -0,0 +1,70 @@ +package httputils + +import ( + "errors" + "net/http" + "path/filepath" + "strconv" + "strings" +) + +// BoolValue transforms a form value in different formats into a boolean type. +func BoolValue(r *http.Request, k string) bool { + s := strings.ToLower(strings.TrimSpace(r.FormValue(k))) + return !(s == "" || s == "0" || s == "no" || s == "false" || s == "none") +} + +// BoolValueOrDefault returns the default bool passed if the query param is +// missing, otherwise it's just a proxy to boolValue above. +func BoolValueOrDefault(r *http.Request, k string, d bool) bool { + if _, ok := r.Form[k]; !ok { + return d + } + return BoolValue(r, k) +} + +// Int64ValueOrZero parses a form value into an int64 type. +// It returns 0 if the parsing fails. +func Int64ValueOrZero(r *http.Request, k string) int64 { + val, err := Int64ValueOrDefault(r, k, 0) + if err != nil { + return 0 + } + return val +} + +// Int64ValueOrDefault parses a form value into an int64 type. If there is an +// error, returns the error. If there is no value returns the default value. +func Int64ValueOrDefault(r *http.Request, field string, def int64) (int64, error) { + if r.Form.Get(field) != "" { + value, err := strconv.ParseInt(r.Form.Get(field), 10, 64) + return value, err + } + return def, nil +} + +// ArchiveOptions stores archive information for different operations. +type ArchiveOptions struct { + Name string + Path string +} + +// ArchiveFormValues parses form values and turns them into ArchiveOptions. +// It fails if the archive name and path are not in the request. 
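+//
+// For illustration (hypothetical request, on a Unix daemon): given
+// PUT /containers/web/archive?path=/tmp with vars = {"name": "web"}, the
+// returned options are ArchiveOptions{Name: "web", Path: "/tmp"}.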
+func ArchiveFormValues(r *http.Request, vars map[string]string) (ArchiveOptions, error) { + if err := ParseForm(r); err != nil { + return ArchiveOptions{}, err + } + + name := vars["name"] + path := filepath.FromSlash(r.Form.Get("path")) + + switch { + case name == "": + return ArchiveOptions{}, errors.New("bad parameter: 'name' cannot be empty") + case path == "": + return ArchiveOptions{}, errors.New("bad parameter: 'path' cannot be empty") + } + + return ArchiveOptions{name, path}, nil +} diff --git a/vendor/github.com/moby/moby/api/server/httputils/form_test.go b/vendor/github.com/moby/moby/api/server/httputils/form_test.go new file mode 100644 index 000000000..bc790e99c --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/httputils/form_test.go @@ -0,0 +1,105 @@ +package httputils + +import ( + "net/http" + "net/url" + "testing" +) + +func TestBoolValue(t *testing.T) { + cases := map[string]bool{ + "": false, + "0": false, + "no": false, + "false": false, + "none": false, + "1": true, + "yes": true, + "true": true, + "one": true, + "100": true, + } + + for c, e := range cases { + v := url.Values{} + v.Set("test", c) + r, _ := http.NewRequest("POST", "", nil) + r.Form = v + + a := BoolValue(r, "test") + if a != e { + t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a) + } + } +} + +func TestBoolValueOrDefault(t *testing.T) { + r, _ := http.NewRequest("GET", "", nil) + if !BoolValueOrDefault(r, "queryparam", true) { + t.Fatal("Expected to get true default value, got false") + } + + v := url.Values{} + v.Set("param", "") + r, _ = http.NewRequest("GET", "", nil) + r.Form = v + if BoolValueOrDefault(r, "param", true) { + t.Fatal("Expected not to get true") + } +} + +func TestInt64ValueOrZero(t *testing.T) { + cases := map[string]int64{ + "": 0, + "asdf": 0, + "0": 0, + "1": 1, + } + + for c, e := range cases { + v := url.Values{} + v.Set("test", c) + r, _ := http.NewRequest("POST", "", nil) + r.Form = v + + a := Int64ValueOrZero(r, "test") + if a != e { + t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a) + } + } +} + +func TestInt64ValueOrDefault(t *testing.T) { + cases := map[string]int64{ + "": -1, + "-1": -1, + "42": 42, + } + + for c, e := range cases { + v := url.Values{} + v.Set("test", c) + r, _ := http.NewRequest("POST", "", nil) + r.Form = v + + a, err := Int64ValueOrDefault(r, "test", -1) + if a != e { + t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a) + } + if err != nil { + t.Fatalf("Error should be nil, but received: %s", err) + } + } +} + +func TestInt64ValueOrDefaultWithError(t *testing.T) { + v := url.Values{} + v.Set("test", "invalid") + r, _ := http.NewRequest("POST", "", nil) + r.Form = v + + _, err := Int64ValueOrDefault(r, "test", -1) + if err == nil { + t.Fatal("Expected an error.") + } +} diff --git a/vendor/github.com/moby/moby/api/server/httputils/httputils.go b/vendor/github.com/moby/moby/api/server/httputils/httputils.go new file mode 100644 index 000000000..92cb67c56 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/httputils/httputils.go @@ -0,0 +1,97 @@ +package httputils + +import ( + "fmt" + "io" + "mime" + "net/http" + "strings" + + "github.com/Sirupsen/logrus" + "golang.org/x/net/context" +) + +// APIVersionKey is the client's requested API version. +const APIVersionKey = "api-version" + +// APIFunc is an adapter to allow the use of ordinary functions as Docker API endpoints. +// Any function that has the appropriate signature can be registered as an API endpoint (e.g. getVersion). 
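+//
+// A conforming endpoint might look like this (hypothetical example):
+//
+//	func getPing(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+//		_, err := w.Write([]byte("OK"))
+//		return err
+//	}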
+type APIFunc func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error + +// HijackConnection interrupts the http response writer to get the +// underlying connection and operate with it. +func HijackConnection(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) { + conn, _, err := w.(http.Hijacker).Hijack() + if err != nil { + return nil, nil, err + } + // Flush the options to make sure the client sets the raw mode + conn.Write([]byte{}) + return conn, conn, nil +} + +// CloseStreams ensures that a list for http streams are properly closed. +func CloseStreams(streams ...interface{}) { + for _, stream := range streams { + if tcpc, ok := stream.(interface { + CloseWrite() error + }); ok { + tcpc.CloseWrite() + } else if closer, ok := stream.(io.Closer); ok { + closer.Close() + } + } +} + +// CheckForJSON makes sure that the request's Content-Type is application/json. +func CheckForJSON(r *http.Request) error { + ct := r.Header.Get("Content-Type") + + // No Content-Type header is ok as long as there's no Body + if ct == "" { + if r.Body == nil || r.ContentLength == 0 { + return nil + } + } + + // Otherwise it better be json + if matchesContentType(ct, "application/json") { + return nil + } + return fmt.Errorf("Content-Type specified (%s) must be 'application/json'", ct) +} + +// ParseForm ensures the request form is parsed even with invalid content types. +// If we don't do this, POST method without Content-type (even with empty body) will fail. +func ParseForm(r *http.Request) error { + if r == nil { + return nil + } + if err := r.ParseForm(); err != nil && !strings.HasPrefix(err.Error(), "mime:") { + return err + } + return nil +} + +// VersionFromContext returns an API version from the context using APIVersionKey. +// It panics if the context value does not have version.Version type. 
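+//
+// Typical use inside a handler (illustration only):
+//
+//	apiVersion := VersionFromContext(ctx) // "" when no version was set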
+func VersionFromContext(ctx context.Context) string { + if ctx == nil { + return "" + } + + if val := ctx.Value(APIVersionKey); val != nil { + return val.(string) + } + + return "" +} + +// matchesContentType validates the content type against the expected one +func matchesContentType(contentType, expectedType string) bool { + mimetype, _, err := mime.ParseMediaType(contentType) + if err != nil { + logrus.Errorf("Error parsing media type: %s error: %v", contentType, err) + } + return err == nil && mimetype == expectedType +} diff --git a/vendor/github.com/moby/moby/api/server/httputils/httputils_test.go b/vendor/github.com/moby/moby/api/server/httputils/httputils_test.go new file mode 100644 index 000000000..d551b9d98 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/httputils/httputils_test.go @@ -0,0 +1,18 @@ +package httputils + +import "testing" + +// matchesContentType +func TestJsonContentType(t *testing.T) { + if !matchesContentType("application/json", "application/json") { + t.Fail() + } + + if !matchesContentType("application/json; charset=utf-8", "application/json") { + t.Fail() + } + + if matchesContentType("dockerapplication/json", "application/json") { + t.Fail() + } +} diff --git a/vendor/github.com/moby/moby/api/server/httputils/httputils_write_json.go b/vendor/github.com/moby/moby/api/server/httputils/httputils_write_json.go new file mode 100644 index 000000000..562c127e8 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/httputils/httputils_write_json.go @@ -0,0 +1,15 @@ +package httputils + +import ( + "encoding/json" + "net/http" +) + +// WriteJSON writes the value v to the http response stream as json with standard json encoding. +func WriteJSON(w http.ResponseWriter, code int, v interface{}) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(code) + enc := json.NewEncoder(w) + enc.SetEscapeHTML(false) + return enc.Encode(v) +} diff --git a/vendor/github.com/moby/moby/api/server/httputils/write_log_stream.go b/vendor/github.com/moby/moby/api/server/httputils/write_log_stream.go new file mode 100644 index 000000000..fd024e196 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/httputils/write_log_stream.go @@ -0,0 +1,96 @@ +package httputils + +import ( + "fmt" + "io" + "net/url" + "sort" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/pkg/stdcopy" +) + +// WriteLogStream writes an encoded byte stream of log messages from the +// messages channel, multiplexing them with a stdcopy.Writer if mux is true +func WriteLogStream(ctx context.Context, w io.Writer, msgs <-chan *backend.LogMessage, config *types.ContainerLogsOptions, mux bool) { + wf := ioutils.NewWriteFlusher(w) + defer wf.Close() + + wf.Flush() + + // this might seem like doing below is clear: + // var outStream io.Writer = wf + // however, this GREATLY DISPLEASES golint, and if you do that, it will + // fail CI. we need outstream to be type writer because if we mux streams, + // we will need to reassign all of the streams to be stdwriters, which only + // conforms to the io.Writer interface. 
+ var outStream io.Writer + outStream = wf + errStream := outStream + sysErrStream := errStream + if mux { + sysErrStream = stdcopy.NewStdWriter(outStream, stdcopy.Systemerr) + errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) + outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } + + for { + msg, ok := <-msgs + if !ok { + return + } + // check if the message contains an error. if so, write that error + // and exit + if msg.Err != nil { + fmt.Fprintf(sysErrStream, "Error grabbing logs: %v\n", msg.Err) + continue + } + logLine := msg.Line + if config.Details { + logLine = append(attrsByteSlice(msg.Attrs), ' ') + logLine = append(logLine, msg.Line...) + } + if config.Timestamps { + // TODO(dperny) the format is defined in + // daemon/logger/logger.go as logger.TimeFormat. importing + // logger is verboten (not part of backend) so idk if just + // importing the same thing from jsonlog is good enough + logLine = append([]byte(msg.Timestamp.Format(jsonlog.RFC3339NanoFixed)+" "), logLine...) + } + if msg.Source == "stdout" && config.ShowStdout { + outStream.Write(logLine) + } + if msg.Source == "stderr" && config.ShowStderr { + errStream.Write(logLine) + } + } +} + +type byKey []backend.LogAttr + +func (b byKey) Len() int { return len(b) } +func (b byKey) Less(i, j int) bool { return b[i].Key < b[j].Key } +func (b byKey) Swap(i, j int) { b[i], b[j] = b[j], b[i] } + +func attrsByteSlice(a []backend.LogAttr) []byte { + // Note this sorts "a" in-place. That is fine here - nothing else is + // going to use Attrs or care about the order. + sort.Sort(byKey(a)) + + var ret []byte + for i, pair := range a { + k, v := url.QueryEscape(pair.Key), url.QueryEscape(pair.Value) + ret = append(ret, []byte(k)...) + ret = append(ret, '=') + ret = append(ret, []byte(v)...) + if i != len(a)-1 { + ret = append(ret, ',') + } + } + return ret +} diff --git a/vendor/github.com/moby/moby/api/server/middleware.go b/vendor/github.com/moby/moby/api/server/middleware.go new file mode 100644 index 000000000..537ce8028 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/middleware.go @@ -0,0 +1,24 @@ +package server + +import ( + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/middleware" +) + +// handlerWithGlobalMiddlewares wraps the handler function for a request with +// the server's global middlewares. The order of the middlewares is backwards, +// meaning that the first in the list will be evaluated last. +func (s *Server) handlerWithGlobalMiddlewares(handler httputils.APIFunc) httputils.APIFunc { + next := handler + + for _, m := range s.middlewares { + next = m.WrapHandler(next) + } + + if s.cfg.Logging && logrus.GetLevel() == logrus.DebugLevel { + next = middleware.DebugRequestMiddleware(next) + } + + return next +} diff --git a/vendor/github.com/moby/moby/api/server/middleware/cors.go b/vendor/github.com/moby/moby/api/server/middleware/cors.go new file mode 100644 index 000000000..ea725dbc7 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/middleware/cors.go @@ -0,0 +1,37 @@ +package middleware + +import ( + "net/http" + + "github.com/Sirupsen/logrus" + "golang.org/x/net/context" +) + +// CORSMiddleware injects CORS headers to each request +// when it's configured. +type CORSMiddleware struct { + defaultHeaders string +} + +// NewCORSMiddleware creates a new CORSMiddleware with default headers. 
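+//
+// Hypothetical wiring (illustration): restrict CORS to a single origin
+// instead of the "*" fallback below:
+//
+//	cors := NewCORSMiddleware("https://dashboard.example.com")
+//	handler = cors.WrapHandler(handler)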
+func NewCORSMiddleware(d string) CORSMiddleware { + return CORSMiddleware{defaultHeaders: d} +} + +// WrapHandler returns a new handler function wrapping the previous one in the request chain. +func (c CORSMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + // If "api-cors-header" is not given, but "api-enable-cors" is true, we set cors to "*" + // otherwise, all head values will be passed to HTTP handler + corsHeaders := c.defaultHeaders + if corsHeaders == "" { + corsHeaders = "*" + } + + logrus.Debugf("CORS header is enabled and set to: %s", corsHeaders) + w.Header().Add("Access-Control-Allow-Origin", corsHeaders) + w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth") + w.Header().Add("Access-Control-Allow-Methods", "HEAD, GET, POST, DELETE, PUT, OPTIONS") + return handler(ctx, w, r, vars) + } +} diff --git a/vendor/github.com/moby/moby/api/server/middleware/debug.go b/vendor/github.com/moby/moby/api/server/middleware/debug.go new file mode 100644 index 000000000..a9a94e7f3 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/middleware/debug.go @@ -0,0 +1,94 @@ +package middleware + +import ( + "bufio" + "encoding/json" + "io" + "net/http" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/pkg/ioutils" + "golang.org/x/net/context" +) + +// DebugRequestMiddleware dumps the request to logger +func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + logrus.Debugf("Calling %s %s", r.Method, r.RequestURI) + + if r.Method != "POST" { + return handler(ctx, w, r, vars) + } + if err := httputils.CheckForJSON(r); err != nil { + return handler(ctx, w, r, vars) + } + maxBodySize := 4096 // 4KB + if r.ContentLength > int64(maxBodySize) { + return handler(ctx, w, r, vars) + } + + body := r.Body + bufReader := bufio.NewReaderSize(body, maxBodySize) + r.Body = ioutils.NewReadCloserWrapper(bufReader, func() error { return body.Close() }) + + b, err := bufReader.Peek(maxBodySize) + if err != io.EOF { + // either there was an error reading, or the buffer is full (in which case the request is too large) + return handler(ctx, w, r, vars) + } + + var postForm map[string]interface{} + if err := json.Unmarshal(b, &postForm); err == nil { + maskSecretKeys(postForm, r.RequestURI) + formStr, errMarshal := json.Marshal(postForm) + if errMarshal == nil { + logrus.Debugf("form data: %s", string(formStr)) + } else { + logrus.Debugf("form data: %q", postForm) + } + } + + return handler(ctx, w, r, vars) + } +} + +func maskSecretKeys(inp interface{}, path string) { + // Remove any query string from the path + idx := strings.Index(path, "?") + if idx != -1 { + path = path[:idx] + } + // Remove trailing / characters + path = strings.TrimRight(path, "/") + + if arr, ok := inp.([]interface{}); ok { + for _, f := range arr { + maskSecretKeys(f, path) + } + return + } + + if form, ok := inp.(map[string]interface{}); ok { + loop0: + 
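+		// Walk every key: values whose key matches the deny-list below
+		// (case-insensitively) are masked; other values are recursed into.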
for k, v := range form { + for _, m := range []string{"password", "secret", "jointoken", "unlockkey", "signingcakey"} { + if strings.EqualFold(m, k) { + form[k] = "*****" + continue loop0 + } + } + maskSecretKeys(v, path) + } + + // Route-specific redactions + if strings.HasSuffix(path, "/secrets/create") { + for k := range form { + if k == "Data" { + form[k] = "*****" + } + } + } + } +} diff --git a/vendor/github.com/moby/moby/api/server/middleware/debug_test.go b/vendor/github.com/moby/moby/api/server/middleware/debug_test.go new file mode 100644 index 000000000..87ecafd14 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/middleware/debug_test.go @@ -0,0 +1,58 @@ +package middleware + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMaskSecretKeys(t *testing.T) { + tests := []struct { + path string + input map[string]interface{} + expected map[string]interface{} + }{ + { + path: "/v1.30/secrets/create", + input: map[string]interface{}{"Data": "foo", "Name": "name", "Labels": map[string]interface{}{}}, + expected: map[string]interface{}{"Data": "*****", "Name": "name", "Labels": map[string]interface{}{}}, + }, + { + path: "/v1.30/secrets/create//", + input: map[string]interface{}{"Data": "foo", "Name": "name", "Labels": map[string]interface{}{}}, + expected: map[string]interface{}{"Data": "*****", "Name": "name", "Labels": map[string]interface{}{}}, + }, + + { + path: "/secrets/create?key=val", + input: map[string]interface{}{"Data": "foo", "Name": "name", "Labels": map[string]interface{}{}}, + expected: map[string]interface{}{"Data": "*****", "Name": "name", "Labels": map[string]interface{}{}}, + }, + { + path: "/v1.30/some/other/path", + input: map[string]interface{}{ + "password": "pass", + "other": map[string]interface{}{ + "secret": "secret", + "jointoken": "jointoken", + "unlockkey": "unlockkey", + "signingcakey": "signingcakey", + }, + }, + expected: map[string]interface{}{ + "password": "*****", + "other": map[string]interface{}{ + "secret": "*****", + "jointoken": "*****", + "unlockkey": "*****", + "signingcakey": "*****", + }, + }, + }, + } + + for _, testcase := range tests { + maskSecretKeys(testcase.input, testcase.path) + assert.Equal(t, testcase.expected, testcase.input) + } +} diff --git a/vendor/github.com/moby/moby/api/server/middleware/experimental.go b/vendor/github.com/moby/moby/api/server/middleware/experimental.go new file mode 100644 index 000000000..b8f56e88b --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/middleware/experimental.go @@ -0,0 +1,29 @@ +package middleware + +import ( + "net/http" + + "golang.org/x/net/context" +) + +// ExperimentalMiddleware is a the middleware in charge of adding the +// 'Docker-Experimental' header to every outgoing request +type ExperimentalMiddleware struct { + experimental string +} + +// NewExperimentalMiddleware creates a new ExperimentalMiddleware +func NewExperimentalMiddleware(experimentalEnabled bool) ExperimentalMiddleware { + if experimentalEnabled { + return ExperimentalMiddleware{"true"} + } + return ExperimentalMiddleware{"false"} +} + +// WrapHandler returns a new handler function wrapping the previous one in the request chain. 
+func (e ExperimentalMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + w.Header().Set("Docker-Experimental", e.experimental) + return handler(ctx, w, r, vars) + } +} diff --git a/vendor/github.com/moby/moby/api/server/middleware/middleware.go b/vendor/github.com/moby/moby/api/server/middleware/middleware.go new file mode 100644 index 000000000..dc1f5bfa0 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/middleware/middleware.go @@ -0,0 +1,13 @@ +package middleware + +import ( + "net/http" + + "golang.org/x/net/context" +) + +// Middleware is an interface to allow the use of ordinary functions as Docker API filters. +// Any struct that has the appropriate signature can be registered as a middleware. +type Middleware interface { + WrapHandler(func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error +} diff --git a/vendor/github.com/moby/moby/api/server/middleware/version.go b/vendor/github.com/moby/moby/api/server/middleware/version.go new file mode 100644 index 000000000..390a7f059 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/middleware/version.go @@ -0,0 +1,51 @@ +package middleware + +import ( + "fmt" + "net/http" + "runtime" + + "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types/versions" + "golang.org/x/net/context" +) + +// VersionMiddleware is a middleware that +// validates the client and server versions. +type VersionMiddleware struct { + serverVersion string + defaultVersion string + minVersion string +} + +// NewVersionMiddleware creates a new VersionMiddleware +// with the default versions. +func NewVersionMiddleware(s, d, m string) VersionMiddleware { + return VersionMiddleware{ + serverVersion: s, + defaultVersion: d, + minVersion: m, + } +} + +// WrapHandler returns a new handler function wrapping the previous one in the request chain. +func (v VersionMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + apiVersion := vars["version"] + if apiVersion == "" { + apiVersion = v.defaultVersion + } + + if versions.LessThan(apiVersion, v.minVersion) { + return errors.NewBadRequestError(fmt.Errorf("client version %s is too old. 
Minimum supported API version is %s, please upgrade your client to a newer version", apiVersion, v.minVersion)) + } + + header := fmt.Sprintf("Docker/%s (%s)", v.serverVersion, runtime.GOOS) + w.Header().Set("Server", header) + w.Header().Set("API-Version", v.defaultVersion) + w.Header().Set("OSType", runtime.GOOS) + ctx = context.WithValue(ctx, "api-version", apiVersion) + return handler(ctx, w, r, vars) + } + +} diff --git a/vendor/github.com/moby/moby/api/server/middleware/version_test.go b/vendor/github.com/moby/moby/api/server/middleware/version_test.go new file mode 100644 index 000000000..29787bf4d --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/middleware/version_test.go @@ -0,0 +1,57 @@ +package middleware + +import ( + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/docker/docker/api/server/httputils" + "golang.org/x/net/context" +) + +func TestVersionMiddleware(t *testing.T) { + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if httputils.VersionFromContext(ctx) == "" { + t.Fatal("Expected version, got empty string") + } + return nil + } + + defaultVersion := "1.10.0" + minVersion := "1.2.0" + m := NewVersionMiddleware(defaultVersion, defaultVersion, minVersion) + h := m.WrapHandler(handler) + + req, _ := http.NewRequest("GET", "/containers/json", nil) + resp := httptest.NewRecorder() + ctx := context.Background() + if err := h(ctx, resp, req, map[string]string{}); err != nil { + t.Fatal(err) + } +} + +func TestVersionMiddlewareWithErrors(t *testing.T) { + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if httputils.VersionFromContext(ctx) == "" { + t.Fatal("Expected version, got empty string") + } + return nil + } + + defaultVersion := "1.10.0" + minVersion := "1.2.0" + m := NewVersionMiddleware(defaultVersion, defaultVersion, minVersion) + h := m.WrapHandler(handler) + + req, _ := http.NewRequest("GET", "/containers/json", nil) + resp := httptest.NewRecorder() + ctx := context.Background() + + vars := map[string]string{"version": "0.1"} + err := h(ctx, resp, req, vars) + + if !strings.Contains(err.Error(), "client version 0.1 is too old. Minimum supported API version is 1.2.0") { + t.Fatalf("Expected too old client error, got %v", err) + } +} diff --git a/vendor/github.com/moby/moby/api/server/router/build/backend.go b/vendor/github.com/moby/moby/api/server/router/build/backend.go new file mode 100644 index 000000000..defffd3ef --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/build/backend.go @@ -0,0 +1,21 @@ +package build + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "golang.org/x/net/context" +) + +// Backend abstracts an image builder whose only purpose is to build an image referenced by an imageID. 
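+//
+// (For reference: the concrete build backend in api/server/backend/build,
+// included earlier in this patch, appears to satisfy this interface.)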
+type Backend interface { + // Build a Docker image returning the id of the image + // TODO: make this return a reference instead of string + Build(context.Context, backend.BuildConfig) (string, error) + + // Prune build cache + PruneCache(context.Context) (*types.BuildCachePruneReport, error) +} + +type experimentalProvider interface { + HasExperimental() bool +} diff --git a/vendor/github.com/moby/moby/api/server/router/build/build.go b/vendor/github.com/moby/moby/api/server/router/build/build.go new file mode 100644 index 000000000..78f5ae2f2 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/build/build.go @@ -0,0 +1,29 @@ +package build + +import "github.com/docker/docker/api/server/router" + +// buildRouter is a router to talk with the build controller +type buildRouter struct { + backend Backend + daemon experimentalProvider + routes []router.Route +} + +// NewRouter initializes a new build router +func NewRouter(b Backend, d experimentalProvider) router.Router { + r := &buildRouter{backend: b, daemon: d} + r.initRoutes() + return r +} + +// Routes returns the available routers to the build controller +func (r *buildRouter) Routes() []router.Route { + return r.routes +} + +func (r *buildRouter) initRoutes() { + r.routes = []router.Route{ + router.NewPostRoute("/build", r.postBuild, router.WithCancel), + router.NewPostRoute("/build/prune", r.postPrune, router.WithCancel), + } +} diff --git a/vendor/github.com/moby/moby/api/server/router/build/build_routes.go b/vendor/github.com/moby/moby/api/server/router/build/build_routes.go new file mode 100644 index 000000000..baa1da303 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/build/build_routes.go @@ -0,0 +1,253 @@ +package build + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "runtime" + "strconv" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + apierrors "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + units "github.com/docker/go-units" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBuildOptions, error) { + version := httputils.VersionFromContext(ctx) + options := &types.ImageBuildOptions{} + if httputils.BoolValue(r, "forcerm") && versions.GreaterThanOrEqualTo(version, "1.12") { + options.Remove = true + } else if r.FormValue("rm") == "" && versions.GreaterThanOrEqualTo(version, "1.12") { + options.Remove = true + } else { + options.Remove = httputils.BoolValue(r, "rm") + } + if httputils.BoolValue(r, "pull") && versions.GreaterThanOrEqualTo(version, "1.16") { + options.PullParent = true + } + + options.Dockerfile = r.FormValue("dockerfile") + options.SuppressOutput = httputils.BoolValue(r, "q") + options.NoCache = httputils.BoolValue(r, "nocache") + options.ForceRemove = httputils.BoolValue(r, "forcerm") + options.MemorySwap = httputils.Int64ValueOrZero(r, "memswap") + options.Memory = httputils.Int64ValueOrZero(r, "memory") + options.CPUShares = httputils.Int64ValueOrZero(r, "cpushares") + options.CPUPeriod = httputils.Int64ValueOrZero(r, "cpuperiod") + options.CPUQuota = httputils.Int64ValueOrZero(r, "cpuquota") + 
options.CPUSetCPUs = r.FormValue("cpusetcpus") + options.CPUSetMems = r.FormValue("cpusetmems") + options.CgroupParent = r.FormValue("cgroupparent") + options.NetworkMode = r.FormValue("networkmode") + options.Tags = r.Form["t"] + options.ExtraHosts = r.Form["extrahosts"] + options.SecurityOpt = r.Form["securityopt"] + options.Squash = httputils.BoolValue(r, "squash") + options.Target = r.FormValue("target") + options.RemoteContext = r.FormValue("remote") + + if r.Form.Get("shmsize") != "" { + shmSize, err := strconv.ParseInt(r.Form.Get("shmsize"), 10, 64) + if err != nil { + return nil, err + } + options.ShmSize = shmSize + } + + if i := container.Isolation(r.FormValue("isolation")); i != "" { + if !container.Isolation.IsValid(i) { + return nil, fmt.Errorf("Unsupported isolation: %q", i) + } + options.Isolation = i + } + + if runtime.GOOS != "windows" && options.SecurityOpt != nil { + return nil, fmt.Errorf("The daemon on this platform does not support setting security options on build") + } + + var buildUlimits = []*units.Ulimit{} + ulimitsJSON := r.FormValue("ulimits") + if ulimitsJSON != "" { + if err := json.Unmarshal([]byte(ulimitsJSON), &buildUlimits); err != nil { + return nil, err + } + options.Ulimits = buildUlimits + } + + // Note that there are two ways a --build-arg might appear in the + // json of the query param: + // "foo":"bar" + // and "foo":nil + // The first is the normal case, ie. --build-arg foo=bar + // or --build-arg foo + // where foo's value was picked up from an env var. + // The second ("foo":nil) is where they put --build-arg foo + // but "foo" isn't set as an env var. In that case we can't just drop + // the fact they mentioned it, we need to pass that along to the builder + // so that it can print a warning about "foo" being unused if there is + // no "ARG foo" in the Dockerfile. + buildArgsJSON := r.FormValue("buildargs") + if buildArgsJSON != "" { + var buildArgs = map[string]*string{} + if err := json.Unmarshal([]byte(buildArgsJSON), &buildArgs); err != nil { + return nil, err + } + options.BuildArgs = buildArgs + } + + labelsJSON := r.FormValue("labels") + if labelsJSON != "" { + var labels = map[string]string{} + if err := json.Unmarshal([]byte(labelsJSON), &labels); err != nil { + return nil, err + } + options.Labels = labels + } + + cacheFromJSON := r.FormValue("cachefrom") + if cacheFromJSON != "" { + var cacheFrom = []string{} + if err := json.Unmarshal([]byte(cacheFromJSON), &cacheFrom); err != nil { + return nil, err + } + options.CacheFrom = cacheFrom + } + options.SessionID = r.FormValue("session") + + return options, nil +} + +func (br *buildRouter) postPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + report, err := br.backend.PruneCache(ctx) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, report) +} + +func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var ( + notVerboseBuffer = bytes.NewBuffer(nil) + version = httputils.VersionFromContext(ctx) + ) + + w.Header().Set("Content-Type", "application/json") + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + errf := func(err error) error { + if httputils.BoolValue(r, "q") && notVerboseBuffer.Len() > 0 { + output.Write(notVerboseBuffer.Bytes()) + } + // Do not write the error in the http output if it's still empty. + // This prevents from writing a 200(OK) when there is an internal error. 
+ if !output.Flushed() { + return err + } + _, err = w.Write(streamformatter.FormatError(err)) + if err != nil { + logrus.Warnf("could not write error response: %v", err) + } + return nil + } + + buildOptions, err := newImageBuildOptions(ctx, r) + if err != nil { + return errf(err) + } + buildOptions.AuthConfigs = getAuthConfigs(r.Header) + + if buildOptions.Squash && !br.daemon.HasExperimental() { + return apierrors.NewBadRequestError( + errors.New("squash is only supported with experimental mode")) + } + + out := io.Writer(output) + if buildOptions.SuppressOutput { + out = notVerboseBuffer + } + + // Currently, only used if context is from a remote url. + // Look at code in DetectContextFromRemoteURL for more information. + createProgressReader := func(in io.ReadCloser) io.ReadCloser { + progressOutput := streamformatter.NewJSONProgressOutput(out, true) + return progress.NewProgressReader(in, progressOutput, r.ContentLength, "Downloading context", buildOptions.RemoteContext) + } + + wantAux := versions.GreaterThanOrEqualTo(version, "1.30") + + imgID, err := br.backend.Build(ctx, backend.BuildConfig{ + Source: r.Body, + Options: buildOptions, + ProgressWriter: buildProgressWriter(out, wantAux, createProgressReader), + }) + if err != nil { + return errf(err) + } + + // Everything worked so if -q was provided the output from the daemon + // should be just the image ID and we'll print that to stdout. + if buildOptions.SuppressOutput { + fmt.Fprintln(streamformatter.NewStdoutWriter(output), imgID) + } + return nil +} + +func getAuthConfigs(header http.Header) map[string]types.AuthConfig { + authConfigs := map[string]types.AuthConfig{} + authConfigsEncoded := header.Get("X-Registry-Config") + + if authConfigsEncoded == "" { + return authConfigs + } + + authConfigsJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authConfigsEncoded)) + // Pulling an image does not error when no auth is provided so to remain + // consistent with the existing api decode errors are ignored + json.NewDecoder(authConfigsJSON).Decode(&authConfigs) + return authConfigs +} + +type syncWriter struct { + w io.Writer + mu sync.Mutex +} + +func (s *syncWriter) Write(b []byte) (count int, err error) { + s.mu.Lock() + count, err = s.w.Write(b) + s.mu.Unlock() + return +} + +func buildProgressWriter(out io.Writer, wantAux bool, createProgressReader func(io.ReadCloser) io.ReadCloser) backend.ProgressWriter { + out = &syncWriter{w: out} + + var aux *streamformatter.AuxFormatter + if wantAux { + aux = &streamformatter.AuxFormatter{Writer: out} + } + + return backend.ProgressWriter{ + Output: out, + StdoutFormatter: streamformatter.NewStdoutWriter(out), + StderrFormatter: streamformatter.NewStderrWriter(out), + AuxFormatter: aux, + ProgressReaderFunc: createProgressReader, + } +} diff --git a/vendor/github.com/moby/moby/api/server/router/checkpoint/backend.go b/vendor/github.com/moby/moby/api/server/router/checkpoint/backend.go new file mode 100644 index 000000000..8810f88b7 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/checkpoint/backend.go @@ -0,0 +1,10 @@ +package checkpoint + +import "github.com/docker/docker/api/types" + +// Backend for Checkpoint +type Backend interface { + CheckpointCreate(container string, config types.CheckpointCreateOptions) error + CheckpointDelete(container string, config types.CheckpointDeleteOptions) error + CheckpointList(container string, config types.CheckpointListOptions) ([]types.Checkpoint, error) +} diff --git 
a/vendor/github.com/moby/moby/api/server/router/checkpoint/checkpoint.go b/vendor/github.com/moby/moby/api/server/router/checkpoint/checkpoint.go new file mode 100644 index 000000000..b16971823 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/checkpoint/checkpoint.go @@ -0,0 +1,36 @@ +package checkpoint + +import ( + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/router" +) + +// checkpointRouter is a router to talk with the checkpoint controller +type checkpointRouter struct { + backend Backend + decoder httputils.ContainerDecoder + routes []router.Route +} + +// NewRouter initializes a new checkpoint router +func NewRouter(b Backend, decoder httputils.ContainerDecoder) router.Router { + r := &checkpointRouter{ + backend: b, + decoder: decoder, + } + r.initRoutes() + return r +} + +// Routes returns the available routers to the checkpoint controller +func (r *checkpointRouter) Routes() []router.Route { + return r.routes +} + +func (r *checkpointRouter) initRoutes() { + r.routes = []router.Route{ + router.NewGetRoute("/containers/{name:.*}/checkpoints", r.getContainerCheckpoints, router.Experimental), + router.NewPostRoute("/containers/{name:.*}/checkpoints", r.postContainerCheckpoint, router.Experimental), + router.NewDeleteRoute("/containers/{name}/checkpoints/{checkpoint}", r.deleteContainerCheckpoint, router.Experimental), + } +} diff --git a/vendor/github.com/moby/moby/api/server/router/checkpoint/checkpoint_routes.go b/vendor/github.com/moby/moby/api/server/router/checkpoint/checkpoint_routes.go new file mode 100644 index 000000000..f98843119 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/checkpoint/checkpoint_routes.go @@ -0,0 +1,65 @@ +package checkpoint + +import ( + "encoding/json" + "net/http" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func (s *checkpointRouter) postContainerCheckpoint(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + var options types.CheckpointCreateOptions + + decoder := json.NewDecoder(r.Body) + if err := decoder.Decode(&options); err != nil { + return err + } + + err := s.backend.CheckpointCreate(vars["name"], options) + if err != nil { + return err + } + + w.WriteHeader(http.StatusCreated) + return nil +} + +func (s *checkpointRouter) getContainerCheckpoints(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + checkpoints, err := s.backend.CheckpointList(vars["name"], types.CheckpointListOptions{ + CheckpointDir: r.Form.Get("dir"), + }) + + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, checkpoints) +} + +func (s *checkpointRouter) deleteContainerCheckpoint(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + err := s.backend.CheckpointDelete(vars["name"], types.CheckpointDeleteOptions{ + CheckpointDir: r.Form.Get("dir"), + CheckpointID: vars["checkpoint"], + }) + + if err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + return nil +} diff --git a/vendor/github.com/moby/moby/api/server/router/container/backend.go b/vendor/github.com/moby/moby/api/server/router/container/backend.go new file mode 100644 index 
000000000..d51ed8177 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/container/backend.go @@ -0,0 +1,79 @@ +package container + +import ( + "io" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + containerpkg "github.com/docker/docker/container" + "github.com/docker/docker/pkg/archive" +) + +// execBackend includes functions to implement to provide exec functionality. +type execBackend interface { + ContainerExecCreate(name string, config *types.ExecConfig) (string, error) + ContainerExecInspect(id string) (*backend.ExecInspect, error) + ContainerExecResize(name string, height, width int) error + ContainerExecStart(ctx context.Context, name string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error + ExecExists(name string) (bool, error) +} + +// copyBackend includes functions to implement to provide container copy functionality. +type copyBackend interface { + ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) + ContainerCopy(name string, res string) (io.ReadCloser, error) + ContainerExport(name string, out io.Writer) error + ContainerExtractToDir(name, path string, copyUIDGID, noOverwriteDirNonDir bool, content io.Reader) error + ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) +} + +// stateBackend includes functions to implement to provide container state lifecycle functionality. +type stateBackend interface { + ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) + ContainerKill(name string, sig uint64) error + ContainerPause(name string) error + ContainerRename(oldName, newName string) error + ContainerResize(name string, height, width int) error + ContainerRestart(name string, seconds *int) error + ContainerRm(name string, config *types.ContainerRmConfig) error + ContainerStart(name string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error + ContainerStop(name string, seconds *int) error + ContainerUnpause(name string) error + ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error) + ContainerWait(ctx context.Context, name string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error) +} + +// monitorBackend includes functions to implement to provide containers monitoring functionality. +type monitorBackend interface { + ContainerChanges(name string) ([]archive.Change, error) + ContainerInspect(name string, size bool, version string) (interface{}, error) + ContainerLogs(ctx context.Context, name string, config *types.ContainerLogsOptions) (<-chan *backend.LogMessage, error) + ContainerStats(ctx context.Context, name string, config *backend.ContainerStatsConfig) error + ContainerTop(name string, psArgs string) (*container.ContainerTopOKBody, error) + + Containers(config *types.ContainerListOptions) ([]*types.Container, error) +} + +// attachBackend includes function to implement to provide container attaching functionality. 
+type attachBackend interface { + ContainerAttach(name string, c *backend.ContainerAttachConfig) error +} + +// systemBackend includes functions to implement to provide system wide containers functionality +type systemBackend interface { + ContainersPrune(ctx context.Context, pruneFilters filters.Args) (*types.ContainersPruneReport, error) +} + +// Backend is all the methods that need to be implemented to provide container specific functionality. +type Backend interface { + execBackend + copyBackend + stateBackend + monitorBackend + attachBackend + systemBackend +} diff --git a/vendor/github.com/moby/moby/api/server/router/container/container.go b/vendor/github.com/moby/moby/api/server/router/container/container.go new file mode 100644 index 000000000..24c3224ee --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/container/container.go @@ -0,0 +1,77 @@ +package container + +import ( + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/router" +) + +type validationError struct { + error +} + +func (validationError) IsValidationError() bool { + return true +} + +// containerRouter is a router to talk with the container controller +type containerRouter struct { + backend Backend + decoder httputils.ContainerDecoder + routes []router.Route +} + +// NewRouter initializes a new container router +func NewRouter(b Backend, decoder httputils.ContainerDecoder) router.Router { + r := &containerRouter{ + backend: b, + decoder: decoder, + } + r.initRoutes() + return r +} + +// Routes returns the available routes to the container controller +func (r *containerRouter) Routes() []router.Route { + return r.routes +} + +// initRoutes initializes the routes in container router +func (r *containerRouter) initRoutes() { + r.routes = []router.Route{ + // HEAD + router.NewHeadRoute("/containers/{name:.*}/archive", r.headContainersArchive), + // GET + router.NewGetRoute("/containers/json", r.getContainersJSON), + router.NewGetRoute("/containers/{name:.*}/export", r.getContainersExport), + router.NewGetRoute("/containers/{name:.*}/changes", r.getContainersChanges), + router.NewGetRoute("/containers/{name:.*}/json", r.getContainersByName), + router.NewGetRoute("/containers/{name:.*}/top", r.getContainersTop), + router.NewGetRoute("/containers/{name:.*}/logs", r.getContainersLogs, router.WithCancel), + router.NewGetRoute("/containers/{name:.*}/stats", r.getContainersStats, router.WithCancel), + router.NewGetRoute("/containers/{name:.*}/attach/ws", r.wsContainersAttach), + router.NewGetRoute("/exec/{id:.*}/json", r.getExecByID), + router.NewGetRoute("/containers/{name:.*}/archive", r.getContainersArchive), + // POST + router.NewPostRoute("/containers/create", r.postContainersCreate), + router.NewPostRoute("/containers/{name:.*}/kill", r.postContainersKill), + router.NewPostRoute("/containers/{name:.*}/pause", r.postContainersPause), + router.NewPostRoute("/containers/{name:.*}/unpause", r.postContainersUnpause), + router.NewPostRoute("/containers/{name:.*}/restart", r.postContainersRestart), + router.NewPostRoute("/containers/{name:.*}/start", r.postContainersStart), + router.NewPostRoute("/containers/{name:.*}/stop", r.postContainersStop), + router.NewPostRoute("/containers/{name:.*}/wait", r.postContainersWait, router.WithCancel), + router.NewPostRoute("/containers/{name:.*}/resize", r.postContainersResize), + router.NewPostRoute("/containers/{name:.*}/attach", r.postContainersAttach), + router.NewPostRoute("/containers/{name:.*}/copy", r.postContainersCopy), // 
Deprecated since 1.8, Errors out since 1.12 + router.NewPostRoute("/containers/{name:.*}/exec", r.postContainerExecCreate), + router.NewPostRoute("/exec/{name:.*}/start", r.postContainerExecStart), + router.NewPostRoute("/exec/{name:.*}/resize", r.postContainerExecResize), + router.NewPostRoute("/containers/{name:.*}/rename", r.postContainerRename), + router.NewPostRoute("/containers/{name:.*}/update", r.postContainerUpdate), + router.NewPostRoute("/containers/prune", r.postContainersPrune, router.WithCancel), + // PUT + router.NewPutRoute("/containers/{name:.*}/archive", r.putContainersArchive), + // DELETE + router.NewDeleteRoute("/containers/{name:.*}", r.deleteContainers), + } +} diff --git a/vendor/github.com/moby/moby/api/server/router/container/container_routes.go b/vendor/github.com/moby/moby/api/server/router/container/container_routes.go new file mode 100644 index 000000000..96b1010e1 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/container/container_routes.go @@ -0,0 +1,608 @@ +package container + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "strconv" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/versions" + containerpkg "github.com/docker/docker/container" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/signal" + "golang.org/x/net/context" + "golang.org/x/net/websocket" +) + +func (s *containerRouter) getContainersJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + filter, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + config := &types.ContainerListOptions{ + All: httputils.BoolValue(r, "all"), + Size: httputils.BoolValue(r, "size"), + Since: r.Form.Get("since"), + Before: r.Form.Get("before"), + Filters: filter, + } + + if tmpLimit := r.Form.Get("limit"); tmpLimit != "" { + limit, err := strconv.Atoi(tmpLimit) + if err != nil { + return err + } + config.Limit = limit + } + + containers, err := s.backend.Containers(config) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, containers) +} + +func (s *containerRouter) getContainersStats(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + stream := httputils.BoolValueOrDefault(r, "stream", true) + if !stream { + w.Header().Set("Content-Type", "application/json") + } + + config := &backend.ContainerStatsConfig{ + Stream: stream, + OutStream: w, + Version: string(httputils.VersionFromContext(ctx)), + } + + return s.backend.ContainerStats(ctx, vars["name"], config) +} + +func (s *containerRouter) getContainersLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + // Args are validated before the stream starts because when it starts we're + // sending HTTP 200 by writing an empty chunk of data to tell the client that + // daemon is going to stream. By sending this initial HTTP 200 we can't report + // any error after the stream starts (i.e. 
container not found, wrong parameters) + with the appropriate status code. + stdout, stderr := httputils.BoolValue(r, "stdout"), httputils.BoolValue(r, "stderr") + if !(stdout || stderr) { + return fmt.Errorf("Bad parameters: you must choose at least one stream") + } + + containerName := vars["name"] + logsConfig := &types.ContainerLogsOptions{ + Follow: httputils.BoolValue(r, "follow"), + Timestamps: httputils.BoolValue(r, "timestamps"), + Since: r.Form.Get("since"), + Tail: r.Form.Get("tail"), + ShowStdout: stdout, + ShowStderr: stderr, + Details: httputils.BoolValue(r, "details"), + } + + // doesn't matter what version the client is on, we're using this internally only + // also, do we need size? I'm thinking we don't + raw, err := s.backend.ContainerInspect(containerName, false, api.DefaultVersion) + if err != nil { + return err + } + container, ok := raw.(*types.ContainerJSON) + if !ok { + // %T prints the type. handy! + return fmt.Errorf("expected container to be *types.ContainerJSON but got %T", raw) + } + + msgs, err := s.backend.ContainerLogs(ctx, containerName, logsConfig) + if err != nil { + return err + } + + // if it has a tty, we're not muxing streams. if it doesn't, we are. simple. + // this is the point of no return for writing a response. once we call + // WriteLogStream, the response has been started and errors will be + // returned in band by WriteLogStream + httputils.WriteLogStream(ctx, w, msgs, logsConfig, !container.Config.Tty) + return nil +} + +func (s *containerRouter) getContainersExport(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return s.backend.ContainerExport(vars["name"], w) +} + +func (s *containerRouter) postContainersStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + // If contentLength is -1, we can assume chunked encoding + // or more technically that the length is unknown + // https://golang.org/src/pkg/net/http/request.go#L139 + // net/http otherwise seems to swallow any headers related to chunked encoding + // including r.TransferEncoding + // allow a nil body for backwards compatibility + + version := httputils.VersionFromContext(ctx) + var hostConfig *container.HostConfig + // A non-nil json object is at least 7 characters. 
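+ // Editorial note (assumed reading of the length check below): the
+ // smallest JSON object that can carry a key/value pair, e.g. {"a":1}, is
+ // exactly 7 bytes, and a ContentLength of -1 means the length is unknown,
+ // as with chunked encoding:
+ //
+ //	len(`{"a":1}`) == 7 // anything shorter cannot describe a HostConfig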
+ if r.ContentLength > 7 || r.ContentLength == -1 { + if versions.GreaterThanOrEqualTo(version, "1.24") { + return validationError{fmt.Errorf("starting container with non-empty request body was deprecated since v1.10 and removed in v1.12")} + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + c, err := s.decoder.DecodeHostConfig(r.Body) + if err != nil { + return err + } + hostConfig = c + } + + if err := httputils.ParseForm(r); err != nil { + return err + } + + checkpoint := r.Form.Get("checkpoint") + checkpointDir := r.Form.Get("checkpoint-dir") + if err := s.backend.ContainerStart(vars["name"], hostConfig, checkpoint, checkpointDir); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (s *containerRouter) postContainersStop(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + var seconds *int + if tmpSeconds := r.Form.Get("t"); tmpSeconds != "" { + valSeconds, err := strconv.Atoi(tmpSeconds) + if err != nil { + return err + } + seconds = &valSeconds + } + + if err := s.backend.ContainerStop(vars["name"], seconds); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + + return nil +} + +type errContainerIsRunning interface { + ContainerIsRunning() bool +} + +func (s *containerRouter) postContainersKill(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + var sig syscall.Signal + name := vars["name"] + + // If we have a signal, look at it. Otherwise, do nothing + if sigStr := r.Form.Get("signal"); sigStr != "" { + var err error + if sig, err = signal.ParseSignal(sigStr); err != nil { + return err + } + } + + if err := s.backend.ContainerKill(name, uint64(sig)); err != nil { + var isStopped bool + if e, ok := err.(errContainerIsRunning); ok { + isStopped = !e.ContainerIsRunning() + } + + // Return error that's not caused because the container is stopped. + // Return error if the container is not running and the api is >= 1.20 + // to keep backwards compatibility. 
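+ // Editorial sketch (assumed request/response shapes) of the
+ // backwards-compatibility rule above; killing an already-stopped
+ // container only became an error with API 1.20:
+ //
+ //	POST /v1.19/containers/web/kill -> 204 No Content, even if "web"
+ //	                                   had already exited
+ //	POST /v1.20/containers/web/kill -> error: "Cannot kill container ..."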
+ version := httputils.VersionFromContext(ctx) + if versions.GreaterThanOrEqualTo(version, "1.20") || !isStopped { + return fmt.Errorf("Cannot kill container %s: %v", name, err) + } + } + + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (s *containerRouter) postContainersRestart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + var seconds *int + if tmpSeconds := r.Form.Get("t"); tmpSeconds != "" { + valSeconds, err := strconv.Atoi(tmpSeconds) + if err != nil { + return err + } + seconds = &valSeconds + } + + if err := s.backend.ContainerRestart(vars["name"], seconds); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *containerRouter) postContainersPause(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := s.backend.ContainerPause(vars["name"]); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *containerRouter) postContainersUnpause(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := s.backend.ContainerUnpause(vars["name"]); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *containerRouter) postContainersWait(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + // Behavior changed in version 1.30 to handle wait condition and to + // return headers immediately. + version := httputils.VersionFromContext(ctx) + legacyBehavior := versions.LessThan(version, "1.30") + + // The wait condition defaults to "not-running". + waitCondition := containerpkg.WaitConditionNotRunning + if !legacyBehavior { + if err := httputils.ParseForm(r); err != nil { + return err + } + switch container.WaitCondition(r.Form.Get("condition")) { + case container.WaitConditionNextExit: + waitCondition = containerpkg.WaitConditionNextExit + case container.WaitConditionRemoved: + waitCondition = containerpkg.WaitConditionRemoved + } + } + + // Note: the context should get canceled if the client closes the + // connection since this handler has been wrapped by the + // router.WithCancel() wrapper. + waitC, err := s.backend.ContainerWait(ctx, vars["name"], waitCondition) + if err != nil { + return err + } + + w.Header().Set("Content-Type", "application/json") + + if !legacyBehavior { + // Write response header immediately. + w.WriteHeader(http.StatusOK) + if flusher, ok := w.(http.Flusher); ok { + flusher.Flush() + } + } + + // Block on the result of the wait operation. 
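+ // Editorial sketch (assumed request shapes) of the two behaviors handled
+ // above:
+ //
+ //	POST /v1.29/containers/db/wait
+ //	    blocks until "db" stops, then replies {"StatusCode":0}
+ //	POST /v1.30/containers/db/wait?condition=removed
+ //	    sends the 200 header immediately, then blocks the body until
+ //	    the container is removed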
+ status := <-waitC + + return json.NewEncoder(w).Encode(&container.ContainerWaitOKBody{ + StatusCode: int64(status.ExitCode()), + }) +} + +func (s *containerRouter) getContainersChanges(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + changes, err := s.backend.ContainerChanges(vars["name"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, changes) +} + +func (s *containerRouter) getContainersTop(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + procList, err := s.backend.ContainerTop(vars["name"], r.Form.Get("ps_args")) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, procList) +} + +func (s *containerRouter) postContainerRename(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + newName := r.Form.Get("name") + if err := s.backend.ContainerRename(name, newName); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (s *containerRouter) postContainerUpdate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + var updateConfig container.UpdateConfig + + decoder := json.NewDecoder(r.Body) + if err := decoder.Decode(&updateConfig); err != nil { + return err + } + + hostConfig := &container.HostConfig{ + Resources: updateConfig.Resources, + RestartPolicy: updateConfig.RestartPolicy, + } + + name := vars["name"] + resp, err := s.backend.ContainerUpdate(name, hostConfig) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, resp) +} + +func (s *containerRouter) postContainersCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + name := r.Form.Get("name") + + config, hostConfig, networkingConfig, err := s.decoder.DecodeConfig(r.Body) + if err != nil { + return err + } + version := httputils.VersionFromContext(ctx) + adjustCPUShares := versions.LessThan(version, "1.19") + + // When using API 1.24 and under, the client is responsible for removing the container + if hostConfig != nil && versions.LessThan(version, "1.25") { + hostConfig.AutoRemove = false + } + + ccr, err := s.backend.ContainerCreate(types.ContainerCreateConfig{ + Name: name, + Config: config, + HostConfig: hostConfig, + NetworkingConfig: networkingConfig, + AdjustCPUShares: adjustCPUShares, + }) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, ccr) +} + +func (s *containerRouter) deleteContainers(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + config := &types.ContainerRmConfig{ + ForceRemove: httputils.BoolValue(r, "force"), + RemoveVolume: httputils.BoolValue(r, "v"), + RemoveLink: httputils.BoolValue(r, "link"), + } + + if err := s.backend.ContainerRm(name, config); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s 
*containerRouter) postContainersResize(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + height, err := strconv.Atoi(r.Form.Get("h")) + if err != nil { + return err + } + width, err := strconv.Atoi(r.Form.Get("w")) + if err != nil { + return err + } + + return s.backend.ContainerResize(vars["name"], height, width) +} + +func (s *containerRouter) postContainersAttach(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + err := httputils.ParseForm(r) + if err != nil { + return err + } + containerName := vars["name"] + + _, upgrade := r.Header["Upgrade"] + detachKeys := r.FormValue("detachKeys") + + hijacker, ok := w.(http.Hijacker) + if !ok { + return fmt.Errorf("error attaching to container %s, hijack connection missing", containerName) + } + + setupStreams := func() (io.ReadCloser, io.Writer, io.Writer, error) { + conn, _, err := hijacker.Hijack() + if err != nil { + return nil, nil, nil, err + } + + // set raw mode + conn.Write([]byte{}) + + if upgrade { + fmt.Fprintf(conn, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n") + } else { + fmt.Fprintf(conn, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") + } + + closer := func() error { + httputils.CloseStreams(conn) + return nil + } + return ioutils.NewReadCloserWrapper(conn, closer), conn, conn, nil + } + + attachConfig := &backend.ContainerAttachConfig{ + GetStreams: setupStreams, + UseStdin: httputils.BoolValue(r, "stdin"), + UseStdout: httputils.BoolValue(r, "stdout"), + UseStderr: httputils.BoolValue(r, "stderr"), + Logs: httputils.BoolValue(r, "logs"), + Stream: httputils.BoolValue(r, "stream"), + DetachKeys: detachKeys, + MuxStreams: true, + } + + if err = s.backend.ContainerAttach(containerName, attachConfig); err != nil { + logrus.Errorf("Handler for %s %s returned error: %v", r.Method, r.URL.Path, err) + // Remember to close stream if error happens + conn, _, errHijack := hijacker.Hijack() + if errHijack == nil { + statusCode := httputils.GetHTTPErrorStatusCode(err) + statusText := http.StatusText(statusCode) + fmt.Fprintf(conn, "HTTP/1.1 %d %s\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n%s\r\n", statusCode, statusText, err.Error()) + httputils.CloseStreams(conn) + } else { + logrus.Errorf("Error Hijacking: %v", err) + } + } + return nil +} + +func (s *containerRouter) wsContainersAttach(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + containerName := vars["name"] + + var err error + detachKeys := r.FormValue("detachKeys") + + done := make(chan struct{}) + started := make(chan struct{}) + + version := httputils.VersionFromContext(ctx) + + setupStreams := func() (io.ReadCloser, io.Writer, io.Writer, error) { + wsChan := make(chan *websocket.Conn) + h := func(conn *websocket.Conn) { + wsChan <- conn + <-done + } + + srv := websocket.Server{Handler: h, Handshake: nil} + go func() { + close(started) + srv.ServeHTTP(w, r) + }() + + conn := <-wsChan + // In case version 1.28 and above, a binary frame will be sent. + // See 28176 for details. 
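+ // Editorial note (assumed x/net/websocket behavior): a server-side
+ // websocket.Conn writes text frames by default, which can corrupt the raw
+ // binary stream of container I/O, so the gate below opts API >= 1.28
+ // clients into websocket.BinaryFrame while older clients keep the
+ // historical text-frame behavior.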
+ if versions.GreaterThanOrEqualTo(version, "1.28") { + conn.PayloadType = websocket.BinaryFrame + } + return conn, conn, conn, nil + } + + attachConfig := &backend.ContainerAttachConfig{ + GetStreams: setupStreams, + Logs: httputils.BoolValue(r, "logs"), + Stream: httputils.BoolValue(r, "stream"), + DetachKeys: detachKeys, + UseStdin: true, + UseStdout: true, + UseStderr: true, + MuxStreams: false, // TODO: this should be true since it's a single stream for both stdout and stderr + } + + err = s.backend.ContainerAttach(containerName, attachConfig) + close(done) + select { + case <-started: + logrus.Errorf("Error attaching websocket: %s", err) + return nil + default: + } + return err +} + +func (s *containerRouter) postContainersPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + pruneFilters, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + pruneReport, err := s.backend.ContainersPrune(ctx, pruneFilters) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, pruneReport) +} diff --git a/vendor/github.com/moby/moby/api/server/router/container/copy.go b/vendor/github.com/moby/moby/api/server/router/container/copy.go new file mode 100644 index 000000000..5cfe8d7ba --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/container/copy.go @@ -0,0 +1,118 @@ +package container + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "strings" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "golang.org/x/net/context" +) + +// postContainersCopy is deprecated in favor of getContainersArchive. +func (s *containerRouter) postContainersCopy(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + // Deprecated since 1.8, Errors out since 1.12 + version := httputils.VersionFromContext(ctx) + if versions.GreaterThanOrEqualTo(version, "1.24") { + w.WriteHeader(http.StatusNotFound) + return nil + } + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + cfg := types.CopyConfig{} + if err := json.NewDecoder(r.Body).Decode(&cfg); err != nil { + return err + } + + if cfg.Resource == "" { + return fmt.Errorf("Path cannot be empty") + } + + data, err := s.backend.ContainerCopy(vars["name"], cfg.Resource) + if err != nil { + if strings.Contains(strings.ToLower(err.Error()), "no such container") { + w.WriteHeader(http.StatusNotFound) + return nil + } + if os.IsNotExist(err) { + return fmt.Errorf("Could not find the file %s in container %s", cfg.Resource, vars["name"]) + } + return err + } + defer data.Close() + + w.Header().Set("Content-Type", "application/x-tar") + _, err = io.Copy(w, data) + return err +} + +// Encode the stat to JSON, base64 encode, and place in a header. 
+func setContainerPathStatHeader(stat *types.ContainerPathStat, header http.Header) error { + statJSON, err := json.Marshal(stat) + if err != nil { + return err + } + + header.Set( + "X-Docker-Container-Path-Stat", + base64.StdEncoding.EncodeToString(statJSON), + ) + + return nil +} + +func (s *containerRouter) headContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + v, err := httputils.ArchiveFormValues(r, vars) + if err != nil { + return err + } + + stat, err := s.backend.ContainerStatPath(v.Name, v.Path) + if err != nil { + return err + } + + return setContainerPathStatHeader(stat, w.Header()) +} + +func (s *containerRouter) getContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + v, err := httputils.ArchiveFormValues(r, vars) + if err != nil { + return err + } + + tarArchive, stat, err := s.backend.ContainerArchivePath(v.Name, v.Path) + if err != nil { + return err + } + defer tarArchive.Close() + + if err := setContainerPathStatHeader(stat, w.Header()); err != nil { + return err + } + + w.Header().Set("Content-Type", "application/x-tar") + _, err = io.Copy(w, tarArchive) + + return err +} + +func (s *containerRouter) putContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + v, err := httputils.ArchiveFormValues(r, vars) + if err != nil { + return err + } + + noOverwriteDirNonDir := httputils.BoolValue(r, "noOverwriteDirNonDir") + copyUIDGID := httputils.BoolValue(r, "copyUIDGID") + + return s.backend.ContainerExtractToDir(v.Name, v.Path, copyUIDGID, noOverwriteDirNonDir, r.Body) +} diff --git a/vendor/github.com/moby/moby/api/server/router/container/exec.go b/vendor/github.com/moby/moby/api/server/router/container/exec.go new file mode 100644 index 000000000..1134a0e79 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/container/exec.go @@ -0,0 +1,140 @@ +package container + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/pkg/stdcopy" + "golang.org/x/net/context" +) + +func (s *containerRouter) getExecByID(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + eConfig, err := s.backend.ContainerExecInspect(vars["id"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, eConfig) +} + +func (s *containerRouter) postContainerExecCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if err := httputils.CheckForJSON(r); err != nil { + return err + } + name := vars["name"] + + execConfig := &types.ExecConfig{} + if err := json.NewDecoder(r.Body).Decode(execConfig); err != nil { + return err + } + + if len(execConfig.Cmd) == 0 { + return fmt.Errorf("No exec command specified") + } + + // Register an instance of Exec in container. + id, err := s.backend.ContainerExecCreate(name, execConfig) + if err != nil { + logrus.Errorf("Error setting up exec command in container %s: %v", name, err) + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, &types.IDResponse{ + ID: id, + }) +} + +// TODO(vishh): Refactor the code to avoid having to specify stream config as part of both create and start. 
+func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + version := httputils.VersionFromContext(ctx) + if versions.GreaterThan(version, "1.21") { + if err := httputils.CheckForJSON(r); err != nil { + return err + } + } + + var ( + execName = vars["name"] + stdin, inStream io.ReadCloser + stdout, stderr, outStream io.Writer + ) + + execStartCheck := &types.ExecStartCheck{} + if err := json.NewDecoder(r.Body).Decode(execStartCheck); err != nil { + return err + } + + if exists, err := s.backend.ExecExists(execName); !exists { + return err + } + + if !execStartCheck.Detach { + var err error + // Setting up the streaming http interface. + inStream, outStream, err = httputils.HijackConnection(w) + if err != nil { + return err + } + defer httputils.CloseStreams(inStream, outStream) + + if _, ok := r.Header["Upgrade"]; ok { + fmt.Fprint(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n") + } else { + fmt.Fprint(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n") + } + + // copy headers that were removed as part of hijack + if err := w.Header().WriteSubset(outStream, nil); err != nil { + return err + } + fmt.Fprint(outStream, "\r\n") + + stdin = inStream + stdout = outStream + if !execStartCheck.Tty { + stderr = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) + stdout = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } + } + + // Now run the user process in container. + // Maybe we should pass ctx here if we're not detaching? + if err := s.backend.ContainerExecStart(context.Background(), execName, stdin, stdout, stderr); err != nil { + if execStartCheck.Detach { + return err + } + stdout.Write([]byte(err.Error() + "\r\n")) + logrus.Errorf("Error running exec in container: %v", err) + } + return nil +} + +func (s *containerRouter) postContainerExecResize(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + height, err := strconv.Atoi(r.Form.Get("h")) + if err != nil { + return err + } + width, err := strconv.Atoi(r.Form.Get("w")) + if err != nil { + return err + } + + return s.backend.ContainerExecResize(vars["name"], height, width) +} diff --git a/vendor/github.com/moby/moby/api/server/router/container/inspect.go b/vendor/github.com/moby/moby/api/server/router/container/inspect.go new file mode 100644 index 000000000..dbbced7ee --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/container/inspect.go @@ -0,0 +1,21 @@ +package container + +import ( + "net/http" + + "github.com/docker/docker/api/server/httputils" + "golang.org/x/net/context" +) + +// getContainersByName inspects container's configuration and serializes it as json. 
+func (s *containerRouter) getContainersByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + displaySize := httputils.BoolValue(r, "size") + + version := httputils.VersionFromContext(ctx) + json, err := s.backend.ContainerInspect(vars["name"], displaySize, version) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, json) +} diff --git a/vendor/github.com/moby/moby/api/server/router/debug/debug.go b/vendor/github.com/moby/moby/api/server/router/debug/debug.go new file mode 100644 index 000000000..b66ff3cf3 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/debug/debug.go @@ -0,0 +1,53 @@ +package debug + +import ( + "expvar" + "net/http" + "net/http/pprof" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/router" + "golang.org/x/net/context" +) + +// NewRouter creates a new debug router. +// The debug router holds endpoints for debugging the daemon, such as those for pprof. +func NewRouter() router.Router { + r := &debugRouter{} + r.initRoutes() + return r +} + +type debugRouter struct { + routes []router.Route +} + +func (r *debugRouter) initRoutes() { + r.routes = []router.Route{ + router.NewGetRoute("/vars", frameworkAdaptHandler(expvar.Handler())), + router.NewGetRoute("/pprof/", frameworkAdaptHandlerFunc(pprof.Index)), + router.NewGetRoute("/pprof/cmdline", frameworkAdaptHandlerFunc(pprof.Cmdline)), + router.NewGetRoute("/pprof/profile", frameworkAdaptHandlerFunc(pprof.Profile)), + router.NewGetRoute("/pprof/symbol", frameworkAdaptHandlerFunc(pprof.Symbol)), + router.NewGetRoute("/pprof/trace", frameworkAdaptHandlerFunc(pprof.Trace)), + router.NewGetRoute("/pprof/{name}", handlePprof), + } +} + +func (r *debugRouter) Routes() []router.Route { + return r.routes +} + +func frameworkAdaptHandler(handler http.Handler) httputils.APIFunc { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + handler.ServeHTTP(w, r) + return nil + } +} + +func frameworkAdaptHandlerFunc(handler http.HandlerFunc) httputils.APIFunc { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + handler(w, r) + return nil + } +} diff --git a/vendor/github.com/moby/moby/api/server/router/debug/debug_routes.go b/vendor/github.com/moby/moby/api/server/router/debug/debug_routes.go new file mode 100644 index 000000000..f2a72615a --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/debug/debug_routes.go @@ -0,0 +1,13 @@ +package debug + +import ( + "net/http" + "net/http/pprof" + + "golang.org/x/net/context" +) + +func handlePprof(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + pprof.Handler(vars["name"]).ServeHTTP(w, r) + return nil +} diff --git a/vendor/github.com/moby/moby/api/server/router/distribution/backend.go b/vendor/github.com/moby/moby/api/server/router/distribution/backend.go new file mode 100644 index 000000000..fc3a80e59 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/distribution/backend.go @@ -0,0 +1,14 @@ +package distribution + +import ( + "github.com/docker/distribution" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// Backend is all the methods that need to be implemented +// to provide image distribution specific functionality. 
+type Backend interface { + GetRepository(context.Context, reference.Named, *types.AuthConfig) (distribution.Repository, bool, error) +} diff --git a/vendor/github.com/moby/moby/api/server/router/distribution/distribution.go b/vendor/github.com/moby/moby/api/server/router/distribution/distribution.go new file mode 100644 index 000000000..c1fb7bc1e --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/distribution/distribution.go @@ -0,0 +1,31 @@ +package distribution + +import "github.com/docker/docker/api/server/router" + +// distributionRouter is a router to talk with the registry +type distributionRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new distribution router +func NewRouter(backend Backend) router.Router { + r := &distributionRouter{ + backend: backend, + } + r.initRoutes() + return r +} + +// Routes returns the available routes +func (r *distributionRouter) Routes() []router.Route { + return r.routes +} + +// initRoutes initializes the routes in the distribution router +func (r *distributionRouter) initRoutes() { + r.routes = []router.Route{ + // GET + router.NewGetRoute("/distribution/{name:.*}/json", r.getDistributionInfo), + } +} diff --git a/vendor/github.com/moby/moby/api/server/router/distribution/distribution_routes.go b/vendor/github.com/moby/moby/api/server/router/distribution/distribution_routes.go new file mode 100644 index 000000000..cc9e66a16 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/distribution/distribution_routes.go @@ -0,0 +1,138 @@ +package distribution + +import ( + "encoding/base64" + "encoding/json" + "net/http" + "strings" + + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +func (s *distributionRouter) getDistributionInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + w.Header().Set("Content-Type", "application/json") + + var ( + config = &types.AuthConfig{} + authEncoded = r.Header.Get("X-Registry-Auth") + distributionInspect registrytypes.DistributionInspect + ) + + if authEncoded != "" { + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(&config); err != nil { + // for a search it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + config = &types.AuthConfig{} + } + } + + image := vars["name"] + + ref, err := reference.ParseAnyReference(image) + if err != nil { + return err + } + namedRef, ok := ref.(reference.Named) + if !ok { + if _, ok := ref.(reference.Digested); ok { + // full image ID + return errors.Errorf("no manifest found for full image ID") + } + return errors.Errorf("unknown image reference format: %s", image) + } + + distrepo, _, err := s.backend.GetRepository(ctx, namedRef, config) + if err != nil { + return err + } + blobsrvc := distrepo.Blobs(ctx) + + if canonicalRef, ok := namedRef.(reference.Canonical); !ok { + namedRef = reference.TagNameOnly(namedRef) + + taggedRef, ok := 
namedRef.(reference.NamedTagged) + if !ok { + return errors.Errorf("image reference not tagged: %s", image) + } + + descriptor, err := distrepo.Tags(ctx).Get(ctx, taggedRef.Tag()) + if err != nil { + return err + } + distributionInspect.Descriptor = v1.Descriptor{ + MediaType: descriptor.MediaType, + Digest: descriptor.Digest, + Size: descriptor.Size, + } + } else { + // TODO(nishanttotla): Once manifests can be looked up as a blob, the + // descriptor should be set using blobsrvc.Stat(ctx, canonicalRef.Digest()) + // instead of having to manually fill in the fields + distributionInspect.Descriptor.Digest = canonicalRef.Digest() + } + + // we have a digest, so we can retrieve the manifest + mnfstsrvc, err := distrepo.Manifests(ctx) + if err != nil { + return err + } + mnfst, err := mnfstsrvc.Get(ctx, distributionInspect.Descriptor.Digest) + if err != nil { + return err + } + + mediaType, payload, err := mnfst.Payload() + if err != nil { + return err + } + // update MediaType because registry might return something incorrect + distributionInspect.Descriptor.MediaType = mediaType + if distributionInspect.Descriptor.Size == 0 { + distributionInspect.Descriptor.Size = int64(len(payload)) + } + + // retrieve platform information depending on the type of manifest + switch mnfstObj := mnfst.(type) { + case *manifestlist.DeserializedManifestList: + for _, m := range mnfstObj.Manifests { + distributionInspect.Platforms = append(distributionInspect.Platforms, v1.Platform{ + Architecture: m.Platform.Architecture, + OS: m.Platform.OS, + OSVersion: m.Platform.OSVersion, + OSFeatures: m.Platform.OSFeatures, + Variant: m.Platform.Variant, + }) + } + case *schema2.DeserializedManifest: + configJSON, err := blobsrvc.Get(ctx, mnfstObj.Config.Digest) + var platform v1.Platform + if err == nil { + err := json.Unmarshal(configJSON, &platform) + if err == nil && (platform.OS != "" || platform.Architecture != "") { + distributionInspect.Platforms = append(distributionInspect.Platforms, platform) + } + } + case *schema1.SignedManifest: + platform := v1.Platform{ + Architecture: mnfstObj.Architecture, + OS: "linux", + } + distributionInspect.Platforms = append(distributionInspect.Platforms, platform) + } + + return httputils.WriteJSON(w, http.StatusOK, distributionInspect) +} diff --git a/vendor/github.com/moby/moby/api/server/router/experimental.go b/vendor/github.com/moby/moby/api/server/router/experimental.go new file mode 100644 index 000000000..ac31f0487 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/experimental.go @@ -0,0 +1,67 @@ +package router + +import ( + "errors" + "net/http" + + "golang.org/x/net/context" + + apierrors "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/server/httputils" +) + +var ( + errExperimentalFeature = errors.New("This experimental feature is disabled by default. Start the Docker daemon in experimental mode in order to enable it.") +) + +// ExperimentalRoute defines an experimental API route that can be enabled or disabled. +type ExperimentalRoute interface { + Route + + Enable() + Disable() +} + +// experimentalRoute defines an experimental API route that can be enabled or disabled. 
+// It implements ExperimentalRoute +type experimentalRoute struct { + local Route + handler httputils.APIFunc +} + +// Enable enables this experimental route +func (r *experimentalRoute) Enable() { + r.handler = r.local.Handler() +} + +// Disable disables the experimental route +func (r *experimentalRoute) Disable() { + r.handler = experimentalHandler +} + +func experimentalHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return apierrors.NewErrorWithStatusCode(errExperimentalFeature, http.StatusNotImplemented) +} + +// Handler returns the APIFunc to let the server wrap it in middlewares. +func (r *experimentalRoute) Handler() httputils.APIFunc { + return r.handler +} + +// Method returns the http method that the route responds to. +func (r *experimentalRoute) Method() string { + return r.local.Method() +} + +// Path returns the subpath that the route responds to. +func (r *experimentalRoute) Path() string { + return r.local.Path() +} + +// Experimental will mark a route as experimental. +func Experimental(r Route) Route { + return &experimentalRoute{ + local: r, + handler: experimentalHandler, + } +} diff --git a/vendor/github.com/moby/moby/api/server/router/image/backend.go b/vendor/github.com/moby/moby/api/server/router/image/backend.go new file mode 100644 index 000000000..9a588a71a --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/image/backend.go @@ -0,0 +1,46 @@ +package image + +import ( + "io" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/image" + "github.com/docker/docker/api/types/registry" + "golang.org/x/net/context" +) + +// Backend is all the methods that need to be implemented +// to provide image specific functionality. 
+type Backend interface { + containerBackend + imageBackend + importExportBackend + registryBackend +} + +type containerBackend interface { + Commit(name string, config *backend.ContainerCommitConfig) (imageID string, err error) +} + +type imageBackend interface { + ImageDelete(imageRef string, force, prune bool) ([]types.ImageDeleteResponseItem, error) + ImageHistory(imageName string) ([]*image.HistoryResponseItem, error) + Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) + LookupImage(name string) (*types.ImageInspect, error) + TagImage(imageName, repository, tag string) error + ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error) +} + +type importExportBackend interface { + LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error + ImportImage(src string, repository, platform string, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error + ExportImage(names []string, outStream io.Writer) error +} + +type registryBackend interface { + PullImage(ctx context.Context, image, tag, platform string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error + PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error + SearchRegistryForImages(ctx context.Context, filtersArgs string, term string, limit int, authConfig *types.AuthConfig, metaHeaders map[string][]string) (*registry.SearchResults, error) +} diff --git a/vendor/github.com/moby/moby/api/server/router/image/image.go b/vendor/github.com/moby/moby/api/server/router/image/image.go new file mode 100644 index 000000000..6c233d900 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/image/image.go @@ -0,0 +1,50 @@ +package image + +import ( + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/router" +) + +// imageRouter is a router to talk with the image controller +type imageRouter struct { + backend Backend + decoder httputils.ContainerDecoder + routes []router.Route +} + +// NewRouter initializes a new image router +func NewRouter(backend Backend, decoder httputils.ContainerDecoder) router.Router { + r := &imageRouter{ + backend: backend, + decoder: decoder, + } + r.initRoutes() + return r +} + +// Routes returns the available routes to the image controller +func (r *imageRouter) Routes() []router.Route { + return r.routes +} + +// initRoutes initializes the routes in the image router +func (r *imageRouter) initRoutes() { + r.routes = []router.Route{ + // GET + router.NewGetRoute("/images/json", r.getImagesJSON), + router.NewGetRoute("/images/search", r.getImagesSearch), + router.NewGetRoute("/images/get", r.getImagesGet), + router.NewGetRoute("/images/{name:.*}/get", r.getImagesGet), + router.NewGetRoute("/images/{name:.*}/history", r.getImagesHistory), + router.NewGetRoute("/images/{name:.*}/json", r.getImagesByName), + // POST + router.NewPostRoute("/commit", r.postCommit), + router.NewPostRoute("/images/load", r.postImagesLoad), + router.NewPostRoute("/images/create", r.postImagesCreate, router.WithCancel), + router.NewPostRoute("/images/{name:.*}/push", r.postImagesPush, router.WithCancel), + router.NewPostRoute("/images/{name:.*}/tag", r.postImagesTag), + router.NewPostRoute("/images/prune", r.postImagesPrune, router.WithCancel), + // DELETE + router.NewDeleteRoute("/images/{name:.*}", r.deleteImages), + } +} diff --git 
a/vendor/github.com/moby/moby/api/server/router/image/image_routes.go b/vendor/github.com/moby/moby/api/server/router/image/image_routes.go new file mode 100644 index 000000000..9b99a585f --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/image/image_routes.go @@ -0,0 +1,378 @@ +package image + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "runtime" + "strconv" + "strings" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +func (s *imageRouter) postCommit(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + cname := r.Form.Get("container") + + pause := httputils.BoolValue(r, "pause") + version := httputils.VersionFromContext(ctx) + if r.FormValue("pause") == "" && versions.GreaterThanOrEqualTo(version, "1.13") { + pause = true + } + + c, _, _, err := s.decoder.DecodeConfig(r.Body) + if err != nil && err != io.EOF { //Do not fail if body is empty. + return err + } + if c == nil { + c = &container.Config{} + } + + commitCfg := &backend.ContainerCommitConfig{ + ContainerCommitConfig: types.ContainerCommitConfig{ + Pause: pause, + Repo: r.Form.Get("repo"), + Tag: r.Form.Get("tag"), + Author: r.Form.Get("author"), + Comment: r.Form.Get("comment"), + Config: c, + MergeConfigs: true, + }, + Changes: r.Form["changes"], + } + + imgID, err := s.backend.Commit(cname, commitCfg) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, &types.IDResponse{ + ID: string(imgID), + }) +} + +// Creates an image from Pull or from Import +func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + var ( + image = r.Form.Get("fromImage") + repo = r.Form.Get("repo") + tag = r.Form.Get("tag") + message = r.Form.Get("message") + err error + output = ioutils.NewWriteFlusher(w) + ) + defer output.Close() + + // TODO @jhowardmsft LCOW Support: Eventually we will need an API change + // so that platform comes from (for example) r.Form.Get("platform"). For + // the initial implementation, we assume that the platform is the + // runtime OS of the host. It will also need a validation function such + // as below which should be called after getting it from the API. 
+ // + // Ensures the requested platform is valid and normalized + //func validatePlatform(req string) (string, error) { + // req = strings.ToLower(req) + // if req == "" { + // req = runtime.GOOS // default to host platform + // } + // valid := []string{runtime.GOOS} + // + // if system.LCOWSupported() { + // valid = append(valid, "linux") + // } + // + // for _, item := range valid { + // if req == item { + // return req, nil + // } + // } + // return "", fmt.Errorf("invalid platform requested: %s", req) + //} + // + // And in the call-site: + // if platform, err = validatePlatform(platform); err != nil { + // return err + // } + platform := runtime.GOOS + if system.LCOWSupported() { + platform = "linux" + } + + w.Header().Set("Content-Type", "application/json") + + if image != "" { //pull + metaHeaders := map[string][]string{} + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + + authEncoded := r.Header.Get("X-Registry-Auth") + authConfig := &types.AuthConfig{} + if authEncoded != "" { + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { + // for a pull it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + authConfig = &types.AuthConfig{} + } + } + + err = s.backend.PullImage(ctx, image, tag, platform, metaHeaders, authConfig, output) + } else { //import + src := r.Form.Get("fromSrc") + // 'err' MUST NOT be defined within this block, we need any error + // generated from the download to be available to the output + // stream processing below + err = s.backend.ImportImage(src, repo, platform, tag, message, r.Body, output, r.Form["changes"]) + } + if err != nil { + if !output.Flushed() { + return err + } + output.Write(streamformatter.FormatError(err)) + } + + return nil +} + +func (s *imageRouter) postImagesPush(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + metaHeaders := map[string][]string{} + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + if err := httputils.ParseForm(r); err != nil { + return err + } + authConfig := &types.AuthConfig{} + + authEncoded := r.Header.Get("X-Registry-Auth") + if authEncoded != "" { + // the new format is to handle the authConfig as a header + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { + // to increase compatibility to existing api it is defaulting to be empty + authConfig = &types.AuthConfig{} + } + } else { + // the old format is supported for compatibility if there was no authConfig header + if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil { + return fmt.Errorf("Bad parameters and missing X-Registry-Auth: %v", err) + } + } + + image := vars["name"] + tag := r.Form.Get("tag") + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + + w.Header().Set("Content-Type", "application/json") + + if err := s.backend.PushImage(ctx, image, tag, metaHeaders, authConfig, output); err != nil { + if !output.Flushed() { + return err + } + output.Write(streamformatter.FormatError(err)) + } + return nil +} + +func (s *imageRouter) getImagesGet(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + 
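+ // Editorial sketch (assumed client-side encoding, hypothetical values) of
+ // the X-Registry-Auth header that the create/push handlers above decode
+ // with base64.URLEncoding:
+ //
+ //	buf, _ := json.Marshal(types.AuthConfig{
+ //		Username:      "someuser",
+ //		Password:      "secret",
+ //		ServerAddress: "registry.example.com",
+ //	})
+ //	req.Header.Set("X-Registry-Auth", base64.URLEncoding.EncodeToString(buf))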
w.Header().Set("Content-Type", "application/x-tar") + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + var names []string + if name, ok := vars["name"]; ok { + names = []string{name} + } else { + names = r.Form["names"] + } + + if err := s.backend.ExportImage(names, output); err != nil { + if !output.Flushed() { + return err + } + output.Write(streamformatter.FormatError(err)) + } + return nil +} + +func (s *imageRouter) postImagesLoad(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + quiet := httputils.BoolValueOrDefault(r, "quiet", true) + + w.Header().Set("Content-Type", "application/json") + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + if err := s.backend.LoadImage(r.Body, output, quiet); err != nil { + output.Write(streamformatter.FormatError(err)) + } + return nil +} + +func (s *imageRouter) deleteImages(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + + if strings.TrimSpace(name) == "" { + return fmt.Errorf("image name cannot be blank") + } + + force := httputils.BoolValue(r, "force") + prune := !httputils.BoolValue(r, "noprune") + + list, err := s.backend.ImageDelete(name, force, prune) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, list) +} + +func (s *imageRouter) getImagesByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + imageInspect, err := s.backend.LookupImage(vars["name"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, imageInspect) +} + +func (s *imageRouter) getImagesJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + imageFilters, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + filterParam := r.Form.Get("filter") + // FIXME(vdemeester) This has been deprecated in 1.13, and is target for removal for v17.12 + if filterParam != "" { + imageFilters.Add("reference", filterParam) + } + + images, err := s.backend.Images(imageFilters, httputils.BoolValue(r, "all"), false) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, images) +} + +func (s *imageRouter) getImagesHistory(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + name := vars["name"] + history, err := s.backend.ImageHistory(name) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, history) +} + +func (s *imageRouter) postImagesTag(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if err := s.backend.TagImage(vars["name"], r.Form.Get("repo"), r.Form.Get("tag")); err != nil { + return err + } + w.WriteHeader(http.StatusCreated) + return nil +} + +func (s *imageRouter) getImagesSearch(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + var ( + config *types.AuthConfig + authEncoded = r.Header.Get("X-Registry-Auth") + headers = map[string][]string{} + ) + + if authEncoded != "" { + authJSON := base64.NewDecoder(base64.URLEncoding, 
strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(&config); err != nil { + // For a search it is not an error if no auth was given; to increase + // compatibility with the existing API it defaults to empty. + config = &types.AuthConfig{} + } + } + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + headers[k] = v + } + } + limit := registry.DefaultSearchLimit + if r.Form.Get("limit") != "" { + limitValue, err := strconv.Atoi(r.Form.Get("limit")) + if err != nil { + return err + } + limit = limitValue + } + query, err := s.backend.SearchRegistryForImages(ctx, r.Form.Get("filters"), r.Form.Get("term"), limit, config, headers) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, query.Results) +} + +func (s *imageRouter) postImagesPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + pruneFilters, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + pruneReport, err := s.backend.ImagesPrune(ctx, pruneFilters) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, pruneReport) +} diff --git a/vendor/github.com/moby/moby/api/server/router/local.go b/vendor/github.com/moby/moby/api/server/router/local.go new file mode 100644 index 000000000..ba70f3413 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/local.go @@ -0,0 +1,104 @@ +package router + +import ( + "net/http" + + "github.com/docker/docker/api/server/httputils" + "golang.org/x/net/context" +) + +// RouteWrapper wraps a route with extra functionality. +// It is passed in when creating a new route. +type RouteWrapper func(r Route) Route + +// localRoute defines an individual API route to connect +// with the docker daemon. It implements Route. +type localRoute struct { + method string + path string + handler httputils.APIFunc +} + +// Handler returns the APIFunc to let the server wrap it in middlewares. +func (l localRoute) Handler() httputils.APIFunc { + return l.handler +} + +// Method returns the http method that the route responds to. +func (l localRoute) Method() string { + return l.method +} + +// Path returns the subpath that the route responds to. +func (l localRoute) Path() string { + return l.path +} + +// NewRoute initializes a new local route for the router. +func NewRoute(method, path string, handler httputils.APIFunc, opts ...RouteWrapper) Route { + var r Route = localRoute{method, path, handler} + for _, o := range opts { + r = o(r) + } + return r +} + +// NewGetRoute initializes a new route with the http method GET. +func NewGetRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route { + return NewRoute("GET", path, handler, opts...) +} + +// NewPostRoute initializes a new route with the http method POST. +func NewPostRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route { + return NewRoute("POST", path, handler, opts...) +} + +// NewPutRoute initializes a new route with the http method PUT. +func NewPutRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route { + return NewRoute("PUT", path, handler, opts...) +} + +// NewDeleteRoute initializes a new route with the http method DELETE. +func NewDeleteRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route { + return NewRoute("DELETE", path, handler, opts...) +} + +// NewOptionsRoute initializes a new route with the http method OPTIONS.
+func NewOptionsRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route { + return NewRoute("OPTIONS", path, handler, opts...) +} + +// NewHeadRoute initializes a new route with the http method HEAD. +func NewHeadRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route { + return NewRoute("HEAD", path, handler, opts...) +} + +func cancellableHandler(h httputils.APIFunc) httputils.APIFunc { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if notifier, ok := w.(http.CloseNotifier); ok { + notify := notifier.CloseNotify() + notifyCtx, cancel := context.WithCancel(ctx) + finished := make(chan struct{}) + defer close(finished) + ctx = notifyCtx + go func() { + select { + case <-notify: + cancel() + case <-finished: + } + }() + } + return h(ctx, w, r, vars) + } +} + +// WithCancel returns a new route whose handler's context.Context is cancelled +// when the client connection closes (via http.CloseNotifier). +func WithCancel(r Route) Route { + return localRoute{ + method: r.Method(), + path: r.Path(), + handler: cancellableHandler(r.Handler()), + } +} diff --git a/vendor/github.com/moby/moby/api/server/router/network/backend.go b/vendor/github.com/moby/moby/api/server/router/network/backend.go new file mode 100644 index 000000000..a32a0b9c0 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/network/backend.go @@ -0,0 +1,22 @@ +package network + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" + "github.com/docker/libnetwork" +) + +// Backend is all the methods that need to be implemented +// to provide network-specific functionality. +type Backend interface { + FindNetwork(idName string) (libnetwork.Network, error) + GetNetworks() []libnetwork.Network + CreateNetwork(nc types.NetworkCreateRequest) (*types.NetworkCreateResponse, error) + ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error + DisconnectContainerFromNetwork(containerName string, networkName string, force bool) error + DeleteNetwork(name string) error + NetworksPrune(ctx context.Context, pruneFilters filters.Args) (*types.NetworksPruneReport, error) +} diff --git a/vendor/github.com/moby/moby/api/server/router/network/filter.go b/vendor/github.com/moby/moby/api/server/router/network/filter.go new file mode 100644 index 000000000..afe4e235e --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/network/filter.go @@ -0,0 +1,87 @@ +package network + +import ( + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/runconfig" +) + +func filterNetworkByType(nws []types.NetworkResource, netType string) ([]types.NetworkResource, error) { + retNws := []types.NetworkResource{} + switch netType { + case "builtin": + for _, nw := range nws { + if runconfig.IsPreDefinedNetwork(nw.Name) { + retNws = append(retNws, nw) + } + } + case "custom": + for _, nw := range nws { + if !runconfig.IsPreDefinedNetwork(nw.Name) { + retNws = append(retNws, nw) + } + } + default: + return nil, fmt.Errorf("Invalid filter: 'type'='%s'", netType) + } + return retNws, nil +} + +// filterNetworks filters the network list according to the user-specified filter +// and returns the matching networks +func filterNetworks(nws []types.NetworkResource, filter filters.Args) ([]types.NetworkResource, error) { + // if filter is empty, return original network
list + if filter.Len() == 0 { + return nws, nil + } + + displayNet := []types.NetworkResource{} + for _, nw := range nws { + if filter.Include("driver") { + if !filter.ExactMatch("driver", nw.Driver) { + continue + } + } + if filter.Include("name") { + if !filter.Match("name", nw.Name) { + continue + } + } + if filter.Include("id") { + if !filter.Match("id", nw.ID) { + continue + } + } + if filter.Include("label") { + if !filter.MatchKVList("label", nw.Labels) { + continue + } + } + if filter.Include("scope") { + if !filter.ExactMatch("scope", nw.Scope) { + continue + } + } + displayNet = append(displayNet, nw) + } + + if filter.Include("type") { + typeNet := []types.NetworkResource{} + errFilter := filter.WalkValues("type", func(fval string) error { + passList, err := filterNetworkByType(displayNet, fval) + if err != nil { + return err + } + typeNet = append(typeNet, passList...) + return nil + }) + if errFilter != nil { + return nil, errFilter + } + displayNet = typeNet + } + + return displayNet, nil +} diff --git a/vendor/github.com/moby/moby/api/server/router/network/filter_test.go b/vendor/github.com/moby/moby/api/server/router/network/filter_test.go new file mode 100644 index 000000000..af3e94954 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/network/filter_test.go @@ -0,0 +1,149 @@ +// +build !windows + +package network + +import ( + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +func TestFilterNetworks(t *testing.T) { + networks := []types.NetworkResource{ + { + Name: "host", + Driver: "host", + Scope: "local", + }, + { + Name: "bridge", + Driver: "bridge", + Scope: "local", + }, + { + Name: "none", + Driver: "null", + Scope: "local", + }, + { + Name: "myoverlay", + Driver: "overlay", + Scope: "swarm", + }, + { + Name: "mydrivernet", + Driver: "mydriver", + Scope: "local", + }, + { + Name: "mykvnet", + Driver: "mykvdriver", + Scope: "global", + }, + } + + bridgeDriverFilters := filters.NewArgs() + bridgeDriverFilters.Add("driver", "bridge") + + overlayDriverFilters := filters.NewArgs() + overlayDriverFilters.Add("driver", "overlay") + + nonameDriverFilters := filters.NewArgs() + nonameDriverFilters.Add("driver", "noname") + + customDriverFilters := filters.NewArgs() + customDriverFilters.Add("type", "custom") + + builtinDriverFilters := filters.NewArgs() + builtinDriverFilters.Add("type", "builtin") + + invalidDriverFilters := filters.NewArgs() + invalidDriverFilters.Add("type", "invalid") + + localScopeFilters := filters.NewArgs() + localScopeFilters.Add("scope", "local") + + swarmScopeFilters := filters.NewArgs() + swarmScopeFilters.Add("scope", "swarm") + + globalScopeFilters := filters.NewArgs() + globalScopeFilters.Add("scope", "global") + + testCases := []struct { + filter filters.Args + resultCount int + err string + }{ + { + filter: bridgeDriverFilters, + resultCount: 1, + err: "", + }, + { + filter: overlayDriverFilters, + resultCount: 1, + err: "", + }, + { + filter: nonameDriverFilters, + resultCount: 0, + err: "", + }, + { + filter: customDriverFilters, + resultCount: 3, + err: "", + }, + { + filter: builtinDriverFilters, + resultCount: 3, + err: "", + }, + { + filter: invalidDriverFilters, + resultCount: 0, + err: "Invalid filter: 'type'='invalid'", + }, + { + filter: localScopeFilters, + resultCount: 4, + err: "", + }, + { + filter: swarmScopeFilters, + resultCount: 1, + err: "", + }, + { + filter: globalScopeFilters, + resultCount: 1, + err: "", + }, + } + + for _, testCase := 
range testCases { + result, err := filterNetworks(networks, testCase.filter) + if testCase.err != "" { + if err == nil { + t.Fatalf("expect error '%s', got no error", testCase.err) + + } else if !strings.Contains(err.Error(), testCase.err) { + t.Fatalf("expect error '%s', got '%s'", testCase.err, err) + } + } else { + if err != nil { + t.Fatalf("expect no error, got error '%s'", err) + } + // Make sure result is not nil + if result == nil { + t.Fatal("filterNetworks should not return nil") + } + + if len(result) != testCase.resultCount { + t.Fatalf("expect '%d' networks, got '%d' networks", testCase.resultCount, len(result)) + } + } + } +} diff --git a/vendor/github.com/moby/moby/api/server/router/network/network.go b/vendor/github.com/moby/moby/api/server/router/network/network.go new file mode 100644 index 000000000..eaf52aa2a --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/network/network.go @@ -0,0 +1,44 @@ +package network + +import ( + "github.com/docker/docker/api/server/router" + "github.com/docker/docker/daemon/cluster" +) + +// networkRouter is a router to talk with the network controller +type networkRouter struct { + backend Backend + cluster *cluster.Cluster + routes []router.Route +} + +// NewRouter initializes a new network router +func NewRouter(b Backend, c *cluster.Cluster) router.Router { + r := &networkRouter{ + backend: b, + cluster: c, + } + r.initRoutes() + return r +} + +// Routes returns the available routes to the network controller +func (r *networkRouter) Routes() []router.Route { + return r.routes +} + +func (r *networkRouter) initRoutes() { + r.routes = []router.Route{ + // GET + router.NewGetRoute("/networks", r.getNetworksList), + router.NewGetRoute("/networks/", r.getNetworksList), + router.NewGetRoute("/networks/{id:.+}", r.getNetwork), + // POST + router.NewPostRoute("/networks/create", r.postNetworkCreate), + router.NewPostRoute("/networks/{id:.*}/connect", r.postNetworkConnect), + router.NewPostRoute("/networks/{id:.*}/disconnect", r.postNetworkDisconnect), + router.NewPostRoute("/networks/prune", r.postNetworksPrune, router.WithCancel), + // DELETE + router.NewDeleteRoute("/networks/{id:.*}", r.deleteNetwork), + } +} diff --git a/vendor/github.com/moby/moby/api/server/router/network/network_routes.go b/vendor/github.com/moby/moby/api/server/router/network/network_routes.go new file mode 100644 index 000000000..6f2041e35 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/network/network_routes.go @@ -0,0 +1,472 @@ +package network + +import ( + "encoding/json" + "fmt" + "net/http" + "strconv" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/versions" + "github.com/docker/libnetwork" + "github.com/docker/libnetwork/networkdb" +) + +var ( + // acceptedNetworkFilters is a list of acceptable filters + acceptedNetworkFilters = map[string]bool{ + "driver": true, + "type": true, + "name": true, + "id": true, + "label": true, + "scope": true, + } +) + +func (n *networkRouter) getNetworksList(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + filter := r.Form.Get("filters") + netFilters, err := filters.FromParam(filter) + if err != nil { + return err + } + + if err := 
netFilters.Validate(acceptedNetworkFilters); err != nil { + return err + } + + list := []types.NetworkResource{} + + if nr, err := n.cluster.GetNetworks(); err == nil { + list = append(list, nr...) + } + + // Add the networks returned by the Docker daemon that were not already + // returned by the cluster manager +SKIP: + for _, nw := range n.backend.GetNetworks() { + for _, nl := range list { + if nl.ID == nw.ID() { + continue SKIP + } + } + + var nr *types.NetworkResource + // API versions < 1.28 fetch all the containers attached to a network + // in the network list call. That is a heavyweight operation when run + // across all the networks. Starting with API version 1.28, this detailed + // info is only available via the network-specific GET API (equivalent to inspect) + if versions.LessThan(httputils.VersionFromContext(ctx), "1.28") { + nr = n.buildDetailedNetworkResources(nw, false) + } else { + nr = n.buildNetworkResource(nw) + } + list = append(list, *nr) + } + + list, err = filterNetworks(list, netFilters) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, list) +} + +func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + term := vars["id"] + var ( + verbose bool + err error + ) + if v := r.URL.Query().Get("verbose"); v != "" { + if verbose, err = strconv.ParseBool(v); err != nil { + err = fmt.Errorf("invalid value for verbose: %s", v) + return errors.NewBadRequestError(err) + } + } + scope := r.URL.Query().Get("scope") + + isMatchingScope := func(scope, term string) bool { + if term != "" { + return scope == term + } + return true + } + + // In case multiple networks have duplicate names, return an error. + // TODO (yongtang): should we wrap with version here for backward compatibility? + + // First find based on full ID, return immediately once one is found. + // If a network appears both in swarm and local, assume it is in local first. + + // For full name and partial ID, save the result first, and process later + // in case multiple records were found based on the same term + listByFullName := map[string]types.NetworkResource{} + listByPartialID := map[string]types.NetworkResource{} + + nw := n.backend.GetNetworks() + for _, network := range nw { + if network.ID() == term && isMatchingScope(network.Info().Scope(), scope) { + return httputils.WriteJSON(w, http.StatusOK, *n.buildDetailedNetworkResources(network, verbose)) + } + if network.Name() == term && isMatchingScope(network.Info().Scope(), scope) { + // No need to check for an ID collision here as we are still in + // local scope and the network ID is unique in this scope. + listByFullName[network.ID()] = *n.buildDetailedNetworkResources(network, verbose) + } + if strings.HasPrefix(network.ID(), term) && isMatchingScope(network.Info().Scope(), scope) { + // No need to check for an ID collision here as we are still in + // local scope and the network ID is unique in this scope.
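+			// Partial-ID matches from the local scope are collected here and
+			// disambiguated only after the swarm-scope networks below have also
+			// been scanned.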
+ listByPartialID[network.ID()] = *n.buildDetailedNetworkResources(network, verbose) + } + } + + nr, _ := n.cluster.GetNetworks() + for _, network := range nr { + if network.ID == term && isMatchingScope(network.Scope, scope) { + return httputils.WriteJSON(w, http.StatusOK, network) + } + if network.Name == term && isMatchingScope(network.Scope, scope) { + // Check for an ID collision as we are in swarm scope here, and + // the map (of the listByFullName) may already have a + // network with the same ID (from local scope previously) + if _, ok := listByFullName[network.ID]; !ok { + listByFullName[network.ID] = network + } + } + if strings.HasPrefix(network.ID, term) && isMatchingScope(network.Scope, scope) { + // Check for an ID collision as we are in swarm scope here, and + // the map (of the listByPartialID) may already have a + // network with the same ID (from local scope previously) + if _, ok := listByPartialID[network.ID]; !ok { + listByPartialID[network.ID] = network + } + } + } + + // Find based on full name; return only if there are no duplicates + if len(listByFullName) == 1 { + for _, v := range listByFullName { + return httputils.WriteJSON(w, http.StatusOK, v) + } + } + if len(listByFullName) > 1 { + return fmt.Errorf("network %s is ambiguous (%d matches found based on name)", term, len(listByFullName)) + } + + // Find based on partial ID; return only if there are no duplicates + if len(listByPartialID) == 1 { + for _, v := range listByPartialID { + return httputils.WriteJSON(w, http.StatusOK, v) + } + } + if len(listByPartialID) > 1 { + return fmt.Errorf("network %s is ambiguous (%d matches found based on ID prefix)", term, len(listByPartialID)) + } + + return libnetwork.ErrNoSuchNetwork(term) +} + +func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var create types.NetworkCreateRequest + + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + if err := json.NewDecoder(r.Body).Decode(&create); err != nil { + return err + } + + if nws, err := n.cluster.GetNetworksByName(create.Name); err == nil && len(nws) > 0 { + return libnetwork.NetworkNameError(create.Name) + } + + nw, err := n.backend.CreateNetwork(create) + if err != nil { + var warning string + if _, ok := err.(libnetwork.NetworkNameError); ok { + // Check whether the user set CheckDuplicate; if so, return an error, + // otherwise prepare a warning message + if create.CheckDuplicate { + return libnetwork.NetworkNameError(create.Name) + } + warning = libnetwork.NetworkNameError(create.Name).Error() + } + + if _, ok := err.(libnetwork.ManagerRedirectError); !ok { + return err + } + id, err := n.cluster.CreateNetwork(create) + if err != nil { + return err + } + nw = &types.NetworkCreateResponse{ + ID: id, + Warning: warning, + } + } + + return httputils.WriteJSON(w, http.StatusCreated, nw) +} + +func (n *networkRouter) postNetworkConnect(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var connect types.NetworkConnect + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + if err := json.NewDecoder(r.Body).Decode(&connect); err != nil { + return err + } + + return n.backend.ConnectContainerToNetwork(connect.Container, vars["id"], connect.EndpointConfig) +} + +func (n *networkRouter) postNetworkDisconnect(ctx context.Context, w
http.ResponseWriter, r *http.Request, vars map[string]string) error { + var disconnect types.NetworkDisconnect + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + if err := json.NewDecoder(r.Body).Decode(&disconnect); err != nil { + return err + } + + return n.backend.DisconnectContainerFromNetwork(disconnect.Container, vars["id"], disconnect.Force) +} + +func (n *networkRouter) deleteNetwork(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if _, err := n.cluster.GetNetwork(vars["id"]); err == nil { + if err = n.cluster.RemoveNetwork(vars["id"]); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil + } + if err := n.backend.DeleteNetwork(vars["id"]); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (n *networkRouter) buildNetworkResource(nw libnetwork.Network) *types.NetworkResource { + r := &types.NetworkResource{} + if nw == nil { + return r + } + + info := nw.Info() + r.Name = nw.Name() + r.ID = nw.ID() + r.Created = info.Created() + r.Scope = info.Scope() + r.Driver = nw.Type() + r.EnableIPv6 = info.IPv6Enabled() + r.Internal = info.Internal() + r.Attachable = info.Attachable() + r.Ingress = info.Ingress() + r.Options = info.DriverOptions() + r.Containers = make(map[string]types.EndpointResource) + buildIpamResources(r, info) + r.Labels = info.Labels() + r.ConfigOnly = info.ConfigOnly() + + if cn := info.ConfigFrom(); cn != "" { + r.ConfigFrom = network.ConfigReference{Network: cn} + } + + peers := info.Peers() + if len(peers) != 0 { + r.Peers = buildPeerInfoResources(peers) + } + + return r +} + +func (n *networkRouter) buildDetailedNetworkResources(nw libnetwork.Network, verbose bool) *types.NetworkResource { + if nw == nil { + return &types.NetworkResource{} + } + + r := n.buildNetworkResource(nw) + epl := nw.Endpoints() + for _, e := range epl { + ei := e.Info() + if ei == nil { + continue + } + sb := ei.Sandbox() + tmpID := e.ID() + key := "ep-" + tmpID + if sb != nil { + key = sb.ContainerID() + } + + r.Containers[key] = buildEndpointResource(tmpID, e.Name(), ei) + } + if !verbose { + return r + } + services := nw.Info().Services() + r.Services = make(map[string]network.ServiceInfo) + for name, service := range services { + tasks := []network.Task{} + for _, t := range service.Tasks { + tasks = append(tasks, network.Task{ + Name: t.Name, + EndpointID: t.EndpointID, + EndpointIP: t.EndpointIP, + Info: t.Info, + }) + } + r.Services[name] = network.ServiceInfo{ + VIP: service.VIP, + Ports: service.Ports, + Tasks: tasks, + LocalLBIndex: service.LocalLBIndex, + } + } + return r +} + +func buildPeerInfoResources(peers []networkdb.PeerInfo) []network.PeerInfo { + peerInfo := make([]network.PeerInfo, 0, len(peers)) + for _, peer := range peers { + peerInfo = append(peerInfo, network.PeerInfo{ + Name: peer.Name, + IP: peer.IP, + }) + } + return peerInfo +} + +func buildIpamResources(r *types.NetworkResource, nwInfo libnetwork.NetworkInfo) { + id, opts, ipv4conf, ipv6conf := nwInfo.IpamConfig() + + ipv4Info, ipv6Info := nwInfo.IpamInfo() + + r.IPAM.Driver = id + + r.IPAM.Options = opts + + r.IPAM.Config = []network.IPAMConfig{} + for _, ip4 := range ipv4conf { + if ip4.PreferredPool == "" { + continue + } + iData := network.IPAMConfig{} + iData.Subnet = ip4.PreferredPool + iData.IPRange = ip4.SubPool + iData.Gateway = 
ip4.Gateway + iData.AuxAddress = ip4.AuxAddresses + r.IPAM.Config = append(r.IPAM.Config, iData) + } + + if len(r.IPAM.Config) == 0 { + for _, ip4Info := range ipv4Info { + iData := network.IPAMConfig{} + iData.Subnet = ip4Info.IPAMData.Pool.String() + iData.Gateway = ip4Info.IPAMData.Gateway.IP.String() + r.IPAM.Config = append(r.IPAM.Config, iData) + } + } + + hasIpv6Conf := false + for _, ip6 := range ipv6conf { + if ip6.PreferredPool == "" { + continue + } + hasIpv6Conf = true + iData := network.IPAMConfig{} + iData.Subnet = ip6.PreferredPool + iData.IPRange = ip6.SubPool + iData.Gateway = ip6.Gateway + iData.AuxAddress = ip6.AuxAddresses + r.IPAM.Config = append(r.IPAM.Config, iData) + } + + if !hasIpv6Conf { + for _, ip6Info := range ipv6Info { + if ip6Info.IPAMData.Pool == nil { + continue + } + iData := network.IPAMConfig{} + iData.Subnet = ip6Info.IPAMData.Pool.String() + iData.Gateway = ip6Info.IPAMData.Gateway.String() + r.IPAM.Config = append(r.IPAM.Config, iData) + } + } +} + +func buildEndpointResource(id string, name string, info libnetwork.EndpointInfo) types.EndpointResource { + er := types.EndpointResource{} + + er.EndpointID = id + er.Name = name + ei := info + if ei == nil { + return er + } + + if iface := ei.Iface(); iface != nil { + if mac := iface.MacAddress(); mac != nil { + er.MacAddress = mac.String() + } + if ip := iface.Address(); ip != nil && len(ip.IP) > 0 { + er.IPv4Address = ip.String() + } + + if ipv6 := iface.AddressIPv6(); ipv6 != nil && len(ipv6.IP) > 0 { + er.IPv6Address = ipv6.String() + } + } + return er +} + +func (n *networkRouter) postNetworksPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + pruneFilters, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + pruneReport, err := n.backend.NetworksPrune(ctx, pruneFilters) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, pruneReport) +} diff --git a/vendor/github.com/moby/moby/api/server/router/plugin/backend.go b/vendor/github.com/moby/moby/api/server/router/plugin/backend.go new file mode 100644 index 000000000..1b60501fc --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/plugin/backend.go @@ -0,0 +1,27 @@ +package plugin + +import ( + "io" + "net/http" + + "github.com/docker/distribution/reference" + enginetypes "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/plugin" + "golang.org/x/net/context" +) + +// Backend for Plugin +type Backend interface { + Disable(name string, config *enginetypes.PluginDisableConfig) error + Enable(name string, config *enginetypes.PluginEnableConfig) error + List(filters.Args) ([]enginetypes.Plugin, error) + Inspect(name string) (*enginetypes.Plugin, error) + Remove(name string, config *enginetypes.PluginRmConfig) error + Set(name string, args []string) error + Privileges(ctx context.Context, ref reference.Named, metaHeaders http.Header, authConfig *enginetypes.AuthConfig) (enginetypes.PluginPrivileges, error) + Pull(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer, opts ...plugin.CreateOpt) error + Push(ctx context.Context, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, outStream io.Writer) error + Upgrade(ctx context.Context, ref reference.Named, name 
string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error + CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *enginetypes.PluginCreateOptions) error +} diff --git a/vendor/github.com/moby/moby/api/server/router/plugin/plugin.go b/vendor/github.com/moby/moby/api/server/router/plugin/plugin.go new file mode 100644 index 000000000..22819e27a --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/plugin/plugin.go @@ -0,0 +1,39 @@ +package plugin + +import "github.com/docker/docker/api/server/router" + +// pluginRouter is a router to talk with the plugin controller +type pluginRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new plugin router +func NewRouter(b Backend) router.Router { + r := &pluginRouter{ + backend: b, + } + r.initRoutes() + return r +} + +// Routes returns the available routes to the plugin controller +func (r *pluginRouter) Routes() []router.Route { + return r.routes +} + +func (r *pluginRouter) initRoutes() { + r.routes = []router.Route{ + router.NewGetRoute("/plugins", r.listPlugins), + router.NewGetRoute("/plugins/{name:.*}/json", r.inspectPlugin), + router.NewGetRoute("/plugins/privileges", r.getPrivileges), + router.NewDeleteRoute("/plugins/{name:.*}", r.removePlugin), + router.NewPostRoute("/plugins/{name:.*}/enable", r.enablePlugin), // PATCH? + router.NewPostRoute("/plugins/{name:.*}/disable", r.disablePlugin), + router.NewPostRoute("/plugins/pull", r.pullPlugin, router.WithCancel), + router.NewPostRoute("/plugins/{name:.*}/push", r.pushPlugin, router.WithCancel), + router.NewPostRoute("/plugins/{name:.*}/upgrade", r.upgradePlugin, router.WithCancel), + router.NewPostRoute("/plugins/{name:.*}/set", r.setPlugin), + router.NewPostRoute("/plugins/create", r.createPlugin), + } +} diff --git a/vendor/github.com/moby/moby/api/server/router/plugin/plugin_routes.go b/vendor/github.com/moby/moby/api/server/router/plugin/plugin_routes.go new file mode 100644 index 000000000..79e3cf5de --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/plugin/plugin_routes.go @@ -0,0 +1,310 @@ +package plugin + +import ( + "encoding/base64" + "encoding/json" + "net/http" + "strconv" + "strings" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/streamformatter" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +func parseHeaders(headers http.Header) (map[string][]string, *types.AuthConfig) { + metaHeaders := map[string][]string{} + for k, v := range headers { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + + // Get X-Registry-Auth + authEncoded := headers.Get("X-Registry-Auth") + authConfig := &types.AuthConfig{} + if authEncoded != "" { + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { + authConfig = &types.AuthConfig{} + } + } + + return metaHeaders, authConfig +} + +// parseRemoteRef parses the remote reference into a reference.Named, +// returning the tag associated with the reference. In case the given +// reference string includes both a digest and a tag, the returned +// reference will have the digest without the tag, but the tag will +// be returned.
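+// For example (hypothetical inputs): "foo" is normalized to
+// "docker.io/library/foo" with the default "latest" tag and an empty returned
+// tag, while "foo:1.0@sha256:<digest>" yields a digest-only reference plus the
+// returned tag "1.0".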
+func parseRemoteRef(remote string) (reference.Named, string, error) { + // Parse remote reference, supporting remotes with name and tag + remoteRef, err := reference.ParseNormalizedNamed(remote) + if err != nil { + return nil, "", err + } + + type canonicalWithTag interface { + reference.Canonical + Tag() string + } + + if canonical, ok := remoteRef.(canonicalWithTag); ok { + remoteRef, err = reference.WithDigest(reference.TrimNamed(remoteRef), canonical.Digest()) + if err != nil { + return nil, "", err + } + return remoteRef, canonical.Tag(), nil + } + + remoteRef = reference.TagNameOnly(remoteRef) + + return remoteRef, "", nil +} + +func (pr *pluginRouter) getPrivileges(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + metaHeaders, authConfig := parseHeaders(r.Header) + + ref, _, err := parseRemoteRef(r.FormValue("remote")) + if err != nil { + return err + } + + privileges, err := pr.backend.Privileges(ctx, ref, metaHeaders, authConfig) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, privileges) +} + +func (pr *pluginRouter) upgradePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return errors.Wrap(err, "failed to parse form") + } + + var privileges types.PluginPrivileges + dec := json.NewDecoder(r.Body) + if err := dec.Decode(&privileges); err != nil { + return errors.Wrap(err, "failed to parse privileges") + } + if dec.More() { + return errors.New("invalid privileges") + } + + metaHeaders, authConfig := parseHeaders(r.Header) + ref, tag, err := parseRemoteRef(r.FormValue("remote")) + if err != nil { + return err + } + + name, err := getName(ref, tag, vars["name"]) + if err != nil { + return err + } + w.Header().Set("Docker-Plugin-Name", name) + + w.Header().Set("Content-Type", "application/json") + output := ioutils.NewWriteFlusher(w) + + if err := pr.backend.Upgrade(ctx, ref, name, metaHeaders, authConfig, privileges, output); err != nil { + if !output.Flushed() { + return err + } + output.Write(streamformatter.FormatError(err)) + } + + return nil +} + +func (pr *pluginRouter) pullPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return errors.Wrap(err, "failed to parse form") + } + + var privileges types.PluginPrivileges + dec := json.NewDecoder(r.Body) + if err := dec.Decode(&privileges); err != nil { + return errors.Wrap(err, "failed to parse privileges") + } + if dec.More() { + return errors.New("invalid privileges") + } + + metaHeaders, authConfig := parseHeaders(r.Header) + ref, tag, err := parseRemoteRef(r.FormValue("remote")) + if err != nil { + return err + } + + name, err := getName(ref, tag, r.FormValue("name")) + if err != nil { + return err + } + w.Header().Set("Docker-Plugin-Name", name) + + w.Header().Set("Content-Type", "application/json") + output := ioutils.NewWriteFlusher(w) + + if err := pr.backend.Pull(ctx, ref, name, metaHeaders, authConfig, privileges, output); err != nil { + if !output.Flushed() { + return err + } + output.Write(streamformatter.FormatError(err)) + } + + return nil +} + +func getName(ref reference.Named, tag, name string) (string, error) { + if name == "" { + if _, ok := ref.(reference.Canonical); ok { + trimmed := reference.TrimNamed(ref) + if tag != "" { + nt, err := reference.WithTag(trimmed, tag) 
+ if err != nil { + return "", err + } + name = reference.FamiliarString(nt) + } else { + name = reference.FamiliarString(reference.TagNameOnly(trimmed)) + } + } else { + name = reference.FamiliarString(ref) + } + } else { + localRef, err := reference.ParseNormalizedNamed(name) + if err != nil { + return "", err + } + if _, ok := localRef.(reference.Canonical); ok { + return "", errors.New("cannot use digest in plugin tag") + } + if reference.IsNameOnly(localRef) { + // TODO: log change in name to out stream + name = reference.FamiliarString(reference.TagNameOnly(localRef)) + } + } + return name, nil +} + +func (pr *pluginRouter) createPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + options := &types.PluginCreateOptions{ + RepoName: r.FormValue("name")} + + if err := pr.backend.CreateFromContext(ctx, r.Body, options); err != nil { + return err + } + //TODO: send progress bar + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (pr *pluginRouter) enablePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + timeout, err := strconv.Atoi(r.Form.Get("timeout")) + if err != nil { + return err + } + config := &types.PluginEnableConfig{Timeout: timeout} + + return pr.backend.Enable(name, config) +} + +func (pr *pluginRouter) disablePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + config := &types.PluginDisableConfig{ + ForceDisable: httputils.BoolValue(r, "force"), + } + + return pr.backend.Disable(name, config) +} + +func (pr *pluginRouter) removePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + config := &types.PluginRmConfig{ + ForceRemove: httputils.BoolValue(r, "force"), + } + return pr.backend.Remove(name, config) +} + +func (pr *pluginRouter) pushPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return errors.Wrap(err, "failed to parse form") + } + + metaHeaders, authConfig := parseHeaders(r.Header) + + w.Header().Set("Content-Type", "application/json") + output := ioutils.NewWriteFlusher(w) + + if err := pr.backend.Push(ctx, vars["name"], metaHeaders, authConfig, output); err != nil { + if !output.Flushed() { + return err + } + output.Write(streamformatter.FormatError(err)) + } + return nil +} + +func (pr *pluginRouter) setPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var args []string + if err := json.NewDecoder(r.Body).Decode(&args); err != nil { + return err + } + if err := pr.backend.Set(vars["name"], args); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (pr *pluginRouter) listPlugins(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + pluginFilters, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + l, err := pr.backend.List(pluginFilters) + if err != nil { + return err + } + return 
httputils.WriteJSON(w, http.StatusOK, l) +} + +func (pr *pluginRouter) inspectPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + result, err := pr.backend.Inspect(vars["name"]) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, result) +} diff --git a/vendor/github.com/moby/moby/api/server/router/router.go b/vendor/github.com/moby/moby/api/server/router/router.go new file mode 100644 index 000000000..2de25c27f --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/router.go @@ -0,0 +1,19 @@ +package router + +import "github.com/docker/docker/api/server/httputils" + +// Router defines an interface to specify a group of routes to add to the docker server. +type Router interface { + // Routes returns the list of routes to add to the docker server. + Routes() []Route +} + +// Route defines an individual API route in the docker server. +type Route interface { + // Handler returns the raw function to create the http handler. + Handler() httputils.APIFunc + // Method returns the http method that the route responds to. + Method() string + // Path returns the subpath that the route responds to. + Path() string +} diff --git a/vendor/github.com/moby/moby/api/server/router/session/backend.go b/vendor/github.com/moby/moby/api/server/router/session/backend.go new file mode 100644 index 000000000..ad4cc1bc5 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/session/backend.go @@ -0,0 +1,12 @@ +package session + +import ( + "net/http" + + "golang.org/x/net/context" +) + +// Backend abstracts a session receiver from an HTTP request. +type Backend interface { + HandleHTTPRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) error +} diff --git a/vendor/github.com/moby/moby/api/server/router/session/session.go b/vendor/github.com/moby/moby/api/server/router/session/session.go new file mode 100644 index 000000000..977a9c42c --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/session/session.go @@ -0,0 +1,29 @@ +package session + +import "github.com/docker/docker/api/server/router" + +// sessionRouter is a router to talk with the session controller +type sessionRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new session router +func NewRouter(b Backend) router.Router { + r := &sessionRouter{ + backend: b, + } + r.initRoutes() + return r +} + +// Routes returns the available routes to the session controller +func (r *sessionRouter) Routes() []router.Route { + return r.routes +} + +func (r *sessionRouter) initRoutes() { + r.routes = []router.Route{ + router.Experimental(router.NewPostRoute("/session", r.startSession)), + } +} diff --git a/vendor/github.com/moby/moby/api/server/router/session/session_routes.go b/vendor/github.com/moby/moby/api/server/router/session/session_routes.go new file mode 100644 index 000000000..ef9753c6e --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/session/session_routes.go @@ -0,0 +1,16 @@ +package session + +import ( + "net/http" + + apierrors "github.com/docker/docker/api/errors" + "golang.org/x/net/context" +) + +func (sr *sessionRouter) startSession(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + err := sr.backend.HandleHTTPRequest(ctx, w, r) + if err != nil { + return apierrors.NewBadRequestError(err) + } + return nil +} diff --git a/vendor/github.com/moby/moby/api/server/router/swarm/backend.go
b/vendor/github.com/moby/moby/api/server/router/swarm/backend.go new file mode 100644 index 000000000..3b7933d7b --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/swarm/backend.go @@ -0,0 +1,47 @@ +package swarm + +import ( + basictypes "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + types "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// Backend abstracts a swarm manager. +type Backend interface { + Init(req types.InitRequest) (string, error) + Join(req types.JoinRequest) error + Leave(force bool) error + Inspect() (types.Swarm, error) + Update(uint64, types.Spec, types.UpdateFlags) error + GetUnlockKey() (string, error) + UnlockSwarm(req types.UnlockRequest) error + + GetServices(basictypes.ServiceListOptions) ([]types.Service, error) + GetService(idOrName string, insertDefaults bool) (types.Service, error) + CreateService(types.ServiceSpec, string, bool) (*basictypes.ServiceCreateResponse, error) + UpdateService(string, uint64, types.ServiceSpec, basictypes.ServiceUpdateOptions, bool) (*basictypes.ServiceUpdateResponse, error) + RemoveService(string) error + + ServiceLogs(context.Context, *backend.LogSelector, *basictypes.ContainerLogsOptions) (<-chan *backend.LogMessage, error) + + GetNodes(basictypes.NodeListOptions) ([]types.Node, error) + GetNode(string) (types.Node, error) + UpdateNode(string, uint64, types.NodeSpec) error + RemoveNode(string, bool) error + + GetTasks(basictypes.TaskListOptions) ([]types.Task, error) + GetTask(string) (types.Task, error) + + GetSecrets(opts basictypes.SecretListOptions) ([]types.Secret, error) + CreateSecret(s types.SecretSpec) (string, error) + RemoveSecret(idOrName string) error + GetSecret(id string) (types.Secret, error) + UpdateSecret(idOrName string, version uint64, spec types.SecretSpec) error + + GetConfigs(opts basictypes.ConfigListOptions) ([]types.Config, error) + CreateConfig(s types.ConfigSpec) (string, error) + RemoveConfig(id string) error + GetConfig(id string) (types.Config, error) + UpdateConfig(idOrName string, version uint64, spec types.ConfigSpec) error +} diff --git a/vendor/github.com/moby/moby/api/server/router/swarm/cluster.go b/vendor/github.com/moby/moby/api/server/router/swarm/cluster.go new file mode 100644 index 000000000..2529250b0 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/swarm/cluster.go @@ -0,0 +1,63 @@ +package swarm + +import "github.com/docker/docker/api/server/router" + +// swarmRouter is a router to talk with the swarm controller +type swarmRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new swarm router +func NewRouter(b Backend) router.Router { + r := &swarmRouter{ + backend: b, + } + r.initRoutes() + return r +} + +// Routes returns the available routes to the swarm controller +func (sr *swarmRouter) Routes() []router.Route { + return sr.routes +} + +func (sr *swarmRouter) initRoutes() { + sr.routes = []router.Route{ + router.NewPostRoute("/swarm/init", sr.initCluster), + router.NewPostRoute("/swarm/join", sr.joinCluster), + router.NewPostRoute("/swarm/leave", sr.leaveCluster), + router.NewGetRoute("/swarm", sr.inspectCluster), + router.NewGetRoute("/swarm/unlockkey", sr.getUnlockKey), + router.NewPostRoute("/swarm/update", sr.updateCluster), + router.NewPostRoute("/swarm/unlock", sr.unlockCluster), + + router.NewGetRoute("/services", sr.getServices), + router.NewGetRoute("/services/{id}", sr.getService), + router.NewPostRoute("/services/create",
sr.createService), + router.NewPostRoute("/services/{id}/update", sr.updateService), + router.NewDeleteRoute("/services/{id}", sr.removeService), + router.NewGetRoute("/services/{id}/logs", sr.getServiceLogs, router.WithCancel), + + router.NewGetRoute("/nodes", sr.getNodes), + router.NewGetRoute("/nodes/{id}", sr.getNode), + router.NewDeleteRoute("/nodes/{id}", sr.removeNode), + router.NewPostRoute("/nodes/{id}/update", sr.updateNode), + + router.NewGetRoute("/tasks", sr.getTasks), + router.NewGetRoute("/tasks/{id}", sr.getTask), + router.NewGetRoute("/tasks/{id}/logs", sr.getTaskLogs, router.WithCancel), + + router.NewGetRoute("/secrets", sr.getSecrets), + router.NewPostRoute("/secrets/create", sr.createSecret), + router.NewDeleteRoute("/secrets/{id}", sr.removeSecret), + router.NewGetRoute("/secrets/{id}", sr.getSecret), + router.NewPostRoute("/secrets/{id}/update", sr.updateSecret), + + router.NewGetRoute("/configs", sr.getConfigs), + router.NewPostRoute("/configs/create", sr.createConfig), + router.NewDeleteRoute("/configs/{id}", sr.removeConfig), + router.NewGetRoute("/configs/{id}", sr.getConfig), + router.NewPostRoute("/configs/{id}/update", sr.updateConfig), + } +} diff --git a/vendor/github.com/moby/moby/api/server/router/swarm/cluster_routes.go b/vendor/github.com/moby/moby/api/server/router/swarm/cluster_routes.go new file mode 100644 index 000000000..91461da76 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/swarm/cluster_routes.go @@ -0,0 +1,492 @@ +package swarm + +import ( + "encoding/json" + "fmt" + "net/http" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/server/httputils" + basictypes "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/filters" + types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/versions" + "golang.org/x/net/context" +) + +func (sr *swarmRouter) initCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var req types.InitRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return err + } + nodeID, err := sr.backend.Init(req) + if err != nil { + logrus.Errorf("Error initializing swarm: %v", err) + return err + } + return httputils.WriteJSON(w, http.StatusOK, nodeID) +} + +func (sr *swarmRouter) joinCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var req types.JoinRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return err + } + return sr.backend.Join(req) +} + +func (sr *swarmRouter) leaveCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + force := httputils.BoolValue(r, "force") + return sr.backend.Leave(force) +} + +func (sr *swarmRouter) inspectCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + swarm, err := sr.backend.Inspect() + if err != nil { + logrus.Errorf("Error getting swarm: %v", err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, swarm) +} + +func (sr *swarmRouter) updateCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var swarm types.Spec + if err := json.NewDecoder(r.Body).Decode(&swarm); err != nil { + return err + } + + rawVersion := r.URL.Query().Get("version") + 
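+	// The version query parameter carries the swarm object version that the
+	// client last read; the backend uses it to reject concurrent, conflicting
+	// updates (optimistic concurrency control).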
version, err := strconv.ParseUint(rawVersion, 10, 64) + if err != nil { + err := fmt.Errorf("invalid swarm version '%s': %v", rawVersion, err) + return errors.NewBadRequestError(err) + } + + var flags types.UpdateFlags + + if value := r.URL.Query().Get("rotateWorkerToken"); value != "" { + rot, err := strconv.ParseBool(value) + if err != nil { + err := fmt.Errorf("invalid value for rotateWorkerToken: %s", value) + return errors.NewBadRequestError(err) + } + + flags.RotateWorkerToken = rot + } + + if value := r.URL.Query().Get("rotateManagerToken"); value != "" { + rot, err := strconv.ParseBool(value) + if err != nil { + err := fmt.Errorf("invalid value for rotateManagerToken: %s", value) + return errors.NewBadRequestError(err) + } + + flags.RotateManagerToken = rot + } + + if value := r.URL.Query().Get("rotateManagerUnlockKey"); value != "" { + rot, err := strconv.ParseBool(value) + if err != nil { + return errors.NewBadRequestError(fmt.Errorf("invalid value for rotateManagerUnlockKey: %s", value)) + } + + flags.RotateManagerUnlockKey = rot + } + + if err := sr.backend.Update(version, swarm, flags); err != nil { + logrus.Errorf("Error configuring swarm: %v", err) + return err + } + return nil +} + +func (sr *swarmRouter) unlockCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var req types.UnlockRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return err + } + + if err := sr.backend.UnlockSwarm(req); err != nil { + logrus.Errorf("Error unlocking swarm: %v", err) + return err + } + return nil +} + +func (sr *swarmRouter) getUnlockKey(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + unlockKey, err := sr.backend.GetUnlockKey() + if err != nil { + logrus.WithError(err).Errorf("Error retrieving swarm unlock key") + return err + } + + return httputils.WriteJSON(w, http.StatusOK, &basictypes.SwarmUnlockKeyResponse{ + UnlockKey: unlockKey, + }) +} + +func (sr *swarmRouter) getServices(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + filter, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + services, err := sr.backend.GetServices(basictypes.ServiceListOptions{Filters: filter}) + if err != nil { + logrus.Errorf("Error getting services: %v", err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, services) +} + +func (sr *swarmRouter) getService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var insertDefaults bool + if value := r.URL.Query().Get("insertDefaults"); value != "" { + var err error + insertDefaults, err = strconv.ParseBool(value) + if err != nil { + err := fmt.Errorf("invalid value for insertDefaults: %s", value) + return errors.NewBadRequestError(err) + } + } + + service, err := sr.backend.GetService(vars["id"], insertDefaults) + if err != nil { + logrus.Errorf("Error getting service %s: %v", vars["id"], err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, service) +} + +func (sr *swarmRouter) createService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var service types.ServiceSpec + if err := json.NewDecoder(r.Body).Decode(&service); err != nil { + return err + } + + // Get returns "" if the header does not exist + encodedAuth := r.Header.Get("X-Registry-Auth") + cliVersion := 
r.Header.Get("version") + queryRegistry := false + if cliVersion != "" && versions.LessThan(cliVersion, "1.30") { + queryRegistry = true + } + + resp, err := sr.backend.CreateService(service, encodedAuth, queryRegistry) + if err != nil { + logrus.Errorf("Error creating service %s: %v", service.Name, err) + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, resp) +} + +func (sr *swarmRouter) updateService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var service types.ServiceSpec + if err := json.NewDecoder(r.Body).Decode(&service); err != nil { + return err + } + + rawVersion := r.URL.Query().Get("version") + version, err := strconv.ParseUint(rawVersion, 10, 64) + if err != nil { + err := fmt.Errorf("invalid service version '%s': %v", rawVersion, err) + return errors.NewBadRequestError(err) + } + + var flags basictypes.ServiceUpdateOptions + + // Get returns "" if the header does not exist + flags.EncodedRegistryAuth = r.Header.Get("X-Registry-Auth") + flags.RegistryAuthFrom = r.URL.Query().Get("registryAuthFrom") + flags.Rollback = r.URL.Query().Get("rollback") + cliVersion := r.Header.Get("version") + queryRegistry := false + if cliVersion != "" && versions.LessThan(cliVersion, "1.30") { + queryRegistry = true + } + + resp, err := sr.backend.UpdateService(vars["id"], version, service, flags, queryRegistry) + if err != nil { + logrus.Errorf("Error updating service %s: %v", vars["id"], err) + return err + } + return httputils.WriteJSON(w, http.StatusOK, resp) +} + +func (sr *swarmRouter) removeService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := sr.backend.RemoveService(vars["id"]); err != nil { + logrus.Errorf("Error removing service %s: %v", vars["id"], err) + return err + } + return nil +} + +func (sr *swarmRouter) getTaskLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + // make a selector to pass to the helper function + selector := &backend.LogSelector{ + Tasks: []string{vars["id"]}, + } + return sr.swarmLogs(ctx, w, r, selector) +} + +func (sr *swarmRouter) getServiceLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + // make a selector to pass to the helper function + selector := &backend.LogSelector{ + Services: []string{vars["id"]}, + } + return sr.swarmLogs(ctx, w, r, selector) +} + +func (sr *swarmRouter) getNodes(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + filter, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + nodes, err := sr.backend.GetNodes(basictypes.NodeListOptions{Filters: filter}) + if err != nil { + logrus.Errorf("Error getting nodes: %v", err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, nodes) +} + +func (sr *swarmRouter) getNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + node, err := sr.backend.GetNode(vars["id"]) + if err != nil { + logrus.Errorf("Error getting node %s: %v", vars["id"], err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, node) +} + +func (sr *swarmRouter) updateNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) 
error { + var node types.NodeSpec + if err := json.NewDecoder(r.Body).Decode(&node); err != nil { + return err + } + + rawVersion := r.URL.Query().Get("version") + version, err := strconv.ParseUint(rawVersion, 10, 64) + if err != nil { + err := fmt.Errorf("invalid node version '%s': %v", rawVersion, err) + return errors.NewBadRequestError(err) + } + + if err := sr.backend.UpdateNode(vars["id"], version, node); err != nil { + logrus.Errorf("Error updating node %s: %v", vars["id"], err) + return err + } + return nil +} + +func (sr *swarmRouter) removeNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + force := httputils.BoolValue(r, "force") + + if err := sr.backend.RemoveNode(vars["id"], force); err != nil { + logrus.Errorf("Error removing node %s: %v", vars["id"], err) + return err + } + return nil +} + +func (sr *swarmRouter) getTasks(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + filter, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + tasks, err := sr.backend.GetTasks(basictypes.TaskListOptions{Filters: filter}) + if err != nil { + logrus.Errorf("Error getting tasks: %v", err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, tasks) +} + +func (sr *swarmRouter) getTask(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + task, err := sr.backend.GetTask(vars["id"]) + if err != nil { + logrus.Errorf("Error getting task %s: %v", vars["id"], err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, task) +} + +func (sr *swarmRouter) getSecrets(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + filters, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + secrets, err := sr.backend.GetSecrets(basictypes.SecretListOptions{Filters: filters}) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, secrets) +} + +func (sr *swarmRouter) createSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var secret types.SecretSpec + if err := json.NewDecoder(r.Body).Decode(&secret); err != nil { + return err + } + + id, err := sr.backend.CreateSecret(secret) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, &basictypes.SecretCreateResponse{ + ID: id, + }) +} + +func (sr *swarmRouter) removeSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := sr.backend.RemoveSecret(vars["id"]); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (sr *swarmRouter) getSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + secret, err := sr.backend.GetSecret(vars["id"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, secret) +} + +func (sr *swarmRouter) updateSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var secret types.SecretSpec + if err := json.NewDecoder(r.Body).Decode(&secret); err != nil { + return errors.NewBadRequestError(err) + } + + rawVersion := r.URL.Query().Get("version") + version, err 
:= strconv.ParseUint(rawVersion, 10, 64) + if err != nil { + return errors.NewBadRequestError(fmt.Errorf("invalid secret version")) + } + + id := vars["id"] + if err := sr.backend.UpdateSecret(id, version, secret); err != nil { + return err + } + + return nil +} + +func (sr *swarmRouter) getConfigs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + filters, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + configs, err := sr.backend.GetConfigs(basictypes.ConfigListOptions{Filters: filters}) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, configs) +} + +func (sr *swarmRouter) createConfig(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var config types.ConfigSpec + if err := json.NewDecoder(r.Body).Decode(&config); err != nil { + return err + } + + id, err := sr.backend.CreateConfig(config) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, &basictypes.ConfigCreateResponse{ + ID: id, + }) +} + +func (sr *swarmRouter) removeConfig(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := sr.backend.RemoveConfig(vars["id"]); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (sr *swarmRouter) getConfig(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + config, err := sr.backend.GetConfig(vars["id"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, config) +} + +func (sr *swarmRouter) updateConfig(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var config types.ConfigSpec + if err := json.NewDecoder(r.Body).Decode(&config); err != nil { + return errors.NewBadRequestError(err) + } + + rawVersion := r.URL.Query().Get("version") + version, err := strconv.ParseUint(rawVersion, 10, 64) + if err != nil { + return errors.NewBadRequestError(fmt.Errorf("invalid config version")) + } + + id := vars["id"] + if err := sr.backend.UpdateConfig(id, version, config); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/moby/moby/api/server/router/swarm/helpers.go b/vendor/github.com/moby/moby/api/server/router/swarm/helpers.go new file mode 100644 index 000000000..7d2944208 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/swarm/helpers.go @@ -0,0 +1,65 @@ +package swarm + +import ( + "fmt" + "net/http" + + "github.com/docker/docker/api/server/httputils" + basictypes "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "golang.org/x/net/context" +) + +// swarmLogs takes an http response, request, and selector, and writes the logs +// specified by the selector to the response +func (sr *swarmRouter) swarmLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, selector *backend.LogSelector) error { + // Args are validated before the stream starts because when it starts we're + // sending HTTP 200 by writing an empty chunk of data to tell the client that + // daemon is going to stream. By sending this initial HTTP 200 we can't report + // any error after the stream starts (i.e. container not found, wrong parameters) + // with the appropriate status code. 
+ stdout, stderr := httputils.BoolValue(r, "stdout"), httputils.BoolValue(r, "stderr") + if !(stdout || stderr) { + return fmt.Errorf("Bad parameters: you must choose at least one stream") + } + + // there is probably a neater way to manufacture the ContainerLogsOptions + // struct, probably in the caller, to eliminate the dependency on net/http + logsConfig := &basictypes.ContainerLogsOptions{ + Follow: httputils.BoolValue(r, "follow"), + Timestamps: httputils.BoolValue(r, "timestamps"), + Since: r.Form.Get("since"), + Tail: r.Form.Get("tail"), + ShowStdout: stdout, + ShowStderr: stderr, + Details: httputils.BoolValue(r, "details"), + } + + tty := false + // checking for whether logs are TTY involves iterating over every service + // and task. idk if there is a better way + for _, service := range selector.Services { + s, err := sr.backend.GetService(service, false) + if err != nil { + // maybe should return some context with this error? + return err + } + tty = (s.Spec.TaskTemplate.ContainerSpec != nil && s.Spec.TaskTemplate.ContainerSpec.TTY) || tty + } + for _, task := range selector.Tasks { + t, err := sr.backend.GetTask(task) + if err != nil { + // as above + return err + } + tty = t.Spec.ContainerSpec.TTY || tty + } + + msgs, err := sr.backend.ServiceLogs(ctx, selector, logsConfig) + if err != nil { + return err + } + + httputils.WriteLogStream(ctx, w, msgs, logsConfig, !tty) + return nil +} diff --git a/vendor/github.com/moby/moby/api/server/router/system/backend.go b/vendor/github.com/moby/moby/api/server/router/system/backend.go new file mode 100644 index 000000000..da1de380d --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/system/backend.go @@ -0,0 +1,21 @@ +package system + +import ( + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// Backend is the methods that need to be implemented to provide +// system specific functionality. +type Backend interface { + SystemInfo() (*types.Info, error) + SystemVersion() types.Version + SystemDiskUsage(ctx context.Context) (*types.DiskUsage, error) + SubscribeToEvents(since, until time.Time, ef filters.Args) ([]events.Message, chan interface{}) + UnsubscribeFromEvents(chan interface{}) + AuthenticateToRegistry(ctx context.Context, authConfig *types.AuthConfig) (string, string, error) +} diff --git a/vendor/github.com/moby/moby/api/server/router/system/system.go b/vendor/github.com/moby/moby/api/server/router/system/system.go new file mode 100644 index 000000000..a64631e8a --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/system/system.go @@ -0,0 +1,42 @@ +package system + +import ( + "github.com/docker/docker/api/server/router" + "github.com/docker/docker/builder/fscache" + "github.com/docker/docker/daemon/cluster" +) + +// systemRouter provides information about the Docker system overall. +// It gathers information about host, daemon and container events. 
+type systemRouter struct { + backend Backend + cluster *cluster.Cluster + routes []router.Route + builder *fscache.FSCache +} + +// NewRouter initializes a new system router +func NewRouter(b Backend, c *cluster.Cluster, fscache *fscache.FSCache) router.Router { + r := &systemRouter{ + backend: b, + cluster: c, + builder: fscache, + } + + r.routes = []router.Route{ + router.NewOptionsRoute("/{anyroute:.*}", optionsHandler), + router.NewGetRoute("/_ping", pingHandler), + router.NewGetRoute("/events", r.getEvents, router.WithCancel), + router.NewGetRoute("/info", r.getInfo), + router.NewGetRoute("/version", r.getVersion), + router.NewGetRoute("/system/df", r.getDiskUsage, router.WithCancel), + router.NewPostRoute("/auth", r.postAuth), + } + + return r +} + +// Routes returns all the API routes dedicated to the docker system +func (s *systemRouter) Routes() []router.Route { + return s.routes +} diff --git a/vendor/github.com/moby/moby/api/server/router/system/system_routes.go b/vendor/github.com/moby/moby/api/server/router/system/system_routes.go new file mode 100644 index 000000000..30fb000e1 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/system/system_routes.go @@ -0,0 +1,192 @@ +package system + +import ( + "encoding/json" + "fmt" + "net/http" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/registry" + timetypes "github.com/docker/docker/api/types/time" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/pkg/ioutils" + pkgerrors "github.com/pkg/errors" + "golang.org/x/net/context" +) + +func optionsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + w.WriteHeader(http.StatusOK) + return nil +} + +func pingHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + _, err := w.Write([]byte{'O', 'K'}) + return err +} + +func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + info, err := s.backend.SystemInfo() + if err != nil { + return err + } + if s.cluster != nil { + info.Swarm = s.cluster.Info() + } + + if versions.LessThan(httputils.VersionFromContext(ctx), "1.25") { + // TODO: handle this conversion in engine-api + type oldInfo struct { + *types.Info + ExecutionDriver string + } + old := &oldInfo{ + Info: info, + ExecutionDriver: "", + } + nameOnlySecurityOptions := []string{} + kvSecOpts, err := types.DecodeSecurityOptions(old.SecurityOptions) + if err != nil { + return err + } + for _, s := range kvSecOpts { + nameOnlySecurityOptions = append(nameOnlySecurityOptions, s.Name) + } + old.SecurityOptions = nameOnlySecurityOptions + return httputils.WriteJSON(w, http.StatusOK, old) + } + return httputils.WriteJSON(w, http.StatusOK, info) +} + +func (s *systemRouter) getVersion(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + info := s.backend.SystemVersion() + info.APIVersion = api.DefaultVersion + + return httputils.WriteJSON(w, http.StatusOK, info) +} + +func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + du, err := s.backend.SystemDiskUsage(ctx) + if err != nil { + return err + 
} + builderSize, err := s.builder.DiskUsage() + if err != nil { + return pkgerrors.Wrap(err, "error getting build cache usage") + } + du.BuilderSize = builderSize + + return httputils.WriteJSON(w, http.StatusOK, du) +} + +func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + since, err := eventTime(r.Form.Get("since")) + if err != nil { + return err + } + until, err := eventTime(r.Form.Get("until")) + if err != nil { + return err + } + + var ( + timeout <-chan time.Time + onlyPastEvents bool + ) + if !until.IsZero() { + if until.Before(since) { + return errors.NewBadRequestError(fmt.Errorf("`since` time (%s) cannot be after `until` time (%s)", r.Form.Get("since"), r.Form.Get("until"))) + } + + now := time.Now() + + onlyPastEvents = until.Before(now) + + if !onlyPastEvents { + dur := until.Sub(now) + timeout = time.NewTimer(dur).C + } + } + + ef, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + w.Header().Set("Content-Type", "application/json") + output := ioutils.NewWriteFlusher(w) + defer output.Close() + output.Flush() + + enc := json.NewEncoder(output) + + buffered, l := s.backend.SubscribeToEvents(since, until, ef) + defer s.backend.UnsubscribeFromEvents(l) + + for _, ev := range buffered { + if err := enc.Encode(ev); err != nil { + return err + } + } + + if onlyPastEvents { + return nil + } + + for { + select { + case ev := <-l: + jev, ok := ev.(events.Message) + if !ok { + logrus.Warnf("unexpected event message: %q", ev) + continue + } + if err := enc.Encode(jev); err != nil { + return err + } + case <-timeout: + return nil + case <-ctx.Done(): + logrus.Debug("Client context cancelled, stop sending events") + return nil + } + } +} + +func (s *systemRouter) postAuth(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var config *types.AuthConfig + err := json.NewDecoder(r.Body).Decode(&config) + r.Body.Close() + if err != nil { + return err + } + status, token, err := s.backend.AuthenticateToRegistry(ctx, config) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, &registry.AuthenticateOKBody{ + Status: status, + IdentityToken: token, + }) +} + +func eventTime(formTime string) (time.Time, error) { + t, tNano, err := timetypes.ParseTimestamps(formTime, -1) + if err != nil { + return time.Time{}, err + } + if t == -1 { + return time.Time{}, nil + } + return time.Unix(t, tNano), nil +} diff --git a/vendor/github.com/moby/moby/api/server/router/volume/backend.go b/vendor/github.com/moby/moby/api/server/router/volume/backend.go new file mode 100644 index 000000000..b97cb9478 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/volume/backend.go @@ -0,0 +1,19 @@ +package volume + +import ( + "golang.org/x/net/context" + + // TODO return types need to be refactored into pkg + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// Backend is the methods that need to be implemented to provide +// volume specific functionality +type Backend interface { + Volumes(filter string) ([]*types.Volume, []string, error) + VolumeInspect(name string) (*types.Volume, error) + VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error) + VolumeRm(name string, force bool) error + VolumesPrune(ctx context.Context, pruneFilters filters.Args) (*types.VolumesPruneReport, error) +}
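[Editor's note, not part of the vendored file: for context on how this interface is consumed, here is a minimal sketch of an in-memory stub that satisfies the volume `Backend` interface above, the kind of thing a test might pass to `NewRouter`. The `stubBackend` name and its canned behavior are illustrative assumptions, not code from the moby tree.]

```go
package volume

import (
	"fmt"

	"golang.org/x/net/context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
)

// stubBackend keeps volumes in a map; enough to exercise the router in tests.
type stubBackend struct {
	vols map[string]*types.Volume
}

func (b *stubBackend) Volumes(filter string) ([]*types.Volume, []string, error) {
	out := make([]*types.Volume, 0, len(b.vols))
	for _, v := range b.vols {
		out = append(out, v)
	}
	return out, nil, nil // nil warnings
}

func (b *stubBackend) VolumeInspect(name string) (*types.Volume, error) {
	if v, ok := b.vols[name]; ok {
		return v, nil
	}
	return nil, fmt.Errorf("no such volume: %s", name)
}

func (b *stubBackend) VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error) {
	v := &types.Volume{Name: name, Driver: driverName, Labels: labels}
	b.vols[name] = v
	return v, nil
}

func (b *stubBackend) VolumeRm(name string, force bool) error {
	delete(b.vols, name)
	return nil
}

func (b *stubBackend) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (*types.VolumesPruneReport, error) {
	return &types.VolumesPruneReport{}, nil
}
```

Wiring it up would then look like `r := NewRouter(&stubBackend{vols: map[string]*types.Volume{}})`, after which `r.Routes()` exposes the five volume endpoints registered in volume.go below.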
diff --git a/vendor/github.com/moby/moby/api/server/router/volume/volume.go b/vendor/github.com/moby/moby/api/server/router/volume/volume.go new file mode 100644 index 000000000..b24c8fee5 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/volume/volume.go @@ -0,0 +1,36 @@ +package volume + +import "github.com/docker/docker/api/server/router" + +// volumeRouter is a router to talk with the volumes controller +type volumeRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new volume router +func NewRouter(b Backend) router.Router { + r := &volumeRouter{ + backend: b, + } + r.initRoutes() + return r +} + +// Routes returns the available routes to the volumes controller +func (r *volumeRouter) Routes() []router.Route { + return r.routes +} + +func (r *volumeRouter) initRoutes() { + r.routes = []router.Route{ + // GET + router.NewGetRoute("/volumes", r.getVolumesList), + router.NewGetRoute("/volumes/{name:.*}", r.getVolumeByName), + // POST + router.NewPostRoute("/volumes/create", r.postVolumesCreate), + router.NewPostRoute("/volumes/prune", r.postVolumesPrune, router.WithCancel), + // DELETE + router.NewDeleteRoute("/volumes/{name:.*}", r.deleteVolumes), + } +} diff --git a/vendor/github.com/moby/moby/api/server/router/volume/volume_routes.go b/vendor/github.com/moby/moby/api/server/router/volume/volume_routes.go new file mode 100644 index 000000000..f0f490119 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router/volume/volume_routes.go @@ -0,0 +1,85 @@ +package volume + +import ( + "encoding/json" + "net/http" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types/filters" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" +) + +func (v *volumeRouter) getVolumesList(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + volumes, warnings, err := v.backend.Volumes(r.Form.Get("filters")) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, &volumetypes.VolumesListOKBody{Volumes: volumes, Warnings: warnings}) +} + +func (v *volumeRouter) getVolumeByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + volume, err := v.backend.VolumeInspect(vars["name"]) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, volume) +} + +func (v *volumeRouter) postVolumesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + var req volumetypes.VolumesCreateBody + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return err + } + + volume, err := v.backend.VolumeCreate(req.Name, req.Driver, req.DriverOpts, req.Labels) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusCreated, volume) +} + +func (v *volumeRouter) deleteVolumes(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + force := httputils.BoolValue(r, "force") + if err := v.backend.VolumeRm(vars["name"], force); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (v 
*volumeRouter) postVolumesPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + pruneFilters, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + pruneReport, err := v.backend.VolumesPrune(ctx, pruneFilters) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, pruneReport) +} diff --git a/vendor/github.com/moby/moby/api/server/router_swapper.go b/vendor/github.com/moby/moby/api/server/router_swapper.go new file mode 100644 index 000000000..1ecc7a7f3 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/router_swapper.go @@ -0,0 +1,30 @@ +package server + +import ( + "net/http" + "sync" + + "github.com/gorilla/mux" +) + +// routerSwapper is an http.Handler that allows you to swap +// mux routers. +type routerSwapper struct { + mu sync.Mutex + router *mux.Router +} + +// Swap changes the old router with the new one. +func (rs *routerSwapper) Swap(newRouter *mux.Router) { + rs.mu.Lock() + rs.router = newRouter + rs.mu.Unlock() +} + +// ServeHTTP makes the routerSwapper to implement the http.Handler interface. +func (rs *routerSwapper) ServeHTTP(w http.ResponseWriter, r *http.Request) { + rs.mu.Lock() + router := rs.router + rs.mu.Unlock() + router.ServeHTTP(w, r) +} diff --git a/vendor/github.com/moby/moby/api/server/server.go b/vendor/github.com/moby/moby/api/server/server.go new file mode 100644 index 000000000..e0f2d89d9 --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/server.go @@ -0,0 +1,201 @@ +package server + +import ( + "crypto/tls" + "fmt" + "net" + "net/http" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/middleware" + "github.com/docker/docker/api/server/router" + "github.com/docker/docker/api/server/router/debug" + "github.com/docker/docker/dockerversion" + "github.com/gorilla/mux" + "golang.org/x/net/context" +) + +// versionMatcher defines a variable matcher to be parsed by the router +// when a request is about to be served. +const versionMatcher = "/v{version:[0-9.]+}" + +// Config provides the configuration for the API server +type Config struct { + Logging bool + EnableCors bool + CorsHeaders string + Version string + SocketGroup string + TLSConfig *tls.Config +} + +// Server contains instance details for the server +type Server struct { + cfg *Config + servers []*HTTPServer + routers []router.Router + routerSwapper *routerSwapper + middlewares []middleware.Middleware +} + +// New returns a new instance of the server based on the specified configuration. +// It allocates resources which will be needed for ServeAPI(ports, unix-sockets). +func New(cfg *Config) *Server { + return &Server{ + cfg: cfg, + } +} + +// UseMiddleware appends a new middleware to the request chain. +// This needs to be called before the API routes are configured. +func (s *Server) UseMiddleware(m middleware.Middleware) { + s.middlewares = append(s.middlewares, m) +} + +// Accept sets a listener the server accepts connections into. 
+func (s *Server) Accept(addr string, listeners ...net.Listener) { + for _, listener := range listeners { + httpServer := &HTTPServer{ + srv: &http.Server{ + Addr: addr, + }, + l: listener, + } + s.servers = append(s.servers, httpServer) + } +} + +// Close closes servers and thus stop receiving requests +func (s *Server) Close() { + for _, srv := range s.servers { + if err := srv.Close(); err != nil { + logrus.Error(err) + } + } +} + +// serveAPI loops through all initialized servers and spawns goroutine +// with Serve method for each. It sets createMux() as Handler also. +func (s *Server) serveAPI() error { + var chErrors = make(chan error, len(s.servers)) + for _, srv := range s.servers { + srv.srv.Handler = s.routerSwapper + go func(srv *HTTPServer) { + var err error + logrus.Infof("API listen on %s", srv.l.Addr()) + if err = srv.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") { + err = nil + } + chErrors <- err + }(srv) + } + + for range s.servers { + err := <-chErrors + if err != nil { + return err + } + } + return nil +} + +// HTTPServer contains an instance of http server and the listener. +// srv *http.Server, contains configuration to create an http server and a mux router with all api end points. +// l net.Listener, is a TCP or Socket listener that dispatches incoming request to the router. +type HTTPServer struct { + srv *http.Server + l net.Listener +} + +// Serve starts listening for inbound requests. +func (s *HTTPServer) Serve() error { + return s.srv.Serve(s.l) +} + +// Close closes the HTTPServer from listening for the inbound requests. +func (s *HTTPServer) Close() error { + return s.l.Close() +} + +func (s *Server) makeHTTPHandler(handler httputils.APIFunc) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + // Define the context that we'll pass around to share info + // like the docker-request-id. + // + // The 'context' will be used for global data that should + // apply to all requests. Data that is specific to the + // immediate function being called should still be passed + // as 'args' on the function call. + ctx := context.WithValue(context.Background(), dockerversion.UAStringKey, r.Header.Get("User-Agent")) + handlerFunc := s.handlerWithGlobalMiddlewares(handler) + + vars := mux.Vars(r) + if vars == nil { + vars = make(map[string]string) + } + + if err := handlerFunc(ctx, w, r, vars); err != nil { + statusCode := httputils.GetHTTPErrorStatusCode(err) + if statusCode >= 500 { + logrus.Errorf("Handler for %s %s returned error: %v", r.Method, r.URL.Path, err) + } + httputils.MakeErrorHandler(err)(w, r) + } + } +} + +// InitRouter initializes the list of routers for the server. +// This method also enables the Go profiler if enableProfiler is true. +func (s *Server) InitRouter(routers ...router.Router) { + s.routers = append(s.routers, routers...) + + m := s.createMux() + s.routerSwapper = &routerSwapper{ + router: m, + } +} + +// createMux initializes the main router the server uses. 
+func (s *Server) createMux() *mux.Router { + m := mux.NewRouter() + + logrus.Debug("Registering routers") + for _, apiRouter := range s.routers { + for _, r := range apiRouter.Routes() { + f := s.makeHTTPHandler(r.Handler()) + + logrus.Debugf("Registering %s, %s", r.Method(), r.Path()) + m.Path(versionMatcher + r.Path()).Methods(r.Method()).Handler(f) + m.Path(r.Path()).Methods(r.Method()).Handler(f) + } + } + + debugRouter := debug.NewRouter() + s.routers = append(s.routers, debugRouter) + for _, r := range debugRouter.Routes() { + f := s.makeHTTPHandler(r.Handler()) + m.Path("/debug" + r.Path()).Handler(f) + } + + err := errors.NewRequestNotFoundError(fmt.Errorf("page not found")) + notFoundHandler := httputils.MakeErrorHandler(err) + m.HandleFunc(versionMatcher+"/{path:.*}", notFoundHandler) + m.NotFoundHandler = notFoundHandler + + return m +} + +// Wait blocks the server goroutine until it exits. +// It sends an error message if there is any error during +// the API execution. +func (s *Server) Wait(waitChan chan error) { + if err := s.serveAPI(); err != nil { + logrus.Errorf("ServeAPI error: %v", err) + waitChan <- err + return + } + waitChan <- nil +} diff --git a/vendor/github.com/moby/moby/api/server/server_test.go b/vendor/github.com/moby/moby/api/server/server_test.go new file mode 100644 index 000000000..272dc81fb --- /dev/null +++ b/vendor/github.com/moby/moby/api/server/server_test.go @@ -0,0 +1,46 @@ +package server + +import ( + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/docker/docker/api" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/middleware" + + "golang.org/x/net/context" +) + +func TestMiddlewares(t *testing.T) { + cfg := &Config{ + Version: "0.1omega2", + } + srv := &Server{ + cfg: cfg, + } + + srv.UseMiddleware(middleware.NewVersionMiddleware("0.1omega2", api.DefaultVersion, api.MinVersion)) + + req, _ := http.NewRequest("GET", "/containers/json", nil) + resp := httptest.NewRecorder() + ctx := context.Background() + + localHandler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if httputils.VersionFromContext(ctx) == "" { + t.Fatal("Expected version, got empty string") + } + + if sv := w.Header().Get("Server"); !strings.Contains(sv, "Docker/0.1omega2") { + t.Fatalf("Expected server version in the header `Docker/0.1omega2`, got %s", sv) + } + + return nil + } + + handlerFunc := srv.handlerWithGlobalMiddlewares(localHandler) + if err := handlerFunc(ctx, resp, req, map[string]string{}); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/moby/moby/api/swagger-gen.yaml b/vendor/github.com/moby/moby/api/swagger-gen.yaml new file mode 100644 index 000000000..f07a02737 --- /dev/null +++ b/vendor/github.com/moby/moby/api/swagger-gen.yaml @@ -0,0 +1,12 @@ + +layout: + models: + - name: definition + source: asset:model + target: "{{ joinFilePath .Target .ModelPackage }}" + file_name: "{{ (snakize (pascalize .Name)) }}.go" + operations: + - name: handler + source: asset:serverOperation + target: "{{ joinFilePath .Target .APIPackage .Package }}" + file_name: "{{ (snakize (pascalize .Name)) }}.go" diff --git a/vendor/github.com/moby/moby/api/swagger.yaml b/vendor/github.com/moby/moby/api/swagger.yaml new file mode 100644 index 000000000..05213d614 --- /dev/null +++ b/vendor/github.com/moby/moby/api/swagger.yaml @@ -0,0 +1,8963 @@ +# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API. 
+# +# This is used for generating API documentation and the types used by the +# client/server. See api/README.md for more information. +# +# Some style notes: +# - This file is used by ReDoc, which allows GitHub Flavored Markdown in +# descriptions. +# - There is no maximum line length, for ease of editing and pretty diffs. +# - operationIds are in the format "NounVerb", with a singular noun. + +swagger: "2.0" +schemes: + - "http" + - "https" +produces: + - "application/json" + - "text/plain" +consumes: + - "application/json" + - "text/plain" +basePath: "/v1.31" +info: + title: "Docker Engine API" + version: "1.31" + x-logo: + url: "https://docs.docker.com/images/logo-docker-main.png" + description: | + The Engine API is an HTTP API served by Docker Engine. It is the API the Docker client uses to communicate with the Engine, so everything the Docker client can do can be done with the API. + + Most of the client's commands map directly to API endpoints (e.g. `docker ps` is `GET /containers/json`). The notable exception is running containers, which consists of several API calls. + + # Errors + + The API uses standard HTTP status codes to indicate the success or failure of the API call. The body of the response will be JSON in the following format: + + ``` + { + "message": "page not found" + } + ``` + + # Versioning + + The API is usually changed in each release of Docker, so API calls are versioned to ensure that clients don't break. + + For Docker Engine 17.06, the API version is 1.30. To lock to this version, you prefix the URL with `/v1.30`. For example, calling `/info` is the same as calling `/v1.30/info`. + + Engine releases in the near future should support this version of the API, so your client will continue to work even if it is talking to a newer Engine. + + In previous versions of Docker, it was possible to access the API without providing a version. This behaviour is now deprecated and will be removed in a future version of Docker. + + The API uses an open schema model, which means the server may add extra properties to responses. Likewise, the server will ignore any extra query parameters and request body properties. When you write clients, you need to ignore additional properties in responses to ensure they do not break when talking to newer Docker daemons. + + This documentation is for version 1.31 of the API.
Use this table to find documentation for previous versions of the API: + + Docker version | API version | Changes + ----------------|-------------|--------- + 17.06.x | [1.30](https://docs.docker.com/engine/api/v1.30/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-30-api-changes) + 17.05.x | [1.29](https://docs.docker.com/engine/api/v1.29/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-29-api-changes) + 17.04.x | [1.28](https://docs.docker.com/engine/api/v1.28/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-28-api-changes) + 17.03.1 | [1.27](https://docs.docker.com/engine/api/v1.27/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-27-api-changes) + 1.13.1 & 17.03.0 | [1.26](https://docs.docker.com/engine/api/v1.26/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-26-api-changes) + 1.13.0 | [1.25](https://docs.docker.com/engine/api/v1.25/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-25-api-changes) + 1.12.x | [1.24](https://docs.docker.com/engine/api/v1.24/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-24-api-changes) + 1.11.x | [1.23](https://docs.docker.com/engine/api/v1.23/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-23-api-changes) + 1.10.x | [1.22](https://docs.docker.com/engine/api/v1.22/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-22-api-changes) + 1.9.x | [1.21](https://docs.docker.com/engine/api/v1.21/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-21-api-changes) + 1.8.x | [1.20](https://docs.docker.com/engine/api/v1.20/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-20-api-changes) + 1.7.x | [1.19](https://docs.docker.com/engine/api/v1.19/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-19-api-changes) + 1.6.x | [1.18](https://docs.docker.com/engine/api/v1.18/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-18-api-changes) + + # Authentication + + Authentication for registries is handled client side. The client has to send authentication details to various endpoints that need to communicate with registries, such as `POST /images/(name)/push`. These are sent as `X-Registry-Auth` header as a Base64 encoded (JSON) string with the following structure: + + ``` + { + "username": "string", + "password": "string", + "email": "string", + "serveraddress": "string" + } + ``` + + The `serveraddress` is a domain/IP without a protocol. Throughout this structure, double quotes are required. + + If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), you can just pass this instead of credentials: + + ``` + { + "identitytoken": "9cbaf023786cd7..." + } + ``` + +# The tags on paths define the menu sections in the ReDoc documentation, so +# the usage of tags must make sense for that: +# - They should be singular, not plural. +# - There should not be too many tags, or the menu becomes unwieldy. For +# example, it is preferable to add a path to the "System" tag instead of +# creating a tag with a single path in it. +# - The order of tags in this list defines the order in the menu. +tags: + # Primary objects + - name: "Container" + x-displayName: "Containers" + description: | + Create and manage containers. 
+ - name: "Image" + x-displayName: "Images" + - name: "Network" + x-displayName: "Networks" + description: | + Networks are user-defined networks that containers can be attached to. See the [networking documentation](https://docs.docker.com/engine/userguide/networking/) for more information. + - name: "Volume" + x-displayName: "Volumes" + description: | + Create and manage persistent storage that can be attached to containers. + - name: "Exec" + x-displayName: "Exec" + description: | + Run new commands inside running containers. See the [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) for more information. + + To exec a command in a container, you first need to create an exec instance, then start it. These two API endpoints are wrapped up in a single command-line command, `docker exec`. + # Swarm things + - name: "Swarm" + x-displayName: "Swarm" + description: | + Engines can be clustered together in a swarm. See [the swarm mode documentation](https://docs.docker.com/engine/swarm/) for more information. + - name: "Node" + x-displayName: "Nodes" + description: | + Nodes are instances of the Engine participating in a swarm. Swarm mode must be enabled for these endpoints to work. + - name: "Service" + x-displayName: "Services" + description: | + Services are the definitions of tasks to run on a swarm. Swarm mode must be enabled for these endpoints to work. + - name: "Task" + x-displayName: "Tasks" + description: | + A task is a container running on a swarm. It is the atomic scheduling unit of swarm. Swarm mode must be enabled for these endpoints to work. + - name: "Secret" + x-displayName: "Secrets" + description: | + Secrets are sensitive data that can be used by services. Swarm mode must be enabled for these endpoints to work. + # System things + - name: "Plugin" + x-displayName: "Plugins" + - name: "System" + x-displayName: "System" + +definitions: + Port: + type: "object" + description: "An open port on a container" + required: [PrivatePort, Type] + properties: + IP: + type: "string" + format: "ip-address" + PrivatePort: + type: "integer" + format: "uint16" + x-nullable: false + description: "Port on the container" + PublicPort: + type: "integer" + format: "uint16" + description: "Port exposed on the host" + Type: + type: "string" + x-nullable: false + enum: ["tcp", "udp"] + example: + PrivatePort: 8080 + PublicPort: 80 + Type: "tcp" + + MountPoint: + type: "object" + description: "A mount point inside a container" + properties: + Type: + type: "string" + Name: + type: "string" + Source: + type: "string" + Destination: + type: "string" + Driver: + type: "string" + Mode: + type: "string" + RW: + type: "boolean" + Propagation: + type: "string" + + DeviceMapping: + type: "object" + description: "A device mapping between the host and container" + properties: + PathOnHost: + type: "string" + PathInContainer: + type: "string" + CgroupPermissions: + type: "string" + example: + PathOnHost: "/dev/deviceName" + PathInContainer: "/dev/deviceName" + CgroupPermissions: "mrw" + + ThrottleDevice: + type: "object" + properties: + Path: + description: "Device path" + type: "string" + Rate: + description: "Rate" + type: "integer" + format: "int64" + minimum: 0 + + Mount: + type: "object" + properties: + Target: + description: "Container path." + type: "string" + Source: + description: "Mount source (e.g. a volume name, a host path)." + type: "string" + Type: + description: | + The mount type. 
Available types: + + - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. + - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. + - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. + type: "string" + enum: + - "bind" + - "volume" + - "tmpfs" + ReadOnly: + description: "Whether the mount should be read-only." + type: "boolean" + Consistency: + description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`." + type: "string" + BindOptions: + description: "Optional configuration for the `bind` type." + type: "object" + properties: + Propagation: + description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." + enum: + - "private" + - "rprivate" + - "shared" + - "rshared" + - "slave" + - "rslave" + VolumeOptions: + description: "Optional configuration for the `volume` type." + type: "object" + properties: + NoCopy: + description: "Populate volume with data from the target." + type: "boolean" + default: false + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + DriverConfig: + description: "Map of driver specific options" + type: "object" + properties: + Name: + description: "Name of the driver to use to create the volume." + type: "string" + Options: + description: "key/value map of driver specific options." + type: "object" + additionalProperties: + type: "string" + TmpfsOptions: + description: "Optional configuration for the `tmpfs` type." + type: "object" + properties: + SizeBytes: + description: "The size for the tmpfs mount in bytes." + type: "integer" + format: "int64" + Mode: + description: "The permission mode for the tmpfs mount in an integer." + type: "integer" + RestartPolicy: + description: | + The behavior to apply when the container exits. The default is not to restart. + + An ever increasing delay (double the previous delay, starting at 100ms) is added before each restart to prevent flooding the server. + type: "object" + properties: + Name: + type: "string" + description: | + - Empty string means not to restart + - `always` Always restart + - `unless-stopped` Restart always except when the user has manually stopped the container + - `on-failure` Restart only when the container exit code is non-zero + enum: + - "" + - "always" + - "unless-stopped" + - "on-failure" + MaximumRetryCount: + type: "integer" + description: "If `on-failure` is used, the number of times to retry before giving up" + default: {} + + Resources: + description: "A container's resources (cgroups config, ulimits, etc)" + type: "object" + properties: + # Applicable to all platforms + CpuShares: + description: "An integer value representing this container's relative CPU weight versus other containers." + type: "integer" + Memory: + description: "Memory limit in bytes." + type: "integer" + default: 0 + # Applicable to UNIX platforms + CgroupParent: + description: "Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist." + type: "string" + BlkioWeight: + description: "Block IO weight (relative weight)." 
+ type: "integer" + minimum: 0 + maximum: 1000 + BlkioWeightDevice: + description: | + Block IO weight (relative device weight) in the form `[{"Path": "device_path", "Weight": weight}]`. + type: "array" + items: + type: "object" + properties: + Path: + type: "string" + Weight: + type: "integer" + minimum: 0 + BlkioDeviceReadBps: + description: | + Limit read rate (bytes per second) from a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteBps: + description: | + Limit write rate (bytes per second) to a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceReadIOps: + description: | + Limit read rate (IO per second) from a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteIOps: + description: | + Limit write rate (IO per second) to a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + CpuPeriod: + description: "The length of a CPU period in microseconds." + type: "integer" + format: "int64" + CpuQuota: + description: "Microseconds of CPU time that the container can get in a CPU period." + type: "integer" + format: "int64" + CpuRealtimePeriod: + description: "The length of a CPU real-time period in microseconds. Set to 0 to allocate no time allocated to real-time tasks." + type: "integer" + format: "int64" + CpuRealtimeRuntime: + description: "The length of a CPU real-time runtime in microseconds. Set to 0 to allocate no time allocated to real-time tasks." + type: "integer" + format: "int64" + CpusetCpus: + description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)" + type: "string" + CpusetMems: + description: "Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems." + type: "string" + Devices: + description: "A list of devices to add to the container." + type: "array" + items: + $ref: "#/definitions/DeviceMapping" + DeviceCgroupRules: + description: "a list of cgroup rules to apply to the container" + type: "array" + items: + type: "string" + example: "c 13:* rwm" + DiskQuota: + description: "Disk limit (in bytes)." + type: "integer" + format: "int64" + KernelMemory: + description: "Kernel memory limit in bytes." + type: "integer" + format: "int64" + MemoryReservation: + description: "Memory soft limit in bytes." + type: "integer" + format: "int64" + MemorySwap: + description: "Total memory limit (memory + swap). Set as `-1` to enable unlimited swap." + type: "integer" + format: "int64" + MemorySwappiness: + description: "Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100." + type: "integer" + format: "int64" + minimum: 0 + maximum: 100 + NanoCPUs: + description: "CPU quota in units of 10-9 CPUs." + type: "integer" + format: "int64" + OomKillDisable: + description: "Disable OOM Killer for the container." + type: "boolean" + PidsLimit: + description: "Tune a container's pids limit. Set -1 for unlimited." + type: "integer" + format: "int64" + Ulimits: + description: | + A list of resource limits to set in the container. 
For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`" + type: "array" + items: + type: "object" + properties: + Name: + description: "Name of ulimit" + type: "string" + Soft: + description: "Soft limit" + type: "integer" + Hard: + description: "Hard limit" + type: "integer" + # Applicable to Windows + CpuCount: + description: | + The number of usable CPUs (Windows only). + + On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + CpuPercent: + description: | + The usable percentage of the available CPUs (Windows only). + + On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + IOMaximumIOps: + description: "Maximum IOps for the container system drive (Windows only)" + type: "integer" + format: "int64" + IOMaximumBandwidth: + description: "Maximum IO in bytes per second for the container system drive (Windows only)" + type: "integer" + format: "int64" + + ResourceObject: + description: "An object describing the resources which can be advertised by a node and requested by a task" + type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + MemoryBytes: + type: "integer" + format: "int64" + GenericResources: + $ref: "#/definitions/GenericResources" + + GenericResources: + description: "User defined Resources, can be either Integer resources (e.g: SSD=3) or String resources (e.g: GPU={UUID1, UUID2})" + type: "array" + items: + type: "object" + properties: + NamedResourceSpec: + type: "object" + properties: + Kind: + type: "string" + Value: + type: "string" + DiscreteResourceSpec: + type: "object" + properties: + Kind: + type: "string" + Value: + type: "integer" + format: "int64" + + HealthConfig: + description: "A test to perform to check that the container is healthy." + type: "object" + properties: + Test: + description: | + The test to perform. Possible values are: + + - `[]` inherit healthcheck from image or parent image + - `["NONE"]` disable healthcheck + - `["CMD", args...]` exec arguments directly + - `["CMD-SHELL", command]` run command with system's default shell + type: "array" + items: + type: "string" + Interval: + description: "The time to wait between checks in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit." + type: "integer" + Timeout: + description: "The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit." + type: "integer" + Retries: + description: "The number of consecutive failures needed to consider a container as unhealthy. 0 means inherit." + type: "integer" + StartPeriod: + description: "Start period for the container to initialize before starting health-retries countdown in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit." + type: "integer" + + HostConfig: + description: "Container configuration that depends on the host we are running on" + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + # Applicable to all platforms + Binds: + type: "array" + description: | + A list of volume bindings for this container. Each volume binding is a string in one of these forms: + + - `host-src:container-dest` to bind-mount a host path into the container. 
Both `host-src`, and `container-dest` must be an _absolute_ path. - `host-src:container-dest:ro` to make the bind-mount read-only inside the container. Both `host-src`, and `container-dest` must be an _absolute_ path. + - `volume-name:container-dest` to bind-mount a volume managed by a volume driver into the container. `container-dest` must be an _absolute_ path. + - `volume-name:container-dest:ro` to mount the volume read-only inside the container. `container-dest` must be an _absolute_ path. + items: + type: "string" + ContainerIDFile: + type: "string" + description: "Path to a file where the container ID is written" + LogConfig: + type: "object" + description: "The logging configuration for this container" + properties: + Type: + type: "string" + enum: + - "json-file" + - "syslog" + - "journald" + - "gelf" + - "fluentd" + - "awslogs" + - "splunk" + - "etwlogs" + - "none" + Config: + type: "object" + additionalProperties: + type: "string" + NetworkMode: + type: "string" + description: "Network mode to use for this container. Supported standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. Any other value is taken + as a custom network's name to which this container should connect to." + PortBindings: + type: "object" + description: "A map of exposed container ports and the host port they should map to." + additionalProperties: + type: "object" + properties: + HostIp: + type: "string" + description: "The host IP address" + HostPort: + type: "string" + description: "The host port number, as a string" + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + AutoRemove: + type: "boolean" + description: "Automatically remove the container when the container's process exits. This has no effect if `RestartPolicy` is set." + VolumeDriver: + type: "string" + description: "Driver that this container uses to mount volumes." + VolumesFrom: + type: "array" + description: "A list of volumes to inherit from another container, specified in the form `<container name>[:<ro|rw>]`." + items: + type: "string" + Mounts: + description: "Specification for mounts to be added to the container." + type: "array" + items: + $ref: "#/definitions/Mount" + + # Applicable to UNIX platforms + CapAdd: + type: "array" + description: "A list of kernel capabilities to add to the container." + items: + type: "string" + CapDrop: + type: "array" + description: "A list of kernel capabilities to drop from the container." + items: + type: "string" + Dns: + type: "array" + description: "A list of DNS servers for the container to use." + items: + type: "string" + DnsOptions: + type: "array" + description: "A list of DNS options." + items: + type: "string" + DnsSearch: + type: "array" + description: "A list of DNS search domains." + items: + type: "string" + ExtraHosts: + type: "array" + description: | + A list of hostnames/IP mappings to add to the container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. + items: + type: "string" + GroupAdd: + type: "array" + description: "A list of additional groups that the container process will run as." + items: + type: "string" + IpcMode: + type: "string" + description: "IPC namespace to use for the container." + Cgroup: + type: "string" + description: "Cgroup to use for the container." + Links: + type: "array" + description: "A list of links for the container in the form `container_name:alias`." + items: + type: "string" + OomScoreAdj: + type: "integer" + description: "An integer value containing the score given to the container in order to tune OOM killer preferences." 
+ PidMode: + type: "string" + description: | + Set the PID (Process) Namespace mode for the container. It can be either: + + - `"container:<name|id>"`: joins another container's PID namespace + - `"host"`: use the host's PID namespace inside the container + Privileged: + type: "boolean" + description: "Gives the container full access to the host." + PublishAllPorts: + type: "boolean" + description: "Allocates a random host port for all of a container's exposed ports." + ReadonlyRootfs: + type: "boolean" + description: "Mount the container's root filesystem as read only." + SecurityOpt: + type: "array" + description: "A list of string values to customize labels for MLS + systems, such as SELinux." + items: + type: "string" + StorageOpt: + type: "object" + description: | + Storage driver options for this container, in the form `{"size": "120G"}`. + additionalProperties: + type: "string" + Tmpfs: + type: "object" + description: | + A map of container directories which should be replaced by tmpfs mounts, and their corresponding mount options. For example: `{ "/run": "rw,noexec,nosuid,size=65536k" }`. + additionalProperties: + type: "string" + UTSMode: + type: "string" + description: "UTS namespace to use for the container." + UsernsMode: + type: "string" + description: "Sets the usernamespace mode for the container when usernamespace remapping option is enabled." + ShmSize: + type: "integer" + description: "Size of `/dev/shm` in bytes. If omitted, the system uses 64MB." + minimum: 0 + Sysctls: + type: "object" + description: | + A list of kernel parameters (sysctls) to set in the container. For example: `{"net.ipv4.ip_forward": "1"}` + additionalProperties: + type: "string" + Runtime: + type: "string" + description: "Runtime to use with this container." + # Applicable to Windows + ConsoleSize: + type: "array" + description: "Initial console size, as an `[height, width]` array. (Windows only)" + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + Isolation: + type: "string" + description: "Isolation technology of the container. (Windows only)" + enum: + - "default" + - "process" + - "hyperv" + + ContainerConfig: + description: "Configuration for a container that is portable between hosts" + type: "object" + properties: + Hostname: + description: "The hostname to use for the container, as a valid RFC 1123 hostname." + type: "string" + Domainname: + description: "The domain name to use for the container." + type: "string" + User: + description: "The user that commands are run as inside the container." + type: "string" + AttachStdin: + description: "Whether to attach to `stdin`." + type: "boolean" + default: false + AttachStdout: + description: "Whether to attach to `stdout`." + type: "boolean" + default: true + AttachStderr: + description: "Whether to attach to `stderr`." + type: "boolean" + default: true + ExposedPorts: + description: | + An object mapping ports to an empty object in the form: + + `{"<port>/<tcp|udp>": {}}` + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + Tty: + description: "Attach standard streams to a TTY, including `stdin` if it is not closed." + type: "boolean" + default: false + OpenStdin: + description: "Open `stdin`" + type: "boolean" + default: false + StdinOnce: + description: "Close `stdin` after one attached client disconnects" + type: "boolean" + default: false + Env: + description: | + A list of environment variables to set inside the container in the form `["VAR=value", ...]`. 
A variable without `=` is removed from the environment, rather than having an empty value. + type: "array" + items: + type: "string" + Cmd: + description: "Command to run, specified as a string or an array of strings." + type: + - "array" + - "string" + items: + type: "string" + Healthcheck: + $ref: "#/definitions/HealthConfig" + ArgsEscaped: + description: "Command is already escaped (Windows only)" + type: "boolean" + Image: + description: "The name of the image to use when creating the container" + type: "string" + Volumes: + description: "An object mapping mount point paths inside the container to empty objects." + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + WorkingDir: + description: "The working directory for commands to run in." + type: "string" + Entrypoint: + description: | + The entry point for the container as a string or an array of strings. + + If the array consists of exactly one empty string (`[""]`) then the entry point is reset to the system default (i.e., the entry point used by Docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). + type: + - "array" + - "string" + items: + type: "string" + NetworkDisabled: + description: "Disable networking for the container." + type: "boolean" + MacAddress: + description: "MAC address of the container." + type: "string" + OnBuild: + description: "`ONBUILD` metadata that was defined in the image's `Dockerfile`." + type: "array" + items: + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + StopSignal: + description: "Signal to stop a container, as a string or unsigned integer." + type: "string" + default: "SIGTERM" + StopTimeout: + description: "Timeout to stop a container, in seconds." + type: "integer" + default: 10 + Shell: + description: "Shell to use when `RUN`, `CMD`, and `ENTRYPOINT` use a shell." + type: "array" + items: + type: "string" + + NetworkConfig: + description: "TODO: check is correct" + type: "object" + properties: + Bridge: + type: "string" + Gateway: + type: "string" + Address: + type: "string" + IPPrefixLen: + type: "integer" + MacAddress: + type: "string" + PortMapping: + type: "string" + Ports: + type: "array" + items: + $ref: "#/definitions/Port" + + GraphDriverData: + description: "Information about a container's graph driver."
+ type: "object" + required: [Name, Data] + properties: + Name: + type: "string" + x-nullable: false + Data: + type: "object" + x-nullable: false + additionalProperties: + type: "string" + + Image: + type: "object" + required: + - Id + - Parent + - Comment + - Created + - Container + - DockerVersion + - Author + - Architecture + - Os + - Size + - VirtualSize + - GraphDriver + - RootFS + properties: + Id: + type: "string" + x-nullable: false + RepoTags: + type: "array" + items: + type: "string" + RepoDigests: + type: "array" + items: + type: "string" + Parent: + type: "string" + x-nullable: false + Comment: + type: "string" + x-nullable: false + Created: + type: "string" + x-nullable: false + Container: + type: "string" + x-nullable: false + ContainerConfig: + $ref: "#/definitions/ContainerConfig" + DockerVersion: + type: "string" + x-nullable: false + Author: + type: "string" + x-nullable: false + Config: + $ref: "#/definitions/ContainerConfig" + Architecture: + type: "string" + x-nullable: false + Os: + type: "string" + x-nullable: false + OsVersion: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + VirtualSize: + type: "integer" + format: "int64" + x-nullable: false + GraphDriver: + $ref: "#/definitions/GraphDriverData" + RootFS: + type: "object" + required: [Type] + properties: + Type: + type: "string" + x-nullable: false + Layers: + type: "array" + items: + type: "string" + BaseLayer: + type: "string" + Metadata: + type: "object" + properties: + LastTagTime: + type: "string" + format: "dateTime" + + ImageSummary: + type: "object" + required: + - Id + - ParentId + - RepoTags + - RepoDigests + - Created + - Size + - SharedSize + - VirtualSize + - Labels + - Containers + properties: + Id: + type: "string" + x-nullable: false + ParentId: + type: "string" + x-nullable: false + RepoTags: + type: "array" + x-nullable: false + items: + type: "string" + RepoDigests: + type: "array" + x-nullable: false + items: + type: "string" + Created: + type: "integer" + x-nullable: false + Size: + type: "integer" + x-nullable: false + SharedSize: + type: "integer" + x-nullable: false + VirtualSize: + type: "integer" + x-nullable: false + Labels: + type: "object" + x-nullable: false + additionalProperties: + type: "string" + Containers: + x-nullable: false + type: "integer" + + AuthConfig: + type: "object" + properties: + username: + type: "string" + password: + type: "string" + email: + type: "string" + serveraddress: + type: "string" + example: + username: "hannibal" + password: "xxxx" + serveraddress: "https://index.docker.io/v1/" + + ProcessConfig: + type: "object" + properties: + privileged: + type: "boolean" + user: + type: "string" + tty: + type: "boolean" + entrypoint: + type: "string" + arguments: + type: "array" + items: + type: "string" + + Volume: + type: "object" + required: [Name, Driver, Mountpoint, Labels, Scope, Options] + properties: + Name: + type: "string" + description: "Name of the volume." + x-nullable: false + Driver: + type: "string" + description: "Name of the volume driver used by the volume." + x-nullable: false + Mountpoint: + type: "string" + description: "Mount path of the volume on the host." + x-nullable: false + CreatedAt: + type: "string" + format: "dateTime" + description: "Date/Time the volume was created." + Status: + type: "object" + description: | + Low-level details about the volume, provided by the volume driver. + Details are returned as a map with key/value pairs: + `{"key":"value","key2":"value2"}`. 
+ + The `Status` field is optional, and is omitted if the volume driver + does not support this feature. + additionalProperties: + type: "object" + Labels: + type: "object" + description: "User-defined key/value metadata." + x-nullable: false + additionalProperties: + type: "string" + Scope: + type: "string" + description: "The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level." + default: "local" + x-nullable: false + enum: ["local", "global"] + Options: + type: "object" + description: "The driver specific options used when creating the volume." + additionalProperties: + type: "string" + UsageData: + type: "object" + x-nullable: true + required: [Size, RefCount] + description: | + Usage details about the volume. This information is used by the + `GET /system/df` endpoint, and omitted in other endpoints. + properties: + Size: + type: "integer" + default: -1 + description: | + Amount of disk space used by the volume (in bytes). This information + is only available for volumes created with the `"local"` volume + driver. For volumes created with other volume drivers, this field + is set to `-1` ("not available") + x-nullable: false + RefCount: + type: "integer" + default: -1 + description: | + The number of containers referencing this volume. This field + is set to `-1` if the reference-count is not available. + x-nullable: false + + example: + Name: "tardis" + Driver: "custom" + Mountpoint: "/var/lib/docker/volumes/tardis" + Status: + hello: "world" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Scope: "local" + CreatedAt: "2016-06-07T20:31:11.853781916Z" + + Network: + type: "object" + properties: + Name: + type: "string" + Id: + type: "string" + Created: + type: "string" + format: "dateTime" + Scope: + type: "string" + Driver: + type: "string" + EnableIPv6: + type: "boolean" + IPAM: + $ref: "#/definitions/IPAM" + Internal: + type: "boolean" + Attachable: + type: "boolean" + Ingress: + type: "boolean" + Containers: + type: "object" + additionalProperties: + $ref: "#/definitions/NetworkContainer" + Options: + type: "object" + additionalProperties: + type: "string" + Labels: + type: "object" + additionalProperties: + type: "string" + example: + Name: "net01" + Id: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99" + Created: "2016-10-19T04:33:30.360899459Z" + Scope: "local" + Driver: "bridge" + EnableIPv6: false + IPAM: + Driver: "default" + Config: + - Subnet: "172.19.0.0/16" + Gateway: "172.19.0.1" + Options: + foo: "bar" + Internal: false + Attachable: false + Ingress: false + Containers: + 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c: + Name: "test" + EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" + MacAddress: "02:42:ac:13:00:02" + IPv4Address: "172.19.0.2/16" + IPv6Address: "" + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + IPAM: + type: "object" + properties: + Driver: + description: "Name of the IPAM driver to use." 
+ type: "string" + default: "default" + Config: + description: "List of IPAM configuration options, specified as a map: `{\"Subnet\": <CIDR>, \"IPRange\": <CIDR>, \"Gateway\": <IP address>, \"AuxAddress\": <device_name:IP address>}`" + type: "array" + items: + type: "object" + additionalProperties: + type: "string" + Options: + description: "Driver-specific options, specified as a map." + type: "array" + items: + type: "object" + additionalProperties: + type: "string" + NetworkContainer: + type: "object" + properties: + Name: + type: "string" + EndpointID: + type: "string" + MacAddress: + type: "string" + IPv4Address: + type: "string" + IPv6Address: + type: "string" + + BuildInfo: + type: "object" + properties: + id: + type: "string" + stream: + type: "string" + error: + type: "string" + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + + CreateImageInfo: + type: "object" + properties: + error: + type: "string" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + + PushImageInfo: + type: "object" + properties: + error: + type: "string" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + ErrorDetail: + type: "object" + properties: + code: + type: "integer" + message: + type: "string" + ProgressDetail: + type: "object" + properties: + code: + type: "integer" + message: + type: "integer" + + ErrorResponse: + description: "Represents an error." + type: "object" + required: ["message"] + properties: + message: + description: "The error message." + type: "string" + x-nullable: false + example: + message: "Something went wrong." + + IdResponse: + description: "Response to an API call that returns just an Id" + type: "object" + required: ["Id"] + properties: + Id: + description: "The id of the newly created object." + type: "string" + x-nullable: false + + EndpointSettings: + description: "Configuration for a network endpoint."
+ type: "object" + properties: + IPAMConfig: + description: "IPAM configurations for the endpoint" + type: "object" + properties: + IPv4Address: + type: "string" + IPv6Address: + type: "string" + LinkLocalIPs: + type: "array" + items: + type: "string" + Links: + type: "array" + items: + type: "string" + Aliases: + type: "array" + items: + type: "string" + NetworkID: + type: "string" + EndpointID: + type: "string" + Gateway: + type: "string" + IPAddress: + type: "string" + IPPrefixLen: + type: "integer" + IPv6Gateway: + type: "string" + GlobalIPv6Address: + type: "string" + GlobalIPv6PrefixLen: + type: "integer" + format: "int64" + MacAddress: + type: "string" + + PluginMount: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Source, Destination, Type, Options] + properties: + Name: + type: "string" + x-nullable: false + Description: + type: "string" + x-nullable: false + Settable: + type: "array" + items: + type: "string" + Source: + type: "string" + Destination: + type: "string" + x-nullable: false + Type: + type: "string" + x-nullable: false + Options: + type: "array" + items: + type: "string" + + PluginDevice: + type: "object" + required: [Name, Description, Settable, Path] + x-nullable: false + properties: + Name: + type: "string" + x-nullable: false + Description: + type: "string" + x-nullable: false + Settable: + type: "array" + items: + type: "string" + Path: + type: "string" + + PluginEnv: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + Description: + x-nullable: false + type: "string" + Settable: + type: "array" + items: + type: "string" + Value: + type: "string" + + PluginInterfaceType: + type: "object" + x-nullable: false + required: [Prefix, Capability, Version] + properties: + Prefix: + type: "string" + x-nullable: false + Capability: + type: "string" + x-nullable: false + Version: + type: "string" + x-nullable: false + + Plugin: + description: "A plugin for the Engine API" + type: "object" + required: [Settings, Enabled, Config, Name] + properties: + Id: + type: "string" + Name: + type: "string" + x-nullable: false + Enabled: + description: "True when the plugin is running. False when the plugin is not running, only installed." + type: "boolean" + x-nullable: false + Settings: + description: "Settings that can be modified by users." + type: "object" + x-nullable: false + required: [Args, Devices, Env, Mounts] + properties: + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + type: "string" + Args: + type: "array" + items: + type: "string" + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PluginReference: + description: "plugin remote reference used to push/pull the plugin" + type: "string" + x-nullable: false + Config: + description: "The config of a plugin." 
+ type: "object" + x-nullable: false + required: + - Description + - Documentation + - Interface + - Entrypoint + - WorkDir + - Network + - Linux + - PidHost + - PropagatedMount + - IpcHost + - Mounts + - Env + - Args + properties: + DockerVersion: + description: "Docker Version used to create the plugin" + type: "string" + x-nullable: false + Description: + type: "string" + x-nullable: false + Documentation: + type: "string" + x-nullable: false + Interface: + description: "The interface between Docker and the plugin" + x-nullable: false + type: "object" + required: [Types, Socket] + properties: + Types: + type: "array" + items: + $ref: "#/definitions/PluginInterfaceType" + Socket: + type: "string" + x-nullable: false + Entrypoint: + type: "array" + items: + type: "string" + WorkDir: + type: "string" + x-nullable: false + User: + type: "object" + x-nullable: false + properties: + UID: + type: "integer" + format: "uint32" + GID: + type: "integer" + format: "uint32" + Network: + type: "object" + x-nullable: false + required: [Type] + properties: + Type: + x-nullable: false + type: "string" + Linux: + type: "object" + x-nullable: false + required: [Capabilities, AllowAllDevices, Devices] + properties: + Capabilities: + type: "array" + items: + type: "string" + AllowAllDevices: + type: "boolean" + x-nullable: false + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PropagatedMount: + type: "string" + x-nullable: false + IpcHost: + type: "boolean" + x-nullable: false + PidHost: + type: "boolean" + x-nullable: false + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + $ref: "#/definitions/PluginEnv" + Args: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + Description: + x-nullable: false + type: "string" + Settable: + type: "array" + items: + type: "string" + Value: + type: "array" + items: + type: "string" + rootfs: + type: "object" + properties: + type: + type: "string" + diff_ids: + type: "array" + items: + type: "string" + example: + Id: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" + Name: "tiborvass/sample-volume-plugin" + Tag: "latest" + Active: true + Settings: + Env: + - "DEBUG=0" + Args: null + Devices: null + Config: + Description: "A sample volume plugin for Docker" + Documentation: "https://docs.docker.com/engine/extend/plugins/" + Interface: + Types: + - "docker.volumedriver/1.0" + Socket: "plugins.sock" + Entrypoint: + - "/usr/bin/sample-volume-plugin" + - "/data" + WorkDir: "" + User: {} + Network: + Type: "" + Linux: + Capabilities: null + AllowAllDevices: false + Devices: null + Mounts: null + PropagatedMount: "/data" + Env: + - Name: "DEBUG" + Description: "If set, prints debug messages" + Settable: null + Value: "0" + Args: + Name: "args" + Description: "command line arguments" + Settable: null + Value: [] + + ObjectVersion: + description: | + The version number of the object such as node, service, etc. This is needed to avoid conflicting writes. + The client must send the version number along with the modified specification when updating these objects. + This approach ensures safe concurrency and determinism in that the change on the object + may not be applied if the version number has changed from the last read. In other words, + if two update requests specify the same base version, only one of the requests can succeed. 
+ As a result, two separate update requests that happen at the same time will not + unintentionally overwrite each other. + type: "object" + properties: + Index: + type: "integer" + format: "int64" + + NodeSpec: + type: "object" + properties: + Name: + description: "Name for the node." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Role: + description: "Role of the node." + type: "string" + enum: + - "worker" + - "manager" + Availability: + description: "Availability of the node." + type: "string" + enum: + - "active" + - "pause" + - "drain" + example: + Availability: "active" + Name: "node-name" + Role: "manager" + Labels: + foo: "bar" + Node: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/NodeSpec" + Description: + type: "object" + properties: + Hostname: + type: "string" + Platform: + type: "object" + properties: + Architecture: + type: "string" + OS: + type: "string" + Resources: + $ref: "#/definitions/ResourceObject" + Engine: + type: "object" + properties: + EngineVersion: + type: "string" + Labels: + type: "object" + additionalProperties: + type: "string" + Plugins: + type: "array" + items: + type: "object" + properties: + Type: + type: "string" + Name: + type: "string" + TLSInfo: + $ref: "#/definitions/TLSInfo" + example: + ID: "24ifsmvkjbyhk" + Version: + Index: 8 + CreatedAt: "2016-06-07T20:31:11.853781916Z" + UpdatedAt: "2016-06-07T20:31:11.999868824Z" + Spec: + Name: "my-node" + Role: "manager" + Availability: "active" + Labels: + foo: "bar" + Description: + Hostname: "bf3067039e47" + Platform: + Architecture: "x86_64" + OS: "linux" + Resources: + NanoCPUs: 4000000000 + MemoryBytes: 8272408576 + GenericResources: + - DiscreteResourceSpec: + Kind: "SSD" + Value: 3 + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID1" + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID2" + Engine: + EngineVersion: "17.04.0" + Labels: + foo: "bar" + Plugins: + - Type: "Volume" + Name: "local" + - Type: "Network" + Name: "bridge" + - Type: "Network" + Name: "null" + - Type: "Network" + Name: "overlay" + Status: + State: "ready" + Addr: "172.17.0.2" + ManagerStatus: + Leader: true + Reachability: "reachable" + Addr: "172.17.0.2:2377" + TLSInfo: + TrustRoot: | + -----BEGIN CERTIFICATE----- + MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw + EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 + MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH + A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf + 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB + Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO + PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz + pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H + -----END CERTIFICATE----- + CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" + CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" + TLSInfo: + description: "Information about the issuer of leaf TLS certificates and the trusted root CA certificate" + type: "object" + properties: + TrustRoot: + description: "The root CA certificate(s) that are used to validate leaf TLS certificates" + type: "string" + CertIssuerSubject: + description: "The
base64-url-safe-encoded raw subject bytes of the issuer" + type: "string" + CertIssuerPublicKey: + description: "The base64-url-safe-encoded raw public key bytes of the issuer" + type: "string" + example: + TrustRoot: | + -----BEGIN CERTIFICATE----- + MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw + EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 + MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH + A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf + 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB + Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO + PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz + pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H + -----END CERTIFICATE----- + CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" + CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" + SwarmSpec: + description: "User modifiable swarm configuration." + type: "object" + properties: + Name: + description: "Name of the swarm." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Orchestration: + description: "Orchestration configuration." + type: "object" + properties: + TaskHistoryRetentionLimit: + description: "The number of historic tasks to keep per instance or node. If negative, never remove completed or failed tasks." + type: "integer" + format: "int64" + Raft: + description: "Raft configuration." + type: "object" + properties: + SnapshotInterval: + description: "The number of log entries between snapshots." + type: "integer" + format: "int64" + KeepOldSnapshots: + description: "The number of snapshots to keep beyond the current snapshot." + type: "integer" + format: "int64" + LogEntriesForSlowFollowers: + description: "The number of log entries to keep around to sync up slow followers after a snapshot is created." + type: "integer" + format: "int64" + ElectionTick: + description: | + The number of ticks that a follower will wait for a message from the leader before becoming a candidate and starting an election. `ElectionTick` must be greater than `HeartbeatTick`. + + A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. + type: "integer" + HeartbeatTick: + description: | + The number of ticks between heartbeats. Every HeartbeatTick ticks, the leader will send a heartbeat to the followers. + + A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. + type: "integer" + Dispatcher: + description: "Dispatcher configuration." + type: "object" + properties: + HeartbeatPeriod: + description: "The delay for an agent to send a heartbeat to the dispatcher." + type: "integer" + format: "int64" + CAConfig: + description: "CA configuration." + type: "object" + properties: + NodeCertExpiry: + description: "The duration node certificates are issued for." + type: "integer" + format: "int64" + ExternalCAs: + description: "Configuration for forwarding signing requests to an external certificate authority." + type: "array" + items: + type: "object" + properties: + Protocol: + description: "Protocol for communication with the external CA (currently only `cfssl` is supported)." 
+ type: "string" + enum: + - "cfssl" + default: "cfssl" + URL: + description: "URL where certificate signing requests should be sent." + type: "string" + Options: + description: "An object with key/value pairs that are interpreted as protocol-specific options for the external CA driver." + type: "object" + additionalProperties: + type: "string" + CACert: + description: "The root CA certificate (in PEM format) this external CA uses to issue TLS certificates (assumed to be the current swarm root CA certificate if not provided)." + type: "string" + SigningCACert: + description: "The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format." + type: "string" + SigningCAKey: + description: "The desired signing CA key for all swarm node TLS leaf certificates, in PEM format." + type: "string" + ForceRotate: + description: "An integer used to force swarm to generate a new signing CA certificate and key, if none have been specified in `SigningCACert` and `SigningCAKey`" + EncryptionConfig: + description: "Parameters related to encryption-at-rest." + type: "object" + properties: + AutoLockManagers: + description: "If set, generate a key and use it to lock data stored on the managers." + type: "boolean" + TaskDefaults: + description: "Defaults for creating tasks in this cluster." + type: "object" + properties: + LogDriver: + description: | + The log driver to use for tasks created in the orchestrator if unspecified by a service. + + Updating this value will only have an effect on new tasks. Old tasks will continue to use their previously configured log driver until recreated. + type: "object" + properties: + Name: + type: "string" + Options: + type: "object" + additionalProperties: + type: "string" + example: + Name: "default" + Orchestration: + TaskHistoryRetentionLimit: 10 + Raft: + SnapshotInterval: 10000 + LogEntriesForSlowFollowers: 500 + HeartbeatTick: 1 + ElectionTick: 3 + Dispatcher: + HeartbeatPeriod: 5000000000 + CAConfig: + NodeCertExpiry: 7776000000000000 + JoinTokens: + Worker: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" + Manager: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + EncryptionConfig: + AutoLockManagers: false + # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but + # without `JoinTokens`. + ClusterInfo: + type: "object" + properties: + ID: + description: "The ID of the swarm." + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/SwarmSpec" + TLSInfo: + $ref: "#/definitions/TLSInfo" + RootRotationInProgress: + description: "Whether there is currently a root CA rotation in progress for the swarm" + type: "boolean" + TaskSpec: + description: "User modifiable task configuration." + type: "object" + properties: + PluginSpec: + type: "object" + description: "Invalid when specified with `ContainerSpec`." + properties: + Name: + description: "The name or 'alias' to use for the plugin." + type: "string" + Remote: + description: "The plugin image reference to use." + type: "string" + Disabled: + description: "Disable the plugin once scheduled." + type: "boolean" + PluginPrivilege: + type: "array" + items: + description: "Describes a permission accepted by the user upon installing the plugin."
+ type: "object" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + ContainerSpec: + type: "object" + description: "Invalid when specified with `PluginSpec`." + properties: + Image: + description: "The image name to use for the container" + type: "string" + Labels: + description: "User-defined key/value data." + type: "object" + additionalProperties: + type: "string" + Command: + description: "The command to be run in the image." + type: "array" + items: + type: "string" + Args: + description: "Arguments to the command." + type: "array" + items: + type: "string" + Hostname: + description: "The hostname to use for the container, as a valid RFC 1123 hostname." + type: "string" + Env: + description: "A list of environment variables in the form `VAR=value`." + type: "array" + items: + type: "string" + Dir: + description: "The working directory for commands to run in." + type: "string" + User: + description: "The user inside the container." + type: "string" + Groups: + type: "array" + description: "A list of additional groups that the container process will run as." + items: + type: "string" + Privileges: + type: "object" + description: "Security options for the container" + properties: + CredentialSpec: + type: "object" + description: "CredentialSpec for managed service account (Windows only)" + properties: + File: + type: "string" + description: | + Load credential spec from this file. The file is read by the daemon, and must be present in the + `CredentialSpecs` subdirectory in the docker data directory, which defaults to + `C:\ProgramData\Docker\` on Windows. + + For example, specifying `spec.json` loads `C:\ProgramData\Docker\CredentialSpecs\spec.json`. + +
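+ # A minimal `TaskSpec` as it might appear in a `POST /services/create` request
+ # body, using the `ContainerSpec` fields documented above. This is an
+ # illustrative sketch; the image name, arguments, directory, and user are
+ # placeholders.
+ #
+ # TaskSpec:
+ #   ContainerSpec:
+ #     Image: "nginx:alpine"
+ #     Command: ["nginx"]
+ #     Args: ["-g", "daemon off;"]
+ #     Env: ["VAR=value"]
+ #     Dir: "/srv"
+ #     User: "nobody"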